=== added directory 'src/github.com/Azure' === added directory 'src/github.com/Azure/azure-sdk-for-go' === added file 'src/github.com/Azure/azure-sdk-for-go/.gitignore' --- src/github.com/Azure/azure-sdk-for-go/.gitignore 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# Editor swap files +*.swp +*~ +.DS_Store === added file 'src/github.com/Azure/azure-sdk-for-go/.travis.yml' --- src/github.com/Azure/azure-sdk-for-go/.travis.yml 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/.travis.yml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +sudo: false + +language: go + +before_script: + - go get -u golang.org/x/tools/cmd/vet + - go get -u github.com/golang/lint/golint + +go: tip +script: + - test -z "$(gofmt -s -l $(find ./arm/* -type d -print) | tee /dev/stderr)" + - test -z "$(gofmt -s -l -w management | tee /dev/stderr)" + - test -z "$(gofmt -s -l -w storage | tee /dev/stderr)" + - go build -v ./... + - test -z "$(go vet $(find ./arm/* -type d -print) | tee /dev/stderr)" + - test -z "$(golint ./arm/... | tee /dev/stderr)" + - go test -v ./storage/... -check.v + - test -z "$(golint ./storage/... | tee /dev/stderr)" + - go vet ./storage/... + - go test -v ./management/... + - test -z "$(golint ./management/... | grep -v 'should have comment' | grep -v 'stutters' | tee /dev/stderr)" + - go vet ./management/... === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps' === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json' --- src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/Godeps.json 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +{ + "ImportPath": "github.com/Azure/azure-sdk-for-go", + "GoVersion": "go1.5.1", + "Packages": [ + "./management/...", + "./arm/..." + ], + "Deps": [ + { + "ImportPath": "github.com/Azure/go-autorest/autorest", + "Comment": "v1.0.1", + "Rev": "1cebd0377050996f91857917c73f00a377e64262" + }, + { + "ImportPath": "golang.org/x/crypto/pkcs12", + "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784" + } + ] +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/Readme' --- src/github.com/Azure/azure-sdk-for-go/Godeps/Readme 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/Readme 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. 
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace'
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/.gitignore'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/.gitignore 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/.gitignore 2016-03-22 15:18:22 +0000
@@ -0,0 +1,2 @@
+/pkg
+/bin
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src'
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com'
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure'
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest'
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest'
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,163 @@
+/*
+Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
+and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
+generated Go code.
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+	req, err := Prepare(&http.Request{},
+		token.WithAuthorization())
+
+	resp, err := Send(req,
+		WithLogging(logger),
+		DoErrorIfStatusCode(http.StatusInternalServerError),
+		DoCloseIfError(),
+		DoRetryForAttempts(5, time.Second))
+
+	err = Respond(resp,
+		ByClosing())
+
+Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+	req, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("a"),
+		WithPath("b"),
+		WithPath("c"))
+
+will set the URL to:
+
+	https://microsoft.com/a/b/c
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple go-routines, and a single Sender shared among multiple sending go-routines,
+all bound together by means of input / output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., ByUnmarshallingJSON) is likely incorrect.
+
+Lastly, the Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure
+correct parsing and formatting.
+
+Errors raised by autorest objects and methods will conform to the autorest.Error interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
+*/
+package autorest
+
+import (
+	"net/http"
+	"time"
+)
+
+const (
+	headerLocation   = "Location"
+	headerRetryAfter = "Retry-After"
+)
+
+// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
+// and false otherwise.
+func ResponseHasStatusCode(resp *http.Response, codes ...int) bool {
+	return containsInt(codes, resp.StatusCode)
+}
+
+// ResponseRequiresPolling returns true if the passed http.Response requires a polling follow-up
+// request (as determined by the status code being in the passed set, which defaults to HTTP 202
+// Accepted).
+func ResponseRequiresPolling(resp *http.Response, codes ...int) bool {
+	if resp.StatusCode == http.StatusOK {
+		return false
+	}
+
+	if len(codes) == 0 {
+		codes = []int{http.StatusAccepted}
+	}
+
+	return ResponseHasStatusCode(resp, codes...)
+}
+
+// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. If
+// it successfully creates the request, it will also close the body of the passed response,
+// otherwise the body remains open.
+func NewPollingRequest(resp *http.Response, authorizer Authorizer) (*http.Request, error) {
+	location := GetPollingLocation(resp)
+	if location == "" {
+		return nil, NewError("autorest", "NewPollingRequest", "Location header missing from response that requires polling")
+	}
+
+	req, err := Prepare(&http.Request{},
+		AsGet(),
+		WithBaseURL(location),
+		authorizer.WithAuthorization())
+	if err != nil {
+		return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", "Failure creating poll request to %s", location)
+	}
+
+	Respond(resp,
+		ByClosing())
+
+	return req, nil
+}
+
+// GetPollingDelay extracts the polling delay from the Retry-After header of the passed response. If
+// the header is absent or is malformed, it will return the supplied default delay time.Duration.
+func GetPollingDelay(resp *http.Response, defaultDelay time.Duration) time.Duration {
+	retry := resp.Header.Get(headerRetryAfter)
+	if retry == "" {
+		return defaultDelay
+	}
+
+	d, err := time.ParseDuration(retry + "s")
+	if err != nil {
+		return defaultDelay
+	}
+
+	return d
+}
+
+// GetPollingLocation retrieves the polling URL from the Location header of the passed response.
+func GetPollingLocation(resp *http.Response) string {
+	return resp.Header.Get(headerLocation)
+}
+
+// PollForAttempts will retry the passed http.Request until it receives an HTTP status code outside
+// the passed set or has made the specified number of attempts. The set of status codes defaults to
+// HTTP 202 Accepted.
+func PollForAttempts(s Sender, req *http.Request, defaultDelay time.Duration, attempts int, codes ...int) (*http.Response, error) {
+	return SendWithSender(
+		decorateForPolling(s, defaultDelay, codes...),
+		req,
+		DoRetryForAttempts(attempts, time.Duration(0)))
+}
+
+// PollForDuration will retry the passed http.Request until it receives an HTTP status code outside
+// the passed set or the total time meets or exceeds the specified duration. The set of status codes
+// defaults to HTTP 202 Accepted.
+func PollForDuration(s Sender, req *http.Request, defaultDelay time.Duration, total time.Duration, codes ...int) (*http.Response, error) {
+	return SendWithSender(
+		decorateForPolling(s, defaultDelay, codes...),
+		req,
+		DoRetryForDuration(total, time.Duration(0)))
+}
+
+func decorateForPolling(s Sender, defaultDelay time.Duration, codes ...int) Sender {
+	if len(codes) == 0 {
+		codes = []int{http.StatusAccepted}
+	}
+
+	return DecorateSender(s,
+		AfterRetryDelay(defaultDelay),
+		DoErrorIfStatusCode(codes...),
+		DoCloseIfError())
+}
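The pipeline and polling helpers above compose as the package comment describes. The following
sketch is illustrative rather than part of the vendored sources: it assumes the canonical import
path github.com/Azure/go-autorest/autorest (rather than the Godeps path) and a placeholder
endpoint URL, and it relies on *http.Client satisfying the package's Sender interface.

	package main

	import (
		"fmt"
		"net/http"
		"time"

		"github.com/Azure/go-autorest/autorest"
	)

	func main() {
		// Prepare and send a GET; the endpoint URL is a placeholder.
		req, _ := autorest.Prepare(&http.Request{},
			autorest.AsGet(),
			autorest.WithBaseURL("https://example.com/operations/1"))

		resp, err := autorest.Send(req,
			autorest.DoErrorIfStatusCode(http.StatusInternalServerError),
			autorest.DoCloseIfError())
		if err != nil {
			fmt.Println(err)
			return
		}

		// On 202 Accepted, poll the Location header for at most five
		// attempts, honoring any Retry-After value the service supplied.
		if autorest.ResponseRequiresPolling(resp) {
			pollReq, err := autorest.NewPollingRequest(resp, autorest.NullAuthorizer{})
			if err != nil {
				fmt.Println(err)
				return
			}
			resp, err = autorest.PollForAttempts(http.DefaultClient, pollReq,
				autorest.GetPollingDelay(resp, 30*time.Second), 5)
			if err != nil {
				fmt.Println(err)
				return
			}
		}

		autorest.Respond(resp,
			autorest.ByClosing())
	}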
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/autorest_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,348 @@
+package autorest
+
+import (
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+func TestResponseHasStatusCode(t *testing.T) {
+	codes := []int{http.StatusOK, http.StatusAccepted}
+	resp := &http.Response{StatusCode: http.StatusAccepted}
+	if !ResponseHasStatusCode(resp, codes...) {
+		t.Errorf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes)
+	}
+}
+
+func TestResponseHasStatusCodeNotPresent(t *testing.T) {
+	codes := []int{http.StatusOK, http.StatusAccepted}
+	resp := &http.Response{StatusCode: http.StatusInternalServerError}
+	if ResponseHasStatusCode(resp, codes...) {
+		t.Errorf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes)
+	}
+}
+
+func TestResponseRequiresPollingIgnoresSuccess(t *testing.T) {
+	resp := mocks.NewResponse()
+
+	if ResponseRequiresPolling(resp) {
+		t.Error("autorest: ResponseRequiresPolling did not ignore a successful response")
+	}
+}
+
+func TestResponseRequiresPollingLeavesBodyOpen(t *testing.T) {
+	resp := mocks.NewResponse()
+
+	ResponseRequiresPolling(resp)
+	if !resp.Body.(*mocks.Body).IsOpen() {
+		t.Error("autorest: ResponseRequiresPolling closed the response body while ignoring a successful response")
+	}
+}
+
+func TestResponseRequiresPollingDefaultsToAcceptedStatusCode(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	if !ResponseRequiresPolling(resp) {
+		t.Error("autorest: ResponseRequiresPolling failed to create a request for default 202 Accepted status code")
+	}
+}
+
+func TestResponseRequiresPollingReturnsFalseForUnexpectedStatusCodes(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
+	mocks.SetAcceptedHeaders(resp)
+
+	if ResponseRequiresPolling(resp) {
+		t.Error("autorest: ResponseRequiresPolling did not return false when ignoring a status code")
+	}
+}
+
+func TestNewPollingRequestLeavesBodyOpenWhenLocationHeaderIsMissing(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
+
+	NewPollingRequest(resp, NullAuthorizer{})
+	if !resp.Body.(*mocks.Body).IsOpen() {
+		t.Error("autorest: NewPollingRequest closed the http.Request Body when the Location header was missing")
+	}
+}
+
+func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+	if req != nil {
+		t.Error("autorest: NewPollingRequest returned an http.Request when the Location header was missing")
+	}
+}
+
+func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	_, err := NewPollingRequest(resp, mockFailingAuthorizer{})
+	if err == nil {
+		t.Error("autorest: NewPollingRequest failed to return an error when Prepare fails")
+	}
+}
+
+func TestNewPollingRequestLeavesBodyOpenWhenPrepareFails(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	resp.Header.Set(http.CanonicalHeaderKey(headerLocation), testBadURL)
+
+	_, err := NewPollingRequest(resp, NullAuthorizer{})
+	if !resp.Body.(*mocks.Body).IsOpen() {
+		t.Errorf("autorest: NewPollingRequest closed the http.Request Body when Prepare returned an error (%v)", err)
+	}
+}
+
+func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	resp.Header.Set(http.CanonicalHeaderKey(headerLocation), testBadURL)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+	if req != nil {
+		t.Error("autorest: NewPollingRequest returned an http.Request when Prepare failed")
+	}
+}
+
+func TestNewPollingRequestClosesTheResponseBody(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	NewPollingRequest(resp, NullAuthorizer{})
+	if resp.Body.(*mocks.Body).IsOpen() {
+		t.Error("autorest: NewPollingRequest failed to close the response body when creating a new request")
+	}
+}
+
+func TestNewPollingRequestReturnsAGetRequest(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+	if req.Method != "GET" {
+		t.Errorf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method)
+	}
+}
+
+func TestNewPollingRequestProvidesTheURL(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+	if req.URL.String() != mocks.TestURL {
+		t.Errorf("autorest: NewPollingRequest did not create an HTTP request with the expected URL -- received %v, expected %v", req.URL, mocks.TestURL)
+	}
+}
+
+func TestNewPollingRequestAppliesAuthorization(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	req, _ := NewPollingRequest(resp, mockAuthorizer{})
+	if req.Header.Get(http.CanonicalHeaderKey(headerAuthorization)) != testAuthorizationHeader {
+		t.Errorf("autorest: NewPollingRequest did not apply authorization -- received %v, expected %v",
+			req.Header.Get(http.CanonicalHeaderKey(headerAuthorization)), testAuthorizationHeader)
+	}
+}
+
+func TestGetPollingLocation(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	l := GetPollingLocation(resp)
+	if len(l) == 0 {
+		t.Errorf("autorest: GetPollingLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l)
+	}
+}
+func TestGetPollingLocationReturnsEmptyStringForMissingLocation(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+
+	l := GetPollingLocation(resp)
+	if len(l) != 0 {
+		t.Errorf("autorest: GetPollingLocation returned a value without a Location header -- received %v", l)
+	}
+}
+
+func TestGetPollingDelay(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	d := GetPollingDelay(resp, DefaultPollingDelay)
+	if d != mocks.TestDelay {
+		t.Errorf("autorest: GetPollingDelay failed to return the expected delay -- expected %v, received %v", mocks.TestDelay, d)
+	}
+}
+
+func TestGetPollingDelayReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+
+	d := GetPollingDelay(resp, DefaultPollingDelay)
+	if d != DefaultPollingDelay {
+		t.Errorf("autorest: GetPollingDelay failed to return the default delay for a missing Retry-After header -- expected %v, received %v",
+			DefaultPollingDelay, d)
+	}
+}
+
+func TestGetPollingDelayReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) {
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	resp.Header.Set(http.CanonicalHeaderKey(headerRetryAfter), "a very bad non-integer value")
+
+	d := GetPollingDelay(resp, DefaultPollingDelay)
+	if d != DefaultPollingDelay {
+		t.Errorf("autorest: GetPollingDelay failed to return the default delay for a malformed Retry-After header -- expected %v, received %v",
+			DefaultPollingDelay, d)
+	}
+}
+
+func TestPollForAttemptsStops(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+
+	PollForAttempts(client, req, time.Duration(0), 5)
+	if client.Attempts() != 5 {
+		t.Errorf("autorest: PollForAttempts stopped incorrectly -- expected %v attempts, actual attempts were %v", 5, client.Attempts())
+	}
+}
+
+func TestPollForDurationsStops(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	d := 10 * time.Millisecond
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+
+	start := time.Now()
+	PollForDuration(client, req, time.Duration(0), d)
+	if time.Now().Sub(start) < d {
+		t.Error("autorest: PollForDuration stopped too soon")
+	}
+}
+
+func TestPollForDurationsStopsWithinReason(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	d := 10 * time.Millisecond
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	mocks.SetRetryHeader(resp, d)
+	client.SetResponse(resp)
+
+	req, _ := NewPollingRequest(resp, NullAuthorizer{})
+
+	start := time.Now()
+	PollForDuration(client, req, time.Duration(0), d)
+	if time.Now().Sub(start) > (time.Duration(5.0) * d) {
+		t.Error("autorest: PollForDuration took too long to stop -- exceeded 5 times expected duration")
+	}
+}
+
+func TestPollingHonorsDelay(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	d1 := 10 * time.Millisecond
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	mocks.SetRetryHeader(resp, d1)
+	client.SetResponse(resp)
+
+	req, _ := 
NewPollingRequest(resp, NullAuthorizer{}) + + start := time.Now() + PollForAttempts(client, req, time.Duration(0), 2) + d2 := time.Now().Sub(start) + if d2 < d1 { + t.Errorf("autorest: Polling failed to honor delay -- expected %v, actual %v", d1.Seconds(), d2.Seconds()) + } +} + +func TestPollingReturnsErrorForExpectedStatusCode(t *testing.T) { + client := mocks.NewSender() + + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + client.SetResponse(resp) + + req, _ := NewPollingRequest(resp, NullAuthorizer{}) + + resp, err := PollForAttempts(client, req, time.Duration(0), 1, http.StatusAccepted) + if err == nil { + t.Error("autorest: Polling failed to emit error for known status code") + } +} + +func TestPollingReturnsNoErrorForUnexpectedStatusCode(t *testing.T) { + client := mocks.NewSender() + client.EmitStatus("500 InternalServerError", http.StatusInternalServerError) + + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, NullAuthorizer{}) + + resp, err := PollForAttempts(client, req, time.Duration(0), 1, http.StatusAccepted) + if err != nil { + t.Error("autorest: Polling emitted error for unknown status code") + } +} + +func TestPollingReturnsDefaultsToAcceptedStatusCode(t *testing.T) { + client := mocks.NewSender() + + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + client.SetResponse(resp) + + req, _ := NewPollingRequest(resp, NullAuthorizer{}) + + resp, err := PollForAttempts(client, req, time.Duration(0), 1) + if err == nil { + t.Error("autorest: Polling failed to default to HTTP 202") + } +} + +func TestPollingLeavesFinalBodyOpen(t *testing.T) { + client := mocks.NewSender() + client.EmitStatus("500 InternalServerError", http.StatusInternalServerError) + + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, NullAuthorizer{}) + + resp, _ = PollForAttempts(client, req, time.Duration(0), 1) + if !resp.Body.(*mocks.Body).IsOpen() { + t.Error("autorest: Polling unexpectedly closed the response body") + } +} + +func TestDecorateForPollingCloseBodyOnEachAttempt(t *testing.T) { + client := mocks.NewSender() + + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + client.SetResponse(resp) + + req, _ := NewPollingRequest(resp, NullAuthorizer{}) + resp, _ = PollForAttempts(client, req, time.Duration(0), 5) + if resp.Body.(*mocks.Body).CloseAttempts() < 5 { + t.Errorf("autorest: decorateForPolling failed to close the response Body between requests -- expected %v, received %v", + 5, resp.Body.(*mocks.Body).CloseAttempts()) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure' === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +/* +Package azure provides Azure-specific implementations used with AutoRest. + +See the included examples for more detail. 
+*/
+package azure
+
+import (
+	"net/http"
+	"strconv"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that the UUID accompanies the http.Response.
+func WithReturningClientID(uuid string) autorest.PrepareDecorator {
+	preparer := autorest.CreatePreparer(
+		WithClientID(uuid),
+		WithReturnClientID(true))
+
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			return preparer.Prepare(r)
+		})
+	}
+}
+
+// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
+func WithClientID(uuid string) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderClientID, uuid)
+}
+
+// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-return-client-request-id whose boolean value indicates if the value of the
+// x-ms-client-request-id header should be included in the http.Response.
+func WithReturnClientID(b bool) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
+}
+
+// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
+// http.Request sent to the service (and returned in the http.Response).
+func ExtractClientID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderClientID, resp)
+}
+
+// ExtractRequestID extracts the Azure server generated request identifier from the
+// x-ms-request-id header.
+func ExtractRequestID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/azure_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,97 @@
+package azure
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"testing"
+
+	. "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+// Use a Client Inspector to set the request identifier.
+func ExampleWithClientID() {
+	uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
+	req, _ := Prepare(&http.Request{},
+		AsGet(),
+		WithBaseURL("https://microsoft.com/a/b/c/"))
+
+	c := Client{Sender: mocks.NewSender()}
+	c.RequestInspector = WithReturningClientID(uuid)
+
+	c.Send(req)
+	fmt.Printf("Inspector added the %s header with the value %s\n",
+		HeaderClientID, req.Header.Get(HeaderClientID))
+	fmt.Printf("Inspector added the %s header with the value %s\n",
+		HeaderReturnClientID, req.Header.Get(HeaderReturnClientID))
+	// Output:
+	// Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6
+	// Inspector added the x-ms-return-client-request-id header with the value true
+}
+
+func TestWithReturningClientIDReturnsError(t *testing.T) {
+	var errIn error
+	uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
+	_, errOut := Prepare(&http.Request{},
+		withErrorPrepareDecorator(&errIn),
+		WithReturningClientID(uuid))
+
+	if errOut == nil || errIn != errOut {
+		t.Errorf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)",
+			errIn, errOut)
+	}
+}
+
+func TestWithClientID(t *testing.T) {
+	uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
+	req, _ := Prepare(&http.Request{},
+		WithClientID(uuid))
+
+	if req.Header.Get(HeaderClientID) != uuid {
+		t.Errorf("azure: WithClientID failed to set %s -- expected %s, received %s",
+			HeaderClientID, uuid, req.Header.Get(HeaderClientID))
+	}
+}
+
+func TestWithReturnClientID(t *testing.T) {
+	b := false
+	req, _ := Prepare(&http.Request{},
+		WithReturnClientID(b))
+
+	if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) {
+		t.Errorf("azure: WithReturnClientID failed to set %s -- expected %s, received %s",
+			HeaderReturnClientID, strconv.FormatBool(b), req.Header.Get(HeaderReturnClientID))
+	}
+}
+
+func TestExtractClientID(t *testing.T) {
+	uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
+	resp := mocks.NewResponse()
+	mocks.SetResponseHeader(resp, HeaderClientID, uuid)
+
+	if ExtractClientID(resp) != uuid {
+		t.Errorf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s",
+			HeaderClientID, uuid, ExtractClientID(resp))
+	}
+}
+
+func TestExtractRequestID(t *testing.T) {
+	uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6"
+	resp := mocks.NewResponse()
+	mocks.SetResponseHeader(resp, HeaderRequestID, uuid)
+
+	if ExtractRequestID(resp) != uuid {
+		t.Errorf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s",
+			HeaderRequestID, uuid, ExtractRequestID(resp))
+	}
+}
+
+func withErrorPrepareDecorator(e *error) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			*e = fmt.Errorf("autorest: Faux Prepare Error")
+			return r, *e
+		})
+	}
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,183 @@
+package azure
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+	oauthURL = 
"https://login.microsoftonline.com/{tenantID}/oauth2/{requestType}?api-version=1.0" + tokenBaseDate = "1970-01-01T00:00:00Z" + + // AzureResourceManagerScope is the OAuth scope for the Azure Resource Manager. + AzureResourceManagerScope = "https://management.azure.com/" +) + +var expirationBase time.Time + +func init() { + expirationBase, _ = time.Parse(time.RFC3339, tokenBaseDate) +} + +// Token encapsulates the access token used to authorize Azure requests. +type Token struct { + AccessToken string `json:"access_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + return expirationBase.Add(time.Duration(s) * time.Second).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the AccessToken of the Token. +func (t *Token) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return (autorest.WithBearerAuthorization(t.AccessToken)(p)).Prepare(r) + }) + } +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + Token + + clientID string + clientSecret string + resource string + tenantID string + autoRefresh bool + refreshWithin time.Duration + sender autorest.Sender +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(id string, secret string, tenantID string, resource string) (*ServicePrincipalToken, error) { + spt := &ServicePrincipalToken{ + clientID: id, + clientSecret: secret, + resource: resource, + tenantID: tenantID, + autoRefresh: true, + refreshWithin: defaultRefresh, + sender: &http.Client{}} + return spt, nil +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin). +func (spt *ServicePrincipalToken) EnsureFresh() error { + if spt.WillExpireIn(spt.refreshWithin) { + return spt.Refresh() + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. 
+func (spt *ServicePrincipalToken) Refresh() error {
+	p := map[string]interface{}{
+		"tenantID":    spt.tenantID,
+		"requestType": "token",
+	}
+
+	v := url.Values{}
+	v.Set("client_id", spt.clientID)
+	v.Set("client_secret", spt.clientSecret)
+	v.Set("grant_type", "client_credentials")
+	v.Set("resource", spt.resource)
+
+	req, _ := autorest.Prepare(&http.Request{},
+		autorest.AsPost(),
+		autorest.AsFormURLEncoded(),
+		autorest.WithBaseURL(oauthURL),
+		autorest.WithPathParameters(p),
+		autorest.WithFormData(v))
+
+	resp, err := autorest.SendWithSender(spt.sender, req)
+	if err != nil {
+		return autorest.NewErrorWithError(err,
+			"azure.ServicePrincipalToken", "Refresh", "Failure sending request for Service Principal %s",
+			spt.clientID)
+	}
+
+	err = autorest.Respond(resp,
+		autorest.WithErrorUnlessOK(),
+		autorest.ByUnmarshallingJSON(spt),
+		autorest.ByClosing())
+	if err != nil {
+		return autorest.NewErrorWithError(err,
+			"azure.ServicePrincipalToken", "Refresh", "Failure handling response to Service Principal %s request",
+			spt.clientID)
+	}
+
+	return nil
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+	spt.autoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+	spt.refreshWithin = d
+}
+
+// SetSender sets the autorest.Sender used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s autorest.Sender) {
+	spt.sender = s
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the AccessToken of the ServicePrincipalToken.
+//
+// By default, the token will automatically refresh if nearly expired (as determined by the
+// RefreshWithin interval). Use the SetAutoRefresh method to enable or disable automatically
+// refreshing tokens.
+func (spt *ServicePrincipalToken) WithAuthorization() autorest.PrepareDecorator {
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			if spt.autoRefresh {
+				err := spt.EnsureFresh()
+				if err != nil {
+					return r, autorest.NewErrorWithError(err,
+						"azure.ServicePrincipalToken", "WithAuthorization", "Failed to refresh Service Principal Token for request to %s",
+						r.URL)
+				}
+			}
+			return (autorest.WithBearerAuthorization(spt.AccessToken)(p)).Prepare(r)
+		})
+	}
+}
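For orientation, a minimal sketch of acquiring and using a ServicePrincipalToken follows. It is
not part of the vendored sources: the credentials and the request URL are placeholders, the
import paths are the canonical ones, and running it would issue a live token request to Azure AD.

	package main

	import (
		"fmt"
		"net/http"

		"github.com/Azure/go-autorest/autorest"
		"github.com/Azure/go-autorest/autorest/azure"
	)

	func main() {
		// Placeholder Azure AD application credentials.
		spt, err := azure.NewServicePrincipalToken(
			"client-id", "client-secret", "tenant-id",
			azure.AzureResourceManagerScope)
		if err != nil {
			fmt.Println(err)
			return
		}

		// WithAuthorization refreshes the token if it is near expiration
		// (via EnsureFresh) and then injects the Bearer Authorization header.
		req, err := autorest.Prepare(&http.Request{},
			autorest.AsGet(),
			autorest.WithBaseURL("https://management.azure.com/subscriptions"),
			spt.WithAuthorization())
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println(req.Header.Get("Authorization"))
	}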
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure/token_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,384 @@
+package azure
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+const (
+	defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource"
+)
+
+func TestTokenExpires(t *testing.T) {
+	tt := time.Now().Add(5 * time.Second)
+	tk := newTokenExpiresAt(tt)
+
+	if tk.Expires().Equal(tt) {
+		t.Errorf("azure: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt)
+	}
+}
+
+func TestTokenIsExpired(t *testing.T) {
+	tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second))
+
+	if !tk.IsExpired() {
+		t.Errorf("azure: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v",
+			time.Now().UTC(), tk.Expires())
+	}
+}
+
+func TestTokenIsExpiredUninitialized(t *testing.T) {
+	tk := &Token{}
+
+	if !tk.IsExpired() {
+		t.Errorf("azure: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires())
+	}
+}
+
+func TestTokenIsNotExpired(t *testing.T) {
+	tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second))
+
+	if tk.IsExpired() {
+		t.Errorf("azure: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires())
+	}
+}
+
+func TestTokenWillExpireIn(t *testing.T) {
+	d := 5 * time.Second
+	tk := newTokenExpiresIn(d)
+
+	if !tk.WillExpireIn(d) {
+		t.Error("azure: Token#WillExpireIn mismeasured expiration time")
+	}
+}
+
+func TestTokenWithAuthorization(t *testing.T) {
+	tk := newToken()
+
+	req, err := autorest.Prepare(&http.Request{}, tk.WithAuthorization())
+	if err != nil {
+		t.Errorf("azure: Token#WithAuthorization returned an error (%v)", err)
+	} else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", tk.AccessToken) {
+		t.Error("azure: Token#WithAuthorization failed to set Authorization header")
+	}
+}
+
+func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	if !spt.autoRefresh {
+		t.Error("azure: ServicePrincipalToken did not default to automatic token refreshing")
+	}
+
+	spt.SetAutoRefresh(false)
+	if spt.autoRefresh {
+		t.Error("azure: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing")
+	}
+}
+func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	if spt.refreshWithin != defaultRefresh {
+		t.Error("azure: ServicePrincipalToken did not correctly set the default refresh interval")
+	}
+
+	spt.SetRefreshWithin(2 * defaultRefresh)
+	if spt.refreshWithin != 2*defaultRefresh {
+		t.Error("azure: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval")
+	}
+}
+
+func TestServicePrincipalTokenSetSender(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	var s autorest.Sender
+	s = mocks.NewSender()
+	spt.SetSender(s)
+	if !reflect.DeepEqual(s, spt.sender) {
+		t.Error("azure: ServicePrincipalToken#SetSender did not set the sender")
+	}
+}
+
+func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					if r.Method != "POST" {
+						t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method)
+					}
+					return mocks.NewResponse(), nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.Refresh()
+}
+
+func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" {
+						t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v",
+							"application/x-www-form-urlencoded",
+							r.Header.Get(http.CanonicalHeaderKey("Content-Type")))
+					}
+					return mocks.NewResponse(), nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.Refresh()
+}
+
+func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					u := fmt.Sprintf("https://login.microsoftonline.com/%s/oauth2/token?api-version=1.0", spt.tenantID)
+					if r.URL.String() != u {
+						t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v",
+							u, r.URL)
+					}
+					return mocks.NewResponse(), nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.Refresh()
+}
+
+func TestServicePrincipalTokenRefreshSetsBody(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					b, err := ioutil.ReadAll(r.Body)
+					if err != nil {
+						t.Errorf("azure: Failed to read body of Service Principal token request (%v)", err)
+					} else if string(b) != defaultFormData {
+						t.Errorf("azure: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v",
+							defaultFormData, string(b))
+					}
+					return mocks.NewResponse(), nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.Refresh()
+}
+func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	resp := mocks.NewResponse()
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					return resp, nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.Refresh()
+
+	if resp.Body.(*mocks.Body).IsOpen() {
+		t.Error("azure: ServicePrincipalToken#Refresh failed to close the HTTP Response Body")
+	}
+}
+
+func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	c := mocks.NewSender()
+	c.EmitErrors(1)
+	spt.SetSender(c)
+
+	err := spt.Refresh()
+	if err == nil {
+		t.Error("azure: Failed to propagate the request error")
+	}
+}
+
+func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	c := mocks.NewSender()
+	c.EmitStatus("401 NotAuthorized", 401)
+	spt.SetSender(c)
+
+	err := spt.Refresh()
+	if err == nil {
+		t.Error("azure: Failed to return an error when receiving a status code other than HTTP 200")
+	}
+}
+
+func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(expirationBase).Seconds()))
+	j := newTokenJSON(expiresOn, "resource")
+	resp := mocks.NewResponseWithContent(j)
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					return resp, nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+
+	err := spt.Refresh()
+	if err != nil {
+		t.Errorf("azure: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err)
+	} else if spt.AccessToken != "accessToken" ||
+		spt.ExpiresIn != "3600" ||
+		spt.ExpiresOn != expiresOn ||
+		spt.NotBefore != expiresOn ||
+		spt.Resource != "resource" ||
+		spt.Type != "Bearer" {
+		t.Errorf("azure: ServicePrincipalToken#Refresh failed to correctly unmarshal the JSON -- expected %v, received %v",
+			j, *spt)
+	}
+}
+
+func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) {
+	spt := newServicePrincipalToken()
+	expireToken(&spt.Token)
+
+	f := false
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					f = true
+					return mocks.NewResponse(), nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.EnsureFresh()
+	if !f {
+		t.Error("azure: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token")
+	}
+}
+
+func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) {
+	spt := newServicePrincipalToken()
+	setTokenToExpireIn(&spt.Token, 1000*time.Second)
+
+	f := false
+	c := mocks.NewSender()
+	s := autorest.DecorateSender(c,
+		(func() autorest.SendDecorator {
+			return func(s autorest.Sender) autorest.Sender {
+				return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
+					f = true
+					return mocks.NewResponse(), nil
+				})
+			}
+		})())
+	spt.SetSender(s)
+	spt.EnsureFresh()
+	if f {
+		t.Error("azure: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token")
+	}
+}
+
+func TestServicePrincipalTokenWithAuthorization(t *testing.T) {
+	spt := newServicePrincipalToken()
+	setTokenToExpireIn(&spt.Token, 1000*time.Second)
+
+	req, err := autorest.Prepare(&http.Request{}, spt.WithAuthorization())
+	if err != nil {
+		t.Errorf("azure: ServicePrincipalToken#WithAuthorization returned an error (%v)", err)
+	} else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.AccessToken) {
+		t.Error("azure: ServicePrincipalToken#WithAuthorization failed to set Authorization header")
+	}
+}
+func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfCannotRefresh(t *testing.T) {
+	spt := newServicePrincipalToken()
+
+	_, err := autorest.Prepare(&http.Request{}, spt.WithAuthorization())
+	if err == nil {
+		t.Error("azure: ServicePrincipalToken#WithAuthorization failed to return an error when refresh fails")
+	}
+}
+
+func newToken() *Token {
+	return &Token{
+		AccessToken: "ASECRETVALUE",
+		Resource:    "https://azure.microsoft.com/",
+		Type:        "Bearer",
+	}
+}
+
+func newTokenJSON(expiresOn string, resource string) string {
+	return fmt.Sprintf(`{
+		"access_token" : "accessToken",
+		"expires_in" : "3600",
+		"expires_on" : "%s",
+		"not_before" : "%s",
+		"resource" : "%s",
+		"token_type" : "Bearer"
+		}`,
+		expiresOn, expiresOn, resource)
+}
+
+func newTokenExpiresIn(expireIn time.Duration) *Token {
+	return setTokenToExpireIn(newToken(), expireIn)
+}
+
+func newTokenExpiresAt(expireAt time.Time) *Token {
+	return setTokenToExpireAt(newToken(), expireAt)
+}
+
+func expireToken(t *Token) *Token {
+	return setTokenToExpireIn(t, 0)
+}
+
+func setTokenToExpireAt(t *Token, expireAt time.Time) *Token {
+	t.ExpiresIn = "3600"
+	t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(expirationBase).Seconds()))
+	t.NotBefore = t.ExpiresOn
+	return t
+}
+
+func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token {
+	return setTokenToExpireAt(t, time.Now().Add(expireIn))
+}
+
+func newServicePrincipalToken() *ServicePrincipalToken {
+	spt, _ := NewServicePrincipalToken("id", "secret", "tenantID", "resource")
+	return spt
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,288 @@
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"time"
+)
+
+const (
+	// DefaultPollingDelay is the default delay between polling requests (only used if the
+	// http.Response lacks a well-formed Retry-After header).
+	DefaultPollingDelay = 60 * time.Second
+
+	// DefaultPollingDuration is the default total polling duration.
+	DefaultPollingDuration = 15 * time.Minute
+)
+
+// PollingMode sets how, if at all, clients composed with Client will poll.
+type PollingMode string
+
+const (
+	// PollUntilAttempts polling mode polls until reaching a maximum number of attempts.
+	PollUntilAttempts PollingMode = "poll-until-attempts"
+
+	// PollUntilDuration polling mode polls until a specified time.Duration has passed.
+	PollUntilDuration PollingMode = "poll-until-duration"
+
+	// DoNotPoll disables polling.
+	DoNotPoll PollingMode = "not-at-all"
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			r.Write(&b)
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			var body, b bytes.Buffer
+
+			defer resp.Body.Close()
+
+			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+			resp.Write(&b)
+
+			li.Logger.Printf(responseFormat, b.String())
+
+			resp.Body = ioutil.NopCloser(&body)
+			return r.Respond(resp)
+		})
+	}
+}
+
+var (
+	// DefaultClient is the base from which generated clients should create a Client instance. Users
+	// can then establish widely used Client defaults by replacing or modifying the DefaultClient
+	// before instantiating a generated client.
+	DefaultClient = Client{PollingMode: PollUntilDuration, PollingDuration: DefaultPollingDuration}
+)
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender. Lastly, it supports basic request polling,
+// limited to a maximum number of attempts or a specified duration.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+	Authorizer        Authorizer
+	Sender            Sender
+	RequestInspector  PrepareDecorator
+	ResponseInspector RespondDecorator
+
+	PollingMode     PollingMode
+	PollingAttempts int
+	PollingDuration time.Duration
+
+	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+	// through the Do method.
+	UserAgent string
+}
+
+// NewClientWithUserAgent returns an instance of the DefaultClient with the UserAgent set to the
+// passed string.
+func NewClientWithUserAgent(ua string) Client {
+	c := DefaultClient
+	c.UserAgent = ua
+	return c
+}
+
+// IsPollingAllowed returns an error if the passed http.Response requires polling but polling is
+// disabled on the client; otherwise it returns nil.
+func (c Client) IsPollingAllowed(resp *http.Response, codes ...int) error {
+	if c.DoNotPoll() && ResponseRequiresPolling(resp, codes...) {
+		return NewError("autorest/Client", "IsPollingAllowed", "Response to %s requires polling but polling is disabled",
+			resp.Request.URL)
+	}
+	return nil
+}
+
+// PollAsNeeded is a convenience method that will poll if the passed http.Response requires it.
+func (c Client) PollAsNeeded(resp *http.Response, codes ...int) (*http.Response, error) {
+	if !ResponseRequiresPolling(resp, codes...) {
+		return resp, nil
+	}
+
+	if c.DoNotPoll() {
+		return resp, NewError("autorest/Client", "PollAsNeeded", "Polling for %s is required, but polling is disabled",
+			resp.Request.URL)
+	}
+
+	req, err := NewPollingRequest(resp, c)
+	if err != nil {
+		return resp, NewErrorWithError(err, "autorest/Client", "PollAsNeeded", "Unable to create polling request for response to %s",
+			resp.Request.URL)
+	}
+
+	Prepare(req,
+		c.WithInspection())
+
+	if c.PollForAttempts() {
+		return PollForAttempts(c, req, DefaultPollingDelay, c.PollingAttempts, codes...)
+	}
+	return PollForDuration(c, req, DefaultPollingDelay, c.PollingDuration, codes...)
+}
+
+// DoNotPoll returns true if the client should not poll, false otherwise.
+func (c Client) DoNotPoll() bool {
+	return len(c.PollingMode) == 0 || c.PollingMode == DoNotPoll
+}
+
+// PollForAttempts returns true if the PollingMode is set to PollUntilAttempts, false otherwise.
+func (c Client) PollForAttempts() bool {
+	return c.PollingMode == PollUntilAttempts
+}
+
+// PollForDuration returns true if the PollingMode is set to PollUntilDuration, false otherwise.
+func (c Client) PollForDuration() bool {
+	return c.PollingMode == PollUntilDuration
+}
+
+// Send sends the passed http.Request after applying authorization. It will poll if the client
+// allows polling and the http.Response status code requires it. It will close the http.Response
+// Body if the request returns an error.
+func (c Client) Send(req *http.Request, codes ...int) (*http.Response, error) {
+	if len(codes) == 0 {
+		codes = []int{http.StatusOK}
+	}
+
+	req, err := Prepare(req,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		return nil, NewErrorWithError(err, "autorest/Client", "Send", "Preparing request failed")
+	}
+
+	resp, err := SendWithSender(c, req,
+		DoErrorUnlessStatusCode(codes...))
+	if err == nil {
+		err = c.IsPollingAllowed(resp)
+		if err == nil {
+			resp, err = c.PollAsNeeded(resp)
+		}
+	}
+
+	if err != nil {
+		Respond(resp,
+			ByClosing())
+	}
+
+	return resp, err
+}
+
+// Do implements the Sender interface by invoking the active Sender. If Sender is not set, it uses
+// a new instance of http.Client. In both cases it will, if UserAgent is set, set the
+// User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if len(c.UserAgent) > 0 {
+		r, _ = Prepare(r, WithUserAgent(c.UserAgent))
+	}
+	return c.sender().Do(r)
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender() Sender {
+	if c.Sender == nil {
+		return &http.Client{}
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+	if c.ResponseInspector == nil {
+		return ByIgnoring()
+	}
+	return c.ResponseInspector
+}
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// GetPollingDelay extracts the polling delay from the Retry-After header of the response. If
+// the header is absent or is malformed, it will return the supplied default delay time.Duration.
+func (r Response) GetPollingDelay(defaultDelay time.Duration) time.Duration {
+	return GetPollingDelay(r.Response, defaultDelay)
+}
+
+// GetPollingLocation retrieves the polling URL from the Location header of the response.
+func (r Response) GetPollingLocation() string {
+	return GetPollingLocation(r.Response)
+}
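A short sketch of how a caller might configure this Client follows; it is not part of the
vendored sources. The user agent and URL are placeholders, and the import path is the canonical
one rather than the Godeps path.

	package main

	import (
		"fmt"
		"net/http"

		"github.com/Azure/go-autorest/autorest"
	)

	func main() {
		// Start from the DefaultClient (via NewClientWithUserAgent) and bound
		// any 202 polling to five attempts instead of a fixed duration.
		c := autorest.NewClientWithUserAgent("example-agent/1.0")
		c.PollingMode = autorest.PollUntilAttempts
		c.PollingAttempts = 5

		req, err := autorest.Prepare(&http.Request{},
			autorest.AsGet(),
			autorest.WithBaseURL("https://example.com/"))
		if err != nil {
			fmt.Println(err)
			return
		}

		// Send applies authorization and request inspection, accepts the
		// passed status codes (HTTP 200 by default), and polls as needed.
		resp, err := c.Send(req)
		if err != nil {
			fmt.Println(err)
			return
		}
		autorest.Respond(resp,
			autorest.ByClosing())
	}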
+func (r Response) GetPollingLocation() string { + return GetPollingLocation(r.Response) +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,621 @@ +package autorest + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "net/http" + "reflect" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks" +) + +func TestLoggingInspectorWithInspection(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(mocks.NewRequestWithContent("Content"), + c.WithInspection()) + + if len(b.String()) <= 0 { + t.Error("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + r.Body.Close() + Prepare(r, + c.WithInspection()) + + if len(b.String()) <= 0 { + t.Error("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(r, + c.WithInspection()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Error("autorest: LoggingInspector#WithInspection did not restore the Request body") + } +} + +func TestLoggingInspectorByInspecting(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(mocks.NewResponseWithContent("Content"), + c.ByInspecting()) + + if len(b.String()) <= 0 { + t.Error("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + r.Body.Close() + Respond(r, + c.ByInspecting()) + + if len(b.String()) <= 0 { + t.Error("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(r, + c.ByInspecting()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Error("autorest: LoggingInspector#ByInspecting did not restore the Response body") + } +} + +func TestNewClientWithUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + + if c.UserAgent != ua { + t.Errorf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", + ua, c.UserAgent) + } +} + +func TestClientIsPollingAllowed(t 
*testing.T) { + c := Client{PollingMode: PollUntilAttempts} + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + err := c.IsPollingAllowed(r) + if err != nil { + t.Errorf("autorest: Client#IsPollingAllowed returned an error for an http.Response that requires polling (%v)", err) + } +} + +func TestClientIsPollingAllowedIgnoresOk(t *testing.T) { + c := Client{PollingMode: PollUntilAttempts} + r := mocks.NewResponse() + + err := c.IsPollingAllowed(r) + if err != nil { + t.Error("autorest: Client#IsPollingAllowed returned an error for an http.Response that does not require polling") + } +} + +func TestClientIsPollingAllowedIgnoresDisabledForIgnoredStatusCode(t *testing.T) { + c := Client{PollingMode: PollUntilAttempts} + r := mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest) + + err := c.IsPollingAllowed(r) + if err != nil { + t.Errorf("autorest: Client#IsPollingAllowed returned an error for an http.Response that requires polling (%v)", err) + } +} + +func TestClientIsPollingAllowedIgnoredPollingMode(t *testing.T) { + c := Client{PollingMode: DoNotPoll} + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + err := c.IsPollingAllowed(r) + if err == nil { + t.Error("autorest: Client#IsPollingAllowed failed to return an error when polling is disabled") + } +} + +func TestClientPollAsNeededIgnoresOk(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Sender = s + r := mocks.NewResponse() + + resp, err := c.PollAsNeeded(r) + if err != nil { + t.Errorf("autorest: Client#PollAsNeeded failed when given a successful HTTP request (%v)", err) + } + if s.Attempts() > 0 { + t.Error("autorest: Client#PollAsNeeded attempted to poll a successful HTTP request") + } + + Respond(resp, + ByClosing()) +} + +func TestClientPollAsNeededLeavesBodyOpen(t *testing.T) { + c := Client{} + c.Sender = mocks.NewSender() + r := mocks.NewResponse() + + resp, err := c.PollAsNeeded(r) + if err != nil { + t.Errorf("autorest: Client#PollAsNeeded failed when given a successful HTTP request (%v)", err) + } + if !resp.Body.(*mocks.Body).IsOpen() { + t.Error("autorest: Client#PollAsNeeded unexpectedly closed the response body") + } + + Respond(resp, + ByClosing()) +} + +func TestClientPollAsNeededReturnsErrorWhenPollingDisabled(t *testing.T) { + c := Client{} + c.Sender = mocks.NewSender() + c.PollingMode = DoNotPoll + + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + _, err := c.PollAsNeeded(r) + if err == nil { + t.Error("autorest: Client#PollAsNeeded failed to return an error when polling was disabled but required") + } + + Respond(r, + ByClosing()) +} + +func TestClientPollAsNeededAllowsInspectionOfRequest(t *testing.T) { + c := Client{} + c.Sender = mocks.NewSender() + c.PollingMode = PollUntilAttempts + c.PollingAttempts = 1 + + mi := &mockInspector{} + c.RequestInspector = mi.WithInspection() + + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + c.PollAsNeeded(r) + if !mi.wasInvoked { + t.Error("autorest: Client#PollAsNeeded failed to allow inspection of polling request") + } + + Respond(r, + ByClosing()) +} + +func TestClientPollAsNeededReturnsErrorIfUnableToCreateRequest(t *testing.T) { + c := Client{} + c.Authorizer = mockFailingAuthorizer{} + c.Sender = mocks.NewSender() + c.PollingMode = PollUntilAttempts + c.PollingAttempts = 1 + + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + _, err 
:= c.PollAsNeeded(r) + if err == nil { + t.Error("autorest: Client#PollAsNeeded failed to return error when unable to create request") + } + + Respond(r, + ByClosing()) +} + +func TestClientPollAsNeededPollsForAttempts(t *testing.T) { + c := Client{} + c.PollingMode = PollUntilAttempts + c.PollingAttempts = 5 + + s := mocks.NewSender() + s.EmitStatus("202 Accepted", http.StatusAccepted) + c.Sender = s + + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + s.SetResponse(r) + + resp, _ := c.PollAsNeeded(r) + if s.Attempts() != 5 { + t.Errorf("autorest: Client#PollAsNeeded did not poll the expected number of attempts -- expected %v, actual %v", + c.PollingAttempts, s.Attempts()) + } + + Respond(resp, + ByClosing()) +} + +func TestClientPollAsNeededPollsForDuration(t *testing.T) { + c := Client{} + c.PollingMode = PollUntilDuration + c.PollingDuration = 10 * time.Millisecond + + s := mocks.NewSender() + s.EmitStatus("202 Accepted", http.StatusAccepted) + c.Sender = s + + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + s.SetResponse(r) + + d1 := 10 * time.Millisecond + start := time.Now() + resp, _ := c.PollAsNeeded(r) + d2 := time.Now().Sub(start) + if d2 < d1 { + t.Errorf("autorest: Client#PollAsNeeded did not poll for the expected duration -- expected %v, actual %v", + d1.Seconds(), d2.Seconds()) + } + + Respond(resp, + ByClosing()) +} + +func TestClientDoNotPoll(t *testing.T) { + c := Client{} + + if !c.DoNotPoll() { + t.Errorf("autorest: Client requested polling by default, expected no polling (%v)", c.PollingMode) + } +} + +func TestClientDoNotPollForAttempts(t *testing.T) { + c := Client{} + c.PollingMode = PollUntilAttempts + + if c.DoNotPoll() { + t.Errorf("autorest: Client failed to request polling after polling mode set to %s", c.PollingMode) + } +} + +func TestClientDoNotPollForDuration(t *testing.T) { + c := Client{} + c.PollingMode = PollUntilDuration + + if c.DoNotPoll() { + t.Errorf("autorest: Client failed to request polling after polling mode set to %s", c.PollingMode) + } +} + +func TestClientPollForAttempts(t *testing.T) { + c := Client{} + c.PollingMode = PollUntilAttempts + + if !c.PollForAttempts() { + t.Errorf("autorest: Client#SetPollingMode failed to set polling by attempts -- polling mode set to %s", c.PollingMode) + } +} + +func TestClientPollForDuration(t *testing.T) { + c := Client{} + c.PollingMode = PollUntilDuration + + if !c.PollForDuration() { + t.Errorf("autorest: Client#SetPollingMode failed to set polling for duration -- polling mode set to %s", c.PollingMode) + } +} + +func TestClientSenderReturnsHttpClientByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.sender()) != "*http.Client" { + t.Error("autorest: Client#sender failed to return http.Client by default") + } +} + +func TestClientSendSetsAuthorization(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockAuthorizer{}, Sender: s} + + c.Send(r) + if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { + t.Errorf("autorest: Client#Send failed to set Authorization header -- %s=%s", + http.CanonicalHeaderKey(headerAuthorization), + r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) + } +} + +func TestClientSendInvokesInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{RequestInspector: i.WithInspection(), Sender: s} + + c.Send(r) + if !i.wasInvoked { + 
t.Error("autorest: Client#Send failed to invoke the RequestInspector") + } +} + +func TestClientSendReturnsPrepareError(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockFailingAuthorizer{}, Sender: s} + + _, err := c.Send(r) + if err == nil { + t.Error("autorest: Client#Send failed to return an error the Prepare error") + } +} + +func TestClientSendSends(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Sender: s} + + c.Send(r) + if s.Attempts() <= 0 { + t.Error("autorest: Client#Send failed to invoke the Sender") + } +} + +func TestClientSendDefaultsToUsingStatusCodeOK(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockAuthorizer{}, Sender: s} + + _, err := c.Send(r) + if err != nil { + t.Errorf("autorest: Client#Send returned an error for Status Code OK -- %v", + err) + } +} + +func TestClientSendClosesReponseBodyWhenReturningError(t *testing.T) { + s := mocks.NewSender() + r := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) + s.SetResponse(r) + c := Client{Sender: s} + + c.Send(mocks.NewRequest()) + if r.Body.(*mocks.Body).IsOpen() { + t.Error("autorest: Client#Send failed to close the response body when returning an error") + } +} + +func TestClientSendReturnsErrorWithUnexpectedStatusCode(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + s.EmitStatus("500 InternalServerError", http.StatusInternalServerError) + c := Client{Sender: s} + + _, err := c.Send(r) + if err == nil { + t.Error("autorest: Client#Send failed to return an error for an unexpected Status Code") + } +} + +func TestClientSendDoesNotReturnErrorForExpectedStatusCode(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + s.EmitStatus("500 InternalServerError", http.StatusInternalServerError) + c := Client{Sender: s} + + _, err := c.Send(r, http.StatusInternalServerError) + if err != nil { + t.Errorf("autorest: Client#Send returned an error for an expected Status Code -- %v", + err) + } +} + +func TestClientSendPollsIfNeeded(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + s.SetPollAttempts(5) + c := Client{Sender: s, PollingMode: PollUntilAttempts, PollingAttempts: 10} + + c.Send(r, http.StatusOK, http.StatusAccepted) + if s.Attempts() != (5 + 1) { + t.Errorf("autorest: Client#Send failed to poll the expected number of times -- attempts %d", + s.Attempts()) + } +} + +func TestClientSendDoesNotPollIfUnnecessary(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Sender: s, PollingMode: PollUntilAttempts, PollingAttempts: 10} + + c.Send(r, http.StatusOK, http.StatusAccepted) + if s.Attempts() != 1 { + t.Errorf("autorest: Client#Send unexpectedly polled -- attempts %d", + s.Attempts()) + } +} + +func TestClientSenderReturnsSetSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + if c.sender() != s { + t.Error("autorest: Client#sender failed to return set Sender") + } +} + +func TestClientDoInvokesSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() != 1 { + t.Error("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientDoSetsUserAgent(t *testing.T) { + c := Client{UserAgent: "UserAgent"} + r := mocks.NewRequest() + + c.Do(r) + + if r.Header.Get(http.CanonicalHeaderKey(headerUserAgent)) != "UserAgent" { + t.Errorf("autorest: Client#Do failed to correctly set 
User-Agent header: %s=%s", + http.CanonicalHeaderKey(headerUserAgent), + r.Header.Get(http.CanonicalHeaderKey(headerUserAgent))) + } +} + +func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { + t.Error("autorest: Client#authorizer failed to return the NullAuthorizer by default") + } +} + +func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" { + t.Error("autorest: Client#authorizer failed to return the set Authorizer") + } +} + +func TestClientWithAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + req, _ := Prepare(&http.Request{}, + c.WithAuthorization()) + + if req.Header.Get(headerAuthorization) == "" { + t.Error("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") + } +} + +func TestClientWithInspection(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.RequestInspector = r.WithInspection() + + Prepare(&http.Request{}, + c.WithInspection()) + + if !r.wasInvoked { + t.Error("autorest: Client#WithInspection failed to invoke RequestInspector") + } +} + +func TestClientWithInspectionSetsDefault(t *testing.T) { + c := Client{} + + r1 := &http.Request{} + r2, _ := Prepare(r1, + c.WithInspection()) + + if !reflect.DeepEqual(r1, r2) { + t.Error("autorest: Client#WithInspection failed to provide a default RequestInspector") + } +} + +func TestClientByInspecting(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.ResponseInspector = r.ByInspecting() + + Respond(&http.Response{}, + c.ByInspecting()) + + if !r.wasInvoked { + t.Error("autorest: Client#ByInspecting failed to invoke ResponseInspector") + } +} + +func TestClientByInspectingSetsDefault(t *testing.T) { + c := Client{} + + r := &http.Response{} + Respond(r, + c.ByInspecting()) + + if !reflect.DeepEqual(r, &http.Response{}) { + t.Error("autorest: Client#ByInspecting failed to provide a default ResponseInspector") + } +} + +func TestResponseGetPollingDelay(t *testing.T) { + d1 := 10 * time.Second + + r := mocks.NewResponse() + mocks.SetRetryHeader(r, d1) + ar := Response{Response: r} + + d2 := ar.GetPollingDelay(time.Duration(0)) + if d1 != d2 { + t.Errorf("autorest: Response#GetPollingDelay failed to return the correct delay -- expected %v, received %v", + d1, d2) + } +} + +func TestResponseGetPollingDelayReturnsDefault(t *testing.T) { + ar := Response{Response: mocks.NewResponse()} + + d1 := 10 * time.Second + d2 := ar.GetPollingDelay(d1) + if d1 != d2 { + t.Errorf("autorest: Response#GetPollingDelay failed to return the default delay -- expected %v, received %v", + d1, d2) + } +} + +func TestResponseGetPollingLocation(t *testing.T) { + r := mocks.NewResponse() + mocks.SetLocationHeader(r, mocks.TestURL) + ar := Response{Response: r} + + if ar.GetPollingLocation() != mocks.TestURL { + t.Errorf("autorest: Response#GetPollingLocation failed to return correct URL -- expected %v, received %v", + mocks.TestURL, ar.GetPollingLocation()) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date' === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go 1970-01-01 
00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,87 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types. Both convert to time.Time through a ToTime method.
+*/
+package date
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	rfc3339FullDate = "2006-01-02"
+	dateFormat      = "%4d-%02d-%02d"
+	jsonFormat      = `"%4d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+	time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+	d = Date{}
+	d.Time, err = time.Parse(rfc3339FullDate, date)
+	return d, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+	return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+	return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+	return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+	if data[0] == '"' {
+		data = data[1 : len(data)-1]
+	}
+	d.Time, err = time.Parse(rfc3339FullDate, string(data))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+	return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalText(data []byte) (err error) {
+	d.Time, err = time.Parse(rfc3339FullDate, string(data))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
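+// For example, a Date for February 3, 2001 renders as "2001-02-03".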
+func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/date_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,201 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseDate() { + d, _ := ParseDate("2001-02-03") + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate() { + d, _ := ParseDate("2001-02-03") + t, _ := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") + + // Date acts as time.Time when the receiver + if d.Before(t) { + fmt.Printf("Before ") + } else { + fmt.Printf("After ") + } + + // Convert Date when needing a time.Time + if t.After(d.ToTime()) { + fmt.Printf("After") + } else { + fmt.Printf("Before") + } + // Output: Before After +} + +func ExampleDate_MarshalBinary() { + d, _ := ParseDate("2001-02-03") + t, _ := d.MarshalBinary() + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalBinary() { + d := Date{} + t := "2001-02-03" + + err := d.UnmarshalBinary([]byte(t)) + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalJSON() { + d, _ := ParseDate("2001-02-03") + j, _ := json.Marshal(d) + fmt.Println(string(j)) + // Output: "2001-02-03" +} + +func ExampleDate_UnmarshalJSON() { + var d struct { + Date Date `json:"date"` + } + j := `{ + "date" : "2001-02-03" + }` + + err := json.Unmarshal([]byte(j), &d) + if err != nil { + fmt.Println(err) + } + fmt.Println(d.Date) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalText() { + d, _ := ParseDate("2001-02-03") + t, _ := d.MarshalText() + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalText() { + d := Date{} + t := "2001-02-03" + + err := d.UnmarshalText([]byte(t)) + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func TestDateString(t *testing.T) { + d, _ := ParseDate("2001-02-03") + if d.String() != "2001-02-03" { + t.Errorf("date: String failed (%v)", d.String()) + } +} + +func TestDateBinaryRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + t1, err := d1.MarshalBinary() + if err != nil { + t.Errorf("date: MarshalBinary failed (%v)", err) + } + + d2 := Date{} + err = d2.UnmarshalBinary(t1) + if err != nil { + t.Errorf("date: UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Errorf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestDateJSONRoundTrip(t *testing.T) { + type s struct { + Date Date `json:"date"` + } + var err error + d1 := s{} + d1.Date, err = ParseDate("2001-02-03") + j, err := json.Marshal(d1) + if err != nil { + t.Errorf("date: MarshalJSON failed (%v)", err) + } + + d2 := s{} + err = json.Unmarshal(j, &d2) + if err != nil { + t.Errorf("date: UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Errorf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestDateTextRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + t1, err := d1.MarshalText() + if err != nil { + 
t.Errorf("date: MarshalText failed (%v)", err) + } + + d2 := Date{} + err = d2.UnmarshalText(t1) + if err != nil { + t.Errorf("date: UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Errorf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestDateToTime(t *testing.T) { + var d Date + d, err := ParseDate("2001-02-03") + if err != nil { + t.Errorf("date: ParseDate failed (%v)", err) + } + var v interface{} = d.ToTime() + switch v.(type) { + case time.Time: + return + default: + t.Errorf("date: ToTime failed to return a time.Time") + } +} + +func TestDateUnmarshalJSONReturnsError(t *testing.T) { + var d struct { + Date Date `json:"date"` + } + j := `{ + "date" : "February 3, 2001" + }` + + err := json.Unmarshal([]byte(j), &d) + if err == nil { + t.Error("date: Date failed to return error for malformed JSON date") + } +} + +func TestDateUnmarshalTextReturnsError(t *testing.T) { + d := Date{} + txt := "February 3, 2001" + + err := d.UnmarshalText([]byte(txt)) + if err == nil { + t.Error("date: Date failed to return error for malformed Text date") + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,70 @@ +package date + +import ( + "time" +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// ParseTime creates a new Time from the passed string. +func ParseTime(date string) (d Time, err error) { + d = Time{} + d.Time, err = time.Parse(time.RFC3339, date) + return d, err +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) MarshalBinary() ([]byte, error) { + return d.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (d *Time) UnmarshalBinary(data []byte) error { + return d.Time.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) MarshalJSON() (json []byte, err error) { + return d.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (d *Time) UnmarshalJSON(data []byte) (err error) { + return d.Time.UnmarshalJSON(data) +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) MarshalText() (text []byte, err error) { + return d.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (d *Time) UnmarshalText(data []byte) (err error) { + return d.Time.UnmarshalText(data) +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (d Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
+ b, err := d.Time.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (d Time) ToTime() time.Time { + return d.Time +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date/time_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,164 @@ +package date + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseTime() { + d, _ := ParseTime("2001-02-03T04:05:06Z") + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalBinary() { + d, _ := ParseTime("2001-02-03T04:05:06Z") + t, _ := d.MarshalBinary() + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalBinary() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + err := d.UnmarshalBinary([]byte(t)) + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalJSON() { + d, _ := ParseTime("2001-02-03T04:05:06Z") + j, _ := json.Marshal(d) + fmt.Println(string(j)) + // Output: "2001-02-03T04:05:06Z" +} + +func ExampleTime_UnmarshalJSON() { + var d struct { + Time Time `json:"datetime"` + } + j := `{ + "datetime" : "2001-02-03T04:05:06Z" + }` + + err := json.Unmarshal([]byte(j), &d) + if err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalText() { + d, _ := ParseTime("2001-02-03T04:05:06Z") + t, _ := d.MarshalText() + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalText() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + err := d.UnmarshalText([]byte(t)) + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func TestTimeString(t *testing.T) { + d, _ := ParseTime("2001-02-03T04:05:06Z") + if d.String() != "2001-02-03T04:05:06Z" { + t.Errorf("date: String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForError(t *testing.T) { + d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Errorf("date: Time#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTrip(t *testing.T) { + d1, err := ParseTime("2001-02-03T04:05:06Z") + t1, err := d1.MarshalBinary() + if err != nil { + t.Errorf("datetime: MarshalBinary failed (%v)", err) + } + + d2 := Time{} + err = d2.UnmarshalBinary(t1) + if err != nil { + t.Errorf("datetime: UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Errorf("datetime: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTrip(t *testing.T) { + type s struct { + Time Time `json:"datetime"` + } + var err error + d1 := s{} + d1.Time, err = ParseTime("2001-02-03T04:05:06Z") + j, err := json.Marshal(d1) + if err != nil { + t.Errorf("datetime: MarshalJSON failed (%v)", err) + } + + d2 := s{} + err = json.Unmarshal(j, &d2) + if err != nil { + t.Errorf("datetime: UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Errorf("datetime: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTrip(t *testing.T) { + d1, err := ParseTime("2001-02-03T04:05:06Z") + t1, err := 
d1.MarshalText()
+	if err != nil {
+		t.Errorf("datetime: MarshalText failed (%v)", err)
+	}
+
+	d2 := Time{}
+	err = d2.UnmarshalText(t1)
+	if err != nil {
+		t.Errorf("datetime: UnmarshalText failed (%v)", err)
+	}
+
+	if !reflect.DeepEqual(d1, d2) {
+		t.Errorf("datetime: Round-trip Text failed (%v, %v)", d1, d2)
+	}
+}
+
+func TestTimeToTime(t *testing.T) {
+	var d Time
+	d, err := ParseTime("2001-02-03T04:05:06Z")
+	if err != nil {
+		t.Errorf("datetime: ParseTime failed (%v)", err)
+	}
+	var v interface{} = d.ToTime()
+	switch v.(type) {
+	case time.Time:
+		return
+	default:
+		t.Errorf("datetime: ToTime failed to return a time.Time")
+	}
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,93 @@
+package autorest
+
+import (
+	"fmt"
+)
+
+// Error describes the methods implemented by autorest errors.
+type Error interface {
+	error
+
+	// PackageType should return the package type of the object emitting the error. For types, the
+	// value should match that produced by the '%T' format specifier of the fmt package. For other
+	// elements, such as functions, it returns just the package name (e.g., "autorest").
+	PackageType() string
+
+	// Method should return the name of the method raising the error.
+	Method() string
+
+	// Message should return the error message.
+	Message() string
+
+	// String should return a formatted string containing all available details (i.e., PackageType,
+	// Method, Message, and original error (if any)).
+	String() string
+
+	// Original should return the original error, if any, and nil otherwise.
+	Original() error
+}
+
+type baseError struct {
+	packageType string
+	method      string
+	message     string
+
+	original error
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) Error {
+	return NewErrorWithError(nil, packageType, method, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the passed packageType, method,
+// message, and original error. message is treated as a format string to which the optional args
+// apply.
+func NewErrorWithError(original error, packageType string, method string, message string, args ...interface{}) Error {
+	if _, ok := original.(Error); ok {
+		return original.(Error)
+	}
+	return baseError{
+		packageType: packageType,
+		method:      method,
+		message:     fmt.Sprintf(message, args...),
+		original:    original,
+	}
+}
+
+// PackageType returns the package type of the object emitting the error. For types, the value
+// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+// such as functions, it returns just the package name (e.g., "autorest").
+func (be baseError) PackageType() string {
+	return be.packageType
+}
+
+// Method returns the name of the method raising the error.
+func (be baseError) Method() string {
+	return be.method
+}
+
+// Message is the error message.
+func (be baseError) Message() string {
+	return be.message
+}
+
+// Original returns the original error, if any, and nil otherwise.
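+// Original enables callers to branch on the underlying cause; for example (a sketch):
+//
+//	if e, ok := err.(Error); ok && e.Original() != nil {
+//		// inspect e.Original()
+//	}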
+func (be baseError) Original() error {
+	return be.original
+}
+
+// Error returns the same formatted string as String.
+func (be baseError) Error() string {
+	return be.String()
+}
+
+// String returns a formatted string containing all available details (i.e., PackageType, Method,
+// Message, and original error (if any)).
+func (be baseError) String() string {
+	if be.original == nil {
+		return fmt.Sprintf("%s:%s %s", be.packageType, be.method, be.message)
+	}
+	return fmt.Sprintf("%s:%s %s -- Original Error: %v", be.packageType, be.method, be.message, be.original)
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/error_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,114 @@
+package autorest
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"testing"
+)
+
+func TestNewErrorWithErrorAssignsPackageType(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if e.PackageType() != "packageType" {
+		t.Errorf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType())
+	}
+}
+
+func TestNewErrorWithErrorAssignsMethod(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if e.Method() != "method" {
+		t.Errorf("autorest: Error failed to set method -- expected %v, received %v", "method", e.Method())
+	}
+}
+
+func TestNewErrorWithErrorAssignsMessage(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if e.Message() != "message" {
+		t.Errorf("autorest: Error failed to set message -- expected %v, received %v", "message", e.Message())
+	}
+}
+
+func TestNewErrorWithErrorAcceptsArgs(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message %s", "arg")
+
+	if matched, _ := regexp.MatchString(`.*arg.*`, e.Message()); !matched {
+		t.Errorf("autorest: Error failed to apply message arguments -- expected %v, received %v",
+			`.*arg.*`, e.Message())
+	}
+}
+
+func TestNewErrorWithErrorAssignsError(t *testing.T) {
+	err := fmt.Errorf("original")
+	e := NewErrorWithError(err, "packageType", "method", "message")
+
+	if e.Original() != err {
+		t.Errorf("autorest: Error failed to set the original error -- expected %v, received %v", err, e.Original())
+	}
+}
+
+func TestNewErrorForwards(t *testing.T) {
+	e1 := NewError("packageType", "method", "message %s", "arg")
+	e2 := NewErrorWithError(nil, "packageType", "method", "message %s", "arg")
+
+	if !reflect.DeepEqual(e1, e2) {
+		t.Error("autorest: NewError did not return an error equivalent to NewErrorWithError")
+	}
+}
+
+func TestErrorError(t *testing.T) {
+	err := fmt.Errorf("original")
+	e := NewErrorWithError(err, "packageType", "method", "message")
+
+	if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched {
+		t.Errorf("autorest: Error#Error failed to return original error message -- expected %v, received %v",
+			`.*original.*`, e.Error())
+	}
+}
+
+func TestErrorStringContainsPackageType(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if matched, _ := regexp.MatchString(`.*packageType.*`, e.String()); !matched {
+		t.Errorf("autorest:
Error#String failed to include PackageType -- expected %v, received %v",
+			`.*packageType.*`, e.String())
+	}
+}
+
+func TestErrorStringContainsMethod(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if matched, _ := regexp.MatchString(`.*method.*`, e.String()); !matched {
+		t.Errorf("autorest: Error#String failed to include Method -- expected %v, received %v",
+			`.*method.*`, e.String())
+	}
+}
+
+func TestErrorStringContainsMessage(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if matched, _ := regexp.MatchString(`.*message.*`, e.String()); !matched {
+		t.Errorf("autorest: Error#String failed to include Message -- expected %v, received %v",
+			`.*message.*`, e.String())
+	}
+}
+
+func TestErrorStringContainsOriginal(t *testing.T) {
+	e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", "message")
+
+	if matched, _ := regexp.MatchString(`.*original.*`, e.String()); !matched {
+		t.Errorf("autorest: Error#String failed to include Original error -- expected %v, received %v",
+			`.*original.*`, e.String())
+	}
+}
+
+func TestErrorStringSkipsOriginal(t *testing.T) {
+	e := NewError("packageType", "method", "message")
+
+	if matched, _ := regexp.MatchString(`.*Original.*`, e.String()); matched {
+		t.Errorf("autorest: Error#String included missing Original error -- unexpected %v, received %v",
+			`.*Original.*`, e.String())
+	}
+}

=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks'
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,104 @@
+package mocks
+
+import (
+	"fmt"
+	"net/http"
+	"time"
+)
+
+const (
+	// TestDelay is the Retry-After delay used in tests.
+	TestDelay = 0 * time.Second
+
+	// TestHeader is the header used in tests.
+	TestHeader = "x-test-header"
+
+	// TestURL is the URL used in tests.
+	TestURL = "https://microsoft.com/a/b/c/"
+)
+
+const (
+	headerLocation   = "Location"
+	headerRetryAfter = "Retry-After"
+)
+
+// NewRequest instantiates a new request.
+func NewRequest() *http.Request {
+	return NewRequestWithContent("")
+}
+
+// NewRequestWithContent instantiates a new request using the passed string for the body content.
+func NewRequestWithContent(c string) *http.Request {
+	r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBody(c))
+	return r
+}
+
+// NewRequestForURL instantiates a new request using the passed URL.
+func NewRequestForURL(u string) *http.Request {
+	r, err := http.NewRequest("GET", u, NewBody(""))
+	if err != nil {
+		panic(fmt.Sprintf("mocks: ERROR (%v) parsing testing URL %s", err, u))
+	}
+	return r
+}
+
+// NewResponse instantiates a new response.
+func NewResponse() *http.Response {
+	return NewResponseWithContent("")
+}
+
+// NewResponseWithContent instantiates a new response with the passed string as the body content.
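+// The returned response carries HTTP/1.0 status 200 OK, a fresh request, and an open Body;
+// for example (a sketch):
+//
+//	resp := NewResponseWithContent("pending")
+//	s, _ := ioutil.ReadAll(resp.Body) // s == []byte("pending")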
+func NewResponseWithContent(c string) *http.Response {
+	return &http.Response{
+		Status:     "200 OK",
+		StatusCode: 200,
+		Proto:      "HTTP/1.0",
+		ProtoMajor: 1,
+		ProtoMinor: 0,
+		Body:       NewBody(c),
+		Request:    NewRequest(),
+	}
+}
+
+// NewResponseWithStatus instantiates a new response using the passed string and integer as the
+// status and status code.
+func NewResponseWithStatus(s string, c int) *http.Response {
+	resp := NewResponse()
+	resp.Status = s
+	resp.StatusCode = c
+	return resp
+}
+
+// SetResponseHeader adds a header to the passed response.
+func SetResponseHeader(resp *http.Response, h string, v string) {
+	if resp.Header == nil {
+		resp.Header = make(http.Header)
+	}
+	resp.Header.Set(h, v)
+}
+
+// SetResponseHeaderValues adds a header containing all the passed string values.
+func SetResponseHeaderValues(resp *http.Response, h string, values []string) {
+	if resp.Header == nil {
+		resp.Header = make(http.Header)
+	}
+	for _, v := range values {
+		resp.Header.Add(h, v)
+	}
+}
+
+// SetAcceptedHeaders adds the headers usually associated with a 202 Accepted response.
+func SetAcceptedHeaders(resp *http.Response) {
+	SetLocationHeader(resp, TestURL)
+	SetRetryHeader(resp, TestDelay)
+}
+
+// SetLocationHeader adds the Location header.
+func SetLocationHeader(resp *http.Response, location string) {
+	SetResponseHeader(resp, http.CanonicalHeaderKey(headerLocation), location)
+}
+
+// SetRetryHeader adds the Retry-After header.
+func SetRetryHeader(resp *http.Response, delay time.Duration) {
+	SetResponseHeader(resp, http.CanonicalHeaderKey(headerRetryAfter), fmt.Sprintf("%v", delay.Seconds()))
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/helpers_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+package mocks

=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,165 @@
+/*
+Package mocks provides mocks and helpers used in testing.
+*/
+package mocks
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// Body implements an io.ReadCloser over a string, tracking open state and close attempts.
+type Body struct {
+	s             string
+	b             []byte
+	isOpen        bool
+	closeAttempts int
+}
+
+// NewBody creates a new instance of Body.
+func NewBody(s string) *Body {
+	return (&Body{s: s}).reset()
+}
+
+// Read reads into the passed byte slice and returns the number of bytes read.
+func (body *Body) Read(b []byte) (n int, err error) {
+	if !body.IsOpen() {
+		return 0, fmt.Errorf("ERROR: Body has been closed\n")
+	}
+	if len(body.b) == 0 {
+		return 0, io.EOF
+	}
+	n = copy(b, body.b)
+	body.b = body.b[n:]
+	return n, nil
+}
+
+// Close closes the body.
+func (body *Body) Close() error {
+	if body.isOpen {
+		body.isOpen = false
+		body.closeAttempts++
+	}
+	return nil
+}
+
+// CloseAttempts returns the number of times Close was called.
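+// Tests use CloseAttempts to assert a body was closed exactly once; for example (a sketch):
+//
+//	if body.CloseAttempts() != 1 {
+//		t.Error("expected exactly one Close")
+//	}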
+func (body *Body) CloseAttempts() int {
+	return body.closeAttempts
+}
+
+// IsOpen returns true if the Body has not been closed, false otherwise.
+func (body *Body) IsOpen() bool {
+	return body.isOpen
+}
+
+func (body *Body) reset() *Body {
+	body.isOpen = true
+	body.b = []byte(body.s)
+	return body
+}
+
+// Sender implements a simple null sender.
+type Sender struct {
+	attempts      int
+	pollAttempts  int
+	content       string
+	reuseResponse bool
+	resp          *http.Response
+	status        string
+	statusCode    int
+	emitErrors    int
+	err           error
+}
+
+// NewSender creates a new instance of Sender.
+func NewSender() *Sender {
+	return &Sender{status: "200 OK", statusCode: 200}
+}
+
+// Do accepts the passed request and, based on settings, emits a response and possible error.
+func (c *Sender) Do(r *http.Request) (*http.Response, error) {
+	c.attempts++
+
+	if !c.reuseResponse || c.resp == nil {
+		resp := NewResponse()
+		resp.Request = r
+		resp.Body = NewBody(c.content)
+		resp.Status = c.status
+		resp.StatusCode = c.statusCode
+		c.resp = resp
+	} else {
+		c.resp.Body.(*Body).reset()
+	}
+
+	if c.pollAttempts > 0 {
+		c.pollAttempts--
+		c.resp.Status = "Accepted"
+		c.resp.StatusCode = http.StatusAccepted
+		SetAcceptedHeaders(c.resp)
+	}
+
+	if c.emitErrors > 0 || c.emitErrors < 0 {
+		c.emitErrors--
+		if c.err == nil {
+			return c.resp, fmt.Errorf("Faux Error")
+		}
+		return c.resp, c.err
+	}
+	return c.resp, nil
+}
+
+// Attempts returns the number of times Do was called.
+func (c *Sender) Attempts() int {
+	return c.attempts
+}
+
+// EmitErrors sets the number of times Do should emit an error.
+func (c *Sender) EmitErrors(emit int) {
+	c.emitErrors = emit
+}
+
+// SetError sets the error Do should return.
+func (c *Sender) SetError(err error) {
+	c.err = err
+}
+
+// ClearError clears the error Do emits.
+func (c *Sender) ClearError() {
+	c.SetError(nil)
+}
+
+// EmitContent sets the content to be returned by Do in the response body.
+func (c *Sender) EmitContent(s string) {
+	c.content = s
+}
+
+// EmitStatus sets the status of the response Do emits.
+func (c *Sender) EmitStatus(status string, code int) {
+	c.status = status
+	c.statusCode = code
+}
+
+// SetPollAttempts sets the number of times the returned response emits the default polling
+// status code (i.e., 202 Accepted).
+func (c *Sender) SetPollAttempts(pa int) {
+	c.pollAttempts = pa
+}
+
+// ReuseResponse sets whether a single response object should be reused by all calls to Do.
+func (c *Sender) ReuseResponse(reuseResponse bool) {
+	c.reuseResponse = reuseResponse
+}
+
+// SetResponse sets the response from Do.
+func (c *Sender) SetResponse(resp *http.Response) {
+	c.resp = resp
+	c.reuseResponse = true
+}
+
+// T is a simple testing struct.
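+// T round-trips through encoding/json as {"name": ..., "age": ...} and is used by the
+// JSON-related preparer and responder tests (e.g., WithJSON).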
+type T struct {
+	Name string `json:"name"`
+	Age  int    `json:"age"`
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks/mocks_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+package mocks

=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,311 @@
+package autorest
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+const (
+	mimeTypeJSON     = "application/json"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+
+	headerAuthorization = "Authorization"
+	headerContentType   = "Content-Type"
+	headerUserAgent     = "User-Agent"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must take care not to share or hold per-invocation state since Preparers may be shared and
+// re-used.
+type Preparer interface {
+	Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+	return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along and then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+	return DecoratePreparer(
+		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+		decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+	for _, decorate := range decorators {
+		p = decorate(p)
+	}
+	return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
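+// For example (a sketch mirroring the package documentation):
+//
+//	req, err := Prepare(&http.Request{},
+//		AsGet(),
+//		WithBaseURL("https://microsoft.com/"),
+//		WithPath("a/b/c/"))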
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. 
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				u, err := url.Parse(baseURL)
+				if err == nil {
+					r.URL = u
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFormData returns a PrepareDecorator that URL encodes the passed url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
+func WithJSON(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := json.Marshal(v)
+				if err == nil {
+					r.ContentLength = int64(len(b))
+					r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path
+// is absolute (that is, it begins with a "/"), it replaces the existing path.
+func WithPath(path string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+				}
+				u := r.URL
+				u.Path = strings.TrimRight(u.Path, "/")
+				if strings.HasPrefix(path, "/") {
+					u.Path = path
+				} else {
+					u.Path += "/" + path
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
+func WithEscapedPathParameters(pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := escapeValueStrings(ensureValueStrings(pathParameters))
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					r.URL.Path = strings.Replace(r.URL.Path, "{"+key+"}", value, -1)
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
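+// Values are inserted as-is; use WithEscapedPathParameters when URL escaping is required.
+// For example (a sketch), the template path "/{param1}/b" with map {"param1": "a"} becomes "/a/b".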
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
+func WithPathParameters(pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(pathParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					r.URL.Path = strings.Replace(r.URL.Path, "{"+key+"}", value, -1)
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(queryParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+				}
+				v := r.URL.Query()
+				for key, value := range parameters {
+					v.Add(key, value)
+				}
+				r.URL.RawQuery = v.Encode()
+			}
+			return r, err
+		})
+	}
+}
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
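For orientation, a minimal sketch of how the preparers above compose, written as if from within the package (an illustration only, not part of the vendored file; the host, path segments, and query value are invented):

	// Prepares GET https://example.com/groups/dev?api-version=2016-03-30
	req, err := Prepare(&http.Request{},
		AsGet(),
		WithBaseURL("https://example.com/"),
		WithPath("groups"),
		WithPath("dev"),
		WithQueryParameters(map[string]interface{}{"api-version": "2016-03-30"}))
	if err != nil {
		// handle the preparation error before sending
	}

Because the decorators run in the order provided, WithBaseURL must precede the WithPath calls that extend it.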
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/preparer_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,496 @@
+package autorest
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed
+// Preparer and decorates the response.
+func ExamplePrepareDecorator() {
+	path := "a/b/c/"
+	pd := func() PrepareDecorator {
+		return func(p Preparer) Preparer {
+			return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+				r, err := p.Prepare(r)
+				if err == nil {
+					if r.URL == nil {
+						return r, fmt.Errorf("ERROR: URL is not set")
+					}
+					r.URL.Path += path
+				}
+				return r, err
+			})
+		}
+	}
+
+	r, _ := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		pd())
+
+	fmt.Printf("Path is %s\n", r.URL)
+	// Output: Path is https://microsoft.com/a/b/c/
+}
+
+// PrepareDecorators may also modify and then invoke the Preparer.
+func ExamplePrepareDecorator_pre() {
+	pd := func() PrepareDecorator {
+		return func(p Preparer) Preparer {
+			return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+				r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json")
+				return p.Prepare(r)
+			})
+		}
+	}
+
+	r, _ := Prepare(&http.Request{Header: http.Header{}},
+		pd())
+
+	fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType"))
+	// Output: ContentType is application/json
+}
+
+// Create a sequence of three Preparers that build up the URL path.
+func ExampleCreatePreparer() {
+	p := CreatePreparer(
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("a"),
+		WithPath("b"),
+		WithPath("c"))
+	r, err := p.Prepare(&http.Request{})
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c
+}
+
+// Create and apply separate Preparers
+func ExampleCreatePreparer_multiple() {
+	params := map[string]interface{}{
+		"param1": "a",
+		"param2": "c",
+	}
+
+	p1 := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("/{param1}/b/{param2}/"))
+	p2 := CreatePreparer(WithPathParameters(params))
+
+	r, err := p1.Prepare(&http.Request{})
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	}
+
+	r, err = p2.Prepare(r)
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/
+}
+
+// Create and chain separate Preparers
+func ExampleCreatePreparer_chain() {
+	params := map[string]interface{}{
+		"param1": "a",
+		"param2": "c",
+	}
+
+	p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("/{param1}/b/{param2}/"))
+	p = DecoratePreparer(p, WithPathParameters(params))
+
+	r, err := p.Prepare(&http.Request{})
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/
+}
+
+// Create and prepare an http.Request in one call
+func ExamplePrepare() {
+	r, err := Prepare(&http.Request{},
+		AsGet(),
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("a/b/c/"))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Printf("%s %s", r.Method, r.URL)
+	}
+	// Output: GET https://microsoft.com/a/b/c/
+}
+
+// Create a request for a supplied base URL and path
+func ExampleWithBaseURL() {
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/a/b/c/"))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/
+}
+
+// Create a request with a custom HTTP header
+func ExampleWithHeader() {
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/a/b/c/"),
+		WithHeader("x-foo", "bar"))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Printf("Header %s=%s\n", "x-foo", r.Header.Get("x-foo"))
+	}
+	// Output: Header x-foo=bar
+}
+
+// Create a request whose Body is the URL-encoded form data
+func ExampleWithFormData() {
+	v := url.Values{}
+	v.Add("name", "Rob Pike")
+	v.Add("age", "42")
+
+	r, err := Prepare(&http.Request{},
+		WithFormData(v))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Printf("Request Body contains %s\n", string(b))
+	}
+	// Output: Request Body contains age=42&name=Rob+Pike
+}
+// Create a request whose Body is the JSON encoding of a structure
+func ExampleWithJSON() {
+	t := mocks.T{Name: "Rob Pike", Age: 42}
+
+	r, err := Prepare(&http.Request{},
+		WithJSON(&t))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Printf("Request Body contains %s\n", string(b))
+	}
+	// Output: Request Body contains {"name":"Rob Pike","age":42}
+}
+
+// Create a request from a path with escaped parameters
+func ExampleWithEscapedPathParameters() {
+	params := map[string]interface{}{
+		"param1": "a b c",
+		"param2": "d e f",
+	}
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("/{param1}/b/{param2}/"),
+		WithEscapedPathParameters(params))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a+b+c/b/d+e+f/
+}
+
+// Create a request from a path with parameters
+func ExampleWithPathParameters() {
+	params := map[string]interface{}{
+		"param1": "a",
+		"param2": "c",
+	}
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("/{param1}/b/{param2}/"),
+		WithPathParameters(params))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/
+}
+
+// Create a request with query parameters
+func ExampleWithQueryParameters() {
+	params := map[string]interface{}{
+		"q1": "value1",
+		"q2": "value2",
+	}
+	r, err := Prepare(&http.Request{},
+		WithBaseURL("https://microsoft.com/"),
+		WithPath("/a/b/c/"),
+		WithQueryParameters(params))
+	if err != nil {
+		fmt.Printf("ERROR: %v\n", err)
+	} else {
+		fmt.Println(r.URL)
+	}
+	// Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2
+}
+
+func TestCreatePreparerDoesNotModify(t *testing.T) {
+	r1 := &http.Request{}
+	p := CreatePreparer()
+	r2, err := p.Prepare(r1)
+	if err != nil {
+		t.Errorf("autorest: CreatePreparer failed (%v)", err)
+	}
+	if !reflect.DeepEqual(r1, r2) {
+		t.Errorf("autorest: CreatePreparer without decorators modified the request")
+	}
+}
+
+func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) {
+	p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3"))
+	r, err := p.Prepare(&http.Request{})
+	if err != nil {
+		t.Errorf("autorest: CreatePreparer failed (%v)", err)
+	}
+	if r.URL.String() != "https://microsoft.com/1/2/3" {
+		t.Errorf("autorest: CreatePreparer failed to run decorators in order")
+	}
+}
+
+func TestAsContentType(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), AsContentType("application/text"))
+	if err != nil {
+		fmt.Printf("ERROR: %v", err)
+	}
+	if r.Header.Get(headerContentType) != "application/text" {
+		t.Errorf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType))
+	}
+}
+
+func TestAsFormURLEncoded(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded())
+	if err != nil {
+		fmt.Printf("ERROR: %v", err)
+	}
+	if r.Header.Get(headerContentType) != mimeTypeFormPost {
+		t.Errorf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType))
+	}
+}
+
+func TestAsJSON(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), AsJSON())
+	if err != nil {
+		fmt.Printf("ERROR: %v", err)
+	}
+	if r.Header.Get(headerContentType) != mimeTypeJSON {
+		t.Errorf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType))
+	}
+}
+func TestWithNothing(t *testing.T) {
+	r1 := mocks.NewRequest()
+	r2, err := Prepare(r1, WithNothing())
+	if err != nil {
+		t.Errorf("autorest: WithNothing returned an unexpected error (%v)", err)
+	}
+
+	if !reflect.DeepEqual(r1, r2) {
+		t.Error("autorest: WithNothing modified the passed HTTP Request")
+	}
+}
+
+func TestWithBearerAuthorization(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN"))
+	if err != nil {
+		fmt.Printf("ERROR: %v", err)
+	}
+	if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" {
+		t.Errorf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization))
+	}
+}
+
+func TestWithUserAgent(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), WithUserAgent("User Agent Go"))
+	if err != nil {
+		fmt.Printf("ERROR: %v", err)
+	}
+	if r.Header.Get(headerUserAgent) != "User Agent Go" {
+		t.Errorf("autorest: WithUserAgent failed to add header (%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent))
+	}
+}
+
+func TestWithMethod(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD"))
+	if r.Method != "HEAD" {
+		t.Error("autorest: WithMethod failed to set the HTTP method")
+	}
+}
+
+func TestAsDelete(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsDelete())
+	if r.Method != "DELETE" {
+		t.Error("autorest: AsDelete failed to set the HTTP method to DELETE")
+	}
+}
+
+func TestAsGet(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsGet())
+	if r.Method != "GET" {
+		t.Error("autorest: AsGet failed to set the HTTP method to GET")
+	}
+}
+
+func TestAsHead(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsHead())
+	if r.Method != "HEAD" {
+		t.Error("autorest: AsHead failed to set the HTTP method to HEAD")
+	}
+}
+
+func TestAsOptions(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsOptions())
+	if r.Method != "OPTIONS" {
+		t.Error("autorest: AsOptions failed to set the HTTP method to OPTIONS")
+	}
+}
+
+func TestAsPatch(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsPatch())
+	if r.Method != "PATCH" {
+		t.Error("autorest: AsPatch failed to set the HTTP method to PATCH")
+	}
+}
+
+func TestAsPost(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsPost())
+	if r.Method != "POST" {
+		t.Error("autorest: AsPost failed to set the HTTP method to POST")
+	}
+}
+
+func TestAsPut(t *testing.T) {
+	r, _ := Prepare(mocks.NewRequest(), AsPut())
+	if r.Method != "PUT" {
+		t.Error("autorest: AsPut failed to set the HTTP method to PUT")
+	}
+}
+
+func TestPrepareWithNullRequest(t *testing.T) {
+	_, err := Prepare(nil)
+	if err == nil {
+		t.Error("autorest: Prepare failed to return an error when given a null http.Request")
+	}
+}
+
+func TestWithFormDataSetsContentLength(t *testing.T) {
+	v := url.Values{}
+	v.Add("name", "Rob Pike")
+	v.Add("age", "42")
+
+	r, err := Prepare(&http.Request{},
+		WithFormData(v))
+	if err != nil {
+		t.Errorf("autorest: WithFormData failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Errorf("autorest: WithFormData failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(b)) {
+		t.Errorf("autorest: WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+func TestWithJSONSetsContentLength(t *testing.T) {
+	r, err := Prepare(&http.Request{},
+		WithJSON(&mocks.T{Name: "Rob Pike", Age: 42}))
+	if err != nil {
+		t.Errorf("autorest: WithJSON failed with error (%v)", err)
+	}
+
+	b, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		t.Errorf("autorest: WithJSON failed with error (%v)", err)
+	}
+
+	if r.ContentLength != int64(len(b)) {
+		t.Errorf("autorest: WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b))
+	}
+}
+
+func TestWithHeaderAllocatesHeaders(t *testing.T) {
+	r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar"))
+	if err != nil {
+		t.Errorf("autorest: WithHeader failed (%v)", err)
+	}
+	if r.Header.Get("x-foo") != "bar" {
+		t.Errorf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo"))
+	}
+}
+
+func TestWithPathCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithPath("a"))
+	if err == nil {
+		t.Errorf("autorest: WithPath failed to catch a nil URL")
+	}
+}
+
+func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithEscapedPathParameters(map[string]interface{}{"foo": "bar"}))
+	if err == nil {
+		t.Errorf("autorest: WithEscapedPathParameters failed to catch a nil URL")
+	}
+}
+
+func TestWithPathParametersCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithPathParameters(map[string]interface{}{"foo": "bar"}))
+	if err == nil {
+		t.Errorf("autorest: WithPathParameters failed to catch a nil URL")
+	}
+}
+
+func TestWithQueryParametersCatchesNilURL(t *testing.T) {
+	_, err := Prepare(&http.Request{}, WithQueryParameters(map[string]interface{}{"foo": "bar"}))
+	if err == nil {
+		t.Errorf("autorest: WithQueryParameters failed to catch a nil URL")
+	}
+}
+
+func TestModifyingExistingRequest(t *testing.T) {
+	r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"}))
+	if err != nil {
+		t.Errorf("autorest: Preparing an existing request returned an error (%v)", err)
+	}
+	if r.URL.String() != "https://bing.com/search?q=golang" {
+		t.Errorf("autorest: Preparing an existing request failed (%s)", r.URL)
+	}
+}
+
+func TestWithAuthorizer(t *testing.T) {
+	r1 := mocks.NewRequest()
+
+	na := &NullAuthorizer{}
+	r2, err := Prepare(r1,
+		na.WithAuthorization())
+	if err != nil {
+		t.Errorf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err)
+	} else if !reflect.DeepEqual(r1, r2) {
+		t.Errorf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1)
+	}
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,163 @@
+package autorest
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold state
+// since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use; it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators, which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it
+// unexamined to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				resp.Body.Close()
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				resp.Body.Close()
+			}
+			return err
+		})
+	}
+}
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b := bytes.Buffer{}
+				d := json.NewDecoder(io.TeeReader(resp.Body, &b))
+				err = d.Decode(v)
+				if err != nil {
+					err = fmt.Errorf("Error (%v) occurred decoding JSON (\"%s\")", err, b.String())
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewError("autorest", "WithErrorUnlessStatusCode", "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
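A typical response pipeline built from the responders above might look like this sketch, written as if from within the package (illustrative only; resp is assumed to be an *http.Response obtained from a Sender, and the account type is invented):

	type account struct {
		Name string `json:"name"`
	}
	a := account{}
	err := Respond(resp,
		WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		ByUnmarshallingJSON(&a),
		ByClosing())

Because each decorator invokes the wrapped Responder before reacting, ByClosing placed last wraps the others, so the body is decoded before it is closed.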
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/responder_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,387 @@
+package autorest
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+func ExampleWithErrorUnlessOK() {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+
+	// Respond and leave the response body open (for a subsequent responder to close)
+	err := Respond(r,
+		WithErrorUnlessOK(),
+		ByClosingIfError())
+
+	if err == nil {
+		fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL)
+
+		// Complete handling the response and close the body
+		Respond(r,
+			ByClosing())
+	}
+	// Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200
+}
+
+func ExampleByUnmarshallingJSON() {
+	c := `
+	{
+		"name" : "Rob Pike",
+		"age" : 42
+	}
+	`
+
+	type V struct {
+		Name string `json:"name"`
+		Age  int    `json:"age"`
+	}
+
+	v := &V{}
+
+	Respond(mocks.NewResponseWithContent(c),
+		ByUnmarshallingJSON(v),
+		ByClosing())
+
+	fmt.Printf("%s is %d years old\n", v.Name, v.Age)
+	// Output: Rob Pike is 42 years old
+}
+
+func TestCreateResponderDoesNotModify(t *testing.T) {
+	r1 := mocks.NewResponse()
+	r2 := mocks.NewResponse()
+	p := CreateResponder()
+	err := p.Respond(r1)
+	if err != nil {
+		t.Errorf("autorest: CreateResponder failed (%v)", err)
+	}
+	if !reflect.DeepEqual(r1, r2) {
+		t.Errorf("autorest: CreateResponder without decorators modified the response")
+	}
+}
+
+func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) {
+	s := ""
+
+	d := func(n int) RespondDecorator {
+		return func(r Responder) Responder {
+			return ResponderFunc(func(resp *http.Response) error {
+				err := r.Respond(resp)
+				if err == nil {
+					s += fmt.Sprintf("%d", n)
+				}
+				return err
+			})
+		}
+	}
+
+	p := CreateResponder(d(1), d(2), d(3))
+	err := p.Respond(&http.Response{})
+	if err != nil {
+		t.Errorf("autorest: Respond failed (%v)", err)
+	}
+
+	if s != "123" {
+		t.Errorf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s)
+	}
+}
+
+func TestByIgnoring(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(r2 *http.Response) error {
+					r1 := mocks.NewResponse()
+					if !reflect.DeepEqual(r1, r2) {
+						t.Errorf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1)
+					}
+					return nil
+				})
+			}
+		})(),
+		ByIgnoring(),
+		ByClosing())
+}
+
+func TestByClosing(t *testing.T) {
+	r := mocks.NewResponse()
+	err := Respond(r, ByClosing())
+	if err != nil {
+		t.Errorf("autorest: ByClosing failed (%v)", err)
+	}
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Errorf("autorest: ByClosing did not close the response body")
+	}
+}
+
+func TestByClosingAcceptsNilResponse(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					r.Respond(nil)
+					return nil
+				})
+			}
+		})(),
+		ByClosing())
+}
+
+func TestByClosingAcceptsNilBody(t *testing.T) {
+	r := mocks.NewResponse()
+
+	Respond(r,
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					resp.Body = nil
+					r.Respond(resp)
+					return nil
+				})
+			}
+		})(),
+		ByClosing())
+}
+
+func TestByClosingClosesEvenAfterErrors(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		ByClosing())
+
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Errorf("autorest: ByClosing did not close the response body after an error occurred")
+	}
+}
+
+func TestByClosingClosesReturnsNestedErrors(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+	err := Respond(r,
+		withErrorRespondDecorator(&e),
+		ByClosing())
+
+	if err == nil || !reflect.DeepEqual(e, err) {
+		t.Errorf("autorest: ByClosing failed to return a nested error")
+	}
+}
+
+func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					r.Respond(nil)
+					return nil
+				})
+			}
+		})(),
+		ByClosingIfError())
+}
+
+func TestByClosingIfErrorAcceptsNilBody(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		(func() RespondDecorator {
+			return func(r Responder) Responder {
+				return ResponderFunc(func(resp *http.Response) error {
+					resp.Body.Close()
+					resp.Body = nil
+					r.Respond(resp)
+					return nil
+				})
+			}
+		})(),
+		ByClosingIfError())
+}
+
+func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) {
+	var e error
+
+	r := mocks.NewResponse()
+	Respond(r,
+		withErrorRespondDecorator(&e),
+		ByClosingIfError())
+
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Errorf("autorest: ByClosingIfError did not close the response body after an error occurred")
+	}
+}
+
+func TestByClosingIfErrorDoesNotCloseIfNoErrorOccurs(t *testing.T) {
+	r := mocks.NewResponse()
+	Respond(r,
+		ByClosingIfError())
+
+	if !r.Body.(*mocks.Body).IsOpen() {
+		t.Errorf("autorest: ByClosingIfError closed the response body even though no error occurred")
+	}
+}
+
+func TestByUnmarshallingJSON(t *testing.T) {
+	v := &mocks.T{}
+	r := mocks.NewResponseWithContent(jsonT)
+	err := Respond(r,
+		ByUnmarshallingJSON(v),
+		ByClosing())
+	if err != nil {
+		t.Errorf("autorest: ByUnmarshallingJSON failed (%v)", err)
+	}
+	if v.Name != "Rob Pike" || v.Age != 42 {
+		t.Errorf("autorest: ByUnmarshallingJSON failed to properly unmarshal")
+	}
+}
+
+func TestByUnmarshallingJSONIncludesJSONInErrors(t *testing.T) {
+	v := &mocks.T{}
+	j := jsonT[0 : len(jsonT)-2]
+	r := mocks.NewResponseWithContent(j)
+	err := Respond(r,
+		ByUnmarshallingJSON(v),
+		ByClosing())
+	if err == nil || !strings.Contains(err.Error(), j) {
+		t.Errorf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err)
+	}
+}
+
+func TestRespondAcceptsNullResponse(t *testing.T) {
+	err := Respond(nil)
+	if err != nil {
+		t.Errorf("autorest: Respond returned an unexpected error when given a null Response (%v)", err)
+	}
+}
+func TestWithErrorUnlessStatusCode(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError),
+		ByClosingIfError())
+
+	if err != nil {
+		t.Errorf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status)
+	}
+}
+
+func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError),
+		ByClosingIfError())
+
+	if err == nil {
+		t.Errorf("autorest: WithErrorUnlessStatusCode failed to return an error for an unacceptable status code (%s)", r.Status)
+	}
+}
+
+func TestWithErrorUnlessOK(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+
+	err := Respond(r,
+		WithErrorUnlessOK(),
+		ByClosingIfError())
+
+	if err != nil {
+		t.Errorf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err)
+	}
+}
+
+func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) {
+	r := mocks.NewResponse()
+	r.Request = mocks.NewRequest()
+	r.Status = "400 BadRequest"
+	r.StatusCode = http.StatusBadRequest
+
+	err := Respond(r,
+		WithErrorUnlessOK(),
+		ByClosingIfError())
+
+	if err == nil {
+		t.Errorf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err)
+	}
+}
+
+func TestExtractHeader(t *testing.T) {
+	r := mocks.NewResponse()
+	v := []string{"v1", "v2", "v3"}
+	mocks.SetResponseHeaderValues(r, mocks.TestHeader, v)
+
+	if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) {
+		t.Errorf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderHandlesMissingHeader(t *testing.T) {
+	var v []string
+	r := mocks.NewResponse()
+
+	if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) {
+		t.Errorf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v",
+			v, ExtractHeader(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderValue(t *testing.T) {
+	r := mocks.NewResponse()
+	v := "v1"
+	mocks.SetResponseHeader(r, mocks.TestHeader, v)
+
+	if ExtractHeaderValue(mocks.TestHeader, r) != v {
+		t.Errorf("autorest: ExtractHeaderValue failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) {
+	r := mocks.NewResponse()
+	v := ""
+
+	if ExtractHeaderValue(mocks.TestHeader, r) != v {
+		t.Errorf("autorest: ExtractHeaderValue failed to handle a missing header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r))
+	}
+}
+
+func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) {
+	r := mocks.NewResponse()
+	v := []string{"v1", "v2", "v3"}
+	mocks.SetResponseHeaderValues(r, mocks.TestHeader, v)
+
+	if ExtractHeaderValue(mocks.TestHeader, r) != v[0] {
+		t.Errorf("autorest: ExtractHeaderValue failed to retrieve the expected header -- expected [%s]%v, received [%s]%v",
+			mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r))
+	}
+}
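The tests above construct RespondDecorators inline; a named decorator follows the same shape. A sketch, written as if from within the package (illustrative only, not part of the vendored sources; the name byRecordingStatus is invented):

	// byRecordingStatus returns a RespondDecorator that stores the response
	// status code into code after the wrapped Responder has run without error.
	func byRecordingStatus(code *int) RespondDecorator {
		return func(r Responder) Responder {
			return ResponderFunc(func(resp *http.Response) error {
				err := r.Respond(resp)
				if err == nil && resp != nil {
					*code = resp.StatusCode
				}
				return err
			})
		}
	}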
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,211 @@
+package autorest
+
+import (
+	"log"
+	"math"
+	"net/http"
+	"time"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(&http.Client{}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			time.Sleep(d)
+			return s.Do(r)
+		})
+	}
+}
+// AfterRetryDelay returns a SendDecorator that delays for the number of seconds specified in the
+// Retry-After header of the prior response when polling is required.
+func AfterRetryDelay(defaultDelay time.Duration, codes ...int) SendDecorator {
+	delay := time.Duration(0)
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if delay > time.Duration(0) {
+				time.Sleep(delay)
+			}
+			resp, err := s.Do(r)
+			if ResponseRequiresPolling(resp, codes...) {
+				delay = GetPollingDelay(resp, defaultDelay)
+			} else {
+				delay = time.Duration(0)
+			}
+			return resp, err
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s\n", r.Method, r.URL)
+			resp, err := s.Do(r)
+			logger.Printf("%s %s received %s\n", r.Method, r.URL, resp.Status)
+			return resp, err
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				err = NewError("autorest", "DoErrorIfStatusCode", "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewError("autorest", "DoErrorUnlessStatusCode", "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+// DoRetryForAttempts returns a SendDecorator that retries the request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero).
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			for attempt := 0; attempt < attempts; attempt++ {
+				resp, err = s.Do(r)
+				if err == nil {
+					return resp, err
+				}
+				DelayForBackoff(backoff, attempt)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero).
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				resp, err = s.Do(r)
+				if err == nil {
+					return resp, err
+				}
+				DelayForBackoff(backoff, attempt)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.Sleep for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff may be zero.
+func DelayForBackoff(backoff time.Duration, attempt int) {
+	time.Sleep(time.Duration(math.Pow(float64(backoff), float64(attempt))))
+}
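Note that DelayForBackoff raises the raw nanosecond count of backoff to the power of attempt, so attempt 0 sleeps one nanosecond and large backoff values grow extremely quickly; the tests below therefore pass time.Duration(0). A usage sketch combining a shared Sender with the retry decorators, written as if from within the package (illustrative only; the timeout value and the prepared request req are assumed):

	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := SendWithSender(client, req,
		DoErrorUnlessStatusCode(http.StatusOK),
		DoCloseIfError(),
		DoRetryForAttempts(3, time.Duration(0)))
	if err == nil {
		defer resp.Body.Close()
	}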
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/sender_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,504 @@
+package autorest
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+	"os"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+func ExampleSendWithSender() {
+	client := mocks.NewSender()
+	client.EmitStatus("202 Accepted", http.StatusAccepted)
+
+	logger := log.New(os.Stdout, "autorest: ", 0)
+	na := NullAuthorizer{}
+
+	req, _ := Prepare(&http.Request{},
+		AsGet(),
+		WithBaseURL("https://microsoft.com/a/b/c/"),
+		na.WithAuthorization())
+
+	r, _ := SendWithSender(client, req,
+		WithLogging(logger),
+		DoErrorIfStatusCode(http.StatusAccepted),
+		DoCloseIfError(),
+		DoRetryForAttempts(5, time.Duration(0)))
+
+	Respond(r,
+		ByClosing())
+
+	// Output:
+	// autorest: Sending GET https://microsoft.com/a/b/c/
+	// autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted
+	// autorest: Sending GET https://microsoft.com/a/b/c/
+	// autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted
+	// autorest: Sending GET https://microsoft.com/a/b/c/
+	// autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted
+	// autorest: Sending GET https://microsoft.com/a/b/c/
+	// autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted
+	// autorest: Sending GET https://microsoft.com/a/b/c/
+	// autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted
+}
+
+func ExampleDoRetryForAttempts() {
+	client := mocks.NewSender()
+	client.EmitErrors(10)
+
+	// Retry with backoff -- ensure returned Bodies are closed
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		DoCloseIfError(),
+		DoRetryForAttempts(5, time.Duration(0)))
+
+	Respond(r,
+		ByClosing())
+
+	fmt.Printf("Retry stopped after %d attempts", client.Attempts())
+	// Output: Retry stopped after 5 attempts
+}
+
+func ExampleDoErrorIfStatusCode() {
+	client := mocks.NewSender()
+	client.EmitStatus("204 NoContent", http.StatusNoContent)
+
+	// Chain decorators to retry the request, up to five times, if the status code is 204
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		DoErrorIfStatusCode(http.StatusNoContent),
+		DoCloseIfError(),
+		DoRetryForAttempts(5, time.Duration(0)))
+
+	Respond(r,
+		ByClosing())
+
+	fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status)
+	// Output: Retry stopped after 5 attempts with code 204 NoContent
+}
+
+func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) {
+	client := mocks.NewSender()
+	s := ""
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		withMessage(&s, "a"),
+		withMessage(&s, "b"),
+		withMessage(&s, "c"))
+	if err != nil {
+		t.Errorf("autorest: SendWithSender returned an error (%v)", err)
+	}
+
+	Respond(r,
+		ByClosing())
+
+	if s != "abc" {
+		t.Errorf("autorest: SendWithSender invoked decorators out of order; expected 'abc', received '%s'", s)
+	}
+}
+
+func TestCreateSender(t *testing.T) {
+	f := false
+
+	s := CreateSender(
+		(func() SendDecorator {
+			return func(s Sender) Sender {
+				return SenderFunc(func(r *http.Request) (*http.Response, error) {
+					f = true
+					return nil, nil
+				})
+			}
+		})())
+	s.Do(&http.Request{})
+
+	if !f {
+		t.Error("autorest: CreateSender failed to apply supplied decorator")
+	}
+}
+
+func TestSend(t *testing.T) {
+	f := false
+
+	Send(&http.Request{},
+		(func() SendDecorator {
+			return func(s Sender) Sender {
+				return SenderFunc(func(r *http.Request) (*http.Response, error) {
+					f = true
+					return nil, nil
+				})
+			}
+		})())
+
+	if !f {
+		t.Error("autorest: Send failed to apply supplied decorator")
+	}
+}
+
+func TestAfterDelayWaits(t *testing.T) {
+	client := mocks.NewSender()
+
+	d := 10 * time.Millisecond
+
+	tt := time.Now()
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		AfterDelay(d))
+	s := time.Since(tt)
+	if s < d {
+		t.Error("autorest: AfterDelay failed to wait for at least the specified duration")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestAfterRetryDelayWaits(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	d := 10 * time.Millisecond
+
+	resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted)
+	mocks.SetAcceptedHeaders(resp)
+	mocks.SetRetryHeader(resp, d)
+	client.SetResponse(resp)
+
+	tt := time.Now()
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		AfterRetryDelay(d),
+		DoRetryForAttempts(2, time.Duration(0)))
+	s := time.Since(tt)
+	if s < d {
+		t.Error("autorest: AfterRetryDelay failed to wait for at least the specified duration")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+// Disable test for TravisCI
+// func TestAfterDelayDoesNotWaitTooLong(t *testing.T) {
+// 	client := mocks.NewSender()
+
+// 	// Establish a baseline and then set the wait to 10x that amount
+// 	// -- Waiting 10x the baseline should be long enough for a real test while not slowing the
+// 	// tests down too much
+// 	tt := time.Now()
+// 	SendWithSender(client, mocks.NewRequest())
+// 	d := 10 * time.Since(tt)
+
+// 	tt = time.Now()
+// 	r, _ := SendWithSender(client, mocks.NewRequest(),
+// 		AfterDelay(d))
+// 	s := time.Since(tt)
+// 	if s > 5*d {
+// 		t.Error("autorest: AfterDelay waited too long (more than five times the specified duration")
+// 	}
+
+// 	Respond(r,
+// 		ByClosing())
+// }
+func TestAsIs(t *testing.T) {
+	client := mocks.NewSender()
+
+	r1 := mocks.NewResponse()
+	r2, err := SendWithSender(client, mocks.NewRequest(),
+		(func() SendDecorator {
+			return func(s Sender) Sender {
+				return SenderFunc(func(r *http.Request) (*http.Response, error) {
+					return r1, nil
+				})
+			}
+		})(),
+		AsIs())
+	if err != nil {
+		t.Errorf("autorest: AsIs returned an unexpected error (%v)", err)
+	} else if !reflect.DeepEqual(r1, r2) {
+		t.Errorf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1)
+	}
+
+	Respond(r1,
+		ByClosing())
+	Respond(r2,
+		ByClosing())
+}
+
+func TestDoCloseIfError(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitStatus("400 BadRequest", http.StatusBadRequest)
+
+	r, _ := SendWithSender(client, mocks.NewRequest(),
+		DoErrorIfStatusCode(http.StatusBadRequest),
+		DoCloseIfError())
+
+	if r.Body.(*mocks.Body).IsOpen() {
+		t.Error("autorest: Expected DoCloseIfError to close response body -- it was left open")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) {
+	client := mocks.NewSender()
+
+	SendWithSender(client, mocks.NewRequest(),
+		(func() SendDecorator {
+			return func(s Sender) Sender {
+				return SenderFunc(func(r *http.Request) (*http.Response, error) {
+					resp, err := s.Do(r)
+					if err != nil {
+						resp.Body.Close()
+					}
+					return nil, fmt.Errorf("Faux Error")
+				})
+			}
+		})(),
+		DoCloseIfError())
+}
+
+func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) {
+	client := mocks.NewSender()
+
+	SendWithSender(client, mocks.NewRequest(),
+		(func() SendDecorator {
+			return func(s Sender) Sender {
+				return SenderFunc(func(r *http.Request) (*http.Response, error) {
+					resp, err := s.Do(r)
+					if err != nil {
+						resp.Body.Close()
+					}
+					resp.Body = nil
+					return resp, fmt.Errorf("Faux Error")
+				})
+			}
+		})(),
+		DoCloseIfError())
+}
+
+func TestDoErrorIfStatusCode(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitStatus("400 BadRequest", http.StatusBadRequest)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorIfStatusCode(http.StatusBadRequest),
+		DoCloseIfError())
+	if err == nil {
+		t.Error("autorest: DoErrorIfStatusCode failed to emit an error for passed code")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitStatus("202 Accepted", http.StatusAccepted)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorIfStatusCode(http.StatusBadRequest),
+		DoCloseIfError())
+	if err != nil {
+		t.Error("autorest: DoErrorIfStatusCode failed to ignore a status code")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoErrorUnlessStatusCode(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitStatus("400 BadRequest", http.StatusBadRequest)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorUnlessStatusCode(http.StatusAccepted),
+		DoCloseIfError())
+	if err == nil {
+		t.Error("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitStatus("202 Accepted", http.StatusAccepted)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoErrorUnlessStatusCode(http.StatusAccepted),
+		DoCloseIfError())
+	if err != nil {
+		t.Error("autorest: DoErrorUnlessStatusCode emitted an error for a known status code")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) {
+	client := mocks.NewSender()
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForAttempts(5, time.Duration(0)))
+	if client.Attempts() != 1 {
+		t.Errorf("autorest: DoRetryForAttempts failed to stop after success -- expected attempts %v, actual %v",
+			1, client.Attempts())
+	}
+	if err != nil {
+		t.Errorf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err)
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(10)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForAttempts(5, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Error("autorest: Mock client failed to emit errors")
+	}
+
+	Respond(r,
+		ByClosing())
+
+	if client.Attempts() != 5 {
+		t.Error("autorest: DoRetryForAttempts failed to stop after specified number of attempts")
+	}
+}
+
+func TestDoRetryForAttemptsReturnsResponse(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(1)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForAttempts(1, time.Duration(0)))
+	if err == nil {
+		t.Error("autorest: Mock client failed to emit errors")
+	}
+
+	if r == nil {
+		t.Error("autorest: DoRetryForAttempts failed to return the underlying response")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) {
+	client := mocks.NewSender()
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(10*time.Millisecond, time.Duration(0)))
+	if client.Attempts() != 1 {
+		t.Errorf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v",
+			1, client.Attempts())
+	}
+	if err != nil {
+		t.Errorf("autorest: DoRetryForDuration returned an unexpected error (%v)", err)
+	}
+
+	Respond(r,
+		ByClosing())
+}
+
+func TestDoRetryForDurationStopsAfterDuration(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	d := 10 * time.Millisecond
+	start := time.Now()
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(d, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Error("autorest: Mock client failed to emit errors")
+	}
+
+	Respond(r,
+		ByClosing())
+
+	if time.Now().Sub(start) < d {
+		t.Error("autorest: DoRetryForDuration stopped too soon")
+	}
+}
+
+func TestDoRetryForDurationStopsWithinReason(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	d := 10 * time.Millisecond
+	start := time.Now()
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(d, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Error("autorest: Mock client failed to emit errors")
+	}
+
+	Respond(r,
+		ByClosing())
+
+	if time.Now().Sub(start) > (5 * d) {
+		t.Error("autorest: DoRetryForDuration failed to stop soon enough (exceeded 5 times the specified duration)")
+	}
+}
+
+func TestDoRetryForDurationReturnsResponse(t *testing.T) {
+	client := mocks.NewSender()
+	client.EmitErrors(-1)
+
+	r, err := SendWithSender(client, mocks.NewRequest(),
+		DoRetryForDuration(10*time.Millisecond, time.Duration(0)),
+		DoCloseIfError())
+	if err == nil {
+		t.Error("autorest: Mock client failed to emit errors")
+	}
+
+	if r == nil {
+		t.Error("autorest: DoRetryForDuration failed to return the underlying response")
+	}
+
+	Respond(r,
+		ByClosing())
+}
+func TestDelayForBackoff(t *testing.T) {
+
+	// Establish a baseline and then set the wait to 10x that amount
+	// -- Waiting 10x the baseline should be long enough for a real test while not slowing the
+	// tests down too much
+	tt := time.Now()
+	DelayForBackoff(time.Millisecond, 0)
+	d := 10 * time.Since(tt)
+
+	start := time.Now()
+	DelayForBackoff(d, 1)
+	if time.Now().Sub(start) < d {
+		t.Error("autorest: DelayForBackoff did not delay as long as expected")
+	}
+}
+
+// Disable test for TravisCI
+// func TestDelayForBackoffWithinReason(t *testing.T) {
+
+// 	// Establish a baseline and then set the wait to 10x that amount
+// 	// -- Waiting 10x the baseline should be long enough for a real test while not slowing the
+// 	// tests down too much
+// 	tt := time.Now()
+// 	DelayForBackoff(time.Millisecond, 0)
+// 	d := 10 * time.Since(tt)
+
+// 	start := time.Now()
+// 	DelayForBackoff(d, 1)
+// 	if time.Now().Sub(start) > (time.Duration(5.0) * d) {
+// 		t.Error("autorest: DelayForBackoff delayed too long (exceeded 5 times the specified duration)")
+// 	}
+// }
=== added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to'
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,119 @@
+/*
+Package to provides helpers to ease working with pointer values of marshalled structures.
+*/
+package to
+
+// String returns a string value for the passed string pointer. It returns the empty string if the
+// pointer is nil.
+func String(s *string) string {
+	if s != nil {
+		return *s
+	}
+	return ""
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+	return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+	ms := make(map[string]string, len(msp))
+	for k, sp := range msp {
+		if sp != nil {
+			ms[k] = *sp
+		} else {
+			ms[k] = ""
+		}
+	}
+	return ms
+}
+
+// StringMapPtr returns a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) map[string]*string {
+	msp := make(map[string]*string, len(ms))
+	for k, s := range ms {
+		msp[k] = StringPtr(s)
+	}
+	return msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+	if b != nil {
+		return *b
+	}
+	return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+	return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to/convert_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,196 @@
+package to
+
+import (
+	"testing"
+)
+
+func TestString(t *testing.T) {
+	v := ""
+	if String(&v) != v {
+		t.Errorf("to: String failed to return the correct string -- expected %v, received %v",
+			v, String(&v))
+	}
+}
+
+func TestStringHandlesNil(t *testing.T) {
+	if String(nil) != "" {
+		t.Errorf("to: String failed to correctly convert nil -- expected %v, received %v",
+			"", String(nil))
+	}
+}
+
+func TestStringPtr(t *testing.T) {
+	v := ""
+	if *StringPtr(v) != v {
+		t.Errorf("to: StringPtr failed to return the correct string -- expected %v, received %v",
+			v, *StringPtr(v))
+	}
+}
+
+func TestStringMap(t *testing.T) {
+	msp := map[string]*string{"foo": StringPtr("foo"), "bar": StringPtr("bar"), "baz": StringPtr("baz")}
+	for k, v := range StringMap(msp) {
+		if *msp[k] != v {
+			t.Errorf("to: StringMap incorrectly converted an entry -- expected [%s]%v, received [%s]%v",
+				k, *msp[k], k, v)
+		}
+	}
+}
+
+func TestStringMapHandlesNil(t *testing.T) {
+	msp := map[string]*string{"foo": StringPtr("foo"), "bar": nil, "baz": StringPtr("baz")}
+	for k, v := range StringMap(msp) {
+		if msp[k] == nil && v != "" {
+			t.Errorf("to: StringMap incorrectly converted a nil entry -- expected [%s]%q, received [%s]%q",
+				k, "", k, v)
+		}
+	}
+}
+
+func TestStringMapPtr(t *testing.T) {
+	ms := map[string]string{"foo": "foo", "bar": "bar", "baz": "baz"}
+	for k, msp := range StringMapPtr(ms) {
+		if ms[k] != *msp {
+			t.Errorf("to: StringMapPtr incorrectly converted an entry -- expected [%s]%v, received [%s]%v",
+				k, ms[k], k, *msp)
+		}
+	}
+}
+
+func TestBool(t *testing.T) {
+	v := false
+	if Bool(&v) != v {
+		t.Errorf("to: Bool failed to return the correct bool -- expected %v, received %v",
+			v, Bool(&v))
+	}
+}
+
+func TestBoolHandlesNil(t *testing.T) {
+	if Bool(nil) != false {
+		t.Errorf("to: Bool failed to correctly convert nil -- expected %v, received %v",
+			false, Bool(nil))
+	}
+}
+
+func TestBoolPtr(t *testing.T) {
+	v := false
+	if *BoolPtr(v) != v {
+		t.Errorf("to: BoolPtr failed to return the correct bool -- expected %v, received %v",
+			v, *BoolPtr(v))
+	}
+}
+
+func TestInt(t *testing.T) {
+	v := 0
+	if Int(&v) != v {
+		t.Errorf("to: Int failed to return the correct int -- expected %v, received %v",
+			v, Int(&v))
+	}
+}
+
+func TestIntHandlesNil(t *testing.T) {
+	if Int(nil) != 0 {
+		t.Errorf("to: Int failed to correctly convert nil -- expected %v, received %v",
+			0, Int(nil))
+	}
+}
+
+func TestIntPtr(t *testing.T) {
+	v := 0
+	if *IntPtr(v) != v {
+		t.Errorf("to: IntPtr failed to return the correct int -- expected %v, received %v",
+			v, *IntPtr(v))
+	}
+}
received %v", + v, *IntPtr(v)) + } +} + +func TestInt32(t *testing.T) { + v := int32(0) + if Int32(&v) != v { + t.Errorf("to: Int32 failed to return the correct string -- expected %v, received %v", + v, Int32(&v)) + } +} + +func TestInt32HandlesNil(t *testing.T) { + if Int32(nil) != int32(0) { + t.Errorf("to: Int32 failed to correctly convert nil -- expected %v, received %v", + 0, Int32(nil)) + } +} + +func TestInt32Ptr(t *testing.T) { + v := int32(0) + if *Int32Ptr(v) != v { + t.Errorf("to: Int32Ptr failed to return the correct string -- expected %v, received %v", + v, *Int32Ptr(v)) + } +} + +func TestInt64(t *testing.T) { + v := int64(0) + if Int64(&v) != v { + t.Errorf("to: Int64 failed to return the correct string -- expected %v, received %v", + v, Int64(&v)) + } +} + +func TestInt64HandlesNil(t *testing.T) { + if Int64(nil) != int64(0) { + t.Errorf("to: Int64 failed to correctly convert nil -- expected %v, received %v", + 0, Int64(nil)) + } +} + +func TestInt64Ptr(t *testing.T) { + v := int64(0) + if *Int64Ptr(v) != v { + t.Errorf("to: Int64Ptr failed to return the correct string -- expected %v, received %v", + v, *Int64Ptr(v)) + } +} + +func TestFloat32(t *testing.T) { + v := float32(0) + if Float32(&v) != v { + t.Errorf("to: Float32 failed to return the correct string -- expected %v, received %v", + v, Float32(&v)) + } +} + +func TestFloat32HandlesNil(t *testing.T) { + if Float32(nil) != float32(0) { + t.Errorf("to: Float32 failed to correctly convert nil -- expected %v, received %v", + 0, Float32(nil)) + } +} + +func TestFloat32Ptr(t *testing.T) { + v := float32(0) + if *Float32Ptr(v) != v { + t.Errorf("to: Float32Ptr failed to return the correct string -- expected %v, received %v", + v, *Float32Ptr(v)) + } +} + +func TestFloat64(t *testing.T) { + v := float64(0) + if Float64(&v) != v { + t.Errorf("to: Float64 failed to return the correct string -- expected %v, received %v", + v, Float64(&v)) + } +} + +func TestFloat64HandlesNil(t *testing.T) { + if Float64(nil) != float64(0) { + t.Errorf("to: Float64 failed to correctly convert nil -- expected %v, received %v", + 0, Float64(nil)) + } +} + +func TestFloat64Ptr(t *testing.T) { + v := float64(0) + if *Float64Ptr(v) != v { + t.Errorf("to: Float64Ptr failed to return the correct string -- expected %v, received %v", + v, *Float64Ptr(v)) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,42 @@ +package autorest + +import ( + "fmt" + "net/url" +) + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + default: + return fmt.Sprintf("%v", v) + } +} === added file 
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/utility_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,145 @@
+package autorest
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/mocks"
+)
+
+const (
+	testAuthorizationHeader = "BEARER SECRETTOKEN"
+	testBadURL              = ""
+	jsonT                   = `
+    {
+      "name":"Rob Pike",
+      "age":42
+    }`
+)
+
+func TestContainsIntFindsValue(t *testing.T) {
+	ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+	v := 5
+	if !containsInt(ints, v) {
+		t.Errorf("autorest: containsInt failed to find %v in %v", v, ints)
+	}
+}
+
+func TestContainsIntDoesNotFindValue(t *testing.T) {
+	ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+	v := 42
+	if containsInt(ints, v) {
+		t.Errorf("autorest: containsInt unexpectedly found %v in %v", v, ints)
+	}
+}
+
+func TestEscapeStrings(t *testing.T) {
+	m := map[string]string{
+		"string": "a long string with = odd characters",
+		"int":    "42",
+		"nil":    "",
+	}
+	r := map[string]string{
+		"string": "a+long+string+with+%3D+odd+characters",
+		"int":    "42",
+		"nil":    "",
+	}
+	v := escapeValueStrings(m)
+	if !reflect.DeepEqual(v, r) {
+		t.Errorf("autorest: escapeValueStrings returned %v\n", v)
+	}
+}
+
+func TestEnsureStrings(t *testing.T) {
+	m := map[string]interface{}{
+		"string": "string",
+		"int":    42,
+		"nil":    nil,
+	}
+	r := map[string]string{
+		"string": "string",
+		"int":    "42",
+		"nil":    "",
+	}
+	v := ensureValueStrings(m)
+	if !reflect.DeepEqual(v, r) {
+		t.Errorf("autorest: ensureValueStrings returned %v\n", v)
+	}
+}
+
+func doEnsureBodyClosed(t *testing.T) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() {
+				t.Error("autorest: Expected Body to be closed -- it was left open")
+			}
+			return resp, err
+		})
+	}
+}
+
+type mockAuthorizer struct{}
+
+func (ma mockAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithHeader(headerAuthorization, testAuthorizationHeader)
+}
+
+type mockFailingAuthorizer struct{}
+
+func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error")
+		})
+	}
+}
+
+func withMessage(output *string, msg string) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil {
+				*output += msg
+			}
+			return resp, err
+		})
+	}
+}
+
+type mockInspector struct {
+	wasInvoked bool
+}
+
+func (mi *mockInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			mi.wasInvoked = true
+			return p.Prepare(r)
+		})
+	}
+}
+
+func (mi *mockInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			mi.wasInvoked = true
+			return r.Respond(resp)
+		})
+	}
+}
+
+func withErrorRespondDecorator(e *error) RespondDecorator {
return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil { + return err + } + *e = fmt.Errorf("autorest: Faux Respond Error") + return *e + }) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +package autorest + +import ( + "fmt" +) + +const ( + major = "1" + minor = "0" + patch = "1" + tag = "" + semVerFormat = "%s.%s.%s%s" +) + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/version_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +package autorest + +import ( + "testing" +) + +func TestVersion(t *testing.T) { + v := "1.0.1" + if Version() != v { + t.Errorf("autorest: Version failed to return the expected version -- expected %s, received %s", + v, Version()) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org' === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x' === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto' === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12' === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/bmp-string.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. 
+
+	ret := make([]byte, 0, 2*len(s)+2)
+
+	for _, r := range s {
+		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+			return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+		}
+		ret = append(ret, byte(r/256), byte(r%256))
+	}
+
+	return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+	if len(bmpString)%2 != 0 {
+		return "", errors.New("pkcs12: odd-length BMP string")
+	}
+
+	// strip terminator if present
+	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+		bmpString = bmpString[:l-2]
+	}
+
+	s := make([]uint16, 0, len(bmpString)/2)
+	for len(bmpString) > 0 {
+		s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+		bmpString = bmpString[2:]
+	}
+
+	return string(utf16.Decode(s)), nil
+}
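bmpString and decodeBMPString implement the zero-terminated, big-endian BMPString layout described in the comments above. A short, standalone sketch of the same encoding using unicode/utf16 (the sample text is illustrative):

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	s := "π12" // BMP-only text: each rune fits in one 16-bit code unit

	// Two big-endian bytes per code unit, plus a two-byte NULL terminator.
	enc := make([]byte, 0, 2*len(s)+2)
	for _, u := range utf16.Encode([]rune(s)) {
		enc = append(enc, byte(u>>8), byte(u))
	}
	enc = append(enc, 0, 0)

	fmt.Printf("% x\n", enc) // 03 c0 00 31 00 32 00 00
}
```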
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/crypto.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+	oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+	oidPBEWithSHAAnd40BitRC2CBC      = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+	// create returns a cipher.Block given a key.
+	create(key []byte) (cipher.Block, error)
+	// deriveKey returns a key derived from the given password and salt.
+	deriveKey(salt, password []byte, iterations int) []byte
+	// deriveIV returns an IV derived from the given password and salt.
+	deriveIV(salt, password []byte, iterations int) []byte
+}
+
+type shaWithTripleDESCBC struct{}
+
+func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
+	return des.NewTripleDESCipher(key)
+}
+
+func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
+}
+
+func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type shaWith40BitRC2CBC struct{}
+
+func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
+	return rc2.New(key, len(key)*8)
+}
+
+func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
+}
+
+func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
+	return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type pbeParams struct {
+	Salt       []byte
+	Iterations int
+}
+
+func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
+	var cipherType pbeCipher
+
+	switch {
+	case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
+		cipherType = shaWithTripleDESCBC{}
+	case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
+		cipherType = shaWith40BitRC2CBC{}
+	default:
+		return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
+	}
+
+	var params pbeParams
+	if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
+		return nil, 0, err
+	}
+
+	key := cipherType.deriveKey(params.Salt, password, params.Iterations)
+	iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
+
+	block, err := cipherType.create(key)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
+}
+
+func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
+	cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
+	if err != nil {
+		return nil, err
+	}
+
+	encrypted := info.Data()
+	if len(encrypted) == 0 {
+		return nil, errors.New("pkcs12: empty encrypted data")
+	}
+	if len(encrypted)%blockSize != 0 {
+		return nil, errors.New("pkcs12: input is not a multiple of the block size")
+	}
+	decrypted = make([]byte, len(encrypted))
+	cbc.CryptBlocks(decrypted, encrypted)
+
+	psLen := int(decrypted[len(decrypted)-1])
+	if psLen == 0 || psLen > blockSize {
+		return nil, ErrDecryption
+	}
+
+	if len(decrypted) < psLen {
+		return nil, ErrDecryption
+	}
+	ps := decrypted[len(decrypted)-psLen:]
+	decrypted = decrypted[:len(decrypted)-psLen]
+	if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 {
+		return nil, ErrDecryption
+	}
+
+	return
+}
+
+// decryptable abstracts an object that contains ciphertext.
+type decryptable interface {
+	Algorithm() pkix.AlgorithmIdentifier
+	Data() []byte
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go'
--- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/errors.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal' === added directory 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2' === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,274 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 
0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } 
+ + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/mac.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pbkdf.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,155 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. 
+func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. 
+ { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/pkcs12.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,342 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `ans1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) 
+
+// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
+// trailing data after unmarshaling.
+func unmarshal(in []byte, out interface{}) error {
+	trailing, err := asn1.Unmarshal(in, out)
+	if err != nil {
+		return err
+	}
+	if len(trailing) != 0 {
+		return errors.New("pkcs12: trailing data found")
+	}
+	return nil
+}
+
+// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
+func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
+	encodedPassword, err := bmpString(password)
+	if err != nil {
+		return nil, ErrIncorrectPassword
+	}
+
+	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+	if err != nil {
+		return nil, err
+	}
+
+	blocks := make([]*pem.Block, 0, len(bags))
+	for _, bag := range bags {
+		block, err := convertBag(&bag, encodedPassword)
+		if err != nil {
+			return nil, err
+		}
+		blocks = append(blocks, block)
+	}
+
+	return blocks, nil
+}
+
+func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
+	block := &pem.Block{
+		Headers: make(map[string]string),
+	}
+
+	for _, attribute := range bag.Attributes {
+		k, v, err := convertAttribute(&attribute)
+		if err != nil {
+			return nil, err
+		}
+		block.Headers[k] = v
+	}
+
+	switch {
+	case bag.Id.Equal(oidCertBag):
+		block.Type = certificateType
+		certsData, err := decodeCertBag(bag.Value.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		block.Bytes = certsData
+	case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+		block.Type = privateKeyType
+
+		key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
+		if err != nil {
+			return nil, err
+		}
+
+		switch key := key.(type) {
+		case *rsa.PrivateKey:
+			block.Bytes = x509.MarshalPKCS1PrivateKey(key)
+		case *ecdsa.PrivateKey:
+			block.Bytes, err = x509.MarshalECPrivateKey(key)
+			if err != nil {
+				return nil, err
+			}
+		default:
+			return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
+		}
+	default:
+		return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
+	}
+	return block, nil
+}
+
+func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
+	isString := false
+
+	switch {
+	case attribute.Id.Equal(oidFriendlyName):
+		key = "friendlyName"
+		isString = true
+	case attribute.Id.Equal(oidLocalKeyID):
+		key = "localKeyId"
+	case attribute.Id.Equal(oidMicrosoftCSPName):
+		// This key is chosen to match OpenSSL.
+		key = "Microsoft CSP Name"
+		isString = true
+	default:
+		return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
+	}
+
+	if isString {
+		if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
+			return "", "", err
+		}
+		if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
+			return "", "", err
+		}
+	} else {
+		var id []byte
+		if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
+			return "", "", err
+		}
+		value = hex.EncodeToString(id)
+	}
+
+	return key, value, nil
+}
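ToPEM is the usual entry point when a PFX bundle holds more than the single certificate/key pair that Decode (below) expects. A hedged sketch of feeding its output to crypto/tls; the file name and password are placeholders:

```go
package main

import (
	"crypto/tls"
	"encoding/pem"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/pkcs12"
)

func main() {
	pfxData, err := ioutil.ReadFile("client.pfx") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// Convert every safe bag to a PEM block, then concatenate them.
	blocks, err := pkcs12.ToPEM(pfxData, "password") // placeholder password
	if err != nil {
		log.Fatal(err)
	}
	var pemBytes []byte
	for _, b := range blocks {
		pemBytes = append(pemBytes, pem.EncodeToMemory(b)...)
	}

	// crypto/tls accepts the concatenated PEM for both arguments.
	cert, err := tls.X509KeyPair(pemBytes, pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	_ = cert // use in a tls.Config{Certificates: ...}
}
```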
+
+// Decode extracts a certificate and private key from pfxData. This function
+// assumes that there is only one certificate and only one private key in the
+// pfxData.
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
+	encodedPassword, err := bmpString(password)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if len(bags) != 2 {
+		err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
+		return
+	}
+
+	for _, bag := range bags {
+		switch {
+		case bag.Id.Equal(oidCertBag):
+			if certificate != nil {
+				return nil, nil, errors.New("pkcs12: expected exactly one certificate bag")
+			}
+
+			certsData, err := decodeCertBag(bag.Value.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certs, err := x509.ParseCertificates(certsData)
+			if err != nil {
+				return nil, nil, err
+			}
+			if len(certs) != 1 {
+				err = errors.New("pkcs12: expected exactly one certificate in the certBag")
+				return nil, nil, err
+			}
+			certificate = certs[0]
+
+		case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+			if privateKey != nil {
+				return nil, nil, errors.New("pkcs12: expected exactly one key bag")
+			}
+
+			if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	if certificate == nil {
+		return nil, nil, errors.New("pkcs12: certificate missing")
+	}
+	if privateKey == nil {
+		return nil, nil, errors.New("pkcs12: private key missing")
+	}
+
+	return
+}
+
+func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
+	pfx := new(pfxPdu)
+	if err := unmarshal(p12Data, pfx); err != nil {
+		return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
+	}
+
+	if pfx.Version != 3 {
+		return nil, nil, NotImplementedError("can only decode v3 PFX PDUs")
+	}
+
+	if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
+		return nil, nil, NotImplementedError("only password-protected PFX is implemented")
+	}
+
+	// unmarshal the explicit bytes in the content for type 'data'
+	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
+		return nil, nil, err
+	}
+
+	if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
+		return nil, nil, errors.New("pkcs12: no MAC in data")
+	}
+
+	if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
+		if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
+			// some implementations use an empty byte array
+			// for the empty string password try one more
+			// time with empty-empty password
+			password = nil
+			err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	var authenticatedSafe []contentInfo
+	if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
+		return nil, nil, err
+	}
+
+	if len(authenticatedSafe) != 2 {
+		return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
+	}
+
+	for _, ci := range authenticatedSafe {
+		var data []byte
+
+		switch {
+		case ci.ContentType.Equal(oidDataContentType):
+			if err := unmarshal(ci.Content.Bytes, &data); err != nil {
+				return nil, nil, err
+			}
+		case ci.ContentType.Equal(oidEncryptedDataContentType):
+			var encryptedData encryptedData
+			if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
+				return nil, nil, err
+			}
+			if encryptedData.Version != 0 {
+				return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
+			}
+			if data, err =
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go' --- src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12/safebags.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/LICENSE' --- src/github.com/Azure/azure-sdk-for-go/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
=== added file 'src/github.com/Azure/azure-sdk-for-go/README.md'
--- src/github.com/Azure/azure-sdk-for-go/README.md	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,88 @@
+# Microsoft Azure SDK for Go
+
+This project provides various Go packages to perform operations
+on Microsoft Azure REST APIs.
+
+[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go)
+
+See the list of implemented API clients [here](http://godoc.org/github.com/Azure/azure-sdk-for-go).
+
+> **NOTE:** This repository is under heavy ongoing development and
+is likely to break over time. We currently do not have any releases
+yet. If you are planning to use the repository, please consider vendoring
+the packages in your project and updating them when a stable tag is out.
+
+# Installation
+
+    go get -d github.com/Azure/azure-sdk-for-go/management
+
+# Usage
+
+Read the godoc for the repository at: http://godoc.org/github.com/Azure/azure-sdk-for-go/
+
+The client currently supports authentication to the Service Management
+API with certificates or an Azure `.publishSettings` file. You can
+download the `.publishSettings` file for your subscriptions
+[here](https://manage.windowsazure.com/publishsettings).
+
+### Example: Creating a Linux Virtual Machine
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+	"github.com/Azure/azure-sdk-for-go/management/hostedservice"
+	"github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+	"github.com/Azure/azure-sdk-for-go/management/vmutils"
+)

+func main() {
+	dnsName := "test-vm-from-go"
+	storageAccount := "mystorageaccount"
+	location := "West US"
+	vmSize := "Small"
+	vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
+	userName := "testuser"
+	userPassword := "Test123"
+
+	client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "")
+	if err != nil {
+		panic(err)
+	}
+
+	// create hosted service
+	if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{
+		ServiceName: dnsName,
+		Location:    location,
+		Label:       base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil {
+		panic(err)
+	}
+
+	// create virtual machine
+	role := vmutils.NewVMConfiguration(dnsName, vmSize)
+	vmutils.ConfigureDeploymentFromPlatformImage(
+		&role,
+		vmImage,
+		fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName),
+		"")
+	vmutils.ConfigureForLinux(&role, dnsName, userName, userPassword)
+	vmutils.ConfigureWithPublicSSH(&role)
+
+	operationID, err := virtualmachine.NewClient(client).
+		CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{})
+	if err != nil {
+		panic(err)
+	}
+	if err := client.WaitForOperation(operationID, nil); err != nil {
+		panic(err)
+	}
+}
```
+
+# License
+
+This project is published under the [Apache 2.0 License](LICENSE).
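As a counterpart to the `.publishSettings` flow shown above, here is a sketch of the certificate path. This assumes the management package exposes a NewClient constructor taking a subscription ID and the raw management-certificate bytes; check the package godoc for the exact signature. The paths and IDs below are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/Azure/azure-sdk-for-go/management"
)

func main() {
	// Placeholder inputs: a subscription ID and a PEM file holding the
	// management certificate together with its private key.
	cert, err := ioutil.ReadFile("path/to/management.pem")
	if err != nil {
		log.Fatal(err)
	}

	client, err := management.NewClient("subscription-id", cert)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // pass to hostedservice.NewClient(client), etc.
}
```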
=== added directory 'src/github.com/Azure/azure-sdk-for-go/arm' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md' --- src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/CHANGELOG.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +# CHANGELOG
+
+## v0.1.1-beta -------------------------------------------------------------------------------------
+
+- Improves the UserAgent string to disambiguate arm packages from others in the SDK
+- Improves setting the http.Response into generated results (reduces likelihood of a nil reference)
+- Adds gofmt, golint, and govet to Travis CI for the arm packages
+
+### Fixed Issues
+
+- https://github.com/Azure/azure-sdk-for-go/issues/196
+- https://github.com/Azure/azure-sdk-for-go/issues/213
+
+## v0.1.0-beta -------------------------------------------------------------------------------------
+
+This release addresses the issues raised against the alpha release and adds more features. Most
+notably, it addresses the challenges of encoding JSON
+(see the [comments](https://github.com/Azure/go-autorest#handling-empty-values) in the
+[go-autorest](https://github.com/Azure/go-autorest) package) by using pointers for *all* structure
+fields (with the exception of enumerations). The
+[go-autorest/autorest/to](https://github.com/Azure/go-autorest/tree/master/autorest/to) package
+provides helpers to convert to / from pointers. The examples demonstrate their usage.
+
+Additionally, the packages now align with Go coding standards and pass both `golint` and `govet`.
+Accomplishing this required renaming various fields and parameters (such as changing Url to URL).
+
+### Changes
+
+- Changed request / response structures to use pointer fields.
+- Changed methods to return `error` instead of `autorest.Error`.
+- Re-divided methods to ease asynchronous requests.
+- Added paged results support.
+- Added a UserAgent string.
+- Added changes necessary to pass golint and govet.
+- Updated README.md with details on asynchronous requests and paging.
+- Saved package dependencies through Godep (for the entire SDK).
+
+### Fixed Issues
+
+- https://github.com/Azure/azure-sdk-for-go/issues/205
+- https://github.com/Azure/azure-sdk-for-go/issues/206
+- https://github.com/Azure/azure-sdk-for-go/issues/211
+- https://github.com/Azure/azure-sdk-for-go/issues/212
+
+## v0.1.0-alpha ------------------------------------------------------------------------------------
+
+This release introduces the Azure Resource Manager packages generated from the corresponding
+[Swagger API](http://swagger.io) [definitions](https://github.com/Azure/azure-rest-api-specs). === added file 'src/github.com/Azure/azure-sdk-for-go/arm/README.md' --- src/github.com/Azure/azure-sdk-for-go/arm/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,355 @@ +# Introducing the Azure Resource Manager packages for Go
+
+## How Did We Get Here?
+
+Azure is growing rapidly, regularly adding new services and features. While rapid growth
+is good for users, it is hard on SDKs. Each new service and each new feature requires someone to
+learn the details and add the needed code to the SDK. As a result, the
+[Azure SDK for Go](https://github.com/Azure/azure-sdk-for-go)
+has lagged behind Azure. It is missing
+entire services and has not kept current with features. There is simply too much change to maintain
+a hand-written SDK.
+
+For this reason, the
+[Azure SDK for Go](https://github.com/Azure/azure-sdk-for-go),
+with the release of the Azure Resource Manager (ARM)
+packages, is transitioning to a generated-code model. Other Azure SDKs, notably the
+[Azure SDK for .NET](https://github.com/Azure/azure-sdk-for-net), have successfully adopted a
+generated-code strategy. Recently, Microsoft published the
+[Autorest](https://github.com/Azure/autorest) tool used to create these SDKs. While the Go
+generator is not yet public (mostly because work remains), we have been adding support for Go. The
+ARM packages are the first set generated using this new toolchain.
+
+There are a couple of items to note. First, since both the tooling and the underlying support
+packages are new, the code is not yet "production ready." Treat these packages as being of
+***beta*** quality.
+That's not to say we don't believe in the code, but we want to see what others think and how well
+the packages work in a variety of environments before settling down into an official, first
+release. If you find problems or have suggestions, please submit a pull request to document what
+you find. However, since the code is generated, we'll use your pull request to guide changes we
+make to the underlying generator rather than merging the pull request itself.
+
+The second item of note is that, to keep the generated code clean and reliable, it depends on
+another new package, [go-autorest](https://github.com/Azure/go-autorest).
+Though it is part of the SDK, we separated the code to better control versioning and maintain
+agility. Since
+[go-autorest](https://github.com/Azure/go-autorest)
+is hand-crafted, we will take pull requests in the same manner as for our other repositories.
+
+We intend to rapidly improve these packages until they are "production ready."
+So, try them out and give us your thoughts.
+
+## What Have We Done?
+
+Creating new frameworks is hard and often leads to "cliffs": the code is easy to use until some
+special case or tweak arises and then, well, you're stuck. Often, small differences in
+requirements can lead to forking the code and investing a lot of time. Cliffs occur even more
+frequently in generated code. We wanted to avoid them, and we believe the new model does. Our
+initial goals were:
+
+* Easy to use out of the box. It should be "clone and go" for straightforward use.
+* Easy composition to handle the majority of complex cases.
+* Easy to integrate with existing frameworks, fitting nicely with channels and supporting fan-out /
+fan-in setups.
+
+These are best shown in a series of examples, all of which are included in the
+[arm/examples](https://github.com/Azure/azure-sdk-for-go/blob/master/arm/examples/)
+sub-folder.
+
+## First a Sidenote: Authentication and the Azure Resource Manager
+
+Before using the Azure Resource Manager packages, you need to understand how ARM authenticates and
+authorizes requests.
+Unlike the earlier Azure service APIs, the Azure Resource Manager does *not* use certificates.
+Instead, it relies on [OAuth2](http://oauth.net). While OAuth2 provides many advantages over
+certificates, programmatic use, such as for scripts on headless servers, requires understanding and
+creating one or more *Service Principals.*
+There are several good blog posts, such as
+[Automating Azure on your CI server using a Service Principal](http://blog.davidebbo.com/2014/12/azure-service-principal.html)
+and
+[Microsoft Azure REST API + OAuth 2.0](https://ahmetalpbalkan.com/blog/azure-rest-api-with-oauth2/),
+that describe what this means.
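Distilled from the examples that follow, the service principal handshake itself is small. A minimal sketch using the `arm/examples/helpers` package those examples rely on (it assumes the service principal's credentials are stored where `helpers.LoadCredentials` can find them):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
	"github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Load the service principal credentials (subscription ID, tenant ID,
	// client ID, and secret).
	c, err := helpers.LoadCredentials()
	if err != nil {
		log.Fatalf("Error: %v", err)
	}

	// Exchange the credentials for an OAuth2 token scoped to the Azure
	// Resource Manager.
	spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope)
	if err != nil {
		log.Fatalf("Error: %v", err)
	}

	// Any ARM client then attaches (and refreshes) the token on each request.
	ac := storage.NewAccountsClient(c["subscriptionID"])
	ac.Authorizer = spt
	_ = ac // ready for calls such as CheckNameAvailability
}
```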
+For details on creating and authorizing Service Principals, see the MSDN articles
+[Azure API Management REST API Authentication](https://msdn.microsoft.com/en-us/library/azure/5b13010a-d202-4af5-aabf-7ebc26800b3d)
+and
+[Create a new Azure Service Principal using the Azure portal](https://azure.microsoft.com/en-us/documentation/articles/resource-group-create-service-principal-portal/).
+Dushyant Gill, a Senior Program Manager for Azure Active Directory, has written an extensive blog
+post,
+[Developer's Guide to Auth with Azure Resource Manager API](http://www.dushyantgill.com/blog/2015/05/23/developers-guide-to-auth-with-azure-resource-manager-api/),
+that is also quite helpful.
+
+## A Simple Example: Checking the availability of a name within Azure Storage
+
+Each ARM provider, such as
+[Azure Storage](http://azure.microsoft.com/en-us/documentation/services/storage/)
+or
+[Azure Compute](https://azure.microsoft.com/en-us/documentation/services/virtual-machines/),
+has its own package. Start by importing
+the packages for the providers you need. Next, most packages divide their APIs across multiple
+clients to avoid name collision and improve usability. For example, the
+[Azure Storage](http://azure.microsoft.com/en-us/documentation/services/storage/)
+package has
+two clients:
+[storage.StorageAccountsClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/arm/storage#StorageAccountsClient)
+and
+[storage.UsageOperationsClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/arm/storage#UsageOperationsClient).
+To check if a name is available, use the
+[storage.StorageAccountsClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/arm/storage#StorageAccountsClient):
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+    "os"
+
+    "github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
+    "github.com/Azure/azure-sdk-for-go/arm/storage"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/azure"
+    "github.com/Azure/go-autorest/autorest/to"
+)
+
+func checkName(name string) {
+    c, err := helpers.LoadCredentials()
+    if err != nil {
+        log.Fatalf("Error: %v", err)
+    }
+
+    ac := storage.NewAccountsClient(c["subscriptionID"])
+
+    spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope)
+    if err != nil {
+        log.Fatalf("Error: %v", err)
+    }
+    ac.Authorizer = spt
+
+    ac.Sender = autorest.CreateSender(
+        autorest.WithLogging(log.New(os.Stdout, "sdk-example: ", log.LstdFlags)))
+
+    cna, err := ac.CheckNameAvailability(
+        storage.AccountCheckNameAvailabilityParameters{
+            Name: to.StringPtr(name),
+            Type: to.StringPtr("Microsoft.Storage/storageAccounts")})
+    if err != nil {
+        log.Fatalf("Error: %v", err)
+    }
+
+    if to.Bool(cna.NameAvailable) {
+        fmt.Printf("The name '%s' is available\n", name)
+    } else {
+        fmt.Printf("The name '%s' is unavailable because %s\n", name, to.String(cna.Message))
+    }
+}
+```
+
+Each ARM client composes with [autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client).
+[autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client)
+enables altering the behavior of the API calls by leveraging the decorator pattern of
+[go-autorest](https://github.com/Azure/go-autorest).
For example, in the code above, the
[azure.ServicePrincipalToken](https://godoc.org/github.com/Azure/go-autorest/autorest/azure#ServicePrincipalToken)
includes a
[WithAuthorization](https://godoc.org/github.com/Azure/go-autorest/autorest#Client.WithAuthorization)
[autorest.PrepareDecorator](https://godoc.org/github.com/Azure/go-autorest/autorest#PrepareDecorator)
that applies the OAuth2 authorization token to the request. It will, as needed, refresh the token
using the supplied credentials.
+
+Providing a decorated
+[autorest.Sender](https://godoc.org/github.com/Azure/go-autorest/autorest#Sender) or populating
+the [autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client)
+with a custom
+[autorest.PrepareDecorator](https://godoc.org/github.com/Azure/go-autorest/autorest#PrepareDecorator)
+or
+[autorest.RespondDecorator](https://godoc.org/github.com/Azure/go-autorest/autorest#RespondDecorator)
+enables more control. See the included example file
+[check.go](https://github.com/Azure/azure-sdk-for-go/blob/master/arm/examples/check.go)
+for more details. Through these you can modify the outgoing request, inspect the incoming response,
+or even go so far as to provide a
+[circuit breaker](https://msdn.microsoft.com/en-us/library/dn589784.aspx)
+to protect your service from unexpected latencies.
+
+Lastly, all Azure ARM API calls return an instance of the
+[autorest.Error](https://godoc.org/github.com/Azure/go-autorest/autorest#Error) interface.
+Not only does the interface give anonymous access to the original
+[error](http://golang.org/ref/spec#Errors),
+but it also provides the package type (e.g.,
+[storage.StorageAccountsClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/arm/storage#StorageAccountsClient)),
+the failing method (e.g.,
+[CheckNameAvailability](https://godoc.org/github.com/Azure/azure-sdk-for-go/arm/storage#StorageAccountsClient.CheckNameAvailability)),
+and a detailed error message.
+
+## Something a Bit More Complex: Creating a new Azure Storage account
+
+Redundancy, both local and across regions, and service load affect service responsiveness. Some
+API calls will return before having completed the request. An Azure ARM API call indicates the
+request is incomplete (as opposed to having failed for some reason) by returning HTTP status code
+'202 Accepted.' The
+[autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client),
+composed into
+all of the Azure ARM clients, provides support for basic request polling. The default is to
+poll until a specified duration has passed (with polling frequency determined by the
+HTTP [Retry-After](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.37)
+header in the response). By changing the
+[autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client)
+settings, you can poll for a fixed number of attempts or elect not to poll at all.
+
+Whether you elect to poll or not, all Azure ARM client responses compose with an instance of
+[autorest.Response](https://godoc.org/github.com/Azure/go-autorest/autorest#Response).
+At present,
+[autorest.Response](https://godoc.org/github.com/Azure/go-autorest/autorest#Response)
+only composes over the standard
+[http.Response](https://golang.org/pkg/net/http/#Response)
+object (though that may change as we implement more features).
When your code receives an error from an
Azure ARM API call, you may find it useful to inspect the HTTP status code contained in the returned
[autorest.Response](https://godoc.org/github.com/Azure/go-autorest/autorest#Response).
If, for example, it is an HTTP 202, then you can use the
[GetPollingLocation](https://godoc.org/github.com/Azure/go-autorest/autorest#Response.GetPollingLocation)
response method to extract the URL at which to continue polling. Similarly, the
[GetPollingDelay](https://godoc.org/github.com/Azure/go-autorest/autorest#Response.GetPollingDelay)
response method returns, as a
[time.Duration](http://golang.org/pkg/time/#Duration),
the service-suggested minimum polling delay.
+
+Creating a new Azure storage account is a straightforward way to see these concepts.
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+    "net/http"
+
+    "github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
+    "github.com/Azure/azure-sdk-for-go/arm/storage"
+    "github.com/Azure/go-autorest/autorest"
+    "github.com/Azure/go-autorest/autorest/azure"
+    "github.com/Azure/go-autorest/autorest/to"
+)
+
+func createAccount(resourceGroup, name string) {
+    c, err := helpers.LoadCredentials()
+    if err != nil {
+        log.Fatalf("Error: %v", err)
+    }
+
+    ac := storage.NewAccountsClient(c["subscriptionID"])
+
+    spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope)
+    if err != nil {
+        log.Fatalf("Error: %v", err)
+    }
+    ac.Authorizer = spt
+    ac.PollingMode = autorest.PollUntilAttempts
+    ac.PollingAttempts = 5
+
+    cp := storage.AccountCreateParameters{}
+    cp.Location = to.StringPtr("westus")
+    cp.Properties = &storage.AccountPropertiesCreateParameters{AccountType: storage.StandardLRS}
+
+    sa, err := ac.Create(resourceGroup, name, cp)
+    if err != nil {
+        if sa.Response.StatusCode != http.StatusAccepted {
+            fmt.Printf("Creation of %s.%s failed with err -- %v\n", resourceGroup, name, err)
+            return
+        }
+        fmt.Printf("Create initiated for %s.%s -- poll %s to check status\n",
+            resourceGroup,
+            name,
+            sa.GetPollingLocation())
+        return
+    }
+
+    fmt.Printf("Successfully created %s.%s\n\n", resourceGroup, name)
+}
+```
+
+The above example modifies the
+[autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client)
+portion of the
+[storage.StorageAccountsClient](https://godoc.org/github.com/Azure/azure-sdk-for-go/arm/storage#StorageAccountsClient)
+to poll for a fixed number of attempts rather than polling for a set duration (which is the
+default).
+If an error occurs creating the storage account, the code inspects the HTTP status code and
+prints the URL the
+[Azure Storage](http://azure.microsoft.com/en-us/documentation/services/storage/)
+service returned for polling.
+More details, including deleting the created account, are in the example code file
+[create.go](https://github.com/Azure/azure-sdk-for-go/blob/master/arm/examples/create.go).
+
+## Making Asynchronous Requests
+
+One of Go's many strong points is how natural it makes sending and managing asynchronous requests
+by means of goroutines. We wanted the ARM packages to fit naturally into the variety of
+asynchronous patterns used in Go code, but also to be straightforward for simple use cases. We
+accomplished both by adopting a pattern for all APIs (a sketch of fanning work out across
+goroutines with this pattern appears at the end of this README). Each package API includes (at
+least) four methods (more if the API returns a paged result set). For example, for an API call
+named `Foo`, the package defines:
+
+- `FooPreparer`: This method accepts the arguments for the API and returns a prepared
+`http.Request`.
+
+- `FooSender`: This method sends the prepared `http.Request`. It handles the possible status codes
+and will, unless disabled in the
+[autorest.Client](https://godoc.org/github.com/Azure/go-autorest/autorest#Client), handle polling.
+- `FooResponder`: This method accepts and handles the `http.Response` returned by the sender
+and unmarshals the JSON, if any, into the result.
+- `Foo`: This method accepts the arguments for the API and returns the result. It is a wrapper
+around the `FooPreparer`, `FooSender`, and `FooResponder`.
+
+By using the preparer, sender, and responder methods, package users can spread request and
+response handling across goroutines as needed. Further, adding a cancel channel to the
+`http.Request` (most easily through a
+[PrepareDecorator](https://godoc.org/github.com/Azure/go-autorest/autorest#PrepareDecorator))
+enables canceling sent requests (see the documentation on
+[http.Request](https://golang.org/pkg/net/http/#Request) for details).
+
+## Paged Result Sets
+
+Some API calls return partial results. Typically, when they do, the result structure will include
+a `Value` array and a `NextLink` URL. The `NextLink` URL is used to retrieve the next page or
+block of results.
+
+The packages add two methods to make working with and retrieving paged results natural. First,
+on paged result structures, the packages include a preparer method that returns an `http.Request`
+for the next set of results. For a result set returned in a structure named `FooResults`, the
+package will include a method named `FooResultsPreparer`. If the `NextLink` is `nil` or empty, the
+method returns `nil`.
+
+The corresponding API (which typically includes "List" in the name) has a method to ease retrieving
+the next result set given the current one. For example, for an API named `FooList`, the package
+will include `FooListNextResults` that accepts the results of the last call and returns the next
+set (a sketch of draining a paged result set this way also appears at the end of this README).
+
+## Summing Up
+
+The new Azure Resource Manager packages for the Azure SDK for Go are a big step toward keeping the
+SDK current with Azure's rapid growth.
+As mentioned, we intend to rapidly stabilize these packages for production use.
+We'll also add more examples, including some highlighting the
+[Azure Resource Manager Templates](https://msdn.microsoft.com/en-us/library/azure/dn790568.aspx)
+and the other providers.
+
+So, give the packages a try, explore the various ARM providers, and let us know what you think.
+
+We look forward to hearing from you!
+
+
+## Installing the Azure Resource Manager Packages
+
+Install the packages you require as you would any other Go package:
+
+```bash
+go get github.com/Azure/azure-sdk-for-go/arm/authorization
+go get github.com/Azure/azure-sdk-for-go/arm/compute
+go get github.com/Azure/azure-sdk-for-go/arm/features
+go get github.com/Azure/azure-sdk-for-go/arm/logic
+go get github.com/Azure/azure-sdk-for-go/arm/network
+go get github.com/Azure/azure-sdk-for-go/arm/resources
+go get github.com/Azure/azure-sdk-for-go/arm/scheduler
+go get github.com/Azure/azure-sdk-for-go/arm/search
+go get github.com/Azure/azure-sdk-for-go/arm/storage
+go get github.com/Azure/azure-sdk-for-go/arm/subscriptions
+```
+
+## License
+
+See the Azure SDK for Go LICENSE file.
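To make the fan-out pattern above concrete, here is a minimal sketch (not an official example) that spreads the Preparer / Sender / Responder phases of `Get` on the `authorization` package's `ManagementLocksClient` (whose generated code appears later in this diff) across goroutines. It assumes the client's `Authorizer` has been configured as in the earlier examples.

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/authorization"
)

// fetchLocks retrieves several management locks concurrently by calling the
// generated Preparer / Sender / Responder methods directly.
func fetchLocks(client authorization.ManagementLocksClient, lockNames []string) {
	results := make(chan string, len(lockNames))
	for _, n := range lockNames {
		go func(lockName string) {
			req, err := client.GetPreparer(lockName) // build the http.Request
			if err != nil {
				results <- fmt.Sprintf("%s: %v", lockName, err)
				return
			}
			resp, err := client.GetSender(req) // send it (polling as configured)
			if err != nil {
				results <- fmt.Sprintf("%s: %v", lockName, err)
				return
			}
			lock, err := client.GetResponder(resp) // unmarshal the ManagementLock
			if err != nil {
				results <- fmt.Sprintf("%s: %v", lockName, err)
				return
			}
			results <- fmt.Sprintf("%s: HTTP %d", lockName, lock.Response.StatusCode)
		}(n)
	}
	for range lockNames {
		fmt.Println(<-results)
	}
}

func main() {
	client := authorization.NewManagementLocksClient("subscription-id")
	// client.Authorizer = ... // a ServicePrincipalToken, as shown earlier
	fetchLocks(client, []string{"lock-a", "lock-b"})
}
```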
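Similarly, a sketch of draining a paged result set with the generated `*NextResults` helper, again using the `authorization` package from later in this diff (it passes an empty `$filter` and assumes an authorized client):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/arm/authorization"
)

// listAllLocks walks every page of management locks in the subscription,
// following NextLink until the service reports no further pages.
func listAllLocks(client authorization.ManagementLocksClient) {
	page, err := client.ListAtSubscriptionLevel("")
	for err == nil {
		if page.Value != nil {
			for _, lock := range *page.Value {
				if lock.Name != nil {
					fmt.Println(*lock.Name)
				}
			}
		}
		// ManagementLockListResultPreparer returns nil when NextLink is
		// empty, so stop before requesting a page that does not exist.
		if page.NextLink == nil || *page.NextLink == "" {
			return
		}
		page, err = client.ListAtSubscriptionLevelNextResults(page)
	}
	log.Fatalf("Error: %v", err)
}

func main() {
	client := authorization.NewManagementLocksClient("subscription-id")
	// client.Authorizer = ... // as shown earlier
	listAllLocks(client)
}
```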
=== added directory 'src/github.com/Azure/azure-sdk-for-go/arm/authorization' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/authorization/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Authorization + APIVersion = "2015-01-01" + + // DefaultBaseURI is the default URI used for the service Authorization + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Authorization. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go' --- src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/authorization/managementlocks.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,852 @@ +package authorization + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// ManagementLocksClient is the client for the ManagementLocks methods of the +// Authorization service. 
+type ManagementLocksClient struct { + ManagementClient +} + +// NewManagementLocksClient creates an instance of the ManagementLocksClient +// client. +func NewManagementLocksClient(subscriptionID string) ManagementLocksClient { + return NewManagementLocksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewManagementLocksClientWithBaseURI creates an instance of the +// ManagementLocksClient client. +func NewManagementLocksClientWithBaseURI(baseURI string, subscriptionID string) ManagementLocksClient { + return ManagementLocksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdateAtResourceGroupLevel create or update a management lock at +// the resource group level. +// +// resourceGroupName is the resource group name. lockName is the lock name. +// parameters is the management lock parameters. +func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevel(resourceGroupName string, lockName string, parameters ManagementLock) (result ManagementLock, ae error) { + req, err := client.CreateOrUpdateAtResourceGroupLevelPreparer(resourceGroupName, lockName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAtResourceGroupLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", "Failure sending request") + } + + result, err = client.CreateOrUpdateAtResourceGroupLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceGroupLevel", "Failure responding to request") + } + + return +} + +// CreateOrUpdateAtResourceGroupLevelPreparer prepares the CreateOrUpdateAtResourceGroupLevel request. +func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelPreparer(resourceGroupName string, lockName string, parameters ManagementLock) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateAtResourceGroupLevelSender sends the CreateOrUpdateAtResourceGroupLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateAtResourceGroupLevelResponder handles the response to the CreateOrUpdateAtResourceGroupLevel request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) CreateOrUpdateAtResourceGroupLevelResponder(resp *http.Response) (result ManagementLock, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAtResourceLevel create or update a management lock at the +// resource level or any level below resource. +// +// resourceGroupName is the name of the resource group. +// resourceProviderNamespace is resource identity. parentResourcePath is +// resource identity. resourceType is resource identity. resourceName is +// resource identity. lockName is the name of lock. parameters is create or +// update management lock parameters. +func (client ManagementLocksClient) CreateOrUpdateAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string, parameters ManagementLock) (result ManagementLock, ae error) { + req, err := client.CreateOrUpdateAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, lockName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceLevel", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAtResourceLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceLevel", "Failure sending request") + } + + result, err = client.CreateOrUpdateAtResourceLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtResourceLevel", "Failure responding to request") + } + + return +} + +// CreateOrUpdateAtResourceLevelPreparer prepares the CreateOrUpdateAtResourceLevel request. +func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string, parameters ManagementLock) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateAtResourceLevelSender sends the CreateOrUpdateAtResourceLevel request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateAtResourceLevelResponder handles the response to the CreateOrUpdateAtResourceLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) CreateOrUpdateAtResourceLevelResponder(resp *http.Response) (result ManagementLock, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAtSubscriptionLevel create or update a management lock at the +// subscription level. +// +// lockName is the name of lock. parameters is the management lock parameters. +func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevel(lockName string, parameters ManagementLock) (result ManagementLock, ae error) { + req, err := client.CreateOrUpdateAtSubscriptionLevelPreparer(lockName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAtSubscriptionLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", "Failure sending request") + } + + result, err = client.CreateOrUpdateAtSubscriptionLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "CreateOrUpdateAtSubscriptionLevel", "Failure responding to request") + } + + return +} + +// CreateOrUpdateAtSubscriptionLevelPreparer prepares the CreateOrUpdateAtSubscriptionLevel request. +func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelPreparer(lockName string, parameters ManagementLock) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateAtSubscriptionLevelSender sends the CreateOrUpdateAtSubscriptionLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateAtSubscriptionLevelResponder handles the response to the CreateOrUpdateAtSubscriptionLevel request. The method always +// closes the http.Response Body. 
+func (client ManagementLocksClient) CreateOrUpdateAtSubscriptionLevelResponder(resp *http.Response) (result ManagementLock, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteAtResourceGroupLevel deletes the management lock of a resource group. +// +// resourceGroup is the resource group names. lockName is the name of lock. +func (client ManagementLocksClient) DeleteAtResourceGroupLevel(resourceGroup string, lockName string) (result autorest.Response, ae error) { + req, err := client.DeleteAtResourceGroupLevelPreparer(resourceGroup, lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceGroupLevel", "Failure preparing request") + } + + resp, err := client.DeleteAtResourceGroupLevelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceGroupLevel", "Failure sending request") + } + + result, err = client.DeleteAtResourceGroupLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceGroupLevel", "Failure responding to request") + } + + return +} + +// DeleteAtResourceGroupLevelPreparer prepares the DeleteAtResourceGroupLevel request. +func (client ManagementLocksClient) DeleteAtResourceGroupLevelPreparer(resourceGroup string, lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "resourceGroup": url.QueryEscape(resourceGroup), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteAtResourceGroupLevelSender sends the DeleteAtResourceGroupLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) DeleteAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) +} + +// DeleteAtResourceGroupLevelResponder handles the response to the DeleteAtResourceGroupLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) DeleteAtResourceGroupLevelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAtResourceLevel deletes the management lock of a resource or any +// level below resource. +// +// resourceGroupName is the name of the resource group. +// resourceProviderNamespace is resource identity. parentResourcePath is +// resource identity. resourceType is resource identity. resourceName is +// resource identity. 
lockName is the name of lock. +func (client ManagementLocksClient) DeleteAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string) (result autorest.Response, ae error) { + req, err := client.DeleteAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceLevel", "Failure preparing request") + } + + resp, err := client.DeleteAtResourceLevelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceLevel", "Failure sending request") + } + + result, err = client.DeleteAtResourceLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtResourceLevel", "Failure responding to request") + } + + return +} + +// DeleteAtResourceLevelPreparer prepares the DeleteAtResourceLevel request. +func (client ManagementLocksClient) DeleteAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteAtResourceLevelSender sends the DeleteAtResourceLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) DeleteAtResourceLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) +} + +// DeleteAtResourceLevelResponder handles the response to the DeleteAtResourceLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) DeleteAtResourceLevelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAtSubscriptionLevel deletes the management lock of a subscription. +// +// lockName is the name of lock. 
+func (client ManagementLocksClient) DeleteAtSubscriptionLevel(lockName string) (result autorest.Response, ae error) { + req, err := client.DeleteAtSubscriptionLevelPreparer(lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtSubscriptionLevel", "Failure preparing request") + } + + resp, err := client.DeleteAtSubscriptionLevelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtSubscriptionLevel", "Failure sending request") + } + + result, err = client.DeleteAtSubscriptionLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "DeleteAtSubscriptionLevel", "Failure responding to request") + } + + return +} + +// DeleteAtSubscriptionLevelPreparer prepares the DeleteAtSubscriptionLevel request. +func (client ManagementLocksClient) DeleteAtSubscriptionLevelPreparer(lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteAtSubscriptionLevelSender sends the DeleteAtSubscriptionLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) DeleteAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusOK, http.StatusAccepted) +} + +// DeleteAtSubscriptionLevelResponder handles the response to the DeleteAtSubscriptionLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) DeleteAtSubscriptionLevelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the management lock of a scope. +// +// lockName is name of the management lock. +func (client ManagementLocksClient) Get(lockName string) (result ManagementLock, ae error) { + req, err := client.GetPreparer(lockName) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ManagementLocksClient) GetPreparer(lockName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "lockName": url.QueryEscape(lockName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks/{lockName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) GetResponder(resp *http.Response) (result ManagementLock, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtResourceGroupLevel gets all the management locks of a resource group. +// +// resourceGroupName is resource group name. filter is the filter to apply on +// the operation. +func (client ManagementLocksClient) ListAtResourceGroupLevel(resourceGroupName string, filter string) (result ManagementLockListResult, ae error) { + req, err := client.ListAtResourceGroupLevelPreparer(resourceGroupName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure preparing request") + } + + resp, err := client.ListAtResourceGroupLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure sending request") + } + + result, err = client.ListAtResourceGroupLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure responding to request") + } + + return +} + +// ListAtResourceGroupLevelPreparer prepares the ListAtResourceGroupLevel request. +func (client ManagementLocksClient) ListAtResourceGroupLevelPreparer(resourceGroupName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/locks"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAtResourceGroupLevelSender sends the ListAtResourceGroupLevel request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementLocksClient) ListAtResourceGroupLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAtResourceGroupLevelResponder handles the response to the ListAtResourceGroupLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) ListAtResourceGroupLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtResourceGroupLevelNextResults retrieves the next set of results, if any. +func (client ManagementLocksClient) ListAtResourceGroupLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAtResourceGroupLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure sending next results request request") + } + + result, err = client.ListAtResourceGroupLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceGroupLevel", "Failure responding to next results request request") + } + + return +} + +// ListAtResourceLevel gets all the management locks of a resource or any +// level below resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. filter is the filter to apply +// on the operation. +func (client ManagementLocksClient) ListAtResourceLevel(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (result ManagementLockListResult, ae error) { + req, err := client.ListAtResourceLevelPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure preparing request") + } + + resp, err := client.ListAtResourceLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure sending request") + } + + result, err = client.ListAtResourceLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure responding to request") + } + + return +} + +// ListAtResourceLevelPreparer prepares the ListAtResourceLevel request. 
+func (client ManagementLocksClient) ListAtResourceLevelPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/locks"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAtResourceLevelSender sends the ListAtResourceLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) ListAtResourceLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAtResourceLevelResponder handles the response to the ListAtResourceLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) ListAtResourceLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtResourceLevelNextResults retrieves the next set of results, if any. +func (client ManagementLocksClient) ListAtResourceLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAtResourceLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure sending next results request request") + } + + result, err = client.ListAtResourceLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtResourceLevel", "Failure responding to next results request request") + } + + return +} + +// ListAtSubscriptionLevel gets all the management locks of a subscription. +// +// filter is the filter to apply on the operation. 
+func (client ManagementLocksClient) ListAtSubscriptionLevel(filter string) (result ManagementLockListResult, ae error) { + req, err := client.ListAtSubscriptionLevelPreparer(filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure preparing request") + } + + resp, err := client.ListAtSubscriptionLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure sending request") + } + + result, err = client.ListAtSubscriptionLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure responding to request") + } + + return +} + +// ListAtSubscriptionLevelPreparer prepares the ListAtSubscriptionLevel request. +func (client ManagementLocksClient) ListAtSubscriptionLevelPreparer(filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/locks"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAtSubscriptionLevelSender sends the ListAtSubscriptionLevel request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) ListAtSubscriptionLevelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAtSubscriptionLevelResponder handles the response to the ListAtSubscriptionLevel request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) ListAtSubscriptionLevelResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAtSubscriptionLevelNextResults retrieves the next set of results, if any. 
+func (client ManagementLocksClient) ListAtSubscriptionLevelNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) { + req, err := lastResults.ManagementLockListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAtSubscriptionLevelSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure sending next results request request") + } + + result, err = client.ListAtSubscriptionLevelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListAtSubscriptionLevel", "Failure responding to next results request request") + } + + return +} + +// ListNext get a list of management locks at resource level or below. +// +// nextLink is nextLink from the previous successful call to List operation. +func (client ManagementLocksClient) ListNext(nextLink string) (result ManagementLockListResult, ae error) { + req, err := client.ListNextPreparer(nextLink) + if err != nil { + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure preparing request") + } + + resp, err := client.ListNextSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure sending request") + } + + result, err = client.ListNextResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure responding to request") + } + + return +} + +// ListNextPreparer prepares the ListNext request. +func (client ManagementLocksClient) ListNextPreparer(nextLink string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "nextLink": nextLink, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/{nextLink}"), + autorest.WithPathParameters(pathParameters)) +} + +// ListNextSender sends the ListNext request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementLocksClient) ListNextSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListNextResponder handles the response to the ListNext request. The method always +// closes the http.Response Body. +func (client ManagementLocksClient) ListNextResponder(resp *http.Response) (result ManagementLockListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextNextResults retrieves the next set of results, if any. 
+
+// ListNextNextResults retrieves the next set of results, if any.
+func (client ManagementLocksClient) ListNextNextResults(lastResults ManagementLockListResult) (result ManagementLockListResult, ae error) {
+	req, err := lastResults.ManagementLockListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListNextSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure sending next results request")
+	}
+
+	result, err = client.ListNextResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "authorization/ManagementLocksClient", "ListNext", "Failure responding to next results request")
+	}
+
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/authorization/models.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,103 @@
+package authorization
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to"
+	"net/http"
+)
+
+// LockLevel enumerates the values for lock level.
+type LockLevel string
+
+const (
+	// CanNotDelete specifies the can not delete state for lock level.
+	CanNotDelete LockLevel = "CanNotDelete"
+	// NotSpecified specifies the not specified state for lock level.
+	NotSpecified LockLevel = "NotSpecified"
+	// ReadOnly specifies the read only state for lock level.
+	ReadOnly LockLevel = "ReadOnly"
+)
+
+// DeploymentExtendedFilter is deployment filter.
+type DeploymentExtendedFilter struct {
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// GenericResourceFilter is resource filter.
+type GenericResourceFilter struct {
+	ResourceType *string `json:"resourceType,omitempty"`
+	Tagname      *string `json:"tagname,omitempty"`
+	Tagvalue     *string `json:"tagvalue,omitempty"`
+}
+
+// ManagementLock is management lock information.
+type ManagementLock struct {
+	autorest.Response `json:"-"`
+	Properties        *ManagementLockProperties `json:"properties,omitempty"`
+	ID                *string                   `json:"id,omitempty"`
+	Type              *string                   `json:"type,omitempty"`
+	Name              *string                   `json:"name,omitempty"`
+}
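+
+// A minimal sketch, illustration only and not part of the generated file, of
+// building a ManagementLock body (for example, for a create or update call);
+// the notes text is a placeholder.
+func exampleReadOnlyLock() ManagementLock {
+	notes := "read-only while the migration completes"
+	return ManagementLock{
+		Properties: &ManagementLockProperties{
+			Level: ReadOnly,
+			Notes: &notes,
+		},
+	}
+}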
+
+// ManagementLockListResult is the list of management locks.
+type ManagementLockListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]ManagementLock `json:"value,omitempty"`
+	NextLink          *string           `json:"nextLink,omitempty"`
+}
+
+// ManagementLockListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ManagementLockListResult) ManagementLockListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ManagementLockProperties is the management lock properties.
+type ManagementLockProperties struct {
+	Level LockLevel `json:"level,omitempty"`
+	Notes *string   `json:"notes,omitempty"`
+}
+
+// Resource is an Azure resource.
+type Resource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Type     *string             `json:"type,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// ResourceGroupFilter is resource group filter.
+type ResourceGroupFilter struct {
+	TagName  *string `json:"tagName,omitempty"`
+	TagValue *string `json:"tagValue,omitempty"`
+}
+
+// SubResource is a reference to another resource, held as its ID.
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/authorization/version.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,43 @@
+package authorization
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"fmt"
+)
+
+const (
+	major = "0"
+	minor = "1"
+	patch = "1"
+	// Always begin a "tag" with a dash (as per http://semver.org)
+	tag             = "-beta"
+	semVerFormat    = "%s.%s.%s%s"
+	userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s"
+)
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+	return fmt.Sprintf(userAgentFormat, Version(), "authorization", "2015-01-01")
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
+}
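+
+// Worked example, illustration only: with the constants above, Version()
+// evaluates to "0.1.1-beta" and UserAgent() to
+// "Azure-SDK-for-Go/0.1.1-beta;Package arm/authorization;API 2015-01-01".
+func exampleVersionStrings() {
+	fmt.Println(Version())
+	fmt.Println(UserAgent())
+}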
=== added directory 'src/github.com/Azure/azure-sdk-for-go/arm/compute'
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,363 @@
+package compute
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// AvailabilitySetsClient is the client for the AvailabilitySets methods of
+// the Compute service.
+type AvailabilitySetsClient struct {
+	ManagementClient
+}
+
+// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient
+// client.
+func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient {
+	return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewAvailabilitySetsClientWithBaseURI creates an instance of the
+// AvailabilitySetsClient client.
+func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient {
+	return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the operation to create or update the availability set.
+//
+// resourceGroupName is the name of the resource group. name is the name of
+// the availability set. parameters is parameters supplied to the Create
+// Availability Set operation.
+func (client AvailabilitySetsClient) CreateOrUpdate(resourceGroupName string, name string, parameters AvailabilitySet) (result AvailabilitySet, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, name, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName string, name string, parameters AvailabilitySet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": url.QueryEscape(name), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{name}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. +func (client AvailabilitySetsClient) Delete(resourceGroupName string, availabilitySetName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": url.QueryEscape(availabilitySetName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the operation to get the availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is +// the name of the availability set. +func (client AvailabilitySetsClient) Get(resourceGroupName string, availabilitySetName string) (result AvailabilitySet, ae error) { + req, err := client.GetPreparer(resourceGroupName, availabilitySetName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": url.QueryEscape(availabilitySetName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the operation to list the availability sets. +// +// resourceGroupName is the name of the resource group. 
+func (client AvailabilitySetsClient) List(resourceGroupName string) (result AvailabilitySetListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAvailableSizes lists the virtual machine sizes available for use with
+// an availability set.
+//
+// resourceGroupName is the name of the resource group. availabilitySetName is
+// the name of the availability set.
+func (client AvailabilitySetsClient) ListAvailableSizes(resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, ae error) {
+	req, err := client.ListAvailableSizesPreparer(resourceGroupName, availabilitySetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", "Failure preparing request")
+	}
+
+	resp, err := client.ListAvailableSizesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", "Failure sending request")
+	}
+
+	result, err = client.ListAvailableSizesResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/AvailabilitySetsClient", "ListAvailableSizes", "Failure responding to request")
+	}
+
+	return
+}
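+
+// A minimal caller-side sketch, illustration only and not part of the
+// generated file: collecting the sizes reported for one availability set.
+// The resource-group and set names are placeholders.
+func availableSizesFor(client AvailabilitySetsClient) ([]VirtualMachineSize, error) {
+	result, err := client.ListAvailableSizes("my-resource-group", "my-availability-set")
+	if err != nil {
+		return nil, err
+	}
+	if result.Value == nil {
+		return nil, nil
+	}
+	return *result.Value, nil
+}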
+
+// ListAvailableSizesPreparer prepares the ListAvailableSizes request.
+func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupName string, availabilitySetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"availabilitySetName": url.QueryEscape(availabilitySetName),
+		"resourceGroupName":   url.QueryEscape(resourceGroupName),
+		"subscriptionId":      url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the
+// http.Response Body if it receives an error.
+func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always
+// closes the http.Response Body.
+func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/compute/client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,52 @@
+package compute
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// APIVersion is the version of the Compute API used by this package.
+	APIVersion = "2015-06-15"
+
+	// DefaultBaseURI is the default URI used for the Compute service.
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Compute.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/models.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,680 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// CachingTypes enumerates the values for caching types. +type CachingTypes string + +const ( + // None specifies the none state for caching types. + None CachingTypes = "None" + // ReadOnly specifies the read only state for caching types. + ReadOnly CachingTypes = "ReadOnly" + // ReadWrite specifies the read write state for caching types. + ReadWrite CachingTypes = "ReadWrite" +) + +// ComponentNames enumerates the values for component names. +type ComponentNames string + +const ( + // MicrosoftWindowsShellSetup specifies the microsoft windows shell setup + // state for component names. + MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" +) + +// DiskCreateOptionTypes enumerates the values for disk create option types. +type DiskCreateOptionTypes string + +const ( + // Attach specifies the attach state for disk create option types. + Attach DiskCreateOptionTypes = "attach" + // Empty specifies the empty state for disk create option types. + Empty DiskCreateOptionTypes = "empty" + // FromImage specifies the from image state for disk create option types. + FromImage DiskCreateOptionTypes = "fromImage" +) + +// OperatingSystemTypes enumerates the values for operating system types. +type OperatingSystemTypes string + +const ( + // Linux specifies the linux state for operating system types. + Linux OperatingSystemTypes = "Linux" + // Windows specifies the windows state for operating system types. + Windows OperatingSystemTypes = "Windows" +) + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // OperationStatusFailed specifies the operation status failed state for + // operation status. + OperationStatusFailed OperationStatus = "Failed" + // OperationStatusInProgress specifies the operation status in progress + // state for operation status. 
+ OperationStatusInProgress OperationStatus = "InProgress" + // OperationStatusPreempted specifies the operation status preempted state + // for operation status. + OperationStatusPreempted OperationStatus = "Preempted" + // OperationStatusSucceeded specifies the operation status succeeded state + // for operation status. + OperationStatusSucceeded OperationStatus = "Succeeded" +) + +// OperationStatusEnum enumerates the values for operation status enum. +type OperationStatusEnum string + +const ( + // OperationStatusEnumFailed specifies the operation status enum failed + // state for operation status enum. + OperationStatusEnumFailed OperationStatusEnum = "Failed" + // OperationStatusEnumInProgress specifies the operation status enum in + // progress state for operation status enum. + OperationStatusEnumInProgress OperationStatusEnum = "InProgress" + // OperationStatusEnumSucceeded specifies the operation status enum + // succeeded state for operation status enum. + OperationStatusEnumSucceeded OperationStatusEnum = "Succeeded" +) + +// PassNames enumerates the values for pass names. +type PassNames string + +const ( + // OobeSystem specifies the oobe system state for pass names. + OobeSystem PassNames = "oobeSystem" +) + +// ProtocolTypes enumerates the values for protocol types. +type ProtocolTypes string + +const ( + // HTTP specifies the http state for protocol types. + HTTP ProtocolTypes = "Http" + // HTTPS specifies the https state for protocol types. + HTTPS ProtocolTypes = "Https" +) + +// SettingNames enumerates the values for setting names. +type SettingNames string + +const ( + // AutoLogon specifies the auto logon state for setting names. + AutoLogon SettingNames = "AutoLogon" + // FirstLogonCommands specifies the first logon commands state for setting + // names. + FirstLogonCommands SettingNames = "FirstLogonCommands" +) + +// StatusLevelTypes enumerates the values for status level types. +type StatusLevelTypes string + +const ( + // Error specifies the error state for status level types. + Error StatusLevelTypes = "Error" + // Info specifies the info state for status level types. + Info StatusLevelTypes = "Info" + // Warning specifies the warning state for status level types. + Warning StatusLevelTypes = "Warning" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" +) + +// VirtualMachineSizeTypes enumerates the values for virtual machine size +// types. +type VirtualMachineSizeTypes string + +const ( + // BasicA0 specifies the basic a0 state for virtual machine size types. + BasicA0 VirtualMachineSizeTypes = "Basic_A0" + // BasicA1 specifies the basic a1 state for virtual machine size types. + BasicA1 VirtualMachineSizeTypes = "Basic_A1" + // BasicA2 specifies the basic a2 state for virtual machine size types. + BasicA2 VirtualMachineSizeTypes = "Basic_A2" + // BasicA3 specifies the basic a3 state for virtual machine size types. + BasicA3 VirtualMachineSizeTypes = "Basic_A3" + // BasicA4 specifies the basic a4 state for virtual machine size types. + BasicA4 VirtualMachineSizeTypes = "Basic_A4" + // StandardA0 specifies the standard a0 state for virtual machine size + // types. + StandardA0 VirtualMachineSizeTypes = "Standard_A0" + // StandardA1 specifies the standard a1 state for virtual machine size + // types. + StandardA1 VirtualMachineSizeTypes = "Standard_A1" + // StandardA2 specifies the standard a2 state for virtual machine size + // types. 
+	StandardA2 VirtualMachineSizeTypes = "Standard_A2"
+	// StandardA3 specifies the standard a3 state for virtual machine size
+	// types.
+	StandardA3 VirtualMachineSizeTypes = "Standard_A3"
+	// StandardA4 specifies the standard a4 state for virtual machine size
+	// types.
+	StandardA4 VirtualMachineSizeTypes = "Standard_A4"
+	// StandardA5 specifies the standard a5 state for virtual machine size
+	// types.
+	StandardA5 VirtualMachineSizeTypes = "Standard_A5"
+	// StandardA6 specifies the standard a6 state for virtual machine size
+	// types.
+	StandardA6 VirtualMachineSizeTypes = "Standard_A6"
+	// StandardA7 specifies the standard a7 state for virtual machine size
+	// types.
+	StandardA7 VirtualMachineSizeTypes = "Standard_A7"
+	// StandardA8 specifies the standard a8 state for virtual machine size
+	// types.
+	StandardA8 VirtualMachineSizeTypes = "Standard_A8"
+	// StandardA9 specifies the standard a9 state for virtual machine size
+	// types.
+	StandardA9 VirtualMachineSizeTypes = "Standard_A9"
+	// StandardG1 specifies the standard g1 state for virtual machine size
+	// types.
+	StandardG1 VirtualMachineSizeTypes = "Standard_G1"
+	// StandardG2 specifies the standard g2 state for virtual machine size
+	// types.
+	StandardG2 VirtualMachineSizeTypes = "Standard_G2"
+	// StandardG3 specifies the standard g3 state for virtual machine size
+	// types.
+	StandardG3 VirtualMachineSizeTypes = "Standard_G3"
+	// StandardG4 specifies the standard g4 state for virtual machine size
+	// types.
+	StandardG4 VirtualMachineSizeTypes = "Standard_G4"
+	// StandardG5 specifies the standard g5 state for virtual machine size
+	// types.
+	StandardG5 VirtualMachineSizeTypes = "Standard_G5"
+)
+
+// AdditionalUnattendContent is additional XML formatted information that can
+// be included in the Unattend.xml file, which is used by Windows Setup.
+// Contents are defined by setting name, component name, and the pass in
+// which the content is applied.
+type AdditionalUnattendContent struct {
+	PassName      PassNames      `json:"passName,omitempty"`
+	ComponentName ComponentNames `json:"componentName,omitempty"`
+	SettingName   SettingNames   `json:"settingName,omitempty"`
+	Content       *string        `json:"content,omitempty"`
+}
+
+// APIError is api error.
+type APIError struct {
+	Details    *[]APIErrorBase `json:"details,omitempty"`
+	Innererror *InnerError     `json:"innererror,omitempty"`
+	Code       *string         `json:"code,omitempty"`
+	Target     *string         `json:"target,omitempty"`
+	Message    *string         `json:"message,omitempty"`
+}
+
+// APIErrorBase is api error base.
+type APIErrorBase struct {
+	Code    *string `json:"code,omitempty"`
+	Target  *string `json:"target,omitempty"`
+	Message *string `json:"message,omitempty"`
+}
+
+// AvailabilitySet is create or update Availability Set parameters.
+type AvailabilitySet struct {
+	autorest.Response `json:"-"`
+	ID                *string                    `json:"id,omitempty"`
+	Name              *string                    `json:"name,omitempty"`
+	Type              *string                    `json:"type,omitempty"`
+	Location          *string                    `json:"location,omitempty"`
+	Tags              *map[string]*string        `json:"tags,omitempty"`
+	Properties        *AvailabilitySetProperties `json:"properties,omitempty"`
+}
+
+// AvailabilitySetListResult is the List Availability Set operation response.
+type AvailabilitySetListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]AvailabilitySet `json:"value,omitempty"`
+}
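+
+// A minimal sketch, illustration only and not part of the generated file, of
+// assembling an AvailabilitySet body for
+// AvailabilitySetsClient.CreateOrUpdate; the location and domain counts are
+// placeholder values.
+func exampleAvailabilitySet() AvailabilitySet {
+	location := "westus"
+	updateDomainCount := 5
+	faultDomainCount := 3
+	return AvailabilitySet{
+		Location: &location,
+		Properties: &AvailabilitySetProperties{
+			PlatformUpdateDomainCount: &updateDomainCount,
+			PlatformFaultDomainCount:  &faultDomainCount,
+		},
+	}
+}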
+
+// AvailabilitySetProperties is the instance view of a resource.
+type AvailabilitySetProperties struct {
+	PlatformUpdateDomainCount *int                  `json:"platformUpdateDomainCount,omitempty"`
+	PlatformFaultDomainCount  *int                  `json:"platformFaultDomainCount,omitempty"`
+	VirtualMachines           *[]SubResource        `json:"virtualMachines,omitempty"`
+	Statuses                  *[]InstanceViewStatus `json:"statuses,omitempty"`
+}
+
+// DataDisk is describes a data disk.
+type DataDisk struct {
+	Lun          *int                  `json:"lun,omitempty"`
+	DiskSizeGB   *int                  `json:"diskSizeGB,omitempty"`
+	Name         *string               `json:"name,omitempty"`
+	Vhd          *VirtualHardDisk      `json:"vhd,omitempty"`
+	Image        *VirtualHardDisk      `json:"image,omitempty"`
+	Caching      CachingTypes          `json:"caching,omitempty"`
+	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
+}
+
+// DataDiskImage is contains the data disk images information.
+type DataDiskImage struct {
+	Lun *int `json:"lun,omitempty"`
+}
+
+// DeleteOperationResult is the compute long running operation response.
+type DeleteOperationResult struct {
+	OperationID *string             `json:"operationId,omitempty"`
+	Status      OperationStatusEnum `json:"status,omitempty"`
+	StartTime   *date.Time          `json:"startTime,omitempty"`
+	EndTime     *date.Time          `json:"endTime,omitempty"`
+	Error       *APIError           `json:"error,omitempty"`
+}
+
+// DiskInstanceView is the instance view of the disk.
+type DiskInstanceView struct {
+	Name     *string               `json:"name,omitempty"`
+	Statuses *[]InstanceViewStatus `json:"statuses,omitempty"`
+}
+
+// HardwareProfile is describes a hardware profile.
+type HardwareProfile struct {
+	VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"`
+}
+
+// ImageReference is the image reference.
+type ImageReference struct {
+	Publisher *string `json:"publisher,omitempty"`
+	Offer     *string `json:"offer,omitempty"`
+	Sku       *string `json:"sku,omitempty"`
+	Version   *string `json:"version,omitempty"`
+}
+
+// InnerError is inner error details.
+type InnerError struct {
+	Exceptiontype *string `json:"exceptiontype,omitempty"`
+	Errordetail   *string `json:"errordetail,omitempty"`
+}
+
+// InstanceViewStatus is instance view status.
+type InstanceViewStatus struct {
+	Code          *string          `json:"code,omitempty"`
+	Level         StatusLevelTypes `json:"level,omitempty"`
+	DisplayStatus *string          `json:"displayStatus,omitempty"`
+	Message       *string          `json:"message,omitempty"`
+	Time          *date.Time       `json:"time,omitempty"`
+}
+
+// LinuxConfiguration is describes the Linux configuration of the OS Profile.
+type LinuxConfiguration struct {
+	DisablePasswordAuthentication *bool             `json:"disablePasswordAuthentication,omitempty"`
+	SSH                           *SSHConfiguration `json:"ssh,omitempty"`
+}
+
+// ListUsagesResult is the List Usages operation response.
+type ListUsagesResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Usage `json:"value,omitempty"`
+}
+
+// LongRunningOperationProperties is compute-specific operation properties,
+// including output
+type LongRunningOperationProperties struct {
+	Output *map[string]*string `json:"output,omitempty"`
+}
+
+// LongRunningOperationResult is the Compute service response for long-running
+// operations.
+type LongRunningOperationResult struct {
+	autorest.Response `json:"-"`
+	OperationID       *string                         `json:"operationId,omitempty"`
+	Status            OperationStatus                 `json:"status,omitempty"`
+	StartTime         *date.Time                      `json:"startTime,omitempty"`
+	EndTime           *date.Time                      `json:"endTime,omitempty"`
+	Properties        *LongRunningOperationProperties `json:"properties,omitempty"`
+	Error             *APIError                       `json:"error,omitempty"`
+}
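+
+// A minimal sketch, illustration only and not part of the generated file, of
+// the hardware profile and image reference pieces of a virtual machine
+// definition; the image values are placeholders for a marketplace image.
+func exampleProfiles() (HardwareProfile, ImageReference) {
+	publisher := "Canonical"
+	offer := "UbuntuServer"
+	sku := "14.04.3-LTS"
+	version := "latest"
+	return HardwareProfile{VMSize: StandardA1},
+		ImageReference{
+			Publisher: &publisher,
+			Offer:     &offer,
+			Sku:       &sku,
+			Version:   &version,
+		}
+}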
+
+// NetworkInterfaceReference is describes a network interface reference.
+type NetworkInterfaceReference struct {
+	ID         *string                              `json:"id,omitempty"`
+	Properties *NetworkInterfaceReferenceProperties `json:"properties,omitempty"`
+}
+
+// NetworkInterfaceReferenceProperties is describes a network interface
+// reference properties.
+type NetworkInterfaceReferenceProperties struct {
+	Primary *bool `json:"primary,omitempty"`
+}
+
+// NetworkProfile is describes a network profile.
+type NetworkProfile struct {
+	NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"`
+}
+
+// OSDisk is describes an Operating System disk.
+type OSDisk struct {
+	OsType       OperatingSystemTypes  `json:"osType,omitempty"`
+	Name         *string               `json:"name,omitempty"`
+	Vhd          *VirtualHardDisk      `json:"vhd,omitempty"`
+	Image        *VirtualHardDisk      `json:"image,omitempty"`
+	Caching      CachingTypes          `json:"caching,omitempty"`
+	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
+}
+
+// OSDiskImage is contains the os disk image information.
+type OSDiskImage struct {
+	OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"`
+}
+
+// OSProfile is describes an OS profile.
+type OSProfile struct {
+	ComputerName         *string               `json:"computerName,omitempty"`
+	AdminUsername        *string               `json:"adminUsername,omitempty"`
+	AdminPassword        *string               `json:"adminPassword,omitempty"`
+	CustomData           *string               `json:"customData,omitempty"`
+	WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
+	LinuxConfiguration   *LinuxConfiguration   `json:"linuxConfiguration,omitempty"`
+	Secrets              *[]VaultSecretGroup   `json:"secrets,omitempty"`
+}
+
+// Plan is plan for the resource.
+type Plan struct {
+	Name          *string `json:"name,omitempty"`
+	Publisher     *string `json:"publisher,omitempty"`
+	Product       *string `json:"product,omitempty"`
+	PromotionCode *string `json:"promotionCode,omitempty"`
+}
+
+// PurchasePlan is used for establishing the purchase context of any 3rd Party
+// artifact through MarketPlace.
+type PurchasePlan struct {
+	Publisher *string `json:"publisher,omitempty"`
+	Name      *string `json:"name,omitempty"`
+	Product   *string `json:"product,omitempty"`
+}
+
+// Resource is an Azure resource.
+type Resource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Type     *string             `json:"type,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// SSHConfiguration is SSH configuration for Linux based VMs running on Azure
+type SSHConfiguration struct {
+	PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"`
+}
+
+// SSHPublicKey is contains information about SSH certificate public key and
+// the path on the Linux VM where the public key is placed.
+type SSHPublicKey struct {
+	Path    *string `json:"path,omitempty"`
+	KeyData *string `json:"keyData,omitempty"`
+}
+
+// StorageProfile is describes a storage profile.
+type StorageProfile struct {
+	ImageReference *ImageReference `json:"imageReference,omitempty"`
+	OsDisk         *OSDisk         `json:"osDisk,omitempty"`
+	DataDisks      *[]DataDisk     `json:"dataDisks,omitempty"`
+}
+
+// SubResource is a reference to another resource, held as its ID.
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// Usage is describes Compute Resource Usage.
+type Usage struct {
+	Unit         UsageUnit  `json:"unit,omitempty"`
+	CurrentValue *int       `json:"currentValue,omitempty"`
+	Limit        *int32     `json:"limit,omitempty"`
+	Name         *UsageName `json:"name,omitempty"`
+}
+
+// UsageName is the Usage Names.
+type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// VaultCertificate is describes a single certificate reference in a Key +// Vault, and where the certificate should reside on the VM. +type VaultCertificate struct { + CertificateURL *string `json:"certificateUrl,omitempty"` + CertificateStore *string `json:"certificateStore,omitempty"` +} + +// VaultSecretGroup is describes a set of certificates which are all in the +// same Key Vault. +type VaultSecretGroup struct { + SourceVault *SubResource `json:"sourceVault,omitempty"` + VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` +} + +// VirtualHardDisk is describes the uri of a disk. +type VirtualHardDisk struct { + URI *string `json:"uri,omitempty"` +} + +// VirtualMachine is describes a Virtual Machine. +type VirtualMachine struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Properties *VirtualMachineProperties `json:"properties,omitempty"` + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` +} + +// VirtualMachineAgentInstanceView is the instance view of the VM Agent +// running on the virtual machine. +type VirtualMachineAgentInstanceView struct { + VMAgentVersion *string `json:"vmAgentVersion,omitempty"` + ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineCaptureParameters is capture Virtual Machine parameters. +type VirtualMachineCaptureParameters struct { + VhdPrefix *string `json:"vhdPrefix,omitempty"` + DestinationContainerName *string `json:"destinationContainerName,omitempty"` + OverwriteVhds *bool `json:"overwriteVhds,omitempty"` +} + +// VirtualMachineExtension is describes a Virtual Machine Extension. +type VirtualMachineExtension struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualMachineExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineExtensionHandlerInstanceView is the instance view of a +// virtual machine extension handler. +type VirtualMachineExtensionHandlerInstanceView struct { + Type *string `json:"type,omitempty"` + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineExtensionImage is describes a Virtual Machine Extension Image. +type VirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineExtensionImageProperties is describes the properties of a +// Virtual Machine Extension Image. 
+type VirtualMachineExtensionImageProperties struct {
+	OperatingSystem            *string `json:"operatingSystem,omitempty"`
+	ComputeRole                *string `json:"computeRole,omitempty"`
+	HandlerSchema              *string `json:"handlerSchema,omitempty"`
+	VMScaleSetEnabled          *bool   `json:"vmScaleSetEnabled,omitempty"`
+	SupportsMultipleExtensions *bool   `json:"supportsMultipleExtensions,omitempty"`
+}
+
+// VirtualMachineExtensionInstanceView is the instance view of a virtual
+// machine extension.
+type VirtualMachineExtensionInstanceView struct {
+	Name               *string               `json:"name,omitempty"`
+	Type               *string               `json:"type,omitempty"`
+	TypeHandlerVersion *string               `json:"typeHandlerVersion,omitempty"`
+	Substatuses        *[]InstanceViewStatus `json:"substatuses,omitempty"`
+	Statuses           *[]InstanceViewStatus `json:"statuses,omitempty"`
+}
+
+// VirtualMachineExtensionProperties is describes the properties of a Virtual
+// Machine Extension.
+type VirtualMachineExtensionProperties struct {
+	Publisher               *string                              `json:"publisher,omitempty"`
+	Type                    *string                              `json:"type,omitempty"`
+	TypeHandlerVersion      *string                              `json:"typeHandlerVersion,omitempty"`
+	AutoUpgradeMinorVersion *bool                                `json:"autoUpgradeMinorVersion,omitempty"`
+	Settings                *map[string]*string                  `json:"settings,omitempty"`
+	ProtectedSettings       *map[string]*string                  `json:"protectedSettings,omitempty"`
+	ProvisioningState       *string                              `json:"provisioningState,omitempty"`
+	InstanceView            *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"`
+}
+
+// VirtualMachineImage is describes a Virtual Machine Image.
+type VirtualMachineImage struct {
+	autorest.Response `json:"-"`
+	ID                *string                        `json:"id,omitempty"`
+	Properties        *VirtualMachineImageProperties `json:"properties,omitempty"`
+	Name              *string                        `json:"name,omitempty"`
+	Location          *string                        `json:"location,omitempty"`
+	Tags              *map[string]*string            `json:"tags,omitempty"`
+}
+
+// VirtualMachineImageProperties is describes the properties of a Virtual
+// Machine Image.
+type VirtualMachineImageProperties struct {
+	Plan           *PurchasePlan    `json:"plan,omitempty"`
+	OsDiskImage    *OSDiskImage     `json:"osDiskImage,omitempty"`
+	DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"`
+}
+
+// VirtualMachineImageResource is virtual machine image resource information.
+type VirtualMachineImageResource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// VirtualMachineImageResourceList is the list of virtual machine image
+// resources.
+type VirtualMachineImageResourceList struct {
+	autorest.Response `json:"-"`
+	Value             *[]VirtualMachineImageResource `json:"value,omitempty"`
+}
+
+// VirtualMachineInstanceView is the instance view of a virtual machine.
+type VirtualMachineInstanceView struct {
+	PlatformUpdateDomain *int                                   `json:"platformUpdateDomain,omitempty"`
+	PlatformFaultDomain  *int                                   `json:"platformFaultDomain,omitempty"`
+	RdpThumbPrint        *string                                `json:"rdpThumbPrint,omitempty"`
+	VMAgent              *VirtualMachineAgentInstanceView       `json:"vmAgent,omitempty"`
+	Disks                *[]DiskInstanceView                    `json:"disks,omitempty"`
+	Extensions           *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"`
+	Statuses             *[]InstanceViewStatus                  `json:"statuses,omitempty"`
+}
+
+// VirtualMachineListResult is the List Virtual Machine operation response.
+type VirtualMachineListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachine `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualMachineListResult) VirtualMachineListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualMachineProperties is describes the properties of a Virtual Machine. +type VirtualMachineProperties struct { + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + OsProfile *OSProfile `json:"osProfile,omitempty"` + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` +} + +// VirtualMachineSize is describes the properties of a VM size. +type VirtualMachineSize struct { + Name *string `json:"name,omitempty"` + NumberOfCores *int `json:"numberOfCores,omitempty"` + OsDiskSizeInMB *int `json:"osDiskSizeInMB,omitempty"` + ResourceDiskSizeInMB *int `json:"resourceDiskSizeInMB,omitempty"` + MemoryInMB *int `json:"memoryInMB,omitempty"` + MaxDataDiskCount *int `json:"maxDataDiskCount,omitempty"` +} + +// VirtualMachineSizeListResult is the List Virtual Machine operation response. +type VirtualMachineSizeListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineSize `json:"value,omitempty"` +} + +// WindowsConfiguration is describes Windows Configuration of the OS Profile. +type WindowsConfiguration struct { + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` + TimeZone *string `json:"timeZone,omitempty"` + AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` + WinRM *WinRMConfiguration `json:"winRM,omitempty"` +} + +// WinRMConfiguration is describes Windows Remote Management configuration of +// the VM +type WinRMConfiguration struct { + Listeners *[]WinRMListener `json:"listeners,omitempty"` +} + +// WinRMListener is describes Protocol and thumbprint of Windows Remote +// Management listener +type WinRMListener struct { + Protocol ProtocolTypes `json:"protocol,omitempty"` + CertificateURL *string `json:"certificateUrl,omitempty"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// UsageOperationsClient is the client for the UsageOperations methods of the +// Compute service. +type UsageOperationsClient struct { + ManagementClient +} + +// NewUsageOperationsClient creates an instance of the UsageOperationsClient +// client. +func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists compute usages for a subscription. +// +// location is the location upon which resource usage is queried. +func (client UsageOperationsClient) List(location string) (result ListUsagesResult, ae error) { + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/UsageOperationsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsageOperationsClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client UsageOperationsClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "compute", "2015-06-15") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,236 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineExtensionImagesClient is the client for the +// VirtualMachineExtensionImages methods of the Compute service. 
+type VirtualMachineExtensionImagesClient struct { + ManagementClient +} + +// NewVirtualMachineExtensionImagesClient creates an instance of the +// VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { + return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of +// the VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { + return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine extension image. +// +func (client VirtualMachineExtensionImagesClient) Get(location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, ae error) { + req, err := client.GetPreparer(location, publisherName, typeParameter, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, publisherName string, typeParameter string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "type": url.QueryEscape(typeParameter), + "version": url.QueryEscape(version), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListTypes gets a list of virtual machine extension image types. 
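+//
+// A short sketch of discovering the extension types a publisher offers in a
+// region (extImgClient as constructed above; the publisher name is only an
+// example):
+//
+//	types, err := extImgClient.ListTypes("westus", "Microsoft.Compute")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%+v\n", types)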
+// +func (client VirtualMachineExtensionImagesClient) ListTypes(location string, publisherName string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListTypesPreparer(location, publisherName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", "Failure preparing request") + } + + resp, err := client.ListTypesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", "Failure sending request") + } + + result, err = client.ListTypesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListTypes", "Failure responding to request") + } + + return +} + +// ListTypesPreparer prepares the ListTypes request. +func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListTypesSender sends the ListTypes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListTypesResponder handles the response to the ListTypes request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVersions gets a list of virtual machine extension image versions. +// +// filter is the filter to apply on the operation. 
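+// top caps the number of results and orderby sets their ordering; all three
+// follow OData query conventions ($filter, $top, $orderby). A sketch with
+// illustrative values (extImgClient as constructed above):
+//
+//	versions, err := extImgClient.ListVersions(
+//		"westus", "Microsoft.Compute", "CustomScriptExtension",
+//		"", 10, "name desc")
+//	if err != nil {
+//		log.Fatal(err)
+//	}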
+func (client VirtualMachineExtensionImagesClient) ListVersions(location string, publisherName string, typeParameter string, filter string, top int, orderby string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListVersionsPreparer(location, publisherName, typeParameter, filter, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", "Failure preparing request") + } + + resp, err := client.ListVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", "Failure sending request") + } + + result, err = client.ListVersionsResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionImagesClient", "ListVersions", "Failure responding to request") + } + + return +} + +// ListVersionsPreparer prepares the ListVersions request. +func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location string, publisherName string, typeParameter string, filter string, top int, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "type": url.QueryEscape(typeParameter), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "$orderby": orderby, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListVersionsSender sends the ListVersions request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListVersionsResponder handles the response to the ListVersions request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,245 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// VirtualMachineExtensionsClient is the client for the
+// VirtualMachineExtensions methods of the Compute service.
+type VirtualMachineExtensionsClient struct {
+	ManagementClient
+}
+
+// NewVirtualMachineExtensionsClient creates an instance of the
+// VirtualMachineExtensionsClient client.
+func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient {
+	return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the
+// VirtualMachineExtensionsClient client.
+func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient {
+	return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the operation to create or update the extension.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine where the extension should be created or updated.
+// vmExtensionName is the name of the virtual machine extension.
+// extensionParameters is parameters supplied to the Create Virtual Machine
+// Extension operation.
+func (client VirtualMachineExtensionsClient) CreateOrUpdate(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineExtension, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, vmExtensionName, extensionParameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
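+//
+// For context, a minimal sketch of the CreateOrUpdate call this preparer
+// serves, re-submitting an extension previously fetched with Get
+// (placeholder names, credentials assumed):
+//
+//	extClient := compute.NewVirtualMachineExtensionsClient("<subscription-id>")
+//	ext, err := extClient.Get("myResourceGroup", "myVM", "myExtension", "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ext, err = extClient.CreateOrUpdate("myResourceGroup", "myVM", "myExtension", ext)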
+func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, vmExtensionName string, extensionParameters VirtualMachineExtension) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmExtensionName": url.QueryEscape(vmExtensionName), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), + autorest.WithJSON(extensionParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the extension. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine where the extension should be deleted. vmExtensionName +// is the name of the virtual machine extension. +func (client VirtualMachineExtensionsClient) Delete(resourceGroupName string, vmName string, vmExtensionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, vmName, vmExtensionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName string, vmName string, vmExtensionName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+		"vmExtensionName":   url.QueryEscape(vmExtensionName),
+		"vmName":            url.QueryEscape(vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the operation to get the extension.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine containing the extension. vmExtensionName is the name
+// of the virtual machine extension. expand is the name of the property to
+// expand. Allowed value is null or 'instanceView'.
+func (client VirtualMachineExtensionsClient) Get(resourceGroupName string, vmName string, vmExtensionName string, expand string) (result VirtualMachineExtension, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, vmName, vmExtensionName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachineExtensionsClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName string, vmName string, vmExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmExtensionName": url.QueryEscape(vmExtensionName), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "$expand": expand, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,362 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineImagesClient is the client for the VirtualMachineImages +// methods of the Compute service. +type VirtualMachineImagesClient struct { + ManagementClient +} + +// NewVirtualMachineImagesClient creates an instance of the +// VirtualMachineImagesClient client. +func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { + return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the +// VirtualMachineImagesClient client. 
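+//
+// The base URI variant is for targeting an endpoint other than the default
+// DefaultBaseURI, such as a sovereign or private cloud; the URL below is
+// only an illustrative placeholder:
+//
+//	vmImgClient := compute.NewVirtualMachineImagesClientWithBaseURI(
+//		"https://management.example.invalid", "<subscription-id>")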
+func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { + return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine image. +// +func (client VirtualMachineImagesClient) Get(location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, ae error) { + req, err := client.GetPreparer(location, publisherName, offer, skus, version) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineImagesClient) GetPreparer(location string, publisherName string, offer string, skus string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "offer": url.QueryEscape(offer), + "publisherName": url.QueryEscape(publisherName), + "skus": url.QueryEscape(skus), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "version": url.QueryEscape(version), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of virtual machine images. +// +// filter is the filter to apply on the operation. 
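+// top caps the number of results and orderby sets their ordering (OData
+// $filter/$top/$orderby). A sketch that lists versions of a well-known
+// Ubuntu SKU; the argument values are illustrative and credentials are
+// assumed (vmImgClient as constructed above, or via
+// NewVirtualMachineImagesClient):
+//
+//	images, err := vmImgClient.List(
+//		"westus", "Canonical", "UbuntuServer", "14.04.3-LTS", "", 10, "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}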
+func (client VirtualMachineImagesClient) List(location string, publisherName string, offer string, skus string, filter string, top int, orderby string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListPreparer(location, publisherName, offer, skus, filter, top, orderby) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineImagesClient) ListPreparer(location string, publisherName string, offer string, skus string, filter string, top int, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "offer": url.QueryEscape(offer), + "publisherName": url.QueryEscape(publisherName), + "skus": url.QueryEscape(skus), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "$orderby": orderby, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListOffers gets a list of virtual machine image offers. 
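+//
+// Together with ListPublishers, ListSkus, and List (all in this file), this
+// supports walking the image hierarchy publisher -> offer -> sku -> version.
+// A sketch with placeholder values (vmImgClient as above):
+//
+//	publishers, err := vmImgClient.ListPublishers("westus")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%+v\n", publishers)
+//	offers, err := vmImgClient.ListOffers("westus", "<publisher>")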
+// +func (client VirtualMachineImagesClient) ListOffers(location string, publisherName string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListOffersPreparer(location, publisherName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", "Failure preparing request") + } + + resp, err := client.ListOffersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", "Failure sending request") + } + + result, err = client.ListOffersResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListOffers", "Failure responding to request") + } + + return +} + +// ListOffersPreparer prepares the ListOffers request. +func (client VirtualMachineImagesClient) ListOffersPreparer(location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListOffersSender sends the ListOffers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListOffersResponder handles the response to the ListOffers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListPublishers gets a list of virtual machine image publishers. +// +func (client VirtualMachineImagesClient) ListPublishers(location string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListPublishersPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", "Failure preparing request") + } + + resp, err := client.ListPublishersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", "Failure sending request") + } + + result, err = client.ListPublishersResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListPublishers", "Failure responding to request") + } + + return +} + +// ListPublishersPreparer prepares the ListPublishers request. 
+func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListPublishersSender sends the ListPublishers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListPublishersResponder handles the response to the ListPublishers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkus gets a list of virtual machine image skus. +// +func (client VirtualMachineImagesClient) ListSkus(location string, publisherName string, offer string) (result VirtualMachineImageResourceList, ae error) { + req, err := client.ListSkusPreparer(location, publisherName, offer) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", "Failure preparing request") + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", "Failure sending request") + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineImagesClient", "ListSkus", "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. +func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publisherName string, offer string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "offer": url.QueryEscape(offer), + "publisherName": url.QueryEscape(publisherName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result VirtualMachineImageResourceList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,858 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachinesClient is the client for the VirtualMachines methods of the +// Compute service. +type VirtualMachinesClient struct { + ManagementClient +} + +// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient +// client. +func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { + return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachinesClientWithBaseURI creates an instance of the +// VirtualMachinesClient client. +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { + return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Capture captures the VM by copying VirtualHardDisks of the VM and outputs a +// template that can be used to create similar VMs. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. parameters is parameters supplied to the Capture +// Virtual Machine operation. 
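+//
+// A capture flow typically deallocates and generalizes the VM first (see
+// Deallocate and Generalize below); that ordering is standard Azure
+// practice rather than something this file enforces. Names are placeholders
+// and the capture parameters' fields are omitted:
+//
+//	vmClient := compute.NewVirtualMachinesClient("<subscription-id>")
+//	var params compute.VirtualMachineCaptureParameters // fields omitted here
+//	if _, err := vmClient.Deallocate("myResourceGroup", "myVM"); err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := vmClient.Generalize("myResourceGroup", "myVM"); err != nil {
+//		log.Fatal(err)
+//	}
+//	tmpl, err := vmClient.Capture("myResourceGroup", "myVM", params)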
+func (client VirtualMachinesClient) Capture(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters) (result LongRunningOperationResult, ae error) { + req, err := client.CapturePreparer(resourceGroupName, vmName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", "Failure preparing request") + } + + resp, err := client.CaptureSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", "Failure sending request") + } + + result, err = client.CaptureResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Capture", "Failure responding to request") + } + + return +} + +// CapturePreparer prepares the Capture request. +func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, vmName string, parameters VirtualMachineCaptureParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CaptureSender sends the Capture request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted) +} + +// CaptureResponder handles the response to the Capture request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result LongRunningOperationResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate the operation to create or update a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. parameters is parameters supplied to the Create +// Virtual Machine operation. 
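+//
+// A minimal update sketch: read the current model with Get (defined later
+// in this file), adjust it, and PUT it back (placeholder names, vmClient as
+// above):
+//
+//	vm, err := vmClient.Get("myResourceGroup", "myVM", "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// ...modify vm here...
+//	vm, err = vmClient.CreateOrUpdate("myResourceGroup", "myVM", vm)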
+func (client VirtualMachinesClient) CreateOrUpdate(resourceGroupName string, vmName string, parameters VirtualMachine) (result VirtualMachine, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, vmName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName string, vmName string, parameters VirtualMachine) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Deallocate shuts down the Virtual Machine and releases the compute +// resources. You are not billed for the compute resources that this Virtual +// Machine uses. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. 
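+//
+// A one-line sketch (placeholder names, vmClient as above); compare
+// PowerOff later in this file, which stops the VM without releasing its
+// compute resources:
+//
+//	resp, err := vmClient.Deallocate("myResourceGroup", "myVM")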
+func (client VirtualMachinesClient) Deallocate(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.DeallocatePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", "Failure preparing request") + } + + resp, err := client.DeallocateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", "Failure sending request") + } + + result, err = client.DeallocateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Deallocate", "Failure responding to request") + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted) +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the operation to delete a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Delete(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Generalize sets the state of the VM as Generalized. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Generalize(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.GeneralizePreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", "Failure preparing request") + } + + resp, err := client.GeneralizeSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", "Failure sending request") + } + + result, err = client.GeneralizeResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Generalize", "Failure responding to request") + } + + return +} + +// GeneralizePreparer prepares the Generalize request. +func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GeneralizeSender sends the Generalize request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GeneralizeResponder handles the response to the Generalize request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the operation to get a virtual machine.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine. expand is the name of the property to expand. Allowed
+// value is null or 'instanceView'.
+func (client VirtualMachinesClient) Get(resourceGroupName string, vmName string, expand string) (result VirtualMachine, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, vmName, expand)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, vmName string, expand string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+		"vmName":            url.QueryEscape(vmName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"$expand":     expand,
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the operation to list virtual machines under a resource group.
+//
+// resourceGroupName is the name of the resource group.
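+//
+// A short sketch (placeholder names, vmClient as above):
+//
+//	vms, err := vmClient.List("myResourceGroup")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%+v\n", vms)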
+func (client VirtualMachinesClient) List(resourceGroupName string) (result VirtualMachineListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, ae error) {
+	req, err := lastResults.VirtualMachineListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll gets the list of Virtual Machines in the subscription. Use the
+// nextLink property in the response to get the next page of Virtual
+// Machines. Repeat this until nextLink is null to fetch all of the Virtual
+// Machines.
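+//
+// A paging sketch using ListAllNextResults (defined below); Value and
+// NextLink are the assumed Go names of the result's fields, and vmClient is
+// as above:
+//
+//	page, err := vmClient.ListAll()
+//	for err == nil {
+//		// ...consume page.Value...
+//		if page.NextLink == nil {
+//			break
+//		}
+//		page, err = vmClient.ListAllNextResults(page)
+//	}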
+func (client VirtualMachinesClient) ListAll() (result VirtualMachineListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client VirtualMachinesClient) ListAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, ae error) {
+	req, err := lastResults.VirtualMachineListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAvailableSizes lists the virtual machine sizes available for a
+// virtual machine.
+//
+// resourceGroupName is the name of the resource group. vmName is the name of
+// the virtual machine.
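+//
+// A short sketch (placeholder names, vmClient as above):
+//
+//	sizes, err := vmClient.ListAvailableSizes("myResourceGroup", "myVM")
+//	if err != nil {
+//		log.Fatal(err)
+//	}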
+func (client VirtualMachinesClient) ListAvailableSizes(resourceGroupName string, vmName string) (result VirtualMachineSizeListResult, ae error) { + req, err := client.ListAvailableSizesPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", "Failure preparing request") + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", "Failure sending request") + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "ListAvailableSizes", "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. +func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PowerOff the operation to power off (stop) a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) PowerOff(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.PowerOffPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", "Failure preparing request") + } + + resp, err := client.PowerOffSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", "Failure sending request") + } + + result, err = client.PowerOffResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "PowerOff", "Failure responding to request") + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. 
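+//
+// The Preparer/Sender/Responder triplet can also be driven by hand when one
+// phase needs customizing; a sketch with error handling elided and
+// placeholder names:
+//
+//    req, _ := client.PowerOffPreparer("mygroup", "myvm")
+//    resp, _ := client.PowerOffSender(req)
+//    result, _ := client.PowerOffResponder(resp)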
+func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted) +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Restart the operation to restart a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Restart(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.RestartPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", "Failure preparing request") + } + + resp, err := client.RestartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", "Failure sending request") + } + + result, err = client.RestartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Restart", "Failure responding to request") + } + + return +} + +// RestartPreparer prepares the Restart request. +func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted) +} + +// RestartResponder handles the response to the Restart request. 
The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Start the operation to start a virtual machine. +// +// resourceGroupName is the name of the resource group. vmName is the name of +// the virtual machine. +func (client VirtualMachinesClient) Start(resourceGroupName string, vmName string) (result autorest.Response, ae error) { + req, err := client.StartPreparer(resourceGroupName, vmName) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", "Failure preparing request") + } + + resp, err := client.StartSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", "Failure sending request") + } + + result, err = client.StartResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachinesClient", "Start", "Failure responding to request") + } + + return +} + +// StartPreparer prepares the Start request. +func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, vmName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "vmName": url.QueryEscape(vmName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted) +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go' --- src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualMachineSizesClient is the client for the VirtualMachineSizes methods +// of the Compute service. +type VirtualMachineSizesClient struct { + ManagementClient +} + +// NewVirtualMachineSizesClient creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { + return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { + return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists virtual-machine-sizes available in a location for a subscription. +// +// location is the location upon which virtual-machine-sizes is queried. +func (client VirtualMachineSizesClient) List(location string) (result VirtualMachineSizeListResult, ae error) { + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "compute/VirtualMachineSizesClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
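+//
+// Together with ListPreparer and ListSender above, the typical round trip
+// this responder completes is simply (the location value is a placeholder):
+//
+//    sizes, err := client.List("westus")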
+func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/examples' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go' --- src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/examples/check.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,68 @@ +package examples + +import ( + "fmt" + "log" + "net/http" + "os" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/azure-sdk-for-go/arm/examples/helpers" + "github.com/Azure/azure-sdk-for-go/arm/storage" +) + +func withInspection() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + fmt.Printf("Inspecting Request: %s %s\n", r.Method, r.URL) + return p.Prepare(r) + }) + } +} + +func byInspecting() autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + fmt.Printf("Inspecting Response: %s for %s %s\n", resp.Status, resp.Request.Method, resp.Request.URL) + return r.Respond(resp) + }) + } +} + +func checkName(name string) { + c, err := helpers.LoadCredentials() + if err != nil { + log.Fatalf("Error: %v", err) + } + + ac := storage.NewAccountsClient(c["subscriptionID"]) + + spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) + if err != nil { + log.Fatalf("Error: %v", err) + } + ac.Authorizer = spt + + ac.Sender = autorest.CreateSender( + autorest.WithLogging(log.New(os.Stdout, "sdk-example: ", log.LstdFlags))) + + ac.RequestInspector = withInspection() + ac.ResponseInspector = byInspecting() + + cna, err := ac.CheckNameAvailability( + storage.AccountCheckNameAvailabilityParameters{ + Name: to.StringPtr(name), + Type: to.StringPtr("Microsoft.Storage/storageAccounts")}) + + if err != nil { + log.Fatalf("Error: %v", err) + } else { + if to.Bool(cna.NameAvailable) { + fmt.Printf("The name '%s' is available\n", name) + } else { + fmt.Printf("The name '%s' is unavailable because %s\n", name, to.String(cna.Message)) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/examples/check_test.go' --- src/github.com/Azure/azure-sdk-for-go/arm/examples/check_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/examples/check_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +package examples + +import ( + "testing" +) + +func TestCheckName(t *testing.T) { + checkName("testname01") +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go' --- src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/examples/create.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,91 @@ +package examples + +import ( + "fmt" + "log" + "net/http" + "time" + + 
"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/azure-sdk-for-go/arm/examples/helpers" + "github.com/Azure/azure-sdk-for-go/arm/storage" +) + +func withWatcher() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + fmt.Printf("Sending %s %s\n", r.Method, r.URL) + resp, err := s.Do(r) + fmt.Printf("...received status %s\n", resp.Status) + if autorest.ResponseRequiresPolling(resp) { + fmt.Printf("...will poll after %d seconds\n", + int(autorest.GetPollingDelay(resp, time.Duration(0))/time.Second)) + fmt.Printf("...will poll at %s\n", autorest.GetPollingLocation(resp)) + } + fmt.Println("") + return resp, err + }) + } +} + +func createAccount(resourceGroup, name string) { + c, err := helpers.LoadCredentials() + if err != nil { + log.Fatalf("Error: %v", err) + } + + ac := storage.NewAccountsClient(c["subscriptionID"]) + + spt, err := helpers.NewServicePrincipalTokenFromCredentials(c, azure.AzureResourceManagerScope) + if err != nil { + log.Fatalf("Error: %v", err) + } + ac.Authorizer = spt + + cna, err := ac.CheckNameAvailability( + storage.AccountCheckNameAvailabilityParameters{ + Name: to.StringPtr(name), + Type: to.StringPtr("Microsoft.Storage/storageAccounts")}) + if err != nil { + log.Fatalf("Error: %v", err) + return + } + if !to.Bool(cna.NameAvailable) { + fmt.Printf("%s is unavailable -- try again\n", name) + return + } + fmt.Printf("%s is available\n\n", name) + + ac.Sender = autorest.CreateSender(withWatcher()) + ac.PollingMode = autorest.PollUntilAttempts + ac.PollingAttempts = 5 + + cp := storage.AccountCreateParameters{} + cp.Location = to.StringPtr("westus") + cp.Properties = &storage.AccountPropertiesCreateParameters{AccountType: storage.StandardLRS} + + sa, err := ac.Create(resourceGroup, name, cp) + if err != nil { + if sa.Response.StatusCode != http.StatusAccepted { + fmt.Printf("Creation of %s.%s failed with err -- %v\n", resourceGroup, name, err) + return + } + fmt.Printf("Create initiated for %s.%s -- poll %s to check status\n", + resourceGroup, + name, + sa.GetPollingLocation()) + return + } + + fmt.Printf("Successfully created %s.%s\n\n", resourceGroup, name) + + ac.Sender = nil + r, err := ac.Delete(resourceGroup, name) + if err != nil { + fmt.Printf("Delete of %s.%s failed with status %s\n...%v\n", resourceGroup, name, r.Status, err) + return + } + fmt.Printf("Deletion of %s.%s succeeded -- %s\n", resourceGroup, name, r.Status) +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/examples/create_test.go' --- src/github.com/Azure/azure-sdk-for-go/arm/examples/create_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/examples/create_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +package examples + +import ( + "testing" +) + +func TestCreateAccount(t *testing.T) { + createAccount("gosdk", "gosdktestname01") +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/credentials_sample.json' --- src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/credentials_sample.json 1970-01-01 00:00:00 +0000 +++ 
src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/credentials_sample.json 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +{ + "clientID" : "", + "clientSecret" : "", + "subscriptionID" : "", + "tenantID" : "" +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go' --- src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/examples/helpers/helpers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +package helpers + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/user" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/azure" +) + +const ( + credentialsPath = "/.azure/credentials.json" +) + +// ToJSON returns the passed item as a pretty-printed JSON string. If any JSON error occurs, +// it returns the empty string. +func ToJSON(v interface{}) string { + j, _ := json.MarshalIndent(v, "", " ") + return string(j) +} + +// NewServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the +// passed credentials map. +func NewServicePrincipalTokenFromCredentials(c map[string]string, scope string) (*azure.ServicePrincipalToken, error) { + return azure.NewServicePrincipalToken(c["clientID"], c["clientSecret"], c["tenantID"], scope) +} + +// LoadCredentials reads credentials from a ~/.azure/credentials.json file. See the accompanying +// credentials_sample.json file for an example. +// +// Note: credentials stored in a local file must be secured and never shared. A file is used here +// simply to keep the example code short. +func LoadCredentials() (map[string]string, error) { + u, err := user.Current() + if err != nil { + return nil, fmt.Errorf("ERROR: Unable to determine current user") + } + + n := u.HomeDir + credentialsPath + f, err := os.Open(n) + if err != nil { + return nil, fmt.Errorf("ERROR: Unable to locate or open Azure credentials at %s (%v)", n, err) + } + + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, fmt.Errorf("ERROR: Unable to read %s (%v)", n, err) + } + + c := map[string]interface{}{} + err = json.Unmarshal(b, &c) + if err != nil { + return nil, fmt.Errorf("ERROR: %s contained invalid JSON (%s)", n, err) + } + + return ensureValueStrings(c), nil +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + default: + return fmt.Sprintf("%v", v) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/features' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/features/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/features/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/features/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,352 @@ +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +const ( + // APIVersion is the version of the Features + APIVersion = "2014-08-01-preview" + + // DefaultBaseURI is the default URI used for the service Features + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Features. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} + +// Get gets the previewed feature with the specified name. +// +// resourceProviderNamespace is the namespace of the resource provider. +// featureName is the name of the previewed feature in the resource provider. +func (client ManagementClient) Get(resourceProviderNamespace string, featureName string) (result Result, ae error) { + req, err := client.GetPreparer(resourceProviderNamespace, featureName) + if err != nil { + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "features/ManagementClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ManagementClient) GetPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "featureName": url.QueryEscape(featureName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error.
+func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ManagementClient) GetResponder(resp *http.Response) (result Result, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of previewed features of a resource provider. +// +// resourceProviderNamespace is the namespace of the resource provider. +func (client ManagementClient) List(resourceProviderNamespace string) (result OperationsListResult, ae error) { + req, err := client.ListPreparer(resourceProviderNamespace) + if err != nil { + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ManagementClient) ListPreparer(resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListResponder(resp *http.Response) (result OperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
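+//
+// The termination contract comes from OperationsListResultPreparer in
+// models.go: once NextLink is exhausted the prepared request is nil and an
+// empty result is returned, so callers may loop on NextLink. A sketch with a
+// placeholder namespace:
+//
+//    res, err := client.List("Microsoft.Compute")
+//    for err == nil && res.NextLink != nil {
+//        res, err = client.ListNextResults(res)
+//    }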
+func (client ManagementClient) ListNextResults(lastResults OperationsListResult) (result OperationsListResult, ae error) { + req, err := lastResults.OperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "features/ManagementClient", "List", "Failure responding to next results request") + } + + return +} + +// ListAll gets a list of previewed features for all the providers in the +// current subscription. +func (client ManagementClient) ListAll() (result OperationsListResult, ae error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ManagementClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/features"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) ListAllSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client ManagementClient) ListAllResponder(resp *http.Response) (result OperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any.
+func (client ManagementClient) ListAllNextResults(lastResults OperationsListResult) (result OperationsListResult, ae error) { + req, err := lastResults.OperationsListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure sending next results request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "features/ManagementClient", "ListAll", "Failure responding to next results request") + } + + return +} + +// Register registers for a previewed feature of a resource provider. +// +// resourceProviderNamespace is the namespace of the resource provider. +// featureName is the name of the previewed feature in the resource provider. +func (client ManagementClient) Register(resourceProviderNamespace string, featureName string) (result Result, ae error) { + req, err := client.RegisterPreparer(resourceProviderNamespace, featureName) + if err != nil { + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Register", "Failure preparing request") + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "features/ManagementClient", "Register", "Failure sending request") + } + + result, err = client.RegisterResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "features/ManagementClient", "Register", "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. +func (client ManagementClient) RegisterPreparer(resourceProviderNamespace string, featureName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "featureName": url.QueryEscape(featureName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RegisterSender sends the Register request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) RegisterSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RegisterResponder handles the response to the Register request. The method always +// closes the http.Response Body.
+func (client ManagementClient) RegisterResponder(resp *http.Response) (result Result, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/features/models.go' --- src/github.com/Azure/azure-sdk-for-go/arm/features/models.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/features/models.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,90 @@ +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DeploymentExtendedFilter is deployment filter. +type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// GenericResourceFilter is resource filter. +type GenericResourceFilter struct { + ResourceType *string `json:"resourceType,omitempty"` + Tagname *string `json:"tagname,omitempty"` + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// OperationsListResult is list of previewed features. +type OperationsListResult struct { + autorest.Response `json:"-"` + Value *[]Result `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// OperationsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client OperationsListResult) OperationsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// Properties is previewed feature information. +type Properties struct { + State *string `json:"state,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceGroupFilter is resource group filter. +type ResourceGroupFilter struct { + TagName *string `json:"tagName,omitempty"` + TagValue *string `json:"tagValue,omitempty"` +} + +// Result is previewed feature information. 
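+//
+// Fields are pointers so absent JSON properties remain distinguishable from
+// zero values; reads are easiest through the autorest "to" helpers, e.g.
+// (a sketch, where r is a Result):
+//
+//    if r.Properties != nil {
+//        fmt.Println(to.String(r.Name), to.String(r.Properties.State))
+//    }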
+type Result struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Properties *Properties `json:"properties,omitempty"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/features/version.go' --- src/github.com/Azure/azure-sdk-for-go/arm/features/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/features/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package features + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "features", "2014-08-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/logic' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/logic/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Logic + APIVersion = "2015-02-01-preview" + + // DefaultBaseURI is the default URI used for the service Logic + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Logic. 
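+//
+// Construction mirrors the other ARM packages; a consumer-side sketch (the
+// subscription id is a placeholder and spt a previously created
+// azure.ServicePrincipalToken):
+//
+//    client := logic.New("00000000-0000-0000-0000-000000000000")
+//    client.Authorizer = spt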
+type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go' --- src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/logic/models.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,549 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// KeyType enumerates the values for key type. +type KeyType string + +const ( + // KeyTypeNotSpecified specifies the key type not specified state for key + // type. + KeyTypeNotSpecified KeyType = "NotSpecified" + // KeyTypePrimary specifies the key type primary state for key type. + KeyTypePrimary KeyType = "Primary" + // KeyTypeSecondary specifies the key type secondary state for key type. + KeyTypeSecondary KeyType = "Secondary" +) + +// ParameterType enumerates the values for parameter type. +type ParameterType string + +const ( + // ParameterTypeArray specifies the parameter type array state for + // parameter type. + ParameterTypeArray ParameterType = "Array" + // ParameterTypeBool specifies the parameter type bool state for parameter + // type. + ParameterTypeBool ParameterType = "Bool" + // ParameterTypeFloat specifies the parameter type float state for + // parameter type. + ParameterTypeFloat ParameterType = "Float" + // ParameterTypeInt specifies the parameter type int state for parameter + // type. + ParameterTypeInt ParameterType = "Int" + // ParameterTypeNotSpecified specifies the parameter type not specified + // state for parameter type. + ParameterTypeNotSpecified ParameterType = "NotSpecified" + // ParameterTypeObject specifies the parameter type object state for + // parameter type. + ParameterTypeObject ParameterType = "Object" + // ParameterTypeSecureObject specifies the parameter type secure object + // state for parameter type. 
+ ParameterTypeSecureObject ParameterType = "SecureObject" + // ParameterTypeSecureString specifies the parameter type secure string + // state for parameter type. + ParameterTypeSecureString ParameterType = "SecureString" + // ParameterTypeString specifies the parameter type string state for + // parameter type. + ParameterTypeString ParameterType = "String" +) + +// RecurrenceFrequency enumerates the values for recurrence frequency. +type RecurrenceFrequency string + +const ( + // Day specifies the day state for recurrence frequency. + Day RecurrenceFrequency = "Day" + // Hour specifies the hour state for recurrence frequency. + Hour RecurrenceFrequency = "Hour" + // Minute specifies the minute state for recurrence frequency. + Minute RecurrenceFrequency = "Minute" + // Month specifies the month state for recurrence frequency. + Month RecurrenceFrequency = "Month" + // Second specifies the second state for recurrence frequency. + Second RecurrenceFrequency = "Second" + // Week specifies the week state for recurrence frequency. + Week RecurrenceFrequency = "Week" + // Year specifies the year state for recurrence frequency. + Year RecurrenceFrequency = "Year" +) + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // SkuNameBasic specifies the sku name basic state for sku name. + SkuNameBasic SkuName = "Basic" + // SkuNameFree specifies the sku name free state for sku name. + SkuNameFree SkuName = "Free" + // SkuNameNotSpecified specifies the sku name not specified state for sku + // name. + SkuNameNotSpecified SkuName = "NotSpecified" + // SkuNamePremium specifies the sku name premium state for sku name. + SkuNamePremium SkuName = "Premium" + // SkuNameShared specifies the sku name shared state for sku name. + SkuNameShared SkuName = "Shared" + // SkuNameStandard specifies the sku name standard state for sku name. + SkuNameStandard SkuName = "Standard" +) + +// WorkflowState enumerates the values for workflow state. +type WorkflowState string + +const ( + // WorkflowStateDeleted specifies the workflow state deleted state for + // workflow state. + WorkflowStateDeleted WorkflowState = "Deleted" + // WorkflowStateDisabled specifies the workflow state disabled state for + // workflow state. + WorkflowStateDisabled WorkflowState = "Disabled" + // WorkflowStateEnabled specifies the workflow state enabled state for + // workflow state. + WorkflowStateEnabled WorkflowState = "Enabled" + // WorkflowStateNotSpecified specifies the workflow state not specified + // state for workflow state. + WorkflowStateNotSpecified WorkflowState = "NotSpecified" + // WorkflowStateSuspended specifies the workflow state suspended state for + // workflow state. + WorkflowStateSuspended WorkflowState = "Suspended" +) + +// WorkflowStatus enumerates the values for workflow status. +type WorkflowStatus string + +const ( + // WorkflowStatusCancelled specifies the workflow status cancelled state + // for workflow status. + WorkflowStatusCancelled WorkflowStatus = "Cancelled" + // WorkflowStatusFailed specifies the workflow status failed state for + // workflow status. + WorkflowStatusFailed WorkflowStatus = "Failed" + // WorkflowStatusNotSpecified specifies the workflow status not specified + // state for workflow status. + WorkflowStatusNotSpecified WorkflowStatus = "NotSpecified" + // WorkflowStatusPaused specifies the workflow status paused state for + // workflow status. 
+ WorkflowStatusPaused WorkflowStatus = "Paused" + // WorkflowStatusRunning specifies the workflow status running state for + // workflow status. + WorkflowStatusRunning WorkflowStatus = "Running" + // WorkflowStatusSkipped specifies the workflow status skipped state for + // workflow status. + WorkflowStatusSkipped WorkflowStatus = "Skipped" + // WorkflowStatusSucceeded specifies the workflow status succeeded state + // for workflow status. + WorkflowStatusSucceeded WorkflowStatus = "Succeeded" + // WorkflowStatusSuspended specifies the workflow status suspended state + // for workflow status. + WorkflowStatusSuspended WorkflowStatus = "Suspended" + // WorkflowStatusWaiting specifies the workflow status waiting state for + // workflow status. + WorkflowStatusWaiting WorkflowStatus = "Waiting" +) + +// ContentHash is +type ContentHash struct { + Algorithm *string `json:"algorithm,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ContentLink is +type ContentLink struct { + URI *string `json:"uri,omitempty"` + ContentVersion *string `json:"contentVersion,omitempty"` + ContentSize *int32 `json:"contentSize,omitempty"` + ContentHash *ContentHash `json:"contentHash,omitempty"` + Metadata *map[string]*string `json:"metadata,omitempty"` +} + +// RegenerateSecretKeyParameters is +type RegenerateSecretKeyParameters struct { + KeyType KeyType `json:"keyType,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceReference is +type ResourceReference struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// RunWorkflowParameters is +type RunWorkflowParameters struct { + Name *string `json:"name,omitempty"` + Outputs *map[string]*string `json:"outputs,omitempty"` +} + +// Sku is +type Sku struct { + Name SkuName `json:"name,omitempty"` + Plan *ResourceReference `json:"plan,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// Workflow is +type Workflow struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *WorkflowProperties `json:"properties,omitempty"` +} + +// WorkflowAccessKey is +type WorkflowAccessKey struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *WorkflowAccessKeyProperties `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// WorkflowAccessKeyListResult is +type WorkflowAccessKeyListResult struct { + autorest.Response `json:"-"` + Value *[]WorkflowAccessKey `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// WorkflowAccessKeyListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
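+//
+// The prepared request is a plain GET against the stored NextLink, so it can
+// be sent with whatever sender produced the first page; a sketch, where
+// "keys" is a WorkflowAccessKeyListResult:
+//
+//    req, err := keys.WorkflowAccessKeyListResultPreparer()
+//    if err == nil && req != nil {
+//        // send req to fetch the next page of access keys
+//    }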
+func (client WorkflowAccessKeyListResult) WorkflowAccessKeyListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// WorkflowAccessKeyProperties is +type WorkflowAccessKeyProperties struct { + NotBefore *date.Time `json:"notBefore,omitempty"` + NotAfter *date.Time `json:"notAfter,omitempty"` +} + +// WorkflowFilter is +type WorkflowFilter struct { + State WorkflowState `json:"state,omitempty"` +} + +// WorkflowListResult is +type WorkflowListResult struct { + autorest.Response `json:"-"` + Value *[]Workflow `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// WorkflowListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client WorkflowListResult) WorkflowListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// WorkflowOutputParameter is +type WorkflowOutputParameter struct { + Type ParameterType `json:"type,omitempty"` + Value *map[string]*string `json:"value,omitempty"` + Metadata *map[string]*string `json:"metadata,omitempty"` + Error *map[string]*string `json:"error,omitempty"` +} + +// WorkflowParameter is +type WorkflowParameter struct { + Type ParameterType `json:"type,omitempty"` + Value *map[string]*string `json:"value,omitempty"` + Metadata *map[string]*string `json:"metadata,omitempty"` +} + +// WorkflowProperties is +type WorkflowProperties struct { + CreatedTime *date.Time `json:"createdTime,omitempty"` + ChangedTime *date.Time `json:"changedTime,omitempty"` + State WorkflowState `json:"state,omitempty"` + Version *string `json:"version,omitempty"` + AccessEndpoint *string `json:"accessEndpoint,omitempty"` + Sku *Sku `json:"sku,omitempty"` + DefinitionLink *ContentLink `json:"definitionLink,omitempty"` + Definition *map[string]*string `json:"definition,omitempty"` + ParametersLink *ContentLink `json:"parametersLink,omitempty"` + Parameters *map[string]*WorkflowParameter `json:"parameters,omitempty"` +} + +// WorkflowRun is +type WorkflowRun struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *WorkflowRunProperties `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// WorkflowRunAction is +type WorkflowRunAction struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *WorkflowRunActionProperties `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// WorkflowRunActionFilter is +type WorkflowRunActionFilter struct { + Status WorkflowStatus `json:"status,omitempty"` +} + +// WorkflowRunActionListResult is +type WorkflowRunActionListResult struct { + autorest.Response `json:"-"` + Value *[]WorkflowRunAction `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// WorkflowRunActionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client WorkflowRunActionListResult) WorkflowRunActionListResultPreparer() (*http.Request, error) {
+    if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+        return nil, nil
+    }
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// WorkflowRunActionProperties is the workflow run action properties.
+type WorkflowRunActionProperties struct {
+    StartTime   *date.Time          `json:"startTime,omitempty"`
+    EndTime     *date.Time          `json:"endTime,omitempty"`
+    Status      WorkflowStatus      `json:"status,omitempty"`
+    Code        *string             `json:"code,omitempty"`
+    Error       *map[string]*string `json:"error,omitempty"`
+    TrackingID  *string             `json:"trackingId,omitempty"`
+    InputsLink  *ContentLink        `json:"inputsLink,omitempty"`
+    OutputsLink *ContentLink        `json:"outputsLink,omitempty"`
+}
+
+// WorkflowRunFilter is the workflow run filter.
+type WorkflowRunFilter struct {
+    Status WorkflowStatus `json:"status,omitempty"`
+}
+
+// WorkflowRunListResult is the list of workflow runs.
+type WorkflowRunListResult struct {
+    autorest.Response `json:"-"`
+    Value    *[]WorkflowRun `json:"value,omitempty"`
+    NextLink *string        `json:"nextLink,omitempty"`
+}
+
+// WorkflowRunListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client WorkflowRunListResult) WorkflowRunListResultPreparer() (*http.Request, error) {
+    if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+        return nil, nil
+    }
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// WorkflowRunProperties is the workflow run properties.
+type WorkflowRunProperties struct {
+    StartTime     *date.Time                           `json:"startTime,omitempty"`
+    EndTime       *date.Time                           `json:"endTime,omitempty"`
+    Status        WorkflowStatus                       `json:"status,omitempty"`
+    Code          *string                              `json:"code,omitempty"`
+    Error         *map[string]*string                  `json:"error,omitempty"`
+    CorrelationID *string                              `json:"correlationId,omitempty"`
+    Workflow      *ResourceReference                   `json:"workflow,omitempty"`
+    Trigger       *WorkflowRunTrigger                  `json:"trigger,omitempty"`
+    Outputs       *map[string]*WorkflowOutputParameter `json:"outputs,omitempty"`
+}
+
+// WorkflowRunTrigger is the workflow run trigger.
+type WorkflowRunTrigger struct {
+    Name        *string             `json:"name,omitempty"`
+    Inputs      *map[string]*string `json:"inputs,omitempty"`
+    InputsLink  *ContentLink        `json:"inputsLink,omitempty"`
+    Outputs     *map[string]*string `json:"outputs,omitempty"`
+    OutputsLink *ContentLink        `json:"outputsLink,omitempty"`
+    StartTime   *date.Time          `json:"startTime,omitempty"`
+    EndTime     *date.Time          `json:"endTime,omitempty"`
+    TrackingID  *string             `json:"trackingId,omitempty"`
+    Code        *string             `json:"code,omitempty"`
+    Status      WorkflowStatus      `json:"status,omitempty"`
+    Error       *map[string]*string `json:"error,omitempty"`
+}
+
+// WorkflowSecretKeys is the workflow secret keys.
+type WorkflowSecretKeys struct {
+    autorest.Response `json:"-"`
+    PrimarySecretKey   *string `json:"primarySecretKey,omitempty"`
+    SecondarySecretKey *string `json:"secondarySecretKey,omitempty"`
+}
+
+// WorkflowTrigger is the workflow trigger.
+type WorkflowTrigger struct {
+    autorest.Response `json:"-"`
+    ID         *string                    `json:"id,omitempty"`
+    Properties *WorkflowTriggerProperties `json:"properties,omitempty"`
+    Name       *string                    `json:"name,omitempty"`
+    Type       *string                    `json:"type,omitempty"`
+}
+
+// WorkflowTriggerFilter is the workflow trigger filter.
+type WorkflowTriggerFilter struct {
+    State WorkflowState `json:"state,omitempty"`
+}
+
+// WorkflowTriggerHistory is the workflow trigger history.
+type WorkflowTriggerHistory struct {
+    autorest.Response `json:"-"`
+    ID         *string                           `json:"id,omitempty"`
+    Properties *WorkflowTriggerHistoryProperties `json:"properties,omitempty"`
+    Name       *string                           `json:"name,omitempty"`
+    Type       *string                           `json:"type,omitempty"`
+}
+
+// WorkflowTriggerHistoryListResult is the list of workflow trigger
+// histories.
+type WorkflowTriggerHistoryListResult struct {
+    autorest.Response `json:"-"`
+    Value    *[]WorkflowTriggerHistory `json:"value,omitempty"`
+    NextLink *string                   `json:"nextLink,omitempty"`
+}
+
+// WorkflowTriggerHistoryListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client WorkflowTriggerHistoryListResult) WorkflowTriggerHistoryListResultPreparer() (*http.Request, error) {
+    if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+        return nil, nil
+    }
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// WorkflowTriggerHistoryProperties is the workflow trigger history
+// properties.
+type WorkflowTriggerHistoryProperties struct {
+    StartTime   *date.Time          `json:"startTime,omitempty"`
+    EndTime     *date.Time          `json:"endTime,omitempty"`
+    Status      WorkflowStatus      `json:"status,omitempty"`
+    Code        *string             `json:"code,omitempty"`
+    Error       *map[string]*string `json:"error,omitempty"`
+    TrackingID  *string             `json:"trackingId,omitempty"`
+    InputsLink  *ContentLink        `json:"inputsLink,omitempty"`
+    OutputsLink *ContentLink        `json:"outputsLink,omitempty"`
+    Fired       *bool               `json:"fired,omitempty"`
+    Run         *ResourceReference  `json:"run,omitempty"`
+}
+
+// WorkflowTriggerListResult is the list of workflow triggers.
+type WorkflowTriggerListResult struct {
+    autorest.Response `json:"-"`
+    Value    *[]WorkflowTrigger `json:"value,omitempty"`
+    NextLink *string            `json:"nextLink,omitempty"`
+}
+
+// WorkflowTriggerListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client WorkflowTriggerListResult) WorkflowTriggerListResultPreparer() (*http.Request, error) {
+    if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+        return nil, nil
+    }
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// WorkflowTriggerProperties is the workflow trigger properties.
+type WorkflowTriggerProperties struct {
+    CreatedTime       *date.Time                 `json:"createdTime,omitempty"`
+    ChangedTime       *date.Time                 `json:"changedTime,omitempty"`
+    State             WorkflowState              `json:"state,omitempty"`
+    Status            WorkflowStatus             `json:"status,omitempty"`
+    LastExecutionTime *date.Time                 `json:"lastExecutionTime,omitempty"`
+    NextExecutionTime *date.Time                 `json:"nextExecutionTime,omitempty"`
+    Recurrence        *WorkflowTriggerRecurrence `json:"recurrence,omitempty"`
+    Workflow          *ResourceReference         `json:"workflow,omitempty"`
+}
+
+// WorkflowTriggerRecurrence is the workflow trigger recurrence.
+type WorkflowTriggerRecurrence struct {
+    Frequency RecurrenceFrequency `json:"frequency,omitempty"`
+    Interval  *int                `json:"interval,omitempty"`
+    StartTime *date.Time          `json:"startTime,omitempty"`
+    TimeZone  *string             `json:"timeZone,omitempty"`
+}
+
+// WorkflowVersion is the workflow version.
+type WorkflowVersion struct {
+    autorest.Response `json:"-"`
+    ID         *string                    `json:"id,omitempty"`
+    Name       *string                    `json:"name,omitempty"`
+    Type       *string                    `json:"type,omitempty"`
+    Location   *string                    `json:"location,omitempty"`
+    Tags       *map[string]*string        `json:"tags,omitempty"`
+    Properties *WorkflowVersionProperties `json:"properties,omitempty"`
+}
+
+// WorkflowVersionProperties is the workflow version properties.
+type WorkflowVersionProperties struct {
+    CreatedTime    *date.Time                     `json:"createdTime,omitempty"`
+    ChangedTime    *date.Time                     `json:"changedTime,omitempty"`
+    State          WorkflowState                  `json:"state,omitempty"`
+    Version        *string                        `json:"version,omitempty"`
+    AccessEndpoint *string                        `json:"accessEndpoint,omitempty"`
+    Sku            *Sku                           `json:"sku,omitempty"`
+    DefinitionLink *ContentLink                   `json:"definitionLink,omitempty"`
+    Definition     *map[string]*string            `json:"definition,omitempty"`
+    ParametersLink *ContentLink                   `json:"parametersLink,omitempty"`
+    Parameters     *map[string]*WorkflowParameter `json:"parameters,omitempty"`
+} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/logic/version.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,43 @@
+package logic
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+ +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "logic", "2015-02-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go' --- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowaccesskeys.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,460 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// WorkflowAccessKeysClient is the client for the WorkflowAccessKeys methods +// of the Logic service. +type WorkflowAccessKeysClient struct { + ManagementClient +} + +// NewWorkflowAccessKeysClient creates an instance of the +// WorkflowAccessKeysClient client. +func NewWorkflowAccessKeysClient(subscriptionID string) WorkflowAccessKeysClient { + return NewWorkflowAccessKeysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkflowAccessKeysClientWithBaseURI creates an instance of the +// WorkflowAccessKeysClient client. +func NewWorkflowAccessKeysClientWithBaseURI(baseURI string, subscriptionID string) WorkflowAccessKeysClient { + return WorkflowAccessKeysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a workflow access key. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. accessKeyName is the workflow access key name. workflowAccesskey is +// the workflow access key. 
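+//
+// An illustrative call site (the subscription ID, group, workflow, and key
+// names below are placeholders, not values defined by this package):
+//
+//   client := NewWorkflowAccessKeysClient("<subscription id>")
+//   key, err := client.CreateOrUpdate("myGroup", "myWorkflow", "default", WorkflowAccessKey{})
+//   if err != nil {
+//       // the returned error wraps the preparing/sending/responding failure
+//   }
+//   _ = key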
+func (client WorkflowAccessKeysClient) CreateOrUpdate(resourceGroupName string, workflowName string, accessKeyName string, workflowAccesskey WorkflowAccessKey) (result WorkflowAccessKey, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, workflowName, accessKeyName, workflowAccesskey) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client WorkflowAccessKeysClient) CreateOrUpdatePreparer(resourceGroupName string, workflowName string, accessKeyName string, workflowAccesskey WorkflowAccessKey) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accessKeyName": url.QueryEscape(accessKeyName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}"), + autorest.WithJSON(workflowAccesskey), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowAccessKeysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client WorkflowAccessKeysClient) CreateOrUpdateResponder(resp *http.Response) (result WorkflowAccessKey, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a workflow access key. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. accessKeyName is the workflow access key name. 
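+//
+// A sketch of a delete followed by a check on the raw response (names are
+// placeholders):
+//
+//   resp, err := client.Delete("myGroup", "myWorkflow", "default")
+//   if err == nil && resp.Response != nil {
+//       // StatusCode is http.StatusOK or http.StatusNoContent on success
+//   }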
+func (client WorkflowAccessKeysClient) Delete(resourceGroupName string, workflowName string, accessKeyName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, workflowName, accessKeyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client WorkflowAccessKeysClient) DeletePreparer(resourceGroupName string, workflowName string, accessKeyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accessKeyName": url.QueryEscape(accessKeyName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowAccessKeysClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client WorkflowAccessKeysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a workflow access key. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. accessKeyName is the workflow access key name. +func (client WorkflowAccessKeysClient) Get(resourceGroupName string, workflowName string, accessKeyName string) (result WorkflowAccessKey, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName, accessKeyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client WorkflowAccessKeysClient) GetPreparer(resourceGroupName string, workflowName string, accessKeyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accessKeyName": url.QueryEscape(accessKeyName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowAccessKeysClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkflowAccessKeysClient) GetResponder(resp *http.Response) (result WorkflowAccessKey, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of workflow access keys. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. top is the number of items to be included in the result. +func (client WorkflowAccessKeysClient) List(resourceGroupName string, workflowName string, top int) (result WorkflowAccessKeyListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, workflowName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WorkflowAccessKeysClient) ListPreparer(resourceGroupName string, workflowName string, top int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. 
The method will close the
+// http.Response Body if it receives an error.
+func (client WorkflowAccessKeysClient) ListSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client WorkflowAccessKeysClient) ListResponder(resp *http.Response) (result WorkflowAccessKeyListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client WorkflowAccessKeysClient) ListNextResults(lastResults WorkflowAccessKeyListResult) (result WorkflowAccessKeyListResult, ae error) {
+    req, err := lastResults.WorkflowAccessKeyListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "List", "Failure responding to next results request")
+    }
+
+    return
+}
+
+// ListSecretKeys lists the secret keys.
+//
+// resourceGroupName is the resource group name. workflowName is the workflow
+// name. accessKeyName is the workflow access key name.
+func (client WorkflowAccessKeysClient) ListSecretKeys(resourceGroupName string, workflowName string, accessKeyName string) (result WorkflowSecretKeys, ae error) {
+    req, err := client.ListSecretKeysPreparer(resourceGroupName, workflowName, accessKeyName)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "ListSecretKeys", "Failure preparing request")
+    }
+
+    resp, err := client.ListSecretKeysSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "ListSecretKeys", "Failure sending request")
+    }
+
+    result, err = client.ListSecretKeysResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "ListSecretKeys", "Failure responding to request")
+    }
+
+    return
+}
+
+// ListSecretKeysPreparer prepares the ListSecretKeys request.
+func (client WorkflowAccessKeysClient) ListSecretKeysPreparer(resourceGroupName string, workflowName string, accessKeyName string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "accessKeyName":     url.QueryEscape(accessKeyName),
+        "resourceGroupName": url.QueryEscape(resourceGroupName),
+        "subscriptionId":    url.QueryEscape(client.SubscriptionID),
+        "workflowName":      url.QueryEscape(workflowName),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsPost(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}/list"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSecretKeysSender sends the ListSecretKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkflowAccessKeysClient) ListSecretKeysSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK)
+}
+
+// ListSecretKeysResponder handles the response to the ListSecretKeys request. The method always
+// closes the http.Response Body.
+func (client WorkflowAccessKeysClient) ListSecretKeysResponder(resp *http.Response) (result WorkflowSecretKeys, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// RegenerateSecretKey regenerates the secret key.
+//
+// resourceGroupName is the resource group name. workflowName is the workflow
+// name. accessKeyName is the workflow access key name. parameters is the
+// secret key regeneration parameters.
+func (client WorkflowAccessKeysClient) RegenerateSecretKey(resourceGroupName string, workflowName string, accessKeyName string, parameters RegenerateSecretKeyParameters) (result WorkflowSecretKeys, ae error) {
+    req, err := client.RegenerateSecretKeyPreparer(resourceGroupName, workflowName, accessKeyName, parameters)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "RegenerateSecretKey", "Failure preparing request")
+    }
+
+    resp, err := client.RegenerateSecretKeySender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "RegenerateSecretKey", "Failure sending request")
+    }
+
+    result, err = client.RegenerateSecretKeyResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "logic/WorkflowAccessKeysClient", "RegenerateSecretKey", "Failure responding to request")
+    }
+
+    return
+}
+
+// RegenerateSecretKeyPreparer prepares the RegenerateSecretKey request.
+func (client WorkflowAccessKeysClient) RegenerateSecretKeyPreparer(resourceGroupName string, workflowName string, accessKeyName string, parameters RegenerateSecretKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accessKeyName": url.QueryEscape(accessKeyName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/accessKeys/{accessKeyName}/regenerate"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RegenerateSecretKeySender sends the RegenerateSecretKey request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowAccessKeysClient) RegenerateSecretKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RegenerateSecretKeyResponder handles the response to the RegenerateSecretKey request. The method always +// closes the http.Response Body. +func (client WorkflowAccessKeysClient) RegenerateSecretKeyResponder(resp *http.Response) (result WorkflowSecretKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go' --- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowrunactions.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,202 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// WorkflowRunActionsClient is the client for the WorkflowRunActions methods +// of the Logic service. +type WorkflowRunActionsClient struct { + ManagementClient +} + +// NewWorkflowRunActionsClient creates an instance of the +// WorkflowRunActionsClient client. 
+func NewWorkflowRunActionsClient(subscriptionID string) WorkflowRunActionsClient { + return NewWorkflowRunActionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkflowRunActionsClientWithBaseURI creates an instance of the +// WorkflowRunActionsClient client. +func NewWorkflowRunActionsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowRunActionsClient { + return WorkflowRunActionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a workflow run action. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. runName is the workflow run name. actionName is the workflow action +// name. +func (client WorkflowRunActionsClient) Get(resourceGroupName string, workflowName string, runName string, actionName string) (result WorkflowRunAction, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName, runName, actionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WorkflowRunActionsClient) GetPreparer(resourceGroupName string, workflowName string, runName string, actionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "actionName": url.QueryEscape(actionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "runName": url.QueryEscape(runName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/actions/{actionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowRunActionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkflowRunActionsClient) GetResponder(resp *http.Response) (result WorkflowRunAction, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of workflow run actions. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. runName is the workflow run name. top is the number of items to be +// included in the result. filter is the filter to apply on the operation. 
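+//
+// top and filter are passed through as the $top and $filter query
+// parameters. An illustrative call (placeholder names; the filter string is
+// an assumed OData-style expression, not one defined in this file):
+//
+//   actions, err := client.List("myGroup", "myWorkflow", "myRun", 25, "status eq 'Failed'")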
+func (client WorkflowRunActionsClient) List(resourceGroupName string, workflowName string, runName string, top int, filter string) (result WorkflowRunActionListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, workflowName, runName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WorkflowRunActionsClient) ListPreparer(resourceGroupName string, workflowName string, runName string, top int, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "runName": url.QueryEscape(runName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/actions"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowRunActionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client WorkflowRunActionsClient) ListResponder(resp *http.Response) (result WorkflowRunActionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
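+//
+// Together with List this supports draining every page; a sketch with
+// placeholder arguments:
+//
+//   page, err := client.List("myGroup", "myWorkflow", "myRun", 25, "")
+//   for err == nil {
+//       // ... consume page.Value ...
+//       if page.NextLink == nil {
+//           break
+//       }
+//       page, err = client.ListNextResults(page)
+//   }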
+func (client WorkflowRunActionsClient) ListNextResults(lastResults WorkflowRunActionListResult) (result WorkflowRunActionListResult, ae error) {
+    req, err := lastResults.WorkflowRunActionListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "logic/WorkflowRunActionsClient", "List", "Failure responding to next results request")
+    }
+
+    return
+} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowruns.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,262 @@
+package logic
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+    "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+    "net/http"
+    "net/url"
+)
+
+// WorkflowRunsClient is the client for the WorkflowRuns methods of the Logic
+// service.
+type WorkflowRunsClient struct {
+    ManagementClient
+}
+
+// NewWorkflowRunsClient creates an instance of the WorkflowRunsClient client.
+func NewWorkflowRunsClient(subscriptionID string) WorkflowRunsClient {
+    return NewWorkflowRunsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWorkflowRunsClientWithBaseURI creates an instance of the
+// WorkflowRunsClient client.
+func NewWorkflowRunsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowRunsClient {
+    return WorkflowRunsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Cancel cancels a workflow run.
+//
+// resourceGroupName is the resource group name. workflowName is the workflow
+// name. runName is the workflow run name.
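+//
+// An illustrative cancellation (placeholder names):
+//
+//   resp, err := client.Cancel("myGroup", "myWorkflow", "myRun")
+//   if err != nil {
+//       // the cancellation was rejected or the request itself failed
+//   }
+//   _ = resp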
+func (client WorkflowRunsClient) Cancel(resourceGroupName string, workflowName string, runName string) (result autorest.Response, ae error) { + req, err := client.CancelPreparer(resourceGroupName, workflowName, runName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Cancel", "Failure preparing request") + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Cancel", "Failure sending request") + } + + result, err = client.CancelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Cancel", "Failure responding to request") + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client WorkflowRunsClient) CancelPreparer(resourceGroupName string, workflowName string, runName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "runName": url.QueryEscape(runName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}/cancel"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowRunsClient) CancelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. +func (client WorkflowRunsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a workflow run. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. runName is the workflow run name. +func (client WorkflowRunsClient) Get(resourceGroupName string, workflowName string, runName string) (result WorkflowRun, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName, runName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client WorkflowRunsClient) GetPreparer(resourceGroupName string, workflowName string, runName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "runName": url.QueryEscape(runName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs/{runName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowRunsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkflowRunsClient) GetResponder(resp *http.Response) (result WorkflowRun, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of workflow runs. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. top is the number of items to be included in the result. filter is +// the filter to apply on the operation. +func (client WorkflowRunsClient) List(resourceGroupName string, workflowName string, top int, filter string) (result WorkflowRunListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, workflowName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WorkflowRunsClient) ListPreparer(resourceGroupName string, workflowName string, top int, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/runs"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. 
The method will close the
+// http.Response Body if it receives an error.
+func (client WorkflowRunsClient) ListSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client WorkflowRunsClient) ListResponder(resp *http.Response) (result WorkflowRunListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client WorkflowRunsClient) ListNextResults(lastResults WorkflowRunListResult) (result WorkflowRunListResult, ae error) {
+    req, err := lastResults.WorkflowRunListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "logic/WorkflowRunsClient", "List", "Failure responding to next results request")
+    }
+
+    return
+} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflows.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,731 @@
+package logic
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+    "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+    "net/http"
+    "net/url"
+)
+
+// WorkflowsClient is the client for the Workflows methods of the Logic
+// service.
+type WorkflowsClient struct {
+    ManagementClient
+}
+
+// NewWorkflowsClient creates an instance of the WorkflowsClient client.
+func NewWorkflowsClient(subscriptionID string) WorkflowsClient {
+    return NewWorkflowsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWorkflowsClientWithBaseURI creates an instance of the WorkflowsClient
+// client.
+func NewWorkflowsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowsClient {
+    return WorkflowsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a workflow.
+//
+// resourceGroupName is the resource group name.
workflowName is the workflow +// name. workflow is the workflow. +func (client WorkflowsClient) CreateOrUpdate(resourceGroupName string, workflowName string, workflow Workflow) (result Workflow, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, workflowName, workflow) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client WorkflowsClient) CreateOrUpdatePreparer(resourceGroupName string, workflowName string, workflow Workflow) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), + autorest.WithJSON(workflow), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) CreateOrUpdateResponder(resp *http.Response) (result Workflow, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a workflow. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. +func (client WorkflowsClient) Delete(resourceGroupName string, workflowName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, workflowName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client WorkflowsClient) DeletePreparer(resourceGroupName string, workflowName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Disable disables a workflow. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. +func (client WorkflowsClient) Disable(resourceGroupName string, workflowName string) (result autorest.Response, ae error) { + req, err := client.DisablePreparer(resourceGroupName, workflowName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Disable", "Failure preparing request") + } + + resp, err := client.DisableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Disable", "Failure sending request") + } + + result, err = client.DisableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Disable", "Failure responding to request") + } + + return +} + +// DisablePreparer prepares the Disable request. +func (client WorkflowsClient) DisablePreparer(resourceGroupName string, workflowName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/disable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DisableSender sends the Disable request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) DisableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DisableResponder handles the response to the Disable request. The method always +// closes the http.Response Body. 
+func (client WorkflowsClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Enable enables a workflow. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. +func (client WorkflowsClient) Enable(resourceGroupName string, workflowName string) (result autorest.Response, ae error) { + req, err := client.EnablePreparer(resourceGroupName, workflowName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Enable", "Failure preparing request") + } + + resp, err := client.EnableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Enable", "Failure sending request") + } + + result, err = client.EnableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Enable", "Failure responding to request") + } + + return +} + +// EnablePreparer prepares the Enable request. +func (client WorkflowsClient) EnablePreparer(resourceGroupName string, workflowName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/enable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// EnableSender sends the Enable request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) EnableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// EnableResponder handles the response to the Enable request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a workflow. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. +func (client WorkflowsClient) Get(resourceGroupName string, workflowName string) (result Workflow, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
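+// As elsewhere in this package, the prepared request can be sent with
+// GetSender and decoded with GetResponder; a sketch (values hypothetical,
+// error handling elided):
+//
+//    req, _ := client.GetPreparer("myResourceGroup", "myWorkflow")
+//    resp, _ := client.GetSender(req)
+//    wf, _ := client.GetResponder(resp)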
+func (client WorkflowsClient) GetPreparer(resourceGroupName string, workflowName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) GetResponder(resp *http.Response) (result Workflow, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup gets a list of workflows by resource group. +// +// resourceGroupName is the resource group name. top is the number of items to +// be included in the result. filter is the filter to apply on the operation. +func (client WorkflowsClient) ListByResourceGroup(resourceGroupName string, top int, filter string) (result WorkflowListResult, ae error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client WorkflowsClient) ListByResourceGroupPreparer(resourceGroupName string, top int, filter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
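+//
+// Most callers use the composed ListByResourceGroup method instead; for
+// example, requesting the first page of at most ten workflows (values
+// hypothetical):
+//
+//    page, err := client.ListByResourceGroup("myResourceGroup", 10, "")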
+func (client WorkflowsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client WorkflowsClient) ListByResourceGroupResponder(resp *http.Response) (result WorkflowListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListByResourceGroupNextResults retrieves the next set of results, if any.
+func (client WorkflowsClient) ListByResourceGroupNextResults(lastResults WorkflowListResult) (result WorkflowListResult, ae error) {
+ req, err := lastResults.WorkflowListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListByResourceGroupSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure sending next results request")
+ }
+
+ result, err = client.ListByResourceGroupResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListByResourceGroup", "Failure responding to next results request")
+ }
+
+ return
+}
+
+// ListBySubscription gets a list of workflows by subscription.
+//
+// top is the number of items to be included in the result. filter is the
+// filter to apply on the operation.
+func (client WorkflowsClient) ListBySubscription(top int, filter string) (result WorkflowListResult, ae error) {
+ req, err := client.ListBySubscriptionPreparer(top, filter)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure preparing request")
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure sending request")
+ }
+
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure responding to request")
+ }
+
+ return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client WorkflowsClient) ListBySubscriptionPreparer(top int, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "$filter": filter,
+ "$top": top,
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Logic/workflows"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
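+//
+// Paged results can be walked by feeding each page back through
+// ListBySubscriptionNextResults, which returns an empty result and a nil
+// error once no further pages remain; a sketch (error handling elided):
+//
+//    page, _ := client.ListBySubscription(10, "")
+//    next, _ := client.ListBySubscriptionNextResults(page)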
+func (client WorkflowsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client WorkflowsClient) ListBySubscriptionResponder(resp *http.Response) (result WorkflowListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListBySubscriptionNextResults retrieves the next set of results, if any.
+func (client WorkflowsClient) ListBySubscriptionNextResults(lastResults WorkflowListResult) (result WorkflowListResult, ae error) {
+ req, err := lastResults.WorkflowListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListBySubscriptionSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure sending next results request")
+ }
+
+ result, err = client.ListBySubscriptionResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "ListBySubscription", "Failure responding to next results request")
+ }
+
+ return
+}
+
+// Run runs a workflow.
+//
+// resourceGroupName is the resource group name. workflowName is the workflow
+// name. parameters is the parameters.
+func (client WorkflowsClient) Run(resourceGroupName string, workflowName string, parameters RunWorkflowParameters) (result WorkflowRun, ae error) {
+ req, err := client.RunPreparer(resourceGroupName, workflowName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Run", "Failure preparing request")
+ }
+
+ resp, err := client.RunSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Run", "Failure sending request")
+ }
+
+ result, err = client.RunResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Run", "Failure responding to request")
+ }
+
+ return
+}
+
+// RunPreparer prepares the Run request.
+func (client WorkflowsClient) RunPreparer(resourceGroupName string, workflowName string, parameters RunWorkflowParameters) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "workflowName": url.QueryEscape(workflowName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/run"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// RunSender sends the Run request. The method will close the
+// http.Response Body if it receives an error.
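+//
+// The composed Run method is the usual entry point; a sketch (names
+// hypothetical, with the zero RunWorkflowParameters value used purely for
+// illustration):
+//
+//    run, err := client.Run("myResourceGroup", "myWorkflow", RunWorkflowParameters{})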
+func (client WorkflowsClient) RunSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted) +} + +// RunResponder handles the response to the Run request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) RunResponder(resp *http.Response) (result WorkflowRun, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates a workflow. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. workflow is the workflow. +func (client WorkflowsClient) Update(resourceGroupName string, workflowName string, workflow Workflow) (result Workflow, ae error) { + req, err := client.UpdatePreparer(resourceGroupName, workflowName, workflow) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Update", "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Update", "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Update", "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client WorkflowsClient) UpdatePreparer(resourceGroupName string, workflowName string, workflow Workflow) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}"), + autorest.WithJSON(workflow), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) UpdateResponder(resp *http.Response) (result Workflow, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Validate validates a workflow. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. workflow is the workflow. 
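+//
+// A sketch of a call (values hypothetical; wf is a previously constructed
+// Workflow, and a nil error indicates the definition validated):
+//
+//    res, err := client.Validate("myResourceGroup", "myWorkflow", wf)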
+func (client WorkflowsClient) Validate(resourceGroupName string, workflowName string, workflow Workflow) (result autorest.Response, ae error) { + req, err := client.ValidatePreparer(resourceGroupName, workflowName, workflow) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Validate", "Failure preparing request") + } + + resp, err := client.ValidateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Validate", "Failure sending request") + } + + result, err = client.ValidateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowsClient", "Validate", "Failure responding to request") + } + + return +} + +// ValidatePreparer prepares the Validate request. +func (client WorkflowsClient) ValidatePreparer(resourceGroupName string, workflowName string, workflow Workflow) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/validate"), + autorest.WithJSON(workflow), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ValidateSender sends the Validate request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowsClient) ValidateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ValidateResponder handles the response to the Validate request. The method always +// closes the http.Response Body. +func (client WorkflowsClient) ValidateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go' --- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggerhistories.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,201 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// WorkflowTriggerHistoriesClient is the client for the +// WorkflowTriggerHistories methods of the Logic service. +type WorkflowTriggerHistoriesClient struct { + ManagementClient +} + +// NewWorkflowTriggerHistoriesClient creates an instance of the +// WorkflowTriggerHistoriesClient client. +func NewWorkflowTriggerHistoriesClient(subscriptionID string) WorkflowTriggerHistoriesClient { + return NewWorkflowTriggerHistoriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkflowTriggerHistoriesClientWithBaseURI creates an instance of the +// WorkflowTriggerHistoriesClient client. +func NewWorkflowTriggerHistoriesClientWithBaseURI(baseURI string, subscriptionID string) WorkflowTriggerHistoriesClient { + return WorkflowTriggerHistoriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a workflow trigger history. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. triggerName is the workflow trigger name. historyName is the +// workflow trigger history name. +func (client WorkflowTriggerHistoriesClient) Get(resourceGroupName string, workflowName string, triggerName string, historyName string) (result WorkflowTriggerHistory, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName, triggerName, historyName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WorkflowTriggerHistoriesClient) GetPreparer(resourceGroupName string, workflowName string, triggerName string, historyName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "historyName": url.QueryEscape(historyName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "triggerName": url.QueryEscape(triggerName), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories/{historyName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowTriggerHistoriesClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
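+//
+// Typical use goes through the composed Get method; a sketch (all names
+// hypothetical):
+//
+//    h, err := client.Get("myResourceGroup", "myWorkflow", "myTrigger", "myHistory")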
+func (client WorkflowTriggerHistoriesClient) GetResponder(resp *http.Response) (result WorkflowTriggerHistory, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of workflow trigger histories. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. triggerName is the workflow trigger name. top is the number of items +// to be included in the result. +func (client WorkflowTriggerHistoriesClient) List(resourceGroupName string, workflowName string, triggerName string, top int) (result WorkflowTriggerHistoryListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, workflowName, triggerName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client WorkflowTriggerHistoriesClient) ListPreparer(resourceGroupName string, workflowName string, triggerName string, top int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "triggerName": url.QueryEscape(triggerName), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/histories"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowTriggerHistoriesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client WorkflowTriggerHistoriesClient) ListResponder(resp *http.Response) (result WorkflowTriggerHistoryListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
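+// It returns an empty result and a nil error when the previous page carried
+// no link to further results; a sketch (values hypothetical, error handling
+// elided):
+//
+//    page, _ := client.List("myResourceGroup", "myWorkflow", "myTrigger", 10)
+//    next, _ := client.ListNextResults(page)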
+func (client WorkflowTriggerHistoriesClient) ListNextResults(lastResults WorkflowTriggerHistoryListResult) (result WorkflowTriggerHistoryListResult, ae error) {
+ req, err := lastResults.WorkflowTriggerHistoryListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure sending next results request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggerHistoriesClient", "List", "Failure responding to next results request")
+ }
+
+ return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowtriggers.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,263 @@
+package logic
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+ "net/http"
+ "net/url"
+)
+
+// WorkflowTriggersClient is the client for the WorkflowTriggers methods of
+// the Logic service.
+type WorkflowTriggersClient struct {
+ ManagementClient
+}
+
+// NewWorkflowTriggersClient creates an instance of the WorkflowTriggersClient
+// client.
+func NewWorkflowTriggersClient(subscriptionID string) WorkflowTriggersClient {
+ return NewWorkflowTriggersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWorkflowTriggersClientWithBaseURI creates an instance of the
+// WorkflowTriggersClient client.
+func NewWorkflowTriggersClientWithBaseURI(baseURI string, subscriptionID string) WorkflowTriggersClient {
+ return WorkflowTriggersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets a workflow trigger.
+//
+// resourceGroupName is the resource group name. workflowName is the workflow
+// name. triggerName is the workflow trigger name.
+func (client WorkflowTriggersClient) Get(resourceGroupName string, workflowName string, triggerName string) (result WorkflowTrigger, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName, triggerName) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WorkflowTriggersClient) GetPreparer(resourceGroupName string, workflowName string, triggerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "triggerName": url.QueryEscape(triggerName), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowTriggersClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkflowTriggersClient) GetResponder(resp *http.Response) (result WorkflowTrigger, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of workflow triggers. +// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. top is the number of items to be included in the result. filter is +// the filter to apply on the operation. +func (client WorkflowTriggersClient) List(resourceGroupName string, workflowName string, top int, filter string) (result WorkflowTriggerListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, workflowName, top, filter) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
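+// The top and filter arguments to List surface here as the $top and $filter
+// query parameters; a sketch of the composed call (values hypothetical):
+//
+//    triggers, err := client.List("myResourceGroup", "myWorkflow", 10, "")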
+func (client WorkflowTriggersClient) ListPreparer(resourceGroupName string, workflowName string, top int, filter string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ "workflowName": url.QueryEscape(workflowName),
+ }
+
+ queryParameters := map[string]interface{}{
+ "$filter": filter,
+ "$top": top,
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client WorkflowTriggersClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client WorkflowTriggersClient) ListResponder(resp *http.Response) (result WorkflowTriggerListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client WorkflowTriggersClient) ListNextResults(lastResults WorkflowTriggerListResult) (result WorkflowTriggerListResult, ae error) {
+ req, err := lastResults.WorkflowTriggerListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure sending next results request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "List", "Failure responding to next results request")
+ }
+
+ return
+}
+
+// Run runs a workflow trigger.
+//
+// resourceGroupName is the resource group name. workflowName is the workflow
+// name. triggerName is the workflow trigger name.
+func (client WorkflowTriggersClient) Run(resourceGroupName string, workflowName string, triggerName string) (result autorest.Response, ae error) {
+ req, err := client.RunPreparer(resourceGroupName, workflowName, triggerName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Run", "Failure preparing request")
+ }
+
+ resp, err := client.RunSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Run", "Failure sending request")
+ }
+
+ result, err = client.RunResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "logic/WorkflowTriggersClient", "Run", "Failure responding to request")
+ }
+
+ return
+}
+
+// RunPreparer prepares the Run request.
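+// The composed Run method above wraps this preparer; a sketch of firing a
+// trigger (values hypothetical):
+//
+//    res, err := client.Run("myResourceGroup", "myWorkflow", "myTrigger")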
+func (client WorkflowTriggersClient) RunPreparer(resourceGroupName string, workflowName string, triggerName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "triggerName": url.QueryEscape(triggerName), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/triggers/{triggerName}/run"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RunSender sends the Run request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowTriggersClient) RunSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RunResponder handles the response to the Run request. The method always +// closes the http.Response Body. +func (client WorkflowTriggersClient) RunResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go' --- src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/logic/workflowversions.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +package logic + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// WorkflowVersionsClient is the client for the WorkflowVersions methods of +// the Logic service. +type WorkflowVersionsClient struct { + ManagementClient +} + +// NewWorkflowVersionsClient creates an instance of the WorkflowVersionsClient +// client. +func NewWorkflowVersionsClient(subscriptionID string) WorkflowVersionsClient { + return NewWorkflowVersionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWorkflowVersionsClientWithBaseURI creates an instance of the +// WorkflowVersionsClient client. +func NewWorkflowVersionsClientWithBaseURI(baseURI string, subscriptionID string) WorkflowVersionsClient { + return WorkflowVersionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a workflow version. 
+// +// resourceGroupName is the resource group name. workflowName is the workflow +// name. versionID is the workflow versionId. +func (client WorkflowVersionsClient) Get(resourceGroupName string, workflowName string, versionID string) (result WorkflowVersion, ae error) { + req, err := client.GetPreparer(resourceGroupName, workflowName, versionID) + if err != nil { + return result, autorest.NewErrorWithError(err, "logic/WorkflowVersionsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "logic/WorkflowVersionsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "logic/WorkflowVersionsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client WorkflowVersionsClient) GetPreparer(resourceGroupName string, workflowName string, versionID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "versionId": url.QueryEscape(versionID), + "workflowName": url.QueryEscape(workflowName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/versions/{versionId}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client WorkflowVersionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client WorkflowVersionsClient) GetResponder(resp *http.Response) (result WorkflowVersion, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/network' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go' --- src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,538 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+ "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+ "net/http"
+ "net/url"
+)
+
+// ApplicationGatewaysClient is the client for the ApplicationGateways methods
+// of the Network service.
+type ApplicationGatewaysClient struct {
+ ManagementClient
+}
+
+// NewApplicationGatewaysClient creates an instance of the
+// ApplicationGatewaysClient client.
+func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient {
+ return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewApplicationGatewaysClientWithBaseURI creates an instance of the
+// ApplicationGatewaysClient client.
+func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient {
+ return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put ApplicationGateway operation creates/updates an
+// ApplicationGateway.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the ApplicationGateway. parameters is parameters supplied
+// to the create/update ApplicationGateway operation.
+func (client ApplicationGatewaysClient) CreateOrUpdate(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (result ApplicationGateway, ae error) {
+ req, err := client.CreateOrUpdatePreparer(resourceGroupName, applicationGatewayName, parameters)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", "Failure preparing request")
+ }
+
+ resp, err := client.CreateOrUpdateSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", "Failure sending request")
+ }
+
+ result, err = client.CreateOrUpdateResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "CreateOrUpdate", "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, applicationGatewayName string, parameters ApplicationGateway) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationGatewayName": url.QueryEscape(applicationGatewayName),
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"),
+ autorest.WithJSON(parameters),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
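+//
+// The composed CreateOrUpdate call accepts either 200 or 201 from the
+// service; a sketch (names hypothetical; gw is a previously populated
+// ApplicationGateway):
+//
+//    result, err := client.CreateOrUpdate("myResourceGroup", "myGateway", gw)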
+func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result ApplicationGateway, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// Delete the delete application gateway operation deletes the specified
+// application gateway.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the application gateway.
+func (client ApplicationGatewaysClient) Delete(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) {
+ req, err := client.DeletePreparer(resourceGroupName, applicationGatewayName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", "Failure preparing request")
+ }
+
+ resp, err := client.DeleteSender(req)
+ if err != nil {
+ result.Response = resp
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", "Failure sending request")
+ }
+
+ result, err = client.DeleteResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Delete", "Failure responding to request")
+ }
+
+ return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationGatewayName": url.QueryEscape(applicationGatewayName),
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsDelete(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
+// Get the Get application gateway operation retrieves information about the
+// specified application gateway.
+//
+// resourceGroupName is the name of the resource group.
applicationGatewayName
+// is the name of the application gateway.
+func (client ApplicationGatewaysClient) Get(resourceGroupName string, applicationGatewayName string) (result ApplicationGateway, ae error) {
+ req, err := client.GetPreparer(resourceGroupName, applicationGatewayName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", "Failure preparing request")
+ }
+
+ resp, err := client.GetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", "Failure sending request")
+ }
+
+ result, err = client.GetResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Get", "Failure responding to request")
+ }
+
+ return
+}
+
+// GetPreparer prepares the Get request.
+func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "applicationGatewayName": url.QueryEscape(applicationGatewayName),
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (result ApplicationGateway, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// List the List ApplicationGateway operation retrieves all the
+// application gateways in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client ApplicationGatewaysClient) List(resourceGroupName string) (result ApplicationGatewayListResult, ae error) {
+ req, err := client.ListPreparer(resourceGroupName)
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure preparing request")
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure sending request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure responding to request")
+ }
+
+ return
+}
+
+// ListPreparer prepares the List request.
+func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "resourceGroupName": url.QueryEscape(resourceGroupName),
+ "subscriptionId": url.QueryEscape(client.SubscriptionID),
+ }
+
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways"),
+ autorest.WithPathParameters(pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
+ return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) ListResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ autorest.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, ae error) {
+ req, err := lastResults.ApplicationGatewayListResultPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure preparing next results request")
+ }
+ if req == nil {
+ return
+ }
+
+ resp, err := client.ListSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure sending next results request")
+ }
+
+ result, err = client.ListResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "List", "Failure responding to next results request")
+ }
+
+ return
+}
+
+// ListAll the List ApplicationGateway operation retrieves all the
+// application gateways in a subscription.
+func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, ae error) {
+ req, err := client.ListAllPreparer()
+ if err != nil {
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure preparing request")
+ }
+
+ resp, err := client.ListAllSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure sending request")
+ }
+
+ result, err = client.ListAllResponder(resp)
+ if err != nil {
+ ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure responding to request")
+ }
+
+ return
+}
+
+// ListAllPreparer prepares the ListAll request.
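+// A sketch of listing every application gateway in the subscription and
+// fetching the following page, if any (error handling elided):
+//
+//    all, _ := client.ListAll()
+//    next, _ := client.ListAllNextResults(all)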
+func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/applicationGateways"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) ListAllResponder(resp *http.Response) (result ApplicationGatewayListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client ApplicationGatewaysClient) ListAllNextResults(lastResults ApplicationGatewayListResult) (result ApplicationGatewayListResult, ae error) {
+	req, err := lastResults.ApplicationGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Start the Start ApplicationGateway operation starts the application
+// gateway in the specified resource group through Network resource provider.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the application gateway.
+func (client ApplicationGatewaysClient) Start(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.StartPreparer(resourceGroupName, applicationGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", "Failure preparing request")
+	}
+
+	resp, err := client.StartSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", "Failure sending request")
+	}
+
+	result, err = client.StartResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Start", "Failure responding to request")
+	}
+
+	return
+}
+
+// StartPreparer prepares the Start request.
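+//
+// A minimal sketch (illustrative names, error handling elided): Start
+// returns only the wrapped *http.Response, and the service acknowledges the
+// request with 202 Accepted:
+//
+//	res, err := client.Start("my-resource-group", "my-gateway")
+//	if err == nil && res.StatusCode == http.StatusAccepted {
+//		// the gateway is starting
+//	}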
+func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"applicationGatewayName": url.QueryEscape(applicationGatewayName),
+		"resourceGroupName":      url.QueryEscape(resourceGroupName),
+		"subscriptionId":         url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/start"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// StartSender sends the Start request. The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted)
+}
+
+// StartResponder handles the response to the Start request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) StartResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Stop the Stop ApplicationGateway operation stops the application
+// gateway in the specified resource group through Network resource provider.
+//
+// resourceGroupName is the name of the resource group. applicationGatewayName
+// is the name of the application gateway.
+func (client ApplicationGatewaysClient) Stop(resourceGroupName string, applicationGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.StopPreparer(resourceGroupName, applicationGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", "Failure preparing request")
+	}
+
+	resp, err := client.StopSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", "Failure sending request")
+	}
+
+	result, err = client.StopResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/ApplicationGatewaysClient", "Stop", "Failure responding to request")
+	}
+
+	return
+}
+
+// StopPreparer prepares the Stop request.
+func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, applicationGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"applicationGatewayName": url.QueryEscape(applicationGatewayName),
+		"resourceGroupName":      url.QueryEscape(resourceGroupName),
+		"subscriptionId":         url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/applicationGateways/{applicationGatewayName}/stop"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// StopSender sends the Stop request.
The method will close the
+// http.Response Body if it receives an error.
+func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted, http.StatusOK)
+}
+
+// StopResponder handles the response to the Stop request. The method always
+// closes the http.Response Body.
+func (client ApplicationGatewaysClient) StopResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/client.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,120 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+const (
+	// APIVersion is the version of the Network API.
+	APIVersion = "2015-05-01-preview"
+
+	// DefaultBaseURI is the default URI used for the Network service.
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Network.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
+
+// CheckDNSNameAvailability checks whether a domain name in the cloudapp.net
+// zone is available for use.
+//
+// location is the location of the domain name. domainNameLabel is the domain
+// name to be verified. It must conform to the following regular expression:
+// ^[a-z][a-z0-9-]{1,61}[a-z0-9]$.
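+//
+// A minimal caller-side sketch (subscription ID and label are illustrative;
+// it assumes DNSNameAvailabilityResult exposes an Available *bool field, as
+// the generated models in this package do for optional booleans):
+//
+//	c := network.New("11111111-2222-3333-4444-555555555555")
+//	res, err := c.CheckDNSNameAvailability("westus", "mydnslabel")
+//	if err == nil && res.Available != nil && *res.Available {
+//		// the label is free in the cloudapp.net zone
+//	}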
+func (client ManagementClient) CheckDNSNameAvailability(location string, domainNameLabel string) (result DNSNameAvailabilityResult, ae error) { + req, err := client.CheckDNSNameAvailabilityPreparer(location, domainNameLabel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", "Failure preparing request") + } + + resp, err := client.CheckDNSNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", "Failure sending request") + } + + result, err = client.CheckDNSNameAvailabilityResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/ManagementClient", "CheckDNSNameAvailability", "Failure responding to request") + } + + return +} + +// CheckDNSNameAvailabilityPreparer prepares the CheckDNSNameAvailability request. +func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string, domainNameLabel string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + "domainNameLabel": domainNameLabel, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/CheckDnsNameAvailability"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always +// closes the http.Response Body. +func (client ManagementClient) CheckDNSNameAvailabilityResponder(resp *http.Response) (result DNSNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go' --- src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,409 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// InterfacesClient is the client for the Interfaces methods of the Network +// service. +type InterfacesClient struct { + ManagementClient +} + +// NewInterfacesClient creates an instance of the InterfacesClient client. +func NewInterfacesClient(subscriptionID string) InterfacesClient { + return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient +// client. +func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient { + return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put NetworkInterface operation creates/updates a +// networkInterface +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. parameters is parameters supplied to +// the create/update NetworkInterface operation +func (client InterfacesClient) CreateOrUpdate(resourceGroupName string, networkInterfaceName string, parameters Interface) (result Interface, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkInterfaceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, networkInterfaceName string, parameters Interface) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": url.QueryEscape(networkInterfaceName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client InterfacesClient) CreateOrUpdateResponder(resp *http.Response) (result Interface, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the delete networkInterface operation deletes the specified
+// networkInterface.
+//
+// resourceGroupName is the name of the resource group. networkInterfaceName
+// is the name of the network interface.
+func (client InterfacesClient) Delete(resourceGroupName string, networkInterfaceName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, networkInterfaceName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkInterfaceName": url.QueryEscape(networkInterfaceName),
+		"resourceGroupName":    url.QueryEscape(resourceGroupName),
+		"subscriptionId":       url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client InterfacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get network interface operation retrieves information about the
+// specified network interface.
+//
+// resourceGroupName is the name of the resource group. networkInterfaceName
+// is the name of the network interface.
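+//
+// A minimal sketch (names are illustrative; the client is assumed to carry
+// valid credentials):
+//
+//	nic, err := client.Get("my-resource-group", "my-nic")
+//	if err != nil {
+//		// the error names the failing phase: preparing, sending,
+//		// or responding
+//	}
+//	_ = nic.Response // the raw *http.Response is retained on the result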
+func (client InterfacesClient) Get(resourceGroupName string, networkInterfaceName string) (result Interface, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, networkInterfaceName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInterfaceName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkInterfaceName": url.QueryEscape(networkInterfaceName),
+		"resourceGroupName":    url.QueryEscape(resourceGroupName),
+		"subscriptionId":       url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client InterfacesClient) GetResponder(resp *http.Response) (result Interface, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List networkInterfaces operation retrieves all the
+// networkInterfaces in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client InterfacesClient) List(resourceGroupName string) (result InterfaceListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
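+//
+// A paging sketch (not generated code) that drains List via
+// ListNextResults; it assumes InterfaceListResult carries the NextLink and
+// Value fields consumed by its InterfaceListResultPreparer in models.go:
+//
+//	page, err := client.List("my-resource-group")
+//	for err == nil {
+//		// ... consume *page.Value ...
+//		if page.NextLink == nil || *page.NextLink == "" {
+//			break
+//		}
+//		page, err = client.ListNextResults(page)
+//	}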
+func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client InterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) {
+	req, err := lastResults.InterfaceListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List networkInterfaces operation retrieves all the
+// networkInterfaces in a subscription.
+func (client InterfacesClient) ListAll() (result InterfaceListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client InterfacesClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client InterfacesClient) ListAllResponder(resp *http.Response) (result InterfaceListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResult) (result InterfaceListResult, ae error) {
+	req, err := lastResults.InterfaceListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/InterfacesClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,408 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// LoadBalancersClient is the client for the LoadBalancers methods of the +// Network service. +type LoadBalancersClient struct { + ManagementClient +} + +// NewLoadBalancersClient creates an instance of the LoadBalancersClient +// client. +func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient { + return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancersClientWithBaseURI creates an instance of the +// LoadBalancersClient client. +func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient { + return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put LoadBalancer operation creates/updates a LoadBalancer +// +// resourceGroupName is the name of the resource group. loadBalancerName is +// the name of the loadBalancer. parameters is parameters supplied to the +// create/delete LoadBalancer operation +func (client LoadBalancersClient) CreateOrUpdate(resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (result LoadBalancer, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, loadBalancerName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName string, loadBalancerName string, parameters LoadBalancer) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "loadBalancerName": url.QueryEscape(loadBalancerName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
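+//
+// A round-trip sketch for the CreateOrUpdate triple above (illustrative
+// names; it assumes the LoadBalancer model has the usual ARM Location field
+// and uses the vendored autorest/to pointer helpers):
+//
+//	lb := LoadBalancer{Location: to.StringPtr("westus")}
+//	created, err := client.CreateOrUpdate("my-resource-group", "my-lb", lb)
+//	_, _ = created, err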
+func (client LoadBalancersClient) CreateOrUpdateResponder(resp *http.Response) (result LoadBalancer, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the delete LoadBalancer operation deletes the specified load
+// balancer.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer.
+func (client LoadBalancersClient) Delete(resourceGroupName string, loadBalancerName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, loadBalancerName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  url.QueryEscape(loadBalancerName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get LoadBalancer operation retrieves information about the
+// specified load balancer.
+//
+// resourceGroupName is the name of the resource group. loadBalancerName is
+// the name of the loadBalancer.
+func (client LoadBalancersClient) Get(resourceGroupName string, loadBalancerName string) (result LoadBalancer, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, loadBalancerName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBalancerName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"loadBalancerName":  url.QueryEscape(loadBalancerName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) GetResponder(resp *http.Response) (result LoadBalancer, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List loadBalancer operation retrieves all the load balancers in a
+// resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client LoadBalancersClient) List(resourceGroupName string) (result LoadBalancerListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) ListResponder(resp *http.Response) (result LoadBalancerListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, ae error) {
+	req, err := lastResults.LoadBalancerListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List loadBalancer operation retrieves all the load balancers
+// in a subscription.
+func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client LoadBalancersClient) ListAllResponder(resp *http.Response) (result LoadBalancerListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client LoadBalancersClient) ListAllNextResults(lastResults LoadBalancerListResult) (result LoadBalancerListResult, ae error) {
+	req, err := lastResults.LoadBalancerListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LoadBalancersClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,328 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// LocalNetworkGatewaysClient is the client for the LocalNetworkGateways +// methods of the Network service. +type LocalNetworkGatewaysClient struct { + ManagementClient +} + +// NewLocalNetworkGatewaysClient creates an instance of the +// LocalNetworkGatewaysClient client. +func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient { + return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the +// LocalNetworkGatewaysClient client. +func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient { + return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put LocalNetworkGateway operation creates/updates a +// local network gateway in the specified resource group through Network +// resource provider. +// +// resourceGroupName is the name of the resource group. +// localNetworkGatewayName is the name of the local network gateway. +// parameters is parameters supplied to the Begin Create or update Local +// Network Gateway operation through Network resource provider. +func (client LocalNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (result LocalNetworkGateway, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, localNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, localNetworkGatewayName string, parameters LocalNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result LocalNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete LocalNetworkGateway operation deletes the specified local
+// network gateway through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+func (client LocalNetworkGatewaysClient) Delete(resourceGroupName string, localNetworkGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, localNetworkGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName),
+		"resourceGroupName":       url.QueryEscape(resourceGroupName),
+		"subscriptionId":          url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}/"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted, http.StatusNoContent, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
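+//
+// A minimal sketch (illustrative names, error handling elided): Delete
+// returns only the wrapped *http.Response, and any of the 202/204/200
+// statuses accepted by DeleteSender indicates success:
+//
+//	res, err := client.Delete("my-resource-group", "my-local-gateway")
+//	if err == nil {
+//		_ = res.StatusCode
+//	}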
+func (client LocalNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get LocalNetworkGateway operation retrieves information about the
+// specified local network gateway through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// localNetworkGatewayName is the name of the local network gateway.
+func (client LocalNetworkGatewaysClient) Get(resourceGroupName string, localNetworkGatewayName string) (result LocalNetworkGateway, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, localNetworkGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, localNetworkGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"localNetworkGatewayName": url.QueryEscape(localNetworkGatewayName),
+		"resourceGroupName":       url.QueryEscape(resourceGroupName),
+		"subscriptionId":          url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways/{localNetworkGatewayName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (result LocalNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List LocalNetworkGateways operation retrieves all the local
+// network gateways stored in the resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/localNetworkGateways"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LocalNetworkGatewaysClient) ListResponder(resp *http.Response) (result LocalNetworkGatewayListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client LocalNetworkGatewaysClient) ListNextResults(lastResults LocalNetworkGatewayListResult) (result LocalNetworkGatewayListResult, ae error) {
+	req, err := lastResults.LocalNetworkGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/LocalNetworkGatewaysClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/models.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/models.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/models.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1104 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to"
+	"net/http"
+)
+
+// ApplicationGatewayCookieBasedAffinity enumerates the values for application
+// gateway cookie based affinity.
+type ApplicationGatewayCookieBasedAffinity string
+
+const (
+	// Disabled specifies the disabled state for application gateway cookie
+	// based affinity.
+	Disabled ApplicationGatewayCookieBasedAffinity = "Disabled"
+	// Enabled specifies the enabled state for application gateway cookie
+	// based affinity.
+	Enabled ApplicationGatewayCookieBasedAffinity = "Enabled"
+)
+
+// ApplicationGatewayOperationalState enumerates the values for application
+// gateway operational state.
+type ApplicationGatewayOperationalState string
+
+const (
+	// Running specifies the running state for application gateway operational
+	// state.
+	Running ApplicationGatewayOperationalState = "Running"
+	// Starting specifies the starting state for application gateway
+	// operational state.
+	Starting ApplicationGatewayOperationalState = "Starting"
+	// Stopped specifies the stopped state for application gateway operational
+	// state.
+	Stopped ApplicationGatewayOperationalState = "Stopped"
+	// Stopping specifies the stopping state for application gateway
+	// operational state.
+ Stopping ApplicationGatewayOperationalState = "Stopping" +) + +// ApplicationGatewayProtocol enumerates the values for application gateway +// protocol. +type ApplicationGatewayProtocol string + +const ( + // ApplicationGatewayProtocolHTTP specifies the application gateway + // protocol http state for application gateway protocol. + ApplicationGatewayProtocolHTTP ApplicationGatewayProtocol = "Http" + // ApplicationGatewayProtocolHTTPS specifies the application gateway + // protocol https state for application gateway protocol. + ApplicationGatewayProtocolHTTPS ApplicationGatewayProtocol = "Https" +) + +// ApplicationGatewayRequestRoutingRuleType enumerates the values for +// application gateway request routing rule type. +type ApplicationGatewayRequestRoutingRuleType string + +const ( + // Basic specifies the basic state for application gateway request routing + // rule type. + Basic ApplicationGatewayRequestRoutingRuleType = "Basic" +) + +// ApplicationGatewaySkuName enumerates the values for application gateway sku +// name. +type ApplicationGatewaySkuName string + +const ( + // StandardLarge specifies the standard large state for application + // gateway sku name. + StandardLarge ApplicationGatewaySkuName = "Standard_Large" + // StandardMedium specifies the standard medium state for application + // gateway sku name. + StandardMedium ApplicationGatewaySkuName = "Standard_Medium" + // StandardSmall specifies the standard small state for application + // gateway sku name. + StandardSmall ApplicationGatewaySkuName = "Standard_Small" +) + +// ApplicationGatewayTier enumerates the values for application gateway tier. +type ApplicationGatewayTier string + +const ( + // Standard specifies the standard state for application gateway tier. + Standard ApplicationGatewayTier = "Standard" +) + +// IPAllocationMethod enumerates the values for ip allocation method. +type IPAllocationMethod string + +const ( + // Dynamic specifies the dynamic state for ip allocation method. + Dynamic IPAllocationMethod = "Dynamic" + // Static specifies the static state for ip allocation method. + Static IPAllocationMethod = "Static" +) + +// LoadDistribution enumerates the values for load distribution. +type LoadDistribution string + +const ( + // Default specifies the default state for load distribution. + Default LoadDistribution = "Default" + // SourceIP specifies the source ip state for load distribution. + SourceIP LoadDistribution = "SourceIP" + // SourceIPProtocol specifies the source ip protocol state for load + // distribution. + SourceIPProtocol LoadDistribution = "SourceIPProtocol" +) + +// OperationStatus enumerates the values for operation status. +type OperationStatus string + +const ( + // Failed specifies the failed state for operation status. + Failed OperationStatus = "Failed" + // InProgress specifies the in progress state for operation status. + InProgress OperationStatus = "InProgress" + // Succeeded specifies the succeeded state for operation status. + Succeeded OperationStatus = "Succeeded" +) + +// ProbeProtocol enumerates the values for probe protocol. +type ProbeProtocol string + +const ( + // ProbeProtocolHTTP specifies the probe protocol http state for probe + // protocol. + ProbeProtocolHTTP ProbeProtocol = "Http" + // ProbeProtocolTCP specifies the probe protocol tcp state for probe + // protocol. + ProbeProtocolTCP ProbeProtocol = "Tcp" +) + +// SecurityRuleAccess enumerates the values for security rule access. 
+type SecurityRuleAccess string + +const ( + // Allow specifies the allow state for security rule access. + Allow SecurityRuleAccess = "Allow" + // Deny specifies the deny state for security rule access. + Deny SecurityRuleAccess = "Deny" +) + +// SecurityRuleDirection enumerates the values for security rule direction. +type SecurityRuleDirection string + +const ( + // Inbound specifies the inbound state for security rule direction. + Inbound SecurityRuleDirection = "Inbound" + // Outbound specifies the outbound state for security rule direction. + Outbound SecurityRuleDirection = "Outbound" +) + +// SecurityRuleProtocol enumerates the values for security rule protocol. +type SecurityRuleProtocol string + +const ( + // SecurityRuleProtocolAsterisk specifies the security rule protocol + // asterisk state for security rule protocol. + SecurityRuleProtocolAsterisk SecurityRuleProtocol = "*" + // SecurityRuleProtocolTCP specifies the security rule protocol tcp state + // for security rule protocol. + SecurityRuleProtocolTCP SecurityRuleProtocol = "Tcp" + // SecurityRuleProtocolUDP specifies the security rule protocol udp state + // for security rule protocol. + SecurityRuleProtocolUDP SecurityRuleProtocol = "Udp" +) + +// TransportProtocol enumerates the values for transport protocol. +type TransportProtocol string + +const ( + // TransportProtocolTCP specifies the transport protocol tcp state for + // transport protocol. + TransportProtocolTCP TransportProtocol = "Tcp" + // TransportProtocolUDP specifies the transport protocol udp state for + // transport protocol. + TransportProtocolUDP TransportProtocol = "Udp" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" +) + +// VirtualNetworkGatewayConnectionType enumerates the values for virtual +// network gateway connection type. +type VirtualNetworkGatewayConnectionType string + +const ( + // ExpressRoute specifies the express route state for virtual network + // gateway connection type. + ExpressRoute VirtualNetworkGatewayConnectionType = "ExpressRoute" + // IPsec specifies the i psec state for virtual network gateway connection + // type. + IPsec VirtualNetworkGatewayConnectionType = "IPsec" + // Vnet2Vnet specifies the vnet 2 vnet state for virtual network gateway + // connection type. + Vnet2Vnet VirtualNetworkGatewayConnectionType = "Vnet2Vnet" + // VPNClient specifies the vpn client state for virtual network gateway + // connection type. + VPNClient VirtualNetworkGatewayConnectionType = "VPNClient" +) + +// VirtualNetworkGatewayType enumerates the values for virtual network gateway +// type. +type VirtualNetworkGatewayType string + +const ( + // Vpn specifies the vpn state for virtual network gateway type. + Vpn VirtualNetworkGatewayType = "Vpn" +) + +// VpnType enumerates the values for vpn type. +type VpnType string + +const ( + // PolicyBased specifies the policy based state for vpn type. + PolicyBased VpnType = "PolicyBased" + // RouteBased specifies the route based state for vpn type. 
+ RouteBased VpnType = "RouteBased" +) + +// AddressSpace is addressSpace contains an array of IP address ranges that +// can be used by subnets +type AddressSpace struct { + AddressPrefixes *[]string `json:"addressPrefixes,omitempty"` +} + +// ApplicationGateway is applicationGateways resource +type ApplicationGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ApplicationGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayBackendAddress is backend Address of application gateway +type ApplicationGatewayBackendAddress struct { + Fqdn *string `json:"fqdn,omitempty"` + IPAddress *string `json:"ipAddress,omitempty"` +} + +// ApplicationGatewayBackendAddressPool is backend Address Pool of application +// gateway +type ApplicationGatewayBackendAddressPool struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayBackendAddressPoolPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayBackendAddressPoolPropertiesFormat is properties of +// Backend Address Pool of application gateway +type ApplicationGatewayBackendAddressPoolPropertiesFormat struct { + BackendIPConfigurations *[]SubResource `json:"backendIpConfigurations,omitempty"` + BackendAddresses *[]ApplicationGatewayBackendAddress `json:"backendAddresses,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayBackendHTTPSettings is backend address pool settings of +// application gateway +type ApplicationGatewayBackendHTTPSettings struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayBackendHTTPSettingsPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of +// Backend address pool settings of application gateway +type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { + Port *int `json:"port,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of +// application gateway +type ApplicationGatewayFrontendIPConfiguration struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayFrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayFrontendIPConfigurationPropertiesFormat is properties of +// Frontend IP configuration of application gateway +type ApplicationGatewayFrontendIPConfigurationPropertiesFormat struct { + PrivateIPAddress *string `json:"privateIPAddress,omitempty"` + PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` + Subnet *SubResource `json:"subnet,omitempty"` + PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// ApplicationGatewayFrontendPort is frontend Port of application gateway 
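+//
+// Since every field is optional, instances are built through pointers; a
+// small illustrative sketch (the name and port are made up):
+//
+//	name, port := "https", 443
+//	fp := ApplicationGatewayFrontendPort{
+//		Name:       &name,
+//		Properties: &ApplicationGatewayFrontendPortPropertiesFormat{Port: &port},
+//	}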
+type ApplicationGatewayFrontendPort struct {
+	ID *string `json:"id,omitempty"`
+	Properties *ApplicationGatewayFrontendPortPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayFrontendPortPropertiesFormat is properties of Frontend
+// Port of application gateway
+type ApplicationGatewayFrontendPortPropertiesFormat struct {
+	Port *int `json:"port,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayHTTPListener is http listener of application gateway
+type ApplicationGatewayHTTPListener struct {
+	ID *string `json:"id,omitempty"`
+	Properties *ApplicationGatewayHTTPListenerPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayHTTPListenerPropertiesFormat is properties of Http
+// listener of application gateway
+type ApplicationGatewayHTTPListenerPropertiesFormat struct {
+	FrontendIPConfiguration *SubResource `json:"frontendIpConfiguration,omitempty"`
+	FrontendPort *SubResource `json:"frontendPort,omitempty"`
+	Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"`
+	SslCertificate *SubResource `json:"sslCertificate,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayIPConfiguration is iP configuration of application gateway
+type ApplicationGatewayIPConfiguration struct {
+	ID *string `json:"id,omitempty"`
+	Properties *ApplicationGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayIPConfigurationPropertiesFormat is properties of IP
+// configuration of application gateway
+type ApplicationGatewayIPConfigurationPropertiesFormat struct {
+	Subnet *SubResource `json:"subnet,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayListResult is response for ListApplicationGateways Api
+// service call
+type ApplicationGatewayListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]ApplicationGateway `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ApplicationGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
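+//
+// The preparer turns the previous page's NextLink into a plain GET request;
+// a sketch of driving it by hand (variable names hypothetical):
+//
+//	req, err := lastPage.ApplicationGatewayListResultPreparer()
+//	if err == nil && req != nil {
+//		// req now targets the NextLink URL; send it with the client
+//	}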
+func (client ApplicationGatewayListResult) ApplicationGatewayListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ApplicationGatewayPropertiesFormat is properties of Application Gateway
+type ApplicationGatewayPropertiesFormat struct {
+	Sku *ApplicationGatewaySku `json:"sku,omitempty"`
+	OperationalState ApplicationGatewayOperationalState `json:"operationalState,omitempty"`
+	GatewayIPConfigurations *[]ApplicationGatewayIPConfiguration `json:"gatewayIpConfigurations,omitempty"`
+	SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"`
+	FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIpConfigurations,omitempty"`
+	FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"`
+	BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"`
+	BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"`
+	HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"`
+	RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewayRequestRoutingRule is request routing rule of application
+// gateway
+type ApplicationGatewayRequestRoutingRule struct {
+	ID *string `json:"id,omitempty"`
+	Properties *ApplicationGatewayRequestRoutingRulePropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// ApplicationGatewayRequestRoutingRulePropertiesFormat is properties of
+// Request routing rule of application gateway
+type ApplicationGatewayRequestRoutingRulePropertiesFormat struct {
+	RuleType ApplicationGatewayRequestRoutingRuleType `json:"ruleType,omitempty"`
+	BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"`
+	BackendHTTPSettings *SubResource `json:"backendHttpSettings,omitempty"`
+	HTTPListener *SubResource `json:"httpListener,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ApplicationGatewaySku is sKU of application gateway
+type ApplicationGatewaySku struct {
+	Name ApplicationGatewaySkuName `json:"name,omitempty"`
+	Tier ApplicationGatewayTier `json:"tier,omitempty"`
+	Capacity *int `json:"capacity,omitempty"`
+}
+
+// ApplicationGatewaySslCertificate is sSL certificates of application gateway
+type ApplicationGatewaySslCertificate struct {
+	ID *string `json:"id,omitempty"`
+	Properties *ApplicationGatewaySslCertificatePropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// ApplicationGatewaySslCertificatePropertiesFormat is properties of SSL
+// certificates of application gateway
+type ApplicationGatewaySslCertificatePropertiesFormat struct {
+	Data *string `json:"data,omitempty"`
+	Password *string `json:"password,omitempty"`
+	PublicCertData *string `json:"publicCertData,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// AzureAsyncOperationResult is the response body contains the status of the
+// specified asynchronous operation, indicating whether it has succeeded, is
+// in progress, or has failed. Note that this status is distinct from the HTTP
+// status code returned for the Get Operation Status operation itself. If the
+// asynchronous operation succeeded, the response body includes the HTTP
+// status code for the successful request. If the asynchronous operation
+// failed, the response body includes the HTTP status code for the failed
+// request and error information regarding the failure.
+type AzureAsyncOperationResult struct {
+	Status OperationStatus `json:"status,omitempty"`
+	Error *Error `json:"error,omitempty"`
+}
+
+// BackendAddressPool is pool of backend IP addresses
+type BackendAddressPool struct {
+	ID *string `json:"id,omitempty"`
+	Properties *BackendAddressPoolPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// BackendAddressPoolPropertiesFormat is properties of BackendAddressPool
+type BackendAddressPoolPropertiesFormat struct {
+	BackendIPConfigurations *[]SubResource `json:"backendIPConfigurations,omitempty"`
+	LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ConnectionResetSharedKey is
+type ConnectionResetSharedKey struct {
+	autorest.Response `json:"-"`
+	Properties *ConnectionResetSharedKeyPropertiesFormat `json:"properties,omitempty"`
+}
+
+// ConnectionResetSharedKeyPropertiesFormat is
+// virtualNetworkGatewayConnectionResetSharedKey properties
+type ConnectionResetSharedKeyPropertiesFormat struct {
+	KeyLength *int32 `json:"keyLength,omitempty"`
+}
+
+// ConnectionSharedKey is response for GetConnectionSharedKey Api service call
+type ConnectionSharedKey struct {
+	autorest.Response `json:"-"`
+	Value *string `json:"value,omitempty"`
+}
+
+// DhcpOptions is dHCPOptions contains an array of DNS servers available to
+// VMs deployed in the virtual network. Standard DHCP option for a subnet
+// overrides VNET DHCP options.
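+//
+// Because the field is a pointer to a slice, options are built by taking the
+// address of a local slice; a tiny sketch with a made-up DNS server:
+//
+//	dns := []string{"10.0.0.4"}
+//	opts := DhcpOptions{DNSServers: &dns}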
+type DhcpOptions struct {
+	DNSServers *[]string `json:"dnsServers,omitempty"`
+}
+
+// DNSNameAvailabilityResult is response for CheckDnsNameAvailability Api
+// service call
+type DNSNameAvailabilityResult struct {
+	autorest.Response `json:"-"`
+	Available *bool `json:"available,omitempty"`
+}
+
+// Error is
+type Error struct {
+	Code *string `json:"code,omitempty"`
+	Message *string `json:"message,omitempty"`
+	Target *string `json:"target,omitempty"`
+	Details *[]ErrorDetails `json:"details,omitempty"`
+	InnerError *string `json:"innerError,omitempty"`
+}
+
+// ErrorDetails is
+type ErrorDetails struct {
+	Code *string `json:"code,omitempty"`
+	Target *string `json:"target,omitempty"`
+	Message *string `json:"message,omitempty"`
+}
+
+// FrontendIPConfiguration is frontend IP address of the load balancer
+type FrontendIPConfiguration struct {
+	ID *string `json:"id,omitempty"`
+	Properties *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// FrontendIPConfigurationPropertiesFormat is properties of Frontend IP
+// Configuration of the load balancer
+type FrontendIPConfigurationPropertiesFormat struct {
+	PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
+	Subnet *SubResource `json:"subnet,omitempty"`
+	PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"`
+	InboundNatRules *[]SubResource `json:"inboundNatRules,omitempty"`
+	LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// InboundNatRule is inbound NAT rule of the loadbalancer
+type InboundNatRule struct {
+	ID *string `json:"id,omitempty"`
+	Properties *InboundNatRulePropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// InboundNatRulePropertiesFormat is properties of Inbound NAT rule
+type InboundNatRulePropertiesFormat struct {
+	FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"`
+	BackendIPConfiguration *SubResource `json:"backendIPConfiguration,omitempty"`
+	Protocol TransportProtocol `json:"protocol,omitempty"`
+	FrontendPort *int `json:"frontendPort,omitempty"`
+	BackendPort *int `json:"backendPort,omitempty"`
+	IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"`
+	EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// Interface is a NetworkInterface in a resource group
+type Interface struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+	Properties *InterfacePropertiesFormat `json:"properties,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// InterfaceDNSSettings is dns Settings of a network interface
+type InterfaceDNSSettings struct {
+	DNSServers *[]string `json:"dnsServers,omitempty"`
+	AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"`
+	InternalDNSNameLabel *string `json:"internalDnsNameLabel,omitempty"`
+	InternalFqdn *string `json:"internalFqdn,omitempty"`
+}
+
+// InterfaceIPConfiguration is iPConfiguration in a NetworkInterface
+type InterfaceIPConfiguration struct {
+	ID *string `json:"id,omitempty"`
+	Properties *InterfaceIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// InterfaceIPConfigurationPropertiesFormat is properties of IPConfiguration
+type InterfaceIPConfigurationPropertiesFormat struct {
+	PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
+	Subnet *SubResource `json:"subnet,omitempty"`
+	PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"`
+	LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"`
+	LoadBalancerInboundNatRules *[]SubResource `json:"loadBalancerInboundNatRules,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// InterfaceListResult is response for ListNetworkInterface Api service call
+type InterfaceListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]Interface `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// InterfaceListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client InterfaceListResult) InterfaceListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// InterfacePropertiesFormat is networkInterface properties.
+type InterfacePropertiesFormat struct {
+	VirtualMachine *SubResource `json:"virtualMachine,omitempty"`
+	NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
+	IPConfigurations *[]InterfaceIPConfiguration `json:"ipConfigurations,omitempty"`
+	DNSSettings *InterfaceDNSSettings `json:"dnsSettings,omitempty"`
+	MacAddress *string `json:"macAddress,omitempty"`
+	Primary *bool `json:"primary,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// LoadBalancer is loadBalancer resource
+type LoadBalancer struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+	Properties *LoadBalancerPropertiesFormat `json:"properties,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// LoadBalancerListResult is response for ListLoadBalancers Api service call
+type LoadBalancerListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]LoadBalancer `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// LoadBalancerListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client LoadBalancerListResult) LoadBalancerListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LoadBalancerPropertiesFormat is properties of Load Balancer +type LoadBalancerPropertiesFormat struct { + FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + BackendAddressPools *[]BackendAddressPool `json:"backendAddressPools,omitempty"` + LoadBalancingRules *[]LoadBalancingRule `json:"loadBalancingRules,omitempty"` + Probes *[]Probe `json:"probes,omitempty"` + InboundNatRules *[]InboundNatRule `json:"inboundNatRules,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LoadBalancingRule is rules of the load balancer +type LoadBalancingRule struct { + ID *string `json:"id,omitempty"` + Properties *LoadBalancingRulePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LoadBalancingRulePropertiesFormat is properties of the load balancer +type LoadBalancingRulePropertiesFormat struct { + FrontendIPConfiguration *SubResource `json:"frontendIPConfiguration,omitempty"` + BackendAddressPool *SubResource `json:"backendAddressPool,omitempty"` + Probe *SubResource `json:"probe,omitempty"` + Protocol TransportProtocol `json:"protocol,omitempty"` + LoadDistribution LoadDistribution `json:"loadDistribution,omitempty"` + FrontendPort *int `json:"frontendPort,omitempty"` + BackendPort *int `json:"backendPort,omitempty"` + IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"` + EnableFloatingIP *bool `json:"enableFloatingIP,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// LocalNetworkGateway is a common class for general resource information +type LocalNetworkGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *LocalNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// LocalNetworkGatewayListResult is response for ListLocalNetworkGateways Api +// service call +type LocalNetworkGatewayListResult struct { + autorest.Response `json:"-"` + Value *[]LocalNetworkGateway `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// LocalNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client LocalNetworkGatewayListResult) LocalNetworkGatewayListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// LocalNetworkGatewayPropertiesFormat is localNetworkGateway properties +type LocalNetworkGatewayPropertiesFormat struct { + LocalNetworkAddressSpace *AddressSpace `json:"localNetworkAddressSpace,omitempty"` + GatewayIPAddress *string `json:"gatewayIpAddress,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Probe is load balancer Probe +type Probe struct { + ID *string `json:"id,omitempty"` + Properties *ProbePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ProbePropertiesFormat is +type ProbePropertiesFormat struct { + LoadBalancingRules *[]SubResource `json:"loadBalancingRules,omitempty"` + Protocol ProbeProtocol `json:"protocol,omitempty"` + Port *int `json:"port,omitempty"` + IntervalInSeconds *int `json:"intervalInSeconds,omitempty"` + NumberOfProbes *int `json:"numberOfProbes,omitempty"` + RequestPath *string `json:"requestPath,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// PublicIPAddress is publicIPAddress resource +type PublicIPAddress struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *PublicIPAddressPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// PublicIPAddressDNSSettings is contains FQDN of the DNS record associated +// with the public IP address +type PublicIPAddressDNSSettings struct { + DomainNameLabel *string `json:"domainNameLabel,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + ReverseFqdn *string `json:"reverseFqdn,omitempty"` +} + +// PublicIPAddressListResult is response for ListPublicIpAddresses Api service +// call +type PublicIPAddressListResult struct { + autorest.Response `json:"-"` + Value *[]PublicIPAddress `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// PublicIPAddressListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client PublicIPAddressListResult) PublicIPAddressListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// PublicIPAddressPropertiesFormat is publicIpAddress properties
+type PublicIPAddressPropertiesFormat struct {
+	PublicIPAllocationMethod IPAllocationMethod `json:"publicIPAllocationMethod,omitempty"`
+	IPConfiguration *SubResource `json:"ipConfiguration,omitempty"`
+	DNSSettings *PublicIPAddressDNSSettings `json:"dnsSettings,omitempty"`
+	IPAddress *string `json:"ipAddress,omitempty"`
+	IdleTimeoutInMinutes *int `json:"idleTimeoutInMinutes,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// Resource is
+type Resource struct {
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+}
+
+// SecurityGroup is networkSecurityGroup resource
+type SecurityGroup struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Type *string `json:"type,omitempty"`
+	Location *string `json:"location,omitempty"`
+	Tags *map[string]*string `json:"tags,omitempty"`
+	Properties *SecurityGroupPropertiesFormat `json:"properties,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// SecurityGroupListResult is response for ListNetworkSecurityGroups Api
+// service call
+type SecurityGroupListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]SecurityGroup `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// SecurityGroupListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client SecurityGroupListResult) SecurityGroupListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// SecurityGroupPropertiesFormat is network Security Group resource
+type SecurityGroupPropertiesFormat struct {
+	SecurityRules *[]SecurityRule `json:"securityRules,omitempty"`
+	DefaultSecurityRules *[]SecurityRule `json:"defaultSecurityRules,omitempty"`
+	NetworkInterfaces *[]SubResource `json:"networkInterfaces,omitempty"`
+	Subnets *[]SubResource `json:"subnets,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// SecurityRule is network security rule
+type SecurityRule struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Properties *SecurityRulePropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// SecurityRuleListResult is response for ListSecurityRule Api service call.
+// Retrieves all security rules that belong to a network security group
+type SecurityRuleListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]SecurityRule `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// SecurityRuleListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client SecurityRuleListResult) SecurityRuleListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// SecurityRulePropertiesFormat is
+type SecurityRulePropertiesFormat struct {
+	Description *string `json:"description,omitempty"`
+	Protocol SecurityRuleProtocol `json:"protocol,omitempty"`
+	SourcePortRange *string `json:"sourcePortRange,omitempty"`
+	DestinationPortRange *string `json:"destinationPortRange,omitempty"`
+	SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"`
+	DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"`
+	Access SecurityRuleAccess `json:"access,omitempty"`
+	Priority *int `json:"priority,omitempty"`
+	Direction SecurityRuleDirection `json:"direction,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// Subnet is subnet in a VirtualNetwork resource
+type Subnet struct {
+	autorest.Response `json:"-"`
+	ID *string `json:"id,omitempty"`
+	Properties *SubnetPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// SubnetListResult is response for ListSubnets Api service call. Retrieves
+// all subnets that belong to a virtual network
+type SubnetListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]Subnet `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// SubnetListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client SubnetListResult) SubnetListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// SubnetPropertiesFormat is
+type SubnetPropertiesFormat struct {
+	AddressPrefix *string `json:"addressPrefix,omitempty"`
+	NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"`
+	IPConfigurations *[]SubResource `json:"ipConfigurations,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// SubResource is
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// Usage is describes Network Resource Usage.
+type Usage struct {
+	Unit UsageUnit `json:"unit,omitempty"`
+	CurrentValue *int `json:"currentValue,omitempty"`
+	Limit *int32 `json:"limit,omitempty"`
+	Name *UsageName `json:"name,omitempty"`
+}
+
+// UsageName is the Usage Names.
+type UsageName struct {
+	Value *string `json:"value,omitempty"`
+	LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// UsagesListResult is the List Usages operation response.
+type UsagesListResult struct { + autorest.Response `json:"-"` + Value *[]Usage `json:"value,omitempty"` +} + +// VirtualNetwork is virtual Network resource +type VirtualNetwork struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualNetworkPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGateway is a common class for general resource information +type VirtualNetworkGateway struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualNetworkGatewayPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnection is a common class for general resource +// information +type VirtualNetworkGatewayConnection struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *VirtualNetworkGatewayConnectionPropertiesFormat `json:"properties,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResult is response for +// ListVirtualNetworkGatewayConnections Api service call +type VirtualNetworkGatewayConnectionListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkGatewayConnection `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkGatewayConnectionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client VirtualNetworkGatewayConnectionListResult) VirtualNetworkGatewayConnectionListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// VirtualNetworkGatewayConnectionPropertiesFormat is
+// virtualNetworkGatewayConnection properties
+type VirtualNetworkGatewayConnectionPropertiesFormat struct {
+	VirtualNetworkGateway1 *VirtualNetworkGateway `json:"virtualNetworkGateway1,omitempty"`
+	VirtualNetworkGateway2 *VirtualNetworkGateway `json:"virtualNetworkGateway2,omitempty"`
+	LocalNetworkGateway2 *LocalNetworkGateway `json:"localNetworkGateway2,omitempty"`
+	ConnectionType VirtualNetworkGatewayConnectionType `json:"connectionType,omitempty"`
+	RoutingWeight *int `json:"routingWeight,omitempty"`
+	SharedKey *string `json:"sharedKey,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkGatewayIPConfiguration is ipConfiguration for Virtual network
+// gateway
+type VirtualNetworkGatewayIPConfiguration struct {
+	ID *string `json:"id,omitempty"`
+	Properties *VirtualNetworkGatewayIPConfigurationPropertiesFormat `json:"properties,omitempty"`
+	Name *string `json:"name,omitempty"`
+	Etag *string `json:"etag,omitempty"`
+}
+
+// VirtualNetworkGatewayIPConfigurationPropertiesFormat is properties of
+// VirtualNetworkGatewayIPConfiguration
+type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct {
+	PrivateIPAddress *string `json:"privateIPAddress,omitempty"`
+	PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"`
+	Subnet *SubResource `json:"subnet,omitempty"`
+	PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkGatewayListResult is response for ListVirtualNetworkGateways
+// Api service call
+type VirtualNetworkGatewayListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]VirtualNetworkGateway `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkGatewayListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client VirtualNetworkGatewayListResult) VirtualNetworkGatewayListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// VirtualNetworkGatewayPropertiesFormat is virtualNetworkGateway properties
+type VirtualNetworkGatewayPropertiesFormat struct {
+	IPConfigurations *[]VirtualNetworkGatewayIPConfiguration `json:"ipConfigurations,omitempty"`
+	GatewayType VirtualNetworkGatewayType `json:"gatewayType,omitempty"`
+	VpnType VpnType `json:"vpnType,omitempty"`
+	EnableBgp *bool `json:"enableBgp,omitempty"`
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// VirtualNetworkListResult is response for ListVirtualNetworks Api service
+// call
+type VirtualNetworkListResult struct {
+	autorest.Response `json:"-"`
+	Value *[]VirtualNetwork `json:"value,omitempty"`
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkListResultPreparer prepares a request to retrieve the next set of results.
It returns +// nil if no more results exist. +func (client VirtualNetworkListResult) VirtualNetworkListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkPropertiesFormat is +type VirtualNetworkPropertiesFormat struct { + AddressSpace *AddressSpace `json:"addressSpace,omitempty"` + DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go' --- src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,410 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// PublicIPAddressesClient is the client for the PublicIPAddresses methods of +// the Network service. +type PublicIPAddressesClient struct { + ManagementClient +} + +// NewPublicIPAddressesClient creates an instance of the +// PublicIPAddressesClient client. +func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient { + return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPublicIPAddressesClientWithBaseURI creates an instance of the +// PublicIPAddressesClient client. +func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient { + return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put PublicIPAddress operation creates/updates a +// stable/dynamic PublicIP address +// +// resourceGroupName is the name of the resource group. publicIPAddressName is +// the name of the publicIpAddress. 
+// the name of the publicIpAddress. parameters is parameters supplied to the
+// create/update PublicIPAddress operation
+func (client PublicIPAddressesClient) CreateOrUpdate(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (result PublicIPAddress, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, publicIPAddressName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName string, publicIPAddressName string, parameters PublicIPAddress) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": url.QueryEscape(publicIPAddressName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) CreateOrUpdateResponder(resp *http.Response) (result PublicIPAddress, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the delete publicIpAddress operation deletes the specified
+// publicIpAddress.
+//
+// resourceGroupName is the name of the resource group. publicIPAddressName is
+// the name of the publicIpAddress.
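+//
+// A minimal sketch of a delete call and outcome check (placeholder names;
+// autorest.Response embeds *http.Response, so StatusCode is reachable):
+//
+//	resp, err := client.Delete("my-resource-group", "my-public-ip")
+//	if err == nil {
+//		_ = resp.StatusCode // 200, 202 or 204 on success
+//	}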
+func (client PublicIPAddressesClient) Delete(resourceGroupName string, publicIPAddressName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, publicIPAddressName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, publicIPAddressName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": url.QueryEscape(publicIPAddressName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted, http.StatusNoContent, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get publicIpAddress operation retrieves information about the
+// specified publicIpAddress
+//
+// resourceGroupName is the name of the resource group. publicIPAddressName is
+// the name of the publicIpAddress.
+func (client PublicIPAddressesClient) Get(resourceGroupName string, publicIPAddressName string) (result PublicIPAddress, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, publicIPAddressName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
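+//
+// For illustration only: with subscription "sub", group "rg" and address
+// "ip1" (all hypothetical), the prepared request targets
+//
+//	{BaseURI}/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/ip1/?api-version={APIVersion}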
+func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publicIPAddressName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"publicIpAddressName": url.QueryEscape(publicIPAddressName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses/{publicIpAddressName}/"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result PublicIPAddress, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List publicIpAddress operation retrieves all the publicIpAddresses
+// in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client PublicIPAddressesClient) List(resourceGroupName string) (result PublicIPAddressListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPAddresses"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client PublicIPAddressesClient) ListResponder(resp *http.Response) (result PublicIPAddressListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, ae error) {
+	req, err := lastResults.PublicIPAddressListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List publicIpAddress operation retrieves all the
+// publicIpAddresses in a subscription.
+func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPAddresses"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
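ListNextResults above implements manual paging: PublicIPAddressListResultPreparer (generated in models.go, not shown in this hunk) builds a request from the previous page's nextLink and yields a nil request when no further pages exist. A hedged paging sketch, assuming the list result carries Value and NextLink fields as models of this generation usually do; the ListAllResponder implementation continues below:

    package example

    import "github.com/Azure/azure-sdk-for-go/arm/network"

    // pageThroughPublicIPs visits every public IP address in a resource
    // group, following nextLink until the service reports no more pages.
    func pageThroughPublicIPs(c network.PublicIPAddressesClient, group string, visit func(network.PublicIPAddress)) error {
    	page, err := c.List(group)
    	for err == nil {
    		if page.Value != nil {
    			for _, ip := range *page.Value {
    				visit(ip)
    			}
    		}
    		if page.NextLink == nil || *page.NextLink == "" {
    			return nil // no further pages
    		}
    		page, err = c.ListNextResults(page)
    	}
    	return err
    }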
+func (client PublicIPAddressesClient) ListAllResponder(resp *http.Response) (result PublicIPAddressListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client PublicIPAddressesClient) ListAllNextResults(lastResults PublicIPAddressListResult) (result PublicIPAddressListResult, ae error) {
+	req, err := lastResults.PublicIPAddressListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/PublicIPAddressesClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,411 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// SecurityGroupsClient is the client for the SecurityGroups methods of the
+// Network service.
+type SecurityGroupsClient struct {
+	ManagementClient
+}
+
+// NewSecurityGroupsClient creates an instance of the SecurityGroupsClient
+// client.
+func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient {
+	return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSecurityGroupsClientWithBaseURI creates an instance of the
+// SecurityGroupsClient client.
+func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient {
+	return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put NetworkSecurityGroup operation creates/updates a
+// network security group in the specified resource group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// parameters is parameters supplied to the create/update Network Security
+// Group operation.
+func (client SecurityGroupsClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (result SecurityGroup, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, parameters SecurityGroup) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName),
+		"resourceGroupName":        url.QueryEscape(resourceGroupName),
+		"subscriptionId":           url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusCreated, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityGroup, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete NetworkSecurityGroup operation deletes the specified
+// network security group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
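A short sketch of calling the CreateOrUpdate method defined above. It assumes the SecurityGroup model (in models.go, outside this hunk) exposes a Location pointer field, as ARM resource models of this vintage do; the Delete implementation continues below:

    package example

    import "github.com/Azure/azure-sdk-for-go/arm/network"

    // createEmptyNSG creates (or updates) a network security group with no
    // custom rules; Azure still attaches its built-in default rules.
    func createEmptyNSG(c network.SecurityGroupsClient, group, name string) (network.SecurityGroup, error) {
    	location := "westus" // placeholder region
    	return c.CreateOrUpdate(group, name, network.SecurityGroup{Location: &location})
    }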
+func (client SecurityGroupsClient) Delete(resourceGroupName string, networkSecurityGroupName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SecurityGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get NetworkSecurityGroups operation retrieves information about the +// specified network security group. +// +// resourceGroupName is the name of the resource group. +// networkSecurityGroupName is the name of the network security group. +func (client SecurityGroupsClient) Get(resourceGroupName string, networkSecurityGroupName string) (result SecurityGroup, ae error) { + req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName),
+		"resourceGroupName":        url.QueryEscape(resourceGroupName),
+		"subscriptionId":           url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) GetResponder(resp *http.Response) (result SecurityGroup, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List NetworkSecurityGroups operation returns all network security
+// groups in a resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client SecurityGroupsClient) List(resourceGroupName string) (result SecurityGroupListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, ae error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List NetworkSecurityGroups operation returns all network
+// security groups in a subscription.
+func (client SecurityGroupsClient) ListAll() (result SecurityGroupListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client SecurityGroupsClient) ListAllResponder(resp *http.Response) (result SecurityGroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client SecurityGroupsClient) ListAllNextResults(lastResults SecurityGroupListResult) (result SecurityGroupListResult, ae error) {
+	req, err := lastResults.SecurityGroupListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityGroupsClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,335 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// SecurityRulesClient is the client for the SecurityRules methods of the
+// Network service.
+type SecurityRulesClient struct {
+	ManagementClient
+}
+
+// NewSecurityRulesClient creates an instance of the SecurityRulesClient
+// client.
+func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient {
+	return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSecurityRulesClientWithBaseURI creates an instance of the
+// SecurityRulesClient client.
+func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient {
+	return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put network security rule operation creates/updates a
+// security rule in the specified network security group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule. securityRuleParameters
+// is parameters supplied to the create/update network security rule
+// operation.
+func (client SecurityRulesClient) CreateOrUpdate(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (result SecurityRule, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName, securityRuleParameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string, securityRuleParameters SecurityRule) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName),
+		"resourceGroupName":        url.QueryEscape(resourceGroupName),
+		"securityRuleName":         url.QueryEscape(securityRuleName),
+		"subscriptionId":           url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"),
+		autorest.WithJSON(securityRuleParameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client SecurityRulesClient) CreateOrUpdateResponder(resp *http.Response) (result SecurityRule, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the delete network security rule operation deletes the specified
+// network security rule.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule.
+func (client SecurityRulesClient) Delete(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, networkSecurityGroupName, securityRuleName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName),
+		"resourceGroupName":        url.QueryEscape(resourceGroupName),
+		"securityRuleName":         url.QueryEscape(securityRuleName),
+		"subscriptionId":           url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client SecurityRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get NetworkSecurityRule operation retrieves information about the
+// specified network security rule.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+// securityRuleName is the name of the security rule.
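Delete above returns only an autorest.Response, and its sender accepts 202 as well as 200/204, so a successful return does not always mean the rule is gone yet. A sketch of surfacing that distinction to callers (this SDK vintage does not poll long-running operations for the caller); the Get implementation continues below:

    package example

    import (
    	"net/http"

    	"github.com/Azure/azure-sdk-for-go/arm/network"
    )

    // deleteRule removes a security rule and reports whether the deletion is
    // still in flight (202 Accepted) rather than already complete (200/204).
    func deleteRule(c network.SecurityRulesClient, group, nsg, rule string) (pending bool, err error) {
    	resp, err := c.Delete(group, nsg, rule)
    	if err != nil {
    		return false, err
    	}
    	return resp.StatusCode == http.StatusAccepted, nil
    }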
+func (client SecurityRulesClient) Get(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (result SecurityRule, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, networkSecurityGroupName, securityRuleName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkSecurityGroupName string, securityRuleName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName),
+		"resourceGroupName":        url.QueryEscape(resourceGroupName),
+		"securityRuleName":         url.QueryEscape(securityRuleName),
+		"subscriptionId":           url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SecurityRulesClient) GetResponder(resp *http.Response) (result SecurityRule, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List network security rule operation retrieves all the security
+// rules in a network security group.
+//
+// resourceGroupName is the name of the resource group.
+// networkSecurityGroupName is the name of the network security group.
+func (client SecurityRulesClient) List(resourceGroupName string, networkSecurityGroupName string) (result SecurityRuleListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, networkSecurityGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SecurityRulesClient) ListPreparer(resourceGroupName string, networkSecurityGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkSecurityGroupName": url.QueryEscape(networkSecurityGroupName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SecurityRulesClient) ListResponder(resp *http.Response) (result SecurityRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
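Because the Preparer/Sender/Responder triple is exported, callers are not limited to the bundled List method: the three phases can be run separately, for example to inspect or log the request before it goes out. A sketch composing them exactly as the generated List does; ListNextResults continues below:

    package example

    import (
    	"net/http"

    	"github.com/Azure/azure-sdk-for-go/arm/network"
    )

    // listRulesVerbose performs List in three explicit phases so the caller
    // can observe the prepared request before it is sent.
    func listRulesVerbose(c network.SecurityRulesClient, group, nsg string, inspect func(*http.Request)) (network.SecurityRuleListResult, error) {
    	req, err := c.ListPreparer(group, nsg)
    	if err != nil {
    		return network.SecurityRuleListResult{}, err
    	}
    	inspect(req) // e.g. log req.URL

    	resp, err := c.ListSender(req)
    	if err != nil {
    		return network.SecurityRuleListResult{}, err
    	}
    	return c.ListResponder(resp)
    }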
+func (client SecurityRulesClient) ListNextResults(lastResults SecurityRuleListResult) (result SecurityRuleListResult, ae error) {
+	req, err := lastResults.SecurityRuleListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SecurityRulesClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,328 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// SubnetsClient is the client for the Subnets methods of the Network service.
+type SubnetsClient struct {
+	ManagementClient
+}
+
+// NewSubnetsClient creates an instance of the SubnetsClient client.
+func NewSubnetsClient(subscriptionID string) SubnetsClient {
+	return NewSubnetsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSubnetsClientWithBaseURI creates an instance of the SubnetsClient client.
+func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsClient {
+	return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put Subnet operation creates/updates a subnet in the
+// specified virtual network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network. subnetName is the name of the subnet.
+// subnetParameters is parameters supplied to the create/update Subnet +// operation +func (client SubnetsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (result Subnet, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, subnetName, subnetParameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, subnetName string, subnetParameters Subnet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subnetName": url.QueryEscape(subnetName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkName": url.QueryEscape(virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"), + autorest.WithJSON(subnetParameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SubnetsClient) CreateOrUpdateResponder(resp *http.Response) (result Subnet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the delete subnet operation deletes the specified subnet. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. subnetName is the name of the subnet. 
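A sketch of the subnet CreateOrUpdate call documented above. The Properties and AddressPrefix field names are assumptions about models.go, which is outside this hunk; all names and the CIDR are placeholders. The Delete implementation continues below:

    package example

    import "github.com/Azure/azure-sdk-for-go/arm/network"

    // addSubnet carves a /24 out of an existing virtual network's address
    // space. Properties/AddressPrefix are assumed model fields (see note above).
    func addSubnet(c network.SubnetsClient, group, vnet, name string) (network.Subnet, error) {
    	prefix := "10.0.1.0/24"
    	return c.CreateOrUpdate(group, vnet, name, network.Subnet{
    		Properties: &network.SubnetPropertiesFormat{AddressPrefix: &prefix},
    	})
    }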
+func (client SubnetsClient) Delete(resourceGroupName string, virtualNetworkName string, subnetName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, subnetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  url.QueryEscape(resourceGroupName),
+		"subnetName":         url.QueryEscape(subnetName),
+		"subscriptionId":     url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkName": url.QueryEscape(virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusNoContent, http.StatusAccepted, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client SubnetsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusAccepted, http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get subnet operation retrieves information about the specified
+// subnet.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network. subnetName is the name of the subnet.
+func (client SubnetsClient) Get(resourceGroupName string, virtualNetworkName string, subnetName string) (result Subnet, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, subnetName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, subnetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  url.QueryEscape(resourceGroupName),
+		"subnetName":         url.QueryEscape(subnetName),
+		"subscriptionId":     url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkName": url.QueryEscape(virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets/{subnetName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List subnets operation retrieves all the subnets in a virtual
+// network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network.
+func (client SubnetsClient) List(resourceGroupName string, virtualNetworkName string) (result SubnetListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName, virtualNetworkName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  url.QueryEscape(resourceGroupName),
+		"subscriptionId":     url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkName": url.QueryEscape(virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}/subnets"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client SubnetsClient) ListResponder(resp *http.Response) (result SubnetListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client SubnetsClient) ListNextResults(lastResults SubnetListResult) (result SubnetListResult, ae error) {
+	req, err := lastResults.SubnetListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/SubnetsClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/usages.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,102 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// UsagesClient is the client for the Usages methods of the Network service.
+type UsagesClient struct {
+	ManagementClient
+}
+
+// NewUsagesClient creates an instance of the UsagesClient client.
+func NewUsagesClient(subscriptionID string) UsagesClient {
+	return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client.
+func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient {
+	return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List lists network resource usages for a subscription.
+//
+// location is the location upon which resource usage is queried.
+func (client UsagesClient) List(location string) (result UsagesListResult, ae error) { + req, err := client.ListPreparer(location) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/UsagesClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/UsagesClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsagesClient) ListPreparer(location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": url.QueryEscape(location), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsagesClient) ListResponder(resp *http.Response) (result UsagesListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/version.go' --- src/github.com/Azure/azure-sdk-for-go/arm/network/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/network/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. 
+func UserAgent() string {
+	return fmt.Sprintf(userAgentFormat, Version(), "network", "2015-05-01-preview")
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
+func Version() string {
+	return fmt.Sprintf(semVerFormat, major, minor, patch, tag)
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,542 @@
+package network
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// VirtualNetworkGatewayConnectionsClient is the client for the
+// VirtualNetworkGatewayConnections methods of the Network service.
+type VirtualNetworkGatewayConnectionsClient struct {
+	ManagementClient
+}
+
+// NewVirtualNetworkGatewayConnectionsClient creates an instance of the
+// VirtualNetworkGatewayConnectionsClient client.
+func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient {
+	return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of
+// the VirtualNetworkGatewayConnectionsClient client.
+func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient {
+	return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the Put VirtualNetworkGatewayConnection operation
+// creates/updates a virtual network gateway connection in the specified
+// resource group through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the name of the virtual network
+// gateway connection. parameters is parameters supplied to the Begin Create
+// or update Virtual Network Gateway connection operation through Network
+// resource provider.
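Given the constants in version.go above, both helpers evaluate to fixed strings; a quick check (the gateway-connection CreateOrUpdate implementation continues below):

    package example

    import (
    	"fmt"

    	"github.com/Azure/azure-sdk-for-go/arm/network"
    )

    func printVersion() {
    	fmt.Println(network.Version())
    	// 0.1.1-beta
    	fmt.Println(network.UserAgent())
    	// Azure-SDK-for-Go/0.1.1-beta;Package arm/network;API 2015-05-01-preview
    }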
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (result VirtualNetworkGatewayConnection, ae error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters VirtualNetworkGatewayConnection) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete VirtualNetworkGatewayConnection operation deletes the
+// specified virtual network Gateway connection through Network resource
+// provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the name of the virtual network
+// gateway connection.
+func (client VirtualNetworkGatewayConnectionsClient) Delete(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusAccepted, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get VirtualNetworkGatewayConnection operation retrieves information +// about the specified virtual network gateway connection through Network +// resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the name of the virtual network +// gateway connection. 
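+//
+// Get composes GetPreparer, GetSender, and GetResponder; the phases can also
+// be driven individually, for example to inspect or decorate the request
+// before it is sent (names below are illustrative, error handling elided):
+//
+//	req, _ := client.GetPreparer("myResourceGroup", "myConnection")
+//	// ... optionally modify req here ...
+//	resp, _ := client.GetSender(req)
+//	result, err := client.GetResponder(resp)
+//	_, _ = result, err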
+func (client VirtualNetworkGatewayConnectionsClient) Get(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result VirtualNetworkGatewayConnection, ae error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayConnectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) GetResponder(resp *http.Response) (result VirtualNetworkGatewayConnection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetSharedKey the Get VirtualNetworkGatewayConnectionSharedKey operation +// retrieves information about the specified virtual network gateway +// connection shared key through Network resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayConnectionName is the virtual network gateway +// connection shared key name. 
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string) (result ConnectionSharedKey, ae error) {
+	req, err := client.GetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", "Failure preparing request")
+	}
+
+	resp, err := client.GetSharedKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", "Failure sending request")
+	}
+
+	result, err = client.GetSharedKeyResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "GetSharedKey", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetSharedKeyPreparer prepares the GetSharedKey request.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSharedKeySender sends the GetSharedKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetSharedKeyResponder handles the response to the GetSharedKey request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List VirtualNetworkGatewayConnections operation retrieves all the
+// virtual network gateway connections created.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworkGatewayConnectionsClient) List(resourceGroupName string) (result VirtualNetworkGatewayConnectionListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayConnectionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
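+// The generated preparer yields a nil request once no further pages exist,
+// in which case ListNextResults returns an empty result. A paging loop can
+// therefore be sketched as follows (a sketch only, assuming a nil embedded
+// Response marks the final, empty page):
+//
+//	page, err := client.List("myResourceGroup")
+//	for err == nil && page.Response.Response != nil {
+//		// ... consume page ...
+//		page, err = client.ListNextResults(page)
+//	}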
+func (client VirtualNetworkGatewayConnectionsClient) ListNextResults(lastResults VirtualNetworkGatewayConnectionListResult) (result VirtualNetworkGatewayConnectionListResult, ae error) {
+	req, err := lastResults.VirtualNetworkGatewayConnectionListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ResetSharedKey the VirtualNetworkGatewayConnectionResetSharedKey operation
+// resets the virtual network gateway connection shared key for the passed
+// virtual network gateway connection in the specified resource group through
+// Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the virtual network gateway
+// connection reset shared key name. parameters is parameters supplied to the
+// Begin Reset Virtual Network Gateway connection shared key operation
+// through Network resource provider.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (result ConnectionResetSharedKey, ae error) {
+	req, err := client.ResetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", "Failure preparing request")
+	}
+
+	resp, err := client.ResetSharedKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", "Failure sending request")
+	}
+
+	result, err = client.ResetSharedKeyResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "ResetSharedKey", "Failure responding to request")
+	}
+
+	return
+}
+
+// ResetSharedKeyPreparer prepares the ResetSharedKey request.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionResetSharedKey) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey/reset"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ResetSharedKeySender sends the ResetSharedKey request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusAccepted)
+}
+
+// ResetSharedKeyResponder handles the response to the ResetSharedKey request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyResponder(resp *http.Response) (result ConnectionResetSharedKey, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// SetSharedKey the Put VirtualNetworkGatewayConnectionSharedKey operation
+// sets the virtual network gateway connection shared key for the passed
+// virtual network gateway connection in the specified resource group through
+// Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayConnectionName is the virtual network gateway
+// connection name. parameters is parameters supplied to the Begin Set
+// Virtual Network Gateway connection Shared key operation through Network
+// resource provider.
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKey(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (result ConnectionSharedKey, ae error) {
+	req, err := client.SetSharedKeyPreparer(resourceGroupName, virtualNetworkGatewayConnectionName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", "Failure preparing request")
+	}
+
+	resp, err := client.SetSharedKeySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", "Failure sending request")
+	}
+
+	result, err = client.SetSharedKeyResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewayConnectionsClient", "SetSharedKey", "Failure responding to request")
+	}
+
+	return
+}
+
+// SetSharedKeyPreparer prepares the SetSharedKey request.
+func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resourceGroupName string, virtualNetworkGatewayConnectionName string, parameters ConnectionSharedKey) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayConnectionName": url.QueryEscape(virtualNetworkGatewayConnectionName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// SetSharedKeySender sends the SetSharedKey request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// SetSharedKeyResponder handles the response to the SetSharedKey request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyResponder(resp *http.Response) (result ConnectionSharedKey, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go' --- src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,397 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualNetworkGatewaysClient is the client for the VirtualNetworkGateways +// methods of the Network service. +type VirtualNetworkGatewaysClient struct { + ManagementClient +} + +// NewVirtualNetworkGatewaysClient creates an instance of the +// VirtualNetworkGatewaysClient client. 
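+//
+// Construction normally uses the default base URI; an alternate endpoint
+// (for testing, say) can be supplied through the WithBaseURI variant. The
+// endpoint below is illustrative only:
+//
+//	gw := network.NewVirtualNetworkGatewaysClient("<subscriptionID>")
+//	alt := network.NewVirtualNetworkGatewaysClientWithBaseURI("https://management.example.invalid", "<subscriptionID>")
+//	_, _ = gw, alt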
+func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient { + return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the +// VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient { + return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put VirtualNetworkGateway operation creates/updates a +// virtual network gateway in the specified resource group through Network +// resource provider. +// +// resourceGroupName is the name of the resource group. +// virtualNetworkGatewayName is the name of the virtual network gateway. +// parameters is parameters supplied to the Begin Create or update Virtual +// Network Gateway operation through Network resource provider. +func (client VirtualNetworkGatewaysClient) CreateOrUpdate(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGateway, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusCreated, http.StatusOK) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworkGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete VirtualNetworkGateway operation deletes the specified
+// virtual network Gateway through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayName is the name of the virtual network gateway.
+func (client VirtualNetworkGatewaysClient) Delete(resourceGroupName string, virtualNetworkGatewayName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":         url.QueryEscape(resourceGroupName),
+		"subscriptionId":            url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusAccepted, http.StatusOK, http.StatusNoContent)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get VirtualNetworkGateway operation retrieves information about the
+// specified virtual network gateway through Network resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayName is the name of the virtual network gateway.
+func (client VirtualNetworkGatewaysClient) Get(resourceGroupName string, virtualNetworkGatewayName string) (result VirtualNetworkGateway, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, virtualNetworkGatewayName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, virtualNetworkGatewayName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":         url.QueryEscape(resourceGroupName),
+		"subscriptionId":            url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List VirtualNetworkGateways operation retrieves all the virtual
+// network gateways stored.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworkGatewaysClient) List(resourceGroupName string) (result VirtualNetworkGatewayListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworkGatewaysClient) ListResponder(resp *http.Response) (result VirtualNetworkGatewayListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualNetworkGatewaysClient) ListNextResults(lastResults VirtualNetworkGatewayListResult) (result VirtualNetworkGatewayListResult, ae error) {
+	req, err := lastResults.VirtualNetworkGatewayListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Reset the Reset VirtualNetworkGateway operation resets the primary of the
+// virtual network gateway in the specified resource group through Network
+// resource provider.
+//
+// resourceGroupName is the name of the resource group.
+// virtualNetworkGatewayName is the name of the virtual network gateway.
+// parameters is parameters supplied to the Begin Reset Virtual Network
+// Gateway operation through Network resource provider.
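+//
+// Sketch (names are placeholders; per ResetSender below, the service may
+// answer 202 Accepted or 200 OK, both treated as success):
+//
+//	gw, err := client.Reset("myResourceGroup", "myGateway", network.VirtualNetworkGateway{})
+//	_, _ = gw, err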
+func (client VirtualNetworkGatewaysClient) Reset(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (result VirtualNetworkGateway, ae error) { + req, err := client.ResetPreparer(resourceGroupName, virtualNetworkGatewayName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", "Failure preparing request") + } + + resp, err := client.ResetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", "Failure sending request") + } + + result, err = client.ResetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworkGatewaysClient", "Reset", "Failure responding to request") + } + + return +} + +// ResetPreparer prepares the Reset request. +func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName string, virtualNetworkGatewayName string, parameters VirtualNetworkGateway) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkGatewayName": url.QueryEscape(virtualNetworkGatewayName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworkgateways/{virtualNetworkGatewayName}/reset"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ResetSender sends the Reset request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted, http.StatusOK) +} + +// ResetResponder handles the response to the Reset request. The method always +// closes the http.Response Body. +func (client VirtualNetworkGatewaysClient) ResetResponder(resp *http.Response) (result VirtualNetworkGateway, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go' --- src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,410 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// VirtualNetworksClient is the client for the VirtualNetworks methods of the +// Network service. +type VirtualNetworksClient struct { + ManagementClient +} + +// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient +// client. +func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient { + return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworksClientWithBaseURI creates an instance of the +// VirtualNetworksClient client. +func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient { + return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put VirtualNetwork operation creates/updates a virtual +// network in the specified resource group. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. parameters is parameters supplied to the +// create/update Virtual Network operation +func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (result VirtualNetwork, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + "virtualNetworkName": url.QueryEscape(virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualNetwork, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete the Delete VirtualNetwork operation deletes the specified virtual
+// network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network.
+func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  url.QueryEscape(resourceGroupName),
+		"subscriptionId":     url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkName": url.QueryEscape(virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get the Get VirtualNetwork operation retrieves information about the
+// specified virtual network.
+//
+// resourceGroupName is the name of the resource group. virtualNetworkName is
+// the name of the virtual network.
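+//
+// The result embeds the raw response via autorest.Response, so the HTTP
+// status is available next to the unmarshalled body (names illustrative):
+//
+//	vnet, err := client.Get("myResourceGroup", "myVNet")
+//	if err == nil {
+//		fmt.Println(vnet.Response.StatusCode) // 200 on success
+//	}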
+func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string) (result VirtualNetwork, ae error) {
+	req, err := client.GetPreparer(resourceGroupName, virtualNetworkName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName":  url.QueryEscape(resourceGroupName),
+		"subscriptionId":     url.QueryEscape(client.SubscriptionID),
+		"virtualNetworkName": url.QueryEscape(virtualNetworkName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks/{virtualNetworkName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result VirtualNetwork, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List the List VirtualNetwork operation returns all Virtual Networks in a
+// resource group.
+//
+// resourceGroupName is the name of the resource group.
+func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualnetworks"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) ListResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, ae error) {
+	req, err := lastResults.VirtualNetworkListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListAll the List VirtualNetwork operation returns all Virtual Networks in
+// a subscription.
+func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, ae error) {
+	req, err := client.ListAllPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure preparing request")
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure sending request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
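+// Against the default base URI the prepared request targets, roughly:
+//
+//	GET https://management.azure.com/subscriptions/<subscriptionID>/providers/Microsoft.Network/virtualnetworks?api-version=<APIVersion>
+//
+// (subscription ID and the package's APIVersion value elided).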
+func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualnetworks"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualNetworksClient) ListAllResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, ae error) {
+	req, err := lastResults.VirtualNetworkListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure sending next results request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "network/VirtualNetworksClient", "ListAll", "Failure responding to next results request")
+	}
+
+	return
+}

=== added directory 'src/github.com/Azure/azure-sdk-for-go/arm/resources'
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/resources/client.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,481 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+    "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+    "net/http"
+    "net/url"
+)
+
+const (
+    // APIVersion is the version of the Resources API.
+    APIVersion = "2014-04-01-preview"
+
+    // DefaultBaseURI is the default URI used for the Resources service.
+    DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Resources.
+type ManagementClient struct {
+    autorest.Client
+    BaseURI        string
+    SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+    return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+    return ManagementClient{
+        Client:         autorest.NewClientWithUserAgent(UserAgent()),
+        BaseURI:        baseURI,
+        SubscriptionID: subscriptionID,
+    }
+}
+
+// CheckExistence checks whether a resource exists.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity.
+func (client ManagementClient) CheckExistence(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) {
+    req, err := client.CheckExistencePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", "Failure preparing request")
+    }
+
+    resp, err := client.CheckExistenceSender(req)
+    if err != nil {
+        result.Response = resp
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", "Failure sending request")
+    }
+
+    result, err = client.CheckExistenceResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "CheckExistence", "Failure responding to request")
+    }
+
+    return
+}
+
+// CheckExistencePreparer prepares the CheckExistence request.
+func (client ManagementClient) CheckExistencePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "parentResourcePath":        parentResourcePath,
+        "resourceGroupName":         url.QueryEscape(resourceGroupName),
+        "resourceName":              url.QueryEscape(resourceName),
+        "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+        "resourceType":              resourceType,
+        "subscriptionId":            url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsHead(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// CheckExistenceSender sends the CheckExistence request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusNotFound) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client ManagementClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. resourceProviderNamespace is resource identity. +// parentResourcePath is resource identity. resourceType is resource +// identity. resourceName is resource identity. parameters is create or +// update resource parameters. +func (client ManagementClient) CreateOrUpdate(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (result GenericResource, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ManagementClient) CreateOrUpdatePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": url.QueryEscape(resourceGroupName), + "resourceName": url.QueryEscape(resourceName), + "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client ManagementClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusCreated, http.StatusAccepted, http.StatusOK)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusAccepted, http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// Delete deletes a resource and all of its nested resources.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity.
+func (client ManagementClient) Delete(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result autorest.Response, ae error) {
+    req, err := client.DeletePreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", "Failure preparing request")
+    }
+
+    resp, err := client.DeleteSender(req)
+    if err != nil {
+        result.Response = resp
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", "Failure sending request")
+    }
+
+    result, err = client.DeleteResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "Delete", "Failure responding to request")
+    }
+
+    return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client ManagementClient) DeletePreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "parentResourcePath":        parentResourcePath,
+        "resourceGroupName":         url.QueryEscape(resourceGroupName),
+        "resourceName":              url.QueryEscape(resourceName),
+        "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+        "resourceType":              resourceType,
+        "subscriptionId":            url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsDelete(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) DeleteSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK, http.StatusNoContent, http.StatusAccepted)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted),
+        autorest.ByClosing())
+    result.Response = resp
+    return
+}
+
+// Get returns a resource belonging to a resource group.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. resourceProviderNamespace is resource identity.
+// parentResourcePath is resource identity. resourceType is resource
+// identity. resourceName is resource identity.
+func (client ManagementClient) Get(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (result GenericResource, ae error) {
+    req, err := client.GetPreparer(resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, apiVersion)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", "Failure preparing request")
+    }
+
+    resp, err := client.GetSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", "Failure sending request")
+    }
+
+    result, err = client.GetResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "Get", "Failure responding to request")
+    }
+
+    return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagementClient) GetPreparer(resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, apiVersion string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "parentResourcePath":        parentResourcePath,
+        "resourceGroupName":         url.QueryEscape(resourceGroupName),
+        "resourceName":              url.QueryEscape(resourceName),
+        "resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+        "resourceType":              resourceType,
+        "subscriptionId":            url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK, http.StatusNoContent)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) GetResponder(resp *http.Response) (result GenericResource, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// List gets all of the resources under a subscription.
+//
+// filter is the filter to apply on the operation. top is a query parameter;
+// if null is passed, all resources are returned.
+func (client ManagementClient) List(filter string, top int) (result ListResult, ae error) {
+    req, err := client.ListPreparer(filter, top)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure preparing request")
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure sending request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure responding to request")
+    }
+
+    return
+}
+
+// ListPreparer prepares the List request.
+func (client ManagementClient) ListPreparer(filter string, top int) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "subscriptionId": url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "$filter":     filter,
+        "$top":        top,
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resources"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) ListResponder(resp *http.Response) (result ListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client ManagementClient) ListNextResults(lastResults ListResult) (result ListResult, ae error) {
+    req, err := lastResults.ListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "List", "Failure responding to next results request")
+    }
+
+    return
+}
+
+// MoveResources moves resources within or across subscriptions.
+//
+// sourceResourceGroupName is the source resource group name. parameters is
+// the move resources' parameters.
+func (client ManagementClient) MoveResources(sourceResourceGroupName string, parameters MoveInfo) (result autorest.Response, ae error) { + req, err := client.MoveResourcesPreparer(sourceResourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", "Failure preparing request") + } + + resp, err := client.MoveResourcesSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", "Failure sending request") + } + + result, err = client.MoveResourcesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/ManagementClient", "MoveResources", "Failure responding to request") + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. +func (client ManagementClient) MoveResourcesPreparer(sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": url.QueryEscape(sourceResourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. +func (client ManagementClient) MoveResourcesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusAccepted) +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client ManagementClient) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go' --- src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/resources/deploymentoperations.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,199 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
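+
+// Example usage (an illustrative sketch, not generated code): the
+// DeploymentOperationsClient defined below embeds ManagementClient, so it
+// inherits the base URI, subscription ID, and send pipeline. This sketch
+// uses hypothetical resource names and assumes the DeploymentOperation
+// model exposes a Properties field, as comparable generated models do.
+//
+//    doc := resources.NewDeploymentOperationsClient(subscriptionID)
+//    op, err := doc.Get("example-rg", "example-deployment", "example-operation-id")
+//    if err == nil && op.Properties != nil {
+//        // inspect the operation's provisioning state here
+//    }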
+
+import (
+    "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+    "net/http"
+    "net/url"
+)
+
+// DeploymentOperationsClient is the client for the DeploymentOperations
+// methods of the Resources service.
+type DeploymentOperationsClient struct {
+    ManagementClient
+}
+
+// NewDeploymentOperationsClient creates an instance of the
+// DeploymentOperationsClient client.
+func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient {
+    return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDeploymentOperationsClientWithBaseURI creates an instance of the
+// DeploymentOperationsClient client.
+func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient {
+    return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets a single deployment operation.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. deploymentName is the name of the deployment. operationID is
+// the operation ID.
+func (client DeploymentOperationsClient) Get(resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, ae error) {
+    req, err := client.GetPreparer(resourceGroupName, deploymentName, operationID)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", "Failure preparing request")
+    }
+
+    resp, err := client.GetSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", "Failure sending request")
+    }
+
+    result, err = client.GetResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "Get", "Failure responding to request")
+    }
+
+    return
+}
+
+// GetPreparer prepares the Get request.
+func (client DeploymentOperationsClient) GetPreparer(resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "deploymentName":    url.QueryEscape(deploymentName),
+        "operationId":       url.QueryEscape(operationID),
+        "resourceGroupName": url.QueryEscape(resourceGroupName),
+        "subscriptionId":    url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of deployments operations. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. top is query +// parameters. +func (client DeploymentOperationsClient) List(resourceGroupName string, deploymentName string, top int) (result DeploymentOperationsListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, deploymentName, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DeploymentOperationsClient) ListPreparer(resourceGroupName string, deploymentName string, top int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client DeploymentOperationsClient) ListNextResults(lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, ae error) {
+    req, err := lastResults.DeploymentOperationsListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/DeploymentOperationsClient", "List", "Failure responding to next results request")
+    }
+
+    return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/resources/deployments.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,391 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+    "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+    "net/http"
+    "net/url"
+)
+
+// DeploymentsClient is the client for the Deployments methods of the
+// Resources service.
+type DeploymentsClient struct {
+    ManagementClient
+}
+
+// NewDeploymentsClient creates an instance of the DeploymentsClient client.
+func NewDeploymentsClient(subscriptionID string) DeploymentsClient {
+    return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDeploymentsClientWithBaseURI creates an instance of the
+// DeploymentsClient client.
+func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient {
+    return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Cancel cancels a currently running template deployment.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. deploymentName is the name of the deployment.
+func (client DeploymentsClient) Cancel(resourceGroupName string, deploymentName string) (result autorest.Response, ae error) { + req, err := client.CancelPreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", "Failure preparing request") + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", "Failure sending request") + } + + result, err = client.CancelResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Cancel", "Failure responding to request") + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client DeploymentsClient) CancelPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/cancel"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent) +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a named template deployment using a template. +// +// resourceGroupName is the name of the resource group. The name is case +// insensitive. deploymentName is the name of the deployment. parameters is +// additional parameters supplied to the operation. +func (client DeploymentsClient) CreateOrUpdate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentExtended, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, deploymentName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DeploymentsClient) CreateOrUpdatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a deployment. +// +// resourceGroupName is the name of the resource group to get. The name is +// case insensitive. deploymentName is the name of the deployment. +func (client DeploymentsClient) Get(resourceGroupName string, deploymentName string) (result DeploymentExtended, ae error) { + req, err := client.GetPreparer(resourceGroupName, deploymentName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentsClient) GetPreparer(resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": url.QueryEscape(deploymentName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) GetResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get a list of deployments. +// +// resourceGroupName is the name of the resource group to filter by. The name +// is case insensitive. filter is the filter to apply on the operation. top +// is query parameters. If null is passed returns all deployments. +func (client DeploymentsClient) List(resourceGroupName string, filter string, top int) (result DeploymentListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DeploymentsClient) ListPreparer(resourceGroupName string, filter string, top int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$filter": filter, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ListResponder(resp *http.Response) (result DeploymentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. 
+func (client DeploymentsClient) ListNextResults(lastResults DeploymentListResult) (result DeploymentListResult, ae error) {
+    req, err := lastResults.DeploymentListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "List", "Failure responding to next results request")
+    }
+
+    return
+}
+
+// Validate validates a deployment template.
+//
+// resourceGroupName is the name of the resource group. The name is case
+// insensitive. deploymentName is the name of the deployment. parameters is
+// the deployment to validate.
+func (client DeploymentsClient) Validate(resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentValidateResult, ae error) {
+    req, err := client.ValidatePreparer(resourceGroupName, deploymentName, parameters)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", "Failure preparing request")
+    }
+
+    resp, err := client.ValidateSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", "Failure sending request")
+    }
+
+    result, err = client.ValidateResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/DeploymentsClient", "Validate", "Failure responding to request")
+    }
+
+    return
+}
+
+// ValidatePreparer prepares the Validate request.
+func (client DeploymentsClient) ValidatePreparer(resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "deploymentName":    url.QueryEscape(deploymentName),
+        "resourceGroupName": url.QueryEscape(resourceGroupName),
+        "subscriptionId":    url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsPost(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.resources/deployments/{deploymentName}/validate"),
+        autorest.WithJSON(parameters),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// ValidateSender sends the Validate request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK, http.StatusBadRequest)
+}
+
+// ValidateResponder handles the response to the Validate request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) ValidateResponder(resp *http.Response) (result DeploymentValidateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go' --- src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/resources/groups.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,539 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// GroupsClient is the client for the Groups methods of the Resources service. +type GroupsClient struct { + ManagementClient +} + +// NewGroupsClient creates an instance of the GroupsClient client. +func NewGroupsClient(subscriptionID string) GroupsClient { + return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. +func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { + return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether resource group exists. +// +// resourceGroupName is the name of the resource group to check. The name is +// case insensitive. +func (client GroupsClient) CheckExistence(resourceGroupName string) (result autorest.Response, ae error) { + req, err := client.CheckExistencePreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", "Failure preparing request") + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", "Failure sending request") + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "CheckExistence", "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client GroupsClient) CheckExistencePreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusNoContent, http.StatusNotFound) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource group. +// +// resourceGroupName is the name of the resource group to be created or +// updated. parameters is parameters supplied to the create or update +// resource group service operation. +func (client GroupsClient) CreateOrUpdate(resourceGroupName string, parameters Group) (result Group, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GroupsClient) CreateOrUpdatePreparer(resourceGroupName string, parameters Group) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusCreated, http.StatusOK, http.StatusAccepted)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result Group, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK, http.StatusAccepted),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// Delete begins deleting a resource group. To determine whether the
+// operation has finished processing the request, call
+// GetLongRunningOperationStatus.
+//
+// resourceGroupName is the name of the resource group to be deleted. The name
+// is case insensitive.
+func (client GroupsClient) Delete(resourceGroupName string) (result autorest.Response, ae error) {
+    req, err := client.DeletePreparer(resourceGroupName)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", "Failure preparing request")
+    }
+
+    resp, err := client.DeleteSender(req)
+    if err != nil {
+        result.Response = resp
+        return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", "Failure sending request")
+    }
+
+    result, err = client.DeleteResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Delete", "Failure responding to request")
+    }
+
+    return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client GroupsClient) DeletePreparer(resourceGroupName string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "resourceGroupName": url.QueryEscape(resourceGroupName),
+        "subscriptionId":    url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsDelete(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusAccepted, http.StatusOK)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusOK),
+        autorest.ByClosing())
+    result.Response = resp
+    return
+}
+
+// Get gets a resource group.
+//
+// resourceGroupName is the name of the resource group to get. The name is
+// case insensitive.
+func (client GroupsClient) Get(resourceGroupName string) (result Group, ae error) { + req, err := client.GetPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GroupsClient) GetPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GroupsClient) GetResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a collection of resource groups. +// +// filter is the filter to apply on the operation. top is query parameters. If +// null is passed returns all resource groups. +func (client GroupsClient) List(filter string, top int) (result GroupListResult, ae error) { + req, err := client.ListPreparer(filter, top) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client GroupsClient) ListPreparer(filter string, top int) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "subscriptionId": url.QueryEscape(client.SubscriptionID),
+    }
+
+    queryParameters := map[string]interface{}{
+        "$filter":     filter,
+        "$top":        top,
+        "api-version": APIVersion,
+    }
+
+    return autorest.Prepare(&http.Request{},
+        autorest.AsJSON(),
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups"),
+        autorest.WithPathParameters(pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+    return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ListResponder(resp *http.Response) (result GroupListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        autorest.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client GroupsClient) ListNextResults(lastResults GroupListResult) (result GroupListResult, ae error) {
+    req, err := lastResults.GroupListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure sending next results request")
+    }
+
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "List", "Failure responding to next results request")
+    }
+
+    return
+}
+
+// ListResources gets all of the resources in a resource group.
+//
+// resourceGroupName is the name of the resource group. filter is the filter
+// to apply on the operation. top is a query parameter; if null is passed,
+// all resources are returned.
+func (client GroupsClient) ListResources(resourceGroupName string, filter string, top int) (result ListResult, ae error) {
+    req, err := client.ListResourcesPreparer(resourceGroupName, filter, top)
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure preparing request")
+    }
+
+    resp, err := client.ListResourcesSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure sending request")
+    }
+
+    result, err = client.ListResourcesResponder(resp)
+    if err != nil {
+        ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure responding to request")
+    }
+
+    return
+}
+
+// ListResourcesPreparer prepares the ListResources request.
+// ListResourcesPreparer prepares the ListResources request.
+func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filter string, top int) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"$filter":     filter,
+		"$top":        top,
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListResourcesSender sends the ListResources request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListResourcesSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResourcesResponder handles the response to the ListResources request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ListResourcesResponder(resp *http.Response) (result ListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListResourcesNextResults retrieves the next set of results, if any.
+func (client GroupsClient) ListResourcesNextResults(lastResults ListResult) (result ListResult, ae error) {
+	req, err := lastResults.ListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListResourcesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure sending next results request")
+	}
+
+	result, err = client.ListResourcesResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "ListResources", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Patch updates a resource group through a simple PATCH operation to the
+// group address. The format of the request is the same as that for creating
+// a resource group; if a field is unspecified, its current value is carried
+// over.
+//
+// resourceGroupName is the name of the resource group to be created or
+// updated. The name is case insensitive. parameters contains the values
+// supplied to the update resource group operation.
+func (client GroupsClient) Patch(resourceGroupName string, parameters Group) (result Group, ae error) { + req, err := client.PatchPreparer(resourceGroupName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", "Failure preparing request") + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", "Failure sending request") + } + + result, err = client.PatchResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "resources/GroupsClient", "Patch", "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client GroupsClient) PatchPreparer(resourceGroupName string, parameters Group) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) PatchSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client GroupsClient) PatchResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go' --- src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/resources/models.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,396 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DeploymentMode enumerates the values for deployment mode. +type DeploymentMode string + +const ( + // Incremental specifies the incremental state for deployment mode. + Incremental DeploymentMode = "Incremental" +) + +// BasicDependency is deployment dependency information. +type BasicDependency struct { + ID *string `json:"id,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` +} + +// Dependency is deployment dependency information. +type Dependency struct { + DependsOn *[]BasicDependency `json:"dependsOn,omitempty"` + ID *string `json:"id,omitempty"` + ResourceType *string `json:"resourceType,omitempty"` + ResourceName *string `json:"resourceName,omitempty"` +} + +// Deployment is deployment operation parameters. +type Deployment struct { + Properties *DeploymentProperties `json:"properties,omitempty"` +} + +// DeploymentExtended is deployment information. +type DeploymentExtended struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// DeploymentExtendedFilter is deployment filter. +type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DeploymentListResult is list of deployments. +type DeploymentListResult struct { + autorest.Response `json:"-"` + Value *[]DeploymentExtended `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client DeploymentListResult) DeploymentListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DeploymentOperation is deployment operation information. +type DeploymentOperation struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + OperationID *string `json:"operationId,omitempty"` + Properties *DeploymentOperationProperties `json:"properties,omitempty"` +} + +// DeploymentOperationProperties is deployment operation properties. +type DeploymentOperationProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + StatusCode *string `json:"statusCode,omitempty"` + StatusMessage *map[string]*string `json:"statusMessage,omitempty"` + TargetResource *TargetResource `json:"targetResource,omitempty"` +} + +// DeploymentOperationsListResult is list of deployment operations. +type DeploymentOperationsListResult struct { + autorest.Response `json:"-"` + Value *[]DeploymentOperation `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentOperationsListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client DeploymentOperationsListResult) DeploymentOperationsListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// DeploymentProperties is deployment properties. +type DeploymentProperties struct { + Template *map[string]*string `json:"template,omitempty"` + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + Parameters *map[string]*string `json:"parameters,omitempty"` + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + Mode DeploymentMode `json:"mode,omitempty"` +} + +// DeploymentPropertiesExtended is deployment properties with additional +// details. +type DeploymentPropertiesExtended struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + CorrelationID *string `json:"correlationId,omitempty"` + Timestamp *date.Time `json:"timestamp,omitempty"` + Outputs *map[string]*string `json:"outputs,omitempty"` + Providers *[]Provider `json:"providers,omitempty"` + Dependencies *[]Dependency `json:"dependencies,omitempty"` + Template *map[string]*string `json:"template,omitempty"` + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + Parameters *map[string]*string `json:"parameters,omitempty"` + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + Mode DeploymentMode `json:"mode,omitempty"` +} + +// DeploymentValidateResult is information from validate template deployment +// response. +type DeploymentValidateResult struct { + autorest.Response `json:"-"` + Error *ManagementErrorWithDetails `json:"error,omitempty"` + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// GenericResource is resource information. +type GenericResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Plan *Plan `json:"plan,omitempty"` + Properties *map[string]*string `json:"properties,omitempty"` +} + +// GenericResourceFilter is resource filter. +type GenericResourceFilter struct { + ResourceType *string `json:"resourceType,omitempty"` + Tagname *string `json:"tagname,omitempty"` + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// Group is resource group information. +type Group struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Properties *GroupProperties `json:"properties,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// GroupFilter is resource group filter. +type GroupFilter struct { + TagName *string `json:"tagName,omitempty"` + TagValue *string `json:"tagValue,omitempty"` +} + +// GroupListResult is list of resource groups. +type GroupListResult struct { + autorest.Response `json:"-"` + Value *[]Group `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// GroupListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client GroupListResult) GroupListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// GroupProperties is the resource group properties.
+type GroupProperties struct {
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ListResult is a list of resources.
+type ListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]GenericResource `json:"value,omitempty"`
+	NextLink          *string            `json:"nextLink,omitempty"`
+}
+
+// ListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ListResult) ListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ManagementError is the error information returned by the management API.
+type ManagementError struct {
+	Code    *string `json:"code,omitempty"`
+	Message *string `json:"message,omitempty"`
+	Target  *string `json:"target,omitempty"`
+}
+
+// ManagementErrorWithDetails is a management API error together with a list
+// of detailed inner errors.
+type ManagementErrorWithDetails struct {
+	Details *[]ManagementError `json:"details,omitempty"`
+	Code    *string            `json:"code,omitempty"`
+	Message *string            `json:"message,omitempty"`
+	Target  *string            `json:"target,omitempty"`
+}
+
+// MoveInfo is the parameters for moving resources.
+type MoveInfo struct {
+	Resources           *[]string `json:"resources,omitempty"`
+	TargetResourceGroup *string   `json:"targetResourceGroup,omitempty"`
+}
+
+// ParametersLink is entity representing the reference to the deployment
+// parameters.
+type ParametersLink struct {
+	URI            *string `json:"uri,omitempty"`
+	ContentVersion *string `json:"contentVersion,omitempty"`
+}
+
+// Plan is the plan for the resource.
+type Plan struct {
+	Name          *string `json:"name,omitempty"`
+	Publisher     *string `json:"publisher,omitempty"`
+	Product       *string `json:"product,omitempty"`
+	PromotionCode *string `json:"promotionCode,omitempty"`
+}
+
+// Provider is resource provider information.
+type Provider struct {
+	autorest.Response `json:"-"`
+	ID                *string                 `json:"id,omitempty"`
+	Namespace         *string                 `json:"namespace,omitempty"`
+	RegistrationState *string                 `json:"registrationState,omitempty"`
+	ResourceTypes     *[]ProviderResourceType `json:"resourceTypes,omitempty"`
+}
+
+// ProviderListResult is list of resource providers.
+type ProviderListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Provider `json:"value,omitempty"`
+	NextLink          *string     `json:"nextLink,omitempty"`
+}
+
+// ProviderListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client ProviderListResult) ProviderListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// ProviderOperationDefinition is resource provider operation information.
+type ProviderOperationDefinition struct {
+	Name    *string                             `json:"name,omitempty"`
+	Display *ProviderOperationDisplayProperties `json:"display,omitempty"`
+}
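Every model field in this file is a pointer so that omitted values serialize as absent rather than as Go zero values. Callers typically build and read these structs with the vendored autorest/to conversion helpers; a small sketch, assuming to.StringPtr and to.String convert between string and *string as their names suggest (the location and tag values are hypothetical):

    tags := map[string]*string{"env": to.StringPtr("test")}
    group := resources.Group{
        Location: to.StringPtr("westus"),
        Tags:     &tags,
    }
    // Reading back is nil-safe through to.String: a nil pointer yields "".
    fmt.Println(to.String(group.Location))
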
+// ProviderOperationDetailListResult is list of resource provider operations.
+type ProviderOperationDetailListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]ProviderOperationDefinition `json:"value,omitempty"`
+}
+
+// ProviderOperationDisplayProperties is resource provider operation's display
+// properties.
+type ProviderOperationDisplayProperties struct {
+	Publisher   *string `json:"publisher,omitempty"`
+	Provider    *string `json:"provider,omitempty"`
+	Resource    *string `json:"resource,omitempty"`
+	Operation   *string `json:"operation,omitempty"`
+	Description *string `json:"description,omitempty"`
+}
+
+// ProviderResourceType is resource type managed by the resource provider.
+type ProviderResourceType struct {
+	ResourceType *string             `json:"resourceType,omitempty"`
+	Locations    *[]string           `json:"locations,omitempty"`
+	APIVersions  *[]string           `json:"apiVersions,omitempty"`
+	Properties   *map[string]*string `json:"properties,omitempty"`
+}
+
+// Resource is the common resource information: ID, name, type, location, and
+// tags.
+type Resource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Type     *string             `json:"type,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// SubResource is a reference to another resource by ID.
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// TagCount is tag count.
+type TagCount struct {
+	Type  *string `json:"type,omitempty"`
+	Value *string `json:"value,omitempty"`
+}
+
+// TagDetails is tag details.
+type TagDetails struct {
+	autorest.Response `json:"-"`
+	ID                *string     `json:"id,omitempty"`
+	TagName           *string     `json:"tagName,omitempty"`
+	Count             *TagCount   `json:"count,omitempty"`
+	Values            *[]TagValue `json:"values,omitempty"`
+}
+
+// TagsListResult is list of subscription tags.
+type TagsListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]TagDetails `json:"value,omitempty"`
+	NextLink          *string       `json:"nextLink,omitempty"`
+}
+
+// TagsListResultPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client TagsListResult) TagsListResultPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// TagValue is tag information.
+type TagValue struct {
+	autorest.Response `json:"-"`
+	ID                *string   `json:"id,omitempty"`
+	TagValueProperty  *string   `json:"tagValue,omitempty"`
+	Count             *TagCount `json:"count,omitempty"`
+}
+
+// TargetResource is target resource.
+type TargetResource struct {
+	ID           *string `json:"id,omitempty"`
+	ResourceName *string `json:"resourceName,omitempty"`
+	ResourceType *string `json:"resourceType,omitempty"`
+}
+
+// TemplateLink is entity representing the reference to the template.
+type TemplateLink struct {
+	URI            *string `json:"uri,omitempty"`
+	ContentVersion *string `json:"contentVersion,omitempty"`
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationdetails.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationdetails.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/resources/provideroperationdetails.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,105 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// ProviderOperationDetailsClient is the client for the
+// ProviderOperationDetails methods of the Resources service.
+type ProviderOperationDetailsClient struct {
+	ManagementClient
+}
+
+// NewProviderOperationDetailsClient creates an instance of the
+// ProviderOperationDetailsClient client.
+func NewProviderOperationDetailsClient(subscriptionID string) ProviderOperationDetailsClient {
+	return NewProviderOperationDetailsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewProviderOperationDetailsClientWithBaseURI creates an instance of the
+// ProviderOperationDetailsClient client.
+func NewProviderOperationDetailsClientWithBaseURI(baseURI string, subscriptionID string) ProviderOperationDetailsClient {
+	return ProviderOperationDetailsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the list of operations supported by a resource provider.
+//
+// resourceProviderNamespace is the namespace of the resource provider. The
+// apiVersion argument is currently unused; ListPreparer applies the
+// package-level APIVersion constant instead.
+func (client ProviderOperationDetailsClient) List(resourceProviderNamespace string, apiVersion string) (result ProviderOperationDetailListResult, ae error) {
+	req, err := client.ListPreparer(resourceProviderNamespace, apiVersion)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/ProviderOperationDetailsClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client ProviderOperationDetailsClient) ListPreparer(resourceProviderNamespace string, apiVersion string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+		"subscriptionId":            url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/providers/{resourceProviderNamespace}/operations"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ProviderOperationDetailsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ProviderOperationDetailsClient) ListResponder(resp *http.Response) (result ProviderOperationDetailListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/resources/providers.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,314 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// ProvidersClient is the client for the Providers methods of the Resources
+// service.
+type ProvidersClient struct {
+	ManagementClient
+}
+
+// NewProvidersClient creates an instance of the ProvidersClient client.
+func NewProvidersClient(subscriptionID string) ProvidersClient {
+	return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient
+// client.
+func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient {
+	return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets a resource provider.
+//
+// resourceProviderNamespace is the namespace of the resource provider.
+func (client ProvidersClient) Get(resourceProviderNamespace string) (result Provider, ae error) {
+	req, err := client.GetPreparer(resourceProviderNamespace)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client ProvidersClient) GetPreparer(resourceProviderNamespace string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+		"subscriptionId":            url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets a list of resource providers.
+//
+// top is the maximum number of results to return; if null is passed, all
+// resource providers are returned.
+func (client ProvidersClient) List(top int) (result ProviderListResult, ae error) {
+	req, err := client.ListPreparer(top)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client ProvidersClient) ListPreparer(top int) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"$top":        top,
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
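A sketch of Get in use, reading the nil-able RegistrationState safely (subscriptionID is assumed to be defined elsewhere; "Microsoft.Compute" is just one example namespace):

    client := resources.NewProvidersClient(subscriptionID)
    provider, err := client.Get("Microsoft.Compute")
    if err == nil && provider.RegistrationState != nil {
        fmt.Println("registration state:", *provider.RegistrationState)
    }
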
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client ProvidersClient) ListNextResults(lastResults ProviderListResult) (result ProviderListResult, ae error) {
+	req, err := lastResults.ProviderListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Register registers a provider to be used with a subscription.
+//
+// resourceProviderNamespace is the namespace of the resource provider.
+func (client ProvidersClient) Register(resourceProviderNamespace string) (result Provider, ae error) {
+	req, err := client.RegisterPreparer(resourceProviderNamespace)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", "Failure preparing request")
+	}
+
+	resp, err := client.RegisterSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", "Failure sending request")
+	}
+
+	result, err = client.RegisterResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Register", "Failure responding to request")
+	}
+
+	return
+}
+
+// RegisterPreparer prepares the Register request.
+func (client ProvidersClient) RegisterPreparer(resourceProviderNamespace string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+		"subscriptionId":            url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// RegisterSender sends the Register request. The method will close the
+// http.Response Body if it receives an error.
+func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
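Provider registration is asynchronous on the Azure side: the Provider returned by Register (and by the matching Unregister below) usually reports an in-flight state, so callers that need a terminal state typically poll Get afterwards. A sketch, reusing the client from the previous example (the namespace is hypothetical):

    provider, err := client.Register("Microsoft.Scheduler")
    if err == nil && provider.RegistrationState != nil {
        fmt.Println(*provider.RegistrationState) // commonly "Registering" at first
    }
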
+// RegisterResponder handles the response to the Register request. The method always
+// closes the http.Response Body.
+func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Unregister unregisters a provider from a subscription.
+//
+// resourceProviderNamespace is the namespace of the resource provider.
+func (client ProvidersClient) Unregister(resourceProviderNamespace string) (result Provider, ae error) {
+	req, err := client.UnregisterPreparer(resourceProviderNamespace)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", "Failure preparing request")
+	}
+
+	resp, err := client.UnregisterSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", "Failure sending request")
+	}
+
+	result, err = client.UnregisterResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/ProvidersClient", "Unregister", "Failure responding to request")
+	}
+
+	return
+}
+
+// UnregisterPreparer prepares the Unregister request.
+func (client ProvidersClient) UnregisterPreparer(resourceProviderNamespace string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": url.QueryEscape(resourceProviderNamespace),
+		"subscriptionId":            url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// UnregisterSender sends the Unregister request. The method will close the
+// http.Response Body if it receives an error.
+func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// UnregisterResponder handles the response to the Unregister request. The method always
+// closes the http.Response Body.
+func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/resources/tags.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,371 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+// TagsClient is the client for the Tags methods of the Resources service.
+type TagsClient struct {
+	ManagementClient
+}
+
+// NewTagsClient creates an instance of the TagsClient client.
+func NewTagsClient(subscriptionID string) TagsClient {
+	return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewTagsClientWithBaseURI creates an instance of the TagsClient client.
+func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient {
+	return TagsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates a subscription resource tag.
+//
+// tagName is the name of the tag.
+func (client TagsClient) CreateOrUpdate(tagName string) (result TagDetails, ae error) {
+	req, err := client.CreateOrUpdatePreparer(tagName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdate", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client TagsClient) CreateOrUpdatePreparer(tagName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+		"tagName":        url.QueryEscape(tagName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// CreateOrUpdateValue creates a subscription resource tag value.
+//
+// tagName is the name of the tag. tagValue is the value of the tag.
+func (client TagsClient) CreateOrUpdateValue(tagName string, tagValue string) (result TagValue, ae error) {
+	req, err := client.CreateOrUpdateValuePreparer(tagName, tagValue)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateValueSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateValueResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/TagsClient", "CreateOrUpdateValue", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request.
+func (client TagsClient) CreateOrUpdateValuePreparer(tagName string, tagValue string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+		"tagName":        url.QueryEscape(tagName),
+		"tagValue":       url.QueryEscape(tagValue),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the
+// http.Response Body if it receives an error.
+func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusCreated)
+}
+
+// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always
+// closes the http.Response Body.
+func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Delete deletes a subscription resource tag.
+//
+// tagName is the name of the tag.
+func (client TagsClient) Delete(tagName string) (result autorest.Response, ae error) {
+	req, err := client.DeletePreparer(tagName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/TagsClient", "Delete", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client TagsClient) DeletePreparer(tagName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+		"tagName":        url.QueryEscape(tagName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// DeleteValue deletes a subscription resource tag value.
+//
+// tagName is the name of the tag. tagValue is the value of the tag.
+func (client TagsClient) DeleteValue(tagName string, tagValue string) (result autorest.Response, ae error) {
+	req, err := client.DeleteValuePreparer(tagName, tagValue)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", "Failure preparing request")
+	}
+
+	resp, err := client.DeleteValueSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", "Failure sending request")
+	}
+
+	result, err = client.DeleteValueResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/TagsClient", "DeleteValue", "Failure responding to request")
+	}
+
+	return
+}
+
+// DeleteValuePreparer prepares the DeleteValue request.
+func (client TagsClient) DeleteValuePreparer(tagName string, tagValue string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+		"tagName":        url.QueryEscape(tagName),
+		"tagValue":       url.QueryEscape(tagValue),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// DeleteValueSender sends the DeleteValue request. The method will close the
+// http.Response Body if it receives an error.
+func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusNoContent)
+}
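The four tag operations compose into a natural lifecycle. A sketch that creates a tag name and a value under it, then removes them in reverse order (the tag name and value are hypothetical):

    func tagLifecycle(client resources.TagsClient) error {
        if _, err := client.CreateOrUpdate("department"); err != nil {
            return err
        }
        if _, err := client.CreateOrUpdateValue("department", "finance"); err != nil {
            return err
        }
        // Clean up: remove the value first, then the tag name itself.
        if _, err := client.DeleteValue("department", "finance"); err != nil {
            return err
        }
        _, err := client.Delete("department")
        return err
    }
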
+// DeleteValueResponder handles the response to the DeleteValue request. The method always
+// closes the http.Response Body.
+func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// List gets a list of subscription resource tags.
+func (client TagsClient) List() (result TagsListResult, ae error) {
+	req, err := client.ListPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client TagsClient) ListPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/tagNames"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client TagsClient) ListNextResults(lastResults TagsListResult) (result TagsListResult, ae error) {
+	req, err := lastResults.TagsListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "resources/TagsClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/resources/version.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,43 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "resources", "2014-04-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/scheduler' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Scheduler + APIVersion = "2014-08-01-preview" + + // DefaultBaseURI is the default URI used for the service Scheduler + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Scheduler. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. 
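From the constants in version.go above, Version() evaluates to "0.1.1-beta" and UserAgent() composes that into the header value sent with every request. A tiny sketch of what the two functions return:

    fmt.Println(resources.Version())   // 0.1.1-beta
    fmt.Println(resources.UserAgent()) // Azure-SDK-for-Go/0.1.1-beta;Package arm/resources;API 2014-04-01-preview
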
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go' --- src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,596 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// JobCollectionsClient is the client for the JobCollections methods of the +// Scheduler service. +type JobCollectionsClient struct { + ManagementClient +} + +// NewJobCollectionsClient creates an instance of the JobCollectionsClient +// client. +func NewJobCollectionsClient(subscriptionID string) JobCollectionsClient { + return NewJobCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobCollectionsClientWithBaseURI creates an instance of the +// JobCollectionsClient client. +func NewJobCollectionsClientWithBaseURI(baseURI string, subscriptionID string) JobCollectionsClient { + return JobCollectionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate provisions a new job collection or updates an existing job +// collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobCollection is the job collection definition. +func (client JobCollectionsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, jobCollectionName, jobCollection) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client JobCollectionsClient) CreateOrUpdatePreparer(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithJSON(jobCollection), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) CreateOrUpdateResponder(resp *http.Response) (result JobCollectionDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. +func (client JobCollectionsClient) Delete(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client JobCollectionsClient) DeletePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. 
The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Disable disables all of the jobs in the job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. +func (client JobCollectionsClient) Disable(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { + req, err := client.DisablePreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", "Failure preparing request") + } + + resp, err := client.DisableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", "Failure sending request") + } + + result, err = client.DisableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Disable", "Failure responding to request") + } + + return +} + +// DisablePreparer prepares the Disable request. +func (client JobCollectionsClient) DisablePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/disable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DisableSender sends the Disable request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) DisableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DisableResponder handles the response to the Disable request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) DisableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Enable enables all of the jobs in the job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. 
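+//
+// A hedged usage sketch (names are placeholders); the paired Disable call
+// has exactly the same shape:
+//
+//	client := scheduler.NewJobCollectionsClient("<subscriptionID>")
+//	resp, err := client.Enable("myGroup", "myCollection")
+//	if err == nil && resp.StatusCode == http.StatusOK {
+//		// every job in the collection is runnable again
+//	}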
+func (client JobCollectionsClient) Enable(resourceGroupName string, jobCollectionName string) (result autorest.Response, ae error) { + req, err := client.EnablePreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", "Failure preparing request") + } + + resp, err := client.EnableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", "Failure sending request") + } + + result, err = client.EnableResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Enable", "Failure responding to request") + } + + return +} + +// EnablePreparer prepares the Enable request. +func (client JobCollectionsClient) EnablePreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/enable"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// EnableSender sends the Enable request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) EnableSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// EnableResponder handles the response to the Enable request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) EnableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a job collection. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. +func (client JobCollectionsClient) Get(resourceGroupName string, jobCollectionName string) (result JobCollectionDefinition, ae error) { + req, err := client.GetPreparer(resourceGroupName, jobCollectionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client JobCollectionsClient) GetPreparer(resourceGroupName string, jobCollectionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) GetSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) GetResponder(resp *http.Response) (result JobCollectionDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup gets all job collections under specified resource group. +// +// resourceGroupName is the resource group name. +func (client JobCollectionsClient) ListByResourceGroup(resourceGroupName string) (result JobCollectionListResult, ae error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client JobCollectionsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
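+//
+// Results are paged: ListByResourceGroup returns the first page and
+// ListByResourceGroupNextResults follows NextLink. A hedged draining sketch
+// (placeholder names):
+//
+//	page, err := client.ListByResourceGroup("myGroup")
+//	for err == nil && page.Value != nil {
+//		for _, jc := range *page.Value {
+//			_ = jc // consume each JobCollectionDefinition
+//		}
+//		if page.NextLink == nil {
+//			break
+//		}
+//		page, err = client.ListByResourceGroupNextResults(page)
+//	}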
+func (client JobCollectionsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client JobCollectionsClient) ListByResourceGroupResponder(resp *http.Response) (result JobCollectionListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListByResourceGroupNextResults retrieves the next set of results, if any.
+func (client JobCollectionsClient) ListByResourceGroupNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, ae error) {
+	req, err := lastResults.JobCollectionListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListByResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure sending next results request")
+	}
+
+	result, err = client.ListByResourceGroupResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListByResourceGroup", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListBySubscription gets all job collections under specified subscription.
+func (client JobCollectionsClient) ListBySubscription() (result JobCollectionListResult, ae error) {
+	req, err := client.ListBySubscriptionPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure preparing request")
+	}
+
+	resp, err := client.ListBySubscriptionSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure sending request")
+	}
+
+	result, err = client.ListBySubscriptionResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
+func (client JobCollectionsClient) ListBySubscriptionPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Scheduler/jobCollections"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobCollectionsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client JobCollectionsClient) ListBySubscriptionResponder(resp *http.Response) (result JobCollectionListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListBySubscriptionNextResults retrieves the next set of results, if any.
+func (client JobCollectionsClient) ListBySubscriptionNextResults(lastResults JobCollectionListResult) (result JobCollectionListResult, ae error) {
+	req, err := lastResults.JobCollectionListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListBySubscriptionSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure sending next results request")
+	}
+
+	result, err = client.ListBySubscriptionResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "ListBySubscription", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Patch patches an existing job collection.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. jobCollection is the job collection definition.
+func (client JobCollectionsClient) Patch(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (result JobCollectionDefinition, ae error) {
+	req, err := client.PatchPreparer(resourceGroupName, jobCollectionName, jobCollection)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", "Failure preparing request")
+	}
+
+	resp, err := client.PatchSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", "Failure sending request")
+	}
+
+	result, err = client.PatchResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobCollectionsClient", "Patch", "Failure responding to request")
+	}
+
+	return
+}
+
+// PatchPreparer prepares the Patch request.
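+//
+// PatchPreparer issues a PATCH, so jobCollection is treated as a partial
+// update rather than a full replacement. A hedged sketch that flips only the
+// collection state (placeholder names):
+//
+//	update := scheduler.JobCollectionDefinition{
+//		Properties: &scheduler.JobCollectionProperties{
+//			State: scheduler.JobCollectionStateDisabled,
+//		},
+//	}
+//	patched, err := client.Patch("myGroup", "myCollection", update)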
+func (client JobCollectionsClient) PatchPreparer(resourceGroupName string, jobCollectionName string, jobCollection JobCollectionDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}"), + autorest.WithJSON(jobCollection), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client JobCollectionsClient) PatchSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. +func (client JobCollectionsClient) PatchResponder(resp *http.Response) (result JobCollectionDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go' --- src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,550 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// JobsClient is the client for the Jobs methods of the Scheduler service. +type JobsClient struct { + ManagementClient +} + +// NewJobsClient creates an instance of the JobsClient client. +func NewJobsClient(subscriptionID string) JobsClient { + return NewJobsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobsClientWithBaseURI creates an instance of the JobsClient client. +func NewJobsClientWithBaseURI(baseURI string, subscriptionID string) JobsClient { + return JobsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate provisions a new job or updates an existing job. +// +// resourceGroupName is the resource group name. 
jobCollectionName is the job +// collection name. jobName is the job name. job is the job definition. +func (client JobsClient) CreateOrUpdate(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, jobCollectionName, jobName, job) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client JobsClient) CreateOrUpdatePreparer(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithJSON(job), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client JobsClient) CreateOrUpdateResponder(resp *http.Response) (result JobDefinition, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a job. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. 
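+//
+// A hedged sketch (placeholder names):
+//
+//	jobsClient := scheduler.NewJobsClient("<subscriptionID>")
+//	resp, err := jobsClient.Delete("myGroup", "myCollection", "myJob")
+//	if err == nil && resp.StatusCode == http.StatusOK {
+//		// the job was removed
+//	}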
+func (client JobsClient) Delete(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, jobCollectionName, jobName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client JobsClient) DeletePreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client JobsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a job. +// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. +func (client JobsClient) Get(resourceGroupName string, jobCollectionName string, jobName string) (result JobDefinition, ae error) { + req, err := client.GetPreparer(resourceGroupName, jobCollectionName, jobName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Get", "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client JobsClient) GetPreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"jobCollectionName": url.QueryEscape(jobCollectionName),
+		"jobName":           url.QueryEscape(jobName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client JobsClient) GetResponder(resp *http.Response) (result JobDefinition, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List lists all jobs under the specified job collection.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. top is the number of jobs to request, in the range
+// [1..100]. skip is the (0-based) index of the job list from which to begin
+// requesting entries.
+func (client JobsClient) List(resourceGroupName string, jobCollectionName string, top int, skip int) (result JobListResult, ae error) {
+	req, err := client.ListPreparer(resourceGroupName, jobCollectionName, top, skip)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
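+//
+// top and skip are forwarded as the $top and $skip OData query parameters.
+// A hedged sketch requesting the first page of up to 100 jobs (placeholder
+// names):
+//
+//	jobs, err := jobsClient.List("myGroup", "myCollection", 100, 0)
+//	if err == nil && jobs.Value != nil {
+//		for _, job := range *jobs.Value {
+//			_ = job // consume each JobDefinition
+//		}
+//	}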
+func (client JobsClient) ListPreparer(resourceGroupName string, jobCollectionName string, top int, skip int) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"jobCollectionName": url.QueryEscape(jobCollectionName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"$skip":       skip,
+		"$top":        top,
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client JobsClient) ListResponder(resp *http.Response) (result JobListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client JobsClient) ListNextResults(lastResults JobListResult) (result JobListResult, ae error) {
+	req, err := lastResults.JobListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// ListJobHistory lists job history.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. jobName is the job name. top is the number of job history
+// entries to request, in the range [1..100]. skip is the (0-based) index of
+// the job history list from which to begin requesting entries.
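+//
+// A hedged sketch that reads the first page of history and, if the service
+// reports more, one further page (placeholder names):
+//
+//	history, err := jobsClient.ListJobHistory("myGroup", "myCollection", "myJob", 50, 0)
+//	if err == nil && history.NextLink != nil {
+//		history, err = jobsClient.ListJobHistoryNextResults(history)
+//	}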
+func (client JobsClient) ListJobHistory(resourceGroupName string, jobCollectionName string, jobName string, top int, skip int) (result JobHistoryListResult, ae error) { + req, err := client.ListJobHistoryPreparer(resourceGroupName, jobCollectionName, jobName, top, skip) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure preparing request") + } + + resp, err := client.ListJobHistorySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure sending request") + } + + result, err = client.ListJobHistoryResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure responding to request") + } + + return +} + +// ListJobHistoryPreparer prepares the ListJobHistory request. +func (client JobsClient) ListJobHistoryPreparer(resourceGroupName string, jobCollectionName string, jobName string, top int, skip int) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "$skip": skip, + "$top": top, + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/history"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListJobHistorySender sends the ListJobHistory request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) ListJobHistorySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListJobHistoryResponder handles the response to the ListJobHistory request. The method always +// closes the http.Response Body. +func (client JobsClient) ListJobHistoryResponder(resp *http.Response) (result JobHistoryListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListJobHistoryNextResults retrieves the next set of results, if any. 
+func (client JobsClient) ListJobHistoryNextResults(lastResults JobHistoryListResult) (result JobHistoryListResult, ae error) {
+	req, err := lastResults.JobHistoryListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListJobHistorySender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure sending next results request")
+	}
+
+	result, err = client.ListJobHistoryResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "ListJobHistory", "Failure responding to next results request")
+	}
+
+	return
+}
+
+// Patch patches an existing job.
+//
+// resourceGroupName is the resource group name. jobCollectionName is the job
+// collection name. jobName is the job name. job is the job definition.
+func (client JobsClient) Patch(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (result JobDefinition, ae error) {
+	req, err := client.PatchPreparer(resourceGroupName, jobCollectionName, jobName, job)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", "Failure preparing request")
+	}
+
+	resp, err := client.PatchSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", "Failure sending request")
+	}
+
+	result, err = client.PatchResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Patch", "Failure responding to request")
+	}
+
+	return
+}
+
+// PatchPreparer prepares the Patch request.
+func (client JobsClient) PatchPreparer(resourceGroupName string, jobCollectionName string, jobName string, job JobDefinition) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"jobCollectionName": url.QueryEscape(jobCollectionName),
+		"jobName":           url.QueryEscape(jobName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}"),
+		autorest.WithJSON(job),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// PatchSender sends the Patch request. The method will close the
+// http.Response Body if it receives an error.
+func (client JobsClient) PatchSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// PatchResponder handles the response to the Patch request. The method always
+// closes the http.Response Body.
+func (client JobsClient) PatchResponder(resp *http.Response) (result JobDefinition, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Run runs a job.
+// +// resourceGroupName is the resource group name. jobCollectionName is the job +// collection name. jobName is the job name. +func (client JobsClient) Run(resourceGroupName string, jobCollectionName string, jobName string) (result autorest.Response, ae error) { + req, err := client.RunPreparer(resourceGroupName, jobCollectionName, jobName) + if err != nil { + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", "Failure preparing request") + } + + resp, err := client.RunSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", "Failure sending request") + } + + result, err = client.RunResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "scheduler/JobsClient", "Run", "Failure responding to request") + } + + return +} + +// RunPreparer prepares the Run request. +func (client JobsClient) RunPreparer(resourceGroupName string, jobCollectionName string, jobName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "jobCollectionName": url.QueryEscape(jobCollectionName), + "jobName": url.QueryEscape(jobName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/run"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RunSender sends the Run request. The method will close the +// http.Response Body if it receives an error. +func (client JobsClient) RunSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RunResponder handles the response to the Run request. The method always +// closes the http.Response Body. +func (client JobsClient) RunResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go' --- src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,444 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
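+
+// The types below mirror the Scheduler REST wire format. Optional fields are
+// pointers so the JSON marshaller can tell an omitted value from a zero
+// value; the autorest/to helpers convert between pointers and plain values.
+// A hedged construction sketch (values are placeholders):
+//
+//	def := JobCollectionDefinition{
+//		Location: to.StringPtr("West US"),
+//		Properties: &JobCollectionProperties{
+//			Sku:   &Sku{Name: Standard},
+//			State: JobCollectionStateEnabled,
+//		},
+//	}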
+ +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DayOfWeek enumerates the values for day of week. +type DayOfWeek string + +const ( + // DayOfWeekFriday specifies the day of week friday state for day of week. + DayOfWeekFriday DayOfWeek = "Friday" + // DayOfWeekMonday specifies the day of week monday state for day of week. + DayOfWeekMonday DayOfWeek = "Monday" + // DayOfWeekSaturday specifies the day of week saturday state for day of + // week. + DayOfWeekSaturday DayOfWeek = "Saturday" + // DayOfWeekSunday specifies the day of week sunday state for day of week. + DayOfWeekSunday DayOfWeek = "Sunday" + // DayOfWeekThursday specifies the day of week thursday state for day of + // week. + DayOfWeekThursday DayOfWeek = "Thursday" + // DayOfWeekTuesday specifies the day of week tuesday state for day of + // week. + DayOfWeekTuesday DayOfWeek = "Tuesday" + // DayOfWeekWednesday specifies the day of week wednesday state for day of + // week. + DayOfWeekWednesday DayOfWeek = "Wednesday" +) + +// HTTPAuthenticationType enumerates the values for http authentication type. +type HTTPAuthenticationType string + +const ( + // ActiveDirectoryOAuth specifies the active directory o auth state for + // http authentication type. + ActiveDirectoryOAuth HTTPAuthenticationType = "ActiveDirectoryOAuth" + // Basic specifies the basic state for http authentication type. + Basic HTTPAuthenticationType = "Basic" + // ClientCertificate specifies the client certificate state for http + // authentication type. + ClientCertificate HTTPAuthenticationType = "ClientCertificate" + // NotSpecified specifies the not specified state for http authentication + // type. + NotSpecified HTTPAuthenticationType = "NotSpecified" +) + +// JobActionType enumerates the values for job action type. +type JobActionType string + +const ( + // HTTP specifies the http state for job action type. + HTTP JobActionType = "Http" + // HTTPS specifies the https state for job action type. + HTTPS JobActionType = "Https" + // StorageQueue specifies the storage queue state for job action type. + StorageQueue JobActionType = "StorageQueue" +) + +// JobCollectionState enumerates the values for job collection state. +type JobCollectionState string + +const ( + // JobCollectionStateDeleted specifies the job collection state deleted + // state for job collection state. + JobCollectionStateDeleted JobCollectionState = "Deleted" + // JobCollectionStateDisabled specifies the job collection state disabled + // state for job collection state. + JobCollectionStateDisabled JobCollectionState = "Disabled" + // JobCollectionStateEnabled specifies the job collection state enabled + // state for job collection state. + JobCollectionStateEnabled JobCollectionState = "Enabled" + // JobCollectionStateSuspended specifies the job collection state + // suspended state for job collection state. + JobCollectionStateSuspended JobCollectionState = "Suspended" +) + +// JobExecutionStatus enumerates the values for job execution status. +type JobExecutionStatus string + +const ( + // JobExecutionStatusCallbackNotFound specifies the job execution status + // callback not found state for job execution status. 
+ JobExecutionStatusCallbackNotFound JobExecutionStatus = "CallbackNotFound" + // JobExecutionStatusCancelled specifies the job execution status + // cancelled state for job execution status. + JobExecutionStatusCancelled JobExecutionStatus = "Cancelled" + // JobExecutionStatusCompleted specifies the job execution status + // completed state for job execution status. + JobExecutionStatusCompleted JobExecutionStatus = "Completed" + // JobExecutionStatusFailed specifies the job execution status failed + // state for job execution status. + JobExecutionStatusFailed JobExecutionStatus = "Failed" + // JobExecutionStatusPostponed specifies the job execution status + // postponed state for job execution status. + JobExecutionStatusPostponed JobExecutionStatus = "Postponed" +) + +// JobHistoryActionName enumerates the values for job history action name. +type JobHistoryActionName string + +const ( + // ErrorAction specifies the error action state for job history action + // name. + ErrorAction JobHistoryActionName = "ErrorAction" + // MainAction specifies the main action state for job history action name. + MainAction JobHistoryActionName = "MainAction" +) + +// JobScheduleDay enumerates the values for job schedule day. +type JobScheduleDay string + +const ( + // JobScheduleDayFriday specifies the job schedule day friday state for + // job schedule day. + JobScheduleDayFriday JobScheduleDay = "Friday" + // JobScheduleDayMonday specifies the job schedule day monday state for + // job schedule day. + JobScheduleDayMonday JobScheduleDay = "Monday" + // JobScheduleDaySaturday specifies the job schedule day saturday state + // for job schedule day. + JobScheduleDaySaturday JobScheduleDay = "Saturday" + // JobScheduleDaySunday specifies the job schedule day sunday state for + // job schedule day. + JobScheduleDaySunday JobScheduleDay = "Sunday" + // JobScheduleDayThursday specifies the job schedule day thursday state + // for job schedule day. + JobScheduleDayThursday JobScheduleDay = "Thursday" + // JobScheduleDayTuesday specifies the job schedule day tuesday state for + // job schedule day. + JobScheduleDayTuesday JobScheduleDay = "Tuesday" + // JobScheduleDayWednesday specifies the job schedule day wednesday state + // for job schedule day. + JobScheduleDayWednesday JobScheduleDay = "Wednesday" +) + +// JobState enumerates the values for job state. +type JobState string + +const ( + // JobStateCompleted specifies the job state completed state for job state. + JobStateCompleted JobState = "Completed" + // JobStateDisabled specifies the job state disabled state for job state. + JobStateDisabled JobState = "Disabled" + // JobStateEnabled specifies the job state enabled state for job state. + JobStateEnabled JobState = "Enabled" + // JobStateFaulted specifies the job state faulted state for job state. + JobStateFaulted JobState = "Faulted" +) + +// RecurrenceFrequency enumerates the values for recurrence frequency. +type RecurrenceFrequency string + +const ( + // Day specifies the day state for recurrence frequency. + Day RecurrenceFrequency = "Day" + // Hour specifies the hour state for recurrence frequency. + Hour RecurrenceFrequency = "Hour" + // Minute specifies the minute state for recurrence frequency. + Minute RecurrenceFrequency = "Minute" + // Month specifies the month state for recurrence frequency. + Month RecurrenceFrequency = "Month" + // Week specifies the week state for recurrence frequency. + Week RecurrenceFrequency = "Week" +) + +// RetryType enumerates the values for retry type. 
+type RetryType string + +const ( + // Fixed specifies the fixed state for retry type. + Fixed RetryType = "Fixed" + // None specifies the none state for retry type. + None RetryType = "None" +) + +// SkuDefinition enumerates the values for sku definition. +type SkuDefinition string + +const ( + // Free specifies the free state for sku definition. + Free SkuDefinition = "Free" + // Premium specifies the premium state for sku definition. + Premium SkuDefinition = "Premium" + // Standard specifies the standard state for sku definition. + Standard SkuDefinition = "Standard" +) + +// BasicAuthentication is +type BasicAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` + Username *string `json:"username,omitempty"` + Password *string `json:"password,omitempty"` +} + +// ClientCertAuthentication is +type ClientCertAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` + Password *string `json:"password,omitempty"` + Pfx *string `json:"pfx,omitempty"` + CertificateThumbprint *string `json:"certificateThumbprint,omitempty"` + CertificateExpirationDate *date.Time `json:"certificateExpirationDate,omitempty"` + CertificateSubjectName *string `json:"certificateSubjectName,omitempty"` +} + +// HTTPAuthentication is +type HTTPAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` +} + +// HTTPRequest is +type HTTPRequest struct { + HTTPAuthentication *HTTPAuthentication `json:"httpAuthentication,omitempty"` + URI *string `json:"uri,omitempty"` + Method *string `json:"method,omitempty"` + Body *string `json:"body,omitempty"` + Headers *map[string]*string `json:"headers,omitempty"` +} + +// JobAction is +type JobAction struct { + Type JobActionType `json:"type,omitempty"` + Request *HTTPRequest `json:"request,omitempty"` + QueueMessage *StorageQueueMessage `json:"queueMessage,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` + ErrorAction *JobErrorAction `json:"errorAction,omitempty"` +} + +// JobCollectionDefinition is +type JobCollectionDefinition struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *JobCollectionProperties `json:"properties,omitempty"` +} + +// JobCollectionListResult is +type JobCollectionListResult struct { + autorest.Response `json:"-"` + Value *[]JobCollectionDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// JobCollectionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
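+//
+// The nil request / nil error pair is deliberate: it is the sentinel the
+// *NextResults methods check (req == nil) to detect that the final page has
+// already been reached.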
+func (client JobCollectionListResult) JobCollectionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// JobCollectionProperties is +type JobCollectionProperties struct { + Sku *Sku `json:"sku,omitempty"` + State JobCollectionState `json:"state,omitempty"` + Quota *JobCollectionQuota `json:"quota,omitempty"` +} + +// JobCollectionQuota is +type JobCollectionQuota struct { + MaxJobCount *int `json:"maxJobCount,omitempty"` + MaxJobOccurrence *int `json:"maxJobOccurrence,omitempty"` + MaxRecurrence *JobMaxRecurrence `json:"maxRecurrence,omitempty"` +} + +// JobDefinition is +type JobDefinition struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Properties *JobProperties `json:"properties,omitempty"` +} + +// JobErrorAction is +type JobErrorAction struct { + Type JobActionType `json:"type,omitempty"` + Request *HTTPRequest `json:"request,omitempty"` + QueueMessage *StorageQueueMessage `json:"queueMessage,omitempty"` + RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"` +} + +// JobHistoryDefinition is +type JobHistoryDefinition struct { + ID *string `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Name *string `json:"name,omitempty"` + Properties *JobHistoryDefinitionProperties `json:"properties,omitempty"` +} + +// JobHistoryDefinitionProperties is +type JobHistoryDefinitionProperties struct { + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + ExpectedExecutionTime *date.Time `json:"expectedExecutionTime,omitempty"` + ActionName JobHistoryActionName `json:"actionName,omitempty"` + Status JobExecutionStatus `json:"status,omitempty"` + Message *string `json:"message,omitempty"` + RetryCount *int `json:"retryCount,omitempty"` + RepeatCount *int `json:"repeatCount,omitempty"` +} + +// JobHistoryListResult is +type JobHistoryListResult struct { + autorest.Response `json:"-"` + Value *[]JobHistoryDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// JobHistoryListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client JobHistoryListResult) JobHistoryListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// JobListResult is +type JobListResult struct { + autorest.Response `json:"-"` + Value *[]JobDefinition `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// JobListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client JobListResult) JobListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// JobMaxRecurrence is +type JobMaxRecurrence struct { + Frequency RecurrenceFrequency `json:"frequency,omitempty"` + Interval *int `json:"interval,omitempty"` +} + +// JobProperties is +type JobProperties struct { + StartTime *date.Time `json:"startTime,omitempty"` + Action *JobAction `json:"action,omitempty"` + Recurrence *JobRecurrence `json:"recurrence,omitempty"` + State JobState `json:"state,omitempty"` + Status *JobStatus `json:"status,omitempty"` +} + +// JobRecurrence is +type JobRecurrence struct { + Frequency RecurrenceFrequency `json:"frequency,omitempty"` + Interval *int `json:"interval,omitempty"` + Count *int `json:"count,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Schedule *JobRecurrenceSchedule `json:"schedule,omitempty"` +} + +// JobRecurrenceSchedule is +type JobRecurrenceSchedule struct { + WeekDays *[]DayOfWeek `json:"weekDays,omitempty"` + Hours *[]int `json:"hours,omitempty"` + Minutes *[]int `json:"minutes,omitempty"` + MonthDays *[]int `json:"monthDays,omitempty"` + MonthlyOccurrences *[]JobRecurrenceScheduleMonthlyOccurrence `json:"monthlyOccurrences,omitempty"` +} + +// JobRecurrenceScheduleMonthlyOccurrence is +type JobRecurrenceScheduleMonthlyOccurrence struct { + Day JobScheduleDay `json:"day,omitempty"` + Occurrence *int `json:"Occurrence,omitempty"` +} + +// JobStatus is +type JobStatus struct { + ExecutionCount *int `json:"executionCount,omitempty"` + FailureCount *int `json:"failureCount,omitempty"` + FaultedCount *int `json:"faultedCount,omitempty"` + LastExecutionTime *date.Time `json:"lastExecutionTime,omitempty"` + NextExecutionTime *date.Time `json:"nextExecutionTime,omitempty"` +} + +// OAuthAuthentication is +type OAuthAuthentication struct { + Type HTTPAuthenticationType `json:"type,omitempty"` + Secret *string `json:"secret,omitempty"` + Tenant *string `json:"tenant,omitempty"` + Audience *string `json:"audience,omitempty"` + ClientID *string `json:"clientId,omitempty"` +} + +// RetryPolicy is +type RetryPolicy struct { + RetryType RetryType `json:"retryType,omitempty"` + RetryInterval *int `json:"retryInterval,omitempty"` + RetryCount *int `json:"retryCount,omitempty"` +} + +// Sku is +type Sku struct { + Name SkuDefinition `json:"name,omitempty"` +} + +// StorageQueueMessage is +type StorageQueueMessage struct { + StorageAccount *string `json:"storageAccount,omitempty"` + QueueName *string `json:"queueName,omitempty"` + SasToken *string `json:"sasToken,omitempty"` + Message *string `json:"message,omitempty"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go' --- src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package scheduler + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "scheduler", "2014-08-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/search' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go' --- src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/search/adminkeys.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +package search + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// AdminKeysClient is the client that can be used to manage Azure Search +// services and API keys. +type AdminKeysClient struct { + ManagementClient +} + +// NewAdminKeysClient creates an instance of the AdminKeysClient client. +func NewAdminKeysClient(subscriptionID string) AdminKeysClient { + return NewAdminKeysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAdminKeysClientWithBaseURI creates an instance of the AdminKeysClient +// client. +func NewAdminKeysClientWithBaseURI(baseURI string, subscriptionID string) AdminKeysClient { + return AdminKeysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List returns the primary and secondary API keys for the given Azure Search +// service. +// +// resourceGroupName is the name of the resource group within the current +// subscription. serviceName is the name of the Search service for which to +// list admin keys. 
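+//
+// A hypothetical usage sketch (assumed names and values, not part of the
+// generated code); authentication setup on the embedded autorest.Client is
+// omitted because it depends on the go-autorest version in use:
+//
+//    c := search.NewAdminKeysClient("<subscription-id>")
+//    keys, err := c.List("myResourceGroup", "mySearchService")
+//    if err == nil && keys.PrimaryKey != nil {
+//        fmt.Println("primary key:", *keys.PrimaryKey)
+//    }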
+func (client AdminKeysClient) List(resourceGroupName string, serviceName string) (result AdminKeyResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, serviceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "search/AdminKeysClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "search/AdminKeysClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "search/AdminKeysClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AdminKeysClient) ListPreparer(resourceGroupName string, serviceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "serviceName": url.QueryEscape(serviceName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}/listAdminKeys"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AdminKeysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AdminKeysClient) ListResponder(resp *http.Response) (result AdminKeyResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/search/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/search/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/search/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +package search + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
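+//
+// Editorial note (not part of the generated code): ManagementClient, defined
+// below, carries everything the per-operation clients in this package share:
+// the embedded autorest.Client pipeline, the BaseURI, and the SubscriptionID.
+// AdminKeysClient, QueryKeysClient, and ServicesClient all embed it, so shared
+// configuration can be applied once and reused, e.g. (a sketch with assumed
+// names):
+//
+//    base := search.New("<subscription-id>")
+//    svc := search.ServicesClient{ManagementClient: base}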
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// APIVersion is the version of the Search API
+	APIVersion = "2015-02-28"
+
+	// DefaultBaseURI is the default URI used for the Search service
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the client that can be used to manage Azure Search
+// services and API keys.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/search/models.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/search/models.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/search/models.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,159 @@
+package search
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+)
+
+// ProvisioningState enumerates the values for provisioning state.
+type ProvisioningState string
+
+const (
+	// ProvisioningStateFailed specifies the provisioning state failed state
+	// for provisioning state.
+	ProvisioningStateFailed ProvisioningState = "failed"
+	// ProvisioningStateProvisioning specifies the provisioning state
+	// provisioning state for provisioning state.
+	ProvisioningStateProvisioning ProvisioningState = "provisioning"
+	// ProvisioningStateSucceeded specifies the provisioning state succeeded
+	// state for provisioning state.
+	ProvisioningStateSucceeded ProvisioningState = "succeeded"
+)
+
+// ServiceStatus enumerates the values for service status.
+type ServiceStatus string
+
+const (
+	// ServiceStatusDegraded specifies the service status degraded state for
+	// service status.
+	ServiceStatusDegraded ServiceStatus = "degraded"
+	// ServiceStatusDeleting specifies the service status deleting state for
+	// service status.
+	ServiceStatusDeleting ServiceStatus = "deleting"
+	// ServiceStatusDisabled specifies the service status disabled state for
+	// service status.
+	ServiceStatusDisabled ServiceStatus = "disabled"
+	// ServiceStatusError specifies the service status error state for service
+	// status.
+	ServiceStatusError ServiceStatus = "error"
+	// ServiceStatusProvisioning specifies the service status provisioning
+	// state for service status.
+	ServiceStatusProvisioning ServiceStatus = "provisioning"
+	// ServiceStatusRunning specifies the service status running state for
+	// service status.
+	ServiceStatusRunning ServiceStatus = "running"
+)
+
+// SkuType enumerates the values for sku type.
+type SkuType string
+
+const (
+	// Free specifies the free state for sku type.
+	Free SkuType = "free"
+	// Standard specifies the standard state for sku type.
+	Standard SkuType = "standard"
+	// Standard2 specifies the standard 2 state for sku type.
+	Standard2 SkuType = "standard2"
+)
+
+// AdminKeyResult is the response containing the primary and secondary API
+// keys for a given Azure Search service.
+type AdminKeyResult struct {
+	autorest.Response `json:"-"`
+	PrimaryKey        *string `json:"primaryKey,omitempty"`
+	SecondaryKey      *string `json:"secondaryKey,omitempty"`
+}
+
+// ListQueryKeysResult is the response containing the query API keys for a
+// given Azure Search service.
+type ListQueryKeysResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]QueryKey `json:"value,omitempty"`
+}
+
+// QueryKey describes an API key for a given Azure Search service that has
+// permissions for query operations only.
+type QueryKey struct {
+	Name *string `json:"name,omitempty"`
+	Key  *string `json:"key,omitempty"`
+}
+
+// Resource is an Azure resource.
+type Resource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Type     *string             `json:"type,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// ServiceCreateOrUpdateParameters contains the properties that describe an
+// Azure Search service.
+type ServiceCreateOrUpdateParameters struct {
+	Location   *string             `json:"location,omitempty"`
+	Tags       *map[string]*string `json:"tags,omitempty"`
+	Properties *ServiceProperties  `json:"properties,omitempty"`
+}
+
+// ServiceListResult is the response containing a list of Azure Search
+// services for a given resource group.
+type ServiceListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]ServiceResource `json:"value,omitempty"`
+}
+
+// ServiceProperties defines the properties of an Azure Search service that
+// can be modified.
+type ServiceProperties struct {
+	Sku            *Sku `json:"sku,omitempty"`
+	ReplicaCount   *int `json:"replicaCount,omitempty"`
+	PartitionCount *int `json:"partitionCount,omitempty"`
+}
+
+// ServiceReadableProperties defines all the properties of an Azure Search
+// service.
+type ServiceReadableProperties struct {
+	Status            ServiceStatus     `json:"status,omitempty"`
+	StatusDetails     *string           `json:"statusDetails,omitempty"`
+	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+	Sku               *Sku              `json:"sku,omitempty"`
+	ReplicaCount      *int              `json:"replicaCount,omitempty"`
+	PartitionCount    *int              `json:"partitionCount,omitempty"`
+}
+
+// ServiceResource describes an Azure Search service and its current state.
+type ServiceResource struct {
+	autorest.Response `json:"-"`
+	Name              *string                    `json:"name,omitempty"`
+	Location          *string                    `json:"location,omitempty"`
+	Tags              *map[string]*string        `json:"tags,omitempty"`
+	Properties        *ServiceReadableProperties `json:"properties,omitempty"`
+}
+
+// Sku defines the SKU of an Azure Search service, which determines price
+// tier and capacity limits.
+type Sku struct { + Name SkuType `json:"name,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go' --- src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/search/querykeys.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,107 @@ +package search + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// QueryKeysClient is the client that can be used to manage Azure Search +// services and API keys. +type QueryKeysClient struct { + ManagementClient +} + +// NewQueryKeysClient creates an instance of the QueryKeysClient client. +func NewQueryKeysClient(subscriptionID string) QueryKeysClient { + return NewQueryKeysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewQueryKeysClientWithBaseURI creates an instance of the QueryKeysClient +// client. +func NewQueryKeysClientWithBaseURI(baseURI string, subscriptionID string) QueryKeysClient { + return QueryKeysClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List returns the list of query API keys for the given Azure Search service. +// +// resourceGroupName is the name of the resource group within the current +// subscription. serviceName is the name of the Search service for which to +// list query keys. +func (client QueryKeysClient) List(resourceGroupName string, serviceName string) (result ListQueryKeysResult, ae error) { + req, err := client.ListPreparer(resourceGroupName, serviceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "search/QueryKeysClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "search/QueryKeysClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "search/QueryKeysClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
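+//
+// Editorial note (not part of the generated code): List above is a
+// convenience wrapper over the three-phase autorest pipeline, and the phases
+// can also be driven individually when one of them needs to be customized
+// (qk is an assumed QueryKeysClient):
+//
+//    req, err := qk.ListPreparer("myResourceGroup", "mySearchService")
+//    if err == nil {
+//        var resp *http.Response
+//        if resp, err = qk.ListSender(req); err == nil { // expects http.StatusOK
+//            _, err = qk.ListResponder(resp) // unmarshals JSON, closes the body
+//        }
+//    }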
+func (client QueryKeysClient) ListPreparer(resourceGroupName string, serviceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "serviceName": url.QueryEscape(serviceName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}/listQueryKeys"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client QueryKeysClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client QueryKeysClient) ListResponder(resp *http.Response) (result ListQueryKeysResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/search/services.go' --- src/github.com/Azure/azure-sdk-for-go/arm/search/services.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/search/services.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,238 @@ +package search + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// ServicesClient is the client that can be used to manage Azure Search +// services and API keys. +type ServicesClient struct { + ManagementClient +} + +// NewServicesClient creates an instance of the ServicesClient client. +func NewServicesClient(subscriptionID string) ServicesClient { + return NewServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewServicesClientWithBaseURI creates an instance of the ServicesClient +// client. +func NewServicesClientWithBaseURI(baseURI string, subscriptionID string) ServicesClient { + return ServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a Search service in the given resource +// group. If the Search service already exists, all properties will be +// updated with the given values. 
+// +// resourceGroupName is the name of the resource group within the current +// subscription. serviceName is the name of the Search service to create or +// update. parameters is the properties to set or update on the Search +// service. +func (client ServicesClient) CreateOrUpdate(resourceGroupName string, serviceName string, parameters ServiceCreateOrUpdateParameters) (result ServiceResource, ae error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, serviceName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "search/ServicesClient", "CreateOrUpdate", "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "search/ServicesClient", "CreateOrUpdate", "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "search/ServicesClient", "CreateOrUpdate", "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ServicesClient) CreateOrUpdatePreparer(resourceGroupName string, serviceName string, parameters ServiceCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "serviceName": url.QueryEscape(serviceName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusCreated) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ServiceResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Search service in the given resource group, along with its +// associated resources. +// +// resourceGroupName is the name of the resource group within the current +// subscription. serviceName is the name of the Search service to delete. 
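+//
+// A hypothetical usage sketch (svc is an assumed ServicesClient, not part of
+// the generated code). DeleteSender below treats 200, 204 and 404 as success,
+// so deleting a service that does not exist is not surfaced as an error:
+//
+//    res, err := svc.Delete("myResourceGroup", "mySearchService")
+//    if err == nil && res.Response != nil {
+//        fmt.Println("delete status:", res.Response.StatusCode)
+//    }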
+func (client ServicesClient) Delete(resourceGroupName string, serviceName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, serviceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "search/ServicesClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "search/ServicesClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "search/ServicesClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ServicesClient) DeletePreparer(resourceGroupName string, serviceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "serviceName": url.QueryEscape(serviceName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{serviceName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ServicesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNotFound, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNotFound, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// List returns a list of all Search services in the given resource group. +// +// resourceGroupName is the name of the resource group within the current +// subscription. +func (client ServicesClient) List(resourceGroupName string) (result ServiceListResult, ae error) { + req, err := client.ListPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "search/ServicesClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "search/ServicesClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "search/ServicesClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client ServicesClient) ListPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ServicesClient) ListResponder(resp *http.Response) (result ServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/search/version.go' --- src/github.com/Azure/azure-sdk-for-go/arm/search/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/search/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package search + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "search", "2015-02-28") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/storage' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go' --- src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,645 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// AccountsClient is the client for the Accounts methods of the Storage +// service. +type AccountsClient struct { + ManagementClient +} + +// NewAccountsClient creates an instance of the AccountsClient client. +func NewAccountsClient(subscriptionID string) AccountsClient { + return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient +// client. +func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks that account name is valid and is not in use. +// +// accountName is the name of the storage account within the specified +// resource group. Storage account names must be between 3 and 24 characters +// in length and use numbers and lower-case letters only. +func (client AccountsClient) CheckNameAvailability(accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, ae error) { + req, err := client.CheckNameAvailabilityPreparer(accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", "Failure preparing request") + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", "Failure sending request") + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "CheckNameAvailability", "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability"), + autorest.WithJSON(accountName), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. 
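+//
+// Editorial sketch for the CheckNameAvailability wrapper above (ac is an
+// assumed AccountsClient; values are assumptions, not part of the generated
+// code); the Type field carries the ARM resource type for storage accounts:
+//
+//    name := "mystorageaccount"
+//    typ := "Microsoft.Storage/storageAccounts"
+//    avail, err := ac.CheckNameAvailability(storage.AccountCheckNameAvailabilityParameters{
+//        Name: &name,
+//        Type: &typ,
+//    })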
+func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Create asynchronously creates a new storage account with the specified
+// parameters. Existing accounts cannot be updated with this API; use the
+// Update Storage Account API instead. If the account already exists and a
+// subsequent PUT request is issued with the exact same set of properties,
+// HTTP 200 is returned.
+//
+// resourceGroupName is the name of the resource group within the user’s
+// subscription. accountName is the name of the storage account within the
+// specified resource group. Storage account names must be between 3 and 24
+// characters in length and use numbers and lower-case letters only.
+// parameters is the parameters to provide for the created account.
+func (client AccountsClient) Create(resourceGroupName string, accountName string, parameters AccountCreateParameters) (result Account, ae error) {
+	req, err := client.CreatePreparer(resourceGroupName, accountName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", "Failure preparing request")
+	}
+
+	resp, err := client.CreateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", "Failure sending request")
+	}
+
+	result, err = client.CreateResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Create", "Failure responding to request")
+	}
+
+	return
+}
+
+// CreatePreparer prepares the Create request.
+func (client AccountsClient) CreatePreparer(resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       url.QueryEscape(accountName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"),
+		autorest.WithJSON(parameters),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// CreateSender sends the Create request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK, http.StatusAccepted)
+}
+
+// CreateResponder handles the response to the Create request. The method always
+// closes the http.Response Body.
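+//
+// Editorial note (not part of the generated code): because CreateSender above
+// accepts both 200 and 202, a 202 Accepted result means provisioning is still
+// in progress and the caller is expected to poll, e.g. via GetProperties,
+// until the account reaches the Succeeded provisioning state. A crude sketch
+// with assumed names; the ProvisioningState field on AccountProperties is an
+// assumption (the type is defined later in models.go):
+//
+//    acct, err := ac.Create(group, name, params)
+//    for err == nil && acct.Properties != nil &&
+//        acct.Properties.ProvisioningState != storage.Succeeded { // field assumed
+//        time.Sleep(10 * time.Second) // fixed interval, for illustration only
+//        acct, err = ac.GetProperties(group, name)
+//    }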
+func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a storage account in Microsoft Azure. +// +// resourceGroupName is the name of the resource group within the user’s +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. +func (client AccountsClient) Delete(resourceGroupName string, accountName string) (result autorest.Response, ae error) { + req, err := client.DeletePreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Delete", "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AccountsClient) DeletePreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK, http.StatusNoContent) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns the properties for the specified storage account +// including but not limited to name, account type, location, and account +// status. The ListKeys operation should be used to retrieve storage keys. +// +// resourceGroupName is the name of the resource group within the user’s +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. 
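+//
+// A hypothetical usage sketch (ac is an assumed AccountsClient, not part of
+// the generated code); all result fields are pointers and need a nil check
+// before dereferencing:
+//
+//    acct, err := ac.GetProperties("myResourceGroup", "mystorageaccount")
+//    if err == nil && acct.Location != nil {
+//        fmt.Println("location:", *acct.Location)
+//    }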
+func (client AccountsClient) GetProperties(resourceGroupName string, accountName string) (result Account, ae error) { + req, err := client.GetPropertiesPreparer(resourceGroupName, accountName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", "Failure preparing request") + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", "Failure sending request") + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "GetProperties", "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. +func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the storage accounts available under the subscription. Note +// that storage keys are not returned; use the ListKeys operation for this. +func (client AccountsClient) List() (result AccountListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client AccountsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the +// given resource group. Note that storage keys are not returned; use the +// ListKeys operation for this. +// +// resourceGroupName is the name of the resource group within the user’s +// subscription. +func (client AccountsClient) ListByResourceGroup(resourceGroupName string) (result AccountListResult, ae error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "ListByResourceGroup", "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListKeys lists the access keys for the specified storage account.
+//
+// resourceGroupName is the name of the resource group. accountName is the
+// name of the storage account.
+func (client AccountsClient) ListKeys(resourceGroupName string, accountName string) (result AccountKeys, ae error) {
+	req, err := client.ListKeysPreparer(resourceGroupName, accountName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", "Failure preparing request")
+	}
+
+	resp, err := client.ListKeysSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", "Failure sending request")
+	}
+
+	result, err = client.ListKeysResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "ListKeys", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"accountName":       url.QueryEscape(accountName),
+		"resourceGroupName": url.QueryEscape(resourceGroupName),
+		"subscriptionId":    url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountKeys, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// RegenerateKey regenerates the access keys for the specified storage account.
+//
+// resourceGroupName is the name of the resource group within the user’s
+// subscription. accountName is the name of the storage account within the
+// specified resource group. Storage account names must be between 3 and 24
+// characters in length and use numbers and lower-case letters only.
+// regenerateKey specifies the name of the key which should be regenerated.
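+//
+// A hypothetical sketch (ac is an assumed AccountsClient, not part of the
+// generated code): the KeyName constants Key1 and Key2 defined in models.go
+// are the expected values for the key to regenerate; the exact layout of
+// AccountRegenerateKeyParameters is defined in models.go, and the field name
+// and type used below are assumptions:
+//
+//    keys, err := ac.RegenerateKey(group, name, storage.AccountRegenerateKeyParameters{
+//        KeyName: storage.Key1, // field name and type assumed
+//    })
+//    if err == nil && keys.Key1 != nil {
+//        fmt.Println("new key1:", *keys.Key1)
+//    }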
+func (client AccountsClient) RegenerateKey(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountKeys, ae error) { + req, err := client.RegenerateKeyPreparer(resourceGroupName, accountName, regenerateKey) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", "Failure preparing request") + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", "Failure sending request") + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "RegenerateKey", "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. +func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey"), + autorest.WithJSON(regenerateKey), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. +func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates the account type or tags for a storage account. It can also +// be used to add a custom domain (note that custom domains cannot be added +// via the Create operation). Only one custom domain is supported per storage +// account. This API can only be used to update one of tags, accountType, or +// customDomain per call. To update multiple of these properties, call the +// API multiple times with one change per call. This call does not change the +// storage keys for the account. If you want to change storage account keys, +// use the RegenerateKey operation. The location and name of the storage +// account cannot be changed after creation. +// +// resourceGroupName is the name of the resource group within the user’s +// subscription. accountName is the name of the storage account within the +// specified resource group. Storage account names must be between 3 and 24 +// characters in length and use numbers and lower-case letters only. 
+// parameters is the parameters to update on the account. Note that only one +// property can be changed at a time using this API. +func (client AccountsClient) Update(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, ae error) { + req, err := client.UpdatePreparer(resourceGroupName, accountName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/AccountsClient", "Update", "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": url.QueryEscape(accountName), + "resourceGroupName": url.QueryEscape(resourceGroupName), + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}"), + autorest.WithJSON(parameters), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/storage/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Storage + APIVersion = "2015-05-01-preview" + + // DefaultBaseURI is the default URI used for the service Storage + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Storage. +type ManagementClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go' --- src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/storage/models.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,238 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/date" +) + +// AccountStatus enumerates the values for account status. +type AccountStatus string + +const ( + // Available specifies the available state for account status. + Available AccountStatus = "Available" + // Unavailable specifies the unavailable state for account status. + Unavailable AccountStatus = "Unavailable" +) + +// AccountType enumerates the values for account type. +type AccountType string + +const ( + // PremiumLRS specifies the premium lrs state for account type. + PremiumLRS AccountType = "Premium_LRS" + // StandardGRS specifies the standard grs state for account type. + StandardGRS AccountType = "Standard_GRS" + // StandardLRS specifies the standard lrs state for account type. + StandardLRS AccountType = "Standard_LRS" + // StandardRAGRS specifies the standard ragrs state for account type. + StandardRAGRS AccountType = "Standard_RAGRS" + // StandardZRS specifies the standard zrs state for account type. + StandardZRS AccountType = "Standard_ZRS" +) + +// KeyName enumerates the values for key name. +type KeyName string + +const ( + // Key1 specifies the key 1 state for key name. 
+ Key1 KeyName = "key1" + // Key2 specifies the key 2 state for key name. + Key2 KeyName = "key2" +) + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Creating specifies the creating state for provisioning state. + Creating ProvisioningState = "Creating" + // ResolvingDNS specifies the resolving dns state for provisioning state. + ResolvingDNS ProvisioningState = "ResolvingDns" + // Succeeded specifies the succeeded state for provisioning state. + Succeeded ProvisioningState = "Succeeded" +) + +// Reason enumerates the values for reason. +type Reason string + +const ( + // AccountNameInvalid specifies the account name invalid state for reason. + AccountNameInvalid Reason = "AccountNameInvalid" + // AlreadyExists specifies the already exists state for reason. + AlreadyExists Reason = "AlreadyExists" +) + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Bytes specifies the bytes state for usage unit. + Bytes UsageUnit = "Bytes" + // BytesPerSecond specifies the bytes per second state for usage unit. + BytesPerSecond UsageUnit = "BytesPerSecond" + // Count specifies the count state for usage unit. + Count UsageUnit = "Count" + // CountsPerSecond specifies the counts per second state for usage unit. + CountsPerSecond UsageUnit = "CountsPerSecond" + // Percent specifies the percent state for usage unit. + Percent UsageUnit = "Percent" + // Seconds specifies the seconds state for usage unit. + Seconds UsageUnit = "Seconds" +) + +// Account is the storage account. +type Account struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AccountProperties `json:"properties,omitempty"` +} + +// AccountCheckNameAvailabilityParameters is +type AccountCheckNameAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// AccountCreateParameters is the parameters to provide for the account. +type AccountCreateParameters struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *AccountPropertiesCreateParameters `json:"properties,omitempty"` +} + +// AccountKeys is the access keys for the storage account. +type AccountKeys struct { + autorest.Response `json:"-"` + Key1 *string `json:"key1,omitempty"` + Key2 *string `json:"key2,omitempty"` +} + +// AccountListResult is the list storage accounts operation response. 
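+// Value holds all returned accounts; the operation is not paged and the
+// struct carries no continuation link.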
+type AccountListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Account `json:"value,omitempty"`
+}
+
+// AccountProperties is
+type AccountProperties struct {
+	ProvisioningState   ProvisioningState `json:"provisioningState,omitempty"`
+	AccountType         AccountType       `json:"accountType,omitempty"`
+	PrimaryEndpoints    *Endpoints        `json:"primaryEndpoints,omitempty"`
+	PrimaryLocation     *string           `json:"primaryLocation,omitempty"`
+	StatusOfPrimary     AccountStatus     `json:"statusOfPrimary,omitempty"`
+	LastGeoFailoverTime *date.Time        `json:"lastGeoFailoverTime,omitempty"`
+	SecondaryLocation   *string           `json:"secondaryLocation,omitempty"`
+	StatusOfSecondary   AccountStatus     `json:"statusOfSecondary,omitempty"`
+	CreationTime        *date.Time        `json:"creationTime,omitempty"`
+	CustomDomain        *CustomDomain     `json:"customDomain,omitempty"`
+	SecondaryEndpoints  *Endpoints        `json:"secondaryEndpoints,omitempty"`
+}
+
+// AccountPropertiesCreateParameters is
+type AccountPropertiesCreateParameters struct {
+	AccountType AccountType `json:"accountType,omitempty"`
+}
+
+// AccountPropertiesUpdateParameters is
+type AccountPropertiesUpdateParameters struct {
+	AccountType  AccountType   `json:"accountType,omitempty"`
+	CustomDomain *CustomDomain `json:"customDomain,omitempty"`
+}
+
+// AccountRegenerateKeyParameters is
+type AccountRegenerateKeyParameters struct {
+	KeyName KeyName `json:"keyName,omitempty"`
+}
+
+// AccountUpdateParameters is the parameters to update on the account.
+type AccountUpdateParameters struct {
+	ID         *string                            `json:"id,omitempty"`
+	Name       *string                            `json:"name,omitempty"`
+	Type       *string                            `json:"type,omitempty"`
+	Location   *string                            `json:"location,omitempty"`
+	Tags       *map[string]*string                `json:"tags,omitempty"`
+	Properties *AccountPropertiesUpdateParameters `json:"properties,omitempty"`
+}
+
+// CheckNameAvailabilityResult is the CheckNameAvailability operation response.
+type CheckNameAvailabilityResult struct {
+	autorest.Response `json:"-"`
+	NameAvailable     *bool   `json:"nameAvailable,omitempty"`
+	Reason            Reason  `json:"reason,omitempty"`
+	Message           *string `json:"message,omitempty"`
+}
+
+// CustomDomain is the custom domain assigned to this storage account. This
+// can be set via Update.
+type CustomDomain struct {
+	Name         *string `json:"name,omitempty"`
+	UseSubDomain *bool   `json:"useSubDomain,omitempty"`
+}
+
+// Endpoints is the URIs that are used to perform a retrieval of a public
+// blob, queue or table object.
+type Endpoints struct {
+	Blob  *string `json:"blob,omitempty"`
+	Queue *string `json:"queue,omitempty"`
+	Table *string `json:"table,omitempty"`
+}
+
+// Resource is
+type Resource struct {
+	ID       *string             `json:"id,omitempty"`
+	Name     *string             `json:"name,omitempty"`
+	Type     *string             `json:"type,omitempty"`
+	Location *string             `json:"location,omitempty"`
+	Tags     *map[string]*string `json:"tags,omitempty"`
+}
+
+// SubResource is
+type SubResource struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// Usage describes Storage Resource Usage.
+type Usage struct {
+	Unit         UsageUnit  `json:"unit,omitempty"`
+	CurrentValue *int       `json:"currentValue,omitempty"`
+	Limit        *int       `json:"limit,omitempty"`
+	Name         *UsageName `json:"name,omitempty"`
+}
+
+// UsageListResult is the List Usages operation response.
+type UsageListResult struct {
+	autorest.Response `json:"-"`
+	Value             *[]Usage `json:"value,omitempty"`
+}
+
+// UsageName is the usage name.
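+// Value carries the name as used by the service; LocalizedValue is its
+// localized, display-ready form.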
+type UsageName struct { + Value *string `json:"value,omitempty"` + LocalizedValue *string `json:"localizedValue,omitempty"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go' --- src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,103 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// UsageOperationsClient is the client for the UsageOperations methods of the +// Storage service. +type UsageOperationsClient struct { + ManagementClient +} + +// NewUsageOperationsClient creates an instance of the UsageOperationsClient +// client. +func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the current usage count and the limit for the resources under the +// subscription. +func (client UsageOperationsClient) List() (result UsageListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "storage/UsageOperationsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsageOperationsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client UsageOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return client.Send(req, http.StatusOK) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageOperationsClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + autorest.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go' --- src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/storage/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "0" + minor = "1" + patch = "1" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "storage", "2015-05-01-preview") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/arm/subscriptions' === added file 'src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go' --- src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,198 @@ +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest"
+	"net/http"
+	"net/url"
+)
+
+const (
+	// APIVersion is the version of the Subscriptions
+	APIVersion = "2014-04-01-preview"
+
+	// DefaultBaseURI is the default URI used for the service Subscriptions
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Subscriptions.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
+
+// Get gets details about a particular subscription.
+//
+// subscriptionID is the ID of the subscription.
+func (client ManagementClient) Get(subscriptionID string) (result Subscription, ae error) {
+	req, err := client.GetPreparer(subscriptionID)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "Get", "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "Get", "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "Get", "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client ManagementClient) GetPreparer(subscriptionID string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(subscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions/{subscriptionId}"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) GetSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) GetResponder(resp *http.Response) (result Subscription, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets a list of the subscriptionIds.
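+// The result may span multiple pages; follow it with ListNextResults to
+// retrieve the remaining pages. A minimal paging sketch, assuming the
+// embedded autorest.Client has already been configured with credentials
+// ("<subscription id>" is a placeholder):
+//
+//	client := subscriptions.New("<subscription id>")
+//	res, err := client.List()
+//	for err == nil {
+//		// ... consume res.Value ...
+//		if res.NextLink == nil {
+//			break
+//		}
+//		res, err = client.ListNextResults(res)
+//	}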
+func (client ManagementClient) List() (result ListResult, ae error) {
+	req, err := client.ListPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure preparing request")
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure sending request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client ManagementClient) ListPreparer() (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": url.QueryEscape(client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPath("/subscriptions"),
+		autorest.WithPathParameters(pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ManagementClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ManagementClient) ListResponder(resp *http.Response) (result ListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client ManagementClient) ListNextResults(lastResults ListResult) (result ListResult, ae error) {
+	req, err := lastResults.ListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "subscriptions/ManagementClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/models.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,110 @@
+package subscriptions
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// DeploymentExtendedFilter is deployment filter. +type DeploymentExtendedFilter struct { + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// GenericResourceFilter is resource filter. +type GenericResourceFilter struct { + ResourceType *string `json:"resourceType,omitempty"` + Tagname *string `json:"tagname,omitempty"` + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// ListResult is subscription list operation response. +type ListResult struct { + autorest.Response `json:"-"` + Value *[]Subscription `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListResult) ListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceGroupFilter is resource group filter. +type ResourceGroupFilter struct { + TagName *string `json:"tagName,omitempty"` + TagValue *string `json:"tagValue,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} + +// Subscription is subscription information. +type Subscription struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + SubscriptionID *string `json:"subscriptionId,omitempty"` + DisplayName *string `json:"displayName,omitempty"` + State *string `json:"state,omitempty"` +} + +// TenantIDDescription is tenant Id information +type TenantIDDescription struct { + ID *string `json:"id,omitempty"` + TenantID *string `json:"tenantId,omitempty"` +} + +// TenantListResult is tenant Ids information. +type TenantListResult struct { + autorest.Response `json:"-"` + Value *[]TenantIDDescription `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// TenantListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
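+// The prepared request, when non-nil, targets the URL in NextLink verbatim.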
+func (client TenantListResult) TenantListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} === added file 'src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go' --- src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/tenants.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,124 @@ +package subscriptions + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/github.com/Azure/go-autorest/autorest" + "net/http" + "net/url" +) + +// TenantsClient is the client for the Tenants methods of the Subscriptions +// service. +type TenantsClient struct { + ManagementClient +} + +// NewTenantsClient creates an instance of the TenantsClient client. +func NewTenantsClient(subscriptionID string) TenantsClient { + return NewTenantsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTenantsClientWithBaseURI creates an instance of the TenantsClient client. +func NewTenantsClientWithBaseURI(baseURI string, subscriptionID string) TenantsClient { + return TenantsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets a list of the tenantIds. +func (client TenantsClient) List() (result TenantListResult, ae error) { + req, err := client.ListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + ae = autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TenantsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": url.QueryEscape(client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/tenants"), + autorest.WithPathParameters(pathParameters), + autorest.WithQueryParameters(queryParameters)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. 
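+// Any status code other than 200 (http.StatusOK) is reported as an error.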
+func (client TenantsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return client.Send(req, http.StatusOK)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client TenantsClient) ListResponder(resp *http.Response) (result TenantListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListNextResults retrieves the next set of results, if any.
+func (client TenantsClient) ListNextResults(lastResults TenantListResult) (result TenantListResult, ae error) {
+	req, err := lastResults.TenantListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure sending next results request")
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		ae = autorest.NewErrorWithError(err, "subscriptions/TenantsClient", "List", "Failure responding to next results request")
+	}
+
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go'
--- src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/arm/subscriptions/version.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,43 @@
+package subscriptions
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 0.11.0.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"fmt"
+)
+
+const (
+	major = "0"
+	minor = "1"
+	patch = "1"
+	// Always begin a "tag" with a dash (as per http://semver.org)
+	tag             = "-beta"
+	semVerFormat    = "%s.%s.%s%s"
+	userAgentFormat = "Azure-SDK-for-Go/%s;Package arm/%s;API %s"
+)
+
+// UserAgent returns the UserAgent string to use when sending http.Requests.
+func UserAgent() string {
+	return fmt.Sprintf(userAgentFormat, Version(), "subscriptions", "2014-04-01-preview")
+}
+
+// Version returns the semantic version (see http://semver.org) of the client.
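+// For this package that is currently "0.1.1-beta".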
+func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/core' === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http' === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,206 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements CGI from the perspective of a child +// process. + +package cgi + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" +) + +// Request returns the HTTP request as represented in the current +// environment. This assumes the current program is being run +// by a web server in a CGI environment. +// The returned Request's Body is populated, if applicable. +func Request() (*http.Request, error) { + r, err := RequestFromMap(envMap(os.Environ())) + if err != nil { + return nil, err + } + if r.ContentLength > 0 { + r.Body = ioutil.NopCloser(io.LimitReader(os.Stdin, r.ContentLength)) + } + return r, nil +} + +func envMap(env []string) map[string]string { + m := make(map[string]string) + for _, kv := range env { + if idx := strings.Index(kv, "="); idx != -1 { + m[kv[:idx]] = kv[idx+1:] + } + } + return m +} + +// RequestFromMap creates an http.Request from CGI variables. +// The returned Request's Body field is not populated. +func RequestFromMap(params map[string]string) (*http.Request, error) { + r := new(http.Request) + r.Method = params["REQUEST_METHOD"] + if r.Method == "" { + return nil, errors.New("cgi: no REQUEST_METHOD in environment") + } + + r.Proto = params["SERVER_PROTOCOL"] + var ok bool + r.ProtoMajor, r.ProtoMinor, ok = http.ParseHTTPVersion(r.Proto) + if !ok { + return nil, errors.New("cgi: invalid SERVER_PROTOCOL version") + } + + r.Close = true + r.Trailer = http.Header{} + r.Header = http.Header{} + + r.Host = params["HTTP_HOST"] + + if lenstr := params["CONTENT_LENGTH"]; lenstr != "" { + clen, err := strconv.ParseInt(lenstr, 10, 64) + if err != nil { + return nil, errors.New("cgi: bad CONTENT_LENGTH in environment: " + lenstr) + } + r.ContentLength = clen + } + + if ct := params["CONTENT_TYPE"]; ct != "" { + r.Header.Set("Content-Type", ct) + } + + // Copy "HTTP_FOO_BAR" variables to "Foo-Bar" Headers + for k, v := range params { + if !strings.HasPrefix(k, "HTTP_") || k == "HTTP_HOST" { + continue + } + r.Header.Add(strings.Replace(k[5:], "_", "-", -1), v) + } + + // TODO: cookies. parsing them isn't exported, though. + + uriStr := params["REQUEST_URI"] + if uriStr == "" { + // Fallback to SCRIPT_NAME, PATH_INFO and QUERY_STRING. + uriStr = params["SCRIPT_NAME"] + params["PATH_INFO"] + s := params["QUERY_STRING"] + if s != "" { + uriStr += "?" + s + } + } + + // There's apparently a de-facto standard for this. + // http://docstore.mik.ua/orelly/linux/cgi/ch03_02.htm#ch03-35636 + if s := params["HTTPS"]; s == "on" || s == "ON" || s == "1" { + r.TLS = &tls.ConnectionState{HandshakeComplete: true} + } + + if r.Host != "" { + // Hostname is provided, so we can reasonably construct a URL. 
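+		// The scheme follows the HTTPS environment variable parsed above:
+		// https when r.TLS is set, http otherwise.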
+ rawurl := r.Host + uriStr + if r.TLS == nil { + rawurl = "http://" + rawurl + } else { + rawurl = "https://" + rawurl + } + url, err := url.Parse(rawurl) + if err != nil { + return nil, errors.New("cgi: failed to parse host and REQUEST_URI into a URL: " + rawurl) + } + r.URL = url + } + // Fallback logic if we don't have a Host header or the URL + // failed to parse + if r.URL == nil { + url, err := url.Parse(uriStr) + if err != nil { + return nil, errors.New("cgi: failed to parse REQUEST_URI into a URL: " + uriStr) + } + r.URL = url + } + + // Request.RemoteAddr has its port set by Go's standard http + // server, so we do here too. We don't have one, though, so we + // use a dummy one. + r.RemoteAddr = net.JoinHostPort(params["REMOTE_ADDR"], "0") + + return r, nil +} + +// Serve executes the provided Handler on the currently active CGI +// request, if any. If there's no current CGI environment +// an error is returned. The provided handler may be nil to use +// http.DefaultServeMux. +func Serve(handler http.Handler) error { + req, err := Request() + if err != nil { + return err + } + if handler == nil { + handler = http.DefaultServeMux + } + rw := &response{ + req: req, + header: make(http.Header), + bufw: bufio.NewWriter(os.Stdout), + } + handler.ServeHTTP(rw, req) + rw.Write(nil) // make sure a response is sent + if err = rw.bufw.Flush(); err != nil { + return err + } + return nil +} + +type response struct { + req *http.Request + header http.Header + bufw *bufio.Writer + headerSent bool +} + +func (r *response) Flush() { + r.bufw.Flush() +} + +func (r *response) Header() http.Header { + return r.header +} + +func (r *response) Write(p []byte) (n int, err error) { + if !r.headerSent { + r.WriteHeader(http.StatusOK) + } + return r.bufw.Write(p) +} + +func (r *response) WriteHeader(code int) { + if r.headerSent { + // Note: explicitly using Stderr, as Stdout is our HTTP output. + fmt.Fprintf(os.Stderr, "CGI attempted to write header twice on request for %s", r.req.URL) + return + } + r.headerSent = true + fmt.Fprintf(r.bufw, "Status: %d %s\r\n", code, http.StatusText(code)) + + // Set a default Content-Type + if _, hasType := r.header["Content-Type"]; !hasType { + r.header.Add("Content-Type", "text/html; charset=utf-8") + } + + r.header.Write(r.bufw) + r.bufw.WriteString("\r\n") + r.bufw.Flush() +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/child_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,131 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Tests for CGI (the child process perspective) + +package cgi + +import ( + "testing" +) + +func TestRequest(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "REQUEST_METHOD": "GET", + "HTTP_HOST": "example.com", + "HTTP_REFERER": "elsewhere", + "HTTP_USER_AGENT": "goclient", + "HTTP_FOO_BAR": "baz", + "REQUEST_URI": "/path?a=b", + "CONTENT_LENGTH": "123", + "CONTENT_TYPE": "text/xml", + "REMOTE_ADDR": "5.6.7.8", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if g, e := req.UserAgent(), "goclient"; e != g { + t.Errorf("expected UserAgent %q; got %q", e, g) + } + if g, e := req.Method, "GET"; e != g { + t.Errorf("expected Method %q; got %q", e, g) + } + if g, e := req.Header.Get("Content-Type"), "text/xml"; e != g { + t.Errorf("expected Content-Type %q; got %q", e, g) + } + if g, e := req.ContentLength, int64(123); e != g { + t.Errorf("expected ContentLength %d; got %d", e, g) + } + if g, e := req.Referer(), "elsewhere"; e != g { + t.Errorf("expected Referer %q; got %q", e, g) + } + if req.Header == nil { + t.Fatalf("unexpected nil Header") + } + if g, e := req.Header.Get("Foo-Bar"), "baz"; e != g { + t.Errorf("expected Foo-Bar %q; got %q", e, g) + } + if g, e := req.URL.String(), "http://example.com/path?a=b"; e != g { + t.Errorf("expected URL %q; got %q", e, g) + } + if g, e := req.FormValue("a"), "b"; e != g { + t.Errorf("expected FormValue(a) %q; got %q", e, g) + } + if req.Trailer == nil { + t.Errorf("unexpected nil Trailer") + } + if req.TLS != nil { + t.Errorf("expected nil TLS") + } + if e, g := "5.6.7.8:0", req.RemoteAddr; e != g { + t.Errorf("RemoteAddr: got %q; want %q", g, e) + } +} + +func TestRequestWithTLS(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "REQUEST_METHOD": "GET", + "HTTP_HOST": "example.com", + "HTTP_REFERER": "elsewhere", + "REQUEST_URI": "/path?a=b", + "CONTENT_TYPE": "text/xml", + "HTTPS": "1", + "REMOTE_ADDR": "5.6.7.8", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if g, e := req.URL.String(), "https://example.com/path?a=b"; e != g { + t.Errorf("expected URL %q; got %q", e, g) + } + if req.TLS == nil { + t.Errorf("expected non-nil TLS") + } +} + +func TestRequestWithoutHost(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "HTTP_HOST": "", + "REQUEST_METHOD": "GET", + "REQUEST_URI": "/path?a=b", + "CONTENT_LENGTH": "123", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if req.URL == nil { + t.Fatalf("unexpected nil URL") + } + if g, e := req.URL.String(), "/path?a=b"; e != g { + t.Errorf("URL = %q; want %q", g, e) + } +} + +func TestRequestWithoutRequestURI(t *testing.T) { + env := map[string]string{ + "SERVER_PROTOCOL": "HTTP/1.1", + "HTTP_HOST": "example.com", + "REQUEST_METHOD": "GET", + "SCRIPT_NAME": "/dir/scriptname", + "PATH_INFO": "/p1/p2", + "QUERY_STRING": "a=1&b=2", + "CONTENT_LENGTH": "123", + } + req, err := RequestFromMap(env) + if err != nil { + t.Fatalf("RequestFromMap: %v", err) + } + if req.URL == nil { + t.Fatalf("unexpected nil URL") + } + if g, e := req.URL.String(), "http://example.com/dir/scriptname/p1/p2?a=1&b=2"; e != g { + t.Errorf("URL = %q; want %q", g, e) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go 1970-01-01 00:00:00 +0000 +++ 
src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,377 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the host side of CGI (being the webserver
+// parent process).
+
+// Package cgi implements CGI (Common Gateway Interface) as specified
+// in RFC 3875.
+//
+// Note that using CGI means starting a new process to handle each
+// request, which is typically less efficient than using a
+// long-running server. This package is intended primarily for
+// compatibility with existing systems.
+package cgi
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+var trailingPort = regexp.MustCompile(`:([0-9]+)$`)
+
+var osDefaultInheritEnv = map[string][]string{
+	"darwin":  {"DYLD_LIBRARY_PATH"},
+	"freebsd": {"LD_LIBRARY_PATH"},
+	"hpux":    {"LD_LIBRARY_PATH", "SHLIB_PATH"},
+	"irix":    {"LD_LIBRARY_PATH", "LD_LIBRARYN32_PATH", "LD_LIBRARY64_PATH"},
+	"linux":   {"LD_LIBRARY_PATH"},
+	"openbsd": {"LD_LIBRARY_PATH"},
+	"solaris": {"LD_LIBRARY_PATH", "LD_LIBRARY_PATH_32", "LD_LIBRARY_PATH_64"},
+	"windows": {"SystemRoot", "COMSPEC", "PATHEXT", "WINDIR"},
+}
+
+// Handler runs an executable in a subprocess with a CGI environment.
+type Handler struct {
+	Path string // path to the CGI executable
+	Root string // root URI prefix of handler or empty for "/"
+
+	// Dir specifies the CGI executable's working directory.
+	// If Dir is empty, the base directory of Path is used.
+	// If Path has no base directory, the current working
+	// directory is used.
+	Dir string
+
+	Env        []string    // extra environment variables to set, if any, as "key=value"
+	InheritEnv []string    // environment variables to inherit from host, as "key"
+	Logger     *log.Logger // optional log for errors or nil to use log.Print
+	Args       []string    // optional arguments to pass to child process
+
+	// PathLocationHandler specifies the root http Handler that
+	// should handle internal redirects when the CGI process
+	// returns a Location header value starting with a "/", as
+	// specified in RFC 3875 § 6.3.2. This will likely be
+	// http.DefaultServeMux.
+	//
+	// If nil, a CGI response with a local URI path is instead sent
+	// back to the client and not redirected internally.
+	PathLocationHandler http.Handler
+}
+
+// removeLeadingDuplicates removes leading duplicates in the environment.
+// This makes it possible to override an environment variable like the
+// following:
+//	cgi.Handler{
+//		...
+// Env: []string{"SCRIPT_FILENAME=foo.php"}, +// } +func removeLeadingDuplicates(env []string) (ret []string) { + n := len(env) + for i := 0; i < n; i++ { + e := env[i] + s := strings.SplitN(e, "=", 2)[0] + found := false + for j := i + 1; j < n; j++ { + if s == strings.SplitN(env[j], "=", 2)[0] { + found = true + break + } + } + if !found { + ret = append(ret, e) + } + } + return +} + +func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + root := h.Root + if root == "" { + root = "/" + } + + if len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" { + rw.WriteHeader(http.StatusBadRequest) + rw.Write([]byte("Chunked request bodies are not supported by CGI.")) + return + } + + pathInfo := req.URL.Path + if root != "/" && strings.HasPrefix(pathInfo, root) { + pathInfo = pathInfo[len(root):] + } + + port := "80" + if matches := trailingPort.FindStringSubmatch(req.Host); len(matches) != 0 { + port = matches[1] + } + + env := []string{ + "SERVER_SOFTWARE=go", + "SERVER_NAME=" + req.Host, + "SERVER_PROTOCOL=HTTP/1.1", + "HTTP_HOST=" + req.Host, + "GATEWAY_INTERFACE=CGI/1.1", + "REQUEST_METHOD=" + req.Method, + "QUERY_STRING=" + req.URL.RawQuery, + "REQUEST_URI=" + req.URL.RequestURI(), + "PATH_INFO=" + pathInfo, + "SCRIPT_NAME=" + root, + "SCRIPT_FILENAME=" + h.Path, + "REMOTE_ADDR=" + req.RemoteAddr, + "REMOTE_HOST=" + req.RemoteAddr, + "SERVER_PORT=" + port, + } + + if req.TLS != nil { + env = append(env, "HTTPS=on") + } + + for k, v := range req.Header { + k = strings.Map(upperCaseAndUnderscore, k) + joinStr := ", " + if k == "COOKIE" { + joinStr = "; " + } + env = append(env, "HTTP_"+k+"="+strings.Join(v, joinStr)) + } + + if req.ContentLength > 0 { + env = append(env, fmt.Sprintf("CONTENT_LENGTH=%d", req.ContentLength)) + } + if ctype := req.Header.Get("Content-Type"); ctype != "" { + env = append(env, "CONTENT_TYPE="+ctype) + } + + if h.Env != nil { + env = append(env, h.Env...) + } + + envPath := os.Getenv("PATH") + if envPath == "" { + envPath = "/bin:/usr/bin:/usr/ucb:/usr/bsd:/usr/local/bin" + } + env = append(env, "PATH="+envPath) + + for _, e := range h.InheritEnv { + if v := os.Getenv(e); v != "" { + env = append(env, e+"="+v) + } + } + + for _, e := range osDefaultInheritEnv[runtime.GOOS] { + if v := os.Getenv(e); v != "" { + env = append(env, e+"="+v) + } + } + + env = removeLeadingDuplicates(env) + + var cwd, path string + if h.Dir != "" { + path = h.Path + cwd = h.Dir + } else { + cwd, path = filepath.Split(h.Path) + } + if cwd == "" { + cwd = "." 
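+		// (filepath.Split returned an empty dir because Path has no
+		// directory component; fall back to the current directory.)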
+ } + + internalError := func(err error) { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("CGI error: %v", err) + } + + cmd := &exec.Cmd{ + Path: path, + Args: append([]string{h.Path}, h.Args...), + Dir: cwd, + Env: env, + Stderr: os.Stderr, // for now + } + if req.ContentLength != 0 { + cmd.Stdin = req.Body + } + stdoutRead, err := cmd.StdoutPipe() + if err != nil { + internalError(err) + return + } + + err = cmd.Start() + if err != nil { + internalError(err) + return + } + if hook := testHookStartProcess; hook != nil { + hook(cmd.Process) + } + defer cmd.Wait() + defer stdoutRead.Close() + + linebody := bufio.NewReaderSize(stdoutRead, 1024) + headers := make(http.Header) + statusCode := 0 + headerLines := 0 + sawBlankLine := false + for { + line, isPrefix, err := linebody.ReadLine() + if isPrefix { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: long header line from subprocess.") + return + } + if err == io.EOF { + break + } + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: error reading headers: %v", err) + return + } + if len(line) == 0 { + sawBlankLine = true + break + } + headerLines++ + parts := strings.SplitN(string(line), ":", 2) + if len(parts) < 2 { + h.printf("cgi: bogus header line: %s", string(line)) + continue + } + header, val := parts[0], parts[1] + header = strings.TrimSpace(header) + val = strings.TrimSpace(val) + switch { + case header == "Status": + if len(val) < 3 { + h.printf("cgi: bogus status (short): %q", val) + return + } + code, err := strconv.Atoi(val[0:3]) + if err != nil { + h.printf("cgi: bogus status: %q", val) + h.printf("cgi: line was %q", line) + return + } + statusCode = code + default: + headers.Add(header, val) + } + } + if headerLines == 0 || !sawBlankLine { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: no headers") + return + } + + if loc := headers.Get("Location"); loc != "" { + if strings.HasPrefix(loc, "/") && h.PathLocationHandler != nil { + h.handleInternalRedirect(rw, req, loc) + return + } + if statusCode == 0 { + statusCode = http.StatusFound + } + } + + if statusCode == 0 && headers.Get("Content-Type") == "" { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: missing required Content-Type in headers") + return + } + + if statusCode == 0 { + statusCode = http.StatusOK + } + + // Copy headers to rw's headers, after we've decided not to + // go into handleInternalRedirect, which won't want its rw + // headers to have been touched. + for k, vv := range headers { + for _, v := range vv { + rw.Header().Add(k, v) + } + } + + rw.WriteHeader(statusCode) + + _, err = io.Copy(rw, linebody) + if err != nil { + h.printf("cgi: copy error: %v", err) + // And kill the child CGI process so we don't hang on + // the deferred cmd.Wait above if the error was just + // the client (rw) going away. If it was a read error + // (because the child died itself), then the extra + // kill of an already-dead process is harmless (the PID + // won't be reused until the Wait above). + cmd.Process.Kill() + } +} + +func (h *Handler) printf(format string, v ...interface{}) { + if h.Logger != nil { + h.Logger.Printf(format, v...) + } else { + log.Printf(format, v...) 
+ } +} + +func (h *Handler) handleInternalRedirect(rw http.ResponseWriter, req *http.Request, path string) { + url, err := req.URL.Parse(path) + if err != nil { + rw.WriteHeader(http.StatusInternalServerError) + h.printf("cgi: error resolving local URI path %q: %v", path, err) + return + } + // TODO: RFC 3875 isn't clear if only GET is supported, but it + // suggests so: "Note that any message-body attached to the + // request (such as for a POST request) may not be available + // to the resource that is the target of the redirect." We + // should do some tests against Apache to see how it handles + // POST, HEAD, etc. Does the internal redirect get the same + // method or just GET? What about incoming headers? + // (e.g. Cookies) Which headers, if any, are copied into the + // second request? + newReq := &http.Request{ + Method: "GET", + URL: url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: url.Host, + RemoteAddr: req.RemoteAddr, + TLS: req.TLS, + } + h.PathLocationHandler.ServeHTTP(rw, newReq) +} + +func upperCaseAndUnderscore(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + return r - ('a' - 'A') + case r == '-': + return '_' + case r == '=': + // Maybe not part of the CGI 'spec' but would mess up + // the environment in any case, as Go represents the + // environment as a slice of "key=value" strings. + return '_' + } + // TODO: other transformations in spec or practice? + return r +} + +var testHookStartProcess func(*os.Process) // nil except for some tests === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/host_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,461 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for package cgi + +package cgi + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +func newRequest(httpreq string) *http.Request { + buf := bufio.NewReader(strings.NewReader(httpreq)) + req, err := http.ReadRequest(buf) + if err != nil { + panic("cgi: bogus http request in test: " + httpreq) + } + req.RemoteAddr = "1.2.3.4" + return req +} + +func runCgiTest(t *testing.T, h *Handler, httpreq string, expectedMap map[string]string) *httptest.ResponseRecorder { + rw := httptest.NewRecorder() + req := newRequest(httpreq) + h.ServeHTTP(rw, req) + + // Make a map to hold the test map that the CGI returns. + m := make(map[string]string) + m["_body"] = rw.Body.String() + linesRead := 0 +readlines: + for { + line, err := rw.Body.ReadString('\n') + switch { + case err == io.EOF: + break readlines + case err != nil: + t.Fatalf("unexpected error reading from CGI: %v", err) + } + linesRead++ + trimmedLine := strings.TrimRight(line, "\r\n") + split := strings.SplitN(trimmedLine, "=", 2) + if len(split) != 2 { + t.Fatalf("Unexpected %d parts from invalid line number %v: %q; existing map=%v", + len(split), linesRead, line, m) + } + m[split[0]] = split[1] + } + + for key, expected := range expectedMap { + got := m[key] + if key == "cwd" { + // For Windows. golang.org/issue/4645. 
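+			// Two spellings of the same directory (case, short names)
+			// should compare equal, so check with os.SameFile rather
+			// than string equality.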
+ fi1, _ := os.Stat(got) + fi2, _ := os.Stat(expected) + if os.SameFile(fi1, fi2) { + got = expected + } + } + if got != expected { + t.Errorf("for key %q got %q; expected %q", key, got, expected) + } + } + return rw +} + +var cgiTested, cgiWorks bool + +func check(t *testing.T) { + if !cgiTested { + cgiTested = true + cgiWorks = exec.Command("./testdata/test.cgi").Run() == nil + } + if !cgiWorks { + // No Perl on Windows, needed by test.cgi + // TODO: make the child process be Go, not Perl. + t.Skip("Skipping test: test.cgi failed.") + } +} + +func TestCGIBasicGet(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "test": "Hello CGI", + "param-a": "b", + "param-foo": "bar", + "env-GATEWAY_INTERFACE": "CGI/1.1", + "env-HTTP_HOST": "example.com", + "env-PATH_INFO": "", + "env-QUERY_STRING": "foo=bar&a=b", + "env-REMOTE_ADDR": "1.2.3.4", + "env-REMOTE_HOST": "1.2.3.4", + "env-REQUEST_METHOD": "GET", + "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + "env-SERVER_NAME": "example.com", + "env-SERVER_PORT": "80", + "env-SERVER_SOFTWARE": "go", + } + replay := runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) + + if expected, got := "text/html", replay.Header().Get("Content-Type"); got != expected { + t.Errorf("got a Content-Type of %q; expected %q", got, expected) + } + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +func TestCGIBasicGetAbsPath(t *testing.T) { + check(t) + pwd, err := os.Getwd() + if err != nil { + t.Fatalf("getwd error: %v", err) + } + h := &Handler{ + Path: pwd + "/testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "env-REQUEST_URI": "/test.cgi?foo=bar&a=b", + "env-SCRIPT_FILENAME": pwd + "/testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + } + runCgiTest(t, h, "GET /test.cgi?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestPathInfo(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "param-a": "b", + "env-PATH_INFO": "/extrapath", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/test.cgi/extrapath?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/test.cgi", + } + runCgiTest(t, h, "GET /test.cgi/extrapath?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestPathInfoDirRoot(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/myscript/", + } + expectedMap := map[string]string{ + "env-PATH_INFO": "bar", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/myscript/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/myscript/", + } + runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestDupHeaders(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + } + expectedMap := map[string]string{ + "env-REQUEST_URI": "/myscript/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-HTTP_COOKIE": "nom=NOM; yum=YUM", + "env-HTTP_X_FOO": "val1, val2", + } + runCgiTest(t, h, "GET /myscript/bar?a=b HTTP/1.0\n"+ + "Cookie: nom=NOM\n"+ + "Cookie: yum=YUM\n"+ + "X-Foo: val1\n"+ + "X-Foo: val2\n"+ + "Host: example.com\n\n", + expectedMap) +} + +func 
TestPathInfoNoRoot(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "", + } + expectedMap := map[string]string{ + "env-PATH_INFO": "/bar", + "env-QUERY_STRING": "a=b", + "env-REQUEST_URI": "/bar?a=b", + "env-SCRIPT_FILENAME": "testdata/test.cgi", + "env-SCRIPT_NAME": "/", + } + runCgiTest(t, h, "GET /bar?a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestCGIBasicPost(t *testing.T) { + check(t) + postReq := `POST /test.cgi?a=b HTTP/1.0 +Host: example.com +Content-Type: application/x-www-form-urlencoded +Content-Length: 15 + +postfoo=postbar` + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{ + "test": "Hello CGI", + "param-postfoo": "postbar", + "env-REQUEST_METHOD": "POST", + "env-CONTENT_LENGTH": "15", + "env-REQUEST_URI": "/test.cgi?a=b", + } + runCgiTest(t, h, postReq, expectedMap) +} + +func chunk(s string) string { + return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) +} + +// The CGI spec doesn't allow chunked requests. +func TestCGIPostChunked(t *testing.T) { + check(t) + postReq := `POST /test.cgi?a=b HTTP/1.1 +Host: example.com +Content-Type: application/x-www-form-urlencoded +Transfer-Encoding: chunked + +` + chunk("postfoo") + chunk("=") + chunk("postbar") + chunk("") + + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + expectedMap := map[string]string{} + resp := runCgiTest(t, h, postReq, expectedMap) + if got, expected := resp.Code, http.StatusBadRequest; got != expected { + t.Fatalf("Expected %v response code from chunked request body; got %d", + expected, got) + } +} + +func TestRedirect(t *testing.T) { + check(t) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + } + rec := runCgiTest(t, h, "GET /test.cgi?loc=http://foo.com/ HTTP/1.0\nHost: example.com\n\n", nil) + if e, g := 302, rec.Code; e != g { + t.Errorf("expected status code %d; got %d", e, g) + } + if e, g := "http://foo.com/", rec.Header().Get("Location"); e != g { + t.Errorf("expected Location header of %q; got %q", e, g) + } +} + +func TestInternalRedirect(t *testing.T) { + check(t) + baseHandler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + fmt.Fprintf(rw, "basepath=%s\n", req.URL.Path) + fmt.Fprintf(rw, "remoteaddr=%s\n", req.RemoteAddr) + }) + h := &Handler{ + Path: "testdata/test.cgi", + Root: "/test.cgi", + PathLocationHandler: baseHandler, + } + expectedMap := map[string]string{ + "basepath": "/foo", + "remoteaddr": "1.2.3.4", + } + runCgiTest(t, h, "GET /test.cgi?loc=/foo HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +// TestCopyError tests that we kill the process if there's an error copying +// its output. 
(for example, from the client having gone away)
+func TestCopyError(t *testing.T) {
+	check(t)
+	if runtime.GOOS == "windows" {
+		t.Skipf("skipping test on %q", runtime.GOOS)
+	}
+	h := &Handler{
+		Path: "testdata/test.cgi",
+		Root: "/test.cgi",
+	}
+	ts := httptest.NewServer(h)
+	defer ts.Close()
+
+	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	req, _ := http.NewRequest("GET", "http://example.com/test.cgi?bigresponse=1", nil)
+	err = req.Write(conn)
+	if err != nil {
+		t.Fatalf("Write: %v", err)
+	}
+
+	res, err := http.ReadResponse(bufio.NewReader(conn), req)
+	if err != nil {
+		t.Fatalf("ReadResponse: %v", err)
+	}
+
+	pidstr := res.Header.Get("X-CGI-Pid")
+	if pidstr == "" {
+		t.Fatalf("expected an X-CGI-Pid header in response")
+	}
+	pid, err := strconv.Atoi(pidstr)
+	if err != nil {
+		t.Fatalf("invalid X-CGI-Pid value")
+	}
+
+	var buf [5000]byte
+	n, err := io.ReadFull(res.Body, buf[:])
+	if err != nil {
+		t.Fatalf("ReadFull: %d bytes, %v", n, err)
+	}
+
+	childRunning := func() bool {
+		return isProcessRunning(t, pid)
+	}
+
+	if !childRunning() {
+		t.Fatalf("pre-conn.Close, expected child to be running")
+	}
+	conn.Close()
+
+	tries := 0
+	for tries < 25 && childRunning() {
+		time.Sleep(50 * time.Millisecond * time.Duration(tries))
+		tries++
+	}
+	if childRunning() {
+		t.Fatalf("post-conn.Close, expected child to be gone")
+	}
+}
+
+func TestDirUnix(t *testing.T) {
+	check(t)
+	if runtime.GOOS == "windows" {
+		t.Skipf("skipping test on %q", runtime.GOOS)
+	}
+	cwd, _ := os.Getwd()
+	h := &Handler{
+		Path: "testdata/test.cgi",
+		Root: "/test.cgi",
+		Dir:  cwd,
+	}
+	expectedMap := map[string]string{
+		"cwd": cwd,
+	}
+	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+	cwd, _ = os.Getwd()
+	cwd = filepath.Join(cwd, "testdata")
+	h = &Handler{
+		Path: "testdata/test.cgi",
+		Root: "/test.cgi",
+	}
+	expectedMap = map[string]string{
+		"cwd": cwd,
+	}
+	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+}
+
+func TestDirWindows(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skip("Skipping windows specific test.")
+	}
+
+	cgifile, _ := filepath.Abs("testdata/test.cgi")
+
+	var perl string
+	var err error
+	perl, err = exec.LookPath("perl")
+	if err != nil {
+		t.Skip("Skipping test: perl not found.")
+	}
+	perl, _ = filepath.Abs(perl)
+
+	cwd, _ := os.Getwd()
+	h := &Handler{
+		Path: perl,
+		Root: "/test.cgi",
+		Dir:  cwd,
+		Args: []string{cgifile},
+		Env:  []string{"SCRIPT_FILENAME=" + cgifile},
+	}
+	expectedMap := map[string]string{
+		"cwd": cwd,
+	}
+	runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap)
+
+	// If Dir is not specified on Windows, the working directory should be
+	// the base directory of perl.
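+	// A rough illustration of filepath.Split here (example path assumed,
+	// not taken from the test): for perl = `C:\Perl\bin\perl.exe` it
+	// returns (`C:\Perl\bin\`, `perl.exe`), and the code below then trims
+	// the trailing separator.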
+ cwd, _ = filepath.Split(perl) + if cwd != "" && cwd[len(cwd)-1] == filepath.Separator { + cwd = cwd[:len(cwd)-1] + } + h = &Handler{ + Path: perl, + Root: "/test.cgi", + Args: []string{cgifile}, + Env: []string{"SCRIPT_FILENAME=" + cgifile}, + } + expectedMap = map[string]string{ + "cwd": cwd, + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) +} + +func TestEnvOverride(t *testing.T) { + cgifile, _ := filepath.Abs("testdata/test.cgi") + + var perl string + var err error + perl, err = exec.LookPath("perl") + if err != nil { + t.Skipf("Skipping test: perl not found.") + } + perl, _ = filepath.Abs(perl) + + cwd, _ := os.Getwd() + h := &Handler{ + Path: perl, + Root: "/test.cgi", + Dir: cwd, + Args: []string{cgifile}, + Env: []string{ + "SCRIPT_FILENAME=" + cgifile, + "REQUEST_URI=/foo/bar"}, + } + expectedMap := map[string]string{ + "cwd": cwd, + "env-SCRIPT_FILENAME": cgifile, + "env-REQUEST_URI": "/foo/bar", + } + runCgiTest(t, h, "GET /test.cgi HTTP/1.0\nHost: example.com\n\n", expectedMap) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/matryoshka_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,228 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests a Go CGI program running under a Go CGI host process. +// Further, the two programs are the same binary, just checking +// their environment to figure out what mode to run in. + +package cgi + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "runtime" + "testing" + "time" +) + +// This test is a CGI host (testing host.go) that runs its own binary +// as a child process testing the other half of CGI (child.go). 
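+//
+// The mechanism, in outline: the host points the CGI Handler at its own
+// test binary and selects the child test by name,
+//
+//	h := &Handler{
+//		Path: os.Args[0],
+//		Root: "/test.go",
+//		Args: []string{"-test.run=TestBeChildCGIProcess"},
+//	}
+//
+// while TestBeChildCGIProcess only acts as a CGI child when it finds a
+// REQUEST_METHOD variable in its environment (see the end of this file).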
+func TestHostingOurselves(t *testing.T) { + if runtime.GOOS == "nacl" { + t.Skip("skipping on nacl") + } + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "test": "Hello CGI-in-CGI", + "param-a": "b", + "param-foo": "bar", + "env-GATEWAY_INTERFACE": "CGI/1.1", + "env-HTTP_HOST": "example.com", + "env-PATH_INFO": "", + "env-QUERY_STRING": "foo=bar&a=b", + "env-REMOTE_ADDR": "1.2.3.4", + "env-REMOTE_HOST": "1.2.3.4", + "env-REQUEST_METHOD": "GET", + "env-REQUEST_URI": "/test.go?foo=bar&a=b", + "env-SCRIPT_FILENAME": os.Args[0], + "env-SCRIPT_NAME": "/test.go", + "env-SERVER_NAME": "example.com", + "env-SERVER_PORT": "80", + "env-SERVER_SOFTWARE": "go", + } + replay := runCgiTest(t, h, "GET /test.go?foo=bar&a=b HTTP/1.0\nHost: example.com\n\n", expectedMap) + + if expected, got := "text/html; charset=utf-8", replay.Header().Get("Content-Type"); got != expected { + t.Errorf("got a Content-Type of %q; expected %q", got, expected) + } + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +type customWriterRecorder struct { + w io.Writer + *httptest.ResponseRecorder +} + +func (r *customWriterRecorder) Write(p []byte) (n int, err error) { + return r.w.Write(p) +} + +type limitWriter struct { + w io.Writer + n int +} + +func (w *limitWriter) Write(p []byte) (n int, err error) { + if len(p) > w.n { + p = p[:w.n] + } + if len(p) > 0 { + n, err = w.w.Write(p) + w.n -= n + } + if w.n == 0 { + err = errors.New("past write limit") + } + return +} + +// If there's an error copying the child's output to the parent, test +// that we kill the child. +func TestKillChildAfterCopyError(t *testing.T) { + if runtime.GOOS == "nacl" { + t.Skip("skipping on nacl") + } + + defer func() { testHookStartProcess = nil }() + proc := make(chan *os.Process, 1) + testHookStartProcess = func(p *os.Process) { + proc <- p + } + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + req, _ := http.NewRequest("GET", "http://example.com/test.cgi?write-forever=1", nil) + rec := httptest.NewRecorder() + var out bytes.Buffer + const writeLen = 50 << 10 + rw := &customWriterRecorder{&limitWriter{&out, writeLen}, rec} + + donec := make(chan bool, 1) + go func() { + h.ServeHTTP(rw, req) + donec <- true + }() + + select { + case <-donec: + if out.Len() != writeLen || out.Bytes()[0] != 'a' { + t.Errorf("unexpected output: %q", out.Bytes()) + } + case <-time.After(5 * time.Second): + t.Errorf("timeout. ServeHTTP hung and didn't kill the child process?") + select { + case p := <-proc: + p.Kill() + t.Logf("killed process") + default: + t.Logf("didn't kill process") + } + } +} + +// Test that a child handler writing only headers works. 
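+// In the child this amounts to setting a header and returning without
+// writing a body, roughly (see TestBeChildCGIProcess in this file):
+//
+//	rw.Header().Set("X-Test-Header", "X-Test-Value")
+//	return // headers only, no body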
+// golang.org/issue/7196 +func TestChildOnlyHeaders(t *testing.T) { + if runtime.GOOS == "nacl" { + t.Skip("skipping on nacl") + } + + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "_body": "", + } + replay := runCgiTest(t, h, "GET /test.go?no-body=1 HTTP/1.0\nHost: example.com\n\n", expectedMap) + if expected, got := "X-Test-Value", replay.Header().Get("X-Test-Header"); got != expected { + t.Errorf("got a X-Test-Header of %q; expected %q", got, expected) + } +} + +// golang.org/issue/7198 +func Test500WithNoHeaders(t *testing.T) { want500Test(t, "/immediate-disconnect") } +func Test500WithNoContentType(t *testing.T) { want500Test(t, "/no-content-type") } +func Test500WithEmptyHeaders(t *testing.T) { want500Test(t, "/empty-headers") } + +func want500Test(t *testing.T, path string) { + h := &Handler{ + Path: os.Args[0], + Root: "/test.go", + Args: []string{"-test.run=TestBeChildCGIProcess"}, + } + expectedMap := map[string]string{ + "_body": "", + } + replay := runCgiTest(t, h, "GET "+path+" HTTP/1.0\nHost: example.com\n\n", expectedMap) + if replay.Code != 500 { + t.Errorf("Got code %d; want 500", replay.Code) + } +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// Note: not actually a test. +func TestBeChildCGIProcess(t *testing.T) { + if os.Getenv("REQUEST_METHOD") == "" { + // Not in a CGI environment; skipping test. + return + } + switch os.Getenv("REQUEST_URI") { + case "/immediate-disconnect": + os.Exit(0) + case "/no-content-type": + fmt.Printf("Content-Length: 6\n\nHello\n") + os.Exit(0) + case "/empty-headers": + fmt.Printf("\nHello") + os.Exit(0) + } + Serve(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("X-Test-Header", "X-Test-Value") + req.ParseForm() + if req.FormValue("no-body") == "1" { + return + } + if req.FormValue("write-forever") == "1" { + io.Copy(rw, neverEnding('a')) + for { + time.Sleep(5 * time.Second) // hang forever, until killed + } + } + fmt.Fprintf(rw, "test=Hello CGI-in-CGI\n") + for k, vv := range req.Form { + for _, v := range vv { + fmt.Fprintf(rw, "param-%s=%s\n", k, v) + } + } + for _, kv := range os.Environ() { + fmt.Fprintf(rw, "env-%s\n", kv) + } + })) + os.Exit(0) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/plan9_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package cgi + +import ( + "os" + "strconv" + "testing" +) + +func isProcessRunning(t *testing.T, pid int) bool { + _, err := os.Stat("/proc/" + strconv.Itoa(pid)) + return err == nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/posix_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,21 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
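+//
+// Liveness probe used by the CGI host tests: sending signal 0 via
+// p.Signal(syscall.Signal(0)) delivers no signal but reports, through the
+// returned error, whether the process still exists (the standard POSIX
+// kill(pid, 0) idiom).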
+
+// +build !plan9
+
+package cgi
+
+import (
+	"os"
+	"syscall"
+	"testing"
+)
+
+func isProcessRunning(t *testing.T, pid int) bool {
+	p, err := os.FindProcess(pid)
+	if err != nil {
+		return false
+	}
+	return p.Signal(syscall.Signal(0)) == nil
+}
=== added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata'
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi'
--- src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/cgi/testdata/test.cgi 2016-03-22 15:18:22 +0000
@@ -0,0 +1,91 @@
+#!/usr/bin/perl
+# Copyright 2011 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Test script run as a child process under cgi_test.go
+
+use strict;
+use Cwd;
+
+binmode STDOUT;
+
+my $q = MiniCGI->new;
+my $params = $q->Vars;
+
+if ($params->{"loc"}) {
+	print "Location: $params->{loc}\r\n\r\n";
+	exit(0);
+}
+
+print "Content-Type: text/html\r\n";
+print "X-CGI-Pid: $$\r\n";
+print "X-Test-Header: X-Test-Value\r\n";
+print "\r\n";
+
+if ($params->{"bigresponse"}) {
+	# 17 MB, for OS X: golang.org/issue/4958
+	for (1..(17 * 1024)) {
+		print "A" x 1024, "\r\n";
+	}
+	exit 0;
+}
+
+print "test=Hello CGI\r\n";
+
+foreach my $k (sort keys %$params) {
+	print "param-$k=$params->{$k}\r\n";
+}
+
+foreach my $k (sort keys %ENV) {
+	my $clean_env = $ENV{$k};
+	$clean_env =~ s/[\n\r]//g;
+	print "env-$k=$clean_env\r\n";
+}
+
+# NOTE: msys perl returns /c/go/src/... not C:\go\....
+my $dir = getcwd();
+if ($^O eq 'MSWin32' || $^O eq 'msys') {
+	if ($dir =~ /^.:/) {
+		$dir =~ s!/!\\!g;
+	} else {
+		my $cmd = $ENV{'COMSPEC'} || 'c:\\windows\\system32\\cmd.exe';
+		$cmd =~ s!\\!/!g;
+		$dir = `$cmd /c cd`;
+		chomp $dir;
+	}
+}
+print "cwd=$dir\r\n";
+
+# A minimal version of CGI.pm, for people without the perl-modules
+# package installed. (CGI.pm used to be part of the Perl core, but
+# some distros now bundle perl-base and perl-modules separately...)
+package MiniCGI;
+
+sub new {
+	my $class = shift;
+	return bless {}, $class;
+}
+
+sub Vars {
+	my $self = shift;
+	my $pairs;
+	if ($ENV{CONTENT_LENGTH}) {
+		$pairs = do { local $/; <STDIN> };
+	} else {
+		$pairs = $ENV{QUERY_STRING};
+	}
+	my $vars = {};
+	foreach my $kv (split(/&/, $pairs)) {
+		my ($k, $v) = split(/=/, $kv, 2);
+		$vars->{_urldecode($k)} = _urldecode($v);
+	}
+	return $vars;
+}
+
+sub _urldecode {
+	my $v = shift;
+	$v =~ tr/+/ /;
+	$v =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
+	return $v;
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/chunked.go'
--- src/github.com/Azure/azure-sdk-for-go/core/http/chunked.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/chunked.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,203 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The wire protocol for HTTP's "chunked" Transfer-Encoding.
+
+// This code is duplicated in net/http and net/http/httputil.
+// Please make any changes in both files.
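+//
+// For reference, the wire framing this file reads and writes looks like
+// the following (taken from the round-trip test in chunked_test.go):
+//
+//	7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n
+//
+// that is, a hex chunk size, CRLF, the chunk bytes, CRLF, repeated, and
+// terminated by a zero-length chunk.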
+ +package http + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" +) + +const maxLineLength = 4096 // assumed <= bufio.defaultBufSize + +var ErrLineTooLong = errors.New("header line too long") + +// newChunkedReader returns a new chunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// newChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. +func newChunkedReader(r io.Reader) io.Reader { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + return &chunkedReader{r: br} +} + +type chunkedReader struct { + r *bufio.Reader + n uint64 // unread bytes in chunk + err error + buf [2]byte +} + +func (cr *chunkedReader) beginChunk() { + // chunk-size CRLF + var line []byte + line, cr.err = readLine(cr.r) + if cr.err != nil { + return + } + cr.n, cr.err = parseHexUint(line) + if cr.err != nil { + return + } + if cr.n == 0 { + cr.err = io.EOF + } +} + +func (cr *chunkedReader) chunkHeaderAvailable() bool { + n := cr.r.Buffered() + if n > 0 { + peek, _ := cr.r.Peek(n) + return bytes.IndexByte(peek, '\n') >= 0 + } + return false +} + +func (cr *chunkedReader) Read(b []uint8) (n int, err error) { + for cr.err == nil { + if cr.n == 0 { + if n > 0 && !cr.chunkHeaderAvailable() { + // We've read enough. Don't potentially block + // reading a new chunk header. + break + } + cr.beginChunk() + continue + } + if len(b) == 0 { + break + } + rbuf := b + if uint64(len(rbuf)) > cr.n { + rbuf = rbuf[:cr.n] + } + var n0 int + n0, cr.err = cr.r.Read(rbuf) + n += n0 + b = b[n0:] + cr.n -= uint64(n0) + // If we're at the end of a chunk, read the next two + // bytes to verify they are "\r\n". + if cr.n == 0 && cr.err == nil { + if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil { + if cr.buf[0] != '\r' || cr.buf[1] != '\n' { + cr.err = errors.New("malformed chunked encoding") + } + } + } + } + return n, cr.err +} + +// Read a line of bytes (up to \n) from b. +// Give up if the line exceeds maxLineLength. +// The returned bytes are a pointer into storage in +// the bufio, so they are only valid until the next bufio read. +func readLine(b *bufio.Reader) (p []byte, err error) { + if p, err = b.ReadSlice('\n'); err != nil { + // We always know when EOF is coming. + // If the caller asked for a line, there should be a line. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if err == bufio.ErrBufferFull { + err = ErrLineTooLong + } + return nil, err + } + if len(p) >= maxLineLength { + return nil, ErrLineTooLong + } + return trimTrailingWhitespace(p), nil +} + +func trimTrailingWhitespace(b []byte) []byte { + for len(b) > 0 && isASCIISpace(b[len(b)-1]) { + b = b[:len(b)-1] + } + return b +} + +func isASCIISpace(b byte) bool { + return b == ' ' || b == '\t' || b == '\n' || b == '\r' +} + +// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream. +// +// newChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using newChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. 
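+//
+// A minimal sketch of direct use, mirroring the round-trip test in
+// chunked_test.go (a bytes.Buffer stands in for the network):
+//
+//	var b bytes.Buffer
+//	w := newChunkedWriter(&b)
+//	w.Write([]byte("hello, "))
+//	w.Close() // writes the terminating "0\r\n"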
+func newChunkedWriter(w io.Writer) io.WriteCloser { + return &chunkedWriter{w} +} + +// Writing to chunkedWriter translates to writing in HTTP chunked Transfer +// Encoding wire format to the underlying Wire chunkedWriter. +type chunkedWriter struct { + Wire io.Writer +} + +// Write the contents of data as one chunk to Wire. +// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has +// a bug since it does not check for success of io.WriteString +func (cw *chunkedWriter) Write(data []byte) (n int, err error) { + + // Don't send 0-length data. It looks like EOF for chunked encoding. + if len(data) == 0 { + return 0, nil + } + + if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil { + return 0, err + } + if n, err = cw.Wire.Write(data); err != nil { + return + } + if n != len(data) { + err = io.ErrShortWrite + return + } + _, err = io.WriteString(cw.Wire, "\r\n") + + return +} + +func (cw *chunkedWriter) Close() error { + _, err := io.WriteString(cw.Wire, "0\r\n") + return err +} + +func parseHexUint(v []byte) (n uint64, err error) { + for _, b := range v { + n <<= 4 + switch { + case '0' <= b && b <= '9': + b = b - '0' + case 'a' <= b && b <= 'f': + b = b - 'a' + 10 + case 'A' <= b && b <= 'F': + b = b - 'A' + 10 + default: + return 0, errors.New("invalid byte in chunk length") + } + n |= uint64(b) + } + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/chunked_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,159 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code is duplicated in net/http and net/http/httputil. +// Please make any changes in both files. + +package http + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" +) + +func TestChunk(t *testing.T) { + var b bytes.Buffer + + w := newChunkedWriter(&b) + const chunk1 = "hello, " + const chunk2 = "world! 0123456789abcdef" + w.Write([]byte(chunk1)) + w.Write([]byte(chunk2)) + w.Close() + + if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e { + t.Fatalf("chunk writer wrote %q; want %q", g, e) + } + + r := newChunkedReader(&b) + data, err := ioutil.ReadAll(r) + if err != nil { + t.Logf(`data: "%s"`, data) + t.Fatalf("ReadAll from reader: %v", err) + } + if g, e := string(data), chunk1+chunk2; g != e { + t.Errorf("chunk reader read %q; want %q", g, e) + } +} + +func TestChunkReadMultiple(t *testing.T) { + // Bunch of small chunks, all read together. + { + var b bytes.Buffer + w := newChunkedWriter(&b) + w.Write([]byte("foo")) + w.Write([]byte("bar")) + w.Close() + + r := newChunkedReader(&b) + buf := make([]byte, 10) + n, err := r.Read(buf) + if n != 6 || err != io.EOF { + t.Errorf("Read = %d, %v; want 6, EOF", n, err) + } + buf = buf[:n] + if string(buf) != "foobar" { + t.Errorf("Read = %q; want %q", buf, "foobar") + } + } + + // One big chunk followed by a little chunk, but the small bufio.Reader size + // should prevent the second chunk header from being read. 
+ { + var b bytes.Buffer + w := newChunkedWriter(&b) + // fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes, + // the same as the bufio ReaderSize below (the minimum), so even + // though we're going to try to Read with a buffer larger enough to also + // receive "foo", the second chunk header won't be read yet. + const fillBufChunk = "0123456789a" + const shortChunk = "foo" + w.Write([]byte(fillBufChunk)) + w.Write([]byte(shortChunk)) + w.Close() + + r := newChunkedReader(bufio.NewReaderSize(&b, 16)) + buf := make([]byte, len(fillBufChunk)+len(shortChunk)) + n, err := r.Read(buf) + if n != len(fillBufChunk) || err != nil { + t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) + } + buf = buf[:n] + if string(buf) != fillBufChunk { + t.Errorf("Read = %q; want %q", buf, fillBufChunk) + } + + n, err = r.Read(buf) + if n != len(shortChunk) || err != io.EOF { + t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) + } + } + + // And test that we see an EOF chunk, even though our buffer is already full: + { + r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) + buf := make([]byte, 3) + n, err := r.Read(buf) + if n != 3 || err != io.EOF { + t.Errorf("Read = %d, %v; want 3, EOF", n, err) + } + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + } +} + +func TestChunkReaderAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + var buf bytes.Buffer + w := newChunkedWriter(&buf) + a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") + w.Write(a) + w.Write(b) + w.Write(c) + w.Close() + + readBuf := make([]byte, len(a)+len(b)+len(c)+1) + byter := bytes.NewReader(buf.Bytes()) + bufr := bufio.NewReader(byter) + mallocs := testing.AllocsPerRun(100, func() { + byter.Seek(0, 0) + bufr.Reset(byter) + r := newChunkedReader(bufr) + n, err := io.ReadFull(r, readBuf) + if n != len(readBuf)-1 { + t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) + } + if err != io.ErrUnexpectedEOF { + t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) + } + }) + if mallocs > 1.5 { + t.Errorf("mallocs = %v; want 1", mallocs) + } +} + +func TestParseHexUint(t *testing.T) { + for i := uint64(0); i <= 1234; i++ { + line := []byte(fmt.Sprintf("%x", i)) + got, err := parseHexUint(line) + if err != nil { + t.Fatalf("on %d: %v", i, err) + } + if got != i { + t.Errorf("for input %q = %d; want %d", line, got, i) + } + } + _, err := parseHexUint([]byte("bogus")) + if err == nil { + t.Error("expected error on bogus input") + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/client.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,487 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP client. See RFC 2616. +// +// This is the high-level Client interface. +// The low-level implementation is in transport.go. + +package http + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/url" + "strings" + "sync" + "time" +) + +// A Client is an HTTP client. Its zero value (DefaultClient) is a +// usable client that uses DefaultTransport. 
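+//
+// Typical use is a single shared Client; the field values here are
+// illustrative only:
+//
+//	client := &Client{Timeout: 10 * time.Second}
+//	resp, err := client.Get("http://example.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer resp.Body.Close()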
+// +// The Client's Transport typically has internal state (cached TCP +// connections), so Clients should be reused instead of created as +// needed. Clients are safe for concurrent use by multiple goroutines. +// +// A Client is higher-level than a RoundTripper (such as Transport) +// and additionally handles HTTP details such as cookies and +// redirects. +type Client struct { + // Transport specifies the mechanism by which individual + // HTTP requests are made. + // If nil, DefaultTransport is used. + Transport RoundTripper + + // CheckRedirect specifies the policy for handling redirects. + // If CheckRedirect is not nil, the client calls it before + // following an HTTP redirect. The arguments req and via are + // the upcoming request and the requests made already, oldest + // first. If CheckRedirect returns an error, the Client's Get + // method returns both the previous Response and + // CheckRedirect's error (wrapped in a url.Error) instead of + // issuing the Request req. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect func(req *Request, via []*Request) error + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar CookieJar + + // Timeout specifies a time limit for requests made by this + // Client. The timeout includes connection time, any + // redirects, and reading the response body. The timer remains + // running after Get, Head, Post, or Do return and will + // interrupt reading of the Response.Body. + // + // A Timeout of zero means no timeout. + // + // The Client's Transport must support the CancelRequest + // method or Client will return errors when attempting to make + // a request with Get, Head, Post, or Do. Client's default + // Transport (DefaultTransport) supports CancelRequest. + Timeout time.Duration +} + +// DefaultClient is the default Client and is used by Get, Head, and Post. +var DefaultClient = &Client{} + +// RoundTripper is an interface representing the ability to execute a +// single HTTP transaction, obtaining the Response for a given Request. +// +// A RoundTripper must be safe for concurrent use by multiple +// goroutines. +type RoundTripper interface { + // RoundTrip executes a single HTTP transaction, returning + // the Response for the request req. RoundTrip should not + // attempt to interpret the response. In particular, + // RoundTrip must return err == nil if it obtained a response, + // regardless of the response's HTTP status code. A non-nil + // err should be reserved for failure to obtain a response. + // Similarly, RoundTrip should not attempt to handle + // higher-level protocol details such as redirects, + // authentication, or cookies. + // + // RoundTrip should not modify the request, except for + // consuming and closing the Body, including on errors. The + // request's URL and Header fields are guaranteed to be + // initialized. + RoundTrip(*Request) (*Response, error) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// Used in Send to implement io.ReadCloser by bundling together the +// bufio.Reader through which we read the response, and the underlying +// network connection. 
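+// Embedding both interfaces makes the struct satisfy io.ReadCloser; the
+// usual compile-time assertion for such an adapter would be
+//
+//	var _ io.ReadCloser = readClose{}
+//
+// though this file does not include one.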
+type readClose struct {
+	io.Reader
+	io.Closer
+}
+
+func (c *Client) send(req *Request) (*Response, error) {
+	if c.Jar != nil {
+		for _, cookie := range c.Jar.Cookies(req.URL) {
+			req.AddCookie(cookie)
+		}
+	}
+	resp, err := send(req, c.transport())
+	if err != nil {
+		return nil, err
+	}
+	if c.Jar != nil {
+		if rc := resp.Cookies(); len(rc) > 0 {
+			c.Jar.SetCookies(req.URL, rc)
+		}
+	}
+	return resp, err
+}
+
+// Do sends an HTTP request and returns an HTTP response, following
+// policy (e.g. redirects, cookies, auth) as configured on the client.
+//
+// An error is returned if caused by client policy (such as
+// CheckRedirect), or if there was an HTTP protocol error.
+// A non-2xx response doesn't cause an error.
+//
+// When err is nil, resp always contains a non-nil resp.Body.
+//
+// Callers should close resp.Body when done reading from it. If
+// resp.Body is not closed, the Client's underlying RoundTripper
+// (typically Transport) may not be able to re-use a persistent TCP
+// connection to the server for a subsequent "keep-alive" request.
+//
+// The request Body, if non-nil, will be closed by the underlying
+// Transport, even on errors.
+//
+// Generally Get, Post, or PostForm will be used instead of Do.
+func (c *Client) Do(req *Request) (resp *Response, err error) {
+	if req.Method == "GET" || req.Method == "HEAD" {
+		return c.doFollowingRedirects(req, shouldRedirectGet)
+	}
+	if req.Method == "POST" || req.Method == "PUT" {
+		return c.doFollowingRedirects(req, shouldRedirectPost)
+	}
+	return c.send(req)
+}
+
+func (c *Client) transport() RoundTripper {
+	if c.Transport != nil {
+		return c.Transport
+	}
+	return DefaultTransport
+}
+
+// send issues an HTTP request.
+// Caller should close resp.Body when done reading from it.
+func send(req *Request, t RoundTripper) (resp *Response, err error) {
+	if t == nil {
+		req.closeBody()
+		return nil, errors.New("http: no Client.Transport or DefaultTransport")
+	}
+
+	if req.URL == nil {
+		req.closeBody()
+		return nil, errors.New("http: nil Request.URL")
+	}
+
+	if req.RequestURI != "" {
+		req.closeBody()
+		return nil, errors.New("http: Request.RequestURI can't be set in client requests.")
+	}
+
+	// Most of the callers of send (Get, Post, et al) don't need
+	// Headers, leaving it uninitialized. We guarantee to the
+	// Transport that this has been initialized, though.
+	if req.Header == nil {
+		req.Header = make(Header)
+	}
+
+	if u := req.URL.User; u != nil {
+		username := u.Username()
+		password, _ := u.Password()
+		req.Header.Set("Authorization", "Basic "+basicAuth(username, password))
+	}
+	resp, err = t.RoundTrip(req)
+	if err != nil {
+		if resp != nil {
+			log.Printf("RoundTripper returned a response & error; ignoring response")
+		}
+		return nil, err
+	}
+	return resp, nil
+}
+
+// See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt
+// "To receive authorization, the client sends the userid and password,
+// separated by a single colon (":") character, within a base64
+// encoded string in the credentials."
+// It is not meant to be urlencoded.
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+// True if the specified HTTP status code is one for which the Get utility should
+// automatically redirect.
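+// For GET and HEAD these are 301, 302, 303 and 307; POST and PUT, via
+// shouldRedirectPost below, follow only 302 and 303.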
+func shouldRedirectGet(statusCode int) bool { + switch statusCode { + case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: + return true + } + return false +} + +// True if the specified HTTP status code is one for which the Post utility should +// automatically redirect. +func shouldRedirectPost(statusCode int) bool { + switch statusCode { + case StatusFound, StatusSeeOther: + return true + } + return false +} + +// Get issues a GET to the specified URL. If the response is one of the following +// redirect codes, Get follows the redirect, up to a maximum of 10 redirects: +// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +// +// An error is returned if there were too many redirects or if there +// was an HTTP protocol error. A non-2xx response doesn't cause an +// error. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +// +// Get is a wrapper around DefaultClient.Get. +func Get(url string) (resp *Response, err error) { + return DefaultClient.Get(url) +} + +// Get issues a GET to the specified URL. If the response is one of the +// following redirect codes, Get follows the redirect after calling the +// Client's CheckRedirect function. +// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +// +// An error is returned if the Client's CheckRedirect function fails +// or if there was an HTTP protocol error. A non-2xx response doesn't +// cause an error. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +func (c *Client) Get(url string) (resp *Response, err error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.doFollowingRedirects(req, shouldRedirectGet) +} + +func (c *Client) doFollowingRedirects(ireq *Request, shouldRedirect func(int) bool) (resp *Response, err error) { + var base *url.URL + redirectChecker := c.CheckRedirect + if redirectChecker == nil { + redirectChecker = defaultCheckRedirect + } + var via []*Request + + if ireq.URL == nil { + ireq.closeBody() + return nil, errors.New("http: nil Request.URL") + } + + var reqmu sync.Mutex // guards req + req := ireq + + var timer *time.Timer + if c.Timeout > 0 { + type canceler interface { + CancelRequest(*Request) + } + tr, ok := c.transport().(canceler) + if !ok { + return nil, fmt.Errorf("net/http: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", c.transport()) + } + timer = time.AfterFunc(c.Timeout, func() { + reqmu.Lock() + defer reqmu.Unlock() + tr.CancelRequest(req) + }) + } + + urlStr := "" // next relative or absolute URL to fetch (after first request) + redirectFailed := false + for redirect := 0; ; redirect++ { + if redirect != 0 { + nreq := new(Request) + nreq.Method = ireq.Method + if ireq.Method == "POST" || ireq.Method == "PUT" { + nreq.Method = "GET" + } + nreq.Header = make(Header) + nreq.URL, err = base.Parse(urlStr) + if err != nil { + break + } + if len(via) > 0 { + // Add the Referer header. 
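+				// Note the scheme guard below: the Referer header is set
+				// only when the previous request was not https, so that
+				// secure URLs are not leaked to the next target.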
+ lastReq := via[len(via)-1] + if lastReq.URL.Scheme != "https" { + nreq.Header.Set("Referer", lastReq.URL.String()) + } + + err = redirectChecker(nreq, via) + if err != nil { + redirectFailed = true + break + } + } + reqmu.Lock() + req = nreq + reqmu.Unlock() + } + + urlStr = req.URL.String() + if resp, err = c.send(req); err != nil { + break + } + + if shouldRedirect(resp.StatusCode) { + // Read the body if small so underlying TCP connection will be re-used. + // No need to check for errors: if it fails, Transport won't reuse it anyway. + const maxBodySlurpSize = 2 << 10 + if resp.ContentLength == -1 || resp.ContentLength <= maxBodySlurpSize { + io.CopyN(ioutil.Discard, resp.Body, maxBodySlurpSize) + } + resp.Body.Close() + if urlStr = resp.Header.Get("Location"); urlStr == "" { + err = errors.New(fmt.Sprintf("%d response missing Location header", resp.StatusCode)) + break + } + base = req.URL + via = append(via, req) + continue + } + if timer != nil { + resp.Body = &cancelTimerBody{timer, resp.Body} + } + return resp, nil + } + + method := ireq.Method + urlErr := &url.Error{ + Op: method[0:1] + strings.ToLower(method[1:]), + URL: urlStr, + Err: err, + } + + if redirectFailed { + // Special case for Go 1 compatibility: return both the response + // and an error if the CheckRedirect function failed. + // See http://golang.org/issue/3795 + return resp, urlErr + } + + if resp != nil { + resp.Body.Close() + } + return nil, urlErr +} + +func defaultCheckRedirect(req *Request, via []*Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + return nil +} + +// Post issues a POST to the specified URL. +// +// Caller should close resp.Body when done reading from it. +// +// Post is a wrapper around DefaultClient.Post +func Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { + return DefaultClient.Post(url, bodyType, body) +} + +// Post issues a POST to the specified URL. +// +// Caller should close resp.Body when done reading from it. +// +// If the provided body is also an io.Closer, it is closed after the +// request. +func (c *Client) Post(url string, bodyType string, body io.Reader) (resp *Response, err error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.doFollowingRedirects(req, shouldRedirectPost) +} + +// PostForm issues a POST to the specified URL, with data's keys and +// values URL-encoded as the request body. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +// +// PostForm is a wrapper around DefaultClient.PostForm +func PostForm(url string, data url.Values) (resp *Response, err error) { + return DefaultClient.PostForm(url, data) +} + +// PostForm issues a POST to the specified URL, +// with data's keys and values urlencoded as the request body. +// +// When err is nil, resp always contains a non-nil resp.Body. +// Caller should close resp.Body when done reading from it. +func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// Head issues a HEAD to the specified URL. If the response is one of the +// following redirect codes, Head follows the redirect after calling the +// Client's CheckRedirect function. 
+// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +// +// Head is a wrapper around DefaultClient.Head +func Head(url string) (resp *Response, err error) { + return DefaultClient.Head(url) +} + +// Head issues a HEAD to the specified URL. If the response is one of the +// following redirect codes, Head follows the redirect after calling the +// Client's CheckRedirect function. +// +// 301 (Moved Permanently) +// 302 (Found) +// 303 (See Other) +// 307 (Temporary Redirect) +func (c *Client) Head(url string) (resp *Response, err error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.doFollowingRedirects(req, shouldRedirectGet) +} + +type cancelTimerBody struct { + t *time.Timer + rc io.ReadCloser +} + +func (b *cancelTimerBody) Read(p []byte) (n int, err error) { + n, err = b.rc.Read(p) + if err == io.EOF { + b.t.Stop() + } + return +} + +func (b *cancelTimerBody) Close() error { + err := b.rc.Close() + b.t.Stop() + return err +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/client_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1038 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for client.go + +package http_test + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + . "net/http" + "net/http/httptest" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +var robotsTxtHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Last-Modified", "sometime") + fmt.Fprintf(w, "User-agent: go\nDisallow: /something/") +}) + +// pedanticReadAll works like ioutil.ReadAll but additionally +// verifies that r obeys the documented io.Reader contract. +func pedanticReadAll(r io.Reader) (b []byte, err error) { + var bufa [64]byte + buf := bufa[:] + for { + n, err := r.Read(buf) + if n == 0 && err == nil { + return nil, fmt.Errorf("Read: n=0 with err=nil") + } + b = append(b, buf[:n]...) 
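+		// The read-after-EOF check below is the contract being verified:
+		// once Read reports io.EOF, another Read must keep returning
+		// (0, io.EOF).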
+ if err == io.EOF { + n, err := r.Read(buf) + if n != 0 || err != io.EOF { + return nil, fmt.Errorf("Read: n=%d err=%#v after EOF", n, err) + } + return b, nil + } + if err != nil { + return b, err + } + } +} + +type chanWriter chan string + +func (w chanWriter) Write(p []byte) (n int, err error) { + w <- string(p) + return len(p), nil +} + +func TestClient(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(robotsTxtHandler) + defer ts.Close() + + r, err := Get(ts.URL) + var b []byte + if err == nil { + b, err = pedanticReadAll(r.Body) + r.Body.Close() + } + if err != nil { + t.Error(err) + } else if s := string(b); !strings.HasPrefix(s, "User-agent:") { + t.Errorf("Incorrect page body (did not begin with User-agent): %q", s) + } +} + +func TestClientHead(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(robotsTxtHandler) + defer ts.Close() + + r, err := Head(ts.URL) + if err != nil { + t.Fatal(err) + } + if _, ok := r.Header["Last-Modified"]; !ok { + t.Error("Last-Modified header not found.") + } +} + +type recordingTransport struct { + req *Request +} + +func (t *recordingTransport) RoundTrip(req *Request) (resp *Response, err error) { + t.req = req + return nil, errors.New("dummy impl") +} + +func TestGetRequestFormat(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + url := "http://dummy.faketld/" + client.Get(url) // Note: doesn't hit network + if tr.req.Method != "GET" { + t.Errorf("expected method %q; got %q", "GET", tr.req.Method) + } + if tr.req.URL.String() != url { + t.Errorf("expected URL %q; got %q", url, tr.req.URL.String()) + } + if tr.req.Header == nil { + t.Errorf("expected non-nil request Header") + } +} + +func TestPostRequestFormat(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + + url := "http://dummy.faketld/" + json := `{"key":"value"}` + b := strings.NewReader(json) + client.Post(url, "application/json", b) // Note: doesn't hit network + + if tr.req.Method != "POST" { + t.Errorf("got method %q, want %q", tr.req.Method, "POST") + } + if tr.req.URL.String() != url { + t.Errorf("got URL %q, want %q", tr.req.URL.String(), url) + } + if tr.req.Header == nil { + t.Fatalf("expected non-nil request Header") + } + if tr.req.Close { + t.Error("got Close true, want false") + } + if g, e := tr.req.ContentLength, int64(len(json)); g != e { + t.Errorf("got ContentLength %d, want %d", g, e) + } +} + +func TestPostFormRequestFormat(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + + urlStr := "http://dummy.faketld/" + form := make(url.Values) + form.Set("foo", "bar") + form.Add("foo", "bar2") + form.Set("bar", "baz") + client.PostForm(urlStr, form) // Note: doesn't hit network + + if tr.req.Method != "POST" { + t.Errorf("got method %q, want %q", tr.req.Method, "POST") + } + if tr.req.URL.String() != urlStr { + t.Errorf("got URL %q, want %q", tr.req.URL.String(), urlStr) + } + if tr.req.Header == nil { + t.Fatalf("expected non-nil request Header") + } + if g, e := tr.req.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; g != e { + t.Errorf("got Content-Type %q, want %q", g, e) + } + if tr.req.Close { + t.Error("got Close true, want false") + } + // Depending on map iteration, body can be either of these. 
+ expectedBody := "foo=bar&foo=bar2&bar=baz" + expectedBody1 := "bar=baz&foo=bar&foo=bar2" + if g, e := tr.req.ContentLength, int64(len(expectedBody)); g != e { + t.Errorf("got ContentLength %d, want %d", g, e) + } + bodyb, err := ioutil.ReadAll(tr.req.Body) + if err != nil { + t.Fatalf("ReadAll on req.Body: %v", err) + } + if g := string(bodyb); g != expectedBody && g != expectedBody1 { + t.Errorf("got body %q, want %q or %q", g, expectedBody, expectedBody1) + } +} + +func TestClientRedirects(t *testing.T) { + defer afterTest(t) + var ts *httptest.Server + ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + n, _ := strconv.Atoi(r.FormValue("n")) + // Test Referer header. (7 is arbitrary position to test at) + if n == 7 { + if g, e := r.Referer(), ts.URL+"/?n=6"; e != g { + t.Errorf("on request ?n=7, expected referer of %q; got %q", e, g) + } + } + if n < 15 { + Redirect(w, r, fmt.Sprintf("/?n=%d", n+1), StatusFound) + return + } + fmt.Fprintf(w, "n=%d", n) + })) + defer ts.Close() + + c := &Client{} + _, err := c.Get(ts.URL) + if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { + t.Errorf("with default client Get, expected error %q, got %q", e, g) + } + + // HEAD request should also have the ability to follow redirects. + _, err = c.Head(ts.URL) + if e, g := "Head /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { + t.Errorf("with default client Head, expected error %q, got %q", e, g) + } + + // Do should also follow redirects. + greq, _ := NewRequest("GET", ts.URL, nil) + _, err = c.Do(greq) + if e, g := "Get /?n=10: stopped after 10 redirects", fmt.Sprintf("%v", err); e != g { + t.Errorf("with default client Do, expected error %q, got %q", e, g) + } + + var checkErr error + var lastVia []*Request + c = &Client{CheckRedirect: func(_ *Request, via []*Request) error { + lastVia = via + return checkErr + }} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + res.Body.Close() + finalUrl := res.Request.URL.String() + if e, g := "", fmt.Sprintf("%v", err); e != g { + t.Errorf("with custom client, expected error %q, got %q", e, g) + } + if !strings.HasSuffix(finalUrl, "/?n=15") { + t.Errorf("expected final url to end in /?n=15; got url %q", finalUrl) + } + if e, g := 15, len(lastVia); e != g { + t.Errorf("expected lastVia to have contained %d elements; got %d", e, g) + } + + checkErr = errors.New("no redirects allowed") + res, err = c.Get(ts.URL) + if urlError, ok := err.(*url.Error); !ok || urlError.Err != checkErr { + t.Errorf("with redirects forbidden, expected a *url.Error with our 'no redirects allowed' error inside; got %#v (%q)", err, err) + } + if res == nil { + t.Fatalf("Expected a non-nil Response on CheckRedirect failure (http://golang.org/issue/3795)") + } + res.Body.Close() + if res.Header.Get("Location") == "" { + t.Errorf("no Location header in Response") + } +} + +func TestPostRedirects(t *testing.T) { + defer afterTest(t) + var log struct { + sync.Mutex + bytes.Buffer + } + var ts *httptest.Server + ts = httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + log.Lock() + fmt.Fprintf(&log.Buffer, "%s %s ", r.Method, r.RequestURI) + log.Unlock() + if v := r.URL.Query().Get("code"); v != "" { + code, _ := strconv.Atoi(v) + if code/100 == 3 { + w.Header().Set("Location", ts.URL) + } + w.WriteHeader(code) + } + })) + defer ts.Close() + tests := []struct { + suffix string + want int // response code + }{ + {"/", 200}, + {"/?code=301", 301}, + {"/?code=302", 
200}, + {"/?code=303", 200}, + {"/?code=404", 404}, + } + for _, tt := range tests { + res, err := Post(ts.URL+tt.suffix, "text/plain", strings.NewReader("Some content")) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != tt.want { + t.Errorf("POST %s: status code = %d; want %d", tt.suffix, res.StatusCode, tt.want) + } + } + log.Lock() + got := log.String() + log.Unlock() + want := "POST / POST /?code=301 POST /?code=302 GET / POST /?code=303 GET / POST /?code=404 " + if got != want { + t.Errorf("Log differs.\n Got: %q\nWant: %q", got, want) + } +} + +var expectedCookies = []*Cookie{ + {Name: "ChocolateChip", Value: "tasty"}, + {Name: "First", Value: "Hit"}, + {Name: "Second", Value: "Hit"}, +} + +var echoCookiesRedirectHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + for _, cookie := range r.Cookies() { + SetCookie(w, cookie) + } + if r.URL.Path == "/" { + SetCookie(w, expectedCookies[1]) + Redirect(w, r, "/second", StatusMovedPermanently) + } else { + SetCookie(w, expectedCookies[2]) + w.Write([]byte("hello")) + } +}) + +func TestClientSendsCookieFromJar(t *testing.T) { + tr := &recordingTransport{} + client := &Client{Transport: tr} + client.Jar = &TestJar{perURL: make(map[string][]*Cookie)} + us := "http://dummy.faketld/" + u, _ := url.Parse(us) + client.Jar.SetCookies(u, expectedCookies) + + client.Get(us) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + client.Head(us) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + client.Post(us, "text/plain", strings.NewReader("body")) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + client.PostForm(us, url.Values{}) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + req, _ := NewRequest("GET", us, nil) + client.Do(req) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) + + req, _ = NewRequest("POST", us, nil) + client.Do(req) // Note: doesn't hit network + matchReturnedCookies(t, expectedCookies, tr.req.Cookies()) +} + +// Just enough correctness for our redirect tests. Uses the URL.Host as the +// scope of all cookies. 
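+// It mirrors the two-method CookieJar interface (SetCookies and Cookies),
+// keyed only by URL.Host.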
+type TestJar struct { + m sync.Mutex + perURL map[string][]*Cookie +} + +func (j *TestJar) SetCookies(u *url.URL, cookies []*Cookie) { + j.m.Lock() + defer j.m.Unlock() + if j.perURL == nil { + j.perURL = make(map[string][]*Cookie) + } + j.perURL[u.Host] = cookies +} + +func (j *TestJar) Cookies(u *url.URL) []*Cookie { + j.m.Lock() + defer j.m.Unlock() + return j.perURL[u.Host] +} + +func TestRedirectCookiesJar(t *testing.T) { + defer afterTest(t) + var ts *httptest.Server + ts = httptest.NewServer(echoCookiesRedirectHandler) + defer ts.Close() + c := &Client{ + Jar: new(TestJar), + } + u, _ := url.Parse(ts.URL) + c.Jar.SetCookies(u, []*Cookie{expectedCookies[0]}) + resp, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("Get: %v", err) + } + resp.Body.Close() + matchReturnedCookies(t, expectedCookies, resp.Cookies()) +} + +func matchReturnedCookies(t *testing.T, expected, given []*Cookie) { + if len(given) != len(expected) { + t.Logf("Received cookies: %v", given) + t.Errorf("Expected %d cookies, got %d", len(expected), len(given)) + } + for _, ec := range expected { + foundC := false + for _, c := range given { + if ec.Name == c.Name && ec.Value == c.Value { + foundC = true + break + } + } + if !foundC { + t.Errorf("Missing cookie %v", ec) + } + } +} + +func TestJarCalls(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + pathSuffix := r.RequestURI[1:] + if r.RequestURI == "/nosetcookie" { + return // dont set cookies for this path + } + SetCookie(w, &Cookie{Name: "name" + pathSuffix, Value: "val" + pathSuffix}) + if r.RequestURI == "/" { + Redirect(w, r, "http://secondhost.fake/secondpath", 302) + } + })) + defer ts.Close() + jar := new(RecordingJar) + c := &Client{ + Jar: jar, + Transport: &Transport{ + Dial: func(_ string, _ string) (net.Conn, error) { + return net.Dial("tcp", ts.Listener.Addr().String()) + }, + }, + } + _, err := c.Get("http://firsthost.fake/") + if err != nil { + t.Fatal(err) + } + _, err = c.Get("http://firsthost.fake/nosetcookie") + if err != nil { + t.Fatal(err) + } + got := jar.log.String() + want := `Cookies("http://firsthost.fake/") +SetCookie("http://firsthost.fake/", [name=val]) +Cookies("http://secondhost.fake/secondpath") +SetCookie("http://secondhost.fake/secondpath", [namesecondpath=valsecondpath]) +Cookies("http://firsthost.fake/nosetcookie") +` + if got != want { + t.Errorf("Got Jar calls:\n%s\nWant:\n%s", got, want) + } +} + +// RecordingJar keeps a log of calls made to it, without +// tracking any cookies. +type RecordingJar struct { + mu sync.Mutex + log bytes.Buffer +} + +func (j *RecordingJar) SetCookies(u *url.URL, cookies []*Cookie) { + j.logf("SetCookie(%q, %v)\n", u, cookies) +} + +func (j *RecordingJar) Cookies(u *url.URL) []*Cookie { + j.logf("Cookies(%q)\n", u) + return nil +} + +func (j *RecordingJar) logf(format string, args ...interface{}) { + j.mu.Lock() + defer j.mu.Unlock() + fmt.Fprintf(&j.log, format, args...) 
+} + +func TestStreamingGet(t *testing.T) { + defer afterTest(t) + say := make(chan string) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.(Flusher).Flush() + for str := range say { + w.Write([]byte(str)) + w.(Flusher).Flush() + } + })) + defer ts.Close() + + c := &Client{} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + var buf [10]byte + for _, str := range []string{"i", "am", "also", "known", "as", "comet"} { + say <- str + n, err := io.ReadFull(res.Body, buf[0:len(str)]) + if err != nil { + t.Fatalf("ReadFull on %q: %v", str, err) + } + if n != len(str) { + t.Fatalf("Receiving %q, only read %d bytes", str, n) + } + got := string(buf[0:n]) + if got != str { + t.Fatalf("Expected %q, got %q", str, got) + } + } + close(say) + _, err = io.ReadFull(res.Body, buf[0:1]) + if err != io.EOF { + t.Fatalf("at end expected EOF, got %v", err) + } +} + +type writeCountingConn struct { + net.Conn + count *int +} + +func (c *writeCountingConn) Write(p []byte) (int, error) { + *c.count++ + return c.Conn.Write(p) +} + +// TestClientWrites verifies that client requests are buffered and we +// don't send a TCP packet per line of the http request + body. +func TestClientWrites(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + })) + defer ts.Close() + + writes := 0 + dialer := func(netz string, addr string) (net.Conn, error) { + c, err := net.Dial(netz, addr) + if err == nil { + c = &writeCountingConn{c, &writes} + } + return c, err + } + c := &Client{Transport: &Transport{Dial: dialer}} + + _, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if writes != 1 { + t.Errorf("Get request did %d Write calls, want 1", writes) + } + + writes = 0 + _, err = c.PostForm(ts.URL, url.Values{"foo": {"bar"}}) + if err != nil { + t.Fatal(err) + } + if writes != 1 { + t.Errorf("Post request did %d Write calls, want 1", writes) + } +} + +func TestClientInsecureTransport(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write([]byte("Hello")) + })) + errc := make(chanWriter, 10) // but only expecting 1 + ts.Config.ErrorLog = log.New(errc, "", 0) + defer ts.Close() + + // TODO(bradfitz): add tests for skipping hostname checks too? + // would require a new cert for testing, and probably + // redundant with these tests. 
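+	// Try both settings against the server's self-signed cert: with
+	// InsecureSkipVerify set the Get must succeed, and without it the
+	// TLS handshake must fail.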
+ for _, insecure := range []bool{true, false} { + tr := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: insecure, + }, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get(ts.URL) + if (err == nil) != insecure { + t.Errorf("insecure=%v: got unexpected err=%v", insecure, err) + } + if res != nil { + res.Body.Close() + } + } + + select { + case v := <-errc: + if !strings.Contains(v, "TLS handshake error") { + t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", v) + } + case <-time.After(5 * time.Second): + t.Errorf("timeout waiting for logged error") + } + +} + +func TestClientErrorWithRequestURI(t *testing.T) { + defer afterTest(t) + req, _ := NewRequest("GET", "http://localhost:1234/", nil) + req.RequestURI = "/this/field/is/illegal/and/should/error/" + _, err := DefaultClient.Do(req) + if err == nil { + t.Fatalf("expected an error") + } + if !strings.Contains(err.Error(), "RequestURI") { + t.Errorf("wanted error mentioning RequestURI; got error: %v", err) + } +} + +func newTLSTransport(t *testing.T, ts *httptest.Server) *Transport { + certs := x509.NewCertPool() + for _, c := range ts.TLS.Certificates { + roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1]) + if err != nil { + t.Fatalf("error parsing server's root cert: %v", err) + } + for _, root := range roots { + certs.AddCert(root) + } + } + return &Transport{ + TLSClientConfig: &tls.Config{RootCAs: certs}, + } +} + +func TestClientWithCorrectTLSServerName(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.TLS.ServerName != "127.0.0.1" { + t.Errorf("expected client to set ServerName 127.0.0.1, got: %q", r.TLS.ServerName) + } + })) + defer ts.Close() + + c := &Client{Transport: newTLSTransport(t, ts)} + if _, err := c.Get(ts.URL); err != nil { + t.Fatalf("expected successful TLS connection, got error: %v", err) + } +} + +func TestClientWithIncorrectTLSServerName(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) + defer ts.Close() + errc := make(chanWriter, 10) // but only expecting 1 + ts.Config.ErrorLog = log.New(errc, "", 0) + + trans := newTLSTransport(t, ts) + trans.TLSClientConfig.ServerName = "badserver" + c := &Client{Transport: trans} + _, err := c.Get(ts.URL) + if err == nil { + t.Fatalf("expected an error") + } + if !strings.Contains(err.Error(), "127.0.0.1") || !strings.Contains(err.Error(), "badserver") { + t.Errorf("wanted error mentioning 127.0.0.1 and badserver; got error: %v", err) + } + select { + case v := <-errc: + if !strings.Contains(v, "TLS handshake error") { + t.Errorf("expected an error log message containing 'TLS handshake error'; got %q", v) + } + case <-time.After(5 * time.Second): + t.Errorf("timeout waiting for logged error") + } +} + +// Test for golang.org/issue/5829; the Transport should respect TLSClientConfig.ServerName +// when not empty. +// +// tls.Config.ServerName (non-empty, set to "example.com") takes +// precedence over "some-other-host.tld" which previously incorrectly +// took precedence. We don't actually connect to (or even resolve) +// "some-other-host.tld", though, because of the Transport.Dial hook. +// +// The httptest.Server has a cert with "example.com" as its name. 
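+//
+// (editor's note: the Transport.Dial hook used below, which returns a
+// connection to ts.Listener.Addr() regardless of the requested address, is
+// the same technique TestJarCalls uses above to serve fake hosts without
+// DNS.)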
+func TestTransportUsesTLSConfigServerName(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write([]byte("Hello")) + })) + defer ts.Close() + + tr := newTLSTransport(t, ts) + tr.TLSClientConfig.ServerName = "example.com" // one of httptest's Server cert names + tr.Dial = func(netw, addr string) (net.Conn, error) { + return net.Dial(netw, ts.Listener.Addr().String()) + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get("https://some-other-host.tld/") + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} + +func TestResponseSetsTLSConnectionState(t *testing.T) { + defer afterTest(t) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write([]byte("Hello")) + })) + defer ts.Close() + + tr := newTLSTransport(t, ts) + tr.TLSClientConfig.CipherSuites = []uint16{tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA} + tr.Dial = func(netw, addr string) (net.Conn, error) { + return net.Dial(netw, ts.Listener.Addr().String()) + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get("https://example.com/") + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if res.TLS == nil { + t.Fatal("Response didn't set TLS Connection State.") + } + if got, want := res.TLS.CipherSuite, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA; got != want { + t.Errorf("TLS Cipher Suite = %d; want %d", got, want) + } +} + +// Verify Response.ContentLength is populated. http://golang.org/issue/4126 +func TestClientHeadContentLength(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if v := r.FormValue("cl"); v != "" { + w.Header().Set("Content-Length", v) + } + })) + defer ts.Close() + tests := []struct { + suffix string + want int64 + }{ + {"/?cl=1234", 1234}, + {"/?cl=0", 0}, + {"", -1}, + } + for _, tt := range tests { + req, _ := NewRequest("HEAD", ts.URL+tt.suffix, nil) + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if res.ContentLength != tt.want { + t.Errorf("Content-Length = %d; want %d", res.ContentLength, tt.want) + } + bs, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if len(bs) != 0 { + t.Errorf("Unexpected content: %q", bs) + } + } +} + +func TestEmptyPasswordAuth(t *testing.T) { + defer afterTest(t) + gopher := "gopher" + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + auth := r.Header.Get("Authorization") + if strings.HasPrefix(auth, "Basic ") { + encoded := auth[6:] + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + t.Fatal(err) + } + expected := gopher + ":" + s := string(decoded) + if expected != s { + t.Errorf("Invalid Authorization header. 
Got %q, wanted %q", s, expected) + } + } else { + t.Errorf("Invalid auth %q", auth) + } + })) + defer ts.Close() + c := &Client{} + req, err := NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + req.URL.User = url.User(gopher) + resp, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() +} + +func TestBasicAuth(t *testing.T) { + defer afterTest(t) + tr := &recordingTransport{} + client := &Client{Transport: tr} + + url := "http://My%20User:My%20Pass@dummy.faketld/" + expected := "My User:My Pass" + client.Get(url) + + if tr.req.Method != "GET" { + t.Errorf("got method %q, want %q", tr.req.Method, "GET") + } + if tr.req.URL.String() != url { + t.Errorf("got URL %q, want %q", tr.req.URL.String(), url) + } + if tr.req.Header == nil { + t.Fatalf("expected non-nil request Header") + } + auth := tr.req.Header.Get("Authorization") + if strings.HasPrefix(auth, "Basic ") { + encoded := auth[6:] + decoded, err := base64.StdEncoding.DecodeString(encoded) + if err != nil { + t.Fatal(err) + } + s := string(decoded) + if expected != s { + t.Errorf("Invalid Authorization header. Got %q, wanted %q", s, expected) + } + } else { + t.Errorf("Invalid auth %q", auth) + } +} + +func TestClientTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + defer afterTest(t) + sawRoot := make(chan bool, 1) + sawSlow := make(chan bool, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.URL.Path == "/" { + sawRoot <- true + Redirect(w, r, "/slow", StatusFound) + return + } + if r.URL.Path == "/slow" { + w.Write([]byte("Hello")) + w.(Flusher).Flush() + sawSlow <- true + time.Sleep(2 * time.Second) + return + } + })) + defer ts.Close() + const timeout = 500 * time.Millisecond + c := &Client{ + Timeout: timeout, + } + + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + + select { + case <-sawRoot: + // good. + default: + t.Fatal("handler never got / request") + } + + select { + case <-sawSlow: + // good. + default: + t.Fatal("handler never got /slow request") + } + + errc := make(chan error, 1) + go func() { + _, err := ioutil.ReadAll(res.Body) + errc <- err + res.Body.Close() + }() + + const failTime = timeout * 2 + select { + case err := <-errc: + if err == nil { + t.Error("expected error from ReadAll") + } + // Expected error. + case <-time.After(failTime): + t.Errorf("timeout after %v waiting for timeout of %v", failTime, timeout) + } +} + +func TestClientRedirectEatsBody(t *testing.T) { + defer afterTest(t) + saw := make(chan string, 2) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + saw <- r.RemoteAddr + if r.URL.Path == "/" { + Redirect(w, r, "/foo", StatusFound) // which includes a body + } + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + _, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + + var first string + select { + case first = <-saw: + default: + t.Fatal("server didn't see a request") + } + + var second string + select { + case second = <-saw: + default: + t.Fatal("server didn't see a second request") + } + + if first != second { + t.Fatal("server saw different client ports before & after the redirect") + } +} + +// eofReaderFunc is an io.Reader that runs itself, and then returns io.EOF. 
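+// It gives tests a hook that fires at a fixed point in a stream;
+// TestClientTrailers below chains such readers with io.MultiReader to set
+// the request trailers while the body is being sent.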
+type eofReaderFunc func() + +func (f eofReaderFunc) Read(p []byte) (n int, err error) { + f() + return 0, io.EOF +} + +func TestClientTrailers(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + w.Header().Set("Trailer", "Server-Trailer-A, Server-Trailer-B") + w.Header().Add("Trailer", "Server-Trailer-C") + + var decl []string + for k := range r.Trailer { + decl = append(decl, k) + } + sort.Strings(decl) + + slurp, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Server reading request body: %v", err) + } + if string(slurp) != "foo" { + t.Errorf("Server read request body %q; want foo", slurp) + } + if r.Trailer == nil { + io.WriteString(w, "nil Trailer") + } else { + fmt.Fprintf(w, "decl: %v, vals: %s, %s", + decl, + r.Trailer.Get("Client-Trailer-A"), + r.Trailer.Get("Client-Trailer-B")) + } + + // TODO: golang.org/issue/7759: there's no way yet for + // the server to set trailers without hijacking, so do + // that for now, just to test the client. Later, in + // Go 1.4, it should be implicit that any mutations + // to w.Header() after the initial write are the + // trailers to be sent, if and only if they were + // previously declared with w.Header().Set("Trailer", + // ..keys..) + w.(Flusher).Flush() + conn, buf, _ := w.(Hijacker).Hijack() + t := Header{} + t.Set("Server-Trailer-A", "valuea") + t.Set("Server-Trailer-C", "valuec") // skipping B + buf.WriteString("0\r\n") // eof + t.Write(buf) + buf.WriteString("\r\n") // end of trailers + buf.Flush() + conn.Close() + })) + defer ts.Close() + + var req *Request + req, _ = NewRequest("POST", ts.URL, io.MultiReader( + eofReaderFunc(func() { + req.Trailer["Client-Trailer-A"] = []string{"valuea"} + }), + strings.NewReader("foo"), + eofReaderFunc(func() { + req.Trailer["Client-Trailer-B"] = []string{"valueb"} + }), + )) + req.Trailer = Header{ + "Client-Trailer-A": nil, // to be set later + "Client-Trailer-B": nil, // to be set later + } + req.ContentLength = -1 + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if err := wantBody(res, err, "decl: [Client-Trailer-A Client-Trailer-B], vals: valuea, valueb"); err != nil { + t.Error(err) + } + want := Header{ + "Server-Trailer-A": []string{"valuea"}, + "Server-Trailer-B": nil, + "Server-Trailer-C": []string{"valuec"}, + } + if !reflect.DeepEqual(res.Trailer, want) { + t.Errorf("Response trailers = %#v; want %#v", res.Trailer, want) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cookie.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cookie.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cookie.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,363 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" +) + +// This implementation is done according to RFC 6265: +// +// http://tools.ietf.org/html/rfc6265 + +// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an +// HTTP response or the Cookie header of an HTTP request. +type Cookie struct { + Name string + Value string + Path string + Domain string + Expires time.Time + RawExpires string + + // MaxAge=0 means no 'Max-Age' attribute specified. 
+ // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' + // MaxAge>0 means Max-Age attribute present and given in seconds + MaxAge int + Secure bool + HttpOnly bool + Raw string + Unparsed []string // Raw text of unparsed attribute-value pairs +} + +// readSetCookies parses all "Set-Cookie" values from +// the header h and returns the successfully parsed Cookies. +func readSetCookies(h Header) []*Cookie { + cookies := []*Cookie{} + for _, line := range h["Set-Cookie"] { + parts := strings.Split(strings.TrimSpace(line), ";") + if len(parts) == 1 && parts[0] == "" { + continue + } + parts[0] = strings.TrimSpace(parts[0]) + j := strings.Index(parts[0], "=") + if j < 0 { + continue + } + name, value := parts[0][:j], parts[0][j+1:] + if !isCookieNameValid(name) { + continue + } + value, success := parseCookieValue(value) + if !success { + continue + } + c := &Cookie{ + Name: name, + Value: value, + Raw: line, + } + for i := 1; i < len(parts); i++ { + parts[i] = strings.TrimSpace(parts[i]) + if len(parts[i]) == 0 { + continue + } + + attr, val := parts[i], "" + if j := strings.Index(attr, "="); j >= 0 { + attr, val = attr[:j], attr[j+1:] + } + lowerAttr := strings.ToLower(attr) + val, success = parseCookieValue(val) + if !success { + c.Unparsed = append(c.Unparsed, parts[i]) + continue + } + switch lowerAttr { + case "secure": + c.Secure = true + continue + case "httponly": + c.HttpOnly = true + continue + case "domain": + c.Domain = val + continue + case "max-age": + secs, err := strconv.Atoi(val) + if err != nil || secs != 0 && val[0] == '0' { + break + } + if secs <= 0 { + c.MaxAge = -1 + } else { + c.MaxAge = secs + } + continue + case "expires": + c.RawExpires = val + exptime, err := time.Parse(time.RFC1123, val) + if err != nil { + exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val) + if err != nil { + c.Expires = time.Time{} + break + } + } + c.Expires = exptime.UTC() + continue + case "path": + c.Path = val + continue + } + c.Unparsed = append(c.Unparsed, parts[i]) + } + cookies = append(cookies, c) + } + return cookies +} + +// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. +func SetCookie(w ResponseWriter, cookie *Cookie) { + w.Header().Add("Set-Cookie", cookie.String()) +} + +// String returns the serialization of the cookie for use in a Cookie +// header (if only Name and Value are set) or a Set-Cookie response +// header (if other fields are set). +func (c *Cookie) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) + if len(c.Path) > 0 { + fmt.Fprintf(&b, "; Path=%s", sanitizeCookiePath(c.Path)) + } + if len(c.Domain) > 0 { + if validCookieDomain(c.Domain) { + // A c.Domain containing illegal characters is not + // sanitized but simply dropped which turns the cookie + // into a host-only cookie. A leading dot is okay + // but won't be sent. + d := c.Domain + if d[0] == '.' 
{
+ d = d[1:]
+ }
+ fmt.Fprintf(&b, "; Domain=%s", d)
+ } else {
+ log.Printf("net/http: invalid Cookie.Domain %q; dropping domain attribute",
+ c.Domain)
+ }
+ }
+ if c.Expires.Unix() > 0 {
+ fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123))
+ }
+ if c.MaxAge > 0 {
+ fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge)
+ } else if c.MaxAge < 0 {
+ fmt.Fprintf(&b, "; Max-Age=0")
+ }
+ if c.HttpOnly {
+ fmt.Fprintf(&b, "; HttpOnly")
+ }
+ if c.Secure {
+ fmt.Fprintf(&b, "; Secure")
+ }
+ return b.String()
+}
+
+// readCookies parses all "Cookie" values from the header h and
+// returns the successfully parsed Cookies.
+//
+// if filter isn't empty, only cookies of that name are returned
+func readCookies(h Header, filter string) []*Cookie {
+ cookies := []*Cookie{}
+ lines, ok := h["Cookie"]
+ if !ok {
+ return cookies
+ }
+
+ for _, line := range lines {
+ parts := strings.Split(strings.TrimSpace(line), ";")
+ if len(parts) == 1 && parts[0] == "" {
+ continue
+ }
+ // Per-line attributes
+ parsedPairs := 0
+ for i := 0; i < len(parts); i++ {
+ parts[i] = strings.TrimSpace(parts[i])
+ if len(parts[i]) == 0 {
+ continue
+ }
+ name, val := parts[i], ""
+ if j := strings.Index(name, "="); j >= 0 {
+ name, val = name[:j], name[j+1:]
+ }
+ if !isCookieNameValid(name) {
+ continue
+ }
+ if filter != "" && filter != name {
+ continue
+ }
+ val, success := parseCookieValue(val)
+ if !success {
+ continue
+ }
+ cookies = append(cookies, &Cookie{Name: name, Value: val})
+ parsedPairs++
+ }
+ }
+ return cookies
+}
+
+// validCookieDomain returns whether v is a valid cookie domain-value.
+func validCookieDomain(v string) bool {
+ if isCookieDomainName(v) {
+ return true
+ }
+ if net.ParseIP(v) != nil && !strings.Contains(v, ":") {
+ return true
+ }
+ return false
+}
+
+// isCookieDomainName returns whether s is a valid domain name or a valid
+// domain name with a leading dot '.'. It is almost a direct copy of
+// package net's isDomainName.
+func isCookieDomainName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 255 {
+ return false
+ }
+
+ if s[0] == '.' {
+ // A cookie domain attribute may start with a leading dot.
+ s = s[1:]
+ }
+ last := byte('.')
+ ok := false // Ok once we've seen a letter.
+ partlen := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch {
+ default:
+ return false
+ case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
+ // No '_' allowed here (in contrast to package net).
+ ok = true
+ partlen++
+ case '0' <= c && c <= '9':
+ // fine
+ partlen++
+ case c == '-':
+ // Byte before dash cannot be dot.
+ if last == '.' {
+ return false
+ }
+ partlen++
+ case c == '.':
+ // Byte before dot cannot be dot, dash.
+ if last == '.' || last == '-' {
+ return false
+ }
+ if partlen > 63 || partlen == 0 {
+ return false
+ }
+ partlen = 0
+ }
+ last = c
+ }
+ if last == '-' || partlen > 63 {
+ return false
+ }
+
+ return ok
+}
+
+var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-")
+
+func sanitizeCookieName(n string) string {
+ return cookieNameSanitizer.Replace(n)
+}
+
+// http://tools.ietf.org/html/rfc6265#section-4.1.1
+// cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
+// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+// ; US-ASCII characters excluding CTLs,
+// ; whitespace DQUOTE, comma, semicolon,
+// ; and backslash
+// We loosen this as spaces and commas are common in cookie values
+// but we produce a quoted cookie-value when the value starts or ends
+// with a comma or space.
+// See http://golang.org/issue/7243 for the discussion.
+func sanitizeCookieValue(v string) string {
+ v = sanitizeOrWarn("Cookie.Value", validCookieValueByte, v)
+ if len(v) == 0 {
+ return v
+ }
+ if v[0] == ' ' || v[0] == ',' || v[len(v)-1] == ' ' || v[len(v)-1] == ',' {
+ return `"` + v + `"`
+ }
+ return v
+}
+
+func validCookieValueByte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != '"' && b != ';' && b != '\\'
+}
+
+// path-av = "Path=" path-value
+// path-value = <any CHAR except CTLs or ";">
+func sanitizeCookiePath(v string) string {
+ return sanitizeOrWarn("Cookie.Path", validCookiePathByte, v)
+}
+
+func validCookiePathByte(b byte) bool {
+ return 0x20 <= b && b < 0x7f && b != ';'
+}
+
+func sanitizeOrWarn(fieldName string, valid func(byte) bool, v string) string {
+ ok := true
+ for i := 0; i < len(v); i++ {
+ if valid(v[i]) {
+ continue
+ }
+ log.Printf("net/http: invalid byte %q in %s; dropping invalid bytes", v[i], fieldName)
+ ok = false
+ break
+ }
+ if ok {
+ return v
+ }
+ buf := make([]byte, 0, len(v))
+ for i := 0; i < len(v); i++ {
+ if b := v[i]; valid(b) {
+ buf = append(buf, b)
+ }
+ }
+ return string(buf)
+}
+
+func parseCookieValue(raw string) (string, bool) {
+ // Strip the quotes, if present.
+ if len(raw) > 1 && raw[0] == '"' && raw[len(raw)-1] == '"' {
+ raw = raw[1 : len(raw)-1]
+ }
+ for i := 0; i < len(raw); i++ {
+ if !validCookieValueByte(raw[i]) {
+ return "", false
+ }
+ }
+ return raw, true
+}
+
+func isCookieNameValid(raw string) bool {
+ return strings.IndexFunc(raw, isNotToken) < 0
+} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go'
--- src/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/cookie_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,380 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+var writeSetCookiesTests = []struct {
+ Cookie *Cookie
+ Raw string
+}{
+ {
+ &Cookie{Name: "cookie-1", Value: "v$1"},
+ "cookie-1=v$1",
+ },
+ {
+ &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600},
+ "cookie-2=two; Max-Age=3600",
+ },
+ {
+ &Cookie{Name: "cookie-3", Value: "three", Domain: ".example.com"},
+ "cookie-3=three; Domain=example.com",
+ },
+ {
+ &Cookie{Name: "cookie-4", Value: "four", Path: "/restricted/"},
+ "cookie-4=four; Path=/restricted/",
+ },
+ {
+ &Cookie{Name: "cookie-5", Value: "five", Domain: "wrong;bad.abc"},
+ "cookie-5=five",
+ },
+ {
+ &Cookie{Name: "cookie-6", Value: "six", Domain: "bad-.abc"},
+ "cookie-6=six",
+ },
+ {
+ &Cookie{Name: "cookie-7", Value: "seven", Domain: "127.0.0.1"},
+ "cookie-7=seven; Domain=127.0.0.1",
+ },
+ {
+ &Cookie{Name: "cookie-8", Value: "eight", Domain: "::1"},
+ "cookie-8=eight",
+ },
+ // The "special" cookies have values containing commas or spaces which
+ // are disallowed by RFC 6265 but are common in the wild.
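+ // (editor's note: per sanitizeCookieValue in cookie.go, the value is
+ // wrapped in DQUOTEs exactly when it starts or ends with a space or
+ // comma.)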
+ { + &Cookie{Name: "special-1", Value: "a z"}, + `special-1=a z`, + }, + { + &Cookie{Name: "special-2", Value: " z"}, + `special-2=" z"`, + }, + { + &Cookie{Name: "special-3", Value: "a "}, + `special-3="a "`, + }, + { + &Cookie{Name: "special-4", Value: " "}, + `special-4=" "`, + }, + { + &Cookie{Name: "special-5", Value: "a,z"}, + `special-5=a,z`, + }, + { + &Cookie{Name: "special-6", Value: ",z"}, + `special-6=",z"`, + }, + { + &Cookie{Name: "special-7", Value: "a,"}, + `special-7="a,"`, + }, + { + &Cookie{Name: "special-8", Value: ","}, + `special-8=","`, + }, + { + &Cookie{Name: "empty-value", Value: ""}, + `empty-value=`, + }, +} + +func TestWriteSetCookies(t *testing.T) { + defer log.SetOutput(os.Stderr) + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + + for i, tt := range writeSetCookiesTests { + if g, e := tt.Cookie.String(), tt.Raw; g != e { + t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, e, g) + continue + } + } + + if got, sub := logbuf.String(), "dropping domain attribute"; !strings.Contains(got, sub) { + t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got) + } +} + +type headerOnlyResponseWriter Header + +func (ho headerOnlyResponseWriter) Header() Header { + return Header(ho) +} + +func (ho headerOnlyResponseWriter) Write([]byte) (int, error) { + panic("NOIMPL") +} + +func (ho headerOnlyResponseWriter) WriteHeader(int) { + panic("NOIMPL") +} + +func TestSetCookie(t *testing.T) { + m := make(Header) + SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-1", Value: "one", Path: "/restricted/"}) + SetCookie(headerOnlyResponseWriter(m), &Cookie{Name: "cookie-2", Value: "two", MaxAge: 3600}) + if l := len(m["Set-Cookie"]); l != 2 { + t.Fatalf("expected %d cookies, got %d", 2, l) + } + if g, e := m["Set-Cookie"][0], "cookie-1=one; Path=/restricted/"; g != e { + t.Errorf("cookie #1: want %q, got %q", e, g) + } + if g, e := m["Set-Cookie"][1], "cookie-2=two; Max-Age=3600"; g != e { + t.Errorf("cookie #2: want %q, got %q", e, g) + } +} + +var addCookieTests = []struct { + Cookies []*Cookie + Raw string +}{ + { + []*Cookie{}, + "", + }, + { + []*Cookie{{Name: "cookie-1", Value: "v$1"}}, + "cookie-1=v$1", + }, + { + []*Cookie{ + {Name: "cookie-1", Value: "v$1"}, + {Name: "cookie-2", Value: "v$2"}, + {Name: "cookie-3", Value: "v$3"}, + }, + "cookie-1=v$1; cookie-2=v$2; cookie-3=v$3", + }, +} + +func TestAddCookie(t *testing.T) { + for i, tt := range addCookieTests { + req, _ := NewRequest("GET", "http://example.com/", nil) + for _, c := range tt.Cookies { + req.AddCookie(c) + } + if g := req.Header.Get("Cookie"); g != tt.Raw { + t.Errorf("Test %d:\nwant: %s\n got: %s\n", i, tt.Raw, g) + continue + } + } +} + +var readSetCookiesTests = []struct { + Header Header + Cookies []*Cookie +}{ + { + Header{"Set-Cookie": {"Cookie-1=v$1"}}, + []*Cookie{{Name: "Cookie-1", Value: "v$1", Raw: "Cookie-1=v$1"}}, + }, + { + Header{"Set-Cookie": {"NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly"}}, + []*Cookie{{ + Name: "NID", + Value: "99=YsDT5i3E-CXax-", + Path: "/", + Domain: ".google.ch", + HttpOnly: true, + Expires: time.Date(2011, 11, 23, 1, 5, 3, 0, time.UTC), + RawExpires: "Wed, 23-Nov-2011 01:05:03 GMT", + Raw: "NID=99=YsDT5i3E-CXax-; expires=Wed, 23-Nov-2011 01:05:03 GMT; path=/; domain=.google.ch; HttpOnly", + }}, + }, + { + Header{"Set-Cookie": {".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, + []*Cookie{{ + Name: ".ASPXAUTH", + Value: "7E3AA", + Path: "/", + Expires: 
time.Date(2012, 3, 7, 14, 25, 6, 0, time.UTC), + RawExpires: "Wed, 07-Mar-2012 14:25:06 GMT", + HttpOnly: true, + Raw: ".ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly", + }}, + }, + { + Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly"}}, + []*Cookie{{ + Name: "ASP.NET_SessionId", + Value: "foo", + Path: "/", + HttpOnly: true, + Raw: "ASP.NET_SessionId=foo; path=/; HttpOnly", + }}, + }, + // Make sure we can properly read back the Set-Cookie headers we create + // for values containing spaces or commas: + { + Header{"Set-Cookie": {`special-1=a z`}}, + []*Cookie{{Name: "special-1", Value: "a z", Raw: `special-1=a z`}}, + }, + { + Header{"Set-Cookie": {`special-2=" z"`}}, + []*Cookie{{Name: "special-2", Value: " z", Raw: `special-2=" z"`}}, + }, + { + Header{"Set-Cookie": {`special-3="a "`}}, + []*Cookie{{Name: "special-3", Value: "a ", Raw: `special-3="a "`}}, + }, + { + Header{"Set-Cookie": {`special-4=" "`}}, + []*Cookie{{Name: "special-4", Value: " ", Raw: `special-4=" "`}}, + }, + { + Header{"Set-Cookie": {`special-5=a,z`}}, + []*Cookie{{Name: "special-5", Value: "a,z", Raw: `special-5=a,z`}}, + }, + { + Header{"Set-Cookie": {`special-6=",z"`}}, + []*Cookie{{Name: "special-6", Value: ",z", Raw: `special-6=",z"`}}, + }, + { + Header{"Set-Cookie": {`special-7=a,`}}, + []*Cookie{{Name: "special-7", Value: "a,", Raw: `special-7=a,`}}, + }, + { + Header{"Set-Cookie": {`special-8=","`}}, + []*Cookie{{Name: "special-8", Value: ",", Raw: `special-8=","`}}, + }, + + // TODO(bradfitz): users have reported seeing this in the + // wild, but do browsers handle it? RFC 6265 just says "don't + // do that" (section 3) and then never mentions header folding + // again. + // Header{"Set-Cookie": {"ASP.NET_SessionId=foo; path=/; HttpOnly, .ASPXAUTH=7E3AA; expires=Wed, 07-Mar-2012 14:25:06 GMT; path=/; HttpOnly"}}, +} + +func toJSON(v interface{}) string { + b, err := json.Marshal(v) + if err != nil { + return fmt.Sprintf("%#v", v) + } + return string(b) +} + +func TestReadSetCookies(t *testing.T) { + for i, tt := range readSetCookiesTests { + for n := 0; n < 2; n++ { // to verify readSetCookies doesn't mutate its input + c := readSetCookies(tt.Header) + if !reflect.DeepEqual(c, tt.Cookies) { + t.Errorf("#%d readSetCookies: have\n%s\nwant\n%s\n", i, toJSON(c), toJSON(tt.Cookies)) + continue + } + } + } +} + +var readCookiesTests = []struct { + Header Header + Filter string + Cookies []*Cookie +}{ + { + Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + "", + []*Cookie{ + {Name: "Cookie-1", Value: "v$1"}, + {Name: "c2", Value: "v2"}, + }, + }, + { + Header{"Cookie": {"Cookie-1=v$1", "c2=v2"}}, + "c2", + []*Cookie{ + {Name: "c2", Value: "v2"}, + }, + }, + { + Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + "", + []*Cookie{ + {Name: "Cookie-1", Value: "v$1"}, + {Name: "c2", Value: "v2"}, + }, + }, + { + Header{"Cookie": {"Cookie-1=v$1; c2=v2"}}, + "c2", + []*Cookie{ + {Name: "c2", Value: "v2"}, + }, + }, +} + +func TestReadCookies(t *testing.T) { + for i, tt := range readCookiesTests { + for n := 0; n < 2; n++ { // to verify readCookies doesn't mutate its input + c := readCookies(tt.Header, tt.Filter) + if !reflect.DeepEqual(c, tt.Cookies) { + t.Errorf("#%d readCookies:\nhave: %s\nwant: %s\n", i, toJSON(c), toJSON(tt.Cookies)) + continue + } + } + } +} + +func TestCookieSanitizeValue(t *testing.T) { + defer log.SetOutput(os.Stderr) + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + + tests := []struct { + in, want string + }{ + {"foo", "foo"}, + {"foo;bar", "foobar"}, + 
{"foo\\bar", "foobar"}, + {"foo\"bar", "foobar"}, + {"\x00\x7e\x7f\x80", "\x7e"}, + {`"withquotes"`, "withquotes"}, + {"a z", "a z"}, + {" z", `" z"`}, + {"a ", `"a "`}, + } + for _, tt := range tests { + if got := sanitizeCookieValue(tt.in); got != tt.want { + t.Errorf("sanitizeCookieValue(%q) = %q; want %q", tt.in, got, tt.want) + } + } + + if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) { + t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got) + } +} + +func TestCookieSanitizePath(t *testing.T) { + defer log.SetOutput(os.Stderr) + var logbuf bytes.Buffer + log.SetOutput(&logbuf) + + tests := []struct { + in, want string + }{ + {"/path", "/path"}, + {"/path with space/", "/path with space/"}, + {"/just;no;semicolon\x00orstuff/", "/justnosemicolonorstuff/"}, + } + for _, tt := range tests { + if got := sanitizeCookiePath(tt.in); got != tt.want { + t.Errorf("sanitizeCookiePath(%q) = %q; want %q", tt.in, got, tt.want) + } + } + + if got, sub := logbuf.String(), "dropping invalid bytes"; !strings.Contains(got, sub) { + t.Errorf("Expected substring %q in log output. Got:\n%s", sub, got) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,497 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cookiejar implements an in-memory RFC 6265-compliant http.CookieJar. +package cookiejar + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/url" + "sort" + "strings" + "sync" + "time" +) + +// PublicSuffixList provides the public suffix of a domain. For example: +// - the public suffix of "example.com" is "com", +// - the public suffix of "foo1.foo2.foo3.co.uk" is "co.uk", and +// - the public suffix of "bar.pvt.k12.ma.us" is "pvt.k12.ma.us". +// +// Implementations of PublicSuffixList must be safe for concurrent use by +// multiple goroutines. +// +// An implementation that always returns "" is valid and may be useful for +// testing but it is not secure: it means that the HTTP server for foo.com can +// set a cookie for bar.com. +// +// A public suffix list implementation is in the package +// code.google.com/p/go.net/publicsuffix. +type PublicSuffixList interface { + // PublicSuffix returns the public suffix of domain. + // + // TODO: specify which of the caller and callee is responsible for IP + // addresses, for leading and trailing dots, for case sensitivity, and + // for IDN/Punycode. + PublicSuffix(domain string) string + + // String returns a description of the source of this public suffix + // list. The description will typically contain something like a time + // stamp or version number. + String() string +} + +// Options are the options for creating a new Jar. +type Options struct { + // PublicSuffixList is the public suffix list that determines whether + // an HTTP server can set a cookie for a domain. + // + // A nil value is valid and may be useful for testing but it is not + // secure: it means that the HTTP server for foo.co.uk can set a cookie + // for bar.co.uk. + PublicSuffixList PublicSuffixList +} + +// Jar implements the http.CookieJar interface from the net/http package. 
+type Jar struct {
+ psList PublicSuffixList
+
+ // mu locks the remaining fields.
+ mu sync.Mutex
+
+ // entries is a set of entries, keyed by their eTLD+1 and subkeyed by
+ // their name/domain/path.
+ entries map[string]map[string]entry
+
+ // nextSeqNum is the next sequence number assigned to a new cookie
+ // created by SetCookies.
+ nextSeqNum uint64
+}
+
+// New returns a new cookie jar. A nil *Options is equivalent to a zero
+// Options.
+func New(o *Options) (*Jar, error) {
+ jar := &Jar{
+ entries: make(map[string]map[string]entry),
+ }
+ if o != nil {
+ jar.psList = o.PublicSuffixList
+ }
+ return jar, nil
+}
+
+// entry is the internal representation of a cookie.
+//
+// This struct type is not used outside of this package per se, but the exported
+// fields are those of RFC 6265.
+type entry struct {
+ Name string
+ Value string
+ Domain string
+ Path string
+ Secure bool
+ HttpOnly bool
+ Persistent bool
+ HostOnly bool
+ Expires time.Time
+ Creation time.Time
+ LastAccess time.Time
+
+ // seqNum is a sequence number so that Cookies returns cookies in a
+ // deterministic order, even for cookies that have equal Path length and
+ // equal Creation time. This simplifies testing.
+ seqNum uint64
+}
+
+// id returns the domain;path;name triple of e as an id.
+func (e *entry) id() string {
+ return fmt.Sprintf("%s;%s;%s", e.Domain, e.Path, e.Name)
+}
+
+// shouldSend determines whether e's cookie qualifies to be included in a
+// request to host/path. It is the caller's responsibility to check if the
+// cookie is expired.
+func (e *entry) shouldSend(https bool, host, path string) bool {
+ return e.domainMatch(host) && e.pathMatch(path) && (https || !e.Secure)
+}
+
+// domainMatch implements "domain-match" of RFC 6265 section 5.1.3.
+func (e *entry) domainMatch(host string) bool {
+ if e.Domain == host {
+ return true
+ }
+ return !e.HostOnly && hasDotSuffix(host, e.Domain)
+}
+
+// pathMatch implements "path-match" according to RFC 6265 section 5.1.4.
+func (e *entry) pathMatch(requestPath string) bool {
+ if requestPath == e.Path {
+ return true
+ }
+ if strings.HasPrefix(requestPath, e.Path) {
+ if e.Path[len(e.Path)-1] == '/' {
+ return true // The "/any/" matches "/any/path" case.
+ } else if requestPath[len(e.Path)] == '/' {
+ return true // The "/any" matches "/any/path" case.
+ }
+ }
+ return false
+}
+
+// hasDotSuffix reports whether s ends in "."+suffix.
+func hasDotSuffix(s, suffix string) bool {
+ return len(s) > len(suffix) && s[len(s)-len(suffix)-1] == '.' && s[len(s)-len(suffix):] == suffix
+}
+
+// byPathLength is a []entry sort.Interface that sorts according to RFC 6265
+// section 5.4 point 2: by longest path and then by earliest creation time.
+type byPathLength []entry
+
+func (s byPathLength) Len() int { return len(s) }
+
+func (s byPathLength) Less(i, j int) bool {
+ if len(s[i].Path) != len(s[j].Path) {
+ return len(s[i].Path) > len(s[j].Path)
+ }
+ if !s[i].Creation.Equal(s[j].Creation) {
+ return s[i].Creation.Before(s[j].Creation)
+ }
+ return s[i].seqNum < s[j].seqNum
+}
+
+func (s byPathLength) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// Cookies implements the Cookies method of the http.CookieJar interface.
+//
+// It returns an empty slice if the URL's scheme is not HTTP or HTTPS.
+func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) {
+ return j.cookies(u, time.Now())
+}
+
+// cookies is like Cookies but takes the current time as a parameter.
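+// Passing now explicitly keeps the expiry logic deterministic: jar_test.go
+// drives it with a synthetic clock instead of sleeping.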
+func (j *Jar) cookies(u *url.URL, now time.Time) (cookies []*http.Cookie) { + if u.Scheme != "http" && u.Scheme != "https" { + return cookies + } + host, err := canonicalHost(u.Host) + if err != nil { + return cookies + } + key := jarKey(host, j.psList) + + j.mu.Lock() + defer j.mu.Unlock() + + submap := j.entries[key] + if submap == nil { + return cookies + } + + https := u.Scheme == "https" + path := u.Path + if path == "" { + path = "/" + } + + modified := false + var selected []entry + for id, e := range submap { + if e.Persistent && !e.Expires.After(now) { + delete(submap, id) + modified = true + continue + } + if !e.shouldSend(https, host, path) { + continue + } + e.LastAccess = now + submap[id] = e + selected = append(selected, e) + modified = true + } + if modified { + if len(submap) == 0 { + delete(j.entries, key) + } else { + j.entries[key] = submap + } + } + + sort.Sort(byPathLength(selected)) + for _, e := range selected { + cookies = append(cookies, &http.Cookie{Name: e.Name, Value: e.Value}) + } + + return cookies +} + +// SetCookies implements the SetCookies method of the http.CookieJar interface. +// +// It does nothing if the URL's scheme is not HTTP or HTTPS. +func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) { + j.setCookies(u, cookies, time.Now()) +} + +// setCookies is like SetCookies but takes the current time as parameter. +func (j *Jar) setCookies(u *url.URL, cookies []*http.Cookie, now time.Time) { + if len(cookies) == 0 { + return + } + if u.Scheme != "http" && u.Scheme != "https" { + return + } + host, err := canonicalHost(u.Host) + if err != nil { + return + } + key := jarKey(host, j.psList) + defPath := defaultPath(u.Path) + + j.mu.Lock() + defer j.mu.Unlock() + + submap := j.entries[key] + + modified := false + for _, cookie := range cookies { + e, remove, err := j.newEntry(cookie, now, defPath, host) + if err != nil { + continue + } + id := e.id() + if remove { + if submap != nil { + if _, ok := submap[id]; ok { + delete(submap, id) + modified = true + } + } + continue + } + if submap == nil { + submap = make(map[string]entry) + } + + if old, ok := submap[id]; ok { + e.Creation = old.Creation + e.seqNum = old.seqNum + } else { + e.Creation = now + e.seqNum = j.nextSeqNum + j.nextSeqNum++ + } + e.LastAccess = now + submap[id] = e + modified = true + } + + if modified { + if len(submap) == 0 { + delete(j.entries, key) + } else { + j.entries[key] = submap + } + } +} + +// canonicalHost strips port from host if present and returns the canonicalized +// host name. +func canonicalHost(host string) (string, error) { + var err error + host = strings.ToLower(host) + if hasPort(host) { + host, _, err = net.SplitHostPort(host) + if err != nil { + return "", err + } + } + if strings.HasSuffix(host, ".") { + // Strip trailing dot from fully qualified domain names. + host = host[:len(host)-1] + } + return toASCII(host) +} + +// hasPort reports whether host contains a port number. host may be a host +// name, an IPv4 or an IPv6 address. +func hasPort(host string) bool { + colons := strings.Count(host, ":") + if colons == 0 { + return false + } + if colons == 1 { + return true + } + return host[0] == '[' && strings.Contains(host, "]:") +} + +// jarKey returns the key to use for a jar. 
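+// With a public suffix list the key is the host's eTLD+1; without one it
+// falls back to the last two labels. From the tests below:
+//
+// jarKey("foo.www.bbc.co.uk", testPSL{}) // "bbc.co.uk"
+// jarKey("foo.www.bbc.co.uk", nil) // "co.uk"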
+func jarKey(host string, psl PublicSuffixList) string { + if isIP(host) { + return host + } + + var i int + if psl == nil { + i = strings.LastIndex(host, ".") + if i == -1 { + return host + } + } else { + suffix := psl.PublicSuffix(host) + if suffix == host { + return host + } + i = len(host) - len(suffix) + if i <= 0 || host[i-1] != '.' { + // The provided public suffix list psl is broken. + // Storing cookies under host is a safe stopgap. + return host + } + } + prevDot := strings.LastIndex(host[:i-1], ".") + return host[prevDot+1:] +} + +// isIP reports whether host is an IP address. +func isIP(host string) bool { + return net.ParseIP(host) != nil +} + +// defaultPath returns the directory part of an URL's path according to +// RFC 6265 section 5.1.4. +func defaultPath(path string) string { + if len(path) == 0 || path[0] != '/' { + return "/" // Path is empty or malformed. + } + + i := strings.LastIndex(path, "/") // Path starts with "/", so i != -1. + if i == 0 { + return "/" // Path has the form "/abc". + } + return path[:i] // Path is either of form "/abc/xyz" or "/abc/xyz/". +} + +// newEntry creates an entry from a http.Cookie c. now is the current time and +// is compared to c.Expires to determine deletion of c. defPath and host are the +// default-path and the canonical host name of the URL c was received from. +// +// remove records whether the jar should delete this cookie, as it has already +// expired with respect to now. In this case, e may be incomplete, but it will +// be valid to call e.id (which depends on e's Name, Domain and Path). +// +// A malformed c.Domain will result in an error. +func (j *Jar) newEntry(c *http.Cookie, now time.Time, defPath, host string) (e entry, remove bool, err error) { + e.Name = c.Name + + if c.Path == "" || c.Path[0] != '/' { + e.Path = defPath + } else { + e.Path = c.Path + } + + e.Domain, e.HostOnly, err = j.domainAndType(host, c.Domain) + if err != nil { + return e, false, err + } + + // MaxAge takes precedence over Expires. + if c.MaxAge < 0 { + return e, true, nil + } else if c.MaxAge > 0 { + e.Expires = now.Add(time.Duration(c.MaxAge) * time.Second) + e.Persistent = true + } else { + if c.Expires.IsZero() { + e.Expires = endOfTime + e.Persistent = false + } else { + if !c.Expires.After(now) { + return e, true, nil + } + e.Expires = c.Expires + e.Persistent = true + } + } + + e.Value = c.Value + e.Secure = c.Secure + e.HttpOnly = c.HttpOnly + + return e, false, nil +} + +var ( + errIllegalDomain = errors.New("cookiejar: illegal cookie domain attribute") + errMalformedDomain = errors.New("cookiejar: malformed cookie domain attribute") + errNoHostname = errors.New("cookiejar: no host name available (IP only)") +) + +// endOfTime is the time when session (non-persistent) cookies expire. +// This instant is representable in most date/time formats (not just +// Go's time.Time) and should be far enough in the future. +var endOfTime = time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC) + +// domainAndType determines the cookie's domain and hostOnly attribute. +func (j *Jar) domainAndType(host, domain string) (string, bool, error) { + if domain == "" { + // No domain attribute in the SetCookie header indicates a + // host cookie. + return host, true, nil + } + + if isIP(host) { + // According to RFC 6265 domain-matching includes not being + // an IP address. + // TODO: This might be relaxed as in common browsers. 
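+ // (editor's note: this error is never surfaced to callers of
+ // SetCookies; a failing newEntry just causes the cookie to be
+ // skipped.)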
+ return "", false, errNoHostname + } + + // From here on: If the cookie is valid, it is a domain cookie (with + // the one exception of a public suffix below). + // See RFC 6265 section 5.2.3. + if domain[0] == '.' { + domain = domain[1:] + } + + if len(domain) == 0 || domain[0] == '.' { + // Received either "Domain=." or "Domain=..some.thing", + // both are illegal. + return "", false, errMalformedDomain + } + domain = strings.ToLower(domain) + + if domain[len(domain)-1] == '.' { + // We received stuff like "Domain=www.example.com.". + // Browsers do handle such stuff (actually differently) but + // RFC 6265 seems to be clear here (e.g. section 4.1.2.3) in + // requiring a reject. 4.1.2.3 is not normative, but + // "Domain Matching" (5.1.3) and "Canonicalized Host Names" + // (5.1.2) are. + return "", false, errMalformedDomain + } + + // See RFC 6265 section 5.3 #5. + if j.psList != nil { + if ps := j.psList.PublicSuffix(domain); ps != "" && !hasDotSuffix(domain, ps) { + if host == domain { + // This is the one exception in which a cookie + // with a domain attribute is a host cookie. + return host, true, nil + } + return "", false, errIllegalDomain + } + } + + // The domain must domain-match host: www.mycompany.com cannot + // set cookies for .ourcompetitors.com. + if host != domain && !hasDotSuffix(host, domain) { + return "", false, errIllegalDomain + } + + return domain, false, nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/jar_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1267 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cookiejar + +import ( + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "testing" + "time" +) + +// tNow is the synthetic current time used as now during testing. +var tNow = time.Date(2013, 1, 1, 12, 0, 0, 0, time.UTC) + +// testPSL implements PublicSuffixList with just two rules: "co.uk" +// and the default rule "*". +type testPSL struct{} + +func (testPSL) String() string { + return "testPSL" +} +func (testPSL) PublicSuffix(d string) string { + if d == "co.uk" || strings.HasSuffix(d, ".co.uk") { + return "co.uk" + } + return d[strings.LastIndex(d, ".")+1:] +} + +// newTestJar creates an empty Jar with testPSL as the public suffix list. 
+func newTestJar() *Jar { + jar, err := New(&Options{PublicSuffixList: testPSL{}}) + if err != nil { + panic(err) + } + return jar +} + +var hasDotSuffixTests = [...]struct { + s, suffix string +}{ + {"", ""}, + {"", "."}, + {"", "x"}, + {".", ""}, + {".", "."}, + {".", ".."}, + {".", "x"}, + {".", "x."}, + {".", ".x"}, + {".", ".x."}, + {"x", ""}, + {"x", "."}, + {"x", ".."}, + {"x", "x"}, + {"x", "x."}, + {"x", ".x"}, + {"x", ".x."}, + {".x", ""}, + {".x", "."}, + {".x", ".."}, + {".x", "x"}, + {".x", "x."}, + {".x", ".x"}, + {".x", ".x."}, + {"x.", ""}, + {"x.", "."}, + {"x.", ".."}, + {"x.", "x"}, + {"x.", "x."}, + {"x.", ".x"}, + {"x.", ".x."}, + {"com", ""}, + {"com", "m"}, + {"com", "om"}, + {"com", "com"}, + {"com", ".com"}, + {"com", "x.com"}, + {"com", "xcom"}, + {"com", "xorg"}, + {"com", "org"}, + {"com", "rg"}, + {"foo.com", ""}, + {"foo.com", "m"}, + {"foo.com", "om"}, + {"foo.com", "com"}, + {"foo.com", ".com"}, + {"foo.com", "o.com"}, + {"foo.com", "oo.com"}, + {"foo.com", "foo.com"}, + {"foo.com", ".foo.com"}, + {"foo.com", "x.foo.com"}, + {"foo.com", "xfoo.com"}, + {"foo.com", "xfoo.org"}, + {"foo.com", "foo.org"}, + {"foo.com", "oo.org"}, + {"foo.com", "o.org"}, + {"foo.com", ".org"}, + {"foo.com", "org"}, + {"foo.com", "rg"}, +} + +func TestHasDotSuffix(t *testing.T) { + for _, tc := range hasDotSuffixTests { + got := hasDotSuffix(tc.s, tc.suffix) + want := strings.HasSuffix(tc.s, "."+tc.suffix) + if got != want { + t.Errorf("s=%q, suffix=%q: got %v, want %v", tc.s, tc.suffix, got, want) + } + } +} + +var canonicalHostTests = map[string]string{ + "www.example.com": "www.example.com", + "WWW.EXAMPLE.COM": "www.example.com", + "wWw.eXAmple.CoM": "www.example.com", + "www.example.com:80": "www.example.com", + "192.168.0.10": "192.168.0.10", + "192.168.0.5:8080": "192.168.0.5", + "2001:4860:0:2001::68": "2001:4860:0:2001::68", + "[2001:4860:0:::68]:8080": "2001:4860:0:::68", + "www.bücher.de": "www.xn--bcher-kva.de", + "www.example.com.": "www.example.com", + "[bad.unmatched.bracket:": "error", +} + +func TestCanonicalHost(t *testing.T) { + for h, want := range canonicalHostTests { + got, err := canonicalHost(h) + if want == "error" { + if err == nil { + t.Errorf("%q: got nil error, want non-nil", h) + } + continue + } + if err != nil { + t.Errorf("%q: %v", h, err) + continue + } + if got != want { + t.Errorf("%q: got %q, want %q", h, got, want) + continue + } + } +} + +var hasPortTests = map[string]bool{ + "www.example.com": false, + "www.example.com:80": true, + "127.0.0.1": false, + "127.0.0.1:8080": true, + "2001:4860:0:2001::68": false, + "[2001::0:::68]:80": true, +} + +func TestHasPort(t *testing.T) { + for host, want := range hasPortTests { + if got := hasPort(host); got != want { + t.Errorf("%q: got %t, want %t", host, got, want) + } + } +} + +var jarKeyTests = map[string]string{ + "foo.www.example.com": "example.com", + "www.example.com": "example.com", + "example.com": "example.com", + "com": "com", + "foo.www.bbc.co.uk": "bbc.co.uk", + "www.bbc.co.uk": "bbc.co.uk", + "bbc.co.uk": "bbc.co.uk", + "co.uk": "co.uk", + "uk": "uk", + "192.168.0.5": "192.168.0.5", +} + +func TestJarKey(t *testing.T) { + for host, want := range jarKeyTests { + if got := jarKey(host, testPSL{}); got != want { + t.Errorf("%q: got %q, want %q", host, got, want) + } + } +} + +var jarKeyNilPSLTests = map[string]string{ + "foo.www.example.com": "example.com", + "www.example.com": "example.com", + "example.com": "example.com", + "com": "com", + "foo.www.bbc.co.uk": "co.uk", + "www.bbc.co.uk": 
"co.uk", + "bbc.co.uk": "co.uk", + "co.uk": "co.uk", + "uk": "uk", + "192.168.0.5": "192.168.0.5", +} + +func TestJarKeyNilPSL(t *testing.T) { + for host, want := range jarKeyNilPSLTests { + if got := jarKey(host, nil); got != want { + t.Errorf("%q: got %q, want %q", host, got, want) + } + } +} + +var isIPTests = map[string]bool{ + "127.0.0.1": true, + "1.2.3.4": true, + "2001:4860:0:2001::68": true, + "example.com": false, + "1.1.1.300": false, + "www.foo.bar.net": false, + "123.foo.bar.net": false, +} + +func TestIsIP(t *testing.T) { + for host, want := range isIPTests { + if got := isIP(host); got != want { + t.Errorf("%q: got %t, want %t", host, got, want) + } + } +} + +var defaultPathTests = map[string]string{ + "/": "/", + "/abc": "/", + "/abc/": "/abc", + "/abc/xyz": "/abc", + "/abc/xyz/": "/abc/xyz", + "/a/b/c.html": "/a/b", + "": "/", + "strange": "/", + "//": "/", + "/a//b": "/a/", + "/a/./b": "/a/.", + "/a/../b": "/a/..", +} + +func TestDefaultPath(t *testing.T) { + for path, want := range defaultPathTests { + if got := defaultPath(path); got != want { + t.Errorf("%q: got %q, want %q", path, got, want) + } + } +} + +var domainAndTypeTests = [...]struct { + host string // host Set-Cookie header was received from + domain string // domain attribute in Set-Cookie header + wantDomain string // expected domain of cookie + wantHostOnly bool // expected host-cookie flag + wantErr error // expected error +}{ + {"www.example.com", "", "www.example.com", true, nil}, + {"127.0.0.1", "", "127.0.0.1", true, nil}, + {"2001:4860:0:2001::68", "", "2001:4860:0:2001::68", true, nil}, + {"www.example.com", "example.com", "example.com", false, nil}, + {"www.example.com", ".example.com", "example.com", false, nil}, + {"www.example.com", "www.example.com", "www.example.com", false, nil}, + {"www.example.com", ".www.example.com", "www.example.com", false, nil}, + {"foo.sso.example.com", "sso.example.com", "sso.example.com", false, nil}, + {"bar.co.uk", "bar.co.uk", "bar.co.uk", false, nil}, + {"foo.bar.co.uk", ".bar.co.uk", "bar.co.uk", false, nil}, + {"127.0.0.1", "127.0.0.1", "", false, errNoHostname}, + {"2001:4860:0:2001::68", "2001:4860:0:2001::68", "2001:4860:0:2001::68", false, errNoHostname}, + {"www.example.com", ".", "", false, errMalformedDomain}, + {"www.example.com", "..", "", false, errMalformedDomain}, + {"www.example.com", "other.com", "", false, errIllegalDomain}, + {"www.example.com", "com", "", false, errIllegalDomain}, + {"www.example.com", ".com", "", false, errIllegalDomain}, + {"foo.bar.co.uk", ".co.uk", "", false, errIllegalDomain}, + {"127.www.0.0.1", "127.0.0.1", "", false, errIllegalDomain}, + {"com", "", "com", true, nil}, + {"com", "com", "com", true, nil}, + {"com", ".com", "com", true, nil}, + {"co.uk", "", "co.uk", true, nil}, + {"co.uk", "co.uk", "co.uk", true, nil}, + {"co.uk", ".co.uk", "co.uk", true, nil}, +} + +func TestDomainAndType(t *testing.T) { + jar := newTestJar() + for _, tc := range domainAndTypeTests { + domain, hostOnly, err := jar.domainAndType(tc.host, tc.domain) + if err != tc.wantErr { + t.Errorf("%q/%q: got %q error, want %q", + tc.host, tc.domain, err, tc.wantErr) + continue + } + if err != nil { + continue + } + if domain != tc.wantDomain || hostOnly != tc.wantHostOnly { + t.Errorf("%q/%q: got %q/%t want %q/%t", + tc.host, tc.domain, domain, hostOnly, + tc.wantDomain, tc.wantHostOnly) + } + } +} + +// expiresIn creates an expires attribute delta seconds from tNow. 
+func expiresIn(delta int) string {
+ t := tNow.Add(time.Duration(delta) * time.Second)
+ return "expires=" + t.Format(time.RFC1123)
+}
+
+// mustParseURL parses s to an URL and panics on error.
+func mustParseURL(s string) *url.URL {
+ u, err := url.Parse(s)
+ if err != nil || u.Scheme == "" || u.Host == "" {
+ panic(fmt.Sprintf("Unable to parse URL %s.", s))
+ }
+ return u
+}
+
+// jarTest encapsulates the following actions on a jar:
+// 1. Perform SetCookies with fromURL and the cookies from setCookies.
+// (Done at time tNow + 0 ms.)
+// 2. Check that the entries in the jar match content.
+// (Done at time tNow + 1001 ms.)
+// 3. For each query in queries: Check that Cookies with toURL yields the
+// cookies in want.
+// (Query n done at tNow + (n+2)*1001 ms.)
type jarTest struct {
+ description string // The description of what this test is supposed to test
+ fromURL string // The full URL of the request from which Set-Cookie headers were received
+ setCookies []string // All the cookies received from fromURL
+ content string // The whole (non-expired) content of the jar
+ queries []query // Queries to test the Jar.Cookies method
+}
+
+// query contains one test of the cookies returned from Jar.Cookies.
+type query struct {
+ toURL string // the URL in the Cookies call
+ want string // the expected list of cookies (order matters)
+}
+
+// run runs the jarTest.
+func (test jarTest) run(t *testing.T, jar *Jar) {
+ now := tNow
+
+ // Populate jar with cookies.
+ setCookies := make([]*http.Cookie, len(test.setCookies))
+ for i, cs := range test.setCookies {
+ cookies := (&http.Response{Header: http.Header{"Set-Cookie": {cs}}}).Cookies()
+ if len(cookies) != 1 {
+ panic(fmt.Sprintf("Wrong cookie line %q: %#v", cs, cookies))
+ }
+ setCookies[i] = cookies[0]
+ }
+ jar.setCookies(mustParseURL(test.fromURL), setCookies, now)
+ now = now.Add(1001 * time.Millisecond)
+
+ // Serialize non-expired entries in the form "name1=val1 name2=val2".
+ var cs []string
+ for _, submap := range jar.entries {
+ for _, cookie := range submap {
+ if !cookie.Expires.After(now) {
+ continue
+ }
+ cs = append(cs, cookie.Name+"="+cookie.Value)
+ }
+ }
+ sort.Strings(cs)
+ got := strings.Join(cs, " ")
+
+ // Make sure jar content matches our expectations.
+ if got != test.content {
+ t.Errorf("Test %q Content\ngot %q\nwant %q",
+ test.description, got, test.content)
+ }
+
+ // Test different calls to Cookies.
+ for i, query := range test.queries {
+ now = now.Add(1001 * time.Millisecond)
+ var s []string
+ for _, c := range jar.cookies(mustParseURL(query.toURL), now) {
+ s = append(s, c.Name+"="+c.Value)
+ }
+ if got := strings.Join(s, " "); got != query.want {
+ t.Errorf("Test %q #%d\ngot %q\nwant %q", test.description, i, got, query.want)
+ }
+ }
+}
+
+// basicsTests contains fundamental tests. Each jarTest has to be performed on
+// a fresh, empty Jar.
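+// (TestBasics below allocates a fresh jar per entry; reusing one would let
+// cookies from earlier entries leak into later content checks.)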
+var basicsTests = [...]jarTest{ + { + "Retrieval of a plain host cookie.", + "http://www.host.test/", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", "A=a"}, + {"http://www.host.test/", "A=a"}, + {"http://www.host.test/some/path", "A=a"}, + {"https://www.host.test", "A=a"}, + {"https://www.host.test/", "A=a"}, + {"https://www.host.test/some/path", "A=a"}, + {"ftp://www.host.test", ""}, + {"ftp://www.host.test/", ""}, + {"ftp://www.host.test/some/path", ""}, + {"http://www.other.org", ""}, + {"http://sibling.host.test", ""}, + {"http://deep.www.host.test", ""}, + }, + }, + { + "Secure cookies are not returned to http.", + "http://www.host.test/", + []string{"A=a; secure"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some/path", ""}, + {"https://www.host.test", "A=a"}, + {"https://www.host.test/", "A=a"}, + {"https://www.host.test/some/path", "A=a"}, + }, + }, + { + "Explicit path.", + "http://www.host.test/", + []string{"A=a; path=/some/path"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some", ""}, + {"http://www.host.test/some/", ""}, + {"http://www.host.test/some/path", "A=a"}, + {"http://www.host.test/some/paths", ""}, + {"http://www.host.test/some/path/foo", "A=a"}, + {"http://www.host.test/some/path/foo/", "A=a"}, + }, + }, + { + "Implicit path #1: path is a directory.", + "http://www.host.test/some/path/", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some", ""}, + {"http://www.host.test/some/", ""}, + {"http://www.host.test/some/path", "A=a"}, + {"http://www.host.test/some/paths", ""}, + {"http://www.host.test/some/path/foo", "A=a"}, + {"http://www.host.test/some/path/foo/", "A=a"}, + }, + }, + { + "Implicit path #2: path is not a directory.", + "http://www.host.test/some/path/index.html", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", ""}, + {"http://www.host.test/", ""}, + {"http://www.host.test/some", ""}, + {"http://www.host.test/some/", ""}, + {"http://www.host.test/some/path", "A=a"}, + {"http://www.host.test/some/paths", ""}, + {"http://www.host.test/some/path/foo", "A=a"}, + {"http://www.host.test/some/path/foo/", "A=a"}, + }, + }, + { + "Implicit path #3: no path in URL at all.", + "http://www.host.test", + []string{"A=a"}, + "A=a", + []query{ + {"http://www.host.test", "A=a"}, + {"http://www.host.test/", "A=a"}, + {"http://www.host.test/some/path", "A=a"}, + }, + }, + { + "Cookies are sorted by path length.", + "http://www.host.test/", + []string{ + "A=a; path=/foo/bar", + "B=b; path=/foo/bar/baz/qux", + "C=c; path=/foo/bar/baz", + "D=d; path=/foo"}, + "A=a B=b C=c D=d", + []query{ + {"http://www.host.test/foo/bar/baz/qux", "B=b C=c A=a D=d"}, + {"http://www.host.test/foo/bar/baz/", "C=c A=a D=d"}, + {"http://www.host.test/foo/bar", "A=a D=d"}, + }, + }, + { + "Creation time determines sorting on same length paths.", + "http://www.host.test/", + []string{ + "A=a; path=/foo/bar", + "X=x; path=/foo/bar", + "Y=y; path=/foo/bar/baz/qux", + "B=b; path=/foo/bar/baz/qux", + "C=c; path=/foo/bar/baz", + "W=w; path=/foo/bar/baz", + "Z=z; path=/foo", + "D=d; path=/foo"}, + "A=a B=b C=c D=d W=w X=x Y=y Z=z", + []query{ + {"http://www.host.test/foo/bar/baz/qux", "Y=y B=b C=c W=w A=a X=x Z=z D=d"}, + {"http://www.host.test/foo/bar/baz/", "C=c W=w A=a X=x Z=z D=d"}, + {"http://www.host.test/foo/bar", "A=a X=x Z=z D=d"}, + }, + }, + { + 
"Sorting of same-name cookies.", + "http://www.host.test/", + []string{ + "A=1; path=/", + "A=2; path=/path", + "A=3; path=/quux", + "A=4; path=/path/foo", + "A=5; domain=.host.test; path=/path", + "A=6; domain=.host.test; path=/quux", + "A=7; domain=.host.test; path=/path/foo", + }, + "A=1 A=2 A=3 A=4 A=5 A=6 A=7", + []query{ + {"http://www.host.test/path", "A=2 A=5 A=1"}, + {"http://www.host.test/path/foo", "A=4 A=7 A=2 A=5 A=1"}, + }, + }, + { + "Disallow domain cookie on public suffix.", + "http://www.bbc.co.uk", + []string{ + "a=1", + "b=2; domain=co.uk", + }, + "a=1", + []query{{"http://www.bbc.co.uk", "a=1"}}, + }, + { + "Host cookie on IP.", + "http://192.168.0.10", + []string{"a=1"}, + "a=1", + []query{{"http://192.168.0.10", "a=1"}}, + }, + { + "Port is ignored #1.", + "http://www.host.test/", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://www.host.test:8080/", "a=1"}, + }, + }, + { + "Port is ignored #2.", + "http://www.host.test:8080/", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://www.host.test:8080/", "a=1"}, + {"http://www.host.test:1234/", "a=1"}, + }, + }, +} + +func TestBasics(t *testing.T) { + for _, test := range basicsTests { + jar := newTestJar() + test.run(t, jar) + } +} + +// updateAndDeleteTests contains jarTests which must be performed on the same +// Jar. +var updateAndDeleteTests = [...]jarTest{ + { + "Set initial cookies.", + "http://www.host.test", + []string{ + "a=1", + "b=2; secure", + "c=3; httponly", + "d=4; secure; httponly"}, + "a=1 b=2 c=3 d=4", + []query{ + {"http://www.host.test", "a=1 c=3"}, + {"https://www.host.test", "a=1 b=2 c=3 d=4"}, + }, + }, + { + "Update value via http.", + "http://www.host.test", + []string{ + "a=w", + "b=x; secure", + "c=y; httponly", + "d=z; secure; httponly"}, + "a=w b=x c=y d=z", + []query{ + {"http://www.host.test", "a=w c=y"}, + {"https://www.host.test", "a=w b=x c=y d=z"}, + }, + }, + { + "Clear Secure flag from a http.", + "http://www.host.test/", + []string{ + "b=xx", + "d=zz; httponly"}, + "a=w b=xx c=y d=zz", + []query{{"http://www.host.test", "a=w b=xx c=y d=zz"}}, + }, + { + "Delete all.", + "http://www.host.test/", + []string{ + "a=1; max-Age=-1", // delete via MaxAge + "b=2; " + expiresIn(-10), // delete via Expires + "c=2; max-age=-1; " + expiresIn(-10), // delete via both + "d=4; max-age=-1; " + expiresIn(10)}, // MaxAge takes precedence + "", + []query{{"http://www.host.test", ""}}, + }, + { + "Refill #1.", + "http://www.host.test", + []string{ + "A=1", + "A=2; path=/foo", + "A=3; domain=.host.test", + "A=4; path=/foo; domain=.host.test"}, + "A=1 A=2 A=3 A=4", + []query{{"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}}, + }, + { + "Refill #2.", + "http://www.google.com", + []string{ + "A=6", + "A=7; path=/foo", + "A=8; domain=.google.com", + "A=9; path=/foo; domain=.google.com"}, + "A=1 A=2 A=3 A=4 A=6 A=7 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}, + {"http://www.google.com/foo", "A=7 A=9 A=6 A=8"}, + }, + }, + { + "Delete A7.", + "http://www.google.com", + []string{"A=; path=/foo; max-age=-1"}, + "A=1 A=2 A=3 A=4 A=6 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=4 A=1 A=3"}, + {"http://www.google.com/foo", "A=9 A=6 A=8"}, + }, + }, + { + "Delete A4.", + "http://www.host.test", + []string{"A=; path=/foo; domain=host.test; max-age=-1"}, + "A=1 A=2 A=3 A=6 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1 A=3"}, + {"http://www.google.com/foo", "A=9 A=6 A=8"}, + }, + }, + { + "Delete 
A6.", + "http://www.google.com", + []string{"A=; max-age=-1"}, + "A=1 A=2 A=3 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1 A=3"}, + {"http://www.google.com/foo", "A=9 A=8"}, + }, + }, + { + "Delete A3.", + "http://www.host.test", + []string{"A=; domain=host.test; max-age=-1"}, + "A=1 A=2 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1"}, + {"http://www.google.com/foo", "A=9 A=8"}, + }, + }, + { + "No cross-domain delete.", + "http://www.host.test", + []string{ + "A=; domain=google.com; max-age=-1", + "A=; path=/foo; domain=google.com; max-age=-1"}, + "A=1 A=2 A=8 A=9", + []query{ + {"http://www.host.test/foo", "A=2 A=1"}, + {"http://www.google.com/foo", "A=9 A=8"}, + }, + }, + { + "Delete A8 and A9.", + "http://www.google.com", + []string{ + "A=; domain=google.com; max-age=-1", + "A=; path=/foo; domain=google.com; max-age=-1"}, + "A=1 A=2", + []query{ + {"http://www.host.test/foo", "A=2 A=1"}, + {"http://www.google.com/foo", ""}, + }, + }, +} + +func TestUpdateAndDelete(t *testing.T) { + jar := newTestJar() + for _, test := range updateAndDeleteTests { + test.run(t, jar) + } +} + +func TestExpiration(t *testing.T) { + jar := newTestJar() + jarTest{ + "Expiration.", + "http://www.host.test", + []string{ + "a=1", + "b=2; max-age=3", + "c=3; " + expiresIn(3), + "d=4; max-age=5", + "e=5; " + expiresIn(5), + "f=6; max-age=100", + }, + "a=1 b=2 c=3 d=4 e=5 f=6", // executed at t0 + 1001 ms + []query{ + {"http://www.host.test", "a=1 b=2 c=3 d=4 e=5 f=6"}, // t0 + 2002 ms + {"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 3003 ms + {"http://www.host.test", "a=1 d=4 e=5 f=6"}, // t0 + 4004 ms + {"http://www.host.test", "a=1 f=6"}, // t0 + 5005 ms + {"http://www.host.test", "a=1 f=6"}, // t0 + 6006 ms + }, + }.run(t, jar) +} + +// +// Tests derived from Chromium's cookie_store_unittest.h. +// + +// See http://src.chromium.org/viewvc/chrome/trunk/src/net/cookies/cookie_store_unittest.h?revision=159685&content-type=text/plain +// Some of the original tests are in a bad condition (e.g. +// DomainWithTrailingDotTest) or are not RFC 6265 conforming (e.g. +// TestNonDottedAndTLD #1 and #6) and have not been ported. + +// chromiumBasicsTests contains fundamental tests. Each jarTest has to be +// performed on a fresh, empty Jar. 
+var chromiumBasicsTests = [...]jarTest{ + { + "DomainWithTrailingDotTest.", + "http://www.google.com/", + []string{ + "a=1; domain=.www.google.com.", + "b=2; domain=.www.google.com.."}, + "", + []query{ + {"http://www.google.com", ""}, + }, + }, + { + "ValidSubdomainTest #1.", + "http://a.b.c.d.com", + []string{ + "a=1; domain=.a.b.c.d.com", + "b=2; domain=.b.c.d.com", + "c=3; domain=.c.d.com", + "d=4; domain=.d.com"}, + "a=1 b=2 c=3 d=4", + []query{ + {"http://a.b.c.d.com", "a=1 b=2 c=3 d=4"}, + {"http://b.c.d.com", "b=2 c=3 d=4"}, + {"http://c.d.com", "c=3 d=4"}, + {"http://d.com", "d=4"}, + }, + }, + { + "ValidSubdomainTest #2.", + "http://a.b.c.d.com", + []string{ + "a=1; domain=.a.b.c.d.com", + "b=2; domain=.b.c.d.com", + "c=3; domain=.c.d.com", + "d=4; domain=.d.com", + "X=bcd; domain=.b.c.d.com", + "X=cd; domain=.c.d.com"}, + "X=bcd X=cd a=1 b=2 c=3 d=4", + []query{ + {"http://b.c.d.com", "b=2 c=3 d=4 X=bcd X=cd"}, + {"http://c.d.com", "c=3 d=4 X=cd"}, + }, + }, + { + "InvalidDomainTest #1.", + "http://foo.bar.com", + []string{ + "a=1; domain=.yo.foo.bar.com", + "b=2; domain=.foo.com", + "c=3; domain=.bar.foo.com", + "d=4; domain=.foo.bar.com.net", + "e=5; domain=ar.com", + "f=6; domain=.", + "g=7; domain=/", + "h=8; domain=http://foo.bar.com", + "i=9; domain=..foo.bar.com", + "j=10; domain=..bar.com", + "k=11; domain=.foo.bar.com?blah", + "l=12; domain=.foo.bar.com/blah", + "m=12; domain=.foo.bar.com:80", + "n=14; domain=.foo.bar.com:", + "o=15; domain=.foo.bar.com#sup", + }, + "", // Jar is empty. + []query{{"http://foo.bar.com", ""}}, + }, + { + "InvalidDomainTest #2.", + "http://foo.com.com", + []string{"a=1; domain=.foo.com.com.com"}, + "", + []query{{"http://foo.bar.com", ""}}, + }, + { + "DomainWithoutLeadingDotTest #1.", + "http://manage.hosted.filefront.com", + []string{"a=1; domain=filefront.com"}, + "a=1", + []query{{"http://www.filefront.com", "a=1"}}, + }, + { + "DomainWithoutLeadingDotTest #2.", + "http://www.google.com", + []string{"a=1; domain=www.google.com"}, + "a=1", + []query{ + {"http://www.google.com", "a=1"}, + {"http://sub.www.google.com", "a=1"}, + {"http://something-else.com", ""}, + }, + }, + { + "CaseInsensitiveDomainTest.", + "http://www.google.com", + []string{ + "a=1; domain=.GOOGLE.COM", + "b=2; domain=.www.gOOgLE.coM"}, + "a=1 b=2", + []query{{"http://www.google.com", "a=1 b=2"}}, + }, + { + "TestIpAddress #1.", + "http://1.2.3.4/foo", + []string{"a=1; path=/"}, + "a=1", + []query{{"http://1.2.3.4/foo", "a=1"}}, + }, + { + "TestIpAddress #2.", + "http://1.2.3.4/foo", + []string{ + "a=1; domain=.1.2.3.4", + "b=2; domain=.3.4"}, + "", + []query{{"http://1.2.3.4/foo", ""}}, + }, + { + "TestIpAddress #3.", + "http://1.2.3.4/foo", + []string{"a=1; domain=1.2.3.4"}, + "", + []query{{"http://1.2.3.4/foo", ""}}, + }, + { + "TestNonDottedAndTLD #2.", + "http://com./index.html", + []string{"a=1"}, + "a=1", + []query{ + {"http://com./index.html", "a=1"}, + {"http://no-cookies.com./index.html", ""}, + }, + }, + { + "TestNonDottedAndTLD #3.", + "http://a.b", + []string{ + "a=1; domain=.b", + "b=2; domain=b"}, + "", + []query{{"http://bar.foo", ""}}, + }, + { + "TestNonDottedAndTLD #4.", + "http://google.com", + []string{ + "a=1; domain=.com", + "b=2; domain=com"}, + "", + []query{{"http://google.com", ""}}, + }, + { + "TestNonDottedAndTLD #5.", + "http://google.co.uk", + []string{ + "a=1; domain=.co.uk", + "b=2; domain=.uk"}, + "", + []query{ + {"http://google.co.uk", ""}, + {"http://else.co.com", ""}, + {"http://else.uk", ""}, + }, + }, + { + 
"TestHostEndsWithDot.", + "http://www.google.com", + []string{ + "a=1", + "b=2; domain=.www.google.com."}, + "a=1", + []query{{"http://www.google.com", "a=1"}}, + }, + { + "PathTest", + "http://www.google.izzle", + []string{"a=1; path=/wee"}, + "a=1", + []query{ + {"http://www.google.izzle/wee", "a=1"}, + {"http://www.google.izzle/wee/", "a=1"}, + {"http://www.google.izzle/wee/war", "a=1"}, + {"http://www.google.izzle/wee/war/more/more", "a=1"}, + {"http://www.google.izzle/weehee", ""}, + {"http://www.google.izzle/", ""}, + }, + }, +} + +func TestChromiumBasics(t *testing.T) { + for _, test := range chromiumBasicsTests { + jar := newTestJar() + test.run(t, jar) + } +} + +// chromiumDomainTests contains jarTests which must be executed all on the +// same Jar. +var chromiumDomainTests = [...]jarTest{ + { + "Fill #1.", + "http://www.google.izzle", + []string{"A=B"}, + "A=B", + []query{{"http://www.google.izzle", "A=B"}}, + }, + { + "Fill #2.", + "http://www.google.izzle", + []string{"C=D; domain=.google.izzle"}, + "A=B C=D", + []query{{"http://www.google.izzle", "A=B C=D"}}, + }, + { + "Verify A is a host cookie and not accessible from subdomain.", + "http://unused.nil", + []string{}, + "A=B C=D", + []query{{"http://foo.www.google.izzle", "C=D"}}, + }, + { + "Verify domain cookies are found on proper domain.", + "http://www.google.izzle", + []string{"E=F; domain=.www.google.izzle"}, + "A=B C=D E=F", + []query{{"http://www.google.izzle", "A=B C=D E=F"}}, + }, + { + "Leading dots in domain attributes are optional.", + "http://www.google.izzle", + []string{"G=H; domain=www.google.izzle"}, + "A=B C=D E=F G=H", + []query{{"http://www.google.izzle", "A=B C=D E=F G=H"}}, + }, + { + "Verify domain enforcement works #1.", + "http://www.google.izzle", + []string{"K=L; domain=.bar.www.google.izzle"}, + "A=B C=D E=F G=H", + []query{{"http://bar.www.google.izzle", "C=D E=F G=H"}}, + }, + { + "Verify domain enforcement works #2.", + "http://unused.nil", + []string{}, + "A=B C=D E=F G=H", + []query{{"http://www.google.izzle", "A=B C=D E=F G=H"}}, + }, +} + +func TestChromiumDomain(t *testing.T) { + jar := newTestJar() + for _, test := range chromiumDomainTests { + test.run(t, jar) + } + +} + +// chromiumDeletionTests must be performed all on the same Jar. 
+var chromiumDeletionTests = [...]jarTest{ + { + "Create session cookie a1.", + "http://www.google.com", + []string{"a=1"}, + "a=1", + []query{{"http://www.google.com", "a=1"}}, + }, + { + "Delete sc a1 via MaxAge.", + "http://www.google.com", + []string{"a=1; max-age=-1"}, + "", + []query{{"http://www.google.com", ""}}, + }, + { + "Create session cookie b2.", + "http://www.google.com", + []string{"b=2"}, + "b=2", + []query{{"http://www.google.com", "b=2"}}, + }, + { + "Delete sc b2 via Expires.", + "http://www.google.com", + []string{"b=2; " + expiresIn(-10)}, + "", + []query{{"http://www.google.com", ""}}, + }, + { + "Create persistent cookie c3.", + "http://www.google.com", + []string{"c=3; max-age=3600"}, + "c=3", + []query{{"http://www.google.com", "c=3"}}, + }, + { + "Delete pc c3 via MaxAge.", + "http://www.google.com", + []string{"c=3; max-age=-1"}, + "", + []query{{"http://www.google.com", ""}}, + }, + { + "Create persistent cookie d4.", + "http://www.google.com", + []string{"d=4; max-age=3600"}, + "d=4", + []query{{"http://www.google.com", "d=4"}}, + }, + { + "Delete pc d4 via Expires.", + "http://www.google.com", + []string{"d=4; " + expiresIn(-10)}, + "", + []query{{"http://www.google.com", ""}}, + }, +} + +func TestChromiumDeletion(t *testing.T) { + jar := newTestJar() + for _, test := range chromiumDeletionTests { + test.run(t, jar) + } +} + +// domainHandlingTests tests and documents the rules for domain handling. +// Each test must be performed on an empty new Jar. +var domainHandlingTests = [...]jarTest{ + { + "Host cookie", + "http://www.host.test", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://host.test", ""}, + {"http://bar.host.test", ""}, + {"http://foo.www.host.test", ""}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie #1", + "http://www.host.test", + []string{"a=1; domain=host.test"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://host.test", "a=1"}, + {"http://bar.host.test", "a=1"}, + {"http://foo.www.host.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie #2", + "http://www.host.test", + []string{"a=1; domain=.host.test"}, + "a=1", + []query{ + {"http://www.host.test", "a=1"}, + {"http://host.test", "a=1"}, + {"http://bar.host.test", "a=1"}, + {"http://foo.www.host.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Host cookie on IDNA domain #1", + "http://www.bücher.test", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", ""}, + {"http://xn--bcher-kva.test", ""}, + {"http://bar.bücher.test", ""}, + {"http://bar.xn--bcher-kva.test", ""}, + {"http://foo.www.bücher.test", ""}, + {"http://foo.www.xn--bcher-kva.test", ""}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Host cookie on IDNA domain #2", + "http://www.xn--bcher-kva.test", + []string{"a=1"}, + "a=1", + []query{ + {"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", ""}, + {"http://xn--bcher-kva.test", ""}, + {"http://bar.bücher.test", ""}, + {"http://bar.xn--bcher-kva.test", ""}, + {"http://foo.www.bücher.test", ""}, + {"http://foo.www.xn--bcher-kva.test", ""}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie on IDNA domain #1", + "http://www.bücher.test", + []string{"a=1; domain=xn--bcher-kva.test"}, + "a=1", + []query{ + 
{"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", "a=1"}, + {"http://xn--bcher-kva.test", "a=1"}, + {"http://bar.bücher.test", "a=1"}, + {"http://bar.xn--bcher-kva.test", "a=1"}, + {"http://foo.www.bücher.test", "a=1"}, + {"http://foo.www.xn--bcher-kva.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Domain cookie on IDNA domain #2", + "http://www.xn--bcher-kva.test", + []string{"a=1; domain=xn--bcher-kva.test"}, + "a=1", + []query{ + {"http://www.bücher.test", "a=1"}, + {"http://www.xn--bcher-kva.test", "a=1"}, + {"http://bücher.test", "a=1"}, + {"http://xn--bcher-kva.test", "a=1"}, + {"http://bar.bücher.test", "a=1"}, + {"http://bar.xn--bcher-kva.test", "a=1"}, + {"http://foo.www.bücher.test", "a=1"}, + {"http://foo.www.xn--bcher-kva.test", "a=1"}, + {"http://other.test", ""}, + {"http://test", ""}, + }, + }, + { + "Host cookie on TLD.", + "http://com", + []string{"a=1"}, + "a=1", + []query{ + {"http://com", "a=1"}, + {"http://any.com", ""}, + {"http://any.test", ""}, + }, + }, + { + "Domain cookie on TLD becomes a host cookie.", + "http://com", + []string{"a=1; domain=com"}, + "a=1", + []query{ + {"http://com", "a=1"}, + {"http://any.com", ""}, + {"http://any.test", ""}, + }, + }, + { + "Host cookie on public suffix.", + "http://co.uk", + []string{"a=1"}, + "a=1", + []query{ + {"http://co.uk", "a=1"}, + {"http://uk", ""}, + {"http://some.co.uk", ""}, + {"http://foo.some.co.uk", ""}, + {"http://any.uk", ""}, + }, + }, + { + "Domain cookie on public suffix is ignored.", + "http://some.co.uk", + []string{"a=1; domain=co.uk"}, + "", + []query{ + {"http://co.uk", ""}, + {"http://uk", ""}, + {"http://some.co.uk", ""}, + {"http://foo.some.co.uk", ""}, + {"http://any.uk", ""}, + }, + }, +} + +func TestDomainHandling(t *testing.T) { + for _, test := range domainHandlingTests { + jar := newTestJar() + test.run(t, jar) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cookiejar + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", fmt.Errorf("cookiejar: invalid label %q", s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", fmt.Errorf("cookiejar: invalid label %q", s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("cookiejar: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} + +// Strictly speaking, the remaining code below deals with IDNA (RFC 5890 and +// friends) and not Punycode (RFC 3492) per se. + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +// toASCII converts a domain or domain label to its ASCII form. For example, +// toASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// toASCII("golang") is "golang". +func toASCII(s string) (string, error) { + if ascii(s) { + return s, nil + } + labels := strings.Split(s, ".") + for i, label := range labels { + if !ascii(label) { + a, err := encode(acePrefix, label) + if err != nil { + return "", err + } + labels[i] = a + } + } + return strings.Join(labels, "."), nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/cookiejar/punycode_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,161 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cookiejar + +import ( + "testing" +) + +var punycodeTestCases = [...]struct { + s, encoded string +}{ + {"", ""}, + {"-", "--"}, + {"-a", "-a-"}, + {"-a-", "-a--"}, + {"a", "a-"}, + {"a-", "a--"}, + {"a-b", "a-b-"}, + {"books", "books-"}, + {"bücher", "bcher-kva"}, + {"Hello世界", "Hello-ck1hg65u"}, + {"ü", "tda"}, + {"üý", "tdac"}, + + // The test cases below come from RFC 3492 section 7.1 with Errata 3026. 
+ { + // (A) Arabic (Egyptian). + "\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644" + + "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F", + "egbpdaj6bu4bxfgehfvwxn", + }, + { + // (B) Chinese (simplified). + "\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587", + "ihqwcrb4cv8a8dqg056pqjye", + }, + { + // (C) Chinese (traditional). + "\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587", + "ihqwctvzc91f659drss3x8bo0yb", + }, + { + // (D) Czech. + "\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074" + + "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D" + + "\u0065\u0073\u006B\u0079", + "Proprostnemluvesky-uyb24dma41a", + }, + { + // (E) Hebrew. + "\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8" + + "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2" + + "\u05D1\u05E8\u05D9\u05EA", + "4dbcagdahymbxekheh6e0a7fei0b", + }, + { + // (F) Hindi (Devanagari). + "\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D" + + "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939" + + "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947" + + "\u0939\u0948\u0902", + "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd", + }, + { + // (G) Japanese (kanji and hiragana). + "\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092" + + "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B", + "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa", + }, + { + // (H) Korean (Hangul syllables). + "\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774" + + "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74" + + "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C", + "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j" + + "psd879ccm6fea98c", + }, + { + // (I) Russian (Cyrillic). + "\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E" + + "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440" + + "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A" + + "\u0438", + "b1abfaaepdrnnbgefbadotcwatmq2g4l", + }, + { + // (J) Spanish. + "\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070" + + "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070" + + "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061" + + "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070" + + "\u0061\u00F1\u006F\u006C", + "PorqunopuedensimplementehablarenEspaol-fmd56a", + }, + { + // (K) Vietnamese. + "\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B" + + "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068" + + "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067" + + "\u0056\u0069\u1EC7\u0074", + "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g", + }, + { + // (L) 3B. + "\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F", + "3B-ww4c5e180e575a65lsy2b", + }, + { + // (M) -with-SUPER-MONKEYS. + "\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074" + + "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D" + + "\u004F\u004E\u004B\u0045\u0059\u0053", + "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n", + }, + { + // (N) Hello-Another-Way-. + "\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F" + + "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D" + + "\u305D\u308C\u305E\u308C\u306E\u5834\u6240", + "Hello-Another-Way--fc4qua05auwb3674vfr0b", + }, + { + // (O) 2. 
+ "\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032", + "2-u9tlzr9756bt3uc0v", + }, + { + // (P) MajiKoi5 + "\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059" + + "\u308B\u0035\u79D2\u524D", + "MajiKoi5-783gue6qz075azm5e", + }, + { + // (Q) de + "\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0", + "de-jg4avhby1noc0d", + }, + { + // (R) + "\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067", + "d9juau41awczczp", + }, + { + // (S) -> $1.00 <- + "\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020" + + "\u003C\u002D", + "-> $1.00 <--", + }, +} + +func TestPunycode(t *testing.T) { + for _, tc := range punycodeTestCases { + if got, err := encode("", tc.s); err != nil { + t.Errorf(`encode("", %q): %v`, tc.s, err) + } else if got != tc.encoded { + t.Errorf(`encode("", %q): got %q, want %q`, tc.s, got, tc.encoded) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/doc.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,80 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package http provides HTTP client and server implementations. + +Get, Head, Post, and PostForm make HTTP (or HTTPS) requests: + + resp, err := http.Get("http://example.com/") + ... + resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) + ... + resp, err := http.PostForm("http://example.com/form", + url.Values{"key": {"Value"}, "id": {"123"}}) + +The client must close the response body when finished with it: + + resp, err := http.Get("http://example.com/") + if err != nil { + // handle error + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + // ... + +For control over HTTP client headers, redirect policy, and other +settings, create a Client: + + client := &http.Client{ + CheckRedirect: redirectPolicyFunc, + } + + resp, err := client.Get("http://example.com") + // ... + + req, err := http.NewRequest("GET", "http://example.com", nil) + // ... + req.Header.Add("If-None-Match", `W/"wyzzy"`) + resp, err := client.Do(req) + // ... + +For control over proxies, TLS configuration, keep-alives, +compression, and other settings, create a Transport: + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: pool}, + DisableCompression: true, + } + client := &http.Client{Transport: tr} + resp, err := client.Get("https://example.com") + +Clients and Transports are safe for concurrent use by multiple +goroutines and for efficiency should only be created once and re-used. + +ListenAndServe starts an HTTP server with a given address and handler. +The handler is usually nil, which means to use DefaultServeMux. 
+Handle and HandleFunc add handlers to DefaultServeMux: + + http.Handle("/foo", fooHandler) + + http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) + }) + + log.Fatal(http.ListenAndServe(":8080", nil)) + +More control over the server's behavior is available by creating a +custom Server: + + s := &http.Server{ + Addr: ":8080", + Handler: myHandler, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + } + log.Fatal(s.ListenAndServe()) +*/ +package http === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/example_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/example_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/example_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" +) + +func ExampleHijacker() { + http.HandleFunc("/hijack", func(w http.ResponseWriter, r *http.Request) { + hj, ok := w.(http.Hijacker) + if !ok { + http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError) + return + } + conn, bufrw, err := hj.Hijack() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + // Don't forget to close the connection: + defer conn.Close() + bufrw.WriteString("Now we're speaking raw TCP. Say hi: ") + bufrw.Flush() + s, err := bufrw.ReadString('\n') + if err != nil { + log.Printf("error reading string: %v", err) + return + } + fmt.Fprintf(bufrw, "You said: %q\nBye.\n", s) + bufrw.Flush() + }) +} + +func ExampleGet() { + res, err := http.Get("http://www.google.com/robots.txt") + if err != nil { + log.Fatal(err) + } + robots, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s", robots) +} + +func ExampleFileServer() { + // Simple static webserver: + log.Fatal(http.ListenAndServe(":8080", http.FileServer(http.Dir("/usr/share/doc")))) +} + +func ExampleFileServer_stripPrefix() { + // To serve a directory on disk (/tmp) under an alternate URL + // path (/tmpfiles/), use StripPrefix to modify the request + // URL's path before the FileServer sees it: + http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp")))) +} + +func ExampleStripPrefix() { + // To serve a directory on disk (/tmp) under an alternate URL + // path (/tmpfiles/), use StripPrefix to modify the request + // URL's path before the FileServer sees it: + http.Handle("/tmpfiles/", http.StripPrefix("/tmpfiles/", http.FileServer(http.Dir("/tmp")))) +} + +type apiHandler struct{} + +func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {} + +func ExampleServeMux_Handle() { + mux := http.NewServeMux() + mux.Handle("/api/", apiHandler{}) + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + // The "/" pattern matches everything, so we need to check + // that we're at the root here. 
+ if req.URL.Path != "/" { + http.NotFound(w, req) + return + } + fmt.Fprintf(w, "Welcome to the home page!") + }) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/export_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Bridge package to expose http internals to tests in the http_test +// package. + +package http + +import ( + "net" + "time" +) + +func NewLoggingConn(baseName string, c net.Conn) net.Conn { + return newLoggingConn(baseName, c) +} + +var ExportAppendTime = appendTime + +func (t *Transport) NumPendingRequestsForTesting() int { + t.reqMu.Lock() + defer t.reqMu.Unlock() + return len(t.reqCanceler) +} + +func (t *Transport) IdleConnKeysForTesting() (keys []string) { + keys = make([]string, 0) + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConn == nil { + return + } + for key := range t.idleConn { + keys = append(keys, key.String()) + } + return +} + +func (t *Transport) IdleConnCountForTesting(cacheKey string) int { + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConn == nil { + return 0 + } + for k, conns := range t.idleConn { + if k.String() == cacheKey { + return len(conns) + } + } + return 0 +} + +func (t *Transport) IdleConnChMapSizeForTesting() int { + t.idleMu.Lock() + defer t.idleMu.Unlock() + return len(t.idleConnCh) +} + +func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler { + f := func() <-chan time.Time { + return ch + } + return &timeoutHandler{handler, f, ""} +} + +func ResetCachedEnvironment() { + httpProxyEnv.reset() + noProxyEnv.reset() +} + +var DefaultUserAgent = defaultUserAgent === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/fcgi' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/child.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,305 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fcgi + +// This file implements FastCGI from the perspective of a child process. + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/cgi" + "os" + "strings" + "sync" + "time" +) + +// request holds the state for an in-progress request. As soon as it's complete, +// it's converted to an http.Request. +type request struct { + pw *io.PipeWriter + reqId uint16 + params map[string]string + buf [1024]byte + rawParams []byte + keepConn bool +} + +func newRequest(reqId uint16, flags uint8) *request { + r := &request{ + reqId: reqId, + params: map[string]string{}, + keepConn: flags&flagKeepConn != 0, + } + r.rawParams = r.buf[:0] + return r +} + +// parseParams reads an encoded []byte into Params. 
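+// For illustration (editor's note): the pair SCRIPT_NAME=/foo arrives as the
+// two length bytes 0x0B and 0x04 followed by the bytes "SCRIPT_NAME" and
+// "/foo"; lengths of 128 or more use a 4-byte form with the high bit set
+// (see readSize in fcgi.go).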
+func (r *request) parseParams() { + text := r.rawParams + r.rawParams = nil + for len(text) > 0 { + keyLen, n := readSize(text) + if n == 0 { + return + } + text = text[n:] + valLen, n := readSize(text) + if n == 0 { + return + } + text = text[n:] + key := readString(text, keyLen) + text = text[keyLen:] + val := readString(text, valLen) + text = text[valLen:] + r.params[key] = val + } +} + +// response implements http.ResponseWriter. +type response struct { + req *request + header http.Header + w *bufWriter + wroteHeader bool +} + +func newResponse(c *child, req *request) *response { + return &response{ + req: req, + header: http.Header{}, + w: newWriter(c.conn, typeStdout, req.reqId), + } +} + +func (r *response) Header() http.Header { + return r.header +} + +func (r *response) Write(data []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + return r.w.Write(data) +} + +func (r *response) WriteHeader(code int) { + if r.wroteHeader { + return + } + r.wroteHeader = true + if code == http.StatusNotModified { + // Must not have body. + r.header.Del("Content-Type") + r.header.Del("Content-Length") + r.header.Del("Transfer-Encoding") + } else if r.header.Get("Content-Type") == "" { + r.header.Set("Content-Type", "text/html; charset=utf-8") + } + + if r.header.Get("Date") == "" { + r.header.Set("Date", time.Now().UTC().Format(http.TimeFormat)) + } + + fmt.Fprintf(r.w, "Status: %d %s\r\n", code, http.StatusText(code)) + r.header.Write(r.w) + r.w.WriteString("\r\n") +} + +func (r *response) Flush() { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + r.w.Flush() +} + +func (r *response) Close() error { + r.Flush() + return r.w.Close() +} + +type child struct { + conn *conn + handler http.Handler + + mu sync.Mutex // protects requests: + requests map[uint16]*request // keyed by request ID +} + +func newChild(rwc io.ReadWriteCloser, handler http.Handler) *child { + return &child{ + conn: newConn(rwc), + handler: handler, + requests: make(map[uint16]*request), + } +} + +func (c *child) serve() { + defer c.conn.Close() + var rec record + for { + if err := rec.read(c.conn.rwc); err != nil { + return + } + if err := c.handleRecord(&rec); err != nil { + return + } + } +} + +var errCloseConn = errors.New("fcgi: connection should be closed") + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +func (c *child) handleRecord(rec *record) error { + c.mu.Lock() + req, ok := c.requests[rec.h.Id] + c.mu.Unlock() + if !ok && rec.h.Type != typeBeginRequest && rec.h.Type != typeGetValues { + // The spec says to ignore unknown request IDs. + return nil + } + + switch rec.h.Type { + case typeBeginRequest: + if req != nil { + // The server is trying to begin a request with the same ID + // as an in-progress request. This is an error. + return errors.New("fcgi: received ID that is already in-flight") + } + + var br beginRequest + if err := br.read(rec.content()); err != nil { + return err + } + if br.role != roleResponder { + c.conn.writeEndRequest(rec.h.Id, 0, statusUnknownRole) + return nil + } + req = newRequest(rec.h.Id, br.flags) + c.mu.Lock() + c.requests[rec.h.Id] = req + c.mu.Unlock() + return nil + case typeParams: + // NOTE(eds): Technically a key-value pair can straddle the boundary + // between two packets. We buffer until we've received all parameters. + if len(rec.content()) > 0 { + req.rawParams = append(req.rawParams, rec.content()...) 
+ return nil + } + req.parseParams() + return nil + case typeStdin: + content := rec.content() + if req.pw == nil { + var body io.ReadCloser + if len(content) > 0 { + // body could be an io.LimitReader, but it shouldn't matter + // as long as both sides are behaving. + body, req.pw = io.Pipe() + } else { + body = emptyBody + } + go c.serveRequest(req, body) + } + if len(content) > 0 { + // TODO(eds): This blocks until the handler reads from the pipe. + // If the handler takes a long time, it might be a problem. + req.pw.Write(content) + } else if req.pw != nil { + req.pw.Close() + } + return nil + case typeGetValues: + values := map[string]string{"FCGI_MPXS_CONNS": "1"} + c.conn.writePairs(typeGetValuesResult, 0, values) + return nil + case typeData: + // If the filter role is implemented, read the data stream here. + return nil + case typeAbortRequest: + println("abort") + c.mu.Lock() + delete(c.requests, rec.h.Id) + c.mu.Unlock() + c.conn.writeEndRequest(rec.h.Id, 0, statusRequestComplete) + if !req.keepConn { + // connection will close upon return + return errCloseConn + } + return nil + default: + b := make([]byte, 8) + b[0] = byte(rec.h.Type) + c.conn.writeRecord(typeUnknownType, 0, b) + return nil + } +} + +func (c *child) serveRequest(req *request, body io.ReadCloser) { + r := newResponse(c, req) + httpReq, err := cgi.RequestFromMap(req.params) + if err != nil { + // there was an error reading the request + r.WriteHeader(http.StatusInternalServerError) + c.conn.writeRecord(typeStderr, req.reqId, []byte(err.Error())) + } else { + httpReq.Body = body + c.handler.ServeHTTP(r, httpReq) + } + r.Close() + c.mu.Lock() + delete(c.requests, req.reqId) + c.mu.Unlock() + c.conn.writeEndRequest(req.reqId, 0, statusRequestComplete) + + // Consume the entire body, so the host isn't still writing to + // us when we close the socket below in the !keepConn case, + // otherwise we'd send a RST. (golang.org/issue/4183) + // TODO(bradfitz): also bound this copy in time. Or send + // some sort of abort request to the host, so the host + // can properly cut off the client sending all the data. + // For now just bound it a little and + io.CopyN(ioutil.Discard, body, 100<<20) + body.Close() + + if !req.keepConn { + c.conn.Close() + } +} + +// Serve accepts incoming FastCGI connections on the listener l, creating a new +// goroutine for each. The goroutine reads requests and then calls handler +// to reply to them. +// If l is nil, Serve accepts connections from os.Stdin. +// If handler is nil, http.DefaultServeMux is used. +func Serve(l net.Listener, handler http.Handler) error { + if l == nil { + var err error + l, err = net.FileListener(os.Stdin) + if err != nil { + return err + } + defer l.Close() + } + if handler == nil { + handler = http.DefaultServeMux + } + for { + rw, err := l.Accept() + if err != nil { + return err + } + c := newChild(rw, handler) + go c.serve() + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,274 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fcgi implements the FastCGI protocol. +// Currently only the responder role is supported. 
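+//
+// A typical child process (editor's sketch, not part of the original
+// source) hands every request to an http.Handler:
+//
+//	err := fcgi.Serve(nil, http.DefaultServeMux) // nil listener: accept on os.Stdin
+//	if err != nil {
+//		log.Fatal(err)
+//	}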
+// The protocol is defined at http://www.fastcgi.com/drupal/node/6?q=node/22 +package fcgi + +// This file defines the raw protocol and some utilities used by the child and +// the host. + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "sync" +) + +// recType is a record type, as defined by +// http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S8 +type recType uint8 + +const ( + typeBeginRequest recType = 1 + typeAbortRequest recType = 2 + typeEndRequest recType = 3 + typeParams recType = 4 + typeStdin recType = 5 + typeStdout recType = 6 + typeStderr recType = 7 + typeData recType = 8 + typeGetValues recType = 9 + typeGetValuesResult recType = 10 + typeUnknownType recType = 11 +) + +// keep the connection between web-server and responder open after request +const flagKeepConn = 1 + +const ( + maxWrite = 65535 // maximum record body + maxPad = 255 +) + +const ( + roleResponder = iota + 1 // only Responders are implemented. + roleAuthorizer + roleFilter +) + +const ( + statusRequestComplete = iota + statusCantMultiplex + statusOverloaded + statusUnknownRole +) + +const headerLen = 8 + +type header struct { + Version uint8 + Type recType + Id uint16 + ContentLength uint16 + PaddingLength uint8 + Reserved uint8 +} + +type beginRequest struct { + role uint16 + flags uint8 + reserved [5]uint8 +} + +func (br *beginRequest) read(content []byte) error { + if len(content) != 8 { + return errors.New("fcgi: invalid begin request record") + } + br.role = binary.BigEndian.Uint16(content) + br.flags = content[2] + return nil +} + +// for padding so we don't have to allocate all the time +// not synchronized because we don't care what the contents are +var pad [maxPad]byte + +func (h *header) init(recType recType, reqId uint16, contentLength int) { + h.Version = 1 + h.Type = recType + h.Id = reqId + h.ContentLength = uint16(contentLength) + h.PaddingLength = uint8(-contentLength & 7) +} + +// conn sends records over rwc +type conn struct { + mutex sync.Mutex + rwc io.ReadWriteCloser + + // to avoid allocations + buf bytes.Buffer + h header +} + +func newConn(rwc io.ReadWriteCloser) *conn { + return &conn{rwc: rwc} +} + +func (c *conn) Close() error { + c.mutex.Lock() + defer c.mutex.Unlock() + return c.rwc.Close() +} + +type record struct { + h header + buf [maxWrite + maxPad]byte +} + +func (rec *record) read(r io.Reader) (err error) { + if err = binary.Read(r, binary.BigEndian, &rec.h); err != nil { + return err + } + if rec.h.Version != 1 { + return errors.New("fcgi: invalid header version") + } + n := int(rec.h.ContentLength) + int(rec.h.PaddingLength) + if _, err = io.ReadFull(r, rec.buf[:n]); err != nil { + return err + } + return nil +} + +func (r *record) content() []byte { + return r.buf[:r.h.ContentLength] +} + +// writeRecord writes and sends a single record. 
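+// For illustration (editor's note): a record with 5 content bytes is written
+// as the 8-byte header, the 5 content bytes, and 3 padding bytes, since
+// h.init sets PaddingLength = uint8(-contentLength & 7), keeping records
+// 8-byte aligned on the wire.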
+func (c *conn) writeRecord(recType recType, reqId uint16, b []byte) error { + c.mutex.Lock() + defer c.mutex.Unlock() + c.buf.Reset() + c.h.init(recType, reqId, len(b)) + if err := binary.Write(&c.buf, binary.BigEndian, c.h); err != nil { + return err + } + if _, err := c.buf.Write(b); err != nil { + return err + } + if _, err := c.buf.Write(pad[:c.h.PaddingLength]); err != nil { + return err + } + _, err := c.rwc.Write(c.buf.Bytes()) + return err +} + +func (c *conn) writeBeginRequest(reqId uint16, role uint16, flags uint8) error { + b := [8]byte{byte(role >> 8), byte(role), flags} + return c.writeRecord(typeBeginRequest, reqId, b[:]) +} + +func (c *conn) writeEndRequest(reqId uint16, appStatus int, protocolStatus uint8) error { + b := make([]byte, 8) + binary.BigEndian.PutUint32(b, uint32(appStatus)) + b[4] = protocolStatus + return c.writeRecord(typeEndRequest, reqId, b) +} + +func (c *conn) writePairs(recType recType, reqId uint16, pairs map[string]string) error { + w := newWriter(c, recType, reqId) + b := make([]byte, 8) + for k, v := range pairs { + n := encodeSize(b, uint32(len(k))) + n += encodeSize(b[n:], uint32(len(v))) + if _, err := w.Write(b[:n]); err != nil { + return err + } + if _, err := w.WriteString(k); err != nil { + return err + } + if _, err := w.WriteString(v); err != nil { + return err + } + } + w.Close() + return nil +} + +func readSize(s []byte) (uint32, int) { + if len(s) == 0 { + return 0, 0 + } + size, n := uint32(s[0]), 1 + if size&(1<<7) != 0 { + if len(s) < 4 { + return 0, 0 + } + n = 4 + size = binary.BigEndian.Uint32(s) + size &^= 1 << 31 + } + return size, n +} + +func readString(s []byte, size uint32) string { + if size > uint32(len(s)) { + return "" + } + return string(s[:size]) +} + +func encodeSize(b []byte, size uint32) int { + if size > 127 { + size |= 1 << 31 + binary.BigEndian.PutUint32(b, size) + return 4 + } + b[0] = byte(size) + return 1 +} + +// bufWriter encapsulates bufio.Writer but also closes the underlying stream when +// Closed. +type bufWriter struct { + closer io.Closer + *bufio.Writer +} + +func (w *bufWriter) Close() error { + if err := w.Writer.Flush(); err != nil { + w.closer.Close() + return err + } + return w.closer.Close() +} + +func newWriter(c *conn, recType recType, reqId uint16) *bufWriter { + s := &streamWriter{c: c, recType: recType, reqId: reqId} + w := bufio.NewWriterSize(s, maxWrite) + return &bufWriter{s, w} +} + +// streamWriter abstracts out the separation of a stream into discrete records. +// It only writes maxWrite bytes at a time. +type streamWriter struct { + c *conn + recType recType + reqId uint16 +} + +func (w *streamWriter) Write(p []byte) (int, error) { + nn := 0 + for len(p) > 0 { + n := len(p) + if n > maxWrite { + n = maxWrite + } + if err := w.c.writeRecord(w.recType, w.reqId, p[:n]); err != nil { + return nn, err + } + nn += n + p = p[n:] + } + return nn, nil +} + +func (w *streamWriter) Close() error { + // send empty record to close the stream + return w.c.writeRecord(w.recType, w.reqId, nil) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/fcgi/fcgi_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,150 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package fcgi + +import ( + "bytes" + "errors" + "io" + "testing" +) + +var sizeTests = []struct { + size uint32 + bytes []byte +}{ + {0, []byte{0x00}}, + {127, []byte{0x7F}}, + {128, []byte{0x80, 0x00, 0x00, 0x80}}, + {1000, []byte{0x80, 0x00, 0x03, 0xE8}}, + {33554431, []byte{0x81, 0xFF, 0xFF, 0xFF}}, +} + +func TestSize(t *testing.T) { + b := make([]byte, 4) + for i, test := range sizeTests { + n := encodeSize(b, test.size) + if !bytes.Equal(b[:n], test.bytes) { + t.Errorf("%d expected %x, encoded %x", i, test.bytes, b) + } + size, n := readSize(test.bytes) + if size != test.size { + t.Errorf("%d expected %d, read %d", i, test.size, size) + } + if len(test.bytes) != n { + t.Errorf("%d did not consume all the bytes", i) + } + } +} + +var streamTests = []struct { + desc string + recType recType + reqId uint16 + content []byte + raw []byte +}{ + {"single record", typeStdout, 1, nil, + []byte{1, byte(typeStdout), 0, 1, 0, 0, 0, 0}, + }, + // this data will have to be split into two records + {"two records", typeStdin, 300, make([]byte, 66000), + bytes.Join([][]byte{ + // header for the first record + {1, byte(typeStdin), 0x01, 0x2C, 0xFF, 0xFF, 1, 0}, + make([]byte, 65536), + // header for the second + {1, byte(typeStdin), 0x01, 0x2C, 0x01, 0xD1, 7, 0}, + make([]byte, 472), + // header for the empty record + {1, byte(typeStdin), 0x01, 0x2C, 0, 0, 0, 0}, + }, + nil), + }, +} + +type nilCloser struct { + io.ReadWriter +} + +func (c *nilCloser) Close() error { return nil } + +func TestStreams(t *testing.T) { + var rec record +outer: + for _, test := range streamTests { + buf := bytes.NewBuffer(test.raw) + var content []byte + for buf.Len() > 0 { + if err := rec.read(buf); err != nil { + t.Errorf("%s: error reading record: %v", test.desc, err) + continue outer + } + content = append(content, rec.content()...) + } + if rec.h.Type != test.recType { + t.Errorf("%s: got type %d expected %d", test.desc, rec.h.Type, test.recType) + continue + } + if rec.h.Id != test.reqId { + t.Errorf("%s: got request ID %d expected %d", test.desc, rec.h.Id, test.reqId) + continue + } + if !bytes.Equal(content, test.content) { + t.Errorf("%s: read wrong content", test.desc) + continue + } + buf.Reset() + c := newConn(&nilCloser{buf}) + w := newWriter(c, test.recType, test.reqId) + if _, err := w.Write(test.content); err != nil { + t.Errorf("%s: error writing record: %v", test.desc, err) + continue + } + if err := w.Close(); err != nil { + t.Errorf("%s: error closing stream: %v", test.desc, err) + continue + } + if !bytes.Equal(buf.Bytes(), test.raw) { + t.Errorf("%s: wrote wrong content", test.desc) + } + } +} + +type writeOnlyConn struct { + buf []byte +} + +func (c *writeOnlyConn) Write(p []byte) (int, error) { + c.buf = append(c.buf, p...) 
+ return len(p), nil +} + +func (c *writeOnlyConn) Read(p []byte) (int, error) { + return 0, errors.New("conn is write-only") +} + +func (c *writeOnlyConn) Close() error { + return nil +} + +func TestGetValues(t *testing.T) { + var rec record + rec.h.Type = typeGetValues + + wc := new(writeOnlyConn) + c := newChild(wc, nil) + err := c.handleRecord(&rec) + if err != nil { + t.Fatalf("handleRecord: %v", err) + } + + const want = "\x01\n\x00\x00\x00\x12\x06\x00" + + "\x0f\x01FCGI_MPXS_CONNS1" + + "\x00\x00\x00\x00\x00\x00\x01\n\x00\x00\x00\x00\x00\x00" + if got := string(wc.buf); got != want { + t.Errorf(" got: %q\nwant: %q\n", got, want) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/filetransport.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "fmt" + "io" +) + +// fileTransport implements RoundTripper for the 'file' protocol. +type fileTransport struct { + fh fileHandler +} + +// NewFileTransport returns a new RoundTripper, serving the provided +// FileSystem. The returned RoundTripper ignores the URL host in its +// incoming requests, as well as most other properties of the +// request. +// +// The typical use case for NewFileTransport is to register the "file" +// protocol with a Transport, as in: +// +// t := &http.Transport{} +// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) +// c := &http.Client{Transport: t} +// res, err := c.Get("file:///etc/passwd") +// ... +func NewFileTransport(fs FileSystem) RoundTripper { + return fileTransport{fileHandler{fs}} +} + +func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) { + // We start ServeHTTP in a goroutine, which may take a long + // time if the file is large. The newPopulateResponseWriter + // call returns a channel which either ServeHTTP or finish() + // sends our *Response on, once the *Response itself has been + // populated (even if the body itself is still being + // written to the res.Body, a pipe) + rw, resc := newPopulateResponseWriter() + go func() { + t.fh.ServeHTTP(rw, req) + rw.finish() + }() + return <-resc, nil +} + +func newPopulateResponseWriter() (*populateResponse, <-chan *Response) { + pr, pw := io.Pipe() + rw := &populateResponse{ + ch: make(chan *Response), + pw: pw, + res: &Response{ + Proto: "HTTP/1.0", + ProtoMajor: 1, + Header: make(Header), + Close: true, + Body: pr, + }, + } + return rw, rw.ch +} + +// populateResponse is a ResponseWriter that populates the *Response +// in res, and writes its body to a pipe connected to the response +// body. Once writes begin or finish() is called, the response is sent +// on ch. 
+type populateResponse struct { + res *Response + ch chan *Response + wroteHeader bool + hasContent bool + sentResponse bool + pw *io.PipeWriter +} + +func (pr *populateResponse) finish() { + if !pr.wroteHeader { + pr.WriteHeader(500) + } + if !pr.sentResponse { + pr.sendResponse() + } + pr.pw.Close() +} + +func (pr *populateResponse) sendResponse() { + if pr.sentResponse { + return + } + pr.sentResponse = true + + if pr.hasContent { + pr.res.ContentLength = -1 + } + pr.ch <- pr.res +} + +func (pr *populateResponse) Header() Header { + return pr.res.Header +} + +func (pr *populateResponse) WriteHeader(code int) { + if pr.wroteHeader { + return + } + pr.wroteHeader = true + + pr.res.StatusCode = code + pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code)) +} + +func (pr *populateResponse) Write(p []byte) (n int, err error) { + if !pr.wroteHeader { + pr.WriteHeader(StatusOK) + } + pr.hasContent = true + if !pr.sentResponse { + pr.sendResponse() + } + return pr.pw.Write(p) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/filetransport_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func checker(t *testing.T) func(string, error) { + return func(call string, err error) { + if err == nil { + return + } + t.Fatalf("%s: %v", call, err) + } +} + +func TestFileTransport(t *testing.T) { + check := checker(t) + + dname, err := ioutil.TempDir("", "") + check("TempDir", err) + fname := filepath.Join(dname, "foo.txt") + err = ioutil.WriteFile(fname, []byte("Bar"), 0644) + check("WriteFile", err) + defer os.Remove(dname) + defer os.Remove(fname) + + tr := &Transport{} + tr.RegisterProtocol("file", NewFileTransport(Dir(dname))) + c := &Client{Transport: tr} + + fooURLs := []string{"file:///foo.txt", "file://../foo.txt"} + for _, urlstr := range fooURLs { + res, err := c.Get(urlstr) + check("Get "+urlstr, err) + if res.StatusCode != 200 { + t.Errorf("for %s, StatusCode = %d, want 200", urlstr, res.StatusCode) + } + if res.ContentLength != -1 { + t.Errorf("for %s, ContentLength = %d, want -1", urlstr, res.ContentLength) + } + if res.Body == nil { + t.Fatalf("for %s, nil Body", urlstr) + } + slurp, err := ioutil.ReadAll(res.Body) + check("ReadAll "+urlstr, err) + if string(slurp) != "Bar" { + t.Errorf("for %s, got content %q, want %q", urlstr, string(slurp), "Bar") + } + } + + const badURL = "file://../no-exist.txt" + res, err := c.Get(badURL) + check("Get "+badURL, err) + if res.StatusCode != 404 { + t.Errorf("for %s, StatusCode = %d, want 404", badURL, res.StatusCode) + } + res.Body.Close() +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/fs.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/fs.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/fs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,549 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// HTTP file system request handler + +package http + +import ( + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/textproto" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" +) + +// A Dir implements http.FileSystem using the native file +// system restricted to a specific directory tree. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) Open(name string) (File, error) { + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d) + if dir == "" { + dir = "." + } + f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +// A FileSystem implements access to a collection of named files. +// The elements in a file path are separated by slash ('/', U+002F) +// characters, regardless of host operating system convention. +type FileSystem interface { + Open(name string) (File, error) +} + +// A File is returned by a FileSystem's Open method and can be +// served by the FileServer implementation. +// +// The methods should behave the same as those on an *os.File. +type File interface { + io.Closer + io.Reader + Readdir(count int) ([]os.FileInfo, error) + Seek(offset int64, whence int) (int64, error) + Stat() (os.FileInfo, error) +} + +func dirList(w ResponseWriter, f File) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintf(w, "
\n")
+	for {
+		dirs, err := f.Readdir(100)
+		if err != nil || len(dirs) == 0 {
+			break
+		}
+		for _, d := range dirs {
+			name := d.Name()
+			if d.IsDir() {
+				name += "/"
+			}
+			// name may contain '?' or '#', which must be escaped to remain
+			// part of the URL path, and not indicate the start of a query
+			// string or fragment.
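+			// For example, an entry named "a?b.txt" is linked as
+			// "a%3Fb.txt", so the '?' stays part of the path instead
+			// of starting a query string.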
+			url := url.URL{Path: name}
+			fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", url.String(), htmlReplacer.Replace(name))
+		}
+	}
+	fmt.Fprintf(w, "</pre>
\n") +} + +// ServeContent replies to the request using the content in the +// provided ReadSeeker. The main benefit of ServeContent over io.Copy +// is that it handles Range requests properly, sets the MIME type, and +// handles If-Modified-Since requests. +// +// If the response's Content-Type header is not set, ServeContent +// first tries to deduce the type from name's file extension and, +// if that fails, falls back to reading the first block of the content +// and passing it to DetectContentType. +// The name is otherwise unused; in particular it can be empty and is +// never sent in the response. +// +// If modtime is not the zero time, ServeContent includes it in a +// Last-Modified header in the response. If the request includes an +// If-Modified-Since header, ServeContent uses modtime to decide +// whether the content needs to be sent at all. +// +// The content's Seek method must work: ServeContent uses +// a seek to the end of the content to determine its size. +// +// If the caller has set w's ETag header, ServeContent uses it to +// handle requests using If-Range and If-None-Match. +// +// Note that *os.File implements the io.ReadSeeker interface. +func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) { + sizeFunc := func() (int64, error) { + size, err := content.Seek(0, os.SEEK_END) + if err != nil { + return 0, errSeeker + } + _, err = content.Seek(0, os.SEEK_SET) + if err != nil { + return 0, errSeeker + } + return size, nil + } + serveContent(w, req, name, modtime, sizeFunc, content) +} + +// errSeeker is returned by ServeContent's sizeFunc when the content +// doesn't seek properly. The underlying Seeker's error text isn't +// included in the sizeFunc reply so it's not sent over HTTP to end +// users. +var errSeeker = errors.New("seeker can't seek") + +// if name is empty, filename is unknown. (used for mime type, before sniffing) +// if modtime.IsZero(), modtime is unknown. +// content must be seeked to the beginning of the file. +// The sizeFunc is called at most once. Its error, if any, is sent in the HTTP response. +func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, sizeFunc func() (int64, error), content io.ReadSeeker) { + if checkLastModified(w, r, modtime) { + return + } + rangeReq, done := checkETag(w, r) + if done { + return + } + + code := StatusOK + + // If Content-Type isn't set, use the file's extension to find it, but + // if the Content-Type is unset explicitly, do not sniff the type. + ctypes, haveType := w.Header()["Content-Type"] + var ctype string + if !haveType { + ctype = mime.TypeByExtension(filepath.Ext(name)) + if ctype == "" { + // read a chunk to decide between utf-8 text and binary + var buf [sniffLen]byte + n, _ := io.ReadFull(content, buf[:]) + ctype = DetectContentType(buf[:n]) + _, err := content.Seek(0, os.SEEK_SET) // rewind to output whole file + if err != nil { + Error(w, "seeker can't seek", StatusInternalServerError) + return + } + } + w.Header().Set("Content-Type", ctype) + } else if len(ctypes) > 0 { + ctype = ctypes[0] + } + + size, err := sizeFunc() + if err != nil { + Error(w, err.Error(), StatusInternalServerError) + return + } + + // handle Content-Range header. 
+ sendSize := size + var sendContent io.Reader = content + if size >= 0 { + ranges, err := parseRange(rangeReq, size) + if err != nil { + Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) + return + } + if sumRangesSize(ranges) > size { + // The total number of bytes in all the ranges + // is larger than the size of the file by + // itself, so this is probably an attack, or a + // dumb client. Ignore the range request. + ranges = nil + } + switch { + case len(ranges) == 1: + // RFC 2616, Section 14.16: + // "When an HTTP message includes the content of a single + // range (for example, a response to a request for a + // single range, or to a request for a set of ranges + // that overlap without any holes), this content is + // transmitted with a Content-Range header, and a + // Content-Length header showing the number of bytes + // actually transferred. + // ... + // A response to a request for a single range MUST NOT + // be sent using the multipart/byteranges media type." + ra := ranges[0] + if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { + Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) + return + } + sendSize = ra.length + code = StatusPartialContent + w.Header().Set("Content-Range", ra.contentRange(size)) + case len(ranges) > 1: + for _, ra := range ranges { + if ra.start > size { + Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) + return + } + } + sendSize = rangesMIMESize(ranges, ctype, size) + code = StatusPartialContent + + pr, pw := io.Pipe() + mw := multipart.NewWriter(pw) + w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary()) + sendContent = pr + defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish. + go func() { + for _, ra := range ranges { + part, err := mw.CreatePart(ra.mimeHeader(ctype, size)) + if err != nil { + pw.CloseWithError(err) + return + } + if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { + pw.CloseWithError(err) + return + } + if _, err := io.CopyN(part, content, ra.length); err != nil { + pw.CloseWithError(err) + return + } + } + mw.Close() + pw.Close() + }() + } + + w.Header().Set("Accept-Ranges", "bytes") + if w.Header().Get("Content-Encoding") == "" { + w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) + } + } + + w.WriteHeader(code) + + if r.Method != "HEAD" { + io.CopyN(w, sendContent, sendSize) + } +} + +// modtime is the modification time of the resource to be served, or IsZero(). +// return value is whether this request is now complete. +func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool { + if modtime.IsZero() { + return false + } + + // The Date-Modified header truncates sub-second precision, so + // use mtime < t+1s instead of mtime <= t to check for unmodified. + if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) { + h := w.Header() + delete(h, "Content-Type") + delete(h, "Content-Length") + w.WriteHeader(StatusNotModified) + return true + } + w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat)) + return false +} + +// checkETag implements If-None-Match and If-Range checks. +// The ETag must have been previously set in the ResponseWriter's headers. +// +// The return value is the effective request "Range" header to use and +// whether this request is now considered done. 
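+//
+// A rough sketch of the intended call pattern, as in serveContent above:
+//
+//	rangeReq, done := checkETag(w, r)
+//	if done {
+//		return // a 304 Not Modified response was already written
+//	}
+//	// otherwise continue, honoring the possibly-cleared rangeReq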
+func checkETag(w ResponseWriter, r *Request) (rangeReq string, done bool) { + etag := w.Header().get("Etag") + rangeReq = r.Header.get("Range") + + // Invalidate the range request if the entity doesn't match the one + // the client was expecting. + // "If-Range: version" means "ignore the Range: header unless version matches the + // current file." + // We only support ETag versions. + // The caller must have set the ETag on the response already. + if ir := r.Header.get("If-Range"); ir != "" && ir != etag { + // TODO(bradfitz): handle If-Range requests with Last-Modified + // times instead of ETags? I'd rather not, at least for + // now. That seems like a bug/compromise in the RFC 2616, and + // I've never heard of anybody caring about that (yet). + rangeReq = "" + } + + if inm := r.Header.get("If-None-Match"); inm != "" { + // Must know ETag. + if etag == "" { + return rangeReq, false + } + + // TODO(bradfitz): non-GET/HEAD requests require more work: + // sending a different status code on matches, and + // also can't use weak cache validators (those with a "W/ + // prefix). But most users of ServeContent will be using + // it on GET or HEAD, so only support those for now. + if r.Method != "GET" && r.Method != "HEAD" { + return rangeReq, false + } + + // TODO(bradfitz): deal with comma-separated or multiple-valued + // list of If-None-match values. For now just handle the common + // case of a single item. + if inm == etag || inm == "*" { + h := w.Header() + delete(h, "Content-Type") + delete(h, "Content-Length") + w.WriteHeader(StatusNotModified) + return "", true + } + } + return rangeReq, false +} + +// name is '/'-separated, not filepath.Separator. +func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { + const indexPage = "/index.html" + + // redirect .../index.html to .../ + // can't use Redirect() because that would make the path absolute, + // which would be a problem running under StripPrefix + if strings.HasSuffix(r.URL.Path, indexPage) { + localRedirect(w, r, "./") + return + } + + f, err := fs.Open(name) + if err != nil { + // TODO expose actual error? + NotFound(w, r) + return + } + defer f.Close() + + d, err1 := f.Stat() + if err1 != nil { + // TODO expose actual error? + NotFound(w, r) + return + } + + if redirect { + // redirect to canonical path: / at end of directory url + // r.URL.Path always begins with / + url := r.URL.Path + if d.IsDir() { + if url[len(url)-1] != '/' { + localRedirect(w, r, path.Base(url)+"/") + return + } + } else { + if url[len(url)-1] == '/' { + localRedirect(w, r, "../"+path.Base(url)) + return + } + } + } + + // use contents of index.html for directory, if present + if d.IsDir() { + index := name + indexPage + ff, err := fs.Open(index) + if err == nil { + defer ff.Close() + dd, err := ff.Stat() + if err == nil { + name = index + d = dd + f = ff + } + } + } + + // Still a directory? (we didn't find an index.html file) + if d.IsDir() { + if checkLastModified(w, r, d.ModTime()) { + return + } + dirList(w, f) + return + } + + // serverContent will check modification time + sizeFunc := func() (int64, error) { return d.Size(), nil } + serveContent(w, r, d.Name(), d.ModTime(), sizeFunc, f) +} + +// localRedirect gives a Moved Permanently response. +// It does not convert relative paths to absolute paths like Redirect does. +func localRedirect(w ResponseWriter, r *Request, newPath string) { + if q := r.URL.RawQuery; q != "" { + newPath += "?" 
+ q + } + w.Header().Set("Location", newPath) + w.WriteHeader(StatusMovedPermanently) +} + +// ServeFile replies to the request with the contents of the named file or directory. +func ServeFile(w ResponseWriter, r *Request, name string) { + dir, file := filepath.Split(name) + serveFile(w, r, Dir(dir), file, false) +} + +type fileHandler struct { + root FileSystem +} + +// FileServer returns a handler that serves HTTP requests +// with the contents of the file system rooted at root. +// +// To use the operating system's file system implementation, +// use http.Dir: +// +// http.Handle("/", http.FileServer(http.Dir("/tmp"))) +func FileServer(root FileSystem) Handler { + return &fileHandler{root} +} + +func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { + upath := r.URL.Path + if !strings.HasPrefix(upath, "/") { + upath = "/" + upath + r.URL.Path = upath + } + serveFile(w, r, f.root, path.Clean(upath), true) +} + +// httpRange specifies the byte range to be sent to the client. +type httpRange struct { + start, length int64 +} + +func (r httpRange) contentRange(size int64) string { + return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size) +} + +func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader { + return textproto.MIMEHeader{ + "Content-Range": {r.contentRange(size)}, + "Content-Type": {contentType}, + } +} + +// parseRange parses a Range header string as per RFC 2616. +func parseRange(s string, size int64) ([]httpRange, error) { + if s == "" { + return nil, nil // header not present + } + const b = "bytes=" + if !strings.HasPrefix(s, b) { + return nil, errors.New("invalid range") + } + var ranges []httpRange + for _, ra := range strings.Split(s[len(b):], ",") { + ra = strings.TrimSpace(ra) + if ra == "" { + continue + } + i := strings.Index(ra, "-") + if i < 0 { + return nil, errors.New("invalid range") + } + start, end := strings.TrimSpace(ra[:i]), strings.TrimSpace(ra[i+1:]) + var r httpRange + if start == "" { + // If no start is specified, end specifies the + // range start relative to the end of the file. + i, err := strconv.ParseInt(end, 10, 64) + if err != nil { + return nil, errors.New("invalid range") + } + if i > size { + i = size + } + r.start = size - i + r.length = size - r.start + } else { + i, err := strconv.ParseInt(start, 10, 64) + if err != nil || i > size || i < 0 { + return nil, errors.New("invalid range") + } + r.start = i + if end == "" { + // If no end is specified, range extends to end of the file. + r.length = size - r.start + } else { + i, err := strconv.ParseInt(end, 10, 64) + if err != nil || r.start > i { + return nil, errors.New("invalid range") + } + if i >= size { + i = size - 1 + } + r.length = i - r.start + 1 + } + } + ranges = append(ranges, r) + } + return ranges, nil +} + +// countingWriter counts how many bytes have been written to it. +type countingWriter int64 + +func (w *countingWriter) Write(p []byte) (n int, err error) { + *w += countingWriter(len(p)) + return len(p), nil +} + +// rangesMIMESize returns the number of bytes it takes to encode the +// provided ranges as a multipart response. 
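+//
+// It does so by replaying the MIME part headers into a countingWriter
+// and adding the raw range lengths, so no body bytes are buffered.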
+func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) { + var w countingWriter + mw := multipart.NewWriter(&w) + for _, ra := range ranges { + mw.CreatePart(ra.mimeHeader(contentType, contentSize)) + encSize += ra.length + } + mw.Close() + encSize += int64(w) + return +} + +func sumRangesSize(ranges []httpRange) (size int64) { + for _, ra := range ranges { + size += ra.length + } + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/fs_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,858 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net" + . "net/http" + "net/http/httptest" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" +) + +const ( + testFile = "testdata/file" + testFileLen = 11 +) + +type wantRange struct { + start, end int64 // range [start,end) +} + +var itoa = strconv.Itoa + +var ServeFileRangeTests = []struct { + r string + code int + ranges []wantRange +}{ + {r: "", code: StatusOK}, + {r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}}, + {r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}}, + {r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}}, + {r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}}, + {r: "bytes=20-", code: StatusRequestedRangeNotSatisfiable}, + {r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}}, + {r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}}, + {r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}}, + {r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}}, + {r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request + {r: "bytes=0-" + itoa(testFileLen-2), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}}, + {r: "bytes=0-" + itoa(testFileLen-1), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}}, + {r: "bytes=0-" + itoa(testFileLen), code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}}, +} + +func TestServeFile(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + + var err error + + file, err := ioutil.ReadFile(testFile) + if err != nil { + t.Fatal("reading file:", err) + } + + // set up the Request (re-used for all tests) + var req Request + req.Header = make(Header) + if req.URL, err = url.Parse(ts.URL); err != nil { + t.Fatal("ParseURL:", err) + } + req.Method = "GET" + + // straight GET + _, body := getBody(t, "straight get", req) + if !bytes.Equal(body, file) { + t.Fatalf("body mismatch: got %q, want %q", body, file) + } + + // Range tests +Cases: + for _, rt := range ServeFileRangeTests { + if rt.r != "" { + req.Header.Set("Range", rt.r) + } + resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req) + if resp.StatusCode != rt.code { + 
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code) + } + if rt.code == StatusRequestedRangeNotSatisfiable { + continue + } + wantContentRange := "" + if len(rt.ranges) == 1 { + rng := rt.ranges[0] + wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen) + } + cr := resp.Header.Get("Content-Range") + if cr != wantContentRange { + t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange) + } + ct := resp.Header.Get("Content-Type") + if len(rt.ranges) == 1 { + rng := rt.ranges[0] + wantBody := file[rng.start:rng.end] + if !bytes.Equal(body, wantBody) { + t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody) + } + if strings.HasPrefix(ct, "multipart/byteranges") { + t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct) + } + } + if len(rt.ranges) > 1 { + typ, params, err := mime.ParseMediaType(ct) + if err != nil { + t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err) + continue + } + if typ != "multipart/byteranges" { + t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ) + continue + } + if params["boundary"] == "" { + t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct) + continue + } + if g, w := resp.ContentLength, int64(len(body)); g != w { + t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w) + continue + } + mr := multipart.NewReader(bytes.NewReader(body), params["boundary"]) + for ri, rng := range rt.ranges { + part, err := mr.NextPart() + if err != nil { + t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err) + continue Cases + } + wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen) + if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w { + t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w) + } + body, err := ioutil.ReadAll(part) + if err != nil { + t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err) + continue Cases + } + wantBody := file[rng.start:rng.end] + if !bytes.Equal(body, wantBody) { + t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody) + } + } + _, err = mr.NextPart() + if err != io.EOF { + t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err) + } + } + } +} + +var fsRedirectTestData = []struct { + original, redirect string +}{ + {"/test/index.html", "/test/"}, + {"/test/testdata", "/test/testdata/"}, + {"/test/testdata/file/", "/test/testdata/file"}, +} + +func TestFSRedirect(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir(".")))) + defer ts.Close() + + for _, data := range fsRedirectTestData { + res, err := Get(ts.URL + data.original) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if g, e := res.Request.URL.Path, data.redirect; g != e { + t.Errorf("redirect from %s: got %s, want %s", data.original, g, e) + } + } +} + +type testFileSystem struct { + open func(name string) (File, error) +} + +func (fs *testFileSystem) Open(name string) (File, error) { + return fs.open(name) +} + +func TestFileServerCleans(t *testing.T) { + defer afterTest(t) + ch := make(chan string, 1) + fs := FileServer(&testFileSystem{func(name string) (File, error) { + ch <- name + return nil, errors.New("file does not exist") + }}) + tests := []struct { + reqPath, openArg string + }{ + {"/foo.txt", "/foo.txt"}, + {"//foo.txt", "/foo.txt"}, + {"/../foo.txt", "/foo.txt"}, + } + req, _ := NewRequest("GET", "http://example.com", nil) + for n, test := range tests { + 
rec := httptest.NewRecorder() + req.URL.Path = test.reqPath + fs.ServeHTTP(rec, req) + if got := <-ch; got != test.openArg { + t.Errorf("test %d: got %q, want %q", n, got, test.openArg) + } + } +} + +func TestFileServerEscapesNames(t *testing.T) { + defer afterTest(t) + const dirListPrefix = "
\n"
+	const dirListSuffix = "\n</pre>\n"
+	tests := []struct {
+		name, escaped string
+	}{
+		{`simple_name`, `<a href="simple_name">simple_name</a>`},
+		{`"'<>&`, `<a href="%22%27%3C%3E&">&#34;&#39;&lt;&gt;&amp;</a>`},
+		{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
+		{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo">&lt;combo&gt;?foo</a>`},
+	}
+
+	// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
+	fs := make(fakeFS)
+	for i, test := range tests {
+		testFile := &fakeFileInfo{basename: test.name}
+		fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
+			dir:     true,
+			modtime: time.Unix(1000000000, 0).UTC(),
+			ents:    []*fakeFileInfo{testFile},
+		}
+		fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
+	}
+
+	ts := httptest.NewServer(FileServer(&fs))
+	defer ts.Close()
+	for i, test := range tests {
+		url := fmt.Sprintf("%s/%d", ts.URL, i)
+		res, err := Get(url)
+		if err != nil {
+			t.Fatalf("test %q: Get: %v", test.name, err)
+		}
+		b, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatalf("test %q: read Body: %v", test.name, err)
+		}
+		s := string(b)
+		if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
+			t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
+		}
+		if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
+			t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
+		}
+		res.Body.Close()
+	}
+}
+
+func mustRemoveAll(dir string) {
+	err := os.RemoveAll(dir)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func TestFileServerImplicitLeadingSlash(t *testing.T) {
+	defer afterTest(t)
+	tempDir, err := ioutil.TempDir("", "")
+	if err != nil {
+		t.Fatalf("TempDir: %v", err)
+	}
+	defer mustRemoveAll(tempDir)
+	if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
+		t.Fatalf("WriteFile: %v", err)
+	}
+	ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
+	defer ts.Close()
+	get := func(suffix string) string {
+		res, err := Get(ts.URL + suffix)
+		if err != nil {
+			t.Fatalf("Get %s: %v", suffix, err)
+		}
+		b, err := ioutil.ReadAll(res.Body)
+		if err != nil {
+			t.Fatalf("ReadAll %s: %v", suffix, err)
+		}
+		res.Body.Close()
+		return string(b)
+	}
+	if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
+		t.Logf("expected a directory listing with foo.txt, got %q", s)
+	}
+	if s := get("/bar/foo.txt"); s != "Hello world" {
+		t.Logf("expected %q, got %q", "Hello world", s)
+	}
+}
+
+func TestDirJoin(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("skipping test on windows")
+	}
+	wfi, err := os.Stat("/etc/hosts")
+	if err != nil {
+		t.Skip("skipping test; no /etc/hosts file")
+	}
+	test := func(d Dir, name string) {
+		f, err := d.Open(name)
+		if err != nil {
+			t.Fatalf("open of %s: %v", name, err)
+		}
+		defer f.Close()
+		gfi, err := f.Stat()
+		if err != nil {
+			t.Fatalf("stat of %s: %v", name, err)
+		}
+		if !os.SameFile(gfi, wfi) {
+			t.Errorf("%s got different file", name)
+		}
+	}
+	test(Dir("/etc/"), "/hosts")
+	test(Dir("/etc/"), "hosts")
+	test(Dir("/etc/"), "../../../../hosts")
+	test(Dir("/etc"), "/hosts")
+	test(Dir("/etc"), "hosts")
+	test(Dir("/etc"), "../../../../hosts")
+
+	// Not really directories, but since we use this trick in
+	// ServeFile, test it:
+	test(Dir("/etc/hosts"), "")
+	test(Dir("/etc/hosts"), "/")
+	test(Dir("/etc/hosts"), "../")
+}
+
+func TestEmptyDirOpenCWD(t *testing.T) {
+	test := func(d Dir) {
+		name := "fs_test.go"
+		f, err := d.Open(name)
+		if err != nil {
+			t.Fatalf("open of %s: %v", name, err)
+		}
+		defer
f.Close() + } + test(Dir("")) + test(Dir(".")) + test(Dir("./")) +} + +func TestServeFileContentType(t *testing.T) { + defer afterTest(t) + const ctype = "icecream/chocolate" + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + switch r.FormValue("override") { + case "1": + w.Header().Set("Content-Type", ctype) + case "2": + // Explicitly inhibit sniffing. + w.Header()["Content-Type"] = []string{} + } + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + get := func(override string, want []string) { + resp, err := Get(ts.URL + "?override=" + override) + if err != nil { + t.Fatal(err) + } + if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) { + t.Errorf("Content-Type mismatch: got %v, want %v", h, want) + } + resp.Body.Close() + } + get("0", []string{"text/plain; charset=utf-8"}) + get("1", []string{ctype}) + get("2", nil) +} + +func TestServeFileMimeType(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/style.css") + })) + defer ts.Close() + resp, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + resp.Body.Close() + want := "text/css; charset=utf-8" + if h := resp.Header.Get("Content-Type"); h != want { + t.Errorf("Content-Type mismatch: got %q, want %q", h, want) + } +} + +func TestServeFileFromCWD(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "fs_test.go") + })) + defer ts.Close() + r, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + r.Body.Close() + if r.StatusCode != 200 { + t.Fatalf("expected 200 OK, got %s", r.Status) + } +} + +func TestServeFileWithContentEncoding(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Encoding", "foo") + ServeFile(w, r, "testdata/file") + })) + defer ts.Close() + resp, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + resp.Body.Close() + if g, e := resp.ContentLength, int64(-1); g != e { + t.Errorf("Content-Length mismatch: got %d, want %d", g, e) + } +} + +func TestServeIndexHtml(t *testing.T) { + defer afterTest(t) + const want = "index.html says hello\n" + ts := httptest.NewServer(FileServer(Dir("."))) + defer ts.Close() + + for _, path := range []string{"/testdata/", "/testdata/index.html"} { + res, err := Get(ts.URL + path) + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal("reading Body:", err) + } + if s := string(b); s != want { + t.Errorf("for path %q got %q, want %q", path, s, want) + } + res.Body.Close() + } +} + +func TestFileServerZeroByte(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(FileServer(Dir("."))) + defer ts.Close() + + res, err := Get(ts.URL + "/..\x00") + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal("reading Body:", err) + } + if res.StatusCode == 200 { + t.Errorf("got status 200; want an error. 
Body is:\n%s", string(b)) + } +} + +type fakeFileInfo struct { + dir bool + basename string + modtime time.Time + ents []*fakeFileInfo + contents string +} + +func (f *fakeFileInfo) Name() string { return f.basename } +func (f *fakeFileInfo) Sys() interface{} { return nil } +func (f *fakeFileInfo) ModTime() time.Time { return f.modtime } +func (f *fakeFileInfo) IsDir() bool { return f.dir } +func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) } +func (f *fakeFileInfo) Mode() os.FileMode { + if f.dir { + return 0755 | os.ModeDir + } + return 0644 +} + +type fakeFile struct { + io.ReadSeeker + fi *fakeFileInfo + path string // as opened + entpos int +} + +func (f *fakeFile) Close() error { return nil } +func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil } +func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) { + if !f.fi.dir { + return nil, os.ErrInvalid + } + var fis []os.FileInfo + + limit := f.entpos + count + if count <= 0 || limit > len(f.fi.ents) { + limit = len(f.fi.ents) + } + for ; f.entpos < limit; f.entpos++ { + fis = append(fis, f.fi.ents[f.entpos]) + } + + if len(fis) == 0 && count > 0 { + return fis, io.EOF + } else { + return fis, nil + } +} + +type fakeFS map[string]*fakeFileInfo + +func (fs fakeFS) Open(name string) (File, error) { + name = path.Clean(name) + f, ok := fs[name] + if !ok { + return nil, os.ErrNotExist + } + return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil +} + +func TestDirectoryIfNotModified(t *testing.T) { + defer afterTest(t) + const indexContents = "I am a fake index.html file" + fileMod := time.Unix(1000000000, 0).UTC() + fileModStr := fileMod.Format(TimeFormat) + dirMod := time.Unix(123, 0).UTC() + indexFile := &fakeFileInfo{ + basename: "index.html", + modtime: fileMod, + contents: indexContents, + } + fs := fakeFS{ + "/": &fakeFileInfo{ + dir: true, + modtime: dirMod, + ents: []*fakeFileInfo{indexFile}, + }, + "/index.html": indexFile, + } + + ts := httptest.NewServer(FileServer(fs)) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + b, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != indexContents { + t.Fatalf("Got body %q; want %q", b, indexContents) + } + res.Body.Close() + + lastMod := res.Header.Get("Last-Modified") + if lastMod != fileModStr { + t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr) + } + + req, _ := NewRequest("GET", ts.URL, nil) + req.Header.Set("If-Modified-Since", lastMod) + + res, err = DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 304 { + t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode) + } + res.Body.Close() + + // Advance the index.html file's modtime, but not the directory's. 
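+	// With index.html now newer than the client's If-Modified-Since
+	// time, the same conditional request should yield a full 200 below.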
+ indexFile.modtime = indexFile.modtime.Add(1 * time.Hour) + + res, err = DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 200 { + t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res) + } + res.Body.Close() +} + +func mustStat(t *testing.T, fileName string) os.FileInfo { + fi, err := os.Stat(fileName) + if err != nil { + t.Fatal(err) + } + return fi +} + +func TestServeContent(t *testing.T) { + defer afterTest(t) + type serveParam struct { + name string + modtime time.Time + content io.ReadSeeker + contentType string + etag string + } + servec := make(chan serveParam, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + p := <-servec + if p.etag != "" { + w.Header().Set("ETag", p.etag) + } + if p.contentType != "" { + w.Header().Set("Content-Type", p.contentType) + } + ServeContent(w, r, p.name, p.modtime, p.content) + })) + defer ts.Close() + + type testCase struct { + // One of file or content must be set: + file string + content io.ReadSeeker + + modtime time.Time + serveETag string // optional + serveContentType string // optional + reqHeader map[string]string + wantLastMod string + wantContentType string + wantStatus int + } + htmlModTime := mustStat(t, "testdata/index.html").ModTime() + tests := map[string]testCase{ + "no_last_modified": { + file: "testdata/style.css", + wantContentType: "text/css; charset=utf-8", + wantStatus: 200, + }, + "with_last_modified": { + file: "testdata/index.html", + wantContentType: "text/html; charset=utf-8", + modtime: htmlModTime, + wantLastMod: htmlModTime.UTC().Format(TimeFormat), + wantStatus: 200, + }, + "not_modified_modtime": { + file: "testdata/style.css", + modtime: htmlModTime, + reqHeader: map[string]string{ + "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat), + }, + wantStatus: 304, + }, + "not_modified_modtime_with_contenttype": { + file: "testdata/style.css", + serveContentType: "text/css", // explicit content type + modtime: htmlModTime, + reqHeader: map[string]string{ + "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat), + }, + wantStatus: 304, + }, + "not_modified_etag": { + file: "testdata/style.css", + serveETag: `"foo"`, + reqHeader: map[string]string{ + "If-None-Match": `"foo"`, + }, + wantStatus: 304, + }, + "not_modified_etag_no_seek": { + content: panicOnSeek{nil}, // should never be called + serveETag: `"foo"`, + reqHeader: map[string]string{ + "If-None-Match": `"foo"`, + }, + wantStatus: 304, + }, + "range_good": { + file: "testdata/style.css", + serveETag: `"A"`, + reqHeader: map[string]string{ + "Range": "bytes=0-4", + }, + wantStatus: StatusPartialContent, + wantContentType: "text/css; charset=utf-8", + }, + // An If-Range resource for entity "A", but entity "B" is now current. + // The Range request should be ignored. 
+ "range_no_match": { + file: "testdata/style.css", + serveETag: `"A"`, + reqHeader: map[string]string{ + "Range": "bytes=0-4", + "If-Range": `"B"`, + }, + wantStatus: 200, + wantContentType: "text/css; charset=utf-8", + }, + } + for testName, tt := range tests { + var content io.ReadSeeker + if tt.file != "" { + f, err := os.Open(tt.file) + if err != nil { + t.Fatalf("test %q: %v", testName, err) + } + defer f.Close() + content = f + } else { + content = tt.content + } + + servec <- serveParam{ + name: filepath.Base(tt.file), + content: content, + modtime: tt.modtime, + etag: tt.serveETag, + contentType: tt.serveContentType, + } + req, err := NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + for k, v := range tt.reqHeader { + req.Header.Set(k, v) + } + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + if res.StatusCode != tt.wantStatus { + t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus) + } + if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e { + t.Errorf("test %q: content-type = %q, want %q", testName, g, e) + } + if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e { + t.Errorf("test %q: last-modified = %q, want %q", testName, g, e) + } + } +} + +// verifies that sendfile is being used on Linux +func TestLinuxSendfile(t *testing.T) { + defer afterTest(t) + if runtime.GOOS != "linux" { + t.Skip("skipping; linux-only test") + } + if _, err := exec.LookPath("strace"); err != nil { + t.Skip("skipping; strace not found in path") + } + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + lnf, err := ln.(*net.TCPListener).File() + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + var buf bytes.Buffer + child := exec.Command("strace", "-f", "-q", "-e", "trace=sendfile,sendfile64", os.Args[0], "-test.run=TestLinuxSendfileChild") + child.ExtraFiles = append(child.ExtraFiles, lnf) + child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...) + child.Stdout = &buf + child.Stderr = &buf + if err := child.Start(); err != nil { + t.Skipf("skipping; failed to start straced child: %v", err) + } + + res, err := Get(fmt.Sprintf("http://%s/", ln.Addr())) + if err != nil { + t.Fatalf("http client error: %v", err) + } + _, err = io.Copy(ioutil.Discard, res.Body) + if err != nil { + t.Fatalf("client body read error: %v", err) + } + res.Body.Close() + + // Force child to exit cleanly. + Get(fmt.Sprintf("http://%s/quit", ln.Addr())) + child.Wait() + + rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`) + rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`) + out := buf.String() + if !rx.MatchString(out) && !rxResume.MatchString(out) { + t.Errorf("no sendfile system call found in:\n%s", out) + } +} + +func getBody(t *testing.T, testName string, req Request) (*Response, []byte) { + r, err := DefaultClient.Do(&req) + if err != nil { + t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err) + } + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err) + } + return r, b +} + +// TestLinuxSendfileChild isn't a real test. It's used as a helper process +// for TestLinuxSendfile. 
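+//
+// The parent re-executes this test binary under strace with
+// GO_WANT_HELPER_PROCESS=1 set and the listener inherited as fd 3,
+// roughly:
+//
+//	exec.Command("strace", os.Args[0], "-test.run=TestLinuxSendfileChild")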
+func TestLinuxSendfileChild(*testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + defer os.Exit(0) + fd3 := os.NewFile(3, "ephemeral-port-listener") + ln, err := net.FileListener(fd3) + if err != nil { + panic(err) + } + mux := NewServeMux() + mux.Handle("/", FileServer(Dir("testdata"))) + mux.HandleFunc("/quit", func(ResponseWriter, *Request) { + os.Exit(0) + }) + s := &Server{Handler: mux} + err = s.Serve(ln) + if err != nil { + panic(err) + } +} + +type panicOnSeek struct{ io.ReadSeeker } === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/header.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/header.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/header.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,211 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "io" + "net/textproto" + "sort" + "strings" + "sync" + "time" +) + +var raceEnabled = false // set by race.go + +// A Header represents the key-value pairs in an HTTP header. +type Header map[string][]string + +// Add adds the key, value pair to the header. +// It appends to any existing values associated with key. +func (h Header) Add(key, value string) { + textproto.MIMEHeader(h).Add(key, value) +} + +// Set sets the header entries associated with key to +// the single element value. It replaces any existing +// values associated with key. +func (h Header) Set(key, value string) { + textproto.MIMEHeader(h).Set(key, value) +} + +// Get gets the first value associated with the given key. +// If there are no values associated with the key, Get returns "". +// To access multiple values of a key, access the map directly +// with CanonicalHeaderKey. +func (h Header) Get(key string) string { + return textproto.MIMEHeader(h).Get(key) +} + +// get is like Get, but key must already be in CanonicalHeaderKey form. +func (h Header) get(key string) string { + if v := h[key]; len(v) > 0 { + return v[0] + } + return "" +} + +// Del deletes the values associated with key. +func (h Header) Del(key string) { + textproto.MIMEHeader(h).Del(key) +} + +// Write writes a header in wire format. +func (h Header) Write(w io.Writer) error { + return h.WriteSubset(w, nil) +} + +func (h Header) clone() Header { + h2 := make(Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +var timeFormats = []string{ + TimeFormat, + time.RFC850, + time.ANSIC, +} + +// ParseTime parses a time header (such as the Date: header), +// trying each of the three formats allowed by HTTP/1.1: +// TimeFormat, time.RFC850, and time.ANSIC. +func ParseTime(text string) (t time.Time, err error) { + for _, layout := range timeFormats { + t, err = time.Parse(layout, text) + if err == nil { + return + } + } + return +} + +var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") + +type writeStringer interface { + WriteString(string) (int, error) +} + +// stringWriter implements WriteString on a Writer. +type stringWriter struct { + w io.Writer +} + +func (w stringWriter) WriteString(s string) (n int, err error) { + return w.w.Write([]byte(s)) +} + +type keyValues struct { + key string + values []string +} + +// A headerSorter implements sort.Interface by sorting a []keyValues +// by key. It's used as a pointer, so it can fit in a sort.Interface +// interface value without allocation. 
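+//
+// Sorters are recycled through headerSorterPool below, which is what
+// keeps WriteSubset allocation-free (see TestHeaderWriteSubsetAllocs).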
+type headerSorter struct { + kvs []keyValues +} + +func (s *headerSorter) Len() int { return len(s.kvs) } +func (s *headerSorter) Swap(i, j int) { s.kvs[i], s.kvs[j] = s.kvs[j], s.kvs[i] } +func (s *headerSorter) Less(i, j int) bool { return s.kvs[i].key < s.kvs[j].key } + +var headerSorterPool = sync.Pool{ + New: func() interface{} { return new(headerSorter) }, +} + +// sortedKeyValues returns h's keys sorted in the returned kvs +// slice. The headerSorter used to sort is also returned, for possible +// return to headerSorterCache. +func (h Header) sortedKeyValues(exclude map[string]bool) (kvs []keyValues, hs *headerSorter) { + hs = headerSorterPool.Get().(*headerSorter) + if cap(hs.kvs) < len(h) { + hs.kvs = make([]keyValues, 0, len(h)) + } + kvs = hs.kvs[:0] + for k, vv := range h { + if !exclude[k] { + kvs = append(kvs, keyValues{k, vv}) + } + } + hs.kvs = kvs + sort.Sort(hs) + return kvs, hs +} + +// WriteSubset writes a header in wire format. +// If exclude is not nil, keys where exclude[key] == true are not written. +func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error { + ws, ok := w.(writeStringer) + if !ok { + ws = stringWriter{w} + } + kvs, sorter := h.sortedKeyValues(exclude) + for _, kv := range kvs { + for _, v := range kv.values { + v = headerNewlineToSpace.Replace(v) + v = textproto.TrimString(v) + for _, s := range []string{kv.key, ": ", v, "\r\n"} { + if _, err := ws.WriteString(s); err != nil { + return err + } + } + } + } + headerSorterPool.Put(sorter) + return nil +} + +// CanonicalHeaderKey returns the canonical format of the +// header key s. The canonicalization converts the first +// letter and any letter following a hyphen to upper case; +// the rest are converted to lowercase. For example, the +// canonical key for "accept-encoding" is "Accept-Encoding". +func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) } + +// hasToken reports whether token appears with v, ASCII +// case-insensitive, with space or comma boundaries. +// token must be all lowercase. +// v may contain mixed cased. +func hasToken(v, token string) bool { + if len(token) > len(v) || token == "" { + return false + } + if v == token { + return true + } + for sp := 0; sp <= len(v)-len(token); sp++ { + // Check that first character is good. + // The token is ASCII, so checking only a single byte + // is sufficient. We skip this potential starting + // position if both the first byte and its potential + // ASCII uppercase equivalent (b|0x20) don't match. + // False positives ('^' => '~') are caught by EqualFold. + if b := v[sp]; b != token[0] && b|0x20 != token[0] { + continue + } + // Check that start pos is on a valid token boundary. + if sp > 0 && !isTokenBoundary(v[sp-1]) { + continue + } + // Check that end pos is on a valid token boundary. + if endPos := sp + len(token); endPos != len(v) && !isTokenBoundary(v[endPos]) { + continue + } + if strings.EqualFold(v[sp:sp+len(token)], token) { + return true + } + } + return false +} + +func isTokenBoundary(b byte) bool { + return b == ' ' || b == ',' || b == '\t' +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/header_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/header_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/header_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,212 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "runtime" + "testing" + "time" +) + +var headerWriteTests = []struct { + h Header + exclude map[string]bool + expected string +}{ + {Header{}, nil, ""}, + { + Header{ + "Content-Type": {"text/html; charset=UTF-8"}, + "Content-Length": {"0"}, + }, + nil, + "Content-Length: 0\r\nContent-Type: text/html; charset=UTF-8\r\n", + }, + { + Header{ + "Content-Length": {"0", "1", "2"}, + }, + nil, + "Content-Length: 0\r\nContent-Length: 1\r\nContent-Length: 2\r\n", + }, + { + Header{ + "Expires": {"-1"}, + "Content-Length": {"0"}, + "Content-Encoding": {"gzip"}, + }, + map[string]bool{"Content-Length": true}, + "Content-Encoding: gzip\r\nExpires: -1\r\n", + }, + { + Header{ + "Expires": {"-1"}, + "Content-Length": {"0", "1", "2"}, + "Content-Encoding": {"gzip"}, + }, + map[string]bool{"Content-Length": true}, + "Content-Encoding: gzip\r\nExpires: -1\r\n", + }, + { + Header{ + "Expires": {"-1"}, + "Content-Length": {"0"}, + "Content-Encoding": {"gzip"}, + }, + map[string]bool{"Content-Length": true, "Expires": true, "Content-Encoding": true}, + "", + }, + { + Header{ + "Nil": nil, + "Empty": {}, + "Blank": {""}, + "Double-Blank": {"", ""}, + }, + nil, + "Blank: \r\nDouble-Blank: \r\nDouble-Blank: \r\n", + }, + // Tests header sorting when over the insertion sort threshold side: + { + Header{ + "k1": {"1a", "1b"}, + "k2": {"2a", "2b"}, + "k3": {"3a", "3b"}, + "k4": {"4a", "4b"}, + "k5": {"5a", "5b"}, + "k6": {"6a", "6b"}, + "k7": {"7a", "7b"}, + "k8": {"8a", "8b"}, + "k9": {"9a", "9b"}, + }, + map[string]bool{"k5": true}, + "k1: 1a\r\nk1: 1b\r\nk2: 2a\r\nk2: 2b\r\nk3: 3a\r\nk3: 3b\r\n" + + "k4: 4a\r\nk4: 4b\r\nk6: 6a\r\nk6: 6b\r\n" + + "k7: 7a\r\nk7: 7b\r\nk8: 8a\r\nk8: 8b\r\nk9: 9a\r\nk9: 9b\r\n", + }, +} + +func TestHeaderWrite(t *testing.T) { + var buf bytes.Buffer + for i, test := range headerWriteTests { + test.h.WriteSubset(&buf, test.exclude) + if buf.String() != test.expected { + t.Errorf("#%d:\n got: %q\nwant: %q", i, buf.String(), test.expected) + } + buf.Reset() + } +} + +var parseTimeTests = []struct { + h Header + err bool +}{ + {Header{"Date": {""}}, true}, + {Header{"Date": {"invalid"}}, true}, + {Header{"Date": {"1994-11-06T08:49:37Z00:00"}}, true}, + {Header{"Date": {"Sun, 06 Nov 1994 08:49:37 GMT"}}, false}, + {Header{"Date": {"Sunday, 06-Nov-94 08:49:37 GMT"}}, false}, + {Header{"Date": {"Sun Nov 6 08:49:37 1994"}}, false}, +} + +func TestParseTime(t *testing.T) { + expect := time.Date(1994, 11, 6, 8, 49, 37, 0, time.UTC) + for i, test := range parseTimeTests { + d, err := ParseTime(test.h.Get("Date")) + if err != nil { + if !test.err { + t.Errorf("#%d:\n got err: %v", i, err) + } + continue + } + if test.err { + t.Errorf("#%d:\n should err", i) + continue + } + if !expect.Equal(d) { + t.Errorf("#%d:\n got: %v\nwant: %v", i, d, expect) + } + } +} + +type hasTokenTest struct { + header string + token string + want bool +} + +var hasTokenTests = []hasTokenTest{ + {"", "", false}, + {"", "foo", false}, + {"foo", "foo", true}, + {"foo ", "foo", true}, + {" foo", "foo", true}, + {" foo ", "foo", true}, + {"foo,bar", "foo", true}, + {"bar,foo", "foo", true}, + {"bar, foo", "foo", true}, + {"bar,foo, baz", "foo", true}, + {"bar, foo,baz", "foo", true}, + {"bar,foo, baz", "foo", true}, + {"bar, foo, baz", "foo", true}, + {"FOO", "foo", true}, + {"FOO ", "foo", true}, + {" FOO", "foo", true}, + {" FOO ", "foo", true}, + {"FOO,BAR", "foo", true}, + 
{"BAR,FOO", "foo", true}, + {"BAR, FOO", "foo", true}, + {"BAR,FOO, baz", "foo", true}, + {"BAR, FOO,BAZ", "foo", true}, + {"BAR,FOO, BAZ", "foo", true}, + {"BAR, FOO, BAZ", "foo", true}, + {"foobar", "foo", false}, + {"barfoo ", "foo", false}, +} + +func TestHasToken(t *testing.T) { + for _, tt := range hasTokenTests { + if hasToken(tt.header, tt.token) != tt.want { + t.Errorf("hasToken(%q, %q) = %v; want %v", tt.header, tt.token, !tt.want, tt.want) + } + } +} + +var testHeader = Header{ + "Content-Length": {"123"}, + "Content-Type": {"text/plain"}, + "Date": {"some date at some time Z"}, + "Server": {DefaultUserAgent}, +} + +var buf bytes.Buffer + +func BenchmarkHeaderWriteSubset(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + buf.Reset() + testHeader.WriteSubset(&buf, nil) + } +} + +func TestHeaderWriteSubsetAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping alloc test in short mode") + } + if raceEnabled { + t.Skip("skipping test under race detector") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Skip("skipping; GOMAXPROCS>1") + } + n := testing.AllocsPerRun(100, func() { + buf.Reset() + testHeader.WriteSubset(&buf, nil) + }) + if n > 0 { + t.Errorf("allocs = %g; want 0", n) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/httptest' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httptest/example_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httptest_test + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/http/httptest" +) + +func ExampleResponseRecorder() { + handler := func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "something failed", http.StatusInternalServerError) + } + + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + log.Fatal(err) + } + + w := httptest.NewRecorder() + handler(w, req) + + fmt.Printf("%d - %s", w.Code, w.Body.String()) + // Output: 500 - something failed +} + +func ExampleServer() { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Hello, client") + })) + defer ts.Close() + + res, err := http.Get(ts.URL) + if err != nil { + log.Fatal(err) + } + greeting, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%s", greeting) + // Output: Hello, client +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httptest provides utilities for HTTP testing. +package httptest + +import ( + "bytes" + "net/http" +) + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. 
+type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool + + wroteHeader bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + Code: 200, + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + m := rw.HeaderMap + if m == nil { + m = make(http.Header) + rw.HeaderMap = m + } + return m +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if !rw.wroteHeader { + rw.WriteHeader(200) + } + if rw.Body != nil { + rw.Body.Write(buf) + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + if !rw.wroteHeader { + rw.Code = code + } + rw.wroteHeader = true +} + +// Flush sets rw.Flushed to true. +func (rw *ResponseRecorder) Flush() { + if !rw.wroteHeader { + rw.WriteHeader(200) + } + rw.Flushed = true +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httptest/recorder_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,90 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httptest + +import ( + "fmt" + "net/http" + "testing" +) + +func TestRecorder(t *testing.T) { + type checkFunc func(*ResponseRecorder) error + check := func(fns ...checkFunc) []checkFunc { return fns } + + hasStatus := func(wantCode int) checkFunc { + return func(rec *ResponseRecorder) error { + if rec.Code != wantCode { + return fmt.Errorf("Status = %d; want %d", rec.Code, wantCode) + } + return nil + } + } + hasContents := func(want string) checkFunc { + return func(rec *ResponseRecorder) error { + if rec.Body.String() != want { + return fmt.Errorf("wrote = %q; want %q", rec.Body.String(), want) + } + return nil + } + } + hasFlush := func(want bool) checkFunc { + return func(rec *ResponseRecorder) error { + if rec.Flushed != want { + return fmt.Errorf("Flushed = %v; want %v", rec.Flushed, want) + } + return nil + } + } + + tests := []struct { + name string + h func(w http.ResponseWriter, r *http.Request) + checks []checkFunc + }{ + { + "200 default", + func(w http.ResponseWriter, r *http.Request) {}, + check(hasStatus(200), hasContents("")), + }, + { + "first code only", + func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(201) + w.WriteHeader(202) + w.Write([]byte("hi")) + }, + check(hasStatus(201), hasContents("hi")), + }, + { + "write sends 200", + func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hi first")) + w.WriteHeader(201) + w.WriteHeader(202) + }, + check(hasStatus(200), hasContents("hi first"), hasFlush(false)), + }, + { + "flush", + func(w http.ResponseWriter, r *http.Request) { + w.(http.Flusher).Flush() // also sends a 200 + w.WriteHeader(201) + }, + check(hasStatus(200), hasFlush(true)), + }, + } + r, _ := http.NewRequest("GET", "http://foo.com/", nil) + for _, tt := range tests { + h := http.HandlerFunc(tt.h) + rec := NewRecorder() + h.ServeHTTP(rec, r) + for _, check := range tt.checks { + if err := check(rec); err != nil { + t.Errorf("%s: %v", tt.name, err) + } + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,228 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Implementation of Server + +package httptest + +import ( + "crypto/tls" + "flag" + "fmt" + "net" + "net/http" + "os" + "sync" +) + +// A Server is an HTTP server listening on a system-chosen port on the +// local loopback interface, for use in end-to-end HTTP tests. +type Server struct { + URL string // base URL of form http://ipaddr:port with no trailing slash + Listener net.Listener + + // TLS is the optional TLS configuration, populated with a new config + // after TLS is started. If set on an unstarted server before StartTLS + // is called, existing fields are copied into the new config. + TLS *tls.Config + + // Config may be changed after calling NewUnstartedServer and + // before Start or StartTLS. + Config *http.Server + + // wg counts the number of outstanding HTTP requests on this server. + // Close blocks until all requests are finished. + wg sync.WaitGroup +} + +// historyListener keeps track of all connections that it's ever +// accepted. 
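+//
+// The recorded history is what lets CloseClientConnections (and thus
+// Close) force every accepted connection shut.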
+type historyListener struct { + net.Listener + sync.Mutex // protects history + history []net.Conn +} + +func (hs *historyListener) Accept() (c net.Conn, err error) { + c, err = hs.Listener.Accept() + if err == nil { + hs.Lock() + hs.history = append(hs.history, c) + hs.Unlock() + } + return +} + +func newLocalListener() net.Listener { + if *serve != "" { + l, err := net.Listen("tcp", *serve) + if err != nil { + panic(fmt.Sprintf("httptest: failed to listen on %v: %v", *serve, err)) + } + return l + } + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { + panic(fmt.Sprintf("httptest: failed to listen on a port: %v", err)) + } + } + return l +} + +// When debugging a particular http server-based test, +// this flag lets you run +// go test -run=BrokenTest -httptest.serve=127.0.0.1:8000 +// to start the broken server so you can interact with it manually. +var serve = flag.String("httptest.serve", "", "if non-empty, httptest.NewServer serves on this address and blocks") + +// NewServer starts and returns a new Server. +// The caller should call Close when finished, to shut it down. +func NewServer(handler http.Handler) *Server { + ts := NewUnstartedServer(handler) + ts.Start() + return ts +} + +// NewUnstartedServer returns a new Server but doesn't start it. +// +// After changing its configuration, the caller should call Start or +// StartTLS. +// +// The caller should call Close when finished, to shut it down. +func NewUnstartedServer(handler http.Handler) *Server { + return &Server{ + Listener: newLocalListener(), + Config: &http.Server{Handler: handler}, + } +} + +// Start starts a server from NewUnstartedServer. +func (s *Server) Start() { + if s.URL != "" { + panic("Server already started") + } + s.Listener = &historyListener{Listener: s.Listener} + s.URL = "http://" + s.Listener.Addr().String() + s.wrapHandler() + go s.Config.Serve(s.Listener) + if *serve != "" { + fmt.Fprintln(os.Stderr, "httptest: serving on", s.URL) + select {} + } +} + +// StartTLS starts TLS on a server from NewUnstartedServer. +func (s *Server) StartTLS() { + if s.URL != "" { + panic("Server already started") + } + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + panic(fmt.Sprintf("httptest: NewTLSServer: %v", err)) + } + + existingConfig := s.TLS + s.TLS = new(tls.Config) + if existingConfig != nil { + *s.TLS = *existingConfig + } + if s.TLS.NextProtos == nil { + s.TLS.NextProtos = []string{"http/1.1"} + } + if len(s.TLS.Certificates) == 0 { + s.TLS.Certificates = []tls.Certificate{cert} + } + tlsListener := tls.NewListener(s.Listener, s.TLS) + + s.Listener = &historyListener{Listener: tlsListener} + s.URL = "https://" + s.Listener.Addr().String() + s.wrapHandler() + go s.Config.Serve(s.Listener) +} + +func (s *Server) wrapHandler() { + h := s.Config.Handler + if h == nil { + h = http.DefaultServeMux + } + s.Config.Handler = &waitGroupHandler{ + s: s, + h: h, + } +} + +// NewTLSServer starts and returns a new Server using TLS. +// The caller should call Close when finished, to shut it down. +func NewTLSServer(handler http.Handler) *Server { + ts := NewUnstartedServer(handler) + ts.StartTLS() + return ts +} + +// Close shuts down the server and blocks until all outstanding +// requests on this server have completed. 
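+//
+// Typical use (see ExampleServer in example_test.go):
+//
+//	ts := httptest.NewServer(handler)
+//	defer ts.Close()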
+func (s *Server) Close() { + s.Listener.Close() + s.wg.Wait() + s.CloseClientConnections() + if t, ok := http.DefaultTransport.(*http.Transport); ok { + t.CloseIdleConnections() + } +} + +// CloseClientConnections closes any currently open HTTP connections +// to the test Server. +func (s *Server) CloseClientConnections() { + hl, ok := s.Listener.(*historyListener) + if !ok { + return + } + hl.Lock() + for _, conn := range hl.history { + conn.Close() + } + hl.Unlock() +} + +// waitGroupHandler wraps a handler, incrementing and decrementing a +// sync.WaitGroup on each request, to enable Server.Close to block +// until outstanding requests are finished. +type waitGroupHandler struct { + s *Server + h http.Handler // non-nil +} + +func (h *waitGroupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.s.wg.Add(1) + defer h.s.wg.Done() // a defer, in case ServeHTTP below panics + h.h.ServeHTTP(w, r) +} + +// localhostCert is a PEM-encoded TLS cert with SAN IPs +// "127.0.0.1" and "[::1]", expiring at the last second of 2049 (the end +// of ASN.1 time). +// generated from src/pkg/crypto/tls: +// go run generate_cert.go --rsa-bits 512 --host 127.0.0.1,::1,example.com --ca --start-date "Jan 1 00:00:00 1970" --duration=1000000h +var localhostCert = []byte(`-----BEGIN CERTIFICATE----- +MIIBdzCCASOgAwIBAgIBADALBgkqhkiG9w0BAQUwEjEQMA4GA1UEChMHQWNtZSBD +bzAeFw03MDAxMDEwMDAwMDBaFw00OTEyMzEyMzU5NTlaMBIxEDAOBgNVBAoTB0Fj +bWUgQ28wWjALBgkqhkiG9w0BAQEDSwAwSAJBAN55NcYKZeInyTuhcCwFMhDHCmwa +IUSdtXdcbItRB/yfXGBhiex00IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEA +AaNoMGYwDgYDVR0PAQH/BAQDAgCkMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA8GA1Ud +EwEB/wQFMAMBAf8wLgYDVR0RBCcwJYILZXhhbXBsZS5jb22HBH8AAAGHEAAAAAAA +AAAAAAAAAAAAAAEwCwYJKoZIhvcNAQEFA0EAAoQn/ytgqpiLcZu9XKbCJsJcvkgk +Se6AbGXgSlq+ZCEVo0qIwSgeBqmsJxUu7NCSOwVJLYNEBO2DtIxoYVk+MA== +-----END CERTIFICATE-----`) + +// localhostKey is the private key for localhostCert. +var localhostKey = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIBPAIBAAJBAN55NcYKZeInyTuhcCwFMhDHCmwaIUSdtXdcbItRB/yfXGBhiex0 +0IaLXQnSU+QZPRZWYqeTEbFSgihqi1PUDy8CAwEAAQJBAQdUx66rfh8sYsgfdcvV +NoafYpnEcB5s4m/vSVe6SU7dCK6eYec9f9wpT353ljhDUHq3EbmE4foNzJngh35d +AekCIQDhRQG5Li0Wj8TM4obOnnXUXf1jRv0UkzE9AHWLG5q3AwIhAPzSjpYUDjVW +MCUXgckTpKCuGwbJk7424Nb8bLzf3kllAiA5mUBgjfr/WtFSJdWcPQ4Zt9KTMNKD +EUO0ukpTwEIl6wIhAMbGqZK3zAAFdq8DD2jPx+UJXnh0rnOkZBzDtJ6/iN69AiEA +1Aq8MJgTaYsDQWyU/hDq5YkDJc9e9DSCvUIzqxQWMQE= +-----END RSA PRIVATE KEY-----`) === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httptest/server_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httptest + +import ( + "io/ioutil" + "net/http" + "testing" + "time" +) + +func TestServer(t *testing.T) { + ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("hello")) + })) + defer ts.Close() + res, err := http.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + got, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if string(got) != "hello" { + t.Errorf("got %q, want hello", string(got)) + } +} + +func TestIssue7264(t *testing.T) { + for i := 0; i < 1000; i++ { + func() { + inHandler := make(chan bool, 1) + ts := NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + inHandler <- true + })) + defer ts.Close() + tr := &http.Transport{ + ResponseHeaderTimeout: time.Nanosecond, + } + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + res, err := c.Get(ts.URL) + <-inHandler + if err == nil { + res.Body.Close() + } + }() + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,203 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The wire protocol for HTTP's "chunked" Transfer-Encoding. + +// This code is duplicated in net/http and net/http/httputil. +// Please make any changes in both files. + +package httputil + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" +) + +const maxLineLength = 4096 // assumed <= bufio.defaultBufSize + +var ErrLineTooLong = errors.New("header line too long") + +// newChunkedReader returns a new chunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// newChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. +func newChunkedReader(r io.Reader) io.Reader { + br, ok := r.(*bufio.Reader) + if !ok { + br = bufio.NewReader(r) + } + return &chunkedReader{r: br} +} + +type chunkedReader struct { + r *bufio.Reader + n uint64 // unread bytes in chunk + err error + buf [2]byte +} + +func (cr *chunkedReader) beginChunk() { + // chunk-size CRLF + var line []byte + line, cr.err = readLine(cr.r) + if cr.err != nil { + return + } + cr.n, cr.err = parseHexUint(line) + if cr.err != nil { + return + } + if cr.n == 0 { + cr.err = io.EOF + } +} + +func (cr *chunkedReader) chunkHeaderAvailable() bool { + n := cr.r.Buffered() + if n > 0 { + peek, _ := cr.r.Peek(n) + return bytes.IndexByte(peek, '\n') >= 0 + } + return false +} + +func (cr *chunkedReader) Read(b []uint8) (n int, err error) { + for cr.err == nil { + if cr.n == 0 { + if n > 0 && !cr.chunkHeaderAvailable() { + // We've read enough. Don't potentially block + // reading a new chunk header. + break + } + cr.beginChunk() + continue + } + if len(b) == 0 { + break + } + rbuf := b + if uint64(len(rbuf)) > cr.n { + rbuf = rbuf[:cr.n] + } + var n0 int + n0, cr.err = cr.r.Read(rbuf) + n += n0 + b = b[n0:] + cr.n -= uint64(n0) + // If we're at the end of a chunk, read the next two + // bytes to verify they are "\r\n". 
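+		// A missing CRLF terminator means the stream is corrupt; report it
+		// as malformed chunked encoding rather than trying to resync.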
+		if cr.n == 0 && cr.err == nil {
+			if _, cr.err = io.ReadFull(cr.r, cr.buf[:2]); cr.err == nil {
+				if cr.buf[0] != '\r' || cr.buf[1] != '\n' {
+					cr.err = errors.New("malformed chunked encoding")
+				}
+			}
+		}
+	}
+	return n, cr.err
+}
+
+// Read a line of bytes (up to \n) from b.
+// Give up if the line exceeds maxLineLength.
+// The returned bytes are a pointer into storage in
+// the bufio, so they are only valid until the next bufio read.
+func readLine(b *bufio.Reader) (p []byte, err error) {
+	if p, err = b.ReadSlice('\n'); err != nil {
+		// We always know when EOF is coming.
+		// If the caller asked for a line, there should be a line.
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		} else if err == bufio.ErrBufferFull {
+			err = ErrLineTooLong
+		}
+		return nil, err
+	}
+	if len(p) >= maxLineLength {
+		return nil, ErrLineTooLong
+	}
+	return trimTrailingWhitespace(p), nil
+}
+
+func trimTrailingWhitespace(b []byte) []byte {
+	for len(b) > 0 && isASCIISpace(b[len(b)-1]) {
+		b = b[:len(b)-1]
+	}
+	return b
+}
+
+func isASCIISpace(b byte) bool {
+	return b == ' ' || b == '\t' || b == '\n' || b == '\r'
+}
+
+// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned chunkedWriter
+// sends the final 0-length chunk that marks the end of the stream.
+//
+// newChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using newChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// length, both of which are wrong.
+func newChunkedWriter(w io.Writer) io.WriteCloser {
+	return &chunkedWriter{w}
+}
+
+// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire writer.
+type chunkedWriter struct {
+	Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+// Note that the corresponding chunk-writing procedure in Conn.Write has
+// a bug since it does not check for success of io.WriteString.
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+	// Don't send 0-length data. It looks like EOF for chunked encoding.
+	if len(data) == 0 {
+		return 0, nil
+	}
+
+	if _, err = fmt.Fprintf(cw.Wire, "%x\r\n", len(data)); err != nil {
+		return 0, err
+	}
+	if n, err = cw.Wire.Write(data); err != nil {
+		return
+	}
+	if n != len(data) {
+		err = io.ErrShortWrite
+		return
+	}
+	_, err = io.WriteString(cw.Wire, "\r\n")
+
+	return
+}
+
+func (cw *chunkedWriter) Close() error {
+	_, err := io.WriteString(cw.Wire, "0\r\n")
+	return err
+}
+
+func parseHexUint(v []byte) (n uint64, err error) {
+	for _, b := range v {
+		n <<= 4
+		switch {
+		case '0' <= b && b <= '9':
+			b = b - '0'
+		case 'a' <= b && b <= 'f':
+			b = b - 'a' + 10
+		case 'A' <= b && b <= 'F':
+			b = b - 'A' + 10
+		default:
+			return 0, errors.New("invalid byte in chunk length")
+		}
+		n |= uint64(b)
+	}
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go'
--- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/chunked_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,159 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code is duplicated in net/http and net/http/httputil.
+// Please make any changes in both files.
+
+package httputil
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+func TestChunk(t *testing.T) {
+	var b bytes.Buffer
+
+	w := newChunkedWriter(&b)
+	const chunk1 = "hello, "
+	const chunk2 = "world! 0123456789abcdef"
+	w.Write([]byte(chunk1))
+	w.Write([]byte(chunk2))
+	w.Close()
+
+	if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
+		t.Fatalf("chunk writer wrote %q; want %q", g, e)
+	}
+
+	r := newChunkedReader(&b)
+	data, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Logf(`data: "%s"`, data)
+		t.Fatalf("ReadAll from reader: %v", err)
+	}
+	if g, e := string(data), chunk1+chunk2; g != e {
+		t.Errorf("chunk reader read %q; want %q", g, e)
+	}
+}
+
+func TestChunkReadMultiple(t *testing.T) {
+	// Bunch of small chunks, all read together.
+	{
+		var b bytes.Buffer
+		w := newChunkedWriter(&b)
+		w.Write([]byte("foo"))
+		w.Write([]byte("bar"))
+		w.Close()
+
+		r := newChunkedReader(&b)
+		buf := make([]byte, 10)
+		n, err := r.Read(buf)
+		if n != 6 || err != io.EOF {
+			t.Errorf("Read = %d, %v; want 6, EOF", n, err)
+		}
+		buf = buf[:n]
+		if string(buf) != "foobar" {
+			t.Errorf("Read = %q; want %q", buf, "foobar")
+		}
+	}
+
+	// One big chunk followed by a little chunk, but the small bufio.Reader size
+	// should prevent the second chunk header from being read.
+	{
+		var b bytes.Buffer
+		w := newChunkedWriter(&b)
+		// fillBufChunk is 11 bytes + 3 bytes header + 2 bytes footer = 16 bytes,
+		// the same as the bufio ReaderSize below (the minimum), so even
+		// though we're going to try to Read with a buffer large enough to also
+		// receive "foo", the second chunk header won't be read yet.
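+		// Concretely, "0123456789a" encodes as "b\r\n" + data + "\r\n",
+		// which fills the 16-byte bufio.Reader below exactly.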
+ const fillBufChunk = "0123456789a" + const shortChunk = "foo" + w.Write([]byte(fillBufChunk)) + w.Write([]byte(shortChunk)) + w.Close() + + r := newChunkedReader(bufio.NewReaderSize(&b, 16)) + buf := make([]byte, len(fillBufChunk)+len(shortChunk)) + n, err := r.Read(buf) + if n != len(fillBufChunk) || err != nil { + t.Errorf("Read = %d, %v; want %d, nil", n, err, len(fillBufChunk)) + } + buf = buf[:n] + if string(buf) != fillBufChunk { + t.Errorf("Read = %q; want %q", buf, fillBufChunk) + } + + n, err = r.Read(buf) + if n != len(shortChunk) || err != io.EOF { + t.Errorf("Read = %d, %v; want %d, EOF", n, err, len(shortChunk)) + } + } + + // And test that we see an EOF chunk, even though our buffer is already full: + { + r := newChunkedReader(bufio.NewReader(strings.NewReader("3\r\nfoo\r\n0\r\n"))) + buf := make([]byte, 3) + n, err := r.Read(buf) + if n != 3 || err != io.EOF { + t.Errorf("Read = %d, %v; want 3, EOF", n, err) + } + if string(buf) != "foo" { + t.Errorf("buf = %q; want foo", buf) + } + } +} + +func TestChunkReaderAllocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + var buf bytes.Buffer + w := newChunkedWriter(&buf) + a, b, c := []byte("aaaaaa"), []byte("bbbbbbbbbbbb"), []byte("cccccccccccccccccccccccc") + w.Write(a) + w.Write(b) + w.Write(c) + w.Close() + + readBuf := make([]byte, len(a)+len(b)+len(c)+1) + byter := bytes.NewReader(buf.Bytes()) + bufr := bufio.NewReader(byter) + mallocs := testing.AllocsPerRun(100, func() { + byter.Seek(0, 0) + bufr.Reset(byter) + r := newChunkedReader(bufr) + n, err := io.ReadFull(r, readBuf) + if n != len(readBuf)-1 { + t.Fatalf("read %d bytes; want %d", n, len(readBuf)-1) + } + if err != io.ErrUnexpectedEOF { + t.Fatalf("read error = %v; want ErrUnexpectedEOF", err) + } + }) + if mallocs > 1.5 { + t.Errorf("mallocs = %v; want 1", mallocs) + } +} + +func TestParseHexUint(t *testing.T) { + for i := uint64(0); i <= 1234; i++ { + line := []byte(fmt.Sprintf("%x", i)) + got, err := parseHexUint(line) + if err != nil { + t.Fatalf("on %d: %v", i, err) + } + if got != i { + t.Errorf("for input %q = %d; want %d", line, got, i) + } + } + _, err := parseHexUint([]byte("bogus")) + if err == nil { + t.Error("expected error on bogus input") + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,276 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httputil + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// One of the copies, say from b to r2, could be avoided by using a more +// elaborate trick where the other copy is made during Request/Response.Write. +// This would complicate things too much, given that these functions are for +// debugging only. 
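+// drainBody reads all of b to memory and then returns two equivalent
+// ReadClosers yielding the same bytes.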
+func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) { + var buf bytes.Buffer + if _, err = buf.ReadFrom(b); err != nil { + return nil, nil, err + } + if err = b.Close(); err != nil { + return nil, nil, err + } + return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewReader(buf.Bytes())), nil +} + +// dumpConn is a net.Conn which writes to Writer and reads from Reader +type dumpConn struct { + io.Writer + io.Reader +} + +func (c *dumpConn) Close() error { return nil } +func (c *dumpConn) LocalAddr() net.Addr { return nil } +func (c *dumpConn) RemoteAddr() net.Addr { return nil } +func (c *dumpConn) SetDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetReadDeadline(t time.Time) error { return nil } +func (c *dumpConn) SetWriteDeadline(t time.Time) error { return nil } + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +// DumpRequestOut is like DumpRequest but includes +// headers that the standard http.Transport adds, +// such as User-Agent. +func DumpRequestOut(req *http.Request, body bool) ([]byte, error) { + save := req.Body + dummyBody := false + if !body || req.Body == nil { + req.Body = nil + if req.ContentLength != 0 { + req.Body = ioutil.NopCloser(io.LimitReader(neverEnding('x'), req.ContentLength)) + dummyBody = true + } + } else { + var err error + save, req.Body, err = drainBody(req.Body) + if err != nil { + return nil, err + } + } + + // Since we're using the actual Transport code to write the request, + // switch to http so the Transport doesn't try to do an SSL + // negotiation with our dumpConn and its bytes.Buffer & pipe. + // The wire format for https and http are the same, anyway. + reqSend := req + if req.URL.Scheme == "https" { + reqSend = new(http.Request) + *reqSend = *req + reqSend.URL = new(url.URL) + *reqSend.URL = *req.URL + reqSend.URL.Scheme = "http" + } + + // Use the actual Transport code to record what we would send + // on the wire, but not using TCP. Use a Transport with a + // custom dialer that returns a fake net.Conn that waits + // for the full input (and recording it), and then responds + // with a dummy response. + var buf bytes.Buffer // records the output + pr, pw := io.Pipe() + dr := &delegateReader{c: make(chan io.Reader)} + // Wait for the request before replying with a dummy response: + go func() { + http.ReadRequest(bufio.NewReader(pr)) + dr.c <- strings.NewReader("HTTP/1.1 204 No Content\r\n\r\n") + }() + + t := &http.Transport{ + Dial: func(net, addr string) (net.Conn, error) { + return &dumpConn{io.MultiWriter(&buf, pw), dr}, nil + }, + } + defer t.CloseIdleConnections() + + _, err := t.RoundTrip(reqSend) + + req.Body = save + if err != nil { + return nil, err + } + dump := buf.Bytes() + + // If we used a dummy body above, remove it now. + // TODO: if the req.ContentLength is large, we allocate memory + // unnecessarily just to slice it off here. But this is just + // a debug function, so this is acceptable for now. We could + // discard the body earlier if this matters. + if dummyBody { + if i := bytes.Index(dump, []byte("\r\n\r\n")); i >= 0 { + dump = dump[:i+4] + } + } + return dump, nil +} + +// delegateReader is a reader that delegates to another reader, +// once it arrives on a channel. 
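+// It lets DumpRequestOut delay the fake connection's reads until the
+// recorded request has been written and the dummy response is ready.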
+type delegateReader struct { + c chan io.Reader + r io.Reader // nil until received from c +} + +func (r *delegateReader) Read(p []byte) (int, error) { + if r.r == nil { + r.r = <-r.c + } + return r.r.Read(p) +} + +// Return value if nonempty, def otherwise. +func valueOrDefault(value, def string) string { + if value != "" { + return value + } + return def +} + +var reqWriteExcludeHeaderDump = map[string]bool{ + "Host": true, // not in Header map anyway + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// dumpAsReceived writes req to w in the form as it was received, or +// at least as accurately as possible from the information retained in +// the request. +func dumpAsReceived(req *http.Request, w io.Writer) error { + return nil +} + +// DumpRequest returns the as-received wire representation of req, +// optionally including the request body, for debugging. +// DumpRequest is semantically a no-op, but in order to +// dump the body, it reads the body data into memory and +// changes req.Body to refer to the in-memory copy. +// The documentation for http.Request.Write details which fields +// of req are used. +func DumpRequest(req *http.Request, body bool) (dump []byte, err error) { + save := req.Body + if !body || req.Body == nil { + req.Body = nil + } else { + save, req.Body, err = drainBody(req.Body) + if err != nil { + return + } + } + + var b bytes.Buffer + + fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), + req.URL.RequestURI(), req.ProtoMajor, req.ProtoMinor) + + host := req.Host + if host == "" && req.URL != nil { + host = req.URL.Host + } + if host != "" { + fmt.Fprintf(&b, "Host: %s\r\n", host) + } + + chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked" + if len(req.TransferEncoding) > 0 { + fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ",")) + } + if req.Close { + fmt.Fprintf(&b, "Connection: close\r\n") + } + + err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump) + if err != nil { + return + } + + io.WriteString(&b, "\r\n") + + if req.Body != nil { + var dest io.Writer = &b + if chunked { + dest = NewChunkedWriter(dest) + } + _, err = io.Copy(dest, req.Body) + if chunked { + dest.(io.Closer).Close() + io.WriteString(&b, "\r\n") + } + } + + req.Body = save + if err != nil { + return + } + dump = b.Bytes() + return +} + +// errNoBody is a sentinel error value used by failureToReadBody so we can detect +// that the lack of body was intentional. +var errNoBody = errors.New("sentinel error value") + +// failureToReadBody is a io.ReadCloser that just returns errNoBody on +// Read. It's swapped in when we don't actually want to consume the +// body, but need a non-nil one, and want to distinguish the error +// from reading the dummy body. +type failureToReadBody struct{} + +func (failureToReadBody) Read([]byte) (int, error) { return 0, errNoBody } +func (failureToReadBody) Close() error { return nil } + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// DumpResponse is like DumpRequest but dumps a response. 
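+// If body is false, the declared ContentLength is reported without
+// consuming the body: failureToReadBody stands in for it, and its
+// sentinel error is filtered out below.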
+func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) { + var b bytes.Buffer + save := resp.Body + savecl := resp.ContentLength + + if !body { + resp.Body = failureToReadBody{} + } else if resp.Body == nil { + resp.Body = emptyBody + } else { + save, resp.Body, err = drainBody(resp.Body) + if err != nil { + return + } + } + err = resp.Write(&b) + if err == errNoBody { + err = nil + } + resp.Body = save + resp.ContentLength = savecl + if err != nil { + return nil, err + } + return b.Bytes(), nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/dump_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,263 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httputil + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "runtime" + "strings" + "testing" +) + +type dumpTest struct { + Req http.Request + Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body + + WantDump string + WantDumpOut string + NoBody bool // if true, set DumpRequest{,Out} body to false +} + +var dumpTests = []dumpTest{ + + // HTTP/1.1 => chunked coding; body; empty trailer + { + Req: http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + TransferEncoding: []string{"chunked"}, + }, + + Body: []byte("abcdef"), + + WantDump: "GET /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + }, + + // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host, + // and doesn't add a User-Agent. + { + Req: http.Request{ + Method: "GET", + URL: mustParseURL("/foo"), + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + "X-Foo": []string{"X-Bar"}, + }, + }, + + WantDump: "GET /foo HTTP/1.0\r\n" + + "X-Foo: X-Bar\r\n\r\n", + }, + + { + Req: *mustNewRequest("GET", "http://example.com/foo", nil), + + WantDumpOut: "GET /foo HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Accept-Encoding: gzip\r\n\r\n", + }, + + // Test that an https URL doesn't try to do an SSL negotiation + // with a bytes.Buffer and hang with all goroutines not + // runnable. + { + Req: *mustNewRequest("GET", "https://example.com/foo", nil), + + WantDumpOut: "GET /foo HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Accept-Encoding: gzip\r\n\r\n", + }, + + // Request with Body, but Dump requested without it. 
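+	// The dump must still report "Content-Length: 6" even though the
+	// body itself is not written out (NoBody is set below).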
+ { + Req: http.Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "post.tld", + Path: "/", + }, + ContentLength: 6, + ProtoMajor: 1, + ProtoMinor: 1, + }, + + Body: []byte("abcdef"), + + WantDumpOut: "POST / HTTP/1.1\r\n" + + "Host: post.tld\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 6\r\n" + + "Accept-Encoding: gzip\r\n\r\n", + + NoBody: true, + }, +} + +func TestDumpRequest(t *testing.T) { + numg0 := runtime.NumGoroutine() + for i, tt := range dumpTests { + setBody := func() { + if tt.Body == nil { + return + } + switch b := tt.Body.(type) { + case []byte: + tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b)) + case func() io.ReadCloser: + tt.Req.Body = b() + } + } + setBody() + if tt.Req.Header == nil { + tt.Req.Header = make(http.Header) + } + + if tt.WantDump != "" { + setBody() + dump, err := DumpRequest(&tt.Req, !tt.NoBody) + if err != nil { + t.Errorf("DumpRequest #%d: %s", i, err) + continue + } + if string(dump) != tt.WantDump { + t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump)) + continue + } + } + + if tt.WantDumpOut != "" { + setBody() + dump, err := DumpRequestOut(&tt.Req, !tt.NoBody) + if err != nil { + t.Errorf("DumpRequestOut #%d: %s", i, err) + continue + } + if string(dump) != tt.WantDumpOut { + t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump)) + continue + } + } + } + if dg := runtime.NumGoroutine() - numg0; dg > 4 { + t.Errorf("Unexpectedly large number of new goroutines: %d new", dg) + } +} + +func chunk(s string) string { + return fmt.Sprintf("%x\r\n%s\r\n", len(s), s) +} + +func mustParseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(fmt.Sprintf("Error parsing URL %q: %v", s, err)) + } + return u +} + +func mustNewRequest(method, url string, body io.Reader) *http.Request { + req, err := http.NewRequest(method, url, body) + if err != nil { + panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err)) + } + return req +} + +var dumpResTests = []struct { + res *http.Response + body bool + want string +}{ + { + res: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 50, + Header: http.Header{ + "Foo": []string{"Bar"}, + }, + Body: ioutil.NopCloser(strings.NewReader("foo")), // shouldn't be used + }, + body: false, // to verify we see 50, not empty or 3. + want: `HTTP/1.1 200 OK +Content-Length: 50 +Foo: Bar`, + }, + + { + res: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 3, + Body: ioutil.NopCloser(strings.NewReader("foo")), + }, + body: true, + want: `HTTP/1.1 200 OK +Content-Length: 3 + +foo`, + }, + + { + res: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: -1, + Body: ioutil.NopCloser(strings.NewReader("foo")), + TransferEncoding: []string{"chunked"}, + }, + body: true, + want: `HTTP/1.1 200 OK +Transfer-Encoding: chunked + +3 +foo +0`, + }, +} + +func TestDumpResponse(t *testing.T) { + for i, tt := range dumpResTests { + gotb, err := DumpResponse(tt.res, tt.body) + if err != nil { + t.Errorf("%d. 
DumpResponse = %v", i, err) + continue + } + got := string(gotb) + got = strings.TrimSpace(got) + got = strings.Replace(got, "\r", "", -1) + + if got != tt.want { + t.Errorf("%d.\nDumpResponse got:\n%s\n\nWant:\n%s\n", i, got, tt.want) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/httputil.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,32 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httputil provides HTTP utility functions, complementing the +// more common ones in the net/http package. +package httputil + +import "io" + +// NewChunkedReader returns a new chunkedReader that translates the data read from r +// out of HTTP "chunked" format before returning it. +// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// +// NewChunkedReader is not needed by normal applications. The http package +// automatically decodes chunking when reading response bodies. +func NewChunkedReader(r io.Reader) io.Reader { + return newChunkedReader(r) +} + +// NewChunkedWriter returns a new chunkedWriter that translates writes into HTTP +// "chunked" format before writing them to w. Closing the returned chunkedWriter +// sends the final 0-length chunk that marks the end of the stream. +// +// NewChunkedWriter is not needed by normal applications. The http +// package adds chunking automatically if handlers don't set a +// Content-Length header. Using NewChunkedWriter inside a handler +// would result in double chunking or chunking with a Content-Length +// length, both of which are wrong. +func NewChunkedWriter(w io.Writer) io.WriteCloser { + return newChunkedWriter(w) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/persist.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,429 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httputil + +import ( + "bufio" + "errors" + "io" + "net" + "net/http" + "net/textproto" + "sync" +) + +var ( + ErrPersistEOF = &http.ProtocolError{ErrorString: "persistent connection closed"} + ErrClosed = &http.ProtocolError{ErrorString: "connection closed by user"} + ErrPipeline = &http.ProtocolError{ErrorString: "pipeline error"} +) + +// This is an API usage error - the local side is closed. +// ErrPersistEOF (above) reports that the remote side is closed. +var errClosed = errors.New("i/o operation on closed connection") + +// A ServerConn reads requests and sends responses over an underlying +// connection, until the HTTP keepalive logic commands an end. ServerConn +// also allows hijacking the underlying connection by calling Hijack +// to regain control over the connection. ServerConn supports pipe-lining, +// i.e. requests can be read out of sync (but in the same order) while the +// respective responses are sent. +// +// ServerConn is low-level and old. Applications should instead use Server +// in the net/http package. 
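+//
+// A rough sketch of use (conn, req and resp are illustrative):
+//
+//	sc := NewServerConn(conn, nil)
+//	req, err := sc.Read()
+//	// ... build resp, then answer in the same order ...
+//	err = sc.Write(req, resp)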
+type ServerConn struct { + lk sync.Mutex // read-write protects the following fields + c net.Conn + r *bufio.Reader + re, we error // read/write errors + lastbody io.ReadCloser + nread, nwritten int + pipereq map[*http.Request]uint + + pipe textproto.Pipeline +} + +// NewServerConn returns a new ServerConn reading and writing c. If r is not +// nil, it is the buffer to use when reading c. +// +// ServerConn is low-level and old. Applications should instead use Server +// in the net/http package. +func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn { + if r == nil { + r = bufio.NewReader(c) + } + return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)} +} + +// Hijack detaches the ServerConn and returns the underlying connection as well +// as the read-side bufio which may have some left over data. Hijack may be +// called before Read has signaled the end of the keep-alive logic. The user +// should not call Hijack while Read or Write is in progress. +func (sc *ServerConn) Hijack() (c net.Conn, r *bufio.Reader) { + sc.lk.Lock() + defer sc.lk.Unlock() + c = sc.c + r = sc.r + sc.c = nil + sc.r = nil + return +} + +// Close calls Hijack and then also closes the underlying connection +func (sc *ServerConn) Close() error { + c, _ := sc.Hijack() + if c != nil { + return c.Close() + } + return nil +} + +// Read returns the next request on the wire. An ErrPersistEOF is returned if +// it is gracefully determined that there are no more requests (e.g. after the +// first request on an HTTP/1.0 connection, or after a Connection:close on a +// HTTP/1.1 connection). +func (sc *ServerConn) Read() (req *http.Request, err error) { + + // Ensure ordered execution of Reads and Writes + id := sc.pipe.Next() + sc.pipe.StartRequest(id) + defer func() { + sc.pipe.EndRequest(id) + if req == nil { + sc.pipe.StartResponse(id) + sc.pipe.EndResponse(id) + } else { + // Remember the pipeline id of this request + sc.lk.Lock() + sc.pipereq[req] = id + sc.lk.Unlock() + } + }() + + sc.lk.Lock() + if sc.we != nil { // no point receiving if write-side broken or closed + defer sc.lk.Unlock() + return nil, sc.we + } + if sc.re != nil { + defer sc.lk.Unlock() + return nil, sc.re + } + if sc.r == nil { // connection closed by user in the meantime + defer sc.lk.Unlock() + return nil, errClosed + } + r := sc.r + lastbody := sc.lastbody + sc.lastbody = nil + sc.lk.Unlock() + + // Make sure body is fully consumed, even if user does not call body.Close + if lastbody != nil { + // body.Close is assumed to be idempotent and multiple calls to + // it should return the error that its first invocation + // returned. + err = lastbody.Close() + if err != nil { + sc.lk.Lock() + defer sc.lk.Unlock() + sc.re = err + return nil, err + } + } + + req, err = http.ReadRequest(r) + sc.lk.Lock() + defer sc.lk.Unlock() + if err != nil { + if err == io.ErrUnexpectedEOF { + // A close from the opposing client is treated as a + // graceful close, even if there was some unparse-able + // data before the close. + sc.re = ErrPersistEOF + return nil, sc.re + } else { + sc.re = err + return req, err + } + } + sc.lastbody = req.Body + sc.nread++ + if req.Close { + sc.re = ErrPersistEOF + return req, sc.re + } + return req, err +} + +// Pending returns the number of unanswered requests +// that have been received on the connection. +func (sc *ServerConn) Pending() int { + sc.lk.Lock() + defer sc.lk.Unlock() + return sc.nread - sc.nwritten +} + +// Write writes resp in response to req. 
To close the connection gracefully, set the
+// Response.Close field to true. Write should be considered operational until
+// it returns an error, regardless of any errors returned on the Read side.
+func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error {
+
+	// Retrieve the pipeline ID of this request/response pair
+	sc.lk.Lock()
+	id, ok := sc.pipereq[req]
+	delete(sc.pipereq, req)
+	if !ok {
+		sc.lk.Unlock()
+		return ErrPipeline
+	}
+	sc.lk.Unlock()
+
+	// Ensure pipeline order
+	sc.pipe.StartResponse(id)
+	defer sc.pipe.EndResponse(id)
+
+	sc.lk.Lock()
+	if sc.we != nil {
+		defer sc.lk.Unlock()
+		return sc.we
+	}
+	if sc.c == nil { // connection closed by user in the meantime
+		defer sc.lk.Unlock()
+		return ErrClosed
+	}
+	c := sc.c
+	if sc.nread <= sc.nwritten {
+		defer sc.lk.Unlock()
+		return errors.New("persist server pipe count")
+	}
+	if resp.Close {
+		// After signaling a keep-alive close, any pipelined unread
+		// requests will be lost. It is up to the user to drain them
+		// before signaling.
+		sc.re = ErrPersistEOF
+	}
+	sc.lk.Unlock()
+
+	err := resp.Write(c)
+	sc.lk.Lock()
+	defer sc.lk.Unlock()
+	if err != nil {
+		sc.we = err
+		return err
+	}
+	sc.nwritten++
+
+	return nil
+}
+
+// A ClientConn sends requests and receives headers over an underlying
+// connection, while respecting the HTTP keepalive logic. ClientConn
+// supports hijacking the connection by calling Hijack to
+// regain control of the underlying net.Conn and deal with it as desired.
+//
+// ClientConn is low-level and old. Applications should instead use
+// Client or Transport in the net/http package.
+type ClientConn struct {
+	lk              sync.Mutex // read-write protects the following fields
+	c               net.Conn
+	r               *bufio.Reader
+	re, we          error // read/write errors
+	lastbody        io.ReadCloser
+	nread, nwritten int
+	pipereq         map[*http.Request]uint
+
+	pipe     textproto.Pipeline
+	writeReq func(*http.Request, io.Writer) error
+}
+
+// NewClientConn returns a new ClientConn reading and writing c. If r is not
+// nil, it is the buffer to use when reading c.
+//
+// ClientConn is low-level and old. Applications should use Client or
+// Transport in the net/http package.
+func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
+	if r == nil {
+		r = bufio.NewReader(c)
+	}
+	return &ClientConn{
+		c:        c,
+		r:        r,
+		pipereq:  make(map[*http.Request]uint),
+		writeReq: (*http.Request).Write,
+	}
+}
+
+// NewProxyClientConn works like NewClientConn but writes Requests
+// using Request's WriteProxy method.
+//
+// New code should not use NewProxyClientConn. See Client or
+// Transport in the net/http package instead.
+func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn {
+	cc := NewClientConn(c, r)
+	cc.writeReq = (*http.Request).WriteProxy
+	return cc
+}
+
+// Hijack detaches the ClientConn and returns the underlying connection as well
+// as the read-side bufio which may have some left over data. Hijack may be
+// called before the user or Read have signaled the end of the keep-alive
+// logic. The user should not call Hijack while Read or Write is in progress.
+func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) {
+	cc.lk.Lock()
+	defer cc.lk.Unlock()
+	c = cc.c
+	r = cc.r
+	cc.c = nil
+	cc.r = nil
+	return
+}
+
+// Close calls Hijack and then also closes the underlying connection.
+func (cc *ClientConn) Close() error {
+	c, _ := cc.Hijack()
+	if c != nil {
+		return c.Close()
+	}
+	return nil
+}
+
+// Write writes a request. 
An ErrPersistEOF error is returned if the connection
+// has been closed in an HTTP keepalive sense. If req.Close equals true, the
+// keepalive connection is logically closed after this request and the opposing
+// server is informed. An ErrUnexpectedEOF indicates the remote closed the
+// underlying TCP connection, which is usually considered a graceful close.
+func (cc *ClientConn) Write(req *http.Request) (err error) {
+
+	// Ensure ordered execution of Writes
+	id := cc.pipe.Next()
+	cc.pipe.StartRequest(id)
+	defer func() {
+		cc.pipe.EndRequest(id)
+		if err != nil {
+			cc.pipe.StartResponse(id)
+			cc.pipe.EndResponse(id)
+		} else {
+			// Remember the pipeline id of this request
+			cc.lk.Lock()
+			cc.pipereq[req] = id
+			cc.lk.Unlock()
+		}
+	}()
+
+	cc.lk.Lock()
+	if cc.re != nil { // no point sending if read-side closed or broken
+		defer cc.lk.Unlock()
+		return cc.re
+	}
+	if cc.we != nil {
+		defer cc.lk.Unlock()
+		return cc.we
+	}
+	if cc.c == nil { // connection closed by user in the meantime
+		defer cc.lk.Unlock()
+		return errClosed
+	}
+	c := cc.c
+	if req.Close {
+		// We write the EOF to the write-side error, because there
+		// still might be some pipelined reads
+		cc.we = ErrPersistEOF
+	}
+	cc.lk.Unlock()
+
+	err = cc.writeReq(req, c)
+	cc.lk.Lock()
+	defer cc.lk.Unlock()
+	if err != nil {
+		cc.we = err
+		return err
+	}
+	cc.nwritten++
+
+	return nil
+}
+
+// Pending returns the number of unanswered requests
+// that have been sent on the connection.
+func (cc *ClientConn) Pending() int {
+	cc.lk.Lock()
+	defer cc.lk.Unlock()
+	return cc.nwritten - cc.nread
+}
+
+// Read reads the next response from the wire. A valid response might be
+// returned together with an ErrPersistEOF, which means that the remote
+// requested that this be the last request serviced. Read can be called
+// concurrently with Write, but not with another Read.
+func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) {
+	// Retrieve the pipeline ID of this request/response pair
+	cc.lk.Lock()
+	id, ok := cc.pipereq[req]
+	delete(cc.pipereq, req)
+	if !ok {
+		cc.lk.Unlock()
+		return nil, ErrPipeline
+	}
+	cc.lk.Unlock()
+
+	// Ensure pipeline order
+	cc.pipe.StartResponse(id)
+	defer cc.pipe.EndResponse(id)
+
+	cc.lk.Lock()
+	if cc.re != nil {
+		defer cc.lk.Unlock()
+		return nil, cc.re
+	}
+	if cc.r == nil { // connection closed by user in the meantime
+		defer cc.lk.Unlock()
+		return nil, errClosed
+	}
+	r := cc.r
+	lastbody := cc.lastbody
+	cc.lastbody = nil
+	cc.lk.Unlock()
+
+	// Make sure body is fully consumed, even if user does not call body.Close
+	if lastbody != nil {
+		// body.Close is assumed to be idempotent and multiple calls to
+		// it should return the error that its first invocation
+		// returned.
+		err = lastbody.Close()
+		if err != nil {
+			cc.lk.Lock()
+			defer cc.lk.Unlock()
+			cc.re = err
+			return nil, err
+		}
+	}
+
+	resp, err = http.ReadResponse(r, req)
+	cc.lk.Lock()
+	defer cc.lk.Unlock()
+	if err != nil {
+		cc.re = err
+		return resp, err
+	}
+	cc.lastbody = resp.Body
+
+	cc.nread++
+
+	if resp.Close {
+		cc.re = ErrPersistEOF // don't send any more requests
+		return resp, cc.re
+	}
+	return resp, err
+}
+
+// Do is a convenience method that writes a request and reads a response. 
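+// A Write error is returned immediately; the response is read only after
+// the request has been written successfully.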
+func (cc *ClientConn) Do(req *http.Request) (resp *http.Response, err error) { + err = cc.Write(req) + if err != nil { + return + } + return cc.Read(req) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,211 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP reverse proxy handler + +package httputil + +import ( + "io" + "log" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" +) + +// onExitFlushLoop is a callback set by tests to detect the state of the +// flushLoop() goroutine. +var onExitFlushLoop func() + +// ReverseProxy is an HTTP Handler that takes an incoming request and +// sends it to another server, proxying the response back to the +// client. +type ReverseProxy struct { + // Director must be a function which modifies + // the request into a new request to be sent + // using Transport. Its response is then copied + // back to the original client unmodified. + Director func(*http.Request) + + // The transport used to perform proxy requests. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper + + // FlushInterval specifies the flush interval + // to flush to the client while copying the + // response body. + // If zero, no periodic flushing is done. + FlushInterval time.Duration +} + +func singleJoiningSlash(a, b string) string { + aslash := strings.HasSuffix(a, "/") + bslash := strings.HasPrefix(b, "/") + switch { + case aslash && bslash: + return a + b[1:] + case !aslash && !bslash: + return a + "/" + b + } + return a + b +} + +// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites +// URLs to the scheme, host, and base path provided in target. If the +// target's path is "/base" and the incoming request was for "/dir", +// the target request will be for /base/dir. +func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy { + targetQuery := target.RawQuery + director := func(req *http.Request) { + req.URL.Scheme = target.Scheme + req.URL.Host = target.Host + req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) + if targetQuery == "" || req.URL.RawQuery == "" { + req.URL.RawQuery = targetQuery + req.URL.RawQuery + } else { + req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery + } + } + return &ReverseProxy{Director: director} +} + +func copyHeader(dst, src http.Header) { + for k, vv := range src { + for _, v := range vv { + dst.Add(k, v) + } + } +} + +// Hop-by-hop headers. These are removed when sent to the backend. +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html +var hopHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Authenticate", + "Proxy-Authorization", + "Te", // canonicalized version of "TE" + "Trailers", + "Transfer-Encoding", + "Upgrade", +} + +func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + transport := p.Transport + if transport == nil { + transport = http.DefaultTransport + } + + outreq := new(http.Request) + *outreq = *req // includes shallow copies of maps, but okay + + p.Director(outreq) + outreq.Proto = "HTTP/1.1" + outreq.ProtoMajor = 1 + outreq.ProtoMinor = 1 + outreq.Close = false + + // Remove hop-by-hop headers to the backend. 
Especially + // important is "Connection" because we want a persistent + // connection, regardless of what the client sent to us. This + // is modifying the same underlying map from req (shallow + // copied above) so we only copy it if necessary. + copiedHeaders := false + for _, h := range hopHeaders { + if outreq.Header.Get(h) != "" { + if !copiedHeaders { + outreq.Header = make(http.Header) + copyHeader(outreq.Header, req.Header) + copiedHeaders = true + } + outreq.Header.Del(h) + } + } + + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. + if prior, ok := outreq.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + outreq.Header.Set("X-Forwarded-For", clientIP) + } + + res, err := transport.RoundTrip(outreq) + if err != nil { + log.Printf("http: proxy error: %v", err) + rw.WriteHeader(http.StatusInternalServerError) + return + } + defer res.Body.Close() + + for _, h := range hopHeaders { + res.Header.Del(h) + } + + copyHeader(rw.Header(), res.Header) + + rw.WriteHeader(res.StatusCode) + p.copyResponse(rw, res.Body) +} + +func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) { + if p.FlushInterval != 0 { + if wf, ok := dst.(writeFlusher); ok { + mlw := &maxLatencyWriter{ + dst: wf, + latency: p.FlushInterval, + done: make(chan bool), + } + go mlw.flushLoop() + defer mlw.stop() + dst = mlw + } + } + + io.Copy(dst, src) +} + +type writeFlusher interface { + io.Writer + http.Flusher +} + +type maxLatencyWriter struct { + dst writeFlusher + latency time.Duration + + lk sync.Mutex // protects Write + Flush + done chan bool +} + +func (m *maxLatencyWriter) Write(p []byte) (int, error) { + m.lk.Lock() + defer m.lk.Unlock() + return m.dst.Write(p) +} + +func (m *maxLatencyWriter) flushLoop() { + t := time.NewTicker(m.latency) + defer t.Stop() + for { + select { + case <-m.done: + if onExitFlushLoop != nil { + onExitFlushLoop() + } + return + case <-t.C: + m.lk.Lock() + m.dst.Flush() + m.lk.Unlock() + } + } +} + +func (m *maxLatencyWriter) stop() { m.done <- true } === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/httputil/reverseproxy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,213 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Reverse proxy tests. 
+ +package httputil + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" +) + +const fakeHopHeader = "X-Fake-Hop-Header-For-Test" + +func init() { + hopHeaders = append(hopHeaders, fakeHopHeader) +} + +func TestReverseProxy(t *testing.T) { + const backendResponse = "I am the backend" + const backendStatus = 404 + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if len(r.TransferEncoding) > 0 { + t.Errorf("backend got unexpected TransferEncoding: %v", r.TransferEncoding) + } + if r.Header.Get("X-Forwarded-For") == "" { + t.Errorf("didn't get X-Forwarded-For header") + } + if c := r.Header.Get("Connection"); c != "" { + t.Errorf("handler got Connection header value %q", c) + } + if c := r.Header.Get("Upgrade"); c != "" { + t.Errorf("handler got Upgrade header value %q", c) + } + if g, e := r.Host, "some-name"; g != e { + t.Errorf("backend got Host header %q, want %q", g, e) + } + w.Header().Set("X-Foo", "bar") + w.Header().Set("Upgrade", "foo") + w.Header().Set(fakeHopHeader, "foo") + w.Header().Add("X-Multi-Value", "foo") + w.Header().Add("X-Multi-Value", "bar") + http.SetCookie(w, &http.Cookie{Name: "flavor", Value: "chocolateChip"}) + w.WriteHeader(backendStatus) + w.Write([]byte(backendResponse)) + })) + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + proxyHandler := NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + + getReq, _ := http.NewRequest("GET", frontend.URL, nil) + getReq.Host = "some-name" + getReq.Header.Set("Connection", "close") + getReq.Header.Set("Upgrade", "foo") + getReq.Close = true + res, err := http.DefaultClient.Do(getReq) + if err != nil { + t.Fatalf("Get: %v", err) + } + if g, e := res.StatusCode, backendStatus; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + if g, e := res.Header.Get("X-Foo"), "bar"; g != e { + t.Errorf("got X-Foo %q; expected %q", g, e) + } + if c := res.Header.Get(fakeHopHeader); c != "" { + t.Errorf("got %s header value %q", fakeHopHeader, c) + } + if g, e := len(res.Header["X-Multi-Value"]), 2; g != e { + t.Errorf("got %d X-Multi-Value header values; expected %d", g, e) + } + if g, e := len(res.Header["Set-Cookie"]), 1; g != e { + t.Fatalf("got %d SetCookies, want %d", g, e) + } + if cookie := res.Cookies()[0]; cookie.Name != "flavor" { + t.Errorf("unexpected cookie %q", cookie.Name) + } + bodyBytes, _ := ioutil.ReadAll(res.Body) + if g, e := string(bodyBytes), backendResponse; g != e { + t.Errorf("got body %q; expected %q", g, e) + } +} + +func TestXForwardedFor(t *testing.T) { + const prevForwardedFor = "client ip" + const backendResponse = "I am the backend" + const backendStatus = 404 + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Forwarded-For") == "" { + t.Errorf("didn't get X-Forwarded-For header") + } + if !strings.Contains(r.Header.Get("X-Forwarded-For"), prevForwardedFor) { + t.Errorf("X-Forwarded-For didn't contain prior data") + } + w.WriteHeader(backendStatus) + w.Write([]byte(backendResponse)) + })) + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + proxyHandler := NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + + getReq, _ := http.NewRequest("GET", frontend.URL, nil) + getReq.Host = "some-name" + 
getReq.Header.Set("Connection", "close") + getReq.Header.Set("X-Forwarded-For", prevForwardedFor) + getReq.Close = true + res, err := http.DefaultClient.Do(getReq) + if err != nil { + t.Fatalf("Get: %v", err) + } + if g, e := res.StatusCode, backendStatus; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + bodyBytes, _ := ioutil.ReadAll(res.Body) + if g, e := string(bodyBytes), backendResponse; g != e { + t.Errorf("got body %q; expected %q", g, e) + } +} + +var proxyQueryTests = []struct { + baseSuffix string // suffix to add to backend URL + reqSuffix string // suffix to add to frontend's request URL + want string // what backend should see for final request URL (without ?) +}{ + {"", "", ""}, + {"?sta=tic", "?us=er", "sta=tic&us=er"}, + {"", "?us=er", "us=er"}, + {"?sta=tic", "", "sta=tic"}, +} + +func TestReverseProxyQuery(t *testing.T) { + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Got-Query", r.URL.RawQuery) + w.Write([]byte("hi")) + })) + defer backend.Close() + + for i, tt := range proxyQueryTests { + backendURL, err := url.Parse(backend.URL + tt.baseSuffix) + if err != nil { + t.Fatal(err) + } + frontend := httptest.NewServer(NewSingleHostReverseProxy(backendURL)) + req, _ := http.NewRequest("GET", frontend.URL+tt.reqSuffix, nil) + req.Close = true + res, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("%d. Get: %v", i, err) + } + if g, e := res.Header.Get("X-Got-Query"), tt.want; g != e { + t.Errorf("%d. got query %q; expected %q", i, g, e) + } + res.Body.Close() + frontend.Close() + } +} + +func TestReverseProxyFlushInterval(t *testing.T) { + const expected = "hi" + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(expected)) + })) + defer backend.Close() + + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxyHandler := NewSingleHostReverseProxy(backendURL) + proxyHandler.FlushInterval = time.Microsecond + + done := make(chan bool) + onExitFlushLoop = func() { done <- true } + defer func() { onExitFlushLoop = nil }() + + frontend := httptest.NewServer(proxyHandler) + defer frontend.Close() + + req, _ := http.NewRequest("GET", frontend.URL, nil) + req.Close = true + res, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Get: %v", err) + } + defer res.Body.Close() + if bodyBytes, _ := ioutil.ReadAll(res.Body); string(bodyBytes) != expected { + t.Errorf("got body %q; expected %q", bodyBytes, expected) + } + + select { + case <-done: + // OK + case <-time.After(5 * time.Second): + t.Error("maxLatencyWriter flushLoop() never exited") + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/jar.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/jar.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/jar.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "net/url" +) + +// A CookieJar manages storage and use of cookies in HTTP requests. +// +// Implementations of CookieJar must be safe for concurrent use by multiple +// goroutines. +// +// The net/http/cookiejar package provides a CookieJar implementation. +type CookieJar interface { + // SetCookies handles the receipt of the cookies in a reply for the + // given URL. 
It may or may not choose to save the cookies, depending + // on the jar's policy and implementation. + SetCookies(u *url.URL, cookies []*Cookie) + + // Cookies returns the cookies to send in a request for the given URL. + // It is up to the implementation to honor the standard cookie use + // restrictions such as in RFC 6265. + Cookies(u *url.URL) []*Cookie +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/lex.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/lex.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/lex.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,96 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +// This file deals with lexical matters of HTTP + +var isTokenTable = [127]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +func isToken(r rune) bool { + i := int(r) + return i < len(isTokenTable) && isTokenTable[i] +} + +func isNotToken(r rune) bool { + return !isToken(r) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/lex_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "testing" +) + +func isChar(c rune) bool { return c <= 127 } + +func isCtl(c rune) bool { return c <= 31 || c == 127 } + +func isSeparator(c rune) bool { + switch c { + case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t': + return true + } + return false +} + +func TestIsToken(t *testing.T) { + for i := 0; i <= 130; i++ { + r := rune(i) + expected := isChar(r) && !isCtl(r) && !isSeparator(r) + if isToken(r) != expected { + t.Errorf("isToken(0x%x) = %v", r, !expected) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/npn_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http_test + +import ( + "bufio" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + . "net/http" + "net/http/httptest" + "strings" + "testing" +) + +func TestNextProtoUpgrade(t *testing.T) { + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "path=%s,proto=", r.URL.Path) + if r.TLS != nil { + w.Write([]byte(r.TLS.NegotiatedProtocol)) + } + if r.RemoteAddr == "" { + t.Error("request with no RemoteAddr") + } + if r.Body == nil { + t.Errorf("request with nil Body") + } + })) + ts.TLS = &tls.Config{ + NextProtos: []string{"unhandled-proto", "tls-0.9"}, + } + ts.Config.TLSNextProto = map[string]func(*Server, *tls.Conn, Handler){ + "tls-0.9": handleTLSProtocol09, + } + ts.StartTLS() + defer ts.Close() + + tr := newTLSTransport(t, ts) + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + // Normal request, without NPN. + { + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if want := "path=/,proto="; string(body) != want { + t.Errorf("plain request = %q; want %q", body, want) + } + } + + // Request to an advertised but unhandled NPN protocol. + // Server will hang up. + { + tr.CloseIdleConnections() + tr.TLSClientConfig.NextProtos = []string{"unhandled-proto"} + _, err := c.Get(ts.URL) + if err == nil { + t.Errorf("expected error on unhandled-proto request") + } + } + + // Request using the "tls-0.9" protocol, which we register here. + // It is HTTP/0.9 over TLS. + { + tlsConfig := newTLSTransport(t, ts).TLSClientConfig + tlsConfig.NextProtos = []string{"tls-0.9"} + conn, err := tls.Dial("tcp", ts.Listener.Addr().String(), tlsConfig) + if err != nil { + t.Fatal(err) + } + conn.Write([]byte("GET /foo\n")) + body, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + if want := "path=/foo,proto=tls-0.9"; string(body) != want { + t.Errorf("plain request = %q; want %q", body, want) + } + } +} + +// handleTLSProtocol09 implements the HTTP/0.9 protocol over TLS, for the +// TestNextProtoUpgrade test. +func handleTLSProtocol09(srv *Server, conn *tls.Conn, h Handler) { + br := bufio.NewReader(conn) + line, err := br.ReadString('\n') + if err != nil { + return + } + line = strings.TrimSpace(line) + path := strings.TrimPrefix(line, "GET ") + if path == line { + return + } + req, _ := NewRequest("GET", path, nil) + req.Proto = "HTTP/0.9" + req.ProtoMajor = 0 + req.ProtoMinor = 9 + rw := &http09Writer{conn, make(Header)} + h.ServeHTTP(rw, req) +} + +type http09Writer struct { + io.Writer + h Header +} + +func (w http09Writer) Header() Header { return w.h } +func (w http09Writer) WriteHeader(int) {} // no headers === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/pprof' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/pprof/pprof.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,205 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pprof serves via its HTTP server runtime profiling data +// in the format expected by the pprof visualization tool. +// For more information about pprof, see +// http://code.google.com/p/google-perftools/. 
+// +// The package is typically only imported for the side effect of +// registering its HTTP handlers. +// The handled paths all begin with /debug/pprof/. +// +// To use pprof, link this package into your program: +// import _ "net/http/pprof" +// +// If your application is not already running an http server, you +// need to start one. Add "net/http" and "log" to your imports and +// the following code to your main function: +// +// go func() { +// log.Println(http.ListenAndServe("localhost:6060", nil)) +// }() +// +// Then use the pprof tool to look at the heap profile: +// +// go tool pprof http://localhost:6060/debug/pprof/heap +// +// Or to look at a 30-second CPU profile: +// +// go tool pprof http://localhost:6060/debug/pprof/profile +// +// Or to look at the goroutine blocking profile: +// +// go tool pprof http://localhost:6060/debug/pprof/block +// +// To view all available profiles, open http://localhost:6060/debug/pprof/ +// in your browser. +// +// For a study of the facility in action, visit +// +// http://blog.golang.org/2011/06/profiling-go-programs.html +// +package pprof + +import ( + "bufio" + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "os" + "runtime" + "runtime/pprof" + "strconv" + "strings" + "time" +) + +func init() { + http.Handle("/debug/pprof/", http.HandlerFunc(Index)) + http.Handle("/debug/pprof/cmdline", http.HandlerFunc(Cmdline)) + http.Handle("/debug/pprof/profile", http.HandlerFunc(Profile)) + http.Handle("/debug/pprof/symbol", http.HandlerFunc(Symbol)) +} + +// Cmdline responds with the running program's +// command line, with arguments separated by NUL bytes. +// The package initialization registers it as /debug/pprof/cmdline. +func Cmdline(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, strings.Join(os.Args, "\x00")) +} + +// Profile responds with the pprof-formatted cpu profile. +// The package initialization registers it as /debug/pprof/profile. +func Profile(w http.ResponseWriter, r *http.Request) { + sec, _ := strconv.ParseInt(r.FormValue("seconds"), 10, 64) + if sec == 0 { + sec = 30 + } + + // Set Content Type assuming StartCPUProfile will work, + // because if it does it starts writing. + w.Header().Set("Content-Type", "application/octet-stream") + if err := pprof.StartCPUProfile(w); err != nil { + // StartCPUProfile failed, so no writes yet. + // Can change header back to text content + // and send error code. + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err) + return + } + time.Sleep(time.Duration(sec) * time.Second) + pprof.StopCPUProfile() +} + +// Symbol looks up the program counters listed in the request, +// responding with a table mapping program counters to function names. +// The package initialization registers it as /debug/pprof/symbol. +func Symbol(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + + // We have to read the whole POST body before + // writing any output. Buffer the output here. + var buf bytes.Buffer + + // We don't know how many symbols we have, but we + // do have symbol information. Pprof only cares whether + // this number is 0 (no symbols available) or > 0. 
+ fmt.Fprintf(&buf, "num_symbols: 1\n") + + var b *bufio.Reader + if r.Method == "POST" { + b = bufio.NewReader(r.Body) + } else { + b = bufio.NewReader(strings.NewReader(r.URL.RawQuery)) + } + + for { + word, err := b.ReadSlice('+') + if err == nil { + word = word[0 : len(word)-1] // trim + + } + pc, _ := strconv.ParseUint(string(word), 0, 64) + if pc != 0 { + f := runtime.FuncForPC(uintptr(pc)) + if f != nil { + fmt.Fprintf(&buf, "%#x %s\n", pc, f.Name()) + } + } + + // Wait until here to check for err; the last + // symbol will have an err because it doesn't end in +. + if err != nil { + if err != io.EOF { + fmt.Fprintf(&buf, "reading request: %v\n", err) + } + break + } + } + + w.Write(buf.Bytes()) +} + +// Handler returns an HTTP handler that serves the named profile. +func Handler(name string) http.Handler { + return handler(name) +} + +type handler string + +func (name handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + debug, _ := strconv.Atoi(r.FormValue("debug")) + p := pprof.Lookup(string(name)) + if p == nil { + w.WriteHeader(404) + fmt.Fprintf(w, "Unknown profile: %s\n", name) + return + } + p.WriteTo(w, debug) + return +} + +// Index responds with the pprof-formatted profile named by the request. +// For example, "/debug/pprof/heap" serves the "heap" profile. +// Index responds to a request for "/debug/pprof/" with an HTML page +// listing the available profiles. +func Index(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, "/debug/pprof/") { + name := strings.TrimPrefix(r.URL.Path, "/debug/pprof/") + if name != "" { + handler(name).ServeHTTP(w, r) + return + } + } + + profiles := pprof.Profiles() + if err := indexTmpl.Execute(w, profiles); err != nil { + log.Print(err) + } +} + +var indexTmpl = template.Must(template.New("index").Parse(`<html> +<head> +<title>/debug/pprof/</title> +</head> +<body> +/debug/pprof/<br> +<br> +profiles:<br> +<table> +{{range .}} +<tr><td align=right>{{.Count}}<td><a href="{{.Name}}?debug=1">{{.Name}}</a> +{{end}} +</table> +<br> +<a href="goroutine?debug=2">full goroutine stack dump</a><br> +</body> +</html> +`))
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/proxy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "net/url" + "os" + "testing" +) + +// TODO(mattn): +// test ProxyAuth + +var UseProxyTests = []struct { + host string + match bool +}{ + // Never proxy localhost: + {"localhost:80", false}, + {"127.0.0.1", false}, + {"127.0.0.2", false}, + {"[::1]", false}, + {"[::2]", true}, // not a loopback address + + {"barbaz.net", false}, // match as .barbaz.net + {"foobar.com", false}, // have a port but match + {"foofoobar.com", true}, // not match as a part of foobar.com + {"baz.com", true}, // not match as a part of barbaz.com + {"localhost.net", true}, // not match as suffix of address + {"local.localhost", true}, // not match as prefix of address + {"barbarbaz.net", true}, // not match because NO_PROXY has a '.' + {"www.foobar.com", false}, // match because NO_PROXY includes "foobar.com" +} + +func TestUseProxy(t *testing.T) { + ResetProxyEnv() + os.Setenv("NO_PROXY", "foobar.com, .barbaz.net") + for _, test := range UseProxyTests { + if useProxy(test.host+":80") != test.match { + t.Errorf("useProxy(%v) = %v, want %v", test.host, !test.match, test.match) + } + } +} + +var cacheKeysTests = []struct { + proxy string + scheme string + addr string + key string +}{ + {"", "http", "foo.com", "|http|foo.com"}, + {"", "https", "foo.com", "|https|foo.com"}, + {"http://foo.com", "http", "foo.com", "http://foo.com|http|"}, + {"http://foo.com", "https", "foo.com", "http://foo.com|https|foo.com"}, +} + +func TestCacheKeys(t *testing.T) { + for _, tt := range cacheKeysTests { + var proxy *url.URL + if tt.proxy != "" { + u, err := url.Parse(tt.proxy) + if err != nil { + t.Fatal(err) + } + proxy = u + } + cm := connectMethod{proxy, tt.scheme, tt.addr} + if got := cm.key().String(); got != tt.key { + t.Fatalf("{%q, %q, %q} cache key = %q; want %q", tt.proxy, tt.scheme, tt.addr, got, tt.key) + } + } +} + +func ResetProxyEnv() { + for _, v := range []string{"HTTP_PROXY", "http_proxy", "NO_PROXY", "no_proxy"} { + os.Setenv(v, "") + } + ResetCachedEnvironment() +}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/race.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/race.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/race.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build race + +package http + +func init() { + raceEnabled = true +}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/range_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/range_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/range_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
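+
+// A note on reading the table below: each httpRange is {start, length}, so
+// against a 10-byte entity "bytes=-5" (the final five bytes) parses to
+// {5, 5}, "bytes=0-" parses to {0, 10}, and malformed or unsatisfiable
+// specifications parse to nil.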
+ +package http + +import ( + "testing" +) + +var ParseRangeTests = []struct { + s string + length int64 + r []httpRange +}{ + {"", 0, nil}, + {"", 1000, nil}, + {"foo", 0, nil}, + {"bytes=", 0, nil}, + {"bytes=7", 10, nil}, + {"bytes= 7 ", 10, nil}, + {"bytes=1-", 0, nil}, + {"bytes=5-4", 10, nil}, + {"bytes=0-2,5-4", 10, nil}, + {"bytes=2-5,4-3", 10, nil}, + {"bytes=--5,4--3", 10, nil}, + {"bytes=A-", 10, nil}, + {"bytes=A- ", 10, nil}, + {"bytes=A-Z", 10, nil}, + {"bytes= -Z", 10, nil}, + {"bytes=5-Z", 10, nil}, + {"bytes=Ran-dom, garbage", 10, nil}, + {"bytes=0x01-0x02", 10, nil}, + {"bytes= ", 10, nil}, + {"bytes= , , , ", 10, nil}, + + {"bytes=0-9", 10, []httpRange{{0, 10}}}, + {"bytes=0-", 10, []httpRange{{0, 10}}}, + {"bytes=5-", 10, []httpRange{{5, 5}}}, + {"bytes=0-20", 10, []httpRange{{0, 10}}}, + {"bytes=15-,0-5", 10, nil}, + {"bytes=1-2,5-", 10, []httpRange{{1, 2}, {5, 5}}}, + {"bytes=-2 , 7-", 11, []httpRange{{9, 2}, {7, 4}}}, + {"bytes=0-0 ,2-2, 7-", 11, []httpRange{{0, 1}, {2, 1}, {7, 4}}}, + {"bytes=-5", 10, []httpRange{{5, 5}}}, + {"bytes=-15", 10, []httpRange{{0, 10}}}, + {"bytes=0-499", 10000, []httpRange{{0, 500}}}, + {"bytes=500-999", 10000, []httpRange{{500, 500}}}, + {"bytes=-500", 10000, []httpRange{{9500, 500}}}, + {"bytes=9500-", 10000, []httpRange{{9500, 500}}}, + {"bytes=0-0,-1", 10000, []httpRange{{0, 1}, {9999, 1}}}, + {"bytes=500-600,601-999", 10000, []httpRange{{500, 101}, {601, 399}}}, + {"bytes=500-700,601-999", 10000, []httpRange{{500, 201}, {601, 399}}}, + + // Match Apache laxity: + {"bytes= 1 -2 , 4- 5, 7 - 8 , ,,", 11, []httpRange{{1, 2}, {4, 2}, {7, 2}}}, +} + +func TestParseRange(t *testing.T) { + for _, test := range ParseRangeTests { + r := test.r + ranges, err := parseRange(test.s, test.length) + if err != nil && r != nil { + t.Errorf("parseRange(%q) returned error %q", test.s, err) + } + if len(ranges) != len(r) { + t.Errorf("len(parseRange(%q)) = %d, want %d", test.s, len(ranges), len(r)) + continue + } + for i := range r { + if ranges[i].start != r[i].start { + t.Errorf("parseRange(%q)[%d].start = %d, want %d", test.s, i, ranges[i].start, r[i].start) + } + if ranges[i].length != r[i].length { + t.Errorf("parseRange(%q)[%d].length = %d, want %d", test.s, i, ranges[i].length, r[i].length) + } + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/readrequest_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,331 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/url" + "reflect" + "testing" +) + +type reqTest struct { + Raw string + Req *Request + Body string + Trailer Header + Error string +} + +var noError = "" +var noBody = "" +var noTrailer Header = nil + +var reqTests = []reqTest{ + // Baseline test; All Request fields included for template use + { + "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + + "Host: www.techcrunch.com\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Keep-Alive: 300\r\n" + + "Content-Length: 7\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n" + + "abcdef\n???", + + &Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.techcrunch.com", + Path: "/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, + "Accept-Language": {"en-us,en;q=0.5"}, + "Accept-Encoding": {"gzip,deflate"}, + "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"}, + "Keep-Alive": {"300"}, + "Proxy-Connection": {"keep-alive"}, + "Content-Length": {"7"}, + "User-Agent": {"Fake"}, + }, + Close: false, + ContentLength: 7, + Host: "www.techcrunch.com", + RequestURI: "http://www.techcrunch.com/", + }, + + "abcdef\n", + + noTrailer, + noError, + }, + + // GET request with no body (the normal case) + { + "GET / HTTP/1.1\r\n" + + "Host: foo.com\r\n\r\n", + + &Request{ + Method: "GET", + URL: &url.URL{ + Path: "/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "foo.com", + RequestURI: "/", + }, + + noBody, + noTrailer, + noError, + }, + + // Tests that we don't parse a path that looks like a + // scheme-relative URI as a scheme-relative URI. 
+ { + "GET //user@host/is/actually/a/path/ HTTP/1.1\r\n" + + "Host: test\r\n\r\n", + + &Request{ + Method: "GET", + URL: &url.URL{ + Path: "//user@host/is/actually/a/path/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "test", + RequestURI: "//user@host/is/actually/a/path/", + }, + + noBody, + noTrailer, + noError, + }, + + // Tests a bogus abs_path on the Request-Line (RFC 2616 section 5.1.2) + { + "GET ../../../../etc/passwd HTTP/1.1\r\n" + + "Host: test\r\n\r\n", + nil, + noBody, + noTrailer, + "parse ../../../../etc/passwd: invalid URI for request", + }, + + // Tests missing URL: + { + "GET HTTP/1.1\r\n" + + "Host: test\r\n\r\n", + nil, + noBody, + noTrailer, + "parse : empty url", + }, + + // Tests chunked body with trailer: + { + "POST / HTTP/1.1\r\n" + + "Host: foo.com\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "3\r\nfoo\r\n" + + "3\r\nbar\r\n" + + "0\r\n" + + "Trailer-Key: Trailer-Value\r\n" + + "\r\n", + &Request{ + Method: "POST", + URL: &url.URL{ + Path: "/", + }, + TransferEncoding: []string{"chunked"}, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + ContentLength: -1, + Host: "foo.com", + RequestURI: "/", + }, + + "foobar", + Header{ + "Trailer-Key": {"Trailer-Value"}, + }, + noError, + }, + + // CONNECT request with domain name: + { + "CONNECT www.google.com:443 HTTP/1.1\r\n\r\n", + + &Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "www.google.com:443", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "www.google.com:443", + RequestURI: "www.google.com:443", + }, + + noBody, + noTrailer, + noError, + }, + + // CONNECT request with IP address: + { + "CONNECT 127.0.0.1:6060 HTTP/1.1\r\n\r\n", + + &Request{ + Method: "CONNECT", + URL: &url.URL{ + Host: "127.0.0.1:6060", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "127.0.0.1:6060", + RequestURI: "127.0.0.1:6060", + }, + + noBody, + noTrailer, + noError, + }, + + // CONNECT request for RPC: + { + "CONNECT /_goRPC_ HTTP/1.1\r\n\r\n", + + &Request{ + Method: "CONNECT", + URL: &url.URL{ + Path: "/_goRPC_", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: false, + ContentLength: 0, + Host: "", + RequestURI: "/_goRPC_", + }, + + noBody, + noTrailer, + noError, + }, + + // SSDP Notify request. golang.org/issue/3692 + { + "NOTIFY * HTTP/1.1\r\nServer: foo\r\n\r\n", + &Request{ + Method: "NOTIFY", + URL: &url.URL{ + Path: "*", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Server": []string{"foo"}, + }, + Close: false, + ContentLength: 0, + RequestURI: "*", + }, + + noBody, + noTrailer, + noError, + }, + + // OPTIONS request. 
Similar to golang.org/issue/3692 + { + "OPTIONS * HTTP/1.1\r\nServer: foo\r\n\r\n", + &Request{ + Method: "OPTIONS", + URL: &url.URL{ + Path: "*", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Server": []string{"foo"}, + }, + Close: false, + ContentLength: 0, + RequestURI: "*", + }, + + noBody, + noTrailer, + noError, + }, +} + +func TestReadRequest(t *testing.T) { + for i := range reqTests { + tt := &reqTests[i] + var braw bytes.Buffer + braw.WriteString(tt.Raw) + req, err := ReadRequest(bufio.NewReader(&braw)) + if err != nil { + if err.Error() != tt.Error { + t.Errorf("#%d: error %q, want error %q", i, err.Error(), tt.Error) + } + continue + } + rbody := req.Body + req.Body = nil + diff(t, fmt.Sprintf("#%d Request", i), req, tt.Req) + var bout bytes.Buffer + if rbody != nil { + _, err := io.Copy(&bout, rbody) + if err != nil { + t.Fatalf("#%d. copying body: %v", i, err) + } + rbody.Close() + } + body := bout.String() + if body != tt.Body { + t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) + } + if !reflect.DeepEqual(tt.Trailer, req.Trailer) { + t.Errorf("#%d. Trailers differ.\n got: %v\nwant: %v", i, req.Trailer, tt.Trailer) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/request.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/request.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/request.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,875 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP Request reading and parsing. + +package http + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net/textproto" + "net/url" + "strconv" + "strings" + "sync" +) + +const ( + maxValueLength = 4096 + maxHeaderLines = 1024 + chunkSize = 4 << 10 // 4 KB chunks + defaultMaxMemory = 32 << 20 // 32 MB +) + +// ErrMissingFile is returned by FormFile when the provided file field name +// is either not present in the request or not a file field. +var ErrMissingFile = errors.New("http: no such file") + +// HTTP request parsing errors. +type ProtocolError struct { + ErrorString string +} + +func (err *ProtocolError) Error() string { return err.ErrorString } + +var ( + ErrHeaderTooLong = &ProtocolError{"header too long"} + ErrShortBody = &ProtocolError{"entity body too short"} + ErrNotSupported = &ProtocolError{"feature not supported"} + ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"} + ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"} + ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"} + ErrMissingBoundary = &ProtocolError{"no multipart boundary param in Content-Type"} +) + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// Headers that Request.Write handles itself and should be skipped. +var reqWriteExcludeHeader = map[string]bool{ + "Host": true, // not in Header map anyway + "User-Agent": true, + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// A Request represents an HTTP request received by a server +// or to be sent by a client. +// +// The field semantics differ slightly between client and server +// usage. 
In addition to the notes on the fields below, see the +// documentation for Request.Write and RoundTripper. +type Request struct { + // Method specifies the HTTP method (GET, POST, PUT, etc.). + // For client requests an empty string means GET. + Method string + + // URL specifies either the URI being requested (for server + // requests) or the URL to access (for client requests). + // + // For server requests the URL is parsed from the URI + // supplied on the Request-Line as stored in RequestURI. For + // most requests, fields other than Path and RawQuery will be + // empty. (See RFC 2616, Section 5.1.2) + // + // For client requests, the URL's Host specifies the server to + // connect to, while the Request's Host field optionally + // specifies the Host header value to send in the HTTP + // request. + URL *url.URL + + // The protocol version for incoming requests. + // Client requests always use HTTP/1.1. + Proto string // "HTTP/1.0" + ProtoMajor int // 1 + ProtoMinor int // 0 + + // A header maps request lines to their values. + // If the header says + // + // accept-encoding: gzip, deflate + // Accept-Language: en-us + // Connection: keep-alive + // + // then + // + // Header = map[string][]string{ + // "Accept-Encoding": {"gzip, deflate"}, + // "Accept-Language": {"en-us"}, + // "Connection": {"keep-alive"}, + // } + // + // HTTP defines that header names are case-insensitive. + // The request parser implements this by canonicalizing the + // name, making the first character and any characters + // following a hyphen uppercase and the rest lowercase. + // + // For client requests certain headers are automatically + // added and may override values in Header. + // + // See the documentation for the Request.Write method. + Header Header + + // Body is the request's body. + // + // For client requests a nil body means the request has no + // body, such as a GET request. The HTTP Client's Transport + // is responsible for calling the Close method. + // + // For server requests the Request Body is always non-nil + // but will return EOF immediately when no body is present. + // The Server will close the request body. The ServeHTTP + // Handler does not need to. + Body io.ReadCloser + + // ContentLength records the length of the associated content. + // The value -1 indicates that the length is unknown. + // Values >= 0 indicate that the given number of bytes may + // be read from Body. + // For client requests, a value of 0 means unknown if Body is not nil. + ContentLength int64 + + // TransferEncoding lists the transfer encodings from outermost to + // innermost. An empty list denotes the "identity" encoding. + // TransferEncoding can usually be ignored; chunked encoding is + // automatically added and removed as necessary when sending and + // receiving requests. + TransferEncoding []string + + // Close indicates whether to close the connection after + // replying to this request (for servers) or after sending + // the request (for clients). + Close bool + + // For server requests Host specifies the host on which the + // URL is sought. Per RFC 2616, this is either the value of + // the "Host" header or the host name given in the URL itself. + // It may be of the form "host:port". + // + // For client requests Host optionally overrides the Host + // header to send. If empty, the Request.Write method uses + // the value of URL.Host. + Host string + + // Form contains the parsed form data, including both the URL + // field's query parameters and the POST or PUT form data. 
+ // This field is only available after ParseForm is called. + // The HTTP client ignores Form and uses Body instead. + Form url.Values + + // PostForm contains the parsed form data from POST or PUT + // body parameters. + // This field is only available after ParseForm is called. + // The HTTP client ignores PostForm and uses Body instead. + PostForm url.Values + + // MultipartForm is the parsed multipart form, including file uploads. + // This field is only available after ParseMultipartForm is called. + // The HTTP client ignores MultipartForm and uses Body instead. + MultipartForm *multipart.Form + + // Trailer specifies additional headers that are sent after the request + // body. + // + // For server requests the Trailer map initially contains only the + // trailer keys, with nil values. (The client declares which trailers it + // will later send.) While the handler is reading from Body, it must + // not reference Trailer. After reading from Body returns EOF, Trailer + // can be read again and will contain non-nil values, if they were sent + // by the client. + // + // For client requests Trailer must be initialized to a map containing + // the trailer keys to later send. The values may be nil or their final + // values. The ContentLength must be 0 or -1, to send a chunked request. + // After the HTTP request is sent the map values can be updated while + // the request body is read. Once the body returns EOF, the caller must + // not mutate Trailer. + // + // Few HTTP clients, servers, or proxies support HTTP trailers. + Trailer Header + + // RemoteAddr allows HTTP servers and other software to record + // the network address that sent the request, usually for + // logging. This field is not filled in by ReadRequest and + // has no defined format. The HTTP server in this package + // sets RemoteAddr to an "IP:port" address before invoking a + // handler. + // This field is ignored by the HTTP client. + RemoteAddr string + + // RequestURI is the unmodified Request-URI of the + // Request-Line (RFC 2616, Section 5.1) as sent by the client + // to a server. Usually the URL field should be used instead. + // It is an error to set this field in an HTTP client request. + RequestURI string + + // TLS allows HTTP servers and other software to record + // information about the TLS connection on which the request + // was received. This field is not filled in by ReadRequest. + // The HTTP server in this package sets the field for + // TLS-enabled connections before invoking a handler; + // otherwise it leaves the field nil. + // This field is ignored by the HTTP client. + TLS *tls.ConnectionState +} + +// ProtoAtLeast reports whether the HTTP protocol used +// in the request is at least major.minor. +func (r *Request) ProtoAtLeast(major, minor int) bool { + return r.ProtoMajor > major || + r.ProtoMajor == major && r.ProtoMinor >= minor +} + +// UserAgent returns the client's User-Agent, if sent in the request. +func (r *Request) UserAgent() string { + return r.Header.Get("User-Agent") +} + +// Cookies parses and returns the HTTP cookies sent with the request. +func (r *Request) Cookies() []*Cookie { + return readCookies(r.Header, "") +} + +var ErrNoCookie = errors.New("http: named cookie not present") + +// Cookie returns the named cookie provided in the request or +// ErrNoCookie if not found. +func (r *Request) Cookie(name string) (*Cookie, error) { + for _, c := range readCookies(r.Header, name) { + return c, nil + } + return nil, ErrNoCookie +} + +// AddCookie adds a cookie to the request. 
Per RFC 6265 section 5.4, +// AddCookie does not attach more than one Cookie header field. That +// means all cookies, if any, are written into the same line, +// separated by semicolon. +func (r *Request) AddCookie(c *Cookie) { + s := fmt.Sprintf("%s=%s", sanitizeCookieName(c.Name), sanitizeCookieValue(c.Value)) + if c := r.Header.Get("Cookie"); c != "" { + r.Header.Set("Cookie", c+"; "+s) + } else { + r.Header.Set("Cookie", s) + } +} + +// Referer returns the referring URL, if sent in the request. +// +// Referer is misspelled as in the request itself, a mistake from the +// earliest days of HTTP. This value can also be fetched from the +// Header map as Header["Referer"]; the benefit of making it available +// as a method is that the compiler can diagnose programs that use the +// alternate (correct English) spelling req.Referrer() but cannot +// diagnose programs that use Header["Referrer"]. +func (r *Request) Referer() string { + return r.Header.Get("Referer") +} + +// multipartByReader is a sentinel value. +// Its presence in Request.MultipartForm indicates that parsing of the request +// body has been handed off to a MultipartReader instead of ParseMultipartForm. +var multipartByReader = &multipart.Form{ + Value: make(map[string][]string), + File: make(map[string][]*multipart.FileHeader), +} + +// MultipartReader returns a MIME multipart reader if this is a +// multipart/form-data POST request, else returns nil and an error. +// Use this function instead of ParseMultipartForm to +// process the request body as a stream. +func (r *Request) MultipartReader() (*multipart.Reader, error) { + if r.MultipartForm == multipartByReader { + return nil, errors.New("http: MultipartReader called twice") + } + if r.MultipartForm != nil { + return nil, errors.New("http: multipart handled by ParseMultipartForm") + } + r.MultipartForm = multipartByReader + return r.multipartReader() +} + +func (r *Request) multipartReader() (*multipart.Reader, error) { + v := r.Header.Get("Content-Type") + if v == "" { + return nil, ErrNotMultipart + } + d, params, err := mime.ParseMediaType(v) + if err != nil || d != "multipart/form-data" { + return nil, ErrNotMultipart + } + boundary, ok := params["boundary"] + if !ok { + return nil, ErrMissingBoundary + } + return multipart.NewReader(r.Body, boundary), nil +} + +// Return value if nonempty, def otherwise. +func valueOrDefault(value, def string) string { + if value != "" { + return value + } + return def +} + +// NOTE: This is not intended to reflect the actual Go version being used. +// It was changed from "Go http package" to "Go 1.1 package http" at the +// time of the Go 1.1 release because the former User-Agent had ended up +// on a blacklist for some intrusion detection systems. +// See https://codereview.appspot.com/7532043. +const defaultUserAgent = "Go 1.1 package http" + +// Write writes an HTTP/1.1 request -- header and body -- in wire format. +// This method consults the following fields of the request: +// Host +// URL +// Method (defaults to "GET") +// Header +// ContentLength +// TransferEncoding +// Body +// +// If Body is present, Content-Length is <= 0 and TransferEncoding +// hasn't been set to "identity", Write adds "Transfer-Encoding: +// chunked" to the header. Body is closed after it is sent. +func (r *Request) Write(w io.Writer) error { + return r.write(w, false, nil) +} + +// WriteProxy is like Write but writes the request in the form +// expected by an HTTP proxy.
In particular, WriteProxy writes the +// initial Request-URI line of the request with an absolute URI, per +// section 5.1.2 of RFC 2616, including the scheme and host. +// In either case, WriteProxy also writes a Host header, using +// either r.Host or r.URL.Host. +func (r *Request) WriteProxy(w io.Writer) error { + return r.write(w, true, nil) +} + +// extraHeaders may be nil +func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error { + host := req.Host + if host == "" { + if req.URL == nil { + return errors.New("http: Request.Write on Request with no Host or URL set") + } + host = req.URL.Host + } + + ruri := req.URL.RequestURI() + if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" { + ruri = req.URL.Scheme + "://" + host + ruri + } else if req.Method == "CONNECT" && req.URL.Path == "" { + // CONNECT requests normally give just the host and port, not a full URL. + ruri = host + } + // TODO(bradfitz): escape at least newlines in ruri? + + // Wrap the writer in a bufio Writer if it's not already buffered. + // Don't always call NewWriter, as that forces a bytes.Buffer + // and other small bufio Writers to have a minimum 4k buffer + // size. + var bw *bufio.Writer + if _, ok := w.(io.ByteWriter); !ok { + bw = bufio.NewWriter(w) + w = bw + } + + fmt.Fprintf(w, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri) + + // Header lines + fmt.Fprintf(w, "Host: %s\r\n", host) + + // Use the defaultUserAgent unless the Header contains one, which + // may be blank to not send the header. + userAgent := defaultUserAgent + if req.Header != nil { + if ua := req.Header["User-Agent"]; len(ua) > 0 { + userAgent = ua[0] + } + } + if userAgent != "" { + fmt.Fprintf(w, "User-Agent: %s\r\n", userAgent) + } + + // Process Body,ContentLength,Close,Trailer + tw, err := newTransferWriter(req) + if err != nil { + return err + } + err = tw.WriteHeader(w) + if err != nil { + return err + } + + err = req.Header.WriteSubset(w, reqWriteExcludeHeader) + if err != nil { + return err + } + + if extraHeaders != nil { + err = extraHeaders.Write(w) + if err != nil { + return err + } + } + + io.WriteString(w, "\r\n") + + // Write body and trailer + err = tw.WriteBody(w) + if err != nil { + return err + } + + if bw != nil { + return bw.Flush() + } + return nil +} + +// ParseHTTPVersion parses a HTTP version string. +// "HTTP/1.0" returns (1, 0, true). +func ParseHTTPVersion(vers string) (major, minor int, ok bool) { + const Big = 1000000 // arbitrary upper bound + switch vers { + case "HTTP/1.1": + return 1, 1, true + case "HTTP/1.0": + return 1, 0, true + } + if !strings.HasPrefix(vers, "HTTP/") { + return 0, 0, false + } + dot := strings.Index(vers, ".") + if dot < 0 { + return 0, 0, false + } + major, err := strconv.Atoi(vers[5:dot]) + if err != nil || major < 0 || major > Big { + return 0, 0, false + } + minor, err = strconv.Atoi(vers[dot+1:]) + if err != nil || minor < 0 || minor > Big { + return 0, 0, false + } + return major, minor, true +} + +// NewRequest returns a new Request given a method, URL, and optional body. +// +// If the provided body is also an io.Closer, the returned +// Request.Body is set to body and will be closed by the Client +// methods Do, Post, and PostForm, and Transport.RoundTrip. 
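+//
+// A minimal usage sketch (the URL, header, and payload below are
+// illustrative, not part of the package):
+//
+//	body := strings.NewReader("z=post")
+//	req, err := NewRequest("POST", "http://example.com/search", body)
+//	if err != nil {
+//		// NewRequest fails only if the URL does not parse.
+//	}
+//	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+//	// ContentLength was set automatically because body is a *strings.Reader.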
+func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + req := &Request{ + Method: method, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(Header), + Body: rc, + Host: u.Host, + } + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + req.ContentLength = int64(v.Len()) + case *bytes.Reader: + req.ContentLength = int64(v.Len()) + case *strings.Reader: + req.ContentLength = int64(v.Len()) + } + } + + return req, nil +} + +// SetBasicAuth sets the request's Authorization header to use HTTP +// Basic Authentication with the provided username and password. +// +// With HTTP Basic Authentication the provided username and password +// are not encrypted. +func (r *Request) SetBasicAuth(username, password string) { + r.Header.Set("Authorization", "Basic "+basicAuth(username, password)) +} + +// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts. +func parseRequestLine(line string) (method, requestURI, proto string, ok bool) { + s1 := strings.Index(line, " ") + s2 := strings.Index(line[s1+1:], " ") + if s1 < 0 || s2 < 0 { + return + } + s2 += s1 + 1 + return line[:s1], line[s1+1 : s2], line[s2+1:], true +} + +var textprotoReaderPool sync.Pool + +func newTextprotoReader(br *bufio.Reader) *textproto.Reader { + if v := textprotoReaderPool.Get(); v != nil { + tr := v.(*textproto.Reader) + tr.R = br + return tr + } + return textproto.NewReader(br) +} + +func putTextprotoReader(r *textproto.Reader) { + r.R = nil + textprotoReaderPool.Put(r) +} + +// ReadRequest reads and parses a request from b. +func ReadRequest(b *bufio.Reader) (req *Request, err error) { + + tp := newTextprotoReader(b) + req = new(Request) + + // First line: GET /index.html HTTP/1.0 + var s string + if s, err = tp.ReadLine(); err != nil { + return nil, err + } + defer func() { + putTextprotoReader(tp) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + }() + + var ok bool + req.Method, req.RequestURI, req.Proto, ok = parseRequestLine(s) + if !ok { + return nil, &badStringError{"malformed HTTP request", s} + } + rawurl := req.RequestURI + if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok { + return nil, &badStringError{"malformed HTTP version", req.Proto} + } + + // CONNECT requests are used two different ways, and neither uses a full URL: + // The standard use is to tunnel HTTPS through an HTTP proxy. + // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is + // just the authority section of a URL. This information should go in req.URL.Host. + // + // The net/rpc package also uses CONNECT, but there the parameter is a path + // that starts with a slash. It can be parsed with the regular URL parser, + // and the path will end up in req.URL.Path, where it needs to be in order for + // RPC to work. + justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/") + if justAuthority { + rawurl = "http://" + rawurl + } + + if req.URL, err = url.ParseRequestURI(rawurl); err != nil { + return nil, err + } + + if justAuthority { + // Strip the bogus "http://" back off. + req.URL.Scheme = "" + } + + // Subsequent lines: Key: value. 
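+	// ReadMIMEHeader returns a textproto.MIMEHeader, a map[string][]string
+	// keyed by canonicalized header names; it converts directly to this
+	// package's Header type below.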
+ mimeHeader, err := tp.ReadMIMEHeader() + if err != nil { + return nil, err + } + req.Header = Header(mimeHeader) + + // RFC2616: Must treat + // GET /index.html HTTP/1.1 + // Host: www.google.com + // and + // GET http://www.google.com/index.html HTTP/1.1 + // Host: doesntmatter + // the same. In the second case, any Host line is ignored. + req.Host = req.URL.Host + if req.Host == "" { + req.Host = req.Header.get("Host") + } + delete(req.Header, "Host") + + fixPragmaCacheControl(req.Header) + + err = readTransfer(req, b) + if err != nil { + return nil, err + } + + return req, nil +} + +// MaxBytesReader is similar to io.LimitReader but is intended for +// limiting the size of incoming request bodies. In contrast to +// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a +// non-EOF error for a Read beyond the limit, and Closes the +// underlying reader when its Close method is called. +// +// MaxBytesReader prevents clients from accidentally or maliciously +// sending a large request and wasting server resources. +func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser { + return &maxBytesReader{w: w, r: r, n: n} +} + +type maxBytesReader struct { + w ResponseWriter + r io.ReadCloser // underlying reader + n int64 // max bytes remaining + stopped bool +} + +func (l *maxBytesReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + if !l.stopped { + l.stopped = true + if res, ok := l.w.(*response); ok { + res.requestTooLarge() + } + } + return 0, errors.New("http: request body too large") + } + if int64(len(p)) > l.n { + p = p[:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + return +} + +func (l *maxBytesReader) Close() error { + return l.r.Close() +} + +func copyValues(dst, src url.Values) { + for k, vs := range src { + for _, value := range vs { + dst.Add(k, value) + } + } +} + +func parsePostForm(r *Request) (vs url.Values, err error) { + if r.Body == nil { + err = errors.New("missing form body") + return + } + ct := r.Header.Get("Content-Type") + // RFC 2616, section 7.2.1 - empty type + // SHOULD be treated as application/octet-stream + if ct == "" { + ct = "application/octet-stream" + } + ct, _, err = mime.ParseMediaType(ct) + switch { + case ct == "application/x-www-form-urlencoded": + var reader io.Reader = r.Body + maxFormSize := int64(1<<63 - 1) + if _, ok := r.Body.(*maxBytesReader); !ok { + maxFormSize = int64(10 << 20) // 10 MB is a lot of text. + reader = io.LimitReader(r.Body, maxFormSize+1) + } + b, e := ioutil.ReadAll(reader) + if e != nil { + if err == nil { + err = e + } + break + } + if int64(len(b)) > maxFormSize { + err = errors.New("http: POST too large") + return + } + vs, e = url.ParseQuery(string(b)) + if err == nil { + err = e + } + case ct == "multipart/form-data": + // handled by ParseMultipartForm (which is calling us, or should be) + // TODO(bradfitz): there are too many possible + // orders to call too many functions here. + // Clean this up and write more tests. + // request_test.go contains the start of this, + // in TestParseMultipartFormOrder and others. + } + return +} + +// ParseForm parses the raw query from the URL and updates r.Form. +// +// For POST or PUT requests, it also parses the request body as a form and +// puts the results into both r.PostForm and r.Form. +// POST and PUT body parameters take precedence over URL query string values +// in r.Form. +// +// If the request Body's size has not already been limited by MaxBytesReader, +// the size is capped at 10MB.
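+//
+// For example (an illustrative request): for POST /search?q=url with an
+// x-www-form-urlencoded body of "q=body", r.Form["q"] is ["body", "url"]
+// after ParseForm, and FormValue("q") therefore returns "body".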
+// +// ParseMultipartForm calls ParseForm automatically. +// It is idempotent. +func (r *Request) ParseForm() error { + var err error + if r.PostForm == nil { + if r.Method == "POST" || r.Method == "PUT" || r.Method == "PATCH" { + r.PostForm, err = parsePostForm(r) + } + if r.PostForm == nil { + r.PostForm = make(url.Values) + } + } + if r.Form == nil { + if len(r.PostForm) > 0 { + r.Form = make(url.Values) + copyValues(r.Form, r.PostForm) + } + var newValues url.Values + if r.URL != nil { + var e error + newValues, e = url.ParseQuery(r.URL.RawQuery) + if err == nil { + err = e + } + } + if newValues == nil { + newValues = make(url.Values) + } + if r.Form == nil { + r.Form = newValues + } else { + copyValues(r.Form, newValues) + } + } + return err +} + +// ParseMultipartForm parses a request body as multipart/form-data. +// The whole request body is parsed and up to a total of maxMemory bytes of +// its file parts are stored in memory, with the remainder stored on +// disk in temporary files. +// ParseMultipartForm calls ParseForm if necessary. +// After one call to ParseMultipartForm, subsequent calls have no effect. +func (r *Request) ParseMultipartForm(maxMemory int64) error { + if r.MultipartForm == multipartByReader { + return errors.New("http: multipart handled by MultipartReader") + } + if r.Form == nil { + err := r.ParseForm() + if err != nil { + return err + } + } + if r.MultipartForm != nil { + return nil + } + + mr, err := r.multipartReader() + if err != nil { + return err + } + + f, err := mr.ReadForm(maxMemory) + if err != nil { + return err + } + for k, v := range f.Value { + r.Form[k] = append(r.Form[k], v...) + } + r.MultipartForm = f + + return nil +} + +// FormValue returns the first value for the named component of the query. +// POST and PUT body parameters take precedence over URL query string values. +// FormValue calls ParseMultipartForm and ParseForm if necessary. +// To access multiple values of the same key use ParseForm. +func (r *Request) FormValue(key string) string { + if r.Form == nil { + r.ParseMultipartForm(defaultMaxMemory) + } + if vs := r.Form[key]; len(vs) > 0 { + return vs[0] + } + return "" +} + +// PostFormValue returns the first value for the named component of the POST +// or PUT request body. URL query parameters are ignored. +// PostFormValue calls ParseMultipartForm and ParseForm if necessary. +func (r *Request) PostFormValue(key string) string { + if r.PostForm == nil { + r.ParseMultipartForm(defaultMaxMemory) + } + if vs := r.PostForm[key]; len(vs) > 0 { + return vs[0] + } + return "" +} + +// FormFile returns the first file for the provided form key. +// FormFile calls ParseMultipartForm and ParseForm if necessary. 
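+//
+// A typical handler fragment (the "upload" field name is illustrative):
+//
+//	file, header, err := r.FormFile("upload")
+//	if err != nil {
+//		return // ErrMissingFile if the field is absent or not a file
+//	}
+//	defer file.Close()
+//	// header.Filename holds the client-supplied file name.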
+func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { + if r.MultipartForm == multipartByReader { + return nil, nil, errors.New("http: multipart handled by MultipartReader") + } + if r.MultipartForm == nil { + err := r.ParseMultipartForm(defaultMaxMemory) + if err != nil { + return nil, nil, err + } + } + if r.MultipartForm != nil && r.MultipartForm.File != nil { + if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { + f, err := fhs[0].Open() + return f, fhs[0], err + } + } + return nil, nil, ErrMissingFile +} + +func (r *Request) expectsContinue() bool { + return hasToken(r.Header.get("Expect"), "100-continue") +} + +func (r *Request) wantsHttp10KeepAlive() bool { + if r.ProtoMajor != 1 || r.ProtoMinor != 0 { + return false + } + return hasToken(r.Header.get("Connection"), "keep-alive") +} + +func (r *Request) wantsClose() bool { + return hasToken(r.Header.get("Connection"), "close") +} + +func (r *Request) closeBody() { + if r.Body != nil { + r.Body.Close() + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/request_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/request_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/request_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,610 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + . "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "strings" + "testing" +) + +func TestQuery(t *testing.T) { + req := &Request{Method: "GET"} + req.URL, _ = url.Parse("http://www.google.com/search?q=foo&q=bar") + if q := req.FormValue("q"); q != "foo" { + t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) + } +} + +func TestPostQuery(t *testing.T) { + req, _ := NewRequest("POST", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not", + strings.NewReader("z=post&both=y&prio=2&empty=")) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + + if q := req.FormValue("q"); q != "foo" { + t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) + } + if z := req.FormValue("z"); z != "post" { + t.Errorf(`req.FormValue("z") = %q, want "post"`, z) + } + if bq, found := req.PostForm["q"]; found { + t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq) + } + if bz := req.PostFormValue("z"); bz != "post" { + t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz) + } + if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) { + t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs) + } + if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) { + t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both) + } + if prio := req.FormValue("prio"); prio != "2" { + t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio) + } + if empty := req.FormValue("empty"); empty != "" { + t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty) + } +} + +func TestPatchQuery(t *testing.T) { + req, _ := NewRequest("PATCH", "http://www.google.com/search?q=foo&q=bar&both=x&prio=1&empty=not", + strings.NewReader("z=post&both=y&prio=2&empty=")) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + + if q := req.FormValue("q"); q != "foo" { + t.Errorf(`req.FormValue("q") = %q, want "foo"`, q) + } + if z := req.FormValue("z"); z != 
"post" { + t.Errorf(`req.FormValue("z") = %q, want "post"`, z) + } + if bq, found := req.PostForm["q"]; found { + t.Errorf(`req.PostForm["q"] = %q, want no entry in map`, bq) + } + if bz := req.PostFormValue("z"); bz != "post" { + t.Errorf(`req.PostFormValue("z") = %q, want "post"`, bz) + } + if qs := req.Form["q"]; !reflect.DeepEqual(qs, []string{"foo", "bar"}) { + t.Errorf(`req.Form["q"] = %q, want ["foo", "bar"]`, qs) + } + if both := req.Form["both"]; !reflect.DeepEqual(both, []string{"y", "x"}) { + t.Errorf(`req.Form["both"] = %q, want ["y", "x"]`, both) + } + if prio := req.FormValue("prio"); prio != "2" { + t.Errorf(`req.FormValue("prio") = %q, want "2" (from body)`, prio) + } + if empty := req.FormValue("empty"); empty != "" { + t.Errorf(`req.FormValue("empty") = %q, want "" (from body)`, empty) + } +} + +type stringMap map[string][]string +type parseContentTypeTest struct { + shouldError bool + contentType stringMap +} + +var parseContentTypeTests = []parseContentTypeTest{ + {false, stringMap{"Content-Type": {"text/plain"}}}, + // Empty content type is legal - shoult be treated as + // application/octet-stream (RFC 2616, section 7.2.1) + {false, stringMap{}}, + {true, stringMap{"Content-Type": {"text/plain; boundary="}}}, + {false, stringMap{"Content-Type": {"application/unknown"}}}, +} + +func TestParseFormUnknownContentType(t *testing.T) { + for i, test := range parseContentTypeTests { + req := &Request{ + Method: "POST", + Header: Header(test.contentType), + Body: ioutil.NopCloser(strings.NewReader("body")), + } + err := req.ParseForm() + switch { + case err == nil && test.shouldError: + t.Errorf("test %d should have returned error", i) + case err != nil && !test.shouldError: + t.Errorf("test %d should not have returned error, got %v", i, err) + } + } +} + +func TestParseFormInitializeOnError(t *testing.T) { + nilBody, _ := NewRequest("POST", "http://www.google.com/search?q=foo", nil) + tests := []*Request{ + nilBody, + {Method: "GET", URL: nil}, + } + for i, req := range tests { + err := req.ParseForm() + if req.Form == nil { + t.Errorf("%d. Form not initialized, error %v", i, err) + } + if req.PostForm == nil { + t.Errorf("%d. 
PostForm not initialized, error %v", i, err) + } + } +} + +func TestMultipartReader(t *testing.T) { + req := &Request{ + Method: "POST", + Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}}, + Body: ioutil.NopCloser(new(bytes.Buffer)), + } + multipart, err := req.MultipartReader() + if multipart == nil { + t.Errorf("expected multipart; error: %v", err) + } + + req.Header = Header{"Content-Type": {"text/plain"}} + multipart, err = req.MultipartReader() + if multipart != nil { + t.Error("unexpected multipart for text/plain") + } +} + +func TestParseMultipartForm(t *testing.T) { + req := &Request{ + Method: "POST", + Header: Header{"Content-Type": {`multipart/form-data; boundary="foo123"`}}, + Body: ioutil.NopCloser(new(bytes.Buffer)), + } + err := req.ParseMultipartForm(25) + if err == nil { + t.Error("expected multipart EOF, got nil") + } + + req.Header = Header{"Content-Type": {"text/plain"}} + err = req.ParseMultipartForm(25) + if err != ErrNotMultipart { + t.Error("expected ErrNotMultipart for text/plain") + } +} + +func TestRedirect(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + switch r.URL.Path { + case "/": + w.Header().Set("Location", "/foo/") + w.WriteHeader(StatusSeeOther) + case "/foo/": + fmt.Fprintf(w, "foo") + default: + w.WriteHeader(StatusBadRequest) + } + })) + defer ts.Close() + + var end = regexp.MustCompile("/foo/$") + r, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + r.Body.Close() + url := r.Request.URL.String() + if r.StatusCode != 200 || !end.MatchString(url) { + t.Fatalf("Get got status %d at %q, want 200 matching /foo/$", r.StatusCode, url) + } +} + +func TestSetBasicAuth(t *testing.T) { + r, _ := NewRequest("GET", "http://example.com/", nil) + r.SetBasicAuth("Aladdin", "open sesame") + if g, e := r.Header.Get("Authorization"), "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="; g != e { + t.Errorf("got header %q, want %q", g, e) + } +} + +func TestMultipartRequest(t *testing.T) { + // Test that we can read the values and files of a + // multipart request with FormValue and FormFile, + // and that ParseMultipartForm can be called multiple times. + req := newTestMultipartRequest(t) + if err := req.ParseMultipartForm(25); err != nil { + t.Fatal("ParseMultipartForm first call:", err) + } + defer req.MultipartForm.RemoveAll() + validateTestMultipartContents(t, req, false) + if err := req.ParseMultipartForm(25); err != nil { + t.Fatal("ParseMultipartForm second call:", err) + } + validateTestMultipartContents(t, req, false) +} + +func TestMultipartRequestAuto(t *testing.T) { + // Test that FormValue and FormFile automatically invoke + // ParseMultipartForm and return the right values. + req := newTestMultipartRequest(t) + defer func() { + if req.MultipartForm != nil { + req.MultipartForm.RemoveAll() + } + }() + validateTestMultipartContents(t, req, true) +} + +func TestMissingFileMultipartRequest(t *testing.T) { + // Test that FormFile returns an error if + // the named file is missing. + req := newTestMultipartRequest(t) + testMissingFile(t, req) +} + +// Test that FormValue invokes ParseMultipartForm. 
+func TestFormValueCallsParseMultipartForm(t *testing.T) { + req, _ := NewRequest("POST", "http://www.google.com/", strings.NewReader("z=post")) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; param=value") + if req.Form != nil { + t.Fatal("Unexpected request Form, want nil") + } + req.FormValue("z") + if req.Form == nil { + t.Fatal("ParseMultipartForm not called by FormValue") + } +} + +// Test that FormFile invokes ParseMultipartForm. +func TestFormFileCallsParseMultipartForm(t *testing.T) { + req := newTestMultipartRequest(t) + if req.Form != nil { + t.Fatal("Unexpected request Form, want nil") + } + req.FormFile("") + if req.Form == nil { + t.Fatal("ParseMultipartForm not called by FormFile") + } +} + +// Test that ParseMultipartForm errors if called +// after MultipartReader on the same request. +func TestParseMultipartFormOrder(t *testing.T) { + req := newTestMultipartRequest(t) + if _, err := req.MultipartReader(); err != nil { + t.Fatalf("MultipartReader: %v", err) + } + if err := req.ParseMultipartForm(1024); err == nil { + t.Fatal("expected an error from ParseMultipartForm after call to MultipartReader") + } +} + +// Test that MultipartReader errors if called +// after ParseMultipartForm on the same request. +func TestMultipartReaderOrder(t *testing.T) { + req := newTestMultipartRequest(t) + if err := req.ParseMultipartForm(25); err != nil { + t.Fatalf("ParseMultipartForm: %v", err) + } + defer req.MultipartForm.RemoveAll() + if _, err := req.MultipartReader(); err == nil { + t.Fatal("expected an error from MultipartReader after call to ParseMultipartForm") + } +} + +// Test that FormFile errors if called after +// MultipartReader on the same request. +func TestFormFileOrder(t *testing.T) { + req := newTestMultipartRequest(t) + if _, err := req.MultipartReader(); err != nil { + t.Fatalf("MultipartReader: %v", err) + } + if _, _, err := req.FormFile(""); err == nil { + t.Fatal("expected an error from FormFile after call to MultipartReader") + } +} + +var readRequestErrorTests = []struct { + in string + err error +}{ + {"GET / HTTP/1.1\r\nheader:foo\r\n\r\n", nil}, + {"GET / HTTP/1.1\r\nheader:foo\r\n", io.ErrUnexpectedEOF}, + {"", io.EOF}, +} + +func TestReadRequestErrors(t *testing.T) { + for i, tt := range readRequestErrorTests { + _, err := ReadRequest(bufio.NewReader(strings.NewReader(tt.in))) + if err != tt.err { + t.Errorf("%d. 
got error = %v; want %v", i, err, tt.err) + } + } +} + +func TestNewRequestHost(t *testing.T) { + req, err := NewRequest("GET", "http://localhost:1234/", nil) + if err != nil { + t.Fatal(err) + } + if req.Host != "localhost:1234" { + t.Errorf("Host = %q; want localhost:1234", req.Host) + } +} + +func TestNewRequestContentLength(t *testing.T) { + readByte := func(r io.Reader) io.Reader { + var b [1]byte + r.Read(b[:]) + return r + } + tests := []struct { + r io.Reader + want int64 + }{ + {bytes.NewReader([]byte("123")), 3}, + {bytes.NewBuffer([]byte("1234")), 4}, + {strings.NewReader("12345"), 5}, + // Not detected: + {struct{ io.Reader }{strings.NewReader("xyz")}, 0}, + {io.NewSectionReader(strings.NewReader("x"), 0, 6), 0}, + {readByte(io.NewSectionReader(strings.NewReader("xy"), 0, 6)), 0}, + } + for _, tt := range tests { + req, err := NewRequest("POST", "http://localhost/", tt.r) + if err != nil { + t.Fatal(err) + } + if req.ContentLength != tt.want { + t.Errorf("ContentLength(%T) = %d; want %d", tt.r, req.ContentLength, tt.want) + } + } +} + +var parseHTTPVersionTests = []struct { + vers string + major, minor int + ok bool +}{ + {"HTTP/0.9", 0, 9, true}, + {"HTTP/1.0", 1, 0, true}, + {"HTTP/1.1", 1, 1, true}, + {"HTTP/3.14", 3, 14, true}, + + {"HTTP", 0, 0, false}, + {"HTTP/one.one", 0, 0, false}, + {"HTTP/1.1/", 0, 0, false}, + {"HTTP/-1,0", 0, 0, false}, + {"HTTP/0,-1", 0, 0, false}, + {"HTTP/", 0, 0, false}, + {"HTTP/1,1", 0, 0, false}, +} + +func TestParseHTTPVersion(t *testing.T) { + for _, tt := range parseHTTPVersionTests { + major, minor, ok := ParseHTTPVersion(tt.vers) + if ok != tt.ok || major != tt.major || minor != tt.minor { + type version struct { + major, minor int + ok bool + } + t.Errorf("failed to parse %q, expected: %#v, got %#v", tt.vers, version{tt.major, tt.minor, tt.ok}, version{major, minor, ok}) + } + } +} + +type logWrites struct { + t *testing.T + dst *[]string +} + +func (l logWrites) WriteByte(c byte) error { + l.t.Fatalf("unexpected WriteByte call") + return nil +} + +func (l logWrites) Write(p []byte) (n int, err error) { + *l.dst = append(*l.dst, string(p)) + return len(p), nil +} + +func TestRequestWriteBufferedWriter(t *testing.T) { + got := []string{} + req, _ := NewRequest("GET", "http://foo.com/", nil) + req.Write(logWrites{t, &got}) + want := []string{ + "GET / HTTP/1.1\r\n", + "Host: foo.com\r\n", + "User-Agent: " + DefaultUserAgent + "\r\n", + "\r\n", + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Writes = %q\n Want = %q", got, want) + } +} + +func testMissingFile(t *testing.T, req *Request) { + f, fh, err := req.FormFile("missing") + if f != nil { + t.Errorf("FormFile file = %v, want nil", f) + } + if fh != nil { + t.Errorf("FormFile file header = %q, want nil", fh) + } + if err != ErrMissingFile { + t.Errorf("FormFile err = %q, want ErrMissingFile", err) + } +} + +func newTestMultipartRequest(t *testing.T) *Request { + b := strings.NewReader(strings.Replace(message, "\n", "\r\n", -1)) + req, err := NewRequest("POST", "/", b) + if err != nil { + t.Fatal("NewRequest:", err) + } + ctype := fmt.Sprintf(`multipart/form-data; boundary="%s"`, boundary) + req.Header.Set("Content-type", ctype) + return req +} + +func validateTestMultipartContents(t *testing.T, req *Request, allMem bool) { + if g, e := req.FormValue("texta"), textaValue; g != e { + t.Errorf("texta value = %q, want %q", g, e) + } + if g, e := req.FormValue("textb"), textbValue; g != e { + t.Errorf("textb value = %q, want %q", g, e) + } + if g := req.FormValue("missing"); g != "" { 
+ t.Errorf("missing value = %q, want empty string", g) + } + + assertMem := func(n string, fd multipart.File) { + if _, ok := fd.(*os.File); ok { + t.Error(n, " is *os.File, should not be") + } + } + fda := testMultipartFile(t, req, "filea", "filea.txt", fileaContents) + defer fda.Close() + assertMem("filea", fda) + fdb := testMultipartFile(t, req, "fileb", "fileb.txt", filebContents) + defer fdb.Close() + if allMem { + assertMem("fileb", fdb) + } else { + if _, ok := fdb.(*os.File); !ok { + t.Errorf("fileb has unexpected underlying type %T", fdb) + } + } + + testMissingFile(t, req) +} + +func testMultipartFile(t *testing.T, req *Request, key, expectFilename, expectContent string) multipart.File { + f, fh, err := req.FormFile(key) + if err != nil { + t.Fatalf("FormFile(%q): %q", key, err) + } + if fh.Filename != expectFilename { + t.Errorf("filename = %q, want %q", fh.Filename, expectFilename) + } + var b bytes.Buffer + _, err = io.Copy(&b, f) + if err != nil { + t.Fatal("copying contents:", err) + } + if g := b.String(); g != expectContent { + t.Errorf("contents = %q, want %q", g, expectContent) + } + return f +} + +const ( + fileaContents = "This is a test file." + filebContents = "Another test file." + textaValue = "foo" + textbValue = "bar" + boundary = `MyBoundary` +) + +const message = ` +--MyBoundary +Content-Disposition: form-data; name="filea"; filename="filea.txt" +Content-Type: text/plain + +` + fileaContents + ` +--MyBoundary +Content-Disposition: form-data; name="fileb"; filename="fileb.txt" +Content-Type: text/plain + +` + filebContents + ` +--MyBoundary +Content-Disposition: form-data; name="texta" + +` + textaValue + ` +--MyBoundary +Content-Disposition: form-data; name="textb" + +` + textbValue + ` +--MyBoundary-- +` + +func benchmarkReadRequest(b *testing.B, request string) { + request = request + "\n" // final \n + request = strings.Replace(request, "\n", "\r\n", -1) // expand \n to \r\n + b.SetBytes(int64(len(request))) + r := bufio.NewReader(&infiniteReader{buf: []byte(request)}) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := ReadRequest(r) + if err != nil { + b.Fatalf("failed to read request: %v", err) + } + } +} + +// infiniteReader satisfies Read requests as if the contents of buf +// loop indefinitely. 
+type infiniteReader struct { + buf []byte + offset int +} + +func (r *infiniteReader) Read(b []byte) (int, error) { + n := copy(b, r.buf[r.offset:]) + r.offset = (r.offset + n) % len(r.buf) + return n, nil +} + +func BenchmarkReadRequestChrome(b *testing.B) { + // https://github.com/felixge/node-http-perf/blob/master/fixtures/get.http + benchmarkReadRequest(b, `GET / HTTP/1.1 +Host: localhost:8080 +Connection: keep-alive +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 +User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17 +Accept-Encoding: gzip,deflate,sdch +Accept-Language: en-US,en;q=0.8 +Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3 +Cookie: __utma=1.1978842379.1323102373.1323102373.1323102373.1; EPi:NumberOfVisits=1,2012-02-28T13:42:18; CrmSession=5b707226b9563e1bc69084d07a107c98; plushContainerWidth=100%25; plushNoTopMenu=0; hudson_auto_refresh=false +`) +} + +func BenchmarkReadRequestCurl(b *testing.B) { + // curl http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.1 +User-Agent: curl/7.27.0 +Host: localhost:8080 +Accept: */* +`) +} + +func BenchmarkReadRequestApachebench(b *testing.B) { + // ab -n 1 -c 1 http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.0 +Host: localhost:8080 +User-Agent: ApacheBench/2.3 +Accept: */* +`) +} + +func BenchmarkReadRequestSiege(b *testing.B) { + // siege -r 1 -c 1 http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.1 +Host: localhost:8080 +Accept: */* +Accept-Encoding: gzip +User-Agent: JoeDog/1.00 [en] (X11; I; Siege 2.70) +Connection: keep-alive +`) +} + +func BenchmarkReadRequestWrk(b *testing.B) { + // wrk -t 1 -r 1 -c 1 http://localhost:8080/ + benchmarkReadRequest(b, `GET / HTTP/1.1 +Host: localhost:8080 +`) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/requestwrite_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,565 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/url" + "strings" + "testing" +) + +type reqWriteTest struct { + Req Request + Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body + + // Any of these three may be empty to skip that test. 
+ WantWrite string // Request.Write + WantProxy string // Request.WriteProxy + + WantError error // wanted error from Request.Write +} + +var reqWriteTests = []reqWriteTest{ + // HTTP/1.1 => chunked coding; no body; no trailer + { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.techcrunch.com", + Path: "/", + }, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{ + "Accept": {"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, + "Accept-Charset": {"ISO-8859-1,utf-8;q=0.7,*;q=0.7"}, + "Accept-Encoding": {"gzip,deflate"}, + "Accept-Language": {"en-us,en;q=0.5"}, + "Keep-Alive": {"300"}, + "Proxy-Connection": {"keep-alive"}, + "User-Agent": {"Fake"}, + }, + Body: nil, + Close: false, + Host: "www.techcrunch.com", + Form: map[string][]string{}, + }, + + WantWrite: "GET / HTTP/1.1\r\n" + + "Host: www.techcrunch.com\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Keep-Alive: 300\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n", + + WantProxy: "GET http://www.techcrunch.com/ HTTP/1.1\r\n" + + "Host: www.techcrunch.com\r\n" + + "User-Agent: Fake\r\n" + + "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n" + + "Accept-Encoding: gzip,deflate\r\n" + + "Accept-Language: en-us,en;q=0.5\r\n" + + "Keep-Alive: 300\r\n" + + "Proxy-Connection: keep-alive\r\n\r\n", + }, + // HTTP/1.1 => chunked coding; body; empty trailer + { + Req: Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + TransferEncoding: []string{"chunked"}, + }, + + Body: []byte("abcdef"), + + WantWrite: "GET /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + + WantProxy: "GET http://www.google.com/search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + }, + // HTTP/1.1 POST => chunked coding; body; empty trailer + { + Req: Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: true, + TransferEncoding: []string{"chunked"}, + }, + + Body: []byte("abcdef"), + + WantWrite: "POST /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + + WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("abcdef") + chunk(""), + }, + + // HTTP/1.1 POST with Content-Length, no chunking + { + Req: Request{ + Method: "POST", + URL: &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/search", + }, + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Close: true, + ContentLength: 6, + }, + + Body: []byte("abcdef"), + + WantWrite: "POST /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + 
"Connection: close\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + + WantProxy: "POST http://www.google.com/search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Connection: close\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + }, + + // HTTP/1.1 POST with Content-Length in headers + { + Req: Request{ + Method: "POST", + URL: mustParseURL("http://example.com/"), + Host: "example.com", + Header: Header{ + "Content-Length": []string{"10"}, // ignored + }, + ContentLength: 6, + }, + + Body: []byte("abcdef"), + + WantWrite: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + + WantProxy: "POST http://example.com/ HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 6\r\n" + + "\r\n" + + "abcdef", + }, + + // default to HTTP/1.1 + { + Req: Request{ + Method: "GET", + URL: mustParseURL("/search"), + Host: "www.google.com", + }, + + WantWrite: "GET /search HTTP/1.1\r\n" + + "Host: www.google.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "\r\n", + }, + + // Request with a 0 ContentLength and a 0 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 0, // as if unset by user + }, + + Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 0)) }, + + // RFC 2616 Section 14.13 says Content-Length should be specified + // unless body is prohibited by the request method. + // Also, nginx expects it for POST and PUT. + WantWrite: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + + WantProxy: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + }, + + // Request with a 0 ContentLength and a 1 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 0, // as if unset by user + }, + + Body: func() io.ReadCloser { return ioutil.NopCloser(io.LimitReader(strings.NewReader("xx"), 1)) }, + + WantWrite: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("x") + chunk(""), + + WantProxy: "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "User-Agent: Go 1.1 package http\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + chunk("x") + chunk(""), + }, + + // Request with a ContentLength of 10 but a 5 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 10, // but we're going to send only 5 bytes + }, + Body: []byte("12345"), + WantError: errors.New("http: Request.ContentLength=10 with Body length 5"), + }, + + // Request with a ContentLength of 4 but an 8 byte body. + { + Req: Request{ + Method: "POST", + URL: mustParseURL("/"), + Host: "example.com", + ProtoMajor: 1, + ProtoMinor: 1, + ContentLength: 4, // but we're going to try to send 8 bytes + }, + Body: []byte("12345678"), + WantError: errors.New("http: Request.ContentLength=4 with Body length 8"), + }, + + // Request with a 5 ContentLength and nil body. 
+	{
+		Req: Request{
+			Method:        "POST",
+			URL:           mustParseURL("/"),
+			Host:          "example.com",
+			ProtoMajor:    1,
+			ProtoMinor:    1,
+			ContentLength: 5, // but we'll omit the body
+		},
+		WantError: errors.New("http: Request.ContentLength=5 with nil Body"),
+	},
+
+	// Request with a 0 ContentLength and a body with 1 byte content and an error.
+	{
+		Req: Request{
+			Method:        "POST",
+			URL:           mustParseURL("/"),
+			Host:          "example.com",
+			ProtoMajor:    1,
+			ProtoMinor:    1,
+			ContentLength: 0, // as if unset by user
+		},
+
+		Body: func() io.ReadCloser {
+			err := errors.New("Custom reader error")
+			errReader := &errorReader{err}
+			return ioutil.NopCloser(io.MultiReader(strings.NewReader("x"), errReader))
+		},
+
+		WantError: errors.New("Custom reader error"),
+	},
+
+	// Request with a 0 ContentLength and a body without content and an error.
+	{
+		Req: Request{
+			Method:        "POST",
+			URL:           mustParseURL("/"),
+			Host:          "example.com",
+			ProtoMajor:    1,
+			ProtoMinor:    1,
+			ContentLength: 0, // as if unset by user
+		},
+
+		Body: func() io.ReadCloser {
+			err := errors.New("Custom reader error")
+			errReader := &errorReader{err}
+			return ioutil.NopCloser(errReader)
+		},
+
+		WantError: errors.New("Custom reader error"),
+	},
+
+	// Verify that Request.Write rewrites the version to HTTP/1.1, sends an
+	// empty Host header when neither Request.Host nor URL.Host is set, and
+	// adds the default User-Agent.
+	{
+		Req: Request{
+			Method:     "GET",
+			URL:        mustParseURL("/foo"),
+			ProtoMajor: 1,
+			ProtoMinor: 0,
+			Header: Header{
+				"X-Foo": []string{"X-Bar"},
+			},
+		},
+
+		WantWrite: "GET /foo HTTP/1.1\r\n" +
+			"Host: \r\n" +
+			"User-Agent: Go 1.1 package http\r\n" +
+			"X-Foo: X-Bar\r\n\r\n",
+	},
+
+	// If no Request.Host and no Request.URL.Host, we send
+	// an empty Host header, and don't use
+	// Request.Header["Host"]. This is just testing that
+	// we don't change Go 1.0 behavior.
+	{
+		Req: Request{
+			Method: "GET",
+			Host:   "",
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   "",
+				Path:   "/search",
+			},
+			ProtoMajor: 1,
+			ProtoMinor: 1,
+			Header: Header{
+				"Host": []string{"bad.example.com"},
+			},
+		},
+
+		WantWrite: "GET /search HTTP/1.1\r\n" +
+			"Host: \r\n" +
+			"User-Agent: Go 1.1 package http\r\n\r\n",
+	},
+
+	// Opaque test #1 from golang.org/issue/4860
+	{
+		Req: Request{
+			Method: "GET",
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   "www.google.com",
+				Opaque: "/%2F/%2F/",
+			},
+			ProtoMajor: 1,
+			ProtoMinor: 1,
+			Header:     Header{},
+		},
+
+		WantWrite: "GET /%2F/%2F/ HTTP/1.1\r\n" +
+			"Host: www.google.com\r\n" +
+			"User-Agent: Go 1.1 package http\r\n\r\n",
+	},
+
+	// Opaque test #2 from golang.org/issue/4860
+	{
+		Req: Request{
+			Method: "GET",
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   "x.google.com",
+				Opaque: "//y.google.com/%2F/%2F/",
+			},
+			ProtoMajor: 1,
+			ProtoMinor: 1,
+			Header:     Header{},
+		},
+
+		WantWrite: "GET http://y.google.com/%2F/%2F/ HTTP/1.1\r\n" +
+			"Host: x.google.com\r\n" +
+			"User-Agent: Go 1.1 package http\r\n\r\n",
+	},
+
+	// Testing custom case in header keys. Issue 5022.
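+	// (Header.Set canonicalizes its key, so Set("all-caps", "x") would be
+	// stored, and written, as "All-Caps". Keys assigned directly into the
+	// map bypass that canonicalization and are emitted verbatim, e.g.:
+	//
+	//	h := Header{}
+	//	h.Set("all-caps", "x")        // wire form: "All-Caps: x"
+	//	h["ALL-CAPS"] = []string{"x"} // wire form: "ALL-CAPS: x"
+	//
+	// The case below relies on the second, verbatim path.)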
+	{
+		Req: Request{
+			Method: "GET",
+			URL: &url.URL{
+				Scheme: "http",
+				Host:   "www.google.com",
+				Path:   "/",
+			},
+			Proto:      "HTTP/1.1",
+			ProtoMajor: 1,
+			ProtoMinor: 1,
+			Header: Header{
+				"ALL-CAPS": {"x"},
+			},
+		},
+
+		WantWrite: "GET / HTTP/1.1\r\n" +
+			"Host: www.google.com\r\n" +
+			"User-Agent: Go 1.1 package http\r\n" +
+			"ALL-CAPS: x\r\n" +
+			"\r\n",
+	},
+}
+
+func TestRequestWrite(t *testing.T) {
+	for i := range reqWriteTests {
+		tt := &reqWriteTests[i]
+
+		setBody := func() {
+			if tt.Body == nil {
+				return
+			}
+			switch b := tt.Body.(type) {
+			case []byte:
+				tt.Req.Body = ioutil.NopCloser(bytes.NewReader(b))
+			case func() io.ReadCloser:
+				tt.Req.Body = b()
+			}
+		}
+		setBody()
+		if tt.Req.Header == nil {
+			tt.Req.Header = make(Header)
+		}
+
+		var braw bytes.Buffer
+		err := tt.Req.Write(&braw)
+		if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.WantError); g != e {
+			t.Errorf("writing #%d, err = %q, want %q", i, g, e)
+			continue
+		}
+		if err != nil {
+			continue
+		}
+
+		if tt.WantWrite != "" {
+			sraw := braw.String()
+			if sraw != tt.WantWrite {
+				t.Errorf("Test %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantWrite, sraw)
+				continue
+			}
+		}
+
+		if tt.WantProxy != "" {
+			setBody()
+			var praw bytes.Buffer
+			err = tt.Req.WriteProxy(&praw)
+			if err != nil {
+				t.Errorf("WriteProxy #%d: %s", i, err)
+				continue
+			}
+			sraw := praw.String()
+			if sraw != tt.WantProxy {
+				t.Errorf("Test Proxy %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantProxy, sraw)
+				continue
+			}
+		}
+	}
+}
+
+type closeChecker struct {
+	io.Reader
+	closed bool
+}
+
+func (rc *closeChecker) Close() error {
+	rc.closed = true
+	return nil
+}
+
+// TestRequestWriteClosesBody tests that Request.Write does close its request.Body.
+// It also indirectly tests NewRequest and that it doesn't wrap an existing Closer
+// inside a NopCloser, and that it serializes it correctly.
+func TestRequestWriteClosesBody(t *testing.T) {
+	rc := &closeChecker{Reader: strings.NewReader("my body")}
+	req, _ := NewRequest("POST", "http://foo.com/", rc)
+	if req.ContentLength != 0 {
+		t.Errorf("got req.ContentLength %d, want 0", req.ContentLength)
+	}
+	buf := new(bytes.Buffer)
+	req.Write(buf)
+	if !rc.closed {
+		t.Error("body not closed after write")
+	}
+	expected := "POST / HTTP/1.1\r\n" +
+		"Host: foo.com\r\n" +
+		"User-Agent: Go 1.1 package http\r\n" +
+		"Transfer-Encoding: chunked\r\n\r\n" +
+		// TODO: currently we don't buffer before chunking, so we get a
+		// single "m" chunk before the other chunks, as this was the 1-byte
+		// read from our MultiReader where we stitched the Body back together
+		// after sniffing whether the Body was 0 bytes or not.
+		chunk("m") +
+		chunk("y body") +
+		chunk("")
+	if buf.String() != expected {
+		t.Errorf("write:\n got: %s\nwant: %s", buf.String(), expected)
+	}
+}
+
+func chunk(s string) string {
+	return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
+}
+
+func mustParseURL(s string) *url.URL {
+	u, err := url.Parse(s)
+	if err != nil {
+		panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
+	}
+	return u
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/response.go'
--- src/github.com/Azure/azure-sdk-for-go/core/http/response.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/response.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,291 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// HTTP Response reading and parsing.
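+//
+// A rough usage sketch for the read path defined below (conn and req are
+// placeholders for an established net.Conn and the *Request that produced
+// the response):
+//
+//	br := bufio.NewReader(conn)
+//	resp, err := ReadResponse(br, req)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer resp.Body.Close()
+//	body, err := ioutil.ReadAll(resp.Body)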
+ +package http + +import ( + "bufio" + "bytes" + "errors" + "github.com/Azure/azure-sdk-for-go/core/tls" + "io" + "net/textproto" + "net/url" + "strconv" + "strings" +) + +var respExcludeHeader = map[string]bool{ + "Content-Length": true, + "Transfer-Encoding": true, + "Trailer": true, +} + +// Response represents the response from an HTTP request. +// +type Response struct { + Status string // e.g. "200 OK" + StatusCode int // e.g. 200 + Proto string // e.g. "HTTP/1.0" + ProtoMajor int // e.g. 1 + ProtoMinor int // e.g. 0 + + // Header maps header keys to values. If the response had multiple + // headers with the same key, they may be concatenated, with comma + // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers + // be semantically equivalent to a comma-delimited sequence.) Values + // duplicated by other fields in this struct (e.g., ContentLength) are + // omitted from Header. + // + // Keys in the map are canonicalized (see CanonicalHeaderKey). + Header Header + + // Body represents the response body. + // + // The http Client and Transport guarantee that Body is always + // non-nil, even on responses without a body or responses with + // a zero-length body. It is the caller's responsibility to + // close Body. + // + // The Body is automatically dechunked if the server replied + // with a "chunked" Transfer-Encoding. + Body io.ReadCloser + + // ContentLength records the length of the associated content. The + // value -1 indicates that the length is unknown. Unless Request.Method + // is "HEAD", values >= 0 indicate that the given number of bytes may + // be read from Body. + ContentLength int64 + + // Contains transfer encodings from outer-most to inner-most. Value is + // nil, means that "identity" encoding is used. + TransferEncoding []string + + // Close records whether the header directed that the connection be + // closed after reading Body. The value is advice for clients: neither + // ReadResponse nor Response.Write ever closes a connection. + Close bool + + // Trailer maps trailer keys to values, in the same + // format as the header. + Trailer Header + + // The Request that was sent to obtain this Response. + // Request's Body is nil (having already been consumed). + // This is only populated for Client requests. + Request *Request + + // TLS contains information about the TLS connection on which the + // response was received. It is nil for unencrypted responses. + // The pointer is shared between responses and should not be + // modified. + TLS *tls.ConnectionState +} + +// Cookies parses and returns the cookies set in the Set-Cookie headers. +func (r *Response) Cookies() []*Cookie { + return readSetCookies(r.Header) +} + +var ErrNoLocation = errors.New("http: no Location header in response") + +// Location returns the URL of the response's "Location" header, +// if present. Relative redirects are resolved relative to +// the Response's Request. ErrNoLocation is returned if no +// Location header is present. +func (r *Response) Location() (*url.URL, error) { + lv := r.Header.Get("Location") + if lv == "" { + return nil, ErrNoLocation + } + if r.Request != nil && r.Request.URL != nil { + return r.Request.URL.Parse(lv) + } + return url.Parse(lv) +} + +// ReadResponse reads and returns an HTTP response from r. +// The req parameter optionally specifies the Request that corresponds +// to this Response. If nil, a GET request is assumed. +// Clients must call resp.Body.Close when finished reading resp.Body. 
+// After that call, clients can inspect resp.Trailer to find key/value +// pairs included in the response trailer. +func ReadResponse(r *bufio.Reader, req *Request) (*Response, error) { + tp := textproto.NewReader(r) + resp := &Response{ + Request: req, + } + + // Parse the first line of the response. + line, err := tp.ReadLine() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + f := strings.SplitN(line, " ", 3) + if len(f) < 2 { + return nil, &badStringError{"malformed HTTP response", line} + } + reasonPhrase := "" + if len(f) > 2 { + reasonPhrase = f[2] + } + resp.Status = f[1] + " " + reasonPhrase + resp.StatusCode, err = strconv.Atoi(f[1]) + if err != nil { + return nil, &badStringError{"malformed HTTP status code", f[1]} + } + + resp.Proto = f[0] + var ok bool + if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok { + return nil, &badStringError{"malformed HTTP version", resp.Proto} + } + + // Parse the response headers. + mimeHeader, err := tp.ReadMIMEHeader() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + resp.Header = Header(mimeHeader) + + fixPragmaCacheControl(resp.Header) + + err = readTransfer(resp, r) + if err != nil { + return nil, err + } + + return resp, nil +} + +// RFC2616: Should treat +// Pragma: no-cache +// like +// Cache-Control: no-cache +func fixPragmaCacheControl(header Header) { + if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { + if _, presentcc := header["Cache-Control"]; !presentcc { + header["Cache-Control"] = []string{"no-cache"} + } + } +} + +// ProtoAtLeast reports whether the HTTP protocol used +// in the response is at least major.minor. +func (r *Response) ProtoAtLeast(major, minor int) bool { + return r.ProtoMajor > major || + r.ProtoMajor == major && r.ProtoMinor >= minor +} + +// Writes the response (header, body and trailer) in wire format. This method +// consults the following fields of the response: +// +// StatusCode +// ProtoMajor +// ProtoMinor +// Request.Method +// TransferEncoding +// Trailer +// Body +// ContentLength +// Header, values for non-canonical keys will have unpredictable behavior +// +// Body is closed after it is sent. +func (r *Response) Write(w io.Writer) error { + // Status line + text := r.Status + if text == "" { + var ok bool + text, ok = statusText[r.StatusCode] + if !ok { + text = "status code " + strconv.Itoa(r.StatusCode) + } + } + protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor) + statusCode := strconv.Itoa(r.StatusCode) + " " + text = strings.TrimPrefix(text, statusCode) + if _, err := io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n"); err != nil { + return err + } + + // Clone it, so we can modify r1 as needed. + r1 := new(Response) + *r1 = *r + if r1.ContentLength == 0 && r1.Body != nil { + // Is it actually 0 length? Or just unknown? + var buf [1]byte + n, err := r1.Body.Read(buf[:]) + if err != nil && err != io.EOF { + return err + } + if n == 0 { + // Reset it to a known zero reader, in case underlying one + // is unhappy being read repeatedly. 
+ r1.Body = eofReader + } else { + r1.ContentLength = -1 + r1.Body = struct { + io.Reader + io.Closer + }{ + io.MultiReader(bytes.NewReader(buf[:1]), r.Body), + r.Body, + } + } + } + // If we're sending a non-chunked HTTP/1.1 response without a + // content-length, the only way to do that is the old HTTP/1.0 + // way, by noting the EOF with a connection close, so we need + // to set Close. + if r1.ContentLength == -1 && !r1.Close && r1.ProtoAtLeast(1, 1) && !chunked(r1.TransferEncoding) { + r1.Close = true + } + + // Process Body,ContentLength,Close,Trailer + tw, err := newTransferWriter(r1) + if err != nil { + return err + } + err = tw.WriteHeader(w) + if err != nil { + return err + } + + // Rest of header + err = r.Header.WriteSubset(w, respExcludeHeader) + if err != nil { + return err + } + + // contentLengthAlreadySent may have been already sent for + // POST/PUT requests, even if zero length. See Issue 8180. + contentLengthAlreadySent := tw.shouldSendContentLength() + if r1.ContentLength == 0 && !chunked(r1.TransferEncoding) && !contentLengthAlreadySent { + if _, err := io.WriteString(w, "Content-Length: 0\r\n"); err != nil { + return err + } + } + + // End-of-header + if _, err := io.WriteString(w, "\r\n"); err != nil { + return err + } + + // Write body and trailer + err = tw.WriteBody(w) + if err != nil { + return err + } + + // Success + return nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/response_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/response_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/response_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,645 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "net/url" + "reflect" + "regexp" + "strings" + "testing" +) + +type respTest struct { + Raw string + Resp Response + Body string +} + +func dummyReq(method string) *Request { + return &Request{Method: method} +} + +func dummyReq11(method string) *Request { + return &Request{Method: method, Proto: "HTTP/1.1", ProtoMajor: 1, ProtoMinor: 1} +} + +var respTests = []respTest{ + // Unchunked response without Content-Length. + { + "HTTP/1.0 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{ + "Connection": {"close"}, // TODO(rsc): Delete? + }, + Close: true, + ContentLength: -1, + }, + + "Body here\n", + }, + + // Unchunked HTTP/1.1 response without Content-Length or + // Connection headers. + { + "HTTP/1.1 200 OK\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Request: dummyReq("GET"), + Close: true, + ContentLength: -1, + }, + + "Body here\n", + }, + + // Unchunked HTTP/1.1 204 response without Content-Length. + { + "HTTP/1.1 204 No Content\r\n" + + "\r\n" + + "Body should not be read!\n", + + Response{ + Status: "204 No Content", + StatusCode: 204, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: Header{}, + Request: dummyReq("GET"), + Close: false, + ContentLength: 0, + }, + + "", + }, + + // Unchunked response with Content-Length. 
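+	// (Here the ten body bytes "Body here\n" are delimited by the declared
+	// Content-Length; the reader stops after exactly ten bytes. The chunked
+	// cases below frame the same bytes with a hex length line instead:
+	// "0a\r\nBody here\n\r\n" carries the ten bytes and "0\r\n\r\n" marks
+	// the end of the stream.)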
+ { + "HTTP/1.0 200 OK\r\n" + + "Content-Length: 10\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{ + "Connection": {"close"}, + "Content-Length": {"10"}, + }, + Close: true, + ContentLength: 10, + }, + + "Body here\n", + }, + + // Chunked response without Content-Length. + { + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "\r\n" + + "0a\r\n" + + "Body here\n\r\n" + + "09\r\n" + + "continued\r\n" + + "0\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Close: false, + ContentLength: -1, + TransferEncoding: []string{"chunked"}, + }, + + "Body here\ncontinued", + }, + + // Chunked response with Content-Length. + { + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Content-Length: 10\r\n" + + "\r\n" + + "0a\r\n" + + "Body here\n\r\n" + + "0\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Close: false, + ContentLength: -1, + TransferEncoding: []string{"chunked"}, + }, + + "Body here\n", + }, + + // Chunked response in response to a HEAD request + { + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("HEAD"), + Header: Header{}, + TransferEncoding: []string{"chunked"}, + Close: false, + ContentLength: -1, + }, + + "", + }, + + // Content-Length in response to a HEAD request + { + "HTTP/1.0 200 OK\r\n" + + "Content-Length: 256\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("HEAD"), + Header: Header{"Content-Length": {"256"}}, + TransferEncoding: nil, + Close: true, + ContentLength: 256, + }, + + "", + }, + + // Content-Length in response to a HEAD request with HTTP/1.1 + { + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 256\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("HEAD"), + Header: Header{"Content-Length": {"256"}}, + TransferEncoding: nil, + Close: false, + ContentLength: 256, + }, + + "", + }, + + // No Content-Length or Chunked in response to a HEAD request + { + "HTTP/1.0 200 OK\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("HEAD"), + Header: Header{}, + TransferEncoding: nil, + Close: true, + ContentLength: -1, + }, + + "", + }, + + // explicit Content-Length of 0. + { + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Content-Length": {"0"}, + }, + Close: false, + ContentLength: 0, + }, + + "", + }, + + // Status line without a Reason-Phrase, but trailing space. 
+ // (permitted by RFC 2616) + { + "HTTP/1.0 303 \r\n\r\n", + Response{ + Status: "303 ", + StatusCode: 303, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Close: true, + ContentLength: -1, + }, + + "", + }, + + // Status line without a Reason-Phrase, and no trailing space. + // (not permitted by RFC 2616, but we'll accept it anyway) + { + "HTTP/1.0 303\r\n\r\n", + Response{ + Status: "303 ", + StatusCode: 303, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Close: true, + ContentLength: -1, + }, + + "", + }, + + // golang.org/issue/4767: don't special-case multipart/byteranges responses + { + `HTTP/1.1 206 Partial Content +Connection: close +Content-Type: multipart/byteranges; boundary=18a75608c8f47cef + +some body`, + Response{ + Status: "206 Partial Content", + StatusCode: 206, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Content-Type": []string{"multipart/byteranges; boundary=18a75608c8f47cef"}, + }, + Close: true, + ContentLength: -1, + }, + + "some body", + }, + + // Unchunked response without Content-Length, Request is nil + { + "HTTP/1.0 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "Body here\n", + + Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: Header{ + "Connection": {"close"}, // TODO(rsc): Delete? + }, + Close: true, + ContentLength: -1, + }, + + "Body here\n", + }, +} + +func TestReadResponse(t *testing.T) { + for i, tt := range respTests { + resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + rbody := resp.Body + resp.Body = nil + diff(t, fmt.Sprintf("#%d Response", i), resp, &tt.Resp) + var bout bytes.Buffer + if rbody != nil { + _, err = io.Copy(&bout, rbody) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + rbody.Close() + } + body := bout.String() + if body != tt.Body { + t.Errorf("#%d: Body = %q want %q", i, body, tt.Body) + } + } +} + +func TestWriteResponse(t *testing.T) { + for i, tt := range respTests { + resp, err := ReadResponse(bufio.NewReader(strings.NewReader(tt.Raw)), tt.Resp.Request) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + err = resp.Write(ioutil.Discard) + if err != nil { + t.Errorf("#%d: %v", i, err) + continue + } + } +} + +var readResponseCloseInMiddleTests = []struct { + chunked, compressed bool +}{ + {false, false}, + {true, false}, + {true, true}, +} + +// TestReadResponseCloseInMiddle tests that closing a body after +// reading only part of its contents advances the read to the end of +// the request, right up until the next request. +func TestReadResponseCloseInMiddle(t *testing.T) { + for _, test := range readResponseCloseInMiddleTests { + fatalf := func(format string, args ...interface{}) { + args = append([]interface{}{test.chunked, test.compressed}, args...) + t.Fatalf("on test chunked=%v, compressed=%v: "+format, args...) 
+ } + checkErr := func(err error, msg string) { + if err == nil { + return + } + fatalf(msg+": %v", err) + } + var buf bytes.Buffer + buf.WriteString("HTTP/1.1 200 OK\r\n") + if test.chunked { + buf.WriteString("Transfer-Encoding: chunked\r\n") + } else { + buf.WriteString("Content-Length: 1000000\r\n") + } + var wr io.Writer = &buf + if test.chunked { + wr = newChunkedWriter(wr) + } + if test.compressed { + buf.WriteString("Content-Encoding: gzip\r\n") + wr = gzip.NewWriter(wr) + } + buf.WriteString("\r\n") + + chunk := bytes.Repeat([]byte{'x'}, 1000) + for i := 0; i < 1000; i++ { + if test.compressed { + // Otherwise this compresses too well. + _, err := io.ReadFull(rand.Reader, chunk) + checkErr(err, "rand.Reader ReadFull") + } + wr.Write(chunk) + } + if test.compressed { + err := wr.(*gzip.Writer).Close() + checkErr(err, "compressor close") + } + if test.chunked { + buf.WriteString("0\r\n\r\n") + } + buf.WriteString("Next Request Here") + + bufr := bufio.NewReader(&buf) + resp, err := ReadResponse(bufr, dummyReq("GET")) + checkErr(err, "ReadResponse") + expectedLength := int64(-1) + if !test.chunked { + expectedLength = 1000000 + } + if resp.ContentLength != expectedLength { + fatalf("expected response length %d, got %d", expectedLength, resp.ContentLength) + } + if resp.Body == nil { + fatalf("nil body") + } + if test.compressed { + gzReader, err := gzip.NewReader(resp.Body) + checkErr(err, "gzip.NewReader") + resp.Body = &readerAndCloser{gzReader, resp.Body} + } + + rbuf := make([]byte, 2500) + n, err := io.ReadFull(resp.Body, rbuf) + checkErr(err, "2500 byte ReadFull") + if n != 2500 { + fatalf("ReadFull only read %d bytes", n) + } + if test.compressed == false && !bytes.Equal(bytes.Repeat([]byte{'x'}, 2500), rbuf) { + fatalf("ReadFull didn't read 2500 'x'; got %q", string(rbuf)) + } + resp.Body.Close() + + rest, err := ioutil.ReadAll(bufr) + checkErr(err, "ReadAll on remainder") + if e, g := "Next Request Here", string(rest); e != g { + g = regexp.MustCompile(`(xx+)`).ReplaceAllStringFunc(g, func(match string) string { + return fmt.Sprintf("x(repeated x%d)", len(match)) + }) + fatalf("remainder = %q, expected %q", g, e) + } + } +} + +func diff(t *testing.T, prefix string, have, want interface{}) { + hv := reflect.ValueOf(have).Elem() + wv := reflect.ValueOf(want).Elem() + if hv.Type() != wv.Type() { + t.Errorf("%s: type mismatch %v want %v", prefix, hv.Type(), wv.Type()) + } + for i := 0; i < hv.NumField(); i++ { + hf := hv.Field(i).Interface() + wf := wv.Field(i).Interface() + if !reflect.DeepEqual(hf, wf) { + t.Errorf("%s: %s = %v want %v", prefix, hv.Type().Field(i).Name, hf, wf) + } + } +} + +type responseLocationTest struct { + location string // Response's Location header or "" + requrl string // Response.Request.URL or "" + want string + wantErr error +} + +var responseLocationTests = []responseLocationTest{ + {"/foo", "http://bar.com/baz", "http://bar.com/foo", nil}, + {"http://foo.com/", "http://bar.com/baz", "http://foo.com/", nil}, + {"", "http://bar.com/baz", "", ErrNoLocation}, +} + +func TestLocationResponse(t *testing.T) { + for i, tt := range responseLocationTests { + res := new(Response) + res.Header = make(Header) + res.Header.Set("Location", tt.location) + if tt.requrl != "" { + res.Request = &Request{} + var err error + res.Request.URL, err = url.Parse(tt.requrl) + if err != nil { + t.Fatalf("bad test URL %q: %v", tt.requrl, err) + } + } + + got, err := res.Location() + if tt.wantErr != nil { + if err == nil { + t.Errorf("%d. 
err=nil; want %q", i, tt.wantErr) + continue + } + if g, e := err.Error(), tt.wantErr.Error(); g != e { + t.Errorf("%d. err=%q; want %q", i, g, e) + continue + } + continue + } + if err != nil { + t.Errorf("%d. err=%q", i, err) + continue + } + if g, e := got.String(), tt.want; g != e { + t.Errorf("%d. Location=%q; want %q", i, g, e) + } + } +} + +func TestResponseStatusStutter(t *testing.T) { + r := &Response{ + Status: "123 some status", + StatusCode: 123, + ProtoMajor: 1, + ProtoMinor: 3, + } + var buf bytes.Buffer + r.Write(&buf) + if strings.Contains(buf.String(), "123 123") { + t.Errorf("stutter in status: %s", buf.String()) + } +} + +func TestResponseContentLengthShortBody(t *testing.T) { + const shortBody = "Short body, not 123 bytes." + br := bufio.NewReader(strings.NewReader("HTTP/1.1 200 OK\r\n" + + "Content-Length: 123\r\n" + + "\r\n" + + shortBody)) + res, err := ReadResponse(br, &Request{Method: "GET"}) + if err != nil { + t.Fatal(err) + } + if res.ContentLength != 123 { + t.Fatalf("Content-Length = %d; want 123", res.ContentLength) + } + var buf bytes.Buffer + n, err := io.Copy(&buf, res.Body) + if n != int64(len(shortBody)) { + t.Errorf("Copied %d bytes; want %d, len(%q)", n, len(shortBody), shortBody) + } + if buf.String() != shortBody { + t.Errorf("Read body %q; want %q", buf.String(), shortBody) + } + if err != io.ErrUnexpectedEOF { + t.Errorf("io.Copy error = %#v; want io.ErrUnexpectedEOF", err) + } +} + +func TestReadResponseUnexpectedEOF(t *testing.T) { + br := bufio.NewReader(strings.NewReader("HTTP/1.1 301 Moved Permanently\r\n" + + "Location: http://example.com")) + _, err := ReadResponse(br, nil) + if err != io.ErrUnexpectedEOF { + t.Errorf("ReadResponse = %v; want io.ErrUnexpectedEOF", err) + } +} + +func TestNeedsSniff(t *testing.T) { + // needsSniff returns true with an empty response. + r := &response{} + if got, want := r.needsSniff(), true; got != want { + t.Errorf("needsSniff = %t; want %t", got, want) + } + // needsSniff returns false when Content-Type = nil. + r.handlerHeader = Header{"Content-Type": nil} + if got, want := r.needsSniff(), false; got != want { + t.Errorf("needsSniff empty Content-Type = %t; want %t", got, want) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/responsewrite_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,226 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bytes" + "io/ioutil" + "strings" + "testing" +) + +type respWriteTest struct { + Resp Response + Raw string +} + +func TestResponseWrite(t *testing.T) { + respWriteTests := []respWriteTest{ + // HTTP/1.0, identity coding; no trailer + { + Response{ + StatusCode: 503, + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: 6, + }, + + "HTTP/1.0 503 Service Unavailable\r\n" + + "Content-Length: 6\r\n\r\n" + + "abcdef", + }, + // Unchunked response without Content-Length. 
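+	// (With ContentLength -1 on an HTTP/1.0 response there is no framing
+	// Response.Write can emit: no Content-Length and no Transfer-Encoding.
+	// The body simply runs until the connection is closed, as the expected
+	// wire text below shows.)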
+ { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 0, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + }, + "HTTP/1.0 200 OK\r\n" + + "\r\n" + + "abcdef", + }, + // HTTP/1.1 response with unknown length and Connection: close + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + Close: true, + }, + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "abcdef", + }, + // HTTP/1.1 response with unknown length and not setting connection: close + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "\r\n" + + "abcdef", + }, + // HTTP/1.1 response with unknown length and not setting connection: close, but + // setting chunked. + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: -1, + TransferEncoding: []string{"chunked"}, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "6\r\nabcdef\r\n0\r\n\r\n", + }, + // HTTP/1.1 response 0 content-length, and nil body + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: nil, + ContentLength: 0, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + }, + // HTTP/1.1 response 0 content-length, and non-nil empty body + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: 0, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n" + + "\r\n", + }, + // HTTP/1.1 response 0 content-length, and non-nil non-empty body + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq11("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("foo")), + ContentLength: 0, + Close: false, + }, + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "\r\nfoo", + }, + // HTTP/1.1, chunked coding; empty trailer; close + { + Response{ + StatusCode: 200, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{}, + Body: ioutil.NopCloser(strings.NewReader("abcdef")), + ContentLength: 6, + TransferEncoding: []string{"chunked"}, + Close: true, + }, + + "HTTP/1.1 200 OK\r\n" + + "Connection: close\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "6\r\nabcdef\r\n0\r\n\r\n", + }, + + // Header value with a newline character (Issue 914). + // Also tests removal of leading and trailing whitespace. + { + Response{ + StatusCode: 204, + ProtoMajor: 1, + ProtoMinor: 1, + Request: dummyReq("GET"), + Header: Header{ + "Foo": []string{" Bar\nBaz "}, + }, + Body: nil, + ContentLength: 0, + TransferEncoding: []string{"chunked"}, + Close: true, + }, + + "HTTP/1.1 204 No Content\r\n" + + "Connection: close\r\n" + + "Foo: Bar Baz\r\n" + + "\r\n", + }, + + // Want a single Content-Length header. Fixing issue 8180 where + // there were two. 
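+	// (Response.Write, in response.go above, consults the transfer writer
+	// via shouldSendContentLength before adding its "Content-Length: 0"
+	// fallback, so the zero-length POST response below carries exactly one
+	// such header rather than two.)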
+ { + Response{ + StatusCode: StatusOK, + ProtoMajor: 1, + ProtoMinor: 1, + Request: &Request{Method: "POST"}, + Header: Header{}, + ContentLength: 0, + TransferEncoding: nil, + Body: nil, + }, + "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n", + }, + } + + for i := range respWriteTests { + tt := &respWriteTests[i] + var braw bytes.Buffer + err := tt.Resp.Write(&braw) + if err != nil { + t.Errorf("error writing #%d: %s", i, err) + continue + } + sraw := braw.String() + if sraw != tt.Raw { + t.Errorf("Test %d, expecting:\n%q\nGot:\n%q\n", i, tt.Raw, sraw) + continue + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/serve_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2848 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// End-to-end serving tests + +package http_test + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + . "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "testing" + "time" +) + +type dummyAddr string +type oneConnListener struct { + conn net.Conn +} + +func (l *oneConnListener) Accept() (c net.Conn, err error) { + c = l.conn + if c == nil { + err = io.EOF + return + } + err = nil + l.conn = nil + return +} + +func (l *oneConnListener) Close() error { + return nil +} + +func (l *oneConnListener) Addr() net.Addr { + return dummyAddr("test-address") +} + +func (a dummyAddr) Network() string { + return string(a) +} + +func (a dummyAddr) String() string { + return string(a) +} + +type noopConn struct{} + +func (noopConn) LocalAddr() net.Addr { return dummyAddr("local-addr") } +func (noopConn) RemoteAddr() net.Addr { return dummyAddr("remote-addr") } +func (noopConn) SetDeadline(t time.Time) error { return nil } +func (noopConn) SetReadDeadline(t time.Time) error { return nil } +func (noopConn) SetWriteDeadline(t time.Time) error { return nil } + +type rwTestConn struct { + io.Reader + io.Writer + noopConn + + closeFunc func() error // called if non-nil + closec chan bool // else, if non-nil, send value to it on close +} + +func (c *rwTestConn) Close() error { + if c.closeFunc != nil { + return c.closeFunc() + } + select { + case c.closec <- true: + default: + } + return nil +} + +type testConn struct { + readBuf bytes.Buffer + writeBuf bytes.Buffer + closec chan bool // if non-nil, send value to it on close + noopConn +} + +func (c *testConn) Read(b []byte) (int, error) { + return c.readBuf.Read(b) +} + +func (c *testConn) Write(b []byte) (int, error) { + return c.writeBuf.Write(b) +} + +func (c *testConn) Close() error { + select { + case c.closec <- true: + default: + } + return nil +} + +// reqBytes treats req as a request (with \n delimiters) and returns it with \r\n delimiters, +// ending in \r\n\r\n +func reqBytes(req string) []byte { + return []byte(strings.Replace(strings.TrimSpace(req), "\n", "\r\n", -1) + "\r\n\r\n") +} + +type handlerTest struct { + handler Handler +} + +func newHandlerTest(h Handler) handlerTest { + return handlerTest{h} +} + +func (ht handlerTest) rawResponse(req string) string { + reqb := reqBytes(req) + var output bytes.Buffer + conn := &rwTestConn{ + Reader: bytes.NewReader(reqb), 
+ Writer: &output, + closec: make(chan bool, 1), + } + ln := &oneConnListener{conn: conn} + go Serve(ln, ht.handler) + <-conn.closec + return output.String() +} + +func TestConsumingBodyOnNextConn(t *testing.T) { + conn := new(testConn) + for i := 0; i < 2; i++ { + conn.readBuf.Write([]byte( + "POST / HTTP/1.1\r\n" + + "Host: test\r\n" + + "Content-Length: 11\r\n" + + "\r\n" + + "foo=1&bar=1")) + } + + reqNum := 0 + ch := make(chan *Request) + servech := make(chan error) + listener := &oneConnListener{conn} + handler := func(res ResponseWriter, req *Request) { + reqNum++ + ch <- req + } + + go func() { + servech <- Serve(listener, HandlerFunc(handler)) + }() + + var req *Request + req = <-ch + if req == nil { + t.Fatal("Got nil first request.") + } + if req.Method != "POST" { + t.Errorf("For request #1's method, got %q; expected %q", + req.Method, "POST") + } + + req = <-ch + if req == nil { + t.Fatal("Got nil first request.") + } + if req.Method != "POST" { + t.Errorf("For request #2's method, got %q; expected %q", + req.Method, "POST") + } + + if serveerr := <-servech; serveerr != io.EOF { + t.Errorf("Serve returned %q; expected EOF", serveerr) + } +} + +type stringHandler string + +func (s stringHandler) ServeHTTP(w ResponseWriter, r *Request) { + w.Header().Set("Result", string(s)) +} + +var handlers = []struct { + pattern string + msg string +}{ + {"/", "Default"}, + {"/someDir/", "someDir"}, + {"someHost.com/someDir/", "someHost.com/someDir"}, +} + +var vtests = []struct { + url string + expected string +}{ + {"http://localhost/someDir/apage", "someDir"}, + {"http://localhost/otherDir/apage", "Default"}, + {"http://someHost.com/someDir/apage", "someHost.com/someDir"}, + {"http://otherHost.com/someDir/apage", "someDir"}, + {"http://otherHost.com/aDir/apage", "Default"}, + // redirections for trees + {"http://localhost/someDir", "/someDir/"}, + {"http://someHost.com/someDir", "/someDir/"}, +} + +func TestHostHandlers(t *testing.T) { + defer afterTest(t) + mux := NewServeMux() + for _, h := range handlers { + mux.Handle(h.pattern, stringHandler(h.msg)) + } + ts := httptest.NewServer(mux) + defer ts.Close() + + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + cc := httputil.NewClientConn(conn, nil) + for _, vt := range vtests { + var r *Response + var req Request + if req.URL, err = url.Parse(vt.url); err != nil { + t.Errorf("cannot parse url: %v", err) + continue + } + if err := cc.Write(&req); err != nil { + t.Errorf("writing request: %v", err) + continue + } + r, err := cc.Read(&req) + if err != nil { + t.Errorf("reading response: %v", err) + continue + } + switch r.StatusCode { + case StatusOK: + s := r.Header.Get("Result") + if s != vt.expected { + t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected) + } + case StatusMovedPermanently: + s := r.Header.Get("Location") + if s != vt.expected { + t.Errorf("Get(%q) = %q, want %q", vt.url, s, vt.expected) + } + default: + t.Errorf("Get(%q) unhandled status code %d", vt.url, r.StatusCode) + } + } +} + +var serveMuxRegister = []struct { + pattern string + h Handler +}{ + {"/dir/", serve(200)}, + {"/search", serve(201)}, + {"codesearch.google.com/search", serve(202)}, + {"codesearch.google.com/", serve(203)}, + {"example.com/", HandlerFunc(checkQueryStringHandler)}, +} + +// serve returns a handler that sends a response with the given code. 
+func serve(code int) HandlerFunc { + return func(w ResponseWriter, r *Request) { + w.WriteHeader(code) + } +} + +// checkQueryStringHandler checks if r.URL.RawQuery has the same value +// as the URL excluding the scheme and the query string and sends 200 +// response code if it is, 500 otherwise. +func checkQueryStringHandler(w ResponseWriter, r *Request) { + u := *r.URL + u.Scheme = "http" + u.Host = r.Host + u.RawQuery = "" + if "http://"+r.URL.RawQuery == u.String() { + w.WriteHeader(200) + } else { + w.WriteHeader(500) + } +} + +var serveMuxTests = []struct { + method string + host string + path string + code int + pattern string +}{ + {"GET", "google.com", "/", 404, ""}, + {"GET", "google.com", "/dir", 301, "/dir/"}, + {"GET", "google.com", "/dir/", 200, "/dir/"}, + {"GET", "google.com", "/dir/file", 200, "/dir/"}, + {"GET", "google.com", "/search", 201, "/search"}, + {"GET", "google.com", "/search/", 404, ""}, + {"GET", "google.com", "/search/foo", 404, ""}, + {"GET", "codesearch.google.com", "/search", 202, "codesearch.google.com/search"}, + {"GET", "codesearch.google.com", "/search/", 203, "codesearch.google.com/"}, + {"GET", "codesearch.google.com", "/search/foo", 203, "codesearch.google.com/"}, + {"GET", "codesearch.google.com", "/", 203, "codesearch.google.com/"}, + {"GET", "images.google.com", "/search", 201, "/search"}, + {"GET", "images.google.com", "/search/", 404, ""}, + {"GET", "images.google.com", "/search/foo", 404, ""}, + {"GET", "google.com", "/../search", 301, "/search"}, + {"GET", "google.com", "/dir/..", 301, ""}, + {"GET", "google.com", "/dir/..", 301, ""}, + {"GET", "google.com", "/dir/./file", 301, "/dir/"}, + + // The /foo -> /foo/ redirect applies to CONNECT requests + // but the path canonicalization does not. + {"CONNECT", "google.com", "/dir", 301, "/dir/"}, + {"CONNECT", "google.com", "/../search", 404, ""}, + {"CONNECT", "google.com", "/dir/..", 200, "/dir/"}, + {"CONNECT", "google.com", "/dir/..", 200, "/dir/"}, + {"CONNECT", "google.com", "/dir/./file", 200, "/dir/"}, +} + +func TestServeMuxHandler(t *testing.T) { + mux := NewServeMux() + for _, e := range serveMuxRegister { + mux.Handle(e.pattern, e.h) + } + + for _, tt := range serveMuxTests { + r := &Request{ + Method: tt.method, + Host: tt.host, + URL: &url.URL{ + Path: tt.path, + }, + } + h, pattern := mux.Handler(r) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, r) + if pattern != tt.pattern || rr.Code != tt.code { + t.Errorf("%s %s %s = %d, %q, want %d, %q", tt.method, tt.host, tt.path, rr.Code, pattern, tt.code, tt.pattern) + } + } +} + +var serveMuxTests2 = []struct { + method string + host string + url string + code int + redirOk bool +}{ + {"GET", "google.com", "/", 404, false}, + {"GET", "example.com", "/test/?example.com/test/", 200, false}, + {"GET", "example.com", "test/?example.com/test/", 200, true}, +} + +// TestServeMuxHandlerRedirects tests that automatic redirects generated by +// mux.Handler() shouldn't clear the request's query string. 
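+// For instance, with the registrations above, a request for "/dir?q=1"
+// should be redirected to "/dir/?q=1" rather than to a bare "/dir/".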
+func TestServeMuxHandlerRedirects(t *testing.T) { + mux := NewServeMux() + for _, e := range serveMuxRegister { + mux.Handle(e.pattern, e.h) + } + + for _, tt := range serveMuxTests2 { + tries := 1 + turl := tt.url + for tries > 0 { + u, e := url.Parse(turl) + if e != nil { + t.Fatal(e) + } + r := &Request{ + Method: tt.method, + Host: tt.host, + URL: u, + } + h, _ := mux.Handler(r) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, r) + if rr.Code != 301 { + if rr.Code != tt.code { + t.Errorf("%s %s %s = %d, want %d", tt.method, tt.host, tt.url, rr.Code, tt.code) + } + break + } + if !tt.redirOk { + t.Errorf("%s %s %s, unexpected redirect", tt.method, tt.host, tt.url) + break + } + turl = rr.HeaderMap.Get("Location") + tries-- + } + if tries < 0 { + t.Errorf("%s %s %s, too many redirects", tt.method, tt.host, tt.url) + } + } +} + +// Tests for http://code.google.com/p/go/issues/detail?id=900 +func TestMuxRedirectLeadingSlashes(t *testing.T) { + paths := []string{"//foo.txt", "///foo.txt", "/../../foo.txt"} + for _, path := range paths { + req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + path + " HTTP/1.1\r\nHost: test\r\n\r\n"))) + if err != nil { + t.Errorf("%s", err) + } + mux := NewServeMux() + resp := httptest.NewRecorder() + + mux.ServeHTTP(resp, req) + + if loc, expected := resp.Header().Get("Location"), "/foo.txt"; loc != expected { + t.Errorf("Expected Location header set to %q; got %q", expected, loc) + return + } + + if code, expected := resp.Code, StatusMovedPermanently; code != expected { + t.Errorf("Expected response code of StatusMovedPermanently; got %d", code) + return + } + } +} + +func TestServerTimeouts(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + reqNum := 0 + ts := httptest.NewUnstartedServer(HandlerFunc(func(res ResponseWriter, req *Request) { + reqNum++ + fmt.Fprintf(res, "req=%d", reqNum) + })) + ts.Config.ReadTimeout = 250 * time.Millisecond + ts.Config.WriteTimeout = 250 * time.Millisecond + ts.Start() + defer ts.Close() + + // Hit the HTTP server successfully. + tr := &Transport{DisableKeepAlives: true} // they interfere with this test + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + r, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("http Get #1: %v", err) + } + got, _ := ioutil.ReadAll(r.Body) + expected := "req=1" + if string(got) != expected { + t.Errorf("Unexpected response for request #1; got %q; expected %q", + string(got), expected) + } + + // Slow client that should timeout. + t1 := time.Now() + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + buf := make([]byte, 1) + n, err := conn.Read(buf) + latency := time.Since(t1) + if n != 0 || err != io.EOF { + t.Errorf("Read = %v, %v, wanted %v, %v", n, err, 0, io.EOF) + } + if latency < 200*time.Millisecond /* fudge from 250 ms above */ { + t.Errorf("got EOF after %s, want >= %s", latency, 200*time.Millisecond) + } + + // Hit the HTTP server successfully again, verifying that the + // previous slow connection didn't run our handler. 
(that we
+	// get "req=2", not "req=3")
+	r, err = Get(ts.URL)
+	if err != nil {
+		t.Fatalf("http Get #2: %v", err)
+	}
+	got, _ = ioutil.ReadAll(r.Body)
+	expected = "req=2"
+	if string(got) != expected {
+		t.Errorf("Get #2 got %q, want %q", string(got), expected)
+	}
+
+	if !testing.Short() {
+		conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+		if err != nil {
+			t.Fatalf("Dial: %v", err)
+		}
+		defer conn.Close()
+		go io.Copy(ioutil.Discard, conn)
+		for i := 0; i < 5; i++ {
+			_, err := conn.Write([]byte("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"))
+			if err != nil {
+				t.Fatalf("on write %d: %v", i, err)
+			}
+			time.Sleep(ts.Config.ReadTimeout / 2)
+		}
+	}
+}
+
+// golang.org/issue/4741 -- setting only a write timeout that triggers
+// shouldn't cause a handler to block forever on reads (next HTTP
+// request) that will never happen.
+func TestOnlyWriteTimeout(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("skipping test; see http://golang.org/issue/7237")
+	}
+	defer afterTest(t)
+	var conn net.Conn
+	var afterTimeoutErrc = make(chan error, 1)
+	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, req *Request) {
+		buf := make([]byte, 512<<10)
+		_, err := w.Write(buf)
+		if err != nil {
+			t.Errorf("handler Write error: %v", err)
+			return
+		}
+		conn.SetWriteDeadline(time.Now().Add(-30 * time.Second))
+		_, err = w.Write(buf)
+		afterTimeoutErrc <- err
+	}))
+	ts.Listener = trackLastConnListener{ts.Listener, &conn}
+	ts.Start()
+	defer ts.Close()
+
+	tr := &Transport{DisableKeepAlives: false}
+	defer tr.CloseIdleConnections()
+	c := &Client{Transport: tr}
+
+	errc := make(chan error)
+	go func() {
+		res, err := c.Get(ts.URL)
+		if err != nil {
+			errc <- err
+			return
+		}
+		_, err = io.Copy(ioutil.Discard, res.Body)
+		errc <- err
+	}()
+	select {
+	case err := <-errc:
+		if err == nil {
+			t.Errorf("expected an error from Get request")
+		}
+	case <-time.After(5 * time.Second):
+		t.Fatal("timeout waiting for Get error")
+	}
+	if err := <-afterTimeoutErrc; err == nil {
+		t.Error("expected write error after timeout")
+	}
+}
+
+// trackLastConnListener tracks the last net.Conn that was accepted.
+type trackLastConnListener struct {
+	net.Listener
+	last *net.Conn // destination
+}
+
+func (l trackLastConnListener) Accept() (c net.Conn, err error) {
+	c, err = l.Listener.Accept()
+	*l.last = c
+	return
+}
+
+// TestIdentityResponse verifies that a handler can unset chunking by
+// declaring an explicit Content-Length (an identity response), and that
+// writing more than the declared length fails with ErrContentLength while
+// writing less causes the connection to be closed.
+func TestIdentityResponse(t *testing.T) {
+	defer afterTest(t)
+	handler := HandlerFunc(func(rw ResponseWriter, req *Request) {
+		rw.Header().Set("Content-Length", "3")
+		rw.Header().Set("Transfer-Encoding", req.FormValue("te"))
+		switch {
+		case req.FormValue("overwrite") == "1":
+			_, err := rw.Write([]byte("foo TOO LONG"))
+			if err != ErrContentLength {
+				t.Errorf("expected ErrContentLength; got %v", err)
+			}
+		case req.FormValue("underwrite") == "1":
+			rw.Header().Set("Content-Length", "500")
+			rw.Write([]byte("too short"))
+		default:
+			rw.Write([]byte("foo"))
+		}
+	})
+
+	ts := httptest.NewServer(handler)
+	defer ts.Close()
+
+	// Note: this relies on the assumption (which is true) that
+	// Get sends HTTP/1.1 or greater requests. Otherwise the
+	// server wouldn't have the choice to send back chunked
+	// responses.
+ for _, te := range []string{"", "identity"} { + url := ts.URL + "/?te=" + te + res, err := Get(url) + if err != nil { + t.Fatalf("error with Get of %s: %v", url, err) + } + if cl, expected := res.ContentLength, int64(3); cl != expected { + t.Errorf("for %s expected res.ContentLength of %d; got %d", url, expected, cl) + } + if cl, expected := res.Header.Get("Content-Length"), "3"; cl != expected { + t.Errorf("for %s expected Content-Length header of %q; got %q", url, expected, cl) + } + if tl, expected := len(res.TransferEncoding), 0; tl != expected { + t.Errorf("for %s expected len(res.TransferEncoding) of %d; got %d (%v)", + url, expected, tl, res.TransferEncoding) + } + res.Body.Close() + } + + // Verify that ErrContentLength is returned + url := ts.URL + "/?overwrite=1" + res, err := Get(url) + if err != nil { + t.Fatalf("error with Get of %s: %v", url, err) + } + res.Body.Close() + + // Verify that the connection is closed when the declared Content-Length + // is larger than what the handler wrote. + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + _, err = conn.Write([]byte("GET /?underwrite=1 HTTP/1.1\r\nHost: foo\r\n\r\n")) + if err != nil { + t.Fatalf("error writing: %v", err) + } + + // The ReadAll will hang for a failing test, so use a Timer to + // fail explicitly. + goTimeout(t, 2*time.Second, func() { + got, _ := ioutil.ReadAll(conn) + expectedSuffix := "\r\n\r\ntoo short" + if !strings.HasSuffix(string(got), expectedSuffix) { + t.Errorf("Expected output to end with %q; got response body %q", + expectedSuffix, string(got)) + } + }) +} + +func testTCPConnectionCloses(t *testing.T, req string, h Handler) { + defer afterTest(t) + s := httptest.NewServer(h) + defer s.Close() + + conn, err := net.Dial("tcp", s.Listener.Addr().String()) + if err != nil { + t.Fatal("dial error:", err) + } + defer conn.Close() + + _, err = fmt.Fprint(conn, req) + if err != nil { + t.Fatal("print error:", err) + } + + r := bufio.NewReader(conn) + res, err := ReadResponse(r, &Request{Method: "GET"}) + if err != nil { + t.Fatal("ReadResponse error:", err) + } + + didReadAll := make(chan bool, 1) + go func() { + select { + case <-time.After(5 * time.Second): + t.Error("body not closed after 5s") + return + case <-didReadAll: + } + }() + + _, err = ioutil.ReadAll(r) + if err != nil { + t.Fatal("read error:", err) + } + didReadAll <- true + + if !res.Close { + t.Errorf("Response.Close = false; want true") + } +} + +// TestServeHTTP10Close verifies that HTTP/1.0 requests won't be kept alive. +func TestServeHTTP10Close(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.0\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + ServeFile(w, r, "testdata/file") + })) +} + +// TestClientCanClose verifies that clients can also force a connection to close. +func TestClientCanClose(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.1\r\nConnection: close\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + // Nothing. + })) +} + +// TestHandlersCanSetConnectionClose verifies that handlers can force a connection to close, +// even for HTTP/1.1 requests. 
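+
+// responseSignalsClose is a hypothetical helper (not called by these tests)
+// that distills the check testTCPConnectionCloses performs above: write one
+// raw request, read one response, and report whether the server indicated
+// that it will close the connection afterwards.
+func responseSignalsClose(addr, rawReq string) (bool, error) {
+	conn, err := net.Dial("tcp", addr)
+	if err != nil {
+		return false, err
+	}
+	defer conn.Close()
+	if _, err := fmt.Fprint(conn, rawReq); err != nil {
+		return false, err
+	}
+	res, err := ReadResponse(bufio.NewReader(conn), &Request{Method: "GET"})
+	if err != nil {
+		return false, err
+	}
+	return res.Close, nil
+}
+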
+func TestHandlersCanSetConnectionClose11(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.1\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + })) +} + +func TestHandlersCanSetConnectionClose10(t *testing.T) { + testTCPConnectionCloses(t, "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + })) +} + +func TestSetsRemoteAddr(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "%s", r.RemoteAddr) + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("ReadAll error: %v", err) + } + ip := string(body) + if !strings.HasPrefix(ip, "127.0.0.1:") && !strings.HasPrefix(ip, "[::1]:") { + t.Fatalf("Expected local addr; got %q", ip) + } +} + +func TestChunkedResponseHeaders(t *testing.T) { + defer afterTest(t) + log.SetOutput(ioutil.Discard) // is noisy otherwise + defer log.SetOutput(os.Stderr) + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "intentional gibberish") // we check that this is deleted + w.(Flusher).Flush() + fmt.Fprintf(w, "I am a chunked response.") + })) + defer ts.Close() + + res, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get error: %v", err) + } + defer res.Body.Close() + if g, e := res.ContentLength, int64(-1); g != e { + t.Errorf("expected ContentLength of %d; got %d", e, g) + } + if g, e := res.TransferEncoding, []string{"chunked"}; !reflect.DeepEqual(g, e) { + t.Errorf("expected TransferEncoding of %v; got %v", e, g) + } + if _, haveCL := res.Header["Content-Length"]; haveCL { + t.Errorf("Unexpected Content-Length") + } +} + +// Test304Responses verifies that 304s don't declare that they're +// chunking in their response headers and aren't allowed to produce +// output. +func Test304Responses(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.WriteHeader(StatusNotModified) + _, err := w.Write([]byte("illegal body")) + if err != ErrBodyNotAllowed { + t.Errorf("on Write, expected ErrBodyNotAllowed, got %v", err) + } + })) + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + } + if len(res.TransferEncoding) > 0 { + t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Error(err) + } + if len(body) > 0 { + t.Errorf("got unexpected body %q", string(body)) + } +} + +// TestHeadResponses verifies that all MIME type sniffing and Content-Length +// counting of GET requests also happens on HEAD requests. 
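+
+// sniffedType is an illustrative wrapper (hypothetical; nothing here calls
+// it) around DetectContentType, the sniffer behind the behavior the next
+// test checks: when a handler sets no explicit Content-Type, the first bytes
+// written determine the type, e.g. a body beginning "<html>" is reported as
+// "text/html; charset=utf-8".
+func sniffedType(body string) string {
+	return DetectContentType([]byte(body))
+}
+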
+func TestHeadResponses(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		_, err := w.Write([]byte("<html>"))
+		if err != nil {
+			t.Errorf("ResponseWriter.Write: %v", err)
+		}
+
+		// Also exercise the ReaderFrom path
+		_, err = io.Copy(w, strings.NewReader("789a"))
+		if err != nil {
+			t.Errorf("Copy(ResponseWriter, ...): %v", err)
+		}
+	}))
+	defer ts.Close()
+	res, err := Head(ts.URL)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(res.TransferEncoding) > 0 {
+		t.Errorf("expected no TransferEncoding; got %v", res.TransferEncoding)
+	}
+	if ct := res.Header.Get("Content-Type"); ct != "text/html; charset=utf-8" {
+		t.Errorf("Content-Type: %q; want text/html; charset=utf-8", ct)
+	}
+	if v := res.ContentLength; v != 10 {
+		t.Errorf("Content-Length: %d; want 10", v)
+	}
+	body, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		t.Error(err)
+	}
+	if len(body) > 0 {
+		t.Errorf("got unexpected body %q", string(body))
+	}
+}
+
+func TestTLSHandshakeTimeout(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("skipping test; see http://golang.org/issue/7237")
+	}
+	defer afterTest(t)
+	ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+	errc := make(chanWriter, 10) // but only expecting 1
+	ts.Config.ReadTimeout = 250 * time.Millisecond
+	ts.Config.ErrorLog = log.New(errc, "", 0)
+	ts.StartTLS()
+	defer ts.Close()
+	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+	if err != nil {
+		t.Fatalf("Dial: %v", err)
+	}
+	defer conn.Close()
+	goTimeout(t, 10*time.Second, func() {
+		var buf [1]byte
+		n, err := conn.Read(buf[:])
+		if err == nil || n != 0 {
+			t.Errorf("Read = %d, %v; want an error and no bytes", n, err)
+		}
+	})
+	select {
+	case v := <-errc:
+		if !strings.Contains(v, "timeout") && !strings.Contains(v, "TLS handshake") {
+			t.Errorf("expected a TLS handshake timeout error; got %q", v)
+		}
+	case <-time.After(5 * time.Second):
+		t.Errorf("timeout waiting for logged error")
+	}
+}
+
+func TestTLSServer(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		if r.TLS != nil {
+			w.Header().Set("X-TLS-Set", "true")
+			if r.TLS.HandshakeComplete {
+				w.Header().Set("X-TLS-HandshakeComplete", "true")
+			}
+		}
+	}))
+	ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+	defer ts.Close()
+
+	// Connect an idle TCP connection to this server before we run
+	// our real tests. This idle connection used to block forever
+	// in the TLS handshake, preventing future connections from
+	// being accepted. It may prevent future accidental blocking
+	// in newConn.
+ idleConn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer idleConn.Close() + goTimeout(t, 10*time.Second, func() { + if !strings.HasPrefix(ts.URL, "https://") { + t.Errorf("expected test TLS server to start with https://, got %q", ts.URL) + return + } + noVerifyTransport := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + client := &Client{Transport: noVerifyTransport} + res, err := client.Get(ts.URL) + if err != nil { + t.Error(err) + return + } + if res == nil { + t.Errorf("got nil Response") + return + } + defer res.Body.Close() + if res.Header.Get("X-TLS-Set") != "true" { + t.Errorf("expected X-TLS-Set response header") + return + } + if res.Header.Get("X-TLS-HandshakeComplete") != "true" { + t.Errorf("expected X-TLS-HandshakeComplete header") + } + }) +} + +type serverExpectTest struct { + contentLength int // of request body + chunked bool + expectation string // e.g. "100-continue" + readBody bool // whether handler should read the body (if false, sends StatusUnauthorized) + expectedResponse string // expected substring in first line of http response +} + +func expectTest(contentLength int, expectation string, readBody bool, expectedResponse string) serverExpectTest { + return serverExpectTest{ + contentLength: contentLength, + expectation: expectation, + readBody: readBody, + expectedResponse: expectedResponse, + } +} + +var serverExpectTests = []serverExpectTest{ + // Normal 100-continues, case-insensitive. + expectTest(100, "100-continue", true, "100 Continue"), + expectTest(100, "100-cOntInUE", true, "100 Continue"), + + // No 100-continue. + expectTest(100, "", true, "200 OK"), + + // 100-continue but requesting client to deny us, + // so it never reads the body. + expectTest(100, "100-continue", false, "401 Unauthorized"), + // Likewise without 100-continue: + expectTest(100, "", false, "401 Unauthorized"), + + // Non-standard expectations are failures + expectTest(0, "a-pony", false, "417 Expectation Failed"), + + // Expect-100 requested but no body (is apparently okay: Issue 7625) + expectTest(0, "100-continue", true, "200 OK"), + // Expect-100 requested but handler doesn't read the body + expectTest(0, "100-continue", false, "401 Unauthorized"), + // Expect-100 continue with no body, but a chunked body. + { + expectation: "100-continue", + readBody: true, + chunked: true, + expectedResponse: "100 Continue", + }, +} + +// Tests that the server responds to the "Expect" request header +// correctly. +func TestServerExpect(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + // Note using r.FormValue("readbody") because for POST + // requests that would read from r.Body, which we only + // conditionally want to do. + if strings.Contains(r.URL.RawQuery, "readbody=true") { + ioutil.ReadAll(r.Body) + w.Write([]byte("Hi")) + } else { + w.WriteHeader(StatusUnauthorized) + } + })) + defer ts.Close() + + runTest := func(test serverExpectTest) { + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer conn.Close() + + // Only send the body immediately if we're acting like an HTTP client + // that doesn't send 100-continue expectations. 
+ writeBody := test.contentLength != 0 && strings.ToLower(test.expectation) != "100-continue" + + go func() { + contentLen := fmt.Sprintf("Content-Length: %d", test.contentLength) + if test.chunked { + contentLen = "Transfer-Encoding: chunked" + } + _, err := fmt.Fprintf(conn, "POST /?readbody=%v HTTP/1.1\r\n"+ + "Connection: close\r\n"+ + "%s\r\n"+ + "Expect: %s\r\nHost: foo\r\n\r\n", + test.readBody, contentLen, test.expectation) + if err != nil { + t.Errorf("On test %#v, error writing request headers: %v", test, err) + return + } + if writeBody { + var targ io.WriteCloser = struct { + io.Writer + io.Closer + }{ + conn, + ioutil.NopCloser(nil), + } + if test.chunked { + targ = httputil.NewChunkedWriter(conn) + } + body := strings.Repeat("A", test.contentLength) + _, err = fmt.Fprint(targ, body) + if err == nil { + err = targ.Close() + } + if err != nil { + if !test.readBody { + // Server likely already hung up on us. + // See larger comment below. + t.Logf("On test %#v, acceptable error writing request body: %v", test, err) + return + } + t.Errorf("On test %#v, error writing request body: %v", test, err) + } + } + }() + bufr := bufio.NewReader(conn) + line, err := bufr.ReadString('\n') + if err != nil { + if writeBody && !test.readBody { + // This is an acceptable failure due to a possible TCP race: + // We were still writing data and the server hung up on us. A TCP + // implementation may send a RST if our request body data was known + // to be lost, which may trigger our reads to fail. + // See RFC 1122 page 88. + t.Logf("On test %#v, acceptable error from ReadString: %v", test, err) + return + } + t.Fatalf("On test %#v, ReadString: %v", test, err) + } + if !strings.Contains(line, test.expectedResponse) { + t.Errorf("On test %#v, got first line = %q; want %q", test, line, test.expectedResponse) + } + } + + for _, test := range serverExpectTests { + runTest(test) + } +} + +// Under a ~256KB (maxPostHandlerReadBytes) threshold, the server +// should consume client request bodies that a handler didn't read. +func TestServerUnreadRequestBodyLittle(t *testing.T) { + conn := new(testConn) + body := strings.Repeat("x", 100<<10) + conn.readBuf.Write([]byte(fmt.Sprintf( + "POST / HTTP/1.1\r\n"+ + "Host: test\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n", len(body)))) + conn.readBuf.Write([]byte(body)) + + done := make(chan bool) + + ls := &oneConnListener{conn} + go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) { + defer close(done) + if conn.readBuf.Len() < len(body)/2 { + t.Errorf("on request, read buffer length is %d; expected about 100 KB", conn.readBuf.Len()) + } + rw.WriteHeader(200) + rw.(Flusher).Flush() + if g, e := conn.readBuf.Len(), 0; g != e { + t.Errorf("after WriteHeader, read buffer length is %d; want %d", g, e) + } + if c := rw.Header().Get("Connection"); c != "" { + t.Errorf(`Connection header = %q; want ""`, c) + } + })) + <-done +} + +// Over a ~256KB (maxPostHandlerReadBytes) threshold, the server +// should ignore client request bodies that a handler didn't read +// and close the connection. 
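+
+// unreadBodyClosesConn is a hypothetical helper (not used by these tests)
+// illustrating the threshold described above: it POSTs a body that the
+// handler will not read and reports whether the server's response announced
+// "Connection: close". Below maxPostHandlerReadBytes this should be false;
+// above it, true.
+func unreadBodyClosesConn(addr string, bodySize int) (bool, error) {
+	conn, err := net.Dial("tcp", addr)
+	if err != nil {
+		return false, err
+	}
+	defer conn.Close()
+	body := strings.Repeat("x", bodySize)
+	if _, err := fmt.Fprintf(conn, "POST / HTTP/1.1\r\nHost: test\r\nContent-Length: %d\r\n\r\n%s",
+		len(body), body); err != nil {
+		return false, err
+	}
+	res, err := ReadResponse(bufio.NewReader(conn), &Request{Method: "POST"})
+	if err != nil {
+		return false, err
+	}
+	return res.Close, nil
+}
+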
+func TestServerUnreadRequestBodyLarge(t *testing.T) { + conn := new(testConn) + body := strings.Repeat("x", 1<<20) + conn.readBuf.Write([]byte(fmt.Sprintf( + "POST / HTTP/1.1\r\n"+ + "Host: test\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n", len(body)))) + conn.readBuf.Write([]byte(body)) + conn.closec = make(chan bool, 1) + + ls := &oneConnListener{conn} + go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) { + if conn.readBuf.Len() < len(body)/2 { + t.Errorf("on request, read buffer length is %d; expected about 1MB", conn.readBuf.Len()) + } + rw.WriteHeader(200) + rw.(Flusher).Flush() + if conn.readBuf.Len() < len(body)/2 { + t.Errorf("post-WriteHeader, read buffer length is %d; expected about 1MB", conn.readBuf.Len()) + } + })) + <-conn.closec + + if res := conn.writeBuf.String(); !strings.Contains(res, "Connection: close") { + t.Errorf("Expected a Connection: close header; got response: %s", res) + } +} + +func TestTimeoutHandler(t *testing.T) { + defer afterTest(t) + sendHi := make(chan bool, 1) + writeErrors := make(chan error, 1) + sayHi := HandlerFunc(func(w ResponseWriter, r *Request) { + <-sendHi + _, werr := w.Write([]byte("hi")) + writeErrors <- werr + }) + timeout := make(chan time.Time, 1) // write to this to force timeouts + ts := httptest.NewServer(NewTestTimeoutHandler(sayHi, timeout)) + defer ts.Close() + + // Succeed without timing out: + sendHi <- true + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + } + if g, e := res.StatusCode, StatusOK; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + body, _ := ioutil.ReadAll(res.Body) + if g, e := string(body), "hi"; g != e { + t.Errorf("got body %q; expected %q", g, e) + } + if g := <-writeErrors; g != nil { + t.Errorf("got unexpected Write error on first request: %v", g) + } + + // Times out: + timeout <- time.Time{} + res, err = Get(ts.URL) + if err != nil { + t.Error(err) + } + if g, e := res.StatusCode, StatusServiceUnavailable; g != e { + t.Errorf("got res.StatusCode %d; expected %d", g, e) + } + body, _ = ioutil.ReadAll(res.Body) + if !strings.Contains(string(body), "Timeout") { + t.Errorf("expected timeout body; got %q", string(body)) + } + + // Now make the previously-timed out handler speak again, + // which verifies the panic is handled: + sendHi <- true + if g, e := <-writeErrors, ErrHandlerTimeout; g != e { + t.Errorf("expected Write error of %v; got %v", e, g) + } +} + +// Verifies we don't path.Clean() on the wrong parts in redirects. +func TestRedirectMunging(t *testing.T) { + req, _ := NewRequest("GET", "http://example.com/", nil) + + resp := httptest.NewRecorder() + Redirect(resp, req, "/foo?next=http://bar.com/", 302) + if g, e := resp.Header().Get("Location"), "/foo?next=http://bar.com/"; g != e { + t.Errorf("Location header was %q; want %q", g, e) + } + + resp = httptest.NewRecorder() + Redirect(resp, req, "http://localhost:8080/_ah/login?continue=http://localhost:8080/", 302) + if g, e := resp.Header().Get("Location"), "http://localhost:8080/_ah/login?continue=http://localhost:8080/"; g != e { + t.Errorf("Location header was %q; want %q", g, e) + } +} + +func TestRedirectBadPath(t *testing.T) { + // This used to crash. It's not valid input (bad path), but it + // shouldn't crash. 
+	rr := httptest.NewRecorder()
+	req := &Request{
+		Method: "GET",
+		URL: &url.URL{
+			Scheme: "http",
+			Path:   "not-empty-but-no-leading-slash", // bogus
+		},
+	}
+	Redirect(rr, req, "", 304)
+	if rr.Code != 304 {
+		t.Errorf("Code = %d; want 304", rr.Code)
+	}
+}
+
+// TestZeroLengthPostAndResponse exercises an optimization done by the Transport:
+// when there is no body (either because the method doesn't permit a body, or an
+// explicit Content-Length of zero is present), then the transport can re-use the
+// connection immediately. But when it re-uses the connection, it typically closes
+// the previous request's body, which is not optimal for zero-length bodies,
+// as the client would then see http.ErrBodyReadAfterClose and not 0, io.EOF.
+func TestZeroLengthPostAndResponse(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) {
+		all, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			t.Fatalf("handler ReadAll: %v", err)
+		}
+		if len(all) != 0 {
+			t.Errorf("handler got %d bytes; expected 0", len(all))
+		}
+		rw.Header().Set("Content-Length", "0")
+	}))
+	defer ts.Close()
+
+	req, err := NewRequest("POST", ts.URL, strings.NewReader(""))
+	if err != nil {
+		t.Fatal(err)
+	}
+	req.ContentLength = 0
+
+	var resp [5]*Response
+	for i := range resp {
+		resp[i], err = DefaultClient.Do(req)
+		if err != nil {
+			t.Fatalf("client post #%d: %v", i, err)
+		}
+	}
+
+	for i := range resp {
+		all, err := ioutil.ReadAll(resp[i].Body)
+		if err != nil {
+			t.Fatalf("req #%d: client ReadAll: %v", i, err)
+		}
+		if len(all) != 0 {
+			t.Errorf("req #%d: client got %d bytes; expected 0", i, len(all))
+		}
+	}
+}
+
+func TestHandlerPanicNil(t *testing.T) {
+	testHandlerPanic(t, false, nil)
+}
+
+func TestHandlerPanic(t *testing.T) {
+	testHandlerPanic(t, false, "intentional death for testing")
+}
+
+func TestHandlerPanicWithHijack(t *testing.T) {
+	testHandlerPanic(t, true, "intentional death for testing")
+}
+
+func testHandlerPanic(t *testing.T, withHijack bool, panicValue interface{}) {
+	defer afterTest(t)
+	// Unlike the other tests that set the log output to ioutil.Discard
+	// to quiet the output, this test uses a pipe. The pipe serves three
+	// purposes:
+	//
+	// 1) The log.Print from the http server (generated by the caught
+	// panic) will go to the pipe instead of stderr, making the
+	// output quiet.
+	//
+	// 2) We read from the pipe to verify that the handler
+	// actually caught the panic and logged something.
+	//
+	// 3) The blocking Read call prevents this TestHandlerPanic
+	// function from exiting before the HTTP server handler
+	// finishes crashing. If this test function exited too
+	// early (and its defer log.SetOutput(os.Stderr) ran),
+	// then the crash output could spill into the next test.
+	pr, pw := io.Pipe()
+	log.SetOutput(pw)
+	defer log.SetOutput(os.Stderr)
+	defer pw.Close()
+
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		if withHijack {
+			rwc, _, err := w.(Hijacker).Hijack()
+			if err != nil {
+				t.Logf("unexpected error: %v", err)
+			}
+			defer rwc.Close()
+		}
+		panic(panicValue)
+	}))
+	defer ts.Close()
+
+	// Do a blocking read on the log output pipe so its logging
+	// doesn't bleed into the next test. But wait only 5 seconds
+	// for it.
+ done := make(chan bool, 1) + go func() { + buf := make([]byte, 4<<10) + _, err := pr.Read(buf) + pr.Close() + if err != nil && err != io.EOF { + t.Error(err) + } + done <- true + }() + + _, err := Get(ts.URL) + if err == nil { + t.Logf("expected an error") + } + + if panicValue == nil { + return + } + + select { + case <-done: + return + case <-time.After(5 * time.Second): + t.Fatal("expected server handler to log an error") + } +} + +func TestNoDate(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header()["Date"] = nil + })) + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + _, present := res.Header["Date"] + if present { + t.Fatalf("Expected no Date header; got %v", res.Header["Date"]) + } +} + +func TestStripPrefix(t *testing.T) { + defer afterTest(t) + h := HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("X-Path", r.URL.Path) + }) + ts := httptest.NewServer(StripPrefix("/foo", h)) + defer ts.Close() + + res, err := Get(ts.URL + "/foo/bar") + if err != nil { + t.Fatal(err) + } + if g, e := res.Header.Get("X-Path"), "/bar"; g != e { + t.Errorf("test 1: got %s, want %s", g, e) + } + res.Body.Close() + + res, err = Get(ts.URL + "/bar") + if err != nil { + t.Fatal(err) + } + if g, e := res.StatusCode, 404; g != e { + t.Errorf("test 2: got status %v, want %v", g, e) + } + res.Body.Close() +} + +func TestRequestLimit(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + t.Fatalf("didn't expect to get request in Handler") + })) + defer ts.Close() + req, _ := NewRequest("GET", ts.URL, nil) + var bytesPerHeader = len("header12345: val12345\r\n") + for i := 0; i < ((DefaultMaxHeaderBytes+4096)/bytesPerHeader)+1; i++ { + req.Header.Set(fmt.Sprintf("header%05d", i), fmt.Sprintf("val%05d", i)) + } + res, err := DefaultClient.Do(req) + if err != nil { + // Some HTTP clients may fail on this undefined behavior (server replying and + // closing the connection while the request is still being written), but + // we do support it (at least currently), so we expect a response below. + t.Fatalf("Do: %v", err) + } + defer res.Body.Close() + if res.StatusCode != 413 { + t.Fatalf("expected 413 response status; got: %d %s", res.StatusCode, res.Status) + } +} + +type neverEnding byte + +func (b neverEnding) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(b) + } + return len(p), nil +} + +type countReader struct { + r io.Reader + n *int64 +} + +func (cr countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + atomic.AddInt64(cr.n, int64(n)) + return +} + +func TestRequestBodyLimit(t *testing.T) { + defer afterTest(t) + const limit = 1 << 20 + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + r.Body = MaxBytesReader(w, r.Body, limit) + n, err := io.Copy(ioutil.Discard, r.Body) + if err == nil { + t.Errorf("expected error from io.Copy") + } + if n != limit { + t.Errorf("io.Copy = %d, want %d", n, limit) + } + })) + defer ts.Close() + + nWritten := new(int64) + req, _ := NewRequest("POST", ts.URL, io.LimitReader(countReader{neverEnding('a'), nWritten}, limit*200)) + + // Send the POST, but don't care it succeeds or not. The + // remote side is going to reply and then close the TCP + // connection, and HTTP doesn't really define if that's + // allowed or not. 
Some HTTP clients will get the response
+	// and some (like ours, currently) will complain that the
+	// request write failed, without reading the response.
+	//
+	// But that's okay, since what we're really testing is that
+	// the remote side hung up on us before we wrote too much.
+	_, _ = DefaultClient.Do(req)
+
+	if atomic.LoadInt64(nWritten) > limit*100 {
+		t.Errorf("handler restricted the request body to %d bytes, but client managed to write %d",
+			limit, atomic.LoadInt64(nWritten))
+	}
+}
+
+// TestClientWriteShutdown tests that if the client shuts down the write
+// side of their TCP connection, the server doesn't send a 400 Bad Request.
+func TestClientWriteShutdown(t *testing.T) {
+	if runtime.GOOS == "plan9" {
+		t.Skip("skipping test; see http://golang.org/issue/7237")
+	}
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {}))
+	defer ts.Close()
+	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+	if err != nil {
+		t.Fatalf("Dial: %v", err)
+	}
+	err = conn.(*net.TCPConn).CloseWrite()
+	if err != nil {
+		t.Fatalf("CloseWrite: %v", err)
+	}
+	donec := make(chan bool)
+	go func() {
+		defer close(donec)
+		bs, err := ioutil.ReadAll(conn)
+		if err != nil {
+			t.Fatalf("ReadAll: %v", err)
+		}
+		got := string(bs)
+		if got != "" {
+			t.Errorf("read %q from server; want nothing", got)
+		}
+	}()
+	select {
+	case <-donec:
+	case <-time.After(10 * time.Second):
+		t.Fatalf("timeout")
+	}
+}
+
+// Tests that chunked server responses that write 1 byte at a time are
+// buffered before chunk headers are added, not after chunk headers.
+func TestServerBufferedChunking(t *testing.T) {
+	conn := new(testConn)
+	conn.readBuf.Write([]byte("GET / HTTP/1.1\r\n\r\n"))
+	conn.closec = make(chan bool, 1)
+	ls := &oneConnListener{conn}
+	go Serve(ls, HandlerFunc(func(rw ResponseWriter, req *Request) {
+		rw.(Flusher).Flush() // force the Header to be sent, in chunking mode, not counting the length
+		rw.Write([]byte{'x'})
+		rw.Write([]byte{'y'})
+		rw.Write([]byte{'z'})
+	}))
+	<-conn.closec
+	if !bytes.HasSuffix(conn.writeBuf.Bytes(), []byte("\r\n\r\n3\r\nxyz\r\n0\r\n\r\n")) {
+		t.Errorf("response didn't end with a single 3 byte 'xyz' chunk; got:\n%q",
+			conn.writeBuf.Bytes())
+	}
+}
+
+// Tests that the server flushes its response headers out when it's
+// ignoring the response body and waits a bit before forcefully
+// closing the TCP connection, causing the client to get a RST.
+// See http://golang.org/issue/3595
+func TestServerGracefulClose(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		Error(w, "bye", StatusUnauthorized)
+	}))
+	defer ts.Close()
+
+	conn, err := net.Dial("tcp", ts.Listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer conn.Close()
+	const bodySize = 5 << 20
+	req := []byte(fmt.Sprintf("POST / HTTP/1.1\r\nHost: foo.com\r\nContent-Length: %d\r\n\r\n", bodySize))
+	for i := 0; i < bodySize; i++ {
+		req = append(req, 'x')
+	}
+	writeErr := make(chan error)
+	go func() {
+		_, err := conn.Write(req)
+		writeErr <- err
+	}()
+	br := bufio.NewReader(conn)
+	lineNum := 0
+	for {
+		line, err := br.ReadString('\n')
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			t.Fatalf("ReadLine: %v", err)
+		}
+		lineNum++
+		if lineNum == 1 && !strings.Contains(line, "401 Unauthorized") {
+			t.Errorf("Response line = %q; want a 401", line)
+		}
+	}
+	// Wait for write to finish. This is a broken pipe on both
+	// Darwin and Linux, but checking this isn't the point of
+	// the test.
+ <-writeErr +} + +func TestCaseSensitiveMethod(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "get" { + t.Errorf(`Got method %q; want "get"`, r.Method) + } + })) + defer ts.Close() + req, _ := NewRequest("get", ts.URL, nil) + res, err := DefaultClient.Do(req) + if err != nil { + t.Error(err) + return + } + res.Body.Close() +} + +// TestContentLengthZero tests that for both an HTTP/1.0 and HTTP/1.1 +// request (both keep-alive), when a Handler never writes any +// response, the net/http package adds a "Content-Length: 0" response +// header. +func TestContentLengthZero(t *testing.T) { + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) {})) + defer ts.Close() + + for _, version := range []string{"HTTP/1.0", "HTTP/1.1"} { + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + _, err = fmt.Fprintf(conn, "GET / %v\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n", version) + if err != nil { + t.Fatalf("error writing: %v", err) + } + req, _ := NewRequest("GET", "/", nil) + res, err := ReadResponse(bufio.NewReader(conn), req) + if err != nil { + t.Fatalf("error reading response: %v", err) + } + if te := res.TransferEncoding; len(te) > 0 { + t.Errorf("For version %q, Transfer-Encoding = %q; want none", version, te) + } + if cl := res.ContentLength; cl != 0 { + t.Errorf("For version %q, Content-Length = %v; want 0", version, cl) + } + conn.Close() + } +} + +func TestCloseNotifier(t *testing.T) { + defer afterTest(t) + gotReq := make(chan bool, 1) + sawClose := make(chan bool, 1) + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + gotReq <- true + cc := rw.(CloseNotifier).CloseNotify() + <-cc + sawClose <- true + })) + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatalf("error dialing: %v", err) + } + diec := make(chan bool) + go func() { + _, err = fmt.Fprintf(conn, "GET / HTTP/1.1\r\nConnection: keep-alive\r\nHost: foo\r\n\r\n") + if err != nil { + t.Fatal(err) + } + <-diec + conn.Close() + }() +For: + for { + select { + case <-gotReq: + diec <- true + case <-sawClose: + break For + case <-time.After(5 * time.Second): + t.Fatal("timeout") + } + } + ts.Close() +} + +func TestCloseNotifierChanLeak(t *testing.T) { + defer afterTest(t) + req := reqBytes("GET / HTTP/1.0\nHost: golang.org") + for i := 0; i < 20; i++ { + var output bytes.Buffer + conn := &rwTestConn{ + Reader: bytes.NewReader(req), + Writer: &output, + closec: make(chan bool, 1), + } + ln := &oneConnListener{conn: conn} + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + // Ignore the return value and never read from + // it, testing that we don't leak goroutines + // on the sending side: + _ = rw.(CloseNotifier).CloseNotify() + }) + go Serve(ln, handler) + <-conn.closec + } +} + +func TestOptions(t *testing.T) { + uric := make(chan string, 2) // only expect 1, but leave space for 2 + mux := NewServeMux() + mux.HandleFunc("/", func(w ResponseWriter, r *Request) { + uric <- r.RequestURI + }) + ts := httptest.NewServer(mux) + defer ts.Close() + + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + // An OPTIONS * request should succeed. 
+	_, err = conn.Write([]byte("OPTIONS * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	br := bufio.NewReader(conn)
+	res, err := ReadResponse(br, &Request{Method: "OPTIONS"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.StatusCode != 200 {
+		t.Errorf("Got non-200 response to OPTIONS *: %#v", res)
+	}
+
+	// A GET * request on a ServeMux should fail.
+	_, err = conn.Write([]byte("GET * HTTP/1.1\r\nHost: foo.com\r\n\r\n"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err = ReadResponse(br, &Request{Method: "GET"})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.StatusCode != 400 {
+		t.Errorf("Got non-400 response to GET *: %#v", res)
+	}
+
+	res, err = Get(ts.URL + "/second")
+	if err != nil {
+		t.Fatal(err)
+	}
+	res.Body.Close()
+	if got := <-uric; got != "/second" {
+		t.Errorf("Handler saw request for %q; want /second", got)
+	}
+}
+
+// Tests regarding the ordering of Write, WriteHeader, Header, and
+// Flush calls. In Go 1.0, rw.WriteHeader immediately flushed the
+// (*response).header to the wire. In Go 1.1, the actual wire flush is
+// delayed, so we could maybe tack on a Content-Length and better
+// Content-Type after we see more (or all) of the output. To preserve
+// compatibility with Go 1, we need to be careful to track which
+// headers were live at the time of WriteHeader, so we write the same
+// ones, even if the handler modifies them (~erroneously) after the
+// first Write.
+func TestHeaderToWire(t *testing.T) {
+	tests := []struct {
+		name    string
+		handler func(ResponseWriter, *Request)
+		check   func(output string) error
+	}{
+		{
+			name: "write without Header",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.Write([]byte("hello world"))
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Content-Length:") {
+					return errors.New("no content-length")
+				}
+				if !strings.Contains(got, "Content-Type: text/plain") {
+					return errors.New("no content-type")
+				}
+				return nil
+			},
+		},
+		{
+			name: "Header mutation before write",
+			handler: func(rw ResponseWriter, r *Request) {
+				h := rw.Header()
+				h.Set("Content-Type", "some/type")
+				rw.Write([]byte("hello world"))
+				h.Set("Too-Late", "bogus")
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Content-Length:") {
+					return errors.New("no content-length")
+				}
+				if !strings.Contains(got, "Content-Type: some/type") {
+					return errors.New("wrong content-type")
+				}
+				if strings.Contains(got, "Too-Late") {
+					return errors.New("don't want too-late header")
+				}
+				return nil
+			},
+		},
+		{
+			name: "write then useless Header mutation",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.Write([]byte("hello world"))
+				rw.Header().Set("Too-Late", "Write already wrote headers")
+			},
+			check: func(got string) error {
+				if strings.Contains(got, "Too-Late") {
+					return errors.New("header appeared from after WriteHeader")
+				}
+				return nil
+			},
+		},
+		{
+			name: "flush then write",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.(Flusher).Flush()
+				rw.Write([]byte("post-flush"))
+				rw.Header().Set("Too-Late", "Write already wrote headers")
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Transfer-Encoding: chunked") {
+					return errors.New("not chunked")
+				}
+				if strings.Contains(got, "Too-Late") {
+					return errors.New("header appeared from after WriteHeader")
+				}
+				return nil
+			},
+		},
+		{
+			name: "header then flush",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.Header().Set("Content-Type", "some/type")
+				rw.(Flusher).Flush()
+				rw.Write([]byte("post-flush"))
+				rw.Header().Set("Too-Late", "Write already wrote headers")
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Transfer-Encoding: chunked") {
+					return errors.New("not chunked")
+				}
+				if strings.Contains(got, "Too-Late") {
+					return errors.New("header appeared from after WriteHeader")
+				}
+				if !strings.Contains(got, "Content-Type: some/type") {
+					return errors.New("wrong content-type")
+				}
+				return nil
+			},
+		},
+		{
+			name: "sniff-on-first-write content-type",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.Write([]byte("some html"))
+				rw.Header().Set("Content-Type", "x/wrong")
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Content-Type: text/html") {
+					return errors.New("wrong content-type; want html")
+				}
+				return nil
+			},
+		},
+		{
+			name: "explicit content-type wins",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.Header().Set("Content-Type", "some/type")
+				rw.Write([]byte("some html"))
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Content-Type: some/type") {
+					return errors.New("wrong content-type; want some/type")
+				}
+				return nil
+			},
+		},
+		{
+			name: "empty handler",
+			handler: func(rw ResponseWriter, r *Request) {
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Content-Type: text/plain") {
+					return errors.New("wrong content-type; want text/plain")
+				}
+				if !strings.Contains(got, "Content-Length: 0") {
+					return errors.New("want 0 content-length")
+				}
+				return nil
+			},
+		},
+		{
+			name: "only Header, no write",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.Header().Set("Some-Header", "some-value")
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "Some-Header") {
+					return errors.New("didn't get header")
+				}
+				return nil
+			},
+		},
+		{
+			name: "WriteHeader call",
+			handler: func(rw ResponseWriter, r *Request) {
+				rw.WriteHeader(404)
+				rw.Header().Set("Too-Late", "some-value")
+			},
+			check: func(got string) error {
+				if !strings.Contains(got, "404") {
+					return errors.New("wrong status")
+				}
+				if strings.Contains(got, "Too-Late") {
+					return errors.New("shouldn't have seen Too-Late")
+				}
+				return nil
+			},
+		},
+	}
+	for _, tc := range tests {
+		ht := newHandlerTest(HandlerFunc(tc.handler))
+		got := ht.rawResponse("GET / HTTP/1.1\nHost: golang.org")
+		if err := tc.check(got); err != nil {
+			t.Errorf("%s: %v\nGot response:\n%s", tc.name, err, got)
+		}
+	}
+}
+
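+// The ordering rules exercised above can be seen directly on the wire. As an
+// illustrative sketch (hypothetical; not part of the test table): a handler
+// that flushes before its first write commits the headers early, so the body
+// that follows must use chunked transfer-encoding, and the raw response
+// returned here should contain "Transfer-Encoding: chunked".
+func exampleFlushForcesChunking() string {
+	ht := newHandlerTest(HandlerFunc(func(rw ResponseWriter, r *Request) {
+		rw.(Flusher).Flush()           // headers are committed here
+		rw.Write([]byte("post-flush")) // so this body must be sent as a chunk
+	}))
+	return ht.rawResponse("GET / HTTP/1.1\nHost: golang.org")
+}
+
+// goTimeout runs f, failing t if f takes more than d to complete.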
+func goTimeout(t *testing.T, d time.Duration, f func()) { + ch := make(chan bool, 2) + timer := time.AfterFunc(d, func() { + t.Errorf("Timeout expired after %v", d) + ch <- true + }) + defer timer.Stop() + go func() { + defer func() { ch <- true }() + f() + }() + <-ch +} + +type errorListener struct { + errs []error +} + +func (l *errorListener) Accept() (c net.Conn, err error) { + if len(l.errs) == 0 { + return nil, io.EOF + } + err = l.errs[0] + l.errs = l.errs[1:] + return +} + +func (l *errorListener) Close() error { + return nil +} + +func (l *errorListener) Addr() net.Addr { + return dummyAddr("test-address") +} + +func TestAcceptMaxFds(t *testing.T) { + log.SetOutput(ioutil.Discard) // is noisy otherwise + defer log.SetOutput(os.Stderr) + + ln := &errorListener{[]error{ + &net.OpError{ + Op: "accept", + Err: syscall.EMFILE, + }}} + err := Serve(ln, HandlerFunc(HandlerFunc(func(ResponseWriter, *Request) {}))) + if err != io.EOF { + t.Errorf("got error %v, want EOF", err) + } +} + +func TestWriteAfterHijack(t *testing.T) { + req := reqBytes("GET / HTTP/1.1\nHost: golang.org") + var buf bytes.Buffer + wrotec := make(chan bool, 1) + conn := &rwTestConn{ + Reader: bytes.NewReader(req), + Writer: &buf, + closec: make(chan bool, 1), + } + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + conn, bufrw, err := rw.(Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + go func() { + bufrw.Write([]byte("[hijack-to-bufw]")) + bufrw.Flush() + conn.Write([]byte("[hijack-to-conn]")) + conn.Close() + wrotec <- true + }() + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + <-wrotec + if g, w := buf.String(), "[hijack-to-bufw][hijack-to-conn]"; g != w { + t.Errorf("wrote %q; want %q", g, w) + } +} + +func TestDoubleHijack(t *testing.T) { + req := reqBytes("GET / HTTP/1.1\nHost: golang.org") + var buf bytes.Buffer + conn := &rwTestConn{ + Reader: bytes.NewReader(req), + Writer: &buf, + closec: make(chan bool, 1), + } + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + conn, _, err := rw.(Hijacker).Hijack() + if err != nil { + t.Error(err) + return + } + _, _, err = rw.(Hijacker).Hijack() + if err == nil { + t.Errorf("got err = nil; want err != nil") + } + conn.Close() + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec +} + +// http://code.google.com/p/go/issues/detail?id=5955 +// Note that this does not test the "request too large" +// exit path from the http server. This is intentional; +// not sending Connection: close is just a minor wire +// optimization and is pointless if dealing with a +// badly behaved client. 
+func TestHTTP10ConnectionHeader(t *testing.T) { + defer afterTest(t) + + mux := NewServeMux() + mux.Handle("/", HandlerFunc(func(resp ResponseWriter, req *Request) {})) + ts := httptest.NewServer(mux) + defer ts.Close() + + // net/http uses HTTP/1.1 for requests, so write requests manually + tests := []struct { + req string // raw http request + expect []string // expected Connection header(s) + }{ + { + req: "GET / HTTP/1.0\r\n\r\n", + expect: nil, + }, + { + req: "OPTIONS * HTTP/1.0\r\n\r\n", + expect: nil, + }, + { + req: "GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n", + expect: []string{"keep-alive"}, + }, + } + + for _, tt := range tests { + conn, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal("dial err:", err) + } + + _, err = fmt.Fprint(conn, tt.req) + if err != nil { + t.Fatal("conn write err:", err) + } + + resp, err := ReadResponse(bufio.NewReader(conn), &Request{Method: "GET"}) + if err != nil { + t.Fatal("ReadResponse err:", err) + } + conn.Close() + resp.Body.Close() + + got := resp.Header["Connection"] + if !reflect.DeepEqual(got, tt.expect) { + t.Errorf("wrong Connection headers for request %q. Got %q expect %q", tt.req, got, tt.expect) + } + } +} + +// See golang.org/issue/5660 +func TestServerReaderFromOrder(t *testing.T) { + defer afterTest(t) + pr, pw := io.Pipe() + const size = 3 << 20 + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + rw.Header().Set("Content-Type", "text/plain") // prevent sniffing path + done := make(chan bool) + go func() { + io.Copy(rw, pr) + close(done) + }() + time.Sleep(25 * time.Millisecond) // give Copy a chance to break things + n, err := io.Copy(ioutil.Discard, req.Body) + if err != nil { + t.Errorf("handler Copy: %v", err) + return + } + if n != size { + t.Errorf("handler Copy = %d; want %d", n, size) + } + pw.Write([]byte("hi")) + pw.Close() + <-done + })) + defer ts.Close() + + req, err := NewRequest("POST", ts.URL, io.LimitReader(neverEnding('a'), size)) + if err != nil { + t.Fatal(err) + } + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatal(err) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if string(all) != "hi" { + t.Errorf("Body = %q; want hi", all) + } +} + +// Issue 6157, Issue 6685 +func TestCodesPreventingContentTypeAndBody(t *testing.T) { + for _, code := range []int{StatusNotModified, StatusNoContent, StatusContinue} { + ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.URL.Path == "/header" { + w.Header().Set("Content-Length", "123") + } + w.WriteHeader(code) + if r.URL.Path == "/more" { + w.Write([]byte("stuff")) + } + })) + for _, req := range []string{ + "GET / HTTP/1.0", + "GET /header HTTP/1.0", + "GET /more HTTP/1.0", + "GET / HTTP/1.1", + "GET /header HTTP/1.1", + "GET /more HTTP/1.1", + } { + got := ht.rawResponse(req) + wantStatus := fmt.Sprintf("%d %s", code, StatusText(code)) + if !strings.Contains(got, wantStatus) { + t.Errorf("Code %d: Wanted %q Modified for %q: %s", code, wantStatus, req, got) + } else if strings.Contains(got, "Content-Length") { + t.Errorf("Code %d: Got a Content-Length from %q: %s", code, req, got) + } else if strings.Contains(got, "stuff") { + t.Errorf("Code %d: Response contains a body from %q: %s", code, req, got) + } + } + } +} + +func TestContentTypeOkayOn204(t *testing.T) { + ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "123") // suppressed + 
w.Header().Set("Content-Type", "foo/bar") + w.WriteHeader(204) + })) + got := ht.rawResponse("GET / HTTP/1.1") + if !strings.Contains(got, "Content-Type: foo/bar") { + t.Errorf("Response = %q; want Content-Type: foo/bar", got) + } + if strings.Contains(got, "Content-Length: 123") { + t.Errorf("Response = %q; don't want a Content-Length", got) + } +} + +// Issue 6995 +// A server Handler can receive a Request, and then turn around and +// give a copy of that Request.Body out to the Transport (e.g. any +// proxy). So then two people own that Request.Body (both the server +// and the http client), and both think they can close it on failure. +// Therefore, all incoming server requests Bodies need to be thread-safe. +func TestTransportAndServerSharedBodyRace(t *testing.T) { + defer afterTest(t) + + const bodySize = 1 << 20 + + unblockBackend := make(chan bool) + backend := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + io.CopyN(rw, req.Body, bodySize/2) + <-unblockBackend + })) + defer backend.Close() + + backendRespc := make(chan *Response, 1) + proxy := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + if req.RequestURI == "/foo" { + rw.Write([]byte("bar")) + return + } + req2, _ := NewRequest("POST", backend.URL, req.Body) + req2.ContentLength = bodySize + + bresp, err := DefaultClient.Do(req2) + if err != nil { + t.Errorf("Proxy outbound request: %v", err) + return + } + _, err = io.CopyN(ioutil.Discard, bresp.Body, bodySize/4) + if err != nil { + t.Errorf("Proxy copy error: %v", err) + return + } + backendRespc <- bresp // to close later + + // Try to cause a race: Both the DefaultTransport and the proxy handler's Server + // will try to read/close req.Body (aka req2.Body) + DefaultTransport.(*Transport).CancelRequest(req2) + rw.Write([]byte("OK")) + })) + defer proxy.Close() + + req, _ := NewRequest("POST", proxy.URL, io.LimitReader(neverEnding('a'), bodySize)) + res, err := DefaultClient.Do(req) + if err != nil { + t.Fatalf("Original request: %v", err) + } + + // Cleanup, so we don't leak goroutines. + res.Body.Close() + close(unblockBackend) + (<-backendRespc).Body.Close() +} + +// Test that a hanging Request.Body.Read from another goroutine can't +// cause the Handler goroutine's Request.Body.Close to block. +func TestRequestBodyCloseDoesntBlock(t *testing.T) { + t.Skipf("Skipping known issue; see golang.org/issue/7121") + if testing.Short() { + t.Skip("skipping in -short mode") + } + defer afterTest(t) + + readErrCh := make(chan error, 1) + errCh := make(chan error, 2) + + server := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + go func(body io.Reader) { + _, err := body.Read(make([]byte, 100)) + readErrCh <- err + }(req.Body) + time.Sleep(500 * time.Millisecond) + })) + defer server.Close() + + closeConn := make(chan bool) + defer close(closeConn) + go func() { + conn, err := net.Dial("tcp", server.Listener.Addr().String()) + if err != nil { + errCh <- err + return + } + defer conn.Close() + _, err = conn.Write([]byte("POST / HTTP/1.1\r\nConnection: close\r\nHost: foo\r\nContent-Length: 100000\r\n\r\n")) + if err != nil { + errCh <- err + return + } + // And now just block, making the server block on our + // 100000 bytes of body that will never arrive. + <-closeConn + }() + select { + case err := <-readErrCh: + if err == nil { + t.Error("Read was nil. 
Expected error.") + } + case err := <-errCh: + t.Error(err) + case <-time.After(5 * time.Second): + t.Error("timeout") + } +} + +func TestResponseWriterWriteStringAllocs(t *testing.T) { + ht := newHandlerTest(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.URL.Path == "/s" { + io.WriteString(w, "Hello world") + } else { + w.Write([]byte("Hello world")) + } + })) + before := testing.AllocsPerRun(50, func() { ht.rawResponse("GET / HTTP/1.0") }) + after := testing.AllocsPerRun(50, func() { ht.rawResponse("GET /s HTTP/1.0") }) + if int(after) >= int(before) { + t.Errorf("WriteString allocs of %v >= Write allocs of %v", after, before) + } +} + +func TestAppendTime(t *testing.T) { + var b [len(TimeFormat)]byte + t1 := time.Date(2013, 9, 21, 15, 41, 0, 0, time.FixedZone("CEST", 2*60*60)) + res := ExportAppendTime(b[:0], t1) + t2, err := ParseTime(string(res)) + if err != nil { + t.Fatalf("Error parsing time: %s", err) + } + if !t1.Equal(t2) { + t.Fatalf("Times differ; expected: %v, got %v (%s)", t1, t2, string(res)) + } +} + +func TestServerConnState(t *testing.T) { + defer afterTest(t) + handler := map[string]func(w ResponseWriter, r *Request){ + "/": func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "Hello.") + }, + "/close": func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + fmt.Fprintf(w, "Hello.") + }, + "/hijack": func(w ResponseWriter, r *Request) { + c, _, _ := w.(Hijacker).Hijack() + c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello.")) + c.Close() + }, + "/hijack-panic": func(w ResponseWriter, r *Request) { + c, _, _ := w.(Hijacker).Hijack() + c.Write([]byte("HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello.")) + c.Close() + panic("intentional panic") + }, + } + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) { + handler[r.URL.Path](w, r) + })) + defer ts.Close() + + var mu sync.Mutex // guard stateLog and connID + var stateLog = map[int][]ConnState{} + var connID = map[net.Conn]int{} + + ts.Config.ErrorLog = log.New(ioutil.Discard, "", 0) + ts.Config.ConnState = func(c net.Conn, state ConnState) { + if c == nil { + t.Errorf("nil conn seen in state %s", state) + return + } + mu.Lock() + defer mu.Unlock() + id, ok := connID[c] + if !ok { + id = len(connID) + 1 + connID[c] = id + } + stateLog[id] = append(stateLog[id], state) + } + ts.Start() + + mustGet(t, ts.URL+"/") + mustGet(t, ts.URL+"/close") + + mustGet(t, ts.URL+"/") + mustGet(t, ts.URL+"/", "Connection", "close") + + mustGet(t, ts.URL+"/hijack") + mustGet(t, ts.URL+"/hijack-panic") + + // New->Closed + { + c, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + c.Close() + } + + // New->Active->Closed + { + c, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + if _, err := io.WriteString(c, "BOGUS REQUEST\r\n\r\n"); err != nil { + t.Fatal(err) + } + c.Close() + } + + // New->Idle->Closed + { + c, err := net.Dial("tcp", ts.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + if _, err := io.WriteString(c, "GET / HTTP/1.1\r\nHost: foo\r\n\r\n"); err != nil { + t.Fatal(err) + } + res, err := ReadResponse(bufio.NewReader(c), nil) + if err != nil { + t.Fatal(err) + } + if _, err := io.Copy(ioutil.Discard, res.Body); err != nil { + t.Fatal(err) + } + c.Close() + } + + want := map[int][]ConnState{ + 1: []ConnState{StateNew, StateActive, StateIdle, StateActive, StateClosed}, + 2: []ConnState{StateNew, StateActive, StateIdle, StateActive, StateClosed}, + 3: 
[]ConnState{StateNew, StateActive, StateHijacked}, + 4: []ConnState{StateNew, StateActive, StateHijacked}, + 5: []ConnState{StateNew, StateClosed}, + 6: []ConnState{StateNew, StateActive, StateClosed}, + 7: []ConnState{StateNew, StateActive, StateIdle, StateClosed}, + } + logString := func(m map[int][]ConnState) string { + var b bytes.Buffer + for id, l := range m { + fmt.Fprintf(&b, "Conn %d: ", id) + for _, s := range l { + fmt.Fprintf(&b, "%s ", s) + } + b.WriteString("\n") + } + return b.String() + } + + for i := 0; i < 5; i++ { + time.Sleep(time.Duration(i) * 50 * time.Millisecond) + mu.Lock() + match := reflect.DeepEqual(stateLog, want) + mu.Unlock() + if match { + return + } + } + + mu.Lock() + t.Errorf("Unexpected events.\nGot log: %s\n Want: %s\n", logString(stateLog), logString(want)) + mu.Unlock() +} + +func mustGet(t *testing.T, url string, headers ...string) { + req, err := NewRequest("GET", url, nil) + if err != nil { + t.Fatal(err) + } + for len(headers) > 0 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := DefaultClient.Do(req) + if err != nil { + t.Errorf("Error fetching %s: %v", url, err) + return + } + _, err = ioutil.ReadAll(res.Body) + defer res.Body.Close() + if err != nil { + t.Errorf("Error reading %s: %v", url, err) + } +} + +func TestServerKeepAlivesEnabled(t *testing.T) { + defer afterTest(t) + ts := httptest.NewUnstartedServer(HandlerFunc(func(w ResponseWriter, r *Request) {})) + ts.Config.SetKeepAlivesEnabled(false) + ts.Start() + defer ts.Close() + res, err := Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + if !res.Close { + t.Errorf("Body.Close == false; want true") + } +} + +// golang.org/issue/7856 +func TestServerEmptyBodyRace(t *testing.T) { + defer afterTest(t) + var n int32 + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + atomic.AddInt32(&n, 1) + })) + defer ts.Close() + var wg sync.WaitGroup + const reqs = 20 + for i := 0; i < reqs; i++ { + wg.Add(1) + go func() { + defer wg.Done() + res, err := Get(ts.URL) + if err != nil { + t.Error(err) + return + } + defer res.Body.Close() + _, err = io.Copy(ioutil.Discard, res.Body) + if err != nil { + t.Error(err) + return + } + }() + } + wg.Wait() + if got := atomic.LoadInt32(&n); got != reqs { + t.Errorf("handler ran %d times; want %d", got, reqs) + } +} + +func TestServerConnStateNew(t *testing.T) { + sawNew := false // if the test is buggy, we'll race on this variable. 
+ srv := &Server{ + ConnState: func(c net.Conn, state ConnState) { + if state == StateNew { + sawNew = true // testing that this write isn't racy + } + }, + Handler: HandlerFunc(func(w ResponseWriter, r *Request) {}), // irrelevant + } + srv.Serve(&oneConnListener{ + conn: &rwTestConn{ + Reader: strings.NewReader("GET / HTTP/1.1\r\nHost: foo\r\n\r\n"), + Writer: ioutil.Discard, + }, + }) + if !sawNew { // testing that this read isn't racy + t.Error("StateNew not seen") + } +} + +func BenchmarkClientServer(b *testing.B) { + b.ReportAllocs() + b.StopTimer() + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + fmt.Fprintf(rw, "Hello world.\n") + })) + defer ts.Close() + b.StartTimer() + + for i := 0; i < b.N; i++ { + res, err := Get(ts.URL) + if err != nil { + b.Fatal("Get:", err) + } + all, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + b.Fatal("ReadAll:", err) + } + body := string(all) + if body != "Hello world.\n" { + b.Fatal("Got body:", body) + } + } + + b.StopTimer() +} + +func BenchmarkClientServerParallel4(b *testing.B) { + benchmarkClientServerParallel(b, 4) +} + +func BenchmarkClientServerParallel64(b *testing.B) { + benchmarkClientServerParallel(b, 64) +} + +func benchmarkClientServerParallel(b *testing.B, parallelism int) { + b.ReportAllocs() + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + fmt.Fprintf(rw, "Hello world.\n") + })) + defer ts.Close() + b.ResetTimer() + b.SetParallelism(parallelism) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + res, err := Get(ts.URL) + if err != nil { + b.Logf("Get: %v", err) + continue + } + all, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + b.Logf("ReadAll: %v", err) + continue + } + body := string(all) + if body != "Hello world.\n" { + panic("Got body: " + body) + } + } + }) +} + +// A benchmark for profiling the server without the HTTP client code. +// The client code runs in a subprocess. +// +// For use like: +// $ go test -c +// $ ./http.test -test.run=XX -test.bench=BenchmarkServer -test.benchtime=15s -test.cpuprofile=http.prof +// $ go tool pprof http.test http.prof +// (pprof) web +func BenchmarkServer(b *testing.B) { + b.ReportAllocs() + // Child process mode; + if url := os.Getenv("TEST_BENCH_SERVER_URL"); url != "" { + n, err := strconv.Atoi(os.Getenv("TEST_BENCH_CLIENT_N")) + if err != nil { + panic(err) + } + for i := 0; i < n; i++ { + res, err := Get(url) + if err != nil { + log.Panicf("Get: %v", err) + } + all, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + log.Panicf("ReadAll: %v", err) + } + body := string(all) + if body != "Hello world.\n" { + log.Panicf("Got body: %q", body) + } + } + os.Exit(0) + return + } + + var res = []byte("Hello world.\n") + b.StopTimer() + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, r *Request) { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.Write(res) + })) + defer ts.Close() + b.StartTimer() + + cmd := exec.Command(os.Args[0], "-test.run=XXXX", "-test.bench=BenchmarkServer") + cmd.Env = append([]string{ + fmt.Sprintf("TEST_BENCH_CLIENT_N=%d", b.N), + fmt.Sprintf("TEST_BENCH_SERVER_URL=%s", ts.URL), + }, os.Environ()...) 
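+ // Run the client loop in a child process: the child re-executes this test binary with TEST_BENCH_SERVER_URL set, so it takes the client branch above while this parent process serves and profiles.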
+ out, err := cmd.CombinedOutput() + if err != nil { + b.Errorf("Test failure: %v, with output: %s", err, out) + } +} + +func BenchmarkServerFakeConnNoKeepAlive(b *testing.B) { + b.ReportAllocs() + req := reqBytes(`GET / HTTP/1.0 +Host: golang.org +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 +User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17 +Accept-Encoding: gzip,deflate,sdch +Accept-Language: en-US,en;q=0.8 +Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3 +`) + res := []byte("Hello world!\n") + + conn := &testConn{ + // testConn.Close will not push into the channel + // if it's full. + closec: make(chan bool, 1), + } + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.Write(res) + }) + ln := new(oneConnListener) + for i := 0; i < b.N; i++ { + conn.readBuf.Reset() + conn.writeBuf.Reset() + conn.readBuf.Write(req) + ln.conn = conn + Serve(ln, handler) + <-conn.closec + } +} + +// repeatReader reads content count times, then EOFs. +type repeatReader struct { + content []byte + count int + off int +} + +func (r *repeatReader) Read(p []byte) (n int, err error) { + if r.count <= 0 { + return 0, io.EOF + } + n = copy(p, r.content[r.off:]) + r.off += n + if r.off == len(r.content) { + r.count-- + r.off = 0 + } + return +} + +func BenchmarkServerFakeConnWithKeepAlive(b *testing.B) { + b.ReportAllocs() + + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 +User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.52 Safari/537.17 +Accept-Encoding: gzip,deflate,sdch +Accept-Language: en-US,en;q=0.8 +Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.3 +`) + res := []byte("Hello world!\n") + + conn := &rwTestConn{ + Reader: &repeatReader{content: req, count: b.N}, + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + handled := 0 + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + handled++ + rw.Header().Set("Content-Type", "text/html; charset=utf-8") + rw.Write(res) + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + if b.N != handled { + b.Errorf("b.N=%d but handled %d", b.N, handled) + } +} + +// same as above, but representing the most simple possible request +// and handler. Notably: the handler does not call rw.Header(). +func BenchmarkServerFakeConnWithKeepAliveLite(b *testing.B) { + b.ReportAllocs() + + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +`) + res := []byte("Hello world!\n") + + conn := &rwTestConn{ + Reader: &repeatReader{content: req, count: b.N}, + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + handled := 0 + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + handled++ + rw.Write(res) + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + if b.N != handled { + b.Errorf("b.N=%d but handled %d", b.N, handled) + } +} + +const someResponse = "<html>some response</html>" + +// A Response that's just no bigger than 2KB, the buffer-before-chunking threshold. +var response = bytes.Repeat([]byte(someResponse), 2<<10/len(someResponse)) + +// Both Content-Type and Content-Length set. Should be no buffering.
+func BenchmarkServerHandlerTypeLen(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Type", "text/html") + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + })) +} + +// A Content-Type is set, but no length. No sniffing, but will count the Content-Length. +func BenchmarkServerHandlerNoLen(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Type", "text/html") + w.Write(response) + })) +} + +// A Content-Length is set, but the Content-Type will be sniffed. +func BenchmarkServerHandlerNoType(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", strconv.Itoa(len(response))) + w.Write(response) + })) +} + +// Neither a Content-Type or Content-Length, so sniffed and counted. +func BenchmarkServerHandlerNoHeader(b *testing.B) { + benchmarkHandler(b, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Write(response) + })) +} + +func benchmarkHandler(b *testing.B, h Handler) { + b.ReportAllocs() + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +`) + conn := &rwTestConn{ + Reader: &repeatReader{content: req, count: b.N}, + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + handled := 0 + handler := HandlerFunc(func(rw ResponseWriter, r *Request) { + handled++ + h.ServeHTTP(rw, r) + }) + ln := &oneConnListener{conn: conn} + go Serve(ln, handler) + <-conn.closec + if b.N != handled { + b.Errorf("b.N=%d but handled %d", b.N, handled) + } +} + +func BenchmarkServerHijack(b *testing.B) { + b.ReportAllocs() + req := reqBytes(`GET / HTTP/1.1 +Host: golang.org +`) + h := HandlerFunc(func(w ResponseWriter, r *Request) { + conn, _, err := w.(Hijacker).Hijack() + if err != nil { + panic(err) + } + conn.Close() + }) + conn := &rwTestConn{ + Writer: ioutil.Discard, + closec: make(chan bool, 1), + } + ln := &oneConnListener{conn: conn} + for i := 0; i < b.N; i++ { + conn.Reader = bytes.NewReader(req) + ln.conn = conn + Serve(ln, h) + <-conn.closec + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/server.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2052 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP server. See RFC 2616. + +package http + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/url" + "os" + "path" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Errors introduced by the HTTP server. +var ( + ErrWriteAfterFlush = errors.New("Conn.Write called after Flush") + ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") + ErrHijacked = errors.New("Conn has been hijacked") + ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length") +) + +// Objects implementing the Handler interface can be +// registered to serve a particular path or subtree +// in the HTTP server. +// +// ServeHTTP should write reply headers and data to the ResponseWriter +// and then return. Returning signals that the request is finished +// and that the HTTP server can move on to the next request on +// the connection. 
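+//
+// For example, a stateful Handler might look like this (an illustrative
+// sketch; countHandler is a hypothetical type, not part of this package):
+//
+//	type countHandler struct {
+//		mu sync.Mutex // guards n
+//		n  int
+//	}
+//
+//	func (h *countHandler) ServeHTTP(w ResponseWriter, r *Request) {
+//		h.mu.Lock()
+//		defer h.mu.Unlock()
+//		h.n++
+//		fmt.Fprintf(w, "count is %d\n", h.n)
+//	}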
+type Handler interface { + ServeHTTP(ResponseWriter, *Request) +} + +// A ResponseWriter interface is used by an HTTP handler to +// construct an HTTP response. +type ResponseWriter interface { + // Header returns the header map that will be sent by WriteHeader. + // Changing the header after a call to WriteHeader (or Write) has + // no effect. + Header() Header + + // Write writes the data to the connection as part of an HTTP reply. + // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) + // before writing the data. If the Header does not contain a + // Content-Type line, Write adds a Content-Type set to the result of passing + // the initial 512 bytes of written data to DetectContentType. + Write([]byte) (int, error) + + // WriteHeader sends an HTTP response header with status code. + // If WriteHeader is not called explicitly, the first call to Write + // will trigger an implicit WriteHeader(http.StatusOK). + // Thus explicit calls to WriteHeader are mainly used to + // send error codes. + WriteHeader(int) +} + +// The Flusher interface is implemented by ResponseWriters that allow +// an HTTP handler to flush buffered data to the client. +// +// Note that even for ResponseWriters that support Flush, +// if the client is connected through an HTTP proxy, +// the buffered data may not reach the client until the response +// completes. +type Flusher interface { + // Flush sends any buffered data to the client. + Flush() +} + +// The Hijacker interface is implemented by ResponseWriters that allow +// an HTTP handler to take over the connection. +type Hijacker interface { + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the HTTP server library + // will not do anything else with the connection. + // It becomes the caller's responsibility to manage + // and close the connection. + Hijack() (net.Conn, *bufio.ReadWriter, error) +} + +// The CloseNotifier interface is implemented by ResponseWriters which +// allow detecting when the underlying connection has gone away. +// +// This mechanism can be used to cancel long operations on the server +// if the client has disconnected before the response is ready. +type CloseNotifier interface { + // CloseNotify returns a channel that receives a single value + // when the client connection has gone away. + CloseNotify() <-chan bool +} + +// A conn represents the server side of an HTTP connection. 
+type conn struct { + remoteAddr string // network address of remote side + server *Server // the Server on which the connection arrived + rwc net.Conn // i/o connection + sr liveSwitchReader // where the LimitReader reads from; usually the rwc + lr *io.LimitedReader // io.LimitReader(sr) + buf *bufio.ReadWriter // buffered(lr,rwc), reading from bufio->limitReader->sr->rwc + tlsState *tls.ConnectionState // or nil when not using TLS + + mu sync.Mutex // guards the following + clientGone bool // if client has disconnected mid-request + closeNotifyc chan bool // made lazily + hijackedv bool // connection has been hijacked by handler +} + +func (c *conn) hijacked() bool { + c.mu.Lock() + defer c.mu.Unlock() + return c.hijackedv +} + +func (c *conn) hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.hijackedv { + return nil, nil, ErrHijacked + } + if c.closeNotifyc != nil { + return nil, nil, errors.New("http: Hijack is incompatible with use of CloseNotifier") + } + c.hijackedv = true + rwc = c.rwc + buf = c.buf + c.rwc = nil + c.buf = nil + c.setState(rwc, StateHijacked) + return +} + +func (c *conn) closeNotify() <-chan bool { + c.mu.Lock() + defer c.mu.Unlock() + if c.closeNotifyc == nil { + c.closeNotifyc = make(chan bool, 1) + if c.hijackedv { + // to obey the function signature, even though + // it'll never receive a value. + return c.closeNotifyc + } + pr, pw := io.Pipe() + + readSource := c.sr.r + c.sr.Lock() + c.sr.r = pr + c.sr.Unlock() + go func() { + _, err := io.Copy(pw, readSource) + if err == nil { + err = io.EOF + } + pw.CloseWithError(err) + c.noteClientGone() + }() + } + return c.closeNotifyc +} + +func (c *conn) noteClientGone() { + c.mu.Lock() + defer c.mu.Unlock() + if c.closeNotifyc != nil && !c.clientGone { + c.closeNotifyc <- true + } + c.clientGone = true +} + +// A switchReader can have its Reader changed at runtime. +// It's not safe for concurrent Reads and switches. +type switchReader struct { + io.Reader +} + +// A switchWriter can have its Writer changed at runtime. +// It's not safe for concurrent Writes and switches. +type switchWriter struct { + io.Writer +} + +// A liveSwitchReader is a switchReader that's safe for concurrent +// reads and switches, if its mutex is held. +type liveSwitchReader struct { + sync.Mutex + r io.Reader +} + +func (sr *liveSwitchReader) Read(p []byte) (n int, err error) { + sr.Lock() + r := sr.r + sr.Unlock() + return r.Read(p) +} + +// This should be >= 512 bytes for DetectContentType, +// but otherwise it's somewhat arbitrary. +const bufferBeforeChunkingSize = 2048 + +// chunkWriter writes to a response's conn buffer, and is the writer +// wrapped by the response.bufw buffered writer. +// +// chunkWriter also is responsible for finalizing the Header, including +// conditionally setting the Content-Type and setting a Content-Length +// in cases where the handler's final output is smaller than the buffer +// size. It also conditionally adds chunk headers, when in chunking mode. +// +// See the comment above (*response).Write for the entire write flow. +type chunkWriter struct { + res *response + + // header is either nil or a deep clone of res.handlerHeader + // at the time of res.WriteHeader, if res.WriteHeader is + // called and extra buffering is being done to calculate + // Content-Type and/or Content-Length. + header Header + + // wroteHeader tells whether the header's been written to "the + // wire" (or rather: w.conn.buf). 
this is unlike + // (*response).wroteHeader, which tells only whether it was + // logically written. + wroteHeader bool + + // set by the writeHeader method: + chunking bool // using chunked transfer encoding for reply body +} + +var ( + crlf = []byte("\r\n") + colonSpace = []byte(": ") +) + +func (cw *chunkWriter) Write(p []byte) (n int, err error) { + if !cw.wroteHeader { + cw.writeHeader(p) + } + if cw.res.req.Method == "HEAD" { + // Eat writes. + return len(p), nil + } + if cw.chunking { + _, err = fmt.Fprintf(cw.res.conn.buf, "%x\r\n", len(p)) + if err != nil { + cw.res.conn.rwc.Close() + return + } + } + n, err = cw.res.conn.buf.Write(p) + if cw.chunking && err == nil { + _, err = cw.res.conn.buf.Write(crlf) + } + if err != nil { + cw.res.conn.rwc.Close() + } + return +} + +func (cw *chunkWriter) flush() { + if !cw.wroteHeader { + cw.writeHeader(nil) + } + cw.res.conn.buf.Flush() +} + +func (cw *chunkWriter) close() { + if !cw.wroteHeader { + cw.writeHeader(nil) + } + if cw.chunking { + // zero EOF chunk, trailer key/value pairs (currently + // unsupported in Go's server), followed by a blank + // line. + cw.res.conn.buf.WriteString("0\r\n\r\n") + } +} + +// A response represents the server side of an HTTP response. +type response struct { + conn *conn + req *Request // request for this response + wroteHeader bool // reply header has been (logically) written + wroteContinue bool // 100 Continue response was written + + w *bufio.Writer // buffers output in chunks to chunkWriter + cw chunkWriter + sw *switchWriter // of the bufio.Writer, for return to putBufioWriter + + // handlerHeader is the Header that Handlers get access to, + // which may be retained and mutated even after WriteHeader. + // handlerHeader is copied into cw.header at WriteHeader + // time, and privately mutated thereafter. + handlerHeader Header + calledHeader bool // handler accessed handlerHeader via Header + + written int64 // number of bytes written in body + contentLength int64 // explicitly-declared Content-Length; or -1 + status int // status code passed to WriteHeader + + // close connection after this reply. set on request and + // updated after response from handler if there's a + // "Connection: keep-alive" response header and a + // Content-Length. + closeAfterReply bool + + // requestBodyLimitHit is set by requestTooLarge when + // maxBytesReader hits its max size. It is checked in + // WriteHeader, to make sure we don't consume the + // remaining request body to try to advance to the next HTTP + // request. Instead, when this is set, we stop reading + // subsequent requests on this connection and stop reading + // input from it. + requestBodyLimitHit bool + + handlerDone bool // set true when the handler exits + + // Buffers for Date and Content-Length + dateBuf [len(TimeFormat)]byte + clenBuf [10]byte +} + +// requestTooLarge is called by maxBytesReader when too much input has +// been read from the client. +func (w *response) requestTooLarge() { + w.closeAfterReply = true + w.requestBodyLimitHit = true + if !w.wroteHeader { + w.Header().Set("Connection", "close") + } +} + +// needsSniff reports whether a Content-Type still needs to be sniffed. +func (w *response) needsSniff() bool { + _, haveType := w.handlerHeader["Content-Type"] + return !w.cw.wroteHeader && !haveType && w.written < sniffLen +} + +// writerOnly hides an io.Writer value's optional ReadFrom method +// from io.Copy. 
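+//
+// A sketch of the effect (hypothetical call sites): when the source has no
+// WriteTo method, io.Copy delegates to the destination's ReadFrom if one is
+// present. Wrapping the destination in writerOnly hides that method, so the
+// plain Write-based copy loop runs instead:
+//
+//	io.Copy(w, src)             // may call w.ReadFrom(src), recursing
+//	io.Copy(writerOnly{w}, src) // always uses w.Write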
+type writerOnly struct { + io.Writer +} + +func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { + switch v := src.(type) { + case *os.File: + fi, err := v.Stat() + if err != nil { + return false, err + } + return fi.Mode().IsRegular(), nil + case *io.LimitedReader: + return srcIsRegularFile(v.R) + default: + return + } +} + +// ReadFrom is here to optimize copying from an *os.File regular file +// to a *net.TCPConn with sendfile. +func (w *response) ReadFrom(src io.Reader) (n int64, err error) { + // Our underlying w.conn.rwc is usually a *TCPConn (with its + // own ReadFrom method). If not, or if our src isn't a regular + // file, just fall back to the normal copy method. + rf, ok := w.conn.rwc.(io.ReaderFrom) + regFile, err := srcIsRegularFile(src) + if err != nil { + return 0, err + } + if !ok || !regFile { + return io.Copy(writerOnly{w}, src) + } + + // sendfile path: + + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + + if w.needsSniff() { + n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) + n += n0 + if err != nil { + return n, err + } + } + + w.w.Flush() // get rid of any previous writes + w.cw.flush() // make sure Header is written; flush data to rwc + + // Now that cw has been flushed, its chunking field is guaranteed initialized. + if !w.cw.chunking && w.bodyAllowed() { + n0, err := rf.ReadFrom(src) + n += n0 + w.written += n0 + return n, err + } + + n0, err := io.Copy(writerOnly{w}, src) + n += n0 + return n, err +} + +// noLimit is an effective infinite upper bound for io.LimitedReader +const noLimit int64 = (1 << 63) - 1 + +// debugServerConnections controls whether all server connections are wrapped +// with a verbose logging wrapper. +const debugServerConnections = false + +// Create new connection from rwc. +func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) { + c = new(conn) + c.remoteAddr = rwc.RemoteAddr().String() + c.server = srv + c.rwc = rwc + if debugServerConnections { + c.rwc = newLoggingConn("server", c.rwc) + } + c.sr = liveSwitchReader{r: c.rwc} + c.lr = io.LimitReader(&c.sr, noLimit).(*io.LimitedReader) + br := newBufioReader(c.lr) + bw := newBufioWriterSize(c.rwc, 4<<10) + c.buf = bufio.NewReadWriter(br, bw) + return c, nil +} + +var ( + bufioReaderPool sync.Pool + bufioWriter2kPool sync.Pool + bufioWriter4kPool sync.Pool +) + +func bufioWriterPool(size int) *sync.Pool { + switch size { + case 2 << 10: + return &bufioWriter2kPool + case 4 << 10: + return &bufioWriter4kPool + } + return nil +} + +func newBufioReader(r io.Reader) *bufio.Reader { + if v := bufioReaderPool.Get(); v != nil { + br := v.(*bufio.Reader) + br.Reset(r) + return br + } + return bufio.NewReader(r) +} + +func putBufioReader(br *bufio.Reader) { + br.Reset(nil) + bufioReaderPool.Put(br) +} + +func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { + pool := bufioWriterPool(size) + if pool != nil { + if v := pool.Get(); v != nil { + bw := v.(*bufio.Writer) + bw.Reset(w) + return bw + } + } + return bufio.NewWriterSize(w, size) +} + +func putBufioWriter(bw *bufio.Writer) { + bw.Reset(nil) + if pool := bufioWriterPool(bw.Available()); pool != nil { + pool.Put(bw) + } +} + +// DefaultMaxHeaderBytes is the maximum permitted size of the headers +// in an HTTP request. +// This can be overridden by setting Server.MaxHeaderBytes. 
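+//
+// For example, to lower the limit (an illustrative sketch; the values are
+// arbitrary):
+//
+//	srv := &Server{
+//		Addr:           ":8080",
+//		MaxHeaderBytes: 64 << 10, // cap request headers at 64 KB
+//	}
+//	log.Fatal(srv.ListenAndServe())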
+const DefaultMaxHeaderBytes = 1 << 20 // 1 MB + +func (srv *Server) maxHeaderBytes() int { + if srv.MaxHeaderBytes > 0 { + return srv.MaxHeaderBytes + } + return DefaultMaxHeaderBytes +} + +func (srv *Server) initialLimitedReaderSize() int64 { + return int64(srv.maxHeaderBytes()) + 4096 // bufio slop +} + +// wrapper around io.ReaderCloser which on first read, sends an +// HTTP/1.1 100 Continue header +type expectContinueReader struct { + resp *response + readCloser io.ReadCloser + closed bool +} + +func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { + if ecr.closed { + return 0, ErrBodyReadAfterClose + } + if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { + ecr.resp.wroteContinue = true + ecr.resp.conn.buf.WriteString("HTTP/1.1 100 Continue\r\n\r\n") + ecr.resp.conn.buf.Flush() + } + return ecr.readCloser.Read(p) +} + +func (ecr *expectContinueReader) Close() error { + ecr.closed = true + return ecr.readCloser.Close() +} + +// TimeFormat is the time format to use with +// time.Parse and time.Time.Format when parsing +// or generating times in HTTP headers. +// It is like time.RFC1123 but hard codes GMT as the time zone. +const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + +// appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) +func appendTime(b []byte, t time.Time) []byte { + const days = "SunMonTueWedThuFriSat" + const months = "JanFebMarAprMayJunJulAugSepOctNovDec" + + t = t.UTC() + yy, mm, dd := t.Date() + hh, mn, ss := t.Clock() + day := days[3*t.Weekday():] + mon := months[3*(mm-1):] + + return append(b, + day[0], day[1], day[2], ',', ' ', + byte('0'+dd/10), byte('0'+dd%10), ' ', + mon[0], mon[1], mon[2], ' ', + byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', + byte('0'+hh/10), byte('0'+hh%10), ':', + byte('0'+mn/10), byte('0'+mn%10), ':', + byte('0'+ss/10), byte('0'+ss%10), ' ', + 'G', 'M', 'T') +} + +var errTooLarge = errors.New("http: request too large") + +// Read next request from connection. +func (c *conn) readRequest() (w *response, err error) { + if c.hijacked() { + return nil, ErrHijacked + } + + if d := c.server.ReadTimeout; d != 0 { + c.rwc.SetReadDeadline(time.Now().Add(d)) + } + if d := c.server.WriteTimeout; d != 0 { + defer func() { + c.rwc.SetWriteDeadline(time.Now().Add(d)) + }() + } + + c.lr.N = c.server.initialLimitedReaderSize() + var req *Request + if req, err = ReadRequest(c.buf.Reader); err != nil { + if c.lr.N == 0 { + return nil, errTooLarge + } + return nil, err + } + c.lr.N = noLimit + + req.RemoteAddr = c.remoteAddr + req.TLS = c.tlsState + + w = &response{ + conn: c, + req: req, + handlerHeader: make(Header), + contentLength: -1, + } + w.cw.res = w + w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) + return w, nil +} + +func (w *response) Header() Header { + if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader { + // Accessing the header between logically writing it + // and physically writing it means we need to allocate + // a clone to snapshot the logically written state. + w.cw.header = w.handlerHeader.clone() + } + w.calledHeader = true + return w.handlerHeader +} + +// maxPostHandlerReadBytes is the max number of Request.Body bytes not +// consumed by a handler that the server will read from the client +// in order to keep a connection alive. If there are more bytes than +// this then the server to be paranoid instead sends a "Connection: +// close" response. 
+// +// This number is approximately what a typical machine's TCP buffer +// size is anyway. (if we have the bytes on the machine, we might as +// well read them) +const maxPostHandlerReadBytes = 256 << 10 + +func (w *response) WriteHeader(code int) { + if w.conn.hijacked() { + w.conn.server.logf("http: response.WriteHeader on hijacked connection") + return + } + if w.wroteHeader { + w.conn.server.logf("http: multiple response.WriteHeader calls") + return + } + w.wroteHeader = true + w.status = code + + if w.calledHeader && w.cw.header == nil { + w.cw.header = w.handlerHeader.clone() + } + + if cl := w.handlerHeader.get("Content-Length"); cl != "" { + v, err := strconv.ParseInt(cl, 10, 64) + if err == nil && v >= 0 { + w.contentLength = v + } else { + w.conn.server.logf("http: invalid Content-Length of %q", cl) + w.handlerHeader.Del("Content-Length") + } + } +} + +// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader. +// This type is used to avoid extra allocations from cloning and/or populating +// the response Header map and all its 1-element slices. +type extraHeader struct { + contentType string + connection string + transferEncoding string + date []byte // written if not nil + contentLength []byte // written if not nil +} + +// Sorted the same as extraHeader.Write's loop. +var extraHeaderKeys = [][]byte{ + []byte("Content-Type"), + []byte("Connection"), + []byte("Transfer-Encoding"), +} + +var ( + headerContentLength = []byte("Content-Length: ") + headerDate = []byte("Date: ") +) + +// Write writes the headers described in h to w. +// +// This method has a value receiver, despite the somewhat large size +// of h, because it prevents an allocation. The escape analysis isn't +// smart enough to realize this function doesn't mutate h. +func (h extraHeader) Write(w *bufio.Writer) { + if h.date != nil { + w.Write(headerDate) + w.Write(h.date) + w.Write(crlf) + } + if h.contentLength != nil { + w.Write(headerContentLength) + w.Write(h.contentLength) + w.Write(crlf) + } + for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { + if v != "" { + w.Write(extraHeaderKeys[i]) + w.Write(colonSpace) + w.WriteString(v) + w.Write(crlf) + } + } +} + +// writeHeader finalizes the header sent to the client and writes it +// to cw.res.conn.buf. +// +// p is not written by writeHeader, but is the first chunk of the body +// that will be written. It is sniffed for a Content-Type if none is +// set explicitly. It's also used to set the Content-Length, if the +// total body size was small and the handler has already finished +// running. +func (cw *chunkWriter) writeHeader(p []byte) { + if cw.wroteHeader { + return + } + cw.wroteHeader = true + + w := cw.res + keepAlivesEnabled := w.conn.server.doKeepAlives() + isHEAD := w.req.Method == "HEAD" + + // header is written out to w.conn.buf below. Depending on the + // state of the handler, we either own the map or not. If we + // don't own it, the exclude map is created lazily for + // WriteSubset to remove headers. The setHeader struct holds + // headers we need to add. 
+ header := cw.header + owned := header != nil + if !owned { + header = w.handlerHeader + } + var excludeHeader map[string]bool + delHeader := func(key string) { + if owned { + header.Del(key) + return + } + if _, ok := header[key]; !ok { + return + } + if excludeHeader == nil { + excludeHeader = make(map[string]bool) + } + excludeHeader[key] = true + } + var setHeader extraHeader + + // If the handler is done but never sent a Content-Length + // response header and this is our first (and last) write, set + // it, even to zero. This helps HTTP/1.0 clients keep their + // "keep-alive" connections alive. + // Exceptions: 304/204/1xx responses never get Content-Length, and if + // it was a HEAD request, we don't know the difference between + // 0 actual bytes and 0 bytes because the handler noticed it + // was a HEAD request and chose not to write anything. So for + // HEAD, the handler should either write the Content-Length or + // write non-zero bytes. If it's actually 0 bytes and the + // handler never looked at the Request.Method, we just don't + // send a Content-Length header. + if w.handlerDone && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { + w.contentLength = int64(len(p)) + setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) + } + + // If this was an HTTP/1.0 request with keep-alive and we sent a + // Content-Length back, we can make this a keep-alive response ... + if w.req.wantsHttp10KeepAlive() && keepAlivesEnabled { + sentLength := header.get("Content-Length") != "" + if sentLength && header.get("Connection") == "keep-alive" { + w.closeAfterReply = false + } + } + + // Check for a explicit (and valid) Content-Length header. + hasCL := w.contentLength != -1 + + if w.req.wantsHttp10KeepAlive() && (isHEAD || hasCL) { + _, connectionHeaderSet := header["Connection"] + if !connectionHeaderSet { + setHeader.connection = "keep-alive" + } + } else if !w.req.ProtoAtLeast(1, 1) || w.req.wantsClose() { + w.closeAfterReply = true + } + + if header.get("Connection") == "close" || !keepAlivesEnabled { + w.closeAfterReply = true + } + + // Per RFC 2616, we should consume the request body before + // replying, if the handler hasn't already done so. But we + // don't want to do an unbounded amount of reading here for + // DoS reasons, so we only try up to a threshold. + if w.req.ContentLength != 0 && !w.closeAfterReply { + ecr, isExpecter := w.req.Body.(*expectContinueReader) + if !isExpecter || ecr.resp.wroteContinue { + n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1) + if n >= maxPostHandlerReadBytes { + w.requestTooLarge() + delHeader("Connection") + setHeader.connection = "close" + } else { + w.req.Body.Close() + } + } + } + + code := w.status + if bodyAllowedForStatus(code) { + // If no content type, apply sniffing algorithm to body. + _, haveType := header["Content-Type"] + if !haveType { + setHeader.contentType = DetectContentType(p) + } + } else { + for _, k := range suppressedHeaders(code) { + delHeader(k) + } + } + + if _, ok := header["Date"]; !ok { + setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) + } + + te := header.get("Transfer-Encoding") + hasTE := te != "" + if hasCL && hasTE && te != "identity" { + // TODO: return an error if WriteHeader gets a return parameter + // For now just ignore the Content-Length. 
+ w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", + te, w.contentLength) + delHeader("Content-Length") + hasCL = false + } + + if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { + // do nothing + } else if code == StatusNoContent { + delHeader("Transfer-Encoding") + } else if hasCL { + delHeader("Transfer-Encoding") + } else if w.req.ProtoAtLeast(1, 1) { + // HTTP/1.1 or greater: use chunked transfer encoding + // to avoid closing the connection at EOF. + // TODO: this blows away any custom or stacked Transfer-Encoding they + // might have set. Deal with that as need arises once we have a valid + // use case. + cw.chunking = true + setHeader.transferEncoding = "chunked" + } else { + // HTTP version < 1.1: cannot do chunked transfer + // encoding and we don't know the Content-Length so + // signal EOF by closing connection. + w.closeAfterReply = true + delHeader("Transfer-Encoding") // in case already set + } + + // Cannot use Content-Length with non-identity Transfer-Encoding. + if cw.chunking { + delHeader("Content-Length") + } + if !w.req.ProtoAtLeast(1, 0) { + return + } + + if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { + delHeader("Connection") + if w.req.ProtoAtLeast(1, 1) { + setHeader.connection = "close" + } + } + + w.conn.buf.WriteString(statusLine(w.req, code)) + cw.header.WriteSubset(w.conn.buf, excludeHeader) + setHeader.Write(w.conn.buf.Writer) + w.conn.buf.Write(crlf) +} + +// statusLines is a cache of Status-Line strings, keyed by code (for +// HTTP/1.1) or negative code (for HTTP/1.0). This is faster than a +// map keyed by struct of two fields. This map's max size is bounded +// by 2*len(statusText), two protocol types for each known official +// status code in the statusText map. +var ( + statusMu sync.RWMutex + statusLines = make(map[int]string) +) + +// statusLine returns a response Status-Line (RFC 2616 Section 6.1) +// for the given request and response status code. +func statusLine(req *Request, code int) string { + // Fast path: + key := code + proto11 := req.ProtoAtLeast(1, 1) + if !proto11 { + key = -key + } + statusMu.RLock() + line, ok := statusLines[key] + statusMu.RUnlock() + if ok { + return line + } + + // Slow path: + proto := "HTTP/1.0" + if proto11 { + proto = "HTTP/1.1" + } + codestring := strconv.Itoa(code) + text, ok := statusText[code] + if !ok { + text = "status code " + codestring + } + line = proto + " " + codestring + " " + text + "\r\n" + if ok { + statusMu.Lock() + defer statusMu.Unlock() + statusLines[key] = line + } + return line +} + +// bodyAllowed returns true if a Write is allowed for this response type. +// It's illegal to call this before the header has been flushed. +func (w *response) bodyAllowed() bool { + if !w.wroteHeader { + panic("") + } + return bodyAllowedForStatus(w.status) +} + +// The Life Of A Write is like this: +// +// Handler starts. No header has been sent. The handler can either +// write a header, or just start writing. Writing before sending a header +// sends an implicitly empty 200 OK header. +// +// If the handler didn't declare a Content-Length up front, we either +// go into chunking mode or, if the handler finishes running before +// the chunking buffer size, we compute a Content-Length and send that +// in the header instead. +// +// Likewise, if the handler didn't set a Content-Type, we sniff that +// from the initial chunk of output. +// +// The Writers are wired together like: +// +// 1. 
*response (the ResponseWriter) -> +// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes +// 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) +// and which writes the chunk headers, if needed. +// 4. conn.buf, a bufio.Writer of default (4kB) bytes +// 5. the rwc, the net.Conn. +// +// TODO(bradfitz): short-circuit some of the buffering when the +// initial header contains both a Content-Type and Content-Length. +// Also short-circuit in (1) when the header's been sent and not in +// chunking mode, writing directly to (4) instead, if (2) has no +// buffered data. More generally, we could short-circuit from (1) to +// (3) even in chunking mode if the write size from (1) is over some +// threshold and nothing is in (2). The answer might be mostly making +// bufferBeforeChunkingSize smaller and having bufio's fast-paths deal +// with this instead. +func (w *response) Write(data []byte) (n int, err error) { + return w.write(len(data), data, "") +} + +func (w *response) WriteString(data string) (n int, err error) { + return w.write(len(data), nil, data) +} + +// either dataB or dataS is non-zero. +func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { + if w.conn.hijacked() { + w.conn.server.logf("http: response.Write on hijacked connection") + return 0, ErrHijacked + } + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + if lenData == 0 { + return 0, nil + } + if !w.bodyAllowed() { + return 0, ErrBodyNotAllowed + } + + w.written += int64(lenData) // ignoring errors, for errorKludge + if w.contentLength != -1 && w.written > w.contentLength { + return 0, ErrContentLength + } + if dataB != nil { + return w.w.Write(dataB) + } else { + return w.w.WriteString(dataS) + } +} + +func (w *response) finishRequest() { + w.handlerDone = true + + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + + w.w.Flush() + putBufioWriter(w.w) + w.cw.close() + w.conn.buf.Flush() + + // Close the body (regardless of w.closeAfterReply) so we can + // re-use its bufio.Reader later safely. + w.req.Body.Close() + + if w.req.MultipartForm != nil { + w.req.MultipartForm.RemoveAll() + } + + if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written { + // Did not write enough. Avoid getting out of sync. + w.closeAfterReply = true + } +} + +func (w *response) Flush() { + if !w.wroteHeader { + w.WriteHeader(StatusOK) + } + w.w.Flush() + w.cw.flush() +} + +func (c *conn) finalFlush() { + if c.buf != nil { + c.buf.Flush() + + // Steal the bufio.Reader (~4KB worth of memory) and its associated + // reader for a future connection. + putBufioReader(c.buf.Reader) + + // Steal the bufio.Writer (~4KB worth of memory) and its associated + // writer for a future connection. + putBufioWriter(c.buf.Writer) + + c.buf = nil + } +} + +// Close the connection. +func (c *conn) close() { + c.finalFlush() + if c.rwc != nil { + c.rwc.Close() + c.rwc = nil + } +} + +// rstAvoidanceDelay is the amount of time we sleep after closing the +// write side of a TCP connection before closing the entire socket. +// By sleeping, we increase the chances that the client sees our FIN +// and processes its final data before they process the subsequent RST +// from closing a connection with known unread data. +// This RST seems to occur mostly on BSD systems. (And Windows?) +// This timeout is somewhat arbitrary (~latency around the planet). 
+const rstAvoidanceDelay = 500 * time.Millisecond + +// closeWriteAndWait flushes any outstanding data and sends a FIN packet (if +// the client is connected via TCP), signalling that we're done. We then +// pause for a bit, hoping the client processes it before any +// subsequent RST. +// +// See http://golang.org/issue/3595 +func (c *conn) closeWriteAndWait() { + c.finalFlush() + if tcp, ok := c.rwc.(*net.TCPConn); ok { + tcp.CloseWrite() + } + time.Sleep(rstAvoidanceDelay) +} + +// validNPN reports whether the proto is not a blacklisted Next +// Protocol Negotiation protocol. Empty and built-in protocol types +// are blacklisted and can't be overridden with alternate +// implementations. +func validNPN(proto string) bool { + switch proto { + case "", "http/1.1", "http/1.0": + return false + } + return true +} + +func (c *conn) setState(nc net.Conn, state ConnState) { + if hook := c.server.ConnState; hook != nil { + hook(nc, state) + } +} + +// Serve a new connection. +func (c *conn) serve() { + origConn := c.rwc // copy it before it's set nil on Close or Hijack + defer func() { + if err := recover(); err != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) + } + if !c.hijacked() { + c.close() + c.setState(origConn, StateClosed) + } + }() + + if tlsConn, ok := c.rwc.(*tls.Conn); ok { + if d := c.server.ReadTimeout; d != 0 { + c.rwc.SetReadDeadline(time.Now().Add(d)) + } + if d := c.server.WriteTimeout; d != 0 { + c.rwc.SetWriteDeadline(time.Now().Add(d)) + } + if err := tlsConn.Handshake(); err != nil { + c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) + return + } + c.tlsState = new(tls.ConnectionState) + *c.tlsState = tlsConn.ConnectionState() + if proto := c.tlsState.NegotiatedProtocol; validNPN(proto) { + if fn := c.server.TLSNextProto[proto]; fn != nil { + h := initNPNRequest{tlsConn, serverHandler{c.server}} + fn(c.server, tlsConn, h) + } + return + } + } + + for { + w, err := c.readRequest() + if c.lr.N != c.server.initialLimitedReaderSize() { + // If we read any bytes off the wire, we're active. + c.setState(c.rwc, StateActive) + } + if err != nil { + if err == errTooLarge { + // Their HTTP client may or may not be + // able to read this if we're + // responding to them and hanging up + // while they're still writing their + // request. Undefined behavior. + io.WriteString(c.rwc, "HTTP/1.1 413 Request Entity Too Large\r\n\r\n") + c.closeWriteAndWait() + break + } else if err == io.EOF { + break // Don't reply + } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { + break // Don't reply + } + io.WriteString(c.rwc, "HTTP/1.1 400 Bad Request\r\n\r\n") + break + } + + // Expect 100 Continue support + req := w.req + if req.expectsContinue() { + if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { + // Wrap the Body reader with one that replies on the connection + req.Body = &expectContinueReader{readCloser: req.Body, resp: w} + } + req.Header.Del("Expect") + } else if req.Header.get("Expect") != "" { + w.sendExpectationFailed() + break + } + + // HTTP cannot have multiple simultaneous active requests.[*] + // Until the server replies to this request, it can't read another, + // so we might as well run the handler in this goroutine. + // [*] Not strictly true: HTTP pipelining. We could let them all process + // in parallel even if their responses need to be serialized.
+ serverHandler{c.server}.ServeHTTP(w, w.req) + if c.hijacked() { + return + } + w.finishRequest() + if w.closeAfterReply { + if w.requestBodyLimitHit { + c.closeWriteAndWait() + } + break + } + c.setState(c.rwc, StateIdle) + } +} + +func (w *response) sendExpectationFailed() { + // TODO(bradfitz): let ServeHTTP handlers handle + // requests with non-standard expectation[s]? Seems + // theoretical at best, and doesn't fit into the + // current ServeHTTP model anyway. We'd need to + // make the ResponseWriter an optional + // "ExpectReplier" interface or something. + // + // For now we'll just obey RFC 2616 14.20 which says + // "If a server receives a request containing an + // Expect field that includes an expectation- + // extension that it does not support, it MUST + // respond with a 417 (Expectation Failed) status." + w.Header().Set("Connection", "close") + w.WriteHeader(StatusExpectationFailed) + w.finishRequest() +} + +// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter +// and a Hijacker. +func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { + if w.wroteHeader { + w.cw.flush() + } + // Release the bufioWriter that writes to the chunk writer, it is not + // used after a connection has been hijacked. + rwc, buf, err = w.conn.hijack() + if err == nil { + putBufioWriter(w.w) + w.w = nil + } + return rwc, buf, err +} + +func (w *response) CloseNotify() <-chan bool { + return w.conn.closeNotify() +} + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as HTTP handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Request) + +// ServeHTTP calls f(w, r). +func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { + f(w, r) +} + +// Helper handlers + +// Error replies to the request with the specified error message and HTTP code. +// The error message should be plain text. +func Error(w ResponseWriter, error string, code int) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(code) + fmt.Fprintln(w, error) +} + +// NotFound replies to the request with an HTTP 404 not found error. +func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } + +// NotFoundHandler returns a simple request handler +// that replies to each request with a ``404 page not found'' reply. +func NotFoundHandler() Handler { return HandlerFunc(NotFound) } + +// StripPrefix returns a handler that serves HTTP requests +// by removing the given prefix from the request URL's Path +// and invoking the handler h. StripPrefix handles a +// request for a path that doesn't begin with prefix by +// replying with an HTTP 404 not found error. +func StripPrefix(prefix string, h Handler) Handler { + if prefix == "" { + return h + } + return HandlerFunc(func(w ResponseWriter, r *Request) { + if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { + r.URL.Path = p + h.ServeHTTP(w, r) + } else { + NotFound(w, r) + } + }) +} + +// Redirect replies to the request with a redirect to url, +// which may be a path relative to the request path. +func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { + if u, err := url.Parse(urlStr); err == nil { + // If url was relative, make absolute by + // combining with request path. + // The browser would probably do this for us, + // but doing it ourselves is more reliable. 
+ + // NOTE(rsc): RFC 2616 says that the Location + // line must be an absolute URI, like + // "http://www.google.com/redirect/", + // not a path like "/redirect/". + // Unfortunately, we don't know what to + // put in the host name section to get the + // client to connect to us again, so we can't + // know the right absolute URI to send back. + // Because of this problem, no one pays attention + // to the RFC; they all send back just a new path. + // So do we. + oldpath := r.URL.Path + if oldpath == "" { // should not happen, but avoid a crash if it does + oldpath = "/" + } + if u.Scheme == "" { + // no leading http://server + if urlStr == "" || urlStr[0] != '/' { + // make relative path absolute + olddir, _ := path.Split(oldpath) + urlStr = olddir + urlStr + } + + var query string + if i := strings.Index(urlStr, "?"); i != -1 { + urlStr, query = urlStr[:i], urlStr[i:] + } + + // clean up but preserve trailing slash + trailing := strings.HasSuffix(urlStr, "/") + urlStr = path.Clean(urlStr) + if trailing && !strings.HasSuffix(urlStr, "/") { + urlStr += "/" + } + urlStr += query + } + } + + w.Header().Set("Location", urlStr) + w.WriteHeader(code) + + // RFC2616 recommends that a short note "SHOULD" be included in the + // response because older user agents may not understand 301/307. + // Shouldn't send the response for POST or HEAD; that leaves GET. + if r.Method == "GET" { + note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n" + fmt.Fprintln(w, note) + } +} + +var htmlReplacer = strings.NewReplacer( + "&", "&amp;", + "<", "&lt;", + ">", "&gt;", + // "&#34;" is shorter than "&quot;". + `"`, "&#34;", + // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. + "'", "&#39;", +) + +func htmlEscape(s string) string { + return htmlReplacer.Replace(s) +} + +// Redirect to a fixed URL +type redirectHandler struct { + url string + code int +} + +func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { + Redirect(w, r, rh.url, rh.code) +} + +// RedirectHandler returns a request handler that redirects +// each request it receives to the given url using the given +// status code. +func RedirectHandler(url string, code int) Handler { + return &redirectHandler{url, code} +} + +// ServeMux is an HTTP request multiplexer. +// It matches the URL of each incoming request against a list of registered +// patterns and calls the handler for the pattern that +// most closely matches the URL. +// +// Patterns name fixed, rooted paths, like "/favicon.ico", +// or rooted subtrees, like "/images/" (note the trailing slash). +// Longer patterns take precedence over shorter ones, so that +// if there are handlers registered for both "/images/" +// and "/images/thumbnails/", the latter handler will be +// called for paths beginning "/images/thumbnails/" and the +// former will receive requests for any other paths in the +// "/images/" subtree. +// +// Note that since a pattern ending in a slash names a rooted subtree, +// the pattern "/" matches all paths not matched by other registered +// patterns, not just the URL with Path == "/". +// +// Patterns may optionally begin with a host name, restricting matches to +// URLs on that host only. Host-specific patterns take precedence over +// general patterns, so that a handler might register for the two patterns +// "/codesearch" and "codesearch.google.com/" without also taking over +// requests for "http://www.google.com/". +// +// ServeMux also takes care of sanitizing the URL request path, +// redirecting any request containing . or .. elements to an +// equivalent .- and ..-free URL.
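+//
+// For example (an illustrative sketch; the handler values are hypothetical):
+//
+//	mux := NewServeMux()
+//	mux.Handle("/favicon.ico", faviconHandler)      // fixed, rooted path
+//	mux.Handle("/images/", imagesHandler)           // rooted subtree
+//	mux.Handle("/images/thumbnails/", thumbHandler) // longer pattern wins
+//	log.Fatal(ListenAndServe(":8080", mux))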
+type ServeMux struct { + mu sync.RWMutex + m map[string]muxEntry + hosts bool // whether any patterns contain hostnames +} + +type muxEntry struct { + explicit bool + h Handler + pattern string +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// Does path match pattern? +func pathMatch(pattern, path string) bool { + if len(pattern) == 0 { + // should not happen + return false + } + n := len(pattern) + if pattern[n-1] != '/' { + return pattern == path + } + return len(path) >= n && path[0:n] == pattern +} + +// Return the canonical path for p, eliminating . and .. elements. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// Find a handler on a handler map given a path string +// Most-specific (longest) pattern wins +func (mux *ServeMux) match(path string) (h Handler, pattern string) { + var n = 0 + for k, v := range mux.m { + if !pathMatch(k, path) { + continue + } + if h == nil || len(k) > n { + n = len(k) + h = v.h + pattern = v.pattern + } + } + return +} + +// Handler returns the handler to use for the given request, +// consulting r.Method, r.Host, and r.URL.Path. It always returns +// a non-nil handler. If the path is not in its canonical form, the +// handler will be an internally-generated handler that redirects +// to the canonical path. +// +// Handler also returns the registered pattern that matches the +// request or, in the case of internally-generated redirects, +// the pattern that will match after following the redirect. +// +// If there is no registered handler that applies to the request, +// Handler returns a ``page not found'' handler and an empty pattern. +func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { + if r.Method != "CONNECT" { + if p := cleanPath(r.URL.Path); p != r.URL.Path { + _, pattern = mux.handler(r.Host, p) + url := *r.URL + url.Path = p + return RedirectHandler(url.String(), StatusMovedPermanently), pattern + } + } + + return mux.handler(r.Host, r.URL.Path) +} + +// handler is the main implementation of Handler. +// The path is known to be in canonical form, except for CONNECT methods. +func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { + mux.mu.RLock() + defer mux.mu.RUnlock() + + // Host-specific pattern takes precedence over generic ones + if mux.hosts { + h, pattern = mux.match(host + path) + } + if h == nil { + h, pattern = mux.match(path) + } + if h == nil { + h, pattern = NotFoundHandler(), "" + } + return +} + +// ServeHTTP dispatches the request to the handler whose +// pattern most closely matches the request URL. +func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { + if r.RequestURI == "*" { + if r.ProtoAtLeast(1, 1) { + w.Header().Set("Connection", "close") + } + w.WriteHeader(StatusBadRequest) + return + } + h, _ := mux.Handler(r) + h.ServeHTTP(w, r) +} + +// Handle registers the handler for the given pattern. +// If a handler already exists for pattern, Handle panics. 
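+//
+// A minimal sketch (helloHandler is a hypothetical Handler value):
+//
+//	mux.Handle("/hello", helloHandler)
+//	mux.Handle("/hello", helloHandler) // panics: multiple registrations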
+func (mux *ServeMux) Handle(pattern string, handler Handler) { + mux.mu.Lock() + defer mux.mu.Unlock() + + if pattern == "" { + panic("http: invalid pattern " + pattern) + } + if handler == nil { + panic("http: nil handler") + } + if mux.m[pattern].explicit { + panic("http: multiple registrations for " + pattern) + } + + mux.m[pattern] = muxEntry{explicit: true, h: handler, pattern: pattern} + + if pattern[0] != '/' { + mux.hosts = true + } + + // Helpful behavior: + // If pattern is /tree/, insert an implicit permanent redirect for /tree. + // It can be overridden by an explicit registration. + n := len(pattern) + if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { + // If pattern contains a host name, strip it and use remaining + // path for redirect. + path := pattern + if pattern[0] != '/' { + // In pattern, at least the last character is a '/', so + // strings.Index can't be -1. + path = pattern[strings.Index(pattern, "/"):] + } + mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(path, StatusMovedPermanently), pattern: pattern} + } +} + +// HandleFunc registers the handler function for the given pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// Handle registers the handler for the given pattern +// in the DefaultServeMux. +// The documentation for ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleFunc registers the handler function for the given pattern +// in the DefaultServeMux. +// The documentation for ServeMux explains how patterns are matched. +func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { + DefaultServeMux.HandleFunc(pattern, handler) +} + +// Serve accepts incoming HTTP connections on the listener l, +// creating a new service goroutine for each. The service goroutines +// read requests and then call handler to reply to them. +// Handler is typically nil, in which case the DefaultServeMux is used. +func Serve(l net.Listener, handler Handler) error { + srv := &Server{Handler: handler} + return srv.Serve(l) +} + +// A Server defines parameters for running an HTTP server. +// The zero value for Server is a valid configuration. +type Server struct { + Addr string // TCP address to listen on, ":http" if empty + Handler Handler // handler to invoke, http.DefaultServeMux if nil + ReadTimeout time.Duration // maximum duration before timing out read of the request + WriteTimeout time.Duration // maximum duration before timing out write of the response + MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 + TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS + + // TLSNextProto optionally specifies a function to take over + // ownership of the provided TLS connection when an NPN + // protocol upgrade has occurred. The map key is the protocol + // name negotiated. The Handler argument should be used to + // handle HTTP requests and will initialize the Request's TLS + // and RemoteAddr if not already set. The connection is + // automatically closed when the function returns. + TLSNextProto map[string]func(*Server, *tls.Conn, Handler) + + // ConnState specifies an optional callback function that is + // called when a client connection changes state. See the + // ConnState type and associated constants for details. 
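+ //
+ // For example, a sketch of a hook that tracks open connections
+ // (the "open" counter is hypothetical):
+ //
+ //	srv.ConnState = func(c net.Conn, cs ConnState) {
+ //		switch cs {
+ //		case StateNew:
+ //			atomic.AddInt64(&open, 1)
+ //		case StateHijacked, StateClosed:
+ //			atomic.AddInt64(&open, -1)
+ //		}
+ //	}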
+ ConnState func(net.Conn, ConnState) + + // ErrorLog specifies an optional logger for errors accepting + // connections and unexpected behavior from handlers. + // If nil, logging goes to os.Stderr via the log package's + // standard logger. + ErrorLog *log.Logger + + disableKeepAlives int32 // accessed atomically. +} + +// A ConnState represents the state of a client connection to a server. +// It's used by the optional Server.ConnState hook. +type ConnState int + +const ( + // StateNew represents a new connection that is expected to + // send a request immediately. Connections begin at this + // state and then transition to either StateActive or + // StateClosed. + StateNew ConnState = iota + + // StateActive represents a connection that has read 1 or more + // bytes of a request. The Server.ConnState hook for + // StateActive fires before the request has entered a handler + // and doesn't fire again until the request has been + // handled. After the request is handled, the state + // transitions to StateClosed, StateHijacked, or StateIdle. + StateActive + + // StateIdle represents a connection that has finished + // handling a request and is in the keep-alive state, waiting + // for a new request. Connections transition from StateIdle + // to either StateActive or StateClosed. + StateIdle + + // StateHijacked represents a hijacked connection. + // This is a terminal state. It does not transition to StateClosed. + StateHijacked + + // StateClosed represents a closed connection. + // This is a terminal state. Hijacked connections do not + // transition to StateClosed. + StateClosed +) + +var stateName = map[ConnState]string{ + StateNew: "new", + StateActive: "active", + StateIdle: "idle", + StateHijacked: "hijacked", + StateClosed: "closed", +} + +func (c ConnState) String() string { + return stateName[c] +} + +// serverHandler delegates to either the server's Handler or +// DefaultServeMux and also handles "OPTIONS *" requests. +type serverHandler struct { + srv *Server +} + +func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { + handler := sh.srv.Handler + if handler == nil { + handler = DefaultServeMux + } + if req.RequestURI == "*" && req.Method == "OPTIONS" { + handler = globalOptionsHandler{} + } + handler.ServeHTTP(rw, req) +} + +// ListenAndServe listens on the TCP network address srv.Addr and then +// calls Serve to handle requests on incoming connections. If +// srv.Addr is blank, ":http" is used. +func (srv *Server) ListenAndServe() error { + addr := srv.Addr + if addr == "" { + addr = ":http" + } + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}) +} + +// Serve accepts incoming connections on the Listener l, creating a +// new service goroutine for each. The service goroutines read requests and +// then call srv.Handler to reply to them. 
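+//
+// For example, to serve on a caller-provided listener (a sketch; the
+// listener address and mux are illustrative):
+//
+//	ln, err := net.Listen("tcp", ":8080")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal((&Server{Handler: mux}).Serve(ln))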
+func (srv *Server) Serve(l net.Listener) error { + defer l.Close() + var tempDelay time.Duration // how long to sleep on accept failure + for { + rw, e := l.Accept() + if e != nil { + if ne, ok := e.(net.Error); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + srv.logf("http: Accept error: %v; retrying in %v", e, tempDelay) + time.Sleep(tempDelay) + continue + } + return e + } + tempDelay = 0 + c, err := srv.newConn(rw) + if err != nil { + continue + } + c.setState(c.rwc, StateNew) // before Serve can return + go c.serve() + } +} + +func (s *Server) doKeepAlives() bool { + return atomic.LoadInt32(&s.disableKeepAlives) == 0 +} + +// SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. +// By default, keep-alives are always enabled. Only very +// resource-constrained environments or servers in the process of +// shutting down should disable them. +func (s *Server) SetKeepAlivesEnabled(v bool) { + if v { + atomic.StoreInt32(&s.disableKeepAlives, 0) + } else { + atomic.StoreInt32(&s.disableKeepAlives, 1) + } +} + +func (s *Server) logf(format string, args ...interface{}) { + if s.ErrorLog != nil { + s.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// ListenAndServe listens on the TCP network address addr +// and then calls Serve with handler to handle requests +// on incoming connections. Handler is typically nil, +// in which case the DefaultServeMux is used. +// +// A trivial example server is: +// +// package main +// +// import ( +// "io" +// "net/http" +// "log" +// ) +// +// // hello world, the web server +// func HelloServer(w http.ResponseWriter, req *http.Request) { +// io.WriteString(w, "hello, world!\n") +// } +// +// func main() { +// http.HandleFunc("/hello", HelloServer) +// err := http.ListenAndServe(":12345", nil) +// if err != nil { +// log.Fatal("ListenAndServe: ", err) +// } +// } +func ListenAndServe(addr string, handler Handler) error { + server := &Server{Addr: addr, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts identically to ListenAndServe, except that it +// expects HTTPS connections. Additionally, files containing a certificate and +// matching private key for the server must be provided. If the certificate +// is signed by a certificate authority, the certFile should be the concatenation +// of the server's certificate followed by the CA's certificate. +// +// A trivial example server is: +// +// import ( +// "log" +// "net/http" +// ) +// +// func handler(w http.ResponseWriter, req *http.Request) { +// w.Header().Set("Content-Type", "text/plain") +// w.Write([]byte("This is an example server.\n")) +// } +// +// func main() { +// http.HandleFunc("/", handler) +// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") +// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) +// if err != nil { +// log.Fatal(err) +// } +// } +// +// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. +func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error { + server := &Server{Addr: addr, Handler: handler} + return server.ListenAndServeTLS(certFile, keyFile) +} + +// ListenAndServeTLS listens on the TCP network address srv.Addr and +// then calls Serve to handle requests on incoming TLS connections. 
+//
+// Filenames containing a certificate and matching private key for
+// the server must be provided. If the certificate is signed by a
+// certificate authority, the certFile should be the concatenation
+// of the server's certificate followed by the CA's certificate.
+//
+// If srv.Addr is blank, ":https" is used.
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+	addr := srv.Addr
+	if addr == "" {
+		addr = ":https"
+	}
+	config := &tls.Config{}
+	if srv.TLSConfig != nil {
+		*config = *srv.TLSConfig
+	}
+	if config.NextProtos == nil {
+		config.NextProtos = []string{"http/1.1"}
+	}
+
+	var err error
+	config.Certificates = make([]tls.Certificate, 1)
+	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return err
+	}
+
+	ln, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+
+	tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
+	return srv.Serve(tlsListener)
+}
+
+// TimeoutHandler returns a Handler that runs h with the given time limit.
+//
+// The new Handler calls h.ServeHTTP to handle each request, but if a
+// call runs for longer than its time limit, the handler responds with
+// a 503 Service Unavailable error and the given message in its body.
+// (If msg is empty, a suitable default message will be sent.)
+// After such a timeout, writes by h to its ResponseWriter will return
+// ErrHandlerTimeout.
+func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
+	f := func() <-chan time.Time {
+		return time.After(dt)
+	}
+	return &timeoutHandler{h, f, msg}
+}
+
+// ErrHandlerTimeout is returned on ResponseWriter Write calls
+// in handlers which have timed out.
+var ErrHandlerTimeout = errors.New("http: Handler timeout")
+
+type timeoutHandler struct {
+	handler Handler
+	timeout func() <-chan time.Time // returns channel producing a timeout
+	body    string
+}
+
+func (h *timeoutHandler) errorBody() string {
+	if h.body != "" {
+		return h.body
+	}
+	return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>"
+}
+
+func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
+	done := make(chan bool, 1)
+	tw := &timeoutWriter{w: w}
+	go func() {
+		h.handler.ServeHTTP(tw, r)
+		done <- true
+	}()
+	select {
+	case <-done:
+		return
+	case <-h.timeout():
+		tw.mu.Lock()
+		defer tw.mu.Unlock()
+		if !tw.wroteHeader {
+			tw.w.WriteHeader(StatusServiceUnavailable)
+			tw.w.Write([]byte(h.errorBody()))
+		}
+		tw.timedOut = true
+	}
+}
+
+type timeoutWriter struct {
+	w ResponseWriter
+
+	mu          sync.Mutex
+	timedOut    bool
+	wroteHeader bool
+}
+
+func (tw *timeoutWriter) Header() Header {
+	return tw.w.Header()
+}
+
+func (tw *timeoutWriter) Write(p []byte) (int, error) {
+	tw.mu.Lock()
+	timedOut := tw.timedOut
+	tw.mu.Unlock()
+	if timedOut {
+		return 0, ErrHandlerTimeout
+	}
+	return tw.w.Write(p)
+}
+
+func (tw *timeoutWriter) WriteHeader(code int) {
+	tw.mu.Lock()
+	if tw.timedOut || tw.wroteHeader {
+		tw.mu.Unlock()
+		return
+	}
+	tw.wroteHeader = true
+	tw.mu.Unlock()
+	tw.w.WriteHeader(code)
+}
+
+// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
+// connections. It's used by ListenAndServe and ListenAndServeTLS so
+// dead TCP connections (e.g. closing laptop mid-download) eventually
+// go away.
+type tcpKeepAliveListener struct {
+	*net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+	tc, err := ln.AcceptTCP()
+	if err != nil {
+		return
+	}
+	tc.SetKeepAlive(true)
+	tc.SetKeepAlivePeriod(3 * time.Minute)
+	return tc, nil
+}
+
+// globalOptionsHandler responds to "OPTIONS *" requests.
+type globalOptionsHandler struct{}
+
+func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) {
+	w.Header().Set("Content-Length", "0")
+	if r.ContentLength != 0 {
+		// Read up to 4KB of OPTIONS body (as mentioned in the
+		// spec as being reserved for future use), but anything
+		// over that is considered a waste of server resources
+		// (or an attack) and we abort and close the connection,
+		// courtesy of MaxBytesReader's EOF behavior.
+		mb := MaxBytesReader(w, r.Body, 4<<10)
+		io.Copy(ioutil.Discard, mb)
+	}
+}
+
+type eofReaderWithWriteTo struct{}
+
+func (eofReaderWithWriteTo) WriteTo(io.Writer) (int64, error) { return 0, nil }
+func (eofReaderWithWriteTo) Read([]byte) (int, error)         { return 0, io.EOF }
+
+// eofReader is a non-nil io.ReadCloser that always returns EOF.
+// It has a WriteTo method so io.Copy won't need a buffer.
+var eofReader = &struct {
+	eofReaderWithWriteTo
+	io.Closer
+}{
+	eofReaderWithWriteTo{},
+	ioutil.NopCloser(nil),
+}
+
+// Verify that an io.Copy from an eofReader won't require a buffer.
+var _ io.WriterTo = eofReader
+
+// initNPNRequest is an HTTP handler that initializes certain
+// uninitialized fields in its *Request. Such partially-initialized
+// Requests come from NPN protocol handlers.
+type initNPNRequest struct {
+	c *tls.Conn
+	h serverHandler
+}
+
+func (h initNPNRequest) ServeHTTP(rw ResponseWriter, req *Request) {
+	if req.TLS == nil {
+		req.TLS = &tls.ConnectionState{}
+		*req.TLS = h.c.ConnectionState()
+	}
+	if req.Body == nil {
+		req.Body = eofReader
+	}
+	if req.RemoteAddr == "" {
+		req.RemoteAddr = h.c.RemoteAddr().String()
+	}
+	h.h.ServeHTTP(rw, req)
+}
+type loggingConn struct {
+	name string
+	net.Conn
+}
+
+var (
+	uniqNameMu   sync.Mutex
+	uniqNameNext = make(map[string]int)
+)
+
+func newLoggingConn(baseName string, c net.Conn) net.Conn {
+	uniqNameMu.Lock()
+	defer uniqNameMu.Unlock()
+	uniqNameNext[baseName]++
+	return &loggingConn{
+		name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]),
+		Conn: c,
+	}
+}
+
+func (c *loggingConn) Write(p []byte) (n int, err error) {
+	log.Printf("%s.Write(%d) = ....", c.name, len(p))
+	n, err = c.Conn.Write(p)
+	log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err)
+	return
+}
+
+func (c *loggingConn) Read(p []byte) (n int, err error) {
+	log.Printf("%s.Read(%d) = ....", c.name, len(p))
+	n, err = c.Conn.Read(p)
+	log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err)
+	return
+}
+
+func (c *loggingConn) Close() (err error) {
+	log.Printf("%s.Close() = ...", c.name)
+	err = c.Conn.Close()
+	log.Printf("%s.Close() = %v", c.name, err)
+	return
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/sniff.go'
--- src/github.com/Azure/azure-sdk-for-go/core/http/sniff.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/sniff.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,214 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http
+
+import (
+	"bytes"
+	"encoding/binary"
+)
+
+// The algorithm uses at most sniffLen bytes to make its decision.
+const sniffLen = 512
+
+// DetectContentType implements the algorithm described
+// at http://mimesniff.spec.whatwg.org/ to determine the
+// Content-Type of the given data. It considers at most the
+// first 512 bytes of data. DetectContentType always returns
+// a valid MIME type: if it cannot determine a more specific one, it
+// returns "application/octet-stream".
+func DetectContentType(data []byte) string {
+	if len(data) > sniffLen {
+		data = data[:sniffLen]
+	}
+
+	// Index of the first non-whitespace byte in data.
+	firstNonWS := 0
+	for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ {
+	}
+
+	for _, sig := range sniffSignatures {
+		if ct := sig.match(data, firstNonWS); ct != "" {
+			return ct
+		}
+	}
+
+	return "application/octet-stream" // fallback
+}
+
+func isWS(b byte) bool {
+	return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1
+}
+
+type sniffSig interface {
+	// match returns the MIME type of the data, or "" if unknown.
+	match(data []byte, firstNonWS int) string
+}
+
+// Data matching the table in section 6.
+var sniffSignatures = []sniffSig{
+	htmlSig("<!DOCTYPE HTML"),
+	htmlSig("<HTML"),
+	htmlSig("<HEAD"),
+	htmlSig("<SCRIPT"),
+	htmlSig("<IFRAME"),
+	htmlSig("<H1"),
+	htmlSig("<DIV"),
+	htmlSig("<FONT"),
+	htmlSig("<TABLE"),
+	htmlSig("<A"),
+	htmlSig("<STYLE"),
+	htmlSig("<TITLE"),
+	htmlSig("<B"),
+	htmlSig("<BODY"),
+	htmlSig("<BR"),
+	htmlSig("<P"),
+	htmlSig("<!--"),
+
+	&maskedSig{mask: []byte("\xFF\xFF\xFF\xFF\xFF"), pat: []byte("<?xml"), skipWS: true, ct: "text/xml; charset=utf-8"},
+
+	&exactSig{[]byte("%PDF-"), "application/pdf"},
+	&exactSig{[]byte("%!PS-Adobe-"), "application/postscript"},
+
+	// UTF BOMs.
+	&maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFE\xFF\x00\x00"), ct: "text/plain; charset=utf-16be"},
+	&maskedSig{mask: []byte("\xFF\xFF\x00\x00"), pat: []byte("\xFF\xFE\x00\x00"), ct: "text/plain; charset=utf-16le"},
+	&maskedSig{mask: []byte("\xFF\xFF\xFF\x00"), pat: []byte("\xEF\xBB\xBF\x00"), ct: "text/plain; charset=utf-8"},
+
+	&exactSig{[]byte("GIF87a"), "image/gif"},
+	&exactSig{[]byte("GIF89a"), "image/gif"},
+	&exactSig{[]byte("\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"), "image/png"},
+	&exactSig{[]byte("\xFF\xD8\xFF"), "image/jpeg"},
+	&exactSig{[]byte("BM"), "image/bmp"},
+	&maskedSig{
+		mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF\xFF\xFF"),
+		pat:  []byte("RIFF\x00\x00\x00\x00WEBPVP"),
+		ct:   "image/webp",
+	},
+	&exactSig{[]byte("\x00\x00\x01\x00"), "image/vnd.microsoft.icon"},
+	&exactSig{[]byte("\x4F\x67\x67\x53\x00"), "application/ogg"},
+	&maskedSig{
+		mask: []byte("\xFF\xFF\xFF\xFF\x00\x00\x00\x00\xFF\xFF\xFF\xFF"),
+		pat:  []byte("RIFF\x00\x00\x00\x00WAVE"),
+		ct:   "audio/wave",
+	},
+	&exactSig{[]byte("\x1A\x45\xDF\xA3"), "video/webm"},
+	&exactSig{[]byte("\x52\x61\x72\x20\x1A\x07\x00"), "application/x-rar-compressed"},
+	&exactSig{[]byte("\x50\x4B\x03\x04"), "application/zip"},
+	&exactSig{[]byte("\x1F\x8B\x08"), "application/x-gzip"},
+
+	mp4Sig(0),
+
+	textSig(0), // should be last
+}
+
+type exactSig struct {
+	sig []byte
+	ct  string
+}
+
+func (e *exactSig) match(data []byte, firstNonWS int) string {
+	if bytes.HasPrefix(data, e.sig) {
+		return e.ct
+	}
+	return ""
+}
+
+type maskedSig struct {
+	mask, pat []byte
+	skipWS    bool
+	ct        string
+}
+
+func (m *maskedSig) match(data []byte, firstNonWS int) string {
+	if m.skipWS {
+		data = data[firstNonWS:]
+	}
+	if len(data) < len(m.mask) {
+		return ""
+	}
+	for i, mask := range m.mask {
+		db := data[i] & mask
+		if db != m.pat[i] {
+			return ""
+		}
+	}
+	return m.ct
+}
+
+type htmlSig []byte
+
+func (h htmlSig) match(data []byte, firstNonWS int) string {
+	data = data[firstNonWS:]
+	if len(data) < len(h)+1 {
+		return ""
+	}
+	for i, b := range []byte(h) {
+		db := data[i]
+		if 'A' <= b && b <= 'Z' {
+			db &= 0xDF
+		}
+		if b != db {
+			return ""
+		}
+	}
+	// Next byte must be space or right angle bracket.
+	if db := data[len(h)]; db != ' ' && db != '>' {
+		return ""
+	}
+	return "text/html; charset=utf-8"
+}
+
+type mp4Sig int
+
+func (mp4Sig) match(data []byte, firstNonWS int) string {
+	// c.f. section 6.1.
+	if len(data) < 8 {
+		return ""
+	}
+	boxSize := int(binary.BigEndian.Uint32(data[:4]))
+	if boxSize%4 != 0 || len(data) < boxSize {
+		return ""
+	}
+	if !bytes.Equal(data[4:8], []byte("ftyp")) {
+		return ""
+	}
+	for st := 8; st < boxSize; st += 4 {
+		if st == 12 {
+			// minor version number
+			continue
+		}
+		seg := string(data[st : st+3])
+		switch seg {
+		case "mp4", "iso", "M4V", "M4P", "M4B":
+			return "video/mp4"
+			/* The remainder are not in the spec.
+			case "M4A":
+				return "audio/mp4"
+			case "3gp":
+				return "video/3gpp"
+			case "jp2":
+				return "image/jp2" // JPEG 2000
+			*/
+		}
+	}
+	return ""
+}
+
+type textSig int
+
+func (textSig) match(data []byte, firstNonWS int) string {
+	// c.f. section 5, step 4.
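+	// Control bytes other than \t, \n, \f, \r and ESC never appear in
+	// plain text; seeing any of them means the data is binary.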
+	for _, b := range data[firstNonWS:] {
+		switch {
+		case 0x00 <= b && b <= 0x08,
+			b == 0x0B,
+			0x0E <= b && b <= 0x1A,
+			0x1C <= b && b <= 0x1F:
+			return ""
+		}
+	}
+	return "text/plain; charset=utf-8"
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go'
--- src/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/http/sniff_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,171 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http_test
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	. "net/http"
+	"net/http/httptest"
+	"reflect"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+var sniffTests = []struct {
+	desc        string
+	data        []byte
+	contentType string
+}{
+	// Some nonsense.
+	{"Empty", []byte{}, "text/plain; charset=utf-8"},
+	{"Binary", []byte{1, 2, 3}, "application/octet-stream"},
+
+	{"HTML document #1", []byte(`<HtMl><bOdY>blah blah blah</body></html>`), "text/html; charset=utf-8"},
+	{"HTML document #2", []byte(`<HTML></HTML>`), "text/html; charset=utf-8"},
+	{"HTML document #3 (leading whitespace)", []byte(`   <!DOCTYPE HTML>...`), "text/html; charset=utf-8"},
+	{"HTML document #4 (leading CRLF)", []byte("\r\n<html>..."), "text/html; charset=utf-8"},
+
+	{"Plain text", []byte(`This is not HTML. It has ☃ though.`), "text/plain; charset=utf-8"},
+
+	{"XML", []byte("\n<?xml!"), "text/xml; charset=utf-8"},
+
+	// Image types.
+	{"GIF 87a", []byte(`GIF87a`), "image/gif"},
+	{"GIF 89a", []byte(`GIF89a...`), "image/gif"},
+
+	// TODO(dsymonds): Re-enable this when the spec is sorted w.r.t. MP4.
+	//{"MP4 video", []byte("\x00\x00\x00\x18ftypmp42\x00\x00\x00\x00mp42isom<\x06t\xbfmdat"), "video/mp4"},
+	//{"MP4 audio", []byte("\x00\x00\x00\x20ftypM4A \x00\x00\x00\x00M4A mp42isom\x00\x00\x00\x00mdat"), "audio/mp4"},
+}
+
+func TestDetectContentType(t *testing.T) {
+	for _, tt := range sniffTests {
+		ct := DetectContentType(tt.data)
+		if ct != tt.contentType {
+			t.Errorf("%v: DetectContentType = %q, want %q", tt.desc, ct, tt.contentType)
+		}
+	}
+}
+
+func TestServerContentType(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		i, _ := strconv.Atoi(r.FormValue("i"))
+		tt := sniffTests[i]
+		n, err := w.Write(tt.data)
+		if n != len(tt.data) || err != nil {
+			log.Fatalf("%v: Write(%q) = %v, %v want %d, nil", tt.desc, tt.data, n, err, len(tt.data))
+		}
+	}))
+	defer ts.Close()
+
+	for i, tt := range sniffTests {
+		resp, err := Get(ts.URL + "/?i=" + strconv.Itoa(i))
+		if err != nil {
+			t.Errorf("%v: %v", tt.desc, err)
+			continue
+		}
+		if ct := resp.Header.Get("Content-Type"); ct != tt.contentType {
+			t.Errorf("%v: Content-Type = %q, want %q", tt.desc, ct, tt.contentType)
+		}
+		data, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			t.Errorf("%v: reading body: %v", tt.desc, err)
+		} else if !bytes.Equal(data, tt.data) {
+			t.Errorf("%v: data is %q, want %q", tt.desc, data, tt.data)
+		}
+		resp.Body.Close()
+	}
+}
+
+// Issue 5953: shouldn't sniff if the handler set a Content-Type header,
+// even if it's the empty string.
+func TestServerIssue5953(t *testing.T) {
+	defer afterTest(t)
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		w.Header()["Content-Type"] = []string{""}
+		fmt.Fprintf(w, "<html><head></head><body>hi</body></html>")
+	}))
+	defer ts.Close()
+
+	resp, err := Get(ts.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	got := resp.Header["Content-Type"]
+	want := []string{""}
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("Content-Type = %q; want %q", got, want)
+	}
+	resp.Body.Close()
+}
+
+func TestContentTypeWithCopy(t *testing.T) {
+	defer afterTest(t)
+
+	const (
+		input    = "\n<html>\n\t<head>\n"
+		expected = "text/html; charset=utf-8"
+	)
+
+	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+		// Use io.Copy from a bytes.Buffer to trigger ReadFrom.
+ buf := bytes.NewBuffer([]byte(input)) + n, err := io.Copy(w, buf) + if int(n) != len(input) || err != nil { + t.Errorf("io.Copy(w, %q) = %v, %v want %d, nil", input, n, err, len(input)) + } + })) + defer ts.Close() + + resp, err := Get(ts.URL) + if err != nil { + t.Fatalf("Get: %v", err) + } + if ct := resp.Header.Get("Content-Type"); ct != expected { + t.Errorf("Content-Type = %q, want %q", ct, expected) + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("reading body: %v", err) + } else if !bytes.Equal(data, []byte(input)) { + t.Errorf("data is %q, want %q", data, input) + } + resp.Body.Close() +} + +func TestSniffWriteSize(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + size, _ := strconv.Atoi(r.FormValue("size")) + written, err := io.WriteString(w, strings.Repeat("a", size)) + if err != nil { + t.Errorf("write of %d bytes: %v", size, err) + return + } + if written != size { + t.Errorf("write of %d bytes wrote %d bytes", size, written) + } + })) + defer ts.Close() + for _, size := range []int{0, 1, 200, 600, 999, 1000, 1023, 1024, 512 << 10, 1 << 20} { + res, err := Get(fmt.Sprintf("%s/?size=%d", ts.URL, size)) + if err != nil { + t.Fatalf("size %d: %v", size, err) + } + if _, err := io.Copy(ioutil.Discard, res.Body); err != nil { + t.Fatalf("size %d: io.Copy of body = %v", size, err) + } + if err := res.Body.Close(); err != nil { + t.Fatalf("size %d: body Close = %v", size, err) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/status.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/status.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/status.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,120 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +// HTTP status codes, defined in RFC 2616. +const ( + StatusContinue = 100 + StatusSwitchingProtocols = 101 + + StatusOK = 200 + StatusCreated = 201 + StatusAccepted = 202 + StatusNonAuthoritativeInfo = 203 + StatusNoContent = 204 + StatusResetContent = 205 + StatusPartialContent = 206 + + StatusMultipleChoices = 300 + StatusMovedPermanently = 301 + StatusFound = 302 + StatusSeeOther = 303 + StatusNotModified = 304 + StatusUseProxy = 305 + StatusTemporaryRedirect = 307 + + StatusBadRequest = 400 + StatusUnauthorized = 401 + StatusPaymentRequired = 402 + StatusForbidden = 403 + StatusNotFound = 404 + StatusMethodNotAllowed = 405 + StatusNotAcceptable = 406 + StatusProxyAuthRequired = 407 + StatusRequestTimeout = 408 + StatusConflict = 409 + StatusGone = 410 + StatusLengthRequired = 411 + StatusPreconditionFailed = 412 + StatusRequestEntityTooLarge = 413 + StatusRequestURITooLong = 414 + StatusUnsupportedMediaType = 415 + StatusRequestedRangeNotSatisfiable = 416 + StatusExpectationFailed = 417 + StatusTeapot = 418 + + StatusInternalServerError = 500 + StatusNotImplemented = 501 + StatusBadGateway = 502 + StatusServiceUnavailable = 503 + StatusGatewayTimeout = 504 + StatusHTTPVersionNotSupported = 505 + + // New HTTP status codes from RFC 6585. Not exported yet in Go 1.1. 
+ // See discussion at https://codereview.appspot.com/7678043/ + statusPreconditionRequired = 428 + statusTooManyRequests = 429 + statusRequestHeaderFieldsTooLarge = 431 + statusNetworkAuthenticationRequired = 511 +) + +var statusText = map[int]string{ + StatusContinue: "Continue", + StatusSwitchingProtocols: "Switching Protocols", + + StatusOK: "OK", + StatusCreated: "Created", + StatusAccepted: "Accepted", + StatusNonAuthoritativeInfo: "Non-Authoritative Information", + StatusNoContent: "No Content", + StatusResetContent: "Reset Content", + StatusPartialContent: "Partial Content", + + StatusMultipleChoices: "Multiple Choices", + StatusMovedPermanently: "Moved Permanently", + StatusFound: "Found", + StatusSeeOther: "See Other", + StatusNotModified: "Not Modified", + StatusUseProxy: "Use Proxy", + StatusTemporaryRedirect: "Temporary Redirect", + + StatusBadRequest: "Bad Request", + StatusUnauthorized: "Unauthorized", + StatusPaymentRequired: "Payment Required", + StatusForbidden: "Forbidden", + StatusNotFound: "Not Found", + StatusMethodNotAllowed: "Method Not Allowed", + StatusNotAcceptable: "Not Acceptable", + StatusProxyAuthRequired: "Proxy Authentication Required", + StatusRequestTimeout: "Request Timeout", + StatusConflict: "Conflict", + StatusGone: "Gone", + StatusLengthRequired: "Length Required", + StatusPreconditionFailed: "Precondition Failed", + StatusRequestEntityTooLarge: "Request Entity Too Large", + StatusRequestURITooLong: "Request URI Too Long", + StatusUnsupportedMediaType: "Unsupported Media Type", + StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", + StatusExpectationFailed: "Expectation Failed", + StatusTeapot: "I'm a teapot", + + StatusInternalServerError: "Internal Server Error", + StatusNotImplemented: "Not Implemented", + StatusBadGateway: "Bad Gateway", + StatusServiceUnavailable: "Service Unavailable", + StatusGatewayTimeout: "Gateway Timeout", + StatusHTTPVersionNotSupported: "HTTP Version Not Supported", + + statusPreconditionRequired: "Precondition Required", + statusTooManyRequests: "Too Many Requests", + statusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", + statusNetworkAuthenticationRequired: "Network Authentication Required", +} + +// StatusText returns a text for the HTTP status code. It returns the empty +// string if the code is unknown. 
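+//
+// For example, StatusText(StatusTeapot) returns "I'm a teapot" and
+// StatusText(600) returns "".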
+func StatusText(code int) string { + return statusText[code] +} === added directory 'src/github.com/Azure/azure-sdk-for-go/core/http/testdata' === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file' --- src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/testdata/file 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +0123456789 === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html' --- src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/testdata/index.html 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +index.html says hello === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css' --- src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/testdata/style.css 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +body {} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/transfer.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/transfer.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/transfer.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,730 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "net/textproto" + "sort" + "strconv" + "strings" + "sync" +) + +type errorReader struct { + err error +} + +func (r *errorReader) Read(p []byte) (n int, err error) { + return 0, r.err +} + +// transferWriter inspects the fields of a user-supplied Request or Response, +// sanitizes them without changing the user object and provides methods for +// writing the respective header, body and trailer in wire format. +type transferWriter struct { + Method string + Body io.Reader + BodyCloser io.Closer + ResponseToHEAD bool + ContentLength int64 // -1 means unknown, 0 means exactly none + Close bool + TransferEncoding []string + Trailer Header +} + +func newTransferWriter(r interface{}) (t *transferWriter, err error) { + t = &transferWriter{} + + // Extract relevant fields + atLeastHTTP11 := false + switch rr := r.(type) { + case *Request: + if rr.ContentLength != 0 && rr.Body == nil { + return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength) + } + t.Method = rr.Method + t.Body = rr.Body + t.BodyCloser = rr.Body + t.ContentLength = rr.ContentLength + t.Close = rr.Close + t.TransferEncoding = rr.TransferEncoding + t.Trailer = rr.Trailer + atLeastHTTP11 = rr.ProtoAtLeast(1, 1) + if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 { + if t.ContentLength == 0 { + // Test to see if it's actually zero or just unset. + var buf [1]byte + n, rerr := io.ReadFull(t.Body, buf[:]) + if rerr != nil && rerr != io.EOF { + t.ContentLength = -1 + t.Body = &errorReader{rerr} + } else if n == 1 { + // Oh, guess there is data in this Body Reader after all. + // The ContentLength field just wasn't set. + // Stich the Body back together again, re-attaching our + // consumed byte. + t.ContentLength = -1 + t.Body = io.MultiReader(bytes.NewReader(buf[:]), t.Body) + } else { + // Body is actually empty. 
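+				// Dropping both the reader and the closer here lets
+				// the sanitizing below treat this as a bodiless message.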
+ t.Body = nil + t.BodyCloser = nil + } + } + if t.ContentLength < 0 { + t.TransferEncoding = []string{"chunked"} + } + } + case *Response: + if rr.Request != nil { + t.Method = rr.Request.Method + } + t.Body = rr.Body + t.BodyCloser = rr.Body + t.ContentLength = rr.ContentLength + t.Close = rr.Close + t.TransferEncoding = rr.TransferEncoding + t.Trailer = rr.Trailer + atLeastHTTP11 = rr.ProtoAtLeast(1, 1) + t.ResponseToHEAD = noBodyExpected(t.Method) + } + + // Sanitize Body,ContentLength,TransferEncoding + if t.ResponseToHEAD { + t.Body = nil + if chunked(t.TransferEncoding) { + t.ContentLength = -1 + } + } else { + if !atLeastHTTP11 || t.Body == nil { + t.TransferEncoding = nil + } + if chunked(t.TransferEncoding) { + t.ContentLength = -1 + } else if t.Body == nil { // no chunking, no body + t.ContentLength = 0 + } + } + + // Sanitize Trailer + if !chunked(t.TransferEncoding) { + t.Trailer = nil + } + + return t, nil +} + +func noBodyExpected(requestMethod string) bool { + return requestMethod == "HEAD" +} + +func (t *transferWriter) shouldSendContentLength() bool { + if chunked(t.TransferEncoding) { + return false + } + if t.ContentLength > 0 { + return true + } + // Many servers expect a Content-Length for these methods + if t.Method == "POST" || t.Method == "PUT" { + return true + } + if t.ContentLength == 0 && isIdentity(t.TransferEncoding) { + return true + } + + return false +} + +func (t *transferWriter) WriteHeader(w io.Writer) error { + if t.Close { + if _, err := io.WriteString(w, "Connection: close\r\n"); err != nil { + return err + } + } + + // Write Content-Length and/or Transfer-Encoding whose values are a + // function of the sanitized field triple (Body, ContentLength, + // TransferEncoding) + if t.shouldSendContentLength() { + if _, err := io.WriteString(w, "Content-Length: "); err != nil { + return err + } + if _, err := io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n"); err != nil { + return err + } + } else if chunked(t.TransferEncoding) { + if _, err := io.WriteString(w, "Transfer-Encoding: chunked\r\n"); err != nil { + return err + } + } + + // Write Trailer header + if t.Trailer != nil { + keys := make([]string, 0, len(t.Trailer)) + for k := range t.Trailer { + k = CanonicalHeaderKey(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return &badStringError{"invalid Trailer key", k} + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + // TODO: could do better allocation-wise here, but trailers are rare, + // so being lazy for now. 
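+			// For example, declared trailers "Expires" and "X-Checksum"
+			// (illustrative names) are written sorted and comma-joined
+			// as "Trailer: Expires,X-Checksum\r\n".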
+ if _, err := io.WriteString(w, "Trailer: "+strings.Join(keys, ",")+"\r\n"); err != nil { + return err + } + } + } + + return nil +} + +func (t *transferWriter) WriteBody(w io.Writer) error { + var err error + var ncopy int64 + + // Write body + if t.Body != nil { + if chunked(t.TransferEncoding) { + cw := newChunkedWriter(w) + _, err = io.Copy(cw, t.Body) + if err == nil { + err = cw.Close() + } + } else if t.ContentLength == -1 { + ncopy, err = io.Copy(w, t.Body) + } else { + ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength)) + if err != nil { + return err + } + var nextra int64 + nextra, err = io.Copy(ioutil.Discard, t.Body) + ncopy += nextra + } + if err != nil { + return err + } + if err = t.BodyCloser.Close(); err != nil { + return err + } + } + + if !t.ResponseToHEAD && t.ContentLength != -1 && t.ContentLength != ncopy { + return fmt.Errorf("http: Request.ContentLength=%d with Body length %d", + t.ContentLength, ncopy) + } + + // TODO(petar): Place trailer writer code here. + if chunked(t.TransferEncoding) { + // Write Trailer header + if t.Trailer != nil { + if err := t.Trailer.Write(w); err != nil { + return err + } + } + // Last chunk, empty trailer + _, err = io.WriteString(w, "\r\n") + } + return err +} + +type transferReader struct { + // Input + Header Header + StatusCode int + RequestMethod string + ProtoMajor int + ProtoMinor int + // Output + Body io.ReadCloser + ContentLength int64 + TransferEncoding []string + Close bool + Trailer Header +} + +// bodyAllowedForStatus reports whether a given response status code +// permits a body. See RFC2616, section 4.4. +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +var ( + suppressedHeaders304 = []string{"Content-Type", "Content-Length", "Transfer-Encoding"} + suppressedHeadersNoBody = []string{"Content-Length", "Transfer-Encoding"} +) + +func suppressedHeaders(status int) []string { + switch { + case status == 304: + // RFC 2616 section 10.3.5: "the response MUST NOT include other entity-headers" + return suppressedHeaders304 + case !bodyAllowedForStatus(status): + return suppressedHeadersNoBody + } + return nil +} + +// msg is *Request or *Response. 
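+// readTransfer decides, from the header fields alone, how the message body
+// is framed (Content-Length, chunked, or read-until-close) and installs the
+// matching body reader and metadata on msg.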
+func readTransfer(msg interface{}, r *bufio.Reader) (err error) { + t := &transferReader{RequestMethod: "GET"} + + // Unify input + isResponse := false + switch rr := msg.(type) { + case *Response: + t.Header = rr.Header + t.StatusCode = rr.StatusCode + t.ProtoMajor = rr.ProtoMajor + t.ProtoMinor = rr.ProtoMinor + t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header) + isResponse = true + if rr.Request != nil { + t.RequestMethod = rr.Request.Method + } + case *Request: + t.Header = rr.Header + t.ProtoMajor = rr.ProtoMajor + t.ProtoMinor = rr.ProtoMinor + // Transfer semantics for Requests are exactly like those for + // Responses with status code 200, responding to a GET method + t.StatusCode = 200 + default: + panic("unexpected type") + } + + // Default to HTTP/1.1 + if t.ProtoMajor == 0 && t.ProtoMinor == 0 { + t.ProtoMajor, t.ProtoMinor = 1, 1 + } + + // Transfer encoding, content length + t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header) + if err != nil { + return err + } + + realLength, err := fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding) + if err != nil { + return err + } + if isResponse && t.RequestMethod == "HEAD" { + if n, err := parseContentLength(t.Header.get("Content-Length")); err != nil { + return err + } else { + t.ContentLength = n + } + } else { + t.ContentLength = realLength + } + + // Trailer + t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding) + if err != nil { + return err + } + + // If there is no Content-Length or chunked Transfer-Encoding on a *Response + // and the status is not 1xx, 204 or 304, then the body is unbounded. + // See RFC2616, section 4.4. + switch msg.(type) { + case *Response: + if realLength == -1 && + !chunked(t.TransferEncoding) && + bodyAllowedForStatus(t.StatusCode) { + // Unbounded body. + t.Close = true + } + } + + // Prepare body reader. ContentLength < 0 means chunked encoding + // or close connection when finished, since multipart is not supported yet + switch { + case chunked(t.TransferEncoding): + if noBodyExpected(t.RequestMethod) { + t.Body = eofReader + } else { + t.Body = &body{src: newChunkedReader(r), hdr: msg, r: r, closing: t.Close} + } + case realLength == 0: + t.Body = eofReader + case realLength > 0: + t.Body = &body{src: io.LimitReader(r, realLength), closing: t.Close} + default: + // realLength < 0, i.e. "Content-Length" not mentioned in header + if t.Close { + // Close semantics (i.e. HTTP/1.0) + t.Body = &body{src: r, closing: t.Close} + } else { + // Persistent connection (i.e. HTTP/1.1) + t.Body = eofReader + } + } + + // Unify output + switch rr := msg.(type) { + case *Request: + rr.Body = t.Body + rr.ContentLength = t.ContentLength + rr.TransferEncoding = t.TransferEncoding + rr.Close = t.Close + rr.Trailer = t.Trailer + case *Response: + rr.Body = t.Body + rr.ContentLength = t.ContentLength + rr.TransferEncoding = t.TransferEncoding + rr.Close = t.Close + rr.Trailer = t.Trailer + } + + return nil +} + +// Checks whether chunked is part of the encodings stack +func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } + +// Checks whether the encoding is explicitly "identity". 
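+// For example, a message carrying "Transfer-Encoding: chunked" ends up with
+// te == []string{"chunked"}, for which chunked(te) is true and
+// isIdentity(te) is false.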
+func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" } + +// Sanitize transfer encoding +func fixTransferEncoding(requestMethod string, header Header) ([]string, error) { + raw, present := header["Transfer-Encoding"] + if !present { + return nil, nil + } + + delete(header, "Transfer-Encoding") + + encodings := strings.Split(raw[0], ",") + te := make([]string, 0, len(encodings)) + // TODO: Even though we only support "identity" and "chunked" + // encodings, the loop below is designed with foresight. One + // invariant that must be maintained is that, if present, + // chunked encoding must always come first. + for _, encoding := range encodings { + encoding = strings.ToLower(strings.TrimSpace(encoding)) + // "identity" encoding is not recorded + if encoding == "identity" { + break + } + if encoding != "chunked" { + return nil, &badStringError{"unsupported transfer encoding", encoding} + } + te = te[0 : len(te)+1] + te[len(te)-1] = encoding + } + if len(te) > 1 { + return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")} + } + if len(te) > 0 { + // Chunked encoding trumps Content-Length. See RFC 2616 + // Section 4.4. Currently len(te) > 0 implies chunked + // encoding. + delete(header, "Content-Length") + return te, nil + } + + return nil, nil +} + +// Determine the expected body length, using RFC 2616 Section 4.4. This +// function is not a method, because ultimately it should be shared by +// ReadResponse and ReadRequest. +func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) { + + // Logic based on response type or status + if noBodyExpected(requestMethod) { + return 0, nil + } + if status/100 == 1 { + return 0, nil + } + switch status { + case 204, 304: + return 0, nil + } + + // Logic based on Transfer-Encoding + if chunked(te) { + return -1, nil + } + + // Logic based on Content-Length + cl := strings.TrimSpace(header.get("Content-Length")) + if cl != "" { + n, err := parseContentLength(cl) + if err != nil { + return -1, err + } + return n, nil + } else { + header.Del("Content-Length") + } + + if !isResponse && requestMethod == "GET" { + // RFC 2616 doesn't explicitly permit nor forbid an + // entity-body on a GET request so we permit one if + // declared, but we default to 0 here (not -1 below) + // if there's no mention of a body. + return 0, nil + } + + // Body-EOF logic based on other methods (like closing, or chunked coding) + return -1, nil +} + +// Determine whether to hang up after sending a request and body, or +// receiving a response and body +// 'header' is the request headers +func shouldClose(major, minor int, header Header) bool { + if major < 1 { + return true + } else if major == 1 && minor == 0 { + if !strings.Contains(strings.ToLower(header.get("Connection")), "keep-alive") { + return true + } + return false + } else { + // TODO: Should split on commas, toss surrounding white space, + // and check each field. 
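+		// Until then, only an exact (case-insensitive) "close" value closes
+		// the connection; a list such as "close, foo" is not recognized.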
+ if strings.ToLower(header.get("Connection")) == "close" { + header.Del("Connection") + return true + } + } + return false +} + +// Parse the trailer header +func fixTrailer(header Header, te []string) (Header, error) { + raw := header.get("Trailer") + if raw == "" { + return nil, nil + } + + header.Del("Trailer") + trailer := make(Header) + keys := strings.Split(raw, ",") + for _, key := range keys { + key = CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + return nil, &badStringError{"bad trailer key", key} + } + trailer[key] = nil + } + if len(trailer) == 0 { + return nil, nil + } + if !chunked(te) { + // Trailer and no chunking + return nil, ErrUnexpectedTrailer + } + return trailer, nil +} + +// body turns a Reader into a ReadCloser. +// Close ensures that the body has been fully read +// and then reads the trailer if necessary. +type body struct { + src io.Reader + hdr interface{} // non-nil (Response or Request) value means read trailer + r *bufio.Reader // underlying wire-format reader for the trailer + closing bool // is the connection to be closed after reading body? + + mu sync.Mutex // guards closed, and calls to Read and Close + closed bool +} + +// ErrBodyReadAfterClose is returned when reading a Request or Response +// Body after the body has been closed. This typically happens when the body is +// read after an HTTP Handler calls WriteHeader or Write on its +// ResponseWriter. +var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body") + +func (b *body) Read(p []byte) (n int, err error) { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return 0, ErrBodyReadAfterClose + } + return b.readLocked(p) +} + +// Must hold b.mu. +func (b *body) readLocked(p []byte) (n int, err error) { + n, err = b.src.Read(p) + + if err == io.EOF { + // Chunked case. Read the trailer. + if b.hdr != nil { + if e := b.readTrailer(); e != nil { + err = e + } + b.hdr = nil + } else { + // If the server declared the Content-Length, our body is a LimitedReader + // and we need to check whether this EOF arrived early. + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N > 0 { + err = io.ErrUnexpectedEOF + } + } + } + + // If we can return an EOF here along with the read data, do + // so. This is optional per the io.Reader contract, but doing + // so helps the HTTP transport code recycle its connection + // earlier (since it will see this EOF itself), even if the + // client doesn't do future reads or Close. + if err == nil && n > 0 { + if lr, ok := b.src.(*io.LimitedReader); ok && lr.N == 0 { + err = io.EOF + } + } + + return n, err +} + +var ( + singleCRLF = []byte("\r\n") + doubleCRLF = []byte("\r\n\r\n") +) + +func seeUpcomingDoubleCRLF(r *bufio.Reader) bool { + for peekSize := 4; ; peekSize++ { + // This loop stops when Peek returns an error, + // which it does when r's buffer has been filled. + buf, err := r.Peek(peekSize) + if bytes.HasSuffix(buf, doubleCRLF) { + return true + } + if err != nil { + break + } + } + return false +} + +var errTrailerEOF = errors.New("http: unexpected EOF reading trailer") + +func (b *body) readTrailer() error { + // The common case, since nobody uses trailers. + buf, err := b.r.Peek(2) + if bytes.Equal(buf, singleCRLF) { + b.r.ReadByte() + b.r.ReadByte() + return nil + } + if len(buf) < 2 { + return errTrailerEOF + } + if err != nil { + return err + } + + // Make sure there's a header terminator coming up, to prevent + // a DoS with an unbounded size Trailer. 
It's not easy to + // slip in a LimitReader here, as textproto.NewReader requires + // a concrete *bufio.Reader. Also, we can't get all the way + // back up to our conn's LimitedReader that *might* be backing + // this bufio.Reader. Instead, a hack: we iteratively Peek up + // to the bufio.Reader's max size, looking for a double CRLF. + // This limits the trailer to the underlying buffer size, typically 4kB. + if !seeUpcomingDoubleCRLF(b.r) { + return errors.New("http: suspiciously long trailer after chunked body") + } + + hdr, err := textproto.NewReader(b.r).ReadMIMEHeader() + if err != nil { + if err == io.EOF { + return errTrailerEOF + } + return err + } + switch rr := b.hdr.(type) { + case *Request: + mergeSetHeader(&rr.Trailer, Header(hdr)) + case *Response: + mergeSetHeader(&rr.Trailer, Header(hdr)) + } + return nil +} + +func mergeSetHeader(dst *Header, src Header) { + if *dst == nil { + *dst = src + return + } + for k, vv := range src { + (*dst)[k] = vv + } +} + +func (b *body) Close() error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return nil + } + var err error + switch { + case b.hdr == nil && b.closing: + // no trailer and closing the connection next. + // no point in reading to EOF. + default: + // Fully consume the body, which will also lead to us reading + // the trailer headers after the body, if present. + _, err = io.Copy(ioutil.Discard, bodyLocked{b}) + } + b.closed = true + return err +} + +// bodyLocked is a io.Reader reading from a *body when its mutex is +// already held. +type bodyLocked struct { + b *body +} + +func (bl bodyLocked) Read(p []byte) (n int, err error) { + if bl.b.closed { + return 0, ErrBodyReadAfterClose + } + return bl.b.readLocked(p) +} + +// parseContentLength trims whitespace from s and returns -1 if no value +// is set, or the value if it's >= 0. +func parseContentLength(cl string) (int64, error) { + cl = strings.TrimSpace(cl) + if cl == "" { + return -1, nil + } + n, err := strconv.ParseInt(cl, 10, 64) + if err != nil || n < 0 { + return 0, &badStringError{"bad Content-Length", cl} + } + return n, nil + +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/transfer_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,64 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package http + +import ( + "bufio" + "io" + "strings" + "testing" +) + +func TestBodyReadBadTrailer(t *testing.T) { + b := &body{ + src: strings.NewReader("foobar"), + hdr: true, // force reading the trailer + r: bufio.NewReader(strings.NewReader("")), + } + buf := make([]byte, 7) + n, err := b.Read(buf[:3]) + got := string(buf[:n]) + if got != "foo" || err != nil { + t.Fatalf(`first Read = %d (%q), %v; want 3 ("foo")`, n, got, err) + } + + n, err = b.Read(buf[:]) + got = string(buf[:n]) + if got != "bar" || err != nil { + t.Fatalf(`second Read = %d (%q), %v; want 3 ("bar")`, n, got, err) + } + + n, err = b.Read(buf[:]) + got = string(buf[:n]) + if err == nil { + t.Errorf("final Read was successful (%q), expected error from trailer read", got) + } +} + +func TestFinalChunkedBodyReadEOF(t *testing.T) { + res, err := ReadResponse(bufio.NewReader(strings.NewReader( + "HTTP/1.1 200 OK\r\n"+ + "Transfer-Encoding: chunked\r\n"+ + "\r\n"+ + "0a\r\n"+ + "Body here\n\r\n"+ + "09\r\n"+ + "continued\r\n"+ + "0\r\n"+ + "\r\n")), nil) + if err != nil { + t.Fatal(err) + } + want := "Body here\ncontinued" + buf := make([]byte, len(want)) + n, err := res.Body.Read(buf) + if n != len(want) || err != io.EOF { + t.Logf("body = %#v", res.Body) + t.Errorf("Read = %v, %v; want %d, EOF", n, err, len(want)) + } + if string(buf) != want { + t.Errorf("buf = %q; want %q", buf, want) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/transport.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/transport.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/transport.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1208 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// HTTP client implementation. See RFC 2616. +// +// This is the low-level Transport implementation of RoundTripper. +// The high-level interface is in client.go. + +package http + +import ( + "bufio" + "compress/gzip" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/core/tls" + "io" + "log" + "net" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// DefaultTransport is the default implementation of Transport and is +// used by DefaultClient. It establishes network connections as needed +// and caches them for reuse by subsequent calls. It uses HTTP proxies +// as directed by the $HTTP_PROXY and $NO_PROXY (or $http_proxy and +// $no_proxy) environment variables. +var DefaultTransport RoundTripper = &Transport{ + Proxy: ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +// DefaultMaxIdleConnsPerHost is the default value of Transport's +// MaxIdleConnsPerHost. +const DefaultMaxIdleConnsPerHost = 2 + +// Transport is an implementation of RoundTripper that supports http, +// https, and http proxies (for either http or https with CONNECT). +// Transport can also cache connections for future re-use. +type Transport struct { + idleMu sync.Mutex + idleConn map[connectMethodKey][]*persistConn + idleConnCh map[connectMethodKey]chan *persistConn + reqMu sync.Mutex + reqCanceler map[*Request]func() + altMu sync.RWMutex + altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. 
+ // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*Request) (*url.URL, error) + + // Dial specifies the dial function for creating TCP + // connections. + // If Dial is nil, net.Dial is used. + Dial func(network, addr string) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with + // tls.Client. If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // TLSHandshakeTimeout specifies the maximum amount of time waiting to + // wait for a TLS handshake. Zero means no timeout. + TLSHandshakeTimeout time.Duration + + // DisableKeepAlives, if true, prevents re-use of TCP connections + // between different HTTP requests. + DisableKeepAlives bool + + // DisableCompression, if true, prevents the Transport from + // requesting compression with an "Accept-Encoding: gzip" + // request header when the Request contains no existing + // Accept-Encoding value. If the Transport requests gzip on + // its own and gets a gzipped response, it's transparently + // decoded in the Response.Body. However, if the user + // explicitly requested gzip it is not automatically + // uncompressed. + DisableCompression bool + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle + // (keep-alive) to keep per-host. If zero, + // DefaultMaxIdleConnsPerHost is used. + MaxIdleConnsPerHost int + + // ResponseHeaderTimeout, if non-zero, specifies the amount of + // time to wait for a server's response headers after fully + // writing the request (including its body, if any). This + // time does not include the time to read the response body. + ResponseHeaderTimeout time.Duration + + // TODO: tunable on global max cached connections + // TODO: tunable on timeout on cached connections +} + +// ProxyFromEnvironment returns the URL of the proxy to use for a +// given request, as indicated by the environment variables +// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy). +// An error is returned if the proxy environment is invalid. +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request. +// +// As a special case, if req.URL.Host is "localhost" (with or without +// a port number), then a nil URL and nil error will be returned. +func ProxyFromEnvironment(req *Request) (*url.URL, error) { + proxy := httpProxyEnv.Get() + if proxy == "" { + return nil, nil + } + if !useProxy(canonicalAddr(req.URL)) { + return nil, nil + } + proxyURL, err := url.Parse(proxy) + if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// ProxyURL returns a proxy function (for use in a Transport) +// that always returns the same URL. +func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) { + return func(*Request) (*url.URL, error) { + return fixedURL, nil + } +} + +// transportRequest is a wrapper around a *Request that adds +// optional extra headers to write. 
+type transportRequest struct { + *Request // original request, not to be mutated + extra Header // extra headers to write, or nil +} + +func (tr *transportRequest) extraHeaders() Header { + if tr.extra == nil { + tr.extra = make(Header) + } + return tr.extra +} + +// RoundTrip implements the RoundTripper interface. +// +// For higher-level HTTP client support (such as handling of cookies +// and redirects), see Get, Post, and the Client type. +func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) { + if req.URL == nil { + req.closeBody() + return nil, errors.New("http: nil Request.URL") + } + if req.Header == nil { + req.closeBody() + return nil, errors.New("http: nil Request.Header") + } + if req.URL.Scheme != "http" && req.URL.Scheme != "https" { + t.altMu.RLock() + var rt RoundTripper + if t.altProto != nil { + rt = t.altProto[req.URL.Scheme] + } + t.altMu.RUnlock() + if rt == nil { + req.closeBody() + return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme} + } + return rt.RoundTrip(req) + } + if req.URL.Host == "" { + req.closeBody() + return nil, errors.New("http: no Host in request URL") + } + treq := &transportRequest{Request: req} + cm, err := t.connectMethodForRequest(treq) + if err != nil { + req.closeBody() + return nil, err + } + + // Get the cached or newly-created connection to either the + // host (for http or https), the http proxy, or the http proxy + // pre-CONNECTed to https server. In any case, we'll be ready + // to send it requests. + pconn, err := t.getConn(req, cm) + if err != nil { + t.setReqCanceler(req, nil) + req.closeBody() + return nil, err + } + + return pconn.roundTrip(treq) +} + +// RegisterProtocol registers a new protocol with scheme. +// The Transport will pass requests using the given scheme to rt. +// It is rt's responsibility to simulate HTTP request semantics. +// +// RegisterProtocol can be used by other packages to provide +// implementations of protocol schemes like "ftp" or "file". +func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { + if scheme == "http" || scheme == "https" { + panic("protocol " + scheme + " already registered") + } + t.altMu.Lock() + defer t.altMu.Unlock() + if t.altProto == nil { + t.altProto = make(map[string]RoundTripper) + } + if _, exists := t.altProto[scheme]; exists { + panic("protocol " + scheme + " already registered") + } + t.altProto[scheme] = rt +} + +// CloseIdleConnections closes any connections which were previously +// connected from previous requests but are now sitting idle in +// a "keep-alive" state. It does not interrupt any connections currently +// in use. +func (t *Transport) CloseIdleConnections() { + t.idleMu.Lock() + m := t.idleConn + t.idleConn = nil + t.idleConnCh = nil + t.idleMu.Unlock() + for _, conns := range m { + for _, pconn := range conns { + pconn.close() + } + } +} + +// CancelRequest cancels an in-flight request by closing its +// connection. +func (t *Transport) CancelRequest(req *Request) { + t.reqMu.Lock() + cancel := t.reqCanceler[req] + t.reqMu.Unlock() + if cancel != nil { + cancel() + } +} + +// +// Private implementation past this point. +// + +var ( + httpProxyEnv = &envOnce{ + names: []string{"HTTP_PROXY", "http_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). 
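+//
+// For example, httpProxyEnv.Get() returns the first non-empty value among
+// $HTTP_PROXY and $http_proxy, computed once and then cached.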
+type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} + +func (t *Transport) connectMethodForRequest(treq *transportRequest) (cm connectMethod, err error) { + cm.targetScheme = treq.URL.Scheme + cm.targetAddr = canonicalAddr(treq.URL) + if t.Proxy != nil { + cm.proxyURL, err = t.Proxy(treq.Request) + } + return cm, nil +} + +// proxyAuth returns the Proxy-Authorization header to set +// on requests, if applicable. +func (cm *connectMethod) proxyAuth() string { + if cm.proxyURL == nil { + return "" + } + if u := cm.proxyURL.User; u != nil { + username := u.Username() + password, _ := u.Password() + return "Basic " + basicAuth(username, password) + } + return "" +} + +// putIdleConn adds pconn to the list of idle persistent connections awaiting +// a new request. +// If pconn is no longer needed or not in a good state, putIdleConn +// returns false. +func (t *Transport) putIdleConn(pconn *persistConn) bool { + if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 { + pconn.close() + return false + } + if pconn.isBroken() { + return false + } + key := pconn.cacheKey + max := t.MaxIdleConnsPerHost + if max == 0 { + max = DefaultMaxIdleConnsPerHost + } + t.idleMu.Lock() + + waitingDialer := t.idleConnCh[key] + select { + case waitingDialer <- pconn: + // We're done with this pconn and somebody else is + // currently waiting for a conn of this type (they're + // actively dialing, but this conn is ready + // first). Chrome calls this socket late binding. See + // https://insouciant.org/tech/connection-management-in-chromium/ + t.idleMu.Unlock() + return true + default: + if waitingDialer != nil { + // They had populated this, but their dial won + // first, so we can clean up this map entry. + delete(t.idleConnCh, key) + } + } + if t.idleConn == nil { + t.idleConn = make(map[connectMethodKey][]*persistConn) + } + if len(t.idleConn[key]) >= max { + t.idleMu.Unlock() + pconn.close() + return false + } + for _, exist := range t.idleConn[key] { + if exist == pconn { + log.Fatalf("dup idle pconn %p in freelist", pconn) + } + } + t.idleConn[key] = append(t.idleConn[key], pconn) + t.idleMu.Unlock() + return true +} + +// getIdleConnCh returns a channel to receive and return idle +// persistent connection for the given connectMethod. +// It may return nil, if persistent connections are not being used. +func (t *Transport) getIdleConnCh(cm connectMethod) chan *persistConn { + if t.DisableKeepAlives { + return nil + } + key := cm.key() + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConnCh == nil { + t.idleConnCh = make(map[connectMethodKey]chan *persistConn) + } + ch, ok := t.idleConnCh[key] + if !ok { + ch = make(chan *persistConn) + t.idleConnCh[key] = ch + } + return ch +} + +func (t *Transport) getIdleConn(cm connectMethod) (pconn *persistConn) { + key := cm.key() + t.idleMu.Lock() + defer t.idleMu.Unlock() + if t.idleConn == nil { + return nil + } + for { + pconns, ok := t.idleConn[key] + if !ok { + return nil + } + if len(pconns) == 1 { + pconn = pconns[0] + delete(t.idleConn, key) + } else { + // 2 or more cached connections; pop last + // TODO: queue? 
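+			// (LIFO: the connection parked most recently is reused first.)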
+ pconn = pconns[len(pconns)-1] + t.idleConn[key] = pconns[:len(pconns)-1] + } + if !pconn.isBroken() { + return + } + } +} + +func (t *Transport) setReqCanceler(r *Request, fn func()) { + t.reqMu.Lock() + defer t.reqMu.Unlock() + if t.reqCanceler == nil { + t.reqCanceler = make(map[*Request]func()) + } + if fn != nil { + t.reqCanceler[r] = fn + } else { + delete(t.reqCanceler, r) + } +} + +func (t *Transport) dial(network, addr string) (c net.Conn, err error) { + if t.Dial != nil { + return t.Dial(network, addr) + } + return net.Dial(network, addr) +} + +// getConn dials and creates a new persistConn to the target as +// specified in the connectMethod. This includes doing a proxy CONNECT +// and/or setting up TLS. If this doesn't return an error, the persistConn +// is ready to write requests to. +func (t *Transport) getConn(req *Request, cm connectMethod) (*persistConn, error) { + if pc := t.getIdleConn(cm); pc != nil { + return pc, nil + } + + type dialRes struct { + pc *persistConn + err error + } + dialc := make(chan dialRes) + + handlePendingDial := func() { + if v := <-dialc; v.err == nil { + t.putIdleConn(v.pc) + } + } + + cancelc := make(chan struct{}) + t.setReqCanceler(req, func() { close(cancelc) }) + + go func() { + pc, err := t.dialConn(cm) + dialc <- dialRes{pc, err} + }() + + idleConnCh := t.getIdleConnCh(cm) + select { + case v := <-dialc: + // Our dial finished. + return v.pc, v.err + case pc := <-idleConnCh: + // Another request finished first and its net.Conn + // became available before our dial. Or somebody + // else's dial that they didn't use. + // But our dial is still going, so give it away + // when it finishes: + go handlePendingDial() + return pc, nil + case <-cancelc: + go handlePendingDial() + return nil, errors.New("net/http: request canceled while waiting for connection") + } +} + +func (t *Transport) dialConn(cm connectMethod) (*persistConn, error) { + conn, err := t.dial("tcp", cm.addr()) + if err != nil { + if cm.proxyURL != nil { + err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) + } + return nil, err + } + + pa := cm.proxyAuth() + + pconn := &persistConn{ + t: t, + cacheKey: cm.key(), + conn: conn, + reqch: make(chan requestAndChan, 1), + writech: make(chan writeRequest, 1), + closech: make(chan struct{}), + writeErrCh: make(chan error, 1), + } + + switch { + case cm.proxyURL == nil: + // Do nothing. + case cm.targetScheme == "http": + pconn.isProxy = true + if pa != "" { + pconn.mutateHeaderFunc = func(h Header) { + h.Set("Proxy-Authorization", pa) + } + } + case cm.targetScheme == "https": + connectReq := &Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: cm.targetAddr}, + Host: cm.targetAddr, + Header: make(Header), + } + if pa != "" { + connectReq.Header.Set("Proxy-Authorization", pa) + } + connectReq.Write(conn) + + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. + br := bufio.NewReader(conn) + resp, err := ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + conn.Close() + return nil, errors.New(f[1]) + } + } + + if cm.targetScheme == "https" { + // Initiate TLS and check remote host name against certificate. 
+ cfg := t.TLSClientConfig + if cfg == nil || cfg.ServerName == "" { + host := cm.tlsHost() + if cfg == nil { + cfg = &tls.Config{ServerName: host} + } else { + clone := *cfg // shallow clone + clone.ServerName = host + cfg = &clone + } + } + plainConn := conn + tlsConn := tls.Client(plainConn, cfg) + errc := make(chan error, 2) + var timer *time.Timer // for canceling TLS handshake + if d := t.TLSHandshakeTimeout; d != 0 { + timer = time.AfterFunc(d, func() { + errc <- tlsHandshakeTimeoutError{} + }) + } + go func() { + err := tlsConn.Handshake() + if timer != nil { + timer.Stop() + } + errc <- err + }() + if err := <-errc; err != nil { + plainConn.Close() + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + plainConn.Close() + return nil, err + } + } + cs := tlsConn.ConnectionState() + pconn.tlsState = &cs + pconn.conn = tlsConn + } + + pconn.br = bufio.NewReader(noteEOFReader{pconn.conn, &pconn.sawEOF}) + pconn.bw = bufio.NewWriter(pconn.conn) + go pconn.readLoop() + go pconn.writeLoop() + return pconn, nil +} + +// useProxy returns true if requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. +// addr is always a canonicalAddr with a host and port. +func useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, _, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + if ip := net.ParseIP(host); ip != nil { + if ip.IsLoopback() { + return false + } + } + + no_proxy := noProxyEnv.Get() + if no_proxy == "*" { + return false + } + + addr = strings.ToLower(strings.TrimSpace(addr)) + if hasPort(addr) { + addr = addr[:strings.LastIndex(addr, ":")] + } + + for _, p := range strings.Split(no_proxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + if hasPort(p) { + p = p[:strings.LastIndex(p, ":")] + } + if addr == p { + return false + } + if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) { + // no_proxy ".foo.com" matches "bar.foo.com" or "foo.com" + return false + } + if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' { + // no_proxy "foo.com" matches "bar.foo.com" + return false + } + } + return true +} + +// connectMethod is the map key (in its String form) for keeping persistent +// TCP connections alive for subsequent HTTP requests. +// +// A connect method may be of the following types: +// +// Cache key form Description +// ----------------- ------------------------- +// |http|foo.com http directly to server, no proxy +// |https|foo.com https directly to server, no proxy +// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com +// http://proxy.com|http http to proxy, http to anywhere after that +// +// Note: no support to https to the proxy yet. +// +type connectMethod struct { + proxyURL *url.URL // nil for no proxy, else full proxy URL + targetScheme string // "http" or "https" + targetAddr string // Not used if proxy + http targetScheme (4th example in table) +} + +func (cm *connectMethod) key() connectMethodKey { + proxyStr := "" + targetAddr := cm.targetAddr + if cm.proxyURL != nil { + proxyStr = cm.proxyURL.String() + if cm.targetScheme == "http" { + targetAddr = "" + } + } + return connectMethodKey{ + proxy: proxyStr, + scheme: cm.targetScheme, + addr: targetAddr, + } +} + +// addr returns the first hop "host:port" to which we need to TCP connect. 
+func (cm *connectMethod) addr() string { + if cm.proxyURL != nil { + return canonicalAddr(cm.proxyURL) + } + return cm.targetAddr +} + +// tlsHost returns the host name to match against the peer's +// TLS certificate. +func (cm *connectMethod) tlsHost() string { + h := cm.targetAddr + if hasPort(h) { + h = h[:strings.LastIndex(h, ":")] + } + return h +} + +// connectMethodKey is the map key version of connectMethod, with a +// stringified proxy URL (or the empty string) instead of a pointer to +// a URL. +type connectMethodKey struct { + proxy, scheme, addr string +} + +func (k connectMethodKey) String() string { + // Only used by tests. + return fmt.Sprintf("%s|%s|%s", k.proxy, k.scheme, k.addr) +} + +// persistConn wraps a connection, usually a persistent one +// (but may be used for non-keep-alive requests as well) +type persistConn struct { + t *Transport + cacheKey connectMethodKey + conn net.Conn + tlsState *tls.ConnectionState + br *bufio.Reader // from conn + sawEOF bool // whether we've seen EOF from conn; owned by readLoop + bw *bufio.Writer // to conn + reqch chan requestAndChan // written by roundTrip; read by readLoop + writech chan writeRequest // written by roundTrip; read by writeLoop + closech chan struct{} // closed when conn closed + isProxy bool + // writeErrCh passes the request write error (usually nil) + // from the writeLoop goroutine to the readLoop which passes + // it off to the res.Body reader, which then uses it to decide + // whether or not a connection can be reused. Issue 7569. + writeErrCh chan error + + lk sync.Mutex // guards following fields + numExpectedResponses int + closed bool // whether conn has been closed + broken bool // an error has happened on this connection; marked broken so it's not reused. + // mutateHeaderFunc is an optional func to modify extra + // headers on each outbound request before it's written. (the + // original Request given to RoundTrip is not modified) + mutateHeaderFunc func(Header) +} + +// isBroken reports whether this connection is in a known broken state. +func (pc *persistConn) isBroken() bool { + pc.lk.Lock() + b := pc.broken + pc.lk.Unlock() + return b +} + +func (pc *persistConn) cancelRequest() { + pc.conn.Close() +} + +var remoteSideClosedFunc func(error) bool // or nil to use default + +func remoteSideClosed(err error) bool { + if err == io.EOF { + return true + } + if remoteSideClosedFunc != nil { + return remoteSideClosedFunc(err) + } + return false +} + +func (pc *persistConn) readLoop() { + alive := true + + for alive { + pb, err := pc.br.Peek(1) + + pc.lk.Lock() + if pc.numExpectedResponses == 0 { + if !pc.closed { + pc.closeLocked() + if len(pb) > 0 { + log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", + string(pb), err) + } + } + pc.lk.Unlock() + return + } + pc.lk.Unlock() + + rc := <-pc.reqch + + var resp *Response + if err == nil { + resp, err = ReadResponse(pc.br, rc.req) + if err == nil && resp.StatusCode == 100 { + // Skip any 100-continue for now. + // TODO(bradfitz): if rc.req had "Expect: 100-continue", + // actually block the request body write and signal the + // writeLoop now to begin sending it. (Issue 2184) For now we + // eat it, since we're never expecting one. 
+ resp, err = ReadResponse(pc.br, rc.req) + } + } + + if resp != nil { + resp.TLS = pc.tlsState + } + + hasBody := resp != nil && rc.req.Method != "HEAD" && resp.ContentLength != 0 + + if err != nil { + pc.close() + } else { + if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" { + resp.Header.Del("Content-Encoding") + resp.Header.Del("Content-Length") + resp.ContentLength = -1 + resp.Body = &gzipReader{body: resp.Body} + } + resp.Body = &bodyEOFSignal{body: resp.Body} + } + + if err != nil || resp.Close || rc.req.Close || resp.StatusCode <= 199 { + // Don't do keep-alive on error if either party requested a close + // or we get an unexpected informational (1xx) response. + // StatusCode 100 is already handled above. + alive = false + } + + var waitForBodyRead chan bool + if hasBody { + waitForBodyRead = make(chan bool, 2) + resp.Body.(*bodyEOFSignal).earlyCloseFn = func() error { + // Sending false here sets alive to + // false and closes the connection + // below. + waitForBodyRead <- false + return nil + } + resp.Body.(*bodyEOFSignal).fn = func(err error) { + waitForBodyRead <- alive && + err == nil && + !pc.sawEOF && + pc.wroteRequest() && + pc.t.putIdleConn(pc) + } + } + + if alive && !hasBody { + alive = !pc.sawEOF && + pc.wroteRequest() && + pc.t.putIdleConn(pc) + } + + rc.ch <- responseAndError{resp, err} + + // Wait for the just-returned response body to be fully consumed + // before we race and peek on the underlying bufio reader. + if waitForBodyRead != nil { + select { + case alive = <-waitForBodyRead: + case <-pc.closech: + alive = false + } + } + + pc.t.setReqCanceler(rc.req, nil) + + if !alive { + pc.close() + } + } +} + +func (pc *persistConn) writeLoop() { + for { + select { + case wr := <-pc.writech: + if pc.isBroken() { + wr.ch <- errors.New("http: can't write HTTP request on broken connection") + continue + } + err := wr.req.Request.write(pc.bw, pc.isProxy, wr.req.extra) + if err == nil { + err = pc.bw.Flush() + } + if err != nil { + pc.markBroken() + wr.req.Request.closeBody() + } + pc.writeErrCh <- err // to the body reader, which might recycle us + wr.ch <- err // to the roundTrip function + case <-pc.closech: + return + } + } +} + +// wroteRequest is a check before recycling a connection that the previous write +// (from writeLoop above) happened and was successful. +func (pc *persistConn) wroteRequest() bool { + select { + case err := <-pc.writeErrCh: + // Common case: the write happened well before the response, so + // avoid creating a timer. + return err == nil + default: + // Rare case: the request was written in writeLoop above but + // before it could send to pc.writeErrCh, the reader read it + // all, processed it, and called us here. In this case, give the + // write goroutine a bit of time to finish its send. + // + // Less rare case: We also get here in the legitimate case of + // Issue 7569, where the writer is still writing (or stalled), + // but the server has already replied. In this case, we don't + // want to wait too long, and we want to return false so this + // connection isn't re-used. + select { + case err := <-pc.writeErrCh: + return err == nil + case <-time.After(50 * time.Millisecond): + return false + } + } +} + +type responseAndError struct { + res *Response + err error +} + +type requestAndChan struct { + req *Request + ch chan responseAndError + + // did the Transport (as opposed to the client code) add an + // Accept-Encoding gzip header? only if it we set it do + // we transparently decode the gzip. 
+ addedGzip bool +} + +// A writeRequest is sent by the readLoop's goroutine to the +// writeLoop's goroutine to write a request while the read loop +// concurrently waits on both the write response and the server's +// reply. +type writeRequest struct { + req *transportRequest + ch chan<- error +} + +type httpError struct { + err string + timeout bool +} + +func (e *httpError) Error() string { return e.err } +func (e *httpError) Timeout() bool { return e.timeout } +func (e *httpError) Temporary() bool { return true } + +var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true} +var errClosed error = &httpError{err: "net/http: transport closed before response was received"} + +func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) { + pc.t.setReqCanceler(req.Request, pc.cancelRequest) + pc.lk.Lock() + pc.numExpectedResponses++ + headerFn := pc.mutateHeaderFunc + pc.lk.Unlock() + + if headerFn != nil { + headerFn(req.extraHeaders()) + } + + // Ask for a compressed version if the caller didn't set their + // own value for Accept-Encoding. We only attempted to + // uncompress the gzip stream if we were the layer that + // requested it. + requestedGzip := false + if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" && req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // http://golang.org/issue/5522 + requestedGzip = true + req.extraHeaders().Set("Accept-Encoding", "gzip") + } + + // Write the request concurrently with waiting for a response, + // in case the server decides to reply before reading our full + // request body. + writeErrCh := make(chan error, 1) + pc.writech <- writeRequest{req, writeErrCh} + + resc := make(chan responseAndError, 1) + pc.reqch <- requestAndChan{req.Request, resc, requestedGzip} + + var re responseAndError + var pconnDeadCh = pc.closech + var failTicker <-chan time.Time + var respHeaderTimer <-chan time.Time +WaitResponse: + for { + select { + case err := <-writeErrCh: + if err != nil { + re = responseAndError{nil, err} + pc.close() + break WaitResponse + } + if d := pc.t.ResponseHeaderTimeout; d > 0 { + respHeaderTimer = time.After(d) + } + case <-pconnDeadCh: + // The persist connection is dead. This shouldn't + // usually happen (only with Connection: close responses + // with no response bodies), but if it does happen it + // means either a) the remote server hung up on us + // prematurely, or b) the readLoop sent us a response & + // closed its closech at roughly the same time, and we + // selected this case first, in which case a response + // might still be coming soon. + // + // We can't avoid the select race in b) by using a unbuffered + // resc channel instead, because then goroutines can + // leak if we exit due to other errors. 
+ pconnDeadCh = nil // avoid spinning + failTicker = time.After(100 * time.Millisecond) // arbitrary time to wait for resc + case <-failTicker: + re = responseAndError{err: errClosed} + break WaitResponse + case <-respHeaderTimer: + pc.close() + re = responseAndError{err: errTimeout} + break WaitResponse + case re = <-resc: + break WaitResponse + } + } + + pc.lk.Lock() + pc.numExpectedResponses-- + pc.lk.Unlock() + + if re.err != nil { + pc.t.setReqCanceler(req.Request, nil) + } + return re.res, re.err +} + +// markBroken marks a connection as broken (so it's not reused). +// It differs from close in that it doesn't close the underlying +// connection for use when it's still being read. +func (pc *persistConn) markBroken() { + pc.lk.Lock() + defer pc.lk.Unlock() + pc.broken = true +} + +func (pc *persistConn) close() { + pc.lk.Lock() + defer pc.lk.Unlock() + pc.closeLocked() +} + +func (pc *persistConn) closeLocked() { + pc.broken = true + if !pc.closed { + pc.conn.Close() + pc.closed = true + close(pc.closech) + } + pc.mutateHeaderFunc = nil +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} + +// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most +// once, right before its final (error-producing) Read or Close call +// returns. If earlyCloseFn is non-nil and Close is called before +// io.EOF is seen, earlyCloseFn is called instead of fn, and its +// return value is the return value from Close. +type bodyEOFSignal struct { + body io.ReadCloser + mu sync.Mutex // guards following 4 fields + closed bool // whether Close has been called + rerr error // sticky Read error + fn func(error) // error will be nil on Read io.EOF + earlyCloseFn func() error // optional alt Close func used if io.EOF not seen +} + +func (es *bodyEOFSignal) Read(p []byte) (n int, err error) { + es.mu.Lock() + closed, rerr := es.closed, es.rerr + es.mu.Unlock() + if closed { + return 0, errors.New("http: read on closed response body") + } + if rerr != nil { + return 0, rerr + } + + n, err = es.body.Read(p) + if err != nil { + es.mu.Lock() + defer es.mu.Unlock() + if es.rerr == nil { + es.rerr = err + } + es.condfn(err) + } + return +} + +func (es *bodyEOFSignal) Close() error { + es.mu.Lock() + defer es.mu.Unlock() + if es.closed { + return nil + } + es.closed = true + if es.earlyCloseFn != nil && es.rerr != io.EOF { + return es.earlyCloseFn() + } + err := es.body.Close() + es.condfn(err) + return err +} + +// caller must hold es.mu. 
+func (es *bodyEOFSignal) condfn(err error) { + if es.fn == nil { + return + } + if err == io.EOF { + err = nil + } + es.fn(err) + es.fn = nil +} + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr io.Reader // lazily-initialized gzip reader +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type readerAndCloser struct { + io.Reader + io.Closer +} + +type tlsHandshakeTimeoutError struct{} + +func (tlsHandshakeTimeoutError) Timeout() bool { return true } +func (tlsHandshakeTimeoutError) Temporary() bool { return true } +func (tlsHandshakeTimeoutError) Error() string { return "net/http: TLS handshake timeout" } + +type noteEOFReader struct { + r io.Reader + sawEOF *bool +} + +func (nr noteEOFReader) Read(p []byte) (n int, err error) { + n, err = nr.r.Read(p) + if err == io.EOF { + *nr.sawEOF = true + } + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/transport_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2173 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests for transport.go + +package http_test + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/tls" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + . "net/http" + "net/http/httptest" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +// TODO: test 5 pipelined requests with responses: 1) OK, 2) OK, Connection: Close +// and then verify that the final 2 responses get errors back. + +// hostPortHandler writes back the client's "host:port". +var hostPortHandler = HandlerFunc(func(w ResponseWriter, r *Request) { + if r.FormValue("close") == "true" { + w.Header().Set("Connection", "close") + } + w.Write([]byte(r.RemoteAddr)) +}) + +// testCloseConn is a net.Conn tracked by a testConnSet. +type testCloseConn struct { + net.Conn + set *testConnSet +} + +func (c *testCloseConn) Close() error { + c.set.remove(c) + return c.Conn.Close() +} + +// testConnSet tracks a set of TCP connections and whether they've +// been closed. 
+type testConnSet struct { + t *testing.T + mu sync.Mutex // guards closed and list + closed map[net.Conn]bool + list []net.Conn // in order created +} + +func (tcs *testConnSet) insert(c net.Conn) { + tcs.mu.Lock() + defer tcs.mu.Unlock() + tcs.closed[c] = false + tcs.list = append(tcs.list, c) +} + +func (tcs *testConnSet) remove(c net.Conn) { + tcs.mu.Lock() + defer tcs.mu.Unlock() + tcs.closed[c] = true +} + +// some tests use this to manage raw tcp connections for later inspection +func makeTestDial(t *testing.T) (*testConnSet, func(n, addr string) (net.Conn, error)) { + connSet := &testConnSet{ + t: t, + closed: make(map[net.Conn]bool), + } + dial := func(n, addr string) (net.Conn, error) { + c, err := net.Dial(n, addr) + if err != nil { + return nil, err + } + tc := &testCloseConn{c, connSet} + connSet.insert(tc) + return tc, nil + } + return connSet, dial +} + +func (tcs *testConnSet) check(t *testing.T) { + tcs.mu.Lock() + defer tcs.mu.Unlock() + for i := 4; i >= 0; i-- { + for i, c := range tcs.list { + if tcs.closed[c] { + continue + } + if i != 0 { + tcs.mu.Unlock() + time.Sleep(50 * time.Millisecond) + tcs.mu.Lock() + continue + } + t.Errorf("TCP connection #%d, %+v (of %d total) was not closed", i+1, c, len(tcs.list)) + } + } +} + +// Two subsequent requests and verify their response is the same. +// The response from the server is our own IP:port +func TestTransportKeepAlives(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + for _, disableKeepAlive := range []bool{false, true} { + tr := &Transport{DisableKeepAlives: disableKeepAlive} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + fetch := func(n int) string { + res, err := c.Get(ts.URL) + if err != nil { + t.Fatalf("error in disableKeepAlive=%v, req #%d, GET: %v", disableKeepAlive, n, err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error in disableKeepAlive=%v, req #%d, ReadAll: %v", disableKeepAlive, n, err) + } + return string(body) + } + + body1 := fetch(1) + body2 := fetch(2) + + bodiesDiffer := body1 != body2 + if bodiesDiffer != disableKeepAlive { + t.Errorf("error in disableKeepAlive=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q", + disableKeepAlive, bodiesDiffer, body1, body2) + } + } +} + +func TestTransportConnectionCloseOnResponse(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + connSet, testDial := makeTestDial(t) + + for _, connectionClose := range []bool{false, true} { + tr := &Transport{ + Dial: testDial, + } + c := &Client{Transport: tr} + + fetch := func(n int) string { + req := new(Request) + var err error + req.URL, err = url.Parse(ts.URL + fmt.Sprintf("/?close=%v", connectionClose)) + if err != nil { + t.Fatalf("URL parse error: %v", err) + } + req.Method = "GET" + req.Proto = "HTTP/1.1" + req.ProtoMajor = 1 + req.ProtoMinor = 1 + + res, err := c.Do(req) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err) + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err) + } + return string(body) + } + + body1 := fetch(1) + body2 := fetch(2) + bodiesDiffer := body1 != body2 + if bodiesDiffer != connectionClose { + t.Errorf("error in connectionClose=%v. 
unexpected bodiesDiffer=%v; body1=%q; body2=%q", + connectionClose, bodiesDiffer, body1, body2) + } + + tr.CloseIdleConnections() + } + + connSet.check(t) +} + +func TestTransportConnectionCloseOnRequest(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + connSet, testDial := makeTestDial(t) + + for _, connectionClose := range []bool{false, true} { + tr := &Transport{ + Dial: testDial, + } + c := &Client{Transport: tr} + + fetch := func(n int) string { + req := new(Request) + var err error + req.URL, err = url.Parse(ts.URL) + if err != nil { + t.Fatalf("URL parse error: %v", err) + } + req.Method = "GET" + req.Proto = "HTTP/1.1" + req.ProtoMajor = 1 + req.ProtoMinor = 1 + req.Close = connectionClose + + res, err := c.Do(req) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, Do: %v", connectionClose, n, err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("error in connectionClose=%v, req #%d, ReadAll: %v", connectionClose, n, err) + } + return string(body) + } + + body1 := fetch(1) + body2 := fetch(2) + bodiesDiffer := body1 != body2 + if bodiesDiffer != connectionClose { + t.Errorf("error in connectionClose=%v. unexpected bodiesDiffer=%v; body1=%q; body2=%q", + connectionClose, bodiesDiffer, body1, body2) + } + + tr.CloseIdleConnections() + } + + connSet.check(t) +} + +func TestTransportIdleCacheKeys(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + + if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { + t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g) + } + + resp, err := c.Get(ts.URL) + if err != nil { + t.Error(err) + } + ioutil.ReadAll(resp.Body) + + keys := tr.IdleConnKeysForTesting() + if e, g := 1, len(keys); e != g { + t.Fatalf("After Get expected %d idle conn cache keys; got %d", e, g) + } + + if e := "|http|" + ts.Listener.Addr().String(); keys[0] != e { + t.Errorf("Expected idle cache key %q; got %q", e, keys[0]) + } + + tr.CloseIdleConnections() + if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { + t.Errorf("After CloseIdleConnections expected %d idle conn cache keys; got %d", e, g) + } +} + +// Tests that the HTTP transport re-uses connections when a client +// reads to the end of a response Body without closing it. +func TestTransportReadToEndReusesConn(t *testing.T) { + defer afterTest(t) + const msg = "foobar" + + var addrSeen map[string]int + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + addrSeen[r.RemoteAddr]++ + if r.URL.Path == "/chunked/" { + w.WriteHeader(200) + w.(http.Flusher).Flush() + } else { + w.Header().Set("Content-Type", strconv.Itoa(len(msg))) + w.WriteHeader(200) + } + w.Write([]byte(msg)) + })) + defer ts.Close() + + buf := make([]byte, len(msg)) + + for pi, path := range []string{"/content-length/", "/chunked/"} { + wantLen := []int{len(msg), -1}[pi] + addrSeen = make(map[string]int) + for i := 0; i < 3; i++ { + res, err := http.Get(ts.URL + path) + if err != nil { + t.Errorf("Get %s: %v", path, err) + continue + } + // We want to close this body eventually (before the + // defer afterTest at top runs), but not before the + // len(addrSeen) check at the bottom of this test, + // since Closing this early in the loop would risk + // making connections be re-used for the wrong reason. 
+ defer res.Body.Close() + + if res.ContentLength != int64(wantLen) { + t.Errorf("%s res.ContentLength = %d; want %d", path, res.ContentLength, wantLen) + } + n, err := res.Body.Read(buf) + if n != len(msg) || err != io.EOF { + t.Errorf("%s Read = %v, %v; want %d, EOF", path, n, err, len(msg)) + } + } + if len(addrSeen) != 1 { + t.Errorf("for %s, server saw %d distinct client addresses; want 1", path, len(addrSeen)) + } + } +} + +func TestTransportMaxPerHostIdleConns(t *testing.T) { + defer afterTest(t) + resch := make(chan string) + gotReq := make(chan bool) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + gotReq <- true + msg := <-resch + _, err := w.Write([]byte(msg)) + if err != nil { + t.Fatalf("Write: %v", err) + } + })) + defer ts.Close() + maxIdleConns := 2 + tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns} + c := &Client{Transport: tr} + + // Start 3 outstanding requests and wait for the server to get them. + // Their responses will hang until we write to resch, though. + donech := make(chan bool) + doReq := func() { + resp, err := c.Get(ts.URL) + if err != nil { + t.Error(err) + return + } + if _, err := ioutil.ReadAll(resp.Body); err != nil { + t.Errorf("ReadAll: %v", err) + return + } + donech <- true + } + go doReq() + <-gotReq + go doReq() + <-gotReq + go doReq() + <-gotReq + + if e, g := 0, len(tr.IdleConnKeysForTesting()); e != g { + t.Fatalf("Before writes, expected %d idle conn cache keys; got %d", e, g) + } + + resch <- "res1" + <-donech + keys := tr.IdleConnKeysForTesting() + if e, g := 1, len(keys); e != g { + t.Fatalf("after first response, expected %d idle conn cache keys; got %d", e, g) + } + cacheKey := "|http|" + ts.Listener.Addr().String() + if keys[0] != cacheKey { + t.Fatalf("Expected idle cache key %q; got %q", cacheKey, keys[0]) + } + if e, g := 1, tr.IdleConnCountForTesting(cacheKey); e != g { + t.Errorf("after first response, expected %d idle conns; got %d", e, g) + } + + resch <- "res2" + <-donech + if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g { + t.Errorf("after second response, expected %d idle conns; got %d", e, g) + } + + resch <- "res3" + <-donech + if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g { + t.Errorf("after third response, still expected %d idle conns; got %d", e, g) + } +} + +func TestTransportServerClosingUnexpectedly(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(hostPortHandler) + defer ts.Close() + + tr := &Transport{} + c := &Client{Transport: tr} + + fetch := func(n, retries int) string { + condFatalf := func(format string, arg ...interface{}) { + if retries <= 0 { + t.Fatalf(format, arg...) + } + t.Logf("retrying shortly after expected error: "+format, arg...) + time.Sleep(time.Second / time.Duration(retries)) + } + for retries >= 0 { + retries-- + res, err := c.Get(ts.URL) + if err != nil { + condFatalf("error in req #%d, GET: %v", n, err) + continue + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + condFatalf("error in req #%d, ReadAll: %v", n, err) + continue + } + res.Body.Close() + return string(body) + } + panic("unreachable") + } + + body1 := fetch(1, 0) + body2 := fetch(2, 0) + + ts.CloseClientConnections() // surprise! + + // This test has an expected race. Sleeping for 25 ms prevents + // it on most fast machines, causing the next fetch() call to + // succeed quickly. But if we do get errors, fetch() will retry 5 + // times with some delays between. 
+ time.Sleep(25 * time.Millisecond) + + body3 := fetch(3, 5) + + if body1 != body2 { + t.Errorf("expected body1 and body2 to be equal") + } + if body2 == body3 { + t.Errorf("expected body2 and body3 to be different") + } +} + +// Test for http://golang.org/issue/2616 (appropriate issue number) +// This fails pretty reliably with GOMAXPROCS=100 or something high. +func TestStressSurpriseServerCloses(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping test in short mode") + } + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "5") + w.Header().Set("Content-Type", "text/plain") + w.Write([]byte("Hello")) + w.(Flusher).Flush() + conn, buf, _ := w.(Hijacker).Hijack() + buf.Flush() + conn.Close() + })) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + + // Do a bunch of traffic from different goroutines. Send to activityc + // after each request completes, regardless of whether it failed. + const ( + numClients = 50 + reqsPerClient = 250 + ) + activityc := make(chan bool) + for i := 0; i < numClients; i++ { + go func() { + for i := 0; i < reqsPerClient; i++ { + res, err := c.Get(ts.URL) + if err == nil { + // We expect errors since the server is + // hanging up on us after telling us to + // send more requests, so we don't + // actually care what the error is. + // But we want to close the body in cases + // where we won the race. + res.Body.Close() + } + activityc <- true + } + }() + } + + // Make sure all the request come back, one way or another. + for i := 0; i < numClients*reqsPerClient; i++ { + select { + case <-activityc: + case <-time.After(5 * time.Second): + t.Fatalf("presumed deadlock; no HTTP client activity seen in awhile") + } + } +} + +// TestTransportHeadResponses verifies that we deal with Content-Lengths +// with no bodies properly +func TestTransportHeadResponses(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "HEAD" { + panic("expected HEAD; got " + r.Method) + } + w.Header().Set("Content-Length", "123") + w.WriteHeader(200) + })) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + for i := 0; i < 2; i++ { + res, err := c.Head(ts.URL) + if err != nil { + t.Errorf("error on loop %d: %v", i, err) + continue + } + if e, g := "123", res.Header.Get("Content-Length"); e != g { + t.Errorf("loop %d: expected Content-Length header of %q, got %q", i, e, g) + } + if e, g := int64(123), res.ContentLength; e != g { + t.Errorf("loop %d: expected res.ContentLength of %v, got %v", i, e, g) + } + if all, err := ioutil.ReadAll(res.Body); err != nil { + t.Errorf("loop %d: Body ReadAll: %v", i, err) + } else if len(all) != 0 { + t.Errorf("Bogus body %q", all) + } + } +} + +// TestTransportHeadChunkedResponse verifies that we ignore chunked transfer-encoding +// on responses to HEAD requests. 
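+// A HEAD response carries no body regardless of its headers, so the
+// advertised chunked encoding must be ignored; the test also compares
+// x-client-ipport across both requests to confirm the connection was
+// reused.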
+func TestTransportHeadChunkedResponse(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method != "HEAD" { + panic("expected HEAD; got " + r.Method) + } + w.Header().Set("Transfer-Encoding", "chunked") // client should ignore + w.Header().Set("x-client-ipport", r.RemoteAddr) + w.WriteHeader(200) + })) + defer ts.Close() + + tr := &Transport{DisableKeepAlives: false} + c := &Client{Transport: tr} + + res1, err := c.Head(ts.URL) + if err != nil { + t.Fatalf("request 1 error: %v", err) + } + res2, err := c.Head(ts.URL) + if err != nil { + t.Fatalf("request 2 error: %v", err) + } + if v1, v2 := res1.Header.Get("x-client-ipport"), res2.Header.Get("x-client-ipport"); v1 != v2 { + t.Errorf("ip/ports differed between head requests: %q vs %q", v1, v2) + } +} + +var roundTripTests = []struct { + accept string + expectAccept string + compressed bool +}{ + // Requests with no accept-encoding header use transparent compression + {"", "gzip", false}, + // Requests with other accept-encoding should pass through unmodified + {"foo", "foo", false}, + // Requests with accept-encoding == gzip should be passed through + {"gzip", "gzip", true}, +} + +// Test that the modification made to the Request by the RoundTripper is cleaned up +func TestRoundTripGzip(t *testing.T) { + defer afterTest(t) + const responseBody = "test response body" + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + accept := req.Header.Get("Accept-Encoding") + if expect := req.FormValue("expect_accept"); accept != expect { + t.Errorf("in handler, test %v: Accept-Encoding = %q, want %q", + req.FormValue("testnum"), accept, expect) + } + if accept == "gzip" { + rw.Header().Set("Content-Encoding", "gzip") + gz := gzip.NewWriter(rw) + gz.Write([]byte(responseBody)) + gz.Close() + } else { + rw.Header().Set("Content-Encoding", accept) + rw.Write([]byte(responseBody)) + } + })) + defer ts.Close() + + for i, test := range roundTripTests { + // Test basic request (no accept-encoding) + req, _ := NewRequest("GET", fmt.Sprintf("%s/?testnum=%d&expect_accept=%s", ts.URL, i, test.expectAccept), nil) + if test.accept != "" { + req.Header.Set("Accept-Encoding", test.accept) + } + res, err := DefaultTransport.RoundTrip(req) + var body []byte + if test.compressed { + var r *gzip.Reader + r, err = gzip.NewReader(res.Body) + if err != nil { + t.Errorf("%d. gzip NewReader: %v", i, err) + continue + } + body, err = ioutil.ReadAll(r) + res.Body.Close() + } else { + body, err = ioutil.ReadAll(res.Body) + } + if err != nil { + t.Errorf("%d. Error: %q", i, err) + continue + } + if g, e := string(body), responseBody; g != e { + t.Errorf("%d. body = %q; want %q", i, g, e) + } + if g, e := req.Header.Get("Accept-Encoding"), test.accept; g != e { + t.Errorf("%d. Accept-Encoding = %q; want %q (it was mutated, in violation of RoundTrip contract)", i, g, e) + } + if g, e := res.Header.Get("Content-Encoding"), test.accept; g != e { + t.Errorf("%d. 
Content-Encoding = %q; want %q", i, g, e) + } + } + +} + +func TestTransportGzip(t *testing.T) { + defer afterTest(t) + const testString = "The test string aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + const nRandBytes = 1024 * 1024 + ts := httptest.NewServer(HandlerFunc(func(rw ResponseWriter, req *Request) { + if req.Method == "HEAD" { + if g := req.Header.Get("Accept-Encoding"); g != "" { + t.Errorf("HEAD request sent with Accept-Encoding of %q; want none", g) + } + return + } + if g, e := req.Header.Get("Accept-Encoding"), "gzip"; g != e { + t.Errorf("Accept-Encoding = %q, want %q", g, e) + } + rw.Header().Set("Content-Encoding", "gzip") + + var w io.Writer = rw + var buf bytes.Buffer + if req.FormValue("chunked") == "0" { + w = &buf + defer io.Copy(rw, &buf) + defer func() { + rw.Header().Set("Content-Length", strconv.Itoa(buf.Len())) + }() + } + gz := gzip.NewWriter(w) + gz.Write([]byte(testString)) + if req.FormValue("body") == "large" { + io.CopyN(gz, rand.Reader, nRandBytes) + } + gz.Close() + })) + defer ts.Close() + + for _, chunked := range []string{"1", "0"} { + c := &Client{Transport: &Transport{}} + + // First fetch something large, but only read some of it. + res, err := c.Get(ts.URL + "/?body=large&chunked=" + chunked) + if err != nil { + t.Fatalf("large get: %v", err) + } + buf := make([]byte, len(testString)) + n, err := io.ReadFull(res.Body, buf) + if err != nil { + t.Fatalf("partial read of large response: size=%d, %v", n, err) + } + if e, g := testString, string(buf); e != g { + t.Errorf("partial read got %q, expected %q", g, e) + } + res.Body.Close() + // Read on the body, even though it's closed + n, err = res.Body.Read(buf) + if n != 0 || err == nil { + t.Errorf("expected error post-closed large Read; got = %d, %v", n, err) + } + + // Then something small. + res, err = c.Get(ts.URL + "/?chunked=" + chunked) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if g, e := string(body), testString; g != e { + t.Fatalf("body = %q; want %q", g, e) + } + if g, e := res.Header.Get("Content-Encoding"), ""; g != e { + t.Fatalf("Content-Encoding = %q; want %q", g, e) + } + + // Read on the body after it's been fully read: + n, err = res.Body.Read(buf) + if n != 0 || err == nil { + t.Errorf("expected Read error after exhausted reads; got %d, %v", n, err) + } + res.Body.Close() + n, err = res.Body.Read(buf) + if n != 0 || err == nil { + t.Errorf("expected Read error after Close; got %d, %v", n, err) + } + } + + // And a HEAD request too, because they're always weird. + c := &Client{Transport: &Transport{}} + res, err := c.Head(ts.URL) + if err != nil { + t.Fatalf("Head: %v", err) + } + if res.StatusCode != 200 { + t.Errorf("Head status=%d; want=200", res.StatusCode) + } +} + +func TestTransportProxy(t *testing.T) { + defer afterTest(t) + ch := make(chan string, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ch <- "real server" + })) + defer ts.Close() + proxy := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + ch <- "proxy for " + r.URL.String() + })) + defer proxy.Close() + + pu, err := url.Parse(proxy.URL) + if err != nil { + t.Fatal(err) + } + c := &Client{Transport: &Transport{Proxy: ProxyURL(pu)}} + c.Head(ts.URL) + got := <-ch + want := "proxy for " + ts.URL + "/" + if got != want { + t.Errorf("want %q, got %q", want, got) + } +} + +// TestTransportGzipRecursive sends a gzip quine and checks that the +// client gets the same value back. 
This is more cute than anything, +// but checks that we don't recurse forever, and checks that +// Content-Encoding is removed. +func TestTransportGzipRecursive(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Encoding", "gzip") + w.Write(rgz) + })) + defer ts.Close() + + c := &Client{Transport: &Transport{}} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(body, rgz) { + t.Fatalf("Incorrect result from recursive gz:\nhave=%x\nwant=%x", + body, rgz) + } + if g, e := res.Header.Get("Content-Encoding"), ""; g != e { + t.Fatalf("Content-Encoding = %q; want %q", g, e) + } +} + +// golang.org/issue/7750: request fails when server replies with +// a short gzip body +func TestTransportGzipShort(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Encoding", "gzip") + w.Write([]byte{0x1f, 0x8b}) + })) + defer ts.Close() + + tr := &Transport{} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + _, err = ioutil.ReadAll(res.Body) + if err == nil { + t.Fatal("Expect an error from reading a body.") + } + if err != io.ErrUnexpectedEOF { + t.Errorf("ReadAll error = %v; want io.ErrUnexpectedEOF", err) + } +} + +// tests that persistent goroutine connections shut down when no longer desired. +func TestTransportPersistConnLeak(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + gotReqCh := make(chan bool) + unblockCh := make(chan bool) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + gotReqCh <- true + <-unblockCh + w.Header().Set("Content-Length", "0") + w.WriteHeader(204) + })) + defer ts.Close() + + tr := &Transport{} + c := &Client{Transport: tr} + + n0 := runtime.NumGoroutine() + + const numReq = 25 + didReqCh := make(chan bool) + for i := 0; i < numReq; i++ { + go func() { + res, err := c.Get(ts.URL) + didReqCh <- true + if err != nil { + t.Errorf("client fetch error: %v", err) + return + } + res.Body.Close() + }() + } + + // Wait for all goroutines to be stuck in the Handler. + for i := 0; i < numReq; i++ { + <-gotReqCh + } + + nhigh := runtime.NumGoroutine() + + // Tell all handlers to unblock and reply. + for i := 0; i < numReq; i++ { + unblockCh <- true + } + + // Wait for all HTTP clients to be done. + for i := 0; i < numReq; i++ { + <-didReqCh + } + + tr.CloseIdleConnections() + time.Sleep(100 * time.Millisecond) + runtime.GC() + runtime.GC() // even more. + nfinal := runtime.NumGoroutine() + + growth := nfinal - n0 + + // We expect 0 or 1 extra goroutine, empirically. Allow up to 5. + // Previously we were leaking one per numReq. 
+ if int(growth) > 5 { + t.Logf("goroutine growth: %d -> %d -> %d (delta: %d)", n0, nhigh, nfinal, growth) + t.Error("too many new goroutines") + } +} + +// golang.org/issue/4531: Transport leaks goroutines when +// request.ContentLength is explicitly short +func TestTransportPersistConnLeakShortBody(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + })) + defer ts.Close() + + tr := &Transport{} + c := &Client{Transport: tr} + + n0 := runtime.NumGoroutine() + body := []byte("Hello") + for i := 0; i < 20; i++ { + req, err := NewRequest("POST", ts.URL, bytes.NewReader(body)) + if err != nil { + t.Fatal(err) + } + req.ContentLength = int64(len(body) - 2) // explicitly short + _, err = c.Do(req) + if err == nil { + t.Fatal("Expect an error from writing too long of a body.") + } + } + nhigh := runtime.NumGoroutine() + tr.CloseIdleConnections() + time.Sleep(400 * time.Millisecond) + runtime.GC() + nfinal := runtime.NumGoroutine() + + growth := nfinal - n0 + + // We expect 0 or 1 extra goroutine, empirically. Allow up to 5. + // Previously we were leaking one per numReq. + t.Logf("goroutine growth: %d -> %d -> %d (delta: %d)", n0, nhigh, nfinal, growth) + if int(growth) > 5 { + t.Error("too many new goroutines") + } +} + +// This used to crash; http://golang.org/issue/3266 +func TestTransportIdleConnCrash(t *testing.T) { + defer afterTest(t) + tr := &Transport{} + c := &Client{Transport: tr} + + unblockCh := make(chan bool, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + <-unblockCh + tr.CloseIdleConnections() + })) + defer ts.Close() + + didreq := make(chan bool) + go func() { + res, err := c.Get(ts.URL) + if err != nil { + t.Error(err) + } else { + res.Body.Close() // returns idle conn + } + didreq <- true + }() + unblockCh <- true + <-didreq +} + +// Test that the transport doesn't close the TCP connection early, +// before the response body has been read. This was a regression +// which sadly lacked a triggering test. The large response body made +// the old race easier to trigger. +func TestIssue3644(t *testing.T) { + defer afterTest(t) + const numFoos = 5000 + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Connection", "close") + for i := 0; i < numFoos; i++ { + w.Write([]byte("foo ")) + } + })) + defer ts.Close() + tr := &Transport{} + c := &Client{Transport: tr} + res, err := c.Get(ts.URL) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + bs, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if len(bs) != numFoos*len("foo ") { + t.Errorf("unexpected response length") + } +} + +// Test that a client receives a server's reply, even if the server doesn't read +// the entire request body. +func TestIssue3595(t *testing.T) { + defer afterTest(t) + const deniedMsg = "sorry, denied." 
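+ // neverEnding('a') below supplies an endless request body; the
+ // handler replies 401 without reading it, and the client must still
+ // surface that response instead of blocking on the write.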
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + Error(w, deniedMsg, StatusUnauthorized) + })) + defer ts.Close() + tr := &Transport{} + c := &Client{Transport: tr} + res, err := c.Post(ts.URL, "application/octet-stream", neverEnding('a')) + if err != nil { + t.Errorf("Post: %v", err) + return + } + got, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Body ReadAll: %v", err) + } + if !strings.Contains(string(got), deniedMsg) { + t.Errorf("Known bug: response %q does not contain %q", got, deniedMsg) + } +} + +// From http://golang.org/issue/4454 , +// "client fails to handle requests with no body and chunked encoding" +func TestChunkedNoContent(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + w.WriteHeader(StatusNoContent) + })) + defer ts.Close() + + for _, closeBody := range []bool{true, false} { + c := &Client{Transport: &Transport{}} + const n = 4 + for i := 1; i <= n; i++ { + res, err := c.Get(ts.URL) + if err != nil { + t.Errorf("closingBody=%v, req %d/%d: %v", closeBody, i, n, err) + } else { + if closeBody { + res.Body.Close() + } + } + } + } +} + +func TestTransportConcurrency(t *testing.T) { + defer afterTest(t) + maxProcs, numReqs := 16, 500 + if testing.Short() { + maxProcs, numReqs = 4, 50 + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs)) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "%v", r.FormValue("echo")) + })) + defer ts.Close() + + var wg sync.WaitGroup + wg.Add(numReqs) + + tr := &Transport{ + Dial: func(netw, addr string) (c net.Conn, err error) { + // Due to the Transport's "socket late + // binding" (see idleConnCh in transport.go), + // the numReqs HTTP requests below can finish + // with a dial still outstanding. So count + // our dials as work too so the leak checker + // doesn't complain at us. 
+ wg.Add(1) + defer wg.Done() + return net.Dial(netw, addr) + }, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + reqs := make(chan string) + defer close(reqs) + + for i := 0; i < maxProcs*2; i++ { + go func() { + for req := range reqs { + res, err := c.Get(ts.URL + "/?echo=" + req) + if err != nil { + t.Errorf("error on req %s: %v", req, err) + wg.Done() + continue + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Errorf("read error on req %s: %v", req, err) + wg.Done() + continue + } + if string(all) != req { + t.Errorf("body of req %s = %q; want %q", req, all, req) + } + res.Body.Close() + wg.Done() + } + }() + } + for i := 0; i < numReqs; i++ { + reqs <- fmt.Sprintf("request-%d", i) + } + wg.Wait() +} + +func TestIssue4191_InfiniteGetTimeout(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + const debug = false + mux := NewServeMux() + mux.HandleFunc("/get", func(w ResponseWriter, r *Request) { + io.Copy(w, neverEnding('a')) + }) + ts := httptest.NewServer(mux) + timeout := 100 * time.Millisecond + + client := &Client{ + Transport: &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + conn, err := net.Dial(n, addr) + if err != nil { + return nil, err + } + conn.SetDeadline(time.Now().Add(timeout)) + if debug { + conn = NewLoggingConn("client", conn) + } + return conn, nil + }, + DisableKeepAlives: true, + }, + } + + getFailed := false + nRuns := 5 + if testing.Short() { + nRuns = 1 + } + for i := 0; i < nRuns; i++ { + if debug { + println("run", i+1, "of", nRuns) + } + sres, err := client.Get(ts.URL + "/get") + if err != nil { + if !getFailed { + // Make the timeout longer, once. + getFailed = true + t.Logf("increasing timeout") + i-- + timeout *= 10 + continue + } + t.Errorf("Error issuing GET: %v", err) + break + } + _, err = io.Copy(ioutil.Discard, sres.Body) + if err == nil { + t.Errorf("Unexpected successful copy") + break + } + } + if debug { + println("tests complete; waiting for handlers to finish") + } + ts.Close() +} + +func TestIssue4191_InfiniteGetToPutTimeout(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7237") + } + defer afterTest(t) + const debug = false + mux := NewServeMux() + mux.HandleFunc("/get", func(w ResponseWriter, r *Request) { + io.Copy(w, neverEnding('a')) + }) + mux.HandleFunc("/put", func(w ResponseWriter, r *Request) { + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }) + ts := httptest.NewServer(mux) + timeout := 100 * time.Millisecond + + client := &Client{ + Transport: &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + conn, err := net.Dial(n, addr) + if err != nil { + return nil, err + } + conn.SetDeadline(time.Now().Add(timeout)) + if debug { + conn = NewLoggingConn("client", conn) + } + return conn, nil + }, + DisableKeepAlives: true, + }, + } + + getFailed := false + nRuns := 5 + if testing.Short() { + nRuns = 1 + } + for i := 0; i < nRuns; i++ { + if debug { + println("run", i+1, "of", nRuns) + } + sres, err := client.Get(ts.URL + "/get") + if err != nil { + if !getFailed { + // Make the timeout longer, once. 
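+ // The first failure is forgiven: the loop retries the same
+ // iteration (i--) with a tenfold longer deadline before treating
+ // further failures as real errors.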
+ getFailed = true + t.Logf("increasing timeout") + i-- + timeout *= 10 + continue + } + t.Errorf("Error issuing GET: %v", err) + break + } + req, _ := NewRequest("PUT", ts.URL+"/put", sres.Body) + _, err = client.Do(req) + if err == nil { + sres.Body.Close() + t.Errorf("Unexpected successful PUT") + break + } + sres.Body.Close() + } + if debug { + println("tests complete; waiting for handlers to finish") + } + ts.Close() +} + +func TestTransportResponseHeaderTimeout(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping timeout test in -short mode") + } + inHandler := make(chan bool, 1) + mux := NewServeMux() + mux.HandleFunc("/fast", func(w ResponseWriter, r *Request) { + inHandler <- true + }) + mux.HandleFunc("/slow", func(w ResponseWriter, r *Request) { + inHandler <- true + time.Sleep(2 * time.Second) + }) + ts := httptest.NewServer(mux) + defer ts.Close() + + tr := &Transport{ + ResponseHeaderTimeout: 500 * time.Millisecond, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + tests := []struct { + path string + want int + wantErr string + }{ + {path: "/fast", want: 200}, + {path: "/slow", wantErr: "timeout awaiting response headers"}, + {path: "/fast", want: 200}, + } + for i, tt := range tests { + res, err := c.Get(ts.URL + tt.path) + select { + case <-inHandler: + case <-time.After(5 * time.Second): + t.Errorf("never entered handler for test index %d, %s", i, tt.path) + continue + } + if err != nil { + uerr, ok := err.(*url.Error) + if !ok { + t.Errorf("error is not an url.Error; got: %#v", err) + continue + } + nerr, ok := uerr.Err.(net.Error) + if !ok { + t.Errorf("error does not satisfy net.Error interface; got: %#v", err) + continue + } + if !nerr.Timeout() { + t.Errorf("want timeout error; got: %q", nerr) + continue + } + if strings.Contains(err.Error(), tt.wantErr) { + continue + } + t.Errorf("%d. unexpected error: %v", i, err) + continue + } + if tt.wantErr != "" { + t.Errorf("%d. no error. expected error: %v", i, tt.wantErr) + continue + } + if res.StatusCode != tt.want { + t.Errorf("%d for path %q status = %d; want %d", i, tt.path, res.StatusCode, tt.want) + } + } +} + +func TestTransportCancelRequest(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping test in -short mode") + } + unblockc := make(chan bool) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + fmt.Fprintf(w, "Hello") + w.(Flusher).Flush() // send headers and some body + <-unblockc + })) + defer ts.Close() + defer close(unblockc) + + tr := &Transport{} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + req, _ := NewRequest("GET", ts.URL, nil) + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + go func() { + time.Sleep(1 * time.Second) + tr.CancelRequest(req) + }() + t0 := time.Now() + body, err := ioutil.ReadAll(res.Body) + d := time.Since(t0) + + if err == nil { + t.Error("expected an error reading the body") + } + if string(body) != "Hello" { + t.Errorf("Body = %q; want Hello", body) + } + if d < 500*time.Millisecond { + t.Errorf("expected ~1 second delay; got %v", d) + } + // Verify no outstanding requests after readLoop/writeLoop + // goroutines shut down. 
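+ // Shutdown is asynchronous, so poll NumPendingRequestsForTesting a
+ // few times, sleeping between attempts, before reporting leftover
+ // requests as an error.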
+ for tries := 3; tries > 0; tries-- { + n := tr.NumPendingRequestsForTesting() + if n == 0 { + break + } + time.Sleep(100 * time.Millisecond) + if tries == 1 { + t.Errorf("pending requests = %d; want 0", n) + } + } +} + +func TestTransportCancelRequestInDial(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping test in -short mode") + } + var logbuf bytes.Buffer + eventLog := log.New(&logbuf, "", 0) + + unblockDial := make(chan bool) + defer close(unblockDial) + + inDial := make(chan bool) + tr := &Transport{ + Dial: func(network, addr string) (net.Conn, error) { + eventLog.Println("dial: blocking") + inDial <- true + <-unblockDial + return nil, errors.New("nope") + }, + } + cl := &Client{Transport: tr} + gotres := make(chan bool) + req, _ := NewRequest("GET", "http://something.no-network.tld/", nil) + go func() { + _, err := cl.Do(req) + eventLog.Printf("Get = %v", err) + gotres <- true + }() + + select { + case <-inDial: + case <-time.After(5 * time.Second): + t.Fatal("timeout; never saw blocking dial") + } + + eventLog.Printf("canceling") + tr.CancelRequest(req) + + select { + case <-gotres: + case <-time.After(5 * time.Second): + panic("hang. events are: " + logbuf.String()) + } + + got := logbuf.String() + want := `dial: blocking +canceling +Get = Get http://something.no-network.tld/: net/http: request canceled while waiting for connection +` + if got != want { + t.Errorf("Got events:\n%s\nWant:\n%s", got, want) + } +} + +// golang.org/issue/3672 -- Client can't close HTTP stream +// Calling Close on a Response.Body used to just read until EOF. +// Now it actually closes the TCP connection. +func TestTransportCloseResponseBody(t *testing.T) { + defer afterTest(t) + writeErr := make(chan error, 1) + msg := []byte("young\n") + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + for { + _, err := w.Write(msg) + if err != nil { + writeErr <- err + return + } + w.(Flusher).Flush() + } + })) + defer ts.Close() + + tr := &Transport{} + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + + req, _ := NewRequest("GET", ts.URL, nil) + defer tr.CancelRequest(req) + + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + + const repeats = 3 + buf := make([]byte, len(msg)*repeats) + want := bytes.Repeat(msg, repeats) + + _, err = io.ReadFull(res.Body, buf) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(buf, want) { + t.Fatalf("read %q; want %q", buf, want) + } + didClose := make(chan error, 1) + go func() { + didClose <- res.Body.Close() + }() + select { + case err := <-didClose: + if err != nil { + t.Errorf("Close = %v", err) + } + case <-time.After(10 * time.Second): + t.Fatal("too long waiting for close") + } + select { + case err := <-writeErr: + if err == nil { + t.Errorf("expected non-nil write error") + } + case <-time.After(10 * time.Second): + t.Fatal("too long waiting for write error") + } +} + +type fooProto struct{} + +func (fooProto) RoundTrip(req *Request) (*Response, error) { + res := &Response{ + Status: "200 OK", + StatusCode: 200, + Header: make(Header), + Body: ioutil.NopCloser(strings.NewReader("You wanted " + req.URL.String())), + } + return res, nil +} + +func TestTransportAltProto(t *testing.T) { + defer afterTest(t) + tr := &Transport{} + c := &Client{Transport: tr} + tr.RegisterProtocol("foo", fooProto{}) + res, err := c.Get("foo://bar.com/path") + if err != nil { + t.Fatal(err) + } + bodyb, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + body := string(bodyb) + if e := "You 
wanted foo://bar.com/path"; body != e { + t.Errorf("got response %q, want %q", body, e) + } +} + +func TestTransportNoHost(t *testing.T) { + defer afterTest(t) + tr := &Transport{} + _, err := tr.RoundTrip(&Request{ + Header: make(Header), + URL: &url.URL{ + Scheme: "http", + }, + }) + want := "http: no Host in request URL" + if got := fmt.Sprint(err); got != want { + t.Errorf("error = %v; want %q", err, want) + } +} + +func TestTransportSocketLateBinding(t *testing.T) { + defer afterTest(t) + + mux := NewServeMux() + fooGate := make(chan bool, 1) + mux.HandleFunc("/foo", func(w ResponseWriter, r *Request) { + w.Header().Set("foo-ipport", r.RemoteAddr) + w.(Flusher).Flush() + <-fooGate + }) + mux.HandleFunc("/bar", func(w ResponseWriter, r *Request) { + w.Header().Set("bar-ipport", r.RemoteAddr) + }) + ts := httptest.NewServer(mux) + defer ts.Close() + + dialGate := make(chan bool, 1) + tr := &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + if <-dialGate { + return net.Dial(n, addr) + } + return nil, errors.New("manually closed") + }, + DisableKeepAlives: false, + } + defer tr.CloseIdleConnections() + c := &Client{ + Transport: tr, + } + + dialGate <- true // only allow one dial + fooRes, err := c.Get(ts.URL + "/foo") + if err != nil { + t.Fatal(err) + } + fooAddr := fooRes.Header.Get("foo-ipport") + if fooAddr == "" { + t.Fatal("No addr on /foo request") + } + time.AfterFunc(200*time.Millisecond, func() { + // let the foo response finish so we can use its + // connection for /bar + fooGate <- true + io.Copy(ioutil.Discard, fooRes.Body) + fooRes.Body.Close() + }) + + barRes, err := c.Get(ts.URL + "/bar") + if err != nil { + t.Fatal(err) + } + barAddr := barRes.Header.Get("bar-ipport") + if barAddr != fooAddr { + t.Fatalf("/foo came from conn %q; /bar came from %q instead", fooAddr, barAddr) + } + barRes.Body.Close() + dialGate <- false +} + +// Issue 2184 +func TestTransportReading100Continue(t *testing.T) { + defer afterTest(t) + + const numReqs = 5 + reqBody := func(n int) string { return fmt.Sprintf("request body %d", n) } + reqID := func(n int) string { return fmt.Sprintf("REQ-ID-%d", n) } + + send100Response := func(w *io.PipeWriter, r *io.PipeReader) { + defer w.Close() + defer r.Close() + br := bufio.NewReader(r) + n := 0 + for { + n++ + req, err := ReadRequest(br) + if err == io.EOF { + return + } + if err != nil { + t.Error(err) + return + } + slurp, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("Server request body slurp: %v", err) + return + } + id := req.Header.Get("Request-Id") + resCode := req.Header.Get("X-Want-Response-Code") + if resCode == "" { + resCode = "100 Continue" + if string(slurp) != reqBody(n) { + t.Errorf("Server got %q, %v; want %q", slurp, err, reqBody(n)) + } + } + body := fmt.Sprintf("Response number %d", n) + v := []byte(strings.Replace(fmt.Sprintf(`HTTP/1.1 %s +Date: Thu, 28 Feb 2013 17:55:41 GMT + +HTTP/1.1 200 OK +Content-Type: text/html +Echo-Request-Id: %s +Content-Length: %d + +%s`, resCode, id, len(body), body), "\n", "\r\n", -1)) + w.Write(v) + if id == reqID(numReqs) { + return + } + } + + } + + tr := &Transport{ + Dial: func(n, addr string) (net.Conn, error) { + sr, sw := io.Pipe() // server read/write + cr, cw := io.Pipe() // client read/write + conn := &rwTestConn{ + Reader: cr, + Writer: sw, + closeFunc: func() error { + sw.Close() + cw.Close() + return nil + }, + } + go send100Response(cw, sr) + return conn, nil + }, + DisableKeepAlives: false, + } + defer tr.CloseIdleConnections() + c := &Client{Transport: tr} + 
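+ // Every request below travels over the in-memory pipe pair built in the
+ // Dial func above, with send100Response playing the server, so the
+ // transport's 1xx handling is exercised without touching a real network.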
+ testResponse := func(req *Request, name string, wantCode int) { + res, err := c.Do(req) + if err != nil { + t.Fatalf("%s: Do: %v", name, err) + } + if res.StatusCode != wantCode { + t.Fatalf("%s: Response Statuscode=%d; want %d", name, res.StatusCode, wantCode) + } + if id, idBack := req.Header.Get("Request-Id"), res.Header.Get("Echo-Request-Id"); id != "" && id != idBack { + t.Errorf("%s: response id %q != request id %q", name, idBack, id) + } + _, err = ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("%s: Slurp error: %v", name, err) + } + } + + // Few 100 responses, making sure we're not off-by-one. + for i := 1; i <= numReqs; i++ { + req, _ := NewRequest("POST", "http://dummy.tld/", strings.NewReader(reqBody(i))) + req.Header.Set("Request-Id", reqID(i)) + testResponse(req, fmt.Sprintf("100, %d/%d", i, numReqs), 200) + } + + // And some other informational 1xx but non-100 responses, to test + // we return them but don't re-use the connection. + for i := 1; i <= numReqs; i++ { + req, _ := NewRequest("POST", "http://other.tld/", strings.NewReader(reqBody(i))) + req.Header.Set("X-Want-Response-Code", "123 Sesame Street") + testResponse(req, fmt.Sprintf("123, %d/%d", i, numReqs), 123) + } +} + +type proxyFromEnvTest struct { + req string // URL to fetch; blank means "http://example.com" + env string + noenv string + want string + wanterr error +} + +func (t proxyFromEnvTest) String() string { + var buf bytes.Buffer + if t.env != "" { + fmt.Fprintf(&buf, "http_proxy=%q", t.env) + } + if t.noenv != "" { + fmt.Fprintf(&buf, " no_proxy=%q", t.noenv) + } + req := "http://example.com" + if t.req != "" { + req = t.req + } + fmt.Fprintf(&buf, " req=%q", req) + return strings.TrimSpace(buf.String()) +} + +var proxyFromEnvTests = []proxyFromEnvTest{ + {env: "127.0.0.1:8080", want: "http://127.0.0.1:8080"}, + {env: "cache.corp.example.com:1234", want: "http://cache.corp.example.com:1234"}, + {env: "cache.corp.example.com", want: "http://cache.corp.example.com"}, + {env: "https://cache.corp.example.com", want: "https://cache.corp.example.com"}, + {env: "http://127.0.0.1:8080", want: "http://127.0.0.1:8080"}, + {env: "https://127.0.0.1:8080", want: "https://127.0.0.1:8080"}, + {want: ""}, + {noenv: "example.com", req: "http://example.com/", env: "proxy", want: ""}, + {noenv: ".example.com", req: "http://example.com/", env: "proxy", want: ""}, + {noenv: "ample.com", req: "http://example.com/", env: "proxy", want: "http://proxy"}, + {noenv: "example.com", req: "http://foo.example.com/", env: "proxy", want: ""}, + {noenv: ".foo.com", req: "http://example.com/", env: "proxy", want: "http://proxy"}, +} + +func TestProxyFromEnvironment(t *testing.T) { + ResetProxyEnv() + for _, tt := range proxyFromEnvTests { + os.Setenv("HTTP_PROXY", tt.env) + os.Setenv("NO_PROXY", tt.noenv) + ResetCachedEnvironment() + reqURL := tt.req + if reqURL == "" { + reqURL = "http://example.com" + } + req, _ := NewRequest("GET", reqURL, nil) + url, err := ProxyFromEnvironment(req) + if g, e := fmt.Sprintf("%v", err), fmt.Sprintf("%v", tt.wanterr); g != e { + t.Errorf("%v: got error = %q, want %q", tt, g, e) + continue + } + if got := fmt.Sprintf("%s", url); got != tt.want { + t.Errorf("%v: got URL = %q, want %q", tt, url, tt.want) + } + } +} + +func TestIdleConnChannelLeak(t *testing.T) { + var mu sync.Mutex + var n int + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + mu.Lock() + n++ + mu.Unlock() + })) + defer ts.Close() + + tr := &Transport{ + Dial: func(netw, addr string) (net.Conn, error) { 
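+ // Redirect every request, whatever its host, to the single test
+ // server; each distinct foo-host-N.tld URL below still gets its own
+ // idle-connection key, which is what this test measures.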
+ return net.Dial(netw, ts.Listener.Addr().String()) + }, + } + defer tr.CloseIdleConnections() + + c := &Client{Transport: tr} + + // First, without keep-alives. + for _, disableKeep := range []bool{true, false} { + tr.DisableKeepAlives = disableKeep + for i := 0; i < 5; i++ { + _, err := c.Get(fmt.Sprintf("http://foo-host-%d.tld/", i)) + if err != nil { + t.Fatal(err) + } + } + if got := tr.IdleConnChMapSizeForTesting(); got != 0 { + t.Fatalf("ForDisableKeepAlives = %v, map size = %d; want 0", disableKeep, got) + } + } +} + +// Verify the status quo: that the Client.Post function coerces its +// body into a ReadCloser if it's a Closer, and that the Transport +// then closes it. +func TestTransportClosesRequestBody(t *testing.T) { + defer afterTest(t) + ts := httptest.NewServer(http.HandlerFunc(func(w ResponseWriter, r *Request) { + io.Copy(ioutil.Discard, r.Body) + })) + defer ts.Close() + + tr := &Transport{} + defer tr.CloseIdleConnections() + cl := &Client{Transport: tr} + + closes := 0 + + res, err := cl.Post(ts.URL, "text/plain", countCloseReader{&closes, strings.NewReader("hello")}) + if err != nil { + t.Fatal(err) + } + res.Body.Close() + if closes != 1 { + t.Errorf("closes = %d; want 1", closes) + } +} + +func TestTransportTLSHandshakeTimeout(t *testing.T) { + defer afterTest(t) + if testing.Short() { + t.Skip("skipping in short mode") + } + ln := newLocalListener(t) + defer ln.Close() + testdonec := make(chan struct{}) + defer close(testdonec) + + go func() { + c, err := ln.Accept() + if err != nil { + t.Error(err) + return + } + <-testdonec + c.Close() + }() + + getdonec := make(chan struct{}) + go func() { + defer close(getdonec) + tr := &Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.Dial("tcp", ln.Addr().String()) + }, + TLSHandshakeTimeout: 250 * time.Millisecond, + } + cl := &Client{Transport: tr} + _, err := cl.Get("https://dummy.tld/") + if err == nil { + t.Error("expected error") + return + } + ue, ok := err.(*url.Error) + if !ok { + t.Errorf("expected url.Error; got %#v", err) + return + } + ne, ok := ue.Err.(net.Error) + if !ok { + t.Errorf("expected net.Error; got %#v", err) + return + } + if !ne.Timeout() { + t.Errorf("expected timeout error; got %v", err) + } + if !strings.Contains(err.Error(), "handshake timeout") { + t.Errorf("expected 'handshake timeout' in error; got %v", err) + } + }() + select { + case <-getdonec: + case <-time.After(5 * time.Second): + t.Error("test timeout; TLS handshake hung?") + } +} + +// Trying to repro golang.org/issue/3514 +func TestTLSServerClosesConnection(t *testing.T) { + defer afterTest(t) + if runtime.GOOS == "windows" { + t.Skip("skipping flaky test on Windows; golang.org/issue/7634") + } + closedc := make(chan bool, 1) + ts := httptest.NewTLSServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if strings.Contains(r.URL.Path, "/keep-alive-then-die") { + conn, _, _ := w.(Hijacker).Hijack() + conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nfoo")) + conn.Close() + closedc <- true + return + } + fmt.Fprintf(w, "hello") + })) + defer ts.Close() + tr := &Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + defer tr.CloseIdleConnections() + client := &Client{Transport: tr} + + var nSuccess = 0 + var errs []error + const trials = 20 + for i := 0; i < trials; i++ { + tr.CloseIdleConnections() + res, err := client.Get(ts.URL + "/keep-alive-then-die") + if err != nil { + t.Fatal(err) + } + <-closedc + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + 
t.Fatal(err) + } + if string(slurp) != "foo" { + t.Errorf("Got %q, want foo", slurp) + } + + // Now try again and see if we successfully + // pick a new connection. + res, err = client.Get(ts.URL + "/") + if err != nil { + errs = append(errs, err) + continue + } + slurp, err = ioutil.ReadAll(res.Body) + if err != nil { + errs = append(errs, err) + continue + } + nSuccess++ + } + if nSuccess > 0 { + t.Logf("successes = %d of %d", nSuccess, trials) + } else { + t.Errorf("All runs failed:") + } + for _, err := range errs { + t.Logf(" err: %v", err) + } +} + +// byteFromChanReader is an io.Reader that reads a single byte at a +// time from the channel. When the channel is closed, the reader +// returns io.EOF. +type byteFromChanReader chan byte + +func (c byteFromChanReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + b, ok := <-c + if !ok { + return 0, io.EOF + } + p[0] = b + return 1, nil +} + +// Verifies that the Transport doesn't reuse a connection in the case +// where the server replies before the request has been fully +// written. We still honor that reply (see TestIssue3595), but don't +// send future requests on the connection because it's then in a +// questionable state. +// golang.org/issue/7569 +func TestTransportNoReuseAfterEarlyResponse(t *testing.T) { + defer afterTest(t) + var sconn struct { + sync.Mutex + c net.Conn + } + var getOkay bool + closeConn := func() { + sconn.Lock() + defer sconn.Unlock() + if sconn.c != nil { + sconn.c.Close() + sconn.c = nil + if !getOkay { + t.Logf("Closed server connection") + } + } + } + defer closeConn() + + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + if r.Method == "GET" { + io.WriteString(w, "bar") + return + } + conn, _, _ := w.(Hijacker).Hijack() + sconn.Lock() + sconn.c = conn + sconn.Unlock() + conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nfoo")) // keep-alive + go io.Copy(ioutil.Discard, conn) + })) + defer ts.Close() + tr := &Transport{} + defer tr.CloseIdleConnections() + client := &Client{Transport: tr} + + const bodySize = 256 << 10 + finalBit := make(byteFromChanReader, 1) + req, _ := NewRequest("POST", ts.URL, io.MultiReader(io.LimitReader(neverEnding('x'), bodySize-1), finalBit)) + req.ContentLength = bodySize + res, err := client.Do(req) + if err := wantBody(res, err, "foo"); err != nil { + t.Errorf("POST response: %v", err) + } + donec := make(chan bool) + go func() { + defer close(donec) + res, err = client.Get(ts.URL) + if err := wantBody(res, err, "bar"); err != nil { + t.Errorf("GET response: %v", err) + return + } + getOkay = true // suppress test noise + }() + time.AfterFunc(5*time.Second, closeConn) + select { + case <-donec: + finalBit <- 'x' // unblock the writeloop of the first Post + close(finalBit) + case <-time.After(7 * time.Second): + t.Fatal("timeout waiting for GET request to finish") + } +} + +type errorReader struct { + err error +} + +func (e errorReader) Read(p []byte) (int, error) { return 0, e.err } + +type closerFunc func() error + +func (f closerFunc) Close() error { return f() } + +// Issue 6981 +func TestTransportClosesBodyOnError(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("skipping test; see http://golang.org/issue/7782") + } + defer afterTest(t) + readBody := make(chan error, 1) + ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) { + _, err := ioutil.ReadAll(r.Body) + readBody <- err + })) + defer ts.Close() + fakeErr := errors.New("fake error") + didClose := make(chan bool, 1) + req, _ 
:= NewRequest("POST", ts.URL, struct { + io.Reader + io.Closer + }{ + io.MultiReader(io.LimitReader(neverEnding('x'), 1<<20), errorReader{fakeErr}), + closerFunc(func() error { + select { + case didClose <- true: + default: + } + return nil + }), + }) + res, err := DefaultClient.Do(req) + if res != nil { + defer res.Body.Close() + } + if err == nil || !strings.Contains(err.Error(), fakeErr.Error()) { + t.Fatalf("Do error = %v; want something containing %q", err, fakeErr.Error()) + } + select { + case err := <-readBody: + if err == nil { + t.Errorf("Unexpected success reading request body from handler; want 'unexpected EOF reading trailer'") + } + case <-time.After(5 * time.Second): + t.Error("timeout waiting for server handler to complete") + } + select { + case <-didClose: + default: + t.Errorf("didn't see Body.Close") + } +} + +func wantBody(res *http.Response, err error, want string) error { + if err != nil { + return err + } + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("error reading body: %v", err) + } + if string(slurp) != want { + return fmt.Errorf("body = %q; want %q", slurp, want) + } + if err := res.Body.Close(); err != nil { + return fmt.Errorf("body Close = %v", err) + } + return nil +} + +func newLocalListener(t *testing.T) net.Listener { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + ln, err = net.Listen("tcp6", "[::1]:0") + } + if err != nil { + t.Fatal(err) + } + return ln +} + +type countCloseReader struct { + n *int + io.Reader +} + +func (cr countCloseReader) Close() error { + (*cr.n)++ + return nil +} + +// rgz is a gzip quine that uncompresses to itself. +var rgz = []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, + 0x69, 0x76, 0x65, 0x00, 0x92, 0xef, 0xe6, 0xe0, + 0x60, 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, + 0xe2, 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, + 0x00, 0xe8, 0xff, 0x92, 0xef, 0xe6, 0xe0, 0x60, + 0x00, 0x83, 0xa2, 0xd4, 0xe4, 0xd2, 0xa2, 0xe2, + 0xcc, 0xb2, 0x54, 0x06, 0x00, 0x00, 0x17, 0x00, + 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, + 0x05, 0x00, 0xfa, 0xff, 0x42, 0x12, 0x46, 0x16, + 0x06, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, 0x05, + 0x00, 0xfa, 0xff, 0x00, 0x14, 0x00, 0xeb, 0xff, + 0x42, 0x12, 0x46, 0x16, 0x06, 0x00, 0x05, 0x00, + 0xfa, 0xff, 0x00, 0x05, 0x00, 0xfa, 0xff, 0x00, + 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, + 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, + 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, 0xeb, 0xff, + 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x14, 0x00, + 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, + 0x14, 0x00, 0xeb, 0xff, 0x42, 0x88, 0x21, 0xc4, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x00, 0x17, 0x00, 0xe8, 0xff, + 0x42, 0x88, 0x21, 0xc4, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, + 0x17, 0x00, 0xe8, 0xff, 0x42, 0x12, 0x46, 0x16, + 0x06, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, + 0x00, 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, + 0x00, 0x00, 0x00, 0x42, 0x12, 0x46, 0x16, 0x06, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x08, 0x00, + 0xf7, 0xff, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00, + 0x00, 0x00, 0x3d, 0xb1, 0x20, 0x85, 0xfa, 0x00, + 0x00, 0x00, +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/triv.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/triv.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/triv.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,141 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bytes" + "expvar" + "flag" + "fmt" + "io" + "log" + "net/http" + "os" + "os/exec" + "strconv" + "sync" +) + +// hello world, the web server +var helloRequests = expvar.NewInt("hello-requests") + +func HelloServer(w http.ResponseWriter, req *http.Request) { + helloRequests.Add(1) + io.WriteString(w, "hello, world!\n") +} + +// Simple counter server. POSTing to it will set the value. +type Counter struct { + mu sync.Mutex // protects n + n int +} + +// This makes Counter satisfy the expvar.Var interface, so we can export +// it directly. +func (ctr *Counter) String() string { + ctr.mu.Lock() + defer ctr.mu.Unlock() + return fmt.Sprintf("%d", ctr.n) +} + +func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) { + ctr.mu.Lock() + defer ctr.mu.Unlock() + switch req.Method { + case "GET": + ctr.n++ + case "POST": + buf := new(bytes.Buffer) + io.Copy(buf, req.Body) + body := buf.String() + if n, err := strconv.Atoi(body); err != nil { + fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body) + } else { + ctr.n = n + fmt.Fprint(w, "counter reset\n") + } + } + fmt.Fprintf(w, "counter = %d\n", ctr.n) +} + +// simple flag server +var booleanflag = flag.Bool("boolean", true, "another flag for testing") + +func FlagServer(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprint(w, "Flags:\n") + flag.VisitAll(func(f *flag.Flag) { + if f.Value.String() != f.DefValue { + fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue) + } else { + fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String()) + } + }) +} + +// simple argument server +func ArgServer(w http.ResponseWriter, req *http.Request) { + for _, s := range os.Args { + fmt.Fprint(w, s, " ") + } +} + +// a channel (just for the fun of it) +type Chan chan int + +func ChanCreate() Chan { + c := make(Chan) + go func(c Chan) { + for x := 0; ; x++ { + c <- x + } + }(c) + return c +} + +func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) { + io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch)) +} + +// exec a program, redirecting output +func DateServer(rw http.ResponseWriter, req *http.Request) { + rw.Header().Set("Content-Type", "text/plain; charset=utf-8") + + date, err := exec.Command("/bin/date").Output() + if err != nil { + http.Error(rw, err.Error(), 500) + return + } + rw.Write(date) +} + +func Logger(w http.ResponseWriter, req *http.Request) { + log.Print(req.URL) + http.Error(w, "oops", 404) +} + +var webroot = flag.String("root", os.Getenv("HOME"), "web root directory") + +func main() { + flag.Parse() + + // The counter is published as a variable directly. 
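+ // Counter satisfies expvar.Var via its String method, so it can be
+ // handed to expvar.Publish directly. The built-in expvar types follow
+ // the same pattern; a minimal sketch:
+ //
+ //	hits := expvar.NewInt("hits")
+ //	hits.Add(1)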
+ ctr := new(Counter) + expvar.Publish("counter", ctr) + http.Handle("/counter", ctr) + http.Handle("/", http.HandlerFunc(Logger)) + http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot)))) + http.Handle("/chan", ChanCreate()) + http.HandleFunc("/flags", FlagServer) + http.HandleFunc("/args", ArgServer) + http.HandleFunc("/go/hello", HelloServer) + http.HandleFunc("/date", DateServer) + err := http.ListenAndServe(":12345", nil) + if err != nil { + log.Panicln("ListenAndServe:", err) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/http/z_last_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http_test + +import ( + "net/http" + "runtime" + "sort" + "strings" + "testing" + "time" +) + +func interestingGoroutines() (gs []string) { + buf := make([]byte, 2<<20) + buf = buf[:runtime.Stack(buf, true)] + for _, g := range strings.Split(string(buf), "\n\n") { + sl := strings.SplitN(g, "\n", 2) + if len(sl) != 2 { + continue + } + stack := strings.TrimSpace(sl[1]) + if stack == "" || + strings.Contains(stack, "created by net.startServer") || + strings.Contains(stack, "created by testing.RunTests") || + strings.Contains(stack, "closeWriteAndWait") || + strings.Contains(stack, "testing.Main(") || + // These only show up with GOTRACEBACK=2; Issue 5005 (comment 28) + strings.Contains(stack, "runtime.goexit") || + strings.Contains(stack, "created by runtime.gc") || + strings.Contains(stack, "runtime.MHeap_Scavenger") { + continue + } + gs = append(gs, stack) + } + sort.Strings(gs) + return +} + +// Verify the other tests didn't leave any goroutines running. +// This is in a file named z_last_test.go so it sorts at the end. +func TestGoroutinesRunning(t *testing.T) { + if testing.Short() { + t.Skip("not counting goroutines for leakage in -short mode") + } + gs := interestingGoroutines() + + n := 0 + stackCount := make(map[string]int) + for _, g := range gs { + stackCount[g]++ + n++ + } + + t.Logf("num goroutines = %d", n) + if n > 0 { + t.Error("Too many goroutines.") + for stack, count := range stackCount { + t.Logf("%d instances of:\n%s", count, stack) + } + } +} + +func afterTest(t *testing.T) { + http.DefaultTransport.(*http.Transport).CloseIdleConnections() + if testing.Short() { + return + } + var bad string + badSubstring := map[string]string{ + ").readLoop(": "a Transport", + ").writeLoop(": "a Transport", + "created by net/http/httptest.(*Server).Start": "an httptest.Server", + "timeoutHandler": "a TimeoutHandler", + "net.(*netFD).connect(": "a timing out dial", + ").noteClientGone(": "a closenotifier sender", + } + var stacks string + for i := 0; i < 4; i++ { + bad = "" + stacks = strings.Join(interestingGoroutines(), "\n\n") + for substr, what := range badSubstring { + if strings.Contains(stacks, substr) { + bad = what + } + } + if bad == "" { + return + } + // Bad stuff found, but goroutines might just still be + // shutting down, so give it some time. 
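+ // (The stacks come from runtime.Stack with all=true, as in
+ // interestingGoroutines above:
+ //
+ //	buf := make([]byte, 2<<20)
+ //	buf = buf[:runtime.Stack(buf, true)]
+ //
+ // and retrying gives shutting-down goroutines time to drain.)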
+ time.Sleep(250 * time.Millisecond) + } + t.Errorf("Test appears to have leaked %s:\n%s", bad, stacks) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/core/tls' === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/alert.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/alert.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/alert.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import "strconv" + +type alert uint8 + +const ( + // alert level + alertLevelWarning = 1 + alertLevelError = 2 +) + +const ( + alertCloseNotify alert = 0 + alertUnexpectedMessage alert = 10 + alertBadRecordMAC alert = 20 + alertDecryptionFailed alert = 21 + alertRecordOverflow alert = 22 + alertDecompressionFailure alert = 30 + alertHandshakeFailure alert = 40 + alertBadCertificate alert = 42 + alertUnsupportedCertificate alert = 43 + alertCertificateRevoked alert = 44 + alertCertificateExpired alert = 45 + alertCertificateUnknown alert = 46 + alertIllegalParameter alert = 47 + alertUnknownCA alert = 48 + alertAccessDenied alert = 49 + alertDecodeError alert = 50 + alertDecryptError alert = 51 + alertProtocolVersion alert = 70 + alertInsufficientSecurity alert = 71 + alertInternalError alert = 80 + alertUserCanceled alert = 90 + alertNoRenegotiation alert = 100 +) + +var alertText = map[alert]string{ + alertCloseNotify: "close notify", + alertUnexpectedMessage: "unexpected message", + alertBadRecordMAC: "bad record MAC", + alertDecryptionFailed: "decryption failed", + alertRecordOverflow: "record overflow", + alertDecompressionFailure: "decompression failure", + alertHandshakeFailure: "handshake failure", + alertBadCertificate: "bad certificate", + alertUnsupportedCertificate: "unsupported certificate", + alertCertificateRevoked: "revoked certificate", + alertCertificateExpired: "expired certificate", + alertCertificateUnknown: "unknown certificate", + alertIllegalParameter: "illegal parameter", + alertUnknownCA: "unknown certificate authority", + alertAccessDenied: "access denied", + alertDecodeError: "error decoding message", + alertDecryptError: "error decrypting message", + alertProtocolVersion: "protocol version not supported", + alertInsufficientSecurity: "insufficient security level", + alertInternalError: "internal error", + alertUserCanceled: "user canceled", + alertNoRenegotiation: "no renegotiation", +} + +func (e alert) String() string { + s, ok := alertText[e] + if ok { + return s + } + return "alert(" + strconv.Itoa(int(e)) + ")" +} + +func (e alert) Error() string { + return e.String() +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/cipher_suites.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,270 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/hmac" + "crypto/rc4" + "crypto/sha1" + "crypto/x509" + "hash" +) + +// a keyAgreement implements the client and server side of a TLS key agreement +// protocol by generating and processing key exchange messages. 
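+//
+// For example, a server-side handshake drives the interface roughly as
+// follows (a sketch only; ka, config, cert and the hello/exchange messages
+// are assumed to be in scope):
+//
+//	skx, err := ka.generateServerKeyExchange(config, cert, clientHello, serverHello)
+//	// send skx if non-nil, then read the client's reply into ckx
+//	preMasterSecret, err := ka.processClientKeyExchange(config, cert, ckx, version)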
+type keyAgreement interface {
+ // On the server side, the first two methods are called in order.
+
+ // In the case that the key agreement protocol doesn't use a
+ // ServerKeyExchange message, generateServerKeyExchange can return nil,
+ // nil.
+ generateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)
+ processClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error)
+
+ // On the client side, the next two methods are called in order.
+
+ // This method may not be called if the server doesn't send a
+ // ServerKeyExchange message.
+ processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error
+ generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)
+}
+
+const (
+ // suiteECDHE indicates that the cipher suite involves elliptic curve
+ // Diffie-Hellman. This means that it should only be selected when the
+ // client indicates that it supports ECC with a curve and point format
+ // that we're happy with.
+ suiteECDHE = 1 << iota
+ // suiteECDSA indicates that the cipher suite involves an ECDSA
+ // signature and therefore may only be selected when the server's
+ // certificate is ECDSA. If this is not set then the cipher suite is
+ // RSA based.
+ suiteECDSA
+ // suiteTLS12 indicates that the cipher suite should only be advertised
+ // and accepted when using TLS 1.2.
+ suiteTLS12
+)
+
+// A cipherSuite is a specific combination of key agreement, cipher and MAC
+// function. All cipher suites currently assume RSA key agreement.
+type cipherSuite struct {
+ id uint16
+ // the lengths, in bytes, of the key material needed for each component.
+ keyLen int
+ macLen int
+ ivLen int
+ ka func(version uint16) keyAgreement
+ // flags is a bitmask of the suite* values, above.
+ flags int
+ cipher func(key, iv []byte, isRead bool) interface{}
+ mac func(version uint16, macKey []byte) macFunction
+ aead func(key, fixedNonce []byte) cipher.AEAD
+}
+
+var cipherSuites = []*cipherSuite{
+ // Ciphersuite order is chosen so that ECDHE comes before plain RSA
+ // and RC4 comes before AES (because of the Lucky13 attack). 
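+ // A client or server can restrict or reorder this set through
+ // Config.CipherSuites; a minimal sketch:
+ //
+ //	cfg := &Config{CipherSuites: []uint16{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}}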
+ {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM}, + {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadAESGCM}, + {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE, cipherRC4, macSHA1, nil}, + {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherRC4, macSHA1, nil}, + {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, + {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, + {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil}, + {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil}, + {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, 0, cipherRC4, macSHA1, nil}, + {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, + {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil}, + {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil}, + {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil}, +} + +func cipherRC4(key, iv []byte, isRead bool) interface{} { + cipher, _ := rc4.NewCipher(key) + return cipher +} + +func cipher3DES(key, iv []byte, isRead bool) interface{} { + block, _ := des.NewTripleDESCipher(key) + if isRead { + return cipher.NewCBCDecrypter(block, iv) + } + return cipher.NewCBCEncrypter(block, iv) +} + +func cipherAES(key, iv []byte, isRead bool) interface{} { + block, _ := aes.NewCipher(key) + if isRead { + return cipher.NewCBCDecrypter(block, iv) + } + return cipher.NewCBCEncrypter(block, iv) +} + +// macSHA1 returns a macFunction for the given protocol version. +func macSHA1(version uint16, key []byte) macFunction { + if version == VersionSSL30 { + mac := ssl30MAC{ + h: sha1.New(), + key: make([]byte, len(key)), + } + copy(mac.key, key) + return mac + } + return tls10MAC{hmac.New(sha1.New, key)} +} + +type macFunction interface { + Size() int + MAC(digestBuf, seq, header, data []byte) []byte +} + +// fixedNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to +// each call. +type fixedNonceAEAD struct { + // sealNonce and openNonce are buffers where the larger nonce will be + // constructed. Since a seal and open operation may be running + // concurrently, there is a separate buffer for each. 
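+ // For AES-GCM in TLS, for instance, the 12-byte nonce is the 4-byte
+ // fixed part from the key block followed by the 8-byte explicit part
+ // carried in each record; Seal and Open below splice it in with, e.g.:
+ //
+ //	copy(f.sealNonce[len(f.sealNonce)-8:], nonce)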
+ sealNonce, openNonce []byte + aead cipher.AEAD +} + +func (f *fixedNonceAEAD) NonceSize() int { return 8 } +func (f *fixedNonceAEAD) Overhead() int { return f.aead.Overhead() } + +func (f *fixedNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte { + copy(f.sealNonce[len(f.sealNonce)-8:], nonce) + return f.aead.Seal(out, f.sealNonce, plaintext, additionalData) +} + +func (f *fixedNonceAEAD) Open(out, nonce, plaintext, additionalData []byte) ([]byte, error) { + copy(f.openNonce[len(f.openNonce)-8:], nonce) + return f.aead.Open(out, f.openNonce, plaintext, additionalData) +} + +func aeadAESGCM(key, fixedNonce []byte) cipher.AEAD { + aes, err := aes.NewCipher(key) + if err != nil { + panic(err) + } + aead, err := cipher.NewGCM(aes) + if err != nil { + panic(err) + } + + nonce1, nonce2 := make([]byte, 12), make([]byte, 12) + copy(nonce1, fixedNonce) + copy(nonce2, fixedNonce) + + return &fixedNonceAEAD{nonce1, nonce2, aead} +} + +// ssl30MAC implements the SSLv3 MAC function, as defined in +// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 5.2.3.1 +type ssl30MAC struct { + h hash.Hash + key []byte +} + +func (s ssl30MAC) Size() int { + return s.h.Size() +} + +var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36} + +var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c} + +func (s ssl30MAC) MAC(digestBuf, seq, header, data []byte) []byte { + padLength := 48 + if s.h.Size() == 20 { + padLength = 40 + } + + s.h.Reset() + s.h.Write(s.key) + s.h.Write(ssl30Pad1[:padLength]) + s.h.Write(seq) + s.h.Write(header[:1]) + s.h.Write(header[3:5]) + s.h.Write(data) + digestBuf = s.h.Sum(digestBuf[:0]) + + s.h.Reset() + s.h.Write(s.key) + s.h.Write(ssl30Pad2[:padLength]) + s.h.Write(digestBuf) + return s.h.Sum(digestBuf[:0]) +} + +// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, section 6.2.3. +type tls10MAC struct { + h hash.Hash +} + +func (s tls10MAC) Size() int { + return s.h.Size() +} + +func (s tls10MAC) MAC(digestBuf, seq, header, data []byte) []byte { + s.h.Reset() + s.h.Write(seq) + s.h.Write(header) + s.h.Write(data) + return s.h.Sum(digestBuf[:0]) +} + +func rsaKA(version uint16) keyAgreement { + return rsaKeyAgreement{} +} + +func ecdheECDSAKA(version uint16) keyAgreement { + return &ecdheKeyAgreement{ + sigType: signatureECDSA, + version: version, + } +} + +func ecdheRSAKA(version uint16) keyAgreement { + return &ecdheKeyAgreement{ + sigType: signatureRSA, + version: version, + } +} + +// mutualCipherSuite returns a cipherSuite given a list of supported +// ciphersuites and the id requested by the peer. +func mutualCipherSuite(have []uint16, want uint16) *cipherSuite { + for _, id := range have { + if id == want { + for _, suite := range cipherSuites { + if suite.id == want { + return suite + } + } + return nil + } + } + return nil +} + +// A list of the possible cipher suite ids. 
Taken from +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml +const ( + TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a + TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f + TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007 + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009 + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a + TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011 + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012 + TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013 + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014 + TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b +) === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/common.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/common.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/common.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,438 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "io" + "math/big" + "strings" + "sync" + "time" +) + +const ( + VersionSSL30 = 0x0300 + VersionTLS10 = 0x0301 + VersionTLS11 = 0x0302 + VersionTLS12 = 0x0303 +) + +const ( + maxPlaintext = 16384 // maximum plaintext payload length + maxCiphertext = 16384 + 2048 // maximum ciphertext payload length + recordHeaderLen = 5 // record header length + maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) + + minVersion = VersionSSL30 + maxVersion = VersionTLS12 +) + +// TLS record types. +type recordType uint8 + +const ( + recordTypeChangeCipherSpec recordType = 20 + recordTypeAlert recordType = 21 + recordTypeHandshake recordType = 22 + recordTypeApplicationData recordType = 23 +) + +// TLS handshake message types. +const ( + typeHelloRequest uint8 = 0 + typeClientHello uint8 = 1 + typeServerHello uint8 = 2 + typeNewSessionTicket uint8 = 4 + typeCertificate uint8 = 11 + typeServerKeyExchange uint8 = 12 + typeCertificateRequest uint8 = 13 + typeServerHelloDone uint8 = 14 + typeCertificateVerify uint8 = 15 + typeClientKeyExchange uint8 = 16 + typeFinished uint8 = 20 + typeCertificateStatus uint8 = 22 + typeNextProtocol uint8 = 67 // Not IANA assigned +) + +// TLS compression types. 
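+// Only the null method is defined: this implementation never offers or
+// accepts TLS-level compression.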
+const ( + compressionNone uint8 = 0 +) + +// TLS extension numbers +var ( + extensionServerName uint16 = 0 + extensionStatusRequest uint16 = 5 + extensionSupportedCurves uint16 = 10 + extensionSupportedPoints uint16 = 11 + extensionSignatureAlgorithms uint16 = 13 + extensionSessionTicket uint16 = 35 + extensionNextProtoNeg uint16 = 13172 // not IANA assigned +) + +// TLS Elliptic Curves +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 +var ( + curveP256 uint16 = 23 + curveP384 uint16 = 24 + curveP521 uint16 = 25 +) + +// TLS Elliptic Curve Point Formats +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 +var ( + pointFormatUncompressed uint8 = 0 +) + +// TLS CertificateStatusType (RFC 3546) +const ( + statusTypeOCSP uint8 = 1 +) + +// Certificate types (for certificateRequestMsg) +const ( + certTypeRSASign = 1 // A certificate containing an RSA key + certTypeDSSSign = 2 // A certificate containing a DSA key + certTypeRSAFixedDH = 3 // A certificate containing a static DH key + certTypeDSSFixedDH = 4 // A certificate containing a static DH key + + // See RFC4492 sections 3 and 5.5. + certTypeECDSASign = 64 // A certificate containing an ECDSA-capable public key, signed with ECDSA. + certTypeRSAFixedECDH = 65 // A certificate containing an ECDH-capable public key, signed with RSA. + certTypeECDSAFixedECDH = 66 // A certificate containing an ECDH-capable public key, signed with ECDSA. + + // Rest of these are reserved by the TLS spec +) + +// Hash functions for TLS 1.2 (See RFC 5246, section A.4.1) +const ( + hashSHA1 uint8 = 2 + hashSHA256 uint8 = 4 +) + +// Signature algorithms for TLS 1.2 (See RFC 5246, section A.4.1) +const ( + signatureRSA uint8 = 1 + signatureECDSA uint8 = 3 +) + +// signatureAndHash mirrors the TLS 1.2, SignatureAndHashAlgorithm struct. See +// RFC 5246, section A.4.1. +type signatureAndHash struct { + hash, signature uint8 +} + +// supportedSKXSignatureAlgorithms contains the signature and hash algorithms +// that the code advertises as supported in a TLS 1.2 ClientHello. +var supportedSKXSignatureAlgorithms = []signatureAndHash{ + {hashSHA256, signatureRSA}, + {hashSHA256, signatureECDSA}, + {hashSHA1, signatureRSA}, + {hashSHA1, signatureECDSA}, +} + +// supportedClientCertSignatureAlgorithms contains the signature and hash +// algorithms that the code advertises as supported in a TLS 1.2 +// CertificateRequest. +var supportedClientCertSignatureAlgorithms = []signatureAndHash{ + {hashSHA256, signatureRSA}, + {hashSHA256, signatureECDSA}, +} + +// ConnectionState records basic TLS details about the connection. +type ConnectionState struct { + HandshakeComplete bool // TLS handshake is complete + DidResume bool // connection resumes a previous TLS connection + CipherSuite uint16 // cipher suite in use (TLS_RSA_WITH_RC4_128_SHA, ...) + NegotiatedProtocol string // negotiated next protocol (from Config.NextProtos) + NegotiatedProtocolIsMutual bool // negotiated protocol was advertised by server + ServerName string // server name requested by client, if any (server side only) + PeerCertificates []*x509.Certificate // certificate chain presented by remote peer + VerifiedChains [][]*x509.Certificate // verified chains built from PeerCertificates +} + +// ClientAuthType declares the policy the server will follow for +// TLS Client Authentication. 
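+//
+// For example, a server that requires and verifies client certificates
+// could be configured with (a sketch; pool is an assumed *x509.CertPool):
+//
+//	cfg := &Config{
+//		ClientAuth: RequireAndVerifyClientCert,
+//		ClientCAs:  pool,
+//	}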
+type ClientAuthType int + +const ( + NoClientCert ClientAuthType = iota + RequestClientCert + RequireAnyClientCert + VerifyClientCertIfGiven + RequireAndVerifyClientCert +) + +// A Config structure is used to configure a TLS client or server. After one +// has been passed to a TLS function it must not be modified. +type Config struct { + // Rand provides the source of entropy for nonces and RSA blinding. + // If Rand is nil, TLS uses the cryptographic random reader in package + // crypto/rand. + Rand io.Reader + + // Time returns the current time as the number of seconds since the epoch. + // If Time is nil, TLS uses time.Now. + Time func() time.Time + + // Certificates contains one or more certificate chains + // to present to the other side of the connection. + // Server configurations must include at least one certificate. + Certificates []Certificate + + // NameToCertificate maps from a certificate name to an element of + // Certificates. Note that a certificate name can be of the form + // '*.example.com' and so doesn't have to be a domain name as such. + // See Config.BuildNameToCertificate + // The nil value causes the first element of Certificates to be used + // for all connections. + NameToCertificate map[string]*Certificate + + // RootCAs defines the set of root certificate authorities + // that clients use when verifying server certificates. + // If RootCAs is nil, TLS uses the host's root CA set. + RootCAs *x509.CertPool + + // NextProtos is a list of supported, application level protocols. + NextProtos []string + + // ServerName is included in the client's handshake to support virtual + // hosting. + ServerName string + + // ClientAuth determines the server's policy for + // TLS Client Authentication. The default is NoClientCert. + ClientAuth ClientAuthType + + // ClientCAs defines the set of root certificate authorities + // that servers use if required to verify a client certificate + // by the policy in ClientAuth. + ClientCAs *x509.CertPool + + // InsecureSkipVerify controls whether a client verifies the + // server's certificate chain and host name. + // If InsecureSkipVerify is true, TLS accepts any certificate + // presented by the server and any host name in that certificate. + // In this mode, TLS is susceptible to man-in-the-middle attacks. + // This should be used only for testing. + InsecureSkipVerify bool + + // CipherSuites is a list of supported cipher suites. If CipherSuites + // is nil, TLS uses a list of suites supported by the implementation. + CipherSuites []uint16 + + // PreferServerCipherSuites controls whether the server selects the + // client's most preferred ciphersuite, or the server's most preferred + // ciphersuite. If true then the server's preference, as expressed in + // the order of elements in CipherSuites, is used. + PreferServerCipherSuites bool + + // SessionTicketsDisabled may be set to true to disable session ticket + // (resumption) support. + SessionTicketsDisabled bool + + // SessionTicketKey is used by TLS servers to provide session + // resumption. See RFC 5077. If zero, it will be filled with + // random data before the first server handshake. + // + // If multiple servers are terminating connections for the same host + // they should all have the same SessionTicketKey. If the + // SessionTicketKey leaks, previously recorded and future TLS + // connections using that key are compromised. + SessionTicketKey [32]byte + + // MinVersion contains the minimum SSL/TLS version that is acceptable. 
+ // If zero, then SSLv3 is taken as the minimum. + MinVersion uint16 + + // MaxVersion contains the maximum SSL/TLS version that is acceptable. + // If zero, then the maximum version supported by this package is used, + // which is currently TLS 1.2. + MaxVersion uint16 + + serverInitOnce sync.Once // guards calling (*Config).serverInit +} + +func (c *Config) serverInit() { + if c.SessionTicketsDisabled { + return + } + + // If the key has already been set then we have nothing to do. + for _, b := range c.SessionTicketKey { + if b != 0 { + return + } + } + + if _, err := io.ReadFull(c.rand(), c.SessionTicketKey[:]); err != nil { + c.SessionTicketsDisabled = true + } +} + +func (c *Config) rand() io.Reader { + r := c.Rand + if r == nil { + return rand.Reader + } + return r +} + +func (c *Config) time() time.Time { + t := c.Time + if t == nil { + t = time.Now + } + return t() +} + +func (c *Config) cipherSuites() []uint16 { + s := c.CipherSuites + if s == nil { + s = defaultCipherSuites() + } + return s +} + +func (c *Config) minVersion() uint16 { + if c == nil || c.MinVersion == 0 { + return minVersion + } + return c.MinVersion +} + +func (c *Config) maxVersion() uint16 { + if c == nil || c.MaxVersion == 0 { + return maxVersion + } + return c.MaxVersion +} + +// mutualVersion returns the protocol version to use given the advertised +// version of the peer. +func (c *Config) mutualVersion(vers uint16) (uint16, bool) { + minVersion := c.minVersion() + maxVersion := c.maxVersion() + + if vers < minVersion { + return 0, false + } + if vers > maxVersion { + vers = maxVersion + } + return vers, true +} + +// getCertificateForName returns the best certificate for the given name, +// defaulting to the first element of c.Certificates if there are no good +// options. +func (c *Config) getCertificateForName(name string) *Certificate { + if len(c.Certificates) == 1 || c.NameToCertificate == nil { + // There's only one choice, so no point doing any work. + return &c.Certificates[0] + } + + name = strings.ToLower(name) + for len(name) > 0 && name[len(name)-1] == '.' { + name = name[:len(name)-1] + } + + if cert, ok := c.NameToCertificate[name]; ok { + return cert + } + + // try replacing labels in the name with wildcards until we get a + // match. + labels := strings.Split(name, ".") + for i := range labels { + labels[i] = "*" + candidate := strings.Join(labels, ".") + if cert, ok := c.NameToCertificate[candidate]; ok { + return cert + } + } + + // If nothing matches, return the first certificate. + return &c.Certificates[0] +} + +// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate +// from the CommonName and SubjectAlternateName fields of each of the leaf +// certificates. +func (c *Config) BuildNameToCertificate() { + c.NameToCertificate = make(map[string]*Certificate) + for i := range c.Certificates { + cert := &c.Certificates[i] + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + continue + } + if len(x509Cert.Subject.CommonName) > 0 { + c.NameToCertificate[x509Cert.Subject.CommonName] = cert + } + for _, san := range x509Cert.DNSNames { + c.NameToCertificate[san] = cert + } + } +} + +// A Certificate is a chain of one or more certificates, leaf first. +type Certificate struct { + Certificate [][]byte + PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey, *ecdsa.PrivateKey + // OCSPStaple contains an optional OCSP response which will be served + // to clients that request it. 
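+ // (Clients request the staple via the status_request extension,
+ // statusTypeOCSP above, so no separate OCSP round trip is needed.)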
+ OCSPStaple []byte + // Leaf is the parsed form of the leaf certificate, which may be + // initialized using x509.ParseCertificate to reduce per-handshake + // processing for TLS clients doing client authentication. If nil, the + // leaf certificate will be parsed as needed. + Leaf *x509.Certificate +} + +// A TLS record. +type record struct { + contentType recordType + major, minor uint8 + payload []byte +} + +type handshakeMessage interface { + marshal() []byte + unmarshal([]byte) bool +} + +// TODO(jsing): Make these available to both crypto/x509 and crypto/tls. +type dsaSignature struct { + R, S *big.Int +} + +type ecdsaSignature dsaSignature + +var emptyConfig Config + +func defaultConfig() *Config { + return &emptyConfig +} + +var ( + once sync.Once + varDefaultCipherSuites []uint16 +) + +func defaultCipherSuites() []uint16 { + once.Do(initDefaultCipherSuites) + return varDefaultCipherSuites +} + +func initDefaultCipherSuites() { + varDefaultCipherSuites = make([]uint16, len(cipherSuites)) + for i, suite := range cipherSuites { + varDefaultCipherSuites[i] = suite.id + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/conn.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/conn.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/conn.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1026 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TLS low level connection and record layer + +package tls + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "crypto/x509" + "errors" + "io" + "net" + "sync" + "time" +) + +// A Conn represents a secured connection. +// It implements the net.Conn interface. +type Conn struct { + // constant + conn net.Conn + isClient bool + + // constant after handshake; protected by handshakeMutex + handshakeMutex sync.Mutex // handshakeMutex < in.Mutex, out.Mutex, errMutex + vers uint16 // TLS version + haveVers bool // version has been negotiated + config *Config // configuration passed to constructor + handshakeComplete bool + didResume bool // whether this connection was a session resumption + cipherSuite uint16 + ocspResponse []byte // stapled OCSP response + peerCertificates []*x509.Certificate + // verifiedChains contains the certificate chains that we built, as + // opposed to the ones presented by the server. + verifiedChains [][]*x509.Certificate + // serverName contains the server name indicated by the client, if any. + serverName string + + clientProtocol string + clientProtocolFallback bool + + // first permanent error + connErr + + // input/output + in, out halfConn // in.Mutex < out.Mutex + rawInput *block // raw input, right off the wire + input *block // application data waiting to be read + hand bytes.Buffer // handshake data waiting to be read + + tmp [16]byte +} + +type connErr struct { + mu sync.Mutex + value error +} + +func (e *connErr) setError(err error) error { + e.mu.Lock() + defer e.mu.Unlock() + + if e.value == nil { + e.value = err + } + return err +} + +func (e *connErr) error() error { + e.mu.Lock() + defer e.mu.Unlock() + return e.value +} + +// Access to net.Conn methods. +// Cannot just embed net.Conn because that would +// export the struct field too. + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
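+// Because a *Conn implements net.Conn, the address and deadline methods
+// below let it stand in wherever a net.Conn is expected; a minimal sketch
+// (tlsConn is an assumed *Conn value):
+//
+//	var nc net.Conn = tlsConn
+//	fmt.Println(nc.RemoteAddr())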
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// SetDeadline sets the read and write deadlines associated with the connection.
+// A zero value for t means Read and Write will not time out.
+// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+func (c *Conn) SetDeadline(t time.Time) error {
+ return c.conn.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline on the underlying connection.
+// A zero value for t means Read will not time out.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline on the underlying connection.
+// A zero value for t means Write will not time out.
+// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ return c.conn.SetWriteDeadline(t)
+}
+
+// A halfConn represents one direction of the record layer
+// connection, either sending or receiving.
+type halfConn struct {
+ sync.Mutex
+ version uint16 // protocol version
+ cipher interface{} // cipher algorithm
+ mac macFunction
+ seq [8]byte // 64-bit sequence number
+ bfree *block // list of free blocks
+
+ nextCipher interface{} // next encryption state
+ nextMac macFunction // next MAC algorithm
+
+ // used to save allocating a new buffer for each MAC.
+ inDigestBuf, outDigestBuf []byte
+}
+
+// prepareCipherSpec sets the encryption and MAC states
+// that a subsequent changeCipherSpec will use.
+func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) {
+ hc.version = version
+ hc.nextCipher = cipher
+ hc.nextMac = mac
+}
+
+// changeCipherSpec changes the encryption and MAC states
+// to the ones previously passed to prepareCipherSpec.
+func (hc *halfConn) changeCipherSpec() error {
+ if hc.nextCipher == nil {
+ return alertInternalError
+ }
+ hc.cipher = hc.nextCipher
+ hc.mac = hc.nextMac
+ hc.nextCipher = nil
+ hc.nextMac = nil
+ for i := range hc.seq {
+ hc.seq[i] = 0
+ }
+ return nil
+}
+
+// incSeq increments the sequence number.
+func (hc *halfConn) incSeq() {
+ for i := 7; i >= 0; i-- {
+ hc.seq[i]++
+ if hc.seq[i] != 0 {
+ return
+ }
+ }
+
+ // Not allowed to let sequence number wrap.
+ // Instead, must renegotiate before it does.
+ // Not likely enough to bother.
+ panic("TLS: sequence number wraparound")
+}
+
+// resetSeq resets the sequence number to zero.
+func (hc *halfConn) resetSeq() {
+ for i := range hc.seq {
+ hc.seq[i] = 0
+ }
+}
+
+// removePadding returns an unpadded slice, in constant time, which is a prefix
+// of the input. It also returns a byte which is equal to 255 if the padding
+// was valid and 0 otherwise. 
See RFC 2246, section 6.2.3.2 +func removePadding(payload []byte) ([]byte, byte) { + if len(payload) < 1 { + return payload, 0 + } + + paddingLen := payload[len(payload)-1] + t := uint(len(payload)-1) - uint(paddingLen) + // if len(payload) >= (paddingLen - 1) then the MSB of t is zero + good := byte(int32(^t) >> 31) + + toCheck := 255 // the maximum possible padding length + // The length of the padded data is public, so we can use an if here + if toCheck+1 > len(payload) { + toCheck = len(payload) - 1 + } + + for i := 0; i < toCheck; i++ { + t := uint(paddingLen) - uint(i) + // if i <= paddingLen then the MSB of t is zero + mask := byte(int32(^t) >> 31) + b := payload[len(payload)-1-i] + good &^= mask&paddingLen ^ mask&b + } + + // We AND together the bits of good and replicate the result across + // all the bits. + good &= good << 4 + good &= good << 2 + good &= good << 1 + good = uint8(int8(good) >> 7) + + toRemove := good&paddingLen + 1 + return payload[:len(payload)-int(toRemove)], good +} + +// removePaddingSSL30 is a replacement for removePadding in the case that the +// protocol version is SSLv3. In this version, the contents of the padding +// are random and cannot be checked. +func removePaddingSSL30(payload []byte) ([]byte, byte) { + if len(payload) < 1 { + return payload, 0 + } + + paddingLen := int(payload[len(payload)-1]) + 1 + if paddingLen > len(payload) { + return payload, 0 + } + + return payload[:len(payload)-paddingLen], 255 +} + +func roundUp(a, b int) int { + return a + (b-a%b)%b +} + +// cbcMode is an interface for block ciphers using cipher block chaining. +type cbcMode interface { + cipher.BlockMode + SetIV([]byte) +} + +// decrypt checks and strips the mac and decrypts the data in b. Returns a +// success boolean, the number of bytes to skip from the start of the record in +// order to get the application payload, and an optional alert value. +func (hc *halfConn) decrypt(b *block) (ok bool, prefixLen int, alertValue alert) { + // pull out payload + payload := b.data[recordHeaderLen:] + + macSize := 0 + if hc.mac != nil { + macSize = hc.mac.Size() + } + + paddingGood := byte(255) + explicitIVLen := 0 + + // decrypt + if hc.cipher != nil { + switch c := hc.cipher.(type) { + case cipher.Stream: + c.XORKeyStream(payload, payload) + case cipher.AEAD: + explicitIVLen = 8 + if len(payload) < explicitIVLen { + return false, 0, alertBadRecordMAC + } + nonce := payload[:8] + payload = payload[8:] + + var additionalData [13]byte + copy(additionalData[:], hc.seq[:]) + copy(additionalData[8:], b.data[:3]) + n := len(payload) - c.Overhead() + additionalData[11] = byte(n >> 8) + additionalData[12] = byte(n) + var err error + payload, err = c.Open(payload[:0], nonce, payload, additionalData[:]) + if err != nil { + return false, 0, alertBadRecordMAC + } + b.resize(recordHeaderLen + explicitIVLen + len(payload)) + case cbcMode: + blockSize := c.BlockSize() + if hc.version >= VersionTLS11 { + explicitIVLen = blockSize + } + + if len(payload)%blockSize != 0 || len(payload) < roundUp(explicitIVLen+macSize+1, blockSize) { + return false, 0, alertBadRecordMAC + } + + if explicitIVLen > 0 { + c.SetIV(payload[:explicitIVLen]) + payload = payload[explicitIVLen:] + } + c.CryptBlocks(payload, payload) + if hc.version == VersionSSL30 { + payload, paddingGood = removePaddingSSL30(payload) + } else { + payload, paddingGood = removePadding(payload) + } + b.resize(recordHeaderLen + explicitIVLen + len(payload)) + + // note that we still have a timing side-channel in the + // MAC check, below. 
An attacker can align the record + // so that a correct padding will cause one less hash + // block to be calculated. Then they can iteratively + // decrypt a record by breaking each byte. See + // "Password Interception in a SSL/TLS Channel", Brice + // Canvel et al. + // + // However, our behavior matches OpenSSL, so we leak + // only as much as they do. + default: + panic("unknown cipher type") + } + } + + // check, strip mac + if hc.mac != nil { + if len(payload) < macSize { + return false, 0, alertBadRecordMAC + } + + // strip mac off payload, b.data + n := len(payload) - macSize + b.data[3] = byte(n >> 8) + b.data[4] = byte(n) + b.resize(recordHeaderLen + explicitIVLen + n) + remoteMAC := payload[n:] + localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], payload[:n]) + + if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 { + return false, 0, alertBadRecordMAC + } + hc.inDigestBuf = localMAC + } + hc.incSeq() + + return true, recordHeaderLen + explicitIVLen, 0 +} + +// padToBlockSize calculates the needed padding block, if any, for a payload. +// On exit, prefix aliases payload and extends to the end of the last full +// block of payload. finalBlock is a fresh slice which contains the contents of +// any suffix of payload as well as the needed padding to make finalBlock a +// full block. +func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) { + overrun := len(payload) % blockSize + paddingLen := blockSize - overrun + prefix = payload[:len(payload)-overrun] + finalBlock = make([]byte, blockSize) + copy(finalBlock, payload[len(payload)-overrun:]) + for i := overrun; i < blockSize; i++ { + finalBlock[i] = byte(paddingLen - 1) + } + return +} + +// encrypt encrypts and macs the data in b. +func (hc *halfConn) encrypt(b *block, explicitIVLen int) (bool, alert) { + // mac + if hc.mac != nil { + mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data[:recordHeaderLen], b.data[recordHeaderLen+explicitIVLen:]) + + n := len(b.data) + b.resize(n + len(mac)) + copy(b.data[n:], mac) + hc.outDigestBuf = mac + } + + payload := b.data[recordHeaderLen:] + + // encrypt + if hc.cipher != nil { + switch c := hc.cipher.(type) { + case cipher.Stream: + c.XORKeyStream(payload, payload) + case cipher.AEAD: + payloadLen := len(b.data) - recordHeaderLen - explicitIVLen + b.resize(len(b.data) + c.Overhead()) + nonce := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen] + payload := b.data[recordHeaderLen+explicitIVLen:] + payload = payload[:payloadLen] + + var additionalData [13]byte + copy(additionalData[:], hc.seq[:]) + copy(additionalData[8:], b.data[:3]) + additionalData[11] = byte(payloadLen >> 8) + additionalData[12] = byte(payloadLen) + + c.Seal(payload[:0], nonce, payload, additionalData[:]) + case cbcMode: + blockSize := c.BlockSize() + if explicitIVLen > 0 { + c.SetIV(payload[:explicitIVLen]) + payload = payload[explicitIVLen:] + } + prefix, finalBlock := padToBlockSize(payload, blockSize) + b.resize(recordHeaderLen + explicitIVLen + len(prefix) + len(finalBlock)) + c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen:], prefix) + c.CryptBlocks(b.data[recordHeaderLen+explicitIVLen+len(prefix):], finalBlock) + default: + panic("unknown cipher type") + } + } + + // update length to include MAC and any block padding needed. + n := len(b.data) - recordHeaderLen + b.data[3] = byte(n >> 8) + b.data[4] = byte(n) + hc.incSeq() + + return true, 0 +} + +// A block is a simple data buffer. 
+type block struct { + data []byte + off int // index for Read + link *block +} + +// resize resizes block to be n bytes, growing if necessary. +func (b *block) resize(n int) { + if n > cap(b.data) { + b.reserve(n) + } + b.data = b.data[0:n] +} + +// reserve makes sure that block contains a capacity of at least n bytes. +func (b *block) reserve(n int) { + if cap(b.data) >= n { + return + } + m := cap(b.data) + if m == 0 { + m = 1024 + } + for m < n { + m *= 2 + } + data := make([]byte, len(b.data), m) + copy(data, b.data) + b.data = data +} + +// readFromUntil reads from r into b until b contains at least n bytes +// or else returns an error. +func (b *block) readFromUntil(r io.Reader, n int) error { + // quick case + if len(b.data) >= n { + return nil + } + + // read until have enough. + b.reserve(n) + for { + m, err := r.Read(b.data[len(b.data):cap(b.data)]) + b.data = b.data[0 : len(b.data)+m] + if len(b.data) >= n { + break + } + if err != nil { + return err + } + } + return nil +} + +func (b *block) Read(p []byte) (n int, err error) { + n = copy(p, b.data[b.off:]) + b.off += n + return +} + +// newBlock allocates a new block, from hc's free list if possible. +func (hc *halfConn) newBlock() *block { + b := hc.bfree + if b == nil { + return new(block) + } + hc.bfree = b.link + b.link = nil + b.resize(0) + return b +} + +// freeBlock returns a block to hc's free list. +// The protocol is such that each side only has a block or two on +// its free list at a time, so there's no need to worry about +// trimming the list, etc. +func (hc *halfConn) freeBlock(b *block) { + b.link = hc.bfree + hc.bfree = b +} + +// splitBlock splits a block after the first n bytes, +// returning a block with those n bytes and a +// block with the remainder. the latter may be nil. +func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) { + if len(b.data) <= n { + return b, nil + } + bb := hc.newBlock() + bb.resize(len(b.data) - n) + copy(bb.data, b.data[n:]) + b.data = b.data[0:n] + return b, bb +} + +// readRecord reads the next TLS record from the connection +// and updates the record layer state. +// c.in.Mutex <= L; c.input == nil. +func (c *Conn) readRecord(want recordType) error { + // Caller must be in sync with connection: + // handshake data if handshake not yet completed, + // else application data. (We don't support renegotiation.) + switch want { + default: + return c.sendAlert(alertInternalError) + case recordTypeHandshake, recordTypeChangeCipherSpec: + if c.handshakeComplete { + return c.sendAlert(alertInternalError) + } + case recordTypeApplicationData: + if !c.handshakeComplete { + return c.sendAlert(alertInternalError) + } + } + +Again: + if c.rawInput == nil { + c.rawInput = c.in.newBlock() + } + b := c.rawInput + + // Read header, payload. + if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil { + // RFC suggests that EOF without an alertCloseNotify is + // an error, but popular web sites seem to do this, + // so we can't make it an error. + // if err == io.EOF { + // err = io.ErrUnexpectedEOF + // } + if e, ok := err.(net.Error); !ok || !e.Temporary() { + c.setError(err) + } + return err + } + typ := recordType(b.data[0]) + + // No valid TLS record has a type of 0x80, however SSLv2 handshakes + // start with a uint16 length where the MSB is set and the first record + // is always < 256 bytes long. Therefore typ == 0x80 strongly suggests + // an SSLv2 client. 
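+ // For instance (an illustrative value, not taken from a real
+ // capture): an SSLv2 ClientHello whose body is 60 bytes long
+ // starts with the two length bytes 0x80 0x3c, so the byte that
+ // would hold a TLS record type reads 0x80 here.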
+ if want == recordTypeHandshake && typ == 0x80 { + c.sendAlert(alertProtocolVersion) + return errors.New("tls: unsupported SSLv2 handshake received") + } + + vers := uint16(b.data[1])<<8 | uint16(b.data[2]) + n := int(b.data[3])<<8 | int(b.data[4]) + if c.haveVers && vers != c.vers { + return c.sendAlert(alertProtocolVersion) + } + if n > maxCiphertext { + return c.sendAlert(alertRecordOverflow) + } + if !c.haveVers { + // First message, be extra suspicious: + // this might not be a TLS client. + // Bail out before reading a full 'body', if possible. + // The current max version is 3.1. + // If the version is >= 16.0, it's probably not real. + // Similarly, a clientHello message encodes in + // well under a kilobyte. If the length is >= 12 kB, + // it's probably not real. + if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 || n >= 0x3000 { + return c.sendAlert(alertUnexpectedMessage) + } + } + if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if e, ok := err.(net.Error); !ok || !e.Temporary() { + c.setError(err) + } + return err + } + + // Process message. + b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n) + ok, off, err := c.in.decrypt(b) + if !ok { + return c.sendAlert(err) + } + b.off = off + data := b.data[b.off:] + if len(data) > maxPlaintext { + c.sendAlert(alertRecordOverflow) + c.in.freeBlock(b) + return c.error() + } + + switch typ { + default: + c.sendAlert(alertUnexpectedMessage) + + case recordTypeAlert: + if len(data) != 2 { + c.sendAlert(alertUnexpectedMessage) + break + } + if alert(data[1]) == alertCloseNotify { + c.setError(io.EOF) + break + } + switch data[0] { + case alertLevelWarning: + // drop on the floor + c.in.freeBlock(b) + goto Again + case alertLevelError: + c.setError(&net.OpError{Op: "remote error", Err: alert(data[1])}) + default: + c.sendAlert(alertUnexpectedMessage) + } + + case recordTypeChangeCipherSpec: + if typ != want || len(data) != 1 || data[0] != 1 { + c.sendAlert(alertUnexpectedMessage) + break + } + err := c.in.changeCipherSpec() + if err != nil { + c.sendAlert(err.(alert)) + } + + case recordTypeApplicationData: + if typ != want { + c.sendAlert(alertUnexpectedMessage) + break + } + c.input = b + b = nil + + case recordTypeHandshake: + // TODO(rsc): Should at least pick off connection close. + if typ != want && !c.isClient { + return c.sendAlert(alertNoRenegotiation) + } + c.hand.Write(data) + } + + if b != nil { + c.in.freeBlock(b) + } + return c.error() +} + +// sendAlert sends a TLS alert message. +// c.out.Mutex <= L. +func (c *Conn) sendAlertLocked(err alert) error { + switch err { + case alertNoRenegotiation, alertCloseNotify: + c.tmp[0] = alertLevelWarning + default: + c.tmp[0] = alertLevelError + } + c.tmp[1] = byte(err) + c.writeRecord(recordTypeAlert, c.tmp[0:2]) + // closeNotify is a special case in that it isn't an error: + if err != alertCloseNotify { + return c.setError(&net.OpError{Op: "local error", Err: err}) + } + return nil +} + +// sendAlert sends a TLS alert message. +// L < c.out.Mutex. +func (c *Conn) sendAlert(err alert) error { + c.out.Lock() + defer c.out.Unlock() + return c.sendAlertLocked(err) +} + +// writeRecord writes a TLS record with the given type and payload +// to the connection and updates the record layer state. +// c.out.Mutex <= L. 
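+//
+// For reference (a sketch of the framing before any MAC or encryption is
+// applied): an application data record carrying the two bytes "hi" at
+// TLS 1.0 has the five-byte header
+//
+//	17 03 01 00 02
+//
+// that is, type 0x17, version 0x0301 and a big-endian length of 2; these
+// are the b.data[0] through b.data[4] assignments below.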
+func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) { + b := c.out.newBlock() + for len(data) > 0 { + m := len(data) + if m > maxPlaintext { + m = maxPlaintext + } + explicitIVLen := 0 + explicitIVIsSeq := false + + var cbc cbcMode + if c.out.version >= VersionTLS11 { + var ok bool + if cbc, ok = c.out.cipher.(cbcMode); ok { + explicitIVLen = cbc.BlockSize() + } + } + if explicitIVLen == 0 { + if _, ok := c.out.cipher.(cipher.AEAD); ok { + explicitIVLen = 8 + // The AES-GCM construction in TLS has an + // explicit nonce so that the nonce can be + // random. However, the nonce is only 8 bytes + // which is too small for a secure, random + // nonce. Therefore we use the sequence number + // as the nonce. + explicitIVIsSeq = true + } + } + b.resize(recordHeaderLen + explicitIVLen + m) + b.data[0] = byte(typ) + vers := c.vers + if vers == 0 { + // Some TLS servers fail if the record version is + // greater than TLS 1.0 for the initial ClientHello. + vers = VersionTLS10 + } + b.data[1] = byte(vers >> 8) + b.data[2] = byte(vers) + b.data[3] = byte(m >> 8) + b.data[4] = byte(m) + if explicitIVLen > 0 { + explicitIV := b.data[recordHeaderLen : recordHeaderLen+explicitIVLen] + if explicitIVIsSeq { + copy(explicitIV, c.out.seq[:]) + } else { + if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil { + break + } + } + } + copy(b.data[recordHeaderLen+explicitIVLen:], data) + c.out.encrypt(b, explicitIVLen) + _, err = c.conn.Write(b.data) + if err != nil { + break + } + n += m + data = data[m:] + } + c.out.freeBlock(b) + + if typ == recordTypeChangeCipherSpec { + err = c.out.changeCipherSpec() + if err != nil { + // Cannot call sendAlert directly, + // because we already hold c.out.Mutex. + c.tmp[0] = alertLevelError + c.tmp[1] = byte(err.(alert)) + c.writeRecord(recordTypeAlert, c.tmp[0:2]) + return n, c.setError(&net.OpError{Op: "local error", Err: err}) + } + } + return +} + +// readHandshake reads the next handshake message from +// the record layer. +// c.in.Mutex < L; c.out.Mutex < L. 
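+//
+// Handshake messages carry their own four-byte header: a type byte
+// followed by a 24-bit big-endian length, which is what the
+// int(data[1])<<16 | int(data[2])<<8 | int(data[3]) expression below
+// decodes. As a sketch, a Finished message with the usual 12-byte
+// verify_data is framed as
+//
+//	14 00 00 0c <12 bytes of verify_data>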
+func (c *Conn) readHandshake() (interface{}, error) { + for c.hand.Len() < 4 { + if err := c.error(); err != nil { + return nil, err + } + if err := c.readRecord(recordTypeHandshake); err != nil { + return nil, err + } + } + + data := c.hand.Bytes() + n := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) + if n > maxHandshake { + c.sendAlert(alertInternalError) + return nil, c.error() + } + for c.hand.Len() < 4+n { + if err := c.error(); err != nil { + return nil, err + } + if err := c.readRecord(recordTypeHandshake); err != nil { + return nil, err + } + } + data = c.hand.Next(4 + n) + var m handshakeMessage + switch data[0] { + case typeHelloRequest: + m = new(helloRequestMsg) + case typeClientHello: + m = new(clientHelloMsg) + case typeServerHello: + m = new(serverHelloMsg) + case typeCertificate: + m = new(certificateMsg) + case typeCertificateRequest: + m = &certificateRequestMsg{ + hasSignatureAndHash: c.vers >= VersionTLS12, + } + case typeCertificateStatus: + m = new(certificateStatusMsg) + case typeServerKeyExchange: + m = new(serverKeyExchangeMsg) + case typeServerHelloDone: + m = new(serverHelloDoneMsg) + case typeClientKeyExchange: + m = new(clientKeyExchangeMsg) + case typeCertificateVerify: + m = &certificateVerifyMsg{ + hasSignatureAndHash: c.vers >= VersionTLS12, + } + case typeNextProtocol: + m = new(nextProtoMsg) + case typeFinished: + m = new(finishedMsg) + default: + c.sendAlert(alertUnexpectedMessage) + return nil, alertUnexpectedMessage + } + + // The handshake message unmarshallers + // expect to be able to keep references to data, + // so pass in a fresh copy that won't be overwritten. + data = append([]byte(nil), data...) + + if !m.unmarshal(data) { + c.sendAlert(alertUnexpectedMessage) + return nil, alertUnexpectedMessage + } + return m, nil +} + +// Write writes data to the connection. +func (c *Conn) Write(b []byte) (int, error) { + if err := c.error(); err != nil { + return 0, err + } + + if err := c.Handshake(); err != nil { + return 0, c.setError(err) + } + + c.out.Lock() + defer c.out.Unlock() + + if !c.handshakeComplete { + return 0, alertInternalError + } + + // SSL 3.0 and TLS 1.0 are susceptible to a chosen-plaintext + // attack when using block mode ciphers due to predictable IVs. + // This can be prevented by splitting each Application Data + // record into two records, effectively randomizing the IV. + // + // http://www.openssl.org/~bodo/tls-cbc.txt + // https://bugzilla.mozilla.org/show_bug.cgi?id=665814 + // http://www.imperialviolet.org/2012/01/15/beastfollowup.html + + var m int + if len(b) > 1 && c.vers <= VersionTLS10 { + if _, ok := c.out.cipher.(cipher.BlockMode); ok { + n, err := c.writeRecord(recordTypeApplicationData, b[:1]) + if err != nil { + return n, c.setError(err) + } + m, b = 1, b[1:] + } + } + + n, err := c.writeRecord(recordTypeApplicationData, b) + return n + m, c.setError(err) +} + +func (c *Conn) handleRenegotiation() error { + c.handshakeComplete = false + if !c.isClient { + panic("renegotiation should only happen for a client") + } + + msg, err := c.readHandshake() + if err != nil { + return err + } + _, ok := msg.(*helloRequestMsg) + if !ok { + c.sendAlert(alertUnexpectedMessage) + return alertUnexpectedMessage + } + + return c.Handshake() +} + +// Read can be made to time out and return a net.Error with Timeout() == true +// after a fixed time limit; see SetDeadline and SetReadDeadline. 
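+//
+// A minimal sketch of a bounded read (assuming an established *Conn named
+// conn and a byte slice named buf):
+//
+//	conn.SetReadDeadline(time.Now().Add(5 * time.Second))
+//	n, err := conn.Read(buf)
+//	if ne, ok := err.(net.Error); ok && ne.Timeout() {
+//		// handle the timeout
+//	}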
+func (c *Conn) Read(b []byte) (n int, err error) { + if err = c.Handshake(); err != nil { + return + } + + c.in.Lock() + defer c.in.Unlock() + + // Some OpenSSL servers send empty records in order to randomize the + // CBC IV. So this loop ignores a limited number of empty records. + const maxConsecutiveEmptyRecords = 100 + for emptyRecordCount := 0; emptyRecordCount <= maxConsecutiveEmptyRecords; emptyRecordCount++ { + for c.input == nil && c.error() == nil { + if err := c.readRecord(recordTypeApplicationData); err != nil { + // Soft error, like EAGAIN + return 0, err + } + if c.hand.Len() > 0 { + // We received handshake bytes, indicating the start of + // a renegotiation. + if err := c.handleRenegotiation(); err != nil { + return 0, err + } + continue + } + } + if err := c.error(); err != nil { + return 0, err + } + + n, err = c.input.Read(b) + if c.input.off >= len(c.input.data) { + c.in.freeBlock(c.input) + c.input = nil + } + + if n != 0 || err != nil { + return n, err + } + } + + return 0, io.ErrNoProgress +} + +// Close closes the connection. +func (c *Conn) Close() error { + var alertErr error + + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + if c.handshakeComplete { + alertErr = c.sendAlert(alertCloseNotify) + } + + if err := c.conn.Close(); err != nil { + return err + } + return alertErr +} + +// Handshake runs the client or server handshake +// protocol if it has not yet been run. +// Most uses of this package need not call Handshake +// explicitly: the first Read or Write will call it automatically. +func (c *Conn) Handshake() error { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + if err := c.error(); err != nil { + return err + } + if c.handshakeComplete { + return nil + } + if c.isClient { + return c.clientHandshake() + } + return c.serverHandshake() +} + +// ConnectionState returns basic TLS details about the connection. +func (c *Conn) ConnectionState() ConnectionState { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + + var state ConnectionState + state.HandshakeComplete = c.handshakeComplete + if c.handshakeComplete { + state.NegotiatedProtocol = c.clientProtocol + state.DidResume = c.didResume + state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback + state.CipherSuite = c.cipherSuite + state.PeerCertificates = c.peerCertificates + state.VerifiedChains = c.verifiedChains + state.ServerName = c.serverName + } + + return state +} + +// OCSPResponse returns the stapled OCSP response from the TLS server, if +// any. (Only valid for client connections.) +func (c *Conn) OCSPResponse() []byte { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + + return c.ocspResponse +} + +// VerifyHostname checks that the peer certificate chain is valid for +// connecting to host. If so, it returns nil; if not, it returns an error +// describing the problem. +func (c *Conn) VerifyHostname(host string) error { + c.handshakeMutex.Lock() + defer c.handshakeMutex.Unlock() + if !c.isClient { + return errors.New("VerifyHostname called on TLS server connection") + } + if !c.handshakeComplete { + return errors.New("TLS handshake has not yet been performed") + } + return c.peerCertificates[0].VerifyHostname(host) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/conn_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "testing" +) + +func TestRoundUp(t *testing.T) { + if roundUp(0, 16) != 0 || + roundUp(1, 16) != 16 || + roundUp(15, 16) != 16 || + roundUp(16, 16) != 16 || + roundUp(17, 16) != 32 { + t.Error("roundUp broken") + } +} + +var paddingTests = []struct { + in []byte + good bool + expectedLen int +}{ + {[]byte{1, 2, 3, 4, 0}, true, 4}, + {[]byte{1, 2, 3, 4, 0, 1}, false, 0}, + {[]byte{1, 2, 3, 4, 99, 99}, false, 0}, + {[]byte{1, 2, 3, 4, 1, 1}, true, 4}, + {[]byte{1, 2, 3, 2, 2, 2}, true, 3}, + {[]byte{1, 2, 3, 3, 3, 3}, true, 2}, + {[]byte{1, 2, 3, 4, 3, 3}, false, 0}, + {[]byte{1, 4, 4, 4, 4, 4}, true, 1}, + {[]byte{5, 5, 5, 5, 5, 5}, true, 0}, + {[]byte{6, 6, 6, 6, 6, 6}, false, 0}, +} + +func TestRemovePadding(t *testing.T) { + for i, test := range paddingTests { + payload, good := removePadding(test.in) + expectedGood := byte(255) + if !test.good { + expectedGood = 0 + } + if good != expectedGood { + t.Errorf("#%d: wrong validity, want:%d got:%d", i, expectedGood, good) + } + if good == 255 && len(payload) != test.expectedLen { + t.Errorf("#%d: got %d, want %d", i, len(payload), test.expectedLen) + } + } +} + +var certExampleCom = `308201403081eda003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313138353835325a170d3132303933303138353835325a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31a301830160603551d11040f300d820b6578616d706c652e636f6d300b06092a864886f70d0101050341001a0b419d2c74474c6450654e5f10b32bf426ffdf55cad1c52602e7a9151513a3424c70f5960dcd682db0c33769cc1daa3fcdd3db10809d2392ed4a1bf50ced18` + +var certWildcardExampleCom = `308201423081efa003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303034365a170d3132303933303139303034365a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31c301a30180603551d110411300f820d2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001676f0c9e7c33c1b656ed5a6476c4e2ee9ec8e62df7407accb1875272b2edd0a22096cb2c22598d11604104d604f810eb4b5987ca6bb319c7e6ce48725c54059` + +var certFooExampleCom = `308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303131345a170d3132303933303139303131345a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f666f6f2e6578616d706c652e636f6d300b06092a864886f70d010105034100646a2a51f2aa2477add854b462cf5207ba16d3213ffb5d3d0eed473fbf09935019192d1d5b8ca6a2407b424cf04d97c4cd9197c83ecf81f0eab9464a1109d09f` + +var certDoubleWildcardExampleCom = 
`308201443081f1a003020102020101300b06092a864886f70d010105301e311c301a060355040a131354657374696e67204365727469666963617465301e170d3131313030313139303134315a170d3132303933303139303134315a301e311c301a060355040a131354657374696e67204365727469666963617465305a300b06092a864886f70d010101034b003048024100bced6e32368599eeddf18796bfd03958a154f87e5b084f96e85136a56b886733592f493f0fc68b0d6b3551781cb95e13c5de458b28d6fb60d20a9129313261410203010001a31e301c301a0603551d1104133011820f2a2e2a2e6578616d706c652e636f6d300b06092a864886f70d0101050341001c3de267975f56ef57771c6218ef95ecc65102e57bd1defe6f7efea90d9b26cf40de5bd7ad75e46201c7f2a92aaa3e907451e9409f65e28ddb6db80d726290f6` + +func TestCertificateSelection(t *testing.T) { + config := Config{ + Certificates: []Certificate{ + { + Certificate: [][]byte{fromHex(certExampleCom)}, + }, + { + Certificate: [][]byte{fromHex(certWildcardExampleCom)}, + }, + { + Certificate: [][]byte{fromHex(certFooExampleCom)}, + }, + { + Certificate: [][]byte{fromHex(certDoubleWildcardExampleCom)}, + }, + }, + } + + config.BuildNameToCertificate() + + pointerToIndex := func(c *Certificate) int { + for i := range config.Certificates { + if c == &config.Certificates[i] { + return i + } + } + return -1 + } + + if n := pointerToIndex(config.getCertificateForName("example.com")); n != 0 { + t.Errorf("example.com returned certificate %d, not 0", n) + } + if n := pointerToIndex(config.getCertificateForName("bar.example.com")); n != 1 { + t.Errorf("bar.example.com returned certificate %d, not 1", n) + } + if n := pointerToIndex(config.getCertificateForName("foo.example.com")); n != 2 { + t.Errorf("foo.example.com returned certificate %d, not 2", n) + } + if n := pointerToIndex(config.getCertificateForName("foo.bar.example.com")); n != 3 { + t.Errorf("foo.bar.example.com returned certificate %d, not 3", n) + } + if n := pointerToIndex(config.getCertificateForName("foo.bar.baz.example.com")); n != 0 { + t.Errorf("foo.bar.baz.example.com returned certificate %d, not 0", n) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/generate_cert.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Generate a self-signed X.509 certificate for a TLS server. Outputs to +// 'cert.pem' and 'key.pem' and will overwrite existing files. 
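+//
+// A typical invocation might look like this (the host list is only an
+// example):
+//
+//	go run generate_cert.go -host localhost,127.0.0.1 -duration 8760h
+//
+// See the flag declarations below for -start-date, -ca and -rsa-bits.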
+ +package main + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "flag" + "fmt" + "log" + "math/big" + "net" + "os" + "strings" + "time" +) + +var ( + host = flag.String("host", "", "Comma-separated hostnames and IPs to generate a certificate for") + validFrom = flag.String("start-date", "", "Creation date formatted as Jan 1 15:04:05 2011") + validFor = flag.Duration("duration", 365*24*time.Hour, "Duration that certificate is valid for") + isCA = flag.Bool("ca", false, "whether this cert should be its own Certificate Authority") + rsaBits = flag.Int("rsa-bits", 2048, "Size of RSA key to generate") +) + +func main() { + flag.Parse() + + if len(*host) == 0 { + log.Fatalf("Missing required --host parameter") + } + + priv, err := rsa.GenerateKey(rand.Reader, *rsaBits) + if err != nil { + log.Fatalf("failed to generate private key: %s", err) + return + } + + var notBefore time.Time + if len(*validFrom) == 0 { + notBefore = time.Now() + } else { + notBefore, err = time.Parse("Jan 2 15:04:05 2006", *validFrom) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse creation date: %s\n", err) + os.Exit(1) + } + } + + notAfter := notBefore.Add(*validFor) + + // end of ASN.1 time + endOfTime := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) + if notAfter.After(endOfTime) { + notAfter = endOfTime + } + + template := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{ + Organization: []string{"Acme Co"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + hosts := strings.Split(*host, ",") + for _, h := range hosts { + if ip := net.ParseIP(h); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, h) + } + } + + if *isCA { + template.IsCA = true + template.KeyUsage |= x509.KeyUsageCertSign + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + log.Fatalf("Failed to create certificate: %s", err) + return + } + + certOut, err := os.Create("cert.pem") + if err != nil { + log.Fatalf("failed to open cert.pem for writing: %s", err) + return + } + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + certOut.Close() + log.Print("written cert.pem\n") + + keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + log.Print("failed to open key.pem for writing:", err) + return + } + pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) + keyOut.Close() + log.Print("written key.pem\n") +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,411 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tls + +import ( + "bytes" + "crypto/ecdsa" + "crypto/rsa" + "crypto/subtle" + "crypto/x509" + "encoding/asn1" + "errors" + "io" + "strconv" +) + +func (c *Conn) clientHandshake() error { + if c.config == nil { + c.config = defaultConfig() + } + + hello := &clientHelloMsg{ + vers: c.config.maxVersion(), + compressionMethods: []uint8{compressionNone}, + random: make([]byte, 32), + ocspStapling: true, + serverName: c.config.ServerName, + supportedCurves: []uint16{curveP256, curveP384, curveP521}, + supportedPoints: []uint8{pointFormatUncompressed}, + nextProtoNeg: len(c.config.NextProtos) > 0, + } + + possibleCipherSuites := c.config.cipherSuites() + hello.cipherSuites = make([]uint16, 0, len(possibleCipherSuites)) + +NextCipherSuite: + for _, suiteId := range possibleCipherSuites { + for _, suite := range cipherSuites { + if suite.id != suiteId { + continue + } + // Don't advertise TLS 1.2-only cipher suites unless + // we're attempting TLS 1.2. + if hello.vers < VersionTLS12 && suite.flags&suiteTLS12 != 0 { + continue + } + hello.cipherSuites = append(hello.cipherSuites, suiteId) + continue NextCipherSuite + } + } + + t := uint32(c.config.time().Unix()) + hello.random[0] = byte(t >> 24) + hello.random[1] = byte(t >> 16) + hello.random[2] = byte(t >> 8) + hello.random[3] = byte(t) + _, err := io.ReadFull(c.config.rand(), hello.random[4:]) + if err != nil { + c.sendAlert(alertInternalError) + return errors.New("short read from Rand") + } + + if hello.vers >= VersionTLS12 { + hello.signatureAndHashes = supportedSKXSignatureAlgorithms + } + + c.writeRecord(recordTypeHandshake, hello.marshal()) + + msg, err := c.readHandshake() + if err != nil { + return err + } + serverHello, ok := msg.(*serverHelloMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + vers, ok := c.config.mutualVersion(serverHello.vers) + if !ok || vers < VersionTLS10 { + // TLS 1.0 is the minimum version supported as a client. 
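+ // (On the wire SSL 3.0 is version 0x0300 and TLS 1.0 through 1.2
+ // are 0x0301 through 0x0303, so the vers < VersionTLS10 comparison
+ // above rejects anything older than TLS 1.0.)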
+ return c.sendAlert(alertProtocolVersion) + } + c.vers = vers + c.haveVers = true + + finishedHash := newFinishedHash(c.vers) + finishedHash.Write(hello.marshal()) + finishedHash.Write(serverHello.marshal()) + + if serverHello.compressionMethod != compressionNone { + return c.sendAlert(alertUnexpectedMessage) + } + + if !hello.nextProtoNeg && serverHello.nextProtoNeg { + c.sendAlert(alertHandshakeFailure) + return errors.New("server advertised unrequested NPN") + } + + suite := mutualCipherSuite(c.config.cipherSuites(), serverHello.cipherSuite) + if suite == nil { + return c.sendAlert(alertHandshakeFailure) + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + certMsg, ok := msg.(*certificateMsg) + if !ok || len(certMsg.certificates) == 0 { + return c.sendAlert(alertUnexpectedMessage) + } + finishedHash.Write(certMsg.marshal()) + + certs := make([]*x509.Certificate, len(certMsg.certificates)) + for i, asn1Data := range certMsg.certificates { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + c.sendAlert(alertBadCertificate) + return errors.New("failed to parse certificate from server: " + err.Error()) + } + certs[i] = cert + } + + if !c.config.InsecureSkipVerify { + opts := x509.VerifyOptions{ + Roots: c.config.RootCAs, + CurrentTime: c.config.time(), + DNSName: c.config.ServerName, + Intermediates: x509.NewCertPool(), + } + + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + c.verifiedChains, err = certs[0].Verify(opts) + if err != nil { + c.sendAlert(alertBadCertificate) + return err + } + } + + switch certs[0].PublicKey.(type) { + case *rsa.PublicKey, *ecdsa.PublicKey: + break + default: + return c.sendAlert(alertUnsupportedCertificate) + } + + c.peerCertificates = certs + + if serverHello.ocspStapling { + msg, err = c.readHandshake() + if err != nil { + return err + } + cs, ok := msg.(*certificateStatusMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + finishedHash.Write(cs.marshal()) + + if cs.statusType == statusTypeOCSP { + c.ocspResponse = cs.response + } + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + + keyAgreement := suite.ka(c.vers) + + skx, ok := msg.(*serverKeyExchangeMsg) + if ok { + finishedHash.Write(skx.marshal()) + err = keyAgreement.processServerKeyExchange(c.config, hello, serverHello, certs[0], skx) + if err != nil { + c.sendAlert(alertUnexpectedMessage) + return err + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + } + + var chainToSend *Certificate + var certRequested bool + certReq, ok := msg.(*certificateRequestMsg) + if ok { + certRequested = true + + // RFC 4346 on the certificateAuthorities field: + // A list of the distinguished names of acceptable certificate + // authorities. These distinguished names may specify a desired + // distinguished name for a root CA or for a subordinate CA; + // thus, this message can be used to describe both known roots + // and a desired authorization space. If the + // certificate_authorities list is empty then the client MAY + // send any certificate of the appropriate + // ClientCertificateType, unless there is some external + // arrangement to the contrary. 
+ + finishedHash.Write(certReq.marshal()) + + var rsaAvail, ecdsaAvail bool + for _, certType := range certReq.certificateTypes { + switch certType { + case certTypeRSASign: + rsaAvail = true + case certTypeECDSASign: + ecdsaAvail = true + } + } + + // We need to search our list of client certs for one + // where SignatureAlgorithm is RSA and the Issuer is in + // certReq.certificateAuthorities + findCert: + for i, chain := range c.config.Certificates { + if !rsaAvail && !ecdsaAvail { + continue + } + + for j, cert := range chain.Certificate { + x509Cert := chain.Leaf + // parse the certificate if this isn't the leaf + // node, or if chain.Leaf was nil + if j != 0 || x509Cert == nil { + if x509Cert, err = x509.ParseCertificate(cert); err != nil { + c.sendAlert(alertInternalError) + return errors.New("tls: failed to parse client certificate #" + strconv.Itoa(i) + ": " + err.Error()) + } + } + + switch { + case rsaAvail && x509Cert.PublicKeyAlgorithm == x509.RSA: + case ecdsaAvail && x509Cert.PublicKeyAlgorithm == x509.ECDSA: + default: + continue findCert + } + + if len(certReq.certificateAuthorities) == 0 { + // they gave us an empty list, so just take the + // first RSA cert from c.config.Certificates + chainToSend = &chain + break findCert + } + + for _, ca := range certReq.certificateAuthorities { + if bytes.Equal(x509Cert.RawIssuer, ca) { + chainToSend = &chain + break findCert + } + } + } + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + } + + shd, ok := msg.(*serverHelloDoneMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + finishedHash.Write(shd.marshal()) + + // If the server requested a certificate then we have to send a + // Certificate message, even if it's empty because we don't have a + // certificate to send. 
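+ // (For reference, an empty Certificate message is just seven bytes on
+ // the wire: handshake type 0x0b, a 24-bit length of 3, and a 24-bit
+ // certificate_list length of 0, i.e. 0b 00 00 03 00 00 00.)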
+ if certRequested { + certMsg = new(certificateMsg) + if chainToSend != nil { + certMsg.certificates = chainToSend.Certificate + } + finishedHash.Write(certMsg.marshal()) + c.writeRecord(recordTypeHandshake, certMsg.marshal()) + } + + preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hello, certs[0]) + if err != nil { + c.sendAlert(alertInternalError) + return err + } + if ckx != nil { + finishedHash.Write(ckx.marshal()) + c.writeRecord(recordTypeHandshake, ckx.marshal()) + } + + if chainToSend != nil { + var signed []byte + certVerify := &certificateVerifyMsg{ + hasSignatureAndHash: c.vers >= VersionTLS12, + } + + switch key := c.config.Certificates[0].PrivateKey.(type) { + case *ecdsa.PrivateKey: + digest, _, hashId := finishedHash.hashForClientCertificate(signatureECDSA) + r, s, err := ecdsa.Sign(c.config.rand(), key, digest) + if err == nil { + signed, err = asn1.Marshal(ecdsaSignature{r, s}) + } + certVerify.signatureAndHash.signature = signatureECDSA + certVerify.signatureAndHash.hash = hashId + case *rsa.PrivateKey: + digest, hashFunc, hashId := finishedHash.hashForClientCertificate(signatureRSA) + signed, err = rsa.SignPKCS1v15(c.config.rand(), key, hashFunc, digest) + certVerify.signatureAndHash.signature = signatureRSA + certVerify.signatureAndHash.hash = hashId + default: + err = errors.New("unknown private key type") + } + if err != nil { + return c.sendAlert(alertInternalError) + } + certVerify.signature = signed + + finishedHash.Write(certVerify.marshal()) + c.writeRecord(recordTypeHandshake, certVerify.marshal()) + } + + masterSecret := masterFromPreMasterSecret(c.vers, preMasterSecret, hello.random, serverHello.random) + clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := + keysFromMasterSecret(c.vers, masterSecret, hello.random, serverHello.random, suite.macLen, suite.keyLen, suite.ivLen) + + var clientCipher interface{} + var clientHash macFunction + if suite.cipher != nil { + clientCipher = suite.cipher(clientKey, clientIV, false /* not for reading */) + clientHash = suite.mac(c.vers, clientMAC) + } else { + clientCipher = suite.aead(clientKey, clientIV) + } + c.out.prepareCipherSpec(c.vers, clientCipher, clientHash) + c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) + + if serverHello.nextProtoNeg { + nextProto := new(nextProtoMsg) + proto, fallback := mutualProtocol(c.config.NextProtos, serverHello.nextProtos) + nextProto.proto = proto + c.clientProtocol = proto + c.clientProtocolFallback = fallback + + finishedHash.Write(nextProto.marshal()) + c.writeRecord(recordTypeHandshake, nextProto.marshal()) + } + + finished := new(finishedMsg) + finished.verifyData = finishedHash.clientSum(masterSecret) + finishedHash.Write(finished.marshal()) + c.writeRecord(recordTypeHandshake, finished.marshal()) + + var serverCipher interface{} + var serverHash macFunction + if suite.cipher != nil { + serverCipher = suite.cipher(serverKey, serverIV, true /* for reading */) + serverHash = suite.mac(c.vers, serverMAC) + } else { + serverCipher = suite.aead(serverKey, serverIV) + } + c.in.prepareCipherSpec(c.vers, serverCipher, serverHash) + c.readRecord(recordTypeChangeCipherSpec) + if err := c.error(); err != nil { + return err + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + serverFinished, ok := msg.(*finishedMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + verify := finishedHash.serverSum(masterSecret) + if len(verify) != len(serverFinished.verifyData) || + subtle.ConstantTimeCompare(verify, 
serverFinished.verifyData) != 1 {
+ return c.sendAlert(alertHandshakeFailure)
+ }
+
+ c.handshakeComplete = true
+ c.cipherSuite = suite.id
+ return nil
+}
+
+// mutualProtocol finds the mutual Next Protocol Negotiation protocol given the
+// set of client and server supported protocols. The set of client supported
+// protocols must not be empty. It returns the resulting protocol and a flag
+// indicating if the fallback case was reached.
+func mutualProtocol(clientProtos, serverProtos []string) (string, bool) {
+ for _, s := range serverProtos {
+ for _, c := range clientProtos {
+ if s == c {
+ return s, false
+ }
+ }
+ }
+
+ return clientProtos[0], true
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go'
--- src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_client_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,3050 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tls
+
+import (
+ "bytes"
+ "flag"
+ "io"
+ "net"
+ "os"
+ "testing"
+)
+
+func testClientScript(t *testing.T, name string, clientScript [][]byte, config *Config) {
+ c, s := net.Pipe()
+ cli := Client(c, config)
+ go func() {
+ cli.Write([]byte("hello\n"))
+ cli.Close()
+ c.Close()
+ }()
+
+ defer c.Close()
+ for i, b := range clientScript {
+ if i%2 == 1 {
+ s.Write(b)
+ continue
+ }
+ bb := make([]byte, len(b))
+ _, err := io.ReadFull(s, bb)
+ if err != nil {
+ t.Fatalf("%s #%d: %s", name, i, err)
+ }
+ if !bytes.Equal(b, bb) {
+ t.Fatalf("%s #%d: mismatch on read: got:%x want:%x", name, i, bb, b)
+ }
+ }
+}
+
+func TestHandshakeClientRSARC4(t *testing.T) {
+ var config = *testConfig
+ config.CipherSuites = []uint16{TLS_RSA_WITH_RC4_128_SHA}
+ testClientScript(t, "RSA-RC4", rsaRC4ClientScript, &config)
+}
+
+func TestHandshakeClientECDHERSAAES(t *testing.T) {
+ var config = *testConfig
+ config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+ testClientScript(t, "ECDHE-RSA-AES", ecdheRSAAESClientScript, &config)
+}
+
+func TestHandshakeClientECDHECDSAAES(t *testing.T) {
+ var config = *testConfig
+ config.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA}
+ config.Certificates = nil
+ config.BuildNameToCertificate()
+ testClientScript(t, "ECDHE-ECDSA-AES", ecdheECDSAAESClientScript, &config)
+}
+
+func TestLongClientCertificateChain(t *testing.T) {
+ config := *testConfig
+ cert, _ := X509KeyPair(testClientChainCertificate, testClientChainCertificate)
+ config.Certificates = []Certificate{cert}
+ testClientScript(t, "Long client certificate chains", clientChainCertificateScript, &config)
+}
+
+func TestHandshakeClientTLS11(t *testing.T) {
+ var config = *testConfig
+ config.MaxVersion = VersionTLS11
+ config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+ testClientScript(t, "TLS11-ECDHE-AES", tls11ECDHEAESClientScript, &config)
+}
+
+func TestHandshakeClientTLS12(t *testing.T) {
+ config := *testConfig
+ config.MaxVersion = VersionTLS12
+ config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+ cert, _ := X509KeyPair(testClientChainCertificate, testClientChainCertificate)
+ config.Certificates = []Certificate{cert}
+ testClientScript(t, "TLS12", clientTLS12Script, &config)
+}
+
+func TestHandshakeClientTLS12ClientCert(t *testing.T) {
+ config := *testConfig
+ config.MaxVersion = VersionTLS12
+
config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256} + cert, _ := X509KeyPair(testClientChainCertificate, testClientChainCertificate) + config.Certificates = []Certificate{cert} + testClientScript(t, "TLS12ClientCert", clientTLS12ClientCertScript, &config) +} + +var connect = flag.Bool("connect", false, "connect to a TLS server on :10443") + +func TestRunClient(t *testing.T) { + if !*connect { + return + } + + tcpConn, err := net.Dial("tcp", "127.0.0.1:10443") + if err != nil { + t.Fatal(err) + } + + record := &recordingConn{ + Conn: tcpConn, + } + + config := GetTestConfig() + conn := Client(record, config) + if err := conn.Handshake(); err != nil { + t.Fatalf("error from TLS handshake: %s", err) + } + + conn.Write([]byte("hello\n")) + conn.Close() + + record.WriteTo(os.Stdout) +} + +func TestEmptyRecords(t *testing.T) { + // emptyRecordScript contains a TLS connection with an empty record as + // the first application data from the server. This test ensures that + // the empty record doesn't cause (0, nil) to be returned from + // Conn.Read. + config := *testConfig + config.CipherSuites = []uint16{TLS_RSA_WITH_AES_256_CBC_SHA} + + c, s := net.Pipe() + cli := Client(c, &config) + go func() { + buf := make([]byte, 1024) + n, err := cli.Read(buf) + defer c.Close() + defer cli.Close() + + if err != nil { + t.Fatalf("error reading from tls.Client: %s", err) + } + const expectedLength = 197 + if n != expectedLength { + t.Fatalf("incorrect length reading from tls.Client, got %d, want %d", n, expectedLength) + } + }() + + defer c.Close() + for i, b := range emptyRecordScript { + if i%2 == 1 { + s.Write(b) + continue + } + bb := make([]byte, len(b)) + _, err := io.ReadFull(s, bb) + if err != nil { + t.Fatalf("#%d: %s", i, err) + } + if !bytes.Equal(b, bb) { + t.Fatalf("#%d: mismatch on read: got:%x want:%x", i, bb, b) + } + } +} + +// Script of interaction with gnutls implementation. +// The values for this test are obtained by building and running in client mode: +// % go test -test.run "TestRunClient" -connect +// The recorded bytes are written to stdout. 
+// +// The server private key is: +// -----BEGIN RSA PRIVATE KEY----- +// MIIBPAIBAAJBAJ+zw4Qnlf8SMVIPFe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVD +// TGiXav6ooKXfX3j/7tdkuD8Ey2//Kv7+ue0CAwEAAQJAN6W31vDEP2DjdqhzCDDu +// OA4NACqoiFqyblo7yc2tM4h4xMbC3Yx5UKMN9ZkCtX0gzrz6DyF47bdKcWBzNWCj +// gQIhANEoojVt7hq+SQ6MCN6FTAysGgQf56Q3TYoJMoWvdiXVAiEAw3e3rc+VJpOz +// rHuDo6bgpjUAAXM+v3fcpsfZSNO6V7kCIQCtbVjanpUwvZkMI9by02oUk9taki3b +// PzPfAfNPYAbCJQIhAJXNQDWyqwn/lGmR11cqY2y9nZ1+5w3yHGatLrcDnQHxAiEA +// vnlEGo8K85u+KwIOimM48ZG8oTk7iFdkqLJR1utT3aU= +// -----END RSA PRIVATE KEY----- +// +// and certificate is: +// -----BEGIN CERTIFICATE----- +// MIICKzCCAdWgAwIBAgIJALE1E2URIMWSMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +// BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX +// aWRnaXRzIFB0eSBMdGQwHhcNMTIwNDA2MTcxMDEzWhcNMTUwNDA2MTcxMDEzWjBF +// MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50 +// ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAJ+z +// w4Qnlf8SMVIPFe9GEcStgOY2Ww/dgNdhjeD8ckUJNP5VZkVDTGiXav6ooKXfX3j/ +// 7tdkuD8Ey2//Kv7+ue0CAwEAAaOBpzCBpDAdBgNVHQ4EFgQUeKaXmmO1xaGlM7oi +// fCNuWxt6zCswdQYDVR0jBG4wbIAUeKaXmmO1xaGlM7oifCNuWxt6zCuhSaRHMEUx +// CzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRl +// cm5ldCBXaWRnaXRzIFB0eSBMdGSCCQCxNRNlESDFkjAMBgNVHRMEBTADAQH/MA0G +// CSqGSIb3DQEBBQUAA0EAhTZAc8G7GtrUWZ8tonAxRnTsg26oyDxRrzms7EC86CJG +// HZnWRiok1IsFCEv7NRFukrt3uuQSu/TIXpyBqJdgTA== +// -----END CERTIFICATE----- +var rsaRC4ClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x02, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x4d, 0x0a, 0x56, 0x16, 0xb5, + 0x91, 0xd1, 0xcb, 0x80, 0x4d, 0xc7, 0x46, 0xf3, + 0x37, 0x0c, 0xef, 0xea, 0x64, 0x11, 0x14, 0x56, + 0x97, 0x9b, 0xc5, 0x67, 0x08, 0xb7, 0x13, 0xea, + 0xf8, 0xc9, 0xb3, 0x20, 0xe2, 0xfc, 0x41, 0xf6, + 0x96, 0x90, 0x9d, 0x43, 0x9b, 0xe9, 0x6e, 0xf8, + 0x41, 0x16, 0xcc, 0xf3, 0xc7, 0xde, 0xda, 0x5a, + 0xa1, 0x33, 0x69, 0xe2, 0xde, 0x5b, 0xaf, 0x2a, + 0x92, 0xe7, 0xd4, 0xa0, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x01, 0xf7, 0x0b, 0x00, 0x01, 0xf3, + 0x00, 0x01, 0xf0, 0x00, 0x01, 0xed, 0x30, 0x82, + 0x01, 0xe9, 0x30, 0x82, 0x01, 0x52, 0x02, 0x01, + 0x06, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x04, 0x05, 0x00, + 0x30, 0x5b, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, + 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, + 0x13, 0x0a, 0x51, 0x75, 0x65, 0x65, 0x6e, 0x73, + 0x6c, 0x61, 0x6e, 0x64, 0x31, 0x1a, 0x30, 0x18, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x11, 0x43, + 0x72, 0x79, 0x70, 0x74, 0x53, 0x6f, 0x66, 0x74, + 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, + 0x31, 0x1b, 0x30, 0x19, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x12, 0x54, 0x65, 0x73, 0x74, 0x20, + 0x43, 0x41, 0x20, 0x28, 0x31, 0x30, 0x32, 0x34, + 0x20, 0x62, 0x69, 0x74, 0x29, 0x30, 0x1e, 0x17, + 0x0d, 0x30, 0x30, 0x31, 0x30, 0x31, 0x36, 0x32, + 0x32, 0x33, 0x31, 0x30, 0x33, 0x5a, 0x17, 0x0d, + 0x30, 0x33, 0x30, 0x31, 0x31, 0x34, 0x32, 0x32, + 0x33, 0x31, 0x30, 0x33, 0x5a, 0x30, 0x63, 
0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x51, + 0x75, 0x65, 0x65, 0x6e, 0x73, 0x6c, 0x61, 0x6e, + 0x64, 0x31, 0x1a, 0x30, 0x18, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x11, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x53, 0x6f, 0x66, 0x74, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x31, 0x23, 0x30, + 0x21, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, 0x1a, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x74, + 0x65, 0x73, 0x74, 0x20, 0x63, 0x65, 0x72, 0x74, + 0x20, 0x28, 0x35, 0x31, 0x32, 0x20, 0x62, 0x69, + 0x74, 0x29, 0x30, 0x5c, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x4b, 0x00, 0x30, 0x48, + 0x02, 0x41, 0x00, 0x9f, 0xb3, 0xc3, 0x84, 0x27, + 0x95, 0xff, 0x12, 0x31, 0x52, 0x0f, 0x15, 0xef, + 0x46, 0x11, 0xc4, 0xad, 0x80, 0xe6, 0x36, 0x5b, + 0x0f, 0xdd, 0x80, 0xd7, 0x61, 0x8d, 0xe0, 0xfc, + 0x72, 0x45, 0x09, 0x34, 0xfe, 0x55, 0x66, 0x45, + 0x43, 0x4c, 0x68, 0x97, 0x6a, 0xfe, 0xa8, 0xa0, + 0xa5, 0xdf, 0x5f, 0x78, 0xff, 0xee, 0xd7, 0x64, + 0xb8, 0x3f, 0x04, 0xcb, 0x6f, 0xff, 0x2a, 0xfe, + 0xfe, 0xb9, 0xed, 0x02, 0x03, 0x01, 0x00, 0x01, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x04, 0x05, 0x00, 0x03, + 0x81, 0x81, 0x00, 0x93, 0xd2, 0x0a, 0xc5, 0x41, + 0xe6, 0x5a, 0xa9, 0x86, 0xf9, 0x11, 0x87, 0xe4, + 0xdb, 0x45, 0xe2, 0xc5, 0x95, 0x78, 0x1a, 0x6c, + 0x80, 0x6d, 0x73, 0x1f, 0xb4, 0x6d, 0x44, 0xa3, + 0xba, 0x86, 0x88, 0xc8, 0x58, 0xcd, 0x1c, 0x06, + 0x35, 0x6c, 0x44, 0x62, 0x88, 0xdf, 0xe4, 0xf6, + 0x64, 0x61, 0x95, 0xef, 0x4a, 0xa6, 0x7f, 0x65, + 0x71, 0xd7, 0x6b, 0x88, 0x39, 0xf6, 0x32, 0xbf, + 0xac, 0x93, 0x67, 0x69, 0x51, 0x8c, 0x93, 0xec, + 0x48, 0x5f, 0xc9, 0xb1, 0x42, 0xf9, 0x55, 0xd2, + 0x7e, 0x4e, 0xf4, 0xf2, 0x21, 0x6b, 0x90, 0x57, + 0xe6, 0xd7, 0x99, 0x9e, 0x41, 0xca, 0x80, 0xbf, + 0x1a, 0x28, 0xa2, 0xca, 0x5b, 0x50, 0x4a, 0xed, + 0x84, 0xe7, 0x82, 0xc7, 0xd2, 0xcf, 0x36, 0x9e, + 0x6a, 0x67, 0xb9, 0x88, 0xa7, 0xf3, 0x8a, 0xd0, + 0x04, 0xf8, 0xe8, 0xc6, 0x17, 0xe3, 0xc5, 0x29, + 0xbc, 0x17, 0xf1, 0x16, 0x03, 0x01, 0x00, 0x04, + 0x0e, 0x00, 0x00, 0x00, + }, + + { + 0x16, 0x03, 0x01, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x00, 0x40, 0x87, 0xa1, 0x1f, 0x14, 0xe1, + 0xfb, 0x91, 0xac, 0x58, 0x2e, 0xf3, 0x71, 0xce, + 0x01, 0x85, 0x2c, 0xc7, 0xfe, 0x84, 0x87, 0x82, + 0xb7, 0x57, 0xdb, 0x37, 0x4d, 0x46, 0x83, 0x67, + 0x52, 0x82, 0x51, 0x01, 0x95, 0x23, 0x68, 0x69, + 0x6b, 0xd0, 0xa7, 0xa7, 0xe5, 0x88, 0xd0, 0x47, + 0x71, 0xb8, 0xd2, 0x03, 0x05, 0x25, 0x56, 0x5c, + 0x10, 0x08, 0xc6, 0x9b, 0xd4, 0x67, 0xcd, 0x28, + 0xbe, 0x9c, 0x48, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0xc1, 0xb8, + 0xd3, 0x7f, 0xc5, 0xc2, 0x5a, 0x1d, 0x6d, 0x5b, + 0x2d, 0x5c, 0x82, 0x87, 0xc2, 0x6f, 0x0d, 0x63, + 0x7b, 0x72, 0x2b, 0xda, 0x69, 0xc4, 0xfe, 0x3c, + 0x84, 0xa1, 0x5a, 0x62, 0x38, 0x37, 0xc6, 0x54, + 0x25, 0x2a, + }, + + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0xea, 0x88, 0x9c, 0x00, 0xf6, + 0x35, 0xb8, 0x42, 0x7f, 0x15, 0x17, 0x76, 0x5e, + 0x4b, 0x24, 0xcb, 0x7e, 0xa0, 0x7b, 0xc3, 0x70, + 0x52, 0x0a, 0x88, 0x2a, 0x7a, 0x45, 0x59, 0x90, + 0x59, 0xac, 0xc6, 0xb5, 0x56, 0x55, 0x96, + }, +} + +var ecdheRSAAESClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x13, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x52, 0x02, 0x00, 0x00, + 0x4e, 0x03, 0x01, 0x50, 0xad, 0x72, 0xb1, 0x14, + 0x45, 0xce, 0x0a, 0x95, 0xf9, 0x63, 0xef, 0xa8, + 0xe5, 0x07, 0x34, 0x04, 0xe9, 0x08, 0x0f, 0x38, + 0xe4, 0x28, 0x27, 0x91, 0x07, 0x03, 0xe2, 0xfe, + 0xe3, 0x25, 0xf7, 0x20, 0x08, 0x42, 0xa2, 0x01, + 0x69, 0x53, 0xf0, 0xd9, 0x4c, 0xfa, 0x01, 0xa1, + 0xce, 0x4b, 0xf8, 0x28, 0x21, 0xad, 0x06, 0xbe, + 0xe0, 0x1b, 0x3b, 0xf7, 0xec, 0xd2, 0x52, 0xae, + 0x2a, 0x57, 0xb7, 0xa8, 0xc0, 0x13, 0x00, 0x00, + 0x06, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0x16, + 0x03, 0x01, 0x02, 0x39, 0x0b, 0x00, 0x02, 0x35, + 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, 0x30, 0x82, + 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xb1, 0x35, + 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, 0x31, 0x37, + 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, 0x0d, 0x31, + 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, 0x37, 0x31, + 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, + 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, + 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, 0x00, 0x30, + 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, 0xc3, 0x84, + 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, 0x0f, 0x15, + 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, 0xe6, 0x36, + 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, 0x8d, 0xe0, + 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, 0x55, 0x66, + 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, 0xfe, 0xa8, + 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, 0xee, 0xd7, + 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, 0xff, 0x2a, + 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, 0x01, 0x00, + 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, 0xa4, 0x30, + 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, + 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, 0x63, 0xb5, + 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, 0x7c, 0x23, + 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, 0x30, 0x75, + 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x6e, 0x30, + 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, 0x9a, 0x63, + 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, 0x7c, + 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, 0xa1, + 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 
0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, 0x35, 0x13, + 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, 0x0c, 0x06, + 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, 0x03, + 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, 0x36, 0x40, + 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, 0x59, 0x9f, + 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, 0xec, 0x83, + 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, 0x39, 0xac, + 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, 0x1d, 0x99, + 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, 0x05, 0x08, + 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, 0xbb, 0x77, + 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, 0x5e, 0x9c, + 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, 0x03, 0x01, + 0x00, 0x8b, 0x0c, 0x00, 0x00, 0x87, 0x03, 0x00, + 0x17, 0x41, 0x04, 0x1c, 0x8f, 0x9c, 0x6d, 0xe7, + 0xab, 0x3e, 0xf8, 0x0a, 0x5d, 0xe1, 0x86, 0xb4, + 0xe2, 0x8e, 0xb2, 0x1c, 0x3b, 0xd9, 0xb6, 0x08, + 0x80, 0x58, 0x21, 0xe9, 0x0e, 0xc6, 0x66, 0x67, + 0x97, 0xcb, 0xb9, 0x92, 0x07, 0x00, 0xc4, 0xe5, + 0xec, 0x5f, 0xb4, 0xe2, 0x20, 0xa9, 0xc9, 0x62, + 0xd0, 0x98, 0xd5, 0xe3, 0x53, 0xff, 0xd0, 0x0a, + 0x6e, 0x29, 0x69, 0x39, 0x2a, 0x4b, 0x5c, 0xd8, + 0x6c, 0xf5, 0xfe, 0x00, 0x40, 0x35, 0xa7, 0x26, + 0x2e, 0xc2, 0x48, 0x93, 0x32, 0xf7, 0x7d, 0x0f, + 0x0d, 0x77, 0x56, 0x9a, 0x85, 0x0c, 0xa6, 0x74, + 0x06, 0xb8, 0x3d, 0x90, 0x56, 0x12, 0x63, 0xff, + 0x00, 0x5e, 0x0f, 0xf7, 0x24, 0xf7, 0xdb, 0x48, + 0x71, 0xe9, 0x2e, 0x03, 0xd3, 0xfa, 0x3a, 0xae, + 0xa0, 0xc1, 0x77, 0x3c, 0x4c, 0x59, 0xce, 0x33, + 0x1a, 0xd2, 0x47, 0x83, 0xfa, 0xea, 0xd8, 0x1e, + 0x06, 0xe7, 0x7d, 0xa0, 0x9b, 0x16, 0x03, 0x01, + 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0xd9, 0xa7, + 0x80, 0x56, 0x3f, 0xa3, 0x8f, 0x96, 0x72, 0x4e, + 0x4e, 0x6e, 0x23, 0x41, 0x8f, 0xda, 0x91, 0xb2, + 0x9e, 0x63, 0x23, 0x82, 0x64, 0xcd, 0x07, 0x24, + 0xd3, 0x40, 0x20, 0x22, 0x4c, 0xe3, 0xff, 0x38, + 0xbb, 0x43, 0x9d, 0x57, 0x11, 0xd5, 0x46, 0xa5, + 0x05, 0x29, 0x92, 0x02, 0xce, 0xdf, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x90, 0xe7, 0xba, 0x0e, 0xb1, 0xda, + 0x92, 0xb5, 0x77, 0x56, 0x38, 0xa6, 0x22, 0xc1, + 0x72, 0xeb, 0x8a, 0x68, 0x09, 0xb6, 0x74, 0xad, + 0xb3, 0x4a, 0xf2, 0xdd, 0x09, 0x9b, 0xc9, 0x4f, + 0x84, 0x73, 0x8b, 0xd6, 0x97, 0x50, 0x23, 0x1c, + 0xa0, 0xc2, 0x0c, 0x25, 0x18, 0xdd, 0x5e, 0x15, + 0x4d, 0xd9, 0xef, 0x4f, 0x6a, 0x43, 0x61, 0x9c, + 0x95, 0xde, 0x3c, 0x66, 0xc4, 0xc1, 0x33, 0x56, + 0xdd, 0x2f, 0x90, 0xaf, 0x68, 0x5c, 0x9c, 0xa4, + 0x90, 0x6d, 0xbf, 0x51, 0x1d, 0x68, 0xcb, 0x81, + 0x77, 0x52, 0xa0, 0x93, 0x2a, 0xf8, 0xc7, 0x61, + 0x87, 0x76, 0xca, 0x93, 0x9e, 0xd6, 0xee, 0x6f, + 0x3f, 0xeb, 0x7d, 0x06, 0xdd, 0x73, 0x4e, 0x27, + 0x16, 0x63, 0x92, 0xe4, 0xb2, 0x3f, 0x91, 0x23, + 0x21, 0x97, 0x90, 0xce, 0x53, 0xb8, 0xb0, 0x9d, + 0xbd, 0xbd, 0x33, 0x84, 0xad, 0x6b, 0x2e, 0x7b, + 0xf5, 0xeb, 0x1d, 0x64, 0x37, 0x2e, 0x29, 
0x4e, + 0xb0, 0x93, 0xdb, 0x92, 0xc7, 0xaa, 0x94, 0xa5, + 0x3b, 0x64, 0xd0, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x20, 0x11, 0xd8, 0x6b, + 0x3c, 0xf6, 0xbe, 0xf4, 0x54, 0x87, 0xec, 0x75, + 0x0c, 0x44, 0xdb, 0x92, 0xfc, 0xde, 0x7e, 0x0f, + 0x9f, 0x87, 0x87, 0x9c, 0x03, 0xd5, 0x07, 0x84, + 0xe0, 0x3a, 0xf8, 0xae, 0x14, 0x17, 0x03, 0x01, + 0x00, 0x20, 0xba, 0x54, 0xef, 0x5b, 0xce, 0xfd, + 0x47, 0x76, 0x6d, 0xa1, 0x8b, 0xfd, 0x48, 0xde, + 0x6e, 0x26, 0xc1, 0x0c, 0x9d, 0x54, 0xbf, 0x98, + 0xf6, 0x1c, 0x80, 0xb9, 0xca, 0x93, 0x81, 0x0a, + 0x2e, 0x06, 0x15, 0x03, 0x01, 0x00, 0x20, 0x93, + 0x3e, 0x38, 0x17, 0xc9, 0x0a, 0xc3, 0xea, 0xd3, + 0x92, 0x75, 0xa6, 0x53, 0x37, 0x4d, 0x74, 0x94, + 0xbe, 0x01, 0xdc, 0x5c, 0x5a, 0x0f, 0x09, 0xf6, + 0x57, 0x33, 0xc3, 0xbc, 0x3f, 0x7a, 0x4d, + }, +} + +var emptyRecordScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x35, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x02, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x51, 0x71, 0x8e, 0x03, 0x02, + 0xef, 0x09, 0xf2, 0x0e, 0xf5, 0x3b, 0x29, 0x9a, + 0xa8, 0x8b, 0x46, 0xa3, 0xd4, 0xb4, 0xc1, 0x14, + 0xc3, 0x19, 0x99, 0xba, 0x3d, 0x78, 0xcf, 0x50, + 0xd1, 0xe7, 0x26, 0x20, 0xa0, 0x37, 0x6d, 0xc9, + 0xae, 0x93, 0x33, 0x81, 0x20, 0xe3, 0xc1, 0x90, + 0x64, 0x6e, 0x67, 0x93, 0xdb, 0xb4, 0x04, 0x16, + 0xc4, 0x25, 0xdd, 0x10, 0x79, 0x3c, 0x18, 0x0a, + 0x7c, 0xfd, 0x28, 0x65, 0x00, 0x35, 0x00, 0x16, + 0x03, 0x01, 0x09, 0x9e, 0x0b, 0x00, 0x09, 0x9a, + 0x00, 0x09, 0x97, 0x00, 0x04, 0xea, 0x30, 0x82, + 0x04, 0xe6, 0x30, 0x82, 0x03, 0xce, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x11, 0x00, 0xff, 0xab, + 0x02, 0x93, 0xe0, 0x72, 0x99, 0x18, 0x6c, 0x9e, + 0x96, 0xb8, 0xb9, 0xf7, 0x47, 0xcb, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x41, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x46, 0x52, 0x31, 0x12, 0x30, 0x10, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x09, 0x47, + 0x41, 0x4e, 0x44, 0x49, 0x20, 0x53, 0x41, 0x53, + 0x31, 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x15, 0x47, 0x61, 0x6e, 0x64, 0x69, + 0x20, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, + 0x64, 0x20, 0x53, 0x53, 0x4c, 0x20, 0x43, 0x41, + 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x33, 0x30, 0x31, + 0x31, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x5a, 0x17, 0x0d, 0x31, 0x34, 0x30, 0x31, 0x31, + 0x34, 0x32, 0x33, 0x35, 0x39, 0x35, 0x39, 0x5a, + 0x30, 0x62, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0b, 0x13, 0x18, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x20, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x20, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x31, 0x24, 0x30, + 0x22, 0x06, 0x03, 0x55, 0x04, 0x0b, 0x13, 0x1b, + 0x47, 0x61, 0x6e, 0x64, 0x69, 0x20, 0x53, 0x74, + 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x20, 0x57, + 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x20, + 0x53, 0x53, 0x4c, 0x31, 0x17, 0x30, 0x15, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x14, 0x0e, 0x2a, 0x2e, + 0x66, 0x72, 0x65, 0x65, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x65, 0x74, 0x30, 0x82, 0x01, 0x22, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 
0x01, 0x01, 0x05, 0x00, 0x03, + 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, + 0x02, 0x82, 0x01, 0x01, 0x00, 0xdc, 0xe3, 0xfd, + 0xce, 0xc1, 0x66, 0x62, 0x28, 0x8b, 0x99, 0x65, + 0x72, 0x52, 0x88, 0x93, 0x5b, 0x3f, 0x8d, 0xde, + 0x2b, 0xb0, 0xa0, 0xf4, 0xbd, 0xb4, 0x07, 0x5f, + 0x9e, 0x01, 0x47, 0x60, 0x57, 0x5f, 0xdf, 0xdc, + 0x63, 0x28, 0x1c, 0x1e, 0x5b, 0xc8, 0xe6, 0x29, + 0xdd, 0xeb, 0x26, 0x63, 0xd5, 0xbf, 0x83, 0xb2, + 0x2d, 0xcd, 0x2c, 0xa0, 0xb6, 0x91, 0xad, 0xaf, + 0x95, 0x21, 0x1d, 0x1f, 0x39, 0x8d, 0x3e, 0x17, + 0xd6, 0xbd, 0x99, 0xf5, 0x6c, 0xd4, 0xcb, 0x79, + 0x12, 0x3e, 0x11, 0xb9, 0x7e, 0x62, 0xbc, 0x2d, + 0xbf, 0xe0, 0x55, 0x1b, 0x5c, 0x1e, 0xce, 0x31, + 0xd9, 0xf8, 0x56, 0x68, 0x95, 0x2b, 0x15, 0x84, + 0x35, 0xae, 0x98, 0x2c, 0x63, 0x01, 0xb2, 0x0d, + 0xab, 0xa8, 0x61, 0xef, 0x7f, 0x15, 0x2c, 0x6d, + 0xf7, 0x67, 0x1d, 0xb8, 0x8d, 0xf6, 0xa2, 0x1c, + 0x4e, 0x85, 0xf0, 0xea, 0x1a, 0x2b, 0xc8, 0xac, + 0x70, 0x86, 0x9a, 0xbb, 0x9e, 0x9d, 0xbd, 0xc9, + 0x87, 0x2b, 0x9f, 0x5e, 0x40, 0x44, 0x9b, 0xba, + 0x96, 0x45, 0x24, 0xbc, 0x49, 0xb8, 0xfe, 0x26, + 0x3a, 0x1d, 0x1a, 0x0a, 0x3a, 0x90, 0x9c, 0x75, + 0x51, 0x59, 0x89, 0x98, 0x1a, 0x56, 0xe1, 0x3a, + 0x1a, 0xba, 0xff, 0xb4, 0x37, 0x7d, 0xd8, 0x99, + 0xe2, 0xeb, 0x45, 0x27, 0xe2, 0x42, 0x42, 0x46, + 0xbb, 0x00, 0x29, 0x9f, 0x30, 0xc9, 0x1e, 0x6c, + 0xce, 0x59, 0x0e, 0xbe, 0x16, 0x03, 0x31, 0xec, + 0x10, 0xc1, 0x6d, 0xca, 0x9d, 0x5f, 0x6d, 0xf1, + 0x26, 0x11, 0xe5, 0x50, 0xa1, 0xbb, 0x67, 0xb2, + 0xe0, 0x2b, 0xed, 0x76, 0x5b, 0xc7, 0x68, 0xc0, + 0x18, 0xad, 0x91, 0x9e, 0xb5, 0xd4, 0x4d, 0x21, + 0xcd, 0x98, 0xd9, 0xe0, 0x05, 0x0a, 0x4d, 0x24, + 0xa3, 0xe6, 0x12, 0x04, 0xdd, 0x50, 0xe6, 0xc8, + 0x7a, 0x69, 0xb9, 0x32, 0x43, 0x02, 0x03, 0x01, + 0x00, 0x01, 0xa3, 0x82, 0x01, 0xb6, 0x30, 0x82, + 0x01, 0xb2, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, 0x14, 0xb6, + 0xa8, 0xff, 0xa2, 0xa8, 0x2f, 0xd0, 0xa6, 0xcd, + 0x4b, 0xb1, 0x68, 0xf3, 0xe7, 0x50, 0x10, 0x31, + 0xa7, 0x79, 0x21, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0x62, 0x37, + 0xd4, 0x3c, 0xbf, 0xd9, 0xc2, 0x99, 0xf3, 0x28, + 0x3e, 0xdb, 0xca, 0xee, 0xf3, 0xb3, 0xc8, 0x73, + 0xb0, 0x3c, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x1d, + 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, 0x02, + 0x05, 0xa0, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, + 0x13, 0x01, 0x01, 0xff, 0x04, 0x02, 0x30, 0x00, + 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x25, 0x04, + 0x16, 0x30, 0x14, 0x06, 0x08, 0x2b, 0x06, 0x01, + 0x05, 0x05, 0x07, 0x03, 0x01, 0x06, 0x08, 0x2b, + 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x02, 0x30, + 0x60, 0x06, 0x03, 0x55, 0x1d, 0x20, 0x04, 0x59, + 0x30, 0x57, 0x30, 0x4b, 0x06, 0x0b, 0x2b, 0x06, + 0x01, 0x04, 0x01, 0xb2, 0x31, 0x01, 0x02, 0x02, + 0x1a, 0x30, 0x3c, 0x30, 0x3a, 0x06, 0x08, 0x2b, + 0x06, 0x01, 0x05, 0x05, 0x07, 0x02, 0x01, 0x16, + 0x2e, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, + 0x77, 0x77, 0x77, 0x2e, 0x67, 0x61, 0x6e, 0x64, + 0x69, 0x2e, 0x6e, 0x65, 0x74, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x73, 0x2f, + 0x66, 0x72, 0x2f, 0x73, 0x73, 0x6c, 0x2f, 0x63, + 0x70, 0x73, 0x2f, 0x70, 0x64, 0x66, 0x2f, 0x30, + 0x08, 0x06, 0x06, 0x67, 0x81, 0x0c, 0x01, 0x02, + 0x01, 0x30, 0x3c, 0x06, 0x03, 0x55, 0x1d, 0x1f, + 0x04, 0x35, 0x30, 0x33, 0x30, 0x31, 0xa0, 0x2f, + 0xa0, 0x2d, 0x86, 0x2b, 0x68, 0x74, 0x74, 0x70, + 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x6c, 0x2e, 0x67, + 0x61, 0x6e, 0x64, 0x69, 0x2e, 0x6e, 0x65, 0x74, + 0x2f, 0x47, 0x61, 0x6e, 0x64, 0x69, 0x53, 0x74, + 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, 0x53, + 0x4c, 0x43, 0x41, 
0x2e, 0x63, 0x72, 0x6c, 0x30, + 0x6a, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, + 0x07, 0x01, 0x01, 0x04, 0x5e, 0x30, 0x5c, 0x30, + 0x37, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, + 0x07, 0x30, 0x02, 0x86, 0x2b, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x74, 0x2e, + 0x67, 0x61, 0x6e, 0x64, 0x69, 0x2e, 0x6e, 0x65, + 0x74, 0x2f, 0x47, 0x61, 0x6e, 0x64, 0x69, 0x53, + 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x53, + 0x53, 0x4c, 0x43, 0x41, 0x2e, 0x63, 0x72, 0x74, + 0x30, 0x21, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, + 0x05, 0x07, 0x30, 0x01, 0x86, 0x15, 0x68, 0x74, + 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63, 0x73, + 0x70, 0x2e, 0x67, 0x61, 0x6e, 0x64, 0x69, 0x2e, + 0x6e, 0x65, 0x74, 0x30, 0x27, 0x06, 0x03, 0x55, + 0x1d, 0x11, 0x04, 0x20, 0x30, 0x1e, 0x82, 0x0e, + 0x2a, 0x2e, 0x66, 0x72, 0x65, 0x65, 0x6e, 0x6f, + 0x64, 0x65, 0x2e, 0x6e, 0x65, 0x74, 0x82, 0x0c, + 0x66, 0x72, 0x65, 0x65, 0x6e, 0x6f, 0x64, 0x65, + 0x2e, 0x6e, 0x65, 0x74, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0x5b, 0x4a, 0x3a, 0x1d, 0x75, 0xe0, 0xc0, 0x9e, + 0xc9, 0x16, 0x66, 0x7f, 0x73, 0x95, 0x6e, 0x35, + 0xe4, 0x27, 0xfa, 0x8c, 0x9d, 0xee, 0xb1, 0x37, + 0x42, 0x3f, 0x54, 0x6a, 0x9d, 0x41, 0x84, 0x57, + 0xe1, 0x03, 0x3d, 0x69, 0x61, 0x77, 0x3b, 0x91, + 0xa2, 0x70, 0x94, 0xb6, 0x8e, 0x41, 0x63, 0x70, + 0xf2, 0x16, 0x04, 0x50, 0x05, 0x14, 0xfb, 0x59, + 0x7d, 0x89, 0x09, 0x3f, 0xb6, 0xef, 0xca, 0x3c, + 0x89, 0x88, 0x08, 0xe9, 0xa1, 0xf3, 0x33, 0x31, + 0x05, 0x4d, 0x70, 0xff, 0xdd, 0xa7, 0xd2, 0xe2, + 0xa0, 0x94, 0x3a, 0xf7, 0xc2, 0x9f, 0xad, 0x2b, + 0x2e, 0x20, 0xfa, 0x6c, 0xe1, 0xfc, 0xe6, 0x62, + 0x22, 0xa1, 0x38, 0x93, 0xec, 0x3e, 0xce, 0xfd, + 0x1f, 0xdd, 0xd4, 0x7c, 0x39, 0x46, 0x8b, 0xb4, + 0x64, 0xfa, 0xa1, 0x46, 0x87, 0x78, 0x2c, 0xd7, + 0x9c, 0xdd, 0x60, 0xd6, 0xda, 0x8e, 0xd8, 0x29, + 0x6d, 0x61, 0xa7, 0x29, 0x07, 0x76, 0xfc, 0xf9, + 0xbd, 0xfd, 0x14, 0xeb, 0x44, 0x70, 0xff, 0xd0, + 0x23, 0x99, 0x83, 0xc5, 0x5c, 0x56, 0x88, 0xaa, + 0x34, 0xda, 0xa6, 0xb3, 0x9a, 0xbf, 0xda, 0x58, + 0x1e, 0xa4, 0xb8, 0xc0, 0x40, 0x9d, 0xf0, 0xfc, + 0xf1, 0x23, 0xc2, 0xbc, 0x59, 0xe1, 0x82, 0xed, + 0x5d, 0xfb, 0x99, 0xaf, 0xf5, 0xf5, 0x15, 0xb8, + 0x8b, 0x59, 0xce, 0xaa, 0xca, 0xdf, 0xdc, 0x94, + 0x11, 0xe0, 0x96, 0xbf, 0x9f, 0x54, 0xa4, 0x9f, + 0x54, 0x36, 0x4a, 0xe8, 0x93, 0xda, 0xf4, 0x8c, + 0xb0, 0x6b, 0x8d, 0x4a, 0x9e, 0x11, 0xae, 0xcb, + 0xcb, 0x33, 0x8a, 0x4d, 0xcd, 0x4e, 0xa5, 0x9b, + 0xe9, 0x14, 0x46, 0x43, 0x9b, 0x96, 0x5f, 0x6d, + 0xf2, 0xea, 0x40, 0xef, 0x14, 0xc3, 0x99, 0x9f, + 0x23, 0x1e, 0xa5, 0x13, 0xab, 0x08, 0xea, 0x8f, + 0x68, 0x5b, 0x7d, 0x71, 0xdf, 0x18, 0xd1, 0x57, + 0x00, 0x04, 0xa7, 0x30, 0x82, 0x04, 0xa3, 0x30, + 0x82, 0x03, 0x8b, 0xa0, 0x03, 0x02, 0x01, 0x02, + 0x02, 0x10, 0x5a, 0xb6, 0x1d, 0xac, 0x1e, 0x4d, + 0xa2, 0x06, 0x14, 0xc7, 0x55, 0x3d, 0x3d, 0xa9, + 0xb2, 0xdc, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, + 0x00, 0x30, 0x81, 0x97, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x02, 0x55, 0x54, 0x31, 0x17, + 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, 0x07, 0x13, + 0x0e, 0x53, 0x61, 0x6c, 0x74, 0x20, 0x4c, 0x61, + 0x6b, 0x65, 0x20, 0x43, 0x69, 0x74, 0x79, 0x31, + 0x1e, 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x15, 0x54, 0x68, 0x65, 0x20, 0x55, 0x53, + 0x45, 0x52, 0x54, 0x52, 0x55, 0x53, 0x54, 0x20, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x31, + 0x21, 0x30, 0x1f, 
0x06, 0x03, 0x55, 0x04, 0x0b, + 0x13, 0x18, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x75, 0x73, 0x65, + 0x72, 0x74, 0x72, 0x75, 0x73, 0x74, 0x2e, 0x63, + 0x6f, 0x6d, 0x31, 0x1f, 0x30, 0x1d, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x13, 0x16, 0x55, 0x54, 0x4e, + 0x2d, 0x55, 0x53, 0x45, 0x52, 0x46, 0x69, 0x72, + 0x73, 0x74, 0x2d, 0x48, 0x61, 0x72, 0x64, 0x77, + 0x61, 0x72, 0x65, 0x30, 0x1e, 0x17, 0x0d, 0x30, + 0x38, 0x31, 0x30, 0x32, 0x33, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x32, 0x30, + 0x30, 0x35, 0x33, 0x30, 0x31, 0x30, 0x34, 0x38, + 0x33, 0x38, 0x5a, 0x30, 0x41, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x46, 0x52, 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x09, 0x47, 0x41, 0x4e, + 0x44, 0x49, 0x20, 0x53, 0x41, 0x53, 0x31, 0x1e, + 0x30, 0x1c, 0x06, 0x03, 0x55, 0x04, 0x03, 0x13, + 0x15, 0x47, 0x61, 0x6e, 0x64, 0x69, 0x20, 0x53, + 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x20, + 0x53, 0x53, 0x4c, 0x20, 0x43, 0x41, 0x30, 0x82, + 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, + 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, + 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xb6, + 0x54, 0x3d, 0xa5, 0xdb, 0x0d, 0x22, 0x78, 0x50, + 0x6a, 0x5a, 0x23, 0x89, 0x3f, 0x97, 0xa1, 0xd4, + 0x07, 0x1a, 0xa9, 0x58, 0x08, 0x9b, 0xa0, 0x15, + 0xc3, 0x32, 0xb6, 0xb7, 0xf1, 0xe8, 0xb9, 0xa5, + 0x6f, 0xad, 0x37, 0xf6, 0x6e, 0x71, 0x1b, 0xb4, + 0x75, 0x2d, 0x48, 0x5e, 0x9f, 0xc6, 0x15, 0xaa, + 0x81, 0xef, 0xe5, 0xc4, 0x88, 0x95, 0x8a, 0x3a, + 0x6c, 0x77, 0xcc, 0xb5, 0xcd, 0x65, 0xe4, 0x67, + 0xe5, 0x73, 0xc9, 0x50, 0x52, 0x94, 0xc1, 0x27, + 0x49, 0x3e, 0xa0, 0x6b, 0x41, 0x16, 0x41, 0xb6, + 0x94, 0x99, 0x41, 0xae, 0x3e, 0xcb, 0xe2, 0x06, + 0x46, 0x09, 0xe9, 0x4d, 0xbe, 0xc9, 0x4c, 0x55, + 0xa9, 0x18, 0x7e, 0xa6, 0xdf, 0x6e, 0xfd, 0x4a, + 0xb2, 0xcc, 0x6c, 0x4e, 0xd9, 0xc8, 0x50, 0x15, + 0x93, 0xb3, 0xf2, 0xe9, 0xe3, 0xc2, 0x6a, 0xad, + 0x3a, 0xd5, 0xfb, 0xc3, 0x79, 0x50, 0x9f, 0x25, + 0x79, 0x29, 0xb2, 0x47, 0x64, 0x7c, 0x20, 0x3e, + 0xe2, 0x08, 0x4d, 0x93, 0x29, 0x14, 0xb6, 0x34, + 0x6e, 0xcf, 0x71, 0x46, 0x7e, 0x76, 0x10, 0xf4, + 0xfd, 0x6c, 0xaa, 0x01, 0xd2, 0xc2, 0x06, 0xde, + 0x92, 0x83, 0xcc, 0x58, 0x90, 0x2e, 0x92, 0xde, + 0x1e, 0x65, 0xb7, 0x63, 0x2f, 0x3d, 0xb2, 0xeb, + 0x70, 0x8c, 0x4c, 0xe0, 0xbe, 0x15, 0x9d, 0xde, + 0xc1, 0x4d, 0x56, 0xf8, 0x0b, 0xc6, 0x8e, 0x07, + 0xb9, 0x5d, 0xdf, 0x95, 0xf0, 0x7b, 0x40, 0x1f, + 0x1a, 0x2c, 0xd7, 0x9c, 0x2b, 0x4b, 0x76, 0xf4, + 0x59, 0xf5, 0x43, 0xc1, 0x2c, 0x66, 0x10, 0x9e, + 0x9e, 0x66, 0x96, 0x60, 0x9d, 0x1c, 0x74, 0x1b, + 0x4e, 0x18, 0x5c, 0x08, 0xb0, 0x6e, 0x6c, 0xca, + 0x69, 0x1a, 0x02, 0xe9, 0xbb, 0xca, 0x78, 0xef, + 0x66, 0x2e, 0xe3, 0x32, 0xfd, 0x41, 0x5c, 0x95, + 0x74, 0x81, 0x4d, 0xf4, 0xda, 0xfe, 0x4b, 0x02, + 0x03, 0x01, 0x00, 0x01, 0xa3, 0x82, 0x01, 0x3e, + 0x30, 0x82, 0x01, 0x3a, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, 0x80, + 0x14, 0xa1, 0x72, 0x5f, 0x26, 0x1b, 0x28, 0x98, + 0x43, 0x95, 0x5d, 0x07, 0x37, 0xd5, 0x85, 0x96, + 0x9d, 0x4b, 0xd2, 0xc3, 0x45, 0x30, 0x1d, 0x06, + 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, + 0xb6, 0xa8, 0xff, 0xa2, 0xa8, 0x2f, 0xd0, 0xa6, + 0xcd, 0x4b, 0xb1, 0x68, 0xf3, 0xe7, 0x50, 0x10, + 0x31, 0xa7, 0x79, 0x21, 0x30, 0x0e, 0x06, 0x03, + 0x55, 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, + 0x03, 0x02, 0x01, 0x06, 0x30, 0x12, 0x06, 0x03, + 0x55, 0x1d, 0x13, 0x01, 0x01, 0xff, 0x04, 0x08, + 0x30, 0x06, 0x01, 0x01, 0xff, 0x02, 0x01, 0x00, + 0x30, 0x18, 0x06, 
0x03, 0x55, 0x1d, 0x20, 0x04, + 0x11, 0x30, 0x0f, 0x30, 0x0d, 0x06, 0x0b, 0x2b, + 0x06, 0x01, 0x04, 0x01, 0xb2, 0x31, 0x01, 0x02, + 0x02, 0x1a, 0x30, 0x44, 0x06, 0x03, 0x55, 0x1d, + 0x1f, 0x04, 0x3d, 0x30, 0x3b, 0x30, 0x39, 0xa0, + 0x37, 0xa0, 0x35, 0x86, 0x33, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x6c, 0x2e, + 0x75, 0x73, 0x65, 0x72, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x54, + 0x4e, 0x2d, 0x55, 0x53, 0x45, 0x52, 0x46, 0x69, + 0x72, 0x73, 0x74, 0x2d, 0x48, 0x61, 0x72, 0x64, + 0x77, 0x61, 0x72, 0x65, 0x2e, 0x63, 0x72, 0x6c, + 0x30, 0x74, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, + 0x05, 0x07, 0x01, 0x01, 0x04, 0x68, 0x30, 0x66, + 0x30, 0x3d, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, + 0x05, 0x07, 0x30, 0x02, 0x86, 0x31, 0x68, 0x74, + 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x63, 0x72, 0x74, + 0x2e, 0x75, 0x73, 0x65, 0x72, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, + 0x54, 0x4e, 0x41, 0x64, 0x64, 0x54, 0x72, 0x75, + 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x43, 0x41, 0x2e, 0x63, 0x72, 0x74, 0x30, + 0x25, 0x06, 0x08, 0x2b, 0x06, 0x01, 0x05, 0x05, + 0x07, 0x30, 0x01, 0x86, 0x19, 0x68, 0x74, 0x74, + 0x70, 0x3a, 0x2f, 0x2f, 0x6f, 0x63, 0x73, 0x70, + 0x2e, 0x75, 0x73, 0x65, 0x72, 0x74, 0x72, 0x75, + 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, + 0x01, 0x00, 0x19, 0x53, 0xbf, 0x03, 0x3d, 0x9b, + 0xe2, 0x6b, 0x5a, 0xfd, 0xba, 0x49, 0x1f, 0x4f, + 0xec, 0xe1, 0xc6, 0x82, 0x39, 0x3c, 0xd2, 0x03, + 0x04, 0x0f, 0xab, 0x7b, 0x3e, 0x82, 0xa9, 0x85, + 0x10, 0x1f, 0xf4, 0xde, 0x32, 0xaf, 0x58, 0x3f, + 0xff, 0x70, 0xf3, 0x30, 0x1d, 0x97, 0x2d, 0x4c, + 0x9a, 0xe2, 0xec, 0x0c, 0x3e, 0x14, 0x2d, 0x2f, + 0x98, 0x48, 0x9d, 0xae, 0x16, 0x6a, 0xac, 0x2d, + 0x42, 0xaa, 0xb5, 0x64, 0xa4, 0x70, 0xbb, 0xeb, + 0x73, 0x94, 0x7b, 0x46, 0x4c, 0xe7, 0x7a, 0x14, + 0x76, 0x5b, 0x4c, 0x1d, 0x84, 0xa1, 0x20, 0x74, + 0x1f, 0x2e, 0x4b, 0x5c, 0x70, 0x88, 0xdc, 0xbd, + 0xf7, 0x19, 0x3d, 0xed, 0x59, 0x0d, 0xe2, 0x3f, + 0x26, 0xe2, 0x9c, 0xac, 0xa4, 0x3c, 0x95, 0x1c, + 0xf8, 0xbe, 0x8c, 0x03, 0xae, 0xf0, 0xe5, 0x9c, + 0x4d, 0xbc, 0xc7, 0x9b, 0x58, 0x00, 0xbf, 0xaf, + 0xad, 0xfa, 0x37, 0x6e, 0x71, 0x6d, 0x18, 0x34, + 0x0e, 0xc1, 0xea, 0x6a, 0xf8, 0x0d, 0xdf, 0x69, + 0x54, 0x56, 0x15, 0xf2, 0x28, 0xb3, 0xfe, 0xa4, + 0x63, 0xec, 0xc5, 0x04, 0x64, 0x60, 0xbb, 0xfe, + 0x2a, 0xf0, 0xf4, 0x87, 0xa1, 0xb0, 0xae, 0xbd, + 0xaa, 0xe4, 0x2f, 0xe3, 0x03, 0x0b, 0x2f, 0x66, + 0x5f, 0x85, 0xa4, 0x32, 0x7b, 0x46, 0xed, 0x25, + 0x0c, 0xe7, 0xf1, 0xb7, 0xe7, 0x19, 0xfd, 0x60, + 0xba, 0x5f, 0x87, 0x77, 0xde, 0x98, 0x07, 0x96, + 0xe4, 0x5e, 0xea, 0x63, 0x7d, 0xa8, 0xde, 0x55, + 0xda, 0x61, 0x5c, 0x3c, 0x90, 0x83, 0x43, 0x04, + 0x07, 0x3c, 0xdd, 0xf3, 0xf8, 0x9f, 0x06, 0x52, + 0x0a, 0xde, 0xc7, 0xb6, 0x7b, 0x8f, 0xe1, 0x11, + 0xf7, 0x04, 0x7a, 0x35, 0xff, 0x6a, 0xbc, 0x5b, + 0xc7, 0x50, 0x49, 0x08, 0x70, 0x6f, 0x94, 0x43, + 0xcd, 0x9e, 0xc7, 0x70, 0xf1, 0xdb, 0xd0, 0x6d, + 0xda, 0x8f, 0x16, 0x03, 0x01, 0x00, 0x0e, 0x0d, + 0x00, 0x00, 0x06, 0x03, 0x01, 0x02, 0x40, 0x00, + 0x00, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x02, 0xbe, 0x0b, 0x00, 0x02, + 0xba, 0x00, 0x02, 0xb7, 0x00, 0x02, 0xb4, 0x30, + 0x82, 0x02, 0xb0, 0x30, 0x82, 0x02, 0x19, 0xa0, + 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 
0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x30, 0x30, 0x34, 0x32, 0x34, 0x30, + 0x39, 0x30, 0x39, 0x33, 0x38, 0x5a, 0x17, 0x0d, + 0x31, 0x31, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, + 0x30, 0x39, 0x33, 0x38, 0x5a, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9f, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, + 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, + 0x00, 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, 0xe5, + 0xbf, 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, 0xe6, + 0x2b, 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, 0x8a, + 0x7a, 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, 0xa5, + 0x65, 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, 0xb5, + 0xb4, 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, 0x7e, + 0x62, 0xa5, 0x2c, 0xa5, 0x33, 0xd6, 0xfe, 0x12, + 0x5c, 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, 0xfa, + 0x58, 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, 0xd3, + 0xd0, 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, 0x54, + 0x9f, 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, 0xfe, + 0x18, 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, 0x7d, + 0xf1, 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, 0x51, + 0xc9, 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, 0x66, + 0x01, 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, 0x9a, + 0x1d, 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, 0x2d, + 0x79, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x81, + 0xa7, 0x30, 0x81, 0xa4, 0x30, 0x1d, 0x06, 0x03, + 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb1, + 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, + 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, + 0x18, 0x88, 0x39, 0x30, 0x75, 0x06, 0x03, 0x55, + 0x1d, 0x23, 0x04, 0x6e, 0x30, 0x6c, 0x80, 0x14, + 0xb1, 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, + 0xdb, 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, + 0x8e, 0x18, 0x88, 0x39, 0xa1, 0x49, 0xa4, 0x47, + 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, + 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, + 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, + 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, + 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x82, + 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, + 0xb8, 0xca, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, + 0x13, 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, + 0x81, 0x81, 0x00, 0x08, 0x6c, 0x45, 0x24, 0xc7, + 0x6b, 0xb1, 0x59, 0xab, 0x0c, 0x52, 0xcc, 0xf2, + 0xb0, 0x14, 0xd7, 0x87, 0x9d, 0x7a, 0x64, 0x75, + 0xb5, 0x5a, 0x95, 0x66, 0xe4, 0xc5, 0x2b, 0x8e, + 0xae, 0x12, 0x66, 0x1f, 0xeb, 0x4f, 0x38, 0xb3, + 0x6e, 0x60, 0xd3, 0x92, 0xfd, 0xf7, 0x41, 0x08, + 0xb5, 0x25, 0x13, 0xb1, 0x18, 
0x7a, 0x24, 0xfb, + 0x30, 0x1d, 0xba, 0xed, 0x98, 0xb9, 0x17, 0xec, + 0xe7, 0xd7, 0x31, 0x59, 0xdb, 0x95, 0xd3, 0x1d, + 0x78, 0xea, 0x50, 0x56, 0x5c, 0xd5, 0x82, 0x5a, + 0x2d, 0x5a, 0x5f, 0x33, 0xc4, 0xb6, 0xd8, 0xc9, + 0x75, 0x90, 0x96, 0x8c, 0x0f, 0x52, 0x98, 0xb5, + 0xcd, 0x98, 0x1f, 0x89, 0x20, 0x5f, 0xf2, 0xa0, + 0x1c, 0xa3, 0x1b, 0x96, 0x94, 0xdd, 0xa9, 0xfd, + 0x57, 0xe9, 0x70, 0xe8, 0x26, 0x6d, 0x71, 0x99, + 0x9b, 0x26, 0x6e, 0x38, 0x50, 0x29, 0x6c, 0x90, + 0xa7, 0xbd, 0xd9, 0x16, 0x03, 0x01, 0x01, 0x06, + 0x10, 0x00, 0x01, 0x02, 0x01, 0x00, 0x25, 0x48, + 0x6c, 0x0a, 0xde, 0x9d, 0x3a, 0x57, 0xe4, 0x2e, + 0xb9, 0xfc, 0xb4, 0x46, 0x1f, 0x20, 0x4f, 0x58, + 0x4d, 0x12, 0x08, 0xb4, 0x3e, 0x4c, 0xf5, 0xa8, + 0xa5, 0x16, 0x40, 0x29, 0x19, 0x04, 0x4d, 0xf9, + 0x54, 0x3a, 0x32, 0xd7, 0x79, 0xf2, 0x0e, 0xc1, + 0x7b, 0x0c, 0x62, 0x71, 0xbb, 0xb4, 0x8c, 0xe7, + 0x84, 0xd5, 0xf8, 0x11, 0x77, 0x7f, 0x87, 0x6c, + 0xfc, 0x25, 0xf3, 0x2d, 0x97, 0x3d, 0x1f, 0xf5, + 0xfc, 0x64, 0x94, 0x9f, 0xdd, 0x90, 0x82, 0xdd, + 0x11, 0x74, 0x74, 0x59, 0xa2, 0x1a, 0x71, 0xb2, + 0x55, 0x6d, 0x18, 0xca, 0x85, 0x47, 0x8b, 0x79, + 0x73, 0x06, 0x24, 0x38, 0xc3, 0x34, 0x98, 0x84, + 0x62, 0x81, 0xd8, 0xad, 0x54, 0xad, 0x13, 0xa5, + 0xf4, 0xe4, 0x82, 0x85, 0xd3, 0xe3, 0x9e, 0xeb, + 0xb5, 0xf5, 0x95, 0x83, 0x0e, 0xb9, 0x7d, 0xb6, + 0xda, 0x0c, 0xf6, 0x14, 0x6a, 0x60, 0x8c, 0x75, + 0x56, 0xf0, 0xe9, 0x60, 0xe0, 0x4c, 0xf4, 0x4e, + 0x84, 0x8b, 0x4f, 0xf4, 0x2f, 0xde, 0xb7, 0xec, + 0x61, 0xd3, 0x77, 0x07, 0x6e, 0x41, 0x57, 0xc9, + 0xd9, 0x1d, 0x75, 0xee, 0x42, 0x63, 0xdc, 0x58, + 0xad, 0xfc, 0xc7, 0xe1, 0x77, 0x49, 0xb1, 0x58, + 0x21, 0x96, 0x00, 0x55, 0x90, 0x6b, 0xf6, 0x2a, + 0x5a, 0x19, 0x25, 0x93, 0x59, 0x9d, 0xaf, 0x79, + 0x9b, 0x18, 0x5d, 0xf6, 0x5d, 0x64, 0x4b, 0x9a, + 0xf4, 0xde, 0xf2, 0x7f, 0xbd, 0x93, 0x7e, 0x45, + 0x3e, 0x17, 0xae, 0xbf, 0x52, 0xe1, 0xba, 0x8e, + 0x0b, 0xbc, 0x1e, 0x91, 0x9d, 0xf1, 0x4e, 0x0b, + 0xab, 0x9e, 0x5c, 0x4c, 0x6f, 0xf7, 0xf3, 0x8d, + 0x8c, 0x6d, 0xeb, 0x46, 0x05, 0x36, 0x7e, 0x2f, + 0x9c, 0xa1, 0x86, 0x15, 0xe1, 0xe4, 0xb4, 0x20, + 0x06, 0x44, 0x7b, 0x3c, 0x8b, 0x13, 0x96, 0xf5, + 0x02, 0xb1, 0x4f, 0x3c, 0x2d, 0x4a, 0x16, 0x03, + 0x01, 0x00, 0x86, 0x0f, 0x00, 0x00, 0x82, 0x00, + 0x80, 0x52, 0xb1, 0x0d, 0xfc, 0x85, 0x34, 0x56, + 0xb9, 0xdf, 0xa7, 0x8e, 0xf4, 0xfd, 0x02, 0x46, + 0x8a, 0x23, 0xcc, 0x53, 0x3b, 0x0f, 0xa7, 0x61, + 0xf3, 0xb5, 0xbf, 0xfe, 0x59, 0x77, 0x10, 0xd6, + 0x56, 0x93, 0x19, 0x6b, 0x2c, 0xf1, 0x35, 0x71, + 0xe3, 0x36, 0x2f, 0xa0, 0x90, 0x4e, 0x5a, 0xdf, + 0x8d, 0x06, 0x88, 0xcf, 0xb1, 0x06, 0x56, 0x8b, + 0x74, 0x8f, 0x02, 0x8e, 0x10, 0xd2, 0xab, 0x8d, + 0x3f, 0x3e, 0x02, 0xf1, 0x1a, 0x80, 0x6d, 0x0f, + 0x9e, 0x77, 0xd8, 0xfa, 0x92, 0xb3, 0x16, 0x40, + 0xeb, 0x9e, 0xca, 0xd7, 0xe4, 0x31, 0xcc, 0x63, + 0x5f, 0xe2, 0x4c, 0x85, 0x0e, 0xf2, 0xdd, 0xd3, + 0xfe, 0x7e, 0xa7, 0x60, 0x1c, 0xb4, 0x00, 0xd8, + 0xbe, 0x4b, 0x9b, 0x66, 0x78, 0x0f, 0xfb, 0x3b, + 0x52, 0x30, 0x2b, 0x8b, 0xd9, 0xef, 0x82, 0x0a, + 0xa4, 0x18, 0x1d, 0xb0, 0xb5, 0xbf, 0x54, 0x97, + 0x0c, 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, + 0x03, 0x01, 0x00, 0x30, 0xa1, 0x74, 0x22, 0xd8, + 0x86, 0x6a, 0xbe, 0x53, 0x34, 0x1d, 0xb3, 0x73, + 0xff, 0x51, 0xc0, 0xce, 0x8e, 0x7d, 0x9b, 0xab, + 0xcb, 0x8b, 0x79, 0xae, 0x04, 0x01, 0xa7, 0xf2, + 0x8e, 0x9d, 0xab, 0xa3, 0x73, 0x80, 0x5c, 0xff, + 0x96, 0x20, 0xbb, 0x8d, 0xc0, 0x02, 0x66, 0x6c, + 0x83, 0x4b, 0x78, 0x20, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x30, 0x29, 0xd4, 0xfd, 0x03, 0x8b, + 0x30, 0x20, 0xf7, 0xca, 0xc0, 0x6c, 0x83, 0x5d, + 
0x73, 0xcb, 0x81, 0x60, 0xe0, 0x9a, 0x09, 0xcb, + 0x33, 0x03, 0x80, 0x81, 0x4e, 0x84, 0x47, 0xd5, + 0x74, 0x6c, 0x3b, 0xb5, 0xc0, 0x48, 0x0d, 0x52, + 0xdd, 0xbe, 0xc2, 0x06, 0xf5, 0x79, 0x2b, 0x3e, + 0x99, 0x56, 0x94, 0x17, 0x03, 0x01, 0x00, 0x20, + 0x26, 0x46, 0x90, 0x9d, 0xef, 0x59, 0x00, 0xb6, + 0x70, 0xe8, 0x1e, 0x1a, 0x80, 0x8b, 0x04, 0xb2, + 0xfc, 0x51, 0xf8, 0x93, 0xbe, 0x00, 0x28, 0xba, + 0xb8, 0xdc, 0x51, 0x7e, 0x92, 0x80, 0xfa, 0xf2, + 0x17, 0x03, 0x01, 0x00, 0xe0, 0xb8, 0x2e, 0xc4, + 0x6b, 0x3f, 0xda, 0x39, 0x87, 0x7f, 0x03, 0x43, + 0x28, 0xdd, 0xb9, 0xf9, 0x9e, 0x16, 0xf5, 0xce, + 0x3f, 0x7e, 0x6a, 0x7b, 0xb3, 0x60, 0x14, 0xe1, + 0xea, 0x54, 0xc5, 0xe6, 0x05, 0x0a, 0x6c, 0xe0, + 0xef, 0x58, 0x29, 0x8a, 0x77, 0x64, 0x77, 0x5d, + 0x9c, 0xe2, 0xe0, 0x3c, 0x6d, 0x87, 0x82, 0xbe, + 0x47, 0x63, 0xd4, 0xfd, 0x0c, 0x25, 0xc4, 0xb1, + 0xfe, 0x29, 0x6f, 0x84, 0xfb, 0xab, 0x6e, 0xa7, + 0xf9, 0x22, 0x89, 0x97, 0x5b, 0x91, 0x0a, 0x07, + 0xe0, 0xef, 0x3d, 0x67, 0xee, 0x87, 0xa8, 0x33, + 0x02, 0x64, 0x33, 0xca, 0x15, 0x10, 0xb9, 0x57, + 0xd8, 0xe5, 0x1a, 0x4b, 0xe3, 0x45, 0xc1, 0x62, + 0x85, 0x50, 0xf1, 0x79, 0x54, 0xe1, 0x2e, 0x25, + 0x01, 0x3c, 0xdb, 0x2d, 0x39, 0x14, 0x2f, 0x9b, + 0xd0, 0x1d, 0xc1, 0xac, 0x73, 0x7d, 0xa4, 0xed, + 0x89, 0x98, 0xb1, 0xae, 0x8a, 0x9e, 0xc8, 0xa7, + 0xfe, 0x55, 0x27, 0xb5, 0xb5, 0xa2, 0xec, 0x7e, + 0xe3, 0x6b, 0x45, 0x19, 0xfa, 0x20, 0x1c, 0x33, + 0x83, 0x22, 0x33, 0x97, 0xd2, 0x5a, 0xc4, 0xf8, + 0x9a, 0x03, 0x13, 0x85, 0xf2, 0x2b, 0x04, 0x59, + 0x27, 0xd7, 0x0b, 0x42, 0x47, 0x9b, 0x7d, 0x4d, + 0xb2, 0x1a, 0x85, 0x7f, 0x97, 0xc2, 0xf2, 0x10, + 0xf0, 0xfa, 0x4e, 0x4b, 0x62, 0x43, 0x3a, 0x09, + 0x2e, 0xcd, 0x8f, 0xa8, 0xb6, 0x0b, 0x5f, 0x34, + 0xd7, 0x3b, 0xba, 0xd9, 0xe5, 0x01, 0x2d, 0x35, + 0xae, 0xc5, 0x4c, 0xab, 0x40, 0x64, 0xc2, 0xc9, + 0x8c, 0x69, 0x44, 0xf4, 0xb8, 0xb5, 0x3a, 0x05, + 0x3c, 0x29, 0x19, 0xb4, 0x09, 0x17, 0x03, 0x01, + 0x00, 0x20, 0xc8, 0xc5, 0xb7, 0xe3, 0xd2, 0x3e, + 0x27, 0xb5, 0x71, 0x8f, 0x52, 0x0b, 0xce, 0x17, + 0x64, 0x86, 0xa4, 0x34, 0x16, 0x1b, 0x61, 0x64, + 0x7c, 0xb3, 0xf2, 0xe5, 0x3e, 0xfd, 0xdd, 0xfb, + 0x40, 0x78, 0x17, 0x03, 0x01, 0x00, 0x50, 0x8e, + 0x79, 0xf0, 0x8e, 0x76, 0x5d, 0x34, 0x09, 0xdc, + 0xec, 0x6d, 0xc3, 0x43, 0x1d, 0xcb, 0x2d, 0xaa, + 0x08, 0x7a, 0x51, 0x94, 0x4e, 0xc5, 0x26, 0xe4, + 0x0b, 0x8e, 0x8f, 0x51, 0xf2, 0x9f, 0xeb, 0xc3, + 0x18, 0x43, 0x95, 0x15, 0xfc, 0x59, 0x18, 0x25, + 0x47, 0xb6, 0x4a, 0x6e, 0xa3, 0xa4, 0x3b, 0xa3, + 0x47, 0x34, 0x74, 0x6b, 0xc5, 0x3d, 0x41, 0x14, + 0x64, 0xd5, 0x69, 0x5f, 0x77, 0xf3, 0x7c, 0x41, + 0xc6, 0xed, 0x2e, 0xcf, 0xff, 0x40, 0xf2, 0xce, + 0xbb, 0xa7, 0x4e, 0x73, 0x88, 0x98, 0x10, + }, + { + 0x15, 0x03, 0x01, 0x00, 0x20, 0x1a, 0xbc, 0x70, + 0x24, 0xf8, 0xfb, 0xf2, 0x4a, 0xf9, 0x44, 0x1e, + 0x58, 0xf8, 0xaa, 0x41, 0x24, 0xe8, 0x80, 0x33, + 0x45, 0x18, 0xa1, 0x5d, 0xee, 0x16, 0x80, 0xae, + 0x40, 0x41, 0x8e, 0x41, 0x9b, + }, +} + +var tls11ECDHEAESClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x13, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x02, 0x51, 0x9f, 0xa2, 0x21, 0x1a, + 0xb7, 
0x75, 0x42, 0x69, 0xd3, 0x14, 0xdd, 0x05, + 0x1e, 0xda, 0x13, 0x71, 0x8d, 0x6a, 0x45, 0x97, + 0xcb, 0xee, 0x0e, 0x77, 0x01, 0x0d, 0x6e, 0xe5, + 0x22, 0x70, 0x16, 0x20, 0x69, 0xfc, 0xa6, 0x9a, + 0xe8, 0x21, 0xcc, 0x46, 0x65, 0x05, 0xb4, 0x48, + 0x0f, 0x34, 0x63, 0x2c, 0xac, 0xa4, 0xf5, 0x4b, + 0x64, 0xd1, 0x07, 0x13, 0xa7, 0xe4, 0x5b, 0xa3, + 0x4d, 0x31, 0x41, 0x53, 0xc0, 0x13, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, + 0x02, 0x16, 0x03, 0x02, 0x02, 0x39, 0x0b, 0x00, + 0x02, 0x35, 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, + 0x30, 0x82, 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xb1, 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, + 0x31, 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, + 0x0d, 0x31, 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, + 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, + 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, + 0xc3, 0x84, 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, + 0x0f, 0x15, 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, + 0xe6, 0x36, 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, + 0x8d, 0xe0, 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, + 0x55, 0x66, 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, + 0xfe, 0xa8, 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, + 0xee, 0xd7, 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, + 0xff, 0x2a, 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, + 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, + 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, + 0x9a, 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, + 0x22, 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, + 0x2b, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, + 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, + 0x36, 0x40, 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, + 0x59, 0x9f, 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, + 0xec, 0x83, 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, + 0x39, 0xac, 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, + 0x1d, 0x99, 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, + 0x05, 0x08, 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, + 0xbb, 0x77, 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, + 0x5e, 0x9c, 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, + 0x03, 0x02, 0x00, 0x8b, 0x0c, 0x00, 0x00, 0x87, + 0x03, 0x00, 0x17, 0x41, 0x04, 0x34, 0xde, 0x50, + 0x32, 0x8f, 0x25, 0x6b, 0x37, 0x2c, 0x36, 0x24, + 0x27, 0x0e, 0xf9, 0x67, 0xb4, 0xf8, 0x29, 0x1c, + 0xa5, 0xa4, 0x59, 0x9a, 0xca, 0x40, 0x26, 0x15, + 0x61, 0x72, 0x34, 0x4a, 0xd3, 0x0c, 0xac, 0x69, + 0xcb, 0x2a, 0x9e, 0xf8, 0x80, 0xfb, 0x7a, 0xc4, + 0xd4, 0x4b, 0x91, 0x1b, 0xbe, 0x24, 0x26, 0xad, + 0x19, 0x24, 0xbe, 0x32, 0x58, 0xfb, 0xc7, 0x77, + 0xce, 0x7e, 0x71, 0x51, 0x1a, 0x00, 0x40, 0x1a, + 0x0b, 0xe8, 0x91, 0x84, 0x64, 0x54, 0xb6, 0x19, + 0xe8, 0xd4, 0x43, 0x7c, 0x09, 0x0c, 0x2e, 0xba, + 0x42, 0xb9, 0x74, 0xc3, 0x6c, 0x06, 0x9b, 0xa6, + 0x7e, 0x92, 0xe9, 0xee, 0x7c, 0x74, 0xa9, 0xd3, + 0x63, 0xf0, 0x16, 0x20, 0x60, 0x71, 0x8e, 0x24, + 0xc7, 0x7f, 0xc5, 0x5b, 0x9c, 0x19, 0x0c, 0x80, + 0x15, 0x61, 0xbf, 0xb6, 0xed, 0x5b, 0x7b, 0x90, + 0xc5, 0x05, 0x13, 0x72, 0x45, 0x79, 0xdf, 0x16, + 0x03, 0x02, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x02, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x02, 0x00, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x50, + 0x32, 0x26, 0x51, 0xbd, 0xbd, 0x3c, 0x4f, 0x72, + 0xbf, 0xbc, 0x91, 0x70, 0x4b, 0x5d, 0x43, 0x4a, + 0x65, 0x26, 0x0d, 0xaa, 0xed, 0x00, 0x91, 0xaf, + 0x4f, 0x47, 0x09, 0xaa, 0x79, 0xc4, 0x47, 0x21, + 0x71, 0xd8, 0x2b, 0xc1, 0x51, 0xc8, 0xef, 0xed, + 0x67, 0xde, 0x97, 0xef, 0x18, 0x53, + }, + { + 0x14, 0x03, 0x02, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x02, 0x00, 0x40, 0x72, 0x20, 0xbf, 0xd1, 0xbd, + 0x83, 0x53, 0x57, 0xb0, 0x4e, 0xac, 0xba, 0x1a, + 0x2b, 0x2d, 0xeb, 0x8a, 0x48, 0x17, 0xfa, 0x69, + 0xf9, 0xb5, 0x94, 0x8e, 0x6f, 0x9c, 0xda, 0x59, + 0xba, 0x6c, 0x7c, 0x82, 0xe2, 0x53, 0xa9, 0x46, + 0xdc, 0x33, 0xa0, 0x9b, 0xf0, 0x1e, 0xf1, 0x53, + 0x83, 0x48, 0xbf, 0x5e, 0xef, 0x03, 0x2b, 0x50, + 0x7a, 0xa6, 0xf8, 0xc3, 0x9e, 0x24, 0x43, 0x3a, + 0xdf, 0x44, 0x3e, + }, + { + 0x17, 0x03, 0x02, 0x00, 0x30, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x0b, 0x8f, + 0x6b, 0xf9, 0xd3, 0x9f, 0x2b, 0x49, 0xe0, 0x62, + 0x9a, 0x0b, 0x3e, 0xa2, 0x72, 0x8b, 0x96, 0x0c, + 0x41, 0x09, 0x95, 0x9e, 0x6b, 0x26, 0xa1, 0x46, + 0xca, 0xb8, 0xb6, 0xd2, 0xd4, 0x15, 0x03, 0x02, + 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0xa0, 0xd4, 0x84, 0xc6, 0x7e, 0x1c, + 0x2f, 0xbd, 0x6b, 0x45, 0x31, 0x1d, 0x7d, 0x8f, + 0x31, 0x39, 0x5a, 0x4e, 0xaa, 0xf1, 0x0a, 0x8a, + 0x6c, 0x33, 0x59, 0x19, 0xd8, 0x75, 0x80, 0xab, + 0x93, 0x81, + }, +} + 
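+// The [][]byte handshake scripts in this file appear to follow a simple
+// replay convention: even-indexed flows are the bytes the client under
+// test is expected to write (flow 0 is always a ClientHello), and
+// odd-indexed flows are the bytes the test feeds back as the peer's
+// responses. The replayConn below is a minimal sketch of a connection
+// that drives such a script; the type, its fields, and its error messages
+// are illustrative assumptions added for exposition (relying on the
+// bytes, errors, and io packages), not the helper these tests actually use.
+
+type replayConn struct {
+	flows   [][]byte // remaining script; client-sent flows at even indices
+	current int      // index of the flow being matched or replayed
+}
+
+// Read plays back the pending peer flow, returning io.EOF once the script
+// is exhausted (or while a client flow is still being matched).
+func (c *replayConn) Read(p []byte) (int, error) {
+	if c.current >= len(c.flows) || c.current%2 == 0 {
+		return 0, io.EOF
+	}
+	n := copy(p, c.flows[c.current])
+	c.flows[c.current] = c.flows[c.current][n:]
+	if len(c.flows[c.current]) == 0 {
+		c.current++
+	}
+	return n, nil
+}
+
+// Write checks the client's output byte-for-byte against the next
+// recorded client flow.
+func (c *replayConn) Write(p []byte) (int, error) {
+	if c.current >= len(c.flows) || c.current%2 != 0 {
+		return 0, errors.New("replayConn: unexpected client write")
+	}
+	if !bytes.HasPrefix(c.flows[c.current], p) {
+		return 0, errors.New("replayConn: client output diverges from script")
+	}
+	c.flows[c.current] = c.flows[c.current][len(p):]
+	if len(c.flows[c.current]) == 0 {
+		c.current++
+	}
+	return len(p), nil
+}
+
+// clientChainCertificateScript (below) appears to record a handshake in
+// which the server sends a CertificateRequest and the client answers with
+// a two-certificate chain: a "My Leaf" certificate for myleaf.com issued
+// by a "My CA Client" intermediate (mycaclient.com), chaining to the same
+// myca.org root that signed the server certificate. This reading is
+// inferred from the DER-encoded names visible in the flows themselves.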
+var clientChainCertificateScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x02, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x51, 0xa2, 0x9b, 0x8b, 0xd4, + 0xe6, 0x33, 0xa2, 0x70, 0x38, 0x37, 0xba, 0x55, + 0x86, 0xcf, 0x87, 0xea, 0x6d, 0x2c, 0x3e, 0x17, + 0xc2, 0x09, 0xf8, 0x4d, 0xb0, 0x5d, 0x93, 0x2b, + 0x15, 0x99, 0x0c, 0x20, 0x5d, 0x61, 0x21, 0x2c, + 0xed, 0x49, 0x32, 0x29, 0x08, 0x6e, 0x21, 0x58, + 0x00, 0xdb, 0x34, 0xb7, 0x37, 0xcd, 0x27, 0x75, + 0x31, 0x1e, 0x6c, 0x74, 0xa6, 0xef, 0xa2, 0xc4, + 0x2b, 0x6c, 0xc3, 0x03, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x03, 0xef, 0x0b, 0x00, 0x03, 0xeb, + 0x00, 0x03, 0xe8, 0x00, 0x03, 0xe5, 0x30, 0x82, + 0x03, 0xe1, 0x30, 0x82, 0x02, 0xc9, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0xcc, 0x22, + 0x4c, 0x4b, 0x98, 0xa2, 0x88, 0xfc, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x81, 0x86, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, + 0x4e, 0x59, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, + 0x55, 0x04, 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, + 0x6f, 0x6b, 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, + 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, + 0x4d, 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x0c, 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, + 0x6f, 0x72, 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, + 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, + 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x33, 0x30, 0x35, 0x32, 0x36, + 0x32, 0x31, 0x30, 0x35, 0x30, 0x31, 0x5a, 0x17, + 0x0d, 0x32, 0x33, 0x30, 0x35, 0x32, 0x34, 0x32, + 0x31, 0x30, 0x35, 0x30, 0x31, 0x5a, 0x30, 0x81, + 0x86, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, + 0x02, 0x4e, 0x59, 0x31, 0x11, 0x30, 0x0f, 0x06, + 0x03, 0x55, 0x04, 0x07, 0x0c, 0x08, 0x42, 0x72, + 0x6f, 0x6f, 0x6b, 0x6c, 0x79, 0x6e, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, + 0x18, 0x4d, 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, + 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, + 0x79, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, + 0x04, 0x03, 0x0c, 0x08, 0x6d, 0x79, 0x63, 0x61, + 0x2e, 0x6f, 0x72, 0x67, 0x31, 0x21, 0x30, 0x1f, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, + 0x68, 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, + 0x61, 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, + 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, + 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, + 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, + 0xf0, 0xfb, 0xad, 0x80, 0x5e, 0x37, 0xd3, 0x6d, + 
0xee, 0x2e, 0xcc, 0xbc, 0x0c, 0xd7, 0x56, 0x4b, + 0x56, 0x45, 0xcd, 0x28, 0xb6, 0x22, 0xe9, 0xe2, + 0x0f, 0xd1, 0x87, 0x2a, 0x27, 0xce, 0x77, 0x8d, + 0x6e, 0x0e, 0x0f, 0xfb, 0x66, 0xe1, 0xb5, 0x0e, + 0x9a, 0xb6, 0x05, 0x8e, 0xb3, 0xe1, 0xc5, 0x77, + 0x86, 0x5b, 0x46, 0xd2, 0x0b, 0x92, 0x03, 0x1b, + 0x89, 0x0c, 0x1b, 0x10, 0x0e, 0x99, 0x8f, 0xe2, + 0x17, 0xe8, 0xc2, 0x30, 0x00, 0x47, 0xd6, 0xfc, + 0xf9, 0x0f, 0x3b, 0x75, 0x34, 0x8d, 0x4d, 0xb0, + 0x99, 0xb7, 0xa0, 0x6d, 0xa0, 0xb6, 0xad, 0xda, + 0x07, 0x5e, 0x38, 0x2e, 0x02, 0xe4, 0x30, 0x6d, + 0xae, 0x13, 0x72, 0xd4, 0xc8, 0xce, 0x14, 0x07, + 0xae, 0x23, 0x8c, 0x8f, 0x9e, 0x8c, 0x60, 0xd6, + 0x06, 0xb9, 0xef, 0x00, 0x18, 0xc0, 0x1d, 0x25, + 0x1e, 0xda, 0x3e, 0x2f, 0xcf, 0x2b, 0x56, 0x84, + 0x9e, 0x30, 0x21, 0xc7, 0x29, 0xf6, 0x03, 0x8a, + 0x24, 0xf9, 0x34, 0xac, 0x65, 0x9d, 0x80, 0x36, + 0xc8, 0x3b, 0x15, 0x10, 0xbd, 0x51, 0xe9, 0xbc, + 0x02, 0xe1, 0xe9, 0xb3, 0x5a, 0x9a, 0x99, 0x41, + 0x1b, 0x27, 0xa0, 0x4d, 0x50, 0x9e, 0x27, 0x7f, + 0xa1, 0x7d, 0x09, 0x87, 0xbd, 0x8a, 0xca, 0x5f, + 0xb1, 0xa5, 0x08, 0xb8, 0x04, 0xd4, 0x52, 0x89, + 0xaa, 0xe0, 0x7d, 0x42, 0x2e, 0x2f, 0x15, 0xee, + 0x66, 0x57, 0x0f, 0x13, 0x19, 0x45, 0xa8, 0x4b, + 0x5d, 0x81, 0x66, 0xcc, 0x12, 0x37, 0x94, 0x5e, + 0xfd, 0x3c, 0x10, 0x81, 0x51, 0x3f, 0xfa, 0x0f, + 0xdd, 0xa1, 0x89, 0x03, 0xa9, 0x78, 0x91, 0xf5, + 0x3b, 0xf3, 0xbc, 0xac, 0xbe, 0x93, 0x30, 0x2e, + 0xbe, 0xca, 0x7f, 0x46, 0xd3, 0x28, 0xb4, 0x4e, + 0x91, 0x7b, 0x5b, 0x43, 0x6c, 0xaf, 0x9b, 0x5c, + 0x6a, 0x6d, 0x5a, 0xdb, 0x79, 0x5e, 0x6a, 0x6b, + 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x50, 0x30, + 0x4e, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x6b, 0x1e, 0x00, 0xa8, + 0x9f, 0xfa, 0x7d, 0x00, 0xf9, 0xe0, 0x9d, 0x0f, + 0x90, 0x8c, 0x90, 0xa8, 0xa1, 0x37, 0x6b, 0xda, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x18, 0x30, 0x16, 0x80, 0x14, 0x6b, 0x1e, 0x00, + 0xa8, 0x9f, 0xfa, 0x7d, 0x00, 0xf9, 0xe0, 0x9d, + 0x0f, 0x90, 0x8c, 0x90, 0xa8, 0xa1, 0x37, 0x6b, + 0xda, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, + 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x01, 0x00, 0xcd, 0x6f, 0x73, 0x4d, 0x56, + 0x0b, 0xf3, 0x2e, 0x1c, 0xe2, 0x02, 0x0c, 0x14, + 0xbb, 0x2f, 0xdd, 0x3c, 0x43, 0xfe, 0xdf, 0x94, + 0x2d, 0xa9, 0x89, 0x81, 0x51, 0xf8, 0x5f, 0xa7, + 0xa0, 0x13, 0xaa, 0xcc, 0xb0, 0x18, 0xe2, 0x57, + 0x3e, 0x0d, 0x29, 0x93, 0xe8, 0x95, 0xd5, 0x1b, + 0x53, 0xd2, 0x51, 0xf2, 0xbd, 0xf5, 0x9e, 0x7b, + 0x22, 0x65, 0x62, 0x5c, 0xc4, 0x4c, 0x1d, 0xe8, + 0xe9, 0xc3, 0xd4, 0x2b, 0xe7, 0x78, 0xcb, 0x10, + 0xf3, 0xfe, 0x06, 0x83, 0xdc, 0x3a, 0x1e, 0x62, + 0x10, 0xc0, 0x46, 0x77, 0xc6, 0x9d, 0x9f, 0xab, + 0x96, 0x25, 0x5c, 0xfb, 0x26, 0xc1, 0x15, 0x1f, + 0xa5, 0x33, 0xee, 0x4f, 0x9a, 0x14, 0x6a, 0x14, + 0x97, 0x93, 0x2b, 0x95, 0x0b, 0xdc, 0xa8, 0xd7, + 0x69, 0x2e, 0xf0, 0x01, 0x0e, 0xfd, 0x4e, 0xd0, + 0xd9, 0xa8, 0xe5, 0x65, 0xde, 0xfb, 0xca, 0xca, + 0x1c, 0x5f, 0xf9, 0x53, 0xa0, 0x87, 0xe7, 0x33, + 0x9b, 0x2f, 0xcf, 0xe4, 0x13, 0xfc, 0xec, 0x7a, + 0x6c, 0xb0, 0x90, 0x13, 0x9b, 0xb6, 0xc5, 0x03, + 0xf6, 0x0e, 0x5e, 0xe2, 0xe4, 0x26, 0xc1, 0x7e, + 0x53, 0xfe, 0x69, 0xa3, 0xc7, 0xd8, 0x8e, 0x6e, + 0x94, 0x32, 0xa0, 0xde, 0xca, 0xb6, 0xcc, 0xd6, + 0x01, 0xd5, 0x78, 0x40, 0x28, 0x63, 0x9b, 0xee, + 0xcf, 0x09, 0x3b, 0x35, 0x04, 0xf0, 0x14, 0x02, + 0xf6, 0x80, 0x0e, 0x90, 0xb2, 0x94, 0xd2, 0x25, + 0x16, 0xb8, 0x7a, 0x76, 0x87, 0x84, 0x9f, 0x84, + 0xc5, 0xaf, 0xc2, 0x6d, 0x68, 0x7a, 0x84, 0x9c, + 
0xc6, 0x8a, 0x63, 0x60, 0x87, 0x6a, 0x25, 0xc1, + 0xa1, 0x78, 0x0f, 0xba, 0xe8, 0x5f, 0xe1, 0xba, + 0xac, 0xa4, 0x6f, 0xdd, 0x09, 0x3f, 0x12, 0xcb, + 0x1d, 0xf3, 0xcf, 0x48, 0xd7, 0xd3, 0x26, 0xe8, + 0x9c, 0xc3, 0x53, 0xb3, 0xba, 0xdc, 0x32, 0x99, + 0x98, 0x96, 0xd6, 0x16, 0x03, 0x01, 0x00, 0x99, + 0x0d, 0x00, 0x00, 0x91, 0x03, 0x01, 0x02, 0x40, + 0x00, 0x8b, 0x00, 0x89, 0x30, 0x81, 0x86, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, + 0x59, 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, + 0x04, 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, + 0x6b, 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, + 0x06, 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, + 0x79, 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, + 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, + 0x0c, 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, + 0x72, 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, + 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, + 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, + 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x0a, 0xfb, 0x0b, 0x00, 0x0a, + 0xf7, 0x00, 0x0a, 0xf4, 0x00, 0x03, 0x7e, 0x30, + 0x82, 0x03, 0x7a, 0x30, 0x82, 0x02, 0x62, 0x02, + 0x09, 0x00, 0xb4, 0x47, 0x58, 0x57, 0x2b, 0x67, + 0xc8, 0xc2, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, + 0x00, 0x30, 0x81, 0x80, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, 0x20, 0x43, + 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, 0x61, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, + 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, + 0x34, 0x34, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x34, + 0x34, 0x30, 0x30, 0x5a, 0x30, 0x7d, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x11, 0x30, 0x0f, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x08, 0x4e, 0x65, + 0x77, 0x20, 0x59, 0x6f, 0x72, 0x6b, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x07, 0x4d, 0x79, 0x20, 0x4c, + 0x65, 0x61, 0x66, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0a, 0x6d, 0x79, + 0x6c, 0x65, 0x61, 0x66, 0x2e, 0x63, 0x6f, 0x6d, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 
0x0a, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xa0, 0xa3, 0xef, 0xc1, + 0x44, 0x7d, 0xa2, 0xe3, 0x71, 0x98, 0x27, 0x63, + 0xb3, 0x1d, 0x71, 0x50, 0xa6, 0x34, 0x15, 0xcb, + 0xc9, 0x2a, 0xc3, 0xea, 0xe4, 0x9e, 0x9c, 0x49, + 0xa6, 0x01, 0x9b, 0x7e, 0xa9, 0xb5, 0x7a, 0xff, + 0x15, 0x92, 0x71, 0xc8, 0x97, 0x9c, 0x25, 0xb7, + 0x79, 0x2b, 0xff, 0xab, 0xc6, 0xb1, 0xa7, 0x00, + 0x90, 0xb2, 0x8b, 0xd7, 0x71, 0xd5, 0xc2, 0x3a, + 0xe6, 0x82, 0x42, 0x37, 0x89, 0x41, 0x04, 0xb0, + 0xba, 0xc7, 0x5b, 0x8a, 0x43, 0x9f, 0x97, 0x39, + 0x0c, 0x0f, 0xd5, 0x6d, 0x9e, 0x8d, 0xeb, 0xc0, + 0x26, 0xc5, 0x18, 0xe8, 0x7a, 0x3d, 0x32, 0x2e, + 0x38, 0x90, 0x40, 0x5b, 0x39, 0x2c, 0x07, 0xcb, + 0x24, 0x10, 0xc5, 0xc9, 0x3b, 0xe3, 0x66, 0x47, + 0x57, 0xb9, 0x6a, 0xad, 0x44, 0xf8, 0xd0, 0x70, + 0x62, 0x3b, 0x8e, 0xed, 0x60, 0x5f, 0x22, 0xf8, + 0xb8, 0x0c, 0xc9, 0x41, 0x2b, 0xc9, 0x80, 0x6e, + 0x4e, 0x1b, 0xe1, 0x20, 0xfc, 0x47, 0xa4, 0xac, + 0xc3, 0x3f, 0xe6, 0xc2, 0x81, 0x79, 0x03, 0x37, + 0x25, 0x89, 0xca, 0xd6, 0xa5, 0x46, 0x91, 0x63, + 0x41, 0xc5, 0x3e, 0xd5, 0xed, 0x7f, 0x4f, 0x8d, + 0x06, 0xc0, 0x89, 0x00, 0xbe, 0x37, 0x7b, 0x7e, + 0x73, 0xca, 0x70, 0x00, 0x14, 0x34, 0xbe, 0x47, + 0xbc, 0xb2, 0x6a, 0x28, 0xa5, 0x29, 0x84, 0xa8, + 0x9d, 0xc8, 0x1e, 0x77, 0x66, 0x1f, 0x9f, 0xaa, + 0x2b, 0x47, 0xdb, 0xdd, 0x6b, 0x9c, 0xa8, 0xfc, + 0x82, 0x36, 0x94, 0x62, 0x0d, 0x5c, 0x3f, 0xb2, + 0x01, 0xb4, 0xa5, 0xb8, 0xc6, 0x0e, 0x94, 0x5b, + 0xec, 0x5e, 0xbb, 0x7a, 0x63, 0x24, 0xf1, 0xf9, + 0xd6, 0x50, 0x08, 0xc1, 0xa3, 0xcc, 0x90, 0x07, + 0x5b, 0x04, 0x04, 0x42, 0x74, 0xcf, 0x37, 0xfa, + 0xf0, 0xa5, 0xd9, 0xd3, 0x86, 0x89, 0x89, 0x18, + 0xf3, 0x4c, 0xe2, 0x11, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, + 0x03, 0x82, 0x01, 0x01, 0x00, 0x90, 0xbb, 0xf9, + 0x5e, 0xba, 0x17, 0x1f, 0xac, 0x21, 0x9f, 0x6b, + 0x4a, 0x46, 0xd0, 0x6d, 0x3c, 0x8f, 0x3d, 0xf8, + 0x5e, 0x3e, 0x72, 0xaf, 0xa0, 0x1a, 0xf3, 0xff, + 0x89, 0xac, 0x5b, 0x7a, 0xe2, 0x91, 0x2a, 0x23, + 0x85, 0xc6, 0x4d, 0x47, 0x67, 0x01, 0x08, 0xa8, + 0x05, 0x1d, 0x01, 0x60, 0x50, 0x5f, 0x59, 0xad, + 0xfe, 0x7b, 0xc6, 0x0c, 0x54, 0x90, 0x68, 0x70, + 0x67, 0x2e, 0xed, 0x87, 0xf8, 0x69, 0x8a, 0xac, + 0x32, 0xfe, 0x6f, 0x90, 0x19, 0x2a, 0x64, 0x8d, + 0x82, 0x66, 0x05, 0x43, 0x88, 0xee, 0xf2, 0x30, + 0xed, 0xa4, 0x8f, 0xbf, 0xd6, 0x57, 0x20, 0xd4, + 0x43, 0x1d, 0x52, 0x96, 0x6f, 0xae, 0x09, 0x96, + 0x01, 0x52, 0x38, 0xe3, 0xaf, 0x99, 0xd7, 0xdc, + 0x14, 0x99, 0xc4, 0x8b, 0x0e, 0x04, 0x0f, 0xb3, + 0x14, 0x14, 0xd4, 0xa5, 0x93, 0xe1, 0xc9, 0x8a, + 0x81, 0xef, 0x63, 0xfc, 0x36, 0x77, 0x05, 0x06, + 0xf0, 0x2a, 0x04, 0x0a, 0xbe, 0x2e, 0xce, 0x81, + 0x3d, 0x23, 0xa1, 0xda, 0xd8, 0xeb, 0xc6, 0xea, + 0x5e, 0xcf, 0x28, 0x36, 0x51, 0x31, 0x95, 0x5e, + 0x40, 0x04, 0xed, 0xac, 0xc1, 0xc8, 0x56, 0x69, + 0x87, 0xec, 0x3b, 0x03, 0x3e, 0x9d, 0x0f, 0x4c, + 0x4c, 0xeb, 0xd7, 0xba, 0x26, 0xdf, 0xe3, 0xde, + 0x10, 0xee, 0x93, 0x62, 0x8d, 0x73, 0x52, 0x6e, + 0xff, 0x37, 0x36, 0x98, 0x7b, 0x2d, 0x56, 0x4c, + 0xba, 0x09, 0xb8, 0xa7, 0xf0, 0x3b, 0x16, 0x81, + 0xca, 0xdb, 0x43, 0xab, 0xec, 0x4c, 0x6e, 0x7c, + 0xc1, 0x0b, 0x22, 0x22, 0x43, 0x1d, 0xb6, 0x0c, + 0xc1, 0xb9, 0xcf, 0xe4, 0x53, 0xee, 0x1d, 0x3e, + 0x88, 0xa7, 0x13, 0xbe, 0x7f, 0xbd, 0xae, 0x72, + 0xcf, 0xcd, 0x63, 0xd2, 0xc3, 0x18, 0x58, 0x92, + 0xa2, 0xad, 0xb5, 0x09, 0x9d, 0x91, 0x03, 0xdd, + 0x3c, 0xe2, 0x1c, 0xde, 0x78, 0x00, 0x03, 0x88, + 0x30, 0x82, 0x03, 0x84, 0x30, 0x82, 0x02, 0x6c, + 0x02, 0x09, 0x00, 0xab, 0xed, 0xa6, 0xe4, 0x4a, + 0x2b, 0x2b, 0xf8, 0x30, 0x0d, 0x06, 
0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x31, + 0x38, 0x34, 0x30, 0x5a, 0x17, 0x0d, 0x31, 0x33, + 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x31, 0x38, + 0x34, 0x30, 0x5a, 0x30, 0x81, 0x80, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, + 0x20, 0x43, 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, + 0x61, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x63, 0x6f, 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, + 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, + 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, + 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, + 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, + 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xce, + 0x13, 0xf0, 0x72, 0xb0, 0x61, 0xc8, 0x18, 0x37, + 0x8a, 0x41, 0x3d, 0x20, 0xa1, 0x1c, 0xcb, 0xbf, + 0xf6, 0x3b, 0x74, 0x26, 0x2a, 0x96, 0x11, 0xec, + 0x53, 0xa1, 0xcc, 0x7d, 0x77, 0x56, 0x45, 0x0f, + 0x36, 0xb7, 0xf2, 0x48, 0x92, 0x1a, 0x62, 0xcc, + 0xb6, 0xc0, 0xa1, 0x2f, 0x44, 0x2b, 0xc1, 0x89, + 0xcb, 0x6e, 0x1e, 0xdb, 0x57, 0x92, 0xd5, 0x97, + 0x60, 0x8c, 0x41, 0x2c, 0xd9, 0x20, 0xfe, 0xe9, + 0x1f, 0x8e, 0xfc, 0x7f, 0x02, 0x44, 0x0f, 0x28, + 0x81, 0xd6, 0x0c, 0xcd, 0xbc, 0xf0, 0x57, 0x6c, + 0xcc, 0xa7, 0xba, 0x06, 0xa0, 0xa6, 0x91, 0xda, + 0xef, 0x46, 0x8a, 0x60, 0x0f, 0x52, 0x6c, 0x90, + 0x6c, 0x8c, 0x44, 0xaf, 0xb0, 0x9d, 0x90, 0xba, + 0x21, 0x58, 0xa0, 0x3c, 0xee, 0x54, 0xb5, 0x29, + 0x26, 0x1f, 0x0a, 0xac, 0xef, 0x48, 0x68, 0x33, + 0xd0, 0x33, 0xd0, 0x8b, 0x1a, 0xec, 0x6e, 0x2f, + 0xb5, 0x4a, 0x53, 0xc2, 0x1a, 0xd2, 0xf1, 0x50, + 0x05, 0x59, 0x5c, 0xd9, 0xda, 0x03, 0x0a, 0x47, + 0xb7, 0xdd, 0xf7, 0x3a, 0x69, 0xf5, 0x4e, 0xea, + 0x4a, 0xc2, 0xca, 0x54, 0xb0, 0x8b, 0x76, 0xe1, + 0x02, 0x2d, 0x52, 0x67, 0xb9, 0xdd, 0x50, 0xc9, + 0x3b, 0x07, 0x24, 0x22, 0x6a, 0x00, 0x1d, 0x58, + 0x83, 0xa8, 0xec, 0x95, 0xf1, 0xda, 0xe2, 0x73, + 0xa0, 0xa1, 0x72, 0x60, 0x9e, 0x86, 0x53, 0xcb, + 0x45, 0xa8, 0xc2, 0xa0, 0x50, 0xa0, 0x53, 0xd6, + 0xfc, 0x18, 0x84, 0xb5, 0x4a, 0x26, 0xd0, 0xa2, + 0xaa, 0xd0, 0xff, 0xb6, 0xfe, 0x3a, 0x9c, 0xb5, + 0x19, 0x3b, 0x3f, 0xe1, 0x48, 0x0d, 
0xa4, 0x09, + 0x4f, 0x83, 0xc9, 0xc0, 0xc9, 0xa6, 0x0b, 0x58, + 0x1f, 0x1c, 0x7b, 0xac, 0xa2, 0x42, 0xbc, 0x61, + 0xf4, 0x21, 0x8a, 0x00, 0xda, 0x14, 0xa0, 0x60, + 0x03, 0xfe, 0x93, 0x12, 0x6c, 0x56, 0xcd, 0x02, + 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0x25, 0x29, 0x3b, 0x1e, 0xc3, 0x58, 0x32, 0xe6, + 0x23, 0xc8, 0xee, 0x18, 0xf0, 0x1d, 0x62, 0x6d, + 0x3b, 0x59, 0x99, 0x3a, 0xfe, 0x49, 0x72, 0x07, + 0x3f, 0x58, 0x93, 0xdb, 0xc0, 0xaf, 0xb0, 0xb3, + 0x5c, 0xd1, 0x5c, 0x98, 0xc8, 0xea, 0x4a, 0xe4, + 0x58, 0x73, 0x0d, 0x57, 0xc5, 0x13, 0x7c, 0x5c, + 0x79, 0x66, 0xda, 0x04, 0x1d, 0xe5, 0x98, 0xda, + 0x35, 0x47, 0x44, 0xb0, 0xd2, 0x7a, 0x66, 0x9d, + 0xcd, 0x41, 0xa5, 0x8f, 0xa1, 0x11, 0xb2, 0x1a, + 0x87, 0xc0, 0xcd, 0x55, 0xed, 0xb4, 0x7b, 0x33, + 0x72, 0xeb, 0xf7, 0xe3, 0x7b, 0x8b, 0x02, 0x86, + 0xe9, 0x2b, 0x26, 0x32, 0x9f, 0x99, 0xf1, 0xcb, + 0x93, 0xab, 0xb9, 0x16, 0xb3, 0x9a, 0xb2, 0x22, + 0x13, 0x21, 0x1f, 0x5b, 0xcc, 0xa2, 0x59, 0xbb, + 0x69, 0xf2, 0xb8, 0x07, 0x80, 0xce, 0x0c, 0xf7, + 0x98, 0x4c, 0x85, 0xc2, 0x96, 0x6a, 0x22, 0x05, + 0xe9, 0xbe, 0x48, 0xb0, 0x02, 0x5b, 0x69, 0x28, + 0x18, 0x88, 0x96, 0xe3, 0xd7, 0xc6, 0x7a, 0xd3, + 0xe9, 0x99, 0xff, 0x9d, 0xc3, 0x61, 0x4d, 0x9a, + 0x96, 0xf2, 0xc6, 0x33, 0x4d, 0xe5, 0x5d, 0x5a, + 0x68, 0x64, 0x5a, 0x82, 0x35, 0x65, 0x25, 0xe3, + 0x8c, 0x5b, 0xb0, 0xf6, 0x96, 0x56, 0xbc, 0xbf, + 0x97, 0x76, 0x4b, 0x66, 0x44, 0x81, 0xa4, 0xc4, + 0xa7, 0x31, 0xc5, 0xa1, 0x4f, 0xe8, 0xa4, 0xca, + 0x20, 0xf5, 0x01, 0x5b, 0x99, 0x4f, 0x5a, 0xf4, + 0xf0, 0x78, 0xbf, 0x71, 0x49, 0xd5, 0xf1, 0xc1, + 0xa2, 0x18, 0xfd, 0x72, 0x5b, 0x16, 0xe8, 0x92, + 0xc7, 0x37, 0x48, 0xaf, 0xee, 0x24, 0xfc, 0x35, + 0x0b, 0xc2, 0xdd, 0x05, 0xc7, 0x6e, 0xa3, 0x29, + 0xbb, 0x29, 0x7d, 0xd3, 0x2b, 0x94, 0x80, 0xc3, + 0x40, 0x53, 0x0e, 0x03, 0x54, 0x3d, 0x7b, 0x8b, + 0xce, 0xf9, 0xa4, 0x03, 0x27, 0x63, 0xec, 0x51, + 0x00, 0x03, 0xe5, 0x30, 0x82, 0x03, 0xe1, 0x30, + 0x82, 0x02, 0xc9, 0xa0, 0x03, 0x02, 0x01, 0x02, + 0x02, 0x09, 0x00, 0xcc, 0x22, 0x4c, 0x4b, 0x98, + 0xa2, 0x88, 0xfc, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x30, + 0x35, 0x30, 0x31, 0x5a, 0x17, 0x0d, 0x32, 0x33, + 0x30, 0x35, 0x32, 0x34, 0x32, 0x31, 0x30, 0x35, + 0x30, 0x31, 0x5a, 0x30, 0x81, 0x86, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 
0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, + 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, + 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, + 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, + 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, + 0x02, 0x82, 0x01, 0x01, 0x00, 0xf0, 0xfb, 0xad, + 0x80, 0x5e, 0x37, 0xd3, 0x6d, 0xee, 0x2e, 0xcc, + 0xbc, 0x0c, 0xd7, 0x56, 0x4b, 0x56, 0x45, 0xcd, + 0x28, 0xb6, 0x22, 0xe9, 0xe2, 0x0f, 0xd1, 0x87, + 0x2a, 0x27, 0xce, 0x77, 0x8d, 0x6e, 0x0e, 0x0f, + 0xfb, 0x66, 0xe1, 0xb5, 0x0e, 0x9a, 0xb6, 0x05, + 0x8e, 0xb3, 0xe1, 0xc5, 0x77, 0x86, 0x5b, 0x46, + 0xd2, 0x0b, 0x92, 0x03, 0x1b, 0x89, 0x0c, 0x1b, + 0x10, 0x0e, 0x99, 0x8f, 0xe2, 0x17, 0xe8, 0xc2, + 0x30, 0x00, 0x47, 0xd6, 0xfc, 0xf9, 0x0f, 0x3b, + 0x75, 0x34, 0x8d, 0x4d, 0xb0, 0x99, 0xb7, 0xa0, + 0x6d, 0xa0, 0xb6, 0xad, 0xda, 0x07, 0x5e, 0x38, + 0x2e, 0x02, 0xe4, 0x30, 0x6d, 0xae, 0x13, 0x72, + 0xd4, 0xc8, 0xce, 0x14, 0x07, 0xae, 0x23, 0x8c, + 0x8f, 0x9e, 0x8c, 0x60, 0xd6, 0x06, 0xb9, 0xef, + 0x00, 0x18, 0xc0, 0x1d, 0x25, 0x1e, 0xda, 0x3e, + 0x2f, 0xcf, 0x2b, 0x56, 0x84, 0x9e, 0x30, 0x21, + 0xc7, 0x29, 0xf6, 0x03, 0x8a, 0x24, 0xf9, 0x34, + 0xac, 0x65, 0x9d, 0x80, 0x36, 0xc8, 0x3b, 0x15, + 0x10, 0xbd, 0x51, 0xe9, 0xbc, 0x02, 0xe1, 0xe9, + 0xb3, 0x5a, 0x9a, 0x99, 0x41, 0x1b, 0x27, 0xa0, + 0x4d, 0x50, 0x9e, 0x27, 0x7f, 0xa1, 0x7d, 0x09, + 0x87, 0xbd, 0x8a, 0xca, 0x5f, 0xb1, 0xa5, 0x08, + 0xb8, 0x04, 0xd4, 0x52, 0x89, 0xaa, 0xe0, 0x7d, + 0x42, 0x2e, 0x2f, 0x15, 0xee, 0x66, 0x57, 0x0f, + 0x13, 0x19, 0x45, 0xa8, 0x4b, 0x5d, 0x81, 0x66, + 0xcc, 0x12, 0x37, 0x94, 0x5e, 0xfd, 0x3c, 0x10, + 0x81, 0x51, 0x3f, 0xfa, 0x0f, 0xdd, 0xa1, 0x89, + 0x03, 0xa9, 0x78, 0x91, 0xf5, 0x3b, 0xf3, 0xbc, + 0xac, 0xbe, 0x93, 0x30, 0x2e, 0xbe, 0xca, 0x7f, + 0x46, 0xd3, 0x28, 0xb4, 0x4e, 0x91, 0x7b, 0x5b, + 0x43, 0x6c, 0xaf, 0x9b, 0x5c, 0x6a, 0x6d, 0x5a, + 0xdb, 0x79, 0x5e, 0x6a, 0x6b, 0x02, 0x03, 0x01, + 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, + 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, + 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, 0x7d, + 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, 0x90, + 0xa8, 0xa1, 0x37, 0x6b, 0xda, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, + 0x80, 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, + 0x7d, 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, + 0x90, 0xa8, 0xa1, 0x37, 0x6b, 0xda, 0x30, 0x0c, + 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, + 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0xcd, 0x6f, 0x73, 0x4d, 0x56, 0x0b, 0xf3, 0x2e, + 0x1c, 0xe2, 0x02, 0x0c, 0x14, 0xbb, 0x2f, 0xdd, + 0x3c, 0x43, 0xfe, 0xdf, 0x94, 0x2d, 0xa9, 0x89, + 0x81, 0x51, 0xf8, 0x5f, 0xa7, 0xa0, 0x13, 0xaa, + 0xcc, 0xb0, 0x18, 0xe2, 0x57, 0x3e, 0x0d, 0x29, + 0x93, 0xe8, 0x95, 0xd5, 0x1b, 0x53, 0xd2, 0x51, + 0xf2, 0xbd, 0xf5, 0x9e, 0x7b, 0x22, 0x65, 0x62, + 0x5c, 0xc4, 0x4c, 0x1d, 0xe8, 0xe9, 0xc3, 0xd4, + 0x2b, 0xe7, 0x78, 0xcb, 0x10, 0xf3, 0xfe, 0x06, + 0x83, 0xdc, 0x3a, 0x1e, 0x62, 0x10, 
0xc0, 0x46, + 0x77, 0xc6, 0x9d, 0x9f, 0xab, 0x96, 0x25, 0x5c, + 0xfb, 0x26, 0xc1, 0x15, 0x1f, 0xa5, 0x33, 0xee, + 0x4f, 0x9a, 0x14, 0x6a, 0x14, 0x97, 0x93, 0x2b, + 0x95, 0x0b, 0xdc, 0xa8, 0xd7, 0x69, 0x2e, 0xf0, + 0x01, 0x0e, 0xfd, 0x4e, 0xd0, 0xd9, 0xa8, 0xe5, + 0x65, 0xde, 0xfb, 0xca, 0xca, 0x1c, 0x5f, 0xf9, + 0x53, 0xa0, 0x87, 0xe7, 0x33, 0x9b, 0x2f, 0xcf, + 0xe4, 0x13, 0xfc, 0xec, 0x7a, 0x6c, 0xb0, 0x90, + 0x13, 0x9b, 0xb6, 0xc5, 0x03, 0xf6, 0x0e, 0x5e, + 0xe2, 0xe4, 0x26, 0xc1, 0x7e, 0x53, 0xfe, 0x69, + 0xa3, 0xc7, 0xd8, 0x8e, 0x6e, 0x94, 0x32, 0xa0, + 0xde, 0xca, 0xb6, 0xcc, 0xd6, 0x01, 0xd5, 0x78, + 0x40, 0x28, 0x63, 0x9b, 0xee, 0xcf, 0x09, 0x3b, + 0x35, 0x04, 0xf0, 0x14, 0x02, 0xf6, 0x80, 0x0e, + 0x90, 0xb2, 0x94, 0xd2, 0x25, 0x16, 0xb8, 0x7a, + 0x76, 0x87, 0x84, 0x9f, 0x84, 0xc5, 0xaf, 0xc2, + 0x6d, 0x68, 0x7a, 0x84, 0x9c, 0xc6, 0x8a, 0x63, + 0x60, 0x87, 0x6a, 0x25, 0xc1, 0xa1, 0x78, 0x0f, + 0xba, 0xe8, 0x5f, 0xe1, 0xba, 0xac, 0xa4, 0x6f, + 0xdd, 0x09, 0x3f, 0x12, 0xcb, 0x1d, 0xf3, 0xcf, + 0x48, 0xd7, 0xd3, 0x26, 0xe8, 0x9c, 0xc3, 0x53, + 0xb3, 0xba, 0xdc, 0x32, 0x99, 0x98, 0x96, 0xd6, + 0x16, 0x03, 0x01, 0x01, 0x06, 0x10, 0x00, 0x01, + 0x02, 0x01, 0x00, 0x6e, 0xea, 0x15, 0x6f, 0x21, + 0xbd, 0x2d, 0x14, 0xde, 0x9d, 0x02, 0xeb, 0xdf, + 0x3b, 0x09, 0x75, 0xaf, 0x32, 0x80, 0x0c, 0xe2, + 0xc2, 0x7b, 0x0d, 0xca, 0x24, 0x96, 0xf6, 0x3e, + 0xa5, 0x97, 0xba, 0x0c, 0x50, 0x7e, 0xb3, 0x68, + 0x58, 0xc6, 0xd8, 0xec, 0xab, 0xa9, 0xd9, 0x3a, + 0xb1, 0x49, 0xea, 0x2f, 0xd7, 0xdb, 0x15, 0x1b, + 0xb5, 0xaf, 0xec, 0xcc, 0x40, 0x5c, 0xe6, 0x0f, + 0xc4, 0x33, 0x71, 0xe7, 0x41, 0xc0, 0x04, 0x89, + 0x60, 0x3e, 0xb7, 0xe6, 0xda, 0x38, 0x62, 0x27, + 0x6a, 0xd9, 0xfb, 0x93, 0x94, 0x9d, 0xc1, 0x63, + 0x92, 0x5c, 0x88, 0x19, 0x38, 0x81, 0x79, 0x9d, + 0x59, 0x48, 0x5e, 0xd3, 0xc8, 0xea, 0xcb, 0x6e, + 0x66, 0x66, 0x03, 0xdc, 0x0c, 0x2d, 0x95, 0xb1, + 0x4d, 0x68, 0xc7, 0xc5, 0x6e, 0xfa, 0x94, 0x14, + 0xdf, 0x2c, 0x70, 0x69, 0x04, 0xf4, 0x69, 0xf1, + 0xf0, 0x07, 0xbd, 0x23, 0x53, 0x63, 0xb3, 0x41, + 0xec, 0xa7, 0x10, 0xa5, 0x04, 0x84, 0x24, 0xb5, + 0xf5, 0x0c, 0x0f, 0x5d, 0x02, 0x47, 0x79, 0x60, + 0x76, 0xbb, 0xdf, 0x60, 0xa6, 0xd7, 0x4d, 0x08, + 0x7d, 0xa6, 0x85, 0x4f, 0x61, 0xac, 0x96, 0x3d, + 0xbc, 0xaf, 0x07, 0xb0, 0x7c, 0xb6, 0x23, 0x3e, + 0x1f, 0x0a, 0x62, 0x77, 0x97, 0x77, 0xae, 0x33, + 0x55, 0x0f, 0x85, 0xdf, 0xdc, 0xbe, 0xc6, 0xe0, + 0xe0, 0x14, 0x83, 0x4c, 0x50, 0xf0, 0xe5, 0x2d, + 0xdc, 0x0b, 0x74, 0x7f, 0xc3, 0x28, 0x98, 0x16, + 0xda, 0x74, 0xe6, 0x40, 0xc2, 0xf0, 0xea, 0xc0, + 0x00, 0xd5, 0xfc, 0x16, 0xe4, 0x43, 0xa1, 0xfc, + 0x31, 0x19, 0x81, 0x62, 0xec, 0x2b, 0xfe, 0xcc, + 0xe8, 0x19, 0xed, 0xa1, 0x1e, 0x6a, 0x49, 0x73, + 0xde, 0xc4, 0xe9, 0x22, 0x0a, 0x21, 0xde, 0x45, + 0x1e, 0x55, 0x12, 0xd9, 0x44, 0xef, 0x4e, 0xaa, + 0x5e, 0x26, 0x57, 0x16, 0x03, 0x01, 0x01, 0x06, + 0x0f, 0x00, 0x01, 0x02, 0x01, 0x00, 0x23, 0xde, + 0xb0, 0x39, 0x60, 0xe9, 0x82, 0xb8, 0xed, 0x17, + 0x78, 0xd2, 0x37, 0x0e, 0x85, 0x69, 0xda, 0xcc, + 0x9f, 0x54, 0x4d, 0xda, 0xce, 0xe8, 0x5a, 0xeb, + 0x3c, 0x61, 0x4c, 0x7a, 0x84, 0x1f, 0x21, 0x03, + 0xb3, 0x8a, 0x74, 0x3b, 0x6a, 0x9e, 0x4f, 0x44, + 0xd9, 0x75, 0x0a, 0xd8, 0x7e, 0x56, 0xa3, 0xef, + 0x5a, 0xfe, 0x8a, 0x35, 0xce, 0x29, 0x18, 0xfe, + 0xa6, 0x61, 0x8e, 0x8f, 0x00, 0x90, 0x2d, 0x85, + 0xe3, 0x6c, 0x0e, 0x8d, 0x8c, 0x27, 0x80, 0x8c, + 0x9f, 0x51, 0xe9, 0xd3, 0xe6, 0x7d, 0x70, 0xe9, + 0xfb, 0xcb, 0xb8, 0x24, 0x94, 0x30, 0x9b, 0xba, + 0x01, 0x14, 0x49, 0x9f, 0xaf, 0x09, 0xd8, 0x26, + 0x1b, 0x23, 0xa4, 0xb8, 0xd9, 0x44, 0x0a, 0xdc, + 0x4e, 0x27, 0xe7, 0x32, 0xf5, 0x9c, 
0xf3, 0x8d, + 0xa0, 0xc5, 0xc4, 0xbe, 0x92, 0x02, 0x85, 0x4f, + 0x33, 0x8f, 0xa7, 0xf7, 0x87, 0xa9, 0x44, 0xf3, + 0x64, 0xbd, 0x32, 0x04, 0xeb, 0xc5, 0xc3, 0x62, + 0xe9, 0xda, 0x2f, 0x95, 0x5c, 0xf7, 0x58, 0x3e, + 0xad, 0x35, 0xd7, 0x7e, 0xad, 0xdd, 0x32, 0x8d, + 0xce, 0x81, 0x08, 0xad, 0x49, 0xf7, 0xdb, 0xf7, + 0xaf, 0xe3, 0xc6, 0xb2, 0xdd, 0x76, 0x0c, 0xcf, + 0x0f, 0x87, 0x79, 0x90, 0x10, 0x79, 0xc6, 0xc8, + 0x7b, 0xe6, 0x23, 0xf2, 0xda, 0x33, 0xca, 0xe1, + 0xf0, 0x59, 0x42, 0x43, 0x03, 0x56, 0x19, 0xe3, + 0x8b, 0xe6, 0xa8, 0x70, 0xbc, 0x80, 0xfa, 0x24, + 0xae, 0x03, 0x13, 0x30, 0x0d, 0x1f, 0xab, 0xb7, + 0x82, 0xd9, 0x24, 0x90, 0x80, 0xbf, 0x75, 0xe1, + 0x0d, 0x1c, 0xb2, 0xfe, 0x92, 0x2c, 0x4d, 0x21, + 0xe9, 0x5d, 0xa1, 0x68, 0xf3, 0x16, 0xd8, 0x3f, + 0xb2, 0xc3, 0x00, 0x3e, 0xd8, 0x42, 0x25, 0x5c, + 0x90, 0x11, 0xc0, 0x1b, 0xd4, 0x26, 0x5c, 0x37, + 0x47, 0xbd, 0xf8, 0x1e, 0x34, 0xa9, 0x14, 0x03, + 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, 0x00, + 0x24, 0x8f, 0x94, 0x7e, 0x01, 0xee, 0xd5, 0x4f, + 0x83, 0x41, 0x31, 0xc0, 0x36, 0x81, 0x46, 0xc3, + 0xc0, 0xcc, 0x9c, 0xea, 0x0f, 0x29, 0x04, 0x10, + 0x43, 0x1e, 0x08, 0x6e, 0x08, 0xce, 0xb2, 0x62, + 0xa6, 0x0f, 0x68, 0x9f, 0x99, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0xd9, 0x46, 0x5b, 0xbf, 0xfd, + 0x8a, 0xa1, 0x08, 0xd5, 0xf3, 0x0c, 0x1c, 0xd8, + 0xa8, 0xb3, 0xe5, 0x89, 0x83, 0x9e, 0x23, 0x47, + 0x81, 0x66, 0x77, 0x11, 0x98, 0xe5, 0xf4, 0xac, + 0x06, 0xe9, 0x4c, 0x05, 0x8b, 0xc4, 0x16, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x1a, 0xc5, 0x28, 0xfd, + 0x71, 0xc0, 0xe6, 0x89, 0xb8, 0x82, 0x92, 0x1b, + 0xdd, 0x39, 0xe5, 0xbf, 0x41, 0x82, 0x1f, 0xc1, + 0xbc, 0x85, 0xe5, 0x32, 0x1b, 0x93, 0x46, 0x15, + 0x03, 0x01, 0x00, 0x16, 0x1a, 0x8b, 0x10, 0x42, + 0x12, 0xb2, 0xbd, 0xd3, 0xf1, 0x74, 0x1f, 0xc2, + 0x10, 0x08, 0xc2, 0x79, 0x99, 0x2c, 0x55, 0xef, + 0x4a, 0xbd, + }, +} + +// $ openssl s_server -tls1_2 -cert server.crt -key server.key \ +// -cipher ECDHE-RSA-AES128-SHA -port 10443 +// $ go test -test.run "TestRunClient" -connect -ciphersuites=0xc013 \ +// -minversion=0x0303 -maxversion=0x0303 +var clientTLS12Script = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x58, 0x01, 0x00, 0x00, + 0x54, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x13, + 0x01, 0x00, 0x00, 0x29, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, 0x00, + 0x0d, 0x00, 0x0a, 0x00, 0x08, 0x04, 0x01, 0x04, + 0x03, 0x02, 0x01, 0x02, 0x03, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x03, 0x52, 0x65, 0x67, 0xbd, 0xe8, + 0x72, 0x03, 0x6a, 0x52, 0x8d, 0x28, 0x2c, 0x9a, + 0x53, 0xff, 0xc2, 0xa1, 0x62, 0x5f, 0x54, 0xfb, + 0x73, 0x00, 0xcf, 0x4d, 0x28, 0x36, 0xc2, 0xee, + 0xfd, 0x78, 0xf0, 0x20, 0x6f, 0xbe, 0x49, 0xec, + 0x5b, 0x6f, 0xf9, 0x53, 0x42, 0x69, 0x0d, 0x6d, + 0x8b, 0x68, 0x2e, 0xca, 0x3c, 0x3c, 0x88, 0x9e, + 0x8b, 0xf9, 0x32, 0x65, 0x09, 0xd6, 0xa0, 0x7d, + 0xea, 0xc6, 0xd5, 0xc4, 0xc0, 0x13, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, + 0x02, 0x16, 0x03, 0x03, 0x02, 0x39, 0x0b, 0x00, + 0x02, 0x35, 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, + 0x30, 0x82, 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xb1, 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, + 0x30, 0x0d, 0x06, 
0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, + 0x31, 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, + 0x0d, 0x31, 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, + 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, + 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, + 0xc3, 0x84, 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, + 0x0f, 0x15, 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, + 0xe6, 0x36, 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, + 0x8d, 0xe0, 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, + 0x55, 0x66, 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, + 0xfe, 0xa8, 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, + 0xee, 0xd7, 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, + 0xff, 0x2a, 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, + 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, + 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, + 0x9a, 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, + 0x22, 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, + 0x2b, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, + 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, + 0x36, 0x40, 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, + 0x59, 0x9f, 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, + 0xec, 0x83, 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, + 0x39, 0xac, 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, + 0x1d, 0x99, 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, + 0x05, 0x08, 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, + 0xbb, 0x77, 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, + 0x5e, 0x9c, 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, + 0x03, 0x03, 0x00, 0x8d, 0x0c, 0x00, 0x00, 0x89, + 0x03, 0x00, 0x17, 0x41, 0x04, 0x48, 0x93, 0x62, + 0x6a, 0xf8, 0x7c, 0x94, 0xcc, 0xcc, 0x0a, 0x9b, + 0x5e, 0x11, 0xad, 0x0b, 0x30, 0xc4, 0x5d, 0xf7, + 0x63, 0x24, 0xc1, 
0xb0, 0x40, 0x5f, 0xff, 0x9f, + 0x0d, 0x7e, 0xd5, 0xa5, 0xd0, 0x4f, 0x80, 0x16, + 0xa8, 0x66, 0x18, 0x31, 0x1f, 0x81, 0xb2, 0x9a, + 0x41, 0x62, 0x5b, 0xcf, 0x73, 0xac, 0x4a, 0x64, + 0xb5, 0xc1, 0x46, 0x4d, 0x8a, 0xac, 0x25, 0xba, + 0x81, 0x7f, 0xbe, 0x64, 0x68, 0x04, 0x01, 0x00, + 0x40, 0x4e, 0x3f, 0x1e, 0x04, 0x4c, 0xef, 0xd2, + 0xa6, 0x82, 0xe6, 0x7c, 0x76, 0x23, 0x17, 0xb9, + 0xe7, 0x52, 0x15, 0x6b, 0x3d, 0xb2, 0xb1, 0x17, + 0x7d, 0xe6, 0xde, 0x06, 0x87, 0x30, 0xb0, 0xb5, + 0x57, 0xae, 0xdf, 0xb2, 0xdc, 0x8d, 0xab, 0x76, + 0x9c, 0xaa, 0x45, 0x6d, 0x23, 0x5d, 0xc1, 0xa8, + 0x7b, 0x79, 0x79, 0xb1, 0x3c, 0xdc, 0xf5, 0x33, + 0x2c, 0xa1, 0x62, 0x3e, 0xbd, 0xf5, 0x5d, 0x6c, + 0x87, 0x16, 0x03, 0x03, 0x00, 0x04, 0x0e, 0x00, + 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x03, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x03, 0x00, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x17, + 0x54, 0x51, 0xb6, 0x1d, 0x8e, 0xe4, 0x6b, 0xed, + 0x5b, 0xa1, 0x27, 0x7f, 0xdc, 0xa9, 0xa5, 0xcf, + 0x38, 0xe6, 0x5d, 0x17, 0x34, 0xf9, 0xc0, 0x07, + 0xb8, 0xbe, 0x56, 0xe6, 0xd6, 0x6a, 0xb6, 0x26, + 0x4e, 0x45, 0x8d, 0x48, 0xe9, 0xc6, 0xb1, 0xa1, + 0xea, 0xdc, 0xb1, 0x37, 0xd9, 0xf6, + }, + { + 0x14, 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x03, 0x00, 0x40, 0x00, 0x68, 0xc5, 0x27, 0xd5, + 0x3d, 0xba, 0x04, 0xde, 0x63, 0xf1, 0x5b, 0xc3, + 0x86, 0xb9, 0x82, 0xc7, 0xb3, 0x90, 0x31, 0xea, + 0x15, 0xe1, 0x42, 0x76, 0x7d, 0x90, 0xcb, 0xc9, + 0xd1, 0x05, 0xe6, 0x8c, 0x76, 0xc7, 0x9a, 0x35, + 0x67, 0xa2, 0x70, 0x9a, 0x8a, 0x6c, 0xb5, 0x6b, + 0xc7, 0x87, 0xf3, 0x65, 0x0a, 0xa0, 0x98, 0xba, + 0x57, 0xbb, 0x31, 0x7b, 0x1f, 0x1a, 0xf7, 0x2a, + 0xf3, 0x12, 0xf6, + }, + { + 0x17, 0x03, 0x03, 0x00, 0x30, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x10, 0x80, + 0x54, 0x1e, 0x72, 0xd3, 0x1a, 0x86, 0x1c, 0xc4, + 0x4a, 0x9b, 0xd4, 0x80, 0xd2, 0x03, 0x35, 0x0d, + 0xe4, 0x12, 0xc2, 0x3d, 0x79, 0x4a, 0x2c, 0xba, + 0xc2, 0xad, 0xf3, 0xd2, 0x16, 0x15, 0x03, 0x03, + 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x9b, 0x68, 0x78, 0x92, 0x28, + 0x62, 0x02, 0x65, 0x87, 0x90, 0xe4, 0x32, 0xd7, + 0x72, 0x08, 0x70, 0xb8, 0x52, 0x32, 0x1f, 0x97, + 0xd4, 0x6a, 0xc6, 0x28, 0x83, 0xb0, 0x1d, 0x6e, + 0x16, 0xd5, + }, +} + +// $ openssl s_server -tls1_2 -cert server.crt -key server.key \ +// -port 10443 -verify 0 +// $ go test -test.run "TestRunClient" -connect -ciphersuites=0xc02f \ +// -maxversion=0x0303 +var clientTLS12ClientCertScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x58, 0x01, 0x00, 0x00, + 0x54, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x2f, + 0x01, 0x00, 0x00, 0x29, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 
0x0b, 0x00, 0x02, 0x01, 0x00, 0x00, + 0x0d, 0x00, 0x0a, 0x00, 0x08, 0x04, 0x01, 0x04, + 0x03, 0x02, 0x01, 0x02, 0x03, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x03, 0x52, 0x65, 0x67, 0xe0, 0xe8, + 0xf1, 0x13, 0x2a, 0x83, 0x28, 0xa8, 0x2e, 0x76, + 0x69, 0xe6, 0x89, 0x55, 0x6c, 0x48, 0x49, 0x2e, + 0x00, 0xf6, 0x87, 0x6c, 0x13, 0xa1, 0xd4, 0xaa, + 0xd0, 0x76, 0x3b, 0x20, 0xe4, 0xd6, 0x5b, 0x1d, + 0x11, 0xf2, 0x42, 0xf2, 0x82, 0x0c, 0x0d, 0x66, + 0x6d, 0xec, 0x52, 0xf8, 0x4a, 0xd9, 0x45, 0xcf, + 0xe4, 0x4a, 0xba, 0x8b, 0xf1, 0xab, 0x55, 0xe4, + 0x57, 0x18, 0xa9, 0x36, 0xc0, 0x2f, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, 0x01, + 0x02, 0x16, 0x03, 0x03, 0x02, 0x39, 0x0b, 0x00, + 0x02, 0x35, 0x00, 0x02, 0x32, 0x00, 0x02, 0x2f, + 0x30, 0x82, 0x02, 0x2b, 0x30, 0x82, 0x01, 0xd5, + 0xa0, 0x03, 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, + 0xb1, 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, + 0x17, 0x0d, 0x31, 0x32, 0x30, 0x34, 0x30, 0x36, + 0x31, 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x17, + 0x0d, 0x31, 0x35, 0x30, 0x34, 0x30, 0x36, 0x31, + 0x37, 0x31, 0x30, 0x31, 0x33, 0x5a, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x5c, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x4b, + 0x00, 0x30, 0x48, 0x02, 0x41, 0x00, 0x9f, 0xb3, + 0xc3, 0x84, 0x27, 0x95, 0xff, 0x12, 0x31, 0x52, + 0x0f, 0x15, 0xef, 0x46, 0x11, 0xc4, 0xad, 0x80, + 0xe6, 0x36, 0x5b, 0x0f, 0xdd, 0x80, 0xd7, 0x61, + 0x8d, 0xe0, 0xfc, 0x72, 0x45, 0x09, 0x34, 0xfe, + 0x55, 0x66, 0x45, 0x43, 0x4c, 0x68, 0x97, 0x6a, + 0xfe, 0xa8, 0xa0, 0xa5, 0xdf, 0x5f, 0x78, 0xff, + 0xee, 0xd7, 0x64, 0xb8, 0x3f, 0x04, 0xcb, 0x6f, + 0xff, 0x2a, 0xfe, 0xfe, 0xb9, 0xed, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0x78, 0xa6, 0x97, 0x9a, + 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, 0x22, + 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, 0x2b, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0x78, 0xa6, 0x97, + 0x9a, 0x63, 0xb5, 0xc5, 0xa1, 0xa5, 0x33, 0xba, + 0x22, 0x7c, 0x23, 0x6e, 0x5b, 0x1b, 0x7a, 0xcc, + 0x2b, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 
0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0xb1, + 0x35, 0x13, 0x65, 0x11, 0x20, 0xc5, 0x92, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x41, 0x00, 0x85, + 0x36, 0x40, 0x73, 0xc1, 0xbb, 0x1a, 0xda, 0xd4, + 0x59, 0x9f, 0x2d, 0xa2, 0x70, 0x31, 0x46, 0x74, + 0xec, 0x83, 0x6e, 0xa8, 0xc8, 0x3c, 0x51, 0xaf, + 0x39, 0xac, 0xec, 0x40, 0xbc, 0xe8, 0x22, 0x46, + 0x1d, 0x99, 0xd6, 0x46, 0x2a, 0x24, 0xd4, 0x8b, + 0x05, 0x08, 0x4b, 0xfb, 0x35, 0x11, 0x6e, 0x92, + 0xbb, 0x77, 0xba, 0xe4, 0x12, 0xbb, 0xf4, 0xc8, + 0x5e, 0x9c, 0x81, 0xa8, 0x97, 0x60, 0x4c, 0x16, + 0x03, 0x03, 0x00, 0x8d, 0x0c, 0x00, 0x00, 0x89, + 0x03, 0x00, 0x17, 0x41, 0x04, 0xaa, 0xf0, 0x0c, + 0xa3, 0x60, 0xcf, 0x69, 0x1e, 0xad, 0x16, 0x9a, + 0x01, 0x40, 0xc6, 0x22, 0xc4, 0xbb, 0x06, 0x3b, + 0x84, 0x65, 0xea, 0xc7, 0xa2, 0x96, 0x79, 0x17, + 0x2f, 0xc7, 0xbe, 0x56, 0x39, 0xe4, 0x79, 0xf3, + 0xad, 0x17, 0xf3, 0x7e, 0xe2, 0x7b, 0xa2, 0x6f, + 0x3f, 0x96, 0xea, 0xe5, 0x0e, 0xea, 0x39, 0x79, + 0x77, 0xeb, 0x14, 0x18, 0xbb, 0x7c, 0x95, 0xda, + 0xa7, 0x51, 0x09, 0xba, 0xd7, 0x04, 0x01, 0x00, + 0x40, 0x82, 0x3e, 0xce, 0xee, 0x7e, 0xba, 0x3b, + 0x51, 0xb1, 0xba, 0x71, 0x2e, 0x54, 0xa9, 0xb9, + 0xe2, 0xb1, 0x59, 0x17, 0xa1, 0xac, 0x76, 0xb4, + 0x4e, 0xf1, 0xae, 0x65, 0x17, 0x2b, 0x43, 0x06, + 0x31, 0x29, 0x0b, 0xa0, 0x1e, 0xb6, 0xfa, 0x35, + 0xe8, 0x63, 0x06, 0xde, 0x13, 0x89, 0x83, 0x69, + 0x3b, 0xc2, 0x15, 0x73, 0x1c, 0xc5, 0x07, 0xe9, + 0x38, 0x9b, 0x06, 0x81, 0x1b, 0x97, 0x7c, 0xa6, + 0x89, 0x16, 0x03, 0x03, 0x00, 0x30, 0x0d, 0x00, + 0x00, 0x28, 0x03, 0x01, 0x02, 0x40, 0x00, 0x20, + 0x06, 0x01, 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, + 0x05, 0x02, 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, + 0x04, 0x03, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, + 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x0a, 0xfb, 0x0b, 0x00, 0x0a, + 0xf7, 0x00, 0x0a, 0xf4, 0x00, 0x03, 0x7e, 0x30, + 0x82, 0x03, 0x7a, 0x30, 0x82, 0x02, 0x62, 0x02, + 0x09, 0x00, 0xb4, 0x47, 0x58, 0x57, 0x2b, 0x67, + 0xc8, 0xc2, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, + 0x00, 0x30, 0x81, 0x80, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x55, + 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, 0x20, 0x43, + 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, 0x61, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x63, 0x6f, + 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, + 0x34, 0x34, 0x30, 0x30, 0x5a, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x34, + 0x34, 0x30, 0x30, 0x5a, 0x30, 0x7d, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x11, 0x30, 0x0f, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x08, 0x4e, 0x65, + 0x77, 0x20, 0x59, 0x6f, 0x72, 0x6b, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 
0x04, 0x07, 0x0c, + 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, 0x79, + 0x6e, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x0c, 0x07, 0x4d, 0x79, 0x20, 0x4c, + 0x65, 0x61, 0x66, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x0c, 0x0a, 0x6d, 0x79, + 0x6c, 0x65, 0x61, 0x66, 0x2e, 0x63, 0x6f, 0x6d, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x82, + 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, 0x02, + 0x82, 0x01, 0x01, 0x00, 0xa0, 0xa3, 0xef, 0xc1, + 0x44, 0x7d, 0xa2, 0xe3, 0x71, 0x98, 0x27, 0x63, + 0xb3, 0x1d, 0x71, 0x50, 0xa6, 0x34, 0x15, 0xcb, + 0xc9, 0x2a, 0xc3, 0xea, 0xe4, 0x9e, 0x9c, 0x49, + 0xa6, 0x01, 0x9b, 0x7e, 0xa9, 0xb5, 0x7a, 0xff, + 0x15, 0x92, 0x71, 0xc8, 0x97, 0x9c, 0x25, 0xb7, + 0x79, 0x2b, 0xff, 0xab, 0xc6, 0xb1, 0xa7, 0x00, + 0x90, 0xb2, 0x8b, 0xd7, 0x71, 0xd5, 0xc2, 0x3a, + 0xe6, 0x82, 0x42, 0x37, 0x89, 0x41, 0x04, 0xb0, + 0xba, 0xc7, 0x5b, 0x8a, 0x43, 0x9f, 0x97, 0x39, + 0x0c, 0x0f, 0xd5, 0x6d, 0x9e, 0x8d, 0xeb, 0xc0, + 0x26, 0xc5, 0x18, 0xe8, 0x7a, 0x3d, 0x32, 0x2e, + 0x38, 0x90, 0x40, 0x5b, 0x39, 0x2c, 0x07, 0xcb, + 0x24, 0x10, 0xc5, 0xc9, 0x3b, 0xe3, 0x66, 0x47, + 0x57, 0xb9, 0x6a, 0xad, 0x44, 0xf8, 0xd0, 0x70, + 0x62, 0x3b, 0x8e, 0xed, 0x60, 0x5f, 0x22, 0xf8, + 0xb8, 0x0c, 0xc9, 0x41, 0x2b, 0xc9, 0x80, 0x6e, + 0x4e, 0x1b, 0xe1, 0x20, 0xfc, 0x47, 0xa4, 0xac, + 0xc3, 0x3f, 0xe6, 0xc2, 0x81, 0x79, 0x03, 0x37, + 0x25, 0x89, 0xca, 0xd6, 0xa5, 0x46, 0x91, 0x63, + 0x41, 0xc5, 0x3e, 0xd5, 0xed, 0x7f, 0x4f, 0x8d, + 0x06, 0xc0, 0x89, 0x00, 0xbe, 0x37, 0x7b, 0x7e, + 0x73, 0xca, 0x70, 0x00, 0x14, 0x34, 0xbe, 0x47, + 0xbc, 0xb2, 0x6a, 0x28, 0xa5, 0x29, 0x84, 0xa8, + 0x9d, 0xc8, 0x1e, 0x77, 0x66, 0x1f, 0x9f, 0xaa, + 0x2b, 0x47, 0xdb, 0xdd, 0x6b, 0x9c, 0xa8, 0xfc, + 0x82, 0x36, 0x94, 0x62, 0x0d, 0x5c, 0x3f, 0xb2, + 0x01, 0xb4, 0xa5, 0xb8, 0xc6, 0x0e, 0x94, 0x5b, + 0xec, 0x5e, 0xbb, 0x7a, 0x63, 0x24, 0xf1, 0xf9, + 0xd6, 0x50, 0x08, 0xc1, 0xa3, 0xcc, 0x90, 0x07, + 0x5b, 0x04, 0x04, 0x42, 0x74, 0xcf, 0x37, 0xfa, + 0xf0, 0xa5, 0xd9, 0xd3, 0x86, 0x89, 0x89, 0x18, + 0xf3, 0x4c, 0xe2, 0x11, 0x02, 0x03, 0x01, 0x00, + 0x01, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, + 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, + 0x03, 0x82, 0x01, 0x01, 0x00, 0x90, 0xbb, 0xf9, + 0x5e, 0xba, 0x17, 0x1f, 0xac, 0x21, 0x9f, 0x6b, + 0x4a, 0x46, 0xd0, 0x6d, 0x3c, 0x8f, 0x3d, 0xf8, + 0x5e, 0x3e, 0x72, 0xaf, 0xa0, 0x1a, 0xf3, 0xff, + 0x89, 0xac, 0x5b, 0x7a, 0xe2, 0x91, 0x2a, 0x23, + 0x85, 0xc6, 0x4d, 0x47, 0x67, 0x01, 0x08, 0xa8, + 0x05, 0x1d, 0x01, 0x60, 0x50, 0x5f, 0x59, 0xad, + 0xfe, 0x7b, 0xc6, 0x0c, 0x54, 0x90, 0x68, 0x70, + 0x67, 0x2e, 0xed, 0x87, 0xf8, 0x69, 0x8a, 0xac, + 0x32, 0xfe, 0x6f, 0x90, 0x19, 0x2a, 0x64, 0x8d, + 0x82, 0x66, 0x05, 0x43, 0x88, 0xee, 0xf2, 0x30, + 0xed, 0xa4, 0x8f, 0xbf, 0xd6, 0x57, 0x20, 0xd4, + 0x43, 0x1d, 0x52, 0x96, 0x6f, 0xae, 0x09, 0x96, + 0x01, 0x52, 0x38, 0xe3, 0xaf, 0x99, 0xd7, 0xdc, + 0x14, 0x99, 0xc4, 0x8b, 0x0e, 0x04, 0x0f, 0xb3, + 0x14, 0x14, 0xd4, 0xa5, 0x93, 0xe1, 0xc9, 0x8a, + 0x81, 0xef, 0x63, 0xfc, 0x36, 0x77, 0x05, 0x06, + 0xf0, 0x2a, 0x04, 0x0a, 0xbe, 0x2e, 0xce, 0x81, + 0x3d, 0x23, 0xa1, 0xda, 0xd8, 0xeb, 0xc6, 0xea, + 0x5e, 0xcf, 0x28, 0x36, 0x51, 0x31, 0x95, 0x5e, + 0x40, 0x04, 0xed, 0xac, 0xc1, 0xc8, 0x56, 0x69, + 0x87, 0xec, 0x3b, 0x03, 0x3e, 
0x9d, 0x0f, 0x4c, + 0x4c, 0xeb, 0xd7, 0xba, 0x26, 0xdf, 0xe3, 0xde, + 0x10, 0xee, 0x93, 0x62, 0x8d, 0x73, 0x52, 0x6e, + 0xff, 0x37, 0x36, 0x98, 0x7b, 0x2d, 0x56, 0x4c, + 0xba, 0x09, 0xb8, 0xa7, 0xf0, 0x3b, 0x16, 0x81, + 0xca, 0xdb, 0x43, 0xab, 0xec, 0x4c, 0x6e, 0x7c, + 0xc1, 0x0b, 0x22, 0x22, 0x43, 0x1d, 0xb6, 0x0c, + 0xc1, 0xb9, 0xcf, 0xe4, 0x53, 0xee, 0x1d, 0x3e, + 0x88, 0xa7, 0x13, 0xbe, 0x7f, 0xbd, 0xae, 0x72, + 0xcf, 0xcd, 0x63, 0xd2, 0xc3, 0x18, 0x58, 0x92, + 0xa2, 0xad, 0xb5, 0x09, 0x9d, 0x91, 0x03, 0xdd, + 0x3c, 0xe2, 0x1c, 0xde, 0x78, 0x00, 0x03, 0x88, + 0x30, 0x82, 0x03, 0x84, 0x30, 0x82, 0x02, 0x6c, + 0x02, 0x09, 0x00, 0xab, 0xed, 0xa6, 0xe4, 0x4a, + 0x2b, 0x2b, 0xf8, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x31, + 0x38, 0x34, 0x30, 0x5a, 0x17, 0x0d, 0x31, 0x33, + 0x30, 0x36, 0x32, 0x35, 0x32, 0x31, 0x31, 0x38, + 0x34, 0x30, 0x5a, 0x30, 0x81, 0x80, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x15, 0x30, 0x13, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x0c, 0x4d, 0x79, + 0x20, 0x43, 0x41, 0x20, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x31, 0x17, 0x30, 0x15, 0x06, 0x03, + 0x55, 0x04, 0x03, 0x0c, 0x0e, 0x6d, 0x79, 0x63, + 0x61, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x63, 0x6f, 0x6d, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x09, 0x01, 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, + 0x61, 0x68, 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, + 0x69, 0x6c, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, + 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, + 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, + 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, 0x00, 0xce, + 0x13, 0xf0, 0x72, 0xb0, 0x61, 0xc8, 0x18, 0x37, + 0x8a, 0x41, 0x3d, 0x20, 0xa1, 0x1c, 0xcb, 0xbf, + 0xf6, 0x3b, 0x74, 0x26, 0x2a, 0x96, 0x11, 0xec, + 0x53, 0xa1, 0xcc, 0x7d, 0x77, 0x56, 0x45, 0x0f, + 0x36, 0xb7, 0xf2, 0x48, 0x92, 0x1a, 0x62, 0xcc, + 0xb6, 0xc0, 0xa1, 0x2f, 0x44, 0x2b, 0xc1, 0x89, + 0xcb, 0x6e, 0x1e, 0xdb, 0x57, 0x92, 0xd5, 0x97, + 0x60, 0x8c, 0x41, 0x2c, 0xd9, 0x20, 0xfe, 0xe9, + 0x1f, 0x8e, 0xfc, 0x7f, 0x02, 0x44, 0x0f, 0x28, + 0x81, 0xd6, 0x0c, 0xcd, 0xbc, 0xf0, 0x57, 0x6c, + 0xcc, 0xa7, 0xba, 0x06, 0xa0, 0xa6, 0x91, 0xda, + 0xef, 0x46, 0x8a, 0x60, 0x0f, 0x52, 0x6c, 0x90, + 0x6c, 0x8c, 0x44, 0xaf, 0xb0, 0x9d, 0x90, 0xba, + 0x21, 0x58, 0xa0, 0x3c, 0xee, 
0x54, 0xb5, 0x29, + 0x26, 0x1f, 0x0a, 0xac, 0xef, 0x48, 0x68, 0x33, + 0xd0, 0x33, 0xd0, 0x8b, 0x1a, 0xec, 0x6e, 0x2f, + 0xb5, 0x4a, 0x53, 0xc2, 0x1a, 0xd2, 0xf1, 0x50, + 0x05, 0x59, 0x5c, 0xd9, 0xda, 0x03, 0x0a, 0x47, + 0xb7, 0xdd, 0xf7, 0x3a, 0x69, 0xf5, 0x4e, 0xea, + 0x4a, 0xc2, 0xca, 0x54, 0xb0, 0x8b, 0x76, 0xe1, + 0x02, 0x2d, 0x52, 0x67, 0xb9, 0xdd, 0x50, 0xc9, + 0x3b, 0x07, 0x24, 0x22, 0x6a, 0x00, 0x1d, 0x58, + 0x83, 0xa8, 0xec, 0x95, 0xf1, 0xda, 0xe2, 0x73, + 0xa0, 0xa1, 0x72, 0x60, 0x9e, 0x86, 0x53, 0xcb, + 0x45, 0xa8, 0xc2, 0xa0, 0x50, 0xa0, 0x53, 0xd6, + 0xfc, 0x18, 0x84, 0xb5, 0x4a, 0x26, 0xd0, 0xa2, + 0xaa, 0xd0, 0xff, 0xb6, 0xfe, 0x3a, 0x9c, 0xb5, + 0x19, 0x3b, 0x3f, 0xe1, 0x48, 0x0d, 0xa4, 0x09, + 0x4f, 0x83, 0xc9, 0xc0, 0xc9, 0xa6, 0x0b, 0x58, + 0x1f, 0x1c, 0x7b, 0xac, 0xa2, 0x42, 0xbc, 0x61, + 0xf4, 0x21, 0x8a, 0x00, 0xda, 0x14, 0xa0, 0x60, + 0x03, 0xfe, 0x93, 0x12, 0x6c, 0x56, 0xcd, 0x02, + 0x03, 0x01, 0x00, 0x01, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0x25, 0x29, 0x3b, 0x1e, 0xc3, 0x58, 0x32, 0xe6, + 0x23, 0xc8, 0xee, 0x18, 0xf0, 0x1d, 0x62, 0x6d, + 0x3b, 0x59, 0x99, 0x3a, 0xfe, 0x49, 0x72, 0x07, + 0x3f, 0x58, 0x93, 0xdb, 0xc0, 0xaf, 0xb0, 0xb3, + 0x5c, 0xd1, 0x5c, 0x98, 0xc8, 0xea, 0x4a, 0xe4, + 0x58, 0x73, 0x0d, 0x57, 0xc5, 0x13, 0x7c, 0x5c, + 0x79, 0x66, 0xda, 0x04, 0x1d, 0xe5, 0x98, 0xda, + 0x35, 0x47, 0x44, 0xb0, 0xd2, 0x7a, 0x66, 0x9d, + 0xcd, 0x41, 0xa5, 0x8f, 0xa1, 0x11, 0xb2, 0x1a, + 0x87, 0xc0, 0xcd, 0x55, 0xed, 0xb4, 0x7b, 0x33, + 0x72, 0xeb, 0xf7, 0xe3, 0x7b, 0x8b, 0x02, 0x86, + 0xe9, 0x2b, 0x26, 0x32, 0x9f, 0x99, 0xf1, 0xcb, + 0x93, 0xab, 0xb9, 0x16, 0xb3, 0x9a, 0xb2, 0x22, + 0x13, 0x21, 0x1f, 0x5b, 0xcc, 0xa2, 0x59, 0xbb, + 0x69, 0xf2, 0xb8, 0x07, 0x80, 0xce, 0x0c, 0xf7, + 0x98, 0x4c, 0x85, 0xc2, 0x96, 0x6a, 0x22, 0x05, + 0xe9, 0xbe, 0x48, 0xb0, 0x02, 0x5b, 0x69, 0x28, + 0x18, 0x88, 0x96, 0xe3, 0xd7, 0xc6, 0x7a, 0xd3, + 0xe9, 0x99, 0xff, 0x9d, 0xc3, 0x61, 0x4d, 0x9a, + 0x96, 0xf2, 0xc6, 0x33, 0x4d, 0xe5, 0x5d, 0x5a, + 0x68, 0x64, 0x5a, 0x82, 0x35, 0x65, 0x25, 0xe3, + 0x8c, 0x5b, 0xb0, 0xf6, 0x96, 0x56, 0xbc, 0xbf, + 0x97, 0x76, 0x4b, 0x66, 0x44, 0x81, 0xa4, 0xc4, + 0xa7, 0x31, 0xc5, 0xa1, 0x4f, 0xe8, 0xa4, 0xca, + 0x20, 0xf5, 0x01, 0x5b, 0x99, 0x4f, 0x5a, 0xf4, + 0xf0, 0x78, 0xbf, 0x71, 0x49, 0xd5, 0xf1, 0xc1, + 0xa2, 0x18, 0xfd, 0x72, 0x5b, 0x16, 0xe8, 0x92, + 0xc7, 0x37, 0x48, 0xaf, 0xee, 0x24, 0xfc, 0x35, + 0x0b, 0xc2, 0xdd, 0x05, 0xc7, 0x6e, 0xa3, 0x29, + 0xbb, 0x29, 0x7d, 0xd3, 0x2b, 0x94, 0x80, 0xc3, + 0x40, 0x53, 0x0e, 0x03, 0x54, 0x3d, 0x7b, 0x8b, + 0xce, 0xf9, 0xa4, 0x03, 0x27, 0x63, 0xec, 0x51, + 0x00, 0x03, 0xe5, 0x30, 0x82, 0x03, 0xe1, 0x30, + 0x82, 0x02, 0xc9, 0xa0, 0x03, 0x02, 0x01, 0x02, + 0x02, 0x09, 0x00, 0xcc, 0x22, 0x4c, 0x4b, 0x98, + 0xa2, 0x88, 0xfc, 0x30, 0x0d, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x05, 0x00, 0x30, 0x81, 0x86, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, 0x31, + 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x07, + 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, 0x6c, + 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, 0x20, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, 0x30, + 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, 0x08, + 0x6d, 0x79, 0x63, 0x61, 0x2e, 
0x6f, 0x72, 0x67, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, 0x16, + 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, 0x69, + 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, 0x2e, + 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, 0x0d, 0x31, + 0x33, 0x30, 0x35, 0x32, 0x36, 0x32, 0x31, 0x30, + 0x35, 0x30, 0x31, 0x5a, 0x17, 0x0d, 0x32, 0x33, + 0x30, 0x35, 0x32, 0x34, 0x32, 0x31, 0x30, 0x35, + 0x30, 0x31, 0x5a, 0x30, 0x81, 0x86, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x55, 0x53, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x0c, 0x02, 0x4e, 0x59, + 0x31, 0x11, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, + 0x07, 0x0c, 0x08, 0x42, 0x72, 0x6f, 0x6f, 0x6b, + 0x6c, 0x79, 0x6e, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x0c, 0x18, 0x4d, 0x79, + 0x20, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x20, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x31, 0x11, + 0x30, 0x0f, 0x06, 0x03, 0x55, 0x04, 0x03, 0x0c, + 0x08, 0x6d, 0x79, 0x63, 0x61, 0x2e, 0x6f, 0x72, + 0x67, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x01, + 0x16, 0x12, 0x6a, 0x76, 0x73, 0x68, 0x61, 0x68, + 0x69, 0x64, 0x40, 0x67, 0x6d, 0x61, 0x69, 0x6c, + 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x82, 0x01, 0x22, + 0x30, 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, + 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, + 0x82, 0x01, 0x0f, 0x00, 0x30, 0x82, 0x01, 0x0a, + 0x02, 0x82, 0x01, 0x01, 0x00, 0xf0, 0xfb, 0xad, + 0x80, 0x5e, 0x37, 0xd3, 0x6d, 0xee, 0x2e, 0xcc, + 0xbc, 0x0c, 0xd7, 0x56, 0x4b, 0x56, 0x45, 0xcd, + 0x28, 0xb6, 0x22, 0xe9, 0xe2, 0x0f, 0xd1, 0x87, + 0x2a, 0x27, 0xce, 0x77, 0x8d, 0x6e, 0x0e, 0x0f, + 0xfb, 0x66, 0xe1, 0xb5, 0x0e, 0x9a, 0xb6, 0x05, + 0x8e, 0xb3, 0xe1, 0xc5, 0x77, 0x86, 0x5b, 0x46, + 0xd2, 0x0b, 0x92, 0x03, 0x1b, 0x89, 0x0c, 0x1b, + 0x10, 0x0e, 0x99, 0x8f, 0xe2, 0x17, 0xe8, 0xc2, + 0x30, 0x00, 0x47, 0xd6, 0xfc, 0xf9, 0x0f, 0x3b, + 0x75, 0x34, 0x8d, 0x4d, 0xb0, 0x99, 0xb7, 0xa0, + 0x6d, 0xa0, 0xb6, 0xad, 0xda, 0x07, 0x5e, 0x38, + 0x2e, 0x02, 0xe4, 0x30, 0x6d, 0xae, 0x13, 0x72, + 0xd4, 0xc8, 0xce, 0x14, 0x07, 0xae, 0x23, 0x8c, + 0x8f, 0x9e, 0x8c, 0x60, 0xd6, 0x06, 0xb9, 0xef, + 0x00, 0x18, 0xc0, 0x1d, 0x25, 0x1e, 0xda, 0x3e, + 0x2f, 0xcf, 0x2b, 0x56, 0x84, 0x9e, 0x30, 0x21, + 0xc7, 0x29, 0xf6, 0x03, 0x8a, 0x24, 0xf9, 0x34, + 0xac, 0x65, 0x9d, 0x80, 0x36, 0xc8, 0x3b, 0x15, + 0x10, 0xbd, 0x51, 0xe9, 0xbc, 0x02, 0xe1, 0xe9, + 0xb3, 0x5a, 0x9a, 0x99, 0x41, 0x1b, 0x27, 0xa0, + 0x4d, 0x50, 0x9e, 0x27, 0x7f, 0xa1, 0x7d, 0x09, + 0x87, 0xbd, 0x8a, 0xca, 0x5f, 0xb1, 0xa5, 0x08, + 0xb8, 0x04, 0xd4, 0x52, 0x89, 0xaa, 0xe0, 0x7d, + 0x42, 0x2e, 0x2f, 0x15, 0xee, 0x66, 0x57, 0x0f, + 0x13, 0x19, 0x45, 0xa8, 0x4b, 0x5d, 0x81, 0x66, + 0xcc, 0x12, 0x37, 0x94, 0x5e, 0xfd, 0x3c, 0x10, + 0x81, 0x51, 0x3f, 0xfa, 0x0f, 0xdd, 0xa1, 0x89, + 0x03, 0xa9, 0x78, 0x91, 0xf5, 0x3b, 0xf3, 0xbc, + 0xac, 0xbe, 0x93, 0x30, 0x2e, 0xbe, 0xca, 0x7f, + 0x46, 0xd3, 0x28, 0xb4, 0x4e, 0x91, 0x7b, 0x5b, + 0x43, 0x6c, 0xaf, 0x9b, 0x5c, 0x6a, 0x6d, 0x5a, + 0xdb, 0x79, 0x5e, 0x6a, 0x6b, 0x02, 0x03, 0x01, + 0x00, 0x01, 0xa3, 0x50, 0x30, 0x4e, 0x30, 0x1d, + 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x16, 0x04, + 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, 0x7d, + 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, 0x90, + 0xa8, 0xa1, 0x37, 0x6b, 0xda, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x1d, 0x23, 0x04, 0x18, 0x30, 0x16, + 0x80, 0x14, 0x6b, 0x1e, 0x00, 0xa8, 0x9f, 0xfa, + 0x7d, 0x00, 0xf9, 0xe0, 0x9d, 0x0f, 0x90, 0x8c, + 0x90, 0xa8, 0xa1, 0x37, 0x6b, 
0xda, 0x30, 0x0c, + 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, 0x30, + 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x03, 0x82, 0x01, 0x01, 0x00, + 0xcd, 0x6f, 0x73, 0x4d, 0x56, 0x0b, 0xf3, 0x2e, + 0x1c, 0xe2, 0x02, 0x0c, 0x14, 0xbb, 0x2f, 0xdd, + 0x3c, 0x43, 0xfe, 0xdf, 0x94, 0x2d, 0xa9, 0x89, + 0x81, 0x51, 0xf8, 0x5f, 0xa7, 0xa0, 0x13, 0xaa, + 0xcc, 0xb0, 0x18, 0xe2, 0x57, 0x3e, 0x0d, 0x29, + 0x93, 0xe8, 0x95, 0xd5, 0x1b, 0x53, 0xd2, 0x51, + 0xf2, 0xbd, 0xf5, 0x9e, 0x7b, 0x22, 0x65, 0x62, + 0x5c, 0xc4, 0x4c, 0x1d, 0xe8, 0xe9, 0xc3, 0xd4, + 0x2b, 0xe7, 0x78, 0xcb, 0x10, 0xf3, 0xfe, 0x06, + 0x83, 0xdc, 0x3a, 0x1e, 0x62, 0x10, 0xc0, 0x46, + 0x77, 0xc6, 0x9d, 0x9f, 0xab, 0x96, 0x25, 0x5c, + 0xfb, 0x26, 0xc1, 0x15, 0x1f, 0xa5, 0x33, 0xee, + 0x4f, 0x9a, 0x14, 0x6a, 0x14, 0x97, 0x93, 0x2b, + 0x95, 0x0b, 0xdc, 0xa8, 0xd7, 0x69, 0x2e, 0xf0, + 0x01, 0x0e, 0xfd, 0x4e, 0xd0, 0xd9, 0xa8, 0xe5, + 0x65, 0xde, 0xfb, 0xca, 0xca, 0x1c, 0x5f, 0xf9, + 0x53, 0xa0, 0x87, 0xe7, 0x33, 0x9b, 0x2f, 0xcf, + 0xe4, 0x13, 0xfc, 0xec, 0x7a, 0x6c, 0xb0, 0x90, + 0x13, 0x9b, 0xb6, 0xc5, 0x03, 0xf6, 0x0e, 0x5e, + 0xe2, 0xe4, 0x26, 0xc1, 0x7e, 0x53, 0xfe, 0x69, + 0xa3, 0xc7, 0xd8, 0x8e, 0x6e, 0x94, 0x32, 0xa0, + 0xde, 0xca, 0xb6, 0xcc, 0xd6, 0x01, 0xd5, 0x78, + 0x40, 0x28, 0x63, 0x9b, 0xee, 0xcf, 0x09, 0x3b, + 0x35, 0x04, 0xf0, 0x14, 0x02, 0xf6, 0x80, 0x0e, + 0x90, 0xb2, 0x94, 0xd2, 0x25, 0x16, 0xb8, 0x7a, + 0x76, 0x87, 0x84, 0x9f, 0x84, 0xc5, 0xaf, 0xc2, + 0x6d, 0x68, 0x7a, 0x84, 0x9c, 0xc6, 0x8a, 0x63, + 0x60, 0x87, 0x6a, 0x25, 0xc1, 0xa1, 0x78, 0x0f, + 0xba, 0xe8, 0x5f, 0xe1, 0xba, 0xac, 0xa4, 0x6f, + 0xdd, 0x09, 0x3f, 0x12, 0xcb, 0x1d, 0xf3, 0xcf, + 0x48, 0xd7, 0xd3, 0x26, 0xe8, 0x9c, 0xc3, 0x53, + 0xb3, 0xba, 0xdc, 0x32, 0x99, 0x98, 0x96, 0xd6, + 0x16, 0x03, 0x03, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x16, 0x03, 0x03, 0x01, 0x08, + 0x0f, 0x00, 0x01, 0x04, 0x04, 0x01, 0x01, 0x00, + 0x7e, 0xe4, 0x65, 0x02, 0x8e, 0xb3, 0x34, 0x6a, + 0x47, 0x71, 0xd1, 0xb0, 0x8d, 0x3c, 0x0c, 0xe1, + 0xde, 0x7e, 0x5f, 0xb4, 0x15, 0x2d, 0x32, 0x0a, + 0x2a, 0xdb, 0x9b, 0x40, 0xba, 0xce, 0x8b, 0xf5, + 0x74, 0xc1, 0x68, 0x20, 0x7c, 0x87, 0x23, 0x13, + 0xc3, 0x13, 0xa7, 0xdb, 0xec, 0x59, 0xa0, 0x40, + 0x9e, 0x64, 0x03, 0x60, 0xac, 0x76, 0xff, 0x01, + 0x34, 0x7b, 0x32, 0x26, 0xd9, 0x41, 0x31, 0x93, + 0xaa, 0x30, 0x51, 0x83, 0x85, 0x40, 0xeb, 0x4e, + 0x66, 0x39, 0x83, 0xb1, 0x30, 0x0d, 0x96, 0x01, + 0xee, 0x81, 0x53, 0x5e, 0xec, 0xa9, 0xc9, 0xdf, + 0x7e, 0xc1, 0x09, 0x47, 0x8b, 0x35, 0xdb, 0x10, + 0x15, 0xd4, 0xc7, 0x5a, 0x39, 0xe3, 0xc0, 0xf3, + 0x93, 0x38, 0x11, 0xdc, 0x71, 0xbb, 0xc7, 0x62, + 0x2b, 0x85, 0xad, 0x6b, 0x4f, 0x09, 0xb3, 0x31, + 0xa8, 0xe5, 0xd1, 0xb3, 0xa9, 0x21, 0x37, 0x50, + 0xc8, 0x7d, 0xc3, 0xd2, 0xf7, 0x00, 0xd3, 0xdb, + 0x0f, 0x82, 0xf2, 0x43, 0xcf, 0x36, 0x6c, 0x98, + 0x63, 0xd8, 0x1d, 0xb3, 0xf3, 0xde, 0x63, 0x79, + 0x64, 0xf0, 0xdb, 0x46, 0x04, 0xe1, 0x1c, 0x57, + 0x0f, 0x9e, 0x96, 0xb9, 0x93, 0x45, 0x71, 0x1c, + 0x8b, 0x65, 0x7d, 0x1e, 0xad, 0xbd, 0x03, 0x51, + 0xae, 0x44, 0xef, 0x97, 0x45, 0x0d, 0x8d, 0x41, + 0x5c, 0x80, 0x7b, 0xe6, 0xe0, 
0xbc, 0xa6, 0x72, + 0x95, 0xa0, 0x97, 0xe1, 0xbb, 0xc0, 0xcc, 0xe5, + 0x1e, 0xc3, 0xbe, 0xd7, 0x42, 0x2a, 0xf3, 0x75, + 0x8a, 0x44, 0x67, 0x3c, 0xe5, 0x68, 0x78, 0xe5, + 0x40, 0x1f, 0xf0, 0x89, 0x57, 0xda, 0xee, 0x45, + 0xf4, 0x44, 0x81, 0x01, 0x77, 0xf0, 0x4a, 0x14, + 0xb1, 0x3f, 0x60, 0x2b, 0xeb, 0x42, 0x38, 0xa6, + 0xfb, 0xe5, 0x4d, 0x71, 0xdc, 0x7d, 0x0a, 0x72, + 0x56, 0x28, 0x9d, 0xa6, 0x8e, 0x74, 0x2d, 0xbd, + 0x14, 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x03, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x31, 0x4d, 0x58, 0x94, 0x0b, + 0x0b, 0x06, 0x5f, 0xae, 0x57, 0x17, 0x98, 0x86, + 0xaa, 0x49, 0x17, 0x7f, 0xbd, 0x41, 0x05, 0xa5, + 0x74, 0x1c, 0x58, 0xc8, 0x38, 0x2d, 0x99, 0x5d, + 0xe5, 0x12, 0x43, + }, + { + 0x14, 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x03, 0x00, 0x28, 0xf2, 0x60, 0xc2, 0x75, 0x27, + 0x64, 0xf4, 0x05, 0x98, 0xc9, 0xd3, 0xa8, 0x00, + 0x4c, 0xa0, 0x49, 0x82, 0x68, 0xf1, 0x21, 0x05, + 0x7b, 0x4b, 0x25, 0x3e, 0xe1, 0x5f, 0x0f, 0x84, + 0x26, 0x2d, 0x16, 0x2e, 0xc0, 0xfd, 0xdf, 0x0a, + 0xf4, 0xba, 0x19, + }, + { + 0x17, 0x03, 0x03, 0x00, 0x1e, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x01, 0x35, 0xef, 0x9d, + 0x6a, 0x86, 0x98, 0xc5, 0xca, 0x55, 0xca, 0x89, + 0x29, 0xb4, 0x55, 0xd4, 0x41, 0x08, 0x96, 0xe0, + 0xf3, 0x39, 0xfc, 0x15, 0x03, 0x03, 0x00, 0x1a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x02, 0x63, 0x1b, 0xaa, 0xc6, 0xc9, 0x6d, 0x72, + 0x24, 0x10, 0x55, 0xa9, 0x8c, 0x3b, 0x23, 0xce, + 0xd8, 0x4a, + }, +} + +var testClientChainCertificate = fromHex( + "2d2d2d2d2d424547494e2050524956415445204b" + + "45592d2d2d2d2d0a4d494945766749424144414e" + + "42676b71686b6947397730424151454641415343" + + "424b67776767536b41674541416f494241514367" + + "6f2b2f4252483269343347590a4a324f7a485846" + + "51706a515679386b71772b726b6e70784a706747" + + "6266716d31657638566b6e48496c35776c74336b" + + "722f367647736163416b4c4b4c313348560a776a" + + "726d676b493369554545734c7248573470446e35" + + "633544412f56625a364e3638416d78526a6f656a" + + "30794c6a6951514673354c41664c4a4244467954" + + "766a0a5a6b64587557717452506a51634749376a" + + "75316758794c3475417a4a5153764a6747354f47" + + "2b45672f45656b724d4d2f35734b4265514d334a" + + "596e4b317156470a6b574e427854375637583950" + + "6a5162416951432b4e33742b6338707741425130" + + "766b6538736d6f6f70536d45714a3349486e646d" + + "48352b714b306662335775630a715079434e7052" + + "694456772f7367473070626a4744705262374636" + + "37656d4d6b38666e5755416a426f387951423173" + + "4542454a307a7a6636384b585a3034614a0a6952" + + "6a7a544f495241674d424141454367674542414a" + + "4b613676326b5a3144596146786e586d7369624c" + + "386734426f67514c6a42307362524a6d746b6b4d" + + "54370a685343325873537551522f446c654d7148" + + "664555786731784a717579597643544d44585972" + + "473667354a5051744d4432465a424a7239626c65" + + "467138386c706a0a543766514e793571354c2b4f" + + "682f6b62433835436e623641753641656978776d" + + "2b6e77665a4f3766726b6278306d35516b715975" + + "5739392f452b69502b454e570a76396a68773436" + + "76515065563236494b79717656462b4f7362722f" + + "6152316138707948336361566e3579594a433346" + + "5855756c6f5a77516331714a6b4c434c4c0a375a" + + "49744f525a78514c486d4d4a654d44722f5a4942" + + "34675467645650636145375a4d5141714d6d3066" + + "4c6b6d7671723149526b77642f6831455a645650" + + "79320a742f6b6b43413039566336663749556575" + + "6f67706d705a50303130564e376b6277394a6348" + + "75544561564543675945417a47395679426e6d62" + + "6858496c57764f0a71583747524f2f5231636a2b" + + "6b564e35377876674b54756b35592b7a4d774a48" + + 
"32626c57435945513251753974446c476854756b" + + "664273385746772b6e6263460a7a6f706d535245" + + "6c6d464d2f6141536d464733574e5a7072696a68" + + "504b77726338376470636b31703131635a415478" + + "5a413168566d43743457616343673634690a4d74" + + "64507a334e2f34416147664956794d2b69624949" + + "35332f515543675945417953693556735a356f6a" + + "644a795077426e6c6142554231686f2b336b7068" + + "70770a7264572b2b4d796b51494a345564534437" + + "3052486e5a315839754359713978616671746c51" + + "664c44395963442f436d665264706461586c5673" + + "5249467a5a556c0a454630557149644e77337046" + + "68634f4a6d6e5a3241434470434342476f763542" + + "6e3068302b3137686a4b376f69315833716e4542" + + "7857326c7462593476556a500a44394c5330666e" + + "4a76703043675942504a527330714c4a4a464333" + + "6669796b712f57574d38727474354b364a584b50" + + "734b674b53644144577a7463316645434d0a7a65" + + "2b394a6a5a376b4d77557063666a644c2b745047" + + "3455563048326c524375635735414131396d7058" + + "50367454494733713737655a6b416e65516f6163" + + "41340a716c3073583051476c6a5763414e30464b" + + "6f4759733975582b6378445a6e7265362f52392f" + + "3930567766443237454c57546373677734633463" + + "514b42675143420a6f5432326e745a5a59396d6e" + + "72455a36752f492f4a332f35664e396737783733" + + "3177746e463745745a5361575453587364597256" + + "466b564f6362505135494a6f0a714a6a7249372b" + + "474a4d69376f6a4c69642f4c45656f31764f3163" + + "454158334f43723236554e38612f6c7434394f5a" + + "69354c337348556b756c475951755671650a6737" + + "6e6e4632437749544c34503645486443575a4461" + + "7a4136626d7375524f2b6462536e335a6c567651" + + "4b42674859524c5a665458536c44755264776977" + + "746b0a513148546b6d6b57694156726c4f577864" + + "5858456d546130303045574c46446145797a7358" + + "7834424863357166776b5a4e746b634a56396e58" + + "63536e647441530a35767a427a676e797a4f7962" + + "68315878484a3966427472414f3847555878446c" + + "6634394457616753393449763072596e616b7656" + + "2f673039786875415763366e0a5365757230576b" + + "5376453847666653734d485149584c456b0a2d2d" + + "2d2d2d454e442050524956415445204b45592d2d" + + "2d2d2d0a2d2d2d2d2d424547494e204345525449" + + "4649434154452d2d2d2d2d0a4d494944656a4343" + + "416d494343514330523168584b326649776a414e" + + "42676b71686b6947397730424151554641444342" + + "6744454c4d416b474131554542684d430a56564d" + + "78437a414a42674e564241674d416b355a4d5245" + + "77447759445651514844416843636d3976613278" + + "35626a45564d424d47413155454367774d54586b" + + "670a51304567513278705a5735304d5263774651" + + "5944565151444441357465574e68593278705a57" + + "35304c6d4e76625445684d423847435371475349" + + "62334451454a0a41525953616e5a7a6147466f61" + + "5752415a32316861577775593239744d42345844" + + "54457a4d4455794e6a49784e4451774d466f5844" + + "54457a4d4459794e5449780a4e4451774d466f77" + + "6654454c4d416b474131554542684d4356564d78" + + "4554415042674e564241674d4345356c6479425a" + + "62334a724d52457744775944565151480a444168" + + "43636d397661327835626a45514d413447413155" + + "454367774854586b67544756685a6a45544d4245" + + "47413155454177774b62586c735a57466d4c6d4e" + + "760a625445684d42384743537147534962334451" + + "454a41525953616e5a7a6147466f615752415a32" + + "316861577775593239744d494942496a414e4267" + + "6b71686b69470a397730424151454641414f4341" + + "5138414d49494243674b43415145416f4b507677" + + "5552396f754e786d43646a73783178554b593046" + + "63764a4b735071354a36630a536159426d333670" + + "7458722f465a4a78794a65634a6264354b2f2b72" + + "7872476e414a43796939647831634936356f4a43" + + "4e346c42424c43367831754b51352b580a4f5177" + + "50315732656a6576414a73555936486f394d6934" + + 
"346b4542624f5377487979515178636b3734325a" + + "4856376c7172555434304842694f343774594638" + + "690a2b4c674d7955457279594275546876684950" + + "7848704b7a44502b624367586b444e79574a7974" + + "616c5270466a5163552b3165312f543430477749" + + "6b41766a64370a666e504b634141554e4c354876" + + "4c4a714b4b5570684b6964794235335a682b6671" + + "697448323931726e4b6a38676a61555967316350" + + "374942744b5734786736550a572b78657533706a" + + "4a504835316c41497761504d6b41646242415243" + + "644d38332b76436c32644f4769596b5938307a69" + + "45514944415141424d413047435371470a534962" + + "3344514542425155414134494241514351752f6c" + + "65756863667243476661307047304730386a7a33" + + "34586a357972364161382f2b4a72467436347045" + + "710a493458475455646e4151696f425230425946" + + "42665761332b6538594d564a426f634763753759" + + "6634615971734d7635766b426b715a4932435a67" + + "5644694f37790a4d4f326b6a372f575679445551" + + "7831536c6d2b75435a5942556a6a6a72356e5833" + + "42535a7849734f42412b7a46425455705a506879" + + "597142373250384e6e63460a427641714241712b" + + "4c73364250534f6832746a72787570657a796732" + + "55544756586b414537617a4279465a70682b7737" + + "417a36644430784d363965364a742f6a0a336844" + + "756b324b4e63314a752f7a63326d487374566b79" + + "364362696e384473576763726251367673544735" + + "3877517369496b4d6474677a4275632f6b552b34" + + "640a506f696e4537352f766135797a38316a3073" + + "4d59574a4b697262554a6e5a454433547a69484e" + + "35340a2d2d2d2d2d454e44204345525449464943" + + "4154452d2d2d2d2d0a2d2d2d2d2d424547494e20" + + "43455254494649434154452d2d2d2d2d0a4d4949" + + "4468444343416d7743435143723761626b536973" + + "722b44414e42676b71686b694739773042415155" + + "4641444342686a454c4d416b474131554542684d" + + "430a56564d78437a414a42674e564241674d416b" + + "355a4d524577447759445651514844416843636d" + + "397661327835626a45684d423847413155454367" + + "775954586b670a5132567964476c6d61574e6864" + + "4755675158563061473979615852354d52457744" + + "775944565151444441687465574e684c6d39795a" + + "7a45684d423847435371470a534962334451454a" + + "41525953616e5a7a6147466f615752415a323168" + + "61577775593239744d4234584454457a4d445579" + + "4e6a49784d5467304d466f584454457a0a4d4459" + + "794e5449784d5467304d466f7767594178437a41" + + "4a42674e5642415954416c56544d517377435159" + + "445651514944414a4f575445524d413847413155" + + "450a42777749516e4a7662327473655734784654" + + "415442674e5642416f4d4445313549454e424945" + + "4e7361575675644445584d425547413155454177" + + "774f62586c6a0a59574e73615756756443356a62" + + "3230784954416642676b71686b69473977304243" + + "514557456d70326332686861476c6b5147647459" + + "576c734c6d4e76625443430a415349774451594a" + + "4b6f5a496876634e415145424251414467674550" + + "4144434341516f4367674542414d345438484b77" + + "596367594e34704250534368484d752f0a396a74" + + "304a697157456578546f63783964315a46447a61" + + "33386b6953476d4c4d747343684c30517277596e" + + "4c6268376256354c566c32434d51537a5a495037" + + "700a4834373866774a454479694231677a4e7650" + + "4258624d796e75676167707048613730614b5941" + + "3953624a42736a455376734a3251756946596f44" + + "7a75564c55700a4a68384b724f3949614450514d" + + "39434c477578754c37564b553849613076465142" + + "566c6332646f44436b6533336663366166564f36" + + "6b7243796c5377693362680a416931535a376e64" + + "554d6b37427951696167416457494f6f374a5878" + + "32754a7a6f4b4679594a364755387446714d4b67" + + "554b425431767759684c564b4a7443690a717444" + + "2f747634366e4c555a4f7a2f685341326b43552b" + + "447963444a7067745948787837724b4a43764748" + + "3049596f41326853675941502b6b784a73567330" + + 
"430a417745414154414e42676b71686b69473977" + + "30424151554641414f43415145414a536b374873" + + "4e594d75596a794f3459384231696254745a6d54" + + "722b535849480a5031695432384376734c4e6330" + + "567959794f704b3546687a445666464533786365" + + "5762614242336c6d4e6f3152305377306e706d6e" + + "63314270592b68456249610a6838444e56653230" + + "657a4e79362f666a6534734368756b724a6a4b66" + + "6d66484c6b36753546724f617369495449523962" + + "7a4b4a5a75326e79754165417a677a330a6d4579" + + "4677705a7149675870766b6977416c74704b4269" + + "496c755058786e7254365a6e2f6e634e68545a71" + + "573873597a54655664576d686b576f49315a5358" + + "6a0a6a46757739705a57764c2b58646b746d5249" + + "476b784b637878614650364b544b495055425735" + + "6c5057765477654c397853645878776149592f58" + + "4a62467569530a787a6449722b346b2f44554c77" + + "7430467832366a4b62737066644d726c49444451" + + "464d4f413151396534764f2b6151444a32507355" + + "513d3d0a2d2d2d2d2d454e442043455254494649" + + "434154452d2d2d2d2d0a2d2d2d2d2d424547494e" + + "2043455254494649434154452d2d2d2d2d0a4d49" + + "49443454434341736d67417749424167494a414d" + + "7769544575596f6f6a384d413047435371475349" + + "623344514542425155414d4947474d5173774351" + + "59440a5651514745774a56557a454c4d416b4741" + + "31554543417743546c6b784554415042674e5642" + + "41634d43454a796232397262486c754d53457748" + + "7759445651514b0a4442684e655342445a584a30" + + "61575a70593246305a5342426458526f62334a70" + + "64486b784554415042674e5642414d4d43473135" + + "5932457562334a6e4d5345770a4877594a4b6f5a" + + "496876634e41516b4246684a71646e4e6f595768" + + "705a45426e625746706243356a62323077486863" + + "4e4d544d774e5449324d6a45774e5441780a5768" + + "634e4d6a4d774e5449304d6a45774e544178576a" + + "4342686a454c4d416b474131554542684d435656" + + "4d78437a414a42674e564241674d416b355a4d52" + + "45770a447759445651514844416843636d397661" + + "327835626a45684d423847413155454367775954" + + "586b675132567964476c6d61574e686447556751" + + "585630614739790a615852354d52457744775944" + + "565151444441687465574e684c6d39795a7a4568" + + "4d42384743537147534962334451454a41525953" + + "616e5a7a6147466f615752410a5a323168615777" + + "75593239744d494942496a414e42676b71686b69" + + "47397730424151454641414f43415138414d4949" + + "4243674b434151454138507574674634330a3032" + + "33754c737938444e645753315a467a5369324975" + + "6e69443947484b69664f6434317544672f375a75" + + "4731447071324259367a34635633686c74473067" + + "75530a4178754a4442735144706d503468666f77" + + "6a4141523962382b5138376454534e5462435a74" + + "3642746f4c6174326764654f4334433544427472" + + "684e79314d6a4f0a46416575493479506e6f7867" + + "31676135377741597742306c48746f2b4c383872" + + "566f53654d4348484b665944696954354e4b786c" + + "6e59413279447356454c31520a3662774334656d" + + "7a5770715a5152736e6f4531516e69642f6f5830" + + "4a6837324b796c2b7870516934424e5253696172" + + "67665549754c7858755a6c635045786c460a7145" + + "74646757624d456a65555876303845494652502f" + + "6f503361474a41366c346b665537383779737670" + + "4d774c72374b663062544b4c524f6b5874625132" + + "79760a6d31787162567262655635716177494441" + + "5141426f314177546a416442674e564851344546" + + "67515561783441714a2f3666514435344a30506b" + + "497951714b45330a61396f77487759445652306a" + + "42426777466f415561783441714a2f3666514435" + + "344a30506b497951714b453361396f7744415944" + + "5652305442415577417745420a2f7a414e42676b" + + "71686b6947397730424151554641414f43415145" + + "417a57397a5456594c387934633467494d464c73" + + "76335478442f742b554c616d4a675648340a5836" + + "65674536724d73426a69567a344e4b5a506f6c64" + + 
"556255394a52387233316e6e73695a574a637845" + + "7764364f6e443143766e654d7351382f34476739" + + "77360a486d495177455a33787032667135596c58" + + "50736d775255667054507554356f55616853586b" + + "7975564339796f31326b753841454f2f55375132" + + "616a6c5a6437370a79736f63582f6c546f49666e" + + "4d3573767a2b51542f4f7836624c435145357532" + + "78515032446c376935436242666c502b61615048" + + "324935756c444b67337371320a7a4e5942315868" + + "414b474f623773384a4f7a554538425143396f41" + + "4f6b4c4b55306955577548703268345366684d57" + + "76776d316f656f5363786f706a594964710a4a63" + + "476865412b3636462f687571796b6239304a5078" + + "4c4c48665050534e66544a75696377314f7a7574" + + "77796d5a695731673d3d0a2d2d2d2d2d454e4420" + + "43455254494649434154452d2d2d2d2d0a", +) + +// Script of interaction with openssl implementation: +// +// openssl s_server -cipher ECDHE-ECDSA-AES128-SHA \ +// -key server.key -cert server.crt -port 10443 +// +// The values for this test are obtained by building and running in client mode: +// % go test -test.run "TestRunClient" -connect -ciphersuites=0xc009 +// The recorded bytes are written to stdout. +// +// The server private key is: +// +// -----BEGIN EC PARAMETERS----- +// BgUrgQQAIw== +// -----END EC PARAMETERS----- +// -----BEGIN EC PRIVATE KEY----- +// MIHcAgEBBEIBmIPpCa0Kyeo9M/nq5mHxeFIGlw+MqakWcvHu3Keo7xK9ZWG7JG3a +// XfS01efjqSZJvF2DoL+Sly4A5iBn0Me9mdegBwYFK4EEACOhgYkDgYYABADEoe2+ +// mPkLSHM2fsMWVhEi8j1TwztNIT3Na3Xm9rDcmt8mwbyyh/ByMnyzZC8ckLzqaCMQ +// fv7jJcBIOmngKG3TNwDvBGLdDaCccGKD2IHTZDGqnpcxvZawaMCbI952ZD8aXH/p +// Eg5YWLZfcN2b2OrV1/XVzLm2nzBmW2aaIOIn5b/+Ow== +// -----END EC PRIVATE KEY----- +// +// and certificate is: +// +// -----BEGIN CERTIFICATE----- +// MIICADCCAWICCQC4vy1HoNLr9DAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw +// EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0 +// eSBMdGQwHhcNMTIxMTIyMTUwNjMyWhcNMjIxMTIwMTUwNjMyWjBFMQswCQYDVQQG +// EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk +// Z2l0cyBQdHkgTHRkMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQAxKHtvpj5C0hz +// Nn7DFlYRIvI9U8M7TSE9zWt15vaw3JrfJsG8sofwcjJ8s2QvHJC86mgjEH7+4yXA +// SDpp4Cht0zcA7wRi3Q2gnHBig9iB02Qxqp6XMb2WsGjAmyPedmQ/Glx/6RIOWFi2 +// X3Ddm9jq1df11cy5tp8wZltmmiDiJ+W//jswCQYHKoZIzj0EAQOBjAAwgYgCQgGI +// ok/r4kXFSH0brPXtmJ2uR3DAXhu2L73xtk23YUDTEaLO7gt+kn7/dp3DO36lP876 +// EOJZ7EctfKzaTpcOFaBv0AJCAU38vmcTnC0FDr0/o4wlwTMTgw2UBrvUN3r27HrJ +// hi7d1xFpf4V8Vt77MXgr5Md4Da7Lvp5ONiQxe2oPOZUSB48q +// -----END CERTIFICATE----- +var ecdheECDSAAESClientScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x4a, 0x01, 0x00, 0x00, + 0x46, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xc0, 0x09, + 0x01, 0x00, 0x00, 0x1b, 0x00, 0x05, 0x00, 0x05, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + 0x08, 0x00, 0x06, 0x00, 0x17, 0x00, 0x18, 0x00, + 0x19, 0x00, 0x0b, 0x00, 0x02, 0x01, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x54, 0x02, 0x00, 0x00, + 0x50, 0x03, 0x01, 0x50, 0xd7, 0x19, 0xc9, 0x03, + 0xc2, 0x3a, 0xc6, 0x1f, 0x0a, 0x84, 0x9e, 0xd7, + 0xf4, 0x7e, 0x07, 0x6d, 0xa8, 0xe4, 0xa9, 0x4f, + 0x22, 0x50, 0xa2, 0x19, 0x24, 0x44, 0x42, 0x65, + 0xaa, 0xba, 0x3a, 0x20, 0x90, 0x70, 0xb7, 0xe5, + 0x57, 0xed, 0xb1, 0xb1, 0x43, 0x4b, 0xa1, 0x4e, + 0xee, 0x7a, 0x5b, 0x88, 0xf6, 0xa6, 0x73, 0x3b, + 0xcb, 0xa7, 0xbd, 0x57, 0x50, 0xf2, 0x72, 0x8c, + 0xbc, 0x45, 0x73, 0xaa, 0xc0, 0x09, 0x00, 0x00, + 0x08, 0x00, 0x0b, 0x00, 0x04, 0x03, 
0x00, 0x01, + 0x02, 0x16, 0x03, 0x01, 0x02, 0x0e, 0x0b, 0x00, + 0x02, 0x0a, 0x00, 0x02, 0x07, 0x00, 0x02, 0x04, + 0x30, 0x82, 0x02, 0x00, 0x30, 0x82, 0x01, 0x62, + 0x02, 0x09, 0x00, 0xb8, 0xbf, 0x2d, 0x47, 0xa0, + 0xd2, 0xeb, 0xf4, 0x30, 0x09, 0x06, 0x07, 0x2a, + 0x86, 0x48, 0xce, 0x3d, 0x04, 0x01, 0x30, 0x45, + 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, + 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, + 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, + 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, + 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, + 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, + 0x79, 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x32, 0x31, 0x31, 0x32, 0x32, 0x31, + 0x35, 0x30, 0x36, 0x33, 0x32, 0x5a, 0x17, 0x0d, + 0x32, 0x32, 0x31, 0x31, 0x32, 0x30, 0x31, 0x35, + 0x30, 0x36, 0x33, 0x32, 0x5a, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9b, 0x30, + 0x10, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, + 0x02, 0x01, 0x06, 0x05, 0x2b, 0x81, 0x04, 0x00, + 0x23, 0x03, 0x81, 0x86, 0x00, 0x04, 0x00, 0xc4, + 0xa1, 0xed, 0xbe, 0x98, 0xf9, 0x0b, 0x48, 0x73, + 0x36, 0x7e, 0xc3, 0x16, 0x56, 0x11, 0x22, 0xf2, + 0x3d, 0x53, 0xc3, 0x3b, 0x4d, 0x21, 0x3d, 0xcd, + 0x6b, 0x75, 0xe6, 0xf6, 0xb0, 0xdc, 0x9a, 0xdf, + 0x26, 0xc1, 0xbc, 0xb2, 0x87, 0xf0, 0x72, 0x32, + 0x7c, 0xb3, 0x64, 0x2f, 0x1c, 0x90, 0xbc, 0xea, + 0x68, 0x23, 0x10, 0x7e, 0xfe, 0xe3, 0x25, 0xc0, + 0x48, 0x3a, 0x69, 0xe0, 0x28, 0x6d, 0xd3, 0x37, + 0x00, 0xef, 0x04, 0x62, 0xdd, 0x0d, 0xa0, 0x9c, + 0x70, 0x62, 0x83, 0xd8, 0x81, 0xd3, 0x64, 0x31, + 0xaa, 0x9e, 0x97, 0x31, 0xbd, 0x96, 0xb0, 0x68, + 0xc0, 0x9b, 0x23, 0xde, 0x76, 0x64, 0x3f, 0x1a, + 0x5c, 0x7f, 0xe9, 0x12, 0x0e, 0x58, 0x58, 0xb6, + 0x5f, 0x70, 0xdd, 0x9b, 0xd8, 0xea, 0xd5, 0xd7, + 0xf5, 0xd5, 0xcc, 0xb9, 0xb6, 0x9f, 0x30, 0x66, + 0x5b, 0x66, 0x9a, 0x20, 0xe2, 0x27, 0xe5, 0xbf, + 0xfe, 0x3b, 0x30, 0x09, 0x06, 0x07, 0x2a, 0x86, + 0x48, 0xce, 0x3d, 0x04, 0x01, 0x03, 0x81, 0x8c, + 0x00, 0x30, 0x81, 0x88, 0x02, 0x42, 0x01, 0x88, + 0xa2, 0x4f, 0xeb, 0xe2, 0x45, 0xc5, 0x48, 0x7d, + 0x1b, 0xac, 0xf5, 0xed, 0x98, 0x9d, 0xae, 0x47, + 0x70, 0xc0, 0x5e, 0x1b, 0xb6, 0x2f, 0xbd, 0xf1, + 0xb6, 0x4d, 0xb7, 0x61, 0x40, 0xd3, 0x11, 0xa2, + 0xce, 0xee, 0x0b, 0x7e, 0x92, 0x7e, 0xff, 0x76, + 0x9d, 0xc3, 0x3b, 0x7e, 0xa5, 0x3f, 0xce, 0xfa, + 0x10, 0xe2, 0x59, 0xec, 0x47, 0x2d, 0x7c, 0xac, + 0xda, 0x4e, 0x97, 0x0e, 0x15, 0xa0, 0x6f, 0xd0, + 0x02, 0x42, 0x01, 0x4d, 0xfc, 0xbe, 0x67, 0x13, + 0x9c, 0x2d, 0x05, 0x0e, 0xbd, 0x3f, 0xa3, 0x8c, + 0x25, 0xc1, 0x33, 0x13, 0x83, 0x0d, 0x94, 0x06, + 0xbb, 0xd4, 0x37, 0x7a, 0xf6, 0xec, 0x7a, 0xc9, + 0x86, 0x2e, 0xdd, 0xd7, 0x11, 0x69, 0x7f, 0x85, + 0x7c, 0x56, 0xde, 0xfb, 0x31, 0x78, 0x2b, 0xe4, + 0xc7, 0x78, 0x0d, 0xae, 0xcb, 0xbe, 0x9e, 0x4e, + 0x36, 0x24, 0x31, 0x7b, 0x6a, 0x0f, 0x39, 0x95, + 0x12, 0x07, 0x8f, 0x2a, 0x16, 0x03, 0x01, 0x00, + 0xd6, 0x0c, 0x00, 0x00, 0xd2, 0x03, 0x00, 0x17, + 0x41, 0x04, 0x33, 0xed, 0xe1, 0x10, 0x3d, 0xe2, + 0xb0, 0x81, 0x5e, 0x01, 0x1b, 0x00, 0x4a, 0x7d, + 0xdc, 0xc5, 0x78, 0x02, 0xb1, 0x9a, 
0x78, 0x92, + 0x34, 0xd9, 0x23, 0xcc, 0x01, 0xfb, 0x0c, 0x49, + 0x1c, 0x4a, 0x59, 0x8a, 0x80, 0x1b, 0x34, 0xf0, + 0xe8, 0x87, 0x1b, 0x7c, 0xfb, 0x72, 0xf5, 0xea, + 0xf9, 0xf3, 0xff, 0xa6, 0x3e, 0x4e, 0xac, 0xbc, + 0xee, 0x14, 0x2b, 0x87, 0xd4, 0x0b, 0xda, 0x19, + 0x60, 0x2b, 0x00, 0x8b, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x01, 0x75, 0x46, 0x4f, 0x97, 0x9f, 0xc5, + 0xf9, 0x4c, 0x38, 0xcf, 0x3b, 0x37, 0x1a, 0x6b, + 0x53, 0xfc, 0x05, 0x73, 0x7d, 0x98, 0x2c, 0x5b, + 0x76, 0xd4, 0x37, 0x1f, 0x50, 0x6d, 0xad, 0xc6, + 0x0f, 0x8f, 0x7b, 0xcc, 0x60, 0x8e, 0x04, 0x00, + 0x21, 0x80, 0xa8, 0xa5, 0x98, 0xf2, 0x42, 0xf2, + 0xc3, 0xf6, 0x44, 0x50, 0xc4, 0x7a, 0xae, 0x6f, + 0x74, 0xa0, 0x7f, 0x07, 0x7a, 0x0b, 0xbb, 0x41, + 0x9e, 0x3c, 0x0b, 0x02, 0x42, 0x01, 0xbe, 0x64, + 0xaa, 0x12, 0x03, 0xfb, 0xd8, 0x4f, 0x93, 0xf9, + 0x92, 0x54, 0x0d, 0x9c, 0x9d, 0x53, 0x88, 0x19, + 0x69, 0x94, 0xfc, 0xd6, 0xf7, 0x60, 0xcf, 0x70, + 0x64, 0x15, 0x1b, 0x02, 0x22, 0x56, 0xb0, 0x2c, + 0xb1, 0x72, 0x4c, 0x9e, 0x7b, 0xf0, 0x53, 0x97, + 0x43, 0xac, 0x11, 0x62, 0xe5, 0x5a, 0xf1, 0x7e, + 0x87, 0x8f, 0x5c, 0x43, 0x1d, 0xae, 0x56, 0x28, + 0xdb, 0x76, 0x15, 0xd8, 0x1c, 0x73, 0xce, 0x16, + 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x46, 0x10, 0x00, 0x00, + 0x42, 0x41, 0x04, 0x1e, 0x18, 0x37, 0xef, 0x0d, + 0x19, 0x51, 0x88, 0x35, 0x75, 0x71, 0xb5, 0xe5, + 0x54, 0x5b, 0x12, 0x2e, 0x8f, 0x09, 0x67, 0xfd, + 0xa7, 0x24, 0x20, 0x3e, 0xb2, 0x56, 0x1c, 0xce, + 0x97, 0x28, 0x5e, 0xf8, 0x2b, 0x2d, 0x4f, 0x9e, + 0xf1, 0x07, 0x9f, 0x6c, 0x4b, 0x5b, 0x83, 0x56, + 0xe2, 0x32, 0x42, 0xe9, 0x58, 0xb6, 0xd7, 0x49, + 0xa6, 0xb5, 0x68, 0x1a, 0x41, 0x03, 0x56, 0x6b, + 0xdc, 0x5a, 0x89, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0x1a, 0x45, + 0x92, 0x3b, 0xac, 0x8d, 0x91, 0x89, 0xd3, 0x2c, + 0xf4, 0x3c, 0x5f, 0x70, 0xf1, 0x79, 0xa5, 0x6a, + 0xcf, 0x97, 0x8f, 0x3f, 0x73, 0x08, 0xca, 0x3f, + 0x55, 0xb0, 0x28, 0xd1, 0x6f, 0xcd, 0x9b, 0xca, + 0xb6, 0xb7, 0xd0, 0xa5, 0x21, 0x5b, 0x08, 0xf8, + 0x42, 0xe2, 0xdf, 0x25, 0x6a, 0x16, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x30, 0x30, 0x83, 0xb6, 0x51, 0x8a, + 0x85, 0x4a, 0xee, 0xe4, 0xb6, 0xae, 0xf3, 0xc1, + 0xdc, 0xd2, 0x04, 0xb3, 0xd0, 0x25, 0x47, 0x5f, + 0xac, 0x83, 0xa3, 0x7d, 0xcf, 0x47, 0x92, 0xed, + 0x92, 0x6c, 0xd1, 0x6e, 0xfd, 0x63, 0xf5, 0x2d, + 0x89, 0xd8, 0x04, 0x8c, 0x62, 0x71, 0xae, 0x5e, + 0x32, 0x48, 0xf8, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x20, 0xcf, 0x5e, 0xba, + 0xf4, 0x47, 0x32, 0x35, 0x9b, 0x85, 0xdc, 0xb3, + 0xff, 0x77, 0x90, 0xd9, 0x2b, 0xbd, 0x59, 0x2a, + 0x33, 0xe4, 0x6e, 0x9b, 0xfc, 0x1c, 0x73, 0x3f, + 0x5e, 0x1e, 0xe3, 0xa4, 0xc2, 0x17, 0x03, 0x01, + 0x00, 0x20, 0x05, 0xdf, 0x2d, 0x9b, 0x29, 0x7f, + 0x97, 0xcd, 0x49, 0x04, 0x53, 0x22, 0x1a, 0xa1, + 0xa1, 0xe6, 0x38, 0x3a, 0x56, 0x37, 0x1f, 0xd8, + 0x3a, 0x12, 0x2c, 0xf0, 0xeb, 0x61, 0x35, 0x76, + 0xe5, 0xf0, 0x15, 0x03, 0x01, 0x00, 0x20, 0xa5, + 0x56, 0xb5, 0x49, 0x4b, 0xc2, 0xd4, 0x4c, 0xf6, + 0x95, 0x15, 0x7d, 0x41, 0x1d, 0x5c, 0x00, 0x0e, + 0x20, 0xb1, 0x0a, 0xbc, 0xc9, 0x2a, 0x09, 0x17, + 0xb4, 0xaa, 0x1c, 0x79, 0xda, 0x79, 0x27, + }, +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1304 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import "bytes" + +type clientHelloMsg struct { + raw []byte + vers uint16 + random []byte + sessionId []byte + cipherSuites []uint16 + compressionMethods []uint8 + nextProtoNeg bool + serverName string + ocspStapling bool + supportedCurves []uint16 + supportedPoints []uint8 + ticketSupported bool + sessionTicket []uint8 + signatureAndHashes []signatureAndHash +} + +func (m *clientHelloMsg) equal(i interface{}) bool { + m1, ok := i.(*clientHelloMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.vers == m1.vers && + bytes.Equal(m.random, m1.random) && + bytes.Equal(m.sessionId, m1.sessionId) && + eqUint16s(m.cipherSuites, m1.cipherSuites) && + bytes.Equal(m.compressionMethods, m1.compressionMethods) && + m.nextProtoNeg == m1.nextProtoNeg && + m.serverName == m1.serverName && + m.ocspStapling == m1.ocspStapling && + eqUint16s(m.supportedCurves, m1.supportedCurves) && + bytes.Equal(m.supportedPoints, m1.supportedPoints) && + m.ticketSupported == m1.ticketSupported && + bytes.Equal(m.sessionTicket, m1.sessionTicket) && + eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) +} + +func (m *clientHelloMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + + length := 2 + 32 + 1 + len(m.sessionId) + 2 + len(m.cipherSuites)*2 + 1 + len(m.compressionMethods) + numExtensions := 0 + extensionsLength := 0 + if m.nextProtoNeg { + numExtensions++ + } + if m.ocspStapling { + extensionsLength += 1 + 2 + 2 + numExtensions++ + } + if len(m.serverName) > 0 { + extensionsLength += 5 + len(m.serverName) + numExtensions++ + } + if len(m.supportedCurves) > 0 { + extensionsLength += 2 + 2*len(m.supportedCurves) + numExtensions++ + } + if len(m.supportedPoints) > 0 { + extensionsLength += 1 + len(m.supportedPoints) + numExtensions++ + } + if m.ticketSupported { + extensionsLength += len(m.sessionTicket) + numExtensions++ + } + if len(m.signatureAndHashes) > 0 { + extensionsLength += 2 + 2*len(m.signatureAndHashes) + numExtensions++ + } + if numExtensions > 0 { + extensionsLength += 4 * numExtensions + length += 2 + extensionsLength + } + + x := make([]byte, 4+length) + x[0] = typeClientHello + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + x[4] = uint8(m.vers >> 8) + x[5] = uint8(m.vers) + copy(x[6:38], m.random) + x[38] = uint8(len(m.sessionId)) + copy(x[39:39+len(m.sessionId)], m.sessionId) + y := x[39+len(m.sessionId):] + y[0] = uint8(len(m.cipherSuites) >> 7) + y[1] = uint8(len(m.cipherSuites) << 1) + for i, suite := range m.cipherSuites { + y[2+i*2] = uint8(suite >> 8) + y[3+i*2] = uint8(suite) + } + z := y[2+len(m.cipherSuites)*2:] + z[0] = uint8(len(m.compressionMethods)) + copy(z[1:], m.compressionMethods) + + z = z[1+len(m.compressionMethods):] + if numExtensions > 0 { + z[0] = byte(extensionsLength >> 8) + z[1] = byte(extensionsLength) + z = z[2:] + } + if m.nextProtoNeg { + z[0] = byte(extensionNextProtoNeg >> 8) + z[1] = byte(extensionNextProtoNeg) + // The length is always 0 + z = z[4:] + } + if len(m.serverName) > 0 { + z[0] = byte(extensionServerName >> 8) + z[1] = byte(extensionServerName) + l := len(m.serverName) + 5 + z[2] = byte(l >> 8) + z[3] = byte(l) + z = z[4:] + + // RFC 3546, section 3.1 + // + // struct { + // NameType name_type; + // select (name_type) { + // case host_name: HostName; + // } name; + // } ServerName; + // + // enum { + // host_name(0), (255) + // } NameType; + // + // 
opaque HostName<1..2^16-1>; + // + // struct { + // ServerName server_name_list<1..2^16-1> + // } ServerNameList; + + z[0] = byte((len(m.serverName) + 3) >> 8) + z[1] = byte(len(m.serverName) + 3) + z[3] = byte(len(m.serverName) >> 8) + z[4] = byte(len(m.serverName)) + copy(z[5:], []byte(m.serverName)) + z = z[l:] + } + if m.ocspStapling { + // RFC 4366, section 3.6 + z[0] = byte(extensionStatusRequest >> 8) + z[1] = byte(extensionStatusRequest) + z[2] = 0 + z[3] = 5 + z[4] = 1 // OCSP type + // Two zero valued uint16s for the two lengths. + z = z[9:] + } + if len(m.supportedCurves) > 0 { + // http://tools.ietf.org/html/rfc4492#section-5.5.1 + z[0] = byte(extensionSupportedCurves >> 8) + z[1] = byte(extensionSupportedCurves) + l := 2 + 2*len(m.supportedCurves) + z[2] = byte(l >> 8) + z[3] = byte(l) + l -= 2 + z[4] = byte(l >> 8) + z[5] = byte(l) + z = z[6:] + for _, curve := range m.supportedCurves { + z[0] = byte(curve >> 8) + z[1] = byte(curve) + z = z[2:] + } + } + if len(m.supportedPoints) > 0 { + // http://tools.ietf.org/html/rfc4492#section-5.5.2 + z[0] = byte(extensionSupportedPoints >> 8) + z[1] = byte(extensionSupportedPoints) + l := 1 + len(m.supportedPoints) + z[2] = byte(l >> 8) + z[3] = byte(l) + l-- + z[4] = byte(l) + z = z[5:] + for _, pointFormat := range m.supportedPoints { + z[0] = byte(pointFormat) + z = z[1:] + } + } + if m.ticketSupported { + // http://tools.ietf.org/html/rfc5077#section-3.2 + z[0] = byte(extensionSessionTicket >> 8) + z[1] = byte(extensionSessionTicket) + l := len(m.sessionTicket) + z[2] = byte(l >> 8) + z[3] = byte(l) + z = z[4:] + copy(z, m.sessionTicket) + z = z[len(m.sessionTicket):] + } + if len(m.signatureAndHashes) > 0 { + // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 + z[0] = byte(extensionSignatureAlgorithms >> 8) + z[1] = byte(extensionSignatureAlgorithms) + l := 2 + 2*len(m.signatureAndHashes) + z[2] = byte(l >> 8) + z[3] = byte(l) + z = z[4:] + + l -= 2 + z[0] = byte(l >> 8) + z[1] = byte(l) + z = z[2:] + for _, sigAndHash := range m.signatureAndHashes { + z[0] = sigAndHash.hash + z[1] = sigAndHash.signature + z = z[2:] + } + } + + m.raw = x + + return x +} + +func (m *clientHelloMsg) unmarshal(data []byte) bool { + if len(data) < 42 { + return false + } + m.raw = data + m.vers = uint16(data[4])<<8 | uint16(data[5]) + m.random = data[6:38] + sessionIdLen := int(data[38]) + if sessionIdLen > 32 || len(data) < 39+sessionIdLen { + return false + } + m.sessionId = data[39 : 39+sessionIdLen] + data = data[39+sessionIdLen:] + if len(data) < 2 { + return false + } + // cipherSuiteLen is the number of bytes of cipher suite numbers. Since + // they are uint16s, the number must be even. 
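+	// As an illustrative sketch (not from the upstream file): a hello
+	// offering the two suites 0xc009 and 0x002f arrives as the uint16
+	// byte length 0x0004 followed by the suite values, so
+	//
+	//	data := []byte{0x00, 0x04, 0xc0, 0x09, 0x00, 0x2f}
+	//
+	// yields cipherSuiteLen == 4 and numCipherSuites == 2 below.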
+ cipherSuiteLen := int(data[0])<<8 | int(data[1]) + if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen { + return false + } + numCipherSuites := cipherSuiteLen / 2 + m.cipherSuites = make([]uint16, numCipherSuites) + for i := 0; i < numCipherSuites; i++ { + m.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i]) + } + data = data[2+cipherSuiteLen:] + if len(data) < 1 { + return false + } + compressionMethodsLen := int(data[0]) + if len(data) < 1+compressionMethodsLen { + return false + } + m.compressionMethods = data[1 : 1+compressionMethodsLen] + + data = data[1+compressionMethodsLen:] + + m.nextProtoNeg = false + m.serverName = "" + m.ocspStapling = false + m.ticketSupported = false + m.sessionTicket = nil + m.signatureAndHashes = nil + + if len(data) == 0 { + // ClientHello is optionally followed by extension data + return true + } + if len(data) < 2 { + return false + } + + extensionsLength := int(data[0])<<8 | int(data[1]) + data = data[2:] + if extensionsLength != len(data) { + return false + } + + for len(data) != 0 { + if len(data) < 4 { + return false + } + extension := uint16(data[0])<<8 | uint16(data[1]) + length := int(data[2])<<8 | int(data[3]) + data = data[4:] + if len(data) < length { + return false + } + + switch extension { + case extensionServerName: + if length < 2 { + return false + } + numNames := int(data[0])<<8 | int(data[1]) + d := data[2:] + for i := 0; i < numNames; i++ { + if len(d) < 3 { + return false + } + nameType := d[0] + nameLen := int(d[1])<<8 | int(d[2]) + d = d[3:] + if len(d) < nameLen { + return false + } + if nameType == 0 { + m.serverName = string(d[0:nameLen]) + break + } + d = d[nameLen:] + } + case extensionNextProtoNeg: + if length > 0 { + return false + } + m.nextProtoNeg = true + case extensionStatusRequest: + m.ocspStapling = length > 0 && data[0] == statusTypeOCSP + case extensionSupportedCurves: + // http://tools.ietf.org/html/rfc4492#section-5.5.1 + if length < 2 { + return false + } + l := int(data[0])<<8 | int(data[1]) + if l%2 == 1 || length != l+2 { + return false + } + numCurves := l / 2 + m.supportedCurves = make([]uint16, numCurves) + d := data[2:] + for i := 0; i < numCurves; i++ { + m.supportedCurves[i] = uint16(d[0])<<8 | uint16(d[1]) + d = d[2:] + } + case extensionSupportedPoints: + // http://tools.ietf.org/html/rfc4492#section-5.5.2 + if length < 1 { + return false + } + l := int(data[0]) + if length != l+1 { + return false + } + m.supportedPoints = make([]uint8, l) + copy(m.supportedPoints, data[1:]) + case extensionSessionTicket: + // http://tools.ietf.org/html/rfc5077#section-3.2 + m.ticketSupported = true + m.sessionTicket = data[:length] + case extensionSignatureAlgorithms: + // https://tools.ietf.org/html/rfc5246#section-7.4.1.4.1 + if length < 2 || length&1 != 0 { + return false + } + l := int(data[0])<<8 | int(data[1]) + if l != length-2 { + return false + } + n := l / 2 + d := data[2:] + m.signatureAndHashes = make([]signatureAndHash, n) + for i := range m.signatureAndHashes { + m.signatureAndHashes[i].hash = d[0] + m.signatureAndHashes[i].signature = d[1] + d = d[2:] + } + } + data = data[length:] + } + + return true +} + +type serverHelloMsg struct { + raw []byte + vers uint16 + random []byte + sessionId []byte + cipherSuite uint16 + compressionMethod uint8 + nextProtoNeg bool + nextProtos []string + ocspStapling bool + ticketSupported bool +} + +func (m *serverHelloMsg) equal(i interface{}) bool { + m1, ok := i.(*serverHelloMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && 
+ m.vers == m1.vers && + bytes.Equal(m.random, m1.random) && + bytes.Equal(m.sessionId, m1.sessionId) && + m.cipherSuite == m1.cipherSuite && + m.compressionMethod == m1.compressionMethod && + m.nextProtoNeg == m1.nextProtoNeg && + eqStrings(m.nextProtos, m1.nextProtos) && + m.ocspStapling == m1.ocspStapling && + m.ticketSupported == m1.ticketSupported +} + +func (m *serverHelloMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + + length := 38 + len(m.sessionId) + numExtensions := 0 + extensionsLength := 0 + + nextProtoLen := 0 + if m.nextProtoNeg { + numExtensions++ + for _, v := range m.nextProtos { + nextProtoLen += len(v) + } + nextProtoLen += len(m.nextProtos) + extensionsLength += nextProtoLen + } + if m.ocspStapling { + numExtensions++ + } + if m.ticketSupported { + numExtensions++ + } + if numExtensions > 0 { + extensionsLength += 4 * numExtensions + length += 2 + extensionsLength + } + + x := make([]byte, 4+length) + x[0] = typeServerHello + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + x[4] = uint8(m.vers >> 8) + x[5] = uint8(m.vers) + copy(x[6:38], m.random) + x[38] = uint8(len(m.sessionId)) + copy(x[39:39+len(m.sessionId)], m.sessionId) + z := x[39+len(m.sessionId):] + z[0] = uint8(m.cipherSuite >> 8) + z[1] = uint8(m.cipherSuite) + z[2] = uint8(m.compressionMethod) + + z = z[3:] + if numExtensions > 0 { + z[0] = byte(extensionsLength >> 8) + z[1] = byte(extensionsLength) + z = z[2:] + } + if m.nextProtoNeg { + z[0] = byte(extensionNextProtoNeg >> 8) + z[1] = byte(extensionNextProtoNeg) + z[2] = byte(nextProtoLen >> 8) + z[3] = byte(nextProtoLen) + z = z[4:] + + for _, v := range m.nextProtos { + l := len(v) + if l > 255 { + l = 255 + } + z[0] = byte(l) + copy(z[1:], []byte(v[0:l])) + z = z[1+l:] + } + } + if m.ocspStapling { + z[0] = byte(extensionStatusRequest >> 8) + z[1] = byte(extensionStatusRequest) + z = z[4:] + } + if m.ticketSupported { + z[0] = byte(extensionSessionTicket >> 8) + z[1] = byte(extensionSessionTicket) + z = z[4:] + } + + m.raw = x + + return x +} + +func (m *serverHelloMsg) unmarshal(data []byte) bool { + if len(data) < 42 { + return false + } + m.raw = data + m.vers = uint16(data[4])<<8 | uint16(data[5]) + m.random = data[6:38] + sessionIdLen := int(data[38]) + if sessionIdLen > 32 || len(data) < 39+sessionIdLen { + return false + } + m.sessionId = data[39 : 39+sessionIdLen] + data = data[39+sessionIdLen:] + if len(data) < 3 { + return false + } + m.cipherSuite = uint16(data[0])<<8 | uint16(data[1]) + m.compressionMethod = data[2] + data = data[3:] + + m.nextProtoNeg = false + m.nextProtos = nil + m.ocspStapling = false + m.ticketSupported = false + + if len(data) == 0 { + // ServerHello is optionally followed by extension data + return true + } + if len(data) < 2 { + return false + } + + extensionsLength := int(data[0])<<8 | int(data[1]) + data = data[2:] + if len(data) != extensionsLength { + return false + } + + for len(data) != 0 { + if len(data) < 4 { + return false + } + extension := uint16(data[0])<<8 | uint16(data[1]) + length := int(data[2])<<8 | int(data[3]) + data = data[4:] + if len(data) < length { + return false + } + + switch extension { + case extensionNextProtoNeg: + m.nextProtoNeg = true + d := data[:length] + for len(d) > 0 { + l := int(d[0]) + d = d[1:] + if l == 0 || l > len(d) { + return false + } + m.nextProtos = append(m.nextProtos, string(d[:l])) + d = d[l:] + } + case extensionStatusRequest: + if length > 0 { + return false + } + m.ocspStapling = true + case extensionSessionTicket: 
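+		// Per RFC 5077, section 3.2, this extension must be empty in
+		// the ServerHello; it only announces that a NewSessionTicket
+		// message will follow, so any payload is rejected here.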
+ if length > 0 { + return false + } + m.ticketSupported = true + } + data = data[length:] + } + + return true +} + +type certificateMsg struct { + raw []byte + certificates [][]byte +} + +func (m *certificateMsg) equal(i interface{}) bool { + m1, ok := i.(*certificateMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + eqByteSlices(m.certificates, m1.certificates) +} + +func (m *certificateMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + var i int + for _, slice := range m.certificates { + i += len(slice) + } + + length := 3 + 3*len(m.certificates) + i + x = make([]byte, 4+length) + x[0] = typeCertificate + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + + certificateOctets := length - 3 + x[4] = uint8(certificateOctets >> 16) + x[5] = uint8(certificateOctets >> 8) + x[6] = uint8(certificateOctets) + + y := x[7:] + for _, slice := range m.certificates { + y[0] = uint8(len(slice) >> 16) + y[1] = uint8(len(slice) >> 8) + y[2] = uint8(len(slice)) + copy(y[3:], slice) + y = y[3+len(slice):] + } + + m.raw = x + return +} + +func (m *certificateMsg) unmarshal(data []byte) bool { + if len(data) < 7 { + return false + } + + m.raw = data + certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6]) + if uint32(len(data)) != certsLen+7 { + return false + } + + numCerts := 0 + d := data[7:] + for certsLen > 0 { + if len(d) < 4 { + return false + } + certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) + if uint32(len(d)) < 3+certLen { + return false + } + d = d[3+certLen:] + certsLen -= 3 + certLen + numCerts++ + } + + m.certificates = make([][]byte, numCerts) + d = data[7:] + for i := 0; i < numCerts; i++ { + certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) + m.certificates[i] = d[3 : 3+certLen] + d = d[3+certLen:] + } + + return true +} + +type serverKeyExchangeMsg struct { + raw []byte + key []byte +} + +func (m *serverKeyExchangeMsg) equal(i interface{}) bool { + m1, ok := i.(*serverKeyExchangeMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.key, m1.key) +} + +func (m *serverKeyExchangeMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + length := len(m.key) + x := make([]byte, length+4) + x[0] = typeServerKeyExchange + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + copy(x[4:], m.key) + + m.raw = x + return x +} + +func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { + m.raw = data + if len(data) < 4 { + return false + } + m.key = data[4:] + return true +} + +type certificateStatusMsg struct { + raw []byte + statusType uint8 + response []byte +} + +func (m *certificateStatusMsg) equal(i interface{}) bool { + m1, ok := i.(*certificateStatusMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.statusType == m1.statusType && + bytes.Equal(m.response, m1.response) +} + +func (m *certificateStatusMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + + var x []byte + if m.statusType == statusTypeOCSP { + x = make([]byte, 4+4+len(m.response)) + x[0] = typeCertificateStatus + l := len(m.response) + 4 + x[1] = byte(l >> 16) + x[2] = byte(l >> 8) + x[3] = byte(l) + x[4] = statusTypeOCSP + + l -= 4 + x[5] = byte(l >> 16) + x[6] = byte(l >> 8) + x[7] = byte(l) + copy(x[8:], m.response) + } else { + x = []byte{typeCertificateStatus, 0, 0, 1, m.statusType} + } + + m.raw = x + return x +} + +func (m *certificateStatusMsg) unmarshal(data []byte) bool { + m.raw = data + if 
len(data) < 5 { + return false + } + m.statusType = data[4] + + m.response = nil + if m.statusType == statusTypeOCSP { + if len(data) < 8 { + return false + } + respLen := uint32(data[5])<<16 | uint32(data[6])<<8 | uint32(data[7]) + if uint32(len(data)) != 4+4+respLen { + return false + } + m.response = data[8:] + } + return true +} + +type serverHelloDoneMsg struct{} + +func (m *serverHelloDoneMsg) equal(i interface{}) bool { + _, ok := i.(*serverHelloDoneMsg) + return ok +} + +func (m *serverHelloDoneMsg) marshal() []byte { + x := make([]byte, 4) + x[0] = typeServerHelloDone + return x +} + +func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { + return len(data) == 4 +} + +type clientKeyExchangeMsg struct { + raw []byte + ciphertext []byte +} + +func (m *clientKeyExchangeMsg) equal(i interface{}) bool { + m1, ok := i.(*clientKeyExchangeMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.ciphertext, m1.ciphertext) +} + +func (m *clientKeyExchangeMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + length := len(m.ciphertext) + x := make([]byte, length+4) + x[0] = typeClientKeyExchange + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + copy(x[4:], m.ciphertext) + + m.raw = x + return x +} + +func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { + m.raw = data + if len(data) < 4 { + return false + } + l := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) + if l != len(data)-4 { + return false + } + m.ciphertext = data[4:] + return true +} + +type finishedMsg struct { + raw []byte + verifyData []byte +} + +func (m *finishedMsg) equal(i interface{}) bool { + m1, ok := i.(*finishedMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.verifyData, m1.verifyData) +} + +func (m *finishedMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + x = make([]byte, 4+len(m.verifyData)) + x[0] = typeFinished + x[3] = byte(len(m.verifyData)) + copy(x[4:], m.verifyData) + m.raw = x + return +} + +func (m *finishedMsg) unmarshal(data []byte) bool { + m.raw = data + if len(data) < 4 { + return false + } + m.verifyData = data[4:] + return true +} + +type nextProtoMsg struct { + raw []byte + proto string +} + +func (m *nextProtoMsg) equal(i interface{}) bool { + m1, ok := i.(*nextProtoMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.proto == m1.proto +} + +func (m *nextProtoMsg) marshal() []byte { + if m.raw != nil { + return m.raw + } + l := len(m.proto) + if l > 255 { + l = 255 + } + + padding := 32 - (l+2)%32 + length := l + padding + 2 + x := make([]byte, length+4) + x[0] = typeNextProtocol + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + + y := x[4:] + y[0] = byte(l) + copy(y[1:], []byte(m.proto[0:l])) + y = y[1+l:] + y[0] = byte(padding) + + m.raw = x + + return x +} + +func (m *nextProtoMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 5 { + return false + } + data = data[4:] + protoLen := int(data[0]) + data = data[1:] + if len(data) < protoLen { + return false + } + m.proto = string(data[0:protoLen]) + data = data[protoLen:] + + if len(data) < 1 { + return false + } + paddingLen := int(data[0]) + data = data[1:] + if len(data) != paddingLen { + return false + } + + return true +} + +type certificateRequestMsg struct { + raw []byte + // hasSignatureAndHash indicates whether this message includes a list + // of signature and hash functions. 
This change was introduced with TLS + // 1.2. + hasSignatureAndHash bool + + certificateTypes []byte + signatureAndHashes []signatureAndHash + certificateAuthorities [][]byte +} + +func (m *certificateRequestMsg) equal(i interface{}) bool { + m1, ok := i.(*certificateRequestMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.certificateTypes, m1.certificateTypes) && + eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities) && + eqSignatureAndHashes(m.signatureAndHashes, m1.signatureAndHashes) +} + +func (m *certificateRequestMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + // See http://tools.ietf.org/html/rfc4346#section-7.4.4 + length := 1 + len(m.certificateTypes) + 2 + casLength := 0 + for _, ca := range m.certificateAuthorities { + casLength += 2 + len(ca) + } + length += casLength + + if m.hasSignatureAndHash { + length += 2 + 2*len(m.signatureAndHashes) + } + + x = make([]byte, 4+length) + x[0] = typeCertificateRequest + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + + x[4] = uint8(len(m.certificateTypes)) + + copy(x[5:], m.certificateTypes) + y := x[5+len(m.certificateTypes):] + + if m.hasSignatureAndHash { + n := len(m.signatureAndHashes) * 2 + y[0] = uint8(n >> 8) + y[1] = uint8(n) + y = y[2:] + for _, sigAndHash := range m.signatureAndHashes { + y[0] = sigAndHash.hash + y[1] = sigAndHash.signature + y = y[2:] + } + } + + y[0] = uint8(casLength >> 8) + y[1] = uint8(casLength) + y = y[2:] + for _, ca := range m.certificateAuthorities { + y[0] = uint8(len(ca) >> 8) + y[1] = uint8(len(ca)) + y = y[2:] + copy(y, ca) + y = y[len(ca):] + } + + m.raw = x + return +} + +func (m *certificateRequestMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 5 { + return false + } + + length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) + if uint32(len(data))-4 != length { + return false + } + + numCertTypes := int(data[4]) + data = data[5:] + if numCertTypes == 0 || len(data) <= numCertTypes { + return false + } + + m.certificateTypes = make([]byte, numCertTypes) + if copy(m.certificateTypes, data) != numCertTypes { + return false + } + + data = data[numCertTypes:] + + if m.hasSignatureAndHash { + if len(data) < 2 { + return false + } + sigAndHashLen := uint16(data[0])<<8 | uint16(data[1]) + data = data[2:] + if sigAndHashLen&1 != 0 { + return false + } + if len(data) < int(sigAndHashLen) { + return false + } + numSigAndHash := sigAndHashLen / 2 + m.signatureAndHashes = make([]signatureAndHash, numSigAndHash) + for i := range m.signatureAndHashes { + m.signatureAndHashes[i].hash = data[0] + m.signatureAndHashes[i].signature = data[1] + data = data[2:] + } + } + + if len(data) < 2 { + return false + } + casLength := uint16(data[0])<<8 | uint16(data[1]) + data = data[2:] + if len(data) < int(casLength) { + return false + } + cas := make([]byte, casLength) + copy(cas, data) + data = data[casLength:] + + m.certificateAuthorities = nil + for len(cas) > 0 { + if len(cas) < 2 { + return false + } + caLen := uint16(cas[0])<<8 | uint16(cas[1]) + cas = cas[2:] + + if len(cas) < int(caLen) { + return false + } + + m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen]) + cas = cas[caLen:] + } + if len(data) > 0 { + return false + } + + return true +} + +type certificateVerifyMsg struct { + raw []byte + hasSignatureAndHash bool + signatureAndHash signatureAndHash + signature []byte +} + +func (m *certificateVerifyMsg) equal(i interface{}) bool { + m1, ok := 
i.(*certificateVerifyMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + m.hasSignatureAndHash == m1.hasSignatureAndHash && + m.signatureAndHash.hash == m1.signatureAndHash.hash && + m.signatureAndHash.signature == m1.signatureAndHash.signature && + bytes.Equal(m.signature, m1.signature) +} + +func (m *certificateVerifyMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + // See http://tools.ietf.org/html/rfc4346#section-7.4.8 + siglength := len(m.signature) + length := 2 + siglength + if m.hasSignatureAndHash { + length += 2 + } + x = make([]byte, 4+length) + x[0] = typeCertificateVerify + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + y := x[4:] + if m.hasSignatureAndHash { + y[0] = m.signatureAndHash.hash + y[1] = m.signatureAndHash.signature + y = y[2:] + } + y[0] = uint8(siglength >> 8) + y[1] = uint8(siglength) + copy(y[2:], m.signature) + + m.raw = x + + return +} + +func (m *certificateVerifyMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 6 { + return false + } + + length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) + if uint32(len(data))-4 != length { + return false + } + + data = data[4:] + if m.hasSignatureAndHash { + m.signatureAndHash.hash = data[0] + m.signatureAndHash.signature = data[1] + data = data[2:] + } + + if len(data) < 2 { + return false + } + siglength := int(data[0])<<8 + int(data[1]) + data = data[2:] + if len(data) != siglength { + return false + } + + m.signature = data + + return true +} + +type newSessionTicketMsg struct { + raw []byte + ticket []byte +} + +func (m *newSessionTicketMsg) equal(i interface{}) bool { + m1, ok := i.(*newSessionTicketMsg) + if !ok { + return false + } + + return bytes.Equal(m.raw, m1.raw) && + bytes.Equal(m.ticket, m1.ticket) +} + +func (m *newSessionTicketMsg) marshal() (x []byte) { + if m.raw != nil { + return m.raw + } + + // See http://tools.ietf.org/html/rfc5077#section-3.3 + ticketLen := len(m.ticket) + length := 2 + 4 + ticketLen + x = make([]byte, 4+length) + x[0] = typeNewSessionTicket + x[1] = uint8(length >> 16) + x[2] = uint8(length >> 8) + x[3] = uint8(length) + x[8] = uint8(ticketLen >> 8) + x[9] = uint8(ticketLen) + copy(x[10:], m.ticket) + + m.raw = x + + return +} + +func (m *newSessionTicketMsg) unmarshal(data []byte) bool { + m.raw = data + + if len(data) < 10 { + return false + } + + length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) + if uint32(len(data))-4 != length { + return false + } + + ticketLen := int(data[8])<<8 + int(data[9]) + if len(data)-10 != ticketLen { + return false + } + + m.ticket = data[10:] + + return true +} + +type helloRequestMsg struct { +} + +func (*helloRequestMsg) marshal() []byte { + return []byte{typeHelloRequest, 0, 0, 0} +} + +func (*helloRequestMsg) unmarshal(data []byte) bool { + return len(data) == 4 +} + +func eqUint16s(x, y []uint16) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if y[i] != v { + return false + } + } + return true +} + +func eqStrings(x, y []string) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if y[i] != v { + return false + } + } + return true +} + +func eqByteSlices(x, y [][]byte) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + if !bytes.Equal(v, y[i]) { + return false + } + } + return true +} + +func eqSignatureAndHashes(x, y []signatureAndHash) bool { + if len(x) != len(y) { + return false + } + for i, v := range x { + v2 := y[i] + if 
v.hash != v2.hash || v.signature != v2.signature { + return false + } + } + return true +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_messages_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,246 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "math/rand" + "reflect" + "testing" + "testing/quick" +) + +var tests = []interface{}{ + &clientHelloMsg{}, + &serverHelloMsg{}, + &finishedMsg{}, + + &certificateMsg{}, + &certificateRequestMsg{}, + &certificateVerifyMsg{}, + &certificateStatusMsg{}, + &clientKeyExchangeMsg{}, + &nextProtoMsg{}, + &newSessionTicketMsg{}, + &sessionState{}, +} + +type testMessage interface { + marshal() []byte + unmarshal([]byte) bool + equal(interface{}) bool +} + +func TestMarshalUnmarshal(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + + for i, iface := range tests { + ty := reflect.ValueOf(iface).Type() + + n := 100 + if testing.Short() { + n = 5 + } + for j := 0; j < n; j++ { + v, ok := quick.Value(ty, rand) + if !ok { + t.Errorf("#%d: failed to create value", i) + break + } + + m1 := v.Interface().(testMessage) + marshaled := m1.marshal() + m2 := iface.(testMessage) + if !m2.unmarshal(marshaled) { + t.Errorf("#%d failed to unmarshal %#v %x", i, m1, marshaled) + break + } + m2.marshal() // to fill any marshal cache in the message + + if !m1.equal(m2) { + t.Errorf("#%d got:%#v want:%#v %x", i, m2, m1, marshaled) + break + } + + if i >= 3 { + // The first three message types (ClientHello, + // ServerHello and Finished) are allowed to + // have parsable prefixes because the extension + // data is optional and the length of the + // Finished varies across versions. + for j := 0; j < len(marshaled); j++ { + if m2.unmarshal(marshaled[0:j]) { + t.Errorf("#%d unmarshaled a prefix of length %d of %#v", i, j, m1) + break + } + } + } + } + } +} + +func TestFuzz(t *testing.T) { + rand := rand.New(rand.NewSource(0)) + for _, iface := range tests { + m := iface.(testMessage) + + for j := 0; j < 1000; j++ { + len := rand.Intn(100) + bytes := randomBytes(len, rand) + // This just looks for crashes due to bounds errors etc. 
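+			// The bool result is deliberately discarded: random
+			// input is almost never a valid message, and this test
+			// only asserts that parsing never panics or indexes
+			// out of bounds.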
+ m.unmarshal(bytes) + } + } +} + +func randomBytes(n int, rand *rand.Rand) []byte { + r := make([]byte, n) + for i := 0; i < n; i++ { + r[i] = byte(rand.Int31()) + } + return r +} + +func randomString(n int, rand *rand.Rand) string { + b := randomBytes(n, rand) + return string(b) +} + +func (*clientHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &clientHelloMsg{} + m.vers = uint16(rand.Intn(65536)) + m.random = randomBytes(32, rand) + m.sessionId = randomBytes(rand.Intn(32), rand) + m.cipherSuites = make([]uint16, rand.Intn(63)+1) + for i := 0; i < len(m.cipherSuites); i++ { + m.cipherSuites[i] = uint16(rand.Int31()) + } + m.compressionMethods = randomBytes(rand.Intn(63)+1, rand) + if rand.Intn(10) > 5 { + m.nextProtoNeg = true + } + if rand.Intn(10) > 5 { + m.serverName = randomString(rand.Intn(255), rand) + } + m.ocspStapling = rand.Intn(10) > 5 + m.supportedPoints = randomBytes(rand.Intn(5)+1, rand) + m.supportedCurves = make([]uint16, rand.Intn(5)+1) + for i := range m.supportedCurves { + m.supportedCurves[i] = uint16(rand.Intn(30000)) + } + if rand.Intn(10) > 5 { + m.ticketSupported = true + if rand.Intn(10) > 5 { + m.sessionTicket = randomBytes(rand.Intn(300), rand) + } + } + if rand.Intn(10) > 5 { + m.signatureAndHashes = supportedSKXSignatureAlgorithms + } + + return reflect.ValueOf(m) +} + +func (*serverHelloMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &serverHelloMsg{} + m.vers = uint16(rand.Intn(65536)) + m.random = randomBytes(32, rand) + m.sessionId = randomBytes(rand.Intn(32), rand) + m.cipherSuite = uint16(rand.Int31()) + m.compressionMethod = uint8(rand.Intn(256)) + + if rand.Intn(10) > 5 { + m.nextProtoNeg = true + + n := rand.Intn(10) + m.nextProtos = make([]string, n) + for i := 0; i < n; i++ { + m.nextProtos[i] = randomString(20, rand) + } + } + + if rand.Intn(10) > 5 { + m.ocspStapling = true + } + if rand.Intn(10) > 5 { + m.ticketSupported = true + } + + return reflect.ValueOf(m) +} + +func (*certificateMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateMsg{} + numCerts := rand.Intn(20) + m.certificates = make([][]byte, numCerts) + for i := 0; i < numCerts; i++ { + m.certificates[i] = randomBytes(rand.Intn(10)+1, rand) + } + return reflect.ValueOf(m) +} + +func (*certificateRequestMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateRequestMsg{} + m.certificateTypes = randomBytes(rand.Intn(5)+1, rand) + numCAs := rand.Intn(100) + m.certificateAuthorities = make([][]byte, numCAs) + for i := 0; i < numCAs; i++ { + m.certificateAuthorities[i] = randomBytes(rand.Intn(15)+1, rand) + } + return reflect.ValueOf(m) +} + +func (*certificateVerifyMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateVerifyMsg{} + m.signature = randomBytes(rand.Intn(15)+1, rand) + return reflect.ValueOf(m) +} + +func (*certificateStatusMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &certificateStatusMsg{} + if rand.Intn(10) > 5 { + m.statusType = statusTypeOCSP + m.response = randomBytes(rand.Intn(10)+1, rand) + } else { + m.statusType = 42 + } + return reflect.ValueOf(m) +} + +func (*clientKeyExchangeMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &clientKeyExchangeMsg{} + m.ciphertext = randomBytes(rand.Intn(1000)+1, rand) + return reflect.ValueOf(m) +} + +func (*finishedMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &finishedMsg{} + m.verifyData = randomBytes(12, rand) + return reflect.ValueOf(m) +} + +func (*nextProtoMsg) 
Generate(rand *rand.Rand, size int) reflect.Value { + m := &nextProtoMsg{} + m.proto = randomString(rand.Intn(255), rand) + return reflect.ValueOf(m) +} + +func (*newSessionTicketMsg) Generate(rand *rand.Rand, size int) reflect.Value { + m := &newSessionTicketMsg{} + m.ticket = randomBytes(rand.Intn(4), rand) + return reflect.ValueOf(m) +} + +func (*sessionState) Generate(rand *rand.Rand, size int) reflect.Value { + s := &sessionState{} + s.vers = uint16(rand.Intn(10000)) + s.cipherSuite = uint16(rand.Intn(10000)) + s.masterSecret = randomBytes(rand.Intn(100), rand) + numCerts := rand.Intn(20) + s.certificates = make([][]byte, numCerts) + for i := 0; i < numCerts; i++ { + s.certificates[i] = randomBytes(rand.Intn(10)+1, rand) + } + return reflect.ValueOf(s) +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,638 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/subtle" + "crypto/x509" + "encoding/asn1" + "errors" + "io" +) + +// serverHandshakeState contains details of a server handshake in progress. +// It's discarded once the handshake has completed. +type serverHandshakeState struct { + c *Conn + clientHello *clientHelloMsg + hello *serverHelloMsg + suite *cipherSuite + ellipticOk bool + ecdsaOk bool + sessionState *sessionState + finishedHash finishedHash + masterSecret []byte + certsFromClient [][]byte + cert *Certificate +} + +// serverHandshake performs a TLS handshake as a server. +func (c *Conn) serverHandshake() error { + config := c.config + + // If this is the first server handshake, we generate a random key to + // encrypt the tickets with. + config.serverInitOnce.Do(config.serverInit) + + hs := serverHandshakeState{ + c: c, + } + isResume, err := hs.readClientHello() + if err != nil { + return err + } + + // For an overview of TLS handshaking, see https://tools.ietf.org/html/rfc5246#section-7.3 + if isResume { + // The client has included a session ticket and so we do an abbreviated handshake. + if err := hs.doResumeHandshake(); err != nil { + return err + } + if err := hs.establishKeys(); err != nil { + return err + } + if err := hs.sendFinished(); err != nil { + return err + } + if err := hs.readFinished(); err != nil { + return err + } + c.didResume = true + } else { + // The client didn't include a session ticket, or it wasn't + // valid so we do a full handshake. + if err := hs.doFullHandshake(); err != nil { + return err + } + if err := hs.establishKeys(); err != nil { + return err + } + if err := hs.readFinished(); err != nil { + return err + } + if err := hs.sendSessionTicket(); err != nil { + return err + } + if err := hs.sendFinished(); err != nil { + return err + } + } + c.handshakeComplete = true + + return nil +} + +// readClientHello reads a ClientHello message from the client and decides +// whether we will perform session resumption. 
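+// On the resumption path it returns true with hs.sessionState holding the
+// decrypted ticket; on the full-handshake path it returns false with a
+// mutually acceptable cipher suite already negotiated into hs.suite.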
+func (hs *serverHandshakeState) readClientHello() (isResume bool, err error) { + config := hs.c.config + c := hs.c + + msg, err := c.readHandshake() + if err != nil { + return false, err + } + var ok bool + hs.clientHello, ok = msg.(*clientHelloMsg) + if !ok { + return false, c.sendAlert(alertUnexpectedMessage) + } + c.vers, ok = config.mutualVersion(hs.clientHello.vers) + if !ok { + return false, c.sendAlert(alertProtocolVersion) + } + c.haveVers = true + + hs.finishedHash = newFinishedHash(c.vers) + hs.finishedHash.Write(hs.clientHello.marshal()) + + hs.hello = new(serverHelloMsg) + + supportedCurve := false +Curves: + for _, curve := range hs.clientHello.supportedCurves { + switch curve { + case curveP256, curveP384, curveP521: + supportedCurve = true + break Curves + } + } + + supportedPointFormat := false + for _, pointFormat := range hs.clientHello.supportedPoints { + if pointFormat == pointFormatUncompressed { + supportedPointFormat = true + break + } + } + hs.ellipticOk = supportedCurve && supportedPointFormat + + foundCompression := false + // We only support null compression, so check that the client offered it. + for _, compression := range hs.clientHello.compressionMethods { + if compression == compressionNone { + foundCompression = true + break + } + } + + if !foundCompression { + return false, c.sendAlert(alertHandshakeFailure) + } + + hs.hello.vers = c.vers + t := uint32(config.time().Unix()) + hs.hello.random = make([]byte, 32) + hs.hello.random[0] = byte(t >> 24) + hs.hello.random[1] = byte(t >> 16) + hs.hello.random[2] = byte(t >> 8) + hs.hello.random[3] = byte(t) + _, err = io.ReadFull(config.rand(), hs.hello.random[4:]) + if err != nil { + return false, c.sendAlert(alertInternalError) + } + hs.hello.compressionMethod = compressionNone + if len(hs.clientHello.serverName) > 0 { + c.serverName = hs.clientHello.serverName + } + // Although sending an empty NPN extension is reasonable, Firefox has + // had a bug around this. Best to send nothing at all if + // config.NextProtos is empty. See + // https://code.google.com/p/go/issues/detail?id=5445. + if hs.clientHello.nextProtoNeg && len(config.NextProtos) > 0 { + hs.hello.nextProtoNeg = true + hs.hello.nextProtos = config.NextProtos + } + + if len(config.Certificates) == 0 { + return false, c.sendAlert(alertInternalError) + } + hs.cert = &config.Certificates[0] + if len(hs.clientHello.serverName) > 0 { + hs.cert = config.getCertificateForName(hs.clientHello.serverName) + } + + _, hs.ecdsaOk = hs.cert.PrivateKey.(*ecdsa.PrivateKey) + + if hs.checkForResumption() { + return true, nil + } + + var preferenceList, supportedList []uint16 + if c.config.PreferServerCipherSuites { + preferenceList = c.config.cipherSuites() + supportedList = hs.clientHello.cipherSuites + } else { + preferenceList = hs.clientHello.cipherSuites + supportedList = c.config.cipherSuites() + } + + for _, id := range preferenceList { + if hs.suite = c.tryCipherSuite(id, supportedList, c.vers, hs.ellipticOk, hs.ecdsaOk); hs.suite != nil { + break + } + } + + if hs.suite == nil { + return false, c.sendAlert(alertHandshakeFailure) + } + + return false, nil +} + +// checkForResumption returns true if we should perform resumption on this connection. 
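+// Resumption requires that the ticket decrypts, that its version is still
+// mutually acceptable, that the client still offers (and we still support)
+// the ticket's cipher suite, and that the session's client-certificate
+// state is compatible with the current ClientAuth policy.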
+func (hs *serverHandshakeState) checkForResumption() bool { + c := hs.c + + var ok bool + if hs.sessionState, ok = c.decryptTicket(hs.clientHello.sessionTicket); !ok { + return false + } + + if hs.sessionState.vers > hs.clientHello.vers { + return false + } + if vers, ok := c.config.mutualVersion(hs.sessionState.vers); !ok || vers != hs.sessionState.vers { + return false + } + + cipherSuiteOk := false + // Check that the client is still offering the ciphersuite in the session. + for _, id := range hs.clientHello.cipherSuites { + if id == hs.sessionState.cipherSuite { + cipherSuiteOk = true + break + } + } + if !cipherSuiteOk { + return false + } + + // Check that we also support the ciphersuite from the session. + hs.suite = c.tryCipherSuite(hs.sessionState.cipherSuite, c.config.cipherSuites(), hs.sessionState.vers, hs.ellipticOk, hs.ecdsaOk) + if hs.suite == nil { + return false + } + + sessionHasClientCerts := len(hs.sessionState.certificates) != 0 + needClientCerts := c.config.ClientAuth == RequireAnyClientCert || c.config.ClientAuth == RequireAndVerifyClientCert + if needClientCerts && !sessionHasClientCerts { + return false + } + if sessionHasClientCerts && c.config.ClientAuth == NoClientCert { + return false + } + + return true +} + +func (hs *serverHandshakeState) doResumeHandshake() error { + c := hs.c + + hs.hello.cipherSuite = hs.suite.id + // We echo the client's session ID in the ServerHello to let it know + // that we're doing a resumption. + hs.hello.sessionId = hs.clientHello.sessionId + hs.finishedHash.Write(hs.hello.marshal()) + c.writeRecord(recordTypeHandshake, hs.hello.marshal()) + + if len(hs.sessionState.certificates) > 0 { + if _, err := hs.processCertsFromClient(hs.sessionState.certificates); err != nil { + return err + } + } + + hs.masterSecret = hs.sessionState.masterSecret + + return nil +} + +func (hs *serverHandshakeState) doFullHandshake() error { + config := hs.c.config + c := hs.c + + if hs.clientHello.ocspStapling && len(hs.cert.OCSPStaple) > 0 { + hs.hello.ocspStapling = true + } + + hs.hello.ticketSupported = hs.clientHello.ticketSupported && !config.SessionTicketsDisabled + hs.hello.cipherSuite = hs.suite.id + hs.finishedHash.Write(hs.hello.marshal()) + c.writeRecord(recordTypeHandshake, hs.hello.marshal()) + + certMsg := new(certificateMsg) + certMsg.certificates = hs.cert.Certificate + hs.finishedHash.Write(certMsg.marshal()) + c.writeRecord(recordTypeHandshake, certMsg.marshal()) + + if hs.hello.ocspStapling { + certStatus := new(certificateStatusMsg) + certStatus.statusType = statusTypeOCSP + certStatus.response = hs.cert.OCSPStaple + hs.finishedHash.Write(certStatus.marshal()) + c.writeRecord(recordTypeHandshake, certStatus.marshal()) + } + + keyAgreement := hs.suite.ka(c.vers) + skx, err := keyAgreement.generateServerKeyExchange(config, hs.cert, hs.clientHello, hs.hello) + if err != nil { + c.sendAlert(alertHandshakeFailure) + return err + } + if skx != nil { + hs.finishedHash.Write(skx.marshal()) + c.writeRecord(recordTypeHandshake, skx.marshal()) + } + + if config.ClientAuth >= RequestClientCert { + // Request a client certificate + certReq := new(certificateRequestMsg) + certReq.certificateTypes = []byte{ + byte(certTypeRSASign), + byte(certTypeECDSASign), + } + if c.vers >= VersionTLS12 { + certReq.hasSignatureAndHash = true + certReq.signatureAndHashes = supportedClientCertSignatureAlgorithms + } + + // An empty list of certificateAuthorities signals to + // the client that it may send any certificate in response + // to our request. 
When we know the CAs we trust, then + // we can send them down, so that the client can choose + // an appropriate certificate to give to us. + if config.ClientCAs != nil { + certReq.certificateAuthorities = config.ClientCAs.Subjects() + } + hs.finishedHash.Write(certReq.marshal()) + c.writeRecord(recordTypeHandshake, certReq.marshal()) + } + + helloDone := new(serverHelloDoneMsg) + hs.finishedHash.Write(helloDone.marshal()) + c.writeRecord(recordTypeHandshake, helloDone.marshal()) + + var pub crypto.PublicKey // public key for client auth, if any + + msg, err := c.readHandshake() + if err != nil { + return err + } + + var ok bool + // If we requested a client certificate, then the client must send a + // certificate message, even if it's empty. + if config.ClientAuth >= RequestClientCert { + if certMsg, ok = msg.(*certificateMsg); !ok { + return c.sendAlert(alertHandshakeFailure) + } + hs.finishedHash.Write(certMsg.marshal()) + + if len(certMsg.certificates) == 0 { + // The client didn't actually send a certificate + switch config.ClientAuth { + case RequireAnyClientCert, RequireAndVerifyClientCert: + c.sendAlert(alertBadCertificate) + return errors.New("tls: client didn't provide a certificate") + } + } + + pub, err = hs.processCertsFromClient(certMsg.certificates) + if err != nil { + return err + } + + msg, err = c.readHandshake() + if err != nil { + return err + } + } + + // Get client key exchange + ckx, ok := msg.(*clientKeyExchangeMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + hs.finishedHash.Write(ckx.marshal()) + + // If we received a client cert in response to our certificate request message, + // the client will send us a certificateVerifyMsg immediately after the + // clientKeyExchangeMsg. This message is a digest of all preceding + // handshake-layer messages that is signed using the private key corresponding + // to the client's certificate. This allows us to verify that the client is in + // possession of the private key of the certificate. 
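+	// pub was captured by processCertsFromClient above and is the public
+	// key of the client's leaf certificate; it is what verifies the
+	// CertificateVerify signature below.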
+ if len(c.peerCertificates) > 0 { + msg, err = c.readHandshake() + if err != nil { + return err + } + certVerify, ok := msg.(*certificateVerifyMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + switch key := pub.(type) { + case *ecdsa.PublicKey: + ecdsaSig := new(ecdsaSignature) + if _, err = asn1.Unmarshal(certVerify.signature, ecdsaSig); err != nil { + break + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + err = errors.New("ECDSA signature contained zero or negative values") + break + } + digest, _, _ := hs.finishedHash.hashForClientCertificate(signatureECDSA) + if !ecdsa.Verify(key, digest, ecdsaSig.R, ecdsaSig.S) { + err = errors.New("ECDSA verification failure") + break + } + case *rsa.PublicKey: + digest, hashFunc, _ := hs.finishedHash.hashForClientCertificate(signatureRSA) + err = rsa.VerifyPKCS1v15(key, hashFunc, digest, certVerify.signature) + } + if err != nil { + c.sendAlert(alertBadCertificate) + return errors.New("could not validate signature of connection nonces: " + err.Error()) + } + + hs.finishedHash.Write(certVerify.marshal()) + } + + preMasterSecret, err := keyAgreement.processClientKeyExchange(config, hs.cert, ckx, c.vers) + if err != nil { + c.sendAlert(alertHandshakeFailure) + return err + } + hs.masterSecret = masterFromPreMasterSecret(c.vers, preMasterSecret, hs.clientHello.random, hs.hello.random) + + return nil +} + +func (hs *serverHandshakeState) establishKeys() error { + c := hs.c + + clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := + keysFromMasterSecret(c.vers, hs.masterSecret, hs.clientHello.random, hs.hello.random, hs.suite.macLen, hs.suite.keyLen, hs.suite.ivLen) + + var clientCipher, serverCipher interface{} + var clientHash, serverHash macFunction + + if hs.suite.aead == nil { + clientCipher = hs.suite.cipher(clientKey, clientIV, true /* for reading */) + clientHash = hs.suite.mac(c.vers, clientMAC) + serverCipher = hs.suite.cipher(serverKey, serverIV, false /* not for reading */) + serverHash = hs.suite.mac(c.vers, serverMAC) + } else { + clientCipher = hs.suite.aead(clientKey, clientIV) + serverCipher = hs.suite.aead(serverKey, serverIV) + } + + c.in.prepareCipherSpec(c.vers, clientCipher, clientHash) + c.out.prepareCipherSpec(c.vers, serverCipher, serverHash) + + return nil +} + +func (hs *serverHandshakeState) readFinished() error { + c := hs.c + + c.readRecord(recordTypeChangeCipherSpec) + if err := c.error(); err != nil { + return err + } + + if hs.hello.nextProtoNeg { + msg, err := c.readHandshake() + if err != nil { + return err + } + nextProto, ok := msg.(*nextProtoMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + hs.finishedHash.Write(nextProto.marshal()) + c.clientProtocol = nextProto.proto + } + + msg, err := c.readHandshake() + if err != nil { + return err + } + clientFinished, ok := msg.(*finishedMsg) + if !ok { + return c.sendAlert(alertUnexpectedMessage) + } + + verify := hs.finishedHash.clientSum(hs.masterSecret) + if len(verify) != len(clientFinished.verifyData) || + subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 { + return c.sendAlert(alertHandshakeFailure) + } + + hs.finishedHash.Write(clientFinished.marshal()) + return nil +} + +func (hs *serverHandshakeState) sendSessionTicket() error { + if !hs.hello.ticketSupported { + return nil + } + + c := hs.c + m := new(newSessionTicketMsg) + + var err error + state := sessionState{ + vers: c.vers, + cipherSuite: hs.suite.id, + masterSecret: hs.masterSecret, + certificates: hs.certsFromClient, + } + 
m.ticket, err = c.encryptTicket(&state) + if err != nil { + return err + } + + hs.finishedHash.Write(m.marshal()) + c.writeRecord(recordTypeHandshake, m.marshal()) + + return nil +} + +func (hs *serverHandshakeState) sendFinished() error { + c := hs.c + + c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) + + finished := new(finishedMsg) + finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret) + hs.finishedHash.Write(finished.marshal()) + c.writeRecord(recordTypeHandshake, finished.marshal()) + + c.cipherSuite = hs.suite.id + + return nil +} + +// processCertsFromClient takes a chain of client certificates either from a +// Certificates message or from a sessionState and verifies them. It returns +// the public key of the leaf certificate. +func (hs *serverHandshakeState) processCertsFromClient(certificates [][]byte) (crypto.PublicKey, error) { + c := hs.c + + hs.certsFromClient = certificates + certs := make([]*x509.Certificate, len(certificates)) + var err error + for i, asn1Data := range certificates { + if certs[i], err = x509.ParseCertificate(asn1Data); err != nil { + c.sendAlert(alertBadCertificate) + return nil, errors.New("tls: failed to parse client certificate: " + err.Error()) + } + } + + if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 { + opts := x509.VerifyOptions{ + Roots: c.config.ClientCAs, + CurrentTime: c.config.time(), + Intermediates: x509.NewCertPool(), + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + } + + for _, cert := range certs[1:] { + opts.Intermediates.AddCert(cert) + } + + chains, err := certs[0].Verify(opts) + if err != nil { + c.sendAlert(alertBadCertificate) + return nil, errors.New("tls: failed to verify client's certificate: " + err.Error()) + } + + ok := false + for _, ku := range certs[0].ExtKeyUsage { + if ku == x509.ExtKeyUsageClientAuth { + ok = true + break + } + } + if !ok { + c.sendAlert(alertHandshakeFailure) + return nil, errors.New("tls: client's certificate's extended key usage doesn't permit it to be used for client authentication") + } + + c.verifiedChains = chains + } + + if len(certs) > 0 { + var pub crypto.PublicKey + switch key := certs[0].PublicKey.(type) { + case *ecdsa.PublicKey, *rsa.PublicKey: + pub = key + default: + return nil, c.sendAlert(alertUnsupportedCertificate) + } + c.peerCertificates = certs + return pub, nil + } + + return nil, nil +} + +// tryCipherSuite returns a cipherSuite with the given id if that cipher suite +// is acceptable to use. +func (c *Conn) tryCipherSuite(id uint16, supportedCipherSuites []uint16, version uint16, ellipticOk, ecdsaOk bool) *cipherSuite { + for _, supported := range supportedCipherSuites { + if id == supported { + var candidate *cipherSuite + + for _, s := range cipherSuites { + if s.id == id { + candidate = s + break + } + } + if candidate == nil { + continue + } + // Don't select a ciphersuite which we can't + // support for this client. + if (candidate.flags&suiteECDHE != 0) && !ellipticOk { + continue + } + if (candidate.flags&suiteECDSA != 0) != ecdsaOk { + continue + } + if version < VersionTLS12 && candidate.flags&suiteTLS12 != 0 { + continue + } + return candidate + } + } + + return nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/handshake_server_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3796 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "flag" + "fmt" + "io" + "log" + "math/big" + "net" + "os" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type zeroSource struct{} + +func (zeroSource) Read(b []byte) (n int, err error) { + for i := range b { + b[i] = 0 + } + + return len(b), nil +} + +var testConfig *Config + +func init() { + testConfig = new(Config) + testConfig.Time = func() time.Time { return time.Unix(0, 0) } + testConfig.Rand = zeroSource{} + testConfig.Certificates = make([]Certificate, 2) + testConfig.Certificates[0].Certificate = [][]byte{testRSACertificate} + testConfig.Certificates[0].PrivateKey = testRSAPrivateKey + testConfig.Certificates[1].Certificate = [][]byte{testSNICertificate} + testConfig.Certificates[1].PrivateKey = testRSAPrivateKey + testConfig.BuildNameToCertificate() + testConfig.CipherSuites = []uint16{TLS_RSA_WITH_RC4_128_SHA} + testConfig.InsecureSkipVerify = true + testConfig.MinVersion = VersionSSL30 + testConfig.MaxVersion = VersionTLS10 +} + +func testClientHelloFailure(t *testing.T, m handshakeMessage, expected error) { + // Create in-memory network connection, + // send message to server. Should return + // expected error. + c, s := net.Pipe() + go func() { + cli := Client(c, testConfig) + if ch, ok := m.(*clientHelloMsg); ok { + cli.vers = ch.vers + } + cli.writeRecord(recordTypeHandshake, m.marshal()) + c.Close() + }() + err := Server(s, testConfig).Handshake() + s.Close() + if e, ok := err.(*net.OpError); !ok || e.Err != expected { + t.Errorf("Got error: %s; expected: %s", err, expected) + } +} + +func TestSimpleError(t *testing.T) { + testClientHelloFailure(t, &serverHelloDoneMsg{}, alertUnexpectedMessage) +} + +var badProtocolVersions = []uint16{0x0000, 0x0005, 0x0100, 0x0105, 0x0200, 0x0205} + +func TestRejectBadProtocolVersion(t *testing.T) { + for _, v := range badProtocolVersions { + testClientHelloFailure(t, &clientHelloMsg{vers: v}, alertProtocolVersion) + } +} + +func TestNoSuiteOverlap(t *testing.T) { + clientHello := &clientHelloMsg{ + vers: 0x0301, + cipherSuites: []uint16{0xff00}, + compressionMethods: []uint8{0}, + } + testClientHelloFailure(t, clientHello, alertHandshakeFailure) +} + +func TestNoCompressionOverlap(t *testing.T) { + clientHello := &clientHelloMsg{ + vers: 0x0301, + cipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA}, + compressionMethods: []uint8{0xff}, + } + testClientHelloFailure(t, clientHello, alertHandshakeFailure) +} + +func TestTLS12OnlyCipherSuites(t *testing.T) { + // Test that a Server doesn't select a TLS 1.2-only cipher suite when + // the client negotiates TLS 1.1. + var zeros [32]byte + + clientHello := &clientHelloMsg{ + vers: VersionTLS11, + random: zeros[:], + cipherSuites: []uint16{ + // The Server, by default, will use the client's + // preference order. So the GCM cipher suite + // will be selected unless it's excluded because + // of the version in this ClientHello. 
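+			// The GCM suite carries the suiteTLS12 flag, so for a
+			// TLS 1.1 ClientHello tryCipherSuite must skip it and
+			// fall through to RC4-SHA.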
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_RSA_WITH_RC4_128_SHA, + }, + compressionMethods: []uint8{compressionNone}, + supportedCurves: []uint16{curveP256, curveP384, curveP521}, + supportedPoints: []uint8{pointFormatUncompressed}, + } + + c, s := net.Pipe() + var reply interface{} + var clientErr error + go func() { + cli := Client(c, testConfig) + cli.vers = clientHello.vers + cli.writeRecord(recordTypeHandshake, clientHello.marshal()) + reply, clientErr = cli.readHandshake() + c.Close() + }() + config := *testConfig + config.CipherSuites = clientHello.cipherSuites + Server(s, &config).Handshake() + s.Close() + if clientErr != nil { + t.Fatal(clientErr) + } + serverHello, ok := reply.(*serverHelloMsg) + if !ok { + t.Fatalf("didn't get ServerHello message in reply. Got %v\n", reply) + } + if s := serverHello.cipherSuite; s != TLS_RSA_WITH_RC4_128_SHA { + t.Fatalf("bad cipher suite from server: %x", s) + } +} + +func TestAlertForwarding(t *testing.T) { + c, s := net.Pipe() + go func() { + Client(c, testConfig).sendAlert(alertUnknownCA) + c.Close() + }() + + err := Server(s, testConfig).Handshake() + s.Close() + if e, ok := err.(*net.OpError); !ok || e.Err != error(alertUnknownCA) { + t.Errorf("Got error: %s; expected: %s", err, error(alertUnknownCA)) + } +} + +func TestClose(t *testing.T) { + c, s := net.Pipe() + go c.Close() + + err := Server(s, testConfig).Handshake() + s.Close() + if err != io.EOF { + t.Errorf("Got error: %s; expected: %s", err, io.EOF) + } +} + +func testHandshake(clientConfig, serverConfig *Config) (state ConnectionState, err error) { + c, s := net.Pipe() + go func() { + cli := Client(c, clientConfig) + cli.Handshake() + c.Close() + }() + server := Server(s, serverConfig) + err = server.Handshake() + if err == nil { + state = server.ConnectionState() + } + s.Close() + return +} + +func TestCipherSuitePreference(t *testing.T) { + serverConfig := &Config{ + CipherSuites: []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_RC4_128_SHA}, + Certificates: testConfig.Certificates, + MaxVersion: VersionTLS11, + } + clientConfig := &Config{ + CipherSuites: []uint16{TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_RC4_128_SHA}, + InsecureSkipVerify: true, + } + state, err := testHandshake(clientConfig, serverConfig) + if err != nil { + t.Fatalf("handshake failed: %s", err) + } + if state.CipherSuite != TLS_RSA_WITH_AES_128_CBC_SHA { + // By default the server should use the client's preference. 
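+			// (The second handshake in this test flips
+			// PreferServerCipherSuites to true and then expects the
+			// server's first choice, TLS_RSA_WITH_RC4_128_SHA,
+			// rather than the client's.)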
+ t.Fatalf("Client's preference was not used, got %x", state.CipherSuite) + } + + serverConfig.PreferServerCipherSuites = true + state, err = testHandshake(clientConfig, serverConfig) + if err != nil { + t.Fatalf("handshake failed: %s", err) + } + if state.CipherSuite != TLS_RSA_WITH_RC4_128_SHA { + t.Fatalf("Server's preference was not used, got %x", state.CipherSuite) + } +} + +func testServerScript(t *testing.T, name string, serverScript [][]byte, config *Config, peers []*x509.Certificate) { + c, s := net.Pipe() + srv := Server(s, config) + pchan := make(chan []*x509.Certificate, 1) + go func() { + srv.Write([]byte("hello, world\n")) + srv.Close() + s.Close() + st := srv.ConnectionState() + pchan <- st.PeerCertificates + }() + + for i, b := range serverScript { + if i%2 == 0 { + c.Write(b) + continue + } + bb := make([]byte, len(b)) + n, err := io.ReadFull(c, bb) + if err != nil { + t.Fatalf("%s #%d: %s\nRead %d, wanted %d, got %x, wanted %x\n", name, i, err, n, len(bb), bb[:n], b) + } + if !bytes.Equal(b, bb) { + t.Fatalf("%s #%d: mismatch on read: got:%x want:%x", name, i, bb, b) + } + } + c.Close() + + if peers != nil { + gotpeers := <-pchan + if len(peers) == len(gotpeers) { + for i := range peers { + if !peers[i].Equal(gotpeers[i]) { + t.Fatalf("%s: mismatch on peer cert %d", name, i) + } + } + } else { + t.Fatalf("%s: mismatch on peer list length: %d (wanted) != %d (got)", name, len(peers), len(gotpeers)) + } + } +} + +func TestHandshakeServerRSARC4(t *testing.T) { + testServerScript(t, "RSA-RC4", rsaRC4ServerScript, testConfig, nil) +} + +func TestHandshakeServerRSA3DES(t *testing.T) { + des3Config := new(Config) + *des3Config = *testConfig + des3Config.CipherSuites = []uint16{TLS_RSA_WITH_3DES_EDE_CBC_SHA} + testServerScript(t, "RSA-3DES", rsaDES3ServerScript, des3Config, nil) +} + +func TestHandshakeServerRSAAES(t *testing.T) { + aesConfig := new(Config) + *aesConfig = *testConfig + aesConfig.CipherSuites = []uint16{TLS_RSA_WITH_AES_128_CBC_SHA} + testServerScript(t, "RSA-AES", rsaAESServerScript, aesConfig, nil) +} + +func TestHandshakeServerECDHEECDSAAES(t *testing.T) { + ecdsaConfig := new(Config) + *ecdsaConfig = *testConfig + ecdsaConfig.Certificates = make([]Certificate, 1) + ecdsaConfig.Certificates[0].Certificate = [][]byte{testECDSACertificate} + ecdsaConfig.Certificates[0].PrivateKey = testECDSAPrivateKey + ecdsaConfig.BuildNameToCertificate() + ecdsaConfig.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA} + testServerScript(t, "ECDHE-ECDSA-AES", ecdheECDSAAESServerScript, ecdsaConfig, nil) +} + +func TestHandshakeServerSSLv3(t *testing.T) { + testServerScript(t, "SSLv3", sslv3ServerScript, testConfig, nil) +} + +// TestHandshakeServerSNI involves a client sending an SNI extension of +// "snitest.com", which happens to match the CN of testSNICertificate. The test +// verifies that the server correctly selects that certificate. 
+func TestHandshakeServerSNI(t *testing.T) {
+	testServerScript(t, "SNI", selectCertificateBySNIScript, testConfig, nil)
+}
+
+func TestResumption(t *testing.T) {
+	testServerScript(t, "IssueTicket", issueSessionTicketTest, testConfig, nil)
+	testServerScript(t, "Resume", serverResumeTest, testConfig, nil)
+}
+
+func TestTLS12ClientCertServer(t *testing.T) {
+	config := *testConfig
+	config.MaxVersion = VersionTLS12
+	config.ClientAuth = RequireAnyClientCert
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_RC4_128_SHA}
+
+	testServerScript(t, "TLS12", tls12ServerScript, &config, nil)
+}
+
+type clientauthTest struct {
+	name       string
+	clientauth ClientAuthType
+	peers      []*x509.Certificate
+	script     [][]byte
+}
+
+func TestClientAuthRSA(t *testing.T) {
+	for _, cat := range clientauthRSATests {
+		t.Log("running", cat.name)
+		cfg := new(Config)
+		*cfg = *testConfig
+		cfg.ClientAuth = cat.clientauth
+		testServerScript(t, cat.name, cat.script, cfg, cat.peers)
+	}
+}
+
+func TestClientAuthECDSA(t *testing.T) {
+	for _, cat := range clientauthECDSATests {
+		t.Log("running", cat.name)
+		cfg := new(Config)
+		*cfg = *testConfig
+		cfg.Certificates = make([]Certificate, 1)
+		cfg.Certificates[0].Certificate = [][]byte{testECDSACertificate}
+		cfg.Certificates[0].PrivateKey = testECDSAPrivateKey
+		cfg.BuildNameToCertificate()
+		cfg.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}
+		cfg.ClientAuth = cat.clientauth
+		testServerScript(t, cat.name, cat.script, cfg, cat.peers)
+	}
+}
+
+// TestCipherSuiteCertPreference ensures that we select an RSA ciphersuite with
+// an RSA certificate and an ECDSA ciphersuite with an ECDSA certificate.
+func TestCipherSuiteCertPreference(t *testing.T) {
+	var config = *testConfig
+	config.CipherSuites = []uint16{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+	config.MaxVersion = VersionTLS11
+	config.PreferServerCipherSuites = true
+	testServerScript(t, "CipherSuiteCertPreference", tls11ECDHEAESServerScript, &config, nil)
+
+	config = *testConfig
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA}
+	config.Certificates = []Certificate{
+		Certificate{
+			Certificate: [][]byte{testECDSACertificate},
+			PrivateKey:  testECDSAPrivateKey,
+		},
+	}
+	config.BuildNameToCertificate()
+	config.PreferServerCipherSuites = true
+	testServerScript(t, "CipherSuiteCertPreference2", ecdheECDSAAESServerScript, &config, nil)
+}
+
+func TestTLS11Server(t *testing.T) {
+	var config = *testConfig
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA}
+	config.MaxVersion = VersionTLS11
+	testServerScript(t, "TLS11", tls11ECDHEAESServerScript, &config, nil)
+}
+
+func TestAESGCM(t *testing.T) {
+	var config = *testConfig
+	config.CipherSuites = []uint16{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}
+	config.MaxVersion = VersionTLS12
+	testServerScript(t, "AES-GCM", aesGCMServerScript, &config, nil)
+}
+
+// recordingConn is a net.Conn that records the traffic that passes through it.
+// WriteTo can be used to produce Go code that contains the recorded traffic.
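+//
+// A minimal usage sketch (mirroring TestRunServer below; tcpConn and config
+// stand in for a real connection and Config):
+//
+//	record := &recordingConn{Conn: tcpConn}
+//	conn := Server(record, config)
+//	if err := conn.Handshake(); err == nil {
+//		record.WriteTo(os.Stdout) // prints the flows as a [][]byte literal
+//	}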
+type recordingConn struct { + net.Conn + lock sync.Mutex + flows [][]byte + currentlyReading bool +} + +func (r *recordingConn) Read(b []byte) (n int, err error) { + if n, err = r.Conn.Read(b); n == 0 { + return + } + b = b[:n] + + r.lock.Lock() + defer r.lock.Unlock() + + if l := len(r.flows); l == 0 || !r.currentlyReading { + buf := make([]byte, len(b)) + copy(buf, b) + r.flows = append(r.flows, buf) + } else { + r.flows[l-1] = append(r.flows[l-1], b[:n]...) + } + r.currentlyReading = true + return +} + +func (r *recordingConn) Write(b []byte) (n int, err error) { + if n, err = r.Conn.Write(b); n == 0 { + return + } + b = b[:n] + + r.lock.Lock() + defer r.lock.Unlock() + + if l := len(r.flows); l == 0 || r.currentlyReading { + buf := make([]byte, len(b)) + copy(buf, b) + r.flows = append(r.flows, buf) + } else { + r.flows[l-1] = append(r.flows[l-1], b[:n]...) + } + r.currentlyReading = false + return +} + +// WriteTo writes Go source code to w that contains the recorded traffic. +func (r *recordingConn) WriteTo(w io.Writer) { + fmt.Fprintf(w, "var changeMe = [][]byte {\n") + for _, buf := range r.flows { + fmt.Fprintf(w, "\t{") + for i, b := range buf { + if i%8 == 0 { + fmt.Fprintf(w, "\n\t\t") + } + fmt.Fprintf(w, "0x%02x, ", b) + } + fmt.Fprintf(w, "\n\t},\n") + } + fmt.Fprintf(w, "}\n") +} + +var serve = flag.Bool("serve", false, "run a TLS server on :10443") +var testCipherSuites = flag.String("ciphersuites", + "0x"+strconv.FormatInt(int64(TLS_RSA_WITH_RC4_128_SHA), 16), + "cipher suites to accept in serving mode") +var testMinVersion = flag.String("minversion", + "0x"+strconv.FormatInt(int64(VersionSSL30), 16), + "minimum version to negotiate") +var testMaxVersion = flag.String("maxversion", + "0x"+strconv.FormatInt(int64(VersionTLS10), 16), + "maximum version to negotiate") +var testClientAuth = flag.Int("clientauth", 0, "value for tls.Config.ClientAuth") + +func GetTestConfig() *Config { + var config = *testConfig + + minVersion, err := strconv.ParseUint(*testMinVersion, 0, 64) + if err != nil { + panic(err) + } + config.MinVersion = uint16(minVersion) + maxVersion, err := strconv.ParseUint(*testMaxVersion, 0, 64) + if err != nil { + panic(err) + } + config.MaxVersion = uint16(maxVersion) + + suites := strings.Split(*testCipherSuites, ",") + config.CipherSuites = make([]uint16, len(suites)) + for i := range suites { + suite, err := strconv.ParseUint(suites[i], 0, 64) + if err != nil { + panic(err) + } + config.CipherSuites[i] = uint16(suite) + } + + ecdsa := false + for _, suite := range config.CipherSuites { + switch suite { + case TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: + ecdsa = true + } + } + if ecdsa { + config.Certificates = nil + if !*connect { + config.Certificates = make([]Certificate, 1) + config.Certificates[0].Certificate = [][]byte{testECDSACertificate} + config.Certificates[0].PrivateKey = testECDSAPrivateKey + } + config.BuildNameToCertificate() + } + + config.ClientAuth = ClientAuthType(*testClientAuth) + return &config +} + +func TestRunServer(t *testing.T) { + if !*serve { + return + } + + config := GetTestConfig() + + const addr = ":10443" + l, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + log.Printf("Now listening for connections on %s", addr) + + for { + tcpConn, err := l.Accept() + if err != nil { + log.Printf("error accepting connection: %s", err) + break + } + + record := &recordingConn{ + Conn: tcpConn, + } + + conn := Server(record, config) + if err := 
conn.Handshake(); err != nil { + log.Printf("error from TLS handshake: %s", err) + break + } + + _, err = conn.Write([]byte("hello, world\n")) + if err != nil { + log.Printf("error from Write: %s", err) + continue + } + + conn.Close() + + record.WriteTo(os.Stdout) + } +} + +func bigFromString(s string) *big.Int { + ret := new(big.Int) + ret.SetString(s, 10) + return ret +} + +func fromHex(s string) []byte { + b, _ := hex.DecodeString(s) + return b +} + +var testRSACertificate = fromHex("308202b030820219a00302010202090085b0bba48a7fb8ca300d06092a864886f70d01010505003045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c7464301e170d3130303432343039303933385a170d3131303432343039303933385a3045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c746430819f300d06092a864886f70d010101050003818d0030818902818100bb79d6f517b5e5bf4610d0dc69bee62b07435ad0032d8a7a4385b71452e7a5654c2c78b8238cb5b482e5de1f953b7e62a52ca533d6fe125c7a56fcf506bffa587b263fb5cd04d3d0c921964ac7f4549f5abfef427100fe1899077f7e887d7df10439c4a22edb51c97ce3c04c3b326601cfafb11db8719a1ddbdb896baeda2d790203010001a381a73081a4301d0603551d0e04160414b1ade2855acfcb28db69ce2369ded3268e18883930750603551d23046e306c8014b1ade2855acfcb28db69ce2369ded3268e188839a149a4473045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c746482090085b0bba48a7fb8ca300c0603551d13040530030101ff300d06092a864886f70d010105050003818100086c4524c76bb159ab0c52ccf2b014d7879d7a6475b55a9566e4c52b8eae12661feb4f38b36e60d392fdf74108b52513b1187a24fb301dbaed98b917ece7d73159db95d31d78ea50565cd5825a2d5a5f33c4b6d8c97590968c0f5298b5cd981f89205ff2a01ca31b9694dda9fd57e970e8266d71999b266e3850296c90a7bdd9") + +var testECDSACertificate = fromHex("3082020030820162020900b8bf2d47a0d2ebf4300906072a8648ce3d04013045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c7464301e170d3132313132323135303633325a170d3232313132303135303633325a3045310b3009060355040613024155311330110603550408130a536f6d652d53746174653121301f060355040a1318496e7465726e6574205769646769747320507479204c746430819b301006072a8648ce3d020106052b81040023038186000400c4a1edbe98f90b4873367ec316561122f23d53c33b4d213dcd6b75e6f6b0dc9adf26c1bcb287f072327cb3642f1c90bcea6823107efee325c0483a69e0286dd33700ef0462dd0da09c706283d881d36431aa9e9731bd96b068c09b23de76643f1a5c7fe9120e5858b65f70dd9bd8ead5d7f5d5ccb9b69f30665b669a20e227e5bffe3b300906072a8648ce3d040103818c0030818802420188a24febe245c5487d1bacf5ed989dae4770c05e1bb62fbdf1b64db76140d311a2ceee0b7e927eff769dc33b7ea53fcefa10e259ec472d7cacda4e970e15a06fd00242014dfcbe67139c2d050ebd3fa38c25c13313830d9406bbd4377af6ec7ac9862eddd711697f857c56defb31782be4c7780daecbbe9e4e3624317b6a0f399512078f2a") + +var testSNICertificate = 
fromHex("308201f23082015da003020102020100300b06092a864886f70d01010530283110300e060355040a130741636d6520436f311430120603550403130b736e69746573742e636f6d301e170d3132303431313137343033355a170d3133303431313137343533355a30283110300e060355040a130741636d6520436f311430120603550403130b736e69746573742e636f6d30819d300b06092a864886f70d01010103818d0030818902818100bb79d6f517b5e5bf4610d0dc69bee62b07435ad0032d8a7a4385b71452e7a5654c2c78b8238cb5b482e5de1f953b7e62a52ca533d6fe125c7a56fcf506bffa587b263fb5cd04d3d0c921964ac7f4549f5abfef427100fe1899077f7e887d7df10439c4a22edb51c97ce3c04c3b326601cfafb11db8719a1ddbdb896baeda2d790203010001a3323030300e0603551d0f0101ff0404030200a0300d0603551d0e0406040401020304300f0603551d2304083006800401020304300b06092a864886f70d0101050381810089c6455f1c1f5ef8eb1ab174ee2439059f5c4259bb1a8d86cdb1d056f56a717da40e95ab90f59e8deaf627c157995094db0802266eb34fc6842dea8a4b68d9c1389103ab84fb9e1f85d9b5d23ff2312c8670fbb540148245a4ebafe264d90c8a4cf4f85b0fac12ac2fc4a3154bad52462868af96c62c6525d652b6e31845bdcc") + +var testRSAPrivateKey = &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + N: bigFromString("131650079503776001033793877885499001334664249354723305978524647182322416328664556247316495448366990052837680518067798333412266673813370895702118944398081598789828837447552603077848001020611640547221687072142537202428102790818451901395596882588063427854225330436740647715202971973145151161964464812406232198521"), + E: 65537, + }, + D: bigFromString("29354450337804273969007277378287027274721892607543397931919078829901848876371746653677097639302788129485893852488285045793268732234230875671682624082413996177431586734171663258657462237320300610850244186316880055243099640544518318093544057213190320837094958164973959123058337475052510833916491060913053867729"), + Primes: []*big.Int{ + bigFromString("11969277782311800166562047708379380720136961987713178380670422671426759650127150688426177829077494755200794297055316163155755835813760102405344560929062149"), + bigFromString("10998999429884441391899182616418192492905073053684657075974935218461686523870125521822756579792315215543092255516093840728890783887287417039645833477273829"), + }, +} + +var testECDSAPrivateKey = &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: &elliptic.CurveParams{ + P: bigFromString("6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151"), + N: bigFromString("6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449"), + B: bigFromString("1093849038073734274511112390766805569936207598951683748994586394495953116150735016013708737573759623248592132296706313309438452531591012912142327488478985984"), + Gx: bigFromString("2661740802050217063228768716723360960729859168756973147706671368418802944996427808491545080627771902352094241225065558662157113545570916814161637315895999846"), + Gy: bigFromString("3757180025770020463545507224491183603594455134769762486694567779615544477440556316691234405012945539562144444537289428522585666729196580810124344277578376784"), + BitSize: 521, + }, + X: bigFromString("2636411247892461147287360222306590634450676461695221912739908880441342231985950069527906976759812296359387337367668045707086543273113073382714101597903639351"), + Y: bigFromString("3204695818431246682253994090650952614555094516658732116404513121125038617915183037601737180082382202488628239201196033284060130040574800684774115478859677243"), + 
}, + D: bigFromString("5477294338614160138026852784385529180817726002953041720191098180813046231640184669647735805135001309477695746518160084669446643325196003346204701381388769751"), +} + +func loadPEMCert(in string) *x509.Certificate { + block, _ := pem.Decode([]byte(in)) + if block.Type == "CERTIFICATE" && len(block.Headers) == 0 { + cert, err := x509.ParseCertificate(block.Bytes) + if err == nil { + return cert + } + panic("error parsing cert") + } + panic("error parsing PEM") +} + +// Script of interaction with gnutls implementation. +// The values for this test are obtained by building and running in server mode: +// % go test -test.run "TestRunServer" -serve +// The recorded bytes are written to stdout. +var rsaRC4ServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x54, 0x01, 0x00, 0x00, + 0x50, 0x03, 0x01, 0x50, 0x77, 0x3d, 0xbd, 0x32, + 0x13, 0xd7, 0xea, 0x33, 0x65, 0x02, 0xb8, 0x70, + 0xb7, 0x84, 0xc4, 0x05, 0x1f, 0xa4, 0x24, 0xc4, + 0x91, 0x69, 0x04, 0x32, 0x96, 0xfe, 0x5b, 0x49, + 0x71, 0x60, 0x9a, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, + 0x00, 0x02, 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, + 0x02, 0xb0, 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, + 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x30, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, + 0x30, 0x39, 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, + 0x31, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, + 0x39, 0x33, 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, + 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, + 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, + 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, + 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, + 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, + 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, + 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, + 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, + 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, + 0xa5, 0x2c, 0xa5, 0x33, 
0xd6, 0xfe, 0x12, 0x5c, + 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, + 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, + 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, + 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, + 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, + 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, + 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, + 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, + 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, + 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, + 0x30, 0x81, 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, + 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, + 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, + 0x88, 0x39, 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, + 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, + 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, + 0x18, 0x88, 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, + 0x00, 0x85, 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, + 0xca, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, + 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, + 0x81, 0x00, 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, + 0xb1, 0x59, 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, + 0x14, 0xd7, 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, + 0x5a, 0x95, 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, + 0x12, 0x66, 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, + 0x60, 0xd3, 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, + 0x25, 0x13, 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, + 0x1d, 0xba, 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, + 0xd7, 0x31, 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, + 0xea, 0x50, 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, + 0x5a, 0x5f, 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, + 0x90, 0x96, 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, + 0x98, 0x1f, 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, + 0xa3, 0x1b, 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, + 0xe9, 0x70, 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, + 0x26, 0x6e, 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, + 0xbd, 0xd9, 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, + 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x2d, 0x09, 0x7c, 0x7f, 0xfc, + 0x84, 0xce, 0xb3, 0x30, 0x9b, 0xf9, 0xb7, 0xc8, + 0xc3, 0xff, 0xee, 0x6f, 0x20, 0x8a, 0xf4, 0xfb, + 0x86, 0x55, 0x1f, 0x6a, 0xb4, 0x81, 0x50, 0x3a, + 0x46, 0x1b, 0xd3, 0xca, 0x4b, 0x11, 0xff, 0xef, + 0x02, 0xbc, 0x18, 0xb8, 0x4a, 0x7d, 0x43, 0x23, + 0x96, 0x92, 0x27, 0x7c, 0xca, 0xcf, 0xe6, 0x91, + 0xe8, 0x14, 0x97, 0x68, 0xb4, 0xe5, 0xc0, 0xc9, + 0x23, 0xdd, 0x54, 0x07, 0xa6, 0x2e, 0x8c, 0x98, + 0xfc, 0xc6, 0x8c, 0x04, 0x6b, 0x1b, 0x5f, 0xd5, + 0x3d, 0x8b, 0x6c, 0x55, 0x4f, 0x7a, 0xe6, 0x6c, + 0x74, 0x2c, 0x1e, 0x34, 0xdb, 0xfb, 0x00, 0xb1, + 0x4e, 0x10, 0x21, 0x16, 0xe0, 0x3e, 0xc5, 0x64, + 0x84, 0x28, 0x2b, 0x2b, 0x29, 0x47, 0x51, 0x34, + 0x76, 0x15, 0x20, 0x71, 0x0b, 0x30, 0xa1, 0x85, + 0xd5, 0x15, 0x18, 0x14, 0x64, 0x4b, 0x40, 0x7c, + 0x4f, 0xb3, 0x7b, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0xab, 0xee, + 0xf5, 0x97, 0x5f, 0xc6, 0x78, 0xf3, 0xc6, 0x83, + 
0x5b, 0x55, 0x4f, 0xcb, 0x45, 0x3f, 0xfa, 0xf7, + 0x05, 0x02, 0xc2, 0x63, 0x87, 0x18, 0xb5, 0x9a, + 0x62, 0xe2, 0x3f, 0x88, 0x5a, 0x60, 0x61, 0x72, + 0xfa, 0x9c, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0x72, 0xa4, 0xe4, 0xaa, 0xd2, + 0xc4, 0x39, 0x7e, 0x2a, 0xc1, 0x6f, 0x34, 0x42, + 0x28, 0xcb, 0x9d, 0x7a, 0x09, 0xca, 0x96, 0xad, + 0x0e, 0x11, 0x51, 0x8a, 0x06, 0xb0, 0xe9, 0xca, + 0xeb, 0xce, 0xe2, 0xd5, 0x2e, 0xc1, 0x8d, 0x17, + 0x03, 0x01, 0x00, 0x21, 0x2e, 0x61, 0x86, 0x17, + 0xdb, 0xa6, 0x30, 0xe2, 0x62, 0x06, 0x2a, 0x8b, + 0x75, 0x2c, 0x2d, 0xcf, 0xf5, 0x01, 0x11, 0x52, + 0x81, 0x38, 0xcf, 0xd5, 0xf7, 0xdc, 0x52, 0x31, + 0x1f, 0x97, 0x43, 0xc2, 0x71, 0x15, 0x03, 0x01, + 0x00, 0x16, 0xe0, 0x21, 0xfe, 0x36, 0x2e, 0x68, + 0x2c, 0xf1, 0xbe, 0x04, 0xec, 0xd4, 0xc6, 0xdd, + 0xac, 0x6f, 0x4c, 0x85, 0x32, 0x3f, 0x87, 0x1b, + }, +} + +var rsaDES3ServerScript = [][]byte{ + { + 0x16, 0x03, 0x00, 0x00, 0xc5, 0x01, 0x00, 0x00, + 0xc1, 0x03, 0x03, 0x50, 0xae, 0x5d, 0x38, 0xec, + 0xaa, 0x2f, 0x41, 0xf9, 0xd2, 0x7b, 0xa1, 0xfd, + 0x0f, 0xff, 0x4e, 0x54, 0x0e, 0x15, 0x57, 0xaf, + 0x2c, 0x91, 0xb5, 0x35, 0x5b, 0x2e, 0xb0, 0xec, + 0x20, 0xe5, 0xd2, 0x00, 0x00, 0x50, 0xc0, 0x09, + 0xc0, 0x23, 0xc0, 0x2b, 0xc0, 0x0a, 0xc0, 0x24, + 0xc0, 0x2c, 0xc0, 0x08, 0xc0, 0x13, 0xc0, 0x27, + 0xc0, 0x2f, 0xc0, 0x14, 0xc0, 0x30, 0xc0, 0x12, + 0x00, 0x33, 0x00, 0x67, 0x00, 0x45, 0x00, 0x9e, + 0x00, 0x39, 0x00, 0x6b, 0x00, 0x88, 0x00, 0x16, + 0x00, 0x32, 0x00, 0x40, 0x00, 0x44, 0x00, 0xa2, + 0x00, 0x38, 0x00, 0x6a, 0x00, 0x87, 0x00, 0x13, + 0x00, 0x66, 0x00, 0x2f, 0x00, 0x3c, 0x00, 0x41, + 0x00, 0x9c, 0x00, 0x35, 0x00, 0x3d, 0x00, 0x84, + 0x00, 0x0a, 0x00, 0x05, 0x00, 0x04, 0x01, 0x00, + 0x00, 0x48, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, + 0x00, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x00, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, + 0x00, 0x0a, 0x00, 0x13, 0x00, 0x15, 0x00, 0x17, + 0x00, 0x18, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x02, + 0x01, 0x00, 0x00, 0x0d, 0x00, 0x1c, 0x00, 0x1a, + 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x05, 0x01, + 0x05, 0x03, 0x06, 0x01, 0x06, 0x03, 0x03, 0x01, + 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, 0x02, 0x02, + 0x02, 0x03, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 
0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x51, 0x04, 0xf1, 0x7a, 
0xbf, + 0xe8, 0xa5, 0x86, 0x09, 0xa7, 0xf3, 0xcc, 0x93, + 0x00, 0x10, 0x5b, 0xb8, 0xc1, 0x51, 0x0d, 0x5b, + 0xcd, 0xed, 0x26, 0x01, 0x69, 0x73, 0xf4, 0x05, + 0x8a, 0x6a, 0xc3, 0xb1, 0x9e, 0x84, 0x4e, 0x39, + 0xcf, 0x5e, 0x55, 0xa9, 0x70, 0x19, 0x96, 0x91, + 0xcd, 0x2c, 0x78, 0x3c, 0xa2, 0x6d, 0xb0, 0x49, + 0x86, 0xf6, 0xd1, 0x3a, 0xde, 0x00, 0x4b, 0xa6, + 0x25, 0xbf, 0x85, 0x39, 0xce, 0xb1, 0xcf, 0xbc, + 0x16, 0xc7, 0x66, 0xac, 0xf8, 0xd2, 0x3b, 0xd1, + 0xcc, 0x16, 0xac, 0x63, 0x3c, 0xbe, 0xd9, 0xb6, + 0x6a, 0xe4, 0x13, 0x8a, 0xf4, 0x56, 0x2f, 0x92, + 0x54, 0xd8, 0xf0, 0x84, 0x01, 0x32, 0x1a, 0xa9, + 0x2d, 0xaf, 0x82, 0x0e, 0x00, 0xfa, 0x07, 0x88, + 0xd9, 0x87, 0xe7, 0xdc, 0x9e, 0xe9, 0x72, 0x49, + 0xb8, 0xfa, 0x8c, 0x7b, 0x07, 0x0b, 0x03, 0x7c, + 0x10, 0x8c, 0x8a, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0xa8, 0x61, 0xa4, + 0xf4, 0x5f, 0x8a, 0x1f, 0x5c, 0x92, 0x3f, 0x8c, + 0xdb, 0xd6, 0x10, 0xcd, 0x9e, 0xe7, 0xf0, 0xc4, + 0x3c, 0xb6, 0x1c, 0x9a, 0x56, 0x73, 0x7f, 0xa6, + 0x14, 0x24, 0xcb, 0x96, 0x1f, 0xe0, 0xaf, 0xcd, + 0x3c, 0x66, 0x43, 0xb7, 0x37, 0x65, 0x34, 0x47, + 0xf8, 0x43, 0xf1, 0xcc, 0x15, 0xb8, 0xdc, 0x35, + 0xe0, 0xa4, 0x2d, 0x78, 0x94, 0xe0, 0x02, 0xf3, + 0x76, 0x46, 0xf7, 0x9b, 0x8d, 0x0d, 0x5d, 0x0b, + 0xd3, 0xdd, 0x9a, 0x9e, 0x62, 0x2e, 0xc5, 0x98, + 0x75, 0x63, 0x0c, 0xbf, 0x8e, 0x49, 0x33, 0x23, + 0x7c, 0x00, 0xcf, 0xfb, 0xcf, 0xba, 0x0f, 0x41, + 0x39, 0x89, 0xb9, 0xcc, 0x59, 0xd0, 0x2b, 0xb6, + 0xec, 0x04, 0xe2, 0xc0, 0x52, 0xc7, 0xcf, 0x71, + 0x47, 0xff, 0x70, 0x7e, 0xa9, 0xbd, 0x1c, 0xdd, + 0x17, 0xa5, 0x6c, 0xb7, 0x10, 0x4f, 0x42, 0x18, + 0x37, 0x69, 0xa9, 0xd2, 0xb3, 0x18, 0x84, 0x92, + 0xa7, 0x47, 0x21, 0xf6, 0x95, 0x63, 0x29, 0xd6, + 0xa5, 0xb6, 0xda, 0x65, 0x67, 0x69, 0xc4, 0x26, + 0xac, 0x8b, 0x08, 0x58, 0xdd, 0x3c, 0x31, 0x20, + 0xd5, 0x0c, 0x88, 0x72, 0x18, 0x16, 0x88, 0x1e, + 0x4a, 0x0f, 0xe1, 0xcf, 0x95, 0x24, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xde, 0xef, 0xba, 0x3e, 0x18, 0x1c, + 0x1e, 0x5e, 0xbc, 0x87, 0xf1, 0x87, 0x8d, 0x72, + 0xe3, 0xbe, 0x0f, 0xdf, 0xfd, 0xd0, 0xb2, 0x89, + 0xf8, 0x05, 0x9a, 0x52, 0x47, 0x77, 0x9e, 0xe8, + 0xb1, 0x1d, 0x18, 0xed, 0x6a, 0x4b, 0x63, 0x1d, + 0xf1, 0x62, 0xd2, 0x65, 0x21, 0x26, 0x73, 0xd4, + 0x35, 0x5b, 0x95, 0x89, 0x12, 0x59, 0x23, 0x8c, + 0xc3, 0xfc, 0xf9, 0x4d, 0x21, 0x79, 0xa0, 0xbd, + 0xff, 0x33, 0xa2, 0x3d, 0x0b, 0x6f, 0x89, 0xc9, + 0x23, 0xe4, 0xe7, 0x9f, 0x1d, 0x98, 0xf6, 0xed, + 0x02, 0x8d, 0xac, 0x1a, 0xf9, 0xcb, 0xa5, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x28, 0x91, 0x56, 0x80, 0xe2, 0x6d, 0x51, + 0x88, 0x03, 0xf8, 0x49, 0xe6, 0x6a, 0x5a, 0xfb, + 0x2f, 0x0b, 0xb5, 0xa1, 0x0d, 0x63, 0x83, 0xae, + 0xb9, 0xbc, 0x05, 0xf0, 0x81, 0x00, 0x61, 0x83, + 0x38, 0xda, 0x14, 0xf6, 0xea, 0xd8, 0x78, 0x65, + 0xc7, 0x26, 0x17, 0x03, 0x01, 0x00, 0x18, 0x81, + 0x30, 0x8b, 0x22, 0x5a, 0xd3, 0x7f, 0xc8, 0xf2, + 0x8a, 0x6b, 0xa3, 0xba, 0x4d, 0xe7, 0x6e, 0xd2, + 0xfd, 0xbf, 0xf2, 0xc5, 0x28, 0xa0, 0x62, 0x17, + 0x03, 0x01, 0x00, 0x28, 0x17, 0x83, 0x3c, 0x78, + 0x18, 0xfa, 0x8d, 0x58, 0x5c, 0xaa, 0x05, 0x7d, + 0x67, 0x96, 0x11, 0x60, 0x11, 0xc0, 0x1e, 0x0d, + 0x6a, 0x6e, 0x5f, 0x1d, 0x98, 0x4b, 0xff, 0x82, + 0xee, 0x21, 0x06, 0x29, 0xd3, 0x8b, 0x80, 0x78, + 0x39, 0x05, 0x34, 0x9b, 0x15, 0x03, 0x01, 0x00, + 0x18, 0xa9, 0x38, 0x18, 0x4f, 0x9d, 0x84, 0x75, + 0x88, 0x53, 0xd6, 0x85, 0xc2, 0x15, 0x4b, 0xe3, + 
0xe3, 0x35, 0x9a, 0x74, 0xc9, 0x3e, 0x13, 0xc1, + 0x8c, + }, +} + +var rsaAESServerScript = [][]byte{ + { + 0x16, 0x03, 0x00, 0x00, 0xc5, 0x01, 0x00, 0x00, + 0xc1, 0x03, 0x03, 0x50, 0xae, 0x5c, 0xe9, 0x5e, + 0x31, 0x93, 0x82, 0xa5, 0x6f, 0x51, 0x82, 0xc8, + 0x55, 0x4f, 0x1f, 0x2e, 0x90, 0x98, 0x81, 0x13, + 0x27, 0x80, 0x68, 0xb4, 0x2d, 0xba, 0x3a, 0x76, + 0xd8, 0xd7, 0x2c, 0x00, 0x00, 0x50, 0xc0, 0x09, + 0xc0, 0x23, 0xc0, 0x2b, 0xc0, 0x0a, 0xc0, 0x24, + 0xc0, 0x2c, 0xc0, 0x08, 0xc0, 0x13, 0xc0, 0x27, + 0xc0, 0x2f, 0xc0, 0x14, 0xc0, 0x30, 0xc0, 0x12, + 0x00, 0x33, 0x00, 0x67, 0x00, 0x45, 0x00, 0x9e, + 0x00, 0x39, 0x00, 0x6b, 0x00, 0x88, 0x00, 0x16, + 0x00, 0x32, 0x00, 0x40, 0x00, 0x44, 0x00, 0xa2, + 0x00, 0x38, 0x00, 0x6a, 0x00, 0x87, 0x00, 0x13, + 0x00, 0x66, 0x00, 0x2f, 0x00, 0x3c, 0x00, 0x41, + 0x00, 0x9c, 0x00, 0x35, 0x00, 0x3d, 0x00, 0x84, + 0x00, 0x0a, 0x00, 0x05, 0x00, 0x04, 0x01, 0x00, + 0x00, 0x48, 0x00, 0x05, 0x00, 0x05, 0x01, 0x00, + 0x00, 0x00, 0x00, 0xff, 0x01, 0x00, 0x01, 0x00, + 0x00, 0x23, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x0c, + 0x00, 0x0a, 0x00, 0x13, 0x00, 0x15, 0x00, 0x17, + 0x00, 0x18, 0x00, 0x19, 0x00, 0x0b, 0x00, 0x02, + 0x01, 0x00, 0x00, 0x0d, 0x00, 0x1c, 0x00, 0x1a, + 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x05, 0x01, + 0x05, 0x03, 0x06, 0x01, 0x06, 0x03, 0x03, 0x01, + 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, 0x02, 0x02, + 0x02, 0x03, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 
0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x51, 0x2e, 0xec, 0x0d, 0x86, + 0xf3, 0x9f, 0xf2, 0x77, 0x04, 0x27, 0x2b, 0x0e, + 0x9c, 0xab, 0x35, 0x84, 0x65, 0xff, 0x36, 0xef, + 0xc0, 0x08, 0xc9, 0x1d, 0x9f, 0x29, 0xae, 0x8d, + 0xc5, 0x66, 0x81, 0x31, 0x92, 0x5e, 0x3d, 0xac, + 0xaa, 0x37, 0x28, 0x2c, 0x06, 0x91, 0xa6, 0xc2, + 0xd0, 0x83, 0x34, 0x24, 0x1c, 0x88, 0xfc, 0x0a, + 0xcf, 0xbf, 0xc2, 0x94, 0xe2, 0xed, 0xa7, 0x6a, + 0xa8, 0x8d, 0x3d, 0xf7, 0x06, 0x7d, 0x69, 0xf8, + 0x0d, 0xb2, 0xf7, 0xe4, 0x45, 0xcb, 0x0a, 0x25, + 0xcb, 0xb2, 0x2e, 0x38, 0x9a, 0x84, 0x75, 0xe8, + 0xe1, 0x42, 0x39, 0xa2, 0x18, 0x0e, 0x48, 0xca, + 0x33, 0x16, 0x4e, 0xf6, 0x2f, 0xec, 0x07, 0xe7, + 0x57, 0xe1, 0x20, 0x40, 0x40, 0x6d, 0x4e, 0x29, + 0x04, 0x1a, 0x8c, 0x99, 0xfb, 0x19, 0x3c, 0xaa, + 0x75, 0x64, 0xd3, 0xa6, 0xe6, 0xed, 0x3f, 0x5a, + 0xd2, 0xc9, 0x80, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 
0x03, 0x01, 0x01, 0x10, 0xe9, 0x9e, + 0x06, 0x92, 0x18, 0xbf, 0x5e, 0xaf, 0x33, 0xc1, + 0xbf, 0x0e, 0x12, 0x07, 0x48, 0x4f, 0x6b, 0x6c, + 0xf5, 0x23, 0x5e, 0x87, 0xa7, 0xd3, 0x50, 0x79, + 0x38, 0xdc, 0xe0, 0x49, 0xd3, 0x81, 0x21, 0x12, + 0xd0, 0x3d, 0x9a, 0xfb, 0x83, 0xc1, 0x8b, 0xfc, + 0x14, 0xd5, 0xd5, 0xa7, 0xa3, 0x34, 0x14, 0x71, + 0xbe, 0xea, 0x37, 0x18, 0x12, 0x7f, 0x41, 0xfb, + 0xc5, 0x51, 0x17, 0x9d, 0x96, 0x58, 0x14, 0xfb, + 0x4f, 0xd7, 0xd3, 0x15, 0x0f, 0xec, 0x5a, 0x0d, + 0x35, 0xbb, 0x3c, 0x81, 0x5b, 0x3f, 0xdf, 0x52, + 0xa4, 0x4c, 0xcd, 0x13, 0xe1, 0x10, 0x37, 0x34, + 0xbf, 0xb4, 0x80, 0x1e, 0x8d, 0xe2, 0xc3, 0x7a, + 0x0f, 0x7b, 0x7d, 0x23, 0xeb, 0xd0, 0x99, 0x69, + 0xad, 0x0a, 0x2d, 0xb3, 0x6c, 0xd6, 0x80, 0x11, + 0x7f, 0x6c, 0xed, 0x1b, 0xcd, 0x08, 0x22, 0x56, + 0x90, 0x0e, 0xa4, 0xc3, 0x29, 0x33, 0x96, 0x30, + 0x34, 0x94, 0xa1, 0xeb, 0x9c, 0x1b, 0x5a, 0xd1, + 0x03, 0x61, 0xf9, 0xdd, 0xf3, 0x64, 0x8a, 0xfd, + 0x5f, 0x44, 0xdb, 0x2e, 0xa7, 0xfd, 0xe1, 0x1a, + 0x66, 0xc5, 0x01, 0x9c, 0xc7, 0xd1, 0xc4, 0xd3, + 0xea, 0x14, 0x3c, 0xed, 0x74, 0xbb, 0x1b, 0x97, + 0x8f, 0xf1, 0x29, 0x39, 0x33, 0x92, 0x93, 0x4e, + 0xf5, 0x87, 0x91, 0x61, 0x65, 0x8d, 0x27, 0x8d, + 0x76, 0xc1, 0xfa, 0x6a, 0x99, 0x80, 0xb1, 0x9b, + 0x29, 0x53, 0xce, 0x3e, 0xb6, 0x9a, 0xce, 0x3c, + 0x19, 0x5e, 0x48, 0x83, 0xaa, 0xa7, 0x66, 0x98, + 0x59, 0xf4, 0xbb, 0xf2, 0xbc, 0xd9, 0xc5, 0x9a, + 0xc8, 0x2c, 0x63, 0x58, 0xd5, 0xd4, 0xbc, 0x03, + 0xa9, 0x06, 0xa9, 0x80, 0x0d, 0xb3, 0x46, 0x2d, + 0xe3, 0xc6, 0xaf, 0x1a, 0x39, 0x18, 0x7e, 0x1e, + 0x83, 0x80, 0x46, 0x11, 0xd2, 0x13, 0x9f, 0xda, + 0xfc, 0x2d, 0x42, 0xaa, 0x5a, 0x1d, 0x4c, 0x31, + 0xe5, 0x58, 0x78, 0x5e, 0xe2, 0x04, 0xd6, 0x23, + 0x7f, 0x3f, 0x06, 0xc0, 0x54, 0xf8, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xfb, 0xef, 0xba, 0xed, 0xc5, 0x36, + 0xc8, 0x5a, 0x41, 0x3f, 0x05, 0xfa, 0xfe, 0x48, + 0xc3, 0x91, 0x12, 0x8b, 0xe8, 0x32, 0x6a, 0x9f, + 0xdc, 0x97, 0xe2, 0x77, 0xb9, 0x96, 0x2d, 0xd4, + 0xe5, 0xbd, 0xa1, 0xfd, 0x94, 0xbb, 0x74, 0x63, + 0xb1, 0x0c, 0x38, 0xbc, 0x6f, 0x69, 0xaf, 0xa3, + 0x46, 0x9c, 0x96, 0x41, 0xde, 0x59, 0x23, 0xff, + 0x15, 0x6b, 0x3a, 0xef, 0x91, 0x6d, 0x92, 0x44, + 0xdc, 0x72, 0x1f, 0x40, 0x3d, 0xb5, 0x34, 0x8f, + 0x2a, 0xac, 0x21, 0x69, 0x05, 0x6f, 0xb2, 0x60, + 0x32, 0x5d, 0x3d, 0x97, 0xb4, 0x24, 0x99, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x30, 0x68, 0x27, 0x97, 0xca, 0x63, 0x09, + 0x22, 0xed, 0x0e, 0x61, 0x7c, 0x76, 0x31, 0x9c, + 0xbe, 0x27, 0xc9, 0xe6, 0x09, 0xc3, 0xc3, 0xc2, + 0xf4, 0xa2, 0x32, 0xba, 0x7c, 0xf2, 0x0f, 0xb8, + 0x3d, 0xcb, 0xe2, 0x4c, 0xc0, 0x7d, 0x8e, 0x5b, + 0x5a, 0xed, 0x05, 0x5c, 0x15, 0x96, 0x69, 0xc2, + 0x6f, 0x5f, 0x17, 0x03, 0x01, 0x00, 0x20, 0x5a, + 0xfe, 0x0b, 0xe1, 0x6f, 0xa8, 0x54, 0x19, 0x78, + 0xca, 0xba, 0x2e, 0x1e, 0x2e, 0xe1, 0x5d, 0x17, + 0xe5, 0x97, 0x05, 0x2c, 0x08, 0x0c, 0xff, 0xa8, + 0x59, 0xa9, 0xde, 0x5e, 0x21, 0x34, 0x04, 0x17, + 0x03, 0x01, 0x00, 0x30, 0x86, 0xb1, 0x3f, 0x88, + 0x43, 0xf0, 0x07, 0xee, 0xa8, 0xf4, 0xbc, 0xe7, + 0x5f, 0xc6, 0x8c, 0x86, 0x4c, 0xca, 0x70, 0x88, + 0xcc, 0x6a, 0xb4, 0x3d, 0x40, 0xe8, 0x54, 0x89, + 0x19, 0x43, 0x1f, 0x76, 0xe2, 0xac, 0xb2, 0x5b, + 0x92, 0xf8, 0x57, 0x39, 0x2a, 0xc3, 0x6d, 0x13, + 0x45, 0xfa, 0x36, 0x9e, 0x15, 0x03, 0x01, 0x00, + 0x20, 0x6d, 0xed, 0x7b, 0x59, 0x28, 0x2a, 0x27, + 0x04, 0x15, 0x07, 0x4e, 0xeb, 0x13, 0x00, 0xe3, + 0x3a, 0x3f, 0xf8, 
0xaa, 0x2b, 0x3b, 0x1a, 0x8c, + 0x12, 0xd6, 0x4c, 0xec, 0x2a, 0xaf, 0x33, 0x60, + 0xaf, + }, +} + +// Generated using: +// $ go test -test.run TestRunServer -serve -ciphersuites=0xc00a +// $ openssl s_client -host 127.0.0.1 -port 10443 -cipher ECDHE-ECDSA-AES256-SHA +var ecdheECDSAAESServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0xa0, 0x01, 0x00, 0x00, + 0x9c, 0x03, 0x03, 0x50, 0xd7, 0x18, 0x31, 0x49, + 0xde, 0x19, 0x8d, 0x08, 0x5c, 0x4b, 0x60, 0x67, + 0x0f, 0xfe, 0xd0, 0x62, 0xf9, 0x31, 0x48, 0x17, + 0x9e, 0x50, 0xc1, 0xd8, 0x35, 0x24, 0x0e, 0xa6, + 0x09, 0x06, 0x51, 0x00, 0x00, 0x04, 0xc0, 0x0a, + 0x00, 0xff, 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, + 0x00, 0x04, 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, + 0x00, 0x34, 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, + 0x00, 0x19, 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, + 0x00, 0x09, 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, + 0x00, 0x15, 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, + 0x00, 0x13, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, + 0x00, 0x0f, 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, + 0x00, 0x00, 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, + 0x06, 0x01, 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, + 0x05, 0x02, 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, + 0x04, 0x03, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, + 0x00, 0x0f, 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x0a, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0x0e, 0x0b, 0x00, 0x02, 0x0a, 0x00, 0x02, + 0x07, 0x00, 0x02, 0x04, 0x30, 0x82, 0x02, 0x00, + 0x30, 0x82, 0x01, 0x62, 0x02, 0x09, 0x00, 0xb8, + 0xbf, 0x2d, 0x47, 0xa0, 0xd2, 0xeb, 0xf4, 0x30, + 0x09, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, + 0x04, 0x01, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x32, 0x31, + 0x31, 0x32, 0x32, 0x31, 0x35, 0x30, 0x36, 0x33, + 0x32, 0x5a, 0x17, 0x0d, 0x32, 0x32, 0x31, 0x31, + 0x32, 0x30, 0x31, 0x35, 0x30, 0x36, 0x33, 0x32, + 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, + 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, + 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, + 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, + 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, + 0x30, 0x81, 0x9b, 0x30, 0x10, 0x06, 0x07, 0x2a, + 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x05, + 0x2b, 0x81, 0x04, 0x00, 0x23, 0x03, 0x81, 0x86, + 0x00, 0x04, 0x00, 0xc4, 0xa1, 0xed, 0xbe, 0x98, + 0xf9, 0x0b, 0x48, 0x73, 0x36, 0x7e, 0xc3, 0x16, + 0x56, 0x11, 0x22, 0xf2, 0x3d, 0x53, 0xc3, 0x3b, + 0x4d, 0x21, 0x3d, 0xcd, 0x6b, 0x75, 0xe6, 0xf6, + 0xb0, 0xdc, 0x9a, 0xdf, 0x26, 0xc1, 0xbc, 0xb2, + 0x87, 0xf0, 0x72, 0x32, 0x7c, 0xb3, 0x64, 0x2f, + 0x1c, 0x90, 0xbc, 0xea, 0x68, 0x23, 0x10, 0x7e, 
+ 0xfe, 0xe3, 0x25, 0xc0, 0x48, 0x3a, 0x69, 0xe0, + 0x28, 0x6d, 0xd3, 0x37, 0x00, 0xef, 0x04, 0x62, + 0xdd, 0x0d, 0xa0, 0x9c, 0x70, 0x62, 0x83, 0xd8, + 0x81, 0xd3, 0x64, 0x31, 0xaa, 0x9e, 0x97, 0x31, + 0xbd, 0x96, 0xb0, 0x68, 0xc0, 0x9b, 0x23, 0xde, + 0x76, 0x64, 0x3f, 0x1a, 0x5c, 0x7f, 0xe9, 0x12, + 0x0e, 0x58, 0x58, 0xb6, 0x5f, 0x70, 0xdd, 0x9b, + 0xd8, 0xea, 0xd5, 0xd7, 0xf5, 0xd5, 0xcc, 0xb9, + 0xb6, 0x9f, 0x30, 0x66, 0x5b, 0x66, 0x9a, 0x20, + 0xe2, 0x27, 0xe5, 0xbf, 0xfe, 0x3b, 0x30, 0x09, + 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, + 0x01, 0x03, 0x81, 0x8c, 0x00, 0x30, 0x81, 0x88, + 0x02, 0x42, 0x01, 0x88, 0xa2, 0x4f, 0xeb, 0xe2, + 0x45, 0xc5, 0x48, 0x7d, 0x1b, 0xac, 0xf5, 0xed, + 0x98, 0x9d, 0xae, 0x47, 0x70, 0xc0, 0x5e, 0x1b, + 0xb6, 0x2f, 0xbd, 0xf1, 0xb6, 0x4d, 0xb7, 0x61, + 0x40, 0xd3, 0x11, 0xa2, 0xce, 0xee, 0x0b, 0x7e, + 0x92, 0x7e, 0xff, 0x76, 0x9d, 0xc3, 0x3b, 0x7e, + 0xa5, 0x3f, 0xce, 0xfa, 0x10, 0xe2, 0x59, 0xec, + 0x47, 0x2d, 0x7c, 0xac, 0xda, 0x4e, 0x97, 0x0e, + 0x15, 0xa0, 0x6f, 0xd0, 0x02, 0x42, 0x01, 0x4d, + 0xfc, 0xbe, 0x67, 0x13, 0x9c, 0x2d, 0x05, 0x0e, + 0xbd, 0x3f, 0xa3, 0x8c, 0x25, 0xc1, 0x33, 0x13, + 0x83, 0x0d, 0x94, 0x06, 0xbb, 0xd4, 0x37, 0x7a, + 0xf6, 0xec, 0x7a, 0xc9, 0x86, 0x2e, 0xdd, 0xd7, + 0x11, 0x69, 0x7f, 0x85, 0x7c, 0x56, 0xde, 0xfb, + 0x31, 0x78, 0x2b, 0xe4, 0xc7, 0x78, 0x0d, 0xae, + 0xcb, 0xbe, 0x9e, 0x4e, 0x36, 0x24, 0x31, 0x7b, + 0x6a, 0x0f, 0x39, 0x95, 0x12, 0x07, 0x8f, 0x2a, + 0x16, 0x03, 0x01, 0x01, 0x1a, 0x0c, 0x00, 0x01, + 0x16, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x00, 0x8b, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, + 0x04, 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, + 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, + 0x3f, 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, + 0x4d, 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, + 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, + 0xff, 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, + 0x6a, 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, + 0xe5, 0xbd, 0x66, 0x02, 0x42, 0x00, 0xad, 0x7d, + 0x06, 0x35, 0xab, 0xec, 0x8d, 0xac, 0xd4, 0xba, + 0x1b, 0x49, 0x5e, 0x05, 0x5f, 0xf0, 0x97, 0x93, + 0x82, 0xb8, 0x2b, 0x8d, 0x91, 0x98, 0x63, 0x8e, + 0xb4, 0x14, 0x62, 0xdb, 0x1e, 0xc9, 0x2b, 0x30, + 0xf8, 0x41, 0x9b, 0xa6, 0xe6, 0xbc, 0xde, 0x0e, + 0x68, 0x30, 0x22, 0x50, 0xe6, 0x98, 0x97, 0x7b, + 0x69, 0xf7, 0x93, 0xed, 0xcd, 0x19, 0x2f, 0x44, + 0x6c, 0x2e, 0xdf, 0x25, 0xee, 0xcc, 0x46, 0x16, + 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x00, 0x1c, 0xc5, 0xe8, 0xb3, + 0x42, 0xb4, 0xad, 0xca, 0x45, 0xcd, 0x42, 0x7b, + 0xfb, 0x0c, 0xea, 0x32, 0x26, 0xd4, 0x8a, 0xef, + 0xdf, 0xc9, 0xff, 0xd2, 0xe0, 0x36, 0xea, 
0x4e, + 0xbb, 0x3e, 0xf4, 0x9c, 0x76, 0x4f, 0x44, 0xbd, + 0x84, 0x72, 0xdd, 0xcb, 0xe5, 0x28, 0x8d, 0x31, + 0x72, 0x3b, 0xd3, 0xf2, 0x9a, 0x13, 0xfb, 0x8a, + 0xa7, 0x72, 0xca, 0x21, 0x6c, 0xea, 0xbf, 0xe9, + 0x8c, 0x0a, 0xcc, 0x8f, 0xd6, 0x00, 0x20, 0x87, + 0xf3, 0x7d, 0x18, 0xc5, 0xfd, 0x9e, 0xdd, 0x6b, + 0x06, 0xdc, 0x52, 0xeb, 0x14, 0xc0, 0x67, 0x5a, + 0x06, 0xd8, 0x98, 0x19, 0x14, 0xe7, 0xd4, 0x36, + 0x32, 0xee, 0xb7, 0xfa, 0xe2, 0x85, 0x4a, 0x16, + 0x42, 0x0c, 0xa6, 0x21, 0xcf, 0x1f, 0xae, 0x10, + 0x8b, 0x28, 0x32, 0x19, 0xa4, 0x0a, 0xd7, 0xce, + 0xe6, 0xe1, 0x93, 0xfb, 0x5f, 0x08, 0x8b, 0x42, + 0xa2, 0x20, 0xed, 0x0d, 0x62, 0xca, 0xed, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x30, 0x2e, 0x33, 0xc0, 0x57, 0x6c, 0xb4, + 0x1b, 0xd2, 0x63, 0xe8, 0x67, 0x10, 0x2d, 0x87, + 0x71, 0x6e, 0x19, 0x60, 0xf4, 0xa4, 0x10, 0x52, + 0x73, 0x2d, 0x09, 0x5e, 0xdb, 0x6c, 0xdc, 0xcf, + 0x2d, 0xff, 0x03, 0x11, 0x95, 0x76, 0x90, 0xd7, + 0x87, 0x54, 0x43, 0xed, 0xc2, 0x36, 0x69, 0x14, + 0x72, 0x4a, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x8b, 0xde, 0xef, 0xba, 0xc5, 0x7e, 0x04, + 0xab, 0xfd, 0x79, 0x56, 0xf3, 0xe1, 0xa5, 0x3e, + 0x02, 0xdf, 0x69, 0x6d, 0x1f, 0x41, 0x9f, 0xbc, + 0x93, 0xe2, 0x6c, 0xf1, 0xb1, 0x38, 0xf5, 0x2b, + 0x8c, 0x4c, 0xf4, 0x74, 0xe1, 0x79, 0x35, 0x34, + 0x97, 0x9b, 0xd5, 0xba, 0xfd, 0xf7, 0x2f, 0x2d, + 0x9e, 0x84, 0x54, 0xee, 0x77, 0x59, 0x23, 0x8f, + 0xc8, 0x84, 0xb4, 0xd6, 0xea, 0x4c, 0x44, 0x8a, + 0xc6, 0x9c, 0xf9, 0x9b, 0x27, 0xea, 0x4f, 0x28, + 0x72, 0x33, 0x12, 0x20, 0x7c, 0xd7, 0x3f, 0x56, + 0xa6, 0x76, 0xc7, 0x48, 0xe4, 0x2d, 0x6f, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x30, 0x36, 0xe3, 0xd4, 0xf7, 0xb1, 0x69, + 0x18, 0x8d, 0x09, 0xba, 0x52, 0x1e, 0xd5, 0x7d, + 0x2c, 0x15, 0x3a, 0xd6, 0xe3, 0x99, 0x30, 0x2c, + 0x99, 0x97, 0xbc, 0x19, 0x3c, 0x63, 0xa1, 0x25, + 0x68, 0xbc, 0x8a, 0x16, 0x47, 0xec, 0xae, 0x13, + 0xa4, 0x03, 0x96, 0x29, 0x11, 0x92, 0x90, 0x1a, + 0xc8, 0xa4, 0x17, 0x03, 0x01, 0x00, 0x20, 0xc1, + 0x10, 0x1d, 0xa6, 0xf1, 0xe2, 0x8a, 0xcc, 0x37, + 0x7d, 0x8e, 0x05, 0x00, 0xfb, 0xd1, 0x9f, 0xc7, + 0x11, 0xd2, 0x00, 0xb4, 0x27, 0x0a, 0x25, 0x14, + 0xd9, 0x79, 0x1b, 0xcb, 0x4d, 0x81, 0x61, 0x17, + 0x03, 0x01, 0x00, 0x30, 0x5c, 0x7c, 0x2d, 0xc0, + 0x9e, 0xa6, 0xc4, 0x8e, 0xfd, 0xf4, 0xe2, 0xe5, + 0xe4, 0xe6, 0x56, 0x9f, 0x7d, 0x4c, 0x4c, 0x2d, + 0xb7, 0xa9, 0xac, 0xfa, 0x9f, 0x12, 0x7f, 0x2d, + 0x30, 0x57, 0xe4, 0x8e, 0x30, 0x86, 0x65, 0x59, + 0xcd, 0x24, 0xda, 0xe2, 0x8a, 0x7b, 0x0c, 0x5e, + 0x86, 0x05, 0x06, 0x2a, 0x15, 0x03, 0x01, 0x00, + 0x20, 0xd6, 0xb7, 0x70, 0xf8, 0x47, 0xbc, 0x0f, + 0xf4, 0x66, 0x98, 0x1b, 0x1e, 0x8a, 0x8c, 0x0b, + 0xa1, 0x4a, 0x04, 0x29, 0x60, 0x72, 0x8b, 0xc4, + 0x73, 0xc1, 0xd6, 0x41, 0x72, 0xb7, 0x17, 0x39, + 0xda, + }, +} + +var sslv3ServerScript = [][]byte{ + { + 0x16, 0x03, 0x00, 0x00, 0x54, 0x01, 0x00, 0x00, + 0x50, 0x03, 0x00, 0x50, 0x77, 0x3d, 0x42, 0xae, + 0x84, 0xbd, 0xc5, 0x07, 0xa5, 0xc4, 0xd6, 0x16, + 0x4e, 0xd5, 0xc5, 0xfa, 0x02, 0x7a, 0x0f, 0x1d, + 0xc1, 0xe1, 0xaa, 0xe3, 0x3b, 0x4b, 0x6f, 0x11, + 0xfa, 0x1a, 0xa4, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, + }, + { + 
0x16, 0x03, 0x00, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x00, 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, + 0x00, 0x02, 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, + 0x02, 0xb0, 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, + 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x30, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, + 0x30, 0x39, 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, + 0x31, 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, + 0x39, 0x33, 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, + 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, + 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, + 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, + 0x4c, 0x74, 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, + 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, + 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, + 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, + 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, + 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, + 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, + 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, + 0xa5, 0x2c, 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, + 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, + 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, + 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, + 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, + 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, + 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, + 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, + 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, + 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, + 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, + 0x30, 0x81, 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, + 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, + 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, + 0x88, 0x39, 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, + 0xad, 0xe2, 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, + 0x69, 0xce, 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, + 0x18, 0x88, 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, + 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, + 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, + 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, + 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, + 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, + 
0x69, 0x64, 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, + 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, + 0x00, 0x85, 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, + 0xca, 0x30, 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, + 0x04, 0x05, 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, + 0x0d, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, + 0x81, 0x00, 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, + 0xb1, 0x59, 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, + 0x14, 0xd7, 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, + 0x5a, 0x95, 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, + 0x12, 0x66, 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, + 0x60, 0xd3, 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, + 0x25, 0x13, 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, + 0x1d, 0xba, 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, + 0xd7, 0x31, 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, + 0xea, 0x50, 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, + 0x5a, 0x5f, 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, + 0x90, 0x96, 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, + 0x98, 0x1f, 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, + 0xa3, 0x1b, 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, + 0xe9, 0x70, 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, + 0x26, 0x6e, 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, + 0xbd, 0xd9, 0x16, 0x03, 0x00, 0x00, 0x04, 0x0e, + 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x00, 0x00, 0x84, 0x10, 0x00, 0x00, + 0x80, 0x4a, 0x8d, 0xc4, 0x38, 0x7a, 0x9c, 0xd6, + 0xe8, 0x72, 0x9e, 0xa3, 0xdf, 0x37, 0xb4, 0x6c, + 0x58, 0x33, 0x59, 0xd9, 0xc9, 0x4b, 0x50, 0x33, + 0x6c, 0xed, 0x73, 0x38, 0x2a, 0x46, 0x55, 0x31, + 0xa9, 0x8e, 0x8e, 0xfc, 0x0b, 0x5d, 0x5f, 0x3c, + 0x88, 0x28, 0x3f, 0x60, 0x51, 0x13, 0xf1, 0x59, + 0x0c, 0xa3, 0x5e, 0xe0, 0xa3, 0x35, 0x06, 0xb1, + 0x71, 0x59, 0x24, 0x4e, 0xed, 0x07, 0x15, 0x88, + 0x50, 0xef, 0xc2, 0xb2, 0x2a, 0x52, 0x30, 0x6a, + 0x7c, 0xbe, 0x2f, 0xc6, 0x8f, 0xa8, 0x83, 0xc5, + 0x80, 0x14, 0x62, 0x74, 0x7f, 0x96, 0x9f, 0x41, + 0x32, 0x74, 0xdd, 0x76, 0x2d, 0x7b, 0xeb, 0x7b, + 0xea, 0xd0, 0x4f, 0x0c, 0xcf, 0x9a, 0x9c, 0xc5, + 0x7a, 0xe4, 0xbc, 0xf8, 0xa6, 0xe1, 0x09, 0x8e, + 0x7c, 0x53, 0x3a, 0xe3, 0x30, 0x8f, 0x76, 0xee, + 0x58, 0xbb, 0xfd, 0x0b, 0x06, 0xb8, 0xdf, 0xb7, + 0x31, 0x14, 0x03, 0x00, 0x00, 0x01, 0x01, 0x16, + 0x03, 0x00, 0x00, 0x3c, 0x13, 0x91, 0xc6, 0x4a, + 0x0c, 0x59, 0x25, 0xce, 0x54, 0xc0, 0x1d, 0xb9, + 0x2a, 0xff, 0x4d, 0xca, 0x26, 0x0c, 0x8c, 0x04, + 0x98, 0x7c, 0x7c, 0x38, 0xa3, 0xf5, 0xf9, 0x36, + 0x1c, 0x04, 0x32, 0x47, 0x2d, 0x48, 0x0e, 0x96, + 0xe8, 0x2b, 0x5e, 0x5a, 0xc6, 0x0a, 0x48, 0x41, + 0x34, 0x5e, 0x62, 0xd5, 0x68, 0x4e, 0x44, 0x1d, + 0xb2, 0xa1, 0x11, 0xad, 0x6e, 0x14, 0x85, 0x61, + }, + { + 0x14, 0x03, 0x00, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x00, 0x00, 0x3c, 0x88, 0xae, 0xa9, 0xd4, 0xa8, + 0x10, 0x8d, 0x65, 0xa6, 0x3e, 0x1e, 0xed, 0xd2, + 0xfc, 0xc4, 0x7c, 0xa8, 0x94, 0x4f, 0x11, 0xaf, + 0xa6, 0x87, 0x09, 0x37, 0x54, 0xf7, 0x69, 0xd1, + 0xb5, 0x25, 0x6b, 0xb5, 0xed, 0xcb, 0x25, 0x39, + 0x73, 0xeb, 0x53, 0x6c, 0xc7, 0xb4, 0x29, 0x8f, + 0xd6, 0x49, 0xd1, 0x95, 0x59, 0x80, 0x9a, 0x67, + 0x5c, 0xb2, 0xe0, 0xbd, 0x1e, 0xff, 0xaa, 0x17, + 0x03, 0x00, 0x00, 0x21, 0x65, 0x7b, 0x99, 0x09, + 0x02, 0xc3, 0x9d, 0x54, 0xd6, 0xe7, 0x32, 0x62, + 0xab, 0xc1, 0x09, 0x91, 0x30, 0x0a, 0xc9, 0xfa, + 0x70, 0xec, 0x06, 0x7b, 0xa3, 0xe1, 0x5f, 0xb4, + 0x63, 0xe6, 0x5c, 0xba, 0x1f, 0x15, 0x03, 0x00, + 0x00, 0x16, 0x40, 0x70, 0xbe, 0xe6, 0xa6, 0xee, + 0x8f, 0xd0, 0x87, 0xa0, 0x43, 0xa1, 0x92, 0xd7, + 0xd0, 0x1a, 0x0c, 0x20, 0x7c, 0xbf, 0xa2, 0xb5, + }, +} + +var selectCertificateBySNIScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x6a, 0x01, 0x00, 0x00, + 0x66, 0x03, 0x01, 0x50, 0x77, 0x3d, 0xfe, 0xfb, + 0x8d, 
0xc2, 0x68, 0xeb, 0xf9, 0xfa, 0x54, 0x97, + 0x86, 0x45, 0xa2, 0xa3, 0xed, 0xb1, 0x91, 0xb8, + 0x28, 0xc0, 0x47, 0xaf, 0xfb, 0xcd, 0xdc, 0x0e, + 0xb3, 0xea, 0xa5, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x10, 0x00, + 0x0e, 0x00, 0x00, 0x0b, 0x73, 0x6e, 0x69, 0x74, + 0x65, 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x16, + 0x03, 0x01, 0x02, 0x00, 0x0b, 0x00, 0x01, 0xfc, + 0x00, 0x01, 0xf9, 0x00, 0x01, 0xf6, 0x30, 0x82, + 0x01, 0xf2, 0x30, 0x82, 0x01, 0x5d, 0xa0, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x01, 0x00, 0x30, 0x0b, + 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, + 0x01, 0x01, 0x05, 0x30, 0x28, 0x31, 0x10, 0x30, + 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x07, + 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, 0x31, + 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, 0x04, 0x03, + 0x13, 0x0b, 0x73, 0x6e, 0x69, 0x74, 0x65, 0x73, + 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x30, 0x1e, 0x17, + 0x0d, 0x31, 0x32, 0x30, 0x34, 0x31, 0x31, 0x31, + 0x37, 0x34, 0x30, 0x33, 0x35, 0x5a, 0x17, 0x0d, + 0x31, 0x33, 0x30, 0x34, 0x31, 0x31, 0x31, 0x37, + 0x34, 0x35, 0x33, 0x35, 0x5a, 0x30, 0x28, 0x31, + 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, + 0x6f, 0x31, 0x14, 0x30, 0x12, 0x06, 0x03, 0x55, + 0x04, 0x03, 0x13, 0x0b, 0x73, 0x6e, 0x69, 0x74, + 0x65, 0x73, 0x74, 0x2e, 0x63, 0x6f, 0x6d, 0x30, + 0x81, 0x9d, 0x30, 0x0b, 0x06, 0x09, 0x2a, 0x86, + 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x01, 0x03, + 0x81, 0x8d, 0x00, 0x30, 0x81, 0x89, 0x02, 0x81, + 0x81, 0x00, 0xbb, 0x79, 0xd6, 0xf5, 0x17, 0xb5, + 0xe5, 0xbf, 0x46, 0x10, 0xd0, 0xdc, 0x69, 0xbe, + 0xe6, 0x2b, 0x07, 0x43, 0x5a, 0xd0, 0x03, 0x2d, + 0x8a, 0x7a, 0x43, 0x85, 0xb7, 0x14, 0x52, 0xe7, + 0xa5, 0x65, 0x4c, 0x2c, 0x78, 0xb8, 0x23, 0x8c, + 0xb5, 0xb4, 0x82, 0xe5, 0xde, 0x1f, 0x95, 0x3b, + 0x7e, 0x62, 0xa5, 0x2c, 0xa5, 0x33, 0xd6, 0xfe, + 0x12, 0x5c, 0x7a, 0x56, 0xfc, 0xf5, 0x06, 0xbf, + 0xfa, 0x58, 0x7b, 0x26, 0x3f, 0xb5, 0xcd, 0x04, + 0xd3, 0xd0, 0xc9, 0x21, 0x96, 0x4a, 0xc7, 0xf4, + 0x54, 0x9f, 0x5a, 0xbf, 0xef, 0x42, 0x71, 0x00, + 0xfe, 0x18, 0x99, 0x07, 0x7f, 0x7e, 0x88, 0x7d, + 0x7d, 0xf1, 0x04, 0x39, 0xc4, 0xa2, 0x2e, 0xdb, + 0x51, 0xc9, 0x7c, 0xe3, 0xc0, 0x4c, 0x3b, 0x32, + 0x66, 0x01, 0xcf, 0xaf, 0xb1, 0x1d, 0xb8, 0x71, + 0x9a, 0x1d, 0xdb, 0xdb, 0x89, 0x6b, 0xae, 0xda, + 0x2d, 0x79, 0x02, 0x03, 0x01, 0x00, 0x01, 0xa3, + 0x32, 0x30, 0x30, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x1d, 0x0f, 0x01, 0x01, 0xff, 0x04, 0x04, 0x03, + 0x02, 0x00, 0xa0, 0x30, 0x0d, 0x06, 0x03, 0x55, + 0x1d, 0x0e, 0x04, 0x06, 0x04, 0x04, 0x01, 0x02, + 0x03, 0x04, 0x30, 0x0f, 0x06, 0x03, 0x55, 0x1d, + 0x23, 0x04, 0x08, 0x30, 0x06, 0x80, 0x04, 0x01, + 0x02, 0x03, 0x04, 0x30, 0x0b, 0x06, 0x09, 0x2a, + 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, + 0x03, 0x81, 0x81, 0x00, 0x89, 0xc6, 0x45, 0x5f, + 0x1c, 0x1f, 0x5e, 0xf8, 0xeb, 0x1a, 0xb1, 0x74, + 0xee, 0x24, 0x39, 0x05, 0x9f, 0x5c, 0x42, 0x59, + 0xbb, 0x1a, 0x8d, 0x86, 0xcd, 0xb1, 0xd0, 0x56, + 0xf5, 0x6a, 0x71, 0x7d, 0xa4, 0x0e, 0x95, 0xab, + 0x90, 
0xf5, 0x9e, 0x8d, 0xea, 0xf6, 0x27, 0xc1, + 0x57, 0x99, 0x50, 0x94, 0xdb, 0x08, 0x02, 0x26, + 0x6e, 0xb3, 0x4f, 0xc6, 0x84, 0x2d, 0xea, 0x8a, + 0x4b, 0x68, 0xd9, 0xc1, 0x38, 0x91, 0x03, 0xab, + 0x84, 0xfb, 0x9e, 0x1f, 0x85, 0xd9, 0xb5, 0xd2, + 0x3f, 0xf2, 0x31, 0x2c, 0x86, 0x70, 0xfb, 0xb5, + 0x40, 0x14, 0x82, 0x45, 0xa4, 0xeb, 0xaf, 0xe2, + 0x64, 0xd9, 0x0c, 0x8a, 0x4c, 0xf4, 0xf8, 0x5b, + 0x0f, 0xac, 0x12, 0xac, 0x2f, 0xc4, 0xa3, 0x15, + 0x4b, 0xad, 0x52, 0x46, 0x28, 0x68, 0xaf, 0x96, + 0xc6, 0x2c, 0x65, 0x25, 0xd6, 0x52, 0xb6, 0xe3, + 0x18, 0x45, 0xbd, 0xcc, 0x16, 0x03, 0x01, 0x00, + 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x69, 0xc3, 0xd4, 0x0e, 0xcc, + 0xdc, 0xbc, 0x5e, 0xc2, 0x64, 0xa6, 0xde, 0x3c, + 0x0c, 0x7e, 0x0c, 0x6b, 0x80, 0x0f, 0xd4, 0x8f, + 0x02, 0x4b, 0xb2, 0xba, 0x8d, 0x01, 0xeb, 0x6b, + 0xa1, 0x2e, 0x79, 0x37, 0xba, 0xae, 0x24, 0xc2, + 0x26, 0x72, 0x51, 0xe1, 0x82, 0x8e, 0x51, 0x41, + 0x1c, 0x54, 0xa4, 0x26, 0xbe, 0x13, 0xcd, 0x1b, + 0xc6, 0xed, 0x3d, 0x1f, 0xfd, 0x72, 0x80, 0x90, + 0xdb, 0xbf, 0xd6, 0x39, 0x94, 0x5f, 0x48, 0xfb, + 0x25, 0x5a, 0xc9, 0x60, 0x9b, 0xd7, 0xc6, 0x20, + 0xa8, 0x66, 0x64, 0x13, 0xf3, 0x65, 0xc8, 0xb1, + 0xd5, 0x33, 0x21, 0x0e, 0x73, 0x41, 0xc0, 0x18, + 0x1a, 0x37, 0xfe, 0xcf, 0x28, 0x2a, 0xcd, 0xe4, + 0x0b, 0xac, 0xdd, 0x25, 0x5e, 0xcb, 0x17, 0x51, + 0x69, 0xd5, 0x8c, 0xf4, 0xb6, 0x21, 0x98, 0xef, + 0x20, 0xdb, 0x14, 0x67, 0xf3, 0x7c, 0x95, 0x6a, + 0x48, 0x2a, 0x6a, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0x36, 0x1b, + 0x09, 0xe5, 0xb9, 0xb9, 0x4d, 0x7d, 0xae, 0x87, + 0xb6, 0x0f, 0xaf, 0xec, 0x22, 0xba, 0x0d, 0xa5, + 0x96, 0x5e, 0x64, 0x65, 0xe7, 0xfb, 0xe3, 0xf3, + 0x6b, 0x72, 0xa8, 0xdb, 0xed, 0xd8, 0x69, 0x9c, + 0x08, 0xd8, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0x60, 0xf7, 0x09, 0x5f, 0xd1, + 0xcb, 0xc9, 0xe1, 0x22, 0xb5, 0x2a, 0xcc, 0xde, + 0x7c, 0xa7, 0xb8, 0x85, 0x00, 0xbc, 0xfd, 0x85, + 0xe1, 0x91, 0x36, 0xbb, 0x07, 0x42, 0xad, 0x3d, + 0x29, 0x62, 0x69, 0xc1, 0x45, 0x92, 0x6f, 0x17, + 0x03, 0x01, 0x00, 0x21, 0x0d, 0xf9, 0xd5, 0x87, + 0xb9, 0x57, 0x3c, 0x50, 0x19, 0xe4, 0x3a, 0x50, + 0x45, 0xcc, 0x86, 0x89, 0xd4, 0x32, 0x79, 0x45, + 0x7c, 0x9f, 0x96, 0xd4, 0x54, 0x56, 0x0c, 0x63, + 0x72, 0x81, 0xc3, 0xd3, 0xe3, 0x15, 0x03, 0x01, + 0x00, 0x16, 0x84, 0xec, 0x2e, 0xf6, 0xaf, 0x4f, + 0xee, 0x48, 0x0f, 0xbe, 0xcd, 0x82, 0x5c, 0x56, + 0x16, 0xe4, 0xfb, 0x89, 0xc5, 0x57, 0x3e, 0x91, + }, +} + +var issueSessionTicketTest = [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0x5a, 0x01, 0x00, 0x00, + 0x56, 0x03, 0x01, 0x50, 0x77, 0x3e, 0x49, 0x7a, + 0xb7, 0x86, 0x5c, 0x27, 0xd2, 0x97, 0x61, 0xe3, + 0x49, 0x41, 0x48, 0xe7, 0x0e, 0xaa, 0x7e, 0x4d, + 0xb8, 0xdc, 0x01, 0x97, 0xfb, 0xab, 0x53, 0xb2, + 0x5e, 0x36, 0xf6, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, 0x00, 0x04, 0x00, 0x23, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 
0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 
0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, + 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x68, 0x10, 0xdc, 0x80, 0xbc, + 0xb3, 0x5a, 0x10, 0x75, 0x89, 0xcc, 0xe5, 0x9f, + 0xbf, 0xe2, 0xce, 0xa4, 0x9f, 0x7f, 0x60, 0xc4, + 0xfe, 0x5c, 0xb5, 0x02, 0x2d, 0xa5, 0xa9, 0x1e, + 0x2c, 0x10, 0x79, 0x15, 0x0f, 0xed, 0x96, 0xb3, + 0xa8, 0x5e, 0x21, 0xbc, 0x5b, 0xdc, 0x58, 0x04, + 0x7d, 0x37, 0xdb, 0xa0, 0x31, 0xe8, 0x4f, 0x04, + 0xbc, 0x46, 0x7c, 0xdb, 0x2e, 0x93, 0x07, 0xaf, + 0xa6, 0x36, 0xd3, 0x39, 0x8d, 0x1d, 0x95, 0xa8, + 0x50, 0x4b, 0xc4, 0x2b, 0xde, 0xd7, 0x04, 0x6d, + 0x77, 0x6c, 0x4d, 0x70, 0x51, 0x88, 0x16, 0x31, + 0x40, 0xb5, 0xba, 0x90, 0x47, 0x64, 0x0c, 0x87, + 0xa5, 0x19, 0xf9, 0x89, 0x24, 0x3c, 0x5e, 0x4b, + 0xaa, 0xe0, 0x60, 0x47, 0x0f, 0x2e, 0xcc, 0xc2, + 0xd5, 0x21, 0xed, 0x72, 0xd0, 0xa9, 0xdd, 0x2a, + 0x2b, 0xef, 0x08, 0x3a, 0x65, 0xea, 0x8b, 0x52, + 0x77, 0x2d, 0xcc, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0xe2, 0x95, + 0x62, 0x3c, 0x18, 0xe5, 0xc7, 0x2c, 0xda, 0x16, + 0x9b, 0x28, 0x0d, 0xf7, 0x88, 0x7b, 0x5d, 0x33, + 0x55, 0x3b, 0x01, 0x73, 0xf2, 0xc6, 0x4e, 0x96, + 0x01, 0x01, 0x83, 0x65, 0xd4, 0xef, 0x12, 0x13, + 0x1d, 0x42, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0xfb, 0x41, 0x92, + 0x6d, 0x37, 0x5f, 0xf8, 0x7d, 0x90, 0x0f, 0x01, + 0xf8, 0x8c, 0xee, 0xbc, 0xd9, 0x0c, 0x97, 0x7e, + 0x23, 0x46, 0xe2, 0x6b, 0x52, 0xc6, 0xc6, 0x97, + 0x1d, 0xab, 0xde, 0xa0, 0x86, 0x94, 0xc8, 0x2e, + 0x8b, 0x2e, 0x42, 0x5f, 0xc2, 0x70, 0x35, 0xc9, + 0xee, 0x37, 0xeb, 0x70, 0xaa, 0x59, 0x23, 0x6c, + 0xc8, 0xc1, 0x84, 0x89, 0x39, 0x87, 0x73, 0x0a, + 0x7e, 0xba, 0xca, 0xed, 0x63, 0xba, 0x4e, 0x4f, + 0xf3, 0x31, 0x4b, 0xf0, 0xee, 0x91, 0xa5, 0xb4, + 0x62, 0x01, 0x9e, 0xbd, 0xbc, 0xb3, 0x35, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0x3f, 0x66, 0xe4, 0x98, 0xc1, 0x3f, + 0xc6, 0x2c, 0x81, 0xfb, 0xa9, 0x9f, 0x27, 0xe9, + 0x63, 0x20, 0x1e, 0x0e, 0x4f, 0xfc, 0x5d, 0x12, + 0xee, 0x77, 0x73, 0xc6, 0x96, 0x51, 0xf2, 0x26, + 0x35, 0x3f, 0xce, 0x6a, 0xa9, 0xfd, 0x17, 0x03, + 0x01, 0x00, 0x21, 0x8d, 0xd5, 0x67, 0x60, 0x5d, + 0xa7, 0x93, 0xcc, 0x39, 0x78, 0x59, 0xab, 0xdb, + 0x10, 0x96, 0xf2, 0xad, 0xa2, 0x85, 0xe2, 0x93, + 0x43, 0x43, 0xcf, 0x82, 0xbd, 0x1f, 0xdc, 0x7a, + 0x72, 0xd6, 0x83, 0x3b, 0x15, 0x03, 0x01, 0x00, + 0x16, 0x89, 0x55, 0xf6, 0x42, 0x71, 0xa9, 0xe9, + 0x05, 0x68, 0xe8, 0xce, 0x0d, 0x21, 0xe9, 0xec, + 0xf2, 0x27, 0x67, 0xa7, 0x94, 0xf8, 0x34, + }, +} +var serverResumeTest = [][]byte{ + { + 0x16, 
0x03, 0x01, 0x00, 0xc2, 0x01, 0x00, 0x00, + 0xbe, 0x03, 0x01, 0x50, 0x77, 0x3e, 0x4f, 0x1f, + 0x6f, 0xa5, 0x81, 0xeb, 0xb8, 0x80, 0x55, 0xa4, + 0x76, 0xc2, 0x7f, 0x27, 0xf2, 0xe7, 0xc9, 0x7a, + 0x01, 0x3c, 0xd8, 0xc1, 0xde, 0x99, 0x1f, 0x7c, + 0xab, 0x35, 0x98, 0x00, 0x00, 0x28, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x35, 0x00, 0x16, 0x00, 0x13, + 0x00, 0x0a, 0x00, 0x33, 0x00, 0x32, 0x00, 0x2f, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x02, 0x01, + 0x00, 0x00, 0x6c, 0x00, 0x23, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0xfb, 0x41, 0x92, + 0x6d, 0x37, 0x5f, 0xf8, 0x7d, 0x90, 0x0f, 0x01, + 0xf8, 0x8c, 0xee, 0xbc, 0xd9, 0x0c, 0x97, 0x7e, + 0x23, 0x46, 0xe2, 0x6b, 0x52, 0xc6, 0xc6, 0x97, + 0x1d, 0xab, 0xde, 0xa0, 0x86, 0x94, 0xc8, 0x2e, + 0x8b, 0x2e, 0x42, 0x5f, 0xc2, 0x70, 0x35, 0xc9, + 0xee, 0x37, 0xeb, 0x70, 0xaa, 0x59, 0x23, 0x6c, + 0xc8, 0xc1, 0x84, 0x89, 0x39, 0x87, 0x73, 0x0a, + 0x7e, 0xba, 0xca, 0xed, 0x63, 0xba, 0x4e, 0x4f, + 0xf3, 0x31, 0x4b, 0xf0, 0xee, 0x91, 0xa5, 0xb4, + 0x62, 0x01, 0x9e, 0xbd, 0xbc, 0xb3, 0x35, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x2a, 0x02, 0x00, 0x00, + 0x26, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0xc5, 0x35, 0x74, 0x19, 0x05, 0xc5, + 0x85, 0x68, 0x48, 0xe8, 0xb5, 0xe9, 0xaf, 0x78, + 0xbd, 0x35, 0x6f, 0xe9, 0x79, 0x34, 0x1b, 0xf0, + 0x35, 0xd4, 0x4e, 0x55, 0x2e, 0x3c, 0xd5, 0xaf, + 0xfc, 0xba, 0xf5, 0x1e, 0x83, 0x32, + }, + { + 0x14, 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, + 0x01, 0x00, 0x24, 0x27, 0x28, 0x88, 0xe1, 0x7e, + 0x0d, 0x9c, 0x12, 0x50, 0xf6, 0x7a, 0xa7, 0x32, + 0x21, 0x68, 0xba, 0xd8, 0x0a, 0xdc, 0x39, 0xef, + 0x68, 0x95, 0x82, 0xae, 0xbd, 0x12, 0x79, 0xa1, + 0x99, 0xfd, 0xd0, 0x10, 0x8e, 0x4b, 0xd8, + }, + { + 0x17, 0x03, 0x01, 0x00, 0x21, 0xc5, 0x7e, 0x0a, + 0x52, 0x6a, 0xb9, 0xaa, 0x1d, 0xae, 0x9e, 0x24, + 0x9c, 0x34, 0x1e, 0xdb, 0x50, 0x95, 0xee, 0x76, + 0xd7, 0x28, 0x88, 0x08, 0xe3, 0x2e, 0x58, 0xf7, + 0xdb, 0x34, 0x75, 0xa5, 0x7f, 0x9d, 0x15, 0x03, + 0x01, 0x00, 0x16, 0x2c, 0xc1, 0x29, 0x5f, 0x12, + 0x1d, 0x19, 0xab, 0xb3, 0xf4, 0x35, 0x1c, 0x62, + 0x6a, 0x80, 0x29, 0x0d, 0x0e, 0xef, 0x7d, 0x6e, + 0x50, + }, +} + +var clientauthRSATests = []clientauthTest{ + // Server asks for cert with empty CA list, client doesn't give it. 
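+	// (A note on the recorded scripts, verifiable from the record headers:
+	// flows alternate between the peers, with even indices holding the
+	// client's records — flow 0 always begins with a ClientHello (handshake
+	// type 0x01) — and odd indices holding the server's replies, starting
+	// with the ServerHello (handshake type 0x02).)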
+ // go test -run "TestRunServer" -serve -clientauth 1 + {"RequestClientCert, none given", RequestClientCert, nil, [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1e, 0x01, 0x00, 0x01, + 0x1a, 0x03, 0x03, 0x51, 0xe5, 0x6c, 0xb5, 0x5a, + 0xc2, 0xf5, 0xf0, 0x92, 0x94, 0x8a, 0x64, 0x18, + 0xa4, 0x2b, 0x82, 0x07, 0xbc, 0xd9, 0xd9, 0xf9, + 0x7b, 0xd2, 0xd0, 0xee, 0xa2, 0x70, 0x4e, 0x23, + 0x88, 0x7c, 0x95, 0x00, 0x00, 0x82, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0x00, 0x07, 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, + 0xc0, 0x02, 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, + 0x00, 0x12, 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, + 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, + 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, + 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, + 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, + 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, + 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, + 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, + 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, + 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, + 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, + 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 
0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x09, 0x0d, 0x00, 0x00, + 0x05, 0x02, 0x01, 0x40, 0x00, 0x00, 0x16, 0x03, + 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x07, 0x0b, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x16, 0x03, 0x01, 0x00, + 0x86, 0x10, 0x00, 0x00, 0x82, 0x00, 0x80, 0x36, + 0xfc, 0xd8, 0xc8, 0xa2, 0x67, 0xc8, 0xc6, 0xf4, + 0x28, 0x70, 0xe1, 0x5a, 0x02, 0x8f, 0xef, 
0x42, + 0xe0, 0xd3, 0xb8, 0xd6, 0x6b, 0xe4, 0xee, 0x5c, + 0xcf, 0x42, 0xc4, 0xfa, 0xcd, 0x0f, 0xfe, 0xf4, + 0x76, 0x76, 0x47, 0x73, 0xa8, 0x72, 0x8f, 0xa2, + 0x56, 0x81, 0x83, 0xb8, 0x84, 0x72, 0x67, 0xdd, + 0xbe, 0x05, 0x4b, 0x84, 0xd9, 0xd2, 0xb6, 0xc2, + 0xe7, 0x20, 0xac, 0x1f, 0x46, 0x9d, 0x05, 0x47, + 0x8e, 0x89, 0xc0, 0x42, 0x57, 0x4a, 0xa2, 0x98, + 0xe5, 0x39, 0x4f, 0xc4, 0x27, 0x6d, 0x43, 0xa8, + 0x83, 0x76, 0xe6, 0xad, 0xe3, 0x17, 0x68, 0x31, + 0xcb, 0x7e, 0xfc, 0xe7, 0x4b, 0x76, 0x3d, 0x3c, + 0xfa, 0x77, 0x65, 0xc9, 0x4c, 0x5b, 0xce, 0x5e, + 0xf7, 0x8b, 0xa8, 0xa6, 0xdd, 0xb2, 0xef, 0x0b, + 0x46, 0x83, 0xdf, 0x0a, 0x8c, 0x22, 0x12, 0x6e, + 0xe1, 0x45, 0x54, 0x88, 0xd1, 0xe8, 0xd2, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0x30, 0x8c, 0x7d, 0x40, 0xfc, 0x5e, + 0x80, 0x9c, 0xc4, 0x7c, 0x62, 0x01, 0xa1, 0x37, + 0xcf, 0x1a, 0x75, 0x28, 0x8d, 0xeb, 0x63, 0xcc, + 0x02, 0xa6, 0x66, 0xdf, 0x36, 0x01, 0xb3, 0x9d, + 0x38, 0x42, 0x16, 0x91, 0xf0, 0x02, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0x96, 0x9a, 0x2a, + 0x6c, 0x8c, 0x7e, 0x38, 0x10, 0x46, 0x86, 0x1d, + 0x19, 0x1d, 0x62, 0x29, 0x3f, 0x58, 0xfb, 0x6d, + 0x89, 0xd2, 0x81, 0x9a, 0x1c, 0xb3, 0x58, 0xb3, + 0x19, 0x39, 0x17, 0x47, 0x49, 0xc9, 0xfe, 0x4a, + 0x7a, 0x32, 0xac, 0x2c, 0x43, 0xf9, 0xa9, 0xea, + 0xec, 0x51, 0x46, 0xf1, 0xb8, 0x59, 0x23, 0x70, + 0xce, 0x7c, 0xb9, 0x47, 0x70, 0xa3, 0xc9, 0xae, + 0x47, 0x7b, 0x7e, 0xc7, 0xcf, 0x76, 0x12, 0x76, + 0x18, 0x90, 0x12, 0xcd, 0xf3, 0xd4, 0x27, 0x81, + 0xfc, 0x46, 0x03, 0x3e, 0x05, 0x87, 0x6f, 0x14, + 0x03, 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, + 0x00, 0x24, 0xc3, 0xa0, 0x29, 0xb1, 0x52, 0x82, + 0xef, 0x85, 0xa1, 0x64, 0x0f, 0xe4, 0xa3, 0xfb, + 0xa7, 0x1d, 0x22, 0x4c, 0xcb, 0xd6, 0x5b, 0x18, + 0x61, 0xc7, 0x7c, 0xf2, 0x67, 0x4a, 0xc7, 0x11, + 0x9d, 0x8e, 0x0e, 0x15, 0x22, 0xcf, 0x17, 0x03, + 0x01, 0x00, 0x21, 0xfd, 0xbb, 0xf1, 0xa9, 0x7c, + 0xbf, 0x92, 0xb3, 0xfa, 0x2c, 0x08, 0x6f, 0x22, + 0x78, 0x80, 0xf2, 0x2e, 0x86, 0x26, 0x21, 0x36, + 0x3f, 0x32, 0xdf, 0xb6, 0x47, 0xa5, 0xf8, 0x27, + 0xc1, 0xe9, 0x53, 0x90, 0x15, 0x03, 0x01, 0x00, + 0x16, 0xfe, 0xef, 0x2e, 0xa0, 0x5d, 0xe0, 0xce, + 0x94, 0x20, 0x56, 0x61, 0x6e, 0xe5, 0x62, 0xce, + 0x27, 0x57, 0x3e, 0x30, 0x32, 0x77, 0x53, + }, + }}, + + // Server asks for cert with empty CA list, client gives one + // go test -run "TestRunServer" -serve -clientauth 1 + {"RequestClientCert, client gives it", RequestClientCert, []*x509.Certificate{clientCertificate}, [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1e, 0x01, 0x00, 0x01, + 0x1a, 0x03, 0x03, 0x51, 0xe5, 0x74, 0x0e, 0x95, + 0x6f, 0x4f, 0x4a, 0xbf, 0xb7, 0xc0, 0x6c, 0xac, + 0xd9, 0xfe, 0x7d, 0xd0, 0x51, 0x19, 0x62, 0x62, + 0x1c, 0x6e, 0x57, 0x77, 0xd2, 0x31, 0xaf, 0x88, + 0xb9, 0xc0, 0x1d, 0x00, 0x00, 0x82, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 
0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0x00, 0x07, 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, + 0xc0, 0x02, 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, + 0x00, 0x12, 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, + 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, + 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, + 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, + 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, + 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, + 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, + 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, + 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, + 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, + 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, + 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 
0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x01, 0x00, 0x09, 0x0d, 0x00, 0x00, + 0x05, 0x02, 0x01, 0x40, 0x00, 0x00, 0x16, 0x03, + 0x01, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x01, 0xfb, 0x0b, 0x00, 0x01, + 0xf7, 0x00, 0x01, 0xf4, 0x00, 0x01, 0xf1, 0x30, + 0x82, 0x01, 0xed, 0x30, 0x82, 0x01, 0x58, 0xa0, + 0x03, 0x02, 0x01, 0x02, 0x02, 0x01, 0x00, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x31, 0x31, 0x32, 0x30, 0x38, 0x30, 0x37, + 0x35, 0x35, 0x31, 0x32, 0x5a, 0x17, 0x0d, 0x31, + 0x32, 0x31, 0x32, 0x30, 0x37, 0x30, 0x38, 0x30, + 0x30, 0x31, 0x32, 0x5a, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x81, 0x9c, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x03, 0x81, 0x8c, 0x00, + 0x30, 0x81, 0x88, 0x02, 0x81, 0x80, 0x4e, 0xd0, + 0x7b, 0x31, 0xe3, 0x82, 0x64, 0xd9, 0x59, 0xc0, + 0xc2, 0x87, 0xa4, 0x5e, 0x1e, 
0x8b, 0x73, 0x33, + 0xc7, 0x63, 0x53, 0xdf, 0x66, 0x92, 0x06, 0x84, + 0xf6, 0x64, 0xd5, 0x8f, 0xe4, 0x36, 0xa7, 0x1d, + 0x2b, 0xe8, 0xb3, 0x20, 0x36, 0x45, 0x23, 0xb5, + 0xe3, 0x95, 0xae, 0xed, 0xe0, 0xf5, 0x20, 0x9c, + 0x8d, 0x95, 0xdf, 0x7f, 0x5a, 0x12, 0xef, 0x87, + 0xe4, 0x5b, 0x68, 0xe4, 0xe9, 0x0e, 0x74, 0xec, + 0x04, 0x8a, 0x7f, 0xde, 0x93, 0x27, 0xc4, 0x01, + 0x19, 0x7a, 0xbd, 0xf2, 0xdc, 0x3d, 0x14, 0xab, + 0xd0, 0x54, 0xca, 0x21, 0x0c, 0xd0, 0x4d, 0x6e, + 0x87, 0x2e, 0x5c, 0xc5, 0xd2, 0xbb, 0x4d, 0x4b, + 0x4f, 0xce, 0xb6, 0x2c, 0xf7, 0x7e, 0x88, 0xec, + 0x7c, 0xd7, 0x02, 0x91, 0x74, 0xa6, 0x1e, 0x0c, + 0x1a, 0xda, 0xe3, 0x4a, 0x5a, 0x2e, 0xde, 0x13, + 0x9c, 0x4c, 0x40, 0x88, 0x59, 0x93, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x32, 0x30, 0x30, 0x30, + 0x0e, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x01, 0x01, + 0xff, 0x04, 0x04, 0x03, 0x02, 0x00, 0xa0, 0x30, + 0x0d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x06, + 0x04, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, 0x0f, + 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x08, 0x30, + 0x06, 0x80, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x03, 0x81, 0x81, 0x00, + 0x36, 0x1f, 0xb3, 0x7a, 0x0c, 0x75, 0xc9, 0x6e, + 0x37, 0x46, 0x61, 0x2b, 0xd5, 0xbd, 0xc0, 0xa7, + 0x4b, 0xcc, 0x46, 0x9a, 0x81, 0x58, 0x7c, 0x85, + 0x79, 0x29, 0xc8, 0xc8, 0xc6, 0x67, 0xdd, 0x32, + 0x56, 0x45, 0x2b, 0x75, 0xb6, 0xe9, 0x24, 0xa9, + 0x50, 0x9a, 0xbe, 0x1f, 0x5a, 0xfa, 0x1a, 0x15, + 0xd9, 0xcc, 0x55, 0x95, 0x72, 0x16, 0x83, 0xb9, + 0xc2, 0xb6, 0x8f, 0xfd, 0x88, 0x8c, 0x38, 0x84, + 0x1d, 0xab, 0x5d, 0x92, 0x31, 0x13, 0x4f, 0xfd, + 0x83, 0x3b, 0xc6, 0x9d, 0xf1, 0x11, 0x62, 0xb6, + 0x8b, 0xec, 0xab, 0x67, 0xbe, 0xc8, 0x64, 0xb0, + 0x11, 0x50, 0x46, 0x58, 0x17, 0x6b, 0x99, 0x1c, + 0xd3, 0x1d, 0xfc, 0x06, 0xf1, 0x0e, 0xe5, 0x96, + 0xa8, 0x0c, 0xf9, 0x78, 0x20, 0xb7, 0x44, 0x18, + 0x51, 0x8d, 0x10, 0x7e, 0x4f, 0x94, 0x67, 0xdf, + 0xa3, 0x4e, 0x70, 0x73, 0x8e, 0x90, 0x91, 0x85, + 0x16, 0x03, 0x01, 0x00, 0x86, 0x10, 0x00, 0x00, + 0x82, 0x00, 0x80, 0x0a, 0x4e, 0x89, 0xdf, 0x3a, + 0x3f, 0xf0, 0x4f, 0xef, 0x1a, 0x90, 0xd4, 0x3c, + 0xaf, 0x10, 0x57, 0xb0, 0xa1, 0x5f, 0xcd, 0x62, + 0x01, 0xe9, 0x0c, 0x36, 0x42, 0xfd, 0xaf, 0x23, + 0xf9, 0x14, 0xa6, 0x72, 0x26, 0x4e, 0x01, 0xdb, + 0xac, 0xb7, 0x4c, 0xe6, 0xa9, 0x52, 0xe2, 0xec, + 0x26, 0x8c, 0x7a, 0x64, 0xf8, 0x0b, 0x4c, 0x2f, + 0xa9, 0xcb, 0x75, 0xaf, 0x60, 0xd4, 0xb4, 0xe6, + 0xe8, 0xdb, 0x78, 0x78, 0x85, 0xf6, 0x0c, 0x95, + 0xcc, 0xb6, 0x55, 0xb9, 0xba, 0x9e, 0x91, 0xbc, + 0x66, 0xdb, 0x1e, 0x28, 0xab, 0x73, 0xce, 0x8b, + 0xd0, 0xd3, 0xe8, 0xbc, 0xd0, 0x21, 0x28, 0xbd, + 0xfb, 0x74, 0x64, 0xde, 0x3b, 0x3b, 0xd3, 0x4c, + 0x32, 0x40, 0x82, 0xba, 0x91, 0x1e, 0xe8, 0x47, + 0xc2, 0x09, 0xb7, 0x16, 0xaa, 0x25, 0xa9, 0x3c, + 0x6c, 0xa7, 0xf8, 0xc9, 0x54, 0x84, 0xc6, 0xf7, + 0x56, 0x05, 0xa4, 0x16, 0x03, 0x01, 0x00, 0x86, + 0x0f, 0x00, 0x00, 0x82, 0x00, 0x80, 0x4b, 0xab, + 0xda, 0xac, 0x2a, 0xb3, 0xe6, 0x34, 0x55, 0xcd, + 0xf2, 0x4b, 0x67, 0xe3, 0xd3, 0xff, 0xa3, 0xf4, + 0x79, 0x82, 0x01, 0x47, 0x8a, 0xe3, 0x9f, 0x89, + 0x70, 0xbe, 0x24, 0x24, 0xb7, 0x69, 0x60, 0xed, + 0x55, 0xa0, 0xca, 0x72, 0xb6, 0x4a, 0xbc, 0x1d, + 0xe2, 0x3f, 0xb5, 0x31, 0xda, 0x02, 0xf6, 0x37, + 0x51, 0xf8, 0x4c, 0x88, 0x2e, 0xb3, 0x8a, 0xe8, + 0x7b, 0x4a, 0x90, 0x36, 0xe4, 0xa6, 0x31, 0x95, + 0x8b, 0xa0, 0xc6, 0x91, 0x12, 0xb9, 0x35, 0x4e, + 0x72, 0xeb, 0x5c, 0xa2, 0xe8, 0x4c, 0x68, 0xf9, + 0x69, 0xfa, 0x70, 0x60, 0x6c, 0x7f, 0x32, 0x99, + 0xf1, 0xc3, 0x2d, 0xb4, 0x59, 0x58, 0x87, 0xaf, + 0x67, 0x62, 0x90, 0xe7, 0x8d, 
0xd0, 0xa3, 0x77, + 0x33, 0xc2, 0x9b, 0xd5, 0x9c, 0xc7, 0xea, 0x25, + 0x98, 0x76, 0x9c, 0xe0, 0x6a, 0x03, 0x3a, 0x10, + 0xfd, 0x10, 0x3d, 0x55, 0x53, 0xa0, 0x14, 0x03, + 0x01, 0x00, 0x01, 0x01, 0x16, 0x03, 0x01, 0x00, + 0x24, 0xd5, 0x12, 0xfc, 0xb9, 0x5a, 0xe3, 0x27, + 0x01, 0xbe, 0xc3, 0x77, 0x17, 0x1a, 0xbb, 0x4f, + 0xae, 0xd5, 0xa7, 0xee, 0x56, 0x61, 0x0d, 0x40, + 0xf4, 0xa4, 0xb5, 0xcc, 0x76, 0xfd, 0xbd, 0x13, + 0x04, 0xe1, 0xb8, 0xc7, 0x36, + }, + { + 0x16, 0x03, 0x01, 0x02, 0x67, 0x04, 0x00, 0x02, + 0x63, 0x00, 0x00, 0x00, 0x00, 0x02, 0x5d, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x4b, 0xd1, 0xef, 0xba, 0x1f, 0xe2, 0x69, + 0x07, 0x7f, 0x85, 0x2d, 0x4e, 0x2a, 0x2e, 0xbd, + 0x05, 0xe9, 0xc1, 0x6c, 0x9e, 0xbf, 0x47, 0x18, + 0x91, 0x77, 0xf7, 0xe8, 0xb6, 0x27, 0x37, 0xa6, + 0x6b, 0x87, 0x29, 0xbb, 0x3b, 0xe5, 0x68, 0x62, + 0x04, 0x3e, 0xad, 0x4d, 0xff, 0xad, 0xf1, 0x22, + 0x87, 0x8d, 0xf6, 0x04, 0x3b, 0x59, 0x22, 0xf7, + 0xfd, 0x88, 0x0e, 0xa4, 0x09, 0xc0, 0x0d, 0x10, + 0x80, 0x10, 0x79, 0xee, 0x70, 0x96, 0xdb, 0x22, + 0x8b, 0xb7, 0xac, 0xe0, 0x98, 0xad, 0xe9, 0xe3, + 0xcb, 0xea, 0x9f, 0xe6, 0x83, 0x28, 0x7c, 0x7e, + 0x4e, 0x9a, 0x8d, 0xd9, 0xf3, 0x86, 0xf4, 0x89, + 0x8b, 0x79, 0x8f, 0xbb, 0xe9, 0x74, 0x02, 0x02, + 0x14, 0x04, 0xea, 0xba, 0x16, 0x10, 0xa1, 0x85, + 0xbe, 0x4e, 0x4e, 0x92, 0xc5, 0x83, 0xf6, 0x1e, + 0x1f, 0xd4, 0x25, 0xc2, 0xc2, 0xb9, 0xce, 0x33, + 0x63, 0x66, 0x79, 0x1f, 0x54, 0x35, 0xc1, 0xe8, + 0x89, 0x34, 0x78, 0x94, 0x36, 0x14, 0xef, 0x01, + 0x1f, 0xf1, 0xbd, 0x77, 0x2c, 0x4d, 0xac, 0x5c, + 0x5c, 0x4a, 0xc6, 0xed, 0xd8, 0x0e, 0x72, 0x84, + 0x83, 0xdc, 0x56, 0x84, 0xc8, 0xf3, 0x89, 0x56, + 0xfd, 0x89, 0xc1, 0xc9, 0x9a, 0x29, 0x91, 0x7e, + 0x19, 0xe9, 0x8b, 0x5b, 0x11, 0x15, 0x4e, 0x6c, + 0xf4, 0x89, 0xe7, 0x6d, 0x68, 0x1e, 0xf9, 0x6c, + 0x23, 0x72, 0x05, 0x68, 0x82, 0x60, 0x84, 0x1f, + 0x83, 0x20, 0x09, 0x86, 0x10, 0x81, 0xec, 0xec, + 0xdc, 0x25, 0x53, 0x20, 0xfa, 0xa9, 0x41, 0x64, + 0xd6, 0x20, 0xf3, 0xf4, 0x52, 0xf2, 0x80, 0x62, + 0x83, 0xc9, 0x23, 0x66, 0x44, 0x95, 0x5a, 0x99, + 0x8a, 0xe1, 0x26, 0x63, 0xc1, 0x8b, 0x31, 0xf9, + 0x21, 0x06, 0x77, 0x04, 0x27, 0xf2, 0x0c, 0x63, + 0x83, 0x45, 0xa0, 0xa9, 0x7b, 0xcf, 0xdf, 0xd7, + 0x56, 0x75, 0xbc, 0xdd, 0x95, 0x36, 0xb1, 0x75, + 0x39, 0x05, 0x00, 0x3c, 0x8a, 0x79, 0xd6, 0xe9, + 0xf0, 0x4b, 0xdc, 0x51, 0x6b, 0x01, 0x94, 0x16, + 0x87, 0x12, 0x92, 0x6c, 0x07, 0xc1, 0xf5, 0x58, + 0xb7, 0x2a, 0x81, 0xf5, 0xa0, 0x37, 0x8b, 0xa6, + 0x22, 0xfe, 0x28, 0x0a, 0x7e, 0x68, 0xe2, 0xda, + 0x6c, 0x53, 0xee, 0x0e, 0x8d, 0x2d, 0x8b, 0x0b, + 0xda, 0xf8, 0x99, 0x3e, 0x0e, 0xed, 0x9f, 0xc1, + 0x2b, 0xf6, 0xfe, 0xe9, 0x52, 0x38, 0x7b, 0x83, + 0x9a, 0x50, 0xa6, 0xd7, 0x49, 0x83, 0x43, 0x7e, + 0x82, 0xec, 0xc7, 0x09, 0x3d, 0x3d, 0xb1, 0xee, + 0xe8, 0xc5, 0x6a, 0xc3, 0x3d, 0x4b, 0x4c, 0x6a, + 0xbb, 0x0b, 0x2c, 0x24, 0x2e, 0xdb, 0x7d, 0x57, + 0x87, 0xb4, 0x80, 0xa5, 0xae, 0xff, 0x54, 0xa8, + 0xa5, 0x27, 0x69, 0x95, 0xc8, 0xe7, 0x79, 0xc7, + 0x89, 0x2a, 0x73, 0x49, 0xcb, 0xf5, 0xc5, 0xbc, + 0x4a, 0xe0, 0x73, 0xa9, 0xbc, 0x88, 0x64, 0x96, + 0x98, 0xa5, 0x1e, 0xe3, 0x43, 0xc1, 0x7d, 0x78, + 0xc7, 0x94, 0x72, 0xd4, 0x2c, 0x6e, 0x85, 0x39, + 0x9a, 0xaf, 0xdb, 0xa1, 0xe9, 0xe2, 0xcb, 0x37, + 0x04, 0xc6, 0x8c, 0x81, 0xd3, 0x2a, 0xb7, 0xbe, + 0x6c, 0x07, 0x1f, 0x5e, 0xd9, 0x00, 0xd2, 0xf7, + 0xe1, 0xa7, 0xbc, 0x0c, 0xb6, 0x6d, 0xfb, 0x3f, + 0x3d, 0x24, 0xaa, 0xfb, 0x7e, 0xe1, 0xb5, 0x1b, + 0xff, 0x38, 0xaa, 0x69, 0x59, 0x38, 0x52, 0x9a, + 0x0e, 0x6d, 0xbc, 0xde, 0x4f, 0x13, 0x09, 
0x17, + 0xc4, 0xa9, 0x05, 0x84, 0xbc, 0x50, 0xef, 0x40, + 0xb0, 0x4c, 0x24, 0x32, 0xed, 0x94, 0x2c, 0xdd, + 0xda, 0x20, 0x24, 0x67, 0xe2, 0xea, 0x71, 0x3d, + 0x4a, 0x04, 0x0d, 0x98, 0x29, 0x20, 0x4c, 0xeb, + 0x70, 0xce, 0x45, 0x9e, 0x5a, 0xaf, 0xb6, 0xa3, + 0x92, 0xc8, 0x28, 0xf2, 0xe3, 0xe8, 0x8a, 0x5d, + 0x0a, 0x33, 0x79, 0x9b, 0x6a, 0xf3, 0x30, 0x01, + 0x1d, 0x47, 0xbd, 0x01, 0xcc, 0x4d, 0x71, 0xc0, + 0x56, 0xfa, 0xfd, 0x37, 0xed, 0x0f, 0x27, 0xc0, + 0xbb, 0xa0, 0xee, 0xc3, 0x79, 0x8b, 0xe7, 0x41, + 0x8f, 0xfa, 0x3a, 0xcb, 0x45, 0x3b, 0x85, 0x9f, + 0x06, 0x90, 0xb2, 0x51, 0x7a, 0xc3, 0x11, 0x41, + 0x4b, 0xe3, 0x26, 0x94, 0x3e, 0xa2, 0xfd, 0x0a, + 0xda, 0x50, 0xf6, 0x50, 0x78, 0x19, 0x6c, 0x52, + 0xd1, 0x12, 0x76, 0xc2, 0x50, 0x2f, 0x0b, 0xca, + 0x33, 0xe5, 0x79, 0x93, 0x14, 0x03, 0x01, 0x00, + 0x01, 0x01, 0x16, 0x03, 0x01, 0x00, 0x24, 0x2b, + 0x51, 0x42, 0x95, 0x6b, 0xca, 0x9f, 0x42, 0x5d, + 0xd2, 0xd9, 0x67, 0xf9, 0x49, 0x30, 0xfd, 0x2a, + 0x46, 0xd3, 0x04, 0xf4, 0x86, 0xf9, 0x11, 0x34, + 0x82, 0xac, 0xe2, 0xc2, 0x2d, 0xc4, 0xd0, 0xfe, + 0xa9, 0xc9, 0x4b, 0x17, 0x03, 0x01, 0x00, 0x21, + 0x65, 0x1c, 0xe9, 0x5c, 0xb6, 0xe2, 0x7c, 0x8e, + 0x49, 0x12, 0x1b, 0xe6, 0x40, 0xd3, 0x97, 0x21, + 0x76, 0x01, 0xe5, 0x80, 0x5e, 0xf3, 0x11, 0x47, + 0x25, 0x02, 0x78, 0x8e, 0x6b, 0xae, 0xb3, 0xf3, + 0x59, 0x15, 0x03, 0x01, 0x00, 0x16, 0x38, 0xc1, + 0x99, 0x2e, 0xf8, 0x6f, 0x45, 0xa4, 0x10, 0x79, + 0x5b, 0xc1, 0x47, 0x9a, 0xf6, 0x5c, 0x90, 0xeb, + 0xa6, 0xe3, 0x1a, 0x24, + }, + }}, +} + +var tls11ECDHEAESServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x46, 0x01, 0x00, 0x01, + 0x42, 0x03, 0x03, 0x51, 0x9f, 0xa3, 0xb0, 0xb7, + 0x1d, 0x26, 0x93, 0x36, 0xc0, 0x8d, 0x7e, 0xf8, + 0x4f, 0x6f, 0xc9, 0x3c, 0x31, 0x1e, 0x7f, 0xb1, + 0xf0, 0xc1, 0x0f, 0xf9, 0x0c, 0xa2, 0xd5, 0xca, + 0x48, 0xe5, 0x35, 0x00, 0x00, 0xd0, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0xc0, 0x22, 0xc0, 0x21, 0x00, 0xa5, + 0x00, 0xa3, 0x00, 0xa1, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x69, 0x00, 0x68, 0x00, 0x39, + 0x00, 0x38, 0x00, 0x37, 0x00, 0x36, 0x00, 0x88, + 0x00, 0x87, 0x00, 0x86, 0x00, 0x85, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0x00, 0x84, 0xc0, 0x12, 0xc0, 0x08, 0xc0, 0x1c, + 0xc0, 0x1b, 0x00, 0x16, 0x00, 0x13, 0x00, 0x10, + 0x00, 0x0d, 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, + 0xc0, 0x2f, 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, + 0xc0, 0x13, 0xc0, 0x09, 0xc0, 0x1f, 0xc0, 0x1e, + 0x00, 0xa4, 0x00, 0xa2, 0x00, 0xa0, 0x00, 0x9e, + 0x00, 0x67, 0x00, 0x40, 0x00, 0x3f, 0x00, 0x3e, + 0x00, 0x33, 0x00, 0x32, 0x00, 0x31, 0x00, 0x30, + 0x00, 0x9a, 0x00, 0x99, 0x00, 0x98, 0x00, 0x97, + 0x00, 0x45, 0x00, 0x44, 0x00, 0x43, 0x00, 0x42, + 0xc0, 0x31, 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, + 0xc0, 0x0e, 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, + 0x00, 0x2f, 0x00, 0x96, 0x00, 0x41, 0x00, 0x07, + 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, 0xc0, 0x02, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x0f, 0x00, 0x0c, 0x00, 0x09, 0x00, 0x14, + 0x00, 0x11, 0x00, 0x0e, 0x00, 0x0b, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x01, 0x00, + 0x00, 0x49, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, + 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, 0x00, 0x32, + 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, 0x00, 0x0b, + 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, 0x00, 0x0a, + 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, 0x00, 0x06, + 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, 0x00, 0x04, + 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, 0x00, 0x01, + 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, 0x00, 0x10, + 0x00, 
0x11, 0x00, 0x23, 0x00, 0x00, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x13, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x02, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 
0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x02, 0x01, 0x0f, 0x0c, 0x00, 0x01, + 0x0b, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x00, 0x80, 0x16, 0x83, 0x9b, 0xf9, + 0x72, 0xdb, 0x9f, 0x55, 0x02, 0xe1, 0x04, 0xf7, + 0xb5, 0x3f, 0x4c, 0x71, 0x13, 0x5a, 0x91, 0xe9, + 0x1d, 0xeb, 0x9d, 0x9c, 0xfb, 0x88, 0xef, 0xca, + 0xec, 0x7d, 0x9b, 0xdd, 0xd9, 0xee, 0x2b, 0x8e, + 0xef, 0xf8, 0xb6, 0xc7, 0x7d, 0xfe, 0xda, 0x7f, + 0x90, 0x2e, 0x53, 0xf1, 0x64, 0x95, 0xfc, 0x66, + 0xfc, 0x87, 0x27, 0xb6, 0x9f, 0xc8, 0x3a, 0x95, + 0x68, 0x17, 0xe1, 0x7d, 0xf1, 0x88, 0xe8, 0x17, + 0x5f, 0x99, 0x90, 0x3f, 0x47, 0x47, 0x81, 0x06, + 0xe2, 0x8e, 0x22, 0x56, 0x8f, 0xc2, 0x14, 0xe5, + 0x62, 0xa7, 0x0d, 0x41, 0x3c, 0xc7, 0x4a, 0x0a, + 0x74, 0x4b, 0xda, 0x00, 0x8e, 0x4f, 0x90, 0xe6, + 0xd7, 0x68, 0xe5, 0x8b, 0xf2, 0x3f, 0x53, 0x1d, + 0x7a, 0xe6, 0xb3, 0xe9, 0x8a, 0xc9, 0x4d, 0x19, + 0xa6, 0xcf, 0xf9, 0xed, 0x5e, 0x26, 0xdc, 0x90, + 0x1c, 0x41, 0xad, 0x7c, 0x16, 0x03, 0x02, 0x00, + 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x01, 0x11, 0xf2, 0xa4, 0x2d, + 0x1a, 0x75, 0x6c, 0xbc, 0x2d, 0x91, 0x95, 0x07, + 0xbe, 0xd6, 0x41, 0x7a, 0xbb, 0xc2, 0x7b, 0xa6, + 0x9b, 0xe3, 0xdc, 0x41, 0x7f, 0x1e, 0x2e, 0xcc, + 0x6d, 0xa3, 0x85, 0x53, 0x98, 0x9f, 0x2d, 0xe6, + 0x3c, 0xb9, 0x82, 0xa6, 0x80, 0x53, 0x9b, 0x71, + 0xfd, 0x27, 0xe5, 0xe5, 0xdf, 0x13, 0xba, 0x56, + 0x62, 0x30, 0x4a, 0x57, 0x27, 0xa7, 0xcc, 0x26, + 0x54, 0xe8, 0x65, 0x6e, 0x4d, 0x00, 0xbf, 
0x8a, + 0xcc, 0x89, 0x6a, 0x6c, 0x88, 0xda, 0x79, 0x4f, + 0xc5, 0xad, 0x6d, 0x1d, 0x7c, 0x53, 0x7b, 0x1a, + 0x96, 0xf2, 0xf8, 0x30, 0x01, 0x0b, 0xc2, 0xf0, + 0x78, 0x41, 0xf4, 0x0d, 0xe0, 0xbe, 0xb9, 0x36, + 0xe0, 0xb7, 0xee, 0x16, 0xeb, 0x25, 0x67, 0x04, + 0xc0, 0x2e, 0xd8, 0x34, 0x4a, 0x65, 0xa5, 0xf1, + 0x95, 0x75, 0xc7, 0x39, 0xa9, 0x68, 0xa9, 0x53, + 0x93, 0x5b, 0xca, 0x7b, 0x7f, 0xc0, 0x63, 0x14, + 0x03, 0x02, 0x00, 0x01, 0x01, 0x16, 0x03, 0x02, + 0x00, 0x40, 0x01, 0xb1, 0xae, 0x1b, 0x8a, 0x65, + 0xf8, 0x37, 0x50, 0x39, 0x76, 0xef, 0xaa, 0xda, + 0x84, 0xc9, 0x5f, 0x80, 0xdc, 0xfa, 0xe0, 0x46, + 0x5a, 0xc7, 0x77, 0x9d, 0x76, 0x03, 0xa6, 0xd5, + 0x0e, 0xbf, 0x25, 0x30, 0x5c, 0x99, 0x7d, 0xcd, + 0x2b, 0xaa, 0x2e, 0x8c, 0xdd, 0xda, 0xaa, 0xd7, + 0xf1, 0xf6, 0x33, 0x47, 0x51, 0x1e, 0x83, 0xa1, + 0x83, 0x04, 0xd2, 0xb2, 0xc8, 0xbc, 0x11, 0xc5, + 0x1a, 0x87, + }, + { + 0x16, 0x03, 0x02, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xeb, 0x8b, 0xc7, 0xef, 0xba, 0xe8, 0x0f, 0x69, + 0xfe, 0xfb, 0xc3, 0x3d, 0x90, 0x5d, 0xd7, 0xb2, + 0x51, 0x64, 0xac, 0xc3, 0xae, 0x33, 0x03, 0x42, + 0x45, 0x2d, 0xa7, 0x57, 0xbd, 0xa3, 0x85, 0x64, + 0xa6, 0xfe, 0x5c, 0x33, 0x04, 0x93, 0xf2, 0x7c, + 0x06, 0x6d, 0xd7, 0xd7, 0xcf, 0x4a, 0xaf, 0xb2, + 0xdd, 0x06, 0xdc, 0x28, 0x14, 0x59, 0x23, 0x02, + 0xef, 0x97, 0x6a, 0xe8, 0xec, 0xca, 0x10, 0x44, + 0xcd, 0xb8, 0x50, 0x16, 0x46, 0x5a, 0x05, 0xda, + 0x04, 0xb3, 0x0e, 0xe9, 0xf0, 0x74, 0xc5, 0x23, + 0xc2, 0x0e, 0xa1, 0x54, 0x66, 0x7b, 0xe8, 0x14, + 0x03, 0x02, 0x00, 0x01, 0x01, 0x16, 0x03, 0x02, + 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x6b, 0x43, 0x1c, 0x58, 0xbc, 0x85, + 0xf7, 0xc1, 0x76, 0xbc, 0x72, 0x33, 0x41, 0x6b, + 0xb8, 0xf8, 0xfd, 0x53, 0x21, 0xc2, 0x41, 0x1b, + 0x72, 0x4f, 0xce, 0x97, 0xca, 0x14, 0x23, 0x4d, + 0xbc, 0x44, 0xd6, 0xd7, 0xfc, 0xbc, 0xfd, 0xfd, + 0x5d, 0x33, 0x42, 0x1b, 0x52, 0x40, 0x0a, 0x2b, + 0x6c, 0x98, 0x17, 0x03, 0x02, 0x00, 0x40, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, + 0x31, 0xef, 0x03, 0x7d, 0xa5, 0x74, 0x92, 0x24, + 0x34, 0xae, 0x4e, 0xc9, 0xfc, 0x59, 0xcb, 0x64, + 0xf4, 0x45, 0xb1, 0xac, 0x02, 0xf2, 0x87, 0xe7, + 0x2f, 0xfd, 0x01, 0xca, 0x78, 0x02, 0x2e, 0x3a, + 0x38, 0xcd, 0xb1, 0xe0, 0xf2, 0x2e, 0xf6, 0x27, + 0xa0, 0xac, 0x1f, 0x91, 0x43, 0xc2, 0x3d, 0x15, + 0x03, 0x02, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x9f, 0x30, 0x24, 0x56, + 0x2c, 0xde, 0xa0, 0xe6, 0x44, 0x35, 0x30, 0x51, + 0xec, 0xd4, 0x69, 0x2d, 0x46, 0x64, 0x04, 0x21, + 0xfe, 0x7c, 0x4d, 0xc5, 0xd0, 0x8c, 0xf9, 0xd2, + 0x3f, 0x88, 0x69, 0xd5, + }, +} + +// $ go test -run TestRunServer -serve -clientauth 1 \ +// -ciphersuites=0xc011 -minversion=0x0303 -maxversion=0x0303 +var tls12ServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1e, 0x01, 0x00, 0x01, + 0x1a, 0x03, 0x03, 0x51, 0xe5, 0x76, 0x84, 0x0e, + 0xb9, 0x17, 0xca, 0x08, 0x47, 0xd9, 0xbd, 0xd0, + 0x94, 0xd1, 0x97, 0xca, 0x5b, 0xe7, 0x20, 0xac, + 0x8e, 0xbb, 0xc7, 0x29, 0xe9, 0x26, 0xcf, 0x7d, + 0xb3, 0xdc, 0x99, 0x00, 0x00, 0x82, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 
0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0x00, 0x07, 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, + 0xc0, 0x02, 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, + 0x00, 0x12, 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, + 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, + 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, + 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, + 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, + 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, + 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, + 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, + 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, + 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, + 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, + 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, + 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, + 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x11, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x03, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 
0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x03, 0x01, 0x11, 0x0c, 0x00, 0x01, + 0x0d, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x04, 0x01, 0x00, 0x80, 0x4a, 0xf9, + 0xf5, 0x0a, 0x61, 0x37, 0x7e, 0x4e, 0x92, 0xb5, + 0x1c, 0x91, 0x21, 0xb2, 0xb5, 0x17, 0x00, 0xbf, + 0x01, 
0x5f, 0x30, 0xec, 0x62, 0x08, 0xd6, 0x9d, + 0x1a, 0x08, 0x05, 0x72, 0x8b, 0xf4, 0x49, 0x85, + 0xa7, 0xbf, 0x3f, 0x75, 0x58, 0x3e, 0x26, 0x82, + 0xc3, 0x28, 0x07, 0xf9, 0x41, 0x7d, 0x03, 0x14, + 0x3b, 0xc3, 0x05, 0x64, 0xff, 0x52, 0xf4, 0x75, + 0x6a, 0x87, 0xcd, 0xdf, 0x93, 0x31, 0x0a, 0x71, + 0x60, 0x17, 0xc6, 0x33, 0xf0, 0x79, 0xb6, 0x7b, + 0xd0, 0x9c, 0xa0, 0x5f, 0x74, 0x14, 0x2c, 0x5a, + 0xb4, 0x3f, 0x39, 0xf5, 0xe4, 0x9f, 0xbe, 0x6d, + 0x21, 0xd2, 0xa9, 0x42, 0xf7, 0xdc, 0xa6, 0x65, + 0xb7, 0x6a, 0x7e, 0x2e, 0x14, 0xd3, 0xf6, 0xf3, + 0x4b, 0x4c, 0x5b, 0x1a, 0x70, 0x7a, 0xbc, 0xb0, + 0x12, 0xf3, 0x6e, 0x0c, 0xcf, 0x43, 0x22, 0xae, + 0x5b, 0xba, 0x00, 0xf8, 0xfd, 0xaf, 0x16, 0x03, + 0x03, 0x00, 0x0f, 0x0d, 0x00, 0x00, 0x0b, 0x02, + 0x01, 0x40, 0x00, 0x04, 0x04, 0x01, 0x04, 0x03, + 0x00, 0x00, 0x16, 0x03, 0x03, 0x00, 0x04, 0x0e, + 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x01, 0xfb, 0x0b, 0x00, 0x01, + 0xf7, 0x00, 0x01, 0xf4, 0x00, 0x01, 0xf1, 0x30, + 0x82, 0x01, 0xed, 0x30, 0x82, 0x01, 0x58, 0xa0, + 0x03, 0x02, 0x01, 0x02, 0x02, 0x01, 0x00, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x31, 0x31, 0x32, 0x30, 0x38, 0x30, 0x37, + 0x35, 0x35, 0x31, 0x32, 0x5a, 0x17, 0x0d, 0x31, + 0x32, 0x31, 0x32, 0x30, 0x37, 0x30, 0x38, 0x30, + 0x30, 0x31, 0x32, 0x5a, 0x30, 0x26, 0x31, 0x10, + 0x30, 0x0e, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x07, 0x41, 0x63, 0x6d, 0x65, 0x20, 0x43, 0x6f, + 0x31, 0x12, 0x30, 0x10, 0x06, 0x03, 0x55, 0x04, + 0x03, 0x13, 0x09, 0x31, 0x32, 0x37, 0x2e, 0x30, + 0x2e, 0x30, 0x2e, 0x31, 0x30, 0x81, 0x9c, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x01, 0x03, 0x81, 0x8c, 0x00, + 0x30, 0x81, 0x88, 0x02, 0x81, 0x80, 0x4e, 0xd0, + 0x7b, 0x31, 0xe3, 0x82, 0x64, 0xd9, 0x59, 0xc0, + 0xc2, 0x87, 0xa4, 0x5e, 0x1e, 0x8b, 0x73, 0x33, + 0xc7, 0x63, 0x53, 0xdf, 0x66, 0x92, 0x06, 0x84, + 0xf6, 0x64, 0xd5, 0x8f, 0xe4, 0x36, 0xa7, 0x1d, + 0x2b, 0xe8, 0xb3, 0x20, 0x36, 0x45, 0x23, 0xb5, + 0xe3, 0x95, 0xae, 0xed, 0xe0, 0xf5, 0x20, 0x9c, + 0x8d, 0x95, 0xdf, 0x7f, 0x5a, 0x12, 0xef, 0x87, + 0xe4, 0x5b, 0x68, 0xe4, 0xe9, 0x0e, 0x74, 0xec, + 0x04, 0x8a, 0x7f, 0xde, 0x93, 0x27, 0xc4, 0x01, + 0x19, 0x7a, 0xbd, 0xf2, 0xdc, 0x3d, 0x14, 0xab, + 0xd0, 0x54, 0xca, 0x21, 0x0c, 0xd0, 0x4d, 0x6e, + 0x87, 0x2e, 0x5c, 0xc5, 0xd2, 0xbb, 0x4d, 0x4b, + 0x4f, 0xce, 0xb6, 0x2c, 0xf7, 0x7e, 0x88, 0xec, + 0x7c, 0xd7, 0x02, 0x91, 0x74, 0xa6, 0x1e, 0x0c, + 0x1a, 0xda, 0xe3, 0x4a, 0x5a, 0x2e, 0xde, 0x13, + 0x9c, 0x4c, 0x40, 0x88, 0x59, 0x93, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x32, 0x30, 0x30, 0x30, + 0x0e, 0x06, 0x03, 0x55, 0x1d, 0x0f, 0x01, 0x01, + 0xff, 0x04, 0x04, 0x03, 0x02, 0x00, 0xa0, 0x30, + 0x0d, 0x06, 0x03, 0x55, 0x1d, 0x0e, 0x04, 0x06, + 0x04, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, 0x0f, + 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, 0x08, 0x30, + 0x06, 0x80, 0x04, 0x01, 0x02, 0x03, 0x04, 0x30, + 0x0b, 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, + 0x0d, 0x01, 0x01, 0x05, 0x03, 0x81, 0x81, 0x00, + 0x36, 0x1f, 0xb3, 0x7a, 0x0c, 0x75, 0xc9, 0x6e, + 0x37, 0x46, 0x61, 0x2b, 0xd5, 0xbd, 0xc0, 0xa7, + 0x4b, 0xcc, 0x46, 0x9a, 0x81, 0x58, 0x7c, 0x85, + 0x79, 0x29, 0xc8, 0xc8, 0xc6, 0x67, 0xdd, 0x32, + 0x56, 0x45, 0x2b, 0x75, 0xb6, 0xe9, 0x24, 0xa9, + 0x50, 0x9a, 0xbe, 0x1f, 0x5a, 
0xfa, 0x1a, 0x15, + 0xd9, 0xcc, 0x55, 0x95, 0x72, 0x16, 0x83, 0xb9, + 0xc2, 0xb6, 0x8f, 0xfd, 0x88, 0x8c, 0x38, 0x84, + 0x1d, 0xab, 0x5d, 0x92, 0x31, 0x13, 0x4f, 0xfd, + 0x83, 0x3b, 0xc6, 0x9d, 0xf1, 0x11, 0x62, 0xb6, + 0x8b, 0xec, 0xab, 0x67, 0xbe, 0xc8, 0x64, 0xb0, + 0x11, 0x50, 0x46, 0x58, 0x17, 0x6b, 0x99, 0x1c, + 0xd3, 0x1d, 0xfc, 0x06, 0xf1, 0x0e, 0xe5, 0x96, + 0xa8, 0x0c, 0xf9, 0x78, 0x20, 0xb7, 0x44, 0x18, + 0x51, 0x8d, 0x10, 0x7e, 0x4f, 0x94, 0x67, 0xdf, + 0xa3, 0x4e, 0x70, 0x73, 0x8e, 0x90, 0x91, 0x85, + 0x16, 0x03, 0x03, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x01, 0x5d, 0x3a, 0x92, 0x59, + 0x7f, 0x9a, 0x22, 0x36, 0x0e, 0x1b, 0x1d, 0x2a, + 0x05, 0xb7, 0xa4, 0xb6, 0x5d, 0xfc, 0x51, 0x6e, + 0x15, 0xe5, 0x89, 0x7c, 0xe2, 0xfa, 0x87, 0x38, + 0x05, 0x79, 0x15, 0x92, 0xb4, 0x8f, 0x88, 0x8f, + 0x9d, 0x5d, 0xa0, 0xaf, 0xf8, 0xce, 0xf9, 0x6f, + 0x83, 0xf4, 0x08, 0x69, 0xe4, 0x91, 0xc5, 0xed, + 0xb9, 0xc5, 0xa8, 0x1f, 0x4b, 0xec, 0xef, 0x91, + 0xc1, 0xa3, 0x34, 0x24, 0x18, 0x00, 0x2d, 0xcd, + 0xe6, 0x44, 0xef, 0x5a, 0x3e, 0x52, 0x63, 0x5b, + 0x36, 0x1f, 0x7e, 0xce, 0x9e, 0xaa, 0xda, 0x8d, + 0xb5, 0xc9, 0xea, 0xd8, 0x1b, 0xd1, 0x1c, 0x7c, + 0x07, 0xfc, 0x3c, 0x2d, 0x70, 0x1f, 0xf9, 0x4d, + 0xcb, 0xaa, 0xad, 0x07, 0xd5, 0x6d, 0xbd, 0xa6, + 0x61, 0xf3, 0x2f, 0xa3, 0x9c, 0x45, 0x02, 0x4a, + 0xac, 0x6c, 0xb6, 0x37, 0x95, 0xb1, 0x4a, 0xb5, + 0x0a, 0x4e, 0x60, 0x67, 0xd7, 0xe0, 0x04, 0x16, + 0x03, 0x03, 0x00, 0x88, 0x0f, 0x00, 0x00, 0x84, + 0x04, 0x01, 0x00, 0x80, 0x08, 0x83, 0x53, 0xf0, + 0xf8, 0x14, 0xf5, 0xc2, 0xd1, 0x8b, 0xf0, 0xa5, + 0xc1, 0xd8, 0x1a, 0x36, 0x4b, 0x75, 0x77, 0x02, + 0x19, 0xd8, 0x11, 0x3f, 0x5a, 0x36, 0xfc, 0xe9, + 0x2b, 0x4b, 0xf9, 0xfe, 0xda, 0x8a, 0x0f, 0x6e, + 0x3d, 0xd3, 0x52, 0x87, 0xf7, 0x9c, 0x78, 0x39, + 0xa8, 0xf1, 0xd7, 0xf7, 0x4e, 0x35, 0x33, 0xf9, + 0xc5, 0x76, 0xa8, 0x12, 0xc4, 0x91, 0x33, 0x1d, + 0x93, 0x8c, 0xbf, 0xb1, 0x83, 0x00, 0x90, 0xc5, + 0x52, 0x3e, 0xe0, 0x0a, 0xe8, 0x92, 0x75, 0xdf, + 0x54, 0x5f, 0x9f, 0x95, 0x76, 0x62, 0xb5, 0x85, + 0x69, 0xa4, 0x86, 0x85, 0x6c, 0xf3, 0x6b, 0x2a, + 0x72, 0x7b, 0x4d, 0x42, 0x33, 0x67, 0x4a, 0xce, + 0xb5, 0xdb, 0x9b, 0xae, 0xc0, 0xb0, 0x10, 0xeb, + 0x3b, 0xf4, 0xc2, 0x9a, 0x64, 0x47, 0x4c, 0x1e, + 0xa5, 0x91, 0x7f, 0x6d, 0xd1, 0x03, 0xf5, 0x4a, + 0x90, 0x69, 0x18, 0xb1, 0x14, 0x03, 0x03, 0x00, + 0x01, 0x01, 0x16, 0x03, 0x03, 0x00, 0x24, 0x59, + 0xfc, 0x7e, 0xae, 0xb3, 0xbf, 0xab, 0x4d, 0xdb, + 0x4e, 0xab, 0xa9, 0x6d, 0x6b, 0x4c, 0x60, 0xb6, + 0x16, 0xe0, 0xab, 0x7f, 0x52, 0x2d, 0xa1, 0xfc, + 0xe1, 0x80, 0xd2, 0x8a, 0xa1, 0xe5, 0x8f, 0xa1, + 0x70, 0x93, 0x23, + }, + { + 0x16, 0x03, 0x03, 0x02, 0x67, 0x04, 0x00, 0x02, + 0x63, 0x00, 0x00, 0x00, 0x00, 0x02, 0x5d, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xea, 0x8b, 0xc5, 0xef, 0xba, 0x64, 0xb7, 0x23, + 0x08, 0x86, 0x4f, 0x37, 0xe0, 0x8f, 0xbd, 0x75, + 0x71, 0x2b, 0xcb, 0x20, 0x75, 0x11, 0x3b, 0xa2, + 0x9e, 0x39, 0x3c, 0x03, 0xef, 0x6e, 0x41, 0xd7, + 0xcf, 0x1a, 0x2c, 0xf2, 0xfe, 0xc2, 0xd3, 0x65, + 0x59, 0x00, 0x9d, 0x03, 0xb4, 0xf2, 0x20, 0xe4, + 0x33, 0x80, 0xcd, 0xf6, 0xe4, 0x59, 0x22, 0xf7, + 0xfd, 0x88, 0x0e, 0xa4, 0x09, 0xc0, 0x0d, 0x10, + 0x80, 0x10, 0x79, 0xee, 0x70, 0x96, 0xdb, 0x22, + 0x8b, 0xb7, 0xac, 0xe0, 0x98, 0xad, 0xe9, 0xe3, + 0xcb, 0xea, 0x9f, 0xe6, 0x83, 0x28, 0x7c, 0x7e, + 0x4e, 0x9a, 0x8d, 0xd9, 0xf3, 0x86, 0xf4, 0x89, + 0x8b, 0x79, 0x8f, 0xbb, 0xe9, 0x74, 0x02, 0x02, + 0x14, 0x04, 0xea, 0xba, 0x16, 0x10, 0xa1, 0x85, + 0xbe, 0x4e, 0x4e, 0x92, 0xc5, 0x83, 0xf6, 0x1e, + 0x1f, 
0xd4, 0x25, 0xc2, 0xc2, 0xb9, 0xce, 0x33, + 0x63, 0x66, 0x79, 0x1f, 0x54, 0x35, 0xc1, 0xe8, + 0x89, 0x34, 0x78, 0x94, 0x36, 0x14, 0xef, 0x01, + 0x1f, 0xf1, 0xbd, 0x77, 0x2c, 0x4d, 0xac, 0x5c, + 0x5c, 0x4a, 0xc6, 0xed, 0xd8, 0x0e, 0x72, 0x84, + 0x83, 0xdc, 0x56, 0x84, 0xc8, 0xf3, 0x89, 0x56, + 0xfd, 0x89, 0xc1, 0xc9, 0x9a, 0x29, 0x91, 0x7e, + 0x19, 0xe9, 0x8b, 0x5b, 0x11, 0x15, 0x4e, 0x6c, + 0xf4, 0x89, 0xe7, 0x6d, 0x68, 0x1e, 0xf9, 0x6c, + 0x23, 0x72, 0x05, 0x68, 0x82, 0x60, 0x84, 0x1f, + 0x83, 0x20, 0x09, 0x86, 0x10, 0x81, 0xec, 0xec, + 0xdc, 0x25, 0x53, 0x20, 0xfa, 0xa9, 0x41, 0x64, + 0xd6, 0x20, 0xf3, 0xf4, 0x52, 0xf2, 0x80, 0x62, + 0x83, 0xc9, 0x23, 0x66, 0x44, 0x95, 0x5a, 0x99, + 0x8a, 0xe1, 0x26, 0x63, 0xc1, 0x8b, 0x31, 0xf9, + 0x21, 0x06, 0x77, 0x04, 0x27, 0xf2, 0x0c, 0x63, + 0x83, 0x45, 0xa0, 0xa9, 0x7b, 0xcf, 0xdf, 0xd7, + 0x56, 0x75, 0xbc, 0xdd, 0x95, 0x36, 0xb1, 0x75, + 0x39, 0x05, 0x00, 0x3c, 0x8a, 0x79, 0xd6, 0xe9, + 0xf0, 0x4b, 0xdc, 0x51, 0x6b, 0x01, 0x94, 0x16, + 0x87, 0x12, 0x92, 0x6c, 0x07, 0xc1, 0xf5, 0x58, + 0xb7, 0x2a, 0x81, 0xf5, 0xa0, 0x37, 0x8b, 0xa6, + 0x22, 0xfe, 0x28, 0x0a, 0x7e, 0x68, 0xe2, 0xda, + 0x6c, 0x53, 0xee, 0x0e, 0x8d, 0x2d, 0x8b, 0x0b, + 0xda, 0xf8, 0x99, 0x3e, 0x0e, 0xed, 0x9f, 0xc1, + 0x2b, 0xf6, 0xfe, 0xe9, 0x52, 0x38, 0x7b, 0x83, + 0x9a, 0x50, 0xa6, 0xd7, 0x49, 0x83, 0x43, 0x7e, + 0x82, 0xec, 0xc7, 0x09, 0x3d, 0x3d, 0xb1, 0xee, + 0xe8, 0xc5, 0x6a, 0xc3, 0x3d, 0x4b, 0x4c, 0x6a, + 0xbb, 0x0b, 0x2c, 0x24, 0x2e, 0xdb, 0x7d, 0x57, + 0x87, 0xb4, 0x80, 0xa5, 0xae, 0xff, 0x54, 0xa8, + 0xa5, 0x27, 0x69, 0x95, 0xc8, 0xe7, 0x79, 0xc7, + 0x89, 0x2a, 0x73, 0x49, 0xcb, 0xf5, 0xc5, 0xbc, + 0x4a, 0xe0, 0x73, 0xa9, 0xbc, 0x88, 0x64, 0x96, + 0x98, 0xa5, 0x1e, 0xe3, 0x43, 0xc1, 0x7d, 0x78, + 0xc7, 0x94, 0x72, 0xd4, 0x2c, 0x6e, 0x85, 0x39, + 0x9a, 0xaf, 0xdb, 0xa1, 0xe9, 0xe2, 0xcb, 0x37, + 0x04, 0xc6, 0x8c, 0x81, 0xd3, 0x2a, 0xb7, 0xbe, + 0x6c, 0x07, 0x1f, 0x5e, 0xd9, 0x00, 0xd2, 0xf7, + 0xe1, 0xa7, 0xbc, 0x0c, 0xb6, 0x6d, 0xfb, 0x3f, + 0x3d, 0x24, 0xaa, 0xfb, 0x7e, 0xe1, 0xb5, 0x1b, + 0xff, 0x38, 0xaa, 0x69, 0x59, 0x38, 0x52, 0x9a, + 0x0e, 0x6d, 0xbc, 0xde, 0x4f, 0x13, 0x09, 0x17, + 0xc4, 0xa9, 0x05, 0x84, 0xbc, 0x50, 0xef, 0x40, + 0xb0, 0x4c, 0x24, 0x32, 0xed, 0x94, 0x2c, 0xdd, + 0xda, 0x20, 0x24, 0x67, 0xe2, 0xea, 0x71, 0x3d, + 0x4a, 0x04, 0x0d, 0x98, 0x29, 0x20, 0x4c, 0xeb, + 0x70, 0xce, 0x45, 0x9e, 0x5a, 0xaf, 0xb6, 0xa3, + 0x92, 0xc8, 0x28, 0xf2, 0xe3, 0xe8, 0x8a, 0x5d, + 0x0a, 0x33, 0x79, 0x9b, 0x6a, 0xf3, 0x30, 0x01, + 0x1d, 0x47, 0xbd, 0x01, 0xcc, 0x4d, 0x71, 0xc0, + 0x56, 0xfa, 0xfd, 0x37, 0xed, 0x0f, 0x27, 0xc0, + 0xbb, 0xa0, 0xee, 0xc3, 0x79, 0x8b, 0xe7, 0x41, + 0x8f, 0xfa, 0x3a, 0xcb, 0x45, 0x3b, 0x85, 0x9f, + 0x06, 0x90, 0xb2, 0x51, 0xc0, 0x48, 0x10, 0xac, + 0x2a, 0xec, 0xec, 0x48, 0x7a, 0x19, 0x47, 0xc4, + 0x2a, 0xeb, 0xb3, 0xa2, 0x07, 0x22, 0x32, 0x78, + 0xf4, 0x73, 0x5e, 0x92, 0x42, 0x15, 0xa1, 0x90, + 0x91, 0xd0, 0xeb, 0x12, 0x14, 0x03, 0x03, 0x00, + 0x01, 0x01, 0x16, 0x03, 0x03, 0x00, 0x24, 0x45, + 0x4b, 0x80, 0x42, 0x46, 0xde, 0xbb, 0xe7, 0x76, + 0xd1, 0x33, 0x92, 0xfc, 0x46, 0x17, 0x6d, 0x21, + 0xf6, 0x0e, 0x16, 0xca, 0x9b, 0x9b, 0x04, 0x65, + 0x16, 0x40, 0x44, 0x64, 0xbc, 0x58, 0xfa, 0x2a, + 0x49, 0xe9, 0xed, 0x17, 0x03, 0x03, 0x00, 0x21, + 0x89, 0x71, 0xcd, 0x56, 0x54, 0xbf, 0x73, 0xde, + 0xfb, 0x4b, 0x4e, 0xf1, 0x7f, 0xc6, 0x75, 0xa6, + 0xbd, 0x6b, 0x6c, 0xd9, 0xdc, 0x0c, 0x71, 0xb4, + 0xb9, 0xbb, 0x6e, 0xfa, 0x9e, 0xc7, 0xc7, 0x4c, + 0x24, 0x15, 0x03, 0x03, 0x00, 0x16, 0x62, 0xea, + 0x65, 0x69, 0x68, 0x4a, 0xce, 0xa7, 0x9e, 0xce, + 0xc0, 
0xf1, 0x5c, 0x96, 0xd9, 0x1f, 0x49, 0xac, + 0x2d, 0x05, 0x89, 0x94, + }, +} + +// cert.pem and key.pem were generated with generate_cert.go +// Thus, they have no ExtKeyUsage fields and trigger an error +// when verification is turned on. + +var clientCertificate = loadPEMCert(` +-----BEGIN CERTIFICATE----- +MIIB7TCCAVigAwIBAgIBADALBgkqhkiG9w0BAQUwJjEQMA4GA1UEChMHQWNtZSBD +bzESMBAGA1UEAxMJMTI3LjAuMC4xMB4XDTExMTIwODA3NTUxMloXDTEyMTIwNzA4 +MDAxMlowJjEQMA4GA1UEChMHQWNtZSBDbzESMBAGA1UEAxMJMTI3LjAuMC4xMIGc +MAsGCSqGSIb3DQEBAQOBjAAwgYgCgYBO0Hsx44Jk2VnAwoekXh6LczPHY1PfZpIG +hPZk1Y/kNqcdK+izIDZFI7Xjla7t4PUgnI2V339aEu+H5Fto5OkOdOwEin/ekyfE +ARl6vfLcPRSr0FTKIQzQTW6HLlzF0rtNS0/Otiz3fojsfNcCkXSmHgwa2uNKWi7e +E5xMQIhZkwIDAQABozIwMDAOBgNVHQ8BAf8EBAMCAKAwDQYDVR0OBAYEBAECAwQw +DwYDVR0jBAgwBoAEAQIDBDALBgkqhkiG9w0BAQUDgYEANh+zegx1yW43RmEr1b3A +p0vMRpqBWHyFeSnIyMZn3TJWRSt1tukkqVCavh9a+hoV2cxVlXIWg7nCto/9iIw4 +hB2rXZIxE0/9gzvGnfERYraL7KtnvshksBFQRlgXa5kc0x38BvEO5ZaoDPl4ILdE +GFGNEH5PlGffo05wc46QkYU= +-----END CERTIFICATE----- +`) + +/* corresponding key.pem for cert.pem is: +-----BEGIN RSA PRIVATE KEY----- +MIICWgIBAAKBgE7QezHjgmTZWcDCh6ReHotzM8djU99mkgaE9mTVj+Q2px0r6LMg +NkUjteOVru3g9SCcjZXff1oS74fkW2jk6Q507ASKf96TJ8QBGXq98tw9FKvQVMoh +DNBNbocuXMXSu01LT862LPd+iOx81wKRdKYeDBra40paLt4TnExAiFmTAgMBAAEC +gYBxvXd8yNteFTns8A/2yomEMC4yeosJJSpp1CsN3BJ7g8/qTnrVPxBy+RU+qr63 +t2WquaOu/cr5P8iEsa6lk20tf8pjKLNXeX0b1RTzK8rJLbS7nGzP3tvOhL096VtQ +dAo4ROEaro0TzYpHmpciSvxVIeEIAAdFDObDJPKqcJAxyQJBAJizfYgK8Gzx9fsx +hxp+VteCbVPg2euASH5Yv3K5LukRdKoSzHE2grUVQgN/LafC0eZibRanxHegYSr7 +7qaswKUCQQCEIWor/X4XTMdVj3Oj+vpiw75y/S9gh682+myZL+d/02IEkwnB098P +RkKVpenBHyrGg0oeN5La7URILWKj7CPXAkBKo6F+d+phNjwIFoN1Xb/RA32w/D1I +saG9sF+UEhRt9AxUfW/U/tIQ9V0ZHHcSg1XaCM5Nvp934brdKdvTOKnJAkBD5h/3 +Rybatlvg/fzBEaJFyq09zhngkxlZOUtBVTqzl17RVvY2orgH02U4HbCHy4phxOn7 +qTdQRYlHRftgnWK1AkANibn9PRYJ7mJyJ9Dyj2QeNcSkSTzrt0tPvUMf4+meJymN +1Ntu5+S1DLLzfxlaljWG6ylW6DNxujCyuXIV2rvA +-----END RSA PRIVATE KEY----- +*/ + +var clientECDSACertificate = loadPEMCert(` +-----BEGIN CERTIFICATE----- +MIIB/DCCAV4CCQCaMIRsJjXZFzAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw +EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0 +eSBMdGQwHhcNMTIxMTE0MTMyNTUzWhcNMjIxMTEyMTMyNTUzWjBBMQswCQYDVQQG +EwJBVTEMMAoGA1UECBMDTlNXMRAwDgYDVQQHEwdQeXJtb250MRIwEAYDVQQDEwlK +b2VsIFNpbmcwgZswEAYHKoZIzj0CAQYFK4EEACMDgYYABACVjJF1FMBexFe01MNv +ja5oHt1vzobhfm6ySD6B5U7ixohLZNz1MLvT/2XMW/TdtWo+PtAd3kfDdq0Z9kUs +jLzYHQFMH3CQRnZIi4+DzEpcj0B22uCJ7B0rxE4wdihBsmKo+1vx+U56jb0JuK7q +ixgnTy5w/hOWusPTQBbNZU6sER7m8TAJBgcqhkjOPQQBA4GMADCBiAJCAOAUxGBg +C3JosDJdYUoCdFzCgbkWqD8pyDbHgf9stlvZcPE4O1BIKJTLCRpS8V3ujfK58PDa +2RU6+b0DeoeiIzXsAkIBo9SKeDUcSpoj0gq+KxAxnZxfvuiRs9oa9V2jI/Umi0Vw +jWVim34BmT0Y9hCaOGGbLlfk+syxis7iI6CH8OFnUes= +-----END CERTIFICATE----- +`) + +/* corresponding key for cert is: +-----BEGIN EC PARAMETERS----- +BgUrgQQAIw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBkJN9X4IqZIguiEVKMqeBUP5xtRsEv4HJEtOpOGLELwO53SD78Ew8 +k+wLWoqizS3NpQyMtrU8JFdWfj+C57UNkOugBwYFK4EEACOhgYkDgYYABACVjJF1 +FMBexFe01MNvja5oHt1vzobhfm6ySD6B5U7ixohLZNz1MLvT/2XMW/TdtWo+PtAd +3kfDdq0Z9kUsjLzYHQFMH3CQRnZIi4+DzEpcj0B22uCJ7B0rxE4wdihBsmKo+1vx ++U56jb0JuK7qixgnTy5w/hOWusPTQBbNZU6sER7m8Q== +-----END EC PRIVATE KEY----- +*/ +var clientauthECDSATests = []clientauthTest{ + // Server asks for cert with empty CA list, client gives one + // go test -run "TestRunServer" -serve \ + // -clientauth 1 -ciphersuites=0xc00a + // openssl s_client -host 127.0.0.1 -port 10443 \ + // -cipher ECDHE-ECDSA-AES256-SHA 
-key client.key -cert client.crt + {"RequestClientCert, client gives it", RequestClientCert, []*x509.Certificate{clientECDSACertificate}, [][]byte{ + { + 0x16, 0x03, 0x01, 0x00, 0xa0, 0x01, 0x00, 0x00, + 0x9c, 0x03, 0x03, 0x51, 0xe5, 0x73, 0xc5, 0xae, + 0x51, 0x94, 0xb4, 0xf2, 0xe8, 0xf6, 0x03, 0x0e, + 0x3b, 0x34, 0xaf, 0xf0, 0xdc, 0x1b, 0xcc, 0xd8, + 0x0c, 0x45, 0x82, 0xd4, 0xd6, 0x76, 0x04, 0x6e, + 0x4f, 0x7a, 0x24, 0x00, 0x00, 0x04, 0xc0, 0x0a, + 0x00, 0xff, 0x01, 0x00, 0x00, 0x6f, 0x00, 0x0b, + 0x00, 0x04, 0x03, 0x00, 0x01, 0x02, 0x00, 0x0a, + 0x00, 0x34, 0x00, 0x32, 0x00, 0x0e, 0x00, 0x0d, + 0x00, 0x19, 0x00, 0x0b, 0x00, 0x0c, 0x00, 0x18, + 0x00, 0x09, 0x00, 0x0a, 0x00, 0x16, 0x00, 0x17, + 0x00, 0x08, 0x00, 0x06, 0x00, 0x07, 0x00, 0x14, + 0x00, 0x15, 0x00, 0x04, 0x00, 0x05, 0x00, 0x12, + 0x00, 0x13, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, + 0x00, 0x0f, 0x00, 0x10, 0x00, 0x11, 0x00, 0x23, + 0x00, 0x00, 0x00, 0x0d, 0x00, 0x22, 0x00, 0x20, + 0x06, 0x01, 0x06, 0x02, 0x06, 0x03, 0x05, 0x01, + 0x05, 0x02, 0x05, 0x03, 0x04, 0x01, 0x04, 0x02, + 0x04, 0x03, 0x03, 0x01, 0x03, 0x02, 0x03, 0x03, + 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x01, 0x01, + 0x00, 0x0f, 0x00, 0x01, 0x01, + }, + { + 0x16, 0x03, 0x01, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x0a, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x02, 0x0e, 0x0b, 0x00, 0x02, 0x0a, 0x00, 0x02, + 0x07, 0x00, 0x02, 0x04, 0x30, 0x82, 0x02, 0x00, + 0x30, 0x82, 0x01, 0x62, 0x02, 0x09, 0x00, 0xb8, + 0xbf, 0x2d, 0x47, 0xa0, 0xd2, 0xeb, 0xf4, 0x30, + 0x09, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, + 0x04, 0x01, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x32, 0x31, + 0x31, 0x32, 0x32, 0x31, 0x35, 0x30, 0x36, 0x33, + 0x32, 0x5a, 0x17, 0x0d, 0x32, 0x32, 0x31, 0x31, + 0x32, 0x30, 0x31, 0x35, 0x30, 0x36, 0x33, 0x32, + 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, 0x06, + 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, 0x55, + 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, 0x04, + 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, 0x2d, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, 0x30, + 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, 0x18, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, + 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, 0x73, + 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, 0x64, + 0x30, 0x81, 0x9b, 0x30, 0x10, 0x06, 0x07, 0x2a, + 0x86, 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x05, + 0x2b, 0x81, 0x04, 0x00, 0x23, 0x03, 0x81, 0x86, + 0x00, 0x04, 0x00, 0xc4, 0xa1, 0xed, 0xbe, 0x98, + 0xf9, 0x0b, 0x48, 0x73, 0x36, 0x7e, 0xc3, 0x16, + 0x56, 0x11, 0x22, 0xf2, 0x3d, 0x53, 0xc3, 0x3b, + 0x4d, 0x21, 0x3d, 0xcd, 0x6b, 0x75, 0xe6, 0xf6, + 0xb0, 0xdc, 0x9a, 0xdf, 0x26, 0xc1, 0xbc, 0xb2, + 0x87, 0xf0, 0x72, 0x32, 0x7c, 0xb3, 0x64, 0x2f, + 0x1c, 0x90, 0xbc, 0xea, 0x68, 0x23, 0x10, 0x7e, + 0xfe, 0xe3, 0x25, 0xc0, 0x48, 0x3a, 0x69, 0xe0, + 0x28, 0x6d, 0xd3, 0x37, 0x00, 0xef, 0x04, 0x62, + 0xdd, 0x0d, 0xa0, 0x9c, 0x70, 0x62, 0x83, 0xd8, + 0x81, 
0xd3, 0x64, 0x31, 0xaa, 0x9e, 0x97, 0x31, + 0xbd, 0x96, 0xb0, 0x68, 0xc0, 0x9b, 0x23, 0xde, + 0x76, 0x64, 0x3f, 0x1a, 0x5c, 0x7f, 0xe9, 0x12, + 0x0e, 0x58, 0x58, 0xb6, 0x5f, 0x70, 0xdd, 0x9b, + 0xd8, 0xea, 0xd5, 0xd7, 0xf5, 0xd5, 0xcc, 0xb9, + 0xb6, 0x9f, 0x30, 0x66, 0x5b, 0x66, 0x9a, 0x20, + 0xe2, 0x27, 0xe5, 0xbf, 0xfe, 0x3b, 0x30, 0x09, + 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, + 0x01, 0x03, 0x81, 0x8c, 0x00, 0x30, 0x81, 0x88, + 0x02, 0x42, 0x01, 0x88, 0xa2, 0x4f, 0xeb, 0xe2, + 0x45, 0xc5, 0x48, 0x7d, 0x1b, 0xac, 0xf5, 0xed, + 0x98, 0x9d, 0xae, 0x47, 0x70, 0xc0, 0x5e, 0x1b, + 0xb6, 0x2f, 0xbd, 0xf1, 0xb6, 0x4d, 0xb7, 0x61, + 0x40, 0xd3, 0x11, 0xa2, 0xce, 0xee, 0x0b, 0x7e, + 0x92, 0x7e, 0xff, 0x76, 0x9d, 0xc3, 0x3b, 0x7e, + 0xa5, 0x3f, 0xce, 0xfa, 0x10, 0xe2, 0x59, 0xec, + 0x47, 0x2d, 0x7c, 0xac, 0xda, 0x4e, 0x97, 0x0e, + 0x15, 0xa0, 0x6f, 0xd0, 0x02, 0x42, 0x01, 0x4d, + 0xfc, 0xbe, 0x67, 0x13, 0x9c, 0x2d, 0x05, 0x0e, + 0xbd, 0x3f, 0xa3, 0x8c, 0x25, 0xc1, 0x33, 0x13, + 0x83, 0x0d, 0x94, 0x06, 0xbb, 0xd4, 0x37, 0x7a, + 0xf6, 0xec, 0x7a, 0xc9, 0x86, 0x2e, 0xdd, 0xd7, + 0x11, 0x69, 0x7f, 0x85, 0x7c, 0x56, 0xde, 0xfb, + 0x31, 0x78, 0x2b, 0xe4, 0xc7, 0x78, 0x0d, 0xae, + 0xcb, 0xbe, 0x9e, 0x4e, 0x36, 0x24, 0x31, 0x7b, + 0x6a, 0x0f, 0x39, 0x95, 0x12, 0x07, 0x8f, 0x2a, + 0x16, 0x03, 0x01, 0x01, 0x1a, 0x0c, 0x00, 0x01, + 0x16, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x00, 0x8b, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, + 0x04, 0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, + 0x95, 0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, + 0x3f, 0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, + 0x4d, 0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, + 0xe7, 0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, + 0xff, 0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, + 0x6a, 0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, + 0xe5, 0xbd, 0x66, 0x02, 0x42, 0x00, 0xad, 0x7d, + 0x06, 0x35, 0xab, 0xec, 0x8d, 0xac, 0xd4, 0xba, + 0x1b, 0x49, 0x5e, 0x05, 0x5f, 0xf0, 0x97, 0x93, + 0x82, 0xb8, 0x2b, 0x8d, 0x91, 0x98, 0x63, 0x8e, + 0xb4, 0x14, 0x62, 0xdb, 0x1e, 0xc9, 0x2b, 0x30, + 0xf8, 0x41, 0x9b, 0xa6, 0xe6, 0xbc, 0xde, 0x0e, + 0x68, 0x30, 0x21, 0xf4, 0xa8, 0xa9, 0x1b, 0xec, + 0x44, 0x4f, 0x5d, 0x02, 0x2f, 0x60, 0x45, 0x60, + 0xba, 0xe0, 0x4e, 0xc0, 0xd4, 0x3b, 0x01, 0x16, + 0x03, 0x01, 0x00, 0x09, 0x0d, 0x00, 0x00, 0x05, + 0x02, 0x01, 0x40, 0x00, 0x00, 0x16, 0x03, 0x01, + 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x01, 0x02, 0x0a, 0x0b, 0x00, 0x02, + 0x06, 0x00, 0x02, 0x03, 0x00, 0x02, 0x00, 0x30, + 0x82, 0x01, 0xfc, 0x30, 0x82, 0x01, 0x5e, 0x02, + 0x09, 0x00, 0x9a, 0x30, 0x84, 0x6c, 0x26, 0x35, + 0xd9, 0x17, 0x30, 0x09, 0x06, 0x07, 0x2a, 0x86, + 0x48, 0xce, 0x3d, 0x04, 0x01, 0x30, 0x45, 0x31, + 0x0b, 0x30, 
0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, + 0x31, 0x32, 0x31, 0x31, 0x31, 0x34, 0x31, 0x33, + 0x32, 0x35, 0x35, 0x33, 0x5a, 0x17, 0x0d, 0x32, + 0x32, 0x31, 0x31, 0x31, 0x32, 0x31, 0x33, 0x32, + 0x35, 0x35, 0x33, 0x5a, 0x30, 0x41, 0x31, 0x0b, + 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, + 0x02, 0x41, 0x55, 0x31, 0x0c, 0x30, 0x0a, 0x06, + 0x03, 0x55, 0x04, 0x08, 0x13, 0x03, 0x4e, 0x53, + 0x57, 0x31, 0x10, 0x30, 0x0e, 0x06, 0x03, 0x55, + 0x04, 0x07, 0x13, 0x07, 0x50, 0x79, 0x72, 0x6d, + 0x6f, 0x6e, 0x74, 0x31, 0x12, 0x30, 0x10, 0x06, + 0x03, 0x55, 0x04, 0x03, 0x13, 0x09, 0x4a, 0x6f, + 0x65, 0x6c, 0x20, 0x53, 0x69, 0x6e, 0x67, 0x30, + 0x81, 0x9b, 0x30, 0x10, 0x06, 0x07, 0x2a, 0x86, + 0x48, 0xce, 0x3d, 0x02, 0x01, 0x06, 0x05, 0x2b, + 0x81, 0x04, 0x00, 0x23, 0x03, 0x81, 0x86, 0x00, + 0x04, 0x00, 0x95, 0x8c, 0x91, 0x75, 0x14, 0xc0, + 0x5e, 0xc4, 0x57, 0xb4, 0xd4, 0xc3, 0x6f, 0x8d, + 0xae, 0x68, 0x1e, 0xdd, 0x6f, 0xce, 0x86, 0xe1, + 0x7e, 0x6e, 0xb2, 0x48, 0x3e, 0x81, 0xe5, 0x4e, + 0xe2, 0xc6, 0x88, 0x4b, 0x64, 0xdc, 0xf5, 0x30, + 0xbb, 0xd3, 0xff, 0x65, 0xcc, 0x5b, 0xf4, 0xdd, + 0xb5, 0x6a, 0x3e, 0x3e, 0xd0, 0x1d, 0xde, 0x47, + 0xc3, 0x76, 0xad, 0x19, 0xf6, 0x45, 0x2c, 0x8c, + 0xbc, 0xd8, 0x1d, 0x01, 0x4c, 0x1f, 0x70, 0x90, + 0x46, 0x76, 0x48, 0x8b, 0x8f, 0x83, 0xcc, 0x4a, + 0x5c, 0x8f, 0x40, 0x76, 0xda, 0xe0, 0x89, 0xec, + 0x1d, 0x2b, 0xc4, 0x4e, 0x30, 0x76, 0x28, 0x41, + 0xb2, 0x62, 0xa8, 0xfb, 0x5b, 0xf1, 0xf9, 0x4e, + 0x7a, 0x8d, 0xbd, 0x09, 0xb8, 0xae, 0xea, 0x8b, + 0x18, 0x27, 0x4f, 0x2e, 0x70, 0xfe, 0x13, 0x96, + 0xba, 0xc3, 0xd3, 0x40, 0x16, 0xcd, 0x65, 0x4e, + 0xac, 0x11, 0x1e, 0xe6, 0xf1, 0x30, 0x09, 0x06, + 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x04, 0x01, + 0x03, 0x81, 0x8c, 0x00, 0x30, 0x81, 0x88, 0x02, + 0x42, 0x00, 0xe0, 0x14, 0xc4, 0x60, 0x60, 0x0b, + 0x72, 0x68, 0xb0, 0x32, 0x5d, 0x61, 0x4a, 0x02, + 0x74, 0x5c, 0xc2, 0x81, 0xb9, 0x16, 0xa8, 0x3f, + 0x29, 0xc8, 0x36, 0xc7, 0x81, 0xff, 0x6c, 0xb6, + 0x5b, 0xd9, 0x70, 0xf1, 0x38, 0x3b, 0x50, 0x48, + 0x28, 0x94, 0xcb, 0x09, 0x1a, 0x52, 0xf1, 0x5d, + 0xee, 0x8d, 0xf2, 0xb9, 0xf0, 0xf0, 0xda, 0xd9, + 0x15, 0x3a, 0xf9, 0xbd, 0x03, 0x7a, 0x87, 0xa2, + 0x23, 0x35, 0xec, 0x02, 0x42, 0x01, 0xa3, 0xd4, + 0x8a, 0x78, 0x35, 0x1c, 0x4a, 0x9a, 0x23, 0xd2, + 0x0a, 0xbe, 0x2b, 0x10, 0x31, 0x9d, 0x9c, 0x5f, + 0xbe, 0xe8, 0x91, 0xb3, 0xda, 0x1a, 0xf5, 0x5d, + 0xa3, 0x23, 0xf5, 0x26, 0x8b, 0x45, 0x70, 0x8d, + 0x65, 0x62, 0x9b, 0x7e, 0x01, 0x99, 0x3d, 0x18, + 0xf6, 0x10, 0x9a, 0x38, 0x61, 0x9b, 0x2e, 0x57, + 0xe4, 0xfa, 0xcc, 0xb1, 0x8a, 0xce, 0xe2, 0x23, + 0xa0, 0x87, 0xf0, 0xe1, 0x67, 0x51, 0xeb, 0x16, + 0x03, 0x01, 0x00, 0x8a, 0x10, 0x00, 0x00, 0x86, + 0x85, 0x04, 0x00, 0xcd, 0x1c, 0xe8, 0x66, 0x5b, + 0xa8, 0x9d, 0x83, 0x2f, 0x7e, 0x1d, 0x0b, 0x59, + 0x23, 0xbc, 0x30, 0xcf, 0xa3, 0xaf, 0x21, 0xdc, + 0xf2, 0x57, 0x49, 0x56, 0x30, 0x25, 0x7c, 0x84, + 0x5d, 0xad, 0xaa, 0x9c, 0x7b, 0x2a, 0x95, 0x58, + 0x3d, 0x30, 0x87, 0x01, 0x3b, 0xb7, 0xea, 0xcb, + 0xc4, 0xa3, 0xeb, 0x22, 0xbf, 0x2d, 0x61, 0x17, + 0x8c, 0x9b, 0xe8, 0x1b, 0xb2, 0x87, 0x16, 0x78, + 0xd5, 0xfd, 0x8b, 0xdd, 0x00, 0x0f, 0xda, 0x8e, + 0xfd, 0x28, 0x36, 0xeb, 0xe4, 0xc5, 0x42, 0x14, + 0xc7, 0xbd, 
0x29, 0x5e, 0x9a, 0xed, 0x5e, 0xc1, + 0xf7, 0xf4, 0xbd, 0xbd, 0x15, 0x9c, 0xe8, 0x44, + 0x71, 0xa7, 0xb6, 0xe9, 0xfa, 0x7e, 0x97, 0xcb, + 0x96, 0x3e, 0x53, 0x76, 0xfb, 0x11, 0x1f, 0x36, + 0x8f, 0x30, 0xfb, 0x71, 0x3a, 0x75, 0x3a, 0x25, + 0x7b, 0xa2, 0xc1, 0xf9, 0x3e, 0x58, 0x5f, 0x07, + 0x16, 0xed, 0xe1, 0xf7, 0xc1, 0xb1, 0x16, 0x03, + 0x01, 0x00, 0x90, 0x0f, 0x00, 0x00, 0x8c, 0x00, + 0x8a, 0x30, 0x81, 0x87, 0x02, 0x42, 0x00, 0xb2, + 0xd3, 0x91, 0xe6, 0xd5, 0x9b, 0xb2, 0xb8, 0x03, + 0xf4, 0x85, 0x4d, 0x43, 0x79, 0x1f, 0xb6, 0x6f, + 0x0c, 0xcd, 0x67, 0x5f, 0x5e, 0xca, 0xee, 0xb3, + 0xe4, 0xab, 0x1e, 0x58, 0xc3, 0x04, 0xa9, 0x8a, + 0xa7, 0xcf, 0xaa, 0x33, 0x88, 0xd5, 0x35, 0xd2, + 0x80, 0x8f, 0xfa, 0x1b, 0x3c, 0x3d, 0xf7, 0x80, + 0x50, 0xde, 0x80, 0x30, 0x64, 0xee, 0xc0, 0xb3, + 0x91, 0x6e, 0x5d, 0x1e, 0xc0, 0xdc, 0x3a, 0x93, + 0x02, 0x41, 0x4e, 0xca, 0x98, 0x41, 0x8c, 0x36, + 0xf2, 0x12, 0xbf, 0x8e, 0x0f, 0x69, 0x8e, 0xf8, + 0x7b, 0x9d, 0xba, 0x9c, 0x5c, 0x48, 0x79, 0xf4, + 0xba, 0x3d, 0x06, 0xa5, 0xab, 0x47, 0xe0, 0x1a, + 0x45, 0x28, 0x3a, 0x8f, 0xbf, 0x14, 0x24, 0x36, + 0xd1, 0x1d, 0x29, 0xdc, 0xde, 0x72, 0x5b, 0x76, + 0x41, 0x67, 0xe8, 0xe5, 0x71, 0x4a, 0x77, 0xe9, + 0xed, 0x02, 0x19, 0xdd, 0xe4, 0xaa, 0xe9, 0x2d, + 0xe7, 0x47, 0x32, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0xfa, 0xc3, + 0xf2, 0x35, 0xd0, 0x6d, 0x32, 0x78, 0x6a, 0xd6, + 0xe6, 0x70, 0x5e, 0x00, 0x4c, 0x35, 0xf1, 0xe0, + 0x21, 0xcf, 0xc3, 0x78, 0xcd, 0xe0, 0x2b, 0x0b, + 0xf4, 0xeb, 0xf9, 0xc0, 0x38, 0xf2, 0x9a, 0x31, + 0x55, 0x07, 0x2b, 0x8d, 0x68, 0x40, 0x31, 0x08, + 0xaa, 0xe3, 0x16, 0xcf, 0x4b, 0xd4, + }, + { + 0x16, 0x03, 0x01, 0x02, 0x76, 0x04, 0x00, 0x02, + 0x72, 0x00, 0x00, 0x00, 0x00, 0x02, 0x6c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xe8, 0x8b, 0xde, 0xef, 0xba, 0xf9, 0xdb, 0x95, + 0x24, 0xa5, 0x49, 0xb3, 0x23, 0xd8, 0x73, 0x88, + 0x50, 0x42, 0xed, 0xeb, 0xa3, 0xd8, 0xab, 0x31, + 0x9c, 0xd0, 0x00, 0x01, 0xef, 0xc0, 0xbf, 0xab, + 0x59, 0x55, 0xb5, 0xb9, 0xef, 0xa5, 0xa6, 0xec, + 0x69, 0xed, 0x00, 0x2f, 0x47, 0xdb, 0x75, 0x52, + 0x0c, 0xe5, 0x86, 0xb7, 0x02, 0x59, 0x22, 0xf7, + 0xfd, 0x8b, 0xff, 0xa4, 0x09, 0xc0, 0x1c, 0x10, + 0x80, 0x10, 0x7f, 0x4c, 0x7a, 0x94, 0x40, 0x10, + 0x0d, 0xda, 0x8a, 0xe5, 0x4a, 0xbc, 0xd0, 0xc0, + 0x4b, 0xa5, 0x33, 0x97, 0xc6, 0xe7, 0x40, 0x7f, + 0x7f, 0x8c, 0xf9, 0xf8, 0xc8, 0xb8, 0xfb, 0x8c, + 0xdd, 0x28, 0x81, 0xae, 0xfd, 0x37, 0x20, 0x3a, + 0x40, 0x37, 0x99, 0xc4, 0x21, 0x01, 0xc4, 0x91, + 0xb0, 0x5e, 0x11, 0xc5, 0xa9, 0xfd, 0x9a, 0x02, + 0x7e, 0x97, 0x6a, 0x86, 0x89, 0xb8, 0xc1, 0x32, + 0x4c, 0x7e, 0x6d, 0x47, 0x61, 0x0e, 0xe3, 0xc2, + 0xf0, 0x62, 0x3c, 0xc6, 0x71, 0x4f, 0xbb, 0x47, + 0x65, 0xb1, 0xd9, 0x22, 0x79, 0x15, 0xea, 0x1f, + 0x4b, 0x2a, 0x8a, 0xa4, 0xc8, 0x73, 0x34, 0xba, + 0x83, 0xe4, 0x70, 0x99, 0xc9, 0xcf, 0xbe, 0x64, + 0x99, 0xb9, 0xfa, 0xe9, 0xaf, 0x5d, 0xc7, 0x20, + 0x26, 0xde, 0xc5, 0x06, 0x12, 0x36, 0x4f, 0x4d, + 0xc0, 0xbb, 0x81, 0x5b, 0x5e, 0x38, 0xc3, 0x07, + 0x21, 0x04, 0x1a, 0x53, 0x9c, 0x59, 0xac, 0x2d, + 0xe6, 0xa5, 0x93, 0xa5, 0x19, 0xc6, 0xb0, 0xf7, + 0x56, 0x5d, 0xdf, 0xd1, 0xf4, 0xfd, 0x44, 0x6d, + 0xc6, 0xa2, 0x31, 0xa7, 0x35, 0x42, 0x18, 0x50, + 0x0c, 0x4f, 0x6e, 0xe3, 0x3b, 0xa3, 0xaa, 0x1c, + 0xbe, 0x41, 0x0d, 0xce, 0x6c, 0x62, 0xe1, 0x96, + 0x2d, 0xbd, 0x14, 0x31, 0xe3, 0xc4, 0x5b, 0xbf, + 0xf6, 0xde, 0xec, 0x42, 0xe8, 0xc7, 0x2a, 0x0b, + 0xdb, 0x2d, 0x7c, 0xf0, 0x3f, 0x45, 0x32, 0x45, + 0x09, 0x47, 0x09, 0x0f, 0x21, 0x22, 0x45, 0x06, + 0x11, 0xb8, 0xf9, 
0xe6, 0x67, 0x90, 0x4b, 0x4a, + 0xde, 0x81, 0xfb, 0xeb, 0xe7, 0x9a, 0x08, 0x30, + 0xcf, 0x51, 0xe1, 0xd9, 0xfa, 0x79, 0xa3, 0xcc, + 0x65, 0x1a, 0x83, 0x86, 0xc9, 0x7a, 0x41, 0xf5, + 0xdf, 0xa0, 0x7c, 0x44, 0x23, 0x17, 0xf3, 0x62, + 0xe8, 0xa9, 0x31, 0x1e, 0x6b, 0x05, 0x4b, 0x4f, + 0x9d, 0x91, 0x46, 0x92, 0xa6, 0x25, 0x32, 0xca, + 0xa1, 0x75, 0xda, 0xe6, 0x80, 0x3e, 0x7f, 0xd1, + 0x26, 0x57, 0x07, 0x42, 0xe4, 0x91, 0xff, 0xbd, + 0x44, 0xae, 0x98, 0x5c, 0x1d, 0xdf, 0x11, 0xe3, + 0xae, 0x87, 0x5e, 0xb7, 0x69, 0xad, 0x34, 0x7f, + 0x3a, 0x07, 0x7c, 0xdf, 0xfc, 0x76, 0x17, 0x8b, + 0x62, 0xc8, 0xe1, 0x78, 0x2a, 0xc8, 0xb9, 0x8a, + 0xbb, 0x5c, 0xfb, 0x38, 0x74, 0x91, 0x6e, 0x12, + 0x0c, 0x1f, 0x8e, 0xe1, 0xc2, 0x01, 0xd8, 0x9d, + 0x23, 0x0f, 0xc4, 0x67, 0x5d, 0xe5, 0x67, 0x4b, + 0x94, 0x6e, 0x69, 0x72, 0x90, 0x2d, 0x52, 0x78, + 0x8e, 0x61, 0xba, 0xdf, 0x4e, 0xf5, 0xdc, 0xfb, + 0x73, 0xbe, 0x03, 0x70, 0xd9, 0x01, 0x30, 0xf3, + 0xa1, 0xbb, 0x9a, 0x5f, 0xec, 0x9e, 0xed, 0x8d, + 0xdd, 0x53, 0xfd, 0x60, 0xc3, 0x2b, 0x7a, 0x00, + 0x2c, 0xf9, 0x0a, 0x57, 0x47, 0x45, 0x43, 0xb3, + 0x23, 0x01, 0x9c, 0xee, 0x54, 0x4d, 0x58, 0xd3, + 0x71, 0x1c, 0xc9, 0xd3, 0x30, 0x9e, 0x14, 0xa5, + 0xf3, 0xbf, 0x4d, 0x9b, 0xb7, 0x13, 0x21, 0xae, + 0xd2, 0x8d, 0x6e, 0x6f, 0x1c, 0xcc, 0xb2, 0x41, + 0xb2, 0x64, 0x56, 0x83, 0xce, 0xd1, 0x0c, 0x79, + 0x32, 0x78, 0xef, 0xc5, 0x21, 0xb1, 0xe8, 0xc4, + 0x42, 0xa7, 0x8d, 0xc1, 0xfa, 0xa1, 0x9c, 0x3c, + 0x21, 0xd8, 0xe9, 0x90, 0xe2, 0x7c, 0x14, 0x26, + 0xfe, 0x61, 0x3e, 0xf9, 0x71, 0x1d, 0x5d, 0x49, + 0x3b, 0xb1, 0xb8, 0x42, 0xa1, 0xb8, 0x1c, 0x75, + 0x7d, 0xee, 0xed, 0xfc, 0xe6, 0x20, 0x2b, 0x9e, + 0x10, 0x52, 0xda, 0x56, 0x4d, 0x64, 0x6c, 0x41, + 0xc1, 0xf7, 0x60, 0x0c, 0x10, 0x65, 0x6f, 0xd4, + 0xe9, 0x9b, 0x0d, 0x83, 0x13, 0xc8, 0x5a, 0xa3, + 0x56, 0x2a, 0x42, 0xc6, 0x1c, 0xfe, 0xdb, 0xba, + 0x3d, 0x04, 0x12, 0xfd, 0x28, 0xeb, 0x78, 0xdd, + 0xbc, 0xc8, 0x0d, 0xa1, 0xce, 0xd4, 0x54, 0xbf, + 0xaf, 0xe1, 0x60, 0x0c, 0xa3, 0xc3, 0xc3, 0x62, + 0x58, 0xc1, 0x79, 0xa7, 0x95, 0x41, 0x09, 0x24, + 0xc6, 0x9a, 0x50, 0x14, 0x03, 0x01, 0x00, 0x01, + 0x01, 0x16, 0x03, 0x01, 0x00, 0x30, 0x4d, 0x7b, + 0x5f, 0x28, 0x5e, 0x68, 0x6c, 0xa3, 0x65, 0xc7, + 0x7e, 0x49, 0x6c, 0xb3, 0x67, 0xbb, 0xd0, 0x75, + 0xa2, 0x9e, 0x8c, 0x92, 0x4f, 0x8c, 0x33, 0x14, + 0x7c, 0x6c, 0xf1, 0x74, 0x97, 0xc3, 0xe0, 0x10, + 0xe9, 0x0d, 0xc2, 0x30, 0x5c, 0x23, 0xee, 0x1d, + 0x16, 0x2e, 0xb9, 0x96, 0x2b, 0x2d, 0x17, 0x03, + 0x01, 0x00, 0x20, 0xf2, 0xc8, 0xa7, 0x1b, 0x60, + 0x46, 0xee, 0xe5, 0x7e, 0xc9, 0x35, 0xb3, 0xf1, + 0x7c, 0x32, 0x0c, 0x85, 0x94, 0x59, 0x57, 0x27, + 0xb0, 0xbd, 0x52, 0x86, 0x90, 0xf1, 0xb7, 0x4d, + 0x1e, 0xc1, 0x16, 0x17, 0x03, 0x01, 0x00, 0x30, + 0xff, 0x85, 0x50, 0xdf, 0x3f, 0xfc, 0xa2, 0x61, + 0x1a, 0x12, 0xc0, 0x1e, 0x10, 0x32, 0x88, 0x50, + 0xa0, 0x2c, 0x80, 0xda, 0x77, 0xea, 0x09, 0x47, + 0xe0, 0x85, 0x07, 0x29, 0x45, 0x65, 0x19, 0xa3, + 0x8d, 0x99, 0xb8, 0xbf, 0xb6, 0xbc, 0x76, 0xe2, + 0x50, 0x24, 0x82, 0x0a, 0xfd, 0xdd, 0x35, 0x09, + 0x15, 0x03, 0x01, 0x00, 0x20, 0xe7, 0x36, 0xf6, + 0x61, 0xd2, 0x95, 0x3c, 0xb6, 0x65, 0x7b, 0xb2, + 0xb8, 0xdf, 0x03, 0x53, 0xeb, 0xf7, 0x16, 0xe0, + 0xe0, 0x15, 0x22, 0x71, 0x70, 0x62, 0x73, 0xad, + 0xb5, 0x1a, 0x77, 0x44, 0x57, + }, + }}, +} + +var aesGCMServerScript = [][]byte{ + { + 0x16, 0x03, 0x01, 0x01, 0x1c, 0x01, 0x00, 0x01, + 0x18, 0x03, 0x03, 0x52, 0x1e, 0x74, 0xf0, 0xb0, + 0xc1, 0x8b, 0x16, 0xf9, 0x74, 0xfc, 0x16, 0xc4, + 0x11, 0x18, 0x96, 0x08, 0x25, 0x38, 0x4f, 0x98, + 0x98, 0xbe, 0xb5, 0x61, 0xdf, 0x94, 0x15, 0xcc, + 0x9b, 0x61, 0xef, 0x00, 0x00, 
0x80, 0xc0, 0x30, + 0xc0, 0x2c, 0xc0, 0x28, 0xc0, 0x24, 0xc0, 0x14, + 0xc0, 0x0a, 0x00, 0xa3, 0x00, 0x9f, 0x00, 0x6b, + 0x00, 0x6a, 0x00, 0x39, 0x00, 0x38, 0xc0, 0x32, + 0xc0, 0x2e, 0xc0, 0x2a, 0xc0, 0x26, 0xc0, 0x0f, + 0xc0, 0x05, 0x00, 0x9d, 0x00, 0x3d, 0x00, 0x35, + 0xc0, 0x12, 0xc0, 0x08, 0x00, 0x16, 0x00, 0x13, + 0xc0, 0x0d, 0xc0, 0x03, 0x00, 0x0a, 0xc0, 0x2f, + 0xc0, 0x2b, 0xc0, 0x27, 0xc0, 0x23, 0xc0, 0x13, + 0xc0, 0x09, 0x00, 0xa2, 0x00, 0x9e, 0x00, 0x67, + 0x00, 0x40, 0x00, 0x33, 0x00, 0x32, 0xc0, 0x31, + 0xc0, 0x2d, 0xc0, 0x29, 0xc0, 0x25, 0xc0, 0x0e, + 0xc0, 0x04, 0x00, 0x9c, 0x00, 0x3c, 0x00, 0x2f, + 0xc0, 0x11, 0xc0, 0x07, 0xc0, 0x0c, 0xc0, 0x02, + 0x00, 0x05, 0x00, 0x04, 0x00, 0x15, 0x00, 0x12, + 0x00, 0x09, 0x00, 0x14, 0x00, 0x11, 0x00, 0x08, + 0x00, 0x06, 0x00, 0x03, 0x00, 0xff, 0x01, 0x00, + 0x00, 0x6f, 0x00, 0x0b, 0x00, 0x04, 0x03, 0x00, + 0x01, 0x02, 0x00, 0x0a, 0x00, 0x34, 0x00, 0x32, + 0x00, 0x0e, 0x00, 0x0d, 0x00, 0x19, 0x00, 0x0b, + 0x00, 0x0c, 0x00, 0x18, 0x00, 0x09, 0x00, 0x0a, + 0x00, 0x16, 0x00, 0x17, 0x00, 0x08, 0x00, 0x06, + 0x00, 0x07, 0x00, 0x14, 0x00, 0x15, 0x00, 0x04, + 0x00, 0x05, 0x00, 0x12, 0x00, 0x13, 0x00, 0x01, + 0x00, 0x02, 0x00, 0x03, 0x00, 0x0f, 0x00, 0x10, + 0x00, 0x11, 0x00, 0x23, 0x00, 0x00, 0x00, 0x0d, + 0x00, 0x22, 0x00, 0x20, 0x06, 0x01, 0x06, 0x02, + 0x06, 0x03, 0x05, 0x01, 0x05, 0x02, 0x05, 0x03, + 0x04, 0x01, 0x04, 0x02, 0x04, 0x03, 0x03, 0x01, + 0x03, 0x02, 0x03, 0x03, 0x02, 0x01, 0x02, 0x02, + 0x02, 0x03, 0x01, 0x01, 0x00, 0x0f, 0x00, 0x01, + 0x01, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x30, 0x02, 0x00, 0x00, + 0x2c, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xc0, 0x2f, 0x00, 0x00, + 0x04, 0x00, 0x23, 0x00, 0x00, 0x16, 0x03, 0x03, + 0x02, 0xbe, 0x0b, 0x00, 0x02, 0xba, 0x00, 0x02, + 0xb7, 0x00, 0x02, 0xb4, 0x30, 0x82, 0x02, 0xb0, + 0x30, 0x82, 0x02, 0x19, 0xa0, 0x03, 0x02, 0x01, + 0x02, 0x02, 0x09, 0x00, 0x85, 0xb0, 0xbb, 0xa4, + 0x8a, 0x7f, 0xb8, 0xca, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x05, 0x05, 0x00, 0x30, 0x45, 0x31, 0x0b, 0x30, + 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, + 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, + 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, + 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, + 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, + 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, + 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, + 0x74, 0x64, 0x30, 0x1e, 0x17, 0x0d, 0x31, 0x30, + 0x30, 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, + 0x33, 0x38, 0x5a, 0x17, 0x0d, 0x31, 0x31, 0x30, + 0x34, 0x32, 0x34, 0x30, 0x39, 0x30, 0x39, 0x33, + 0x38, 0x5a, 0x30, 0x45, 0x31, 0x0b, 0x30, 0x09, + 0x06, 0x03, 0x55, 0x04, 0x06, 0x13, 0x02, 0x41, + 0x55, 0x31, 0x13, 0x30, 0x11, 0x06, 0x03, 0x55, + 0x04, 0x08, 0x13, 0x0a, 0x53, 0x6f, 0x6d, 0x65, + 0x2d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x31, 0x21, + 0x30, 0x1f, 0x06, 0x03, 0x55, 0x04, 0x0a, 0x13, + 0x18, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, + 0x74, 0x20, 0x57, 0x69, 0x64, 0x67, 0x69, 0x74, + 0x73, 0x20, 0x50, 0x74, 0x79, 0x20, 0x4c, 0x74, + 0x64, 0x30, 0x81, 0x9f, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x81, 0x8d, 0x00, 0x30, + 0x81, 0x89, 0x02, 0x81, 0x81, 0x00, 0xbb, 0x79, + 0xd6, 0xf5, 0x17, 0xb5, 0xe5, 0xbf, 0x46, 0x10, + 0xd0, 0xdc, 0x69, 
0xbe, 0xe6, 0x2b, 0x07, 0x43, + 0x5a, 0xd0, 0x03, 0x2d, 0x8a, 0x7a, 0x43, 0x85, + 0xb7, 0x14, 0x52, 0xe7, 0xa5, 0x65, 0x4c, 0x2c, + 0x78, 0xb8, 0x23, 0x8c, 0xb5, 0xb4, 0x82, 0xe5, + 0xde, 0x1f, 0x95, 0x3b, 0x7e, 0x62, 0xa5, 0x2c, + 0xa5, 0x33, 0xd6, 0xfe, 0x12, 0x5c, 0x7a, 0x56, + 0xfc, 0xf5, 0x06, 0xbf, 0xfa, 0x58, 0x7b, 0x26, + 0x3f, 0xb5, 0xcd, 0x04, 0xd3, 0xd0, 0xc9, 0x21, + 0x96, 0x4a, 0xc7, 0xf4, 0x54, 0x9f, 0x5a, 0xbf, + 0xef, 0x42, 0x71, 0x00, 0xfe, 0x18, 0x99, 0x07, + 0x7f, 0x7e, 0x88, 0x7d, 0x7d, 0xf1, 0x04, 0x39, + 0xc4, 0xa2, 0x2e, 0xdb, 0x51, 0xc9, 0x7c, 0xe3, + 0xc0, 0x4c, 0x3b, 0x32, 0x66, 0x01, 0xcf, 0xaf, + 0xb1, 0x1d, 0xb8, 0x71, 0x9a, 0x1d, 0xdb, 0xdb, + 0x89, 0x6b, 0xae, 0xda, 0x2d, 0x79, 0x02, 0x03, + 0x01, 0x00, 0x01, 0xa3, 0x81, 0xa7, 0x30, 0x81, + 0xa4, 0x30, 0x1d, 0x06, 0x03, 0x55, 0x1d, 0x0e, + 0x04, 0x16, 0x04, 0x14, 0xb1, 0xad, 0xe2, 0x85, + 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, 0x23, + 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, 0x39, + 0x30, 0x75, 0x06, 0x03, 0x55, 0x1d, 0x23, 0x04, + 0x6e, 0x30, 0x6c, 0x80, 0x14, 0xb1, 0xad, 0xe2, + 0x85, 0x5a, 0xcf, 0xcb, 0x28, 0xdb, 0x69, 0xce, + 0x23, 0x69, 0xde, 0xd3, 0x26, 0x8e, 0x18, 0x88, + 0x39, 0xa1, 0x49, 0xa4, 0x47, 0x30, 0x45, 0x31, + 0x0b, 0x30, 0x09, 0x06, 0x03, 0x55, 0x04, 0x06, + 0x13, 0x02, 0x41, 0x55, 0x31, 0x13, 0x30, 0x11, + 0x06, 0x03, 0x55, 0x04, 0x08, 0x13, 0x0a, 0x53, + 0x6f, 0x6d, 0x65, 0x2d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x31, 0x21, 0x30, 0x1f, 0x06, 0x03, 0x55, + 0x04, 0x0a, 0x13, 0x18, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x65, 0x74, 0x20, 0x57, 0x69, 0x64, + 0x67, 0x69, 0x74, 0x73, 0x20, 0x50, 0x74, 0x79, + 0x20, 0x4c, 0x74, 0x64, 0x82, 0x09, 0x00, 0x85, + 0xb0, 0xbb, 0xa4, 0x8a, 0x7f, 0xb8, 0xca, 0x30, + 0x0c, 0x06, 0x03, 0x55, 0x1d, 0x13, 0x04, 0x05, + 0x30, 0x03, 0x01, 0x01, 0xff, 0x30, 0x0d, 0x06, + 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, + 0x01, 0x05, 0x05, 0x00, 0x03, 0x81, 0x81, 0x00, + 0x08, 0x6c, 0x45, 0x24, 0xc7, 0x6b, 0xb1, 0x59, + 0xab, 0x0c, 0x52, 0xcc, 0xf2, 0xb0, 0x14, 0xd7, + 0x87, 0x9d, 0x7a, 0x64, 0x75, 0xb5, 0x5a, 0x95, + 0x66, 0xe4, 0xc5, 0x2b, 0x8e, 0xae, 0x12, 0x66, + 0x1f, 0xeb, 0x4f, 0x38, 0xb3, 0x6e, 0x60, 0xd3, + 0x92, 0xfd, 0xf7, 0x41, 0x08, 0xb5, 0x25, 0x13, + 0xb1, 0x18, 0x7a, 0x24, 0xfb, 0x30, 0x1d, 0xba, + 0xed, 0x98, 0xb9, 0x17, 0xec, 0xe7, 0xd7, 0x31, + 0x59, 0xdb, 0x95, 0xd3, 0x1d, 0x78, 0xea, 0x50, + 0x56, 0x5c, 0xd5, 0x82, 0x5a, 0x2d, 0x5a, 0x5f, + 0x33, 0xc4, 0xb6, 0xd8, 0xc9, 0x75, 0x90, 0x96, + 0x8c, 0x0f, 0x52, 0x98, 0xb5, 0xcd, 0x98, 0x1f, + 0x89, 0x20, 0x5f, 0xf2, 0xa0, 0x1c, 0xa3, 0x1b, + 0x96, 0x94, 0xdd, 0xa9, 0xfd, 0x57, 0xe9, 0x70, + 0xe8, 0x26, 0x6d, 0x71, 0x99, 0x9b, 0x26, 0x6e, + 0x38, 0x50, 0x29, 0x6c, 0x90, 0xa7, 0xbd, 0xd9, + 0x16, 0x03, 0x03, 0x01, 0x11, 0x0c, 0x00, 0x01, + 0x0d, 0x03, 0x00, 0x19, 0x85, 0x04, 0x01, 0x39, + 0xdc, 0xee, 0x44, 0x17, 0x5e, 0xdb, 0xd7, 0x27, + 0xaf, 0xb6, 0x56, 0xd9, 0xb4, 0x43, 0x5a, 0x99, + 0xcf, 0xaa, 0x31, 0x37, 0x0c, 0x6f, 0x3a, 0xa0, + 0xf8, 0x53, 0xc4, 0x74, 0xd1, 0x91, 0x0a, 0x46, + 0xf5, 0x38, 0x3b, 0x5c, 0x09, 0xd8, 0x97, 0xdc, + 0x4b, 0xaa, 0x70, 0x26, 0x48, 0xf2, 0xd6, 0x0b, + 0x31, 0xc9, 0xf8, 0xd4, 0x98, 0x43, 0xe1, 0x6c, + 0xd5, 0xc7, 0xb2, 0x8e, 0x0b, 0x01, 0xe6, 0xb6, + 0x00, 0x28, 0x80, 0x7b, 0xfc, 0x96, 0x8f, 0x0d, + 0xa2, 0x4f, 0xb0, 0x79, 0xaf, 0xdc, 0x61, 0x28, + 0x63, 0x33, 0x78, 0xf6, 0x31, 0x39, 0xfd, 0x8a, + 0xf4, 0x15, 0x18, 0x11, 0xfe, 0xdb, 0xd5, 0x07, + 0xda, 0x2c, 0xed, 0x49, 0xa0, 0x23, 0xbf, 0xd0, + 0x3a, 0x38, 0x1d, 0x54, 0xae, 0x1c, 0x7b, 0xea, + 0x29, 0xee, 0xd0, 
0x38, 0xc1, 0x76, 0xa7, 0x7f, + 0x2a, 0xf4, 0xce, 0x1e, 0xac, 0xcc, 0x94, 0x79, + 0x90, 0x33, 0x04, 0x01, 0x00, 0x80, 0x0d, 0x8e, + 0x79, 0xe6, 0x86, 0xf6, 0xb6, 0xfb, 0x6b, 0x6a, + 0xcc, 0x55, 0xe4, 0x80, 0x4d, 0xc5, 0x0c, 0xc6, + 0xa3, 0x9f, 0x1d, 0x39, 0xd2, 0x98, 0x57, 0x31, + 0xa2, 0x90, 0x73, 0xe8, 0xd2, 0xcd, 0xb0, 0x93, + 0x1a, 0x60, 0x0f, 0x38, 0x02, 0x3b, 0x1b, 0x25, + 0x56, 0xec, 0x44, 0xab, 0xbe, 0x2e, 0x0c, 0xc0, + 0x6e, 0x54, 0x91, 0x50, 0xd6, 0xb1, 0xa2, 0x98, + 0x14, 0xa8, 0x35, 0x62, 0x9d, 0xca, 0xfb, 0x0f, + 0x64, 0x2b, 0x05, 0xa0, 0xa0, 0x57, 0xef, 0xcd, + 0x95, 0x45, 0x13, 0x5a, 0x9b, 0x3d, 0xdb, 0x42, + 0x54, 0x7f, 0xb9, 0x17, 0x08, 0x7f, 0xb2, 0xf0, + 0xb1, 0xc3, 0xdf, 0x67, 0x95, 0xe2, 0x73, 0xf2, + 0x76, 0xa3, 0x97, 0xfd, 0x9c, 0x92, 0x4a, 0xdb, + 0x95, 0x1e, 0x91, 0x95, 0xae, 0x3d, 0xae, 0x58, + 0xb5, 0x03, 0x6f, 0x5c, 0x3a, 0x19, 0xab, 0x92, + 0xa5, 0x09, 0x6b, 0x40, 0x61, 0xb0, 0x16, 0x03, + 0x03, 0x00, 0x04, 0x0e, 0x00, 0x00, 0x00, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x8a, 0x10, 0x00, 0x00, + 0x86, 0x85, 0x04, 0x01, 0xba, 0xb8, 0xad, 0x69, + 0x20, 0x5e, 0xc1, 0x61, 0xc3, 0x0f, 0xb4, 0x30, + 0x64, 0x66, 0x70, 0x96, 0x33, 0x3c, 0x8e, 0x12, + 0x56, 0xbf, 0x6d, 0xb8, 0x6d, 0xc6, 0xba, 0xea, + 0xfc, 0x38, 0xc0, 0x8b, 0x87, 0xa8, 0xf3, 0x87, + 0xa1, 0xd5, 0xb6, 0xb0, 0x72, 0xc7, 0xd4, 0x19, + 0x56, 0xa0, 0x91, 0xe1, 0x45, 0xc7, 0xf1, 0x7d, + 0xb0, 0x1d, 0x78, 0x18, 0xf6, 0x3d, 0xbf, 0x1a, + 0x23, 0x93, 0x0b, 0x19, 0xb1, 0x00, 0x56, 0xc9, + 0x5e, 0x89, 0xd4, 0x9d, 0xd9, 0x5b, 0xe0, 0xb8, + 0xff, 0x2f, 0x7d, 0x93, 0xae, 0x5b, 0xa5, 0x1f, + 0x1f, 0x2b, 0x09, 0xe5, 0xf6, 0x07, 0x26, 0xa3, + 0xed, 0xcb, 0x6a, 0x1a, 0xd6, 0x14, 0x83, 0x9b, + 0xd3, 0x9d, 0x47, 0x1b, 0xf3, 0x72, 0x5f, 0x69, + 0x21, 0x8f, 0xfa, 0x09, 0x38, 0x1a, 0x6b, 0x91, + 0xcf, 0x19, 0x32, 0x54, 0x58, 0x8e, 0xee, 0xaf, + 0xeb, 0x06, 0x9b, 0x3a, 0x34, 0x16, 0x66, 0x14, + 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, 0x03, + 0x00, 0x28, 0xc6, 0x96, 0x67, 0x62, 0xcc, 0x47, + 0x01, 0xb5, 0xbd, 0xb7, 0x24, 0xd3, 0xb6, 0xfd, + 0xb8, 0x46, 0xce, 0x82, 0x6d, 0x31, 0x1f, 0x15, + 0x11, 0x8f, 0xed, 0x62, 0x71, 0x5f, 0xae, 0xb6, + 0xa9, 0x0c, 0x24, 0x1d, 0xe8, 0x26, 0x51, 0xca, + 0x7c, 0x42, + }, + { + 0x16, 0x03, 0x03, 0x00, 0x72, 0x04, 0x00, 0x00, + 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, + 0xea, 0x8b, 0xfb, 0xef, 0xba, 0xc8, 0x88, 0x94, + 0x44, 0x99, 0x5f, 0x02, 0x68, 0x3a, 0x12, 0x67, + 0x7f, 0xb9, 0x39, 0x71, 0x84, 0xe0, 0x30, 0xe6, + 0x90, 0x6c, 0xcf, 0x32, 0x29, 0x29, 0x5c, 0x5a, + 0x8b, 0x7d, 0xaa, 0x11, 0x28, 0x26, 0xb5, 0xce, + 0xd2, 0x88, 0xd5, 0xb0, 0x5f, 0x94, 0x37, 0xa2, + 0x48, 0xd9, 0x53, 0xb2, 0xab, 0x59, 0x23, 0x3d, + 0x81, 0x6e, 0x64, 0x89, 0xca, 0x1a, 0x84, 0x16, + 0xdf, 0x31, 0x10, 0xde, 0x52, 0x7f, 0x50, 0xf3, + 0xd9, 0x27, 0xa0, 0xe8, 0x34, 0x15, 0x9e, 0x11, + 0xdd, 0xba, 0xce, 0x40, 0x17, 0xf3, 0x67, 0x14, + 0x03, 0x03, 0x00, 0x01, 0x01, 0x16, 0x03, 0x03, + 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x35, 0xcb, 0x17, 0x66, 0xee, 0xfd, + 0x27, 0xdb, 0xb8, 0xa8, 0x8a, 0xf1, 0x56, 0x67, + 0x89, 0x0d, 0x13, 0xac, 0xe2, 0x31, 0xb9, 0xa2, + 0x26, 0xbb, 0x1c, 0xcf, 0xd1, 0xb2, 0x48, 0x1d, + 0x0d, 0xb1, 0x17, 0x03, 0x03, 0x00, 0x25, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xc0, + 0x89, 0x7c, 0x58, 0x6a, 0x9b, 0x00, 0x05, 0x8c, + 0x7f, 0x28, 0x54, 0x61, 0x44, 0x10, 0xee, 0x85, + 0x26, 0xa8, 0x04, 0xcd, 0xca, 0x85, 0x60, 0xf2, + 0xeb, 0x22, 0xbd, 0x9e, 0x15, 0x03, 0x03, 
0x00, + 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x10, 0xe4, 0xe5, 0xf9, 0x85, 0xe3, 0xb0, + 0xec, 0x84, 0x29, 0x91, 0x05, 0x7d, 0x86, 0xe3, + 0x97, 0xeb, 0xb2, + }, +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/key_agreement.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,400 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "errors" + "io" + "math/big" +) + +// rsaKeyAgreement implements the standard TLS key agreement where the client +// encrypts the pre-master secret to the server's public key. +type rsaKeyAgreement struct{} + +func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { + return nil, nil +} + +func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { + preMasterSecret := make([]byte, 48) + _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) + if err != nil { + return nil, err + } + + if len(ckx.ciphertext) < 2 { + return nil, errors.New("bad ClientKeyExchange") + } + + ciphertext := ckx.ciphertext + if version != VersionSSL30 { + ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1]) + if ciphertextLen != len(ckx.ciphertext)-2 { + return nil, errors.New("bad ClientKeyExchange") + } + ciphertext = ckx.ciphertext[2:] + } + + err = rsa.DecryptPKCS1v15SessionKey(config.rand(), cert.PrivateKey.(*rsa.PrivateKey), ciphertext, preMasterSecret) + if err != nil { + return nil, err + } + // We don't check the version number in the premaster secret. For one, + // by checking it, we would leak information about the validity of the + // encrypted pre-master secret. Secondly, it provides only a small + // benefit against a downgrade attack and some implementations send the + // wrong version anyway. See the discussion at the end of section + // 7.4.7.1 of RFC 4346. + return preMasterSecret, nil +} + +func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { + return errors.New("unexpected ServerKeyExchange") +} + +func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { + preMasterSecret := make([]byte, 48) + preMasterSecret[0] = byte(clientHello.vers >> 8) + preMasterSecret[1] = byte(clientHello.vers) + _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) + if err != nil { + return nil, nil, err + } + + encrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret) + if err != nil { + return nil, nil, err + } + ckx := new(clientKeyExchangeMsg) + ckx.ciphertext = make([]byte, len(encrypted)+2) + ckx.ciphertext[0] = byte(len(encrypted) >> 8) + ckx.ciphertext[1] = byte(len(encrypted)) + copy(ckx.ciphertext[2:], encrypted) + return preMasterSecret, ckx, nil +} + +// sha1Hash calculates a SHA1 hash over the given byte slices. 
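+// The digest is taken over the concatenation of the slices; feeding them
+// to the hash one at a time simply avoids allocating a single joined buffer.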
+func sha1Hash(slices [][]byte) []byte {
+ hsha1 := sha1.New()
+ for _, slice := range slices {
+ hsha1.Write(slice)
+ }
+ return hsha1.Sum(nil)
+}
+
+// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the
+// concatenation of an MD5 and SHA1 hash.
+func md5SHA1Hash(slices [][]byte) []byte {
+ md5sha1 := make([]byte, md5.Size+sha1.Size)
+ hmd5 := md5.New()
+ for _, slice := range slices {
+ hmd5.Write(slice)
+ }
+ copy(md5sha1, hmd5.Sum(nil))
+ copy(md5sha1[md5.Size:], sha1Hash(slices))
+ return md5sha1
+}
+
+// sha256Hash implements TLS 1.2's hash function.
+func sha256Hash(slices [][]byte) []byte {
+ h := sha256.New()
+ for _, slice := range slices {
+ h.Write(slice)
+ }
+ return h.Sum(nil)
+}
+
+// hashForServerKeyExchange hashes the given slices and returns their digest
+// and the identifier of the hash function used. The hashFunc argument is only
+// used for >= TLS 1.2 and precisely identifies the hash function to use.
+func hashForServerKeyExchange(sigType, hashFunc uint8, version uint16, slices ...[]byte) ([]byte, crypto.Hash, error) {
+ if version >= VersionTLS12 {
+ switch hashFunc {
+ case hashSHA256:
+ return sha256Hash(slices), crypto.SHA256, nil
+ case hashSHA1:
+ return sha1Hash(slices), crypto.SHA1, nil
+ default:
+ return nil, crypto.Hash(0), errors.New("tls: unknown hash function used by peer")
+ }
+ }
+ if sigType == signatureECDSA {
+ return sha1Hash(slices), crypto.SHA1, nil
+ }
+ return md5SHA1Hash(slices), crypto.MD5SHA1, nil
+}
+
+// pickTLS12HashForSignature returns a TLS 1.2 hash identifier for signing a
+// ServerKeyExchange given the signature type being used and the client's
+// advertised list of supported signature and hash combinations.
+func pickTLS12HashForSignature(sigType uint8, clientSignatureAndHashes []signatureAndHash) (uint8, error) {
+ if len(clientSignatureAndHashes) == 0 {
+ // If the client didn't specify any signature_algorithms
+ // extension then we can assume that it supports SHA1. See
+ // http://tools.ietf.org/html/rfc5246#section-7.4.1.4.1
+ return hashSHA1, nil
+ }
+
+ for _, sigAndHash := range clientSignatureAndHashes {
+ if sigAndHash.signature != sigType {
+ continue
+ }
+ switch sigAndHash.hash {
+ case hashSHA1, hashSHA256:
+ return sigAndHash.hash, nil
+ }
+ }
+
+ return 0, errors.New("tls: client doesn't support any common hash functions")
+}
+
+// ecdheKeyAgreement implements a TLS key agreement where the server
+// generates an ephemeral EC public/private key pair and signs it. The
+// pre-master secret is then calculated using ECDH. The signature may
+// either be ECDSA or RSA.
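+//
+// In outline (an illustrative sketch only of the methods below; sign,
+// clientX, clientY and leftPad are stand-ins for work done elsewhere in
+// the handshake, and the real ServerKeyExchange also carries length
+// prefixes and, on TLS 1.2, a SignatureAndHashAlgorithm):
+//
+//	priv, x, y, _ := elliptic.GenerateKey(curve, rand)     // server's ephemeral key
+//	skx := sign(serverECDHParams(curveid, x, y))           // signed ServerKeyExchange
+//	shared, _ := curve.ScalarMult(clientX, clientY, priv)  // ECDH shared point
+//	preMasterSecret := leftPad(shared.Bytes(), (curve.Params().BitSize+7)/8)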
+type ecdheKeyAgreement struct { + version uint16 + sigType uint8 + privateKey []byte + curve elliptic.Curve + x, y *big.Int +} + +func (ka *ecdheKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { + var curveid uint16 + +Curve: + for _, c := range clientHello.supportedCurves { + switch c { + case curveP256: + ka.curve = elliptic.P256() + curveid = c + break Curve + case curveP384: + ka.curve = elliptic.P384() + curveid = c + break Curve + case curveP521: + ka.curve = elliptic.P521() + curveid = c + break Curve + } + } + + if curveid == 0 { + return nil, errors.New("tls: no supported elliptic curves offered") + } + + var x, y *big.Int + var err error + ka.privateKey, x, y, err = elliptic.GenerateKey(ka.curve, config.rand()) + if err != nil { + return nil, err + } + ecdhePublic := elliptic.Marshal(ka.curve, x, y) + + // http://tools.ietf.org/html/rfc4492#section-5.4 + serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic)) + serverECDHParams[0] = 3 // named curve + serverECDHParams[1] = byte(curveid >> 8) + serverECDHParams[2] = byte(curveid) + serverECDHParams[3] = byte(len(ecdhePublic)) + copy(serverECDHParams[4:], ecdhePublic) + + var tls12HashId uint8 + if ka.version >= VersionTLS12 { + if tls12HashId, err = pickTLS12HashForSignature(ka.sigType, clientHello.signatureAndHashes); err != nil { + return nil, err + } + } + + digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, hello.random, serverECDHParams) + if err != nil { + return nil, err + } + var sig []byte + switch ka.sigType { + case signatureECDSA: + privKey, ok := cert.PrivateKey.(*ecdsa.PrivateKey) + if !ok { + return nil, errors.New("ECDHE ECDSA requires an ECDSA server private key") + } + r, s, err := ecdsa.Sign(config.rand(), privKey, digest) + if err != nil { + return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) + } + sig, err = asn1.Marshal(ecdsaSignature{r, s}) + case signatureRSA: + privKey, ok := cert.PrivateKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("ECDHE RSA requires a RSA server private key") + } + sig, err = rsa.SignPKCS1v15(config.rand(), privKey, hashFunc, digest) + if err != nil { + return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) + } + default: + return nil, errors.New("unknown ECDHE signature algorithm") + } + + skx := new(serverKeyExchangeMsg) + sigAndHashLen := 0 + if ka.version >= VersionTLS12 { + sigAndHashLen = 2 + } + skx.key = make([]byte, len(serverECDHParams)+sigAndHashLen+2+len(sig)) + copy(skx.key, serverECDHParams) + k := skx.key[len(serverECDHParams):] + if ka.version >= VersionTLS12 { + k[0] = tls12HashId + k[1] = ka.sigType + k = k[2:] + } + k[0] = byte(len(sig) >> 8) + k[1] = byte(len(sig)) + copy(k[2:], sig) + + return skx, nil +} + +func (ka *ecdheKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { + if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 { + return nil, errors.New("bad ClientKeyExchange") + } + x, y := elliptic.Unmarshal(ka.curve, ckx.ciphertext[1:]) + if x == nil { + return nil, errors.New("bad ClientKeyExchange") + } + x, _ = ka.curve.ScalarMult(x, y, ka.privateKey) + preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) + xBytes := x.Bytes() + copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) + + return preMasterSecret, 
nil +} + +var errServerKeyExchange = errors.New("invalid ServerKeyExchange") + +func (ka *ecdheKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { + if len(skx.key) < 4 { + return errServerKeyExchange + } + if skx.key[0] != 3 { // named curve + return errors.New("server selected unsupported curve") + } + curveid := uint16(skx.key[1])<<8 | uint16(skx.key[2]) + + switch curveid { + case curveP256: + ka.curve = elliptic.P256() + case curveP384: + ka.curve = elliptic.P384() + case curveP521: + ka.curve = elliptic.P521() + default: + return errors.New("server selected unsupported curve") + } + + publicLen := int(skx.key[3]) + if publicLen+4 > len(skx.key) { + return errServerKeyExchange + } + ka.x, ka.y = elliptic.Unmarshal(ka.curve, skx.key[4:4+publicLen]) + if ka.x == nil { + return errServerKeyExchange + } + serverECDHParams := skx.key[:4+publicLen] + + sig := skx.key[4+publicLen:] + if len(sig) < 2 { + return errServerKeyExchange + } + + var tls12HashId uint8 + if ka.version >= VersionTLS12 { + // handle SignatureAndHashAlgorithm + var sigAndHash []uint8 + sigAndHash, sig = sig[:2], sig[2:] + if sigAndHash[1] != ka.sigType { + return errServerKeyExchange + } + tls12HashId = sigAndHash[0] + if len(sig) < 2 { + return errServerKeyExchange + } + } + sigLen := int(sig[0])<<8 | int(sig[1]) + if sigLen+2 != len(sig) { + return errServerKeyExchange + } + sig = sig[2:] + + digest, hashFunc, err := hashForServerKeyExchange(ka.sigType, tls12HashId, ka.version, clientHello.random, serverHello.random, serverECDHParams) + if err != nil { + return err + } + switch ka.sigType { + case signatureECDSA: + pubKey, ok := cert.PublicKey.(*ecdsa.PublicKey) + if !ok { + return errors.New("ECDHE ECDSA requires a ECDSA server public key") + } + ecdsaSig := new(ecdsaSignature) + if _, err := asn1.Unmarshal(sig, ecdsaSig); err != nil { + return err + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("ECDSA signature contained zero or negative values") + } + if !ecdsa.Verify(pubKey, digest, ecdsaSig.R, ecdsaSig.S) { + return errors.New("ECDSA verification failure") + } + case signatureRSA: + pubKey, ok := cert.PublicKey.(*rsa.PublicKey) + if !ok { + return errors.New("ECDHE RSA requires a RSA server public key") + } + if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, digest, sig); err != nil { + return err + } + default: + return errors.New("unknown ECDHE signature algorithm") + } + + return nil +} + +func (ka *ecdheKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { + if ka.curve == nil { + return nil, nil, errors.New("missing ServerKeyExchange message") + } + priv, mx, my, err := elliptic.GenerateKey(ka.curve, config.rand()) + if err != nil { + return nil, nil, err + } + x, _ := ka.curve.ScalarMult(ka.x, ka.y, priv) + preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) + xBytes := x.Bytes() + copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) + + serialized := elliptic.Marshal(ka.curve, mx, my) + + ckx := new(clientKeyExchangeMsg) + ckx.ciphertext = make([]byte, 1+len(serialized)) + ckx.ciphertext[0] = byte(len(serialized)) + copy(ckx.ciphertext[1:], serialized) + + return preMasterSecret, ckx, nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/prf.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/prf.go 1970-01-01 00:00:00 
+0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/prf.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,291 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "crypto" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +// Split a premaster secret in two as specified in RFC 4346, section 5. +func splitPreMasterSecret(secret []byte) (s1, s2 []byte) { + s1 = secret[0 : (len(secret)+1)/2] + s2 = secret[len(secret)/2:] + return +} + +// pHash implements the P_hash function, as defined in RFC 4346, section 5. +func pHash(result, secret, seed []byte, hash func() hash.Hash) { + h := hmac.New(hash, secret) + h.Write(seed) + a := h.Sum(nil) + + j := 0 + for j < len(result) { + h.Reset() + h.Write(a) + h.Write(seed) + b := h.Sum(nil) + todo := len(b) + if j+todo > len(result) { + todo = len(result) - j + } + copy(result[j:j+todo], b) + j += todo + + h.Reset() + h.Write(a) + a = h.Sum(nil) + } +} + +// prf10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5. +func prf10(result, secret, label, seed []byte) { + hashSHA1 := sha1.New + hashMD5 := md5.New + + labelAndSeed := make([]byte, len(label)+len(seed)) + copy(labelAndSeed, label) + copy(labelAndSeed[len(label):], seed) + + s1, s2 := splitPreMasterSecret(secret) + pHash(result, s1, labelAndSeed, hashMD5) + result2 := make([]byte, len(result)) + pHash(result2, s2, labelAndSeed, hashSHA1) + + for i, b := range result2 { + result[i] ^= b + } +} + +// prf12 implements the TLS 1.2 pseudo-random function, as defined in RFC 5246, section 5. +func prf12(result, secret, label, seed []byte) { + labelAndSeed := make([]byte, len(label)+len(seed)) + copy(labelAndSeed, label) + copy(labelAndSeed[len(label):], seed) + + pHash(result, secret, labelAndSeed, sha256.New) +} + +// prf30 implements the SSL 3.0 pseudo-random function, as defined in +// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 6. +func prf30(result, secret, label, seed []byte) { + hashSHA1 := sha1.New() + hashMD5 := md5.New() + + done := 0 + i := 0 + // RFC5246 section 6.3 says that the largest PRF output needed is 128 + // bytes. Since no more ciphersuites will be added to SSLv3, this will + // remain true. Each iteration gives us 16 bytes so 10 iterations will + // be sufficient. + var b [11]byte + for done < len(result) { + for j := 0; j <= i; j++ { + b[j] = 'A' + byte(i) + } + + hashSHA1.Reset() + hashSHA1.Write(b[:i+1]) + hashSHA1.Write(secret) + hashSHA1.Write(seed) + digest := hashSHA1.Sum(nil) + + hashMD5.Reset() + hashMD5.Write(secret) + hashMD5.Write(digest) + + done += copy(result[done:], hashMD5.Sum(nil)) + i++ + } +} + +const ( + tlsRandomLength = 32 // Length of a random nonce in TLS 1.1. + masterSecretLength = 48 // Length of a master secret in TLS 1.1. + finishedVerifyLength = 12 // Length of verify_data in a Finished message. 
+) + +var masterSecretLabel = []byte("master secret") +var keyExpansionLabel = []byte("key expansion") +var clientFinishedLabel = []byte("client finished") +var serverFinishedLabel = []byte("server finished") + +func prfForVersion(version uint16) func(result, secret, label, seed []byte) { + switch version { + case VersionSSL30: + return prf30 + case VersionTLS10, VersionTLS11: + return prf10 + case VersionTLS12: + return prf12 + default: + panic("unknown version") + } +} + +// masterFromPreMasterSecret generates the master secret from the pre-master +// secret. See http://tools.ietf.org/html/rfc5246#section-8.1 +func masterFromPreMasterSecret(version uint16, preMasterSecret, clientRandom, serverRandom []byte) []byte { + var seed [tlsRandomLength * 2]byte + copy(seed[0:len(clientRandom)], clientRandom) + copy(seed[len(clientRandom):], serverRandom) + masterSecret := make([]byte, masterSecretLength) + prfForVersion(version)(masterSecret, preMasterSecret, masterSecretLabel, seed[0:]) + return masterSecret +} + +// keysFromMasterSecret generates the connection keys from the master +// secret, given the lengths of the MAC key, cipher key and IV, as defined in +// RFC 2246, section 6.3. +func keysFromMasterSecret(version uint16, masterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) { + var seed [tlsRandomLength * 2]byte + copy(seed[0:len(clientRandom)], serverRandom) + copy(seed[len(serverRandom):], clientRandom) + + n := 2*macLen + 2*keyLen + 2*ivLen + keyMaterial := make([]byte, n) + prfForVersion(version)(keyMaterial, masterSecret, keyExpansionLabel, seed[0:]) + clientMAC = keyMaterial[:macLen] + keyMaterial = keyMaterial[macLen:] + serverMAC = keyMaterial[:macLen] + keyMaterial = keyMaterial[macLen:] + clientKey = keyMaterial[:keyLen] + keyMaterial = keyMaterial[keyLen:] + serverKey = keyMaterial[:keyLen] + keyMaterial = keyMaterial[keyLen:] + clientIV = keyMaterial[:ivLen] + keyMaterial = keyMaterial[ivLen:] + serverIV = keyMaterial[:ivLen] + return +} + +func newFinishedHash(version uint16) finishedHash { + if version >= VersionTLS12 { + return finishedHash{sha256.New(), sha256.New(), nil, nil, version} + } + return finishedHash{sha1.New(), sha1.New(), md5.New(), md5.New(), version} +} + +// A finishedHash calculates the hash of a set of handshake messages suitable +// for including in a Finished message. +type finishedHash struct { + client hash.Hash + server hash.Hash + + // Prior to TLS 1.2, an additional MD5 hash is required. + clientMD5 hash.Hash + serverMD5 hash.Hash + + version uint16 +} + +func (h finishedHash) Write(msg []byte) (n int, err error) { + h.client.Write(msg) + h.server.Write(msg) + + if h.version < VersionTLS12 { + h.clientMD5.Write(msg) + h.serverMD5.Write(msg) + } + return len(msg), nil +} + +// finishedSum30 calculates the contents of the verify_data member of a SSLv3 +// Finished message given the MD5 and SHA1 hashes of a set of handshake +// messages. 
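+//
+// Schematically, with the passed-in hashes already containing all handshake
+// messages:
+//
+//	innerMD5    = MD5(handshake || magic || master_secret || pad1)
+//	outerMD5    = MD5(master_secret || pad2 || innerMD5)
+//	innerSHA1   = SHA1(handshake || magic || master_secret || pad1[:40])
+//	outerSHA1   = SHA1(master_secret || pad2[:40] || innerSHA1)
+//	verify_data = outerMD5 || outerSHA1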
+func finishedSum30(md5, sha1 hash.Hash, masterSecret []byte, magic [4]byte) []byte { + md5.Write(magic[:]) + md5.Write(masterSecret) + md5.Write(ssl30Pad1[:]) + md5Digest := md5.Sum(nil) + + md5.Reset() + md5.Write(masterSecret) + md5.Write(ssl30Pad2[:]) + md5.Write(md5Digest) + md5Digest = md5.Sum(nil) + + sha1.Write(magic[:]) + sha1.Write(masterSecret) + sha1.Write(ssl30Pad1[:40]) + sha1Digest := sha1.Sum(nil) + + sha1.Reset() + sha1.Write(masterSecret) + sha1.Write(ssl30Pad2[:40]) + sha1.Write(sha1Digest) + sha1Digest = sha1.Sum(nil) + + ret := make([]byte, len(md5Digest)+len(sha1Digest)) + copy(ret, md5Digest) + copy(ret[len(md5Digest):], sha1Digest) + return ret +} + +var ssl3ClientFinishedMagic = [4]byte{0x43, 0x4c, 0x4e, 0x54} +var ssl3ServerFinishedMagic = [4]byte{0x53, 0x52, 0x56, 0x52} + +// clientSum returns the contents of the verify_data member of a client's +// Finished message. +func (h finishedHash) clientSum(masterSecret []byte) []byte { + if h.version == VersionSSL30 { + return finishedSum30(h.clientMD5, h.client, masterSecret, ssl3ClientFinishedMagic) + } + + out := make([]byte, finishedVerifyLength) + if h.version >= VersionTLS12 { + seed := h.client.Sum(nil) + prf12(out, masterSecret, clientFinishedLabel, seed) + } else { + seed := make([]byte, 0, md5.Size+sha1.Size) + seed = h.clientMD5.Sum(seed) + seed = h.client.Sum(seed) + prf10(out, masterSecret, clientFinishedLabel, seed) + } + return out +} + +// serverSum returns the contents of the verify_data member of a server's +// Finished message. +func (h finishedHash) serverSum(masterSecret []byte) []byte { + if h.version == VersionSSL30 { + return finishedSum30(h.serverMD5, h.server, masterSecret, ssl3ServerFinishedMagic) + } + + out := make([]byte, finishedVerifyLength) + if h.version >= VersionTLS12 { + seed := h.server.Sum(nil) + prf12(out, masterSecret, serverFinishedLabel, seed) + } else { + seed := make([]byte, 0, md5.Size+sha1.Size) + seed = h.serverMD5.Sum(seed) + seed = h.server.Sum(seed) + prf10(out, masterSecret, serverFinishedLabel, seed) + } + return out +} + +// hashForClientCertificate returns a digest, hash function, and TLS 1.2 hash +// id suitable for signing by a TLS client certificate. +func (h finishedHash) hashForClientCertificate(sigType uint8) ([]byte, crypto.Hash, uint8) { + if h.version >= VersionTLS12 { + digest := h.server.Sum(nil) + return digest, crypto.SHA256, hashSHA256 + } + if sigType == signatureECDSA { + digest := h.server.Sum(nil) + return digest, crypto.SHA1, hashSHA1 + } + + digest := make([]byte, 0, 36) + digest = h.serverMD5.Sum(digest) + digest = h.server.Sum(digest) + return digest, crypto.MD5SHA1, 0 /* not specified in TLS 1.2. */ +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/prf_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,126 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tls + +import ( + "encoding/hex" + "testing" +) + +type testSplitPreMasterSecretTest struct { + in, out1, out2 string +} + +var testSplitPreMasterSecretTests = []testSplitPreMasterSecretTest{ + {"", "", ""}, + {"00", "00", "00"}, + {"0011", "00", "11"}, + {"001122", "0011", "1122"}, + {"00112233", "0011", "2233"}, +} + +func TestSplitPreMasterSecret(t *testing.T) { + for i, test := range testSplitPreMasterSecretTests { + in, _ := hex.DecodeString(test.in) + out1, out2 := splitPreMasterSecret(in) + s1 := hex.EncodeToString(out1) + s2 := hex.EncodeToString(out2) + if s1 != test.out1 || s2 != test.out2 { + t.Errorf("#%d: got: (%s, %s) want: (%s, %s)", i, s1, s2, test.out1, test.out2) + } + } +} + +type testKeysFromTest struct { + version uint16 + preMasterSecret string + clientRandom, serverRandom string + masterSecret string + clientMAC, serverMAC string + clientKey, serverKey string + macLen, keyLen int +} + +func TestKeysFromPreMasterSecret(t *testing.T) { + for i, test := range testKeysFromTests { + in, _ := hex.DecodeString(test.preMasterSecret) + clientRandom, _ := hex.DecodeString(test.clientRandom) + serverRandom, _ := hex.DecodeString(test.serverRandom) + + masterSecret := masterFromPreMasterSecret(test.version, in, clientRandom, serverRandom) + if s := hex.EncodeToString(masterSecret); s != test.masterSecret { + t.Errorf("#%d: bad master secret %s, want %s", i, s, test.masterSecret) + continue + } + + clientMAC, serverMAC, clientKey, serverKey, _, _ := keysFromMasterSecret(test.version, masterSecret, clientRandom, serverRandom, test.macLen, test.keyLen, 0) + clientMACString := hex.EncodeToString(clientMAC) + serverMACString := hex.EncodeToString(serverMAC) + clientKeyString := hex.EncodeToString(clientKey) + serverKeyString := hex.EncodeToString(serverKey) + if clientMACString != test.clientMAC || + serverMACString != test.serverMAC || + clientKeyString != test.clientKey || + serverKeyString != test.serverKey { + t.Errorf("#%d: got: (%s, %s, %s, %s) want: (%s, %s, %s, %s)", i, clientMACString, serverMACString, clientKeyString, serverKeyString, test.clientMAC, test.serverMAC, test.clientKey, test.serverKey) + } + } +} + +// These test vectors were generated from GnuTLS using `gnutls-cli --insecure -d 9 ` +var testKeysFromTests = []testKeysFromTest{ + { + VersionTLS10, + "0302cac83ad4b1db3b9ab49ad05957de2a504a634a386fc600889321e1a971f57479466830ac3e6f468e87f5385fa0c5", + "4ae66303755184a3917fcb44880605fcc53baa01912b22ed94473fc69cebd558", + "4ae663020ec16e6bb5130be918cfcafd4d765979a3136a5d50c593446e4e44db", + "3d851bab6e5556e959a16bc36d66cfae32f672bfa9ecdef6096cbb1b23472df1da63dbbd9827606413221d149ed08ceb", + "805aaa19b3d2c0a0759a4b6c9959890e08480119", + "2d22f9fe519c075c16448305ceee209fc24ad109", + "d50b5771244f850cd8117a9ccafe2cf1", + "e076e33206b30507a85c32855acd0919", + 20, + 16, + }, + { + VersionTLS10, + "03023f7527316bc12cbcd69e4b9e8275d62c028f27e65c745cfcddc7ce01bd3570a111378b63848127f1c36e5f9e4890", + "4ae66364b5ea56b20ce4e25555aed2d7e67f42788dd03f3fee4adae0459ab106", + "4ae66363ab815cbf6a248b87d6b556184e945e9b97fbdf247858b0bdafacfa1c", + "7d64be7c80c59b740200b4b9c26d0baaa1c5ae56705acbcf2307fe62beb4728c19392c83f20483801cce022c77645460", + "97742ed60a0554ca13f04f97ee193177b971e3b0", + "37068751700400e03a8477a5c7eec0813ab9e0dc", + "207cddbc600d2a200abac6502053ee5c", + "df3f94f6e1eacc753b815fe16055cd43", + 20, + 16, + }, + { + VersionTLS10, + 
"832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1", + "4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e", + "4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e", + "1aff2e7a2c4279d0126f57a65a77a8d9d0087cf2733366699bec27eb53d5740705a8574bb1acc2abbe90e44f0dd28d6c", + "3c7647c93c1379a31a609542aa44e7f117a70085", + "0d73102994be74a575a3ead8532590ca32a526d4", + "ac7581b0b6c10d85bbd905ffbf36c65e", + "ff07edde49682b45466bd2e39464b306", + 20, + 16, + }, + { + VersionSSL30, + "832d515f1d61eebb2be56ba0ef79879efb9b527504abb386fb4310ed5d0e3b1f220d3bb6b455033a2773e6d8bdf951d278a187482b400d45deb88a5d5a6bb7d6a7a1decc04eb9ef0642876cd4a82d374d3b6ff35f0351dc5d411104de431375355addc39bfb1f6329fb163b0bc298d658338930d07d313cd980a7e3d9196cac1", + "4ae663b2ee389c0de147c509d8f18f5052afc4aaf9699efe8cb05ece883d3a5e", + "4ae664d503fd4cff50cfc1fb8fc606580f87b0fcdac9554ba0e01d785bdf278e", + "a614863e56299dcffeea2938f22c2ba023768dbe4b3f6877bc9c346c6ae529b51d9cb87ff9695ea4d01f2205584405b2", + "2c450d5b6f6e2013ac6bea6a0b32200d4e1ffb94", + "7a7a7438769536f2fb1ae49a61f0703b79b2dc53", + "f8f6b26c10f12855c9aafb1e0e839ccf", + "2b9d4b4a60cb7f396780ebff50650419", + 20, + 16, + }, +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/ticket.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,182 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "errors" + "io" +) + +// sessionState contains the information that is serialized into a session +// ticket in order to later resume a connection. 
+type sessionState struct { + vers uint16 + cipherSuite uint16 + masterSecret []byte + certificates [][]byte +} + +func (s *sessionState) equal(i interface{}) bool { + s1, ok := i.(*sessionState) + if !ok { + return false + } + + if s.vers != s1.vers || + s.cipherSuite != s1.cipherSuite || + !bytes.Equal(s.masterSecret, s1.masterSecret) { + return false + } + + if len(s.certificates) != len(s1.certificates) { + return false + } + + for i := range s.certificates { + if !bytes.Equal(s.certificates[i], s1.certificates[i]) { + return false + } + } + + return true +} + +func (s *sessionState) marshal() []byte { + length := 2 + 2 + 2 + len(s.masterSecret) + 2 + for _, cert := range s.certificates { + length += 4 + len(cert) + } + + ret := make([]byte, length) + x := ret + x[0] = byte(s.vers >> 8) + x[1] = byte(s.vers) + x[2] = byte(s.cipherSuite >> 8) + x[3] = byte(s.cipherSuite) + x[4] = byte(len(s.masterSecret) >> 8) + x[5] = byte(len(s.masterSecret)) + x = x[6:] + copy(x, s.masterSecret) + x = x[len(s.masterSecret):] + + x[0] = byte(len(s.certificates) >> 8) + x[1] = byte(len(s.certificates)) + x = x[2:] + + for _, cert := range s.certificates { + x[0] = byte(len(cert) >> 24) + x[1] = byte(len(cert) >> 16) + x[2] = byte(len(cert) >> 8) + x[3] = byte(len(cert)) + copy(x[4:], cert) + x = x[4+len(cert):] + } + + return ret +} + +func (s *sessionState) unmarshal(data []byte) bool { + if len(data) < 8 { + return false + } + + s.vers = uint16(data[0])<<8 | uint16(data[1]) + s.cipherSuite = uint16(data[2])<<8 | uint16(data[3]) + masterSecretLen := int(data[4])<<8 | int(data[5]) + data = data[6:] + if len(data) < masterSecretLen { + return false + } + + s.masterSecret = data[:masterSecretLen] + data = data[masterSecretLen:] + + if len(data) < 2 { + return false + } + + numCerts := int(data[0])<<8 | int(data[1]) + data = data[2:] + + s.certificates = make([][]byte, numCerts) + for i := range s.certificates { + if len(data) < 4 { + return false + } + certLen := int(data[0])<<24 | int(data[1])<<16 | int(data[2])<<8 | int(data[3]) + data = data[4:] + if certLen < 0 { + return false + } + if len(data) < certLen { + return false + } + s.certificates[i] = data[:certLen] + data = data[certLen:] + } + + if len(data) > 0 { + return false + } + + return true +} + +func (c *Conn) encryptTicket(state *sessionState) ([]byte, error) { + serialized := state.marshal() + encrypted := make([]byte, aes.BlockSize+len(serialized)+sha256.Size) + iv := encrypted[:aes.BlockSize] + macBytes := encrypted[len(encrypted)-sha256.Size:] + + if _, err := io.ReadFull(c.config.rand(), iv); err != nil { + return nil, err + } + block, err := aes.NewCipher(c.config.SessionTicketKey[:16]) + if err != nil { + return nil, errors.New("tls: failed to create cipher while encrypting ticket: " + err.Error()) + } + cipher.NewCTR(block, iv).XORKeyStream(encrypted[aes.BlockSize:], serialized) + + mac := hmac.New(sha256.New, c.config.SessionTicketKey[16:32]) + mac.Write(encrypted[:len(encrypted)-sha256.Size]) + mac.Sum(macBytes[:0]) + + return encrypted, nil +} + +func (c *Conn) decryptTicket(encrypted []byte) (*sessionState, bool) { + if len(encrypted) < aes.BlockSize+sha256.Size { + return nil, false + } + + iv := encrypted[:aes.BlockSize] + macBytes := encrypted[len(encrypted)-sha256.Size:] + + mac := hmac.New(sha256.New, c.config.SessionTicketKey[16:32]) + mac.Write(encrypted[:len(encrypted)-sha256.Size]) + expected := mac.Sum(nil) + + if subtle.ConstantTimeCompare(macBytes, expected) != 1 { + return nil, false + } + + block, err := 
aes.NewCipher(c.config.SessionTicketKey[:16]) + if err != nil { + return nil, false + } + ciphertext := encrypted[aes.BlockSize : len(encrypted)-sha256.Size] + plaintext := ciphertext + cipher.NewCTR(block, iv).XORKeyStream(plaintext, ciphertext) + + state := new(sessionState) + ok := state.unmarshal(plaintext) + return state, ok +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/tls.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/tls.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/tls.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,225 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tls partially implements TLS 1.2, as specified in RFC 5246. +package tls + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "io/ioutil" + "net" + "strings" +) + +// Server returns a new TLS server side connection +// using conn as the underlying transport. +// The configuration config must be non-nil and must have +// at least one certificate. +func Server(conn net.Conn, config *Config) *Conn { + return &Conn{conn: conn, config: config} +} + +// Client returns a new TLS client side connection +// using conn as the underlying transport. +// Client interprets a nil configuration as equivalent to +// the zero configuration; see the documentation of Config +// for the defaults. +func Client(conn net.Conn, config *Config) *Conn { + return &Conn{conn: conn, config: config, isClient: true} +} + +// A listener implements a network listener (net.Listener) for TLS connections. +type listener struct { + net.Listener + config *Config +} + +// Accept waits for and returns the next incoming TLS connection. +// The returned connection c is a *tls.Conn. +func (l *listener) Accept() (c net.Conn, err error) { + c, err = l.Listener.Accept() + if err != nil { + return + } + c = Server(c, l.config) + return +} + +// NewListener creates a Listener which accepts connections from an inner +// Listener and wraps each connection with Server. +// The configuration config must be non-nil and must have +// at least one certificate. +func NewListener(inner net.Listener, config *Config) net.Listener { + l := new(listener) + l.Listener = inner + l.config = config + return l +} + +// Listen creates a TLS listener accepting connections on the +// given network address using net.Listen. +// The configuration config must be non-nil and must have +// at least one certificate. +func Listen(network, laddr string, config *Config) (net.Listener, error) { + if config == nil || len(config.Certificates) == 0 { + return nil, errors.New("tls.Listen: no certificates in configuration") + } + l, err := net.Listen(network, laddr) + if err != nil { + return nil, err + } + return NewListener(l, config), nil +} + +// Dial connects to the given network address using net.Dial +// and then initiates a TLS handshake, returning the resulting +// TLS connection. +// Dial interprets a nil configuration as equivalent to +// the zero configuration; see the documentation of Config +// for the defaults. 
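+//
+// A minimal usage sketch (the address and error handling are illustrative):
+//
+//	conn, err := Dial("tcp", "example.com:443", nil)
+//	if err != nil {
+//		// handle the dial or handshake error
+//	}
+//	defer conn.Close()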
+func Dial(network, addr string, config *Config) (*Conn, error) {
+	raddr := addr
+	c, err := net.Dial(network, raddr)
+	if err != nil {
+		return nil, err
+	}
+
+	colonPos := strings.LastIndex(raddr, ":")
+	if colonPos == -1 {
+		colonPos = len(raddr)
+	}
+	hostname := raddr[:colonPos]
+
+	if config == nil {
+		config = defaultConfig()
+	}
+	// If no ServerName is set, infer the ServerName
+	// from the hostname we're connecting to.
+	if config.ServerName == "" {
+		// Make a copy to avoid polluting argument or default.
+		c := *config
+		c.ServerName = hostname
+		config = &c
+	}
+	conn := Client(c, config)
+	if err = conn.Handshake(); err != nil {
+		c.Close()
+		return nil, err
+	}
+	return conn, nil
+}
+
+// LoadX509KeyPair reads and parses a public/private key pair from a pair of
+// files. The files must contain PEM encoded data.
+func LoadX509KeyPair(certFile, keyFile string) (cert Certificate, err error) {
+	certPEMBlock, err := ioutil.ReadFile(certFile)
+	if err != nil {
+		return
+	}
+	keyPEMBlock, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		return
+	}
+	return X509KeyPair(certPEMBlock, keyPEMBlock)
+}
+
+// X509KeyPair parses a public/private key pair from a pair of
+// PEM encoded data.
+func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err error) {
+	var certDERBlock *pem.Block
+	for {
+		certDERBlock, certPEMBlock = pem.Decode(certPEMBlock)
+		if certDERBlock == nil {
+			break
+		}
+		if certDERBlock.Type == "CERTIFICATE" {
+			cert.Certificate = append(cert.Certificate, certDERBlock.Bytes)
+		}
+	}
+
+	if len(cert.Certificate) == 0 {
+		err = errors.New("crypto/tls: failed to parse certificate PEM data")
+		return
+	}
+
+	var keyDERBlock *pem.Block
+	for {
+		keyDERBlock, keyPEMBlock = pem.Decode(keyPEMBlock)
+		if keyDERBlock == nil {
+			err = errors.New("crypto/tls: failed to parse key PEM data")
+			return
+		}
+		if keyDERBlock.Type == "PRIVATE KEY" || strings.HasSuffix(keyDERBlock.Type, " PRIVATE KEY") {
+			break
+		}
+	}
+
+	cert.PrivateKey, err = parsePrivateKey(keyDERBlock.Bytes)
+	if err != nil {
+		return
+	}
+
+	// We don't need to parse the public key for TLS, but we do so anyway
+	// to check that it looks sane and matches the private key.
+	x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
+	if err != nil {
+		return
+	}
+
+	switch pub := x509Cert.PublicKey.(type) {
+	case *rsa.PublicKey:
+		priv, ok := cert.PrivateKey.(*rsa.PrivateKey)
+		if !ok {
+			err = errors.New("crypto/tls: private key type does not match public key type")
+			return
+		}
+		if pub.N.Cmp(priv.N) != 0 {
+			err = errors.New("crypto/tls: private key does not match public key")
+			return
+		}
+	case *ecdsa.PublicKey:
+		priv, ok := cert.PrivateKey.(*ecdsa.PrivateKey)
+		if !ok {
+			err = errors.New("crypto/tls: private key type does not match public key type")
+			return
+		}
+		if pub.X.Cmp(priv.X) != 0 || pub.Y.Cmp(priv.Y) != 0 {
+			err = errors.New("crypto/tls: private key does not match public key")
+			return
+		}
+	default:
+		err = errors.New("crypto/tls: unknown public key algorithm")
+		return
+	}
+
+	return
+}
+
+// Attempt to parse the given private key DER block. OpenSSL 0.9.8 generates
+// PKCS#1 private keys by default, while OpenSSL 1.0.0 generates PKCS#8 keys.
+// OpenSSL ecparam generates SEC1 EC private keys for ECDSA. We try all three.
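+//
+// In summary, the fallback order is:
+//
+//	1. PKCS#1 via x509.ParsePKCS1PrivateKey (RSA)
+//	2. PKCS#8 via x509.ParsePKCS8PrivateKey (RSA or ECDSA only)
+//	3. SEC1   via x509.ParseECPrivateKey    (ECDSA)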
+func parsePrivateKey(der []byte) (crypto.PrivateKey, error) { + if key, err := x509.ParsePKCS1PrivateKey(der); err == nil { + return key, nil + } + if key, err := x509.ParsePKCS8PrivateKey(der); err == nil { + switch key := key.(type) { + case *rsa.PrivateKey, *ecdsa.PrivateKey: + return key, nil + default: + return nil, errors.New("crypto/tls: found unknown private key type in PKCS#8 wrapping") + } + } + if key, err := x509.ParseECPrivateKey(der); err == nil { + return key, nil + } + + return nil, errors.New("crypto/tls: failed to parse private key") +} === added file 'src/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go' --- src/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/core/tls/tls_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,107 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tls + +import ( + "testing" +) + +var rsaCertPEM = `-----BEGIN CERTIFICATE----- +MIIB0zCCAX2gAwIBAgIJAI/M7BYjwB+uMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTIwOTEyMjE1MjAyWhcNMTUwOTEyMjE1MjAyWjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANLJ +hPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wok/4xIA+ui35/MmNa +rtNuC+BdZ1tMuVCPFZcCAwEAAaNQME4wHQYDVR0OBBYEFJvKs8RfJaXTH08W+SGv +zQyKn0H8MB8GA1UdIwQYMBaAFJvKs8RfJaXTH08W+SGvzQyKn0H8MAwGA1UdEwQF +MAMBAf8wDQYJKoZIhvcNAQEFBQADQQBJlffJHybjDGxRMqaRmDhX0+6v02TUKZsW +r5QuVbpQhH6u+0UgcW0jp9QwpxoPTLTWGXEWBBBurxFwiCBhkQ+V +-----END CERTIFICATE----- +` + +var rsaKeyPEM = `-----BEGIN RSA PRIVATE KEY----- +MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo +k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G +6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N +MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW +SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T +xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi +D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g== +-----END RSA PRIVATE KEY----- +` + +// keyPEM is the same as rsaKeyPEM, but declares itself as just +// "PRIVATE KEY", not "RSA PRIVATE KEY". 
http://golang.org/issue/4477
+var keyPEM = `-----BEGIN PRIVATE KEY-----
+MIIBOwIBAAJBANLJhPHhITqQbPklG3ibCVxwGMRfp/v4XqhfdQHdcVfHap6NQ5Wo
+k/4xIA+ui35/MmNartNuC+BdZ1tMuVCPFZcCAwEAAQJAEJ2N+zsR0Xn8/Q6twa4G
+6OB1M1WO+k+ztnX/1SvNeWu8D6GImtupLTYgjZcHufykj09jiHmjHx8u8ZZB/o1N
+MQIhAPW+eyZo7ay3lMz1V01WVjNKK9QSn1MJlb06h/LuYv9FAiEA25WPedKgVyCW
+SmUwbPw8fnTcpqDWE3yTO3vKcebqMSsCIBF3UmVue8YU3jybC3NxuXq3wNm34R8T
+xVLHwDXh/6NJAiEAl2oHGGLz64BuAfjKrqwz7qMYr9HCLIe/YsoWq/olzScCIQDi
+D2lWusoe2/nEqfDVVWGWlyJ7yOmqaVm/iNUN9B2N2g==
+-----END PRIVATE KEY-----
+`
+
+var ecdsaCertPEM = `-----BEGIN CERTIFICATE-----
+MIIB/jCCAWICCQDscdUxw16XFDAJBgcqhkjOPQQBMEUxCzAJBgNVBAYTAkFVMRMw
+EQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0
+eSBMdGQwHhcNMTIxMTE0MTI0MDQ4WhcNMTUxMTE0MTI0MDQ4WjBFMQswCQYDVQQG
+EwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lk
+Z2l0cyBQdHkgTHRkMIGbMBAGByqGSM49AgEGBSuBBAAjA4GGAAQBY9+my9OoeSUR
+lDQdV/x8LsOuLilthhiS1Tz4aGDHIPwC1mlvnf7fg5lecYpMCrLLhauAc1UJXcgl
+01xoLuzgtAEAgv2P/jgytzRSpUYvgLBt1UA0leLYBy6mQQbrNEuqT3INapKIcUv8
+XxYP0xMEUksLPq6Ca+CRSqTtrd/23uTnapkwCQYHKoZIzj0EAQOBigAwgYYCQXJo
+A7Sl2nLVf+4Iu/tAX/IF4MavARKC4PPHK3zfuGfPR3oCCcsAoz3kAzOeijvd0iXb
+H5jBImIxPL4WxQNiBTexAkF8D1EtpYuWdlVQ80/h/f4pBcGiXPqX5h2PQSQY7hP1
++jwM1FGS4fREIOvlBYr/SzzQRtwrvrzGYxDEDbsC0ZGRnA==
+-----END CERTIFICATE-----
+`
+
+var ecdsaKeyPEM = `-----BEGIN EC PARAMETERS-----
+BgUrgQQAIw==
+-----END EC PARAMETERS-----
+-----BEGIN EC PRIVATE KEY-----
+MIHcAgEBBEIBrsoKp0oqcv6/JovJJDoDVSGWdirrkgCWxrprGlzB9o0X8fV675X0
+NwuBenXFfeZvVcwluO7/Q9wkYoPd/t3jGImgBwYFK4EEACOhgYkDgYYABAFj36bL
+06h5JRGUNB1X/Hwuw64uKW2GGJLVPPhoYMcg/ALWaW+d/t+DmV5xikwKssuFq4Bz
+VQldyCXTXGgu7OC0AQCC/Y/+ODK3NFKlRi+AsG3VQDSV4tgHLqZBBus0S6pPcg1q
+kohxS/xfFg/TEwRSSws+roJr4JFKpO2t3/be5OdqmQ==
+-----END EC PRIVATE KEY-----
+`
+
+var keyPairTests = []struct {
+	algo string
+	cert string
+	key  string
+}{
+	{"ECDSA", ecdsaCertPEM, ecdsaKeyPEM},
+	{"RSA", rsaCertPEM, rsaKeyPEM},
+	{"RSA-untyped", rsaCertPEM, keyPEM}, // golang.org/issue/4477
+}
+
+func TestX509KeyPair(t *testing.T) {
+	var pem []byte
+	for _, test := range keyPairTests {
+		pem = []byte(test.cert + test.key)
+		if _, err := X509KeyPair(pem, pem); err != nil {
+			t.Errorf("Failed to load %s cert followed by %s key: %s", test.algo, test.algo, err)
+		}
+		pem = []byte(test.key + test.cert)
+		if _, err := X509KeyPair(pem, pem); err != nil {
+			t.Errorf("Failed to load %s key followed by %s cert: %s", test.algo, test.algo, err)
+		}
+	}
+}
+
+func TestX509MixedKeyPair(t *testing.T) {
+	if _, err := X509KeyPair([]byte(rsaCertPEM), []byte(ecdsaKeyPEM)); err == nil {
+		t.Error("Load of RSA certificate succeeded with ECDSA private key")
+	}
+	if _, err := X509KeyPair([]byte(ecdsaCertPEM), []byte(rsaKeyPEM)); err == nil {
+		t.Error("Load of ECDSA certificate succeeded with RSA private key")
+	}
+}

=== added directory 'src/github.com/Azure/azure-sdk-for-go/management'
=== added directory 'src/github.com/Azure/azure-sdk-for-go/management/affinitygroup'
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go'
--- src/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/affinitygroup/client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,131 @@
+package affinitygroup
+
+import (
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const (
+	azureCreateAffinityGroupURL = "/affinitygroups"
+	azureGetAffinityGroupURL    = "/affinitygroups/%s"
+	azureListAffinityGroupsURL  = "/affinitygroups"
+	azureUpdateAffinityGroupURL = "/affinitygroups/%s"
+	azureDeleteAffinityGroupURL = "/affinitygroups/%s"
+
+	errParameterNotSpecified = "Parameter %s not specified."
+)
+
+// AffinityGroupClient simply contains a management.Client and has
+// methods for doing all affinity group-related API calls to Azure.
+type AffinityGroupClient struct {
+	mgmtClient management.Client
+}
+
+// NewClient returns an AffinityGroupClient with the given management.Client.
+func NewClient(mgmtClient management.Client) AffinityGroupClient {
+	return AffinityGroupClient{mgmtClient}
+}
+
+// CreateAffinityGroup creates a new affinity group.
+//
+// https://msdn.microsoft.com/en-us/library/azure/gg715317.aspx
+func (c AffinityGroupClient) CreateAffinityGroup(params CreateAffinityGroupParams) error {
+	params.Label = encodeLabel(params.Label)
+
+	req, err := xml.Marshal(params)
+	if err != nil {
+		return err
+	}
+
+	_, err = c.mgmtClient.SendAzurePostRequest(azureCreateAffinityGroupURL, req)
+	return err
+}
+
+// GetAffinityGroup returns the system properties that are associated with the
+// specified affinity group.
+//
+// https://msdn.microsoft.com/en-us/library/azure/ee460789.aspx
+func (c AffinityGroupClient) GetAffinityGroup(name string) (AffinityGroup, error) {
+	var affgroup AffinityGroup
+	if name == "" {
+		return affgroup, fmt.Errorf(errParameterNotSpecified, "name")
+	}
+
+	url := fmt.Sprintf(azureGetAffinityGroupURL, name)
+	resp, err := c.mgmtClient.SendAzureGetRequest(url)
+	if err != nil {
+		return affgroup, err
+	}
+
+	err = xml.Unmarshal(resp, &affgroup)
+	affgroup.Label = decodeLabel(affgroup.Label)
+	return affgroup, err
+}
+
+// ListAffinityGroups lists the affinity groups in the subscription on Azure.
+//
+// https://msdn.microsoft.com/en-us/library/azure/ee460797.aspx
+func (c AffinityGroupClient) ListAffinityGroups() (ListAffinityGroupsResponse, error) {
+	var affinitygroups ListAffinityGroupsResponse
+
+	resp, err := c.mgmtClient.SendAzureGetRequest(azureListAffinityGroupsURL)
+	if err != nil {
+		return affinitygroups, err
+	}
+
+	err = xml.Unmarshal(resp, &affinitygroups)
+
+	for i, grp := range affinitygroups.AffinityGroups {
+		affinitygroups.AffinityGroups[i].Label = decodeLabel(grp.Label)
+	}
+
+	return affinitygroups, err
+}
+
+// UpdateAffinityGroup updates the label or description of the given group.
+//
+// https://msdn.microsoft.com/en-us/library/azure/gg715316.aspx
+func (c AffinityGroupClient) UpdateAffinityGroup(name string, params UpdateAffinityGroupParams) error {
+	if name == "" {
+		return fmt.Errorf(errParameterNotSpecified, "name")
+	}
+
+	params.Label = encodeLabel(params.Label)
+	req, err := xml.Marshal(params)
+	if err != nil {
+		return err
+	}
+
+	url := fmt.Sprintf(azureUpdateAffinityGroupURL, name)
+	_, err = c.mgmtClient.SendAzurePutRequest(url, "text/xml", req)
+	return err
+}
+
+// DeleteAffinityGroup deletes the given affinity group.
+//
+// https://msdn.microsoft.com/en-us/library/azure/gg715314.aspx
+func (c AffinityGroupClient) DeleteAffinityGroup(name string) error {
+	if name == "" {
+		return fmt.Errorf(errParameterNotSpecified, "name")
+	}
+
+	url := fmt.Sprintf(azureDeleteAffinityGroupURL, name)
+	_, err := c.mgmtClient.SendAzureDeleteRequest(url)
+	return err
+}
+
+// encodeLabel is a helper function which encodes the given string
+// to the base64 string which will be sent to Azure as a Label.
+func encodeLabel(label string) string {
+	return base64.StdEncoding.EncodeToString([]byte(label))
+}
+
+// decodeLabel is a helper function which decodes the base64-encoded
+// label received from Azure back into a plain string.
+func decodeLabel(label string) string {
+	res, _ := base64.StdEncoding.DecodeString(label)
+	return string(res)
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go'
--- src/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/affinitygroup/entities.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,80 @@
+package affinitygroup
+
+import (
+	"encoding/xml"
+)
+
+// CreateAffinityGroupParams represents the set of parameters required for
+// an affinity group creation request to Azure.
+//
+// https://msdn.microsoft.com/en-us/library/azure/gg715317.aspx
+type CreateAffinityGroupParams struct {
+	XMLName     xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateAffinityGroup"`
+	Name        string
+	Label       string
+	Description string `xml:",omitempty"`
+	Location    string
+}
+
+// HostedService is a struct containing details about a hosted service that is
+// part of an affinity group on Azure.
+type HostedService struct {
+	URL         string `xml:"Url"`
+	ServiceName string
+}
+
+// StorageService is a struct containing details about a storage service that is
+// part of an affinity group on Azure.
+type StorageService struct {
+	URL         string `xml:"Url"`
+	ServiceName string
+}
+
+// AffinityGroup represents the properties of an affinity group on Azure.
+//
+// https://msdn.microsoft.com/en-us/library/azure/ee460789.aspx
+type AffinityGroup struct {
+	Name            string
+	Label           string
+	Description     string
+	Location        string
+	HostedServices  []HostedService
+	StorageServices []StorageService
+	Capabilities    []string
+}
+
+// ComputeCapabilities represents the sets of capabilities of an affinity group
+// obtained from an affinity group list call to Azure.
+type ComputeCapabilities struct {
+	VirtualMachineRoleSizes []string
+	WebWorkerRoleSizes      []string
+}
+
+// AffinityGroupListResponse represents the properties obtained for each
+// affinity group returned by an Azure list call.
+//
+// https://msdn.microsoft.com/en-us/library/azure/ee460797.aspx
+type AffinityGroupListResponse struct {
+	Name                string
+	Label               string
+	Description         string
+	Location            string
+	Capabilities        []string
+	ComputeCapabilities ComputeCapabilities
+}
+
+// ListAffinityGroupsResponse contains all the affinity groups obtained from a
+// call to the Azure API to list all affinity groups.
+type ListAffinityGroupsResponse struct {
+	AffinityGroups []AffinityGroupListResponse `xml:"AffinityGroup"`
+}
+
+// UpdateAffinityGroupParams is the set of parameters required to update an
+// affinity group on Azure.
+//
+// https://msdn.microsoft.com/en-us/library/azure/gg715316.aspx
+type UpdateAffinityGroupParams struct {
+	XMLName     xml.Name `xml:"http://schemas.microsoft.com/windowsazure UpdateAffinityGroup"`
+	Label       string   `xml:",omitempty"`
+	Description string   `xml:",omitempty"`
+}

=== added file 'src/github.com/Azure/azure-sdk-for-go/management/client.go'
--- src/github.com/Azure/azure-sdk-for-go/management/client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,138 @@
+// Package management provides the main API client to construct other clients
+// and make requests to the Microsoft Azure Service Management REST API.
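+//
+// A typical usage sketch (the subscription ID and certificate bytes are
+// placeholders to be supplied by the caller):
+//
+//	client, err := management.NewClient(subscriptionID, managementCert)
+//	if err != nil {
+//		// handle configuration error
+//	}
+//	// Construct service-specific clients from it, for example:
+//	hsClient := hostedservice.NewClient(client)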
+package management + +import ( + "errors" + "time" +) + +const ( + DefaultAzureManagementURL = "https://management.core.windows.net" + DefaultOperationPollInterval = time.Second * 30 + DefaultAPIVersion = "2014-10-01" + DefaultUserAgent = "azure-sdk-for-go" + + errPublishSettingsConfiguration = "PublishSettingsFilePath is set. Consequently ManagementCertificatePath and SubscriptionId must not be set." + errManagementCertificateConfiguration = "Both ManagementCertificatePath and SubscriptionId should be set, and PublishSettingsFilePath must not be set." + errParamNotSpecified = "Parameter %s is not specified." +) + +type client struct { + publishSettings publishSettings + config ClientConfig +} + +// Client is the base Azure Service Management API client instance that +// can be used to construct client instances for various services. +type Client interface { + // SendAzureGetRequest sends a request to the management API using the HTTP GET method + // and returns the response body or an error. + SendAzureGetRequest(url string) ([]byte, error) + + // SendAzurePostRequest sends a request to the management API using the HTTP POST method + // and returns the request ID or an error. + SendAzurePostRequest(url string, data []byte) (OperationID, error) + + // SendAzurePostRequestWithReturnedResponse sends a request to the management API using + // the HTTP POST method and returns the response body or an error. + SendAzurePostRequestWithReturnedResponse(url string, data []byte) ([]byte, error) + + // SendAzurePutRequest sends a request to the management API using the HTTP PUT method + // and returns the request ID or an error. The content type can be specified, however + // if an empty string is passed, the default of "application/xml" will be used. + SendAzurePutRequest(url, contentType string, data []byte) (OperationID, error) + + // SendAzureDeleteRequest sends a request to the management API using the HTTP DELETE method + // and returns the request ID or an error. + SendAzureDeleteRequest(url string) (OperationID, error) + + // GetOperationStatus gets the status of operation with given Operation ID. + // WaitForOperation utility method can be used for polling for operation status. + GetOperationStatus(operationID OperationID) (GetOperationStatusResponse, error) + + // WaitForOperation polls the Azure API for given operation ID indefinitely + // until the operation is completed with either success or failure. + // It is meant to be used for waiting for the result of the methods that + // return an OperationID value (meaning a long running operation has started). + // + // Cancellation of the polling loop (for instance, timing out) is done through + // cancel channel. If the user does not want to cancel, a nil chan can be provided. + // To cancel the method, it is recommended to close the channel provided to this + // method. + // + // If the operation was not successful or cancelling is signaled, an error + // is returned. + WaitForOperation(operationID OperationID, cancel chan struct{}) error +} + +// ClientConfig provides a configuration for use by a Client. +type ClientConfig struct { + ManagementURL string + OperationPollInterval time.Duration + UserAgent string + APIVersion string +} + +// NewAnonymousClient creates a new azure.Client with no credentials set. +func NewAnonymousClient() Client { + return client{} +} + +// DefaultConfig returns the default client configuration used to construct +// a client. This value can be used to make modifications on the default API +// configuration. 
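+//
+// For example (the poll interval is illustrative):
+//
+//	config := DefaultConfig()
+//	config.OperationPollInterval = 10 * time.Second
+//	client, err := NewClientFromConfig(subscriptionID, managementCert, config)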
+func DefaultConfig() ClientConfig { + return ClientConfig{ + ManagementURL: DefaultAzureManagementURL, + OperationPollInterval: DefaultOperationPollInterval, + APIVersion: DefaultAPIVersion, + UserAgent: DefaultUserAgent, + } +} + +// NewClient creates a new Client using the given subscription ID and +// management certificate. +func NewClient(subscriptionID string, managementCert []byte) (Client, error) { + return NewClientFromConfig(subscriptionID, managementCert, DefaultConfig()) +} + +// NewClientFromConfig creates a new Client using a given ClientConfig. +func NewClientFromConfig(subscriptionID string, managementCert []byte, config ClientConfig) (Client, error) { + return makeClient(subscriptionID, managementCert, config) +} + +func makeClient(subscriptionID string, managementCert []byte, config ClientConfig) (Client, error) { + var c client + + if subscriptionID == "" { + return c, errors.New("azure: subscription ID required") + } + + if len(managementCert) == 0 { + return c, errors.New("azure: management certificate required") + } + + publishSettings := publishSettings{ + SubscriptionID: subscriptionID, + SubscriptionCert: managementCert, + SubscriptionKey: managementCert, + } + + // Validate client configuration + switch { + case config.ManagementURL == "": + return c, errors.New("azure: base URL required") + case config.OperationPollInterval <= 0: + return c, errors.New("azure: operation polling interval must be a positive duration") + case config.APIVersion == "": + return c, errors.New("azure: client configuration must specify an API version") + case config.UserAgent == "": + config.UserAgent = DefaultUserAgent + } + + return client{ + publishSettings: publishSettings, + config: config, + }, nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/errors.go' --- src/github.com/Azure/azure-sdk-for-go/management/errors.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/errors.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,36 @@ +package management + +import ( + "encoding/xml" + "fmt" +) + +// AzureError represents an error returned by the management API. It has an error +// code (for example, ResourceNotFound) and a descriptive message. +type AzureError struct { + Code string + Message string +} + +//Error implements the error interface for the AzureError type. +func (e AzureError) Error() string { + return fmt.Sprintf("Error response from Azure. Code: %s, Message: %s", e.Code, e.Message) +} + +// IsResourceNotFoundError returns true if the provided error is an AzureError +// reporting that a given resource has not been found. +func IsResourceNotFoundError(err error) bool { + azureErr, ok := err.(AzureError) + return ok && azureErr.Code == "ResourceNotFound" +} + +// getAzureError converts an error response body into an AzureError instance. 
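+//
+// The body is expected to be shaped roughly like the following (values are
+// illustrative):
+//
+//	<Error xmlns="http://schemas.microsoft.com/windowsazure">
+//	  <Code>ResourceNotFound</Code>
+//	  <Message>The resource was not found.</Message>
+//	</Error>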
+func getAzureError(responseBody []byte) error { + var azErr AzureError + err := xml.Unmarshal(responseBody, &azErr) + if err != nil { + return fmt.Errorf("Failed parsing contents to AzureError format: %v", err) + } + return azErr + +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/errors_test.go' --- src/github.com/Azure/azure-sdk-for-go/management/errors_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/errors_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +package management_test + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// TestIsResourceNotFoundError tests IsResourceNotFoundError with the +// set of given test cases. +func TestIsResourceNotFoundError(t *testing.T) { + // isResourceNotFoundTestCases is a set of structs comprising of the error + // IsResourceNotFoundError should test and the expected result. + var isResourceNotFoundTestCases = []struct { + err error + expected bool + }{ + {nil, false}, + {fmt.Errorf("Some other random error."), false}, + {management.AzureError{Code: "ResourceNotFound"}, true}, + {management.AzureError{Code: "NotAResourceNotFound"}, false}, + } + + for i, testCase := range isResourceNotFoundTestCases { + if res := management.IsResourceNotFoundError(testCase.err); res != testCase.expected { + t.Fatalf("Test %d: error %s - expected %t - got %t", i+1, testCase.err, testCase.expected, res) + } + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/hostedservice' === added file 'src/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/hostedservice/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,125 @@ +// Package hostedservice provides a client for Hosted Services. +package hostedservice + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureXmlns = "http://schemas.microsoft.com/windowsazure" + azureDeploymentListURL = "services/hostedservices/%s/deployments" + azureHostedServiceListURL = "services/hostedservices" + azureHostedServiceAvailabilityURL = "services/hostedservices/operations/isavailable/%s" + azureDeploymentURL = "services/hostedservices/%s/deployments/%s" + deleteAzureDeploymentURL = "services/hostedservices/%s/deployments/%s" + getHostedServicePropertiesURL = "services/hostedservices/%s" + azureServiceCertificateURL = "services/hostedservices/%s/certificates" + + errParamNotSpecified = "Parameter %s is not specified." 
+) + +//NewClient is used to return a handle to the HostedService API +func NewClient(client management.Client) HostedServiceClient { + return HostedServiceClient{client: client} +} + +func (h HostedServiceClient) CreateHostedService(params CreateHostedServiceParameters) error { + req, err := xml.Marshal(params) + if err != nil { + return err + } + + _, err = h.client.SendAzurePostRequest(azureHostedServiceListURL, req) // not a long running operation + return err +} + +func (h HostedServiceClient) CheckHostedServiceNameAvailability(dnsName string) (AvailabilityResponse, error) { + var r AvailabilityResponse + if dnsName == "" { + return r, fmt.Errorf(errParamNotSpecified, "dnsName") + } + + requestURL := fmt.Sprintf(azureHostedServiceAvailabilityURL, dnsName) + response, err := h.client.SendAzureGetRequest(requestURL) + if err != nil { + return r, err + } + + err = xml.Unmarshal(response, &r) + return r, err +} + +func (h HostedServiceClient) DeleteHostedService(dnsName string, deleteDisksAndBlobs bool) (management.OperationID, error) { + if dnsName == "" { + return "", fmt.Errorf(errParamNotSpecified, "dnsName") + } + + requestURL := fmt.Sprintf(getHostedServicePropertiesURL, dnsName) + if deleteDisksAndBlobs { + requestURL += "?comp=media" + } + return h.client.SendAzureDeleteRequest(requestURL) +} + +func (h HostedServiceClient) GetHostedService(name string) (HostedService, error) { + hostedService := HostedService{} + if name == "" { + return hostedService, fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(getHostedServicePropertiesURL, name) + response, err := h.client.SendAzureGetRequest(requestURL) + if err != nil { + return hostedService, err + } + + err = xml.Unmarshal(response, &hostedService) + if err != nil { + return hostedService, err + } + + decodedLabel, err := base64.StdEncoding.DecodeString(hostedService.LabelBase64) + if err != nil { + return hostedService, err + } + hostedService.Label = string(decodedLabel) + return hostedService, nil +} + +func (h HostedServiceClient) ListHostedServices() (ListHostedServicesResponse, error) { + var response ListHostedServicesResponse + + data, err := h.client.SendAzureGetRequest(azureHostedServiceListURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +func (h HostedServiceClient) AddCertificate(dnsName string, certData []byte, certificateFormat CertificateFormat, password string) (management.OperationID, error) { + if dnsName == "" { + return "", fmt.Errorf(errParamNotSpecified, "dnsName") + } + + certBase64 := base64.StdEncoding.EncodeToString(certData) + + addCertificate := CertificateFile{ + Data: certBase64, + CertificateFormat: certificateFormat, + Password: password, + Xmlns: azureXmlns, + } + buffer, err := xml.Marshal(addCertificate) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureServiceCertificateURL, dnsName) + return h.client.SendAzurePostRequest(requestURL, buffer) +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/hostedservice/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +package hostedservice + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +//HostedServiceClient is used to perform operations on Azure Hosted Services +type HostedServiceClient struct { + 
client management.Client +} + +type CreateHostedServiceParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateHostedService"` + ServiceName string + Label string + Description string + Location string + ReverseDNSFqdn string `xml:"ReverseDnsFqdn,omitempty"` +} + +type AvailabilityResponse struct { + Xmlns string `xml:"xmlns,attr"` + Result bool + Reason string +} + +type HostedService struct { + URL string `xml:"Url"` + ServiceName string + Description string `xml:"HostedServiceProperties>Description"` + AffinityGroup string `xml:"HostedServiceProperties>AffinityGroup"` + Location string `xml:"HostedServiceProperties>Location"` + LabelBase64 string `xml:"HostedServiceProperties>Label"` + Label string + Status string `xml:"HostedServiceProperties>Status"` + ReverseDNSFqdn string `xml:"HostedServiceProperties>ReverseDnsFqdn"` + DefaultWinRmCertificateThumbprint string +} + +type CertificateFile struct { + Xmlns string `xml:"xmlns,attr"` + Data string + CertificateFormat CertificateFormat + Password string `xml:",omitempty"` +} + +type CertificateFormat string + +const ( + CertificateFormatPfx = CertificateFormat("pfx") + CertificateFormatCer = CertificateFormat("cer") +) + +type ListHostedServicesResponse struct { + HostedServices []HostedService `xml:"HostedService"` +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/http.go' --- src/github.com/Azure/azure-sdk-for-go/management/http.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/http.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,167 @@ +package management + +import ( + "bytes" + "fmt" + + "github.com/Azure/azure-sdk-for-go/core/http" + "github.com/Azure/azure-sdk-for-go/core/tls" +) + +const ( + msVersionHeader = "x-ms-version" + requestIDHeader = "x-ms-request-id" + uaHeader = "User-Agent" + contentHeader = "Content-Type" + defaultContentHeaderValue = "application/xml" +) + +func (client client) SendAzureGetRequest(url string) ([]byte, error) { + resp, err := client.sendAzureRequest("GET", url, "", nil) + if err != nil { + return nil, err + } + return getResponseBody(resp) +} + +func (client client) SendAzurePostRequest(url string, data []byte) (OperationID, error) { + return client.doAzureOperation("POST", url, "", data) +} + +func (client client) SendAzurePostRequestWithReturnedResponse(url string, data []byte) ([]byte, error) { + resp, err := client.sendAzureRequest("POST", url, "", data) + if err != nil { + return nil, err + } + + return getResponseBody(resp) +} + +func (client client) SendAzurePutRequest(url, contentType string, data []byte) (OperationID, error) { + return client.doAzureOperation("PUT", url, contentType, data) +} + +func (client client) SendAzureDeleteRequest(url string) (OperationID, error) { + return client.doAzureOperation("DELETE", url, "", nil) +} + +func (client client) doAzureOperation(method, url, contentType string, data []byte) (OperationID, error) { + response, err := client.sendAzureRequest(method, url, contentType, data) + if err != nil { + return "", err + } + return getOperationID(response) +} + +func getOperationID(response *http.Response) (OperationID, error) { + requestID := response.Header.Get(requestIDHeader) + if requestID == "" { + return "", fmt.Errorf("Could not retrieve operation id from %q header", requestIDHeader) + } + return OperationID(requestID), nil +} + +// sendAzureRequest constructs an HTTP client for the request, sends it to the +// management API and returns the response or an error. 
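// Example (a minimal sketch; the URL is a placeholder): the POST, PUT and
// DELETE helpers above return the OperationID of the asynchronous operation,
// which callers typically hand to WaitForOperation (see operations.go). A nil
// cancel channel simply disables cancellation.
//
//    opID, err := c.SendAzureDeleteRequest("services/hostedservices/myservice")
//    if err == nil {
//        err = c.WaitForOperation(opID, nil)
//    }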
+func (client client) sendAzureRequest(method, url, contentType string, data []byte) (*http.Response, error) { + if method == "" { + return nil, fmt.Errorf(errParamNotSpecified, "method") + } + if url == "" { + return nil, fmt.Errorf(errParamNotSpecified, "url") + } + + httpClient := client.createHTTPClient() + + response, err := client.sendRequest(httpClient, url, method, contentType, data, 5) + if err != nil { + return nil, err + } + + return response, nil +} + +// createHTTPClient creates an HTTP Client configured with the key pair for +// the subscription for this client. +func (client client) createHTTPClient() *http.Client { + cert, _ := tls.X509KeyPair(client.publishSettings.SubscriptionCert, client.publishSettings.SubscriptionKey) + + ssl := &tls.Config{} + ssl.Certificates = []tls.Certificate{cert} + + httpClient := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: ssl, + }, + } + + return httpClient +} + +// sendRequest sends a request to the Azure management API using the given +// HTTP client and parameters. It returns the response from the call or an +// error. +func (client client) sendRequest(httpClient *http.Client, url, requestType, contentType string, data []byte, numberOfRetries int) (*http.Response, error) { + request, reqErr := client.createAzureRequest(url, requestType, contentType, data) + if reqErr != nil { + return nil, reqErr + } + + response, err := httpClient.Do(request) + if err != nil { + if numberOfRetries == 0 { + return nil, err + } + + return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) + } + + if response.StatusCode >= http.StatusBadRequest { + body, err := getResponseBody(response) + if err != nil { + // Failed to read the response body + return nil, err + } + azureErr := getAzureError(body) + if azureErr != nil { + if numberOfRetries == 0 { + return nil, azureErr + } + + return client.sendRequest(httpClient, url, requestType, contentType, data, numberOfRetries-1) + } + } + + return response, nil +} + +// createAzureRequest packages up the request with the correct set of headers and returns +// the request object or an error. +func (client client) createAzureRequest(url string, requestType string, contentType string, data []byte) (*http.Request, error) { + var request *http.Request + var err error + + url = fmt.Sprintf("%s/%s/%s", client.config.ManagementURL, client.publishSettings.SubscriptionID, url) + if data != nil { + body := bytes.NewBuffer(data) + request, err = http.NewRequest(requestType, url, body) + } else { + request, err = http.NewRequest(requestType, url, nil) + } + + if err != nil { + return nil, err + } + + request.Header.Set(msVersionHeader, client.config.APIVersion) + request.Header.Set(uaHeader, client.config.UserAgent) + + if contentType != "" { + request.Header.Set(contentHeader, contentType) + } else { + request.Header.Set(contentHeader, defaultContentHeaderValue) + } + + return request, nil +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/location' === added file 'src/github.com/Azure/azure-sdk-for-go/management/location/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/location/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/location/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +// Package location provides a client for Locations. 
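// Example (a minimal sketch, assuming a management.Client c): listing the
// locations available to the subscription; the String method in this
// package's entities.go renders the result as a comma-separated list of names.
//
//    locs, err := location.NewClient(c).ListLocations()
//    if err == nil {
//        fmt.Println(locs)
//    }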
+package location + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureLocationListURL = "locations" + errParamNotSpecified = "Parameter %s is not specified." +) + +//NewClient is used to instantiate a new LocationClient from an Azure client +func NewClient(client management.Client) LocationClient { + return LocationClient{client: client} +} + +func (c LocationClient) ListLocations() (ListLocationsResponse, error) { + var l ListLocationsResponse + + response, err := c.client.SendAzureGetRequest(azureLocationListURL) + if err != nil { + return l, err + } + + err = xml.Unmarshal(response, &l) + return l, err +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/location/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/location/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/location/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,37 @@ +package location + +import ( + "bytes" + "encoding/xml" + "fmt" + "strings" + + "github.com/Azure/azure-sdk-for-go/management" +) + +//LocationClient is used to perform operations on Azure Locations +type LocationClient struct { + client management.Client +} + +type ListLocationsResponse struct { + XMLName xml.Name `xml:"Locations"` + Locations []Location `xml:"Location"` +} + +type Location struct { + Name string + DisplayName string + AvailableServices []string `xml:"AvailableServices>AvailableService"` + WebWorkerRoleSizes []string `xml:"ComputeCapabilities>WebWorkerRoleSizes>RoleSize"` + VirtualMachineRoleSizes []string `xml:"ComputeCapabilities>VirtualMachinesRoleSizes>RoleSize"` +} + +func (ll ListLocationsResponse) String() string { + var buf bytes.Buffer + for _, l := range ll.Locations { + fmt.Fprintf(&buf, "%s, ", l.Name) + } + + return strings.Trim(buf.String(), ", ") +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup' === added file 'src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,245 @@ +// Package networksecuritygroup provides a client for Network Security Groups. +package networksecuritygroup + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + createSecurityGroupURL = "services/networking/networksecuritygroups" + deleteSecurityGroupURL = "services/networking/networksecuritygroups/%s" + getSecurityGroupURL = "services/networking/networksecuritygroups/%s?detaillevel=full" + listSecurityGroupsURL = "services/networking/networksecuritygroups" + addSecurityGroupToSubnetURL = "services/networking/virtualnetwork/%s/subnets/%s/networksecuritygroups" + getSecurityGroupForSubnetURL = "services/networking/virtualnetwork/%s/subnets/%s/networksecuritygroups" + removeSecurityGroupFromSubnetURL = "services/networking/virtualnetwork/%s/subnets/%s/networksecuritygroups/%s" + setSecurityGroupRuleURL = "services/networking/networksecuritygroups/%s/rules/%s" + deleteSecurityGroupRuleURL = "services/networking/networksecuritygroups/%s/rules/%s" + + errParamNotSpecified = "Parameter %s is not specified." 
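// Example (a minimal sketch, assuming a management.Client c and an existing
// virtual network "vnet1" with a subnet "subnet1"; all names are
// placeholders): creating a group with the client defined below and attaching
// it to a subnet, polling each returned OperationID in between.
//
//    sg := networksecuritygroup.NewClient(c)
//    opID, err := sg.CreateNetworkSecurityGroup("web-nsg", "web tier", "West US")
//    if err == nil {
//        err = c.WaitForOperation(opID, nil)
//    }
//    if err == nil {
//        opID, err = sg.AddNetworkSecurityToSubnet("web-nsg", "subnet1", "vnet1")
//    }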
+) + +// NewClient is used to instantiate a new SecurityGroupClient from an Azure client +func NewClient(client management.Client) SecurityGroupClient { + return SecurityGroupClient{client: client} +} + +// CreateNetworkSecurityGroup creates a new network security group within +// the context of the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/dn913818.aspx +func (sg SecurityGroupClient) CreateNetworkSecurityGroup( + name string, + label string, + location string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + if location == "" { + return "", fmt.Errorf(errParamNotSpecified, "location") + } + + data, err := xml.Marshal(SecurityGroupRequest{ + Name: name, + Label: label, + Location: location, + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(createSecurityGroupURL) + return sg.client.SendAzurePostRequest(requestURL, data) +} + +// DeleteNetworkSecurityGroup deletes the specified network security group from the subscription +// +// https://msdn.microsoft.com/en-us/library/azure/dn913825.aspx +func (sg SecurityGroupClient) DeleteNetworkSecurityGroup( + name string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(deleteSecurityGroupURL, name) + return sg.client.SendAzureDeleteRequest(requestURL) +} + +// GetNetworkSecurityGroup returns information about the specified network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx +func (sg SecurityGroupClient) GetNetworkSecurityGroup(name string) (SecurityGroupResponse, error) { + if name == "" { + return SecurityGroupResponse{}, fmt.Errorf(errParamNotSpecified, "name") + } + + var securityGroup SecurityGroupResponse + + requestURL := fmt.Sprintf(getSecurityGroupURL, name) + response, err := sg.client.SendAzureGetRequest(requestURL) + if err != nil { + return securityGroup, err + } + + err = xml.Unmarshal(response, &securityGroup) + return securityGroup, err +} + +// ListNetworkSecurityGroups returns a list of the network security groups +// in the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/dn913815.aspx +func (sg SecurityGroupClient) ListNetworkSecurityGroups() (SecurityGroupList, error) { + var securityGroups SecurityGroupList + + response, err := sg.client.SendAzureGetRequest(listSecurityGroupsURL) + if err != nil { + return securityGroups, err + } + + err = xml.Unmarshal(response, &securityGroups) + return securityGroups, err +} + +// AddNetworkSecurityToSubnet associates the network security group with +// specified subnet in a virtual network +// +// https://msdn.microsoft.com/en-us/library/azure/dn913822.aspx +func (sg SecurityGroupClient) AddNetworkSecurityToSubnet( + name string, + subnet string, + virtualNetwork string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + if subnet == "" { + return "", fmt.Errorf(errParamNotSpecified, "subnet") + } + if virtualNetwork == "" { + return "", fmt.Errorf(errParamNotSpecified, "virtualNetwork") + } + + data, err := xml.Marshal(SecurityGroupRequest{Name: name}) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(addSecurityGroupToSubnetURL, virtualNetwork, subnet) + return sg.client.SendAzurePostRequest(requestURL, data) +} + +// GetNetworkSecurityGroupForSubnet returns information about the network +// security group associated with a subnet 
+// +// https://msdn.microsoft.com/en-us/library/azure/dn913817.aspx +func (sg SecurityGroupClient) GetNetworkSecurityGroupForSubnet( + subnet string, + virtualNetwork string) (SecurityGroupResponse, error) { + if subnet == "" { + return SecurityGroupResponse{}, fmt.Errorf(errParamNotSpecified, "subnet") + } + if virtualNetwork == "" { + return SecurityGroupResponse{}, fmt.Errorf(errParamNotSpecified, "virtualNetwork") + } + + var securityGroup SecurityGroupResponse + + requestURL := fmt.Sprintf(getSecurityGroupForSubnetURL, virtualNetwork, subnet) + response, err := sg.client.SendAzureGetRequest(requestURL) + if err != nil { + return securityGroup, err + } + + err = xml.Unmarshal(response, &securityGroup) + return securityGroup, err +} + +// RemoveNetworkSecurityGroupFromSubnet removes the association of the +// specified network security group from the specified subnet +// +// https://msdn.microsoft.com/en-us/library/azure/dn913820.aspx +func (sg SecurityGroupClient) RemoveNetworkSecurityGroupFromSubnet( + name string, + subnet string, + virtualNetwork string) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + if subnet == "" { + return "", fmt.Errorf(errParamNotSpecified, "subnet") + } + if virtualNetwork == "" { + return "", fmt.Errorf(errParamNotSpecified, "virtualNetwork") + } + + requestURL := fmt.Sprintf(removeSecurityGroupFromSubnetURL, virtualNetwork, subnet, name) + return sg.client.SendAzureDeleteRequest(requestURL) +} + +// SetNetworkSecurityGroupRule adds or updates a network security rule that +// is associated with the specified network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913819.aspx +func (sg SecurityGroupClient) SetNetworkSecurityGroupRule( + securityGroup string, + rule RuleRequest) (management.OperationID, error) { + if securityGroup == "" { + return "", fmt.Errorf(errParamNotSpecified, "securityGroup") + } + if rule.Name == "" { + return "", fmt.Errorf(errParamNotSpecified, "Name") + } + if rule.Type == "" { + return "", fmt.Errorf(errParamNotSpecified, "Type") + } + if rule.Priority == 0 { + return "", fmt.Errorf(errParamNotSpecified, "Priority") + } + if rule.Action == "" { + return "", fmt.Errorf(errParamNotSpecified, "Action") + } + if rule.SourceAddressPrefix == "" { + return "", fmt.Errorf(errParamNotSpecified, "SourceAddressPrefix") + } + if rule.SourcePortRange == "" { + return "", fmt.Errorf(errParamNotSpecified, "SourcePortRange") + } + if rule.DestinationAddressPrefix == "" { + return "", fmt.Errorf(errParamNotSpecified, "DestinationAddressPrefix") + } + if rule.DestinationPortRange == "" { + return "", fmt.Errorf(errParamNotSpecified, "DestinationPortRange") + } + if rule.Protocol == "" { + return "", fmt.Errorf(errParamNotSpecified, "Protocol") + } + + data, err := xml.Marshal(rule) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(setSecurityGroupRuleURL, securityGroup, rule.Name) + return sg.client.SendAzurePutRequest(requestURL, "", data) +} + +// DeleteNetworkSecurityGroupRule deletes a network security group rule from +// the specified network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913816.aspx +func (sg SecurityGroupClient) DeleteNetworkSecurityGroupRule( + securityGroup string, + rule string) (management.OperationID, error) { + if securityGroup == "" { + return "", fmt.Errorf(errParamNotSpecified, "securityGroup") + } + if rule == "" { + return "", fmt.Errorf(errParamNotSpecified, "rule") + } + + 
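// Example (a minimal sketch, assuming the sg client and management.Client c
// from the sketch near the top of this file; every value is a placeholder):
// a RuleRequest for SetNetworkSecurityGroupRule above. All of the fields
// shown are required by the validation at the top of that method.
//
//    rule := networksecuritygroup.RuleRequest{
//        Name:                     "allow-https",
//        Type:                     networksecuritygroup.RuleTypeInbound,
//        Priority:                 100,
//        Action:                   networksecuritygroup.RuleActionAllow,
//        SourceAddressPrefix:      "*",
//        SourcePortRange:          "*",
//        DestinationAddressPrefix: "*",
//        DestinationPortRange:     "443",
//        Protocol:                 networksecuritygroup.RuleProtocolTCP,
//    }
//    opID, err := sg.SetNetworkSecurityGroupRule("web-nsg", rule)
//    if err != nil {
//        return err
//    }
//    return c.WaitForOperation(opID, nil)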
requestURL := fmt.Sprintf(deleteSecurityGroupRuleURL, securityGroup, rule) + return sg.client.SendAzureDeleteRequest(requestURL) +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/networksecuritygroup/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,115 @@ +// Package networksecuritygroup implements operations for managing network security groups +// using the Service Management REST API +// +// https://msdn.microsoft.com/en-us/library/azure/dn913824.aspx +package networksecuritygroup + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// SecurityGroupClient is used to perform operations on network security groups +type SecurityGroupClient struct { + client management.Client +} + +// SecurityGroupRequest represents a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx +type SecurityGroupRequest struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure NetworkSecurityGroup"` + Name string + Label string `xml:",omitempty"` + Location string `xml:",omitempty"` +} + +// SecurityGroupResponse represents a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx +type SecurityGroupResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure NetworkSecurityGroup"` + Name string + Label string `xml:",omitempty"` + Location string `xml:",omitempty"` + State SecurityGroupState `xml:",omitempty"` + Rules []RuleResponse `xml:">Rule,omitempty"` +} + +// SecurityGroupList represents a list of security groups +type SecurityGroupList []SecurityGroupResponse + +// SecurityGroupState represents a security group state +type SecurityGroupState string + +// These constants represent the possible security group states +const ( + SecurityGroupStateCreated SecurityGroupState = "Created" + SecurityGroupStateCreating SecurityGroupState = "Creating" + SecurityGroupStateUpdating SecurityGroupState = "Updating" + SecurityGroupStateDeleting SecurityGroupState = "Deleting" + SecurityGroupStateUnavailable SecurityGroupState = "Unavailable" +) + +// RuleRequest represents a single rule of a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx#bk_rules +type RuleRequest struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Rule"` + Name string + Type RuleType + Priority int + Action RuleAction + SourceAddressPrefix string + SourcePortRange string + DestinationAddressPrefix string + DestinationPortRange string + Protocol RuleProtocol +} + +// RuleResponse represents a single rule of a network security group +// +// https://msdn.microsoft.com/en-us/library/azure/dn913821.aspx#bk_rules +type RuleResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Rule"` + Name string + Type RuleType + Priority int + Action RuleAction + SourceAddressPrefix string + SourcePortRange string + DestinationAddressPrefix string + DestinationPortRange string + Protocol RuleProtocol + State string `xml:",omitempty"` + IsDefault bool `xml:",omitempty"` +} + +// RuleType represents a rule type +type RuleType string + +// These constants represent the possible rule types +const ( + RuleTypeInbound RuleType = "Inbound" + RuleTypeOutbound RuleType = "Outbound" +) + +// RuleAction represents a rule action 
+type RuleAction string + +// These constants represent the possible rule actions +const ( + RuleActionAllow RuleAction = "Allow" + RuleActionDeny RuleAction = "Deny" +) + +// RuleProtocol represents a rule protocol +type RuleProtocol string + +// These constants represent the possible rule protocols +const ( + RuleProtocolTCP RuleProtocol = "TCP" + RuleProtocolUDP RuleProtocol = "UDP" + RuleProtocolAll RuleProtocol = "*" +) === added file 'src/github.com/Azure/azure-sdk-for-go/management/operations.go' --- src/github.com/Azure/azure-sdk-for-go/management/operations.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/operations.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +package management + +import ( + "encoding/xml" + "errors" + "fmt" + "time" +) + +var ( + // ErrOperationCancelled is returned from WaitForOperation when the polling + // loop is cancelled by signaling the cancel channel. + ErrOperationCancelled = errors.New("Polling for operation status cancelled") +) + +// GetOperationStatusResponse represents an in-flight operation. Use +// client.GetOperationStatus() to get the operation given the operation ID, or +// use WaitForOperation() to poll and wait until the operation has completed. +// See https://msdn.microsoft.com/en-us/library/azure/ee460783.aspx +type GetOperationStatusResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Operation"` + ID string + Status OperationStatus + HTTPStatusCode string + Error *AzureError +} + +// OperationStatus describes the states a Microsoft Azure Service Management +// operation can be in. +type OperationStatus string + +// List of states an operation can be reported as +const ( + OperationStatusInProgress OperationStatus = "InProgress" + OperationStatusSucceeded OperationStatus = "Succeeded" + OperationStatusFailed OperationStatus = "Failed" +) + +// OperationID is assigned by the Azure API and can be used to look up the +// status of an operation +type OperationID string + +func (c client) GetOperationStatus(operationID OperationID) (GetOperationStatusResponse, error) { + operation := GetOperationStatusResponse{} + if operationID == "" { + return operation, fmt.Errorf(errParamNotSpecified, "operationID") + } + + url := fmt.Sprintf("operations/%s", operationID) + response, azureErr := c.SendAzureGetRequest(url) + if azureErr != nil { + return operation, azureErr + } + + err := xml.Unmarshal(response, &operation) + return operation, err +} + +func (c client) WaitForOperation(operationID OperationID, cancel chan struct{}) error { + for { + done, err := c.checkOperationStatus(operationID) + if err != nil || done { + return err + } + select { + case <-time.After(c.config.OperationPollInterval): + case <-cancel: + return ErrOperationCancelled + } + } +} + +func (c client) checkOperationStatus(id OperationID) (done bool, err error) { + op, err := c.GetOperationStatus(id) + if err != nil { + return false, fmt.Errorf("Failed to get operation status '%s': %v", id, err) + } + + switch op.Status { + case OperationStatusSucceeded: + return true, nil + case OperationStatusFailed: + if op.Error != nil { + return true, op.Error + } + return true, fmt.Errorf("Azure Operation (x-ms-request-id=%s) has failed", id) + case OperationStatusInProgress: + return false, nil + default: + return false, fmt.Errorf("Unknown operation status returned from API: %s (x-ms-request-id=%s)", op.Status, id) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/osimage' === added file
'src/github.com/Azure/azure-sdk-for-go/management/osimage/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/osimage/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/osimage/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,31 @@ +// Package osimage provides a client for Operating System Images. +package osimage + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureImageListURL = "services/images" + errInvalidImage = "Can not find image %s in specified subscription, please specify another image name." + errParamNotSpecified = "Parameter %s is not specified." +) + +// NewClient is used to instantiate a new OSImageClient from an Azure client. +func NewClient(client management.Client) OSImageClient { + return OSImageClient{client: client} +} + +func (c OSImageClient) ListOSImages() (ListOSImagesResponse, error) { + var l ListOSImagesResponse + + response, err := c.client.SendAzureGetRequest(azureImageListURL) + if err != nil { + return l, err + } + + err = xml.Unmarshal(response, &l) + return l, err +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/osimage/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,47 @@ +package osimage + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// OSImageClient is used to perform operations on Azure Locations +type OSImageClient struct { + client management.Client +} + +type ListOSImagesResponse struct { + XMLName xml.Name `xml:"Images"` + OSImages []OSImage `xml:"OSImage"` +} + +type OSImage struct { + Category string // Public || Private || MSDN + Label string // Specifies an identifier for the image. + LogicalSizeInGB float64 //Specifies the size, in GB, of the image. + Name string // Specifies the name of the operating system image. This is the name that is used when creating one or more virtual machines using the image. + OS string // Linux || Windows + Eula string // Specifies the End User License Agreement that is associated with the image. The value for this element is a string, but it is recommended that the value be a URL that points to a EULA. + Description string // Specifies the description of the image. + Location string // The geo-location in which this media is located. The Location value is derived from storage account that contains the blob in which the media is located. If the storage account belongs to an affinity group the value is NULL. + AffinityGroup string // Specifies the affinity in which the media is located. The AffinityGroup value is derived from storage account that contains the blob in which the media is located. If the storage account does not belong to an affinity group the value is NULL and the element is not displayed in the response. This value is NULL for platform images. + MediaLink string // Specifies the location of the vhd file for the image. The storage account where the vhd is located must be associated with the specified subscription. + ImageFamily string // Specifies a value that can be used to group images. + PublishedDate string // Specifies the date when the image was added to the image repository. + IsPremium string // Indicates whether the image contains software or associated services that will incur charges above the core price for the virtual machine. 
For additional details, see the PricingDetailLink element. + PrivacyURI string `xml:"PrivacyUri"` // Specifies the URI that points to a document that contains the privacy policy related to the image. + RecommendedVMSize string // Specifies the size to use for the virtual machine that is created from the image. + PublisherName string // The name of the publisher of the image. All user images have a publisher name of User. + PricingDetailLink string // Specifies a URL for an image with IsPremium set to true, which contains the pricing details for a virtual machine that is created from the image. + SmallIconURI string `xml:"SmallIconUri"` // Specifies the URI to the small icon that is displayed when the image is presented in the Microsoft Azure Management Portal. + Language string // Specifies the language of the image. + IOType IOType // Provisioned || Standard +} + +type IOType string + +const ( + IOTypeProvisioned IOType = "Provisioned" + IOTypeStandard IOType = "Standard" +) === added file 'src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go' --- src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/publishSettings.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +package management + +import ( + "encoding/base64" + "encoding/pem" + "encoding/xml" + "fmt" + "io/ioutil" + + "github.com/Azure/azure-sdk-for-go/Godeps/_workspace/src/golang.org/x/crypto/pkcs12" +) + +// ClientFromPublishSettingsData unmarshalls the contents of a publish settings file +// from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the file is used. +func ClientFromPublishSettingsData(settingsData []byte, subscriptionID string) (client Client, err error) { + return ClientFromPublishSettingsDataWithConfig(settingsData, subscriptionID, DefaultConfig()) +} + +// ClientFromPublishSettingsFile reads a publish settings file downloaded from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the file is used. +func ClientFromPublishSettingsFile(filePath, subscriptionID string) (client Client, err error) { + return ClientFromPublishSettingsFileWithConfig(filePath, subscriptionID, DefaultConfig()) +} + +// ClientFromPublishSettingsFileWithConfig reads a publish settings file downloaded from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the file is used. +func ClientFromPublishSettingsFileWithConfig(filePath, subscriptionID string, config ClientConfig) (client Client, err error) { + if filePath == "" { + return client, fmt.Errorf(errParamNotSpecified, "filePath") + } + + publishSettingsContent, err := ioutil.ReadFile(filePath) + if err != nil { + return client, err + } + + return ClientFromPublishSettingsDataWithConfig(publishSettingsContent, subscriptionID, config) +} + +// ClientFromPublishSettingsDataWithConfig unmarshalls the contents of a publish settings file +// from https://manage.windowsazure.com/publishsettings. +// If subscriptionID is left empty, the first subscription in the string is used. 
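// Example (a minimal sketch; the file path is a placeholder): the usual entry
// point is a .publishsettings file; an empty subscription ID selects the
// first subscription in the file.
//
//    func newAzureClient() (management.Client, error) {
//        return management.ClientFromPublishSettingsFile("azure.publishsettings", "")
//    }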
+func ClientFromPublishSettingsDataWithConfig(data []byte, subscriptionID string, config ClientConfig) (client Client, err error) { + publishData := publishData{} + if err = xml.Unmarshal(data, &publishData); err != nil { + return client, err + } + + for _, profile := range publishData.PublishProfiles { + for _, sub := range profile.Subscriptions { + if sub.ID == subscriptionID || subscriptionID == "" { + base64Cert := sub.ManagementCertificate + if base64Cert == "" { + base64Cert = profile.ManagementCertificate + } + + pfxData, err := base64.StdEncoding.DecodeString(base64Cert) + if err != nil { + return client, err + } + + pems, err := pkcs12.ToPEM(pfxData, "") + + cert := []byte{} + for _, b := range pems { + cert = append(cert, pem.EncodeToMemory(b)...) + } + + config.ManagementURL = sub.ServiceManagementURL + return makeClient(sub.ID, cert, config) + } + } + } + + return client, fmt.Errorf("could not find subscription '%s' in settings provided", subscriptionID) +} + +type publishSettings struct { + SubscriptionID string + SubscriptionCert []byte + SubscriptionKey []byte +} + +type publishData struct { + XMLName xml.Name `xml:"PublishData"` + PublishProfiles []publishProfile `xml:"PublishProfile"` +} + +type publishProfile struct { + XMLName xml.Name `xml:"PublishProfile"` + SchemaVersion string `xml:",attr"` + PublishMethod string `xml:",attr"` + URL string `xml:"Url,attr"` + ManagementCertificate string `xml:",attr"` + Subscriptions []subscription `xml:"Subscription"` +} + +type subscription struct { + XMLName xml.Name `xml:"Subscription"` + ServiceManagementURL string `xml:"ServiceManagementUrl,attr"` + ID string `xml:"Id,attr"` + Name string `xml:",attr"` + ManagementCertificate string `xml:",attr"` +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/sql' === added file 'src/github.com/Azure/azure-sdk-for-go/management/sql/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/sql/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/sql/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,316 @@ +package sql + +import ( + "encoding/xml" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// Definitions of numerous constants representing API endpoints. +const ( + azureCreateDatabaseServerURL = "services/sqlservers/servers" + azureListDatabaseServersURL = "services/sqlservers/servers" + azureDeleteDatabaseServerURL = "services/sqlservers/servers/%s" + + azureCreateFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules" + azureGetFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules/%s" + azureListFirewallRulesURL = "services/sqlservers/servers/%s/firewallrules" + azureUpdateFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules/%s" + azureDeleteFirewallRuleURL = "services/sqlservers/servers/%s/firewallrules/%s" + + azureCreateDatabaseURL = "services/sqlservers/servers/%s/databases" + azureGetDatabaseURL = "services/sqlservers/servers/%s/databases/%s" + azureListDatabasesURL = "services/sqlservers/servers/%s/databases?contentview=generic" + azureUpdateDatabaseURL = "services/sqlservers/servers/%s/databases/%s" + azureDeleteDatabaseURL = "services/sqlservers/servers/%s/databases/%s" + + errParamNotSpecified = "Parameter %s was not specified." + + DatabaseStateCreating = "Creating" +) + +// SQLDatabaseClient defines various database CRUD operations. +// It contains a management.Client for making the actual http calls. 
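// Example (a minimal sketch, assuming a management.Client c; the credentials
// and location are placeholders): creating a server with the client defined
// below and reading back the generated server name.
//
//    sqlc := sql.NewClient(c)
//    server, err := sqlc.CreateServer(sql.DatabaseServerCreateParams{
//        AdministratorLogin:         "serveradmin",
//        AdministratorLoginPassword: "a-strong-password",
//        Location:                   "West US",
//        Version:                    sql.DatabaseServerVersion12,
//    })
//    if err == nil {
//        fmt.Println("created SQL server:", server)
//    }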
+type SQLDatabaseClient struct { + mgmtClient management.Client +} + +// NewClient returns a new SQLDatabaseClient struct with the provided +// management.Client as the underlying client. +func NewClient(mgmtClient management.Client) SQLDatabaseClient { + return SQLDatabaseClient{mgmtClient} +} + +// CreateServer creates a new Azure SQL Database server and return its name. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505699.aspx +func (c SQLDatabaseClient) CreateServer(params DatabaseServerCreateParams) (string, error) { + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + resp, err := c.mgmtClient.SendAzurePostRequestWithReturnedResponse(azureCreateDatabaseServerURL, req) + if err != nil { + return "", err + } + + var name string + err = xml.Unmarshal(resp, &name) + + return name, err +} + +// ListServers retrieves the Azure SQL Database servers for this subscription. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505702.aspx +func (c SQLDatabaseClient) ListServers() (ListServersResponse, error) { + var resp ListServersResponse + + data, err := c.mgmtClient.SendAzureGetRequest(azureListDatabaseServersURL) + if err != nil { + return resp, err + } + + err = xml.Unmarshal(data, &resp) + return resp, err +} + +// DeleteServer deletes an Azure SQL Database server (including all its databases). +// +// https://msdn.microsoft.com/en-us/library/azure/dn505695.aspx +func (c SQLDatabaseClient) DeleteServer(name string) error { + if name == "" { + return fmt.Errorf(errParamNotSpecified, "name") + } + + url := fmt.Sprintf(azureDeleteDatabaseServerURL, name) + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + return err +} + +// CreateFirewallRule creates an Azure SQL Database server +// firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505712.aspx +func (c SQLDatabaseClient) CreateFirewallRule(server string, params FirewallRuleCreateParams) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + url := fmt.Sprintf(azureCreateFirewallRuleURL, server) + + _, err = c.mgmtClient.SendAzurePostRequest(url, req) + return err +} + +// GetFirewallRule gets the details of an Azure SQL Database Server firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505698.aspx +func (c SQLDatabaseClient) GetFirewallRule(server, ruleName string) (FirewallRuleResponse, error) { + var rule FirewallRuleResponse + + if server == "" { + return rule, fmt.Errorf(errParamNotSpecified, "server") + } + if ruleName == "" { + return rule, fmt.Errorf(errParamNotSpecified, "ruleName") + } + + url := fmt.Sprintf(azureGetFirewallRuleURL, server, ruleName) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return rule, err + } + + err = xml.Unmarshal(resp, &rule) + return rule, err +} + +// ListFirewallRules retrieves the set of firewall rules for an Azure SQL +// Database Server. 
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505715.aspx +func (c SQLDatabaseClient) ListFirewallRules(server string) (ListFirewallRulesResponse, error) { + var rules ListFirewallRulesResponse + + if server == "" { + return rules, fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureListFirewallRulesURL, server) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return rules, err + } + + err = xml.Unmarshal(resp, &rules) + return rules, err +} + +// UpdateFirewallRule update a firewall rule for an Azure SQL Database server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505707.aspx +func (c SQLDatabaseClient) UpdateFirewallRule(server, ruleName string, params FirewallRuleUpdateParams) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + if ruleName == "" { + return fmt.Errorf(errParamNotSpecified, "ruleName") + } + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + url := fmt.Sprintf(azureUpdateFirewallRuleURL, server, ruleName) + _, err = c.mgmtClient.SendAzurePutRequest(url, "text/xml", req) + return err +} + +// DeleteFirewallRule deletes an Azure SQL Database server firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505706.aspx +func (c SQLDatabaseClient) DeleteFirewallRule(server, ruleName string) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + if ruleName == "" { + return fmt.Errorf(errParamNotSpecified, "ruleName") + } + + url := fmt.Sprintf(azureDeleteFirewallRuleURL, server, ruleName) + + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + return err +} + +// CreateDatabase creates a new Microsoft Azure SQL Database on the given database server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505701.aspx +func (c SQLDatabaseClient) CreateDatabase(server string, params DatabaseCreateParams) error { + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + + req, err := xml.Marshal(params) + if err != nil { + return err + } + + target := fmt.Sprintf(azureCreateDatabaseURL, server) + _, err = c.mgmtClient.SendAzurePostRequest(target, req) + return err +} + +// WaitForDatabaseCreation is a helper method which waits +// for the creation of the database on the given server. +func (c SQLDatabaseClient) WaitForDatabaseCreation( + server, database string, + cancel chan struct{}) error { + for { + stat, err := c.GetDatabase(server, database) + if err != nil { + return err + } + if stat.State != DatabaseStateCreating { + return nil + } + + select { + case <-time.After(management.DefaultOperationPollInterval): + case <-cancel: + return management.ErrOperationCancelled + } + } +} + +// GetDatabase gets the details for an Azure SQL Database. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505708.aspx +func (c SQLDatabaseClient) GetDatabase(server, database string) (ServiceResource, error) { + var db ServiceResource + + if database == "" { + return db, fmt.Errorf(errParamNotSpecified, "database") + } + if server == "" { + return db, fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureGetDatabaseURL, server, database) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return db, err + } + + err = xml.Unmarshal(resp, &db) + return db, err +} + +// ListDatabases returns a list of Azure SQL Databases on the given server. 
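// Example (a minimal sketch continuing the sqlc sketch above; "myserver" and
// "mydb" are placeholders): creating a database, then blocking with
// WaitForDatabaseCreation above until it leaves the Creating state. A nil
// cancel channel disables cancellation.
//
//    err := sqlc.CreateDatabase("myserver", sql.DatabaseCreateParams{Name: "mydb"})
//    if err == nil {
//        err = sqlc.WaitForDatabaseCreation("myserver", "mydb", nil)
//    }
//    return err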
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505711.aspx +func (c SQLDatabaseClient) ListDatabases(server string) (ListDatabasesResponse, error) { + var databases ListDatabasesResponse + if server == "" { + return databases, fmt.Errorf(errParamNotSpecified, "server name") + } + + url := fmt.Sprintf(azureListDatabasesURL, server) + resp, err := c.mgmtClient.SendAzureGetRequest(url) + if err != nil { + return databases, err + } + + err = xml.Unmarshal(resp, &databases) + return databases, err +} + +// UpdateDatabase updates the details of the given database on the given server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505718.aspx +func (c SQLDatabaseClient) UpdateDatabase( + server, database string, + params ServiceResourceUpdateParams) (management.OperationID, error) { + if database == "" { + return "", fmt.Errorf(errParamNotSpecified, "database") + } + if server == "" { + return "", fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureUpdateDatabaseURL, server, database) + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.mgmtClient.SendAzurePutRequest(url, "text/xml", req) +} + +// DeleteDatabase deletes the Azure SQL Database from the given server. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505705.aspx +func (c SQLDatabaseClient) DeleteDatabase(server, database string) error { + if database == "" { + return fmt.Errorf(errParamNotSpecified, "database") + } + if server == "" { + return fmt.Errorf(errParamNotSpecified, "server") + } + + url := fmt.Sprintf(azureDeleteDatabaseURL, server, database) + + _, err := c.mgmtClient.SendAzureDeleteRequest(url) + + return err +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/sql/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/sql/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/sql/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,124 @@ +package sql + +import ( + "encoding/xml" +) + +// DatabaseServerCreateParams represents the set of possible parameters +// when issuing a database server creation request to Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505699.aspx +type DatabaseServerCreateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/sqlazure/2010/12/ Server"` + AdministratorLogin string + AdministratorLoginPassword string + Location string + Version string +} + +// DatabaseServerCreateResponse represents the response following the creation of +// a database server on Azure. +type DatabaseServerCreateResponse struct { + ServerName string +} + +const ( + DatabaseServerVersion11 = "2.0" + DatabaseServerVersion12 = "12.0" +) + +// DatabaseServer represents the set of data received from +// a database server list operation. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505702.aspx +type DatabaseServer struct { + Name string + AdministratorLogin string + Location string + FullyQualifiedDomainName string + Version string + State string +} + +type ListServersResponse struct { + DatabaseServers []DatabaseServer `xml:"Server"` +} + +// FirewallRuleCreateParams represents the set of possible +// parameters when creating a firewall rule on an Azure database server.
+// +// https://msdn.microsoft.com/en-us/library/azure/dn505712.aspx +type FirewallRuleCreateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + StartIPAddress string + EndIPAddress string +} + +// FirewallRuleResponse represents the set of data recieved from +// an Azure database server firewall rule get response. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505698.aspx +type FirewallRuleResponse struct { + Name string + StartIPAddress string + EndIPAddress string +} + +type ListFirewallRulesResponse struct { + FirewallRules []FirewallRuleResponse `xml:"ServiceResource"` +} + +// FirewallRuleUpdateParams represents the set of possible +// parameters when issuing an update of a database server firewall rule. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505707.aspx +type FirewallRuleUpdateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + StartIPAddress string + EndIPAddress string +} + +// DatabaseCreateParams represents the set of possible parameters when issuing +// a database creation to Azure, and reading a list response from Azure. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505701.aspx +type DatabaseCreateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + Edition string `xml:",omitempty"` + CollationName string `xml:",omitempty"` + MaxSizeBytes int64 `xml:",omitempty"` + ServiceObjectiveID string `xml:"ServiceObjectiveId,omitempty"` +} + +// ServiceResource represents the set of parameters obtained from a database +// get or list call. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505708.aspx +type ServiceResource struct { + Name string + State string + SelfLink string + Edition string + CollationName string + MaxSizeBytes int64 + ServiceObjectiveID string `xml:"ServiceObjectiveId,omitempty"` +} + +type ListDatabasesResponse struct { + ServiceResources []ServiceResource `xml:"ServiceResource"` +} + +// ServiceResourceUpdateParams represents the set of parameters available +// for a database service update operation. +// +// https://msdn.microsoft.com/en-us/library/azure/dn505718.aspx +type ServiceResourceUpdateParams struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure ServiceResource"` + Name string + Edition string `xml:",omitempty"` + MaxSizeBytes int64 `xml:",omitempty"` + ServiceObjectiveID string `xml:"ServiceObjectiveId,omitempty"` +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/storageservice' === added file 'src/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/storageservice/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +// Package storageservice provides a client for Storage Services. +package storageservice + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureStorageServiceListURL = "services/storageservices" + azureStorageServiceURL = "services/storageservices/%s" + azureStorageServiceKeysURL = "services/storageservices/%s/keys" + azureStorageAccountAvailabilityURL = "services/storageservices/operations/isavailable/%s" + + azureXmlns = "http://schemas.microsoft.com/windowsazure" + + errParamNotSpecified = "Parameter %s is not specified." 
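// Example (a minimal sketch of a helper returning error, assuming a
// management.Client c; the account name is a placeholder): checking name
// availability with the client defined below before creating the account.
// Base64-encoding the label is an assumption, consistent with the label
// handling in the hostedservice package.
//
//    ss := storageservice.NewClient(c)
//    avail, err := ss.CheckStorageAccountNameAvailability("mystorageacct")
//    if err != nil || !avail.Result {
//        return err
//    }
//    opID, err := ss.CreateStorageService(storageservice.StorageAccountCreateParameters{
//        ServiceName: "mystorageacct",
//        Label:       base64.StdEncoding.EncodeToString([]byte("mystorageacct")),
//        Location:    "West US",
//        AccountType: storageservice.AccountTypeStandardLRS,
//    })
//    if err != nil {
//        return err
//    }
//    return c.WaitForOperation(opID, nil)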
+) + +// NewClient is used to instantiate a new StorageServiceClient from an Azure +// client. +func NewClient(s management.Client) StorageServiceClient { + return StorageServiceClient{client: s} +} + +func (s StorageServiceClient) ListStorageServices() (ListStorageServicesResponse, error) { + var l ListStorageServicesResponse + response, err := s.client.SendAzureGetRequest(azureStorageServiceListURL) + if err != nil { + return l, err + } + + err = xml.Unmarshal(response, &l) + return l, err +} + +func (s StorageServiceClient) GetStorageService(serviceName string) (StorageServiceResponse, error) { + var svc StorageServiceResponse + if serviceName == "" { + return svc, fmt.Errorf(errParamNotSpecified, "serviceName") + } + + requestURL := fmt.Sprintf(azureStorageServiceURL, serviceName) + response, err := s.client.SendAzureGetRequest(requestURL) + if err != nil { + return svc, err + } + + err = xml.Unmarshal(response, &svc) + return svc, err +} + +func (s StorageServiceClient) GetStorageServiceKeys(serviceName string) (GetStorageServiceKeysResponse, error) { + var r GetStorageServiceKeysResponse + if serviceName == "" { + return r, fmt.Errorf(errParamNotSpecified, "serviceName") + } + + requestURL := fmt.Sprintf(azureStorageServiceKeysURL, serviceName) + data, err := s.client.SendAzureGetRequest(requestURL) + if err != nil { + return r, err + } + + err = xml.Unmarshal(data, &r) + return r, err +} + +func (s StorageServiceClient) CreateStorageService(parameters StorageAccountCreateParameters) (management.OperationID, error) { + data, err := xml.Marshal(CreateStorageServiceInput{ + StorageAccountCreateParameters: parameters}) + if err != nil { + return "", err + } + + return s.client.SendAzurePostRequest(azureStorageServiceListURL, data) +} + +func (s StorageServiceClient) DeleteStorageService(serviceName string) (management.OperationID, error) { + if serviceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "serviceName") + } + + requestURL := fmt.Sprintf(azureStorageServiceURL, serviceName) + return s.client.SendAzureDeleteRequest(requestURL) +} + +// CheckStorageAccountNameAvailability checks whether the specified storage +// account name is available. +// +// See https://msdn.microsoft.com/en-us/library/azure/jj154125.aspx +func (s StorageServiceClient) CheckStorageAccountNameAvailability(name string) (AvailabilityResponse, error) { + var r AvailabilityResponse + if name == "" { + return r, fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(azureStorageAccountAvailabilityURL, name) + response, err := s.client.SendAzureGetRequest(requestURL) + if err != nil { + return r, err + } + + err = xml.Unmarshal(response, &r) + return r, err +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/storageservice/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +package storageservice + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// StorageServiceClient is used to perform operations on Azure Storage +type StorageServiceClient struct { + client management.Client +} + +type ListStorageServicesResponse struct { + StorageServices []StorageServiceResponse `xml:"StorageService"` +} + +type StorageServiceResponse struct { + URL string `xml:"Url"` + ServiceName string + StorageServiceProperties StorageServiceProperties +} + +type StorageServiceProperties struct { + Description string + Location string + Label string + Status string + Endpoints []string `xml:"Endpoints>Endpoint"` + GeoReplicationEnabled string + GeoPrimaryRegion string +} + +type GetStorageServiceKeysResponse struct { + URL string `xml:"Url"` + PrimaryKey string `xml:"StorageServiceKeys>Primary"` + SecondaryKey string `xml:"StorageServiceKeys>Secondary"` +} + +type CreateStorageServiceInput struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CreateStorageServiceInput"` + StorageAccountCreateParameters +} + +type StorageAccountCreateParameters struct { + ServiceName string + Description string `xml:",omitempty"` + Label string + AffinityGroup string `xml:",omitempty"` + Location string `xml:",omitempty"` + ExtendedProperties ExtendedPropertyList + AccountType AccountType +} + +type AccountType string + +const ( + AccountTypeStandardLRS AccountType = "Standard_LRS" + AccountTypeStandardZRS AccountType = "Standard_ZRS" + AccountTypeStandardGRS AccountType = "Standard_GRS" + AccountTypeStandardRAGRS AccountType = "Standard_RAGRS" + AccountTypePremiumLRS AccountType = "Premium_LRS" +) + +type ExtendedPropertyList struct { + ExtendedProperty []ExtendedProperty +} + +type ExtendedProperty struct { + Name string + Value string +} + +type AvailabilityResponse struct { + XMLName xml.Name `xml:"AvailabilityResponse"` + Xmlns string `xml:"xmlns,attr"` + Result bool + Reason string +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go' --- src/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/storageservice/entities_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,31 @@ +package storageservice + +import ( + "encoding/xml" + "testing" +) + +func Test_StorageServiceKeysResponse_Unmarshal(t *testing.T) { + // from https://msdn.microsoft.com/en-us/library/azure/ee460785.aspx + response := []byte(`<StorageService xmlns="http://schemas.microsoft.com/windowsazure"> + <Url>storage-service-url</Url> + <StorageServiceKeys> + <Primary>primary-key</Primary> + <Secondary>secondary-key</Secondary> + </StorageServiceKeys> + </StorageService>`) + + keysResponse := GetStorageServiceKeysResponse{} + err := xml.Unmarshal(response, &keysResponse) + if err != nil {
+ t.Fatal(err) + } + + if expected := "primary-key"; keysResponse.PrimaryKey != expected { + t.Fatalf("Expected %q but got %q", expected, keysResponse.PrimaryKey) + } + if expected := "secondary-key"; keysResponse.SecondaryKey != expected { + t.Fatalf("Expected %q but got %q", expected, keysResponse.SecondaryKey) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/testutils' === added file 'src/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go' --- src/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/testutils/managementclient.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,87 @@ +// Package testutils contains some test utilities for the Azure SDK +package testutils + +import ( + "encoding/base64" + "os" + "testing" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// GetTestClient returns a management Client for testing. Expects +// AZSUBSCRIPTIONID and AZCERTDATA to be present in the environment. AZCERTDATA +// is the base64encoded binary representation of the PEM certificate data. +func GetTestClient(t *testing.T) management.Client { + subid := os.Getenv("AZSUBSCRIPTIONID") + certdata := os.Getenv("AZCERTDATA") + if subid == "" || certdata == "" { + t.Skip("AZSUBSCRIPTIONID or AZCERTDATA not set, skipping test") + } + cert, err := base64.StdEncoding.DecodeString(certdata) + if err != nil { + t.Fatal(err) + } + + client, err := management.NewClient(subid, cert) + if err != nil { + t.Fatal(err) + } + return testClient{client, t} +} + +type testClient struct { + management.Client + t *testing.T +} + +func chop(d []byte) string { + const maxlen = 5000 + + s := string(d) + + if len(s) > maxlen { + return s[:maxlen] + "..." 
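// Example (a minimal sketch): a test elsewhere in the SDK might obtain a
// logging client via GetTestClient above; the wrappers below echo each
// request and response through t.Logf.
//
//    func TestListLocations(t *testing.T) {
//        c := testutils.GetTestClient(t)
//        data, err := c.SendAzureGetRequest("locations")
//        if err != nil {
//            t.Fatal(err)
//        }
//        t.Log(len(data))
//    }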
+ } + return s +} + +func (l testClient) SendAzureGetRequest(url string) ([]byte, error) { + d, err := l.Client.SendAzureGetRequest(url) + logOperation(l.t, "GET", url, nil, d, "", err) + return d, err +} + +func (l testClient) SendAzurePostRequest(url string, data []byte) (management.OperationID, error) { + oid, err := l.Client.SendAzurePostRequest(url, data) + logOperation(l.t, "POST", url, data, nil, oid, err) + return oid, err +} + +func (l testClient) SendAzurePutRequest(url string, contentType string, data []byte) (management.OperationID, error) { + oid, err := l.Client.SendAzurePutRequest(url, contentType, data) + logOperation(l.t, "PUT", url, data, nil, oid, err) + return oid, err +} + +func (l testClient) SendAzureDeleteRequest(url string) (management.OperationID, error) { + oid, err := l.Client.SendAzureDeleteRequest(url) + logOperation(l.t, "DELETE", url, nil, nil, oid, err) + return oid, err +} + +func logOperation(t *testing.T, method, url string, requestData, responseData []byte, oid management.OperationID, err error) { + t.Logf("AZURE> %s %s\n", method, url) + if requestData != nil { + t.Logf(" >>> %s\n", chop(requestData)) + } + if err != nil { + t.Logf(" <<< ERROR: %+v\n", err) + } else { + if responseData != nil { + t.Logf(" <<< %s\n", chop(responseData)) + } else { + t.Logf(" <<< OperationID: %s\n", oid) + } + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/util.go' --- src/github.com/Azure/azure-sdk-for-go/management/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package management + +import ( + "github.com/Azure/azure-sdk-for-go/core/http" + "io/ioutil" +) + +func getResponseBody(response *http.Response) ([]byte, error) { + defer response.Body.Close() + return ioutil.ReadAll(response.Body) +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachine' === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,277 @@ +// Package virtualmachine provides a client for Virtual Machines. +package virtualmachine + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + azureDeploymentListURL = "services/hostedservices/%s/deployments" + azureDeploymentURL = "services/hostedservices/%s/deployments/%s" + deleteAzureDeploymentURL = "services/hostedservices/%s/deployments/%s?comp=media" + azureRoleURL = "services/hostedservices/%s/deployments/%s/roles/%s" + azureOperationsURL = "services/hostedservices/%s/deployments/%s/roleinstances/%s/Operations" + azureRoleSizeListURL = "rolesizes" + + errParamNotSpecified = "Parameter %s is not specified." +) + +//NewClient is used to instantiate a new VirtualMachineClient from an Azure client +func NewClient(client management.Client) VirtualMachineClient { + return VirtualMachineClient{client: client} +} + +// CreateDeploymentOptions can be used to create a customized deployement request +type CreateDeploymentOptions struct { + DNSServers []DNSServer + LoadBalancers []LoadBalancer + ReservedIPName string + VirtualNetworkName string +} + +// CreateDeployment creates a deployment and then creates a virtual machine +// in the deployment based on the specified configuration. 
+// +// https://msdn.microsoft.com/en-us/library/azure/jj157194.aspx +func (vm VirtualMachineClient) CreateDeployment( + role Role, + cloudServiceName string, + options CreateDeploymentOptions) (management.OperationID, error) { + + req := DeploymentRequest{ + Name: role.RoleName, + DeploymentSlot: "Production", + Label: role.RoleName, + RoleList: []Role{role}, + DNSServers: options.DNSServers, + LoadBalancers: options.LoadBalancers, + ReservedIPName: options.ReservedIPName, + VirtualNetworkName: options.VirtualNetworkName, + } + + data, err := xml.Marshal(req) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureDeploymentListURL, cloudServiceName) + return vm.client.SendAzurePostRequest(requestURL, data) +} + +func (vm VirtualMachineClient) GetDeployment(cloudServiceName, deploymentName string) (DeploymentResponse, error) { + var deployment DeploymentResponse + if cloudServiceName == "" { + return deployment, fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return deployment, fmt.Errorf(errParamNotSpecified, "deploymentName") + } + requestURL := fmt.Sprintf(azureDeploymentURL, cloudServiceName, deploymentName) + response, azureErr := vm.client.SendAzureGetRequest(requestURL) + if azureErr != nil { + return deployment, azureErr + } + + err := xml.Unmarshal(response, &deployment) + return deployment, err +} + +func (vm VirtualMachineClient) DeleteDeployment(cloudServiceName, deploymentName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + + requestURL := fmt.Sprintf(deleteAzureDeploymentURL, cloudServiceName, deploymentName) + return vm.client.SendAzureDeleteRequest(requestURL) +} + +func (vm VirtualMachineClient) GetRole(cloudServiceName, deploymentName, roleName string) (*Role, error) { + if cloudServiceName == "" { + return nil, fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return nil, fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return nil, fmt.Errorf(errParamNotSpecified, "roleName") + } + + role := new(Role) + + requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + response, azureErr := vm.client.SendAzureGetRequest(requestURL) + if azureErr != nil { + return nil, azureErr + } + + err := xml.Unmarshal(response, role) + if err != nil { + return nil, err + } + + return role, nil +} + +// UpdateRole updates the configuration of the specified virtual machine +// See https://msdn.microsoft.com/en-us/library/azure/jj157187.aspx +func (vm VirtualMachineClient) UpdateRole(cloudServiceName, deploymentName, roleName string, role Role) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + data, err := xml.Marshal(PersistentVMRole{Role: role}) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePutRequest(requestURL, "text/xml", data) +} + +func (vm VirtualMachineClient) StartRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName 
== "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + startRoleOperationBytes, err := xml.Marshal(StartRoleOperation{ + OperationType: "StartRoleOperation", + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePostRequest(requestURL, startRoleOperationBytes) +} + +func (vm VirtualMachineClient) ShutdownRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + shutdownRoleOperationBytes, err := xml.Marshal(ShutdownRoleOperation{ + OperationType: "ShutdownRoleOperation", + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePostRequest(requestURL, shutdownRoleOperationBytes) +} + +func (vm VirtualMachineClient) RestartRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + restartRoleOperationBytes, err := xml.Marshal(RestartRoleOperation{ + OperationType: "RestartRoleOperation", + }) + if err != nil { + return "", err + } + + requestURL := fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzurePostRequest(requestURL, restartRoleOperationBytes) +} + +func (vm VirtualMachineClient) DeleteRole(cloudServiceName, deploymentName, roleName string) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + requestURL := fmt.Sprintf(azureRoleURL, cloudServiceName, deploymentName, roleName) + return vm.client.SendAzureDeleteRequest(requestURL) +} + +func (vm VirtualMachineClient) GetRoleSizeList() (RoleSizeList, error) { + roleSizeList := RoleSizeList{} + + response, err := vm.client.SendAzureGetRequest(azureRoleSizeListURL) + if err != nil { + return roleSizeList, err + } + + err = xml.Unmarshal(response, &roleSizeList) + return roleSizeList, err +} + +// CaptureRole captures a VM role. If reprovisioningConfigurationSet is non-nil, +// the VM role is redeployed after capturing the image, otherwise, the original +// VM role is deleted. +// +// NOTE: an image resulting from this operation shows up in +// osimage.GetImageList() as images with Category "User". 
+func (vm VirtualMachineClient) CaptureRole(cloudServiceName, deploymentName, roleName, imageName, imageLabel string, + reprovisioningConfigurationSet *ConfigurationSet) (management.OperationID, error) { + if cloudServiceName == "" { + return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName") + } + if deploymentName == "" { + return "", fmt.Errorf(errParamNotSpecified, "deploymentName") + } + if roleName == "" { + return "", fmt.Errorf(errParamNotSpecified, "roleName") + } + + if reprovisioningConfigurationSet != nil && + !(reprovisioningConfigurationSet.ConfigurationSetType == ConfigurationSetTypeLinuxProvisioning || + reprovisioningConfigurationSet.ConfigurationSetType == ConfigurationSetTypeWindowsProvisioning) { + return "", fmt.Errorf("ConfigurationSet type can only be WindowsProvisioningConfiguration or LinuxProvisioningConfiguration") + } + + operation := CaptureRoleOperation{ + OperationType: "CaptureRoleOperation", + PostCaptureAction: PostCaptureActionReprovision, + ProvisioningConfiguration: reprovisioningConfigurationSet, + TargetImageLabel: imageLabel, + TargetImageName: imageName, + } + if reprovisioningConfigurationSet == nil { + operation.PostCaptureAction = PostCaptureActionDelete + } + + data, err := xml.Marshal(operation) + if err != nil { + return "", err + } + + return vm.client.SendAzurePostRequest(fmt.Sprintf(azureOperationsURL, cloudServiceName, deploymentName, roleName), data) +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,569 @@ +package virtualmachine + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" + vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" +) + +// VirtualMachineClient is used to perform operations on Azure Virtual Machines +type VirtualMachineClient struct { + client management.Client +} + +// DeploymentRequest is the type for creating a deployment and Virtual Machine +// in the deployment based on the specified configuration. See +// https://msdn.microsoft.com/en-us/library/azure/jj157194.aspx +type DeploymentRequest struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Deployment"` + // Required parameters: + Name string `` // Specifies a name for the deployment. The deployment name must be unique among other deployments for the cloud service. + DeploymentSlot string `` // Specifies the environment in which the Virtual Machine is to be deployed. The only allowable value is Production. + Label string `` // Specifies an identifier for the deployment. The label can be up to 100 characters long. The label can be used for tracking purposes. + RoleList []Role `xml:">Role"` // Contains information about the Virtual Machines that are to be deployed. + // Optional parameters: + VirtualNetworkName string `xml:",omitempty"` // Specifies the name of an existing virtual network to which the deployment will belong. + DNSServers []DNSServer `xml:"Dns>DnsServers>DnsServer,omitempty"` // Contains a list of DNS servers to associate with the Virtual Machine. + LoadBalancers []LoadBalancer `xml:">LoadBalancer,omitempty"` // Contains a list of internal load balancers that can be assigned to input endpoints. 
+ ReservedIPName string `xml:",omitempty"` // Specifies the name of a reserved IP address that is to be assigned to the deployment. +} + +// DeploymentResponse is the type for receiving deployment information +// See https://msdn.microsoft.com/en-us/library/azure/ee460804.aspx +type DeploymentResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Deployment"` + + Name string + DeploymentSlot string + Status DeploymentStatus + Label string + URL string `xml:"Url"` + Configuration string + RoleInstanceList []RoleInstance `xml:">RoleInstance"` + UpgradeStatus UpgradeStatus + UpgradeDomainCount int + RoleList []Role `xml:">Role"` + SdkVersion string + Locked bool + RollbackAllowed bool + CreatedTime string + LastModifiedTime string + VirtualNetworkName string + DNSServers []DNSServer `xml:"Dns>DnsServers>DnsServer"` + LoadBalancers []LoadBalancer `xml:">LoadBalancer"` + ExtendedProperties []ExtendedProperty `xml:">ExtendedProperty"` + PersistentVMDowntime PersistentVMDowntime + VirtualIPs []VirtualIP `xml:">VirtualIP"` + ExtensionConfiguration string // cloud service extensions not fully implemented + ReservedIPName string + InternalDNSSuffix string `xml:"InternalDnsSuffix"` +} + +type DeploymentStatus string + +const ( + DeploymentStatusRunning DeploymentStatus = "Running" + DeploymentStatusSuspended DeploymentStatus = "Suspended" + DeploymentStatusRunningTransitioning DeploymentStatus = "RunningTransitioning" + DeploymentStatusSuspendedTransitioning DeploymentStatus = "SuspendedTransitioning" + DeploymentStatusStarting DeploymentStatus = "Starting" + DeploymentStatusSuspending DeploymentStatus = "Suspending" + DeploymentStatusDeploying DeploymentStatus = "Deploying" + DeploymentStatusDeleting DeploymentStatus = "Deleting" +) + +type RoleInstance struct { + RoleName string + InstanceName string + InstanceStatus InstanceStatus + ExtendedInstanceStatus string + InstanceUpgradeDomain int + InstanceFaultDomain int + InstanceSize string + InstanceStateDetails string + InstanceErrorCode string + IPAddress string `xml:"IpAddress"` + InstanceEndpoints []InstanceEndpoint `xml:">InstanceEndpoint"` + PowerState PowerState + HostName string + RemoteAccessCertificateThumbprint string + GuestAgentStatus string // todo: implement + ResourceExtensionStatusList []ResourceExtensionStatus `xml:">ResourceExtensionStatus"` + PublicIPs []PublicIP `xml:">PublicIP"` +} + +type InstanceStatus string + +const ( + InstanceStatusUnknown = "Unknown" + InstanceStatusCreatingVM = "CreatingVM" + InstanceStatusStartingVM = "StartingVM" + InstanceStatusCreatingRole = "CreatingRole" + InstanceStatusStartingRole = "StartingRole" + InstanceStatusReadyRole = "ReadyRole" + InstanceStatusBusyRole = "BusyRole" + InstanceStatusStoppingRole = "StoppingRole" + InstanceStatusStoppingVM = "StoppingVM" + InstanceStatusDeletingVM = "DeletingVM" + InstanceStatusStoppedVM = "StoppedVM" + InstanceStatusRestartingRole = "RestartingRole" + InstanceStatusCyclingRole = "CyclingRole" + InstanceStatusFailedStartingRole = "FailedStartingRole" + InstanceStatusFailedStartingVM = "FailedStartingVM" + InstanceStatusUnresponsiveRole = "UnresponsiveRole" + InstanceStatusStoppedDeallocated = "StoppedDeallocated" + InstanceStatusPreparing = "Preparing" +) + +type InstanceEndpoint struct { + Name string + Vip string + PublicPort int + LocalPort int + Protocol InputEndpointProtocol +} + +type PowerState string + +const ( + PowerStateStarting PowerState = "Starting" + PowerStateStarted PowerState = "Started" + PowerStateStopping 
PowerState = "Stopping" + PowerStateStopped PowerState = "Stopped" + PowerStateUnknown PowerState = "Unknown" +) + +type ResourceExtensionStatus struct { + HandlerName string + Version string + Status ResourceExtensionState + Code string + FormattedMessage FormattedMessage + ExtensionSettingStatus ExtensionSettingStatus +} + +type ResourceExtensionState string + +const ( + ResourceExtensionStateInstalling ResourceExtensionState = "Installing" + ResourceExtensionStateReady ResourceExtensionState = "Ready" + ResourceExtensionStateNotReady ResourceExtensionState = "NotReady" + ResourceExtensionStateUnresponsive ResourceExtensionState = "Unresponsive" +) + +type FormattedMessage struct { + Language string + Message string +} + +type ExtensionSettingStatus struct { + Timestamp string + Name string + Operation string + Status ExtensionSettingState + Code string + FormattedMessage FormattedMessage + SubStatusList []SubStatus `xml:">SubStatus"` +} + +type ExtensionSettingState string + +const ( + ExtensionSettingStateTransitioning ExtensionSettingState = "transitioning" + ExtensionSettingStateError ExtensionSettingState = "error" + ExtensionSettingStateSuccess ExtensionSettingState = "success" + ExtensionSettingStateWarning ExtensionSettingState = "warning" +) + +type SubStatus struct { + Name string + Status ExtensionSettingState + FormattedMessage FormattedMessage +} + +type UpgradeStatus struct { + UpgradeType UpgradeType + CurrentUpgradeDomainState CurrentUpgradeDomainState + CurrentUpgradeDomain int +} + +type UpgradeType string + +const ( + UpgradeTypeAuto UpgradeType = "Auto" + UpgradeTypeManual UpgradeType = "Manual" + UpgradeTypeSimultaneous UpgradeType = "Simultaneous" +) + +type CurrentUpgradeDomainState string + +const ( + CurrentUpgradeDomainStateBefore CurrentUpgradeDomainState = "Before" + CurrentUpgradeDomainStateDuring CurrentUpgradeDomainState = "During" +) + +type ExtendedProperty struct { + Name string + Value string +} + +type PersistentVMDowntime struct { + StartTime string + EndTime string + Status string +} + +type VirtualIP struct { + Address string + IsReserved bool + ReservedIPName string + Type IPAddressType +} + +// Role contains the configuration sets that are used to create virtual +// machines. +type Role struct { + RoleName string `xml:",omitempty"` // Specifies the name for the Virtual Machine. + RoleType string `xml:",omitempty"` // Specifies the type of role to use. For Virtual Machines, this must be PersistentVMRole. + ConfigurationSets []ConfigurationSet `xml:"ConfigurationSets>ConfigurationSet,omitempty"` + ResourceExtensionReferences *[]ResourceExtensionReference `xml:"ResourceExtensionReferences>ResourceExtensionReference,omitempty"` + VMImageName string `xml:",omitempty"` // Specifies the name of the VM Image that is to be used to create the Virtual Machine. If this element is used, the ConfigurationSets element is not used. + MediaLocation string `xml:",omitempty"` // Required if the Virtual Machine is being created from a published VM Image. Specifies the location of the VHD file that is created when VMImageName specifies a published VM Image. + AvailabilitySetName string `xml:",omitempty"` // Specifies the name of a collection of Virtual Machines. Virtual Machines specified in the same availability set are allocated to different nodes to maximize availability. + DataVirtualHardDisks []DataVirtualHardDisk `xml:"DataVirtualHardDisks>DataVirtualHardDisk,omitempty"` // Contains the parameters that are used to add a data disk to a Virtual Machine. 
If you are creating a Virtual Machine by using a VM Image, this element is not used.
+	OSVirtualHardDisk *OSVirtualHardDisk `xml:",omitempty"` // Contains the parameters that are used to create the operating system disk for a Virtual Machine. If you are creating a Virtual Machine by using a VM Image, this element is not used.
+	RoleSize string `xml:",omitempty"` // Specifies the size of the Virtual Machine. The default size is Small.
+	ProvisionGuestAgent bool `xml:",omitempty"` // Indicates whether the VM Agent is installed on the Virtual Machine. To run a resource extension in a Virtual Machine, this service must be installed.
+	VMImageInput *VMImageInput `xml:",omitempty"` // When a VM Image is used to create a new PersistentVMRole, the DiskConfigurations in the VM Image are used to create new Disks for the new VM. This parameter can be used to resize the newly created Disks to a larger size than the underlying DiskConfigurations in the VM Image.
+
+	UseCertAuth bool   `xml:"-"`
+	CertPath    string `xml:"-"`
+}
+
+// VMImageInput is for when a VM Image is used to create a new PersistentVMRole:
+// the DiskConfigurations in the VM Image are used to create new Disks for the
+// new VM. This parameter can be used to resize the newly created Disks to a
+// larger size than the underlying DiskConfigurations in the VM Image.
+type VMImageInput struct {
+	OSDiskConfiguration *OSDiskConfiguration `xml:",omitempty"` // This corresponds to the OSDiskConfiguration of the VM Image used to create a new role. The OSDiskConfiguration element is only available using version 2014-10-01 or higher.
+	DataDiskConfigurations []DataDiskConfiguration `xml:">DataDiskConfiguration,omitempty"` // This corresponds to the DataDiskConfigurations of the VM Image used to create a new role. The DataDiskConfigurations element is only available using version 2014-10-01 or higher.
+}
+
+// OSDiskConfiguration is used to resize the OS disk of a new VM created from a
+// previously saved VM image.
+type OSDiskConfiguration struct {
+	ResizedSizeInGB int
+}
+
+// DataDiskConfiguration is used to resize the data disks of a new VM created
+// from a previously saved VM image.
+type DataDiskConfiguration struct {
+	OSDiskConfiguration
+	Name string // The name of the DataDiskConfiguration being referenced.
+}
+
+// ResourceExtensionReference contains a collection of resource extensions that
+// are to be installed on the Virtual Machine. The VM Agent must be installed on
+// the Virtual Machine to install resource extensions. For more information, see
+// Manage Extensions:
+//
+// https://msdn.microsoft.com/en-us/library/dn606311.aspx.
+type ResourceExtensionReference struct {
+	ReferenceName   string
+	Publisher       string
+	Name            string
+	Version         string
+	ParameterValues []ResourceExtensionParameter `xml:"ResourceExtensionParameterValues>ResourceExtensionParameterValue,omitempty"`
+	State           string
+}
+
+// ResourceExtensionParameter specifies the key, value, and type of a parameter that is passed to the
+// resource extension when it is installed.
+type ResourceExtensionParameter struct {
+	Key   string
+	Value string
+	Type  ResourceExtensionParameterType // If this value is set to Private, the parameter will not be returned by Get Deployment.
+}
+
+type ResourceExtensionParameterType string
+
+// Enum values for ResourceExtensionParameterType
+const (
+	ResourceExtensionParameterTypePublic  ResourceExtensionParameterType = "Public"
+	ResourceExtensionParameterTypePrivate ResourceExtensionParameterType = "Private"
+)
+
+// DataVirtualHardDisk specifies the properties that are used to create a data
+// disk.
+type DataVirtualHardDisk struct {
+	HostCaching vmdisk.HostCachingType `xml:",omitempty"` // Specifies the caching mode of the data disk. The default value is None.
+	DiskLabel string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is ignored. If a new disk is being created, this element is used to provide a description of the disk. The value of this element is only obtained programmatically and does not appear in the Management Portal.
+	DiskName string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is used to identify the disk to add. If a new disk and the associated VHD are being created by Azure, this element is not used and Azure assigns a unique name that is a combination of the deployment name, role name, and identifying number. The name of the disk must contain only alphanumeric characters, underscores, periods, or dashes. The name must not be longer than 256 characters. The name must not end with a period or a dash.
+	Lun int `xml:",omitempty"` // Specifies the Logical Unit Number (LUN) for the data disk. If the disk is the first disk that is added, this element is optional and the default value of 0 is used. If more than one disk is being added, this element is required. Valid LUN values are 0 through 31.
+	LogicalDiskSizeInGB int `xml:",omitempty"` // Specifies the size, in GB, of an empty disk to be attached to the Virtual Machine. If the disk that is being added is already registered in the subscription, this element is ignored. If the disk and its VHD are being created by Azure as they are added, this element defines the size of the new disk.
+	MediaLink string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription or the VHD for the disk already exists in blob storage, this element is ignored. If a VHD file does not exist in blob storage, this element defines the location of the new VHD that is created when the new disk is added.
+	SourceMediaLink string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription or the VHD for the disk does not exist in blob storage, this element is ignored. If the VHD file exists in blob storage, this element defines the path to the VHD and a disk is registered from it and attached to the virtual machine.
+}
+
+// OSVirtualHardDisk specifies the properties that are used to create an OS
+// disk.
+type OSVirtualHardDisk struct {
+	HostCaching vmdisk.HostCachingType `xml:",omitempty"` // Specifies the caching mode of the operating system disk. The default value is None.
+	DiskLabel string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is ignored. If a new disk is being created, this element is used to provide a description of the disk. The value of this element is only obtained programmatically and does not appear in the Management Portal.
+	DiskName string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription, this element is used to identify the disk to add.
If a new disk and the associated VHD are being created by Azure, this element is not used and Azure assigns a unique name that is a combination of the deployment name, role name, and identifying number. The name of the disk must contain only alphanumeric characters, underscores, periods, or dashes. The name must not be longer than 256 characters. The name must not end with period or dash. + MediaLink string `xml:",omitempty"` // If the disk that is being added is already registered in the subscription or the VHD for the disk already exists in blob storage, this element is ignored. If a VHD file does not exist in blob storage, this element defines the location of the new VHD that is created when the new disk is added. + SourceImageName string `xml:",omitempty"` + OS string `xml:",omitempty"` + RemoteSourceImageLink string `xml:",omitempty"` // Specifies a publicly accessible URI or a SAS URI to the location where an OS image is stored that is used to create the Virtual Machine. This location can be a different location than the user or platform image repositories in Azure. An image is always associated with a VHD, which is a .vhd file stored as a page blob in a storage account in Azure. If you specify the path to an image with this element, an associated VHD is created and you must use the MediaLink element to specify the location in storage where the VHD will be located. If this element is used, SourceImageName is not used. + ResizedSizeInGB int `xml:",omitempty"` +} + +// ConfigurationSet specifies the configuration elements of the Virtual Machine. +// The type attribute is required to prevent the administrator password from +// being written to the operation history file. +type ConfigurationSet struct { + ConfigurationSetType ConfigurationSetType + + // Windows provisioning: + ComputerName string `xml:",omitempty"` // Optional. Specifies the computer name for the Virtual Machine. If you do not specify a computer name, one is assigned that is a combination of the deployment name, role name, and identifying number. Computer names must be 1 to 15 characters long. + AdminPassword string `xml:",omitempty"` // Optional. Specifies the password to use for an administrator account on the Virtual Machine that is being created. If you are creating a Virtual Machine using an image, you must specify a name of an administrator account to be created on the machine using the AdminUsername element. You must use the AdminPassword element to specify the password of the administrator account that is being created. If you are creating a Virtual Machine using an existing specialized disk, this element is not used because the account should already exist on the disk. + EnableAutomaticUpdates bool `xml:",omitempty"` // Optional. Specifies whether automatic updates are enabled for the Virtual Machine. The default value is true. + TimeZone string `xml:",omitempty"` // Optional. Specifies the time zone for the Virtual Machine. + DomainJoin *DomainJoin `xml:",omitempty"` // Optional. Contains properties that define a domain to which the Virtual Machine will be joined. + StoredCertificateSettings []CertificateSetting `xml:">StoredCertificateSetting,omitempty"` // Optional. Contains a list of service certificates with which to provision to the new Virtual Machine. + WinRMListeners *WinRMListener `xml:"WinRM>Listeners>Listener,omitempty"` // Optional. Contains configuration settings for the Windows Remote Management service on the Virtual Machine. This enables remote Windows PowerShell. 
+	AdminUsername string `xml:",omitempty"` // Optional. Specifies the name of the administrator account that is created to access the Virtual Machine. If you are creating a Virtual Machine using an image, you must specify a name of an administrator account to be created by using this element. You must use the AdminPassword element to specify the password of the administrator account that is being created. If you are creating a Virtual Machine using an existing specialized disk, this element is not used because the account should already exist on the disk.
+	AdditionalUnattendContent string `xml:",omitempty"` // Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup.
+
+	// Linux provisioning:
+	HostName string `xml:",omitempty"` // Required. Specifies the host name for the Virtual Machine. Host names must be 1 to 64 characters long.
+	UserName string `xml:",omitempty"` // Required. Specifies the name of a user account to be created in the sudoer group of the Virtual Machine. User account names must be 1 to 32 characters long.
+	UserPassword string `xml:",omitempty"` // Required. Specifies the password for the user account. Passwords must be 6 to 72 characters long.
+	DisableSSHPasswordAuthentication string `xml:"DisableSshPasswordAuthentication,omitempty"` // Optional. Specifies whether SSH password authentication is disabled. By default this value is set to true.
+	SSH *SSH `xml:",omitempty"` // Optional. Specifies the SSH public keys and key pairs to use with the Virtual Machine.
+
+	// In WindowsProvisioningConfiguration: The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. The file is saved to %SYSTEMDRIVE%\AzureData\CustomData.bin. If the file exists, it is overwritten. The security on the directory is set to System:Full Control and Administrators:Full Control.
+	// In LinuxProvisioningConfiguration: The base-64 encoded string is located in the ovf-env.xml file on the ISO of the Virtual Machine. The file is copied to /var/lib/waagent/ovf-env.xml by the Azure Linux Agent. The Azure Linux Agent will also place the base-64 encoded data in /var/lib/waagent/CustomData during provisioning. The maximum length of the binary array is 65535 bytes.
+	CustomData string `xml:",omitempty"` // Specifies a base-64 encoded string of custom data.
+
+	// Network configuration:
+	InputEndpoints []InputEndpoint `xml:">InputEndpoint,omitempty"` // Optional in NetworkConfiguration. Contains a collection of external endpoints for the Virtual Machine.
+	SubnetNames []string `xml:">SubnetName,omitempty"` // Required if StaticVirtualNetworkIPAddress is specified; otherwise, optional in NetworkConfiguration. Contains a list of subnets to which the Virtual Machine will belong.
+	StaticVirtualNetworkIPAddress string `xml:",omitempty"` // Specifies the internal IP address for the Virtual Machine in a Virtual Network. If you specify this element, you must also specify the SubnetNames element with only one subnet defined. The IP address specified in this element must belong to the subnet that is defined in SubnetNames and it should not be one of the first four IP addresses or the last IP address in the subnet. Deploying web roles or worker roles into a subnet that has Virtual Machines with StaticVirtualNetworkIPAddress defined is not supported.
+	NetworkSecurityGroup string `xml:",omitempty"` // Optional in NetworkConfiguration. Represents the name of the Network Security Group that will be associated with the Virtual Machine. The Network Security Group must exist in the context of the subscription and must have been created in the same region to which the virtual machine will be deployed.
+	PublicIPs []PublicIP `xml:">PublicIP,omitempty"` // Contains a public IP address that can be used in addition to the default virtual IP address for the Virtual Machine.
+}
+
+type ConfigurationSetType string
+
+// Enum values for ConfigurationSetType
+const (
+	ConfigurationSetTypeWindowsProvisioning ConfigurationSetType = "WindowsProvisioningConfiguration"
+	ConfigurationSetTypeLinuxProvisioning   ConfigurationSetType = "LinuxProvisioningConfiguration"
+	ConfigurationSetTypeNetwork             ConfigurationSetType = "NetworkConfiguration"
+)
+
+// DomainJoin contains properties that define a domain to which the Virtual
+// Machine will be joined.
+type DomainJoin struct {
+	Credentials Credentials `xml:",omitempty"` // Specifies the credentials to use to join the Virtual Machine to the domain.
+	JoinDomain string `xml:",omitempty"` // Specifies the domain to join.
+	MachineObjectOU string `xml:",omitempty"` // Specifies the Lightweight Directory Access Protocol (LDAP) X.500-distinguished name of the organizational unit (OU) in which the computer account is created. This account is in Active Directory on a domain controller in the domain to which the computer is being joined.
+}
+
+// Credentials specifies the credentials to use to join the Virtual Machine to
+// the domain. If Domain is not specified, Username must specify the user
+// principal name (UPN) format (user@fully-qualified-DNS-domain) or the fully-
+// qualified-DNS-domain\username format.
+type Credentials struct {
+	Domain   string // Specifies the name of the domain used to authenticate an account. The value is a fully qualified DNS domain.
+	Username string // Specifies a user name in the domain that can be used to join the domain.
+	Password string // Specifies the password to use to join the domain.
+}
+
+// CertificateSetting specifies the parameters for a certificate to provision
+// to the new Virtual Machine.
+type CertificateSetting struct {
+	StoreLocation string // Required. Specifies the certificate store location on the Virtual Machine. The only supported value is "LocalMachine".
+	StoreName     string // Required. Specifies the name of the certificate store from which the certificate is retrieved. For example, "My".
+	Thumbprint    string // Required. Specifies the thumbprint of the certificate. The thumbprint must specify an existing service certificate.
+}
+
+// WinRMListener specifies the protocol and certificate information for a WinRM
+// listener.
+type WinRMListener struct {
+	Protocol              WinRMProtocol // Specifies the protocol of the listener.
+	CertificateThumbprint string        `xml:",omitempty"` // Specifies the certificate thumbprint for the secure connection. If this value is not specified, a self-signed certificate is generated and used for the Virtual Machine.
+}
+
+type WinRMProtocol string
+
+// Enum values for WinRMProtocol
+const (
+	WinRMProtocolHTTP  WinRMProtocol = "Http"
+	WinRMProtocolHTTPS WinRMProtocol = "Https"
+)
+
+// SSH specifies the SSH public keys and key pairs to use with the Virtual Machine.
+type SSH struct {
+	PublicKeys []PublicKey `xml:">PublicKey"`
+	KeyPairs   []KeyPair   `xml:">KeyPair"`
+}
+
+// PublicKey specifies a public SSH key.
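+//
+// A hedged sketch of wiring a public key into a Linux provisioning
+// ConfigurationSet (all values are illustrative placeholders):
+//
+//	cs := ConfigurationSet{
+//		ConfigurationSetType: ConfigurationSetTypeLinuxProvisioning,
+//		HostName:             "my-host",
+//		UserName:             "azureuser",
+//		SSH: &SSH{
+//			PublicKeys: []PublicKey{{
+//				Fingerprint: "certificate-fingerprint",
+//				Path:        "/home/azureuser/.ssh/authorized_keys",
+//			}},
+//		},
+//	}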
+type PublicKey struct {
+	Fingerprint string // Specifies the SHA1 fingerprint of an X509 certificate associated with the cloud service and includes the SSH public key.
+	// Specifies the full path of a file, on the Virtual Machine, where the SSH public key is stored. If
+	// the file already exists, the specified key is appended to the file.
+	Path string // Usually /home/username/.ssh/authorized_keys
+}
+
+// KeyPair specifies an SSH keypair.
+type KeyPair struct {
+	Fingerprint string // Specifies the SHA1 fingerprint of an X509 certificate that is associated with the cloud service and includes the SSH keypair.
+	// Specifies the full path of a file, on the virtual machine, which stores the SSH private key. The
+	// file is overwritten when multiple keys are written to it. The SSH public key is stored in the same
+	// directory and has the same name as the private key file with a .pub suffix.
+	Path string // Usually /home/username/.ssh/id_rsa
+}
+
+// InputEndpoint specifies the properties that define an external endpoint for
+// the Virtual Machine.
+type InputEndpoint struct {
+	LocalPort int                   // Specifies the internal port on which the Virtual Machine is listening.
+	Name      string                // Specifies the name of the external endpoint.
+	Port      int                   // Specifies the external port to use for the endpoint.
+	Protocol  InputEndpointProtocol // Specifies the transport protocol for the endpoint.
+	Vip       string                `xml:",omitempty"`
+}
+
+type InputEndpointProtocol string
+
+// Enum values for InputEndpointProtocol
+const (
+	InputEndpointProtocolTCP InputEndpointProtocol = "TCP"
+	InputEndpointProtocolUDP InputEndpointProtocol = "UDP"
+)
+
+// PublicIP contains a public IP address that can be used in addition to the
+// default virtual IP address for the Virtual Machine.
+type PublicIP struct {
+	Name                 string // Specifies the name of the public IP address.
+	IdleTimeoutInMinutes int    `xml:",omitempty"` // Specifies the timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
+}
+
+// ServiceCertificate contains a certificate for adding to a hosted service.
+type ServiceCertificate struct {
+	XMLName           xml.Name `xml:"CertificateFile"`
+	Data              string
+	CertificateFormat string
+	Password          string `xml:",omitempty"`
+}
+
+// StartRoleOperation contains the information for starting a Role.
+type StartRoleOperation struct {
+	XMLName       xml.Name `xml:"http://schemas.microsoft.com/windowsazure StartRoleOperation"`
+	OperationType string
+}
+
+// ShutdownRoleOperation contains the information for shutting down a Role.
+type ShutdownRoleOperation struct {
+	XMLName       xml.Name `xml:"http://schemas.microsoft.com/windowsazure ShutdownRoleOperation"`
+	OperationType string
+}
+
+// RestartRoleOperation contains the information for restarting a Role.
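+//
+// When marshalled (as RestartRole does before posting), it produces a body of
+// roughly this shape:
+//
+//	<RestartRoleOperation xmlns="http://schemas.microsoft.com/windowsazure">
+//		<OperationType>RestartRoleOperation</OperationType>
+//	</RestartRoleOperation>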
+type RestartRoleOperation struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure RestartRoleOperation"` + OperationType string +} + +// CaptureRoleOperation contains the information for capturing a Role +type CaptureRoleOperation struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure CaptureRoleOperation"` + OperationType string + PostCaptureAction PostCaptureAction + ProvisioningConfiguration *ConfigurationSet `xml:",omitempty"` + TargetImageLabel string + TargetImageName string +} + +type PostCaptureAction string + +// Enum values for PostCaptureAction +const ( + PostCaptureActionDelete PostCaptureAction = "Delete" + PostCaptureActionReprovision PostCaptureAction = "Reprovision" +) + +// RoleSizeList contains a list of the available role sizes +type RoleSizeList struct { + XMLName xml.Name `xml:"RoleSizes"` + RoleSizes []RoleSize `xml:"RoleSize"` +} + +// RoleSize contains a detailed explanation of a role size +type RoleSize struct { + Name string + Label string + Cores int + MemoryInMb int + SupportedByWebWorkerRoles bool + SupportedByVirtualMachines bool + MaxDataDiskCount int + WebWorkerResourceDiskSizeInMb int + VirtualMachineResourceDiskSizeInMb int +} + +// DNSServer contains the definition of a DNS server for virtual machine deployment +type DNSServer struct { + Name string + Address string +} + +// LoadBalancer contains the definition of a load balancer for virtual machine deployment +type LoadBalancer struct { + Name string // Specifies the name of the internal load balancer. + Type IPAddressType `xml:"FrontendIpConfiguration>Type"` // Specifies the type of virtual IP address that is provided by the load balancer. The only allowable value is Private. + SubnetName string `xml:"FrontendIpConfiguration>SubnetName,omitempty"` // Required if the deployment exists in a virtual network and a StaticVirtualNetworkIPAddress is assigned. Specifies the subnet of the virtual network that the load balancer uses. The virtual IP address that is managed by the load balancer is contained in this subnet. + StaticVirtualNetworkIPAddress string `xml:"FrontendIpConfiguration>StaticVirtualNetworkIPAddress,omitempty"` // Specifies a specific virtual IP address that the load balancer uses from the subnet in the virtual network. 
+}
+
+type IPAddressType string
+
+// Enum values for IPAddressType
+const (
+	IPAddressTypePrivate IPAddressType = "Private" // Only allowed value (currently) for IPAddressType
+)
+
+type ResourceExtensions struct {
+	List []ResourceExtension `xml:"ResourceExtension"`
+}
+
+type ResourceExtension struct {
+	Publisher                   string
+	Name                        string
+	Version                     string
+	Label                       string
+	Description                 string
+	PublicConfigurationSchema   string
+	PrivateConfigurationSchema  string
+	SampleConfig                string
+	ReplicationCompleted        string
+	Eula                        string
+	PrivacyURI                  string `xml:"PrivacyUri"`
+	HomepageURI                 string `xml:"HomepageUri"`
+	IsJSONExtension             bool   `xml:"IsJsonExtension"`
+	IsInternalExtension         bool
+	DisallowMajorVersionUpgrade bool
+	CompanyName                 string
+	SupportedOS                 string
+	PublishedDate               string
+}
+
+type PersistentVMRole struct {
+	XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure PersistentVMRole"`
+	Role
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go'
--- src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/entities_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,285 @@
+package virtualmachine
+
+import (
+	"encoding/xml"
+	"testing"
+)
+
+func TestDocumentedDeploymentRequest(t *testing.T) {
+	// xml based on https://msdn.microsoft.com/en-us/library/azure/jj157194.aspx
+	// fixed typos, replaced strongly typed fields with values of correct type
+	xmlString := `<Deployment xmlns="http://schemas.microsoft.com/windowsazure" xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+  <Name>name-of-deployment</Name>
+  <DeploymentSlot>deployment-environment</DeploymentSlot>
+  <Label></Label>
+  <RoleList>
+    <Role i:type="PersistentVMRole">
+      <RoleName>name-of-the-virtual-machine</RoleName>
+      <RoleType>PersistentVMRole</RoleType>
+      <ConfigurationSets>
+        <ConfigurationSet i:type="WindowsProvisioningConfigurationSet">
+          <ConfigurationSetType>WindowsProvisioningConfiguration</ConfigurationSetType>
+          <ComputerName>name-of-computer</ComputerName>
+          <AdminPassword>administrator-password</AdminPassword>
+          <EnableAutomaticUpdates>true</EnableAutomaticUpdates>
+          <TimeZone>time-zone</TimeZone>
+          <DomainJoin>
+            <Credentials>
+              <Domain>domain-to-join</Domain>
+              <Username>user-name-in-the-domain</Username>
+              <Password>password-for-the-user-name</Password>
+            </Credentials>
+            <JoinDomain>domain-to-join</JoinDomain>
+            <MachineObjectOU>distinguished-name-of-the-ou</MachineObjectOU>
+          </DomainJoin>
+          <StoredCertificateSettings>
+            <StoredCertificateSetting>
+              <StoreLocation>LocalMachine</StoreLocation>
+              <StoreName>name-of-store-on-the-machine</StoreName>
+              <Thumbprint>certificate-thumbprint</Thumbprint>
+            </StoredCertificateSetting>
+          </StoredCertificateSettings>
+          <WinRM>
+            <Listeners>
+              <Listener>
+                <Protocol>listener-protocol</Protocol>
+              </Listener>
+              <Listener>
+                <CertificateThumbprint>certificate-thumbprint</CertificateThumbprint>
+                <Protocol>listener-protocol</Protocol>
+              </Listener>
+            </Listeners>
+          </WinRM>
+          <AdminUsername>name-of-administrator-account</AdminUsername>
+          <CustomData>base-64-encoded-data</CustomData>
+          <AdditionalUnattendContent>
+            <Passes>
+              <UnattendPass>
+                <PassName>name-of-pass</PassName>
+                <Components>
+                  <UnattendComponent>
+                    <ComponentName>name-of-component</ComponentName>
+                    <ComponentSettings>
+                      <ComponentSetting>
+                        <SettingName>name-of-setting</SettingName>
+                        <Content>base-64-encoded-XML-content</Content>
+                      </ComponentSetting>
+                    </ComponentSettings>
+                  </UnattendComponent>
+                </Components>
+              </UnattendPass>
+            </Passes>
+          </AdditionalUnattendContent>
+        </ConfigurationSet>
+        <ConfigurationSet i:type="LinuxProvisioningConfigurationSet">
+          <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+          <HostName>host-name-for-the-virtual-machine</HostName>
+          <UserName>new-user-name</UserName>
+          <UserPassword>password-for-the-new-user</UserPassword>
+          <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
+          <SSH>
+            <PublicKeys>
+              <PublicKey>
+                <Fingerprint>certificate-fingerprint</Fingerprint>
+                <Path>SSH-public-key-storage-location</Path>
+              </PublicKey>
+            </PublicKeys>
+            <KeyPairs>
+              <KeyPair>
+                <Fingerprint>certificate-fingerprint</Fingerprint>
+                <Path>SSH-public-key-storage-location</Path>
+              </KeyPair>
+            </KeyPairs>
+          </SSH>
+          <CustomData>base-64-encoded-data</CustomData>
+        </ConfigurationSet>
+        <ConfigurationSet i:type="NetworkConfigurationSet">
+          <ConfigurationSetType>NetworkConfiguration</ConfigurationSetType>
+          <InputEndpoints>
+            <InputEndpoint>
+              <LoadBalancedEndpointSetName>name-of-load-balanced-set</LoadBalancedEndpointSetName>
+              <LocalPort>22</LocalPort>
+              <Name>ZZH</Name>
+              <Port>33</Port>
+              <LoadBalancerProbe>
+                <Path>/probe/me</Path>
+                <Port>80</Port>
+                <Protocol>http</Protocol>
+                <IntervalInSeconds>30</IntervalInSeconds>
+                <TimeoutInSeconds>5</TimeoutInSeconds>
+              </LoadBalancerProbe>
+              <Protocol>endpoint-protocol</Protocol>
+              <EnableDirectServerReturn>enable-direct-server-return</EnableDirectServerReturn>
+              <EndpointAcl>
+                <Rules>
+                  <Rule>
+                    <Order>priority-of-the-rule</Order>
+                    <Action>permit-rule</Action>
+                    <RemoteSubnet>subnet-of-the-rule</RemoteSubnet>
+                    <Description>description-of-the-rule</Description>
+                  </Rule>
+                </Rules>
+              </EndpointAcl>
+              <LoadBalancerName>name-of-internal-loadbalancer</LoadBalancerName>
+              <IdleTimeoutInMinutes>9</IdleTimeoutInMinutes>
+            </InputEndpoint>
+          </InputEndpoints>
+          <SubnetNames>
+            <SubnetName>name-of-subnet</SubnetName>
+          </SubnetNames>
+          <StaticVirtualNetworkIPAddress>ip-address</StaticVirtualNetworkIPAddress>
+          <PublicIPs>
+            <PublicIP>
+              <Name>name-of-public-ip</Name>
+              <IdleTimeoutInMinutes>11</IdleTimeoutInMinutes>
+            </PublicIP>
+          </PublicIPs>
+        </ConfigurationSet>
+      </ConfigurationSets>
+      <ResourceExtensionReferences>
+        <ResourceExtensionReference>
+          <ReferenceName>name-of-reference</ReferenceName>
+          <Publisher>name-of-publisher</Publisher>
+          <Name>name-of-extension</Name>
+          <Version>version-of-extension</Version>
+          <ResourceExtensionParameterValues>
+            <ResourceExtensionParameterValue>
+              <Key>name-of-parameter-key</Key>
+              <Value>parameter-value</Value>
+              <Type>type-of-parameter</Type>
+            </ResourceExtensionParameterValue>
+          </ResourceExtensionParameterValues>
+          <State>state-of-resource</State>
+          <Certificates>
+            <Certificate>
+              <Thumbprint>certificate-thumbprint</Thumbprint>
+              <ThumbprintAlgorithm>certificate-algorithm</ThumbprintAlgorithm>
+            </Certificate>
+          </Certificates>
+        </ResourceExtensionReference>
+      </ResourceExtensionReferences>
+      <VMImageName>name-of-vm-image</VMImageName>
+      <MediaLocation>path-to-vhd</MediaLocation>
+      <AvailabilitySetName>name-of-availability-set</AvailabilitySetName>
+      <DataVirtualHardDisks>
+        <DataVirtualHardDisk>
+          <HostCaching>caching-mode</HostCaching>
+          <DiskLabel>label-of-data-disk</DiskLabel>
+          <DiskName>name-of-disk</DiskName>
+          <Lun>0</Lun>
+          <LogicalDiskSizeInGB>50</LogicalDiskSizeInGB>
+          <MediaLink>path-to-vhd</MediaLink>
+        </DataVirtualHardDisk>
+      </DataVirtualHardDisks>
+      <OSVirtualHardDisk>
+        <HostCaching>caching-mode</HostCaching>
+        <DiskLabel>label-of-operating-system-disk</DiskLabel>
+        <DiskName>name-of-disk</DiskName>
+        <MediaLink>path-to-vhd</MediaLink>
+        <SourceImageName>name-of-source-image</SourceImageName>
+        <OS>operating-system-of-image</OS>
+        <RemoteSourceImageLink>path-to-source-image</RemoteSourceImageLink>
+        <ResizedSizeInGB>125</ResizedSizeInGB>
+      </OSVirtualHardDisk>
+      <RoleSize>size-of-virtual-machine</RoleSize>
+      <ProvisionGuestAgent>true</ProvisionGuestAgent>
+      <VMImageInput>
+        <OSDiskConfiguration>
+          <ResizedSizeInGB>126</ResizedSizeInGB>
+        </OSDiskConfiguration>
+        <DataDiskConfigurations>
+          <DataDiskConfiguration>
+            <Name>disk-name</Name>
+            <ResizedSizeInGB>127</ResizedSizeInGB>
+          </DataDiskConfiguration>
+        </DataDiskConfigurations>
+      </VMImageInput>
+    </Role>
+  </RoleList>
+  <VirtualNetworkName>name-of-virtual-network</VirtualNetworkName>
+  <Dns>
+    <DnsServers>
+      <DnsServer>
+        <Name>dns-name</Name>
+        <Address>dns-ip-address</Address>
+      </DnsServer>
+    </DnsServers>
+  </Dns>
+  <ReservedIPName>name-of-reserved-ip</ReservedIPName>
+  <LoadBalancers>
+    <LoadBalancer>
+      <Name>name-of-internal-load-balancer</Name>
+      <FrontendIpConfiguration>
+        <Type>Private</Type>
+        <SubnetName>name-of-subnet</SubnetName>
+        <StaticVirtualNetworkIPAddress>static-ip-address</StaticVirtualNetworkIPAddress>
+      </FrontendIpConfiguration>
+    </LoadBalancer>
+  </LoadBalancers>
+</Deployment>
` + + deployment := DeploymentRequest{} + if err := xml.Unmarshal([]byte(xmlString), &deployment); err != nil { + t.Fatal(err) + } + + if deployment.Name != "name-of-deployment" { + t.Fatalf("Expected deployment.Name=\"name-of-deployment\", but got \"%s\"", + deployment.Name) + } + + // ====== + + t.Logf("deployment.RoleList[0]: %+v", deployment.RoleList[0]) + if expected := "name-of-the-virtual-machine"; deployment.RoleList[0].RoleName != expected { + t.Fatalf("Expected deployment.RoleList[0].RoleName=%v, but got %v", expected, deployment.RoleList[0].RoleName) + } + + // ====== + + t.Logf("deployment.DNSServers[0]: %+v", deployment.DNSServers[0]) + if deployment.DNSServers[0].Name != "dns-name" { + t.Fatalf("Expected deployment.DNSServers[0].Name=\"dns-name\", but got \"%s\"", + deployment.DNSServers[0].Name) + } + + // ====== + + t.Logf("deployment.LoadBalancers[0]: %+v", deployment.LoadBalancers[0]) + if deployment.LoadBalancers[0].Name != "name-of-internal-load-balancer" { + t.Fatalf("Expected deployment.LoadBalancers[0].Name=\"name-of-internal-load-balancer\", but got \"%s\"", + deployment.LoadBalancers[0].Name) + } + + if deployment.LoadBalancers[0].Type != IPAddressTypePrivate { + t.Fatalf("Expected deployment.LoadBalancers[0].Type=IPAddressTypePrivate, but got \"%s\"", + deployment.LoadBalancers[0].Type) + } + + if deployment.LoadBalancers[0].StaticVirtualNetworkIPAddress != "static-ip-address" { + t.Fatalf("Expected deployment.LoadBalancers[0].StaticVirtualNetworkIPAddress=\"static-ip-address\", but got \"%s\"", + deployment.LoadBalancers[0].StaticVirtualNetworkIPAddress) + } + + // ====== + + extensionReferences := (*deployment.RoleList[0].ResourceExtensionReferences) + t.Logf("(*deployment.RoleList[0].ResourceExtensionReferences)[0]: %+v", extensionReferences[0]) + if extensionReferences[0].Name != "name-of-extension" { + t.Fatalf("Expected (*deployment.RoleList[0].ResourceExtensionReferences)[0].Name=\"name-of-extension\", but got \"%s\"", + extensionReferences[0].Name) + } + + if extensionReferences[0].ParameterValues[0].Key != "name-of-parameter-key" { + t.Fatalf("Expected (*deployment.RoleList[0].ResourceExtensionReferences)[0].ParameterValues[0].Key=\"name-of-parameter-key\", but got %v", + extensionReferences[0].ParameterValues[0].Key) + } + + // ====== + + if deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB != 127 { + t.Fatalf("Expected deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB=127, but got %v", + deployment.RoleList[0].VMImageInput.DataDiskConfigurations[0].ResizedSizeInGB) + } + +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,25 @@ +package virtualmachine + +import ( + "encoding/xml" +) + +const ( + azureResourceExtensionsURL = "services/resourceextensions" +) + +// GetResourceExtensions lists the resource extensions that are available to add +// to a virtual machine. 
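+//
+// A hedged usage sketch (assumes an existing management.Client named
+// mgmtClient):
+//
+//	c := virtualmachine.NewClient(mgmtClient)
+//	extensions, err := c.GetResourceExtensions()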
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn495441.aspx +func (c VirtualMachineClient) GetResourceExtensions() (extensions []ResourceExtension, err error) { + data, err := c.client.SendAzureGetRequest(azureResourceExtensionsURL) + if err != nil { + return extensions, err + } + + var response ResourceExtensions + err = xml.Unmarshal(data, &response) + extensions = response.List + return +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachine/resourceextensions_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +package virtualmachine + +import ( + "testing" + + "github.com/Azure/azure-sdk-for-go/management/testutils" +) + +func TestAzureGetResourceExtensions(t *testing.T) { + client := testutils.GetTestClient(t) + + list, err := NewClient(client).GetResourceExtensions() + if err != nil { + t.Fatal(err) + } + + t.Logf("Found %d extensions", len(list)) + if len(list) == 0 { + t.Fatal("Huh, no resource extensions at all? Something must be wrong.") + } + + for _, extension := range list { + if extension.Name == "" { + t.Fatalf("Resource with empty name? Something must have gone wrong with serialization: %+v", extension) + } + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk' === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,229 @@ +// Package virtualmachinedisk provides a client for Virtual Machine Disks. +package virtualmachinedisk + +import ( + "encoding/xml" + "fmt" + + "github.com/Azure/azure-sdk-for-go/management" +) + +const ( + addDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks" + addDiskURL = "services/disks" + deleteDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks/%d" + deleteDiskURL = "services/disks/%s" + getDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks/%d" + getDiskURL = "services/disks/%s" + listDisksURL = "services/disks" + updateDataDiskURL = "services/hostedservices/%s/deployments/%s/roles/%s/DataDisks/%d" + updateDiskURL = "services/disks/%s" + + errParamNotSpecified = "Parameter %s is not specified." 
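+
+	// A hedged sketch of typical use (names are illustrative placeholders;
+	// assumes an existing management.Client named mgmtClient):
+	//
+	//	c := virtualmachinedisk.NewClient(mgmtClient)
+	//	oid, err := c.AddDataDisk("my-service", "my-deployment", "my-role",
+	//		virtualmachinedisk.CreateDataDiskParameters{
+	//			HostCaching:         virtualmachinedisk.HostCachingTypeNone,
+	//			LogicalDiskSizeInGB: 10,
+	//			MediaLink:           "https://account.blob.core.windows.net/vhds/data.vhd",
+	//		})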
+) + +//NewClient is used to instantiate a new DiskClient from an Azure client +func NewClient(client management.Client) DiskClient { + return DiskClient{client: client} +} + +// AddDataDisk adds a data disk to a Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157199.aspx +func (c DiskClient) AddDataDisk( + service string, + deployment string, + role string, + params CreateDataDiskParameters) (management.OperationID, error) { + if service == "" { + return "", fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return "", fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return "", fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(addDataDiskURL, service, deployment, role) + + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePostRequest(requestURL, req) +} + +// AddDisk adds an operating system disk or data disk to the user image repository +// +// https://msdn.microsoft.com/en-us/library/azure/jj157178.aspx +func (c DiskClient) AddDisk(params CreateDiskParameters) (management.OperationID, error) { + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePostRequest(addDiskURL, req) +} + +// DeleteDataDisk removes the specified data disk from a Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157179.aspx +func (c DiskClient) DeleteDataDisk( + service string, + deployment string, + role string, + lun int, + deleteVHD bool) (management.OperationID, error) { + if service == "" { + return "", fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return "", fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return "", fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(deleteDataDiskURL, service, deployment, role, lun) + if deleteVHD { + requestURL += "?comp=media" + } + + return c.client.SendAzureDeleteRequest(requestURL) +} + +// DeleteDisk deletes the specified data or operating system disk from the image +// repository that is associated with the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/jj157200.aspx +func (c DiskClient) DeleteDisk(name string, deleteVHD bool) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(deleteDiskURL, name) + if deleteVHD { + requestURL += "?comp=media" + } + + return c.client.SendAzureDeleteRequest(requestURL) +} + +// GetDataDisk retrieves the specified data disk from a Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157180.aspx +func (c DiskClient) GetDataDisk( + service string, + deployment string, + role string, + lun int) (DataDiskResponse, error) { + var response DataDiskResponse + if service == "" { + return response, fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return response, fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return response, fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(getDataDiskURL, service, deployment, role, lun) + + data, err := c.client.SendAzureGetRequest(requestURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +// GetDisk retrieves information about the specified disk +// +// https://msdn.microsoft.com/en-us/library/azure/dn775053.aspx +func (c DiskClient) GetDisk(name 
string) (DiskResponse, error) { + var response DiskResponse + if name == "" { + return response, fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(getDiskURL, name) + + data, err := c.client.SendAzureGetRequest(requestURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +// ListDisks retrieves a list of the disks in the image repository that is associated +// with the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/jj157176.aspx +func (c DiskClient) ListDisks() (ListDiskResponse, error) { + var response ListDiskResponse + + data, err := c.client.SendAzureGetRequest(listDisksURL) + if err != nil { + return response, err + } + + err = xml.Unmarshal(data, &response) + return response, err +} + +// UpdateDataDisk updates the configuration of the specified data disk that is +// attached to the specified Virtual Machine +// +// https://msdn.microsoft.com/en-us/library/azure/jj157190.aspx +func (c DiskClient) UpdateDataDisk( + service string, + deployment string, + role string, + lun int, + params UpdateDataDiskParameters) (management.OperationID, error) { + if service == "" { + return "", fmt.Errorf(errParamNotSpecified, "service") + } + if deployment == "" { + return "", fmt.Errorf(errParamNotSpecified, "deployment") + } + if role == "" { + return "", fmt.Errorf(errParamNotSpecified, "role") + } + + requestURL := fmt.Sprintf(updateDataDiskURL, service, deployment, role, lun) + + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePutRequest(requestURL, "", req) +} + +// UpdateDisk updates the label of an existing disk in the image repository that is +// associated with the specified subscription +// +// https://msdn.microsoft.com/en-us/library/azure/jj157205.aspx +func (c DiskClient) UpdateDisk( + name string, + params UpdateDiskParameters) (management.OperationID, error) { + if name == "" { + return "", fmt.Errorf(errParamNotSpecified, "name") + } + + requestURL := fmt.Sprintf(updateDiskURL, name) + + req, err := xml.Marshal(params) + if err != nil { + return "", err + } + + return c.client.SendAzurePutRequest(requestURL, "", req) +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,134 @@ +package virtualmachinedisk + +import ( + "encoding/xml" + + "github.com/Azure/azure-sdk-for-go/management" +) + +// DiskClient is used to perform operations on Azure Disks +type DiskClient struct { + client management.Client +} + +// CreateDiskParameters represents a disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type CreateDiskParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disk"` + OS OperatingSystemType `xml:",omitempty"` + Label string + MediaLink string `xml:",omitempty"` + Name string +} + +// UpdateDiskParameters represents a disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type UpdateDiskParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disk"` + Label string `xml:",omitempty"` + Name string + ResizedSizeInGB int `xml:",omitempty"` +} + +// ListDiskResponse represents a disk +// +// 
https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type ListDiskResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disks"` + Disk []DiskResponse +} + +// DiskResponse represents a disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type DiskResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure Disk"` + AffinityGroup string + AttachedTo Resource + IsCorrupted bool + OS OperatingSystemType + Location string + LogicalDiskSizeInGB int + MediaLink string + Name string + SourceImageName string + CreatedTime string + IOType IOType +} + +// Resource describes the resource details a disk is currently attached to +type Resource struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure AttachedTo"` + DeploymentName string + HostedServiceName string + RoleName string +} + +// IOType represents an IO type +type IOType string + +// These constants represent the possible IO types +const ( + IOTypeProvisioned IOType = "Provisioned" + IOTypeStandard IOType = "Standard" +) + +// OperatingSystemType represents an operating system type +type OperatingSystemType string + +// These constants represent the valid operating system types +const ( + OperatingSystemTypeNull OperatingSystemType = "NULL" + OperatingSystemTypeLinux OperatingSystemType = "Linux" + OperatingSystemTypeWindows OperatingSystemType = "Windows" +) + +// CreateDataDiskParameters represents a data disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type CreateDataDiskParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure DataVirtualHardDisk"` + HostCaching HostCachingType `xml:",omitempty"` + DiskLabel string `xml:",omitempty"` + DiskName string `xml:",omitempty"` + Lun int `xml:",omitempty"` + LogicalDiskSizeInGB int `xml:",omitempty"` + MediaLink string + SourceMediaLink string `xml:",omitempty"` +} + +// UpdateDataDiskParameters represents a data disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type UpdateDataDiskParameters struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure DataVirtualHardDisk"` + HostCaching HostCachingType `xml:",omitempty"` + DiskName string + Lun int + MediaLink string +} + +// DataDiskResponse represents a data disk +// +// https://msdn.microsoft.com/en-us/library/azure/jj157188.aspx +type DataDiskResponse struct { + XMLName xml.Name `xml:"http://schemas.microsoft.com/windowsazure DataVirtualHardDisk"` + HostCaching HostCachingType + DiskLabel string + DiskName string + Lun int + LogicalDiskSizeInGB int + MediaLink string +} + +// HostCachingType represents a host caching type +type HostCachingType string + +// These constants represent the valid host caching types +const ( + HostCachingTypeNone HostCachingType = "None" + HostCachingTypeReadOnly HostCachingType = "ReadOnly" + HostCachingTypeReadWrite HostCachingType = "ReadWrite" +) === added directory 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage' === added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go' --- src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Package virtualmachineimage provides a client for Virtual Machine Images. 
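+//
+// A hedged usage sketch (assumes an existing management.Client named
+// mgmtClient):
+//
+//	c := virtualmachineimage.NewClient(mgmtClient)
+//	images, err := c.ListVirtualMachineImages()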
+package virtualmachineimage
+
+import (
+	"encoding/xml"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const (
+	azureImageListURL      = "services/vmimages"
+	azureRoleOperationsURL = "services/hostedservices/%s/deployments/%s/roleinstances/%s/operations"
+	errParamNotSpecified   = "Parameter %s is not specified."
+)
+
+// NewClient is used to instantiate a new Client from an Azure client
+func NewClient(client management.Client) Client {
+	return Client{client}
+}
+
+// ListVirtualMachineImages lists the VM images in the image repository that is
+// associated with the subscription
+func (c Client) ListVirtualMachineImages() (ListVirtualMachineImagesResponse, error) {
+	var imageList ListVirtualMachineImagesResponse
+
+	response, err := c.SendAzureGetRequest(azureImageListURL)
+	if err != nil {
+		return imageList, err
+	}
+	err = xml.Unmarshal(response, &imageList)
+	return imageList, err
+}
+
+// Capture captures the given role as a reusable VM image
+func (c Client) Capture(cloudServiceName, deploymentName, roleName string,
+	name, label string, osState OSState, parameters CaptureParameters) (management.OperationID, error) {
+	if cloudServiceName == "" {
+		return "", fmt.Errorf(errParamNotSpecified, "cloudServiceName")
+	}
+	if deploymentName == "" {
+		return "", fmt.Errorf(errParamNotSpecified, "deploymentName")
+	}
+	if roleName == "" {
+		return "", fmt.Errorf(errParamNotSpecified, "roleName")
+	}
+
+	request := CaptureRoleAsVMImageOperation{
+		VMImageName:       name,
+		VMImageLabel:      label,
+		OSState:           osState,
+		CaptureParameters: parameters,
+	}
+	data, err := xml.Marshal(request)
+	if err != nil {
+		return "", err
+	}
+
+	return c.SendAzurePostRequest(fmt.Sprintf(azureRoleOperationsURL,
+		cloudServiceName, deploymentName, roleName), data)
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go'
--- src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/virtualmachineimage/entities.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,97 @@
+package virtualmachineimage
+
+import (
+	"encoding/xml"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+	vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk"
+)
+
+// Client is used to perform operations on Azure VM Images.
+type Client struct {
+	management.Client
+}
+
+// ListVirtualMachineImagesResponse represents the response of a
+// ListVirtualMachineImages call
+type ListVirtualMachineImagesResponse struct {
+	XMLName  xml.Name  `xml:"VMImages"`
+	Xmlns    string    `xml:"xmlns,attr"`
+	VMImages []VMImage `xml:"VMImage"`
+}
+
+// VMImage represents a VM image in the image repository
+type VMImage struct {
+	Name                   string                  // Specifies the name of the image.
+	Label                  string                  // Specifies an identifier for the image.
+	Category               string                  // Specifies the repository classification of the image. All user images have the category User.
+	Description            string                  // Specifies the description of the image.
+	OSDiskConfiguration    OSDiskConfiguration     // Specifies configuration information for the operating system disk that is associated with the image.
+	DataDiskConfigurations []DataDiskConfiguration // Specifies configuration information for the data disks that are associated with the image. A VM Image might not have data disks associated with it.
+	ServiceName            string                  // Specifies the name of the cloud service that contained the Virtual Machine from which the image was created.
+	DeploymentName         string                  // Specifies the name of the deployment that contained the Virtual Machine from which the image was created.
+	RoleName               string                  // Specifies the name of the Virtual Machine from which the image was created.
+	Location               string                  // Specifies the geo-location in which the media is located. The Location value is derived from the storage account that contains the blob in which the media is located. If the storage account belongs to an affinity group the value is NULL and the element is not displayed in the response.
+	AffinityGroup          string                  // Specifies the affinity group in which the media is located. The AffinityGroup value is derived from the storage account that contains the blob in which the media is located. If the storage account does not belong to an affinity group the value is NULL and the element is not displayed in the response.
+	CreatedTime            string                  // Specifies the time that the image was created.
+	ModifiedTime           string                  // Specifies the time that the image was last updated.
+	Language               string                  // Specifies the language of the image.
+	ImageFamily            string                  // Specifies a value that can be used to group VM Images.
+	RecommendedVMSize      string                  // Optional. Specifies the size to use for the Virtual Machine that is created from the VM Image.
+	IsPremium              string                  // Indicates whether the image contains software or associated services that will incur charges above the core price for the virtual machine. For additional details, see the PricingDetailLink element.
+	Eula                   string                  // Specifies the End User License Agreement that is associated with the image. The value for this element is a string, but it is recommended that the value be a URL that points to a EULA.
+	IconURI                string                  `xml:"IconUri"`      // Specifies the URI to the icon that is displayed for the image in the Management Portal.
+	SmallIconURI           string                  `xml:"SmallIconUri"` // Specifies the URI to the small icon that is displayed for the image in the Management Portal.
+	PrivacyURI             string                  `xml:"PrivacyUri"`   // Specifies the URI that points to a document that contains the privacy policy related to the image.
+	PublishedDate          string                  // Specifies the date when the image was added to the image repository.
+}
+
+// OSState represents the state of the operating system in a VM image
+type OSState string
+
+// These constants represent the valid OS states
+const (
+	OSStateGeneralized OSState = "Generalized"
+	OSStateSpecialized OSState = "Specialized"
+)
+
+// IOType represents the type of the storage account for the backing VHD
+type IOType string
+
+// These constants represent the possible IO types
+const (
+	IOTypeProvisioned IOType = "Provisioned"
+	IOTypeStandard    IOType = "Standard"
+)
+
+// OSDiskConfiguration specifies configuration information for the operating
+// system disk that is associated with the image.
+type OSDiskConfiguration struct {
+	Name            string                 // Specifies the name of the operating system disk.
+	HostCaching     vmdisk.HostCachingType // Specifies the caching behavior of the operating system disk.
+	OSState         OSState                // Specifies the state of the operating system in the image.
+	OS              string                 // Specifies the operating system type of the image.
+	MediaLink       string                 // Specifies the location of the blob in Azure storage. The blob location belongs to a storage account in the subscription specified by the value in the operation call.
+	LogicalSizeInGB float64                // Specifies the size, in GB, of the operating system disk.
+	IOType          IOType                 // Identifies the type of the storage account for the backing VHD. If the backing VHD is in a Provisioned storage account, "Provisioned" is returned; otherwise "Standard" is returned.
+}
+
+// DataDiskConfiguration specifies configuration information for the data disks
+// that are associated with the image.
+type DataDiskConfiguration struct {
+	Name            string                 // Specifies the name of the data disk.
+	HostCaching     vmdisk.HostCachingType // Specifies the caching behavior of the data disk.
+	Lun             string                 // Specifies the Logical Unit Number (LUN) for the data disk.
+	MediaLink       string                 // Specifies the location of the blob in Azure storage. The blob location belongs to a storage account in the subscription specified by the value in the operation call.
+	LogicalSizeInGB float64                // Specifies the size, in GB, of the data disk.
+	IOType          IOType                 // Identifies the type of the storage account for the backing VHD. If the backing VHD is in a Provisioned storage account, "Provisioned" is returned; otherwise "Standard" is returned.
+}
+
+// CaptureRoleAsVMImageOperation is the body of the request used to capture a
+// role as a VM image
+type CaptureRoleAsVMImageOperation struct {
+	XMLName       xml.Name `xml:"http://schemas.microsoft.com/windowsazure CaptureRoleAsVMImageOperation"`
+	OperationType string   // CaptureRoleAsVMImageOperation
+	OSState       OSState
+	VMImageName   string
+	VMImageLabel  string
+	CaptureParameters
+}
+
+// CaptureParameters holds the optional descriptive properties of a captured
+// VM image
+type CaptureParameters struct {
+	Description       string `xml:",omitempty"`
+	Language          string `xml:",omitempty"`
+	ImageFamily       string `xml:",omitempty"`
+	RecommendedVMSize string `xml:",omitempty"`
+}
=== added directory 'src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork'
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go'
--- src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/client.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,47 @@
+// Package virtualnetwork provides a client for Virtual Networks.
+package virtualnetwork
+
+import (
+	"encoding/xml"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const (
+	azureNetworkConfigurationURL = "services/networking/media"
+)
+
+// NewClient returns a new VirtualNetworkClient instance
+func NewClient(client management.Client) VirtualNetworkClient {
+	return VirtualNetworkClient{client: client}
+}
+
+// GetVirtualNetworkConfiguration retrieves the current virtual network
+// configuration for the currently active subscription. Note that the
+// underlying Azure API means that network related operations are not safe
+// for running concurrently.
+func (c VirtualNetworkClient) GetVirtualNetworkConfiguration() (NetworkConfiguration, error) {
+	networkConfiguration := c.NewNetworkConfiguration()
+	response, err := c.client.SendAzureGetRequest(azureNetworkConfigurationURL)
+	if err != nil {
+		return networkConfiguration, err
+	}
+
+	err = xml.Unmarshal(response, &networkConfiguration)
+	return networkConfiguration, err
+}
+
+// SetVirtualNetworkConfiguration configures the virtual networks for the
+// currently active subscription according to the NetworkConfiguration given.
+// Note that the underlying Azure API means that network related operations
+// are not safe for running concurrently.
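+//
+// A hedged usage sketch (not part of the original source; it assumes a
+// VirtualNetworkClient vnc obtained from NewClient): the usual pattern is a
+// read-modify-write cycle, which callers must serialize themselves because
+// the API is not safe for concurrent use:
+//
+//	cfg, err := vnc.GetVirtualNetworkConfiguration()
+//	if err != nil {
+//		// handle the error
+//	}
+//	cfg.Configuration.VirtualNetworkSites = append(cfg.Configuration.VirtualNetworkSites,
+//		VirtualNetworkSite{
+//			Name:         "example-net", // hypothetical name
+//			Location:     "West US",
+//			AddressSpace: AddressSpace{AddressPrefix: []string{"10.0.0.0/16"}},
+//		})
+//	opID, err := vnc.SetVirtualNetworkConfiguration(cfg)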
+func (c VirtualNetworkClient) SetVirtualNetworkConfiguration(networkConfiguration NetworkConfiguration) (management.OperationID, error) {
+	networkConfiguration.setXMLNamespaces()
+	networkConfigurationBytes, err := xml.Marshal(networkConfiguration)
+	if err != nil {
+		return "", err
+	}
+
+	return c.client.SendAzurePutRequest(azureNetworkConfigurationURL, "text/plain", networkConfigurationBytes)
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go'
--- src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/virtualnetwork/entities.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,90 @@
+package virtualnetwork
+
+import (
+	"encoding/xml"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+)
+
+const xmlNamespace = "http://schemas.microsoft.com/ServiceHosting/2011/07/NetworkConfiguration"
+const xmlNamespaceXsd = "http://www.w3.org/2001/XMLSchema"
+const xmlNamespaceXsi = "http://www.w3.org/2001/XMLSchema-instance"
+
+// VirtualNetworkClient is used to perform operations on Virtual Networks.
+type VirtualNetworkClient struct {
+	client management.Client
+}
+
+// NetworkConfiguration represents the network configuration for an entire Azure
+// subscription.
+type NetworkConfiguration struct {
+	XMLName         xml.Name                    `xml:"NetworkConfiguration"`
+	XMLNamespaceXsd string                      `xml:"xmlns:xsd,attr"`
+	XMLNamespaceXsi string                      `xml:"xmlns:xsi,attr"`
+	XMLNs           string                      `xml:"xmlns,attr"`
+	Configuration   VirtualNetworkConfiguration `xml:"VirtualNetworkConfiguration"`
+
+	// TODO: Nicer builder methods for these that abstract away the
+	// underlying structure.
+}
+
+// NewNetworkConfiguration creates a new empty NetworkConfiguration structure
+// for further configuration. The XML namespaces are already set correctly.
+func (client *VirtualNetworkClient) NewNetworkConfiguration() NetworkConfiguration {
+	networkConfiguration := NetworkConfiguration{}
+	networkConfiguration.setXMLNamespaces()
+	return networkConfiguration
+}
+
+// setXMLNamespaces ensures that all of the required namespaces are set. It
+// should be called prior to marshalling the structure to XML for use with the
+// Azure REST endpoint. It is used internally prior to submitting requests, but
+// since it is idempotent there is no harm in repeat calls.
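+//
+// For illustration (an inference from the constants and struct tags above,
+// not a captured wire sample): once the namespaces are set, the marshalled
+// document opens with a root element of roughly this shape:
+//
+//	<NetworkConfiguration
+//	    xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+//	    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+//	    xmlns="http://schemas.microsoft.com/ServiceHosting/2011/07/NetworkConfiguration">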
+func (n *NetworkConfiguration) setXMLNamespaces() { + n.XMLNamespaceXsd = xmlNamespaceXsd + n.XMLNamespaceXsi = xmlNamespaceXsi + n.XMLNs = xmlNamespace +} + +type VirtualNetworkConfiguration struct { + DNS DNS `xml:"Dns,omitempty"` + LocalNetworkSites []LocalNetworkSite `xml:"LocalNetworkSites>LocalNetworkSite"` + VirtualNetworkSites []VirtualNetworkSite `xml:"VirtualNetworkSites>VirtualNetworkSite"` +} + +type DNS struct { + DNSServers []DNSServer `xml:"DnsServers>DnsServer,omitempty"` +} + +type DNSServer struct { + XMLName xml.Name `xml:"DnsServer"` + Name string `xml:"name,attr"` + IPAddress string `xml:"IPAddress,attr"` +} + +type DNSServerRef struct { + Name string `xml:"name,attr"` +} + +type VirtualNetworkSite struct { + Name string `xml:"name,attr"` + Location string `xml:"Location,attr"` + AddressSpace AddressSpace `xml:"AddressSpace"` + Subnets []Subnet `xml:"Subnets>Subnet"` + DNSServersRef []DNSServerRef `xml:"DnsServersRef>DnsServerRef,omitempty"` +} + +type LocalNetworkSite struct { + Name string `xml:"name,attr"` + VPNGatewayAddress string + AddressSpace AddressSpace +} + +type AddressSpace struct { + AddressPrefix []string +} + +type Subnet struct { + Name string `xml:"name,attr"` + AddressPrefix string +} === added directory 'src/github.com/Azure/azure-sdk-for-go/management/vmutils' === added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go' --- src/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/configurationset.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,28 @@ +package vmutils + +import ( + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +func updateOrAddConfig(configs []vm.ConfigurationSet, configType vm.ConfigurationSetType, update func(*vm.ConfigurationSet)) []vm.ConfigurationSet { + config := findConfig(configs, configType) + if config == nil { + configs = append(configs, vm.ConfigurationSet{ConfigurationSetType: configType}) + config = findConfig(configs, configType) + } + update(config) + + return configs +} + +func findConfig(configs []vm.ConfigurationSet, configType vm.ConfigurationSetType) *vm.ConfigurationSet { + for i, config := range configs { + if config.ConfigurationSetType == configType { + // need to return a pointer to the original set in configs, + // not the copy made by the range iterator + return &configs[i] + } + } + + return nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go' --- src/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/datadisks.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +package vmutils + +import ( + "fmt" + + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" + vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" +) + +// ConfigureWithNewDataDisk adds configuration for a new (empty) data disk +func ConfigureWithNewDataDisk(role *vm.Role, label, destinationVhdStorageURL string, sizeInGB int, cachingType vmdisk.HostCachingType) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + appendDataDisk(role, vm.DataVirtualHardDisk{ + DiskLabel: label, + HostCaching: cachingType, + LogicalDiskSizeInGB: sizeInGB, + MediaLink: destinationVhdStorageURL, + }) + + return nil +} + +// ConfigureWithExistingDataDisk adds configuration for an existing data disk +func 
ConfigureWithExistingDataDisk(role *vm.Role, diskName string, cachingType vmdisk.HostCachingType) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	appendDataDisk(role, vm.DataVirtualHardDisk{
+		DiskName:    diskName,
+		HostCaching: cachingType,
+	})
+
+	return nil
+}
+
+// ConfigureWithVhdDataDisk adds configuration for attaching a VHD in a storage
+// account as a data disk
+func ConfigureWithVhdDataDisk(role *vm.Role, sourceVhdStorageURL string, cachingType vmdisk.HostCachingType) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	appendDataDisk(role, vm.DataVirtualHardDisk{
+		SourceMediaLink: sourceVhdStorageURL,
+		HostCaching:     cachingType,
+	})
+
+	return nil
+}
+
+// appendDataDisk assigns the next free LUN to the disk and appends it to the
+// role's data disks
+func appendDataDisk(role *vm.Role, disk vm.DataVirtualHardDisk) {
+	disk.Lun = len(role.DataVirtualHardDisks)
+	role.DataVirtualHardDisks = append(role.DataVirtualHardDisks, disk)
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go'
--- src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/deployment.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,78 @@
+package vmutils
+
+import (
+	"fmt"
+
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+// ConfigureDeploymentFromRemoteImage configures VM Role to deploy from a remote
+// image source. "remoteImageSourceURL" can be any publicly accessible URL to
+// a VHD file, including but not limited to a SAS Azure Storage blob URL. "os"
+// needs to be either "Linux" or "Windows". "label" is optional.
+func ConfigureDeploymentFromRemoteImage(
+	role *vm.Role,
+	remoteImageSourceURL string,
+	os string,
+	newDiskName string,
+	destinationVhdStorageURL string,
+	label string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.OSVirtualHardDisk = &vm.OSVirtualHardDisk{
+		RemoteSourceImageLink: remoteImageSourceURL,
+		MediaLink:             destinationVhdStorageURL,
+		DiskName:              newDiskName,
+		OS:                    os,
+		DiskLabel:             label,
+	}
+	return nil
+}
+
+// ConfigureDeploymentFromPlatformImage configures VM Role to deploy from a
+// platform image. See osimage package for methods to retrieve a list of the
+// available platform images. "label" is optional.
+func ConfigureDeploymentFromPlatformImage(
+	role *vm.Role,
+	imageName string,
+	mediaLink string,
+	label string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.OSVirtualHardDisk = &vm.OSVirtualHardDisk{
+		SourceImageName: imageName,
+		MediaLink:       mediaLink,
+	}
+	return nil
+}
+
+// ConfigureDeploymentFromVMImage configures VM Role to deploy from a previously
+// captured VM image.
+func ConfigureDeploymentFromVMImage(
+	role *vm.Role,
+	vmImageName string,
+	mediaLocation string,
+	provisionGuestAgent bool) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.VMImageName = vmImageName
+	role.MediaLocation = mediaLocation
+	role.ProvisionGuestAgent = provisionGuestAgent
+	return nil
+}
+
+// ConfigureDeploymentFromExistingOSDisk configures VM Role to deploy from an
+// existing disk. 'label' is optional.
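+//
+// A brief sketch (illustrative; the disk and label names are hypothetical):
+//
+//	role := NewVMConfiguration("restored-vm", "Standard_D1")
+//	if err := ConfigureDeploymentFromExistingOSDisk(&role, "myvm-os-disk", "OSDisk"); err != nil {
+//		// handle the error
+//	}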
+func ConfigureDeploymentFromExistingOSDisk(role *vm.Role, osDiskName, label string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.OSVirtualHardDisk = &vm.OSVirtualHardDisk{
+		DiskName:  osDiskName,
+		DiskLabel: label,
+	}
+	return nil
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go'
--- src/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/examples_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,53 @@
+package vmutils
+
+import (
+	"encoding/base64"
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+	"github.com/Azure/azure-sdk-for-go/management/hostedservice"
+	"github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+func Example() {
+	dnsName := "test-vm-from-go"
+	storageAccount := "mystorageaccount"
+	location := "West US"
+	vmSize := "Small"
+	vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
+	userName := "testuser"
+	userPassword := "Test123"
+
+	client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "")
+	if err != nil {
+		panic(err)
+	}
+
+	// create hosted service
+	if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{
+		ServiceName: dnsName,
+		Location:    location,
+		Label:       base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil {
+		panic(err)
+	}
+
+	// create virtual machine
+	role := NewVMConfiguration(dnsName, vmSize)
+	ConfigureDeploymentFromPlatformImage(
+		&role,
+		vmImage,
+		fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName),
+		"")
+	ConfigureForLinux(&role, dnsName, userName, userPassword)
+	ConfigureWithPublicSSH(&role)
+
+	operationID, err := virtualmachine.NewClient(client).
+		CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{})
+	if err != nil {
+		panic(err)
+	}
+	err = client.WaitForOperation(operationID, nil)
+	if err != nil {
+		panic(err)
+	}
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go'
--- src/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,88 @@
+package vmutils
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+const (
+	dockerPublicConfigVersion = 2
+)
+
+// AddAzureVMExtensionConfiguration adds a VM extension reference to the role,
+// base64 encoding the given public and private configuration payloads
+func AddAzureVMExtensionConfiguration(role *vm.Role, name, publisher, version, referenceName, state string,
+	publicConfigurationValue, privateConfigurationValue []byte) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	extension := vm.ResourceExtensionReference{
+		Name:          name,
+		Publisher:     publisher,
+		Version:       version,
+		ReferenceName: referenceName,
+		State:         state,
+	}
+
+	if len(privateConfigurationValue) != 0 {
+		extension.ParameterValues = append(extension.ParameterValues, vm.ResourceExtensionParameter{
+			Key:   "ignored",
+			Value: base64.StdEncoding.EncodeToString(privateConfigurationValue),
+			Type:  "Private",
+		})
+	}
+
+	if len(publicConfigurationValue) != 0 {
+		extension.ParameterValues = append(extension.ParameterValues, vm.ResourceExtensionParameter{
+			Key:   "ignored",
+			Value: base64.StdEncoding.EncodeToString(publicConfigurationValue),
+			Type:  "Public",
+		})
+	}
+
+	if role.ResourceExtensionReferences == nil {
+		role.ResourceExtensionReferences = &[]vm.ResourceExtensionReference{}
+	}
+	extensionList := append(*role.ResourceExtensionReferences, extension)
+	role.ResourceExtensionReferences = &extensionList
+	return nil
+}
+
+// AddAzureDockerVMExtensionConfiguration adds the DockerExtension to the role
+// configuration and opens port "dockerPort"
+// TODO(ahmetalpbalkan) Deprecate this and move to 'docker-machine' codebase.
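+//
+// For reference (derived from createDockerPublicConfig and dockerPublicConfig
+// below, not from live traffic): with dockerPort 2376 the public configuration
+// that gets base64 encoded is the JSON document
+//
+//	{"dockerport":2376,"version":2}
+//
+// and a typical call might look like (the version string is hypothetical):
+//
+//	err := AddAzureDockerVMExtensionConfiguration(&role, 2376, "0.3")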
+func AddAzureDockerVMExtensionConfiguration(role *vm.Role, dockerPort int, version string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + ConfigureWithExternalPort(role, "docker", dockerPort, dockerPort, vm.InputEndpointProtocolTCP) + + publicConfiguration, err := createDockerPublicConfig(dockerPort) + if err != nil { + return err + } + + privateConfiguration, err := json.Marshal(dockerPrivateConfig{}) + if err != nil { + return err + } + + return AddAzureVMExtensionConfiguration(role, + "DockerExtension", "MSOpenTech.Extensions", + version, "DockerExtension", "enable", + publicConfiguration, privateConfiguration) +} + +func createDockerPublicConfig(dockerPort int) ([]byte, error) { + return json.Marshal(dockerPublicConfig{DockerPort: dockerPort, Version: dockerPublicConfigVersion}) +} + +type dockerPublicConfig struct { + DockerPort int `json:"dockerport"` + Version int `json:"version"` +} + +type dockerPrivateConfig struct{} === added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go' --- src/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/extensions_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,42 @@ +package vmutils + +import ( + "encoding/xml" + "testing" + + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +func Test_AddAzureVMExtensionConfiguration(t *testing.T) { + + role := vm.Role{} + AddAzureVMExtensionConfiguration(&role, + "nameOfExtension", "nameOfPublisher", "versionOfExtension", "nameOfReference", "state", []byte{1, 2, 3}, []byte{}) + + data, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + if expected := ` + + + + nameOfReference + nameOfPublisher + nameOfExtension + versionOfExtension + + + ignored + AQID + Public + + + state + + + +`; string(data) != expected { + t.Fatalf("Expected %q, but got %q", expected, string(data)) + } +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go' --- src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/integration_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,359 @@ +package vmutils + +import ( + "encoding/base64" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/management" + "github.com/Azure/azure-sdk-for-go/management/hostedservice" + "github.com/Azure/azure-sdk-for-go/management/location" + "github.com/Azure/azure-sdk-for-go/management/osimage" + storage "github.com/Azure/azure-sdk-for-go/management/storageservice" + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" + vmimage "github.com/Azure/azure-sdk-for-go/management/virtualmachineimage" + + "github.com/Azure/azure-sdk-for-go/management/testutils" +) + +func TestDeployPlatformImage(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + testRoleConfiguration(t, client, role, location) +} + +func 
TestVMImageList(t *testing.T) { + client := testutils.GetTestClient(t) + vmic := vmimage.NewClient(client) + il, _ := vmic.ListVirtualMachineImages() + for _, im := range il.VMImages { + t.Logf("%s -%s", im.Name, im.Description) + } +} + +func TestDeployPlatformCaptureRedeploy(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying VM: %s", vmname) + createRoleConfiguration(t, client, role, location) + + t.Logf("Wait for deployment to enter running state") + vmc := vm.NewClient(client) + status := vm.DeploymentStatusDeploying + for status != vm.DeploymentStatusRunning { + deployment, err := vmc.GetDeployment(vmname, vmname) + if err != nil { + t.Error(err) + break + } + status = deployment.Status + } + + t.Logf("Shutting down VM: %s", vmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.ShutdownRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + + if err := WaitForDeploymentPowerState(client, vmname, vmname, vm.PowerStateStopped); err != nil { + t.Fatal(err) + } + + imagename := GenerateName() + t.Logf("Capturing VMImage: %s", imagename) + if err := Await(client, func() (management.OperationID, error) { + return vmc.CaptureRole(vmname, vmname, vmname, imagename, imagename, nil) + }); err != nil { + t.Error(err) + } + + im := GetUserImage(t, client, imagename) + t.Logf("Found image: %+v", im) + + newvmname := GenerateName() + role = NewVMConfiguration(newvmname, "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + im.Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, newvmname), + GenerateName()) + ConfigureForLinux(&role, newvmname, "azureuser", GeneratePassword()) + ConfigureWithPublicSSH(&role) + + t.Logf("Deploying new VM from freshly captured VM image: %s", newvmname) + if err := Await(client, func() (management.OperationID, error) { + return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) + }); err != nil { + t.Error(err) + } + + deleteHostedService(t, client, vmname) +} + +func TestDeployFromVmImage(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + im := GetVMImage(t, client, func(im vmimage.VMImage) bool { + return im.Name == + "fb83b3509582419d99629ce476bcb5c8__SQL-Server-2014-RTM-12.0.2430.0-OLTP-ENU-Win2012R2-cy14su11" + }) + + role := NewVMConfiguration(vmname, "Standard_D4") + ConfigureDeploymentFromVMImage(&role, im.Name, + fmt.Sprintf("http://%s.blob.core.windows.net/%s", sa.ServiceName, vmname), false) + ConfigureForWindows(&role, vmname, "azureuser", GeneratePassword(), true, "") + ConfigureWithPublicSSH(&role) + + testRoleConfiguration(t, client, role, location) +} + +func TestRoleStateOperations(t *testing.T) { + client := testutils.GetTestClient(t) + vmname := GenerateName() + sa := GetTestStorageAccount(t, client) + location := sa.StorageServiceProperties.Location + + role := NewVMConfiguration(vmname, "Standard_D3") + 
ConfigureDeploymentFromPlatformImage(&role, + GetLinuxTestImage(t, client).Name, + fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", sa.ServiceName, vmname), + GenerateName()) + ConfigureForLinux(&role, "myvm", "azureuser", GeneratePassword()) + + createRoleConfiguration(t, client, role, location) + + vmc := vm.NewClient(client) + if err := Await(client, func() (management.OperationID, error) { + return vmc.ShutdownRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + if err := Await(client, func() (management.OperationID, error) { + return vmc.StartRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + if err := Await(client, func() (management.OperationID, error) { + return vmc.RestartRole(vmname, vmname, vmname) + }); err != nil { + t.Error(err) + } + + deleteHostedService(t, client, vmname) +} + +func testRoleConfiguration(t *testing.T, client management.Client, role vm.Role, location string) { + createRoleConfiguration(t, client, role, location) + + deleteHostedService(t, client, role.RoleName) +} + +func createRoleConfiguration(t *testing.T, client management.Client, role vm.Role, location string) { + vmc := vm.NewClient(client) + hsc := hostedservice.NewClient(client) + vmname := role.RoleName + + if err := hsc.CreateHostedService(hostedservice.CreateHostedServiceParameters{ + ServiceName: vmname, Location: location, + Label: base64.StdEncoding.EncodeToString([]byte(vmname))}); err != nil { + t.Error(err) + } + + if err := Await(client, func() (management.OperationID, error) { + return vmc.CreateDeployment(role, vmname, vm.CreateDeploymentOptions{}) + }); err != nil { + t.Error(err) + } +} + +func deleteHostedService(t *testing.T, client management.Client, vmname string) { + t.Logf("Deleting hosted service: %s", vmname) + if err := Await(client, func() (management.OperationID, error) { + return hostedservice.NewClient(client).DeleteHostedService(vmname, true) + }); err != nil { + t.Error(err) + } +} + +// === utility funcs === + +func GetTestStorageAccount(t *testing.T, client management.Client) storage.StorageServiceResponse { + t.Log("Retrieving storage account") + sc := storage.NewClient(client) + var sa storage.StorageServiceResponse + ssl, err := sc.ListStorageServices() + if err != nil { + t.Fatal(err) + } + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + + if len(ssl.StorageServices) == 0 { + t.Log("No storage accounts found, creating a new one") + lc := location.NewClient(client) + ll, err := lc.ListLocations() + if err != nil { + t.Fatal(err) + } + loc := ll.Locations[rnd.Intn(len(ll.Locations))].Name + + t.Logf("Location for new storage account: %s", loc) + name := GenerateName() + op, err := sc.CreateStorageService(storage.StorageAccountCreateParameters{ + ServiceName: name, + Label: base64.StdEncoding.EncodeToString([]byte(name)), + Location: loc, + AccountType: storage.AccountTypeStandardLRS}) + if err != nil { + t.Fatal(err) + } + if err := client.WaitForOperation(op, nil); err != nil { + t.Fatal(err) + } + sa, err = sc.GetStorageService(name) + } else { + + sa = ssl.StorageServices[rnd.Intn(len(ssl.StorageServices))] + } + + t.Logf("Selected storage account '%s' in location '%s'", + sa.ServiceName, sa.StorageServiceProperties.Location) + + return sa +} + +func GetLinuxTestImage(t *testing.T, client management.Client) osimage.OSImage { + return GetOSImage(t, client, func(im osimage.OSImage) bool { + return im.Category == "Public" && im.ImageFamily == "Ubuntu Server 14.04 LTS" + }) +} + +func GetUserImage(t *testing.T, 
client management.Client, name string) osimage.OSImage { + return GetOSImage(t, client, func(im osimage.OSImage) bool { + return im.Category == "User" && im.Name == name + }) +} + +func GetOSImage( + t *testing.T, + client management.Client, + filter func(osimage.OSImage) bool) osimage.OSImage { + t.Log("Selecting OS image") + osc := osimage.NewClient(client) + allimages, err := osc.ListOSImages() + if err != nil { + t.Fatal(err) + } + filtered := []osimage.OSImage{} + for _, im := range allimages.OSImages { + if filter(im) { + filtered = append(filtered, im) + } + } + if len(filtered) == 0 { + t.Fatal("Filter too restrictive, no images left?") + } + + image := filtered[0] + for _, im := range filtered { + if im.PublishedDate > image.PublishedDate { + image = im + } + } + + t.Logf("Selecting image '%s'", image.Name) + return image +} + +func GetVMImage( + t *testing.T, + client management.Client, + filter func(vmimage.VMImage) bool) vmimage.VMImage { + t.Log("Selecting VM image") + allimages, err := vmimage.NewClient(client).ListVirtualMachineImages() + if err != nil { + t.Fatal(err) + } + filtered := []vmimage.VMImage{} + for _, im := range allimages.VMImages { + if filter(im) { + filtered = append(filtered, im) + } + } + if len(filtered) == 0 { + t.Fatal("Filter too restrictive, no images left?") + } + + image := filtered[0] + for _, im := range filtered { + if im.PublishedDate > image.PublishedDate { + image = im + } + } + + t.Logf("Selecting image '%s'", image.Name) + return image +} + +func GenerateName() string { + from := "1234567890abcdefghijklmnopqrstuvwxyz" + return "sdk" + GenerateString(12, from) +} + +func GeneratePassword() string { + pw := GenerateString(20, "1234567890") + + GenerateString(20, "abcdefghijklmnopqrstuvwxyz") + + GenerateString(20, "ABCDEFGHIJKLMNOPQRSTUVWXYZ") + + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + i := rnd.Intn(len(pw)-2) + 1 + + pw = string(append([]uint8(pw[i:]), pw[:i-1]...)) + + return pw +} + +func GenerateString(length int, from string) string { + str := "" + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) + for len(str) < length { + str += string(from[rnd.Intn(len(from))]) + } + return str +} + +type asyncFunc func() (operationId management.OperationID, err error) + +func Await(client management.Client, async asyncFunc) error { + requestID, err := async() + if err != nil { + return err + } + return client.WaitForOperation(requestID, nil) +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go' --- src/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/network.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,83 @@ +package vmutils + +import ( + "fmt" + + vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine" +) + +// ConfigureWithPublicSSH adds configuration exposing port 22 externally +func ConfigureWithPublicSSH(role *vm.Role) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + return ConfigureWithExternalPort(role, "SSH", 22, 22, vm.InputEndpointProtocolTCP) +} + +// ConfigureWithPublicRDP adds configuration exposing port 3389 externally +func ConfigureWithPublicRDP(role *vm.Role) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + return ConfigureWithExternalPort(role, "RDP", 3389, 3389, vm.InputEndpointProtocolTCP) +} + +// ConfigureWithPublicPowerShell adds configuration exposing port 5986 +// externally +func 
ConfigureWithPublicPowerShell(role *vm.Role) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	return ConfigureWithExternalPort(role, "PowerShell", 5986, 5986, vm.InputEndpointProtocolTCP)
+}
+
+// ConfigureWithExternalPort adds a new InputEndpoint to the Role, exposing a
+// port externally
+func ConfigureWithExternalPort(role *vm.Role, name string, localport, externalport int, protocol vm.InputEndpointProtocol) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeNetwork,
+		func(config *vm.ConfigurationSet) {
+			config.InputEndpoints = append(config.InputEndpoints, vm.InputEndpoint{
+				LocalPort: localport,
+				Name:      name,
+				Port:      externalport,
+				Protocol:  protocol,
+			})
+		})
+
+	return nil
+}
+
+// ConfigureWithSecurityGroup associates the Role with a specific network security group
+func ConfigureWithSecurityGroup(role *vm.Role, networkSecurityGroup string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeNetwork,
+		func(config *vm.ConfigurationSet) {
+			config.NetworkSecurityGroup = networkSecurityGroup
+		})
+
+	return nil
+}
+
+// ConfigureWithSubnet associates the Role with a specific subnet
+func ConfigureWithSubnet(role *vm.Role, subnet string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeNetwork,
+		func(config *vm.ConfigurationSet) {
+			config.SubnetNames = append(config.SubnetNames, subnet)
+		})
+
+	return nil
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go'
--- src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolesize.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,76 @@
+package vmutils
+
+import (
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+	lc "github.com/Azure/azure-sdk-for-go/management/location"
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+// IsRoleSizeValid retrieves the available role sizes using
+// vmclient.GetRoleSizeList() and returns whether the provided roleSizeName
+// is part of that list
+func IsRoleSizeValid(vmclient vm.VirtualMachineClient, roleSizeName string) (bool, error) {
+	if roleSizeName == "" {
+		return false, fmt.Errorf(errParamNotSpecified, "roleSizeName")
+	}
+
+	roleSizeList, err := vmclient.GetRoleSizeList()
+	if err != nil {
+		return false, err
+	}
+
+	for _, roleSize := range roleSizeList.RoleSizes {
+		if roleSize.Name == roleSizeName {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// IsRoleSizeAvailableInLocation retrieves all available sizes in the specified
+// location and returns whether the provided roleSizeName is part of that list.
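+//
+// A minimal sketch (illustrative; assumes a management.Client cl created
+// elsewhere, and hypothetical location and size names):
+//
+//	ok, err := IsRoleSizeAvailableInLocation(cl, "West US", "Standard_D3")
+//	if err != nil {
+//		// handle the error
+//	}
+//	if !ok {
+//		// fall back to another size or location
+//	}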
+func IsRoleSizeAvailableInLocation(managementclient management.Client, location, roleSizeName string) (bool, error) {
+	if location == "" {
+		return false, fmt.Errorf(errParamNotSpecified, "location")
+	}
+	if roleSizeName == "" {
+		return false, fmt.Errorf(errParamNotSpecified, "roleSizeName")
+	}
+
+	locationClient := lc.NewClient(managementclient)
+	locationInfo, err := getLocation(locationClient, location)
+	if err != nil {
+		return false, err
+	}
+
+	for _, availableRoleSize := range locationInfo.VirtualMachineRoleSizes {
+		if availableRoleSize == roleSizeName {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func getLocation(c lc.LocationClient, location string) (*lc.Location, error) {
+	if location == "" {
+		return nil, fmt.Errorf(errParamNotSpecified, "location")
+	}
+
+	locations, err := c.ListLocations()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, existingLocation := range locations.Locations {
+		if existingLocation.Name != location {
+			continue
+		}
+
+		return &existingLocation, nil
+	}
+	return nil, fmt.Errorf("Invalid location: %s. Available locations: %s", location, locations)
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go'
--- src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/rolestate.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,33 @@
+package vmutils
+
+import (
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/management"
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+// WaitForDeploymentPowerState blocks until all role instances in the
+// deployment reach the desired power state.
+func WaitForDeploymentPowerState(client management.Client, cloudServiceName, deploymentName string, desiredPowerstate vm.PowerState) error {
+	for {
+		deployment, err := vm.NewClient(client).GetDeployment(cloudServiceName, deploymentName)
+		if err != nil {
+			return err
+		}
+		if allInstancesInPowerState(deployment.RoleInstanceList, desiredPowerstate) {
+			return nil
+		}
+		time.Sleep(2 * time.Second)
+	}
+}
+
+func allInstancesInPowerState(instances []vm.RoleInstance, desiredPowerstate vm.PowerState) bool {
+	for _, r := range instances {
+		if r.PowerState != desiredPowerstate {
+			return false
+		}
+	}
+
+	return true
+}
=== added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go'
--- src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go	1970-01-01 00:00:00 +0000
+++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,101 @@
+// Package vmutils provides convenience methods for creating Virtual
+// Machine Role configurations.
+package vmutils
+
+import (
+	"fmt"
+
+	vm "github.com/Azure/azure-sdk-for-go/management/virtualmachine"
+)
+
+const (
+	errParamNotSpecified = "Parameter %s is not specified."
+)
+
+// NewVMConfiguration creates configuration for a new virtual machine Role.
+func NewVMConfiguration(name string, roleSize string) vm.Role {
+	return vm.Role{
+		RoleName:            name,
+		RoleType:            "PersistentVMRole",
+		RoleSize:            roleSize,
+		ProvisionGuestAgent: true,
+	}
+}
+
+// ConfigureForLinux adds configuration for when deploying a generalized Linux
+// image. If "password" is left empty, SSH password security will be disabled by
+// default. Certificates with SSH public keys should already be uploaded to the
+// cloud service where the VM will be deployed and referenced here only by their
+// thumbprint.
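+//
+// A short sketch (illustrative; the thumbprint is a placeholder for a
+// certificate that has already been uploaded to the target cloud service):
+//
+//	err := ConfigureForLinux(&role, "myvm", "azureuser", "",
+//		"2398YYKJGD78E2389YDFNCUIROWEBHF89YH3IUOBY")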
+func ConfigureForLinux(role *vm.Role, hostname, user, password string, sshPubkeyCertificateThumbprint ...string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeLinuxProvisioning,
+		func(config *vm.ConfigurationSet) {
+			config.HostName = hostname
+			config.UserName = user
+			config.UserPassword = password
+			if password != "" {
+				config.DisableSSHPasswordAuthentication = "false"
+			}
+			if len(sshPubkeyCertificateThumbprint) != 0 {
+				config.SSH = &vm.SSH{}
+				for _, k := range sshPubkeyCertificateThumbprint {
+					config.SSH.PublicKeys = append(config.SSH.PublicKeys,
+						vm.PublicKey{
+							Fingerprint: k,
+							Path:        "/home/" + user + "/.ssh/authorized_keys",
+						},
+					)
+				}
+			}
+		},
+	)
+
+	return nil
+}
+
+// ConfigureForWindows adds configuration for when deploying a generalized
+// Windows image. timeZone can be left empty. For a complete list of supported
+// time zone entries, you can either refer to the values listed in the registry
+// entry "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time
+// Zones" or you can use the tzutil command-line tool to list the valid time
+// zones.
+func ConfigureForWindows(role *vm.Role, hostname, user, password string, enableAutomaticUpdates bool, timeZone string) error {
+	if role == nil {
+		return fmt.Errorf(errParamNotSpecified, "role")
+	}
+
+	role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, vm.ConfigurationSetTypeWindowsProvisioning,
+		func(config *vm.ConfigurationSet) {
+			config.ComputerName = hostname
+			config.AdminUsername = user
+			config.AdminPassword = password
+			config.EnableAutomaticUpdates = enableAutomaticUpdates
+			config.TimeZone = timeZone
+		},
+	)
+
+	return nil
+}
+
+// ConfigureWindowsToJoinDomain adds configuration to join a new Windows VM to a
+// domain.
"username" must be in UPN form (user@domain.com), "machineOU" can be +// left empty +func ConfigureWindowsToJoinDomain(role *vm.Role, username, password, domainToJoin, machineOU string) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + winconfig := findConfig(role.ConfigurationSets, vm.ConfigurationSetTypeWindowsProvisioning) + if winconfig != nil { + winconfig.DomainJoin = &vm.DomainJoin{ + Credentials: vm.Credentials{Username: username, Password: password}, + JoinDomain: domainToJoin, + MachineObjectOU: machineOU, + } + } + + return nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go' --- src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,216 @@ +package vmutils + +import ( + "encoding/xml" + "testing" + + vmdisk "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk" +) + +func TestNewLinuxVmRemoteImage(t *testing.T) { + role := NewVMConfiguration("myvm", "Standard_D3") + ConfigureDeploymentFromRemoteImage(&role, + "http://remote.host/some.vhd?sv=12&sig=ukhfiuwef78687", "Linux", + "myvm-os-disk", "http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd", + "OSDisk") + ConfigureForLinux(&role, "myvm", "azureuser", "P@ssword", "2398yyKJGd78e2389ydfncuirowebhf89yh3IUOBY") + ConfigureWithPublicSSH(&role) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + myvm + PersistentVMRole + + + LinuxProvisioningConfiguration + + myvm + azureuser + P@ssword + false + + + + 2398yyKJGd78e2389ydfncuirowebhf89yh3IUOBY + /home/azureuser/.ssh/authorized_keys + + + + + + + + + + NetworkConfiguration + + + + 22 + SSH + 22 + TCP + + + + + + + + + OSDisk + myvm-os-disk + http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd + Linux + http://remote.host/some.vhd?sv=12&sig=ukhfiuwef78687 + + Standard_D3 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestNewLinuxVmPlatformImage(t *testing.T) { + role := NewVMConfiguration("myplatformvm", "Standard_D3") + ConfigureDeploymentFromPlatformImage(&role, + "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB", + "http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd", "mydisklabel") + ConfigureForLinux(&role, "myvm", "azureuser", "", "2398yyKJGd78e2389ydfncuirdebhf89yh3IUOBY") + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + myplatformvm + PersistentVMRole + + + LinuxProvisioningConfiguration + + myvm + azureuser + + + + 2398yyKJGd78e2389ydfncuirdebhf89yh3IUOBY + /home/azureuser/.ssh/authorized_keys + + + + + + + + + + + + http://mystorageacct.blob.core.windows.net/vhds/mybrandnewvm.vhd + b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04_2_LTS-amd64-server-20150309-en-us-30GB + + Standard_D3 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestNewVmFromVMImage(t *testing.T) { + role := NewVMConfiguration("restoredbackup", "Standard_D1") + ConfigureDeploymentFromVMImage(&role, "myvm-backup-20150209", + "http://mystorageacct.blob.core.windows.net/vhds/myoldnewvm.vhd", false) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + 
expected := ` + restoredbackup + PersistentVMRole + + myvm-backup-20150209 + http://mystorageacct.blob.core.windows.net/vhds/myoldnewvm.vhd + + Standard_D1 +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} + +func TestNewVmFromExistingDisk(t *testing.T) { + role := NewVMConfiguration("blobvm", "Standard_D14") + ConfigureDeploymentFromExistingOSDisk(&role, "myvm-backup-20150209", "OSDisk") + ConfigureForWindows(&role, "WINVM", "azuser", "P2ssw@rd", true, "") + ConfigureWindowsToJoinDomain(&role, "user@domain.com", "youReN3verG0nnaGu3ss", "redmond.corp.contoso.com", "") + ConfigureWithNewDataDisk(&role, "my-brand-new-disk", "http://account.blob.core.windows.net/vhds/newdatadisk.vhd", + 30, vmdisk.HostCachingTypeReadWrite) + ConfigureWithExistingDataDisk(&role, "data-disk", vmdisk.HostCachingTypeReadOnly) + + bytes, err := xml.MarshalIndent(role, "", " ") + if err != nil { + t.Fatal(err) + } + + expected := ` + blobvm + PersistentVMRole + + + WindowsProvisioningConfiguration + WINVM + P2ssw@rd + true + + + + user@domain.com + youReN3verG0nnaGu3ss + + redmond.corp.contoso.com + + + azuser + + + + + + + + ReadWrite + my-brand-new-disk + 30 + http://account.blob.core.windows.net/vhds/newdatadisk.vhd + + + ReadOnly + data-disk + 1 + + + + OSDisk + myvm-backup-20150209 + + Standard_D14 + true +` + + if string(bytes) != expected { + t.Fatalf("Expected marshalled xml to be %q, but got %q", expected, string(bytes)) + } +} === added directory 'src/github.com/Azure/azure-sdk-for-go/storage' === added file 'src/github.com/Azure/azure-sdk-for-go/storage/blob.go' --- src/github.com/Azure/azure-sdk-for-go/storage/blob.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/blob.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,946 @@ +package storage + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client +} + +// A Container is an entry in ContainerListResponse. +type Container struct { + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + // TODO (ahmetalpbalkan) remaining fields +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// A Blob is an entry in BlobListResponse. +type Blob struct { + Name string `xml:"Name"` + Properties BlobProperties `xml:"Properties"` + // TODO (ahmetalpbalkan) Metadata +} + +// BlobProperties contains various properties of a blob +// returned in various endpoints like ListBlobs or GetBlobProperties. 
+type BlobProperties struct {
+	LastModified          string   `xml:"Last-Modified"`
+	Etag                  string   `xml:"Etag"`
+	ContentMD5            string   `xml:"Content-MD5"`
+	ContentLength         int64    `xml:"Content-Length"`
+	ContentType           string   `xml:"Content-Type"`
+	ContentEncoding       string   `xml:"Content-Encoding"`
+	BlobType              BlobType `xml:"x-ms-blob-blob-type"`
+	SequenceNumber        int64    `xml:"x-ms-blob-sequence-number"`
+	CopyID                string   `xml:"CopyId"`
+	CopyStatus            string   `xml:"CopyStatus"`
+	CopySource            string   `xml:"CopySource"`
+	CopyProgress          string   `xml:"CopyProgress"`
+	CopyCompletionTime    string   `xml:"CopyCompletionTime"`
+	CopyStatusDescription string   `xml:"CopyStatusDescription"`
+}
+
+// BlobListResponse contains the response fields from ListBlobs call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+type BlobListResponse struct {
+	XMLName    xml.Name `xml:"EnumerationResults"`
+	Xmlns      string   `xml:"xmlns,attr"`
+	Prefix     string   `xml:"Prefix"`
+	Marker     string   `xml:"Marker"`
+	NextMarker string   `xml:"NextMarker"`
+	MaxResults int64    `xml:"MaxResults"`
+	Blobs      []Blob   `xml:"Blobs>Blob"`
+}
+
+// ListContainersParameters defines the set of customizable parameters to make a
+// List Containers call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+type ListContainersParameters struct {
+	Prefix     string
+	Marker     string
+	Include    string
+	MaxResults uint
+	Timeout    uint
+}
+
+func (p ListContainersParameters) getParameters() url.Values {
+	out := url.Values{}
+
+	if p.Prefix != "" {
+		out.Set("prefix", p.Prefix)
+	}
+	if p.Marker != "" {
+		out.Set("marker", p.Marker)
+	}
+	if p.Include != "" {
+		out.Set("include", p.Include)
+	}
+	if p.MaxResults != 0 {
+		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+	}
+	if p.Timeout != 0 {
+		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+	}
+
+	return out
+}
+
+// ListBlobsParameters defines the set of customizable
+// parameters to make a List Blobs call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+type ListBlobsParameters struct {
+	Prefix     string
+	Delimiter  string
+	Marker     string
+	Include    string
+	MaxResults uint
+	Timeout    uint
+}
+
+func (p ListBlobsParameters) getParameters() url.Values {
+	out := url.Values{}
+
+	if p.Prefix != "" {
+		out.Set("prefix", p.Prefix)
+	}
+	if p.Delimiter != "" {
+		out.Set("delimiter", p.Delimiter)
+	}
+	if p.Marker != "" {
+		out.Set("marker", p.Marker)
+	}
+	if p.Include != "" {
+		out.Set("include", p.Include)
+	}
+	if p.MaxResults != 0 {
+		out.Set("maxresults", fmt.Sprintf("%v", p.MaxResults))
+	}
+	if p.Timeout != 0 {
+		out.Set("timeout", fmt.Sprintf("%v", p.Timeout))
+	}
+
+	return out
+}
+
+// BlobType defines the type of the Azure Blob.
+type BlobType string
+
+// Types of blobs
+const (
+	BlobTypeBlock BlobType = "BlockBlob"
+	BlobTypePage  BlobType = "PageBlob"
+)
+
+// PageWriteType defines the type of updates that are going to be
+// done on the page blob.
+type PageWriteType string
+
+// Types of operations on page blobs
+const (
+	PageWriteTypeUpdate PageWriteType = "update"
+	PageWriteTypeClear  PageWriteType = "clear"
+)
+
+const (
+	blobCopyStatusPending = "pending"
+	blobCopyStatusSuccess = "success"
+	blobCopyStatusAborted = "aborted"
+	blobCopyStatusFailed  = "failed"
+)
+
+// BlockListType is used to filter out types of blocks in a Get Block List call
+// for a block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
+// block types.
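+//
+// For illustration (a hedged sketch; it assumes the Get Block List helper
+// defined further down in this file and a BlobStorageClient b):
+//
+//	list, err := b.GetBlockList("mycontainer", "myblob", BlockListTypeCommitted)
+//	if err != nil {
+//		// handle the error
+//	}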
+type BlockListType string
+
+// Filters for listing blocks in block blobs
+const (
+	BlockListTypeAll         BlockListType = "all"
+	BlockListTypeCommitted   BlockListType = "committed"
+	BlockListTypeUncommitted BlockListType = "uncommitted"
+)
+
+// ContainerAccessType defines the access level to the container from a public
+// request.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
+// blob-public-access" header.
+type ContainerAccessType string
+
+// Access options for containers
+const (
+	ContainerAccessTypePrivate   ContainerAccessType = ""
+	ContainerAccessTypeBlob      ContainerAccessType = "blob"
+	ContainerAccessTypeContainer ContainerAccessType = "container"
+)
+
+// Maximum sizes (per REST API) for various concepts
+const (
+	MaxBlobBlockSize = 4 * 1024 * 1024
+	MaxBlobPageSize  = 4 * 1024 * 1024
+)
+
+// BlockStatus defines states a block for a block blob can
+// be in.
+type BlockStatus string
+
+// List of statuses that can be used to refer to a block in a block list
+const (
+	BlockStatusUncommitted BlockStatus = "Uncommitted"
+	BlockStatusCommitted   BlockStatus = "Committed"
+	BlockStatusLatest      BlockStatus = "Latest"
+)
+
+// Block is used to create Block entities for Put Block List
+// call.
+type Block struct {
+	ID     string
+	Status BlockStatus
+}
+
+// BlockListResponse contains the response fields from Get Block List call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+type BlockListResponse struct {
+	XMLName           xml.Name        `xml:"BlockList"`
+	CommittedBlocks   []BlockResponse `xml:"CommittedBlocks>Block"`
+	UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"`
+}
+
+// BlockResponse contains the block information returned
+// in the Get Block List call.
+type BlockResponse struct {
+	Name string `xml:"Name"`
+	Size int64  `xml:"Size"`
+}
+
+// GetPageRangesResponse contains the response fields from
+// Get Page Ranges call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type GetPageRangesResponse struct {
+	XMLName  xml.Name    `xml:"PageList"`
+	PageList []PageRange `xml:"PageRange"`
+}
+
+// PageRange contains information about a page of a page blob from
+// Get Page Ranges call.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+type PageRange struct {
+	Start int64 `xml:"Start"`
+	End   int64 `xml:"End"`
+}
+
+var (
+	errBlobCopyAborted    = errors.New("storage: blob copy is aborted")
+	errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
+)
+
+// ListContainers returns the list of containers in a storage account along with
+// pagination token and other response details.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
+func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) {
+	q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
+	uri := b.client.getEndpoint(blobServiceName, "", q)
+	headers := b.client.getStandardHeaders()
+
+	var out ContainerListResponse
+	resp, err := b.client.exec("GET", uri, headers, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.body.Close()
+
+	err = xmlUnmarshal(resp.body, &out)
+	return out, err
+}
+
+// CreateContainer creates a blob container within the storage account
+// with given name and access level. Returns an error if the container
+// already exists.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
+func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error {
+	resp, err := b.createContainer(name, access)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// CreateContainerIfNotExists creates a blob container if it does not exist. Returns
+// true if container is newly created or false if container already exists.
+func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) {
+	resp, err := b.createContainer(name, access)
+	if resp != nil {
+		defer resp.body.Close()
+		if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
+			return resp.statusCode == http.StatusCreated, nil
+		}
+	}
+	return false, err
+}
+
+func (b BlobStorageClient) createContainer(name string, access ContainerAccessType) (*storageResponse, error) {
+	verb := "PUT"
+	uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
+
+	headers := b.client.getStandardHeaders()
+	headers["Content-Length"] = "0"
+	if access != "" {
+		headers["x-ms-blob-public-access"] = string(access)
+	}
+	return b.client.exec(verb, uri, headers, nil)
+}
+
+// ContainerExists returns true if a container with given name exists
+// on the storage account, otherwise returns false.
+func (b BlobStorageClient) ContainerExists(name string) (bool, error) {
+	verb := "HEAD"
+	uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
+	headers := b.client.getStandardHeaders()
+
+	resp, err := b.client.exec(verb, uri, headers, nil)
+	if resp != nil {
+		defer resp.body.Close()
+		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+			return resp.statusCode == http.StatusOK, nil
+		}
+	}
+	return false, err
+}
+
+// DeleteContainer deletes the container with given name on the storage
+// account. Returns an error if the container does not exist.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
+func (b BlobStorageClient) DeleteContainer(name string) error {
+	resp, err := b.deleteContainer(name)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
+
+// DeleteContainerIfExists deletes the container with given name on the storage
+// account if it exists. Returns true if container is deleted with this call, or
+// false if the container did not exist at the time of the Delete Container
+// operation.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
+func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) {
+	resp, err := b.deleteContainer(name)
+	if resp != nil {
+		defer resp.body.Close()
+		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+			return resp.statusCode == http.StatusAccepted, nil
+		}
+	}
+	return false, err
+}
+
+func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error) {
+	verb := "DELETE"
+	uri := b.client.getEndpoint(blobServiceName, pathForContainer(name), url.Values{"restype": {"container"}})
+
+	headers := b.client.getStandardHeaders()
+	return b.client.exec(verb, uri, headers, nil)
+}
+
+// ListBlobs returns an object that contains the list of blobs in the container,
+// a pagination token and other information in the response of the List Blobs
+// call.
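+//
+// Results come in pages; a sketch of draining all pages (hypothetical names,
+// error handling elided):
+//
+//	marker := ""
+//	for {
+//		resp, _ := cli.ListBlobs("mycontainer", ListBlobsParameters{Marker: marker})
+//		// ... consume resp.Blobs ...
+//		if resp.NextMarker == "" || len(resp.Blobs) == 0 {
+//			break
+//		}
+//		marker = resp.NextMarker
+//	}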
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
+func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) {
+	q := mergeParams(params.getParameters(), url.Values{
+		"restype": {"container"},
+		"comp":    {"list"}})
+	uri := b.client.getEndpoint(blobServiceName, pathForContainer(container), q)
+	headers := b.client.getStandardHeaders()
+
+	var out BlobListResponse
+	resp, err := b.client.exec("GET", uri, headers, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.body.Close()
+
+	err = xmlUnmarshal(resp.body, &out)
+	return out, err
+}
+
+// BlobExists returns true if a blob with given name exists on the specified
+// container of the storage account.
+func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
+	verb := "HEAD"
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+	headers := b.client.getStandardHeaders()
+	resp, err := b.client.exec(verb, uri, headers, nil)
+	if resp != nil {
+		defer resp.body.Close()
+		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
+			return resp.statusCode == http.StatusOK, nil
+		}
+	}
+	return false, err
+}
+
+// GetBlobURL gets the canonical URL to the blob with the specified name in the
+// specified container. This method does not create a publicly accessible URL if
+// the blob or container is private and this method does not check if the blob
+// exists.
+func (b BlobStorageClient) GetBlobURL(container, name string) string {
+	if container == "" {
+		container = "$root"
+	}
+	return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+}
+
+// GetBlob returns a stream to read the blob. The caller must call Close() on
+// the reader to close the underlying connection.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
+func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
+	resp, err := b.getBlobRange(container, name, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+// GetBlobRange reads the specified range of a blob to a stream. The bytesRange
+// string must be in a format like "0-" or "10-100", as defined in the HTTP/1.1
+// spec.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
+func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) {
+	resp, err := b.getBlobRange(container, name, bytesRange)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
+		return nil, err
+	}
+	return resp.body, nil
+}
+
+func (b BlobStorageClient) getBlobRange(container, name, bytesRange string) (*storageResponse, error) {
+	verb := "GET"
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+	headers := b.client.getStandardHeaders()
+	if bytesRange != "" {
+		headers["Range"] = fmt.Sprintf("bytes=%s", bytesRange)
+	}
+	resp, err := b.client.exec(verb, uri, headers, nil)
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// GetBlobProperties provides various information about the specified
+// blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
+func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {
+	verb := "HEAD"
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+	headers := b.client.getStandardHeaders()
+	resp, err := b.client.exec(verb, uri, headers, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.body.Close()
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return nil, err
+	}
+
+	var contentLength int64
+	contentLengthStr := resp.headers.Get("Content-Length")
+	if contentLengthStr != "" {
+		contentLength, err = strconv.ParseInt(contentLengthStr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var sequenceNum int64
+	sequenceNumStr := resp.headers.Get("x-ms-blob-sequence-number")
+	if sequenceNumStr != "" {
+		sequenceNum, err = strconv.ParseInt(sequenceNumStr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &BlobProperties{
+		LastModified:          resp.headers.Get("Last-Modified"),
+		Etag:                  resp.headers.Get("Etag"),
+		ContentMD5:            resp.headers.Get("Content-MD5"),
+		ContentLength:         contentLength,
+		ContentType:           resp.headers.Get("Content-Type"),
+		ContentEncoding:       resp.headers.Get("Content-Encoding"),
+		SequenceNumber:        sequenceNum,
+		CopyCompletionTime:    resp.headers.Get("x-ms-copy-completion-time"),
+		CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"),
+		CopyID:                resp.headers.Get("x-ms-copy-id"),
+		CopyProgress:          resp.headers.Get("x-ms-copy-progress"),
+		CopySource:            resp.headers.Get("x-ms-copy-source"),
+		CopyStatus:            resp.headers.Get("x-ms-copy-status"),
+		BlobType:              BlobType(resp.headers.Get("x-ms-blob-type")),
+	}, nil
+}
+
+// SetBlobMetadata replaces the metadata for the specified blob.
+//
+// Some keys may be converted to Camel-Case before sending. All keys
+// are returned in lower case by GetBlobMetadata. HTTP header names
+// are case-insensitive so case munging should not matter to other
+// applications either.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+func (b BlobStorageClient) SetBlobMetadata(container, name string, metadata map[string]string) error {
+	params := url.Values{"comp": {"metadata"}}
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+	headers := b.client.getStandardHeaders()
+	for k, v := range metadata {
+		headers[userDefinedMetadataHeaderPrefix+k] = v
+	}
+	headers["Content-Length"] = "0"
+
+	resp, err := b.client.exec("PUT", uri, headers, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+
+	return checkRespCode(resp.statusCode, []int{http.StatusOK})
+}
+
+// GetBlobMetadata returns all user-defined metadata for the specified blob.
+//
+// All metadata keys will be returned in lower case. (HTTP header
+// names are case-insensitive.)
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx
+func (b BlobStorageClient) GetBlobMetadata(container, name string) (map[string]string, error) {
+	params := url.Values{"comp": {"metadata"}}
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+	headers := b.client.getStandardHeaders()
+
+	resp, err := b.client.exec("GET", uri, headers, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.body.Close()
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return nil, err
+	}
+
+	metadata := make(map[string]string)
+	for k, v := range resp.headers {
+		// Can't trust CanonicalHeaderKey() to munge case
+		// reliably.
"_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Foo" or "x-ms-meta-foo_bar". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["foo"] = content of the last X-Ms-Meta-Foo header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata, nil +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlob(container, name string) error { + return b.CreateBlockBlobFromReader(container, name, 0, nil) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. +// +// The API rejects requests with size > 64 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx +func (b BlobStorageClient) CreateBlockBlobFromReader(container, name string, size uint64, blob io.Reader) error { + path := fmt.Sprintf("%s/%s", container, name) + uri := b.client.getEndpoint(blobServiceName, path, url.Values{}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%d", size) + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error { + return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk)) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 4 MiB (but this limit is not +// checked by the SDK). +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx +func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error { + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}}) + headers := b.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", size) + + resp, err := b.client.exec("PUT", uri, headers, blob) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// PutBlockList saves list of blocks to the specified block blob. 
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx
+func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error {
+	blockListXML := prepareBlockListRequest(blocks)
+
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}})
+	headers := b.client.getStandardHeaders()
+	headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
+
+	resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML))
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// GetBlockList retrieves the list of blocks in the specified block blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
+func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) {
+	params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}}
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
+	headers := b.client.getStandardHeaders()
+
+	var out BlockListResponse
+	resp, err := b.client.exec("GET", uri, headers, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.body.Close()
+
+	err = xmlUnmarshal(resp.body, &out)
+	return out, err
+}
+
+// PutPageBlob initializes an empty page blob with specified name and maximum
+// size in bytes (size must be aligned to a 512-byte boundary). A page blob must
+// be created using this method before writing pages.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
+func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error {
+	path := fmt.Sprintf("%s/%s", container, name)
+	uri := b.client.getEndpoint(blobServiceName, path, url.Values{})
+	headers := b.client.getStandardHeaders()
+	headers["x-ms-blob-type"] = string(BlobTypePage)
+	headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", size)
+	headers["Content-Length"] = fmt.Sprintf("%v", 0)
+
+	resp, err := b.client.exec("PUT", uri, headers, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// PutPage writes a range of pages to a page blob or clears the given range.
+// For 'clear' writes, the given chunk is discarded. Ranges must be aligned to
+// 512-byte boundaries and the chunk size must be a multiple of 512 bytes.
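+//
+// A sketch of writing one 512-byte page at offset 0 (hypothetical names,
+// error handling elided):
+//
+//	page := make([]byte, 512)
+//	_ = cli.PutPage("mycontainer", "mypageblob", 0, 511, PageWriteTypeUpdate, page)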
+//
+// See https://msdn.microsoft.com/en-us/library/ee691975.aspx
+func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error {
+	path := fmt.Sprintf("%s/%s", container, name)
+	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"page"}})
+	headers := b.client.getStandardHeaders()
+	headers["x-ms-blob-type"] = string(BlobTypePage)
+	headers["x-ms-page-write"] = string(writeType)
+	headers["x-ms-range"] = fmt.Sprintf("bytes=%v-%v", startByte, endByte)
+
+	var contentLength int64
+	var data io.Reader
+	if writeType == PageWriteTypeClear {
+		contentLength = 0
+		data = bytes.NewReader([]byte{})
+	} else {
+		contentLength = int64(len(chunk))
+		data = bytes.NewReader(chunk)
+	}
+	headers["Content-Length"] = fmt.Sprintf("%v", contentLength)
+
+	resp, err := b.client.exec("PUT", uri, headers, data)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+
+	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
+}
+
+// GetPageRanges returns the list of valid page ranges for a page blob.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
+func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) {
+	path := fmt.Sprintf("%s/%s", container, name)
+	uri := b.client.getEndpoint(blobServiceName, path, url.Values{"comp": {"pagelist"}})
+	headers := b.client.getStandardHeaders()
+
+	var out GetPageRangesResponse
+	resp, err := b.client.exec("GET", uri, headers, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.body.Close()
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
+		return out, err
+	}
+	err = xmlUnmarshal(resp.body, &out)
+	return out, err
+}
+
+// CopyBlob starts a blob copy operation and waits for the operation to
+// complete. The sourceBlob parameter must be a canonical URL to the blob (it
+// can be obtained using the GetBlobURL method.) There is no SLA on blob copy
+// and therefore this helper method, which blocks until the copy completes, is
+// practical only for smaller blobs.
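+//
+// A usage sketch (hypothetical names, error handling elided):
+//
+//	src := cli.GetBlobURL("mycontainer", "source-blob")
+//	_ = cli.CopyBlob("mycontainer", "dest-blob", src)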
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
+func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error {
+	copyID, err := b.startBlobCopy(container, name, sourceBlob)
+	if err != nil {
+		return err
+	}
+
+	return b.waitForBlobCopy(container, name, copyID)
+}
+
+func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) {
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+
+	headers := b.client.getStandardHeaders()
+	headers["Content-Length"] = "0"
+	headers["x-ms-copy-source"] = sourceBlob
+
+	resp, err := b.client.exec("PUT", uri, headers, nil)
+	if err != nil {
+		return "", err
+	}
+	defer resp.body.Close()
+
+	if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
+		return "", err
+	}
+
+	copyID := resp.headers.Get("x-ms-copy-id")
+	if copyID == "" {
+		return "", errors.New("storage: got empty copy id header")
+	}
+	return copyID, nil
+}
+
+func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error {
+	for {
+		props, err := b.GetBlobProperties(container, name)
+		if err != nil {
+			return err
+		}
+
+		if props.CopyID != copyID {
+			return errBlobCopyIDMismatch
+		}
+
+		switch props.CopyStatus {
+		case blobCopyStatusSuccess:
+			return nil
+		case blobCopyStatusPending:
+			continue
+		case blobCopyStatusAborted:
+			return errBlobCopyAborted
+		case blobCopyStatusFailed:
+			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription)
+		default:
+			return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus)
+		}
+	}
+}
+
+// DeleteBlob deletes the given blob from the specified container.
+// If the blob does not exist at the time of the Delete Blob operation, it
+// returns an error.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
+func (b BlobStorageClient) DeleteBlob(container, name string) error {
+	resp, err := b.deleteBlob(container, name)
+	if err != nil {
+		return err
+	}
+	defer resp.body.Close()
+	return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
+}
+
+// DeleteBlobIfExists deletes the given blob from the specified container if it
+// exists. Returns true if the blob is deleted with this call, or false
+// otherwise.
+//
+// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
+func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) {
+	resp, err := b.deleteBlob(container, name)
+	if resp != nil {
+		defer resp.body.Close()
+		if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
+			return resp.statusCode == http.StatusAccepted, nil
+		}
+	}
+	return false, err
+}
+
+func (b BlobStorageClient) deleteBlob(container, name string) (*storageResponse, error) {
+	verb := "DELETE"
+	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
+	headers := b.client.getStandardHeaders()
+
+	return b.client.exec(verb, uri, headers, nil)
+}
+
+// helper method to construct the path to a container given its name
+func pathForContainer(name string) string {
+	return fmt.Sprintf("/%s", name)
+}
+
+// helper method to construct the path to a blob given its container and blob
+// name
+func pathForBlob(container, name string) string {
+	return fmt.Sprintf("/%s/%s", container, name)
+}
+
+// GetBlobSASURI creates a URL to the specified blob which contains the Shared
+// Access Signature with specified permissions and expiration time.
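+//
+// A sketch granting read access for one hour (hypothetical names, error
+// handling elided):
+//
+//	expiry := time.Now().UTC().Add(time.Hour)
+//	uri, _ := cli.GetBlobSASURI("mycontainer", "myblob", expiry, "r")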
+// +// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx +func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { + var ( + signedPermissions = permissions + blobURL = b.GetBlobURL(container, name) + ) + canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + if err != nil { + return "", err + } + signedExpiry := expiry.Format(time.RFC3339) + signedResource := "b" + + stringToSign, err := blobSASStringToSign(b.client.apiVersion, canonicalizedResource, signedExpiry, signedPermissions) + if err != nil { + return "", err + } + + sig := b.client.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {b.client.apiVersion}, + "se": {signedExpiry}, + "sr": {signedResource}, + "sp": {signedPermissions}, + "sig": {sig}, + } + + sasURL, err := url.Parse(blobURL) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { + var signedStart, signedIdentifier, rscc, rscd, rsce, rscl, rsct string + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go' --- src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/blob_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,715 @@ +package storage + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "sync" + "testing" + "time" + + chk "gopkg.in/check.v1" +) + +type StorageBlobSuite struct{} + +var _ = chk.Suite(&StorageBlobSuite{}) + +const testContainerPrefix = "zzzztest-" + +func getBlobClient(c *chk.C) BlobStorageClient { + return getBasicClient(c).GetBlobService() +} + +func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) { + c.Assert(pathForContainer("foo"), chk.Equals, "/foo") +} + +func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) { + c.Assert(pathForBlob("foo", "blob"), chk.Equals, "/foo/blob") +} + +func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) { + _, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP") + c.Assert(err, chk.NotNil) // not implemented SAS for versions earlier than 2013-08-15 + + out, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP") + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n") +} + +func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) { + api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true) + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + expiry := time.Time{} + + expectedParts := url.URL{ + Scheme: "https", + Host: "foo.blob.core.windows.net", + Path: "container/name", + RawQuery: url.Values{ + "sv": {"2013-08-15"}, + "sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="}, + "sr": {"b"}, + "sp": {"r"}, + "se": {"0001-01-01T00:00:00Z"}, + }.Encode()} + + u, err := cli.GetBlobSASURI("container", "name", expiry, "r") + c.Assert(err, chk.IsNil) + sasParts, err := 
url.Parse(u) + c.Assert(err, chk.IsNil) + c.Assert(expectedParts.String(), chk.Equals, sasParts.String()) + c.Assert(expectedParts.Query(), chk.DeepEquals, sasParts.Query()) +} + +func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + blob := randString(20) + body := []byte(randString(100)) + expiry := time.Now().UTC().Add(time.Hour) + permissions := "r" + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil) + + sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions) + c.Assert(err, chk.IsNil) + + resp, err := http.Get(sasURI) + c.Assert(err, chk.IsNil) + + blobResp, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + c.Assert(err, chk.IsNil) + + c.Assert(resp.StatusCode, chk.Equals, http.StatusOK) + c.Assert(len(blobResp), chk.Equals, len(body)) +} + +func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) { + cli := getBlobClient(c) + c.Assert(deleteTestContainers(cli), chk.IsNil) + + const n = 5 + const pageSize = 2 + + // Create test containers + created := []string{} + for i := 0; i < n; i++ { + name := randContainer() + c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil) + created = append(created, name) + } + sort.Strings(created) + + // Defer test container deletions + defer func() { + var wg sync.WaitGroup + for _, cnt := range created { + wg.Add(1) + go func(name string) { + c.Assert(cli.DeleteContainer(name), chk.IsNil) + wg.Done() + }(cnt) + } + wg.Wait() + }() + + // Paginate results + seen := []string{} + marker := "" + for { + resp, err := cli.ListContainers(ListContainersParameters{ + Prefix: testContainerPrefix, + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + containers := resp.Containers + if len(containers) > pageSize { + c.Fatalf("Got a bigger page. 
Expected: %d, got: %d", pageSize, len(containers)) + } + + for _, c := range containers { + seen = append(seen, c.Name) + } + + marker = resp.NextMarker + if marker == "" || len(containers) == 0 { + break + } + } + + c.Assert(seen, chk.DeepEquals, created) +} + +func (s *StorageBlobSuite) TestContainerExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + ok, err := cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + + ok, err = cli.ContainerExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestCreateContainerDeleteContainer(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + c.Assert(cli.DeleteContainer(cnt), chk.IsNil) +} + +func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + defer cli.DeleteContainer(cnt) + + // First create + ok, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) { + cnt := randContainer() + cli := getBlobClient(c) + + // Nonexisting container + c.Assert(cli.DeleteContainer(cnt), chk.NotNil) + + ok, err := cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + // Existing container + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + ok, err = cli.DeleteContainerIfExists(cnt) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestBlobExists(c *chk.C) { + cnt := randContainer() + blob := randString(20) + cli := getBlobClient(c) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil) + defer cli.DeleteContainer(cnt) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil) + defer cli.DeleteBlob(cnt, blob) + + ok, err := cli.BlobExists(cnt, blob+".foo") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + ok, err = cli.BlobExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) { + api, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + cli := api.GetBlobService() + + c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob") + c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob") + c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob") +} + +func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) { + if testing.Short() { + c.Skip("skipping blob copy in short mode, no SLA on async operation") + } + + cli := getBlobClient(c) + cnt := randContainer() + src := randString(20) + dst := randString(20) + body := []byte(randString(1024)) + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil) + defer cli.DeleteBlob(cnt, src) + + c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, 
src)), chk.IsNil) + defer cli.DeleteBlob(cnt, dst) + + blobBody, err := cli.GetBlob(cnt, dst) + c.Assert(err, chk.IsNil) + + b, err := ioutil.ReadAll(blobBody) + defer blobBody.Close() + c.Assert(err, chk.IsNil) + c.Assert(b, chk.DeepEquals, body) +} + +func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) { + cnt := randContainer() + blob := randString(20) + + cli := getBlobClient(c) + c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil) + + ok, err := cli.DeleteBlobIfExists(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) { + cnt := randContainer() + blob := randString(20) + contents := randString(64) + + cli := getBlobClient(c) + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + // Nonexisting blob + _, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.NotNil) + + // Put the blob + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil) + + // Get blob properties + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + + c.Assert(props.ContentLength, chk.Equals, int64(len(contents))) + c.Assert(props.BlobType, chk.Equals, BlobTypeBlock) +} + +func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.DeleteContainer(cnt) + + blobs := []string{} + const n = 5 + const pageSize = 2 + for i := 0; i < n; i++ { + name := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil) + blobs = append(blobs, name) + } + sort.Strings(blobs) + + // Paginate + seen := []string{} + marker := "" + for { + resp, err := cli.ListBlobs(cnt, ListBlobsParameters{ + MaxResults: pageSize, + Marker: marker}) + c.Assert(err, chk.IsNil) + + for _, v := range resp.Blobs { + seen = append(seen, v.Name) + } + + marker = resp.NextMarker + if marker == "" || len(resp.Blobs) == 0 { + break + } + } + + // Compare + c.Assert(seen, chk.DeepEquals, blobs) +} + +func (s *StorageBlobSuite) TestGetAndSetMetadata(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil) + + m, err := cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(m, chk.Not(chk.Equals), nil) + c.Assert(len(m), chk.Equals, 0) + + mPut := map[string]string{ + "foo": "bar", + "bar_baz": "waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPut) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mPut) + + // Case munging + + mPutUpper := map[string]string{ + "Foo": "different bar", + "bar_BAZ": "different waz qux", + } + mExpectLower := map[string]string{ + "foo": "different bar", + "bar_baz": "different waz qux", + } + + err = cli.SetBlobMetadata(cnt, blob, mPutUpper) + c.Assert(err, chk.IsNil) + + m, err = cli.GetBlobMetadata(cnt, blob) + c.Assert(err, chk.IsNil) + c.Check(m, chk.DeepEquals, mExpectLower) +} + +func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + 
c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil)
+
+	props, err := cli.GetBlobProperties(cnt, blob)
+	c.Assert(err, chk.IsNil)
+	c.Assert(props.ContentLength, chk.Equals, int64(0))
+}
+
+func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) {
+	cnt := randContainer()
+	blob := randString(20)
+	body := "0123456789"
+
+	cli := getBlobClient(c)
+	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
+	defer cli.DeleteContainer(cnt)
+
+	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil)
+	defer cli.DeleteBlob(cnt, blob)
+
+	// Read various ranges
+	for _, r := range []struct {
+		rangeStr string
+		expected string
+	}{
+		{"0-", body},
+		{"1-3", body[1 : 3+1]},
+		{"3-", body[3:]},
+	} {
+		resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr)
+		c.Assert(err, chk.IsNil)
+		blobBody, err := ioutil.ReadAll(resp)
+		c.Assert(err, chk.IsNil)
+
+		str := string(blobBody)
+		c.Assert(str, chk.Equals, r.expected)
+	}
+}
+
+func (s *StorageBlobSuite) TestCreateBlockBlobFromReader(c *chk.C) {
+	cli := getBlobClient(c)
+	cnt := randContainer()
+	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
+	defer cli.deleteContainer(cnt)
+
+	name := randString(20)
+	data := randBytes(8888)
+	c.Assert(cli.CreateBlockBlobFromReader(cnt, name, uint64(len(data)), bytes.NewReader(data)), chk.IsNil)
+
+	body, err := cli.GetBlob(cnt, name)
+	c.Assert(err, chk.IsNil)
+	gotData, err := ioutil.ReadAll(body)
+	body.Close()
+
+	c.Assert(err, chk.IsNil)
+	c.Assert(gotData, chk.DeepEquals, data)
+}
+
+func (s *StorageBlobSuite) TestCreateBlockBlobFromReaderWithShortData(c *chk.C) {
+	cli := getBlobClient(c)
+	cnt := randContainer()
+	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
+	defer cli.deleteContainer(cnt)
+
+	name := randString(20)
+	data := randBytes(8888)
+	err := cli.CreateBlockBlobFromReader(cnt, name, 9999, bytes.NewReader(data))
+	c.Assert(err, chk.Not(chk.IsNil))
+
+	_, err = cli.GetBlob(cnt, name)
+	// Upload was incomplete: blob should not have been created.
+ c.Assert(err, chk.Not(chk.IsNil)) +} + +func (s *StorageBlobSuite) TestPutBlock(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) +} + +func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + chunk := []byte(randString(1024)) + blockID := base64.StdEncoding.EncodeToString([]byte("foo")) + + // Put one block + c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil) + defer cli.deleteBlob(cnt, blob) + + // Get committed blocks + committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted) + c.Assert(err, chk.IsNil) + + if len(committed.CommittedBlocks) > 0 { + c.Fatal("There are committed blocks") + } + + // Get uncommitted blocks + uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted) + c.Assert(err, chk.IsNil) + + c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1) + // Commit block list + c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil) + + // Get all blocks + all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(all.CommittedBlocks), chk.Equals, 1) + c.Assert(len(all.UncommittedBlocks), chk.Equals, 0) + + // Verify the block + thatBlock := all.CommittedBlocks[0] + c.Assert(thatBlock.Name, chk.Equals, blockID) + c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk))) +} + +func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil) + + // Verify + blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll) + c.Assert(err, chk.IsNil) + c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0) + c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0) +} + +func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) + c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + + // Verify + props, err := cli.GetBlobProperties(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(props.ContentLength, chk.Equals, size) + c.Assert(props.BlobType, chk.Equals, BlobTypePage) +} + +func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + + chunk1 := []byte(randString(1024)) + chunk2 := []byte(randString(512)) + + // Append chunks + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil) + c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), 
PageWriteTypeUpdate, chunk2), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...)) + out.Close() + + // Overwrite first half of chunk1 + chunk0 := []byte(randString(512)) + c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil) + + // Verify contents + out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1)) + c.Assert(err, chk.IsNil) + defer out.Close() + blobContents, err = ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...)) +} + +func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + + // Put 0-2047 + chunk := []byte(randString(2048)) + c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil) + + // Clear 512-1023 + c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil) + + // Verify contents + out, err := cli.GetBlobRange(cnt, blob, "0-2047") + c.Assert(err, chk.IsNil) + contents, err := ioutil.ReadAll(out) + c.Assert(err, chk.IsNil) + defer out.Close() + c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...)) +} + +func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) { + cli := getBlobClient(c) + cnt := randContainer() + c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil) + defer cli.deleteContainer(cnt) + + blob := randString(20) + size := int64(10 * 1024 * 1024) // larger than we'll use + c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil) + + // Get page ranges on empty blob + out, err := cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 0) + + // Add 0-512 page + c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 1) + + // Add 1024-2048 + c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil) + + out, err = cli.GetPageRanges(cnt, blob) + c.Assert(err, chk.IsNil) + c.Assert(len(out.PageList), chk.Equals, 2) +} + +func deleteTestContainers(cli BlobStorageClient) error { + for { + resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix}) + if err != nil { + return err + } + if len(resp.Containers) == 0 { + break + } + for _, c := range resp.Containers { + err = cli.DeleteContainer(c.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error { + if len(chunk) > MaxBlobBlockSize { + return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize) + } + + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) + headers := b.client.getStandardHeaders() + 
headers["x-ms-blob-type"] = string(BlobTypeBlock) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk)) + if err != nil { + return err + } + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +func randContainer() string { + return testContainerPrefix + randString(32-len(testContainerPrefix)) +} + +func randString(n int) string { + if n <= 0 { + panic("negative number") + } + const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return string(bytes) +} + +func randBytes(n int) []byte { + data := make([]byte, n) + if _, err := io.ReadFull(rand.Reader, data); err != nil { + panic(err) + } + return data +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/client.go' --- src/github.com/Azure/azure-sdk-for-go/storage/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,386 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + // DefaultBaseURL is the domain name used for storage requests when a + // default client is created. + DefaultBaseURL = "core.windows.net" + + // DefaultAPIVersion is the Azure Storage API version string used when a + // basic client is created. + DefaultAPIVersion = "2014-02-14" + + defaultUseHTTPS = true + + blobServiceName = "blob" + tableServiceName = "table" + queueServiceName = "queue" + fileServiceName = "file" +) + +// Client is the object that needs to be constructed to perform +// operations on the storage account. +type Client struct { + accountName string + accountKey []byte + useHTTPS bool + baseURL string + apiVersion string +} + +type storageResponse struct { + statusCode int + headers http.Header + body io.ReadCloser +} + +// AzureStorageServiceError contains fields of the error response from +// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx +// Some fields might be specific to certain calls. +type AzureStorageServiceError struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` + QueryParameterName string `xml:"QueryParameterName"` + QueryParameterValue string `xml:"QueryParameterValue"` + Reason string `xml:"Reason"` + StatusCode int + RequestID string +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int + got int +} + +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or ")) +} + +// Got is the actual status code returned by Azure. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// NewBasicClient constructs a Client with given storage service name and +// key. 
+func NewBasicClient(accountName, accountKey string) (Client, error) {
+	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
+}
+
+// NewClient constructs a Client. This should be used if the caller wants
+// to specify whether to use HTTPS, a specific REST API version or a storage
+// endpoint other than the Azure Public Cloud.
+func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+	var c Client
+	if accountName == "" {
+		return c, fmt.Errorf("azure: account name required")
+	} else if accountKey == "" {
+		return c, fmt.Errorf("azure: account key required")
+	} else if blobServiceBaseURL == "" {
+		return c, fmt.Errorf("azure: base storage service url required")
+	}
+
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return c, err
+	}
+
+	return Client{
+		accountName: accountName,
+		accountKey:  key,
+		useHTTPS:    useHTTPS,
+		baseURL:     blobServiceBaseURL,
+		apiVersion:  apiVersion,
+	}, nil
+}
+
+func (c Client) getBaseURL(service string) string {
+	scheme := "http"
+	if c.useHTTPS {
+		scheme = "https"
+	}
+
+	host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host}
+	return u.String()
+}
+
+func (c Client) getEndpoint(service, path string, params url.Values) string {
+	u, err := url.Parse(c.getBaseURL(service))
+	if err != nil {
+		// really should not be happening
+		panic(err)
+	}
+
+	if path == "" {
+		path = "/" // API doesn't accept path segments not starting with '/'
+	}
+
+	u.Path = path
+	u.RawQuery = params.Encode()
+	return u.String()
+}
+
+// GetBlobService returns a BlobStorageClient which can operate on the blob
+// service of the storage account.
+func (c Client) GetBlobService() BlobStorageClient {
+	return BlobStorageClient{c}
+}
+
+// GetQueueService returns a QueueServiceClient which can operate on the queue
+// service of the storage account.
+func (c Client) GetQueueService() QueueServiceClient {
+	return QueueServiceClient{c}
+}
+
+// GetFileService returns a FileServiceClient which can operate on the file
+// service of the storage account.
+func (c Client) GetFileService() FileServiceClient {
+	return FileServiceClient{c}
+}
+
+func (c Client) createAuthorizationHeader(canonicalizedString string) string {
+	signature := c.computeHmac256(canonicalizedString)
+	return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature)
+}
+
+func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
+	canonicalizedResource, err := c.buildCanonicalizedResource(url)
+	if err != nil {
+		return "", err
+	}
+
+	canonicalizedString := c.buildCanonicalizedString(verb, headers, canonicalizedResource)
+	return c.createAuthorizationHeader(canonicalizedString), nil
+}
+
+func (c Client) getStandardHeaders() map[string]string {
+	return map[string]string{
+		"x-ms-version": c.apiVersion,
+		"x-ms-date":    currentTimeRfc1123Formatted(),
+	}
+}
+
+func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
+	cm := make(map[string]string)
+
+	for k, v := range headers {
+		headerName := strings.TrimSpace(strings.ToLower(k))
+		match, _ := regexp.MatchString("^x-ms-", headerName)
+		if match {
+			cm[headerName] = v
+		}
+	}
+
+	if len(cm) == 0 {
+		return ""
+	}
+
+	keys := make([]string, 0, len(cm))
+	for key := range cm {
+		keys = append(keys, key)
+	}
+
+	sort.Strings(keys)
+
+	ch := ""
+
+	for i, key := range keys {
+		if i == len(keys)-1 {
+			ch += fmt.Sprintf("%s:%s", key, cm[key])
+		} else {
+			ch += fmt.Sprintf("%s:%s\n", key, cm[key])
+		}
+	}
+	return ch
+}
+
+func (c Client) buildCanonicalizedResource(uri string) (string, error) {
+	errMsg := "buildCanonicalizedResource error: %s"
+	u, err := url.Parse(uri)
+	if err != nil {
+		return "", fmt.Errorf(errMsg, err.Error())
+	}
+
+	cr := "/" + c.accountName
+	if len(u.Path) > 0 {
+		cr += u.Path
+	}
+
+	params, err := url.ParseQuery(u.RawQuery)
+	if err != nil {
+		return "", fmt.Errorf(errMsg, err.Error())
+	}
+
+	if len(params) > 0 {
+		cr += "\n"
+		keys := make([]string, 0, len(params))
+		for key := range params {
+			keys = append(keys, key)
+		}
+
+		sort.Strings(keys)
+
+		for i, key := range keys {
+			if len(params[key]) > 1 {
+				sort.Strings(params[key])
+			}
+
+			if i == len(keys)-1 {
+				cr += fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))
+			} else {
+				cr += fmt.Sprintf("%s:%s\n", key, strings.Join(params[key], ","))
+			}
+		}
+	}
+	return cr, nil
+}
+
+func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string {
+	canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
+		verb,
+		headers["Content-Encoding"],
+		headers["Content-Language"],
+		headers["Content-Length"],
+		headers["Content-MD5"],
+		headers["Content-Type"],
+		headers["Date"],
+		headers["If-Modified-Since"],
+		headers["If-Match"],
+		headers["If-None-Match"],
+		headers["If-Unmodified-Since"],
+		headers["Range"],
+		c.buildCanonicalizedHeader(headers),
+		canonicalizedResource)
+
+	return canonicalizedString
+}
+
+func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) {
+	authHeader, err := c.getAuthorizationHeader(verb, url, headers)
+	if err != nil {
+		return nil, err
+	}
+	headers["Authorization"] = authHeader
+
+	req, err := http.NewRequest(verb, url, body)
+	if err != nil {
+		return nil, errors.New("azure/storage: error creating request: " + err.Error())
+	}
+	if clstr, ok := headers["Content-Length"]; ok {
+		// The Content-Length header is signed, but completely ignored by Go's
+		// net/http package.
+		// Instead we have to use the ContentLength property on the request struct
+		// (see https://golang.org/src/net/http/request.go?s=18140:18370#L536 and
+		// https://golang.org/src/net/http/transfer.go?s=1739:2467#L49)
+		req.ContentLength, err = strconv.ParseInt(clstr, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+	}
+	for k, v := range headers {
+		req.Header.Add(k, v)
+	}
+	httpClient := http.Client{}
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	statusCode := resp.StatusCode
+	if statusCode >= 400 && statusCode <= 505 {
+		var respBody []byte
+		respBody, err = readResponseBody(resp)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(respBody) == 0 {
+			// no error in response body
+			err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status)
+		} else {
+			// response contains storage service error object, unmarshal
+			storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id"))
+			if errIn != nil { // error unmarshaling the error response
+				err = errIn
+			} else {
+				err = storageErr
+			}
+		}
+		return &storageResponse{
+			statusCode: resp.StatusCode,
+			headers:    resp.Header,
+			body:       ioutil.NopCloser(bytes.NewReader(respBody)), /* restore the body */
+		}, err
+	}
+
+	return &storageResponse{
+		statusCode: resp.StatusCode,
+		headers:    resp.Header,
+		body:       resp.Body}, nil
+}
+
+func readResponseBody(resp *http.Response) ([]byte, error) {
+	defer resp.Body.Close()
+	out, err := ioutil.ReadAll(resp.Body)
+	if err == io.EOF {
+		err = nil
+	}
+	return out, err
+}
+
+func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
+	var storageErr AzureStorageServiceError
+	if err := xml.Unmarshal(body, &storageErr); err != nil {
+		return storageErr, err
+	}
+	storageErr.StatusCode = statusCode
+	storageErr.RequestID = requestID
+	return storageErr, nil
+}
+
+func (e AzureStorageServiceError) Error() string {
+	return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
+}
+
+// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
+// one of the allowed status codes; otherwise nil.
+func checkRespCode(respCode int, allowed []int) error { + for _, v := range allowed { + if respCode == v { + return nil + } + } + return UnexpectedStatusCodeError{allowed, respCode} +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/client_test.go' --- src/github.com/Azure/azure-sdk-for-go/storage/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,156 @@ +package storage + +import ( + "encoding/base64" + "net/url" + "os" + "testing" + + chk "gopkg.in/check.v1" +) + +// Hook up gocheck to testing +func Test(t *testing.T) { chk.TestingT(t) } + +type StorageClientSuite struct{} + +var _ = chk.Suite(&StorageClientSuite{}) + +// getBasicClient returns a test client from storage credentials in the env +func getBasicClient(c *chk.C) Client { + name := os.Getenv("ACCOUNT_NAME") + if name == "" { + c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test") + } + key := os.Getenv("ACCOUNT_KEY") + if key == "" { + c.Fatal("ACCOUNT_KEY not set") + } + cli, err := NewBasicClient(name, key) + c.Assert(err, chk.IsNil) + return cli +} + +func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion) + c.Assert(err, chk.IsNil) + c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net") +} + +func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) { + apiVersion := "2015-01-01" // a non existing one + cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false) + c.Assert(err, chk.IsNil) + c.Assert(cli.apiVersion, chk.Equals, apiVersion) + c.Assert(cli.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn") +} + +func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/") +} + +func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + output := cli.getEndpoint(blobServiceName, "path", url.Values{}) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path") +} + +func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d") +} + +func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + params := url.Values{} + params.Set("a", "b") + params.Set("c", "d") + output := cli.getEndpoint(blobServiceName, "path", params) + c.Assert(output, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d") +} + +func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + headers := cli.getStandardHeaders() + c.Assert(len(headers), chk.Equals, 2) + c.Assert(headers["x-ms-version"], chk.Equals, cli.apiVersion) + if _, ok := headers["x-ms-date"]; !ok { + c.Fatal("Missing date header") + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) { + cli, 
err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct{ url, expected string } + tests := []test{ + {"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"}, + {"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"}, + {"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"}, + } + + for _, i := range tests { + out, err := cli.buildCanonicalizedResource(i.url) + c.Assert(err, chk.IsNil) + c.Assert(out, chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) { + cli, err := NewBasicClient("foo", "YmFy") + c.Assert(err, chk.IsNil) + + type test struct { + headers map[string]string + expected string + } + tests := []test{ + {map[string]string{}, ""}, + {map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{"foo:": "bar"}, ""}, + {map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"}, + {map[string]string{ + "x-ms-version": "9999-99-99", + "x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}} + + for _, i := range tests { + c.Assert(cli.buildCanonicalizedHeader(i.headers), chk.Equals, i.expected) + } +} + +func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) { + // attempt to delete a nonexistent container + _, err := getBlobClient(c).deleteContainer(randContainer()) + c.Assert(err, chk.NotNil) + + v, ok := err.(AzureStorageServiceError) + c.Check(ok, chk.Equals, true) + c.Assert(v.StatusCode, chk.Equals, 404) + c.Assert(v.Code, chk.Equals, "ContainerNotFound") + c.Assert(v.Code, chk.Not(chk.Equals), "") +} + +func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) { + key := base64.StdEncoding.EncodeToString([]byte("bar")) + cli, err := NewBasicClient("foo", key) + c.Assert(err, chk.IsNil) + + canonicalizedString := `foobarzoo` + expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=` + c.Assert(cli.createAuthorizationHeader(canonicalizedString), chk.Equals, expected) +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/file.go' --- src/github.com/Azure/azure-sdk-for-go/storage/file.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/file.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +package storage + +import ( + "fmt" + "net/http" + "net/url" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client +} + +// pathForFileShare returns the URL path segment for a File Share resource +func pathForFileShare(name string) string { + return fmt.Sprintf("/%s", name) +} + +// CreateShare operation creates a new share under the specified account. If the +// share with the same name already exists, the operation fails. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShare(name string) error { + resp, err := f.createShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// CreateShareIfNotExists creates a new share under the specified account if +// it does not exist. Returns true if the share is newly created or false if +// the share already exists.
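+// +// A short usage sketch (the Client value and the share name "myshare" are +// illustrative): +// +//	fs := client.GetFileService() +//	created, err := fs.CreateShareIfNotExists("myshare") +//	// created is true only when this call actually made the share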
+// +// See https://msdn.microsoft.com/en-us/library/azure/dn167008.aspx +func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { + resp, err := f.createShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict { + return resp.statusCode == http.StatusCreated, nil + } + } + return false, err +} + +// createShare creates an Azure File Share and returns its response +func (f FileServiceClient) createShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + headers := f.client.getStandardHeaders() + headers["Content-Length"] = "0" + return f.client.exec("PUT", uri, headers, nil) +} + +// DeleteShare operation marks the specified share for deletion. The share +// and any files contained within it are later deleted during garbage +// collection. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShare(name string) error { + resp, err := f.deleteShare(name) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusAccepted}) +} + +// DeleteShareIfExists operation marks the specified share for deletion if it +// exists. The share and any files contained within it are later deleted during +// garbage collection. Returns true if the share existed and was deleted by this +// call, false otherwise. +// +// See https://msdn.microsoft.com/en-us/library/azure/dn689090.aspx +func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { + resp, err := f.deleteShare(name) + if resp != nil { + defer resp.body.Close() + if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound { + return resp.statusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// deleteShare makes the call to the Delete Share operation endpoint and returns +// the response +func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { + uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) + return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/file_test.go' --- src/github.com/Azure/azure-sdk-for-go/storage/file_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/file_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,63 @@ +package storage + +import ( + chk "gopkg.in/check.v1" +) + +type StorageFileSuite struct{} + +var _ = chk.Suite(&StorageFileSuite{}) + +func getFileClient(c *chk.C) FileServiceClient { + return getBasicClient(c).GetFileService() +} + +func (s *StorageFileSuite) Test_pathForFileShare(c *chk.C) { + c.Assert(pathForFileShare("foo"), chk.Equals, "/foo") +} + +func (s *StorageFileSuite) TestCreateShareDeleteShare(c *chk.C) { + cli := getFileClient(c) + name := randShare() + c.Assert(cli.CreateShare(name), chk.IsNil) + c.Assert(cli.DeleteShare(name), chk.IsNil) +} + +func (s *StorageFileSuite) TestCreateShareIfNotExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + defer cli.DeleteShare(name) + + // First create + ok, err := cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) + + // Second create, should not give errors + ok, err = cli.CreateShareIfNotExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) +} + +func
(s *StorageFileSuite) TestDeleteShareIfExists(c *chk.C) { + cli := getFileClient(c) + name := randShare() + + // delete non-existing share + ok, err := cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + c.Assert(cli.CreateShare(name), chk.IsNil) + + // delete existing share + ok, err = cli.DeleteShareIfExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +const testSharePrefix = "zzzzztest" + +func randShare() string { + return testSharePrefix + randString(32-len(testSharePrefix)) +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/queue.go' --- src/github.com/Azure/azure-sdk-for-go/storage/queue.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/queue.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,308 @@ +package storage + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +const ( + // Casing follows Go's http.Header canonicalization of header names. + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" + userDefinedMetadataHeaderPrefix = "X-Ms-Meta-" +) + +// QueueServiceClient contains operations for Microsoft Azure Queue Storage +// Service. +type QueueServiceClient struct { + client Client +} + +func pathForQueue(queue string) string { return fmt.Sprintf("/%s", queue) } +func pathForQueueMessages(queue string) string { return fmt.Sprintf("/%s/messages", queue) } +func pathForMessage(queue, name string) string { return fmt.Sprintf("/%s/messages/%s", queue, name) } + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} + +// PutMessageParameters is the set of options that can be specified for the +// Put Message operation. A zero struct does not use any preferences for the +// request. +type PutMessageParameters struct { + VisibilityTimeout int + MessageTTL int +} + +func (p PutMessageParameters) getParameters() url.Values { + out := url.Values{} + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + if p.MessageTTL != 0 { + out.Set("messagettl", strconv.Itoa(p.MessageTTL)) + } + return out +} + +// GetMessagesParameters is the set of options that can be specified for the +// Get Messages operation. A zero struct does not use any preferences for the +// request. +type GetMessagesParameters struct { + NumOfMessages int + VisibilityTimeout int +} + +func (p GetMessagesParameters) getParameters() url.Values { + out := url.Values{} + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + if p.VisibilityTimeout != 0 { + out.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout)) + } + return out +} + +// PeekMessagesParameters is the set of options that can be specified for the +// Peek Messages operation. A zero struct does not use any preferences for the +// request. +type PeekMessagesParameters struct { + NumOfMessages int +} + +func (p PeekMessagesParameters) getParameters() url.Values { + out := url.Values{"peekonly": {"true"}} // Required for peek operation + if p.NumOfMessages != 0 { + out.Set("numofmessages", strconv.Itoa(p.NumOfMessages)) + } + return out +} + +// GetMessagesResponse represents a response returned from Get Messages +// operation. +type GetMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []GetMessageResponse `xml:"QueueMessage"` +} + +// GetMessageResponse represents a QueueMessage object returned from Get +// Messages operation response.
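+// The MessageID/PopReceipt pair is what DeleteMessage (below) needs; the +// time-related fields are kept as the raw strings returned by the service.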
+type GetMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + TimeNextVisible string `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// PeekMessagesResponse represents a response returned from Peek Messages +// operation. +type PeekMessagesResponse struct { + XMLName xml.Name `xml:"QueueMessagesList"` + QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"` +} + +// PeekMessageResponse represents a QueueMessage object returned from Peek +// Messages operation response. +type PeekMessageResponse struct { + MessageID string `xml:"MessageId"` + InsertionTime string `xml:"InsertionTime"` + ExpirationTime string `xml:"ExpirationTime"` + DequeueCount int `xml:"DequeueCount"` + MessageText string `xml:"MessageText"` +} + +// QueueMetadataResponse represents user defined metadata and queue +// properties on a specific queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +type QueueMetadataResponse struct { + ApproximateMessageCount int + UserDefinedMetadata map[string]string +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179348.aspx +func (c QueueServiceClient) SetMetadata(name string, metadata map[string]string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + headers["Content-Length"] = "0" + for k, v := range metadata { + headers[userDefinedMetadataHeaderPrefix+k] = v + } + + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMetadata operation retrieves user-defined metadata and queue +// properties on the specified queue. Metadata is associated with +// the queue as name-value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179384.aspx +// +// Because of the way Go's http client (and http.Header in particular) +// canonicalizes header names, the returned metadata names are always +// all lower case.
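+// +// A short usage sketch (the Client value and the names "myqueue" and "color" +// are illustrative): +// +//	qs := client.GetQueueService() +//	qm, err := qs.GetMetadata("myqueue") +//	if err == nil { +//		fmt.Println(qm.ApproximateMessageCount) +//		fmt.Println(qm.UserDefinedMetadata["color"]) // keys come back lower-cased +//	}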
+func (c QueueServiceClient) GetMetadata(name string) (QueueMetadataResponse, error) { + qm := QueueMetadataResponse{} + qm.UserDefinedMetadata = make(map[string]string) + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": []string{"metadata"}}) + headers := c.client.getStandardHeaders() + resp, err := c.client.exec("GET", uri, headers, nil) + if err != nil { + return qm, err + } + defer resp.body.Close() + + for k, v := range resp.headers { + if len(v) != 1 { + return qm, fmt.Errorf("Unexpected number of values (%d) in response header '%s'", len(v), k) + } + + value := v[0] + + if k == approximateMessagesCountHeader { + qm.ApproximateMessageCount, err = strconv.Atoi(value) + if err != nil { + return qm, fmt.Errorf("Unexpected value in response header '%s': '%s' ", k, value) + } + } else if strings.HasPrefix(k, userDefinedMetadataHeaderPrefix) { + name := strings.TrimPrefix(k, userDefinedMetadataHeaderPrefix) + qm.UserDefinedMetadata[strings.ToLower(name)] = value + } + } + + return qm, checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// CreateQueue operation creates a queue under the given account. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx +func (c QueueServiceClient) CreateQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + headers := c.client.getStandardHeaders() + headers["Content-Length"] = "0" + resp, err := c.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// DeleteQueue operation permanently deletes the specified queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx +func (c QueueServiceClient) DeleteQueue(name string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// QueueExists returns true if a queue with given name exists. +func (c QueueServiceClient) QueueExists(name string) (bool, error) { + uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}}) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { + return resp.statusCode == http.StatusOK, nil + } + + return false, err +} + +// PutMessage operation adds a new message to the back of the message queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx +func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + req := putMessageRequest{MessageText: message} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers := c.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(nn) + resp, err := c.client.exec("POST", uri, headers, body) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusCreated}) +} + +// ClearMessages operation deletes all messages from the specified queue. 
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx +func (c QueueServiceClient) ClearMessages(queue string) error { + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} + +// GetMessages operation retrieves one or more messages from the front of the +// queue. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx +func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) { + var r GetMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// PeekMessages retrieves one or more messages from the front of the queue, but +// does not alter the visibility of the message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx +func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) { + var r PeekMessagesResponse + uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters()) + resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return r, err + } + defer resp.body.Close() + err = xmlUnmarshal(resp.body, &r) + return r, err +} + +// DeleteMessage operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error { + uri := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), url.Values{ + "popreceipt": {popReceipt}}) + resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil) + if err != nil { + return err + } + defer resp.body.Close() + return checkRespCode(resp.statusCode, []int{http.StatusNoContent}) +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go' --- src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/queue_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,132 @@ +package storage + +import ( + "time" + + chk "gopkg.in/check.v1" +) + +type StorageQueueSuite struct{} + +var _ = chk.Suite(&StorageQueueSuite{}) + +func getQueueClient(c *chk.C) QueueServiceClient { + return getBasicClient(c).GetQueueService() +} + +func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) { + c.Assert(pathForQueue("q"), chk.Equals, "/q") +} + +func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) { + c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages") +} + +func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) { + c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m") +} + +func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + c.Assert(cli.DeleteQueue(name), chk.IsNil) +} + +func (s *StorageQueueSuite) Test_GetMetadata_GetApproximateCount(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), 
chk.IsNil) + defer cli.DeleteQueue(name) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 0) + + for ix := 0; ix < 3; ix++ { + err = cli.PutMessage(name, "foobar", PutMessageParameters{}) + c.Assert(err, chk.IsNil) + } + time.Sleep(1 * time.Second) + + qm, err = cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.ApproximateMessageCount, chk.Equals, 3) +} + +func (s *StorageQueueSuite) Test_SetMetadataGetMetadata_Roundtrips(c *chk.C) { + cli := getQueueClient(c) + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + metadata := make(map[string]string) + metadata["Foo1"] = "bar1" + metadata["fooBaz"] = "bar" + err := cli.SetMetadata(name, metadata) + c.Assert(err, chk.IsNil) + + qm, err := cli.GetMetadata(name) + c.Assert(err, chk.IsNil) + c.Assert(qm.UserDefinedMetadata["foo1"], chk.Equals, "bar1") + c.Assert(qm.UserDefinedMetadata["foobaz"], chk.Equals, "bar") +} + +func (s *StorageQueueSuite) TestQueueExists(c *chk.C) { + cli := getQueueClient(c) + ok, err := cli.QueueExists("nonexistent-queue") + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, false) + + name := randString(20) + c.Assert(cli.CreateQueue(name), chk.IsNil) + defer cli.DeleteQueue(name) + + ok, err = cli.QueueExists(name) + c.Assert(err, chk.IsNil) + c.Assert(ok, chk.Equals, true) +} + +func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + msg := randString(64 * 1024) // exercise max length + c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil) + r, err := cli.PeekMessages(q, PeekMessagesParameters{}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg) +} + +func (s *StorageQueueSuite) TestGetMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + n := 4 + for i := 0; i < n; i++ { + c.Assert(cli.PutMessage(q, randString(10), PutMessageParameters{}), chk.IsNil) + } + + r, err := cli.GetMessages(q, GetMessagesParameters{NumOfMessages: n}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, n) +} + +func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) { + q := randString(20) + cli := getQueueClient(c) + c.Assert(cli.CreateQueue(q), chk.IsNil) + defer cli.DeleteQueue(q) + + c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil) + r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1}) + c.Assert(err, chk.IsNil) + c.Assert(len(r.QueueMessagesList), chk.Equals, 1) + m := r.QueueMessagesList[0] + c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil) +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/util.go' --- src/github.com/Azure/azure-sdk-for-go/storage/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,71 @@ +package storage + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" +) + +func (c Client) computeHmac256(message string) string { + h := hmac.New(sha256.New, c.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func 
currentTimeRfc1123Formatted() string { + return timeRfc1123Formatted(time.Now().UTC()) +} + +func timeRfc1123Formatted(t time.Time) string { + return t.Format(http.TimeFormat) +} + +func mergeParams(v1, v2 url.Values) url.Values { + out := url.Values{} + for k, v := range v1 { + out[k] = v + } + for k, v := range v2 { + vals, ok := out[k] + if ok { + vals = append(vals, v...) + out[k] = vals + } else { + out[k] = v + } + } + return out +} + +func prepareBlockListRequest(blocks []Block) string { + s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` + for _, v := range blocks { + s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status) + } + s += `</BlockList>` + return s +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func xmlMarshal(v interface{}) (io.Reader, int, error) { + b, err := xml.Marshal(v) + if err != nil { + return nil, 0, err + } + return bytes.NewReader(b), len(b), nil +} === added file 'src/github.com/Azure/azure-sdk-for-go/storage/util_test.go' --- src/github.com/Azure/azure-sdk-for-go/storage/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/Azure/azure-sdk-for-go/storage/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,69 @@ +package storage + +import ( + "encoding/xml" + "io/ioutil" + "net/url" + "strings" + "time" + + chk "gopkg.in/check.v1" +) + +func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) { + now := time.Now().UTC() + expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT" + c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout)) +} + +func (s *StorageClientSuite) Test_mergeParams(c *chk.C) { + v1 := url.Values{ + "k1": {"v1"}, + "k2": {"v2"}} + v2 := url.Values{ + "k1": {"v11"}, + "k3": {"v3"}} + out := mergeParams(v1, v2) + c.Assert(out.Get("k1"), chk.Equals, "v1") + c.Assert(out.Get("k2"), chk.Equals, "v2") + c.Assert(out.Get("k3"), chk.Equals, "v3") + c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"}) +} + +func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) { + empty := []Block{} + expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>` + c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected) + + blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}} + expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>` + c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected) +} + +func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) { + xml := `<?xml version="1.0" encoding="utf-8"?> + <Blob> + <Name>myblob</Name> + </Blob>` + var blob Blob + body := ioutil.NopCloser(strings.NewReader(xml)) + c.Assert(xmlUnmarshal(body, &blob), chk.IsNil) + c.Assert(blob.Name, chk.Equals, "myblob") +} + +func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) { + type t struct { + XMLName xml.Name `xml:"S"` + Name string `xml:"Name"` + } + + b := t{Name: "myblob"} + expected := `<S><Name>myblob</Name></S>` + r, i, err := xmlMarshal(b) + c.Assert(err, chk.IsNil) + o, err := ioutil.ReadAll(r) + c.Assert(err, chk.IsNil) + out := string(o) + c.Assert(out, chk.Equals, expected) + c.Assert(i, chk.Equals, len(expected)) +} === added directory 'src/github.com/chai2010' === added directory 'src/github.com/chai2010/gettext-go' === added file 'src/github.com/chai2010/gettext-go/LICENSE' --- src/github.com/chai2010/gettext-go/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +Copyright 2013 ChaiShushan . All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. === added file 'src/github.com/chai2010/gettext-go/README.md' --- src/github.com/chai2010/gettext-go/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +gettext-go +========== + +PkgDoc: [http://godoc.org/github.com/chai2010/gettext-go/gettext](http://godoc.org/github.com/chai2010/gettext-go/gettext) + +Install +======== + +1. `go get github.com/chai2010/gettext-go/gettext` +2. `go run hello.go` + +godoc.org and gowalker.org have more information. + +Example +======= + +```Go +package main + +import ( + "fmt" + + "github.com/chai2010/gettext-go/gettext" +) + +func main() { + gettext.SetLocale("zh_CN") + gettext.Textdomain("hello") + + gettext.BindTextdomain("hello", "local", nil) + + // gettext.BindTextdomain("hello", "local", nil) // from local dir + // gettext.BindTextdomain("hello", "local.zip", nil) // from local zip file + // gettext.BindTextdomain("hello", "local.zip", zipData) // from embedded zip data + + // translate source text + fmt.Println(gettext.Gettext("Hello, world!")) + // Output: 你好, 世界! + + // translate resource + fmt.Println(string(gettext.Getdata("poems.txt"))) + // Output: ... +} +``` + +Go file: [hello.go](https://github.com/chai2010/gettext-go/blob/master/examples/hello.go); PO file: [hello.po](https://github.com/chai2010/gettext-go/blob/master/examples/local/default/LC_MESSAGES/hello.po); + +BUGS +==== + +Please report bugs to . + +Thanks! === added directory 'src/github.com/chai2010/gettext-go/examples' === added file 'src/github.com/chai2010/gettext-go/examples/Makefile' --- src/github.com/chai2010/gettext-go/examples/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +# Copyright 2013 ChaiShushan . All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file.
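+# +# The default target compiles each hello.po catalog into a binary hello.mo +# with msgfmt, packs the whole local tree into local.zip (7z, UTF-8 file +# names), and then runs the example.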
+ +default: + msgfmt -o local/default/LC_MESSAGES/hello.mo local/default/LC_MESSAGES/hello.po + msgfmt -o local/zh_CN/LC_MESSAGES/hello.mo local/zh_CN/LC_MESSAGES/hello.po + msgfmt -o local/zh_TW/LC_MESSAGES/hello.mo local/zh_TW/LC_MESSAGES/hello.po + 7z a -tzip -scsUTF-8 local.zip local + go run hello.go + +clean: + rm local.zip === added file 'src/github.com/chai2010/gettext-go/examples/hello.go' --- src/github.com/chai2010/gettext-go/examples/hello.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/hello.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,83 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a gettext-go example. +package main + +import ( + "fmt" + + "github.com/chai2010/gettext-go/examples/hi" + "github.com/chai2010/gettext-go/gettext" +) + +func init() { + // bind app domain + gettext.BindTextdomain("hello", "local", nil) + gettext.Textdomain("hello") + + // $(LC_MESSAGES) or $(LANG) or empty + fmt.Println(gettext.Gettext("Gettext in init.")) + fmt.Println(gettext.PGettext("main.init", "Gettext in init.")) + hi.SayHi() + // Output (depends on the locale environment): + // ? + // ? + // ? + // ? + + // set simplified Chinese + gettext.SetLocale("zh_CN") + + // simplified Chinese + fmt.Println(gettext.Gettext("Gettext in init.")) + fmt.Println(gettext.PGettext("main.init", "Gettext in init.")) + hi.SayHi() + // Output: + // Init函数中的Gettext. + // Init函数中的Gettext. + // 来自"Hi"包的问候: 你好, 世界! + // 来自"Hi"包的问候: 你好, 世界! +} + +func main() { + // simplified Chinese + fmt.Println(gettext.Gettext("Hello, world!")) + fmt.Println(gettext.PGettext("main.main", "Hello, world!")) + hi.SayHi() + // Output: + // 你好, 世界! + // 你好, 世界! + // 来自"Hi"包的问候: 你好, 世界! + // 来自"Hi"包的问候: 你好, 世界! + + // set traditional Chinese + gettext.SetLocale("zh_TW") + + // traditional Chinese + func() { + fmt.Println(gettext.Gettext("Gettext in func.")) + fmt.Println(gettext.PGettext("main.func", "Gettext in func.")) + hi.SayHi() + // Output: + // 閉包函數中的Gettext. + // 閉包函數中的Gettext. + // 來自"Hi"包的問候: 你好, 世界! + // 來自"Hi"包的問候: 你好, 世界! + }() + + fmt.Println() + + // translate resource + gettext.SetLocale("zh_CN") + fmt.Println("poems(simplified chinese):") + fmt.Println(string(gettext.Getdata("poems.txt"))) + gettext.SetLocale("zh_TW") + fmt.Println("poems(traditional chinese):") + fmt.Println(string(gettext.Getdata("poems.txt"))) + gettext.SetLocale("??") + fmt.Println("poems(default is english):") + fmt.Println(string(gettext.Getdata("poems.txt"))) + // Output: ... +} === added directory 'src/github.com/chai2010/gettext-go/examples/hi' === added file 'src/github.com/chai2010/gettext-go/examples/hi/hi.go' --- src/github.com/chai2010/gettext-go/examples/hi/hi.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/hi/hi.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,17 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hi is an example pkg.
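+// It also shows that a Gettext/PGettext call made from another package uses +// that package's import path (via the caller's function name) as the msgctxt.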
+package hi + +import ( + "fmt" + + "github.com/chai2010/gettext-go/gettext" +) + +func SayHi() { + fmt.Println(gettext.Gettext("pkg hi: Hello, world!")) + fmt.Println(gettext.PGettext("code.google.com/p/gettext-go/examples/hi.SayHi", "pkg hi: Hello, world!")) +} === added directory 'src/github.com/chai2010/gettext-go/examples/local' === added file 'src/github.com/chai2010/gettext-go/examples/local.zip' Binary files src/github.com/chai2010/gettext-go/examples/local.zip 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/examples/local.zip 2016-03-22 15:18:22 +0000 differ === added directory 'src/github.com/chai2010/gettext-go/examples/local/default' === added directory 'src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES' === added file 'src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES/hello.mo' Binary files src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES/hello.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES/hello.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES/hello.po' --- src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES/hello.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/local/default/LC_MESSAGES/hello.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-12-12 20:03+0000\n" +"PO-Revision-Date: 2013-12-30 20:47+0800\n" +"Last-Translator: chai2010 \n" +"Language-Team: chai2010(团队) \n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.7\n" +"X-Poedit-SourceCharset: UTF-8\n" + +msgctxt "main.init" +msgid "Gettext in init." +msgstr "" + +msgctxt "main.main" +msgid "Hello, world!" +msgstr "" + +msgctxt "main.func" +msgid "Gettext in func." +msgstr "" + +msgctxt "github.com/chai2010/gettext-go/examples/hi.SayHi" +msgid "pkg hi: Hello, world!" 
+msgstr "" === added directory 'src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE' === added directory 'src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello' === added file 'src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello/favicon.ico' Binary files src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello/favicon.ico 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello/favicon.ico 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello/poems.txt' --- src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello/poems.txt 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/local/default/LC_RESOURCE/hello/poems.txt 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +Drinking Alone Under the Moon +Li Bai + +flowers among one jar liquor +alone carouse without mutual intimate + +raise cup greet bright moon +facing shadow become three persons + +moon since not free to-drink +shadow follow accompany my body + +briefly accompany moon with shadow +go happy should avail-oneself-of spring + +my song moon walk-to-and-fro irresolute +my dance shadow fragments disorderly + +sober time together mix glad +drunk after each divide scatter + +eternal connect without consciouness-of-self roam +mutual appointment remote cloud Milky-Way === added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_CN' === added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES' === added file 'src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES/hello.mo' Binary files src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES/hello.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES/hello.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES/hello.po' --- src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES/hello.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_MESSAGES/hello.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-12-12 20:03+0000\n" +"PO-Revision-Date: 2013-12-30 20:47+0800\n" +"Last-Translator: chai2010 \n" +"Language-Team: chai2010(团队) \n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.7\n" +"X-Poedit-SourceCharset: UTF-8\n" + +msgctxt "main.init" +msgid "Gettext in init." +msgstr "Init函数中的Gettext." + +msgctxt "main.main" +msgid "Hello, world!" +msgstr "你好, 世界!" + +msgctxt "main.func" +msgid "Gettext in func." +msgstr "闭包函数中的Gettext." + +msgctxt "github.com/chai2010/gettext-go/examples/hi.SayHi" +msgid "pkg hi: Hello, world!" +msgstr "æ¥è‡ª\"Hi\"包的问候: 你好, 世界!" 
=== added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_RESOURCE' === added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_RESOURCE/hello' === added file 'src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_RESOURCE/hello/poems.txt' --- src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_RESOURCE/hello/poems.txt 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/local/zh_CN/LC_RESOURCE/hello/poems.txt 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +yuèxiàdúzhuó +月下独酌 +lǐbái +李白 + +huājiānyīhújiǔ,dúzhuówúxiānɡqīn。 +花间一壶酒，独酌无相亲。 +jǔbēiyāomínɡyuè,duìyǐnɡchénɡsānrén。 +举杯邀明月，对影成三人。 +yuèjìbùjiěyǐn,yǐnɡtúsuíwǒshēn。 +月既不解饮，影徒随我身。 +zànbànyuèjiānɡyǐnɡ,xínɡlèxūjíchūn。 +暂伴月将影，行乐须及春。 +wǒɡēyuèpáihuái,wǒwǔyǐnɡlínɡluàn。 +我歌月徘徊，我舞影零乱。 +xǐnɡshítónɡjiāohuān,zuìhòuɡèfēnsàn。 +醒时同交欢，醉后各分散。 +yǒnɡjiéwúqínɡyóu,xiānɡqīmiǎoyúnhàn。 +永结无情游，相期邈云汉。 === added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_TW' === added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES' === added file 'src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES/hello.mo' Binary files src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES/hello.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES/hello.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES/hello.po' --- src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES/hello.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_MESSAGES/hello.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-12-12 20:03+0000\n" +"PO-Revision-Date: 2014-01-01 11:39+0800\n" +"Last-Translator: chai2010 \n" +"Language-Team: chai2010(团队) \n" +"Language: zh_TW\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.7\n" +"X-Poedit-SourceCharset: UTF-8\n" + +msgctxt "main.init" +msgid "Gettext in init." +msgstr "Init函數中的Gettext." + +msgctxt "main.main" +msgid "Hello, world!" +msgstr "你好, 世界!" + +msgctxt "main.func" +msgid "Gettext in func." +msgstr "閉包函數中的Gettext." + +msgctxt "github.com/chai2010/gettext-go/examples/hi.SayHi" +msgid "pkg hi: Hello, world!" +msgstr "來自\"Hi\"包的問候: 你好, 世界!"
=== added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_RESOURCE' === added directory 'src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_RESOURCE/hello' === added file 'src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_RESOURCE/hello/poems.txt' --- src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_RESOURCE/hello/poems.txt 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/examples/local/zh_TW/LC_RESOURCE/hello/poems.txt 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +yuèxiàdúzhuó +月下獨酌 +lǐbái +李白 + +huājiānyīhújiǔ,dúzhuówúxiānɡqīn。 +花間一壺酒，獨酌無相親。 +jǔbēiyāomínɡyuè,duìyǐnɡchénɡsānrén。 +舉杯邀明月，對影成三人。 +yuèjìbùjiěyǐn,yǐnɡtúsuíwǒshēn。 +月既不解飲，影徒隨我身。 +zànbànyuèjiānɡyǐnɡ,xínɡlèxūjíchūn。 +暫伴月將影，行樂須及春。 +wǒɡēyuèpáihuái,wǒwǔyǐnɡlínɡluàn。 +我歌月徘徊，我舞影零亂。 +xǐnɡshítónɡjiāohuān,zuìhòuɡèfēnsàn。 +醒時同交歡，醉後各分散。 +yǒnɡjiéwúqínɡyóu,xiānɡqīmiǎoyúnhàn。 +永結無情遊，相期邈雲漢。 === added directory 'src/github.com/chai2010/gettext-go/gettext' === added file 'src/github.com/chai2010/gettext-go/gettext/caller.go' --- src/github.com/chai2010/gettext-go/gettext/caller.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/caller.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "regexp" + "runtime" +) + +var ( + reInit = regexp.MustCompile(`init·\d+$`) // main.init·1 + reClosure = regexp.MustCompile(`func·\d+$`) // main.func·001 +) + +// caller types: +// runtime.goexit +// runtime.main +// main.init +// main.main +// main.init·1 -> main.init +// main.func·001 -> main.func +// code.google.com/p/gettext-go/gettext.TestCallerName +// ... +func callerName(skip int) string { + pc, _, _, ok := runtime.Caller(skip) + if !ok { + return "" + } + name := runtime.FuncForPC(pc).Name() + if reInit.MatchString(name) { + return reInit.ReplaceAllString(name, "init") + } + if reClosure.MatchString(name) { + return reClosure.ReplaceAllString(name, "func") + } + return name +} === added file 'src/github.com/chai2010/gettext-go/gettext/caller_test.go' --- src/github.com/chai2010/gettext-go/gettext/caller_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/caller_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,89 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
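+// +// These tests pin down the exact caller names reported by callerName for +// package init functions, closures, nested closures, and the test harness.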
+ +package gettext + +import ( + "testing" +) + +var ( + testInitCallerName0 string = callerName(1) + testInitCallerName1 string + testInitCallerName2 string +) + +func init() { + testInitCallerName1 = callerName(1) +} + +func init() { + testInitCallerName2 = callerName(1) +} + +var tCaller = func(skip int) string { + return callerName(skip + 1) +} + +func TestCallerName(t *testing.T) { + var name string + + // init + name = `github.com/chai2010/gettext-go/gettext.init` + if s := testInitCallerName0; s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + name = `github.com/chai2010/gettext-go/gettext.init` + if s := testInitCallerName1; s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + name = `github.com/chai2010/gettext-go/gettext.init` + if s := testInitCallerName2; s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + + // tInit -> gettext.func + name = `github.com/chai2010/gettext-go/gettext.func` + if s := tCaller(0); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + + // caller stack + name = `github.com/chai2010/gettext-go/gettext.callerName` + if s := callerName(0); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + name = `github.com/chai2010/gettext-go/gettext.TestCallerName` + if s := callerName(1); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + name = `testing.tRunner` + if s := callerName(2); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + name = `runtime.goexit` + if s := callerName(3); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + name = "" + if s := callerName(4); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + + // closure + func() { + name = `github.com/chai2010/gettext-go/gettext.func` + if s := callerName(1); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + }() + func() { + func() { + name = `github.com/chai2010/gettext-go/gettext.func` + if s := callerName(1); s != name { + t.Fatalf("expect = %s, got = %s", name, s) + } + }() + }() +} === added file 'src/github.com/chai2010/gettext-go/gettext/doc.go' --- src/github.com/chai2010/gettext-go/gettext/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,66 @@ +// Copyright 2013 . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gettext implements a basic GNU's gettext library. + +Example: + import ( + "github.com/chai2010/gettext-go/gettext" + ) + + func main() { + gettext.SetLocale("zh_CN") + gettext.Textdomain("hello") + + // gettext.BindTextdomain("hello", "local", nil) // from local dir + // gettext.BindTextdomain("hello", "local.zip", nil) // from local zip file + // gettext.BindTextdomain("hello", "local.zip", zipData) // from embedded zip data + + gettext.BindTextdomain("hello", "local", nil) + + // translate source text + fmt.Println(gettext.Gettext("Hello, world!")) + // Output: 你好, 世界! + + // translate resource + fmt.Println(string(gettext.Getdata("poems.txt"))) + // Output: ... 
+ } + +Translation directory structure ("../examples/local.zip"): + + Root: "path" or "file.zip/zipBaseName" + +-default # local: $(LC_MESSAGES) or $(LANG) or "default" + | +-LC_MESSAGES # just for `gettext.Gettext` + | | +-hello.mo # $(Root)/$(local)/LC_MESSAGES/$(domain).mo + | | \-hello.po # $(Root)/$(local)/LC_MESSAGES/$(domain).po + | | + | \-LC_RESOURCE # just for `gettext.Getdata` + | +-hello # each domain maps to a dir in resource translation + | +-favicon.ico # $(Root)/$(local)/LC_RESOURCE/$(domain)/$(filename) + | \-poems.txt + | + \-zh_CN # simplified Chinese translation + +-LC_MESSAGES + | +-hello.mo # try "$(domain).mo" first + | \-hello.po # try "$(domain).po" second + | + \-LC_RESOURCE + +-hello + +-favicon.ico # try "$(local)/$(domain)/file" first + \-poems.txt # try "default/$(domain)/file" second + +See: + http://en.wikipedia.org/wiki/Gettext + http://www.gnu.org/software/gettext/manual/html_node + http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html + http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html + http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html + http://www.poedit.net/ + +Please report bugs to . +Thanks! +*/ +package gettext === added file 'src/github.com/chai2010/gettext-go/gettext/domain.go' --- src/github.com/chai2010/gettext-go/gettext/domain.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/domain.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "sync" +) + +type domainManager struct { + mutex sync.Mutex + locale string + domain string + domainMap map[string]*fileSystem + trTextMap map[string]*translator +} + +func newDomainManager() *domainManager { + return &domainManager{ + locale: DefaultLocale, + domainMap: make(map[string]*fileSystem), + trTextMap: make(map[string]*translator), + } +} + +func (p *domainManager) makeTrMapKey(domain, locale string) string { + return domain + "_$$$_" + locale +} + +func (p *domainManager) Bind(domain, path string, data []byte) (domains, paths []string) { + p.mutex.Lock() + defer p.mutex.Unlock() + + switch { + case domain != "" && path != "": // bind new domain + p.bindDomainTranslators(domain, path, data) + case domain != "" && path == "": // delete domain + p.deleteDomain(domain) + } + + // return all bound domains + for k, fs := range p.domainMap { + domains = append(domains, k) + paths = append(paths, fs.FsName) + } + return +} + +func (p *domainManager) SetLocale(locale string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + if locale != "" { + p.locale = locale + } + return p.locale +} + +func (p *domainManager) SetDomain(domain string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + if domain != "" { + p.domain = domain + } + return p.domain +} + +func (p *domainManager) Getdata(name string) []byte { + return p.getdata(p.domain, name) +} + +func (p *domainManager) DGetdata(domain, name string) []byte { + return p.getdata(domain, name) +} + +func (p *domainManager) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(p.domain, msgctxt, msgid, msgidPlural, n) +} + +func (p *domainManager) DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, msgctxt, msgid, msgidPlural, n) +} + +func (p *domainManager)
gettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + if p.locale == "" || p.domain == "" { + return msgid + } + if _, ok := p.domainMap[domain]; !ok { + return msgid + } + if f, ok := p.trTextMap[p.makeTrMapKey(domain, p.locale)]; ok { + return f.PNGettext(msgctxt, msgid, msgidPlural, n) + } + return msgid +} + +func (p *domainManager) getdata(domain, name string) []byte { + if p.locale == "" || p.domain == "" { + return nil + } + if _, ok := p.domainMap[domain]; !ok { + return nil + } + if fs, ok := p.domainMap[domain]; ok { + if data, err := fs.LoadResourceFile(domain, p.locale, name); err == nil { + return data + } + if p.locale != "default" { + if data, err := fs.LoadResourceFile(domain, "default", name); err == nil { + return data + } + } + } + return nil +} === added file 'src/github.com/chai2010/gettext-go/gettext/domain_helper.go' --- src/github.com/chai2010/gettext-go/gettext/domain_helper.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/domain_helper.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "fmt" + "strings" +) + +func (p *domainManager) bindDomainTranslators(domain, path string, data []byte) { + if _, ok := p.domainMap[domain]; ok { + p.deleteDomain(domain) // delete old domain + } + fs := newFileSystem(path, data) + for locale, _ := range fs.LocaleMap { + trMapKey := p.makeTrMapKey(domain, locale) + if data, err := fs.LoadMessagesFile(domain, locale, ".mo"); err == nil { + p.trTextMap[trMapKey], _ = newMoTranslator( + fmt.Sprintf("%s_%s.mo", domain, locale), + data, + ) + continue + } + if data, err := fs.LoadMessagesFile(domain, locale, ".po"); err == nil { + p.trTextMap[trMapKey], _ = newPoTranslator( + fmt.Sprintf("%s_%s.po", domain, locale), + data, + ) + continue + } + p.trTextMap[p.makeTrMapKey(domain, locale)] = nilTranslator + } + p.domainMap[domain] = fs +} + +func (p *domainManager) deleteDomain(domain string) { + if _, ok := p.domainMap[domain]; !ok { + return + } + // delete all mo files + trMapKeyPrefix := p.makeTrMapKey(domain, "") + for k, _ := range p.trTextMap { + if strings.HasPrefix(k, trMapKeyPrefix) { + delete(p.trTextMap, k) + } + } + delete(p.domainMap, domain) +} === added file 'src/github.com/chai2010/gettext-go/gettext/fs.go' --- src/github.com/chai2010/gettext-go/gettext/fs.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/fs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,187 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
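+// +// fs.go implements the translation "file system": message catalogs and +// resource files are loaded either from a plain directory tree or from a +// zip archive (a file on disk or zip data embedded in the binary).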
+ +package gettext + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "strings" +) + +type fileSystem struct { + FsName string + FsRoot string + FsZipData []byte + LocaleMap map[string]bool +} + +func newFileSystem(path string, data []byte) *fileSystem { + fs := &fileSystem{ + FsName: path, + FsZipData: data, + } + if err := fs.init(); err != nil { + log.Printf("gettext-go: invalid domain, err = %v", err) + } + return fs +} + +func (p *fileSystem) init() error { + zipName := func(name string) string { + if x := strings.LastIndexAny(name, `\/`); x != -1 { + name = name[x+1:] + } + name = strings.TrimSuffix(name, ".zip") + return name + } + + // zip data + if len(p.FsZipData) != 0 { + p.FsRoot = zipName(p.FsName) + p.LocaleMap = p.lsZip(p.FsZipData) + return nil + } + + // local dir or zip file + fi, err := os.Stat(p.FsName) + if err != nil { + return err + } + + // local dir + if fi.IsDir() { + p.FsRoot = p.FsName + p.LocaleMap = p.lsDir(p.FsName) + return nil + } + + // local zip file + p.FsZipData, err = ioutil.ReadFile(p.FsName) + if err != nil { + return err + } + p.FsRoot = zipName(p.FsName) + p.LocaleMap = p.lsZip(p.FsZipData) + return nil +} + +func (p *fileSystem) LoadMessagesFile(domain, local, ext string) ([]byte, error) { + if len(p.FsZipData) == 0 { + trName := p.makeMessagesFileName(domain, local, ext) + rcData, err := ioutil.ReadFile(trName) + if err != nil { + return nil, err + } + return rcData, nil + } else { + r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData))) + if err != nil { + return nil, err + } + + trName := p.makeMessagesFileName(domain, local, ext) + for _, f := range r.File { + if f.Name != trName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") + } +} + +func (p *fileSystem) LoadResourceFile(domain, local, name string) ([]byte, error) { + if len(p.FsZipData) == 0 { + rcName := p.makeResourceFileName(domain, local, name) + rcData, err := ioutil.ReadFile(rcName) + if err != nil { + return nil, err + } + return rcData, nil + } else { + r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData))) + if err != nil { + return nil, err + } + + rcName := p.makeResourceFileName(domain, local, name) + for _, f := range r.File { + if f.Name != rcName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") + } +} + +func (p *fileSystem) makeMessagesFileName(domain, local, ext string) string { + return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.FsRoot, local, domain, ext) +} + +func (p *fileSystem) makeResourceFileName(domain, local, name string) string { + return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.FsRoot, local, domain, name) +} + +func (p *fileSystem) lsZip(data []byte) map[string]bool { + r, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + return nil + } + ssMap := make(map[string]bool) + for _, f := range r.File { + if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } 
+			if s != "" {
+				ssMap[s] = true
+			}
+			continue
+		}
+	}
+	return ssMap
+}
+
+func (p *fileSystem) lsDir(path string) map[string]bool {
+	list, err := ioutil.ReadDir(path)
+	if err != nil {
+		return nil
+	}
+	ssMap := make(map[string]bool)
+	for _, dir := range list {
+		if dir.IsDir() {
+			ssMap[dir.Name()] = true
+		}
+	}
+	return ssMap
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/gettext.go'
--- src/github.com/chai2010/gettext-go/gettext/gettext.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/gettext.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,184 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gettext
+
+var (
+	defaultManager = newDomainManager()
+)
+
+var (
+	DefaultLocale = getDefaultLocale() // use $(LC_MESSAGES) or $(LANG) or "default"
+)
+
+// SetLocale sets and queries the program's current locale.
+//
+// If the locale is not an empty string, it sets the new locale.
+//
+// If the locale is an empty string, nothing is changed.
+//
+// The return value is the current locale.
+//
+// Examples:
+//	SetLocale("")      // get locale: return DefaultLocale
+//	SetLocale("zh_CN") // set locale: return zh_CN
+//	SetLocale("")      // get locale: return zh_CN
+func SetLocale(locale string) string {
+	return defaultManager.SetLocale(locale)
+}
+
+// BindTextdomain sets and queries the program's domains.
+//
+// If the domain and path are both non-empty strings, it binds the new
+// domain; if the domain already exists, it returns an error.
+//
+// If the domain is a non-empty string but the path is an empty string,
+// it deletes the domain; if the domain does not exist, it returns an error.
+//
+// If the domain and the path are both empty strings, nothing is changed.
+//
+// The return values are all bound domains and their paths.
+//
+// Examples:
+//	BindTextdomain("poedit", "local", nil) // bind "poedit" domain
+//	BindTextdomain("", "", nil)            // return all domains
+//	BindTextdomain("poedit", "", nil)      // delete "poedit" domain
+//	BindTextdomain("", "", nil)            // return all domains
+//
+// Use zip file:
+//	BindTextdomain("poedit", "local.zip", nil)     // bind "poedit" domain
+//	BindTextdomain("poedit", "local.zip", zipData) // bind "poedit" domain
+//
+func BindTextdomain(domain, path string, zipData []byte) (domains, paths []string) {
+	return defaultManager.Bind(domain, path, zipData)
+}
+
+// Textdomain sets and retrieves the current message domain.
+//
+// If the domain is not an empty string, it sets the new domain.
+//
+// If the domain is an empty string, nothing is changed.
+//
+// The return value is the domain currently in use.
+//
+// Examples:
+//	Textdomain("poedit") // set domain: poedit
+//	Textdomain("")       // get domain: return poedit
+func Textdomain(domain string) string {
+	return defaultManager.SetDomain(domain)
+}
+
+// Gettext attempts to translate a text string into the user's native
+// language, by looking up the translation in a message catalog.
+//
+// It uses the caller's function name as the msgctxt.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.Gettext("Hello") // msgctxt is "some/package/name.Foo"
+//	}
+func Gettext(msgid string) string {
+	return PGettext(callerName(2), msgid)
+}
+
+// Getdata attempts to translate a resource file into the user's native
+// language, by looking up the translation in a message catalog.
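+//
+// The data is looked up under the current locale and domain as
+// $(Root)/$(Locale)/LC_RESOURCE/$(Domain)/name (see makeResourceFileName).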
+//
+// Examples:
+//	func Foo() {
+//		Textdomain("hello")
+//		BindTextdomain("hello", "local.zip", nilOrZipData)
+//		poems := gettext.Getdata("poems.txt")
+//	}
+func Getdata(name string) []byte {
+	return defaultManager.Getdata(name)
+}
+
+// NGettext attempts to translate a text string into the user's native
+// language, by looking up the appropriate plural form of the translation
+// in a message catalog.
+//
+// It uses the caller's function name as the msgctxt.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.NGettext("%d people", "%d peoples", 2)
+//	}
func NGettext(msgid, msgidPlural string, n int) string {
+	return PNGettext(callerName(2), msgid, msgidPlural, n)
+}
+
+// PGettext attempts to translate a text string into the user's native
+// language, by looking up the translation in a message catalog.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.PGettext("gettext-go.example", "Hello") // msgctxt is "gettext-go.example"
+//	}
+func PGettext(msgctxt, msgid string) string {
+	return PNGettext(msgctxt, msgid, "", 0)
+}
+
+// PNGettext attempts to translate a text string into the user's native
+// language, by looking up the appropriate plural form of the translation
+// in a message catalog.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.PNGettext("gettext-go.example", "%d people", "%d peoples", 2)
+//	}
+func PNGettext(msgctxt, msgid, msgidPlural string, n int) string {
+	return defaultManager.PNGettext(msgctxt, msgid, msgidPlural, n)
+}
+
+// DGettext is like Gettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DGettext("poedit", "Hello")
+//	}
+func DGettext(domain, msgid string) string {
+	return DPGettext(domain, callerName(2), msgid)
+}
+
+// DNGettext is like NGettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DNGettext("poedit", "%d people", "%d peoples", 2)
+//	}
+func DNGettext(domain, msgid, msgidPlural string, n int) string {
+	return DPNGettext(domain, callerName(2), msgid, msgidPlural, n)
+}
+
+// DPGettext is like PGettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DPGettext("poedit", "gettext-go.example", "Hello")
+//	}
+func DPGettext(domain, msgctxt, msgid string) string {
+	return DPNGettext(domain, msgctxt, msgid, "", 0)
+}
+
+// DPNGettext is like PNGettext(), but looks up the message in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DPNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2)
+//	}
+func DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string {
+	return defaultManager.DPNGettext(domain, msgctxt, msgid, msgidPlural, n)
+}
+
+// DGetdata is like Getdata(), but looks up the resource in the specified domain.
+//
+// Examples:
+//	func Foo() {
+//		msg := gettext.DGetdata("hello", "poems.txt")
+//	}
+func DGetdata(domain, name string) []byte {
+	return defaultManager.DGetdata(domain, name)
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/gettext_test.go'
--- src/github.com/chai2010/gettext-go/gettext/gettext_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/gettext_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,253 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
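+//
+// The tests below exercise the three supported bindings in turn: a plain
+// local directory, a local zip file, and zip data embedded in the binary.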
+
+package gettext
+
+import (
+	"io/ioutil"
+	"strings"
+	"testing"
+)
+
+var testZipData = func() []byte {
+	if data, err := ioutil.ReadFile("../examples/local.zip"); err == nil {
+		return data
+	}
+	return nil
+}()
+
+func TestGettext(t *testing.T) {
+	Textdomain("hello")
+
+	// local file system
+	BindTextdomain("hello", "../examples/local", nil)
+	testGettext(t, true)
+	BindTextdomain("hello", "", nil)
+	testGettext(t, false)
+
+	// local zip file system
+	BindTextdomain("hello", "../examples/local.zip", nil)
+	testGettext(t, true)
+	BindTextdomain("hello", "", nil)
+	testGettext(t, false)
+
+	// embedded zip file system
+	BindTextdomain("hello", "local.zip", testZipData)
+	testGettext(t, true)
+	BindTextdomain("hello", "", nil)
+	testGettext(t, false)
+}
+
+func TestGetdata(t *testing.T) {
+	Textdomain("hello")
+
+	// local file system
+	BindTextdomain("hello", "../examples/local", nil)
+	testGetdata(t, true)
+	BindTextdomain("hello", "", nil)
+	testGetdata(t, false)
+
+	// local zip file system
+	BindTextdomain("hello", "../examples/local.zip", nil)
+	testGetdata(t, true)
+	BindTextdomain("hello", "", nil)
+	testGetdata(t, false)
+
+	// embedded zip file system
+	BindTextdomain("hello", "local.zip", testZipData)
+	testGetdata(t, true)
+	BindTextdomain("hello", "", nil)
+	testGetdata(t, false)
+}
+
+func testGettext(t *testing.T, hasTranslation bool) {
+	for i, v := range testTexts {
+		if lang := SetLocale(v.lang); lang != v.lang {
+			t.Fatalf("%d: expect = %s, got = %v", i, v.lang, lang)
+		}
+		if hasTranslation {
+			if dst := PGettext(v.ctx, v.src); dst != v.dst {
+				t.Fatalf("%d: expect = %q, got = %q", i, v.dst, dst)
+			}
+		} else {
+			if dst := PGettext(v.ctx, v.src); dst != v.src {
+				t.Fatalf("%d: expect = %s, got = %v", i, v.src, dst)
+			}
+		}
+	}
+}
+
+func testGetdata(t *testing.T, hasTranslation bool) {
+	for i, v := range testResources {
+		if lang := SetLocale(v.lang); lang != v.lang {
+			t.Fatalf("%d: expect = %s, got = %v", i, v.lang, lang)
+		}
+		if hasTranslation {
+			v.data = strings.Replace(v.data, "\r", "", -1)
+			data := strings.Replace(string(Getdata(v.path)), "\r", "", -1)
+			if data != v.data {
+				t.Fatalf("%d: expect = %q, got = %q", i, v.data, data)
+			}
+		} else {
+			if data := string(Getdata(v.path)); data != "" {
+				t.Fatalf("%d: expect = %s, got = %v", i, "", data)
+			}
+		}
+	}
+}
+
+func BenchmarkGettext(b *testing.B) {
+	SetLocale("zh_CN")
+	BindTextdomain("hello", "../examples/local", nil)
+	Textdomain("hello")
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		PGettext(testTexts[0].ctx, testTexts[0].src)
+	}
+}
+func BenchmarkGettext_Zip(b *testing.B) {
+	SetLocale("zh_CN")
+	BindTextdomain("hello", "../examples/local.zip", nil)
+	Textdomain("hello")
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		PGettext(testTexts[0].ctx, testTexts[0].src)
+	}
+}
+
+func BenchmarkGetdata(b *testing.B) {
+	SetLocale("zh_CN")
+	BindTextdomain("hello", "../examples/local", nil)
+	Textdomain("hello")
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Getdata(testResources[0].path)
+	}
+}
+func BenchmarkGetdata_Zip(b *testing.B) {
+	SetLocale("zh_CN")
+	BindTextdomain("hello", "../examples/local.zip", nil)
+	Textdomain("hello")
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Getdata(testResources[0].path)
+	}
+}
+
+var testTexts = []struct {
+	lang string
+	ctx  string
+	src  string
+	dst  string
+}{
+	// default
+	{"default", "main.init", "Gettext in init.", "Gettext in init."},
+	{"default", "main.main", "Hello, world!", "Hello, world!"},
+	{"default", "main.func", "Gettext in func.", "Gettext in func."},
+	{"default", "github.com/chai2010/gettext-go/examples/hi.SayHi", "pkg hi: Hello, world!", "pkg hi: Hello, world!"},
+
+	// zh_CN
+	{"zh_CN", "main.init", "Gettext in init.", "Init函数中的Gettext."},
+	{"zh_CN", "main.main", "Hello, world!", "你好, 世界!"},
+	{"zh_CN", "main.func", "Gettext in func.", "闭包函数中的Gettext."},
+	{"zh_CN", "github.com/chai2010/gettext-go/examples/hi.SayHi", "pkg hi: Hello, world!", "来自\"Hi\"包的问候: 你好, 世界!"},
+
+	// zh_TW
+	{"zh_TW", "main.init", "Gettext in init.", "Init函數中的Gettext."},
+	{"zh_TW", "main.main", "Hello, world!", "你好, 世界!"},
+	{"zh_TW", "main.func", "Gettext in func.", "閉包函數中的Gettext."},
+	{"zh_TW", "github.com/chai2010/gettext-go/examples/hi.SayHi", "pkg hi: Hello, world!", "來自\"Hi\"包的問候: 你好, 世界!"},
+}
+
+var testResources = []struct {
+	lang string
+	path string
+	data string
+}{
+	// default
+	{
+		"default",
+		"poems.txt",
+		`Drinking Alone Under the Moon
+Li Bai
+
+flowers among one jar liquor
+alone carouse without mutual intimate
+
+raise cup greet bright moon
+facing shadow become three persons
+
+moon since not free to-drink
+shadow follow accompany my body
+
+briefly accompany moon with shadow
+go happy should avail-oneself-of spring
+
+my song moon walk-to-and-fro irresolute
+my dance shadow fragments disorderly
+
+sober time together mix glad
+drunk after each divide scatter
+
+eternal connect without consciouness-of-self roam
+mutual appointment remote cloud Milky-Way
+`,
+	},
+
+	// zh_CN
+	{
+		"zh_CN",
+		"poems.txt",
+		`yuèxiàdúzhuó
+月下独酌
+lǐbái
+李白
+
+huājiānyīhújiǔ,dúzhuówúxiānɡqīn。
+花间一壶酒,独酌无相亲。
+jǔbēiyāomínɡyuè,duìyǐnɡchénɡsānrén。
+举杯邀明月,对影成三人。
+yuèjìbùjiěyǐn,yǐnɡtúsuíwǒshēn。
+月既不解饮,影徒随我身。
+zànbànyuèjiānɡyǐnɡ,xínɡlèxūjíchūn。
+暂伴月将影,行乐须及春。
+wǒɡēyuèpáihuái,wǒwǔyǐnɡlínɡluàn。
+我歌月徘徊,我舞影零乱。
+xǐnɡshítónɡjiāohuān,zuìhòuɡèfēnsàn。
+醒时同交欢,醉后各分散。
+yǒnɡjiéwúqínɡyóu,xiānɡqīmiǎoyúnhàn。
+永结无情游,相期邈云汉。
+`,
+	},
+
+	// zh_TW
+	{
+		"zh_TW",
+		"poems.txt",
+		`yuèxiàdúzhuó
+月下獨酌
+lǐbái
+李白
+
+huājiānyīhújiǔ,dúzhuówúxiānɡqīn。
+花間一壺酒,獨酌無相親。
+jǔbēiyāomínɡyuè,duìyǐnɡchénɡsānrén。
+舉杯邀明月,對影成三人。
+yuèjìbùjiěyǐn,yǐnɡtúsuíwǒshēn。
+月既不解飲,影徒隨我身。
+zànbànyuèjiānɡyǐnɡ,xínɡlèxūjíchūn。
+暫伴月將影,行樂須及春。
+wǒɡēyuèpáihuái,wǒwǔyǐnɡlínɡluàn。
+我歌月徘徊,我舞影零亂。
+xǐnɡshítónɡjiāohuān,zuìhòuɡèfēnsàn。
+醒時同交歡,醉後各分散。
+yǒnɡjiéwúqínɡyóu,xiānɡqīmiǎoyúnhàn。
+永結無情遊,相期邈雲漢。
+`,
+	},
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/hello.go'
--- src/github.com/chai2010/gettext-go/gettext/hello.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/hello.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,22 @@
+// Copyright 2013 ChaiShushan. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package main
+
+import (
+	"fmt"
+
+	"code.google.com/p/gettext-go/gettext"
+)
+
+func main() {
+	gettext.SetLocale("zh_CN")
+	gettext.BindTextdomain("hello", "../examples/local", nil)
+	gettext.Textdomain("hello")
+
+	fmt.Println(gettext.Gettext("Hello, world!"))
+	// Output: 你好, 世界!
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/local.go'
--- src/github.com/chai2010/gettext-go/gettext/local.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/local.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,34 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "os" + "strings" +) + +func getDefaultLocale() string { + if v := os.Getenv("LC_MESSAGES"); v != "" { + return simplifiedLocale(v) + } + if v := os.Getenv("LANG"); v != "" { + return simplifiedLocale(v) + } + return "default" +} + +func simplifiedLocale(lang string) string { + // en_US/en_US.UTF-8/zh_CN/zh_TW/el_GR@euro/... + if idx := strings.Index(lang, ":"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "@"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "."); idx != -1 { + lang = lang[:idx] + } + return strings.TrimSpace(lang) +} === added directory 'src/github.com/chai2010/gettext-go/gettext/mo' === added file 'src/github.com/chai2010/gettext-go/gettext/mo/doc.go' --- src/github.com/chai2010/gettext-go/gettext/mo/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/mo/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,74 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mo provides support for reading and writing GNU MO file. + +Examples: + import ( + "github.com/chai2010/gettext-go/gettext/mo" + ) + + func main() { + moFile, err := mo.Load("test.mo") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", moFile) + } + +GNU MO file struct: + + byte + +------------------------------------------+ + 0 | magic number = 0x950412de | + | | + 4 | file format revision = 0 | + | | + 8 | number of strings | == N + | | + 12 | offset of table with original strings | == O + | | + 16 | offset of table with translation strings | == T + | | + 20 | size of hashing table | == S + | | + 24 | offset of hashing table | == H + | | + . . + . (possibly more entries later) . + . . + | | + O | length & offset 0th string ----------------. + O + 8 | length & offset 1st string ------------------. + ... ... | | + O + ((N-1)*8)| length & offset (N-1)th string | | | + | | | | + T | length & offset 0th translation ---------------. + T + 8 | length & offset 1st translation -----------------. + ... ... | | | | + T + ((N-1)*8)| length & offset (N-1)th translation | | | | | + | | | | | | + H | start hash table | | | | | + ... ... | | | | + H + S * 4 | end hash table | | | | | + | | | | | | + | NUL terminated 0th string <----------------' | | | + | | | | | + | NUL terminated 1st string <------------------' | | + | | | | + ... ... | | + | | | | + | NUL terminated 0th translation <---------------' | + | | | + | NUL terminated 1st translation <-----------------' + | | + ... ... + | | + +------------------------------------------+ + +The GNU MO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html. +*/ +package mo === added file 'src/github.com/chai2010/gettext-go/gettext/mo/encoder.go' --- src/github.com/chai2010/gettext-go/gettext/mo/encoder.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/mo/encoder.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,124 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
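+//
+// This file is the writer side of the MO format: encodeFile lays out the
+// header, the sorted msgid/msgstr string data, and the two offset tables
+// that LoadData reads back. A minimal round trip (sketch):
+//
+//	var f File
+//	f.Messages = append(f.Messages, Message{MsgId: "Hello", MsgStr: "你好"})
+//	f2, err := LoadData(f.Data()) // f2.Messages mirrors f.Messages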
+
+package mo
+
+import (
+	"bytes"
+	"encoding/binary"
+	"sort"
+	"strings"
+)
+
+type moHeader struct {
+	MagicNumber  uint32
+	MajorVersion uint16
+	MinorVersion uint16
+	MsgIdCount   uint32
+	MsgIdOffset  uint32
+	MsgStrOffset uint32
+	HashSize     uint32
+	HashOffset   uint32
+}
+
+type moStrPos struct {
+	Size uint32 // field order matters: the MO layout stores length, then offset
+	Addr uint32
+}
+
+func encodeFile(f *File) []byte {
+	hdr := &moHeader{
+		MagicNumber: MoMagicLittleEndian,
+	}
+	data := encodeData(hdr, f)
+	data = append(encodeHeader(hdr), data...)
+	return data
+}
+
+// encodeData encodes the messages and fills in the moHeader offsets.
+func encodeData(hdr *moHeader, f *File) []byte {
+	msgList := []Message{f.MimeHeader.toMessage()}
+	for _, v := range f.Messages {
+		if len(v.MsgId) == 0 {
+			continue
+		}
+		if len(v.MsgStr) == 0 && len(v.MsgStrPlural) == 0 {
+			continue
+		}
+		msgList = append(msgList, v)
+	}
+	sort.Sort(byMessages(msgList))
+
+	var buf bytes.Buffer
+	var msgIdPosList = make([]moStrPos, len(msgList))
+	var msgStrPosList = make([]moStrPos, len(msgList))
+	for i, v := range msgList {
+		// write msgid
+		msgId := encodeMsgId(v)
+		msgIdPosList[i].Addr = uint32(buf.Len() + MoHeaderSize)
+		msgIdPosList[i].Size = uint32(len(msgId))
+		buf.WriteString(msgId)
+		// write msgstr
+		msgStr := encodeMsgStr(v)
+		msgStrPosList[i].Addr = uint32(buf.Len() + MoHeaderSize)
+		msgStrPosList[i].Size = uint32(len(msgStr))
+		buf.WriteString(msgStr)
+	}
+
+	hdr.MsgIdOffset = uint32(buf.Len() + MoHeaderSize)
+	binary.Write(&buf, binary.LittleEndian, msgIdPosList)
+	hdr.MsgStrOffset = uint32(buf.Len() + MoHeaderSize)
+	binary.Write(&buf, binary.LittleEndian, msgStrPosList)
+
+	hdr.MsgIdCount = uint32(len(msgList))
+	return buf.Bytes()
+}
+
+// encodeHeader must be called after encodeData.
+func encodeHeader(hdr *moHeader) []byte {
+	var buf bytes.Buffer
+	binary.Write(&buf, binary.LittleEndian, hdr)
+	return buf.Bytes()
+}
+
+func encodeMsgId(v Message) string {
+	if v.MsgContext != "" && v.MsgIdPlural != "" {
+		return v.MsgContext + EotSeparator + v.MsgId + NulSeparator + v.MsgIdPlural
+	}
+	if v.MsgContext != "" && v.MsgIdPlural == "" {
+		return v.MsgContext + EotSeparator + v.MsgId
+	}
+	if v.MsgContext == "" && v.MsgIdPlural != "" {
+		return v.MsgId + NulSeparator + v.MsgIdPlural
+	}
+	return v.MsgId
+}
+
+func encodeMsgStr(v Message) string {
+	if v.MsgIdPlural != "" {
+		return strings.Join(v.MsgStrPlural, NulSeparator)
+	}
+	return v.MsgStr
+}
+
+type byMessages []Message
+
+func (d byMessages) Len() int {
+	return len(d)
+}
+func (d byMessages) Less(i, j int) bool {
+	if a, b := d[i].MsgContext, d[j].MsgContext; a != b {
+		return a < b
+	}
+	if a, b := d[i].MsgId, d[j].MsgId; a != b {
+		return a < b
+	}
+	if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b {
+		return a < b
+	}
+	return false
+}
+func (d byMessages) Swap(i, j int) {
+	d[i], d[j] = d[j], d[i]
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/encoder_test.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/encoder_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/encoder_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,55 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
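+//
+// TestFile_Data below round-trips testMoFile through File.Data and LoadData,
+// then compares the decoded messages entry by entry.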
+
+package mo
+
+import (
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func TestFile_Data(t *testing.T) {
+	f, err := LoadData(testMoFile.Data())
+	if err != nil {
+		t.Fatal(err)
+	}
+	if a, b := len(f.Messages), len(testMoFile.Messages); a != b {
+		t.Logf("size not equal: expect = %d, got = %d", b, a)
+	}
+	for i, v := range f.Messages {
+		if !reflect.DeepEqual(&v, &testMoFile.Messages[i]) {
+			t.Fatalf("%d: expect = %v, got = %v", i, testMoFile.Messages[i], v)
+		}
+	}
+}
+
+func init() {
+	sort.Sort(byMessages(testMoFile.Messages))
+}
+
+var testMoFile = &File{
+	Messages: []Message{
+		Message{
+			MsgContext: "main.init",
+			MsgId:      "Gettext in init.",
+			MsgStr:     "Init函数中的Gettext.",
+		},
+		Message{
+			MsgContext: "main.main",
+			MsgId:      "Hello, world!",
+			MsgStr:     "你好, 世界!",
+		},
+		Message{
+			MsgContext: "main.func",
+			MsgId:      "Gettext in func.",
+			MsgStr:     "闭包函数中的Gettext.",
+		},
+		Message{
+			MsgContext: "code.google.com/p/gettext-go/examples/hi.SayHi",
+			MsgId:      "pkg hi: Hello, world!",
+			MsgStr:     "来自\"Hi\"包的问候: 你好, 世界!",
+		},
+	},
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/file.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/file.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/file.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,193 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
+const (
+	MoHeaderSize        = 28
+	MoMagicLittleEndian = 0x950412de
+	MoMagicBigEndian    = 0xde120495
+
+	EotSeparator = "\x04" // msgctxt and msgid separator
+	NulSeparator = "\x00" // msgid and msgstr separator
+)
+
+// File represents an MO File.
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
+type File struct {
+	MagicNumber  uint32
+	MajorVersion uint16
+	MinorVersion uint16
+	MsgIdCount   uint32
+	MsgIdOffset  uint32
+	MsgStrOffset uint32
+	HashSize     uint32
+	HashOffset   uint32
+	MimeHeader   Header
+	Messages     []Message
+}
+
+// Load loads a named mo file.
+func Load(name string) (*File, error) {
+	data, err := ioutil.ReadFile(name)
+	if err != nil {
+		return nil, err
+	}
+	return LoadData(data)
+}
+
+// LoadData loads mo file format data.
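+// Both little-endian and big-endian files are accepted: the byte order is
+// selected from the magic number in the first four bytes.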
+func LoadData(data []byte) (*File, error) { + r := bytes.NewReader(data) + + var magicNumber uint32 + if err := binary.Read(r, binary.LittleEndian, &magicNumber); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + var bo binary.ByteOrder + switch magicNumber { + case MoMagicLittleEndian: + bo = binary.LittleEndian + case MoMagicBigEndian: + bo = binary.BigEndian + default: + return nil, fmt.Errorf("gettext: %v", "invalid magic number") + } + + var header struct { + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 + } + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if v := header.MajorVersion; v != 0 && v != 1 { + return nil, fmt.Errorf("gettext: %v", "invalid version number") + } + if v := header.MinorVersion; v != 0 && v != 1 { + return nil, fmt.Errorf("gettext: %v", "invalid version number") + } + + msgIdStart := make([]uint32, header.MsgIdCount) + msgIdLen := make([]uint32, header.MsgIdCount) + if _, err := r.Seek(int64(header.MsgIdOffset), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + for i := 0; i < int(header.MsgIdCount); i++ { + if err := binary.Read(r, bo, &msgIdLen[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if err := binary.Read(r, bo, &msgIdStart[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + } + + msgStrStart := make([]int32, header.MsgIdCount) + msgStrLen := make([]int32, header.MsgIdCount) + if _, err := r.Seek(int64(header.MsgStrOffset), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + for i := 0; i < int(header.MsgIdCount); i++ { + if err := binary.Read(r, bo, &msgStrLen[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if err := binary.Read(r, bo, &msgStrStart[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + } + + file := &File{ + MagicNumber: magicNumber, + MajorVersion: header.MajorVersion, + MinorVersion: header.MinorVersion, + MsgIdCount: header.MsgIdCount, + MsgIdOffset: header.MsgIdOffset, + MsgStrOffset: header.MsgStrOffset, + HashSize: header.HashSize, + HashOffset: header.HashOffset, + } + for i := 0; i < int(header.MsgIdCount); i++ { + if _, err := r.Seek(int64(msgIdStart[i]), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + msgIdData := make([]byte, msgIdLen[i]) + if _, err := r.Read(msgIdData); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + + if _, err := r.Seek(int64(msgStrStart[i]), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + msgStrData := make([]byte, msgStrLen[i]) + if _, err := r.Read(msgStrData); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + + if len(msgIdData) == 0 { + var msg = Message{ + MsgId: string(msgIdData), + MsgStr: string(msgStrData), + } + file.MimeHeader.fromMessage(&msg) + } else { + var msg = Message{ + MsgId: string(msgIdData), + MsgStr: string(msgStrData), + } + // Is this a context message? + if idx := strings.Index(msg.MsgId, EotSeparator); idx != -1 { + msg.MsgContext, msg.MsgId = msg.MsgId[:idx], msg.MsgId[idx+1:] + } + // Is this a plural message? + if idx := strings.Index(msg.MsgId, NulSeparator); idx != -1 { + msg.MsgId, msg.MsgIdPlural = msg.MsgId[:idx], msg.MsgId[idx+1:] + msg.MsgStrPlural = strings.Split(msg.MsgStr, NulSeparator) + msg.MsgStr = "" + } + file.Messages = append(file.Messages, msg) + } + } + + return file, nil +} + +// Save saves a mo file. 
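+// It encodes the file with Data and writes the result with file mode 0666.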
+func (f *File) Save(name string) error {
+	return ioutil.WriteFile(name, f.Data(), 0666)
+}
+
+// Data returns the mo file format data.
+func (f *File) Data() []byte {
+	return encodeFile(f)
+}
+
+// String returns the po format file string.
+func (f *File) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "# version: %d.%d\n", f.MajorVersion, f.MinorVersion)
+	fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String())
+	fmt.Fprintf(&buf, "\n")
+
+	for _, v := range f.Messages {
+		fmt.Fprintf(&buf, `msgid "%v"`+"\n", v.MsgId)
+		fmt.Fprintf(&buf, `msgstr "%s"`+"\n", v.MsgStr)
+		fmt.Fprintf(&buf, "\n")
+	}
+
+	return buf.String()
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/file_test.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/file_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/file_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,13 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"testing"
+)
+
+func TestFile(t *testing.T) {
+	//
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/header.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/header.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/header.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,109 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+// Header is the MO file's mime header entry. In the initial comments, the
+// placeholders "SOME DESCRIPTIVE TITLE", "YEAR" and "FIRST AUTHOR , YEAR"
+// ought to be replaced by sensible information.
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry
+type Header struct {
+	ProjectIdVersion        string // Project-Id-Version: PACKAGE VERSION
+	ReportMsgidBugsTo       string // Report-Msgid-Bugs-To: FIRST AUTHOR
+	POTCreationDate         string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE
+	PORevisionDate          string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
+	LastTranslator          string // Last-Translator: FIRST AUTHOR
+	LanguageTeam            string // Language-Team: golang-china
+	Language                string // Language: zh_CN
+	MimeVersion             string // MIME-Version: 1.0
+	ContentType             string // Content-Type: text/plain; charset=UTF-8
+	ContentTransferEncoding string // Content-Transfer-Encoding: 8bit
+	PluralForms             string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1;
+	XGenerator              string // X-Generator: Poedit 1.5.5
+	UnknowFields            map[string]string
+}
+
+func (p *Header) fromMessage(msg *Message) {
+	if msg.MsgId != "" || msg.MsgStr == "" {
+		return
+	}
+	lines := strings.Split(msg.MsgStr, "\n")
+	for i := 0; i < len(lines); i++ {
+		idx := strings.Index(lines[i], ":")
+		if idx < 0 {
+			continue
+		}
+		key := strings.TrimSpace(lines[i][:idx])
+		val := strings.TrimSpace(lines[i][idx+1:])
+		switch strings.ToUpper(key) {
+		case strings.ToUpper("Project-Id-Version"):
+			p.ProjectIdVersion = val
+		case strings.ToUpper("Report-Msgid-Bugs-To"):
+			p.ReportMsgidBugsTo = val
+		case strings.ToUpper("POT-Creation-Date"):
+			p.POTCreationDate = val
+		case strings.ToUpper("PO-Revision-Date"):
+			p.PORevisionDate = val
+		case strings.ToUpper("Last-Translator"):
+			p.LastTranslator = val
+		case strings.ToUpper("Language-Team"):
+			p.LanguageTeam = val
+		case strings.ToUpper("Language"):
+			p.Language = val
+		case strings.ToUpper("MIME-Version"):
+			p.MimeVersion = val
+		case strings.ToUpper("Content-Type"):
+			p.ContentType = val
+		case strings.ToUpper("Content-Transfer-Encoding"):
+			p.ContentTransferEncoding = val
+		case strings.ToUpper("Plural-Forms"):
+			p.PluralForms = val
+		case strings.ToUpper("X-Generator"):
+			p.XGenerator = val
+		default:
+			if p.UnknowFields == nil {
+				p.UnknowFields = make(map[string]string)
+			}
+			p.UnknowFields[key] = val
+		}
+	}
+}
+
+func (p *Header) toMessage() Message {
+	return Message{
+		MsgStr: p.String(),
+	}
+}
+
+// String returns the po format header string.
+func (p Header) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, `msgid ""`+"\n")
+	fmt.Fprintf(&buf, `msgstr ""`+"\n")
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language)
+	if p.MimeVersion != "" {
+		fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion)
+	}
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType)
+	fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding)
+	if p.PluralForms != "" {
+		fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Plural-Forms", p.PluralForms)
+	}
+	if p.XGenerator != "" {
+		fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator)
+	}
+	for k, v := range p.UnknowFields {
+		fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v)
+	}
+	return buf.String()
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/header_test.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/header_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/header_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,13 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"testing"
+)
+
+func TestHeader(t *testing.T) {
+	//
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/message.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/message.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/message.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,39 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Message is one entry of a MO file. A MO file is made up of many such
+// entries, each holding the relation between an original untranslated string
+// and its corresponding translation.
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
+type Message struct {
+	MsgContext   string   // msgctxt context
+	MsgId        string   // msgid untranslated-string
+	MsgIdPlural  string   // msgid_plural untranslated-string-plural
+	MsgStr       string   // msgstr translated-string
+	MsgStrPlural []string // msgstr[0] translated-string-case-0
+}
+
+// String returns the po format entry string.
+func (p Message) String() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId))
+	if p.MsgIdPlural != "" {
+		fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural))
+	}
+	if p.MsgStr != "" {
+		fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr))
+	}
+	for i := 0; i < len(p.MsgStrPlural); i++ {
+		fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i]))
+	}
+	return buf.String()
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/util.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/util.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/util.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,110 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"bytes"
+	"strings"
+)
+
+func decodePoString(text string) string {
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		left := strings.Index(lines[i], `"`)
+		right := strings.LastIndex(lines[i], `"`)
+		if left < 0 || right < 0 || left == right {
+			lines[i] = ""
+			continue
+		}
+		line := lines[i][left+1 : right]
+		data := make([]byte, 0, len(line))
+		for i := 0; i < len(line); i++ {
+			if line[i] != '\\' {
+				data = append(data, line[i])
+				continue
+			}
+			if i+1 >= len(line) {
+				break
+			}
+			switch line[i+1] {
+			case 'n': // \\n -> \n
+				data = append(data, '\n')
+				i++
+			case 't': // \\t -> \t
+				data = append(data, '\t')
+				i++
+			case '\\': // \\\\ -> \
+				data = append(data, '\\')
+				i++
+			}
+		}
+		lines[i] = string(data)
+	}
+	return strings.Join(lines, "")
+}
+
+func encodePoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		if lines[i] == "" {
+			if i != len(lines)-1 {
+				buf.WriteString(`"\n"` + "\n")
+			}
+			continue
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		buf.WriteString(`\n"` + "\n")
+	}
+	return buf.String()
+}
+
+func encodeCommentPoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	if len(lines) > 1 {
+		buf.WriteString(`""` + "\n")
+	}
+	for i := 0; i < len(lines); i++ {
+		if len(lines) > 1 {
+			buf.WriteString("#| ")
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		if i < len(lines)-1 {
+			buf.WriteString(`\n"` + "\n")
+		} else {
+			buf.WriteString(`"`)
+		}
+	}
+	return buf.String()
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/mo/util_test.go'
--- src/github.com/chai2010/gettext-go/gettext/mo/util_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/mo/util_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,68 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mo
+
+import (
+	"testing"
+)
+
+func TestDecodePoString(t *testing.T) {
+	if s := decodePoString(poStrEncode); s != poStrDecode {
+		t.Fatalf(`expect = %s got = %s`, poStrDecode, s)
+	}
+}
+
+func TestEncodePoString(t *testing.T) {
+	if s := encodePoString(poStrDecode); s != poStrEncodeStd {
+		t.Fatalf(`expect = %s; got = %s`, poStrEncodeStd, s)
+	}
+}
+
+const poStrEncode = `# noise
+123456789
+"Project-Id-Version: Poedit 1.5\n"
+"Report-Msgid-Bugs-To: poedit@googlegroups.com\n"
+"POT-Creation-Date: 2012-07-30 10:34+0200\n"
+"PO-Revision-Date: 2013-02-24 21:00+0800\n"
+"Last-Translator: Christopher Meng \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"X-Generator: Poedit 1.5.5\n"
+"TestPoString: abc"
+"123\n"
+>>
+123456???
+`
+
+const poStrEncodeStd = `"Project-Id-Version: Poedit 1.5\n"
+"Report-Msgid-Bugs-To: poedit@googlegroups.com\n"
+"POT-Creation-Date: 2012-07-30 10:34+0200\n"
+"PO-Revision-Date: 2013-02-24 21:00+0800\n"
+"Last-Translator: Christopher Meng \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"X-Generator: Poedit 1.5.5\n"
+"TestPoString: abc123\n"
+`
+
+const poStrDecode = `Project-Id-Version: Poedit 1.5
+Report-Msgid-Bugs-To: poedit@googlegroups.com
+POT-Creation-Date: 2012-07-30 10:34+0200
+PO-Revision-Date: 2013-02-24 21:00+0800
+Last-Translator: Christopher Meng 
+Language-Team: 
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Plural-Forms: nplurals=1; plural=0;
+X-Generator: Poedit 1.5.5
+TestPoString: abc123
+`
=== added directory 'src/github.com/chai2010/gettext-go/gettext/plural'
=== added file 'src/github.com/chai2010/gettext-go/gettext/plural/doc.go'
--- src/github.com/chai2010/gettext-go/gettext/plural/doc.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/plural/doc.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,36 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package plural provides standard plural formulas.

+Examples:
+	import (
+		"code.google.com/p/gettext-go/gettext/plural"
+	)
+
+	func main() {
+		enFormula := plural.Formula("en_US")
+		xxFormula := plural.Formula("zh_CN")
+
+		fmt.Printf("%s: %d\n", "en", enFormula(0))
+		fmt.Printf("%s: %d\n", "en", enFormula(1))
+		fmt.Printf("%s: %d\n", "en", enFormula(2))
+		fmt.Printf("%s: %d\n", "??", xxFormula(0))
+		fmt.Printf("%s: %d\n", "??", xxFormula(1))
+		fmt.Printf("%s: %d\n", "??", xxFormula(2))
+		fmt.Printf("%s: %d\n", "??", xxFormula(9))
+		// Output:
+		// en: 0
+		// en: 0
+		// en: 1
+		// ??: 0
+		// ??: 0
+		// ??: 0
+		// ??: 0
+	}
+
+See http://www.gnu.org/software/gettext/manual/html_node/Plural-forms.html
+*/
+package plural
=== added file 'src/github.com/chai2010/gettext-go/gettext/plural/formula.go'
--- src/github.com/chai2010/gettext-go/gettext/plural/formula.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/plural/formula.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,181 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package plural
+
+import (
+	"strings"
+)
+
+// Formula provides the language's standard plural formula.
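+// Languages missing from FormsTable fall back to the "??" entry
+// (nplurals=1; plural=0); if even that lookup fails, the identity
+// function is returned.
+//
+// Example (using the Russian rule from FormsTable):
+//	f := Formula("ru")
+//	_ = f(1) // == 0
+//	_ = f(3) // == 1
+//	_ = f(5) // == 2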
+func Formula(lang string) func(n int) int { + if idx := index(lang); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + if idx := index("??"); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + return func(n int) int { + return n + } +} + +func index(lang string) int { + for i := 0; i < len(FormsTable); i++ { + if strings.HasPrefix(lang, FormsTable[i].Lang) { + return i + } + } + return -1 +} + +func fmtForms(forms string) string { + forms = strings.TrimSpace(forms) + forms = strings.Replace(forms, " ", "", -1) + return forms +} + +var formulaTable = map[string]func(n int) int{ + fmtForms("nplurals=n; plural=n-1;"): func(n int) int { + if n > 0 { + return n - 1 + } + return 0 + }, + fmtForms("nplurals=1; plural=0;"): func(n int) int { + return 0 + }, + fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=2; plural=(n > 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n != 0 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 2 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 0 || (n%100 > 0 && n%100 < 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n == 1 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"): func(n int) int { + if n%100 == 1 { + return 0 + } + if n%100 == 2 { + return 1 + } + if n%100 == 3 || n%100 == 4 { + return 2 + } + return 3 + }, +} === added file 'src/github.com/chai2010/gettext-go/gettext/plural/formula_test.go' --- src/github.com/chai2010/gettext-go/gettext/plural/formula_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/plural/formula_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +import ( + "testing" +) + +func TestFormula(t *testing.T) { + for i, v := range testData { + if out := Formula(v.lang)(v.in); out != v.out { + t.Fatalf("%d/%s: expect = %d, got = %d", i, v.lang, v.out, out) + } + } +} + +var testData = []struct { + lang string + in int + out int +}{ + {"#@", 0, 0}, + {"#@", 1, 0}, + {"#@", 10, 0}, + {"#@", -1, 0}, + + {"zh", 0, 0}, + {"zh", 1, 0}, + {"zh", 10, 0}, + {"zh", -1, 0}, + + {"zh_CN", 0, 0}, + {"zh_CN", 1, 0}, + {"zh_CN", 10, 0}, + {"zh_CN", -1, 0}, + + {"en", 0, 0}, + {"en", 1, 0}, + {"en", 2, 1}, + {"en", 10, 1}, + {"en", -1, 0}, + + {"en_US", 0, 0}, + {"en_US", 1, 0}, + {"en_US", 2, 1}, + {"en_US", 10, 1}, + {"en_US", -1, 0}, +} === added file 'src/github.com/chai2010/gettext-go/gettext/plural/table.go' --- src/github.com/chai2010/gettext-go/gettext/plural/table.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/plural/table.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,55 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +// FormsTable are standard hard-coded plural rules. +// The application developers and the translators need to understand them. 
+//
+// See GNU's gettext library source code: gettext/gettext-tools/src/plural-table.c
+var FormsTable = []struct {
+	Lang     string
+	Language string
+	Value    string
+}{
+	{"??", "Unknown", "nplurals=1; plural=0;"},
+	{"ja", "Japanese", "nplurals=1; plural=0;"},
+	{"vi", "Vietnamese", "nplurals=1; plural=0;"},
+	{"ko", "Korean", "nplurals=1; plural=0;"},
+	{"en", "English", "nplurals=2; plural=(n != 1);"},
+	{"de", "German", "nplurals=2; plural=(n != 1);"},
+	{"nl", "Dutch", "nplurals=2; plural=(n != 1);"},
+	{"sv", "Swedish", "nplurals=2; plural=(n != 1);"},
+	{"da", "Danish", "nplurals=2; plural=(n != 1);"},
+	{"no", "Norwegian", "nplurals=2; plural=(n != 1);"},
+	{"nb", "Norwegian Bokmal", "nplurals=2; plural=(n != 1);"},
+	{"nn", "Norwegian Nynorsk", "nplurals=2; plural=(n != 1);"},
+	{"fo", "Faroese", "nplurals=2; plural=(n != 1);"},
+	{"es", "Spanish", "nplurals=2; plural=(n != 1);"},
+	{"pt_BR", "Brazilian", "nplurals=2; plural=(n > 1);"},
+	{"pt", "Portuguese", "nplurals=2; plural=(n != 1);"},
+	{"it", "Italian", "nplurals=2; plural=(n != 1);"},
+	{"bg", "Bulgarian", "nplurals=2; plural=(n != 1);"},
+	{"el", "Greek", "nplurals=2; plural=(n != 1);"},
+	{"fi", "Finnish", "nplurals=2; plural=(n != 1);"},
+	{"et", "Estonian", "nplurals=2; plural=(n != 1);"},
+	{"he", "Hebrew", "nplurals=2; plural=(n != 1);"},
+	{"eo", "Esperanto", "nplurals=2; plural=(n != 1);"},
+	{"hu", "Hungarian", "nplurals=2; plural=(n != 1);"},
+	{"tr", "Turkish", "nplurals=2; plural=(n != 1);"},
+	{"fr", "French", "nplurals=2; plural=(n > 1);"},
+	{"lv", "Latvian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"},
+	{"ga", "Irish", "nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"},
+	{"ro", "Romanian", "nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"},
+	{"lt", "Lithuanian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"ru", "Russian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"uk", "Ukrainian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"be", "Belarusian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"sr", "Serbian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"hr", "Croatian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"cs", "Czech", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"},
+	{"sk", "Slovak", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"},
+	{"pl", "Polish", "nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"},
+	{"sl", "Slovenian", "nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"},
+}
=== added directory 'src/github.com/chai2010/gettext-go/gettext/po'
=== added file 'src/github.com/chai2010/gettext-go/gettext/po/comment.go'
--- src/github.com/chai2010/gettext-go/gettext/po/comment.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/po/comment.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,270 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
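+//
+// This file parses and prints the "#"-prefixed comment block that precedes a
+// PO entry: translator comments (#), extracted comments (#.), references (#:),
+// flags (#,) and previous msgctxt/msgid lines (#|).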
+ +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// Comment represents every message's comments. +type Comment struct { + StartLine int // comment start line + TranslatorComment string // # translator-comments // TrimSpace + ExtractedComment string // #. extracted-comments + ReferenceFile []string // #: src/msgcmp.c:338 src/po-lex.c:699 + ReferenceLine []int // #: src/msgcmp.c:338 src/po-lex.c:699 + Flags []string // #, fuzzy,c-format,range:0..10 + PrevMsgContext string // #| msgctxt previous-context + PrevMsgId string // #| msgid previous-untranslated-string +} + +func (p *Comment) less(q *Comment) bool { + if p.StartLine != 0 || q.StartLine != 0 { + return p.StartLine < q.StartLine + } + if a, b := len(p.ReferenceFile), len(q.ReferenceFile); a != b { + return a < b + } + for i := 0; i < len(p.ReferenceFile); i++ { + if a, b := p.ReferenceFile[i], q.ReferenceFile[i]; a != b { + return a < b + } + if a, b := p.ReferenceLine[i], q.ReferenceLine[i]; a != b { + return a < b + } + } + return false +} + +func (p *Comment) readPoComment(r *lineReader) (err error) { + *p = Comment{} + if err = r.skipBlankLine(); err != nil { + return err + } + defer func(oldPos int) { + newPos := r.currentPos() + if newPos != oldPos && err == io.EOF { + err = nil + } + }(r.currentPos()) + + p.StartLine = r.currentPos() + 1 + for { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if len(s) == 0 || s[0] != '#' { + return + } + + if err = p.readTranslatorComment(r); err != nil { + return + } + if err = p.readExtractedComment(r); err != nil { + return + } + if err = p.readReferenceComment(r); err != nil { + return + } + if err = p.readFlagsComment(r); err != nil { + return + } + if err = p.readPrevMsgContext(r); err != nil { + return + } + if err = p.readPrevMsgId(r); err != nil { + return + } + } +} + +func (p *Comment) readTranslatorComment(r *lineReader) (err error) { + const prefix = "# " // .,:| + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < 1 || s[0] != '#' { + r.unreadLine() + return nil + } + if len(s) >= 2 { + switch s[1] { + case '.', ',', ':', '|': + r.unreadLine() + return nil + } + } + if p.TranslatorComment != "" { + p.TranslatorComment += "\n" + } + p.TranslatorComment += strings.TrimSpace(s[1:]) + } +} + +func (p *Comment) readExtractedComment(r *lineReader) (err error) { + const prefix = "#." 
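+	// Successive "#." lines belong to the same extracted comment and are
+	// joined with a newline, mirroring readTranslatorComment above.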
+	for {
+		var s string
+		if s, _, err = r.readLine(); err != nil {
+			return err
+		}
+		if len(s) < len(prefix) || s[:len(prefix)] != prefix {
+			r.unreadLine()
+			return nil
+		}
+		if p.ExtractedComment != "" {
+			p.ExtractedComment += "\n"
+		}
+		p.ExtractedComment += strings.TrimSpace(s[len(prefix):])
+	}
+}
+
+func (p *Comment) readReferenceComment(r *lineReader) (err error) {
+	const prefix = "#:"
+	for {
+		var s string
+		if s, _, err = r.readLine(); err != nil {
+			return err
+		}
+		if len(s) < len(prefix) || s[:len(prefix)] != prefix {
+			r.unreadLine()
+			return nil
+		}
+		ss := strings.Split(strings.TrimSpace(s[len(prefix):]), " ")
+		for i := 0; i < len(ss); i++ {
+			idx := strings.Index(ss[i], ":")
+			if idx <= 0 {
+				continue
+			}
+			name := strings.TrimSpace(ss[i][:idx])
+			line, _ := strconv.Atoi(strings.TrimSpace(ss[i][idx+1:]))
+			p.ReferenceFile = append(p.ReferenceFile, name)
+			p.ReferenceLine = append(p.ReferenceLine, line)
+		}
+	}
+}
+
+func (p *Comment) readFlagsComment(r *lineReader) (err error) {
+	const prefix = "#,"
+	for {
+		var s string
+		if s, _, err = r.readLine(); err != nil {
+			return err
+		}
+		if len(s) < len(prefix) || s[:len(prefix)] != prefix {
+			r.unreadLine()
+			return nil
+		}
+		ss := strings.Split(strings.TrimSpace(s[len(prefix):]), ",")
+		for i := 0; i < len(ss); i++ {
+			p.Flags = append(p.Flags, strings.TrimSpace(ss[i]))
+		}
+	}
+}
+
+func (p *Comment) readPrevMsgContext(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !rePrevMsgContextComments.MatchString(s) {
+		return
+	}
+	p.PrevMsgContext, err = p.readString(r)
+	return
+}
+
+func (p *Comment) readPrevMsgId(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !rePrevMsgIdComments.MatchString(s) {
+		return
+	}
+	p.PrevMsgId, err = p.readString(r)
+	return
+}
+
+func (p *Comment) readString(r *lineReader) (msg string, err error) {
+	var s string
+	if s, _, err = r.readLine(); err != nil {
+		return
+	}
+	msg += decodePoString(s)
+	for {
+		if s, _, err = r.readLine(); err != nil {
+			return
+		}
+		if !reStringLineComments.MatchString(s) {
+			r.unreadLine()
+			break
+		}
+		msg += decodePoString(s)
+	}
+	return
+}
+
+// GetFuzzy gets the fuzzy flag.
+func (p *Comment) GetFuzzy() bool {
+	for _, s := range p.Flags {
+		if s == "fuzzy" {
+			return true
+		}
+	}
+	return false
+}
+
+// SetFuzzy sets or clears the fuzzy flag.
+func (p *Comment) SetFuzzy(fuzzy bool) {
+	if fuzzy {
+		if !p.GetFuzzy() {
+			p.Flags = append(p.Flags, "fuzzy")
+		}
+		return
+	}
+	flags := p.Flags[:0]
+	for _, s := range p.Flags {
+		if s != "fuzzy" {
+			flags = append(flags, s)
+		}
+	}
+	p.Flags = flags
+}
+
+// String returns the po format comment string.
+func (p Comment) String() string {
+	var buf bytes.Buffer
+	if p.TranslatorComment != "" {
+		ss := strings.Split(p.TranslatorComment, "\n")
+		for i := 0; i < len(ss); i++ {
+			fmt.Fprintf(&buf, "# %s\n", ss[i])
+		}
+	}
+	if p.ExtractedComment != "" {
+		ss := strings.Split(p.ExtractedComment, "\n")
+		for i := 0; i < len(ss); i++ {
+			fmt.Fprintf(&buf, "#. %s\n", ss[i])
+		}
+	}
+	if a, b := len(p.ReferenceFile), len(p.ReferenceLine); a != 0 && a == b {
+		fmt.Fprintf(&buf, "#:")
+		for i := 0; i < len(p.ReferenceFile); i++ {
+			fmt.Fprintf(&buf, " %s:%d", p.ReferenceFile[i], p.ReferenceLine[i])
+		}
+		fmt.Fprintf(&buf, "\n")
+	}
+	if len(p.Flags) != 0 {
+		fmt.Fprintf(&buf, "#, %s", p.Flags[0])
+		for i := 1; i < len(p.Flags); i++ {
+			fmt.Fprintf(&buf, ", %s", p.Flags[i])
+		}
+		fmt.Fprintf(&buf, "\n")
+	}
+	if p.PrevMsgContext != "" {
+		s := encodeCommentPoString(p.PrevMsgContext)
+		fmt.Fprintf(&buf, "#| msgctxt %s\n", s)
+	}
+	if p.PrevMsgId != "" {
+		s := encodeCommentPoString(p.PrevMsgId)
+		fmt.Fprintf(&buf, "#| msgid %s\n", s)
+	}
+	return buf.String()
+}
=== added file 'src/github.com/chai2010/gettext-go/gettext/po/comment_test.go'
--- src/github.com/chai2010/gettext-go/gettext/po/comment_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/gettext/po/comment_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,207 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package po
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestPoComment(t *testing.T) {
+	var x Comment
+	for i := 0; i < len(testPoComments); i++ {
+		if i != 2 {
+			continue
+		}
+		err := x.readPoComment(newLineReader(testPoComments[i].Data))
+		if err != nil {
+			t.Fatalf("%d: %v", i, err)
+		}
+		x.StartLine = 0 // ignore the comment start line
+		if !reflect.DeepEqual(&x, &testPoComments[i].PoComment) {
+			t.Logf("expect(%d):\n", i)
+			t.Logf("\n%v\n", &testPoComments[i].PoComment)
+			t.Logf("got(%d):\n", i)
+			t.Logf("\n%v\n", &x)
+			t.FailNow()
+		}
+		if testPoComments[i].CheckStringer {
+			s := testPoComments[i].PoComment.String()
+			if s != testPoComments[i].Data {
+				t.Logf("expect(%d):\n", i)
+				t.Logf("\n%s\n", testPoComments[i].Data)
+				t.Logf("got(%d):\n", i)
+				t.Logf("\n%s\n", testPoComments[i].PoComment.String())
+				t.FailNow()
+			}
+		}
+	}
+}
+
+type testPoComment struct {
+	CheckStringer bool
+	Data          string
+	PoComment     Comment
+}
+
+var testPoComments = []testPoComment{
+
+	// --------------------------------------------------------------
+	// CheckStringer: true
+	// --------------------------------------------------------------
+
+	testPoComment{
+		CheckStringer: true,
+		Data: `# translator comments
+`,
+		PoComment: Comment{
+			TranslatorComment: `translator comments`,
+		},
+	},
+	testPoComment{
+		CheckStringer: true,
+		Data: `# translator comments
+`,
+		PoComment: Comment{
+			TranslatorComment: `translator comments`,
+		},
+	},
+
+	testPoComment{
+		CheckStringer: true,
+		Data: `# translator-comments
+# bad comment
+#. 
extracted-comments +#: src/msgcmp.c:338 src/po-lex.c:699 src/msg.c:123 +#, fuzzy, c-format, range:0..10 +#| msgctxt "" +#| "previous-context1\n" +#| "previous-context2" +#| msgid "" +#| "previous-untranslated-string1\n" +#| "previous-untranslated-string2" +`, + PoComment: Comment{ + TranslatorComment: "translator-comments\nbad comment", + ExtractedComment: "extracted-comments", + ReferenceFile: []string{"src/msgcmp.c", "src/po-lex.c", "src/msg.c"}, + ReferenceLine: []int{338, 699, 123}, + Flags: []string{"fuzzy", "c-format", "range:0..10"}, + PrevMsgContext: "previous-context1\nprevious-context2", + PrevMsgId: "previous-untranslated-string1\nprevious-untranslated-string2", + }, + }, + + // -------------------------------------------------------------- + // CheckStringer: false + // -------------------------------------------------------------- + + testPoComment{ + CheckStringer: false, + Data: ` +# translator-comments +#bad comment +#. extracted-comments +#: src/msgcmp.c:338 src/po-lex.c:699 +#: src/msg.c:123 +#, fuzzy,c-format,range:0..10 +#| msgctxt "" +#| "previous-context1\n" +#| "previous-context2" +#| msgid "" +#| "previous-untranslated-string1\n" +#| "previous-untranslated-string2" +`, + PoComment: Comment{ + TranslatorComment: "translator-comments\nbad comment", + ExtractedComment: "extracted-comments", + ReferenceFile: []string{"src/msgcmp.c", "src/po-lex.c", "src/msg.c"}, + ReferenceLine: []int{338, 699, 123}, + Flags: []string{"fuzzy", "c-format", "range:0..10"}, + PrevMsgContext: "previous-context1\nprevious-context2", + PrevMsgId: "previous-untranslated-string1\nprevious-untranslated-string2", + }, + }, + testPoComment{ + CheckStringer: false, + Data: ` +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: Poedit 1.5\n" +"Report-Msgid-Bugs-To: poedit@googlegroups.com\n" +"POT-Creation-Date: 2012-07-30 10:34+0200\n" +"PO-Revision-Date: 2013-12-25 09:32+0800\n" +"Last-Translator: chai2010 \n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 1.5.7\n" +`, + PoComment: Comment{ + TranslatorComment: `SOME DESCRIPTIVE TITLE. +Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +This file is distributed under the same license as the PACKAGE package. +FIRST AUTHOR , YEAR. +`, + }, + }, + testPoComment{ + CheckStringer: false, + Data: ` +#. TRANSLATORS: This is version information in about dialog, it is followed +#. by version number when used (wxWidgets 2.8) +#: ../src/edframe.cpp:2431 +#| msgctxt "previous-context asdasd" +"asdad \n asdsad" +msgstr "" +`, + PoComment: Comment{ + ExtractedComment: `TRANSLATORS: This is version information in about dialog, it is followed +by version number when used (wxWidgets 2.8)`, + ReferenceFile: []string{"../src/edframe.cpp"}, + ReferenceLine: []int{2431}, + PrevMsgContext: "previous-context asdasd", + }, + }, + testPoComment{ + CheckStringer: false, + Data: ` +#: tst-gettext2.c:33 +msgid "First string for testing." 
+msgstr "Lang1: 1st string" +`, + PoComment: Comment{ + ReferenceFile: []string{"tst-gettext2.c"}, + ReferenceLine: []int{33}, + }, + }, + testPoComment{ + CheckStringer: false, + Data: ` +#: app/app_procs.c:307 +#, fuzzy, c-format +msgid "Can't find output format %s\n" +msgstr "" +"敲矾弊牢 '%s'甫 佬阑è ç»åš¼èªä¿ƒ\n" +"%s" +`, + PoComment: Comment{ + ReferenceFile: []string{"app/app_procs.c"}, + ReferenceLine: []int{307}, + Flags: []string{"fuzzy", "c-format"}, + }, + }, + + // -------------------------------------------------------------- + // END + // -------------------------------------------------------------- +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/doc.go' --- src/github.com/chai2010/gettext-go/gettext/po/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package po provides support for reading and writing GNU PO file. + +Examples: + import ( + "github.com/chai2010/gettext-go/gettext/po" + ) + + func main() { + poFile, err := po.Load("test.po") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", poFile) + } + +The GNU PO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html. +*/ +package po === added file 'src/github.com/chai2010/gettext-go/gettext/po/file.go' --- src/github.com/chai2010/gettext-go/gettext/po/file.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/file.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,75 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sort" +) + +// File represents an PO File. +// +// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html +type File struct { + MimeHeader Header + Messages []Message +} + +// Load loads a named po file. +func Load(name string) (*File, error) { + data, err := ioutil.ReadFile(name) + if err != nil { + return nil, err + } + return LoadData(data) +} + +// LoadData loads po file format data. +func LoadData(data []byte) (*File, error) { + r := newLineReader(string(data)) + var file File + for { + var msg Message + if err := msg.readPoEntry(r); err != nil { + if err == io.EOF { + return &file, nil + } + return nil, err + } + if msg.MsgId == "" { + file.MimeHeader.parseHeader(&msg) + continue + } + file.Messages = append(file.Messages, msg) + } +} + +// Save saves a po file. +func (f *File) Save(name string) error { + return ioutil.WriteFile(name, []byte(f.String()), 0666) +} + +// Save returns a po file format data. +func (f *File) Data() []byte { + // sort the massge as ReferenceFile/ReferenceLine field + var messages []Message + messages = append(messages, f.Messages...) + sort.Sort(byMessages(messages)) + + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String()) + for i := 0; i < len(messages); i++ { + fmt.Fprintf(&buf, "%s\n", messages[i].String()) + } + return buf.Bytes() +} + +// String returns the po format file string. 
+func (f *File) String() string { + return string(f.Data()) +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/file_test.go' --- src/github.com/chai2010/gettext-go/gettext/po/file_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/file_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "testing" +) + +func TestPoFile(t *testing.T) { + // +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/header.go' --- src/github.com/chai2010/gettext-go/gettext/po/header.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/header.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,106 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "strings" +) + +// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR" +// and "FIRST AUTHOR , YEAR" ought to be replaced by sensible information. +// +// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry +type Header struct { + Comment // Header Comments + ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION + ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR + POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE + PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + LastTranslator string // Last-Translator: FIRST AUTHOR + LanguageTeam string // Language-Team: golang-china + Language string // Language: zh_CN + MimeVersion string // MIME-Version: 1.0 + ContentType string // Content-Type: text/plain; charset=UTF-8 + ContentTransferEncoding string // Content-Transfer-Encoding: 8bit + PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) parseHeader(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } + p.Comment = msg.Comment +} + +// String returns the po format header string. 
+func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/header_test.go' --- src/github.com/chai2010/gettext-go/gettext/po/header_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/header_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "testing" +) + +func TestHeader(t *testing.T) { + // +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/line_reader.go' --- src/github.com/chai2010/gettext-go/gettext/po/line_reader.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/line_reader.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,62 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "io" + "strings" +) + +type lineReader struct { + lines []string + pos int +} + +func newLineReader(data string) *lineReader { + data = strings.Replace(data, "\r", "", -1) + lines := strings.Split(data, "\n") + return &lineReader{lines: lines} +} + +func (r *lineReader) skipBlankLine() error { + for ; r.pos < len(r.lines); r.pos++ { + if strings.TrimSpace(r.lines[r.pos]) != "" { + break + } + } + if r.pos >= len(r.lines) { + return io.EOF + } + return nil +} + +func (r *lineReader) currentPos() int { + return r.pos +} + +func (r *lineReader) currentLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + return +} + +func (r *lineReader) readLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + r.pos++ + return +} + +func (r *lineReader) unreadLine() { + if r.pos >= 0 { + r.pos-- + } +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/message.go' --- src/github.com/chai2010/gettext-go/gettext/po/message.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/message.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,189 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
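+
+// The entry parser in this file leans on lineReader's one-line lookahead:
+// each read* helper consumes lines while they match its prefix, then calls
+// unreadLine so the next helper sees the first non-matching line again. A
+// minimal standalone sketch of the same pattern (peekReader and its methods
+// are illustrative only, not part of this package):
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"strings"
+//	)
+//
+//	type peekReader struct {
+//		lines []string
+//		pos   int
+//	}
+//
+//	func (r *peekReader) read() (string, bool) {
+//		if r.pos >= len(r.lines) {
+//			return "", false
+//		}
+//		s := r.lines[r.pos]
+//		r.pos++
+//		return s, true
+//	}
+//
+//	func (r *peekReader) unread() { r.pos-- }
+//
+//	func main() {
+//		r := &peekReader{lines: []string{`#: a.c:1`, `#: b.c:2`, `msgid "x"`}}
+//		for {
+//			s, ok := r.read()
+//			if !ok {
+//				break
+//			}
+//			if !strings.HasPrefix(s, "#:") {
+//				r.unread() // hand the line back for the next helper
+//				break
+//			}
+//			fmt.Println("reference:", s)
+//		}
+//	}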
+
+package po
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// A PO file is made up of many entries,
+// each entry holding the relation between an original untranslated string
+// and its corresponding translation.
+//
+// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
+type Message struct {
+	Comment               // Comments
+	MsgContext   string   // msgctxt context
+	MsgId        string   // msgid untranslated-string
+	MsgIdPlural  string   // msgid_plural untranslated-string-plural
+	MsgStr       string   // msgstr translated-string
+	MsgStrPlural []string // msgstr[0] translated-string-case-0
+}
+
+type byMessages []Message
+
+func (d byMessages) Len() int {
+	return len(d)
+}
+func (d byMessages) Less(i, j int) bool {
+	if d[i].Comment.less(&d[j].Comment) {
+		return true
+	}
+	if a, b := d[i].MsgContext, d[j].MsgContext; a != b {
+		return a < b
+	}
+	if a, b := d[i].MsgId, d[j].MsgId; a != b {
+		return a < b
+	}
+	if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b {
+		return a < b
+	}
+	return false
+}
+func (d byMessages) Swap(i, j int) {
+	d[i], d[j] = d[j], d[i]
+}
+
+func (p *Message) readPoEntry(r *lineReader) (err error) {
+	*p = Message{}
+	if err = r.skipBlankLine(); err != nil {
+		return
+	}
+	defer func(oldPos int) {
+		newPos := r.currentPos()
+		if newPos != oldPos && err == io.EOF {
+			err = nil
+		}
+	}(r.currentPos())
+
+	if err = p.Comment.readPoComment(r); err != nil {
+		return
+	}
+	for {
+		var s string
+		if s, _, err = r.currentLine(); err != nil {
+			return
+		}
+
+		if p.isInvalidLine(s) {
+			err = fmt.Errorf("gettext: line %d, %v", r.currentPos(), "invalid line")
+			return
+		}
+		if reComment.MatchString(s) || reBlankLine.MatchString(s) {
+			return
+		}
+
+		if err = p.readMsgContext(r); err != nil {
+			return
+		}
+		if err = p.readMsgId(r); err != nil {
+			return
+		}
+		if err = p.readMsgIdPlural(r); err != nil {
+			return
+		}
+		if err = p.readMsgStrOrPlural(r); err != nil {
+			return
+		}
+	}
+}
+
+func (p *Message) readMsgContext(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgContext.MatchString(s) {
+		return
+	}
+	p.MsgContext, err = p.readString(r)
+	return
+}
+
+func (p *Message) readMsgId(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgId.MatchString(s) {
+		return
+	}
+	p.MsgId, err = p.readString(r)
+	return
+}
+
+func (p *Message) readMsgIdPlural(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgIdPlural.MatchString(s) {
+		return
+	}
+	p.MsgIdPlural, err = p.readString(r)
+	// propagate any read error instead of discarding it
+	return
+}
+
+func (p *Message) readMsgStrOrPlural(r *lineReader) (err error) {
+	var s string
+	if s, _, err = r.currentLine(); err != nil {
+		return
+	}
+	if !reMsgStr.MatchString(s) && !reMsgStrPlural.MatchString(s) {
+		return
+	}
+	if reMsgStrPlural.MatchString(s) {
+		left, right := strings.Index(s, `[`), strings.LastIndex(s, `]`)
+		idx, _ := strconv.Atoi(s[left+1 : right])
+		s, err = p.readString(r)
+		if n := len(p.MsgStrPlural); (idx + 1) > n {
+			p.MsgStrPlural = append(p.MsgStrPlural, make([]string, (idx+1)-n)...)
+ } + p.MsgStrPlural[idx] = s + } else { + p.MsgStr, err = p.readString(r) + } + return nil +} + +func (p *Message) readString(r *lineReader) (msg string, err error) { + var s string + if s, _, err = r.readLine(); err != nil { + return + } + msg += decodePoString(s) + for { + if s, _, err = r.readLine(); err != nil { + return + } + if !reStringLine.MatchString(s) { + r.unreadLine() + break + } + msg += decodePoString(s) + } + return +} + +// String returns the po format entry string. +func (p Message) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId)) + if p.MsgIdPlural != "" { + fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural)) + } + if p.MsgStr != "" { + fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr)) + } + for i := 0; i < len(p.MsgStrPlural); i++ { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i])) + } + return buf.String() +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/message_test.go' --- src/github.com/chai2010/gettext-go/gettext/po/message_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/message_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,75 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "reflect" + "testing" +) + +func _TestPoEntry(t *testing.T) { + if len(testPoEntrys) != len(testPoEntryStrings) { + t.Fatalf("bad test") + } + var entry Message + for i := 0; i < len(testPoEntrys); i++ { + if err := entry.readPoEntry(newLineReader(testPoEntryStrings[i])); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&entry, &testPoEntrys[i]) { + t.Fatalf("%d: expect = %v, got = %v", i, testPoEntrys[i], entry) + } + } +} + +var testPoEntryStrings = []string{ + ` +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: 项目å称\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-12-12 20:03+0000\n" +"PO-Revision-Date: 2013-12-02 17:05+0800\n" +"Last-Translator: chai2010 \n" +"Language-Team: chai2010(团队) \n" +"Language: 中文\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.7\n" +"X-Poedit-SourceCharset: UTF-8\n" +`, +} + +var testPoEntrys = []Message{ + Message{ + Comment: Comment{ + TranslatorComment: `SOME DESCRIPTIVE TITLE. +Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +This file is distributed under the same license as the PACKAGE package. +FIRST AUTHOR , YEAR. +`, + }, + MsgStr: ` +Project-Id-Version: 项目å称 +Report-Msgid-Bugs-To: +POT-Creation-Date: 2011-12-12 20:03+0000 +PO-Revision-Date: 2013-12-02 17:05+0800 +Last-Translator: chai2010 +Language-Team: chai2010(团队) +Language: 中文 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +X-Generator: Poedit 1.5.7 +X-Poedit-SourceCharset: UTF-8 +`, + }, +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/poedit_test.go' --- src/github.com/chai2010/gettext-go/gettext/po/poedit_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/poedit_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +// Copyright 2013 ChaiShushan . All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "reflect" + "testing" +) + +var ( + testPoEditPoFile = "../testdata/poedit-1.5.7-zh_CN.po" + testPoEditMoFile = "../testdata/poedit-1.5.7-zh_CN.mo" +) + +func _TestPoEditPoFile(t *testing.T) { + po, err := Load(testPoEditPoFile) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&po.MimeHeader, &poEditFile.MimeHeader) { + t.Fatalf("expect = %v, got = %v", &poEditFile.MimeHeader, &po.MimeHeader) + } + if len(po.Messages) != len(poEditFile.Messages) { + t.Fatal("size not equal") + } + for k, v0 := range po.Messages { + if v1 := poEditFile.Messages[k]; !reflect.DeepEqual(&v0, &v1) { + t.Fatalf("%d: expect = %v, got = %v", k, v1, v0) + } + } +} + +var poEditFile = &File{} === added file 'src/github.com/chai2010/gettext-go/gettext/po/re.go' --- src/github.com/chai2010/gettext-go/gettext/po/re.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/re.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "regexp" +) + +var ( + reComment = regexp.MustCompile(`^#`) // # + reExtractedComments = regexp.MustCompile(`^#\.`) // #. + reReferenceComments = regexp.MustCompile(`^#:`) // #: + reFlagsComments = regexp.MustCompile(`^#,`) // #, fuzzy,c-format + rePrevMsgContextComments = regexp.MustCompile(`^#\|\s+msgctxt`) // #| msgctxt + rePrevMsgIdComments = regexp.MustCompile(`^#\|\s+msgid`) // #| msgid + reStringLineComments = regexp.MustCompile(`^#\|\s+".*"\s*$`) // #| "message" + + reMsgContext = regexp.MustCompile(`^msgctxt\s+".*"\s*$`) // msgctxt + reMsgId = regexp.MustCompile(`^msgid\s+".*"\s*$`) // msgid + reMsgIdPlural = regexp.MustCompile(`^msgid_plural\s+".*"\s*$`) // msgid_plural + reMsgStr = regexp.MustCompile(`^msgstr\s*".*"\s*$`) // msgstr + reMsgStrPlural = regexp.MustCompile(`^msgstr\s*(\[\d+\])\s*".*"\s*$`) // msgstr[0] + reStringLine = regexp.MustCompile(`^\s*".*"\s*$`) // "message" + reBlankLine = regexp.MustCompile(`^\s*$`) // +) + +func (p *Message) isInvalidLine(s string) bool { + if reComment.MatchString(s) { + return false + } + if reBlankLine.MatchString(s) { + return false + } + + if reMsgContext.MatchString(s) { + return false + } + if reMsgId.MatchString(s) { + return false + } + if reMsgIdPlural.MatchString(s) { + return false + } + if reMsgStr.MatchString(s) { + return false + } + if reMsgStrPlural.MatchString(s) { + return false + } + + if reStringLine.MatchString(s) { + return false + } + + return true +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/util.go' --- src/github.com/chai2010/gettext-go/gettext/po/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,110 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
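+
+// decodePoString and encodePoString below implement the PO escaping rules:
+// decoding strips the surrounding quotes of each continuation line, expands
+// escapes such as \n, \t and \\, and joins the lines; encoding is the
+// reverse. The effect is visible through the public API. A small sketch,
+// assuming the inline PO snippet below is representative:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"log"
+//
+//		"github.com/chai2010/gettext-go/gettext/po"
+//	)
+//
+//	func main() {
+//		src := []byte(`
+//	msgid "hello"
+//	msgstr "first line\n"
+//	"second line"
+//	`)
+//		f, err := po.LoadData(src)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		// The continuation line is joined and \n is expanded:
+//		// prints "first line\nsecond line".
+//		fmt.Printf("%q\n", f.Messages[0].MsgStr)
+//	}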
+ +package po + +import ( + "bytes" + "strings" +) + +func decodePoString(text string) string { + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + left := strings.Index(lines[i], `"`) + right := strings.LastIndex(lines[i], `"`) + if left < 0 || right < 0 || left == right { + lines[i] = "" + continue + } + line := lines[i][left+1 : right] + data := make([]byte, 0, len(line)) + for i := 0; i < len(line); i++ { + if line[i] != '\\' { + data = append(data, line[i]) + continue + } + if i+1 >= len(line) { + break + } + switch line[i+1] { + case 'n': // \\n -> \n + data = append(data, '\n') + i++ + case 't': // \\t -> \n + data = append(data, '\t') + i++ + case '\\': // \\\ -> ? + data = append(data, '\\') + i++ + } + } + lines[i] = string(data) + } + return strings.Join(lines, "") +} + +func encodePoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + if lines[i] == "" { + if i != len(lines)-1 { + buf.WriteString(`"\n"` + "\n") + } + continue + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + buf.WriteString(`\n"` + "\n") + } + return buf.String() +} + +func encodeCommentPoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + if len(lines) > 1 { + buf.WriteString(`""` + "\n") + } + for i := 0; i < len(lines); i++ { + if len(lines) > 0 { + buf.WriteString("#| ") + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + if i < len(lines)-1 { + buf.WriteString(`\n"` + "\n") + } else { + buf.WriteString(`"`) + } + } + return buf.String() +} === added file 'src/github.com/chai2010/gettext-go/gettext/po/util_test.go' --- src/github.com/chai2010/gettext-go/gettext/po/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/po/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,68 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "testing" +) + +func TestDecodePoString(t *testing.T) { + if s := decodePoString(poStrEncode); s != poStrDecode { + t.Fatalf(`expect = %s got = %s`, poStrDecode, s) + } +} + +func TestEncodePoString(t *testing.T) { + if s := encodePoString(poStrDecode); s != poStrEncodeStd { + t.Fatalf(`expect = %s; got = %s`, poStrEncodeStd, s) + } +} + +const poStrEncode = `# noise +123456789 +"Project-Id-Version: Poedit 1.5\n" +"Report-Msgid-Bugs-To: poedit@googlegroups.com\n" +"POT-Creation-Date: 2012-07-30 10:34+0200\n" +"PO-Revision-Date: 2013-02-24 21:00+0800\n" +"Last-Translator: Christopher Meng \n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 1.5.5\n" +"TestPoString: abc" +"123\n" +>> +123456??? 
+` + +const poStrEncodeStd = `"Project-Id-Version: Poedit 1.5\n" +"Report-Msgid-Bugs-To: poedit@googlegroups.com\n" +"POT-Creation-Date: 2012-07-30 10:34+0200\n" +"PO-Revision-Date: 2013-02-24 21:00+0800\n" +"Last-Translator: Christopher Meng \n" +"Language-Team: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +"X-Generator: Poedit 1.5.5\n" +"TestPoString: abc123\n" +` + +const poStrDecode = `Project-Id-Version: Poedit 1.5 +Report-Msgid-Bugs-To: poedit@googlegroups.com +POT-Creation-Date: 2012-07-30 10:34+0200 +PO-Revision-Date: 2013-02-24 21:00+0800 +Last-Translator: Christopher Meng +Language-Team: +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Plural-Forms: nplurals=1; plural=0; +X-Generator: Poedit 1.5.5 +TestPoString: abc123 +` === added file 'src/github.com/chai2010/gettext-go/gettext/testdata_test.go' --- src/github.com/chai2010/gettext-go/gettext/testdata_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/testdata_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,62 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "reflect" + "testing" +) + +var testDataDir = "../testdata/" + +var testPoMoFiles = []struct { + poFile string + moFile string +}{ + {"gettext-3-1.po", "gettext-3-1.mo"}, + {"gettext-4.po", "gettext-4.mo"}, + {"gettext-5.po", "gettext-5.mo"}, + {"gettext-6-1.po", "gettext-6-1.mo"}, + {"gettext-6-2.po", "gettext-6-2.mo"}, + {"gettext-7.po", "gettext-7.mo"}, + {"gettextpo-1.de.po", "gettextpo-1.de.mo"}, + {"mm-ko-comp.euc-kr.po", "mm-ko-comp.euc-kr.mo"}, + {"mm-ko.euc-kr.po", "mm-ko.euc-kr.mo"}, + {"mm-viet.comp.po", "mm-viet.comp.mo"}, + {"poedit-1.5.7-zh_CN.po", "poedit-1.5.7-zh_CN.mo"}, + {"qttest2_de.po", "qttest2_de.mo"}, + {"qttest_pl.po", "qttest_pl.mo"}, + {"test.po", "test.mo"}, +} + +func TestPoMoFiles(t *testing.T) { + for i := 0; i < len(testPoMoFiles); i++ { + poName := testPoMoFiles[i].poFile + moName := testPoMoFiles[i].moFile + po, err := newPoTranslator(testDataDir+poName, nil) + if err != nil { + t.Fatalf("%s: %v", poName, err) + } + mo, err := newMoTranslator(testDataDir+moName, nil) + if err != nil { + t.Fatalf("%s: %v", poName, err) + } + // if no translate, the mo will drop the message. + // so len(mo) may less than len(po). + if a, b := len(po.MessageMap), len(mo.MessageMap); a != b { + t.Logf("%s: %v, %d != %d", poName, "size not equal", a, b) + } + for k, v0 := range po.MessageMap { + v1, ok := mo.MessageMap[k] + if !ok { + t.Logf("%s: %q: missing", poName, v0.MsgId) + continue + } + if !reflect.DeepEqual(&v0, &v1) { + t.Fatalf("%s: %q: expect = %v, got = %v", poName, v0.MsgId, v0, v1) + } + } + } +} === added file 'src/github.com/chai2010/gettext-go/gettext/tr.go' --- src/github.com/chai2010/gettext-go/gettext/tr.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/tr.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,128 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
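+
+// PNGettext below resolves a plural translation in three steps: map n
+// through the language's plural formula, clamp the result into the
+// msgstr[] forms, then fall back to msgidPlural (for n > 0) or msgid when
+// no translation exists. A standalone sketch of that fallback order
+// (selectPlural is illustrative, not part of this package):
+//
+//	package main
+//
+//	import "fmt"
+//
+//	// n is assumed to be the plural-formula result already.
+//	func selectPlural(msgid, msgidPlural string, forms []string, n int) string {
+//		if len(forms) != 0 {
+//			if n >= len(forms) {
+//				n = len(forms) - 1 // clamp to the last known form
+//			}
+//			if forms[n] != "" {
+//				return forms[n]
+//			}
+//		}
+//		if msgidPlural != "" && n > 0 {
+//			return msgidPlural // untranslated, but at least plural
+//		}
+//		return msgid
+//	}
+//
+//	func main() {
+//		de := []string{"ein Apfel", "%d Äpfel"}
+//		fmt.Println(selectPlural("one apple", "%d apples", de, 0))  // ein Apfel
+//		fmt.Println(selectPlural("one apple", "%d apples", de, 5))  // clamped: %d Äpfel
+//		fmt.Println(selectPlural("one apple", "%d apples", nil, 5)) // %d apples
+//	}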
+ +package gettext + +import ( + "github.com/chai2010/gettext-go/gettext/mo" + "github.com/chai2010/gettext-go/gettext/plural" + "github.com/chai2010/gettext-go/gettext/po" +) + +var nilTranslator = &translator{ + MessageMap: make(map[string]mo.Message), + PluralFormula: plural.Formula("??"), +} + +type translator struct { + MessageMap map[string]mo.Message + PluralFormula func(n int) int +} + +func newMoTranslator(name string, data []byte) (*translator, error) { + var ( + f *mo.File + err error + ) + if len(data) != 0 { + f, err = mo.LoadData(data) + } else { + f, err = mo.Load(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = v + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func newPoTranslator(name string, data []byte) (*translator, error) { + var ( + f *po.File + err error + ) + if len(data) != 0 { + f, err = po.LoadData(data) + } else { + f, err = po.Load(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v.MsgStr, + MsgStrPlural: v.MsgStrPlural, + } + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func (p *translator) PGettext(msgctxt, msgid string) string { + return p.PNGettext(msgctxt, msgid, "", 0) +} + +func (p *translator) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + n = p.PluralFormula(n) + if ss := p.findMsgStrPlural(msgctxt, msgid, msgidPlural); len(ss) != 0 { + if n >= len(ss) { + n = len(ss) - 1 + } + if ss[n] != "" { + return ss[n] + } + } + if msgidPlural != "" && n > 0 { + return msgidPlural + } + return msgid +} + +func (p *translator) findMsgStrPlural(msgctxt, msgid, msgidPlural string) []string { + key := p.makeMapKey(msgctxt, msgid) + if v, ok := p.MessageMap[key]; ok { + if len(v.MsgIdPlural) != 0 { + if len(v.MsgStrPlural) != 0 { + return v.MsgStrPlural + } else { + return nil + } + } else { + if len(v.MsgStr) != 0 { + return []string{v.MsgStr} + } else { + return nil + } + } + } + return nil +} + +func (p *translator) makeMapKey(msgctxt, msgid string) string { + if msgctxt != "" { + return msgctxt + mo.EotSeparator + msgid + } + return msgid +} === added file 'src/github.com/chai2010/gettext-go/gettext/tr_test.go' --- src/github.com/chai2010/gettext-go/gettext/tr_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/gettext/tr_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,100 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
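+
+// poToMoData below keeps the po-to-mo conversion in memory for the tests.
+// The same recipe writes a real .mo file to disk; a minimal sketch (the
+// file names are illustrative, and only a few header fields are copied
+// here for brevity):
+//
+//	package main
+//
+//	import (
+//		"io/ioutil"
+//		"log"
+//
+//		"github.com/chai2010/gettext-go/gettext/mo"
+//		"github.com/chai2010/gettext-go/gettext/po"
+//	)
+//
+//	func main() {
+//		poFile, err := po.Load("test.po")
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		moFile := &mo.File{}
+//		moFile.MimeHeader.Language = poFile.MimeHeader.Language
+//		moFile.MimeHeader.ContentType = poFile.MimeHeader.ContentType
+//		moFile.MimeHeader.PluralForms = poFile.MimeHeader.PluralForms
+//		for _, v := range poFile.Messages {
+//			moFile.Messages = append(moFile.Messages, mo.Message{
+//				MsgContext:   v.MsgContext,
+//				MsgId:        v.MsgId,
+//				MsgIdPlural:  v.MsgIdPlural,
+//				MsgStr:       v.MsgStr,
+//				MsgStrPlural: v.MsgStrPlural,
+//			})
+//		}
+//		if err := ioutil.WriteFile("test.mo", moFile.Data(), 0666); err != nil {
+//			log.Fatal(err)
+//		}
+//	}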
+ +package gettext + +import ( + "testing" + + "github.com/chai2010/gettext-go/gettext/mo" + "github.com/chai2010/gettext-go/gettext/po" +) + +func TestTranslator_Po(t *testing.T) { + tr, err := newPoTranslator("test", []byte(testTrPoData)) + if err != nil { + t.Fatal(err) + } + for _, v := range testTrData { + if out := tr.PGettext(v.msgctxt, v.msgid); out != v.msgstr { + t.Fatalf("%s/%s: expect = %s, got = %s", v.msgctxt, v.msgid, v.msgstr, out) + } + } +} + +func TestTranslator_Mo(t *testing.T) { + tr, err := newMoTranslator("test", poToMoData(t, []byte(testTrPoData))) + if err != nil { + t.Fatal(err) + } + for _, v := range testTrData { + if out := tr.PGettext(v.msgctxt, v.msgid); out != v.msgstr { + t.Fatalf("%s/%s: expect = %s, got = %s", v.msgctxt, v.msgid, v.msgstr, out) + } + break + } +} + +func poToMoData(t *testing.T, data []byte) []byte { + poFile, err := po.LoadData(data) + if err != nil { + t.Fatal(err) + } + moFile := &mo.File{ + MimeHeader: mo.Header{ + ProjectIdVersion: poFile.MimeHeader.ProjectIdVersion, + ReportMsgidBugsTo: poFile.MimeHeader.ReportMsgidBugsTo, + POTCreationDate: poFile.MimeHeader.POTCreationDate, + PORevisionDate: poFile.MimeHeader.PORevisionDate, + LastTranslator: poFile.MimeHeader.LastTranslator, + LanguageTeam: poFile.MimeHeader.LanguageTeam, + Language: poFile.MimeHeader.Language, + MimeVersion: poFile.MimeHeader.MimeVersion, + ContentType: poFile.MimeHeader.ContentType, + ContentTransferEncoding: poFile.MimeHeader.ContentTransferEncoding, + PluralForms: poFile.MimeHeader.PluralForms, + XGenerator: poFile.MimeHeader.XGenerator, + UnknowFields: poFile.MimeHeader.UnknowFields, + }, + } + for _, v := range poFile.Messages { + moFile.Messages = append(moFile.Messages, mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v.MsgStr, + MsgStrPlural: v.MsgStrPlural, + }) + } + return moFile.Data() +} + +var testTrData = []struct { + msgctxt string + msgid string + msgstr string +}{ + {"main.init", "Gettext in init.", "Init函数中的Gettext."}, + {"main.main", "Hello, world!", "你好, 世界!"}, + {"main.func", "Gettext in func.", "闭包函数中的Gettext."}, + {"code.google.com/p/gettext-go/examples/hi.SayHi", "pkg hi: Hello, world!", "æ¥è‡ª\"Hi\"包的问候: 你好, 世界!"}, +} + +var testTrPoData = ` +msgctxt "main.init" +msgid "Gettext in init." +msgstr "Init函数中的Gettext." + +msgctxt "main.main" +msgid "Hello, world!" +msgstr "你好, 世界!" + +msgctxt "main.func" +msgid "Gettext in func." +msgstr "闭包函数中的Gettext." + +msgctxt "code.google.com/p/gettext-go/examples/hi.SayHi" +msgid "pkg hi: Hello, world!" +msgstr "æ¥è‡ª\"Hi\"包的问候: 你好, 世界!" +` === added directory 'src/github.com/chai2010/gettext-go/testdata' === added file 'src/github.com/chai2010/gettext-go/testdata/Makefile' --- src/github.com/chai2010/gettext-go/testdata/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +# Copyright 2013 ChaiShushan . All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. 
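+
+# The pattern rule below is the command-line counterpart of poToMoData in
+# gettext/tr_test.go: msgfmt compiles each .po source into its binary .mo
+# form. For a single file the rule expands to, e.g.:
+#
+#	make test.mo        # runs: msgfmt -o test.mo test.po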
+ +PO_FILES = $(wildcard *.po) +MO_FILES = $(patsubst %.po,%.mo,$(PO_FILES)) + +default: $(MO_FILES) + +clean: + rm *.mo + +%.mo: %.po + msgfmt -o $@ $< === added file 'src/github.com/chai2010/gettext-go/testdata/README.txt' --- src/github.com/chai2010/gettext-go/testdata/README.txt 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/README.txt 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +xg-c-1.ok.po has a bad header, msgfmt can't compile it. === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-3-1.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-3-1.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-3-1.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-3-1.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-3-1.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-3-1.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=US-ASCII\n" +"Content-Transfer-Encoding: 7-bit\n" + +#: tst-gettext2.c:33 +msgid "First string for testing." +msgstr "Lang1: 1st string" + +#: tst-gettext2.c:34 +msgid "Another string for testing." +msgstr "Lang1: 2nd string" === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-3-2.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-3-2.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-3-2.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-3-2.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-3-2.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-3-2.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=US-ASCII\n" +"Content-Transfer-Encoding: 7-bit\n" + +#: tst-gettext2.c:33 +msgid "First string for testing." +msgstr "Lang2: 1st string" + +#: tst-gettext2.c:34 +msgid "Another string for testing." 
+msgstr "Lang2: 2nd string" === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-4.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-4.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-4.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-4.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-4.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-4.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8-bit\n" + +msgid "cheese" +msgstr "Käse" === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-5.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-5.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-5.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-5.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-5.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-5.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8-bit\n" + +msgid "cheese" +msgstr "Käse" === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-6-1.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-6-1.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-6-1.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-6-1.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-6-1.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-6-1.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8-bit\n" + +msgid "beauty" +msgstr "Schönheit" === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-6-2.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-6-2.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-6-2.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-6-2.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-6-2.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-6-2.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8-bit\n" + +msgid "beauty" +msgstr "beauté" === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-7.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettext-7.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettext-7.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettext-7.po' --- src/github.com/chai2010/gettext-go/testdata/gettext-7.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettext-7.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +msgid "" +msgstr "" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-1\n" +"Content-Transfer-Encoding: 8-bit\n" + +msgid "cheese" +msgstr "Käse" === added file 
'src/github.com/chai2010/gettext-go/testdata/gettextpo-1.de.mo' Binary files src/github.com/chai2010/gettext-go/testdata/gettextpo-1.de.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/gettextpo-1.de.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/gettextpo-1.de.po' --- src/github.com/chai2010/gettext-go/testdata/gettextpo-1.de.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/gettextpo-1.de.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +# Test case for the libgettextpo library. +msgid "" +msgstr "" +"Project-Id-Version: libgettextpo 0.18.1\n" +"Report-Msgid-Bugs-To: bug-gnu-gettext@gnu.org\n" +"POT-Creation-Date: 2010-06-04 01:57+0200\n" +"PO-Revision-Date: 2010-06-05 14:39+0200\n" +"Last-Translator: Bruno Haible \n" +"Language-Team: German \n" +"Language: de\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: gnulib-lib/w32spawn.h:81 +#, fuzzy, c-format +msgid "cannot restore fd %d: dup2 failed" +msgstr "Ausgabedatei »%s« kann nicht erstellt werden" + +#: gnulib-lib/wait-process.c:223 gnulib-lib/wait-process.c:255 +#: gnulib-lib/wait-process.c:317 +#, c-format +msgid "%s subprocess" +msgstr "Subprozeß %s" + +# Adjektiv, kein ganzer Satz! +#. Denote a lock's state +msgctxt "Lock state" +msgid "Open" +msgstr "Geöffnet" + +# Französische Weine sind die besten der Welt. +#, java-format +msgid "a bottle of wine" +msgid_plural "{0,number} bottles of wine" +msgstr[0] "eine Flasche Wein" +msgstr[1] "{0,number} Weinflaschen" + +#. Denote a lock's state +#~ msgctxt "Lock state" +#~ msgid "Closed" +#~ msgstr "Geschlossen" === added file 'src/github.com/chai2010/gettext-go/testdata/mm-ko-comp.euc-kr.mo' Binary files src/github.com/chai2010/gettext-go/testdata/mm-ko-comp.euc-kr.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/mm-ko-comp.euc-kr.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/mm-ko-comp.euc-kr.po' --- src/github.com/chai2010/gettext-go/testdata/mm-ko-comp.euc-kr.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/mm-ko-comp.euc-kr.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1633 @@ +# Translation of SuSE patches included in gnome-patch-translation. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"POT-Creation-Date: 2006-06-26 19:23+0200\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=EUC-KR\n" +"Content-Transfer-Encoding: 8bit\n" + +#: NetworkManager/gnome/applet/applet.c:286 +msgid "Network configuration could not be run" +msgstr "" + +#: NetworkManager/gnome/applet/applet.c:1799 +msgid "Dialup configuration could not be run" +msgstr "" + +#: NetworkManager/gnome/applet/applet.c:1845 +#, fuzzy +msgid "Configure _Modem..." +msgstr "VPN ¼³Á¤Çϱâ(_C)..." + +#: NetworkManager/gnome/applet/applet.c:1849 +#, fuzzy +msgid "Configure _ISDN..." +msgstr "VPN ¼³Á¤Çϱâ(_C)..." 
+ +#: NetworkManager/gnome/applet/applet.glade.h:21 +#, fuzzy +msgid "Configure _Networking" +msgstr "³×Æ®¿öÅ© »ç¿ë(_N)" + +#: control-center-2.0/capplets/accessibility/at-properties/at-properties.desktop.in.in.h:1 +#, fuzzy +msgid "Assistive Technology" +msgstr "º¸Á¶ ±â¼ú Áö¿ø" + +#: control-center-2.0/capplets/accessibility/keyboard/accessibility-keyboard.desktop.in.in.h:1 +#, fuzzy +msgid "Accessibility" +msgstr "Á¢±Ù¼º ±â´É(_A)" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties-structs.c:61 +msgid "Nautilus" +msgstr "" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties-structs.c:63 +msgid "gFTP" +msgstr "" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties-structs.c:88 +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties-structs.c:89 +#, fuzzy +msgid "Mozilla News" +msgstr "Mozilla" + +#. FIXME: Pan doesd not yet support %s +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties-structs.c:92 +msgid "Pan" +msgstr "" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties-structs.c:93 +msgid "TIN" +msgstr "" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties.glade.h:9 +#, fuzzy +msgid "Default FTP Browser" +msgstr "±âº» À¥ ºê¶ó¿ìÀú" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties.glade.h:11 +#, fuzzy +msgid "Default News Reader" +msgstr "±âº» ¸ÞÀÏ Àбâ ÇÁ·Î±×·¥" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties.glade.h:19 +msgid "FTP" +msgstr "" + +#: control-center-2.0/capplets/default-applications/gnome-default-applications-properties.glade.h:21 +#, fuzzy +msgid "News" +msgstr "»õ·Î ¸¸µé±â(_N)" + +#: control-center-2.0/capplets/font/font-properties.desktop.in.in.h:1 +#, fuzzy +msgid "Fonts" +msgstr "±Û²Ã" + +#: control-center-2.0/capplets/keybindings/gnome-keybinding-properties.c:743 +msgid "Only special multimedia keys can be bound to this action!" +msgstr "" + +#: control-center-2.0/capplets/keybindings/keybinding.desktop.in.in.h:2 +#, fuzzy +msgid "Shortcuts" +msgstr "¹Ù·Î °¡±â" + +#: control-center-2.0/capplets/network/gnome-network-preferences.desktop.in.in.h:1 +#, fuzzy +msgid "Network Proxies" +msgstr "³×Æ®¿öÅ© ÇÁ·Ï½Ã" + +#: control-center-2.0/capplets/network/gnome-network-preferences.glade.h:4 +msgid "Use the s_ystem's proxy settings" +msgstr "" + +#: control-center-2.0/capplets/passwd/gnome-passwd.desktop.in.in.h:2 +#, fuzzy +msgid "Change your password" +msgstr "¿­¼è±Û ¹Ù²Ù±â" + +#: control-center-2.0/capplets/passwd/gnome-passwd.c:107 +#, fuzzy +msgid "New Password empty" +msgstr "»õ ¿­¼è±Û(_N):" + +#: control-center-2.0/capplets/passwd/gnome-passwd.c:109 +#, fuzzy +msgid "Passwords match" +msgstr "¿­¼è±ÛÀÌ ³Ê¹« ª½À´Ï´Ù" + +#: control-center-2.0/capplets/passwd/gnome-passwd.c:113 +#, fuzzy +msgid "Passwords do not match" +msgstr "¿­¼è±ÛÀÌ ³Ê¹« ª½À´Ï´Ù" + +#: control-center-2.0/capplets/passwd/gnome-passwd.c:142 +#: control-center-2.0/capplets/passwd/gnome-passwd.glade.h:3 +#, no-c-format +msgid "Changing Password for User '%s'" +msgstr "" + +#: control-center-2.0/capplets/passwd/gnome-passwd.c:185 +msgid "" +"You have got capslock on!\n" +"Passwords are case-sensitive." 
+msgstr "" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:105 +#, fuzzy +msgid "" +"Could not start helper program.\n" +"Could not change password" +msgstr "³ª¸ÓÁö¸¦ ÀúÀåÇÒ ¼ö ¾ø½À´Ï´Ù" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:110 +#: ../capplets/passwd/pam-passwd.c:114 +#: control-center-2.0/capplets/passwd/pam-passwd.c:119 +#: ../capplets/passwd/pam-passwd.c:123 +#: control-center-2.0/capplets/passwd/pam-passwd.c:128 +#: ../capplets/passwd/pam-passwd.c:132 +#: control-center-2.0/capplets/passwd/pam-passwd.c:170 +#: ../capplets/passwd/pam-passwd.c:176 +#, fuzzy +msgid "" +"Unknown error while changing password.\n" +"Could not change password" +msgstr "¿­¼è±ÛÀ» ¹Ù²Ù·Á¸é ¿­¼è±Û ¹Ù²Ù±â¸¦ ´©¸£½Ê½Ã¿À." + +#: control-center-2.0/capplets/passwd/pam-passwd.c:152 +msgid "Password changed successfully" +msgstr "" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:155 +#, fuzzy +msgid "Old password doesn't match. Please try again." +msgstr "ÀÌÀü ¿­¼è±ÛÀÌ ¿Ã¹Ù¸£Áö ¾Ê½À´Ï´Ù, ´Ù½Ã ÀÔ·ÂÇϽʽÿÀ" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:158 +msgid "" +"Password is insecure.\n" +"Please choose a new password." +msgstr "" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:161 +msgid "" +"Password confirmation doesn't match New Password.\n" +"Please retype new password and confirmation" +msgstr "" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:164 +msgid "Protocol error" +msgstr "" + +#: control-center-2.0/capplets/passwd/pam-passwd.c:242 +#, c-format +msgid "" +"Success:\n" +"%s" +msgstr "" + +#: control-center-2.0/capplets/passwd/gnome-passwd.glade.h:5 +#, fuzzy +msgid "Password confirmation empty" +msgstr "»õ ¿­¼è±Û(_N):" + +#: control-center-2.0/capplets/passwd/gnome-passwd.glade.h:6 +#, fuzzy +msgid "_Confirm Password:" +msgstr "¿­¼è±Û È®ÀÎ:" + +#: control-center-2.0/capplets/passwd/gnome-passwd.glade.h:7 +#, fuzzy +msgid "_New Password:" +msgstr "»õ ¿­¼è±Û:" + +#: control-center-2.0/capplets/passwd/gnome-passwd.glade.h:8 +#, fuzzy +msgid "_Old Password:" +msgstr "ÀÌÀü ¿­¼è±Û(_S):" + +#: control-center-2.0/capplets/sound/sound-properties.glade.h:1 +msgid "E_nable software sound mixing (ESD)" +msgstr "" + +#: control-center-2.0/capplets/sound/sound-properties.glade.h:5 +#, fuzzy +msgid "Sounds" +msgstr "¼Ò¸®" + +#: control-center-2.0/capplets/sound/sound-properties.glade.h:6 +#, fuzzy +msgid "System Beep" +msgstr "½Ã½ºÅÛ º§¼Ò¸®" + +#: control-center-2.0/capplets/sound/sound-properties.glade.h:7 +#, fuzzy +msgid "_Enable system beep" +msgstr "º¸Á¶ ±â¼ú »ç¿ëÇϱâ(_E)" + +#: control-center-2.0/capplets/sound/sound-properties.glade.h:8 +msgid "_Play system sounds" +msgstr "" + +#: control-center-2.0/capplets/sound/sound-properties.glade.h:9 +msgid "_Visual system beep" +msgstr "" + +#: control-center-2.0/libsounds/sound-view.c:42 +msgid "Login" +msgstr "" + +#: control-center-2.0/libsounds/sound-view.c:42 +#, fuzzy +msgid "Logout" +msgstr "·Î±×¾Æ¿ô" + +#: control-center-2.0/libsounds/sound-view.c:42 +msgid "Boing" +msgstr "" + +#: control-center-2.0/libsounds/sound-view.c:42 +#, fuzzy +msgid "Siren" +msgstr "È­¸é" + +#: control-center-2.0/libsounds/sound-view.c:42 +msgid "Clink" +msgstr "" + +#: control-center-2.0/libsounds/sound-view.c:42 +#, fuzzy +msgid "Beep" +msgstr "ÀýÀü ¸ðµå" + +#: control-center-2.0/libsounds/sound-view.c:42 +#, fuzzy +msgid "No sound" +msgstr "¼Ò¸®" + +#: control-center-2.0/libsounds/sound-view.c:115 +#, fuzzy +msgid "Sound not set for this event." 
+msgstr "»óȲ¿¡ µû¸¥ ¼Ò¸®(_S)" + +#: control-center-2.0/libsounds/sound-view.c:123 +#, fuzzy +msgid "" +"The sound file for this event does not exist.\n" +"You may want to install the gnome-audio packagefor a set of default sounds." +msgstr "" +"ÀÌ »óȲ¿¡ ´ëÇÑ »ç¿îµå ÆÄÀÏÀÌ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.\n" +"±âº» »ç¿îµå ÆÄÀÏÀ» ¼³Ä¡ÇÏ·Á¸é gnome-audio ÆÐÅ°Áö¸¦\n" +"¼³Ä¡ÇØ¾ß ÇÕ´Ï´Ù." + +#: control-center-2.0/libsounds/sound-view.c:235 +#, fuzzy +msgid "Select sound file..." +msgstr "¼Ò¸® ÆÄÀÏ ¼±ÅÃ" + +#: dia/plug-ins/cairo/diacairo.c:1066 +msgid "Cairo Portable Document Format" +msgstr "" + +#: dia/plug-ins/cairo/diacairo.c:1083 +msgid "Cairo PNG (with alpha)" +msgstr "" + +#: dia/plug-ins/cairo/diacairo.c:1092 +msgid "Cairo WMF" +msgstr "" + +#: dia/plug-ins/cairo/diacairo.c:1101 +msgid "Cairo old WMF" +msgstr "" + +#: dia/plug-ins/cairo/diacairo.c:1110 +msgid "Cairo Clipboard" +msgstr "" + +#: dia/plug-ins/xfig/xfig-import.c:451 +#, c-format +msgid "Color index %d too high, only 512 colors allowed. Using black instead." +msgstr "" + +#: dia/plug-ins/xfig/xfig-import.c:714 +#, c-format +msgid "Depth %d of of range, only 0-%d allowed.\n" +msgstr "" + +#: dia/plug-ins/xfig/xfig-import.c:1364 +#, c-format +msgid "Color number %d out of range 0..%d. Discarding color.\n" +msgstr "" + +#: eel-2.0/eel/eel-open-with-dialog.c:655 +#, fuzzy +msgid "Potential Applications " +msgstr "ÇÁ·Î±×·¥ ¼±ÅÃ" + +#: eel-2.0/eel/eel-open-with-dialog.c:666 +#, fuzzy +msgid "All Applications" +msgstr "ÇÁ·Î±×·¥À» ´õÇÕ´Ï´Ù" + +#: file-roller/src/ui.h:55 +#, fuzzy +msgid "Delete file from the archive" +msgstr "¾ÐÃà ÆÄÀÏ¿¡¼­ ÆÄÀÏÀ» Áö¿ó´Ï´Ù" + +#: gdm/daemon/slave.c:2101 +#, fuzzy +msgid "You must authenticate as root to shut down." +msgstr "¼³Á¤À» ½ÇÇàÇÏ·Á¸é root»ç¿ëÀÚÀÇ ¿­¼è±ÛÀ» ÀÔ·ÂÇϽʽÿÀ." + +#: gdm/daemon/slave.c:2121 +#, fuzzy +msgid "You must authenticate as root to restart the computer." +msgstr "¼³Á¤À» ½ÇÇàÇÏ·Á¸é root»ç¿ëÀÚÀÇ ¿­¼è±ÛÀ» ÀÔ·ÂÇϽʽÿÀ." + +#: gdm/daemon/verify-pam.c:344 +#, fuzzy +msgid "Your account is disabled. Please contact your system administrator" +msgstr "°èÁ¤ »ç¿ë±âÇÑÀÌ ³Ñ¾ú½À´Ï´Ù. ½Ã½ºÅÛ °ü¸®ÀÚ¿¡°Ô ¿¬¶ôÇϽʽÿÀ" + +#: gdm/daemon/verify-pam.c:1037 +msgid "" +"\n" +"Your account has been disabled." +msgstr "" + +#: gdm/daemon/verify-pam.c:1039 +#, fuzzy +msgid "" +"\n" +"Your account has expired." +msgstr "%sÀÇ ¿­¼è±ÛÀÌ ¸¸·áµÇ¾ú½À´Ï´Ù" + +#: gdm/gui/greeter/greeter_canvas_item.c:379 +#, fuzzy +msgid "" +msgstr "·ÎÄÃ(_O): " + +#: gdm/gui/greeter/greeter_parser.c:1129 +#, fuzzy +msgid "Domain:" +msgstr "¸í·É:" + +#: gedit/data/gedit.schemas.in.h:16 +#, fuzzy +msgid "Enable Document Info Plugin" +msgstr "gedit: ¹®¼­ Á¤º¸ Ç÷¯±×ÀÎ" + +#: gedit/data/gedit.schemas.in.h:18 +msgid "" +"Enable the Document Information plugin, which provides statistics about the " +"current document, such as the number of words." +msgstr "" + +#. Translators: This is the sorted list of encodings used by gedit +#. for auto-detecting the encoding of a file. "CURRENT" is the current locale encoding. +#. Only recognized encodings are used. +#: gedit/data/gedit.schemas.in.h:110 +msgid "[UTF-8,CURRENT,ISO-8859-15,UTF-16]" +msgstr "[UTF-8,CURRENT,EUC-KR,ISO-8859-1,UTF-16]" + +#: gedit/gedit/gedit-document.c:2079 +#, c-format +msgid "" +"This file has less than %d lines. Setting the cursor to last line of the " +"file." +msgstr "" + +#: gnome-applets-2.0/battstat/GNOME_BattstatApplet.xml.h:1 +msgid "Power Management _Settings..." 
+msgstr "" + +#: gnome-applets-2.0/battstat/battstat_applet.c:510 +#, c-format +msgid "" +"You have an unknown amount of battery power remaining (%d%% of the total " +"capacity)." +msgstr "" + +#: gnome-applets-2.0/battstat/properties.c:264 +#, c-format +msgid "" +"Could not run Power Management " +"Settings!\n" +"\n" +"%s" +msgstr "" + +#: gnome-applets-2.0/modemlights/GNOME_ModemLightsApplet.server.in.in.h:2 +#: gnome-applets-2.0/modemlights/modemlights.c:129 +#: ../modemlights/modemlights.c:1594 +msgid "Modem Lights" +msgstr "¸ðµ© ±ôºýÀÌ" + +#: gnome-applets-2.0/modemlights/modemlights.c:131 +msgid "" +"Released under the GNU general public license.\n" +"A modem status indicator and dialer.\n" +"Lights in order from the top or left are Send data and Receive data." +msgstr "" +"GNU General Public License·Î ¹èÆ÷µË´Ï´Ù.\n" +"¸ðµ© »óŸ¦ º¸¿©ÁÖ°í ÀüÈ­¸¦ °É¾îÁÝ´Ï´Ù.\n" +"À§¿¡¼­ºÎÅÍ È¤Àº Á·κÎÅÍ ±ôºýÀ̵éÀº °¢°¢ µ¥ÀÌÅ͸¦ ¹Þ°í Áִ°ÍÀ» º¸¿©ÁÝ´Ï´Ù." + +#: gnome-applets-2.0/modemlights/modemlights.c:512 +msgid "" +"You are currently connected.\n" +"Do you want to disconnect?" +msgstr "" +"ÇöÀç ¿¬°áµÇ¾î ÀÖ½À´Ï´Ù\n" +"¿¬°áÀ» ²÷°Ú½À´Ï±î?" + +#: gnome-applets-2.0/modemlights/modemlights.c:582 +#, c-format +msgid "%#.1fMb received / %#.1fMb sent / time: %.1d:%.2d" +msgstr "%#.1fMb ¹ÞÀ½ /%#.1fMb º¸³¿ / ½Ã°£: %.1d:%.2d" + +#: gnome-applets-2.0/modemlights/modemlights.c:587 +msgid "not connected" +msgstr "¿¬°áµÇ¾î ÀÖÁö ¾ÊÀ½" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:1 +msgid "Ask for confirmation when connecting/disconnecting" +msgstr "¿¬°á/ÇØÁ¦ÇÒ ¶§ È®ÀÎ Áú¹®À» ÇÕ´Ï´Ù" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:2 +msgid "Blink when connecting" +msgstr "¿¬°áÇÒ ¶§ ±ô¹ÚÀÔ´Ï´Ù" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:3 +msgid "Command executed when connecting" +msgstr "¿¬°áÇÒ ¶§ ½ÇÇàÇÒ ¸í·É¾î" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:4 +msgid "Command executed when disconnecting" +msgstr "¿¬°áÀ» ²÷À» ¶§ ½ÇÇàÇÒ ¸í·É¾î" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:5 +msgid "Display a confirmation dialog when connecting or disconnecting." +msgstr "¿¬°á/ÇØÁ¦ÇÒ ¶§ È®ÀÎ ´ëÈ­ »óÀÚ¸¦ º¸ÀÔ´Ï´Ù." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:6 +msgid "Make the applet blink when the modem is connecting." +msgstr "¸ðµ©ÀÌ ¿¬°áÁßÀÏ ¶§ ¾ÖÇø´À» ±ô¹ÚÀÔ´Ï´Ù." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:7 +msgid "Modem device name" +msgstr "¸ðµ© ÀåÄ¡ À̸§" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:8 +msgid "Modem lock file" +msgstr "¸ðµ© Àá±Ý ÆÄÀÏ" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:9 +msgid "Receive background color" +msgstr "¹Þ±â ¹ÙÅÁ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:10 +msgid "Receive foreground color" +msgstr "¹Þ±â ±ÛÀÚ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:11 +msgid "Send background color" +msgstr "º¸³»±â ¹ÙÅÁ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:12 +msgid "Send foreground color" +msgstr "º¸³»±â ±ÛÀÚ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:13 +msgid "Show connect time and throughput" +msgstr "¿¬°á ½Ã°£°ú 󸮷® º¸¿©ÁÖ±â" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:14 +msgid "" +"Show extra information about the connect time and amount of data transmitted " +"and received." +msgstr "¿¬°á ½Ã°£°ú ¼Û¼ö½ÅÇÑ µ¥ÀÌŸ ¾ç°ú °°Àº Á¤º¸¸¦ º¸¿©ÁÝ´Ï´Ù." 
+ +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:15 +msgid "Status connected color" +msgstr "¿¬°áµÈ »óÅÂÀÇ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:16 +msgid "Status not connected color" +msgstr "¿¬°á ¾È µÈ »óÅÂÀÇ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:17 +msgid "Status waiting connection color" +msgstr "¿¬°áÀ» ±â´Ù¸®´Â »óÅÂÀÇ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:18 +msgid "Text background color" +msgstr "ÅؽºÆ® ¹ÙÅÁ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:19 +msgid "Text foreground color" +msgstr "ÅؽºÆ® ±ÛÀÚ »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:20 +msgid "Text outline color" +msgstr "ÅؽºÆ® ¿Ü°û¼± »ö" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:21 +msgid "The background color of the button used to indicate data received." +msgstr "¹ÞÀº µ¥ÀÌŸ¸¦ ³ªÅ¸³»´Â µ¥ ¾µ ´ÜÃßÀÇ ¹ÙÅÁ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:22 +msgid "The background color of the button used to indicate data sent." +msgstr "º¸³½ µ¥ÀÌŸ¸¦ ³ªÅ¸³»´Â µ¥ ¾µ ´ÜÃßÀÇ ¹ÙÅÁ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:23 +msgid "" +"The color used to display the status button when the modem is connected." +msgstr "¸ðµ©ÀÌ ¿¬°áµÇ¾úÀ» ¶§ »óÅ ´ÜÃß¿¡ º¸¿©ÁÙ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:24 +msgid "" +"The color used to display the status button when the modem is connecting." +msgstr "¸ðµ©ÀÌ ¿¬°áÁßÀÏ ¶§ »óÅ ´ÜÃß¿¡ º¸¿©ÁÙ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:25 +msgid "" +"The color used to display the status button when the modem is not connected." +msgstr "¸ðµ©ÀÌ ¿¬°áÁßÀÌ ¾Æ´Ò ¶§ »óÅ ´ÜÃß¿¡ º¸¿©ÁÙ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:26 +msgid "The color used to indicate that data has been received." +msgstr "µ¥ÀÌŸ¸¦ ¹Þ¾ÒÀ½À» ¾Ë¸®´Â µ¥ »ç¿ëÇÒ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:27 +msgid "The color used to indicate that data has been sent." +msgstr "µ¥ÀÌŸ¸¦ º¸³ÂÀ½À» ¾Ë¸®´Â µ¥ »ç¿ëÇÒ »ö." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:28 +msgid "The fraction of a second until the applet updates." +msgstr "¾ÖÇø´ÀÌ ¾÷µ¥ÀÌÆ®ÇÏ´Â °£°Ý (ÃÊ ´ÜÀ§)." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:29 +msgid "The name of the modem device." +msgstr "¸ðµ© ÀåÄ¡ÀÇ À̸§." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:30 +msgid "The name of the modem lock file." +msgstr "¸ðµ© Àá±Ý ÆÄÀÏÀÇ À̸§." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:32 +msgid "Use isdn" +msgstr "ISDN »ç¿ë" + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:33 +msgid "Use isdn instead of ppp to connect the modem." +msgstr "¸ðµ©¿¡ ¿¬°áÇÏ´Â µ¥ PPP°¡ ¾Æ´Ï¶ó ISDNÀ» »ç¿ëÇÕ´Ï´Ù." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:34 +msgid "Use this command to connect the modem." +msgstr "¸ðµ©¿¡ ¿¬°áÇÒ ¶§ ÀÌ ¸í·É¾î¸¦ »ç¿ëÇÕ´Ï´Ù." + +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:35 +msgid "Use this command to disconnect the modem." +msgstr "¸ðµ© ¿¬°áÀ» ²÷À» ¶§ ÀÌ ¸í·É¾î¸¦ »ç¿ëÇÕ´Ï´Ù." 
+ +#: gnome-applets-2.0/modemlights/modemlights.schemas.in.h:36 +msgid "Verify owner of lock file" +msgstr "Àá±Ý ÆÄÀÏÀÇ ¼ÒÀ¯ÀÚ È®ÀÎ" + +#: gnome-applets-2.0/modemlights/properties.c:423 +msgid "Modem Lights Preferences" +msgstr "¸ðµ© ±ô¹ÚÀÌ ±âº» ¼³Á¤" + +#: gnome-applets-2.0/modemlights/properties.c:451 +msgid "U_pdate every:" +msgstr "¾÷µ¥ÀÌÆ®(_P): ¸Å" + +#: gnome-applets-2.0/modemlights/properties.c:468 +msgid "seconds" +msgstr "ÃÊ" + +#. extra info checkbox +#: gnome-applets-2.0/modemlights/properties.c:476 +msgid "Sho_w connect time and throughput" +msgstr "¿¬°á ½Ã°£°ú 󸮷® º¸¿©ÁÖ±â(_W)" + +#: gnome-applets-2.0/modemlights/properties.c:486 +msgid "B_link connection status when connecting" +msgstr "¿¬°áÇÒ ¶§ ¿¬°á »óÅ ±ô¹ÚÀ̱â(_L)" + +#: gnome-applets-2.0/modemlights/properties.c:496 +msgid "Connections" +msgstr "¿¬°á" + +#: gnome-applets-2.0/modemlights/properties.c:505 +msgid "Co_nnection command:" +msgstr "¿¬°á ¸í·É¾î(_N):" + +#: gnome-applets-2.0/modemlights/properties.c:531 +msgid "_Disconnection command:" +msgstr "¿¬°á ÇØÁ¦ ¸í·É¾î(_D):" + +#. confirmation checkbox +#: gnome-applets-2.0/modemlights/properties.c:553 +msgid "Con_firm connection" +msgstr "¿¬°á È®ÀÎ(_F)" + +#: gnome-applets-2.0/modemlights/properties.c:572 +msgid "Receive Data" +msgstr "µ¥ÀÌÅÍ ¹Þ±â" + +#: gnome-applets-2.0/modemlights/properties.c:578 +msgid "_Foreground:" +msgstr "±ÛÀÚ»ö(_F):" + +#: gnome-applets-2.0/modemlights/properties.c:583 +msgid "Send Data" +msgstr "µ¥ÀÌÅÍ º¸³»±â" + +#: gnome-applets-2.0/modemlights/properties.c:588 +msgid "Foregroun_d:" +msgstr "±ÛÀÚ»ö(_D):" + +#: gnome-applets-2.0/modemlights/properties.c:590 +msgid "Backg_round:" +msgstr "¹ÙÅÁ»ö(_R):" + +#: gnome-applets-2.0/modemlights/properties.c:593 +msgid "Connection Status" +msgstr "¿¬°á »óÅÂ" + +#: gnome-applets-2.0/modemlights/properties.c:601 +msgid "Co_nnected:" +msgstr "¿¬°áµÊ(_N):" + +#: gnome-applets-2.0/modemlights/properties.c:603 +msgid "Disconnec_ted:" +msgstr "¿¬°á ²÷±è(_D):" + +#: gnome-applets-2.0/modemlights/properties.c:606 +msgid "C_onnecting:" +msgstr "¿¬°áÁß(_O):" + +#: gnome-applets-2.0/modemlights/properties.c:617 +msgid "For_eground:" +msgstr "±ÛÀÚ»ö(_E):" + +#: gnome-applets-2.0/modemlights/properties.c:619 +msgid "Bac_kground:" +msgstr "¹ÙÅÁ»ö(_K):" + +#: gnome-applets-2.0/modemlights/properties.c:621 +msgid "O_utline:" +msgstr "¿Ü°û¼±(_U):" + +#: gnome-applets-2.0/modemlights/properties.c:633 +msgid "Modem Options" +msgstr "¸ðµ© ¿É¼Ç" + +#: gnome-applets-2.0/modemlights/properties.c:642 +msgid "_Device:" +msgstr "ÀåÄ¡(_D):" + +#: gnome-applets-2.0/modemlights/properties.c:668 +msgid "_Lock file:" +msgstr "Àá±Ý ÆÄÀÏ(_L):" + +#: gnome-applets-2.0/modemlights/properties.c:689 +msgid "_Verify owner of lock file" +msgstr "Àá±Ý ÆÄÀÏÀÇ ¼ÒÀ¯ÀÚ È®ÀÎ(_V)" + +#. 
ISDN checkbox +#: gnome-applets-2.0/modemlights/properties.c:700 +msgid "U_se ISDN" +msgstr "ISDN ¾²±â(_S)" + +#: gnome-applets-2.0/modemlights/properties.c:717 +msgid "Advanced" +msgstr "°í±Þ" + +#: gnome-menus/desktop-directories/Development.directory.in.h:1 +#, fuzzy +msgid "Software Development" +msgstr "¼ÒÇÁÆ®¿þ¾î °³¹ßÀ» À§ÇÑ µµ±¸" + +#: gnome-menus/desktop-directories/Gnomecc-Hardware.directory.in.h:1 +msgid "Hardware" +msgstr "" + +#: gnome-menus/desktop-directories/Gnomecc-Hardware.directory.in.h:2 +msgid "Hardware Settings" +msgstr "" + +#: gnome-menus/desktop-directories/Gnomecc-LookAndFeel.directory.in.h:1 +msgid "Appearance of the desktop" +msgstr "" + +#: gnome-menus/desktop-directories/Gnomecc-LookAndFeel.directory.in.h:2 +msgid "Look and Feel" +msgstr "" + +#: gnome-menus/desktop-directories/Gnomecc-Personal.directory.in.h:1 +msgid "Personal" +msgstr "°³ÀÎ" + +#: gnome-menus/desktop-directories/Gnomecc-System.directory.in.h:1 +#: gnome-system-monitor/src/interface.c:1158 +#, fuzzy +msgid "System" +msgstr "½Ã½ºÅÛ µµ±¸" + +#: gnome-menus/desktop-directories/Gnomecc-System.directory.in.h:2 +msgid "System Settings" +msgstr "" + +#. translators: use %l even in 24 hour locales, +#. * there is a switch in preferences. +#. +#: gnome-panel-2.0/applets/clock/clock.c:337 +#, fuzzy +msgid "" +"%l:%M\n" +"%S %p" +msgstr "%p %I:%M:%S" + +#: gnome-panel-2.0/applets/clock/clock.c:337 +#, fuzzy +msgid "" +"%l:%M\n" +"%p" +msgstr "%p %I:%M" + +#. translators: reverse the order of these arguments +#. * if the time should come before the +#. * date on a clock in your locale. +#. +#: gnome-panel-2.0/applets/clock/clock.c:343 +#, fuzzy +msgid "" +"%H:%M\n" +"%S" +msgstr "%H:%M:%S" + +#: gnome-panel-2.0/applets/clock/clock.c:344 +#, fuzzy +msgid "" +"%a\n" +"%b %e" +msgstr "%b %eÀÏ (%a)" + +#: gnome-panel-2.0/applets/clock/clock.c:349 +#, fuzzy +msgid "" +"%l\n" +"%M\n" +"%S\n" +"%p" +msgstr "%p %I:%M:%S" + +#: gnome-panel-2.0/applets/clock/clock.c:349 +#, fuzzy +msgid "" +"%l\n" +"%M\n" +"%p" +msgstr "%p %I:%M" + +#: gnome-panel-2.0/applets/clock/clock.c:351 +#, fuzzy +msgid "" +"%H\n" +"%M\n" +"%S" +msgstr "%H:%M:%S" + +#: gnome-panel-2.0/applets/clock/clock.c:351 +#, fuzzy +msgid "" +"%H\n" +"%M" +msgstr "%H:%M" + +#: gnome-panel-2.0/applets/clock/clock.c:352 +#, fuzzy +msgid "" +"%a\n" +"%b\n" +"%e" +msgstr "%b %eÀÏ (%a)" + +#: gnome-panel-2.0/gnome-panel/panel-addto.c:128 +msgid "Traditional Main Menu" +msgstr "" + +#: gnome-panel-2.0/gnome-panel/panel-addto.c:129 +#, fuzzy +msgid "The traditional GNOME menu" +msgstr "±×³ð ÁÖ¸Þ´º" + +#: gnome-panel-2.0/gnome-panel/panel-context-menu.c:163 +#: gnome-panel-2.0/gnome-panel/panel-context-menu.c:294 +#, fuzzy +msgid "_Lock Panel Postion" +msgstr "Æгο¡ Àá±×±â(_L)" + +#: gnome-panel-2.0/gnome-panel/panel-context-menu.c:163 +#: gnome-panel-2.0/gnome-panel/panel-context-menu.c:294 +msgid "_Allow Panel to be Moved" +msgstr "" + +#: gnome-panel-2.0/gnome-panel/panel-global.schemas.in.h:10 +#: gnome-panel-2.0/gnome-panel/panel-toplevel.schemas.in.h:9 +msgid "" +"Disable support for moving a panel with a mouse drag. It has been know to " +"cause problems for users that accidentally move or resize their panels." +msgstr "" + +#: gnome-panel-2.0/gnome-panel/panel-global.schemas.in.h:22 +#, fuzzy +msgid "Lock Panel Position" +msgstr "ÆгΠ¹æÇâ" + +#: gnome-panel-2.0/gnome-panel/panel-recent.c:57 +#, c-format +msgid "%s does not exist." 
+msgstr "" + +#: gnome-panel-2.0/gnome-panel/panel-toplevel.schemas.in.h:26 +msgid "Lock the panel position" +msgstr "" + +#: gnome-session-2.0/gnome-session/logout.c:482 +#, fuzzy +msgid "_Suspend the computer" +msgstr "ÄÄÇ»ÅÍ ´Ù½Ã ½ÃÀÛ(_R)" + +#: gnome-session-2.0/gnome-session/session-properties-capplet.c:125 +msgid "Enable" +msgstr "»ç¿ë" + +#: gnome-session-2.0/gnome-session/session-properties-capplet.c:131 +#: gnome-session-2.0/gnome-session/session-properties-capplet.c:472 +msgid "Disable" +msgstr "»ç¿ë ¾Ê±â" + +#: gnome-system-monitor/gnome-system-monitor.desktop.in.in.h:1 +#, fuzzy +msgid "GNOME System Monitor" +msgstr "½Ã½ºÅÛ °¨½Ã" + +#. hardware section +#: gnome-system-monitor/src/interface.c:458 +msgid "Hardware" +msgstr "" + +#: gnome-system-monitor/src/interface.c:479 +#, fuzzy +msgid "Memory:" +msgstr "¸Þ¸ð¸®" + +#: gnome-system-monitor/src/interface.c:499 +#, fuzzy, c-format +msgid "Processor %d:" +msgstr "ÇÁ·Î¼¼½º Çʵå" + +#: gnome-system-monitor/src/interface.c:504 +#, fuzzy +msgid "Processor:" +msgstr "ÇÁ·Î¼¼½º" + +#. disk space section +#: gnome-system-monitor/src/interface.c:524 +msgid "System Status" +msgstr "" + +#: gnome-system-monitor/src/interface.c:545 +msgid "User Space Free:" +msgstr "" + +#: gnome-utils-2.0/gnome-screenshot/screenshot-xfer.c:161 +#, fuzzy, c-format +msgid "" +"Insufficient permissions to save the file in:\n" +"%s" +msgstr "Ç÷ÎÇÇ %s ÀåÄ¡¸¦ ¿­ ±ÇÇÑÀÌ ¾ø½À´Ï´Ù." + +#: gnome-vfs-2.0/libgnomevfs/gnome-vfs-filesystem-type.c:68 +#, fuzzy +msgid "SubMount Volume" +msgstr "SuperMount º¼·ý" + +#: gnome-vfs-2.0/libgnomevfs/gnome-vfs-volume-ops.c:471 +msgid "Could not get a DBus connection" +msgstr "" + +#: gnome-vfs-2.0/libgnomevfs/gnome-vfs-volume-ops.c:486 +#, fuzzy +msgid "Could not create dbus message" +msgstr "³ª¸ÓÁö¸¦ ÀúÀåÇÒ ¼ö ¾ø½À´Ï´Ù" + +#: gnome-vfs-2.0/libgnomevfs/gnome-vfs-volume-ops.c:506 +#, fuzzy +msgid "Could not append args to dbus message" +msgstr "ÁÖ¿ä ºê¶ó¿ìÀú¸¦ ãÀ» ¼ö°¡ ¾ø½À´Ï´Ù" + +#: gnome-vfs-2.0/libgnomevfs/gnome-vfs-volume-ops.c:520 +msgid "Could not append args args to dbus message" +msgstr "" + +#: gnome-vfs-2.0/libgnomevfs/gnome-vfs-volume-ops.c:528 +#, fuzzy +msgid "Operation failed" +msgstr "ÀÛµ¿ Ãë¼ÒµÇ¾ú½À´Ï´Ù" + +#: gnome-vfs-2.0/modules/network-method.c:1523 +msgid "Novell Services" +msgstr "" + +#: gtk+/gtk/gtkfilesel.c:1753 +msgid "Home" +msgstr "Áý" + +#: gtk+/gtk/gtkfilesel.c:1763 +msgid "Desktop" +msgstr "" + +#: gtk+/gtk/gtkfilesel.c:1777 +msgid "Documents" +msgstr "" + +#: gtk20/gtk/gtkfilechooserdefault.c:1652 +#: nautilus/src/nautilus-places-sidebar.c:159 +msgid "Search" +msgstr "" + +#. Accessible object name for the file chooser's shortcuts pane +#: gtk20/gtk/gtkfilechooserdefault.c:3537 +msgid "Places" +msgstr "" + +#. Column header for the file chooser's shortcuts pane +#: gtk20/gtk/gtkfilechooserdefault.c:3591 +#, fuzzy +msgid "_Places" +msgstr "¹Ù²Ù±â(_R)" + +#: gtk20/gtk/gtkfilechooserdefault.c:4642 +#, fuzzy +msgid "Type a file name" +msgstr "À߸øµÈ ÆÄÀÏ À̸§" + +#: gtk20/gtk/gtkfilechooserdefault.c:7559 +#, fuzzy +msgid "Could not start the search process" +msgstr "³ª¸ÓÁö¸¦ ÀúÀåÇÒ ¼ö ¾ø½À´Ï´Ù" + +#: gtk20/gtk/gtkfilechooserdefault.c:7560 +msgid "" +"The program was not able to create a connection to the Beagle daemon. " +"Please make sure Beagle is running." 
+msgstr "" + +#: gtk20/gtk/gtkfilechooserdefault.c:7773 +#, fuzzy +msgid "Could not send the search request" +msgstr "³ª¸ÓÁö¸¦ ÀúÀåÇÒ ¼ö ¾ø½À´Ï´Ù" + +#: gtk20/gtk/gtkfilechooserdefault.c:8419 +#, fuzzy, c-format +msgid "%.1f KB" +msgstr "%.1f K" + +#: gtk20/gtk/gtkfilechooserdefault.c:8421 +#, fuzzy, c-format +msgid "%.1f MB" +msgstr "%.1f M" + +#: gtk20/gtk/gtkfilechooserdefault.c:8423 +#, fuzzy, c-format +msgid "%.1f GB" +msgstr "%.1f G" + +#: gtk20/gtk/gtkfilesel.c:778 +#, fuzzy +msgid "D_esktop" +msgstr "¹ÙÅÁ È­¸é" + +#: gtk20/gtk/gtkfilesel.c:800 +#, fuzzy +msgid "Docu_ments" +msgstr "¹®¼­ ÀÛ¼º" + +#. These are the commonly used font styles, listed here only for +#. translations. +#: gtk20/gtk/gtkfontsel.c:78 +msgid "Ultra-Light" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:79 +msgid "Light" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:80 +msgid "Medium" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:81 +#, fuzzy +msgid "Normal" +msgstr "º¸Åë Å©±â(_N)" + +#: gtk20/gtk/gtkfontsel.c:82 +msgid "Regular" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:83 +#, fuzzy +msgid "Italic" +msgstr "±â¿ïÀÓ²Ã(_I)" + +#: gtk20/gtk/gtkfontsel.c:84 +msgid "Oblique" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:85 +msgid "Semi-Bold" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:86 +#, fuzzy +msgid "Bold" +msgstr "±½°Ô(_B)" + +#: gtk20/gtk/gtkfontsel.c:87 +msgid "Ultra-Bold" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:88 +msgid "Heavy" +msgstr "" + +#: gtk20/gtk/gtkfontsel.c:89 +#, fuzzy +msgid "Bold Italic" +msgstr "±â¿ïÀÓ²Ã(_I)" + +#: libgnomeui-2.0/file-chooser/gtkfilesystemgnomevfs.c:1207 +#, fuzzy +msgid "Network Servers" +msgstr "³×Æ®¿öÅ© ÇÁ·Ï½Ã" + +#: metacity/src/metacity.schemas.in.h:18 +msgid "If true, enables the Windows flag keys to show the panel's main menu" +msgstr "" + +#: metacity/src/metacity.schemas.in.h:19 +msgid "If true, horizontal viewport constraints are used" +msgstr "" + +#: metacity/src/metacity.schemas.in.h:21 +msgid "" +"If true, metacity will give the user feedback using window border effects." +msgstr "" + +#: metacity/src/metacity.schemas.in.h:23 +msgid "" +"If true, pressing a mouse button on a window will cause it to be raised to " +"the top of the stack. If false, windows must be raised explicitly by " +"clicking on their title bar." +msgstr "" + +#: metacity/src/metacity.schemas.in.h:25 +msgid "" +"If true, then pressing the Windows flag keys will cause the panel's main " +"menu to appear." +msgstr "" + +#: metacity/src/metacity.schemas.in.h:27 +msgid "If true, use window border effects" +msgstr "" + +#: metacity/src/metacity.schemas.in.h:28 +msgid "" +"If true, windows are not allowed to be horizontally moved outside the " +"viewport." +msgstr "" + +#: metacity/src/metacity.schemas.in.h:65 +msgid "Raise windows when a mouse button is pressed on them" +msgstr "" + +#: nautilus/libnautilus-private/apps_nautilus_preferences.schemas.in.h:49 +#, fuzzy +msgid "" +"If this is set to true, an icon linking to the Network Servers view will be " +"put on the desktop." +msgstr "ÂüÀ¸·Î ¼³Á¤Çϸé, ÈÞÁöÅë ¾ÆÀÌÄÜÀ» ¹ÙÅÁ È­¸é¿¡ ¸¸µì´Ï´Ù." + +#: nautilus/libnautilus-private/apps_nautilus_preferences.schemas.in.h:66 +#, fuzzy +msgid "Network Servers icon visible on the desktop" +msgstr "¹ÙÅÁ È­¸é¿¡ Ȩ ¾ÆÀÌÄÜÀ» º¸ÀÔ´Ï´Ù" + +#: nautilus/libnautilus-private/nautilus-desktop-link-monitor.c:133 +#, fuzzy, c-format +msgid "You cannot move the drive \"%s\" to the trash." +msgstr "º¼·ý \"%s\"(À»)¸¦ ÈÞÁöÅëÀ¸·Î ¿Å±æ ¼ö ¾ø½À´Ï´Ù." 
+ +#: nautilus/src/file-manager/fm-error-reporting.c:137 +#, c-format +msgid "" +"Couldn't rename \"%s\" to \"%s\". Please make sure the new name has only " +"valid characters in it." +msgstr "" + +#: nautilus/src/nautilus-navigation-window-menus.c:455 +#: nautilus/src/nautilus-spatial-window.c:818 +msgid "_Search" +msgstr "ã±â(_S)" + +#. name, stock id, label +#: nautilus/src/nautilus-navigation-window-menus.c:456 +#: nautilus/src/nautilus-spatial-window.c:819 +#, fuzzy +msgid "Search for files" +msgstr "ÆÄÀϸíÀ¸·Î¸¸ ÆÄÀÏã±â" + +#: nautilus/src/nautilus-places-sidebar.c:373 +#, fuzzy +msgid "Mount failed" +msgstr "ÆÄÀÏ ¿Å±â´Â Áß" + +#: nautilus/src/nautilus-search-bar.c:181 +#, fuzzy +msgid "Search:" +msgstr "À̸§" + +#. name, stock id +#: nautilus/src/nautilus-window-menus.c:689 +#, fuzzy +msgid "_Network" +msgstr "³×Æ®¿öÅ©" + +#. label, accelerator +#: nautilus/src/nautilus-window-menus.c:690 +#, fuzzy +msgid "Go to the network location" +msgstr "ÄÄÇ»ÅÍ À§Ä¡·Î °©´Ï´Ù" + +#: nautilus-share/src/nautilus-share.c:211 +#, c-format +msgid "" +"Nautilus needs to add some permissions to your folder \"%s\" in order to " +"share it" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:219 +#, c-format +msgid "" +"The folder \"%s\" needs the following extra permissions for sharing to " +"work:\n" +"%s%s%sDo you want Nautilus to add these permissions to the folder " +"automatically?" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:223 +msgid " - read permission by others\n" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:224 +msgid " - write permission by others\n" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:225 +msgid " - execute permission by others\n" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:229 +msgid "Add the permissions automatically" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:255 +#, c-format +msgid "Could not change the permissions of folder \"%s\"" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:525 +msgid "Share name is too long" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:559 +msgid "The share name cannot be empty" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:572 +#, c-format +msgid "Error while getting share information: %s" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:582 +msgid "Another share has the same name" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:728 +msgid "There was an error while getting the sharing information" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:820 +msgid "Modify _Share" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:822 +msgid "Create _Share" +msgstr "" + +#: nautilus-share/src/nautilus-share.c:1176 +msgid "Sharing Options" +msgstr "" + +#: nautilus-share/src/shares.c:122 +#, c-format +msgid "%s %s %s returned with signal %d" +msgstr "" + +#: nautilus-share/src/shares.c:131 +#, c-format +msgid "%s %s %s failed for an unknown reason" +msgstr "" + +#: nautilus-share/src/shares.c:151 +#, c-format +msgid "'net usershare' returned error %d: %s" +msgstr "" + +#: nautilus-share/src/shares.c:153 +#, c-format +msgid "'net usershare' returned error %d" +msgstr "" + +#: nautilus-share/src/shares.c:184 +msgid "the output of 'net usershare' is not in valid UTF-8 encoding" +msgstr "" + +#: nautilus-share/src/shares.c:418 ../src/shares.c:577 +msgid "Failed" +msgstr "" + +#: nautilus-share/src/shares.c:512 +#, c-format +msgid "Samba's testparm returned with signal %d" +msgstr "" + +#: nautilus-share/src/shares.c:518 +msgid "Samba's testparm failed for an unknown reason" +msgstr "" + +#: 
nautilus-share/src/shares.c:533 +#, c-format +msgid "Samba's testparm returned error %d: %s" +msgstr "" + +#: nautilus-share/src/shares.c:535 +#, c-format +msgid "Samba's testparm returned error %d" +msgstr "" + +#: nautilus-share/src/shares.c:642 +#, c-format +msgid "Cannot remove the share for path %s: that path is not shared" +msgstr "" + +#: nautilus-share/src/shares.c:688 +msgid "" +"Cannot change the path of an existing share; please remove the old share " +"first and add a new one" +msgstr "" + +#: nautilus-share/interfaces/share-dialog.glade.in.h:2 +msgid "Co_mment:" +msgstr "" + +#: nautilus-share/interfaces/share-dialog.glade.in.h:3 +msgid "Share _name:" +msgstr "" + +#: nautilus-share/interfaces/share-dialog.glade.in.h:4 +msgid "Share this _folder" +msgstr "" + +#: nautilus-share/interfaces/share-dialog.glade.in.h:5 +msgid "_Allow other people to write in this folder" +msgstr "" + +#: shared-mime-info/freedesktop.org.xml.in.h:317 +#, fuzzy +msgid "WMA audio" +msgstr "WAV ¿Àµð¿À" + +#: xchat/src/common/cfgfiles.c:729 +#, fuzzy +msgid "" +"* Running IRC as root is not recommended! You should\n" +" create a User Account and use that to login.\n" +msgstr "" +"* IRC¸¦ ·çÆ®°èÁ¤À¸·Î ½ÇÇàÇß½À´Ï´Ù!!\n" +" »õ·Î¿î »ç¿ëÀÚ ¾îÄ«¿îÆ®¸¦ »ý¼ºÇÏ°í ±×°ÍÀ» ÀÌ¿ëÇϽʽÿÀ.\n" + +#: xchat/src/fe-gtk/setup.c:209 +#, fuzzy +msgid "Open an extra tab for outgoing msg" +msgstr "¼­¹ö ¾Ë¸²À» ÅÇÀ¸·Î Ç¥½ÃÇÕ´Ï´Ù." + +#: xmms/Effect/echo_plugin/gui.c:23 ../Effect/echo_plugin/gui.c:135 +#: xmms/Effect/stereo_plugin/stereo.c:56 ../Effect/stereo_plugin/stereo.c:120 +#: xmms/Effect/voice/about.c:35 ../General/ir/about.c:51 +#: xmms/General/ir/configure.c:205 ../General/ir/configure.c:376 +#: xmms/General/joystick/about.c:35 ../General/joystick/configure.c:272 +#: xmms/General/song_change/song_change.c:340 ../Input/cdaudio/cddb.c:854 +#: xmms/Input/cdaudio/cddb.c:862 ../Input/cdaudio/cddb.c:870 +#: xmms/Input/cdaudio/cddb.c:894 ../Input/cdaudio/configure.c:715 +#: xmms/Input/mikmod/plugin.c:125 ../Input/mikmod/plugin.c:616 +#: xmms/Input/mpg123/configure.c:616 ../Input/mpg123/fileinfo.c:188 +#: xmms/Input/mpg123/fileinfo.c:295 ../Input/mpg123/http.c:208 +#: xmms/Input/mpg123/mpg123.c:1165 ../Input/tonegen/tonegen.c:55 +#: xmms/Input/vorbis/configure.c:449 ../Input/vorbis/fileinfo.c:210 +#: xmms/Input/vorbis/http.c:208 ../Input/vorbis/vorbis.c:778 +#: xmms/Output/OSS/configure.c:497 ../Output/disk_writer/disk_writer.c:137 +#: xmms/Output/disk_writer/disk_writer.c:406 ../Output/esd/about.c:44 +#: xmms/Output/esd/configure.c:208 ../Output/solaris/about.c:24 +#: xmms/Output/solaris/configure.c:266 ../Output/sun/about.c:36 +#: xmms/Output/sun/configure.c:557 ../Output/alsa/about.c:46 +#: xmms/Output/alsa/configure.c:437 ../Visualization/blur_scope/config.c:101 +#: xmms/Visualization/opengl_spectrum/configure.c:73 +#: ../libxmms/dirbrowser.c:342 xmms/xmms/equalizer.c:1371 +#: ../xmms/equalizer.c:1377 ../xmms/equalizer.c:1441 +#: xmms/xmms/equalizer.c:1450 ../xmms/equalizer.c:1701 ../xmms/input.c:254 +#: xmms/xmms/main.c:3147 ../xmms/playlistwin.c:726 ../xmms/playlistwin.c:837 +#: xmms/xmms/playlistwin.c:1421 ../xmms/playlistwin.c:1474 +#: xmms/xmms/prefswin.c:313 ../xmms/prefswin.c:1207 ../xmms/util.c:582 +msgid "OK" +msgstr "È®ÀÎ" + +#: xmms/General/song_change/song_change.c:238 +#, fuzzy +msgid "Commands" +msgstr "¸í·É¾î:" + +#: xmms/General/song_change/song_change.c:245 +#, fuzzy +msgid "Shell-command to run when xmms starts a new song." +msgstr "¿¬ÁÖ ¸ñ·ÏÀÇ °îÀ» ¸ðµÎ ¿¬ÁÖÇßÀ» ¶§ ½ÇÇàµÉ ½© ¸í·É¾î¸¦ ÀÔ·ÂÇϼ¼¿ä." 
+ +#: xmms/General/song_change/song_change.c:268 +#, fuzzy +msgid "Shell-command to run toward the end of a song." +msgstr "¿¬ÁÖ ¸ñ·ÏÀÇ °îÀ» ¸ðµÎ ¿¬ÁÖÇßÀ» ¶§ ½ÇÇàµÉ ½© ¸í·É¾î¸¦ ÀÔ·ÂÇϼ¼¿ä." + +#: xmms/General/song_change/song_change.c:313 +#, c-format +msgid "" +"You can use the following format strings which will be substituted before " +"calling the command (not all are useful for the end-of-playlist command).\n" +"\n" +"%%F: Frequency (in hertz)\n" +"%%c: Number of channels\n" +"%%f: filename (full path)\n" +"%%l: length (in milliseconds)\n" +"%%n or %%s: Song name\n" +"%%r: Rate (in bits per second)\n" +"%%t: Playlist position (%%02d)\n" +"%%p: Currently playing (1 or 0)" +msgstr "" + +#: xmms/Input/cdaudio/configure.c:339 +#, c-format +msgid "Directory %s exists, but you do not have permission to access it." +msgstr "" + +#: xmms/Input/mikmod/plugin.c:582 +msgid "Always use filename as title" +msgstr "" + +#: xmms/Input/mpg123/configure.c:593 +msgid "Override default ID3V2 encoding" +msgstr "" + +#: xmms/Input/mpg123/configure.c:602 +msgid "Encoding name:" +msgstr "" + +#: xmms/Input/mpg123/fileinfo.c:507 +#, fuzzy +msgid "CCITT J.17" +msgstr "CCIT J.17" + +#: xmms/Input/vorbis/fileinfo.c:852 +#, fuzzy, c-format +msgid "Average bitrate: %.1f kbps" +msgstr "¸í¸ñ ºñÆ®À²: %d kbps" + +#: xmms/Input/vorbis/fileinfo.c:857 +#, c-format +msgid "Vendor: %s" +msgstr "" + +#: xmms/Output/OSS/about.c:31 +#, fuzzy +msgid "" +"XMMS OSS Driver\n" +"\n" +" This program is free software; you can redistribute it and/or modify\n" +"it under the terms of the GNU General Public License as published by\n" +"the Free Software Foundation; either version 2 of the License, or\n" +"(at your option) any later version.\n" +"\n" +"This program is distributed in the hope that it will be useful,\n" +"but WITHOUT ANY WARRANTY; without even the implied warranty of\n" +"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n" +"GNU General Public License for more details.\n" +"\n" +"You should have received a copy of the GNU General Public License\n" +"along with this program; if not, write to the Free Software\n" +"Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,\n" +"USA.\n" +"\n" +"XMMS 3DSE patch release 11 for XMMS 1.2.5\n" +"Copyright (C) 2001 - Cornelis Frank\n" +"e-mail: \n" +"home page: http://studwww.rug.ac.be/~fcorneli/xmms" +msgstr "" +"XMMS ALSA µå¶óÀ̹ö\n" +"\n" +"º» ÇÁ·Î±×·¡Àº ¹«·á ¼ÒÇÁÆ®¿þ¾îÀÔ´Ï´Ù; ¿©·¯ºÐ²²¼­´Â FSFÀÇ GNU General\n" +"Public License¿¡ ÀÇ°ÅÇÏ¿© º» ÇÁ·Î±×·¥À» ¼öÁ¤ ¶Ç´Â ¹èÆ÷ÇÏ½Ç ¼ö\n" +"ÀÖ½À´Ï´Ù; GPL ¶óÀ̼¾½º µÎ¹ø° ¹öÀü ¶Ç´Â (»ç¿ëÀÚ ¼±Åÿ¡ ÀÇ°Å) ÀÌÈÄÀÇ\n" +"¹öÀü.\n" +"\n" +"º» ÇÁ·Î±×·¥ÀÌ À¯¿ëÇÏ°Ô »ç¿ëµÇ¾îÁö±æ ¹Ù¶ó¸ç, ±×·¯³ª º» ÇÁ·Î±×·¥Àº\n" +"»óÇ°¼º ¶Ç´Â ƯÁ¤ ¸ñÀû¿¡ »ç¿ë °¡´É¼ºÀº ¹°·Ð ¾î¶²ÇÑ º¸Áõµµ ÇÏÁú\n" +"¾Ê½À´Ï´Ù. 
º¸´Ù ÀÚ¼¼ÇÑ »çÇ×Àº GNU General Public License¸¦\n" +"Âü°íÇϽñ⠹ٶø´Ï´Ù.\n" +"\n" +"¿©·¯ºÐ²²¼­´Â GNU General Public License¸¦ º» ÇÁ·Î±×·¥°ú ÇÔ²²\n" +"¹ÞÀ¸¼ÌÀ»°ÍÀÔ´Ï´Ù; ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é, the Free Software\n" +"Foundation, Inc,, 59 Template Place - Suite 330, Boston, NA 02111-1307,\n" +"USA·Î ¿¬¶ôÇϽñ⠹ٶø´Ï´Ù.\n" +"Author: Matthieu Sozeau (mattam@altern.org)" + +#: xmms/Output/disk_writer/disk_writer.c:389 +msgid "Don't strip file name extension" +msgstr "" + +#: xmms/Output/alsa/configure.c:360 +#, fuzzy +msgid "Soundcard:" +msgstr "»ç¿îµåÄ«µå #%d - %s" + +#: xmms/Output/alsa/configure.c:402 +msgid "XMMS:" +msgstr "" + +#: xmms/xmms/about.c:49 +msgid "Ian 'Hixie' Hickson" +msgstr "" + +#: xmms/xmms/main.c:137 +msgid "/Time Display (MMM:SS)" +msgstr "" + +#: xmms/xmms/main.c:273 +#, fuzzy +msgid "/Play" +msgstr "¿¬ÁÖ" + +#: xmms/xmms/main.c:274 +#, fuzzy +msgid "/Play/Play File" +msgstr "/ÆÄÀÏ ¿¬ÁÖ" + +#: xmms/xmms/main.c:275 +#, fuzzy +msgid "/Play/Play Directory" +msgstr "/µð·ºÅ丮 ¿¬ÁÖ" + +#: xmms/xmms/main.c:276 +#, fuzzy +msgid "/Play/Play Location" +msgstr "/À§Ä¡ ¿¬ÁÖ" + +#: xmms/xmms/main.c:277 +#, fuzzy +msgid "/Play/Play AudioCD" +msgstr "/¿¬ÁÖ/¿¬ÁÖ" + +#. I18N: -Q, --queue switch +#: xmms/xmms/main.c:3410 +#, c-format +msgid "Add file(s) to playlist and queue" +msgstr "" + +#. I18N: Only "SWITCH" may be translated +#: xmms/xmms/main.c:3411 ../xmms/main.c:3415 ../xmms/main.c:3419 +msgid "[=SWITCH]" +msgstr "" + +#. I18N: -S, --toggle-shuffle switch +#: xmms/xmms/main.c:3413 +#, c-format +msgid "Toggle the 'shuffle' flag." +msgstr "" + +#. I18N: -R, --toggle-repeat switch +#: xmms/xmms/main.c:3417 +#, c-format +msgid "Toggle the 'repeat' flag." +msgstr "" + +#. I18N: -A, --toggle-advance switch +#: xmms/xmms/main.c:3421 +#, c-format +msgid "Toggle the 'no playlist advance' flag." +msgstr "" + +#. I18N: "on" and "off" is not translated. +#: xmms/xmms/main.c:3424 +#, c-format +msgid "SWITCH may be either 'on' or 'off'\n" +msgstr "" + +#. I18N: -q, --quit switch +#: xmms/xmms/main.c:3436 +#, fuzzy, c-format +msgid "Close remote session." +msgstr "¿ø°Ý È£½ºÆ® »ç¿ë" + +#. +#. * I18N: "on" and "off" is not +#. * translated. +#. +#: xmms/xmms/main.c:3612 ../xmms/main.c:3636 ../xmms/main.c:3660 +#, c-format +msgid "Value '%s' not understood, must be either 'on' or 'off'.\n" +msgstr "" + +#: xmms/xmms/playlistwin.c:832 +#, c-format +msgid "" +"Unknown file type for %s.\n" +"The filename of the playlist should end in either \".m3u\" or \".pls\"." +msgstr "" + +#: xmms/xmms/prefswin.c:1036 +msgid "" +"When moving windows around, snap them together, and towards screen edges at " +"this distance" +msgstr "" + +#: xmms/xmms/prefswin.c:1058 +#, fuzzy +msgid "" +"Recommended if you want to load playlists that were created in MS Windows" +msgstr "MS À©µµ¿ìÁî¿¡¼­ ¸¸µé¾îÁø ¿¬ÁÖ¸ñ·ÏÀ» ÀоîµéÀÌ°í ½ÍÀ» °æ¿ì »ç¿ë" + +#: xmms/xmms/prefswin.c:1074 +msgid "Store information such as song title and length to playlists" +msgstr "" + +#: xmms/xmms/prefswin.c:1152 +#, fuzzy +msgid "Advanced Title Options" +msgstr "°í±Þ ¼³Á¤" + +#: xmms/xmms/prefswin.c:1160 +msgid "" +"%0.2n - Display a 0 padded 2 char long tracknumber\n" +"%!p(...) - Display what's inside parentheses if Performer (%p) is not set\n" +"%?p(...) 
- Display what's inside parentheses if Performer (%p) is set\n" +"\n" +"For more details, please read the included README or http://www.xmms.org/" +"docs/readme.php" +msgstr "" + +#: xmms/xmms/prefswin.c:1178 +#, fuzzy +msgid "Audio CD directory" +msgstr "/´õÇϱâ/µð·ºÅ丮" + +#: xmms/xmms/prefswin.c:1196 +msgid "Audio CD" +msgstr "" + +#: xmms/xmms/skinwin.c:206 +msgid "(system default)" +msgstr "" === added file 'src/github.com/chai2010/gettext-go/testdata/mm-ko.euc-kr.mo' Binary files src/github.com/chai2010/gettext-go/testdata/mm-ko.euc-kr.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/mm-ko.euc-kr.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/mm-ko.euc-kr.po' --- src/github.com/chai2010/gettext-go/testdata/mm-ko.euc-kr.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/mm-ko.euc-kr.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9096 @@ +# Korean translation for dia +# Copyright (C) 2000 Free Software Foundation, Inc. +# Young-Ho,Cha , 2000. +#: app/sheets.c:453 +msgid "" +msgstr "" +"Project-Id-Version: dia 0.85\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2004-08-18 18:13+0200\n" +"PO-Revision-Date: 2000-05-31 10:16:35+0900\n" +"Last-Translator: Young-Ho Cha \n" +"Language-Team: Korean \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=euc-kr\n" +"Content-Transfer-Encoding: 8-bit\n" + +#: app/app_procs.c:219 +#, c-format +msgid "%s error: don't know how to export into %s\n" +msgstr "" + +#: app/app_procs.c:228 +#, c-format +msgid "%s error: input and output file name is identical: %s" +msgstr "" + +#: app/app_procs.c:235 +#, fuzzy, c-format +msgid "%s error: need valid input file %s\n" +msgstr "¿Ã¹Ù¸¥ ÀÔ·Â ÆÄÀÏÀÌ ÇÊ¿äÇÕ´Ï´Ù\n" + +#. if (!quiet) +#: app/app_procs.c:255 +#, c-format +msgid "%s --> %s\n" +msgstr "" + +#: app/app_procs.c:307 +#, fuzzy, c-format +msgid "Can't find output format %s\n" +msgstr "" +"Ç÷¯±×ÀÎ `%s'¸¦ ÀÐÀ»¼ö ¾ø½À´Ï´Ù\n" +"%s" + +#. Translators: The argument is a list of options, not to be translated +#: app/app_procs.c:401 +#, fuzzy, c-format +msgid "Export to file format and exit. Supported formats are: %s" +msgstr "ÀоîµéÀÎ ÆÄÀÏÀ» ÀúÀåÇÏ°í ¸¶Ä¨´Ï´Ù" + +#. &export_file_name +#: app/app_procs.c:410 +msgid "Export loaded file and exit" +msgstr "ÀоîµéÀÎ ÆÄÀÏÀ» ÀúÀåÇÏ°í ¸¶Ä¨´Ï´Ù" + +#: app/app_procs.c:410 +msgid "OUTPUT" +msgstr "Ãâ·Â" + +#. &export_file_format +#: app/app_procs.c:412 +msgid "FORMAT" +msgstr "" + +#: app/app_procs.c:415 +#, fuzzy +msgid "Export graphics size" +msgstr "ÀоîµéÀÎ ÆÄÀÏÀ» ÀúÀåÇÏ°í ¸¶Ä¨´Ï´Ù" + +#: app/app_procs.c:415 +msgid "WxH" +msgstr "" + +#: app/app_procs.c:417 +msgid "Don't show the splash screen" +msgstr "" + +#: app/app_procs.c:419 +msgid "Send error messages to stderr instead of showing dialogs." +msgstr "" + +#: app/app_procs.c:421 +msgid "Display credits list and exit" +msgstr "" + +#: app/app_procs.c:423 +msgid "Display version and exit" +msgstr "" + +#: app/app_procs.c:424 +msgid "Show this help message" +msgstr "ÀÌ µµ¿ò¸»À» º¸¿©ÁÝ´Ï´Ù" + +#: app/app_procs.c:465 +msgid "Can't connect to session manager!\n" +msgstr "¼¼¼Ç °ü¸®ÀÚ¿¡ ¿¬°áÀ» ÇÒ¼ö ¾ø½À´Ï´Ù!\n" + +#. TRANSLATOR: 2nd and 3rd %s are time and date respectively. 
+#: app/app_procs.c:492 +#, c-format +msgid "Dia version %s, compiled %s %s\n" +msgstr "" + +#: app/app_procs.c:494 +#, c-format +msgid "Dia version %s\n" +msgstr "" + +#: app/app_procs.c:548 app/app_procs.c:550 +msgid "" +"Couldn't find standard objects when looking for object-libs, exiting...\n" +msgstr "object-libs´Â ã¾ÒÁö¸¸ Ç¥ÁØ °´Ã¼´Â ãÁö ¸øÇÏ¿´½À´Ï´Ù, Á¾·áÇÕ´Ï´Ù...\n" + +#: app/app_procs.c:591 +#, fuzzy +msgid "Diagram1.dia" +msgstr "µµÇ¥ ÆíÁý±â" + +#: app/app_procs.c:633 +msgid "" +"This shouldn't happen. Please file a bug report at bugzilla.gnome.org\n" +"describing how you can cause this message to appear.\n" +msgstr "" + +#. no standard buttons +#: app/app_procs.c:645 +#, fuzzy +msgid "" +"Modified diagrams exist.\n" +"Are you sure you want to quit Dia\n" +"without saving them?" +msgstr "" +"º¯°æµÈ µµÇ¥°¡ ÀÖ½À´Ï´Ù.\n" +"Á¤¸»·Î Á¾·áÇϽðڽÀ´Ï±î?" + +#: app/app_procs.c:649 +#, fuzzy +msgid "Quit Dia" +msgstr "ÀÌ ÇÁ·Î±×·¥Àº" + +#. This printf seems to prevent a race condition with unrefs. +#. Yuck. -Lars +#: app/app_procs.c:700 +msgid "Thank you for using Dia.\n" +msgstr "" + +#: app/app_procs.c:716 app/app_procs.c:723 +msgid "Could not create per-user Dia config directory" +msgstr "´Ù¸¥ »ç¿ëÀÚÀÇ Dia ¼³Á¤ µð·ºÅ丮¸¦ »ý¼ºÇÒ¼ö ¾ø½À´Ï´Ù" + +#: app/app_procs.c:725 +msgid "" +"Could not create per-user Dia config directory. Please make sure that the " +"environment variable HOME points to an existing directory." +msgstr "" + +#: app/app_procs.c:747 +msgid "Objects and filters internal to dia" +msgstr "DiaÀÇ ³»ºÎ °´Ã¼¿Í ÇÊÅÍ" + +#: app/app_procs.c:786 +msgid "[OPTION...] [FILE...]" +msgstr "[¿É¼Ç...] [ÆÄÀÏ...]" + +#: app/app_procs.c:789 +#, c-format +msgid "" +"Error on option %s: %s.\n" +"Run '%s --help' to see a full list of available command line options.\n" +msgstr "" +"¿É¼Ç %s¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù : %s. \n" +"'%s --help'¸¦ ½ÇÇà½ÃÄѼ­ ¸í·ÉÇà ¿É¼ÇÀÇ Àüü¸ñ·ÏÀ» ÂüÁ¶ÇϽʽÿÀ.\n" + +#: app/app_procs.c:874 +msgid "" +"The original author of Dia was:\n" +"\n" +msgstr "" + +#: app/app_procs.c:879 +msgid "" +"\n" +"The current maintainers of Dia are:\n" +"\n" +msgstr "" + +#: app/app_procs.c:884 +msgid "" +"\n" +"Other authors are:\n" +"\n" +msgstr "" + +#: app/app_procs.c:889 +msgid "" +"\n" +"Dia is documented by:\n" +"\n" +msgstr "" + +#: app/autosave.c:93 +msgid "Recovering autosaved diagrams" +msgstr "" + +#: app/autosave.c:101 +msgid "" +"Autosaved files exist.\n" +"Please select those you wish to recover." +msgstr "" + +#: app/color_area.c:317 app/color_area.c:364 +#, fuzzy +msgid "Select foreground color" +msgstr "Àü°æ »ö»ó:" + +#: app/color_area.c:318 app/color_area.c:365 +#, fuzzy +msgid "Select background color" +msgstr "¹è°æ »ö»ó:" + +#: app/commands.c:134 +#, fuzzy, c-format +msgid "Diagram%d.dia" +msgstr "µµÇ¥ ÆíÁý±â" + +#: app/commands.c:205 +msgid "No existing object to paste.\n" +msgstr "ºÙ¿©³ÖÀ» °´Ã¼°¡ ¾ø½À´Ï´Ù.\n" + +#: app/commands.c:529 app/commands.c:567 +msgid "Could not find help directory" +msgstr "" + +#: app/commands.c:536 +#, fuzzy, c-format +msgid "" +"Could not open help directory:\n" +"%s" +msgstr "`%s'¸¦ ¿­¼ö ¾ø½À´Ï´Ù" + +#. +#. * Translators should localize the following string +#. * which will give them credit in the About box. +#. * E.g. "Fulano de Tal " +#. 
+#: app/commands.c:606 +msgid "translator_credits-PLEASE_ADD_YOURSELF_HERE" +msgstr "" + +#: app/commands.c:620 dia.desktop.in.h:1 +#, fuzzy +msgid "Dia" +msgstr "´ëÈ­»óÀÚ" + +#: app/commands.c:622 +msgid "Copyright (C) 1998-2002 The Free Software Foundation and the authors" +msgstr "" + +#: app/commands.c:623 +#, fuzzy +msgid "" +"Dia is a program for drawing structured diagrams.\n" +"Please visit http://www.gnome.org/projects/dia for more information." +msgstr "" +"´õ ¸¹Àº Á¤º¸¸¦ ¾Ë°í ½ÍÀ¸¸é http://www.lysator.liu.se/~alla/dia ¸¦ ÂüÁ¶ÇϽʽÃ" +"¿À" + +#: app/commands.c:662 +msgid "About Dia" +msgstr "ÀÌ ÇÁ·Î±×·¥Àº" + +#: app/commands.c:705 +#, c-format +msgid "Dia v %s by Alexander Larsson" +msgstr "" + +#. Exact spelling is Chépélov (using *ML entities) +#: app/commands.c:711 +msgid "Maintainers: Lars Clausen and Cyrille Chepelov" +msgstr "" + +#: app/commands.c:715 +#, fuzzy +msgid "Please visit http://www.gnome.org/projects/dia for more information" +msgstr "" +"´õ ¸¹Àº Á¤º¸¸¦ ¾Ë°í ½ÍÀ¸¸é http://www.lysator.liu.se/~alla/dia ¸¦ ÂüÁ¶ÇϽʽÃ" +"¿À" + +#: app/commands.c:720 +msgid "Contributors:" +msgstr "µµ¿ÍÁֽźеé:" + +#: app/defaults.c:43 app/defaults.c:146 +msgid "Object defaults" +msgstr "±âº» °´Ã¼" + +#: app/defaults.c:61 +msgid "This object has no defaults." +msgstr "ÀÌ °´Ã¼´Â ±âº»¼³Á¤ÀÌ ¾ø½À´Ï´Ù." + +#: app/defaults.c:111 +#, fuzzy +msgid "Defaults: " +msgstr "±âº» º¸±â" + +#: app/dia-props.c:89 +#, fuzzy +msgid "Diagram Properties" +msgstr "/´ëÈ­»óÀÚ/¼Ó¼º(_P)" + +#: app/dia-props.c:123 +msgid "Dynamic grid" +msgstr "" + +#: app/dia-props.c:131 +msgid "x" +msgstr "" + +#: app/dia-props.c:135 +msgid "y" +msgstr "" + +#: app/dia-props.c:140 +#, fuzzy +msgid "Spacing" +msgstr "ºñÀ²" + +#: app/dia-props.c:160 +#, fuzzy +msgid "Visible spacing" +msgstr "°ÝÀÚ º¸±â(_V)" + +#. Hexes! +#: app/dia-props.c:181 app/preferences.c:162 +msgid "Hex grid" +msgstr "" + +#: app/dia-props.c:189 +#, fuzzy +msgid "Hex grid size" +msgstr "°ÝÀÚ °¡·Î Å©±â:" + +#: app/dia-props.c:202 +#, fuzzy +msgid "Grid" +msgstr "°ÝÀÚ:" + +#: app/dia-props.c:213 lib/diagramdata.c:127 +msgid "Background" +msgstr "¹è°æ" + +#: app/dia-props.c:224 app/preferences.c:105 +msgid "Grid Lines" +msgstr "°ÝÀÚ ÁÙ" + +#: app/dia-props.c:235 +#, fuzzy +msgid "Page Breaks" +msgstr "¼± »ö»ó:" + +#: app/dia-props.c:246 +#, fuzzy +msgid "Colors" +msgstr "´Ý±â" + +#. Can we be sure that the filename is the 'proper title'? +#: app/dia-props.c:265 +#, fuzzy, c-format +msgid "Diagram Properties: %s" +msgstr "/´ëÈ­»óÀÚ/¼Ó¼º(_P)" + +#: app/dia_embedd.c:352 +msgid "Could not initialize Bonobo!" +msgstr "Bonobo¸¦ ÃʱâÈ­ ÇÒ¼ö ¾ø½À´Ï´Ù!" 
+ +#: app/diacanvas.c:121 +#, fuzzy +msgid "X position" +msgstr "ÇÕ¼º" + +#: app/diacanvas.c:122 +msgid "X position of child widget" +msgstr "" + +#: app/diacanvas.c:131 +#, fuzzy +msgid "Y position" +msgstr "ÇÕ¼º" + +#: app/diacanvas.c:132 +msgid "Y position of child widget" +msgstr "" + +#: app/diagram_tree_menu.c:45 +#, fuzzy +msgid "/_Sort objects" +msgstr "È帧µµ °´Ã¼" + +#: app/diagram_tree_menu.c:46 +#, fuzzy +msgid "/Sort objects/by _name" +msgstr "°´Ã¼ ¸Þ´º ¾ø½¿" + +#: app/diagram_tree_menu.c:48 +msgid "/Sort objects/by _type" +msgstr "" + +#: app/diagram_tree_menu.c:50 +msgid "/Sort objects/as _inserted" +msgstr "" + +#: app/diagram_tree_menu.c:53 +msgid "/Sort objects/All by name" +msgstr "" + +#: app/diagram_tree_menu.c:55 +msgid "/Sort objects/All by type" +msgstr "" + +#: app/diagram_tree_menu.c:57 +msgid "/Sort objects/All as inserted" +msgstr "" + +#: app/diagram_tree_menu.c:59 +msgid "/Sort objects/_Default" +msgstr "" + +#: app/diagram_tree_menu.c:60 +msgid "/Sort objects/Default/by _name" +msgstr "" + +#: app/diagram_tree_menu.c:62 +msgid "/Sort objects/Default/by _type" +msgstr "" + +#: app/diagram_tree_menu.c:64 +msgid "/Sort objects/Default/as _inserted" +msgstr "" + +#: app/diagram_tree_menu.c:66 +#, fuzzy +msgid "/Sort _diagrams" +msgstr "µµÇ¥ Àμâ" + +#: app/diagram_tree_menu.c:67 +msgid "/Sort _diagrams/by _name" +msgstr "" + +#: app/diagram_tree_menu.c:69 +msgid "/Sort _diagrams/as _inserted" +msgstr "" + +#: app/diagram_tree_menu.c:71 +#, fuzzy +msgid "/Sort diagrams/_Default" +msgstr "µµÇ¥:" + +#: app/diagram_tree_menu.c:72 +msgid "/Sort diagrams/Default/by _name" +msgstr "" + +#: app/diagram_tree_menu.c:74 +msgid "/Sort diagrams/Default/as _inserted" +msgstr "" + +#: app/diagram_tree_menu.c:82 app/diagram_tree_menu.c:93 +#, fuzzy +msgid "/_Locate" +msgstr "¿Ã·ÁÁü:" + +#: app/diagram_tree_menu.c:83 +#, fuzzy +msgid "/_Properties" +msgstr "¼³Á¤(_P)" + +#: app/diagram_tree_menu.c:84 +msgid "/_Hide this type" +msgstr "" + +#: app/diagram_tree_window.c:76 +#, fuzzy +msgid "Diagram tree" +msgstr "µµÇ¥ ÆíÁý±â" + +#: app/dialogs.c:51 +msgid "Ok" +msgstr "" + +#: app/dialogs.c:52 app/layer_dialog.c:1006 app/paginate_psprint.c:291 +msgid "Cancel" +msgstr "Ãë¼Ò" + +#. paper size +#: app/diapagelayout.c:116 +msgid "Paper Size" +msgstr "Á¾ÀÌ Å©±â" + +#. orientation +#: app/diapagelayout.c:149 +msgid "Orientation" +msgstr "¹æÇâ" + +#. margins +#: app/diapagelayout.c:187 +msgid "Margins" +msgstr "¿©¹é" + +#: app/diapagelayout.c:199 +msgid "Top:" +msgstr "À§:" + +#: app/diapagelayout.c:212 +msgid "Bottom:" +msgstr "¾Æ·¡:" + +#: app/diapagelayout.c:225 +msgid "Left:" +msgstr "¿ÞÂÊ:" + +#: app/diapagelayout.c:238 +msgid "Right:" +msgstr "¿À¸¥ÂÊ:" + +#. Scaling +#: app/diapagelayout.c:252 +msgid "Scaling" +msgstr "ºñÀ²" + +#: app/diapagelayout.c:263 +msgid "Scale:" +msgstr "ºñÀ²:" + +#: app/diapagelayout.c:275 +msgid "Fit to:" +msgstr "¸ÂÃã:" + +#: app/diapagelayout.c:287 +msgid "by" +msgstr "" + +#: app/diapagelayout.c:682 +#, c-format +msgid "%0.3gcm x %0.3gcm" +msgstr "" + +#: app/diapagelayout.c:786 app/pagesetup.c:76 +msgid "Page Setup" +msgstr "ÂÊ ¼³Á¤" + +#: app/disp_callbacks.c:81 app/properties.c:152 +msgid "" +"This object doesn't support Undo/Redo.\n" +"Undo information erased." +msgstr "" +"ÀÌ °´Ã¼´Â Ãë¼Ò/Àç½ÇÇàÀ» Áö¿øÇÏÁö ¾Ê½À´Ï´Ù.\n" +"Ãë¼Ò Á¤º¸°¡ Áö¿öÁý´Ï´Ù." + +#: app/disp_callbacks.c:116 +#, fuzzy +msgid "Properties..." +msgstr "¼³Á¤(_P)" + +#: app/disp_callbacks.c:919 +msgid "" +"The object you dropped cannot fit into its parent. 
\n" +"Either expand the parent object, or drop the object elsewhere." +msgstr "" + +#: app/display.c:95 +msgid "Diagram modified!" +msgstr "µµÇ¥°¡ º¯°æµÇ¾ú½À´Ï´Ù!" + +#: app/display.c:983 +msgid "" +msgstr "" + +#: app/display.c:985 +#, fuzzy, c-format +msgid "" +"The diagram '%s'\n" +"has not been saved. Save changes now?" +msgstr "" +"µµÇ¥°¡ ÀúÀåµÇÁö ¾Ê¾Ò½À´Ï´Ù.\n" +"Á¤¸»·Î ÀÌ Ã¢À» ´Ý°Ú½À´Ï±î?" + +#: app/display.c:996 +#, fuzzy +msgid "Close Diagram" +msgstr "µµÇ¥ ¿­±â" + +#: app/display.c:1001 +msgid "Discard Changes" +msgstr "" + +#: app/export_png.c:134 app/load_save.c:904 app/render_eps.c:103 +#: plug-ins/cairo/diacairo.c:913 plug-ins/cgm/cgm.c:1157 +#: plug-ins/dxf/dxf-export.c:505 plug-ins/hpgl/hpgl.c:732 +#: plug-ins/metapost/render_metapost.c:964 +#: plug-ins/pstricks/render_pstricks.c:800 plug-ins/shape/shape-export.c:135 +#: plug-ins/svg/render_svg.c:152 plug-ins/wpg/wpg.c:1066 +#: plug-ins/xfig/xfig-export.c:1100 plug-ins/xslt/xslt.c:94 +#, fuzzy, c-format +msgid "Can't open output file %s: %s\n" +msgstr "`%s'¸¦ ¿­¼ö ¾ø½À´Ï´Ù" + +#: app/export_png.c:142 +msgid "Could not create PNG write structure" +msgstr "PNG ¾²±â ±¸Á¶¸¦ »ý¼ºÇÒ¼ö ¾ø½À´Ï´Ù" + +#: app/export_png.c:151 +msgid "Could not create PNG header info structure" +msgstr "PNGÇì´õÁ¤º¸ ±¸Á¶¸¦ »ý¼ºÇÒ¼ö ¾ø½À´Ï´Ù" + +#: app/export_png.c:159 +msgid "Error occurred while writing PNG" +msgstr "PNG¸¦ ¾²´ÂÁß ¿À·ù°¡ ¹ß»ýÇß½À´Ï´Ù" + +#. Create a dialog +#: app/export_png.c:299 +#, fuzzy +msgid "PNG Export Options" +msgstr "°ÝÀÚ ¼³Á¤" + +#. Translators: Menu item Verb/Channel/Export +#. Translators: Menu item Verb/Channel/Export/Export +#: app/export_png.c:300 objects/FS/function.c:685 objects/FS/function.c:687 +msgid "Export" +msgstr "" + +#: app/export_png.c:305 +#, fuzzy +msgid "Image width:" +msgstr "±×¸² ÆÄÀÏ:" + +#: app/export_png.c:308 +#, fuzzy +msgid "Image height:" +msgstr "±×¸² ÆÄÀÏ:" + +#: app/export_png.c:354 +msgid "Portable Network Graphics" +msgstr "" + +#: app/filedlg.c:122 app/filedlg.c:404 +msgid "By extension" +msgstr "È®ÀåÀÚ" + +#: app/filedlg.c:184 +msgid "Open Diagram" +msgstr "µµÇ¥ ¿­±â" + +#: app/filedlg.c:215 +msgid "Open Options" +msgstr "¼³Á¤ Çϱâ" + +#: app/filedlg.c:223 app/filedlg.c:526 +msgid "Determine file type:" +msgstr "ÆÄÀÏÇü½Ä °áÁ¤:" + +#: app/filedlg.c:259 +msgid "" +"Some characters in the filename are neither UTF-8 nor your local encoding.\n" +"Some things will break." +msgstr "" + +#: app/filedlg.c:265 app/filedlg.c:448 +#, c-format +msgid "" +"The file '%s' already exists.\n" +"Do you want to overwrite it?" +msgstr "" +"ÆÄÀÏ '%s'°¡ ÀÖ½À´Ï´Ù.\n" +"µ¤¾î ¾²°Ú½À´Ï±î?" + +#: app/filedlg.c:273 app/filedlg.c:455 +msgid "File already exists" +msgstr "ÆÄÀÏÀÌ ÀÌ¹Ì ÀÖ½À´Ï´Ù" + +#: app/filedlg.c:306 +msgid "Save Diagram" +msgstr "µµÇ¥ ÀúÀå" + +#. Need better way to make it a reasonable size. Isn't there some +#. standard look for them (or is that just Gnome?) +#: app/filedlg.c:311 +#, fuzzy +msgid "Compress diagram files" +msgstr "ÀúÀåÆÄÀÏ ¾ÐÃà:" + +#: app/filedlg.c:320 +msgid "" +"Compression reduces file size to less than 1/10th size and speeds up loading " +"and saving. Some text programs cannot manipulate compressed files." 
+msgstr "" + +#: app/filedlg.c:474 +#, c-format +msgid "" +"Could not determine which export filter\n" +"to use to save '%s'" +msgstr "" + +#: app/filedlg.c:493 +msgid "Export Diagram" +msgstr "" + +#: app/filedlg.c:518 +msgid "Export Options" +msgstr "" + +#: app/interface.c:53 +msgid "Modify object(s)" +msgstr "°´Ã¼ º¯°æ" + +#: app/interface.c:54 +msgid "Modify" +msgstr "º¯°æ" + +#. Translators: Menu item Verb/Control Magnitude/Change/Magnify +#: app/interface.c:58 app/interface.c:59 objects/FS/function.c:975 +msgid "Magnify" +msgstr "È®´ë" + +#: app/interface.c:63 +msgid "Scroll around the diagram" +msgstr "µµÇ¥ " + +#: app/interface.c:64 +msgid "Scroll" +msgstr "" + +#: app/interface.c:68 app/interface.c:69 lib/properties.c:77 +#: lib/properties.h:516 lib/properties.h:519 objects/UML/activity.c:122 +#: objects/UML/actor.c:120 objects/UML/classicon.c:141 +#: objects/UML/component.c:126 objects/UML/component_feature.c:154 +#: objects/UML/node.c:125 objects/UML/note.c:119 objects/UML/object.c:145 +#: objects/UML/small_package.c:126 objects/UML/state.c:139 +#: objects/UML/usecase.c:136 objects/Jackson/requirement.c:144 +#: objects/network/basestation.c:131 objects/network/radiocell.c:146 +msgid "Text" +msgstr "¹®ÀÚ¿­" + +#: app/interface.c:73 app/interface.c:74 +msgid "Box" +msgstr "»óÀÚ" + +#: app/interface.c:78 app/interface.c:79 +msgid "Ellipse" +msgstr "Ÿ¿ø" + +#: app/interface.c:83 app/interface.c:84 +msgid "Polygon" +msgstr "´Ù°¢Çü" + +#: app/interface.c:88 app/interface.c:89 +#, fuzzy +msgid "Beziergon" +msgstr "º£Áö¾î" + +#: app/interface.c:93 app/interface.c:94 objects/standard/line.c:238 +msgid "Line" +msgstr "¼±" + +#: app/interface.c:98 app/interface.c:99 +msgid "Arc" +msgstr "È£" + +#: app/interface.c:103 app/interface.c:104 +msgid "Zigzagline" +msgstr "Áö±×Àç±×" + +#: app/interface.c:108 app/interface.c:109 +msgid "Polyline" +msgstr "¿¬°á¼±" + +#: app/interface.c:113 app/interface.c:114 +msgid "Bezierline" +msgstr "º£Áö¾î" + +#: app/interface.c:118 app/interface.c:119 +msgid "Image" +msgstr "±×¸²" + +#: app/interface.c:391 +#, fuzzy +msgid "Diagram menu." +msgstr "µµÇ¥:" + +#: app/interface.c:429 +msgid "Pops up the Navigation window." +msgstr "" + +#: app/interface.c:511 +msgid "Zoom" +msgstr "È®´ë" + +#: app/interface.c:525 +msgid "Toggles snap-to-grid for this window." +msgstr "" + +#: app/interface.c:588 +msgid "NULL tooldata in tool_select_update" +msgstr "" + +#: app/interface.c:1071 +msgid "" +"Foreground & background colors for new objects. The small black and white " +"squares reset colors. The small arrows swap colors. Double click to change " +"colors." +msgstr "" + +#: app/interface.c:1086 +msgid "" +"Line widths. Click on a line to set the default line width for new " +"objects. Double-click to set the line width more precisely." +msgstr "" + +#: app/interface.c:1126 +msgid "" +"Arrow style at the beginning of new lines. Click to pick an arrow, or set " +"arrow parameters with Details..." +msgstr "" + +#: app/interface.c:1131 +msgid "" +"Line style for new lines. Click to pick a line style, or set line style " +"parameters with Details..." +msgstr "" + +#: app/interface.c:1145 +msgid "" +"Arrow style at the end of new lines. Click to pick an arrow, or set arrow " +"parameters with Details..." 
+msgstr "" + +#: app/interface.c:1255 +msgid "Diagram Editor" +msgstr "µµÇ¥ ÆíÁý±â" + +#: app/layer_dialog.c:70 +msgid "New Layer" +msgstr "»õ °èÃþ" + +#: app/layer_dialog.c:71 +msgid "Raise Layer" +msgstr "°èÃþ ¿Ã¸²" + +#: app/layer_dialog.c:72 +msgid "Lower Layer" +msgstr "°èÃþ ³»¸²" + +#: app/layer_dialog.c:73 +msgid "Delete Layer" +msgstr "°èÃþ Áö¿ò" + +#: app/layer_dialog.c:212 +msgid "Layers" +msgstr "°ÔÃþ" + +#: app/layer_dialog.c:226 +msgid "Diagrams:" +msgstr "µµÇ¥:" + +#: app/layer_dialog.c:271 +msgid "Close" +msgstr "´Ý±â" + +#: app/layer_dialog.c:331 +msgid "New layer" +msgstr "»õ °èÃþ" + +#: app/layer_dialog.c:537 +msgid "none" +msgstr "" + +#: app/layer_dialog.c:968 +msgid "Edit Layer Attributes" +msgstr "°èÃþ ¼Ó¼º ÆíÁý" + +#: app/layer_dialog.c:987 +msgid "Layer name:" +msgstr "°èÃþ À̸§:" + +#: app/layer_dialog.c:996 app/paginate_psprint.c:283 +msgid "OK" +msgstr "È®ÀÎ" + +#: app/linewidth_area.c:246 lib/properties.h:480 lib/properties.h:483 +#: objects/chronogram/chronoline.c:181 objects/chronogram/chronoref.c:160 +msgid "Line width" +msgstr "¼± ±½±â" + +#: app/linewidth_area.c:258 +msgid "Line width:" +msgstr "¼± ±½±â:" + +#: app/load_save.c:260 +msgid "" +"Error loading diagram.\n" +"Linked object not found in document." +msgstr "" +"µµÇ¥ ÀдÂÁß ¿À·ù.\n" +"¹®¼­¿¡¼­ ¿¬°áµÈ °´Ã¼¸¦ ãÀ»¼ö ¾ø½À´Ï´Ù." + +#: app/load_save.c:263 +msgid "" +"Error loading diagram.\n" +"connection handle does not exist." +msgstr "" + +#: app/load_save.c:280 +#, fuzzy, c-format +msgid "" +"Error loading diagram.\n" +"connection point %s does not exist." +msgstr "" +"µµÇ¥ ÀдÂÁß ¿À·ù.\n" +"¿¬°áÁ¡ÀÌ ¾ø½À´Ï´Ù." + +#: app/load_save.c:305 +#, fuzzy, c-format +msgid "Can't find parent %s of %s object\n" +msgstr "Ç¥ÁØ °´Ã¼" + +#: app/load_save.c:350 +msgid "You must specify a file, not a directory.\n" +msgstr "" + +#: app/load_save.c:357 plug-ins/dxf/dxf-import.c:1304 plug-ins/wpg/wpg.c:1170 +#: plug-ins/xfig/xfig-import.c:1560 plug-ins/xslt/xslt.c:87 +#, c-format +msgid "Couldn't open: '%s' for reading.\n" +msgstr "" + +#: app/load_save.c:374 app/load_save.c:379 +#, c-format +msgid "" +"Error loading diagram %s.\n" +"Unknown file type." +msgstr "" + +#: app/load_save.c:386 +#, c-format +msgid "" +"Error loading diagram %s.\n" +"Not a Dia file." +msgstr "" + +#: app/load_save.c:586 +#, c-format +msgid "" +"Error loading diagram:\n" +"%s.\n" +"A valid Dia file defines at least one layer." +msgstr "" + +#: app/load_save.c:937 +#, c-format +msgid "Failed to save file '%s'.\n" +msgstr "" + +#: app/load_save.c:1008 app/load_save.c:1013 +#, fuzzy +msgid "Dia Diagram File" +msgstr "µµÇ¥ ÆíÁý±â" + +#: app/menus.c:49 app/menus.c:84 +msgid "/_File" +msgstr "/ÆÄÀÏ(_F)" + +#: app/menus.c:51 app/menus.c:86 +msgid "/File/_New" +msgstr "" + +#: app/menus.c:53 app/menus.c:88 +#, fuzzy +msgid "/File/_Open..." +msgstr "/ÆÄÀÏ/ÂÊ ¼³Á¤(_u)..." + +#. recent file list is dynamically inserted here +#: app/menus.c:55 app/menus.c:60 app/menus.c:64 app/menus.c:66 app/menus.c:95 +#: app/menus.c:99 +#, fuzzy +msgid "/File/---" +msgstr "/ÆÄÀÏ(_F)" + +#: app/menus.c:56 +#, fuzzy +msgid "/File/_Diagram tree" +msgstr "/ÆÄÀÏ/µµÇ¥ Àμâ(_P)..." + +#: app/menus.c:58 +msgid "/File/Sheets and Objects..." +msgstr "" + +#: app/menus.c:61 +msgid "/File/_Preferences..." +msgstr "" + +#: app/menus.c:63 +#, fuzzy +msgid "/File/P_lugins..." 
+msgstr "Ç÷¯±×ÀÎ" + +#: app/menus.c:67 app/menus.c:103 +msgid "/File/_Quit" +msgstr "" + +#: app/menus.c:69 app/menus.c:230 +msgid "/_Help" +msgstr "" + +#: app/menus.c:71 app/menus.c:232 +msgid "/Help/_Manual" +msgstr "" + +#: app/menus.c:73 app/menus.c:234 +msgid "/Help/---" +msgstr "" + +#: app/menus.c:74 app/menus.c:235 +msgid "/Help/_About..." +msgstr "" + +#: app/menus.c:90 +msgid "/File/_Save" +msgstr "" + +#: app/menus.c:92 +msgid "/File/Save _As..." +msgstr "" + +#: app/menus.c:94 +msgid "/File/_Export..." +msgstr "" + +#: app/menus.c:96 +msgid "/File/Page Set_up..." +msgstr "/ÆÄÀÏ/ÂÊ ¼³Á¤(_u)..." + +#: app/menus.c:97 +msgid "/File/_Print Diagram..." +msgstr "/ÆÄÀÏ/µµÇ¥ Àμâ(_P)..." + +#: app/menus.c:100 +msgid "/File/_Close" +msgstr "/ÆÄÀÏ/´Ý±â(_C)" + +#: app/menus.c:105 +msgid "/_Edit" +msgstr "/ÆíÁý(_E)" + +#: app/menus.c:107 +msgid "/Edit/_Undo" +msgstr "/ÆíÁý/Ãë¼Ò(_U)" + +#: app/menus.c:109 +msgid "/Edit/_Redo" +msgstr "/ÆíÁý/Àç½ÇÇà(_R)" + +#: app/menus.c:111 app/menus.c:121 +#, fuzzy +msgid "/Edit/---" +msgstr "/ÆíÁý/ÀÚ¸£±â" + +#: app/menus.c:112 +msgid "/Edit/_Copy" +msgstr "/ÆíÁý/º¹»ç(_C)" + +#: app/menus.c:114 +msgid "/Edit/C_ut" +msgstr "/ÆíÁý/ÀÚ¸£±â(_u)" + +#: app/menus.c:116 +msgid "/Edit/_Paste" +msgstr "/ÆíÁý/ºÙÀ̱â(_P)" + +#: app/menus.c:118 +#, fuzzy +msgid "/Edit/_Duplicate" +msgstr "/ÆíÁý/Áö¿ì±â(_D)" + +#: app/menus.c:119 +msgid "/Edit/_Delete" +msgstr "/ÆíÁý/Áö¿ì±â(_D)" + +#: app/menus.c:122 +msgid "/Edit/Copy Text" +msgstr "/ÆíÁý/¹®ÀÚ¿­ º¹»ç" + +#: app/menus.c:123 +msgid "/Edit/Cut Text" +msgstr "/ÆíÁý/¹®ÀÚ¿­ ÀÚ¸£±â" + +#: app/menus.c:124 +msgid "/Edit/Paste _Text" +msgstr "/ÆíÁý/¹®ÀÚ¿­ ºÙÀ̱â(_T)" + +#: app/menus.c:125 +#, fuzzy +msgid "/_Diagram" +msgstr "µµÇ¥:" + +#: app/menus.c:127 +#, fuzzy +msgid "/Diagram/_Properties..." +msgstr "/´ëÈ­»óÀÚ/¼Ó¼º(_P)" + +#: app/menus.c:128 +#, fuzzy +msgid "/Diagram/_Layers..." +msgstr "/´ëÈ­»óÀÚ/°èÃþ(_L)" + +#: app/menus.c:129 +msgid "/_View" +msgstr "/º¸±â(_V)" + +#: app/menus.c:131 +msgid "/View/Zoom _In" +msgstr "/º¸±â/È®´ë(_I)" + +#: app/menus.c:133 +msgid "/View/Zoom _Out" +msgstr "/º¸±â/Ãà¼Ò(_O)" + +#: app/menus.c:135 +msgid "/View/_Zoom" +msgstr "/º¸±â/È®´ë(_Z)" + +#: app/menus.c:137 +msgid "/View/Zoom/400%" +msgstr "/º¸±â/È®´ë/400%" + +#: app/menus.c:138 +msgid "/View/Zoom/283%" +msgstr "/º¸±â/È®´ë/283%" + +#: app/menus.c:139 +msgid "/View/Zoom/200%" +msgstr "/º¸±â/È®´ë/200%" + +#: app/menus.c:140 +msgid "/View/Zoom/141%" +msgstr "/º¸±â/È®´ë/141%" + +#: app/menus.c:141 +msgid "/View/Zoom/100%" +msgstr "/º¸±â/È®´ë/100%" + +#: app/menus.c:143 +msgid "/View/Zoom/85%" +msgstr "/º¸±â/Ãà¼Ò/85%" + +#: app/menus.c:144 +msgid "/View/Zoom/70.7%" +msgstr "/º¸±â/Ãà¼Ò/70.7%" + +#: app/menus.c:145 +msgid "/View/Zoom/50%" +msgstr "/º¸±â/Ãà¼Ò/50%" + +#: app/menus.c:146 +msgid "/View/Zoom/35.4%" +msgstr "/º¸±â/Ãà¼Ò/35.4%" + +#: app/menus.c:147 +msgid "/View/Zoom/25%" +msgstr "/º¸±â/Ãà¼Ò/25%" + +#: app/menus.c:148 app/menus.c:156 +#, fuzzy +msgid "/View/---" +msgstr "/º¸±â(_V)" + +#: app/menus.c:150 +msgid "/View/_AntiAliased" +msgstr "/º¸±â/¾ÈƼ¾ó¶óÀ̽Ì(_A)" + +#: app/menus.c:152 +#, fuzzy +msgid "/View/Show _Grid" +msgstr "/º¸±â/¸ðµÎ º¸±â(_A)" + +#: app/menus.c:153 +msgid "/View/_Snap To Grid" +msgstr "/º¸±â/²÷±ä °ÝÀÚ" + +#: app/menus.c:154 +msgid "/View/Show _Rulers" +msgstr "/º¸±â/´«±ÝÀÚ º¸±â(_R)" + +#: app/menus.c:155 +msgid "/View/Show _Connection Points" +msgstr "/º¸±â/¿¬°áÁ¡ º¸±â(_C)" + +#: app/menus.c:157 +msgid "/View/New _View" +msgstr "/º¸±â/»õ º¸±â(_V)" + +#. Show All, Best Fit. 
Same as the Gimp, Ctrl+E +#: app/menus.c:159 +msgid "/View/Show _All" +msgstr "/º¸±â/¸ðµÎ º¸±â(_A)" + +#: app/menus.c:160 +#, fuzzy +msgid "/View/Re_draw" +msgstr "/º¸±â/»õ º¸±â(_V)" + +#: app/menus.c:161 +msgid "/_Objects" +msgstr "/°´Ã¼(_O)" + +#: app/menus.c:163 +msgid "/Objects/Send to _Back" +msgstr "/°´Ã¼/µÚ·Î º¸³»±â(_B)" + +#: app/menus.c:164 +msgid "/Objects/Bring to _Front" +msgstr "/°´Ã¼/¾ÕÀ¸·Î º¸³»±â(_F)" + +#: app/menus.c:165 +#, fuzzy +msgid "/Objects/Send Backwards" +msgstr "/°´Ã¼/µÚ·Î º¸³¿" + +#: app/menus.c:166 +#, fuzzy +msgid "/Objects/Bring Forwards" +msgstr "/°´Ã¼/¾ÕÀ¸·Î º¸³¿" + +#: app/menus.c:167 app/menus.c:171 app/menus.c:175 app/menus.c:189 +#, fuzzy +msgid "/Objects/---" +msgstr "/°´Ã¼(_O)" + +#: app/menus.c:168 +msgid "/Objects/_Group" +msgstr "/°´Ã¼/¹­±â(_G)" + +#. deliberately not using Ctrl+U for Ungroup +#: app/menus.c:170 +msgid "/Objects/_Ungroup" +msgstr "/°´Ã¼/Ç®±â(_U)" + +#: app/menus.c:172 +#, fuzzy +msgid "/Objects/_Parent" +msgstr "/°´Ã¼/¹­±â(_G)" + +#: app/menus.c:173 +#, fuzzy +msgid "/Objects/_Unparent" +msgstr "/°´Ã¼/Ç®±â(_U)" + +#: app/menus.c:174 +msgid "/Objects/_Unparent Children" +msgstr "" + +#: app/menus.c:176 +#, fuzzy +msgid "/Objects/Align" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä(_V)" + +#: app/menus.c:178 +#, fuzzy +msgid "/Objects/Align/Left" +msgstr "/°´Ã¼/¼öÆò Á¤·Ä/¿ÞÂÊ" + +#: app/menus.c:179 +#, fuzzy +msgid "/Objects/Align/Center" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä/Áß°£" + +#: app/menus.c:180 +#, fuzzy +msgid "/Objects/Align/Right" +msgstr "/°´Ã¼/¼öÆò Á¤·Ä/¿À¸¥ÂÊ" + +#: app/menus.c:181 +#, fuzzy +msgid "/Objects/Align/Top" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä/À§" + +#: app/menus.c:182 +#, fuzzy +msgid "/Objects/Align/Middle" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä(_V)" + +#: app/menus.c:183 +#, fuzzy +msgid "/Objects/Align/Bottom" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä/¾Æ·¡" + +#: app/menus.c:184 +#, fuzzy +msgid "/Objects/Align/---" +msgstr "/°´Ã¼(_O)" + +#: app/menus.c:185 +#, fuzzy +msgid "/Objects/Align/Spread Out Horizontally" +msgstr "/°´Ã¼/¼öÆò Á¤·Ä(_H)" + +#: app/menus.c:186 +#, fuzzy +msgid "/Objects/Align/Spread Out Vertically" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä(_V)" + +#: app/menus.c:187 +#, fuzzy +msgid "/Objects/Align/Adjacent" +msgstr "/°´Ã¼/¼öÆò Á¤·Ä/" + +#: app/menus.c:188 +#, fuzzy +msgid "/Objects/Align/Stacked" +msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä(_V)" + +#: app/menus.c:190 +#, fuzzy +msgid "/Objects/_Properties..." 
+msgstr "°´Ã¼ ¼Ó¼º" + +#: app/menus.c:191 +#, fuzzy +msgid "/_Select" +msgstr "¼±ÅÃ" + +#: app/menus.c:193 +msgid "/Select/All" +msgstr "/¼±ÅÃ/¸ðµÎ" + +#: app/menus.c:194 +msgid "/Select/None" +msgstr "/¼±ÅÃ/¾ÊÀ½" + +#: app/menus.c:195 app/menus.c:208 +msgid "/Select/Invert" +msgstr "/¼±ÅÃ/¹Ý´ë" + +#: app/menus.c:196 +msgid "/Select/Connected" +msgstr "/¼±ÅÃ/¿¬°á" + +#: app/menus.c:197 +msgid "/Select/Transitive" +msgstr "" + +#: app/menus.c:198 +msgid "/Select/Same Type" +msgstr "/¼±ÅÃ/°°Àº ÇüÅÂ" + +#: app/menus.c:199 +#, fuzzy +msgid "/Select/---" +msgstr "¼±ÅÃ" + +#: app/menus.c:200 +msgid "/Select/Replace" +msgstr "" + +#: app/menus.c:202 +msgid "/Select/Union" +msgstr "" + +#: app/menus.c:204 +msgid "/Select/Intersect" +msgstr "" + +#: app/menus.c:206 +msgid "/Select/Remove" +msgstr "/¼±ÅÃ/Áö¿ò" + +#: app/menus.c:210 +#, fuzzy +msgid "/_Tools" +msgstr "/µµ±¸/" + +#: app/menus.c:212 +msgid "/Tools/Modify" +msgstr "" + +#: app/menus.c:213 +msgid "/Tools/Magnify" +msgstr "" + +#: app/menus.c:214 +msgid "/Tools/Scroll" +msgstr "" + +#: app/menus.c:215 +msgid "/Tools/Text" +msgstr "/µµ±¸/¹®ÀÚ¿­" + +#: app/menus.c:216 +msgid "/Tools/Box" +msgstr "/µµ±¸/»óÀÚ" + +#: app/menus.c:217 +msgid "/Tools/Ellipse" +msgstr "/µµ±¸/Ÿ¿ø" + +#: app/menus.c:218 +msgid "/Tools/Polygon" +msgstr "/µµ±¸/´Ù°¢Çü" + +#: app/menus.c:219 +#, fuzzy +msgid "/Tools/Beziergon" +msgstr "/µµ±¸/º£Áö¾î°î¼±" + +#: app/menus.c:220 app/menus.c:226 +#, fuzzy +msgid "/Tools/---" +msgstr "/µµ±¸/" + +#: app/menus.c:221 +msgid "/Tools/Line" +msgstr "/µµ±¸/¼±" + +#: app/menus.c:222 +msgid "/Tools/Arc" +msgstr "/µµ±¸/È£" + +#: app/menus.c:223 +msgid "/Tools/Zigzagline" +msgstr "/µµ±¸/Áö±×Àç±×" + +#: app/menus.c:224 +msgid "/Tools/Polyline" +msgstr "/µµ±¸/´ÙÇü¼±" + +#: app/menus.c:225 +msgid "/Tools/Bezierline" +msgstr "/µµ±¸/º£Áö¾î°î¼±" + +#: app/menus.c:227 +msgid "/Tools/Image" +msgstr "/µµ±¸/±×¸²" + +#: app/menus.c:228 +msgid "/_Input Methods" +msgstr "" + +#: app/menus.c:399 +msgid "NULL tooldata in tool_menu_select" +msgstr "" + +#: app/menus.c:497 +#, fuzzy +msgid "Diagram Menu" +msgstr "µµÇ¥:" + +#: app/menus.c:632 +#, c-format +msgid "" +"Can't find menu entry '%s'!\n" +"This is probably a i18n problem (try LANG=C)." +msgstr "" + +#: app/modify_tool.c:328 +msgid "Couldn't get GTK settings" +msgstr "" + +#: app/paginate_psprint.c:242 +msgid "Select Printer" +msgstr "ÇÁ¸°ÅÍ ¼±ÅÃ" + +#: app/paginate_psprint.c:254 sheets/ciscocomputer.sheet.in.h:25 +msgid "Printer" +msgstr "ÇÁ¸°ÅÍ" + +#: app/paginate_psprint.c:268 sheets/Misc.sheet.in.h:1 +msgid "File" +msgstr "ÆÄÀÏ" + +#: app/paginate_psprint.c:377 +#, fuzzy, c-format +msgid "Could not run command '%s': %s" +msgstr "`%s'¸¦ ¿­¼ö ¾ø½À´Ï´Ù" + +#: app/paginate_psprint.c:380 +#, fuzzy, c-format +msgid "Could not open '%s' for writing: %s" +msgstr "¿­¼ö ¾÷½À´Ï´Ù: '%s' ¾µ¼ö¾ø½¿.\n" + +#: app/paginate_psprint.c:397 +#, c-format +msgid "Printing error: command '%s' returned %d\n" +msgstr "" + +#: app/paginate_psprint.c:408 +#, c-format +msgid "Printing error: command '%s' caused sigpipe." 
+msgstr "" + +#: app/plugin-manager.c:200 +msgid "Plug-ins" +msgstr "Ç÷¯±×ÀÎ" + +#: app/plugin-manager.c:255 +#, fuzzy +msgid "Loaded" +msgstr "¿Ã·ÁÁü:" + +#: app/plugin-manager.c:262 objects/UML/class.c:112 +#: objects/UML/large_package.c:128 +#, fuzzy +msgid "Name" +msgstr "À̸§:" + +#: app/plugin-manager.c:268 +#, fuzzy +msgid "Description" +msgstr "¼³¸í:" + +#: app/plugin-manager.c:277 +#, fuzzy +msgid "Load at Startup" +msgstr "½ÃÀÛÇÒ¶§ ÀÚµ¿ Àбâ" + +#: app/plugin-manager.c:284 +#, fuzzy +msgid "File Name" +msgstr "ÆÄÀÏ À̸§:" + +#: app/preferences.c:102 +msgid "User Interface" +msgstr "»ç¿ëÀÚ ÀÎÅÍÆäÀ̽º" + +#: app/preferences.c:103 +#, fuzzy +msgid "Diagram Defaults" +msgstr "µµÇ¥:" + +#: app/preferences.c:104 +msgid "View Defaults" +msgstr "±âº» º¸±â" + +#: app/preferences.c:106 +#, fuzzy +msgid "Diagram Tree" +msgstr "µµÇ¥:" + +#: app/preferences.c:120 +#, fuzzy +msgid "Reset tools after create" +msgstr "»ý¼ºÈÄ µµ±¸ ¸®¼Â:" + +#: app/preferences.c:121 +#, fuzzy +msgid "Compress saved files" +msgstr "ÀúÀåÆÄÀÏ ¾ÐÃà:" + +#: app/preferences.c:122 +msgid "Number of undo levels:" +msgstr "Ãë¼ÒÇϱ⠰¹¼ö:" + +#: app/preferences.c:123 +msgid "" +"Reverse dragging selects\n" +"intersecting objects" +msgstr "" + +#: app/preferences.c:124 +msgid "Recent documents list size:" +msgstr "" + +#: app/preferences.c:125 +msgid "Use menu bar" +msgstr "" + +#: app/preferences.c:127 +msgid "Keep tool box on top of diagram windows" +msgstr "" + +#: app/preferences.c:129 +#, fuzzy +msgid "New diagram:" +msgstr "»õ µµÇ¥(_N)" + +#: app/preferences.c:130 +#, fuzzy +msgid "Portrait" +msgstr "¿ªÇÒ:" + +#: app/preferences.c:132 +#, fuzzy +msgid "Paper type:" +msgstr "Á¾ÀÌ Å©±â" + +#: app/preferences.c:135 +#, fuzzy +msgid "Background Color:" +msgstr "¹è°æ »ö»ó:" + +#: app/preferences.c:138 +msgid "New window:" +msgstr "»õ â:" + +#: app/preferences.c:139 +msgid "Width:" +msgstr "Æø:" + +#: app/preferences.c:140 +msgid "Height:" +msgstr "³ôÀÌ:" + +#: app/preferences.c:141 +msgid "Magnify:" +msgstr "" + +#: app/preferences.c:144 +msgid "Connection Points:" +msgstr "¿¬°áÁ¡:" + +#. { NULL, PREF_NONE, 0, NULL, 3, N_("Grid:") }, +#: app/preferences.c:145 app/preferences.c:149 app/preferences.c:155 +#, fuzzy +msgid "Visible" +msgstr "°ÝÀÚ º¸±â(_V)" + +#: app/preferences.c:148 +msgid "Page breaks:" +msgstr "" + +#: app/preferences.c:150 app/preferences.c:160 +#, fuzzy +msgid "Color:" +msgstr "´Ý±â" + +#: app/preferences.c:151 +#, fuzzy +msgid "Solid lines" +msgstr "°ÝÀÚ ÁÙ" + +#: app/preferences.c:156 +#, fuzzy +msgid "Snap to" +msgstr "²÷±ä °ÝÀÚ(_S)" + +#: app/preferences.c:157 +msgid "Dynamic grid resizing" +msgstr "" + +#: app/preferences.c:158 +msgid "X Size:" +msgstr "" + +#: app/preferences.c:159 +msgid "Y Size:" +msgstr "" + +#: app/preferences.c:161 +msgid "Lines per major line" +msgstr "" + +#: app/preferences.c:163 +#, fuzzy +msgid "Hex Size:" +msgstr "B ÂÊ" + +#: app/preferences.c:182 +#, fuzzy +msgid "Diagram tree window:" +msgstr "µµÇ¥:" + +#: app/preferences.c:184 +#, fuzzy +msgid "Save hidden object types" +msgstr "Ç¥ÁØ °´Ã¼" + +#: app/preferences.c:391 lib/prop_inttypes.c:158 +msgid "Yes" +msgstr "¿¹" + +#: app/preferences.c:391 app/preferences.c:402 lib/prop_inttypes.c:160 +#: lib/prop_inttypes.c:186 +msgid "No" +msgstr "¾Æ´Ï¿À" + +#: app/preferences.c:504 +msgid "Preferences" +msgstr "¼³Á¤" + +#: app/properties.c:55 +msgid "Object properties" +msgstr "°´Ã¼ ¼Ó¼º" + +#: app/properties.c:80 +msgid "This object has no properties." 
+msgstr "ÀÌ °´Ã¼´Â ¼Ó¼ºÀÌ ¾ø½À´Ï´Ù" + +#: app/properties.c:202 +#, fuzzy +msgid "Properties: " +msgstr "¼³Á¤(_P)" + +#: app/properties.c:206 +#, fuzzy +msgid "Object properties:" +msgstr "°´Ã¼ ¼Ó¼º" + +#. Use the Plugins menu item to get a pointer to the File menu, +#. but any item on the File menu will do +#: app/recent_files.c:61 +#, fuzzy +msgid "/File/Plugins..." +msgstr "Ç÷¯±×ÀÎ" + +#: app/recent_files.c:70 app/recent_files.c:143 +msgid "/File/Quit" +msgstr "" + +#: app/render_eps.c:145 +msgid "Encapsulated Postscript (using Pango fonts)" +msgstr "" + +#: app/render_eps.c:154 +msgid "Encapsulated Postscript with preview (using Pango fonts)" +msgstr "" + +#: app/render_eps.c:164 +msgid "Encapsulated Postscript (using PostScript Latin-1 fonts)" +msgstr "" + +#: app/render_libart.c:318 lib/dialibartrenderer.c:288 +msgid "gdk_renderer: Unsupported fill mode specified!\n" +msgstr "" + +#: app/sheets.c:154 +#, c-format +msgid "" +"%s\n" +"System sheet" +msgstr "" + +#: app/sheets.c:156 +#, c-format +msgid "" +"%s\n" +"User sheet" +msgstr "" + +#: app/sheets.c:271 +msgid "" +"Can't get symbol 'custom_type' from any module.\n" +"Editing shapes is disabled." +msgstr "" + +#: app/sheets.c:373 +#, c-format +msgid "Widget not found: %s" +msgstr "" + +#: app/sheets.c:448 +msgid "SVG Shape" +msgstr "" + +#: app/sheets.c:450 +msgid "Programmed DiaObject" +msgstr "" + +#: app/sheets_dialog.c:83 +#, fuzzy +msgid "Sheets and Objects" +msgstr "Ç¥ÁØ °´Ã¼" + +#: app/sheets_dialog.c:109 app/sheets_dialog_callbacks.c:177 +msgid "<- Copy" +msgstr "" + +#: app/sheets_dialog.c:119 app/sheets_dialog_callbacks.c:179 +msgid "<- Copy All" +msgstr "" + +#: app/sheets_dialog.c:126 app/sheets_dialog_callbacks.c:181 +#, fuzzy +msgid "<- Move" +msgstr "À§·Î À̵¿" + +#: app/sheets_dialog.c:136 app/sheets_dialog_callbacks.c:183 +msgid "<- Move All" +msgstr "" + +#: app/sheets_dialog.c:233 +#, fuzzy +msgid "Edit" +msgstr "/ÆíÁý(_E)" + +#: app/sheets_dialog.c:235 +#, fuzzy +msgid "_Edit" +msgstr "/ÆíÁý(_E)" + +#: app/sheets_dialog.c:266 +#, fuzzy +msgid "Revert" +msgstr "»èÁ¦" + +#: app/sheets_dialog.c:354 +msgid "New" +msgstr "" + +#: app/sheets_dialog.c:362 app/sheets_dialog.c:794 objects/Istar/actor.c:143 +#: objects/Istar/actor.c:144 objects/Istar/other.c:150 +#: objects/Istar/other.c:151 objects/KAOS/other.c:151 objects/KAOS/other.c:152 +#, fuzzy +msgid "Type" +msgstr "Çü½Ä:" + +#: app/sheets_dialog.c:377 +msgid "Browse..." 
+msgstr "" + +#: app/sheets_dialog.c:386 +msgid "SVG Shape:" +msgstr "" + +#: app/sheets_dialog.c:413 app/sheets_dialog.c:476 +#, fuzzy +msgid "description:" +msgstr "¼³¸í:" + +#: app/sheets_dialog.c:425 +#, fuzzy +msgid "Sheet name:" +msgstr "ÆÄÀÏ À̸§:" + +#: app/sheets_dialog.c:446 app/sheets_dialog_callbacks.c:289 +#: app/sheets_dialog_callbacks.c:1090 +#, fuzzy +msgid "Line Break" +msgstr "¼± »ö»ó:" + +#: app/sheets_dialog.c:568 +#, fuzzy +msgid "Edit Attributes" +msgstr "°èÃþ ¼Ó¼º ÆíÁý" + +#: app/sheets_dialog.c:583 +#, fuzzy +msgid "DiaObject" +msgstr "/°´Ã¼(_O)" + +#: app/sheets_dialog.c:599 app/sheets_dialog.c:692 +msgid "Description:" +msgstr "¼³¸í:" + +#: app/sheets_dialog.c:618 objects/FS/flow-ortho.c:168 objects/FS/flow.c:146 +#: objects/UML/class_dialog.c:911 objects/UML/class_dialog.c:1845 +#: objects/UML/class_dialog.c:2074 objects/UML/class_dialog.c:2590 +#: objects/Istar/link.c:167 objects/Jackson/phenomenon.c:144 +#: objects/KAOS/metaandorrel.c:164 objects/KAOS/metabinrel.c:181 +msgid "Type:" +msgstr "Çü½Ä:" + +#: app/sheets_dialog.c:656 +#, fuzzy +msgid "Sheet" +msgstr "¼±ÅÃ" + +#: app/sheets_dialog.c:672 objects/ER/attribute.c:151 objects/ER/entity.c:133 +#: objects/ER/relationship.c:138 objects/UML/association.c:200 +#: objects/UML/association.c:1126 objects/UML/class_dialog.c:900 +#: objects/UML/class_dialog.c:1834 objects/UML/class_dialog.c:2063 +#: objects/UML/class_dialog.c:2579 objects/UML/dependency.c:133 +#: objects/UML/generalization.c:129 objects/UML/realizes.c:130 +msgid "Name:" +msgstr "À̸§:" + +#. Translators: Menu item Verb/Channel/Export/Remove +#. Translators: Menu item Verb/Branch/Separate/Remove +#. Translators: Menu item Verb/Branch/Separate/Remove/Remove +#: app/sheets_dialog.c:784 objects/FS/function.c:695 objects/FS/function.c:847 +#: objects/FS/function.c:849 +msgid "Remove" +msgstr "»èÁ¦" + +#: app/sheets_dialog.c:809 +#, fuzzy +msgid "DiaObject:" +msgstr "/°´Ã¼(_O)" + +#: app/sheets_dialog.c:836 +#, fuzzy +msgid "Sheet:" +msgstr "¼±ÅÃ" + +#: app/sheets_dialog.c:920 +msgid "Select SVG Shape File" +msgstr "" + +#: app/sheets_dialog_callbacks.c:164 +#, fuzzy +msgid "Copy ->" +msgstr "¹®ÀÚ¿­ º¹»ç" + +#: app/sheets_dialog_callbacks.c:166 +msgid "Copy All ->" +msgstr "" + +#: app/sheets_dialog_callbacks.c:168 +#, fuzzy +msgid "Move ->" +msgstr "À§·Î À̵¿" + +#: app/sheets_dialog_callbacks.c:170 +msgid "Move All ->" +msgstr "" + +#: app/sheets_dialog_callbacks.c:303 +#, c-format +msgid "" +"%s\n" +"Shape" +msgstr "" + +#: app/sheets_dialog_callbacks.c:306 +#, fuzzy, c-format +msgid "" +"%s\n" +"Object" +msgstr "/°´Ã¼(_O)" + +#: app/sheets_dialog_callbacks.c:309 +#, c-format +msgid "" +"%s\n" +"Unassigned type" +msgstr "" + +#: app/sheets_dialog_callbacks.c:800 +#, c-format +msgid "Filename must end with '%s': '%s'" +msgstr "" + +#: app/sheets_dialog_callbacks.c:808 +#, fuzzy, c-format +msgid "Error examining %s: %s" +msgstr "µµÇ¥ ÆÄÀÏ ÀдÂÁß ¿À·ù\n" + +#: app/sheets_dialog_callbacks.c:828 +#, c-format +msgid "Could not interpret shape file: '%s'" +msgstr "" + +#: app/sheets_dialog_callbacks.c:897 +msgid "Sheet must have a Name" +msgstr "" + +#: app/sheets_dialog_callbacks.c:1594 app/sheets_dialog_callbacks.c:1600 +#, fuzzy, c-format +msgid "Couldn't open '%s': %s" +msgstr "`%s'¸¦ ¿­¼ö ¾ø½À´Ï´Ù" + +#: app/sheets_dialog_callbacks.c:1651 +#, fuzzy, c-format +msgid "Couldn't open: '%s' for writing" +msgstr "¿­¼ö ¾÷½À´Ï´Ù: '%s' ¾µ¼ö¾ø½¿.\n" + +#: app/sheets_dialog_callbacks.c:1660 +msgid "a user" +msgstr "" + +#: app/sheets_dialog_callbacks.c:1673 +#, fuzzy, 
+msgid "File: %s"
+msgstr "파일"
+
+#: app/sheets_dialog_callbacks.c:1676
+#, c-format
+msgid "Date: %s"
+msgstr ""
+
+#: app/sheets_dialog_callbacks.c:1680
+#, c-format
+msgid "For: %s"
+msgstr ""
+
+#: app/sheets_dialog_callbacks.c:1697
+msgid "add shapes here"
+msgstr ""
+
+#: app/splash.c:58
+msgid "Loading ..."
+msgstr ""
+
+#: app/splash.c:76
+#, c-format
+msgid "Dia v %s"
+msgstr ""
+
+#: dia.desktop.in.h:2
+#, fuzzy
+msgid "Diagram editor"
+msgstr "도표 편집기"
+
+#: lib/arrows.c:40 objects/UML/association.c:1149 objects/Jackson/domain.c:96
+msgid "None"
+msgstr ""
+
+#: lib/arrows.c:41
+msgid "Lines"
+msgstr ""
+
+#: lib/arrows.c:42
+msgid "Hollow Triangle"
+msgstr ""
+
+#: lib/arrows.c:43
+msgid "Filled Triangle"
+msgstr ""
+
+#: lib/arrows.c:44
+msgid "Unfilled Triangle"
+msgstr ""
+
+#: lib/arrows.c:45
+#, fuzzy
+msgid "Hollow Diamond"
+msgstr "이름:"
+
+#: lib/arrows.c:46
+msgid "Filled Diamond"
+msgstr ""
+
+#: lib/arrows.c:47
+#, fuzzy
+msgid "Half Diamond"
+msgstr "이름:"
+
+#: lib/arrows.c:48
+msgid "Half Head"
+msgstr ""
+
+#: lib/arrows.c:49
+msgid "Slashed Cross"
+msgstr ""
+
+#: lib/arrows.c:50
+msgid "Filled Ellipse"
+msgstr ""
+
+#: lib/arrows.c:51
+msgid "Hollow Ellipse"
+msgstr ""
+
+#: lib/arrows.c:52
+msgid "Filled Dot"
+msgstr ""
+
+#: lib/arrows.c:53
+msgid "Dimension Origin"
+msgstr ""
+
+#: lib/arrows.c:54
+msgid "Blanked Dot"
+msgstr ""
+
+#: lib/arrows.c:55
+msgid "Double Hollow Triangle"
+msgstr ""
+
+#: lib/arrows.c:56
+msgid "Double Filled Triangle"
+msgstr ""
+
+#: lib/arrows.c:57
+msgid "Filled Dot and Triangle"
+msgstr ""
+
+#: lib/arrows.c:58
+msgid "Filled Box"
+msgstr ""
+
+#: lib/arrows.c:59
+msgid "Blanked Box"
+msgstr ""
+
+#: lib/arrows.c:60
+#, fuzzy
+msgid "Slashed"
+msgstr "괘선"
+
+#: lib/arrows.c:61
+msgid "Integral Symbol"
+msgstr ""
+
+#: lib/arrows.c:62
+#, fuzzy
+msgid "Crow Foot"
+msgstr "글꼴 크기:"
+
+#: lib/arrows.c:63
+#, fuzzy
+msgid "Cross"
+msgstr "클래스"
+
+#: lib/arrows.c:64
+msgid "1-or-many"
+msgstr ""
+
+#: lib/arrows.c:65
+msgid "0-or-many"
+msgstr ""
+
+#: lib/arrows.c:66
+msgid "1-or-0"
+msgstr ""
+
+#: lib/arrows.c:67
+msgid "1 exactly"
+msgstr ""
+
+#: lib/arrows.c:68
+#, fuzzy
+msgid "Filled Concave"
+msgstr "파일 이름:"
+
+#: lib/arrows.c:69
+msgid "Blanked Concave"
+msgstr ""
+
+#: lib/arrows.c:70
+#, fuzzy
+msgid "Round"
+msgstr "아니오"
+
+#: lib/arrows.c:71
+#, fuzzy
+msgid "Open Round"
+msgstr "설정 하기"
+
+#: lib/arrows.c:72
+msgid "Backslash"
+msgstr ""
+
+#: lib/bezier_conn.c:533
+msgid "Internal error: Setting corner type of endpoint of bezier"
+msgstr "내부 오류: 베지어 곡선 끝점의 가장자리 형태 설정"
+
+#: lib/dia_xml.c:154
+#, c-format
+msgid ""
+"The file %s has no encoding specification;\n"
+"assuming it is encoded in %s"
+msgstr ""
+
+#: lib/dia_xml.c:471
+msgid "Taking point value of non-point node."
+msgstr ""
+
+#: lib/dia_xml.c:482
+#, c-format
+msgid "Incorrect x Point value \"%s\" %f; discarding it."
+msgstr ""
+
+#: lib/dia_xml.c:489
+msgid "Error parsing point."
+msgstr ""
+
+#. don't bother with useless warnings (see above)
+#: lib/dia_xml.c:497
+#, c-format
+msgid "Incorrect y Point value \"%s\" %f; discarding it."
+msgstr ""
+
+#: lib/dia_xml.c:865
+msgid ""
+"Your local character set is UTF-8. Because of issues with libxml1 and the "
+"support of files generated by previous versions of dia, you will encounter "
+"problems. Please report to dia-list@gnome.org if you see this message."
+msgstr "" + +#: lib/diaarrowchooser.c:251 +msgid "Arrow Properties" +msgstr "È­»ìÇ¥ ¼Ó¼º" + +#: lib/diaarrowchooser.c:333 lib/dialinechooser.c:333 +msgid "Details..." +msgstr "ÀÚ¼¼È÷..." + +#. This is the default text shown in the preview entry, though the user +#. can set it. Remember that some fonts only have capital letters. +#: lib/diagtkfontsel.c:74 +msgid "abcdefghijk ABCDEFGHIJK" +msgstr "" + +#: lib/diagtkfontsel.c:209 +#, fuzzy +msgid "Font name" +msgstr "ÆÄÀÏ À̸§:" + +#: lib/diagtkfontsel.c:210 +msgid "The X string that represents this font." +msgstr "" + +#: lib/diagtkfontsel.c:216 +#, fuzzy +msgid "Preview text" +msgstr "¹®ÀÚ¿­ ¸¸µê" + +#: lib/diagtkfontsel.c:217 +msgid "The text to display in order to demonstrate the selected font." +msgstr "" + +#: lib/diagtkfontsel.c:321 +msgid "_Family:" +msgstr "" + +#: lib/diagtkfontsel.c:327 +#, fuzzy +msgid "_Style:" +msgstr "ºñÀ²:" + +#: lib/diagtkfontsel.c:333 +#, fuzzy +msgid "Si_ze:" +msgstr "B ÂÊ" + +#. create the text entry widget +#: lib/diagtkfontsel.c:462 +#, fuzzy +msgid "_Preview:" +msgstr "¼³Á¤(_P)" + +#: lib/diagtkfontsel.c:1252 +#, fuzzy +msgid "Font Selection" +msgstr "¿¬°áÁ¡:" + +#: lib/dialinechooser.c:297 +msgid "Line Style Properties" +msgstr "¼± ÇüÅ ¼Ó¼º" + +#: lib/filter.c:120 +#, c-format +msgid "Multiple export filters with unique name %s" +msgstr "" + +#: lib/font.c:89 +#, c-format +msgid "Can't load font %s.\n" +msgstr "" + +#: lib/message.c:78 lib/message.c:238 +msgid "Error" +msgstr "" + +#: lib/message.c:80 lib/message.c:226 +msgid "Warning" +msgstr "" + +#: lib/message.c:106 +msgid "There is one similar message." +msgstr "" + +#: lib/message.c:111 +#, fuzzy +msgid "Show repeated messages" +msgstr "±×¸² ¸¸µê" + +#: lib/message.c:170 +#, fuzzy, c-format +msgid "There are %d similar messages." +msgstr "±×¸² ¸¸µê" + +#: lib/message.c:215 +msgid "Notice" +msgstr "" + +#: lib/object_defaults.c:127 +#, fuzzy, c-format +msgid "" +"Error loading defaults '%s'.\n" +"Not a Dia diagram file." +msgstr "µµÇ¥ ÆÄÀÏ ÀдÂÁß ¿À·ù\n" + +#: lib/plug-ins.c:120 +msgid "???" 
+msgstr "" + +#: lib/plug-ins.c:228 +#, c-format +msgid "Could not deduce correct path for `%s'" +msgstr "" + +#: lib/plug-ins.c:234 +#, fuzzy, c-format +msgid "" +"Could not load plugin '%s'\n" +"%s" +msgstr "" +"Ç÷¯±×ÀÎ `%s'¸¦ ÀÐÀ»¼ö ¾ø½À´Ï´Ù\n" +"%s" + +#: lib/plug-ins.c:247 +#, c-format +msgid "Could not find plugin init function in `%s'" +msgstr "Ç÷¯±×ÀÎ ÃʱâÇÔ¼ö¿¡¼­ `%s'¸¦ ãÀ»¼ö ¾÷½À´Ï´Ù" + +#: lib/plug-ins.c:249 +msgid "Missing symbol 'dia_plugin_init'" +msgstr "" + +#: lib/plug-ins.c:257 lib/plug-ins.c:265 +msgid "dia_plugin_init() call failed" +msgstr "" + +#: lib/plug-ins.c:284 +#, c-format +msgid "%s Plugin could not be unloaded" +msgstr "Ç÷¯±×ÀÎ %s¸¦ Á¦°ÅÇÒ¼ö ¾ø½À´Ï´Ù" + +#: lib/plug-ins.c:372 +#, fuzzy, c-format +msgid "" +"Could not open `%s'\n" +"`%s'" +msgstr "`%s'¸¦ ¿­¼ö ¾ø½À´Ï´Ù" + +#: lib/properties.c:62 lib/properties.h:468 lib/widgets.c:791 +msgid "Left" +msgstr "¿ÞÂÊ" + +#: lib/properties.c:63 lib/properties.h:469 lib/widgets.c:797 +msgid "Center" +msgstr "Áß°£" + +#: lib/properties.c:64 lib/properties.h:470 lib/widgets.c:803 +msgid "Right" +msgstr "¿À¸¥ÂÊ" + +#: lib/properties.c:71 objects/chronogram/chronoline.c:179 +#: objects/chronogram/chronoref.c:158 +#, fuzzy +msgid "Line color" +msgstr "¼± »ö»ó:" + +#: lib/properties.c:72 lib/properties.h:492 lib/properties.h:495 +#, fuzzy +msgid "Line style" +msgstr "¼± ÇüÅÂ:" + +#: lib/properties.c:73 +#, fuzzy +msgid "Fill color" +msgstr "¼± »ö»ó:" + +#: lib/properties.c:74 lib/properties.h:505 +msgid "Draw background" +msgstr "¹è°æ ±×¸²" + +#: lib/properties.c:75 lib/properties.h:509 +#, fuzzy +msgid "Start arrow" +msgstr "È­»ìÇ¥ º¸ÀÓ" + +#: lib/properties.c:76 lib/properties.h:512 +#, fuzzy +msgid "End arrow" +msgstr "È­»ìÇ¥ º¸ÀÓ" + +#: lib/properties.c:78 lib/properties.h:522 +#, fuzzy +msgid "Text alignment" +msgstr "Á¤·Ä:" + +#. all this just to make the defaults selectable ... +#: lib/properties.c:79 lib/properties.h:525 objects/GRAFCET/condition.c:132 +#: objects/GRAFCET/step.c:165 objects/GRAFCET/transition.c:142 +#: objects/UML/class.c:138 objects/UML/class_dialog.c:351 +#: objects/chronogram/chronoline.c:183 objects/chronogram/chronoref.c:164 +#, fuzzy +msgid "Font" +msgstr "±Û²Ã Å©±â:" + +#: lib/properties.c:80 lib/properties.h:528 objects/GRAFCET/condition.c:134 +#: objects/GRAFCET/step.c:167 objects/GRAFCET/transition.c:144 +#: objects/chronogram/chronoline.c:185 objects/chronogram/chronoref.c:166 +#, fuzzy +msgid "Font size" +msgstr "±Û²Ã Å©±â:" + +#: lib/properties.c:81 objects/GRAFCET/step.c:169 +#: objects/chronogram/chronoline.c:187 objects/chronogram/chronoref.c:168 +#, fuzzy +msgid "Text color" +msgstr "»ö»ó ¼±ÅÃ" + +#: lib/properties.h:486 lib/properties.h:489 +#, fuzzy +msgid "Line colour" +msgstr "¼± »ö»ó:" + +#: lib/properties.h:499 lib/properties.h:502 +#, fuzzy +msgid "Fill colour" +msgstr "¼± »ö»ó:" + +#: lib/properties.h:531 lib/properties.h:534 +#, fuzzy +msgid "Text colour" +msgstr "»ö»ó ¼±ÅÃ" + +#: lib/widgets.c:395 +msgid "Other fonts..." +msgstr "" + +#: lib/widgets.c:522 +#, fuzzy, c-format +msgid "Couldn't find font family for %s\n" +msgstr "" +"Ç÷¯±×ÀÎ `%s'¸¦ ÀÐÀ»¼ö ¾ø½À´Ï´Ù\n" +"%s" + +#. We hit the Other fonts... entry +#: lib/widgets.c:587 +#, fuzzy +msgid "Select font" +msgstr "ÇÁ¸°ÅÍ ¼±ÅÃ" + +#. 
+#: lib/widgets.c:939 objects/FS/function.c:1063
+msgid "Solid"
+msgstr "실선"
+
+#: lib/widgets.c:945
+msgid "Dashed"
+msgstr "괘선"
+
+#: lib/widgets.c:951
+msgid "Dash-Dot"
+msgstr "일점괘선"
+
+#: lib/widgets.c:957
+msgid "Dash-Dot-Dot"
+msgstr "이점괘선"
+
+#: lib/widgets.c:963
+msgid "Dotted"
+msgstr "점선"
+
+#. fs->sizebox = GTK_HBOX(box);
+#: lib/widgets.c:982
+msgid "Dash length: "
+msgstr ""
+
+#: lib/widgets.c:1137
+msgid "Select color"
+msgstr "색상 선택"
+
+#: lib/widgets.c:1355
+#, fuzzy
+msgid "Size: "
+msgstr "B 쪽"
+
+#: lib/widgets.c:1504
+msgid "Select image file"
+msgstr ""
+
+#: lib/widgets.c:1540
+msgid "Browse"
+msgstr ""
+
+#: objects/ER/attribute.c:153
+msgid "Key:"
+msgstr ""
+
+#: objects/ER/attribute.c:155
+msgid "Weak key:"
+msgstr ""
+
+#: objects/ER/attribute.c:157
+#, fuzzy
+msgid "Derived:"
+msgstr "격자:"
+
+#: objects/ER/attribute.c:159
+msgid "Multivalue:"
+msgstr ""
+
+#: objects/ER/attribute.c:421 sheets/ER.sheet.in.h:1
+msgid "Attribute"
+msgstr ""
+
+#: objects/ER/entity.c:135
+msgid "Weak:"
+msgstr ""
+
+#: objects/ER/entity.c:137
+msgid "Associative:"
+msgstr ""
+
+#: objects/ER/entity.c:406 objects/UML/classicon.c:126 sheets/ER.sheet.in.h:4
+msgid "Entity"
+msgstr ""
+
+#: objects/ER/er.c:44
+msgid "Entity/Relationship diagram objects"
+msgstr ""
+
+#: objects/ER/participation.c:115
+msgid "Total:"
+msgstr ""
+
+#: objects/ER/participation.c:405 objects/FS/flow-ortho.c:644
+#: objects/GRAFCET/vector.c:311 objects/SADT/arrow.c:462
+#: objects/UML/association.c:752 objects/UML/component_feature.c:183
+#: objects/UML/dependency.c:372 objects/UML/generalization.c:368
+#: objects/UML/realizes.c:359 objects/standard/zigzagline.c:346
+msgid "Add segment"
+msgstr ""
+
+#: objects/ER/participation.c:406 objects/FS/flow-ortho.c:645
+#: objects/GRAFCET/vector.c:312 objects/SADT/arrow.c:463
+#: objects/UML/association.c:753 objects/UML/component_feature.c:184
+#: objects/UML/dependency.c:373 objects/UML/generalization.c:369
+#: objects/UML/realizes.c:360 objects/standard/zigzagline.c:347
+msgid "Delete segment"
+msgstr ""
+
+#: objects/ER/relationship.c:140
+msgid "Left Cardinality:"
+msgstr ""
+
+#: objects/ER/relationship.c:142
+msgid "Right Cardinality:"
+msgstr ""
+
+#: objects/ER/relationship.c:144
+#, fuzzy
+msgid "Rotate:"
+msgstr "역할:"
+
+#: objects/ER/relationship.c:146
+msgid "Identifying:"
+msgstr ""
+
+#: objects/ER/relationship.c:449 sheets/ER.sheet.in.h:6
+msgid "Relationship"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy
+#: objects/FS/flow-ortho.c:159 objects/FS/flow-ortho.c:641
+#: objects/FS/flow.c:137 objects/FS/flow.c:576 objects/FS/function.c:1085
+msgid "Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Material
+#: objects/FS/flow-ortho.c:160 objects/FS/flow-ortho.c:642
+#: objects/FS/flow.c:138 objects/FS/flow.c:577 objects/FS/function.c:1061
+msgid "Material"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal
+#. Translators: Menu item Verb/Signal/Signal
+#. Translators: Menu item Noun/Signal
+#. Translators: Menu item Noun/Signal/Signal
+#: objects/FS/flow-ortho.c:161 objects/FS/flow-ortho.c:643
+#: objects/FS/flow.c:139 objects/FS/flow.c:578 objects/FS/function.c:1023
+#: objects/FS/function.c:1025 objects/FS/function.c:1161
+#: objects/FS/function.c:1163
+msgid "Signal"
+msgstr ""
+
+#: objects/FS/fs.c:43
+msgid "Function structure diagram objects"
+msgstr ""
+
+#: objects/FS/function.c:140
+msgid "Wish function"
+msgstr ""
+
+#: objects/FS/function.c:142
+msgid "User function"
+msgstr ""
+
+#. Translators: Menu item Verb
+#: objects/FS/function.c:665
+msgid "Verb"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel
+#. Translators: Menu item Verb/Channel/Channel
+#. Translators: Menu item Verb/Channel/Transfer/Transport/Channel
+#: objects/FS/function.c:667 objects/FS/function.c:669
+#: objects/FS/function.c:709
+msgid "Channel"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Import
+#. Translators: Menu item Verb/Channel/Import/Import
+#: objects/FS/function.c:671 objects/FS/function.c:673
+msgid "Import"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Import/Input
+#: objects/FS/function.c:675 objects/KAOS/metabinrel.c:173
+msgid "Input"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Import/Receive
+#: objects/FS/function.c:677
+#, fuzzy
+msgid "Receive"
+msgstr "삭제"
+
+#. Translators: Menu item Verb/Channel/Import/Allow
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Allow
+#: objects/FS/function.c:679 objects/FS/function.c:951
+msgid "Allow"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Import/Form Entrance
+#: objects/FS/function.c:681
+msgid "Form Entrance"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Import/Capture
+#. Translators: Menu item Verb/Provision/Store/Capture
+#: objects/FS/function.c:683 objects/FS/function.c:917
+#, fuzzy
+msgid "Capture"
+msgstr "생성"
+
+#. Translators: Menu item Verb/Channel/Export/Discharge
+#: objects/FS/function.c:689
+msgid "Discharge"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Export/Eject
+#: objects/FS/function.c:691
+#, fuzzy
+msgid "Eject"
+msgstr "객체"
+
+#. Translators: Menu item Verb/Channel/Export/Dispose
+#: objects/FS/function.c:693
+msgid "Dispose"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Transfer
+#. Translators: Menu item Verb/Channel/Transfer/Transfer
+#. Translators: Menu item Verb/Channel/Transfer/Transmit/Transfer
+#: objects/FS/function.c:697 objects/FS/function.c:699
+#: objects/FS/function.c:717
+msgid "Transfer"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Transfer/Transport
+#. Translators: Menu item Verb/Channel/Transfer/Transport/Transport
+#: objects/FS/function.c:701 objects/FS/function.c:703
+msgid "Transport"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Transfer/Transport/Lift
+#: objects/FS/function.c:705
+#, fuzzy
+msgid "Lift"
+msgstr "왼쪽"
+
+#. Translators: Menu item Verb/Channel/Transfer/Transport/Move
+#: objects/FS/function.c:707
+#, fuzzy
+msgid "Move"
+msgstr "위로 이동"
+
+#. Translators: Menu item Verb/Channel/Transfer/Transmit
+#. Translators: Menu item Verb/Channel/Transfer/Transmit/Transmit
+#: objects/FS/function.c:711 objects/FS/function.c:713
+msgid "Transmit"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Transfer/Transmit/Conduct
+#: objects/FS/function.c:715
+msgid "Conduct"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Transfer/Transmit/Convey
+#: objects/FS/function.c:719
+msgid "Convey"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide
+#. Translators: Menu item Verb/Channel/Guide/Guide
+#. Translators: Menu item Verb/Channel/Guide/Guide/Guide
+#: objects/FS/function.c:721 objects/FS/function.c:723
+#: objects/FS/function.c:725
+#, fuzzy
+msgid "Guide"
+msgstr "격자:"
+
+#. Translators: Menu item Verb/Channel/Guide/Guide/Direct
+#: objects/FS/function.c:727
+msgid "Direct"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide/Guide/Straighten
+#: objects/FS/function.c:729
+msgid "Straighten"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide/Guide/Steer
+#: objects/FS/function.c:731
+msgid "Steer"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide/Translate
+#: objects/FS/function.c:733
+msgid "Translate"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide/Rotate
+#. Translators: Menu item Verb/Channel/Guide/Rotate/Rotate
+#: objects/FS/function.c:735 objects/FS/function.c:737
+#, fuzzy
+msgid "Rotate"
+msgstr "역할:"
+
+#. Translators: Menu item Verb/Channel/Guide/Rotate/Turn
+#: objects/FS/function.c:739
+msgid "Turn"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide/Rotate/Spin
+#: objects/FS/function.c:741
+#, fuzzy
+msgid "Spin"
+msgstr "비율"
+
+#. Translators: Menu item Verb/Channel/Guide/Allow DOF
+#. Translators: Menu item Verb/Channel/Guide/Allow DOF/Allow DOF
+#: objects/FS/function.c:743 objects/FS/function.c:745
+msgid "Allow DOF"
+msgstr ""
+
+#. Translators: Menu item Verb/Channel/Guide/Allow DOF/Constrain
+#: objects/FS/function.c:747
+#, fuzzy
+msgid "Constrain"
+msgstr "합성"
+
+#. Translators: Menu item Verb/Channel/Guide/Allow DOF/Unlock
+#: objects/FS/function.c:749
+#, fuzzy
+msgid "Unlock"
+msgstr "제거하기"
+
+#. Translators: Menu item Verb/Support
+#. Translators: Menu item Verb/Support/Support
+#: objects/FS/function.c:751 objects/FS/function.c:753
+msgid "Support"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stop
+#. Translators: Menu item Verb/Support/Stop/Stop
+#: objects/FS/function.c:755 objects/FS/function.c:757
+#: sheets/SDL.sheet.in.h:19
+msgid "Stop"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stop/Insulate
+#: objects/FS/function.c:759
+msgid "Insulate"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stop/Protect
+#: objects/FS/function.c:761
+msgid "Protect"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stop/Prevent
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Prevent
+#: objects/FS/function.c:763 objects/FS/function.c:953
+msgid "Prevent"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stop/Shield
+#: objects/FS/function.c:765
+msgid "Shield"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stop/Inhibit
+#: objects/FS/function.c:767
+msgid "Inhibit"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stabilize
+#. Translators: Menu item Verb/Support/Stabilize/Stabilize
+#: objects/FS/function.c:769 objects/FS/function.c:771
+msgid "Stabilize"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Stabilize/Steady
+#: objects/FS/function.c:773
+msgid "Steady"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Secure
+#. Translators: Menu item Verb/Support/Secure/Secure
+#: objects/FS/function.c:775 objects/FS/function.c:777
+msgid "Secure"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Secure/Attach
+#. Translators: Menu item Verb/Connect/Couple/Attach
+#: objects/FS/function.c:779 objects/FS/function.c:811
+msgid "Attach"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Secure/Mount
+#: objects/FS/function.c:781
+#, fuzzy
+msgid "Mount"
+msgstr "글꼴 크기:"
+
+#. Translators: Menu item Verb/Support/Secure/Lock
+#: objects/FS/function.c:783 sheets/ciscomisc.sheet.in.h:21
+msgid "Lock"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Secure/Fasten
+#: objects/FS/function.c:785
+msgid "Fasten"
+msgstr ""
+
+#. Translators: Menu item Verb/Support/Secure/Hold
+#: objects/FS/function.c:787
+#, fuzzy
+msgid "Hold"
+msgstr "실선"
+
+#. Translators: Menu item Verb/Support/Position
+#. Translators: Menu item Verb/Support/Position/Position
+#: objects/FS/function.c:789 objects/FS/function.c:791
+#: objects/Istar/actor.c:68
+#, fuzzy
+msgid "Position"
+msgstr "합성"
+
+#. Translators: Menu item Verb/Support/Position/Orient
+#: objects/FS/function.c:793
+#, fuzzy
+msgid "Orient"
+msgstr "방향"
+
+#. Translators: Menu item Verb/Support/Position/Align
+#: objects/FS/function.c:795
+#, fuzzy
+msgid "Align"
+msgstr "정렬:"
+
+#. Translators: Menu item Verb/Support/Position/Locate
+#. Translators: Menu item Verb/Signal/Sense/Locate
+#: objects/FS/function.c:797 objects/FS/function.c:1039
+#, fuzzy
+msgid "Locate"
+msgstr "올려짐:"
+
+#. Translators: Menu item Verb/Connect
+#. Translators: Menu item Verb/Connect/Connect
+#: objects/FS/function.c:799 objects/FS/function.c:801
+msgid "Connect"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Couple
+#. Translators: Menu item Verb/Connect/Couple/Couple
+#: objects/FS/function.c:803 objects/FS/function.c:805
+msgid "Couple"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Couple/Join
+#: objects/FS/function.c:807
+msgid "Join"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Couple/Assemble
+#: objects/FS/function.c:809
+msgid "Assemble"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Mix
+#. Translators: Menu item Verb/Connect/Mix/Mix
+#: objects/FS/function.c:813 objects/FS/function.c:815
+msgid "Mix"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Mix/Combine
+#: objects/FS/function.c:817
+msgid "Combine"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Mix/Blend
+#: objects/FS/function.c:819
+#, fuzzy
+msgid "Blend"
+msgstr "시작"
+
+#. Translators: Menu item Verb/Connect/Mix/Add
+#: objects/FS/function.c:821
+msgid "Add"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Mix/Pack
+#: objects/FS/function.c:823
+msgid "Pack"
+msgstr ""
+
+#. Translators: Menu item Verb/Connect/Mix/Coalesce
+#: objects/FS/function.c:825
+#, fuzzy
+msgid "Coalesce"
+msgstr "닫기"
+
+#. Translators: Menu item Verb/Branch
+#. Translators: Menu item Verb/Branch/Branch
+#: objects/FS/function.c:827 objects/FS/function.c:829 sheets/UML.sheet.in.h:5
+msgid "Branch"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate
+#. Translators: Menu item Verb/Branch/Separate/Separate
+#. Translators: Menu item Verb/Branch/Separate/Separate/Separate
+#: objects/FS/function.c:831 objects/FS/function.c:833
+#: objects/FS/function.c:835
+#, fuzzy
+msgid "Separate"
+msgstr "각도 변경:"
+
+#. Translators: Menu item Verb/Branch/Separate/Separate/Switch
+#: objects/FS/function.c:837
+msgid "Switch"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Separate/Divide
+#: objects/FS/function.c:839
+msgid "Divide"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Separate/Release
+#: objects/FS/function.c:841
+#, fuzzy
+msgid "Release"
+msgstr "지움"
+
+#. Translators: Menu item Verb/Branch/Separate/Separate/Detach
+#: objects/FS/function.c:843
+msgid "Detach"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Separate/Disconnect
+#: objects/FS/function.c:845
+msgid "Disconnect"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Remove/Cut
+#: objects/FS/function.c:851
+msgid "Cut"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Remove/Polish
+#: objects/FS/function.c:853
+msgid "Polish"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Remove/Sand
+#: objects/FS/function.c:855
+msgid "Sand"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Remove/Drill
+#: objects/FS/function.c:857
+msgid "Drill"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Separate/Remove/Lathe
+#: objects/FS/function.c:859
+msgid "Lathe"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Refine
+#. Translators: Menu item Verb/Branch/Refine/Refine
+#: objects/FS/function.c:861 objects/FS/function.c:863
+#, fuzzy
+msgid "Refine"
+msgstr "선"
+
+#. Translators: Menu item Verb/Branch/Refine/Purify
+#: objects/FS/function.c:865
+msgid "Purify"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Refine/Strain
+#: objects/FS/function.c:867
+#, fuzzy
+msgid "Strain"
+msgstr "비율"
+
+#. Translators: Menu item Verb/Branch/Refine/Filter
+#: objects/FS/function.c:869
+#, fuzzy
+msgid "Filter"
+msgstr "파일"
+
+#. Translators: Menu item Verb/Branch/Refine/Percolate
+#: objects/FS/function.c:871
+msgid "Percolate"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Refine/Clear
+#: objects/FS/function.c:873
+#, fuzzy
+msgid "Clear"
+msgstr "클래스"
+
+#. Translators: Menu item Verb/Branch/Distribute
+#. Translators: Menu item Verb/Branch/Distribute/Distribute
+#: objects/FS/function.c:875 objects/FS/function.c:877
+#, fuzzy
+msgid "Distribute"
+msgstr "속성"
+
+#. Translators: Menu item Verb/Branch/Distribute/Diverge
+#: objects/FS/function.c:879
+msgid "Diverge"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Distribute/Scatter
+#: objects/FS/function.c:881
+#, fuzzy
+msgid "Scatter"
+msgstr "비율:"
+
+#. Translators: Menu item Verb/Branch/Distribute/Disperse
+#: objects/FS/function.c:883
+msgid "Disperse"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Distribute/Diffuse
+#. Translators: Menu item Verb/Branch/Dissipate/Diffuse
+#: objects/FS/function.c:885 objects/FS/function.c:899
+msgid "Diffuse"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Distribute/Empty
+#: objects/FS/function.c:887
+msgid "Empty"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Dissipate
+#. Translators: Menu item Verb/Branch/Dissipate/Dissipate
+#: objects/FS/function.c:889 objects/FS/function.c:891
+msgid "Dissipate"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Dissipate/Absorb
+#: objects/FS/function.c:893
+msgid "Absorb"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Dissipate/Dampen
+#: objects/FS/function.c:895
+#, fuzzy
+msgid "Dampen"
+msgstr "이름:"
+
+#. Translators: Menu item Verb/Branch/Dissipate/Dispel
+#: objects/FS/function.c:897
+msgid "Dispel"
+msgstr ""
+
+#. Translators: Menu item Verb/Branch/Dissipate/Resist
+#: objects/FS/function.c:901
+msgid "Resist"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision
+#. Translators: Menu item Verb/Provision/Provision
+#: objects/FS/function.c:903 objects/FS/function.c:905
+msgid "Provision"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision/Store
+#. Translators: Menu item Verb/Provision/Store/Store
+#: objects/FS/function.c:907 objects/FS/function.c:909
+msgid "Store"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision/Store/Contain
+#: objects/FS/function.c:911
+msgid "Contain"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision/Store/Collect
+#: objects/FS/function.c:913
+msgid "Collect"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision/Store/Reserve
+#: objects/FS/function.c:915
+#, fuzzy
+msgid "Reserve"
+msgstr "삭제"
+
+#. Translators: Menu item Verb/Provision/Supply
+#. Translators: Menu item Verb/Provision/Supply/Supply
+#: objects/FS/function.c:919 objects/FS/function.c:921
+#, fuzzy
+msgid "Supply"
+msgstr "적용"
+
+#. Translators: Menu item Verb/Provision/Supply/Fill
+#: objects/FS/function.c:923
+#, fuzzy
+msgid "Fill"
+msgstr "파일"
+
+#. Translators: Menu item Verb/Provision/Supply/Provide
+#: objects/FS/function.c:925
+msgid "Provide"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision/Supply/Replenish
+#: objects/FS/function.c:927
+msgid "Replenish"
+msgstr ""
+
+#. Translators: Menu item Verb/Provision/Supply/Expose
+#: objects/FS/function.c:929
+#, fuzzy
+msgid "Expose"
+msgstr "타원"
+
+#. Translators: Menu item Verb/Provision/Extract
+#: objects/FS/function.c:931 sheets/Flowchart.sheet.in.h:7
+#, fuzzy
+msgid "Extract"
+msgstr "추상"
+
+#. Translators: Menu item Verb/Control Magnitude
+#. Translators: Menu item Verb/Control Magnitude/Control Magnitude
+#: objects/FS/function.c:933 objects/FS/function.c:935
+msgid "Control Magnitude"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Actuate
+#. Translators: Menu item Verb/Control Magnitude/Actuate/Actuate
+#: objects/FS/function.c:937 objects/FS/function.c:939
+msgid "Actuate"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Actuate/Start
+#: objects/FS/function.c:941
+#, fuzzy
+msgid "Start"
+msgstr "화살표 보임"
+
+#. Translators: Menu item Verb/Control Magnitude/Actuate/Initiate
+#: objects/FS/function.c:943
+msgid "Initiate"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Regulate
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Regulate
+#: objects/FS/function.c:945 objects/FS/function.c:947
+#, fuzzy
+msgid "Regulate"
+msgstr "템플릿"
+
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Control
+#. Translators: Menu item Noun/Signal/Control
+#: objects/FS/function.c:949 objects/FS/function.c:1167
+#: objects/UML/classicon.c:124
+msgid "Control"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Enable
+#: objects/FS/function.c:955
+msgid "Enable"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Disable
+#: objects/FS/function.c:957
+msgid "Disable"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Limit
+#: objects/FS/function.c:959
+msgid "Limit"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Regulate/Interrupt
+#: objects/FS/function.c:961
+msgid "Interrupt"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Change
+#. Translators: Menu item Verb/Control Magnitude/Change/Change
+#: objects/FS/function.c:963 objects/FS/function.c:965
+#, fuzzy
+msgid "Change"
+msgstr "취소"
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Increase
+#: objects/FS/function.c:967
+msgid "Increase"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Decrease
+#: objects/FS/function.c:969
+msgid "Decrease"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Amplify
+#: objects/FS/function.c:971
+#, fuzzy
+msgid "Amplify"
+msgstr "적용"
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Reduce
+#: objects/FS/function.c:973
+msgid "Reduce"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Normalize
+#: objects/FS/function.c:977
+msgid "Normalize"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Multiply
+#: objects/FS/function.c:979
+#, fuzzy
+msgid "Multiply"
+msgstr "중복"
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Scale
+#: objects/FS/function.c:981
+#, fuzzy
+msgid "Scale"
+msgstr "비율:"
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Rectify
+#: objects/FS/function.c:983
+msgid "Rectify"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Change/Adjust
+#: objects/FS/function.c:985
+msgid "Adjust"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Form
+#. Translators: Menu item Verb/Control Magnitude/Form/Form
+#: objects/FS/function.c:987 objects/FS/function.c:989
+msgid "Form"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Form/Compact
+#: objects/FS/function.c:991
+msgid "Compact"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Form/Crush
+#: objects/FS/function.c:993
+msgid "Crush"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Form/Shape
+#: objects/FS/function.c:995
+msgid "Shape"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Form/Compress
+#: objects/FS/function.c:997
+msgid "Compress"
+msgstr ""
+
+#. Translators: Menu item Verb/Control Magnitude/Form/Pierce
+#: objects/FS/function.c:999
+#, fuzzy
+msgid "Pierce"
+msgstr "프린터"
+
+#. Translators: Menu item Verb/Convert
+#. Translators: Menu item Verb/Convert/Convert
+#: objects/FS/function.c:1001 objects/FS/function.c:1003
+msgid "Convert"
+msgstr ""
+
+#. Translators: Menu item Verb/Convert/Transform
+#: objects/FS/function.c:1005
+msgid "Transform"
+msgstr ""
+
+#. Translators: Menu item Verb/Convert/Liquefy
+#: objects/FS/function.c:1007
+msgid "Liquefy"
+msgstr ""
+
+#. Translators: Menu item Verb/Convert/Solidify
+#: objects/FS/function.c:1009
+#, fuzzy
+msgid "Solidify"
+msgstr "실선"
+
+#. Translators: Menu item Verb/Convert/Evaporate
+#: objects/FS/function.c:1011
+msgid "Evaporate"
+msgstr ""
+
+#. Translators: Menu item Verb/Convert/Sublimate
+#: objects/FS/function.c:1013
+msgid "Sublimate"
+msgstr ""
+
+#. Translators: Menu item Verb/Convert/Condense
+#: objects/FS/function.c:1015
+#, fuzzy
+msgid "Condense"
+msgstr "닫기"
+
+#. Translators: Menu item Verb/Convert/Integrate
+#: objects/FS/function.c:1017
+#, fuzzy
+msgid "Integrate"
+msgstr "사용자 인터페이스"
+
+#. Translators: Menu item Verb/Convert/Differentiate
+#: objects/FS/function.c:1019
+msgid "Differentiate"
+msgstr ""
+
+#. Translators: Menu item Verb/Convert/Process
+#: objects/FS/function.c:1021 sheets/EML.sheet.in.h:5
+msgid "Process"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Sense
+#. Translators: Menu item Verb/Signal/Sense/Sense
+#: objects/FS/function.c:1027 objects/FS/function.c:1029
+msgid "Sense"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Sense/Perceive
+#: objects/FS/function.c:1031
+#, fuzzy
+msgid "Perceive"
+msgstr "격자:"
+
+#. Translators: Menu item Verb/Signal/Sense/Recognize
+#: objects/FS/function.c:1033
+msgid "Recognize"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Sense/Discern
+#: objects/FS/function.c:1035
+msgid "Discern"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Sense/Check
+#: objects/FS/function.c:1037
+msgid "Check"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Sense/Verify
+#: objects/FS/function.c:1041
+msgid "Verify"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Indicate
+#. Translators: Menu item Verb/Signal/Indicate/Indicate
+#: objects/FS/function.c:1043 objects/FS/function.c:1045
+msgid "Indicate"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Indicate/Mark
+#: objects/FS/function.c:1047
+msgid "Mark"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Display
+#: objects/FS/function.c:1049 sheets/Flowchart.sheet.in.h:5
+msgid "Display"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Measure
+#. Translators: Menu item Verb/Signal/Measure/Measure
+#: objects/FS/function.c:1051 objects/FS/function.c:1053
+msgid "Measure"
+msgstr ""
+
+#. Translators: Menu item Verb/Signal/Measure/Calculate
+#: objects/FS/function.c:1055
+#, fuzzy
+msgid "Calculate"
+msgstr "호출"
+
+#. Translators: Menu item Verb/Signal/Represent
+#: objects/FS/function.c:1057
+msgid "Represent"
+msgstr ""
+
+#. Translators: Menu item Noun
+#: objects/FS/function.c:1059
+#, fuzzy
+msgid "Noun"
+msgstr "아니오"
+
+#. Translators: Menu item Noun/Material/Liquid
+#: objects/FS/function.c:1065
+msgid "Liquid"
+msgstr ""
+
+#. Translators: Menu item Noun/Material/Gas
+#: objects/FS/function.c:1067
+msgid "Gas"
+msgstr ""
+
+#. Translators: Menu item Noun/Material/Human
+#. Translators: Menu item Noun/Material/Human/Human
+#. Translators: Menu item Noun/Energy/Human
+#: objects/FS/function.c:1069 objects/FS/function.c:1071
+#: objects/FS/function.c:1155
+msgid "Human"
+msgstr ""
+
+#. Translators: Menu item Noun/Material/Human/Hand
+#: objects/FS/function.c:1073
+msgid "Hand"
+msgstr ""
+
+#. Translators: Menu item Noun/Material/Human/Foot
+#: objects/FS/function.c:1075
+#, fuzzy
+msgid "Foot"
+msgstr "글꼴 크기:"
+
+#. Translators: Menu item Noun/Material/Human/Head
+#: objects/FS/function.c:1077
+msgid "Head"
+msgstr ""
+
+#. Translators: Menu item Noun/Material/Human/Finger
+#: objects/FS/function.c:1079
+#, fuzzy
+msgid "Finger"
+msgstr "프린터"
+
+#. Translators: Menu item Noun/Material/Human/Toe
+#: objects/FS/function.c:1081
+#, fuzzy
+msgid "Toe"
+msgstr "위"
+
+#. Translators: Menu item Noun/Material/Biological
+#: objects/FS/function.c:1083
+msgid "Biological"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical
+#: objects/FS/function.c:1087
+msgid "Mechanical"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical/Mech. Energy
+#: objects/FS/function.c:1089
+msgid "Mech. Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical/Translation
+#: objects/FS/function.c:1091
+#, fuzzy
+msgid "Translation"
+msgstr "방향"
+
+#. Translators: Menu item Noun/Energy/Mechanical/Force
+#: objects/FS/function.c:1093
+msgid "Force"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical/Rotation
+#: objects/FS/function.c:1095
+#, fuzzy
+msgid "Rotation"
+msgstr "역할:"
+
+#. Translators: Menu item Noun/Energy/Mechanical/Torque
+#: objects/FS/function.c:1097
+msgid "Torque"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical/Random Motion
+#: objects/FS/function.c:1099
+msgid "Random Motion"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical/Vibration
+#: objects/FS/function.c:1101
+#, fuzzy
+msgid "Vibration"
+msgstr "작동"
+
+#. Translators: Menu item Noun/Energy/Mechanical/Rotational Energy
+#: objects/FS/function.c:1103
+msgid "Rotational Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Mechanical/Translational Energy
+#: objects/FS/function.c:1105
+msgid "Translational Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Electricity
+#: objects/FS/function.c:1107
+#, fuzzy
+msgid "Electrical"
+msgstr "좌우 바꿈"
+
+#. Translators: Menu item Noun/Energy/Electricity/Electricity
+#: objects/FS/function.c:1109
+msgid "Electricity"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Electricity/Voltage
+#: objects/FS/function.c:1111
+msgid "Voltage"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Electricity/Current
+#: objects/FS/function.c:1113
+#, fuzzy
+msgid "Current"
+msgstr "중간"
+
+#. Translators: Menu item Noun/Energy/Hydraulic
+#: objects/FS/function.c:1115
+msgid "Hydraulic"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Hydraulic/Pressure
+#: objects/FS/function.c:1117
+msgid "Pressure"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Hydraulic/Volumetric Flow
+#: objects/FS/function.c:1119
+msgid "Volumetric Flow"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Thermal
+#: objects/FS/function.c:1121
+msgid "Thermal"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Thermal/Heat
+#: objects/FS/function.c:1123
+#, fuzzy
+msgid "Heat"
+msgstr "생성"
+
+#. Translators: Menu item Noun/Energy/Thermal/Conduction
+#: objects/FS/function.c:1125
+msgid "Conduction"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Thermal/Convection
+#: objects/FS/function.c:1127
+#, fuzzy
+msgid "Convection"
+msgstr "연결점:"
+
+#. Translators: Menu item Noun/Energy/Thermal/Radiation
+#. Translators: Menu item Noun/Energy/Radioactive/Radiation
+#: objects/FS/function.c:1129 objects/FS/function.c:1137
+#, fuzzy
+msgid "Radiation"
+msgstr "방향"
+
+#. Translators: Menu item Noun/Energy/Pneumatic
+#: objects/FS/function.c:1131
+msgid "Pneumatic"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Chemical
+#: objects/FS/function.c:1133
+msgid "Chemical"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Radioactive
+#: objects/FS/function.c:1135
+msgid "Radioactive"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Radioactive/Microwaves
+#: objects/FS/function.c:1139
+msgid "Microwaves"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Radioactive/Radio waves
+#: objects/FS/function.c:1141
+msgid "Radio waves"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Radioactive/X-Rays
+#: objects/FS/function.c:1143
+msgid "X-Rays"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Radioactive/Gamma Rays
+#: objects/FS/function.c:1145
+msgid "Gamma Rays"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Acoustic Energy
+#: objects/FS/function.c:1147
+msgid "Acoustic Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Optical Energy
+#: objects/FS/function.c:1149
+msgid "Optical Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Solar Energy
+#: objects/FS/function.c:1151
+msgid "Solar Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Magnetic Energy
+#: objects/FS/function.c:1153
+msgid "Magnetic Energy"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Human/Human Motion
+#: objects/FS/function.c:1157
+msgid "Human Motion"
+msgstr ""
+
+#. Translators: Menu item Noun/Energy/Human/Human Force
+#: objects/FS/function.c:1159
+msgid "Human Force"
+msgstr ""
+
+#. Translators: Menu item Noun/Signal/Status
+#: objects/FS/function.c:1165
+msgid "Status"
+msgstr ""
+
+#: objects/FS/function.c:1169
+msgid "User/Device Fn"
+msgstr ""
+
+#: objects/FS/function.c:1170
+msgid "Wish Fn"
+msgstr ""
+
+#: objects/GRAFCET/action.c:135 sheets/SDL.sheet.in.h:9
+msgid "Macro call"
+msgstr ""
+
+#: objects/GRAFCET/action.c:135
+msgid "This action is a call to a macro-step"
+msgstr ""
+
+#: objects/GRAFCET/condition.c:130
+#, fuzzy
+msgid "Condition"
+msgstr "합성"
+
+#: objects/GRAFCET/condition.c:130
+msgid "The boolean equation of the condition"
+msgstr ""
+
+#: objects/GRAFCET/condition.c:132
+msgid "The condition's font"
+msgstr ""
+
+#: objects/GRAFCET/condition.c:134
+msgid "The condition's font size"
+msgstr ""
+
+#: objects/GRAFCET/condition.c:137 objects/GRAFCET/transition.c:147
+#, fuzzy
+msgid "Color"
+msgstr "닫기"
+
+#: objects/GRAFCET/condition.c:137
+msgid "The condition's color"
+msgstr ""
+
+#: objects/GRAFCET/grafcet.c:45
+msgid "GRAFCET diagram objects"
+msgstr ""
+
+#: objects/GRAFCET/step.c:146 sheets/GRAFCET.sheet.in.h:13
+msgid "Regular step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:147 sheets/GRAFCET.sheet.in.h:6
+msgid "Initial step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:148 sheets/GRAFCET.sheet.in.h:8
+msgid "Macro entry step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:149 sheets/GRAFCET.sheet.in.h:9
+msgid "Macro exit step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:150 sheets/GRAFCET.sheet.in.h:7
+msgid "Macro call step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:151
+msgid "Subprogram call step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:158
+msgid "Step name"
+msgstr ""
+
+#: objects/GRAFCET/step.c:158
+msgid "The name of the step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:161
+msgid "Step type"
+msgstr ""
+
+#: objects/GRAFCET/step.c:161
+msgid "The kind of step"
+msgstr ""
+
+#: objects/GRAFCET/step.c:163
+msgid "Active"
+msgstr ""
+
+#: objects/GRAFCET/step.c:163
+msgid "Shows a red dot to figure the step's activity"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:140
+msgid "Receptivity"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:140
+msgid "The boolean equation of the receptivity"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:142
+msgid "The receptivity's font"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:144
+msgid "The receptivity's font size"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:147
+msgid "The receptivity's color"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:148
+msgid "North point"
+msgstr ""
+
+#: objects/GRAFCET/transition.c:149
+#, fuzzy
+msgid "South point"
+msgstr "부드러운 조절"
+
+#: objects/GRAFCET/vector.c:128
+msgid "Draw arrow heads on upward arcs:"
+msgstr ""
+
+#: objects/GRAFCET/vergent.c:122
+msgid "OR"
+msgstr ""
+
+#: objects/GRAFCET/vergent.c:123
+msgid "AND"
+msgstr ""
+
+#: objects/GRAFCET/vergent.c:134
+msgid "Vergent type:"
+msgstr ""
+
+#: objects/GRAFCET/vergent.c:405 objects/SADT/box.c:445
+#: objects/Istar/other.c:483 objects/Jackson/domain.c:539
+#: objects/KAOS/goal.c:587 objects/KAOS/other.c:535
+#: objects/standard/line.c:232
+msgid "Add connection point"
+msgstr ""
+
+#: objects/GRAFCET/vergent.c:406
+msgid "Delete connection point"
+msgstr ""
+
+#: objects/GRAFCET/vergent.c:410
+msgid "GRAFCET OR/AND vergent"
+msgstr ""
+
+#: objects/Misc/analog_clock.c:136
+#, fuzzy
+msgid "Arrow color"
+msgstr "선 색상:"
+
+#: objects/Misc/analog_clock.c:138
+#, fuzzy
+msgid "Arrow line width"
+msgstr "선 굵기"
+
+#: objects/Misc/analog_clock.c:140
+#, fuzzy
+msgid "Seconds arrow color"
+msgstr "선 굵기"
+
+#: objects/Misc/analog_clock.c:142
+#, fuzzy
+msgid "Seconds arrow line width"
+msgstr "선 굵기"
+
+#: objects/Misc/analog_clock.c:144
+#, fuzzy
+msgid "Show hours"
+msgstr "눈금자 보기(_R)"
+
+#: objects/Misc/libmisc.c:38
+msgid "Miscellaneous objects"
+msgstr ""
+
+#. property rows
+#: objects/SADT/arrow.c:134 objects/UML/class.c:143
+#: objects/UML/class_dialog.c:357
+msgid "Normal"
+msgstr ""
+
+#: objects/SADT/arrow.c:135
+msgid "Import resource (not shown upstairs)"
+msgstr ""
+
+#: objects/SADT/arrow.c:136
+msgid "Imply resource (not shown downstairs)"
+msgstr ""
+
+#: objects/SADT/arrow.c:137
+msgid "Dotted arrow"
+msgstr ""
+
+#: objects/SADT/arrow.c:138
+msgid "disable arrow heads"
+msgstr ""
+
+#: objects/SADT/arrow.c:144
+msgid "Flow style:"
+msgstr ""
+
+#: objects/SADT/arrow.c:146
+msgid "Automatically gray vertical flows:"
+msgstr ""
+
+#: objects/SADT/arrow.c:147
+msgid ""
+"To improve the ease of reading, flows which begin and end vertically can be "
+"rendered gray"
+msgstr ""
+
+#: objects/SADT/arrow.c:468
+msgid "SADT Arrow"
+msgstr ""
+
+#: objects/SADT/box.c:137 objects/flowchart/box.c:150
+#: objects/flowchart/diamond.c:148 objects/flowchart/ellipse.c:147
+#: objects/flowchart/parallelogram.c:152
+msgid "Text padding"
+msgstr "문자열 채움"
+
+#: objects/SADT/box.c:144
+msgid "Activity/Data identifier"
+msgstr ""
+
+#: objects/SADT/box.c:145
+msgid "The identifier which appears in the lower right corner of the Box"
+msgstr ""
+
+#: objects/SADT/box.c:446 objects/Istar/other.c:484
+#: objects/Jackson/domain.c:540 objects/KAOS/goal.c:588
+#: objects/KAOS/other.c:536 objects/standard/line.c:233
+msgid "Delete connection point"
+msgstr ""
+
+#: objects/SADT/box.c:451
+msgid "SADT box"
+msgstr ""
+
+#: objects/SADT/sadt.c:41
+msgid "SADT diagram objects"
+msgstr ""
+
+#: objects/UML/actor.c:359 objects/UML/actor.c:361 sheets/UML.sheet.in.h:2
+msgid "Actor"
+msgstr ""
+
+#: objects/UML/association.c:1139 objects/UML/class_dialog.c:2107
+msgid "Direction:"
+msgstr ""
+
+#: objects/UML/association.c:1156
+msgid "From A to B"
+msgstr "A에서 B로"
+
+#: objects/UML/association.c:1163
+msgid "From B to A"
+msgstr "B에서 A로"
+
+#: objects/UML/association.c:1187 objects/UML/association.c:1195
+msgid "Side A"
+msgstr "A 쪽"
+
+#: objects/UML/association.c:1189 objects/UML/association.c:1197
+msgid "Side B"
+msgstr "B 쪽"
+
+#: objects/UML/association.c:1203
+msgid "Role:"
+msgstr "역할:"
+
+#: objects/UML/association.c:1215
+msgid "Multiplicity:"
+msgstr "중복"
+
+#. Show arrow:
+#: objects/UML/association.c:1226
+msgid "Show arrow"
+msgstr "화살표 보임"
+
+#. Aggregate
+#: objects/UML/association.c:1232
+msgid "Aggregate"
+msgstr "집합"
+
+#. Composition
+#: objects/UML/association.c:1240
+msgid "Composition"
+msgstr "합성"
+
+#: objects/UML/class.c:110 objects/UML/class.c:247 objects/UML/class.c:1163
+#: sheets/UML.sheet.in.h:6
+msgid "Class"
+msgstr "클래스"
+
+#: objects/UML/class.c:114 objects/UML/classicon.c:135
+#: objects/UML/component.c:122 objects/UML/large_package.c:126
+#: objects/UML/object.c:147 objects/UML/small_package.c:122
+msgid "Stereotype"
+msgstr ""
+
+#: objects/UML/class.c:116 objects/UML/class.c:153
+#: objects/UML/class_dialog.c:382 sheets/SDL.sheet.in.h:3
+#, fuzzy
+msgid "Comment"
+msgstr "중간"
+
+#: objects/UML/class.c:118 objects/UML/class.c:147
+#: objects/UML/class_dialog.c:296 objects/UML/class_dialog.c:367
+#: objects/UML/class_dialog.c:1933
+msgid "Abstract"
+msgstr ""
+
+#: objects/UML/class.c:120
+#, fuzzy
+msgid "Template"
+msgstr "템플릿"
+
+#: objects/UML/class.c:123 objects/UML/class_dialog.c:305
+msgid "Suppress Attributes"
+msgstr "속성 안보임"
+
+#: objects/UML/class.c:125
+#, fuzzy
+msgid "Suppress Operations"
+msgstr "작동 안보임"
+
+#: objects/UML/class.c:127
+#, fuzzy
+msgid "Visible Attributes"
+msgstr "속성"
+
+#: objects/UML/class.c:129
+#, fuzzy
+msgid "Visible Operations"
+msgstr "작동"
+
+#: objects/UML/class.c:131
+#, fuzzy
+msgid "Visible Comments"
+msgstr "작동"
+
+#: objects/UML/class.c:133 objects/UML/class_dialog.c:321
+#, fuzzy
+msgid "Wrap Operations"
+msgstr "작동"
+
+#: objects/UML/class.c:135
+msgid "Wrap after char"
+msgstr ""
+
+#: objects/UML/class.c:145 objects/UML/class_dialog.c:362
+msgid "Polymorphic"
+msgstr ""
+
+#: objects/UML/class.c:149
+#, fuzzy
+msgid "Classname"
+msgstr "클래스이름:"
+
+#: objects/UML/class.c:151
+#, fuzzy
+msgid "Abstract Classname"
+msgstr "추상"
+
+#: objects/UML/class.c:157 objects/UML/class.c:159 objects/UML/class.c:161
+#: objects/UML/class.c:163 objects/UML/class.c:165 objects/UML/class.c:167
+msgid " "
+msgstr ""
+
+#: objects/UML/class.c:172 objects/UML/object.c:151
+msgid "Attributes"
+msgstr "속성"
+
+#: objects/UML/class.c:174
+msgid "Operations"
+msgstr "작동"
+
+#: objects/UML/class.c:241
+#, fuzzy
+msgid "Show Comments"
+msgstr "중간"
+
+#. Class page:
+#: objects/UML/class_dialog.c:265
+#, fuzzy
+msgid "_Class"
+msgstr "클래스"
+
+#: objects/UML/class_dialog.c:273
+msgid "Class name:"
+msgstr "클래스이름:"
+
+#: objects/UML/class_dialog.c:281 objects/UML/class_dialog.c:1856
+#: objects/UML/dependency.c:135 objects/UML/generalization.c:131
+#: objects/UML/realizes.c:132
+msgid "Stereotype:"
+msgstr ""
+
+#: objects/UML/class_dialog.c:288 objects/UML/class_dialog.c:933
+#: objects/UML/class_dialog.c:1974 objects/UML/class_dialog.c:2096
+#, fuzzy
+msgid "Comment:"
+msgstr "중간"
+
+#: objects/UML/class_dialog.c:302
+msgid "Attributes visible"
+msgstr "속성 보임"
+
+#: objects/UML/class_dialog.c:311
+msgid "Operations visible"
+msgstr "작동 보임"
+
+#: objects/UML/class_dialog.c:314
+msgid "Suppress operations"
+msgstr "작동 안보임"
+
+#: objects/UML/class_dialog.c:328
+msgid "Wrap after this length: "
+msgstr ""
+
+#: objects/UML/class_dialog.c:335
+#, fuzzy
+msgid "Comments visible"
+msgstr "작동 보임"
+
+#. head line
+#: objects/UML/class_dialog.c:348
+msgid "Kind"
+msgstr ""
+
+#: objects/UML/class_dialog.c:353
+#, fuzzy
+msgid "Size"
+msgstr "B 쪽"
+
+#: objects/UML/class_dialog.c:372
+#, fuzzy
+msgid "Class Name"
+msgstr "클래스이름:"
+
+#: objects/UML/class_dialog.c:377
+#, fuzzy
+msgid "Abstract Class"
+msgstr "추상"
+
+#. should probably be refactored too.
+#: objects/UML/class_dialog.c:394
+#, fuzzy
+msgid "Text Color"
+msgstr "색상 선택"
+
+#: objects/UML/class_dialog.c:402
+#, fuzzy
+msgid "Foreground Color"
+msgstr "전경 색상:"
+
+#: objects/UML/class_dialog.c:410
+#, fuzzy
+msgid "Background Color"
+msgstr "배경 색상:"
+
+#. Attributes page:
+#: objects/UML/class_dialog.c:833
+#, fuzzy
+msgid "_Attributes"
+msgstr "속성"
+
+#: objects/UML/class_dialog.c:861 objects/UML/class_dialog.c:1792
+#: objects/UML/class_dialog.c:2020 objects/UML/class_dialog.c:2540
+#, fuzzy
+msgid "_New"
+msgstr "/보기(_V)"
+
+#: objects/UML/class_dialog.c:867 objects/UML/class_dialog.c:1798
+#: objects/UML/class_dialog.c:2027 objects/UML/class_dialog.c:2546
+msgid "_Delete"
+msgstr ""
+
+#: objects/UML/class_dialog.c:873 objects/UML/class_dialog.c:1804
+#: objects/UML/class_dialog.c:2034 objects/UML/class_dialog.c:2552
+msgid "Move up"
+msgstr "위로 이동"
+
+#: objects/UML/class_dialog.c:879 objects/UML/class_dialog.c:1810
+#: objects/UML/class_dialog.c:2041 objects/UML/class_dialog.c:2558
+msgid "Move down"
+msgstr "아래로 이동"
+
+#: objects/UML/class_dialog.c:890
+msgid "Attribute data"
+msgstr "속성 데이터"
+
+#: objects/UML/class_dialog.c:922
+msgid "Value:"
+msgstr "값:"
+
+#: objects/UML/class_dialog.c:945 objects/UML/class_dialog.c:1869
+msgid "Visibility:"
+msgstr "표시:"
+
+#: objects/UML/class_dialog.c:954 objects/UML/class_dialog.c:1878
+msgid "Public"
+msgstr "공용"
+
+#: objects/UML/class_dialog.c:962 objects/UML/class_dialog.c:1886
+msgid "Private"
+msgstr ""
+
+#: objects/UML/class_dialog.c:970 objects/UML/class_dialog.c:1894
+msgid "Protected"
+msgstr ""
+
+#: objects/UML/class_dialog.c:978 objects/UML/class_dialog.c:1902
+msgid "Implementation"
+msgstr ""
+
+#: objects/UML/class_dialog.c:999 objects/UML/class_dialog.c:1918
+msgid "Class scope"
+msgstr ""
+
+#. Operations page:
+#: objects/UML/class_dialog.c:1764
+#, fuzzy
+msgid "_Operations"
+msgstr "작동"
+
+#: objects/UML/class_dialog.c:1822
+msgid "Operation data"
+msgstr "작동 데이터"
+
+#: objects/UML/class_dialog.c:1924
+msgid "Inheritance type:"
+msgstr ""
+
+#: objects/UML/class_dialog.c:1941
+msgid "Polymorphic (virtual)"
+msgstr ""
+
+#: objects/UML/class_dialog.c:1949
+msgid "Leaf (final)"
+msgstr ""
+
+#: objects/UML/class_dialog.c:1967
+msgid "Query"
+msgstr ""
+
+#: objects/UML/class_dialog.c:1991
+msgid "Parameters:"
+msgstr "인자:"
+
+#: objects/UML/class_dialog.c:2053
+msgid "Parameter data"
+msgstr "인자 데이터"
+
+#: objects/UML/class_dialog.c:2085
+msgid "Def. value:"
+msgstr "기본 값:"
+
+#: objects/UML/class_dialog.c:2116
+#, fuzzy
+msgid "Undefined"
+msgstr "선"
+
+#: objects/UML/class_dialog.c:2124
+msgid "In"
+msgstr ""
+
+#: objects/UML/class_dialog.c:2133
+msgid "Out"
+msgstr ""
+
+#: objects/UML/class_dialog.c:2142
+msgid "In & Out"
+msgstr ""
+
+#. Templates page:
+#: objects/UML/class_dialog.c:2506
+#, fuzzy
+msgid "_Templates"
+msgstr "템플릿"
+
+#: objects/UML/class_dialog.c:2512 sheets/UML.sheet.in.h:29
+msgid "Template class"
+msgstr "템플릿 클래스"
+
+#: objects/UML/class_dialog.c:2569
+msgid "Formal parameter data"
+msgstr ""
+
+#: objects/UML/classicon.c:125
+msgid "Boundary"
+msgstr ""
+
+#: objects/UML/classicon.c:137
+#, fuzzy
+msgid "Is object"
+msgstr "객체 활성"
+
+#: objects/UML/component_feature.c:144 sheets/UML.sheet.in.h:14
+msgid "Facet"
+msgstr ""
+
+#: objects/UML/component_feature.c:145 sheets/UML.sheet.in.h:26
+msgid "Receptacle"
+msgstr ""
+
+#: objects/UML/component_feature.c:146 sheets/UML.sheet.in.h:13
+msgid "Event Source"
+msgstr ""
+
+#: objects/UML/component_feature.c:147 sheets/UML.sheet.in.h:12
+msgid "Event Sink"
+msgstr ""
+
+#: objects/UML/constraint.c:125
+msgid "Constraint:"
+msgstr ""
+
+#: objects/UML/dependency.c:137
+msgid "Show arrow:"
+msgstr ""
+
+#: objects/UML/implements.c:128
+msgid "Interface:"
+msgstr ""
+
+#: objects/UML/lifeline.c:144
+msgid "Draw focus of control:"
+msgstr ""
+
+#: objects/UML/lifeline.c:146
+msgid "Draw destruction mark:"
+msgstr ""
+
+#: objects/UML/lifeline.c:417
+#, fuzzy
+msgid "Add connection points"
+msgstr "연결점:"
+
+#: objects/UML/lifeline.c:418
+#, fuzzy
+msgid "Remove connection points"
+msgstr "연결점 보기(_C)"
+
+#: objects/UML/lifeline.c:422
+msgid "UML Lifeline"
+msgstr ""
+
+#: objects/UML/message.c:135
+msgid "Call"
+msgstr "호출"
+
+#: objects/UML/message.c:136
+msgid "Create"
+msgstr "생성"
+
+#: objects/UML/message.c:137
+msgid "Destroy"
+msgstr "소멸"
+
+#: objects/UML/message.c:138
+msgid "Simple"
+msgstr ""
+
+#: objects/UML/message.c:139
+msgid "Return"
+msgstr ""
+
+#: objects/UML/message.c:140
+msgid "Send"
+msgstr ""
+
+#: objects/UML/message.c:141
+msgid "Recursive"
+msgstr ""
+
+#: objects/UML/message.c:150 objects/Jackson/phenomenon.c:142
+msgid "Message:"
+msgstr ""
+
+#: objects/UML/message.c:152
+msgid "Message type:"
+msgstr ""
+
+#: objects/UML/object.c:149
+msgid "Explicit state"
+msgstr ""
+
+#: objects/UML/object.c:154
+msgid "Active object"
+msgstr "객체 활성"
+
+#: objects/UML/object.c:156
+msgid "Show attributes"
+msgstr ""
+
+#: objects/UML/object.c:158
+#, fuzzy
+msgid "Multiple instance"
+msgstr "중복"
+
+#. Would like to create a state_term instead, but making the connections
+#. * is a pain
+#: objects/UML/state.c:410
+msgid ""
+"This diagram uses the State object for initial/final states.\n"
+"That option will go away in future versions.\n"
+"Please use the Initial/Final State object instead\n"
+msgstr ""
+
+#: objects/UML/state_term.c:118
+msgid "Is final"
+msgstr ""
+
+#: objects/UML/uml.c:64
+msgid "Unified Modelling Language diagram objects"
+msgstr ""
+
+#: objects/UML/usecase.c:130
+msgid "Text outside"
+msgstr ""
+
+#: objects/UML/usecase.c:132
+msgid "Collaboration"
+msgstr ""
+
+#: objects/bondgraph/bondgraph.c:39
+#, fuzzy
+msgid "Bond graph objects"
+msgstr "표준 객체"
+
+#: objects/chronogram/chronogram.c:40
+msgid "Chronogram diagram objects"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:148
+msgid "Data"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:150
+#, fuzzy
+msgid "Data name"
+msgstr "클래스이름:"
+
+#: objects/chronogram/chronoline.c:152
+msgid "Events"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:155
+msgid "Event specification"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:156
+msgid ""
+"@ time set the pointer at an absolute time.\n"
+"( duration sets the signal up, then wait 'duration'.\n"
+") duration sets the signal down, then wait 'duration'.\n"
+"u duration sets the signal to \"unknown\" state, then wait 'duration'.\n"
+"example : @ 1.0 (2.0)1.0(2.0)\n"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:162
+msgid "Parameters"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:164 objects/chronogram/chronoref.c:146
+#, fuzzy
+msgid "Start time"
+msgstr "화살표 보임"
+
+#: objects/chronogram/chronoline.c:166 objects/chronogram/chronoref.c:148
+msgid "End time"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:168
+#, fuzzy
+msgid "Rise time"
+msgstr "계층 올림"
+
+#: objects/chronogram/chronoline.c:170
+msgid "Fall time"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:171
+msgid "Multi-bit data"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:173 objects/chronogram/chronoref.c:156
+msgid "Aspect"
+msgstr ""
+
+#: objects/chronogram/chronoline.c:175
+#, fuzzy
+msgid "Data color"
+msgstr "색상 선택"
+
+#: objects/chronogram/chronoline.c:177
+#, fuzzy
+msgid "Data line width"
+msgstr "선 굵기"
+
+#: objects/chronogram/chronoref.c:144
+#, fuzzy
+msgid "Time data"
+msgstr "속성 데이터"
+
+#: objects/chronogram/chronoref.c:150
+msgid "Major time step"
+msgstr ""
+
+#: objects/chronogram/chronoref.c:152
+msgid "Minor time step"
+msgstr ""
+
+#: objects/chronogram/chronoref.c:162
+msgid "Minor step line width"
+msgstr ""
+
+#: objects/custom/custom.c:117
+msgid "Custom"
+msgstr ""
+
+#: objects/custom/custom.c:117
+msgid "Custom XML shapes loader"
+msgstr ""
+
+#: objects/custom/custom_object.c:178 objects/custom/custom_object.c:197
+msgid "Flip horizontal"
+msgstr "위아래 바꿈"
+
+#: objects/custom/custom_object.c:180 objects/custom/custom_object.c:199
+msgid "Flip vertical"
+msgstr "좌우 바꿈"
+
+#: objects/custom/custom_object.c:1431
+msgid "Flip Horizontal"
+msgstr "위아래 바꿈"
+
+#: objects/custom/custom_object.c:1432
+msgid "Flip Vertical"
+msgstr "좌우 바꿈"
+
+#: objects/custom/custom_object.c:1469
+#, c-format
+msgid "Cannot open icon file %s for object type '%s'."
+msgstr ""
+
+#: objects/flowchart/box.c:148 objects/standard/box.c:148
+#: objects/standard/polyline.c:130 objects/standard/zigzagline.c:129
+msgid "Corner radius"
+msgstr "가장자리 각도"
+
+#: objects/flowchart/flowchart.c:37
+msgid "Flowchart objects"
+msgstr "흐름도 객체"
+
+#: objects/flowchart/parallelogram.c:150
+#, fuzzy
+msgid "Shear angle"
+msgstr "각도 변경:"
+
+#: objects/Istar/actor.c:66 objects/Istar/link.c:155
+#, fuzzy
+msgid "Unspecified"
+msgstr "선"
+
+#: objects/Istar/actor.c:67 objects/KAOS/other.c:75
+#, fuzzy
+msgid "Agent"
+msgstr "정렬:"
+
+#: objects/Istar/actor.c:69
+#, fuzzy
+msgid "Role"
+msgstr "역할:"
+
+#: objects/Istar/goal.c:74 objects/KAOS/goal.c:79
+msgid "Softgoal"
+msgstr ""
+
+#: objects/Istar/goal.c:75 objects/KAOS/goal.c:80
+msgid "Goal"
+msgstr ""
+
+#: objects/Istar/goal.c:155 objects/Istar/goal.c:156 objects/KAOS/goal.c:159
+#: objects/KAOS/goal.c:160
+#, fuzzy
+msgid "Goal Type"
+msgstr "/선택/같은 형태"
+
+#: objects/Istar/istar.c:46
+#, fuzzy
+msgid "Istar diagram"
+msgstr "도표 인쇄"
+
+#: objects/Istar/link.c:156
+#, fuzzy
+msgid "Positive Contrib"
+msgstr "합성"
+
+#: objects/Istar/link.c:157
+#, fuzzy
+msgid "Negative contrib"
+msgstr "프린터 선택"
+
+#: objects/Istar/link.c:158 sheets/UML.sheet.in.h:10
+#, fuzzy
+msgid "Dependency"
+msgstr "베지어선 만듦"
+
+#: objects/Istar/link.c:159
+#, fuzzy
+msgid "Decomposition"
+msgstr "합성"
+
+#: objects/Istar/link.c:160
+msgid "Means-Ends"
+msgstr ""
+
+#: objects/Istar/other.c:73
+#, fuzzy
+msgid "Resource"
+msgstr "삭제"
+
+#: objects/Istar/other.c:74
+msgid "Task"
+msgstr ""
+
+#: objects/Istar/other.c:489
+msgid "i* other"
+msgstr ""
+
+#: objects/Jackson/domain.c:79
+msgid "Given Domain"
+msgstr ""
+
+#: objects/Jackson/domain.c:80
+msgid "Designed Domain"
+msgstr ""
+
+#: objects/Jackson/domain.c:81
+msgid "Machine Domain"
+msgstr ""
+
+#: objects/Jackson/domain.c:97
+#, fuzzy
+msgid "Causal"
+msgstr "호출"
+
+#: objects/Jackson/domain.c:98
+#, fuzzy
+msgid "Biddable"
+msgstr "파일"
+
+#: objects/Jackson/domain.c:99
+msgid "Lexical"
+msgstr ""
+
+#: objects/Jackson/domain.c:178 objects/Jackson/domain.c:179
+msgid "Domain Type"
+msgstr ""
+
+#: objects/Jackson/domain.c:183
+msgid "Domain Kind"
+msgstr ""
+
+#: objects/Jackson/domain.c:184
+msgid "Optional kind which appears in the lower right corner of the Domain"
+msgstr ""
+
+#: objects/Jackson/domain.c:545
+msgid "Jackson domain"
+msgstr ""
+
+#: objects/Jackson/jackson.c:47
+#, fuzzy
+msgid "Jackson diagram"
+msgstr "새 도표를 만듭니다"
+
+#: objects/Jackson/phenomenon.c:134
+#, fuzzy
+msgid "Shared"
+msgstr "괘선"
+
+#: objects/Jackson/phenomenon.c:135 objects/KAOS/goal.c:81
+msgid "Requirement"
+msgstr ""
+
+#: objects/KAOS/goal.c:82
+msgid "Assumption"
+msgstr ""
+
+#: objects/KAOS/goal.c:83
+msgid "Obstacle"
+msgstr ""
+
+#: objects/KAOS/goal.c:593
+msgid "KAOS goal"
+msgstr ""
+
+#: objects/KAOS/kaos.c:49
+#, fuzzy
+msgid "KAOS diagram"
+msgstr "새 도표(_N)"
+
+#: objects/KAOS/metaandorrel.c:151
+#, fuzzy
+msgid "AND Refinement"
+msgstr "선"
+
+#: objects/KAOS/metaandorrel.c:152
+msgid "Complete AND Refinement"
+msgstr ""
+
+#: objects/KAOS/metaandorrel.c:153
+#, fuzzy
+msgid "OR Refinement"
+msgstr "선"
+
+#: objects/KAOS/metaandorrel.c:154
+#, fuzzy
+msgid "Operationalization"
+msgstr "작동 데이터"
+
+#: objects/KAOS/metaandorrel.c:161
+#, fuzzy
+msgid "Text:"
+msgstr "문자열"
+
+#: objects/KAOS/metabinrel.c:165
+#, fuzzy
+msgid "Contributes"
+msgstr "도와주신분들:"
+
+#: objects/KAOS/metabinrel.c:166
+#, fuzzy
+msgid "Obstructs"
+msgstr "Ãß»ó" + +#: objects/KAOS/metabinrel.c:167 +msgid "Conflicts" +msgstr "" + +#: objects/KAOS/metabinrel.c:168 +#, fuzzy +msgid "Responsibility" +msgstr "Ç¥½Ã:" + +#: objects/KAOS/metabinrel.c:169 +#, fuzzy +msgid "Monitors" +msgstr "ÇÕ¼º" + +#: objects/KAOS/metabinrel.c:170 +#, fuzzy +msgid "Controls" +msgstr "»ÏÁ·ÇÑ Á¶Àý" + +#: objects/KAOS/metabinrel.c:171 +msgid "CapableOf" +msgstr "" + +#: objects/KAOS/metabinrel.c:172 +msgid "Performs" +msgstr "" + +#: objects/KAOS/metabinrel.c:174 +msgid "Output" +msgstr "" + +#: objects/KAOS/other.c:541 +#, fuzzy +msgid "KAOS other" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: objects/network/basestation.c:133 +#, fuzzy +msgid "Sectors" +msgstr "»ö»ó ¼±ÅÃ" + +#: objects/network/basestation.c:388 objects/network/basestation.c:390 +#, fuzzy +msgid "Base Station" +msgstr "È­»ìÇ¥ º¸ÀÓ" + +#: objects/network/bus.c:599 +msgid "Add Handle" +msgstr "ÇÚµé Ãß°¡" + +#: objects/network/bus.c:600 +msgid "Delete Handle" +msgstr "ÇÚµé Á¦°Å" + +#: objects/network/network.c:43 +msgid "Network diagram objects" +msgstr "³×Æ®¿÷ µµÇ¥ °´Ã¼" + +#: objects/network/radiocell.c:130 +msgid "Macro Cell" +msgstr "" + +#: objects/network/radiocell.c:131 +msgid "Micro Cell" +msgstr "" + +#: objects/network/radiocell.c:132 +msgid "Pico Cell" +msgstr "" + +#: objects/network/radiocell.c:138 +msgid "Radius" +msgstr "" + +#: objects/network/radiocell.c:140 +#, fuzzy +msgid "Cell Type:" +msgstr "Çü½Ä:" + +#: objects/network/radiocell.c:152 +msgid "Subscribers" +msgstr "" + +#: objects/standard/arc.c:131 +msgid "Curve distance" +msgstr "" + +#: objects/standard/bezier.c:545 objects/standard/beziergon.c:499 +msgid "Add Segment" +msgstr "ºÎºÐ Ãß°¡" + +#: objects/standard/bezier.c:546 objects/standard/beziergon.c:500 +msgid "Delete Segment" +msgstr "ºÎºÐ Á¦°Å" + +#: objects/standard/bezier.c:548 objects/standard/beziergon.c:502 +msgid "Symmetric control" +msgstr "´ëĪ Á¶Àý" + +#: objects/standard/bezier.c:550 objects/standard/beziergon.c:504 +msgid "Smooth control" +msgstr "ºÎµå·¯¿î Á¶Àý" + +#: objects/standard/bezier.c:552 objects/standard/beziergon.c:506 +msgid "Cusp control" +msgstr "»ÏÁ·ÇÑ Á¶Àý" + +#: objects/standard/box.c:135 objects/standard/ellipse.c:131 +#, fuzzy +msgid "Free" +msgstr "»ý¼º" + +#: objects/standard/box.c:136 objects/standard/ellipse.c:132 +#, fuzzy +msgid "Fixed" +msgstr "ÆÄÀÏ" + +#: objects/standard/box.c:137 objects/standard/box.c:687 +msgid "Square" +msgstr "" + +#: objects/standard/box.c:150 objects/standard/ellipse.c:144 +#, fuzzy +msgid "Aspect ratio" +msgstr "ºñÀ² À¯Áö" + +#: objects/standard/box.c:683 objects/standard/ellipse.c:664 +#, fuzzy +msgid "Free aspect" +msgstr "ºñÀ² À¯Áö" + +#: objects/standard/box.c:685 objects/standard/ellipse.c:666 +msgid "Fixed aspect" +msgstr "" + +#: objects/standard/ellipse.c:133 objects/standard/ellipse.c:668 +#, fuzzy +msgid "Circle" +msgstr "ÇÁ¸°ÅÍ" + +#: objects/standard/image.c:130 +msgid "Image file" +msgstr "±×¸² ÆÄÀÏ" + +#: objects/standard/image.c:132 +msgid "Draw border" +msgstr "°¡ÀåÀÚ¸® ±×¸²" + +#: objects/standard/image.c:134 +msgid "Keep aspect ratio" +msgstr "ºñÀ² À¯Áö" + +#. Found file in same dir as diagram. +#. Found file in current dir. +#: objects/standard/image.c:669 objects/standard/image.c:680 +#, c-format +msgid "" +"The image file '%s' was not found in that directory.\n" +"Using the file '%s' instead\n" +msgstr "" +"±× ÀÚ·á¹æ¿¡ ±×¸²ÆÄÀÏ '%s'¸¦ ãÀ»¼ö ¾ø½À´Ï´Ù.\n" +"´ë½Å¿¡ '%s'ÆÄÀÏÀ» ÀÌ¿ëÇÕ´Ï´Ù\n" + +#. Didn't find file in current dir. 
+#: objects/standard/image.c:686 objects/standard/image.c:714
+#, c-format
+msgid "The image file '%s' was not found.\n"
+msgstr "그림 파일 '%s' 를 찾을수 없습니다.\n"
+
+#: objects/standard/line.c:135
+#, fuzzy
+msgid "Arrows"
+msgstr "화살표 보임"
+
+#: objects/standard/line.c:140
+msgid "Start point"
+msgstr ""
+
+#: objects/standard/line.c:142
+msgid "End point"
+msgstr ""
+
+#: objects/standard/polygon.c:460 objects/standard/polyline.c:515
+msgid "Add Corner"
+msgstr "가장자리 더하기"
+
+#: objects/standard/polygon.c:461 objects/standard/polyline.c:516
+msgid "Delete Corner"
+msgstr "가장자리 지우기"
+
+#: objects/standard/standard.c:45
+msgid "Standard objects"
+msgstr "표준 객체"
+
+#: plug-ins/cairo/diacairo.c:967
+msgid "Cairo PostScript"
+msgstr ""
+
+#: plug-ins/cairo/diacairo.c:976
+msgid "Cairo PNG"
+msgstr ""
+
+#: plug-ins/cairo/diacairo.c:1004
+msgid "Cairo based Rendering"
+msgstr ""
+
+#: plug-ins/cgm/cgm.c:1109
+msgid ""
+"Image row length larger than maximum cell array.\n"
+"Image not exported to CGM."
+msgstr ""
+"그림의 길이가 최대 셀수를 넘어갔습니다.\n"
+"그림을 CGM으로 저장할수 없습니다."
+
+#: plug-ins/cgm/cgm.c:1365
+msgid "Computer Graphics Metafile"
+msgstr "컴퓨터 그래픽 메타파일"
+
+#: plug-ins/cgm/cgm.c:1379
+msgid "Computer Graphics Metafile export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/dxf/dxf-export.c:547 plug-ins/dxf/dxf-import.c:1379
+msgid "Drawing Interchange File"
+msgstr ""
+
+#: plug-ins/dxf/dxf-import.c:481 plug-ins/dxf/dxf-import.c:498
+msgid "Bad vertex bulge\n"
+msgstr ""
+
+#: plug-ins/dxf/dxf-import.c:1051
+#, fuzzy, c-format
+msgid "Scale: %f\n"
+msgstr "비율:"
+
+#: plug-ins/dxf/dxf-import.c:1313
+msgid "read_dxf_codes failed\n"
+msgstr ""
+
+#: plug-ins/dxf/dxf-import.c:1345
+#, c-format
+msgid "Unknown dxf code %d\n"
+msgstr ""
+
+#: plug-ins/dxf/dxf.c:37
+msgid "Drawing Interchange File import and export filters"
+msgstr ""
+
+#: plug-ins/hpgl/hpgl.c:772
+#, fuzzy
+msgid "HP Graphics Language"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/hpgl/hpgl.c:786
+#, fuzzy
+msgid "HP Graphics Language export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/metapost/metapost.c:34
+#, fuzzy
+msgid "TeX Metapost export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/metapost/render_metapost.c:1033
+msgid "TeX Metapost macros"
+msgstr ""
+
+#: plug-ins/pixbuf/pixbuf.c:95
+#, fuzzy, c-format
+msgid ""
+"Could not save file:\n"
+"%s\n"
+"%s"
+msgstr "`%s'를 열수 없습니다"
+
+#: plug-ins/pixbuf/pixbuf.c:159
+msgid "GdkPixbuf - not antialiased"
+msgstr ""
+
+#: plug-ins/pixbuf/pixbuf.c:169
+msgid "GdkPixbuf bitmap"
+msgstr ""
+
+#: plug-ins/pixbuf/pixbuf.c:189
+msgid "gdk-pixbuf based bitmap export/import"
+msgstr ""
+
+#: plug-ins/pstricks/pstricks.c:14
+#, fuzzy
+msgid "TeX Pstricks export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/pstricks/render_pstricks.c:653
+msgid "Not valid UTF8"
+msgstr ""
+
+#: plug-ins/pstricks/render_pstricks.c:871
+msgid "TeX PSTricks macros"
+msgstr ""
+
+#: plug-ins/python/pydia-render.c:711 plug-ins/wmf/wmf.cpp:1137
+#, c-format
+msgid "Couldn't open: '%s' for writing.\n"
+msgstr "열수 없습니다: '%s' 쓸수없슴.\n"
+
+#: plug-ins/python/python.c:92
+msgid "Python scripting support"
+msgstr ""
+
+#: plug-ins/shape/shape-export.c:470
+msgid "Can't export png without libart!"
+msgstr ""
+
+#: plug-ins/shape/shape-export.c:497
+msgid "Dia Shape File"
+msgstr ""
+
+#: plug-ins/shape/shape.c:36
+#, fuzzy
+msgid "dia shape export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/svg/render_svg.c:301 plug-ins/svg/svg-import.c:641
+msgid "Scalable Vector Graphics"
+msgstr ""
+
+#: plug-ins/svg/svg-import.c:203 plug-ins/svg/svg-import.c:217
+#: plug-ins/xfig/xfig-import.c:108 plug-ins/xfig/xfig-import.c:159
+#: plug-ins/xfig/xfig-import.c:188 plug-ins/xfig/xfig-import.c:224
+#: plug-ins/xfig/xfig-import.c:261 plug-ins/xfig/xfig-import.c:289
+#: plug-ins/xfig/xfig-import.c:326 plug-ins/xfig/xfig-import.c:362
+#: plug-ins/xfig/xfig-import.c:403
+#, fuzzy
+msgid "Can't find standard object"
+msgstr "표준 객체"
+
+#: plug-ins/svg/svg-import.c:229
+msgid "Unexpected SVG path element"
+msgstr ""
+
+#: plug-ins/svg/svg-import.c:287
+msgid "Courier"
+msgstr ""
+
+#: plug-ins/svg/svg-import.c:590
+msgid "Could not find SVG namespace."
+msgstr ""
+
+#: plug-ins/svg/svg-import.c:596
+#, c-format
+msgid "root element was '%s' -- expecting 'svg'."
+msgstr ""
+
+#: plug-ins/svg/svg.c:37
+#, fuzzy
+msgid "Scalable Vector Graphics import and export filters"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/wmf/wmf.cpp:1204
+msgid "Windows Meta File"
+msgstr ""
+
+#: plug-ins/wmf/wmf.cpp:1220
+#, fuzzy
+msgid "WMF export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/wpg/wpg.c:1182
+#, c-format
+msgid "File: %s type/version unsupported.\n"
+msgstr ""
+
+#: plug-ins/wpg/wpg.c:1312 plug-ins/wpg/wpg.c:1319
+msgid "WPG"
+msgstr ""
+
+#: plug-ins/wpg/wpg.c:1333
+#, fuzzy
+msgid "WordPerfect Graphics export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/xfig/xfig-export.c:454
+#, c-format
+msgid "FIG format has no equivalent of arrow style %s, using simple arrow.\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-export.c:475
+msgid "No more user-definable colors - using black"
+msgstr ""
+
+#: plug-ins/xfig/xfig-export.c:1149
+msgid "XFig format"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:472
+msgid "Patterns are not supported by Dia"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:500
+msgid "Triple-dotted lines are not supported by Dia, using double-dotted"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:504
+#, fuzzy, c-format
+msgid "Line style %d should not appear\n"
+msgstr "선 형태 속성"
+
+#: plug-ins/xfig/xfig-import.c:573
+#, c-format
+msgid "Error while reading %dth of %d points: %s\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:595
+#, fuzzy
+msgid "Error while reading arrowhead\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:615
+#, c-format
+msgid "Unknown arrow type %d\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:731
+#, fuzzy, c-format
+msgid "Couldn't read ellipse info: %s\n"
+msgstr ""
+"플러그인 `%s'를 읽을수 없습니다\n"
+"%s"
+
+#: plug-ins/xfig/xfig-import.c:799
+#, fuzzy, c-format
+msgid "Couldn't read polyline info: %s\n"
+msgstr ""
+"플러그인 `%s'를 읽을수 없습니다\n"
+"%s"
+
+#: plug-ins/xfig/xfig-import.c:814
+#, c-format
+msgid "Couldn't read flipped bit: %s\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:832
+msgid "Negative corner radius, negating"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:876 plug-ins/xfig/xfig-import.c:1192
+#, c-format
+msgid "Unknown polyline subtype: %d\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1026 plug-ins/xfig/xfig-import.c:1058
+#, fuzzy, c-format
+msgid "Couldn't read spline info: %s\n"
+msgstr ""
+"플러그인 `%s'를 읽을수 없습니다\n"
+"%s"
+
+#. Open approximated spline
+#. Closed approximated spline
+#: plug-ins/xfig/xfig-import.c:1045 plug-ins/xfig/xfig-import.c:1063
+msgid "Cannot convert approximated spline yet."
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1103
+#, c-format
+msgid "Unknown spline subtype: %d\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1166
+#, fuzzy, c-format
+msgid "Couldn't read arc info: %s\n"
+msgstr ""
+"플러그인 `%s'를 읽을수 없습니다\n"
+"%s"
+
+#: plug-ins/xfig/xfig-import.c:1256
+#, c-format
+msgid "Couldn't read text info: %s\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1319
+#, c-format
+msgid "Couldn't identify FIG object: %s\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1327
+msgid "Compound end outside compound\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1346
+#, fuzzy, c-format
+msgid "Couldn't read color: %s\n"
+msgstr ""
+"플러그인 `%s'를 읽을수 없습니다\n"
+"%s"
+
+#: plug-ins/xfig/xfig-import.c:1391
+#, fuzzy, c-format
+msgid "Couldn't read group extend: %s\n"
+msgstr "`%s'를 열수 없습니다"
+
+#: plug-ins/xfig/xfig-import.c:1402
+#, c-format
+msgid "Unknown object type %d\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1426
+#, c-format
+msgid "`%s' is not one of `%s' or `%s'\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1436
+#, fuzzy, c-format
+msgid "Error reading paper size: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1447
+#, c-format
+msgid "Unknown paper size `%s', using default\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1459
+#, fuzzy, c-format
+msgid "Error reading paper orientation: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1469
+#, fuzzy, c-format
+msgid "Error reading justification: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1479
+#, fuzzy, c-format
+msgid "Error reading units: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1493
+#, fuzzy, c-format
+msgid "Error reading magnification: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1504
+#, fuzzy, c-format
+msgid "Error reading multipage indicator: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1515
+#, fuzzy, c-format
+msgid "Error reading transparent color: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1524 plug-ins/xfig/xfig-import.c:1579
+#: plug-ins/xfig/xfig-import.c:1597
+#, fuzzy, c-format
+msgid "Error reading FIG file: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1526 plug-ins/xfig/xfig-import.c:1581
+msgid "Premature end of FIG file\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1535
+#, fuzzy, c-format
+msgid "Error reading resolution: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xfig/xfig-import.c:1566
+#, c-format
+msgid "Doesn't look like a Fig file: %s\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1572
+#, c-format
+msgid "This is a FIG version %d.%d file, I may not understand it\n"
+msgstr ""
+
+#: plug-ins/xfig/xfig-import.c:1620
+msgid "XFig File Format"
+msgstr ""
+
+#: plug-ins/xfig/xfig.c:39
+#, fuzzy
+msgid "Fig Format import and export filter"
+msgstr "컴퓨터 그래픽 메타파일 저장 필터"
+
+#: plug-ins/xslt/xslt.c:102
+#, fuzzy, c-format
+msgid "Error while parsing %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xslt/xslt.c:110
+#, fuzzy, c-format
+msgid "Error while parsing stylesheet %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xslt/xslt.c:116
+#, fuzzy, c-format
+msgid "Error while applying stylesheet %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xslt/xslt.c:124
+#, fuzzy, c-format
+msgid "Error while parsing stylesheet: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xslt/xslt.c:132
+#, fuzzy, c-format
+msgid "Error while applying stylesheet: %s\n"
+msgstr "도표 파일 읽는중 오류\n"
+
+#: plug-ins/xslt/xslt.c:313
+#, fuzzy
+msgid "XSL Transformation filter"
+msgstr "방향"
+
+#: plug-ins/xslt/xslt.c:337
+msgid "No valid configuration files found for the XSLT plugin, not loading."
+msgstr ""
+
+#: plug-ins/xslt/xsltdialog.c:99
+msgid "Export through XSLT"
+msgstr ""
+
+#: plug-ins/xslt/xsltdialog.c:114
+msgid "From:"
+msgstr ""
+
+#: plug-ins/xslt/xsltdialog.c:149
+#, fuzzy
+msgid "To:"
+msgstr "위:"
+
+#: sheets/Assorted.sheet.in.h:1
+msgid ""
+"An Assorted Collection of Polygons, Beziergons and other Miscellaneous "
+"Geometric Shapes"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:2
+msgid "Assorted"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:3
+msgid "Chevron"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:4
+msgid "Curved eight point star"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:5
+msgid "Curved four point star"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:6
+#, fuzzy
+msgid "Diamond"
+msgstr "이름:"
+
+#: sheets/Assorted.sheet.in.h:7
+#, fuzzy
+msgid "Down arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:8
+msgid "Eight point star"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:9
+#, fuzzy
+msgid "Four point star"
+msgstr "부드러운 조절"
+
+#: sheets/Assorted.sheet.in.h:10
+#, fuzzy
+msgid "Heart"
+msgstr "생성"
+
+#: sheets/Assorted.sheet.in.h:11
+msgid "Heptagon. Seven sided shape"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:12
+msgid "Hexagon. Six sided shape"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:13
+#, fuzzy
+msgid "Horizontal parallelogram"
+msgstr "위아래 바꿈"
+
+#: sheets/Assorted.sheet.in.h:14
+msgid "Isoceles triangle"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:15
+#, fuzzy
+msgid "Left arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:16
+#, fuzzy
+msgid "Left-right arrow"
+msgstr "오른쪽"
+
+#: sheets/Assorted.sheet.in.h:17
+#, fuzzy
+msgid "Left-right-up arrow"
+msgstr "오른쪽"
+
+#: sheets/Assorted.sheet.in.h:18
+msgid "Left-up arrow"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:19
+msgid "Maltese cross"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:20
+msgid "Notched left arrow"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:21
+#, fuzzy
+msgid "Notched right arrow"
+msgstr "오른쪽"
+
+#: sheets/Assorted.sheet.in.h:22
+msgid "Octogon. Eight sided shape"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:23
+msgid "Pentagon block arrow"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:24
+msgid "Pentagon. Five sided shape"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:25
+msgid "Perfect circle"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:26
+msgid "Perfect square, height equals width"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:27
+#, fuzzy
+msgid "Quad arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:28
+msgid "Quarter circle"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:29
+msgid "Quarter moon"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:30
+msgid "Right angle triangle"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:31
+#, fuzzy
+msgid "Right arrow"
+msgstr "오른쪽"
+
+#: sheets/Assorted.sheet.in.h:32
+#, fuzzy
+msgid "Seven point star"
+msgstr "프린터 선택"
+
+#: sheets/Assorted.sheet.in.h:33
+msgid "Sharp eight point star"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:34
+#, fuzzy
+msgid "Six point star"
+msgstr "프린터 선택"
+
+#: sheets/Assorted.sheet.in.h:35
+msgid "Sun"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:36
+#, fuzzy
+msgid "Swiss cross"
+msgstr "클래스"
+
+#: sheets/Assorted.sheet.in.h:37
+msgid "Trapezoid"
+msgstr ""
+
+#: sheets/Assorted.sheet.in.h:38
+#, fuzzy
+msgid "Turn-up arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:39
+#, fuzzy
+msgid "Up arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:40
+#, fuzzy
+msgid "Up-down arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:41
+#, fuzzy
+msgid "Up-down-left arrow"
+msgstr "화살표 보임"
+
+#: sheets/Assorted.sheet.in.h:42
+#, fuzzy
+msgid "Vertical parallelogram"
+msgstr "가로 정렬(_H)"
+
+#: sheets/ChemEng.sheet.in.h:1
+#, fuzzy
+msgid "AC Generator"
+msgstr "사용자 인터페이스"
+
+#: sheets/ChemEng.sheet.in.h:2
+#, fuzzy
+msgid "Air Cooler"
+msgstr "가장자리 더하기"
+
+#: sheets/ChemEng.sheet.in.h:3
+msgid "Autoclave"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:4
+msgid "Axial Flow Fan"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:5
+#, fuzzy
+msgid "Basic Filter"
+msgstr "파일"
+
+#: sheets/ChemEng.sheet.in.h:6
+msgid "Centrifugal Pump or Fan"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:7
+#, fuzzy
+msgid "Centrifuge"
+msgstr "중간"
+
+#: sheets/ChemEng.sheet.in.h:8
+#, fuzzy
+msgid "ChemEng"
+msgstr "취소"
+
+#: sheets/ChemEng.sheet.in.h:9
+msgid "Clarifier or Settling Tank"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:10
+msgid "Collection for chemical engineering"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:11
+#, fuzzy
+msgid "Compressor or Turbine"
+msgstr "저장파일 압축:"
+
+#: sheets/ChemEng.sheet.in.h:12
+msgid "Covered tank"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:13
+msgid "Cyclone and hydrocyclone"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:14
+msgid "Double-Pipe Exchanger"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:15
+msgid "Ejector or Injector"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:16
+msgid "Fan or Stirrer"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:17
+msgid "Fixed-Sheet Exchanger"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:18
+msgid "Floating-Head or U-Tube Exchanger"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:19
+msgid "Fluid Contacting Vessel, simple"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:20
+msgid "Forced-Flow Air Cooler"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:21
+msgid "Gas Holder, basic"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:22
+msgid "Heating/Cooling Coil"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:23
+msgid "Heating/Cooling Coil, vertical"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:24
+msgid "Induced-Flow Air Cooler"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:25
+msgid "Kettle Reboiler"
+msgstr ""
+
+#: sheets/ChemEng.sheet.in.h:26
+msgid "Knock-out Drum (with demister pad)"
"Knock-out Drum (with demister pad)" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:27 +msgid "Measurement" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:28 +#, fuzzy +msgid "Mixer" +msgstr "Áߺ¹" + +#: sheets/ChemEng.sheet.in.h:29 +msgid "Open Tank" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:30 +msgid "Plate Exchanger" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:31 +#, fuzzy +msgid "Pneumatic Line" +msgstr "¼± ¸¸µê" + +#: sheets/ChemEng.sheet.in.h:32 +msgid "Pneumatic Line, vertical" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:33 +msgid "Positive Displacement Rotary Pump or Compressor" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:34 +msgid "Reactor or Absorption Vessel, simple" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:35 +msgid "Reciprocating Compressor or Pump" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:36 +#, fuzzy +msgid "Regulable Valve" +msgstr "ÅÛÇø´" + +#: sheets/ChemEng.sheet.in.h:37 +msgid "Regulable Valve, vertical" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:38 +msgid "Sealed Tank" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:39 +#, fuzzy +msgid "Simple Furnace" +msgstr "ÇÁ¸°ÅÍ ¼±ÅÃ" + +#: sheets/ChemEng.sheet.in.h:40 +msgid "Simple Heat Exchanger" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:41 +msgid "Simple Heat Exchanger, vertical" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:42 +msgid "Simple Vessel" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:43 +msgid "Spray Drier" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:44 +msgid "Spraying Device" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:45 +#, fuzzy +msgid "Storage Sphere" +msgstr "ºñÀ²" + +#: sheets/ChemEng.sheet.in.h:46 +msgid "Tank with Fixed Roof" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:47 +msgid "Tank with Floating Roof" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:48 +msgid "Tray Column, detailed" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:49 +msgid "Tray Column, simple" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:50 +#, fuzzy +msgid "Valve" +msgstr "°ª:" + +#: sheets/ChemEng.sheet.in.h:51 +#, fuzzy +msgid "Valve, vertical" +msgstr "ÁÂ¿ì ¹Ù²Þ" + +#: sheets/ChemEng.sheet.in.h:52 +msgid "Water Cooler" +msgstr "" + +#: sheets/ChemEng.sheet.in.h:53 +msgid "Water Cooler, vertical" +msgstr "" + +#: sheets/Circuit.sheet.in.h:1 +msgid "Circuit" +msgstr "" + +#: sheets/Circuit.sheet.in.h:2 +msgid "Components for circuit diagrams" +msgstr "" + +#: sheets/Circuit.sheet.in.h:3 +#, fuzzy +msgid "Ground point" +msgstr "ºÎµå·¯¿î Á¶Àý" + +#: sheets/Circuit.sheet.in.h:4 +#, fuzzy +msgid "Horizontal jumper" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:5 +#, fuzzy +msgid "Horizontally aligned LED" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:6 +#, fuzzy +msgid "Horizontally aligned capacitor" +msgstr "À§¾Æ·¡ ¹Ù²Þ" + +#: sheets/Circuit.sheet.in.h:7 +#, fuzzy +msgid "Horizontally aligned diode" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:8 +#, fuzzy +msgid "Horizontally aligned fuse" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:9 +#, fuzzy +msgid "Horizontally aligned inductor" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:10 +#, fuzzy +msgid "Horizontally aligned inductor (European)" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:11 +#, fuzzy +msgid "Horizontally aligned powersource" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/Circuit.sheet.in.h:12 +#, fuzzy +msgid "Horizontally aligned resistor" +msgstr "À§¾Æ·¡ ¹Ù²Þ" + +#: sheets/Circuit.sheet.in.h:13 +#, fuzzy +msgid "Horizontally aligned resistor (European)" +msgstr "À§¾Æ·¡ ¹Ù²Þ" + +#: sheets/Circuit.sheet.in.h:14 +#, fuzzy +msgid "Horizontally aligned 
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:15
+#, fuzzy
+msgid "Lamp"
+msgstr "이름:"
+
+#: sheets/Circuit.sheet.in.h:16 sheets/ciscomisc.sheet.in.h:26
+msgid "Microphone"
+msgstr ""
+
+#: sheets/Circuit.sheet.in.h:17
+#, fuzzy
+msgid "NMOS transistor"
+msgstr "방향"
+
+#: sheets/Circuit.sheet.in.h:18
+#, fuzzy
+msgid "NPN bipolar transistor"
+msgstr "방향"
+
+#: sheets/Circuit.sheet.in.h:19
+#, fuzzy
+msgid "Operational amplifier"
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:20
+#, fuzzy
+msgid "PMOS transistor"
+msgstr "방향"
+
+#: sheets/Circuit.sheet.in.h:21
+#, fuzzy
+msgid "PNP bipolar transistor"
+msgstr "방향"
+
+#: sheets/Circuit.sheet.in.h:22 sheets/ciscomisc.sheet.in.h:39
+msgid "Speaker"
+msgstr ""
+
+#: sheets/Circuit.sheet.in.h:23
+#, fuzzy
+msgid "Vertically aligned LED"
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:24
+#, fuzzy
+msgid "Vertically aligned capacitor"
+msgstr "위아래 바꿈"
+
+#: sheets/Circuit.sheet.in.h:25
+#, fuzzy
+msgid "Vertically aligned diode"
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:26
+#, fuzzy
+msgid "Vertically aligned fuse"
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:27
+#, fuzzy
+msgid "Vertically aligned inductor"
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:28
+#, fuzzy
+msgid "Vertically aligned inductor (European)"
+msgstr "가로 정렬(_H)"
+
+#: sheets/Circuit.sheet.in.h:29
+#, fuzzy
+msgid "Vertically aligned powersource"
+msgstr "위아래 바꿈"
+
+#: sheets/Circuit.sheet.in.h:30
+#, fuzzy
+msgid "Vertically aligned resistor"
+msgstr "위아래 바꿈"
+
+#: sheets/Circuit.sheet.in.h:31
+#, fuzzy
+msgid "Vertically aligned resistor (European)"
+msgstr "위아래 바꿈"
+
+#: sheets/Circuit.sheet.in.h:32
+#, fuzzy
+msgid "Vertically aligned zener diode"
+msgstr "위아래 바꿈"
+
+#: sheets/Contact.sheet.in.h:1
+msgid "'if not' (normally closed) ladder contact"
+msgstr ""
+
+#: sheets/Contact.sheet.in.h:2
+msgid "'if' (normally open) ladder contact"
+msgstr ""
+
+#: sheets/Contact.sheet.in.h:3
+#, fuzzy
+msgid "'jump' output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:4
+#, fuzzy
+msgid "'reset' output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:5
+#, fuzzy
+msgid "'set' output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:6
+msgid "Components for LADDER circuits"
+msgstr ""
+
+#: sheets/Contact.sheet.in.h:7
+#, fuzzy
+msgid "Ladder"
+msgstr "계층"
+
+#: sheets/Contact.sheet.in.h:8
+#, fuzzy
+msgid "Negative output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:9
+#, fuzzy
+msgid "Power-saved 'reset' output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:10
+#, fuzzy
+msgid "Power-saved 'set' output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:11
+#, fuzzy
+msgid "Power-saved negative output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:12
+#, fuzzy
+msgid "Power-saved simple output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:13
+#, fuzzy
+msgid "Receptivity output variable"
+msgstr "프린터 선택"
+
+#: sheets/Contact.sheet.in.h:14
+#, fuzzy
+msgid "Simple output variable"
+msgstr "프린터 선택"
+
+#: sheets/Cybernetics.sheet.in.h:1
+msgid "Constant factor below -1"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:2
+msgid "Constant factor between 0 and -1"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:3
+msgid "Constant factor between 0 and 1"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:4
+msgid "Constant factor greater 1"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:5
+msgid "Constant negative shift on the y-axis"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:6
+msgid "Constant positive shift on the y-axis"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:7
+msgid "Cybernetics"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:8
+msgid "Elements of cybernetic circuits"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:9
+msgid "Full wave rectifier (absolute value)"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:10
+msgid "Half wave rectifier or ramp input"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:11
+msgid "High pass filter"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:12
+msgid "Integrator - input bottom"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:13
+msgid "Integrator - input left"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:14
+msgid "Integrator - input right"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:15
+msgid "Integrator - input top"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:16
+msgid "Low pass filter"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:17
+msgid "Product"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:18
+msgid "Relay characteristic (sigma)"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:19
+msgid "Saturation characteristic"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:20
+msgid "Sensor - bottom"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:21
+msgid "Sensor - left"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:22
+msgid "Sensor - right"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:23
+msgid "Sensor - top"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:24
+msgid "Sigmoid characteristic"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:25
+msgid "Sine characteristic or input"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:26
+msgid "Sum"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:27
+msgid "Sum, subtracting bottom input"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:28
+msgid "Sum, subtracting left input"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:29
+msgid "Sum, subtracting right input"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:30
+msgid "Sum, subtracting top input"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:31
+msgid "Template for custom characteristics"
+msgstr ""
+
+#: sheets/Cybernetics.sheet.in.h:32
+#, fuzzy
+msgid "Time delay"
+msgstr "속성 데이터"
+
+#: sheets/EML.sheet.in.h:1
+msgid "EML"
+msgstr ""
+
+#: sheets/EML.sheet.in.h:2
+msgid "Editor for EML Static Structure Diagrams."
+msgstr ""
+
+#: sheets/EML.sheet.in.h:3
+msgid "Instantiation. One process create others"
+msgstr ""
+
+#: sheets/EML.sheet.in.h:4
+msgid "Interaction between processes."
+msgstr ""
+
+#: sheets/ER.sheet.in.h:2
+msgid "ER"
+msgstr ""
+
+#: sheets/ER.sheet.in.h:3
+msgid "Editor for Entity Relations Diagrams"
+msgstr ""
+
+#: sheets/ER.sheet.in.h:5
+#, fuzzy
+msgid "Participation"
+msgstr "방향"
+
+#: sheets/ER.sheet.in.h:7
+msgid "Weak entity"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:1
+msgid "Components for electric circuits"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:2 sheets/Pneumatic.sheet.in.h:9
+#, fuzzy
+msgid "Connection point"
+msgstr "연결점:"
+
+#: sheets/Electric.sheet.in.h:3
+#, fuzzy
+msgid "Electric"
+msgstr "좌우 바꿈"
+
+#: sheets/Electric.sheet.in.h:4
+msgid "Lamp or indicator light (horizontal)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:5
+msgid "Lamp or indicator light (vertical)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:6
+msgid "Normally closed contact (horizontal)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:7
+msgid "Normally closed contact (vertical)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:8
+msgid "Normally closed position switch (horizontal)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:9
+msgid "Normally closed position switch (vertical)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:10
+msgid "Normally open contact (horizontal)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:11
+msgid "Normally open contact (vertical)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:12
+msgid "Normally open position switch (horizontal)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:13
+msgid "Normally open position switch (vertical)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:14
+#, fuzzy
+msgid "Relay (horizontal)"
+msgstr "위아래 바꿈"
+
+#: sheets/Electric.sheet.in.h:15
+#, fuzzy
+msgid "Relay (vertical)"
+msgstr "좌우 바꿈"
+
+#: sheets/Electric.sheet.in.h:16
+msgid "The command organ of a relay (horizontal)"
+msgstr ""
+
+#: sheets/Electric.sheet.in.h:17
+msgid "The command organ of a relay (vertical)"
+msgstr ""
+
+#: sheets/FS.sheet.in.h:1
+msgid "Editor for Function Structure Diagrams."
+msgstr ""
+
+#: sheets/FS.sheet.in.h:2
+msgid "FS"
+msgstr ""
+
+#: sheets/FS.sheet.in.h:3
+#, fuzzy
+msgid "Flow"
+msgstr "흐름도 객체"
+
+#: sheets/FS.sheet.in.h:4
+#, fuzzy
+msgid "Function"
+msgstr "연결점:"
+
+#: sheets/FS.sheet.in.h:5
+msgid "Orthogonal polyline flow"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:1
+#, fuzzy
+msgid "Collate"
+msgstr "호출"
+
+#: sheets/Flowchart.sheet.in.h:2
+#, fuzzy
+msgid "Connector"
+msgstr "연결점:"
+
+#: sheets/Flowchart.sheet.in.h:3 sheets/SDL.sheet.in.h:4
+#, fuzzy
+msgid "Decision"
+msgstr "설명:"
+
+#: sheets/Flowchart.sheet.in.h:4
+msgid "Delay"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:6
+#, fuzzy
+msgid "Document"
+msgstr "글꼴 크기:"
+
+#: sheets/Flowchart.sheet.in.h:8
+#, fuzzy
+msgid "Flowchart"
+msgstr "흐름도 객체"
+
+#: sheets/Flowchart.sheet.in.h:9
+msgid "Input/Output"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:10
+#, fuzzy
+msgid "Internal storage"
+msgstr "사용자 인터페이스"
+
+#: sheets/Flowchart.sheet.in.h:11
+msgid "Magnetic disk"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:12
+msgid "Magnetic drum"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:13
+msgid "Magnetic tape"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:14
+#, fuzzy
+msgid "Manual input"
+msgstr "작동"
+
+#: sheets/Flowchart.sheet.in.h:15
+#, fuzzy
+msgid "Manual operation"
+msgstr "작동"
+
+#: sheets/Flowchart.sheet.in.h:16
+msgid "Merge"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:17
+msgid "Objects to draw flowcharts"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:18
+msgid "Off page connector"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:19
+#, fuzzy
+msgid "Offline storage"
+msgstr "사용자 인터페이스"
+
+#: sheets/Flowchart.sheet.in.h:20
+msgid "Or"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:21
+#, fuzzy
+msgid "Predefined process"
+msgstr "설정"
+
+#: sheets/Flowchart.sheet.in.h:22
+#, fuzzy
+msgid "Preparation"
+msgstr "작동"
+
+#: sheets/Flowchart.sheet.in.h:23
+#, fuzzy
+msgid "Process/Auxiliary Operation"
+msgstr "작동 안보임"
+
+#: sheets/Flowchart.sheet.in.h:24
+msgid "Punched card"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:25
+msgid "Punched tape"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:26
+#, fuzzy
+msgid "Sort"
+msgstr "화살표 보임"
+
+#: sheets/Flowchart.sheet.in.h:27
+msgid "Summing junction"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:28
+msgid "Terminal Interrupt"
+msgstr ""
+
+#: sheets/Flowchart.sheet.in.h:29
+#, fuzzy
+msgid "Transaction file"
+msgstr "방향"
+
+#: sheets/Flowchart.sheet.in.h:30
+msgid "Transmittal tape"
+msgstr ""
+
+#: sheets/GRAFCET.sheet.in.h:1
+#, fuzzy
+msgid "AND vergent"
+msgstr "부분 추가"
+
+#: sheets/GRAFCET.sheet.in.h:2
+msgid "Action to associate to a step"
+msgstr ""
+
+#: sheets/GRAFCET.sheet.in.h:3
+msgid "Arc (upward)"
+msgstr ""
+
+#: sheets/GRAFCET.sheet.in.h:4
+msgid "Condition (of an action)"
+msgstr ""
+
+#: sheets/GRAFCET.sheet.in.h:5
+msgid "GRAFCET"
+msgstr ""
+
+#: sheets/GRAFCET.sheet.in.h:10
+#, fuzzy
+msgid "Macro sub-program call step"
+msgstr "템플릿"
+
+#: sheets/GRAFCET.sheet.in.h:11
+#, fuzzy
+msgid "OR vergent"
+msgstr "부분 추가"
+
+#: sheets/GRAFCET.sheet.in.h:12
+msgid "Objects to design GRAFCET charts"
+msgstr ""
+
+#: sheets/GRAFCET.sheet.in.h:14
+#, fuzzy
+msgid "Transition"
+msgstr "방향"
+
+#: sheets/IsometricMap.sheet.in.h:1
+msgid "Block 1, 2:4"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:2
+msgid "Block 2, 2:8"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:3
+msgid "Block 3, 4:4"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:4
+msgid "Block 4, 4:8"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:5
+msgid "Block 5, 3:3"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:6
+msgid "Block 6, 4:6"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:7
+#, fuzzy
+msgid "Block 7"
+msgstr "제거하기"
+
+#: sheets/IsometricMap.sheet.in.h:8
+msgid "Car 1, Front View"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:9
+msgid "Car 2, Rear View"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:10
+#, fuzzy
+msgid "Corner 1"
+msgstr "가장자리 더하기"
+
+#: sheets/IsometricMap.sheet.in.h:11
+#, fuzzy
+msgid "Corner 2"
+msgstr "가장자리 더하기"
+
+#: sheets/IsometricMap.sheet.in.h:12
+#, fuzzy
+msgid "Crossroads"
+msgstr "클래스"
+
+#: sheets/IsometricMap.sheet.in.h:13
+msgid "Elevated Road"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:14
+msgid "Factory"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:15
+msgid "Footbridge, Pedestrian Bridge"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:16
+msgid "Isometric Directional Map Shapes"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:17
+msgid "Long Straight Road Section"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:18
+msgid "Map, Isometric"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:19
+msgid "One Way Road Sign"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:20
+#, fuzzy
+msgid "River"
+msgstr "삭제"
+
+#: sheets/IsometricMap.sheet.in.h:21
+#, fuzzy
+msgid "Road Section"
+msgstr "역할:"
+
+#: sheets/IsometricMap.sheet.in.h:22
+msgid "Roof1"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:23
+#, fuzzy
+msgid "T-Junction"
+msgstr "연결점:"
+
+#: sheets/IsometricMap.sheet.in.h:24
+msgid "Train 1, angled downward"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:25
+msgid "Train 2, angled upward"
+msgstr ""
+
+#: sheets/IsometricMap.sheet.in.h:26
+msgid "Tree 1"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:1
+#, fuzzy
+msgid "An i* agent"
+msgstr "부분 추가"
+
+#: sheets/Istar.sheet.in.h:2
+msgid "An i* decomposition link"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:3
+#, fuzzy
+msgid "An i* dependency link"
+msgstr "베지어선 만듦"
+
+#: sheets/Istar.sheet.in.h:4
+#, fuzzy
+msgid "An i* goal"
+msgstr "집합"
+
+#: sheets/Istar.sheet.in.h:5
+msgid "An i* means-ends link"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:6
+#, fuzzy
+msgid "An i* negative contribution"
+msgstr "프린터 선택"
+
+#: sheets/Istar.sheet.in.h:7
+#, fuzzy
+msgid "An i* position"
+msgstr "합성"
+
+#: sheets/Istar.sheet.in.h:8
+msgid "An i* positive contribution"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:9
+msgid "An i* resource"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:10
+#, fuzzy
+msgid "An i* role"
+msgstr "파일"
+
+#: sheets/Istar.sheet.in.h:11
+msgid "An i* softgoal"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:12
+msgid "An i* task"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:13
+msgid "An i* unspecified actor"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:14
+msgid "An i* unspecified link"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:15
+msgid "Objects to design i* diagrams"
+msgstr ""
+
+#: sheets/Istar.sheet.in.h:16
+msgid "RE-i*"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:1
+msgid "A Jackson designed domain"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:2
+msgid "A Jackson given domain"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:3
+msgid "A Jackson machine domain"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:4
+msgid "A Jackson requirement"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:5
+msgid "A Jackson requirement phenomenon"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:6
+msgid "A Jackson shared phenomenon"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:7
+msgid "Objects to design Jacskon diagrams"
+msgstr ""
+
+#: sheets/Jackson.sheet.in.h:8
+msgid "RE-Jackson"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:1
+msgid "A KAOS AND refinement"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:2
+msgid "A KAOS OR refinement"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:3
+#, fuzzy
+msgid "A KAOS agent"
+msgstr "부분 추가"
+
+#: sheets/KAOS.sheet.in.h:4
+msgid "A KAOS assumption"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:5
+msgid "A KAOS binary conflict"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:6
+msgid "A KAOS capable-of"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:7
+msgid "A KAOS complete AND refinement"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:8
+msgid "A KAOS complete OR refinement"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:9
+msgid "A KAOS contribution"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:10
+msgid "A KAOS control link"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:11
+msgid "A KAOS goal"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:12
+msgid "A KAOS input"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:13
+#, fuzzy
+msgid "A KAOS monitor link"
+msgstr "합성"
+
+#: sheets/KAOS.sheet.in.h:14
+msgid "A KAOS obstacle"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:15
+msgid "A KAOS obstruction"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:16
+#, fuzzy
+msgid "A KAOS operation"
+msgstr "작동"
+
+#: sheets/KAOS.sheet.in.h:17
+#, fuzzy
+msgid "A KAOS operationalization"
+msgstr "가로 정렬(_H)"
+
+#: sheets/KAOS.sheet.in.h:18
+msgid "A KAOS output"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:19
+msgid "A KAOS performs"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:20
+#, fuzzy
+msgid "A KAOS requirement"
+msgstr "부분 추가"
+
+#: sheets/KAOS.sheet.in.h:21
+msgid "A KAOS responsibility"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:22
+msgid "A KAOS softgoal"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:23
+msgid "Objects to design KAOS diagrams"
+msgstr ""
+
+#: sheets/KAOS.sheet.in.h:24
+msgid "RE-KAOS"
+msgstr ""
+
+#: sheets/Logic.sheet.in.h:1
+#, fuzzy
+msgid "AND gate"
+msgstr "집합"
+
+#: sheets/Logic.sheet.in.h:2
+msgid "Boolean Logic"
+msgstr ""
+
+#: sheets/Logic.sheet.in.h:3
+msgid "Crossconnector"
+msgstr ""
+
+#: sheets/Logic.sheet.in.h:4
+#, fuzzy
+msgid "Inverter"
+msgstr "사용자 인터페이스"
+
+#: sheets/Logic.sheet.in.h:5
+msgid "Logic"
+msgstr ""
+
+#: sheets/Logic.sheet.in.h:6
+#, fuzzy
+msgid "NAND gate"
+msgstr "집합"
+
+#: sheets/Logic.sheet.in.h:7
+#, fuzzy
+msgid "NOR gate"
+msgstr "집합"
+
+#: sheets/Logic.sheet.in.h:8
+msgid "NOT"
+msgstr ""
+
+#: sheets/Logic.sheet.in.h:9
+#, fuzzy
+msgid "OR gate"
+msgstr "집합"
+
+#: sheets/Logic.sheet.in.h:10
+#, fuzzy
+msgid "Simple buffer"
+msgstr "프린터 선택"
+
+#: sheets/Logic.sheet.in.h:11
+#, fuzzy
+msgid "XOR gate"
+msgstr "집합"
+
+#: sheets/MSE.sheet.in.h:1
+#, fuzzy
+msgid "Demultiplexer"
+msgstr "중복"
+
+#: sheets/MSE.sheet.in.h:2
+#, fuzzy
+msgid "Large extension node"
+msgstr "확장자"
+
+#: sheets/MSE.sheet.in.h:3
+msgid "MSE"
+msgstr ""
+
+#: sheets/MSE.sheet.in.h:4
+#, fuzzy
+msgid "Multiplexer"
+msgstr "중복"
+
+#: sheets/MSE.sheet.in.h:5
+#, fuzzy
+msgid "Node center"
+msgstr "중간"
+
+#: sheets/MSE.sheet.in.h:6
+#, fuzzy
+msgid "Small extension node"
+msgstr "확장자"
+
+#: sheets/MSE.sheet.in.h:7
+msgid "Tactical satellite communications terminal"
+msgstr ""
+
+#: sheets/MSE.sheet.in.h:8
+msgid "U.S. Army Mobile Subscriber Equipment Components"
Army Mobile Subscriber Equipment Components" +msgstr "" + +#: sheets/Misc.sheet.in.h:2 +#, fuzzy +msgid "Folder" +msgstr "ÆÄÀÏ" + +#: sheets/Misc.sheet.in.h:3 +msgid "Misc" +msgstr "" + +#: sheets/Misc.sheet.in.h:4 +msgid "Miscellaneous Shapes" +msgstr "" + +#: sheets/Misc.sheet.in.h:5 +#, fuzzy +msgid "Traditional clock" +msgstr "¹æÇâ" + +#: sheets/Pneumatic.sheet.in.h:1 +#, fuzzy +msgid "2/2 distributor" +msgstr "¼Ó¼º" + +#: sheets/Pneumatic.sheet.in.h:2 +#, fuzzy +msgid "3/2 distributor" +msgstr "¼Ó¼º" + +#: sheets/Pneumatic.sheet.in.h:3 +#, fuzzy +msgid "3/3 distributor" +msgstr "¼Ó¼º" + +#: sheets/Pneumatic.sheet.in.h:4 +#, fuzzy +msgid "4/2 distributor" +msgstr "¼Ó¼º" + +#: sheets/Pneumatic.sheet.in.h:5 +#, fuzzy +msgid "5/2 distributor" +msgstr "¼Ó¼º" + +#: sheets/Pneumatic.sheet.in.h:6 +#, fuzzy +msgid "5/3 distributor" +msgstr "¼Ó¼º" + +#: sheets/Pneumatic.sheet.in.h:7 +msgid "Air exhaust orifice" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:8 +msgid "Components for pneumatic and hydraulic circuits" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:10 +msgid "Double-effect jack" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:11 +msgid "Electric command (double coil)" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:12 +msgid "Electric command (single coil)" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:13 +msgid "Generic pressure source" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:14 +msgid "Hydraulic pressure source" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:15 +msgid "Indirect command by hydraulic driver" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:16 +msgid "Indirect command by pneumatic driver" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:17 +msgid "Mechanical command by spring" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:18 +msgid "Mechanical command by tappet" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:19 +msgid "Muscular command" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:20 +msgid "Normally-in simple-effect jack" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:21 +msgid "Normally-out simple-effect jack" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:22 +msgid "Pneumatic pressure source" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:23 +msgid "Pneumatic/Hydraulic" +msgstr "" + +#: sheets/Pneumatic.sheet.in.h:24 +msgid "Push-button command" +msgstr "" + +#: sheets/SADT.sheet.in.h:1 +msgid "Activity/data box" +msgstr "" + +#: sheets/SADT.sheet.in.h:2 +msgid "Activity/data flow arrow" +msgstr "" + +#: sheets/SADT.sheet.in.h:3 +msgid "Flow label" +msgstr "" + +#: sheets/SADT.sheet.in.h:4 +msgid "Objects to design SADT diagrams" +msgstr "" + +#: sheets/SADT.sheet.in.h:5 +msgid "SADT/IDEF0" +msgstr "" + +#: sheets/SDL.sheet.in.h:1 +msgid "Action being executed" +msgstr "" + +#: sheets/SDL.sheet.in.h:2 +#, fuzzy +msgid "Block type reference" +msgstr "¼³Á¤" + +#: sheets/SDL.sheet.in.h:5 +msgid "Function call" +msgstr "" + +#: sheets/SDL.sheet.in.h:6 +#, fuzzy +msgid "Function header" +msgstr "ÀÎÀÚ:" + +#: sheets/SDL.sheet.in.h:7 +msgid "Generic text note" +msgstr "" + +#: sheets/SDL.sheet.in.h:8 +msgid "In/Out connector" +msgstr "" + +#: sheets/SDL.sheet.in.h:10 +msgid "Procedure return" +msgstr "" + +#: sheets/SDL.sheet.in.h:11 +#, fuzzy +msgid "Process type reference" +msgstr "¼³Á¤" + +#: sheets/SDL.sheet.in.h:12 +#, fuzzy +msgid "Receive message" +msgstr "»èÁ¦" + +#: sheets/SDL.sheet.in.h:13 +msgid "SDL" +msgstr "" + +#: sheets/SDL.sheet.in.h:14 +#, fuzzy +msgid "Save state" +msgstr "È­»ìÇ¥ º¸ÀÓ" + +#: sheets/SDL.sheet.in.h:15 +#, fuzzy +msgid "Send message" +msgstr "»èÁ¦" + +#: 
sheets/SDL.sheet.in.h:16 +#, fuzzy +msgid "Service type reference" +msgstr "¼³Á¤" + +#: sheets/SDL.sheet.in.h:17 +msgid "Specification and Description Language." +msgstr "" + +#: sheets/SDL.sheet.in.h:18 sheets/UML.sheet.in.h:28 +#, fuzzy +msgid "State" +msgstr "È­»ìÇ¥ º¸ÀÓ" + +#: sheets/UML.sheet.in.h:1 +msgid "Activity" +msgstr "" + +#: sheets/UML.sheet.in.h:3 +msgid "Aggregation, one class is part of another" +msgstr "" + +#: sheets/UML.sheet.in.h:4 +msgid "Association, two classes are associated" +msgstr "" + +#: sheets/UML.sheet.in.h:7 +#, fuzzy +msgid "Class stereotype icon" +msgstr "ÅÛÇø´ Ŭ·¡½º" + +#: sheets/UML.sheet.in.h:8 +#, fuzzy +msgid "Component" +msgstr "Áß°£" + +#: sheets/UML.sheet.in.h:9 +msgid "Constraint, place a constraint on something" +msgstr "" + +#: sheets/UML.sheet.in.h:11 +msgid "Editor for UML Static Structure Diagrams" +msgstr "" + +#: sheets/UML.sheet.in.h:15 +#, fuzzy +msgid "Fork/union" +msgstr "¿¬°áÁ¡:" + +#: sheets/UML.sheet.in.h:16 +msgid "Generalization, class inheritance" +msgstr "" + +#: sheets/UML.sheet.in.h:17 +msgid "Implements, class implements a specific interface" +msgstr "" + +#: sheets/UML.sheet.in.h:18 +#, fuzzy +msgid "Initial/end state" +msgstr "¼± ¸¸µê" + +#: sheets/UML.sheet.in.h:19 +#, fuzzy +msgid "Large package" +msgstr "±×¸² ¸¸µê" + +#: sheets/UML.sheet.in.h:20 +#, fuzzy +msgid "Lifeline" +msgstr "º£Áö¾î" + +#: sheets/UML.sheet.in.h:21 +#, fuzzy +msgid "Message" +msgstr "»èÁ¦" + +#: sheets/UML.sheet.in.h:22 +#, fuzzy +msgid "Node" +msgstr "¾Æ´Ï¿À" + +#: sheets/UML.sheet.in.h:23 +#, fuzzy +msgid "Note" +msgstr "ÁýÇÕ" + +#: sheets/UML.sheet.in.h:24 +#, fuzzy +msgid "Object" +msgstr "/°´Ã¼(_O)" + +#: sheets/UML.sheet.in.h:25 +msgid "Realizes, implements a specific interface" +msgstr "" + +#: sheets/UML.sheet.in.h:27 +#, fuzzy +msgid "Small package" +msgstr "±×¸² ¸¸µê" + +#: sheets/UML.sheet.in.h:30 +msgid "UML" +msgstr "" + +#: sheets/UML.sheet.in.h:31 +msgid "Use case" +msgstr "" + +#: sheets/chronogram.sheet.in.h:1 +msgid "Chronogram" +msgstr "" + +#: sheets/chronogram.sheet.in.h:2 +#, fuzzy +msgid "Data line" +msgstr "¼± ±½±â" + +#: sheets/chronogram.sheet.in.h:3 +msgid "Objects to design chronogram charts" +msgstr "" + +#: sheets/chronogram.sheet.in.h:4 +#, fuzzy +msgid "Time scale" +msgstr "¼Ó¼º µ¥ÀÌÅÍ" + +#: sheets/ciscocomputer.sheet.in.h:1 +#, fuzzy +msgid "Cisco - Computer" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: sheets/ciscocomputer.sheet.in.h:2 +msgid "Cisco CA" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:3 +msgid "CiscoSecurity" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:4 +msgid "CiscoWorks workstation" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:5 +msgid "Communications server" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:6 +msgid "Computer shapes by Cisco" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:7 +msgid "Directory Server" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:8 +#, fuzzy +msgid "File Server" +msgstr "ÆÄÀÏ À̸§:" + +#: sheets/ciscocomputer.sheet.in.h:9 +msgid "HP Mini" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:10 +msgid "Handheld" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:11 +msgid "Host" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:12 +msgid "IBM Mini (AS400)" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:13 +msgid "IBM Tower" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:14 +msgid "IBM mainframe" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:15 +msgid "IP Softphone" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:16 +msgid "IPTV broadcast server" +msgstr "" + +#: 
sheets/ciscocomputer.sheet.in.h:17 +msgid "Laptop" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:18 +#, fuzzy +msgid "Macintosh" +msgstr "¿©¹é" + +#: sheets/ciscocomputer.sheet.in.h:19 +msgid "MicroWebserver" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:20 +msgid "Mini VAX" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:21 +msgid "MoH server (Music on Hold)" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:22 +msgid "PC" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:23 +msgid "PC Video" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:24 +msgid "PDA" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:26 +#, fuzzy +msgid "Relational Database" +msgstr "ÀÛµ¿ µ¥ÀÌÅÍ" + +#: sheets/ciscocomputer.sheet.in.h:27 +msgid "SC2200/VSC3000 host" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:28 +msgid "SIP Proxy server" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:29 +msgid "STB (set top box)" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:30 +#, fuzzy +msgid "SUN workstation" +msgstr "¿ªÇÒ:" + +#: sheets/ciscocomputer.sheet.in.h:31 +#, fuzzy +msgid "Scanner" +msgstr "ºñÀ²:" + +#: sheets/ciscocomputer.sheet.in.h:32 +msgid "Server with PC Router" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:33 +#, fuzzy +msgid "Softphone" +msgstr "ºÎµå·¯¿î Á¶Àý" + +#: sheets/ciscocomputer.sheet.in.h:34 +msgid "Software based server" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:35 +#, fuzzy +msgid "Storage array" +msgstr "ºñÀ²" + +#: sheets/ciscocomputer.sheet.in.h:36 +#, fuzzy +msgid "Supercomputer" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: sheets/ciscocomputer.sheet.in.h:37 +msgid "TV" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:38 +msgid "Terminal" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:39 +#, fuzzy +msgid "Turret" +msgstr "Áß°£" + +#: sheets/ciscocomputer.sheet.in.h:40 +msgid "Unity server" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:41 +msgid "Voice commserver" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:42 +msgid "WWW server" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:43 +msgid "Web browser" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:44 +msgid "Web cluster" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:45 +msgid "Wireless" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:46 +msgid "Workgroup director" +msgstr "" + +#: sheets/ciscocomputer.sheet.in.h:47 +#, fuzzy +msgid "Workstation" +msgstr "¿ªÇÒ:" + +#: sheets/ciscohub.sheet.in.h:1 +msgid "100BaseT hub" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:2 +msgid "ATM fast gigabit etherswitch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:3 +msgid "ATM switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:4 +msgid "Cisco - Hub" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:5 +msgid "Cisco hub" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:6 +msgid "Class 4/5 switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:7 +msgid "Content service switch 1100" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:8 +msgid "Content switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:9 +msgid "Content switch module" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:10 +msgid "Generic softswitch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:11 +msgid "Hub" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:12 +msgid "Hub and switch shapes by Cisco" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:13 +msgid "ISDN switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:14 +#, fuzzy +msgid "Layer 3 switch" +msgstr "°ÔÃþ" + +#: sheets/ciscohub.sheet.in.h:15 +msgid "MGX 8220 switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:16 +msgid "MGX 8240 switch" +msgstr "" + +#: 
sheets/ciscohub.sheet.in.h:17 +msgid "MGX 8260 switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:18 +msgid "MultiSwitch device" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:19 +#, fuzzy +msgid "Multilayer switch" +msgstr "Áߺ¹" + +#: sheets/ciscohub.sheet.in.h:20 +msgid "PBX switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:21 +msgid "Programmable switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:22 +msgid "Route switch processor" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:23 +msgid "Small hub" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:24 +msgid "Voice ATM switch" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:25 +#, fuzzy +msgid "Voice switch" +msgstr "¼± ±½±â" + +#: sheets/ciscohub.sheet.in.h:26 +msgid "Voice switch 2" +msgstr "" + +#: sheets/ciscohub.sheet.in.h:27 +msgid "Workgroup switch" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:1 +msgid "BBFW" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:2 +msgid "BBFW media" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:3 +msgid "Branch office" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:4 +#, fuzzy +msgid "Breakout box" +msgstr "»óÀÚ ¸¸µê" + +#: sheets/ciscomisc.sheet.in.h:5 +#, fuzzy +msgid "Car" +msgstr "Ŭ·¡½º" + +#: sheets/ciscomisc.sheet.in.h:6 +msgid "Cellular phone" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:7 +msgid "Cisco - Misc" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:8 +#, fuzzy +msgid "Diskette" +msgstr "¼Ó¼º" + +#: sheets/ciscomisc.sheet.in.h:9 +#, fuzzy +msgid "Dot-Dot" +msgstr "ÀÌÁ¡±¥¼±" + +#: sheets/ciscomisc.sheet.in.h:10 +msgid "End office" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:11 +msgid "Fax" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:12 +#, fuzzy +msgid "File cabinet" +msgstr "ÆÄÀÏ À̸§:" + +#: sheets/ciscomisc.sheet.in.h:13 +msgid "Generic building" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:14 +msgid "Government building" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:15 +msgid "H.323" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:16 +msgid "HootPhone" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:17 +msgid "IP phone" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:18 +msgid "ITP" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:19 +msgid "Key" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:20 +msgid "LAN to LAN" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:22 +msgid "MAU" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:23 +msgid "MDU" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:24 +msgid "Mac woman" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:25 +msgid "Man/Woman" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:27 +msgid "Miscellaneous shapes by Cisco" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:28 +msgid "PC man" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:29 +#, fuzzy +msgid "Phone" +msgstr "¿¬°á¼±" + +#: sheets/ciscomisc.sheet.in.h:30 +msgid "Phone/Fax" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:31 +msgid "RPS" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:32 +#, fuzzy +msgid "Radio tower" +msgstr "¹æÇâ" + +#: sheets/ciscomisc.sheet.in.h:33 +msgid "Running man" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:34 +msgid "SVX (interchangeable with End office)" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:35 +#, fuzzy +msgid "Satellite" +msgstr "ºñÀ²:" + +#: sheets/ciscomisc.sheet.in.h:36 +msgid "Satellite dish" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:37 +msgid "Sitting woman" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:38 +msgid "Small business" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:40 +msgid "Standing man" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:41 +#, fuzzy +msgid "Tablet" +msgstr "¹®ÀÚ¿­" + +#: 
sheets/ciscomisc.sheet.in.h:42 +msgid "Telecommuter" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:43 +msgid "Telecommuter house" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:44 +msgid "Telecommuter house/router" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:45 +msgid "TokenRing" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:46 +msgid "Truck" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:47 +msgid "UPS" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:48 +msgid "University" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:49 +msgid "Video camera" +msgstr "" + +#: sheets/ciscomisc.sheet.in.h:50 +msgid "Wireless transport" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:1 +msgid "10700" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:2 +msgid "15200" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:3 +msgid "3174 (desktop) cluster controller" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:4 +msgid "3X74 (floor) cluster controller" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:5 +msgid "6701" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:6 +msgid "6705 Integrated access device" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:7 +msgid "6732 Multiservice access platform" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:8 +msgid "ADM" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:9 +#, fuzzy +msgid "ASIC processor" +msgstr "È£ ¸¸µê" + +#: sheets/cisconetwork.sheet.in.h:10 +msgid "ATA" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:11 +msgid "ATM 3800" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:12 +msgid "AccessPoint" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:13 +msgid "BBSM" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:14 +msgid "BTS 10200" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:15 +#, fuzzy +msgid "Bridge" +msgstr "°ÝÀÚ:" + +#: sheets/cisconetwork.sheet.in.h:16 +msgid "CDDI-FDDI" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:17 +msgid "CDM Content distribution manager" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:18 +msgid "CSU/DSU" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:19 +msgid "Cable modem" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:20 +#, fuzzy +msgid "CallManager" +msgstr "È£Ãâ" + +#: sheets/cisconetwork.sheet.in.h:21 +msgid "Catalyst access gateway" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:22 +msgid "Centri firewall" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:23 +msgid "Cisco - Network" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:24 +#, fuzzy +msgid "Cloud" +msgstr "´Ý±â" + +#: sheets/cisconetwork.sheet.in.h:25 +msgid "Content engine (Cache director)" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:26 +msgid "Content transformation engine (CTE)" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:27 +msgid "DPT" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:28 +msgid "DSLAM" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:29 +#, fuzzy +msgid "DWDM filter" +msgstr "ÄÄÇ»ÅÍ ±×·¡ÇÈ ¸ÞŸÆÄÀÏ ÀúÀå ÇÊÅÍ" + +#: sheets/cisconetwork.sheet.in.h:30 +#, fuzzy +msgid "Distributed director" +msgstr "¼Ó¼º" + +#: sheets/cisconetwork.sheet.in.h:31 +#, fuzzy +msgid "FC storage" +msgstr "ºñÀ²" + +#: sheets/cisconetwork.sheet.in.h:32 +msgid "FDDI ring" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:33 +#, fuzzy +msgid "Firewall" +msgstr "ÆÄÀÏ" + +#: sheets/cisconetwork.sheet.in.h:34 +#, fuzzy +msgid "Front end processor" +msgstr "È£ ¸¸µê" + +#: sheets/cisconetwork.sheet.in.h:35 +msgid "General appliance" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:36 +msgid "Generic gateway" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:37 +#, fuzzy +msgid "Generic 
processor" +msgstr "È£ ¸¸µê" + +#: sheets/cisconetwork.sheet.in.h:38 +msgid "ICM" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:39 +msgid "ICS" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:40 +msgid "IOS firewall" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:41 +msgid "IP" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:42 +msgid "IP DSL" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:43 +msgid "IPTC" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:44 +msgid "IPTV content manager" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:45 +msgid "LocalDirector" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:46 +msgid "LongReach CPE" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:47 +msgid "MAS gateway" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:48 +msgid "ME 1100" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:49 +msgid "MUX" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:50 +msgid "Metro 1500" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:51 +#, fuzzy +msgid "Modem" +msgstr "À§·Î À̵¿" + +#: sheets/cisconetwork.sheet.in.h:52 +msgid "NetRanger intrusion detection system" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:53 +msgid "NetSonar security scanner" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:54 +#, fuzzy +msgid "Network management" +msgstr "³×Æ®¿÷ µµÇ¥ °´Ã¼" + +#: sheets/cisconetwork.sheet.in.h:55 +msgid "Network shapes by Cisco" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:56 +msgid "ONS15500 DWDM platform" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:57 +msgid "Octel" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:58 +#, fuzzy +msgid "Optical amplifier" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/cisconetwork.sheet.in.h:59 +msgid "Optical transport" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:60 +msgid "PAD" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:61 +msgid "PAD X.28" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:62 +msgid "PBX" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:63 +msgid "PC adapter card" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:64 +msgid "PC router card" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:65 +msgid "PIX firewall" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:66 +msgid "PXF" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:67 +msgid "Protocol translator" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:68 +#, fuzzy +msgid "RateMUX" +msgstr "¿ªÇÒ:" + +#: sheets/cisconetwork.sheet.in.h:69 +#, fuzzy +msgid "Repeater" +msgstr "Áö¿ò" + +#: sheets/cisconetwork.sheet.in.h:70 +msgid "SC2200 (Signalling controller)" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:71 +msgid "STP" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:72 +#, fuzzy +msgid "System controller" +msgstr "´ëĪ Á¶Àý" + +#: sheets/cisconetwork.sheet.in.h:73 +#, fuzzy +msgid "Transpath" +msgstr "¹æÇâ" + +#: sheets/cisconetwork.sheet.in.h:74 +msgid "Universal gateway" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:75 +msgid "VIP" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:76 +msgid "VPN concentrator" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:77 +msgid "VPN gateway" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:78 +msgid "Virtual switch controller (VSC 3000)" +msgstr "" + +#: sheets/cisconetwork.sheet.in.h:79 +msgid "Wireless bridge" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:1 +msgid "7500ARS (7513)" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:2 +msgid "7505" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:3 +msgid "7507" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:4 +#, fuzzy +msgid "ATM router" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: 
sheets/ciscorouter.sheet.in.h:5 +msgid "ATM tag sw gigabit router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:6 +msgid "ATM tag switch router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:7 +msgid "Broadband router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:8 +msgid "Cisco - Router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:9 +msgid "Cisco 1000" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:10 +msgid "Content service router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:11 +msgid "Gigabit switch router (ATM tag)" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:12 +#, fuzzy +msgid "IAD router" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: sheets/ciscorouter.sheet.in.h:13 +msgid "IP telephony router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:14 +msgid "NetFlow router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:15 +msgid "Optical services router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:16 +#, fuzzy +msgid "Router" +msgstr "¿ªÇÒ:" + +#: sheets/ciscorouter.sheet.in.h:17 +msgid "Router in building" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:18 +msgid "Router shapes by Cisco" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:19 +msgid "Router with firewall" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:20 +msgid "Router with silicon switch" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:21 +#, fuzzy +msgid "Storage router" +msgstr "ºñÀ²" + +#: sheets/ciscorouter.sheet.in.h:22 +#, fuzzy +msgid "TDM router" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: sheets/ciscorouter.sheet.in.h:23 +msgid "Voice router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:24 +msgid "Wavelength router" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:25 +msgid "Workgroup 5000 multilayer switch" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:26 +msgid "Workgroup 5002 multilayer switch" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:27 +msgid "Workgroup 5500 multilayer switch" +msgstr "" + +#: sheets/ciscorouter.sheet.in.h:28 +msgid "uBR910" +msgstr "" + +#: sheets/civil.sheet.in.h:1 +msgid "Aerator with bubbles" +msgstr "" + +#: sheets/civil.sheet.in.h:2 +msgid "Backflow preventer" +msgstr "" + +#: sheets/civil.sheet.in.h:3 +#, fuzzy +msgid "Basin" +msgstr "¹æÇâ" + +#: sheets/civil.sheet.in.h:4 +#, fuzzy +msgid "Bivalent vertical rest" +msgstr "ÁÂ¿ì ¹Ù²Þ" + +#: sheets/civil.sheet.in.h:5 +msgid "Civil" +msgstr "" + +#: sheets/civil.sheet.in.h:6 +msgid "Civil Engineering Components" +msgstr "" + +#: sheets/civil.sheet.in.h:7 +#, fuzzy +msgid "Container" +msgstr "ÇÕ¼º" + +#: sheets/civil.sheet.in.h:8 +msgid "Final-settling basin" +msgstr "" + +#: sheets/civil.sheet.in.h:9 +#, fuzzy +msgid "Frequency converter" +msgstr "»ç¿ëÀÚ ÀÎÅÍÆäÀ̽º" + +#: sheets/civil.sheet.in.h:10 +msgid "Gas bottle" +msgstr "" + +#: sheets/civil.sheet.in.h:11 +#, fuzzy +msgid "Horizontal limiting line" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:12 +#, fuzzy +msgid "Horizontal rest" +msgstr "À§¾Æ·¡ ¹Ù²Þ" + +#: sheets/civil.sheet.in.h:13 +#, fuzzy +msgid "Horizontally aligned arrow" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:14 +#, fuzzy +msgid "Horizontally aligned compressor" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:15 +#, fuzzy +msgid "Horizontally aligned pump" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:16 +#, fuzzy +msgid "Horizontally aligned valve" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:17 +#, fuzzy +msgid "Motor" +msgstr "ÇÕ¼º" + +#: sheets/civil.sheet.in.h:18 +msgid "Preliminary clarification tank" +msgstr "" + +#: sheets/civil.sheet.in.h:19 +#, fuzzy +msgid "Reference line" 
+msgstr "¼± ¸¸µê" + +#: sheets/civil.sheet.in.h:20 +#, fuzzy +msgid "Rotor" +msgstr "ÇÕ¼º" + +#: sheets/civil.sheet.in.h:21 +#, fuzzy +msgid "Soil" +msgstr "½Ç¼±" + +#: sheets/civil.sheet.in.h:22 +#, fuzzy +msgid "Vertical limiting line" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:23 +#, fuzzy +msgid "Vertical rest" +msgstr "ÁÂ¿ì ¹Ù²Þ" + +#: sheets/civil.sheet.in.h:24 +#, fuzzy +msgid "Vertically aligned arrow" +msgstr "À§¾Æ·¡ ¹Ù²Þ" + +#: sheets/civil.sheet.in.h:25 +#, fuzzy +msgid "Vertically aligned compressor" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:26 +#, fuzzy +msgid "Vertically aligned propeller" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:27 +#, fuzzy +msgid "Vertically aligned pump" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:28 +#, fuzzy +msgid "Vertically aligned valve" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/civil.sheet.in.h:29 +msgid "Water level" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:1 +msgid "Jigsaw" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:2 +msgid "Jigsaw - part_iiii" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:3 +msgid "Jigsaw - part_iiio" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:4 +msgid "Jigsaw - part_iioi" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:5 +msgid "Jigsaw - part_iioo" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:6 +msgid "Jigsaw - part_ioii" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:7 +msgid "Jigsaw - part_ioio" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:8 +msgid "Jigsaw - part_iooi" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:9 +msgid "Jigsaw - part_iooo" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:10 +msgid "Jigsaw - part_oiii" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:11 +msgid "Jigsaw - part_oiio" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:12 +msgid "Jigsaw - part_oioi" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:13 +msgid "Jigsaw - part_oioo" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:14 +msgid "Jigsaw - part_ooii" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:15 +msgid "Jigsaw - part_ooio" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:16 +msgid "Jigsaw - part_oooi" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:17 +msgid "Jigsaw - part_oooo" +msgstr "" + +#: sheets/jigsaw.sheet.in.h:18 +msgid "Pieces of a jigsaw" +msgstr "" + +#: sheets/network.sheet.in.h:1 +msgid "24 Port Patch Panel" +msgstr "" + +#: sheets/network.sheet.in.h:2 +msgid "3 1/2 inch diskette" +msgstr "" + +#: sheets/network.sheet.in.h:3 +msgid "ATM switch symbol" +msgstr "" + +#: sheets/network.sheet.in.h:4 +msgid "Antenna for wireless transmission" +msgstr "" + +#: sheets/network.sheet.in.h:5 +#, fuzzy +msgid "Bigtower PC" +msgstr "ÇÕ¼º" + +#: sheets/network.sheet.in.h:6 +#, fuzzy +msgid "Computer" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: sheets/network.sheet.in.h:7 +msgid "Desktop PC" +msgstr "" + +#: sheets/network.sheet.in.h:8 +msgid "Digitizing board" +msgstr "" + +#: sheets/network.sheet.in.h:9 +msgid "Ethernet bus" +msgstr "" + +#: sheets/network.sheet.in.h:10 +msgid "External DAT drive" +msgstr "" + +#: sheets/network.sheet.in.h:11 +#, fuzzy +msgid "Firewall router" +msgstr "¼± »ö»ó:" + +#: sheets/network.sheet.in.h:12 +msgid "Laptop PC" +msgstr "" + +#: sheets/network.sheet.in.h:13 +#, fuzzy +msgid "Miditower PC" +msgstr "ÇÕ¼º" + +#: sheets/network.sheet.in.h:14 +#, fuzzy +msgid "Minitower PC" +msgstr "ÇÕ¼º" + +#: sheets/network.sheet.in.h:15 +msgid "Mobile phone" +msgstr "" + +#: sheets/network.sheet.in.h:16 +msgid "Mobile telephony base station" +msgstr "" + +#: sheets/network.sheet.in.h:17 +msgid "Mobile telephony cell" +msgstr "" + +#: sheets/network.sheet.in.h:18 
+msgid "Modular switching system" +msgstr "" + +#: sheets/network.sheet.in.h:19 +#, fuzzy +msgid "Monitor" +msgstr "ÇÕ¼º" + +#: sheets/network.sheet.in.h:20 +msgid "Network" +msgstr "" + +#: sheets/network.sheet.in.h:21 +msgid "Network cloud" +msgstr "" + +#: sheets/network.sheet.in.h:22 +msgid "Objects to design network diagrams with" +msgstr "" + +#: sheets/network.sheet.in.h:23 +#, fuzzy +msgid "Plotter" +msgstr "°¡ÀåÀÚ¸® ´õÇϱâ" + +#: sheets/network.sheet.in.h:24 +msgid "RJ45 wall-plug" +msgstr "" + +#: sheets/network.sheet.in.h:25 +msgid "Router symbol" +msgstr "" + +#: sheets/network.sheet.in.h:26 +#, fuzzy +msgid "Simple modem" +msgstr "ÇÁ¸°ÅÍ ¼±ÅÃ" + +#: sheets/network.sheet.in.h:27 +#, fuzzy +msgid "Simple printer" +msgstr "ÇÁ¸°ÅÍ ¼±ÅÃ" + +#: sheets/network.sheet.in.h:28 +#, fuzzy +msgid "Speaker with integrated amplifier" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/network.sheet.in.h:29 +#, fuzzy +msgid "Speaker without amplifier" +msgstr "°¡·Î Á¤·Ä(_H)" + +#: sheets/network.sheet.in.h:30 +msgid "Stackable hub or switch" +msgstr "" + +#: sheets/network.sheet.in.h:31 +#, fuzzy +msgid "Storage" +msgstr "ºñÀ²" + +#: sheets/network.sheet.in.h:32 +msgid "Switch symbol" +msgstr "" + +#: sheets/network.sheet.in.h:33 +msgid "Telephone" +msgstr "" + +#: sheets/network.sheet.in.h:34 +msgid "UNIX workstation" +msgstr "" + +#: sheets/network.sheet.in.h:35 +#, fuzzy +msgid "WAN connection" +msgstr "¿¬°áÁ¡:" + +#: sheets/network.sheet.in.h:36 +#, fuzzy +msgid "WAN link" +msgstr "¿¬°áÁ¡:" + +#: sheets/network.sheet.in.h:37 +msgid "Wall-plug for the scEAD cabling system" +msgstr "" + +#: sheets/network.sheet.in.h:38 +msgid "Workstation monitor" +msgstr "" + +#: sheets/network.sheet.in.h:39 +msgid "ZIP disk" +msgstr "" + +#: sheets/sybase.sheet.in.h:1 +msgid "Log transfer manager or rep agent" +msgstr "" + +#: sheets/sybase.sheet.in.h:2 +msgid "Objects to design Sybase replication domain diagrams with" +msgstr "" + +#: sheets/sybase.sheet.in.h:3 +msgid "Replication server manager" +msgstr "" + +#: sheets/sybase.sheet.in.h:4 +msgid "Stable storage device" +msgstr "" + +#: sheets/sybase.sheet.in.h:5 +#, fuzzy +msgid "Sybase" +msgstr "±¥¼±" + +#: sheets/sybase.sheet.in.h:6 +msgid "Sybase client application" +msgstr "" + +#: sheets/sybase.sheet.in.h:7 +msgid "Sybase dataserver" +msgstr "" + +#: sheets/sybase.sheet.in.h:8 +msgid "Sybase replication server" +msgstr "" + +#~ msgid "Untitled-%d" +#~ msgstr "Á¦¸ñ¾ø½¿-%d" + +#, fuzzy +#~ msgid "Export file name to use" +#~ msgstr "ÀоîµéÀÎ ÆÄÀÏÀ» ÀúÀåÇÏ°í ¸¶Ä¨´Ï´Ù" + +#, fuzzy +#~ msgid "Quiet operation" +#~ msgstr "ÀÛµ¿ ¾Èº¸ÀÓ" + +#, fuzzy +#~ msgid "" +#~ "Error: No arguments found.\n" +#~ "Run '%s --help' to see a full list of available command line options.\n" +#~ msgstr "" +#~ "¿É¼Ç %s¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù : %s. \n" +#~ "'%s --help'¸¦ ½ÇÇà½ÃÄѼ­ ¸í·ÉÇà ¿É¼ÇÀÇ Àüü¸ñ·ÏÀ» ÂüÁ¶ÇϽʽÿÀ.\n" + +#, fuzzy +#~ msgid "" +#~ "%s error: must specify only one of -t or -o.\n" +#~ "Run '%s --help' to see a full list of available command line options.\n" +#~ msgstr "" +#~ "¿É¼Ç %s¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù : %s. \n" +#~ "'%s --help'¸¦ ½ÇÇà½ÃÄѼ­ ¸í·ÉÇà ¿É¼ÇÀÇ Àüü¸ñ·ÏÀ» ÂüÁ¶ÇϽʽÿÀ.\n" + +#, fuzzy +#~ msgid "%s error: no input file." +#~ msgstr "¿Ã¹Ù¸¥ ÀÔ·Â ÆÄÀÏÀÌ ÇÊ¿äÇÕ´Ï´Ù\n" + +#, fuzzy +#~ msgid "%s error: only one input file expected." 
+#~ msgstr "¿Ã¹Ù¸¥ ÀÔ·Â ÆÄÀÏÀÌ ÇÊ¿äÇÕ´Ï´Ù\n" + +#, fuzzy +#~ msgid "Background Colour" +#~ msgstr "¹è°æ »ö»ó:" + +#~ msgid "No object menu" +#~ msgstr "°´Ã¼ ¸Þ´º ¾ø½¿" + +#~ msgid "Create Text" +#~ msgstr "¹®ÀÚ¿­ ¸¸µê" + +#~ msgid "Create Ellipse" +#~ msgstr "Ÿ¿ø ¸¸µê" + +#~ msgid "Create Polygon" +#~ msgstr "´Ù°¢Çü ¸¸µê" + +#, fuzzy +#~ msgid "Create Beziergon" +#~ msgstr "º£Áö¾î¼± ¸¸µê" + +#~ msgid "Create Arc" +#~ msgstr "È£ ¸¸µê" + +#~ msgid "Create Zigzagline" +#~ msgstr "Áö±×Àç±× ¸¸µê" + +#~ msgid "Create Polyline" +#~ msgstr "¿¬°á¼± ¸¸µê" + +#~ msgid "Create Bezierline" +#~ msgstr "º£Áö¾î¼± ¸¸µê" + +#~ msgid "Create Image" +#~ msgstr "±×¸² ¸¸µê" + +#~ msgid "_New diagram" +#~ msgstr "»õ µµÇ¥(_N)" + +#, fuzzy +#~ msgid "_Diagram tree" +#~ msgstr "µµÇ¥ ÆíÁý±â" + +#, fuzzy +#~ msgid "Show diagram tree" +#~ msgstr "»õ µµÇ¥(_N)" + +#, fuzzy +#~ msgid "_Sheets and Objects..." +#~ msgstr "Ç¥ÁØ °´Ã¼" + +#, fuzzy +#~ msgid "P_lugins" +#~ msgstr "Ç÷¯±×ÀÎ" + +#~ msgid "Page Set_up..." +#~ msgstr "ÂÊ ¼³Á¤(_u)..." + +#~ msgid "_Print Diagram..." +#~ msgstr "µµÇ¥ Àμâ(_P)..." + +#~ msgid "Copy Text" +#~ msgstr "¹®ÀÚ¿­ º¹»ç" + +#~ msgid "Cut Text" +#~ msgstr "¹®ÀÚ¿­ ÀÚ¸§" + +#~ msgid "Paste _Text" +#~ msgstr "¹®ÀÚ¿­ ºÙÀÓ(_T)" + +#~ msgid "Zoom _In" +#~ msgstr "È®´ë(_I)" + +#~ msgid "Zoom in 50%" +#~ msgstr "2¹è È®´ë" + +#~ msgid "Zoom _Out" +#~ msgstr "Ãà¼Ò(_O)" + +#~ msgid "Zoom out 50%" +#~ msgstr "2¹è Ãà¼Ò" + +#~ msgid "_Zoom" +#~ msgstr "È®´ë(_Z)" + +#~ msgid "_AntiAliased" +#~ msgstr "¾ÈƼ¾ó¶óÀ̽Ì(_A)" + +#, fuzzy +#~ msgid "Show _Grid" +#~ msgstr "°¡ÀåÀÚ¸® º¸ÀÓ:" + +#~ msgid "Show _Rulers" +#~ msgstr "´«±ÝÀÚ º¸±â(_R)" + +#~ msgid "Show _Connection Points" +#~ msgstr "¿¬°áÁ¡ º¸±â(_C)" + +#~ msgid "Top" +#~ msgstr "À§" + +#~ msgid "Bottom" +#~ msgstr "¾Æ·¡" + +#~ msgid "Send to _Back" +#~ msgstr "µÚ·Î º¸³¿(_B)" + +#~ msgid "Bring to _Front" +#~ msgstr "¾ÕÀ¸·Î º¸³¿(_F)" + +#, fuzzy +#~ msgid "Send Backwards" +#~ msgstr "µÚ·Î º¸³¿(_B)" + +#~ msgid "_Group" +#~ msgstr "¹­±â(_G)" + +#~ msgid "_Ungroup" +#~ msgstr "Ç®±â(_U)" + +#~ msgid "Align _Horizontal" +#~ msgstr "°¡·Î Á¤·Ä(_H)" + +#~ msgid "Align _Vertical" +#~ msgstr "¼¼·Î Á¤·Ä(_V)" + +#~ msgid "_Layers" +#~ msgstr "°èÃþ(_L)" + +#, fuzzy +#~ msgid "_Select" +#~ msgstr "¼±ÅÃ" + +#, fuzzy +#~ msgid "_Objects" +#~ msgstr "/°´Ã¼(_O)" + +#, fuzzy +#~ msgid "_Tools" +#~ msgstr "µµ±¸" + +#, fuzzy +#~ msgid "_Dialogs" +#~ msgstr "/´ëÈ­»óÀÚ(_D)" + +#, fuzzy +#~ msgid "/View/Diagram Properties..." +#~ msgstr "/´ëÈ­»óÀÚ/¼Ó¼º(_P)" + +#~ msgid "/Objects/Align Horizontal/Center" +#~ msgstr "/°´Ã¼/¼öÆò Á¤·Ä/Áß°£" + +#~ msgid "/Objects/Align Horizontal/Equal Distance" +#~ msgstr "/°´Ã¼/¼öÆò Á¤·Ä/" + +#~ msgid "/_Dialogs" +#~ msgstr "/´ëÈ­»óÀÚ(_D)" + +#~ msgid "/Dialogs/_Properties" +#~ msgstr "/´ëÈ­»óÀÚ/¼Ó¼º(_P)" + +#~ msgid "Apply" +#~ msgstr "Àû¿ë" + +#, fuzzy +#~ msgid "Error occured while printing" +#~ msgstr "PNG¸¦ ¾²´ÂÁß ¿À·ù°¡ ¹ß»ýÇß½À´Ï´Ù" + +#, fuzzy +#~ msgid "Show at startup:" +#~ msgstr "½ÃÀÛÇÒ¶§ ÀÚµ¿ Àбâ" + +#, fuzzy +#~ msgid "Default width:" +#~ msgstr "¼± ±½±â:" + +#, fuzzy +#~ msgid "Can't open history file for writing." 
+#~ msgstr "¿­¼ö ¾÷½À´Ï´Ù: '%s' ¾µ¼ö¾ø½¿.\n" + +#~ msgid "Length: " +#~ msgstr "±æÀÌ: " + +#~ msgid "Width: " +#~ msgstr "Æø: " + +#, fuzzy +#~ msgid "Line gaps" +#~ msgstr "¼± ÇüÅÂ:" + +#, fuzzy +#~ msgid "Start at object edge" +#~ msgstr "Ç¥ÁØ °´Ã¼" + +#, fuzzy +#~ msgid "Helvetica" +#~ msgstr "ÁÂ¿ì ¹Ù²Þ" + +#~ msgid "Delete" +#~ msgstr "Áö¿ò" + +#, fuzzy +#~ msgid "Could not load XSLT library (%s) : %s" +#~ msgstr "" +#~ "Ç÷¯±×ÀÎ `%s'¸¦ ÀÐÀ»¼ö ¾ø½À´Ï´Ù\n" +#~ "%s" + +#, fuzzy +#~ msgid "A pnp bipolar transistor" +#~ msgstr "¹æÇâ" + +#, fuzzy +#~ msgid "Create a flow" +#~ msgstr "»óÀÚ ¸¸µê" + +#, fuzzy +#~ msgid "Create a function" +#~ msgstr "¼± ¸¸µê" + +#, fuzzy +#~ msgid "A macro entry step" +#~ msgstr "ÅÛÇø´" + +#, fuzzy +#~ msgid "A macro exit step" +#~ msgstr "ÅÛÇø´" + +#, fuzzy +#~ msgid "A regular step" +#~ msgstr "ÅÛÇø´" + +#, fuzzy +#~ msgid "A transition" +#~ msgstr "¹æÇâ" + +#, fuzzy +#~ msgid "A Nand gate" +#~ msgstr "ÁýÇÕ" + +#, fuzzy +#~ msgid "A Xor gate" +#~ msgstr "ÁýÇÕ" + +#, fuzzy +#~ msgid "An And gate" +#~ msgstr "ÁýÇÕ" + +#, fuzzy +#~ msgid "Create a branch" +#~ msgstr "È£ ¸¸µê" + +#, fuzzy +#~ msgid "Create a class" +#~ msgstr "ÅÛÇø´ Ŭ·¡½º" + +#, fuzzy +#~ msgid "Create a component" +#~ msgstr "´Ù°¢Çü ¸¸µê" + +#, fuzzy +#~ msgid "Create a fork/union" +#~ msgstr "¼± ¸¸µê" + +#, fuzzy +#~ msgid "Create a lifeline" +#~ msgstr "º£Áö¾î¼± ¸¸µê" + +#, fuzzy +#~ msgid "Create a message" +#~ msgstr "±×¸² ¸¸µê" + +#, fuzzy +#~ msgid "Create a node" +#~ msgstr "¼± ¸¸µê" + +#, fuzzy +#~ msgid "Create a note" +#~ msgstr "¼± ¸¸µê" + +#, fuzzy +#~ msgid "Create a state" +#~ msgstr "¼± ¸¸µê" + +#, fuzzy +#~ msgid "Create a template class" +#~ msgstr "ÅÛÇø´ Ŭ·¡½º" + +#, fuzzy +#~ msgid "Create a use case" +#~ msgstr "±×¸² ¸¸µê" + +#, fuzzy +#~ msgid "Create an activity" +#~ msgstr "»õ µµÇ¥¸¦ ¸¸µì´Ï´Ù" + +#, fuzzy +#~ msgid "Create an actor" +#~ msgstr "»õ µµÇ¥¸¦ ¸¸µì´Ï´Ù" + +#, fuzzy +#~ msgid "Create an object" +#~ msgstr "¼± ¸¸µê" + +#~ msgid "/View/_Visible Grid" +#~ msgstr "/º¸±â/°ÝÀÚ º¸±â" + +#~ msgid "Corner rounding:" +#~ msgstr "°¡ÀåÀÚ¸® µÕ±Û°ÔÇÔ:" + +#~ msgid "Shear angle:" +#~ msgstr "°¢µµ º¯°æ:" + +#~ msgid "Begin" +#~ msgstr "½ÃÀÛ" + +#~ msgid "End" +#~ msgstr "³¡" + +#, fuzzy +#~ msgid "A Diagram Editor" +#~ msgstr "µµÇ¥ ÆíÁý±â" + +#~ msgid "Image file:" +#~ msgstr "±×¸² ÆÄÀÏ:" + +#~ msgid "Keep aspect ratio:" +#~ msgstr "ºñÀ² À¯Áö:" + +#~ msgid "Maintainer: James Henstridge" +#~ msgstr "°ü¸®ÀÚ: James Henstridge" + +#~ msgid "Fontsize:" +#~ msgstr "±Û²Ã Å©±â:" + +#~ msgid "Print Diagram" +#~ msgstr "µµÇ¥ Àμâ" + +#, fuzzy +#~ msgid "An error occured while creating the print context" +#~ msgstr "PNG¸¦ ¾²´ÂÁß ¿À·ù°¡ ¹ß»ýÇß½À´Ï´Ù" + +#~ msgid "yes" +#~ msgstr "¿¹" + +#~ msgid "no" +#~ msgstr "¾Æ´Ï¿À" + +#~ msgid "Load" +#~ msgstr "Àбâ" + +#~ msgid "Unload" +#~ msgstr "Á¦°ÅÇϱâ" + +#, fuzzy +#~ msgid "/File/Exit" +#~ msgstr "ÆÄÀÏ" + +#~ msgid "" +#~ "Warning no X Font for %s found, \n" +#~ "using %s instead.\n" +#~ msgstr "" +#~ "%s¿¡¼­ »ç¿ëÇÒ X ±Û²ÃÀÌ ¾÷½À´Ï´Ù. \n" +#~ "%s¸¦ ´ë½Å »ç¿ëÇÕ´Ï´Ù.\n" + +#, fuzzy +#~ msgid "Warning: No X fonts found. The world is ending." +#~ msgstr "" +#~ "%s¿¡¼­ »ç¿ëÇÒ X ±Û²ÃÀÌ ¾÷½À´Ï´Ù. \n" +#~ "%s¸¦ ´ë½Å »ç¿ëÇÕ´Ï´Ù.\n" + +#~ msgid "Quit, are you sure?" +#~ msgstr "Á¤¸»·Î Á¾·áÇϽðڽÀ´Ï±î?" + +#~ msgid "Quit" +#~ msgstr "Á¾·á" + +#~ msgid "Really close?" +#~ msgstr "Á¤¸»·Î ´Ý°Ú½À´Ï±î?" 
+ +#, fuzzy +#~ msgid "Down" +#~ msgstr "¾Æ·¡·Î À̵¿" + +#, fuzzy +#~ msgid "_Remove" +#~ msgstr "»èÁ¦" + +#~ msgid "`%s' is not a directory" +#~ msgstr "`%s'´Â ÀÚ·á¹æÀÌ ¾Æ´Õ´Ï´Ù" + +#, fuzzy +#~ msgid "Number of processes:" +#~ msgstr "Ãë¼ÒÇϱ⠰¹¼ö:" + +#, fuzzy +#~ msgid "multiple" +#~ msgstr "Áߺ¹" + +#, fuzzy +#~ msgid "Single" +#~ msgstr "ÇÁ¸°ÅÍ" + +#, fuzzy +#~ msgid "Multiple" +#~ msgstr "Áߺ¹" + +#, fuzzy +#~ msgid "Instantiation" +#~ msgstr "¹æÇâ" + +#, fuzzy +#~ msgid "Unidirectional" +#~ msgstr "ÇÕ¼º" + +#, fuzzy +#~ msgid "Interaction name:" +#~ msgstr "»ç¿ëÀÚ ÀÎÅÍÆäÀ̽º" + +#, fuzzy +#~ msgid "relation" +#~ msgstr "ÀÛµ¿" + +#, fuzzy +#~ msgid "Interface functions" +#~ msgstr "¼± ¸¸µê" + +#, fuzzy +#~ msgid "Interfaces" +#~ msgstr "»ç¿ëÀÚ ÀÎÅÍÆäÀ̽º" + +#, fuzzy +#~ msgid "Interface" +#~ msgstr "»ç¿ëÀÚ ÀÎÅÍÆäÀ̽º" + +#, fuzzy +#~ msgid "Interface name:" +#~ msgstr "»ç¿ëÀÚ ÀÎÅÍÆäÀ̽º" + +#, fuzzy +#~ msgid "Interface messages" +#~ msgstr "»èÁ¦" + +#, fuzzy +#~ msgid "Message parameters" +#~ msgstr "ÀÎÀÚ:" + +#, fuzzy +#~ msgid "Functions" +#~ msgstr "¿¬°áÁ¡:" + +#, fuzzy +#~ msgid "Messages" +#~ msgstr "»èÁ¦" + +#, fuzzy +#~ msgid "Process name:" +#~ msgstr "Ŭ·¡½ºÀ̸§:" + +#, fuzzy +#~ msgid "Process reference name:" +#~ msgstr "¼³Á¤" + +#, fuzzy +#~ msgid "Module name:" +#~ msgstr "ÆÄÀÏ À̸§:" + +#, fuzzy +#~ msgid "Parameter" +#~ msgstr "ÀÎÀÚ:" + +#~ msgid "Grid:" +#~ msgstr "°ÝÀÚ:" + +#~ msgid "" +#~ "No such file found\n" +#~ "%s\n" +#~ msgstr "" +#~ "ÆÄÀÏÀÌ ¾ø½À´Ï´Ù\n" +#~ "%s\n" + +#~ msgid "Import from XFig" +#~ msgstr "XFig¿¡¼­ º¯È¯" + +#, fuzzy +#~ msgid "File/New diagram" +#~ msgstr "»õ µµÇ¥(_N)" + +#, fuzzy +#~ msgid "File/Save As..." +#~ msgstr "/ÆÄÀÏ/ÂÊ ¼³Á¤(_u)..." + +#, fuzzy +#~ msgid "File/Close" +#~ msgstr "/ÆÄÀÏ/´Ý±â(_C)" + +#, fuzzy +#~ msgid "View/New View" +#~ msgstr "/º¸±â/»õ º¸±â(_V)" + +#~ msgid "/Edit/Copy" +#~ msgstr "/ÆíÁý/º¹»ç" + +#~ msgid "/Edit/Paste" +#~ msgstr "/ÆíÁý/ºÙÀ̱â" + +#~ msgid "/Edit/Delete" +#~ msgstr "/ÆíÁý/Áö¿ì±â" + +#~ msgid "/Edit/Paste Text" +#~ msgstr "/ÆíÁý/¹®ÀÚ¿­ ºÙÀ̱â" + +#~ msgid "/Objects/Group" +#~ msgstr "/°´Ã¼/¹­À½" + +#~ msgid "/Objects/Ungroup" +#~ msgstr "/°´Ã¼/Ç°" + +#~ msgid "/View/Show Rulers" +#~ msgstr "/º¸±â/´«±ÝÀÚ º¸±â" + +#~ msgid "/View/Visible Grid" +#~ msgstr "/º¸±â/°ÝÀÚ º¸±â" + +#~ msgid "/View/Snap To Grid" +#~ msgstr "/º¸±â/²÷±ä °ÝÀÚ" + +#~ msgid "/View/Show Connection Points" +#~ msgstr "/º¸±â/¿¬°áÁ¡ º¸ÀÓ" + +#~ msgid "/View/AntiAliased" +#~ msgstr "/º¸±â/¾ÈƼ¾ó¶óÀ̽Ì" + +#, fuzzy +#~ msgid "/Edit/tearoff" +#~ msgstr "/ÆíÁý/ºÙÀ̱â" + +#, fuzzy +#~ msgid "/View/tearoff" +#~ msgstr "/º¸±â/±¸ºÐÀÚ1" + +#, fuzzy +#~ msgid "/View/Zoom/tearoff" +#~ msgstr "/º¸±â/È®´ë(_Z)" + +#~ msgid "/View/sep1" +#~ msgstr "/º¸±â/±¸ºÐÀÚ1" + +#, fuzzy +#~ msgid "/Select/tearoff" +#~ msgstr "/¼±ÅÃ/¹Ý´ë" + +#~ msgid "/Select/sep1" +#~ msgstr "/¼±ÅÃ/±¸ºÐÀÚ1" + +#, fuzzy +#~ msgid "/Objects/tearoff1" +#~ msgstr "/°´Ã¼/±¸ºÐÀÚ1" + +#~ msgid "/Objects/sep1" +#~ msgstr "/°´Ã¼/±¸ºÐÀÚ1" + +#, fuzzy +#~ msgid "/Objects/Align Horizontal/tearoff" +#~ msgstr "/°´Ã¼/¼öÆò Á¤·Ä/Áß°£" + +#, fuzzy +#~ msgid "/Objects/Align Vertical/tearoff" +#~ msgstr "/°´Ã¼/¼öÁ÷ Á¤·Ä/Áß°£" + +#, fuzzy +#~ msgid "/Tools/tearoff" +#~ msgstr "/µµ±¸/»óÀÚ" + +#, fuzzy +#~ msgid "/Dialogs/tearoff" +#~ msgstr "/´ëÈ­»óÀÚ/°èÃþ(_L)" + +#~ msgid "Asynchronous" +#~ msgstr "ºñµ¿±â" + +#~ msgid "Grid y size:" +#~ msgstr "°ÝÀÚ ¼¼·Î Å©±â:" + +#~ msgid "Edit Grid..." +#~ msgstr "°ÝÀÚ ÆíÁý..." + +#~ msgid "/View/Edit Grid..." +#~ msgstr "/º¸±â/°ÝÀÚ ÆíÁý..." 
=== added file 'src/github.com/chai2010/gettext-go/testdata/mm-viet.comp.mo' Binary files src/github.com/chai2010/gettext-go/testdata/mm-viet.comp.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/mm-viet.comp.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/mm-viet.comp.po' --- src/github.com/chai2010/gettext-go/testdata/mm-viet.comp.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/mm-viet.comp.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49553 @@ +# Vietnamese translation for NAME. +# Copyright © 2006 Gnome i18n Project for Vietnamese. +# Clytie Siddall , 2006. +# +msgid "" +msgstr "" +"Project-Id-Version: NAME_VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2006-02-03 02:05+0100\n" +"PO-Revision-Date: #LOCALTIME %F %R%z#\n" +"Last-Translator: Clytie Siddall \n" +"Language-Team: Vietnamese \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0\n" +"X-Generator: LocFactoryEditor 1.6b36\n" + +# Name: don't translate / Tên: đừng dịch +#: ../balsa.desktop.in.h:1 +#: ../capplets/default-applications/gnome-default-applications-properties-structs.c:42 +msgid "Balsa" +msgstr "Balsa" + +#: ../balsa.desktop.in.h:2 +msgid "E-Mail utility" +msgstr "Tiện ích thÆ° Ä‘iện tá»­" + +#: ../libbalsa/address-book-ldif.c:329 +msgid "No-Id" +msgstr "Không có ID" + +#: ../libbalsa/address-book-vcard.c:312 +msgid "No-Name" +msgstr "Không có tên" + +#: ../libbalsa/address-book-ldap.c:275 +msgid "TLS requested but not compiled in" +msgstr "TLS được yêu cầu còn không biên dịch sẵn." + +#: ../libgnomevfs/gnome-vfs-result.c:42 +msgid "No error" +msgstr "Không có lá»—i" + +#: ../libbalsa/address-book.c:302 +msgid "Cannot read from address book" +msgstr "Không thể Ä‘á»c từ sổ địa chỉ" + +#: ../libbalsa/address-book.c:303 +msgid "Cannot write to address book" +msgstr "Không thể ghi vào sổ địa chỉ" + +#: ../libbalsa/address-book.c:304 ../libbalsa/imap-server.c:511 +msgid "Cannot connect to the server" +msgstr "Không thể kết nối đến máy phục vụ" + +#: ../libbalsa/address-book.c:305 +msgid "Cannot search in the address book" +msgstr "Không thể tìm kiếm trong sổ địa chỉ" + +#: ../libbalsa/address-book.c:307 +msgid "Cannot add duplicate entry" +msgstr "Không thể thêm mục nhập trùng" + +#: ../libbalsa/address-book.c:309 +msgid "Cannot find address in address book" +msgstr "Không tìm thấy địa chỉ trong sổ địa chỉ" + +#: ../testing/gda-test-sql.c:171 ../testing/gda-test-sql.c:392 +msgid "Unknown error" +msgstr "Gặp lá»—i lạ" + +#: ../libbalsa/address.c:436 +msgid "_Displayed Name:" +msgstr "Tên _hiển thị :" + +#: ../libbalsa/address.c:437 ../ui/addcontact.glade.h:6 +#: ../ui/user_info.glade.h:77 +msgid "_First Name:" +msgstr "T_ên:" + +#: ../libbalsa/address.c:438 ../ui/addcontact.glade.h:8 +#: ../ui/user_info.glade.h:81 +msgid "_Last Name:" +msgstr "_Há» :" + +#: ../libbalsa/address.c:439 ../libgames-support/games-network-dialog.c:266 +#: ../ui/addcontact.glade.h:9 ../ui/user_info.glade.h:82 +msgid "_Nickname:" +msgstr "Tên _hiệu :" + +#: ../libbalsa/address.c:440 +msgid "O_rganization:" +msgstr "_Tổ chức:" + +#: ../libbalsa/address.c:441 ../pan/identities/identity-edit-ui.c:142 +msgid "_Email Address:" +msgstr "_Äịa chỉ thÆ° Ä‘iện tá»­ :" + +#: ../libbalsa/body.c:115 ../libbalsa/mailbox_local.c:1644 +#: ../libbalsa/message.c:855 +msgid "(No subject)" +msgstr "(Không có chủ Ä‘á»)" + +#: ../libbalsa/files.c:231 
../src/nautilus-information-panel.c:898 +#, c-format +msgid "Open with %s" +msgstr "Mở bằng %s" + +#: ../libbalsa/filter-error.c:46 +msgid "Syntax error in the filter configuration file" +msgstr "Gặp lá»—i cú pháp trong tập tin cấu hình bá»™ lá»c." + +#: ../libbalsa/filter-error.c:47 +msgid "Unable to allocate memory" +msgstr "Không thể cấp phát bá»™ nhá»›." + +#: ../libbalsa/filter-error.c:48 +msgid "Error in regular expression syntax" +msgstr "Gặp lá»—i trong cú pháp biểu thức chính quy." + +#: ../libbalsa/filter-error.c:49 +msgid "Attempt to apply an invalid filter" +msgstr "Việc cố áp dụng má»™t bá»™ lá»c không hợp lệ" + +#: ../libbalsa/filter-file.c:141 +#, c-format +msgid "Invalid filters %s for mailbox %s" +msgstr "Bá»™ lá»c %s không hợp lệ cho há»™p thÆ° %s" + +#: ../libbalsa/filter.c:132 ../libbalsa/mailbox_local.c:769 +msgid "Unable to load message body to match filter" +msgstr "Không thể tải thân thÆ° để khá»›p bá»™ lá»c." + +#: ../libbalsa/filter.c:237 +#, c-format +msgid "Invalid filter: %s" +msgstr "Bá»™ lá»c không hợp lệ: %s" + +#: ../libbalsa/filter.c:276 ../libbalsa/filter.c:299 +#, c-format +msgid "Bad mailbox name for filter: %s" +msgstr "Tên há»™p thÆ° sai cho bá»™ lá»c: %s" + +#: ../libbalsa/filter.c:280 +#, c-format +msgid "Error when copying messages: %s" +msgstr "Gặp lá»—i khi sao chép thÆ° : %s" + +#: ../libbalsa/filter.c:290 +#, c-format +msgid "Error when trashing messages: %s" +msgstr "Gặp lá»—i khi xoá bá» thÆ° : %s" + +#: ../libbalsa/filter.c:303 +#, c-format +msgid "Error when moving messages: %s" +msgstr "Gặp lá»—i khi di chuyển thÆ° : %s" + +#: ../libbalsa/gmime-application-pkcs7.c:237 +#: ../libbalsa/gmime-application-pkcs7.c:380 +msgid "Failed to decrypt MIME part: parse error" +msgstr "Việc giải mật mã phần MIME bị lá»—i: lá»—i phân tách" + +#: ../libbalsa/gmime-gpgme-context.c:279 ../libbalsa/rfc3156.c:1596 +#, c-format +msgid "" +"The passphrase for this key was bad, please try again!\n" +"\n" +"Key: %s" +msgstr "" +"Bạn đã gõ sai cụm từ mật khẩu cho khoá này. Hãy thá»­ lại.\n" +"\n" +"Khoá: %s" + +#: ../libbalsa/gmime-gpgme-context.c:284 ../libbalsa/rfc3156.c:1601 +#, c-format +msgid "" +"Please enter the passphrase for the secret key!\n" +"\n" +"Key: %s" +msgstr "" +"• Hãy gõ cụm từ mật khẩu cho khoá bí mật. 
•\n" +"\n" +"Khoá: %s" + +#: ../libbalsa/gmime-gpgme-context.c:404 ../libbalsa/gmime-gpgme-context.c:486 +#: ../libbalsa/gmime-gpgme-context.c:497 ../libbalsa/gmime-gpgme-context.c:612 +#: ../libbalsa/gmime-gpgme-context.c:703 +#, c-format +msgid "%s: could not get data from stream: %s" +msgstr "%s: không thể lấy dữ liệu từ luồng: %s" + +#: ../libbalsa/gmime-gpgme-context.c:412 ../libbalsa/gmime-gpgme-context.c:621 +#: ../libbalsa/gmime-gpgme-context.c:711 +#, c-format +msgid "%s: could not create new data object: %s" +msgstr "%s: không thể tạo đối tượng dữ liệu má»›i: %s" + +#: ../libbalsa/gmime-gpgme-context.c:422 +#, c-format +msgid "%s: signing failed: %s" +msgstr "%s: việc ký tên bị lá»—i: %s" + +#: ../libbalsa/gmime-gpgme-context.c:511 +#, c-format +msgid "%s: signature verification failed: %s" +msgstr "%s: việc thẩm tra chữ ký bị lá»—i: %s" + +#: ../libbalsa/gmime-gpgme-context.c:576 +msgid "combined signing and encryption is only defined for RFC 2440" +msgstr "" +"khả năng ký tên và mật mã được tổ hợp vá»›i nhau chỉ được định nghÄ©a cho RFC " +"2440" + +#: ../libbalsa/gmime-gpgme-context.c:647 +#, c-format +msgid "%s: signing and encryption failed: %s" +msgstr "%s: việc ký tên và mật mã bị lá»—i: %s" + +#: ../libbalsa/gmime-gpgme-context.c:651 +#, c-format +msgid "%s: encryption failed: %s" +msgstr "%s: việc mật mã bị lá»—i: %s" + +#: ../libbalsa/gmime-gpgme-context.c:723 +#, c-format +msgid "%s: decryption failed: %s" +msgstr "%s: việc giải mật mã bị lá»—i: %s" + +#: ../libbalsa/gmime-gpgme-context.c:758 +#, c-format +msgid "%s: could not create context: %s" +msgstr "%s: không thể tạo ngữ cảnh: %s" + +#: ../libbalsa/gmime-gpgme-context.c:814 +msgid "the crypto engine for protocol OpenPGP is not available" +msgstr "cÆ¡ chế mật mã cho giao thức OpenPGP chÆ°a sẵn sàng" + +#: ../libbalsa/gmime-gpgme-context.c:824 +msgid "the crypto engine for protocol CMS is not available" +msgstr "VcÆ¡ chế mật mã cho giao thức CMS chÆ°a sẵn sàng" + +#: ../libbalsa/gmime-gpgme-context.c:831 +#, c-format +msgid "invalid crypto engine %d" +msgstr "Äá»™ng cÆ¡ mật mã không hợp lệ %d" + +#: ../libbalsa/gmime-gpgme-context.c:862 ../libbalsa/gmime-gpgme-context.c:884 +#, c-format +msgid "%s: could not list keys for %s: %s" +msgstr "%s: không thể liệt kê khoá cho %s: %s" + +#: ../libbalsa/gmime-gpgme-context.c:896 +#, c-format +msgid "%s: could not find a key for %s" +msgstr "%s: không thể tìm khoá cho %s" + +#: ../libbalsa/gmime-gpgme-context.c:908 +#, c-format +msgid "%s: multiple keys for %s" +msgstr "%s: có nhiá»u khoá cho %s" + +#: ../libbalsa/gmime-gpgme-context.c:953 +#, c-format +msgid "%s: insufficient validity for uid %s" +msgstr "%s: không có đủ Ä‘á»™ hợp lệ cho mã nhận diện ngÆ°á»i dùng (UID) %s" + +#: ../src/sendmsg-window.c:3501 +msgid "Re:" +msgstr "Vá»:" + +#: ../libbalsa/identity.c:103 ../src/sendmsg-window.c:3531 +#: ../src/sendmsg-window.c:3532 +msgid "Fwd:" +msgstr "Tiếp:" + +#: ../libbalsa/identity.c:160 +msgid "New Identity" +msgstr "Thá»±c thể má»›i" + +#: ../libbalsa/identity.c:413 ../objects/FS/function.c:1124 +#: datebook_gui.c:1582 app/envelope-box.c:879 +msgid "Current" +msgstr "Hiện có" + +#: ../src/session.c:2276 ../users-conf.in:496 ../widgets/gtk+.xml.in.h:48 +msgid "Default" +msgstr "Mặc định" + +#: ../libbalsa/identity.c:862 +msgid "Signature _Path" +msgstr "_ÄÆ°á»ng dẫn chữ ký" + +#. create the "Signature" tab +#: ../libbalsa/identity.c:866 ../libbalsa/identity.c:920 +msgid "Signature" +msgstr "Chữ ký" + +#. Translators: please do not translate Face. 
+#: ../libbalsa/identity.c:868 +msgid "_Face Path" +msgstr "ÄÆ°á»ng dẫn _mặt" + +#. Translators: please do not translate Face. +#: ../libbalsa/identity.c:874 +msgid "_X-Face Path" +msgstr "ÄÆ°á»ng dẫn mặt _X" + +#: ../glade/glade_project_options.c:174 ../ui/user_info.glade.h:26 +#: ../src/glade-editor.c:815 ../src/dialog-win.cc:86 +#: ../src/form-editor/list-prop.cc:83 ../src/form-editor/table-prop.cc:153 +msgid "General" +msgstr "Chung" + +#: ../libbalsa/identity.c:898 +msgid "_Identity Name:" +msgstr "T_ên thá»±c thể" + +#: ../libbalsa/identity.c:900 ../mail/mail-config.glade.h:160 +#: ../pan/identities/identity-edit-ui.c:139 +msgid "_Full Name:" +msgstr "_Há» tên:" + +#: ../libbalsa/identity.c:902 +msgid "_Mailing Address:" +msgstr "Äị_a chỉ thÆ° tín:" + +#: ../libbalsa/identity.c:904 +msgid "Reply _To:" +msgstr "T_rả lá»i cho :" + +#: ../libbalsa/identity.c:906 ../libgnomeui/gnome-password-dialog.c:254 +msgid "_Domain:" +msgstr "_Miá»n:" + +#: ../libbalsa/identity.c:908 ../composer/e-msg-composer-hdrs.c:654 +#: ../composer/e-msg-composer-hdrs.c:656 +msgid "_Bcc:" +msgstr "_Bcc:" + +#: ../libbalsa/identity.c:910 +msgid "Reply _String:" +msgstr "_Chuá»—i trả lá»i:" + +#: ../libbalsa/identity.c:912 +msgid "F_orward String:" +msgstr "Chuá»—i chuyển _tiếp:" + +#: ../libbalsa/identity.c:915 +msgid "SMT_P Server:" +msgstr "Máy phục vụ SMT_P:" + +#: ../libbalsa/identity.c:925 +msgid "_Execute Signature" +msgstr "T_há»±c hiện chữ ký" + +#: ../libbalsa/identity.c:928 +msgid "Incl_ude Signature" +msgstr "_Kèm theo chữ ký" + +#: ../libbalsa/identity.c:931 +msgid "Include Signature When For_warding" +msgstr "Kèm theo chữ ký khi gởi chu_yển tiếp" + +#: ../libbalsa/identity.c:934 +msgid "Include Signature When Rep_lying" +msgstr "Kèm theo chữ ký khi t_rả lá»i" + +#: ../libbalsa/identity.c:937 +msgid "_Add Signature Separator" +msgstr "Th_êm bá»™ phân cách chữ ký" + +#: ../libbalsa/identity.c:940 +msgid "Prepend Si_gnature" +msgstr "Kèm tr_Æ°á»›c chữ ký" + +#: ../gnomecard/card-editor.glade.h:37 +msgid "Security" +msgstr "Bảo mật" + +#: ../libbalsa/identity.c:955 +msgid "sign messages by default" +msgstr "ký tên thÆ° theo mặc định" + +#: ../libbalsa/identity.c:958 +msgid "encrypt messages by default" +msgstr "mật mã thÆ° theo mặc định" + +#: ../libbalsa/identity.c:961 +msgid "default protocol" +msgstr "giao thức mặc định" + +#: ../libbalsa/identity.c:964 +msgid "always trust GnuPG keys when encrypting" +msgstr "luôn tin khoá GnuGP khi mật mã hóa" + +#: ../libbalsa/identity.c:1069 +#, c-format +msgid "Error reading file %s: %s" +msgstr "Gặp lá»—i khi Ä‘á»c tập tin %s: %s" + +#. Translators: please do not translate Face. +#: ../libbalsa/identity.c:1080 +#, c-format +msgid "Face header file %s is too long (%d bytes)." +msgstr "Tập tin phần đầu mặt %s quá dài (%d byte)." + +#. Translators: please do not translate Face. +#: ../libbalsa/identity.c:1090 +#, c-format +msgid "Face header file %s contains binary data." +msgstr "Tập tin phần đầu mặt %s chứa dữ liệu nhị phân." + +#. Translators: please do not translate Face. +#: ../libbalsa/identity.c:1110 ../src/balsa-message.c:1245 +#, c-format +msgid "Error loading Face: %s" +msgstr "Gặp lá»—i khi tải Face: %s" + +#: ../libbalsa/identity.c:1299 +msgid "Error: The identity does not have a name" +msgstr "Lá»—i: thá»±c thể không có tên" + +#: ../libbalsa/identity.c:1309 +msgid "Error: An identity with that name already exists" +msgstr "Lá»—i: má»™t thá»±c thể cùng tên đã có." 
+ +#: ../libbalsa/identity.c:1501 +msgid "Do you really want to delete the selected identity?" +msgstr "Bạn thật sá»± muốn xoá bá» thá»±c thể đã chá»n không?" + +#: ../libbalsa/identity.c:1536 +#, c-format +msgid "Error displaying help for identities: %s\n" +msgstr "Gặp lá»—i khi hiển thị trợ giúp cho thá»±c thể: %s\n" + +#: ../libbalsa/identity.c:1582 +msgid "Manage Identities" +msgstr "Quản lý thá»±c thể" + +#: ../libbalsa/identity.c:1939 +msgid "GnuPG MIME mode" +msgstr "Chế Ä‘á»™ MIME GnuPG" + +#: ../libbalsa/identity.c:1941 +msgid "GnuPG OpenPGP mode" +msgstr "Chế Ä‘á»™ OpenPGP GnuPG" + +#: ../libbalsa/identity.c:1944 +msgid "GpgSM S/MIME mode" +msgstr "Chế Ä‘á»™ S/MIME GnuPG" + +#. IMAP host name + message +#: ../libbalsa/imap-server.c:256 +#, c-format +msgid "" +"IMAP server %s alert:\n" +"%s" +msgstr "" +"Cảnh giác máy phục vụ IMAP %s:\n" +"%s" + +#. IMAP host name + message +#: ../libbalsa/imap-server.c:260 +#, c-format +msgid "IMAP server %s error: %s" +msgstr "Lá»—i máy phục vụ IMAP %s: %s" + +#: ../libgnomecups/gnome-cups-printer.c:1043 misc.c:326 +#: app/drivers/sun-input.c:229 app/drivers/sun-input.c:311 +#: app/drivers/sun-output.c:389 app/drivers/sun-output.c:460 +#, c-format +msgid "%s: %s" +msgstr "%s: %s" + +#: ../libbalsa/imap-server.c:516 ../libbalsa/imap-server.c:521 +#, c-format +msgid "Cannot connect to the server: %s" +msgstr "Không thể kết nối đến máy phục vụ : %s" + +#: ../libbalsa/imap-server.c:669 +#, c-format +msgid "Exceeded the number of connections per server %s" +msgstr "Vượt quá số kết nối cho má»—i máy phục vụ %s" + +#: ../libbalsa/libbalsa-conf.c:203 +msgid "Your Balsa configuration is now stored in \"~/.balsa/config\"." +msgstr "Cấu hình Balsa của bạn bây giỠđược cất giữ vào <~/.balsa/config>." + +#: ../libbalsa/libbalsa.c:285 +#, c-format +msgid "LDAP Directory for %s" +msgstr "ThÆ° mục LDAP cho %s" + +#: ../libbalsa/libbalsa.c:391 +msgid "Invalid date" +msgstr "Ngày không hợp lệ" + +#: ../gmedia_prop/setup_alias_stuff.c:128 libexif/exif-entry.c:426 +#: libexif/exif-entry.c:441 libexif/exif-entry.c:522 libexif/exif-tag.c:672 +#: libexif/olympus/mnote-olympus-entry.c:452 +#: libexif/olympus/mnote-olympus-entry.c:508 ../src/orca/rolenames.py:483 +msgid "Unknown" +msgstr "Không rõ" + +#: ../libbalsa/libbalsa.c:546 +#, c-format +msgid "" +"Authenticity of this certificate could not be verified.\n" +"Reason: %s\n" +"This certificate belongs to:\n" +msgstr "" +"Không thể thẩm tra sá»± xác thá»±c của chứng nhận này.\n" +"Lý do : %s\n" +"Chứng nhận này thuá»™c vá»:\n" + +#: ../libbalsa/libbalsa.c:558 +msgid "" +"\n" +"This certificate was issued by:\n" +msgstr "" +"\n" +"Chứng nhận này được phát hành bởi:\n" + +#: ../libbalsa/libbalsa.c:569 +#, c-format +msgid "" +"This certificate is valid\n" +"from %s\n" +"to %s\n" +"Fingerprint: %s" +msgstr "" +"Chứng nhận này hợp lệ\n" +"từ %s\n" +"đến %s\n" +"Dấu Ä‘iểm chỉ: %s" + +#: ../libbalsa/libbalsa.c:578 +msgid "SSL/TLS certificate" +msgstr "Chứng nhận SSL/TLS" + +#: ../libbalsa/libbalsa.c:580 +msgid "_Accept Once" +msgstr "_Chấp nhận má»™t lần" + +#: ../libbalsa/libbalsa.c:581 +msgid "Accept&_Save" +msgstr "Chấp nhận và _LÆ°u" + +#: ../libbalsa/libbalsa.c:582 mozilla/CookiePromptService.cpp:151 +msgid "_Reject" +msgstr "_Từ chối" + +#: ../libbalsa/mailbox.c:400 +#, c-format +msgid "Cannot load mailbox %s" +msgstr "Không thể tải há»™p thÆ° %s" + +#: ../libbalsa/mailbox.c:407 +#, c-format +msgid "No such mailbox type: %s" +msgstr "Không có kiểu há»™p thÆ° nhÆ° vậy: %s" + +#: ../libbalsa/mailbox.c:424 +#, c-format 
+msgid "Bad local mailbox path \"%s\"" +msgstr "ÄÆ°á»ng dẫn há»™p thÆ° cục bá»™ sai « %s »" + +#: ../libbalsa/mailbox.c:429 +#, c-format +msgid "Could not create a mailbox of type %s" +msgstr "Không thể tạo há»™p thÆ° kiểu %s" + +#: ../libbalsa/mailbox.c:736 +#, c-format +msgid "Applying filter rules to %s" +msgstr "Äang áp dụng các quy tắc lá»c cho %s..." + +#: ../libbalsa/mailbox.c:835 +#, c-format +msgid "Copying from %s to %s" +msgstr "Äang sao chép %s sang %s..." + +#: ../libbalsa/mailbox.c:1779 +msgid "Removing messages from source mailbox failed" +msgstr "Việc gỡ bá» thÆ° ra há»™p thÆ° nguồn bị lá»—i." + +#: ../libbalsa/mailbox.c:3559 +#, c-format +msgid "Searching %s for partial messages" +msgstr "Äang tìm kiếm trong %s có thÆ° riêng phần..." + +#: ../libbalsa/mailbox.c:3649 +msgid "Reconstructing message" +msgstr "Äang cấu tạo lại thÆ°..." + +#. ImapIssue macro handles reconnecting. We might issue a +#. LIBBALSA_INFORMATION_MESSAGE here but it would be overwritten by +#. login information... +#: ../libbalsa/mailbox_imap.c:539 +msgid "IMAP connection has been severed. Reconnecting..." +msgstr "Kết nối IMAP đã bị ngắt. Äang tái kết nối..." + +#: ../libbalsa/mailbox_imap.c:542 +#, c-format +msgid "IMAP server has shut the connection: %s Reconnecting..." +msgstr "Máy phục vụ IMAP đã đóng kết nối: %s. Äang tái kết nối..." + +#: ../libbalsa/mailbox_imap.c:1014 ../libbalsa/mailbox_maildir.c:527 +#: ../libbalsa/mailbox_mbox.c:716 ../libbalsa/mailbox_mh.c:553 +#, c-format +msgid "%s: Opening %s Refcount: %d\n" +msgstr "%s: Äang mở %s Số đếm tham chiếu : %d\n" + +#: ../libbalsa/mailbox_imap.c:1088 ../libbalsa/mailbox_imap.c:2228 +#, c-format +msgid "Downloading %ld kB" +msgstr "Äang tải vá» %ld kB..." + +#: ../libbalsa/mailbox_imap.c:1468 +#, c-format +msgid "" +"IMAP SEARCH command failed for mailbox %s\n" +"falling back to default searching method" +msgstr "" +"Lệnh tìm kiếm IMAP bị lá»—i cho há»™p thÆ° %s\n" +"nhÆ° thế thì Ä‘ang dùng phÆ°Æ¡ng pháp tìm kiếm mặc định." + +#: ../libbalsa/mailbox_imap.c:1528 +#, c-format +msgid "No path found for mailbox \"%s\", using \"%s\"" +msgstr "Không tìm thấy Ä‘Æ°á»ng dẫn cho há»™p thÆ° « %s », dùng « %s »." + +#: ../libbalsa/mailbox_imap.c:1617 ../libbalsa/mailbox_imap.c:1658 +msgid "Cannot get IMAP handle" +msgstr "Không thể lấy bá»™ quản lý IMAP" + +#: ../libbalsa/mailbox_imap.c:2261 +msgid "Cannot create temporary file" +msgstr "Không thể tạo tập tin tạm thá»i." + +#: ../libbalsa/mailbox_imap.c:2280 +#, c-format +msgid "Cannot write to temporary file %s" +msgstr "Không thể ghi vào tập tin tạm thá»i %s." + +#: ../libbalsa/mailbox_imap.c:2497 +#, c-format +msgid "Uploading %ld kB" +msgstr "Äang tải lên %ld kB" + +#: ../libbalsa/mailbox_imap.c:2681 +msgid "Server-side threading not supported." +msgstr "Khả năng sắp xếp theo nhánh bên máy phục vụ không được há»— trợ." + +#: ../libbalsa/mailbox_local.c:493 ../libbalsa/mailbox_mbox.c:412 +#, c-format +msgid "Failed to create temporary file \"%s\": %s" +msgstr "Việc tạo tập tin tạm thá»i « %s » bị lá»—i: %s." + +#: ../libbalsa/mailbox_local.c:512 +#, c-format +msgid "Failed to save cache file \"%s\": %s." +msgstr "Việc lÆ°u tập tin lÆ°u tạm « %s » bị lá»—i: %s." + +#: ../libbalsa/mailbox_local.c:522 ../libbalsa/mailbox_mbox.c:423 +#, c-format +msgid "Failed to save cache file \"%s\": %s. New version saved as \"%s\"" +msgstr "" +"Việc lÆ°u tập tin lÆ°u tạm « %s » bị lá»—i: %s. Phiên bản má»›i đã được lÆ°u dạng « " +"%s »." 
+
+#: ../libbalsa/mailbox_local.c:553
+#, c-format
+msgid "Cache file for mailbox %s will be created"
+msgstr "Sẽ tạo tập tin lưu tạm cho hộp thư %s."
+
+#: ../libbalsa/mailbox_local.c:562
+#, c-format
+msgid "Failed to read cache file %s: %s"
+msgstr "Việc đọc tập tin lưu tạm %s bị lỗi: %s."
+
+#: ../libbalsa/mailbox_local.c:580 ../libbalsa/mailbox_local.c:595
+#: ../libbalsa/mailbox_local.c:618
+#, c-format
+msgid "Cache file for mailbox %s will be repaired"
+msgstr "Sẽ sửa chữa tập tin lưu tạm cho hộp thư %s."
+
+#: ../libbalsa/mailbox_local.c:1054
+#, c-format
+msgid "Filtering %s"
+msgstr "Đang lọc %s..."
+
+#: ../libbalsa/mailbox_local.c:1142 apt-pkg/deb/dpkgpm.cc:358
+#, c-format
+msgid "Preparing %s"
+msgstr "Đang chuẩn bị %s..."
+
+#: ../libbalsa/mailbox_local.c:1924
+#, c-format
+msgid "Open of %s failed. Errno = %d, "
+msgstr "Việc mở %s bị lỗi. Lỗi số = %d, "
+
+#: ../libbalsa/mailbox_local.c:1946
+#, c-format
+msgid "Failed to sync mailbox \"%s\""
+msgstr "Việc đồng bộ hóa hộp thư « %s » bị lỗi."
+
+#: ../libbalsa/mailbox_maildir.c:216
+#, c-format
+msgid "Mailbox %s does not appear to be a Maildir mailbox."
+msgstr "Hình như hộp thư %s không phải là hộp thư kiểu Maildir."
+
+#: ../libbalsa/mailbox_maildir.c:225
+#, c-format
+msgid "Could not create a MailDir directory at %s (%s)"
+msgstr "Không thể tạo một thư mục MailDir tại %s (%s)."
+
+#: ../libbalsa/mailbox_maildir.c:233 ../libbalsa/mailbox_maildir.c:242
+#: ../libbalsa/mailbox_maildir.c:253
+#, c-format
+msgid "Could not create a MailDir at %s (%s)"
+msgstr "Không thể tạo một thư mục MailDir tại %s (%s)."
+
+#: ../libbalsa/mailbox_maildir.c:360 ../libbalsa/mailbox_mh.c:311
+#, c-format
+msgid ""
+"Could not remove contents of %s:\n"
+"%s"
+msgstr ""
+"Không thể gỡ bỏ nội dung của %s:\n"
+"%s"
+
+#: ../libbalsa/mailbox_maildir.c:366 ../libbalsa/mailbox_mbox.c:309
+#: ../libbalsa/mailbox_mh.c:317
+#, c-format
+msgid ""
+"Could not remove %s:\n"
+"%s"
+msgstr ""
+"Không thể gỡ bỏ %s:\n"
+"%s"
+
+#: ../libbalsa/mailbox_maildir.c:506 ../libbalsa/mailbox_mbox.c:663
+#: ../libbalsa/mailbox_mh.c:534
+msgid "Mailbox does not exist."
+msgstr "Hộp thư không tồn tại."
+
+#: ../libbalsa/mailbox_maildir.c:975 ../libbalsa/mailbox_mbox.c:1976
+#: ../libbalsa/mailbox_mh.c:1160
+msgid "Data copy error"
+msgstr "Lỗi sao chép dữ liệu"
+
+#: ../libbalsa/mailbox_mbox.c:211
+#, c-format
+msgid "Mailbox %s does not appear to be an Mbox mailbox."
+msgstr "Hộp thư %s có vẻ không phải là một hộp thư kiểu Mbox."
+
+#: ../libbalsa/mailbox_mbox.c:397
+#, c-format
+msgid "Could not write file %s: %s"
+msgstr "Không thể ghi tập tin %s: %s"
+
+#: ../libbalsa/mailbox_mbox.c:430
+#, c-format
+msgid "Could not unlink file %s: %s"
+msgstr "Không thể bỏ liên kết tập tin %s: %s"
+
+#: ../libbalsa/mailbox_mbox.c:671
+msgid "Cannot open mailbox."
+msgstr "Không thể mở hộp thư."
+
+#: ../libbalsa/mailbox_mbox.c:683
+msgid "Mailbox is not in mbox format."
+msgstr "Hộp thư không phải dạng mbox."
+
+#: ../libbalsa/mailbox_mbox.c:691
+msgid "Cannot lock mailbox."
+msgstr "Không thể khoá hộp thư."
+
+#: ../libbalsa/mailbox_mbox.c:1933
+#, c-format
+msgid "%s: could not open %s."
+msgstr "%s: không thể mở %s."
+
+#: ../libbalsa/mailbox_mbox.c:1944
+#, c-format
+msgid "%s: could not get new mime stream."
+msgstr "%s: không thể lấy luồng MIME mới."
+
+#: ../libbalsa/mailbox_mbox.c:1952
+#, c-format
+msgid "%s: %s is not in mbox format."
+msgstr "%s: %s không phải dạng mbox."
+
+#: ../libbalsa/mailbox_mh.c:192
+#, c-format
+msgid "Mailbox %s does not appear to be a Mh mailbox."
+msgstr "Hộp thư %s có vẻ không phải là hộp thư dạng Mh."
+
+#: ../libbalsa/mailbox_mh.c:202
+#, c-format
+msgid "Could not create MH directory at %s (%s)"
+msgstr "Không thể tạo thư mục MH tại %s (%s)."
+
+#: ../libbalsa/mailbox_mh.c:214
+#, c-format
+msgid "Could not create MH structure at %s (%s)"
+msgstr "Không thể tạo cấu trúc MH tại %s (%s)."
+
+#: ../libbalsa/mailbox_mh.c:1141
+msgid "Cannot create message"
+msgstr "Không thể tạo thư."
+
+#: ../libbalsa/mailbox_mh.c:1185
+msgid "Message rename error"
+msgstr "Lỗi thay đổi tên thư."
+
+#: ../libbalsa/mailbox_pop3.c:189
+#, c-format
+msgid "Error appending message %d from %s to %s: %s"
+msgstr "Gặp lỗi khi phụ thêm thư %d từ %s vào %s: %s"
+
+#: ../libbalsa/mailbox_pop3.c:338
+#, c-format
+msgid "Saving POP message to %s failed"
+msgstr "Việc lưu thư POP vào %s bị lỗi."
+
+#: ../libbalsa/mailbox_pop3.c:345
+#, c-format
+msgid "Retrieving Message %d of %d"
+msgstr "Đang lấy thư %d trên %d..."
+
+#: ../libbalsa/mailbox_pop3.c:368
+#, c-format
+msgid "Received %ld kB of %ld"
+msgstr "Đã nhận %ld kB trên %ld."
+
+#: ../libbalsa/mailbox_pop3.c:382
+#, c-format
+msgid "Saving POP message to %s failed."
+msgstr "Việc lưu thư POP vào %s bị lỗi."
+
+#: ../libbalsa/mailbox_pop3.c:392
+msgid "Transfering POP message to %s failed."
+msgstr "Việc truyền thư POP tới %s bị lỗi."
+
+#: ../libbalsa/mailbox_pop3.c:468
+#, c-format
+msgid ""
+"POP3 mailbox %s temp file error:\n"
+"%s"
+msgstr ""
+"Lỗi tập tin tạm của hộp thư POP3 %s:\n"
+"%s"
+
+#: ../libbalsa/mailbox_pop3.c:493
+#, c-format
+msgid "POP3 mailbox %s error: %s\n"
+msgstr "Lỗi hộp thư POP3 %s: %s\n"
+
+#: ../libbalsa/mailbox_pop3.c:530
+#, c-format
+msgid "POP3 message %d oversized: %d kB - skipped."
+msgstr "Thư POP3 %d quá lớn: %d kB - bị bỏ qua."
+
+#: ../libbalsa/mailbox_pop3.c:542
+#, c-format
+msgid "POP3 error: cannot open %s for writing."
+msgstr "Lỗi POP3: không thể mở %s để ghi."
+
+#: ../libbalsa/mailbox_pop3.c:550
+#, c-format
+msgid "POP3 error: cannot close %s."
+msgstr "Lỗi POP3: không thể đóng %s."
+
+#: ../libbalsa/mailbox_pop3.c:583
+#, c-format
+msgid "POP3 error: %s."
+msgstr "Lỗi POP3: %s."
+
+#: ../libbalsa/message.c:576 ../libbalsa/message.c:616
+#, c-format
+msgid "Mailbox (%s) is readonly: cannot change flags."
+msgstr "Hộp thư (%s) chỉ cho phép đọc nên không thể thay đổi cờ."
+
+#: ../libbalsa/misc.c:1248
+msgid "west european"
+msgstr "Tây Âu"
+
+#: ../libbalsa/misc.c:1250 ../libbalsa/misc.c:1284
+msgid "east european"
+msgstr "Đông Âu"
+
+#: ../libbalsa/misc.c:1252
+msgid "south european"
+msgstr "Nam Âu"
+
+#: ../libbalsa/misc.c:1254
+msgid "north european"
+msgstr "Bắc Âu"
+
+#: ../libbalsa/misc.c:1256 ../libbalsa/misc.c:1286
+msgid "cyrillic"
+msgstr "Ki-rin"
+
+#: ../libbalsa/misc.c:1258 ../libbalsa/misc.c:1292
+msgid "arabic"
+msgstr "Ả Rập"
+
+#: ../libbalsa/misc.c:1260 ../libbalsa/misc.c:1288
+msgid "greek"
+msgstr "Hy-lạp"
+
+#: ../console-keymaps-acorn.templates:3 ../console-keymaps-at.templates:3
+msgid "hebrew"
+msgstr "Do-thái"
+
+#: ../libbalsa/misc.c:1264
+msgid "turkish"
+msgstr "Thổ-nhĩ-kỳ"
+
+#: ../libbalsa/misc.c:1266
+msgid "nordic"
+msgstr "Xcăng-đi-na-vi"
+
+#: ../libbalsa/misc.c:1268
+msgid "thai"
+msgstr "Thái"
+
+#: ../libbalsa/misc.c:1270 ../libbalsa/misc.c:1294
+msgid "baltic"
+msgstr "Ban-tích"
+
+#: ../libbalsa/misc.c:1272
+msgid "celtic"
+msgstr "Xen-tơ"
+
+#: ../libbalsa/misc.c:1274
+msgid "west europe (euro)"
+msgstr "Tây Âu (€)"
+
+#: ../libbalsa/misc.c:1276
+msgid "russian"
+msgstr "Nga"
+
+#: ../libbalsa/misc.c:1278
+msgid "ukranian"
+msgstr "U-cợ-rainh"
+
+#: ../libbalsa/misc.c:1280
+msgid "japanese"
+msgstr "Nhật-bản"
+
+#: ../libbalsa/misc.c:1282
+msgid "korean"
+msgstr "Hàn Quốc"
+
+#: ../libbalsa/misc.c:1900
+msgid "Timeout exceeded while attempting fcntl lock!"
+msgstr "• Quá thời hạn khi cố khoá fcntl. •"
+
+#: ../libbalsa/misc.c:1907
+#, c-format
+msgid "Waiting for fcntl lock... %d"
+msgstr "Đang đợi khoá fcntl... %d"
+
+#: ../libbalsa/misc.c:1935
+msgid "Timeout exceeded while attempting flock lock!"
+msgstr "• Quá thời hạn khi cố khoá flock. •"
+
+#: ../libbalsa/misc.c:1942
+#, c-format
+msgid "Waiting for flock attempt... %d"
+msgstr "Đang đợi cố flock... %d"
+
+#: ../libbalsa/misc.c:2098
+msgid "No image data"
+msgstr "Không có dữ liệu ảnh"
+
+#: ../libbalsa/misc.c:2132
+msgid "Invalid input format"
+msgstr "Dạng thức nhập không hợp lệ"
+
+#: ../libbalsa/misc.c:2136
+msgid "Internal buffer overrun"
+msgstr "Tràn qua bộ đệm nội bộ"
+
+#. Translators: please do not translate Face.
+#: ../libbalsa/misc.c:2153
+msgid "Bad X-Face data"
+msgstr "Dữ liệu X-Face sai"
+
+#: ../libbalsa/rfc3156.c:86
+#, c-format
+msgid "Gpgme has been compiled without support for protocol %s."
+msgstr "Gpgme đã được biên dịch không có hỗ trợ giao thức %s."
+
+#: ../libbalsa/rfc3156.c:91
+#, c-format
+msgid "Crypto engine %s is not installed properly."
+msgstr "Cơ chế mật mã %s không được cài đặt đúng."
+
+#: ../libbalsa/rfc3156.c:96
+#, c-format
+msgid ""
+"Crypto engine %s version %s is installed, but at least version %s is "
+"required."
+msgstr ""
+"Cơ chế mật mã %s phiên bản %s đã được cài đặt, nhưng cần ít nhất phiên bản "
+"%s."
+
+#: ../libbalsa/rfc3156.c:103
+#, c-format
+msgid "Unknown problem with engine for protocol %s."
+msgstr "Gặp lỗi lạ với cơ chế cho giao thức %s."
+
+#: ../libbalsa/rfc3156.c:108
+#, c-format
+msgid "%s: could not retreive crypto engine information: %s."
+msgstr "%s: không thể lấy thông tin cơ chế mật mã: %s."
+
+#: ../libbalsa/rfc3156.c:112
+#, c-format
+msgid ""
+"\n"
+"Disable support for protocol %s."
+msgstr ""
+"\n"
+"Tắt hỗ trợ giao thức %s."
+
+#: ../libbalsa/rfc3156.c:254 ../libbalsa/rfc3156.c:259
+#: ../libbalsa/rfc3156.c:346 ../libbalsa/rfc3156.c:351
+#: ../libbalsa/rfc3156.c:515 ../libbalsa/rfc3156.c:520
+#: ../libbalsa/rfc3156.c:625 ../libbalsa/rfc3156.c:630
+#: ../libbalsa/rfc3156.c:739 ../libbalsa/rfc3156.c:744
+#: ../libbalsa/rfc3156.c:852 ../libbalsa/rfc3156.c:857
+#: ../libbalsa/rfc3156.c:932 ../libbalsa/rfc3156.c:937
+msgid "creating a gpgme context failed"
+msgstr "việc tạo ngữ cảnh GPGME bị lỗi"
+
+#: ../libbalsa/rfc3156.c:272 ../libbalsa/rfc3156.c:757
+msgid "Enter passphrase to unlock the secret key for signing"
+msgstr "Nhập cụm từ mật khẩu để mở khoá khoá bí mật dùng để ký tên."
+
+#: ../libbalsa/rfc3156.c:293 ../libbalsa/rfc3156.c:297
+#: ../libbalsa/rfc3156.c:789
+msgid "signing failed"
+msgstr "việc ký tên bị lỗi"
+
+#: ../libbalsa/rfc3156.c:400 ../libbalsa/rfc3156.c:405
+#: ../libbalsa/rfc3156.c:792
+msgid "encryption failed"
+msgstr "việc mật mã bị lỗi"
+
+#: ../libbalsa/rfc3156.c:548 ../libbalsa/rfc3156.c:553
+#: ../libbalsa/rfc3156.c:868 ../libbalsa/rfc3156.c:874
+msgid "signature verification failed"
+msgstr "việc thẩm tra chữ ký bị lỗi"
+
+#: ../libbalsa/rfc3156.c:642 ../libbalsa/rfc3156.c:948
+msgid "Enter passphrase to decrypt message"
+msgstr "Hãy gõ cụm từ mật khẩu để giải mật mã thư."
+
+#: ../libbalsa/rfc3156.c:675 ../libbalsa/rfc3156.c:680
+msgid "decryption failed"
+msgstr "việc giải mật mã bị lỗi"
+
+#: ../libbalsa/rfc3156.c:785 ../libbalsa/rfc3156.c:799
+msgid "signing and encryption failed"
+msgstr "việc ký tên và mật mã bị lỗi"
+
+#: ../libbalsa/rfc3156.c:802
+#, c-format
+msgid "signing failed: %s"
+msgstr "việc ký tên bị lỗi: %s"
+
+#: ../libbalsa/rfc3156.c:805
+#, c-format
+msgid "encryption failed: %s"
+msgstr "việc mật mã bị lỗi: %s"
+
+#: ../libbalsa/rfc3156.c:957 ../libbalsa/rfc3156.c:964
+msgid "decryption and signature verification failed"
+msgstr "việc giải mật mã và thẩm tra chữ ký bị lỗi"
+
+#: ../libbalsa/rfc3156.c:995
+msgid "The signature is valid."
+msgstr "Chữ ký là hợp lệ."
+
+#: ../libbalsa/rfc3156.c:997
+msgid "The signature is valid but expired."
+msgstr "Chữ ký là hợp lệ nhưng đã hết hạn."
+
+#: ../libbalsa/rfc3156.c:1000
+msgid ""
+"The signature is valid but the key used to verify the signature has expired."
+msgstr "Chữ ký là hợp lệ nhưng khoá dùng để thẩm tra chữ ký đã hết hạn dùng."
+
+#: ../libbalsa/rfc3156.c:1003
+msgid ""
+"The signature is valid but the key used to verify the signature has been "
+"revoked."
+msgstr "Chữ ký là hợp lệ nhưng khoá dùng để thẩm tra chữ ký đã bị hủy bỏ."
+
+#: ../libbalsa/rfc3156.c:1006
+msgid "The signature is invalid."
+msgstr "Chữ ký không hợp lệ."
+
+#: ../libbalsa/rfc3156.c:1009
+msgid "The signature could not be verified due to a missing key."
+msgstr "Không thể thẩm tra chữ ký do thiếu khoá."
+
+#: ../libbalsa/rfc3156.c:1011
+msgid "This part is not a real PGP signature."
+msgstr "Phần này không phải là chữ ký PGP thật."
+
+#: ../libbalsa/rfc3156.c:1014
+msgid "The signature could not be verified due to an invalid crypto engine."
+msgstr "Không thể thẩm tra chữ ký do cơ chế mật mã không hợp lệ."
+
+#: ../libbalsa/rfc3156.c:1017 ../libbalsa/rfc3156.c:1757
+msgid "GnuPG is rebuilding the trust database and is currently unavailable."
+msgstr ""
+"GnuPG đang xây dựng lại cơ sở dữ liệu tin cậy, và hiện thời không sẵn sàng."
+
+#: ../libbalsa/rfc3156.c:1020
+msgid "An error prevented the signature verification."
+msgstr "Một lỗi đã ngăn cản việc thẩm tra chữ ký."
+
+#: ../libbalsa/rfc3156.c:1028
+msgid "The user ID is of unknown validity."
+msgstr "ID người dùng có độ hợp lệ không rõ."
+
+#: ../libbalsa/rfc3156.c:1030
+msgid "The validity of the user ID is undefined."
+msgstr "ID người dùng có độ hợp lệ chưa định nghĩa."
+
+#: ../libbalsa/rfc3156.c:1032
+msgid "The user ID is never valid."
+msgstr "ID người dùng không bao giờ hợp lệ."
+
+#: ../libbalsa/rfc3156.c:1034
+msgid "The user ID is marginally valid."
+msgstr "ID người dùng là hợp lệ sát giới hạn."
+
+#: ../libbalsa/rfc3156.c:1036
+msgid "The user ID is fully valid."
+msgstr "ID người dùng là hợp lệ đầy đủ."
+
+#: ../libbalsa/rfc3156.c:1038
+msgid "The user ID is ultimately valid."
+msgstr "ID người dùng là hợp lệ sau cùng."
+
+#: ../libbalsa/rfc3156.c:1040 ../libbalsa/rfc3156.c:1285
+msgid "bad validity"
+msgstr "độ hợp lệ sai"
+
+#: ../libbalsa/rfc3156.c:1048
+msgid "PGP signature: "
+msgstr "Chữ ký PGP: "
+
+#: ../libbalsa/rfc3156.c:1050
+msgid "S/MIME signature: "
+msgstr "Chữ ký S/MIME: "
+
+#: ../libbalsa/rfc3156.c:1052
+msgid "(unknown protocol) "
+msgstr "(giao thức không rõ) "
+
+#: ../libbalsa/rfc3156.c:1071
+#, c-format
+msgid ""
+"\n"
+"User ID: %s"
+msgstr ""
+"\n"
+"ID người dùng: %s"
+
+#: ../libbalsa/rfc3156.c:1074
+#, c-format
+msgid ""
+"\n"
+"Signed by: %s"
+msgstr ""
+"\n"
+"Ký tên do : %s"
+
+#: ../libbalsa/rfc3156.c:1078
+#, c-format
+msgid ""
+"\n"
+"Mail address: %s"
+msgstr ""
+"\n"
+"Địa chỉ thư : %s"
+
+#: ../libbalsa/rfc3156.c:1083
+#, c-format
+msgid ""
+"\n"
+"Signed on: %s"
+msgstr ""
+"\n"
+"Ký tên vào : %s"
+
+#: ../libbalsa/rfc3156.c:1085
+#, c-format
+msgid ""
+"\n"
+"User ID validity: %s"
+msgstr ""
+"\n"
+"Độ hợp lệ ID người dùng: %s"
+
+#: ../libbalsa/rfc3156.c:1089
+#, c-format
+msgid ""
+"\n"
+"Key owner trust: %s"
+msgstr ""
+"\n"
+"Độ tin cậy chủ khoá: %s"
+
+#: ../libbalsa/rfc3156.c:1093
+#, c-format
+msgid ""
+"\n"
+"Key fingerprint: %s"
+msgstr ""
+"\n"
+"Vân tay khoá: %s"
+
+#: ../libbalsa/rfc3156.c:1098
+#, c-format
+msgid ""
+"\n"
+"Subkey created on: %s"
+msgstr ""
+"\n"
+"Khoá phụ tạo vào : %s"
+
+#: ../libbalsa/rfc3156.c:1103
+#, c-format
+msgid ""
+"\n"
+"Subkey expires on: %s"
+msgstr ""
+"\n"
+"Khoá phụ hết hạn vào : %s"
+
+#: ../libbalsa/rfc3156.c:1112
+msgid " revoked"
+msgstr " bị hủy bỏ"
+
+#: ../libbalsa/rfc3156.c:1117
+msgid " expired"
+msgstr " đã hết hạn"
+
+#: ../libbalsa/rfc3156.c:1122
+msgid " disabled"
+msgstr " bị tắt"
+
+#: ../libbalsa/rfc3156.c:1127
+msgid " invalid"
+msgstr " không hợp lệ"
+
+#: ../libbalsa/rfc3156.c:1130
+#, c-format
+msgid ""
+"\n"
+"Subkey attributes:%s"
+msgstr ""
+"\n"
+"Thuộc tính khoá phụ : %s"
+
+#: ../libbalsa/rfc3156.c:1132
+#, c-format
+msgid ""
+"\n"
+"Subkey attribute:%s"
+msgstr ""
+"\n"
+"Thuộc tính khoá phụ : %s"
+
+#: ../libbalsa/rfc3156.c:1138
+#, c-format
+msgid ""
+"\n"
+"Issuer name: %s"
+msgstr ""
+"\n"
+"Tên nhà phát hành: %s"
+
+#: ../libbalsa/rfc3156.c:1142
+#, c-format
+msgid ""
+"\n"
+"Issuer serial number: %s"
+msgstr ""
+"\n"
+"Số sản xuất của nhà phát hành: %s"
+
+#: ../libbalsa/rfc3156.c:1145
+#, c-format
+msgid ""
+"\n"
+"Chain ID: %s"
+msgstr ""
+"\n"
+"ID dây: %s"
+
+#: ../libbalsa/rfc3156.c:1192
+#, c-format
+msgid "Could not launch %s to get the public key %s."
+msgstr "Không thể khởi chạy %s để lấy khoá công %s."
+
+#: ../libbalsa/rfc3156.c:1242
+#, c-format
+msgid ""
+"Running gpg failed with return value %d:\n"
+"%s"
+msgstr ""
+"Việc chạy GPG bị lỗi với giá trị gởi trả %d:\n"
+"%s"
+
+#: ../libbalsa/rfc3156.c:1249
+#, c-format
+msgid ""
+"Running gpg successful:\n"
+"%s"
+msgstr ""
+"Việc chạy GPG thành công:\n"
+"%s"
+
+#: ../ui/mlview-validation-report.glade.h:10 ../src/glade-gtk.c:3636
+#: src/friends.c:364 libexif/olympus/mnote-olympus-entry.c:381
+#: ../bin/ical-dump.c:71 ../bin/ical-dump.c:97 ../src/orca/rolenames.py:484
+#: ../freedesktop.org.xml.in.h:385
+msgid "unknown"
+msgstr "không rõ"
+
+#: ../libbalsa/rfc3156.c:1275 avr-dis.c:112 avr-dis.c:122
+#, c-format
+msgid "undefined"
+msgstr "chưa định nghĩa"
+
+#: ../libbalsa/rfc3156.c:1277
+msgid "never"
+msgstr "không bao giờ"
+
+#: ../libbalsa/rfc3156.c:1279
+msgid "marginal"
+msgstr "sát giới hạn"
+
+#: src/www.c:436 src/www.c:559 src/www.c:560 src/www.c:561 src/www.c:562
+msgid "full"
+msgstr "đầy đủ"
+
+#: ../libbalsa/rfc3156.c:1283
+msgid "ultimate"
+msgstr "cuối cùng"
+
+#: ../libbalsa/rfc3156.c:1346 restore_gui.c:295
+msgid "User ID"
+msgstr "ID người dùng"
+
+#: ../libbalsa/rfc3156.c:1346
+msgid "Key ID"
+msgstr "ID khoá"
+
+#: ../libgnomedb/gnome-db-selector.c:500 ../libgnomedb/sel-onetable.c:209
+#: app/envelope-box.c:878
+msgid "Length"
+msgstr "Dài"
+
+# Name: don't translate / Tên: đừng dịch
+#: ../libbalsa/rfc3156.c:1346
+msgid "Validity"
+msgstr "Hợp lệ"
+
+#. FIXME: create dialog according to the Gnome HIG
+#: ../libbalsa/rfc3156.c:1390
+msgid "Select key"
+msgstr "Chọn khoá"
+
+#: ../libbalsa/rfc3156.c:1401
+#, c-format
+msgid "Select the private key for the signer %s"
+msgstr "Chọn khoá riêng cho người ký tên %s"
+
+#: ../libbalsa/rfc3156.c:1405
+#, c-format
+msgid "Select the public key for the recipient %s"
+msgstr "Chọn khoá công cho người nhận %s"
+
+#: ../libbalsa/rfc3156.c:1536
+#, c-format
+msgid "Insufficient trust for recipient %s"
+msgstr "Không đủ tin cậy cho người nhận %s"
+
+#: ../libbalsa/rfc3156.c:1538
+#, c-format
+msgid "The validity of the key with user ID \"%s\" is \"%s\"."
+msgstr "Độ hợp lệ của khoá có ID người dùng « %s » là « %s »."
+
+#: ../libbalsa/rfc3156.c:1549
+msgid "Use this key anyway?"
+msgstr "Vẫn dùng khoá này không?"
+
+#: ../libbalsa/rfc3156.c:1758
+msgid "Try again later."
+msgstr "Thử lại sau."
+
+#: ../libbalsa/send.c:243 ../libbalsa/send.c:250
+msgid "Sending Mail..."
+msgstr "Đang gởi thư..."
+
+#: ../libbalsa/send.c:246 ../src/main-window.c:2790
+#: ../bonobo/bonobo-ui-config-widget.c:282
+msgid "_Hide"
+msgstr "Ẩ_n"
+
+#: ../libbalsa/send.c:453
+#, c-format
+msgid "Copying message to outbox failed: %s"
+msgstr "Việc sao chép thư vào hộp Thư Đi bị lỗi: %s"
+
+#: ../libbalsa/send.c:882
+#, c-format
+msgid ""
+"Could not send the message to %s:\n"
+"%d: %s\n"
+"Message left in your outbox.\n"
+msgstr ""
+"Không thể gởi thư cho %s:\n"
+"%d: %s\n"
+"Thư còn lại trong hộp Thư Đi của bạn.\n"
+
+#: ../libbalsa/send.c:926
+#, c-format
+msgid "Saving sent message to %s failed: %s"
+msgstr "Việc lưu thư đã gởi vào %s bị lỗi: %s"
+
+#: ../libbalsa/send.c:946
+#, c-format
+msgid ""
+"Relaying refused:\n"
+"%d: %s\n"
+"Message left in your outbox.\n"
+msgstr ""
+"Việc tiếp lại bị từ chối:\n"
+"%d: %s\n"
+"Thư còn lại trong hộp Thư Đi của bạn.\n"
+
+#: ../libbalsa/send.c:956
+msgid ""
+"Message submission problem, placing it into your outbox.\n"
+"System will attempt to resubmit the message until you delete it."
+msgstr "" +"Lá»—i đệ trình thÆ°, bá» nó vào trong há»™p ThÆ° Äi.\n" +"Hệ thống sẽ cố đệ trình lại thÆ° cho tá»›i khi bạn xoá nó." + +#: ../libbalsa/send.c:986 +msgid "Connected to MTA" +msgstr "Äã kết nối đến MTA" + +#. status code, mailbox +#: ../libbalsa/send.c:995 +#, c-format +msgid "From: %d <%s>" +msgstr "Từ : %d <%s>" + +#. mailbox, status code, status text +#: ../libbalsa/send.c:999 +#, c-format +msgid "From %s: %d %s" +msgstr "Từ %s: %d %s" + +#. status code, mailbox +#: ../libbalsa/send.c:1010 +#, c-format +msgid "To: %d <%s>" +msgstr "Cho : %d <%s>" + +#. mailbox, status code, status text +#: ../libbalsa/send.c:1014 +#, c-format +msgid "To %s: %d %s" +msgstr "Cho %s: %d %s" + +#: ../libbalsa/send.c:1052 ../src/netstatus-util.c:152 ../src/netapplet.c:152 +msgid "Disconnected" +msgstr "Kết nối bị ngắt" + +#: ../libbalsa/send.c:1325 +msgid "" +"SMTP server refused connection.\n" +"Check your internet connection." +msgstr "" +"Máy phục vụ SMTP đã từ chối kết nối.\n" +"Hãy kiểm tra xem có kết nối ná»™i bá»™ đúng." + +#: ../libbalsa/send.c:1330 +msgid "" +"SMTP server cannot be reached.\n" +"Check your internet connection." +msgstr "" +"Không thể tá»›i máy phục vụ SMTP.\n" +"Hãy kiểm tra xem có kết nối Mạng đúng." + +#: ../libbalsa/send.c:1339 +msgid "Message left in Outbox (try again later)" +msgstr "ThÆ° còn lại trong há»™p ThÆ° Äi (thá»­ lại sau)." + +#: ../libbalsa/send.c:1343 +#, c-format +msgid "" +"SMTP server problem (%d): %s\n" +"Message is left in outbox." +msgstr "" +"Vấn Ä‘á» máy phục vụ SMTP (%d): %s\n" +"ThÆ° còn lại trong há»™p ThÆ° Äi." + +#: ../libbalsa/send.c:1648 +#, c-format +msgid "Cannot determine charset for text file `%s'; sending as mime type `%s'" +msgstr "" +"Không thể giải quyết bá»™ ký tá»± cho tập tin văn bản « %s » nên Ä‘ang gởi dạng " +"kiểu MIME « %s »." + +#: ../libbalsa/send.c:1852 +#, c-format +msgid "Postponing message failed: %s" +msgstr "Việc hoãn thÆ° bị lá»—i: %s" + +#: ../libbalsa/send.c:1995 ../libbalsa/send.c:2077 +msgid "This message will not be encrypted for the BCC: recipient(s)." +msgstr "ThÆ° này sẽ không được mật mã cho những ngÆ°á»i nhận BCC (Bí Mật Cho)." + +#. host, authentication method +#: ../libbalsa/server.c:366 +#, c-format +msgid "Logging in to %s using %s" +msgstr "Äang đăng nhập vào %s bằng %s..." 
+
+#: ../mail/em-account-editor.c:766 ../src/personal_info.c:267
+#: ../widgets/gtk+.xml.in.h:125
+msgid "Never"
+msgstr "Không bao giờ"
+
+#: ../libbalsa/smtp-server.c:371 ../src/mailbox-conf.c:205
+#: ../src/mailbox-conf.c:1387
+msgid "If Possible"
+msgstr "Nếu có thể"
+
+#: ../libbalsa/smtp-server.c:372 ../src/mailbox-conf.c:206
+#: dselect/pkgdisplay.cc:60 ../src/personal_info.c:299
+msgid "Required"
+msgstr "Cần thiết"
+
+#: ../libbalsa/smtp-server.c:409 ../src/folder-conf.c:104
+#, c-format
+msgid "Error displaying %s: %s\n"
+msgstr "Gặp lỗi khi hiển thị %s: %s\n"
+
+#: ../libbalsa/smtp-server.c:525
+msgid "SMTP Server"
+msgstr "Máy phục vụ SMTP"
+
+#: ../libbalsa/smtp-server.c:547
+msgid "_Descriptive Name:"
+msgstr "Tên _mô tả:"
+
+#: ../src/baobab-remote-connect-dialog.c:448
+msgid "_Server:"
+msgstr "Máy _phục vụ :"
+
+#: ../libbalsa/smtp-server.c:561 ../src/baobab-remote-connect-dialog.c:540
+msgid "_User Name:"
+msgstr "Tên _người dùng:"
+
+#: ../libbalsa/smtp-server.c:568
+msgid "_Pass Phrase:"
+msgstr "Cụm từ _mật khẩu :"
+
+#: ../libbalsa/smtp-server.c:576 ../src/mailbox-conf.c:219
+msgid "Use _TLS:"
+msgstr "Dùng _TLS:"
+
+#: ../libbalsa/smtp-server.c:582
+msgid "C_ertificate Pass Phrase:"
+msgstr "_Cụm từ mật khẩu chứng nhận:"
+
+#: ../libbalsa/smtp-server.c:594
+msgid "Sp_lit message larger than"
+msgstr "Chia _tách thư lớn hơn"
+
+#: ../libbalsa/smtp-server.c:600 ../src/pref-manager.c:1697
+#: ../src/smart-playlist-dialog.c:185
+msgid "MB"
+msgstr "MB"
+
+#: ../ui/connect.glade.h:9 ../gmedia_format/gmedia_format.c:373
+msgid "_Close"
+msgstr "_Đóng"
+
+#: ../libbalsa/source-viewer.c:48 ../ec-job-list.c:783
+msgid "Close the window"
+msgstr "Đóng cửa sổ"
+
+#: ../libbalsa/source-viewer.c:49 ../plug-ins/common/curve_bend.c:1426
+#: src/floatwin.cpp:899 app/menubar.c:451 app/menubar.c:462 app/menubar.c:518
+#: app/menubar.c:549
+msgid "_Copy"
+msgstr "_Chép"
+
+#: ../libbalsa/source-viewer.c:50
+msgid "Copy text"
+msgstr "Chép văn bản"
+
+#: ../libbalsa/source-viewer.c:51 ../src/main-window.c:661
+msgid "_Select Text"
+msgstr "_Lựa chọn văn bản"
+
+#: ../libbalsa/source-viewer.c:52 ../src/main-window.c:662
+msgid "Select entire mail"
+msgstr "Chọn toàn bộ thư"
+
+#: ../libbalsa/source-viewer.c:57
+msgid "_Escape Special Characters"
+msgstr "T_hoát ký tự đặc biệt"
+
+#: ../libbalsa/source-viewer.c:58
+msgid "Escape special and non-ASCII characters"
+msgstr "Thoát mọi ký tự đặc biệt và khác ASCII"
+
+#: ../libbalsa/source-viewer.c:154
+msgid "Mailbox closed"
+msgstr "Hộp thư được đóng"
+
+#: ../libbalsa/source-viewer.c:257 ../ui/evolution-mail-message.xml.h:57
+#: ../ui/evolution-mail-message.xml.h:56
+msgid "Message Source"
+msgstr "Mã nguồn thư"
+
+#: ../libinit_balsa/balsa-druid-page-defclient.c:58
+msgid "Use balsa as default email client?"
+msgstr "Đặt trình balsa là ứng dụng khách thư điện tử mặc định không?"
+
+#: ../libinit_balsa/balsa-druid-page-defclient.c:62 ../gtk/gtkstock.c:412
+msgid "_Yes"
+msgstr "_Có"
+
+#: ../libinit_balsa/balsa-druid-page-defclient.c:64 ../gtk/gtkstock.c:387
+msgid "_No"
+msgstr "_Không"
+
+#: ../libinit_balsa/balsa-druid-page-defclient.c:100
+msgid "Default Client"
+msgstr "Ứng dụng khách mặc định"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:43
+#: ../storage/exchange-delegates.glade.h:14
+msgid "_Inbox:"
+msgstr "Thư _Đến:"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:43
+msgid "_Outbox:"
+msgstr "Thư _Đi:"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:43
+msgid "_Sentbox:"
+msgstr "Đã _Gởi:"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:43
+msgid "_Draftbox:"
+msgstr "Nhá_p:"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:44
+msgid "_Trash:"
+msgstr "_Rác:"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:71
+#, c-format
+msgid ""
+"The pathname \"%s\" must be specified canonically -- it must start with a "
+"'/'."
+msgstr ""
+"Phải ghi rõ đường dẫn « %s » theo đúng quy tắc -- nó phải bắt đầu bằng sổ "
+"chéo « / »."
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:135
+#, c-format
+msgid "The mailbox \"%s\" does not appear to be valid."
+msgstr "Hộp thư « %s » có vẻ không hợp lệ."
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:188
+msgid ""
+"Please verify the locations of your default mail files.\n"
+"These will be created if necessary."
+msgstr ""
+"Hãy kiểm tra lại vị trí các tập tin thư mặc định.\n"
+"Chúng sẽ được tạo nếu cần thiết."
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:246
+msgid "Mail Files"
+msgstr "Tập tin thư"
+
+#: ../libinit_balsa/balsa-druid-page-directory.c:349
+#: ../libinit_balsa/balsa-druid-page-directory.c:383
+#, c-format
+msgid ""
+"Problem Creating Mailboxes\n"
+"%s"
+msgstr ""
+"Gặp khó khăn khi tạo hộp thư\n"
+"%s"
+
+#: ../libinit_balsa/balsa-druid-page-finish.c:41
+msgid ""
+"You've successfully set up Balsa. Have fun!\n"
+" -- The Balsa development team"
+msgstr ""
+"Bạn đã thiết lập xong Balsa. Hãy tận hưởng!\n"
+" -- Nhóm phát triển Balsa"
+
+#: ../libinit_balsa/balsa-druid-page-finish.c:47
+msgid "All Done!"
+msgstr "Đã xong!"
+
+#: ../libinit_balsa/balsa-druid-page-finish.c:77
+msgid "GnomeCard Address Book"
+msgstr "Sổ địa chỉ GnomeCard"
+
+#: ../libinit_balsa/balsa-druid-page-finish.c:84
+#: ../libinit_balsa/balsa-druid-page-finish.c:92 ../src/ab-window.c:226
+#: ../libedataserverui/e-name-selector-dialog.glade.h:5 jpilot.c:2751
+msgid "Address Book"
+msgstr "Sổ địa chỉ"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:71
+msgid ""
+"The following settings are also needed (and you can find them later, if need "
+"be, in the Email application in the 'Preferences' and 'Identities' commands "
+"on the 'Tools' menu)"
+msgstr ""
+"Cũng cần những thiết lập theo đây (nếu cần, sau này bạn có thể tìm lại "
+"chúng trong ứng dụng Thư điện tử, qua lệnh « Tùy thích » và « Thực thể » "
+"trên trình đơn « Công cụ »)"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:76
+msgid ""
+" Whoever provides your email account should be able to give you the "
+"following information (if you have a Network Administrator, they may already "
+"have set this up for you):"
+msgstr ""
+"Nhà cung cấp tài khoản thư điện tử của bạn sẽ có thể cho bạn những thông "
+"tin theo đây (nếu bạn có quản trị mạng, có lẽ họ đã thiết lập sẵn cho bạn):"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:82
+msgid "Yes, remember it"
+msgstr "Có, nhớ đi"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:82
+msgid "No, type it in every time"
+msgstr "Không, gõ lại mỗi lần"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:114
+msgid "Name of mail server for incoming _mail:"
+msgstr "Tên máy _phục vụ thư cho thư gởi đến:"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:119
+msgid "_Type of mail server:"
+msgstr "_Kiểu máy phục vụ thư:"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:123
+msgid "Connect using _SSL:"
+msgstr "Kết nối bằng _SSL:"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:126
+msgid "Your email _login name:"
+msgstr "Tên _đăng nhập thư điện tử :"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:129
+msgid "Your _password:"
+msgstr "_Mật khẩu :"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:137
+msgid "_SMTP Server:"
+msgstr "Máy phục vụ _SMTP:"
+
+#. 2.1
+#: ../libinit_balsa/balsa-druid-page-user.c:142
+msgid "Your real _name:"
+msgstr "Tên _thật:"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:148
+msgid "Your _Email Address, for this email account:"
+msgstr "_Địa chỉ thư, cho tài khoản thư này:"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:153
+msgid "_Remember your password:"
+msgstr "_Nhớ mật khẩu :"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:156
+msgid "_Refer to this account as:"
+msgstr "Tên _riêng cho tài khoản này:"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:164
+msgid "_Local mail directory:"
+msgstr "Thư mục thư _cục bộ :"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:181
+msgid "User Settings"
+msgstr "Thiết lập người dùng"
+
+#: ../libinit_balsa/balsa-druid-page-user.c:333
+#, c-format
+msgid ""
+"Local Mail Problem\n"
+"%s"
+msgstr ""
+"Vấn đề thư cục bộ\n"
+"%s"
+
+#: ../libinit_balsa/balsa-druid-page-welcome.c:32
+msgid "Welcome to Balsa!"
+msgstr "Chào mừng bạn dùng Balsa!"
+
+#: ../libinit_balsa/balsa-druid-page-welcome.c:35
+msgid ""
+"Before you can send or receive email:\n"
+"\n"
+"-- either you should already have Internet access and an email account, "
+"provided by an Internet Service Provider, and you should have made that "
+"Internet connection on your computer\n"
+"\n"
+"-- or your Network Administrator at your place of work/study/similar may "
+"have set up your computer to connect to the network."
+msgstr ""
+"Trước khi bạn có thể gởi hoặc nhận thư:\n"
+"\n"
+"• hoặc bạn đã có truy cập Mạng và một tài khoản thư điện tử, do Nhà Cung "
+"Cấp Dịch Vụ Mạng (ISP) cung cấp, và bạn đã thiết lập kết nối Mạng đó trên "
+"máy tính này\n"
+"\n"
+"• hoặc quản trị mạng tại chỗ làm/học/tương tự đã thiết lập máy tính này để "
+"kết nối đến mạng."
+
+#: ../libinit_balsa/balsa-initdruid.c:81
+msgid ""
+"This will exit Balsa.\n"
+"Do you really want to do this?"
+msgstr ""
+"Hành động này sẽ thoát khỏi trình Balsa.\n"
+"Bạn có chắc muốn thực hiện không?"
+
+#: ../libinit_balsa/helper.c:66
+#, c-format
+msgid "Error loading %s: %s\n"
+msgstr "Gặp lỗi khi tải %s: %s\n"
+
+#: ../libinit_balsa/helper.c:202
+#, c-format
+msgid "The path %s must be relative to the filesystem root (start with /)."
+msgstr "" +"ÄÆ°á»ng dẫn %s phải liên quan đến gốc của hệ thống tập tin (bắt đầu bằng sổ " +"chếo « / »)." + +#: ../libinit_balsa/helper.c:215 ../libinit_balsa/helper.c:239 +#, c-format +msgid "Couldn't create a directory: mkdir() failed on pathname \"%s\"." +msgstr "" +"Không thể tạo thÆ° mục: việc « mkdir() » (tạo thÆ° mục) bị lá»—i trên tên Ä‘Æ°á»ng " +"dẫn « %s »." + +#: ../libinit_balsa/helper.c:225 ../libinit_balsa/helper.c:248 +#, c-format +msgid "The file with pathname \"%s\" is not a directory." +msgstr "Tập tin có tên Ä‘Æ°á»ng dẫn « %s » không phải là thÆ° mục." + +#: ../libinit_balsa/init_balsa.c:48 +msgid "Configure Balsa" +msgstr "Cấu hình Balsa" + +#: ../sounds/balsa.soundlist.in.h:1 +msgid "Balsa E-mail reader" +msgstr "Bá»™ Ä‘á»c thÆ° Balsa" + +#: ../sounds/balsa.soundlist.in.h:2 +msgid "New mail notification" +msgstr "Thông báo thÆ° má»›i" + +#: ../sounds/balsa.soundlist.in.h:3 +msgid "Program startup" +msgstr "Khởi chạy chÆ°Æ¡ng trình" + +#: ../src/ab-main.c:199 ../src/ab-window.c:673 +msgid " address book: " +msgstr " sổ địa chỉ: " + +#: ../src/ab-main.c:470 ../src/address-book-config.c:839 +msgid "VCard Address Book (GnomeCard)" +msgstr "Sổ địa chỉ vCard (Thẻ Gnome)" + +#: ../src/ab-main.c:472 ../src/address-book-config.c:845 +msgid "External query (a program)" +msgstr "Truy vấn bên ngoài (chÆ°Æ¡ng trình khác)" + +#: ../src/ab-main.c:474 ../src/address-book-config.c:850 +msgid "LDIF Address Book" +msgstr "Sổ địa chỉ LDIF" + +#: ../src/ab-main.c:477 ../src/address-book-config.c:856 +msgid "LDAP Address Book" +msgstr "Sổ địa chỉ LDAP" + +#: ../src/ab-main.c:481 ../src/address-book-config.c:457 +#: ../src/address-book-config.c:863 +msgid "GPE Address Book" +msgstr "Sổ địa chỉ GPE" + +#: ../src/lib/FeedPropertiesDialog.py:96 +#: ../storage/sunone-permissions-dialog.c:559 +#: ../src/glade-editor-property.c:2076 ../src/glade-widget.c:146 +#: src/gpsdrive.c:9462 address_gui.c:2775 KeyRing/keyring.c:1485 +#: ../mimedir/mimedir-attribute.c:131 ../mimedir/mimedir-vcard.c:298 +#: schroot/sbuild-chroot.cc:386 +msgid "Name" +msgstr "Tên" + +#: ../src/ab-main.c:791 +msgid "F_ilter:" +msgstr "_Lá»c:" + +#: ../src/ab-window.c:176 ../smime/gui/certificate-manager.c:483 +msgid "E-Mail Address" +msgstr "Äịa chỉ thÆ° Ä‘iện tá»­" + +#. Entry widget for finding an address +#: ../src/ab-window.c:263 +msgid "_Search for Name:" +msgstr "Tìm _kiếm tên:" + +#: ../src/ab-window.c:323 +msgid "Send-To" +msgstr "Gởi cho" + +#: ../src/ab-window.c:343 +msgid "Run Editor" +msgstr "Chạy bá»™ soạn thảo" + +#: ../src/ab-window.c:350 +msgid "_Re-Import" +msgstr "Nhập _lại" + +#. mode switching stuff +#: ../src/ab-window.c:360 +msgid "Treat multiple addresses as:" +msgstr "Xá»­ lý nhiá»u địa chỉ dạng:" + +#: ../src/ab-window.c:364 +msgid "alternative addresses for the same person" +msgstr "địa chỉ xen kẽ cho cùng má»™t ngÆ°á»i" + +#: ../src/ab-window.c:369 +msgid "a distribution list" +msgstr "danh sách phân phối" + +#: ../src/ab-window.c:702 +#, c-format +msgid "" +"Error opening address book '%s':\n" +"%s" +msgstr "" +"Gặp lá»—i khi mở sổ địa chỉ « %s »:\n" +"%s" + +#: ../src/address-book-config.c:165 ../src/address-book-config.c:260 +msgid "Modify Address Book" +msgstr "Sá»­a đổi Sổ địa chỉ" + +#: ../src/address-book-config.c:169 ../src/address-book-config.c:264 +#: ../addressbook/gui/component/ldap-config.glade.h:12 +#: ../addressbook/gui/component/ldap-config.glade.h:13 +msgid "Add Address Book" +msgstr "Thêm Sổ địa chỉ" + +#. mailbox name +#. may be NULL +#. 
mailbox name
+#: ../src/address-book-config.c:185 ../src/address-book-config.c:299
+#: ../src/address-book-config.c:388 ../src/address-book-config.c:455
+msgid "A_ddress Book Name:"
+msgstr "Tên _Sổ địa chỉ:"
+
+#: ../src/address-book-config.c:190 ../src/address-book-config.c:343
+#: ../src/address-book-config.c:424 ../src/address-book-config.c:460
+msgid "_Expand aliases as you type"
+msgstr "_Bung bí danh trong khi gõ"
+
+#: ../src/address-book-config.c:304
+msgid "Load program location:"
+msgstr "Vị trí chương trình tải:"
+
+#: ../src/address-book-config.c:311 ../src/address-book-config.c:316
+msgid "Select load program for address book"
+msgstr "Chọn chương trình tải sổ địa chỉ"
+
+#: ../src/address-book-config.c:323
+msgid "Save program location:"
+msgstr "Vị trí chương trình lưu:"
+
+#: ../src/address-book-config.c:330 ../src/address-book-config.c:335
+msgid "Select save program for address book"
+msgstr "Chọn chương trình lưu sổ địa chỉ"
+
+#: ../src/address-book-config.c:393
+msgid "_Host Name"
+msgstr "Tên _máy"
+
+#: ../src/address-book-config.c:398
+msgid "Base Domain _Name"
+msgstr "Tên miền cơ _bản"
+
+#: ../src/address-book-config.c:403
+msgid "_User Name (Bind DN)"
+msgstr "Tên _người dùng (Bind DN)"
+
+#: ../src/address-book-config.c:408
+msgid "_Password"
+msgstr "_Mật khẩu"
+
+#: ../src/address-book-config.c:414
+msgid "_User Address Book DN"
+msgstr "DN của Sổ địa chỉ ng_ười dùng"
+
+#: ../src/address-book-config.c:420
+msgid "Enable _TLS"
+msgstr "Bật _TLS"
+
+#: ../src/address-book-config.c:478 ../src/main-window.c:4098
+#, c-format
+msgid "Error displaying help: %s\n"
+msgstr "Gặp lỗi khi hiển thị trợ giúp: %s\n"
+
+#: ../src/address-book-config.c:579
+msgid "No path found. Do you want to give one?"
+msgstr "Không tìm thấy đường dẫn. Bạn có muốn nhập một đường dẫn không?"
+
+#: ../src/address-book-config.c:585
+#, c-format
+msgid "The address book file path \"%s\" is not correct. %s"
+msgstr "Đường dẫn của tập tin sổ địa chỉ « %s » là không đúng. %s"
+
+#: ../src/address-book-config.c:588
+#, c-format
+msgid "The load program path \"%s\" is not correct. %s"
+msgstr "Đường dẫn của chương trình tải « %s » là không đúng. %s"
+
+#: ../src/address-book-config.c:591
+#, c-format
+msgid "The save program path \"%s\" is not correct. %s"
+msgstr "Đường dẫn của chương trình lưu « %s » là không đúng. %s"
+
+#: ../src/address-book-config.c:594
+#, c-format
+msgid "The path \"%s\" is not correct. %s"
+msgstr "Đường dẫn « %s » là không đúng. %s"
+
+#: ../src/address-book-config.c:597
+msgid "Do you want to correct the path?"
+msgstr "Bạn có muốn sửa đường dẫn đó chứ?"
+
+#: ../src/balsa-app.c:69
+#, c-format
+msgid ""
+"Opening remote mailbox %s.\n"
+"The _password for %s@%s:"
+msgstr ""
+"Đang mở hộp thư ở xa %s.\n"
+"_Mật khẩu cho %s@%s:"
+
+#: ../src/balsa-app.c:74
+#, c-format
+msgid "Mailbox _password for %s@%s:"
+msgstr "_Mật khẩu hộp thư cho %s@%s:"
+
+#: ../src/balsa-app.c:77 ../src/gnomesu-auth-dialog.c:111
+msgid "Password needed"
+msgstr "Cần mật khẩu"
+
+#: ../src/drivel.glade.h:78
+msgid "_Remember password"
+msgstr "_Nhớ mật khẩu"
+
+#: ../src/balsa-app.c:540
+#, c-format
+msgid "Couldn't open mailbox \"%s\""
+msgstr "Không thể mở hộp thư « %s »."
+
+#: ../pan/text.c:707
+msgid "From"
+msgstr "Từ"
+
+#: ../pan/prefs.c:1393 ../pan/prefs.c:1633 ../pan/text.c:703
+msgid "Subject"
+msgstr "Chủ đề"
+
+#: ../Pyblio/GnomeUI/Fields.py:43 src/dictmanagedlg.cpp:525
+#: datebook_gui.c:4767
+msgid "Date"
+msgstr "Ngày"
+
+#: ../providers/odbc/gda-odbc-provider.c:976 src/prefsdlg.cpp:68
+#: ../widgets/gtk+.xml.in.h:162
+msgid "Size"
+msgstr "Cỡ"
+
+#: ../src/balsa-index.c:1043
+#, c-format
+msgid "Opening mailbox %s. Please wait..."
+msgstr "Đang mở hộp thư %s. Hãy đợi..."
+
+#: ../src/balsa-index.c:1086 src/fe-gtk/dccgui.c:712 src/gpsdrive.c:3665
+#: src/gpsdrive.c:3820 src/gpsdrive.c:6737 src/gpsdrive.c:8505
+#: src/gpsdrive.c:8996
+#, fuzzy
+msgid "To"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Cho\n"
+"#-#-#-#-# gpsdrive-2.08pre6.vi.po (gpsdrive-2.08pre6) #-#-#-#-#\n"
+"Đến"
+
+#: ../src/balsa-index.c:1684
+#, c-format
+msgid "Move to Trash failed: %s"
+msgstr "Việc chuyển vào Rác bị lỗi: %s"
+
+#. R
+#: ../src/balsa-index.c:1838 ../src/main-window.c:584
+#: ../src/main-window.c:1047 ../src/message-window.c:148
+msgid "_Reply..."
+msgstr "T_rả lời..."
+
+#: ../src/balsa-index.c:1840
+msgid "Reply To _All..."
+msgstr "Trả lời _mọi người..."
+
+#: ../src/balsa-index.c:1842
+msgid "Reply To _Group..."
+msgstr "Trả lời _nhóm..."
+
+#: ../src/balsa-index.c:1844
+msgid "_Forward Attached..."
+msgstr "_Chuyển tiếp đồ đính kèm..."
+
+#: ../src/balsa-index.c:1846
+msgid "Forward _Inline..."
+msgstr "Chuyển tiếp trực t_iếp..."
+
+#: ../src/balsa-index.c:1848 ../src/main-window.c:623
+msgid "_Pipe through..."
+msgstr "Gởi _qua ống dẫn..."
+
+#: ../src/balsa-index.c:1850 ../src/main-window.c:682
+#: ../src/main-window.c:1031
+msgid "_Store Address..."
+msgstr "_Lưu địa chỉ..."
+
+#: ../ui/muds.glade.h:48 ../gmedia_slice/interface.c:190
+#: ../storage/sunone-folder-tree.c:1281
+msgid "_Delete"
+msgstr "_Xoá bỏ"
+
+#: ../src/balsa-index.c:1870 ../gtk/gtkstock.c:409
+msgid "_Undelete"
+msgstr "_Phục hồi"
+
+#: ../src/balsa-index.c:1875
+msgid "Move To _Trash"
+msgstr "Chuyển vào _Rác"
+
+#: ../src/balsa-index.c:1879
+msgid "T_oggle"
+msgstr "_Bật tắt"
+
+#: ../src/balsa-index.c:1883 ../src/main-window.c:331 ../src/main-window.c:360
+msgid "_Flagged"
+msgstr "Đã đặt _cờ"
+
+#: ../src/balsa-index.c:1886 ../ui/evolution-mail-message.xml.h:128
+#: ../ui/evolution-mail-message.xml.h:122
+msgid "_Unread"
+msgstr "Ch_ưa đọc"
+
+#: ../src/balsa-index.c:1894
+msgid "_Move to"
+msgstr "_Chuyển vào"
+
+#: ../src/balsa-index.c:1902 ui/galeon-ui.xml.in.h:205
+#: ../display/Display.py:88
+msgid "_View Source"
+msgstr "_Xem mã nguồn"
+
+#: ../src/balsa-index.c:2155
+#, c-format
+msgid "Failed to copy messages to mailbox \"%s\": %s"
+msgstr "Việc sao chép các thư vào hộp thư « %s » bị lỗi: %s"
+
+#: ../src/balsa-index.c:2156
+#, c-format
+msgid "Failed to copy message to mailbox \"%s\": %s"
+msgstr "Việc sao chép thư vào hộp thư « %s » bị lỗi: %s"
+
+#: ../src/balsa-index.c:2168
+#, c-format
+msgid "Copied to \"%s\"."
+msgstr "Đã sao chép vào « %s »."
+
+#: ../src/balsa-index.c:2169
+#, c-format
+msgid "Moved to \"%s\"."
+msgstr "Đã chuyển vào « %s »."
+
+#: ../src/balsa-index.c:2288
+#, c-format
+msgid "Committing mailbox %s failed."
+msgstr "Việc gài vào hộp thư %s bị lỗi."
+
+#: ../src/balsa-index.c:2461
+msgid "Pipe message through a program"
+msgstr "Gởi thư qua ống dẫn đến chương trình"
+
+#: ../src/balsa-index.c:2464 ../src/file-manager/fm-directory-view.c:4020
+msgid "_Run"
+msgstr "_Chạy"
+
+#: ../src/balsa-index.c:2473
+msgid "Specify the program to run:"
+msgstr "Xác định chương trình cần chạy:"
+
+#: ../src/balsa-mblist.c:301 src/gbiff2.strings:35
+msgid "Mailbox"
+msgstr "Hộp thư"
+
+#: ../src/balsa-mblist.c:1053 ../src/balsa-mblist.c:1089
+msgid "Failed to find mailbox"
+msgstr "Việc tìm hộp thư bị lỗi."
+
+#: ../src/balsa-mblist.c:1663 ../src/ephy-encoding-menu.c:350
+msgid "_Other..."
+msgstr "_Khác..."
+
+#: ../src/balsa-mblist.c:1731 ../data/glade/goobox.glade.h:10
+#: ../src/dlg-extract.c:263 utils/gul-download.c:68
+msgid "Choose destination folder"
+msgstr "Chọn thư mục đích"
+
+#: ../src/balsa-mblist.c:1954 ../src/balsa-mblist.c:1957
+#: ../gtk/gtkfilechooserbutton.c:1531 ../app/display/gimpstatusbar.c:452
+#: app/gui.c:2084
+msgid "Other..."
+msgstr "Khác..."
+
+#: ../src/balsa-mblist.c:2115
+#, c-format
+msgid "Shown mailbox: %s with %d message, "
+msgid_plural "Shown mailbox: %s with %d messages, "
+msgstr[0] "Hiển thị hộp thư: %s với %d thư, "
+msgstr[1] "Hiển thị hộp thư: %s với %d thư, "
+
+#. xgettext: this is the second part of the message
+#. * "Shown mailbox: %s with %d messages, %ld new".
+#: ../src/balsa-mblist.c:2121
+#, c-format
+msgid "%ld new"
+msgid_plural "%ld new"
+msgstr[0] "%ld mới"
+
+#: ../src/balsa-message.c:291
+msgid "Check cryptographic signature"
+msgstr "Kiểm tra chữ ký mật mã"
+
+#: ../src/balsa-message.c:312
+msgid "Select message part to display"
+msgstr "Chọn phần thư cần hiển thị"
+
+#: ../src/balsa-message.c:386 ../extensions/page-info/page-info-dialog.c:1783
+msgid "Content"
+msgstr "Nội dung"
+
+#: ../src/balsa-message.c:464
+msgid "Message parts"
+msgstr "Phần thư"
+
+#: ../src/balsa-message.c:645
+msgid "Save selected as..."
+msgstr "Lưu phần đã chọn dạng..."
+
+#: ../src/balsa-message.c:652
+msgid "Save selected to folder..."
+msgstr "Lưu phần đã chọn vào thư mục..."
+
+#: ../src/balsa-message.c:790
+msgid "(No sender)"
+msgstr "(Không có người gởi)"
+
+#: ../src/balsa-message.c:858
+#, c-format
+msgid "Could not access message %ld in mailbox \"%s\"."
+msgstr "Không thể truy cập thư %ld trong hộp thư « %s »."
+
+#: ../src/balsa-message.c:1040
+msgid "mixed parts"
+msgstr "phần hỗn hợp"
+
+#: ../src/balsa-message.c:1042
+msgid "alternative parts"
+msgstr "phần xen kẽ"
+
+#: ../src/balsa-message.c:1044
+msgid "signed parts"
+msgstr "phần đã ký"
+
+#: ../src/balsa-message.c:1046
+msgid "encrypted parts"
+msgstr "phần đã mật mã"
+
+#: ../src/balsa-message.c:1048
+msgid "rfc822 message"
+msgstr "thư rfc822"
+
+#: ../src/balsa-message.c:1050
+#, c-format
+msgid "\"%s\" parts"
+msgstr "phần « %s »"
+
+#: ../src/balsa-message.c:1103
+#, c-format
+msgid "rfc822 message (from %s, subject \"%s\")"
+msgstr "thư rfc822 (từ %s, chủ đề « %s »)"
+
+#: ../src/balsa-message.c:1110
+msgid "complete message"
+msgstr "toàn bộ thư"
+
+#: ../src/balsa-message.c:1127
+#, c-format
+msgid "part %s: %s (file %s)"
+msgstr "phần %s: %s (tập tin %s)"
+
+#: ../src/balsa-message.c:1138
+#, c-format
+msgid "part %s: %s"
+msgstr "phần %s: %s"
+
+#: ../src/balsa-message.c:1153
+msgid "encrypted: "
+msgstr "đã mật mã: "
+
+#. #-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#
+#. 
If instead we dont have a path yet, fire up a file selector
+#: ../src/balsa-message.c:1300 ../src/balsa-mime-widget-text.c:437
+#: ../src/balsa-mime-widget-text.c:822 gpe-go.c:1358
+#: ../data/playlist.glade.h:6 ../src/glade-project-window.c:466
+msgid "Save..."
+msgstr "Lưu..."
+
+#: ../src/balsa-message.c:1376
+msgid "Select folder for saving selected parts"
+msgstr "Chọn thư mục nơi cần lưu các phần đã chọn."
+
+#: ../src/balsa-message.c:1414
+#, c-format
+msgid "%s message part"
+msgstr "Phần thư %s"
+
+#: ../src/balsa-message.c:1442 ../src/balsa-mime-widget-callbacks.c:202
+#, c-format
+msgid "Could not save %s: %s"
+msgstr "Không thể lưu %s: %s"
+
+#: ../src/balsa-message.c:2140
+#, c-format
+msgid ""
+"The sender of this mail, %s, requested \n"
+"a Message Disposition Notification(MDN) to be returned to `%s'.\n"
+"Do you want to send this notification?"
+msgstr ""
+"Người gởi thư này, %s, yêu cầu \n"
+"gởi trả lại Thông báo cách chuyển nhượng thư (MDN) đến « %s ».\n"
+"Bạn có muốn gởi thông báo này không?"
+
+#: ../src/balsa-message.c:2147
+msgid "Reply to MDN?"
+msgstr "Trả lời yêu cầu thông báo chuyển nhượng không?"
+
+#: ../src/balsa-message.c:2342
+msgid ""
+"The decryption cannot be performed because this message is displayed more "
+"than once.\n"
+"Please close the other instances of this message and try again."
+msgstr ""
+"Không thể giải mật mã vì thư này được hiển thị nhiều lần.\n"
+"Hãy đóng các thể hiện khác của thư này, rồi thử lại."
+
+#: ../src/balsa-message.c:2363 ../src/balsa-message.c:2445
+#: ../src/balsa-message.c:2560
+#, c-format
+msgid "Parsing a message part failed: %s"
+msgstr "Việc phân tách phần thư bị lỗi: %s"
+
+#: ../src/balsa-message.c:2364 ../src/balsa-message.c:2446
+#: ../src/balsa-message.c:2561
+msgid "Possible disk space problem."
+msgstr "Có lẽ không có đủ chỗ trên đĩa."
+
+#: ../src/balsa-message.c:2376
+#, c-format
+msgid ""
+"The message sent by %s with subject \"%s\" contains an encrypted part, but "
+"it's structure is invalid."
+msgstr ""
+"Thư được gởi bởi %s với chủ đề « %s » chứa một phần đã mật mã với cấu trúc "
+"không hợp lệ."
+
+#: ../src/balsa-message.c:2384
+#, c-format
+msgid ""
+"The message sent by %s with subject \"%s\" contains a PGP encrypted part, "
+"but this crypto protocol is not available."
+msgstr ""
+"Thư được gởi bởi %s với chủ đề « %s » chứa một phần đã mật mã PGP, nhưng "
+"giao thức mật mã này chưa sẵn sàng."
+
+#: ../src/balsa-message.c:2397
+#, c-format
+msgid ""
+"The message sent by %s with subject \"%s\" contains a S/MIME encrypted part, "
+"but this crypto protocol is not available."
+msgstr ""
+"Thư được gởi bởi %s với chủ đề « %s » chứa một phần đã mật mã S/MIME, nhưng "
+"giao thức mật mã này chưa sẵn sàng."
+
+#: ../src/balsa-message.c:2461
+#, c-format
+msgid ""
+"The message sent by %s with subject \"%s\" contains a signed part, but its "
+"structure is invalid. The signature, if there is any, cannot be checked."
+msgstr ""
+"Thư được gởi bởi %s với chủ đề « %s » chứa một phần đã ký tên với cấu trúc "
+"không hợp lệ. Như thế thì không thể kiểm tra chữ ký, nếu có."
+
+#: ../src/balsa-message.c:2474
+#, c-format
+msgid ""
+"The message sent by %s with subject \"%s\" contains a %s signed part, but "
+"this crypto protocol is not available."
+msgstr ""
+"Thư được gởi bởi %s với chủ đề « %s » chứa một phần đã ký tên %s, nhưng giao "
+"thức mật mã này chưa sẵn sàng."
+
+# Name: don't translate / Tên: đừng dịch
+#: ../src/balsa-message.c:2477 ../mimedir/mimedir-vcard.c:3662
+msgid "PGP"
+msgstr "PGP"
+
+# Name: don't translate / Tên: đừng dịch
+#: ../src/balsa-message.c:2477 ../src/balsa-mime-widget-crypto.c:72
+#: ../src/print.c:1302
+msgid "S/MIME"
+msgstr "S/MIME"
+
+#: ../src/balsa-message.c:2496 ../src/balsa-message.c:2622
+msgid "Detected a good signature"
+msgstr "Mới phát hiện một chữ ký đúng."
+
+#: ../src/balsa-message.c:2502
+msgid "Detected a good signature with insufficient validity"
+msgstr "Mới phát hiện một chữ ký đúng không có đủ độ hợp lệ."
+
+#: ../src/balsa-message.c:2507 ../src/balsa-message.c:2626
+msgid "Detected a good signature with insufficient validity/trust"
+msgstr "Mới phát hiện một chữ ký đúng không có đủ độ hợp lệ/tin cậy."
+
+#: ../src/balsa-message.c:2514 ../src/balsa-message.c:2632
+#, c-format
+msgid ""
+"Checking the signature of the message sent by %s with subject \"%s\" "
+"returned:\n"
+"%s"
+msgstr ""
+"Việc kiểm tra chữ ký của thư được gởi bởi %s với chủ đề « %s » đã gởi trả:\n"
+"%s"
+
+#: ../src/balsa-message.c:2526
+#, c-format
+msgid ""
+"Checking the signature of the message sent by %s with subject \"%s\" failed "
+"with an error!"
+msgstr ""
+"Việc kiểm tra chữ ký của thư được gởi bởi %s với chủ đề « %s » đã thất bại "
+"với lỗi !"
+
+#: ../src/balsa-message.c:2584
+msgid ""
+"The decryption cannot be performed because this message is displayed more "
+"than once."
+msgstr "Không thể giải mật mã vì thư này được hiển thị nhiều lần."
+
+#: ../src/balsa-message.c:2586
+msgid ""
+"The signature check and removal of the OpenPGP armor cannot be performed "
+"because this message is displayed more than once."
+msgstr ""
+"Không thể kiểm tra chữ ký và gỡ bỏ vỏ bọc OpenPGP vì thư này được hiển thị "
+"nhiều lần."
+
+#: ../src/balsa-message.c:2589
+msgid "Please close the other instances of this message and try again."
+msgstr "Hãy đóng các thể hiện khác của thư này, rồi thử lại."
+
+#: ../src/filter-edit-callbacks.c:311
+msgid "One of the specified fields contains:"
+msgstr "Một trong những trường đã xác định chứa:"
+
+#: ../src/filter-edit-callbacks.c:312
+msgid "None of the specified fields contains:"
+msgstr "Không có trường đã xác định nào chứa:"
+
+#: ../src/filter-edit-callbacks.c:314
+msgid "One of the regular expressions matches"
+msgstr "Một trong những biểu thức chính quy khớp"
+
+#: ../src/filter-edit-callbacks.c:315
+msgid "None of the regular expressions matches"
+msgstr "Không có biểu thức chính quy nào khớp"
+
+#: ../src/filter-edit-callbacks.c:317
+msgid "Match when date is in the interval:"
+msgstr "Khớp khi ngày nằm trong khoảng:"
+
+#: ../src/filter-edit-callbacks.c:318
+msgid "Match when date is outside the interval:"
+msgstr "Khớp khi ngày nằm ngoài khoảng:"
+
+#: ../src/filter-edit-callbacks.c:320 ../src/filter-edit-callbacks.c:1085
+msgid "Match when one of these flags is set:"
+msgstr "Khớp khi đặt một trong những cờ hiệu này:"
+
+#: ../src/filter-edit-callbacks.c:321
+msgid "Match when none of these flags is set:"
+msgstr "Khớp khi không đặt cờ hiệu nào trong những cờ này:"
+
+#: ../src/filter-edit-callbacks.c:512
+msgid "You must specify the name of the user header to match on"
+msgstr "Bạn cần ghi rõ tên của dòng đầu người dùng cần khớp"
+
+#: ../src/filter-edit-callbacks.c:523
+msgid "You must specify at least one field for matching"
+msgstr "Phải ghi rõ ít nhất một trường cần khớp"
+
+#: ../src/filter-edit-callbacks.c:533
+msgid "You must provide a string"
+msgstr "Phải ghi rõ một chuỗi"
+
+#: ../src/filter-edit-callbacks.c:540
+msgid "You must provide at least one regular expression"
+msgstr "Phải ghi rõ ít nhất một biểu thức chính quy"
+
+#: ../src/filter-edit-callbacks.c:552
+msgid "Low date is incorrect"
+msgstr "Ngày dưới không đúng"
+
+#: ../src/filter-edit-callbacks.c:563
+msgid "High date is incorrect"
+msgstr "Ngày trên không đúng"
+
+#: ../src/filter-edit-callbacks.c:571
+msgid "Low date is greater than high date"
+msgstr "Ngày dưới lớn hơn ngày trên"
+
+#: ../src/filter-edit-callbacks.c:856
+#, c-format
+msgid "Error displaying condition help: %s\n"
+msgstr "Gặp lỗi khi hiển thị trợ giúp về điều kiện: %s\n"
+
+#: ../src/filter-edit-callbacks.c:897
+msgid "Match Fields"
+msgstr "Khớp trường"
+
+#: ../src/filter-edit-callbacks.c:907 ../app/actions/select-actions.c:50
+#: src/gtkam-main.c:553 src/gtkam-main.c:555 src/gtkam-main.c:562
+msgid "_All"
+msgstr "_Tất cả"
+
+#: ../libgnomeui/gnome-app-helper.c:171 ../ui/history.glade.h:2
+msgid "C_lear"
+msgstr "_Xoá"
+
+#: ../src/filter-edit-callbacks.c:910 ../src/main-window.c:3977
+msgid "_Body"
+msgstr "Th_ân"
+
+#: ../src/sendmsg-window.c:2818 ../composer/e-msg-composer-hdrs.c:646
+msgid "_To:"
+msgstr "Ch_o:"
+
+#: ../src/filter-edit-callbacks.c:912 ../src/main-window.c:3979
+#: ../embed/print-dialog.c:392 ../src/drivel.glade.h:69
+msgid "_From:"
+msgstr "_Từ :"
+
+#: ../src/filter-edit-callbacks.c:913
+msgid "_Subject"
+msgstr "C_hủ đề"
+
+#: ../src/filter-edit-callbacks.c:914 ../src/main-window.c:3981
+#: ../composer/e-msg-composer-hdrs.c:650 ../composer/e-msg-composer-hdrs.c:652
+msgid "_Cc:"
+msgstr "_Cc:"
+
+#: ../src/filter-edit-callbacks.c:916
+msgid "_User header:"
+msgstr "Dòng đầu _người dùng:"
+
+#: ../pan/grouplist.c:992 ../pan/rules/rule-edit-ui.c:153
+msgid "Unread"
+msgstr "Chưa đọc"
+
+#: ../src/filter-edit-callbacks.c:946 ../mail/em-filter-i18n.h:12
+msgid "Deleted"
+msgstr "Đã xoá bỏ"
+
+#: 
../src/filter-edit-callbacks.c:946 +msgid "Replied" +msgstr "Äã trả lá»i" + +#: ../src/filter-edit-callbacks.c:946 ../mail/message-list.etspec.h:5 +msgid "Flagged" +msgstr "Äã đặt cá»" + +#: ../src/filter-edit-callbacks.c:961 +msgid "One of the specified f_ields contains" +msgstr "Má»™t của những trÆ°á»ng đã xác định chứa" + +#: ../src/filter-edit-callbacks.c:976 +msgid "Contain/Does _Not Contain" +msgstr "Chứa/_Không chứa" + +#: ../src/filter-edit-callbacks.c:995 +msgid "_One of the regular expressions matches" +msgstr "Má»™t của những _biểu thức chính quy khá»›p" + +#: ../plug-ins/gimpressionist/sizemap.c:472 +msgid "A_dd" +msgstr "Th_êm" + +#: ../src/user_popup.c:605 ../src/bitmapfamily-win.cc:90 +#: ../src/guikachu.glade.h:20 ../src/mainwin-menu.cc:104 +msgid "_Remove" +msgstr "_Gỡ bá»" + +#: ../src/filter-edit-callbacks.c:1030 +msgid "One _Matches/None Matches" +msgstr "Má»™t cái khá»›_p/Không có cái nào khá»›p" + +#: ../src/filter-edit-callbacks.c:1047 +msgid "Match when message date is in the interval:" +msgstr "Khá»›p khi ngày thÆ° nằm trong khoảng:" + +#: ../src/filter-edit-callbacks.c:1067 +msgid "Inside/outside the date interval" +msgstr "Trong/Ngoài khoảng ngày" + +#: ../src/filter-edit-callbacks.c:1114 +msgid "Match when one flag is set/when no flag is set" +msgstr "Khá»›p khi má»™t cá» hiệu được đặt/Khi không có cá» hiệu được đặt" + +#: ../src/filter-edit-callbacks.c:1132 +msgid "Search T_ype:" +msgstr "_Kiểu tìm kiếm:" + +#: ../src/filter-edit-callbacks.c:1206 +msgid "Edit condition for filter: " +msgstr "Sá»­a Ä‘iá»u kiện lá»c: " + +#: ../src/filter-edit-callbacks.c:1424 ../src/save-restore.c:1845 +msgid "Filter with no condition was omitted" +msgstr "Lá»c không có Ä‘iá»u kiện thì bị bá» sót" + +#: ../src/filter-edit-callbacks.c:1450 +#, c-format +msgid "Error displaying filter help: %s\n" +msgstr "Gặp lá»—i khi hiển thị trợ giúp vá» bá»™ lá»c: %s\n" + +#: ../src/filter-edit-callbacks.c:1640 +msgid "New filter" +msgstr "Bá»™ lá»c má»›i" + +#: ../src/filter-edit-callbacks.c:1772 +msgid "No filter name specified." +msgstr "ChÆ°a ghi rõ tên bá»™ lá»c khác." + +#: ../src/filter-edit-callbacks.c:1777 +#, c-format +msgid "Filter \"%s\" already exists." +msgstr "Bá»™ lá»c « %s » đã có." + +#: ../src/filter-edit-callbacks.c:1787 +msgid "Filter must have conditions." +msgstr "Bá»™ lá»c phải có Ä‘iá»u kiện." 
+
+#: ../src/filter-edit-callbacks.c:1844
+msgid "Filter has matched"
+msgstr "Bộ lọc đã khớp"
+
+#: ../src/filter-edit-callbacks.c:1869
+msgid "You must provide a sound to play"
+msgstr "Phải cung cấp âm thanh cần phát"
+
+#: ../src/filter-edit-callbacks.c:2028
+#, c-format
+msgid "(Example: write December 31, 2000, as %s)"
+msgstr "(Ví dụ : viết Ngày 31, tháng Chạp, năm 2000, dạng %s)"
+
+#: ../src/filter-edit-dialog.c:89 ../src/pref-manager.c:384
+#: ../plug-ins/ifscompose/ifscompose.c:642 ../objects/UML/message.c:138
+#: ../src/form-editor/button-prop.cc:145
+msgid "Simple"
+msgstr "Đơn giản"
+
+#: ../src/filter-edit-dialog.c:90
+#: ../gtksourceview/language-specs/ruby.lang.h:14
+msgid "Regular Expression"
+msgstr "Biểu thức chính quy"
+
+#: ../src/filter-edit-dialog.c:91
+msgid "Date interval"
+msgstr "Khoảng ngày"
+
+#: ../src/filter-edit-dialog.c:92
+msgid "Flag condition"
+msgstr "Điều kiện cờ"
+
+#: ../src/filter-edit-dialog.c:96
+msgid "Copy to folder:"
+msgstr "Chép vào thư mục:"
+
+#: ../src/filter-edit-dialog.c:97
+msgid "Move to folder:"
+msgstr "Chuyển vào thư mục:"
+
+#: ../src/filter-edit-dialog.c:98
+msgid "Print on printer:"
+msgstr "In bằng máy in:"
+
+#: ../src/filter-edit-dialog.c:99
+msgid "Run program:"
+msgstr "Chạy chương trình:"
+
+#: ../src/filter-edit-dialog.c:100
+msgid "Send to Trash"
+msgstr "Chuyển vào Rác"
+
+#: ../src/filter-edit-dialog.c:104 ../objects/GRAFCET/vergent.c:122
+msgid "OR"
+msgstr "HOẶC"
+
+#: ../src/filter-edit-dialog.c:105 ../objects/GRAFCET/vergent.c:123
+msgid "AND"
+msgstr "VÀ"
+
+#: ../gmedia_slice/interface.c:459
+msgid "_New"
+msgstr "_Mới"
+
+#. The name entry
+#: ../src/filter-edit-dialog.c:249
+msgid "_Filter name:"
+msgstr "Tên bộ _lọc:"
+
+#. The filter op-code : "OR" or "AND" all the conditions
+#: ../src/filter-edit-dialog.c:267
+msgid "Operation between conditions"
+msgstr "Thao tác giữa các điều kiện"
+
+#: ../Pyblio/GnomeUI/Document.py:145 ../Pyblio/GnomeUI/Document.py:194
+#: ../storage/sunone-permissions-dialog.glade.h:30 po/silky.glade.h:214
+#: app/menubar.c:687
+msgid "_Edit"
+msgstr "_Hiệu chỉnh"
+
+#: ../src/filter-edit-dialog.c:317
+msgid "Ne_w"
+msgstr "_Mới"
+
+#. The notification area
+#: ../src/filter-edit-dialog.c:350
+msgid "Notification:"
+msgstr "Thông báo :"
+
+#. Notification buttons
+#: ../src/filter-edit-dialog.c:360
+msgid "Play sound:"
+msgstr "Phát âm:"
+
+#: ../src/filter-edit-dialog.c:367 ../src/filter-edit-dialog.c:380
+msgid "Use Sound..."
+msgstr "Dùng âm thanh..."
+
+#: ../src/filter-edit-dialog.c:399
+msgid "Popup text:"
+msgstr "Chuỗi bật lên:"
+
+#. The action area
+#: ../src/filter-edit-dialog.c:420
+msgid "Action to perform:"
+msgstr "Hành động cần thực hiện:"
+
+#: ../data/glade/smart-playlist-dialog.glade.h:5
+msgid "Match"
+msgstr "Khớp"
+
+#: ../testing/gda-test-sql.c:289 ../src/logout.c:224 libexif/exif-entry.c:479
+#: ../mimedir/mimedir-vcomponent.c:407 ../mimedir/mimedir-vcomponent.c:408
+msgid "Action"
+msgstr "Hành động"
+
+#: ../src/filter-edit-dialog.c:486 ../app/widgets/gimpdataeditor.c:229
+#: ../app/sheets_dialog.c:267 ../glom/glom.glade.h:135
+msgid "Revert"
+msgstr "Hoàn nguyên"
+
+#: ../src/filter-edit-dialog.c:537
+msgid "A filter run dialog is open.Close it before you can modify filters."
+msgstr ""
+"Có một hộp thoại chạy bộ lọc được mở. Hãy đóng nó trước khi sửa đổi bộ lọc."
+
+#: ../src/filter-edit-dialog.c:550
+msgid "Balsa Filters"
+msgstr "Bộ lọc Balsa"
+
+#: ../src/filter-edit-dialog.c:618
+#, c-format
+msgid "Filter \"%s\" has no condition."
+msgstr "Bộ lọc « %s » không có điều kiện."
+
+#: ../src/filter-export-callbacks.c:57
+#, c-format
+msgid "Unable to export filter %s, an error occurred."
+msgstr "Không thể xuất ra bộ lọc %s vì gặp lỗi."
+
+#: ../src/filter-export-dialog.c:63
+msgid ""
+"There are opened filter run dialogs, close them before you can modify "
+"filters."
+msgstr ""
+"Có một số hộp thoại chạy bộ lọc được mở. Hãy đóng chúng trước khi sửa đổi bộ "
+"lọc."
+
+#: ../src/filter-export-dialog.c:75
+msgid "Balsa Filters Export"
+msgstr "Xuất bộ lọc Balsa"
+
+#: ../src/filter-run-callbacks.c:182
+#, c-format
+msgid "Error displaying run filters help: %s\n"
+msgstr "Gặp lỗi khi hiển thị trợ giúp về chạy bộ lọc: %s\n"
+
+#: ../src/filter-run-callbacks.c:230 ../src/filter-run-callbacks.c:252
+msgid "Error when applying filters"
+msgstr "Gặp lỗi khi áp dụng bộ lọc"
+
+#: ../src/filter-run-callbacks.c:233 ../src/filter-run-callbacks.c:255
+#, c-format
+msgid "Filter applied to \"%s\"."
+msgstr "Bộ lọc được áp dụng cho « %s »."
+
+#: ../src/filter-run-callbacks.c:292
+#, c-format
+msgid ""
+"The destination mailbox of the filter \"%s\" is \"%s\".\n"
+"You can't associate it with the same mailbox (that causes recursion)."
+msgstr ""
+"Hộp thư đích của bộ lọc « %s » là « %s ».\n"
+"Không thể gắn nó với cùng hộp thư đó (sẽ gây đệ quy)."
+
+#: ../src/filter-run-callbacks.c:300
+#, c-format
+msgid ""
+"The filter \"%s\" is not compatible with the mailbox type of \"%s\".\n"
+"This happens for example when you use regular expressions match with IMAP "
+"mailboxes, it is done by a very slow method; if possible, use substring "
+"match instead."
+msgstr ""
+"Bộ lọc « %s » không tương thích với kiểu hộp thư « %s ».\n"
+"Trường hợp này xảy ra, lấy thí dụ, khi bạn sử dụng cách khớp biểu thức chính "
+"quy với hộp thư kiểu IMAP: phương pháp rất chậm. Nếu có thể, hãy sử dụng "
+"cách khớp chuỗi con thay thế."
+
+#: ../src/filter-run-dialog.c:189
+msgid "Balsa Filters of Mailbox: "
+msgstr "Bộ lọc Balsa của hộp thư : "
+
+#: ../src/filter-run-dialog.c:230
+msgid "On reception"
+msgstr "Khi nhận"
+
+#: ../src/filter-run-dialog.c:242
+msgid "On exit"
+msgstr "Khi thoát"
+
+#: ../src/filter-run-dialog.c:316
+msgid "Apply Selected"
+msgstr "Áp dụng mục đã chọn"
+
+#: ui/galeon-ui.xml.in.h:203 ../glade2/meldapp.glade.h:98
+msgid "_Up"
+msgstr "_Lên"
+
+#. down button
+#: ../src/filter-run-dialog.c:369
+msgid "Do_wn"
+msgstr "_Xuống"
+
+#: ../src/filter-run-dialog.c:375
+msgid "A_pply Now!"
+msgstr "Á_p dụng ngay!"
+
+#: ../src/filter-run-dialog.c:419
+msgid ""
+"The filters dialog is opened, close it before you can run filters on any "
+"mailbox"
+msgstr ""
+"Đã mở hộp thoại của bộ lọc, hãy đóng lại trước khi chạy bộ lọc cho bất kỳ "
+"hộp thư nào."
+
+#: ../src/folder-conf.c:281
+msgid "Remote IMAP folder"
+msgstr "Thư mục IMAP ở xa"
+
+#: ../plug-ins/imagemap/imap_polygon.c:515 ../src/menus.c:68
+msgid "_Update"
+msgstr "_Cập nhật"
+
+#: ../src/folder-conf.c:284 ../sources/rb-playlist-source-recorder.c:1114
+#: ../gtk/gtkfilesel.c:1522
+msgid "C_reate"
+msgstr "_Tạo"
+
+#: ../src/mailbox-conf.c:1232 ../gcalctool/gtk.c:350
+msgid "_Basic"
+msgstr "Cơ _bản"
+
+#: ../src/folder-conf.c:312
+msgid "_Max number of connections:"
+msgstr "Số kết nối tối _đa:"
+
+#: ../src/folder-conf.c:319 ../src/mailbox-conf.c:1295
+msgid "Enable _persistent cache"
+msgstr "Bật bộ nhớ tạm _bền bỉ"
+
+#: ../src/folder-conf.c:327 ../src/mailbox-conf.c:1299
+msgid "Enable _bug workarounds"
+msgstr "Bật cách chỉnh sửa _lỗi"
+
+#: ../src/folder-conf.c:334
+msgid "Use STATUS for mailbox checking"
+msgstr "Dùng STATUS để kiểm tra hộp thư"
+
+#: ../plug-ins/common/CML_explorer.c:1307
+msgid "_Advanced"
+msgstr "Cấp c_ao"
+
+#: ../src/folder-conf.c:343
+msgid "Descriptive _Name:"
+msgstr "T_ên mô tả:"
+
+#: ../src/folder-conf.c:358 ../src/mailbox-conf.c:1146
+msgid "Use_r name:"
+msgstr "Tên _người dùng:"
+
+#: ../glade/straw.glade.h:87
+msgid "_Password:"
+msgstr "_Mật khẩu :"
+
+#: ../src/folder-conf.c:369 ../src/mailbox-conf.c:1257
+msgid "_Anonymous access"
+msgstr "Truy cập _vô danh"
+
+#: ../src/folder-conf.c:378
+msgid "Subscribed _folders only"
+msgstr "Chỉ thư mục đã _đăng ký"
+
+#: ../src/folder-conf.c:380
+msgid "Always show _INBOX"
+msgstr "Luôn hiện THƯ _ĐẾN"
+
+#: ../src/folder-conf.c:383
+msgid "Pr_efix:"
+msgstr "T_iền tố :"
+
+#: ../src/folder-conf.c:529
+msgid "Select parent folder"
+msgstr "Chọn thư mục mẹ"
+
+#: ../src/folder-conf.c:603
+#, c-format
+msgid ""
+"Renaming INBOX is special!\n"
+"You will create a subfolder %s in %s\n"
+"containing the messages from INBOX.\n"
+"INBOX and its subfolders will remain.\n"
+"What would you like to do?"
+msgstr ""
+"Đổi tên hộp THƯ ĐẾN là đặc biệt!\n"
+"Bạn sẽ tạo thư mục con %s trong %s\n"
+"chứa các thư từ hộp THƯ ĐẾN.\n"
+"Vẫn duy trì hộp THƯ ĐẾN và các thư\n"
+"mục con của nó. Bạn muốn làm gì?"
+
+#: ../src/folder-conf.c:609 ../src/StockIcons.cs:33 ../gtk/gtkstock.c:311
+#: ../widgets/gtk+.xml.in.h:149 app/gui-subs.c:483 app/gui-subs.c:554
+msgid "Question"
+msgstr "Câu hỏi"
+
+#: ../src/folder-conf.c:614
+msgid "Rename INBOX"
+msgstr "Đổi tên hộp THƯ ĐẾN"
+
+#: ../src/folder-conf.c:616 web/template/keywords_transl_main.tpl:15
+#: ../app/dialogs.c:52 ../app/paginate_psprint.c:314 ../glade/property.c:5150
+#: ../src/dirbrowser.c:288 ../src/filexferdlg.c:254
+#: ../src/mlview-attribute-picker.cc:165 ../widgets/gtk+.xml.in.h:29
+#: address_gui.c:2700 category.c:421 category.c:877 category.c:917
+#: datebook_gui.c:1154 datebook_gui.c:1582 datebook_gui.c:4371
+#: export_gui.c:344 memo_gui.c:1552 password.c:362 print_gui.c:338
+#: restore_gui.c:312 todo_gui.c:2169 utils.c:1094 KeyRing/keyring.c:1332
+#: app/gui-subs.c:507 app/gui-subs.c:571 app/keys.c:711
+#: app/midi-settings-050.c:637 app/midi-settings-09x.c:640
+#: app/sample-editor.c:1486 app/sample-editor.c:1661 app/sample-editor.c:1985
+msgid "Cancel"
+msgstr "Thôi"
+
+#: ../src/folder-conf.c:637
+#, c-format
+msgid "Folder rename failed. Reason: %s"
+msgstr "Việc thay đổi tên thư mục bị lỗi. Lý do : %s"
+
+#: ../src/folder-conf.c:689
+#, c-format
+msgid "Folder creation failed. Reason: %s"
+msgstr "Việc tạo thư mục bị lỗi. Lý do : %s"
+
+#: ../src/folder-conf.c:729
+msgid ""
+"An IMAP folder that is not a mailbox\n"
+"has no properties that can be changed."
+msgstr ""
+"Một thư mục IMAP không phải là hộp thư\n"
+"thì không có thuộc tính nào có thể thay đổi."
+
+#: ../src/folder-conf.c:744
+msgid "Remote IMAP subfolder"
+msgstr "Thư mục con IMAP ở xa"
+
+#: ../src/folder-conf.c:747 ../profiles/gnome-audio-profiles.glade2.h:9
+msgid "_Create"
+msgstr "_Tạo"
+
+#: ../src/folder-conf.c:768
+msgid "Rename or move subfolder"
+msgstr "Đổi tên hay di chuyển thư mục con"
+
+#: ../src/folder-conf.c:769
+msgid "Create subfolder"
+msgstr "Tạo thư mục con"
+
+#: ../src/folder-conf.c:777 ../gtk/gtkfilesel.c:1498
+msgid "_Folder name:"
+msgstr "Tên thư _mục:"
+
+#: ../src/folder-conf.c:783 ../ui/connect.glade.h:7
+msgid "Host:"
+msgstr "Máy:"
+
+#: ../src/folder-conf.c:789 ../plug-ins/script-fu/script-fu-console.c:256
+#: src/fe-gtk/setup.c:1382 ../pan/pan-file-entry.c:81
+msgid "_Browse..."
+msgstr "_Duyệt..."
+
+#: ../src/folder-conf.c:797
+msgid "_Subfolder of:"
+msgstr "Thư mục _con của:"
+
+#: ../src/folder-conf.c:826
+msgid ""
+"This folder is not stored in configuration. I do not yet know how to remove "
+"it from remote server."
+msgstr ""
+"Thư mục này không được lưu trữ trong cấu hình, nên còn chưa biết cách gỡ bỏ "
+"nó khỏi máy phục vụ ở xa."
+
+#: ../src/folder-conf.c:835
+#, c-format
+msgid ""
+"This will remove the folder \"%s\" from the list.\n"
+"You may use \"New IMAP Folder\" later to add this folder again.\n"
+msgstr ""
+"Việc này sẽ gỡ bỏ thư mục « %s » ra khỏi danh sách.\n"
+"Có thể dùng tính năng « Thư mục IMAP mới » sau này để bổ sung lại thư mục "
+"này.\n"
+
+#: ../src/folder-conf.c:840 ../main.c:84 ../main.c:196 ../structure.c:173
+#: list-ui.c:81 ../libgimp/gimpexport.c:387 fileops.c:429
+msgid "Confirm"
+msgstr "Xác nhận"
+
+#: ../src/information-dialog.c:222
+msgid "Information - Balsa"
+msgstr "Thông tin — Balsa"
+
+#: ../src/information-dialog.c:325 ../libgnomedb/gnome-db-sql-console.c:561
+#, c-format
+msgid "WARNING: "
+msgstr "CẢNH BÁO : "
+
+#: ../src/information-dialog.c:328 ../libgnomedb/gnome-db-sql-console.c:566
+#, c-format
+msgid "ERROR: "
+msgstr "LỖI : "
+
+#: ../src/information-dialog.c:331
+#, c-format
+msgid "FATAL: "
+msgstr "NGHIÊM TRỌNG: "
+
+#: ../src/mailbox-conf.c:215
+msgid "Use _SSL"
+msgstr "Dùng _SSL"
+
+#: ../src/mailbox-conf.c:342
+msgid "No mailbox selected."
+msgstr "Chưa chọn hộp thư."
+
+#: ../src/mailbox-conf.c:371
+#, c-format
+msgid ""
+"Mailbox \"%s\" is used by Balsa and I cannot remove it.\n"
+"If you really want to remove it, assign its function\n"
+"to some other mailbox."
+msgstr ""
+"Balsa dùng hộp thư « %s » nên không thể gỡ bỏ nó.\n"
+"Nếu bạn thật sự muốn gỡ bỏ nó, hãy cấp phát chức \n"
+"năng của nó cho hộp thư khác nào đó."
+
+#: ../src/mailbox-conf.c:381
+#, c-format
+msgid ""
+"This will remove the mailbox \"%s\" from the list of mailboxes. You may "
+"also delete the disk file or files associated with this mailbox.\n"
+"If you do not remove the file on disk you may \"Add Mailbox\" to access the "
+"mailbox again.\n"
+"What would you like to do?"
+msgstr ""
+"Việc này sẽ gỡ bỏ hộp thư « %s » khỏi danh sách các hộp thư.\n"
+"Bạn cũng có thể xoá bỏ tập tin trên đĩa hay tập tin liên quan với hộp thư "
+"này.\n"
+"Nếu không xoá bỏ tập tin của nó trên đĩa thì có thể « Thêm hộp thư » để "
+"truy cập lại nó sau này.\n"
+"Bạn muốn làm gì?"
+
+#: ../src/mailbox-conf.c:393
+msgid "Remove from _list"
+msgstr "Gỡ bỏ khỏi _danh sách"
+
+#: ../src/mailbox-conf.c:394
+msgid "Remove from list and _disk"
+msgstr "Gỡ bỏ khỏi danh sách và _đĩa"
+
+#: ../src/mailbox-conf.c:402
+#, c-format
+msgid ""
+"This will remove the mailbox \"%s\" and all its messages from your IMAP "
+"server. If %s has subfolders, it will still appear as a node in the folder "
+"tree.\n"
+"You may use \"New IMAP subfolder\" later to add a mailbox with this name.\n"
+"What would you like to do?"
+msgstr ""
+"Việc này sẽ gỡ bỏ hộp thư « %s » và mọi thư của nó ra khỏi máy phục vụ IMAP. "
+"Nếu %s có thư mục con, nó vẫn sẽ xuất hiện là một nút trong cây thư mục.\n"
+"Có thể dùng tính năng « Thư mục con IMAP mới » sau này để thêm một hộp thư "
+"với tên này.\n"
+"Bạn muốn làm gì?"
+
+#: ../src/mailbox-conf.c:415
+msgid "_Remove from server"
+msgstr "Gỡ _bỏ khỏi máy phục vụ"
+
+#: ../src/mailbox-conf.c:422
+#, c-format
+msgid ""
+"This will remove the mailbox \"%s\" from the list of mailboxes.\n"
+"You may use \"Add Mailbox\" later to access this mailbox again.\n"
+"What would you like to do?"
+msgstr ""
+"Việc này sẽ gỡ bỏ hộp thư « %s » khỏi danh sách hộp thư.\n"
+"Có thể dùng tính năng « Thêm Hộp Thư » về sau để truy cập lại hộp thư này.\n"
+"Bạn muốn làm gì?"
+
+#: ../src/mailbox-conf.c:431
+msgid "_Remove from list"
+msgstr "Gỡ _bỏ khỏi danh sách"
+
+#: ../src/mailbox-conf.c:469
+#, c-format
+msgid "Folder deletion failed. Reason: %s"
+msgstr "Việc xoá bỏ thư mục bị lỗi. Lý do : %s"
+
+#: ../src/mailbox-conf.c:526 ../src/pref-manager.c:1511
+#: ../src/pref-manager.c:1596 ../src/pref-manager.c:2645
+#: ../gtk/gtkfilechooserdefault.c:3327 ../gtk/gtkstock.c:317
+#: ../glade/glade_menu_editor.c:958
+msgid "_Add"
+msgstr "Th_êm"
+
+#: ../src/mailbox-conf.c:746
+#: ../libnautilus-private/nautilus-file-operations.c:361
+#: ../libnautilus-private/nautilus-file-operations.c:386
+#: ../libnautilus-private/nautilus-file-utilities.c:70
+#: ../src/nautilus-connect-server-dialog.c:287
+#: ../src/baobab-remote-connect-dialog.c:291
+#, c-format
+msgid "%s on %s"
+msgstr "%s trên %s"
+
+#: ../src/mailbox-conf.c:888
+#, c-format
+msgid ""
+"Rename of %s to %s failed:\n"
+"%s"
+msgstr ""
+"Việc thay đổi tên %s thành %s bị lỗi:\n"
+"%s"
+
+#: ../src/mailbox-conf.c:1067
+msgid "_Mailbox Name:"
+msgstr "Tên _hộp thư :"
+
+#: ../src/mailbox-conf.c:1078
+msgid "Local Mailbox Configurator"
+msgstr "Bộ cấu hình hộp thư cục bộ"
+
+#: ../src/mailbox-conf.c:1111
+msgid "Remote Mailbox Configurator"
+msgstr "Bộ cấu hình hộp thư ở xa"
+
+#. mailbox name
+#: ../src/mailbox-conf.c:1133 ../src/mailbox-conf.c:1235
+msgid "Mailbox _Name:"
+msgstr "T_ên hộp thư :"
+
+#: ../desktop/client/src/connect.c:578
+msgid "Pass_word:"
+msgstr "_Mật khẩu :"
+
+#: ../src/mailbox-conf.c:1160
+msgid "_Delete messages from server after download"
+msgstr "_Xoá bỏ thư khỏi máy phục vụ sau khi tải về"
+
+#: ../src/mailbox-conf.c:1165
+msgid "_Enable check for new mail"
+msgstr "_Bật kiểm tra tìm thư mới"
+
+#: ../src/mailbox-conf.c:1170
+msgid "_Filter messages through procmail"
+msgstr "_Lọc thư qua procmail"
+
+#: ../src/mailbox-conf.c:1174
+msgid "Fi_lter Command:"
+msgstr "Lệnh _lọc:"
+
+#: ../src/mailbox-conf.c:1188
+msgid "Disable _APOP"
+msgstr "Tắt _APOP"
+
+#: ../pan/server-ui.c:358 ../glade/straw.glade.h:95
+msgid "_Username:"
+msgstr "T_ên người dùng:"
+
+#: ../src/mailbox-conf.c:1263
+msgid "_Remember Password"
+msgstr "_Nhớ mật khẩu"
+
+#: ../src/mailbox-conf.c:1274
+msgid "F_older Path:"
+msgstr "Đường dẫn thư _mục:"
+
+#: ../src/mailbox-conf.c:1360
+msgid "_Identity:"
+msgstr "T_hực thể:"
+
+#: ../mail/em-account-editor.c:304 ../mail/em-account-editor.c:767
+#: ../widgets/gtk+.xml.in.h:10
+#, fuzzy
+msgid "Always"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Luôn luôn\n"
+"#-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#\n"
+"Luôn"
+
+#: ../src/mailbox-conf.c:1392
+msgid ""
+"_Decrypt and check\n"
+"signatures automatically:"
+msgstr ""
+"Tự động giải mật mã\n"
+"và kiểm tra chữ ký:"
+
+#: ../src/mailbox-conf.c:1413
+msgid "Show _Recipient column instead of Sender"
+msgstr "Hiển thị cột _Người nhận thay cho Người gởi"
+
+#: ../src/mailbox-conf.c:1420
+msgid "_Subscribe for new mail check"
+msgstr "Đăng _ký để kiểm tra tìm thư mới"
+
+#: ../src/mailbox-node.c:265
+msgid "The folder edition to be written."
+msgstr "Bản sao thư mục cần ghi."
+
+#: ../src/mailbox-node.c:433
+#, c-format
+msgid ""
+"Scanning of %s failed: %s\n"
+"Check network connectivity."
+msgstr ""
+"Việc quét %s bị lỗi: %s\n"
+"Hãy kiểm tra lại kết nối mạng."
+
+#: ../src/mailbox-node.c:435
+#, c-format
+msgid "Scanning of %s failed: %s"
+msgstr "Việc quét %s bị lỗi: %s"
+
+#: ../src/mailbox-node.c:484
+#, c-format
+msgid "Scanning %s. Please wait..."
+msgstr "Đang quét %s. Hãy đợi một chút..."
+
+#: ../src/mailbox-node.c:943
+msgid "Local _mbox mailbox..."
+msgstr "Hộp thư _mbox cục bộ..."
+
+#: ../src/mailbox-node.c:945
+msgid "Local Mail_dir mailbox..."
+msgstr "Hộp thư Mail_dir cục bộ..."
+
+#: ../src/mailbox-node.c:947
+msgid "Local M_H mailbox..."
+msgstr "Hộp thư M_H cục bộ..."
+
+#: ../src/mailbox-node.c:949
+msgid "Remote _IMAP mailbox..."
+msgstr "Hộp thư _IMAP ở xa..."
+
+#: ../src/mailbox-node.c:952
+msgid "Remote IMAP _folder..."
+msgstr "Thư _mục IMAP ở xa..."
+
+#: ../src/mailbox-node.c:954
+msgid "Remote IMAP _subfolder..."
+msgstr "Thư mục _con IMAP ở xa..."
+
+#: ../src/mailbox-node.c:965 ../src/mailbox-node.c:981 src/gtkam-main.c:567
+msgid "_Rescan"
+msgstr "_Quét lại"
+
+#: ../src/mailbox-node.c:975 ../ui/muds.glade.h:54
+msgid "_Properties..."
+msgstr "Th_uộc tính..."
+
+#: ../src/mailbox-node.c:991 ../extensions/page-info/page-info-dialog.c:1038
+msgid "_Open"
+msgstr "_Mở"
+
+#: ../src/mailbox-node.c:1005 ../mail/mail-dialogs.glade.h:22
+#: ../extensions/rss/rss-ui.glade.h:3 ../glade/straw.glade.h:93
+msgid "_Subscribe"
+msgstr "Đăng _ký"
+
+#: ../src/mailbox-node.c:1007 ../mail/mail-dialogs.glade.h:24
+#: ../plugins/folder-unsubscribe/org-gnome-mail-folder-unsubscribe.eplug.xml.h:3
+msgid "_Unsubscribe"
+msgstr "_Bỏ đăng ký"
+
+#: ../src/mailbox-node.c:1013
+msgid "Mark as _Inbox"
+msgstr "Đánh dấu hộp Thư _Đến"
+
+#: ../src/mailbox-node.c:1015
+msgid "_Mark as Sentbox"
+msgstr "Đánh dấu hộp Đã _Gởi"
+
+#: ../src/mailbox-node.c:1017
+msgid "Mark as _Trash"
+msgstr "Đánh dấu _Rác"
+
+#: ../src/mailbox-node.c:1019
+msgid "Mark as D_raftbox"
+msgstr "Đánh dấu hộp _Nháp"
+
+#: ../src/mailbox-node.c:1022
+msgid "_Empty trash"
+msgstr "Đổ Rá_c"
+
+#: ../src/mailbox-node.c:1027
+msgid "_Edit/Apply filters"
+msgstr "_Sửa/Áp dụng bộ lọc"
+
+#: ../src/mailbox-node.c:1107
+#, c-format
+msgid "The path \"%s\" does not lead to a mailbox."
+msgstr "Đường dẫn « %s » không đi tới hộp thư."
+
+#: ../src/mailbox-node.c:1120
+#, c-format
+msgid "Local mailbox %s loaded as: %s\n"
+msgstr "Hộp thư cục bộ %s được tải dạng: %s\n"
+
+#: ../src/mailbox-node.c:1147
+#, c-format
+msgid "Local folder %s\n"
+msgstr "Thư mục cục bộ %s\n"
+
+#: ../src/main-window.c:280
+msgid "Balsa closes files and connections. Please wait..."
+msgstr "Balsa đang đóng một số tập tin và kết nối. Hãy đợi một chút..."
+
+#: ../src/main-window.c:319 ../src/main-window.c:365
+msgid "_Deleted"
+msgstr "Đã _xoá bỏ"
+
+#: ../src/main-window.c:322
+msgid "Un_Deleted"
+msgstr "Ch_ưa xoá bỏ"
+
+#: ../src/main-window.c:325 ../src/file-manager/fm-properties-window.c:2802
+#: ../gmedia_slice/interface.c:424
+msgid "_Read"
+msgstr "_Đã đọc"
+
+#: ../src/main-window.c:328
+msgid "Un_read"
+msgstr "C_hưa đọc"
+
+#: ../src/main-window.c:334
+msgid "Un_flagged"
+msgstr "Chư_a đặt cờ"
+
+#: ../src/main-window.c:337 ../src/main-window.c:377
+msgid "_Answered"
+msgstr "Đã t_rả lời"
+
+#: ../src/main-window.c:340
+msgid "Un_answered"
+msgstr "Chưa tr_ả lời"
+
+#: ../src/main-window.c:346
+msgid "_No Headers"
+msgstr "Ẩ_n dòng đầu"
+
+#: ../src/main-window.c:346
+msgid "Display no headers"
+msgstr "Không hiển thị dòng đầu"
+
+#: ../src/main-window.c:348
+msgid "S_elected Headers"
+msgstr "Dòng đầu đã _chọn"
+
+#: ../src/main-window.c:349
+msgid "Display selected headers"
+msgstr "Hiển thị những dòng đầu đã chọn"
+
+#: ../src/main-window.c:351 ../src/message-window.c:90
+msgid "All _Headers"
+msgstr "_Mọi dòng đầu"
+
+#: ../src/main-window.c:351
+msgid "Display all headers"
+msgstr "Hiển thị mọi dòng đầu"
+
+#: ../src/main-window.c:360
+msgid "Toggle flagged"
+msgstr "Bật/tắt đặt cờ"
+
+#: ../src/main-window.c:366
+msgid "Toggle deleted flag"
+msgstr "Bật/tắt cờ đã xoá bỏ"
+
+#: ../src/main-window.c:371
+msgid "Toggle New"
+msgstr "Bật/tắt Mới"
+
+#: ../src/main-window.c:377
+msgid "Toggle Answered"
+msgstr "Bật/tắt Đã trả lời"
+
+#: ../src/main-window.c:392 ../src/user_popup.c:67
+#, fuzzy
+msgid "_Message..."
+msgstr ""
+"#-#-#-#-# balsa.po (balsa HEAD) #-#-#-#-#\n"
+"_Thư...\n"
+"#-#-#-#-# gnomeicu.po (gnomeicu HEAD) #-#-#-#-#\n"
+"_Tin nhắn..."
+
+#: ../src/main-window.c:392 ../src/main-window.c:1041
+msgid "Compose a new message"
+msgstr "Soạn thảo thư mới"
+
+#. We could use GNOMEUIINFO_INCLUDE but it appends the menu instead
+#. of including at specified position
+#: ../src/main-window.c:399
+msgid "Local mbox mailbox..."
+msgstr "Hộp thư mbox cục bộ..."
+
+#: ../src/main-window.c:400 ../src/main-window.c:813
+msgid "Add a new mbox style mailbox"
+msgstr "Thêm một hộp thư kiểu mbox mới"
+
+#: ../src/main-window.c:403
+msgid "Local Maildir mailbox..."
+msgstr "Hộp thư Maildir cục bộ..."
+
+#: ../src/main-window.c:404 ../src/main-window.c:818
+msgid "Add a new Maildir style mailbox"
+msgstr "Thêm một hộp thư kiểu Maildir mới"
+
+#: ../src/main-window.c:407
+msgid "Local MH mailbox..."
+msgstr "Hộp thư MH cục bộ..."
+
+#: ../src/main-window.c:408 ../src/main-window.c:822
+msgid "Add a new MH style mailbox"
+msgstr "Thêm một hộp thư kiểu MH mới"
+
+#: ../src/main-window.c:411 ../src/main-window.c:825
+#: ../src/pref-manager.c:2970
+msgid "Remote IMAP mailbox..."
+msgstr "Hộp thư IMAP ở xa..."
+
+#: ../src/main-window.c:412 ../src/main-window.c:826
+msgid "Add a new IMAP mailbox"
+msgstr "Thêm một hộp thư kiểu IMAP mới"
+
+#: ../src/main-window.c:416 ../src/main-window.c:830
+#: ../src/pref-manager.c:2975
+msgid "Remote IMAP folder..."
+msgstr "Thư mục IMAP ở xa..."
+
+#: ../src/main-window.c:417 ../src/main-window.c:831
+msgid "Add a new IMAP folder"
+msgstr "Thêm một thư mục IMAP mới"
+
+#: ../src/main-window.c:420 ../src/main-window.c:834
+msgid "Remote IMAP subfolder..."
+msgstr "Thư mục con IMAP ở xa..."
+
+#: ../src/main-window.c:421 ../src/main-window.c:835
+msgid "Add a new IMAP subfolder"
+msgstr "Thêm một thư mục con IMAP mới"
+
+#: ../src/main-window.c:434 ../data/sound-juicer.glade.h:26
+#: ../src/sj-main.c:240 ../glade/straw.glade.h:71
+msgid "_Continue"
+msgstr "_Tiếp tục"
+
+#: ../src/main-window.c:435
+msgid "Continue editing current message"
+msgstr "Tiếp tục soạn thảo thư hiện có"
+
+#. Ctrl-M
+#: ../src/main-window.c:442 ../src/main-window.c:877
+msgid "_Get New Mail"
+msgstr "_Lấy thư mới"
+
+#: ../src/main-window.c:442 ../src/main-window.c:877
+msgid "Fetch new incoming mail"
+msgstr "Lấy các thư mới được gởi đến"
+
+#. Ctrl-S
+#: ../src/main-window.c:448 ../src/main-window.c:872
+msgid "_Send Queued Mail"
+msgstr "_Gởi thư đang đợi"
+
+#: ../src/main-window.c:449 ../src/main-window.c:873
+msgid "Send messages from the outbox"
+msgstr "Gởi các thư từ hộp Thư Đi"
+
+#. Ctrl-B
+#: ../src/main-window.c:455 ../src/main-window.c:867
+msgid "Send and _Receive Mail"
+msgstr "Gởi và _Nhận Thư"
+
+#: ../src/main-window.c:456 ../src/main-window.c:868
+#: ../src/toolbar-factory.c:113
+msgid "Send and Receive messages"
+msgstr "Gởi và nhận thư"
+
+#: ../src/main-window.c:461 ../src/main-window.c:882
+#: ../src/sendmsg-window.c:354 ../src/sendmsg-window.c:521
+#: ../plug-ins/print/print.c:172
+msgid "_Print..."
+msgstr "_In..."
+
+#: ../src/main-window.c:462 ../src/main-window.c:883
+#: ../src/toolbar-factory.c:109
+msgid "Print current message"
+msgstr "In thư hiện thời"
+
+#: ../src/main-window.c:468 ../src/main-window.c:1100
+msgid "_Address Book..."
+msgstr "_Sổ địa chỉ..."
+
+#: ../src/main-window.c:469 ../src/main-window.c:1101
+msgid "Open the address book"
+msgstr "Mở sổ địa chỉ"
+
+#: ../src/main-window.c:493
+msgid "F_ilters..."
+msgstr "_Lọc..."
+
+#: ../src/main-window.c:493 ../src/main-window.c:1087
+msgid "Manage filters"
+msgstr "Quản lý bộ lọc"
+
+#: ../src/main-window.c:495 ../src/main-window.c:1093
+msgid "_Export Filters"
+msgstr "_Xuất bộ lọc"
+
+#: ../src/main-window.c:495 ../src/main-window.c:1094
+msgid "Export filters as Sieve scripts"
+msgstr "Xuất ra các bộ lọc dạng tập lệnh Sieve"
+
+#: ../src/main-window.c:503
+msgid "_Flat index"
+msgstr "Chỉ mục _phẳng"
+
+#: ../src/main-window.c:504
+msgid "No threading at all"
+msgstr "Không có nhánh nào"
+
+#: ../src/main-window.c:509
+msgid "Si_mple threading"
+msgstr "Nhánh đ_ơn giản"
+
+#: ../src/main-window.c:510
+msgid "Simple threading algorithm"
+msgstr "Thuật toán đơn giản sắp xếp theo nhánh"
+
+#: ../src/main-window.c:515
+msgid "_JWZ threading"
+msgstr "Nhánh _JWZ"
+
+#: ../src/main-window.c:516
+msgid "Elaborate JWZ threading"
+msgstr "Sắp xếp theo nhánh JWZ phức tạp"
+
+#: ../src/main-window.c:526
+msgid "_Show Mailbox Tree"
+msgstr "_Hiện cây hộp thư"
+
+#: ../src/main-window.c:527
+msgid "Toggle display of mailbox and folder tree"
+msgstr "Hiện/Ẩn cây các hộp thư và thư mục"
+
+#: ../src/main-window.c:531
+msgid "Show Mailbox _Tabs"
+msgstr "Hiện th_anh hộp thư"
+
+#: ../src/main-window.c:532
+msgid "Toggle display of mailbox notebook tabs"
+msgstr "Hiện/Ẩn các thanh cuốn vở hộp thư"
+
+#: ../plug-ins/common/ripple.c:568
+msgid "_Wrap"
+msgstr "_Cuộn"
+
+#: ../src/sendmsg-window.c:383 ../src/sendmsg-window.c:546
+msgid "Wrap message lines"
+msgstr "Ngắt các dòng trong thư"
+
+#: ../src/quick-lounge.glade.h:11
+msgid "E_xpand All"
+msgstr "_Bung hết"
+
+#: ../src/main-window.c:546 ../src/main-window.c:959
+msgid "Expand all threads"
+msgstr "Bung mọi nhánh"
+
+#: ../src/main-window.c:550 ../src/main-window.c:963
+msgid "_Collapse All"
+msgstr "Th_u gọn hết"
+
+#: ../src/main-window.c:551 ../src/main-window.c:964
+msgid "Collapse all expanded threads"
+msgstr "Thu gọn mọi nhánh đã bung"
+
+#: src/gtkam-main.c:558
+msgid "Zoom _In"
+msgstr "Phóng _to"
+
+#: ../src/main-window.c:557 ../src/main-window.c:994
+#: ../src/message-window.c:127
+msgid "Increase magnification"
+msgstr "Phóng to vùng xem"
+
+#: src/gtkam-main.c:560
+msgid "Zoom _Out"
+msgstr "Thu _nhỏ"
+
+#: ../src/main-window.c:561 ../src/main-window.c:998
+#: ../src/message-window.c:131
+msgid "Decrease magnification"
+msgstr "Thu nhỏ vùng xem"
+
+#: ../src/message-window.c:138
+#, no-c-format
+msgid "Zoom _100%"
+msgstr "Phóng to _100%"
+
+#: ../src/main-window.c:567 ../src/main-window.c:1004
+#: ../src/message-window.c:138
+msgid "No magnification"
+msgstr "Không phóng/thu"
+
+#: ../src/main-window.c:585 ../src/main-window.c:1048
+#: ../src/toolbar-factory.c:85
+msgid "Reply to the current message"
+msgstr "Trả lời thư hiện thời"
+
+#. A
+#: ../src/main-window.c:592 ../src/main-window.c:1054
+#: ../src/message-window.c:153
+msgid "Reply to _All..."
+msgstr "Trả lời _mọi người..."
+
+#: ../src/main-window.c:593 ../src/main-window.c:1055
+msgid "Reply to all recipients of the current message"
+msgstr "Soạn thư trả lời cho mọi người nhận thư được chọn"
+
+#: ../src/main-window.c:600 ../src/message-window.c:159
+msgid "Reply to _Group..."
+msgstr "Trả lời _Nhóm..."
+
+#: ../src/main-window.c:601 ../src/message-window.c:160
+#: ../src/toolbar-factory.c:89
+msgid "Reply to mailing list"
+msgstr "Trả lời Hộp thư chung"
+
+#: ../src/main-window.c:608 ../src/message-window.c:165
+msgid "_Forward attached..."
+msgstr "Chuyển tiếp dạng đính _kèm..."
+
+#: ../src/main-window.c:609
+msgid "Forward the current message as attachment"
+msgstr "Chuyển tiếp thư được chọn tới người khác dạng đính kèm"
+
+#: ../src/main-window.c:615
+msgid "Forward _inline..."
+msgstr "Chuyển tiếp trực t_iếp..."
+
+#: ../src/main-window.c:616
+msgid "Forward the current message inline"
+msgstr "Chuyển tiếp thư được chọn tới người khác dạng thân thư"
+
+#: ../src/main-window.c:624
+msgid "Pipe the message through another program"
+msgstr "Gởi thư qua ống dẫn đến một chương trình khác"
+
+#: ../src/main-window.c:631
+msgid "_Next Part"
+msgstr "Phần _kế"
+
+#: ../src/main-window.c:631 ../src/message-window.c:177
+msgid "Next part in message"
+msgstr "Phần kế tiếp trong thư"
+
+#: ../src/main-window.c:637
+msgid "_Previous Part"
+msgstr "Phần t_rước"
+
+#: ../src/main-window.c:638 ../src/message-window.c:183
+msgid "Previous part in message"
+msgstr "Phần trước đó trong thư"
+
+#: ../src/main-window.c:644 ../src/main-window.c:1068
+#: ../src/message-window.c:187
+msgid "Save Current Part..."
+msgstr "Lưu phần hiện có..."
+
+#: ../src/main-window.c:645 ../src/main-window.c:1069
+msgid "Save currently displayed part of message"
+msgstr "Lưu phần thư hiện thời được hiển thị"
+
+#: ../src/main-window.c:651 ../src/main-window.c:1021
+#: ../src/message-window.c:192
+msgid "_View Source..."
+msgstr "_Xem mã nguồn..."
+
+#: ../src/main-window.c:652 ../src/main-window.c:1022
+#: ../src/message-window.c:193
+msgid "View source form of the message"
+msgstr "Xem thư dạng mã nguồn"
+
+#: ../src/main-window.c:670 ../src/message-window.c:228
+msgid "_Move to Trash"
+msgstr "Chu_yển vào Rác"
+
+#: ../src/main-window.c:671 ../src/main-window.c:1076
+msgid "Move the current message to Trash mailbox"
+msgstr "Chuyển thư hiện thời vào hộp thư Rác"
+
+#. !
+#: ../src/main-window.c:677 ../src/main-window.c:1027
+msgid "_Toggle flag"
+msgstr "_Bật/tắt đặt cờ"
+
+#: ../src/main-window.c:683 ../src/main-window.c:1032
+msgid "Store address of sender in addressbook"
+msgstr "Lưu địa chỉ của người gởi vào sổ địa chỉ"
+
+#: ../src/main-window.c:694 ../src/main-window.c:985
+msgid "Next Message"
+msgstr "Thư kế"
+
+#: ../src/main-window.c:700 ../src/main-window.c:989
+#: ../src/message-window.c:207
+msgid "Previous Message"
+msgstr "Thư trước"
+
+#: ../src/main-window.c:706 ../src/main-window.c:707 ../src/main-window.c:979
+#: ../src/main-window.c:980 ../src/message-window.c:213
+#: ../src/message-window.c:214
+msgid "Next Unread Message"
+msgstr "Thư chưa đọc kế"
+
+#: ../src/main-window.c:713 ../src/main-window.c:714 ../src/main-window.c:944
+#: ../src/main-window.c:945 ../src/message-window.c:220
+#: ../src/message-window.c:221
+msgid "Next Flagged Message"
+msgstr "Thư đã đặt cờ kế"
+
+#: ../src/main-window.c:721
+msgid "_Hide messages"
+msgstr "Ẩ_n thư"
+
+#: ../src/main-window.c:723
+msgid "_Reset Filter"
+msgstr "Đặt _lại bộ lọc"
+
+#: ../src/main-window.c:723
+msgid "Reset mailbox filter"
+msgstr "Đặt lại bộ lọc hộp thư"
+
+#: ../data/sound-juicer.glade.h:36 ../src/yelp-window.c:320
+#: ../src/glade-editor.c:1050 ../src/form-win.cc:381 ../src/form-win.cc:443
+msgid "_Select All"
+msgstr "_Chọn hết"
+
+#: ../src/main-window.c:729
+msgid "Select all messages in current mailbox"
+msgstr "Chọn mọi thư trong hộp thư hiện có"
+
+#. #-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#
+#. Custom editor button
+#.
+#: ../ui/muds.glade.h:50 ../glade/pyblio.glade.in.h:17
+#: ../src/glade-editor.c:117
+msgid "_Edit..."
+msgstr "_Hiệu chỉnh..."
+
+#: ../src/main-window.c:735 ../src/main-window.c:845
+msgid "Edit the selected mailbox"
+msgstr "Hiệu chỉnh hộp thư đã chọn"
+
+#: ../src/main-window.c:739 ../src/main-window.c:840
+#: ../glade/pyblio.glade.in.h:16
+msgid "_Delete..."
+msgstr "_Xoá bỏ..."
+
+#: ../src/main-window.c:740 ../src/main-window.c:841
+msgid "Delete the selected mailbox"
+msgstr "Xoá bỏ hộp thư đã chọn"
+
+#: ../src/main-window.c:746
+msgid "E_xpunge Deleted Messages"
+msgstr "_Xoá hẳn các thư đã xoá bỏ"
+
+#: ../src/main-window.c:747 ../src/main-window.c:852
+msgid "Expunge messages marked as deleted in the currently opened mailbox"
+msgstr "Xoá hẳn các thư có nhãn « Đã xoá bỏ » trong hộp thư hiện thời đang mở"
+
+#: ../src/main-window.c:752 ../src/main-window.c:858
+msgid "Close mailbox"
+msgstr "Đóng hộp thư"
+
+#: ../src/nautilus-information-panel.c:1039
+msgid "Empty _Trash"
+msgstr "Đổ _Rác"
+
+#: ../src/toolbar-factory.c:135
+msgid "Delete messages from the Trash mailbox"
+msgstr "Xoá bỏ thư khỏi hộp thư Rác"
+
+#: ../src/main-window.c:761
+msgid "Select _Filters"
+msgstr "Chọn bộ _lọc"
+
+#: ../src/main-window.c:762 ../src/main-window.c:1091
+msgid "Select filters to be applied automatically to current mailbox"
+msgstr "Chọn các bộ lọc cần tự động áp dụng cho hộp thư hiện thời"
+
+#: ../src/main-window.c:767
+msgid "_Remove Duplicates"
+msgstr "_Gỡ bỏ thư trùng"
+
+#: ../src/main-window.c:768
+msgid "Remove duplicated messages from the selected mailbox"
+msgstr "Gỡ bỏ các thư trùng khỏi hộp thư được chọn"
+
+#: ../src/main-window.c:783
+msgid "_Toolbars..."
+msgstr "Thanh _công cụ..."
+
+#: ../src/main-window.c:784 ../src/ephy-window.c:191
+msgid "Customize toolbars"
+msgstr "Tùy chỉnh thanh công cụ"
+
+#: ../src/main-window.c:787 ../src/main-window.c:1109
+msgid "_Identities..."
+msgstr "_Thực thể..."
+
+#: ../src/main-window.c:788 ../src/main-window.c:1110
+msgid "Create and set current identities"
+msgstr "Tạo và đặt thực thể hiện thời"
+
+#: ../src/main-window.c:799
+msgid "Mail_box"
+msgstr "_Hộp thư"
+
+#: ../src/main-window.c:800 ../src/main-window.c:1123
+#: ../src/message-window.c:245 ../ui/evolution-mail-list.xml.h:27
+#: ../ui/evolution-mail-message.xml.h:110
+msgid "_Message"
+msgstr "_Thư"
+
+#: ../src/main-window.c:812
+msgid "New mailbox..."
+msgstr "Hộp thư mới..."
+
+#: ../src/main-window.c:817
+msgid "New \"Maildir\" mailbox..."
+msgstr "Hộp thư Maildir mới..."
+
+#: ../src/main-window.c:821
+msgid "New \"MH\" mailbox..."
+msgstr "Hộp thư MH mới..."
+
+#: ../src/main-window.c:851
+msgid "_Compress Mailbox"
+msgstr "_Nén hộp thư"
+
+#: ../src/main-window.c:881
+msgid "Mail_boxes"
+msgstr "_Hộp thư"
+
+#: ../src/main-window.c:906
+msgid "By _Arrival"
+msgstr "Theo giờ _đến"
+
+#: ../src/main-window.c:906
+msgid "Arrival order"
+msgstr "Thứ tự đến"
+
+#: ../src/main-window.c:910
+msgid "By _Sender"
+msgstr "Theo người _gởi"
+
+#: ../src/main-window.c:910
+msgid "Sender order"
+msgstr "Thứ tự người gởi"
+
+#: ../src/main-window.c:914
+msgid "By S_ubject"
+msgstr "Theo _chủ đề"
+
+#: ../src/main-window.c:914
+msgid "Subject order"
+msgstr "Thứ tự chủ đề"
+
+#: ../src/main-window.c:918
+msgid "By Si_ze"
+msgstr "Theo c_ỡ"
+
+#: ../src/main-window.c:918
+msgid "By message size"
+msgstr "Theo kích cỡ thư"
+
+#: ../src/main-window.c:922
+msgid "_Threaded"
+msgstr "_Nhánh"
+
+#: ../src/main-window.c:922
+msgid "Use message threading"
+msgstr "Sắp xếp thư theo nhánh"
+
+#: ../src/main-window.c:930 ../gtk/gtkstock.c:405
+msgid "_Descending"
+msgstr "_Giảm dần"
+
+#: ../src/main-window.c:930
+msgid "Sort in a descending order"
+msgstr "Sắp xếp giảm dần"
+
+#: ../src/main-window.c:951
+msgid "_Headers"
+msgstr "_Dòng đầu"
+
+#: ../src/main-window.c:954
+msgid "_Sort Mailbox"
+msgstr "_Sắp xếp hộp thư"
+
+#: ../src/main-window.c:955
+msgid "H_ide messages"
+msgstr "Ẩ_n thư"
+
+#: ../src/main-window.c:969
+msgid "_View filter"
+msgstr "_Xem bộ lọc"
+
+#: ../src/main-window.c:970
+msgid "Enable quick message index filter"
+msgstr "Bật bộ lọc chỉ mục thư nhanh"
+
+#: ../src/sendmsg-window.c:645 ../desktop/client/src/connect.c:454
+msgid "_More"
+msgstr "Th_êm"
+
+#: ../src/main-window.c:1041 ../app/actions/image-actions.c:71
+#: ../app/actions/image-actions.c:76
+msgid "_New..."
+msgstr "_Mới..."
+
+#. F
+#: ../src/main-window.c:1061
+msgid "_Forward..."
+msgstr "Chu_yển tiếp..."
+
+#: ../src/main-window.c:1062 ../src/toolbar-factory.c:91
+msgid "Forward the current message"
+msgstr "Chuyển tiếp thư hiện có"
+
+#. D
+#: ../src/main-window.c:1075
+msgid "_Delete to Trash"
+msgstr "_Xoá bỏ vào Rác"
+
+#: ../src/main-window.c:1087
+msgid "_Manage..."
+msgstr "_Quản lý..."
+
+#: ../src/main-window.c:1090
+msgid "_Select Filters"
+msgstr "_Chọn bộ lọc"
+
+#: ../src/main-window.c:1108
+msgid "_Filters"
+msgstr "_Lọc"
+
+#: ../src/mlview-app.cc:240
+msgid "_Tools"
+msgstr "_Công cụ"
+
+#: ../src/main-window.c:1228
+msgid ""
+"Balsa is sending a mail now.\n"
+"Abort sending?"
+msgstr ""
+"Balsa hiện đang gởi thư.\n"
+"Muốn hủy việc gởi thư?"
+
+#: ../src/main-window.c:1448
+msgid "Subject or Sender Contains:"
+msgstr "Chủ đề hay Người gởi chứa:"
+
+#: ../src/main-window.c:1449
+msgid "Subject or Recipient Contains:"
+msgstr "Chủ đề hay Người nhận chứa:"
+
+#: ../src/main-window.c:1450
+msgid "Subject Contains:"
+msgstr "Chủ đề chứa:"
+
+#: ../src/main-window.c:1451
+msgid "Body Contains:"
+msgstr "Thân chứa:"
+
+#: ../src/main-window.c:1452
+msgid "Older than (days):"
+msgstr "Cũ hơn (ngày):"
+
+#: ../src/main-window.c:2331
+#, c-format
+msgid ""
+"Unable to Open Mailbox!\n"
+"%s."
+msgstr ""
+"• Không thể mở hộp thư. •\n"
+"%s."
+
+#: ../gnome/applet/applet.c:356 ../pan/dialogs/dialog-about.c:86
+#: ../gnome/applet/applet.c:329
+msgid "translator-credits"
+msgstr "Nhóm Việt hóa Gnome "
+
+#: ../src/main-window.c:2656 ../src/main-window.c:2683
+msgid ""
+"The Balsa email client is part of the GNOME desktop environment. "
+"Information on Balsa can be found at http://balsa.gnome.org/\n"
+"\n"
+"If you need to report bugs, please do so at: http://bugzilla.gnome.org/"
+msgstr ""
+"Ứng dụng khách thư điện tử Balsa là một phần của môi trường Gnome. Thông "
+"tin về Balsa có ở http://balsa.gnome.org/\n"
+"\n"
+"Nếu bạn muốn thông báo lỗi, hãy thực hiện tại http://bugzilla.gnome.org/."
+
+#: ../src/main-window.c:2787 ../src/main-window.c:2800
+msgid "Checking Mail..."
+msgstr "Đang kiểm tra thư..."
+
+#: ../src/main-window.c:2978
+#, c-format
+msgid "IMAP mailbox: %s"
+msgstr "Hộp thư IMAP: %s"
+
+#: ../src/main-window.c:2981
+#, c-format
+msgid "Local mailbox: %s"
+msgstr "Hộp thư cục bộ : %s"
+
+#: ../src/main-window.c:3119
+msgid "Finished Checking."
+msgstr "Mới kiểm tra xong."
+
+#: ../src/main-window.c:3181
+#, c-format
+msgid "Sending error: %s"
+msgstr "Lỗi gởi : %s"
+
+#: ../src/main-window.c:3266
+msgid "Balsa: New mail"
+msgstr "Balsa: Thư mới"
+
+#: ../src/main-window.c:3278
+#, c-format
+msgid "You have received %d new message."
+msgid_plural "You have received %d new messages."
+msgstr[0] "Bạn đã nhận %d thư mới."
+
+#: ../src/main-window.c:3281 libmisc/mail.c:62 libmisc/mail.c:77
+#: libmisc/mail.c:61 libmisc/mail.c:76
+msgid "You have new mail."
+msgstr "Bạn có thư mới."
+
+#: ../src/main-window.c:3405
+#, c-format
+msgid "The next unread message is in %s"
+msgstr "Thư chưa đọc kế tiếp có trong %s"
+
+#: ../src/main-window.c:3409
+#, c-format
+msgid "Do you want to switch to %s?"
+msgstr "Bạn có muốn chuyển đổi sang %s không?"
+
+#: ../src/main-window.c:3939
+msgid "Search mailbox"
+msgstr "Tìm kiếm trong hộp thư"
+
+#: ../src/main-window.c:3956 ../gdictsrc/gdict-app.c:401 src/mainwin.cpp:1219
+msgid "_Search for:"
+msgstr "Tìm _kiếm:"
+
+#. builds the toggle buttons to specify fields concerned by
+#. * the search.
+#: ../src/main-window.c:3969
+msgid "In:"
+msgstr "Trong:"
+
+#: ../src/main-window.c:3980
+msgid "S_ubject"
+msgstr "_Chủ đề"
+
+#. Frame with Apply and Clear buttons
+#: ../src/main-window.c:3985
+msgid "Show only matching messages"
+msgstr "Hiển thị chỉ những thư khớp"
+
+#. Frame with OK button
+#: ../src/main-window.c:4006
+msgid "Open next matching message"
+msgstr "Mở thư khớp kế tiếp"
+
+#: ../src/main-window.c:4017
+msgid "_Reverse search"
+msgstr "_Đảo hướng tìm kiếm"
+
+#: ../src/main-window.c:4022 ../glade2/filediff.glade.h:15
+#: ../app/dialogs/offset-dialog.c:215 ../plug-ins/common/papertile.c:349
+msgid "_Wrap around"
+msgstr "_Cuộn vòng"
+
+#: ../src/main-window.c:4184
+msgid "You can apply filters only on mailbox\n"
+msgstr "Chỉ có thể áp dụng bộ lọc cho hộp thư\n"
+
+#: ../src/main-window.c:4198
+#, c-format
+msgid "Removing duplicates failed: %s"
+msgstr "Việc gỡ bỏ thư trùng bị lỗi: %s"
+
+#: ../src/main-window.c:4418
+#, c-format
+msgid "Could not open trash: %s"
+msgstr "Không thể mở Rác: %s"
+
+#: ../src/main-window.c:4547
+#, c-format
+msgid "Balsa: %s (readonly)"
+msgstr "Balsa: %s (chỉ đọc)"
+
+# Name: don't translate / Tên: đừng dịch
+#: ../src/main-window.c:4549
+#, c-format
+msgid "Balsa: %s"
+msgstr "Balsa: %s"
+
+#: ../src/main.c:212 ../src/main.c:260
+msgid "Get new mail on startup"
+msgstr "Lấy thư mới khi khởi chạy"
+
+#: ../src/main.c:214 ../src/main.c:262
+msgid "Compose a new email to EMAIL@ADDRESS"
+msgstr "Biên soạn một thư mới cho TÊN@ĐỊA_CHỈ"
+
+#: ../src/main.c:216 ../src/main.c:264
+msgid "Attach file at PATH"
+msgstr "Đính kèm tập tin tại ĐƯỜNG_DẪN"
+
+#: ../src/main.c:218 ../src/main.c:267
+msgid "Opens MAILBOXNAME"
+msgstr "Mở TÊN_HỘP_THƯ"
+
+#: ../src/main.c:218 ../src/main.c:267
+msgid "MAILBOXNAME"
+msgstr "TÊN_HỘP_THƯ"
+
+#: ../src/main.c:221 ../src/main.c:270
+msgid "Opens first unread mailbox"
+msgstr "Mở hộp thư chưa đọc đầu tiên"
+
+#: ../src/main.c:224 ../src/main.c:273
+msgid "Opens default Inbox on startup"
+msgstr "Mở hộp Thư Đến mặc định khi khởi chạy"
+
+#: ../src/main.c:227 ../src/main.c:276
+msgid "Prints number unread and unsent messages"
+msgstr "In số thư chưa đọc và chưa gởi"
+
+#: ../src/main.c:229 ../src/main.c:278
+msgid "Debug POP3 connection"
+msgstr "Gỡ lỗi kết nối POP3"
+
+#: ../src/main.c:231 ../src/main.c:280
+msgid "Debug IMAP connection"
+msgstr "Gỡ lỗi kết nối IMAP"
+
+#: ../src/main.c:253
+msgid "The Balsa E-Mail Client"
+msgstr "Ứng dụng khách thư điện tử Balsa"
+
+#: ../src/main.c:345
+#, c-format
+msgid "Balsa cannot open your \"%s\" mailbox."
+msgstr "Balsa không thể mở hộp thư « %s » của bạn."
+
+#: ../src/main.c:322 ../storage/exchange-hierarchy-foreign.c:254
+msgid "Inbox"
+msgstr "Thư Đến"
+
+#: ../src/main.c:328 ../storage/exchange-hierarchy-foreign.c:257
+msgid "Outbox"
+msgstr "Thư Đi"
+
+#: ../src/main.c:334
+msgid "Sentbox"
+msgstr "Đã gởi"
+
+#: ../src/main.c:340
+msgid "Draftbox"
+msgstr "Nháp"
+
+#: ../src/main.c:345 ../libnautilus-private/nautilus-trash-directory.c:343
+msgid "Trash"
+msgstr "Rác"
+
+#: ../src/main.c:562
+msgid "Compressing mail folders..."
+msgstr "Đang nén thư mục thư..."
+
+#: ../src/message-window.c:86
+msgid "N_o Headers"
+msgstr "Ẩ_n dòng đầu"
+
+#: ../src/message-window.c:88
+msgid "_Selected Headers"
+msgstr "Dòng đầu đã _chọn"
+
+#: ../src/message-window.c:148
+msgid "Reply to this message"
+msgstr "Trả lời thư này"
+
+#: ../src/message-window.c:154
+msgid "Reply to all recipients of this message"
+msgstr "Trả lời mọi người nhận thư này"
+
+#: ../src/message-window.c:166
+msgid "Forward this message as attachment"
+msgstr "Chuyển tiếp thư này tới người khác dạng đính kèm"
+
+#: ../src/message-window.c:170
+msgid "Forward inline..."
+msgstr "Chuyển tiếp trực tiếp..."
+
+#: ../src/message-window.c:171
+msgid "Forward this message inline"
+msgstr "Chuyển tiếp thư này tới người khác dạng thân thư"
+
+#: ../src/message-window.c:177
+msgid "Next Part"
+msgstr "Phần kế"
+
+#: ../src/message-window.c:182
+msgid "Previous Part"
+msgstr "Phần trước"
+
+#: ../src/message-window.c:188
+msgid "Save current part in message"
+msgstr "Lưu phần hiện thời trong thư"
+
+#: ../src/message-window.c:200 ../ui/evolution-mail-message.xml.h:117
+#: ../ui/evolution-mail-message.xml.h:112
+msgid "_Next Message"
+msgstr "Thư _kế"
+
+#: ../src/message-window.c:200
+msgid "Next message"
+msgstr "Thư kế tiếp"
+
+#: ../src/message-window.c:206 ../ui/evolution-mail-message.xml.h:121
+#: ../ui/evolution-mail-message.xml.h:116
+msgid "_Previous Message"
+msgstr "Thư t_rước"
+
+#: ../src/message-window.c:229
+msgid "Move the message to Trash mailbox"
+msgstr "Chuyển thư vào hộp thư Rác"
+
+#: ../src/message-window.c:244
+msgid "M_ove"
+msgstr "Chu_yển"
+
+#: ../src/message-window.c:366
+#, c-format
+msgid "Message from %s: %s"
+msgstr "Thư từ %s: %s"
+
+#: ../src/balsa-mime-widget-callbacks.c:50
+#: ../src/balsa-mime-widget-callbacks.c:114
+#, c-format
+msgid "Could not create temporary file %s: %s"
+msgstr "Không thể tạo tập tin tạm thời %s: %s"
+
+#: ../src/balsa-mime-widget-callbacks.c:140
+#, c-format
+msgid "Save %s MIME Part"
+msgstr "Lưu phần MIME %s"
+
+#: ../src/balsa-mime-widget-callbacks.c:187
+msgid "File already exists. Overwrite?"
+msgstr "Tập tin đã có, ghi đè không?"
+
+#: ../src/balsa-mime-widget-crypto.c:70
+#, c-format
+msgid ""
+"This is an inline %s signed %s message part:\n"
+"%s"
+msgstr ""
+"Đây là phần thư trực tiếp %s có chữ ký %s:\n"
+"%s"
+
+# Name: don't translate / Tên: đừng dịch
+#: ../src/balsa-mime-widget-crypto.c:72 ../src/print.c:1302
+msgid "OpenPGP"
+msgstr "OpenPGP"
+
+#: ../src/balsa-mime-widget-crypto.c:89
+msgid "_Run gpg to import this key"
+msgstr "_Chạy gpg để nhập khoá này"
+
+#: ../src/balsa-mime-widget-image.c:51 ../src/balsa-mime-widget-image.c:156
+#, c-format
+msgid "Error loading attached image: %s\n"
+msgstr "Gặp lỗi khi tải ảnh đính kèm: %s\n"
+
+#: ../src/balsa-mime-widget-message.c:164
+#: ../src/balsa-mime-widget-message.c:177
+#: ../src/balsa-mime-widget-message.c:205
+#: ../src/balsa-mime-widget-message.c:258
+msgid "Content Type: external-body\n"
+msgstr "Kiểu nội dung: thân bên ngoài\n"
+
+#: ../src/balsa-mime-widget-message.c:165
+msgid "Access type: local-file\n"
+msgstr "Kiểu truy cập: tập tin cục bộ\n"
+
+#: ../src/balsa-mime-widget-message.c:166
+#: ../src/balsa-mime-widget-message.c:213 ../src/balsa-mime-widget.c:236
+#, c-format
+msgid "File name: %s"
+msgstr "Tên tập tin: %s"
+
+#: ../src/balsa-mime-widget-message.c:178
+msgid "Access type: URL\n"
+msgstr "Kiểu truy cập: địa chỉ Mạng\n"
+
+#: ../src/balsa-mime-widget-message.c:179 ../calendar/gui/print.c:2423
+#: ../calendar/gui/print.c:2412 ../plug-ins/imagemap/imap_main.c:1061
+#, c-format
+msgid "URL: %s"
+msgstr "Địa chỉ Mạng: %s"
+
+#: ../src/balsa-mime-widget-message.c:206
+#, c-format
+msgid "Access type: %s\n"
+msgstr "Kiểu truy cập: %s\n"
+
+#: ../src/balsa-mime-widget-message.c:210
+#, c-format
+msgid "FTP site: %s\n"
+msgstr "Chỗ Mạng FTP: %s\n"
+
+#: ../src/balsa-mime-widget-message.c:212
+#, c-format
+msgid "Directory: %s\n"
+msgstr "Thư mục: %s\n"
+
+#: ../src/balsa-mime-widget-message.c:259
+msgid "Access type: mail-server\n"
+msgstr "Kiểu truy cập: máy phục vụ thư\n"
+
+#: ../src/balsa-mime-widget-message.c:260
+#, c-format
+msgid "Mail server: %s\n"
+msgstr "Máy phục vụ thư : %s\n"
+
+#: ../src/balsa-mime-widget-message.c:262
+#, c-format
+msgid "Subject: %s\n"
+msgstr "Chủ đề: %s\n"
+
+#: ../src/balsa-mime-widget-message.c:279
+msgid "Se_nd message to obtain this part"
+msgstr "Gởi thư để lấy phần này"
+
+#: ../src/balsa-mime-widget-message.c:300 ../src/balsa-mime-widget-text.c:675
+#: ../src/balsa-mime-widget-text.c:866 ../src/sendmsg-window.c:1762
+#, c-format
+msgid "Error showing %s: %s\n"
+msgstr "Gặp lỗi khi hiển thị %s: %s\n"
+
+#: ../src/balsa-mime-widget-message.c:332 ../src/print.c:675
+#, c-format
+msgid "Could not get a part: %s"
+msgstr "Không thể lấy phần: %s"
+
+#: ../src/balsa-mime-widget-message.c:601 ../src/print.c:350
+#: ../src/sendmsg-window.c:1105 ../src/sendmsg-window.c:3310
+#: ../src/sendmsg-window.c:5068 ../xpdf/gpdf-properties-dialog.glade.h:11
+msgid "Subject:"
+msgstr "Chủ đề:"
+
+#: ../src/lib/ItemView.py:357 Expense/expense.c:1739
+msgid "Date:"
+msgstr "Ngày:"
+
+#: ../plug-ins/xslt/xsltdialog.c:111 ../glade/medline.glade.h:3
+#: src/splash.c:806
+msgid "From:"
+msgstr "Từ :"
+
+#: ../src/balsa-mime-widget-message.c:618 ../src/sendmsg-window.c:1102
+msgid "Reply-To:"
+msgstr "Trả lời:"
+
+#: ../glade/medline.glade.h:16
+msgid "To:"
+msgstr ""
+"#-#-#-#-# balsa.po (balsa HEAD) #-#-#-#-#\n"
+"Cho :\n"
+"#-#-#-#-# Compendium03.po (apt) #-#-#-#-#\n"
+"Đến:\n"
+"#-#-#-#-# dia.po (dia HEAD) #-#-#-#-#\n"
+"Đến:\n"
+"#-#-#-#-# drivel.po (drivel HEAD) #-#-#-#-#\n"
+"Đến:\n"
+"#-#-#-#-# pybliographer.po (pybliographer.v_1_0_x) #-#-#-#-#\n"
+"Đến:"
+
+#: ../src/bug-buddy.glade.h:21
+msgid "Cc:"
+msgstr "Chép cho:"
+
+#: ../src/store-address.c:308
+msgid "Bcc:"
+msgstr "Bcc:"
+
+# Literal: don't translate / Nghĩa chữ : đừng dịch
+#: ../src/balsa-mime-widget-message.c:627 ../src/print.c:368
+msgid "Fcc:"
+msgstr "Fcc:"
+
+#: ../src/balsa-mime-widget-message.c:634 ../src/print.c:374
+msgid "Disposition-Notification-To:"
+msgstr "Thông báo chuyển nhượng:"
+
+#: ../src/balsa-mime-widget-text.c:115
+#, c-format
+msgid "Could not save a text part: %s"
+msgstr "Không thể lưu phần văn bản: %s"
+
+#: ../src/balsa-mime-widget-text.c:150
+#, c-format
+msgid ""
+"The message sent by %s with subject \"%s\" contains 8-bit characters, but no "
+"header describing the used codeset (converted to %s)"
+msgstr ""
+"Thư được gởi bởi %s với chủ đề « %s » chứa ký tự 8-bit, nhưng không có dòng "
+"đầu diễn tả bộ ký tự đã dùng (đã chuyển đổi sang %s)"
+
+#: ../src/balsa-mime-widget-text.c:447
+msgid "Highlight structured phrases"
+msgstr "Tô sáng các cụm từ có cấu trúc"
+
+#: ../src/balsa-mime-widget-text.c:665
+#, c-format
+msgid "Calling URL %s..."
+msgstr "Đang gọi địa chỉ Mạng %s..."
+
+#: ../src/balsa-mime-widget.c:255
+#, c-format
+msgid "Error reading message part: %s"
+msgstr "Gặp lỗi khi đọc phần thư : %s"
+
+#: ../src/balsa-mime-widget.c:279
+#, c-format
+msgid "Type: %s (%s)"
+msgstr "Kiểu : %s (%s)"
+
+#: ../src/balsa-mime-widget.c:282
+#, c-format
+msgid "Content Type: %s"
+msgstr "Kiểu nội dung: %s"
+
+#: ../src/balsa-mime-widget.c:296
+msgid "No open or view action defined in GNOME MIME for this content type"
+msgstr ""
+"Không có hành động mở hay xem được chỉ định trong GNOME MIME cho kiểu nội "
+"dung này."
+ +#: ../src/balsa-mime-widget.c:302 +msgid "S_ave part" +msgstr "_LÆ°u phần" + +#: ../src/balsa-mime-widget.c:324 ../src/balsa-mime-widget.c:348 +#, c-format +msgid "View _part with %s" +msgstr "Xem _phần bằng %s" + +#: ../src/pref-manager.c:360 +msgid "While Retrieving Messages" +msgstr "Trong khi lấy thÆ°" + +#: ../src/pref-manager.c:361 +msgid "Until Closed" +msgstr "Äến khi đã đóng" + +#: ../src/pref-manager.c:367 ../gtik/gtik.c:1415 +msgid "Fast" +msgstr "Nhanh" + +#: ../glade/glade_menu_editor.c:1068 ../glade/property.c:102 +#: ../src/glade-gtk.c:2356 ../widgets/gtk+.xml.in.h:127 +#: libexif/exif-entry.c:409 libexif/exif-entry.c:412 libexif/exif-entry.c:413 +#: libexif/exif-entry.c:414 libexif/exif-entry.c:473 +#: libexif/olympus/mnote-olympus-entry.c:103 +#: libexif/olympus/mnote-olympus-entry.c:148 +#: libexif/olympus/mnote-olympus-entry.c:154 +#: libexif/pentax/mnote-pentax-entry.c:92 +#: libexif/pentax/mnote-pentax-entry.c:97 +#: libexif/pentax/mnote-pentax-entry.c:102 +msgid "Normal" +msgstr "Chuẩn" + +#: ../src/pref-manager.c:369 +msgid "Bad Spellers" +msgstr "NgÆ°á»i chính tả sai" + +#: ../src/pref-manager.c:375 +msgid "Message number" +msgstr "Số thứ tá»± thÆ°" + +#: ../src/pref-manager.c:379 ../mail/em-filter-i18n.h:56 +msgid "Sender" +msgstr "NgÆ°á»i gởi" + +#: ../src/pref-manager.c:383 +msgid "Flat" +msgstr "Phẳng" + +# Name: don't translate / Tên: đừng dịch +#: ../src/pref-manager.c:385 +msgid "JWZ" +msgstr "JWZ" + +#. must NOT be modal +#: ../src/pref-manager.c:436 +msgid "Balsa Preferences" +msgstr "Tùy thích Balsa" + +#: ../src/pref-manager.c:459 +msgid "Mail Servers" +msgstr "Máy phục vụ thÆ°" + +#: ../src/pref-manager.c:463 ../src/pref-manager.c:2595 +msgid "Address Books" +msgstr "Sổ địa chỉ" + +#: ../src/pref-manager.c:467 +msgid "Mail Options" +msgstr "Tùy chá»n thÆ°" + +#: ../objects/FS/function.c:1060 ../sheets/Flowchart.sheet.in.h:6 +msgid "Display" +msgstr "Hiển thị" + +#: ../src/pref-manager.c:476 ../src/toolbar-factory.c:120 +msgid "Spelling" +msgstr "Chính tả" + +#: ../src/pref-manager.c:481 ../app/interface.c:973 +#: ../sheets/Misc.sheet.in.h:3 ../app/interface.c:985 ../app/interface.c:998 +#: app/midi-settings-050.c:587 app/midi-settings-09x.c:590 +msgid "Misc" +msgstr "Lặt vặt" + +#: ../src/pref-manager.c:485 +msgid "Startup" +msgstr "Khởi chạy" + +#: ../src/pref-manager.c:1223 +#, c-format +msgid "%s (default)" +msgstr "%s (mặc định)" + +#: ../src/pref-manager.c:1470 +msgid "Remote Mailbox Servers" +msgstr "Máy phục vụ há»™p thÆ° ở xa" + +#: ../glade/fields1.glade.h:20 ../src/glade-gtk.c:73 ../src/glade-gtk.c:3530 +#: ../mimedir/mimedir-vcard-email.c:149 schroot/sbuild-chroot.cc:388 +msgid "Type" +msgstr "Kiểu" + +#: ../src/pref-manager.c:1499 +msgid "Mailbox Name" +msgstr "Tên há»™p thÆ°" + +#: ../gnopi/gnopi_files/User_Properties/user_properties.glade2.h:28 +msgid "_Modify" +msgstr "_Sá»­a đổi" + +#: ../src/pref-manager.c:1524 +msgid "Local Mail" +msgstr "ThÆ° cục bá»™" + +#: ../src/pref-manager.c:1527 ../src/pref-manager.c:1535 +msgid "Select your local mail directory" +msgstr "Chá»n thÆ° mục thÆ° cục bá»™" + +#: ../src/pref-manager.c:1563 +msgid "Outgoing Mail Servers" +msgstr "Máy phục vụ thÆ° gởi Ä‘i" + +#: ../src/pref-manager.c:1585 src/common/text.c:729 src/common/text.c:846 +#: src/common/text.c:888 +msgid "Server Name" +msgstr "Tên máy phục vụ" + +#: ../src/pref-manager.c:1622 ../filter/filter-rule.c:978 +#: ../filter/filter.glade.h:3 ../mail/em-utils.c:347 ../mail/em-utils.c:291 +msgid "Incoming" +msgstr "Gởi đến" + +#: 
../src/pref-manager.c:1624 ../filter/filter-rule.c:978 +#: ../mail/em-utils.c:348 ../mail/em-utils.c:292 +msgid "Outgoing" +msgstr "Gởi đi" + +#: ../src/pref-manager.c:1648 +msgid "Checking" +msgstr "Kiểm tra" + +#: ../src/pref-manager.c:1653 +msgid "_Check mail automatically every:" +msgstr "Tự động _kiểm tra thư mỗi:" + +#: ../ui/prefs.glade.h:41 +msgid "minutes" +msgstr "phút" + +#: ../src/pref-manager.c:1670 +msgid "Check _IMAP mailboxes" +msgstr "Kiểm tra các hộp thư _IMAP" + +#: ../src/pref-manager.c:1676 +msgid "Check INBOX _only" +msgstr "Chỉ kiểm tra hộp Thư _Đến" + +#: ../src/pref-manager.c:1681 +msgid "Display message if new mail has arrived in an open mailbox" +msgstr "Hiển thị thông báo nếu có thư mới đến trong hộp thư đang mở" + +#: ../src/pref-manager.c:1686 +msgid "Do background check quietly (no messages in status bar)" +msgstr "" +"Chạy kiểm tra ở nền một cách thầm lặng (không hiển thị thông báo lên thanh trạng " +"thái)" + +#: ../src/pref-manager.c:1690 +msgid "_POP message size limit:" +msgstr "Hạn chế kích cỡ thư _POP:" + +#. Quoted text regular expression +#. and RFC2646-style flowed text +#: ../src/pref-manager.c:1716 +msgid "Quoted and Flowed Text" +msgstr "Văn bản trích dẫn và trôi chảy" + +#: ../src/pref-manager.c:1720 ../src/sendmsg-window.c:5636 +msgid "Quoted Text Regular Expression" +msgstr "Biểu thức chính quy cho văn bản trích dẫn" + +#: ../src/pref-manager.c:1731 +msgid "Wrap Incoming Text at:" +msgstr "Cuộn văn bản gởi đến tại:" + +#: ../src/pref-manager.c:1742 ../src/pref-manager.c:1905 +#: ../data/prefs-dialog.glade.h:9 +#: ../network-utilities/gnome-remote-shell.glade.h:18 +msgid "characters" +msgstr "ký tự" + +#. handling of multipart/alternative +#: ../src/pref-manager.c:1757 +msgid "Display of Multipart/Alternative Parts" +msgstr "Hiển thị phần Đa phần/Xen kẽ" + +#: ../src/pref-manager.c:1760 +msgid "prefer text/plain over html" +msgstr "thích chữ thô hơn HTML" + +#. treatment of messages with 8-bit chars, but without proper MIME encoding +#: ../src/pref-manager.c:1776 +msgid "National (8-bit) characters in broken messages without codeset header" +msgstr "" +"Ký tự thuộc quốc gia (8-bit) trong thư bị hỏng không có dòng đầu bộ ký tự" + +#: ../src/pref-manager.c:1782 +msgid "display as \"?\"" +msgstr "hiển thị dạng « ? »" + +#: ../src/pref-manager.c:1791 +msgid "display using codeset" +msgstr "hiển thị bằng bộ ký tự" + +#. How to handle received MDN requests +#: ../src/pref-manager.c:1823 +msgid "Message Disposition Notification Requests" +msgstr "Yêu cầu thông báo cách chuyển nhượng thư" + +#: ../src/pref-manager.c:1825 +msgid "" +"When I receive a message and its sender requested to return a\n" +"Message Disposition Notification (MDN), send it in the following cases:" +msgstr "" +"Khi nhận thư mà người gởi nó yêu cầu\n" +"Thông báo cách chuyển nhượng thư (MDN),\n" +"hãy gởi nó trong các trường hợp sau đây:" + +#: ../src/pref-manager.c:1836 +msgid "" +"The message header looks clean\n" +"(the notify-to address is equal to the return path,\n" +"I am in the \"To:\" or \"Cc:\" list)." +msgstr "" +"Dòng đầu của thư có vẻ sạch\n" +"(địa chỉ thông báo đến tương đương với đường dẫn trở lại,\n" +"tôi đang ở danh sách « Cho » hay « Chép Cho »)." + +#: ../src/pref-manager.c:1851 +msgid "The message header looks suspicious." +msgstr "Dòng đầu của thư có vẻ đáng ngờ."
+ +#: ../src/pref-manager.c:1886 ../gtk/gtktext.c:630 +msgid "Word Wrap" +msgstr "Ngắt từ" + +#: ../src/pref-manager.c:1891 +msgid "Wrap Outgoing Text at:" +msgstr "Cuộn văn bản gởi đi tại:" + +#: ../src/pref-manager.c:1919 ../plug-ins/common/diffraction.c:641 +msgid "Other Options" +msgstr "Tùy chọn khác" + +#: ../src/pref-manager.c:1924 +msgid "Reply Prefix:" +msgstr "Tiền tố trả lời:" + +#: ../src/pref-manager.c:1927 +msgid "Edit headers in external editor" +msgstr "Sửa đổi dòng đầu trong bộ hiệu chỉnh bên ngoài" + +#: ../src/pref-manager.c:1929 +msgid "Automatically quote original when replying" +msgstr "Tự động trích dẫn thân thư gốc khi trả lời" + +#: ../src/pref-manager.c:1932 +msgid "Don't include HTML parts as text when replying or forwarding mail" +msgstr "" +"Không bao gồm phần HTML theo dạng chữ thô khi trả lời hay gởi chuyển tiếp" + +#: ../src/pref-manager.c:1935 +msgid "Forward a mail as attachment instead of quoting it" +msgstr "Chuyển tiếp thư theo dạng đính kèm thay vì trích dẫn nó" + +#: ../src/pref-manager.c:1938 +msgid "Send button always queues outgoing mail in outbox" +msgstr "Nút Gởi luôn sắp hàng thư đi trong hộp Thư Đi" + +#: ../src/pref-manager.c:1941 +msgid "Copy outgoing messages to sentbox" +msgstr "Sao chép thư gởi đi vào hộp Đã Gởi" + +#: ../src/pref-manager.c:1958 +msgid "Status Messages" +msgstr "Thông điệp trạng thái" + +#: ../app/dia-props.c:242 ../gncal/gnomecal-prefs.c:1849 ../pan/prefs.c:1975 +#: po/silky.glade.h:75 +msgid "Colors" +msgstr "Màu sắc" + +#: ../src/mlview-validator-window.cc:702 ../pan/message-window.c:1140 +#: ../mimedir/mimedir-vcard-phone.c:238 +msgid "Message" +msgstr "Thông điệp" + +#: ../src/pref-manager.c:1964 +msgid "Sort and Thread" +msgstr "Sắp xếp và Nhánh" + +#: ../src/pref-manager.c:1989 +msgid "Main Window" +msgstr "Cửa sổ chính" + +#: ../src/pref-manager.c:1992 +msgid "Use preview pane" +msgstr "Dùng khung Xem thử" + +#: ../src/pref-manager.c:1994 +msgid "Show mailbox statistics in left pane" +msgstr "Hiển thị thống kê hộp thư trong ô bên trái" + +#: ../src/pref-manager.c:1996 +msgid "Use alternative main window layout" +msgstr "Dùng bố trí cửa sổ chính xen kẽ" + +#: ../src/pref-manager.c:1998 +msgid "Automatically view message when mailbox opened" +msgstr "Tự động xem thư khi mở hộp thư" + +#: ../src/pref-manager.c:2004 +msgid "PageUp/PageDown keys scroll message by:" +msgstr "Phím PageUp/PageDown cuộn thư theo :" + +#: ../src/pref-manager.c:2015 ../libgimp/gimpunitcache.c:57 +#: ../app/core/gimpunit.c:70 ../src/orca/chnames.py:32 +#: ../src/orca/speechgenerator.py:891 +msgid "percent" +msgstr "phần trăm" + +#: ../src/pref-manager.c:2030 +msgid "Display Progress Dialog" +msgstr "Hiện hộp thoại Tiến hành" + +#: ../src/pref-manager.c:2050 ../glade2/meldapp.glade.h:34 +msgid "Encoding" +msgstr "Bộ ký tự" + +#: ../src/pref-manager.c:2055 +msgid "Date encoding (for strftime):" +msgstr "Mã hoá ngày (cho strftime):" + +#: ../src/pref-manager.c:2057 +msgid "Selected headers:" +msgstr "Dòng đầu đã chọn:" + +#: ../src/pref-manager.c:2078 ../src/pref-manager.c:2083 +msgid "Information Messages" +msgstr "Thông điệp thông tin" + +#: ../src/pref-manager.c:2087 +msgid "Warning Messages"
+msgstr "Thông Ä‘iệp cảnh báo" + +#: ../src/pref-manager.c:2091 +msgid "Error Messages" +msgstr "Thông Ä‘iệp lá»—i" + +#: ../src/pref-manager.c:2095 +msgid "Fatal Error Messages" +msgstr "Thông Ä‘iệp lá»—i nghiêm trá»ng" + +#: ../src/pref-manager.c:2099 +msgid "Debug Messages" +msgstr "Thông Ä‘iệp gỡ lá»—i" + +#: ../src/pref-manager.c:2125 +msgid "Message Colors" +msgstr "Màu sác thông Ä‘iệp" + +#: ../src/pref-manager.c:2130 +#, c-format +msgid "Quote level %d color" +msgstr "Màu cấp trích dẵn %d" + +#: ../src/pref-manager.c:2143 ../gtk/gtkaboutdialog.c:429 +msgid "Link Color" +msgstr "Màu liên kết" + +#: ../src/pref-manager.c:2145 +msgid "Hyperlink color" +msgstr "Màu siêu liên kết" + +#: ../src/pref-manager.c:2156 +msgid "Composition Window" +msgstr "Cá»­a sổ soạn thảo" + +#: ../src/pref-manager.c:2160 +msgid "Invalid or incomplete address label color" +msgstr "Màu nhãn địa chỉ không hợp lệ/hoàn tất" + +#: ../src/pref-manager.c:2185 ../app/pdb/internal_procs.c:129 +#: ../pan/prefs.c:1039 ../pan/prefs.c:1974 po/silky.glade.h:106 +msgid "Fonts" +msgstr "Phông chữ" + +#: ../src/pref-manager.c:2189 +msgid "Message Font" +msgstr "Phông chữ thÆ°" + +#: ../src/pref-manager.c:2201 +msgid "Message Subject Font" +msgstr "Phông chữ chủ Ä‘á» thÆ°" + +#: ../src/pref-manager.c:2232 +msgid "Sorting and Threading" +msgstr "Sắp xếp và Nhánh" + +#: ../src/pref-manager.c:2236 +msgid "Default sort column" +msgstr "Cá»™t sắp xếp mặc định" + +#: ../src/pref-manager.c:2240 +msgid "Default threading style" +msgstr "Kiểu nhánh mặc định" + +#: ../src/pref-manager.c:2245 +msgid "Expand threads on open" +msgstr "Bung các nhánh khi mở" + +#: ../src/pref-manager.c:2322 +msgid "Pspell Settings" +msgstr "Thiết lập Pspell" + +#: ../src/pref-manager.c:2328 +msgid "Spell Check Module" +msgstr "Mô-Ä‘un kiểm lá»—i chính tả" + +#: ../src/pref-manager.c:2334 +msgid "Suggestion Level" +msgstr "Cấp gợi ý" + +#. do the ignore length +#: ../src/pref-manager.c:2339 +msgid "Ignore words shorter than" +msgstr "Bá» qua từ ngắn hÆ¡n" + +#: ../src/pref-manager.c:2360 +msgid "Miscellaneous Spelling Settings" +msgstr "Thiết lập Chính tả Lặt vặt" + +#: ../src/pref-manager.c:2362 +msgid "Check signature" +msgstr "Kiểm tra chữ ký" + +#: ../src/pref-manager.c:2363 +msgid "Check quoted" +msgstr "Kiểm tra trích dẫn" + +#: ../src/pref-manager.c:2389 ../app/pdb/internal_procs.c:159 +msgid "Miscellaneous" +msgstr "Lặt vặt" + +#: ../src/pref-manager.c:2391 ../app/actions/actions.c:115 +#: src/gtkam-debug.c:329 +msgid "Debug" +msgstr "Gỡ lá»—i" + +#: ../src/pref-manager.c:2392 +msgid "Empty Trash on exit" +msgstr "Äổ Rác khi thoát" + +#: ../src/pref-manager.c:2398 +msgid "Automatically close mailbox if unused more than" +msgstr "Tá»± Ä‘á»™ng đóng há»™p thÆ° nếu không dùng sau" + +#: ../src/pref-manager.c:2426 +msgid "Deleting Messages" +msgstr "Xoá bá» thÆ°" + +#: ../src/pref-manager.c:2428 +msgid "" +"The following setting is global, but may be overridden\n" +"for the selected mailbox using Mailbox -> Hide messages:" +msgstr "" +"Thiết lập theo sau là toàn cục, nhÆ°ng có thẻ bị đè\n" +"cho há»™p thÆ° được chá»n, dùng Há»™p thÆ° → Ẩn thÆ° :" + +#: ../src/pref-manager.c:2436 +msgid "Hide messages marked as deleted" +msgstr "Ẩn thÆ° có nhãn Äã xoá bá»" + +#: ../src/pref-manager.c:2438 +msgid "The following settings are global." +msgstr "Thiết lập theo sau là toàn cục." 
+ +#: ../src/pref-manager.c:2443 +msgid "Expunge deleted messages when mailbox is closed" +msgstr "Xoá hẳn các thư đã xoá bỏ khi đóng hộp thư" + +#: ../src/pref-manager.c:2450 +msgid " ...and if mailbox is unused more than" +msgstr " ...và nếu hộp thư không dùng sau" + +#: ../src/pref-manager.c:2476 +msgid "Message Window" +msgstr "Cửa sổ thư" + +#: ../src/pref-manager.c:2478 +msgid "Action after moving/trashing a message" +msgstr "Hành động sau khi chuyển/xoá bỏ thư" + +#: ../src/pref-manager.c:2513 ../gncal/gnomecal-prefs.c:1506 +#: ../data/gtkorphan.glade.h:19 ../glade/glade_project_window.c:385 +#: src/prefsdlg.cpp:52 src/prefsdlg.cpp:60 src/prefsdlg.cpp:67 +msgid "Options" +msgstr "Tùy chọn" + +#: ../src/pref-manager.c:2516 +msgid "Open Inbox upon startup" +msgstr "Mở hộp Thư Đến khi khởi chạy" + +#: ../src/pref-manager.c:2518 +msgid "Check mail upon startup" +msgstr "Kiểm tra thư mới khi khởi chạy" + +#: ../src/pref-manager.c:2520 +msgid "Remember open mailboxes between sessions" +msgstr "Nhớ các hộp thư đang mở giữa hai phiên chạy" + +#: ../src/pref-manager.c:2534 +msgid "Folder Scanning" +msgstr "Quét thư mục" + +#: ../src/pref-manager.c:2536 +msgid "" +"Choose depth 1 for fast startup; this defers scanning some folders.\n" +"To see more of the tree at startup, choose a greater depth." +msgstr "" +"Chọn độ sâu 1 để khởi chạy nhanh; điều này trì hoãn quét một số thư mục.\n" +"Để xem cây cụ thể hơn khi khởi chạy, hãy chọn độ sâu lớn hơn." + +#: ../src/pref-manager.c:2546 +msgid "Scan local folders to depth" +msgstr "Quét thư mục cục bộ đến độ sâu" + +#: ../src/pref-manager.c:2559 +msgid "Scan IMAP folders to depth" +msgstr "Quét thư mục IMAP đến độ sâu" + +#: ../src/pref-manager.c:2624 +msgid "Address Book Name" +msgstr "Tên sổ địa chỉ" + +#: ../src/pref-manager.c:2632 +msgid "Expand aliases" +msgstr "Bung bí danh" + +#: ../src/pref-manager.c:2651 +msgid "_Set as default" +msgstr "Đặt là _mặc định" + +#: ../src/pref-manager.c:2965 +msgid "Remote POP3 mailbox..." +msgstr "Hộp thư POP3 ở xa..." + +#: ../src/pref-manager.c:3088 +msgid "Show nothing" +msgstr "Đừng hiện gì" + +#: ../src/pref-manager.c:3090 +msgid "Show dialog" +msgstr "Hiện hộp thoại" + +#: ../src/pref-manager.c:3092 +msgid "Show in list" +msgstr "Hiện trong danh sách" + +#: ../src/pref-manager.c:3094 +msgid "Show in status bar" +msgstr "Hiện lên thanh trạng thái" + +#: ../src/pref-manager.c:3096 +msgid "Print to console" +msgstr "In ra bàn giao tiếp" + +#: ../src/pref-manager.c:3107 +msgid "Ask me" +msgstr "Hỏi tôi" + +#: ../src/pref-manager.c:3184 +msgid "Show next unread message" +msgstr "Hiển thị thư chưa đọc kế tiếp" + +#: ../src/pref-manager.c:3185 +msgid "Show next message" +msgstr "Hiển thị thư kế tiếp" + +#: ../src/pref-manager.c:3186 +msgid "Close message window" +msgstr "Đóng cửa sổ thư" + +#: ../src/pref-manager.c:3210 +#, c-format +msgid "Error displaying link_id %s: %s\n" +msgstr "Gặp lỗi khi hiển thị link_id %s: %s\n" + +#: ../src/print.c:246 ../src/print.c:727 ../src/print.c:66 +#, c-format +msgid "Page: %i/%i" +msgstr "Trang: %i/%i" + +#: ../src/print.c:662 +msgid "" +"Preparing an HTML part, which must start on a new page.\n" +"Print this part?" +msgstr "" +"Đang chuẩn bị một phần dạng HTML, mà phải bắt đầu trên trang mới.\n" +"In phần này không?"
+ +#: ../gnome/applet/wireless-applet.glade.h:39 Expense/expense.c:1700 +msgid "Type:" +msgstr "Kiểu :" + +#: ../src/print.c:984 ../widgets/misc/e-attachment.glade.h:4 +#: ../data/glade/song-info.glade.h:8 +msgid "File name:" +msgstr "Tên tập tin:" + +#: ../src/print.c:1300 +#, c-format +msgid "This is an inline %s signed %s message part:" +msgstr "Đây là phần thư trực tiếp %s có chữ ký %s:" + +#: ../src/print.c:1547 +msgid "Font available for printing" +msgstr "Phông chữ có sẵn để in" + +#: ../src/print.c:1553 +#, c-format +msgid "Font not available for printing. Closest: %s" +msgstr "Phông chữ không có sẵn để in. Gần nhất: %s" + +#: ../glade/gbwidgets/gbfontselectiondialog.c:70 ../pan/pan-font-button.c:55 +msgid "Select Font" +msgstr "Chọn phông chữ" + +#: ../src/print.c:1599 ../src/planner-task-dialog.c:2383 +msgid "Change..." +msgstr "Đổi..." + +#: ../src/print.c:1634 +msgid "Print message" +msgstr "In thư" + +#: ../src/print.c:1645 ../app/actions/dialogs-actions.c:150 +msgid "_Fonts" +msgstr "_Phông chữ" + +#: ../src/print.c:1648 ../src/preferences.c:291 +msgid "Header font" +msgstr "Phông chữ đầu trang" + +#: ../src/print.c:1650 +msgid "Body font" +msgstr "Phông chữ thân" + +#: ../src/print.c:1652 +msgid "Footer font" +msgstr "Phông chữ chân trang" + +#. highlight cited stuff +#: ../src/print.c:1656 +msgid "Highlight cited text" +msgstr "Tô sáng trích dẫn" + +#: ../src/print.c:1660 +msgid "_Enable highlighting of cited text" +msgstr "Bật tô _sáng trích dẫn" + +#: ../src/print.c:1686 +#, c-format +msgid "" +"Balsa could not find font \"%s\".\n" +"Use the \"Fonts\" page on the \"Print message\" dialog to change it." +msgstr "" +"Balsa không tìm thấy phông chữ « %s ».\n" +"Hãy dùng trang « Phông chữ » trong hộp thoại « In thư » để thay đổi." + +#: ../src/print.c:1787 +msgid "Balsa: message print preview" +msgstr "Balsa: xem thử bản in thư" + +#: ../src/save-restore.c:613 +msgid "Error during filters loading: " +msgstr "Gặp lỗi trong khi tải các bộ lọc: " + +#: ../src/save-restore.c:615 +#, c-format +msgid "" +"Error during filters loading: %s\n" +"Filters may not be correct." +msgstr "" +"Gặp lỗi trong khi tải các bộ lọc: %s\n" +"Có thể bộ lọc không đúng." + +#: ../src/save-restore.c:730 +msgid "The option not to recognize \"format=flowed\" text has been removed." +msgstr "" +"Tùy chọn để không nhận ra văn bản « format=flowed » đã được gỡ bỏ." + +#: ../src/save-restore.c:991 +msgid "" +"The option not to send \"format=flowed\" is now on the Options menu of the " +"compose window." +msgstr "" +"Tùy chọn để không gởi văn bản « format=flowed » hiện nằm trong trình " +"đơn Tùy chọn của cửa sổ soạn thảo." + +#: ../src/save-restore.c:1023 +msgid "" +"The option to request a MDN is now on the Options menu of the compose window." +msgstr "" +"Tùy chọn để yêu cầu MDN hiện nằm trong trình đơn Tùy chọn của cửa sổ " +"soạn thảo."
+ +#: ../src/save-restore.c:2042 +msgid "Error opening GConf database\n" +msgstr "Gặp lỗi khi mở cơ sở dữ liệu GConf\n" + +#: ../src/save-restore.c:2050 ../src/save-restore.c:2061 +#, c-format +msgid "Error setting GConf field: %s\n" +msgstr "Gặp lỗi khi thiết lập trường GConf: %s\n" + +#: ../src/sendmsg-window.c:234 +msgid "_Brazilian" +msgstr "Bồ-đào-nha (_Bra-xin)" + +#: ../src/sendmsg-window.c:235 +msgid "_Catalan" +msgstr "_Ca-ta-lan" + +#: ../src/sendmsg-window.c:236 +msgid "_Chinese Simplified" +msgstr "Hoa _giản thể" + +#: ../src/sendmsg-window.c:237 +msgid "_Chinese Traditional" +msgstr "Hoa _phồn thể" + +#: ../src/sendmsg-window.c:238 +msgid "_Czech" +msgstr "_Séc" + +#: ../src/sendmsg-window.c:239 +msgid "_Danish" +msgstr "_Đan-mạch" + +#: ../src/sendmsg-window.c:240 +msgid "_Dutch" +msgstr "_Hoà-lan" + +#: ../src/sendmsg-window.c:241 +msgid "_English (American)" +msgstr "Anh (_Mỹ)" + +#: ../src/sendmsg-window.c:242 +msgid "_English (British)" +msgstr "Anh (_Anh Quốc)" + +#: ../src/sendmsg-window.c:243 +msgid "_Esperanto" +msgstr "_Etpêrantô" + +#: ../src/sendmsg-window.c:244 +msgid "_Estonian" +msgstr "_Et-tô-ni-a" + +#: ../src/sendmsg-window.c:245 +msgid "_Finnish" +msgstr "_Phần-lan" + +#: ../src/sendmsg-window.c:246 +msgid "_French" +msgstr "_Pháp" + +#: ../src/sendmsg-window.c:247 +msgid "_German" +msgstr "_Đức" + +#: ../src/sendmsg-window.c:248 +msgid "_Greek" +msgstr "_Hy-lạp" + +#: ../src/sendmsg-window.c:249 +msgid "_Hebrew" +msgstr "_Do-thái" + +#: ../src/sendmsg-window.c:250 +msgid "_Hungarian" +msgstr "_Hung-gia-lợi" + +#: ../src/sendmsg-window.c:251 +msgid "_Italian" +msgstr "_Ý" + +#: ../src/sendmsg-window.c:252 +msgid "_Japanese (JIS)" +msgstr "Nhật Bản (_JIS)" + +#: ../src/sendmsg-window.c:253 +msgid "_Korean" +msgstr "_Triều Tiên" + +#: ../src/sendmsg-window.c:254 +msgid "_Latvian" +msgstr "_Lát-vi-a" + +#: ../src/sendmsg-window.c:255 +msgid "_Lithuanian" +msgstr "_Li-tu-a-ni" + +#: ../src/sendmsg-window.c:256 +msgid "_Norwegian" +msgstr "_Na-uy" + +#: ../src/sendmsg-window.c:257 +msgid "_Polish" +msgstr "_Ba Lan" + +#: ../src/sendmsg-window.c:258 +msgid "_Portugese" +msgstr "_Bồ-đào-nha" + +#: ../src/sendmsg-window.c:259 +msgid "_Romanian" +msgstr "_Lỗ-má-ni" + +#: ../src/sendmsg-window.c:260 +msgid "_Russian (ISO)" +msgstr "_Nga (ISO)" + +#: ../src/sendmsg-window.c:261 +msgid "_Russian (KOI)" +msgstr "Nga (_KOI)" + +#: ../src/sendmsg-window.c:262 +msgid "_Serbian" +msgstr "_Xéc-bi" + +#: ../src/sendmsg-window.c:263 +msgid "_Serbian (Latin)" +msgstr "_Xéc-bi (La-tinh)" + +#: ../src/sendmsg-window.c:264 +msgid "_Slovak" +msgstr "_Xlô-vác" + +#: ../src/sendmsg-window.c:265 +msgid "_Spanish" +msgstr "_Tây-ban-nha" + +#: ../src/sendmsg-window.c:266 +msgid "_Swedish" +msgstr "_Thuỵ-điển" + +#: ../src/sendmsg-window.c:267 +msgid "_Turkish" +msgstr "_Thổ-nhĩ-kỳ" + +#: ../src/sendmsg-window.c:268 +msgid "_Ukrainian" +msgstr "_U-cợ-rainh" + +#: ../src/sendmsg-window.c:269 +msgid "_Generic UTF-8" +msgstr "_UTF-8 chung" + +#: ../src/sendmsg-window.c:293 +msgid "_GnuPG uses MIME mode" +msgstr "_GnuPG dùng chế độ MIME" + +#: ../src/sendmsg-window.c:299 +msgid "_GnuPG uses old OpenPGP mode" +msgstr "_GnuPG dùng chế độ OpenPGP cũ" + +#: ../src/sendmsg-window.c:306 +msgid "_S/MIME mode (GpgSM)" +msgstr "Chế độ _S/MIME (GpgSM)" + +#: ../src/sendmsg-window.c:321 ../src/sendmsg-window.c:499 +msgid "_Include File..." +msgstr "_Gồm tập tin..." + +#: ../src/sendmsg-window.c:324 ../src/sendmsg-window.c:512 +msgid "_Attach File..." +msgstr "Đính _kèm tập tin..."
+ +#: ../src/sendmsg-window.c:327 ../src/sendmsg-window.c:501 +msgid "I_nclude Message(s)" +msgstr "Gồm (các) th_ư" + +#: ../src/sendmsg-window.c:330 ../src/sendmsg-window.c:504 +msgid "Attach _Message(s)" +msgstr "Đính kè_m (các) thư" + +#: ../src/sendmsg-window.c:336 ../src/sendmsg-window.c:529 +msgid "Sen_d" +msgstr "_Gởi" + +#: ../src/toolbar-factory.c:111 ../ui/evolution-message-composer.xml.h:30 +msgid "Send this message" +msgstr "Gởi thư này" + +#: ../src/sendmsg-window.c:341 ../data/glade/AddWindow.glade.h:2 +msgid "_Queue" +msgstr "_Sắp hàng" + +#: ../src/sendmsg-window.c:342 ../src/sendmsg-window.c:535 +msgid "Queue this message in Outbox for sending" +msgstr "Sắp hàng thư này trong hộp Thư Đi để gởi" + +#: ../src/sendmsg-window.c:346 +msgid "_Postpone" +msgstr "_Hoãn" + +#: ../src/sendmsg-window.c:349 ../src/sendmsg-window.c:516 +#: ../gtk/gtkstock.c:400 ../app/actions/file-actions.c:86 +msgid "_Save" +msgstr "_Lưu" + +#: ../src/sendmsg-window.c:350 ../src/sendmsg-window.c:517 +msgid "Save this message" +msgstr "Lưu thư này" + +#: ../src/sendmsg-window.c:354 ../src/sendmsg-window.c:521 +msgid "Print the edited message" +msgstr "In thư đã soạn thảo" + +#: ../src/sendmsg-window.c:383 ../src/sendmsg-window.c:546 +msgid "_Wrap Body" +msgstr "_Cuộn thân" + +#: ../src/sendmsg-window.c:387 ../src/sendmsg-window.c:550 +msgid "_Reflow Selected Text" +msgstr "Cuộn _lại phần đã chọn" + +#: ../src/sendmsg-window.c:392 ../src/sendmsg-window.c:578 +msgid "Insert Si_gnature" +msgstr "Chèn chữ _ký" + +#: ../src/sendmsg-window.c:396 ../src/sendmsg-window.c:554 +msgid "_Quote Message(s)" +msgstr "Trích _dẫn (các) thư" + +#: ../src/sendmsg-window.c:402 +msgid "C_heck spelling" +msgstr "_Kiểm tra chính tả" + +#: ../src/sendmsg-window.c:403 ../src/sendmsg-window.c:407 +#: ../src/sendmsg-window.c:632 +msgid "Check the spelling of the message" +msgstr "Kiểm tra chính tả của thư" + +#: ../src/sendmsg-window.c:406 ../src/sendmsg-window.c:630 +msgid "C_heck Spelling" +msgstr "_Kiểm tra chính tả" + +#: ../src/sendmsg-window.c:413 ../src/sendmsg-window.c:638 +msgid "Select _Identity..." +msgstr "Chọn _thực thể..."
+ +#: ../src/sendmsg-window.c:414 ../src/sendmsg-window.c:639 +msgid "Select the Identity to use for the message" +msgstr "Chọn thực thể cần dùng cho thư này" + +#: ../src/sendmsg-window.c:419 +msgid "_Edit with Gnome-Editor" +msgstr "_Sửa đổi trong bộ hiệu chỉnh Gnome" + +#: ../src/sendmsg-window.c:420 +msgid "Edit the current message with the default Gnome editor" +msgstr "Sửa đổi thư hiện thời bằng bộ hiệu chỉnh Gnome mặc định" + +#: ../src/sendmsg-window.c:432 ../src/sendmsg-window.c:591 +msgid "Fr_om" +msgstr "_Từ" + +#: ../src/sendmsg-window.c:434 ../src/sendmsg-window.c:593 +msgid "_Cc" +msgstr "_Cc" + +#: ../src/sendmsg-window.c:436 ../src/sendmsg-window.c:595 +msgid "_Bcc" +msgstr "_Bcc" + +#: ../src/sendmsg-window.c:438 ../src/sendmsg-window.c:597 +msgid "_Fcc" +msgstr "_Fcc" + +#: ../src/sendmsg-window.c:440 +msgid "_Reply To" +msgstr "T_rả lời" + +#: ../src/sendmsg-window.c:446 ../src/sendmsg-window.c:643 +msgid "_Request Disposition Notification" +msgstr "_Yêu cầu thông báo cách chuyển nhượng thư" + +#: ../src/sendmsg-window.c:449 ../src/sendmsg-window.c:609 +msgid "_Format = Flowed" +msgstr "_Dạng thức = Trôi chảy" + +#: ../src/sendmsg-window.c:454 ../src/sendmsg-window.c:614 +msgid "_Sign Message" +msgstr "_Ký tên thư" + +#: ../src/sendmsg-window.c:455 ../src/sendmsg-window.c:615 +msgid "signs the message using GnuPG" +msgstr "ký tên thư bằng GnuPG" + +#: ../src/sendmsg-window.c:458 ../src/sendmsg-window.c:618 +msgid "_Encrypt Message" +msgstr "_Mật mã hóa thư" + +#: ../src/sendmsg-window.c:459 ../src/sendmsg-window.c:619 +msgid "signs the message using GnuPG for all To: and CC: recipients" +msgstr "" +"mật mã hóa thư bằng GnuPG cho mọi người nhận kiểu Cho (To:) và Chép Cho (Cc:)" + +#: ../src/sendmsg-window.c:479 ../src/sendmsg-window.c:659 +#: ../libnautilus-private/nautilus-column-chooser.c:413 +#: ../data/glade/column-dialog.glade.h:5 +#: ../bonobo/bonobo-ui-config-widget.c:275 +msgid "_Show" +msgstr "_Hiện" + +#: ../src/sendmsg-window.c:481 ../src/sendmsg-window.c:636 +msgid "_Language" +msgstr "_Ngôn ngữ" + +#: ../src/sendmsg-window.c:483 ../ui/evolution-editor.xml.h:21 +#: ../dwell-selection.xml.in.h:14 +msgid "_Options" +msgstr "Tù_y chọn" + +#: ../src/sendmsg-window.c:525 +msgid "Sa_ve and Close" +msgstr "_Lưu và Đóng" + +#: ../src/sendmsg-window.c:534 +msgid "Send _Later" +msgstr "Gởi _sau này" + +#: ../src/sendmsg-window.c:628 +msgid "Toggle Spell C_hecker" +msgstr "Bật/tắt bộ _kiểm tra lỗi chính tả" + +#: ../src/sendmsg-window.c:689 ../mail/mail-config.glade.h:45 +#: ../mail/message-list.etspec.h:1 +msgid "Attachment" +msgstr "Đính kèm" + +#: ../src/sendmsg-window.c:689 ../mail/mail-config.glade.h:91 +#: ../mail/mail-config.glade.h:92 +msgid "Inline" +msgstr "Trực tiếp" + +#: ../Pyblio/GnomeUI/Fields.py:42 +msgid "Reference" +msgstr "Tham chiếu" + +#: ../src/sendmsg-window.c:917 +#, c-format +msgid "" +"The message to '%s' is modified.\n" +"Save message to Draftbox?" +msgstr "" +"Thư gởi đến « %s » bị thay đổi.\n" +"Có lưu thư vào hộp thư Nháp không?" + +#: ../src/sendmsg-window.c:942 +#, c-format +msgid "" +"The message to '%s' was saved in Draftbox.\n" +"Remove message from Draftbox?" +msgstr "" +"Thư gởi đến « %s » đã được lưu vào hộp thư Nháp.\n" +"Có gỡ bỏ thư ra khỏi hộp thư Nháp không?" + +#: ../src/sendmsg-window.c:1247 +msgid "Gnome editor is not defined in your preferred applications." +msgstr "Chưa ghi rõ bộ hiệu chỉnh Gnome trong các ứng dụng ưa thích của bạn."
+ +#: ../src/sendmsg-window.c:1298 +msgid "Select Identity" +msgstr "Chọn thực thể" + +#: ../src/sendmsg-window.c:1677 +#, c-format +msgid "" +"Saying yes will not send the file `%s' itself, but just a MIME message/" +"external-body reference. Note that the recipient must have proper " +"permissions to see the `real' file.\n" +"\n" +"Do you really want to attach this file as reference?" +msgstr "" +"Trả lời Có sẽ không gởi chính tập tin « %s », mà chỉ gởi một tham chiếu " +"MIME message/external-body. Lưu ý là người nhận phải có quyền truy " +"cập đúng để xem tập tin « thật ».\n" +"\n" +"Bạn có muốn đính kèm tập tin này dạng tham chiếu không?" + +#: ../src/sendmsg-window.c:1688 +msgid "Attach as Reference?" +msgstr "Đính kèm dạng tham chiếu?" + +#: ../src/sendmsg-window.c:1801 +msgid "Choose charset" +msgstr "Chọn bộ ký tự" + +#: ../src/sendmsg-window.c:1808 +#, c-format +msgid "" +"File\n" +"%s\n" +"is not encoded in US-ASCII or UTF-8.\n" +"Please choose the charset used to encode the file." +msgstr "" +"Tập tin\n" +"%s\n" +"chưa được mã hóa theo US-ASCII hay UTF-8.\n" +"Hãy chọn bộ ký tự dùng để mã hóa tập tin." + +#: ../src/sendmsg-window.c:1824 +msgid "Attach as MIME type:" +msgstr "Đính kèm dạng kiểu MIME:" + +#: ../src/sendmsg-window.c:1880 +#, c-format +msgid "Character set for file %s changed from \"%s\" to \"%s\"." +msgstr "Bộ ký tự cho tập tin %s đã thay đổi từ « %s » sang « %s »." + +#: ../src/sendmsg-window.c:1923 ../src/sendmsg-window.c:5072 +msgid "(no subject)" +msgstr "(không có chủ đề)" + +#: ../src/sendmsg-window.c:2009 +msgid "forwarded message" +msgstr "thư đã chuyển tiếp" + +#: ../src/sendmsg-window.c:2014 +#, c-format +msgid "Message from %s, subject: \"%s\"" +msgstr "Thư từ %s, chủ đề: « %s »" + +#: ../src/sendmsg-window.c:2032 +#, c-format +msgid "Error converting \"%s\" to UTF-8: %s\n" +msgstr "Gặp lỗi khi chuyển đổi « %s » sang UTF-8: %s\n" + +#: ../Pyblio/GnomeUI/Config.py:435 ../Pyblio/GnomeUI/Config.py:549 +#: ../storage/sunone-permissions-dialog.glade.h:27 install_gui.c:324 +#: app/sample-editor.c:455 +msgid "Remove" +msgstr "Gỡ bỏ" + +#: ../src/sendmsg-window.c:2185 src/menus.c:356 ../list-ui.c:541 +#: ../src/glade-project-window.c:309 +msgid "Open..." +msgstr "Mở..." + +#: ../src/sendmsg-window.c:2197 +msgid "(URL)" +msgstr "(Địa chỉ Mạng)" + +#: ../src/sendmsg-window.c:2218 +#, c-format +msgid "Cannot get info on file '%s': %s" +msgstr "Không thể lấy thông tin về tập tin « %s »: %s" + +#: ../src/sendmsg-window.c:2223 +#, c-format +msgid "Attachment %s is not a regular file." +msgstr "Đính kèm %s không phải là tập tin thường." + +#: ../src/sendmsg-window.c:2226 +#, c-format +msgid "File %s cannot be read\n" +msgstr "Không thể đọc tập tin %s\n" + +#: ../src/sendmsg-window.c:2275 +msgid "Attach file" +msgstr "Đính kèm tập tin" + +#: ../src/sendmsg-window.c:2361 ../src/sendmsg-window.c:2470 +#: ../src/sendmsg-window.c:4287 +msgid "" +"Attaching message failed.\n" +"Possible reason: not enough temporary space" +msgstr "" +"Việc đính kèm thư bị lỗi.\n" +"Lý do có thể: không đủ chỗ tạm thời" + +#: ../src/sendmsg-window.c:2690 +msgid "F_rom:" +msgstr "_Từ :" + +#: ../src/sendmsg-window.c:2825 ../plug-ins/common/mail.c:605 +msgid "S_ubject:" +msgstr "C_hủ đề:" + +#. fcc: mailbox folder where the message copy will be written to +#: ../src/sendmsg-window.c:2839 +msgid "F_cc:" +msgstr "F_cc:" + +#. Reply To: +#: ../src/sendmsg-window.c:2870 +msgid "_Reply To:" +msgstr "T_rả lời:" + +#.
Attachment list +#: ../src/sendmsg-window.c:2875 +msgid "_Attachments:" +msgstr "Đính _kèm:" + +#: ../src/sendmsg-window.c:2920 ../plug-ins/common/waves.c:273 +msgid "Mode" +msgstr "Chế độ" + +#: ../pan/task-manager.c:756 src/dictmanagedlg.cpp:519 +#: ../storage/sunone-subscription-dialog.c:488 +#: ../mimedir/mimedir-vcomponent.c:276 schroot/sbuild-chroot.cc:387 +#: app/audioconfig.c:263 +msgid "Description" +msgstr "Mô tả" + +#: ../src/sendmsg-window.c:3253 +#, c-format +msgid "Could not save attachment: %s" +msgstr "Không thể lưu đính kèm: %s" + +#: ../src/sendmsg-window.c:3289 +msgid "you" +msgstr "bạn" + +#: ../src/sendmsg-window.c:3298 +#, c-format +msgid "------forwarded message from %s------\n" +msgstr "───thư đã chuyển tiếp từ %s───\n" + +#: ../src/sendmsg-window.c:3337 +#, c-format +msgid "Message-ID: %s\n" +msgstr "ID thư : %s\n" + +#: ../src/sendmsg-window.c:3343 +msgid "References:" +msgstr "Tham chiếu :" + +#: ../src/sendmsg-window.c:3354 +#, c-format +msgid "On %s, %s wrote:\n" +msgstr "Vào %s, %s đã viết:\n" + +#: ../src/sendmsg-window.c:3356 +#, c-format +msgid "%s wrote:\n" +msgstr "%s đã viết:\n" + +#: ../src/sendmsg-window.c:3459 +msgid "No signature found!" +msgstr "• Không tìm thấy chữ ký. •" + +#: ../src/sendmsg-window.c:3629 +msgid "Could not save message." +msgstr "Không thể lưu thư." + +#: ../src/sendmsg-window.c:3636 +#, c-format +msgid "Could not open draftbox: %s" +msgstr "Không thể mở hộp thư Nháp: %s" + +#: ../src/sendmsg-window.c:3657 +msgid "Message saved." +msgstr "Thư đã được lưu." + +#: ../src/sendmsg-window.c:4444 +#, c-format +msgid "Error executing signature generator %s" +msgstr "Gặp lỗi khi thực hiện bộ tạo chữ ký %s" + +#: ../src/sendmsg-window.c:4456 +#, c-format +msgid "Cannot open signature file '%s' for reading" +msgstr "Không thể mở tập tin chữ ký « %s » để đọc." + +#: ../src/sendmsg-window.c:4466 +#, c-format +msgid "Error reading signature from %s" +msgstr "Gặp lỗi khi đọc chữ ký từ %s." + +#: ../src/sendmsg-window.c:4470 +#, c-format +msgid "Signature in %s is not a UTF-8 text." +msgstr "Chữ ký trong %s không phải là chuỗi UTF-8." + +#: ../src/sendmsg-window.c:4533 +#, c-format +msgid "Could not open the file %s.\n" +msgstr "Không thể mở tập tin %s.\n" + +#: ../src/sendmsg-window.c:4589 +msgid "Include file" +msgstr "Gồm tập tin" + +#. Translators: please do not translate Face. +#: ../src/sendmsg-window.c:4804 +#, c-format +msgid "Could not load Face header file %s: %s" +msgstr "Không thể tải tập tin dòng đầu Face %s: %s" + +#. Translators: please do not translate Face. +#: ../src/sendmsg-window.c:4807 +#, c-format +msgid "Could not load X-Face header file %s: %s" +msgstr "Không thể tải tập tin dòng đầu X-Face %s: %s" + +#: ../src/sendmsg-window.c:4914 +msgid "Message contains national (8-bit) characters" +msgstr "Thư chứa ký tự thuộc quốc gia (8-bit)." + +#: ../src/sendmsg-window.c:4918 +msgid "" +"Balsa will encode the message in UTF-8.\n" +"Cancel the operation to choose a different language." +msgstr "" +"Balsa sẽ mã hóa thư bằng UTF-8.\n" +"Thôi thao tác này để chọn ngôn ngữ khác." + +#: ../src/sendmsg-window.c:4924 +msgid "" +"Message contains national (8-bit) characters. Balsa will " +"encode the message in UTF-8.\n" +"Cancel the operation to choose a different language." +msgstr "" +"Thư chứa ký tự thuộc quốc gia (8-bit). Balsa sẽ mã hóa thư " +"bằng UTF-8.\n" +"Thôi thao tác này để chọn ngôn ngữ khác."
+ +#: ../src/sendmsg-window.c:5056 +msgid "You did not specify a subject for this message" +msgstr "Chưa ghi rõ chủ đề cho thư này." + +#: ../src/sendmsg-window.c:5057 +msgid "If you would like to provide one, enter it below." +msgstr "Nếu bạn muốn cung cấp chủ đề, hãy gõ vào bên dưới." + +#: ../ui/message.glade.h:6 +msgid "_Send" +msgstr "_Gởi" + +#: ../src/sendmsg-window.c:5155 +msgid "" +"You selected OpenPGP mode for a message with attachments. In this mode, only " +"the first part will be signed and/or encrypted. You should select MIME mode " +"if the complete message shall be protected. Do you really want to proceed?" +msgstr "" +"Bạn đã chọn chế độ OpenPGP cho một thư có đính kèm. Trong chế độ này, chỉ " +"phần đầu tiên sẽ được ký tên và/hay mật mã. Bạn nên chọn chế độ MIME để bảo " +"vệ toàn bộ thư. Bạn thật sự muốn tiếp tục không?" + +#: ../src/sendmsg-window.c:5171 +#, c-format +msgid "sending message with gpg mode %d" +msgstr "đang gởi thư với chế độ GPG %d..." + +#: ../src/sendmsg-window.c:5208 +msgid "Message could not be created" +msgstr "Không thể tạo thư." + +#: ../src/sendmsg-window.c:5210 +msgid "Message could not be queued in outbox" +msgstr "Không thể sắp hàng thư trong hộp Thư Đi." + +#: ../src/sendmsg-window.c:5212 +msgid "Message could not be saved in sentbox" +msgstr "Không thể lưu thư trong hộp Đã Gởi." + +#: ../src/sendmsg-window.c:5214 +msgid "Message could not be sent" +msgstr "Không thể gởi thư." + +#: ../src/sendmsg-window.c:5218 +#, c-format +msgid "Send failed: %s" +msgstr "Việc gởi bị lỗi: %s" + +#: ../src/sendmsg-window.c:5301 ../src/sendmsg-window.c:5320 +msgid "Could not postpone message." +msgstr "Không thể hoãn thư." + +#: ../src/sendmsg-window.c:5315 +msgid "Message postponed." +msgstr "Thư đã được hoãn." + +#: ../src/sendmsg-window.c:5470 +#, c-format +msgid "Error starting spell checker: %s" +msgstr "Gặp lỗi khi khởi chạy bộ kiểm tra chính tả: %s" + +#: ../src/sendmsg-window.c:5635 +#, c-format +msgid "Could not compile %s" +msgstr "Không thể biên dịch %s." + +#: ../src/sendmsg-window.c:6182 +#, c-format +msgid "Reply to %s: %s" +msgstr "Trả lời %s: %s" + +#: ../src/sendmsg-window.c:6187 +#, c-format +msgid "Forward message to %s: %s" +msgstr "Chuyển tiếp thư tới %s: %s" + +#: ../src/sendmsg-window.c:6191 +#, c-format +msgid "Continue message to %s: %s" +msgstr "Tiếp tục thư cho %s: %s" + +#: ../src/sendmsg-window.c:6195 +#, c-format +msgid "New message to %s: %s" +msgstr "Thư mới cho %s: %s" + +#: ../src/spell-check.c:511 +msgid "Replace the current word with the selected suggestion" +msgstr "Thay thế từ hiện thời bằng từ đề nghị được chọn." + +#: ../src/spell-check.c:519 +msgid "Replace all occurences of the current word with the selected suggestion" +msgstr "Thay thế mọi lần gặp từ hiện thời bằng từ đề nghị được chọn." + +#: ../src/spell-check.c:531 +msgid "Skip the current word" +msgstr "Nhảy qua từ hiện thời" + +#: ../src/spell-check.c:537 +msgid "Skip all occurrences of the current word" +msgstr "Nhảy qua mọi lần gặp từ hiện thời" + +#: ../src/spell-check.c:547 +msgid "Add the current word to your personal dictionary" +msgstr "Thêm từ hiện thời vào từ điển cá nhân của bạn."
+ +#: ../src/spell-check.c:556 +msgid "Finish spell checking" +msgstr "Kết thúc kiểm tra chính tả" + +#: ../src/spell-check.c:561 +msgid "Revert all changes and finish spell checking" +msgstr "Hoàn lại mọi thay đổi và kết thúc kiểm tra chính tả" + +#: ../src/spell-check.c:592 +msgid "Spell check" +msgstr "Kiểm tra chính tả" + +#: ../src/store-address.c:104 +msgid "Store address: no addresses" +msgstr "Lưu địa chỉ: không có địa chỉ" + +#: ../src/store-address.c:181 +msgid "Store Address" +msgstr "Lưu địa chỉ" + +#: ../src/store-address.c:199 +msgid "Save this address and close the dialog?" +msgstr "Lưu địa chỉ này và đóng hộp thoại không?" + +#: ../src/store-address.c:217 +msgid "No address book selected...." +msgstr "Chưa chọn sổ địa chỉ...." + +#: ../src/store-address.c:230 +msgid "Address could not be written to this address book." +msgstr "Không thể ghi địa chỉ vào sổ địa chỉ này." + +#: ../src/store-address.c:233 +msgid "Address book could not be accessed." +msgstr "Không thể truy cập sổ địa chỉ." + +#: ../src/store-address.c:235 +msgid "This mail address is already in this address book." +msgstr "Địa chỉ thư này đã có trong sổ địa chỉ này." + +#: ../src/store-address.c:238 +msgid "Unexpected address book error. Report it." +msgstr "Gặp lỗi sổ địa chỉ bất ngờ: hãy thông báo." + +#: ../src/store-address.c:254 +msgid "Choose Address Book" +msgstr "Chọn sổ địa chỉ" + +#: ../src/store-address.c:293 +msgid "Choose Address" +msgstr "Chọn địa chỉ" + +#: ../src/toolbar-factory.c:77 utils/gul-tbi-separator.c:133 +#: ../src/glade-gtk.c:2368 ../src/orca/rolenames.py:398 +msgid "Separator" +msgstr "Bộ ngăn cách" + +#: ../glade/glade_menu_editor.c:2412 ../glade/glade_menu_editor.c:2552 +#: ../src/glade-gtk.c:2362 Expense/expense.c:609 Expense/expense.c:1401 +msgid "Check" +msgstr "Kiểm tra" + +#: ../src/toolbar-factory.c:79 +msgid "Check for new email" +msgstr "Kiểm tra tìm thư mới" + +#: ../plug-ins/common/compose.c:1419 +msgid "Compose" +msgstr "Soạn thảo" + +#: ../src/toolbar-factory.c:81 +msgid "Compose message" +msgstr "Soạn thảo thư" + +#: ../src/toolbar-factory.c:82 ../app/dialogs/user-install-dialog.c:618 +#: ../gnomecard/cardlist-widget.c:1055 +msgid "Continue" +msgstr "Tiếp tục" + +#: ../src/toolbar-factory.c:83 +msgid "Continue message" +msgstr "Tiếp tục thư" + +#: ../src/toolbar-factory.c:84 ../mail/message-tag-followup.c:82 +#: ../ui/evolution-mail-message.xml.h:81 ../mail/message-tag-followup.c:81 +#: ../ui/evolution-mail-message.xml.h:79 +msgid "Reply" +msgstr "Trả lời" + +#: ../src/toolbar-factory.c:86 +msgid "" +"Reply\n" +"to all" +msgstr "" +"Trả lời\n" +"tất cả" + +#: ../src/toolbar-factory.c:87 +msgid "Reply to all recipients" +msgstr "Trả lời mọi người nhận" + +#: ../src/toolbar-factory.c:88 +msgid "" +"Reply\n" +"to group" +msgstr "" +"Trả lời\n" +"nhóm" + +#: ../src/toolbar-factory.c:90 ../libgimpbase/gimpbaseenums.c:676 +msgid "Forward" +msgstr "Chuyển tiếp" + +#: ../src/toolbar-factory.c:92 ../ui/evolution-calendar.xml.h:18 +#: ../ui/evolution-mail-message.xml.h:75 ../src/f-spot.glade.h:113 +msgid "Previous" +msgstr "Trước" + +#:
../src/toolbar-factory.c:93 +msgid "Open previous" +msgstr "Mở thư trước" + +#: ../src/toolbar-factory.c:94 ../src/menus.c:302 info/session.c:860 +#: makeinfo/node.c:1424 ../Pyblio/GnomeUI/Editor.py:608 +msgid "Next" +msgstr "Kế" + +#: ../src/toolbar-factory.c:95 +msgid "Open next" +msgstr "Mở thư kế" + +#: ../src/toolbar-factory.c:96 +msgid "" +"Next\n" +"unread" +msgstr "" +"Chưa\n" +"đọc kế" + +#: ../src/toolbar-factory.c:97 +msgid "Open next unread message" +msgstr "Mở thư chưa đọc kế tiếp." + +#: ../src/toolbar-factory.c:98 +msgid "" +"Next\n" +"flagged" +msgstr "" +"Đã đặt\n" +"cờ kế" + +#: ../src/toolbar-factory.c:99 +msgid "Open next flagged message" +msgstr "Mở thư đã đặt cờ kế tiếp." + +#: ../src/toolbar-factory.c:100 +msgid "" +"Previous\n" +"part" +msgstr "" +"Phần\n" +"trước" + +#: ../src/toolbar-factory.c:101 +msgid "View previous part of message" +msgstr "Xem phần thư trước đó." + +#: ../src/toolbar-factory.c:102 +msgid "" +"Next\n" +"part" +msgstr "" +"Phần\n" +"kế" + +#: ../src/toolbar-factory.c:103 +msgid "View next part of message" +msgstr "Xem phần thư kế tiếp." + +#: ../src/toolbar-factory.c:104 +msgid "" +"Trash /\n" +"Delete" +msgstr "" +"Rác /\n" +"Xoá bỏ" + +#: ../src/toolbar-factory.c:105 +msgid "Move the current message to trash" +msgstr "Chuyển thư hiện thời vào Rác." + +#: ../src/toolbar-factory.c:106 +msgid "Postpone" +msgstr "Hoãn" + +#: ../src/toolbar-factory.c:107 +msgid "Postpone current message" +msgstr "Hoãn thư hiện thời." + +#: ../src/toolbar-factory.c:108 ../libtomboy/gedit-print.c:144 +#: ../Tomboy/Plugins/PrintNotes.cs:15 src/mainwin.cpp:1115 jpilot.c:450 +#: monthview_gui.c:517 print_gui.c:332 weekview_gui.c:343 +msgid "Print" +msgstr "In" + +#: ../tests/gnetwork-demo.c:251 po/silky.glade.h:174 +msgid "Send" +msgstr "Gởi" + +#: ../storage/GNOME_Evolution_Exchange_Storage.server.in.in.h:5 +msgid "Exchange" +msgstr "Exchange" + +#: ../objects/FS/function.c:822 +msgid "Attach" +msgstr "Đính kèm" + +#: ../src/toolbar-factory.c:115 +msgid "Add attachments to this message" +msgstr "Thêm đính kèm vào thư này." + +#: ../src/toolbar-factory.c:116 ../src/menus.c:263 +#: ../glade/glade_project_window.c:379 ../src/mlview-xml-document.cc:3478 +#: ../widgets/gtk+.xml.in.h:156 +msgid "Save" +msgstr "Lưu" + +#: ../src/toolbar-factory.c:117 +msgid "Save the current item" +msgstr "Lưu mục hiện thời." + +#: ../src/toolbar-factory.c:118 ../gnomecard/card-editor.c:427 +#: ../gnomecard/card-editor.glade.h:22 +msgid "Identity" +msgstr "Thực thể" + +#: ../src/toolbar-factory.c:119 +msgid "Set identity to use for this message" +msgstr "Lập thực thể cần dùng cho thư này."
+ +#: ../src/toolbar-factory.c:122 +msgid "Toggle spell checker" +msgstr "Bật/tắt bộ kiểm tra chính tả" + +#: ../src/toolbar-factory.c:124 +msgid "Run a spell check" +msgstr "Chạy kiểm tra chính tả" + +#: ../src/toolbar-factory.c:126 ../src/toolbar-factory.c:136 +#: ../glade/gbwidgets/gbdialog.c:331 ../list-ui.c:540 src/fe-gtk/search.c:120 +#: ../glade/gbwidgets/gbdialog.c:332 ../glade/search.glade.h:2 +#: ../widgets/gtk+.xml.in.h:35 install_gui.c:331 monthview_gui.c:511 +#: search_gui.c:585 weekview_gui.c:337 app/audioconfig.c:359 +#: app/gui-settings.c:496 app/gui.c:265 app/sample-editor.c:2207 +#: app/transposition.c:347 +msgid "Close" +msgstr "Đóng" + +#: ../src/toolbar-factory.c:127 +msgid "Close the compose window" +msgstr "Đóng cửa sổ soạn thảo" + +#: ../src/toolbar-factory.c:128 +msgid "" +"Toggle\n" +"new" +msgstr "" +"Bật/tắt\n" +"mới" + +#: ../src/toolbar-factory.c:129 +msgid "Toggle new message flag" +msgstr "Bật/tắt cờ thư mới." + +#: ../src/toolbar-factory.c:130 +msgid "Mark all" +msgstr "Đánh dấu hết" + +#: ../src/toolbar-factory.c:131 +msgid "Mark all messages in current mailbox" +msgstr "Đánh dấu mọi thư trong hộp thư hiện thời." + +#: ../src/toolbar-factory.c:132 +msgid "" +"All\n" +"headers" +msgstr "" +"Mọi\n" +"dòng đầu" + +#: ../src/toolbar-factory.c:133 +msgid "Show all headers" +msgstr "Hiện mọi dòng đầu." + +#: ../src/toolbar-factory.c:134 ../src/file-manager/fm-desktop-icon-view.c:706 +msgid "Empty Trash" +msgstr "Đổ Rác" + +#: ../src/toolbar-factory.c:137 +msgid "Close current mailbox" +msgstr "Đóng hộp thư hiện thời." + +#: ../src/toolbar-factory.c:138 +msgid "Msg Preview" +msgstr "Xem thử thư" + +#: ../src/toolbar-factory.c:139 +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:5 +msgid "Show preview pane" +msgstr "Hiện khung Xem thử" + +#: ../src/toolbar-factory.c:141 ../smime/lib/e-cert.c:424 +msgid "Sign" +msgstr "Ký" + +#: ../src/toolbar-factory.c:142 +msgid "Sign message using GPG" +msgstr "Ký tên thư bằng GPG" + +#: ../src/toolbar-factory.c:143 ../smime/lib/e-cert.c:425 +msgid "Encrypt" +msgstr "Mật mã hóa" + +#: ../src/toolbar-factory.c:144 +msgid "Encrypt message using GPG" +msgstr "Mật mã hóa thư bằng GPG." + +#: ../src/toolbar-factory.c:146 ../app/actions/edit-actions.c:69 +#: ../app/dialogs/dialogs.c:190 ../app/pdb/internal_procs.c:210 +#: ../src/menus.c:285 ../src/mainwin-menu.cc:84 +msgid "Undo" +msgstr "Hoàn lại" + +#: ../src/toolbar-factory.c:147 +msgid "Undo most recent change" +msgstr "Hoàn lại thay đổi gần nhất" + +#: ../src/toolbar-factory.c:148 ui/galeon-bookmarks-editor-ui.xml.in.h:69 +#: ../app/actions/edit-actions.c:75 ../src/mainwin-menu.cc:88 +msgid "Redo" +msgstr "Làm lại" + +#: ../src/toolbar-factory.c:149 +msgid "Redo most recent change" +msgstr "Làm lại thay đổi gần nhất." + +#: ../src/toolbar-factory.c:150 +msgid "" +"Expunge\n" +"Deleted" +msgstr "" +"Xoá hẳn\n" +"đã xoá bỏ" + +#: ../src/toolbar-factory.c:151 +msgid "Expunge messages marked as deleted" +msgstr "Xoá hẳn các thư có nhãn Đã xoá bỏ." + +#: ../src/toolbar-factory.c:239 +#, c-format +msgid "Unknown toolbar icon \"%s\"" +msgstr "Không biết biểu tượng thanh công cụ « %s »."
+ +#: ../widgets/gtk+.xml.in.h:150 +msgid "Queue" +msgstr "Hàng đợi" + +#: ../src/toolbar-factory.c:358 +msgid "Queue this message for sending" +msgstr "Sắp hàng thư này để gởi" + +#: ../src/toolbar-prefs.c:123 +msgid "Customize Toolbars" +msgstr "Tùy chỉnh thanh công cụ" + +#: ../src/toolbar-prefs.c:144 src/prefsdlg.cpp:77 +msgid "Main window" +msgstr "Cửa sổ chính" + +#: ../src/toolbar-prefs.c:148 +msgid "Compose window" +msgstr "Cửa sổ soạn thảo" + +#: ../src/toolbar-prefs.c:152 +msgid "Message window" +msgstr "Cửa sổ thư" + +#: ../src/toolbar-prefs.c:154 +msgid "Toolbar options" +msgstr "Tùy chọn thanh công cụ" + +#: ../src/toolbar-prefs.c:164 +msgid "_Wrap button labels" +msgstr "_Cuộn nhãn nút" + +#: ../src/toolbar-prefs.c:363 +#, c-format +msgid "Error displaying toolbar help: %s\n" +msgstr "Gặp lỗi khi hiển thị trợ giúp về thanh công cụ : %s\n" + +#: ../glade/gbwidgets/gbpreview.c:162 +msgid "Preview" +msgstr "Xem thử" + +#: ../src/toolbar-prefs.c:427 +msgid "_Restore toolbar to standard buttons" +msgstr "_Phục hồi các nút chuẩn lên thanh công cụ" + +#: ../src/toolbar-prefs.c:446 +msgid "Available buttons" +msgstr "Nút có sẵn" + +#: ../src/toolbar-prefs.c:462 +msgid "Current toolbar" +msgstr "Thanh công cụ hiện có" + +#: ../src/toolbar-prefs.c:479 makeinfo/node.c:1524 ../ui/directions.glade.h:12 +#: ../storage/sunone-permissions-dialog.glade.h:28 +#: ../widgets/gtk+.xml.in.h:202 app/tracker-settings.c:282 +msgid "Up" +msgstr "Lên" + +#: ../src/toolbar-prefs.c:495 +#: ../plug-ins/script-fu/scripts/beveled-pattern-arrow.scm.h:1 +#: ../ui/directions.glade.h:3 ../storage/sunone-permissions-dialog.glade.h:16 +#: ../widgets/gtk+.xml.in.h:56 app/tracker-settings.c:285 +msgid "Down" +msgstr "Xuống" + +#: ../gnome-panel/panel-action-button.c:279 ../gdictsrc/dict.c:676 +#: ../gdictsrc/gdict-pref-dialog.c:700 ../gdictsrc/gdict-pref-dialog.c:747 +msgid "Cannot connect to server" +msgstr "Không thể kết nối đến máy phục vụ" + +msgid "Cannot read message" +msgstr "Không thể đọc thư." + +msgid "%s: could not get message stream." +msgstr "%s: không thể lấy luồng thư." + +msgid "Error setting flags on messages in mailbox %s" +msgstr "Gặp lỗi khi đặt cờ lên thư trong hộp thư %s." + +msgid "POP3 mailbox %s temp mailbox error:\n" +msgstr "Lỗi hộp thư tạm của hộp thư POP3 %s:\n" + +#. override the labels/defaults of the standard settings +#: src/fe-gtk/menu.c:1280 ../data/netgame.glade.h:8 src/fe-gtk/menu.c:1414 +#: ../camel/camel-sunone-provider.c:27 +msgid "_Server" +msgstr "Máy _phục vụ" + +msgid "Identity:" +msgstr "Thực thể:" + +msgid "Show address:" +msgstr "Hiện địa chỉ:" + +msgid "_From" +msgstr "_Từ" + +msgid "_To" +msgstr "Ch_o :" + +msgid "Could not create temporary file %s: " +msgstr "Không thể tạo tập tin tạm thời %s: " + +msgid "Could not get part: %s" +msgstr "Không thể lấy phần: %s" + +msgid "_Middle Name:" +msgstr "Tên _lót:" + +msgid "Run GnomeCard" +msgstr "Chạy GnomeCard" + +msgid "New Address Book type:" +msgstr "Kiểu Sổ địa chỉ mới:" + +msgid "Balsa is not compiled with LDAP support" +msgstr "Trình Balsa không được biên dịch với hỗ trợ LDAP" + +msgid "_File Name" +msgstr "Tên _tập tin" + +msgid "Select path for VCARD address book" +msgstr "Chọn đường dẫn cho sổ địa chỉ dạng vCard" + +msgid "Select path for LDIF address book" +msgstr "Chọn đường dẫn cho sổ địa chỉ dạng LDIF"
+ +msgid "Match In" +msgstr "Khá»›p trong" + +msgid "Mailbox _Path:" +msgstr "_ÄÆ°á»ng dẫn há»™p thÆ° :" + +msgid "Mailbox Path" +msgstr "ÄÆ°á»ng dẫn há»™p thÆ°" + +msgid "(No identity set)" +msgstr "(ChÆ°a đật thá»±c thể)" + +msgid "C_hange..." +msgstr "_Äổi..." + +msgid "IMAP Server %s: %s" +msgstr "Máy phục vụ IMAP %s: %s" + +msgid "7 Bits" +msgstr "7-Bit" + +msgid "8 Bits" +msgstr "8-Bit" + +#: ../mail/mail-config.glade.h:113 ../mail/mail-config.glade.h:114 +msgid "Quoted" +msgstr "Trích dẫn" + +msgid "Remote SMTP Server" +msgstr "Máy phục vụ SMTP ở xa" + +#: ../libgda/gda-server-provider-extra.c:160 +#: ../storage/sunone-permissions-dialog.c:654 +msgid "User" +msgstr "NgÆ°á»i dùng" + +msgid "Use TLS" +msgstr "Dùng TLS" + +msgid "Select a font to use" +msgstr "Chá»n phông chữ cần dùng" + +#: ../data/glade/project-properties.glade.h:6 ../src/gnome-schedule.glade.h:52 +msgid "Select..." +msgstr "Chá»n..." + +msgid "attach as reference" +msgstr "đính kèm dạng tham chiếu" + +msgid "attach as file" +msgstr "đính kèm dạng tập tin" + +msgid "" +"This file is not encoded in US-ASCII or UTF-8.\n" +"Please choose the charset used to encode the file.\n" +msgstr "" +"Tập tin này không được mã hóa theo US-ASCII hay UTF-8.\n" +"Hãy chá»n bá»™ ký tá»± dùng để mã hóa tập tin.\n" + +msgid "_Attach as %s type \"%s\"" +msgstr "_Äính kèm dạng %s kiểu « %s »" + +msgid "" +"The message cannot be encoded in charset %s.\n" +"Please choose a language for this message.\n" +"For multi-language messages, choose UTF-8." +msgstr "" +"Không thể mã hóa thÆ° này bằng bá»™ ký tá»± %s.\n" +"Hãy chá»n má»™t ngôn ngữ cho thÆ° này.\n" +"Cho thÆ° Ä‘a ngôn ngữ, và cho má»i ngôn\n" +"ngữ khi có thể, hãy chá»n UTF-8." + +msgid "ukranian (koi)" +msgstr "U-cợ-rainh (KOI)" + +msgid "" +"Error placing messages from %s on %s\n" +"Messages are left in %s\n" +msgstr "" +"Gặp lá»—i khi để thÆ° từ %s lên %s.\n" +"Các thÆ° còn lại trong %s.\n" + +msgid "POP3 temp mailbox %s was not removed (system error message: %s)" +msgstr "ChÆ°a gỡ bá» há»™p thÆ° tạm thá»i POP3 %s (thông Ä‘iệp lá»—i hệ thống: %s)." + +msgid "Source mailbox (%s) is readonly. Cannot move messages" +msgstr "" +"Há»™p thÆ° nguồn (%s) chỉ cho phép Ä‘á»c. Không thể di chuyển các thông Ä‘iệp." + +msgid "" +"Error writing to temporary file %s.\n" +"Check the directory permissions." +msgstr "" +"Gặp lá»—i khi ghi vào tập tin tạm thá»i %s.\n" +"Hãy kiểm tra xem quyá»n hạn thÆ° mục là đúng." + +msgid "" +"SMTP server refused connection.\n" +"Balsa by default uses submission service (587).\n" +"If you want to submit mail using relay service (25),specify it explicitly " +"via: \"host:smtp\".\n" +"Message is left in outbox." +msgstr "" +"Máy phục vụ SMTP đã từ chối kết nối.\n" +"Balsa theo mặc định dùng dịch vụ đệ trình (587).\n" +"Nếu muốn đệ trình thÆ° bằng dịch vụ tiếp lại (25), hãy ghi rõ nó dứt khoát " +"bằng: \"host:smtp\".\n" +"ThÆ° còn lại trong há»™p ThÆ° Äi." + +msgid "Please enter information about yourself." +msgstr "Hãy gõ thông tin vá» bạn." + +#: ../extensions/actions/action-properties.glade.h:11 ../src/drivel.glade.h:74 +#: ../ui/muds.glade.h:52 ../pan/server-ui.c:329 +msgid "_Name:" +msgstr "T_ên:" + +msgid "" +"You seem to be running Balsa for the first time. The following steps will " +"set up Balsa by asking a few simple questions. Once you have completed these " +"steps, you can always change them later in Balsa's preferences. 
If any files " +"or directories need to be created, it will be done so automatically.\n" +" Please check the about box in Balsa's main window for more information " +"about contacting the authors or reporting bugs." +msgstr "" +"Có vẻ như bạn chạy Balsa lần đầu tiên. Các bước sau đây sẽ thiết lập Balsa " +"bằng một số câu hỏi đơn giản. Sau khi hoàn thành các bước này, bạn vẫn có thể " +"thay đổi chúng sau này trong mục Tùy thích của Balsa. Nếu cần tạo bất kỳ tập " +"tin hay thư mục nào, việc đó sẽ được thực hiện tự động.\n" +" Hãy xem hộp giới thiệu trong cửa sổ chính của Balsa để biết thêm thông tin " +"về cách liên lạc với tác giả hay thông báo lỗi." + +#: ../pan/grouplist.c:993 +msgid "Total" +msgstr "Tổng" + +msgid "By _Date" +msgstr "Theo _ngày" + +msgid "Cannot access the message's body\n" +msgstr "Không thể truy cập thân thư.\n" + +msgid "Display message size as number of lines" +msgstr "Hiển thị kích cỡ của thư dạng số dòng." + +msgid "" +"Failed to initialise LDAP server.\n" +"Check that the servername is valid." +msgstr "" +"Việc khởi chạy máy phục vụ LDAP bị lỗi.\n" +"Hãy kiểm tra tên máy phục vụ có hợp lệ không." + +msgid "Couldn't set protocol version to LDAPv3." +msgstr "Không thể đặt phiên bản giao thức là LDAPv3." + +msgid "Couldn't enable TLS on the LDAP connection: %s" +msgstr "Không thể bật TLS trên kết nối LDAP: %s" + +msgid "" +"Failed to bind to server: %s\n" +"Check that the server name is valid." +msgstr "" +"Việc đóng kết tới máy phục vụ bị lỗi: %s\n" +"Hãy kiểm tra tên máy phục vụ có hợp lệ không." + +msgid "Failed to do a search: %s.Check that the base name is valid." +msgstr "" +"Việc tìm kiếm bị lỗi: %s. Hãy kiểm tra tên cơ sở có hợp lệ không." + +msgid "This certificate belongs to:\n" +msgstr "Chứng nhận này thuộc về:\n" + +msgid "*** ERROR: Mailbox Lock Exists: %s ***\n" +msgstr "••• LỖI: Hộp thư vẫn còn bị khoá: %s •••\n" + +msgid "*** ERROR: Mailbox Stream Closed: %s ***\n" +msgstr "••• LỖI: Luồng hộp thư bị đóng: %s •••\n" + +msgid "LibBalsaMailboxImap: Opening %s Refcount: %d\n" +msgstr "LibBalsaMailboxImap: Đang mở %s Đếm tham chiếu : %d\n" + +msgid "LibBalsaMailboxLocal: Opening %s Refcount: %d\n" +msgstr "LibBalsaMailboxLocal: Đang mở %s Đếm tham chiếu : %d\n" + +msgid "Couldn't open destination mailbox (%s) for writing" +msgstr "Không thể mở hộp thư đích (%s) để ghi." + +msgid "Couldn't open destination mailbox (%s) for copying" +msgstr "Không thể mở hộp thư đích (%s) để sao chép." + +msgid "connection error" +msgstr "lỗi kết nối" + +msgid "Could not run the delivery program (procmail)" +msgstr "Không thể chạy chương trình phát thư (procmail)." + +msgid "Could not open mailbox for spooling" +msgstr "Không thể mở hộp thư để ghi vào hàng thư (spool)" + +#: ../libgnomevfs/gnome-vfs-result.c:69 +msgid "Host not found" +msgstr "Không tìm thấy máy" + +#: src/common/util.c:301 +msgid "Connection refused" +msgstr "Kết nối bị từ chối." + +msgid "Unable to open sentbox - could not get IMAP server information" +msgstr "" +"Không thể mở hộp thư Đã Gởi — không thể lấy thông tin về máy phục vụ IMAP." + +msgid "" +"The mailbox \"%s\" does not appear to be valid.\n" +"Your system does not allow for creation of mailboxes\n" +"in /var/spool/mail. Balsa wouldn't function properly\n" +"until the system created the mailboxes. Please change\n" +"the mailbox path or check your system configuration." +msgstr "" +"Hình như hộp thư « %s » không hợp lệ.\n" +"Hệ thống của bạn không cho phép tạo hộp thư trong\n" +"/var/spool/mail. 
Balsa sẽ không hoạt động đúng\n" +"cho tới khi hệ thống tạo được hộp thư. Hãy thay đổi\n" +"đường dẫn tới hộp thư hay kiểm tra cấu hình hệ thống." + +msgid "[-- Error: Could not display any parts of Multipart/Alternative! --]\n" +msgstr "[-- Lỗi: Không thể hiển thị phần nào của Đa phần/Xen kẽ ! --]\n" + +msgid "[-- Attachment #%d" +msgstr "[-- Đính kèm #%d" + +msgid "[-- Type: %s/%s, Encoding: %s, Size: %s --]\n" +msgstr "[-- Kiểu: %s/%s, Mã hoá: %s, Cỡ : %s --]\n" + +msgid "[-- Autoview using %s --]\n" +msgstr "[-- Xem tự động bằng %s --]\n" + +msgid "Invoking autoview command: %s" +msgstr "Đang gọi lệnh xem tự động: %s" + +msgid "Can't create filter" +msgstr "Không thể tạo bộ lọc." + +msgid "[-- Can't run %s. --]\n" +msgstr "[-- Không thể chạy %s. --]\n" + +msgid "[-- Autoview stderr of %s --]\n" +msgstr "[-- Xem tự động thiết bị lỗi chuẩn của %s --]\n" + +msgid "[-- Error: message/external-body has no access-type parameter --]\n" +msgstr "[-- Lỗi: message/external-body không có tham số kiểu truy cập --]\n" + +msgid "[-- This %s/%s attachment " +msgstr "[-- Đính kèm %s/%s này " + +msgid "(size %s bytes) " +msgstr "(cỡ %s byte) " + +msgid "has been deleted --]\n" +msgstr "đã được xoá bỏ --]\n" + +msgid "[-- on %s --]\n" +msgstr "[-- vào %s --]\n" + +msgid "" +"[-- This %s/%s attachment is not included, --]\n" +"[-- and the indicated external source has --]\n" +"[-- expired. --]\n" +msgstr "" +"[-- Đính kèm %s/%s này không được bao gồm, --]\n" +"[-- và nguồn bên ngoài được chỉ định --]\n" +"[-- đã hết hạn dùng. --]\n" + +msgid "[-- This %s/%s attachment is not included, --]\n" +msgstr "[-- Đính kèm %s/%s này không được bao gồm, --]\n" + +msgid "[-- and the indicated access-type %s is unsupported --]\n" +msgstr "[-- và kiểu truy cập đã chỉ định %s không được hỗ trợ --]\n" + +msgid "Error: multipart/signed has no protocol." +msgstr "Lỗi: multipart/signed không có giao thức." + +msgid "Error: multipart/encrypted has no protocol parameter!" +msgstr "Lỗi: multipart/encrypted không có tham số giao thức." + +msgid "Unable to open temporary file!" +msgstr "• Không thể mở tập tin tạm thời. •" + +msgid "[-- %s/%s is unsupported " +msgstr "[-- %s/%s không được hỗ trợ " + +msgid "(use '%s' to view this part)" +msgstr "(dùng '%s' để xem phần này)" + +msgid "(need 'view-attachments' bound to key!)" +msgstr "(cần gán 'view-attachments' với một phím!)" + +msgid "No authenticators available" +msgstr "Không có bộ xác thực sẵn sàng." + +msgid "Authenticating (anonymous)..." +msgstr "Đang xác thực (vô danh)..." + +msgid "Anonymous authentication failed." +msgstr "Việc xác thực vô danh bị lỗi." + +msgid "Authenticating (CRAM-MD5)..." +msgstr "Đang xác thực (CRAM-MD5)..." + +msgid "CRAM-MD5 authentication failed." +msgstr "Việc xác thực CRAM-MD5 bị lỗi." + +msgid "Authenticating (GSSAPI)..." +msgstr "Đang xác thực (GSSAPI)..." + +msgid "GSSAPI authentication failed." +msgstr "Việc xác thực GSSAPI bị lỗi." + +msgid "LOGIN disabled on this server." +msgstr "LOGIN bị tắt trên máy phục vụ này." + +msgid "Logging in..." +msgstr "Đang đăng nhập..." + +msgid "Authenticating (SASL)..." +msgstr "Đang xác thực (SASL)..." + +msgid "SASL authentication failed." +msgstr "Việc xác thực SASL bị lỗi." + +msgid "%s is an invalid IMAP path" +msgstr "%s là đường dẫn IMAP không hợp lệ." + +msgid "Getting namespaces..." +msgstr "Đang lấy các miền tên..." + +msgid "Getting folder list..." +msgstr "Đang lấy danh sách thư mục..."
+
+#: ../mail/mail-stub-exchange.c:248
+msgid "No such folder"
+msgstr "Không có thư mục như vậy"
+
+msgid "Create mailbox: "
+msgstr "Tạo hộp thư : "
+
+msgid "Mailbox must have a name."
+msgstr "Hộp thư phải có tên."
+
+msgid "Fatal error. Message count is out of sync!"
+msgstr "Lỗi trầm trọng. Số thư không được đồng bộ !"
+
+msgid "Closing connection to %s..."
+msgstr "Đang đóng kết nối tới %s..."
+
+msgid "This IMAP server is ancient. Mutt does not work with it."
+msgstr "Máy phục vụ IMAP này rất cũ nên trình Mutt không hoạt động với nó."
+
+msgid "Secure connection with TLS?"
+msgstr "Kết nối bảo mật bằng TLS không?"
+
+msgid "Connecting to %s ..."
+msgstr "Đang kết nối đến %s ..."
+
+msgid "Could not negotiate TLS connection"
+msgstr "Không thể thỏa thuận kết nối TLS."
+
+msgid "Selecting %s..."
+msgstr "Đang chọn %s..."
+
+msgid "Unable to append to IMAP mailboxes at this server"
+msgstr "Không thể phụ thêm vào hộp thư IMAP trên máy phục vụ này."
+
+msgid "Create %s?"
+msgstr "Tạo %s không?"
+
+msgid "Closing connection to IMAP server..."
+msgstr "Đang đóng kết nối tới máy phục vụ IMAP..."
+
+msgid "Saving message status flags... [%d/%d]"
+msgstr "Đang lưu các cờ trạng thái thư... [%d/%d]"
+
+msgid "Expunging messages from server..."
+msgstr "Đang xoá hẳn các thư ra máy phục vụ..."
+
+msgid "CLOSE failed"
+msgstr "Việc ĐÓNG bị lỗi."
+
+msgid "Bad mailbox name"
+msgstr "Tên hộp thư sai."
+
+msgid "Subscribing to %s..."
+msgstr "Đang đăng ký với %s..."
+
+msgid "Unsubscribing to %s..."
+msgstr "Đang bỏ đăng ký với %s..."
+
+msgid "Unable to fetch headers from this IMAP server version."
+msgstr "Không thể lấy các dòng đầu từ phiên bản máy phục vụ IMAP này."
+
+msgid "Fetching message headers... [%d/%d]"
+msgstr "Đang lấy các dòng đầu thư... [%d/%d]"
+
+msgid "Fetching message..."
+msgstr "Đang lấy thư..."
+
+msgid "The message index is incorrect. Try reopening the mailbox."
+msgstr "Chỉ mục thư không đúng. Hãy cố mở lại hộp thư."
+
+msgid "Uploading message ..."
+msgstr "Đang tải lên thư ..."
+
+msgid "Continue?"
+msgstr "Tiếp tục không?"
+
+# Variable: don't translate / Biến: đừng dịch
+msgid "%s [%s]\n"
+msgstr "%s [%s]\n"
+
+msgid "Out of memory!"
+msgstr "• Hết bộ nhớ. •"
+
+msgid "Reading %s... %d (%d%%)"
+msgstr "Đang đọc %s... %d (%d%%)"
+
+msgid "Mailbox is corrupt!"
+msgstr "• Hộp thư bị hỏng. •"
+
+msgid "Mailbox was corrupted!"
+msgstr "• Hộp thư bị hỏng. •"
+
+msgid "Fatal error! Could not reopen mailbox!"
+msgstr "• Lỗi nghiêm trọng: không thể mở lại hộp thư. •"
+
+msgid "sync: mbox modified, but no modified messages! (report this bug)"
+msgstr ""
+"đồng bộ : hộp thư mbox đã sửa đổi, nhưng không có thư đã sửa đổi (hãy thông "
+"báo lỗi này)."
+
+msgid "Writing messages... %d (%d%%)"
+msgstr "Đang ghi thư... %d (%d%%)"
+
+msgid "Committing changes..."
+msgstr "Đang gài vào các thay đổi..."
+
+msgid "Write failed! Saved partial mailbox to %s"
+msgstr "• Việc ghi bị lỗi. Đã lưu phần hộp thư vào %s. •"
+
+msgid "Could not reopen mailbox!"
+msgstr "• Không thể mở lại hộp thư. •"
+
+msgid "Connection to %s closed"
+msgstr "Kết nối đến %s bị đóng."
+
+msgid "SSL is unavailable."
+msgstr "SSL không sẵn sàng."
+
+msgid "Preconnect command failed."
+msgstr "Lệnh tiền kết nối bị lỗi."
+
+msgid "Error talking to %s (%s)"
+msgstr "Gặp lỗi khi nói với %s (%s)."
+
+msgid "Looking up %s..."
+msgstr "Đang tra tìm %s..."
+
+msgid "Connecting to %s..."
+msgstr "Đang kết nối đến %s..."
+ +msgid "Could not connect to %s (%s)." +msgstr "Không thể kết nối đến %s (%s)." + +msgid "Failed to find enough entropy on your system" +msgstr "" +"Không tìm thấy đủ en-tợ-rô-pi (tính trạng ngẫu nhiên) trong hệ thống của bạn." + +msgid "Filling entropy pool: %s...\n" +msgstr "Äang Ä‘iá»n vÅ©ng en-tợ-rô-pi: %s...\n" + +msgid "%s has insecure permissions!" +msgstr "• %s có quyá»n hạn không bảo mật. •" + +msgid "SSL disabled due the lack of entropy" +msgstr "SSL bị tắt do thiếu en-tợ-rô-pi." + +#: src/files.c:117 +msgid "I/O error" +msgstr "Lá»—i nhập/xuất" + +msgid "unspecified protocol error" +msgstr "lá»—i giao thức không xác định" + +msgid "Unable to get certificate from peer" +msgstr "Không thể lấy chứng nhận từ ngang hàng." + +msgid "SSL connection using %s (%s)" +msgstr "Kết nối SSL bằng %s (%s)" + +msgid "[unable to calculate]" +msgstr "[không thể tính]" + +msgid "Server certificate is not yet valid" +msgstr "Chứng nhận máy phục vụ chÆ°a hợp lệ." + +msgid "Server certificate has expired" +msgstr "Chứng nhận máy phục vụ đã hết hạn." + +msgid "Warning: Couldn't save certificate" +msgstr "Cảnh báo : không thể lÆ°u chứng nhận." + +msgid "Certificate saved" +msgstr "Chứng nhận đã được lÆ°u." + +msgid "This certificate belongs to:" +msgstr "Chứng nhận này thuá»™c vá»:" + +msgid "This certificate was issued by:" +msgstr "Chứng nhận này được phát hành bởi:" + +msgid " from %s" +msgstr " từ %s" + +msgid " to %s" +msgstr " đến %s" + +msgid "SSL Certificate check" +msgstr "Kiểm tra chứng nhận SSL" + +msgid "(r)eject, accept (o)nce, (a)ccept always" +msgstr "(t)ừ chối, chấp nhận (m)á»™t lần, (l)uôn chấp nhận" + +msgid "(r)eject, accept (o)nce" +msgstr "(t)ừ chối, chấp nhận (m)á»™t lần" + +msgid "Exit " +msgstr "Thoát " + +#: src/fe-gtk/editlist.c:380 web/template/auth.tpl:4 src/floatwin.cpp:147 +#: jpilot.c:386 +msgid "Help" +msgstr "Trợ giúp" + +msgid "Reading %s... %d" +msgstr "Äang Ä‘á»c %s... %d" + +msgid "Lock count exceeded, remove lock for %s?" +msgstr "Vượt quá tổng số khoá, gỡ bá» khoá cho %s không?" + +msgid "Can't dotlock %s.\n" +msgstr "Không thể khoá chấm %s.\n" + +msgid "Couldn't lock %s\n" +msgstr "Không thể khoá '%s\n" + +msgid "Writing %s..." +msgstr "Äang ghi %s..." + +msgid "Could not synchronize mailbox %s!" +msgstr "• Không thể đồng bá»™ hóa há»™p thÆ° %s. •" + +msgid "Mailbox is unchanged." +msgstr "Há»™p thÆ° chÆ°a thay đổi." + +msgid "%d kept, %d moved, %d deleted." +msgstr "%d đã giữ lại, %d đã di chuyển, %d đã xoá bá»." + +msgid "%d kept, %d deleted." +msgstr "%d đã giữ, %d đã xoá bá»." + +msgid " Press '%s' to toggle write" +msgstr " Nhấn « %s » để bật/tắt ghi" + +msgid "Use 'toggle-write' to re-enable write!" +msgstr "• Dùng « bật/tắt ghi » để bật lại khả năng ghi. •" + +msgid "Mailbox is marked unwritable. %s" +msgstr "Há»™p thÆ° có nhãn không có khả năng ghi. %s" + +msgid "Mailbox is read-only." +msgstr "Há»™p thÆ° là chỉ-Ä‘á»c." + +msgid "Purge %d deleted message?" +msgstr "Tẩy %d thÆ° đã xoá bá» không?" + +msgid "%d kept." +msgstr "%d đã giữ" + +msgid "multipart message has no boundary parameter!" +msgstr "• ThÆ° Ä‘a phần không có tham số ranh giá»›i •" + +msgid "No boundary parameter found! [report this error]" +msgstr "• Không tìm thấy tham số ranh giá»›i! [hãy thông báo lá»—i này] •" + +msgid "%s no longer exists!" +msgstr "• %s không còn tồn tại lại. •" + +msgid "Can't stat %s: %s" +msgstr "Không thể lấy các thông tin vá» %s: %s" + +msgid "%s isn't a regular file." +msgstr "%s không phải là tập tin chuẩn." 
+ +msgid "Output of the delivery process" +msgstr "Kết xuất của tiến trình phát thÆ°." + +msgid "Remote IMAP folder set" +msgstr "Äã lập thÆ° mục IMAP ở xa." + +msgid "Use SS_L (IMAPS)" +msgstr "Dùng SS_L (IMAPS)" + +msgid "Balsa Information" +msgstr "Thông tin Balsa" + +msgid "Oooop! mailbox not found in balsa_app.mailbox nodes?\n" +msgstr "" +"á»i! không tìm thấy há»™p thÆ° trong các nút « balsa_app.mailbox » (há»™p thÆ° ứng " +"dụng balsa)?\n" + +msgid "No value set" +msgstr "ChÆ°a đặt giá trị." + +msgid "Use _APOP Authentication" +msgstr "Dùng cách xác thá»±c _APOP" + +msgid "Use SS_L (pop3s)" +msgstr "Dùng SS_L (pop3s)" + +#: ../glade/property.c:5115 ../pan/filter-edit-ui.c:809 ../pan/gui.c:1163 +#: ../src/mainwin-menu.cc:50 category.c:832 +msgid "New" +msgstr "Má»›i" + +msgid "Delete the current message" +msgstr "Xoá bá» thÆ° hiện thá»i" + +msgid "Undelete the message" +msgstr "Hủy xoá bá» thÆ°" + +#. ../lisp/sawfish/wm/menus.jl +msgid "_Toggle" +msgstr "_Bật/tắt" + +msgid "Co_mmit Current" +msgstr "_Gài vào Ä‘iá»u hiện thá»i" + +msgid "Commit _All" +msgstr "Gài vào _hết" + +msgid "Commit the changes in all mailboxes" +msgstr "Gài vào các thay đổi trong má»i há»™p thÆ°." + +msgid "Edit/Apply _Filters" +msgstr "Sá»­a/Ãp dụng bá»™ _lá»c" + +msgid "Filter the content of the selected mailbox" +msgstr "Lá»c ná»™i dung của há»™p thÆ° được chá»n." + +msgid "" +"Unable to Open Mailbox!\n" +"Please check the mailbox settings." +msgstr "" +"• Không thể mở há»™p thÆ°. •\n" +"Hãy kiểm tra xem thiết lập há»™p thÆ° là đúng." + +msgid "Copyright (C) 1997-2002" +msgstr "Bản quyá»n © năm 1997-2002" + +msgid "You have received 1 new message." +msgstr "Bạn má»›i nhận 1 thÆ° má»›i." + +msgid "External editor command:" +msgstr "Lệnh bá»™ soạn thảo bên ngoại:" + +msgid "Message window title format:" +msgstr "Dạng thức tá»±a cá»­a sổ thÆ° :" + +msgid "Mailbox Colors" +msgstr "Màu sắc há»™p thÆ°" + +msgid "Mailbox with unread messages color" +msgstr "Màu của há»™p thÆ° có thÆ° chÆ°a Ä‘á»c" + +msgid "Delete immediately and irretrievably" +msgstr "Xoá bá» ngay lập tức và hoàn toàn" + +#: ../shell/ev-sidebar-links.c:304 +msgid "Print..." +msgstr "In..." + +msgid "_Reflow Paragraph" +msgstr "Cuá»™n _lại Ä‘oạn văn" + +msgid "R_eflow Message" +msgstr "C_uá»™n lại thÆ°" + +#: ../interfaces/users.glade.in.h:49 +msgid "_Comments" +msgstr "_Ghi chú" + +msgid "_Keywords" +msgstr "Từ _khoá" + +#: ../src/util.c:399 +msgid "English" +msgstr "Tiếng Anh" + +#: ui/bookmarks-editor.glade.h:48 +msgid "UTF-8" +msgstr "UTF-8" + +msgid "_A-J" +msgstr "_A-J" + +msgid "_K-Z" +msgstr "_K-Z" + +#: ../src/header_stuff.c:469 ../glade/gbwidgets/gbaboutdialog.c:106 +#: ../glade/gnome/gnomeabout.c:139 +msgid "Comments:" +msgstr "Ghi chú :" + +#: ../src/blam.glade.h:17 ../xpdf/gpdf-properties-dialog.glade.h:4 +msgid "Keywords:" +msgstr "Từ khoá:" + +msgid "Sorry, no semicolons are allowed in the name!\n" +msgstr "Xin lá»—i, không chấp nhận dấu chấm phẩy trong tên.\n" + +msgid "Cancel this message" +msgstr "Thôi thÆ° này" + +#. ../lisp/sawfish/wm/customize.jl +msgid "Customize" +msgstr "Tùy chỉnh" + +msgid "_File name" +msgstr "T_ên tập tin" + +msgid "Multiple mailboxes named \"%s\"" +msgstr "Có nhiá»u thÆ° có cùng tên « %s »." 
+
+#: ../plug-ins/common/ccanalyze.c:405
+#, c-format
+msgid "Filename: %s"
+msgstr "Tên tập tin: %s"
+
+msgid "Use SS_L (imaps)"
+msgstr "Dùng SS_L (imaps)"
+
+msgid "Next part in Message"
+msgstr "Phần kế trong thư"
+
+msgid "Previous part in Message"
+msgstr "Phần trước trong thư"
+
+msgid "Reflow messages of type `text/plain; format=flowed'"
+msgstr "Cuộn lại các thư kiểu « text/plain; format=flowed »"
+
+msgid "Send message as type `text/plain; format=flowed'"
+msgstr "Gởi thư dạng « text/plain; format=flowed »"
+
+#: ../grecord/src/gsr-window.c:1032 ../grecord/src/gsr-window.c:1953
+#: ../src/record.c:187 ../plug-ins/imagemap/imap_settings.c:94
+msgid "Filename:"
+msgstr "Tên tập tin:"
+
+msgid "_Always Queue Sent Mail"
+msgstr "_Luôn sắp hàng thư đã gởi"
+
+msgid "Date: %s\n"
+msgstr "Ngày: %s\n"
+
+msgid "From: %s\n"
+msgstr "Từ : %s\n"
+
+msgid "To: %s\n"
+msgstr "Cho : %s\n"
+
+# Literal: don't translate / Nghĩa chữ : đừng dịch
+msgid "CC: %s\n"
+msgstr "CC: %s\n"
+
+msgid "Delete messages from the trash mailbox"
+msgstr "Xoá bỏ thư ra hộp thư Rác"
+
+msgid "Address _Book Name"
+msgstr "T_ên sổ địa chỉ"
+
+msgid "Connecting with \"%s\"..."
+msgstr "Đang kết nối đến « %s »..."
+
+msgid "Tunnel error talking to %s: %s"
+msgstr "Gặp lỗi đường hầm khi nói với %s: %s"
+
+msgid "Source mailbox (%s) is readonly. Cannot move message"
+msgstr "Hộp thư nguồn (%s) chỉ cho phép đọc nên không thể di chuyển thư."
+
+msgid "Preview Font"
+msgstr "Phông chữ xem thử"
+
+msgid "Preview pane"
+msgstr "Khung xem thử"
+
+msgid "Card Name:"
+msgstr "Tên thẻ:"
+
+#: ../addressbook/printing/e-contact-print.glade.h:4 ../pan/gui-notebook.c:57
+msgid "Body"
+msgstr "Thân"
+
+msgid "Selected condition search type:"
+msgstr "Kiểu tìm kiếm điều kiện đã chọn:"
+
+#: ../Pyblio/GnomeUI/Search.py:86
+#: ../storage/sunone-add-permission-dialog.glade.h:3
+#: ../storage/sunone-permissions-dialog.c:585 address_gui.c:2705
+#: category.c:421 category.c:844 datebook_gui.c:4376 memo_gui.c:1557
+#: todo_gui.c:2174 Expense/expense.c:1646 KeyRing/keyring.c:1612
+#: app/envelope-box.c:893 app/playlist.c:533
+msgid "Delete"
+msgstr "Xoá bỏ"
+
+msgid "Filters may not be correct"
+msgstr "Bộ lọc có thể không đúng."
+
+#: ../shell/rb-statusbar.c:258 app/gui.c:1769 app/gui.c:1806
+msgid "Loading..."
+msgstr "Đang tải..."
+
+msgid ""
+"Could not open external query address book %s while trying to parse output "
+"from: %s"
+msgstr ""
+"Không thể mở sổ địa chỉ truy vấn bên ngoài %s trong khi cố phân tách dữ liệu "
+"xuất từ : %s"
+
+msgid "Could not add address to address book %s while trying to execute: %s"
+msgstr "Không thể thêm địa chỉ vào sổ địa chỉ %s trong khi cố thực thi: %s"
+
+msgid "Could not stat ldif address book: %s"
+msgstr "Không thể lấy các thông tin về sổ địa chỉ LDIF: %s"
+
+msgid "Could not open LDIF address book %s."
+msgstr "Không thể mở sổ địa chỉ LDIF %s."
+
+msgid "Could not stat vcard address book: %s"
+msgstr "Không thể lấy các thông tin về sổ địa chỉ vCard: %s"
+
+msgid "Could not open vCard address book %s."
+msgstr "Không thể mở sổ địa chỉ vCard %s."
+
+msgid "Cannot open vCard address book %s for saving\n"
+msgstr "Không thể mở sổ địa chỉ vCard %s để lưu\n"
+
+msgid "No such address book type: %s"
+msgstr "Không có kiểu sổ địa chỉ như vậy: %s"
+
+msgid "Could not create a address book of type %s"
+msgstr "Không thể tạo sổ địa chỉ kiểu %s."
+
+msgid "_Customize..."
+msgstr "Tù_y chỉnh..."
+ +msgid "" +"The attachment pixmap (%s) cannot be used.\n" +" %s" +msgstr "" +"Không thể sá»­ dụng sÆ¡ đồ Ä‘iểm ảnh của đính kèm (%s).\n" +"%s" + +msgid "" +"Default attachment pixmap (attachment.png) cannot be found:\n" +"Your balsa installation is corrupted." +msgstr "" +"Không thể tìm thấy sÆ¡ đồ Ä‘iểm ảnh (pixmap) đính kèm mặc định (attachment." +"png):\n" +"Bản cài đặt Balsa bị há»ng." + +#: ../glade/config1.glade.h:14 ../glade/fields1.glade.h:19 +#: ../widgets/gtk+.xml.in.h:179 ../src/orca/rolenames.py:453 +msgid "Text" +msgstr "Chữ" + +#: ../src/file-manager/fm-icon-view.c:2721 ../widgets/gtk+.xml.in.h:98 +msgid "Icons" +msgstr "Biểu tượng" + +#: src/fe-gtk/menu.c:1411 ../plug-ins/print/gimp_main_window.c:621 +#: ../widgets/gtk+.xml.in.h:15 +msgid "Both" +msgstr "Cả hai" + +msgid "load program" +msgstr "tải chÆ°Æ¡ng trình" + +msgid "save program" +msgstr "lÆ°u chÆ°Æ¡ng trình" + +#: ../src/mlview-attribute-picker.cc:163 address_gui.c:697 alarms.c:226 +#: category.c:420 category.c:871 category.c:911 datebook_gui.c:669 +#: datebook_gui.c:1149 export_gui.c:339 jpilot.c:363 jpilot.c:409 jpilot.c:477 +#: jpilot.c:521 jpilot.c:968 jpilot.c:1869 memo_gui.c:526 password.c:352 +#: restore_gui.c:307 todo_gui.c:661 utils.c:1084 utils.c:1256 +#: KeyRing/keyring.c:1322 src/silc-command-reply.c:787 +#: app/sample-editor.c:1655 app/sample-editor.c:1970 +msgid "OK" +msgstr "Äược" + +msgid "" +"Can not convert %s, falling back to US-ASCII.\n" +"Some characters may be printed incorrectly." +msgstr "" +"Không thể chuyển đổi %s nên quay lại US-ASCII.\n" +"Có thể không in ra đúng má»™t số ký tá»±." + +msgid "" +"Balsa could not find font %s\n" +"Printing is not possible" +msgstr "" +"Không thể tìm thấy phông chữ %s.\n" +"NhÆ° thế thì không in ra được." + +msgid "Reply..." +msgstr "Trả lá»i..." + +msgid "Store Address..." +msgstr "LÆ°u địa chỉ..." + +#: address_gui.c:2711 datebook_gui.c:4382 memo_gui.c:1563 todo_gui.c:2180 +msgid "Undelete" +msgstr "Phục hồi" + +msgid "Address Book Configuration" +msgstr "Cấu hình Sổ địa chỉ" + +#: ../Pyblio/GnomeUI/Config.py:432 +msgid "Update" +msgstr "Cập nhật" + +msgid "Memory allocation error" +msgstr "Lá»—i cấp phát bá»™ nhá»›" + +msgid "All headers" +msgstr "Má»i dòng đầu" + +#: addr2line.c:73 +#, c-format +msgid "Usage: %s [option(s)] [addr(s)]\n" +msgstr "Cách sá»­ dụng: %s [tùy_chá»n...] 
+
+#: addr2line.c:74
+#, c-format
+msgid " Convert addresses into line number/file name pairs.\n"
+msgstr " Chuyển đổi địa chỉ sang cặp số thứ tự dòng/tên tập tin.\n"
+
+#: addr2line.c:75
+#, c-format
+msgid ""
+" If no addresses are specified on the command line, they will be read from "
+"stdin\n"
+msgstr ""
+"Nếu chưa ghi rõ địa chỉ trên dòng lệnh, sẽ đọc chúng từ thiết bị nhập chuẩn\n"
+
+#: addr2line.c:76
+#, c-format
+msgid ""
+" The options are:\n"
+" -b --target=<bfdname> Set the binary file format\n"
+" -e --exe=<executable> Set the input file name (default is a.out)\n"
+" -s --basenames Strip directory names\n"
+" -f --functions Show function names\n"
+" -C --demangle[=style] Demangle function names\n"
+" -h --help Display this information\n"
+" -v --version Display the program's version\n"
+"\n"
+msgstr ""
+" Tùy chọn:\n"
+" -b --target=<bfdname> \tLập khuôn dạng tập tin nhị phân (_đích_)\n"
+" -e --exe=<executable> \tLập tên tập tin nhập (mặc định là a.out)\n"
+"\t\t\t\t\t\t\t\t(_chương trình chạy được_)\n"
+" -s --basenames\t\tTước các tên thư mục (_các tên cơ bản_)\n"
+" -f --functions \tHiện tên _các chức năng_\n"
+" -C --demangle[=kiểu_dáng] \t_Tháo gỡ_ tên chức năng\n"
+" -h --help \tHiện thông tin _trợ giúp_ này\n"
+" -v --version \tHiện _phiên bản_ của chương trình\n"
+"\n"
+
+#: sysdump.c:655 windres.c:672 lexsup.c:1547 gprof.c:176
+#, c-format
+msgid "Report bugs to %s\n"
+msgstr "Hãy thông báo lỗi nào cho %s\n"
+
+#: addr2line.c:241
+#, c-format
+msgid "%s: can not get addresses from archive"
+msgstr "%s: không thể lấy địa chỉ từ kho"
+
+#: addr2line.c:311 nm.c:1519 objdump.c:2848
+#, c-format
+msgid "unknown demangling style `%s'"
+msgstr "không biết kiểu dáng tháo gỡ « %s »"
+
+#: ar.c:205
+#, c-format
+msgid "no entry %s in archive\n"
+msgstr "không có mục nhập %s trong kho\n"
+
+#: ar.c:221
+#, c-format
+msgid ""
+"Usage: %s [emulation options] [-]{dmpqrstx}[abcfilNoPsSuvV] [member-name] "
+"[count] archive-file file...\n"
+msgstr ""
+"Cách sử dụng: %s [tùy chọn mô phỏng] [-]{dmpqrstx}[abcfilNoPsSuvV] [tên "
+"thành viên] [số đếm] tập_tin_kho tập_tin...\n"
+
+#: ar.c:224
+#, c-format
+msgid " %s -M [<mri-script]\n"
+msgstr " %s -M [<mri-script]\n"
+
+#, c-format
+msgid ""
+"\n"
+"<%s>\n"
+"\n"
+msgstr ""
+"\n"
+"<%s>\n"
+"\n"
+
+#: ar.c:806 ar.c:873
+#, c-format
+msgid "%s is not a valid archive"
+msgstr "%s không phải là một kho hợp lệ"
+
+#: ar.c:841
+#, c-format
+msgid "stat returns negative size for %s"
+msgstr "việc stat (lấy các thông tin) trả gởi kích cỡ âm cho %s"
+
+#: ar.c:1059
+#, c-format
+msgid "No member named `%s'\n"
+msgstr "Không có thành viên tên « %s »\n"
+
+#: ar.c:1109
+#, c-format
+msgid "no entry %s in archive %s!"
+msgstr "không có mục nhập %s trong kho %s."
+
+#: ar.c:1246
+#, c-format
+msgid "%s: no archive map to update"
+msgstr "%s: không có ánh xạ kho cần cập nhật"
+
+#: arsup.c:83
+#, c-format
+msgid "No entry %s in archive.\n"
+msgstr "Không có mục nhập %s trong kho.\n"
+
+#: arsup.c:109
+#, c-format
+msgid "Can't open file %s\n"
+msgstr "Không thể mở tập tin %s\n"
+
+#: arsup.c:162
+#, c-format
+msgid "%s: Can't open output archive %s\n"
+msgstr "%s: Không thể mở kho xuất %s\n"
+
+#: arsup.c:179
+#, c-format
+msgid "%s: Can't open input archive %s\n"
+msgstr "%s: Không thể mở kho nhập %s\n"
+
+#: arsup.c:188
+#, c-format
+msgid "%s: file %s is not an archive\n"
+msgstr "%s: tập tin %s không phải là kho\n"
+
+#: arsup.c:227
+#, c-format
+msgid "%s: no output archive specified yet\n"
+msgstr "%s: chưa ghi rõ kho xuất\n"
+
+#: arsup.c:247 arsup.c:285 arsup.c:327 arsup.c:347 arsup.c:413
+#, c-format
+msgid "%s: no open output archive\n"
+msgstr "%s: không có kho xuất đã mở\n"
+
+#: arsup.c:258 arsup.c:368 arsup.c:394
+#, c-format
+msgid "%s: can't open file %s\n"
+msgstr "%s: không thể mở tập tin %s\n"
+
+#: arsup.c:312 arsup.c:390 arsup.c:471
+#, c-format
+msgid "%s: can't find module file %s\n"
+msgstr "%s: không tìm thấy tập tin mô-đun %s\n"
+
+#: arsup.c:422
+#, c-format
+msgid "Current open archive is %s\n"
+msgstr "Kho đã mở hiện thời là %s\n"
+
+#: arsup.c:446
+#, c-format
+msgid "%s: no open archive\n"
+msgstr "%s: không có kho đã mở\n"
+
+#: binemul.c:37
+#, c-format
+msgid " No emulation specific options\n"
+msgstr " Không có tùy chọn đặc trưng cho mô phỏng\n"
+
+#. Macros for common output.
+#: binemul.h:42
+#, c-format
+msgid " emulation options: \n"
+msgstr " tùy chọn mô phỏng:\n"
+
+#: bucomm.c:109
+#, c-format
+msgid "can't set BFD default target to `%s': %s"
+msgstr "không thể lập đích mặc định BFD thành « %s »: %s"
+
+#: bucomm.c:120
+#, c-format
+msgid "%s: Matching formats:"
+msgstr "%s: khuôn dạng khớp:"
+
+#: bucomm.c:135
+#, c-format
+msgid "Supported targets:"
+msgstr "Đích hỗ trợ :"
+
+#: bucomm.c:137 lexsup.c:1530
+#, c-format
+msgid "%s: supported targets:"
+msgstr "%s: đích hỗ trợ :"
+
+#: bucomm.c:153
+#, c-format
+msgid "Supported architectures:"
+msgstr "Kiến trúc hỗ trợ :"
+
+#: bucomm.c:155
+#, c-format
+msgid "%s: supported architectures:"
+msgstr "%s: kiến trúc hỗ trợ :"
+
+#: bucomm.c:348
+#, c-format
+msgid "BFD header file version %s\n"
+msgstr "Phiên bản tập tin đầu BFD %s\n"
+
+#: bucomm.c:449
+#, c-format
+msgid "%s: bad number: %s"
+msgstr "%s: số sai: %s"
+
+#: bucomm.c:466 strings.c:386
+#, c-format
+msgid "'%s': No such file"
+msgstr "« %s »: không có tập tin như vậy"
+
+#: bucomm.c:468 strings.c:388
+#, c-format
+msgid "Warning: could not locate '%s'. reason: %s"
+msgstr "Cảnh báo : không thể định vị « %s ». Lý do : %s"
+
+#: bucomm.c:472
+#, c-format
+msgid "Warning: '%s' is not an ordinary file"
+msgstr "Cảnh báo : « %s » không phải là một tập tin chuẩn"
+
+#: coffdump.c:105
+#, c-format
+msgid "#lines %d "
+msgstr "#dòng %d "
+
+#: coffdump.c:459 sysdump.c:648
+#, c-format
+msgid "Usage: %s [option(s)] in-file\n"
+msgstr "Cách sử dụng: %s [tùy_chọn...] tập_tin_nhập\n"
+
+#: coffdump.c:460
+#, c-format
+msgid " Print a human readable interpretation of a SYSROFF object file\n"
+msgstr ""
+"In ra lời thông dịch cho phép người đọc của tập tin đối tượng SYSROFF\n"
+
+#: coffdump.c:461
+#, c-format
+msgid ""
+" The options are:\n"
+" -h --help Display this information\n"
+" -v --version Display the program's version\n"
+"\n"
+msgstr ""
+"Tùy chọn:\n"
+" -h, --help hiển thị _trợ giúp_ này\n"
+" -v --version hiển thị _phiên bản_ của chương trình\n"
+
+#: coffdump.c:527 srconv.c:1819 sysdump.c:710
+msgid "no input file specified"
+msgstr "chưa ghi rõ tập tin nhập"
+
+#: debug.c:648
+msgid "debug_add_to_current_namespace: no current file"
+msgstr ""
+"debug_add_to_current_namespace: (gỡ lỗi thêm vào vùng tên hiện có) không có "
+"tập tin hiện thời"
+
+#: debug.c:727
+msgid "debug_start_source: no debug_set_filename call"
+msgstr ""
+"debug_start_source: (gỡ lỗi bắt đầu nguồn) không có cuộc gọi kiểu « "
+"debug_set_filename » (gỡ lỗi lập tên tập tin)"
+
+#: debug.c:783
+msgid "debug_record_function: no debug_set_filename call"
+msgstr ""
+"debug_record_function: (gỡ lỗi ghi lưu chức năng) không có cuộc gọi kiểu « "
+"debug_set_filename » (gỡ lỗi lập tên tập tin)"
+
+#: debug.c:835
+msgid "debug_record_parameter: no current function"
+msgstr ""
+"debug_record_parameter: (gỡ lỗi ghi lưu tham số) không có chức năng hiện thời"
+
+#: debug.c:867
+msgid "debug_end_function: no current function"
+msgstr ""
+"debug_end_function: (gỡ lỗi kết thúc chức năng) không có chức năng hiện thời"
+
+#: debug.c:873
+msgid "debug_end_function: some blocks were not closed"
+msgstr ""
+"debug_end_function: (gỡ lỗi kết thúc chức năng) một số khối chưa được đóng"
+
+#: debug.c:901
+msgid "debug_start_block: no current block"
+msgstr "debug_start_block: (gỡ lỗi bắt đầu khối) không có khối hiện thời"
+
+#: debug.c:937
+msgid "debug_end_block: no current block"
+msgstr "debug_end_block: (gỡ lỗi kết thúc khối) không có khối hiện thời"
+
+#: debug.c:944
+msgid "debug_end_block: attempt to close top level block"
+msgstr "debug_end_block: (gỡ lỗi kết thúc khối) cố đóng khối cấp đầu"
+
+#: debug.c:967
+msgid "debug_record_line: no current unit"
+msgstr "debug_record_line: (gỡ lỗi ghi lưu dòng) không có đơn vị hiện thời"
+
+#. FIXME
+#: debug.c:1020
+msgid "debug_start_common_block: not implemented"
+msgstr "debug_start_common_block: not implemented"
+
+#. FIXME
+#: debug.c:1031
+msgid "debug_end_common_block: not implemented"
+msgstr "debug_end_common_block: not implemented"
+
+#. FIXME.
+#: debug.c:1115
+msgid "debug_record_label: not implemented"
+msgstr "debug_record_label: not implemented"
+
+#: debug.c:1137
+msgid "debug_record_variable: no current file"
+msgstr ""
+"debug_record_variable: (gỡ lỗi ghi lưu biến) không có tập tin hiện thời"
+
+#: debug.c:1665
+msgid "debug_make_undefined_type: unsupported kind"
+msgstr ""
+"debug_make_undefined_type: (gỡ lỗi tạo kiểu chưa được định nghĩa) kiểu chưa "
+"được hỗ trợ"
+
+#: debug.c:1842
+msgid "debug_name_type: no current file"
+msgstr "debug_name_type: no current file"
+
+#: debug.c:1887
+msgid "debug_tag_type: no current file"
+msgstr ""
+"debug_tag_type: (gỡ lỗi kiểu thẻ) không có tập tin hiện thời"
+
+#: debug.c:1895
+msgid "debug_tag_type: extra tag attempted"
+msgstr "debug_tag_type: (gỡ lỗi kiểu thẻ) đã cố thẻ thêm"
+
+#: debug.c:1932
+#, c-format
+msgid "Warning: changing type size from %d to %d\n"
+msgstr "Cảnh báo : đang thay đổi kích cỡ kiểu từ %d đến %d\n"
+
+#: debug.c:1954
+msgid "debug_find_named_type: no current compilation unit"
+msgstr ""
+"debug_find_named_type: (gỡ lỗi tìm kiểu tên đã cho) không có đơn vị biên "
+"dịch hiện thời"
+
+#: debug.c:2057
+#, c-format
+msgid "debug_get_real_type: circular debug information for %s\n"
+msgstr ""
+"debug_get_real_type: (gỡ lỗi lấy kiểu thật) thông tin gỡ lỗi vòng cho %s\n"
+
+#: debug.c:2484
+msgid "debug_write_type: illegal type encountered"
+msgstr "debug_write_type: (gỡ lỗi ghi kiểu) gặp kiểu không được phép"
+
+#: dlltool.c:773 dlltool.c:797 dlltool.c:826
+#, c-format
+msgid "Internal error: Unknown machine type: %d"
+msgstr "Lỗi nội bộ : không biết kiểu máy: %d"
+
+#: dlltool.c:862
+#, c-format
+msgid "Can't open def file: %s"
+msgstr "Không thể mở tập tin định nghĩa: %s"
+
+#: dlltool.c:867
+#, c-format
+msgid "Processing def file: %s"
+msgstr "Đang xử lý tập tin định nghĩa: %s"
+
+#: dlltool.c:871
+msgid "Processed def file"
+msgstr "Đã xử lý tập tin định nghĩa"
+
+#: dlltool.c:895
+#, c-format
+msgid "Syntax error in def file %s:%d"
+msgstr "Gặp lỗi cú pháp trong tập tin định nghĩa %s:%d"
+
+#: dlltool.c:930
+#, c-format
+msgid "%s: Path components stripped from image name, '%s'."
+msgstr "%s: các thành phần đường dẫn bị tước ra tên ảnh, « %s »."
+
+#: dlltool.c:939
+#, c-format
+msgid "NAME: %s base: %x"
+msgstr "TÊN: %s cơ bản: %x"
+
+#: dlltool.c:942 dlltool.c:958
+msgid "Can't have LIBRARY and NAME"
+msgstr "Không cho phép dùng cả THƯ VIỆN lẫn TÊN đều"
+
+#: dlltool.c:955
+#, c-format
+msgid "LIBRARY: %s base: %x"
+msgstr "THƯ VIỆN: %s cơ bản: %x"
+
+#: dlltool.c:1191 resrc.c:269
+#, c-format
+msgid "wait: %s"
+msgstr "đợi: %s"
+
+#: dlltool.c:1196 dllwrap.c:418 resrc.c:274
+#, c-format
+msgid "subprocess got fatal signal %d"
+msgstr "tiến trình con đã nhận tín hiệu nghiêm trọng %d"
+
+#: dlltool.c:1202 dllwrap.c:425 resrc.c:281
+#, c-format
+msgid "%s exited with status %d"
+msgstr "%s đã thoát với trạng thái %d"
+
+#: dlltool.c:1233
+#, c-format
+msgid "Sucking in info from %s section in %s"
+msgstr "Đang kéo vào thông tin từ phần %s trong %s..."
+
+#: dlltool.c:1358
+#, c-format
+msgid "Excluding symbol: %s"
+msgstr "Đang loại trừ ký hiệu : %s"
+
+#: dlltool.c:1447 dlltool.c:1458 nm.c:998 nm.c:1009
+#, c-format
+msgid "%s: no symbols"
+msgstr "%s: không có ký hiệu"
+
+#. FIXME: we ought to read in and block out the base relocations.
+#: dlltool.c:1484
+#, c-format
+msgid "Done reading %s"
+msgstr "Đã đọc xong %s"
+
+#: dlltool.c:1494
+#, c-format
+msgid "Unable to open object file: %s"
+msgstr "Không thể mở tập tin đối tượng: %s"
+
+#: dlltool.c:1497
+#, c-format
+msgid "Scanning object file %s"
+msgstr "Đang quét tập tin đối tượng %s..."
+
+#: dlltool.c:1512
+#, c-format
+msgid "Cannot produce mcore-elf dll from archive file: %s"
+msgstr "Không thể tạo « mcore-elf dll » từ tập tin kho: %s"
+
+#: dlltool.c:1598
+msgid "Adding exports to output file"
+msgstr "Đang thêm các việc xuất vào tập tin kết xuất..."
+
+#: dlltool.c:1646
+msgid "Added exports to output file"
+msgstr "Đã thêm các việc xuất vào tập tin kết xuất"
+
+#: dlltool.c:1767
+#, c-format
+msgid "Generating export file: %s"
+msgstr "Đang tạo ra tập tin xuất: %s"
+
+#: dlltool.c:1772
+#, c-format
+msgid "Unable to open temporary assembler file: %s"
+msgstr "Không thể mở tập tin dịch mã số tạm thời: %s"
+
+#: dlltool.c:1775
+#, c-format
+msgid "Opened temporary file: %s"
+msgstr "Đã mở tập tin tạm thời: %s"
+
+#: dlltool.c:1997
+msgid "Generated exports file"
+msgstr "Đã tạo ra tập tin xuất"
+
+#: dlltool.c:2203
+#, c-format
+msgid "bfd_open failed open stub file: %s"
+msgstr "bfd_open không mở được tập tin stub: %s"
+
+#: dlltool.c:2206
+#, c-format
+msgid "Creating stub file: %s"
+msgstr "Đang tạo tập tin stub: %s"
+
+#: dlltool.c:2588
+#, c-format
+msgid "failed to open temporary head file: %s"
+msgstr "lỗi mở tập tin đầu tạm: %s"
+
+#: dlltool.c:2647
+#, c-format
+msgid "failed to open temporary tail file: %s"
+msgstr "lỗi mở tập tin đuôi tạm: %s"
+
+#: dlltool.c:2714
+#, c-format
+msgid "Can't open .lib file: %s"
+msgstr "Không thể mở tập tin « .lib » (thư viện): %s"
+
+#: dlltool.c:2717
+#, c-format
+msgid "Creating library file: %s"
+msgstr "Đang tạo tập tin thư viện: %s"
+
+#: dlltool.c:2800 dlltool.c:2806
+#, c-format
+msgid "cannot delete %s: %s"
+msgstr "không thể xoá bỏ %s: %s"
+
+#: dlltool.c:2811
+msgid "Created lib file"
+msgstr "Đã tạo tập tin thư viện"
+
+#: dlltool.c:2904
+#, c-format
+msgid "Warning, ignoring duplicate EXPORT %s %d,%d"
+msgstr "Cảnh báo, đang bỏ qua XUẤT trùng %s %d,%d"
+
+#: dlltool.c:2910
+#, c-format
+msgid "Error, duplicate EXPORT with oridinals: %s"
+msgstr "Lỗi: XUẤT trùng với điều thứ tự : %s"
+
+#: dlltool.c:3026
+msgid "Processing definitions"
+msgstr "Đang xử lý các lời định nghĩa..."
+
+#: dlltool.c:3058
+msgid "Processed definitions"
+msgstr "Đã xử lý các lời định nghĩa"
+
+#. xgetext:c-format
+#: dlltool.c:3065 dllwrap.c:479
+#, c-format
+msgid "Usage %s <option(s)> <object-file(s)>\n"
+msgstr "Cách sử dụng %s <option(s)> <object-file(s)>\n"
+
+#. xgetext:c-format
+#: dlltool.c:3067
+#, c-format
+msgid ""
+" -m --machine <machine> Create as DLL for <machine>. [default: %s]\n"
+msgstr " -m --machine <machine> Tạo dạng DLL cho <machine>. [mặc định: %s]\n"
+
+#: dlltool.c:3068
+#, c-format
+msgid ""
+" possible <machine>: arm[_interwork], i386, mcore[-elf]{-le|-be}, "
+"ppc, thumb\n"
+msgstr ""
+" <machine> có thể: arm[_interwork], i386, mcore[-elf]{-le|-be}, ppc, "
+"thumb\n"
+
+#: dlltool.c:3069
+#, c-format
+msgid " -e --output-exp <outname> Generate an export file.\n"
+msgstr " -e --output-exp <outname> \tTạo ra tập tin _xuất_.\n"
+
+#: dlltool.c:3070
+#, c-format
+msgid " -l --output-lib <outname> Generate an interface library.\n"
+msgstr " -l --output-lib <outname> \tTạo _ra thư viện_ giao diện.\n"
+
+#: dlltool.c:3071
+#, c-format
+msgid " -a --add-indirect Add dll indirects to export file.\n"
+msgstr ""
+" -a --add-indirect _Thêm lời gián tiếp_ dạng dll vào tập tin xuất\n"
+
+#: dlltool.c:3072
+#, c-format
+msgid ""
+" -D --dllname <name> Name of input dll to put into interface lib.\n"
+msgstr ""
+" -D --dllname <name> _Tên dll_ nhập cần để vào thư viện giao diện.\n"
+
+#: dlltool.c:3073
+#, c-format
+msgid " -d --input-def <deffile> Name of .def file to be read in.\n"
+msgstr ""
+" -d --input-def <deffile> \tTên tập tin _định nghĩa_ cần đọc _vào_.\n"
+
+#: dlltool.c:3074
+#, c-format
+msgid " -z --output-def <deffile> Name of .def file to be created.\n"
+msgstr ""
+" -z --output-def <deffile> Tên tập tin _định nghĩa_ cần tạo (_ra_).\n"
+
+#: dlltool.c:3075
+#, c-format
+msgid " --export-all-symbols Export all symbols to .def\n"
+msgstr ""
+" --export-all-symbols Tự động _xuất mọi ký hiệu_ vào tập tin định nghĩa\n"
+
+#: dlltool.c:3076
+#, c-format
+msgid " --no-export-all-symbols Only export listed symbols\n"
+msgstr ""
+" --no-export-all-symbols \tXuất chỉ những ký hiệu đã liệt kê\n"
+"\t\t\t\t\t\t\t\t(_không xuất mọi ký hiệu_)\n"
+
+#: dlltool.c:3077
+#, c-format
+msgid " --exclude-symbols <list> Don't export <list>\n"
+msgstr ""
+" --exclude-symbols <list> Đừng xuất danh sách này\n"
+"\t\t\t\t\t\t\t\t(_loại trừ ký hiệu_)\n"
+
+#: dlltool.c:3078
+#, c-format
+msgid " --no-default-excludes Clear default exclude symbols\n"
+msgstr ""
+" --no-default-excludes Xoá các ký hiệu cần loại trừ theo mặc định\n"
+"\t\t\t\t\t\t\t\t(không loại trừ mặc định)\n"
+
+#: dlltool.c:3079
+#, c-format
+msgid " -b --base-file <basefile> Read linker generated base file.\n"
+msgstr ""
+" -b --base-file <basefile> Đọc _tập tin cơ bản_ do bộ liên kết tạo "
+"ra.\n"
+
+#: dlltool.c:3080
+#, c-format
+msgid " -x --no-idata4 Don't generate idata$4 section.\n"
+msgstr " -x --no-idata4 Đừng tạo ra phần « idata$4 ».\n"
+
+#: dlltool.c:3081
+#, c-format
+msgid " -c --no-idata5 Don't generate idata$5 section.\n"
+msgstr " -c --no-idata5 Đừng tạo ra phần « idata$5 ».\n"
+
+#: dlltool.c:3082
+#, c-format
+msgid ""
+" -U --add-underscore Add underscores to symbols in interface "
+"library.\n"
+msgstr ""
+" -U --add-underscore \t_Thêm dấu gạch dưới_ vào\n"
+"\t\t\t\t\t\tcác ký hiệu trong thư viện giao diện.\n"
+
+#: dlltool.c:3083
+#, c-format
+msgid " -k --kill-at Kill @<n> from exported names.\n"
+msgstr ""
+" -k --kill-at Xoá bỏ « @<n> » ra các tên đã xuất.\n"
+"\t\t\t\t\t\t(_buộc kết thúc tại_)\n"
+
+#: dlltool.c:3084
+#, c-format
+msgid " -A --add-stdcall-alias Add aliases without @<n>.\n"
+msgstr ""
+" -A --add-stdcall-alias \tThêm biệt hiệu không có « @<n> ».\n"
+"\t\t\t\t\t\t(_thêm biệt hiệu gọi chuẩn_)\n"
+
+#: dlltool.c:3085
+#, c-format
+msgid " -p --ext-prefix-alias <prefix> Add aliases with <prefix>.\n"
+msgstr ""
+" -p --ext-prefix-alias <prefix> \tThêm các biệt hiệu có tiền tố này.\n"
+"\t\t\t\t\t\t(_biệt hiệu tiền tố thêm_)\n"
+
+#: dlltool.c:3086
+#, c-format
+msgid " -S --as <name> Use <name> for assembler.\n"
+msgstr ""
+" -S --as <name> \tDùng tên này cho chương trình dịch mã số.\n"
+
+#: dlltool.c:3087
+#, c-format
c-format +msgid " -f --as-flags Pass to the assembler.\n" +msgstr "" +" -f --as-flags Gởi các cá» này qua cho chÆ°Æ¡ng trình dịch mã " +"số.\n" +"\t\t\t\t\t\t\t(_dạng cá»_)\n" + +#: dlltool.c:3088 +#, c-format +msgid "" +" -C --compat-implib Create backward compatible import library.\n" +msgstr "" +" -C --compat-implib \tTạo _thÆ° viên nhập tÆ°Æ¡ng thích_ ngược.\n" + +#: dlltool.c:3089 +#, c-format +msgid "" +" -n --no-delete Keep temp files (repeat for extra " +"preservation).\n" +msgstr "" +" -n --no-delete \t\tGiữ lại các tập tin tạm thá»i (lặp lại để bảo tồn " +"thêm)\n" +"\t\t\t\t\t\t(_không xoá bá»_)\n" + +#: dlltool.c:3090 +#, c-format +msgid "" +" -t --temp-prefix Use to construct temp file names.\n" +msgstr "" +" -t --temp-prefix \tDùng _tiá»n tố_ này để tạo tên tập tin _tạm_.\n" + +#: dlltool.c:3091 +#, c-format +msgid " -v --verbose Be verbose.\n" +msgstr " -v --verbose Xuất _chi tiết_.\n" + +#: dlltool.c:3092 +#, c-format +msgid " -V --version Display the program version.\n" +msgstr " -V --version \tHiển thị phiên bản chÆ°Æ¡ng trình.\n" + +#: dlltool.c:3093 +#, c-format +msgid " -h --help Display this information.\n" +msgstr " -h --help \tHiển thị _trợ giúp_ này.\n" + +#: dlltool.c:3095 +#, c-format +msgid "" +" -M --mcore-elf Process mcore-elf object files into .\n" +msgstr "" +" -M --mcore-elf \n" +"\t\tXá»­ lý các tập tin đối tượng kiểu « mcore-elf » vào tập tin tên này.\n" + +#: dlltool.c:3096 +#, c-format +msgid " -L --linker Use as the linker.\n" +msgstr " -L --linker \t\tDùng tên này là _bá»™ liên kết_.\n" + +#: dlltool.c:3097 +#, c-format +msgid " -F --linker-flags Pass to the linker.\n" +msgstr "" +" -F --linker-flags \tGởi _các cá»_ này qua cho _bá»™ liên kết_.\n" + +#: dlltool.c:3211 +#, c-format +msgid "Path components stripped from dllname, '%s'." +msgstr "Các thành phần Ä‘Æ°á»ng dẫn bị tÆ°á»›c ra tên dll, « %s »." + +#: dlltool.c:3256 +#, c-format +msgid "Unable to open base-file: %s" +msgstr "Không thể mở tập tin cÆ¡ sở: %s" + +#: dlltool.c:3288 +#, c-format +msgid "Machine '%s' not supported" +msgstr "Không há»— trợ máy « %s »" + +#: dlltool.c:3392 dllwrap.c:209 +#, c-format +msgid "Tried file: %s" +msgstr "Äã thá»­ tập tin: %s" + +#: dlltool.c:3399 dllwrap.c:216 +#, c-format +msgid "Using file: %s" +msgstr "Äang dùng tập tin: %s" + +#: dllwrap.c:299 +#, c-format +msgid "Keeping temporary base file %s" +msgstr "Äang giữ tập tin cÆ¡ bản tạm thá»i %s" + +#: dllwrap.c:301 +#, c-format +msgid "Deleting temporary base file %s" +msgstr "Äang xoá bá» tập tin cÆ¡ bản tạm thá»i %s..." + +#: dllwrap.c:315 +#, c-format +msgid "Keeping temporary exp file %s" +msgstr "Äang giữ tập tin xuất tạm thá»i %s" + +#: dllwrap.c:317 +#, c-format +msgid "Deleting temporary exp file %s" +msgstr "Äang xoá bá» tập tin xuất tạm thá»i %s..." + +#: dllwrap.c:330 +#, c-format +msgid "Keeping temporary def file %s" +msgstr "Äang giữ tập tin định nghÄ©a tạm thá»i %s" + +#: dllwrap.c:332 +#, c-format +msgid "Deleting temporary def file %s" +msgstr "Äang xoá bá» tập tin định nghÄ©a tạm thá»i %s..." 
+
+#: dllwrap.c:480
+#, c-format
+msgid " Generic options:\n"
+msgstr " Tùy chọn chung:\n"
+
+#: dllwrap.c:481
+#, c-format
+msgid " --quiet, -q Work quietly\n"
+msgstr " --quiet, -q Không xuất chi tiết (_im_)\n"
+
+#: dllwrap.c:482
+#, c-format
+msgid " --verbose, -v Verbose\n"
+msgstr " --verbose, -v Xuất _chi tiết_\n"
+
+#: dllwrap.c:483
+#, c-format
+msgid " --version Print dllwrap version\n"
+msgstr " --version In ra phiên bản dllwrap\n"
+
+#: dllwrap.c:484
+#, c-format
+msgid " --implib <outname> Synonym for --output-lib\n"
+msgstr " --implib <outname> Bằng « --output-lib »\n"
+
+#: dllwrap.c:485
+#, c-format
+msgid " Options for %s:\n"
+msgstr " Tùy chọn cho %s:\n"
+
+#: dllwrap.c:486
+#, c-format
+msgid " --driver-name <driver> Defaults to \"gcc\"\n"
+msgstr ""
+" --driver-name <driver> \t Mặc định là « gcc »\n"
+"\t\t\t\t\t\t\t\t(_tên trình điều khiển_)\n"
+
+#: dllwrap.c:487
+#, c-format
+msgid " --driver-flags <flags> Override default ld flags\n"
+msgstr ""
+" --driver-flags <flags> \t\tCó quyền cao hơn các cờ ld mặc định\n"
+"\t\t\t\t\t\t\t\t(_các cờ trình điều khiển_)\n"
+
+#: dllwrap.c:488
+#, c-format
+msgid " --dlltool-name <dlltool> Defaults to \"dlltool\"\n"
+msgstr ""
+" --dlltool-name <dlltool> \t\tMặc định là « dlltool »\n"
+"\t\t\t\t\t\t\t\t(_tên công cụ dlltool_)\n"
+
+#: dllwrap.c:489
+#, c-format
+msgid " --entry <entry> Specify alternate DLL entry point\n"
+msgstr " --entry <điểm_vào> \t\tGhi rõ điểm _vào_ DLL xen kẽ\n"
+
+#: dllwrap.c:490
+#, c-format
+msgid " --image-base <base> Specify image base address\n"
+msgstr " --image-base <base> \tGhi rõ địa chỉ _cơ bản ảnh_\n"
+
+#: dllwrap.c:491
+#, c-format
+msgid " --target <machine> i386-cygwin32 or i386-mingw32\n"
+msgstr " --target <machine> i386-cygwin32 hay i386-mingw32\n"
+
+#: dllwrap.c:492
+#, c-format
+msgid " --dry-run Show what needs to be run\n"
+msgstr ""
+" --dry-run \tHiển thị các điều cần chạy (_chạy thực hành_)\n"
+
+#: dllwrap.c:493
+#, c-format
+msgid " --mno-cygwin Create Mingw DLL\n"
+msgstr " --mno-cygwin \tTạo DLL dạng Mingw\n"
+
+#: dllwrap.c:494
+#, c-format
+msgid " Options passed to DLLTOOL:\n"
+msgstr " Các tùy chọn được gởi qua cho DLLTOOL:\n"
+
+#: dllwrap.c:495
+#, c-format
+msgid " --machine <machine>\n"
+msgstr " --machine <machine>\n"
+
+#: dllwrap.c:496
+#, c-format
+msgid " --output-exp <outname> Generate export file.\n"
+msgstr " --output-exp <outname> \t\tTạo ra tập tin _xuất_.\n"
+
+#: dllwrap.c:497
+#, c-format
+msgid " --output-lib <outname> Generate input library.\n"
+msgstr " --output-lib <outname> \t\tTạo _ra thư viện_ nhập.\n"
+
+#: dllwrap.c:498
+#, c-format
+msgid " --add-indirect Add dll indirects to export file.\n"
+msgstr ""
+" --add-indirect \t\t_Thêm_ các lời _gián tiếp_ vào tập tin xuất.\n"
+
+#: dllwrap.c:499
+#, c-format
+msgid " --dllname <name> Name of input dll to put into output lib.\n"
+msgstr ""
+" --dllname <name> \t\t_Tên dll_ nhập cần để vào thư viện xuất.\n"
+
+#: dllwrap.c:500
+#, c-format
+msgid " --def <deffile> Name input .def file\n"
+msgstr " --def <deffile> \tTên tập tin _định nghĩa_ nhập\n"
+
+#: dllwrap.c:501
+#, c-format
+msgid " --output-def <deffile> Name output .def file\n"
+msgstr " --output-def <deffile> \tTên _tập tin định nghĩa xuất_\n"
+
+#: dllwrap.c:502
+#, c-format
+msgid " --export-all-symbols Export all symbols to .def\n"
+msgstr ""
+" --export-all-symbols _Xuất mọi ký hiệu_ vào tập tin định nghĩa\n"
+
+#: dllwrap.c:503
+#, c-format
+msgid " --no-export-all-symbols Only export .drectve symbols\n"
+msgstr ""
+" --no-export-all-symbols \tXuất chỉ ký hiệu kiểu « .drectve ».\n"
+"\t\t\t\t\t\t\t\t(_không xuất mọi ký hiệu_)\n"
+
+#: dllwrap.c:504
+#, c-format
+msgid " --exclude-symbols <list> Exclude <list> from .def\n"
+msgstr ""
+" --exclude-symbols <list>\n"
+"\t\t\t\t\tLoại trừ danh sách này ra tập tin định nghĩa.\n"
+"\t\t\t\t\t\t\t\t(_loại trừ các ký hiệu_)\n"
+
+#: dllwrap.c:505
+#, c-format
+msgid " --no-default-excludes Zap default exclude symbols\n"
+msgstr ""
+" --no-default-excludes \t\tXoá mọi ký hiệu loại trừ mặc định.\n"
+"\t\t\t\t\t\t\t\t(_không loại trừ mặc định_)\n"
+
+#: dllwrap.c:506
+#, c-format
+msgid " --base-file <basefile> Read linker generated base file\n"
+msgstr ""
+" --base-file <basefile> Đọc _tập tin cơ bản_ do bộ liên kết tạo "
+"ra.\n"
+
+#: dllwrap.c:507
+#, c-format
+msgid " --no-idata4 Don't generate idata$4 section\n"
+msgstr " --no-idata4 Đừng tạo ra phần « idata$4 ».\n"
+
+#: dllwrap.c:508
+#, c-format
+msgid " --no-idata5 Don't generate idata$5 section\n"
+msgstr " --no-idata5 Đừng tạo ra phần « idata$5 ».\n"
+
+#: dllwrap.c:509
+#, c-format
+msgid " -U Add underscores to .lib\n"
+msgstr " -U Thêm dấu gạch _dưới_ vào thư viện\n"
+
+#: dllwrap.c:510
+#, c-format
+msgid " -k Kill @<n> from exported names\n"
+msgstr ""
+" -k Xoá bỏ « @<n> » ra các tên đã xuất\n"
+"\t\t\t\t\t(_buộc kết thúc_)\n"
+
+#: dllwrap.c:511
+#, c-format
+msgid " --add-stdcall-alias Add aliases without @<n>\n"
+msgstr ""
+" --add-stdcall-alias \tThêm biệt hiệu không có « @<n> ».\n"
+"\t\t\t\t\t\t\t(_thêm biệt hiệu gọi chuẩn_)\n"
+
+#: dllwrap.c:512
+#, c-format
+msgid " --as <name> Use <name> for assembler\n"
+msgstr ""
+" --as <name> Dùng tên này cho chương trình dịch mã số (_dạng_)\n"
+
+#: dllwrap.c:513
+#, c-format
+msgid " --nodelete Keep temp files.\n"
+msgstr " --nodelete Giữ các tập tin tạm (_không xoá bỏ_)\n"
+
+#: dllwrap.c:514
+#, c-format
+msgid " Rest are passed unmodified to the language driver\n"
+msgstr ""
+" Các điều còn lại được gởi dạng chưa được sửa đổi qua cho trình điều khiển "
+"ngôn ngữ\n"
+
+#: dllwrap.c:784
+msgid "Must provide at least one of -o or --dllname options"
+msgstr "Phải cung cấp ít nhất một của hai tùy chọn « -o » hay « --dllname »"
+
+#: dllwrap.c:813
+msgid ""
+"no export definition file provided.\n"
+"Creating one, but that may not be what you want"
+msgstr ""
+"chưa cung cấp tập tin định nghĩa xuất.\n"
+"Đang tạo một điều, mà có lẽ không phải là điều bạn muốn"
+
+#: dllwrap.c:972
+#, c-format
+msgid "DLLTOOL name : %s\n"
+msgstr "Tên công cụ DLLTOOL : %s\n"
+
+#: dllwrap.c:973
+#, c-format
+msgid "DLLTOOL options : %s\n"
+msgstr "Tùy chọn DLLTOOL: %s\n"
+
+#: dllwrap.c:974
+#, c-format
+msgid "DRIVER name : %s\n"
+msgstr "Tên TRÌNH ĐIỀU KHIỂN : %s\n"
+
+#: dllwrap.c:975
+#, c-format
+msgid "DRIVER options : %s\n"
+msgstr "Tùy chọn TRÌNH ĐIỀU KHIỂN : %s\n"
+
+#: emul_aix.c:51
+#, c-format
+msgid " [-g] - 32 bit small archive\n"
+msgstr " [-g] • kho nhỏ 32-bit\n"
+
+#: emul_aix.c:52
+#, c-format
+msgid " [-X32] - ignores 64 bit objects\n"
+msgstr " [-X32] • bỏ qua các đối tượng kiểu 64 bit\n"
+
+#: emul_aix.c:53
+#, c-format
+msgid " [-X64] - ignores 32 bit objects\n"
+msgstr " [-X64] • bỏ qua các đối tượng kiểu 32 bit\n"
+
+#: emul_aix.c:54
+#, c-format
+msgid " [-X32_64] - accepts 32 and 64 bit objects\n"
+msgstr ""
+" [-X32_64] • chấp nhận các đối tượng kiểu cả 32 bit lẫn 64 bit đều\n"
+
+#: ieee.c:311
+msgid "unexpected end of debugging information"
+msgstr "gặp kết thúc thông tin gỡ lỗi bất ngờ"
+
+#: ieee.c:398
+msgid "invalid number"
+msgstr "số không hợp lệ"
+
+#: ieee.c:451
+msgid "invalid string length"
+msgstr "độ dài chuỗi không hợp lệ"
+
+#: ieee.c:506 ieee.c:547
+msgid "expression stack overflow"
+msgstr "tràn đống biểu thức"
+
+#: ieee.c:526
+msgid "unsupported IEEE expression operator"
expression operator" +msgstr "toán tá»­ biểu thức IEE không được há»— trợ" + +#: ieee.c:541 +msgid "unknown section" +msgstr "không biết phần" + +#: ieee.c:562 +msgid "expression stack underflow" +msgstr "trán ngược đống biểu thức" + +#: ieee.c:576 +msgid "expression stack mismatch" +msgstr "đống biểu thức không khá»›p vá»›i nhau" + +#: ieee.c:613 +msgid "unknown builtin type" +msgstr "không biết kiểu builtin" + +#: ieee.c:758 +msgid "BCD float type not supported" +msgstr "Kiểu nổi BDC không được há»— trợ" + +#: ieee.c:895 +msgid "unexpected number" +msgstr "số bất ngá»" + +#: ieee.c:902 +msgid "unexpected record type" +msgstr "kiểu mục ghi bất ngá»" + +#: ieee.c:935 +msgid "blocks left on stack at end" +msgstr "có má»™t số khối còn lại trên đống khi kết thúc" + +#: ieee.c:1198 +msgid "unknown BB type" +msgstr "không biết kiểu BB" + +#: ieee.c:1207 lib/c-stack.c:245 +msgid "stack overflow" +msgstr "trán đống" + +#: ieee.c:1230 +msgid "stack underflow" +msgstr "trán ngược đống" + +#: ieee.c:1342 ieee.c:1412 ieee.c:2109 +msgid "illegal variable index" +msgstr "chỉ mục biến không được phép" + +#: ieee.c:1390 +msgid "illegal type index" +msgstr "chỉ mục kiểu không được phép" + +#: ieee.c:1400 ieee.c:1437 +msgid "unknown TY code" +msgstr "không biết mã TY" + +#: ieee.c:1419 +msgid "undefined variable in TY" +msgstr "gặp biến chÆ°a được định nghÄ©a trong TY" + +#. Pascal file name. FIXME. +#: ieee.c:1830 +msgid "Pascal file name not supported" +msgstr "ChÆ°a há»— trợ tên tập tin kiểu Pascal" + +#: ieee.c:1878 +msgid "unsupported qualifier" +msgstr "bá»™ dè dặt chÆ°a được há»— trợ" + +#: ieee.c:2147 +msgid "undefined variable in ATN" +msgstr "gặp biến chÆ°a định nghÄ©a trong ATN" + +#: ieee.c:2190 +msgid "unknown ATN type" +msgstr "không biết kiểu ATN" + +#. Reserved for FORTRAN common. +#: ieee.c:2312 +msgid "unsupported ATN11" +msgstr "ATN11 không được há»— trÆ¡" + +#. We have no way to record this information. FIXME. 
+#: ieee.c:2339
+msgid "unsupported ATN12"
+msgstr "ATN12 không được hỗ trợ"
+
+#: ieee.c:2399
+msgid "unexpected string in C++ misc"
+msgstr "gặp chuỗi bất ngờ trong C++ lặt vặt"
+
+#: ieee.c:2412
+msgid "bad misc record"
+msgstr "mục ghi lặt vặt sai"
+
+#: ieee.c:2453
+msgid "unrecognized C++ misc record"
+msgstr "không chấp nhận mục ghi C++ lặt vặt"
+
+#: ieee.c:2568
+msgid "undefined C++ object"
+msgstr "đối tượng C++ chưa được định nghĩa"
+
+#: ieee.c:2602
+msgid "unrecognized C++ object spec"
+msgstr "chưa chấp nhận đặc tả đối tượng C++"
+
+#: ieee.c:2638
+msgid "unsupported C++ object type"
+msgstr "kiểu đối tượng C++ chưa được hỗ trợ"
+
+#: ieee.c:2648
+msgid "C++ base class not defined"
+msgstr "chưa định nghĩa hạng cơ bản C++"
+
+#: ieee.c:2660 ieee.c:2765
+msgid "C++ object has no fields"
+msgstr "Đối tượng C++ không có trường nào"
+
+#: ieee.c:2679
+msgid "C++ base class not found in container"
+msgstr "Không tìm thấy hạng cơ bản C++ trong bộ chứa"
+
+#: ieee.c:2786
+msgid "C++ data member not found in container"
+msgstr "Không tìm thấy bộ phận dữ liệu C++ trong bộ chứa"
+
+#: ieee.c:2827 ieee.c:2977
+msgid "unknown C++ visibility"
+msgstr "không biết độ thấy rõ C++"
+
+#: ieee.c:2861
+msgid "bad C++ field bit pos or size"
+msgstr "vị trí bit hay kích cỡ trường C++ sai"
+
+#: ieee.c:2953
+msgid "bad type for C++ method function"
+msgstr "kiểu sai cho hàm phương pháp C++"
+
+#: ieee.c:2963
+msgid "no type information for C++ method function"
+msgstr "không có thông tin kiểu cho hàm phương pháp C++"
+
+#: ieee.c:3002
+msgid "C++ static virtual method"
+msgstr "phương pháp ảo tĩnh C++"
+
+#: ieee.c:3097
+msgid "unrecognized C++ object overhead spec"
+msgstr "chưa chấp nhận đặc tả duy tu đối tượng C++"
+
+#: ieee.c:3136
+msgid "undefined C++ vtable"
+msgstr "chưa định nghĩa vtable C++"
+
+#: ieee.c:3205
+msgid "C++ default values not in a function"
+msgstr "Giá trị C++ mặc định không phải trong hàm"
+
+#: ieee.c:3245
+msgid "unrecognized C++ default type"
+msgstr "chưa chấp nhận kiểu C++ mặc định"
+
+#: ieee.c:3276
+msgid "reference parameter is not a pointer"
+msgstr "tham số tham chiếu không phải là con trỏ"
+
+#: ieee.c:3359
+msgid "unrecognized C++ reference type"
+msgstr "chưa chấp nhận kiểu tham chiếu C++"
+
+#: ieee.c:3441
+msgid "C++ reference not found"
+msgstr "Không tìm thấy tham chiếu C++"
+
+#: ieee.c:3449
+msgid "C++ reference is not pointer"
+msgstr "Tham chiếu C++ không phải là con trỏ"
+
+#: ieee.c:3475 ieee.c:3483
+msgid "missing required ASN"
+msgstr "thiếu ASN cần thiết"
+
+#: ieee.c:3510 ieee.c:3518
+msgid "missing required ATN65"
+msgstr "thiếu ATN65 cần thiết"
+
+#: ieee.c:3532
+msgid "bad ATN65 record"
+msgstr "mục ghi ATN65 sai"
+
+#: ieee.c:4160
+#, c-format
+msgid "IEEE numeric overflow: 0x"
+msgstr "tràn thuộc số IEEE: 0x"
+
+#: ieee.c:4204
+#, c-format
+msgid "IEEE string length overflow: %u\n"
+msgstr "Tràn độ dài chuỗi IEEE: %u\n"
+
+#: ieee.c:5203
+#, c-format
+msgid "IEEE unsupported integer type size %u\n"
+msgstr "Kích cỡ kiểu số nguyên không được hỗ trợ IEEE %u\n"
+
+#: ieee.c:5237
+#, c-format
+msgid "IEEE unsupported float type size %u\n"
+msgstr "Kích cỡ kiểu nổi không được hỗ trợ IEEE %u\n"
+
+#: ieee.c:5271
+#, c-format
+msgid "IEEE unsupported complex type size %u\n"
+msgstr "Kích cỡ kiểu phức tạp không được hỗ trợ IEEE %u\n"
+
+#: nlmconv.c:267 srconv.c:1810
+msgid "input and output files must be different"
+msgstr "tập tin nhập và xuất phải là khác nhau"
+
+#: nlmconv.c:314
+msgid "input file named both on command line and with INPUT"
+msgstr "tên tập tin được lập cả trên dòng lệnh lẫn bằng INPUT đều"
+
+#: nlmconv.c:323
+msgid "no input file"
+msgstr "không có tập tin nhập nào"
+
+#: nlmconv.c:353
+msgid "no name for output file"
+msgstr "không có tên cho tập tin xuất"
+
+#: nlmconv.c:367
+msgid "warning: input and output formats are not compatible"
+msgstr "cảnh báo : khuôn dạng nhập và xuất không tương thích với nhau"
+
+#: nlmconv.c:396
+msgid "make .bss section"
+msgstr "tạo phần « .bss »"
+
+#: nlmconv.c:405
+msgid "make .nlmsections section"
+msgstr "tạo phần « .nlmsections »"
+
+#: nlmconv.c:407
+msgid "set .nlmsections flags"
+msgstr "đặt các cỠ« .nlmsections »"
+
+#: nlmconv.c:435
+msgid "set .bss vma"
+msgstr "đặt vma .bss"
+
+#: nlmconv.c:442
+msgid "set .data size"
+msgstr "đặt kích cỡ dữ liệu .data"
+
+#: nlmconv.c:622
+#, c-format
+msgid "warning: symbol %s imported but not in import list"
+msgstr "cảnh báo : ký hiệu %s được nhập mà không phải trong danh sách nhập"
+
+#: nlmconv.c:642
+msgid "set start address"
+msgstr "đặt địa chỉ bắt đầu"
+
+#: nlmconv.c:691
+#, c-format
+msgid "warning: START procedure %s not defined"
+msgstr "cảnh báo : thủ tục START (bắt đầu) %s chưa được định nghĩa"
+
+#: nlmconv.c:693
+#, c-format
+msgid "warning: EXIT procedure %s not defined"
+msgstr "cảnh báo : thủ tục EXIT (thoát) %s chưa được định nghĩa"
+
+#: nlmconv.c:695
+#, c-format
+msgid "warning: CHECK procedure %s not defined"
+msgstr "cảnh báo : thủ tục CHECK (kiểm tra) %s chưa được định nghĩa"
+
+#: nlmconv.c:716 nlmconv.c:905
+msgid "custom section"
+msgstr "phần tự chọn"
+
+#: nlmconv.c:737 nlmconv.c:934
+msgid "help section"
+msgstr "phần trợ giúp"
+
+#: nlmconv.c:759 nlmconv.c:952
+msgid "message section"
+msgstr "phần thông điệp"
+
+#: nlmconv.c:775 nlmconv.c:985
+msgid "module section"
+msgstr "phần mô-đun"
+
+#: nlmconv.c:795 nlmconv.c:1001
+msgid "rpc section"
+msgstr "phần rpc"
+
+#. There is no place to record this information.
+#: nlmconv.c:831
+#, c-format
+msgid "%s: warning: shared libraries can not have uninitialized data"
+msgstr ""
+"%s: cảnh báo : thư viện dùng chung không thể chứa dữ liệu chưa được sơ khởi"
+
+#: nlmconv.c:852 nlmconv.c:1020
+msgid "shared section"
+msgstr "phần dùng chung"
+
+#: nlmconv.c:860
+msgid "warning: No version number given"
+msgstr "cảnh báo : chưa đưa ra số thứ tự phiên bản"
+
+#: nlmconv.c:900 nlmconv.c:929 nlmconv.c:947 nlmconv.c:996 nlmconv.c:1015
+#, c-format
+msgid "%s: read: %s"
+msgstr "%s: đọc: %s"
+
+#: nlmconv.c:922
+msgid "warning: FULLMAP is not supported; try ld -M"
+msgstr "cảnh báo : chưa hỗ trợ FULLMAP; hãy thử « ld -M »"
+
+#: nlmconv.c:1098
+#, c-format
+msgid "Usage: %s [option(s)] [in-file [out-file]]\n"
+msgstr "Cách sử dụng: %s [tùy_chọn...] [tập_tin_nhập [tập_tin_xuất]]\n"
[tập_tin_nhập [tập_tin_xuất]]\n" + +#: nlmconv.c:1099 +#, c-format +msgid " Convert an object file into a NetWare Loadable Module\n" +msgstr "" +" Chuyển đổi tập tin đối tượng sang Mô-Ä‘un Tải được NetWare (NetWare Loadable " +"Module)\n" + +#: nlmconv.c:1100 +#, c-format +msgid "" +" The options are:\n" +" -I --input-target= Set the input binary file format\n" +" -O --output-target= Set the output binary file format\n" +" -T --header-file= Read for NLM header information\n" +" -l --linker= Use for any linking\n" +" -d --debug Display on stderr the linker command line\n" +" -h --help Display this information\n" +" -v --version Display the program's version\n" +msgstr "" +" Tùy chá»n:\n" +" -I --input-target= \t\tLập dạng thức tập tin nhị phân nhập\n" +"\t\t\t\t\t\t\t\t (_đích nhập_)\n" +" -O --output-target= \tLập dạng thức tập tin nhị phân xuất\n" +"\t\t\t\t\t\t\t\t (_đích xuất_)\n" +" -T --header-file=\n" +"\t\tÄá»c tập tin này để tìm thông tin phần đầu NLM (_tập tin phần đầu_)\n" +" -l --linker= \tDùng _bá»™ liên kết_ này khi liên kết\n" +" -d --debug\n" +"\tHiển thị trên thiết bị lá»—i chuẩn dòng lệnh của bá»™ liên kết (_gỡ lá»—i_)\n" +" -h --help \t\t\tHiển thị _trợ giúp_ này\n" +" -v --version \t\t\tHiển thị _phiên bản_ chÆ°Æ¡ng trình\n" + +#: nlmconv.c:1140 +#, c-format +msgid "support not compiled in for %s" +msgstr "chÆ°a biên dịch cách há»— trợ %s" + +#: nlmconv.c:1177 +msgid "make section" +msgstr "tạo phần" + +#: nlmconv.c:1191 +msgid "set section size" +msgstr "lập kích cỡ phần" + +#: nlmconv.c:1197 +msgid "set section alignment" +msgstr "lập canh lá» phần" + +#: nlmconv.c:1201 +msgid "set section flags" +msgstr "lập các cá» phân" + +#: nlmconv.c:1212 +msgid "set .nlmsections size" +msgstr "lập kích cỡ « .nlmsections »" + +#: nlmconv.c:1293 nlmconv.c:1301 nlmconv.c:1310 nlmconv.c:1315 +msgid "set .nlmsection contents" +msgstr "lập ná»™i dung « .nlmsections »" + +#: nlmconv.c:1794 +msgid "stub section sizes" +msgstr "kích cỡ phần stub" + +#: nlmconv.c:1841 +msgid "writing stub" +msgstr "Ä‘ang ghi stub..." + +#: nlmconv.c:1925 +#, c-format +msgid "unresolved PC relative reloc against %s" +msgstr "có việc định vị lại liên quan đến PC chÆ°a tháo gỡ đối vá»›i %s" + +#: nlmconv.c:1989 +#, c-format +msgid "overflow when adjusting relocation against %s" +msgstr "trán khi Ä‘iá»u chỉnh việc định vị lại đối vá»›i %s" + +#: nlmconv.c:2116 +#, c-format +msgid "%s: execution of %s failed: " +msgstr "%s: việc thá»±c hiện %s bị lá»—i:" + +#: nlmconv.c:2131 ../applet/pilot.c:1095 +#, c-format +msgid "Execution of %s failed" +msgstr "Việc thá»±c hiện %s bị lá»—i" + +#: nm.c:224 size.c:80 strings.c:651 +#, c-format +msgid "Usage: %s [option(s)] [file(s)]\n" +msgstr "Cách sá»­ dụng: %s [tùy_chá»n...] 
[tập_tin...]\n" + +#: nm.c:225 +#, c-format +msgid " List symbols in [file(s)] (a.out by default).\n" +msgstr " Liệt kê các ký hiệu trong những tập tin này (mặc định là ).\n" + +#: nm.c:226 +#, c-format +msgid "" +" The options are:\n" +" -a, --debug-syms Display debugger-only symbols\n" +" -A, --print-file-name Print name of the input file before every symbol\n" +" -B Same as --format=bsd\n" +" -C, --demangle[=STYLE] Decode low-level symbol names into user-level " +"names\n" +" The STYLE, if specified, can be `auto' (the " +"default),\n" +" `gnu', `lucid', `arm', `hp', `edg', `gnu-v3', " +"`java'\n" +" or `gnat'\n" +" --no-demangle Do not demangle low-level symbol names\n" +" -D, --dynamic Display dynamic symbols instead of normal symbols\n" +" --defined-only Display only defined symbols\n" +" -e (ignored)\n" +" -f, --format=FORMAT Use the output format FORMAT. FORMAT can be " +"`bsd',\n" +" `sysv' or `posix'. The default is `bsd'\n" +" -g, --extern-only Display only external symbols\n" +" -l, --line-numbers Use debugging information to find a filename and\n" +" line number for each symbol\n" +" -n, --numeric-sort Sort symbols numerically by address\n" +" -o Same as -A\n" +" -p, --no-sort Do not sort the symbols\n" +" -P, --portability Same as --format=posix\n" +" -r, --reverse-sort Reverse the sense of the sort\n" +" -S, --print-size Print size of defined symbols\n" +" -s, --print-armap Include index for symbols from archive members\n" +" --size-sort Sort symbols by size\n" +" --special-syms Include special symbols in the output\n" +" --synthetic Display synthetic symbols as well\n" +" -t, --radix=RADIX Use RADIX for printing symbol values\n" +" --target=BFDNAME Specify the target object format as BFDNAME\n" +" -u, --undefined-only Display only undefined symbols\n" +" -X 32_64 (ignored)\n" +" -h, --help Display this information\n" +" -V, --version Display this program's version number\n" +"\n" +msgstr "" +" Tùy chá»n:\n" +" -a, --debug-syms \tHiển thị _ký hiệu_ chỉ kiểu bá»™ _gỡ lá»—i_ thôi\n" +" -A, --print-file-name \t_In ra tên tập tin_ nhập trÆ°á»›c má»—i ký hiệu\n" +" -B \t\t\tBằng « --format=bsd »\n" +" -C, --demangle[=KIỂU_DÃNG]\n" +"\tGiải mã các tên ký hiệu cấp thấp thành tên cấp ngÆ°á»i dùng (_tháo gỡ_)\n" +" Kiểu dáng này, nếu được ghi rõ, có thể là « auto » (tá»± Ä‘á»™ng: mặc " +"định)\n" +"\t« gnu », « lucid », « arm », « hp », « edg », « gnu-v3 », « java » hay « " +"gnat ».\n" +" --no-demangle \t\t_Äừng tháo gỡ_ tên ký hiệu cấp thấp\n" +" -D, --dynamic \t\tHiển thị ký hiệu _Ä‘á»™ng_ thay vào ký hiệu chuẩn\n" +" --defined-only \t\tHiển thị _chỉ_ ký hiệu _được định nghÄ©a_\n" +" -e \t\t\t(bị bá» qua)\n" +" -f, --format=DẠNG_THỨC \tDùng _dạng thức_ xuất này, má»™t của\n" +"\t\t\t\t\t\t\t« bsd » (mặc định), « sysv » hay « posix »\n" +" -g, --extern-only \t\tHiển thị _chỉ_ ký hiệu _bên ngoài_\n" +" -l, --line-numbers \t\tDùng thông tin gỡ lá»—i để tìm tên tập tin\n" +"\t\t\t\t\t\tvà _số thứ tá»± dòng_ cho má»—i ký hiệu\n" +" -n, --numeric-sort \t\t_Sắp xếp_ ký hiệu má»™t cách _thuá»™c số_ theo địa " +"chỉ\n" +" -o \t\t\tBằng « -A »\n" +" -p, --no-sort \t\t_Äừng sắp xếp_ ký hiệu\n" +" -P, --portability \t\tBằng « --format=posix »\n" +" -r, --reverse-sort \t\t_Sắp xếp ngược_\n" +" -S, --print-size \t\tIn ra kích cỡ của các ký hiệu được định nghÄ©a\n" +" -s, --print-armap \t\tGồm chỉ mục cho ký hiệu từ bá»™ phạn kho\n" +" --size-sort \t\t_Sắp xếp_ ký hiệu theo _kích cỡ_\n" +" --special-syms \t\tGồm _ký hiệu đặc biệt_ trong dữ liệu xuất\n" +" --synthetic \t\tCÅ©ng hiển thị ký hiệu 
_tổng hợp_\n"
+" -t, --radix=CƠ_SỞ \tDùng _cơ sở_ này để in ra giá trị ký hiệu\n"
+" --target=TÊN_BFD \tGhi rõ dạng thức đối tượng _đích_ là tên BFD này\n"
+" -u, --undefined-only \tHiển thị _chỉ_ ký hiệu _chưa được định nghĩa_\n"
+" -X 32_64 \t\t(bị bỏ qua)\n"
+" -h, --help \t\tHiển thị _trợ giúp_ này\n"
+" -V, --version \t\tHiển thị số thứ tự _phiên bản_ của chương trình "
+"này\n"
+"\n"
+
+#: nm.c:262 objdump.c:232 lib/argp-help.c:1653 lib/argp-help.c:1652
+#, c-format
+msgid "Report bugs to %s.\n"
+msgstr "Hãy thông báo lỗi nào cho %s.\n"
+
+#: nm.c:294
+#, c-format
+msgid "%s: invalid radix"
+msgstr "%s: cơ sở không hợp lệ"
+
+#: nm.c:318
+#, c-format
+msgid "%s: invalid output format"
+msgstr "%s: dạng thức xuất không hợp lệ"
+
+#: nm.c:339 readelf.c:6342 readelf.c:6378
+#, c-format
+msgid "<processor specific>: %d"
+msgstr "<đặc trưng cho bộ xử lý>: %d"
+
+#: nm.c:341 readelf.c:6345 readelf.c:6390
+#, c-format
+msgid "<OS specific>: %d"
+msgstr "<đặc trưng cho hệ điều hành>: %d"
+
+#: nm.c:343 readelf.c:6347 readelf.c:6393
+#, c-format
+msgid "<unknown>: %d"
+msgstr "<không rõ>: %d"
+
+#: nm.c:380
+#, c-format
+msgid ""
+"\n"
+"Archive index:\n"
+msgstr ""
+"\n"
+"Chỉ mục kho:\n"
+
+#: nm.c:1225
+#, c-format
+msgid ""
+"\n"
+"\n"
+"Undefined symbols from %s:\n"
+"\n"
+msgstr ""
+"\n"
+"\n"
+"Ký hiệu chưa được định nghĩa từ %s:\n"
+"\n"
+
+#: nm.c:1227
+#, c-format
+msgid ""
+"\n"
+"\n"
+"Symbols from %s:\n"
+"\n"
+msgstr ""
+"\n"
+"\n"
+"Ký hiệu từ %s:\n"
+"\n"
+
+#: nm.c:1229 nm.c:1280
+#, c-format
+msgid ""
+"Name Value Class Type Size Line "
+"Section\n"
+"\n"
+msgstr ""
+"Tên Giá trị Hạng Kiểu Cỡ Dòng Phần\n"
+"\n"
+
+#: nm.c:1232 nm.c:1283
+#, c-format
+msgid ""
+"Name Value Class Type "
+"Size Line Section\n"
+"\n"
+msgstr ""
+"Name Value Class Type "
+"Size Line Section\n"
+"\n"
+
+#: nm.c:1276
+#, c-format
+msgid ""
+"\n"
+"\n"
+"Undefined symbols from %s[%s]:\n"
+"\n"
+msgstr ""
+"\n"
+"\n"
+"Ký hiệu chưa được định nghĩa từ %s[%s]:\n"
+"\n"
+
+#: nm.c:1278
+#, c-format
+msgid ""
+"\n"
+"\n"
+"Symbols from %s[%s]:\n"
+"\n"
+msgstr ""
+"\n"
+"\n"
+"Ký hiệu từ %s[%s]:\n"
+"\n"
+
+#: nm.c:1580
+msgid "Only -X 32_64 is supported"
+msgstr "Chỉ hỗ trợ « -X 32_64 »"
+
+#: nm.c:1600
+msgid "Using the --size-sort and --undefined-only options together"
+msgstr "Dùng tùy chọn cả « --size-sort » lẫn « --undefined-only » đều"
+
+#: nm.c:1601
+msgid "will produce no output, since undefined symbols have no size."
+msgstr ""
+"sẽ không xuất gì, vì ký hiệu chưa được định nghĩa không có kích cỡ nào."
+
+#: nm.c:1629
+#, c-format
+msgid "data size %ld"
+msgstr "cỡ dữ liệu %ld"
+
+#: objcopy.c:396 srconv.c:1721
+#, c-format
+msgid "Usage: %s [option(s)] in-file [out-file]\n"
+msgstr "Cách sử dụng: %s [tùy_chọn...] 
tập_tin_nhập [tập_tin_xuất]\n"
+
+#: objcopy.c:397
+#, c-format
+msgid " Copies a binary file, possibly transforming it in the process\n"
+msgstr " Sao chép tập tin nhị phân, cũng có thể chuyển đổi nó\n"
+
+#: objcopy.c:398 objcopy.c:487
+#, c-format
+msgid " The options are:\n"
+msgstr " Tùy chọn:\n"
+
+#: objcopy.c:399
+#, c-format
+msgid ""
+" -I --input-target <bfdname> Assume input file is in format <bfdname>\n"
+" -O --output-target <bfdname> Create an output file in format "
+"<bfdname>\n"
+" -B --binary-architecture <arch> Set arch of output file, when input is "
+"binary\n"
+" -F --target <bfdname> Set both input and output format to "
+"<bfdname>\n"
+" --debugging Convert debugging information, if "
+"possible\n"
+" -p --preserve-dates Copy modified/access timestamps to the "
+"output\n"
+" -j --only-section <name> Only copy section <name> into the output\n"
+" --add-gnu-debuglink=<file> Add section .gnu_debuglink linking to "
+"<file>\n"
+" -R --remove-section <name> Remove section <name> from the output\n"
+" -S --strip-all Remove all symbol and relocation "
+"information\n"
+" -g --strip-debug Remove all debugging symbols & sections\n"
+" --strip-unneeded Remove all symbols not needed by "
+"relocations\n"
+" -N --strip-symbol <name> Do not copy symbol <name>\n"
+" --strip-unneeded-symbol <name>\n"
+" Do not copy symbol <name> unless needed "
+"by\n"
+" relocations\n"
+" --only-keep-debug Strip everything but the debug "
+"information\n"
+" -K --keep-symbol <name> Only copy symbol <name>\n"
+" -L --localize-symbol <name> Force symbol <name> to be marked as a "
+"local\n"
+" -G --keep-global-symbol <name> Localize all symbols except <name>\n"
+" -W --weaken-symbol <name> Force symbol <name> to be marked as a "
+"weak\n"
+" --weaken Force all global symbols to be marked as "
+"weak\n"
+" -w --wildcard Permit wildcard in symbol comparison\n"
+" -x --discard-all Remove all non-global symbols\n"
+" -X --discard-locals Remove any compiler-generated symbols\n"
+" -i --interleave <number> Only copy one out of every <number> "
+"bytes\n"
+" -b --byte <num> Select byte <num> in every interleaved "
+"block\n"
+" --gap-fill <val> Fill gaps between sections with <val>\n"
+" --pad-to <addr> Pad the last section up to address "
+"<addr>\n"
+" --set-start <addr> Set the start address to <addr>\n"
+" {--change-start|--adjust-start} <incr>\n"
+" Add <incr> to the start address\n"
+" {--change-addresses|--adjust-vma} <incr>\n"
+" Add <incr> to LMA, VMA and start "
+"addresses\n"
+" {--change-section-address|--adjust-section-vma} <name>{=|+|-}<val>\n"
+" Change LMA and VMA of section <name> by "
+"<val>\n"
+" --change-section-lma <name>{=|+|-}<val>\n"
+" Change the LMA of section <name> by "
+"<val>\n"
+" --change-section-vma <name>{=|+|-}<val>\n"
+" Change the VMA of section <name> by "
+"<val>\n"
+" {--[no-]change-warnings|--[no-]adjust-warnings}\n"
+" Warn if a named section does not exist\n"
+" --set-section-flags <name>=<flags>\n"
+" Set section <name>'s properties to "
+"<flags>\n"
+" --add-section <name>=<file> Add section <name> found in <file> to "
+"output\n"
+" --rename-section <old>=<new>[,<flags>] Rename section <old> to <new>\n"
+" --change-leading-char Force output format's leading character "
+"style\n"
+" --remove-leading-char Remove leading character from global "
+"symbols\n"
+" --redefine-sym <old>=<new> Redefine symbol name <old> to <new>\n"
+" --redefine-syms <file> --redefine-sym for all symbol pairs \n"
+" listed in <file>\n"
+" --srec-len <number> Restrict the length of generated "
+"Srecords\n"
+" --srec-forceS3 Restrict the type of generated Srecords "
+"to S3\n"
+" --strip-symbols <file> -N for all symbols listed in <file>\n"
+" --strip-unneeded-symbols <file>\n"
+" --strip-unneeded-symbol for all symbols "
+"listed\n"
+" in <file>\n"
+" --keep-symbols <file> -K for all symbols listed in <file>\n"
+" --localize-symbols <file> -L for all symbols listed in <file>\n"
+" --keep-global-symbols <file> -G for all symbols listed in <file>\n"
+" --weaken-symbols <file> -W for all symbols listed in <file>\n"
+" 
--alt-machine-code <index> Use alternate machine code for output\n"
+" --writable-text Mark the output text as writable\n"
+" --readonly-text Make the output text write protected\n"
+" --pure Mark the output file as demand paged\n"
+" --impure Mark the output file as impure\n"
+" --prefix-symbols <prefix> Add <prefix> to start of every symbol "
+"name\n"
+" --prefix-sections <prefix> Add <prefix> to start of every section "
+"name\n"
+" --prefix-alloc-sections <prefix>\n"
+" Add <prefix> to start of every "
+"allocatable\n"
+" section name\n"
+" -v --verbose List all object files modified\n"
+" -V --version Display this program's version number\n"
+" -h --help Display this output\n"
+" --info List object formats & architectures "
+"supported\n"
+msgstr ""
+" -I --input-target \t\tGiả sử tập tin nhập có dạng \n"
+"\t\t\t\t\t\t\t\t (_đích nhập_)\n"
+" -O --output-target \tTạo tập tin dạng \n"
+"\t\t\t\t\t\t\t\t (_đích xuất_)\n"
+" -B --binary-architecture \n"
+"\t\t\tLập _kiến trúc_ của tập tin xuất, khi tập tin nhập là _nhị phân_\n"
+" -F --target \n"
+"\t\t\tLập dạng thức cả nhập lẫn xuất đều thành (_đích_)\n"
+" --debugging \t\t\tChuyển đổi thông tin _gỡ lỗi_, nếu "
+"có thể\n"
+" -p --preserve-dates\n"
+"\tSao chép nhãn thời gian truy cập/sửa đổi vào kết xuất (_bảo tồn các "
+"ngày_)\n"
+" -j --only-section \t_Chỉ_ sao chép _phần_ vào kết "
+"xuất\n"
+" --add-gnu-debuglink=\n"
+"\t\t_Thêm_ khả năng liên kết phần « .gnu_debuglink » vào \n"
+" -R --remove-section \t_Gỡ bỏ phần_ ra kết xuất\n"
+" -S --strip-all \t\t\tGỡ bỏ mọi thông tin ký hiệu và định "
+"vị lại\n"
+"\t\t\t\t\t\t\t\t (_tước hết_)\n"
+" -g --strip-debug \t\tGỡ bỏ mọi ký hiệu và phần kiểu gỡ "
+"lỗi\n"
+"\t\t\t\t\t\t\t\t (_tước gỡ lỗi_)\n"
+" --strip-unneeded \tGỡ bỏ mọi ký hiệu không cần thiết để định vị "
+"lại\n"
+"\t\t\t\t\t\t\t\t (_tước không cần thiết_)\n"
+" -N --strip-symbol \t\t Đừng sao chép ký hiệu \n"
+"\t\t\t\t\t\t\t\t (_tước ký hiệu_)\n"
+" --strip-unneeded-symbol \n"
+"\tĐừng sao chép ký hiệu trừ cần thiết để định vị lại (_tước không cần "
+"thiết_)\n"
+" --only-keep-debug\t\t\t\tTước hết, trừ thông tin gỡ lỗi\n"
+"\t\t\t\t\t\t\t\t (_chỉ giữ gỡ lỗi_)\n"
+" -K --keep-symbol \tChỉ sao chép ký hiệu \n"
+"\t\t\t\t\t\t\t\t (_giữ ký hiệu_)\n"
+" -L --localize-symbol \n"
+"\t\t\t\tBuộc ký hiệu có nhãn điều cục bộ (_địa phương hóa_)\n"
+" -G --keep-global-symbol \tĐịa phương hóa mọi ký hiệu trừ \n"
+"\t\t\t\t\t\t\t\t (_giữ ký hiệu toàn cục_)\n"
+" -W --weaken-symbol \tBuộc ký hiệu có nhãn điều "
+"yếu\n"
+"\t\t\t\t\t\t\t\t (_làm yếu ký hiệu_)\n"
+" --weaken \t\tBuộc mọi ký hiệu toàn cục có nhãn "
+"điều yếu\n"
+"\t\t\t\t\t\t\t\t (_làm yếu đi_)\n"
+" -w --wildcard \t\tCho phép _ký tự đại diện_ trong sự so sánh ký "
+"hiệu\n"
+" -x --discard-all \t\t\tGỡ bỏ mọi ký hiệu không toàn cục\n"
+"\t\t\t\t\t\t\t\t (_hủy hết_)\n"
+" -X --discard-locals Gỡ bỏ ký hiệu nào được tạo ra bởi bộ biên "
+"dịch\n"
+"\t\t\t\t\t\t\t\t (_hủy các điều cục bộ_)\n"
+" -i --interleave \t\t\tChỉ sao chép một của mỗi byte\n"
+"\t\t\t\t\t\t\t\t (_chen vào_)\n"
+" -b --byte \n"
+"\t\t\t\tChọn byte số thứ tự trong mỗi khối tin đã chen vào\n"
+" --gap-fill \t_Điền vào khe_ giữa hai phần bằng "
+"\n"
+" --pad-to <địa_chỉ>\t\t_Đệm_ phần cuối cùng _đến_ địa chỉ <địa_chỉ>\n"
+" --set-start <địa_chỉ> \t\t_Lập_ địa chỉ _đầu_ thành "
+"<địa_chỉ>\n"
+" {--change-start|--adjust-start} \n"
+"\t\tThêm vào địa chỉ đầu (_thay đổi đầu, điều chỉnh đầu_)\n"
+" {--change-addresses|--adjust-vma} \n"
+" \t\t\t\t\t\t\tThêm vào địa chỉ đầu, LMA 
và VMA\n"
+"\t\t\t\t\t\t\t (_thay đổi địa chỉ, điều chỉnh vma_)\n"
+" {--change-section-address|--adjust-section-vma} {=|+|-}\n"
+"\t\t\t\t\tThay đổi LMA và VMA của phần bằng \n"
+"\t\t\t\t\t\t(_thay đổi địa chỉ phần, điều chỉnh vma phần_)\n"
+" --change-section-lma {=|+|-}\n"
+" \tThay đổi LMA của phần bằng (_thay đổi LMA của phần_)\n"
+" --change-section-vma {=|+|-}\n"
+" \tThay đổi VMA của phần bằng (_thay đổi VMA của phần_)\n"
+" {--[no-]change-warnings|--[no-]adjust-warnings}\n"
+" \t\t\t\t\t\t\t\tCảnh báo nếu không có phần có tên\n"
+"\t\t(_[không] thay đổi các cảnh báo, [không] điều chỉnh các cảnh báo_)\n"
+" --set-section-flags =\n"
+" \t\tLập thuộc tính của phần thành "
+"\n"
+"\t\t\t\t\t\t\t\t (_lập các cờ phần_)\n"
+" --add-section =\n"
+"\t\t\t\t_Thêm phần_ được tìm trong vào kết xuất\n"
+" --rename-section =[,]\n"
+"\t\t\t\t\t\t\t\t_Thay đổi phần_ thành \n"
+" --change-leading-char\n"
+"\t\t\t\t\tBuộc kiểu dáng của ký tự đi trước của dạng thức xuất\n"
+"\t\t\t\t\t\t\t\t (_thay đổi ký tự đi trước_)\n"
+" --remove-leading-char\t\t_Gỡ bỏ ký tự đi trước_ ra các ký hiệu toàn "
+"cục\n"
+" --redefine-sym =\n"
+"\t\t\t\t\t\t_Định nghĩa lại_ tên _ký hiệu_ thành \n"
+" --redefine-syms \n"
+"\t\t« --redefine-sym » cho mọi cặp ký hiệu được liệt kê trong \n"
+" --srec-len \t\tGiới hạn _độ dài_ của các Srecords đã tạo "
+"ra\n"
+" --srec-forceS3 \tGiới hạn kiểu Srecords thành S3 "
+"(_buộc_)\n"
+" --strip-symbols \n"
+"\t« -N » cho mọi ký hiệu được liệt kê trong (_tước các ký hiệu_)\n"
+" --strip-unneeded-symbols \n"
+" \t\t\t\t\t\t\t« --strip-unneeded-symbol » cho mọi ký hiệu\n"
+"\t\t\t\t\t\t\t\tđược liệt kê trong \n"
+" --keep-symbols \n"
+"\t\t\t\t\t« -K » cho mọi ký hiệu được liệt kê trong \n"
+"\t\t\t\t\t\t\t\t (_giữ các ký hiệu_)\n"
+" --localize-symbols \n"
+"\t\t\t\t\t« -L » cho mọi ký hiệu được liệt kê trong \n"
+"\t\t\t\t\t\t\t\t (_địa phương hóa các ký hiệu_)\n"
+" --keep-global-symbols \n"
+"\t\t\t\t\t« -G » cho mọi ký hiệu được liệt kê trong \n"
+"\t\t\t\t\t\t\t\t (_giữ các ký hiệu toàn cục_)\n"
+" --weaken-symbols \n"
+"\t\t\t\t\t« -W » cho mọi ký hiệu được liệt kê trong \n"
+"\t\t\t\t\t\t\t\t (_làm yếu các ký hiệu_)\n"
+" --alt-machine-code Dùng _mã máy xen kẽ_ cho kết xuất\n"
+" --writable-text \t\tĐánh dấu _văn bản_ xuất _có khả năng "
+"ghi_\n"
+" --readonly-text \tLàm cho văn bản xuất được bảo vệ chống "
+"ghi\n"
+"\t\t\t\t\t\t\t\t (_văn bản chỉ có khả năng đọc_)\n"
+" --pure\n"
+"\t\t\tĐánh dấu tập tin xuất sẽ có trang được sắp xếp theo yêu cầu\n"
+"\t\t\t\t\t\t\t\t (_tinh khiết_)\n"
+" --impure \t\tĐánh dấu tập tin xuất _không tinh "
+"khiết_\n"
+" --prefix-symbols \n"
+"\t\tThêm vào đầu của mỗi tên ký hiệu (_tiền tố các ký hiệu_)\n"
+" --prefix-sections \n"
+"\t\tThêm vào đầu của mỗi tên phần (_tiền tố các phần_)\n"
+" --prefix-alloc-sections \n"
+"\t\tThêm vào đầu của mỗi tên phần có thể cấp phát\n"
+"\t\t\t\t\t\t\t\t(_tiền tố các phần có thể cấp phát_)\n"
+" -v --verbose \t\tLiệt kê mọi tập tin đối tượng đã được "
+"sửa đổi\n"
+"\t\t\t\t\t\t\t\t (_chi tiết_)\n"
+" -V --version Hiển thị số thứ tự _phiên bản_ của chương "
+"trình này\n"
+" -h --help \t\t\tHiển thị _trợ giúp_ này\n"
+" --info \t\tLiệt kê các dạng thức và kiến trúc "
+"được hỗ trợ\n"
+"\t\t\t\t\t\t\t\t (_thông tin_)\n"
+
+#: objcopy.c:485
+#, c-format
+msgid "Usage: %s <option(s)> in-file(s)\n"
+msgstr "Cách sử dụng: %s <tùy_chọn> tập_tin_nhập...\n"
+
+#: objcopy.c:486
+#, c-format
+msgid " Removes symbols and sections from files\n"
+msgstr " Gỡ bỏ ký 
hiệu và phần ra tập tin\n"
+
+#: objcopy.c:488
+#, c-format
+msgid ""
+" -I --input-target=<bfdname> Assume input file is in format <bfdname>\n"
+" -O --output-target=<bfdname> Create an output file in format "
+"<bfdname>\n"
+" -F --target=<bfdname> Set both input and output format to "
+"<bfdname>\n"
+" -p --preserve-dates Copy modified/access timestamps to the "
+"output\n"
+" -R --remove-section=<name> Remove section <name> from the output\n"
+" -s --strip-all Remove all symbol and relocation "
+"information\n"
+" -g -S -d --strip-debug Remove all debugging symbols & sections\n"
+" --strip-unneeded Remove all symbols not needed by "
+"relocations\n"
+" --only-keep-debug Strip everything but the debug "
+"information\n"
+" -N --strip-symbol=<name> Do not copy symbol <name>\n"
+" -K --keep-symbol=<name> Only copy symbol <name>\n"
+" -w --wildcard Permit wildcard in symbol comparison\n"
+" -x --discard-all Remove all non-global symbols\n"
+" -X --discard-locals Remove any compiler-generated symbols\n"
+" -v --verbose List all object files modified\n"
+" -V --version Display this program's version number\n"
+" -h --help Display this output\n"
+" --info List object formats & architectures "
+"supported\n"
+" -o <file> Place stripped output into <file>\n"
+msgstr ""
+" -I --input-target= Giả sử tập tin nhập có dạng thức "
+"\n"
+"\t\t(đích nhập)\n"
+" -O --output-target= Tạo một tập tin xuất có dạng thức \n"
+"\t\t(đích xuất)\n"
+" -F --target= Đặt dạng thức cả nhập lẫn xuất đều thành "
+"\n"
+"\t\t(đích)\n"
+" -p --preserve-dates\n"
+"\t\tSao chép các nhãn thời gian truy cập/đã sửa đổi vào kết xuất\n"
+"\t\t(bảo tồn các ngày)\n"
+" -R --remove-section= \t_Gỡ bỏ phần_ ra dữ liệu xuất\n"
+" -s --strip-all \t\tGỡ bỏ mọi thông tin kiểu ký hiệu và "
+"định vị lại\n"
+"\t\t(tước hết)\n"
+" -g -S -d --strip-debug \tGỡ bỏ mọi ký hiệu và phần kiểu gỡ lỗi\n"
+"\t\t(tước gỡ lỗi)\n"
+" --strip-unneeded Gỡ bỏ mọi ký hiệu không cần thiết khi "
+"định vị lại\n"
+"\t\t(tước không cần thiết)\n"
+" --only-keep-debug \tTước hết, trừ thông tin gỡ lỗi\n"
+"\t\t(chỉ giữ gỡ lỗi)\n"
+" -N --strip-symbol= \tĐừng sao chép ký hiệu \n"
+"\t\t(tước ký hiệu)\n"
+" -K --keep-symbol= \tSao chép chỉ ký hiệu \n"
+"\t\t(giữ ký hiệu)\n"
+" -w --wildcard Cho phép _ký tự đại diện_ trong chuỗi so sánh "
+"ký hiệu\n"
+" -x --discard-all \t\tGỡ bỏ mọi ký hiệu không toàn cục\n"
+"\t\t(hủy hết)\n"
+" -X --discard-locals \tGỡ bỏ ký hiệu nào do bộ biên dịch tạo "
+"ra\n"
+"\t\t(hủy các điều cục bộ)\n"
+" -v --verbose \t\tLiệt kê mọi tập tin đối tượng đã sửa "
+"đổi\n"
+"\t\t(chi tiết)\n"
+" -V --version Hiển thị số thứ tự _phiên bản_ của chương "
+"trình này\n"
+" -h --help \t\tHiển thị _trợ giúp_ này\n"
+" --info Liệt kê các dạng thức đối tượng và kiến trúc "
+"được hỗ trợ\n"
+"\t\t(thông tin)\n"
+" -o \tĐể kết _xuất_ đã tước vào \n"
+
+#: objcopy.c:560
+#, c-format
+msgid "unrecognized section flag `%s'"
+msgstr "không nhận ra cờ phần « %s »"
+
+#: objcopy.c:561
+#, c-format
+msgid "supported flags: %s"
+msgstr "các cờ đã hỗ trợ : %s"
+
+#: objcopy.c:638
+#, c-format
+msgid "cannot open '%s': %s"
+msgstr "không thể mở « %s »: %s"
+
+#: objcopy.c:641 objcopy.c:2629
+#, c-format
+msgid "%s: fread failed"
+msgstr "%s: việc fread (đọc f) bị lỗi"
+
+#: objcopy.c:714
+#, c-format
+msgid "%s:%d: Ignoring rubbish found on this line"
+msgstr "%s:%d: Đang bỏ qua rác được gặp trên dòng này"
+
+#: objcopy.c:976
+#, c-format
+msgid "%s: Multiple redefinition of symbol \"%s\""
+msgstr "%s: Ký hiệu « %s » đã được định nghĩa lại nhiều lần"
+
+#: objcopy.c:980
+#, c-format
+msgid 
"%s: Symbol \"%s\" is target of more than one redefinition" +msgstr "%s: Ký hiệu « %s » là đích của nhiá»u lá»i định nghÄ©a lại" + +#: objcopy.c:1008 +#, c-format +msgid "couldn't open symbol redefinition file %s (error: %s)" +msgstr "không thể mở tập tin định nghÄ©a lại ký hiệu %s (lá»—i: %s)" + +#: objcopy.c:1086 +#, c-format +msgid "%s:%d: garbage found at end of line" +msgstr "%s:%d: gặp rác tại kết thúc dòng" + +#: objcopy.c:1089 +#, c-format +msgid "%s:%d: missing new symbol name" +msgstr "%s:%d: thiếu tên ký hiệu má»›i" + +#: objcopy.c:1099 +#, c-format +msgid "%s:%d: premature end of file" +msgstr "%s:%d: gặp kết thúc tập tin quá sá»›m" + +#: objcopy.c:1124 +msgid "Unable to change endianness of input file(s)" +msgstr "Không thể thay đổi tính trạng cuối (endian) của (các) tập tin nhập" + +#: objcopy.c:1133 +#, c-format +msgid "copy from %s(%s) to %s(%s)\n" +msgstr "chép từ %s(%s) đến %s(%s)\n" + +#: objcopy.c:1170 +#, c-format +msgid "Unable to recognise the format of the input file %s" +msgstr "Không thể nhận diện dạng thức của tập tin nhập %s" + +#: objcopy.c:1174 +#, c-format +msgid "Warning: Output file cannot represent architecture %s" +msgstr "Cảnh báo : Tập tin xuất không thể tiêu biểu kiến trúc %s" + +#: objcopy.c:1211 +#, c-format +msgid "can't create section `%s': %s" +msgstr "không thể tạo phần « %s »: %s" + +#: objcopy.c:1277 +msgid "there are no sections to be copied!" +msgstr "• Không có phần cần sao chép. •" + +#: objcopy.c:1323 +#, c-format +msgid "Can't fill gap after %s: %s" +msgstr "Không thể Ä‘iá»n vào khe sau : %s: %s" + +#: objcopy.c:1348 +#, c-format +msgid "Can't add padding to %s: %s" +msgstr "Không thể thêm đệm vào %s: %s" + +#: objcopy.c:1514 +#, c-format +msgid "%s: error copying private BFD data: %s" +msgstr "%s: gặp lá»—i khi sao chép dữ liệu BFD riêng : %s" + +#: objcopy.c:1525 +msgid "unknown alternate machine code, ignored" +msgstr "không biết mã máy xen kẽ nên bá» qua nó" + +#: objcopy.c:1555 objcopy.c:1585 +#, c-format +msgid "cannot mkdir %s for archive copying (error: %s)" +msgstr "không thể mkdir (tạo thÆ° mục) %s để sao chép kho (lá»—i: %s)" + +#: objcopy.c:1790 +#, c-format +msgid "Multiple renames of section %s" +msgstr "Äã thay đổi tên phần %s nhiá»u lần" + +#: objcopy.c:1841 +msgid "private header data" +msgstr "dữ liệu dòng đầu riêng" + +#: objcopy.c:1849 +#, c-format +msgid "%s: error in %s: %s" +msgstr "%s: lá»—i trong %s: %s" + +#: objcopy.c:1903 +msgid "making" +msgstr "làm" + +#: objcopy.c:1912 src/main/extractor.c:87 +msgid "size" +msgstr "cỡ" + +#: objcopy.c:1926 +msgid "vma" +msgstr "vma" + +#: objcopy.c:1951 lexsup.c:1101 +msgid "alignment" +msgstr "canh lá»" + +#: objcopy.c:1966 lib/report.c:103 lib/report.c:415 +msgid "flags" +msgstr "cá»" + +#: objcopy.c:1988 +msgid "private data" +msgstr "dữ liệu riêng" + +#: objcopy.c:1996 +#, c-format +msgid "%s: section `%s': error in %s: %s" +msgstr "%s: phần « %s »: lá»—i trong %s: %s" + +#: objcopy.c:2274 +#, c-format +msgid "%s: can't create debugging section: %s" +msgstr "%s: không thể tạo phần gỡ lá»—i: %s" + +#: objcopy.c:2288 +#, c-format +msgid "%s: can't set debugging section contents: %s" +msgstr "%s: không thể đặt ná»™i dung phần gỡ lá»—i: %s" + +#: objcopy.c:2297 +#, c-format +msgid "%s: don't know how to write debugging information for %s" +msgstr "%s: không biết cách ghi thông tin gỡ lá»—i cho %s" + +#: objcopy.c:2472 +msgid "byte number must be non-negative" +msgstr "số byte phải là không âm" + +#: objcopy.c:2482 +msgid "interleave must be positive" +msgstr "chen vào phải 
là dương"
+
+#: objcopy.c:2502 objcopy.c:2510
+#, c-format
+msgid "%s both copied and removed"
+msgstr "%s cả được sao chép lẫn bị gỡ bỏ đều"
+
+#: objcopy.c:2603 objcopy.c:2674 objcopy.c:2774 objcopy.c:2805 objcopy.c:2829
+#: objcopy.c:2833 objcopy.c:2853
+#, c-format
+msgid "bad format for %s"
+msgstr "dạng thức sai cho %s"
+
+#: objcopy.c:2624
+#, c-format
+msgid "cannot open: %s: %s"
+msgstr "không thể mở : %s: %s"
+
+#: objcopy.c:2743
+#, c-format
+msgid "Warning: truncating gap-fill from 0x%s to 0x%x"
+msgstr "Cảnh báo : đang cắt xén điền-khe từ 0x%s thành 0x%x"
+
+#: objcopy.c:2903
+msgid "alternate machine code index must be positive"
+msgstr "chỉ mục mã máy xen kẽ phải là dương"
+
+#: objcopy.c:2961
+msgid "byte number must be less than interleave"
+msgstr "số byte phải là ít hơn chen vào"
+
+#: objcopy.c:2991
+#, c-format
+msgid "architecture %s unknown"
+msgstr "không biết kiến trúc %s"
+
+#: objcopy.c:2995
+msgid ""
+"Warning: input target 'binary' required for binary architecture parameter."
+msgstr ""
+"Cảnh báo : đích nhập « binary » (nhị phân) cần thiết cho tham số kiến trúc "
+"nhị phân."
+
+#: objcopy.c:2996
+#, c-format
+msgid " Argument %s ignored"
+msgstr " Đối số %s bị bỏ qua"
+
+#: objcopy.c:3002
+#, c-format
+msgid "warning: could not locate '%s'. System error message: %s"
+msgstr "cảnh báo : không thể định vị « %s ». Thông điệp lỗi hệ thống: %s"
+
+#: objcopy.c:3042 objcopy.c:3056
+#, c-format
+msgid "%s %s%c0x%s never used"
+msgstr "%s %s%c0x%s chưa bao giờ dùng"
+
+#: objdump.c:176
+#, c-format
+msgid "Usage: %s <option(s)> <file(s)>\n"
+msgstr "Cách sử dụng: %s <tùy_chọn> <tập_tin>\n"
+
+#: objdump.c:177
+#, c-format
+msgid " Display information from object <file(s)>.\n"
+msgstr " Hiển thị thông tin từ đối tượng.\n"
+
+#: objdump.c:178
+#, c-format
+msgid " At least one of the following switches must be given:\n"
+msgstr " Phải đưa ra ít nhất một của những cái chuyển theo sau :\n"
+
+#: objdump.c:179
+#, c-format
+msgid ""
+" -a, --archive-headers Display archive header information\n"
+" -f, --file-headers Display the contents of the overall file header\n"
+" -p, --private-headers Display object format specific file header "
+"contents\n"
+" -h, --[section-]headers Display the contents of the section headers\n"
+" -x, --all-headers Display the contents of all headers\n"
+" -d, --disassemble Display assembler contents of executable "
+"sections\n"
+" -D, --disassemble-all Display assembler contents of all sections\n"
+" -S, --source Intermix source code with disassembly\n"
+" -s, --full-contents Display the full contents of all sections "
+"requested\n"
+" -g, --debugging Display debug information in object file\n"
+" -e, --debugging-tags Display debug information using ctags style\n"
+" -G, --stabs Display (in raw form) any STABS info in the file\n"
+" -t, --syms Display the contents of the symbol table(s)\n"
+" -T, --dynamic-syms Display the contents of the dynamic symbol table\n"
+" -r, --reloc Display the relocation entries in the file\n"
+" -R, --dynamic-reloc Display the dynamic relocation entries in the "
+"file\n"
+" -v, --version Display this program's version number\n"
+" -i, --info List object formats and architectures supported\n"
+" -H, --help Display this information\n"
+msgstr ""
+" -a, --archive-headers \t\tHiển thị thông tin về _các phần đầu kho_\n"
+" -f, --file-headers Hiển thị nội dung của _toàn bộ phần đầu tập "
+"tin_\n"
+" -p, --private-headers\n"
+"\t\tHiển thị nội dung của phần đầu tập tin đặc trưng cho đối tượng\n"
+"\t\t(các phần đầu riêng)\n"
+" -h, 
--[section-]headers Hiển thị nội dung của _các phần đầu của phần_\n"
+" -x, --all-headers \t\t Hiển thị nội dung của _mọi phần đầu_\n"
+" -d, --disassemble\n"
+"\t\tHiển thị nội dung của các phần có khả năng thực hiện\n"
+"\t\t(rã)\n"
+" -D, --disassemble-all \t Hiển thị nội dung dịch mã số của mọi phần\n"
+"\t\t(rã hết)\n"
+" -S, --source \t\t\t\t Trộn lẫn mã _nguồn_ với việc rã\n"
+" -s, --full-contents Hiển thị _nội dung đầy đủ_ của mọi phần đã yêu cầu\n"
+" -g, --debugging Hiển thị thông tin _gỡ lỗi_ trong tập tin đối "
+"tượng\n"
+" -e, --debugging-tags Hiển thị thông tin gỡ lỗi, dùng kiểu dáng ctags\n"
+"\t\t(các thẻ gỡ lỗi)\n"
+" -G, --stabs Hiển thị (dạng thô) thông tin STABS nào trong tập "
+"tin\n"
+" -t, --syms \t\t\t Hiển thị nội dung của các bảng ký hiệu\n"
+"\t\t(các ký hiệu [viết tắt])\n"
+" -T, --dynamic-syms \t\tHiển thị nội dung của bảng ký hiệu động\n"
+"\t\t(các ký hiệu động [viết tắt])\n"
+" -r, --reloc \t\tHiển thị các mục nhập định vị lại trong tập "
+"tin\n"
+"\t\t(định vị lại [viết tắt])\n"
+" -R, --dynamic-reloc\n"
+"\t\t\t\t Hiển thị các mục nhập định vị lại động trong tập tin\n"
+"\t\t(định vị lại động [viết tắt])\n"
+" -v, --version Hiển thị số thứ tự _phiên bản_ của chương trình "
+"này\n"
+" -i, --info Liệt kê các dạng thức đối tượng và kiến trúc được hỗ "
+"trợ\n"
+"\t\t(thông tin [viết tắt])\n"
+" -H, --help \tHiển thị _trợ giúp_ này\n"
+
+#: objdump.c:202
+#, c-format
+msgid ""
+"\n"
+" The following switches are optional:\n"
+msgstr ""
+"\n"
+" Những cái chuyển theo đây là tùy chọn:\n"
+
+#: objdump.c:203
+#, c-format
+msgid ""
+" -b, --target=BFDNAME Specify the target object format as "
+"BFDNAME\n"
+" -m, --architecture=MACHINE Specify the target architecture as MACHINE\n"
+" -j, --section=NAME Only display information for section NAME\n"
+" -M, --disassembler-options=OPT Pass text OPT on to the disassembler\n"
+" -EB --endian=big Assume big endian format when "
+"disassembling\n"
+" -EL --endian=little Assume little endian format when "
+"disassembling\n"
+" --file-start-context Include context from start of file (with -"
+"S)\n"
+" -I, --include=DIR Add DIR to search list for source files\n"
+" -l, --line-numbers Include line numbers and filenames in "
+"output\n"
+" -C, --demangle[=STYLE] Decode mangled/processed symbol names\n"
+" The STYLE, if specified, can be `auto', "
+"`gnu',\n"
+" `lucid', `arm', `hp', `edg', `gnu-v3', "
+"`java'\n"
+" or `gnat'\n"
+" -w, --wide Format output for more than 80 columns\n"
+" -z, --disassemble-zeroes Do not skip blocks of zeroes when "
+"disassembling\n"
+" --start-address=ADDR Only process data whose address is >= ADDR\n"
+" --stop-address=ADDR Only process data whose address is <= ADDR\n"
+" --prefix-addresses Print complete address alongside "
+"disassembly\n"
+" --[no-]show-raw-insn Display hex alongside symbolic disassembly\n"
+" --adjust-vma=OFFSET Add OFFSET to all displayed section "
+"addresses\n"
+" --special-syms Include special symbols in symbol dumps\n"
+"\n"
+msgstr ""
+" -b, --target=TÊN_BFD \tGhi rõ dạng thức đối tượng _đích_ là TÊN_BFD\n"
+" -m, --architecture=MÁY \t\t Ghi rõ _kiến trúc_ đích là MÁY\n"
+" -j, --section=TÊN \t\t Hiển thị thông tin chỉ cho _phần_ "
+"TÊN\n"
+" -M, --disassembler-options=TÙY_CHỌN\n"
+"\t\tGởi chuỗi TÙY_CHỌN qua cho _bộ rã_\n"
+"\t\t(các tùy chọn bộ rã)\n"
+" -EB --endian=big\n"
+"\t\tGiả sử dạng thức tính trạng cuối lớn (big-endian) khi rã\n"
+" -EL --endian=little\n"
+"\t\tGiả sử dạng thức tính trạng 
cuối nhỏ (little-endian) khi rã\n"
+" --file-start-context \tGồm _ngữ cảnh_ từ _đầu tập tin_ (bằng « -"
+"S »)\n"
+" -I, --include=THƯ_MỤC\n"
+"\t\tThêm THƯ_MỤC vào danh sách tìm kiếm tập tin nguồn\n"
+"\t\t(bao gồm)\n"
+" -l, --line-numbers\n"
+"\t\tGồm các _số thứ tự dòng_ và tên tập tin trong kết xuất\n"
+" -C, --demangle[=KIỂU_DÁNG] giải mã các tên ký hiệu đã rối/xử lý\n"
+"\t\t(tháo gỡ)\n"
+"\t\tKIỂU_DÁNG, nếu đã ghi rõ, có thể là:\n"
+"\t\t • auto\t\ttự động\n"
+"\t\t • gnu\n"
+" \t • lucid\t\trõ ràng\n"
+"\t\t • arm\n"
+"\t\t • hp\n"
+"\t\t • edg\n"
+"\t\t • gnu-v3\n"
+" \t\t • java\n"
+" \t • gnat\n"
+" -w, --wide \t\tĐịnh dạng dữ liệu xuất chiếm hơn 80 "
+"cột\n"
+"\t\t(rộng)\n"
+" -z, --disassemble-zeroes \t\tĐừng nhảy qua khối số không khi rã\n"
+"\t\t(rã các số không)\n"
+" --start-address=ĐỊA_CHỈ Xử lý chỉ dữ liệu có địa chỉ ≥ "
+"ĐỊA_CHỈ\n"
+" --stop-address=ĐỊA_CHỈ Xử lý chỉ dữ liệu có địa chỉ ≤ "
+"ĐỊA_CHỈ\n"
+" --prefix-addresses \t\tIn ra địa chỉ hoàn toàn ở bên việc "
+"rã\n"
+"\t\t(thêm vào đầu các địa chỉ)\n"
+" --[no-]show-raw-insn\n"
+"\t\tHiển thị thập lục phân ở bên việc rã kiểu ký hiệu\n"
+"\t\t([không] hiển thị câu lệnh thô)\n"
+" --adjust-vma=HIỆU_SỐ\n"
+"\t\tThêm HIỆU_SỐ vào mọi địa chỉ phần đã hiển thị\n"
+"\t\t(điều chỉnh vma)\n"
+" --special-syms Gồm _các ký hiệu đặc biệt_ trong việc đổ ký hiệu\n"
+"\n"
+
+#: objdump.c:378
+#, c-format
+msgid "Sections:\n"
+msgstr "Phần:\n"
+
+#: objdump.c:381 objdump.c:385
+#, c-format
+msgid "Idx Name Size VMA LMA File off Algn"
+msgstr "Idx Name Size VMA LMA File off Algn"
+
+#: objdump.c:387
+#, c-format
+msgid ""
+"Idx Name Size VMA LMA File off "
+"Algn"
+msgstr ""
+"Idx Name Size VMA LMA File off "
+"Algn"
+
+#: objdump.c:391
+#, c-format
+msgid " Flags"
+msgstr " Cờ"
+
+#: objdump.c:393
+#, c-format
+msgid " Pg"
+msgstr " Tr"
+
+#: objdump.c:436
+#, c-format
+msgid "%s: not a dynamic object"
+msgstr "%s: không phải là một đối tượng động"
+
+#: objdump.c:1722
+#, c-format
+msgid "Disassembly of section %s:\n"
+msgstr "Việc rã phần %s:\n"
+
+#: objdump.c:1884
+#, c-format
+msgid "Can't use supplied machine %s"
+msgstr "Không thể sử dụng máy đã cung cấp %s"
+
+#: objdump.c:1903
+#, c-format
+msgid "Can't disassemble for architecture %s\n"
+msgstr "Không thể rã cho kiến trúc %s\n"
+
+#: objdump.c:1973
+#, c-format
+msgid ""
+"No %s section present\n"
+"\n"
+msgstr ""
+"Không có phần %s\n"
+"\n"
+
+#: objdump.c:1982
+#, c-format
+msgid "Reading %s section of %s failed: %s"
+msgstr "Việc đọc phần %s của %s bị lỗi: %s"
+
+#: objdump.c:2026
+#, c-format
+msgid ""
+"Contents of %s section:\n"
+"\n"
+msgstr ""
+"Nội dung phần %s:\n"
+"\n"
+
+#: objdump.c:2153
+#, c-format
+msgid "architecture: %s, "
+msgstr "kiến trúc: %s, "
+
+#: objdump.c:2156
+#, c-format
+msgid "flags 0x%08x:\n"
+msgstr "cờ 0x%08x:\n"
+
+#: objdump.c:2170
+#, c-format
+msgid ""
+"\n"
+"start address 0x"
+msgstr ""
+"\n"
+"địa chỉ đầu 0x"
+
+#: objdump.c:2210
+#, c-format
+msgid "Contents of section %s:\n"
+msgstr "Nội dung phần %s:\n"
+
+#: objdump.c:2335
+#, c-format
+msgid "no symbols\n"
+msgstr "không có ký hiệu\n"
+
+#: objdump.c:2342
+#, c-format
+msgid "no information for symbol number %ld\n"
+msgstr "không có thông tin cho ký hiệu số %ld\n"
+
+#: objdump.c:2345
+#, c-format
+msgid "could not determine the type of symbol number %ld\n"
+msgstr "không thể quyết định kiểu ký hiệu số %ld\n"
+
+#: objdump.c:2611
+#, c-format
+msgid ""
+"\n"
+"%s: file format %s\n"
+msgstr ""
+"\n"
+"%s: dạng thức tập tin %s\n"
+
+#: 
objdump.c:2662
+#, c-format
+msgid "%s: printing debugging information failed"
+msgstr "%s: việc in ra thông tin gỡ lỗi bị lỗi"
+
+#: objdump.c:2753
+#, c-format
+msgid "In archive %s:\n"
+msgstr "Trong kho %s:\n"
+
+#: objdump.c:2873
+msgid "unrecognized -E option"
+msgstr "không nhận ra tùy chọn « -E »"
+
+#: objdump.c:2884
+#, c-format
+msgid "unrecognized --endian type `%s'"
+msgstr "không nhận ra kiểu tính trạng cuối (endian) « %s »"
+
+#: rdcoff.c:196
+#, c-format
+msgid "parse_coff_type: Bad type code 0x%x"
+msgstr "parse_coff_type: (phân tách kiểu coff) Mã kiểu sai 0x%x"
+
+#: rdcoff.c:404 rdcoff.c:509 rdcoff.c:697
+#, c-format
+msgid "bfd_coff_get_syment failed: %s"
+msgstr "« bfd_coff_get_syment » bị lỗi: %s"
+
+#: rdcoff.c:420 rdcoff.c:717
+#, c-format
+msgid "bfd_coff_get_auxent failed: %s"
+msgstr "« bfd_coff_get_auxent » bị lỗi: %s"
+
+#: rdcoff.c:784
+#, c-format
+msgid "%ld: .bf without preceding function"
+msgstr "%ld: « .bf » không có hàm đi trước"
+
+#: rdcoff.c:834
+#, c-format
+msgid "%ld: unexpected .ef\n"
+msgstr "%ld: « .ef » bất ngờ\n"
+
+#: rddbg.c:85
+#, c-format
+msgid "%s: no recognized debugging information"
+msgstr "%s: không có thông tin gỡ lỗi đã nhận ra"
+
+#: rddbg.c:394
+#, c-format
+msgid "Last stabs entries before error:\n"
+msgstr "Những mục nhập stabs cuối cùng trước lỗi:\n"
+
+#: readelf.c:272 ia64-gen.c:297
+#, c-format
+msgid "%s: Error: "
+msgstr "%s: Lỗi: "
+
+#: readelf.c:283 ia64-gen.c:310
+#, c-format
+msgid "%s: Warning: "
+msgstr "%s: Cảnh báo : "
+
+#: readelf.c:298
+#, c-format
+msgid "Unable to seek to 0x%x for %s\n"
+msgstr "Không thể nhảy tới 0x%x tìm %s\n"
+
+#: readelf.c:310
+#, c-format
+msgid "Out of memory allocating 0x%x bytes for %s\n"
+msgstr "Hết bộ nhớ khi cấp phát 0x%x byte cho %s\n"
+
+#: readelf.c:318
+#, c-format
+msgid "Unable to read in 0x%x bytes of %s\n"
+msgstr "Không thể đọc trong 0x%x byte của %s\n"
+
+#: readelf.c:364 readelf.c:412 readelf.c:615 readelf.c:647
+#, c-format
+msgid "Unhandled data length: %d\n"
+msgstr "Độ dài dữ liệu không được quản lý: %d\n"
+
+#: readelf.c:752
+msgid "Don't know about relocations on this machine architecture\n"
+msgstr "Không biết về việc định vị lại trên kiến trúc máy này\n"
+
+#: readelf.c:772 readelf.c:799 readelf.c:842 readelf.c:869
+msgid "relocs"
+msgstr "đ.v. lại"
+
+#: readelf.c:782 readelf.c:809 readelf.c:852 readelf.c:879
+msgid "out of memory parsing relocs"
+msgstr "hết bộ nhớ khi phân tách việc định vị lại"
+
+#: readelf.c:933
+#, c-format
+msgid ""
+" Offset Info Type Sym. Value Symbol's Name + Addend\n"
+msgstr ""
+" Hiệu Tin Kiểu Giá trị ký hiệu Tên ký hiệu + gì thêm\n"
+
+#: readelf.c:935
+#, c-format
+msgid " Offset Info Type Sym.Value Sym. Name + Addend\n"
+msgstr " Hiệu Tin Kiểu Giá trị ký hiệu Tên ký hiệu + gì thêm\n"
+
+#: readelf.c:940
+#, c-format
+msgid " Offset Info Type Sym. Value Symbol's Name\n"
+msgstr " Hiệu Tin Kiểu Giá trị ký hiệu Tên ký hiệu\n"
+
+#: readelf.c:942
+#, c-format
+msgid " Offset Info Type Sym.Value Sym. Name\n"
+msgstr " Hiệu Tin Kiểu Giá trị ký hiệu Tên ký hiệu\n"
+
+#: readelf.c:950
+#, c-format
+msgid ""
+" Offset Info Type Symbol's Value "
+"Symbol's Name + Addend\n"
+msgstr ""
+" Offset Info Type Symbol's Value "
+"Symbol's Name + Addend\n"
+
+#: readelf.c:952
+#, c-format
+msgid ""
+" Offset Info Type Sym. 
Name + "
+"Addend\n"
+msgstr ""
+" Hiệu Tin Kiểu Giá trị ký hiệu Tên ký hiệu + gì thêm\n"
+
+#: readelf.c:957
+#, c-format
+msgid ""
+" Offset Info Type Symbol's Value "
+"Symbol's Name\n"
+msgstr ""
+" Offset Info Type Symbol's Value "
+"Symbol's Name\n"
+
+#: readelf.c:959
+#, c-format
+msgid ""
+" Offset Info Type Sym. Value Sym. Name\n"
+msgstr " Hiệu Tin Kiểu Giá trị ký hiệu Tên ký hiệu\n"
+
+#: readelf.c:1239 readelf.c:1241 readelf.c:1324 readelf.c:1326 readelf.c:1335
+#: readelf.c:1337
+#, c-format
+msgid "unrecognized: %-7lx"
+msgstr "không nhận ra: %-7lx"
+
+#: readelf.c:1295
+#, c-format
+msgid ""
+msgstr ""
+
+#: readelf.c:1297
+#, c-format
+msgid ""
+msgstr ""
+
+#: readelf.c:1569
+#, c-format
+msgid "Processor Specific: %lx"
+msgstr "Đặc trưng cho bộ xử lý: %lx"
+
+#: readelf.c:1588
+#, c-format
+msgid "Operating System specific: %lx"
+msgstr "Đặc trưng cho Hệ điều hành: %lx"
+
+#: readelf.c:1592 readelf.c:2370
+#, c-format
+msgid "<unknown>: %lx"
+msgstr "<không rõ>: %lx"
+
+#: readelf.c:1605
+msgid "NONE (None)"
+msgstr "KHÔNG CÓ (Không có)"
+
+#: readelf.c:1606
+msgid "REL (Relocatable file)"
+msgstr "REL (Tập tin có thể _định vị lại_)"
+
+#: readelf.c:1607
+msgid "EXEC (Executable file)"
+msgstr "EXEC (Executable file)"
+
+#: readelf.c:1608
+msgid "DYN (Shared object file)"
+msgstr "DYN (Shared object file)"
+
+#: readelf.c:1609
+msgid "CORE (Core file)"
+msgstr "CORE (Core file)"
+
+#: readelf.c:1613
+#, c-format
+msgid "Processor Specific: (%x)"
+msgstr "Đặc trưng cho bộ xử lý: (%x)"
+
+#: readelf.c:1615
+#, c-format
+msgid "OS Specific: (%x)"
+msgstr "Đặc trưng cho HĐH: (%x)"
+
+#: readelf.c:1617 readelf.c:1724 readelf.c:2554
+#, c-format
+msgid "<unknown>: %x"
+msgstr "<không rõ>: %x"
+
+#. #-#-#-#-# guikachu.vi.po (guikachu HEAD) #-#-#-#-#
+#. Fill the model
+#: ../src/mlview-node-editor.cc:1992 ../gnome/applet/wso-none.c:53
+#: ../storage/sunone-invitation-list.c:291 ../widgets/gtk+.xml.in.h:126
+#: ../src/form-editor/button-prop.cc:144 datebook_gui.c:1338
+#: datebook_gui.c:4626 libexif/olympus/mnote-olympus-entry.c:290
+#: app/sample-editor.c:299 app/track-editor.c:190 app/track-editor.c:200
+msgid "None"
+msgstr "Không có"
+
+#: readelf.c:2229
+msgid "Standalone App"
+msgstr "Ứng dụng Độc lập"
+
+#: readelf.c:2232 readelf.c:2952 readelf.c:2968
+#, c-format
+msgid "<unknown: %x>"
+msgstr "<không rõ: %x>"
+
+#: readelf.c:2597
+#, c-format
+msgid "Usage: readelf <option(s)> elf-file(s)\n"
+msgstr "Cách sử dụng: readelf <tùy_chọn> tập_tin_elf...\n"
+
+#: readelf.c:2598
+#, c-format
+msgid " Display information about the contents of ELF format files\n"
+msgstr " Hiển thị thông tin về nội dung tập tin dạng thức ELF\n"
+
+#: readelf.c:2599
+#, c-format
+msgid ""
+" Options are:\n"
+" -a --all Equivalent to: -h -l -S -s -r -d -V -A -I\n"
+" -h --file-header Display the ELF file header\n"
+" -l --program-headers Display the program headers\n"
+" --segments An alias for --program-headers\n"
+" -S --section-headers Display the sections' header\n"
+" --sections An alias for --section-headers\n"
+" -g --section-groups Display the section groups\n"
+" -e --headers Equivalent to: -h -l -S\n"
+" -s --syms Display the symbol table\n"
+" --symbols An alias for --syms\n"
+" -n --notes Display the core notes (if present)\n"
+" -r --relocs Display the relocations (if present)\n"
+" -u --unwind Display the unwind info (if present)\n"
+" -d --dynamic Display the dynamic section (if present)\n"
+" -V --version-info Display the version sections (if present)\n"
+" -A --arch-specific Display architecture specific information (if "
+"any).\n"
+" -D 
--use-dynamic Use the dynamic section info when displaying "
+"symbols\n"
+" -x --hex-dump=<number> Dump the contents of section <number>\n"
+" -w[liaprmfFsoR] or\n"
+" --debug-dump[=line,=info,=abbrev,=pubnames,=aranges,=macro,=frames,=str,"
+"=loc,=Ranges]\n"
+" Display the contents of DWARF2 debug sections\n"
+msgstr ""
+" Tùy chọn:\n"
+" -a --all \t\t\t\t\tBằng: -h -l -S -s -r -d -V -A -I\n"
+"\t(hết)\n"
+" -h --file-header \t\t\t\tHiển thị _dòng đầu tập tin_ ELF\n"
+" -l --program-headers \t\tHiển thị _các dòng đầu chương trình_\n"
+" --segments \t\t\tBiệt hiệu cho « --program-headers »\n"
+"\t(các phân đoạn)\n"
+" -S --section-headers \t\t\tHiển thị dòng đầu của các phần\n"
+"\t(các dòng đầu phần)\n"
+" --sections \t\t\tBiệt hiệu cho « --section-headers »\n"
+"\t(các phần)\n"
+" -g --section-groups \t\t\t Hiển thị _các nhóm phần_\n"
+" -e --headers \t\t\t\tBằng: -h -l -S\n"
+"\t(các dòng đầu)\n"
+" -s --syms \t\t\tHiển thị bảng _ký hiệu_\n"
+" --symbols \t\t\tBiệt hiệu cho « --syms »\n"
+"\t(các ký hiệu [« syms » là viết tắt])\n"
+" -n --notes \t\t\tHiển thị _các ghi chú_ lõi (nếu có)\n"
+" -r --relocs \t\tHiển thị _các việc định vị lại_ (nếu có)\n"
+" -u --unwind \t\tHiển thị thông tin _tri ra_ (nếu có)\n"
+" -d --dynamic \t\tHiển thị phần _động_ (nếu có)\n"
+" -V --version-info \t\tHiển thị các phần phiên bản (nếu có)\n"
+"\t(thông tin phiên bản)\n"
+" -A --arch-specific Hiển thị thông tin _đặc trưng cho kiến trúc_ (nếu "
+"có)\n"
+" -D --use-dynamic _Dùng_ thông tin phần _động_ khi hiển thị ký hiệu\n"
+" -x --hex-dump= \t\t\tĐổ nội dung phần \n"
+"\t(đổ thập lục)\n"
+" -w[liaprmfFsoR] or\n"
+" --debug-dump[=line,=info,=abbrev,=pubnames,=aranges,=macro,=frames,=str,"
+"=loc,=Ranges]\n"
+"\t[line\t\t\tdòng\n"
+"\tinfo\t\t\tthông tin\n"
+"\tabbrev.\t\tviết tắt\n"
+"\tpubnames\tcác tên công\n"
+"\taranges\t\tcác phạm vi a\n"
+"\tmacro\t\tbộ lệnh\n"
+"\tframes\t\tcác khung\n"
+"\tstr\t\t\tchuỗi\n"
+"\tloc\t\t\tđịnh vị\n"
+"\tRanges\t\tCác phạm vi]\n"
+" Hiển thị nội dung các phần gỡ lỗi kiểu DWARF2\n"
+
+#: readelf.c:2622
+#, c-format
+msgid ""
+" -i --instruction-dump=<number>\n"
+" Disassemble the contents of section <number>\n"
+msgstr ""
+" -i --instruction-dump=\t\tTháo ra nội dung phần \n"
+"\t(đổ câu lệnh)\n"
+
+#: readelf.c:2626
+#, c-format
+msgid ""
+" -I --histogram Display histogram of bucket list lengths\n"
+" -W --wide Allow output width to exceed 80 characters\n"
+" -H --help Display this information\n"
+" -v --version Display the version number of readelf\n"
+msgstr ""
+" -I --histogram\n"
+"\tHiển thị _biểu đồ tần suất_ của các độ dài danh sách xô\n"
+" -W --wide Cho phép độ _rộng_ kết xuất vượt qua 80 ký tự\n"
+" -H --help \tHiển thị _trợ giúp_ này\n"
+" -v --version \tHiển thị số thứ tự _phiên bản_ của readelf\n"
+
+#: readelf.c:2651 readelf.c:12118
+msgid "Out of memory allocating dump request table."
+msgstr "Hết bộ nhớ khi cấp phát bảng yêu cầu đổ."
+
+#: readelf.c:2820 readelf.c:2888
+#, c-format
+msgid "Unrecognized debug option '%s'\n"
+msgstr "Không nhận diện tùy chọn gỡ lỗi « %s »\n"
+
+#: readelf.c:2922
+#, c-format
+msgid "Invalid option '-%c'\n"
+msgstr "Tùy chọn không hợp lệ « -%c »\n"
+
+#: readelf.c:2936
+msgid "Nothing to do.\n"
+msgstr "Không có gì cần làm.\n"
+
+#: readelf.c:2948 readelf.c:2964 readelf.c:5906 makeinfo/makeinfo.c:4144
+#: ogg123/cfgfile_options.c:165 ../app/layer_dialog.c:525
+#: ../src/nm-ap-security.c:310 datebook_gui.c:1823
+#, c-format
+msgid "none"
+msgstr "không có"
+
+#: readelf.c:2965
+msgid "2's complement, little endian"
+msgstr "phần bù của 2, tính trạng cuối nhỏ"
+
+#: readelf.c:2966
+msgid "2's complement, big endian"
+msgstr "phần bù của 2, tính trạng cuối lớn"
+
+#: readelf.c:2984
+msgid "Not an ELF file - it has the wrong magic bytes at the start\n"
+msgstr ""
+"Không phải là tập tin ELF — có những byte ma thuật không đúng tại đầu nó.\n"
+
+#: readelf.c:2992
+#, c-format
+msgid "ELF Header:\n"
+msgstr "Dòng đầu ELF:\n"
+
+#: readelf.c:2993
+#, c-format
+msgid " Magic: "
+msgstr " Ma thuật: "
+
+#: readelf.c:2997
+#, c-format
+msgid " Class: %s\n"
+msgstr " Class: %s\n"
+
+#: readelf.c:2999
+#, c-format
+msgid " Data: %s\n"
+msgstr " Data: %s\n"
+
+#: readelf.c:3001
+#, c-format
+msgid " Version: %d %s\n"
+msgstr " Version: %d %s\n"
+
+#: readelf.c:3008
+#, c-format
+msgid " OS/ABI: %s\n"
+msgstr " OS/ABI: %s\n"
+
+#: readelf.c:3010
+#, c-format
+msgid " ABI Version: %d\n"
+msgstr " Phiên bản ABI: %d\n"
+
+#: readelf.c:3012
+#, c-format
+msgid " Type: %s\n"
+msgstr " Type: %s\n"
+
+#: readelf.c:3014
+#, c-format
+msgid " Machine: %s\n"
+msgstr " Machine: %s\n"
+
+#: readelf.c:3016
+#, c-format
+msgid " Version: 0x%lx\n"
+msgstr " Version: 0x%lx\n"
+
+#: readelf.c:3019
+#, c-format
+msgid " Entry point address: "
+msgstr " Địa chỉ điểm vào : "
+
+#: readelf.c:3021
+#, c-format
+msgid ""
+"\n"
+" Start of program headers: "
+msgstr ""
+"\n"
+" Điểm đầu các dòng đầu chương trình: "
+
+#: readelf.c:3023
+#, c-format
+msgid ""
+" (bytes into file)\n"
+" Start of section headers: "
+msgstr ""
+" (byte vào tập tin)\n"
+" Đầu các dòng đầu phần: "
+
+#: readelf.c:3025
+#, c-format
+msgid " (bytes into file)\n"
+msgstr " (byte vào tập tin)\n"
+
+#: readelf.c:3027
+#, c-format
+msgid " Flags: 0x%lx%s\n"
+msgstr " Flags: 0x%lx%s\n"
+
+#: readelf.c:3030
+#, c-format
+msgid " Size of this header: %ld (bytes)\n"
+msgstr " Cỡ dòng đầu này: %ld (byte)\n"
+
+#: readelf.c:3032
+#, c-format
+msgid " Size of program headers: %ld (bytes)\n"
+msgstr " Cỡ các dòng đầu chương trình: %ld (byte)\n"
+
+#: readelf.c:3034
+#, c-format
+msgid " Number of program headers: %ld\n"
+msgstr " Số dòng đầu chương trình: %ld\n"
+
+#: readelf.c:3036
+#, c-format
+msgid " Size of section headers: %ld (bytes)\n"
+msgstr " Cỡ các dòng đầu phần: %ld (byte)\n"
+
+#: readelf.c:3038
+#, c-format
+msgid " Number of section headers: %ld"
+msgstr " Số dòng đầu phần: %ld"
+
+#: readelf.c:3043
+#, c-format
+msgid " Section header string table index: %ld"
+msgstr " Chỉ mục bảng chuỗi dòng đầu phần: %ld"
+
+#: readelf.c:3074 readelf.c:3107
+msgid "program headers"
+msgstr "các dòng đầu chương trình"
+
+#: readelf.c:3145 readelf.c:3446 readelf.c:3487 readelf.c:3546 readelf.c:3609
+#: readelf.c:3993 readelf.c:4017 readelf.c:5247 readelf.c:5291 readelf.c:5489
+#: readelf.c:6450 readelf.c:6464 readelf.c:11493 readelf.c:11912
+#: readelf.c:11979 src/bus/buses.c:69 src/cmd/include.c:47 src/detect.c:252
+#: src/jtag.c:159 src/jtag.c:270
+msgid "Out of memory\n" +msgstr "Hết bá»™ nhá»›\n" + +#: readelf.c:3172 +#, c-format +msgid "" +"\n" +"There are no program headers in this file.\n" +msgstr "" +"\n" +"Không có dòng đầu chÆ°Æ¡ng trình nào trong tập tin này.\n" + +#: readelf.c:3178 +#, c-format +msgid "" +"\n" +"Elf file type is %s\n" +msgstr "" +"\n" +"Kiểu tập tin Elf là %s\n" + +#: readelf.c:3179 +#, c-format +msgid "Entry point " +msgstr "Äiểm vào" + +#: readelf.c:3181 +#, c-format +msgid "" +"\n" +"There are %d program headers, starting at offset " +msgstr "" +"\n" +"Có %d dòng đầu chÆ°Æ¡ng trình, bắt đầu tại hiệu số" + +#: readelf.c:3193 readelf.c:3195 +#, c-format +msgid "" +"\n" +"Program Headers:\n" +msgstr "" +"\n" +"Dòng đầu chÆ°Æ¡ng trình:\n" + +#: readelf.c:3199 +#, c-format +msgid "" +" Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align\n" +msgstr " Kiểu HIệu ÄChỉẢo ÄChỉVật CỡTập CỡNhá»› Cá» Cạnh lá»\n" + +#: readelf.c:3202 +#, c-format +msgid "" +" Type Offset VirtAddr PhysAddr FileSiz " +"MemSiz Flg Align\n" +msgstr "" +" Kiểu HIệu Äịa Chỉ Ảo Äịa Chỉ Vật lý CỡTập CỡNhá»› Cá» Cạnh lá»\n" + +#: readelf.c:3206 +#, c-format +msgid " Type Offset VirtAddr PhysAddr\n" +msgstr " Kiểu HIệu Äịa Chỉ Ảo Äịa Chỉ Vật lý\n" + +#: readelf.c:3208 +#, c-format +msgid " FileSiz MemSiz Flags Align\n" +msgstr " FileSiz MemSiz Flags Align\n" + +#: readelf.c:3301 +msgid "more than one dynamic segment\n" +msgstr "hÆ¡n má»™t phân Ä‘oạn Ä‘á»™ng\n" + +#: readelf.c:3312 +msgid "no .dynamic section in the dynamic segment" +msgstr "không có phân Ä‘oạn « .dynamic » (Ä‘á»™ng) trong phân Ä‘oạn Ä‘á»™ng" + +#: readelf.c:3321 +msgid "the .dynamic section is not contained within the dynamic segment" +msgstr "phần « .dynamic » (Ä‘á»™ng) không có được chứa ở trong phân Ä‘oạn Ä‘á»™ng" + +#: readelf.c:3323 +msgid "the .dynamic section is not the first section in the dynamic segment." +msgstr "" +"phần « .dynamic » (Ä‘á»™ng) không phải là phần thứ nhất trong phân Ä‘oạn Ä‘á»™ng." 
+
+#: readelf.c:3337
+msgid "Unable to find program interpreter name\n"
+msgstr "Không tìm thấy tên bộ giải dịch chương trình\n"
+
+#: readelf.c:3344
+#, c-format
+msgid ""
+"\n"
+" [Requesting program interpreter: %s]"
+msgstr ""
+"\n"
+" [Đang yêu cầu bộ giải dịch chương trình: %s]"
+
+#: readelf.c:3356
+#, c-format
+msgid ""
+"\n"
+" Section to Segment mapping:\n"
+msgstr ""
+"\n"
+" Ánh xạ Phần đến Phân đoạn:\n"
+
+#: readelf.c:3357
+#, c-format
+msgid " Segment Sections...\n"
+msgstr " Các phần phân đoạn...\n"
+
+#: readelf.c:3408
+msgid "Cannot interpret virtual addresses without program headers.\n"
+msgstr "Không thể giải dịch địa chỉ ảo khi không có dòng đầu chương trình.\n"
+
+#: readelf.c:3424
+#, c-format
+msgid "Virtual address 0x%lx not located in any PT_LOAD segment.\n"
+msgstr ""
+"Địa chỉ ảo 0x%lx không được định vị trong phân đoạn kiểu « PT_LOAD » nào.\n"
+
+#: readelf.c:3438 readelf.c:3479
+msgid "section headers"
+msgstr "dòng đầu phần"
+
+#: readelf.c:3523 readelf.c:3586
+msgid "symbols"
+msgstr "ký hiệu"
+
+#: readelf.c:3533 readelf.c:3596
+msgid "symtab shndx"
+msgstr "symtab shndx"
+
+#: readelf.c:3697 readelf.c:3977
+#, c-format
+msgid ""
+"\n"
+"There are no sections in this file.\n"
+msgstr ""
+"\n"
+"Không có phần nào trong tập tin này.\n"
+
+#: readelf.c:3703
+#, c-format
+msgid "There are %d section headers, starting at offset 0x%lx:\n"
+msgstr "Có %d dòng đầu phần, bắt đầu tại hiệu số 0x%lx:\n"
+
+#: readelf.c:3720 readelf.c:4079 readelf.c:4290 readelf.c:4591 readelf.c:5011
+#: readelf.c:6618
+msgid "string table"
+msgstr "bảng chuỗi"
+
+#: readelf.c:3765
+msgid "File contains multiple dynamic symbol tables\n"
+msgstr "Tập tin chứa nhiều bảng ký hiệu động\n"
+
+#: readelf.c:3777
+msgid "File contains multiple dynamic string tables\n"
+msgstr "Tập tin chứa nhiều bảng chuỗi động\n"
+
+#: readelf.c:3782
+msgid "dynamic strings"
+msgstr "chuỗi động"
+
+#: readelf.c:3789
+msgid "File contains multiple symtab shndx tables\n"
+msgstr "Tập tin chứa nhiều bảng symtab shndx\n"
+
+#: readelf.c:3828
+#, c-format
+msgid ""
+"\n"
+"Section Headers:\n"
+msgstr ""
+"\n"
+"Dòng đầu phần:\n"
+
+#: readelf.c:3830
+#, c-format
+msgid ""
+"\n"
+"Section Header:\n"
+msgstr ""
+"\n"
+"Dòng đầu phần:\n"
+
+#: readelf.c:3834
+#, c-format
+msgid ""
+" [Nr] Name Type Addr Off Size ES Flg Lk "
+"Inf Al\n"
+msgstr ""
+" [Nr] Name Type Addr Off Size ES Flg Lk "
+"Inf Al\n"
+
+#: readelf.c:3837
+#, c-format
+msgid ""
+" [Nr] Name Type Address Off Size ES "
+"Flg Lk Inf Al\n"
+msgstr ""
+" [Nr] Name Type Address Off Size ES "
+"Flg Lk Inf Al\n"
+
+#: readelf.c:3840
+#, c-format
+msgid " [Nr] Name Type Address Offset\n"
+msgstr " [Nr] Name Type Address Offset\n"
+
+#: readelf.c:3841
+#, c-format
+msgid " Size EntSize Flags Link Info Align\n"
+msgstr " Size EntSize Flags Link Info Align\n"
+
+#: readelf.c:3936
+#, c-format
+msgid ""
+"Key to Flags:\n"
+" W (write), A (alloc), X (execute), M (merge), S (strings)\n"
+" I (info), L (link order), G (group), x (unknown)\n"
+" O (extra OS processing required) o (OS specific), p (processor specific)\n"
+msgstr ""
+"Chú giải cờ :\n"
+"\tW\tghi\n"
+" \tA\tcấp phát\n"
+"\tX\tthực hiện\n"
+"\tM\ttrộn\n"
+"\tS\tcác chuỗi\n"
+"\tI\tthông tin\n"
+"\tL\tthứ tự liên kết\n"
+"\tG\tnhóm\n"
+"\tx\tkhông rõ\n"
+"\tO\tcần thiết xử lý hệ điều hành thêm\n"
+"\to \tđặc trưng cho hệ điều hành\n"
+"\tp\tđặc trưng cho bộ xử lý\n"
+
+#: readelf.c:3954
+#, c-format
+msgid "[<unknown>: 0x%x]"
+msgstr "[<không rõ>: 0x%x]"
+
+#: readelf.c:3984
+msgid "Section headers are 
not available!\n"
+msgstr "Dòng đầu phần không sẵn sàng.\n"
+
+#: readelf.c:4008
+#, c-format
+msgid ""
+"\n"
+"There are no section groups in this file.\n"
+msgstr ""
+"\n"
+"Không có nhóm phần nào trong tập tin này.\n"
+
+#: readelf.c:4042
+#, c-format
+msgid "Bad sh_link in group section `%s'\n"
+msgstr "Có liên kết « sh_link » sai trong phần nhóm « %s »\n"
+
+#: readelf.c:4061
+#, c-format
+msgid "Bad sh_info in group section `%s'\n"
+msgstr "Có thông tin « sh_info » sai trong phần nhóm « %s »\n"
+
+#: readelf.c:4085 readelf.c:6947
+msgid "section data"
+msgstr "dữ liệu phần"
+
+#: readelf.c:4097
+#, c-format
+msgid " [Index] Name\n"
+msgstr " [Chỉ mục] Tên\n"
+
+#: readelf.c:4114
+#, c-format
+msgid "section [%5u] already in group section [%5u]\n"
+msgstr "phần [%5u] đã có trong phần nhóm [%5u]\n"
+
+#: readelf.c:4127
+#, c-format
+msgid "section 0 in group section [%5u]\n"
+msgstr "phần 0 trong phần nhóm [%5u]\n"
+
+#: readelf.c:4224
+#, c-format
+msgid ""
+"\n"
+"'%s' relocation section at offset 0x%lx contains %ld bytes:\n"
+msgstr ""
+"\n"
+"phần định vị lại « %s » tại hiệu số 0x%lx chứa %ld byte:\n"
+
+#: readelf.c:4236
+#, c-format
+msgid ""
+"\n"
+"There are no dynamic relocations in this file.\n"
+msgstr ""
+"\n"
+"Không có việc định vị lại động nào trong tập tin này.\n"
+
+#: readelf.c:4260
+#, c-format
+msgid ""
+"\n"
+"Relocation section "
+msgstr ""
+"\n"
+"Phần định vị lại "
+
+#: readelf.c:4265 readelf.c:4666 readelf.c:4680 readelf.c:5025
+#, c-format
+msgid "'%s'"
+msgstr "« %s »"
+
+#: readelf.c:4267 readelf.c:4682 readelf.c:5027
+#, c-format
+msgid " at offset 0x%lx contains %lu entries:\n"
+msgstr " tại hiệu số 0x%lx chứa %lu mục nhập:\n"
+
+#: readelf.c:4308
+#, c-format
+msgid ""
+"\n"
+"There are no relocations in this file.\n"
+msgstr ""
+"\n"
+"Không có việc định vị lại nào trong tập tin này.\n"
+
+#: readelf.c:4482 readelf.c:4862
+msgid "unwind table"
+msgstr "bảng tri ra"
+
+#: readelf.c:4540 readelf.c:4959
+#, c-format
+msgid "Skipping unexpected relocation type %s\n"
+msgstr "Đang nhảy qua kiểu định vị lại bất ngờ %s\n"
+
+#: readelf.c:4598 readelf.c:5018 readelf.c:5069
+#, c-format
+msgid ""
+"\n"
+"There are no unwind sections in this file.\n"
+msgstr ""
+"\n"
+"Không có phần tri ra nào trong tập tin này.\n"
+
+#: readelf.c:4661
+#, c-format
+msgid ""
+"\n"
+"Could not find unwind info section for "
+msgstr ""
+"\n"
+"Không thể tìm thấy phần thông tin tri ra cho "
+
+#: readelf.c:4673
+msgid "unwind info"
+msgstr "thông tin tri ra"
+
+#: readelf.c:4675 readelf.c:5024
+#, c-format
+msgid ""
+"\n"
+"Unwind section "
+msgstr ""
+"\n"
+"Phần tri ra "
+
+#: readelf.c:5228 readelf.c:5272
+msgid "dynamic section"
+msgstr "phần động"
+
+#: readelf.c:5349
+#, c-format
+msgid ""
+"\n"
+"There is no dynamic section in this file.\n"
+msgstr ""
+"\n"
+"Không có phần động nào trong tập tin này.\n"
+
+#: readelf.c:5387
+msgid "Unable to seek to end of file!"
+msgstr "• Không thể tìm tới kết thúc tập tin. 
•"
+
+#: readelf.c:5400
+msgid "Unable to determine the number of symbols to load\n"
+msgstr "Không thể quyết định số ký hiệu cần tải\n"
+
+#: readelf.c:5435
+msgid "Unable to seek to end of file\n"
+msgstr "Không thể tìm tới kết thúc tập tin\n"
+
+#: readelf.c:5442
+msgid "Unable to determine the length of the dynamic string table\n"
+msgstr "Không thể quyết định độ dài của bảng chuỗi động\n"
+
+#: readelf.c:5447
+msgid "dynamic string table"
+msgstr "bảng chuỗi động"
+
+#: readelf.c:5482
+msgid "symbol information"
+msgstr "thông tin ký hiệu"
+
+#: readelf.c:5507
+#, c-format
+msgid ""
+"\n"
+"Dynamic section at offset 0x%lx contains %u entries:\n"
+msgstr ""
+"\n"
+"Phần động tại hiệu số 0x%lx chứa %u mục nhập:\n"
+
+#: readelf.c:5510
+#, c-format
+msgid " Tag Type Name/Value\n"
+msgstr " Thẻ Kiểu Tên/Giá trị\n"
+
+#: readelf.c:5546
+#, c-format
+msgid "Auxiliary library"
+msgstr "Thư viện phụ"
+
+#: readelf.c:5550
+#, c-format
+msgid "Filter library"
+msgstr "Thư viện lọc"
+
+#: readelf.c:5554 src/glade_options.c:191
+#, c-format
+msgid "Configuration file"
+msgstr "Tập tin cấu hình"
+
+#: readelf.c:5558
+#, c-format
+msgid "Dependency audit library"
+msgstr "Thư viện kiểm tra cách phụ thuộc"
+
+#: readelf.c:5562
+#, c-format
+msgid "Audit library"
+msgstr "Thư viện kiểm tra"
+
+#: readelf.c:5580 readelf.c:5608 readelf.c:5636 cardinfo.c:1401
+#: ../gmedia_slice/interface.c:416
+#, c-format
+msgid "Flags:"
+msgstr "Cờ :"
+
+#: readelf.c:5583 readelf.c:5611 readelf.c:5638
+#, c-format
+msgid " None\n"
+msgstr " Không có\n"
+
+#: readelf.c:5759
+#, c-format
+msgid "Shared library: [%s]"
+msgstr "Thư viện dùng chung: [%s]"
+
+#: readelf.c:5762
+#, c-format
+msgid " program interpreter"
+msgstr " bộ giải dịch chương trình"
+
+#: readelf.c:5766
+#, c-format
+msgid "Library soname: [%s]"
+msgstr "soname (tên so) thư viện: [%s]"
+
+#: readelf.c:5770
+#, c-format
+msgid "Library rpath: [%s]"
+msgstr "rpath (đường dẫn r) thư viện: [%s]"
+
+#: readelf.c:5774
+#, c-format
+msgid "Library runpath: [%s]"
+msgstr "runpath (đường dẫn chạy) thư viện: [%s]"
+
+#: readelf.c:5837
+#, c-format
+msgid "Not needed object: [%s]\n"
+msgstr "Đối tượng không cần thiết: [%s]\n"
+
+#: readelf.c:5951
+#, c-format
+msgid ""
+"\n"
+"Version definition section '%s' contains %ld entries:\n"
+msgstr ""
+"\n"
+"Phần định nghĩa phiên bản « %s » chứa %ld mục nhập:\n"
+
+#: readelf.c:5954
+#, c-format
+msgid " Addr: 0x"
+msgstr " ĐChỉ: 0x"
+
+#: readelf.c:5956 readelf.c:6148
+#, c-format
+msgid " Offset: %#08lx Link: %lx (%s)\n"
+msgstr " Hiệu : %#08lx LKết: %lx (%s)\n"
+
+#: readelf.c:5961
+msgid "version definition section"
+msgstr "phần định nghĩa phiên bản"
+
+#: readelf.c:5987
+#, c-format
+msgid " %#06x: Rev: %d Flags: %s"
+msgstr " %#06x: Bản: %d Cờ: %s"
+
+#: readelf.c:5990
+#, c-format
+msgid " Index: %d Cnt: %d "
+msgstr " Chỉ mục: %d Đếm: %d "
+
+#: readelf.c:6001 schroot/sbuild-chroot.c:392 ../mimedir/mimedir-vcard.c:3409
+#, c-format
+msgid "Name: %s\n"
+msgstr "Tên: %s\n"
+
+#: readelf.c:6003
+#, c-format
+msgid "Name index: %ld\n"
+msgstr "Chỉ mục tên: %ld\n"
+
+#: readelf.c:6018
+#, c-format
+msgid " %#06x: Parent %d: %s\n"
+msgstr " %#06x: Mẹ %d: %s\n"
+
+#: readelf.c:6021
+#, c-format
+msgid " %#06x: Parent %d, name index: %ld\n"
+msgstr " %#06x: Mẹ %d, chỉ mục tên: %ld\n"
+
+#: readelf.c:6040
+#, c-format
+msgid ""
+"\n"
+"Version needs section '%s' contains %ld entries:\n"
+msgstr ""
+"\n"
+"Phần cần thiết phiên bản « %s » chứa %ld mục nhập:\n"
+
+#: 
readelf.c:6043
+#, c-format
+msgid " Addr: 0x"
+msgstr " ĐChỉ: 0x"
+
+#: readelf.c:6045
+#, c-format
+msgid " Offset: %#08lx Link to section: %ld (%s)\n"
+msgstr " Hiệu : %#08lx Liên kết đến phần: %ld (%s)\n"
+
+#: readelf.c:6050
+msgid "version need section"
+msgstr "phần cần phiên bản"
+
+#: readelf.c:6072
+#, c-format
+msgid " %#06x: Version: %d"
+msgstr " %#06x: PhBản: %d"
+
+#: readelf.c:6075
+#, c-format
+msgid " File: %s"
+msgstr " Tập tin: %s"
+
+#: readelf.c:6077
+#, c-format
+msgid " File: %lx"
+msgstr " Tập tin: %lx"
+
+#: readelf.c:6079
+#, c-format
+msgid " Cnt: %d\n"
+msgstr " Đếm: %d\n"
+
+#: readelf.c:6097
+#, c-format
+msgid " %#06x: Name: %s"
+msgstr " %#06x: Tên: %s"
+
+#: readelf.c:6100
+#, c-format
+msgid " %#06x: Name index: %lx"
+msgstr " %#06x: Chỉ mục tên: %lx"
+
+#: readelf.c:6103
+#, c-format
+msgid " Flags: %s Version: %d\n"
+msgstr " Cờ: %s Phiên bản: %d\n"
+
+#: readelf.c:6139
+msgid "version string table"
+msgstr "bảng chuỗi phiên bản"
+
+#: readelf.c:6143
+#, c-format
+msgid ""
+"\n"
+"Version symbols section '%s' contains %d entries:\n"
+msgstr ""
+"\n"
+"Phần ký hiệu phiên bản « %s » chứa %d mục nhập:\n"
+
+#: readelf.c:6146
+#, c-format
+msgid " Addr: "
+msgstr " ĐChỉ: "
+
+#: readelf.c:6156
+msgid "version symbol data"
+msgstr "dữ liệu ký hiệu phiên bản"
+
+#: readelf.c:6183
+msgid " 0 (*local*) "
+msgstr " 0 (*local*) "
+
+#: readelf.c:6187
+msgid " 1 (*global*) "
+msgstr " 1 (*toàn cục*) "
+
+#: readelf.c:6223 readelf.c:6677
+msgid "version need"
+msgstr "phiên bản cần"
+
+#: readelf.c:6233
+msgid "version need aux (2)"
+msgstr "phiên bản cần phụ (2)"
+
+#: readelf.c:6275 readelf.c:6740
+msgid "version def"
+msgstr "phbản đ.nghĩa"
+
+#: readelf.c:6294 readelf.c:6755
+msgid "version def aux"
+msgstr "phbản đ.nghĩa phụ"
+
+#: readelf.c:6325
+#, c-format
+msgid ""
+"\n"
+"No version information found in this file.\n"
+msgstr ""
+"\n"
+"Không tìm thấy thông tin phiên bản trong tập tin này.\n"
+
+#: readelf.c:6456
+msgid "Unable to read in dynamic data\n"
+msgstr "Không thể đọc vào dữ liệu động\n"
+
+#: readelf.c:6509
+msgid "Unable to seek to start of dynamic information"
+msgstr "Không thể tìm tới đầu thông tin động"
+
+#: readelf.c:6515
+msgid "Failed to read in number of buckets\n"
+msgstr "Việc đọc vào số xô bị lỗi\n"
+
+#: readelf.c:6521
+msgid "Failed to read in number of chains\n"
+msgstr "Việc đọc vào số dây bị lỗi\n"
+
+#: readelf.c:6541
+#, c-format
+msgid ""
+"\n"
+"Symbol table for image:\n"
+msgstr ""
+"\n"
+"Bảng ký hiệu cho ảnh:\n"
+
+#: readelf.c:6543
+#, c-format
+msgid " Num Buc: Value Size Type Bind Vis Ndx Name\n"
+msgstr " Số xô : Giá trị Cỡ Kiểu Trộn Hiện Ndx Tên\n"
+
+#: readelf.c:6545
+#, c-format
+msgid " Num Buc: Value Size Type Bind Vis Ndx Name\n"
+msgstr " Số xô : Giá trị Cỡ Kiểu Trộn Hiện Ndx Tên\n"
+
+#: readelf.c:6597
+#, c-format
+msgid ""
+"\n"
+"Symbol table '%s' contains %lu entries:\n"
+msgstr ""
+"\n"
+"Bảng ký hiệu « %s » chứa %lu mục nhập:\n"
+
+#: readelf.c:6601
+#, c-format
+msgid " Num: Value Size Type Bind Vis Ndx Name\n"
+msgstr " Số : Giá trị Cỡ Kiểu Trộn Hiện Ndx Tên\n"
+
+#: readelf.c:6603
+#, c-format
+msgid " Num: Value Size Type Bind Vis Ndx Name\n"
+msgstr " Số : Giá trị Cỡ Kiểu Trộn Hiện Ndx Tên\n"
+
+#: readelf.c:6649
+msgid "version data"
+msgstr "dữ liệu phiên bản"
+
+#: readelf.c:6690
+msgid "version need aux (3)"
+msgstr "phiên bản cần phụ (3)"
+
+#: readelf.c:6715
+msgid "bad dynamic symbol"
+msgstr "ký hiệu động sai"
+
+#: readelf.c:6778
+#, c-format 
+msgid ""
+"\n"
+"Dynamic symbol information is not available for displaying symbols.\n"
+msgstr ""
+"\n"
+"Không có thông tin ký hiệu động để hiển thị ký hiệu.\n"
+
+#: readelf.c:6790
+#, c-format
+msgid ""
+"\n"
+"Histogram for bucket list length (total of %lu buckets):\n"
+msgstr ""
+"\n"
+"Biểu đồ tần suất cho độ dài danh sách xô (tổng số %lu xô):\n"
+
+#: readelf.c:6792
+#, c-format
+msgid " Length Number %% of total Coverage\n"
+msgstr " Dài Số %% tổng phạm vi\n"
+
+#: readelf.c:6797 readelf.c:6813 readelf.c:10967 readelf.c:11159
+msgid "Out of memory"
+msgstr "Hết bộ nhớ"
+
+#: readelf.c:6862
+#, c-format
+msgid ""
+"\n"
+"Dynamic info segment at offset 0x%lx contains %d entries:\n"
+msgstr ""
+"\n"
+"Phân đoạn thông tin động tại hiệu số 0x%lx chứa %d mục nhập:\n"
+
+#: readelf.c:6865
+#, c-format
+msgid " Num: Name BoundTo Flags\n"
+msgstr " Số : Tên ĐóngVới Cờ\n"
+
+#: readelf.c:6917
+#, c-format
+msgid ""
+"\n"
+"Assembly dump of section %s\n"
+msgstr ""
+"\n"
+"Việc đổ thanh ghi của phần %s\n"
+
+#: readelf.c:6938
+#, c-format
+msgid ""
+"\n"
+"Section '%s' has no data to dump.\n"
+msgstr ""
+"\n"
+"Phần « %s » không có dữ liệu cần đổ.\n"
+
+#: readelf.c:6943
+#, c-format
+msgid ""
+"\n"
+"Hex dump of section '%s':\n"
+msgstr ""
+"\n"
+"Việc đổ thập lục của phần « %s »:\n"
+
+#: readelf.c:7090
+msgid "badly formed extended line op encountered!\n"
+msgstr "gặp thao tác dòng đã mở rộng dạng sai.\n"
+
+#: readelf.c:7097
+#, c-format
+msgid " Extended opcode %d: "
+msgstr " Opcode (mã thao tác) đã mở rộng %d: "
+
+#: readelf.c:7102
+#, c-format
+msgid ""
+"End of Sequence\n"
+"\n"
+msgstr ""
+"Kết thúc dãy\n"
+"\n"
+
+#: readelf.c:7108
+#, c-format
+msgid "set Address to 0x%lx\n"
+msgstr "đặt Địa chỉ là 0x%lx\n"
+
+#: readelf.c:7113
+#, c-format
+msgid " define new File Table entry\n"
+msgstr " định nghĩa mục nhập Bảng Tập tin mới\n"
+
+#: readelf.c:7114 readelf.c:9032
+#, c-format
+msgid " Entry\tDir\tTime\tSize\tName\n"
+msgstr " Mục\tTMục\tGiờ\tCỡ\tTên\n"
+
+# Variable: don't translate / Biến: đừng dịch
+#: readelf.c:7116
+#, c-format
+msgid " %d\t"
+msgstr " %d\t"
+
+# Variable: don't translate / Biến: đừng dịch
+#: readelf.c:7119 readelf.c:7121 readelf.c:7123 readelf.c:9044 readelf.c:9046
+#: readelf.c:9048
+#, c-format
+msgid "%lu\t"
+msgstr "%lu\t"
+
+# Variable: do not translate/ biến: đừng dịch
+#: readelf.c:7124
+#, c-format
+msgid ""
+"%s\n"
+"\n"
+msgstr ""
+"%s\n"
+"\n"
+
+#: readelf.c:7128
+#, c-format
+msgid "UNKNOWN: length %d\n"
+msgstr "KHÔNG RÕ: độ dài %d\n"
+
+#: readelf.c:7155
+msgid "debug_str section data"
+msgstr "debug_str section data"
+
+#: readelf.c:7173
+msgid ""
+msgstr ""
+
+#: readelf.c:7176
+msgid ""
+msgstr ""
+
+#: readelf.c:7201
+msgid "debug_loc section data"
+msgstr "dữ liệu phần « debug_loc » (định vị gỡ lỗi)"
+
+#: readelf.c:7235
+msgid "debug_range section data"
+msgstr "dữ liệu phần « debug_range » (phạm vi gỡ lỗi)"
+
+#: readelf.c:7307
+#, c-format
+msgid ""
+"%s: skipping unexpected symbol type %s in relocation in section .rela%s\n"
+msgstr ""
+"%s: đang nhảy qua kiểu ký hiệu bất ngờ %s trong việc định vị lại trong phần ."
+"rela%s\n"
+
+#: readelf.c:7321
+#, c-format
+msgid "skipping unexpected symbol type %s in relocation in section .rela.%s\n"
+msgstr ""
+"đang nhảy qua kiểu ký hiệu bất ngờ %s trong việc định vị lại trong phần .rela"
+"%s\n"
+
+#: readelf.c:7565
+#, c-format
+msgid "Unknown TAG value: %lx"
+msgstr "Giá trị TAG (thẻ) không rõ : %lx"
+
+#: readelf.c:7601
+#, c-format
+msgid "Unknown FORM value: %lx"
+msgstr "Giá trị FORM (dạng) không rõ : %lx"
+
+#: readelf.c:7610
+#, c-format
+msgid " %lu byte block: "
+msgstr " Khối %lu byte: "
+
+#: readelf.c:7944
+#, c-format
+msgid "(User defined location op)"
+msgstr "(Thao tác định vị do người dùng định nghĩa)"
+
+#: readelf.c:7946
+#, c-format
+msgid "(Unknown location op)"
+msgstr "(Thao tác định vị không rõ)"
+
+#: readelf.c:8015
+msgid "Internal error: DWARF version is not 2 or 3.\n"
+msgstr "Lỗi nội bộ: phiên bản DWARF không phải là 2 hay 3.\n"
+
+#: readelf.c:8113
+msgid "DW_FORM_data8 is unsupported when sizeof (unsigned long) != 8\n"
+msgstr ""
+"Không hỗ trợ « DW_FORM_data8 » khi « sizeof (unsigned long) != 8 » [kích cỡ "
+"của (dài không ký)]\n"
+
+#: readelf.c:8162
+#, c-format
+msgid " (indirect string, offset: 0x%lx): %s"
+msgstr " (chuỗi gián tiếp, hiệu số: 0x%lx): %s"
+
+#: readelf.c:8171
+#, c-format
+msgid "Unrecognized form: %d\n"
+msgstr "Không nhận diện dạng: %d\n"
+
+#: readelf.c:8256
+#, c-format
+msgid "(not inlined)"
+msgstr "(không đặt trực tiếp)"
+
+#: readelf.c:8259
+#, c-format
+msgid "(inlined)"
+msgstr "(đặt trực tiếp)"
+
+#: readelf.c:8262
+#, c-format
+msgid "(declared as inline but ignored)"
+msgstr "(khai báo là trực tiếp mà bị bỏ qua)"
+
+#: readelf.c:8265
+#, c-format
+msgid "(declared as inline and inlined)"
+msgstr "(khai báo là trực tiếp và đặt trực tiếp)"
+
+#: readelf.c:8268
+#, c-format
+msgid " (Unknown inline attribute value: %lx)"
+msgstr " (Không biết giá trị thuộc tính trực tiếp: %lx)"
+
+#: readelf.c:8413 readelf.c:9537
+#, c-format
+msgid " [without DW_AT_frame_base]"
+msgstr " [không có DW_AT_frame_base (cơ bản khung)]"
+
+#: readelf.c:8416
+#, c-format
+msgid "(location list)"
+msgstr "(danh sách địa điểm)"
+
+#: readelf.c:8534
+#, c-format
+msgid "Unknown AT value: %lx"
+msgstr "Không biết giá trị AT: %lx"
+
+#: readelf.c:8602
+msgid "No comp units in .debug_info section ?"
+msgstr ""
+"Không có đơn vị biên dịch trong phần « .debug_info » (thông tin gỡ lỗi) ?"
+
+#: readelf.c:8611
+#, c-format
+msgid "Not enough memory for a debug info array of %u entries"
+msgstr "Không đủ bộ nhớ cho mảng thông tin gỡ lỗi có %u mục nhập"
+
+#: readelf.c:8619 readelf.c:9630
+#, c-format
+msgid ""
+"The section %s contains:\n"
+"\n"
+msgstr ""
+"Phần %s chứa:\n"
+"\n"
+
+#: readelf.c:8693
+#, c-format
+msgid " Compilation Unit @ %lx:\n"
+msgstr " Đơn vị biên dịch @ %lx:\n"
+
+#: readelf.c:8694
+#, c-format
+msgid " Length: %ld\n"
+msgstr " Dài: %ld\n"
+
+#: readelf.c:8695
+#, c-format
+msgid " Version: %d\n"
+msgstr " Phiên bản: %d\n"
+
+#: readelf.c:8696
+#, c-format
+msgid " Abbrev Offset: %ld\n"
+msgstr " Hiệu số tắt: %ld\n"
+
+#: readelf.c:8697
+#, c-format
+msgid " Pointer Size: %d\n"
+msgstr " Cỡ con trỏ : %d\n"
+
+#: readelf.c:8702
+msgid "Only version 2 and 3 DWARF debug information is currently supported.\n"
+msgstr "Hỗ trợ chỉ thông tin gỡ lỗi phiên bản DWARF 2 và 3 thôi.\n"
+
+#: readelf.c:8717
+msgid "Unable to locate .debug_abbrev section!\n"
+msgstr "Không thể định vị phần « .debug_abbrev » (gỡ lỗi viết tắt)\n"
+
+#: readelf.c:8722
+msgid "debug_abbrev section data"
+msgstr "dữ liệu phần « .debug_abbrev » (gỡ lỗi viết tắt)"
+
+#: readelf.c:8759
+#, c-format
+msgid "Unable to locate entry %lu in the abbreviation table\n"
+msgstr "Không thể định vị mục nhập %lu trong bảng viết tắt\n"
+
+#: readelf.c:8765
+#, c-format
+msgid " <%d><%lx>: Abbrev Number: %lu (%s)\n"
+msgstr " <%d><%lx>: Số viết tắt: %lu (%s)\n"
+
+#: readelf.c:8838
+#, c-format
+msgid "%s section needs a populated .debug_info section\n"
+msgstr "Phần %s cần phần « .debug_info » (thông tin gỡ lỗi) có dữ liệu\n"
+
+#: readelf.c:8845
+#, c-format
+msgid "%s section has more comp units than .debug_info section\n"
+msgstr ""
+"Phần %s có nhiều đơn vị biên dịch hơn phần « .debug_info » (thông tin gỡ "
+"lỗi)\n"
+
+#: readelf.c:8847
+#, c-format
+msgid ""
+"assuming that the pointer size is %d, from the last comp unit in ."
+"debug_info\n"
+"\n"
+msgstr ""
+"giả sử kích cỡ con trỏ là %d, từ đơn vị biên dịch cuối cùng trong « ."
+"debug_info » (thông tin gỡ lỗi)\n"
+"\n"
+
+#: readelf.c:8891
+msgid "extracting information from .debug_info section"
+msgstr "đang trích thông tin ra phần « .debug_info » (thông tin gỡ lỗi)"
+
+#: readelf.c:8909
+#, c-format
+msgid ""
+"\n"
+"Dump of debug contents of section %s:\n"
+"\n"
+msgstr ""
+"\n"
+"Việc đổ nội dung gỡ lỗi của phần %s:\n"
+
+#: readelf.c:8948
+msgid "The line info appears to be corrupt - the section is too small\n"
+msgstr "Hình như dòng bị hỏng — phần quá nhỏ\n"
+
+#: readelf.c:8957
+msgid "Only DWARF version 2 and 3 line info is currently supported.\n"
+msgstr "Hỗ trợ hiện thời chỉ thông tin dòng DWARF phiên bản 2 và 3.\n"
+
+#: readelf.c:8984
+#, c-format
+msgid " Length: %ld\n"
+msgstr " Dài: %ld\n"
+
+#: readelf.c:8985
+#, c-format
+msgid " DWARF Version: %d\n"
+msgstr " Phiên bản DWARF: %d\n"
+
+#: readelf.c:8986
+#, c-format
+msgid " Prologue Length: %d\n"
+msgstr " Dài đoạn mở đầu : %d\n"
+
+#: readelf.c:8987
+#, c-format
+msgid " Minimum Instruction Length: %d\n"
+msgstr " Dài câu lệnh tối thiểu : %d\n"
+
+#: readelf.c:8988
+#, c-format
+msgid " Initial value of 'is_stmt': %d\n"
+msgstr " Giá trị đầu của « is_stmt »: %d\n"
+
+#: readelf.c:8989
+#, c-format
+msgid " Line Base: %d\n"
+msgstr " Cơ bản dòng: %d\n"
+
+#: readelf.c:8990
+#, c-format
+msgid " Line Range: %d\n"
+msgstr " Phạm vi dòng: %d\n"
+
+#: readelf.c:8991
+#, c-format
+msgid " Opcode Base: %d\n"
+msgstr " Cơ bản mã thao tác: %d\n"
+
+#: readelf.c:8992
+#, c-format
+msgid " (Pointer size: %u)\n"
+msgstr " (cỡ con trỏ : %u)\n"
+
+#: readelf.c:9001
+#, c-format
+msgid ""
+"\n"
+" Opcodes:\n"
+msgstr ""
+"\n"
+" Mã thao tác:\n"
+
+#: readelf.c:9004
+#, c-format
+msgid " Opcode %d has %d args\n"
+msgstr " Mã thao tác %d có %d đối số\n"
+
+#: readelf.c:9010
+#, c-format
+msgid ""
+"\n"
+" The Directory Table is empty.\n"
+msgstr ""
+"\n"
+" Bảng Thư mục rỗng\n"
+
+#: readelf.c:9013
+#, c-format
+msgid ""
+"\n"
+" The Directory Table:\n"
+msgstr ""
+"\n"
+" Bảng Thư mục:\n"
+
+# Variable: don't translate / Biến: đừng dịch
+#: readelf.c:9017
+#, c-format
+msgid " %s\n"
+msgstr " %s\n"
+
+#: readelf.c:9028
+#, c-format
+msgid ""
+"\n"
+" The File Name Table is empty.\n"
+msgstr ""
+"\n"
+" Bảng Tên Tập tin rỗng.\n"
+
+#: readelf.c:9031
+#, c-format
+msgid ""
+"\n"
+" The File Name Table:\n"
+msgstr ""
+"\n"
+" Bảng Tên Tập tin:\n"
+
+# Variable: don't translate / Biến: đừng dịch
+#: readelf.c:9039
+#, c-format
+msgid " %d\t"
+msgstr " %d\t"
+
+# Variable: do not translate/ biến: đừng dịch
+#: readelf.c:9050
+#, c-format
+msgid "%s\n"
+msgstr "%s\n"
+
+#. Now display the statements.
+#: readelf.c:9058
+#, c-format
+msgid ""
+"\n"
+" Line Number Statements:\n"
+msgstr ""
+"\n"
+" Câu Số thứ tự Dòng:\n"
+
+#: readelf.c:9073
+#, c-format
+msgid " Special opcode %d: advance Address by %d to 0x%lx"
+msgstr " Mã thao tác đặc biệt %d: nâng cao Địa chỉ bước %d tới 0x%lx"
+
+#: readelf.c:9077
+#, c-format
+msgid " and Line by %d to %d\n"
+msgstr " và Dòng bước %d tới %d\n"
+
+#: readelf.c:9088
+#, c-format
+msgid " Copy\n"
+msgstr " Chép\n"
+
+#: readelf.c:9095
+#, c-format
+msgid " Advance PC by %d to %lx\n"
+msgstr " Nâng cao PC bước %d tới %lx\n"
+
+#: readelf.c:9103
+#, c-format
+msgid " Advance Line by %d to %d\n"
+msgstr " Nâng cao dòng bước %d tới %d\n"
+
+#: readelf.c:9110
+#, c-format
+msgid " Set File Name to entry %d in the File Name Table\n"
+msgstr " Lập Tên Tập tin là mục nhập %d trong Bảng Tên Tập tin\n"
+
+#: readelf.c:9118
+#, c-format
+msgid " Set column to %d\n"
+msgstr " Lập cột là %d\n"
+
+#: readelf.c:9125
+#, c-format
+msgid " Set is_stmt to %d\n"
+msgstr " Lập « is_stmt » (là câu) là %d\n"
+
+#: readelf.c:9130
+#, c-format
+msgid " Set basic block\n"
+msgstr " Lập khối cơ bản\n"
+
+#: readelf.c:9138
+#, c-format
+msgid " Advance PC by constant %d to 0x%lx\n"
+msgstr " Nâng cao PC bước hằng số %d tới 0x%lx\n"
+
+#: readelf.c:9146
+#, c-format
+msgid " Advance PC by fixed size amount %d to 0x%lx\n"
+msgstr " Nâng cao PC bước kích cỡ cố định %d tới 0x%lx\n"
+
+#: readelf.c:9151
+#, c-format
+msgid " Set prologue_end to true\n"
+msgstr " Lập « prologue_end » (kết thúc đoạn mở đầu) là true (đúng)\n"
+
+#: readelf.c:9155
+#, c-format
+msgid " Set epilogue_begin to true\n"
+msgstr " Lập « epilogue_begin » (đầu phần kết) là true (đúng)\n"
+
+#: readelf.c:9161
+#, c-format
+msgid " Set ISA to %d\n"
+msgstr " Lập ISA là %d\n"
+
+#: readelf.c:9165
+#, c-format
+msgid " Unknown opcode %d with operands: "
+msgstr " Gặp opcode (mã thao tác) không rõ %d với tác tử : "
+
+#: readelf.c:9193 readelf.c:9279 readelf.c:9354
+#, c-format
+msgid ""
+"Contents of the %s section:\n"
+"\n"
+msgstr ""
+"Nội dung của phần %s:\n"
+"\n"
+
+#: readelf.c:9233
+msgid "Only DWARF 2 and 3 pubnames are currently supported\n"
+msgstr "Hỗ trợ hiện thời chỉ pubnames (tên công) DWARF phiên bản 2 và 3 thôi\n"
+
+#: readelf.c:9240
+#, c-format
+msgid " Length: %ld\n"
+msgstr " Dài: %ld\n"
+
+#: readelf.c:9242
+#, c-format
+msgid " Version: %d\n"
+msgstr " Phiên bản: %d\n"
+
+#: readelf.c:9244
+#, c-format
+msgid " Offset into .debug_info section: %ld\n"
+msgstr " Hiệu số vào phần « .debug_info » (thông tin gỡ lỗi): %ld\n"
+
+#: readelf.c:9246
+#, c-format
+msgid " Size of area in .debug_info section: %ld\n"
+msgstr ""
+" Kích cỡ của vùng trong phần « .debug_info » (thông tin gỡ lỗi): %ld\n"
+
+#: readelf.c:9249
+#, c-format
+msgid ""
+"\n"
+" Offset\tName\n"
+msgstr ""
+"\n"
+" Hiệu\tTên\n"
+
+#: readelf.c:9300
+#, c-format
+msgid " DW_MACINFO_start_file - lineno: %d filenum: %d\n"
+msgstr ""
+" DW_MACINFO_start_file (bắt đầu tập tin) — số_dòng: %d số_tập_tin: %d\n"
+
+#: readelf.c:9306
+#, c-format
+msgid " DW_MACINFO_end_file\n"
+msgstr " DW_MACINFO_end_file (kết thúc tập tin)\n"
+
+#: readelf.c:9314
+#, c-format
+msgid " DW_MACINFO_define - lineno : %d macro : %s\n"
+msgstr " DW_MACINFO_define (định nghĩa) — số_dòng : %d bộ_lệnh : %s\n"
+
+#: readelf.c:9323
+#, c-format
+msgid " DW_MACINFO_undef - lineno : %d macro : %s\n"
+msgstr " DW_MACINFO_undef (chưa định nghĩa) — số_dòng : %d bộ_lệnh : %s\n"
+
+#: readelf.c:9335
+#, c-format
+msgid " DW_MACINFO_vendor_ext - constant : %d string : %s\n"
+msgstr ""
+" DW_MACINFO_vendor_ext (phần mở rộng nhà bán) — hằng số : %d chuỗi : %s\n"
+
+#: readelf.c:9363
+#, c-format
+msgid " Number TAG\n"
+msgstr " Số THẺ\n"
+
+# Variable: don't translate / Biến: đừng dịch
+#: readelf.c:9369
+#, c-format
+msgid " %ld %s [%s]\n"
+msgstr " %ld %s [%s]\n"
+
+#: readelf.c:9372
+msgid "has children"
+msgstr "có điều con"
+
+#: readelf.c:9372
+msgid "no children"
+msgstr "không có con"
+
+# Variable: don't translate / Biến: đừng dịch
+#: readelf.c:9375
+#, c-format
+msgid " %-18s %s\n"
+msgstr " %-18s %s\n"
+
+#: readelf.c:9410
+msgid ""
+"\n"
+"The .debug_loc section is empty.\n"
+msgstr ""
+"\n"
+"Phần « .debug_loc » (gỡ lỗi định vị) rỗng.\n"
+
+#. FIXME: Should we handle this case?
+#: readelf.c:9455
+msgid "Location lists in .debug_info section aren't in ascending order!\n"
+msgstr ""
+"• Các danh sách địa điểm trong phần « .debug_info » (thông tin gỡ lỗi) không "
+"phải theo thứ tự dần. •\n"
+
+#: readelf.c:9458
+msgid "No location lists in .debug_info section!\n"
+msgstr ""
+"• Không có danh sách địa điểm trong phần « .debug_info » (thông tin gỡ lỗi). "
+"•\n"
+
+#: readelf.c:9461
+#, c-format
+msgid "Location lists in .debug_loc section start at 0x%lx\n"
+msgstr ""
+"Danh sách địa điểm trong phần « .debug_loc » (định vị gỡ lỗi) bắt đầu tại "
+"0x%lx\n"
+
+#: readelf.c:9464
+#, c-format
+msgid ""
+"Contents of the .debug_loc section:\n"
+"\n"
+msgstr ""
+"Nội dung của phần « .debug_loc » (định vị gỡ lỗi):\n"
+"\n"
+
+#: readelf.c:9465
+#, c-format
+msgid " Offset Begin End Expression\n"
+msgstr " Hiệu Đầu Cuối Biểu thức\n"
+
+#: readelf.c:9495
+#, c-format
+msgid "There is a hole [0x%lx - 0x%lx] in .debug_loc section.\n"
+msgstr ""
+"Có một lỗ [0x%lx - 0x%lx] trong phần « .debug_loc » (định vị gỡ lỗi).\n"
+
+#: readelf.c:9498
+#, c-format
+msgid "There is an overlap [0x%lx - 0x%lx] in .debug_loc section.\n"
+msgstr ""
+"Có một nơi chồng lấp [0x%lx - 0x%lx] trong phần « .debug_loc » (định vị gỡ "
+"lỗi).\n"
+
+#: readelf.c:9512 readelf.c:9837
+#, c-format
+msgid " %8.8lx \n"
+msgstr " %8.8lx \n"
+
+#: readelf.c:9540 readelf.c:9854
+msgid " (start == end)"
+msgstr " (start == end)"
+
+#: readelf.c:9542 readelf.c:9856
+msgid " (start > end)"
+msgstr " (start > end)"
+
+#: readelf.c:9566
+#, c-format
+msgid ""
+"\n"
+"The .debug_str section is empty.\n"
+msgstr ""
+"\n"
+"Phần « .debug_str » (chuỗi gỡ lỗi) rỗng.\n"
+
+#: readelf.c:9570
+#, c-format
+msgid ""
+"Contents of the .debug_str section:\n"
+"\n"
+msgstr ""
+"Nội dung của phần « .debug_str » (chuỗi gỡ lỗi):\n"
+"\n"
+
+#: readelf.c:9675
+msgid "Only DWARF 2 and 3 aranges are currently supported.\n"
+msgstr "Hỗ trợ hiện thời chỉ arange (phạm vi a) DWARF phiên bản 2 và 3 thôi.\n"
+
+#: readelf.c:9679
+#, c-format
+msgid " Length: %ld\n"
+msgstr " Dài: %ld\n"
+
+#: readelf.c:9680
+#, c-format
+msgid " Version: %d\n"
+msgstr " Phiên bản: %d\n"
+
+#: readelf.c:9681
+#, c-format
+msgid " Offset into .debug_info: %lx\n"
+msgstr " Hiệu số vào « .debug_info » (thông tin gỡ lỗi): %lx\n"
+
+#: readelf.c:9682
+#, c-format
+msgid " Pointer Size: %d\n"
+msgstr " Kích cỡ con trỏ : %d\n"
+
+#: readelf.c:9683
+#, c-format
+msgid " Segment Size: %d\n"
+msgstr " Kích cỡ phân đoạn: %d\n"
+
+#: readelf.c:9685
+#, c-format
+msgid ""
+"\n"
+" Address Length\n"
+msgstr ""
+"\n"
+" Độ dài địa chỉ\n"
+
+#: readelf.c:9741
+#, c-format
+msgid ""
+"\n"
+"The .debug_ranges section is empty.\n"
+msgstr ""
+"\n"
+"Phần « .debug_ranges » (các phạm vi gỡ lỗi) rỗng.\n"
+
+#. FIXME: Should we handle this case?
+#: readelf.c:9786
+msgid "Range lists in .debug_info section aren't in ascending order!\n"
+msgstr ""
+"• Các danh sách phạm vi trong phần « .debug_info » (thông tin gỡ lỗi) không "
+"phải theo thứ tự dần. •\n"
+
+#: readelf.c:9789
+msgid "No range lists in .debug_info section!\n"
+msgstr ""
+"• Không có danh sách phạm vi trong phần « .debug_info » (thông tin gỡ lỗi). "
+"•\n"
+
+#: readelf.c:9792
+#, c-format
+msgid "Range lists in .debug_ranges section start at 0x%lx\n"
+msgstr ""
+"Danh sách phạm vi trong phần « .debug_ranges » (các phạm vi gỡ lỗi) bắt đầu "
+"tại 0x%lx\n"
+
+#: readelf.c:9795
+#, c-format
+msgid ""
+"Contents of the .debug_ranges section:\n"
+"\n"
+msgstr ""
+"Nội dung của phần « .debug_ranges » (các phạm vi gỡ lỗi):\n"
+"\n"
+
+#: readelf.c:9796
+#, c-format
+msgid " Offset Begin End\n"
+msgstr " Hiệu Đầu Cuối\n"
+
+#: readelf.c:9820
+#, c-format
+msgid "There is a hole [0x%lx - 0x%lx] in .debug_ranges section.\n"
+msgstr ""
+"Có một lỗ [0x%lx - 0x%lx] trong phần « .debug_ranges » (các phạm vi gỡ "
+"lỗi).\n"
+
+#: readelf.c:9823
+#, c-format
+msgid "There is an overlap [0x%lx - 0x%lx] in .debug_ranges section.\n"
+msgstr ""
+"Có một chồng lấp [0x%lx - 0x%lx] trong phần « .debug_ranges » (các phạm vi "
+"gỡ lỗi).\n"
+
+#: readelf.c:10017
+#, c-format
+msgid "The section %s contains:\n"
+msgstr "Phần %s chứa:\n"
+
+#: readelf.c:10663
+#, c-format
+msgid "unsupported or unknown DW_CFA_%d\n"
+msgstr "« DW_CFA_%d » không được hỗ trợ, hay không rõ\n"
+
+#: readelf.c:10688
+#, c-format
+msgid "Displaying the debug contents of section %s is not yet supported.\n"
+msgstr "Chưa hỗ trợ khả năng hiển thị nội dung phần %s.\n"
+
+#: readelf.c:10732
+#, c-format
+msgid ""
+"\n"
+"Section '%s' has no debugging data.\n"
+msgstr ""
+"\n"
+"Phần « %s » không có dữ liệu gỡ lỗi nào.\n"
+
+#: readelf.c:10746
+msgid "debug section data"
+msgstr "dữ liệu phần gỡ lỗi"
+
+#: readelf.c:10765
+#, c-format
+msgid "Unrecognized debug section: %s\n"
+msgstr "Không nhận diện phần gỡ lỗi: %s\n"
+
+#: readelf.c:10800
+#, c-format
+msgid "Section %d was not dumped because it does not exist!\n"
+msgstr "• Phần %d không được đổ vì nó không tồn tại. •\n"
+
+#: readelf.c:10872 readelf.c:11236
+msgid "liblist"
+msgstr "danh sách thư viện"
+
+#: readelf.c:10961
+msgid "options"
+msgstr "tùy chọn"
+
+#: readelf.c:10991
+#, c-format
+msgid ""
+"\n"
+"Section '%s' contains %d entries:\n"
+msgstr ""
+"\n"
+"Phần « %s » chứa %d mục nhập:\n"
+
+#: readelf.c:11152
+msgid "conflict list found without a dynamic symbol table"
+msgstr "tìm danh sách xung đột không có bảng ký hiệu động"
+
+#: readelf.c:11168 readelf.c:11182
+msgid "conflict"
+msgstr "xung đột"
+
+#: readelf.c:11192
+#, c-format
+msgid ""
+"\n"
+"Section '.conflict' contains %lu entries:\n"
+msgstr ""
+"\n"
+"Phần « .conflict » (xung đột) chứa %lu mục nhập:\n"
+
+#: readelf.c:11194
+msgid " Num: Index Value Name"
+msgstr " Số : CMục Giá trị Tên"
+
+#: readelf.c:11243
+msgid "liblist string table"
+msgstr "bảng chuỗi danh sách thư viện"
+
+#: readelf.c:11252
+#, c-format
+msgid ""
+"\n"
+"Library list section '%s' contains %lu entries:\n"
+msgstr ""
+"\n"
+"Phần danh sách thư viện « %s » chứa %lu mục nhập:\n"
+
+#: readelf.c:11303
+msgid "NT_AUXV (auxiliary vector)"
+msgstr "NT_AUXV (véc-tơ phụ)"
+
+#: readelf.c:11305
+msgid "NT_PRSTATUS (prstatus structure)"
+msgstr "NT_PRSTATUS (cấu trúc trạng thái prstatus)"
+
+#: readelf.c:11307
+msgid "NT_FPREGSET (floating point registers)"
+msgstr "NT_FPREGSET (thanh ghi điểm phù động)"
+
+#: readelf.c:11309
+msgid "NT_PRPSINFO (prpsinfo structure)"
+msgstr "NT_PRPSINFO (cấu trúc thông tin prpsinfo)"
+
+#: readelf.c:11311
+msgid "NT_TASKSTRUCT (task structure)"
+msgstr "NT_TASKSTRUCT (cấu trúc tác vụ)"
+
+#: readelf.c:11313
+msgid "NT_PRXFPREG (user_xfpregs structure)"
+msgstr "NT_PRXFPREG (cấu trúc « user_xfpregs »)"
+
+#: readelf.c:11315
+msgid "NT_PSTATUS (pstatus structure)"
+msgstr "NT_PSTATUS (cấu trúc trạng thái pstatus)"
+
+#: readelf.c:11317
+msgid "NT_FPREGS (floating point registers)"
+msgstr "NT_FPREGS (thanh ghi điểm phù động)"
+
+#: readelf.c:11319
+msgid "NT_PSINFO (psinfo structure)"
+msgstr "NT_PSINFO (cấu trúc thông tin psinfo)"
+
+#: readelf.c:11321
+msgid "NT_LWPSTATUS (lwpstatus_t structure)"
+msgstr "NT_LWPSTATUS (cấu trúc trạng thái « lwpstatus_t »)"
+
+#: readelf.c:11323
+msgid "NT_LWPSINFO (lwpsinfo_t structure)"
+msgstr "NT_LWPSINFO (cấu trúc thông tin « lwpsinfo_t »)"
+
+#: readelf.c:11325
+msgid "NT_WIN32PSTATUS (win32_pstatus structure)"
+msgstr "NT_WIN32PSTATUS (cấu trúc trạng thái « win32_pstatus »)"
+
+#: readelf.c:11333
+msgid "NT_VERSION (version)"
+msgstr "NT_VERSION (phiên bản)"
+
+#: readelf.c:11335
+msgid "NT_ARCH (architecture)"
+msgstr "NT_ARCH (kiến trúc)"
+
+#: readelf.c:11340 readelf.c:11362
+#, c-format
+msgid "Unknown note type: (0x%08x)"
+msgstr "Không biết kiểu ghi chú : (0x%08x)"
+
+#. NetBSD core "procinfo" structure.
+#: readelf.c:11352
+msgid "NetBSD procinfo structure"
+msgstr "Cấu trúc thông tin tiến trình procinfo NetBSD"
+
+#: readelf.c:11379 readelf.c:11393
+msgid "PT_GETREGS (reg structure)"
+msgstr "PT_GETREGS (cấu trúc thanh ghi)"
+
+#: readelf.c:11381 readelf.c:11395
+msgid "PT_GETFPREGS (fpreg structure)"
+msgstr "PT_GETFPREGS (cấu trúc thanh ghi « fpreg »)"
+
+# Name: don't translate / Tên: đừng dịch
+#: readelf.c:11401
+#, c-format
+msgid "PT_FIRSTMACH+%d"
+msgstr "PT_FIRSTMACH+%d"
+
+#: readelf.c:11447
+msgid "notes"
+msgstr "ghi chú"
+
+#: readelf.c:11453
+#, c-format
+msgid ""
+"\n"
+"Notes at offset 0x%08lx with length 0x%08lx:\n"
+msgstr ""
+"\n"
+"Gặp ghi chú tại hiệu số 0x%08lx có độ dài 0x%08lx:\n"
+
+#: readelf.c:11455
+#, c-format
+msgid " Owner\t\tData size\tDescription\n"
+msgstr " Chủ\t\tCỡ dữ liệu\tMô tả\n"
+
+#: readelf.c:11474
+#, c-format
+msgid "corrupt note found at offset %x into core notes\n"
+msgstr "tìm ghi chú bị hỏng tại hiệu số %x vào ghi chú lõi\n"
+
+#: readelf.c:11476
+#, c-format
+msgid " type: %x, namesize: %08lx, descsize: %08lx\n"
+msgstr " kiểu: %x, cỡ_tên: %08lx, cỡ_mô_tả: %08lx\n"
+
+#: readelf.c:11574
+#, c-format
+msgid "No note segments present in the core file.\n"
+msgstr "Không có phân đoạn ghi chú trong tập tin lõi.\n"
+
+#: readelf.c:11653
+msgid ""
+"This instance of readelf has been built without support for a\n"
+"64 bit data type and so it cannot read 64 bit ELF files.\n"
+msgstr ""
+"Tức thời readelf này đã được xây dựng\n"
+"không có hỗ trợ kiểu dữ liệu 64-bit\n"
+"nên không thể đọc tập tin ELF kiểu 64-bit.\n"
+
+#: readelf.c:11700 readelf.c:12059
+#, c-format
+msgid "%s: Failed to read file header\n"
+msgstr "%s: việc đọc dòng đầu tập tin bị lỗi\n"
+
+#: readelf.c:11713
+#, c-format
+msgid ""
+"\n"
+"File: %s\n"
+msgstr ""
+"\n"
+"Tập tin: %s\n"
+
+#: readelf.c:11876 readelf.c:11897 readelf.c:11934 readelf.c:12014
+#, c-format
+msgid "%s: failed to read archive header\n"
+msgstr "%s: việc đọc dòng đầu kho bị lỗi\n"
+
+#: readelf.c:11887
+#, c-format
+msgid "%s: failed to skip archive symbol table\n"
+msgstr "%s: việc nhảy qua bảng ký hiệu kho bị lỗi\n"
+
+#: readelf.c:11919
+#, c-format
+msgid "%s: failed to read string table\n"
+msgstr "%s: việc đọc bảng chuỗi bị lỗi\n"
+
+#: readelf.c:11955
+#, c-format
+msgid "%s: invalid archive string table offset %lu\n"
+msgstr "%s: hiệu số bảng chuỗi kho không hợp lệ %lu\n"
+
+#: readelf.c:11971
+#, c-format
+msgid "%s: bad archive file name\n"
+msgstr "%s: tên tập tin kho sai\n"
+
+#: readelf.c:12003
+#, c-format
+msgid "%s: failed to seek to next archive header\n"
+msgstr "%s: việc tìm tới dòng đầu kho kế tiếp bị lỗi\n"
+
+#: readelf.c:12037
+#, c-format
+msgid "'%s': No such file\n"
+msgstr "« %s »: không có tập tin như vậy\n"
+
+#: readelf.c:12039
+#, c-format
+msgid "Could not locate '%s'. System error message: %s\n"
+msgstr "Không thể định vị « %s ». Thông điệp lỗi hệ thống: %s\n"
+
+#: readelf.c:12046
+#, c-format
+msgid "'%s' is not an ordinary file\n"
+msgstr "« %s » không phải là tập tin chuẩn\n"
+
+#: readelf.c:12053
+#, c-format
+msgid "Input file '%s' is not readable.\n"
+msgstr "Tập tin nhập « %s » không có khả năng đọc.\n"
+
+#: rename.c:127
+#, c-format
+msgid "%s: cannot set time: %s"
+msgstr "%s: không thể lập thời gian: %s"
+
+#. We have to clean up here.
+#: rename.c:162 rename.c:200
+#, c-format
+msgid "unable to rename '%s' reason: %s"
+msgstr "không thể đổi tên « %s » vì lý do : %s"
+
+#: rename.c:208
+#, c-format
+msgid "unable to copy file '%s' reason: %s"
+msgstr "không thể sao chép tập tin « %s » vì lý do : %s"
+
+#: resbin.c:132
+#, c-format
+msgid "%s: not enough binary data"
+msgstr "%s: không đủ dữ liệu nhị phân"
+
+#: resbin.c:148
+msgid "null terminated unicode string"
+msgstr "chuỗi Unicode không được chấm dứt rỗng"
+
+#: resbin.c:175 resbin.c:181
+msgid "resource ID"
+msgstr "ID tài nguyên"
+
+#: resbin.c:221
+msgid "cursor"
+msgstr "con chạy"
+
+#: resbin.c:253 resbin.c:260
+msgid "menu header"
+msgstr "dòng đầu trình đơn"
+
+#: resbin.c:270
+msgid "menuex header"
+msgstr "dòng đầu trình đơn menuex"
+
+#: resbin.c:274
+msgid "menuex offset"
+msgstr "hiệu số trình đơn menuex"
+
+#: resbin.c:281
+#, c-format
+msgid "unsupported menu version %d"
+msgstr "phiên bản trình đơn không được hỗ trợ %d"
+
+#: resbin.c:306 resbin.c:321 resbin.c:384
+msgid "menuitem header"
+msgstr "dòng đầu mục trình đơn"
+
+#: resbin.c:414
+msgid "menuitem"
+msgstr "mục trình đơn"
+
+#: resbin.c:453 resbin.c:481
+msgid "dialog header"
+msgstr "dòng đầu đối thoại"
+
+#: resbin.c:471
+#, c-format
+msgid "unexpected DIALOGEX version %d"
+msgstr "đối thoại DIALOGEX phiên bản bất ngờ %d"
+
+#: resbin.c:516
+msgid "dialog font point size"
+msgstr "kích cỡ điểm phông chữ đối thoại"
+
+#: resbin.c:524
+msgid "dialogex font information"
+msgstr "thông tin phông chữ đối thoại dialogex"
+
+#: resbin.c:550 resbin.c:568
+msgid "dialog control"
+msgstr "điều khiển đối thoại"
+
+#: resbin.c:560
+msgid "dialogex control"
+msgstr "điều khiển đối thoại dialogex"
+
+#: resbin.c:589
+msgid "dialog control end"
+msgstr "kết thúc điều khiển đối thoại"
+
+#: resbin.c:601
+msgid "dialog control data"
+msgstr "dữ liệu điều khiển đối thoại"
+
+#: resbin.c:642
+msgid "stringtable string length"
+msgstr "độ dài bảng chuỗi"
+
+#: resbin.c:652
+msgid "stringtable string"
+msgstr "chuỗi bảng chuỗi"
+
+#: resbin.c:683
+msgid "fontdir header"
+msgstr "dòng đầu thư mục phông chữ"
+
+#: resbin.c:696
+msgid "fontdir"
+msgstr "thư mục phông chữ"
+
+#: resbin.c:712
+msgid "fontdir device name"
+msgstr "tên thiết bị thư mục phông chữ"
+
+#: resbin.c:718
+msgid "fontdir face name"
+msgstr "tên mặt thư mục phông chữ"
+
+#: resbin.c:759
+msgid "accelerator"
+msgstr "phím tắt"
+
+#: resbin.c:819
+msgid "group cursor header"
+msgstr "dòng đầu con chạy nhóm"
+
+#: resbin.c:823
+#, c-format
+msgid "unexpected group cursor type %d"
+msgstr "kiểu con chạy nhóm bất ngờ %d"
+
+#: resbin.c:838
+msgid "group cursor"
+msgstr "con chạy nhóm"
+
+#: resbin.c:875
+msgid "group icon header"
+msgstr "dòng đầu biểu tượng nhóm"
+
+#: resbin.c:879
+#, c-format
+msgid "unexpected group icon type %d"
+msgstr "kiểu biểu tượng nhóm bất ngờ %d"
+
+#: resbin.c:894
+msgid "group icon"
+msgstr "biểu tượng nhóm"
+
+#: resbin.c:957 resbin.c:1174
+msgid "unexpected version string"
+msgstr "chuỗi phiên bản bất ngờ"
+
+#: resbin.c:989
+#, c-format
+msgid "version length %d does not match resource length %lu"
+msgstr "độ dài phiên bản %d không khớp độ dài tài nguyên %lu."
+
+#: resbin.c:993
+#, c-format
+msgid "unexpected version type %d"
+msgstr "kiểu phiên bản bất ngờ %d"
+
+#: resbin.c:1005
+#, c-format
+msgid "unexpected fixed version information length %d"
+msgstr "độ dài thông tin phiên bản cố định bất ngờ %d"
+
+#: resbin.c:1008
+msgid "fixed version info"
+msgstr "thông tin phiên bản cố định"
+
+#: resbin.c:1012
+#, c-format
+msgid "unexpected fixed version signature %lu"
+msgstr "chữ ký phiên bản cố định bất ngờ %lu"
+
+#: resbin.c:1016
+#, c-format
+msgid "unexpected fixed version info version %lu"
+msgstr "phiên bản thông tin phiên bản cố định bất ngờ %lu"
+
+#: resbin.c:1045
+msgid "version var info"
+msgstr "thông tin tạm phiên bản"
+
+#: resbin.c:1062
+#, c-format
+msgid "unexpected stringfileinfo value length %d"
+msgstr "độ dài giá trị thông tin tập tin chuỗi bất ngờ %d"
+
+#: resbin.c:1072
+#, c-format
+msgid "unexpected version stringtable value length %d"
+msgstr "độ dài giá trị bảng chuỗi phiên bản bất ngờ %d"
+
+#: resbin.c:1106
+#, c-format
+msgid "unexpected version string length %d != %d + %d"
+msgstr "độ dài chuỗi phiên bản bất ngờ %d != %d + %d"
+
+#: resbin.c:1117
+#, c-format
+msgid "unexpected version string length %d < %d"
+msgstr "độ dài chuỗi phiên bản bất ngờ %d < %d"
+
+#: resbin.c:1134
+#, c-format
+msgid "unexpected varfileinfo value length %d"
+msgstr "độ dài giá trị thông tin tập tin tạm bất ngờ %d"
+
+#: resbin.c:1153
+msgid "version varfileinfo"
+msgstr "thông tin tập tin tạm phiên bản"
+
+#: resbin.c:1168
+#, c-format
+msgid "unexpected version value length %d"
+msgstr "độ dài giá trị phiên bản bất ngờ %d"
+
+#: rescoff.c:126
+msgid "filename required for COFF input"
+msgstr "tên tập tin cần thiết cho dữ liệu nhập COFF"
+
+#: rescoff.c:143
+#, c-format
+msgid "%s: no resource section"
+msgstr "%s: không có phần tài nguyên"
+
+#: rescoff.c:150
+msgid "can't read resource section"
+msgstr "không thể đọc phần tài nguyên"
+
+#: rescoff.c:174
+#, c-format
+msgid "%s: %s: address out of bounds"
+msgstr "%s: %s: địa chỉ ở ngoài phạm vi"
+
+#: rescoff.c:190
+msgid "directory"
+msgstr "thư mục"
+
+#: rescoff.c:218
+msgid "named directory entry"
+msgstr "mục nhập thư mục có tên"
+
+#: rescoff.c:227
+msgid "directory entry name"
+msgstr "tên mục nhập thư mục"
+
+#: rescoff.c:247
+msgid "named subdirectory"
+msgstr "thư mục con có tên"
+
+#: rescoff.c:255
+msgid "named resource"
+msgstr "tài nguyên có tên"
+
+#: rescoff.c:270
+msgid "ID directory entry"
+msgstr "mục nhập thư mục ID"
+
+#: rescoff.c:287
+msgid "ID subdirectory"
+msgstr "thư mục con ID"
+
+#: rescoff.c:295
+msgid "ID resource"
+msgstr "tài nguyên ID"
+
+#: rescoff.c:318
+msgid "resource type unknown"
+msgstr "không biết kiểu tài nguyên"
+
+#: rescoff.c:321
+msgid "data entry"
+msgstr "mục nhập dữ liệu"
+
+#: rescoff.c:329
+msgid "resource data"
+msgstr "dữ liệu tài nguyên"
+
+#: rescoff.c:334
+msgid "resource data size"
+msgstr "kích cỡ dữ liệu tài nguyên"
+
+#: rescoff.c:427
+msgid "filename required for COFF output"
+msgstr "tên tập tin cần thiết cho kết xuất COFF"
+
+#: rescoff.c:719
+msgid "can't get BFD_RELOC_RVA relocation type"
+msgstr "không thể lấy kiểu việc định vị lại « BFD_RELOC_RVA »"
+
+#: resrc.c:238 resrc.c:309
+#, c-format
+msgid "can't open temporary file `%s': %s"
+msgstr "không thể mở tập tin tạm thời « %s »: %s"
+
+#: resrc.c:244
+#, c-format
+msgid "can't redirect stdout: `%s': %s"
+msgstr "không thể chuyển hướng thiết bị xuất chuẩn « %s »: %s"
+
+# Variable: don't translate / Biến: đừng dịch
+#: resrc.c:260
+#, c-format
+msgid "%s %s: %s"
+msgstr "%s %s: %s"
+
+#: resrc.c:305
+#, c-format
+msgid "can't execute `%s': %s"
+msgstr "không thể thực hiện « %s »: %s"
+
+#: resrc.c:314
+#, c-format
+msgid "Using temporary file `%s' to read preprocessor output\n"
+msgstr "Đang dùng tập tin tạm thời « %s » để đọc dữ liệu xuất bộ tiền xử lý\n"
+
+#: resrc.c:321
+#, c-format
+msgid "can't popen `%s': %s"
+msgstr "Không thể popen (mở p) « %s »: %s"
+
+#: resrc.c:323
+#, c-format
+msgid "Using popen to read preprocessor output\n"
+msgstr "Đang dùng popen để đọc dữ liệu xuất bộ tiền xử lý\n"
+
+#: resrc.c:362
+#, c-format
+msgid "Tried `%s'\n"
+msgstr "Đã thử « %s »\n"
+
+#: resrc.c:373
+#, c-format
+msgid "Using `%s'\n"
+msgstr "Đang dùng « %s »\n"
+
+# Variable: don't translate / Biến: đừng dịch
+#: resrc.c:529
+#, c-format
+msgid "%s:%d: %s\n"
+msgstr "%s:%d: %s\n"
+
+#: resrc.c:537
+#, c-format
+msgid "%s: unexpected EOF"
+msgstr "%s: gặp kết thúc tập tin bất ngờ"
+
+#: resrc.c:586
+#, c-format
+msgid "%s: read of %lu returned %lu"
+msgstr "%s: việc đọc %lu đã trả gởi %lu"
+
+#: resrc.c:624 resrc.c:1134
+#, c-format
+msgid "stat failed on bitmap file `%s': %s"
+msgstr "việc lấy các thông tin bị lỗi trên tập tin bitmap « %s »: %s"
+
+#: resrc.c:675
+#, c-format
+msgid "cursor file `%s' does not contain cursor data"
+msgstr "tập tin con chạy « %s » không chứa dữ liệu con chạy"
+
+#: resrc.c:707 resrc.c:1003
+#, c-format
+msgid "%s: fseek to %lu failed: %s"
+msgstr "%s: việc fseek (tìm f) tới %lu bị lỗi: %s"
+
+#: resrc.c:831
+msgid "help ID requires DIALOGEX"
+msgstr "ID trợ giúp cần thiết DIALOGEX (đối thoại)"
+
+#: resrc.c:833
+msgid "control data requires DIALOGEX"
+msgstr "dữ liệu điều khiển cần thiết DIALOGEX (đối thoại)"
+
+#: resrc.c:861
+#, c-format
+msgid "stat failed on font file `%s': %s"
+msgstr "việc lấy các thông tin bị lỗi trên tập tin phông chữ « %s »: %s"
+
+#: resrc.c:972
+#, c-format
+msgid "icon file `%s' does not contain icon data"
+msgstr "tập tin biểu tượng « %s » không chứa dữ liệu biểu tượng"
+
+#: resrc.c:1273 resrc.c:1308
+#, c-format
+msgid "stat failed on file `%s': %s"
+msgstr "việc lấy các thông tin bị lỗi trên tập tin « %s »: %s"
+
+#: resrc.c:1494
+#, c-format
+msgid "can't open `%s' for output: %s"
+msgstr "không thể mở « %s » để xuất: %s"
+
+#: size.c:81
+#, c-format
+msgid " Displays the sizes of sections inside binary files\n"
+msgstr " Hiển thị kích cỡ của các phần ở trong tập tin nhị phân\n"
+
+#: size.c:82
+#, c-format
+msgid " If no input file(s) are specified, a.out is assumed\n"
+msgstr "Nếu chưa ghi rõ tập tin nhập, giả sử a.out\n"
+
+#: size.c:83
+#, c-format
+msgid ""
+" The options are:\n"
+" -A|-B --format={sysv|berkeley} Select output style (default is %s)\n"
+" -o|-d|-x --radix={8|10|16} Display numbers in octal, decimal or "
+"hex\n"
+" -t --totals Display the total sizes (Berkeley "
+"only)\n"
+" --target= Set the binary file format\n"
+" -h --help Display this information\n"
+" -v --version Display the program's version\n"
+"\n"
+msgstr ""
+" Tùy chọn:\n"
+" -A|-B --format={sysv|berkeley}\n"
+"\t\t\tChọn kiểu dáng xuất (mặc định là %s)\n"
+"\t\t\t(dạng thức)\n"
+" -o|-d|-x --radix={8|10|16}\n"
+"\t\t\tHiển thị số dạng bát phân, thập phân hay thập lục\n"
+"\t\t\t(cơ sở)\n"
+" -t --totals Hiển thị các kích cỡ _tổng cộng_ (chỉ "
+"Berkeley)\n"
+" --target= \tLập dạng thức tập tin nhị phân\n"
+"\t\t\t(đích)\n"
+" -h --help Hiển thị _trợ giúp_ này\n"
+" -v --version Hiển thị _phiên bản_ của chương trình này\n"
+"\n"
+
+#: size.c:153
+#, c-format
+msgid "invalid argument to --format: %s"
+msgstr "đối số không hợp lệ tới « --format » (dạng thức): %s"
+
+#: size.c:180
+#, c-format
+msgid "Invalid radix: %s\n"
+msgstr "Cơ sở không hợp lệ: %s\n"
+
+#: srconv.c:1722
+#, c-format
+msgid "Convert a COFF object file into a SYSROFF object file\n"
+msgstr ""
+"Chuyển đổi một tập tin đối tượng COFF thành một tập tin đối tượng SYSROFF\n"
+
+#: srconv.c:1723
+#, c-format
+msgid ""
+" The options are:\n"
+" -q --quick (Obsolete - ignored)\n"
+" -n --noprescan Do not perform a scan to convert commons into defs\n"
+" -d --debug Display information about what is being done\n"
+" -h --help Display this information\n"
+" -v --version Print the program's version number\n"
+msgstr ""
+" Tùy chọn:\n"
+" -q --quick \t(Cũ nên bị bỏ qua)\n"
+" -n --noprescan\n"
+"\t\tĐừng quét để chuyển đổi các điều dùng chung (common)\n"
+"\t\tthành lời định nghĩa (def)\n"
+"\t\t(không quét trước)\n"
+" -d --debug \t\t\tHiển thị thông tin về hành động hiện thời\n"
+"\t\t(gỡ lỗi)\n"
+" -h --help \t\t\tHiển thị _trợ giúp_ này\n"
+" -v --version \t\tIn ra số thứ tự _phiên bản_ của chương trình\n"
+
+#: srconv.c:1866
+#, c-format
+msgid "unable to open output file %s"
+msgstr "không thể mở tập tin kết xuất %s"
+
+#: stabs.c:330 stabs.c:1708
+msgid "numeric overflow"
+msgstr "tràn thuộc số"
+
+#: stabs.c:340
+#, c-format
+msgid "Bad stab: %s\n"
+msgstr "stab sai: %s\n"
+
+#: stabs.c:348
+#, c-format
+msgid "Warning: %s: %s\n"
+msgstr "Cảnh báo : %s: %s\n"
+
+#: stabs.c:458
+#, c-format
+msgid "N_LBRAC not within function\n"
+msgstr "« N_LBRAC » không phải ở trong hàm\n"
+
+#: stabs.c:497
+#, c-format
+msgid "Too many N_RBRACs\n"
+msgstr "Quá nhiều « N_RBRAC »\n"
+
+#: stabs.c:738
+msgid "unknown C++ encoded name"
+msgstr "không biết tên mã C++"
+
+#. Complain and keep going, so compilers can invent new
+#. cross-reference types.
+#: stabs.c:1253
+msgid "unrecognized cross reference type"
+msgstr "không nhận diện kiểu tham chiếu chéo"
+
+#. Does this actually ever happen? Is that why we are worrying
+#. about dealing with it rather than just calling error_type?
+#: stabs.c:1800
+msgid "missing index type"
+msgstr "thiếu kiểu chỉ mục"
+
+#: stabs.c:2114
+msgid "unknown virtual character for baseclass"
+msgstr "không biết ký tự ảo cho hạng cơ bản"
+
+#: stabs.c:2132
+msgid "unknown visibility character for baseclass"
+msgstr "không biết ký tự tính trạng hiển thị cho hạng cơ bản"
+
+#: stabs.c:2318
+msgid "unnamed $vb type"
+msgstr "kiểu $vb chưa có tên"
+
+#: stabs.c:2324
+msgid "unrecognized C++ abbreviation"
+msgstr "không nhận biết viết tắt C++"
+
+#: stabs.c:2400
+msgid "unknown visibility character for field"
+msgstr "không biết ký tự tính trạng hiển thị cho trường"
+
+#: stabs.c:2652
+msgid "const/volatile indicator missing"
+msgstr "thiếu chỉ thị bất biến/hay thay đổi"
+
+#: stabs.c:2888
+#, c-format
+msgid "No mangling for \"%s\"\n"
+msgstr "Không có việc tháo gỡ cho « %s »\n"
+
+#: stabs.c:3188
+msgid "Undefined N_EXCL"
+msgstr "Chưa định nghĩa « N_EXCL »"
+
+#: stabs.c:3268
+#, c-format
+msgid "Type file number %d out of range\n"
+msgstr "Số kiểu tập tin %d ở ngoài phạm vi\n"
+
+#: stabs.c:3273
+#, c-format
+msgid "Type index number %d out of range\n"
+msgstr "Số kiểu chỉ mục %d ở ngoài phạm vi\n"
+
+#: stabs.c:3352
+#, c-format
+msgid "Unrecognized XCOFF type %d\n"
+msgstr "Không nhận diện kiểu XCOFF %d\n"
+
+#: stabs.c:3644
+#, c-format
+msgid "bad mangled name `%s'\n"
+msgstr "tên đã rối sai « %s »\n"
+
+#: stabs.c:3739
+#, c-format
+msgid "no argument types in mangled string\n"
+msgstr "không có kiểu đối số nào trong chuỗi đã rối\n"
+
+#: stabs.c:5093
+#, c-format
+msgid "Demangled name is not a function\n"
+msgstr "Tên đã tháo gỡ không phải là hàm\n"
+
+#: stabs.c:5135
+#, c-format
+msgid "Unexpected type in v3 arglist demangling\n"
+msgstr "Gặp kiểu bất ngờ trong việc tháo gỡ danh sách đối số v3\n"
+
+#: stabs.c:5202
+#, c-format
+msgid "Unrecognized demangle component %d\n"
+msgstr "Không nhận diện thành phần tháo gỡ %d\n"
+
+#: stabs.c:5254
+#, c-format
+msgid "Failed to print demangled template\n"
+msgstr "Việc in ra biểu mẫu đã tháo gỡ bị lỗi\n"
+
+#: stabs.c:5334
+#, c-format
+msgid "Couldn't get demangled builtin type\n"
+msgstr "Không thể lấy kiểu builtin (điều có sẵn) đã tháo gỡ\n"
+
+#: stabs.c:5383
+#, c-format
+msgid "Unexpected demangled varargs\n"
+msgstr "Gặp một số varargs (đối số biến) đã tháo gỡ bất ngờ\n"
+
+#: stabs.c:5390
+#, c-format
+msgid "Unrecognized demangled builtin type\n"
+msgstr "Không nhận diện kiểu builtin (điều có sẵn) đã tháo gỡ\n"
+
+#: strings.c:206
+#, c-format
+msgid "invalid number %s"
+msgstr "số không hợp lệ %s"
+
+#: strings.c:643
+#, c-format
+msgid "invalid integer argument %s"
+msgstr "đối số số nguyên không hợp lệ %s"
+
+#: strings.c:652
+#, c-format
+msgid " Display printable strings in [file(s)] (stdin by default)\n"
+msgstr ""
+" Hiển thị các chuỗi có khả năng in trong [tập tin...] (mặc định là thiết bị "
+"nhập chuẩn)\n"
+
+#: strings.c:653
+#, c-format
+msgid ""
+" The options are:\n"
+" -a - --all Scan the entire file, not just the data section\n"
+" -f --print-file-name Print the name of the file before each string\n"
+" -n --bytes=[number] Locate & print any NUL-terminated sequence of "
+"at\n"
+" - least [number] characters (default 4).\n"
+" -t --radix={o,d,x} Print the location of the string in base 8, 10 "
+"or 16\n"
+" -o An alias for --radix=o\n"
+" -T --target= Specify the binary file format\n"
+" -e --encoding={s,S,b,l,B,L} Select character size and endianness:\n"
+" s = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-"
+"bit\n"
+" -h --help Display this information\n"
+" -v --version Print the program's version number\n"
+msgstr ""
+" Tùy chọn:\n"
+" -a - --all \t\tQuét toàn bộ tập tin, không chỉ phần dữ "
+"liệu\n"
+"\t\t(hết)\n"
+" -f --print-file-name \t\t\t _In ra tên tập tin_ trước mỗi chuỗi\n"
+" -n --bytes=[số]\n"
+"\t\tĐịnh vị và in ra dãy đã chấm dứt RỖNG nào tại số byte này\n"
+" - số ký tự tối thiểu (mặc định là 4).\n"
+" -t --radix={o,d,x}\n"
+"\t\tIn ra địa điểm của chuỗi dạng bát phân, thập phân hay thập lục\n"
+"\t\t(cơ sở)\n"
+" -o \t\tBiệt hiệu cho « --radix=o » \n"
+" -T --target= \t\tGhi rõ dạng thức tập tin nhị phân\n"
+"\t\t(đích)\n"
+" -e --encoding={s,S,b,l,B,L}\n"
+"\t\tChọn kích cỡ ký tự và tính trạng cuối (endian):\n"
+" \ts = 7-bit, S = 8-bit, {b,l} = 16-bit, {B,L} = 32-bit\n"
+" -h --help \t\tHiển thị _trợ giúp_ này\n"
+" -v --version \t\tIn ra số thứ tự _phiên bản_ của chương "
+"trình\n"
+
+#: sysdump.c:649
+#, c-format
+msgid "Print a human readable interpretation of a SYSROFF object file\n"
+msgstr "In ra lời giải dịch tập tin đối tượng SYSROFF cho người đọc\n"
+
+#: sysdump.c:650
+#, c-format
+msgid ""
+" The options are:\n"
+" -h --help Display this information\n"
+" -v --version Print the program's version number\n"
+msgstr ""
+" Tùy chọn:\n"
+" -h --help \t\tHiển thị _trợ giúp_ này\n"
+" -v --version \t\tIn ra số thứ tự _phiên bản_ của chương "
+"trình\n"
+
+#: sysdump.c:715
+#, c-format
+msgid "cannot open input file %s"
+msgstr "không thể mở tập tin nhập %s"
+
+#: version.c:35 ldver.c:42
+#, c-format
+msgid "Copyright 2005 Free Software Foundation, Inc.\n"
+msgstr "Bản quyền © năm 2005 Tổ chức Phần mềm Tự do.\n"
+
+#: version.c:36 ldver.c:43
+#, c-format
+msgid ""
+"This program is free software; you may redistribute it under the terms of\n"
+"the GNU General Public License. This program has absolutely no warranty.\n"
+msgstr ""
+"Chương trình này là phần mềm tự do; bạn có thể phát hành lại\n"
+"nó với điều kiện của Quyền Công Chung GNU (GPL).\n"
+"Chương trình này không bảo đảm gì cả.\n"
+
+#: windres.c:204
+#, c-format
+msgid "can't open %s `%s': %s"
+msgstr "Không thể mở %s « %s »: %s"
+
+#: windres.c:370
+#, c-format
+msgid ": expected to be a directory\n"
+msgstr ": ngờ là thư mục\n"
+
+#: windres.c:382
+#, c-format
+msgid ": expected to be a leaf\n"
+msgstr ": ngờ là lá\n"
+
+#: windres.c:391
+#, c-format
+msgid "%s: warning: "
+msgstr "%s: cảnh báo : "
+
+#: windres.c:393
+#, c-format
+msgid ": duplicate value\n"
+msgstr ": giá trị trùng\n"
+
+#: windres.c:543
+#, c-format
+msgid "unknown format type `%s'"
+msgstr "không biết kiểu dạng thức « %s »"
+
+#: windres.c:544
+#, c-format
+msgid "%s: supported formats:"
+msgstr "%s: dạng thức hỗ trợ :"
+
+#. Otherwise, we give up.
+#: windres.c:627
+#, c-format
+msgid "can not determine type of file `%s'; use the -J option"
+msgstr "không thể quyết định kiểu tập tin « %s »: hãy sử dụng tùy chọn « -J »"
+
+#: windres.c:639
+#, c-format
+msgid "Usage: %s [option(s)] [input-file] [output-file]\n"
+msgstr "Cách sử dụng: %s [tùy_chọn...] [tập_tin_nhập] [tập_tin_xuất]\n"
+
+#: windres.c:641
+#, c-format
+msgid ""
+" The options are:\n"
+" -i --input= Name input file\n"
+" -o --output= Name output file\n"
+" -J --input-format= Specify input format\n"
+" -O --output-format= Specify output format\n"
+" -F --target= Specify COFF target\n"
+" --preprocessor= Program to use to preprocess rc file\n"
+" -I --include-dir= Include directory when preprocessing rc file\n"
+" -D --define [=] Define SYM when preprocessing rc file\n"
+" -U --undefine Undefine SYM when preprocessing rc file\n"
+" -v --verbose Verbose - tells you what it's doing\n"
+" -l --language= Set language when reading rc file\n"
+" --use-temp-file Use a temporary file instead of popen to "
+"read\n"
+" the preprocessor output\n"
+" --no-use-temp-file Use popen (default)\n"
+msgstr ""
+" Tùy chọn:\n"
+" -i --input= \t\t Lập tập tin _nhập_\n"
+" -o --output= \t\t Lập tập tin _xuất_\n"
+" -J --input-format= \t Ghi rõ _dạng thức nhập_\n"
+" -O --output-format= \t Ghi rõ _dạng thức xuất_\n"
+" -F --target=<đích> \t\t\t Ghi rõ _đích_ COFF\n"
+" --preprocessor=\n"
+"\t\tChương trình cần dùng để tiền xử lý tập tin rc (tài nguyên)\n"
+"\t\t(bộ tiền xử lý)\n"
+" -I --include-dir=\n"
+"\t\t_Gồm thư mục_ khi tiền xử lý tập tin rc (tài nguyên)\n"
+" -D --define [=]\n"
+"\t\t_Định nghĩa_ ký hiệu khi tiền xử lý tập tin rc (tài nguyên)\n"
+" -U --undefine \n"
+"\t\t_Hủy định nghĩa_ ký hiệu khi tiền xử lý tập tin rc (tài nguyên)\n"
+" -v --verbose _Chi tiết_: xuất thông tin về hành động hiện "
+"thời\n"
+" -l --language= Lập _ngôn ngữ_ để đọc tập tin rc (tài nguyên)\n"
+" --use-temp-file\n"
+"\t\t_Dùng tập tin tạm thời_ thay vào popen để đọc kết xuất tiền xử lý\n"
+" --no-use-temp-file \t\t\t Dùng popen (mặc định)\n"
+"\t\t(không dùng tập tin tạm thời)\n"
+
+#: windres.c:657
+#, c-format
+msgid " --yydebug Turn on parser debugging\n"
+msgstr " --yydebug Bật khả năng gỡ lỗi kiểu bộ phân tách\n"
+
+#: windres.c:660
+#, c-format
+msgid ""
+" -r Ignored for compatibility with rc\n"
+" -h --help Print this help message\n"
+" -V --version Print version information\n"
+msgstr ""
+" -r\t\t\t\t\t \t \t\t Bị bỏ qua để tương thích với rc (tài nguyên)\n"
+" -h, --help \t\t\t\t In ra _trợ giúp_ này\n"
+" -V, --version \t\t\t\t In ra thông tin _phiên bản_\n"
+
+#: windres.c:664
+#, c-format
+msgid ""
+"FORMAT is one of rc, res, or coff, and is deduced from the file name\n"
+"extension if not specified. A single file name is an input file.\n"
+"No input-file is stdin, default rc. No output-file is stdout, default rc.\n"
+msgstr ""
+"DẠNG THỨC là một của rc, res hay coff, và được quyết định\n"
+"từ phần mở rộng tên tập tin nếu chưa ghi rõ.\n"
+"Một tên tập tin đơn là tập tin nhập. Không có tập tin nhập thì\n"
+"thiết bị nhập chuẩn, mặc định là rc. Không có tập tin xuất thì\n"
+"thiết bị xuất chuẩn, mặc định là rc.\n"
+
+#: windres.c:800
+msgid "invalid option -f\n"
+msgstr "tùy chọn không hợp lệ « -f »\n"
+
+#: windres.c:805
+msgid "No filename following the -fo option.\n"
+msgstr "Không có tên tập tin đi sau tùy chọn « -fo ».\n"
+
+#: windres.c:863
+#, c-format
+msgid ""
+"Option -I is deprecated for setting the input format, please use -J "
+"instead.\n"
+msgstr ""
+"Tùy chọn « -I » bị phản đối để lập dạng thức nhập, hãy dùng « -J » thay "
+"thế.\n"
+
+#: windres.c:981
+msgid "no resources"
+msgstr "không có tài nguyên nào"
+
+#: wrstabs.c:354 wrstabs.c:1915
+#, c-format
+msgid "string_hash_lookup failed: %s"
+msgstr "việc « string_hash_lookup » (tra tìm băm chuỗi) bị lỗi: %s"
+
+#: wrstabs.c:635
+#, c-format
+msgid "stab_int_type: bad size %u"
+msgstr "stab_int_type: (kiểu số nguyên stab) kích cỡ sai %u"
+
+#: wrstabs.c:1393
+#, c-format
+msgid "%s: warning: unknown size for field `%s' in struct"
+msgstr "%s: cảnh báo : không biết kích cỡ cho trường « %s » trong cấu trúc"
+
+#: cmdline/apt-cache.cc:135
+#, c-format
+msgid "Package %s version %s has an unmet dep:\n"
+msgstr "Gói %s phiên bản %s phụ thuộc vào phần mềm chưa có :\n"
+
+#: cmdline/apt-cache.cc:1508
+#, c-format
+msgid "Unable to locate package %s"
+msgstr "Không thể định vị gói %s"
+
+#: cmdline/apt-cache.cc:232
+msgid "Total package names : "
+msgstr "Tổng tên gói: "
+
+#: cmdline/apt-cache.cc:272
+msgid " Normal packages: "
+msgstr " Gói bình thường: "
+
+#: cmdline/apt-cache.cc:273
+msgid " Pure virtual packages: "
+msgstr " Gói ảo nguyên chất: "
+
+#: cmdline/apt-cache.cc:274
+msgid " Single virtual packages: "
+msgstr " Gói ảo đơn: "
+
+#: cmdline/apt-cache.cc:275
+msgid " Mixed virtual packages: "
+msgstr " Gói ảo đã pha trộn: "
+
+#: cmdline/apt-cache.cc:276
+msgid " Missing: "
+msgstr " Thiếu : "
+
+#: cmdline/apt-cache.cc:278
+msgid "Total distinct versions: "
+msgstr "Tổng phiên bản riêng: "
+
+#: cmdline/apt-cache.cc:280
+msgid "Total dependencies: "
+msgstr "Tổng cách phụ thuộc: "
+
+#: cmdline/apt-cache.cc:283
+msgid "Total ver/file relations: "
+msgstr "Tổng cách liên quan phiên bản và tập tin: "
+
+#: cmdline/apt-cache.cc:285
+msgid "Total Provides mappings: "
+msgstr "Tổng cách ảnh xạ Miễn là: "
+
+#: cmdline/apt-cache.cc:297
+msgid "Total globbed strings: "
+msgstr "Tổng chuỗi mở rộng mẫu tìm kiếm: "
+
+#: cmdline/apt-cache.cc:311
+msgid "Total dependency version space: "
+msgstr "Tổng chỗ cho cách phụ thuộc vào phiên bản: "
+
+#: cmdline/apt-cache.cc:316
+msgid "Total slack space: "
+msgstr "Tổng chỗ chưa dùng: "
+
+#: cmdline/apt-cache.cc:324
+msgid "Total space accounted for: "
+msgstr "Tổng chỗ sẽ dùng: "
+
+#: cmdline/apt-cache.cc:446 cmdline/apt-cache.cc:1189
+#, c-format
+msgid "Package file %s is out of sync."
+msgstr "Tập tin gói %s không đồng bộ được."
+
+#: cmdline/apt-cache.cc:1231
+msgid "You must give exactly one pattern"
+msgstr "Bạn phải đưa ra đúng một mẫu"
+
+#: cmdline/apt-cache.cc:1385
+msgid "No packages found"
+msgstr "Không tìm thấy gói"
+
+#: cmdline/apt-cache.cc:1462
+msgid "Package files:"
+msgstr "Tập tin gói:"
+
+#: cmdline/apt-cache.cc:1469 cmdline/apt-cache.cc:1555
+msgid "Cache is out of sync, can't x-ref a package file"
+msgstr ""
+"Bộ nhớ tạm không đồng bộ được nên không thể tham chiếu chéo tập tin gói"
+
+# Variable: do not translate/ biến: đừng dịch
+#: cmdline/apt-cache.cc:1470
+#, c-format
+msgid "%4i %s\n"
+msgstr "%4i %s\n"
+
+#. Show any packages have explicit pins
+#: cmdline/apt-cache.cc:1482
+msgid "Pinned packages:"
+msgstr "Các gói đã ghim:"
+
+#: cmdline/apt-cache.cc:1494 cmdline/apt-cache.cc:1535
+msgid "(not found)"
+msgstr "(không tìm thấy)"
+
+#. Installed version
+#: cmdline/apt-cache.cc:1515
+msgid " Installed: "
+msgstr " Đã cài đặt: "
+
+msgid "(none)"
+msgstr "(không có)"
+
+#. Candidate Version
+#: cmdline/apt-cache.cc:1522
+msgid " Candidate: "
+msgstr " Ứng cử : "
+
+#: cmdline/apt-cache.cc:1532
+msgid " Package pin: "
+msgstr " Ghim gói: "
+
+#. Show the priority tables
+#: cmdline/apt-cache.cc:1541
+msgid " Version table:"
+msgstr " Bảng phiên bản:"
+
+# Variable: do not translate/ biến: đừng dịch
+#: cmdline/apt-cache.cc:1556
+#, c-format
+msgid " %4i %s\n"
+msgstr " %4i %s\n"
+
+#: ftparchive/apt-ftparchive.cc:545 cmdline/apt-get.cc:2260
+#, c-format
+msgid "%s %s for %s %s compiled on %s %s\n"
+msgstr "%s %s cho %s %s được biên dịch vào %s %s\n"
+
+#: cmdline/apt-cache.cc:1658 cmdline/apt-cache.cc:1653
+msgid ""
+"Usage: apt-cache [options] command\n"
+" apt-cache [options] add file1 [file2 ...]\n"
+" apt-cache [options] showpkg pkg1 [pkg2 ...]\n"
+" apt-cache [options] showsrc pkg1 [pkg2 ...]\n"
+"\n"
+"apt-cache is a low-level tool used to manipulate APT's binary\n"
+"cache files, and query information from them\n"
+"\n"
+"Commands:\n"
+" add - Add a package file to the source cache\n"
+" gencaches - Build both the package and source cache\n"
+" showpkg - Show some general information for a single package\n"
+" showsrc - Show source records\n"
+" stats - Show some basic statistics\n"
+" dump - Show the entire file in a terse form\n"
+" dumpavail - Print an available file to stdout\n"
+" unmet - Show unmet dependencies\n"
+" search - Search the package list for a regex pattern\n"
+" show - Show a readable record for the package\n"
+" depends - Show raw dependency information for a package\n"
+" rdepends - Show reverse dependency information for a package\n"
+" pkgnames - List the names of all packages\n"
+" dotty - Generate package graphs for GraphVis\n"
+" xvcg - Generate package graphs for xvcg\n"
+" policy - Show policy settings\n"
+"\n"
+"Options:\n"
+" -h This help text.\n"
+" -p=? The package cache.\n"
+" -s=? The source cache.\n"
+" -q Disable progress indicator.\n"
+" -i Show only important deps for the unmet command.\n"
+" -c=? Read this configuration file\n"
+" -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\n"
+"See the apt-cache(8) and apt.conf(5) manual pages for more information.\n"
+msgstr ""
+"Cách sử dụng: apt-cache [tùy_chọn...] lệnh\n"
+" apt-cache [tùy_chọn...] add tập_tin1 [tập_tin2 ...]\n"
+" apt-cache [tùy_chọn...] showpkg gói1 [gói2 ...]\n"
+" apt-cache [tùy_chọn...] showsrc gói1 [gói2 ...]\n"
+"(cache: \tbộ nhớ tạm;\n"
+"add: \tthêm;\n"
+"showpkg: hiển thị gói;\n"
+"showsrc: \thiển thị nguồn)\n"
+"\n"
+"apt-cache là một công cụ mức thấp dùng để thao tác\n"
+"những tập tin bộ nhớ tạm nhị phân của APT,\n"
+"và cũng để truy vấn thông tin từ những tập tin đó.\n"
+"\n"
+"Lệnh:\n"
+" add\t\t_Thêm_ gói vào bộ nhớ tạm nguồn\n"
+" gencaches\tXây dựng (_tạo ra_) cả gói lẫn _bộ nhớ tạm_ nguồn đều\n"
+" showpkg\t_Hiện_ một phần thông tin chung về một _gói_ riêng lẻ\n"
+" showsrc\t_Hiện_ các mục ghi _nguồn_\n"
+" stats\t\tHiện một phần _thống kê_ cơ bản\n"
+" dump\t\tHiện toàn bộ tập tin dạng ngắn (_đổ_)\n"
+" dumpavail\tIn ra một tập tin _sẵn sàng_ vào thiết bị xuất chuẩn (_đổ_)\n"
+" unmet\t\tHiện các cách phụ thuộc _chưa thực hiện_\n"
+" search\t\t_Tìm kiếm_ mẫu biểu thức chính quy trong danh sách gói\n"
+" show\t\t_Hiển thị_ mục ghi có thể đọc, cho những gói đó\n"
+" depends\tHiện thông tin cách _phụ thuộc_ thô cho gói\n"
+" rdepends\tHiện thông tin cách _phụ thuộc ngược lại_, cho gói\n"
+" pkgnames\tHiện danh sách _tên_ mọi _gói_\n"
+" dotty\t\tTạo ra đồ thị gói cho GraphVis (_nhiều chấm_)\n"
+" xvcg\t\tTạo ra đồ thị gói cho _xvcg_\n"
+" policy\t\tHiển thị các thiết lập _chính thức_\n"
+"\n"
+"Tùy chọn:\n"
+" -h \t\t_Trợ giúp_ này\n"
+" -p=? \t\tBộ nhớ tạm _gói_.\n"
+" -s=? \t\tBộ nhớ tạm _nguồn_.\n"
+" -q \t\tTắt cái chỉ tiến trình (_im_).\n"
+" -i \t\tHiện chỉ những cách phụ thuộc _quan trọng_\n"
+"\t\t\tcho lệnh chưa thực hiện.\n"
+" -c=? \t\tĐọc tập tin _cấu hình_ này\n"
+" -o=? \t\tLập một tùy chọn cấu hình nhiệm ý, v.d. « -o dir::cache=/tmp »\n"
+"Để tìm thông tin thêm thì bạn hãy xem hai trang « man » (hướng dẫn)\n"
+"\t\t\tapt-cache(8) và apt.conf(5).\n"
+
+#: cmdline/apt-cdrom.cc:78
+msgid "Please provide a name for this Disc, such as 'Debian 2.1r1 Disk 1'"
+msgstr "Hãy cung cấp tên cho Đĩa này, như « Debian 2.1r1 Đĩa 1 »"
+
+#: cmdline/apt-cdrom.cc:93
+msgid "Please insert a Disc in the drive and press enter"
+msgstr "Hãy nạp đĩa vào ổ và bấm nút Enter"
+
+#: cmdline/apt-cdrom.cc:117
+msgid "Repeat this process for the rest of the CDs in your set."
+msgstr "Hãy lặp lại tiến trình này cho các Đĩa còn lại trong bộ đĩa của bạn."
+
+#: cmdline/apt-config.cc:41
+msgid "Arguments not in pairs"
+msgstr "Không có các đối số dạng cặp"
+
+#: cmdline/apt-config.cc:76
+msgid ""
+"Usage: apt-config [options] command\n"
+"\n"
+"apt-config is a simple tool to read the APT config file\n"
+"\n"
+"Commands:\n"
+" shell - Shell mode\n"
+" dump - Show the configuration\n"
+"\n"
+"Options:\n"
+" -h This help text.\n"
+" -c=? Read this configuration file\n"
+" -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\n"
+msgstr ""
+"Cách sử dụng: apt-config [tùy_chọn...] lệnh\n"
+"\n"
+"[config: viết tắt cho từ configuration: cấu hình]\n"
+"\n"
+"apt-config là một công cụ đơn giản để đọc tập tin cấu hình APT.\n"
+"\n"
+"Lệnh:\n"
+" shell\t\tChế độ _hệ vỏ_\n"
+" dump\t\tHiển thị cấu hình (_đổ_)\n"
+"\n"
+"Tùy chọn:\n"
+" -h \t\t_Trợ giúp_ này\n"
+" -c=? \t\tĐọc tập tin cấu hình này\n"
+" -o=? \t\tLập một tùy chọn cấu hình nhiệm ý, v.d. « -o dir::cache=/tmp »\n"
+
+#: cmdline/apt-extracttemplates.cc:98
+#, c-format
+msgid "%s not a valid DEB package."
+msgstr "%s không phải là một gói DEB hợp lệ."
+
+#: cmdline/apt-extracttemplates.cc:232
+msgid ""
+"Usage: apt-extracttemplates file1 [file2 ...]\n"
+"\n"
+"apt-extracttemplates is a tool to extract config and template info\n"
+"from debian packages\n"
+"\n"
+"Options:\n"
+" -h This help text\n"
+" -t Set the temp dir\n"
+" -c=? Read this configuration file\n"
+" -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\n"
+msgstr ""
+"Cách sử dụng: apt-extracttemplates tập_tin1 [tập_tin2 ...]\n"
+"\n"
+"[extract: \t\trút;\n"
+"templates: \tnhững biểu mẫu]\n"
+"\n"
+"apt-extracttemplates là một công cụ rút thông tin kiểu cấu hình\n"
+"\tvà biểu mẫu đều từ gói Debian\n"
+"\n"
+"Tùy chọn:\n"
+" -h \t\t_Trợ giúp_ này\n"
+" -t \t\tLập thư mục tạm thời\n"
+"\t\t[temp, tmp: viết tắt cho từ « temporary »: tạm thời]\n"
+" -c=? \t\tĐọc tập tin cấu hình này\n"
+" -o=? \t\tLập một tùy chọn cấu hình nhiệm ý, v.d. « -o dir::cache=/tmp »\n"
+
+#: cmdline/apt-extracttemplates.cc:267 apt-pkg/pkgcachegen.cc:710
+#: apt-pkg/pkgcachegen.cc:699
+#, c-format
+msgid "Unable to write to %s"
+msgstr "Không thể ghi vào %s"
+
+#: cmdline/apt-extracttemplates.cc:310
+msgid "Cannot get debconf version. Is debconf installed?"
+msgstr "Không thể lấy phiên bản debconf. Có cài đặt debconf chưa?"
+
+#: ftparchive/apt-ftparchive.cc:167 ftparchive/apt-ftparchive.cc:341
+#: ftparchive/apt-ftparchive.cc:163 ftparchive/apt-ftparchive.cc:337
+msgid "Package extension list is too long"
+msgstr "Danh sách mở rộng gói quá dài"
+
+#: ftparchive/apt-ftparchive.cc:270 ftparchive/apt-ftparchive.cc:292
+#, c-format
+msgid "Error processing directory %s"
+msgstr "Gặp lỗi khi xử lý thư mục %s"
+
+#: ftparchive/apt-ftparchive.cc:254 ftparchive/apt-ftparchive.cc:250
+msgid "Source extension list is too long"
+msgstr "Danh sách mở rộng nguồn quá dài"
+
+#: ftparchive/apt-ftparchive.cc:371 ftparchive/apt-ftparchive.cc:367
+msgid "Error writing header to contents file"
+msgstr "Gặp lỗi khi ghi phần đầu vào tập tin nội dung"
+
+#: ftparchive/apt-ftparchive.cc:401
+#, c-format
+msgid "Error processing contents %s"
+msgstr "Gặp lỗi khi xử lý nội dung %s"
+
+#: ftparchive/apt-ftparchive.cc:556 ftparchive/apt-ftparchive.cc:551
+msgid ""
+"Usage: apt-ftparchive [options] command\n"
+"Commands: packages binarypath [overridefile [pathprefix]]\n"
+" sources srcpath [overridefile [pathprefix]]\n"
+" contents path\n"
+" release path\n"
+" generate config [groups]\n"
+" clean config\n"
+"\n"
+"apt-ftparchive generates index files for Debian archives. It supports\n"
+"many styles of generation from fully automated to functional replacements\n"
+"for dpkg-scanpackages and dpkg-scansources\n"
+"\n"
+"apt-ftparchive generates Package files from a tree of .debs. The\n"
+"Package file contains the contents of all the control fields from\n"
+"each package as well as the MD5 hash and filesize. An override file\n"
+"is supported to force the value of Priority and Section.\n"
+"\n"
+"Similarly apt-ftparchive generates Sources files from a tree of .dscs.\n"
+"The --source-override option can be used to specify a src override file\n"
+"\n"
+"The 'packages' and 'sources' command should be run in the root of the\n"
+"tree. BinaryPath should point to the base of the recursive search and \n"
+"override file should contain the override flags. Pathprefix is\n"
+"appended to the filename fields if present. Example usage from the \n"
+"Debian archive:\n"
+" apt-ftparchive packages dists/potato/main/binary-i386/ > \\\n"
+" dists/potato/main/binary-i386/Packages\n"
+"\n"
+"Options:\n"
+" -h This help text\n"
+" --md5 Control MD5 generation\n"
+" -s=? Source override file\n"
+" -q Quiet\n"
+" -d=? Select the optional caching database\n"
+" --no-delink Enable delinking debug mode\n"
+" --contents Control contents file generation\n"
+" -c=? Read this configuration file\n"
+" -o=? Set an arbitrary configuration option"
+msgstr ""
+"Cách sử dụng: apt-ftparchive [tùy_chọn...] lệnh\n"
+"\n"
+"[ftparchive: FTP archive: kho FTP]\n"
+"\n"
+"Lệnh: \tpackages binarypath [tập_tin_đè [tiền_tố_đường_dẫn]]\n"
+" \tsources srcpath [tập_tin_đè [tiền_tố_đường_dẫn]]\n"
+" \tcontents path\n"
+" \trelease path\n"
+" \tgenerate config [groups]\n"
+" \tclean config\n"
+"\n"
+"[packages: \tnhững gói;\n"
+"binarypath: \tđường dẫn nhị phân;\n"
+"sources: \t\tnhững nguồn;\n"
+"srcpath: \t\tđường dẫn nguồn;\n"
+"contents path: đường dẫn nội dung;\n"
+"release path: \tđường dẫn bản đã phát hành;\n"
+"generate config [groups]: tạo ra cấu hình [nhóm];\n"
+"clean config: \tcấu hình toàn mới)\n"
+"\n"
+"apt-ftparchive (kho ftp) thì tạo ra tập tin chỉ mục cho kho Debian.\n"
+"Nó hỗ trợ nhiều cách tạo ra, từ cách tự động toàn bộ\n"
+"đến cách thay thế điều hoạt động cho dpkg-scanpackages (dpkg-quét_gói)\n"
+"và dpkg-scansources (dpkg-quét_nguồn).\n"
+"\n"
+"apt-ftparchive tạo ra tập tin Gói ra cây các .deb.\n"
+"Tập tin gói chứa nội dung các trường điều khiển từ mỗi gói,\n"
+"cùng với băm MD5 và kích cỡ tập tin.\n"
+"Hỗ trợ tập tin đè để buộc giá trị Ưu tiên và Phần\n"
+"\n"
+"Tương tự, apt-ftparchive tạo ra tập tin Nguồn ra cây các .dsc\n"
+"Có thể sử dụng tùy chọn « --source-override » (đè nguồn)\n"
+"để ghi rõ tập tin đè nguồn\n"
+"\n"
+"Lệnh « packages » (gói) và « sources » (nguồn) nên chạy tại gốc cây.\n"
+"BinaryPath (đường dẫn nhị phân) nên chỉ tới cơ bản của việc tìm kiếm đệ "
+"quy,\n"
+"và tập tin đè nên chứa những cờ đè.\n"
+"Pathprefix (tiền tố đường dẫn) được phụ thêm vào\n"
+"những trường tên tập tin nếu có.\n"
+"Cách sử dụng thí dụ từ kho Debian:\n"
+" apt-ftparchive packages dists/potato/main/binary-i386/ > \\\n"
+" dists/potato/main/binary-i386/Packages\n"
+"\n"
+"Tùy chọn:\n"
+" -h \t\t_Trợ giúp_ này\n"
+" --md5 \t\tĐiều khiển cách tạo ra MD5\n"
+" -s=? \t\tTập tin đè nguồn\n"
+" -q \t\t_Im_ (không xuất chi tiết)\n"
+" -d=? \t\tChọn _cơ sở dữ liệu_ nhớ tạm tùy chọn\n"
+" --no-delink \tMở chế độ gỡ lỗi _bỏ liên kết_\n"
+" --contents \tĐiều khiển cách tạo ra tập tin _nội dung_\n"
+" -c=? \t\tĐọc tập tin cấu hình này\n"
+" -o=? \t\tLập một tùy chọn cấu hình nhiệm ý, v.d. « -o dir::cache=/tmp »"
+
+#: ftparchive/apt-ftparchive.cc:762 ftparchive/apt-ftparchive.cc:757
+msgid "No selections matched"
+msgstr "Chưa khớp điều đã chọn nào."
+
+#: ftparchive/apt-ftparchive.cc:835 ftparchive/apt-ftparchive.cc:830
+#, c-format
+msgid "Some files are missing in the package file group `%s'"
+msgstr "Thiếu một số tập tin trong nhóm tập tin gói « %s »"
+
+#: ftparchive/cachedb.cc:45
+#, c-format
+msgid "DB was corrupted, file renamed to %s.old"
+msgstr "Cơ sở dữ liệu bị hỏng nên đã đổi tên tập tin thành %s.old (old: cũ)."
+
+#: ftparchive/cachedb.cc:63
+#, c-format
+msgid "DB is old, attempting to upgrade %s"
+msgstr "Cơ sở dữ liệu cũ nên đang cố nâng cấp lên %s"
+
+#: ftparchive/cachedb.cc:73
+#, c-format
+msgid "Unable to open DB file %s: %s"
+msgstr "Không thể mở tập tin cơ sở dữ liệu %s: %s."
+
+#: ftparchive/cachedb.cc:114
+#, c-format
+msgid "File date has changed %s"
+msgstr "Ngày tập tin đã đổi %s"
+
+#: ftparchive/cachedb.cc:155
+msgid "Archive has no control record"
+msgstr "Kho không có mục ghi điều khiển"
+
+#: ftparchive/cachedb.cc:267
+msgid "Unable to get a cursor"
+msgstr "Không thể lấy con chạy"
+
+#: ftparchive/writer.cc:78 ftparchive/writer.cc:79
+#, c-format
+msgid "W: Unable to read directory %s\n"
+msgstr "W: Không thể đọc thư mục %s\n"
+
+#: ftparchive/writer.cc:83 ftparchive/writer.cc:84
+#, c-format
+msgid "W: Unable to stat %s\n"
+msgstr "W: Không thể lấy thông tin toàn bộ cho %s\n"
+
+#: ftparchive/writer.cc:125 ftparchive/writer.cc:126
+msgid "E: "
+msgstr "E: "
+
+#: ftparchive/writer.cc:127 ftparchive/writer.cc:128
+msgid "W: "
+msgstr "W: "
+
+#: ftparchive/writer.cc:134 ftparchive/writer.cc:135
+msgid "E: Errors apply to file "
+msgstr "E: có lỗi áp dụng vào tập tin "
+
+#: ftparchive/writer.cc:151 ftparchive/writer.cc:181 ftparchive/writer.cc:152
+#: ftparchive/writer.cc:182
+#, c-format
+msgid "Failed to resolve %s"
+msgstr "Việc quyết định %s bị lỗi"
+
+#: ftparchive/writer.cc:163 ftparchive/writer.cc:164
+msgid "Tree walking failed"
+msgstr "Việc di chuyển qua cây bị lỗi"
+
+#: ftparchive/writer.cc:188 ftparchive/writer.cc:189
+#, c-format
+msgid "Failed to open %s"
+msgstr "Việc mở %s bị lỗi"
+
+#: ftparchive/writer.cc:245 ftparchive/writer.cc:246
+#, c-format
+msgid " DeLink %s [%s]\n"
+msgstr " Bỏ liên kết %s [%s]\n"
+
+#: ftparchive/writer.cc:253 ftparchive/writer.cc:254
+#, c-format
+msgid "Failed to readlink %s"
+msgstr "Việc đọc liên kết %s bị lỗi"
+
+#: ftparchive/writer.cc:257 ftparchive/writer.cc:258
+#, c-format
+msgid "Failed to unlink %s"
+msgstr "Việc bỏ liên kết %s bị lỗi"
+
+#: ftparchive/writer.cc:264 ftparchive/writer.cc:265
+#, c-format
+msgid "*** Failed to link %s to %s"
+msgstr "*** Việc liên kết %s đến %s bị lỗi"
+
+#: ftparchive/writer.cc:274 ftparchive/writer.cc:275
+#, c-format
+msgid " DeLink limit of %sB hit.\n"
+msgstr " Đã tới giới hạn bỏ liên kết %sB.\n"
+
+#: ftparchive/writer.cc:358 apt-inst/extract.cc:181 apt-inst/extract.cc:193
+#: apt-inst/extract.cc:210 apt-inst/deb/dpkgdb.cc:121 methods/gpgv.cc:260
+#, c-format
+msgid "Failed to stat %s"
+msgstr "Việc lấy thông tin toàn bộ cho %s bị lỗi"
+
+#: ftparchive/writer.cc:386 ftparchive/writer.cc:378
+msgid "Archive had no package field"
+msgstr "Kho không có trường gói"
+
+#: ftparchive/writer.cc:394 ftparchive/writer.cc:603 ftparchive/writer.cc:386
+#: ftparchive/writer.cc:595
+#, c-format
+msgid " %s has no override entry\n"
+msgstr " %s không có mục ghi đè\n"
+
+#: ftparchive/writer.cc:437 ftparchive/writer.cc:689 ftparchive/writer.cc:429
+#: ftparchive/writer.cc:677
+#, c-format
+msgid " %s maintainer is %s not %s\n"
+msgstr " người bảo quản %s là %s không phải %s\n"
+
+#: ftparchive/contents.cc:317
+#, c-format
+msgid "Internal error, could not locate member %s"
+msgstr "Gặp lỗi nội bộ, không thể định vị bộ phận %s"
+
+#: ftparchive/contents.cc:353 ftparchive/contents.cc:384
+#: ftparchive/contents.cc:346 ftparchive/contents.cc:377
+msgid "realloc - Failed to allocate memory"
+msgstr "realloc (cấp phát lại) - việc cấp phát bộ nhớ bị lỗi"
+
+#: ftparchive/override.cc:38 ftparchive/override.cc:146
+#, c-format
+msgid "Unable to open %s"
+msgstr "Không thể mở %s"
+
+#: ftparchive/override.cc:64 ftparchive/override.cc:170
+#, c-format
+msgid "Malformed override %s line %lu #1"
+msgstr "Điều đè dạng sai %s dòng %lu #1"
+
+#: ftparchive/override.cc:78 ftparchive/override.cc:182
+#, c-format
+msgid "Malformed override %s line %lu #2"
+msgstr "Điều đè dạng sai %s dòng %lu #2"
+
+#: ftparchive/override.cc:92 ftparchive/override.cc:195
+#, c-format
+msgid "Malformed override %s line %lu #3"
+msgstr "Điều đè dạng sai %s dòng %lu #3"
+
+#: ftparchive/override.cc:131 ftparchive/override.cc:205
+#, c-format
+msgid "Failed to read the override file %s"
+msgstr "Việc đọc tập tin đè %s bị lỗi"
+
+#: ftparchive/multicompress.cc:75
+#, c-format
+msgid "Unknown compression algorithm '%s'"
+msgstr "Không biết thuật toán nén « %s »"
+
+#: ftparchive/multicompress.cc:105
+#, c-format
+msgid "Compressed output %s needs a compression set"
+msgstr "Dữ liệu xuất đã nén %s cần một bộ nén"
+
+#: ftparchive/multicompress.cc:172 methods/rsh.cc:91
+msgid "Failed to create IPC pipe to subprocess"
+msgstr "Việc tạo ống IPC đến tiến trình con bị lỗi"
+
+#: ftparchive/multicompress.cc:198
+msgid "Failed to create FILE*"
+msgstr "Việc tạo FILE* bị lỗi"
+
+#: ftparchive/multicompress.cc:201
+msgid "Failed to fork"
+msgstr "Việc tạo tiến trình con bị lỗi"
+
+#: ftparchive/multicompress.cc:215
+msgid "Compress child"
+msgstr "Tiến trình con nén"
+
+#: ftparchive/multicompress.cc:238
+#, c-format
+msgid "Internal error, failed to create %s"
+msgstr "Lỗi nội bộ, việc tạo %s bị lỗi"
+
+#: ftparchive/multicompress.cc:289
+msgid "Failed to create subprocess IPC"
+msgstr "Việc tạo tiến trình con IPC bị lỗi"
+
+#: ftparchive/multicompress.cc:324
+msgid "Failed to exec compressor "
+msgstr "Việc thực hiện bộ nén bị lỗi "
+
+#: ftparchive/multicompress.cc:363
+msgid "decompressor"
+msgstr "bộ giải nén"
+
+#: ftparchive/multicompress.cc:406
+msgid "IO to subprocess/file failed"
+msgstr "Việc nhập/xuất vào tiến trình con/tập tin bị lỗi"
+
+#: ftparchive/multicompress.cc:458
+msgid "Failed to read while computing MD5"
+msgstr "Việc đọc khi tính MD5 bị lỗi"
+
+#: ftparchive/multicompress.cc:475
+#, c-format
+msgid "Problem unlinking %s"
+msgstr "Gặp lỗi khi bỏ liên kết %s"
+
+#: ftparchive/multicompress.cc:490 apt-inst/extract.cc:188
+#, c-format
+msgid "Failed to rename %s to %s"
+msgstr "Việc đổi tên %s thành %s bị lỗi"
+
+#: cmdline/apt-get.cc:142 cmdline/apt-get.cc:1515 cmdline/apt-get.cc:140
+#: cmdline/apt-get.cc:1422
+#, c-format
+msgid "Regex compilation error - %s"
+msgstr "Lỗi biên dịch biểu thức chính quy - %s"
+
+#: cmdline/apt-get.cc:237 cmdline/apt-get.cc:235
+msgid "The following packages have unmet dependencies:"
+msgstr "Những gói theo đây phụ thuộc vào phần mềm chưa có :"
+
+#: cmdline/apt-get.cc:327 cmdline/apt-get.cc:325
+#, c-format
+msgid "but %s is installed"
+msgstr "nhưng mà %s đã được cài đặt"
+
+#: cmdline/apt-get.cc:329 cmdline/apt-get.cc:327
+#, c-format
+msgid "but %s is to be installed"
+msgstr "nhÆ°ng mà %s sẽ được cài đặt" + +#: cmdline/apt-get.cc:336 src/cmdline/cmdline_prompt.cc:231 +#: src/cmdline/cmdline_show_broken.cc:83 cmdline/apt-get.cc:334 +#, c-format +msgid "but it is not installable" +msgstr "nhÆ°ng mà nó không có khả năng cài đặt" + +#: cmdline/apt-get.cc:338 cmdline/apt-get.cc:336 +msgid "but it is a virtual package" +msgstr "nhÆ°ng mà nó là gói ảo" + +#: cmdline/apt-get.cc:341 cmdline/apt-get.cc:339 +msgid "but it is not installed" +msgstr "nhÆ°ng mà nó chÆ°a được cài đặt" + +#: cmdline/apt-get.cc:341 cmdline/apt-get.cc:339 +msgid "but it is not going to be installed" +msgstr "nhÆ°ng mà nó sẽ không được cài đặt" + +#: cmdline/apt-get.cc:346 src/cmdline/cmdline_prompt.cc:238 +#: src/cmdline/cmdline_show_broken.cc:90 cmdline/apt-get.cc:344 +#, c-format +msgid " or" +msgstr " hay" + +#: cmdline/apt-get.cc:375 src/cmdline/cmdline_prompt.cc:258 +#: cmdline/apt-get.cc:373 +msgid "The following NEW packages will be installed:" +msgstr "Theo đây có những gói MỚI sẽ được cài đặt:" + +#: cmdline/apt-get.cc:401 src/cmdline/cmdline_prompt.cc:259 +#: cmdline/apt-get.cc:399 +msgid "The following packages will be REMOVED:" +msgstr "Theo đây có những gói sẽ bị Gá»  BỎ :" + +#: cmdline/apt-get.cc:423 src/cmdline/cmdline_prompt.cc:256 +#: cmdline/apt-get.cc:421 +msgid "The following packages have been kept back:" +msgstr "Theo đây có những gói đã được giữ lại:" + +#: cmdline/apt-get.cc:444 src/cmdline/cmdline_prompt.cc:260 +#: cmdline/apt-get.cc:442 +msgid "The following packages will be upgraded:" +msgstr "Theo đây có những gói sẽ được nâng cấp:" + +#: cmdline/apt-get.cc:465 src/cmdline/cmdline_prompt.cc:255 +#: cmdline/apt-get.cc:463 +msgid "The following packages will be DOWNGRADED:" +msgstr "Theo đây có những gói sẽ được HẠ CẤP:" + +#: cmdline/apt-get.cc:485 cmdline/apt-get.cc:483 +msgid "The following held packages will be changed:" +msgstr "Theo đây có những gói sẽ được thay đổi:" + +#: cmdline/apt-get.cc:538 cmdline/apt-get.cc:536 +#, c-format +msgid "%s (due to %s) " +msgstr "%s (do %s)" + +#: cmdline/apt-get.cc:546 +msgid "" +"WARNING: The following essential packages will be removed.\n" +"This should NOT be done unless you know exactly what you are doing!" +msgstr "" +"CẢNH BÃO : theo đây có những gói chủ yếu sẽ bị gỡ bá».\n" +"ÄỪNG làm nhÆ° thế trừ khi bạn biết làm gì ở đây nó má»™t cách chính xác." + +#: cmdline/apt-get.cc:577 cmdline/apt-get.cc:574 +#, c-format +msgid "%lu upgraded, %lu newly installed, " +msgstr "%lu đã nâng cấp, %lu má»›i được cài đặt, " + +#: cmdline/apt-get.cc:581 src/cmdline/cmdline_prompt.cc:516 +#: cmdline/apt-get.cc:578 +#, c-format +msgid "%lu reinstalled, " +msgstr "%lu được cài đặt lại, " + +#: cmdline/apt-get.cc:583 src/cmdline/cmdline_prompt.cc:518 +#: cmdline/apt-get.cc:580 +#, c-format +msgid "%lu downgraded, " +msgstr "%lu được hạ cấp, " + +#: cmdline/apt-get.cc:585 src/cmdline/cmdline_prompt.cc:520 +#: cmdline/apt-get.cc:582 +#, c-format +msgid "%lu to remove and %lu not upgraded.\n" +msgstr "%lu cần gỡ bá», và %lu chÆ°a được nâng cấp.\n" + +#: cmdline/apt-get.cc:589 cmdline/apt-get.cc:586 +#, c-format +msgid "%lu not fully installed or removed.\n" +msgstr "%lu chÆ°a được cài đặt toàn bá»™ hay được gỡ bá».\n" + +#: cmdline/apt-get.cc:649 cmdline/apt-get.cc:646 +msgid "Correcting dependencies..." +msgstr "Äang sá»­a cách phụ thuá»™c..." + +#: cmdline/apt-get.cc:652 cmdline/apt-get.cc:649 +msgid " failed." +msgstr " đã thất bại." 
+
+#: cmdline/apt-get.cc:655 cmdline/apt-get.cc:652
+msgid "Unable to correct dependencies"
+msgstr "Không thể sửa cách phụ thuộc"
+
+#: cmdline/apt-get.cc:658 cmdline/apt-get.cc:655
+msgid "Unable to minimize the upgrade set"
+msgstr "Không thể cực tiểu hóa bộ nâng cấp"
+
+#: cmdline/apt-get.cc:660 cmdline/apt-get.cc:657
+msgid " Done"
+msgstr " Đã xong"
+
+#: cmdline/apt-get.cc:664 cmdline/apt-get.cc:661
+msgid "You might want to run `apt-get -f install' to correct these."
+msgstr "Có lẽ bạn hãy chạy lệnh « apt-get -f install » để sửa hết."
+
+#: cmdline/apt-get.cc:667 cmdline/apt-get.cc:664
+msgid "Unmet dependencies. Try using -f."
+msgstr ""
+"Còn có cách phụ thuộc vào phần mềm chưa có. Như thế thì bạn hãy cố dùng tùy "
+"chọn « -f »."
+
+#: cmdline/apt-get.cc:689
+msgid "WARNING: The following packages cannot be authenticated!"
+msgstr "CẢNH BÁO : không thể xác thực những gói theo đây."
+
+#: cmdline/apt-get.cc:693
+msgid "Authentication warning overridden.\n"
+msgstr "Cảnh báo xác thực bị đè.\n"
+
+#: cmdline/apt-get.cc:700
+msgid "Install these packages without verification [y/N]? "
+msgstr "Cài đặt những gói này mà không kiểm chứng không? [y/N] [c/K] "
+
+#: cmdline/apt-get.cc:702
+msgid "Some packages could not be authenticated"
+msgstr "Một số gói không thể được xác thực"
+
+#: cmdline/apt-get.cc:711 cmdline/apt-get.cc:858 cmdline/apt-get.cc:811
+msgid "There are problems and -y was used without --force-yes"
+msgstr "Gặp lỗi và đã dùng tùy chọn « -y » mà không có « --force-yes »"
+
+#: cmdline/apt-get.cc:755
+msgid "Internal error, InstallPackages was called with broken packages!"
+msgstr "Lỗi nội bộ: InstallPackages (cài đặt gói) được gọi với gói bị hỏng."
+
+#: cmdline/apt-get.cc:764
+msgid "Packages need to be removed but remove is disabled."
+msgstr "Cần phải gỡ bỏ một số gói, nhưng mà khả năng Gỡ bỏ (Remove) đã bị tắt."
+
+#: cmdline/apt-get.cc:775
+msgid "Internal error, Ordering didn't finish"
+msgstr "Gặp lỗi nội bộ: tiến trình Sắp xếp chưa xong"
+
+#: cmdline/apt-get.cc:791 cmdline/apt-get.cc:1809 cmdline/apt-get.cc:1842
+#: cmdline/apt-get.cc:744 cmdline/apt-get.cc:1716 cmdline/apt-get.cc:1749
+msgid "Unable to lock the download directory"
+msgstr "Không thể khoá thư mục tải về"
+
+#: cmdline/apt-get.cc:2008
+msgid "The list of sources could not be read."
+msgstr "Không thể đọc danh sách nguồn."
+
+#: cmdline/apt-get.cc:816
+msgid "How odd.. The sizes didn't match, email apt@packages.debian.org"
+msgstr ""
+"Lạ... Hai kích cỡ không khớp được. Hãy gởi thư cho apt@packages.debian.org"
+
+#: cmdline/apt-get.cc:821 cmdline/apt-get.cc:774
+#, c-format
+msgid "Need to get %sB/%sB of archives.\n"
+msgstr "Cần phải lấy %sB/%sB kho.\n"
+
+#: cmdline/apt-get.cc:824 cmdline/apt-get.cc:777
+#, c-format
+msgid "Need to get %sB of archives.\n"
+msgstr "Cần phải lấy %sB kho.\n"
+
+#: cmdline/apt-get.cc:829 cmdline/apt-get.cc:782
+#, c-format
+msgid "After unpacking %sB of additional disk space will be used.\n"
+msgstr "Sau khi đã giải nén, sẽ chiếm %sB sức chứa đĩa thêm.\n"
+
+#: cmdline/apt-get.cc:832 cmdline/apt-get.cc:785
+#, c-format
+msgid "After unpacking %sB disk space will be freed.\n"
+msgstr "Sau khi đã giải nén, sẽ giải phóng %sB sức chứa đĩa thêm.\n"
+
+#: cmdline/apt-get.cc:846 cmdline/apt-get.cc:1980
+#, c-format
+msgid "Couldn't determine free space in %s"
+msgstr "Không thể quyết định chỗ rảnh trong %s"
+
+#: cmdline/apt-get.cc:849 cmdline/apt-get.cc:802
+#, c-format
+msgid "You don't have enough free space in %s."
+msgstr "Bạn chÆ°a có đủ sức chức còn rảnh trong %s." + +#: cmdline/apt-get.cc:864 cmdline/apt-get.cc:884 cmdline/apt-get.cc:817 +#: cmdline/apt-get.cc:837 +msgid "Trivial Only specified but this is not a trivial operation." +msgstr "" +"Xác Ä‘inh « Chỉ không đáng kể » (Trivial Only) nhÆ°ng mà thao tác này đáng kể." + +#: cmdline/apt-get.cc:866 cmdline/apt-get.cc:819 +msgid "Yes, do as I say!" +msgstr "Có, làm Ä‘i." + +#: cmdline/apt-get.cc:868 +#, c-format +msgid "" +"You are about to do something potentially harmful.\n" +"To continue type in the phrase '%s'\n" +" ?] " +msgstr "" +"Bạn sắp làm gì có thể có hai.\n" +"Äể tiếp tục thì hãy gõ cụm từ « %s »\n" +"?]" + +#: cmdline/apt-get.cc:874 cmdline/apt-get.cc:893 cmdline/apt-get.cc:827 +#: cmdline/apt-get.cc:846 +msgid "Abort." +msgstr "Hủy bá»." + +#: cmdline/apt-get.cc:889 +msgid "Do you want to continue [Y/n]? " +msgstr "Bạn có muốn tiếp tục không? [Y/n] [C/k] " + +#: cmdline/apt-get.cc:961 cmdline/apt-get.cc:1365 cmdline/apt-get.cc:2023 +#: cmdline/apt-get.cc:911 cmdline/apt-get.cc:1281 cmdline/apt-get.cc:1906 +#, c-format +msgid "Failed to fetch %s %s\n" +msgstr "Việc gói %s bị lá»—i %s\n" + +#: cmdline/apt-get.cc:979 cmdline/apt-get.cc:929 +msgid "Some files failed to download" +msgstr "Má»™t số tập tin không tải vỠđược" + +#: cmdline/apt-get.cc:980 cmdline/apt-get.cc:2032 cmdline/apt-get.cc:930 +#: cmdline/apt-get.cc:1915 +msgid "Download complete and in download only mode" +msgstr "Má»›i tải vá» xong và trong chế Ä‘á»™ chỉ tải vá»" + +#: cmdline/apt-get.cc:986 cmdline/apt-get.cc:936 +msgid "" +"Unable to fetch some archives, maybe run apt-get update or try with --fix-" +"missing?" +msgstr "" +"Không thể lấy má»™t số kho, có lẽ hãy chạy lệnh « apt-get update » (apt lấy " +"cập nhật) hay cố vá»›i « --fix-missing » (sá»­a các Ä‘iá»u còn thiếu) không?" + +#: cmdline/apt-get.cc:990 cmdline/apt-get.cc:940 +msgid "--fix-missing and media swapping is not currently supported" +msgstr "" +"ChÆ°a hô trợ tùy chá»n « --fix-missing » (sá»­a khi thiếu Ä‘iá»u) và trao đổi " +"phÆ°Æ¡ng tiện" + +#: cmdline/apt-get.cc:995 cmdline/apt-get.cc:945 +msgid "Unable to correct missing packages." +msgstr "Không thể sá»­a những gói còn thiếu." + +#: cmdline/apt-get.cc:996 +msgid "Aborting install." +msgstr "Äang hủy bá» cài đặt." + +#: cmdline/apt-get.cc:1030 cmdline/apt-get.cc:979 +#, c-format +msgid "Note, selecting %s instead of %s\n" +msgstr "Ghi chú : Ä‘ang chá»n %s thay vì %s\n" + +#: cmdline/apt-get.cc:1040 cmdline/apt-get.cc:989 +#, c-format +msgid "Skipping %s, it is already installed and upgrade is not set.\n" +msgstr "Äang bá» qua %s vì nó đã được cài đặt và chÆ°a lập tùy chá»n Nâng cấp.\n" + +#: cmdline/apt-get.cc:1058 cmdline/apt-get.cc:1007 +#, c-format +msgid "Package %s is not installed, so not removed\n" +msgstr "ChÆ°a cài đặt gói %s nên không thể gỡ bá» nó\n" + +#: cmdline/apt-get.cc:1069 cmdline/apt-get.cc:1018 +#, c-format +msgid "Package %s is a virtual package provided by:\n" +msgstr "Gói %s là gói ảo được cung cấp do :\n" + +#: cmdline/apt-get.cc:1081 cmdline/apt-get.cc:1030 +msgid " [Installed]" +msgstr " [Äã cài đặt]" + +#: cmdline/apt-get.cc:1086 cmdline/apt-get.cc:1035 +msgid "You should explicitly select one to install." +msgstr "Bạn nên chá»n má»™t cách dứt khoát gói cần cài." 
+
+#: cmdline/apt-get.cc:1091 cmdline/apt-get.cc:1040
+#, c-format
+msgid ""
+"Package %s is not available, but is referred to by another package.\n"
+"This may mean that the package is missing, has been obsoleted, or\n"
+"is only available from another source\n"
+msgstr ""
+"Gói %s không phải sẵn sàng, nhưng mà một gói khác\n"
+"đã tham chiếu đến nó. Có lẽ có nghĩa là gói còn thiếu,\n"
+"đã trở thành cũ, hay chỉ sẵn sàng từ nguồn khác.\n"
+
+#: cmdline/apt-get.cc:1110 cmdline/apt-get.cc:1059
+msgid "However the following packages replace it:"
+msgstr "Tuy nhiên, những gói theo đây thay thế nó :"
+
+#: cmdline/apt-get.cc:1113 cmdline/apt-get.cc:1062
+#, c-format
+msgid "Package %s has no installation candidate"
+msgstr "Gói %s không có ứng cử cài đặt"
+
+#: cmdline/apt-get.cc:1133 cmdline/apt-get.cc:1082
+#, c-format
+msgid "Reinstallation of %s is not possible, it cannot be downloaded.\n"
+msgstr "Không thể cài đặt lại %s vì không thể tải về nó.\n"
+
+#: cmdline/apt-get.cc:1141 cmdline/apt-get.cc:1090
+#, c-format
+msgid "%s is already the newest version.\n"
+msgstr "%s là phiên bản mới nhất.\n"
+
+#: cmdline/apt-get.cc:1168 cmdline/apt-get.cc:1117
+#, c-format
+msgid "Release '%s' for '%s' was not found"
+msgstr "Không tìm thấy bản phát hành « %s » cho « %s »"
+
+#: cmdline/apt-get.cc:1170 cmdline/apt-get.cc:1119
+#, c-format
+msgid "Version '%s' for '%s' was not found"
+msgstr "Không tìm thấy phiên bản « %s » cho « %s »"
+
+#: cmdline/apt-get.cc:1176 cmdline/apt-get.cc:1125
+#, c-format
+msgid "Selected version %s (%s) for %s\n"
+msgstr "Đã chọn phiên bản %s (%s) cho %s\n"
+
+#: cmdline/apt-get.cc:1313 cmdline/apt-get.cc:1235
+msgid "The update command takes no arguments"
+msgstr "Lệnh cập nhật không chấp nhận đối số"
+
+#: cmdline/apt-get.cc:1326 cmdline/apt-get.cc:1420 cmdline/apt-get.cc:1248
+msgid "Unable to lock the list directory"
+msgstr "Không thể khoá thư mục danh sách"
+
+#: cmdline/apt-get.cc:1384 cmdline/apt-get.cc:1300
+msgid ""
+"Some index files failed to download, they have been ignored, or old ones "
+"used instead."
+msgstr ""
+"Một số tập tin chỉ mục không tải về được, đã bỏ qua chúng, hoặc điều cũ được "
+"dùng thay thế."
+
+#: cmdline/apt-get.cc:1403
+msgid "Internal error, AllUpgrade broke stuff"
+msgstr "Lỗi nội bộ: AllUpgrade (nâng cấp toàn bộ) đã làm hỏng gì đó"
+
+#: cmdline/apt-get.cc:1502 cmdline/apt-get.cc:1538 cmdline/apt-get.cc:1409
+#: cmdline/apt-get.cc:1445
+#, c-format
+msgid "Couldn't find package %s"
+msgstr "Không tìm thấy gói %s"
+
+#: cmdline/apt-get.cc:1525 cmdline/apt-get.cc:1432
+#, c-format
+msgid "Note, selecting %s for regex '%s'\n"
+msgstr "Ghi chú : đang chọn %s cho biểu thức chính quy « %s »\n"
+
+#: cmdline/apt-get.cc:1555 cmdline/apt-get.cc:1462
+msgid "You might want to run `apt-get -f install' to correct these:"
+msgstr "Có lẽ bạn hãy chạy lệnh « apt-get -f install » để sửa hết:"
+
+#: cmdline/apt-get.cc:1558 cmdline/apt-get.cc:1465
+msgid ""
+"Unmet dependencies. Try 'apt-get -f install' with no packages (or specify a "
+"solution)."
+msgstr ""
+"Gói còn phụ thuộc vào phần mềm chưa có. Hãy cố chạy lệnh « apt-get -f "
+"install » mà không có gói nào (hoặc ghi rõ cách giải quyết)."
+
+#: cmdline/apt-get.cc:1570 cmdline/apt-get.cc:1477
+msgid ""
+"Some packages could not be installed. This may mean that you have\n"
+"requested an impossible situation or if you are using the unstable\n"
+"distribution that some required packages have not yet been created\n"
+"or been moved out of Incoming."
+msgstr "" +"Không thể cài đặt má»™t số gói. Có lẽ có nghÄ©a là bạn Ä‘a yêu cầu\n" +"má»™t trÆ°á»ng hợp không thể, hoặc nếu bạn sá»­ dụng bản phân phối\n" +"bất định, có lẽ chÆ°a tạo má»™t số gói cần thiết,\n" +"hoặc chÆ°a di chuyển chúng ra phần Incoming (Äến)." + +#: cmdline/apt-get.cc:1578 cmdline/apt-get.cc:1485 +msgid "" +"Since you only requested a single operation it is extremely likely that\n" +"the package is simply not installable and a bug report against\n" +"that package should be filed." +msgstr "" +"Vì bạn đã yêu cầu chỉ má»™t thao tác riêng lẻ, rât có thể là\n" +"gói này Ä‘Æ¡n giản không có khả năng cài đặt, thì bạn hay\n" +"thông báo lá»—i vá» gói này." + +#: cmdline/apt-get.cc:1583 cmdline/apt-get.cc:1490 +msgid "The following information may help to resolve the situation:" +msgstr "Có lẽ thông tin theo đây sẽ giúp đỡ quyết định trÆ°á»ng hợp:" + +#: cmdline/apt-get.cc:1586 cmdline/apt-get.cc:1493 +msgid "Broken packages" +msgstr "Gói bị ngắt" + +#: cmdline/apt-get.cc:1612 cmdline/apt-get.cc:1519 +msgid "The following extra packages will be installed:" +msgstr "Những gói thêm theo đây sẽ được cài đặt:" + +#: cmdline/apt-get.cc:1683 cmdline/apt-get.cc:1590 +msgid "Suggested packages:" +msgstr "Gói được đệ nghị:" + +#: cmdline/apt-get.cc:1684 cmdline/apt-get.cc:1591 +msgid "Recommended packages:" +msgstr "Gói được khuyên:" + +#: cmdline/apt-get.cc:1704 +msgid "Calculating upgrade... " +msgstr "Äang tính nâng cấp... " + +#: cmdline/apt-get.cc:1707 methods/ftp.cc:702 methods/connect.cc:101 +#: cmdline/apt-get.cc:1614 methods/connect.cc:99 src/common/dcc.c:68 +msgid "Failed" +msgstr "Bị lá»—i" + +#: cmdline/apt-get.cc:1712 ../mail/mail-config.glade.h:72 main.c:175 +#: src/common/dcc.c:70 cmdline/apt-get.cc:1619 src/common/dcc.c:69 +#: ../scripts/pybtext.py:344 datebook_gui.c:1326 import_gui.c:311 +#: prefs_gui.c:763 +msgid "Done" +msgstr "Äã xong" + +#: cmdline/apt-get.cc:1777 cmdline/apt-get.cc:1785 +msgid "Internal error, problem resolver broke stuff" +msgstr "Lá»—i ná»™i bá»™: bá»™ tháo gỡ vấn đỠđã ngắt gì" + +#: cmdline/apt-get.cc:1885 cmdline/apt-get.cc:1792 +msgid "Must specify at least one package to fetch source for" +msgstr "Phải ghi rõ ít nhất má»™t gói cần lấy nguồn cho nó" + +#: cmdline/apt-get.cc:1915 cmdline/apt-get.cc:2144 cmdline/apt-get.cc:1819 +#: cmdline/apt-get.cc:2026 +#, c-format +msgid "Unable to find a source package for %s" +msgstr "Không tìm thấy gói nguồn cho %s" + +#: cmdline/apt-get.cc:1959 +#, c-format +msgid "Skipping already downloaded file '%s'\n" +msgstr "Äang bá» qua tập tin đã được tải vỠ« %s »\n" + +#: cmdline/apt-get.cc:1983 cmdline/apt-get.cc:1866 +#, c-format +msgid "You don't have enough free space in %s" +msgstr "Không đủ sức chứa còn rảnh trong %s" + +#: cmdline/apt-get.cc:1988 cmdline/apt-get.cc:1871 +#, c-format +msgid "Need to get %sB/%sB of source archives.\n" +msgstr "Cần phải lấy %sB/%sB kho nguồn.\n" + +#: cmdline/apt-get.cc:1991 cmdline/apt-get.cc:1874 +#, c-format +msgid "Need to get %sB of source archives.\n" +msgstr "Cần phải lấy %sB kho nguồn.\n" + +#: cmdline/apt-get.cc:1997 +#, c-format +msgid "Fetch source %s\n" +msgstr "Lấy nguồn %s\n" + +#: cmdline/apt-get.cc:2028 cmdline/apt-get.cc:1911 +msgid "Failed to fetch some archives." +msgstr "Việc lấy má»™t số kho bị lá»—i." 
+
+#: cmdline/apt-get.cc:2056 cmdline/apt-get.cc:1939
+#, c-format
+msgid "Skipping unpack of already unpacked source in %s\n"
+msgstr "Đang bỏ qua giải nén nguồn đã giải nén trong %s\n"
+
+#: cmdline/apt-get.cc:2068 cmdline/apt-get.cc:1951
+#, c-format
+msgid "Unpack command '%s' failed.\n"
+msgstr "Lệnh giải nén « %s » bị lỗi.\n"
+
+#: cmdline/apt-get.cc:2069
+#, c-format
+msgid "Check if the 'dpkg-dev' package is installed.\n"
+msgstr "Hãy kiểm tra xem gói « dpkg-dev » có được cài đặt chưa.\n"
+
+#: cmdline/apt-get.cc:2086 cmdline/apt-get.cc:1968
+#, c-format
+msgid "Build command '%s' failed.\n"
+msgstr "Lệnh xây dựng « %s » bị lỗi.\n"
+
+#: cmdline/apt-get.cc:2105 cmdline/apt-get.cc:1987
+msgid "Child process failed"
+msgstr "Tiến trình con bị lỗi"
+
+#: cmdline/apt-get.cc:2121 cmdline/apt-get.cc:2003
+msgid "Must specify at least one package to check builddeps for"
+msgstr ""
+"Phải ghi rõ ít nhất một gói cần kiểm tra cách phụ thuộc khi xây dựng cho nó"
+
+#: cmdline/apt-get.cc:2149 cmdline/apt-get.cc:2031
+#, c-format
+msgid "Unable to get build-dependency information for %s"
+msgstr "Không thể lấy thông tin về cách phụ thuộc khi xây dựng cho %s"
+
+#: cmdline/apt-get.cc:2169 cmdline/apt-get.cc:2051
+#, c-format
+msgid "%s has no build depends.\n"
+msgstr "%s không phụ thuộc vào gì khi xây dựng.\n"
+
+#: cmdline/apt-get.cc:2221 cmdline/apt-get.cc:2103
+#, c-format
+msgid ""
+"%s dependency for %s cannot be satisfied because the package %s cannot be "
+"found"
+msgstr "cách phụ thuộc %s cho %s không thể được thỏa vì không tìm thấy gói %s"
+
+#: cmdline/apt-get.cc:2273 cmdline/apt-get.cc:2155
+#, c-format
+msgid ""
+"%s dependency for %s cannot be satisfied because no available versions of "
+"package %s can satisfy version requirements"
+msgstr ""
+"cách phụ thuộc %s cho %s không thể được thỏa vì không có phiên bản sẵn sàng "
+"của gói %s có thể thỏa điều kiện phiên bản"
+
+#: cmdline/apt-get.cc:2308 cmdline/apt-get.cc:2190
+#, c-format
+msgid "Failed to satisfy %s dependency for %s: Installed package %s is too new"
+msgstr ""
+"Việc cố thỏa cách phụ thuộc %s cho %s bị lỗi vì gói đã cài đặt %s quá mới"
+
+#: cmdline/apt-get.cc:2333 cmdline/apt-get.cc:2215
+#, c-format
+msgid "Failed to satisfy %s dependency for %s: %s"
+msgstr "Việc cố thỏa cách phụ thuộc %s cho %s bị lỗi: %s"
+
+#: cmdline/apt-get.cc:2347 cmdline/apt-get.cc:2229
+#, c-format
+msgid "Build-dependencies for %s could not be satisfied."
+msgstr "Không thể thỏa cách phụ thuộc khi xây dựng cho %s."
+
+#: cmdline/apt-get.cc:2351 cmdline/apt-get.cc:2233
+msgid "Failed to process build dependencies"
+msgstr "Việc xử lý cách phụ thuộc khi xây dựng bị lỗi"
+
+#: cmdline/apt-get.cc:2383
+msgid "Supported modules:"
+msgstr "Mô-đun đã hỗ trợ :"
+
+#: cmdline/apt-get.cc:2424 cmdline/apt-get.cc:2306
+msgid ""
+"Usage: apt-get [options] command\n"
+" apt-get [options] install|remove pkg1 [pkg2 ...]\n"
+" apt-get [options] source pkg1 [pkg2 ...]\n"
+"\n"
+"apt-get is a simple command line interface for downloading and\n"
+"installing packages. The most frequently used commands are update\n"
+"and install.\n"
+"\n"
+"Commands:\n"
+" update - Retrieve new lists of packages\n"
+" upgrade - Perform an upgrade\n"
+" install - Install new packages (pkg is libc6 not libc6.deb)\n"
+" remove - Remove packages\n"
+" source - Download source archives\n"
+" build-dep - Configure build-dependencies for source packages\n"
+" dist-upgrade - Distribution upgrade, see apt-get(8)\n"
+" dselect-upgrade - Follow dselect selections\n"
+" clean - Erase downloaded archive files\n"
+" autoclean - Erase old downloaded archive files\n"
+" check - Verify that there are no broken dependencies\n"
+"\n"
+"Options:\n"
+" -h This help text.\n"
+" -q Loggable output - no progress indicator\n"
+" -qq No output except for errors\n"
+" -d Download only - do NOT install or unpack archives\n"
+" -s No-act. Perform ordering simulation\n"
+" -y Assume Yes to all queries and do not prompt\n"
+" -f Attempt to continue if the integrity check fails\n"
+" -m Attempt to continue if archives are unlocatable\n"
+" -u Show a list of upgraded packages as well\n"
+" -b Build the source package after fetching it\n"
+" -V Show verbose version numbers\n"
+" -c=? Read this configuration file\n"
+" -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\n"
+"See the apt-get(8), sources.list(5) and apt.conf(5) manual\n"
+"pages for more information and options.\n"
+" This APT has Super Cow Powers.\n"
+msgstr ""
+"Cách sử dụng: apt-get [tùy_chọn...] lệnh\n"
+" apt-get [tùy_chọn...] install|remove gói1 [gói2 ...]\n"
+" apt-get [tùy_chọn...] source gói1 [gói2 ...]\n"
+"\n"
+"[get: \tlấy\n"
+"install: \tcài đặt\n"
+"remove: \tgỡ bỏ\n"
+"source: \tnguồn]\n"
+"\n"
+"apt-get là một giao diện dòng lệnh đơn giản để tải về và cài đặt gói.\n"
+"Những lệnh đã dùng thường nhất là update (cập nhật) và install (cài đặt).\n"
+"\n"
+"Lệnh:\n"
+" update\t\tLấy danh sách gói mới (_cập nhật_)\n"
+" upgrade \t_Nâng cấp_ \n"
+" install \t\t_Cài đặt_ gói mới (gói là libc6 không phải libc6.deb)\n"
+" remove \t_Gỡ bỏ_ gói\n"
+" source \t\tTải về kho _nguồn_\n"
+" build-dep \tĐịnh cấu hình _cách phụ thuộc khi xây dựng_, cho gói nguồn\n"
+" dist-upgrade \t_Nâng cấp bản phân phối_,\n"
+"\t\t\t\t\thãy xem trang hướng dẫn (man) apt-get(8)\n"
+" dselect-upgrade \t\tTheo cách chọn dselect (_nâng cấp_)\n"
+" clean \t\tXoá bỏ các tập tin kho đã tải về (_làm sạch_)\n"
+" autoclean \tXoá bỏ các tập tin kho cũ đã tải về (_tự động làm sạch_)\n"
+" check \t\t_Kiểm chứng_ không có cách phụ thuộc bị hỏng\n"
+"\n"
+"Tùy chọn:\n"
+" -h \t_Trợ giúp_ này.\n"
+" -q \tDữ liệu xuất có thể ghi lưu - không có cái chỉ tiến trình (_im_)\n"
+" -qq \tKhông xuất thông tin nào, trừ lỗi (_im im_)\n"
+" -d \tChỉ _tải về_, ĐỪNG cài đặt hay giải nén kho\n"
+" -s \tKhông hoạt động. _Mô phỏng_ sắp xếp\n"
+" -y \tGiả sử trả lời _Có_ (yes) mọi khi gặp câu hỏi;\n"
+"\t\t\t\t\tđừng nhắc người dùng gõ gì\n"
+" -f \t\tCố tiếp tục lại nếu việc kiểm tra tính nguyên vẹn _thất bại_\n"
+" -m \tCố tiếp tục lại nếu không thể định vị kho\n"
+" -u \tCũng hiện danh sách các gói đã _nâng cấp_\n"
+" -b \t_Xây dựng_ gói nguồn sau khi lấy nó\n"
+" -V \tHiện số thứ tự _phiên bản chi tiết_\n"
+" -c=? \tĐọc tập tin cấu hình ấy\n"
+" -o=? \tLập tùy chọn nhiệm ý, v.d. -o dir::cache=/tmp\n"
+"Để tìm thông tin và tùy chọn thêm thì hãy xem trang hướng dẫn apt-get(8), "
+"sources.list(5) và apt.conf(5).\n"
+" Trình APT này có năng lực của bò siêu.\n"
+
+#: cmdline/acqprogress.cc:55
+msgid "Hit "
+msgstr "Lần tìm "
+
+#: cmdline/acqprogress.cc:79
+msgid "Get:"
+msgstr "Lấy:"
+
+#: cmdline/acqprogress.cc:110
+msgid "Ign "
+msgstr "Bỏq "
+
+#: cmdline/acqprogress.cc:114
+msgid "Err "
+msgstr "Lỗi "
+
+#: cmdline/acqprogress.cc:135
+#, c-format
+msgid "Fetched %sB in %s (%sB/s)\n"
+msgstr "Mới lấy %sB trong %s (%sB/g).\n"
+
+#: src/download_item.cc:70 src/download_item.cc:74 src/download_item.cc:104
+#, c-format
+msgid " [Working]"
+msgstr " [Hoạt động]"
+
+#: cmdline/acqprogress.cc:271
+#, c-format
+msgid ""
+"Media change: please insert the disc labeled\n"
+" '%s'\n"
+"in the drive '%s' and press enter\n"
+msgstr ""
+"Chuyển đổi vật chứa: hãy nạp đĩa có nhãn\n"
+" « %s »\n"
+"vào ổ « %s » và bấm nút Enter\n"
+
+#: cmdline/apt-sortpkgs.cc:86
+msgid "Unknown package record!"
+msgstr "Không biết mục ghi gói."
+
+#: cmdline/apt-sortpkgs.cc:150
+msgid ""
+"Usage: apt-sortpkgs [options] file1 [file2 ...]\n"
+"\n"
+"apt-sortpkgs is a simple tool to sort package files. The -s option is used\n"
+"to indicate what kind of file it is.\n"
+"\n"
+"Options:\n"
+" -h This help text\n"
+" -s Use source file sorting\n"
+" -c=? Read this configuration file\n"
+" -o=? Set an arbitrary configuration option, eg -o dir::cache=/tmp\n"
+msgstr ""
+"Cách sử dụng: apt-sortpkgs [tùy_chọn...] tập_tin1 [tập_tin2 ...]\n"
+"\n"
+"[sortpkgs: sort packages: sắp xếp các gói]\n"
+"\n"
+"apt-sortpkgs là một công cụ đơn giản để sắp xếp tập tin gói.\n"
+"Tùy chọn « -s » dùng để ngụ ý kiểu tập tin.\n"
+"\n"
+"Tùy chọn:\n"
+" -h \t_Trợ giúp_ này\n"
+" -s \tSắp xếp những tập tin _nguồn_\n"
+" -c=? \tĐọc tập tin cấu hình này\n"
+" -o=? \tLập tùy chọn cấu hình nhiệm ý, v.d. « -o dir::cache=/tmp »\n"
+
+#: dselect/install:32
+msgid "Bad default setting!"
+msgstr "Thiết lập mặc định sai."
+
+#: dselect/install:51 dselect/install:83 dselect/install:87 dselect/install:93
+#: dselect/install:104 dselect/update:45
+msgid "Press enter to continue."
+msgstr "Hãy bấm phím Enter để tiếp tục lại."
+
+#: dselect/install:100
+msgid "Some errors occurred while unpacking. I'm going to configure the"
+msgstr "Gặp một số lỗi khi giải nén. Sẽ cấu hình"
+
+#: dselect/install:101
+msgid "packages that were installed. This may result in duplicate errors"
+msgstr "những gói đã được cài đặt. Có lẽ sẽ gây ra lỗi trùng"
+
+#: dselect/install:102
+msgid "or errors caused by missing dependencies. This is OK, only the errors"
+msgstr ""
+"hoặc lỗi khi không có phần mềm mà gói khác phụ thuộc vào nó. Không có sao, "
+"chỉ những lỗi"
+
+#: dselect/install:103
+msgid ""
+"above this message are important. Please fix them and run [I]nstall again"
+msgstr ""
+"ở trên thông điệp này là quan trọng. Hãy sửa chúng và chạy lại [I]nstall "
+"(cài đặt)."
+
+#: dselect/update:30
+msgid "Merging available information"
+msgstr "Đang hợp nhất các thông tin sẵn sàng..."
+
+#: apt-inst/contrib/extracttar.cc:117 apt-inst/contrib/extracttar.cc:116
+msgid "Failed to create pipes"
+msgstr "Việc tạo những ống bị lỗi"
+
+#: apt-inst/contrib/extracttar.cc:143 apt-inst/contrib/extracttar.cc:141
+msgid "Failed to exec gzip "
+msgstr "Việc thực hiện gzip bị lỗi "
+
+#: apt-inst/contrib/extracttar.cc:180 apt-inst/contrib/extracttar.cc:206
+#: apt-inst/contrib/extracttar.cc:178 apt-inst/contrib/extracttar.cc:204
+msgid "Corrupted archive"
+msgstr "Kho bị hỏng."
+
+#: apt-inst/contrib/extracttar.cc:195
+msgid "Tar checksum failed, archive corrupted"
+msgstr "Tiến trình tar (kiểm tổng tar) thất bại: kho bị hỏng."
+
+#: apt-inst/contrib/extracttar.cc:298 apt-inst/contrib/extracttar.cc:296
+#, c-format
+msgid "Unknown TAR header type %u, member %s"
+msgstr "Không biết kiểu phần đầu tar %u, bộ phận %s"
+
+#: apt-inst/contrib/arfile.cc:73
+msgid "Invalid archive signature"
+msgstr "Chữ ký kho không hợp lệ"
+
+#: apt-inst/contrib/arfile.cc:81
+msgid "Error reading archive member header"
+msgstr "Gặp lỗi khi đọc phần đầu bộ phận kho"
+
+#: apt-inst/contrib/arfile.cc:93 apt-inst/contrib/arfile.cc:105
+msgid "Invalid archive member header"
+msgstr "Phần đầu bộ phận kho không hợp lệ"
+
+#: apt-inst/contrib/arfile.cc:131
+msgid "Archive is too short"
+msgstr "Kho quá ngắn"
+
+#: apt-inst/contrib/arfile.cc:135
+msgid "Failed to read the archive headers"
+msgstr "Việc đọc phần đầu kho bị lỗi"
+
+#: apt-inst/filelist.cc:384
+msgid "DropNode called on still linked node"
+msgstr "DropNode (thả điểm nút) được gọi với điểm nút còn liên kết"
+
+#: apt-inst/filelist.cc:416
+msgid "Failed to locate the hash element!"
+msgstr "Việc định vị phần tử băm bị lỗi."
+
+#: apt-inst/filelist.cc:463
+msgid "Failed to allocate diversion"
+msgstr "Việc cấp phát sự trệch đi bị lỗi"
+
+#: apt-inst/filelist.cc:468
+msgid "Internal error in AddDiversion"
+msgstr "Lỗi nội bộ trong AddDiversion (thêm sự trệch đi)"
+
+#: apt-inst/filelist.cc:481
+#, c-format
+msgid "Trying to overwrite a diversion, %s -> %s and %s/%s"
+msgstr "Đang cố ghi đè một sự trệch đi, %s → %s và %s/%s"
+
+#: apt-inst/filelist.cc:510
+#, c-format
+msgid "Double add of diversion %s -> %s"
+msgstr "Sự trệch đi được thêm hai lần %s → %s"
+
+#: apt-inst/filelist.cc:553
+#, c-format
+msgid "Duplicate conf file %s/%s"
+msgstr "Tập tin cấu hình trùng %s/%s"
+
+#: apt-inst/dirstream.cc:45 apt-inst/dirstream.cc:50 apt-inst/dirstream.cc:53
+#, c-format
+msgid "Failed to write file %s"
+msgstr "Việc ghi tập tin %s bị lỗi"
+
+#: apt-inst/dirstream.cc:96 apt-inst/dirstream.cc:104 apt-inst/dirstream.cc:80
+#: apt-inst/dirstream.cc:88
+#, c-format
+msgid "Failed to close file %s"
+msgstr "Việc đóng tập tin %s bị lỗi"
+
+#: apt-inst/extract.cc:96 apt-inst/extract.cc:167
+#, c-format
+msgid "The path %s is too long"
+msgstr "Đường dẫn %s quá dài"
+
+#: apt-inst/extract.cc:127
+#, c-format
+msgid "Unpacking %s more than once"
+msgstr "Đang giải nén %s nhiều lần"
+
+#: apt-inst/extract.cc:137
+#, c-format
+msgid "The directory %s is diverted"
+msgstr "Thư mục %s bị trệch hướng"
+
+#: apt-inst/extract.cc:147
+#, c-format
+msgid "The package is trying to write to the diversion target %s/%s"
+msgstr "Gói này đang cố ghi vào đích trệch đi %s/%s"
+
+#: apt-inst/extract.cc:157 apt-inst/extract.cc:300
+msgid "The diversion path is too long"
+msgstr "Đường dẫn trệch đi quá dài."
+
+#: apt-inst/extract.cc:243
+#, c-format
+msgid "The directory %s is being replaced by a non-directory"
+msgstr "Thư mục %s đang được thay thế do điều không phải là thư mục"
+
+#: apt-inst/extract.cc:283
+msgid "Failed to locate node in its hash bucket"
+msgstr "Việc định vị điểm nút trong hộp băm nó bị lỗi"
+
+#: apt-inst/extract.cc:287
+msgid "The path is too long"
+msgstr "Đường dẫn quá dài"
+
+#: apt-inst/extract.cc:417
+#, c-format
+msgid "Overwrite package match with no version for %s"
+msgstr "Ghi đè lên gói đã khớp mà không có phiên bản cho %s"
+
+#: apt-inst/extract.cc:434
+#, c-format
+msgid "File %s/%s overwrites the one in the package %s"
+msgstr "Tập tin %s/%s ghi đè lên điều trong gói %s"
+
+#: apt-pkg/contrib/configuration.cc:709 apt-pkg/acquire.cc:416
+#, c-format
+msgid "Unable to read %s"
+msgstr "Không thể đọc %s"
+
+#: apt-inst/extract.cc:494
+#, c-format
+msgid "Unable to stat %s"
+msgstr "Không thể lấy các thông tin về %s"
+
+#: apt-inst/deb/dpkgdb.cc:55 apt-inst/deb/dpkgdb.cc:61
+#, c-format
+msgid "Failed to remove %s"
+msgstr "Việc gỡ bỏ %s bị lỗi"
+
+#: apt-inst/deb/dpkgdb.cc:110 apt-inst/deb/dpkgdb.cc:112
+#, c-format
+msgid "Unable to create %s"
+msgstr "Không thể tạo %s"
+
+#: apt-inst/deb/dpkgdb.cc:118
+#, c-format
+msgid "Failed to stat %sinfo"
+msgstr "Việc lấy các thông tin về %sinfo bị lỗi"
+
+#: apt-inst/deb/dpkgdb.cc:123
+msgid "The info and temp directories need to be on the same filesystem"
+msgstr ""
+"Những thư mục info (thông tin) và temp (tạm thời) cần phải trong cùng một hệ "
+"thống tập tin"
+
+#: apt-pkg/pkgcachegen.cc:840
+msgid "Reading package lists"
+msgstr "Đang đọc các danh sách gói..."
+
+#: apt-inst/deb/dpkgdb.cc:180
+#, c-format
+msgid "Failed to change to the admin dir %sinfo"
+msgstr "Việc chuyển đổi sang thư mục quản lý %sinfo bị lỗi"
+
+#: apt-inst/deb/dpkgdb.cc:448
+msgid "Internal error getting a package name"
+msgstr "Gặp lỗi nội bộ khi lấy tên gói"
+
+#: apt-inst/deb/dpkgdb.cc:205
+msgid "Reading file listing"
+msgstr "Đang đọc danh sách tập tin..."
+
+#: apt-inst/deb/dpkgdb.cc:216
+#, c-format
+msgid ""
+"Failed to open the list file '%sinfo/%s'. If you cannot restore this file "
+"then make it empty and immediately re-install the same version of the "
+"package!"
+msgstr ""
+"Việc mở tập tin danh sách « %sinfo/%s » bị lỗi. Nếu bạn không thể phục hồi "
+"tập tin này, bạn hãy làm cho nó rỗng và ngay cài đặt lại cùng phiên bản gói."
+
+#: apt-inst/deb/dpkgdb.cc:229 apt-inst/deb/dpkgdb.cc:242
+#, c-format
+msgid "Failed reading the list file %sinfo/%s"
+msgstr "Việc đọc tập tin danh sách %sinfo/%s bị lỗi"
+
+#: apt-inst/deb/dpkgdb.cc:266
+msgid "Internal error getting a node"
+msgstr "Gặp lỗi nội bộ khi lấy nút điểm..."
+
+#: apt-inst/deb/dpkgdb.cc:309
+#, c-format
+msgid "Failed to open the diversions file %sdiversions"
+msgstr "Việc mở tập tin trệch đi %sdiversions bị lỗi"
+
+#: apt-inst/deb/dpkgdb.cc:324
+msgid "The diversion file is corrupted"
+msgstr "Tập tin trệch đi bị hỏng"
+
+#: apt-inst/deb/dpkgdb.cc:331 apt-inst/deb/dpkgdb.cc:336
+#: apt-inst/deb/dpkgdb.cc:341
+#, c-format
+msgid "Invalid line in the diversion file: %s"
+msgstr "Gặp dòng không hợp lệ trong tập tin trệch đi: %s"
+
+#: apt-inst/deb/dpkgdb.cc:362
+msgid "Internal error adding a diversion"
+msgstr "Gặp lỗi nội bộ khi thêm một sự trệch đi"
+
+#: apt-inst/deb/dpkgdb.cc:383
+msgid "The pkg cache must be initialized first"
+msgstr "Phải khởi động bộ nhớ tạm gói trước hết"
+
+#: apt-inst/deb/dpkgdb.cc:386
+msgid "Reading file list"
+msgstr "Đang đọc danh sách tập tin..."
+
+#: apt-inst/deb/dpkgdb.cc:443
+#, c-format
+msgid "Failed to find a Package: header, offset %lu"
+msgstr "Việc tìm phần đầu « Package: » (gói) bị lỗi, hiệu số %lu"
+
+#: apt-inst/deb/dpkgdb.cc:465
+#, c-format
+msgid "Bad ConfFile section in the status file. Offset %lu"
+msgstr ""
+"Có phần cấu hình tập tin (ConfFile) sai trong tập tin trạng thái. Hiệu số %lu"
+
+#: apt-inst/deb/dpkgdb.cc:470
+#, c-format
+msgid "Error parsing MD5. Offset %lu"
+msgstr "Gặp lỗi khi phân tách MD5. Hiệu số %lu"
+
+#: apt-inst/deb/debfile.cc:42 apt-inst/deb/debfile.cc:47
+#: apt-inst/deb/debfile.cc:55
+#, c-format
+msgid "This is not a valid DEB archive, missing '%s' member"
+msgstr "Đây không phải là một kho DEB hợp lệ vì còn thiếu bộ phận « %s »"
+
+#: apt-inst/deb/debfile.cc:52
+#, c-format
+msgid "This is not a valid DEB archive, it has no '%s' or '%s' member"
+msgstr ""
+"Đây không phải là một kho DEB hợp lệ vì không có bộ phận « %s » hay « %s »"
+
+#: apt-inst/deb/debfile.cc:112 apt-inst/deb/debfile.cc:104
+#, c-format
+msgid "Couldn't change to %s"
+msgstr "Không thể chuyển đổi sang %s"
+
+#: apt-inst/deb/debfile.cc:138
+msgid "Internal error, could not locate member"
+msgstr "Gặp lỗi nội bộ, không thể định vị bộ phận"
+
+#: apt-inst/deb/debfile.cc:171 apt-inst/deb/debfile.cc:158
+msgid "Failed to locate a valid control file"
+msgstr "Việc định vị tập tin điều khiển hợp lệ bị lỗi"
+
+#: apt-inst/deb/debfile.cc:256
+msgid "Unparsable control file"
+msgstr "Tập tin điều khiển không có khả năng phân tách"
+
+#: methods/cdrom.cc:114 methods/cdrom.cc:113
+#, c-format
+msgid "Unable to read the cdrom database %s"
+msgstr "Không thể đọc cơ sở dữ liệu đĩa CD-ROM %s"
+
+#: methods/cdrom.cc:123
+msgid ""
+"Please use apt-cdrom to make this CD-ROM recognized by APT. apt-get update "
+"cannot be used to add new CD-ROMs"
+msgstr ""
+"Hãy sử dụng lệnh « apt-cdrom » để làm cho APT chấp nhận đĩa CD này. Không "
+"thể sử dụng lệnh « apt-get update » (lấy cập nhật) để thêm đĩa CD mới."
+
+#: methods/cdrom.cc:131
+msgid "Wrong CD-ROM"
+msgstr "CD không đúng"
+
+#: methods/cdrom.cc:164 methods/cdrom.cc:163
+#, c-format
+msgid "Unable to unmount the CD-ROM in %s, it may still be in use."
+msgstr "Không thể tháo gắn kết đĩa CD-ROM trong %s. Có lẽ nó còn được dùng."
+
+#: methods/cdrom.cc:169
+msgid "Disk not found."
+msgstr "Không tìm thấy Ä‘Ä©a" + +#: methods/cdrom.cc:177 methods/file.cc:79 methods/rsh.cc:264 +#: ../libfspot/f-jpeg-utils.c:445 +msgid "File not found" +msgstr "Không tìm thấy tập tin" + +#: methods/copy.cc:42 methods/gpgv.cc:269 methods/gzip.cc:133 +#: methods/gzip.cc:142 +msgid "Failed to stat" +msgstr "Việc lấy các thông tin bị lá»—i" + +#: methods/copy.cc:79 methods/gpgv.cc:266 methods/gzip.cc:139 +msgid "Failed to set modification time" +msgstr "Việc lập giá» sá»­a đổi bị lá»—i" + +#: methods/file.cc:44 methods/file.cc:42 +msgid "Invalid URI, local URIS must not start with //" +msgstr "Äịa chỉ Mạng (URI) không hợp lệ: URI không thể bắt đầu vá»›i « // »" + +#: methods/ftp.cc:162 +msgid "Logging in" +msgstr "Äang đăng nhập..." + +#: methods/ftp.cc:168 +msgid "Unable to determine the peer name" +msgstr "Không thể quyết định tên ngang hàng" + +#: methods/ftp.cc:173 +msgid "Unable to determine the local name" +msgstr "Không thể quyết định tên cục bá»™" + +#: methods/ftp.cc:204 methods/ftp.cc:232 +#, c-format +msgid "The server refused the connection and said: %s" +msgstr "Máy phục vụ đã từ chối kết nối, và nói: %s" + +#: methods/ftp.cc:210 +#, c-format +msgid "USER failed, server said: %s" +msgstr "Lệnh USER (ngÆ°á»i dùng) đã thất bại: máy phục vụ nói: %s" + +#: methods/ftp.cc:217 +#, c-format +msgid "PASS failed, server said: %s" +msgstr "Lệnh PASS (mật khẩu) đã thất bại: máy phục vụ nói: %s" + +#: methods/ftp.cc:237 +msgid "" +"A proxy server was specified but no login script, Acquire::ftp::ProxyLogin " +"is empty." +msgstr "" +"Äã ghi rõ máy phục vụ ủy nhiệm, nhÆ°ng mà chÆ°a ghi rõ tập lệnh đăng nhập. « " +"Acquire::ftp::ProxyLogin » là rá»—ng." + +#: methods/ftp.cc:265 +#, c-format +msgid "Login script command '%s' failed, server said: %s" +msgstr "Lệnh tập lệnh đăng nhập « %s » đã thất bại: máy phục vụ nói: %s" + +#: methods/ftp.cc:291 +#, c-format +msgid "TYPE failed, server said: %s" +msgstr "Lệnh TYPE (kiểu) đã thất bại: máy phục vụ nói: %s" + +#: methods/ftp.cc:329 methods/ftp.cc:440 methods/rsh.cc:183 methods/rsh.cc:226 +msgid "Connection timeout" +msgstr "Thá»i hạn kết nối" + +#: methods/ftp.cc:335 +msgid "Server closed the connection" +msgstr "Máy phục vụ đã đóng kết nối" + +#: src/uuencode.c:190 +msgid "Read error" +msgstr "Lá»—i Ä‘á»c" + +#: methods/ftp.cc:345 methods/rsh.cc:197 +msgid "A response overflowed the buffer." +msgstr "Má»™t trả lá»i đã tràn bá»™ đệm." + +#: methods/ftp.cc:362 methods/ftp.cc:374 +msgid "Protocol corruption" +msgstr "Giao thức bị há»ng" + +#: src/uuencode.c:310 +msgid "Write error" +msgstr "Lá»—i ghi" + +#: methods/ftp.cc:687 methods/ftp.cc:693 methods/ftp.cc:729 +msgid "Could not create a socket" +msgstr "Không thể tạo ổ cắm" + +#: methods/ftp.cc:698 +msgid "Could not connect data socket, connection timed out" +msgstr "Không thể kết nối ổ cắm dữ liệu, kết nối đã quá giá»" + +#: methods/ftp.cc:704 +msgid "Could not connect passive socket." +msgstr "Không thể kết nối ổ cắm bị Ä‘á»™ng." 
+
+#: methods/ftp.cc:722
+msgid "getaddrinfo was unable to get a listening socket"
+msgstr "getaddrinfo (lấy thông tin địa chỉ) không thể lấy ổ cắm lắng nghe"
+
+#: methods/ftp.cc:736
+msgid "Could not bind a socket"
+msgstr "Không thể đóng kết ổ cắm"
+
+#: methods/ftp.cc:740
+msgid "Could not listen on the socket"
+msgstr "Không thể lắng nghe trên ổ cắm đó"
+
+#: methods/ftp.cc:747
+msgid "Could not determine the socket's name"
+msgstr "Không thể quyết định tên ổ cắm đó"
+
+#: methods/ftp.cc:779
+msgid "Unable to send PORT command"
+msgstr "Không thể gởi lệnh PORT (cổng)"
+
+#: methods/ftp.cc:789
+#, c-format
+msgid "Unknown address family %u (AF_*)"
+msgstr "Không biết nhóm địa chỉ %u (AF_*)"
+
+#: methods/ftp.cc:798
+#, c-format
+msgid "EPRT failed, server said: %s"
+msgstr "Lệnh EPRT đã thất bại: máy phục vụ nói: %s"
+
+#: methods/ftp.cc:818
+msgid "Data socket connect timed out"
+msgstr "Kết nối ổ cắm dữ liệu đã quá giờ"
+
+#: methods/ftp.cc:825
+msgid "Unable to accept connection"
+msgstr "Không thể chấp nhận kết nối"
+
+#: methods/ftp.cc:864 methods/http.cc:958 methods/rsh.cc:303
+#: methods/http.cc:916
+msgid "Problem hashing file"
+msgstr "Gặp khó khăn khi băm tập tin"
+
+#: methods/ftp.cc:877
+#, c-format
+msgid "Unable to fetch file, server said '%s'"
+msgstr "Không thể lấy tập tin: máy phục vụ nói « %s »"
+
+#: methods/ftp.cc:892 methods/rsh.cc:322
+msgid "Data socket timed out"
+msgstr "Ổ cắm dữ liệu đã quá giờ"
+
+#: methods/ftp.cc:922
+#, c-format
+msgid "Data transfer failed, server said '%s'"
+msgstr "Việc truyền dữ liệu bị lỗi: máy phục vụ nói « %s »"
+
+#: ../app/dialogs/module-dialog.c:477 ../objects/UML/class_dialog.c:2050
+#: ../glade/search.glade.h:6
+msgid "Query"
+msgstr "Truy vấn"
+
+#: methods/ftp.cc:1106
+msgid "Unable to invoke "
+msgstr "Không thể gọi "
+
+#: methods/connect.cc:64
+#, c-format
+msgid "Connecting to %s (%s)"
+msgstr "Đang kết nối đến %s (%s)..."
+
+#: methods/connect.cc:71
+#, c-format
+msgid "[IP: %s %s]"
+msgstr "[Địa chỉ IP: %s %s]"
+
+#: methods/connect.cc:80
+#, c-format
+msgid "Could not create a socket for %s (f=%u t=%u p=%u)"
+msgstr "Không thể tạo ổ cắm cho %s (f=%u t=%u p=%u)"
+
+#: methods/connect.cc:86
+#, c-format
+msgid "Cannot initiate the connection to %s:%s (%s)."
+msgstr "Không thể sơ khởi kết nối đến %s:%s (%s)."
+
+#: methods/connect.cc:93 methods/connect.cc:92
+#, c-format
+msgid "Could not connect to %s:%s (%s), connection timed out"
+msgstr "Không thể kết nối đến %s:%s (%s), kết nối đã quá giờ"
+
+#: methods/connect.cc:106 methods/connect.cc:104
+#, c-format
+msgid "Could not connect to %s:%s (%s)."
+msgstr "Không thể kết nối đến %s:%s (%s)."
+
+#: methods/connect.cc:134 methods/rsh.cc:425 methods/connect.cc:132
+#, c-format
+msgid "Connecting to %s"
+msgstr "Đang kết nối đến %s..."
+
+#: methods/connect.cc:165 methods/connect.cc:163
+#, c-format
+msgid "Could not resolve '%s'"
+msgstr "Không thể phân giải « %s »"
+
+#: methods/connect.cc:171 methods/connect.cc:167
+#, c-format
+msgid "Temporary failure resolving '%s'"
+msgstr "Việc phân giải « %s » bị lỗi tạm thời"
+
+#: methods/connect.cc:174 methods/connect.cc:169
+#, c-format
+msgid "Something wicked happened resolving '%s:%s' (%i)"
+msgstr "Gặp lỗi nghiêm trọng khi phân giải « %s:%s » (%i)"
+
+#: methods/connect.cc:221 methods/connect.cc:216
+#, c-format
+msgid "Unable to connect to %s %s:"
+msgstr "Không thể kết nối đến %s %s:"
+
+#: methods/gpgv.cc:92
+msgid "E: Argument list from Acquire::gpgv::Options too long. Exiting."
+msgstr "E: Danh sách đối số từ « Acquire::gpgv::Options » quá dài nên thoát."
+
+#: methods/gpgv.cc:191
+msgid ""
+"Internal error: Good signature, but could not determine key fingerprint?!"
+msgstr "Lỗi nội bộ: chữ ký đúng, nhưng không thể quyết định vân tay khoá ?!"
+
+#: methods/gpgv.cc:196
+msgid "At least one invalid signature was encountered."
+msgstr "Gặp ít nhất một chữ ký không hợp lệ."
+
+#. FIXME String concatenation considered harmful.
+#: methods/gpgv.cc:201
+msgid "Could not execute "
+msgstr "Không thể thực hiện "
+
+#: methods/gpgv.cc:202
+msgid " to verify signature (is gnupg installed?)"
+msgstr " để kiểm chứng chữ ký (gnupg có được cài đặt chưa?)"
+
+#: methods/gpgv.cc:206
+msgid "Unknown error executing gpgv"
+msgstr "Gặp lỗi lạ khi thực hiện gpgv"
+
+#: methods/gpgv.cc:237
+msgid "The following signatures were invalid:\n"
+msgstr "Những chữ ký theo đây là không hợp lệ:\n"
+
+#: methods/gpgv.cc:244
+msgid ""
+"The following signatures couldn't be verified because the public key is not "
+"available:\n"
+msgstr ""
+"Không thể kiểm chứng những chữ ký theo đây, vì khoá công không sẵn sàng:\n"
+
+#: methods/gzip.cc:57
+#, c-format
+msgid "Couldn't open pipe for %s"
+msgstr "Không thể mở ống dẫn cho %s"
+
+#: methods/gzip.cc:102
+#, c-format
+msgid "Read error from %s process"
+msgstr "Gặp lỗi đọc từ tiến trình %s"
+
+#: methods/http.cc:376 methods/http.cc:344
+msgid "Waiting for headers"
+msgstr "Đang đợi những phần đầu"
+
+#: methods/http.cc:522 methods/http.cc:490
+#, c-format
+msgid "Got a single header line over %u chars"
+msgstr "Đã lấy một dòng đầu riêng lẻ chứa hơn %u ký tự"
+
+#: methods/http.cc:530 methods/http.cc:498
+msgid "Bad header line"
+msgstr "Dòng đầu sai"
+
+#: methods/http.cc:549 methods/http.cc:556
+msgid "The HTTP server sent an invalid reply header"
+msgstr "Máy phục vụ HTTP đã gởi một dòng đầu trả lời không hợp lệ"
+
+#: methods/http.cc:585
+msgid "The HTTP server sent an invalid Content-Length header"
+msgstr ""
+"Máy phục vụ HTTP đã gởi một dòng đầu Content-Length (độ dài nội dung) không "
+"hợp lệ"
+
+#: methods/http.cc:600
+msgid "The HTTP server sent an invalid Content-Range header"
+msgstr ""
+"Máy phục vụ HTTP đã gởi một dòng đầu Content-Range (phạm vi nội dung) không "
+"hợp lệ"
+
+#: methods/http.cc:602
+msgid "This HTTP server has broken range support"
+msgstr "Máy phục vụ HTTP này có hỗ trợ phạm vi bị hỏng"
+
+#: methods/http.cc:626 methods/http.cc:594
+msgid "Unknown date format"
+msgstr "Không biết dạng ngày đó"
+
+#: methods/http.cc:773 methods/http.cc:737
+msgid "Select failed"
+msgstr "Việc chọn bị lỗi"
+
+#: methods/http.cc:778 methods/http.cc:742
+msgid "Connection timed out"
+msgstr "Kết nối đã quá giờ"
+
+#: methods/http.cc:801 methods/http.cc:765
+msgid "Error writing to output file"
to output file" +msgstr "Gặp lá»—i khi ghi vào tập tin xuất" + +#: methods/http.cc:832 methods/http.cc:793 +msgid "Error writing to file" +msgstr "Gặp lá»—i khi ghi vào tập tin" + +#: methods/http.cc:860 methods/http.cc:818 +msgid "Error writing to the file" +msgstr "Gặp lá»—i khi ghi vào tập tin đó" + +#: methods/http.cc:874 +msgid "Error reading from server. Remote end closed connection" +msgstr "Gặp lá»—i khi Ä‘á»c từ máy phục vụ : cuối ở xa đã đóng kết nối" + +#: methods/http.cc:876 methods/http.cc:834 +msgid "Error reading from server" +msgstr "Gặp lá»—i khi Ä‘á»c từ máy phục vụ" + +#: methods/http.cc:1107 +msgid "Bad header data" +msgstr "Dữ liệu dòng đầu sai" + +#: methods/http.cc:1124 ../libgames-support/games-network.c:357 +#: methods/http.cc:1082 +msgid "Connection failed" +msgstr "Kết nối bị ngắt" + +#: src/err-codes.h:91 ../libmuine/player-xine.c:398 ../glom/base_db.cc:78 +#: ../glom/connectionpool.cc:348 libexif/olympus/mnote-olympus-entry.c:314 +msgid "Internal error" +msgstr "Lá»—i ná»™i bá»™" + +#: apt-pkg/contrib/mmap.cc:82 +msgid "Can't mmap an empty file" +msgstr "Không thể mmap (ảnh xạ bá»™ nhá»›) tâp tin rá»—ng" + +#: apt-pkg/contrib/mmap.cc:87 +#, c-format +msgid "Couldn't make mmap of %lu bytes" +msgstr "Không thể tạo mmap (ảnh xạ bá»™ nhá»›) kích cỡ %lu byte" + +#: apt-pkg/contrib/strutl.cc:938 apt-pkg/contrib/strutl.cc:941 +#, c-format +msgid "Selection %s not found" +msgstr "Không tìm thấy vùng chá»n %s" + +#: apt-pkg/contrib/configuration.cc:436 apt-pkg/contrib/configuration.cc:395 +#, c-format +msgid "Unrecognized type abbreviation: '%c'" +msgstr "Không nhận biết viết tắt kiểu : « %c »" + +#: apt-pkg/contrib/configuration.cc:494 apt-pkg/contrib/configuration.cc:453 +#, c-format +msgid "Opening configuration file %s" +msgstr "Äang mở tập tin cấu hình %s..." + +#: apt-pkg/contrib/configuration.cc:512 apt-pkg/contrib/configuration.cc:471 +#, c-format +msgid "Line %d too long (max %d)" +msgstr "Dòng %d quá dài (tối Ä‘a %d)" + +#: apt-pkg/contrib/configuration.cc:608 apt-pkg/contrib/configuration.cc:567 +#, c-format +msgid "Syntax error %s:%u: Block starts with no name." +msgstr "Gặp lá»—i cú pháp %s:%u: khối bắt đầu không có tên." 
+
#: apt-pkg/contrib/configuration.cc:627
+#, c-format
+msgid "Syntax error %s:%u: Malformed tag"
+msgstr "Gặp lỗi cú pháp %s:%u: thẻ dạng sai"
+
#: apt-pkg/contrib/configuration.cc:644 apt-pkg/contrib/configuration.cc:603
+#, c-format
+msgid "Syntax error %s:%u: Extra junk after value"
+msgstr "Gặp lỗi cú pháp %s:%u: có rác thêm sau giá trị"
+
#: apt-pkg/contrib/configuration.cc:684 apt-pkg/contrib/configuration.cc:643
+#, c-format
+msgid "Syntax error %s:%u: Directives can only be done at the top level"
+msgstr "Gặp lỗi cú pháp %s:%u: có thể thực hiện chỉ thị chỉ tại mức đầu"
+
#: apt-pkg/contrib/configuration.cc:691 apt-pkg/contrib/configuration.cc:650
+#, c-format
+msgid "Syntax error %s:%u: Too many nested includes"
+msgstr "Gặp lỗi cú pháp %s:%u: quá nhiều điều bao gồm lồng nhau"
+
#: apt-pkg/contrib/configuration.cc:695 apt-pkg/contrib/configuration.cc:700
+#: apt-pkg/contrib/configuration.cc:654 apt-pkg/contrib/configuration.cc:659
+#, c-format
+msgid "Syntax error %s:%u: Included from here"
+msgstr "Gặp lỗi cú pháp %s:%u: đã bao gồm từ đây"
+
#: apt-pkg/contrib/configuration.cc:704 apt-pkg/contrib/configuration.cc:663
+#, c-format
+msgid "Syntax error %s:%u: Unsupported directive '%s'"
+msgstr "Gặp lỗi cú pháp %s:%u: chưa hỗ trợ chỉ thị « %s »"
+
#: apt-pkg/contrib/configuration.cc:738 apt-pkg/contrib/configuration.cc:697
+#, c-format
+msgid "Syntax error %s:%u: Extra junk at end of file"
+msgstr "Gặp lỗi cú pháp %s:%u: rác thêm tại kết thúc tập tin"
+
#: apt-pkg/contrib/progress.cc:154
+#, c-format
+msgid "%c%s... Error!"
+msgstr "%c%s... Lỗi."
+
#: apt-pkg/contrib/progress.cc:156
+#, c-format
+msgid "%c%s... Done"
+msgstr "%c%s... Xong"
+
#: apt-pkg/contrib/cmndline.cc:80
+#, c-format
+msgid "Command line option '%c' [from %s] is not known."
+msgstr "Không biết tùy chọn dòng lệnh « %c » [từ %s]."
+
#: apt-pkg/contrib/cmndline.cc:106 apt-pkg/contrib/cmndline.cc:114
+#: apt-pkg/contrib/cmndline.cc:122
+#, c-format
+msgid "Command line option %s is not understood"
+msgstr "Không hiểu tùy chọn dòng lệnh %s"
+
#: apt-pkg/contrib/cmndline.cc:127
+#, c-format
+msgid "Command line option %s is not boolean"
+msgstr "Tùy chọn dòng lệnh %s không phải bun (đúng/không đúng)"
+
#: apt-pkg/contrib/cmndline.cc:166 apt-pkg/contrib/cmndline.cc:187
+#, c-format
+msgid "Option %s requires an argument."
+msgstr "Tùy chọn %s cần đến một đối số."
+
#: apt-pkg/contrib/cmndline.cc:201 apt-pkg/contrib/cmndline.cc:207
+#, c-format
+msgid "Option %s: Configuration item specification must have an =."
+msgstr "Tùy chọn %s: đặc tả mục cấu hình phải có một « = »."
+
#: apt-pkg/contrib/cmndline.cc:237
+#, c-format
+msgid "Option %s requires an integer argument, not '%s'"
+msgstr "Tùy chọn %s cần đến một đối số số nguyên, không phải « %s »"
+
#: apt-pkg/contrib/cmndline.cc:268
+#, c-format
+msgid "Option '%s' is too long"
+msgstr "Tùy chọn « %s » quá dài"
+
#: apt-pkg/contrib/cmndline.cc:301
+#, c-format
+msgid "Sense %s is not understood, try true or false."
+msgstr "Không hiểu %s: hãy cố dùng true (đúng) hay false (không đúng)."
+
#: apt-pkg/contrib/cmndline.cc:351
+#, c-format
+msgid "Invalid operation %s"
+msgstr "Thao tác không hợp lệ %s"
+
#: apt-pkg/contrib/cdromutl.cc:55
+#, c-format
+msgid "Unable to stat the mount point %s"
+msgstr "Không thể lấy các thông tin cho điểm gắn kết %s"
+
#: apt-pkg/contrib/cdromutl.cc:149 apt-pkg/acquire.cc:427 apt-pkg/clean.cc:44
+#: apt-pkg/acquire.cc:422
+#, c-format
+msgid "Unable to change to %s"
+msgstr "Không thể chuyển đổi sang %s"
+
#: apt-pkg/contrib/cdromutl.cc:190
+msgid "Failed to stat the cdrom"
+msgstr "Việc lấy cac thông tin cho đĩa CD-ROM bị lỗi"
+
#: apt-pkg/contrib/fileutl.cc:82 apt-pkg/contrib/fileutl.cc:80
+#, c-format
+msgid "Not using locking for read only lock file %s"
+msgstr "Không dùng khả năng khoá cho tập tin khoá chỉ đọc %s"
+
#: apt-pkg/contrib/fileutl.cc:87 apt-pkg/contrib/fileutl.cc:85
+#, c-format
+msgid "Could not open lock file %s"
+msgstr "Không thể mở tập tin khoá %s"
+
#: apt-pkg/contrib/fileutl.cc:105 apt-pkg/contrib/fileutl.cc:103
+#, c-format
+msgid "Not using locking for nfs mounted lock file %s"
+msgstr "Không dùng khả năng khoá cho tập tin khoá đã lắp kiểu NFS %s"
+
#: apt-pkg/contrib/fileutl.cc:109 apt-pkg/contrib/fileutl.cc:107
+#, c-format
+msgid "Could not get lock %s"
+msgstr "Không thể lấy khoá %s"
+
#: apt-pkg/contrib/fileutl.cc:377
+#, c-format
+msgid "Waited for %s but it wasn't there"
+msgstr "Đã đợi %s nhưng mà chưa gặp nó"
+
#: apt-pkg/contrib/fileutl.cc:387 apt-pkg/contrib/fileutl.cc:368
+#, c-format
+msgid "Sub-process %s received a segmentation fault."
+msgstr "Tiến trình con %s đã nhận một lỗi chia ra từng đoạn."
+
#: apt-pkg/contrib/fileutl.cc:390 apt-pkg/contrib/fileutl.cc:371
+#, c-format
+msgid "Sub-process %s returned an error code (%u)"
+msgstr "Tiến trình con %s đã trả lời mã lỗi (%u)"
+
#: apt-pkg/contrib/fileutl.cc:392 apt-pkg/contrib/fileutl.cc:373
+#, c-format
+msgid "Sub-process %s exited unexpectedly"
+msgstr "Tiến trình con %s đã thoát bất ngờ"
+
#: ../providers/xbase/gda-xbase-provider.c:246
+#, c-format
+msgid "Could not open file %s"
+msgstr "Không thể mở tập tin %s"
+
#: apt-pkg/contrib/fileutl.cc:492 apt-pkg/contrib/fileutl.cc:473
+#, c-format
+msgid "read, still have %lu to read but none left"
+msgstr "đọc, còn cần đọc %lu nhưng mà không có điều còn lại"
+
#: apt-pkg/contrib/fileutl.cc:522 apt-pkg/contrib/fileutl.cc:503
+#, c-format
+msgid "write, still have %lu to write but couldn't"
+msgstr "ghi, còn cần ghi %lu nhưng mà không thể"
+
#: apt-pkg/contrib/fileutl.cc:597 apt-pkg/contrib/fileutl.cc:578
+msgid "Problem closing the file"
+msgstr "Gặp lỗi khi đóng tập tin đó"
+
#: apt-pkg/contrib/fileutl.cc:603 apt-pkg/contrib/fileutl.cc:584
+msgid "Problem unlinking the file"
+msgstr "Gặp lỗi khi bỏ liên kết tập tin đó"
+
#: apt-pkg/contrib/fileutl.cc:614 apt-pkg/contrib/fileutl.cc:595
+msgid "Problem syncing the file"
+msgstr "Gặp lỗi khi đồng bộ hóa tập tin đó"
+
#: apt-pkg/pkgcache.cc:126
+msgid "Empty package cache"
+msgstr "Bộ nhớ tạm gói rỗng"
+
#: apt-pkg/pkgcache.cc:132
+msgid "The package cache file is corrupted"
+msgstr "Tập tin bộ nhớ tạm gói bị hỏng"
+
#: apt-pkg/pkgcache.cc:137
+msgid "The package cache file is an incompatible version"
+msgstr "Tập tin bộ nhớ tạm gói là một phiên bản không tương thích"
+
#: apt-pkg/pkgcache.cc:142
+#, c-format
+msgid "This APT does not support the versioning system '%s'"
+msgstr "Trình APT này không hỗ trợ hệ thống điều khiển phiên bản « %s »"
+
#: apt-pkg/pkgcache.cc:147
+msgid "The package cache was built for a different architecture"
+msgstr "Bộ nhớ tạm gói được xây dụng cho kiến trức khác"
+
#: apt-pkg/pkgcache.cc:218 src/cmdline/cmdline_show.cc:311
+#: src/cmdline/cmdline_show.cc:310
+msgid "Depends"
+msgstr "Phụ thuộc"
+
#: apt-pkg/pkgcache.cc:218 src/cmdline/cmdline_show.cc:313
+#: src/cmdline/cmdline_show.cc:312
+msgid "PreDepends"
+msgstr "Phụ thuộc trước"
+
#: apt-pkg/pkgcache.cc:218 src/cmdline/cmdline_show.cc:317
+#: src/cmdline/cmdline_show.cc:316
+msgid "Suggests"
+msgstr "Đệ nghị"
+
#: apt-pkg/pkgcache.cc:219 src/cmdline/cmdline_show.cc:315
+#: src/cmdline/cmdline_show.cc:314
+msgid "Recommends"
+msgstr "Khuyên"
+
#: apt-pkg/pkgcache.cc:219 ../objects/KAOS/metabinrel.c:157
+#: src/cmdline/cmdline_show.cc:319 src/cmdline/cmdline_show.cc:318
+msgid "Conflicts"
+msgstr "Xung đột"
+
#: apt-pkg/pkgcache.cc:219 src/cmdline/cmdline_show.cc:321
+#: src/cmdline/cmdline_show.cc:320
+msgid "Replaces"
+msgstr "Thay thế"
+
#: apt-pkg/pkgcache.cc:220 src/cmdline/cmdline_show.cc:323
+#: src/cmdline/cmdline_show.cc:322
+msgid "Obsoletes"
+msgstr "Làm cũ"
+
#: apt-pkg/pkgcache.cc:231
+msgid "important"
+msgstr "quan trọng"
+
#: apt-pkg/pkgcache.cc:231
+msgid "required"
+msgstr "cần"
+
#: ../partman-basicfilesystems.templates:147
+msgid "standard"
+msgstr "chuẩn"
+
#: apt-pkg/pkgcache.cc:232
+msgid "optional"
+msgstr "tùy chọn"
+
#: apt-pkg/pkgcache.cc:232
+msgid "extra"
+msgstr "thêm"
+
#: apt-pkg/depcache.cc:60 apt-pkg/depcache.cc:89
+msgid "Building dependency tree"
+msgstr "Đang xây dụng cây cách phụ thuộc..."
+
#: apt-pkg/depcache.cc:61
+msgid "Candidate versions"
+msgstr "Phiên bản ứng cử"
+
#: apt-pkg/depcache.cc:90
+msgid "Dependency generation"
+msgstr "Tạo ra cách phụ thuộc"
+
#: apt-pkg/tagfile.cc:73 apt-pkg/tagfile.cc:71
+#, c-format
+msgid "Unable to parse package file %s (1)"
+msgstr "Không thể phân tách tập tin gói %s (1)"
+
#: apt-pkg/tagfile.cc:160 apt-pkg/tagfile.cc:158
+#, c-format
+msgid "Unable to parse package file %s (2)"
+msgstr "Không thể phân tách tập tin gói %s (2)"
+
#: apt-pkg/sourcelist.cc:94 apt-pkg/sourcelist.cc:88
+#, c-format
+msgid "Malformed line %lu in source list %s (URI)"
+msgstr "Gặp dòng dạng sai %lu trong danh sách nguồn %s (địa chỉ Mạng)"
+
#: apt-pkg/sourcelist.cc:96 apt-pkg/sourcelist.cc:90
+#, c-format
+msgid "Malformed line %lu in source list %s (dist)"
+msgstr "Gặp dòng dạng sai %lu trong danh sách nguồn %s (bản phân phối)"
+
#: apt-pkg/sourcelist.cc:99 apt-pkg/sourcelist.cc:93
+#, c-format
+msgid "Malformed line %lu in source list %s (URI parse)"
+msgstr ""
+"Gặp dòng dạng sai %lu trong danh sách nguồn %s (phân tách địa chỉ Mạng)."
+
#: apt-pkg/sourcelist.cc:105
+#, c-format
+msgid "Malformed line %lu in source list %s (absolute dist)"
+msgstr ""
+"Gặp dòng dạng sai %lu trong danh sách nguồn %s (bản phân phối tuyệt đối)"
+
#: apt-pkg/sourcelist.cc:112 apt-pkg/sourcelist.cc:106
+#, c-format
+msgid "Malformed line %lu in source list %s (dist parse)"
+msgstr ""
+"Gặp dòng dạng sai %lu trong danh sách nguồn %s (phân tách bản phân phối)"
+
#: apt-pkg/sourcelist.cc:235
+#, c-format
+msgid "Opening %s"
+msgstr "Đang mở %s..."
+
#: apt-pkg/sourcelist.cc:220 apt-pkg/cdrom.cc:426 apt-pkg/sourcelist.cc:249
+#, c-format
+msgid "Line %u too long in source list %s."
+msgstr "Dòng %u quá dài trong danh sách nguồn %s."
+
#: apt-pkg/sourcelist.cc:240 apt-pkg/sourcelist.cc:266
+#, c-format
+msgid "Malformed line %u in source list %s (type)"
+msgstr "Gặp dòng dạng sai %u trong danh sách nguồn %s (kiểu)."
+
#: apt-pkg/sourcelist.cc:244 apt-pkg/sourcelist.cc:270
+#, c-format
+msgid "Type '%s' is not known on line %u in source list %s"
+msgstr "Không biết kiểu « %s » trên dòng %u trong danh sách nguồn %s"
+
#: apt-pkg/sourcelist.cc:252 apt-pkg/sourcelist.cc:255
+#: apt-pkg/sourcelist.cc:279 apt-pkg/sourcelist.cc:282
+#, c-format
+msgid "Malformed line %u in source list %s (vendor id)"
+msgstr "Gặp dòng dạng sai %u trong danh sách nguồn %s (mã nhận biết nhà bán)"
+
#: apt-pkg/packagemanager.cc:402
+#, c-format
+msgid ""
+"This installation run will require temporarily removing the essential "
+"package %s due to a Conflicts/Pre-Depends loop. This is often bad, but if "
+"you really want to do it, activate the APT::Force-LoopBreak option."
+msgstr ""
+"Việc chạy tiến trình cài đặt này sẽ cần thiết gỡ bỏ tạm gói chủ yếu %s, do "
+"vong lăp Xung đột/Phụ thuộc trước. Trường hợp này thường xấu, nhưng mà nếu "
+"bạn thật sự muốn tiếp tục, có thể hoạt hóa tuy chọn « APT::Force-LoopBreak "
+"» (buộc ngắt vòng lặp)."
+
#: apt-pkg/pkgrecords.cc:37
+#, c-format
+msgid "Index file type '%s' is not supported"
+msgstr "Không hỗ trợ kiểu tập tin chỉ mục « %s »"
+
#: apt-pkg/algorithms.cc:241 apt-pkg/algorithms.cc:238
+#, c-format
+msgid ""
+"The package %s needs to be reinstalled, but I can't find an archive for it."
+msgstr "Cần phải cài đặt lại gói %s, nhưng mà không thể tìm kho cho nó."
+
#: apt-pkg/algorithms.cc:1059 apt-pkg/algorithms.cc:1056
+msgid ""
+"Error, pkgProblemResolver::Resolve generated breaks, this may be caused by "
+"held packages."
+msgstr ""
+"Lỗi: « pkgProblemResolver::Resolve » (bộ tháo gỡ vấn đề gọi::tháo gỡ) đã tạo "
+"ra nhiều chỗ ngắt, có lẽ một số gói đã giữ lại đã gây ra trường hợp này."
+
#: apt-pkg/algorithms.cc:1061 apt-pkg/algorithms.cc:1058
+msgid "Unable to correct problems, you have held broken packages."
+msgstr "Không thể sửa vấn đề, bạn đã giữ lại một số gói bị ngắt."
+
#: apt-pkg/acquire.cc:62 apt-pkg/acquire.cc:61
+#, c-format
+msgid "Lists directory %spartial is missing."
+msgstr "Thiếu thư mục danh sách « %spartial »."
+
#: apt-pkg/acquire.cc:66 apt-pkg/acquire.cc:65
+#, c-format
+msgid "Archive directory %spartial is missing."
+msgstr "Thiếu thư mục kho « %spartial »."
+
#: apt-pkg/acquire.cc:821
+#, c-format
+msgid "Downloading file %li of %li (%s remaining)"
+msgstr "Đang tải về tập tin %li trên %li (%s còn lại)"
+
#: apt-pkg/acquire-worker.cc:113 apt-pkg/acquire-worker.cc:112
+#, c-format
+msgid "The method driver %s could not be found."
+msgstr "Không tìm thấy trình điều khiển phương pháp %s."
+
#: apt-pkg/acquire-worker.cc:162 apt-pkg/acquire-worker.cc:161
+#, c-format
+msgid "Method %s did not start correctly"
+msgstr "Phương pháp %s đã không bắt đầu cho đúng."
+
#: apt-pkg/acquire-worker.cc:377
+#, c-format
+msgid "Please insert the disc labeled: '%s' in the drive '%s' and press enter."
+msgstr "Hãy nạp đĩa có nhãn « %s » vào ổ « %s » và bấm nút Enter."
+
#: apt-pkg/init.cc:120 apt-pkg/init.cc:119
+#, c-format
+msgid "Packaging system '%s' is not supported"
+msgstr "Không hỗ trợ hệ thống đóng gói « %s »"
+
#: apt-pkg/init.cc:136 apt-pkg/init.cc:135
+msgid "Unable to determine a suitable packaging system type"
+msgstr "Không thể quyết định kiểu hệ thống đóng gói thích hợp"
+
#: apt-pkg/clean.cc:61
+#, c-format
+msgid "Unable to stat %s."
+msgstr "Không thể lấy các thông tin về %s."
+
#: apt-pkg/srcrecords.cc:48 apt-pkg/srcrecords.cc:49
+msgid "You must put some 'source' URIs in your sources.list"
+msgstr ""
+"Bạn phải để một số địa chỉ Mạng « nguồn » vào « sources.list » (danh sách "
+"nguồn)"
+
#: apt-pkg/cachefile.cc:73 src/generic/aptcache.cc:1580
+#: src/generic/aptcache.cc:1579
+msgid "The package lists or status file could not be parsed or opened."
+msgstr "Không thể phân tách hay mở danh sách gói hay tâp tin trạng thái."
+
#: apt-pkg/cachefile.cc:77
+msgid "You may want to run apt-get update to correct these problems"
+msgstr ""
+"Có lẽ bạn muốn chạy « apt-get update » (lấy cập nhật) để sửa các vấn đề này"
+
#: apt-pkg/policy.cc:269
+msgid "Invalid record in the preferences file, no Package header"
+msgstr ""
+"Gặp mục ghi không hợp lệ trong tập tin tùy thích: không có phần đầu Package "
+"(Gói)."
+
#: apt-pkg/policy.cc:291
+#, c-format
+msgid "Did not understand pin type %s"
+msgstr "Không hiểu kiểu ghim %s"
+
#: apt-pkg/policy.cc:299
+msgid "No priority (or zero) specified for pin"
+msgstr "Chưa ghi rõ ưu tiên (hay số không) cho ghim"
+
#: apt-pkg/pkgcachegen.cc:74
+msgid "Cache has an incompatible versioning system"
+msgstr "Bộ nhớ tạm có hệ thống điêu khiển phiên bản không tương thích"
+
#: apt-pkg/pkgcachegen.cc:117
+#, c-format
+msgid "Error occurred while processing %s (NewPackage)"
+msgstr "Gặp lỗi khi xử lý %s (NewPackage - gói mới)"
+
#: apt-pkg/pkgcachegen.cc:129
+#, c-format
+msgid "Error occurred while processing %s (UsePackage1)"
+msgstr "Gặp lỗi khi xử lý %s (UsePackage1 - dùng gói 1)"
+
#: apt-pkg/pkgcachegen.cc:150
+#, c-format
+msgid "Error occurred while processing %s (UsePackage2)"
+msgstr "Gặp lỗi khi xử lý %s (UsePackage2 - dùng gói 2)"
+
#: apt-pkg/pkgcachegen.cc:154
+#, c-format
+msgid "Error occurred while processing %s (NewFileVer1)"
+msgstr "Gặp lỗi khi xử lý %s (NewFileVer1 - tập tin mới, phiên bản 1)"
+
#: apt-pkg/pkgcachegen.cc:184
+#, c-format
+msgid "Error occurred while processing %s (NewVersion1)"
+msgstr "Gặp lỗi khi xử lý %s (NewVersion1 - phiên bản mới 1)"
+
#: apt-pkg/pkgcachegen.cc:188
+#, c-format
+msgid "Error occurred while processing %s (UsePackage3)"
+msgstr "Gặp lỗi khi xử lý %s (UsePackage3 - dùng gói 3)"
+
#: apt-pkg/pkgcachegen.cc:192
+#, c-format
+msgid "Error occurred while processing %s (NewVersion2)"
+msgstr "Gặp lỗi khi xử lý %s (NewVersion2 - phiên ban mới 2)"
+
#: apt-pkg/pkgcachegen.cc:207
+msgid "Wow, you exceeded the number of package names this APT is capable of."
+msgstr "Ồ, bạn đã vượt quá số tên gói mà trình APT này có thể quản lý."
+
#: apt-pkg/pkgcachegen.cc:210
+msgid "Wow, you exceeded the number of versions this APT is capable of."
+msgstr "Ồ, bạn đã vượt quá số phiên bản mà trình APT này có thể quản lý."
+
#: apt-pkg/pkgcachegen.cc:213
+msgid "Wow, you exceeded the number of dependencies this APT is capable of."
+msgstr "Ồ, bạn đã vượt quá số cách phụ thuộc mà trình APT này có thể quản lý."
+
#: apt-pkg/pkgcachegen.cc:241
+#, c-format
+msgid "Error occurred while processing %s (FindPkg)"
+msgstr "Gặp lỗi khi xử lý %s (FindPkg - tìm gói)"
+
#: apt-pkg/pkgcachegen.cc:254
+#, c-format
+msgid "Error occurred while processing %s (CollectFileProvides)"
+msgstr ""
+"Gặp lỗi khi xử lý %s (CollectFileProvides - tập hợp các trường hợp miễn là "
+"một tập tin)"
+
#: apt-pkg/pkgcachegen.cc:260
+#, c-format
+msgid "Package %s %s was not found while processing file dependencies"
+msgstr "Không tìm thấy gói %s %s khi xử lý cách phụ thuộc của/vào tập tin"
+
#: apt-pkg/pkgcachegen.cc:574
+#, c-format
+msgid "Couldn't stat source package list %s"
+msgstr "Không thể lấy các thông tin về danh sách gói nguồn %s"
+
#: apt-pkg/pkgcachegen.cc:658
+msgid "Collecting File Provides"
+msgstr "Đang tập hợp các trường hợp « tập tin miễn là »"
+
#: apt-pkg/pkgcachegen.cc:785 apt-pkg/pkgcachegen.cc:792
+#: apt-pkg/pkgcachegen.cc:774 apt-pkg/pkgcachegen.cc:781
+msgid "IO Error saving source cache"
+msgstr "Lỗi nhập/xuất khi lưu bộ nhớ tạm nguồn"
+
#: apt-pkg/acquire-item.cc:126 apt-pkg/acquire-item.cc:124
+#, c-format
+msgid "rename failed, %s (%s -> %s)."
+msgstr "việc thay đổi tên bị lỗi, %s (%s → %s)."
+
#: apt-pkg/acquire-item.cc:236 apt-pkg/acquire-item.cc:950
+#: apt-pkg/acquire-item.cc:511
+msgid "MD5Sum mismatch"
+msgstr "MD5Sum (tổng kiểm) không khớp được"
+
#: apt-pkg/acquire-item.cc:645
+msgid "There are no public key available for the following key IDs:\n"
+msgstr "Không có khoá công sẵn sàng cho những ID khoá theo đây:\n"
+
#: apt-pkg/acquire-item.cc:758 src/generic/pkg_acqfile.cc:86
+#: apt-pkg/acquire-item.cc:353
+#, c-format
+msgid ""
+"I wasn't able to locate a file for the %s package. This might mean you need "
+"to manually fix this package. (due to missing arch)"
+msgstr ""
+"Không tìm thấy tập tin liên quan đến gói %s. Có lẽ bạn cần phải tự sửa gói "
+"này, do thiếu kiến trúc."
+
#: apt-pkg/acquire-item.cc:817 apt-pkg/acquire-item.cc:388
+#, c-format
+msgid ""
+"I wasn't able to locate file for the %s package. This might mean you need to "
+"manually fix this package."
+msgstr ""
+"Không tìm thấy tập tin liên quan đến gói %s. Có lẽ bạn cần phải tự sửa gói "
+"này."
+
#: apt-pkg/acquire-item.cc:853 src/generic/pkg_acqfile.cc:134
+#: apt-pkg/acquire-item.cc:419
+#, c-format
+msgid ""
+"The package index files are corrupted. No Filename: field for package %s."
+msgstr ""
+"Các tập tin chỉ mục của gói này bị hỏng. Không có trường Filename: (Tên tập "
+"tin:) cho gói %s."
+
#: apt-pkg/acquire-item.cc:940 apt-pkg/acquire-item.cc:501
+msgid "Size mismatch"
+msgstr "Kích cỡ không khớp được"
+
#: apt-pkg/vendorlist.cc:66
+#, c-format
+msgid "Vendor block %s contains no fingerprint"
+msgstr "Khối nhà bán %s không chứa vân tay"
+
#: apt-pkg/cdrom.cc:507
+#, c-format
+msgid ""
+"Using CD-ROM mount point %s\n"
+"Mounting CD-ROM\n"
+msgstr ""
+"Đang dùng điểm lắp đĩa CD-ROM %s\n"
+"Đang lắp đĩa CD-ROM...\n"
+
#: apt-pkg/cdrom.cc:516 apt-pkg/cdrom.cc:598
+msgid "Identifying.. "
+msgstr "Đang nhận diện... "
+
#: apt-pkg/cdrom.cc:541
+#, c-format
+msgid "Stored label: %s \n"
+msgstr "Nhãn đã lưu : %s\n"
+
#: apt-pkg/cdrom.cc:561
+#, c-format
+msgid "Using CD-ROM mount point %s\n"
+msgstr "Đang dùng điểm lắp đĩa CD-ROM %s\n"
+
#: apt-pkg/cdrom.cc:579
+msgid "Unmounting CD-ROM\n"
+msgstr "Đang tháo lắp đĩa CD-ROM...\n"
+
#: apt-pkg/cdrom.cc:583
+msgid "Waiting for disc...\n"
+msgstr "Đang đợi đĩa...\n"
+
#. Mount the new CDROM
+#: apt-pkg/cdrom.cc:591
+msgid "Mounting CD-ROM...\n"
+msgstr "Đang lắp đĩa CD-ROM...\n"
+
#: apt-pkg/cdrom.cc:609
+msgid "Scanning disc for index files..\n"
+msgstr "Đang quét đĩa tìm tập tin chỉ mục...\n"
+
#: apt-pkg/cdrom.cc:647
+#, c-format
+msgid "Found %i package indexes, %i source indexes and %i signatures\n"
+msgstr "Mới tìm %i chỉ mục gói, %i chỉ mục nguồn và %i chữ ký\n"
+
#: apt-pkg/cdrom.cc:710
+msgid "That is not a valid name, try again.\n"
+msgstr "Nó không phải là một tên hợp lệ: hãy thử lại.\n"
+
#: apt-pkg/cdrom.cc:726
+#, c-format
+msgid ""
+"This disc is called: \n"
+"'%s'\n"
+msgstr ""
+"Tên đĩa này:\n"
+"%s\n"
+
#: apt-pkg/cdrom.cc:730
+msgid "Copying package lists..."
+msgstr "Đang sao chép các danh sách gói..."
+
#: apt-pkg/cdrom.cc:754
+msgid "Writing new source list\n"
+msgstr "Đang ghi danh sách nguồn mới...\n"
+
#: apt-pkg/cdrom.cc:763
+msgid "Source list entries for this disc are:\n"
+msgstr "Các mục nhập danh sách nguồn cho đĩa này:\n"
+
#: apt-pkg/cdrom.cc:803
+msgid "Unmounting CD-ROM..."
+msgstr "Đang tháo lắp đĩa CD-ROM..."
+
#: apt-pkg/indexcopy.cc:261
+#, c-format
+msgid "Wrote %i records.\n"
+msgstr "Mới ghi %i mục ghi.\n"
+
#: apt-pkg/indexcopy.cc:263
+#, c-format
+msgid "Wrote %i records with %i missing files.\n"
+msgstr "Mới ghi %i mục ghi với %i tập tin còn thiếu.\n"
+
#: apt-pkg/indexcopy.cc:266
+#, c-format
+msgid "Wrote %i records with %i mismatched files\n"
+msgstr "Mới ghi %i mục ghi với %i tập tin không khớp với nhau\n"
+
#: apt-pkg/indexcopy.cc:269
+#, c-format
+msgid "Wrote %i records with %i missing files and %i mismatched files\n"
+msgstr ""
+"Mới ghi %i mục ghi với %i tập tin còn thiếu và %i tập tin không khớp với "
+"nhau\n"
+
#: apt-pkg/deb/dpkgpm.cc:359
+#, c-format
+msgid "Unpacking %s"
+msgstr "Đang mở gói %s..."
+
#: apt-pkg/deb/dpkgpm.cc:364
+#, c-format
+msgid "Preparing to configure %s"
+msgstr "Đang chuẩn bị cấu hình %s..."
+
#: apt-pkg/deb/dpkgpm.cc:365 ../Debconf/FrontEnd.pm:203 ../hwconf.c:833
+#, fuzzy, c-format, perl-format
+msgid "Configuring %s"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Đang cấu hình %s...\n"
+"#-#-#-#-# kudzu_1.1.67-1_vi.po (kudzu VERSION) #-#-#-#-#\n"
+"Đang cấu hình %s"
+
#: apt-pkg/deb/dpkgpm.cc:366
+#, c-format
+msgid "Installed %s"
+msgstr "Đã cài đặt %s"
+
#: apt-pkg/deb/dpkgpm.cc:371
+#, c-format
+msgid "Preparing for removal of %s"
+msgstr "Đang chuẩn bị gỡ bỏ %s..."
+
#: apt-pkg/deb/dpkgpm.cc:372
+#, c-format
+msgid "Removing %s"
+msgstr "Đang gỡ bỏ %s..."
+
#: apt-pkg/deb/dpkgpm.cc:373
+#, c-format
+msgid "Removed %s"
+msgstr "Đã gỡ bỏ %s"
+
#: apt-pkg/deb/dpkgpm.cc:378
+#, c-format
+msgid "Preparing for remove with config %s"
+msgstr "Đang chuẩn bị gỡ bỏ với cấu hình %s..."
+
#: apt-pkg/deb/dpkgpm.cc:379
+#, c-format
+msgid "Removed with config %s"
+msgstr "Mới gỡ bỏ với cấu hình %s"
+
#: methods/rsh.cc:330
+msgid "Connection closed prematurely"
+msgstr "Kết nối bị đóng quá sớm."
+
#: src/c.l:137
+msgid "unterminated string?"
+msgstr "chuỗi không được chấm dứt không?"
+
#: src/c.l:296
+#, c-format
+msgid "Command line: %s\n"
+msgstr "Dòng lệnh: %s\n"
+
#: src/c.l:299
+#, c-format
+msgid "cannot execute `%s'"
+msgstr "không thể thực hiện « %s »"
+
#: src/c.l:342 src/rc.c:60
+#, c-format
+msgid "cannot open `%s'"
+msgstr "không thể mở « %s »"
+
#: src/c.l:422
+#, c-format
+msgid "New location: %s:%d\n"
+msgstr "Địa điểm mới: %s:%d\n"
+
#. TRANSLATORS: Please, preserve the vertical tabulation (^K character)
+#. in this message
+#: src/main.c:29
+msgid ""
+"generate a program flowgraph * The effect of each option marked with an "
+"asterisk is reversed if the option's long name is prefixed with `no-'. For "
+"example, --no-cpp cancels --cpp."
+msgstr ""
+"tạo ra một lược đồ chương trình * Hiệu ứng của mỗi tùy chọn có dấu sao có "
+"được đảo ngược nếu tên dài của tùy chọn có tiền tố « no- ». Lấy thí dụ, tùy "
+"chọn « --no-cpp cancels » hủy « --cpp »."
+
#: src/main.c:56
+msgid "General options:"
+msgstr "Tùy chọn chung:"
+
#: src/main.c:57 src/main.c:98
+#: ../addressbook/tools/evolution-addressbook-export.c:63 src/main.c:107
+#: ../gnomine/gnomine.c:862 ../gtali/setup.c:85 ../gtali/setup.c:86
+#: ../same-gnome/same-gnome.c:128 ../gsmclient/gsmclient-test.c:153
+msgid "NUMBER"
+msgstr "SỐ"
+
#: src/main.c:58
+msgid "Set the depth at which the flowgraph is cut off"
+msgstr "Lập độ sâu mà lược đồ bị cắt ra"
+
#: src/main.c:59
+msgid "CLASSES"
+msgstr "HẠNG"
+
#: src/main.c:60
+msgid ""
+"Include specified classes of symbols (see below). Prepend CLASSES with ^ or "
+"- to exclude them from the output"
+msgstr ""
+"Gồm một số hạn ký hiệu đã ghi rõ (xem dưới). Them dấu mũ « ^ » hay dấu trừ « "
+"- » trước các hạng bạn muốn trừ ra dữ liệu xuất."
+
#: ../bonobo/bonobo-ui-init-gtk.c:138 ../gdk/gdk.c:119 lib/argp-parse.c:84
+msgid "NAME"
+msgstr "TÊN"
+
#: src/main.c:62
+msgid ""
+"Use given output format NAME. Valid names are `gnu' (default) and `posix'"
+msgstr ""
+"Dùng TÊN khuôn dạng xuất đã cho. Tên hợp lệ là « gnu » (mặc định) và « posix "
+"»"
+
#: src/main.c:65
+msgid "* Print reverse call tree"
+msgstr "* In ra cây gọi đảo ngược"
+
#: src/main.c:67
+msgid "Produce cross-reference listing only"
+msgstr "Cung cấp chỉ danh sách tham chiếu chéo thôi"
+
#: src/main.c:68
+msgid "OPT"
+msgstr "TCH"
+
#: src/main.c:69
+msgid ""
+"Set printing option to OPT. Valid OPT values are: xref (or cross-ref), tree. "
+"Any unambiguous abbreviation of the above is also accepted"
+msgstr ""
+"Lập tùy chọn in thành TCH. Giá trị TCH hợp lệ là « xref » (tham chiếu chéo) "
+"và « tree » (cây). Cũng chấp nhận được bất cứ từ viết tắt rõ ràng nào của "
+"chúng."
+
#: ../gncal/gnomecal-main.c:94
+msgid "FILE"
+msgstr "TẬP TIN"
+
#: src/main.c:72
+msgid "Set output file name (default -, meaning stdout)"
+msgstr ""
+"Lập tên tập tin xuất (mặc định là « - » mà có nghĩa là thiết bị xuất chuẩn)"
+
#: src/main.c:75
+msgid "Symbols classes for --include argument"
+msgstr "Hạng ký hiệu cho đối số « --include » (gồm)"
+
#: src/main.c:77
+msgid "all data symbols, both external and static"
+msgstr "mọi ký hiệu dữ liệu, cả kiểu bên ngoài lẫn kiểu tĩnh đều"
+
#: src/main.c:79
+msgid "symbols whose names begin with an underscore"
+msgstr "ký hiệu có tên bắt đầu với dấu gạch dưới « _ »"
+
#: src/main.c:81
+msgid "static symbols"
+msgstr "ký hiệu tĩnh"
+
#: src/main.c:83
+msgid "typedefs (for cross-references only)"
+msgstr "typedef (lời định nghĩa kiểu : chỉ cho tham chiếu chéo)"
+
#: src/main.c:89
+msgid "Parser control:"
+msgstr "Điều khiển bộ phân tách:"
+
#: src/main.c:91
+msgid "* Rely on indentation"
+msgstr "* Sở cậy ở thụt lề"
+
#: src/main.c:95
+msgid "* Accept only sources in ANSI C"
+msgstr "* Chấp nhận chỉ mã nguồn bằng ANSI C"
+
#: src/main.c:99
+msgid "Set initial token stack size to NUMBER"
+msgstr "Lập kích cỡ ngăn nhớ ban đầu là SỐ"
+
#: src/main.c:100
+msgid "SYMBOL:TYPE"
+msgstr "KÝ HIỆU: KIỂU"
+
#: src/main.c:101
+msgid ""
+"Register SYMBOL with given TYPE. Valid types are: keyword (or kw), modifier, "
+"identifier, type, wrapper. Any unambiguous abbreviation of the above is also "
+"accepted"
+msgstr ""
+"Đăng ký KÝ HIỆU với KIỂU đã cho. Kiểu hợp lệ là:\n"
+" • keyword (hay kw)\ttừ khoá\n"
+" • modifier\t\t\tbộ sửa đổi\n"
+" • identifier\t\t\tbộ nhận diện\n"
+" • type\t\t\t\tkiểu\n"
+" • wrapper\t\t\tbộ bao bọc\n"
+"Cũng chấp nhận bất cứ từ viết tất rõ ràng nào của điều ở trên."
+
#: src/main.c:103
+msgid "Assume main function to be called NAME"
+msgstr "Giả sử hàm chính sẽ có tên TÊN."
+
#: src/main.c:104
+msgid "NAME[=DEFN]"
+msgstr "TÊN[=LỜI_ĐN]"
+
#: src/main.c:105
+msgid "Predefine NAME as a macro"
+msgstr "Định nghĩa sẵn TÊN là bộ lệnh (macrô)"
+
#: src/main.c:107
+msgid "Cancel any previous definition of NAME"
+msgstr "Hủy bất cứ lời định nghĩa trước nào của TÊN"
+
#: src/main.c:108 ../utils/nautilus-actions-convert.c:44
+msgid "DIR"
+msgstr "TMỤC"
+
#: src/main.c:109
+msgid ""
+"Add the directory DIR to the list of directories to be searched for header "
+"files."
+msgstr ""
+"Thêm thư mục TMỤC vào danh sách các thư mục nơi cần tìm kiếm tập tin phần "
+"đầu."
+
#: src/main.c:110 src/main.c:117 ../src/main.c:88 ../tools/gnomesu.c:41
+#: ../gnome-netinfo/main.c:82
+msgid "COMMAND"
+msgstr "LỆNH"
+
#: src/main.c:111
+msgid "* Run the specified preprocessor command"
+msgstr "* Chạy lệnh bộ tiền xử lý đã ghi rõ"
+
#: src/main.c:119
+msgid "Output control:"
+msgstr "Điều khiển xuất:"
+
#: src/main.c:121
+msgid "* Print line numbers"
+msgstr "* In ra số thứ tự dòng"
+
#: src/main.c:125
+msgid "* Print nesting level along with the call tree"
+msgstr "* In ra cấp lồng nhau cùng với cây gọi"
+
#: src/main.c:129
+msgid "Control graph appearance"
+msgstr "Điều khiển hình thức của đồ thị"
+
#: src/main.c:131
+msgid "* Draw ASCII art tree"
+msgstr "* Vẽ cây nghệ ASCII"
+
#: src/main.c:135
+msgid "* Brief output"
+msgstr "* Xuất ngắn"
+
#: src/main.c:139
+msgid "* Additionally format output for use with GNU Emacs"
+msgstr "* Cũng định dạng dữ liệu xuất để sử dụng với Emacs của GNU"
+
#: src/main.c:143
+msgid "* Do not print argument lists in function declarations"
+msgstr "* Đừng in ra danh sách đối số trong lời tuyên bố hàm"
+
#: src/main.c:147
+msgid "* Do not print symbol names in declaration strings"
+msgstr "* Đừng in ra tên ký hiệu trong chuỗi tuyên bố"
+
#: src/main.c:153
+msgid "Informational options:"
+msgstr "Tùy chọn thông tin:"
+
#: src/main.c:155
+msgid "* Verbose error diagnostics"
+msgstr "* Chẩn đoán lỗi một cách chi tiết"
+
#: src/main.c:159 src/main.c:200
+msgid "Print license and exit"
+msgstr "In ra Quyền phép rồi thoát."
+
#: src/main.c:161
+msgid "Set debugging level"
+msgstr "Lập cấp gỡ lỗi"
+
#: src/main.c:167
+msgid ""
+" GNU cflow is free software; you can redistribute it and/or modify\n"
+" it under the terms of the GNU General Public License as published by\n"
+" the Free Software Foundation; either version 2 of the License, or\n"
+" (at your option) any later version.\n"
+"\n"
+" GNU cflow is distributed in the hope that it will be useful,\n"
+" but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
+" GNU General Public License for more details.\n"
+"\n"
+" You should have received a copy of the GNU General Public License\n"
+" along with GNU cflow; if not, write to the Free Software Foundation,\n"
+" Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n"
+"\n"
+"\n"
+msgstr ""
+" Trình cflow của GNU là phần mềm tự do nên có thể phân phối nó lại\n"
+" và sửa đổi nó theo điều kiện của Quyền Công Chung Gnu (GPL)\n"
+" như xuất do Tổ chức Phần mềm Tự do (Free Software Foundation),\n"
+" hoặc phiên bản 2 của quyền ấy, hoặc (tùy chọn) bất cứ phiên bản sau nào.\n"
+"\n"
+" Chúng tôi phân phối trình cflow của GNU vì mong nó có ích, nhưng\n"
+" không có bảo đảm gi cả, không có bảo đảm ngụ ý khả năng bán\n"
+" hay khả năng làm việc dứt khoát.\n"
+" Hãy xem Quyền Công Chung Gnu (GPL) để tim chi tiết.\n"
+"\n"
+" Nếu bạn chưa nhận một bản sao Quyền Công Chung Gnu (GPL)\n"
+" thì hãy viết cho Tổ chức Phần mềm Tự do:\n"
+" Free Software Foundation, Inc.,\n"
+" 51 Franklin Street, Fifth Floor,\n"
+" Boston, MA 02110-1301 USA (Mỹ)\n"
+"\n"
+
#: src/main.c:281
+#, c-format
+msgid "unknown symbol type: %s"
+msgstr "không biết kiểu ký hiệu : %s"
+
#: src/main.c:310
+#, c-format
+msgid "unknown print option: %s"
+msgstr "không biết tùy chọn in: %s"
+
#: src/main.c:433 src/main.c:442
+msgid "level indent string is too long"
+msgstr "chuỗi thụt lề cấp quá dài"
+
#: src/main.c:470
+msgid "level-indent syntax"
+msgstr "cú pháp thụt lề cấp"
+
#: src/main.c:494
+#, c-format
+msgid "unknown level indent option: %s"
+msgstr "không biết tùy chọn thụt lề cấp: %s"
+
#: src/main.c:529
+#, c-format
+msgid ""
+"License for %s:\n"
+"\n"
+msgstr "Quyền Phép cho %s:\\n\n"
+
#: src/main.c:575 src/main.c:760
+#, c-format
+msgid "%s: No such output driver"
+msgstr "%s: Không có trình điều khiển xuất như vậy"
+
#: src/main.c:602
+#, c-format
+msgid "Unknown symbol class: %c"
+msgstr "Không biết hạng ký hiệu : %c"
+
#: src/main.c:682
+msgid "[FILE]..."
+msgstr "[TẬP_TIN]..."
+
#: src/main.c:725 ../process.c:911
+msgid "Exiting"
+msgstr "Đang thoát..."
+
#: src/main.c:792
+msgid "no input files"
+msgstr "không có tập tin xuất nào"
+
#: src/parser.c:119
+#, c-format
+msgid " near "
+msgstr " gần "
+
#: src/parser.c:188
+msgid "INTERNAL ERROR: cannot return token to stream"
+msgstr "LỖI NỘI BỘ: không thể trả gởi hiệu bài về luồng"
+
#: src/parser.c:398
+msgid "unexpected end of file in expression"
+msgstr "kết thúc tập tin bất ngờ trong biểu thức"
+
#: src/parser.c:453 src/parser.c:552
+msgid "expected `;'"
+msgstr "ngờ dấu chấm phẩy « ; »"
+
#: src/parser.c:470 src/parser.c:577
+msgid "unexpected end of file in declaration"
+msgstr "kết thức tập tin bất ngờ trong lời tuyên bố"
+
#: src/parser.c:502
+msgid "missing `;' after struct declaration"
+msgstr "thiếu dấu chấm phẩy « ; » sau lời tuyên bố « struct »"
+
#: src/parser.c:599
+msgid "unexpected end of file in initializer list"
+msgstr "kết thức tập tin bất ngờ trong danh sách bộ khởi động"
+
#: src/parser.c:683
+msgid "unexpected end of file in struct"
+msgstr "kết thúc tập tin bất ngờ trong « struct »"
+
#: src/parser.c:769 src/parser.c:792
+msgid "expected `)'"
+msgstr "ngờ dấu đóng ngoặc « ) »"
+
#: src/parser.c:805
+msgid "unexpected end of file in function declaration"
+msgstr "kết thức tập tin bất ngờ trong lời tuyên bố hàm"
+
#: src/parser.c:877
+msgid "unexpected token in parameter list"
+msgstr "hiệu bài bất ngờ trong danh sách tham số"
+
#: src/parser.c:892
+msgid "unexpected end of file in parameter list"
+msgstr "kết thúc tập tin bất ngờ trong danh sách tham số"
+
#: src/parser.c:930
+msgid "forced function body close"
+msgstr "việc đóng thân hàm bị buộc"
+
#: src/parser.c:944
+msgid "unexpected end of file in function body"
+msgstr "kết thức tập tin bất ngờ trong thân hàm"
+
#: src/parser.c:979
+#, c-format
+msgid "%s/%d redefined"
+msgstr "%s/%d được định nghĩa lại"
+
#: src/parser.c:982
+msgid "this is the place of previous definition"
+msgstr "đây là vị trí của lời định nghĩa trước"
+
#: src/parser.c:994
+#, c-format
+msgid "%s:%d: %s/%d defined to %s\n"
+msgstr "%s:%d: %s/%d được định nghĩa thành %s\n"
+
# Variable: do not translate/ biến: đừng dịch
+#: src/parser.c:1019
+#, c-format
+msgid "%s:%d: type %s\n"
+msgstr "%s:%d: kiểu %s\n"
+
#: src/rc.c:55
+msgid "not enough memory to process rc file"
+msgstr "không đủ bộ nhớ để xử lý tập tin « rc » (tài nguyên)"
+
#: src/symbol.c:317
+msgid "not enough core"
+msgstr "không đủ lõi"
+
#: lib/argp-help.c:195 lib/argp-help.c:194
+#, c-format
+msgid "%.*s: ARGP_HELP_FMT parameter requires a value"
+msgstr "%.*s: tham số « ARGP_HELP_FMT » cần thiết giá trị"
+
#: lib/argp-help.c:204 lib/argp-help.c:203
+#, c-format
+msgid "%.*s: Unknown ARGP_HELP_FMT parameter"
+msgstr "%.*s: không biết tham số « ARGP_HELP_FMT »"
+
#: lib/argp-help.c:216 lib/argp-help.c:215
+#, c-format
+msgid "Garbage in ARGP_HELP_FMT: %s"
+msgstr "Rác trong « ARGP_HELP_FMT »: %s"
+
#: lib/argp-help.c:1195 lib/argp-help.c:1194
+msgid ""
+"Mandatory or optional arguments to long options are also mandatory or "
+"optional for any corresponding short options."
+msgstr ""
+"Tất cả đối số bắt buộc phải sử dụng với tùy chọn dài cũng bắt buộc với tùy "
+"chọn ngắn tương ứng."
+
#: lib/argp-help.c:1582 gphoto2/shell.c:747 ../glib/goption.c:468
+#: lib/argp-help.c:1581 schroot/schroot-options.cc:126
+#: schroot/schroot-releaselock-options.cc:68
+#, c-format
+msgid "Usage:"
+msgstr "Cách sử dụng:"
+
#: lib/argp-help.c:1586 lib/argp-help.c:1585
+msgid " or: "
+msgstr " hoặc "
+
#: lib/argp-help.c:1598 lib/argp-help.c:1597
+msgid " [OPTION...]"
+msgstr " [TÙY_CHỌN...]"
+
#: lib/argp-help.c:1625 lib/argp-help.c:1624 lib/print_error.c:35
+#: src/rpasswd.c:127 src/rpasswdd.c:146
+#, c-format
+msgid "Try `%s --help' or `%s --usage' for more information.\n"
+msgstr ""
+"Hãy thử lệnh « %s --help » (trợ giúp) hoặc lệnh « %s --usage » (cách sử "
+"dụng) để xem thông tin thêm.\n"
+
#: lib/argp-help.c:1873 lib/error.c:122 lib/error.c:131 lib/error.c:159
+#: lib/error.c:121 lib/argp-help.c:1872 src/err-codes.h:229
+msgid "Unknown system error"
+msgstr "Gặp lỗi hệ thống không rõ"
+
#: lib/argp-parse.c:83 src/main.c:198 lib/argp-parse.c:82
+msgid "Give this help list"
+msgstr "Hiển thị trợ giúp này"
+
#: lib/argp-parse.c:84 src/main.c:199 lib/argp-parse.c:83
+msgid "Give a short usage message"
+msgstr "Hiển thị thông điệp cách sử dụng ngắn"
+
#: lib/argp-parse.c:85 lib/argp-parse.c:84
+msgid "Set the program name"
+msgstr "Lập tên chương trình"
+
#: lib/argp-parse.c:87 lib/argp-parse.c:86
+msgid "Hang for SECS seconds (default 3600)"
+msgstr "Treo trong vòng GIÂY giây (mặc định là 3600)"
+
#: lib/argp-parse.c:148 src/main.c:201 lib/argp-parse.c:147
+msgid "Print program version"
+msgstr "In ra phiên bản chương trình"
+
#: lib/argp-parse.c:164 lib/argp-parse.c:163
+msgid "(PROGRAM ERROR) No version known!?"
+msgstr "(LỖI CHƯƠNG TRÌNH) Không biết phiên bản không?"
+
#: lib/argp-parse.c:620 lib/argp-parse.c:619
+#, c-format
+msgid "%s: Too many arguments\n"
+msgstr "%s: Quá nhiều đối số\n"
+
#: lib/argp-parse.c:763 lib/argp-parse.c:762
+msgid "(PROGRAM ERROR) Option should have been recognized!?"
+msgstr "(LỖI CHƯƠNG TRÌNH) Nên nhận diện tùy chọn mà chưa?"
+
#: lib/getopt.c:552 lib/getopt.c:571 src/main/getopt.c:681 lib/getopt.c:551
+#: lib/getopt.c:570 lib/getopt.c:694 share/getopt.c:673 getopt.c:663
+#, c-format
+msgid "%s: option `%s' is ambiguous\n"
+msgstr "%s: tùy chọn « %s » là mơ hồ\n"
+
#: lib/getopt.c:604 lib/getopt.c:608 src/main/getopt.c:706 lib/getopt.c:603
+#: lib/getopt.c:607 lib/getopt.c:719 share/getopt.c:698 getopt.c:687
+#, c-format
+msgid "%s: option `--%s' doesn't allow an argument\n"
+msgstr "%s: tùy chọn « --%s » không cho phép đối số\n"
+
#: lib/getopt.c:617 lib/getopt.c:622 src/main/getopt.c:712 lib/getopt.c:616
+#: lib/getopt.c:621 lib/getopt.c:724 share/getopt.c:703 getopt.c:692
+#, c-format
+msgid "%s: option `%c%s' doesn't allow an argument\n"
+msgstr "%s: tùy chọn « %c%s » không cho phép đối số\n"
+
#: lib/getopt.c:915 share/getopt.c:721 share/getopt.c:894 getopt.c:709
+#: getopt.c:882
+#, c-format
+msgid "%s: option `%s' requires an argument\n"
+msgstr "%s: tùy chọn « %s » cần đến đối số\n"
+
#: lib/getopt.c:730 lib/getopt.c:771 share/getopt.c:750 getopt.c:738
+#, c-format
+msgid "%s: unrecognized option `--%s'\n"
+msgstr "%s: không nhận ra tùy chọn « --%s »\n"
+
#: lib/getopt.c:741 lib/getopt.c:775 share/getopt.c:754 getopt.c:742
+#, c-format
+msgid "%s: unrecognized option `%c%s'\n"
+msgstr "%s: không nhận ra tùy chọn « %c%s »\n"
+
#: lib/getopt.c:799 lib/getopt.c:801 share/getopt.c:780 getopt.c:768
+#, c-format
+msgid "%s: illegal option -- %c\n"
+msgstr "%s: không cho phép tùy chọn « -- %c »\n"
+
#: lib/getopt.c:808 lib/getopt.c:804 share/getopt.c:783 getopt.c:771
+#, c-format
+msgid "%s: invalid option -- %c\n"
+msgstr "%s: tùy chọn không hợp lệ « -- %c »\n"
+
#: lib/getopt.c:964 share/getopt.c:813 share/getopt.c:943 getopt.c:801
+#: getopt.c:931
+#, c-format
+msgid "%s: option requires an argument -- %c\n"
+msgstr "%s: tùy chọn cần đến đối số « -- %c »\n"
+
#: lib/getopt.c:954 lib/getopt.c:881 share/getopt.c:860 getopt.c:848
+#, c-format
+msgid "%s: option `-W %s' is ambiguous\n"
+msgstr "%s: tùy chọn « -W %s » là mơ hồ\n"
+
#: lib/getopt.c:999 lib/getopt.c:899 share/getopt.c:878 getopt.c:866
+#, c-format
+msgid "%s: option `-W %s' doesn't allow an argument\n"
+msgstr "%s: tùy chọn « -W %s » không cho phép đối số\n"
+
#: lib/obstack.c:441 lib/xalloc-die.c:38 lib/xsetenv.c:40
+msgid "memory exhausted"
+msgstr "hết bộ nhớ hoàn toàn"
+
#: ../level/aceticacid.atomix.xml.h:1
+msgid "Acetic Acid"
+msgstr "Axit axetic"
+
#: ../level/acetone.atomix.xml.h:1
+msgid "Acetone"
+msgstr "Axetôn"
+
#: ../level/butanol.atomix.xml.h:1
+msgid "Butanol"
+msgstr "Butanola"
+
#: ../level/cyclobutane.atomix.xml.h:1
+msgid "Cyclobutane"
+msgstr "Xiclôbutan"
+
#: ../level/dimethylether.atomix.xml.h:1
+msgid "Dimethyl Ether"
+msgstr "Ête metyla đôi"
+
#: ../level/ethanal.atomix.xml.h:1
+msgid "Ethanal"
+msgstr "Etanan"
+
#: ../level/ethane.atomix.xml.h:1
+msgid "Ethane"
+msgstr "Etan"
+
#: ../level/ethanol.atomix.xml.h:1
+msgid "Ethanol"
+msgstr "Etanola"
+
#: ../level/ethylene.atomix.xml.h:1
+msgid "Ethylene"
+msgstr "Etylen"
+
#: ../level/glycerin.atomix.xml.h:1
+msgid "Glycerin"
+msgstr "Glyxerin"
+
#: ../level/lactic-acid.atomix.xml.h:1
+msgid "Lactic Acid"
+msgstr "Acit lactic"
+
#: ../level/methanal.atomix.xml.h:1
+msgid "Methanal"
+msgstr "Metanan"
+
#: ../level/methane.atomix.xml.h:1
+msgid "Methane"
+msgstr "Metan"
+
#: ../level/methanol.atomix.xml.h:1
+msgid "Methanol"
+msgstr "Metanola"
+
#: ../level/propanal.atomix.xml.h:1
+msgid "Propanal"
+msgstr "Prôpanan"
+
#: ../level/propylene.atomix.xml.h:1
+msgid "Propylene"
+msgstr "Prôpylen"
+
#: ../level/pyran.atomix.xml.h:1
+msgid "Pyran"
+msgstr "Pyran"
+
#: ../level/transbutylen.atomix.xml.h:1
+msgid "Trans Butylen"
+msgstr "Butylen qua"
+
#: ../level/water.atomix.xml.h:1
+msgid "Water"
+msgstr "Nước"
+
#: ../src/atomix-ui.xml.h:1
+msgid "Continue paused game"
+msgstr "Tiếp tục chơi"
+
#: ../src/atomix-ui.xml.h:2
+msgid "End a game"
+msgstr "Kết thúc trò chơi"
+
#: ../src/atomix-ui.xml.h:3
+msgid "Pause the running game"
+msgstr "Tạm dừng trò chơi"
+
#: ../src/atomix-ui.xml.h:4
+msgid "Reset level"
+msgstr "Đặt lại cấp độ"
+
#: ../src/atomix-ui.xml.h:5
+msgid "Restores start situation"
+msgstr "Phục hồi vị trí ban đầu"
+
#: ../src/atomix-ui.xml.h:6
+msgid "Set preferences"
+msgstr "Thiết lập thông số"
+
#: ../src/atomix-ui.xml.h:7
+msgid "Skip _level"
+msgstr "Bỏ qua _cấp độ"
+
#: ../src/atomix-ui.xml.h:8
+msgid "Skip the current level"
+msgstr "Bỏ qua cấp độ này"
+
#: ../src/atomix-ui.xml.h:9
+msgid "Start a new game"
+msgstr "Bắt đầu chơi"
+
#: ../src/atomix-ui.xml.h:10
+msgid "Undo the last move"
+msgstr "Hoàn lại lần đi cuối"
+
#: ../src/atomix-ui.xml.h:11
+msgid "View highscores"
+msgstr "Xem điểm cao"
+
#: ../src/atomix-ui.xml.h:12
+msgid "_Continue game"
+msgstr "_Tiếp tục chơi"
+
#: ../src/atomix-ui.xml.h:13
+msgid "_End Game"
+msgstr "_Kết thúc trò chơi"
+
#: ../src/atomix-ui.xml.h:14
+msgid "_Game"
+msgstr "_Trò chơi"
+
#. #-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#
+#. Help
+#. #-#-#-#-# NetworkManager.vi.po (NetworkManager HEAD) #-#-#-#-#
+#. Help item
+#: ../pan/save-ui.c:262 ../Pyblio/GnomeUI/Document.py:149 src/mainwin.cpp:549
+#: ../src/glade-gtk.c:2317 ../gnome/applet/applet.c:2208 po/silky.glade.h:216
+#: app/menubar.c:691
+msgid "_Help"
+msgstr "Trợ _giúp"
+
#: ../src/atomix-ui.xml.h:21
+msgid "_New Game"
+msgstr "Trò chơi _mới"
+
#: ../src/atomix-ui.xml.h:22
+msgid "_Pause game"
+msgstr "Tạm _dừng trò chơi"
+
#: ../src/atomix-ui.xml.h:23
+msgid "_Preferences ..."
+msgstr "_Tùy thích..."
+
#: ../src/atomix-ui.xml.h:24
+msgid "_Scores ..."
+msgstr "Đ_iểm..."
+
#: ../src/atomix-ui.xml.h:25
+msgid "_Undo move"
+msgstr "_Hoàn lại nước đi"
+
#: ../src/level-manager.c:174
+msgid "Couldn't find level sequence description."
+msgstr "Không thể tìm chuỗi mô tả cấp độ."
+
#: ../src/level-manager.c:188
+msgid "No level found."
+msgstr "Không tìm thấy cấp độ."
+
#: ../src/level-manager.c:284
+#, c-format
+msgid "Found level '%s' in: %s"
+msgstr "Tìm thấy cấp độ « %s » trong: %s"
+
#: ../src/main.c:126
+msgid "You have not achieved any scores yet. Play a little before coming back!"
+msgstr "Bạn chưa được điểm nào. Chơi nữa nhé trước khi trở về!"
+
#: ../src/main.c:173
+msgid "A puzzle game about atoms and molecules"
+msgstr "Trò chơi trí tuệ về nguyên tử và phân tử"
+
#: ../src/main.c:488 ../atomix.desktop.in.h:1
+msgid "Atomix"
+msgstr "Atomix"
+
#: ../src/main.c:499
+msgid "Congratulations! You have finished all Atomix levels."
+msgstr "Xin chúc mừng! Bạn đã hoàn tất mọi cấp độ của Atomix."
+
#: ../src/main.c:509
+msgid "Couldn't find at least one level."
+msgstr "Không thể tìm thấy cấp độ nào cả."
+
#: ../src/main.c:514
+msgid "Do you want to finish the game?"
+msgstr "Bạn có muốn hoàn tất trò chơi không?"
+
#. "The branch of mathematics that deals with the relationships among groups of measurements and with the relevance of similarities and differences in those relationships."
+
#: ../src/main.c:723 ../aisleriot/statistics.glade.h:5
+msgid "Statistics"
+msgstr "Thống kê"
+
#: ../src/main.c:729 ../gnobots2/statusbar.c:85 ../gnometris/scoreframe.cpp:79
+msgid "Level:"
+msgstr "Cấp độ :"
+
#: ../src/main.c:730
+msgid "Molecule:"
+msgstr "Phân tử :"
+
#: ../gnometris/scoreframe.cpp:58 ../gnomine/gnomine.c:441
+msgid "Score:"
+msgstr "Điểm:"
+
#: ../src/main.c:732
+msgid "Time:"
+msgstr "Thời gian:"
+
#: ../src/main.c:772
+#, c-format
+msgid "Couldn't find file: %s"
+msgstr "Không thể tìm tập tin: %s"
+
#: ../src/theme-manager.c:135
+msgid "No themes found."
+msgstr "Không tìm thấy sắc thái."
+
#: ../src/theme-manager.c:193
+#, c-format
+msgid "Found theme '%s' in: %s"
+msgstr "Tìm thấy sắc thái « %s » trong: %s"
+
#: ../atomix.desktop.in.h:2
+msgid "Molecule puzzle game"
+msgstr "Trò chơi trí tuệ về phân tử"
+
#. #-#-#-#-# NetworkManager.vi.po (NetworkManager HEAD) #-#-#-#-#
+#. About item
+#: src/mainwin.cpp:555 ../gnome/applet/applet.c:2217 po/silky.glade.h:212
+msgid "_About"
+msgstr "_Giới thiệu"
+
#: ../gnome-power-preferences.desktop.in.h:1
+msgid "Configure power management"
+msgstr "Cấu hình quản lý điện năng"
+
#: ../gnome-power-preferences.desktop.in.h:2
+msgid "Power Management"
+msgstr "Quản lý Điện năng"
+
#: ../gnome-power-manager.schemas.in.h:1
+msgid "If we require a password when resuming from suspend"
+msgstr "Nếu cần thiết mật khẩu khi tiếp tục sau khi ngưng"
+
#: ../gnome-power-manager.schemas.in.h:2
+msgid "Options are never, critical, charge, always"
+msgstr "Tùy chọn là: không bao giờ, tới hạn, nạp, luôn"
+
#: ../gnome-power-manager.schemas.in.h:3
+msgid "The action to take when the battery is critically low."
+msgstr "Hành động cần làm khi pin yếu tới hạn."
+
#: ../gnome-power-manager.schemas.in.h:4
+msgid "The brightness the display is set to on AC"
+msgstr "Độ sáng của bộ trình bày khi chạy bằng AC"
+
#: ../gnome-power-manager.schemas.in.h:5
+msgid "The brightness the display is set to on battery"
+msgstr "Độ sáng của bộ trình bày khi chạy bằng pin"
+
#: ../gnome-power-manager.schemas.in.h:6
+msgid "The event for a laptop lid closing"
+msgstr "Sự kiện khi máy tính xách tây đóng nắp"
+
#: ../gnome-power-manager.schemas.in.h:7
+msgid "The event for a system suspend button press"
+msgstr "Sự kiện khi bấm nút ngưng hệ thống"
+
#: ../gnome-power-manager.schemas.in.h:8
+msgid "The idle time in seconds before the computer tries to sleep"
+msgstr "Thời gian nghỉ theo giây trước khi máy tính cố ngủ"
+
#: ../gnome-power-manager.schemas.in.h:9
+msgid "The idle time in seconds before the display tries to sleep"
+msgstr "Thời gian nghỉ theo giây trước khi bộ trình bày cố ngủ"
+
#: ../gnome-power-manager.schemas.in.h:10
+msgid "The idle time in seconds before the hard disk drives try to sleep"
+msgstr "Thời gian nghỉ theo giây trước khi đĩa cứng cố ngủ"
+
#: ../gnome-power-manager.schemas.in.h:11
+msgid ""
+"The percentage that the powerdevice has to get to be considered \"low enough"
+"\" to perform an action."
+msgstr ""
+"Phần trăm mà thiết bị điện năng cần nhận, để thỏa tiêu chuẩn « đủ yếu » để "
+"thực hiện hành động."
+
#: ../gnome-power-manager.schemas.in.h:12
+msgid ""
+"The percentage that the powerdevice has to get to be considered \"low enough"
+"\" to warn the user."
+msgstr ""
+"Phần trăm mà thiết bị điện năng cần nhận, để thỏa tiêu chuẩn « đủ yếu » để "
+"cảnh báo người dùng."
+
#: ../gnome-power-manager.schemas.in.h:13
+msgid "The powerdevice action threshold."
+msgstr "Ngưỡng hành động thiết bị điện năng."
+
#: ../gnome-power-manager.schemas.in.h:14
+msgid "The powerdevice warning threshold."
+msgstr "Ngưỡng cảnh báo thiết bị điện năng."
+
#: ../gnome-power-manager.schemas.in.h:15
+msgid "The type of sleep (hibernate/suspend) to use automatically."
+msgstr "Kiểu ngủ (ngủ động/ngưng) cần dùng tự động."
+
#: ../gnome-power-manager.schemas.in.h:16
+msgid "When to show the notification icon"
+msgstr "Khi cần hiển thị biểu tượng thông báo"
+
#: ../src/eggtray/eggtrayicon.c:117 libexif/exif-tag.c:118
+#: ../gnome/applet/eggtrayicon.c:128
+msgid "Orientation"
+msgstr "Hướng"
+
#: ../src/eggtray/eggtrayicon.c:118 ../gnome/applet/eggtrayicon.c:129
+msgid "The orientation of the tray."
+msgstr "Hướng khay."
+
#: ../src/gpm-common.c:133 ../dirdiff.py:571
+#, c-format, python-format
+msgid "%i minute"
+msgid_plural "%i minute"
+msgstr[0] "%i phút"
+
#: ../src/gpm-common.c:144 ../dirdiff.py:572
+#, c-format, python-format
+msgid "%i hour"
+msgid_plural "%i hour"
+msgstr[0] "%i giờ"
+
# Variable: don't translate / Biến: đừng dịch
+#: ../src/gpm-common.c:150
+#, c-format
+msgid "%i %s, %i %s"
+msgstr "%i %s, %i %s"
+
#: ../src/crontab.py:246 ../bin/ical-dump.c:85
+msgid "hour"
+msgid_plural "hour"
+msgstr[0] "giờ"
+
#: ../src/crontab.py:244 ../bin/ical-dump.c:83
+msgid "minute"
+msgid_plural "minute"
+msgstr[0] "phút"
+
#. common descriptions of this program
+#: ../src/gpm-common.h:33 ../src/gpm-main.c:685
+msgid "GNOME Power Manager"
+msgstr "Bộ Quản lý Điện năng GNOME"
+
#: ../src/gpm-common.h:34
+msgid "Power Manager for the GNOME desktop"
+msgstr "Ứng dụng quản lý điện năng cho môi trường Gnome"
+
#: ../src/gpm-console.c:306 ../src/gpm-main.c:668
+msgid "Do not daemonize"
+msgstr "Đừng chạy trong nền"
+
#: ../src/gpm-console.c:308 ../src/gpm-main.c:670 ../src/gpm-prefs.c:562
+msgid "Show extra debugging information"
+msgstr "Hiển thị thông tin gỡ lỗi thêm"
+
#: ../src/gpm-main.c:353 ../src/gpm-main.c:372
+#, c-format
+msgid ""
+"You have approximately %s of remaining battery life (%i%%). Plug in "
+"your AC Adapter to avoid losing data."
+msgstr ""
+"Bạn có xấp xỉ %s thời gian pin còn lại (%i%%). Hãy cầm phít bộ tiếp "
+"hợp AC để tránh mất dữ liệu."
+
#: ../src/gpm-main.c:357
+msgid "Battery Critically Low"
+msgstr "Pin yếu tới hạn"
+
#: ../src/gpm-main.c:375
+msgid "Battery Low"
+msgstr "Pin yếu"
+
#: ../src/gpm-main.c:417
+msgid "AC Power Unplugged"
+msgstr "Điện năng AC chưa kết nối"
+
#: ../src/gpm-main.c:418
+msgid "The AC Power has been unplugged. The system is now using battery power."
+msgstr "Điện năng AC bị tháo nút ra. Hệ thống đang chạy bằng pin."
+
#: ../src/gpm-main.c:494
+msgid "Battery Charged"
+msgstr "Pin đầy"
+
#: ../src/gpm-main.c:494
+msgid "Your battery is now fully charged"
+msgstr "Pin đã được tái sạc đầy."
+ +#: ../src/gpm-notification.c:216 +msgid "charging" +msgstr "Ä‘ang sạc" + +#: ../src/gpm-notification.c:218 +msgid "discharging" +msgstr "Ä‘ang phóng ra" + +#: ../src/gpm-notification.c:221 +msgid "charged" +msgstr "được sạc" + +#: ../src/gpm-notification.c:236 +msgid "until charged" +msgstr "đến khi được sạc" + +#: ../src/gpm-notification.c:239 +msgid "until empty" +msgstr "đến khi rá»—ng" + +#: ../src/gpm-notification.c:306 +msgid "Computer is running on battery power\n" +msgstr "Hệ thống Ä‘ang chạy bằng pin\n" + +#: ../src/gpm-notification.c:308 +msgid "Computer is running on AC power\n" +msgstr "Hệ thống Ä‘ang chạy bằng năng lượng xoay chiá»u (AC)\n" + +#: ../src/gpm-notification.c:338 +msgid "Licensed under the GNU General Public License Version 2" +msgstr "" +"Äược phát hành vá»›i Ä‘iá»u kiện của Quyá»n Công Chung GNU (GPL) phiên bản 2" + +#: ../src/gpm-notification.c:339 +msgid "" +"GNOME Power Manager is free software; you can redistribute it and/or\n" +"modify it under the terms of the GNU General Public License\n" +"as published by the Free Software Foundation; either version 2\n" +"of the License, or (at your option) any later version." +msgstr "" +"ChÆ°Æ¡ng trình này là phần má»m tá»± do nên bạn có thể phân phối lại nó và sá»­a " +"đổi nó vá»›i Ä‘iá»u kiện của Quyá»n Công Chung GNU (GPL) nhÆ° do Tổ chức Phần má»m " +"Tá»± do sản xuất, hoặc phiên bản 2 của Quyá»n hoặc (tùy chá»n) bất cứ phiên bản " +"sau nào." + +#: ../src/gpm-notification.c:343 +msgid "" +"GNOME Power Manager is distributed in the hope that it will be useful,\n" +"but WITHOUT ANY WARRANTY; without even the implied warranty of\n" +"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n" +"GNU General Public License for more details." +msgstr "" +"Bá»™ Quản lý Äiện năng Gnome được phân phối vì mong muốn nó hữu ích\n" +"nhÆ°ng KHÔNG CÓ Sá»° BẢO ÄẢM NÀO, thậm chí không có\n" +"TÃNH THƯƠNG MẠI hay CHO MỘT MỤC ÄÃCH ÄẶC BIỆT NÀO CẢ.\n" +"Hãy xem Quyá»n Công Chung GNU để tìm chi tiết." + +#: ../src/gpm-notification.c:347 +msgid "" +"You should have received a copy of the GNU General Public License\n" +"along with this program; if not, write to the Free Software\n" +"Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n" +"02110-1301, USA." +msgstr "" +"Bện nên đã nhận má»™t bản sao của Quyá»n Công Chung GNU\n" +"cÅ©ng vá»›i chÆ°Æ¡ng trình này; nếu không thì hãy viết cho\n" +"Tổ chức Phần má»m Tá»± do:\n" +"Free Software Foundation, Inc.,\n" +"51 Franklin Street, Fifth Floor,\n" +"Boston, MA 02110-1301, USA. 
(Mỹ)" + +#: ../src/gpm-notification.c:484 +msgid "_Suspend" +msgstr "_NgÆ°ng" + +#: ../src/gpm-notification.c:489 +msgid "Hi_bernate" +msgstr "Ngủ _đông" + +#: ../src/gpm-notification.c:494 ../data/Deskbar_Applet.xml.h:2 +#: ../app/actions/dialogs-actions.c:190 ../src/login.c:917 +#: ../src/mlview-app.cc:310 po/silky.glade.h:218 +msgid "_Preferences" +msgstr "Tù_y thích" + +#: ../src/gpm-prefs.c:44 src/gbiff2.strings:140 +msgid "Suspend" +msgstr "NgÆ°ng" + +#: ../src/gpm-prefs.c:45 +msgid "Shutdown" +msgstr "Tắt máy" + +#: ../src/gpm-prefs.c:46 +msgid "Hibernate" +msgstr "Ngủ đông" + +#: ../src/gpm-prefs.c:47 +msgid "Do nothing" +msgstr "Äừng làm gì" + +#: ../src/gpm-prefs.c:505 +msgid "Configuration" +msgstr "Cấu hình" + +#: ../src/gpm-prefs.c:575 +msgid "GNOME Power Preferences" +msgstr "Tùy thích cho ÄIện năng Gnome" + +#: ../src/gpm-prefs.glade.h:1 +msgid "Actions" +msgstr "Hành Ä‘á»™ng" + +#: ../src/gnome-schedule.glade.h:4 ../gncal/calendar-editor.glade.h:5 +#: ../glade/straw.glade.h:9 +msgid "General" +msgstr "Chung" + +#: ../src/gpm-prefs.glade.h:3 ../src/drivel.glade.h:10 +msgid "Notification Area" +msgstr "Vùng thông báo" + +#: ../src/gpm-prefs.glade.h:4 +msgid "Other Options" +msgstr "Tùy chá»n khác" + +#: ../src/gpm-prefs.glade.h:5 +msgid "Running on AC Adapter" +msgstr "Äang chạy bằng bá»™ kết hợp AC" + +#: ../src/gpm-prefs.glade.h:6 +msgid "Running on Batteries" +msgstr "Äang chạy bằng pin" + +#: ../src/gpm-prefs.glade.h:7 +msgid "Estimated 16 minutes" +msgstr "Ứơc tính 16 phút" + +#: ../src/gpm-prefs.glade.h:8 +msgid "Estimated 2 hours 6 minutes" +msgstr "Ứơc tính 2 giá» 6 phút" + +#: ../src/gpm-prefs.glade.h:9 ../src/gnome-schedule.glade.h:14 +#: ../baobab.glade.h:2 ../plug-ins/metadata/interface.c:405 +msgid "Advanced" +msgstr "Cấp cao" + +#: ../src/gpm-prefs.glade.h:10 +msgid "Ba_ttery is critical when below:" +msgstr "_Pin yếu tá»›i hạn khi dÆ°á»›i:" + +#: ../src/gpm-prefs.glade.h:11 +msgid "Only display when battery life is _critical" +msgstr "Hiển thị chỉ khi thá»i gian pin tá»›i _hạn" + +#: ../src/gpm-prefs.glade.h:12 +msgid "Only display when charging or _discharging" +msgstr "Hiển thị chỉ khi sạc hay phóng _ra" + +#: ../src/gpm-prefs.glade.h:14 +msgid "Power Preferences" +msgstr "Tùy thích Äiện năng" + +#: ../src/gpm-prefs.glade.h:15 +msgid "Put _computer to sleep after:" +msgstr "Cho _máy tính ngủ sau :" + +#: ../src/gpm-prefs.glade.h:16 +msgid "Put _display to sleep after:" +msgstr "Cho bá»™ trìn_h bày ngủ sau :" + +#: ../src/gpm-prefs.glade.h:17 +msgid "Put c_omputer to sleep after:" +msgstr "Cho má_y tính ngủ sau :" + +#: ../src/gpm-prefs.glade.h:18 +msgid "Put dis_play to sleep after:" +msgstr "Cho bá»™ trình _bày ngủ sau :" + +#: ../src/gpm-prefs.glade.h:19 +msgid "Require password when returning from sleep" +msgstr "Cần thiết mật khẩu khi má»›i chạy sau khi ngủ" + +#: ../src/gpm-prefs.glade.h:20 +msgid "Set display _brightness:" +msgstr "Äặt Ä‘á»™ _sáng cho bá»™ trình bày:" + +#: ../src/gpm-prefs.glade.h:21 +msgid "Set display b_rightness:" +msgstr "Äặt _Ä‘á»™ sáng cho bá»™ trình bày:" + +#: ../src/gpm-prefs.glade.h:22 +#: ../schemas/apps_gnome_settings_daemon_keybindings.schemas.in.h:29 +msgid "Sleep" +msgstr "Ngủ" + +#: ../src/gpm-prefs.glade.h:23 +msgid "When _battery power critical:" +msgstr "Khi nạp _pin tá»›i hạn:" + +#: ../src/gpm-prefs.glade.h:24 +msgid "When _suspend button pressed:" +msgstr "Khi bấm nút _ngÆ°ng:" + +#: ../src/gpm-prefs.glade.h:25 +msgid "When laptop li_d is closed:" +msgstr "Khi đóng _nắp máy tính xách tây:" + +#: 
+msgid "_Always display icon"
+msgstr "_Luôn hiển thị biểu tượng"
+
+#: ../src/gpm-prefs.glade.h:27
+msgid "_Battery is low when below:"
+msgstr "_Pin yếu khi dưới:"
+
+#: ../src/gpm-prefs.glade.h:28
+msgid "_Computer sleep type:"
+msgstr "_Kiểu ngủ máy tính:"
+
+#: ../src/gpm-prefs.glade.h:29
+msgid "_Never display icon"
+msgstr "_Không bao giờ hiển thị biểu tượng"
+
+#: ../src/gpm-sysdev.c:57
+msgid "Laptop battery"
+msgstr "Pin máy tính xách tay"
+
+#: ../src/gpm-sysdev.c:59 ../sheets/cisconetwork.sheet.in.h:87
+msgid "UPS"
+msgstr "UPS"
+
+#: ../src/gpm-sysdev.c:61
+msgid "Wireless mouse"
+msgstr "Chuột vô tuyến"
+
+#: ../src/gpm-sysdev.c:63
+msgid "Wireless keyboard"
+msgstr "Bàn phím vô tuyến"
+
+#: ../src/gpm-sysdev.c:65
+msgid "Misc PDA"
+msgstr "Máy tính cầm tay lặt vặt"
+
+#: ../data/Deskbar_Applet.server.in.in.h:1
+msgid "An all-in-one action bar"
+msgstr "Thanh hành động đa năng"
+
+#: ../data/Deskbar_Applet.server.in.in.h:2 ../deskbar/about.py:23
+msgid "Deskbar"
+msgstr "Deskbar"
+
+#: ../data/prefs-dialog.glade.h:1
+msgid "Keyboard Shortcut"
+msgstr "Phím tắt"
+
+#: ../data/prefs-dialog.glade.h:2
+#: ../extensions/extensions-manager-ui/extensions-manager-ui.glade.h:1
+msgid "Loaded Extensions"
+msgstr "Phần mở rộng đã tải"
+
+#: ../data/prefs-dialog.glade.h:3
+msgid "Width"
+msgstr "Rộng"
+
+#: ../data/prefs-dialog.glade.h:4
+msgid ""
+"Note: Drag and drop an extension to change its order."
+msgstr ""
+"Ghi chú : Hãy kéo và thả phần mở rộng nào để thay đổi thứ "
+"tự."
+
+#: ../data/prefs-dialog.glade.h:5
+msgid "Deskbar Preferences"
+msgstr "Tùy thích Deskbar"
+
+#: ../data/prefs-dialog.glade.h:6
+msgid "Fixed _width:"
+msgstr "_Rộng cố định:"
+
+#: ../data/prefs-dialog.glade.h:7
+msgid "Use _all available space"
+msgstr "Dùng toàn _chỗ sẵn sàng"
+
+#: ../data/prefs-dialog.glade.h:8
+msgid "Use the _keyboard shortcut:"
+msgstr "Dùng _phím tắt:"
+
+#: ../data/smart-bookmarks.glade.h:1
+msgid ""
+"Note: If that shortcut is a single letter (like t) "
+"you can also just type \"something\" and then press Ctrl-t in "
+"the deskbar."
+msgstr ""
+"Ghi chú : Nếu phím tắt là một chữ đơn (như t) thì "
+"bạn cũng có thể gõ chỉ « vật gì » rồi bấm Ctrl-t trong thanh."
+
+#: ../data/smart-bookmarks.glade.h:2
+msgid ""
+"Note: To use a shortcut (for example wp) to search "
+"for something, type \"wp something\" in the deskbar."
+msgstr ""
+"Ghi chú : Để sử dụng phím tắt (v.d. wp) để tìm kiếm "
+"vật gì, hãy gõ « wp vật gì » vào thanh."
+
+#: ../data/smart-bookmarks.glade.h:3
+msgid "Shortcuts for Bookmarked Searches"
+msgstr "Phím tắt cho việc tìm kiếm đã đánh dấu"
+
+#: ../deskbar/about.py:26
+msgid "An all-in-one action bar."
+msgstr "Thanh hành động đa năng."
+
+#: ../deskbar/about.py:29
+msgid "Deskbar Website"
+msgstr "Nơi Mạng Deskbar"
+
+#: ../deskbar/applet.py:312
+msgid "No History"
+msgstr "Không có lược sử"
+
+#: ../deskbar/handlers/beagle-live.py:19
+msgid "Beagle Live"
+msgstr "Beagle tại chỗ"
+
+#: ../deskbar/handlers/beagle-live.py:20
+msgid "Search all of your documents (using Beagle), as you type"
+msgstr "Tìm kiếm trong mọi tài liệu của bạn (bằng Beagle) trong khi gõ"
+
+#: ../deskbar/handlers/beagle-live.py:47
+#, python-format
+msgid "Addressbook entry for %s"
+msgstr "Mục nhập sổ địa chỉ cho %s"
+
+#. translators: First %s is mail sender, second %s is mail subject.
+#: ../deskbar/handlers/beagle-live.py:56
+#, python-format
+msgid "View email from %s: %s"
+msgstr "Xem thư từ %s: %s"
+
+#, c-format, python-format
+msgid "Open %s"
+msgstr "Mở %s"
+
+#: ../deskbar/handlers/beagle-live.py:69
+#, python-format
+msgid "Open news item %s"
+msgstr "Mở mục tin tức %s"
+
+#: ../deskbar/handlers/beagle-live.py:76
+#, python-format
+msgid "Open note %s"
+msgstr "Mở ghi chép %s"
+
+#: ../deskbar/handlers/beagle-live.py:82
+#, python-format
+msgid "View conversation with %s"
+msgstr "Xem cuộc đối thoại với %s"
+
+#: ../deskbar/handlers/beagle-live.py:88
+#, python-format
+msgid "View calendar %s"
+msgstr "Xem lịch %s"
+
+#: dselect/pkgdisplay.cc:89 libexif/exif-entry.c:522
+msgid "?"
+msgstr "?"
+
+#: ../deskbar/handlers/beagle.py:19
+msgid "Beagle"
+msgstr "Beagle"
+
+#: ../deskbar/handlers/beagle.py:20
+msgid "Search all of your documents (using Beagle)"
+msgstr "Tìm kiếm trong mọi tài liệu của bạn (bằng Beagle)"
+
+#: ../deskbar/handlers/beagle.py:33
+#, python-format
+msgid "Search for %s using Beagle"
+msgstr "Tìm kiếm %s bằng Beagle"
+
+#: ../deskbar/handlers_browsers.py:30
+#, python-format
+msgid "Open History Item %s"
+msgstr "Mở mục Lược sử %s"
+
+#: ../deskbar/handlers_browsers.py:32
+#, python-format
+msgid "Open Bookmark %s"
+msgstr "Mở Đánh dấu %s"
+
+#. translators: First %s is the search engine name, second %s is the search term
+#: ../deskbar/handlers_browsers.py:67
+#, python-format
+msgid "Search %s for %s"
+msgstr "Tìm kiếm trong %s có %s"
+
+#: ../app/widgets/gimpactionview.c:360 ../src/menu-win.cc:269
+msgid "Shortcut"
+msgstr "Phím tắt"
+
+#: ../deskbar/handlers_browsers.py:180 ui/bookmarks.glade.h:3
+msgid "Bookmark Name"
+msgstr "Tên Đánh dấu"
+
+#: ../data/browser.xml.h:51 address_gui.c:1921 address_gui.c:1924
+#: address_gui.c:2938
+#, fuzzy
+msgid "Mail"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Thư\n"
+"#-#-#-#-# jpilot-0.99.8-pre12.vi.po (jpilot-0.99.8-pre12) #-#-#-#-#\n"
+"Lá thư"
+
+#: ../deskbar/handlers/email_address.py:10
+msgid "Send mail by typing a complete e-mail address"
+msgstr "Gởi thư bằng cách gõ địa chỉ thư đầy đủ"
+
+#: ../deskbar/handlers/email_address.py:26 ../deskbar/handlers/galago.py:27
+#, python-format
+msgid "Send Email to %s"
+msgstr "Gởi Thư cho %s"
+
+#: ../deskbar/handlers/epiphany.py:32 ../deskbar/handlers/galeon.py:21
+#: ../deskbar/handlers/mozilla.py:29 ../data/bme.desktop.in.h:3
+msgid "Web Bookmarks"
+msgstr "Dấu sách Mạng"
+
+#: ../deskbar/handlers/epiphany.py:33 ../deskbar/handlers/galeon.py:22
+#: ../deskbar/handlers/mozilla.py:30
+msgid "Open your web bookmarks by name"
+msgstr "Mở đánh dấu theo tên"
+
+#: ../deskbar/handlers/epiphany.py:37 ../deskbar/handlers/galeon.py:26
+msgid "Web History"
+msgstr "Lược sử Mạng"
+
+#: ../deskbar/handlers/epiphany.py:38 ../deskbar/handlers/galeon.py:27
+msgid "Open your web history by name"
+msgstr "Mở lược sử Mạng theo tên"
+
+#: ../deskbar/handlers/epiphany.py:42 ../deskbar/handlers/galeon.py:31
+#: ../deskbar/handlers/mozilla.py:34
+msgid "Web Searches"
+msgstr "Tìm kiếm Mạng"
+
+#: ../deskbar/handlers/epiphany.py:43 ../deskbar/handlers/galeon.py:32
+#: ../deskbar/handlers/mozilla.py:35
+msgid "Search the web via your browser's search settings"
+msgstr "Tìm kiếm trên Mạng thông qua thiết lập tìm kiếm của trình duyệt"
+
+#: ../deskbar/handlers/evolution.py:11
+msgid "You need to enable autocomplete in your mail preferences"
+msgstr "Bạn cần phải bật khả năng tự động gõ xong trong tùy thích thư tín"
+
+#: ../deskbar/handlers/evolution.py:13
+msgid "Autocompletion Needs to be Enabled"
+msgstr "Cần phải bật Tự động Gõ Xong"
+
+#: ../deskbar/handlers/evolution.py:14
+msgid ""
+"We cannot provide e-mail addresses from your address book unless "
+"autocompletion is enabled. To do this, from your mail program's menu, "
+"choose Edit - Preferences, and then Autocompletion."
+msgstr ""
+"Không thể cung cấp địa chỉ thư từ sổ địa chỉ nếu bạn chưa bật khả năng tự "
+"động gõ xong. Để bật nó, trong trình đơn của trình thư, hãy chọn Hiệu chỉnh "
+"→ Tùy thích, rồi Tự động Gõ Xong."
+
+#: ../deskbar/handlers/evolution.py:19
+msgid "Mail (Address Book)"
+msgstr "Thư (Sổ địa chỉ)"
+
+#: ../deskbar/handlers/evolution.py:20
+msgid "Send mail to your contacts by typing their name or e-mail address"
+msgstr "Gởi thư cho liên lạc bằng cách gõ tên hay địa chỉ thư của họ"
+
+#. translators: First %s is the contact full name, second %s is the email address
+#: ../deskbar/handlers/evolution.py:42
+#, python-format
+msgid "Send Email to %s (%s)"
+msgstr "Gởi thư cho %s (%s)"
+
+#: ../deskbar/handlers/files.py:14
+msgid "Files and Folders"
+msgstr "Tập tin và Thư mục"
+
+#: ../deskbar/handlers/files.py:15
+msgid "Open your files and folders by name"
+msgstr "Mở tập tin và thư mục theo tên"
+
+#: ../deskbar/handlers/files.py:47
+#, python-format
+msgid "Open folder %s"
+msgstr "Mở thư mục %s"
+
+#: ../deskbar/handlers/google-live.py:19
+msgid ""
+"You need a Google account to use Google Live. To get one, go to http://api."
+"google.com/\n"
+"\n"
+"When you have created your account, you should recieve a Google API key by "
+"mail. Place this key in the file\n"
+"\n"
+"~/.gnome2/deskbar-applet/Google.key\n"
+"\n"
+"If you do not receive an API key (or you have lost it) in your account "
+"verification mail, then go to www.google.com/accounts and log in. Go to api."
+"google.com, click \"Create Account\" and enter your e-mail address and "
+"password. Your API key will be re-sent.\n"
+"\n"
+"Now download the developers kit and extract the GoogleSearch.wsdl file from "
+"it. Copy this file to\n"
+"\n"
+"~/.gnome2/deskbar-applet/GoogleSearch.wsdl"
+msgstr ""
+"Bạn cần phải có tài khoản Google để sử dụng tính năng Google Live. Để được "
+"tài khoản, hãy đi tới http://api.google.com/\n"
+"\n"
+"Một khi bạn đã tạo tài khoản mình, bạn nên nhận một khoá API của Google qua "
+"thư. Hãy để khoá này vào tập tin\n"
+"\n"
+"~/.gnome2/deskbar-applet/Google.key\n"
+"\n"
+"Nếu bạn chưa nhận khoá API trong thư xác nhận tài khoản (hoặc nó bị mất), "
+"hãy thăm www.google.com/accounts và đăng nhập. Đi tới api.google.com, "
+"bấm « Tạo Tài khoản » (Create Account) và gõ địa chỉ thư và mật khẩu mình. "
+"Sau đó, khoá API sẽ được gởi lại.\n"
+"\n"
+"Sau đó, bạn hãy tải về bộ công cụ lập trình viên (developer's kit) và rút "
+"tập tin GoogleSearch.wsdl ra khỏi nó. Sao chép tập tin này vào\n"
+"\n"
+"~/.gnome2/deskbar-applet/GoogleSearch.wsdl"
+
+#: ../deskbar/handlers/google-live.py:32
+msgid "Setting Up Google Live"
+msgstr "Cách thiết lập Google Live"
+
+#: ../deskbar/handlers/google-live.py:38
+msgid "You need to install the SOAPpy python module."
+msgstr "Bạn cần phải cài đặt mô-đun python SOAPpy."
+
+#: ../deskbar/handlers/google-live.py:40
+msgid "You need the Google WSDL file."
+msgstr "Bạn cần đến tập tin WSDL Google."
+
+#: ../deskbar/handlers/google-live.py:42
+msgid "You need a Google API key."
+msgstr "Bạn cần đến một khoá API Google."
+
+#: ../deskbar/handlers/google-live.py:48
+msgid "Google Live"
+msgstr "Google Live"
+
+#: ../deskbar/handlers/google-live.py:49
+msgid "Search Google as you type"
+msgstr "Tìm kiếm trong Google trong khi gõ"
+
+#: ../deskbar/handlers/gtkbookmarks.py:12
+msgid "Files and Folders Bookmarks"
+msgstr "Đánh dấu Tập tin và Thư mục"
+
+#: ../deskbar/handlers/gtkbookmarks.py:13
+msgid "Open your files and folders bookmarks by name"
+msgstr "Mở đánh dấu của tập tin và thư mục theo tên"
+
+#: ../deskbar/handlers/gtkbookmarks.py:29 ../deskbar/handlers/volumes.py:45
+#, python-format
+msgid "Open location %s"
+msgstr "Mở địa điểm %s"
+
+#: ../deskbar/handlers/pathprograms.py:13
+msgid "Programs (Advanced)"
+msgstr "Chương trình (Cấp cao)"
+
+#: ../deskbar/handlers/pathprograms.py:14
+msgid "Launch any program present in your $PATH"
+msgstr "Khởi chạy chương trình nào có trong đường dẫn $PATH của bạn"
+
+#: ../deskbar/handlers/pathprograms.py:30
+#, python-format
+msgid "Execute %s"
+msgstr "Thực hiện %s"
+
+#: ../deskbar/handlers/programs.py:14
+msgid "Programs"
+msgstr "Chương trình"
+
+#: ../deskbar/handlers/programs.py:15
+msgid "Launch a program by its name and/or description"
+msgstr "Khởi chạy chương trình theo tên hay mô tả"
+
+#: ../gnopi/gnopi_files/Speech_Settings/speech_settings.glade2.h:31
+#: src/prefsdlg.cpp:75 src/stardict.cpp:1577
+msgid "Dictionary"
+msgstr "Từ điển"
+
+#: ../deskbar/handlers/programs.py:19
+msgid "Look up word definitions in the dictionary"
+msgstr "Tìm lời định nghĩa từ trong từ điển"
+
+#: ../deskbar/handlers/programs.py:22
+msgid "Files and Folders Search"
+msgstr "Tìm kiếm Tập tin và Thư mục"
+
+#: ../deskbar/handlers/programs.py:23
+msgid "Find files and folders by searching for a name pattern"
+msgstr "Tìm tập tin và thư mục bằng cách tìm kiếm mẫu tên"
+
+#. translators: First %s is the programs full name, second is the executable name
+#. translators: For example: Launch Text Editor (gedit)
+#: ../deskbar/handlers/programs.py:66
+msgid "Launch %s (%s)"
+msgstr "Khởi chạy %s (%s)"
+
+#: ../deskbar/handlers/programs.py:79
+#, python-format
+msgid "Lookup %s in dictionary"
+msgstr "Tra tìm %s trong từ điển"
+
+#: ../deskbar/handlers/programs.py:87
+#, python-format
+msgid "Search for file names like %s"
+msgstr "Tìm kiếm tên tập tin như %s"
+
+#: ../deskbar/handlers/volumes.py:16
+msgid "Disks and Network Places"
+msgstr "Nơi mạng và đĩa"
+
+#: ../deskbar/handlers/volumes.py:17
+msgid "Open disk drives, shared network places and similar resources by name"
+msgstr "Mở ổ đĩa, nơi mạng dùng chung và tài nguyên tương tự theo tên"
+
+#: ../deskbar/handlers/volumes.py:41
+#, python-format
+msgid "Open network place %s"
+msgstr "Mở nơi mạng %s"
+
+#: ../deskbar/handlers/volumes.py:43
+#, python-format
+msgid "Open audio disk %s"
+msgstr "Mở đĩa âm thanh %s"
+
+#: ../48x48/emblems/emblem-web.icon.in.h:1 ../data/browser.xml.h:87
+msgid "Web"
+msgstr "Mạng"
+
+#: ../deskbar/handlers/web_address.py:10
+msgid "Open web pages by typing a complete web address"
+msgstr "Mở trang Mạng nào bằng cách gõ địa chỉ Mạng đầy đủ"
+
+#: ../deskbar/handlers/web_address.py:35
+#, python-format
+msgid "Open the web page %s"
+msgstr "Mở trang Mạng %s"
+
+#: ../deskbar/handlers/web_address.py:37
+#, python-format
+msgid "Open the location %s"
+msgstr "Mở địa điểm %s"
+
+#: ../deskbar/preferences.py:53
+#: ../plugins/spell/gedit-automatic-spell-checker.c:443
+msgid "_More..."
+msgstr "Th_êm..."
+
+#: ../data/gnome-screensaver-preferences.desktop.in.h:1
+msgid "Screensaver"
+msgstr "Bộ bảo vệ màn hình"
+
+#: ../data/gnome-screensaver-preferences.desktop.in.h:2
+msgid "Set your screensaver preferences"
+msgstr "Lập các tùy thích cho trình bảo vệ màn hình."
+
+#: ../data/glade/jamboree.glade.h:2 ../glade/straw.glade.h:3
+msgid "    "
+msgstr "    "
+
+#: ../data/gnome-screensaver-preferences.glade.h:2
+msgid "_Screensaver"
+msgstr "Bộ _bảo vệ màn hình"
+
+#: ../data/gnome-screensaver-preferences.glade.h:3
+#: ../data/gnome-screensaver-preferences.glade.h:4
+msgid "Screensaver Preferences"
+msgstr "Tùy thích Bộ bảo vệ màn hình"
+
+#: ../data/gnome-screensaver-preferences.glade.h:4
+#: ../data/gnome-screensaver-preferences.glade.h:5
+msgid "_Activate after:"
+msgstr "_Hoạt hóa sau :"
+
+#: ../data/gnome-screensaver-preferences.glade.h:5
+msgid "_Lock screen when active"
+msgstr "_Khoá màn hình khi hoạt động"
+
+#: ../data/gnome-screensaver.directory.in.h:1
+#: ../data/gnome-screensaver.schemas.in.h:7
+msgid "Screensaver themes"
+msgstr "Sắc thái của ảnh bảo vệ màn hình"
+
+#: ../data/gnome-screensaver.directory.in.h:2
+msgid "Screensavers"
+msgstr "Ảnh bảo vệ màn hình"
+
+#: ../data/gnome-screensaver.schemas.in.h:1
+msgid "Allow logout"
+msgstr "Cho phép đăng xuất"
+
+#: ../data/gnome-screensaver.schemas.in.h:2
+msgid "Allow monitor power management"
+msgstr "Cho phép quản lý điện năng của bộ trình bày"
+
+#: ../data/gnome-screensaver.schemas.in.h:3
+msgid "Allow user switching"
+msgstr "Cho phép chuyển đổi người dùng"
+
+#: ../data/gnome-screensaver.schemas.in.h:4
+msgid "Lock on activation"
+msgstr "Khoá khi mới hoạt động"
+
+#: ../data/gnome-screensaver.schemas.in.h:5
+msgid "Logout command"
+msgstr "Lệnh đăng xuất"
+
+#: ../data/gnome-screensaver.schemas.in.h:6
+msgid "Screensaver selection mode"
+msgstr "Chế độ lựa chọn ảnh bảo vệ màn hình"
+
+#: ../data/gnome-screensaver.schemas.in.h:8
+msgid "Set this to TRUE to allow the screensaver to power down the monitor."
+msgstr ""
+"Đặt giá trị này là TRUE (đúng) để cho phép bộ bảo vệ màn hình tắt điện của "
+"thiết bị hiển thị."
+
+#: ../data/gnome-screensaver.schemas.in.h:9
+msgid "Set this to TRUE to lock the screen when the screensaver goes active."
+msgstr ""
+"Đặt giá trị này là TRUE (đúng) để khoá màn hình khi bộ bảo vệ màn hình mới "
+"hoạt động."
+
+#: ../data/gnome-screensaver.schemas.in.h:10
+msgid ""
+"Set this to TRUE to offer an option in the unlock dialog to switch to a "
+"different user account."
+msgstr ""
+"Đặt giá trị này là TRUE (đúng) để cung cấp trong hộp thoại bỏ khoá tùy chọn "
+"chuyển đổi sang tài khoản người dùng khác."
+
+#: ../data/gnome-screensaver.schemas.in.h:11
+msgid ""
+"Set this to TRUE to offer an option in unlock dialog to logging out after a "
+"delay. The Delay is specified in the \"logout_delay\" key."
+msgstr ""
+"Đặt giá trị này là TRUE (đúng) để cung cấp trong hộp thoại bỏ khoá tùy chọn "
+"đăng xuất sau khi trễ. Sự trễ được ghi rõ trong khoá « logout_delay » (sự "
+"trễ đăng xuất)."
+
+#: ../data/gnome-screensaver.schemas.in.h:12
+msgid ""
+"The command to invoke when the logout button is clicked. This command should "
+"simply log the user out without any interaction. This key has effect only if "
+"the \"logout_enable\" key is set to TRUE."
+msgstr ""
+"Lệnh cần chạy khi cái nút đăng xuất được bấm. Lệnh này nên đơn giản đăng "
+"xuất người dùng, không tương tác gì. Khoá này có tác động chỉ nếu khoá « "
+"logout_enable » (bật đăng xuất) được đặt là TRUE (đúng)."
+
+#: ../data/gnome-screensaver.schemas.in.h:13
+msgid ""
+"The number of minutes after screensaver activation before locking the screen."
+msgstr ""
+"Số phút sau khi bộ bảo vệ màn hình mới hoạt động, trước khi khoá màn hình."
+
+#: ../data/gnome-screensaver.schemas.in.h:14
+msgid ""
+"The number of minutes after the screensaver activation before a logout "
+"option will appear in unlock dialog. This key has effect only if the "
+"\"logout_enable\" key is set to TRUE."
+msgstr ""
+"Số phút sau khi bộ bảo vệ màn hình mới hoạt động, trước khi tùy chọn đăng "
+"xuất được hiển thị trong hộp thoại bỏ khoá. Khoá này có tác động chỉ nếu "
+"khoá « logout_enable » (bật đăng xuất) được đặt là TRUE (đúng)."
+
+#: ../data/gnome-screensaver.schemas.in.h:15
+msgid ""
+"The number of minutes after the screensaver activation until the monitor "
+"goes into standby power mode."
+msgstr ""
+"Số phút sau khi bộ bảo vệ màn hình mới hoạt động, trước khi thiết bị hiển "
+"thị vào chế độ chờ."
+
+#: ../data/gnome-screensaver.schemas.in.h:16
+msgid ""
+"The number of minutes after the screensaver activation until the monitor "
+"goes into suspend power mode."
+msgstr ""
+"Số phút sau khi bộ bảo vệ màn hình mới hoạt động, trước khi thiết bị hiển "
+"thị vào chế độ ngưng điện năng."
+
+#: ../data/gnome-screensaver.schemas.in.h:17
+msgid ""
+"The number of minutes after the screensaver activation until the monitor "
+"powers off."
+msgstr ""
+"Số phút sau khi bộ bảo vệ màn hình mới hoạt động, trước khi thiết bị hiển "
+"thị tắt điện."
+
+#: ../data/gnome-screensaver.schemas.in.h:18
+msgid "The number of minutes of idle time before activating the screensaver."
+msgstr "Số phút nghỉ trước khi hoạt hóa bộ bảo vệ màn hình."
+
+#: ../data/gnome-screensaver.schemas.in.h:19
+msgid "The number of minutes to run before changing the screensaver theme."
+msgstr ""
+"Bao nhiêu phút cần chạy trước khi thay đổi sắc thái ảnh bảo vệ màn hình."
+
+#: ../data/gnome-screensaver.schemas.in.h:20
+msgid ""
+"The selection mode used by screensaver. May be \"disabled\" to disable "
+"screensaver activation, \"blank-only\" to enable the screensaver without "
+"using any theme on activation, \"single\" to enable screensaver using only "
+"one theme on activation (specified in \"themes\" key), and \"random\" to "
+"enable the screensaver using a random theme on activation."
+msgstr ""
+"Chế độ lựa chọn được dùng bởi bộ bảo vệ màn hình. Chế độ có thể:\n"
+" • disabled (bị tắt) để tắt khả năng hoạt hóa bộ bảo vệ màn hình\n"
+" • blank-only (chỉ trắng) để bật chạy bộ bảo vệ màn hình mà không dùng sắc "
+"thái nào khi mới hoạt động\n"
+" • single (đơn) để bật chạy bộ bảo vệ màn hình mà dùng chỉ một sắc thái khi "
+"mới hoạt động thôi (được ghi rõ trong khoá « themes » (sắc thái)\n"
+" • random (ngẫu nhiên) để bật chạy bộ bảo vệ màn hình mà dùng sắc thái ngẫu "
+"nhiên khi mới hoạt động."
+
+#: ../data/gnome-screensaver.schemas.in.h:21
+msgid ""
+"This key specifies the list of themes to be used by the screensaver. It's "
+"ignored when \"mode\" key is \"disabled\" or \"blank-only\", should provide "
+"the theme name when \"mode\" is \"single\", and should provide a list of "
+"themes when \"mode\" is \"random\"."
+msgstr ""
+"Khoá này ghi rõ danh sách các sắc thái cho bộ bảo vệ màn hình dùng. Nó\n"
+" • bị bỏ qua khi khoá « mode » (chế độ) bị tắt (« disabled ») hay chỉ trắng "
+"(« blank only »)\n"
+" • nên cung cấp tên sắc thái khi « mode » (chế độ) là đơn (« single »)\n"
+" • nên cung cấp danh sách các sắc thái khi « mode » (chế độ) là ngẫu nhiên "
+"(« random »)."
+
+#: ../data/gnome-screensaver.schemas.in.h:22
+msgid "Time before activation"
+msgstr "Thời gian trước khi hoạt hóa"
+
+#: ../data/gnome-screensaver.schemas.in.h:23
+msgid "Time before locking"
+msgstr "Thời gian trước khi khoá"
+
+#: ../data/gnome-screensaver.schemas.in.h:24
+msgid "Time before logout option"
+msgstr "Thời gian trước khi nhận tùy chọn đăng xuất"
+
+#: ../data/gnome-screensaver.schemas.in.h:25
+msgid "Time before power off"
+msgstr "Thời gian trước khi tắt điện"
+
+#: ../data/gnome-screensaver.schemas.in.h:26
+msgid "Time before standby"
+msgstr "Thời gian trước khi trạng thái chờ"
+
+#: ../data/gnome-screensaver.schemas.in.h:27
+msgid "Time before suspend"
+msgstr "Thời gian trước khi ngưng"
+
+#: ../data/gnome-screensaver.schemas.in.h:28
+msgid "Time before theme change"
+msgstr "Thời gian trước khi thay đổi sắc thái"
+
+#: ../savers/cosmos-slideshow.desktop.in.in.h:1
+#: ../savers/cosmos-slideshow.xml.in.h:1
+msgid "Cosmos"
+msgstr "Vũ trụ"
+
+#: ../savers/cosmos-slideshow.desktop.in.in.h:2
+msgid "Display a slideshow of pictures of the cosmos"
+msgstr "Hiển thị trình diễn các ảnh của vũ trụ"
+
+#: ../savers/personal-slideshow.desktop.in.h:1
+msgid "Display a slideshow from your Pictures folder"
+msgstr "Hiển thị trình diễn các ảnh từ thư mục Ảnh (Pictures) của bạn"
+
+#: ../savers/personal-slideshow.xml.h:1
+msgid "Pictures folder"
+msgstr "Thư mục Ảnh"
+
+#: ../savers/popsquares.desktop.in.h:1
+msgid "A pop-art-ish grid of pulsing colors."
+msgstr "Vẽ lưới các màu đập kiểu dáng nghệ thuật phổ biến."
+
+#: ../savers/popsquares.desktop.in.h:2 ../savers/popsquares.xml.h:1
+msgid "Pop art squares"
+msgstr "Vuông nghệ thuật phổ biến"
+
+#: ../savers/floaters.c:1164
+msgid "show paths that images follow"
+msgstr "hiển thị các đường dẫn mà ảnh theo"
+
+#: ../savers/floaters.c:1171
+msgid "occasionally rotate images as they move"
+msgstr "thỉnh thoảng quay ảnh trong khi di chuyển"
+
+#: ../savers/floaters.c:1178
+msgid "print out frame rate and other statistics"
+msgstr "in ra tốc độ khung (frame rate) và thống kê khác"
+
+#: ../savers/floaters.c:1186
+msgid "the maximum number of images to keep on screen"
+msgstr "số ảnh tối đa cần giữ trên màn hình"
+
+#: ../savers/floaters.c:1190
+msgid "N"
+msgstr "N"
+
+#: ../savers/floaters.c:1197
+msgid "the source image to use"
+msgstr "ảnh nguồn cần dùng"
+
+#: ../savers/floaters.c:1204
+msgid "the initial size and position of window"
+msgstr "kích cỡ và vị trí ban đầu của cửa sổ"
+
+#: ../savers/floaters.c:1209 ../src/gnomeicu.c:543
+msgid "WIDTHxHEIGHT+X+Y"
+msgstr "RỘNGxCAO+X+Y"
+
+#: ../savers/floaters.c:1229
+msgid "image - floats images around the screen"
+msgstr "image - làm nổi ảnh ở chung quanh màn hình"
+
+#: ../savers/floaters.c:1239
+#, c-format
+msgid "%s. See --help for usage information.\n"
+msgstr ""
+"%s. Hãy chạy lệnh « --help » (trợ giúp) để xem thông tin về cách sử dụng.\n"
+
+#: ../savers/floaters.c:1248
+msgid "You must specify one image. See --help for usage information.\n"
+msgstr ""
+"Bạn phải ghi rõ một ảnh. Hãy chạy lệnh « --help » (trợ giúp) để xem thông "
+"tin về cách sử dụng.\n"
+
+#: ../savers/slideshow.c:47 ../savers/slideshow.c:63
+msgid "Location to get images from"
+msgstr "Địa điểm nơi cần lấy ảnh"
+
+#: ../msearch/medusa-command-line-search.c:156 ../savers/slideshow.c:63
+msgid "PATH"
+msgstr "ĐƯỜNG DẪN"
+
+#. "Any program that is designed to perform a certain set of housekeeping tasks related to computer operation, such as the maintenance of files."
+#: ../src/cut-n-paste/fusa-display.c:119 ../src/cut-n-paste/fusa-user.c:150
+#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:22
+msgid "Manager"
+msgstr "Bộ quản lý"
+
+#: ../src/cut-n-paste/fusa-display.c:120 ../src/fusa-display.c:120
+msgid "The manager which owns this object."
+msgstr "Bộ quản lý sở hữu đối tượng này."
+
+#: ../src/cut-n-paste/fusa-display.c:128 ../src/fusa-display.c:128
+msgid "The name of the X11 display this object refers to."
+msgstr "Tên của bộ trình bày X11 mà đối tượng này tham chiếu đến."
+
+#: ../src/cut-n-paste/fusa-display.c:135 ../src/fusa-display.c:135
+msgid "The user currently logged in on this virtual terminal."
+msgstr "Người dùng mà hiện thời đã đăng nhập vào thiết bị cuối ảo này."
+
+#: ../src/cut-n-paste/fusa-display.c:141 serial.c:446
+#: ../src/fusa-display.c:141
+msgid "Console"
+msgstr "Bàn giao tiếp"
+
+#: ../src/cut-n-paste/fusa-display.c:142 ../src/fusa-display.c:142
+msgid "The number of the virtual console this display can be found on, or %-1."
+msgstr ""
+"Số hiệu của bàn giao tiếp ảo trên đó bộ trình bày có thể được tìm, hoặc %-1."
+
+#: ../src/cut-n-paste/fusa-display.c:148 ../src/fusa-display.c:148
+msgid "Nested"
+msgstr "Lồng nhau"
+
+#: ../src/cut-n-paste/fusa-display.c:149 ../src/fusa-display.c:149
+msgid "Whether or not this display is a windowed (Xnest) display."
+msgstr "Bộ trình bày này có phải là bộ trình bày có cửa sổ (Xnest) hay không."
+
+#: ../src/cut-n-paste/fusa-manager.c:1263 ../src/fusa-manager.c:1263
+#: ../src/cut-n-paste/fusa-manager.c:1264
+msgid "The display manager could not be contacted for unknown reasons."
+msgstr "Không thể liên lạc với bộ quản lý trình bày, không biết lý do."
+
+#: ../src/cut-n-paste/fusa-manager.c:1270 ../src/fusa-manager.c:1270
+#: ../src/cut-n-paste/fusa-manager.c:1271
+msgid "The display manager is not running or too old."
+msgstr "Bộ quản lý trình bày không đang chạy hoặc nó quá cũ."
+
+#: ../src/cut-n-paste/fusa-manager.c:1273 ../src/fusa-manager.c:1273
+#: ../src/cut-n-paste/fusa-manager.c:1274
+msgid "The configured limit of flexible servers has been reached."
+msgstr "Mới tới giới hạn đã cấu hình của số trình phục vụ dẻo."
+
+#: ../src/cut-n-paste/fusa-manager.c:1276 ../src/fusa-manager.c:1276
+#: ../src/cut-n-paste/fusa-manager.c:1277
+msgid "There was an unknown error starting X."
+msgstr "Gặp lỗi không xác định khi khởi chạy X."
+
+#: ../src/cut-n-paste/fusa-manager.c:1279 ../src/fusa-manager.c:1279
+#: ../src/cut-n-paste/fusa-manager.c:1280
+msgid "The X server failed to finish starting."
+msgstr "Trình phục vụ X không hoàn tất việc khởi chạy."
+
+#: ../src/cut-n-paste/fusa-manager.c:1282 ../src/fusa-manager.c:1282
+#: ../src/cut-n-paste/fusa-manager.c:1283
+msgid "There are too many X sessions running."
+msgstr "Có quá nhiều phiên X đang chạy."
+
+#: ../src/cut-n-paste/fusa-manager.c:1285 ../src/fusa-manager.c:1285
+#: ../src/cut-n-paste/fusa-manager.c:1286
+msgid "The nested X server (Xnest) cannot connect to your current X server."
+msgstr ""
+"Trình phục vụ X lồng nhau (Xnest) không thể kết nối đến trình phục vụ X hiện "
+"thời của bạn."
+
+#: ../src/cut-n-paste/fusa-manager.c:1288 ../src/fusa-manager.c:1288
+#: ../src/cut-n-paste/fusa-manager.c:1289
+msgid "The X server in the GDM configuration could not be found."
+msgstr "Không tìm thấy trình phục vụ X trong cấu hình GDM."
+
+#: ../src/cut-n-paste/fusa-manager.c:1292
+msgid ""
+"Trying to set an unknown logout action, or trying to set a logout action "
+"which is not available."
+msgstr ""
+"Đã cố đặt hành động đăng xuất lạ, hoặc một hành động đăng xuất không sẵn "
+"sàng."
+
+#: ../src/cut-n-paste/fusa-manager.c:1295
+msgid "Virtual terminals not supported."
+msgstr "Không hỗ trợ thiết bị cuối ảo."
+
+#: ../src/cut-n-paste/fusa-manager.c:1297 ../src/fusa-manager.c:1297
+#: ../src/cut-n-paste/fusa-manager.c:1298
+msgid "Invalid virtual terminal number."
+msgstr "Số thiết bị cuối ảo không hợp lệ."
+
+#: ../src/cut-n-paste/fusa-manager.c:1301
+msgid "Trying to update an unsupported configuration key."
+msgstr "Đã cố cập nhật một khoá cấu hình không được hỗ trợ."
+
+#: ../src/cut-n-paste/fusa-manager.c:1303 ../src/fusa-manager.c:1303
+#: ../src/cut-n-paste/fusa-manager.c:1304
+msgid "~/.Xauthority file badly configured or missing."
+msgstr "Tập tin « ~/.Xauthority » có cấu hình sai, hoặc thiếu nó."
+
+#: ../src/cut-n-paste/fusa-manager.c:1306 ../src/fusa-manager.c:1306
+#: ../src/cut-n-paste/fusa-manager.c:1307
+msgid "Too many messages were sent to the display manager, and it hung up."
+msgstr ""
+"Quá nhiều thông điệp đã được gởi cho bộ quản lý trình bày nên nó ngắt kết "
+"nối."
+
+#: ../src/cut-n-paste/fusa-manager.c:1310 ../src/fusa-manager.c:1310
+#: ../src/cut-n-paste/fusa-manager.c:1311
+msgid "The display manager sent an unknown error message."
+msgstr "Bộ quản lý trình bày đã gởi một thông điệp lỗi không xác định."
+
+#: ../src/cut-n-paste/fusa-user-menu-item.c:153
+msgid "The user this menu item represents."
+msgstr "Người dùng mà mục trình đơn này tiêu biểu."
+
+#: ../src/cut-n-paste/fusa-user-menu-item.c:160 ../src/splashwindow.c:201
+msgid "Icon Size"
+msgstr "Cỡ biểu tượng"
+
+#: ../src/cut-n-paste/fusa-user-menu-item.c:161
+msgid "The size of the icon to use."
+msgstr "Kích cỡ của biểu tượng cần dùng."
+
+#: ../src/cut-n-paste/fusa-user.c:151 ../src/fusa-user.c:151
+msgid "The user manager object this user is controlled by."
+msgstr "Đối tượng bộ quản lý người dùng có điều khiển người dùng này."
+
+#: ../src/cut-n-paste/fusa-utils.c:80 src/DialogMain.cc:60
+#: ../src/fusa-utils.c:80
+msgid "Show Details"
+msgstr "Hiện chi tiết"
+
+#. markup
+#: ../src/cut-n-paste/gdmcomm.c:413 ../src/gdmcomm.c:413
+msgid "GDM (The GNOME Display Manager) is not running."
+msgstr "GDM (Bộ quản lý trình bày Gnome) không đang chạy."
+
+#: ../src/cut-n-paste/gdmcomm.c:416 ../src/gdmcomm.c:416
+msgid ""
+"You might in fact be using a different display manager, such as KDM (KDE "
+"Display Manager) or xdm."
+msgstr ""
+"Có lẽ bạn thật sự có sử dụng một bộ quản lý trình bày khác, như KDM (Bộ quản "
+"lý trình bày KDE) hoặc xdm."
+
+#: ../src/cut-n-paste/gdmcomm.c:419 ../src/gdmcomm.c:419
+msgid ""
+"If you still wish to use this feature, either start GDM yourself or ask your "
+"system administrator to start GDM."
+msgstr ""
+"Nếu bạn vẫn còn muốn sử dụng tính năng này, hãy hoặc tự khởi chạy GDM, hoặc "
+"xin quản trị hệ thống làm như thế."
+
+#. markup
+#: ../src/cut-n-paste/gdmcomm.c:441 ../src/gdmcomm.c:441
+msgid "Cannot communicate with GDM (The GNOME Display Manager)"
+msgstr "Không thể liên lạc với GDM (Bộ quản lý trình bày Gnome)"
+
+#: ../src/cut-n-paste/gdmcomm.c:444 ../src/gdmcomm.c:444
+msgid "Perhaps you have an old version of GDM running."
+msgstr "Có lẽ bạn đang chạy một phiên bản GDM cũ."
+
+#: ../src/cut-n-paste/gdmcomm.c:463 ../src/cut-n-paste/gdmcomm.c:466
+#: ../src/gdmcomm.c:463 ../src/gdmcomm.c:466
+msgid "Cannot communicate with gdm, perhaps you have an old version running."
+msgstr "Không thể liên lạc với GDM, có lẽ bạn đang chạy một phiên bản GDM cũ."
+
+#: ../src/cut-n-paste/gdmcomm.c:469 ../src/gdmcomm.c:469
+msgid "The allowed limit of flexible X servers reached."
+msgstr "Mới tới giới hạn số trình phục vụ X dẻo được phép."
+
+#: ../src/cut-n-paste/gdmcomm.c:471 ../src/gdmcomm.c:471
+msgid "There were errors trying to start the X server."
+msgstr "Gặp lỗi khi cố khởi chạy trình phục vụ X."
+
+#: ../src/cut-n-paste/gdmcomm.c:473 ../src/gdmcomm.c:473
+msgid "The X server failed. Perhaps it is not configured well."
+msgstr "Trình phục vụ X thất bại. Có lẽ nó có cấu hình sai."
+
+#: ../src/cut-n-paste/gdmcomm.c:476 ../src/gdmcomm.c:476
+msgid "Too many X sessions running."
+msgstr "Quá nhiều phiên X đang chạy."
+
+#: ../src/cut-n-paste/gdmcomm.c:478 ../src/gdmcomm.c:478
+msgid ""
+"The nested X server (Xnest) cannot connect to your current X server. You "
+"may be missing an X authorization file."
+msgstr ""
+"Trình phục vụ X lồng nhau (Xnest) không thể kết nối đến trình phục vụ X hiện "
+"thời của bạn. Có lẽ bạn thiếu một tập tin cấp quyền X."
+
+#: ../src/cut-n-paste/gdmcomm.c:483 ../src/gdmcomm.c:483
+msgid ""
+"The nested X server (Xnest) is not available, or gdm is badly configured.\n"
+"Please install the Xnest package in order to use the nested login."
+msgstr ""
+"Trình phục vụ X lồng nhau (Xnest) không sẵn sàng, hoặc GDM có cấu hình sai.\n"
+"Bạn hãy cài đặt gói Xnest, để sử dụng khả năng đăng nhập lồng nhau."
+
+#: ../src/cut-n-paste/gdmcomm.c:488 ../src/gdmcomm.c:488
+msgid ""
+"The X server is not available, it is likely that gdm is badly configured."
+msgstr ""
+"Trình phục vụ X không sẵn sàng; rất có thể là GDM có cấu hình sai."
+
+#: ../src/cut-n-paste/gdmcomm.c:497 ../src/gdmcomm.c:497
+msgid "Trying to change to an invalid virtual terminal number."
+msgstr "Đang cố chuyển đổi sang một số thiết bị cuối ảo không hợp lệ."
+
+#: ../src/cut-n-paste/gdmcomm.c:501 ../src/gdmcomm.c:501
+msgid ""
+"You do not seem to have authentication needed be for this operation. "
+"Perhaps your .Xauthority file is not set up correctly."
+msgstr ""
+"Hình như bạn không có cách xác thực cần thiết cho thao tác này. Có lẽ tập "
+"tin « .Xauthority » của bạn chưa được thiết lập cho đúng."
+
+#: ../src/cut-n-paste/gdmcomm.c:505 ../src/gdmcomm.c:505
+msgid "Too many messages were sent to gdm and it hung upon us."
+msgstr ""
+"Quá nhiều thông điệp đã được gởi cho GDM nên nó ngắt kết nối đến chúng ta."
+
+#: ../src/cut-n-paste/gdmcomm.c:508 ../src/gdmcomm.c:508
+msgid "Unknown error occurred."
+msgstr "Gặp lỗi không xác định."
+
+#: ../src/file-transfer-dialog.c:94
+#, c-format
+msgid "Copying file: %u of %u"
+msgstr "Đang sao chép tập tin: %u trên %u"
+
+#: ../src/file-transfer-dialog.c:122
+#: ../capplets/common/file-transfer-dialog.c:122
+#, c-format
+msgid "Copying '%s'"
+msgstr "Đang sao chép « %s »"
+
+#: ../src/file-transfer-dialog.c:193
+msgid "From URI"
+msgstr "Từ URI"
+
+#: ../src/file-transfer-dialog.c:194
+msgid "URI currently transferring from"
+msgstr "Hiện thời truyền từ URI này"
+
+#: ../src/file-transfer-dialog.c:201
+msgid "To URI"
+msgstr "Tới URI"
+
+#: ../src/file-transfer-dialog.c:202
+msgid "URI currently transferring to"
+msgstr "Hiện thời truyền đến URI này."
+
+#: ../src/file-transfer-dialog.c:209
+#: ../capplets/common/file-transfer-dialog.c:209
+msgid "Fraction completed"
+msgstr "Phần hoàn tất"
+
+#: ../src/file-transfer-dialog.c:210
+#: ../capplets/common/file-transfer-dialog.c:210
+msgid "Fraction of transfer currently completed"
+msgstr "Phần truyền hiện thời đã hoàn tất"
+
+#: ../src/file-transfer-dialog.c:217
+msgid "Current URI index"
+msgstr "Chỉ mục URI hiện thời"
+
+#: ../src/file-transfer-dialog.c:218
+msgid "Current URI index - starts from 1"
+msgstr "Chỉ mục URI hiện thời — bắt đầu từ 1"
+
+#: ../src/file-transfer-dialog.c:225
+msgid "Total URIs"
+msgstr "URI tổng cộng"
+
+#: ../src/file-transfer-dialog.c:226
+msgid "Total number of URIs"
+msgstr "Tổng số URI"
+
+#: ../capplets/common/file-transfer-dialog.c:448
+msgid "Connecting..."
+msgstr "Đang kết nối..."
+
+#: ../src/gnome-screensaver-command.c:59 ../src/gnome-screensaver-command.c:58
+msgid "Causes the screensaver to exit gracefully"
+msgstr "Làm cho bộ bảo vệ màn hình thoát cho đúng."
+
+#: ../src/gnome-screensaver-command.c:61 ../src/gnome-screensaver-command.c:60
+msgid "Query the state of the screensaver"
+msgstr "Truy vấn tình trạng của bộ bảo vệ màn hình."
+
+#: ../src/gnome-screensaver-command.c:63 ../src/gnome-screensaver-command.c:62
+msgid "Tells the running screensaver process to lock the screen immediately"
+msgstr "Báo tiến trình bảo vệ màn hình đang chạy phải khoá màn hình ngay."
+
+#: ../src/gnome-screensaver-command.c:65 ../src/gnome-screensaver-command.c:64
+msgid "If the screensaver is active then switch to another graphics demo"
+msgstr ""
+"Nếu bộ bảo vệ màn hình có hoạt động thì chuyển đổi sang một trình diễn đồ "
+"họa khác."
+
+#: ../src/gnome-screensaver-command.c:67 ../src/gnome-screensaver-command.c:66
+msgid "Turn the screensaver on (blank the screen)"
+msgstr "Khởi chạy bộ bảo vệ màn hình (làm trắng màn hình)"
+
+#: ../src/gnome-screensaver-command.c:69 ../src/gnome-screensaver-command.c:68
+msgid "If the screensaver is active then deactivate it (un-blank the screen)"
+msgstr "Nếu bộ bảo vệ màn hình có hoạt động thì tắt nó (bỏ trắng màn hình)"
+
+#: ../src/gnome-screensaver-command.c:71 ../src/gnome-screensaver-command.c:70
+msgid "Disable running graphical themes while blanked"
+msgstr "Tắt các sắc thái đồ họa đang chạy trong khi bị trắng."
+
+#: ../src/gnome-screensaver-command.c:73 ../src/gnome-screensaver-command.c:72
+msgid "Enable running graphical themes while blanked (if applicable)"
+msgstr ""
+"Bật các sắc thái đồ họa đang chạy trong khi bị trắng (nếu thích hợp)"
+
+#: ../src/gnome-screensaver-command.c:75 ../src/gnome-screensaver-command.c:74
+msgid "Poke the running screensaver to simulate user activity"
+msgstr ""
+"Thức bộ bảo vệ màn hình đang chạy, để mô phỏng hoạt động của người dùng."
+
+#: ../src/gnome-screensaver-dialog.c:53 ../src/gnome-screensaver.c:52
+msgid "Version of this application"
+msgstr "Phiên bản ứng dụng này"
+
+#: ../src/gnome-screensaver-command.c:202
+#: ../src/gnome-screensaver-command.c:201
+#, c-format
+msgid "The screensaver is %s\n"
+msgstr "Bộ bảo vệ màn hình là %s\n"
+
+#: ../src/gnome-screensaver-command.c:202
+#: ../src/gnome-screensaver-command.c:201
+msgid "active"
+msgstr "hoạt động"
+
+#: ../src/gnome-screensaver-command.c:202
+#: ../src/gnome-screensaver-command.c:201
+msgid "inactive"
+msgstr "không hoạt động"
+
+#: ../src/gnome-screensaver-dialog.c:104 ../src/gnome-screensaver-dialog.c:51
+msgid "Show debugging output"
+msgstr "Hiện dữ liệu xuất gỡ lỗi"
+
+#: ../src/gnome-screensaver-dialog.c:108 ../src/gnome-screensaver-dialog.c:55
+msgid "Show the logout button"
+msgstr "Hiện cái nút đăng xuất"
+
+#: ../src/gnome-screensaver-dialog.c:110
+msgid "Command to invoke from the logout button"
+msgstr "Lệnh cần chạy từ cái nút đăng xuất"
+
+#: ../src/gnome-screensaver-dialog.c:112
+msgid "Show the switch user button"
+msgstr "Hiện cái nút chuyển đổi người dùng"
+
+#: ../libgnomedb/gnome-db-matrix.c:2189 ../libgnomedb/gnome-db-matrix.c:2322
+msgid "Disabled"
+msgstr "Đã tắt"
+
+#: ../src/gnome-screensaver-preferences.c:393
+msgid "Blank screen"
+msgstr "Màn hình trắng"
+
+#: ../plug-ins/gimpressionist/size.c:149
+msgid "Random"
+msgstr "Ngẫu nhiên"
+
+#: ../src/gnome-screensaver-preferences.c:692
+#: ../src/gnome-screensaver-preferences.c:616
+msgid "Invalid screensaver theme"
+msgstr "Sắc thái bảo vệ màn hình không hợp lệ"
+
+#: ../src/gnome-screensaver-preferences.c:695
+#: ../src/gnome-screensaver-preferences.c:619
+msgid "This file does not appear to be a valid screensaver theme."
+msgstr "Hình như tập tin này không phải là sắc thái bảo vệ màn hình hợp lệ."
+
+#: ../src/gnome-torrent.in:245
+#, c-format
+msgid "%d hour"
+msgid_plural "%d hours"
+msgstr[0] "%d giờ"
+
+#: ../src/gnome-torrent.in:246 ../src/gnome-torrent.in:249
+#, c-format
+msgid "%d minute"
+msgid_plural "%d minutes"
+msgstr[0] "%d phút"
+
+#: ../src/gnome-torrent.in:250 ../src/gnome-torrent.in:253
+#, c-format
+msgid "%d second"
+msgid_plural "%d seconds"
+msgstr[0] "%d giây"
+
+#: ../sources/rb-playlist-source-recorder.c:419
+#, c-format
+msgid "%s %s %s"
+msgstr "%s %s %s"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. hour:minutes
+#. minutes:seconds
+#. #-#-#-#-# libmimedir.vi.po (libmimedir HEADnReport-Msgid-Bugs-To: ) #-#-#-#-#
+#. Translators: pcode city
+#: ../src/gnome-screensaver-preferences.c:835
+#: ../mimedir/mimedir-vcard-address.c:864
+#: ../mimedir/mimedir-vcard-address.c:873
+#, c-format
+msgid "%s %s"
+msgstr "%s %s"
+
+# #-#-#-#-# straw.po (straw) #-#-#-#-#
+# Variable: don't translate / Biến: đừng dịch
+#: ../sources/rb-playlist-source-recorder.c:425 ../src/lib/Application.py:155
+#: ../src/lib/subscribe.py:414 ../src/lib/subscribe.py:415
+#, c-format, python-format
+msgid "%s"
+msgstr "%s"
+
+#: ../sources/rb-playlist-source-recorder.c:428
+msgid "0 seconds"
+msgstr "0 giây"
+
+#: ../src/gnome-screensaver-preferences.c:896 ../src/properties.c:255
+#: ../src/gnome-screensaver-preferences.c:819
+msgid "Could not load the main interface"
+msgstr "Không thể tải giao diện chính"
+
+#: ../src/gnome-screensaver-preferences.c:898
+#: ../src/gnome-screensaver-preferences.c:821
+msgid "Please make sure that the screensaver is properly installed"
+msgstr "Hãy chắc là bộ bảo vệ màn hình được cài đặt đúng."
+
+#: ../src/gnome-screensaver.c:57
+msgid "Don't become a daemon"
+msgstr "Đừng chạy trong nền"
+
+#: ../src/gnome-screensaver.c:58 ../shell/main.c:149
+msgid "Enable debugging code"
+msgstr "Bật chạy mã gỡ lỗi"
+
+#: ../src/gs-listener-dbus.c:1014 ../src/gs-listener-dbus.c:852
+msgid "failed to register with the message bus"
+msgstr "việc đăng ký với mạch nối thông điệp bị lỗi"
+
+#: ../src/gs-listener-dbus.c:1024
+msgid "not connected to the message bus"
+msgstr "chưa kết nối đến mạch nối thông điệp"
+
+#: ../src/gs-listener-dbus.c:1033 ../src/gs-listener-dbus.c:861
+msgid "screensaver already running in this session"
+msgstr "bộ bảo vệ màn hình đang chạy trong phiên chạy này"
+
+#: ../src/gs-lock-plug.c:346 ../src/gs-lock-plug.c:248
+msgid "Checking password..."
+msgstr "Đang kiểm tra mật khẩu..."
+
+#: ../src/gs-lock-plug.c:389
+msgid "Time has expired."
+msgstr "Quá giờ."
+
+#: ../src/gs-lock-plug.c:414 ../src/gs-lock-plug.c:330
+msgid "You have the Caps Lock key on."
+msgstr "Bạn đã bấm phím Khoá Chữ Hoa (CapsLock)"
+
+#: ../src/gs-lock-plug.c:691
+msgid "That password was incorrect."
+msgstr "Bạn mới gõ một mật khẩu không đúng."
+
+#: ../src/gs-lock-plug.c:717 ../src/gs-lock-plug.c:750
+#: ../src/gs-lock-plug.c:561 ../src/gs-lock-plug.c:602
+msgid "_Unlock"
+msgstr "_Bỏ khoá"
+
+#: ../src/gs-lock-plug.c:720 ../src/gs-lock-plug.c:747
+msgid "_Switch User..."
+msgstr "_Chuyển đổi người dùng..."
+
+#: ../src/gs-lock-plug.c:1498
+msgid "S_witch to user:"
+msgstr "C_huyển đổi sang người dùng:"
+
+#: ../src/gs-lock-plug.c:1545 ../src/gs-lock-plug.c:1372
+msgid "Log _Out"
+msgstr "Đăng _xuất"
+
+#: ../data/pessulus.glade.h:1 ../admin-tool/lockdown/pessulus.glade.h:1
+msgid "Disabled Applets"
+msgstr "Tiểu dụng bị tắt"
+
+#: ../data/pessulus.glade.h:2 ../admin-tool/lockdown/pessulus.glade.h:2
+msgid "Safe Protocols"
+msgstr "Giao thức an toàn"
+
+#: ../data/pessulus.glade.h:3 ../admin-tool/lockdown/pessulus.glade.h:3
+msgid "Disable _unsafe protocols"
+msgstr "Tắt các giao thức _bất an"
+
+#: ../data/pessulus.glade.h:4 ../admin-tool/lockdown/pessulus.glade.h:4
+#: ../data/epiphany.desktop.in.h:2 ../src/window-commands.c:779
+msgid "Epiphany Web Browser"
+msgstr "Trình Duyệt Mạng Epiphany"
+
+#: ../data/pessulus.glade.h:6 ../admin-tool/lockdown/pessulus.glade.h:6
+msgid "Lockdown Editor"
+msgstr "Bộ hiệu chỉnh khoá xuống"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. "A control area."
+#: ../Sensors/StarterBar/__init__.py:627 ../src/orca/rolenames.py:338
+msgid "Panel"
+msgstr "Bảng điều khiển"
+
+#: ../Pessulus/lockdownbutton.py:99
+#: ../admin-tool/lockdown/lockdownbutton.py:99
+msgid "Click to make this setting not mandatory"
+msgstr "Nhắp chuột để làm cho thiết lập này không bắt buộc"
+
+#: ../Pessulus/lockdownbutton.py:101
+#: ../admin-tool/lockdown/lockdownbutton.py:101
+msgid "Click to make this setting mandatory"
+msgstr "Nhắp chuột để làm cho thiết lập này bắt buộc"
+
+#: ../Pessulus/maindialog.py:47 ../admin-tool/lockdown/maindialog.py:47
+msgid "Disable _command line"
+msgstr "Tắt dòng _lệnh"
+
+#: ../Pessulus/maindialog.py:48 ../admin-tool/lockdown/maindialog.py:48
+msgid "Disable _printing"
+msgstr "Tắt khả năng _in"
+
+#: ../Pessulus/maindialog.py:49 ../admin-tool/lockdown/maindialog.py:49
+msgid "Disable print _setup"
+msgstr "Tắt _thiết lập in"
+
+#: ../Pessulus/maindialog.py:50 ../admin-tool/lockdown/maindialog.py:50
+msgid "Disable save to _disk"
+msgstr "Tắt khả năng lưu vào _đĩa"
+
+#: ../Pessulus/maindialog.py:52 ../admin-tool/lockdown/maindialog.py:52
+msgid "_Lock down the panels"
+msgstr "_Khoá xuống các Bảng điều khiển"
+
+#: ../Pessulus/maindialog.py:53 ../admin-tool/lockdown/maindialog.py:53
+msgid "Disable force _quit"
+msgstr "Tắt khả năng buộc t_hoát"
+
+#: ../Pessulus/maindialog.py:54 ../admin-tool/lockdown/maindialog.py:54
+msgid "Disable lock _screen"
+msgstr "Tắt khả năng khoá _màn hình"
+
+#: ../Pessulus/maindialog.py:55 ../admin-tool/lockdown/maindialog.py:55
+msgid "Disable log _out"
+msgstr "Tắt khả năng đăng _xuất"
+
+#: ../Pessulus/maindialog.py:57 ../admin-tool/lockdown/maindialog.py:57
+msgid "Disable _quit"
+msgstr "Tắt khả năng t_hoát"
+
+#: ../Pessulus/maindialog.py:58 ../admin-tool/lockdown/maindialog.py:58
+msgid "Disable _arbitrary URL"
+msgstr "Tắt địa chỉ Mạng tù_y ý"
+
+#: ../Pessulus/maindialog.py:59 ../admin-tool/lockdown/maindialog.py:59
+msgid "Disable _bookmark editing"
+msgstr "Tắt hiệu chỉnh đánh _dấu"
+
+#: ../Pessulus/maindialog.py:60 ../admin-tool/lockdown/maindialog.py:60
+msgid "Disable _history"
+msgstr "Tắt _lược sử"
+
+#: ../Pessulus/maindialog.py:61 ../admin-tool/lockdown/maindialog.py:61
+msgid "Disable _javascript chrome"
+msgstr "Tắt crom _JavaScript"
+
+#: ../Pessulus/maindialog.py:62 ../admin-tool/lockdown/maindialog.py:62
+msgid "Disable _toolbar editing"
+msgstr "Tắt hiệu chỉnh thanh _công cụ"
+
+#: ../Pessulus/maindialog.py:63 ../admin-tool/lockdown/maindialog.py:63
+#: ../src/f-spot.glade.h:166
+msgid "_Fullscreen"
+msgstr "Toàn _màn hình"
+
+#: ../Pessulus/maindialog.py:64 ../admin-tool/lockdown/maindialog.py:64
+msgid "Hide _menubar"
+msgstr "Ẩn thanh trình đơ_n"
+
+#: ../nact/nact.desktop.in.h:1
+msgid "Add items to the Nautilus popup menu"
+msgstr "Thêm mục vào trình đơn bật lên Nautilus"
+
+#: ../nact/nact.desktop.in.h:2
+msgid "Nautilus Actions Configuration"
+msgstr "Cấu hình Hành động Nautilus"
+
+#: ../src/f-spot.glade.h:3 ../data/glade/jamboree.glade.h:1
+#: dselect/pkgdisplay.cc:48 info/session.c:3674 info/session.c:3679
+#: ../objects/UML/class.c:189 ../objects/UML/class.c:191
+#: ../objects/UML/class.c:193 ../objects/UML/class.c:195
+#: ../objects/UML/class.c:197 ../objects/UML/class.c:199
+#: ../ui/welcome.glade.h:1 ../gnome/applet/wireless-applet.glade.h:1
+#: ../glade/straw.glade.h:1
+msgid "    "
+msgstr "    "
+
+#: ../nact/nautilus-actions-config.glade.h:2
+#: ../data/glade/task-dialog.glade.h:1 ../glade/straw.glade.h:2
+msgid "  "
+msgstr "  "
+
+#: ../nact/nautilus-actions-config.glade.h:3
+msgid "(C) 2005 Frederic Ruaudel "
+msgstr "Bản quyền © năm 2005 Frederic Ruaudel "
+
+#: ../nact/nautilus-actions-config.glade.h:5
+#, no-c-format
+msgid "%% : a percent sign"
+msgstr "%%: dấu phần trăm"
+
+#: ../nact/nautilus-actions-config.glade.h:7
+#, no-c-format
+msgid ""
+"%M : space-separated list of the selected file(s)/folder(s) with "
+"their full paths"
+msgstr ""
+"%M : danh sách những tập tin/thư mục đã chọn với toàn đường dẫn, định "
+"giới bằng dấu cách."
+
+#: ../nact/nautilus-actions-config.glade.h:9
+#, no-c-format
+msgid "%U : username of the gnome-vfs URI"
+msgstr "%U : tên người dùng của URI gnome-vfs"
+
+#: ../nact/nautilus-actions-config.glade.h:11
+#, no-c-format
+msgid "%d : base folder of the selected file(s)"
+msgstr "%d : thư mục cơ bản của tập tin đã chọn"
+
+#: ../nact/nautilus-actions-config.glade.h:13
+#, no-c-format
+msgid ""
+"%f : the name of the selected file or the 1st one if many are selected"
+msgstr ""
+"%f : tên tập tin đã chọn hay tên tập tin thứ nhất nếu đã chọn nhiều"
+
+#: ../nact/nautilus-actions-config.glade.h:15
+#, no-c-format
+msgid "%h : hostname of the gnome-vfs URI"
+msgstr "%h : tên máy của URI gnome-vfs"
+
+#: ../nact/nautilus-actions-config.glade.h:17
+#, no-c-format
+msgid ""
+"%m : space-separated list of the basenames of the selected\n"
+"file(s)/folder(s)"
+msgstr ""
+"%m : danh sách tên cơ bản của những tập tin/thư mục đã chọn, định "
+"giới bằng dấu cách."
+
+#: ../nact/nautilus-actions-config.glade.h:20
+#, no-c-format
+msgid "%s : scheme of the gnome-vfs URI"
+msgstr "%s : lược đồ của URI gnome-vfs"
+
+#: ../nact/nautilus-actions-config.glade.h:22
+#, no-c-format
+msgid "%u : gnome-vfs URI"
+msgstr "%u : URI gnome-vfs"
+
+#: ../nact/nautilus-actions-config.glade.h:23
+msgid "Action"
+msgstr "Hành động"
+
+#: ../nact/nautilus-actions-config.glade.h:24
+msgid "Appears if scheme is in this list"
+msgstr "Xuất hiện nếu lược đồ có trong danh sách này"
+
+#: ../nact/nautilus-actions-config.glade.h:25
+msgid "Appears if selection contains"
+msgstr "Xuất hiện nếu vùng chọn chứa"
+
+#: ../nact/nautilus-actions-config.glade.h:26
+msgid "File Pattern"
+msgstr "Mẫu tập tin"
+
+#: ../nact/nautilus-actions-config.glade.h:27
+msgid "Nautilus Menu Item"
+msgstr "Mục trình đơn Nautilus"
+
+#: ../nact/nautilus-actions-config.glade.h:29
+#, no-c-format
+msgid "e.g., %s"
+msgstr "v.d., %s"
+
+#: ../nact/nautilus-actions-config.glade.h:30
+msgid "Parameter Legend"
+msgstr "Chú giải tham số"
+
+#: ../nact/nautilus-actions-config.glade.h:31
+msgid ""
+"A string with joker ? or * that will be used to match the files. You can "
+"match several file patterns by separating them with a semi-colon ;"
+msgstr ""
+"Chuỗi có ký tự đặc biệt « ? » hay « * » sẽ được dùng để khớp những tập tin. "
+"Bạn có thể khớp nhiều mẫu định giới bằng dấu chấm phẩy « ; »."
+
+#: ../nact/nautilus-actions-config.glade.h:32
+msgid "Appears if selection has multiple files or folders"
+msgstr "Xuất hiện nếu vùng chọn có nhiều tập tin hay thư mục"
+
+#: ../plug-ins/common/postscript.c:3154 ../plug-ins/fits/fits.c:1007
+#: ../widgets/gtk+.xml.in.h:14 src/settings.c:721
+#: libexif/olympus/mnote-olympus-entry.c:466
+msgid "Automatic"
+msgstr "Tự động"
+
+#: ../nact/nautilus-actions-config.glade.h:35
+msgid ""
+"Check this box if you want to get back all your configurations from the "
+"version of Nautilus-actions 0.7.1 or lesser."
+msgstr ""
+"Hãy đánh dấu trong hộp này nếu bạn muốn nhận lại các cấu hình từ phiên bản "
+"Nautilus-actions 0.7.1 hay trước."
+
+#: ../nact/nautilus-actions-config.glade.h:36
+msgid "Click to add a new scheme"
+msgstr "Nhấn để thêm lược đồ mới"
+
+#: ../nact/nautilus-actions-config.glade.h:37
+msgid "Click to choose a command from the file chooser dialog"
+msgstr "Nhấn để chọn lệnh từ hộp thoại bộ chọn tập tin"
+
+#: ../nact/nautilus-actions-config.glade.h:38
+msgid ""
+"Click to choose a custom icon from a file instead of a predefined icon from "
+"the drop-down list"
+msgstr ""
+"Nhấn để chọn biểu tượng riêng từ tập tin thay vào biểu tượng định sẵn từ "
+"danh sách thả xuống"
+
+#: ../nact/nautilus-actions-config.glade.h:39
+msgid "Click to remove the selected scheme"
+msgstr "Nhấn để gỡ bỏ lược đồ đã chọn"
+
+#: ../nact/nautilus-actions-config.glade.h:40
+msgid ""
+"Click to see the list of special tokens you can use in the parameter field"
+msgstr ""
+"Nhấn để xem danh sách các hiệu bài đặc biệt có thể gõ vào trường tham số"
+
+#: ../nact/nautilus-actions-config.glade.h:41
+#: ../gtksourceview/language-specs/sql.lang.h:9
+msgid "Conditions"
+msgstr "Điều kiện"
+
+#: ../nact/nautilus-actions-config.glade.h:42
+msgid "Export existing configs"
+msgstr "Xuất cấu hình đã có"
+
+#: ../nact/nautilus-actions-config.glade.h:43
+msgid "File to Import"
+msgstr "Tập tin cần nhập"
+
+#: ../nact/nautilus-actions-config.glade.h:44
+msgid "GConf schema description file (Nautilus-actions v1.x and later)"
+msgstr "Tập tin diễn tả giản đồ GConf (Nautilus-actions phiên bản 1.x và sau)"
+
+#: ../glade/glade_menu_editor.c:813 ../src/f-spot.glade.h:89
+#: ../glade/gbwidgets/gbbutton.c:122 ../glade/gbwidgets/gbcheckbutton.c:89
+#: ../glade/gbwidgets/gbimage.c:107 ../glade/gbwidgets/gbmenutoolbutton.c:89
+#: ../glade/gbwidgets/gbradiobutton.c:130
+#: ../glade/gbwidgets/gbradiotoolbutton.c:137
+#: ../glade/gbwidgets/gbtogglebutton.c:92
+#: ../glade/gbwidgets/gbtoggletoolbutton.c:94
+#: ../glade/gbwidgets/gbtoolbutton.c:108 ../glade/gbwidgets/gbwindow.c:297
+#: ../glade/glade_menu_editor.c:814
+msgid "Icon:"
+msgstr "Biểu tượng:"
+
+#: ../nact/nautilus-actions-config.glade.h:46
+msgid "Import all my old configs"
+msgstr "Nhập mọi cấu hình cũ của tôi"
+
+#: ../nact/nautilus-actions-config.glade.h:47
+msgid "Import new configurations"
+msgstr "Nhập cấu hình mới"
+
+#: ../nact/nautilus-actions-config.glade.h:48
+msgid "Import/Export"
+msgstr "Nhập/Xuất"
+
+#: ../nact/nautilus-actions-config.glade.h:49
+msgid "Import/Export Settings"
+msgstr "Thiết lập Nhập/Xuất"
+
+#: ../nact/nautilus-actions-config.glade.h:50
+msgid "Label of the menu item in the Nautilus popup menu"
+msgstr "Nhãn của mục trình đơn trong trình đơn bật lên Nautilus"
+
+#: ../glade/gnome/gnomehref.c:68
+msgid "Label:"
+msgstr "Nhãn:"
+
+#: ../nact/nautilus-actions-config.glade.h:52
+msgid "Menu Item & Action"
+msgstr "Mục trình đơn và hành động"
+
+#: ../nact/nautilus-actions-config.glade.h:53
+msgid ""
+"Nautilus Action Configuration Tool\n"
+"Application to configure Nautilus Action extension"
+msgstr ""
+"Công cụ Cấu hình Hành động Nautilus\n"
+"Ứng dụng để cấu hình phần mở rộng Hành động Nautilus"
+
+#: ../nact/nautilus-actions-config.glade.h:55
+msgid "Nautilus Action Editor"
+msgstr "Bộ sửa đổi hành động Nautilus"
+
+#: ../nact/nautilus-actions-config.glade.h:56
+msgid "Nautilus Actions"
+msgstr "Hành động Nautilus"
+
+#: ../nact/nautilus-actions-config.glade.h:57
+msgid "Old XML config file (Nautilus-actions v0.x)"
+msgstr "Tập tin cấu hình XML cũ (Nautilus-actions phiên bản 0.x)"
+
+#: ../nact/nautilus-actions-config.glade.h:58
+msgid "Only files"
+msgstr "Chỉ tập tin"
+
+#: ../nact/nautilus-actions-config.glade.h:59
+msgid "Only folders"
+msgstr "Chỉ thư mục"
+
+#: ../nact/nautilus-actions-config.glade.h:60
+msgid ""
+"Parameters that will be sent to the program. Click on the 'Legend' button to "
+"see the different replacement tokens"
+msgstr ""
+"Tham số sẽ được gởi cho chương trình. Hãy nhấn vào cái nút « Chú giải » để "
+"xem những hiệu bài thay thế khác nhau."
+
+#: ../nact/nautilus-actions-config.glade.h:61
+#: ../objects/UML/class_dialog.c:1986 ../objects/UML/class_dialog.c:2083
+msgid "Parameters:"
+msgstr "Tham số :"
+
+#: ../src/Dialog_Partition_Info.cc:168 ../src/Win_GParted.cc:232
+msgid "Path:"
+msgstr "Đường dẫn:"
+
+#: ../nact/nautilus-actions-config.glade.h:63
+msgid "Project Website"
+msgstr "Nơi Mạng của Dự án"
+
+#: ../nact/nautilus-actions-config.glade.h:64
+msgid "Save in Folder"
+msgstr "Lưu vào thư mục"
+
+#: ../nact/nautilus-actions-config.glade.h:65
+msgid "Select the configurations you want to export :"
+msgstr "Chọn những cấu hình cần xuất:"
+
+#: ../nact/nautilus-actions-config.glade.h:66
+msgid ""
+"Select the configurations you want to export. Use Shift or Ctrl key to "
+"select multiple one."
+msgstr ""
+"Chọn những cấu hình cần xuất. Hãy sử dụng phím Shift hay Ctrl để chọn nhiều "
+"mục."
+
+#: ../nact/nautilus-actions-config.glade.h:67
+msgid "Select the file you want to import."
+msgstr "Chọn tập tin cần nhập."
+
+#: ../nact/nautilus-actions-config.glade.h:68
+msgid ""
+"Select the folder you want your config to be saved in. This folder must "
+"exists."
+msgstr "Hãy chọn thư mục nơi bạn muốn lưu cấu hình. Thư mục này phải tồn tại."
+
+#: ../nact/nautilus-actions-config.glade.h:69
+msgid ""
+"Select the kind of files where you want your action to appear. If you don't "
+"know what to choose, try selecting just 'file' which is the most common "
+"choice. You can add a new scheme by clicking on the '+' button"
+msgstr ""
+"Hãy chọn kiểu tập tin nơi bạn muốn hành động xuất hiện. Nếu bạn chưa biết "
+"nên chọn gì, thử chọn chỉ « tập tin » mà là sự chọn thường nhất. Có thể thêm "
+"lược đồ mới bằng cách nhấn vào cái nút « + »."
+
+#: ../nact/nautilus-actions-config.glade.h:70
+msgid ""
+"The command that will be launched by selecting the action in Nautilus popup "
+"menu"
+msgstr ""
+"Lệnh sẽ được khởi chạy khi chọn hành động trong trình đơn bật lên Nautilus"
+
+#: ../nact/nautilus-actions-config.glade.h:71
+msgid "This software is licensed under the GNU Genaral Public License (GPL)"
+msgstr ""
+"Phần mềm này được phát hành với điều kiện của Quyền Công Chung GNU (GPL)."
+ +#: ../nact/nautilus-actions-config.glade.h:72 +msgid "Tooltip of the menu item that will appear in the Nautilus statusbar" +msgstr "" +"Mẹo công cụ của mục trình Ä‘Æ¡n sẽ xuất hiện trong thanh trạng thái Nautilus" + +#: ../nact/nautilus-actions-config.glade.h:73 ../glade/glade_menu_editor.c:936 +#: ../glade/property.c:824 ../glade/glade_menu_editor.c:937 +msgid "Tooltip:" +msgstr "Mẹo công cụ :" + +#: ../nact/nautilus-actions-config.glade.h:74 +msgid "Type of configuration :" +msgstr "Kiểu cấu hinh:" + +#: ../nact/nautilus-actions-config.glade.h:75 ../eog.glade.h:28 +msgid "_Browse" +msgstr "_Duyệt" + +#: ../src/mainWindow.py:342 ../extensions/page-info/page-info-dialog.c:933 +#: main.c:1577 main.c:1678 ../glade/glade_menu_editor.c:418 +#: ../src/orca/rolenames.py:273 +msgid "Icon" +msgstr "Biểu tượng" + +#: ../gtk/gtklabel.c:322 ../gtk/gtktoolbutton.c:187 ../src/Database.cs:845 +#: ../glade/gbwidgets/gblabel.c:872 ../glade/glade_menu_editor.c:411 +#: ../src/glade-gtk.c:3518 ../widgets/gtk+.xml.in.h:110 +#: ../src/form-editor/widget-util.cc:191 ../src/menu-win.cc:256 +#: ../src/orca/rolenames.py:288 +msgid "Label" +msgstr "Nhãn" + +#: ../nact/nact.c:231 ../nact/nact-editor.c:672 +#: ../nact/nact-import-export.c:346 +msgid "Could not load interface for Nautilus Actions Config Tool" +msgstr "Không thể tải giao diện cho Công cụ Cấu hình Hành Ä‘á»™ng Nautilus" + +#. i18n notes: example strings for the command preview +#: ../nact/nact-utils.c:157 +msgid "/path/to" +msgstr "/Ä‘Æ°á»ng_dẫn/đến" + +#: ../nact/nact-utils.c:158 ../nact/nact-utils.c:160 +msgid "file1.txt" +msgstr "tập_tin1.txt" + +#: ../nact/nact-utils.c:158 +msgid "file2.txt" +msgstr "tập_tin2.txt" + +#: ../nact/nact-utils.c:159 ../nact/nact-utils.c:160 +msgid "folder1" +msgstr "thÆ°_mục1" + +#: ../nact/nact-utils.c:159 +msgid "folder2" +msgstr "thÆ°_mục2" + +#: ../nact/nact-utils.c:162 +msgid "test.example.net" +msgstr "thá»­.ví_dụ.net" + +#: ../nact/nact-utils.c:163 +msgid "file.txt" +msgstr "tập_tin.txt" + +#: ../nact/nact-utils.c:164 gphoto2/main.c:1668 +#: ../freedesktop.org.xml.in.h:334 +msgid "folder" +msgstr "thÆ° mục" + +#. i18n notes : scheme name set for a new entry in the scheme list +#: ../nact/nact-editor.c:435 +msgid "new-scheme" +msgstr "lược-đồ-má»›i" + +#: ../nact/nact-editor.c:436 +msgid "New Scheme Description" +msgstr "Mô tả Lược đồ Má»›i" + +#: ../nact/nact-editor.c:522 +msgid "Scheme" +msgstr "Lược đồ" + +#: ../nact/nact-editor.c:688 +msgid "Icon of the menu item in the Nautilus popup menu" +msgstr "Biểu tượng của mục trình Ä‘Æ¡n trong trình Ä‘Æ¡n bật lên Nautilus" + +#: ../nact/nact-editor.c:716 +msgid "Add a New Action" +msgstr "Thêm hành Ä‘á»™ng má»›i" + +#: ../nact/nact-editor.c:720 +#, c-format +msgid "Edit Action \"%s\"" +msgstr "Sá»­a đổi hành Ä‘á»™ng « %s »" + +#: ../nact/nact-prefs.c:164 +#, c-format +msgid "%sLocal Files" +msgstr "%sTập tin cục bá»™" + +#. i18n notes : description of 'sftp' scheme +#: ../nact/nact-prefs.c:166 +#, c-format +msgid "%sSSH Files" +msgstr "%sTập tin SSH" + +#. i18n notes : description of 'smb' scheme +#: ../nact/nact-prefs.c:168 +#, c-format +msgid "%sWindows Files" +msgstr "%sTập tin Windows" + +#. i18n notes : description of 'ftp' scheme +#: ../nact/nact-prefs.c:170 +#, c-format +msgid "%sFTP Files" +msgstr "%sTập tin FTP" + +#. i18n notes : description of 'dav' scheme +#: ../nact/nact-prefs.c:172 +#, c-format +msgid "%sWebdav Files" +msgstr "%sTập tin Webdav" + +#. 
vim:ts=3:sw=3:tw=1024:cin
+#: ../config/config_newaction.schemas.in.in.h:1
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:86
+msgid "'true' if the selection can have several items, 'false' otherwise"
+msgstr ""
+"« true » (đúng) nếu vùng chọn có thể chứa vài mục; không thì « false "
+"» (không đúng)"
+
+#: ../config/config_newaction.schemas.in.in.h:2
+msgid "'true' if the selection must have files, 'false' otherwise"
+msgstr ""
+"« true » (đúng) nếu vùng chọn phải chứa tập tin; không thì « false » (không "
+"đúng)"
+
+#: ../config/config_newaction.schemas.in.in.h:3
+msgid "'true' if the selection must have folders, 'false' otherwise"
+msgstr ""
+"« true » (đúng) nếu vùng chọn phải chứa thư mục; không thì « false » (không "
+"đúng)"
+
+#: ../config/config_newaction.schemas.in.in.h:4
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:79
+msgid ""
+"A list of strings with joker '*' or '?' to match the selected file(s)/folder"
+"(s). Each selected items must match at least one of the patterns for the "
+"action to appear"
+msgstr ""
+"Danh sách chuỗi chứa ký tự đặc biệt « ? » hay « * » để khớp tập tin/thư mục "
+"đã chọn. Mỗi mục đã chọn phải khớp với ít nhất một mẫu, để gây ra hành động "
+"xuất hiện."
+
+#: ../config/config_newaction.schemas.in.in.h:5
+msgid ""
+"Defines the list of valid GnomeVFS schemes to be matched with the selected "
+"items. The GnomeVFS scheme is the protocol used to access the files. The "
+"keyword to use is the one used in the GnomeVFS URI. Example of GnomeVFS "
+"URI : file:///tmp/foo.txt sftp:///root@test.example.net/tmp/foo.txt The most "
+"common schemes are : 'file' : local files 'sftp' : files accessed via SSH "
+"'ftp' : files accessed via FTP 'smb' : files accessed via Samba (Windows "
+"share) 'dav' : files accessed via WebDav All GnomeVFS schemes used by "
+"Nautilus can be used here."
+msgstr ""
+"Định nghĩa danh sách lược đồ GnomeVFS hợp lệ để khớp những mục đã chọn. Lược "
+"đồ GnomeVFS là giao thức được dùng để truy cập những tập tin. Từ khoá cần "
+"dùng là điều dùng trong URI GnomeVFS (v.d. ). Những lược đồ thường nhất:\n"
+" • file \t— tập tin cục bộ\n"
+" • sftp \t— tập tin được truy cập bằng SSH\n"
+" • ftp \t— tập tin được truy cập bằng FTP\n"
+" • smb\t— tập tin được truy cập bằng Samba (chia sẻ Windows)\n"
+" • dav\t— tập tin được truy cập bằng WebDav.\n"
+"Ở đây có thể sử dụng lược đồ nào do Nautilus dùng."
+
+#: ../config/config_newaction.schemas.in.in.h:6
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:87
+msgid ""
+"If you need one or more files or folders to be selected, set this key to "
+"'true'. If you want just one file or folder, set 'false'"
+msgstr ""
+"Nếu bạn cần chọn một hay nhiều tập tin hay thư mục, hãy đặt khoá này là « "
+"true » (đúng). Nếu bạn muốn chỉ một tập tin hay thư mục, hãy đặt « false "
+"» (không đúng)."
+
+#: ../config/config_newaction.schemas.in.in.h:7
+msgid "Manage Actions"
+msgstr "Quản lý hành động"
+
+#: ../config/config_newaction.schemas.in.in.h:8
+msgid "Manage your actions using NACT, the configuration tool"
+msgstr "Quản lý các hành động bằng công cụ cấu hình NACT"
+
+#: ../config/config_newaction.schemas.in.in.h:9
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:71
+msgid "The icon of the menu item"
+msgstr "Biểu tượng của mục trình đơn"
+
+#: ../config/config_newaction.schemas.in.in.h:10
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:72
+msgid ""
+"The icon of the menu item that will appear next to the label in the Nautilus "
+"popup menu when the selection matches the appearance conditions settings"
+msgstr ""
+"Biểu tượng của mục trình đơn, sẽ xuất hiện ở cạnh nhãn trong trình đơn bật "
+"lên Nautilus khi vùng chọn khớp với thiết lập điều kiện xuất hiện"
+
+#. GConf description strings :
+#: ../config/config_newaction.schemas.in.in.h:11
+#: ../utils/nautilus-actions-new-config.c:48
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:67
+msgid "The label of the menu item"
+msgstr "Nhãn của mục trình đơn"
+
+#: ../config/config_newaction.schemas.in.in.h:12
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:68
+msgid ""
+"The label of the menu item that will appear in the Nautilus popup menu when "
+"the selection matches the appearance condition settings"
+msgstr ""
+"Nhãn của mục trình đơn, sẽ xuất hiện trong trình đơn bật lên Nautilus khi "
+"vùng chọn khớp với thiết lập điều kiện xuất hiện"
+
+#: ../config/config_newaction.schemas.in.in.h:13
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:88
+msgid "The list of GnomeVFS schemes where the selected files should be located"
+msgstr "Danh sách các lược đồ GnomeVFS nơi cần định vị những tập tin đã chọn"
+
+#: ../config/config_newaction.schemas.in.in.h:14
+msgid "The list of patterns to match the selected file(s)/folder(s)"
+msgstr "Danh sách các mẫu cần khớp với những tập tin/thư mục đã chọn"
+
+#: ../config/config_newaction.schemas.in.in.h:15
+#: ../utils/nautilus-actions-new-config.c:52
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:75
+msgid "The parameters of the command"
+msgstr "Tham số của lệnh"
+
+#: ../config/config_newaction.schemas.in.in.h:17
+#, no-c-format
+msgid ""
+"The parameters of the command to start when the user selects the menu item "
+"in the Nautilus popup menu. The parameters can contain some special tokens "
+"which are replaced by Nautilus informations before starting the command : %"
+"d : base folder of the selected file(s) %f : the name of the selected file "
+"or the 1st one if many are selected %m : space-separated list of the "
+"basenames of the selected file(s)/folder(s) %M : space-separated list of the "
+"selected file(s)/folder(s), with their full paths %u : gnome-vfs URI %s : "
+"scheme of the gnome-vfs URI %h : hostname of the gnome-vfs URI %U : username "
+"of the gnome-vfs URI %% : a percent sign"
+msgstr ""
+"Những tham số của lệnh cần chạy khi người dùng chọn mục trình đơn trong "
+"trình đơn bật lên Nautilus. Những tham số có thể chứa một số hiệu bài đặc "
+"biệt, mà được thay thế bằng thông tin Nautilus trước khi khởi chạy lệnh:\n"
+" • %d\t— thư mục cơ bản của tập tin đã chọn\n"
+" • %f\t\t— tên của tập tin đã chọn, hay điều thứ nhất nếu có nhiều tập tin\n"
+" • %m\t— danh sách tên cơ bản của các tập tin/thư mục, định giới bằng dấu "
+"cách\n"
+" • %M\t— danh sách những tập tin/thư mục đã chọn với toàn đường dẫn định "
+"giới bằng dấu cách\n"
+" • %u\t— URI Gnome-VFS\n"
+" • %s\t— lược đồ của URI Gnome-VFS\n"
+" • %h\t— tên máy của URI Gnome-VFS\n"
+" • %U\t— tên người dùng của URI Gnome-VFS\n"
+" • %%\t— dấu phần trăm"
+
+#: ../config/config_newaction.schemas.in.in.h:18
+#: ../utils/nautilus-actions-new-config.c:51
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:73
+msgid "The path of the command"
+msgstr "Đường dẫn của lệnh"
+
+#: ../config/config_newaction.schemas.in.in.h:19
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:74
+msgid ""
+"The path of the command to start when the user select the menu item in the "
+"Nautilus popup menu"
+msgstr ""
+"Đường dẫn của lệnh cần chạy khi người dùng chọn mục trình đơn trong trình "
+"đơn bật lên Nautilus"
+
+#: ../config/config_newaction.schemas.in.in.h:20
+#: ../utils/nautilus-actions-new-config.c:49
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:69
+msgid "The tooltip of the menu item"
+msgstr "Mẹo công cụ của mục trình đơn"
+
+#: ../config/config_newaction.schemas.in.in.h:21
+msgid ""
+"The tooltip of the menu item that will appear in the Nautilus statusbar when "
+"the user points the mouse to the Nautilus popup menu item."
+msgstr ""
+"Mẹo công cụ của mục trình đơn sẽ xuất hiện trong thanh trạng thái Nautilus "
+"khi người dùng chỉ con chuột đến mục trình đơn bật lên Nautilus"
+
+#: ../config/config_newaction.schemas.in.in.h:22
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:91
+msgid "The version of the configuration format"
+msgstr "Phiên bản của khuôn dạng cấu hình"
+
+#: ../config/config_newaction.schemas.in.in.h:23
+#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:92
+msgid ""
+"The version of the configuration format that will be used to manage backward "
+"compatibility"
+msgstr ""
+"Phiên bản của khuôn dạng cấu hình sẽ được dùng để quản lý cách tương thích "
+"ngược"
+
+#: ../config/config_newaction.schemas.in.in.h:24
+msgid ""
+"This setting is tied with the 'isdir' setting. Here are the valid "
+"combinations : - 'isfile' is 'true' and 'isdir' is 'false' : the selection "
+"must holds only files - 'isfile' is 'false' and 'isdir' is 'true' : the "
+"selection must holds only folders - 'isfile' is 'true' and 'isdir' is "
+"'true' : the selection can holds both files and folders - 'isfile' is "
+"'false' and 'isdir' is 'false' : invalid combination"
+msgstr ""
+"Thiết lập này được liên quan đến thiết lập « isdir » (là thư mục). Những tổ "
+"hợp đúng:\n"
+" • « isfile » (là tập tin) là « true » (đúng) còn « isdir » (là thư mục) là "
+"« false » (không đúng): vùng chọn phải chứa chỉ tập tin\n"
+" • « isfile » (là tập tin) là « false » (không đúng) còn « isdir » (là thư "
+"mục) là « true » (đúng): vùng chọn phải chứa chỉ thư mục\n"
+" • « isfile » (là tập tin) là « true » (đúng) và « isdir » (là thư mục) cũng "
+"là « true » (đúng): vùng chọn có thể chứa cả tập tin lẫn thư mục đều\n"
+" • « isfile » (là tập tin) là « false » (không đúng) và « isdir » (là thư "
+"mục) cũng là « false » (không đúng): tổ hợp không hợp lệ."
+
+#: ../config/config_newaction.schemas.in.in.h:25
+msgid ""
+"This setting is tied with the 'isfile' setting. Here are the valid "
+"combinations : - 'isfile' is 'true' and 'isdir' is 'false' : the selection "
+"must holds only files - 'isfile' is 'false' and 'isdir' is 'true' : the "
+"selection must holds only folders - 'isfile' is 'true' and 'isdir' is "
+"'true' : the selection can holds both files and folders - 'isfile' is "
+"'false' and 'isdir' is 'false' : invalid combination"
+msgstr ""
+"Thiết lập này được liên quan đến thiết lập « isfile » (là tập tin). Những tổ "
+"hợp đúng:\n"
+" • « isfile » (là tập tin) là « true » (đúng) còn « isdir » (là thư mục) là "
+"« false » (không đúng): vùng chọn phải chứa chỉ tập tin\n"
+" • « isfile » (là tập tin) là « false » (không đúng) còn « isdir » (là thư "
+"mục) là « true » (đúng): vùng chọn phải chứa chỉ thư mục\n"
+" • « isfile » (là tập tin) là « true » (đúng) và « isdir » (là thư mục) cũng "
+"là « true » (đúng): vùng chọn có thể chứa cả tập tin lẫn thư mục đều\n"
+" • « isfile » (là tập tin) là « false » (không đúng) và « isdir » (là thư "
+"mục) cũng là « false » (không đúng): tổ hợp không hợp lệ."
+ +#: ../utils/nautilus-actions-convert.c:41 +msgid "The old xml config file to convert" +msgstr "Tập tin cấu hình XML cÅ© cần chuyển đổi" + +#: ../utils/nautilus-actions-convert.c:42 +msgid "The name of the newly-converted GConf schema file" +msgstr "Tên của tập tin giản đồ GConf má»›i được chuyển đổi" + +#: ../utils/nautilus-actions-convert.c:43 +msgid "Convert all old xml config files from previous installations [default]" +msgstr "" +"Chuyển đổi má»i tập tin cấu hình XML cÅ© từ bản cài đặt trÆ°á»›c nào [mặc định]" + +#: ../utils/nautilus-actions-convert.c:44 +msgid "" +"The folder where the new GConf schema files will be saved if option -a is " +"set [default=/tmp]" +msgstr "" +"ThÆ° mục nÆ¡i những tập tin giản đồ GConf má»›i sẽ được kÆ°u nếu tùy chá»n « -a » " +"được lập [mặc định=/tmp]" + +#: ../utils/nautilus-actions-convert.c:77 +#: ../utils/nautilus-actions-new-config.c:95 +#, c-format +msgid "" +"Syntax error:\n" +"\t- %s\n" +"Try %s --help\n" +msgstr "" +"Lá»—i cú pháp:\n" +"\t- %s\n" +"Hãy chạy lệnh « %s --help » (trợ giúp)\n" + +#: ../utils/nautilus-actions-convert.c:83 +#, c-format +msgid "" +"Syntax error:\n" +"\tOptions -i and -o are mutually exclusive with option -a\n" +"Try %s --help\n" +msgstr "" +"Lá»—i cú pháp:\n" +"\ttùy chá»n « -i » và « -o » loại từ lẫn nhau vá»›i tùy chá»n « -a »\n" +"Hãy chạy lệnh « %s --help » (trợ giúp)\n" + +#: ../utils/nautilus-actions-convert.c:89 +#, c-format +msgid "" +"Syntax error:\n" +"\tOption -i is mandatory when using option -o\n" +"Try %s --help\n" +msgstr "" +"Lá»—i cú pháp\n" +"\ttùy chá»n « -i » bắt buá»™c khi dùng tùy chá»n « -o »\n" +"Hãy chạy lệnh « %s --help » (trợ giúp)\n" + +#: ../utils/nautilus-actions-convert.c:101 +#, c-format +msgid "" +"Error:\n" +"\t- Can't parse %s\n" +msgstr "" +"Lá»—i: \n" +"\tKhông thể phân tách %s\n" + +#: ../utils/nautilus-actions-convert.c:115 +#, c-format +msgid "Converting %s ..." +msgstr "Äang chuyển đổi %s..." + +#: ../utils/nautilus-actions-convert.c:135 +#: ../utils/nautilus-actions-new-config.c:152 +#, c-format +msgid " Failed: Can't create %s : %s\n" +msgstr " Bị lá»—i: không thể tạo %s: %s\n" + +#: ../utils/nautilus-actions-convert.c:143 +#: ../utils/nautilus-actions-new-config.c:160 +#, c-format +msgid " Ok, saved in %s\n" +msgstr " Äược, đã lÆ°u được vào %s\n" + +#: ../utils/nautilus-actions-convert.c:148 +#: ../utils/nautilus-actions-new-config.c:165 +#, c-format +msgid " Failed\n" +msgstr " Bị lá»—i\n" + +#: ../utils/nautilus-actions-new-config.c:48 ../srcore/srpres.c:831 +msgid "LABEL" +msgstr "NHÃN" + +#: ../utils/nautilus-actions-new-config.c:49 +msgid "TOOLTIP" +msgstr "MẸO CÔNG CỤ" + +#: ../utils/nautilus-actions-new-config.c:50 +msgid "The icon of the menu item (filename or Gtk stock id)" +msgstr "Biểu tượng của mục trình Ä‘Æ¡n (tên tập tin hay ID GTK chuẩn)" + +#: ../utils/nautilus-actions-new-config.c:50 ../srcore/srpres.c:829 +msgid "ICON" +msgstr "BIỂU_TƯỢNG" + +#: ../utils/nautilus-actions-new-config.c:52 +msgid "PARAMS" +msgstr "THAM_Sá»" + +#: ../utils/nautilus-actions-new-config.c:53 +msgid "" +"A pattern to match selected files with possibility to add jokers ? or * (you " +"must set it for each pattern you need)" +msgstr "" +"Mẫu để khá»›p tập tin đã chá»n, có thể thêm ký tá»± đặc biệt « ? 
» hay « * " +"» (phải lập cho má»—i mẫu cần thiết)" + +#: ../utils/nautilus-actions-new-config.c:53 +msgid "EXPR" +msgstr "BTHỨC" + +#: ../utils/nautilus-actions-new-config.c:54 +msgid "Set it if the selection can contain files" +msgstr "Lập nếu vùng chá»n có thể chứa tập tin" + +#: ../utils/nautilus-actions-new-config.c:55 +msgid "Set it if the selection can contain folders" +msgstr "Lập nếu vùng chá»n có thể chứa thÆ° mục" + +#: ../utils/nautilus-actions-new-config.c:56 +msgid "Set it if the selection can have several items" +msgstr "Lập nếu vùng chá»n có thể chứa vài mục" + +#: ../utils/nautilus-actions-new-config.c:57 +msgid "" +"A GnomeVFS scheme where the selected files should be located (you must set " +"it for each scheme you need)" +msgstr "" +"Lược đồ GnomeVFS nÆ¡i cần định vị những tập tin đã chá»n (phải lập cho má»—i " +"lược đồ cần thiết)" + +#: ../utils/nautilus-actions-new-config.c:57 +msgid "SCHEME" +msgstr "LƯỢC Äá»’" + +#: ../utils/nautilus-actions-new-config.c:58 +msgid "" +"The path of the file where to save the new GConf schema definition file " +"[default: /tmp/config_UUID.schemas]" +msgstr "" +"ÄÆ°á»ng dẫn của tập tin nÆ¡i cần lÆ°u tập tin diá»…n tả giản đồ GConf má»›i [mặc " +"định: ]" + +#: ../utils/nautilus-actions-new-config.c:133 +#, c-format +msgid "Creating %s ..." +msgstr "Äang tạo %s..." + +#: ../utils/nautilus-actions-tools-utils.c:48 +#, c-format +msgid "Can't write data in file %s\n" +msgstr "Không thể ghi dữ liệu vào tập tin %s\n" + +#: ../utils/nautilus-actions-tools-utils.c:54 +#, c-format +msgid "Can't open file %s for writing\n" +msgstr "Không thể mở tập tin « %s » để ghi\n" + +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:70 +msgid "" +"The tooltip of the menu item that will appear in the Nautilus statusbar when " +"the user points to the Nautilus popup menu item with his/her mouse" +msgstr "" +"Mẹo công cụ của mục trình Ä‘Æ¡n sẽ xuất hiện trong thanh trạng thái khi ngÆ°á»i " +"dùng chỉ con chuá»™t đến mục trình Ä‘Æ¡n bật lệnh Nautilus" + +#. 
i18n notes : Sorry for this long paragraph, will try to fix it the next release +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:77 +msgid "" +"The parameters of the command to start when the user selects the menu item " +"in the Nautilus popup menu.\n" +"\n" +"The parameters can contain some special tokens which are replaced by " +"Nautilus informations before starting the command :\n" +"\n" +"%d : base folder of the selected file(s)\n" +"%f : the name of the selected file or the 1st one if many are selected\n" +"%m : space-separated list of the basenames of the selected file(s)/folder" +"(s)\n" +"%M : space-separated list of the selected file(s)/folder(s), with their full " +"paths\n" +"%u : gnome-vfs URI\n" +"%s : scheme of the gnome-vfs URI\n" +"%h : hostname of the gnome-vfs URI\n" +"%U : username of the gnome-vfs URI\n" +"%% : a percent sign" +msgstr "" +"Những tham số của lệnh cần chạy khi ngÆ°á»i dùng chá»n mục trình Ä‘Æ¡n trong " +"trình Ä‘Æ¡n bật lên Nautilus.\n" +"\n" +"Những tham số có thể chứa má»™t số hiệu bài đặc biệt, mà được thay thế bằng " +"thông tin Nautilus trÆ°á»›c khi khởi chạy lệnh:\n" +"\n" +" • %d\t— thÆ° mục cÆ¡ bản của tập tin đã chá»n\n" +" • %f\t\t— tên của tập tin đã chá»n, hay Ä‘iá»u thứ nhất nếu có nhiá»u tập tin\n" +" • %m\t— danh sách tên cÆ¡ bản của các tập tin/thÆ° mục, định giá»›i bằng dấu " +"cách\n" +" • %M\t— danh sách những tập tin/thÆ° mục đã chá»n vá»›i toàn Ä‘Æ°á»ng dẫn định " +"giá»›i bằng dấu cách\n" +" • %u\t— URI Gnome-VFS\n" +" • %s\t— lược đồ của URI Gnome-VFS\n" +" • %h\t— tên máy của URI Gnome-VFS\n" +" • %U\t— tên ngÆ°á»i dùng của URI Gnome-VFS\n" +" • %%\t— dấu phần trăm" + +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:78 +msgid "The list of pattern to match the selected file(s)/folder(s)" +msgstr "Danh sách các mẫu cần khá»›p vá»›i những tập tin/thÆ° mục đã chá»n" + +#. i18n notes : Sorry for this long paragraph, will try to fix it in the next release +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:81 +msgid "" +"Here are the valid combinations :\n" +"\n" +"- 'isfile' is 'true' and 'isdir' is 'false' : the selection must holds only " +"files\n" +"- 'isfile' is 'false' and 'isdir' is 'true' : the selection must holds only " +"folders\n" +"- 'isfile' is 'true' and 'isdir' is 'true' : the selection can holds both " +"files and folders\n" +"- 'isfile' is 'false' and 'isdir' is 'false' : invalid combination" +msgstr "" +"Những tổ hợp đúng:\n" +"\n" +"• « isfile » (là tập tin) là « true » (đúng) còn « isdir » (là thÆ° mục) là « " +"false » (không đúng): vùng chá»n phải chứa chỉ tập tin\n" +" • « isfile » (là tập tin) là « false » (không đúng) còn « isdir » (là thÆ° " +"mục) là « true » (đúng): vùng chá»n phải chứa chỉ thÆ° mục\n" +" • « isfile » (là tập tin) là « true » (đúng) và « isdir » (là thÆ° mục) cÅ©ng " +"là « true » (đúng): vùng chá»n có thể chứa cả tập tin lẫn thÆ° mục Ä‘á»u\n" +" • « isfile » (là tập tin) là « false » (không đúng) và « isdir » (là thÆ° " +"mục) cÅ©ng là « false » (không đúng): tổ hợp không hợp lệ." + +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:82 +msgid "'true' if the selection can have files, 'false' otherwise" +msgstr "" +"« true » (đúng) nếu vùng chá»n có thể chứa tập tin, không thì « false " +"» (không đúng)" + +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:83 +msgid "This setting is tied with the 'isdir' setting. 
" +msgstr "Thiết lập này liên quan đến thiết lập « isdir » (là thÆ° mục) " + +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:84 +msgid "'true' if the selection can have folders, 'false' otherwise" +msgstr "" +"« true » (đúng) nếu vùng chá»n có thể chứa thÆ° mục, không thì « false " +"» (không đúng)" + +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:85 +msgid "This setting is tied with the 'isfile' setting. " +msgstr "Thiết lập này liên quan đến thiết lập « isfile » (là tập tin) " + +#. i18n notes : Sorry for this long paragraph, will try to fix it in the next release +#: ../libnautilus-actions/nautilus-actions-config-gconf-private.h:90 +msgid "" +"Defines the list of valid GnomeVFS schemes to be matched with the selected " +"items. The GnomeVFS scheme is the protocol used to access the files. The " +"keyword to use is the one used in the GnomeVFS URI.\n" +"\n" +"Example of GnomeVFS URI : \n" +"file:///tmp/foo.txt\n" +"sftp:///root@test.example.net/tmp/foo.txt\n" +"\n" +"The most common schemes are :\n" +"\n" +"'file' : local files\n" +"'sftp' : files accessed via SSH\n" +"'ftp' : files accessed via FTP\n" +"'smb' : files accessed via Samba (Windows share)\n" +"'dav' : files accessed via WebDav\n" +"\n" +"All GnomeVFS schemes used by Nautilus can be used here." +msgstr "" +"Äịnh nghÄ©a danh sách lược đồ GnomeVFS hợp lệ để khá»›p những mục đã chá»n. Lược " +"đồ GnomeVFS là giao thức được dùng để truy cập những tập tin. Từ khoá cần " +"dùng là Ä‘iá»u dùng trong URI GnomeVFS (v.d. ). Những lược đồ thÆ°á»ng nhất:\n" +" • file \t— tập tin cục bá»™\n" +" • sftp \t— tập tin được truy cập bằng SSH\n" +" • ftp \t— tập tin được truy cập bằng FTP\n" +" • smb\t— tập tin được truy cập bằng Samba (chia sẻ Windows)\n" +" • dav\t— tập tin được truy cập bằng WebDav.\n" +"Ở đây có thể sá»­ dụng lược đồ nào do Nautilus dùng." 
+ +#: ../admin-tool/aboutdialog.py:63 +msgid "Program to establish and edit profiles for users" +msgstr "ChÆ°Æ¡ng trình tạo và sá»­a đổi tiểu sá»­ sÆ¡ lược cho ngÆ°á»i dùng" + +#: ../admin-tool/changeswindow.py:50 +#, python-format +msgid "Changes in profile %s" +msgstr "Các thay đổi tiểu sá»­ sÆ¡ lược %s" + +#: ../apt-mirror-setup.templates:32 src/common/xchat.c:834 ../hwconf.c:1728 +msgid "Ignore" +msgstr "Bá» qua" + +#: ../sheets/ciscomisc.sheet.in.h:19 +msgid "Lock" +msgstr "Khoá" + +#: ../admin-tool/editorwindow.py:97 +#, python-format +msgid "Profile %s" +msgstr "Tiểu sá»­ sÆ¡ lược %s" + +#: ../admin-tool/editorwindow.py:189 ../admin-tool/sessionwindow.py:174 +msgid "_Profile" +msgstr "Tiểu sá»­ s_Æ¡ lược" + +#: ../admin-tool/editorwindow.py:190 ../admin-tool/sessionwindow.py:175 +msgid "Save profile" +msgstr "LÆ°u tiểu sá»­ sÆ¡ lược" + +#: ../admin-tool/editorwindow.py:191 ../admin-tool/sessionwindow.py:176 +msgid "Close the current window" +msgstr "Äóng cá»­a sổ hiện có" + +#: ../admin-tool/editorwindow.py:193 ../structure.c:248 +msgid "Delete item" +msgstr "Xoá bá» mục" + +#: ../admin-tool/editorwindow.py:195 ../admin-tool/sessionwindow.py:181 +msgid "About Sabayon" +msgstr "Giá»›i thiệu Sabayon" + +#: ../admin-tool/editorwindow.py:254 ../lib/sources/gconfsource.py:123 +msgid "GConf" +msgstr "GConf" + +#: ../gtk/gtkfilesel.c:763 ../src/prefs.c:597 ../pan/gui.c:1169 +#: ../pan/save-ui.c:240 +msgid "Files" +msgstr "Tập tin" + +#: ../admin-tool/fileviewer.py:29 +#, python-format +msgid "Profile file: %s" +msgstr "Tập tin tiểu sá»­ sÆ¡ lược: %s" + +#: ../admin-tool/gconfviewer.py:59 ../admin-tool/gconfviewer.py:78 +msgid "" +msgstr "" + +#: ../admin-tool/gconfviewer.py:60 +msgid "" +msgstr "" + +#: ../admin-tool/gconfviewer.py:64 common/config.cpp:73 +#: ogg123/cfgfile_options.c:174 +msgid "string" +msgstr "chuá»—i" + +#: ../admin-tool/gconfviewer.py:66 common/config.cpp:73 +msgid "integer" +msgstr "số nguyên" + +#: ../admin-tool/gconfviewer.py:68 ogg123/cfgfile_options.c:180 +msgid "float" +msgstr "nổi" + +#: ../admin-tool/gconfviewer.py:70 common/config.cpp:73 +msgid "boolean" +msgstr "bun" + +#: ../admin-tool/gconfviewer.py:72 +msgid "schema" +msgstr "giản đồ" + +#: ../addressbook/gui/widgets/eab-gui-util.c:588 ../atk/atkobject.c:113 +#: ../src/orca/rolenames.py:299 +msgid "list" +msgstr "danh sách" + +#: ../admin-tool/gconfviewer.py:76 +msgid "pair" +msgstr "cặp" + +#: ../admin-tool/gconfviewer.py:88 +#, python-format +msgid "Profile settings: %s" +msgstr "Thiết lập tiểu sá»­ sÆ¡ lược: %s" + +#: ../plug-ins/gimpressionist/size.c:141 ../plug-ins/metadata/interface.c:142 +#: ../objects/UML/umlattribute.c:41 ../objects/UML/umlparameter.c:47 +#: app/envelope-box.c:881 +msgid "Value" +msgstr "Giá trị" + +#, c-format, python-format +msgid "%s (%s)" +msgstr "%s (%s)" + +#: ../admin-tool/sabayon:44 +msgid "" +"Your account does not have permissions to run the Desktop User Profiles tool" +msgstr "" +"Tài khoản của bạn không có quyá»n chạy công cụ Tiểu sá»­ sÆ¡ lược NgÆ°á»i dùng Môi " +"trÆ°á»ng" + +#: ../admin-tool/sabayon:45 +msgid "" +"Administrator level permissions are needed to run this program because it " +"can modify system files." +msgstr "" +"Cần thiết quyá»n lá»›p quản trị để chạy chÆ°Æ¡ng trình này vì nó có thể sá»­a đổi " +"tập tin hệ thống." 
+ +#: ../admin-tool/sabayon:50 +msgid "Desktop User Profiles tool is already running" +msgstr " Công cụ Tiểu sá»­ sÆ¡ lược NgÆ°á»i dùng Môi trÆ°á»ng Ä‘ang chạy" + +#: ../admin-tool/sabayon:51 +msgid "" +"You may not use Desktop User Profiles tool from within a profile editing " +"session" +msgstr "" +"Bạn không thể sá»­ dụng công cụ Tiểu sá»­ sÆ¡ lược NgÆ°á»i dùng Môi trÆ°á»ng ở trong " +"phiên hiệu chỉnh tiểu sá»­ sÆ¡ lược" + +#: ../admin-tool/sabayon:58 +#, c-format +msgid "User account '%s' was not found" +msgstr "Không tìm thấy tài khoản ngÆ°á»i dùng « %s »" + +#: ../admin-tool/sabayon:59 +#, c-format +msgid "" +"Sabayon requires a special user account '%s' to be present on this computer. " +"Try again after creating the account (using, for example, the 'adduser' " +"command)" +msgstr "" +"Sabayon cần thiết má»™t tài khoản ngÆ°á»i dùng đặc biệt « %s » có trong máy tính " +"này. Hãy thá»­ lại sau khi tạo tài khoản này (lấy thí dụ, bằng lệnh « adduser " +"» [thêm ngÆ°á»i dùng])" + +#: ../admin-tool/sabayon-apply:39 +#, c-format +msgid "No profile for user '%s' found\n" +msgstr "Không tìm thấy tiểu sá»­ sÆ¡ lược cho ngÆ°á»i dùng « %s »\n" + +#: ../admin-tool/sabayon-apply:44 +#, c-format +msgid "Usage: %s []\n" +msgstr "Cách sá»­ dụng: %s []\n" + +#: ../admin-tool/sabayon-session:32 +#, c-format +msgid "Usage: %s \n" +msgstr "" +"Cách sá»­ dụng: %s <Ä‘Æ°á»ng_dẫn_tiểu_sá»­_sÆ¡_lược> " +"\n" + +#: ../admin-tool/sabayon.desktop.in.h:1 +msgid "Establish and Edit Profiles for Users" +msgstr "Tạo và Sá»­a đổi Tiểu sá»­ sÆ¡ lược cho NgÆ°á»i dùng" + +#: ../admin-tool/sabayon.desktop.in.h:2 ../admin-tool/sabayon.glade.h:4 +msgid "User Profile Editor" +msgstr "Bá»™ Sá»­a đổi Tiểu sá»­ sÆ¡ lược NgÆ°á»i dùng" + +#: ../admin-tool/sabayon.glade.h:1 +msgid "Add Profile" +msgstr "Thêm tiểu sá»­ sÆ¡ lược" + +#: ../admin-tool/sabayon.glade.h:2 +msgid "Profile _name:" +msgstr "T_ên tiểu sá»­ sÆ¡ lược:" + +#: ../admin-tool/sabayon.glade.h:3 +msgid "Use this profile for _all users" +msgstr "Dùng tiểu sá»­ sÆ¡ lược này cho _má»i ngÆ°á»i dùng" + +#: ../admin-tool/sabayon.glade.h:5 ../src/gnome-terminal.glade2.h:92 +msgid "_Base on:" +msgstr "_Dá»±a trên:" + +#: ../admin-tool/sabayon.glade.h:6 ../profiles/audio-profiles-edit.c:650 +#: ../src/terminal.c:3037 +msgid "_Profiles:" +msgstr "_Hồ sÆ¡ :" + +#: ../admin-tool/sabayon.glade.h:7 ../glom/application.cc:283 +msgid "_Users" +msgstr "_NgÆ°á»i dùng" + +#: ../admin-tool/sabayon.glade.h:8 +msgid "_Users:" +msgstr "_NgÆ°á»i dùng:" + +#: ../admin-tool/saveconfirm.py:36 +msgid "Close _Without Saving" +msgstr "Äóng mà _không lÆ°u" + +#: ../admin-tool/saveconfirm.py:43 +#, python-format +msgid "Save changes to profile \"%s\" before closing?" +msgstr "LÆ°u các thay đổi tiểu sá»­ sÆ¡ lược « %s » trÆ°á»›c khi đóng không?" + +#: ../admin-tool/saveconfirm.py:47 +#: ../gedit/dialogs/gedit-close-confirmation-dialog.c:367 +#, c-format, python-format +msgid "" +"If you don't save, changes from the last %ld second will be permanently lost." +msgid_plural "" +"If you don't save, changes from the last %ld second will be permanently lost." +msgstr[0] "" +"Nếu bạn không lÆ°u, các thay đổi của bạn ở %ld phút chót sẽ bị mất hoàn toàn." + +#: ../admin-tool/saveconfirm.py:53 +#: ../gedit/dialogs/gedit-close-confirmation-dialog.c:376 +msgid "" +"If you don't save, changes from the last minute will be permanently lost." +msgstr "" +"Nếu bạn không lÆ°u, các thay đổi của bạn ở phút chót sẽ bị mất hoàn toàn." 
+ +#: ../admin-tool/saveconfirm.py:57 +#: ../gedit/dialogs/gedit-close-confirmation-dialog.c:382 +#, c-format, python-format +msgid "" +"If you don't save, changes from the last minute and %ld second will be " +"permanently lost." +msgid_plural "" +"If you don't save, changes from the last minute and %ld second will be " +"permanently lost." +msgstr[0] "" +"Nếu bạn không lÆ°u, các thay đổi của bạn ở phút chót và %ld giây sẽ bị mất " +"hoàn toàn." + +#: ../admin-tool/saveconfirm.py:64 +#: ../gedit/dialogs/gedit-close-confirmation-dialog.c:392 +#, c-format, python-format +msgid "" +"If you don't save, changes from the last %ld minute will be permanently lost." +msgid_plural "" +"If you don't save, changes from the last %ld minute will be permanently lost." +msgstr[0] "" +"Nếu bạn không lÆ°u, các thay đổi của bạn ở %ld phút chót sẽ bị mất hoàn toàn." + +#: ../gedit/dialogs/gedit-close-confirmation-dialog.c:428 +#, c-format, python-format +msgid "If you don't save, changes from the last hour will be permanently lost." +msgid_plural "" +"If you don't save, changes from the last hour will be permanently lost." +msgstr[0] "" +"Nếu bạn không lÆ°u, các thay đổi của bạn ở giá» sau chót sẽ bị mất hoàn toàn." + +#: ../admin-tool/saveconfirm.py:76 +#: ../gedit/dialogs/gedit-close-confirmation-dialog.c:413 +#, c-format, python-format +msgid "" +"If you don't save, changes from the last hour and %d minute will be " +"permanently lost." +msgid_plural "" +"If you don't save, changes from the last hour and %d minute will be " +"permanently lost." +msgstr[0] "" +"Nếu bạn không lÆ°u, các thay đổi của bạn ở giá» sau chót và %d phút chót sẽ bị " +"mất hoàn toàn." + +#: ../admin-tool/sessionwindow.py:153 +#, python-format +msgid "Editing profile %s" +msgstr "Äang sá»­a đổi tiểu sá»­ sÆ¡ lược %s" + +#: src/gtkam-main.c:556 ../src/mud-tray.c:206 src/docklet.cpp:120 +#: src/mainwin.cpp:567 po/silky.glade.h:219 app/menubar.c:428 +msgid "_Quit" +msgstr "T_hoát" + +#: ../admin-tool/sessionwindow.py:178 +msgid "_Changes" +msgstr "_Äổi" + +#: ../admin-tool/sessionwindow.py:178 +msgid "Edit changes" +msgstr "Sá»­a đổi các thay đổi" + +#: ../admin-tool/sessionwindow.py:179 +msgid "_Lockdown" +msgstr "_Khoá xuống" + +#: ../admin-tool/sessionwindow.py:179 +msgid "Edit Lockdown settings" +msgstr "Sá»­a đổi thiết lập Khoá xuống" + +#: ../admin-tool/sessionwindow.py:184 +msgid "Enforce Mandatory" +msgstr "Ép làm việc Bắt buá»™c" + +#: ../admin-tool/sessionwindow.py:184 +msgid "Enforce mandatory settings in the editing session" +msgstr "Ép làm dùng thiết lập bắt buá»™c trong phiên sá»­a đổi" + +#: ../admin-tool/sessionwindow.py:273 +#, python-format +msgid "Lockdown settings for %s" +msgstr "Thiết lập khoá xuống cho %s" + +#: ../admin-tool/usersdialog.py:67 +#, python-format +msgid "Users for profile %s" +msgstr "NgÆ°á»i dùng cho tiểu sá»­ sÆ¡ lược %s" + +#: ../admin-tool/usersdialog.py:95 +msgid "Use This Profile" +msgstr "Dùng Tiểu sá»­ sÆ¡ lược này" + +#: ../lib/dirmonitor.py:151 +#, python-format +msgid "Failed to add monitor for %s" +msgstr "Việc thêm thiết bị hiển thị cho %s bị lá»—i" + +#: ../lib/dirmonitor.py:231 +#, python-format +msgid "Expected event: %s %s" +msgstr "Sá»± kiện mong đợi: %s %s" + +#: ../lib/protosession.py:142 +msgid "Unable to find a free X display" +msgstr "Không tìm thấy bá»™ trình bày X còn rảnh" + +#: ../lib/protosession.py:409 +msgid "Failed to start Xnest: timed out waiting for USR1 signal" +msgstr "Lá»—i khởi chạy Xnest: quá giá» trong khi đợi ký hiệu USR1" + +#: ../lib/protosession.py:411 
+msgid "Failed to start Xnest: died during startup" +msgstr "Lá»—i khởi chạy Xnest: kết thúc trong khi khởi chạy" + +#: ../lib/sources/filessource.py:68 +#, python-format +msgid "File '%s' created" +msgstr "Tập tin « %s » đã được tạo" + +#: ../lib/sources/filessource.py:70 +#, python-format +msgid "File '%s' deleted" +msgstr "Tập tin « %s » bị xoá bá»" + +#: ../lib/sources/filessource.py:72 +#, python-format +msgid "File '%s' changed" +msgstr "Tập tin « %s » đã được thay đổi" + +#: ../lib/sources/filessource.py:96 +msgid "Applications menu" +msgstr "Trình Ä‘Æ¡n Ứng dụng" + +#: ../lib/sources/filessource.py:98 +msgid "Preferences menu" +msgstr "Trình Ä‘Æ¡n Tùy thích" + +#: ../lib/sources/filessource.py:100 +msgid "Server Settings menu" +msgstr "Trình Ä‘Æ¡n Thiết lập Trình phục vụ" + +#: ../lib/sources/filessource.py:102 +msgid "System Settings menu" +msgstr "Trình Ä‘Æ¡n Thiết lập Hệ thống" + +#: ../lib/sources/filessource.py:104 +msgid "Start Here menu" +msgstr "Trình Ä‘Æ¡n Bắt đầu từ đây" + +#: ../lib/sources/gconfsource.py:89 +#, python-format +msgid "GConf key '%s' unset" +msgstr "Khoá GConf « %s » chÆ°a lập" + +#: ../lib/sources/gconfsource.py:91 +#, python-format +msgid "GConf key '%s' set to string '%s'" +msgstr "Khoá GConf « %s » được đặt là chuá»—i « %s »" + +#: ../lib/sources/gconfsource.py:93 +#, python-format +msgid "GConf key '%s' set to integer '%s'" +msgstr "Khoá GConf « %s » được đặt là số nguyên « %s »" + +#: ../lib/sources/gconfsource.py:95 +#, python-format +msgid "GConf key '%s' set to float '%s'" +msgstr "Khoá GConf « %s » được đặt là nổi « %s »" + +#: ../lib/sources/gconfsource.py:97 +#, python-format +msgid "GConf key '%s' set to boolean '%s'" +msgstr "Khoá GConf « %s » được đặt là bun « %s »" + +#: ../lib/sources/gconfsource.py:99 +#, python-format +msgid "GConf key '%s' set to schema '%s'" +msgstr "Khoá GConf « %s » được đặt là giản đồ « %s »" + +#: ../lib/sources/gconfsource.py:101 +#, python-format +msgid "GConf key '%s' set to list '%s'" +msgstr "Khoá GConf « %s » được đặt là danh sác h « %s »" + +#: ../lib/sources/gconfsource.py:103 +#, python-format +msgid "GConf key '%s' set to pair '%s'" +msgstr "Khoá GConf « %s » được đặt là cặp « %s »" + +#: ../lib/sources/gconfsource.py:105 +#, python-format +msgid "GConf key '%s' set to '%s'" +msgstr "Khoá GConf « %s » được đặt là « %s »" + +#: ../lib/sources/gconfsource.py:136 +msgid "Default GConf settings" +msgstr "Thiết lập GConf mặc định" + +#: ../lib/sources/gconfsource.py:138 +msgid "Mandatory GConf settings" +msgstr "Thiết lập GConf bắt buá»™c" + +#: ../lib/sources/mozillasource.py:132 +#, python-format +msgid "Mozilla key '%s' set to '%s'" +msgstr "Khoá Mozilla « %s » đã đặt là « %s »" + +#: ../lib/sources/mozillasource.py:134 +#, python-format +msgid "Mozilla key '%s' unset" +msgstr "Khoá Mozilla « %s » chÆ°a đặt" + +#: ../lib/sources/mozillasource.py:136 +#, python-format +msgid "Mozilla key '%s' changed to '%s'" +msgstr "Khoá Mozilla « %s » được thay đổi thành « %s »" + +#: ../lib/sources/mozillasource.py:165 ../lib/sources/mozillasource.py:175 +msgid "Web browser preferences" +msgstr "Tùy trích trình duyệt Mạng" + +#: ../lib/sources/mozillasource.py:167 ../lib/sources/mozillasource.py:177 +msgid "Web browser bookmarks" +msgstr "Äánh dấu trình duyệt Mạng" + +#: ../lib/sources/mozillasource.py:169 +msgid "Web browser profile list" +msgstr "Danh sách tiểu sá»­ sÆ¡ lược trình duyệt Mạng" + +#: ../lib/sources/mozillasource.py:520 +#, python-format +msgid "File Not Found (%s)" +msgstr "Không tìm thấy tập tin (%s)" + 
+#: ../lib/sources/mozillasource.py:871 +#, python-format +msgid "duplicate name(%(name)s) in section %(section)s" +msgstr "tên trùng(%(name)s) trong phần %(section)s" + +#: ../lib/sources/mozillasource.py:880 +#, python-format +msgid "redundant default in section %s" +msgstr "mặc định thừa trong phần %s" + +#: ../lib/sources/mozillasource.py:897 +msgid "no default profile" +msgstr "không có tiểu sá»­ sÆ¡ lược mặc định" + +#: ../lib/sources/mozillasource.py:952 +#, python-format +msgid "Mozilla bookmark created '%s' -> '%s'" +msgstr "Äánh dấu Mozilla đã được tạo « %s » → « %s »" + +#: ../lib/sources/mozillasource.py:954 +#, python-format +msgid "Mozilla bookmark folder created '%s'" +msgstr "ThÆ° mục đánh dấu Mozilla đã được tạo « %s »" + +#: ../lib/sources/mozillasource.py:957 +#, python-format +msgid "Mozilla bookmark deleted '%s'" +msgstr "Äánh dấu Mozilla bị xoá bỠ« %s »" + +#: ../lib/sources/mozillasource.py:959 +#, python-format +msgid "Mozilla bookmark folder deleted '%s'" +msgstr "ThÆ° mục đánh dấu Mozilla bị xoá bỠ« %s »" + +#: ../lib/sources/mozillasource.py:962 +#, python-format +msgid "Mozilla bookmark changed '%s' '%s'" +msgstr "Äánh dấu Mozilla đã được thay đổi « %s » « %s »" + +#: ../lib/sources/mozillasource.py:964 +#, python-format +msgid "Mozilla bookmark folder changed '%s'" +msgstr "ThÆ° mục đánh dấu Mozilla đã được thay đổi « %s »" + +#: ../lib/sources/paneldelegate.py:58 +#, python-format +msgid "Panel '%s' added" +msgstr "Bảng Ä‘iá»u khiển « %s » đã được thêm" + +#: ../lib/sources/paneldelegate.py:64 +#, python-format +msgid "Panel '%s' removed" +msgstr "Bảng Ä‘iá»u khiển « %s » bị gỡ bá»" + +#: ../lib/sources/paneldelegate.py:70 +#, python-format +msgid "Panel applet '%s' added" +msgstr "Tiểu dụng bảng Ä‘iá»u khiển « %s » đã được thêm" + +#: ../lib/sources/paneldelegate.py:76 +#, python-format +msgid "Panel applet '%s' removed" +msgstr "Tiểu dụng bảng Ä‘iá»u khiển « %s » bị gỡ bá»" + +#: ../lib/sources/paneldelegate.py:82 +#, python-format +msgid "Panel object '%s' added" +msgstr "Äối tượng Bảng Ä‘iá»u khiển « %s » đã được thêm" + +#: ../lib/sources/paneldelegate.py:94 +#, python-format +msgid "Panel object '%s' removed" +msgstr "Äối tượng Bảng Ä‘iá»u khiển « %s » bị gỡ bá»" + +#: ../lib/sources/paneldelegate.py:375 +msgid "Panel File" +msgstr "Tập tin Bảng Ä‘iá»u khiển" + +#: ../lib/storage.py:173 +#, python-format +msgid "Failed to read file '%s': %s" +msgstr "Lá»—i Ä‘á»c tập tin « %s »: %s" + +#: ../lib/storage.py:183 +#, python-format +msgid "Failed to read metadata from '%s': %s" +msgstr "Lá»—i Ä‘á»c siêu dữ liệu từ « %s »: %s" + +#: ../lib/storage.py:189 +#, python-format +msgid "Invalid metadata section in '%s': %s" +msgstr "Phần siêu dữ liệu không hợp lệ trong « %s »: %s" + +#: ../lib/storage.py:363 +#, python-format +msgid "Cannot add non-existent file '%s'" +msgstr "Không thể thêm tập tin không tồn tại « %s »" + +#: ../lib/storage.py:540 +#, python-format +msgid "Profile is read-only %s" +msgstr "Tiêu sá»­ sÆ¡ lược chỉ cho phép Ä‘á»c %s" + +#: ../lib/unittests.py:38 ../lib/unittests.py:39 +msgid "Ignore WARNINGs" +msgstr "Bá» qua các CẢNH BÃO" + +#: ../lib/unittests.py:61 +#, python-format +msgid "Running %s tests" +msgstr "Äang chạy %s việc thá»­" + +#: ../lib/unittests.py:63 +#, python-format +msgid "Running %s tests (%s)" +msgstr "Äang chạy %s việc thá»­ (%s)" + +#: ../lib/unittests.py:70 ../ui/welcome.glade.h:33 +msgid "Success!" 
+msgstr "• Thành công •" + +#: ../lib/userdb.py:52 +#, python-format +msgid "invalid type for setting %s in %s" +msgstr "kiểu không hợp lệ cho thiết lập %s trong %s" + +#: ../lib/userdb.py:212 +#, python-format +msgid "No search based specified for %s" +msgstr "ChÆ°a ghi rõ cÆ¡ bản tìm kiếm cho %s" + +#: ../lib/userdb.py:215 +#, python-format +msgid "No query filter specified for %s" +msgstr "ChÆ°a ghi rõ bá»™ lá»c truy vấn cho %s" + +#: ../lib/userdb.py:218 +#, python-format +msgid "No result attribute specified for %s" +msgstr "ChÆ°a ghi rõ thuá»™c tính kết quả cho %s" + +#: ../lib/userdb.py:227 +msgid "Scope must be one of sub, base and one" +msgstr "" +"Phạm vị phải là má»™t của:\n" +" • sub (dÆ°á»›i)\n" +" • base (cÆ¡ bản)\n" +" • one (má»™t)" + +#: ../lib/userdb.py:247 +msgid "multiple_result must be one of first and random" +msgstr "" +"multiple_result (nhiá»u kết quả) phải là má»™t của:\n" +" • first (thứ nhất)\n" +" • random (ngẫu nhiên)" + +#: ../lib/userdb.py:339 +#, python-format +msgid "Could not open %s for writing" +msgstr "Không thể mở %s để ghi" + +#: ../lib/userdb.py:352 +#, python-format +msgid "Failed to save UserDatabase to %s" +msgstr "Lá»—i lÆ°u UserDatabase (cÆ¡ sở dữ liệu ngÆ°á»i dùng) vào %s" + +#: ../lib/userdb.py:375 ../lib/userdb.py:410 +#, python-format +msgid "File %s is not a profile configuration" +msgstr "Tập tin %s không phải là cấu hình tiểu sá»­ sÆ¡ lược" + +#: ../lib/userdb.py:382 +#, python-format +msgid "Failed to add default profile %s to configuration" +msgstr "Lá»—i thêm tiểu sá»­ sÆ¡ lược mặc định %s vào cấu hình" + +#: ../lib/userdb.py:418 +#, python-format +msgid "Failed to add user %s to profile configuration" +msgstr "Lá»—i thêm ngÆ°á»i dùng %s vào cấu hình tiểu sá»­ sÆ¡ lược" + +#: ../lib/userdb.py:449 +msgid "Failed to get the user list" +msgstr "Lá»—i lấy danh sách ngÆ°á»i dùng" + +#: ../lib/util.py:127 +msgid "" +"Cannot find home directory: not set in /etc/passwd and no value for $HOME in " +"environment" +msgstr "" +"Không tìm thấy thÆ° mục chính: chÆ°a lập trong và không có giá " +"trị cho biến « $HOME » trong môi trÆ°á»ng." + +#: ../lib/util.py:140 +msgid "" +"Cannot find username: not set in /etc/passwd and no value for $USER in " +"environment" +msgstr "" +"Không tìm thấy tên ngÆ°á»i dùng: chÆ°a lập trong và không có giá " +"trị cho biến « $USER » trong môi trÆ°á»ng." + +#: src/filehandling_functions.c:469 src/filehandling_functions.c:476 +#, c-format +msgid "Searching for indirect done" +msgstr "Äang tìm kiếm xong gián tiếp" + +#: src/filehandling_functions.c:508 src/filehandling_functions.c:515 +#, c-format +msgid "Warning: could not find tag table" +msgstr "Cảnh báo : không tìm thấy bảng thẻ" + +#: src/filehandling_functions.c:542 +#, c-format +msgid "Searching for tag table done\n" +msgstr "Äang tìm kiếm xong bảng thẻ\n" + +#: src/filehandling_functions.c:1211 +#, c-format +msgid "Error: could not open info file\n" +msgstr "Lá»—i: không thể mở tập tin thông tin\n" + +#: src/mainfunction.c:143 src/manual.c:975 +msgid "Are you sure you want to print?" +msgstr "Bạn có chắc muốn in không?" + +#: src/mainfunction.c:195 src/manual.c:1020 +msgid "Enter line: " +msgstr "Gõ dòng: " + +#: src/mainfunction.c:236 src/manual.c:1067 +msgid "Enter command: " +msgstr "Gõ lệnh: " + +#: src/mainfunction.c:255 +msgid "Operation failed..." +msgstr "Thao tác thất bại..." 
+ +#: src/mainfunction.c:291 src/mainfunction.c:551 src/manual.c:1120 +msgid "Enter regular expression: " +msgstr "Gõ biểu thức chính quy: " + +#: src/mainfunction.c:521 src/mainfunction.c:615 src/manual.c:1196 +msgid "Search string not found..." +msgstr "Không tìm thấy chuá»—i tìm kiếm..." + +#: src/mainfunction.c:576 src/manual.c:1153 +msgid "Invalid regular expression;" +msgstr "Biểu thức chính quy không hợp lệ;" + +#: src/mainfunction.c:578 src/manual.c:1155 +msgid "Press any key to continue..." +msgstr "Bấm phím nào để tiếp tục..." + +#: src/mainfunction.c:644 +msgid "Enter node name: " +msgstr "Gõ tên nút: " + +#: src/mainfunction.c:720 +#, c-format +msgid "Node %s not found" +msgstr "Không tìm thấy nút %s" + +#: src/mainfunction.c:1178 src/manual.c:1546 ../src/red_transaction.py:96 +#: ../glade/glade_project_window.c:1698 ../glade/glade_project_window.c:1704 +msgid "Are you sure you want to quit?" +msgstr "Bạn có chắc muốn thoát không?" + +#: src/manual.c:315 +#, c-format +msgid "Error: Cannot call man command.\n" +msgstr "Lá»—i: không thể gá»i lệnh man (hÆ°á»›ng dẫn).\n" + +#: src/manual.c:324 +#, c-format +msgid "Error: No manual page found either.\n" +msgstr "Lá»—i: cÅ©ng không tìm thấy trang hÆ°á»›ng dẫn.\n" + +#: src/manual.c:327 +#, c-format +msgid "Apropos pages:\n" +msgstr "Trang Apropos:\n" + +#: src/manual.c:370 +msgid "Calling gunzip for" +msgstr "Äang gá»i gunzip cho" + +#: src/manual.c:376 +#, c-format +msgid "Couldn't call gunzip.\n" +msgstr "Không thể gá»i gunzip.\n" + +#: src/manual.c:413 +msgid "IGNORING" +msgstr "ÄANG BỎ QUA" + +#: src/manual.c:456 +#, c-format +msgid "Error: No manual page found\n" +msgstr "Lá»—i: không tìm thấy trang hÆ°á»›ng dẫn\n" + +#: src/manual.c:461 +#, c-format +msgid "Calling apropos \n" +msgstr "Äang gá»i apropos \n" + +#: src/manual.c:466 +#, c-format +msgid "Nothing appropiate\n" +msgstr "Không có gì thích hợp\n" + +#: src/manual.c:989 +msgid "Enter manual name: " +msgstr "Gõ tên sổ hÆ°á»›ng dẫn: " + +#: src/manual.c:1629 src/video.c:114 +#, c-format +msgid "Viewing line %d/%d, %d%%" +msgstr "Äang xem dòng %d/%d, %d%%" + +#: src/manual.c:1631 src/video.c:116 +#, c-format +msgid "Viewing line %d/%d, 100%%" +msgstr "Äang xem dòng %d/%d, 100%%" + +#: src/parse_config.c:113 +#, c-format +msgid "Can't open config file!\n" +msgstr "• Không thể mở tập tin cấu hình. •\n" + +#: src/parse_config.c:163 +#, c-format +msgid "Parse error in config file on line %d\n" +msgstr "Gặp lá»—i phân tách trong tập tin cấu hình trên dòng %d\n" + +#: src/utils.c:122 src/utils.c:178 +#, c-format +msgid "Virtual memory exhausted\n" +msgstr "Hết bá»™ nhá»› ảo\n" + +#: src/utils.c:232 +#, c-format +msgid "" +"Illegal characters in filename!\n" +"*** %s\n" +msgstr "" +"• Gặp ký tá»± sai trong tên tập tin. 
•\n" +"*** %s\n" + +#: ../partman-basicfilesystems.templates:113 src/shar.c:679 utils.c:121 +#: utils.c:127 utils.c:133 utils.c:139 utils.c:145 utils.c:151 utils.c:157 +#: ../mimedir/mimedir-vcard.c:3665 +msgid "yes" +msgstr "có" + +#: ../partman-basicfilesystems.templates:118 src/shar.c:680 dir.c:1035 +#: dir.c:1056 utils.c:123 utils.c:129 utils.c:135 utils.c:141 utils.c:147 +#: utils.c:153 utils.c:159 +msgid "no" +msgstr "không" + +#: src/video.c:61 src/fe-gtk/dccgui.c:351 ../glade/glade_project_options.c:743 +#: ../glade/gnome/gnomepixmap.c:79 +msgid "File:" +msgstr "Tập tin:" + +#: src/video.c:62 +msgid "Node:" +msgstr "Nút:" + +#: src/video.c:63 ../glines/glines.c:1937 makeinfo/node.c:991 +msgid "Next:" +msgstr "Kế:" + +#: src/video.c:64 +msgid "Prev:" +msgstr "TrÆ°á»›c:" + +#: src/video.c:65 ../directed.xml.in.h:16 makeinfo/node.c:1021 +msgid "Up:" +msgstr "Lên:" + +#: src/pinfo.c:113 src/pinfo.c:198 +#, c-format +msgid "Looking for man page...\n" +msgstr "Äang tìm trang hÆ°á»›ng dẫn...\n" + +#: src/pinfo.c:151 +#, c-format +msgid "--node option used without argument\n" +msgstr "Tùy chá»n « --node » (nút) được dùng không có đối số\n" + +#: src/pinfo.c:161 +#, c-format +msgid "--rcfile option used without argument\n" +msgstr "Tùy chá»n « --rcfile » (tập tin rc) được dùng không có đối số\n" + +#: src/pinfo.c:172 +#, c-format +msgid "" +"Usage:\n" +"%s [options] [info|manual]\n" +"Options:\n" +"-h, --help help\n" +"-v, --version version\n" +"-m, --manual use man page\n" +"-r, --raw-filename use raw filename\n" +"-f, --file synonym for -r\n" +"-a, --apropos call apropos if nothing found\n" +"-p, --plain-apropos call only apropos\n" +"-c, --cut-man-headers cut out repeated man headers\n" +"-l, --long-manual-links use long link names in manuals\n" +"-s, --squeeze-manlines cut empty lines from manual pages\n" +"-d, --dont-handle-without-tag-table don't display texinfo pages without " +"tag\n" +" tables\n" +"-t, --force-manual-tag-table force manual detection of tag table\n" +"-x, --clear-at-exit clear screen at exit\n" +" --node=nodename, --node nodename jump directly to the node nodename\n" +" --rcfile=file, --rcfile file use alternate rcfile\n" +msgstr "" +"Cách sá»­ dụng:\n" +"%s [tùy_chá»n ...] 
[thông_tin|sổ_hÆ°á»›ng_dẫn]\n" +"Options:\n" +"-h, --help _trợ giúp_\n" +"-v, --version _phiên bản_\n" +"-m, --manual sá»­ dụng _trang hÆ°á»›ng dẫn_\n" +"-r, --raw-filename sá»­ dụng _tên tập tin thô_\n" +"-f, --file bằng tùy chá»n « -r » (_tập tin_)\n" +"-a, --apropos gá»i apropos nếu không tìm gì\n" +"-p, --plain-apropos gá»i chỉ apropos thôi (_chuẩn_)\n" +"-c, --cut-man-headers _cắt ra các dòng đầu hÆ°á»›ng dẫn_ trùng\n" +"-l, --long-manual-links sá»­ dụng tên _liên kết dài_ trong _sổ " +"hÆ°á»›ng dẫn_\n" +"-s, --squeeze-manlines cắt các _dòng_ trắng ra trang _hÆ°á»›ng " +"dẫn_ (_vắt_)\n" +"-d, --dont-handle-without-tag-table _đừng_ hiển thị trang kiểu texinfo\n" +"\t\t\t\t\t\t\t\t_không có bảng thẻ_ (_quản " +"lý_) -t, --force-manual-tag-" +"table _buá»™c_ tá»± phát hiện _bảng thẻ_\n" +"-x, --clear-at-exit _xoá_ màn hình _khi thoát_\n" +" --node=nodename, --node nodename nhảy thẳng đến _nút tên_ này\n" +" --rcfile=tập_tin, --rcfile tập_tin sá»­ dụng tập tin rc thay thế\n" + +#: src/pinfo.c:312 +#, c-format +msgid "Error: could not open info file, trying manual\n" +msgstr "Lá»—i: không thể mở tập tin thông tin nên thá»­ sổ hÆ°á»›ng dẫn\n" + +#: src/pinfo.c:345 +#, c-format +msgid "Warning: tag table not found...\n" +msgstr "Cảnh báo : không tìm thấy bảng thẻ...\n" + +#: src/pinfo.c:349 +#, c-format +msgid "Trying to create alternate tag table...\n" +msgstr "Äang cố tạo bảng thẻ thay thế...\n" + +#: src/pinfo.c:354 src/pinfo.c:564 +#, c-format +msgid "This doesn't look like info file...\n" +msgstr "Äiá»u này không hình nhÆ° tập tin thông tin...\n" + +#: src/pinfo.c:367 +#, c-format +msgid "Specified node does not exist...\n" +msgstr "Không có nút đã gõ...\n" + +#: src/pinfo.c:419 +msgid "Tag table is corrupt, trying to fix..." +msgstr "Bảng thẻ bị há»ng nên cố sá»­a..." + +#: src/pinfo.c:420 +msgid "press a key to continue" +msgstr "bấm phím nào để tiếp tục" + +#: src/pinfo.c:486 +msgid "File not found. Press any key..." +msgstr "Không tìm thấy tập tin. Bấm phím nào..." + +#: src/pinfo.c:506 +#, c-format +msgid "Unexpected error.\n" +msgstr "Gặp lá»—i bất ngá».\n" + +#: src/pinfo.c:559 +msgid "Tag table not found. Trying to create alternate..." +msgstr "Không tìm thấy bảng thẻ. Äang cố tạo Ä‘iá»u thay thế..." + +#: src/pinfo.c:645 +#, c-format +msgid "Security warning: Unable to get GID of group called: %s\n" +msgstr "Cảnh báo bảo mật: không thể lấy GID của nhóm tên: %s\n" + +#: src/pinfo.c:665 +#, c-format +msgid "Security warning: Unable to get UID of user called: %s\n" +msgstr "Cảnh báo bảo mật: không thể lấy UID của ngÆ°á»i dùng tên: %s\n" + +#: ../templates:5 +msgid "Which webserver would you like to configure automatically?" +msgstr "Bạn có muốn tá»± Ä‘á»™ng cấu hình trình phục vụ Mạng nào?" + +#: ../templates:5 +msgid "" +"LDAP Account Manager supports any webserver that supports PHP4, but this " +"automatic configuration process only supports Apache and Apache2. If you " +"choose to configure Apache(2) LAM can be accessed at http(s)://localhost/lam" +msgstr "" +"Bá»™ Quản lý Tài khoản LDAP há»— trợ trình phục vụ nào cÅ©ng há»— trợ PHP4, nhÆ°ng " +"mà tiến trình tá»± Ä‘á»™ng cấu hình này chỉ há»— trợ Apache và Apache2 thôi. Nếu " +"bạn chá»n cấu hình Apache(2), BQT có thể được truy cập tại ." + +#: ../templates:13 +msgid "Enter alias:" +msgstr "Gõ bí danh:" + +#: ../templates:13 +msgid "" +"LAM will add an alias to your httpd.conf which allows you to access LAM at " +"http(s)://localhost/lam. If you want an alias other than \"lam\" please " +"specify it here." 
+msgstr "" +"BQT sẽ thêm má»™t bí danh vào tập tin cấu hình của bạn, mà cho " +"phép bạn truy cập BQT tại . Nếu bạn muốn có bí danh " +"khác vá»›i « lam », hãy gõ nó vào đây." + +#: ../templates:21 +msgid "Enter master configuration password (clear text):" +msgstr "Gõ mật khẩu cấu hình chủ (chữ xem được):" + +#: ../templates:21 +msgid "" +"The configuration profiles are secured by a master password. You will need " +"it to create and delete profiles. As default it is set to \"lam\" and can be " +"changed directly in LAM. But you can also change it now." +msgstr "" +"Những hồ sÆ¡ cấu hình đựơc bảo mật bởi má»™t mật khẩu chủ. Bạn sẽ cần thiết nó " +"để tạo và xoá bá» hồ sÆ¡ đó. Mặc định là « lam » và có thể được thay đổi trá»±c " +"tiếp từ BQT. Bạn cÅ©ng có thể thay đổi nó ngay bây giá»." + +#: ../templates:30 +msgid "Would you like to restart your webserver(s) now?" +msgstr "Vậy bạn có muốn khởi chạy trình phục vụ Mạng không?" + +#: ../templates:30 +msgid "Your webserver(s) need to be restarted in order to apply the changes." +msgstr "" +"Cần phải khởi chạy lại trình phục vụ Mạng, để làm cho các thay đổi hoạt Ä‘á»™ng." + +#: ../templates:35 +msgid "Upgrade from pre-0.5.0 versions" +msgstr "Nâng cấp từ phiên bản trÆ°á»›c 0.5.0" + +#: ../templates:35 +msgid "" +"Please note that this version uses new file formats for configuration and " +"account profiles. You will have to update your configuration and create new " +"account profiles." +msgstr "" +"Hãy ghi chú rằng phiên bản này sá»­ dụng khuôn dạng tập tin má»›i vá»›i cấu hình " +"và hồ sÆ¡ tài khoản. Bạn sẽ phải cập nhật cấu hình, và tạo hồ sÆ¡ tài khoản " +"má»›i." + +#: ../a11y/addressbook/ea-minicard-view.c:169 +msgid "evolution addressbook" +msgstr "Sổ địa chỉ Evolution" + +#: ../a11y/addressbook/ea-minicard-view.c:34 +#: ../addressbook/gui/component/addressbook-component.c:225 ../main.c:158 +msgid "New Contact" +msgstr "Liên lạc má»›i" + +#: ../a11y/addressbook/ea-minicard-view.c:35 +#: ../addressbook/gui/component/addressbook-component.c:233 +msgid "New Contact List" +msgstr "Danh sách liên lạc má»›i" + +#: ../a11y/addressbook/ea-minicard-view.c:152 +#, c-format +msgid "current addressbook folder has %d card" +msgid_plural "current addressbook folder has %d card" +msgstr[0] "thÆ° mục sổ địa chỉ hiện thá»i có %d thẻ" + +#: ../src/menus.c:259 ../glade/glade_project_window.c:374 +#: ../widgets/gtk+.xml.in.h:136 +msgid "Open" +msgstr "Mở" + +#: ../a11y/addressbook/ea-minicard.c:141 +msgid "Contact List: " +msgstr "Danh sách liên lạc:" + +#: ../a11y/addressbook/ea-minicard.c:142 +msgid "Contact: " +msgstr "Liên lạc: " + +#: ../a11y/addressbook/ea-minicard.c:168 +msgid "evolution minicard" +msgstr "thẻ tí tị evolution" + +#: ../a11y/calendar/ea-cal-view-event.c:235 +msgid "It has alarms." +msgstr "Nó có bảo Ä‘á»™ng." + +#: ../a11y/calendar/ea-cal-view-event.c:238 +msgid "It has recurrences." +msgstr "Nó có nhiá»u lần." + +#: ../a11y/calendar/ea-cal-view-event.c:241 +msgid "It is a meeting." +msgstr "Nó là cuá»™c há»p." + +#: ../a11y/calendar/ea-cal-view-event.c:247 +#, c-format +msgid "Calendar Event: Summary is %s." +msgstr "Sá»± kiện lịch: tóm tắt là %s." + +#: ../a11y/calendar/ea-cal-view-event.c:249 +msgid "Calendar Event: It has no summary." +msgstr "Sá»± kiện lịch: chÆ°a có tóm tắt." 
+ +#: ../a11y/calendar/ea-cal-view-event.c:268 +msgid "calendar view event" +msgstr "sự kiện xem lịch" + +#: ../a11y/calendar/ea-cal-view-event.c:485 +msgid "Grab Focus" +msgstr "Được chú ý" + +#: ../a11y/calendar/ea-cal-view.c:306 +msgid "New Appointment" +msgstr "Cuộc hẹn mới" + +#: ../a11y/calendar/ea-cal-view.c:307 +msgid "New All Day Event" +msgstr "Sự kiện nguyên ngày mới" + +#: ../a11y/calendar/ea-cal-view.c:308 ../calendar/gui/e-calendar-view.c:1506 +msgid "New Meeting" +msgstr "Cuộc họp mới" + +#: ../a11y/calendar/ea-cal-view.c:309 +msgid "Go to Today" +msgstr "Đi tới ngày hôm nay" + +#: ../a11y/calendar/ea-cal-view.c:310 +msgid "Go to Date" +msgstr "Đi tới ngày" + +#: ../a11y/calendar/ea-day-view-main-item.c:299 +#: ../a11y/calendar/ea-week-view-main-item.c:301 +msgid "a table to view and select the current time range" +msgstr "một bảng cho phép xem và chọn phạm vi thời gian hiện có" + +#: ../a11y/calendar/ea-day-view.c:146 ../a11y/calendar/ea-week-view.c:148 +#, c-format +msgid "It has %d event." +msgid_plural "It has %d events." +msgstr[0] "Nó có %d sự kiện." + +#: ../a11y/calendar/ea-day-view.c:148 ../a11y/calendar/ea-week-view.c:150 +msgid "It has no events." +msgstr "Nó không có sự kiện nào." + +#: ../a11y/calendar/ea-day-view.c:152 +#, c-format +msgid "Work Week View: %s. %s" +msgstr "Khung xem tuần làm việc: %s. %s" + +#: ../a11y/calendar/ea-day-view.c:155 +#, c-format +msgid "Day View: %s. %s" +msgstr "Khung xem ngày: %s. %s" + +#: ../a11y/calendar/ea-day-view.c:186 +msgid "calendar view for a work week" +msgstr "khung xem lịch cho một tuần làm việc" + +#: ../a11y/calendar/ea-day-view.c:188 +msgid "calendar view for one or more days" +msgstr "khung xem lịch cho một hay nhiều ngày" + +#: ../calendar/gui/calendar-component.c:660 +msgid "%A %d %b %Y" +msgstr "%A %d %b %Y" + +#: ../calendar/gui/calendar-component.c:663 ../calendar/gui/e-day-view.c:1514 +msgid "%a %d %b" +msgstr "%a %d %b" + +#: ../calendar/gui/calendar-component.c:672 +msgid "%a %d %b %Y" +msgstr "%A, ngày %e, %B, năm %Y" + +#: ../calendar/gui/calendar-component.c:699 +msgid "%d %b %Y" +msgstr "%d %b %Y" + +#: ../calendar/gui/calendar-component.c:689 ../calendar/gui/e-day-view.c:1530 +msgid "%d %b" +msgstr "%d %b" + +#: ../calendar/importers/icalendar-importer.c:738 +msgid "Gnome Calendar" +msgstr "Lịch Gnome" + +#: ../a11y/calendar/ea-gnome-calendar.c:290 +msgid "search bar" +msgstr "thanh tìm" + +#: ../a11y/calendar/ea-gnome-calendar.c:291 +msgid "evolution calendar search bar" +msgstr "thanh tìm kiếm lịch Evolution" + +#: ../a11y/calendar/ea-jump-button.c:149 +msgid "Jump button" +msgstr "Nút nhảy" + +#: ../a11y/calendar/ea-jump-button.c:158 +msgid "Click here, you can find more events." +msgstr "Nhấn vào đây để tìm sự kiện thêm nữa." + +#: ../a11y/calendar/ea-week-view.c:155 +#, c-format +msgid "Month View: %s. %s" +msgstr "Khung xem tháng: %s. %s" + +#: ../a11y/calendar/ea-week-view.c:159 +#, c-format +msgid "Week View: %s. %s" +msgstr "Khung xem tuần: %s. %s" + +#: ../a11y/calendar/ea-week-view.c:190 +msgid "calendar view for a month" +msgstr "khung xem lịch cho một tháng" + +#: ../a11y/calendar/ea-week-view.c:192 +msgid "calendar view for one or more weeks" +msgstr "khung xem lịch cho một hay nhiều tuần" + +#: ../a11y/e-table/gal-a11y-e-cell-popup.c:124 +msgid "popup" +msgstr "bật lên" + +#.
action name +#: ../a11y/e-table/gal-a11y-e-cell-popup.c:125 +msgid "popup a child" +msgstr "bật lên một điều con" + +#: ../a11y/e-table/gal-a11y-e-cell-text.c:612 +msgid "edit" +msgstr "đổi" + +#: ../a11y/e-table/gal-a11y-e-cell-text.c:613 +msgid "begin editing this cell" +msgstr "bắt đầu sửa đổi ô này" + +#: ../a11y/e-table/gal-a11y-e-cell-toggle.c:151 +msgid "toggle" +msgstr "bật tắt" + +#. action name +#: ../a11y/e-table/gal-a11y-e-cell-toggle.c:152 +msgid "toggle the cell" +msgstr "bật/tắt ô này" + +#: ../a11y/e-table/gal-a11y-e-cell-tree.c:171 +msgid "expand" +msgstr "mở rộng" + +#: ../a11y/e-table/gal-a11y-e-cell-tree.c:172 +msgid "expands the row in the ETree containing this cell" +msgstr "mở rộng hàng trong ETree chứa ô này" + +#: ../a11y/e-table/gal-a11y-e-cell-tree.c:177 +msgid "collapse" +msgstr "co lại" + +#: ../a11y/e-table/gal-a11y-e-cell-tree.c:178 +msgid "collapses the row in the ETree containing this cell" +msgstr "co lại hàng trong ETree chứa ô này" + +#: ../a11y/e-table/gal-a11y-e-cell.c:107 +msgid "Table Cell" +msgstr "Ô bảng" + +#: ../widgets/table/e-table-click-to-add.c:575 +msgid "click to add" +msgstr "nhấn chuột để thêm" + +#: ../a11y/e-table/gal-a11y-e-table-click-to-add.c:53 +msgid "click" +msgstr "nhắp" + +#: ../a11y/e-table/gal-a11y-e-table-column-header.c:135 +msgid "sort" +msgstr "sắp xếp" + +#: ../a11y/widgets/ea-calendar-item.c:296 +#: ../a11y/widgets/ea-calendar-item.c:302 prefs.c:392 +msgid "%d %B %Y" +msgstr "%d %B %Y" + +#: ../a11y/widgets/ea-calendar-item.c:304 +#, c-format +msgid "Calendar: from %s to %s" +msgstr "Lịch: từ %s đến %s" + +#: ../a11y/widgets/ea-calendar-item.c:339 +msgid "evolution calendar item" +msgstr "mục lịch Evolution" + +#: ../a11y/widgets/ea-combo-button.c:40 +msgid "Combo Button" +msgstr "Nút tổ hợp" + +#: ../a11y/widgets/ea-combo-button.c:50 +msgid "Activate Default" +msgstr "Dùng mặc định" + +#: ../a11y/widgets/ea-combo-button.c:52 ../glade/gbwidgets/gbmenu.c:198 +#: ../widgets/gtk+.xml.in.h:143 +msgid "Popup Menu" +msgstr "Trình đơn bật lên" + +#: ../addressbook/addressbook.error.xml.h:1 +msgid "" +"A contact already exists with this address. Would you like to add a new card " +"with the same address anyway?" +msgstr "" +"Một liên lạc với địa chỉ này đã có. Bạn vẫn muốn thêm một thẻ mới với " +"cùng địa chỉ không?" + +#: ../addressbook/addressbook.error.xml.h:2 +msgid "Address '{0}' already exists." +msgstr "Địa chỉ « {0} » đã có." + +#: ../addressbook/addressbook.error.xml.h:3 +msgid "Cannot move contact." +msgstr "Không di chuyển được liên lạc." + +#: ../addressbook/addressbook.error.xml.h:4 +msgid "Category editor not available." +msgstr "Không có bộ biên soạn phân loại." + +#: ../addressbook/addressbook.error.xml.h:5 +msgid "" +"Check to make sure your password is spelled correctly and that you are using " +"a supported login method. Remember that many passwords are case sensitive; " +"your caps lock might be on." +msgstr "" +"Hãy kiểm tra xem mật khẩu của bạn được gõ chính xác và bạn sử dụng phương " +"thức đăng nhập được hỗ trợ. Lưu ý rằng nhiều mật khẩu phân biệt chữ hoa, chữ " +"thường; và hãy chắc là phím Caps Lock của bạn được tắt." + +#: ../addressbook/addressbook.error.xml.h:6 +msgid "Could not get schema information for LDAP server." +msgstr "Không thể lấy thông tin giản đồ cho máy phục vụ LDAP." + +#: ../addressbook/addressbook.error.xml.h:7 +msgid "Could not remove addressbook." +msgstr "Không thể gỡ bỏ sổ địa chỉ."
+ +#: ../addressbook/addressbook.error.xml.h:8 +msgid "" +"Currently you can access only GroupWise System Address Book from Evolution. " +"Please use some other GroupWise mail client once, to get your GroupWise " +"Frequent Contacts and Groupwise Personal Contacts folders." +msgstr "" +"Hiện thời bạn chỉ có thể truy cập Sổ Địa Chỉ hệ thống Groupwise từ " +"Evolution. Hãy chạy một lần ứng dụng khách thư Groupwise khác, để lấy các " +"thư mục GroupWise Frequent Contacts (liên lạc thường) và GroupWise Personal " +"Contacts (liên lạc cá nhân)." + +#: ../addressbook/addressbook.error.xml.h:9 +#: ../addressbook/addressbook.error.xml.h:8 +msgid "Delete address book '{0}'?" +msgstr "Xoá bỏ sổ địa chỉ « {0} » không?" + +#: ../addressbook/addressbook.error.xml.h:10 +#: ../addressbook/addressbook.error.xml.h:9 +msgid "Error loading addressbook." +msgstr "Gặp lỗi khi tải sổ địa chỉ." + +#: ../addressbook/addressbook.error.xml.h:11 +#: ../addressbook/addressbook.error.xml.h:10 +msgid "Error saving {0} to {1}: {2}" +msgstr "Gặp lỗi khi lưu {0} vào {1}: {2}" + +#: ../addressbook/addressbook.error.xml.h:12 +#: ../addressbook/addressbook.error.xml.h:11 +msgid "Failed to authenticate with LDAP server." +msgstr "Lỗi xác thực với máy phục vụ LDAP." + +#: ../addressbook/addressbook.error.xml.h:13 +msgid "GroupWise Address book creation:" +msgstr "Tạo Sổ địa chỉ GroupWise:" + +#: ../addressbook/addressbook.error.xml.h:14 +#: ../addressbook/addressbook.error.xml.h:12 +msgid "LDAP server did not respond with valid schema information." +msgstr "Máy phục vụ LDAP không trả lời với thông tin giản đồ hợp lệ." + +#: ../addressbook/addressbook.error.xml.h:15 +#: ../addressbook/addressbook.error.xml.h:13 +msgid "Server Version" +msgstr "Phiên bản máy phục vụ" + +#: ../addressbook/addressbook.error.xml.h:16 +#: ../addressbook/addressbook.error.xml.h:14 +#: ../calendar/calendar.error.xml.h:44 +msgid "Some features may not work properly with your current server" +msgstr "" +"Có lẽ một số tính năng sẽ không hoạt động với máy phục vụ hiện thời của bạn." + +#: ../addressbook/addressbook.error.xml.h:17 +#: ../addressbook/addressbook.error.xml.h:15 +msgid "The Evolution addressbook has quit unexpectedly." +msgstr "Sổ địa chỉ Evolution đã thoát bất ngờ." + +#: ../addressbook/addressbook.error.xml.h:18 +#: ../addressbook/addressbook.error.xml.h:16 +msgid "" +"The image you have selected is large. Do you want to resize and store it?" +msgstr "Ảnh bạn đã chọn là ảnh lớn. Bạn có muốn thay đổi kích thước và cất giữ nó không?" + +#: ../addressbook/addressbook.error.xml.h:19 +#: ../addressbook/addressbook.error.xml.h:17 +msgid "" +"This LDAP server may use an older version of LDAP, which does not support " +"this functionality or it may be misconfigured. Ask your administrator for " +"supported search bases." +msgstr "" +"Máy phục vụ LDAP này có lẽ dùng phiên bản LDAP cũ, không hỗ trợ tính năng " +"này, hoặc bị cấu hình sai. Hãy hỏi quản trị hệ thống về những cơ sở tìm kiếm " +"được hỗ trợ." + +#: ../addressbook/addressbook.error.xml.h:20 +#: ../addressbook/addressbook.error.xml.h:18 +msgid "This address book will be removed permanently." +msgstr "Sẽ xoá bỏ sổ địa chỉ này hoàn toàn." + +#: ../addressbook/addressbook.error.xml.h:21 +#: ../addressbook/addressbook.error.xml.h:19 +msgid "This addressbook could not be opened." +msgstr "Không thể mở sổ địa chỉ này."
+ +#: ../addressbook/addressbook.error.xml.h:22 +#: ../addressbook/addressbook.error.xml.h:20 +msgid "This addressbook server does not have any suggested search bases." +msgstr "Máy phục vụ sổ địa chỉ này không đề nghị cơ sở tìm kiếm nào." + +#: ../addressbook/addressbook.error.xml.h:23 +#: ../addressbook/addressbook.error.xml.h:21 +msgid "" +"This addressbook server might be unreachable or the server name may be " +"misspelled or your network connection could be down." +msgstr "" +"Không thể tiếp cận máy phục vụ sổ địa chỉ này, hoặc tên máy phục vụ đã gõ " +"sai, hoặc bị ngắt kết nối." + +#: ../addressbook/addressbook.error.xml.h:24 +#: ../addressbook/addressbook.error.xml.h:22 +msgid "This server does not support LDAPv3 schema information." +msgstr "Máy phục vụ này không hỗ trợ thông tin giản đồ LDAPv3." + +#: ../addressbook/addressbook.error.xml.h:25 +#: ../addressbook/addressbook.error.xml.h:23 +msgid "Unable to open addressbook" +msgstr "Không thể mở sổ địa chỉ." + +#: ../addressbook/addressbook.error.xml.h:26 +#: ../addressbook/addressbook.error.xml.h:24 +msgid "Unable to perform search." +msgstr "Không thực hiện được tìm kiếm." + +#: ../addressbook/addressbook.error.xml.h:27 +#: ../addressbook/addressbook.error.xml.h:25 +msgid "Unable to save {0}." +msgstr "Không thể lưu {0}." + +#: ../addressbook/addressbook.error.xml.h:28 +#: ../addressbook/addressbook.error.xml.h:26 +msgid "Would you like to save your changes?" +msgstr "Bạn có muốn lưu các thay đổi chứ?" + +#: ../addressbook/addressbook.error.xml.h:29 +#: ../addressbook/addressbook.error.xml.h:27 +msgid "" +"You are attempting to move a contact from one addressbook to another but it " +"cannot be removed from the source. Do you want to save a copy instead?" +msgstr "" +"Bạn đang cố di chuyển liên lạc từ sổ địa chỉ này sang sổ địa chỉ khác nhưng " +"mà không thể gỡ bỏ nó khỏi nguồn. Như thế thì bạn có muốn tạo một bản sao " +"thay vào đó không?" + +#: ../addressbook/addressbook.error.xml.h:30 +#: ../addressbook/addressbook.error.xml.h:28 +#: ../calendar/calendar.error.xml.h:59 +msgid "" +"You are connecting to an unsupported GroupWise server and may encounter " +"problems using Evolution. For best results the server should be upgraded to " +"a supported version" +msgstr "" +"Bạn đang kết nối đến một máy phục vụ Groupwise không được hỗ trợ thì có lẽ " +"sẽ gặp khó khăn sử dụng trình Evolution. Để được kết quả tốt nhất, bạn nên " +"nâng cấp máy phục vụ lên một phiên bản được hỗ trợ." + +#: ../addressbook/addressbook.error.xml.h:31 +#: ../addressbook/addressbook.error.xml.h:29 +msgid "" +"You have made modifications to this contact. Do you want to save these " +"changes?" +msgstr "Bạn đã chỉnh sửa liên lạc này, thì có muốn lưu các thay đổi lại chứ?" + +#: ../addressbook/addressbook.error.xml.h:32 +#: ../addressbook/addressbook.error.xml.h:30 +msgid "" +"Your contacts for {0} will not be available until Evolution is restarted." +msgstr "" +"Các liên lạc của bạn cho {0} không thể sử dụng cho tới khi khởi chạy lại " +"Evolution."
+ +#: ../addressbook/addressbook.error.xml.h:34 +#: ../addressbook/addressbook.error.xml.h:32 +msgid "_Discard" +msgstr "_Hủy bỏ" + +#: ../addressbook/addressbook.error.xml.h:35 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:76 +#: ../addressbook/addressbook.error.xml.h:33 +msgid "{0}" +msgstr "{0}" + +#: ../addressbook/addressbook.error.xml.h:36 +#: ../addressbook/addressbook.error.xml.h:34 +msgid "{1}" +msgstr "{1}" + +#: ../addressbook/conduit/address-conduit.c:300 +#: ../addressbook/conduit/address-conduit.c:298 +msgid "Default Sync Address:" +msgstr "Địa chỉ đồng bộ mặc định:" + +#: ../addressbook/conduit/address-conduit.c:1184 +msgid "Could not load addressbook" +msgstr "Không thể tải sổ địa chỉ." + +#: ../addressbook/conduit/address-conduit.c:1255 +msgid "Could not read pilot's Address application block" +msgstr "Không thể đọc khối ứng dụng Địa chỉ của pilot" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:1 +msgid "Autocompletion" +msgstr "Tự động hoàn tất" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:2 +msgid "C_ontacts" +msgstr "_Liên lạc" + +#: ../extensions/certificates/certificates.ephy-extension.in.in.h:1 +msgid "Certificates" +msgstr "Chứng nhận" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:4 +msgid "Configure autocomplete here" +msgstr "Cấu hình tự động hoàn tất ở đây" + +#: ../storage/exchange-hierarchy-foreign.c:251 +msgid "Contacts" +msgstr "Liên lạc" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:6 +msgid "Evolution Addressbook" +msgstr "Sổ địa chỉ Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:7 +msgid "Evolution Addressbook address pop-up" +msgstr "Bật lên địa chỉ của Sổ địa chỉ Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:8 +msgid "Evolution Addressbook address viewer" +msgstr "Khung xem địa chỉ của Sổ địa chỉ Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:9 +msgid "Evolution Addressbook card viewer" +msgstr "Khung xem thẻ của Sổ địa chỉ Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:10 +msgid "Evolution Addressbook component" +msgstr "Thành phần Sổ địa chỉ Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:11 +msgid "Evolution S/Mime Certificate Management Control" +msgstr "Điều khiển Quản lý Chứng nhận S/MIME Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:12 +msgid "Evolution folder settings configuration control" +msgstr "Điều khiển cấu hình thiết lập thư mục Evolution" + +#: ../addressbook/gui/component/GNOME_Evolution_Addressbook.server.in.in.h:13 +msgid "Manage your S/MIME certificates here" +msgstr "Quản lý chứng nhận S/MIME của bạn ở đây" + +#: ../Util/Evolution.cs:45 ../Util/Evolution.cs:163 +msgid "On This Computer" +msgstr "Trên máy này" + +#: ../48x48/emblems/emblem-personal.icon.in.h:1 ../data/browser.xml.h:66 +#: ../storage/sunone-folder-tree.c:297 +msgid "Personal" +msgstr "Cá nhân" + +#: ../addressbook/gui/component/addressbook-migrate.c:520 +msgid "On LDAP Servers" +msgstr "Trên máy phục vụ LDAP" + +#: ../addressbook/gui/component/addressbook-component.c:226 +msgid "_Contact" +msgstr "_Liên lạc" + +#: ../addressbook/gui/component/addressbook-component.c:227 +msgid "Create a new contact" +msgstr "Tạo liên lạc mới" + +#:
../addressbook/gui/component/addressbook-component.c:234 +msgid "Contact _List" +msgstr "_Danh sách liên lạc" + +#: ../addressbook/gui/component/addressbook-component.c:235 +msgid "Create a new contact list" +msgstr "Tạo danh sách liên lạc mới" + +#: ../addressbook/gui/component/addressbook-view.c:763 +msgid "New Address Book" +msgstr "Sổ địa chỉ mới" + +#: ../addressbook/gui/component/addressbook-component.c:242 +msgid "Address _Book" +msgstr "_Sổ địa chỉ" + +#: ../addressbook/gui/component/addressbook-component.c:243 +msgid "Create a new address book" +msgstr "Tạo sổ địa chỉ mới" + +#: ../addressbook/gui/component/addressbook-component.c:385 +msgid "Failed upgrading Addressbook settings or folders." +msgstr "Lỗi nâng cấp thiết lập Sổ địa chỉ hoặc thư mục." + +#: ../addressbook/gui/component/addressbook-config.c:329 +msgid "Base" +msgstr "Cơ sở" + +#: ../data/glade/resource-dialog.glade.h:14 ../src/drivel.glade.h:83 +msgid "_Type:" +msgstr "_Kiểu :" + +#: ../addressbook/gui/component/addressbook-config.c:634 +#: ../addressbook/gui/component/addressbook-config.c:607 +msgid "Copy book content locally for offline operation" +msgstr "Sao chép nội dung sổ về máy để phục vụ các thao tác ngoại tuyến" + +#: ../mail/importers/pine-importer.c:393 +msgid "Addressbook" +msgstr "Sổ địa chỉ" + +#: ../addressbook/gui/component/addressbook-config.c:986 src/common/text.c:642 +#: ../addressbook/gui/component/addressbook-config.c:907 src/common/text.c:646 +msgid "Server Information" +msgstr "Thông tin máy phục vụ" + +#: ../data/SoftwarePropertiesDialogs.glade.h:20 +msgid "Authentication" +msgstr "Xác thực" + +#: ../glom/mode_find/notebook_find.cc:28 +msgid "Details" +msgstr "Chi tiết" + +#: ../addressbook/gui/component/addressbook-config.c:992 +#: ../addressbook/gui/component/addressbook-config.c:913 +msgid "Searching" +msgstr "Đang tìm" + +#: ../addressbook/gui/component/addressbook-config.c:994 +msgid "Downloading" +msgstr "Đang tải về" + +#: ../addressbook/gui/component/ldap-config.glade.h:14 +msgid "Address Book Properties" +msgstr "Thuộc tính Sổ địa chỉ" + +#: ../calendar/gui/migration.c:142 ../mail/em-migrate.c:1190 +msgid "Migrating..." +msgstr "Đang nâng cấp..." + +#: ../storage/exchange-migrate.c:129 +#, c-format +msgid "Migrating `%s':" +msgstr "Đang nâng cấp « %s »:" + +#: ../addressbook/gui/component/addressbook-migrate.c:653 +#: ../addressbook/gui/component/addressbook-migrate.c:648 +msgid "LDAP Servers" +msgstr "Máy phục vụ LDAP" + +#: ../addressbook/gui/component/addressbook-migrate.c:768 +#: ../addressbook/gui/component/addressbook-migrate.c:763 +msgid "Autocompletion Settings" +msgstr "Thiết lập Tự động hoàn tất" + +#: ../addressbook/gui/component/addressbook-migrate.c:1143 +#: ../addressbook/gui/component/addressbook-migrate.c:1134 +msgid "" +"The location and hierarchy of the Evolution contact folders has changed " +"since Evolution 1.x.\n" +"\n" +"Please be patient while Evolution migrates your folders..." +msgstr "" +"Địa chỉ và cây thư mục liên lạc Evolution đã thay đổi so với Evolution 1.x.\n" +"\n" +"Hãy kiên nhẫn trong khi Evolution chuyển đổi các thư mục..." + +#: ../addressbook/gui/component/addressbook-migrate.c:1157 +#: ../addressbook/gui/component/addressbook-migrate.c:1148 +msgid "" +"The format of mailing list contacts has changed.\n" +"\n" +"Please be patient while Evolution migrates your folders..." +msgstr "" +"Định dạng của liên lạc hộp thư chung đã thay đổi.\n" +"\n" +"Hãy kiên nhẫn trong khi Evolution chuyển đổi các thư mục của bạn..."
+ +#: ../addressbook/gui/component/addressbook-migrate.c:1166 +#: ../addressbook/gui/component/addressbook-migrate.c:1157 +msgid "" +"The way Evolution stores some phone numbers has changed.\n" +"\n" +"Please be patient while Evolution migrates your folders..." +msgstr "" +"Cách Evolution lưu một phần số điện thoại đã thay đổi.\n" +"\n" +"Hãy kiên nhẫn trong khi Evolution chuyển đổi các thư mục của bạn..." + +#: ../addressbook/gui/component/addressbook-migrate.c:1176 +#: ../addressbook/gui/component/addressbook-migrate.c:1167 +msgid "" +"Evolution's Palm Sync changelog and map files have changed.\n" +"\n" +"Please be patient while Evolution migrates your Pilot Sync data..." +msgstr "" +"Các tập tin bản ghi thay đổi và bản đồ đều của Evolution Palm Sync (trình " +"đồng bộ hóa máy tính cầm tay chạy hệ thống Palm) đã thay đổi.\n" +"\n" +"Hãy kiên nhẫn trong khi Evolution chuyển đổi dữ liệu Pilot Sync..." + +#: ../addressbook/gui/component/addressbook-view.c:769 +msgid "_New Address Book" +msgstr "Sổ địa chỉ _mới" + +#: ../addressbook/gui/component/addressbook-view.c:1197 +#: ../addressbook/gui/component/addressbook-view.c:1144 +msgid "Contact Source Selector" +msgstr "Bộ chọn nguồn liên lạc" + +#: ../addressbook/gui/component/addressbook.c:99 +#: ../libedataserverui/e-book-auth-util.c:89 +msgid "Accessing LDAP Server anonymously" +msgstr "Truy cập vô danh tới máy phục vụ LDAP" + +#: ../libedataserverui/e-book-auth-util.c:185 +msgid "Failed to authenticate.\n" +msgstr "Lỗi xác thực.\n" + +#: ../calendar/libecal/e-cal.c:1650 ../libedataserverui/e-book-auth-util.c:192 +#, c-format +msgid "Enter password for %s (user %s)" +msgstr "Hãy gõ mật khẩu cho %s (người dùng %s)" + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:1 +msgid "Autocomplete length" +msgstr "Độ dài tự động hoàn tất" + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:2 +msgid "EFolderList XML for the list of completion URIs" +msgstr "" +"XML EFolderList (danh sách thư mục điện) cho danh sách các địa chỉ Mạng cần " +"gõ xong" + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:3 +msgid "EFolderList XML for the list of completion URIs." +msgstr "" +"XML EFolderList (danh sách thư mục điện) cho danh sách các địa chỉ Mạng cần " +"gõ xong." + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:4 +msgid "" +"Position of the vertical pane, between the card and list views and the " +"preview pane, in pixels." +msgstr "" +"Vị trí của ô cửa sổ dọc giữa khung xem thẻ và khung xem danh sách và ô cửa " +"sổ xem trước, theo điểm ảnh." + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:6 +msgid "" +"The number of characters that must be typed before Evolution will attempt to " +"autocomplete." +msgstr "Số ký tự cần gõ trước khi trình Evolution sẽ cố tự động hoàn tất." + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:7 +msgid "URI for the folder last used in the select names dialog" +msgstr "Địa chỉ Mạng cho thư mục đã dùng cuối cùng trong hộp thoại chọn tên." + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:8 +msgid "URI for the folder last used in the select names dialog." +msgstr "Địa chỉ Mạng cho thư mục đã dùng cuối cùng trong hộp thoại chọn tên."
+ +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:55 +msgid "Vertical pane position" +msgstr "Vị trí ô cửa sổ dọc" + +#: ../addressbook/gui/component/apps_evolution_addressbook.schemas.in.in.h:10 +msgid "Whether to show the preview pane." +msgstr "Có nên hiển thị ô cửa sổ xem trước hay không." + +#: ../gnopi/cmdmapui.c:182 +msgid "1" +msgstr "1" + +#: ../addressbook/gui/component/ldap-config.glade.h:2 +#: ../addressbook/gui/component/ldap-config.glade.h:3 +msgid "3268" +msgstr "3268" + +#: ../addressbook/gui/component/ldap-config.glade.h:3 +#: ../addressbook/gui/component/ldap-config.glade.h:4 +msgid "389" +msgstr "389" + +#: ../gnopi/cmdmapui.c:186 +msgid "5" +msgstr "5" + +#: ../addressbook/gui/component/ldap-config.glade.h:5 +#: ../addressbook/gui/component/ldap-config.glade.h:6 +msgid "636" +msgstr "636" + +#: ../addressbook/gui/component/ldap-config.glade.h:7 +msgid "Authentication" +msgstr "Xác thực" + +#: ../addressbook/gui/component/ldap-config.glade.h:7 +msgid "Display" +msgstr "Hiển thị" + +#: ../addressbook/gui/component/ldap-config.glade.h:8 +#: ../addressbook/gui/component/ldap-config.glade.h:9 +msgid "Downloading" +msgstr "Tải về" + +#: ../addressbook/gui/component/ldap-config.glade.h:9 +#: ../addressbook/gui/component/ldap-config.glade.h:10 +msgid "Searching" +msgstr "Tìm kiếm" + +#: ../addressbook/gui/component/ldap-config.glade.h:10 +#: ../addressbook/gui/component/ldap-config.glade.h:11 +msgid "Server Information" +msgstr "Thông tin máy phục vụ" + +#: ../addressbook/gui/component/ldap-config.glade.h:11 +msgid "Type:" +msgstr "Loại:" + +#: ../addressbook/gui/component/ldap-config.glade.h:15 +#: ../addressbook/gui/component/ldap-config.glade.h:16 +msgid "Anonymously" +msgstr "Vô danh" + +#: ../data/glade/song-info.glade.h:3 ../src/gnome-schedule.glade.h:16 +msgid "Basic" +msgstr "Cơ bản" + +#: ../addressbook/gui/component/ldap-config.glade.h:18 +#: ../addressbook/gui/component/ldap-config.glade.h:19 +msgid "Distinguished name" +msgstr "Tên phân biệt" + +#: ../addressbook/gui/component/ldap-config.glade.h:19 +#: ../addressbook/gui/component/ldap-config.glade.h:20 +msgid "Email address" +msgstr "Địa chỉ thư" + +#: ../addressbook/gui/component/ldap-config.glade.h:20 +#: ../addressbook/gui/component/ldap-config.glade.h:21 +msgid "" +"Evolution will use this email address to authenticate you with the server." +msgstr "" +"Evolution sẽ dùng địa chỉ thư điện tử này để xác thực bạn với máy phục vụ."
+ +#: ../addressbook/gui/component/ldap-config.glade.h:21 +#: ../addressbook/gui/component/ldap-config.glade.h:22 +msgid "Find Possible Search Bases" +msgstr "Tìm mọi cơ sở tìm có thể" + +#: ../addressbook/gui/component/ldap-config.glade.h:23 +#: ../addressbook/gui/component/ldap-config.glade.h:24 +msgid "Lo_gin:" +msgstr "Đăng _nhập:" + +#: ../addressbook/gui/component/ldap-config.glade.h:25 +#: ../addressbook/gui/component/ldap-config.glade.h:26 +msgid "One" +msgstr "Một" + +#: ../addressbook/gui/component/ldap-config.glade.h:26 +msgid "Search Filter" +msgstr "Bộ lọc tìm kiếm" + +#: ../addressbook/gui/component/ldap-config.glade.h:27 +msgid "Search _base:" +msgstr "_Cơ sở tìm:" + +#: ../addressbook/gui/component/ldap-config.glade.h:28 +msgid "Search _filter:" +msgstr "Bộ _lọc tìm kiếm:" + +#: ../addressbook/gui/component/ldap-config.glade.h:29 +msgid "Search filter" +msgstr "Bộ lọc tìm kiếm" + +#: ../addressbook/gui/component/ldap-config.glade.h:30 +msgid "" +"Search filter is the type of the objects searched for, while performing the " +"search. If this is not modified, by default search will be performed on " +"objectclass of the type \"person\"." +msgstr "" +"Bộ lọc tìm kiếm là kiểu đối tượng cần tìm kiếm. Nếu nó chưa được sửa đổi, " +"mặc định là hạng đối tượng kiểu « person » (người) sẽ được tìm kiếm." + +#: ../addressbook/gui/component/ldap-config.glade.h:31 +#: ../addressbook/gui/component/ldap-config.glade.h:28 +msgid "" +"Selecting this option means that Evolution will only connect to your LDAP " +"server if your LDAP server supports SSL or TLS." +msgstr "" +"Chọn tùy chọn này nghĩa là Evolution sẽ kết nối tới máy phục vụ LDAP của bạn " +"chỉ nếu máy phục vụ LDAP đó hỗ trợ SSL hoặc TLS." + +#: ../addressbook/gui/component/ldap-config.glade.h:32 +#: ../addressbook/gui/component/ldap-config.glade.h:29 +msgid "" +"Selecting this option means that Evolution will only try to use SSL/TLS if " +"you are in a insecure environment. For example, if you and your LDAP server " +"are behind a firewall at work, then Evolution doesn't need to use SSL/TLS " +"because your connection is already secure." +msgstr "" +"Chọn tùy chọn này nghĩa là Evolution sẽ cố dùng SSL/TLS chỉ nếu bạn trong " +"môi trường bất an. Ví dụ, nếu bạn và máy phục vụ LDAP của bạn nằm sau tường " +"lửa tại chỗ làm, thì Evolution sẽ không cần dùng SSL/TLS vì kết nối đã đủ an " +"toàn." + +#: ../addressbook/gui/component/ldap-config.glade.h:33 +#: ../addressbook/gui/component/ldap-config.glade.h:30 +msgid "" +"Selecting this option means that your server does not support either SSL or " +"TLS. This means that your connection will be insecure, and that you will be " +"vulnerable to security exploits. " +msgstr "" +"Chọn tùy chọn này nghĩa là máy phục vụ của bạn không hỗ trợ cả SSL lẫn TLS. " +"Điều này nghĩa là kết nối của bạn không an toàn, có thể bị lỗ hổng bảo mật." + +#: ../addressbook/gui/component/ldap-config.glade.h:34 +#: ../addressbook/gui/component/ldap-config.glade.h:31 +msgid "Sub" +msgstr "Con" + +#: ../addressbook/gui/component/ldap-config.glade.h:35 +#: ../addressbook/gui/component/ldap-config.glade.h:32 +msgid "Supported Search Bases" +msgstr "Cơ sở tìm được hỗ trợ" + +#: ../addressbook/gui/component/ldap-config.glade.h:36 +#: ../addressbook/gui/component/ldap-config.glade.h:33 +msgid "" +"The search base is the distinguished name (DN) of the entry where your " +"searches will begin. 
If you leave this blank, the search will begin at the " +"root of the directory tree." +msgstr "" +"Cơ sở tìm là tên phân biệt (TP) của mục, chỗ bắt đầu tìm kiếm. Nếu bạn bỏ " +"trống chỗ này, tìm kiếm sẽ được bắt đầu từ gốc cây thư mục." + +#: ../addressbook/gui/component/ldap-config.glade.h:37 +#: ../addressbook/gui/component/ldap-config.glade.h:34 +msgid "" +"The search scope defines how deep you would like the search to extend down " +"the directory tree. A search scope of \"sub\" will include all entries below " +"your search base. A search scope of \"one\" will only include the entries " +"one level beneath your base." +msgstr "" +"Phạm vi tìm kiếm cho biết độ sâu tìm kiếm đi xuống trong cây thư mục. Phạm " +"vi tìm kiếm « con » sẽ bao gồm mọi mục dưới cơ sở tìm. Phạm vi tìm kiếm « " +"một » sẽ chỉ tìm những mục nằm một mức độ dưới trong cơ sở tìm thôi." + +#: ../addressbook/gui/component/ldap-config.glade.h:38 +#: ../addressbook/gui/component/ldap-config.glade.h:35 +msgid "" +"This is the full name of your ldap server. For example, \"ldap.mycompany.com" +"\"." +msgstr "" +"Đây là tên đầy đủ của máy phục vụ LDAP. Ví dụ :\n" +"ldap.côngtytôi.com.vn" + +#: ../addressbook/gui/component/ldap-config.glade.h:39 +#: ../addressbook/gui/component/ldap-config.glade.h:36 +msgid "" +"This is the maximum number of entries to download. Setting this number to be " +"too large will slow down your address book." +msgstr "Đây là số mục tải về tối đa. Dùng số quá lớn sẽ làm chậm sổ địa chỉ." + +#: ../addressbook/gui/component/ldap-config.glade.h:40 +#: ../addressbook/gui/component/ldap-config.glade.h:37 +msgid "" +"This is the method Evolution will use to authenticate you. Note that " +"setting this to \"Email Address\" requires anonymous access to your ldap " +"server." +msgstr "" +"Đây là cách Evolution dùng để xác thực bạn. Chú ý rằng đặt cái này là\n" +"« Địa chỉ thư » yêu cầu truy cập vô danh tới máy phục vụ LDAP." + +#: ../addressbook/gui/component/ldap-config.glade.h:41 +#: ../addressbook/gui/component/ldap-config.glade.h:38 +msgid "" +"This is the name for this server that will appear in your Evolution folder " +"list. It is for display purposes only. " +msgstr "" +"Đây là tên máy phục vụ xuất hiện trong danh sách thư mục Evolution. Chỉ được " +"dùng với mục đích hiển thị thôi." + +#: ../addressbook/gui/component/ldap-config.glade.h:42 +#: ../addressbook/gui/component/ldap-config.glade.h:39 +msgid "" +"This is the port on the LDAP server that Evolution will try to connect to. A " +"list of standard ports has been provided. Ask your system administrator what " +"port you should specify." +msgstr "" +"Đây là số hiệu cổng của máy phục vụ LDAP mà Evolution sẽ cố kết nối đến. " +"Một danh sách các cổng chuẩn đã được cung cấp. Hãy hỏi quản trị hệ thống của " +"bạn để biết dùng cổng nào."
+ +#: ../addressbook/gui/component/ldap-config.glade.h:43 +#: ../addressbook/gui/component/ldap-config.glade.h:40 +msgid "Using distinguished name (DN)" +msgstr "Dùng tên phân biệt (TP)" + +#: ../addressbook/gui/component/ldap-config.glade.h:44 +#: ../addressbook/gui/component/ldap-config.glade.h:41 +msgid "Using email address" +msgstr "Dùng địa chỉ thư" + +#: ../mail/em-account-editor.c:301 +msgid "Whenever Possible" +msgstr "Bất cứ khi nào có thể" + +#: ../addressbook/gui/component/ldap-config.glade.h:46 +#: ../addressbook/gui/component/ldap-config.glade.h:43 +msgid "_Add Address Book" +msgstr "_Thêm Sổ địa chỉ" + +#: ../addressbook/gui/component/ldap-config.glade.h:47 +#: ../addressbook/gui/component/ldap-config.glade.h:44 +msgid "_Download limit:" +msgstr "_Ngưỡng tải về:" + +#: ../addressbook/gui/component/ldap-config.glade.h:48 +#: ../addressbook/gui/component/ldap-config.glade.h:45 +msgid "_Find Possible Search Bases" +msgstr "_Tìm mọi cơ sở tìm có thể" + +#: ../addressbook/gui/component/ldap-config.glade.h:49 +#: ../addressbook/gui/component/ldap-config.glade.h:46 +msgid "_Login method:" +msgstr "Cách đăng _nhập:" + +#: ../src/baobab-remote-connect-dialog.c:500 ../ui/muds.glade.h:53 +msgid "_Port:" +msgstr "_Cổng:" + +#: ../addressbook/gui/component/ldap-config.glade.h:52 +#: ../addressbook/gui/component/ldap-config.glade.h:49 +msgid "_Search scope:" +msgstr "_Phạm vi tìm:" + +#: ../capplets/mouse/gnome-mouse-properties.glade.h:26 +msgid "_Timeout:" +msgstr "_Thời hạn:" + +#: ../addressbook/gui/component/ldap-config.glade.h:55 +#: ../addressbook/gui/component/ldap-config.glade.h:52 +msgid "_Use secure connection:" +msgstr "Dùng kết nối _an toàn:" + +#: ../addressbook/gui/component/ldap-config.glade.h:56 +#: ../addressbook/gui/component/ldap-config.glade.h:53 +msgid "cards" +msgstr "thẻ" + +#: ../glade/search.glade.h:1 ../storage/sunone-permissions-dialog.glade.h:1 +#: ../storage/sunone-subscription-dialog.glade.h:1 po/silky-channel.glade.h:1 +msgid "*" +msgstr "*" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:2 +msgid "Email" +msgstr "Địa chỉ thư" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:3 +#: ../capplets/about-me/gnome-about-me.glade.h:4 +msgid "Home" +msgstr "Ở nhà" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:4 +#: ../capplets/about-me/gnome-about-me.glade.h:5 +msgid "Instant Messaging" +msgstr "Tin nhắn tức khắc" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:5 +msgid "Job" +msgstr "Công việc" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:6 +msgid "Miscellaneous" +msgstr "Linh tinh" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:7 +msgid "Other" +msgstr "Khác" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:8 +msgid "Telephone" +msgstr "Điện thoại" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:9 +msgid "Web Addresses" +msgstr "Địa chỉ Mạng" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:10 +#: ../capplets/about-me/gnome-about-me.glade.h:10 +msgid "Work" +msgstr "Chỗ làm" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:184 +msgid "AIM" +msgstr "AIM" + +#: ../addressbook/gui/widgets/e-minicard.c:182 ../main.c:586 ../main.c:1369 +#: ../main.c:1435 ../mimedir/mimedir-vcomponent.c:386 +msgid "Contact" +msgstr "Liên lạc" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2349 +msgid "Contact Editor" +msgstr "Bộ hiệu chỉnh liên lạc" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:14 +msgid
"Full _Name..." +msgstr "_Há» tên..." + +#: ../src/Database.cs:804 ../src/Database.cs:823 ../src/Database.cs:841 +#: ../app/interface.c:122 ../app/interface.c:123 +#: ../glade/gbwidgets/gbimage.c:648 ../glom/data_structure/field.cc:558 +#: ../src/glade-gtk.c:2359 ../widgets/gtk+.xml.in.h:100 +#: ../src/orca/rolenames.py:278 +msgid "Image" +msgstr "Ảnh" + +#: ../addressbook/gui/contact-editor/e-contact-editor-im.c:66 +msgid "MSN Messenger" +msgstr "Tin nhắn MSN" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:17 +msgid "Mailing Address" +msgstr "Äịa chỉ thÆ° tín" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:18 +msgid "Ni_ckname:" +msgstr "Tên _hiệu :" + +#: ../addressbook/gui/contact-editor/e-contact-editor-im.c:63 +msgid "Novell Groupwise" +msgstr "Phần má»m nhóm Novell" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:21 +#: ../src/prefs.c:771 +msgid "Personal Information" +msgstr "Thông tin cá nhân" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:22 +#: ../sheets/network.sheet.in.h:33 Expense/expense.c:585 +#: Expense/expense.c:1434 +msgid "Telephone" +msgstr "Äiện thoại" + +#: ../src/planner-task-view.c:327 ../gncal/todo-categories.c:182 +#: ../ui/user_info.glade.h:67 ../mimedir/mimedir-vcard-address.c:252 +#: ../mimedir/mimedir-vcard-email.c:169 ../mimedir/mimedir-vcard-phone.c:166 +msgid "Work" +msgstr "Chá»— làm" + +#: ../gnomecard/card-editor.glade.h:42 ../pan/dialogs/dialog-newuser.c:389 +#: ../pan/dialogs/dialog-newuser.c:421 ../pan/server-ui.c:333 +msgid "_Address:" +msgstr "_Äịa chỉ:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:25 +msgid "_Anniversary:" +msgstr "_Ká»· niệm:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:26 +msgid "_Assistant:" +msgstr "_Phụ tá:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:27 +#: ../ui/user_info.glade.h:71 +msgid "_Birthday:" +msgstr "Ngày _sinh:" + +#: ../data/glade/project-properties.glade.h:7 +msgid "_Calendar:" +msgstr "_Lịch:" + +#: ../ui/evolution-event-editor.xml.h:27 ../ui/evolution-task-editor.xml.h:18 +#: ../glade/straw.glade.h:69 +msgid "_Categories" +msgstr "_Phân loại" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:30 +#: ../ui/user_info.glade.h:72 +msgid "_City:" +msgstr "_Phố :" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:31 +#: ../ui/user_info.glade.h:73 +msgid "_Company:" +msgstr "_Công ty:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:32 +msgid "_Country:" +msgstr "_Quốc gia:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:33 +#: ../ui/user_info.glade.h:74 +msgid "_Department:" +msgstr "_CÆ¡ quan:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:34 +msgid "_File under:" +msgstr "_Tập tin trong:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:35 +msgid "_Free/Busy:" +msgstr "_Rảnh/Bận:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:36 +#: ../ui/user_info.glade.h:79 +msgid "_Home Page:" +msgstr "Trang _chủ :" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:37 +#: ../data/glade/project-properties.glade.h:8 +msgid "_Manager:" +msgstr "Nhà _quản lý:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:38 +msgid "_Notes:" +msgstr "_Ghi chú :" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:39 +msgid "_Office:" +msgstr "_Văn phòng:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:40 +#: ../addressbook/gui/contact-editor/fulladdr.glade.h:7 +msgid "_PO 
Box:" +msgstr "Há»™p _bÆ°u Ä‘iện:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:41 +#: ../capplets/about-me/gnome-about-me.glade.h:48 +msgid "_Profession:" +msgstr "_Nghá» nghiệp:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:42 +msgid "_Spouse:" +msgstr "_Vợ chồng:" + +#: ../capplets/about-me/gnome-about-me.glade.h:50 +msgid "_State/Province:" +msgstr "_Tỉnh/Bang:" + +#: ../plug-ins/imagemap/imap_settings.c:102 ../glade/straw.glade.h:94 +#: ../src/dialog-win.cc:62 ../src/form-editor/form-prop.cc:55 +msgid "_Title:" +msgstr "_Tá»±a:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:45 +msgid "_Video Chat:" +msgstr "Trò chuyện ảnh _Ä‘á»™ng:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:46 +msgid "_Wants to receive HTML mail" +msgstr "_Muốn nhận thÆ° loại HTML" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:47 +msgid "_Web Log:" +msgstr "_Nhật ký Mạng:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:48 +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:13 +msgid "_Where:" +msgstr "_NÆ¡i:" + +#: ../addressbook/gui/contact-editor/contact-editor.glade.h:49 +msgid "_Zip/Postal Code:" +msgstr "Mã _bÆ°u Ä‘iện:" + +#: ../gnome-netinfo/lookup.c:308 ../libgnetwork/gnetwork-tcp-connection.c:1368 +#: address_gui.c:2783 prefs_gui.c:370 ../mimedir/mimedir-vcard-email.c:142 +#: ../mimedir/mimedir-vcard.c:368 +msgid "Address" +msgstr "Äịa chỉ" + +#: ../widgets/text/e-text.c:3585 +msgid "Editable" +msgstr "Có thể sá»­a" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:136 +#: Expense/expense.c:132 +msgid "United States" +msgstr "Mỹ" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:138 +#: ../src/util.c:27 +msgid "Afghanistan" +msgstr "A Phú Hãn" + +#: ../boards/geography/board3_1.xml.in.h:1 src/common/util.c:827 +#: ../src/util.c:28 +msgid "Albania" +msgstr "An-ba-ni" + +#: ../boards/geography/board4_2.xml.in.h:3 src/common/util.c:885 +#: ../src/util.c:29 +msgid "Algeria" +msgstr "An-giê-ri" + +#: src/common/util.c:834 ../src/util.c:30 +msgid "American Samoa" +msgstr "Xa-mô-a Mỹ" + +#: src/common/util.c:822 ../src/util.c:31 +msgid "Andorra" +msgstr "An-Ä‘oa-ra" + +#: ../src/util.c:32 +msgid "Angola" +msgstr "An-gô-la" + +#: src/common/util.c:826 ../src/util.c:33 +msgid "Anguilla" +msgstr "Ä‚ng-ouí-la" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:145 +msgid "Antarctica" +msgstr "Nam-cá»±c" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:145 +msgid "Antigua And Barbuda" +msgstr "An-ti-gu-a và Ba-bu-Ä‘a" + +#: ../src/util.c:35 +msgid "Argentina" +msgstr "Ä‚-gienh-ti-nạ" + +#: src/common/util.c:828 ../src/util.c:36 +msgid "Armenia" +msgstr "Ac-mê-ni" + +#: src/common/util.c:838 ../src/util.c:37 +msgid "Aruba" +msgstr "Ä‚-ru-ba" + +#: src/common/util.c:837 ../src/util.c:39 Expense/expense.c:99 +msgid "Australia" +msgstr "Úc" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:151 +#: ../src/util.c:41 Expense/expense.c:100 +msgid "Austria" +msgstr "Ão" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:152 +#: ../src/util.c:42 +msgid "Azerbaijan" +msgstr "A-dợ-bai-sanh" + +#: ../boards/geography/board2_0.xml.in.h:2 src/common/util.c:854 +#: ../src/util.c:43 +msgid "Bahamas" +msgstr "Ba-ha-ma" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:154 +#: ../src/util.c:44 +msgid "Bahrain" +msgstr "Bah-reinh" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:155 
+#: ../src/util.c:45 +msgid "Bangladesh" +msgstr "Bang-la-đe-xợ" + +#: src/common/util.c:841 ../src/util.c:46 +msgid "Barbados" +msgstr "Bác-ba-đốt" + +#: ../src/util.c:48 +msgid "Belarus" +msgstr "Be-la-ru-xợ" + +#: ../boards/geography/board3_0.xml.in.h:2 src/common/util.c:843 +#: ../src/util.c:49 Expense/expense.c:101 +msgid "Belgium" +msgstr "Bỉ" + +#: src/common/util.c:859 ../src/util.c:50 +msgid "Belize" +msgstr "Bê-li-xê" + +#: ../boards/geography/board4_2.xml.in.h:5 src/common/util.c:849 +#: ../src/util.c:51 +msgid "Benin" +msgstr "Bê-ninh" + +#: src/common/util.c:850 ../src/util.c:52 +msgid "Bermuda" +msgstr "Be-mư-đa" + +#: src/common/util.c:855 ../src/util.c:53 +msgid "Bhutan" +msgstr "Bu-thăn" + +#: ../boards/geography/board2_1.xml.in.h:2 src/common/util.c:852 +#: ../src/util.c:54 +msgid "Bolivia" +msgstr "Bô-li-vi-a" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:164 +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:163 +msgid "Bosnia And Herzegowina" +msgstr "Boxợ-ni-a và He-de-go-vi-nạ" + +#: ../boards/geography/board4_2.xml.in.h:6 src/common/util.c:857 +#: ../src/util.c:56 +msgid "Botswana" +msgstr "Bốt-xoa-na" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:166 +msgid "Bouvet Island" +msgstr "Đảo Bu-vê" + +#: ../boards/geography/board2_1.xml.in.h:3 src/common/util.c:853 +#: ../src/util.c:57 Expense/expense.c:102 +msgid "Brazil" +msgstr "Bra-xin" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:168 +msgid "British Indian Ocean Territory" +msgstr "Miền Đại dương Ấn-độ quốc Anh" + +#: src/common/util.c:851 +msgid "Brunei Darussalam" +msgstr "Bợru-này Đa-ru-xa-làm" + +#: ../src/util.c:60 +msgid "Bulgaria" +msgstr "Bảo-gia-lợi" + +#: ../boards/geography/board4_2.xml.in.h:7 src/common/util.c:844 +#: ../src/util.c:61 +msgid "Burkina Faso" +msgstr "Buốc-khi-na Pha-xô" + +#: ../boards/geography/board4_2.xml.in.h:8 src/common/util.c:847 +#: ../src/util.c:62 +msgid "Burundi" +msgstr "Bu-run-đi" + +#: ../src/util.c:63 +msgid "Cambodia" +msgstr "Căm Bốt" + +#: ../src/util.c:64 +msgid "Cameroon" +msgstr "Ca-mơ-run" + +#: ../boards/geography/board2_0.xml.in.h:3 src/common/util.c:860 +#: ../src/util.c:65 Expense/expense.c:103 +msgid "Canada" +msgstr "Ca-na-đa" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:176 +msgid "Cape Verde" +msgstr "Cáp-ve-đẹ" + +#: src/common/util.c:950 ../src/util.c:67 +msgid "Cayman Islands" +msgstr "Quần đảo Cay-mạn" + +#: ../src/util.c:68 +msgid "Central African Republic" +msgstr "Cộng hòa Trung Phi" + +#: ../src/util.c:69 +msgid "Chad" +msgstr "Chê-đh" + +#: ../src/util.c:70 +msgid "Chile" +msgstr "Chi-lê" + +#: ../src/util.c:71 +msgid "China" +msgstr "Trung Quốc" + +#: src/common/util.c:877 ../src/util.c:72 +msgid "Christmas Island" +msgstr "Đảo Kh-ri-x-mạ-x" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:182 +msgid "Cocos (Keeling) Islands" +msgstr "Quần đảo Co-co-x (Khi-lịng)" + +#: ../src/util.c:74 +msgid "Colombia" +msgstr "Cô-lôm-bi-a" + +#: ../src/util.c:75 +msgid "Comoros" +msgstr "Co-mo-ro-xợ" + +#: src/common/util.c:864 ../src/util.c:76 +msgid "Congo" +msgstr "Công-gô" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:186 +msgid "Congo, The Democratic Republic Of The" +msgstr "Cộng hoà Dân chủ Công-gô" + +#: src/common/util.c:867 ../src/util.c:77 +msgid "Cook Islands" +msgstr "Quần đảo Khu-kh" + +#: src/common/util.c:873 ../src/util.c:78 +msgid "Costa Rica" +msgstr "Cốt-x-tha Ri-ca" + +#: 
../addressbook/gui/contact-editor/e-contact-editor-address.c:190 +msgid "Cote d'Ivoire" +msgstr "Cót đi vouă" + +#: ../src/util.c:79 +msgid "Croatia" +msgstr "Cợ-rô-a-ti-a" + +#: ../boards/geography/board2_0.xml.in.h:4 src/common/util.c:875 +#: ../src/util.c:80 +msgid "Cuba" +msgstr "Cu-ba" + +#: ../src/util.c:81 +msgid "Cyprus" +msgstr "Síp" + +#: src/common/util.c:879 ../src/util.c:82 +msgid "Czech Republic" +msgstr "Cộng hòa Séc" + +#: ../src/util.c:84 Expense/expense.c:104 +msgid "Denmark" +msgstr "Đan-mạch" + +#: ../src/util.c:86 +msgid "Djibouti" +msgstr "Gi-bu-ti" + +#: src/common/util.c:883 ../src/util.c:87 +msgid "Dominica" +msgstr "Đô-mi-ni-cạ" + +#: src/common/util.c:884 ../src/util.c:88 +msgid "Dominican Republic" +msgstr "Cộng hòa Đô-mi-ni-cạ" + +#: ../boards/geography/board2_1.xml.in.h:6 src/common/util.c:886 +#: ../src/util.c:89 +msgid "Ecuador" +msgstr "Ê-cu-a-đoa" + +#: ../src/util.c:90 +msgid "Egypt" +msgstr "Ai-cập" + +#: src/common/util.c:1039 ../src/util.c:91 +msgid "El Salvador" +msgstr "En-san-va-đoa" + +#: ../src/util.c:92 +msgid "Equatorial Guinea" +msgstr "Ghi-nê Xích-đạo" + +#: ../boards/geography/board4_2.xml.in.h:16 src/common/util.c:891 +#: ../src/util.c:93 +msgid "Eritrea" +msgstr "Ê-ri-tơ-rê-a" + +#: ../boards/geography/board3_1.xml.in.h:9 src/common/util.c:888 +#: ../src/util.c:94 +msgid "Estonia" +msgstr "E-xtô-ni-a" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:205 +#: ../src/util.c:95 +msgid "Ethiopia" +msgstr "Ê-ti-ô-pi-a" + +#: ../src/util.c:98 +msgid "Falkland Islands" +msgstr "Quần đảo Phoa-kh-lận" + +#: src/common/util.c:898 +msgid "Faroe Islands" +msgstr "Quần đảo Pha-rô" + +#: src/common/util.c:895 +msgid "Fiji" +msgstr "Phi-gi" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:209 +#: ../src/util.c:100 Expense/expense.c:106 +msgid "Finland" +msgstr "Phần-lan" + +#: ../boards/geography/board3_0.xml.in.h:5 src/common/util.c:899 +#: ../src/util.c:101 Expense/expense.c:107 +msgid "France" +msgstr "Pháp" + +#: src/common/util.c:905 ../src/util.c:103 +msgid "French Guiana" +msgstr "Ghi-a-na Pháp" + +#: src/common/util.c:1004 ../src/util.c:104 +msgid "French Polynesia" +msgstr "Pô-li-nê-di Pháp" + +#: src/common/util.c:1044 +msgid "French Southern Territories" +msgstr "Miền Nam Pháp" + +#: ../boards/geography/board4_2.xml.in.h:17 src/common/util.c:901 +#: ../src/util.c:105 +msgid "Gabon" +msgstr "Ga-bông" + +#: ../boards/geography/board4_2.xml.in.h:18 src/common/util.c:910 +#: ../src/util.c:106 +msgid "Gambia" +msgstr "Găm-bi-a" + +#: src/common/util.c:904 ../src/util.c:107 +msgid "Georgia" +msgstr "Gi-oa-gi-a" + +#: ../boards/geography/board3_0.xml.in.h:6 src/common/util.c:880 +#: ../src/util.c:108 Expense/expense.c:108 +msgid "Germany" +msgstr "Đức" + +#: ../src/util.c:109 +msgid "Ghana" +msgstr "Gă-na" + +#: src/common/util.c:908 ../src/util.c:110 +msgid "Gibraltar" +msgstr "Gi-boa-tha" + +#: ../src/util.c:111 +msgid "Greece" +msgstr "Hy-lạp" + +#: ../src/util.c:112 +msgid "Greenland" +msgstr "Đảo băng" + +#: ../src/util.c:113 +msgid "Grenada" +msgstr "Gợ-rê-nă-đa" + +#: src/common/util.c:913 ../src/util.c:114 +msgid "Guadeloupe" +msgstr "Gu-a-đe-luc" + +#: src/common/util.c:918 ../src/util.c:115 +msgid "Guam" +msgstr "Gu-ăm" + +#: src/common/util.c:917 ../src/util.c:117 +msgid "Guatemala" +msgstr "Gua-tê-ma-la" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:226 +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:225 +msgid "Guernsey" +msgstr "Gơnh-di" + +#: 
../boards/geography/board4_2.xml.in.h:20 src/common/util.c:911 +#: ../src/util.c:118 +msgid "Guinea" +msgstr "Ghi-nê" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:228 +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:227 +msgid "Guinea-bissau" +msgstr "Ghi-nê-bi-sau" + +#: ../boards/geography/board2_1.xml.in.h:8 src/common/util.c:920 +#: ../src/util.c:120 +msgid "Guyana" +msgstr "Guy-a-na" + +#: ../boards/geography/board2_0.xml.in.h:7 src/common/util.c:925 +#: ../src/util.c:121 +msgid "Haiti" +msgstr "Ha-i-ti" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:230 +msgid "Heard And McDonald Islands" +msgstr "Quần đảo Hơd và Mợc-đo-nợd" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:231 +msgid "Holy See" +msgstr "Toà thánh" + +#: src/common/util.c:923 ../src/util.c:122 +msgid "Honduras" +msgstr "Hôn-đu-rát" + +#: ../src/util.c:123 Expense/expense.c:109 +msgid "Hong Kong" +msgstr "Hồng Kông" + +#: ../src/util.c:124 +msgid "Hungary" +msgstr "Hung-gia-lợi" + +#: ../src/util.c:130 Expense/expense.c:110 +msgid "Iceland" +msgstr "Băng-đảo" + +#: src/common/util.c:930 ../src/util.c:131 Expense/expense.c:111 +msgid "India" +msgstr "Ấn-độ" + +#: src/common/util.c:927 ../src/util.c:132 Expense/expense.c:112 +#, fuzzy +msgid "Indonesia" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"Nam Dương\n" +"#-#-#-#-# jpilot-0.99.8-pre12.vi.po (jpilot-0.99.8-pre12) #-#-#-#-#\n" +"Nam-dương" + +#: ../src/util.c:134 +msgid "Iran" +msgstr "Ba-tư" + +#: src/common/util.c:934 ../src/util.c:135 +msgid "Iraq" +msgstr "I-rắc" + +#: ../src/util.c:136 Expense/expense.c:113 +msgid "Ireland" +msgstr "Ái-nhĩ-lan" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:241 +msgid "Isle of Man" +msgstr "Đảo Man" + +#: src/common/util.c:929 ../src/util.c:137 +msgid "Israel" +msgstr "Do-thái" + +#: ../src/util.c:138 Expense/expense.c:114 +msgid "Italy" +msgstr "Ý" + +#: ../src/util.c:140 +msgid "Jamaica" +msgstr "Gia-mê-ca" + +#: src/common/util.c:940 ../src/util.c:141 Expense/expense.c:115 +msgid "Japan" +msgstr "Nhật-bản" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:246 +msgid "Jersey" +msgstr "Chơ-di" + +#: src/common/util.c:939 ../src/util.c:142 +msgid "Jordan" +msgstr "Gi-oa-đan" + +#: src/common/util.c:951 ../src/util.c:143 +msgid "Kazakhstan" +msgstr "Ca-da-kh-x-than" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:250 +#: ../src/util.c:144 +msgid "Kenya" +msgstr "Khi-ni-a" + +#: src/common/util.c:944 +msgid "Kiribati" +msgstr "Ki-ri-ba-ti" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:252 +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:251 +msgid "Korea, Democratic People's Republic Of" +msgstr "Cộng hoà Nhân dân Dân chủ Triều tiên" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:253 +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:252 +msgid "Korea, Republic Of" +msgstr "Cộng hoà Triều tiên" + +#: src/common/util.c:949 ../src/util.c:148 +msgid "Kuwait" +msgstr "Cu-ouai-th" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:255 +msgid "Kyrgyzstan" +msgstr "Cơ-chi-x-tănh" + +#: src/common/util.c:952 ../src/util.c:150 +msgid "Laos" +msgstr "Lào" + +#: ../src/util.c:151 +msgid "Latvia" +msgstr "Lát-vi-a" + +#: src/common/util.c:953 ../src/util.c:152 +msgid "Lebanon" +msgstr "Le-ba-non" + +#: ../src/util.c:153 +msgid "Lesotho" +msgstr "Le-xô-tô" + +#: ../src/util.c:154 +msgid 
"Liberia" +msgstr "Li-bê-ri-a" + +#: ../src/util.c:155 +msgid "Libya" +msgstr "Li-bi-a" + +#: ../src/util.c:156 +msgid "Liechtenstein" +msgstr "Likh-ten-sợ-tâynh" + +#: ../src/util.c:157 +msgid "Lithuania" +msgstr "Li-tu-a-ni" + +#: ../src/util.c:158 Expense/expense.c:117 +msgid "Luxembourg" +msgstr "Lúc-xăm-buac" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:264 +msgid "Macao" +msgstr "Ma-cao" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:266 +msgid "Macedonia" +msgstr "Ma-xe-đô-ni-a" + +#: ../src/util.c:160 +msgid "Madagascar" +msgstr "Ma-Ä‘a-ga-x-că" + +#: ../src/util.c:161 +msgid "Malawi" +msgstr "Ma-la-uy" + +#: src/common/util.c:984 ../src/util.c:162 Expense/expense.c:118 +msgid "Malaysia" +msgstr "Ma-lay-xi-a" + +#: src/common/util.c:981 ../src/util.c:163 +msgid "Maldives" +msgstr "Mal-Ä‘i-vợx" + +#: ../boards/geography/board4_2.xml.in.h:29 src/common/util.c:971 +#: ../src/util.c:164 +msgid "Mali" +msgstr "Ma-li" + +#: ../src/util.c:165 +msgid "Malta" +msgstr "Moa-ta" + +#: src/common/util.c:968 ../src/util.c:166 +msgid "Marshall Islands" +msgstr "Quần đảo Mác-san" + +#: src/common/util.c:976 ../src/util.c:167 +msgid "Martinique" +msgstr "Mác-thi-ni-kh" + +#: ../src/util.c:168 +msgid "Mauritania" +msgstr "Mô-ri-ta-ni-a" + +#: ../src/util.c:169 +msgid "Mauritius" +msgstr "Mâu-ri-sÆ¡-x" + +#: src/common/util.c:1075 +msgid "Mayotte" +msgstr "May-oth" + +#: ../src/util.c:171 Expense/expense.c:119 +msgid "Mexico" +msgstr "Mê-hi-cô" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:279 +msgid "Micronesia" +msgstr "Mi-cợ-rô-nê-xi-a" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:279 +msgid "Moldova, Republic Of" +msgstr "Cá»™ng Hòa Mon-đô-va" + +#: src/common/util.c:964 ../src/util.c:174 +msgid "Monaco" +msgstr "Mô-na-cô" + +#: ../src/util.c:175 +msgid "Mongolia" +msgstr "Mông-cổ" + +#: src/common/util.c:978 ../src/util.c:176 +msgid "Montserrat" +msgstr "Mon-xe-rạc" + +#: src/common/util.c:963 ../src/util.c:177 +msgid "Morocco" +msgstr "Ma-rốc" + +#: ../src/util.c:178 +msgid "Mozambique" +msgstr "Mô-dăm-bích" + +#: ../src/util.c:179 +msgid "Myanmar" +msgstr "Miến-Ä‘iện" + +#: ../src/util.c:180 +msgid "Namibia" +msgstr "Na-mi-bi-a" + +#: src/common/util.c:996 ../src/util.c:181 +msgid "Nauru" +msgstr "Nau-ru" + +#: ../src/gcompris/config.c:89 src/common/util.c:995 ../src/util.c:182 +msgid "Nepal" +msgstr "Nê-pan" + +#: src/common/util.c:993 ../src/util.c:184 Expense/expense.c:120 +msgid "Netherlands" +msgstr "Hoà-lan" + +#: ../src/util.c:183 +msgid "Netherlands Antilles" +msgstr "An-thi-le-x Hoà-lan" + +#: src/common/util.c:987 ../src/util.c:186 +msgid "New Caledonia" +msgstr "Niu Ca-lê-đô-ni-a" + +#: src/common/util.c:999 ../src/util.c:187 Expense/expense.c:121 +msgid "New Zealand" +msgstr "Niu Di-lân" + +#: src/common/util.c:992 ../src/util.c:188 +msgid "Nicaragua" +msgstr "Ni-ca-ra-gua" + +#: ../boards/geography/board4_2.xml.in.h:34 src/common/util.c:988 +#: ../src/util.c:189 +msgid "Niger" +msgstr "Ni-giê" + +#: ../boards/geography/board4_2.xml.in.h:35 src/common/util.c:991 +#: ../src/util.c:190 +msgid "Nigeria" +msgstr "Ni-giê-ri-a" + +#: src/common/util.c:998 ../src/util.c:191 +msgid "Niue" +msgstr "Ni-u-e" + +#: src/common/util.c:990 ../src/util.c:192 +msgid "Norfolk Island" +msgstr "Äảo Noa-phá»±c" + +#: src/common/util.c:975 +msgid "Northern Mariana Islands" +msgstr "Quần đảo Ma-ri-a-na Bắc" + +#: ../src/util.c:193 Expense/expense.c:122 +msgid "Norway" +msgstr "Na-uy" + +#: src/common/util.c:1000 
../src/util.c:194 +msgid "Oman" +msgstr "Ô-man" + +#: src/common/util.c:1007 ../src/util.c:195 +msgid "Pakistan" +msgstr "Ba-ki-x-thănh" + +#: src/common/util.c:1014 ../src/util.c:196 +msgid "Palau" +msgstr "Ba-lau" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:304 +msgid "Palestinian Territory" +msgstr "Lãnh thổ Pa-le-x-tính" + +#: src/common/util.c:1002 ../src/util.c:197 +msgid "Panama" +msgstr "Ba-na-ma" + +#: src/common/util.c:1005 ../src/util.c:198 +msgid "Papua New Guinea" +msgstr "Pa-pu-a Niu Ghi-nê" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:307 +#: src/common/util.c:1015 ../src/util.c:199 +msgid "Paraguay" +msgstr "Ba-ra-guay" + +#: ../boards/geography/board2_1.xml.in.h:11 src/common/util.c:1003 +#: ../src/util.c:200 +msgid "Peru" +msgstr "Pê-ru" + +#: src/common/util.c:1006 ../src/util.c:201 Expense/expense.c:124 +msgid "Philippines" +msgstr "Phi-luật-tân" + +#: src/common/util.c:1010 +msgid "Pitcairn" +msgstr "Bi-th-khenh" + +#: ../boards/geography/board3_1.xml.in.h:16 src/common/util.c:1008 +#: ../src/util.c:202 +msgid "Poland" +msgstr "Ba-lan" + +#: ../src/util.c:203 +msgid "Portugal" +msgstr "Bồ-đào-nha" + +#: src/common/util.c:1011 ../src/util.c:204 +msgid "Puerto Rico" +msgstr "Bu-éc-thô Ri-cô" + +#: src/common/util.c:1016 ../src/util.c:205 +msgid "Qatar" +msgstr "Ca-tă" + +#: src/common/util.c:1017 +msgid "Reunion" +msgstr "Rê-u-ni-ợnh" + +#: ../src/util.c:207 +msgid "Romania" +msgstr "Lỗ-má-ni" + +#: src/common/util.c:1020 +msgid "Russian Federation" +msgstr "Liên bang Nga" + +#: ../boards/geography/board4_2.xml.in.h:36 src/common/util.c:1021 +#: ../src/util.c:210 +msgid "Rwanda" +msgstr "Ru-oanh-đa" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:318 +msgid "Saint Kitts And Nevis" +msgstr "Xan Khi-th-x và Ne-vi-x" + +#: ../src/util.c:211 +msgid "Saint Lucia" +msgstr "Xan Lu-xi-a" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:320 +msgid "Saint Vincent And The Grenadines" +msgstr "Xan Vinh-xen và Gợ-re-na-đính" + +#: src/common/util.c:1073 +msgid "Samoa" +msgstr "Xa-moa" + +#: src/common/util.c:1033 ../src/util.c:213 +msgid "San Marino" +msgstr "Xan Ma-ri-nô" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:323 +msgid "Sao Tome And Principe" +msgstr "Xao Tô-mê và Pợ-rinh-xi-pê" + +#: src/common/util.c:1022 ../src/util.c:215 +msgid "Saudi Arabia" +msgstr "A-rập Xau-đi" + +#: ../boards/geography/board4_2.xml.in.h:37 src/common/util.c:1034 +msgid "Senegal" +msgstr "Xê-nê-gan" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:326 +msgid "Serbia And Montenegro" +msgstr "Xéc-bi và Mon-the-nê-gợ-rô" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:328 +msgid "Seychelles" +msgstr "Xê-sen" + +#: ../src/util.c:218 +msgid "Sierra Leone" +msgstr "Xi-ê-ra Lê-ôn" + +#: src/common/util.c:1027 ../src/util.c:219 Expense/expense.c:125 +msgid "Singapore" +msgstr "Xin-ga-po" + +#: ../boards/geography/board3_1.xml.in.h:20 +msgid "Slovakia" +msgstr "Xlô-vác" + +#: ../boards/geography/board3_1.xml.in.h:21 src/common/util.c:1029 +#: ../src/util.c:221 +msgid "Slovenia" +msgstr "Xlô-ven" + +#: ../src/util.c:222 +msgid "Solomon Islands" +msgstr "Quần đảo Xô-lô-mông" + +#: ../src/util.c:223 +msgid "Somalia" +msgstr "Xo-ma-li" + +#: ../boards/geography/board4_2.xml.in.h:40 src/common/util.c:1077 +#: ../src/util.c:224 +msgid "South Africa" +msgstr "Nam Phi" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:335 +msgid "South Georgia And The South
Sandwich Islands" +msgstr "Quần đảo Gi-oa-gi-a và Nam Xan-oui-ch" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:337 +#: ../src/util.c:225 Expense/expense.c:126 +msgid "Spain" +msgstr "Tây-ban-nha" + +#: ../src/util.c:226 +msgid "Sri Lanka" +msgstr "Tích-lan" + +#: ../src/util.c:227 +msgid "St. Helena" +msgstr "Xan He-lê-na" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:339 +msgid "St. Pierre And Miquelon" +msgstr "Xan Pi-e và Mi-quê-lon" + +#: ../src/util.c:231 +msgid "Sudan" +msgstr "Xu-đănh" + +#: ../src/util.c:232 +msgid "Suriname" +msgstr "Xu-ri-năm" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:342 +msgid "Svalbard And Jan Mayen Islands" +msgstr "Quần đảo X-văn-băn và Dăn May-en" + +#: ../src/util.c:233 +msgid "Swaziland" +msgstr "Xouă-di-lạn" + +#: ../src/util.c:234 Expense/expense.c:127 +msgid "Sweden" +msgstr "Thụy-điển" + +#: ../src/util.c:235 Expense/expense.c:128 +msgid "Switzerland" +msgstr "Thụy-sĩ" + +#: ../src/util.c:236 +msgid "Syria" +msgstr "Xi-ri-a" + +#: src/common/util.c:1056 Expense/expense.c:129 +#, fuzzy +msgid "Taiwan" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"Đài Loan\n" +"#-#-#-#-# jpilot-0.99.8-pre12.vi.po (jpilot-0.99.8-pre12) #-#-#-#-#\n" +"Đài-loan" + +#: ../src/util.c:238 +msgid "Tajikistan" +msgstr "Tha-dikh-x-thăn" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:349 +msgid "Tanzania, United Republic Of" +msgstr "Cộng hoà Thống nhất Thăn-da-ni-a" + +#: ../src/util.c:240 Expense/expense.c:130 +msgid "Thailand" +msgstr "Thái-lan" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:351 +msgid "Timor-Leste" +msgstr "Thi-moa Lex-the" + +#: ../boards/geography/board4_2.xml.in.h:43 src/common/util.c:1045 +#: ../src/util.c:242 +msgid "Togo" +msgstr "Tô-gô" + +#: src/common/util.c:1048 ../src/util.c:243 +msgid "Tokelau" +msgstr "To-ke-lau" + +#: src/common/util.c:1051 ../src/util.c:244 +msgid "Tonga" +msgstr "Tông-ga" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:355 +msgid "Trinidad And Tobago" +msgstr "Tợ-ri-ni-đat và To-ba-gô" + +#: ../boards/geography/board4_2.xml.in.h:44 src/common/util.c:1050 +#: ../src/util.c:246 +msgid "Tunisia" +msgstr "Tu-ni-xi-a" + +#: ../src/util.c:247 +msgid "Turkey" +msgstr "Thổ-nhĩ-kỳ" + +#: ../src/util.c:248 +msgid "Turkmenistan" +msgstr "Thua-khợ-me-ni-x-tănh" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:359 +msgid "Turks And Caicos Islands" +msgstr "Quần đảo Thổ-kh-x và Cai-co-x" + +#: src/common/util.c:1055 ../src/util.c:250 +msgid "Tuvalu" +msgstr "Tu-va-lu" + +#: ../src/util.c:252 +msgid "Uganda" +msgstr "U-găn-đa" + +#: ../src/util.c:253 +msgid "Ukraine" +msgstr "U-cợ-rainh" + +#: src/common/util.c:823 ../src/util.c:254 +msgid "United Arab Emirates" +msgstr "Các Tiểu Vương quốc A-rập Thống nhất" + +#: src/common/util.c:1060 ../src/util.c:255 Expense/expense.c:131 +msgid "United Kingdom" +msgstr "Vương quốc Anh Thống nhất" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:365 +msgid "United States Minor Outlying Islands" +msgstr "Quần đảo ở xa nhỏ Mỹ" + +#: ../src/util.c:257 +msgid "Uruguay" +msgstr "U-ru-guay" + +#: ../src/util.c:258 +msgid "Uzbekistan" +msgstr "U-dợ-be-ki-x-thăn" + +#: src/common/util.c:1071 ../src/util.c:259 +msgid "Vanuatu" +msgstr "Va-nu-a-tu" + +#: ../src/util.c:261 +msgid "Venezuela" +msgstr "Vê-nê-du-ê-la" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:370 +msgid "Viet Nam" +msgstr "Việt Nam" + +#:
../addressbook/gui/contact-editor/e-contact-editor-address.c:371 +msgid "Virgin Islands, British" +msgstr "Quần đảo Vơ-ginh Anh" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:372 +msgid "Virgin Islands, U.S." +msgstr "Quần đảo Vơ-ginh Mỹ" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:373 +msgid "Wallis And Futuna Islands" +msgstr "Quần đảo Oua-lit và Phu-tu-na" + +#: ../addressbook/gui/contact-editor/e-contact-editor-address.c:375 +msgid "Western Sahara" +msgstr "Tây Sa-ha-ra" + +#: ../src/util.c:265 +msgid "Yemen" +msgstr "Y-ê-men" + +#: ../boards/geography/board4_2.xml.in.h:46 src/common/util.c:1078 +#: ../src/util.c:267 +msgid "Zambia" +msgstr "Dăm-bi-a" + +#: ../boards/geography/board4_2.xml.in.h:47 src/common/util.c:1079 +#: ../src/util.c:268 +msgid "Zimbabwe" +msgstr "Dim-ba-bu-ê" + +#: ../addressbook/gui/contact-editor/e-contact-editor-im.c:62 +msgid "AOL Instant Messenger" +msgstr "Tin nhắn AOL" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:185 +msgid "Jabber" +msgstr "Jabber" + +#: ../addressbook/gui/contact-editor/e-contact-editor-im.c:65 +msgid "Yahoo Messenger" +msgstr "Tin nhắn Yahoo" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:188 ../src/prefs.c:134 +msgid "ICQ" +msgstr "ICQ" + +#: ../gnome-netinfo/scan.c:297 +msgid "Service" +msgstr "Dịch vụ" + +#: feededit.c:361 ../libgda/gda-config.c:1570 ../testing/gda-diagnose.c:282 +#: schroot/sbuild-chroot-plain.cc:112 +msgid "Location" +msgstr "Địa điểm" + +#: src/common/text.c:634 ../src/dialogs.c:1487 ../libgda/gda-config.c:1867 +msgid "Username" +msgstr "Tên người dùng" + +#: ../mimedir/mimedir-vcard-address.c:246 ../mimedir/mimedir-vcard-email.c:163 +#: ../mimedir/mimedir-vcard-phone.c:160 +msgid "Home" +msgstr "Nhà" + +#: web/template/resources_edit_main.tpl:112 ../src/util.c:459 +#: ../src/util.c:517 src/chfn.c:194 address_gui.c:2791 Expense/expense.c:571 +#: Expense/expense.c:1427 libexif/exif-entry.c:433 libexif/exif-entry.c:460 +msgid "Other" +msgstr "Khác" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:186 +msgid "Yahoo" +msgstr "Yahoo" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:187 +msgid "MSN" +msgstr "MSN" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:189 +msgid "GroupWise" +msgstr "GroupWise" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:258 +msgid "Source Book" +msgstr "Sổ nguồn" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:265 +msgid "Target Book" +msgstr "Sổ đích" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:279 +msgid "Is New Contact" +msgstr "Là Liên lạc mới" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:286 +msgid "Writable Fields" +msgstr "Trường có thể ghi" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:293 +msgid "Required Fields" +msgstr "Trường cần thiết" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:307 main.c:1603 +msgid "Changed" +msgstr "Đã đổi" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2344 +#, c-format +msgid "Contact Editor - %s" +msgstr "Bộ hiệu chỉnh liên lạc — « %s »" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2650 +msgid "Please select an image for this contact" +msgstr "Hãy chọn ảnh cho liên lạc này" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2688 +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2651 +msgid "No image" +msgstr "Không ảnh" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2967 +#:
../addressbook/gui/contact-editor/e-contact-editor.c:2927 +msgid "" +"The contact data is invalid:\n" +"\n" +msgstr "" +"Dữ liệu liên lạc không hợp lệ:\n" +"\n" + +#: ../addressbook/gui/contact-editor/e-contact-editor.c:3019 +#: ../addressbook/gui/contact-editor/e-contact-editor.c:2979 +msgid "Invalid contact." +msgstr "Liên lạc không hợp lệ." + +#: ../addressbook/gui/contact-editor/e-contact-quick-add.c:277 +msgid "Contact Quick-Add" +msgstr "Thêm nhanh liên lạc" + +#: ../addressbook/gui/contact-editor/e-contact-quick-add.c:280 +msgid "_Edit Full" +msgstr "_Sửa đổi toàn bộ" + +#: ../addressbook/gui/contact-editor/e-contact-quick-add.c:306 +msgid "_Full name:" +msgstr "_Họ tên:" + +#: ../addressbook/gui/contact-editor/e-contact-quick-add.c:316 +msgid "E-_mail:" +msgstr "Th_ư điện tử :" + +#: ../addressbook/gui/contact-editor/eab-editor.c:323 +#, c-format +msgid "" +"Are you sure you want\n" +"to delete contact list (%s) ?" +msgstr "" +"Bạn có chắc muốn xoá bỏ\n" +"danh sách liên lạc (« %s ») không?" + +#: ../addressbook/gui/contact-editor/eab-editor.c:326 +msgid "" +"Are you sure you want\n" +"to delete these contact lists?" +msgstr "" +"Bạn có chắc muốn xoá bỏ\n" +"những danh sách liên lạc này không?" + +#: ../addressbook/gui/contact-editor/eab-editor.c:331 +#, c-format +msgid "" +"Are you sure you want\n" +"to delete contact (%s) ?" +msgstr "" +"Bạn có chắc muốn xoá bỏ\n" +"liên lạc (« %s ») không?" + +#: ../addressbook/gui/contact-editor/eab-editor.c:334 +msgid "" +"Are you sure you want\n" +"to delete these contacts?" +msgstr "" +"Bạn có chắc muốn xoá bỏ\n" +"những liên lạc này không?" + +#: ../addressbook/gui/contact-editor/fulladdr.glade.h:2 +msgid "Address _2:" +msgstr "Địa chỉ _2:" + +#: ../addressbook/gui/contact-editor/fulladdr.glade.h:3 +#: ../capplets/about-me/gnome-about-me.glade.h:21 +msgid "Ci_ty:" +msgstr "_Phố :" + +#: ../addressbook/gui/contact-editor/fulladdr.glade.h:4 +msgid "Countr_y:" +msgstr "_Quốc gia:" + +#: ../addressbook/gui/contact-editor/fulladdr.glade.h:5 +msgid "Full Address" +msgstr "Địa chỉ đầy đủ" + +#: ../addressbook/gui/contact-editor/fulladdr.glade.h:9 +msgid "_ZIP Code:" +msgstr "Mã _bưu điện:" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:2 ../namedetail.c:30 +msgid "Dr." +msgstr "TS." + +#: ../addressbook/gui/contact-editor/fullname.glade.h:3 ../namedetail.c:32 +msgid "Esq." +msgstr "Esq." + +#: ../addressbook/gui/contact-editor/fullname.glade.h:4 +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:15 src/chfn.c:142 +#: web/template/editaccount_main.tpl:2 web/template/newaccount_main.tpl:2 +#: src/chfn.c:174 +msgid "Full Name" +msgstr "Họ tên" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:5 +#: ../gnopi/cmdmapui.c:151 +msgid "I" +msgstr "I" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:6 ../namedetail.c:32 +msgid "II" +msgstr "II" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:7 ../namedetail.c:32 +msgid "III" +msgstr "III" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:8 ../namedetail.c:32 +msgid "Jr." +msgstr "Con." + +#: ../addressbook/gui/contact-editor/fullname.glade.h:9 ../namedetail.c:30 +msgid "Miss" +msgstr "Cô" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:10 ../namedetail.c:30 +msgid "Mr." +msgstr "Ông" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:11 ../namedetail.c:30 +msgid "Mrs." +msgstr "Bà" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:12 ../namedetail.c:30 +msgid "Ms."
+msgstr "Cô/Bà" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:13 ../namedetail.c:32 +msgid "Sr." +msgstr "Ông" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:14 +msgid "_First:" +msgstr "_Tên:" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:15 +msgid "_Last:" +msgstr "_Há» :" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:16 +msgid "_Middle:" +msgstr "Tên _lót:" + +#: ../addressbook/gui/contact-editor/fullname.glade.h:17 +#: ../gnomecard/card-editor.glade.h:62 +msgid "_Suffix:" +msgstr "_Hậu tố :" + +#: ../addressbook/gui/contact-editor/im.glade.h:2 +msgid "Add IM Account" +msgstr "Thêm tài khoản tin nhắn" + +#: ../addressbook/gui/contact-editor/im.glade.h:3 +msgid "_Account name:" +msgstr "Tên tài _khoản:" + +#: ../addressbook/gui/contact-editor/im.glade.h:4 +msgid "_IM Service:" +msgstr "Dịch vụ t_in nhắn:" + +#: ../gtk/gtkfilechooserdefault.c:7271 ../src/drivel.glade.h:72 +#: ../glade/straw.glade.h:80 +msgid "_Location:" +msgstr "_Äịa Ä‘iểm:" + +#: ../src/f-spot.glade.h:1 ogginfo/ogginfo2.c:365 +#, c-format +msgid "\n" +msgstr "\n" + +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:4 +msgid "Add an email to the List" +msgstr "Thêm má»™t địa chỉ thÆ° Ä‘iện tá»­ vào danh sách" + +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:817 +msgid "Contact List Editor" +msgstr "Bá»™ hiệu chỉnh danh sách liên lạc" + +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:6 +msgid "Insert email addresses from Address Book" +msgstr "Chèn địa chỉ thÆ° Ä‘iện tá»­ từ Sổ địa chỉ" + +#: ../symbol-browser-control/symbol-browser.c:72 +msgid "Members" +msgstr "Thành viên" + +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:8 +msgid "Remove an email address from the List" +msgstr "Gỡ bỠđịa chỉ thÆ° Ä‘iện tá»­ khá»i danh sách" + +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:9 +msgid "_Hide addresses when sending mail to this list" +msgstr "Ẩ_n các địa chỉ khi gởi thÆ° tá»›i danh sách" + +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:10 +msgid "_List name:" +msgstr "Tên _danh sách:" + +#: ../app/actions/select-actions.c:47 src/gtkam-main.c:561 +#: ../src/glade-popup.c:274 +msgid "_Select" +msgstr "_Chá»n" + +#: ../addressbook/gui/contact-list-editor/contact-list-editor.glade.h:12 +msgid "_Type an email address or drag a contact into the list below:" +msgstr "_Nhập địa chỉ thÆ° hoặc kéo liên lạc vào danh sách dÆ°á»›i đây:" + +#: ../addressbook/gui/widgets/e-minicard-view.c:505 +msgid "Book" +msgstr "Sổ" + +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:177 +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:176 +msgid "Is New List" +msgstr "Là danh sách má»›i" + +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:719 +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:707 +msgid "_Members" +msgstr "Thành _viên" + +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:722 +#: ../addressbook/gui/contact-list-editor/e-contact-list-editor.c:710 +msgid "Contact List Members" +msgstr "Thành viên danh sách" + +#: ../addressbook/gui/merging/eab-contact-commit-duplicate-detected.glade.h:1 +msgid "Changed Contact:" +msgstr "Liên lạc đã đổi:" + +#: ../addressbook/gui/merging/eab-contact-commit-duplicate-detected.glade.h:2 +msgid "Conflicting Contact:" +msgstr "Liên lạc xung Ä‘á»™t:" + +#: ../addressbook/gui/merging/eab-contact-commit-duplicate-detected.glade.h:3 +#: 
../addressbook/gui/merging/eab-contact-duplicate-detected.glade.h:1 +msgid "Duplicate Contact Detected" +msgstr "Phát hiện liên lạc trùng" + +#: ../addressbook/gui/merging/eab-contact-commit-duplicate-detected.glade.h:4 +msgid "" +"The changed email or name of this contact already\n" +"exists in this folder. Would you like to add it anyway?" +msgstr "" +"Tên hoặc địa chỉ thư điện tử đã thay đổi của liên lạc này\n" +"đã có trong thư mục này. Bạn vẫn có muốn thêm không?" + +#: ../addressbook/gui/merging/eab-contact-duplicate-detected.glade.h:2 +msgid "New Contact:" +msgstr "Liên lạc mới:" + +#: ../addressbook/gui/merging/eab-contact-duplicate-detected.glade.h:3 +msgid "Original Contact:" +msgstr "Liên lạc gốc:" + +#: ../addressbook/gui/merging/eab-contact-duplicate-detected.glade.h:4 +msgid "" +"The name or email address of this contact already exists\n" +"in this folder. Would you like to add it anyway?" +msgstr "" +"Tên hoặc địa chỉ thư điện tử của liên lạc này đã có\n" +"trong thư mục này. Bạn vẫn có muốn thêm không?" + +#: ../widgets/misc/e-filter-bar.c:156 +msgid "Advanced Search" +msgstr "Tìm kiếm cấp cao" + +#: ../addressbook/gui/widgets/e-addressbook-model.c:148 +msgid "No contacts" +msgstr "Không có liên lạc" + +#: ../addressbook/gui/widgets/e-addressbook-model.c:151 +#, c-format +msgid "%d contact" +msgid_plural "%d contact" +msgstr[0] "%d liên lạc" + +#: ../addressbook/gui/widgets/e-addressbook-model.c:446 +msgid "Error getting book view" +msgstr "Gặp lỗi khi gọi khung xem sổ" + +#: src/set_data.c:314 libexif/exif-tag.c:105 +msgid "Model" +msgstr "Mô hình" + +#: ../addressbook/gui/widgets/e-addressbook-table-adapter.c:103 +msgid "Error modifying card" +msgstr "Gặp lỗi khi sửa đổi thẻ" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:170 +#: ../addressbook/gui/widgets/e-addressbook-view.c:168 +msgid "Name begins with" +msgstr "Tên bắt đầu bằng" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:171 +#: ../addressbook/gui/widgets/e-addressbook-view.c:169 +msgid "Email begins with" +msgstr "Thư bắt đầu bằng" + +#: ../calendar/gui/cal-search-bar.c:53 +msgid "Category is" +msgstr "Phân loại là" + +#: ../calendar/gui/cal-search-bar.c:48 +msgid "Any field contains" +msgstr "Bất kỳ trường nào chứa" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:177 +msgid "Advanced..." +msgstr "Cấp cao..." + +#: ../libgnomedb/gnome-db-error.c:231 ../app/tools/gimpclonetool.c:329 +msgid "Source" +msgstr "Nguồn" + +#: ../ui/evolution-addressbook.xml.h:19 +msgid "Save as VCard..." +msgstr "Lưu dạng vCard..." + +#: ../addressbook/gui/widgets/e-addressbook-view.c:946 +msgid "_New Contact..." +msgstr "Liên lạc _mới..." + +#: ../addressbook/gui/widgets/e-addressbook-view.c:947 +msgid "New Contact _List..." +msgstr "_Danh sách liên lạc mới..." + +#: ../addressbook/gui/widgets/e-addressbook-view.c:950 +#: ../ui/evolution-addressbook.xml.h:41 +msgid "_Save as VCard..." +msgstr "Lư_u dạng vCard..."
+ +#: ../addressbook/gui/widgets/e-addressbook-view.c:951 +msgid "_Forward Contact" +msgstr "_Chuyển tiếp liên lạc" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:952 +msgid "_Forward Contacts" +msgstr "_Chuyển tiếp các liên lạc" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:953 +msgid "Send _Message to Contact" +msgstr "Gởi th_ư cho liên lạc" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:954 +msgid "Send _Message to List" +msgstr "Gởi th_ư cho danh sách" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:955 +msgid "Send _Message to Contacts" +msgstr "Gởi th_ư cho các liên lạc" + +#: ../plug-ins/common/winprint.c:224 +msgid "_Print" +msgstr "_In" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:959 +msgid "Cop_y to Address Book..." +msgstr "_Chép vào Sổ địa chỉ..." + +#: ../addressbook/gui/widgets/e-addressbook-view.c:960 +msgid "Mo_ve to Address Book..." +msgstr "Chu_yển vào Sổ địa chỉ..." + +#: ../app/actions/edit-actions.c:86 +msgid "Cu_t" +msgstr "Cắ_t" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:965 app/menubar.c:520 +#, fuzzy +msgid "P_aste" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"_Dán\n" +"#-#-#-#-# soundtracker-0.6.7.vi.po (soundtracker) #-#-#-#-#\n" +"D_án" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:1561 +msgid "Any Category" +msgstr "Bất kỳ phân loại nào" + +#: ../addressbook/gui/widgets/e-addressbook-view.c:1760 +msgid "Print cards" +msgstr "In các thẻ" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:1 +#: ../addressbook/libebook/e-contact.c:208 +msgid "Assistant" +msgstr "Phụ tá" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:2 +#: ../addressbook/libebook/e-contact.c:126 +msgid "Assistant Phone" +msgstr "Điện thoại phụ tá" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:3 +#: ../addressbook/libebook/e-contact.c:129 +msgid "Business Fax" +msgstr "Điện thư kinh doanh" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:4 +#: ../addressbook/libebook/e-contact.c:127 +msgid "Business Phone" +msgstr "Điện thoại kinh doanh" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:5 +#: ../addressbook/libebook/e-contact.c:128 +msgid "Business Phone 2" +msgstr "Điện thoại kinh doanh 2" + +#: ../addressbook/libebook/e-contact.c:130 +msgid "Callback Phone" +msgstr "Số gọi lại" + +#: ../addressbook/libebook/e-contact.c:131 +msgid "Car Phone" +msgstr "Điện thoại xe" + +#: ../list-ui.c:653 ../gncal/todo-list.c:1095 src/prefsdlg.cpp:230 +#: ../mimedir/mimedir-vcard.c:493 +msgid "Categories" +msgstr "Phân loại" + +#: ../addressbook/libebook/e-contact.c:132 +msgid "Company Phone" +msgstr "Điện thoại công ty" + +#: src/dictmanagedlg.cpp:507 +msgid "Email" +msgstr "Thư điện tử" + +#: ../addressbook/libebook/e-contact.c:149 +msgid "Email 2" +msgstr "Thư điện tử 2" + +#: ../addressbook/libebook/e-contact.c:150 +msgid "Email 3" +msgstr "Thư điện tử 3" + +#: ../addressbook/libebook/e-contact.c:112 ../gnomecard/cardlist-headers.c:34 +msgid "Family Name" +msgstr "Họ" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:14 +msgid "File As" +msgstr "Tập tin dạng" + +#: ../addressbook/libebook/e-contact.c:111 ../gnomecard/cardlist-headers.c:32 +msgid "Given Name" +msgstr "Tên hay gọi" + +#: ../addressbook/libebook/e-contact.c:135 +msgid "Home Fax" +msgstr "Điện thư ở nhà" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:18 src/chfn.c:157 +#: src/chfn.c:159 src/chfn.c:189 +msgid "Home Phone" +msgstr "Điện thoại ở nhà" + +#:
../addressbook/gui/widgets/e-addressbook-view.etspec.h:19 +#: ../addressbook/libebook/e-contact.c:134 +msgid "Home Phone 2" +msgstr "Điện thoại ở nhà 2" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:20 +msgid "ISDN Phone" +msgstr "Điện thoại ISDN" + +#: ../storage/exchange-hierarchy-foreign.c:255 +msgid "Journal" +msgstr "Nhật ký" + +#: ../addressbook/libebook/e-contact.c:137 +msgid "Mobile Phone" +msgstr "Điện thoại di động" + +#: ../src/search.c:155 ../ui/message.glade.h:3 src/silc-command-reply.c:274 +#: src/silc-command-reply.c:703 +msgid "Nickname" +msgstr "Tên hiệu" + +#: ../components/html-editor/template.c:88 ../sheets/UML.sheet.in.h:23 +#: todo_gui.c:2313 Expense/expense.c:1862 KeyRing/keyring.c:1689 +#: ../mimedir/mimedir-vcard.c:499 +msgid "Note" +msgstr "Ghi chú" + +#: ../desktop-directories/Office.directory.in.h:1 ../data/toc.xml.in.h:13 +msgid "Office" +msgstr "Văn phòng" + +#: ../addressbook/libebook/e-contact.c:201 ../gnomecard/cardlist-headers.c:37 +#: ../pan/message-window.c:1010 ../mimedir/mimedir-vcard.c:481 +msgid "Organization" +msgstr "Tổ chức" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:28 +#: ../addressbook/libebook/e-contact.c:139 +msgid "Other Fax" +msgstr "Điện thư khác" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:29 +#: ../addressbook/libebook/e-contact.c:138 +msgid "Other Phone" +msgstr "Điện thoại khác" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:30 +#: ../sheets/ciscotelephony.sheet.in.h:33 ../mimedir/mimedir-vcard-phone.c:196 +msgid "Pager" +msgstr "Máy nhắn tin" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:31 +#: ../addressbook/libebook/e-contact.c:141 +msgid "Primary Phone" +msgstr "Điện thoại chính" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:32 +#: ../glade/glade_menu_editor.c:1054 ../glade/glade_menu_editor.c:2414 +#: ../glade/glade_menu_editor.c:2554 ../src/glade-gtk.c:2365 +#, fuzzy +msgid "Radio" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"Rađiô\n" +"#-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#\n" +"Chọn một" + +#: ../storage/exchange-permissions-dialog.c:710 ../objects/Istar/actor.c:71 +#: ../mimedir/mimedir-vcard.c:441 +msgid "Role" +msgstr "Vai trò" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:34 +msgid "Spouse" +msgstr "Vợ/Chồng" + +#. Translators: This is a vcard standard and stands for the type of +#. phone used by the hearing impaired. TTY stands for "teletype" +#. (familiar from Unix device names), and TDD is "Telecommunications +#. Device for Deaf". However, you probably want to leave this +#. abbreviation unchanged unless you know that there is actually a +#. different and established translation for this in your language.
+#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:41 +msgid "TTYTDD" +msgstr "TTYTDD" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:42 +#: ../addressbook/libebook/e-contact.c:143 +msgid "Telex" +msgstr "Telex" + +#: ../providers/msql/gda-msql-provider.c:533 ../src/lib/subscribe.py:178 +#: ../mimedir/mimedir-vcard-address.c:167 +msgid "Title" +msgstr "Tựa" + +#: ../libgimpwidgets/gimpunitmenu.c:660 ../plug-ins/common/postscript.c:3349 +msgid "Unit" +msgstr "Đơn vị" + +#: ../addressbook/gui/widgets/e-addressbook-view.etspec.h:45 +msgid "Web Site" +msgstr "Chỗ Mạng" + +#: ../objects/network/wanlink.c:117 +msgid "Width" +msgstr "Rộng" + +#: ../extensions/page-info/page-info-dialog.c:1299 +#: ../gncal/calendar-month-item.c:267 ../gncal/calendar-year-item.c:205 +msgid "Height" +msgstr "Cao" + +#: ../addressbook/gui/widgets/e-minicard.c:152 +msgid "Has Focus" +msgstr "Có tiêu điểm" + +#: ../libgnomedb/sel-onetable.c:203 ../libgnomedb/sel-onetarget.c:224 +#: ../glom/glom.glade.h:96 ../glom/data_structure/translatable_item.cc:234 +#: ../glom/data_structure/layout/layoutitem_field.cc:161 +#: ../glom/dialog_database_preferences.cc:49 ../glade/search.glade.h:4 +#: address_gui.c:815 address_gui.c:823 address_gui.c:840 +msgid "Field" +msgstr "Trường" + +#: ../addressbook/gui/widgets/e-minicard-label.c:143 +#: providers/msql/gda-msql-provider.c:563 +#: ../providers/msql/gda-msql-provider.c:585 +msgid "Field Name" +msgstr "Tên trường" + +#: ../addressbook/gui/widgets/e-minicard-label.c:150 +msgid "Text Model" +msgstr "Mô hình chữ" + +#: ../addressbook/gui/widgets/e-minicard-label.c:157 +msgid "Max field name length" +msgstr "Độ dài tên trường tối đa" + +#: ../addressbook/gui/widgets/e-minicard-view-widget.c:128 +msgid "Column Width" +msgstr "Độ rộng cột" + +#: ../addressbook/gui/widgets/e-minicard-view.c:172 +#: ../addressbook/gui/widgets/e-minicard-view.c:171 +msgid "" +"\n" +"\n" +"Search for the Contact\n" +"\n" +"or double-click here to create a new Contact." +msgstr "" +"\n" +"\n" +"Tìm kiếm liên lạc,\n" +"\n" +"hay nhấp đúp vào đây để tạo liên lạc mới." + +#: ../addressbook/gui/widgets/e-minicard-view.c:175 +#: ../addressbook/gui/widgets/e-minicard-view.c:174 +msgid "" +"\n" +"\n" +"There are no items to show in this view.\n" +"\n" +"Double-click here to create a new Contact." +msgstr "" +"\n" +"\n" +"Không có mục nào để xem trong khung nhìn này.\n" +"\n" +"Nhấp đúp vào đây để tạo liên lạc mới." + +#: ../addressbook/gui/widgets/e-minicard-view.c:180 +#: ../addressbook/gui/widgets/e-minicard-view.c:179 +msgid "" +"\n" +"\n" +"Search for the Contact." +msgstr "" +"\n" +"\n" +"Tìm kiếm liên lạc." + +#: ../addressbook/gui/widgets/e-minicard-view.c:182 +#: ../addressbook/gui/widgets/e-minicard-view.c:181 +msgid "" +"\n" +"\n" +"There are no items to show in this view." +msgstr "" +"\n" +"\n" +"Không có mục nào để xem trong khung nhìn này."
+ +#: ../addressbook/gui/widgets/e-minicard-view.c:499 +#: ../addressbook/gui/widgets/e-minicard-view.c:498 +#: libexif/olympus/mnote-olympus-tag.c:60 +msgid "Adapter" +msgstr "Bộ tiếp hợp" + +#: ../plugins/taglist/HTML.tags.xml.in.h:202 ../glade/property.c:103 +msgid "Selected" +msgstr "Đã chọn" + +#: ../addressbook/gui/widgets/e-minicard.c:168 +#: ../addressbook/gui/widgets/e-minicard.c:167 +msgid "Has Cursor" +msgstr "Có con trỏ" + +#: ../addressbook/gui/widgets/eab-contact-display.c:135 +msgid "(map)" +msgstr "(bản đồ)" + +#: ../addressbook/gui/widgets/eab-contact-display.c:145 +msgid "map" +msgstr "bản đồ" + +#: ../addressbook/gui/widgets/eab-contact-display.c:543 +msgid "List Members" +msgstr "Thành viên danh sách" + +#: ../src/personal_info.c:83 address_gui.c:1919 address_gui.c:2937 +msgid "E-mail" +msgstr "Thư điện tử" + +#: ../objects/FS/function.c:802 ../objects/Istar/actor.c:70 +#: ../widgets/gtk+.xml.in.h:144 +msgid "Position" +msgstr "Vị trí" + +#: ../addressbook/gui/widgets/eab-contact-display.c:364 +msgid "Video Conferencing" +msgstr "Hội thảo ảnh động" + +#: ../sheets/ciscotelephony.sheet.in.h:34 address_gui.c:2662 +msgid "Phone" +msgstr "Điện thoại" + +#: ../sheets/ciscotelephony.sheet.in.h:12 Expense/expense.c:551 +#: Expense/expense.c:1417 ../mimedir/mimedir-vcard-phone.c:178 +msgid "Fax" +msgstr "Điện thư" + +#: ../addressbook/gui/widgets/eab-contact-display.c:370 +#: ../mimedir/mimedir-vcard-address.c:944 ../mimedir/mimedir-vcard-email.c:566 +#: ../mimedir/mimedir-vcard-phone.c:706 +msgid "work" +msgstr "chỗ làm" + +#: ../addressbook/gui/widgets/eab-contact-display.c:377 +#: ../gnomecard/cardlist-headers.c:40 +msgid "WWW" +msgstr "WWW" + +#: ../addressbook/gui/widgets/eab-contact-display.c:597 ../blog_applet.py:39 +msgid "Blog" +msgstr "Nhật ký Mạng" + +#: ../addressbook/gui/widgets/eab-contact-display.c:385 +msgid "personal" +msgstr "cá nhân" + +#: ../gnomecard/cardlist-headers.c:38 +msgid "Job Title" +msgstr "Chức vụ" + +#: ../addressbook/gui/widgets/eab-contact-display.c:589 galeon.schemas.in.h:84 +msgid "Home page" +msgstr "Trang chủ" + +#: ogg123/cfgfile_options.c:422 +msgid "Success" +msgstr "Thành công" + +#. E_BOOK_ERROR_INVALID_ARG +#. E_BOOK_ERROR_BUSY +#: ../addressbook/gui/widgets/eab-gui-util.c:51 +msgid "Backend busy" +msgstr "Hậu phương quá bận" + +#. E_BOOK_ERROR_REPOSITORY_OFFLINE +#: ../addressbook/gui/widgets/eab-gui-util.c:52 +msgid "Repository offline" +msgstr "Kho ngoại tuyến" + +#. E_BOOK_ERROR_NO_SUCH_BOOK +#: ../addressbook/gui/widgets/eab-gui-util.c:53 +msgid "Address Book does not exist" +msgstr "Không có Sổ địa chỉ đó" + +#. E_BOOK_ERROR_NO_SELF_CONTACT +#: ../addressbook/gui/widgets/eab-gui-util.c:54 +msgid "No Self Contact defined" +msgstr "Chưa định nghĩa Tự liên lạc" + +#: gram.pl:360 ../src/gyrus-admin-acl.c:139 ../src/gyrus-admin-mailbox.c:78 +msgid "Permission denied" +msgstr "Quyền bị từ chối" + +#. E_BOOK_ERROR_CONTACT_NOT_FOUND +#: ../addressbook/gui/widgets/eab-gui-util.c:58 +msgid "Contact not found" +msgstr "Không tìm thấy liên lạc" + +#. E_BOOK_ERROR_CONTACT_ID_ALREADY_EXISTS +#: ../addressbook/gui/widgets/eab-gui-util.c:59 +msgid "Contact ID already exists" +msgstr "ID Liên lạc đã có" + +#: ../calendar/libecal/e-cal.c:5034 +msgid "Protocol not supported" +msgstr "Chưa hỗ trợ giao thức này" + +#: ../libgnomedb/gnome-db-sql-console.c:457 +msgid "Cancelled" +msgstr "Bị thôi" + +#.
E_BOOK_ERROR_COULD_NOT_CANCEL +#: ../addressbook/gui/widgets/eab-gui-util.c:62 +msgid "Could not cancel" +msgstr "Không thể thôi" + +#. E_BOOK_ERROR_AUTHENTICATION_FAILED +#: ../addressbook/gui/widgets/eab-gui-util.c:63 +#: ../calendar/gui/comp-editor-factory.c:438 +msgid "Authentication Failed" +msgstr "Xác thực thất bại" + +#. E_BOOK_ERROR_AUTHENTICATION_REQUIRED +#: ../addressbook/gui/widgets/eab-gui-util.c:64 +#: ../libgnomecups/gnome-cups-ui-connection.c:628 +msgid "Authentication Required" +msgstr "Cần thiết xác thực" + +#. E_BOOK_ERROR_TLS_NOT_AVAILABLE +#: ../addressbook/gui/widgets/eab-gui-util.c:65 +msgid "TLS not Available" +msgstr "Không có TLS" + +#. E_BOOK_ERROR_CORBA_EXCEPTION +#. E_BOOK_ERROR_NO_SUCH_SOURCE +#: ../addressbook/gui/widgets/eab-gui-util.c:67 +msgid "No such source" +msgstr "Không có nguồn như vậy" + +#. E_BOOK_ERROR_OFFLINE_UNAVAILABLE +#: ../addressbook/gui/widgets/eab-gui-util.c:68 +msgid "Not available in offline mode" +msgstr "Không sẵn sàng trong chế độ ngoại tuyến" + +#. E_BOOK_ERROR_OTHER_ERROR +#: ../addressbook/gui/widgets/eab-gui-util.c:69 +msgid "Other error" +msgstr "Lỗi khác" + +#. E_BOOK_ERROR_INVALID_SERVER_VERSION +#: ../addressbook/gui/widgets/eab-gui-util.c:70 +msgid "Invalid server version" +msgstr "Phiên bản máy phục vụ không hợp lệ" + +#: ../addressbook/gui/widgets/eab-gui-util.c:93 +msgid "" +"We were unable to open this addressbook. This either means this book is not " +"marked for offline usage or not yet downloaded for offline usage. Please " +"load the addressbook once in online mode to download its contents" +msgstr "" +"Chưa có mở được sổ địa chỉ này. Hoặc vì sổ này không có dấu cho phép sử dụng " +"khi ngoại tuyến, hoặc chưa tải nó về để sử dụng ngoại tuyến. Hãy tải sổ địa " +"chỉ đó một lần trong chế độ trực tuyến, để tải nội dung nó về." + +#: ../addressbook/gui/widgets/eab-gui-util.c:102 +#, c-format +msgid "" +"We were unable to open this addressbook. Please check that the path %s " +"exists and that you have permission to access it." +msgstr "" +"Không thể mở sổ địa chỉ này. Vui lòng kiểm tra lại có đường dẫn « %s » và " +"bạn có quyền truy cập vào nó." + +#: ../addressbook/gui/widgets/eab-gui-util.c:111 +#: ../addressbook/gui/widgets/eab-gui-util.c:110 +msgid "" +"We were unable to open this addressbook. This either means you have entered " +"an incorrect URI, or the LDAP server is unreachable." +msgstr "" +"Không thể mở sổ địa chỉ này. Nguyên nhân hoặc là do bạn đã gõ sai địa chỉ " +"Mạng, hoặc là do máy phục vụ LDAP không thể truy cập." + +#: ../addressbook/gui/widgets/eab-gui-util.c:116 +#: ../addressbook/gui/widgets/eab-gui-util.c:115 +msgid "" +"This version of Evolution does not have LDAP support compiled in to it. If " +"you want to use LDAP in Evolution, you must install an LDAP-enabled " +"Evolution package." +msgstr "" +"Phiên bản Evolution này không được biên dịch để hỗ trợ LDAP. Nếu bạn muốn " +"dùng LDAP trong Evolution, bạn phải cài đặt gói Evolution hỗ trợ LDAP." + +#: ../addressbook/gui/widgets/eab-gui-util.c:123 +#: ../addressbook/gui/widgets/eab-gui-util.c:122 +msgid "" +"We were unable to open this addressbook. This either means you have entered " +"an incorrect URI, or the server is unreachable." +msgstr "" +"Không thể mở sổ địa chỉ này. Nguyên nhân hoặc là do bạn đã gõ sai địa chỉ " +"Mạng đó, hoặc là do máy phục vụ không thể truy cập."
+ +#: ../addressbook/gui/widgets/eab-gui-util.c:146 +msgid "" +"More cards matched this query than either the server is \n" +"configured to return or Evolution is configured to display.\n" +"Please make your search more specific or raise the result limit in\n" +"the directory server preferences for this addressbook." +msgstr "" +"Quá nhiều thẻ khớp với truy vấn này, nhiều hơn cấu hình\n" +"của máy phục vụ có thể trả gởi, hoặc cấu hình của Evolution\n" +"có thể hiển thị. Bạn hãy tìm kiếm chính xác hơn hoặc tăng giới hạn\n" +"kết quả trong Tùy thích máy phục vụ thư mục cho sổ địa chỉ này." + +#: ../addressbook/gui/widgets/eab-gui-util.c:152 +msgid "" +"The time to execute this query exceeded the server limit or the limit\n" +"you have configured for this addressbook. Please make your search\n" +"more specific or raise the time limit in the directory server\n" +"preferences for this addressbook." +msgstr "" +"Thời gian thực hiện truy vấn này vượt quá giới hạn máy phục vụ\n" +"hoặc giới hạn do bạn cấu hình cho sổ địa chỉ này.\n" +"Vui lòng tìm kiếm chính xác hơn hoặc tăng giới hạn thời gian trong\n" +"Tùy thích máy phục vụ thư mục cho sổ địa chỉ này." + +#: ../addressbook/gui/widgets/eab-gui-util.c:158 +#: ../addressbook/gui/widgets/eab-gui-util.c:157 +msgid "The backend for this addressbook was unable to parse this query." +msgstr "Hậu phương cho sổ địa chỉ này không thể phân tách truy vấn này." + +#: ../addressbook/gui/widgets/eab-gui-util.c:161 +#: ../addressbook/gui/widgets/eab-gui-util.c:160 +msgid "The backend for this addressbook refused to perform this query." +msgstr "Hậu phương cho sổ địa chỉ này từ chối thực hiện truy vấn này." + +#: ../addressbook/gui/widgets/eab-gui-util.c:164 +#: ../addressbook/gui/widgets/eab-gui-util.c:163 +msgid "This query did not complete successfully." +msgstr "Truy vấn không hoàn tất." + +#: ../addressbook/gui/widgets/eab-gui-util.c:186 +#: ../addressbook/gui/widgets/eab-gui-util.c:185 +msgid "Error adding list" +msgstr "Gặp lỗi khi thêm danh sách" + +#: ../addressbook/gui/widgets/eab-gui-util.c:687 +msgid "Error adding contact" +msgstr "Gặp lỗi khi thêm liên lạc" + +#: ../addressbook/gui/widgets/eab-gui-util.c:197 +#: ../addressbook/gui/widgets/eab-gui-util.c:196 +msgid "Error modifying list" +msgstr "Gặp lỗi khi sửa đổi danh sách" + +#: ../addressbook/gui/widgets/eab-gui-util.c:197 +#: ../addressbook/gui/widgets/eab-gui-util.c:196 +msgid "Error modifying contact" +msgstr "Gặp lỗi khi sửa đổi liên lạc" + +#: ../addressbook/gui/widgets/eab-gui-util.c:209 +#: ../addressbook/gui/widgets/eab-gui-util.c:208 +msgid "Error removing list" +msgstr "Gặp lỗi khi gỡ bỏ danh sách" + +#: ../addressbook/gui/widgets/eab-gui-util.c:642 +msgid "Error removing contact" +msgstr "Gặp lỗi khi gỡ bỏ liên lạc" + +#: ../addressbook/gui/widgets/eab-gui-util.c:291 +#: ../addressbook/gui/widgets/eab-gui-util.c:290 +#, c-format +msgid "" +"Opening %d contact will open %d new window as well.\n" +"Do you really want to display this contact?" +msgid_plural "" +"Opening %d contact will open %d new window as well.\n" +"Do you really want to display this contact?" +msgstr[0] "" +"Việc mở %d liên lạc sẽ mở %d cửa sổ mới cùng lúc.\n" +"Bạn có thật sự muốn hiển thị liên lạc này không?" + +#: ../addressbook/gui/widgets/eab-gui-util.c:320 +#: ../addressbook/gui/widgets/eab-gui-util.c:319 +#, c-format +msgid "" +"%s already exists\n" +"Do you want to overwrite it?" +msgstr "" +"%s đã có.\n" +"Bạn có muốn ghi đè lên nó không?"
+ +#: ../addressbook/gui/widgets/eab-gui-util.c:324 ../src/actions.c:492 +#: ../src/actions.c:808 ../src/ui-gui.cc:244 +msgid "Overwrite" +msgstr "Ghi đè" + +#: ../addressbook/gui/widgets/eab-gui-util.c:371 +msgid "contact" +msgid_plural "contact" +msgstr[0] "liên lạc" + +#: ../addressbook/gui/widgets/eab-gui-util.c:418 +msgid "card.vcf" +msgstr "card.vcf" + +#: ../addressbook/gui/widgets/eab-gui-util.c:755 +#: ../addressbook/gui/widgets/eab-gui-util.c:748 +msgid "Move contact to" +msgstr "Chuyển liên lạc tới" + +#: ../addressbook/gui/widgets/eab-gui-util.c:757 +#: ../addressbook/gui/widgets/eab-gui-util.c:750 +msgid "Copy contact to" +msgstr "Chép liên lạc tới" + +#: ../addressbook/gui/widgets/eab-gui-util.c:760 +#: ../addressbook/gui/widgets/eab-gui-util.c:753 +msgid "Move contacts to" +msgstr "Chuyển các liên lạc tới" + +#: ../addressbook/gui/widgets/eab-gui-util.c:762 +#: ../addressbook/gui/widgets/eab-gui-util.c:755 +msgid "Copy contacts to" +msgstr "Chép các liên lạc tới" + +#: ../addressbook/gui/widgets/eab-gui-util.c:765 +#: ../addressbook/gui/widgets/eab-gui-util.c:758 +msgid "Select target addressbook." +msgstr "Chọn sổ địa chỉ đích." + +#: ../addressbook/gui/widgets/eab-gui-util.c:988 +#: ../addressbook/gui/widgets/eab-gui-util.c:982 +msgid "Multiple VCards" +msgstr "Nhiều VCard" + +#: ../addressbook/gui/widgets/eab-gui-util.c:991 +#: ../addressbook/gui/widgets/eab-gui-util.c:985 +#, c-format +msgid "VCard for %s" +msgstr "VCard cho « %s »" + +#: ../addressbook/gui/widgets/eab-gui-util.c:1032 +#: ../addressbook/gui/widgets/eab-gui-util.c:1050 +#: ../mimedir/mimedir-vcomponent.c:387 +msgid "Contact information" +msgstr "Thông tin liên lạc" + +#: ../addressbook/gui/widgets/eab-gui-util.c:1052 +#, c-format +msgid "Contact information for %s" +msgstr "Thông tin liên lạc cho %s" + +#: ../addressbook/gui/widgets/eab-popup-control.c:431 +msgid "Primary Email" +msgstr "Thư điện tử chính" + +#: ../addressbook/gui/widgets/eab-popup-control.c:567 +msgid "Select an Action" +msgstr "Chọn hành động" + +#: ../addressbook/gui/widgets/eab-popup-control.c:575 +#, c-format +msgid "Create a new contact \"%s\"" +msgstr "Tạo liên lạc mới « %s »" + +#: ../addressbook/gui/widgets/eab-popup-control.c:591 +#, c-format +msgid "Add address to existing contact \"%s\"" +msgstr "Thêm địa chỉ vào liên lạc đã có « %s »" + +#: ../addressbook/gui/widgets/eab-popup-control.c:869 +msgid "Querying Address Book..." +msgstr "Đang truy vấn Sổ địa chỉ..." + +#: ../addressbook/gui/widgets/eab-popup-control.c:968 +#: ../addressbook/gui/widgets/eab-popup-control.c:970 +msgid "Merge E-Mail Address" +msgstr "Trộn địa chỉ thư điện tử" + +#: ../addressbook/gui/widgets/eab-vcard-control.c:139 +#, c-format +msgid "There is one other contact." +msgid_plural "There are %d other contacts." +msgstr[0] "Có %d liên lạc khác." + +#: ../addressbook/gui/widgets/eab-vcard-control.c:223 +#: ../addressbook/gui/widgets/eab-vcard-control.c:272 +msgid "Show Full VCard" +msgstr "Hiện toàn vCard" + +#: ../addressbook/gui/widgets/eab-vcard-control.c:227 +msgid "Show Compact VCard" +msgstr "Hiện vCard tóm gọn" + +#: ../addressbook/gui/widgets/eab-vcard-control.c:277 +msgid "Save in addressbook" +msgstr "Lưu vào sổ địa chỉ" + +#: ../addressbook/gui/widgets/gal-view-factory-minicard.c:25 +msgid "Card View" +msgstr "Khung xem thẻ" + +#: ../addressbook/gui/widgets/gal-view-factory-treeview.c:26 +msgid "GTK Tree View" +msgstr "Khung xem Cây GTK" + +#: ../calendar/importers/icalendar-importer.c:643 +msgid "Importing ..."
+msgstr "Äang nhập..." + +#: ../addressbook/importers/evolution-ldif-importer.c:761 +#: ../addressbook/importers/evolution-ldif-importer.c:652 +msgid "LDAP Data Interchange Format (.ldif)" +msgstr "Dạng thức chuyển đổi lẫn nhau dữ liệu LDAP (.ldif)" + +#: ../addressbook/importers/evolution-ldif-importer.c:762 +#: ../addressbook/importers/evolution-ldif-importer.c:653 +msgid "Evolution LDIF importer" +msgstr "Bá»™ nhập LDIF Evolution" + +# Name: do not translate/ tên: đừng dịch +#: ../addressbook/importers/evolution-vcard-importer.c:554 +#: ../addressbook/importers/evolution-vcard-importer.c:529 +msgid "VCard (.vcf, .gcrd)" +msgstr "vCard (.vcf, .gcrd)" + +#: ../addressbook/importers/evolution-vcard-importer.c:555 +#: ../addressbook/importers/evolution-vcard-importer.c:530 +msgid "Evolution VCard Importer" +msgstr "Bá»™ nhập vCard Evolution" + +#: ../addressbook/printing/e-contact-print-envelope.c:213 +#: ../addressbook/printing/e-contact-print-envelope.c:234 +msgid "Print envelope" +msgstr "In phong bì" + +#: ../addressbook/printing/e-contact-print.c:1033 +#: ../addressbook/printing/e-contact-print.c:1001 +msgid "Print contacts" +msgstr "In các liên lạc" + +#: ../addressbook/printing/e-contact-print.c:1093 +msgid "Print contact" +msgstr "In liên lạc" + +#: ../addressbook/printing/e-contact-print.glade.h:1 +msgid "10 pt. Tahoma" +msgstr "10 pt. Tahoma" + +#: ../addressbook/printing/e-contact-print.glade.h:2 +msgid "8 pt. Tahoma" +msgstr "8 pt. Tahoma" + +#: ../addressbook/printing/e-contact-print.glade.h:3 +msgid "Blank forms at end:" +msgstr "Mẫu trống tại cuối:" + +#: ../plug-ins/print/gimp_main_window.c:575 ../app/diapagelayout.c:212 +msgid "Bottom:" +msgstr "DÆ°á»›i:" + +#: ../plug-ins/print/gimp_main_window.c:1006 +msgid "Dimensions:" +msgstr "Các chiá»u : " + +#: ../addressbook/printing/e-contact-print.glade.h:7 +msgid "F_ont..." +msgstr "_Phông chữ..." 
+ +#: ../addressbook/printing/e-contact-print.glade.h:9 +msgid "Footer:" +msgstr "Chân trang:" + +#: web/template/resources_edit_main.tpl:16 +msgid "Format" +msgstr "Dạng thức" + +#: ../widgets/table/e-table-selection-model.c:318 ../src/menus.c:280 +#: ../src/orca/rolenames.py:498 +msgid "Header" +msgstr "Đầu trang" + +#: ../addressbook/printing/e-contact-print.glade.h:12 +msgid "Header/Footer" +msgstr "Đầu/Chân trang" + +#: ../addressbook/printing/e-contact-print.glade.h:13 +msgid "Headings" +msgstr "Tiêu đề" + +#: ../addressbook/printing/e-contact-print.glade.h:14 +msgid "Headings for each letter" +msgstr "Tiêu đề cho mỗi lá thư" + +#: ../glade/property.c:816 +msgid "Height:" +msgstr "Cao :" + +#: ../addressbook/printing/e-contact-print.glade.h:16 +msgid "Immediately follow each other" +msgstr "Theo ngay sau mỗi cái" + +#: ../addressbook/printing/e-contact-print.glade.h:17 +msgid "Include:" +msgstr "Gồm:" + +#: ../app/widgets/widgets-enums.c:54 ../plug-ins/print/gimp_main_window.c:494 +#: libexif/exif-entry.c:406 libexif/exif-entry.c:483 +msgid "Landscape" +msgstr "Nằm ngang" + +#: ../plug-ins/print/gimp_main_window.c:521 ../app/diapagelayout.c:225 +msgid "Left:" +msgstr "Trái:" + +#: ../addressbook/printing/e-contact-print.glade.h:20 +msgid "Letter tabs on side" +msgstr "Tab thư tại bên" + +#: ../app/diapagelayout.c:187 +msgid "Margins" +msgstr "Viền" + +#: ../glade/gbwidgets/gbhbuttonbox.c:132 ../src/form-editor/table-prop.cc:307 +msgid "Number of columns:" +msgstr "Số cột:" + +#: ../gtk/gtknotebook.c:405 ../src/orca/rolenames.py:328 +#, c-format +msgid "Page" +msgstr "Trang" + +#: ../addressbook/printing/e-contact-print.glade.h:26 +msgid "Page Setup:" +msgstr "Thiết lập trang:" + +#: ../gnome-cups-manager/gnome-cups-manager.glade.h:9 +msgid "Paper" +msgstr "Giấy" + +#: ../addressbook/printing/e-contact-print.glade.h:28 +msgid "Paper source:" +msgstr "Nguồn giấy:" + +#: ../plug-ins/print/gimp_main_window.c:493 ../app/preferences.c:135 +#: libexif/canon/mnote-canon-entry.c:104 libexif/exif-entry.c:406 +#: libexif/exif-entry.c:481 +msgid "Portrait" +msgstr "Thẳng đứng" + +#: ../addressbook/printing/e-contact-print.glade.h:30 +#: ../app/tools/gimptransformoptions.c:366 +#: ../glade/gnome/gnomepixmapentry.c:75 +msgid "Preview:" +msgstr "Xem thử :" + +#: ../addressbook/printing/e-contact-print.glade.h:31 +msgid "Print using gray shading" +msgstr "In bóng xám" + +#: ../addressbook/printing/e-contact-print.glade.h:32 +msgid "Reverse on even pages" +msgstr "Để nguyên trang chẵn" + +#: ../addressbook/printing/e-contact-print.glade.h:33 +#: ../app/diapagelayout.c:238 ../directed.xml.in.h:12 ../gok.glade2.h:105 +#: ../plug-ins/MapObject/mapobject_ui.c:1111 +#: ../plug-ins/print/gimp_main_window.c:547 +msgid "Right:" +msgstr "Phải:" + +#: ../addressbook/printing/e-contact-print.glade.h:34 +msgid "Sections:" +msgstr "Phần:" + +#: ../addressbook/printing/e-contact-print.glade.h:35 +msgid "Shading" +msgstr "Bóng" + +#: ../src/filexferdlg.c:99 ../src/filexferdlg.c:182 ../src/gtkfunc.c:261 +msgid "Size:" +msgstr "Cỡ :" + +#: ../addressbook/printing/e-contact-print.glade.h:37 +msgid "Start on a new page" +msgstr "Bắt đầu trang mới" + +#: ../addressbook/printing/e-contact-print.glade.h:38 +msgid "Style name:" +msgstr "Tên kiểu dáng:" + +#: ../plug-ins/print/gimp_main_window.c:534 ../app/diapagelayout.c:199 +msgid "Top:" +msgstr "Trên:" + +#: ../plug-ins/print/gimp_main_window.c:1216 ../app/preferences.c:144 +#: ../glade/property.c:813 +msgid "Width:" +msgstr "Rộng:" + +#:
../addressbook/printing/e-contact-print.glade.h:42 +msgid "_Font..." +msgstr "_Phông chữ..." + +#: ../addressbook/printing/test-contact-print-style-editor.c:53 +msgid "Contact Print Style Editor Test" +msgstr "Thử trình sửa đổi kiểu dáng in liên lạc" + +#: ../addressbook/printing/test-print.c:53 +msgid "Copyright (C) 2000, Ximian, Inc." +msgstr "Bản quyền © năm 2000, Ximian, Inc." + +#: ../addressbook/printing/test-contact-print-style-editor.c:56 +msgid "This should test the contact print style editor widget" +msgstr "Hành động này nên thử ra ô điều khiển sửa đổi kiểu dáng in liên lạc." + +#: ../addressbook/printing/test-print.c:52 +msgid "Contact Print Test" +msgstr "Kiểm thử In liên lạc" + +#: ../addressbook/printing/test-print.c:55 +msgid "This should test the contact print code" +msgstr "Hành động này nên thử ra mã nguồn in liên lạc." + +#: ../addressbook/tools/evolution-addressbook-export-list-folders.c:49 +msgid "Can not open file" +msgstr "Không thể mở tập tin" + +#: ../addressbook/tools/evolution-addressbook-export-list-folders.c:44 +#: ../addressbook/tools/evolution-addressbook-export-list-folders.c:43 +msgid "Couldn't get list of addressbooks" +msgstr "Không thể lấy danh sách các sổ địa chỉ" + +#: ../addressbook/tools/evolution-addressbook-export-list-folders.c:72 +#: ../addressbook/tools/evolution-addressbook-export-list-folders.c:71 +msgid "failed to open book" +msgstr "lỗi mở sổ" + +#: ../addressbook/tools/evolution-addressbook-export.c:56 +msgid "Specify the output file instead of standard output" +msgstr "Ghi rõ tập tin xuất thay vào thiết bị xuất chuẩn" + +#: ../addressbook/tools/evolution-addressbook-export.c:57 +msgid "OUTPUTFILE" +msgstr "TẬP_TIN_XUẤT" + +#: ../addressbook/tools/evolution-addressbook-export.c:58 +msgid "List local addressbook folders" +msgstr "Liệt kê các thư mục sổ địa chỉ địa phương" + +#: ../addressbook/tools/evolution-addressbook-export.c:60 +msgid "Show cards as vcard or csv file" +msgstr "" +"Hiển thị mọi thẻ dạng vCard (thẻ ảo) hoặc csv (định giới bằng dấu phẩy)" + +# Format name: do not translate/ tên dạng thức: đừng dịch +#: ../addressbook/tools/evolution-addressbook-export.c:60 +msgid "[vcard|csv]" +msgstr "[vcard|csv]" + +#: ../addressbook/tools/evolution-addressbook-export.c:61 +msgid "Export in asynchronous mode" +msgstr "Xuất theo chế độ không đồng bộ " + +#: ../addressbook/tools/evolution-addressbook-export.c:63 +msgid "" +"The number of cards in one output file in asychronous mode, default size 100." +msgstr "" +"Tổng số thẻ trong một tập tin kết xuất riêng lẻ trong chế độ không đồng bộ : " +"kích cỡ mặc định là 100." + +#: ../addressbook/tools/evolution-addressbook-export.c:91 +msgid "" +"Command line arguments error, please use --help option to see the usage." +msgstr "" +"Lỗi đối số dòng lệnh, hãy dùng tùy chọn « --help » (trợ giúp) để xem cách sử " +"dụng đúng." + +#: ../addressbook/tools/evolution-addressbook-export.c:105 +msgid "Only support csv or vcard format." +msgstr "Chỉ hỗ trợ dạng thức csv hoặc vCard (thẻ ảo)." + +#: ../addressbook/tools/evolution-addressbook-export.c:114 +msgid "In async mode, output must be file." +msgstr "Trong chế độ không đồng bộ, kết xuất phải là tập tin." + +#: ../addressbook/tools/evolution-addressbook-export.c:122 +msgid "In normal mode, there is no need for the size option." +msgstr "Trong chế độ thường, không cần tùy chọn về kích thước."
+ +#: ../addressbook/tools/evolution-addressbook-export.c:153 +msgid "Unhandled error" +msgstr "Không biết lỗi đó" + +#: ../addressbook/tools/evolution-addressbook-import.c:46 +msgid "Error loading default addressbook." +msgstr "Gặp lỗi khi tải sổ địa chỉ mặc định." + +#: ../addressbook/tools/evolution-addressbook-import.c:67 +msgid "Input File" +msgstr "Tập tin nhập" + +#: ../addressbook/tools/evolution-addressbook-import.c:82 +msgid "No filename provided." +msgstr "Chưa cung cấp tên tập tin." + +#: ../calendar/calendar.error.xml.h:1 +msgid "" +"Adding a meaningful summary to your appointment will give your recipients an " +"idea of what your appointment is about." +msgstr "" +"Việc thêm một Tóm tắt có nghĩa vào cuộc hẹn bạn sẽ cho người nhận biết ý " +"kiến về lý do của cuộc hẹn này." + +#: ../calendar/calendar.error.xml.h:2 +msgid "" +"Adding a meaningful summary to your task will give your recipients an idea " +"of what your task is about." +msgstr "" +"Việc thêm một Tóm tắt có nghĩa vào tác vụ bạn sẽ cho người nhận biết ý kiến " +"về lý do của tác vụ này." + +#: ../calendar/calendar.error.xml.h:3 ../calendar/calendar.error.xml.h:5 +msgid "" +"All information in these journal entries will be deleted and can not be " +"restored." +msgstr "Mọi thông tin của những mục nhật ký này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:4 ../calendar/calendar.error.xml.h:6 +msgid "" +"All information in this journal will be deleted and can not be restored." +msgstr "Mọi thông tin của nhật ký này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:5 ../calendar/calendar.error.xml.h:7 +msgid "" +"All information on these appointments will be deleted and can not be " +"restored." +msgstr "Mọi thông tin của những cuộc hẹn này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:6 ../calendar/calendar.error.xml.h:8 +msgid "All information on these tasks will be deleted and can not be restored." +msgstr "Mọi thông tin về những tác vụ này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:7 ../calendar/calendar.error.xml.h:9 +msgid "" +"All information on this appointment will be deleted and can not be restored." +msgstr "Mọi thông tin của cuộc hẹn này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:8 ../calendar/calendar.error.xml.h:10 +msgid "" +"All information on this journal entry will be deleted and can not be " +"restored." +msgstr "Mọi thông tin của mục nhật ký này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:9 ../calendar/calendar.error.xml.h:11 +msgid "" +"All information on this meeting will be deleted and can not be restored." +msgstr "Mọi thông tin của cuộc họp này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:10 ../calendar/calendar.error.xml.h:12 +msgid "All information on this task will be deleted and can not be restored." +msgstr "Mọi thông tin về tác vụ này sẽ bị xoá bỏ hoàn toàn." + +#: ../calendar/calendar.error.xml.h:11 ../calendar/calendar.error.xml.h:13 +msgid "Are you sure you want to delete the '{0}' task?" +msgstr "Bạn có chắc muốn xoá bỏ tác vụ « {0} » không?" + +#: ../calendar/calendar.error.xml.h:12 ../calendar/calendar.error.xml.h:14 +msgid "Are you sure you want to delete the appointment titled '{0}'?" +msgstr "Bạn có chắc muốn xoá bỏ cuộc hẹn tên « {0} » không?" + +#: ../calendar/calendar.error.xml.h:13 ../calendar/calendar.error.xml.h:15 +msgid "Are you sure you want to delete the journal entry '{0}'?"
+msgstr "Bạn có chắc muốn xoá bá» mục nhật ký « {0} » không?" + +#: ../calendar/calendar.error.xml.h:14 ../calendar/calendar.error.xml.h:16 +msgid "Are you sure you want to delete these {0} appointments?" +msgstr "Bạn có chắc muốn xoá bá» những {0} cuá»™c hẹn này không?" + +#: ../calendar/calendar.error.xml.h:15 ../calendar/calendar.error.xml.h:17 +msgid "Are you sure you want to delete these {0} journal entries?" +msgstr "Bạn có chắc muốn xoá bá» những {0} mục nhật ký này không?" + +#: ../calendar/calendar.error.xml.h:16 ../calendar/calendar.error.xml.h:18 +msgid "Are you sure you want to delete these {0} tasks?" +msgstr "Bạn có chắc muốn xoá bá» những {0} tác vụ này không?" + +#: ../calendar/calendar.error.xml.h:17 ../calendar/calendar.error.xml.h:19 +msgid "Are you sure you want to delete this appointment?" +msgstr "Bạn có chắc muốn xoá bá» cuá»™c hẹn này không?" + +#: ../calendar/calendar.error.xml.h:18 ../calendar/calendar.error.xml.h:20 +msgid "Are you sure you want to delete this journal entry?" +msgstr "Bạn có chắc muốn xoá bá» mục nhật ký này không?" + +#: ../calendar/calendar.error.xml.h:19 ../calendar/calendar.error.xml.h:21 +msgid "Are you sure you want to delete this meeting?" +msgstr "Bạn có chắc muốn xoá bá» cuá»™c há»p này không?" + +#: ../calendar/calendar.error.xml.h:20 ../calendar/calendar.error.xml.h:22 +msgid "Are you sure you want to delete this task?" +msgstr "Bạn có chắc muốn xoá bá» tác vụ này không?" + +#: ../calendar/calendar.error.xml.h:21 ../calendar/calendar.error.xml.h:23 +msgid "Are you sure you want to send the appointment without a summary?" +msgstr "Bạn có chắc muốn gởi thÆ° không có tóm tắt không? (Không đệ nghị.)" + +#: ../calendar/calendar.error.xml.h:22 ../calendar/calendar.error.xml.h:24 +msgid "Are you sure you want to send the task without a summary?" +msgstr "Bạn có chắc muốn gởi tác vụ không có tóm tắt không?" + +#: ../calendar/calendar.error.xml.h:23 ../calendar/calendar.error.xml.h:25 +msgid "Delete calendar '{0}'?" +msgstr "Xoá bá» lịch « {0} » không?" + +#: ../calendar/calendar.error.xml.h:24 +msgid "Delete memo list '{0}'?" +msgstr "Xoá bá» danh sách ghi nhá»› « {0} » không?" + +#: ../calendar/calendar.error.xml.h:25 ../calendar/calendar.error.xml.h:26 +msgid "Delete task list '{0}'?" +msgstr "Xoá bá» danh sách tác vụ « {0} » không?" + +#: ../calendar/calendar.error.xml.h:26 ../calendar/calendar.error.xml.h:28 +msgid "Don't Send" +msgstr "Không gởi" + +#: ../calendar/calendar.error.xml.h:27 ../calendar/calendar.error.xml.h:29 +msgid "Download in progress. Do you want to save the appointment?" +msgstr "Äang tải vá». Bạn có muốn lÆ°u cuá»™c hẹn không?" + +#: ../calendar/calendar.error.xml.h:28 ../calendar/calendar.error.xml.h:30 +msgid "Download in progress. Do you want to save the task?" +msgstr "Äang tải vá». Bạn có muốn lÆ°u tác vụ không?" + +#: ../calendar/calendar.error.xml.h:29 ../calendar/calendar.error.xml.h:31 +msgid "Editor could not be loaded." +msgstr "Không thể tải trình hiệu chỉnh." + +#: ../calendar/calendar.error.xml.h:30 ../calendar/calendar.error.xml.h:32 +msgid "" +"Email invitations will be sent to all participants and allow them to RSVP." +msgstr "" +"Lá»i má»i thÆ° Ä‘iện tá»­ sẽ được gởi cho má»i ngÆ°á»i dá»± và cho phép há» trả lá»i " +"trÆ°á»›c." + +#: ../calendar/calendar.error.xml.h:31 ../calendar/calendar.error.xml.h:33 +msgid "" +"Email invitations will be sent to all participants and allow them to accept " +"this task." 
+msgstr "" +"Lá»i má»i thÆ° Ä‘iện từ sẽ được gởi cho má»i ngÆ°á»i dá»± và cho phép há» chấp nhận " +"tác vụ này." + +#: ../calendar/calendar.error.xml.h:32 ../calendar/calendar.error.xml.h:34 +msgid "Error loading calendar" +msgstr "Gặp lá»—i khi tải lịch" + +#: ../calendar/calendar.error.xml.h:33 +msgid "Error loading memo list" +msgstr "Gặp lá»—i khi tải danh sách ghi nhá»›" + +#: ../calendar/calendar.error.xml.h:34 ../calendar/calendar.error.xml.h:35 +msgid "Error loading task list" +msgstr "Gặp lá»—i khi tải danh sách tác vụ" + +#: ../calendar/calendar.error.xml.h:35 ../calendar/calendar.error.xml.h:36 +msgid "" +"If you don't send a cancellation notice, the other participants may not know " +"the journal has been deleted." +msgstr "" +"Nếu bạn không gởi thông báo hủy bá», những ngÆ°á»i dá»± khác có thể sẽ không biết " +"nhật ký đã được xoá bá»." + +#: ../calendar/calendar.error.xml.h:36 ../calendar/calendar.error.xml.h:37 +msgid "" +"If you don't send a cancellation notice, the other participants may not know " +"the meeting is canceled." +msgstr "" +"Nếu bạn không gởi thông báo hủy bá», những ngÆ°á»i dá»± khác có thể sẽ không biết " +"cuá»™c há»p đã bị hủy bá»." + +#: ../calendar/calendar.error.xml.h:37 ../calendar/calendar.error.xml.h:38 +msgid "" +"If you don't send a cancellation notice, the other participants may not know " +"the task has been deleted." +msgstr "" +"Nếu bạn không gởi thông báo hủy bá», những ngÆ°á»i dá»± khác có thể sẽ không biết " +"tác vụ đã được xoá bá»." + +#: ../calendar/calendar.error.xml.h:39 ../calendar/calendar.error.xml.h:41 +msgid "Send Notice" +msgstr "Gởi thông báo" + +#: ../calendar/calendar.error.xml.h:40 ../calendar/calendar.error.xml.h:42 +msgid "" +"Sending updated information allows other participants to keep their " +"calendars up to date." +msgstr "" +"Gởi thông tin cập nhật cho phép những ngÆ°á»i dá»± khác cập nhật lại lịch của há»." + +#: ../calendar/calendar.error.xml.h:41 ../calendar/calendar.error.xml.h:43 +msgid "" +"Sending updated information allows other participants to keep their task " +"lists up to date." +msgstr "" +"Việc gởi thông tin cập nhật cho phép những ngÆ°á»i dá»± khác cập nhật danh sách " +"tác vụ của há»." + +#: ../calendar/calendar.error.xml.h:42 +msgid "" +"Some attachments are being downloaded. Saving the appointment would result " +"in the loss of these attachments." +msgstr "" +"Hiện thá»i Ä‘ang tải vá» má»™t số đính kèm. Khi lÆ°u cuá»™c hẹn này, sẽ cÅ©ng mất các " +"đính kèm này." + +#: ../calendar/calendar.error.xml.h:43 +msgid "" +"Some attachments are being downloaded. Saving the task would result in the " +"loss of these attachments." +msgstr "" +"Hiện thá»i Ä‘ang tải vá» má»™t số đính kèm. Khi lÆ°u tác vụ này, sẽ cÅ©ng mất các " +"đính kèm này." + +#: ../calendar/calendar.error.xml.h:44 +msgid "Some features may not work properly with your current server." +msgstr "" +"Có lẽ má»™t số tính năng sẽ không hoạt Ä‘á»™ng vá»›i máy phục vụ hiện thá»i của bạn." + +#: ../calendar/calendar.error.xml.h:45 +msgid "The Evolution calendar has quit unexpectedly." +msgstr "Lịch Evolution đã thoát bất ngá»." + +#: ../calendar/calendar.error.xml.h:46 +msgid "The Evolution tasks have quit unexpectedly." +msgstr "Tác vụ Evolution đã thoát bất ngá»." + +#: ../calendar/calendar.error.xml.h:47 +msgid "The calendar is not marked for offline usage." +msgstr "ChÆ°a đánh dấu lịch này để sá»­ dụng khi ngoại tuyến." 
+
+#: ../calendar/calendar.error.xml.h:48
+msgid "The memo list is not marked for offline usage"
+msgstr "Chưa đánh dấu danh sách ghi nhớ này để sử dụng khi ngoại tuyến."
+
+#: ../calendar/calendar.error.xml.h:49
+msgid "The task list is not marked for offline usage."
+msgstr "Chưa đánh dấu danh sách tác vụ này để sử dụng khi ngoại tuyến."
+
+#: ../calendar/calendar.error.xml.h:50 ../calendar/calendar.error.xml.h:49
+msgid "This calendar will be removed permanently."
+msgstr "Lịch này sẽ bị xoá bỏ hoàn toàn."
+
+#: ../calendar/calendar.error.xml.h:51
+msgid "This memo list will be removed permanently."
+msgstr "Danh sách ghi nhớ này sẽ bị xoá bỏ hoàn toàn."
+
+#: ../calendar/calendar.error.xml.h:52 ../calendar/calendar.error.xml.h:50
+msgid "This task list will be removed permanently."
+msgstr "Danh sách tác vụ này sẽ bị xoá bỏ hoàn toàn."
+
+#: ../calendar/calendar.error.xml.h:53 ../calendar/calendar.error.xml.h:51
+msgid "Would you like to save your changes to this appointment?"
+msgstr "Bạn có muốn lưu các thay đổi của cuộc hẹn này không?"
+
+#: ../calendar/calendar.error.xml.h:54 ../calendar/calendar.error.xml.h:52
+msgid "Would you like to save your changes to this task?"
+msgstr "Bạn có muốn lưu các thay đổi của tác vụ này không?"
+
+#: ../calendar/calendar.error.xml.h:55 ../calendar/calendar.error.xml.h:53
+msgid "Would you like to send a cancellation notice for this journal entry?"
+msgstr "Bạn có muốn gởi thông báo hủy bỏ cho mục nhật ký này không?"
+
+#: ../calendar/calendar.error.xml.h:56 ../calendar/calendar.error.xml.h:54
+msgid "Would you like to send all the participants a cancellation notice?"
+msgstr "Bạn có muốn gởi cho mọi người tham gia thông báo hủy bỏ không?"
+
+#: ../calendar/calendar.error.xml.h:57 ../calendar/calendar.error.xml.h:55
+msgid "Would you like to send meeting invitations to participants?"
+msgstr "Bạn có muốn gởi lời mời họp đến những người dự không?"
+
+#: ../calendar/calendar.error.xml.h:58 ../calendar/calendar.error.xml.h:56
+msgid "Would you like to send this task to participants?"
+msgstr "Bạn có muốn gởi tác vụ này cho những người dự không?"
+
+#: ../calendar/calendar.error.xml.h:59 ../calendar/calendar.error.xml.h:57
+msgid "Would you like to send updated meeting information to participants?"
+msgstr ""
+"Bạn có muốn gởi thông tin cuộc họp đã cập nhật cho những người dự không?"
+
+#: ../calendar/calendar.error.xml.h:60 ../calendar/calendar.error.xml.h:58
+msgid "Would you like to send updated task information to participants?"
+msgstr "Bạn có muốn gởi thông tin tác vụ đã cập nhật cho những người dự không?"
+
+#: ../calendar/calendar.error.xml.h:61
+msgid ""
+"You are connecting to an unsupported GroupWise server and may encounter "
+"problems using Evolution. For best results, the server should be upgraded to "
+"a supported version."
+msgstr ""
+"Bạn đang kết nối đến một máy phục vụ GroupWise không được hỗ trợ nên có lẽ "
+"sẽ gặp khó khăn khi sử dụng trình Evolution. Để được kết quả tốt nhất, bạn "
+"nên nâng cấp máy phục vụ lên một phiên bản được hỗ trợ."
+
+#: ../calendar/calendar.error.xml.h:62
+msgid "You have changed this appointment, but not yet saved them."
+msgstr "Bạn đã sửa đổi cuộc hẹn này, nhưng chưa lưu lại."
+
+#: ../calendar/calendar.error.xml.h:63 ../calendar/calendar.error.xml.h:61
+msgid "You have made changes to this task, but not yet saved them."
+msgstr "Bạn đã sửa đổi tác vụ này, nhưng chưa lưu lại."
+
+#: ../calendar/calendar.error.xml.h:64 ../calendar/calendar.error.xml.h:62
+msgid "Your calendars will not be available until Evolution is restarted."
+msgstr ""
+"Các lịch của bạn sẽ không sẵn sàng cho đến khi bạn khởi chạy lại Evolution."
+
+#: ../calendar/calendar.error.xml.h:65 ../calendar/calendar.error.xml.h:63
+msgid "Your tasks will not be available until Evolution is restarted."
+msgstr ""
+"Các tác vụ của bạn sẽ không sẵn sàng cho đến khi bạn khởi chạy lại Evolution."
+
+#: ../app/display.c:1149
+msgid "_Discard Changes"
+msgstr "_Hủy thay đổi"
+
+#: ../calendar/calendar.error.xml.h:68
+msgid "_Save Changes"
+msgstr "_Lưu thay đổi"
+
+# Variable: do not translate/ biến: đừng dịch
+#: ../calendar/calendar.error.xml.h:70 ../calendar/calendar.error.xml.h:66
+msgid "{0}."
+msgstr "{0}."
+
+#: ../smime/gui/component.c:48 ../modemlights/modemlights.glade.h:3
+#: ../lib/sunone-account.c:324
+msgid "Enter password"
+msgstr "Hãy gõ mật khẩu"
+
+#: ../calendar/conduits/calendar/calendar-conduit.c:246
+msgid "Split Multi-Day Events:"
+msgstr "Tách sự kiện nhiều ngày:"
+
+#: ../calendar/conduits/todo/todo-conduit.c:880
+msgid "Could not start evolution-data-server"
+msgstr "Không thể khởi động evolution-data-server (máy phục vụ dữ liệu)."
+
+#: ../calendar/conduits/calendar/calendar-conduit.c:1477
+msgid "Could not read pilot's Calendar application block"
+msgstr "Không thể đọc khối ứng dụng lịch của pilot."
+
+#: ../calendar/conduits/memo/memo-conduit.c:937
+#: ../calendar/conduits/memo/memo-conduit.c:940
+msgid "Could not read pilot's Memo application block"
+msgstr "Không thể đọc khối ứng dụng Ghi nhớ của pilot."
+
+#: ../calendar/conduits/memo/memo-conduit.c:976
+#: ../calendar/conduits/memo/memo-conduit.c:979
+msgid "Could not write pilot's Memo application block"
+msgstr "Không thể ghi khối ứng dụng Ghi nhớ của pilot."
+
+#: ../calendar/conduits/todo/todo-conduit.c:239
+#: ../calendar/conduits/todo/todo-conduit.c:234
+msgid "Default Priority:"
+msgstr "Độ ưu tiên mặc định:"
+
+#: ../calendar/conduits/todo/todo-conduit.c:962
+msgid "Could not read pilot's ToDo application block"
+msgstr "Không thể đọc khối ứng dụng ToDo (cần làm) của pilot."
+
+#: ../calendar/conduits/todo/todo-conduit.c:1151
+#: ../calendar/conduits/todo/todo-conduit.c:1154
+msgid "Could not write pilot's ToDo application block"
+msgstr "Không thể ghi khối ứng dụng ToDo (cần làm) của pilot."
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:1
+#: ../plugins/itip-formatter/itip-formatter.c:1968
+msgid "Calendar and Tasks"
+msgstr "Lịch và Tác vụ"
+
+#: ../calendar/gui/calendar-component.c:1307
+msgid "Calendars"
+msgstr "Lịch"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:3
+msgid "Configure your timezone, Calendar and Task List here "
+msgstr "Cấu hình múi giờ, Lịch và danh sách Tác vụ ở đây."
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:4
+msgid "Evolution Calendar and Tasks"
+msgstr "Lịch và Tác vụ Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:5
+msgid "Evolution Calendar configuration control"
+msgstr "Điều khiển cấu hình Lịch Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:6
+msgid "Evolution Calendar scheduling message viewer"
+msgstr "Bộ xem thông báo lập lịch Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:7
+msgid "Evolution Calendar/Task editor"
+msgstr "Bộ hiệu chỉnh Lịch/Tác vụ Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:8
+msgid "Evolution's Calendar component"
+msgstr "Thành phần Lịch Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:9
+msgid "Evolution's Memos component"
+msgstr "Thành phần Ghi nhớ của Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:10
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:9
+msgid "Evolution's Tasks component"
+msgstr "Thành phần Tác vụ Evolution"
+
+#: ../calendar/gui/GNOME_Evolution_Calendar.server.in.in.h:11
+msgid "Memo_s"
+msgstr "Ghi _nhớ"
+
+#: ../calendar/gui/memos-component.c:998 ../calendar/gui/memos-control.c:340
+msgid "Memos"
+msgstr "Ghi nhớ"
+
+#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:12
+#: ../pan/prefs.c:1730
+msgid "Tasks"
+msgstr "Tác vụ"
+
+#: ../src/GNOME_Evolution_BrainRead.server.in.in.h:8
+msgid "_Calendars"
+msgstr "_Lịch"
+
+#: ../src/planner-task-view.c:264
+msgid "_Tasks"
+msgstr "_Tác vụ"
+
+#: ../calendar/gui/alarm-notify/GNOME_Evolution_Calendar_AlarmNotify.server.in.in.h:1
+msgid "Evolution Calendar alarm notification service"
+msgstr "Dịch vụ báo động Lịch Evolution"
+
+#: ../calendar/gui/alarm-notify/alarm-notify-dialog.c:248
+#: ../objects/chronogram/chronoline.c:164
+#: ../objects/chronogram/chronoref.c:146
+msgid "Start time"
+msgstr "Thời điểm đầu"
+
+#: ../calendar/gui/alarm-notify/alarm-notify-dialog.c:356
+#: ../calendar/gui/alarm-notify/alarm-notify-dialog.c:347
+#, c-format
+msgid ""
+"%s\n"
+"%s until %s"
+msgstr ""
+"%s\n"
+"%s cho đến %s"
+
+#: ../calendar/gui/alarm-notify/alarm-notify.glade.h:1
+#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:4
+#: ../applets/clock/clock.c:1116
+msgid "Appointments"
+msgstr "Cuộc hẹn"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. Location
+#: ../extensions/page-info/page-info.glade.h:19 ../glade/straw.glade.h:30
+#: ../storage/sunone-itip-view.c:727
+msgid "Location:"
+msgstr "Địa điểm:"
+
+#: ../calendar/gui/alarm-notify/alarm-notify.glade.h:3
+msgid "Snooze _time:"
+msgstr "Thời gian _ngủ :"
+
+#: ../calendar/gui/alarm-notify/alarm-notify.glade.h:5
+msgid "_Snooze"
+msgstr "_Ngủ"
+
+#: ../calendar/gui/alarm-notify/alarm-notify.glade.h:7
+msgid "location of appointment"
+msgstr "địa điểm cuộc hẹn"
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1127
+msgid "Calendars"
+msgstr "Lịch"
+
+#: src/mainwin.cpp:1741 src/prefsdlg.cpp:1226 ../src/guikachu.glade.h:11
+#: ../src/preferences-win.cc:50 prefs_gui.c:334 po/silky.glade.h:143
+msgid "Preferences"
+msgstr "Tùy thích"
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1206
+msgid "_Configure Alarms"
+msgstr "_Cấu hình Báo động"
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1037
+msgid "No summary available."
+msgstr "Không có tóm tắt."
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1048
+msgid "No description available."
+msgstr "Không có mô tả."
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1056
+msgid "No location information available."
+msgstr "Không có thông tin địa điểm."
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1414
+#, c-format
+msgid "You have %d alarms"
+msgstr "Bạn có %d báo động"
+
+#: ../plug-ins/common/gtm.c:424 ../lib/message.c:80 ../lib/message.c:226
+#: ../src/mlview-validator-window.cc:443 ../widgets/gtk+.xml.in.h:215
+#: ../src/dialog-win-helpers.cc:378 app/gui-subs.c:589
+msgid "Warning"
+msgstr "Cảnh báo"
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1590
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1179
+msgid ""
+"Evolution does not support calendar reminders with\n"
+"email notifications yet, but this reminder was\n"
+"configured to send an email. Evolution will display\n"
+"a normal reminder dialog box instead."
+msgstr ""
+"Evolution chưa hỗ trợ bộ nhắc nhở lịch thông qua\n"
+"thư điện tử, nhưng mà bộ nhắc nhở này đã được\n"
+"cấu hình để gởi thư. Thay vào đó, Evolution\n"
+"sẽ hiển thị một hộp thoại nhắc nhở thông thường."
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1616
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1205
+#, c-format
+msgid ""
+"An Evolution Calendar reminder is about to trigger. This reminder is "
+"configured to run the following program:\n"
+"\n"
+" %s\n"
+"\n"
+"Are you sure you want to run this program?"
+msgstr ""
+"Lịch Evolution sắp nhắc nhở bạn. Bộ nhắc nhở này được cấu hình để chạy "
+"chương trình sau:\n"
+"\n"
+" %s\n"
+"\n"
+"Bạn có chắc muốn chạy chương trình này không?"
+
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1630
+#: ../calendar/gui/alarm-notify/alarm-queue.c:1219
+msgid "Do not ask me about this program again."
+msgstr "Đừng hỏi tôi về chương trình này lần nữa."
+
+#: ../providers/evolution/gda-evolution-connection.c:100
+msgid "Could not initialize Bonobo"
+msgstr "Không thể khởi động Bonobo"
+
+#: ../calendar/gui/alarm-notify/notify-main.c:153
+#: ../calendar/gui/alarm-notify/notify-main.c:150
+msgid "Could not create the alarm notify service factory"
+msgstr "Không thể tạo bộ tạo dịch vụ báo động"
+
+#: ../calendar/gui/alarm-notify/util.c:41
+msgid "invalid time"
+msgstr "thời gian không hợp lệ"
+
+#. Can't be zero
+#: ../calendar/gui/alarm-notify/util.c:58 ../calendar/gui/misc.c:105
+#, c-format
+msgid "(%d seconds)"
+msgstr "(%d giây)"
+
+#: ../calendar/gui/alarm-notify/util.c:64 ../calendar/gui/misc.c:111
+#, c-format
+msgid "(%d %s %d %s)"
+msgstr "(%d %s %d %s)"
+
+#: ../app/display/gimpdisplayshell-close.c:279 ../bin/ical-dump.c:81
+msgid "second"
+msgstr "giây"
+
+#: ../src/smart-playlist-dialog.c:169 ../gncal/gnomecal-prefs.c:1444
+#: ../gncal/gnomecal-prefs.c:1467
+msgid "seconds"
+msgstr "giây"
+
+#: ../calendar/gui/alarm-notify/util.c:66 ../calendar/gui/misc.c:113
+#, c-format
+msgid "(%d %s)"
+msgstr "(%d %s)"
+
+#: ../calendar/gui/alarm-notify/util.c:77 ../calendar/gui/misc.c:124
+#, c-format
+msgid " %u second"
+msgstr " %u giây"
+
+#: ../calendar/gui/alarm-notify/util.c:77 ../calendar/gui/misc.c:124
+#, c-format
+msgid " %u seconds"
+msgstr " %u giây"
+
+#: ../calendar/gui/alarm-notify/util.c:79 ../calendar/gui/misc.c:126
+#, c-format
+msgid " %u minute"
+msgstr " %u phút"
+
+#: ../calendar/gui/alarm-notify/util.c:79 ../calendar/gui/misc.c:126
+#, c-format
+msgid " %u minutes"
+msgstr " %u phút"
+
+#: ../calendar/gui/alarm-notify/util.c:81 ../calendar/gui/misc.c:128
+#, c-format
+msgid "%u hour"
+msgstr "%u giờ"
+
+#: ../calendar/gui/alarm-notify/util.c:81 ../calendar/gui/misc.c:128
+#, c-format
+msgid "%u hours"
+msgstr "%u giờ"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:1
+msgid "Alarm programs"
+msgstr "Chương trình báo động"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:2
+msgid "Ask for confirmation when deleting items"
+msgstr "Hỏi xác nhận khi xoá bỏ mục"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:3
+msgid "Background color of tasks that are due today, in \"#rrggbb\" format."
+msgstr "Màu nền của mọi tác vụ hết hạn hôm nay, có dạng « #rrggbb »."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:4
+msgid "Background color of tasks that are overdue, in \"#rrggbb\" format."
+msgstr "Màu nền của mọi tác vụ quá hạn, có dạng « #rrggbb »."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:5
+msgid "Calendars to run alarms for"
+msgstr "Lịch cần chạy báo động"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:6
+msgid ""
+"Color to draw the Marcus Bains Line in the Time bar (empty for default)."
+msgstr ""
+"Màu cần vẽ Dòng Marcus Bains trong thanh Thời gian (bỏ rỗng để chọn mặc "
+"định)."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:7
+msgid "Color to draw the Marcus Bains line in the Day View."
+msgstr "Màu cần vẽ Dòng Marcus Bains trong khung xem Ngày."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:8
+msgid "Compress weekends in month view"
+msgstr "Nén các ngày cuối tuần trong khung xem tháng"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:9
+msgid "Confirm expunge"
+msgstr "Xác nhận khi xoá hẳn"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:10
+msgid "Days on which the start and end of work hours should be indicated."
+msgstr "Những ngày cần hiển thị giờ bắt đầu và kết thúc làm việc."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:11
+msgid "Default appointment reminder"
+msgstr "Bộ nhắc nhở cuộc hẹn mặc định"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:12
+msgid "Default reminder units"
+msgstr "Đơn vị nhắc nhở mặc định"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:13
+msgid "Default reminder value"
+msgstr "Giá trị nhắc nhở mặc định"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:14
+msgid "Free/busy server urls"
+msgstr "Địa chỉ Mạng của máy phục vụ Rảnh/Bận"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:15
+msgid "Free/busy template url"
+msgstr "Địa chỉ Mạng mẫu Rảnh/Bận"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:16
+msgid "Hide completed tasks"
+msgstr "Ẩn mọi tác vụ hoàn tất"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:17
+msgid "Hide task units"
+msgstr "Ẩn đơn vị tác vụ"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:18
+msgid "Hide task value"
+msgstr "Ẩn giá trị tác vụ"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:19
+msgid "Horizontal pane position"
+msgstr "Vị trí ô cửa sổ ngang"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:20
+msgid "Hour the workday ends on, in twenty four hour format, 0 to 23."
+msgstr "Giờ kết thúc ngày làm việc, có dạng 24 giờ (0-23)."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:21
+msgid "Hour the workday starts on, in twenty four hour format, 0 to 23."
+msgstr "Giờ bắt đầu ngày làm việc, có dạng 24 giờ (0-23)."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:22
+msgid "Intervals shown in Day and Work Week views, in minutes."
+msgstr ""
+"Hộp thời gian được hiển thị trong khung xem Ngày/Tuần làm việc, theo phút."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:23
+msgid "Last alarm time"
+msgstr "Giờ báo động cuối cùng"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:24
+msgid "List of server urls for free/busy publishing."
+msgstr "Danh sách các địa chỉ Mạng máy phục vụ cho xuất thông tin Rảnh/Bận."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:25
+msgid "Marcus Bains Line"
+msgstr "Dòng Marcus Bains"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:26
+msgid "Marcus Bains Line Color - Day View"
+msgstr "Màu Dòng Marcus Bains — Khung xem ngày"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:27
+msgid "Marcus Bains Line Color - Time bar"
+msgstr "Màu Dòng Marcus Bains — Thanh thời gian"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:28
+msgid "Minute the workday ends on, 0 to 59."
+msgstr "Phút kết thúc ngày làm việc, 0-59."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:29
+msgid "Minute the workday starts on, 0 to 59."
+msgstr "Phút bắt đầu ngày làm việc, 0-59."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:30
+msgid "Month view horizontal pane position"
+msgstr "Vị trí của ô cửa sổ ngang trong khung xem tháng"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:31
+msgid "Month view vertical pane position"
+msgstr "Vị trí của ô cửa sổ dọc trong khung xem tháng"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:32
+msgid "Number of units for determining for a default reminder."
+msgstr "Tổng số đơn vị để quyết định lúc nào nhắc nhở mặc định."
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:33
+msgid "Number of units for determining when to hide tasks."
+msgstr "Tổng số Ä‘Æ¡n vị để quyết định lúc nào nên ẩn cá»™ng việc." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:34 +msgid "Overdue tasks color" +msgstr "Màu của tác vụ quá hạn" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:35 +msgid "" +"Position of the horizontal pane, between the date navigator calendar and the " +"task list when not in the month view, in pixels." +msgstr "" +"Ví trị của ô cá»­a sổ ngang, giữa lịch duyệt ngày và danh sách tác vụ khi " +"không phải trong khung xem tháng, theo Ä‘iểm ảnh." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:36 +msgid "" +"Position of the horizontal pane, between the view and the date navigator " +"calendar and task list in the month view, in pixels." +msgstr "" +"Ví trị của ô cá»­a sổ ngang, giữa khung xem và lịch duyệt ngày và danh sách " +"tác vụ khi trong khung xem tháng, theo Ä‘iểm ảnh." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:37 +msgid "" +"Position of the vertical pane, between the task list and the task preview " +"pane, in pixels." +msgstr "" +"Vị trí của ô cá»­a sổ dá»c, giữa danh sách tác vụ và khung xem cá»™ng việc, theo " +"Ä‘iểm ảnh." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:38 +msgid "" +"Position of the vertical pane, between the view and the date navigator " +"calendar and task list in the month view, in pixels." +msgstr "" +"Ví trị của ô cá»­a sổ dá»c, giữa khung xem và lịch duyệt ngày và danh sách công " +"việc khi trong khung xem tháng, theo Ä‘iểm ảnh." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:39 +msgid "" +"Position of the vertical pane, between the view and the date navigator " +"calendar and task list when not in the month view, in pixels." +msgstr "" +"Ví trị của ô cá»­a sổ ngang, giữa lịch duyệt ngày và danh sách tác vụ khi " +"không phải trong khung xem tháng, theo Ä‘iểm ảnh." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:40 +msgid "Programs that are allowed to be run by alarms." 
+msgstr "ChÆ°Æ¡ng trình có chạy được vá»›i bảo Ä‘á»™ng" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:41 +msgid "Show RSVP field in the event/task/meeting editor" +msgstr "Hiện trÆ°á»ng RSVP trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:42 +msgid "Show Role field in the event/task/meeting editor" +msgstr "Hiện trÆ°á»ng Vai trò trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:43 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:41 +msgid "Show appointment end times in week and month views" +msgstr "Hiện thá»i Ä‘iểm kết thúc cuá»™c hẹn trong khung xem tuần và tháng Ä‘á»u" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:44 +msgid "Show categories field in the event/meeting/task editor" +msgstr "Hiện trÆ°á»ng Hạng trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:45 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:42 +msgid "Show display alarms in notification tray" +msgstr "Hiển thị báo Ä‘á»™ng trong khay thông báo" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:46 +msgid "Show status field in the event/task/meeting editor" +msgstr "Hiện trÆ°á»ng Trạng thái trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:47 +#: ../mail/evolution-mail.schemas.in.in.h:73 +msgid "Show the \"Preview\" pane" +msgstr "Hiện ô « Xem thá»­ »" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:48 +#: ../mail/evolution-mail.schemas.in.in.h:74 +msgid "Show the \"Preview\" pane." +msgstr "Hiện ô « Xem thá»­ »." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:49 +msgid "Show timezone field in the event/meeting editor" +msgstr "Hiện trÆ°á»ng Múi giá» trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:50 +msgid "Show type field in the event/task/meeting editor" +msgstr "Hiện trÆ°á»ng Kiểu trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:51 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:43 +msgid "Show week numbers in date navigator" +msgstr "Hiện số thứ tá»± tuần trong bá»™ duyệt ngày" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:52 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:44 +msgid "Tasks due today color" +msgstr "Màu của tác vụ hết hạn vào hôm nay" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:53 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:45 +msgid "Tasks vertical pane position" +msgstr "Ví trị ô cá»­a sổ dá»c tác vụ" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:54 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:46 +msgid "" +"The default timezone to use for dates and times in the calendar, as an " +"untranslated Olsen timezone database location like \"America/New York\"." +msgstr "" +"Múi giá» mặc định cần dùng cho ngày và giá» trong lịch, là ví trị cÆ¡ sở dữ " +"liệu kiểu Olsen chÆ°a dịch nhÆ° « Asia/Hanoi » (Châu Ã/Hà ná»™i)." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:56 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:48 +#, no-c-format +msgid "" +"The url template to use as a free/busy data fallback, %u is replaced by the " +"user part of the mail address and %d is replaced by the domain." 
+msgstr "" +"Mẫu địa chỉ Mạng cần dùng là dữ liệu Rảnh/Bận phục hồi: « %u » được thay thế " +"bằng phần ngÆ°á»i dùng của địa chỉ thÆ°, và « %d » được thay thế bằng miá»n của " +"địa chỉ đó." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:57 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:49 +msgid "Time divisions" +msgstr "Chia thá»i gian" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:58 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:50 +msgid "Time the last alarm ran, in time_t." +msgstr "GiỠđã bảo Ä‘á»™ng cuối cùng, theo time_t" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:59 +#: timeanddate.c:590 timeanddate.c:599 ../Sensors/Clock/__init__.py:106 +#: src/settings.c:1270 +msgid "Timezone" +msgstr "Múi giá»" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:60 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:52 +msgid "Twenty four hour time format" +msgstr "Äịnh dạng thá»i gian 24 giá»" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:61 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:53 +msgid "Units for a default reminder, \"minutes\", \"hours\" or \"days\"." +msgstr "ÄÆ¡n vị cho bá»™ nhắc nhở mặc định: « phút », « giỠ» hay « ngày »." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:62 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:54 +msgid "" +"Units for determining when to hide tasks, \"minutes\", \"hours\" or \"days\"." +msgstr "" +"ÄÆ¡n vị để quyết định lúc nào ẩn tác vụ : « phút », « giỠ» hay « ngày »." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:64 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:56 +msgid "Week start" +msgstr "Tuần bắt đầu" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:65 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:57 +msgid "Weekday the week starts on, from Sunday (0) to Saturday (6)." +msgstr "Hôm bắt đầu tuần, từ Chủ Nhật (0) đến Thứ Bảy (6)." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:66 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:58 +msgid "Whether or not to use the notification tray for display alarms." +msgstr "Có nên dùng khay thông báo để hiển thị báo Ä‘á»™ng hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:67 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:59 +msgid "Whether to ask for confirmation when deleting an appointment or task." +msgstr "Có nên há»i xác nhận khi xoá bá» cuá»™c hẹn hay tác vụ hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:68 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:60 +msgid "Whether to ask for confirmation when expunging appointments and tasks." +msgstr "Có nên há»i xác nhận khi xoá hắn cuá»™c hẹn và tác vụ hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:69 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:61 +msgid "" +"Whether to compress weekends in the month view, which puts Saturday and " +"Sunday in the space of one weekday." +msgstr "" +"Có nên nén những ngày cuối tuần trong khung xem tháng, mà hiển thị hai ngày " +"Thứ Bảy và Chủ Nhật Ä‘á»u là cùng cách của má»™t ngày tuần." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:70 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:62 +msgid "Whether to display the end time of events in the week and month views." 
+msgstr "" +"Có nên hiển thị thá»i Ä‘iểm kết thúc sá»± kiện trong khung xem tuần và tháng Ä‘á»u " +"hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:71 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:63 +msgid "" +"Whether to draw the Marcus Bains Line (line at current time) in the calendar." +msgstr "" +"Có nên vẽ Dòng Marcus Bains (dòng tại giá» hiện có) trong lịch hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:72 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:64 +msgid "Whether to hide completed tasks in the tasks view." +msgstr "Có nên ẩn má»i tác vụ đã hoàn tất trong khung xem tác vụ hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:73 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:65 +msgid "Whether to set a default reminder for appointments." +msgstr "Có nên lập bá»™ nhắc nhở mặc định cho má»i cuá»™c hẹn hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:74 +msgid "Whether to show RSVP field in the event/task/meeting editor" +msgstr "" +"Có nên hiển thị trÆ°á»ng RSVP trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện hay " +"không" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:75 +msgid "Whether to show categories field in the event/meeting editor" +msgstr "" +"Có nên hiển thị trÆ°á»ng loại trong bá»™ hiệu chỉnh cuá»™c há»p/sá»± kiện hay không" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:76 +msgid "Whether to show role field in the event/task/meeting editor" +msgstr "" +"Có nên hiển thị trÆ°á»ng Vai trò trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện " +"hay không" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:77 +msgid "Whether to show status field in the event/task/meeting editor" +msgstr "" +"Có nên hiển thị trÆ°á»ng trạng thái trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/dữ " +"kiện hay không" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:78 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:66 +msgid "" +"Whether to show times in twenty four hour format instead of using am/pm." +msgstr "" +"Có nên hiển thị giá» dạng 24-giá» thay vào dùng am/pm (buổi sáng/buổi chiá»u-" +"tối) hay không." + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:79 +msgid "Whether to show timezone field in the event/meeting editor" +msgstr "" +"Có nên hiển thị trÆ°á»ng múi giá» trong bá»™ hiệu chỉnh cuá»™c há»p/sá»± kiện hay không" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:80 +msgid "Whether to show type field in the event/task/meeting editor" +msgstr "" +"Có nên hiển thị trÆ°á»ng kiểu trong bá»™ hiệu chỉnh cuá»™c há»p/tác vụ/sá»± kiện hay " +"không" + +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:81 +#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:67 +msgid "Whether to show week numbers in the date navigator." +msgstr "Có nên hiển thị số thứ tá»± tuần trong bá»™ duyệt ngày hay không." 
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:82
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:68
+msgid "Work days"
+msgstr "Ngày làm việc"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:83
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:69
+msgid "Workday end hour"
+msgstr "Giờ kết thúc ngày làm việc"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:84
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:70
+msgid "Workday end minute"
+msgstr "Phút kết thúc ngày làm việc"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:85
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:71
+msgid "Workday start hour"
+msgstr "Giờ bắt đầu ngày làm việc"
+
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:86
+#: ../calendar/gui/apps_evolution_calendar.schemas.in.in.h:72
+msgid "Workday start minute"
+msgstr "Phút bắt đầu ngày làm việc"
+
+#: ../calendar/gui/cal-search-bar.c:48 ../calendar/gui/cal-search-bar.c:49
+msgid "Summary contains"
+msgstr "Tóm tắt chứa"
+
+#: ../calendar/gui/cal-search-bar.c:49 ../calendar/gui/cal-search-bar.c:50
+msgid "Description contains"
+msgstr "Mô tả chứa"
+
+#: ../calendar/gui/cal-search-bar.c:51
+msgid "Comment contains"
+msgstr "Ghi chú chứa"
+
+#: ../calendar/gui/cal-search-bar.c:52
+msgid "Location contains"
+msgstr "Địa điểm chứa"
+
+#: ../calendar/gui/cal-search-bar.c:350 ../camel/camel-vee-store.c:100
+#: ../camel/camel-vee-store.c:343
+msgid "Unmatched"
+msgstr "Không khớp"
+
+#: ../libgnomeui/gnome-dateedit.c:775 ../glade/gbwidgets/gbcalendar.c:266
+#: ../widgets/gtk+.xml.in.h:28 ../src/orca/rolenames.py:168
+msgid "Calendar"
+msgstr "Lịch"
+
+#: ../calendar/gui/calendar-commands.c:348
+#: ../calendar/gui/calendar-commands.c:354
+msgid ""
+"This operation will permanently erase all events older than the selected "
+"amount of time. If you continue, you will not be able to recover these "
+"events."
+msgstr ""
+"Thao tác này sẽ xoá bỏ hoàn toàn mọi sự kiện trước khoảng thời gian được "
+"chọn. Nếu bạn tiếp tục thì sẽ không thể phục hồi những sự kiện này."
+
+#: ../calendar/gui/calendar-commands.c:354
+#: ../calendar/gui/calendar-commands.c:360
+msgid "Purge events older than"
+msgstr "Tẩy mọi sự kiện trước"
+
+#: ../src/smart-playlist-dialog.c:162
+msgid "days"
+msgstr "ngày"
+
+#: ../calendar/gui/migration.c:582
+msgid "On The Web"
+msgstr "Trên Mạng"
+
+#: ../calendar/gui/calendar-component.c:251 ../calendar/gui/migration.c:396
+#: ../calendar/gui/calendar-component.c:252 ../calendar/gui/migration.c:391
+msgid "Birthdays & Anniversaries"
+msgstr "Sinh nhật và Kỷ niệm"
+
+#: ../calendar/gui/calendar-component.c:258 ../Sensors/Weather/__init__.py:129
+msgid "Weather"
+msgstr "Thời tiết"
+
+#: ../calendar/gui/calendar-component.c:534
+msgid "_New Calendar"
+msgstr "Lịch _mới"
+
+#: ../calendar/gui/calendar-component.c:838
+#: ../calendar/gui/calendar-component.c:858
+msgid "Failed upgrading calendars."
+msgstr "Không nâng cấp lịch được."
+
+#: ../calendar/gui/calendar-component.c:1137
+#: ../calendar/gui/calendar-component.c:1154
+#, c-format
+msgid "Unable to open the calendar '%s' for creating events and meetings"
+msgstr "Không thể mở lịch « %s » để tạo sự kiện và cuộc họp."
+
+#: ../calendar/gui/calendar-component.c:1150
+#: ../calendar/gui/calendar-component.c:1170
+msgid "There is no calendar available for creating events and meetings"
+msgstr "Không có lịch nào sẵn sàng để tạo sự kiện và cuộc họp."
+
+#: ../calendar/gui/calendar-component.c:1264
+#: ../calendar/gui/calendar-component.c:1282
+msgid "Calendar Source Selector"
+msgstr "Bộ chọn nguồn lịch"
+
+#: ../calendar/gui/calendar-component.c:1455 main.c:261
+#: ../calendar/gui/calendar-component.c:1473
+msgid "New appointment"
+msgstr "Cuộc hẹn mới"
+
+#: ../calendar/gui/calendar-component.c:1456
+#: ../calendar/gui/calendar-component.c:1474
+msgid "_Appointment"
+msgstr "_Cuộc hẹn"
+
+#: ../calendar/gui/calendar-component.c:1457
+#: ../calendar/gui/calendar-component.c:1475
+#: ../gncal/gnomecal-main-window.c:537 ../gncal/gnomecal-main-window.c:571
+msgid "Create a new appointment"
+msgstr "Tạo cuộc hẹn mới"
+
+#: ../calendar/gui/calendar-component.c:1463
+#: ../calendar/gui/calendar-component.c:1481
+msgid "New meeting"
+msgstr "Cuộc họp mới"
+
+#: ../calendar/gui/calendar-component.c:1464
+#: ../calendar/gui/calendar-component.c:1482
+msgid "M_eeting"
+msgstr "_Cuộc họp"
+
+#: ../calendar/gui/calendar-component.c:1465
+#: ../calendar/gui/calendar-component.c:1483
+msgid "Create a new meeting request"
+msgstr "Tạo yêu cầu cuộc họp mới"
+
+#: ../calendar/gui/calendar-component.c:1471
+#: ../calendar/gui/calendar-component.c:1489
+msgid "New all day appointment"
+msgstr "Cuộc hẹn nguyên ngày mới"
+
+#: ../calendar/gui/calendar-component.c:1472
+#: ../calendar/gui/calendar-component.c:1490
+msgid "All Day A_ppointment"
+msgstr "Cuộc hẹn _nguyên ngày"
+
+#: ../calendar/gui/calendar-component.c:1473
+#: ../calendar/gui/calendar-component.c:1491
+msgid "Create a new all-day appointment"
+msgstr "Tạo cuộc hẹn nguyên ngày mới"
+
+#: ../calendar/gui/calendar-component.c:1479
+#: ../calendar/gui/calendar-component.c:1497
+msgid "New calendar"
+msgstr "Lịch mới"
+
+#: ../calendar/gui/calendar-component.c:1498
+msgid "Cale_ndar"
+msgstr "_Lịch"
+
+#: ../gncal/gnomecal-main-window.c:522
+msgid "Create a new calendar"
+msgstr "Tạo lịch mới"
+
+#: ../calendar/gui/calendar-view-factory.c:109 main.c:284
+#: ../gncal/calendar-widget.c:377
+msgid "Day View"
+msgstr "Xem ngày"
+
+#: ../calendar/gui/calendar-view-factory.c:112
+msgid "Work Week View"
+msgstr "Xem tuần làm việc"
+
+#: ../calendar/gui/calendar-view-factory.c:115 main.c:292
+#: ../gncal/calendar-widget.c:390
+msgid "Week View"
+msgstr "Xem tuần"
+
+#: ../calendar/gui/calendar-view-factory.c:118 main.c:300
+#: ../gncal/calendar-widget.c:404
+msgid "Month View"
+msgstr "Xem tháng"
+
+#: ../calendar/gui/comp-editor-factory.c:420
+msgid "Error while opening the calendar"
+msgstr "Gặp lỗi khi mở lịch"
+
+#: ../calendar/gui/comp-editor-factory.c:426
+msgid "Method not supported when opening the calendar"
+msgstr "Không hỗ trợ phương thức đó khi mở lịch này."
+
+#: ../calendar/gui/comp-editor-factory.c:432
+msgid "Permission denied to open the calendar"
+msgstr "Không đủ quyền truy cập để mở lịch"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:1
+msgid "Alarm"
+msgstr "Báo động"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:2 ../eog.glade.h:7
+#: ../gnome-screenshot/gnome-panel-screenshot.glade.h:2
+msgid "Options"
+msgstr "Tùy chọn"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:3
+msgid "Repeat"
+msgstr "Lặp lại"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:4
+msgid "Add Alarm"
+msgstr "Thêm Báo động"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:5
+msgid "Custom _message"
+msgstr "Thông điệp tự _chọn"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:6
+msgid "Custom alarm sound"
+msgstr "Âm thanh báo động tự chọn"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:7
+msgid "Mes_sage:"
+msgstr "_Thông điệp:"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:8
+#: ../calendar/gui/e-alarm-list.c:444
+msgid "Play a sound"
+msgstr "Phát âm thanh"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:9
+#: ../calendar/gui/e-alarm-list.c:448
+msgid "Pop up an alert"
+msgstr "Bật lên báo động"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:10
+#: ../calendar/gui/e-alarm-list.c:456
+msgid "Run a program"
+msgstr "Chạy chương trình"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:11
+msgid "Send To:"
+msgstr "Gởi đến:"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:12
+#: ../calendar/gui/e-alarm-list.c:452
+msgid "Send an email"
+msgstr "Gởi thư"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:13
+msgid "_Arguments:"
+msgstr "_Đối số :"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:14 src/interface.c:240
+msgid "_Program:"
+msgstr "_Chương trình:"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:15
+msgid "_Repeat the alarm"
+msgstr "_Lặp lại báo động"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:16
+msgid "_Sound:"
+msgstr "_Âm thanh:"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:17
+#: ../plug-ins/winsnap/winsnap.c:911 ../ui/prefs.glade.h:39
+msgid "after"
+msgstr "sau"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:18
+msgid "before"
+msgstr "trước (khi)"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:19
+#: ../calendar/gui/dialogs/recurrence-page.glade.h:7
+#: ../data/SoftwareProperties.glade.h:9
+msgid "day(s)"
+msgstr "ngày"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:21
+msgid "end of appointment"
+msgstr "kết thúc cuộc hẹn"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:22
+msgid "extra times every"
+msgstr "lần thêm nữa mỗi"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:23
+msgid "hour(s)"
+msgstr "giờ"
+
+#: ../src/smart-playlist-dialog.c:184
+msgid "hours"
+msgstr "giờ"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:25
+msgid "minute(s)"
+msgstr "phút"
+
+#: ../calendar/gui/dialogs/alarm-dialog.glade.h:27
+msgid "start of appointment"
+msgstr "bắt đầu cuộc hẹn"
+
+#: ../calendar/gui/dialogs/alarm-list-dialog.c:200
+#: ../calendar/gui/dialogs/alarm-list-dialog.c:199
+msgid "Action/Trigger"
+msgstr "Hành động/Gây ra"
+
+#: ../calendar/gui/dialogs/alarm-list-dialog.glade.h:2
+#: ../calendar/gui/dialogs/event-page.glade.h:4
+#: ../ui/evolution-event-editor.xml.h:1 ../gncal/gnomecal-prefs.c:1861
+#: prefs_gui.c:376
+msgid "Alarms"
+msgstr "Báo động"
+
+#: ../composer/e-msg-composer-select-file.c:116
+msgid "Suggest automatic display of attachment"
+msgstr "Đề nghị tự động hiển thị đính kèm"
+
+#: ../calendar/gui/dialogs/cal-attachment-select-file.c:190
+#: ../composer/e-msg-composer-select-file.c:238
+msgid "Attach file(s)"
+msgstr "Đính kèm tập tin"
+
+# #: ../widgets/misc/e-attachment.glade.h:2
+msgid "Attachment Properties"
+msgstr "Thuộc tính đính kèm"
+
+#: ../plug-ins/gimpressionist/presets.c:646 ../app/sheets_dialog.c:599
+#: ../app/sheets_dialog.c:692 ../app/sheets_dialog.c:600
+#: ../app/sheets_dialog.c:693 ../glade/glade_atk.c:640 ../src/gtkfunc.c:432
+msgid "Description:"
+msgstr "Mô tả:"
+
+#: ../extensions/page-info/page-info.glade.h:20 src/gtkam-info.c:474
+msgid "MIME type:"
+msgstr "Kiểu MIME:"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:1
+msgid ""
+"60 minutes\n"
+"30 minutes\n"
+"15 minutes\n"
+"10 minutes\n"
+"05 minutes"
+msgstr ""
+"60 phút\n"
+"30 phút\n"
+"15 phút\n"
+"10 phút\n"
+"05 phút"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:6
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:10
+msgid "Publishing"
+msgstr "Xuất"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:8
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:15
+#, no-c-format
+msgid ""
+"%u and %d will be replaced by user and domain from the email address."
+msgstr ""
+"« %u » và « %d » sẽ được thay thế bằng phần người dùng và miền lấy từ địa "
+"chỉ thư."
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:9
+#: ../mail/mail-config.glade.h:9
+msgid "Alerts"
+msgstr "Báo động"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:10
+msgid "Default Free/Busy Server"
+msgstr "Máy phục vụ Rảnh/Bận mặc định"
+
+#: ../extensions/actions/action-properties.glade.h:2
+msgid "General"
+msgstr "Chung"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:12
+msgid "Task List"
+msgstr "Danh sách Tác vụ"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:13
+msgid "Time"
+msgstr "Giờ"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:14
+msgid "Work Week"
+msgstr "Tuần làm việc"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:15
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:19
+msgid "Day _ends:"
+msgstr "Ngày _kết thúc:"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:22
+msgid "E_nable"
+msgstr "_Bật"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:23
+msgid "Free/Busy"
+msgstr "Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:733 ../calendar.inc.php:12
+#: datebook_gui.c:1554
+msgid "Friday"
+msgstr "Thứ Sáu"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:21
+msgid ""
+"Minutes\n"
+"Hours\n"
+"Days"
+msgstr ""
+"Phút\n"
+"Giờ\n"
+"Ngày"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:24
+#: ../calendar/gui/dialogs/recurrence-page.c:1046
+#: ../calendar/gui/e-itip-control.c:729 ../calendar.inc.php:10
+#: datebook_gui.c:1550 prefs.c:414
+msgid "Monday"
+msgstr "Thứ Hai"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:25
+msgid ""
+"Monday\n"
+"Tuesday\n"
+"Wednesday\n"
+"Thursday\n"
+"Friday\n"
+"Saturday\n"
+"Sunday"
+msgstr ""
+"Thứ Hai\n"
+"Thứ Ba\n"
+"Thứ Tư\n"
+"Thứ Năm\n"
+"Thứ Sáu\n"
+"Thứ Bảy\n"
+"Chủ Nhật"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:32
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:29
+msgid "Publishing Table"
+msgstr "Bảng xuất"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:33
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:30
+msgid "S_un"
+msgstr "_CN"
+
+#: ../calendar/gui/e-itip-control.c:734 ../calendar.inc.php:13
+#: datebook_gui.c:1555
+msgid "Saturday"
+msgstr "Thứ Bảy"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:35
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:32
+msgid "Sh_ow a reminder"
+msgstr "_Hiện bộ nhắc nhở"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:36
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:33
+msgid "Show week _numbers in date navigator"
+msgstr "Hiện _số thứ tự tuần trong bộ duyệt ngày"
+
+#: ../calendar/gui/e-itip-control.c:728 ../calendar.inc.php:10
+#: datebook_gui.c:1549 prefs.c:413
+msgid "Sunday"
+msgstr "Chủ Nhật"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:38
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:35
+msgid "T_asks due today:"
+msgstr "_Tác vụ hết hạn vào hôm nay:"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:39
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:36
+msgid "T_hu"
+msgstr "_Năm"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:40
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:37
+msgid "Template:"
+msgstr "Mẫu :"
+
+#: ../calendar/gui/e-itip-control.c:732 ../calendar.inc.php:12
+#: datebook_gui.c:1553
+msgid "Thursday"
+msgstr "Thứ Năm"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:39
+msgid "Time _zone:"
+msgstr "Múi _giờ :"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:43
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:40
+msgid "Time format:"
+msgstr "Dạng thức giờ :"
+
+#: ../calendar/gui/e-itip-control.c:730 ../calendar.inc.php:11
+#: datebook_gui.c:1551
+msgid "Tuesday"
+msgstr "Thứ Ba"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:45
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:42
+msgid "W_eek starts:"
+msgstr "Tuần _bắt đầu :"
+
+#: ../calendar/gui/e-itip-control.c:731 ../calendar.inc.php:11
+#: datebook_gui.c:1552
+msgid "Wednesday"
+msgstr "Thứ Tư"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:47
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:44
+msgid "Work days:"
+msgstr "Ngày làm việc:"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:48
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:45
+msgid "_12 hour (AM/PM)"
+msgstr "_12 giờ (AM/PM: sáng/chiều)"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:49
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:46
+msgid "_24 hour"
+msgstr "_24 giờ"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:50
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:47
+msgid "_Add URL"
+msgstr "_Thêm địa chỉ Mạng"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:51
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:48
+msgid "_Ask for confirmation when deleting items"
+msgstr "_Hỏi xác nhận khi xoá bỏ mục"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:52
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:49
+msgid "_Compress weekends in month view"
+msgstr "_Nén các ngày cuối tuần trong khung xem tháng"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:53
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:50
+msgid "_Day begins:"
+msgstr "_Ngày bắt đầu :"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:55
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:52
+msgid "_Fri"
+msgstr "_Sáu"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:56
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:53
+msgid "_Hide completed tasks after"
+msgstr "Ẩ_n tác vụ hoàn thành sau"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:57
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:54
+msgid "_Mon"
+msgstr "T_2"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:58
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:55
+msgid "_Overdue tasks:"
+msgstr "Tác vụ _quá hạn:"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:59
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:56
+msgid "_Sat"
+msgstr "T_7"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:60
+msgid "_Show appointment end times in week and month view"
+msgstr "_Hiện thời điểm kết thúc cuộc hẹn trong khung xem tuần và tháng"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:61
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:58
+msgid "_Time divisions:"
+msgstr "_Chia thời gian:"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:62
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:59
+msgid "_Tue"
+msgstr "_Ba"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:63
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:60
+msgid "_Wed"
+msgstr "_Tư"
+
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:64
+#: ../calendar/gui/dialogs/cal-prefs-dialog.glade.h:61
+msgid "before every appointment"
+msgstr "trước mỗi cuộc hẹn"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:287
+msgid "Copy calendar contents locally for offline operation"
+msgstr "Sao chép nội dung lịch về máy để phục vụ các thao tác ngoại tuyến"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:290
+msgid "Copy task list contents locally for offline operation"
+msgstr ""
+"Sao chép nội dung danh sách tác vụ về máy để phục vụ các thao tác ngoại tuyến"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:292
+msgid "Copy memo list contents locally for offline operation"
+msgstr ""
+"Sao chép nội dung danh sách ghi nhớ về máy để phục vụ các thao tác ngoại "
+"tuyến"
+
+#: src/ui.glade.h:18
+msgid "C_olor:"
+msgstr "_Màu :"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:380
+msgid "Tasks List"
+msgstr "Danh sách tác vụ"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:391
+msgid "Memos List"
+msgstr "Danh sách Ghi nhớ"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:460
+#: ../calendar/gui/dialogs/calendar-setup.c:449
+msgid "Calendar Properties"
+msgstr "Thuộc tính lịch"
+
+#: ../data/glade/calendar-dialog.glade.h:23
+msgid "New Calendar"
+msgstr "Lịch mới"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:528
+#: ../calendar/gui/dialogs/calendar-setup.c:517
+msgid "Task List Properties"
+msgstr "Thuộc tính danh sách tác vụ"
+
+#: ../calendar/gui/dialogs/calendar-setup.c:594
+#: ../calendar/gui/memos-component.c:448
+msgid "New Memo List"
+msgstr "Danh sách ghi nhớ mới"
+
+#: ../calendar/gui/dialogs/calendar-setup.glade.h:2
+msgid "Add Calendar"
+msgstr "Thêm Lịch"
+
+#: ../calendar/gui/dialogs/calendar-setup.glade.h:3
+msgid "Add Task List"
+msgstr "Thêm Danh sách Tác vụ"
+
+#: ../ui/prefs.glade.h:32
+msgid "Pick a color"
+msgstr "Chọn màu"
+
+#: ../calendar/gui/dialogs/calendar-setup.glade.h:6
+msgid "_Add Calendar"
+msgstr "_Thêm Lịch"
+
+#: ../calendar/gui/dialogs/calendar-setup.glade.h:7
+msgid "_Add Task List"
+msgstr "Th_êm Danh sách Tác vụ"
+
+#: ../plugins/calendar-weather/calendar-weather.c:548
+msgid "_Refresh:"
+msgstr "_Làm tươi:"
+
+#: ../plugins/calendar-http/calendar-http.c:120 ../src/gtkfunc.c:611
+msgid "_URL:"
+msgstr "Địa chỉ _Mạng:"
+
+#: ../src/planner-format.c:379 ../src/smart-playlist-dialog.c:163
+msgid "weeks"
+msgstr "tuần"
+
+#: ../calendar/gui/dialogs/changed-comp.c:60
+msgid "This event has been deleted."
+msgstr "Sự kiện này bị xoá bỏ."
+
+#: ../calendar/gui/dialogs/changed-comp.c:64
+msgid "This task has been deleted."
+msgstr "Tác vụ này bị xoá bá»." + +#: ../calendar/gui/dialogs/changed-comp.c:68 +msgid "This journal entry has been deleted." +msgstr "Mục nhật ký này bị xoá bá»." + +#: ../calendar/gui/dialogs/changed-comp.c:77 +#, c-format +msgid "%s You have made changes. Forget those changes and close the editor?" +msgstr "" +"%s Bạn đã tạo ra má»™t vài thay đổi. Bá» qua những thay đổi này và đóng bá»™ " +"biên soạn không?" + +#: ../calendar/gui/dialogs/changed-comp.c:79 +#, c-format +msgid "%s You have made no changes, close the editor?" +msgstr "%s Bạn chÆ°a thay đổi gì, đóng bá»™ biên soạn?" + +#: ../calendar/gui/dialogs/changed-comp.c:84 +msgid "This event has been changed." +msgstr "Sá»± kiện này đã được thay đổi" + +#: ../calendar/gui/dialogs/changed-comp.c:88 +msgid "This task has been changed." +msgstr "Tác vụ này đã được thay đổi" + +#: ../calendar/gui/dialogs/changed-comp.c:92 +msgid "This journal entry has been changed." +msgstr "Mục nhật ký này đã được thay đổi" + +#: ../calendar/gui/dialogs/changed-comp.c:101 +#, c-format +msgid "%s You have made changes. Forget those changes and update the editor?" +msgstr "" +"%s Bạn đã tạo ra vài thay đổi. Bá» qua những thay đổi này và cập nhật bá»™ " +"biên soạn không?" + +#: ../calendar/gui/dialogs/changed-comp.c:103 +#, c-format +msgid "%s You have made no changes, update the editor?" +msgstr "%s Bạn chÆ°a thay đổi gì, còn cập nhật bá»™ biên soạn không?" + +#: ../calendar/gui/dialogs/comp-editor-page.c:464 +#, c-format +msgid "Validation error: %s" +msgstr "Lá»—i hợp lệ hóa: %s" + +#: ../calendar/gui/dialogs/comp-editor-util.c:187 ../calendar/gui/print.c:2270 +#: ../calendar/gui/print.c:2261 ogginfo/ogginfo2.c:365 +msgid " to " +msgstr " tá»›i " + +#: ../calendar/gui/dialogs/comp-editor-util.c:191 ../calendar/gui/print.c:2274 +#: ../calendar/gui/print.c:2265 +msgid " (Completed " +msgstr " (Äã hoàn tất " + +#: ../calendar/gui/dialogs/comp-editor-util.c:193 ../calendar/gui/print.c:2276 +#: ../calendar/gui/print.c:2267 +msgid "Completed " +msgstr "Äã hoàn tất " + +#: ../calendar/gui/dialogs/comp-editor-util.c:198 ../calendar/gui/print.c:2281 +#: ../calendar/gui/print.c:2272 +msgid " (Due " +msgstr " (Äến hạn " + +#: ../calendar/gui/dialogs/comp-editor-util.c:200 ../calendar/gui/print.c:2283 +#: ../calendar/gui/print.c:2274 +msgid "Due " +msgstr "Äến hạn " + +#: ../calendar/gui/dialogs/comp-editor.c:195 ../composer/e-msg-composer.c:2825 +#: ../calendar/gui/dialogs/comp-editor.c:190 ../composer/e-msg-composer.c:2663 +#, c-format +msgid "Attached message - %s" +msgstr "ThÆ° đính kèm - %s" + +#: ../composer/e-msg-composer.c:2845 +#, c-format +msgid "Attached message" +msgid_plural "%d attached messages" +msgstr[0] "%d thÆ° đính kèm" + +#: ../plug-ins/common/iwarp.c:1110 +msgid "_Move" +msgstr "_Chuyển" + +#: ../mail/em-folder-tree.c:977 ../mail/message-list.c:1711 +msgid "Cancel _Drag" +msgstr "Hủy bá» _kéo" + +#: ../calendar/gui/dialogs/comp-editor.c:782 +#: ../calendar/gui/dialogs/comp-editor.c:771 +msgid "Could not update object" +msgstr "Không thể cập nhật đối tượng" + +#: ../calendar/gui/dialogs/comp-editor.c:971 ../composer/e-msg-composer.c:2317 +#, c-format +msgid "%d Attachment" +msgid_plural "%d Attachment" +msgstr[0] "%d đính kèm" + +#: ../calendar/gui/dialogs/comp-editor.c:958 +msgid "Hide Attachment _Bar" +msgstr "Ẩn thanh đính _kèm" + +#: ../calendar/gui/dialogs/comp-editor.c:1401 +msgid "Show Attachment _Bar" +msgstr "Hiện thanh đính _kèm" + +#: ../app/actions/layers-actions.c:56 ../src/lib/FeedListView.py:116 +msgid "_Properties" +msgstr 
"Th_uá»™c tính" + +#: ../composer/e-msg-composer.c:3401 +msgid "_Add attachment..." +msgstr "_Thêm đính kèm..." + +#: ../calendar/gui/dialogs/comp-editor.c:1425 +msgid "Attachment Button: Press space key to toggle attachment bar" +msgstr "Nút đính kèm: bấm phím dài để bật/tắt thanh đính kèm" + +#: ../calendar/gui/dialogs/comp-editor.c:1899 +msgid "Edit Appointment" +msgstr "Sá»­a đổi cuá»™c hẹn" + +#: ../calendar/gui/dialogs/comp-editor.c:1905 +#, c-format +msgid "Meeting - %s" +msgstr "Cuá»™c há»p - %s" + +#: ../calendar/gui/dialogs/comp-editor.c:1907 +#, c-format +msgid "Appointment - %s" +msgstr "Cuá»™c hẹn - %s" + +#: ../calendar/gui/dialogs/comp-editor.c:1911 +#, c-format +msgid "Assigned Task - %s" +msgstr "Tác vụ đã gán - %s" + +#: ../calendar/gui/dialogs/comp-editor.c:1913 +#, c-format +msgid "Task - %s" +msgstr "Tác vụ - %s" + +#: ../calendar/gui/dialogs/comp-editor.c:1916 +#, c-format +msgid "Journal entry - %s" +msgstr "Mục nhật ký - %s" + +#: ../calendar/gui/dialogs/comp-editor.c:1926 +msgid "No summary" +msgstr "Không có tóm tắt" + +#: ../mail/em-utils.c:481 ../widgets/misc/e-attachment-bar.c:340 +msgid "attachment" +msgstr "đính kèm" + +#: ../calendar/gui/dialogs/comp-editor.c:2593 +msgid "Changes made to this item may be discarded if an update arrives" +msgstr "Thay đổi trên mục này có thể bị hủy nếu bản cập nhật đến vào." + +#: ../calendar/gui/dialogs/comp-editor.c:2782 +#: ../calendar/gui/dialogs/comp-editor.c:2617 +msgid "Unable to use current version!" +msgstr "• Không dùng được phiên bản hiện thá»i. •" + +#: ../calendar/gui/dialogs/copy-source-dialog.c:61 +msgid "Could not open source" +msgstr "Không thể mở nguồn." + +#: ../calendar/gui/dialogs/copy-source-dialog.c:69 +msgid "Could not open destination" +msgstr "Không thể mở đích." + +#: ../calendar/gui/dialogs/copy-source-dialog.c:78 +msgid "Destination is read only" +msgstr "Äích chỉ cho phép Ä‘á»c thôi." + +#: ../calendar/gui/dialogs/delete-error.c:54 +msgid "The event could not be deleted due to a corba error" +msgstr "Không thể xoá bá» sá»± kiện này vi găp lá»—i kiểu CORBA." + +#: ../calendar/gui/dialogs/delete-error.c:57 +msgid "The task could not be deleted due to a corba error" +msgstr "Không thể xoá bá» tác vụ này vi găp lá»—i kiểu CORBA." + +#: ../calendar/gui/dialogs/delete-error.c:60 +msgid "The journal entry could not be deleted due to a corba error" +msgstr "Không thể xoá bá» mục nhật ký này vi găp lá»—i kiểu CORBA." + +#: ../calendar/gui/dialogs/delete-error.c:63 +msgid "The item could not be deleted due to a corba error" +msgstr "Không thể xoá bá» mục này vi găp lá»—i kiểu CORBA." + +#: ../calendar/gui/dialogs/delete-error.c:70 +msgid "The event could not be deleted because permission was denied" +msgstr "Không thể xoá bá» sá»± kiện vì không đủ quyá»n." + +#: ../calendar/gui/dialogs/delete-error.c:73 +msgid "The task could not be deleted because permission was denied" +msgstr "Không thể xoá bá» tác vụ vì không đủ quyá»n." + +#: ../calendar/gui/dialogs/delete-error.c:76 +msgid "The journal entry could not be deleted because permission was denied" +msgstr "Không thể xoá bá» mục nhật ký vì không đủ quyá»n." + +#: ../calendar/gui/dialogs/delete-error.c:79 +msgid "The item could not be deleted because permission was denied" +msgstr "Không thể xoá bá» mục vì không đủ quyá»n." + +#: ../calendar/gui/dialogs/delete-error.c:86 +msgid "The event could not be deleted due to an error" +msgstr "Không thể xoá bá» sá»± kiện vì gặp lá»—i." 
+
+#: ../calendar/gui/dialogs/delete-error.c:89
+msgid "The task could not be deleted due to an error"
+msgstr "Không thể xoá bỏ tác vụ vì gặp lỗi."
+
+#: ../calendar/gui/dialogs/delete-error.c:92
+msgid "The journal entry could not be deleted due to an error"
+msgstr "Không thể xoá bỏ mục nhật ký vì gặp lỗi."
+
+#: ../calendar/gui/dialogs/delete-error.c:95
+msgid "The item could not be deleted due to an error"
+msgstr "Không thể xoá bỏ mục vì gặp lỗi."
+
+#: ../calendar/gui/dialogs/e-delegate-dialog.glade.h:1
+msgid "Contacts..."
+msgstr "Liên lạc..."
+
+#: ../storage/exchange-delegates.c:419
+msgid "Delegate To:"
+msgstr "Ủy nhiệm cho:"
+
+#: ../calendar/gui/dialogs/e-delegate-dialog.glade.h:3
+msgid "Enter Delegate"
+msgstr "Nhập người ủy nhiệm"
+
+#: ../calendar/gui/dialogs/event-editor.c:531
+msgid "Appoint_ment"
+msgstr "Cuộc _hẹn"
+
+#: ../gncal/calendar-editor.glade.h:23
+msgid "Recurrence"
+msgstr "Định kỳ"
+
+#: ../calendar/gui/dialogs/event-page.c:876
+#: ../calendar/gui/dialogs/task-page.c:444
+msgid "Or_ganizer"
+msgstr "_Bộ tổ chức:"
+
+#: ../calendar/gui/dialogs/event-page.c:914
+msgid "_Delegatees"
+msgstr "Người được ủ_y nhiệm"
+
+#: ../calendar/gui/dialogs/event-page.c:916
+msgid "Atte_ndees"
+msgstr "Người _dự"
+
+#: ../calendar/gui/dialogs/event-page.c:1073
+#: ../calendar/gui/dialogs/event-page.c:729
+msgid "Event with no start date"
+msgstr "Sự kiện không có ngày bắt đầu"
+
+#: ../calendar/gui/dialogs/event-page.c:1076
+#: ../calendar/gui/dialogs/event-page.c:732
+msgid "Event with no end date"
+msgstr "Sự kiện không có ngày kết thúc"
+
+#: ../calendar/gui/dialogs/task-page.c:556
+msgid "Start date is wrong"
+msgstr "Ngày bắt đầu sai"
+
+#: ../calendar/gui/dialogs/event-page.c:1252
+#: ../calendar/gui/dialogs/event-page.c:908
+msgid "End date is wrong"
+msgstr "Ngày kết thúc sai"
+
+#: ../calendar/gui/dialogs/event-page.c:1275
+#: ../calendar/gui/dialogs/event-page.c:931
+msgid "Start time is wrong"
+msgstr "Thời điểm đầu sai"
+
+#: ../calendar/gui/dialogs/event-page.c:1282
+#: ../calendar/gui/dialogs/event-page.c:938
+msgid "End time is wrong"
+msgstr "Thời điểm kết thúc sai"
+
+#: ../calendar/gui/dialogs/meeting-page.c:463
+msgid "The organizer selected no longer has an account."
+msgstr "Bộ tổ chức được chọn không còn có tài khoản."
+
+#: ../calendar/gui/dialogs/meeting-page.c:469
+msgid "An organizer is required."
+msgstr "Cần một bộ tổ chức."
+
+#: ../calendar/gui/dialogs/meeting-page.c:484
+msgid "At least one attendee is required."
+msgstr "Cần ít nhất một người dự."
+
+#: ../calendar/gui/dialogs/task-page.c:1184
+msgid "_Add "
+msgstr "Th_êm "
+
+#: ../calendar/gui/dialogs/event-page.c:2553
+#: ../calendar/gui/dialogs/event-page.c:1684
+#, c-format
+msgid "Unable to open the calendar '%s'."
+msgstr "Không thể mở lịch « %s »."
+
+#: ../calendar/gui/dialogs/event-page.c:2808
+#: ../calendar/gui/dialogs/event-page.c:1890
+#, c-format
+msgid "%d day before appointment"
+msgid_plural "%d day before appointment"
+msgstr[0] "%d ngày trước cuộc hẹn"
+
+#: ../calendar/gui/dialogs/event-page.c:2816
+#: ../calendar/gui/dialogs/event-page.c:1898
+#, c-format
+msgid "%d hour before appointment"
+msgid_plural "%d hour before appointment"
+msgstr[0] "%d giờ trước cuộc hẹn"
+
+#: ../calendar/gui/dialogs/event-page.c:2824
+#: ../calendar/gui/dialogs/event-page.c:1906
+#, c-format
+msgid "%d minute before appointement"
+msgid_plural "%d minute before appointement"
+msgstr[0] "%d phút trước cuộc hẹn"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:1
+#: ../calendar/gui/dialogs/event-page.glade.h:3
+msgid "1 hour before appointment"
+msgstr "1 giờ trước cuộc hẹn"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:2
+#: ../calendar/gui/dialogs/event-page.glade.h:4
+msgid "15 minutes before appointment"
+msgstr "15 phút trước cuộc hẹn"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:3
+msgid "1day before appointment"
+msgstr "1 ngày trước cuộc hẹn"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:5
+#: ../calendar/gui/dialogs/task-page.glade.h:1
+msgid "Atte_ndees..."
+msgstr "Người _dự..."
+
+#: ../calendar/gui/dialogs/event-page.glade.h:6
+msgid "C_ustomize"
+msgstr "Tù_y chỉnh"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:5
+msgid "Ca_tegories..."
+msgstr "_Loại..."
+
+#: ../capplets/about-me/gnome-about-me.glade.h:18
+msgid "Cale_ndar:"
+msgstr "_Lịch:"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:10
+#: ../calendar/gui/dialogs/event-page.glade.h:15
+msgid "Event Description"
+msgstr "Mô tả sự kiện"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:11
+#: ../calendar/gui/dialogs/event-page.glade.h:16
+msgid "Locat_ion:"
+msgstr "_Địa điểm:"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:12
+#: ../calendar/gui/dialogs/meeting-page.glade.h:5
+msgid "Or_ganizer:"
+msgstr "_Bộ tổ chức:"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:13
+msgid "Su_mmary:"
+msgstr "Tó_m tắt:"
+
+#: ../data/glade/new-property.glade.h:3 ../plug-ins/metadata/interface.c:301
+#: ../src/drivel.glade.h:67 ../ui/muds.glade.h:49 ../src/gtkfunc.c:619
+msgid "_Description:"
+msgstr "_Mô tả:"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:16
+msgid "_Set alarm\t"
+msgstr "_Lặp lại báo động\t"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:17
+msgid "_Time:"
+msgstr "_Giờ :"
+
+#: ../new.php:28
+msgid "for"
+msgstr "cho"
+
+#: ../calendar/gui/dialogs/event-page.glade.h:21
+#: ../calendar/gui/dialogs/recurrence-page.glade.h:11
+msgid "until"
+msgstr "tới khi"
+
+#: ../calendar/gui/dialogs/meeting-page.c:306
+#: ../calendar/gui/dialogs/meeting-page.c:303
+msgid "Dele_gatees"
+msgstr "Người được ủ_y nhiệm"
+
+#: ../trashapplet/trashapplet.glade.h:1
+msgid "From:"
+msgstr "Từ :"
+
+#: ../calendar/gui/e-meeting-list-view.c:358
+msgid "Attendee"
+msgstr "Người dự"
+
+#: ../calendar/gui/dialogs/meeting-page.etspec.h:2
+#: ../calendar/gui/e-meeting-time-sel.etspec.h:2
+msgid "Click here to add an attendee"
+msgstr "Nhấn vào đây để thêm người dự"
+
+#: ../calendar/gui/dialogs/meeting-page.etspec.h:3
+#: ../calendar/gui/e-meeting-time-sel.etspec.h:3
+msgid "Common Name"
+msgstr "Tên chung"
+
+#: ../calendar/gui/dialogs/meeting-page.etspec.h:4
+#: ../calendar/gui/e-meeting-time-sel.etspec.h:4
+msgid "Delegated From"
+msgstr "Được ủy nhiệm từ"
+
+#: ../calendar/gui/dialogs/meeting-page.etspec.h:5
+#: 
../calendar/gui/e-meeting-time-sel.etspec.h:5
+msgid "Delegated To"
+msgstr "Đã ủy nhiệm cho"
+
+#: ../providers/ibmdb2/gda-ibmdb2-provider.c:864
+msgid "Language"
+msgstr "Ngôn ngữ"
+
+#: ../gnome-netinfo/netstat.c:687 ../src/lib/FeedPropertiesDialog.py:92
+msgid "Member"
+msgstr "Thành viên"
+
+#: ../calendar/gui/e-meeting-list-view.c:386
+msgid "RSVP"
+msgstr "Trả lời trước"
+
+#. #-#-#-#-# gpsdrive-2.08pre6.vi.po (gpsdrive-2.08pre6) #-#-#-#-#
+#. gdk_window_lower((GdkWindow *)menuwin2);
+#: ../ui/prefs.glade.h:35 ../src/session.c:2024 ../pan/task-manager.c:708
+#: ../storage/sunone-invitation-list.c:534 src/gpsdrive.c:11766
+#: src/gpsdrive.c:11809 ../mimedir/mimedir-vcomponent.c:301
+msgid "Status"
+msgstr "Trạng thái"
+
+#: ../calendar/gui/dialogs/meeting-page.glade.h:1
+msgid "Att_endees"
+msgstr "Người _dự"
+
+#: ../calendar/gui/dialogs/meeting-page.glade.h:2
+msgid "C_hange Organizer"
+msgstr "Đổ_i bộ tổ chức"
+
+#: ../calendar/gui/dialogs/meeting-page.glade.h:3
+msgid "Co_ntacts..."
+msgstr "_Liên lạc..."
+
+#: ../calendar/gui/dialogs/meeting-page.glade.h:6
+#: ../storage/sunone-invitation-list.c:512
+msgid "Organizer"
+msgstr "Bộ tổ chức:"
+
+#: ../calendar/gui/dialogs/meeting-page.glade.h:7
+#: ../calendar/gui/e-itip-control.glade.h:9
+msgid "Organizer:"
+msgstr "Tổ chức:"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. One textfield per split that should help you remember what this split was about.
+#: ../calendar/gui/dialogs/memo-editor.c:130 ../calendar/gui/print.c:2311
+#: prefs_gui.c:374
+msgid "Memo"
+msgstr "Ghi nhớ"
+
+#: ../calendar/gui/dialogs/memo-page.c:490
+#, c-format
+msgid "Unable to open memos in '%s'."
+msgstr "Không thể mở ghi nhớ trong « %s »."
+
+#: ../calendar/gui/dialogs/memo-page.glade.h:2
+msgid "Basics"
+msgstr "Cơ bản"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:7
+msgid "Classi_fication:"
+msgstr "_Phân loại:"
+
+#: ../providers/evolution/gda-calendar-model.c:361
+msgid "Confidential"
+msgstr "Tin tưởng"
+
+#: ../src/dialogs.c:627 ../src/main.c:344 ../src/utils.c:574
+#: address_gui.c:2755 datebook_gui.c:4461 memo_gui.c:1609 todo_gui.c:2271
+#: po/silky-channel.glade.h:14
+msgid "Private"
+msgstr "Riêng"
+
+#: ../src/dialogs.c:622 ../src/main.c:340 ../src/utils.c:580
+msgid "Public"
+msgstr "Công"
+
+#: ../data/glade/resource-input-dialog.glade.h:5
+msgid "_Group:"
+msgstr "_Nhóm:"
+
+#: ../calendar/gui/dialogs/memo-page.glade.h:9
+msgid "_Memo Content:"
+msgstr "Nội dung ghi _nhớ :"
+
+#: ../calendar/gui/dialogs/new-calendar.glade.h:2
+msgid "Calendar options"
+msgstr "Tùy chọn lịch"
+
+#: ../calendar/gui/dialogs/new-calendar.glade.h:3
+msgid "Add New Calendar"
+msgstr "Thêm lịch mới"
+
+#: ../calendar/gui/dialogs/new-calendar.glade.h:4
+msgid "Calendar Group"
+msgstr "Nhóm lịch"
+
+#: ../calendar/gui/dialogs/new-calendar.glade.h:5
+msgid "Calendar Location"
+msgstr "Địa điểm lịch"
+
+#: ../calendar/gui/dialogs/new-calendar.glade.h:6
+msgid "Calendar Name"
+msgstr "Tên lịch"
+
+#: ../calendar/gui/dialogs/new-task-list.glade.h:2
+msgid "Task List Options"
+msgstr "Tùy chọn Danh sách Tác vụ"
+
+#: ../calendar/gui/dialogs/new-task-list.glade.h:3
+msgid "Add New Task List"
+msgstr "Thêm Danh sách Tác vụ mới"
+
+#: ../calendar/gui/dialogs/new-task-list.glade.h:4
+msgid "Task List Group"
+msgstr "Nhóm Danh sách Tác vụ"
+
+#: ../calendar/gui/dialogs/new-task-list.glade.h:5
+msgid "Task List Name"
+msgstr "Tên Danh sách Tác vụ"
+
+#: ../calendar/gui/dialogs/recur-comp.c:52
+msgid "You are modifying a recurring event. 
What would you like to modify?"
+msgstr "Bạn đang sửa đổi sự kiện lặp, bạn muốn sửa đổi cái nào?"
+
+#: ../calendar/gui/dialogs/recur-comp.c:54
+msgid "You are delegating a recurring event. What would you like to delegate?"
+msgstr "Bạn đang ủy nhiệm sự kiện lặp, bạn muốn ủy nhiệm cái nào?"
+
+#: ../calendar/gui/dialogs/recur-comp.c:58
+msgid "You are modifying a recurring task. What would you like to modify?"
+msgstr "Bạn đang sửa đổi tác vụ lặp, bạn muốn sửa đổi cái nào?"
+
+#: ../calendar/gui/dialogs/recur-comp.c:62
+msgid ""
+"You are modifying a recurring journal entry. What would you like to modify?"
+msgstr "Bạn đang sửa đổi mục nhật ký lặp, bạn muốn sửa đổi cái nào?"
+
+#: ../calendar/gui/dialogs/recur-comp.c:90
+msgid "This Instance Only"
+msgstr "Chỉ lần này"
+
+#: ../calendar/gui/dialogs/recur-comp.c:94
+msgid "This and Prior Instances"
+msgstr "Lần này và những lần trước"
+
+#: ../calendar/gui/dialogs/recur-comp.c:100
+msgid "This and Future Instances"
+msgstr "Lần này và những lần sau này"
+
+#: ../calendar/gui/dialogs/recur-comp.c:105
+msgid "All Instances"
+msgstr "Mọi lần"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:498
+#: ../calendar/gui/dialogs/recurrence-page.c:494
+msgid "This appointment contains recurrences that Evolution cannot edit."
+msgstr "Cuộc hẹn chứa nhiều lần lặp lại mà Evolution không thể hiệu chỉnh."
+
+#: ../calendar/gui/dialogs/recurrence-page.c:819
+#: ../calendar/gui/dialogs/recurrence-page.c:815
+msgid "Recurrence date is invalid"
+msgstr "Ngày lặp không hợp lệ"
+
+#: makeinfo/defun.c:509 makeinfo/defun.c:513 makeinfo/defun.c:517
+#: makeinfo/defun.c:551 makeinfo/defun.c:650 makeinfo/xml.c:2234
+#: libexif/canon/mnote-canon-entry.c:78
+#, fuzzy
+msgid "on"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"trên\n"
+"#-#-#-#-# libexif-0.6.13.vi.po (libexif-0.6.13) #-#-#-#-#\n"
+"bật"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:990 event-ui.c:991
+#: ../calendar/gui/dialogs/recurrence-page.c:986
+msgid "first"
+msgstr "thứ nhất"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:992 event-ui.c:995
+#: ../calendar/gui/dialogs/recurrence-page.c:988
+msgid "third"
+msgstr "thứ ba"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:993 event-ui.c:997
+#: ../calendar/gui/dialogs/recurrence-page.c:989
+msgid "fourth"
+msgstr "thứ tư"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:994 event-ui.c:1001
+#: ../calendar/gui/dialogs/recurrence-page.c:990
+msgid "last"
+msgstr "cuối cùng"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:1017
+#: ../calendar/gui/dialogs/recurrence-page.c:1013
+msgid "Other Date"
+msgstr "Ngày khác"
+
+#: ../src/crontab.py:248 datebook_gui.c:1824 ../bin/ical-dump.c:87
+msgid "day"
+msgstr "ngày"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:1182 event-ui.c:983
+#: ../calendar/gui/dialogs/recurrence-page.c:1178
+msgid "on the"
+msgstr "vào"
+
+#: ../gncal/calendar-editor.glade.h:40
+msgid "occurrences"
+msgstr "lần"
+
+#: ../calendar/gui/dialogs/recurrence-page.c:2326 src/callerid.c:426
+#: ../calendar/gui/dialogs/recurrence-page.c:2323
+msgid "Date/Time"
+msgstr "Ngày/Giờ"
+
+#: ../calendar/gui/dialogs/recurrence-page.glade.h:1
+#: ../gncal/calendar-editor.glade.h:4
+msgid "Exceptions"
+msgstr "Ngoại lệ"
+
+#: ../resources/dialogexport.glade.h:3
+msgid "Preview"
+msgstr "Xem thử"
+
+#: ../calendar/gui/dialogs/recurrence-page.glade.h:3
+msgid "Recurrence"
+msgstr "Định kỳ"
+
+#: ../calendar/gui/dialogs/recurrence-page.glade.h:4 event-ui.c:898
+#: event-ui.c:920 event-ui.c:963 event-ui.c:1018
+msgid "Every" +msgstr "Má»—i" + +#: ../calendar/gui/dialogs/recurrence-page.glade.h:5 +msgid "This appointment rec_urs" +msgstr "Cuá»™c hẹn này _lặp lại" + +#: ../calendar/gui/dialogs/recurrence-page.glade.h:9 event-ui.c:1053 +msgid "forever" +msgstr "mãi mãi" + +#: ../calendar/gui/dialogs/recurrence-page.glade.h:10 +msgid "month(s)" +msgstr "tháng" + +#: ../calendar/gui/dialogs/recurrence-page.glade.h:12 +msgid "week(s)" +msgstr "tuần" + +#: ../calendar/gui/dialogs/recurrence-page.glade.h:13 +msgid "year(s)" +msgstr "năm" + +#: ../calendar/gui/dialogs/task-details-page.c:430 +msgid "Completed date is wrong" +msgstr "Ngày hoàn tất sai" + +#: ../calendar/gui/dialogs/task-details-page.c:522 +msgid "Web Page" +msgstr "Trang Mạng" + +#: ../calendar/gui/dialogs/task-details-page.glade.h:1 +msgid "Miscellaneous" +msgstr "Linh tinh\t" + +#: ../calendar/gui/dialogs/task-details-page.glade.h:2 +msgid "Status" +msgstr "Trạng thái" + +#: ../calendar/gui/print.c:2372 ../plugins/save-calendar/csv-format.c:390 +#: todo-ui.c:299 ../todo-ui.c:156 todo_gui.c:2223 +msgid "Completed" +msgstr "Hoàn tất" + +#: ../todo-ui.c:163 web/template/resources_edit_main_anon.tpl:92 +#: web/template/resources_edit_main.tpl:91 ../pan/filter-edit-ui.c:855 +#: libexif/pentax/mnote-pentax-entry.c:99 +#: libexif/pentax/mnote-pentax-entry.c:104 +msgid "High" +msgstr "Cao" + +#: ../calendar/gui/e-calendar-table.c:454 ../calendar/gui/print.c:2358 +msgid "In Progress" +msgstr "Äang tiến hành" + +#: ../todo-ui.c:165 web/template/resources_edit_main_anon.tpl:90 +#: web/template/resources_edit_main.tpl:89 ../pan/filter-edit-ui.c:858 +#: libexif/pentax/mnote-pentax-entry.c:98 +#: libexif/pentax/mnote-pentax-entry.c:103 +msgid "Low" +msgstr "Thấp" + +#: ../src/gwget_data.c:231 +msgid "Not Started" +msgstr "ChÆ°a bắt đầu" + +#: ../calendar/gui/dialogs/task-details-page.glade.h:10 +msgid "P_ercent complete:" +msgstr "_Phần trăm hoàn tất:" + +#: ../calendar/gui/dialogs/task-details-page.glade.h:11 +msgid "Stat_us:" +msgstr "Trạn_g thái:" + +#: ../app/widgets/gimppaletteeditor.c:695 ../objects/UML/class_dialog.c:2217 +#: ../objects/UML/umlparameter.c:34 libexif/exif-format.c:44 +msgid "Undefined" +msgstr "ChÆ°a định nghÄ©a" + +#: ../calendar/gui/dialogs/task-details-page.glade.h:13 +#: ../calendar/gui/dialogs/task-details-page.glade.h:12 +msgid "_Date completed:" +msgstr "Ngày hoàn _tất:" + +#: ../data/glade/task-dialog.glade.h:17 ../gncal/todo-dialog.c:481 +msgid "_Priority:" +msgstr "_Ưu tiên:" + +#: ../calendar/gui/dialogs/task-details-page.glade.h:15 +msgid "_Web Page:" +msgstr "Trang _Mạng:" + +#: ../calendar/gui/tasks-component.c:1165 ../src/gnome-schedule.glade.h:70 +msgid "_Task" +msgstr "_Tác vụ" + +#: ../calendar/gui/dialogs/task-editor.c:423 +msgid "Task Details" +msgstr "Chi tiết tác vụ" + +#: ../calendar/gui/dialogs/task-page.c:437 +msgid "_Group" +msgstr "_Nhóm" + +#: ../calendar/gui/dialogs/task-page.c:821 +#: ../calendar/gui/dialogs/task-page.c:529 +msgid "Due date is wrong" +msgstr "Ngày đến hạn sai" + +#: ../calendar/gui/dialogs/task-page.c:1614 +#: ../calendar/gui/dialogs/task-page.c:873 +#, c-format +msgid "Unable to open tasks in '%s'." +msgstr "Không thể mở tác vụ trong « %s »." + +#: ../calendar/gui/dialogs/task-page.glade.h:2 +msgid "Categor_ies..." +msgstr "_Loại.." 
+
+#: ../calendar/gui/dialogs/task-page.glade.h:3
+#: ../calendar/gui/dialogs/task-page.glade.h:9
+msgid "D_escription:"
+msgstr "_Mô tả:"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:4
+msgid "Or_ganiser:"
+msgstr "_Bộ tổ chức:"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:5
+#: ../calendar/gui/dialogs/task-page.glade.h:12
+msgid "Sta_rt date:"
+msgstr "Ngày _bắt đầu :"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:7
+#: ../interfaces/time.glade.in.h:11
+msgid "Time zone:"
+msgstr "Múi giờ :"
+
+#: ../calendar/gui/dialogs/task-page.glade.h:8
+#: ../calendar/gui/dialogs/task-page.glade.h:15
+msgid "_Due date:"
+msgstr "N_gày đến hạn:"
+
+#: ../calendar/gui/dialogs/url-editor-dialog.glade.h:3
+msgid "Free/Busy C_alendars"
+msgstr "_Lịch Rảnh/Bận"
+
+#: ../calendar/gui/dialogs/url-editor-dialog.glade.h:4
+msgid "Publishing Frequency"
+msgstr "Tần số Xuất bản"
+
+#: ../calendar/gui/dialogs/url-editor-dialog.glade.h:5
+msgid "Publishing _Location"
+msgstr "Đị_a điểm Xuất bản"
+
+#: ../calendar/gui/dialogs/url-editor-dialog.glade.h:6
+msgid "Free/Busy Publishing Settings"
+msgstr "Thiết lập Xuất bản Rảnh/Bận"
+
+#: ../calendar/gui/dialogs/url-editor-dialog.glade.h:7
+msgid "_Daily"
+msgstr "_Hàng ngày"
+
+#: ../src/gnome-schedule.glade.h:67
+#, fuzzy
+msgid "_Manual"
+msgstr ""
+"#-#-#-#-# compendium4ALL.po (atomix HEAD) #-#-#-#-#\n"
+"_Thủ công\n"
+"#-#-#-#-# vi.po (gnome-schedule Gnome HEAD) #-#-#-#-#\n"
+"_Sổ tay"
+
+#: ../calendar/gui/dialogs/url-editor-dialog.glade.h:12
+msgid "_Weekly"
+msgstr "_Hàng tuần"
+
+#: ../calendar/gui/e-alarm-list.c:395 ../src/gnome-torrent.in:241
+#, c-format
+msgid "%d day"
+msgid_plural "%d day"
+msgstr[0] "%d ngày"
+
+#: ../calendar/gui/e-alarm-list.c:400
+#, c-format
+msgid "%d week"
+msgid_plural "%d week"
+msgstr[0] "%d tuần"
+
+#: ../calendar/gui/e-alarm-list.c:462
+msgid "Unknown action to be performed"
+msgstr "Không biết hành động cần thực hiện."
+
+#. Translator: The first %s refers to the base, which would be actions like
+#. * "Play a Sound". Second %s refers to the duration string e.g:"15 minutes"
+#: ../calendar/gui/e-alarm-list.c:476
+#, c-format
+msgid "%s %s before the start of the appointment"
+msgstr "%s %s trước khi bắt đầu cuộc hẹn"
+
+#. Translator: The first %s refers to the base, which would be actions like
+#. * "Play a Sound". Second %s refers to the duration string e.g:"15 minutes"
+#: ../calendar/gui/e-alarm-list.c:481
+#, c-format
+msgid "%s %s after the start of the appointment"
+msgstr "%s %s sau khi bắt đầu cuộc hẹn"
+
+#. Translator: The %s refers to the base, which would be actions like
+#. 
* "Play a sound" +#: ../calendar/gui/e-alarm-list.c:488 +#, c-format +msgid "%s at the start of the appointment" +msgstr "%s lúc bắt đầu cuá»™c hẹn" + +#: ../calendar/gui/e-alarm-list.c:497 +#, c-format +msgid "%s %s before the end of the appointment" +msgstr "%s %s trÆ°á»›c khi kết thúc cuá»™c hẹn" + +#: ../calendar/gui/e-alarm-list.c:500 +#, c-format +msgid "%s %s after the end of the appointment" +msgstr "%s %s sau khi kết thúc cuá»™c hẹn" + +#: ../calendar/gui/e-alarm-list.c:505 +#, c-format +msgid "%s at the end of the appointment" +msgstr "%s lúc kết thúc cuá»™c hẹn" + +#: ../calendar/gui/e-alarm-list.c:527 +#, c-format +msgid "%s at %s" +msgstr "%s lúc %s" + +#: ../calendar/gui/e-alarm-list.c:533 +#, c-format +msgid "%s for an unknown trigger type" +msgstr "%s cho loại gây ra lạ" + +#: ../mail/em-folder-view.c:2703 +#, c-format +msgid "Click to open %s" +msgstr "Nhấn để mở %s" + +#: ../calendar/gui/e-cal-component-memo-preview.c:201 +msgid "Memo:" +msgstr "Ghi nhá»› :" + +#: ../calendar/gui/e-cal-component-preview.c:299 +msgid "Web Page:" +msgstr "Trang Mạng:" + +#: makeinfo/html.c:207 ../src/utils.c:597 ../src/utils.c:644 +msgid "Untitled" +msgstr "Không tên" + +#: ../calendar/gui/e-itip-control.c:1108 ../todo-ui.c:272 +msgid "Summary:" +msgstr "Tóm tắt:" + +#: ../calendar/gui/e-cal-component-preview.c:204 +msgid "Start Date:" +msgstr "Ngày bắt đầu :" + +#: ../calendar/gui/e-cal-component-preview.c:217 +#: ../calendar/gui/e-cal-component-preview.c:215 +msgid "Due Date:" +msgstr "Ngày đến hạn:" + +#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-# +#. Status +#: ../src/Dialog_Partition_Info.cc:184 ../todo-ui.c:269 +#: ../glade/gnome/gnomeappbar.c:71 ../ui/user_info.glade.h:62 +#: ../storage/sunone-itip-view.c:752 +msgid "Status:" +msgstr "Trạng thái:" + +#: ../todo-ui.c:267 ../lib/gtkorphan_ops.pm:190 +msgid "Priority:" +msgstr "Ưu tiên:" + +#: ../calendar/gui/e-cal-list-view.etspec.h:2 +msgid "End Date" +msgstr "Ngày kết thúc" + +#: ../calendar/gui/e-cal-list-view.etspec.h:4 +msgid "Start Date" +msgstr "Ngày bắt đầu" + +#: ../gncal/gnomecal-prefs.c:1311 ../gncal/todo-list.c:1051 +#: ../glom/data_structure/layout/report_parts/layoutitem_summary.cc:58 +#: ../storage/sunone-invitation-list.c:503 ../mimedir/mimedir-vcomponent.c:309 +msgid "Summary" +msgstr "Tóm tắt" + +#: ../calendar/gui/e-cal-model-calendar.c:183 +#: ../calendar/gui/e-calendar-table.c:440 ../src/interface.c:291 +#: ../objects/standard/box.c:137 ../objects/standard/ellipse.c:131 +#: ../widgets/gtk+.xml.in.h:80 +#, fuzzy +msgid "Free" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"Rảnh\n" +"#-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#\n" +"Tá»± do" + +#: ../calendar/gui/e-meeting-time-sel.c:412 po/silky.glade.h:65 +msgid "Busy" +msgstr "Bận" + +#: ../calendar/gui/e-cal-model-tasks.c:607 +msgid "" +"The geographical position must be entered in the format: \n" +"\n" +"45.436845,125.862501" +msgstr "" +"Vị trí địa lý phải được nhập theo dạng thức:\n" +"\n" +"10.783114,106.699804 (Sài Gòn)" + +#: ../testing/html.c:293 ../src/glade-editor-property.c:1508 +#: ../src/glade-editor-property.c:1524 ../src/widgets/togglebutton.cc:45 +#: address_gui.c:698 datebook_gui.c:670 memo_gui.c:527 todo_gui.c:662 +#: utils.c:1274 ../hwconf.c:362 ../hwconf.c:501 libexif/exif-entry.c:487 +#: libexif/olympus/mnote-olympus-entry.c:135 +#: libexif/olympus/mnote-olympus-entry.c:159 app/gui-subs.c:492 +msgid "Yes" +msgstr "Có" + +#: ../testing/html.c:293 dir.c:1030 dir.c:1051 +#: ../src/glade-editor-property.c:1508 
../src/glade-editor-property.c:1524
+#: ../src/glade-editor-property.c:1539 ../src/widgets/togglebutton.cc:47
+#: address_gui.c:698 datebook_gui.c:670 memo_gui.c:527 todo_gui.c:662
+#: utils.c:1274 ../hwconf.c:362 ../hwconf.c:501
+#: libexif/olympus/mnote-olympus-entry.c:134
+#: libexif/olympus/mnote-olympus-entry.c:158 app/gui-subs.c:500
+msgid "No"
+msgstr "Không"
+
+#: ../calendar/gui/e-cal-model.c:922 ../calendar/gui/e-cal-model.c:920
+msgid "Recurring"
+msgstr "Lặp lại"
+
+#: ../calendar/gui/e-cal-model.c:924 ../calendar/gui/e-cal-model.c:922
+#: ../src/planner-task-dialog.c:2557
+msgid "Assigned"
+msgstr "Đã gán"
+
+#: ../Pyblio/GnomeUI/Document.py:726
+msgid "Save As..."
+msgstr "Lưu dạng..."
+
+#: ../calendar/gui/e-cal-popup.c:187 ../mail/em-format-html-display.c:1842
+#: ../mail/em-format-html-display.c:1791
+msgid "Select folder to save selected attachments..."
+msgstr "Chọn thư mục nơi cần lưu những đính kèm đã chọn..."
+
+#: ../mail/em-popup.c:424
+#, c-format
+msgid "untitled_image.%s"
+msgstr "untitled_image.%s"
+
+#: ../extensions/page-info/page-info-dialog.c:1056
+#: ../extensions/page-info/page-info-dialog.c:1408 ../src/Actions.cs:55
+msgid "_Save As..."
+msgstr "Lưu _dạng..."
+
+#: ../calendar/gui/e-cal-popup.c:271 ../mail/em-popup.c:554
+#: ../mail/em-popup.c:565 ../mail/em-popup.c:542 ../mail/em-popup.c:553
+#: ../libnautilus-private/nautilus-dnd.c:655 ../src/f-spot.glade.h:133
+msgid "Set as _Background"
+msgstr "Đặt làm _nền"
+
+#: ../calendar/gui/e-cal-popup.c:272
+msgid "_Save Selected"
+msgstr "_Lưu các điều chọn"
+
+#: ../calendar/gui/e-cal-popup.c:394 ../mail/em-popup.c:774
+#: ../mail/em-popup.c:762
+#, c-format
+msgid "Open in %s..."
+msgstr "Mở bằng « %s »..."
+
+#: ../calendar/gui/e-calendar-table.c:410
+#: ../calendar/gui/e-calendar-table.c:402 ../src/gnome-netstatus.glade.h:4
+#, no-c-format
+msgid "0%"
+msgstr "0%"
+
+#: ../calendar/gui/e-calendar-table.c:411
+#: ../calendar/gui/e-calendar-table.c:403
+msgid "10%"
+msgstr "10%"
+
+#: ../calendar/gui/e-calendar-table.c:412
+#: ../calendar/gui/e-calendar-table.c:404
+msgid "20%"
+msgstr "20%"
+
+#: ../calendar/gui/e-calendar-table.c:413
+#: ../calendar/gui/e-calendar-table.c:405
+msgid "30%"
+msgstr "30%"
+
+#: ../calendar/gui/e-calendar-table.c:414
+#: ../calendar/gui/e-calendar-table.c:406
+msgid "40%"
+msgstr "40%"
+
+#: ../src/nautilus-file-management-properties.glade.h:20
+#, no-c-format
+msgid "50%"
+msgstr "50%"
+
+#: ../calendar/gui/e-calendar-table.c:416
+#: ../calendar/gui/e-calendar-table.c:408
+msgid "60%"
+msgstr "60%"
+
+#: ../calendar/gui/e-calendar-table.c:417
+#: ../calendar/gui/e-calendar-table.c:409
+msgid "70%"
+msgstr "70%"
+
+#: ../calendar/gui/e-calendar-table.c:418
+#: ../calendar/gui/e-calendar-table.c:410
+msgid "80%"
+msgstr "80%"
+
+#: ../calendar/gui/e-calendar-table.c:419
+#: ../calendar/gui/e-calendar-table.c:411
+msgid "90%"
+msgstr "90%"
+
+#: ../src/nautilus-file-management-properties.glade.h:8
+#, no-c-format
+msgid "100%"
+msgstr "100%"
+
+#: ../calendar/gui/e-calendar-table.c:527
+#: ../calendar/gui/e-calendar-table.c:514
+msgid "Task Table"
+msgstr "Bảng tác vụ"
+
+#: ../calendar/gui/e-calendar-view.c:661
+msgid "Deleting selected objects"
+msgstr "Đang xoá bỏ các đối tượng đã chọn..."
+
+#: ../calendar/gui/e-calendar-view.c:789
+msgid "Updating objects"
+msgstr "Đang cập nhật các đối tượng..."
+
+#: ../calendar/gui/e-calendar-view.c:1105 ../composer/e-msg-composer.c:1242
+#: ../gedit/gedit-file.c:372
+msgid "Save as..."
+msgstr "Lưu dạng..."
+
+#: ../calendar/gui/e-calendar-table.c:1177 ../calendar/gui/e-memo-table.c:855
+#: ../calendar/gui/e-calendar-table.c:1155
+msgid "Open _Web Page"
+msgstr "Mở trang _Mạng"
+
+#: ../src/ghex-ui.xml.h:6 ui/galeon-bookmarks-editor-ui.xml.in.h:1
+#: app/menubar.c:449 app/menubar.c:460 app/menubar.c:516 app/menubar.c:547
+msgid "C_ut"
+msgstr "Cắ_t"
+
+#: ../app/actions/edit-actions.c:101 app/menubar.c:453 app/menubar.c:464
+#: app/menubar.c:551
+msgid "_Paste"
+msgstr "_Dán"
+
+#: ../calendar/gui/e-calendar-table.c:1189 ../ui/evolution-tasks.xml.h:22
+#: ../calendar/gui/e-calendar-table.c:1167 ../ui/evolution-tasks.xml.h:20
+msgid "_Assign Task"
+msgstr "_Gán tác vụ"
+
+#: ../ui/evolution-tasks.xml.h:24
+msgid "_Forward as iCalendar"
+msgstr "_Chuyển dạng iCalendar"
+
+#: ../calendar/gui/e-calendar-table.c:1191
+#: ../calendar/gui/e-calendar-table.c:1169
+msgid "_Mark as Complete"
+msgstr "Đánh dấu _hoàn tất"
+
+#: ../calendar/gui/e-calendar-table.c:1192
+#: ../calendar/gui/e-calendar-table.c:1170
+msgid "_Mark Selected Tasks as Complete"
+msgstr "Đánh _dấu các tác vụ đã chọn là hoàn tất"
+
+#: ../calendar/gui/e-calendar-table.c:1197
+#: ../calendar/gui/e-calendar-table.c:1175
+msgid "_Delete Selected Tasks"
+msgstr "_Xoá bỏ các tác vụ đã chọn"
+
+#: ../calendar/gui/e-calendar-table.c:1326
+msgid "Click to add a task"
+msgstr "Nhấn để thêm tác vụ"
+
+#: ../calendar/gui/e-calendar-table.etspec.h:2
+#, no-c-format
+msgid "% Complete"
+msgstr "% hoàn tất"
+
+#: ../calendar/gui/e-calendar-table.etspec.h:5 ../mail/mail-send-recv.c:617
+#: ../iagno/properties.c:644 ../nautilus-cd-burner.c:1144
+msgid "Complete"
+msgstr "Hoàn tất"
+
+#: ../calendar/gui/e-calendar-table.etspec.h:6
+msgid "Completion date"
+msgstr "Ngày hoàn tất"
+
+#: ../calendar/gui/e-calendar-table.etspec.h:7
+#: ../providers/evolution/gda-calendar-model.c:64
+msgid "Due date"
+msgstr "Ngày đến hạn"
+
+#: src/pkg_columnizer.cc:95 dselect/pkgtop.cc:287 ../gncal/todo-list.c:1073
+#: ../src/mlview-validator-window.cc:678 ../pan/save-ui.c:268
+#: ../mimedir/mimedir-vcomponent.c:290 schroot/sbuild-chroot.cc:389
+msgid "Priority"
+msgstr "Ưu tiên"
+
+#: ../calendar/gui/e-calendar-table.etspec.h:9
+#: ../providers/evolution/gda-calendar-model.c:66
+msgid "Start date"
+msgstr "Ngày bắt đầu"
+
+#: ../calendar/gui/e-calendar-table.etspec.h:12
+msgid "Task sort"
+msgstr "Sắp xếp tác vụ"
+
+#: ../calendar/gui/e-calendar-view.c:1267
+#: ../calendar/gui/e-calendar-view.c:1231
+msgid "Moving items"
+msgstr "Đang di chuyển mục..."
+
+#: ../calendar/gui/e-calendar-view.c:1269
+#: ../calendar/gui/e-calendar-view.c:1233
+msgid "Copying items"
+msgstr "Đang sao chép mục..."
+
+#: ../calendar/gui/e-calendar-view.c:1533
+#: ../calendar/gui/e-calendar-view.c:1504
+msgid "New _Appointment..."
+msgstr "Cuộc _hẹn mới..."
+
+#: ../calendar/gui/e-calendar-view.c:1534
+#: ../calendar/gui/e-calendar-view.c:1505
+msgid "New All Day _Event"
+msgstr "Sự _kiện nguyên ngày mới"
+
+#: ../calendar/gui/e-calendar-view.c:1535
+msgid "New _Meeting"
+msgstr "Cuộc _họp mới"
+
+#: ../calendar/gui/e-calendar-view.c:1536
+msgid "New _Task"
+msgstr "_Tác vụ mới"
+
+#. FIXME: hook in this somehow
+#: ../calendar/gui/e-calendar-view.c:1546
+#: ../widgets/menus/gal-view-menus.c:291
+msgid "_Current View"
+msgstr "_Khung xem hiện thời"
+
+#: ../calendar/gui/e-calendar-view.c:1548
+msgid "Select T_oday"
+msgstr "Chọn _hôm nay"
+
+#: ../calendar/gui/e-calendar-view.c:1549
+#: ../calendar/gui/e-calendar-view.c:1520
+msgid "_Select Date..."
+msgstr "_Chá»n ngày..." + +#: ../calendar/gui/e-calendar-view.c:1565 +#: ../calendar/gui/e-calendar-view.c:1541 +msgid "Cop_y to Calendar..." +msgstr "_Chép vào lịch..." + +#: ../calendar/gui/e-calendar-view.c:1566 +#: ../calendar/gui/e-calendar-view.c:1542 +msgid "Mo_ve to Calendar..." +msgstr "_Chuyển vào lịch..." + +#: ../calendar/gui/e-calendar-view.c:1567 +#: ../calendar/gui/e-calendar-view.c:1543 +msgid "_Delegate Meeting..." +msgstr "Ủ_y nhiệm cuá»™c há»p..." + +#: ../calendar/gui/e-calendar-view.c:1568 +#: ../calendar/gui/e-calendar-view.c:1544 +msgid "_Schedule Meeting..." +msgstr "Lập lịch _cuá»™c há»p..." + +#: ../calendar/gui/e-calendar-view.c:1569 +#: ../calendar/gui/e-calendar-view.c:1545 +msgid "_Forward as iCalendar..." +msgstr "_Chuyển dạng iCalendar..." + +#: ../calendar/gui/e-calendar-view.c:1574 +#: ../calendar/gui/e-calendar-view.c:1550 +msgid "Make this Occurrence _Movable" +msgstr "Cho phép di chuyển _lần này" + +#: ../calendar/gui/e-calendar-view.c:1575 +#: ../calendar/gui/e-calendar-view.c:1551 +msgid "Delete this _Occurrence" +msgstr "_Xoá bá» lần này" + +#: ../calendar/gui/e-calendar-view.c:1576 +#: ../calendar/gui/e-calendar-view.c:1552 +msgid "Delete _All Occurrences" +msgstr "Xoá bá» _má»i lần" + +#. To Translators: It will display "Organiser: NameOfTheUser " +#: ../calendar/gui/e-calendar-view.c:1951 +#, c-format +msgid "Organizer: %s <%s>" +msgstr "Tổ chức: %s <%s>" + +#: ../calendar/gui/print.c:2328 +#, c-format +msgid "Location: %s" +msgstr "Äịa Ä‘iểm: %s" + +#. To Translators: It will display "Time: ActualStartDateAndTime (DurationOfTheMeeting)" +#: ../calendar/gui/e-calendar-view.c:1996 +#, c-format +msgid "Time: %s %s" +msgstr "Giá» : %s %s" + +#: ../libedataserver/e-time-utils.c:413 +msgid "%a %m/%d/%Y %H:%M:%S" +msgstr "%a %d/%m/%Y %H:%M:%S" + +#: ../libedataserver/e-time-utils.c:422 +msgid "%a %m/%d/%Y %I:%M:%S %p" +msgstr "%a %d/%m/%Y %I:%M:%S %p" + +#: ../calendar/gui/e-cell-date-edit-text.c:123 +#, c-format +msgid "" +"The date must be entered in the format: \n" +"\n" +"%s" +msgstr "" +"Ngày phải được nhập theo dạng thức: \n" +"\n" +"%s" + +#: ../calendar/gui/e-day-view-time-item.c:553 +#, c-format +msgid "%02i minute divisions" +msgstr "lệch %02i phút" + +#: ../calendar/gui/e-day-view.c:1497 ../calendar/gui/print.c:1517 +msgid "%A %d %B" +msgstr "%A %d %B" + +#. String to use in 12-hour time format for times in the morning. +#: ../calendar/gui/e-day-view.c:748 ../calendar/gui/e-week-view.c:512 +#: ../calendar/gui/print.c:841 ../calendar/gui/e-day-view.c:750 +msgid "am" +msgstr "sáng" + +#. String to use in 12-hour time format for times in the afternoon. +#: ../calendar/gui/e-day-view.c:751 ../calendar/gui/e-week-view.c:515 +#: ../calendar/gui/print.c:843 ../calendar/gui/e-day-view.c:753 +msgid "pm" +msgstr "chiá»u/tối" + +#: ../calendar/gui/e-itip-control.c:762 ../calendar/gui/e-itip-control.c:761 +msgid "Yes. (Complex Recurrence)" +msgstr "Có. 
(Lặp lại phức tạp)" + +#: ../calendar/gui/e-itip-control.c:773 ../calendar/gui/e-itip-control.c:772 +#: ../src/crontabEditor.py:83 ../src/crontabEditorHelper.py:91 +#, c-format +msgid "Every day" +msgid_plural "Every %d days" +msgstr[0] "Má»—i %d ngày" + +#: ../calendar/gui/e-itip-control.c:778 ../calendar/gui/e-itip-control.c:777 +#: ../src/crontabEditor.py:85 +#, c-format +msgid "Every week" +msgid_plural "Every %d weeks" +msgstr[0] "Má»—i %d tuần" + +#: ../calendar/gui/e-itip-control.c:780 ../calendar/gui/e-itip-control.c:779 +#, c-format +msgid "Every week on " +msgid_plural "Every %d weeks on " +msgstr[0] "Má»—i %d tuần vào " + +#: ../calendar/gui/e-itip-control.c:788 src/reduce.c:403 +#: dpkg-split/queue.c:166 +#, c-format +msgid " and " +msgstr " và " + +#: ../calendar/gui/e-itip-control.c:795 ../calendar/gui/e-itip-control.c:794 +#, c-format +msgid "The %s day of " +msgstr "Ngày thứ %s của " + +#: ../calendar/gui/e-itip-control.c:808 ../calendar/gui/e-itip-control.c:807 +#, c-format +msgid "The %s %s of " +msgstr "%s %s của " + +#: ../calendar/gui/e-itip-control.c:815 ../calendar/gui/e-itip-control.c:814 +#: ../src/lang.py:281 +#, c-format +msgid "every month" +msgid_plural "every %d months" +msgstr[0] "má»—i %d tháng" + +#: ../calendar/gui/e-itip-control.c:819 ../calendar/gui/e-itip-control.c:818 +#, c-format +msgid "Every year" +msgid_plural "Every %d years" +msgstr[0] "má»—i %d năm" + +#: ../calendar/gui/e-itip-control.c:830 ../calendar/gui/e-itip-control.c:829 +#, c-format +msgid "a total of %d time" +msgid_plural "a total of %d time" +msgstr[0] "tổng cá»™ng giá» %d" + +#: ../calendar/gui/e-itip-control.c:839 ../calendar/gui/e-itip-control.c:838 +msgid ", ending on " +msgstr ", kết thúc vào" + +#: ../calendar/gui/e-itip-control.c:863 ../calendar/gui/e-itip-control.c:862 +#: ../storage/sunone-invitation-list.c:494 +msgid "Starts" +msgstr "Bắt đầu" + +#: ../calendar/gui/e-itip-control.c:876 ../calendar/gui/e-itip-control.c:875 +msgid "Ends" +msgstr "Kết thúc" + +#: ../calendar/gui/e-itip-control.c:910 +#: ../plugins/save-calendar/csv-format.c:395 +#: ../calendar/gui/e-itip-control.c:909 datebook_gui.c:4331 todo_gui.c:2132 +#: ../mimedir/mimedir-vcomponent.c:328 +#, fuzzy +msgid "Due" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"Äến hạn\n" +"#-#-#-#-# jpilot-0.99.8-pre12.vi.po (jpilot-0.99.8-pre12) #-#-#-#-#\n" +"Äến hạn\n" +"#-#-#-#-# libmimedir.vi.po (libmimedir HEADnReport-Msgid-Bugs-To: ) #-#-#-" +"#-#\n" +"Tá»›i hạn" + +#: ../calendar/gui/e-itip-control.c:950 ../calendar/gui/e-itip-control.c:1007 +#: ../calendar/gui/e-itip-control.c:949 ../calendar/gui/e-itip-control.c:1006 +msgid "iCalendar Information" +msgstr "Thông tin iCalendar" + +#. Title +#: ../calendar/gui/e-itip-control.c:967 ../calendar/gui/e-itip-control.c:966 +msgid "iCalendar Error" +msgstr "Lá»—i iCalendar" + +#: ../plugins/itip-formatter/itip-view.c:404 ../storage/sunone-itip-view.c:293 +#: ../storage/sunone-itip-view.c:294 ../storage/sunone-itip-view.c:364 +#: ../storage/sunone-itip-view.c:365 +msgid "An unknown person" +msgstr "NgÆ°á»i lạ" + +#. Describe what the user can do +#: ../calendar/gui/e-itip-control.c:1090 ../calendar/gui/e-itip-control.c:1089 +msgid "" +"
Please review the following information, and then select an action from " +"the menu below." +msgstr "" +"
Vui lòng xem lại các chỉ dẫn sau và chọn một hành động từ trình đơn bên "
+"dưới."
+
+#: ../plugins/itip-formatter/itip-formatter.c:1628
+#: ../storage/sunone-invitation-list.c:522
+#: ../storage/sunone-invitation-list-model.c:355
+#: ../storage/sunone-invitation-list-model.c:694
+msgid "Accepted"
+msgstr "Đã chấp nhận"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1631
+msgid "Tentatively Accepted"
+msgstr "Tạm đã chấp nhận"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1634
+#: ../storage/sunone-invitation-list.c:523
+#: ../storage/sunone-invitation-list-model.c:358
+#: ../storage/sunone-invitation-list-model.c:696
+msgid "Declined"
+msgstr "Bị từ chối"
+
+#: ../calendar/gui/e-itip-control.c:1230 ../calendar/gui/e-itip-control.c:1229
+msgid ""
+"The meeting has been cancelled, however it could not be found in your "
+"calendars"
+msgstr ""
+"Cuộc họp đã bị hủy, tuy nhiên không tìm thấy cuộc họp trong lịch của bạn."
+
+#: ../calendar/gui/e-itip-control.c:1232 ../calendar/gui/e-itip-control.c:1231
+msgid ""
+"The task has been cancelled, however it could not be found in your task lists"
+msgstr ""
+"Tác vụ đã bị hủy, tuy nhiên không tìm thấy tác vụ trong danh sách tác vụ của "
+"bạn."
+
+#: ../calendar/gui/e-itip-control.c:1311 ../calendar/gui/e-itip-control.c:1310
+#, c-format
+msgid "%s has published meeting information."
+msgstr "%s đã xuất bản tin tức cuộc họp."
+
+#: ../calendar/gui/e-itip-control.c:1312 ../calendar/gui/e-itip-control.c:1311
+msgid "Meeting Information"
+msgstr "Tin tức cuộc họp"
+
+#: ../calendar/gui/e-itip-control.c:1318 ../calendar/gui/e-itip-control.c:1317
+#, c-format
+msgid "%s requests the presence of %s at a meeting."
+msgstr "%s yêu cầu sự hiện diện của %s tại cuộc họp."
+
+#: ../calendar/gui/e-itip-control.c:1320 ../calendar/gui/e-itip-control.c:1319
+#, c-format
+msgid "%s requests your presence at a meeting."
+msgstr "%s yêu cầu sự hiện diện của bạn tại cuộc họp."
+
+#: ../calendar/gui/e-itip-control.c:1321 ../calendar/gui/e-itip-control.c:1320
+msgid "Meeting Proposal"
+msgstr "Đề nghị cuộc họp"
+
+#. FIXME Whats going on here?
+#: ../calendar/gui/e-itip-control.c:1327 ../calendar/gui/e-itip-control.c:1326
+#, c-format
+msgid "%s wishes to add to an existing meeting."
+msgstr "%s muốn thêm vào một cuộc họp đã có."
+
+#: ../calendar/gui/e-itip-control.c:1328 ../calendar/gui/e-itip-control.c:1327
+msgid "Meeting Update"
+msgstr "Cập nhật cuộc họp"
+
+#: ../calendar/gui/e-itip-control.c:1332 ../calendar/gui/e-itip-control.c:1331
+#, c-format
+msgid "%s wishes to receive the latest meeting information."
+msgstr "%s muốn nhận tin tức về cuộc họp."
+
+#: ../calendar/gui/e-itip-control.c:1333 ../calendar/gui/e-itip-control.c:1332
+msgid "Meeting Update Request"
+msgstr "Yêu cầu cập nhật cuộc họp"
+
+#: ../calendar/gui/e-itip-control.c:1340 ../calendar/gui/e-itip-control.c:1339
+#, c-format
+msgid "%s has replied to a meeting request."
+msgstr "%s đã trả lời yêu cầu họp."
+
+#: ../calendar/gui/e-itip-control.c:1341 ../calendar/gui/e-itip-control.c:1340
+msgid "Meeting Reply"
+msgstr "Trả lời họp"
+
+#: ../calendar/gui/e-itip-control.c:1348 ../calendar/gui/e-itip-control.c:1347
+#, c-format
+msgid "%s has cancelled a meeting."
+msgstr "%s đã hủy bỏ cuộc họp."
+
+#: ../calendar/gui/e-itip-control.c:1349 ../calendar/gui/e-itip-control.c:1348
+msgid "Meeting Cancellation"
+msgstr "Hủy bỏ cuộc họp"
+
+#: ../calendar/gui/e-itip-control.c:1359 ../calendar/gui/e-itip-control.c:1436
+#: ../calendar/gui/e-itip-control.c:1476 ../calendar/gui/e-itip-control.c:1358
+#: ../calendar/gui/e-itip-control.c:1435 ../calendar/gui/e-itip-control.c:1475
+#, c-format
+msgid "%s has sent an unintelligible message."
+msgstr "%s đã gởi một thông điệp không thể hiểu."
+
+#: ../calendar/gui/e-itip-control.c:1360 ../calendar/gui/e-itip-control.c:1359
+msgid "Bad Meeting Message"
+msgstr "Thông điệp sai về cuộc họp"
+
+#: ../calendar/gui/e-itip-control.c:1387 ../calendar/gui/e-itip-control.c:1386
+#, c-format
+msgid "%s has published task information."
+msgstr "%s đã xuất bản tin tức tác vụ."
+
+#: ../calendar/gui/e-itip-control.c:1388 ../calendar/gui/e-itip-control.c:1387
+msgid "Task Information"
+msgstr "Tin tức tác vụ"
+
+#: ../calendar/gui/e-itip-control.c:1395 ../calendar/gui/e-itip-control.c:1394
+#, c-format
+msgid "%s requests %s to perform a task."
+msgstr "%s yêu cầu %s để thực hiện tác vụ."
+
+#: ../calendar/gui/e-itip-control.c:1397 ../calendar/gui/e-itip-control.c:1396
+#, c-format
+msgid "%s requests you perform a task."
+msgstr "%s yêu cầu bạn thực hiện tác vụ."
+
+#: ../calendar/gui/e-itip-control.c:1398 ../calendar/gui/e-itip-control.c:1397
+msgid "Task Proposal"
+msgstr "Đề nghị tác vụ"
+
+#. FIXME Whats going on here?
+#: ../calendar/gui/e-itip-control.c:1404 ../calendar/gui/e-itip-control.c:1403
+#, c-format
+msgid "%s wishes to add to an existing task."
+msgstr "%s muốn thêm vào tác vụ đã có."
+
+#: ../calendar/gui/e-itip-control.c:1405 ../calendar/gui/e-itip-control.c:1404
+msgid "Task Update"
+msgstr "Cập nhật tác vụ"
+
+#: ../calendar/gui/e-itip-control.c:1409 ../calendar/gui/e-itip-control.c:1408
+#, c-format
+msgid "%s wishes to receive the latest task information."
+msgstr "%s muốn nhận tin tức về tác vụ."
+
+#: ../calendar/gui/e-itip-control.c:1410 ../calendar/gui/e-itip-control.c:1409
+msgid "Task Update Request"
+msgstr "Yêu cầu cập nhật tác vụ"
+
+#: ../calendar/gui/e-itip-control.c:1417 ../calendar/gui/e-itip-control.c:1416
+#, c-format
+msgid "%s has replied to a task assignment."
+msgstr "%s đã trả lời về cách gán tác vụ."
+
+#: ../calendar/gui/e-itip-control.c:1418 ../calendar/gui/e-itip-control.c:1417
+msgid "Task Reply"
+msgstr "Trả lời tác vụ"
+
+#: ../calendar/gui/e-itip-control.c:1425 ../calendar/gui/e-itip-control.c:1424
+#, c-format
+msgid "%s has cancelled a task."
+msgstr "%s đã hủy bỏ tác vụ."
+
+#: ../calendar/gui/e-itip-control.c:1426 ../calendar/gui/e-itip-control.c:1425
+msgid "Task Cancellation"
+msgstr "Tác vụ bị hủy bỏ"
+
+#: ../calendar/gui/e-itip-control.c:1437 ../calendar/gui/e-itip-control.c:1436
+msgid "Bad Task Message"
+msgstr "Thông điệp tác vụ sai"
+
+#: ../calendar/gui/e-itip-control.c:1461 ../calendar/gui/e-itip-control.c:1460
+#, c-format
+msgid "%s has published free/busy information."
+msgstr "%s đã xuất bản tin tức Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:1462 ../calendar/gui/e-itip-control.c:1461
+msgid "Free/Busy Information"
+msgstr "Tin tức Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:1466 ../calendar/gui/e-itip-control.c:1465
+#, c-format
+msgid "%s requests your free/busy information."
+msgstr "%s yêu cầu tin tức Rảnh/Bận của bạn."
+
+#: ../calendar/gui/e-itip-control.c:1467 ../calendar/gui/e-itip-control.c:1466
+msgid "Free/Busy Request"
+msgstr "Yêu cầu tin tức Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:1471 ../calendar/gui/e-itip-control.c:1470
+#, c-format
+msgid "%s has replied to a free/busy request."
+msgstr "%s đã trả lời yêu cầu tin tức Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:1472 ../calendar/gui/e-itip-control.c:1471
+msgid "Free/Busy Reply"
+msgstr "Trả lời tin tức Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:1477 ../calendar/gui/e-itip-control.c:1476
+msgid "Bad Free/Busy Message"
+msgstr "Thông điệp Rảnh/Bận sai"
+
+#: ../calendar/gui/e-itip-control.c:1552 ../calendar/gui/e-itip-control.c:1551
+msgid "The message does not appear to be properly formed"
+msgstr "Thông điệp có lẽ không đúng hình thức."
+
+#: ../calendar/gui/e-itip-control.c:1611 ../calendar/gui/e-itip-control.c:1610
+msgid "The message contains only unsupported requests."
+msgstr "Thông điệp chỉ chứa yêu cầu chưa được hỗ trợ."
+
+#: ../calendar/gui/e-itip-control.c:1644 ../calendar/gui/e-itip-control.c:1643
+msgid "The attachment does not contain a valid calendar message"
+msgstr "Đính kèm không chứa thông điệp lịch hợp lệ."
+
+#: ../calendar/gui/e-itip-control.c:1676 ../calendar/gui/e-itip-control.c:1675
+msgid "The attachment has no viewable calendar items"
+msgstr "Đính kèm không chứa mục lịch nào có thể xem được."
+
+#: ../calendar/gui/e-itip-control.c:1910 ../calendar/gui/e-itip-control.c:1909
+msgid "Update complete\n"
+msgstr "Mới cập nhật xong\n"
+
+#: ../calendar/gui/e-itip-control.c:1938 ../calendar/gui/e-itip-control.c:1937
+msgid "Object is invalid and cannot be updated\n"
+msgstr "Đối tượng không hợp lệ nên không thể cập nhật nó.\n"
+
+#: ../calendar/gui/e-itip-control.c:1948 ../calendar/gui/e-itip-control.c:1947
+msgid "This response is not from a current attendee. Add as an attendee?"
+msgstr ""
+"Hồi đáp này không phải đến từ một người dự hiện thời. Thêm người này như là "
+"người dự nhé?"
+
+#: ../calendar/gui/e-itip-control.c:1960 ../calendar/gui/e-itip-control.c:1959
+msgid "Attendee status could not be updated because of an invalid status!\n"
+msgstr "Không thể cập nhật trạng thái người dự vì trạng thái không hợp lệ!\n"
+
+#: ../calendar/gui/e-itip-control.c:1977 ../calendar/gui/e-itip-control.c:1976
+msgid "Attendee status updated\n"
+msgstr "Mới cập nhật trạng thái người dự\n"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1013
+msgid "Attendee status can not be updated because the item no longer exists"
+msgstr "Không thể cập nhật trạng thái người dự vì không còn có lại mục đó."
+
+#: ../calendar/gui/e-itip-control.c:2010 ../calendar/gui/e-itip-control.c:2009
+msgid "Removal Complete"
+msgstr "Mới gỡ bỏ xong"
+
+#: ../calendar/gui/e-itip-control.c:2033 ../calendar/gui/e-itip-control.c:2081
+#: ../calendar/gui/e-itip-control.c:2032 ../calendar/gui/e-itip-control.c:2080
+msgid "Item sent!\n"
+msgstr "Mục đã được gởi.\n"
+
+#: ../calendar/gui/e-itip-control.c:2035 ../calendar/gui/e-itip-control.c:2085
+#: ../calendar/gui/e-itip-control.c:2034 ../calendar/gui/e-itip-control.c:2084
+msgid "The item could not be sent!\n"
+msgstr "Không thể gởi mục này.\n"
+
+#: ../calendar/gui/e-itip-control.c:2165 ../calendar/gui/e-itip-control.c:2164
+msgid "Choose an action:"
+msgstr "Chọn hành động:"
+
+#: ../calendar/gui/e-itip-control.c:2264
+#: ../plugins/groupwise-features/process-meeting.c:48 src/fe-gtk/dccgui.c:580
+#: src/fe-gtk/dccgui.c:880 ../calendar/gui/e-itip-control.c:2263
+#: src/fe-gtk/dccgui.c:582 src/fe-gtk/dccgui.c:882
+msgid "Accept"
+msgstr "Chấp nhận"
+
+#: ../calendar/gui/e-itip-control.c:2265 ../calendar/gui/e-itip-control.c:2264
+msgid "Tentatively accept"
+msgstr "Tạm chấp nhận"
+
+#: ../calendar/gui/e-itip-control.c:2266
+#: ../plugins/groupwise-features/process-meeting.c:50
+#: ../calendar/gui/e-itip-control.c:2265
+msgid "Decline"
+msgstr "Từ chối"
+
+#: ../calendar/gui/e-itip-control.c:2295 ../calendar/gui/e-itip-control.c:2294
+msgid "Send Free/Busy Information"
+msgstr "Gởi tin tức Rảnh/Bận"
+
+#: ../calendar/gui/e-itip-control.c:2323 ../calendar/gui/e-itip-control.c:2322
+msgid "Update respondent status"
+msgstr "Cập nhật trạng thái trả lời"
+
+#: ../calendar/gui/e-itip-control.c:2351 ../calendar/gui/e-itip-control.c:2350
+msgid "Send Latest Information"
+msgstr "Gởi tin tức"
+
+# Variable: do not translate/ biến: đừng dịch
+#: ../calendar/gui/e-itip-control.glade.h:2
+#, no-c-format
+msgid "%P %%"
+msgstr "%P %%"
+
+#: ../calendar/gui/e-itip-control.glade.h:3
+msgid "--to--"
+msgstr "--tới--"
+
+#: ../calendar/gui/e-itip-control.glade.h:4
+msgid "Calendar Message"
+msgstr "Thông điệp lịch"
+
+#: ../calendar/gui/e-itip-control.glade.h:7
+msgid "Loading Calendar"
+msgstr "Đang tải lịch"
+
+#: ../calendar/gui/e-itip-control.glade.h:8
+msgid "Loading calendar..."
+msgstr "Đang tải lịch..."
+
+#: ../calendar/gui/e-itip-control.glade.h:10
+msgid "Server Message:"
+msgstr "Thông điệp máy phục vụ :"
+
+#: ../calendar/gui/e-itip-control.glade.h:12
+msgid "date-end"
+msgstr "ngày-cuối"
+
+#: ../calendar/gui/e-itip-control.glade.h:13
+msgid "date-start"
+msgstr "ngày-đầu"
+
+#: ../calendar/gui/e-meeting-list-view.c:69
+msgid "Chair Persons"
+msgstr "Người chủ trì"
+
+#: ../calendar/gui/e-meeting-list-view.c:70
+#: ../calendar/gui/e-meeting-list-view.c:153
+msgid "Required Participants"
+msgstr "Người dự yêu cầu"
+
+#: ../calendar/gui/e-meeting-list-view.c:71
+msgid "Optional Participants"
+msgstr "Người dự tùy chọn"
+
+#: ../src/interface.c:909 ../src/procdialogs.c:652
+msgid "Resources"
+msgstr "Tài nguyên"
+
+#: ../calendar/gui/e-meeting-store.c:116 ../calendar/gui/e-meeting-store.c:802
+msgid "Individual"
+msgstr "Riêng lẻ"
+
+#: ../glom/data_structure/layout/layoutgroup.cc:292 ../pan/score-add-ui.c:599
+#: ../widgets/gtk+.xml.in.h:82 ../mimedir/mimedir-attribute.c:137
+msgid "Group"
+msgstr "Nhóm"
+
+#: ../calendar/gui/e-meeting-store.c:120 ../objects/Istar/other.c:73
+msgid "Resource"
+msgstr "Tài nguyên"
+
+#: ../calendar/gui/e-meeting-store.c:105 ../calendar/gui/e-meeting-store.c:122
+msgid "Room"
+msgstr "Phòng"
+
+#: ../calendar/gui/e-meeting-store.c:134 ../calendar/gui/e-meeting-store.c:151
+msgid "Chair"
+msgstr "Chủ trì"
+
+#: ../calendar/gui/e-meeting-store.c:153 ../calendar/gui/e-meeting-store.c:805
+msgid "Required Participant"
+msgstr "Người dự cần thiết"
+
+#: ../calendar/gui/e-meeting-store.c:138 ../calendar/gui/e-meeting-store.c:155
+msgid "Optional Participant"
+msgstr "Người dự tùy chọn"
+
+#: ../calendar/gui/e-meeting-store.c:140 ../calendar/gui/e-meeting-store.c:157
+msgid "Non-Participant"
+msgstr "Người không tham dự"
+
+#: ../calendar/gui/e-meeting-store.c:209 ../calendar/gui/e-meeting-store.c:815
+#: ../storage/sunone-invitation-list.c:521
+#: ../storage/sunone-invitation-list-model.c:352
+#: ../storage/sunone-invitation-list-model.c:692
+msgid "Needs Action"
+msgstr "Cần hành động"
+
+#: ../calendar/gui/e-meeting-time-sel.c:411
+#: ../storage/sunone-invitation-list.c:524
+#: ../storage/sunone-invitation-list-model.c:361
+#: ../storage/sunone-invitation-list-model.c:698
+msgid "Tentative"
+msgstr "Chưa chắc"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1637
+msgid "Delegated"
+msgstr "Ủy nhiệm"
+
+#. The extra space is just a hack to occupy more space for Attendee
+#: ../calendar/gui/e-meeting-list-view.c:463
+msgid "Attendee "
+msgstr "Người dự "
+
+#: ../calendar/gui/e-meeting-store.c:198 ../calendar/gui/e-meeting-store.c:221
+msgid "In Process"
+msgstr "Trong tiến trình"
+
+#. This is a strftime() format string %A = full weekday name,
+#. %B = full month name, %d = month day, %Y = full year.
+#: ../calendar/gui/e-meeting-time-sel.c:2104
+msgid "%A, %B %d, %Y"
+msgstr "%A, %d %B, %Y"
+
+#: ../libedataserver/e-time-utils.c:404
+msgid "%a %m/%d/%Y"
+msgstr "%a %d/%m/%Y"
+
+#: ../libedataserver/e-time-utils.c:242 ../libedataserver/e-time-utils.c:303
+msgid "%m/%d/%Y"
+msgstr "%d/%m/%Y"
+
+#: ../calendar/gui/e-meeting-time-sel.c:416 ../designs/OOA/ooa.glade.h:11
+#: ../calendar/gui/e-meeting-time-sel.c:413
+msgid "Out of Office"
+msgstr "Ở ngoại văn phòng"
+
+#: ../calendar/gui/e-meeting-time-sel.c:417
+#: ../calendar/gui/e-meeting-time-sel.c:414
+msgid "No Information"
+msgstr "Không có thông tin"
+
+#: ../calendar/gui/e-meeting-time-sel.c:428
+msgid "Con_tacts..."
+msgstr "_Liên lạc..."
+
+#: ../plug-ins/MapObject/mapobject_ui.c:1299
+msgid "O_ptions"
+msgstr "Tù_y chọn"
+
+#: ../calendar/gui/e-meeting-time-sel.c:465
+#: ../calendar/gui/e-meeting-time-sel.c:462
+msgid "Show _only working hours"
+msgstr "Chỉ hiện giờ làm _việc"
+
+#: ../calendar/gui/e-meeting-time-sel.c:475
+#: ../calendar/gui/e-meeting-time-sel.c:472
+msgid "Show _zoomed out"
+msgstr "Hiện Thu _nhỏ"
+
+#: ../calendar/gui/e-meeting-time-sel.c:490
+#: ../calendar/gui/e-meeting-time-sel.c:487
+msgid "_Update free/busy"
+msgstr "_Cập nhật Rảnh/Bận"
+
+#: ../calendar/gui/e-meeting-time-sel.c:505
+#: ../calendar/gui/e-meeting-time-sel.c:502
+msgid "_<<"
+msgstr "_<<"
+
+#: ../calendar/gui/e-meeting-time-sel.c:523
+#: ../calendar/gui/e-meeting-time-sel.c:520
+msgid "_Autopick"
+msgstr "_Tự động chọn"
+
+#: ../calendar/gui/e-meeting-time-sel.c:538
+#: ../calendar/gui/e-meeting-time-sel.c:535
+msgid ">_>"
+msgstr ">_>"
+
+#: ../calendar/gui/e-meeting-time-sel.c:555
+#: ../calendar/gui/e-meeting-time-sel.c:552
+msgid "_All people and resources"
+msgstr "_Mọi người và tài nguyên đều"
+
+#: ../calendar/gui/e-meeting-time-sel.c:564
+#: ../calendar/gui/e-meeting-time-sel.c:561
+msgid "All _people and one resource"
+msgstr "Mọi người và mộ_t tài nguyên"
+
+#: ../calendar/gui/e-meeting-time-sel.c:573
+#: ../calendar/gui/e-meeting-time-sel.c:570
+msgid "_Required people"
+msgstr "Người _cần thiết"
+
+#: ../calendar/gui/e-meeting-time-sel.c:582
+#: ../calendar/gui/e-meeting-time-sel.c:579
+msgid "Required people and _one resource"
+msgstr "Người cần thiết _và một tài nguyên"
+
+#: ../calendar/gui/e-meeting-time-sel.c:615
+msgid "_Start time:"
+msgstr "Thời điểm đầ_u:"
+
+#: ../calendar/gui/e-meeting-time-sel.c:642
+msgid "_End time:"
+msgstr "Thời điểm _cuối:"
+
+#: ../calendar/gui/e-memo-table.c:276
+msgid "Memo Table"
+msgstr "Bảng ghi nhớ"
+
+#: ../calendar/gui/e-memo-table.c:872
+msgid "_Delete Selected Memos"
+msgstr "Xoá bỏ các ghi nhớ đã chọn"
+
+#: ../calendar/gui/e-memo-table.c:995 ../calendar/gui/e-memo-table.etspec.h:2
+msgid "Click to add a memo"
+msgstr "Nhấn để thêm ghi nhớ"
+
+#: ../calendar/gui/e-memo-table.etspec.h:3
+msgid "Memo sort"
+msgstr "Sắp xếp ghi nhớ"
+
+#: ../calendar/gui/gnome-cal.c:2532
+#, c-format
+msgid ""
+"Error on %s:\n"
+" %s"
+msgstr ""
+"Lỗi khi « %s »:\n"
+" %s"
+
+#: ../calendar/gui/e-memos.c:776
+msgid "Loading memos"
+msgstr "Đang tải ghi nhớ"
+
+#: ../calendar/gui/e-memos.c:861
+#, c-format
+msgid "Opening memos at %s"
+msgstr "Đang mở ghi nhớ tại %s"
+
+#: ../calendar/gui/e-memos.c:1034 ../calendar/gui/e-tasks.c:1226
+#: ../calendar/gui/e-tasks.c:1148
+msgid "Deleting selected objects..."
+msgstr "Đang xoá bỏ các đối tượng được chọn..."
+
+#: ../calendar/gui/e-pub-utils.c:322 ../calendar/gui/e-pub-utils.c:300
+#, c-format
+msgid "Enter the password for %s"
+msgstr "Hãy nhập mật khẩu cho « %s »"
+
+#: ../calendar/gui/e-tasks.c:871 ../calendar/gui/e-tasks.c:845
+msgid "Loading tasks"
+msgstr "Đang tải tác vụ..."
+
+#: ../calendar/gui/e-tasks.c:958 ../calendar/gui/e-tasks.c:929
+#, c-format
+msgid "Opening tasks at %s"
+msgstr "Đang mở tác vụ tại %s..."
+
+#: ../calendar/gui/e-tasks.c:1203 ../calendar/gui/e-tasks.c:1125
+msgid "Completing tasks..."
+msgstr "Đang hoàn tất tác vụ..."
+
+#: ../calendar/gui/e-tasks.c:1253 ../calendar/gui/e-tasks.c:1175
+msgid "Expunging"
+msgstr "Đang xoá hẳn"
+
+#: ../calendar/gui/e-timezone-entry.c:133
+msgid "Timezone Button"
+msgstr "Nút múi giờ"
+
+#. strftime format %d = day of month, %B = full
+#. 
month name. You can change the order but don't +#. change the specifiers or add anything. +#: ../calendar/gui/print.c:1501 +msgid "%d %B" +msgstr "%d %B" + +#: ../calendar/gui/gnome-cal.c:792 ../calendar/gui/gnome-cal.c:789 +msgid "Updating query" +msgstr "Äang cập nhật truy vấn" + +#: ../calendar/gui/gnome-cal.c:2240 +msgid "_Custom View" +msgstr "Khung xem tá»± _chá»n" + +#: ../calendar/gui/gnome-cal.c:2241 +msgid "_Save Custom View" +msgstr "_LÆ°u khung xem tá»± chá»n" + +#: ../calendar/gui/gnome-cal.c:2246 +msgid "_Define Views..." +msgstr "_Äịnh nghÄ©a khung xem..." + +#: ../calendar/gui/gnome-cal.c:2408 ../calendar/gui/gnome-cal.c:2378 +#, c-format +msgid "Loading appointments at %s" +msgstr "Äang tải cuá»™c hẹn lúc « %s »" + +#: ../calendar/gui/gnome-cal.c:2427 ../calendar/gui/gnome-cal.c:2397 +#, c-format +msgid "Loading tasks at %s" +msgstr "Äang tải tác vụ lúc %s..." + +#: ../calendar/gui/gnome-cal.c:3449 ../calendar/gui/gnome-cal.c:3388 +msgid "Purging" +msgstr "Äang tẩy..." + +#: ../calendar/gui/goto-dialog.glade.h:1 ../calendar.inc.php:5 +#: ../logview/log_repaint.c:36 ../gncal/gnomecal-goto.c:285 +#: makeinfo/cmds.c:585 +msgid "April" +msgstr "Tháng TÆ°" + +#: ../calendar/gui/goto-dialog.glade.h:2 ../calendar.inc.php:7 +#: ../logview/log_repaint.c:37 ../gncal/gnomecal-goto.c:289 +#: makeinfo/cmds.c:586 +msgid "August" +msgstr "Tháng Tám" + +#: ../calendar/gui/goto-dialog.glade.h:3 ../calendar.inc.php:9 +#: ../gncal/gnomecal-goto.c:293 makeinfo/cmds.c:587 +msgid "December" +msgstr "Tháng Chạp" + +#: ../calendar/gui/goto-dialog.glade.h:4 ../logview/log_repaint.c:36 +#: ../gncal/gnomecal-goto.c:283 makeinfo/cmds.c:585 +msgid "February" +msgstr "Tháng Hai" + +#: makeinfo/cmds.c:585 +msgid "January" +msgstr "Tháng Giêng" + +#: makeinfo/cmds.c:586 +msgid "July" +msgstr "Tháng Bảy" + +#: makeinfo/cmds.c:586 +msgid "June" +msgstr "Tháng Sáu" + +#: ../calendar/gui/goto-dialog.glade.h:8 ../calendar.inc.php:5 +#: ../logview/log_repaint.c:36 ../gncal/gnomecal-goto.c:284 +#: makeinfo/cmds.c:585 +msgid "March" +msgstr "Tháng Ba" + +#: ../calendar/gui/goto-dialog.glade.h:9 ../calendar.inc.php:6 +#: ../logview/log_repaint.c:36 ../gncal/gnomecal-goto.c:286 +#: makeinfo/cmds.c:585 +msgid "May" +msgstr "Tháng Năm" + +#: ../calendar/gui/goto-dialog.glade.h:10 ../calendar.inc.php:8 +#: ../logview/log_repaint.c:38 ../gncal/gnomecal-goto.c:292 +#: makeinfo/cmds.c:587 +msgid "November" +msgstr "Tháng MÆ°á»i Má»™t" + +#: ../calendar/gui/goto-dialog.glade.h:11 ../calendar.inc.php:8 +#: ../logview/log_repaint.c:37 ../gncal/gnomecal-goto.c:291 +#: makeinfo/cmds.c:586 +msgid "October" +msgstr "Tháng MÆ°á»i" + +#: ../calendar/gui/goto-dialog.glade.h:12 ../libegg/egg-datetime.c:467 +#: ../src/libegg/egg-datetime.c:467 +msgid "Select Date" +msgstr "Chá»n ngày" + +#: ../calendar/gui/goto-dialog.glade.h:13 ../calendar.inc.php:7 +#: ../logview/log_repaint.c:37 ../gncal/gnomecal-goto.c:290 +#: makeinfo/cmds.c:586 +msgid "September" +msgstr "Tháng Chín" + +#: ../calendar/gui/goto-dialog.glade.h:14 +msgid "_Select Today" +msgstr "Chá»n _hôm nay" + +#: ../calendar/gui/itip-utils.c:402 ../calendar/gui/itip-utils.c:442 +msgid "An organizer must be set." +msgstr "Phải chá»n má»™t bá»™ tổ chức." + +#: ../calendar/gui/itip-utils.c:387 ../calendar/gui/itip-utils.c:389 +msgid "At least one attendee is necessary" +msgstr "Cần ít nhất má»™t ngÆ°á»i dá»±." 
+
+#: ../calendar/gui/itip-utils.c:508 ../calendar/gui/itip-utils.c:630
+#: ../calendar/gui/itip-utils.c:510 ../calendar/gui/itip-utils.c:632
+msgid "Event information"
+msgstr "Thông tin sự kiện"
+
+#: ../calendar/gui/itip-utils.c:510 ../calendar/gui/itip-utils.c:632
+#: ../calendar/gui/itip-utils.c:512 ../calendar/gui/itip-utils.c:634
+msgid "Task information"
+msgstr "Thông tin tác vụ"
+
+#: ../calendar/gui/itip-utils.c:512 ../calendar/gui/itip-utils.c:634
+#: ../calendar/gui/itip-utils.c:514 ../calendar/gui/itip-utils.c:636
+msgid "Journal information"
+msgstr "Thông tin nhật ký"
+
+#: ../calendar/gui/itip-utils.c:514 ../calendar/gui/itip-utils.c:652
+#: ../calendar/gui/itip-utils.c:516 ../calendar/gui/itip-utils.c:654
+msgid "Free/Busy information"
+msgstr "Thông tin rảnh/bận"
+
+#: ../calendar/gui/itip-utils.c:516 ../calendar/gui/itip-utils.c:518
+msgid "Calendar information"
+msgstr "Thông tin lịch"
+
+#: ../calendar/gui/itip-utils.c:565 ../calendar/gui/itip-utils.c:567
+#: dselect/pkgdisplay.cc:99
+msgid "Updated"
+msgstr "Đã cập nhật"
+
+#: ../calendar/gui/itip-utils.c:573 ../glade/straw.glade.h:45
+msgid "Refresh"
+msgstr "Cập nhật"
+
+#: ../calendar/gui/itip-utils.c:577 ../calendar/gui/itip-utils.c:579
+msgid "Counter-proposal"
+msgstr "Phản đề nghị"
+
+#: ../calendar/gui/itip-utils.c:648 ../calendar/gui/itip-utils.c:650
+#, c-format
+msgid "Free/Busy information (%s to %s)"
+msgstr "Thông tin Rảnh/Bận (%s đến %s)"
+
+#: ../calendar/gui/itip-utils.c:658 ../calendar/gui/itip-utils.c:660
+msgid "iCalendar information"
+msgstr "Thông tin iCalendar"
+
+#: ../calendar/gui/itip-utils.c:813 ../calendar/gui/itip-utils.c:815
+msgid "You must be an attendee of the event."
+msgstr "Bạn phải là người dự sự kiện đó."
+
+#: ../plug-ins/imagemap/imap_cmd_copy_object.c:55 ../glade/gbwidget.c:1866
+#: ../glade/property.c:892 ../glade/property.c:5141 src/floatwin.cpp:114
+#: src/mainwin.cpp:1084 address_gui.c:2717 datebook_gui.c:4388 memo_gui.c:1569
+#: todo_gui.c:2186 Expense/expense.c:1651 KeyRing/keyring.c:1617
+#: po/silky.glade.h:85 app/sample-editor.c:461
+msgid "Copy"
+msgstr "Chép"
+
+#: ../calendar/gui/tasks-component.c:442 ../app/disp_callbacks.c:118
+msgid "Properties..."
+msgstr "Thuộc tính..."
+
+#: ../calendar/gui/memos-component.c:524
+#, c-format
+msgid "%d memo"
+msgid_plural "%d memos"
+msgstr[0] "%d ghi nhớ"
+
+#: ../calendar/gui/memos-component.c:526 ../calendar/gui/tasks-component.c:517
+#: ../mail/mail-component.c:549 ../mail/mail-component.c:547
+#, c-format
+msgid ", %d selected"
+msgid_plural ", %d selected"
+msgstr[0] ", %d được chọn"
+
+#: ../calendar/gui/memos-component.c:573
+msgid "Failed upgrading memos."
+msgstr "Lỗi nâng cấp ghi nhớ."
+
+#: ../calendar/gui/memos-component.c:869
+#, c-format
+msgid "Unable to open the memo list '%s' for creating events and meetings"
+msgstr "Không thể mở danh sách ghi nhớ « %s » để tạo sự kiện và cuộc họp"
+
+#: ../calendar/gui/memos-component.c:882
+msgid "There is no calendar available for creating memos"
+msgstr "Không có lịch nào sẵn sàng để tạo ghi nhớ"
+
+#: ../calendar/gui/memos-component.c:973
+msgid "Memo Source Selector"
+msgstr "Bộ chọn nguồn ghi nhớ"
+
+#: ../calendar/gui/memos-component.c:1156
+msgid "New memo"
+msgstr "Ghi nhớ mới"
+
+#: ../calendar/gui/memos-component.c:1157
+msgid "_Memo"
+msgstr "Ghi _nhớ"
+
+#: ../calendar/gui/memos-component.c:1158
+msgid "Create a new memo"
+msgstr "Tạo ghi nhớ mới"
+
+#: ../calendar/gui/memos-component.c:1164
+msgid "New memo list"
+msgstr "Danh sách ghi nhớ mới"
+
+#: ../calendar/gui/memos-component.c:1165
+msgid "Memo l_ist"
+msgstr "_Danh sách ghi nhớ"
+
+#: ../calendar/gui/memos-component.c:1166
+msgid "Create a new memo list"
+msgstr "Tạo danh sách ghi nhớ mới"
+
+#: ../calendar/gui/memos-control.c:340
+msgid "Print Memos"
+msgstr "In ghi nhớ"
+
+#: ../calendar/gui/migration.c:156 ../calendar/gui/migration.c:151
+msgid ""
+"The location and hierarchy of the Evolution task folders has changed since "
+"Evolution 1.x.\n"
+"\n"
+"Please be patient while Evolution migrates your folders..."
+msgstr ""
+"Địa chỉ và cây thư mục tác vụ Evolution đã thay đổi so với Evolution phiên "
+"bản 1.x.\n"
+"\n"
+"Hãy kiên nhẫn trong khi Evolution chuyển đổi các thư mục..."
+
+#: ../calendar/gui/migration.c:160 ../calendar/gui/migration.c:155
+msgid ""
+"The location and hierarchy of the Evolution calendar folders has changed "
+"since Evolution 1.x.\n"
+"\n"
+"Please be patient while Evolution migrates your folders..."
+msgstr ""
+"Địa chỉ và cây thư mục lịch Evolution đã thay đổi so với Evolution phiên bản "
+"1.x.\n"
+"\n"
+"Hãy kiên nhẫn trong khi Evolution chuyển đổi các thư mục..."
+
+#: ../calendar/gui/migration.c:748 ../calendar/gui/migration.c:915
+msgid "Unable to migrate old settings from evolution/config.xmldb"
+msgstr ""
+"Không thể chuyển đổi các thiết lập cũ từ tập tin evolution/config.xmldb"
+
+#: ../calendar/gui/migration.c:782 ../calendar/gui/migration.c:777
+#, c-format
+msgid "Unable to migrate calendar `%s'"
+msgstr "Không thể chuyển đổi lịch « %s »."
+
+#: ../calendar/gui/migration.c:948 ../calendar/gui/migration.c:944
+#, c-format
+msgid "Unable to migrate tasks `%s'"
+msgstr "Không thể chuyển đổi các tác vụ « %s »."
+
+#: ../calendar/gui/print.c:492 ../calendar/libecal/e-cal-recur.c:4014
+msgid "1st"
+msgstr "mồng 1"
+
+#: ../calendar/gui/print.c:492 ../calendar/libecal/e-cal-recur.c:4015
+msgid "2nd"
+msgstr "mồng 2"
+
+#: ../calendar/gui/print.c:492 ../calendar/libecal/e-cal-recur.c:4016
+msgid "3rd"
+msgstr "mồng 3"
+
+#: ../calendar/gui/print.c:492 ../calendar/libecal/e-cal-recur.c:4017
+#: datebook_gui.c:1558
+msgid "4th"
+msgstr "mồng 4"
+
+#: ../calendar/gui/print.c:492 ../calendar/libecal/e-cal-recur.c:4018
+msgid "5th"
+msgstr "mồng 5"
+
+#: ../calendar/gui/print.c:493 ../calendar/libecal/e-cal-recur.c:4019
+msgid "6th"
+msgstr "mồng 6"
+
+#: ../calendar/gui/print.c:493 ../calendar/libecal/e-cal-recur.c:4020
+msgid "7th"
+msgstr "mồng 7"
+
+#: ../calendar/gui/print.c:493 ../calendar/libecal/e-cal-recur.c:4021
+msgid "8th"
+msgstr "mồng 8"
+
+#: ../calendar/gui/print.c:493 ../calendar/libecal/e-cal-recur.c:4022
+msgid "9th"
+msgstr "mồng 9"
+
+#: ../calendar/gui/print.c:493 ../calendar/libecal/e-cal-recur.c:4023
+msgid "10th"
+msgstr "mồng 10"
+
+#: ../calendar/gui/print.c:494 ../calendar/libecal/e-cal-recur.c:4024
+msgid "11th"
+msgstr "ngày 11"
+
+#: ../calendar/gui/print.c:494 ../calendar/libecal/e-cal-recur.c:4025
+msgid "12th"
+msgstr "ngày 12"
+
+#: ../calendar/gui/print.c:494 ../calendar/libecal/e-cal-recur.c:4026
+msgid "13th"
+msgstr "ngày 13"
+
+#: ../calendar/gui/print.c:494 ../calendar/libecal/e-cal-recur.c:4027
+msgid "14th"
+msgstr "ngày 14"
+
+#: ../calendar/gui/print.c:494 ../calendar/libecal/e-cal-recur.c:4028
+msgid "15th"
+msgstr "ngày 15"
+
+#: ../calendar/gui/print.c:495 ../calendar/libecal/e-cal-recur.c:4029
+msgid "16th"
+msgstr "ngày 16"
+
+#: ../calendar/gui/print.c:495 ../calendar/libecal/e-cal-recur.c:4030
+msgid "17th"
+msgstr "ngày 17"
+
+#: ../calendar/gui/print.c:495 ../calendar/libecal/e-cal-recur.c:4031
+msgid "18th"
+msgstr "ngày 18"
+
+#: ../calendar/gui/print.c:495 ../calendar/libecal/e-cal-recur.c:4032
+msgid "19th"
+msgstr "ngày 19"
+
+#: ../calendar/gui/print.c:495 ../calendar/libecal/e-cal-recur.c:4033
+msgid "20th"
+msgstr "ngày 20"
+
+#: ../calendar/gui/print.c:496 ../calendar/libecal/e-cal-recur.c:4034
+msgid "21st"
+msgstr "ngày 21"
+
+#: ../calendar/gui/print.c:496 ../calendar/libecal/e-cal-recur.c:4035
+msgid "22nd"
+msgstr "ngày 22"
+
+#: ../calendar/gui/print.c:496 ../calendar/libecal/e-cal-recur.c:4036
+msgid "23rd"
+msgstr "ngày 23"
+
+#: ../calendar/gui/print.c:496 ../calendar/libecal/e-cal-recur.c:4037
+msgid "24th"
+msgstr "ngày 24"
+
+#: ../calendar/gui/print.c:496 ../calendar/libecal/e-cal-recur.c:4038
+msgid "25th"
+msgstr "ngày 25"
+
+#: ../calendar/gui/print.c:497 ../calendar/libecal/e-cal-recur.c:4039
+msgid "26th"
+msgstr "ngày 26"
+
+#: ../calendar/gui/print.c:497 ../calendar/libecal/e-cal-recur.c:4040
+msgid "27th"
+msgstr "ngày 27"
+
+#: ../calendar/gui/print.c:497 ../calendar/libecal/e-cal-recur.c:4041
+msgid "28th"
+msgstr "ngày 28"
+
+#: ../calendar/gui/print.c:497 ../calendar/libecal/e-cal-recur.c:4042
+msgid "29th"
+msgstr "ngày 29"
+
+#: ../calendar/gui/print.c:497 ../calendar/libecal/e-cal-recur.c:4043
+msgid "30th"
+msgstr "ngày 30"
+
+#: ../calendar/gui/print.c:498 ../calendar/libecal/e-cal-recur.c:4044
+msgid "31st"
+msgstr "ngày 31"
+
+#: ../calendar/gui/print.c:573 datebook_gui.c:222 datebook_gui.c:229
+#: datebook_gui.c:4078 datebook_gui.c:4085
+msgid "Su"
+msgstr "CN"
+
+#: ../calendar/gui/print.c:573 datebook_gui.c:223 datebook_gui.c:4079
+msgid "Mo"
+msgstr "T2"
+
+#: ../calendar/gui/print.c:573 datebook_gui.c:224 datebook_gui.c:4080
+msgid "Tu"
+msgstr "T3"
+
+#: ../calendar/gui/print.c:573 datebook_gui.c:225 datebook_gui.c:4081
+msgid "We"
+msgstr "T4"
+
+#: ../calendar/gui/print.c:574 datebook_gui.c:226 datebook_gui.c:4082
+msgid "Th"
+msgstr "T5"
+
+#: ../calendar/gui/print.c:574 datebook_gui.c:227 datebook_gui.c:4083
+msgid "Fr"
+msgstr "T6"
+
+#: ../calendar/gui/print.c:574 datebook_gui.c:228 datebook_gui.c:4084
+msgid "Sa"
+msgstr "T7"
+
+#. Day
+#: ../calendar/gui/print.c:1926 ../calendar/gui/print.c:1917
+msgid "Selected day (%a %b %d %Y)"
+msgstr "Ngày được chọn (%a %d %b %Y)"
+
+#: ../calendar/gui/print.c:1951 ../calendar/gui/print.c:1955
+#: ../calendar/gui/print.c:1942 ../calendar/gui/print.c:1946
+msgid "%a %b %d"
+msgstr "%a %d %b"
+
+#: ../calendar/gui/print.c:1952 ../calendar/gui/print.c:1943
+msgid "%a %d %Y"
+msgstr "%a %d %Y"
+
+#: ../calendar/gui/print.c:1949 ../calendar/gui/print.c:1950
+msgid "%a %b %d %Y"
+msgstr "%a %d %b %Y"
+
+#: ../calendar/gui/print.c:1963 ../calendar/gui/print.c:1954
+#, c-format
+msgid "Selected week (%s - %s)"
+msgstr "Tuần được chọn (%s - %s)"
+
+#. Month
+#: ../calendar/gui/print.c:1971 ../calendar/gui/print.c:1962
+msgid "Selected month (%b %Y)"
+msgstr "Tháng được chọn (%b %Y)"
+
+#. Year
+#: ../calendar/gui/print.c:1978 ../calendar/gui/print.c:1969
+msgid "Selected year (%Y)"
+msgstr "Năm được chọn (%Y)"
+
+#: ../calendar/gui/print.c:2307 event-ui.c:656
+#: ../calendar/gui/dialogs/event-editor.c:141 ../calendar/gui/print.c:2298
+#: alarms.c:533 datebook_gui.c:4279
+msgid "Appointment"
+msgstr "Cuộc hẹn"
+
+#: ../calendar/gui/print.c:2309 event-ui.c:659 ../objects/Istar/other.c:74
+#: ../calendar/gui/dialogs/task-editor.c:138 ../calendar/gui/print.c:2300
+#: datebook_gui.c:4330 todo_gui.c:2131
+msgid "Task"
+msgstr "Tác vụ"
+
+#: ../calendar/gui/print.c:2331 ../calendar/gui/print.c:2320
+#, c-format
+msgid "Summary: %s"
+msgstr "Tóm tắt: %s"
+
+#: ../calendar/gui/print.c:2382 ../calendar/gui/print.c:2371
+#, c-format
+msgid "Status: %s"
+msgstr "Trạng thái: %s"
+
+#: ../calendar/gui/print.c:2399 ../calendar/gui/print.c:2388
+#, c-format
+msgid "Priority: %s"
+msgstr "Độ ưu tiên: %s"
+
+#: ../calendar/gui/print.c:2411 ../calendar/gui/print.c:2400
+#, c-format
+msgid "Percent Complete: %i"
+msgstr "Phần trăm hoàn thành: %i"
+
+#: ../calendar/gui/print.c:2437 ../calendar/gui/print.c:2426
+#, c-format
+msgid "Categories: %s"
+msgstr "Phân loại: %s"
+
+#: ../calendar/gui/print.c:2448 ../calendar/gui/print.c:2437
+msgid "Contacts: "
+msgstr "Liên lạc: "
+
+#: ../gedit/gedit-ui.xml.h:39
+msgid "Print Preview"
+msgstr "Xem thử bản in"
+
+#: ../calendar/gui/print.c:2623 ../calendar/gui/print.c:2612
+msgid "Print Item"
+msgstr "In mục"
+
+#: ../calendar/gui/tasks-component.c:439
+msgid "_New Task List"
+msgstr "Danh sách tác vụ _mới"
+
+#: ../calendar/gui/tasks-component.c:515
+#, c-format
+msgid "%d task"
+msgid_plural "%d tasks"
+msgstr[0] "%d tác vụ"
+
+#: ../calendar/gui/tasks-component.c:564
+msgid "Failed upgrading tasks."
+msgstr "Lỗi nâng cấp tác vụ."
+
+#: ../calendar/gui/tasks-component.c:875 ../calendar/gui/tasks-component.c:872
+#, c-format
+msgid "Unable to open the task list '%s' for creating events and meetings"
+msgstr "Không thể mở danh sách tác vụ « %s » để tạo sự kiện và cuộc họp"
+
+#: ../calendar/gui/tasks-component.c:888 ../calendar/gui/tasks-component.c:887
+msgid "There is no calendar available for creating tasks"
+msgstr "Không có lịch nào sẵn sàng để tạo tác vụ"
+
+#: ../calendar/gui/tasks-component.c:988 ../calendar/gui/tasks-component.c:981
+msgid "Task Source Selector"
+msgstr "Bộ chọn nguồn tác vụ"
+
+#: ../calendar/gui/tasks-component.c:1171 main.c:272 main.c:359 main.c:360
+#: ../calendar/gui/tasks-component.c:1164
+msgid "New task"
+msgstr "Tác vụ mới"
+
+#: ../calendar/gui/tasks-component.c:1173
+#: ../calendar/gui/tasks-component.c:1166
+msgid "Create a new task"
+msgstr "Tạo tác vụ mới"
+
+#: ../calendar/gui/tasks-component.c:1179
+#: ../calendar/gui/tasks-component.c:1172
+msgid "New assigned task"
+msgstr "Tác vụ đã gán mới"
+
+#: ../calendar/gui/tasks-component.c:1180
+#: ../calendar/gui/tasks-component.c:1173
+msgid "Assigne_d Task"
+msgstr "Tác vụ đã _gán"
+
+#: ../calendar/gui/tasks-component.c:1181
+#: ../calendar/gui/tasks-component.c:1174
+msgid "Create a new assigned task"
+msgstr "Tạo tác vụ đã gán mới"
+
+#: ../calendar/gui/tasks-component.c:1187
+#: ../calendar/gui/tasks-component.c:1180
+msgid "New task list"
+msgstr "Danh sách tác vụ mới"
+
+#: ../calendar/gui/tasks-component.c:1188
+#: ../calendar/gui/tasks-component.c:1181
+msgid "Task l_ist"
+msgstr "_Danh sách tác vụ"
+
+#: ../calendar/gui/tasks-component.c:1189
+#: ../calendar/gui/tasks-component.c:1182
+msgid "Create a new task list"
+msgstr "Tạo danh sách tác vụ mới"
+
+#: ../calendar/gui/tasks-control.c:435 ../calendar/gui/tasks-control.c:419
+msgid ""
+"This operation will permanently erase all tasks marked as completed. If you "
+"continue, you will not be able to recover these tasks.\n"
+"\n"
+"Really erase these tasks?"
+msgstr ""
+"Thao tác này sẽ xoá bỏ hoàn toàn mọi tác vụ được đánh dấu đã hoàn tất. Nếu "
+"bạn tiếp tục, bạn sẽ không thể phục hồi những tác vụ này.\n"
+"\n"
+"Bạn có thật sự muốn xoá bỏ những tác vụ này không?"
+
+#: ../calendar/gui/tasks-control.c:438 ../calendar/gui/tasks-control.c:422
+msgid "Do not ask me again."
+msgstr "Đừng hỏi tôi lần nữa."
+
+#: ../calendar/gui/tasks-control.c:472 ../calendar/gui/tasks-control.c:457
+msgid "Print Tasks"
+msgstr "In tác vụ"
+
+#. The first letter of each day of the week starting with Sunday
+#: ../calendar/gui/weekday-picker.c:319
+msgid "SMTWTFS"
+msgstr "CHBTNSB"
+
+#: ../calendar/importers/icalendar-importer.c:79
+#: ../calendar/importers/icalendar-importer.c:78
+msgid "Appointments and Meetings"
+msgstr "Cuộc hẹn và Cuộc họp"
+
+#: ../calendar/importers/icalendar-importer.c:586
+msgid "Opening calendar"
+msgstr "Đang mở lịch"
+
+#: ../calendar/importers/icalendar-importer.c:444
+#: ../calendar/importers/icalendar-importer.c:429
+msgid "iCalendar files (.ics)"
+msgstr "Tập tin iCalendar (.ics)"
+
+#: ../calendar/importers/icalendar-importer.c:445
+#: ../calendar/importers/icalendar-importer.c:430
+msgid "Evolution iCalendar importer"
+msgstr "Bộ nhập lịch iCalendar"
+
+#: ../calendar/importers/icalendar-importer.c:521
+#: ../calendar/importers/icalendar-importer.c:494
+msgid "Reminder!!"
+msgstr "Nhắc nhở!!"
+
+#: ../calendar/importers/icalendar-importer.c:573
+#: ../calendar/importers/icalendar-importer.c:539
+msgid "vCalendar files (.vcf)"
+msgstr "Tập tin vCalendar (.vcf)"
+
+#: ../calendar/importers/icalendar-importer.c:574
+#: ../calendar/importers/icalendar-importer.c:540
+msgid "Evolution vCalendar importer"
+msgstr "Bộ nhập lịch vCalendar"
+
+#: ../calendar/importers/icalendar-importer.c:736
+#: ../calendar/importers/icalendar-importer.c:702
+msgid "Calendar Events"
+msgstr "Sự kiện lịch"
+
+#: ../calendar/importers/icalendar-importer.c:773
+#: ../calendar/importers/icalendar-importer.c:739
+msgid "Evolution Calendar intelligent importer"
+msgstr "Bộ nhập lịch thông minh Evolution"
+
+#: ../calendar/zones.h:7
+msgid "Africa/Abidjan"
+msgstr "Châu Phi/Abidjan"
+
+#: ../calendar/zones.h:8
+msgid "Africa/Accra"
+msgstr "Châu Phi/Accra"
+
+#: ../calendar/zones.h:9
+msgid "Africa/Addis_Ababa"
+msgstr "Châu Phi/Addis_Ababa"
+
+#: ../calendar/zones.h:10
+msgid "Africa/Algiers"
+msgstr "Châu Phi/Algiers"
+
+#: ../calendar/zones.h:11
+msgid "Africa/Asmera"
+msgstr "Châu Phi/Asmera"
+
+#: ../calendar/zones.h:12
+msgid "Africa/Bamako"
+msgstr "Châu Phi/Bamako"
+
+#: ../calendar/zones.h:13
+msgid "Africa/Bangui"
+msgstr "Châu Phi/Bangui"
+
+#: ../calendar/zones.h:14
+msgid "Africa/Banjul"
+msgstr "Châu Phi/Banjul"
+
+#: ../calendar/zones.h:15
+msgid "Africa/Bissau"
+msgstr "Châu Phi/Bissau"
+
+#: ../calendar/zones.h:16
+msgid "Africa/Blantyre"
+msgstr "Châu Phi/Blantyre"
+
+#: ../calendar/zones.h:17
+msgid "Africa/Brazzaville"
+msgstr "Châu Phi/Brazzaville"
+
+#: ../calendar/zones.h:18
+msgid "Africa/Bujumbura"
+msgstr "Châu Phi/Bujumbura"
+
+#: ../calendar/zones.h:19
+msgid "Africa/Cairo"
+msgstr "Châu Phi/Cairo"
+
+#: ../calendar/zones.h:20
+msgid "Africa/Casablanca"
+msgstr "Châu Phi/Casablanca"
+
+#: ../calendar/zones.h:21
+msgid "Africa/Ceuta"
+msgstr "Châu Phi/Ceuta"
+
+#: ../calendar/zones.h:22
+msgid "Africa/Conakry"
+msgstr "Châu Phi/Conakry"
+
+#: ../calendar/zones.h:23
+msgid "Africa/Dakar"
+msgstr "Châu Phi/Dakar"
+
+#: ../calendar/zones.h:24
+msgid "Africa/Dar_es_Salaam"
+msgstr "Châu Phi/Dar_es_Salaam"
+
+#: ../calendar/zones.h:25
+msgid "Africa/Djibouti"
+msgstr "Châu Phi/Djibouti"
+
+#: ../calendar/zones.h:26
+msgid "Africa/Douala"
+msgstr "Châu Phi/Douala"
+
+#: ../calendar/zones.h:27
+msgid "Africa/El_Aaiun"
+msgstr "Châu Phi/El_Aaiun"
+
+#: ../calendar/zones.h:28
+msgid "Africa/Freetown"
+msgstr "Châu Phi/Freetown"
+
+#: ../calendar/zones.h:29
+msgid "Africa/Gaborone"
+msgstr "Châu Phi/Gaborone"
+
+#: ../calendar/zones.h:30
+msgid "Africa/Harare"
+msgstr "Châu Phi/Harare"
+
+#: ../calendar/zones.h:31
+msgid "Africa/Johannesburg"
+msgstr "Châu Phi/Johannesburg"
+
+#: ../calendar/zones.h:32
+msgid "Africa/Kampala"
+msgstr "Châu Phi/Kampala"
+
+#: ../calendar/zones.h:33
+msgid "Africa/Khartoum"
+msgstr "Châu Phi/Khartoum"
+
+#: ../calendar/zones.h:34
+msgid "Africa/Kigali"
+msgstr "Châu Phi/Kigali"
+
+#: ../calendar/zones.h:35
+msgid "Africa/Kinshasa"
+msgstr "Châu Phi/Kinshasa"
+
+#: ../calendar/zones.h:36
+msgid "Africa/Lagos"
+msgstr "Châu Phi/Lagos"
+
+#: ../calendar/zones.h:37
+msgid "Africa/Libreville"
+msgstr "Châu Phi/Libreville"
+
+#: ../calendar/zones.h:38
+msgid "Africa/Lome"
+msgstr "Châu Phi/Lome"
+
+#: ../calendar/zones.h:39
+msgid "Africa/Luanda"
+msgstr "Châu Phi/Luanda"
+
+#: ../calendar/zones.h:40
+msgid "Africa/Lubumbashi"
+msgstr "Châu Phi/Lubumbashi"
+
+#: ../calendar/zones.h:41
+msgid "Africa/Lusaka"
+msgstr "Châu 
Phi/Lusaka" + +#: ../calendar/zones.h:42 +msgid "Africa/Malabo" +msgstr "Châu Phi/Malabo" + +#: ../calendar/zones.h:43 +msgid "Africa/Maputo" +msgstr "Châu Phi/Maputo" + +#: ../calendar/zones.h:44 +msgid "Africa/Maseru" +msgstr "Châu Phi/Maseru" + +#: ../calendar/zones.h:45 +msgid "Africa/Mbabane" +msgstr "Châu Phi/Mbabane" + +#: ../calendar/zones.h:46 +msgid "Africa/Mogadishu" +msgstr "Châu Phi/Mogadishu" + +#: ../calendar/zones.h:47 +msgid "Africa/Monrovia" +msgstr "Châu Phi/Monrovia" + +#: ../calendar/zones.h:48 +msgid "Africa/Nairobi" +msgstr "Châu Phi/Nairobi" + +#: ../calendar/zones.h:49 +msgid "Africa/Ndjamena" +msgstr "Châu Phi/Ndjamena" + +#: ../calendar/zones.h:50 +msgid "Africa/Niamey" +msgstr "Châu Phi/Niamey" + +#: ../calendar/zones.h:51 +msgid "Africa/Nouakchott" +msgstr "Châu Phi/Nouakchott" + +#: ../calendar/zones.h:52 +msgid "Africa/Ouagadougou" +msgstr "Châu Phi/Ouagadougou" + +#: ../calendar/zones.h:53 +msgid "Africa/Porto-Novo" +msgstr "Châu Phi/Porto-Novo" + +#: ../calendar/zones.h:54 +msgid "Africa/Sao_Tome" +msgstr "Châu Phi/Sao_Tome" + +#: ../calendar/zones.h:55 +msgid "Africa/Timbuktu" +msgstr "Châu Phi/Timbuktu" + +#: ../calendar/zones.h:56 +msgid "Africa/Tripoli" +msgstr "Châu Phi/Tripoli" + +#: ../calendar/zones.h:57 +msgid "Africa/Tunis" +msgstr "Châu Phi/Tunis" + +#: ../calendar/zones.h:58 +msgid "Africa/Windhoek" +msgstr "Châu Phi/Windhoek" + +#: ../calendar/zones.h:59 +msgid "America/Adak" +msgstr "Châu Mỹ/Adak" + +#: ../calendar/zones.h:60 +msgid "America/Anchorage" +msgstr "Châu Mỹ/Anchorage" + +#: ../calendar/zones.h:61 +msgid "America/Anguilla" +msgstr "Châu Mỹ/Anguilla" + +#: ../calendar/zones.h:62 +msgid "America/Antigua" +msgstr "Châu Mỹ/Antigua" + +#: ../calendar/zones.h:63 +msgid "America/Araguaina" +msgstr "Châu Mỹ/Araguaina" + +#: ../calendar/zones.h:64 +msgid "America/Aruba" +msgstr "Châu Mỹ/Aruba" + +#: ../calendar/zones.h:65 +msgid "America/Asuncion" +msgstr "Châu Mỹ/Asuncion" + +#: ../calendar/zones.h:66 +msgid "America/Barbados" +msgstr "Châu Mỹ/Barbados" + +#: ../calendar/zones.h:67 +msgid "America/Belem" +msgstr "Châu Mỹ/Belem" + +#: ../calendar/zones.h:68 +msgid "America/Belize" +msgstr "Châu Mỹ/Belize" + +#: ../calendar/zones.h:69 +msgid "America/Boa_Vista" +msgstr "Châu Mỹ/Boa_Vista" + +#: ../calendar/zones.h:70 +msgid "America/Bogota" +msgstr "Châu Mỹ/Bogota" + +#: ../calendar/zones.h:71 +msgid "America/Boise" +msgstr "Châu Mỹ/Boise" + +#: ../calendar/zones.h:72 +msgid "America/Buenos_Aires" +msgstr "Châu Mỹ/Buenos_Aires" + +#: ../calendar/zones.h:73 +msgid "America/Cambridge_Bay" +msgstr "Châu Mỹ/Cambridge_Bay" + +#: ../calendar/zones.h:74 +msgid "America/Cancun" +msgstr "Châu Mỹ/Cancun" + +#: ../calendar/zones.h:75 +msgid "America/Caracas" +msgstr "Châu Mỹ/Caracas" + +#: ../calendar/zones.h:76 +msgid "America/Catamarca" +msgstr "Châu Mỹ/Catamarca" + +#: ../calendar/zones.h:77 +msgid "America/Cayenne" +msgstr "Châu Mỹ/Cayenne" + +#: ../calendar/zones.h:78 +msgid "America/Cayman" +msgstr "Châu Mỹ/Cayman" + +#: ../calendar/zones.h:79 +msgid "America/Chicago" +msgstr "Châu Mỹ/Chicago" + +#: ../calendar/zones.h:80 +msgid "America/Chihuahua" +msgstr "Châu Mỹ/Chihuahua" + +#: ../calendar/zones.h:81 +msgid "America/Cordoba" +msgstr "Châu Mỹ/Cordoba" + +#: ../calendar/zones.h:82 +msgid "America/Costa_Rica" +msgstr "Châu Mỹ/Costa_Rica" + +#: ../calendar/zones.h:83 +msgid "America/Cuiaba" +msgstr "Châu Mỹ/Cuiaba" + +#: ../calendar/zones.h:84 +msgid "America/Curacao" +msgstr "Châu Mỹ/Curacao" + +#: ../calendar/zones.h:85 +msgid 
"America/Danmarkshavn" +msgstr "Châu Mỹ/Danmarkshavn" + +#: ../calendar/zones.h:86 +msgid "America/Dawson" +msgstr "Châu Mỹ/Dawson" + +#: ../calendar/zones.h:87 +msgid "America/Dawson_Creek" +msgstr "Châu Mỹ/Dawson_Creek" + +#: ../calendar/zones.h:88 +msgid "America/Denver" +msgstr "Châu Mỹ/Denver" + +#: ../calendar/zones.h:89 +msgid "America/Detroit" +msgstr "Châu Mỹ/Detroit" + +#: ../calendar/zones.h:90 +msgid "America/Dominica" +msgstr "Châu Mỹ/Dominica" + +#: ../calendar/zones.h:91 +msgid "America/Edmonton" +msgstr "Châu Mỹ/Edmonton" + +#: ../calendar/zones.h:92 +msgid "America/Eirunepe" +msgstr "Châu Mỹ/Eirunepe" + +#: ../calendar/zones.h:93 +msgid "America/El_Salvador" +msgstr "Châu Mỹ/El_Salvador" + +#: ../calendar/zones.h:94 +msgid "America/Fortaleza" +msgstr "Châu Mỹ/Fortaleza" + +#: ../calendar/zones.h:95 +msgid "America/Glace_Bay" +msgstr "Châu Mỹ/Glace_Bay" + +#: ../calendar/zones.h:96 +msgid "America/Godthab" +msgstr "Châu Mỹ/Godthab" + +#: ../calendar/zones.h:97 +msgid "America/Goose_Bay" +msgstr "Châu Mỹ/Goose_Bay" + +#: ../calendar/zones.h:98 +msgid "America/Grand_Turk" +msgstr "Châu Mỹ/Grand_Turk" + +#: ../calendar/zones.h:99 +msgid "America/Grenada" +msgstr "Châu Mỹ/Grenada" + +#: ../calendar/zones.h:100 +msgid "America/Guadeloupe" +msgstr "Châu Mỹ/Guadeloupe" + +#: ../calendar/zones.h:101 +msgid "America/Guatemala" +msgstr "Châu Mỹ/Guatemala" + +#: ../calendar/zones.h:102 +msgid "America/Guayaquil" +msgstr "Châu Mỹ/Guayaquil" + +#: ../calendar/zones.h:103 +msgid "America/Guyana" +msgstr "Châu Mỹ/Guyana" + +#: ../calendar/zones.h:104 +msgid "America/Halifax" +msgstr "Châu Mỹ/Halifax" + +#: ../calendar/zones.h:105 +msgid "America/Havana" +msgstr "Châu Mỹ/Havana" + +#: ../calendar/zones.h:106 +msgid "America/Hermosillo" +msgstr "Châu Mỹ/Hermosillo" + +#: ../calendar/zones.h:107 +msgid "America/Indiana/Indianapolis" +msgstr "Châu Mỹ/Indiana/Indianapolis" + +#: ../calendar/zones.h:108 +msgid "America/Indiana/Knox" +msgstr "Châu Mỹ/Indiana/Knox" + +#: ../calendar/zones.h:109 +msgid "America/Indiana/Marengo" +msgstr "Châu Mỹ/Indiana/Marengo" + +#: ../calendar/zones.h:110 +msgid "America/Indiana/Vevay" +msgstr "Châu Mỹ/Indiana/Vevay" + +#: ../calendar/zones.h:111 +msgid "America/Indianapolis" +msgstr "Châu Mỹ/Indianapolis" + +#: ../calendar/zones.h:112 +msgid "America/Inuvik" +msgstr "Châu Mỹ/Inuvik" + +#: ../calendar/zones.h:113 +msgid "America/Iqaluit" +msgstr "Châu Mỹ/Iqaluit" + +#: ../calendar/zones.h:114 +msgid "America/Jamaica" +msgstr "Châu Mỹ/Cha-mê-ca" + +#: ../calendar/zones.h:115 +msgid "America/Jujuy" +msgstr "Châu Mỹ/Jujuy" + +#: ../calendar/zones.h:116 +msgid "America/Juneau" +msgstr "Châu Mỹ/Juneau" + +#: ../calendar/zones.h:117 +msgid "America/Kentucky/Louisville" +msgstr "Châu Mỹ/Kentucky/Louisville" + +#: ../calendar/zones.h:118 +msgid "America/Kentucky/Monticello" +msgstr "Châu Mỹ/Kentucky/Monticello" + +#: ../calendar/zones.h:119 +msgid "America/La_Paz" +msgstr "Châu Mỹ/La_Paz" + +#: ../calendar/zones.h:120 +msgid "America/Lima" +msgstr "Châu Mỹ/Li-ma" + +#: ../calendar/zones.h:121 +msgid "America/Los_Angeles" +msgstr "Châu Mỹ/Los_Angeles" + +#: ../calendar/zones.h:122 +msgid "America/Louisville" +msgstr "Châu Mỹ/Louisville" + +#: ../calendar/zones.h:123 +msgid "America/Maceio" +msgstr "Châu Mỹ/Maceio" + +#: ../calendar/zones.h:124 +msgid "America/Managua" +msgstr "Châu Mỹ/Managua" + +#: ../calendar/zones.h:125 +msgid "America/Manaus" +msgstr "Châu Mỹ/Manaus" + +#: ../calendar/zones.h:126 +msgid "America/Martinique" +msgstr "Châu Mỹ/Martinique" + +#: 
../calendar/zones.h:127 +msgid "America/Mazatlan" +msgstr "Châu Mỹ/Mazatlan" + +#: ../calendar/zones.h:128 +msgid "America/Mendoza" +msgstr "Châu Mỹ/Mendoza" + +#: ../calendar/zones.h:129 +msgid "America/Menominee" +msgstr "Châu Mỹ/Menominee" + +#: ../calendar/zones.h:130 +msgid "America/Merida" +msgstr "Châu Mỹ/Merida" + +#: ../calendar/zones.h:131 +msgid "America/Mexico_City" +msgstr "Châu Mỹ/TP_Mexico" + +#: ../calendar/zones.h:132 +msgid "America/Miquelon" +msgstr "Châu Mỹ/Miquelon" + +#: ../calendar/zones.h:133 +msgid "America/Monterrey" +msgstr "Châu Mỹ/Monterrey" + +#: ../calendar/zones.h:134 +msgid "America/Montevideo" +msgstr "Châu Mỹ/Montevideo" + +#: ../calendar/zones.h:135 +msgid "America/Montreal" +msgstr "Châu Mỹ/Montréal" + +#: ../calendar/zones.h:136 +msgid "America/Montserrat" +msgstr "Châu Mỹ/Montserrat" + +#: ../calendar/zones.h:137 +msgid "America/Nassau" +msgstr "Châu Mỹ/Nassau" + +#: ../calendar/zones.h:138 +#: ../widgets/e-timezone-dialog/e-timezone-dialog.glade.h:4 +msgid "America/New_York" +msgstr "Châu Mỹ/New_York" + +#: ../calendar/zones.h:139 +msgid "America/Nipigon" +msgstr "Châu Mỹ/Nipigon" + +#: ../calendar/zones.h:140 +msgid "America/Nome" +msgstr "Châu Mỹ/Nome" + +#: ../calendar/zones.h:141 +msgid "America/Noronha" +msgstr "Châu Mỹ/Noronha" + +#: ../calendar/zones.h:142 +msgid "America/North_Dakota/Center" +msgstr "Châu Mỹ/North_Dakota/Center" + +#: ../calendar/zones.h:143 +msgid "America/Panama" +msgstr "Châu Mỹ/Panama" + +#: ../calendar/zones.h:144 +msgid "America/Pangnirtung" +msgstr "Châu Mỹ/Pangnirtung" + +#: ../calendar/zones.h:145 +msgid "America/Paramaribo" +msgstr "Châu Mỹ/Paramaribo" + +#: ../calendar/zones.h:146 +msgid "America/Phoenix" +msgstr "Châu Mỹ/Phoenix" + +#: ../calendar/zones.h:147 +msgid "America/Port-au-Prince" +msgstr "Châu Mỹ/Port-au-Prince" + +#: ../calendar/zones.h:148 +msgid "America/Port_of_Spain" +msgstr "Châu Mỹ/Port_of_Spain" + +#: ../calendar/zones.h:149 +msgid "America/Porto_Velho" +msgstr "Châu Mỹ/Porto_Velho" + +#: ../calendar/zones.h:150 +msgid "America/Puerto_Rico" +msgstr "Châu Mỹ/Puerto_Rico" + +#: ../calendar/zones.h:151 +msgid "America/Rainy_River" +msgstr "Châu Mỹ/Rainy_River" + +#: ../calendar/zones.h:152 +msgid "America/Rankin_Inlet" +msgstr "Châu Mỹ/Rankin_Inlet" + +#: ../calendar/zones.h:153 +msgid "America/Recife" +msgstr "Châu Mỹ/Recife" + +#: ../calendar/zones.h:154 +msgid "America/Regina" +msgstr "Châu Mỹ/Regina" + +#: ../calendar/zones.h:155 +msgid "America/Rio_Branco" +msgstr "Châu Mỹ/Rio_Branco" + +#: ../calendar/zones.h:156 +msgid "America/Rosario" +msgstr "Châu Mỹ/Rosario" + +#: ../calendar/zones.h:157 +msgid "America/Santiago" +msgstr "Châu Mỹ/Santiago" + +#: ../calendar/zones.h:158 +msgid "America/Santo_Domingo" +msgstr "Châu Mỹ/Santo_Domingo" + +#: ../calendar/zones.h:159 +msgid "America/Sao_Paulo" +msgstr "Châu Mỹ/Sao_Paulo" + +#: ../calendar/zones.h:160 +msgid "America/Scoresbysund" +msgstr "Châu Mỹ/Scoresbysund" + +#: ../calendar/zones.h:161 +msgid "America/Shiprock" +msgstr "Châu Mỹ/Shiprock" + +#: ../calendar/zones.h:162 +msgid "America/St_Johns" +msgstr "Châu Mỹ/St_Johns" + +#: ../calendar/zones.h:163 +msgid "America/St_Kitts" +msgstr "Châu Mỹ/St_Kitts" + +#: ../calendar/zones.h:164 +msgid "America/St_Lucia" +msgstr "Châu Mỹ/St_Lucia" + +#: ../calendar/zones.h:165 +msgid "America/St_Thomas" +msgstr "Châu Mỹ/St_Thomas" + +#: ../calendar/zones.h:166 +msgid "America/St_Vincent" +msgstr "Châu Mỹ/St_Vincent" + +#: ../calendar/zones.h:167 +msgid "America/Swift_Current" +msgstr "Châu 
Mỹ/Swift_Current" + +#: ../calendar/zones.h:168 +msgid "America/Tegucigalpa" +msgstr "Châu Mỹ/Tegucigalpa" + +#: ../calendar/zones.h:169 +msgid "America/Thule" +msgstr "Châu Mỹ/Thule" + +#: ../calendar/zones.h:170 +msgid "America/Thunder_Bay" +msgstr "Châu Mỹ/Thunder_Bay" + +#: ../calendar/zones.h:171 +msgid "America/Tijuana" +msgstr "Châu Mỹ/Tijuana" + +#: ../calendar/zones.h:172 +msgid "America/Tortola" +msgstr "Châu Mỹ/Tortola" + +#: ../calendar/zones.h:173 +msgid "America/Vancouver" +msgstr "Châu Mỹ/Vancouver" + +#: ../calendar/zones.h:174 +msgid "America/Whitehorse" +msgstr "Châu Mỹ/Whitehorse" + +#: ../calendar/zones.h:175 +msgid "America/Winnipeg" +msgstr "Châu Mỹ/Winnipeg" + +#: ../calendar/zones.h:176 +msgid "America/Yakutat" +msgstr "Châu Mỹ/Yakutat" + +#: ../calendar/zones.h:177 +msgid "America/Yellowknife" +msgstr "Châu Mỹ/Yellowknife" + +#: ../calendar/zones.h:178 +msgid "Antarctica/Casey" +msgstr "Nam Cá»±c/Casey" + +#: ../calendar/zones.h:179 +msgid "Antarctica/Davis" +msgstr "Nam Cá»±c/Davis" + +#: ../calendar/zones.h:180 +msgid "Antarctica/DumontDUrville" +msgstr "Nam Cá»±c/DumontDUrville" + +#: ../calendar/zones.h:181 +msgid "Antarctica/Mawson" +msgstr "Nam Cá»±c/Mawson" + +#: ../calendar/zones.h:182 +msgid "Antarctica/McMurdo" +msgstr "Nam Cá»±c/McMurdo" + +#: ../calendar/zones.h:183 +msgid "Antarctica/Palmer" +msgstr "Nam Cá»±c/Palmer" + +#: ../calendar/zones.h:184 +msgid "Antarctica/South_Pole" +msgstr "Nam Cá»±c/South_Pole" + +#: ../calendar/zones.h:185 +msgid "Antarctica/Syowa" +msgstr "Nam Cá»±c/Syowa" + +#: ../calendar/zones.h:186 +msgid "Antarctica/Vostok" +msgstr "Nam Cá»±c/Vostok" + +#: ../calendar/zones.h:187 +msgid "Arctic/Longyearbyen" +msgstr "Arctic/Longyearbyen" + +#: ../calendar/zones.h:188 +msgid "Asia/Aden" +msgstr "Châu Ã/Aden" + +#: ../calendar/zones.h:189 +msgid "Asia/Almaty" +msgstr "Châu Ã/Almaty" + +#: ../calendar/zones.h:190 +msgid "Asia/Amman" +msgstr "Châu Ã/Amman" + +#: ../calendar/zones.h:191 +msgid "Asia/Anadyr" +msgstr "Châu Ã/Anadyr" + +#: ../calendar/zones.h:192 +msgid "Asia/Aqtau" +msgstr "Châu Ã/Aqtau" + +#: ../calendar/zones.h:193 +msgid "Asia/Aqtobe" +msgstr "Châu Ã/Aqtobe" + +#: ../calendar/zones.h:194 +msgid "Asia/Ashgabat" +msgstr "Châu Ã/Ashgabat" + +#: ../calendar/zones.h:195 +msgid "Asia/Baghdad" +msgstr "Châu Ã/Baghdad" + +#: ../calendar/zones.h:196 +msgid "Asia/Bahrain" +msgstr "Châu Ã/Bahrain" + +#: ../calendar/zones.h:197 +msgid "Asia/Baku" +msgstr "Châu Ã/Baku" + +#: ../calendar/zones.h:198 +msgid "Asia/Bangkok" +msgstr "Châu Ã/Bangkok" + +#: ../calendar/zones.h:199 +msgid "Asia/Beirut" +msgstr "Châu Ã/Beirut" + +#: ../calendar/zones.h:200 +msgid "Asia/Bishkek" +msgstr "Châu Ã/Bishkek" + +#: ../calendar/zones.h:201 +msgid "Asia/Brunei" +msgstr "Châu Ã/Bợ-ru-nei" + +#: ../calendar/zones.h:202 +msgid "Asia/Calcutta" +msgstr "Châu Ã/Calcutta" + +#: ../calendar/zones.h:203 +msgid "Asia/Choibalsan" +msgstr "Châu Ã/Choibalsan" + +#: ../calendar/zones.h:204 +msgid "Asia/Chongqing" +msgstr "Châu Ã/Chongqing" + +#: ../calendar/zones.h:205 +msgid "Asia/Colombo" +msgstr "Châu Ã/Colombo" + +#: ../calendar/zones.h:206 +msgid "Asia/Damascus" +msgstr "Châu Ã/Damascus" + +#: ../calendar/zones.h:207 +msgid "Asia/Dhaka" +msgstr "Châu Ã/Dhaka" + +#: ../calendar/zones.h:208 +msgid "Asia/Dili" +msgstr "Châu Ã/Dili" + +#: ../calendar/zones.h:209 +msgid "Asia/Dubai" +msgstr "Châu Ã/Dubai" + +#: ../calendar/zones.h:210 +msgid "Asia/Dushanbe" +msgstr "Châu Ã/Dushanbe" + +#: ../calendar/zones.h:211 +msgid "Asia/Gaza" +msgstr "Châu Ã/Gaza" + +#: 
../calendar/zones.h:212 +msgid "Asia/Harbin" +msgstr "Châu Ã/Harbin" + +#: ../calendar/zones.h:213 +msgid "Asia/Hong_Kong" +msgstr "Châu Ã/Hồng_Kông" + +#: ../calendar/zones.h:214 +msgid "Asia/Hovd" +msgstr "Châu Ã/Hovd" + +#: ../calendar/zones.h:215 +msgid "Asia/Irkutsk" +msgstr "Châu Ã/Irkutsk" + +#: ../calendar/zones.h:216 +msgid "Asia/Istanbul" +msgstr "Châu Ã/Istanbul" + +#: ../calendar/zones.h:217 +msgid "Asia/Jakarta" +msgstr "Châu Ã/Jakarta" + +#: ../calendar/zones.h:218 +msgid "Asia/Jayapura" +msgstr "Châu Ã/Jayapura" + +#: ../calendar/zones.h:219 +msgid "Asia/Jerusalem" +msgstr "Châu Ã/Jerusalem" + +#: ../calendar/zones.h:220 +msgid "Asia/Kabul" +msgstr "Châu Ã/Kabul" + +#: ../calendar/zones.h:221 +msgid "Asia/Kamchatka" +msgstr "Châu Ã/Kamchatka" + +#: ../calendar/zones.h:222 +msgid "Asia/Karachi" +msgstr "Châu Ã/Karachi" + +#: ../calendar/zones.h:223 +msgid "Asia/Kashgar" +msgstr "Châu Ã/Kashgar" + +#: ../calendar/zones.h:224 +msgid "Asia/Katmandu" +msgstr "Châu Ã/Katmandu" + +#: ../calendar/zones.h:225 +msgid "Asia/Krasnoyarsk" +msgstr "Châu Ã/Krasnoyarsk" + +#: ../calendar/zones.h:226 +msgid "Asia/Kuala_Lumpur" +msgstr "Châu Ã/Kuala_Lumpur" + +#: ../calendar/zones.h:227 +msgid "Asia/Kuching" +msgstr "Châu Ã/Kuching" + +#: ../calendar/zones.h:228 +msgid "Asia/Kuwait" +msgstr "Châu Ã/Cu-ouait" + +#: ../calendar/zones.h:229 +msgid "Asia/Macao" +msgstr "Châu Ã/Macao" + +#: ../calendar/zones.h:230 +msgid "Asia/Macau" +msgstr "Châu Ã/Ma-cao" + +#: ../calendar/zones.h:231 +msgid "Asia/Magadan" +msgstr "Châu Ã/Magadan" + +#: ../calendar/zones.h:232 +msgid "Asia/Makassar" +msgstr "Châu Ã/Makassar" + +#: ../calendar/zones.h:233 +msgid "Asia/Manila" +msgstr "Châu Ã/Manila" + +#: ../calendar/zones.h:234 +msgid "Asia/Muscat" +msgstr "Châu Ã/Muscat" + +#: ../calendar/zones.h:235 +msgid "Asia/Nicosia" +msgstr "Châu Ã/Nicosia" + +#: ../calendar/zones.h:236 +msgid "Asia/Novosibirsk" +msgstr "Châu Ã/Novosibirsk" + +#: ../calendar/zones.h:237 +msgid "Asia/Omsk" +msgstr "Châu Ã/Omsk" + +#: ../calendar/zones.h:238 +msgid "Asia/Oral" +msgstr "Châu Ã/Oral" + +#: ../calendar/zones.h:239 +msgid "Asia/Phnom_Penh" +msgstr "Châu Ã/Phnom_Penh" + +#: ../calendar/zones.h:240 +msgid "Asia/Pontianak" +msgstr "Châu Ã/Pontianak" + +#: ../calendar/zones.h:241 +msgid "Asia/Pyongyang" +msgstr "Châu Ã/Pyongyang" + +#: ../calendar/zones.h:242 +msgid "Asia/Qatar" +msgstr "Châu Ã/Qatar" + +#: ../calendar/zones.h:243 +msgid "Asia/Qyzylorda" +msgstr "Châu Ã/Qyzylorda" + +#: ../calendar/zones.h:244 +msgid "Asia/Rangoon" +msgstr "Châu Ã/Rangoon" + +#: ../calendar/zones.h:245 +msgid "Asia/Riyadh" +msgstr "Châu Ã/Riyadh" + +#: ../calendar/zones.h:246 +msgid "Asia/Saigon" +msgstr "Châu Ã/Sài_Gòn" + +#: ../calendar/zones.h:247 +msgid "Asia/Sakhalin" +msgstr "Châu Ã/Sakhalin" + +#: ../calendar/zones.h:248 +msgid "Asia/Samarkand" +msgstr "Châu Ã/Samarkand" + +#: ../calendar/zones.h:249 +msgid "Asia/Seoul" +msgstr "Châu Ã/Seoul" + +#: ../calendar/zones.h:250 +msgid "Asia/Shanghai" +msgstr "Châu Ã/Shanghai" + +#: ../calendar/zones.h:251 +msgid "Asia/Singapore" +msgstr "Châu Ã/Xing-a-poa" + +#: ../calendar/zones.h:252 +msgid "Asia/Taipei" +msgstr "Châu Ã/Tai-pei" + +#: ../calendar/zones.h:253 +msgid "Asia/Tashkent" +msgstr "Châu Ã/Tashkent" + +#: ../calendar/zones.h:254 +msgid "Asia/Tbilisi" +msgstr "Châu Ã/Tbilisi" + +#: ../calendar/zones.h:255 +msgid "Asia/Tehran" +msgstr "Châu Ã/Tehran" + +#: ../calendar/zones.h:256 +msgid "Asia/Thimphu" +msgstr "Châu Ã/Thimphu" + +#: ../calendar/zones.h:257 +msgid "Asia/Tokyo" +msgstr "Châu 
Ã/Tokyo" + +#: ../calendar/zones.h:258 +msgid "Asia/Ujung_Pandang" +msgstr "Châu Ã/Ujung_Pandang" + +#: ../calendar/zones.h:259 +msgid "Asia/Ulaanbaatar" +msgstr "Châu Ã/Ulaanbaatar" + +#: ../calendar/zones.h:260 +msgid "Asia/Urumqi" +msgstr "Châu Ã/Urumqi" + +#: ../calendar/zones.h:261 +msgid "Asia/Vientiane" +msgstr "Châu Ã/Vientiane" + +#: ../calendar/zones.h:262 +msgid "Asia/Vladivostok" +msgstr "Châu Ã/Vladivostok" + +#: ../calendar/zones.h:263 +msgid "Asia/Yakutsk" +msgstr "Châu Ã/Yakutsk" + +#: ../calendar/zones.h:264 +msgid "Asia/Yekaterinburg" +msgstr "Châu Ã/Yekaterinburg" + +#: ../calendar/zones.h:265 +msgid "Asia/Yerevan" +msgstr "Châu Ã/Yerevan" + +#: ../calendar/zones.h:266 +msgid "Atlantic/Azores" +msgstr "Äại Tây DÆ°Æ¡ng/Azores" + +#: ../calendar/zones.h:267 +msgid "Atlantic/Bermuda" +msgstr "Äại Tây DÆ°Æ¡ng/Bermuda" + +#: ../calendar/zones.h:268 +msgid "Atlantic/Canary" +msgstr "Äại Tây DÆ°Æ¡ng/Canary" + +#: ../calendar/zones.h:269 +msgid "Atlantic/Cape_Verde" +msgstr "Äại Tây DÆ°Æ¡ng/Cape_Verde" + +#: ../calendar/zones.h:270 +msgid "Atlantic/Faeroe" +msgstr "Äại Tây DÆ°Æ¡ng/Faeroe" + +#: ../calendar/zones.h:271 +msgid "Atlantic/Jan_Mayen" +msgstr "Äại Tây DÆ°Æ¡ng/Jan_Mayen" + +#: ../calendar/zones.h:272 +msgid "Atlantic/Madeira" +msgstr "Äại Tây DÆ°Æ¡ng/Madeira" + +#: ../calendar/zones.h:273 +msgid "Atlantic/Reykjavik" +msgstr "Äại Tây DÆ°Æ¡ng/Reykjavik" + +#: ../calendar/zones.h:274 +msgid "Atlantic/South_Georgia" +msgstr "Äại Tây DÆ°Æ¡ng/South_Georgia" + +#: ../calendar/zones.h:275 +msgid "Atlantic/St_Helena" +msgstr "Äại Tây DÆ°Æ¡ng/St_Helena" + +#: ../calendar/zones.h:276 +msgid "Atlantic/Stanley" +msgstr "Äại Tây DÆ°Æ¡ng/Stanley" + +#: ../calendar/zones.h:277 +msgid "Australia/Adelaide" +msgstr "Châu Úc/Adelaide" + +#: ../calendar/zones.h:278 +msgid "Australia/Brisbane" +msgstr "Châu Úc/Brisbane" + +#: ../calendar/zones.h:279 +msgid "Australia/Broken_Hill" +msgstr "Châu Úc/Broken_Hill" + +#: ../calendar/zones.h:280 +msgid "Australia/Darwin" +msgstr "Châu Úc/Darwin" + +#: ../calendar/zones.h:281 +msgid "Australia/Hobart" +msgstr "Châu Úc/Hobart" + +#: ../calendar/zones.h:282 +msgid "Australia/Lindeman" +msgstr "Châu Úc/Lindeman" + +#: ../calendar/zones.h:283 +msgid "Australia/Lord_Howe" +msgstr "Châu Úc/Lord_Howe" + +#: ../calendar/zones.h:284 +msgid "Australia/Melbourne" +msgstr "Châu Úc/Melbourne" + +#: ../calendar/zones.h:285 +msgid "Australia/Perth" +msgstr "Châu Úc/Perth" + +#: ../calendar/zones.h:286 +msgid "Australia/Sydney" +msgstr "Châu Úc/Sydney" + +#: ../calendar/zones.h:287 +msgid "Europe/Amsterdam" +msgstr "Châu Âu/Amsterdam" + +#: ../calendar/zones.h:288 +msgid "Europe/Andorra" +msgstr "Châu Âu/Andorra" + +#: ../calendar/zones.h:289 +msgid "Europe/Athens" +msgstr "Châu Âu/Athens" + +#: ../calendar/zones.h:290 +msgid "Europe/Belfast" +msgstr "Châu Âu/Belfast" + +#: ../calendar/zones.h:291 +msgid "Europe/Belgrade" +msgstr "Châu Âu/Belgrade" + +#: ../calendar/zones.h:292 +msgid "Europe/Berlin" +msgstr "Châu Âu/Berlin" + +#: ../calendar/zones.h:293 +msgid "Europe/Bratislava" +msgstr "Châu Âu/Bratislava" + +#: ../calendar/zones.h:294 +msgid "Europe/Brussels" +msgstr "Châu Âu/Brussels" + +#: ../calendar/zones.h:295 +msgid "Europe/Bucharest" +msgstr "Châu Âu/Bucharest" + +#: ../calendar/zones.h:296 +msgid "Europe/Budapest" +msgstr "Châu Âu/Budapest" + +#: ../calendar/zones.h:297 +msgid "Europe/Chisinau" +msgstr "Châu Âu/Chisinau" + +#: ../calendar/zones.h:298 +msgid "Europe/Copenhagen" +msgstr "Châu Âu/Copenhagen" + +#: ../calendar/zones.h:299 +msgid 
"Europe/Dublin" +msgstr "Châu Âu/Dublin" + +#: ../calendar/zones.h:300 +msgid "Europe/Gibraltar" +msgstr "Châu Âu/Gibraltar" + +#: ../calendar/zones.h:301 +msgid "Europe/Helsinki" +msgstr "Châu Âu/Helsinki" + +#: ../calendar/zones.h:302 +msgid "Europe/Istanbul" +msgstr "Châu Âu/Istanbul" + +#: ../calendar/zones.h:303 +msgid "Europe/Kaliningrad" +msgstr "Châu Âu/Kaliningrad" + +#: ../calendar/zones.h:304 +msgid "Europe/Kiev" +msgstr "Châu Âu/Kiev" + +#: ../calendar/zones.h:305 +msgid "Europe/Lisbon" +msgstr "Châu Âu/Lisbon" + +#: ../calendar/zones.h:306 +msgid "Europe/Ljubljana" +msgstr "Châu Âu/Ljubljana" + +#: ../calendar/zones.h:307 +msgid "Europe/London" +msgstr "Châu Âu/London" + +#: ../calendar/zones.h:308 +msgid "Europe/Luxembourg" +msgstr "Châu Âu/Luxembourg" + +#: ../calendar/zones.h:309 +msgid "Europe/Madrid" +msgstr "Châu Âu/Madrid" + +#: ../calendar/zones.h:310 +msgid "Europe/Malta" +msgstr "Châu Âu/Moa-ta" + +#: ../calendar/zones.h:311 +msgid "Europe/Minsk" +msgstr "Châu Âu/Minsk" + +#: ../calendar/zones.h:312 +msgid "Europe/Monaco" +msgstr "Châu Âu/Monaco" + +#: ../calendar/zones.h:313 +msgid "Europe/Moscow" +msgstr "Châu Âu/Moscow" + +#: ../calendar/zones.h:314 +msgid "Europe/Nicosia" +msgstr "Châu Âu/Nicosia" + +#: ../calendar/zones.h:315 +msgid "Europe/Oslo" +msgstr "Châu Âu/Oslo" + +#: ../calendar/zones.h:316 +msgid "Europe/Paris" +msgstr "Châu Âu/Paris" + +#: ../calendar/zones.h:317 +msgid "Europe/Prague" +msgstr "Châu Âu/Prague" + +#: ../calendar/zones.h:318 +msgid "Europe/Riga" +msgstr "Châu Âu/Riga" + +#: ../calendar/zones.h:319 +msgid "Europe/Rome" +msgstr "Châu Âu/Rome" + +#: ../calendar/zones.h:320 +msgid "Europe/Samara" +msgstr "Châu Âu/Samara" + +#: ../calendar/zones.h:321 +msgid "Europe/San_Marino" +msgstr "Châu Âu/San_Marino" + +#: ../calendar/zones.h:322 +msgid "Europe/Sarajevo" +msgstr "Châu Âu/Sarajevo" + +#: ../calendar/zones.h:323 +msgid "Europe/Simferopol" +msgstr "Châu Âu/Simferopol" + +#: ../calendar/zones.h:324 +msgid "Europe/Skopje" +msgstr "Châu Âu/Skopje" + +#: ../calendar/zones.h:325 +msgid "Europe/Sofia" +msgstr "Châu Âu/Sofia" + +#: ../calendar/zones.h:326 +msgid "Europe/Stockholm" +msgstr "Châu Âu/Stockholm" + +#: ../calendar/zones.h:327 +msgid "Europe/Tallinn" +msgstr "Châu Âu/Tallinn" + +#: ../calendar/zones.h:328 +msgid "Europe/Tirane" +msgstr "Châu Âu/Tirane" + +#: ../calendar/zones.h:329 +msgid "Europe/Uzhgorod" +msgstr "Châu Âu/Uzhgorod" + +#: ../calendar/zones.h:330 +msgid "Europe/Vaduz" +msgstr "Châu Âu/Vaduz" + +#: ../calendar/zones.h:331 +msgid "Europe/Vatican" +msgstr "Châu Âu/Vatican" + +#: ../calendar/zones.h:332 +msgid "Europe/Vienna" +msgstr "Châu Âu/Vienna" + +#: ../calendar/zones.h:333 +msgid "Europe/Vilnius" +msgstr "Châu Âu/Vilnius" + +#: ../calendar/zones.h:334 +msgid "Europe/Warsaw" +msgstr "Châu Âu/Warsaw" + +#: ../calendar/zones.h:335 +msgid "Europe/Zagreb" +msgstr "Châu Âu/Zagreb" + +#: ../calendar/zones.h:336 +msgid "Europe/Zaporozhye" +msgstr "Châu Âu/Zaporozhye" + +#: ../calendar/zones.h:337 +msgid "Europe/Zurich" +msgstr "Châu Âu/Zurich" + +#: ../calendar/zones.h:338 +msgid "Indian/Antananarivo" +msgstr "Indian/Antananarivo" + +#: ../calendar/zones.h:339 +msgid "Indian/Chagos" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Chagos" + +#: ../calendar/zones.h:340 +msgid "Indian/Christmas" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Christmas" + +#: ../calendar/zones.h:341 +msgid "Indian/Cocos" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Cocos" + +#: ../calendar/zones.h:342 +msgid "Indian/Comoro" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Comoro" + +#: ../calendar/zones.h:343 +msgid 
"Indian/Kerguelen" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Kerguelen" + +#: ../calendar/zones.h:344 +msgid "Indian/Mahe" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Mahe" + +#: ../calendar/zones.h:345 +msgid "Indian/Maldives" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Maldives" + +#: ../calendar/zones.h:346 +msgid "Indian/Mauritius" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Mauritius" + +#: ../calendar/zones.h:347 +msgid "Indian/Mayotte" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Mayotte" + +#: ../calendar/zones.h:348 +msgid "Indian/Reunion" +msgstr "Ấn Äá»™ DÆ°Æ¡ng/Reunion" + +#: ../calendar/zones.h:349 +msgid "Pacific/Apia" +msgstr "Thái Bình DÆ°Æ¡ng/Apia" + +#: ../calendar/zones.h:350 +msgid "Pacific/Auckland" +msgstr "Thái Bình DÆ°Æ¡ng/Auckland" + +#: ../calendar/zones.h:351 +msgid "Pacific/Chatham" +msgstr "Thái Bình DÆ°Æ¡ng/Chatham" + +#: ../calendar/zones.h:352 +msgid "Pacific/Easter" +msgstr "Thái Bình DÆ°Æ¡ng/Easter" + +#: ../calendar/zones.h:353 +msgid "Pacific/Efate" +msgstr "Thái Bình DÆ°Æ¡ng/Efate" + +#: ../calendar/zones.h:354 +msgid "Pacific/Enderbury" +msgstr "Thái Bình DÆ°Æ¡ng/Enderbury" + +#: ../calendar/zones.h:355 +msgid "Pacific/Fakaofo" +msgstr "Thái Bình DÆ°Æ¡ng/Fakaofo" + +#: ../calendar/zones.h:356 +msgid "Pacific/Fiji" +msgstr "Thái Bình DÆ°Æ¡ng/Phi-gi" + +#: ../calendar/zones.h:357 +msgid "Pacific/Funafuti" +msgstr "Thái Bình DÆ°Æ¡ng/Funafuti" + +#: ../calendar/zones.h:358 +msgid "Pacific/Galapagos" +msgstr "Thái Bình DÆ°Æ¡ng/Ga-la-pa-gos" + +#: ../calendar/zones.h:359 +msgid "Pacific/Gambier" +msgstr "Thái Bình DÆ°Æ¡ng/Gambier" + +#: ../calendar/zones.h:360 +msgid "Pacific/Guadalcanal" +msgstr "Thái Bình DÆ°Æ¡ng/Guadalcanal" + +#: ../calendar/zones.h:361 +msgid "Pacific/Guam" +msgstr "Thái Bình DÆ°Æ¡ng/Guam" + +#: ../calendar/zones.h:362 +msgid "Pacific/Honolulu" +msgstr "Thái Bình DÆ°Æ¡ng/Honolulu" + +#: ../calendar/zones.h:363 +msgid "Pacific/Johnston" +msgstr "Thái Bình DÆ°Æ¡ng/Johnston" + +#: ../calendar/zones.h:364 +msgid "Pacific/Kiritimati" +msgstr "Thái Bình DÆ°Æ¡ng/Kiritimati" + +#: ../calendar/zones.h:365 +msgid "Pacific/Kosrae" +msgstr "Thái Bình DÆ°Æ¡ng/Kosrae" + +#: ../calendar/zones.h:366 +msgid "Pacific/Kwajalein" +msgstr "Thái Bình DÆ°Æ¡ng/Kwajalein" + +#: ../calendar/zones.h:367 +msgid "Pacific/Majuro" +msgstr "Thái Bình DÆ°Æ¡ng/Majuro" + +#: ../calendar/zones.h:368 +msgid "Pacific/Marquesas" +msgstr "Thái Bình DÆ°Æ¡ng/Marquesas" + +#: ../calendar/zones.h:369 +msgid "Pacific/Midway" +msgstr "Thái Bình DÆ°Æ¡ng/Midway" + +#: ../calendar/zones.h:370 +msgid "Pacific/Nauru" +msgstr "Thái Bình DÆ°Æ¡ng/Nauru" + +#: ../calendar/zones.h:371 +msgid "Pacific/Niue" +msgstr "Thái Bình DÆ°Æ¡ng/Niue" + +#: ../calendar/zones.h:372 +msgid "Pacific/Norfolk" +msgstr "Thái Bình DÆ°Æ¡ng/Norfolk" + +#: ../calendar/zones.h:373 +msgid "Pacific/Noumea" +msgstr "Thái Bình DÆ°Æ¡ng/Noumea" + +#: ../calendar/zones.h:374 +msgid "Pacific/Pago_Pago" +msgstr "Thái Bình DÆ°Æ¡ng/Pago_Pago" + +#: ../calendar/zones.h:375 +msgid "Pacific/Palau" +msgstr "Thái Bình DÆ°Æ¡ng/Palau" + +#: ../calendar/zones.h:376 +msgid "Pacific/Pitcairn" +msgstr "Thái Bình DÆ°Æ¡ng/Pitcairn" + +#: ../calendar/zones.h:377 +msgid "Pacific/Ponape" +msgstr "Thái Bình DÆ°Æ¡ng/Ponape" + +#: ../calendar/zones.h:378 +msgid "Pacific/Port_Moresby" +msgstr "Thái Bình DÆ°Æ¡ng/Port_Moresby" + +#: ../calendar/zones.h:379 +msgid "Pacific/Rarotonga" +msgstr "Thái Bình DÆ°Æ¡ng/Rarotonga" + +#: ../calendar/zones.h:380 +msgid "Pacific/Saipan" +msgstr "Thái Bình DÆ°Æ¡ng/Sai-pan" + +#: ../calendar/zones.h:381 +msgid "Pacific/Tahiti" +msgstr "Thái Bình DÆ°Æ¡ng/Ta-hi-ti" + +#: ../calendar/zones.h:382 +msgid 
"Pacific/Tarawa" +msgstr "Thái Bình DÆ°Æ¡ng/Tarawa" + +#: ../calendar/zones.h:383 +msgid "Pacific/Tongatapu" +msgstr "Thái Bình DÆ°Æ¡ng/Tongatapu" + +#: ../calendar/zones.h:384 +msgid "Pacific/Truk" +msgstr "Thái Bình DÆ°Æ¡ng/Truk" + +#: ../calendar/zones.h:385 +msgid "Pacific/Wake" +msgstr "Thái Bình DÆ°Æ¡ng/Wake" + +#: ../calendar/zones.h:386 +msgid "Pacific/Wallis" +msgstr "Thái Bình DÆ°Æ¡ng/Wallis" + +#: ../calendar/zones.h:387 +msgid "Pacific/Yap" +msgstr "Thái Bình DÆ°Æ¡ng/Yap" + +# Variable and unit: do not translate/ biến và Ä‘Æ¡n vị: đừng dịch +#: ../widgets/misc/e-attachment-bar.c:105 +#, c-format +msgid "%.0fK" +msgstr "%.0fK" + +#: ../widgets/misc/e-attachment-bar.c:108 +#, c-format +msgid "%.0fM" +msgstr "%.0fM" + +#: ../widgets/misc/e-attachment-bar.c:111 +#, c-format +msgid "%.0fG" +msgstr "%.0fG" + +#: ../widgets/misc/e-attachment-bar.c:908 +msgid "Attachment Bar" +msgstr "Thanh đính kèm" + +#: ../widgets/misc/e-attachment.c:420 ../widgets/misc/e-attachment.c:436 +#, c-format +msgid "Cannot attach file %s: %s" +msgstr "Không thể đính kèm tập tin « %s »: %s" + +#: ../widgets/misc/e-attachment.c:227 ../widgets/misc/e-attachment.c:428 +#, c-format +msgid "Cannot attach file %s: not a regular file" +msgstr "Không thể đính kèm tập tin « %s »: không phải tập tin bình thÆ°á»ng." + +#: ../composer/e-msg-composer-hdrs.c:558 ../composer/e-msg-composer-hdrs.c:559 +msgid "Posting destination" +msgstr "Äích gởi đến" + +#: ../composer/e-msg-composer-hdrs.c:559 ../composer/e-msg-composer-hdrs.c:560 +msgid "Choose folders to post the message to." +msgstr "Hãy chá»n các thÆ° mục để gởi thÆ° đó vào." + +#: ../composer/e-msg-composer-hdrs.c:593 ../composer/e-msg-composer-hdrs.c:594 +msgid "Click here for the address book" +msgstr "Nhấn vào đây để xem Sổ địa chỉ" + +#: ../composer/e-msg-composer-hdrs.c:623 ../composer/e-msg-composer-hdrs.c:624 +msgid "_Reply-To:" +msgstr "T_rả lá»i:" + +#: ../composer/e-msg-composer-hdrs.c:630 ../composer/e-msg-composer-hdrs.c:631 +msgid "Fr_om:" +msgstr "_Từ :" + +#: ../composer/e-msg-composer-hdrs.c:647 ../composer/e-msg-composer-hdrs.c:649 +msgid "Enter the recipients of the message" +msgstr "Nhập ngÆ°á»i nhận thÆ°" + +#: ../composer/e-msg-composer-hdrs.c:651 ../composer/e-msg-composer-hdrs.c:653 +msgid "Enter the addresses that will receive a carbon copy of the message" +msgstr "Chép cho: hãy nhập các địa chỉ sẽ nhận má»™t bản sao của thÆ° đó." + +#: ../composer/e-msg-composer-hdrs.c:655 ../composer/e-msg-composer-hdrs.c:657 +msgid "" +"Enter the addresses that will receive a carbon copy of the message without " +"appearing in the recipient list of the message." +msgstr "" +"Bí mật Chép cho: hãy nhập các địa chỉ sẽ nhận má»™t bản sao của thÆ° mà không " +"xuất hiện tên trong danh sách ngÆ°á»i nhận (tránh ngÆ°á»i gởi thÆ° rác ăn cấp các " +"địa chỉ đó nhé)." 
+
+#: ../composer/e-msg-composer-hdrs.c:662 ../composer/e-msg-composer-hdrs.c:664
+msgid "_Post To:"
+msgstr "_Gởi tới:"
+
+#: ../composer/e-msg-composer-hdrs.c:667
+msgid "Click here to select folders to post to"
+msgstr "Nhấn vào đây để chọn thư mục gởi đến"
+
+#: ../composer/e-msg-composer-hdrs.c:673 ../composer/e-msg-composer-hdrs.c:675
+msgid "Post To:"
+msgstr "Gởi tới:"
+
+#: ../composer/e-msg-composer-select-file.c:82
+#: ../composer/e-msg-composer-select-file.c:81
+msgid "A_ttach"
+msgstr "Đính _kèm"
+
+#: ../composer/e-msg-composer-select-file.c:239
+msgid "Insert Attachment"
+msgstr "Chèn đính kèm"
+
+#: ../composer/e-msg-composer.c:830 ../composer/e-msg-composer.c:738
+msgid ""
+"Cannot sign outgoing message: No signing certificate set for this account"
+msgstr ""
+"Không thể ký tên thư gởi đi: chưa đặt chứng nhận chữ ký cho tài khoản này"
+
+#: ../composer/e-msg-composer.c:837 ../composer/e-msg-composer.c:745
+msgid ""
+"Cannot encrypt outgoing message: No encryption certificate set for this "
+"account"
+msgstr ""
+"Không thể mật mã hoá thư gởi đi: chưa đặt chứng nhận mật mã cho tài khoản "
+"này"
+
+#: ../plug-ins/common/svg.c:315 ../plug-ins/common/svg.c:717
+msgid "Unknown reason"
+msgstr "Không rõ lý do"
+
+#: ../composer/e-msg-composer.c:1392 ../composer/e-msg-composer.c:1293
+#: ../gmedia_slice/callbacks.c:739
+#, c-format
+msgid "Could not open file"
+msgstr "Không thể mở tập tin"
+
+#: ../composer/e-msg-composer.c:1400 ../composer/e-msg-composer.c:1301
+msgid "Unable to retrieve message from editor"
+msgstr "Không thể lấy thư từ trình biên soạn"
+
+#: ../composer/e-msg-composer.c:1678 ../composer/e-msg-composer.c:1571
+msgid "Untitled Message"
+msgstr "Thư chưa có tên"
+
+#: ../glade/gnome/gnomeapp.c:172 ../plug-ins/common/spheredesigner.c:2195
+#: ../glade/gnome/gnomeapp.c:173 ../src/gtkfunc.c:1203 ../scripts/test.c:278
+#: ../glade/pyblio.glade.in.h:9
+msgid "Open File"
+msgstr "Mở tập tin"
+
+#: ../mail/em-account-editor.c:633 ../mail/em-account-editor.c:700
+msgid "Autogenerated"
+msgstr "Tự động phát sinh"
+
+#: ../composer/e-msg-composer.c:2243 ../composer/e-msg-composer.c:2104
+msgid "Si_gnature:"
+msgstr "Chữ _ký:"
+
+#: ../composer/e-msg-composer.c:3525 ../composer/e-msg-composer.c:3526
+msgid "Compose a message"
+msgstr "Biên soạn thư"
+
+#: ../composer/e-msg-composer.c:3819 ../composer/e-msg-composer.c:3641
+msgid "_Attachment Bar"
+msgstr "Thanh đính _kèm"
+
+#: ../composer/e-msg-composer.c:4911 ../composer/e-msg-composer.c:4716
+msgid ""
+"(The composer contains a non-text message body, which cannot be edited.)"
+msgstr ""
+"(Bộ soạn thảo chứa phần thân thư phi văn bản nên không thể hiệu chỉnh nó)"
+
+#: ../composer/mail-composer.error.xml.h:1
+msgid ""
+" There are few attachments getting downloaded. Sending the mail will cause "
+"the mail to be sent without those pending attachments "
+msgstr ""
+"Hiện thời đang tải về một số đính kèm. Gởi thư ngay bây giờ thì thư sẽ được "
+"gởi đi mà không có những đính kèm đó."
+
+#: ../composer/mail-composer.error.xml.h:2
+msgid "All accounts have been removed."
+msgstr "Mọi tài khoản đã được gỡ bỏ."
+
+#: ../composer/mail-composer.error.xml.h:3
+msgid ""
+"Are you sure you want to discard the message, titled '{0}', you are "
+"composing?"
+msgstr "Bạn có chắc muốn xoá bỏ thư tên « {0} » mà bạn đang soạn không?"
+
+#: ../composer/mail-composer.error.xml.h:4
+msgid "Because &quot;{0}&quot;, you may need to select different mail options."
+msgstr "Vì « {0} », có lẽ bạn cần chá»n má»™t số tùy chá»n thÆ° khác." + +#: ../composer/mail-composer.error.xml.h:5 ../e-util/e-system.error.xml.h:1 +#: ../mail/mail.error.xml.h:18 ../mail/mail.error.xml.h:17 +msgid "Because "{1}"." +msgstr "Vì « {1}»." + +#: ../composer/mail-composer.error.xml.h:6 +msgid "" +"Closing this composer window will discard the message permanently, unless " +"you choose to save the message in your Drafts folder. This will allow you to " +"continue the message at a later date." +msgstr "" +"Äóng cá»­a sổ soạn này thì sẽ xoá bá» thÆ° đó hoàn toàn, trừ bạn chá»n lÆ°u thÆ° đó " +"vào thÆ° mục Nháp. Làm nhÆ° thế sẽ cho phép bạn tiếp tục thÆ° đó lần sau." + +#: ../composer/mail-composer.error.xml.h:7 +msgid "Could not create composer window." +msgstr "Không thể tạo cá»­a sổ soạn." + +#: ../composer/mail-composer.error.xml.h:8 +msgid "Could not create message." +msgstr "Không thể tạo thÆ°." + +#: ../composer/mail-composer.error.xml.h:9 +msgid "Could not read signature file "{0}"." +msgstr "Không thể Ä‘á»c tập tin chữ ký « {0} »." + +#: ../composer/mail-composer.error.xml.h:10 +msgid "Could not retrieve messages to attach from {0}." +msgstr "Không thể gá»i thÆ° để đính kèm từ {0}." + +#: ../composer/mail-composer.error.xml.h:11 +msgid "Could not save to autosave file "{0}"." +msgstr "Không thể lÆ°u vào tập tin lÆ°u tá»± Ä‘á»™ng « {0}»." + +#: ../composer/mail-composer.error.xml.h:12 +msgid "Directories can not be attached to Messages." +msgstr "Không thể đính thÆ° mục kèm thÆ°." + +#: ../composer/mail-composer.error.xml.h:13 +msgid "Do you want to recover unfinished messages?" +msgstr "Bạn có muốn phục hồi các thÆ° chÆ°a hoàn tất không?" + +#: ../composer/mail-composer.error.xml.h:14 +msgid "Don't Recover" +msgstr "Không phục hồi" + +#: ../composer/mail-composer.error.xml.h:15 +msgid "Download in progress. Do you want to send the mail?" +msgstr "Äang tải vá». Bạn còn muốn gởi thÆ° sao?" + +#: ../composer/mail-composer.error.xml.h:16 +msgid "Error saving to autosave because "{1}"." +msgstr "Gặp lá»—i khi lÆ°u vào tập tin lÆ°u tá»± Ä‘á»™ng vì « {1} »." + +#: ../composer/mail-composer.error.xml.h:17 +msgid "" +"Evolution quit unexpectedly while you were composing a new message. " +"Recovering the message will allow you to continue where you left off." +msgstr "" +"Trình Evolution đã thoát bất ngá» trong khi bạn soạn má»™t thÆ° má»›i. Phục hồi " +"thÆ° đó thì sẽ cho phép bạn tiếp tục từ chá»— đó." + +#: ../composer/mail-composer.error.xml.h:18 +msgid "Recover" +msgstr "Phục hồi" + +#: ../composer/mail-composer.error.xml.h:19 +msgid "The file `{0}' is not a regular file and cannot be sent in a message." +msgstr "" +"Tập tin « {0} » không phải là tập tin chuẩn nên không thể gởi nó trong thÆ°." + +#: ../composer/mail-composer.error.xml.h:20 +msgid "" +"To attach the contents of this directory, either attach the files in this " +"directory individually, or create an archive of the directory and attach it." +msgstr "" +"Äể đính kèm ná»™i dung thÆ° mục này thì bạn hãy hoặc đính kèm má»—i tập tin trong " +"nó từng cái má»™t, hoặc tạo má»™t kho của toàn bá»™ thÆ° mục và đính kèm kho đó." + +#: ../composer/mail-composer.error.xml.h:21 +msgid "" +"Unable to activate the HTML editor control.\n" +"\n" +"Please make sure that you have the correct version of gtkhtml and libgtkhtml " +"installed." +msgstr "" +"Không thể kích hoạt Ä‘iá»u khiển bá»™ biên soạn HTML.\n" +"\n" +"Vui lòng kiểm tra xem GtkHTML và libGtkHTML có được cài đặt đúng phiên bản " +"chÆ°a." 
+
+#: ../composer/mail-composer.error.xml.h:24
+msgid "Unable to activate the address selector control."
+msgstr "Không thể kích hoạt điều khiển bộ chọn địa chỉ."
+
+#: ../composer/mail-composer.error.xml.h:25
+msgid "Unfinished messages found"
+msgstr "Tìm thấy thư chưa hoàn tất"
+
+#: ../composer/mail-composer.error.xml.h:26
+msgid "Warning: Modified Message"
+msgstr "Cảnh báo : thư được sửa đổi."
+
+#: ../composer/mail-composer.error.xml.h:27
+msgid "You cannot attach the file `{0}' to this message."
+msgstr "Không thể đính kèm tập tin « {0} » vào thư này."
+
+#: ../composer/mail-composer.error.xml.h:28
+msgid "You need to configure an account before you can compose mail."
+msgstr "Bạn cần cấu hình một tài khoản nào đó trước khi có thể biên soạn thư."
+
+#: ../composer/mail-composer.error.xml.h:30
+msgid "_Save Message"
+msgstr "_Lưu thư"
+
+# Name: do not translate/ tên: đừng dịch
+#: ../shell/main.c:514 ../shell/main.c:509
+msgid "Evolution"
+msgstr "Evolution"
+
+#: ../data/evolution.desktop.in.in.h:2
+msgid "The Evolution Groupware Suite"
+msgstr "Bộ phần mềm nhóm Evolution"
+
+#: ../data/evolution.keys.in.in.h:1
+msgid "address card"
+msgstr "thẻ địa chỉ"
+
+#: ../data/evolution.keys.in.in.h:2
+msgid "calendar information"
+msgstr "thông tin lịch"
+
+#: ../designs/OOA/ooa.glade.h:1 ../storage/exchange-oof.glade.h:1
+msgid ""
+"Currently, your status is \"Out of the Office\". \n"
+"\n"
+"Would you like to change your status to \"In the Office\"? "
+msgstr ""
+"Hiện thời, trạng thái của bạn là « Ngoài văn phòng ». \n"
+"\n"
+"Bạn có muốn thay đổi trạng thái thành « Trong văn phòng » không? "
+
+#: ../designs/OOA/ooa.glade.h:4 ../storage/exchange-oof.glade.h:4
+msgid "Out of Office Message:"
+msgstr "Thông điệp Ngoài Văn Phòng:"
+
+#: ../designs/OOA/ooa.glade.h:5 ../storage/exchange-oof.glade.h:5
+#: ../data/UpdateManager.glade.h:2 ui/galeon.glade.h:6
+msgid "Status:"
+msgstr "Trạng thái:"
+
+#: ../designs/OOA/ooa.glade.h:6 ../storage/exchange-oof.glade.h:6
+msgid ""
+"The message specified below will be automatically sent to each person "
+"who sends\n"
+"mail to you while you are out of the office."
+msgstr ""
+"Thông điệp dưới đây sẽ được tự động gởi tới mỗi người gởi thư cho "
+"bạn\n"
+"khi bạn ở ngoài văn phòng."
+
+#: ../designs/OOA/ooa.glade.h:8 ../storage/exchange-oof.glade.h:8
+msgid "I am currently in the office"
+msgstr "Tôi hiện thời ở trong văn phòng"
+
+#: ../designs/OOA/ooa.glade.h:9 ../storage/exchange-oof.glade.h:9
+msgid "I am currently out of the office"
+msgstr "Tôi hiện thời ở ngoài văn phòng"
+
+#: ../designs/OOA/ooa.glade.h:10 ../storage/exchange-oof.glade.h:10
+msgid "No, Don't Change Status"
+msgstr "Không, đừng thay đổi trạng thái"
+
+#: ../designs/OOA/ooa.glade.h:12 ../storage/exchange-oof.glade.h:11
+msgid "Out of Office Assistant"
+msgstr "Trợ tá Ngoài Văn Phòng"
+
+#: ../designs/OOA/ooa.glade.h:13 ../storage/exchange-oof.glade.h:12
+msgid "Yes, Change Status"
+msgstr "Có, thay đổi trạng thái"
+
+#: ../files/sharing-properties-view.glade.h:1
+msgid " "
+msgstr " "
+
+#: ../designs/read_receipts/read.glade.h:2
+msgid "Receiving Email"
+msgstr "Nhận thư"
+
+#: ../designs/read_receipts/read.glade.h:3
+msgid "Sending Email:"
+msgstr "Gởi thư"
+
+#: ../designs/read_receipts/read.glade.h:4
+msgid ""
+"This page allows you to choose if you want to be notified via a read "
+"receipt when a message you\n"
+"sent is read, and to specify what Evolution should do when someone requests "
+"a receipt from you."
+msgstr "" +"Trang này cho phép bạn chá»n nếu muốn nhận thông báo ngÆ°á»i nhận đã Ä‘á»c " +"thÆ° của bạn, và cÅ©ng có thể lập hành Ä‘á»™ng của trình Evolution khi ngÆ°á»i khác " +"yêu cầu nhận thông báo đã Ä‘á»c từ bạn." + +#: ../designs/read_receipts/read.glade.h:6 +msgid "Always send back a read receipt" +msgstr "Luôn luôn trả gởi má»™t thông báo đã Ä‘á»c" + +#: ../designs/read_receipts/read.glade.h:7 +msgid "Ask me if I want to send back a read receipt" +msgstr "Há»i tôi nếu muốn trả gởi má»™t thông báo đã Ä‘á»c" + +#: ../designs/read_receipts/read.glade.h:8 +msgid "Never send back a read receipt" +msgstr "Không bao giá» trả gởi má»™t thông báo đã Ä‘á»c" + +#: ../designs/read_receipts/read.glade.h:9 +msgid "Read Receipts" +msgstr "Thông báo đã Ä‘á»c" + +#: ../designs/read_receipts/read.glade.h:10 +msgid "Request a read receipt for all messages I send" +msgstr "Yêu cầu má»™t thông báo đã Ä‘á»c cho má»i thÆ° tôi gởi" + +#: ../designs/read_receipts/read.glade.h:11 +msgid "Unless the message is sent to a mailing list, and not to me personally" +msgstr "Trừ khi gởi thÆ° đó cho há»™p thÆ° chung, không phải cho tôi riêng" + +#: ../designs/read_receipts/read.glade.h:12 +msgid "" +"When you receive an email with a read receipt request, what should Evolution " +"do?" +msgstr "" +"Khi bạn nhận má»™t thÆ° yêu cầu nhận thông báo đã Ä‘á»c, trình Evolution nên làm " +"gì vậy?" + +#: ../e-util/e-dialog-utils.c:281 ../e-util/e-dialog-utils.c:267 +msgid "" +"A file by that name already exists.\n" +"Overwrite it?" +msgstr "" +"Tập tin tên này đã có.\n" +"Ghi đè lên nó không?" + +#: ../e-util/e-dialog-utils.c:283 ../e-util/e-system.error.xml.h:6 +#: ../e-util/e-dialog-utils.c:269 ../src/totem-playlist.c:884 +msgid "Overwrite file?" +msgstr "Ghi đè lên tập tin không?" + +#: ../e-util/e-error.c:84 ../e-util/e-error.c:85 ../e-util/e-error.c:127 +msgid "Evolution Error" +msgstr "Lá»—i Evolution" + +#: ../e-util/e-error.c:86 ../e-util/e-error.c:87 ../e-util/e-error.c:125 +msgid "Evolution Warning" +msgstr "Cảnh báo Evolution" + +#: ../e-util/e-error.c:124 +msgid "Evolution Information" +msgstr "Thông tin Evolution" + +#: ../e-util/e-error.c:126 +msgid "Evolution Query" +msgstr "Truy vấn Evolution" + +#. setup a dummy error +#: ../e-util/e-error.c:442 ../e-util/e-error.c:438 +#, c-format +msgid "" +"Internal error, unknown error '%s' requested" +msgstr "Lá»—i ná»™i tại, lá»—i lạ « %s » được yêu cầu" + +#: ../e-util/e-system.error.xml.h:2 +msgid "Cannot open file "{0}"." +msgstr "Không thể mở tập tin « {0} »." + +#: ../e-util/e-system.error.xml.h:3 +msgid "Cannot save file "{0}"." +msgstr "Không thể lÆ°u tập tin « {0} »." + +#: ../e-util/e-system.error.xml.h:4 +msgid "Do you wish to overwrite it?" +msgstr "Bạn có muốn ghi đè lên nó không?" + +#: ../e-util/e-system.error.xml.h:5 +msgid "File exists "{0}"." +msgstr "Tập tin « {0} » đã có." 
+
+#: ../nautilus-cd-burner.c:868 ../src/sj-extracting.c:212
+#: ../data/glade/OverwriteDialog.glade.h:2
+msgid "_Overwrite"
+msgstr "_Ghi đè"
+
+#: ../filter/filter-datespec.c:78
+#, c-format
+msgid "1 second ago"
+msgid_plural "%d seconds ago"
+msgstr[0] "%d giây trước"
+
+#: ../filter/filter-datespec.c:79
+#, c-format
+msgid "1 minute ago"
+msgid_plural "%d minutes ago"
+msgstr[0] "%d phút trước"
+
+#: ../filter/filter-datespec.c:80
+#, c-format
+msgid "1 hour ago"
+msgid_plural "%d hours ago"
+msgstr[0] "%d giờ trước"
+
+#: ../filter/filter-datespec.c:81
+#, c-format
+msgid "1 day ago"
+msgid_plural "%d days ago"
+msgstr[0] "%d ngày trước"
+
+#: ../filter/filter-datespec.c:82
+#, c-format
+msgid "1 week ago"
+msgid_plural "%d weeks ago"
+msgstr[0] "%d tuần trước"
+
+#: ../filter/filter-datespec.c:83
+#, c-format
+msgid "1 month ago"
+msgid_plural "%d months ago"
+msgstr[0] "%d tháng trước"
+
+#: ../filter/filter-datespec.c:84
+#, c-format
+msgid "1 year ago"
+msgid_plural "%d years ago"
+msgstr[0] "%d năm trước"
+
+#: ../filter/filter-datespec.c:285
+msgid ""
+msgstr ""
+
+#: ../filter/filter-datespec.c:288 ../filter/filter-datespec.c:299
+msgid "now"
+msgstr "bây giờ"
+
+#. strftime for date filter display, only needs to show a day date (i.e. no time)
+#: ../filter/filter-datespec.c:295
+msgid "%d-%b-%Y"
+msgstr "%d-%b-%Y"
+
+#: ../filter/filter-datespec.c:415
+msgid "Select a time to compare against"
+msgstr "Chọn thời điểm để đối chiếu"
+
+#: ../libgnomedb/handlers/plugins/gnome-db-entry-filesel.c:199
+msgid "Choose a file"
+msgstr "Chọn tập tin"
+
+#: dselect/pkgdisplay.cc:61
+msgid "Important"
+msgstr "Quan trọng"
+
+#: ../mail/em-migrate.c:1044 ../mail/mail-config.c:79
+msgid "To Do"
+msgstr "Cần làm"
+
+#: ../mail/mail-config.glade.h:95
+msgid "Later"
+msgstr "Sau đó"
+
+#: sound/sound.c:329 ../glom/glom.glade.h:149
+msgid "Test"
+msgstr "Thử tra"
+
+#: ../filter/filter-rule.c:791
+msgid "_Search name:"
+msgstr "Tên tìm _kiếm:"
+
+#: ../filter/filter-rule.c:819
+msgid "Find items that meet the following criteria"
+msgstr "Tìm mục khớp tiêu chuẩn theo đây"
+
+#: ../filter/filter-rule.c:858
+msgid "If all criteria are met"
+msgstr "Nếu mọi tiêu chuẩn đều thỏa"
+
+#: ../filter/filter-rule.c:858
+msgid "If any criteria are met"
+msgstr "Nếu tiêu chuẩn nào thỏa"
+
+#: ../filter/filter-rule.c:860
+msgid "Find items:"
+msgstr "Tìm mục:"
+
+#: ../filter/filter-rule.c:881
+msgid "All related"
+msgstr "Mọi thứ liên quan"
+
+#: ../filter/filter-rule.c:881
+msgid "Replies"
+msgstr "Trả lời"
+
+#: ../filter/filter-rule.c:881
+msgid "Replies and parents"
+msgstr "Trả lời và mẹ"
+
+#: ../filter/filter-rule.c:883
+msgid "Include threads"
+msgstr "Gồm các mạch"
+
+#: ../filter/filter.error.xml.h:1
+msgid "Bad regular expression &quot;{0}&quot;."
+msgstr "Biểu thức chính quy sai « {0} »."
+
+#: ../filter/filter.error.xml.h:2
+msgid "Could not compile regular expression &quot;{1}&quot;."
+msgstr "Không thể biên dịch biểu thức chính quy « {1} »."
+
+#: ../filter/filter.error.xml.h:3
+msgid "File &quot;{0}&quot; does not exist or is not a regular file."
+msgstr ""
+"Tập tin « {0} » không tồn tại hoặc không phải là một tập tin bình thường."
+
+#: ../filter/filter.error.xml.h:4
+msgid "Missing date."
+msgstr "Thiếu ngày."
+
+#: ../filter/filter.error.xml.h:5
+msgid "Missing file name."
+msgstr "Thiếu tên tập tin."
+
+#: ../filter/filter.error.xml.h:6 ../mail/mail.error.xml.h:67
+#: ../mail/mail.error.xml.h:65
+msgid "Missing name."
+msgstr "Thiếu tên."
+
+#: ../filter/filter.error.xml.h:7
+msgid "Name &quot;{0}&quot; already used."
+msgstr "Tên « {0} » đã được dùng."
+
+#: ../filter/filter.error.xml.h:8
+msgid "Please choose another name."
+msgstr "Hãy chọn tên khác."
+
+#: ../filter/filter.error.xml.h:9
+msgid "You must choose a date."
+msgstr "Bạn phải chọn ngày."
+
+#: ../filter/filter.error.xml.h:10
+msgid "You must name this filter."
+msgstr "Bạn phải đặt tên cho bộ lọc này."
+
+#: ../filter/filter.error.xml.h:11 prog/aspell.cpp:965
+msgid "You must specify a file name."
+msgstr "Bạn phải xác định tên tập tin."
+
+#: ../filter/filter.glade.h:1
+msgid "_Filter Rules"
+msgstr "Quy tắc bộ _lọc"
+
+#: ../filter/filter.glade.h:2
+msgid "Compare against"
+msgstr "Đối chiếu với"
+
+#: ../filter/filter.glade.h:4
+msgid "Show filters for mail:"
+msgstr "Hiển thị bộ lọc cho thư :"
+
+#: ../filter/filter.glade.h:5
+msgid ""
+"The message's date will be compared against\n"
+"12:00am of the date specified."
+msgstr ""
+"Ngày gởi thư sẽ được đối chiếu với thời\n"
+" điểm 12:00am tại ngày xác định."
+
+#: ../filter/filter.glade.h:7
+msgid ""
+"The message's date will be compared against\n"
+"a time relative to when filtering occurs."
+msgstr ""
+"Ngày gởi thư sẽ được đối chiếu\n"
+"với thời điểm liên quan lúc lọc."
+
+#: ../filter/filter.glade.h:9
+msgid ""
+"The message's date will be compared against\n"
+"the current time when filtering occurs."
+msgstr ""
+"Ngày gởi thông điệp sẽ được đối chiếu\n"
+"với thời điểm hiện thời, khi lọc."
+
+#: ../filter/filter.glade.h:12
+msgid "a time relative to the current time"
+msgstr "thời điểm so với hiện thời"
+
+#: ../filter/filter.glade.h:13
+msgid "ago"
+msgstr "trước"
+
+#: ../src/smart-playlist-dialog.c:164
+msgid "months"
+msgstr "tháng"
+
+#: ../filter/filter.glade.h:19
+msgid "the current time"
+msgstr "thời điểm hiện thời"
+
+#: ../filter/filter.glade.h:20
+msgid "the time you specify"
+msgstr "thời điểm bạn chọn"
+
+#: ../filter/filter.glade.h:22 event-ui.c:1028
+msgid "years"
+msgstr "năm"
+
+#: ../filter/rule-editor.c:292 ../filter/rule-editor.c:290
+msgid "Add Rule"
+msgstr "Thêm quy tắc"
+
+#: ../filter/rule-editor.c:368 ../filter/rule-editor.c:366
+msgid "Edit Rule"
+msgstr "Sửa quy tắc"
+
+#: ../filter/rule-editor.c:735 ../filter/rule-editor.c:698
+msgid "Rule name"
+msgstr "Tên quy tắc"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:1
+msgid "Composer Preferences"
+msgstr "Tùy thích bộ soạn thảo"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:2
+msgid ""
+"Configure mail preferences, including security and message display, here"
+msgstr ""
+"Cấu hình Tùy thích thư tín, bao gồm tính bảo mật và cách hiển thị thư, ở đây."
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:3
+msgid "Configure spell-checking, signatures, and the message composer here"
+msgstr "Cấu hình kiểm tra chính tả, chữ ký, và bộ soạn thảo thư ở đây"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:4
+msgid "Configure your email accounts here"
+msgstr "Cấu hình tài khoản thư ở đây"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:5
+msgid "Evolution Mail"
+msgstr "Thư tín Evolution"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:6
+msgid "Evolution Mail accounts configuration control"
+msgstr "Điều khiển cấu hình tài khoản thư tín Evolution."
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:7
+msgid "Evolution Mail component"
+msgstr "Thành phần thư tín Evolution"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:8
+msgid "Evolution Mail composer"
+msgstr "Bộ soạn thư của Evolution"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:9
+msgid "Evolution Mail composer configuration control"
+msgstr "Điều khiển cấu hình bộ soạn thư Evolution."
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:10
+msgid "Evolution Mail preferences control"
+msgstr "Điều khiển sở thích thư tín Evolution."
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:12
+msgid "Mail Accounts"
+msgstr "Tài khoản thư"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:13
+#: ../mail/mail-config.glade.h:97
+msgid "Mail Preferences"
+msgstr "Tùy thích thư"
+
+#: ../mail/GNOME_Evolution_Mail.server.in.in.h:14
+#: ../gncal/gnomecal-prefs.c:1534
+msgid "_Mail"
+msgstr "Th_ư tín"
+
+#: ../mail/em-account-editor.c:395 ../mail/em-account-editor.c:387
+#, c-format
+msgid "%s License Agreement"
+msgstr "Điều kiện quyền của « %s »"
+
+#: ../mail/em-account-editor.c:402 ../mail/em-account-editor.c:394
+#, c-format
+msgid ""
+"\n"
+"Please read carefully the license agreement\n"
+"for %s displayed below\n"
+"and tick the check box for accepting it\n"
+msgstr ""
+"\n"
+"Vui lòng đọc cẩn thận điều kiện quyền\n"
+"cho « %s » bên dưới, và đánh dấu\n"
+"trong hộp chọn để chấp nhận các điều kiện này.\n"
+
+#: ../src/ImportCommand.cs:27 ../libgimpwidgets/gimpfileentry.c:351
+#: ../widgets/gtk+.xml.in.h:159
+msgid "Select Folder"
+msgstr "Chọn thư mục"
+
+#: ../mail/em-account-editor.c:768 ../mail/em-account-editor.c:760
+msgid "Ask for each message"
+msgstr "Hỏi cho mỗi thư"
+
+#: ../mail/em-account-editor.c:1796 ../mail/mail-config.glade.h:116
+#: ../mail/em-account-editor.c:1760 ../mail/mail-config.glade.h:117
+msgid "Receiving Email"
+msgstr "Đang nhận thư"
+
+#: ../mail/em-account-editor.c:1979 ../mail/em-account-editor.c:1943
+msgid "Automatically check for _new mail every"
+msgstr "Tự động kiểm tra thư _mới mỗi"
+
+#: ../mail/em-account-editor.c:2160 ../mail/mail-config.glade.h:128
+#: ../mail/em-account-editor.c:2111
+msgid "Sending Email"
+msgstr "Đang gởi thư"
+
+#: ../mail/em-account-editor.c:2219 ../mail/mail-config.glade.h:68
+#: ap-gl/ap-gl.c:63 src/ap-config.c:81 ../mail/em-account-editor.c:2162
+#: ../memprof.glade.h:16
+msgid "Defaults"
+msgstr "Mặc định"
+
+#: ../mail/em-account-editor.c:2258 ../mail/em-account-editor.c:2332
+msgid "Receiving Options"
+msgstr "Tùy chọn nhận"
+
+#: ../camel/providers/imap/camel-imap-provider.c:44
+msgid "Checking for New Mail"
+msgstr "Kiểm tra tìm thư mới"
+
+#: ../mail/em-account-editor.c:2737 ../mail/mail-config.glade.h:34
+#: ../mail/em-account-editor.c:2657
+msgid "Account Editor"
+msgstr "Bộ hiệu chỉnh tài khoản"
+
+#: ../mail/em-account-editor.c:2737 ../mail/mail-config.glade.h:80
+#: ../mail/em-account-editor.c:2657
+msgid "Evolution Account Assistant"
+msgstr "Phụ tá tài khoản Evolution"
+
+#: ../objects/FS/function.c:968
+msgid "Disable"
+msgstr "Tắt"
+
+#: src/dictmanagedlg.cpp:483
+msgid "Enable"
+msgstr "Bật"
+
+#: ../src/boards/python/admin/profile_list.py:316
+msgid "[Default]"
+msgstr "[Mặc định]"
+
+#: ../extensions/extensions-manager-ui/extensions-manager-ui.c:340
+#: ../src/glade-property.c:491
+msgid "Enabled"
+msgstr "Hoạt động"
+
+#: ../mail/em-account-prefs.c:511 ../mail/em-account-prefs.c:510
+msgid "Account name"
+msgstr "Tên tài khoản"
+
+#: ../src/netstat.c:443 ../gnome-netinfo/netstat.c:403
+msgid "Protocol"
+msgstr "Giao thức"
+
+#: ../mail/em-account-prefs.c:518 ../mail/em-account-prefs.c:517
+msgid "Mail Accounts Table"
+msgstr "Bảng tài khoản thư"
+
+#: ../plug-ins/common/gbr.c:425 ../plug-ins/common/gih.c:499
+#: ../plug-ins/common/gih.c:1158 ../plug-ins/gflare/gflare.c:2993
+msgid "Unnamed"
+msgstr "Không tên"
+
+#: ../mail/em-composer-prefs.c:936 ../mail/em-composer-prefs.c:895
+msgid "Language(s)"
+msgstr "Ngôn ngữ"
+
+#: ../mail/em-composer-prefs.c:979 ../mail/em-composer-prefs.c:938
+msgid "Add signature script"
+msgstr "Thêm tập lệnh chữ ký"
+
+#: ../mail/em-composer-prefs.c:999 ../mail/em-composer-prefs.c:958
+msgid "Signature(s)"
+msgstr "Chữ ký"
+
+#: ../mail/em-composer-utils.c:862 ../mail/em-format-quote.c:389
+#: ../mail/em-composer-utils.c:853
+msgid "-------- Forwarded Message --------"
+msgstr "━━━Thư đã chuyển tiếp━━━"
+
+#: ../mail/em-composer-utils.c:1657 ../mail/em-composer-utils.c:1648
+msgid "an unknown sender"
+msgstr "không biết người gởi đó"
+
+#. Note to translators: this is the attribution string used when quoting messages.
+#. * each ${Variable} gets replaced with a value. To see a full list of available
+#. * variables, see em-composer-utils.c:1514
+#: ../mail/em-composer-utils.c:1704 ../mail/em-composer-utils.c:1695
+msgid ""
+"On ${AbbrevWeekdayName}, ${Year}-${Month}-${Day} at ${24Hour}:${Minute} "
+"${TimeZone}, ${Sender} wrote:"
+msgstr ""
+"Vào ${AbbrevWeekdayName}, ngày ${Day}, tháng ${Month} năm ${Year} lúc "
+"${24Hour}:${Minute} ${TimeZone}, ${Sender} viết:"
+
+#: ../mail/em-filter-editor.c:155 ../mail/em-filter-editor.c:147
+msgid "_Filter Rules"
+msgstr "_Quy tắc bộ lọc"
+
+#. Automatically generated. Do not edit.
+#: ../mail/em-filter-i18n.h:2
+msgid "Adjust Score"
+msgstr "Chỉnh điểm"
+
+#: ../mail/em-filter-i18n.h:3
+msgid "Assign Color"
+msgstr "Gán màu"
+
+#: ../mail/em-filter-i18n.h:4
+msgid "Assign Score"
+msgstr "Gán điểm"
+
+#: ../mail/em-filter-i18n.h:5 ../pan/filter-edit-ui.c:796
+msgid "Attachments"
+msgstr "Đính kèm"
+
+#: ../mail/em-filter-i18n.h:6
+msgid "Beep"
+msgstr "Bíp"
+
+#: ../mail/em-filter-i18n.h:7 ui/bookmarks.glade.h:44
+#: ../src/smart-playlist-dialog.c:150 ../pan/filter-edit-ui.c:772
+#: ../pan/filters/filter-phrase.c:238 ../pan/score-add-ui.c:136
+msgid "contains"
+msgstr "chứa"
+
+#: ../mail/em-filter-i18n.h:8
+msgid "Copy to Folder"
+msgstr "Chép vào thư mục"
+
+#: ../mail/em-filter-i18n.h:9
+msgid "Date received"
+msgstr "Ngày nhận"
+
+#: ../mail/em-filter-i18n.h:10
+msgid "Date sent"
+msgstr "Ngày gởi"
+
+#: ../mail/em-filter-i18n.h:13 ../src/smart-playlist-dialog.c:151
+#: ../pan/filter-edit-ui.c:773 ../pan/filters/filter-phrase.c:247
+#: ../pan/score-add-ui.c:137
+msgid "does not contain"
+msgstr "không chứa"
+
+#: ../mail/em-filter-i18n.h:14 ../pan/filter-edit-ui.c:779
+#: ../pan/filters/filter-phrase.c:250 ../pan/score-add-ui.c:143
+msgid "does not end with"
+msgstr "không kết thúc bằng"
+
+#: ../mail/em-filter-i18n.h:15
+msgid "does not exist"
+msgstr "không tồn tại"
+
+#: ../mail/em-filter-i18n.h:16
+msgid "does not return"
+msgstr "không trả gởi"
+
+#: ../mail/em-filter-i18n.h:17
+msgid "does not sound like"
+msgstr "không giống với"
+
+#: ../mail/em-filter-i18n.h:18 ../pan/filter-edit-ui.c:777
+#: ../pan/filters/filter-phrase.c:249 ../pan/score-add-ui.c:141
+msgid "does not start with"
+msgstr "không bắt đầu bằng"
+
+#: ../mail/em-filter-i18n.h:19
+msgid "Do Not Exist"
+msgstr "Không tồn tại"
+
+#: web/template/resources_edit_main.tpl:121
+msgid "Draft"
+msgstr "Nháp"
+
+#: ../mail/em-filter-i18n.h:21 ui/bookmarks.glade.h:46
+#: ../pan/filter-edit-ui.c:778 ../pan/filters/filter-phrase.c:241
+#: ../pan/score-add-ui.c:142
+msgid "ends with"
+msgstr "kết thúc bằng"
+
+#: ../mail/em-filter-i18n.h:22
+msgid "Exist"
+msgstr "Tồn tại"
+
+#: ../mail/em-filter-i18n.h:23
+msgid "exists"
+msgstr "tồn tại"
+
+#: ../mail/em-filter-i18n.h:24 ../gcalctool/calctool.c:73
+msgid "Expression"
+msgstr "Biểu thức"
+
+#: ../mail/em-filter-i18n.h:25
+msgid "Follow Up"
+msgstr "Theo dõi tiếp"
+
+#: ../src/smart-playlist-dialog.c:148 ../pan/filter-edit-ui.c:774
+#: ../pan/filters/filter-phrase.c:239 ../pan/score-add-ui.c:138
+msgid "is"
+msgstr "là"
+
+#: ../mail/em-filter-i18n.h:28
+msgid "is after"
+msgstr "có sau"
+
+#: ../mail/em-filter-i18n.h:29
+msgid "is before"
+msgstr "có trước"
+
+#: ../mail/em-filter-i18n.h:30
+msgid "is Flagged"
+msgstr "được đặt cờ"
+
+#: ../mail/em-filter-i18n.h:31 ../src/smart-playlist-dialog.c:142
+msgid "is greater than"
+msgstr "trên"
+
+#: ../mail/em-filter-i18n.h:32 ../src/smart-playlist-dialog.c:143
+msgid "is less than"
+msgstr "dưới"
+
+#: ../mail/em-filter-i18n.h:33 ../src/smart-playlist-dialog.c:141
+#: ../src/smart-playlist-dialog.c:149 ../pan/filter-edit-ui.c:775
+#: ../pan/filters/filter-phrase.c:248 ../pan/score-add-ui.c:139
+msgid "is not"
+msgstr "không là"
+
+#: ../mail/em-filter-i18n.h:34
+msgid "is not Flagged"
+msgstr "không được đặt cờ"
+
+#: ../mail/em-filter-i18n.h:35 ../mail/mail-config.glade.h:92
+#: ../ui/evolution-mail-message.xml.h:48
+msgid "Junk"
+msgstr "Rác"
+
+#: ../mail/em-filter-i18n.h:36
+msgid "Junk Test"
+msgstr "Kiểm tra Rác"
+
+#: ../mail/em-filter-i18n.h:38
+msgid "Mailing list"
+msgstr "Hộp thư chung"
+
+#: ../mail/em-filter-i18n.h:39
+msgid "Match All"
+msgstr "Khớp tất cả"
+
+#: ../mail/em-filter-i18n.h:40
+msgid "Message Body"
+msgstr "Thân thư"
+
+#: ../mail/em-filter-i18n.h:41
+msgid "Message Header"
+msgstr "Dòng đầu thư"
+
+#: ../mail/em-filter-i18n.h:42
+msgid "Message is Junk"
+msgstr "Thư là Rác"
+
+#: ../mail/em-filter-i18n.h:43
+msgid "Message is not Junk"
+msgstr "Thư không phải Rác"
+
+#: ../mail/em-filter-i18n.h:44
+msgid "Move to Folder"
+msgstr "Chuyển vào thư mục"
+
+#: ../mail/em-filter-i18n.h:45
+msgid "Pipe to Program"
+msgstr "Gởi qua ống dẫn đến chương trình"
+
+#: ../mail/em-filter-i18n.h:46
+msgid "Play Sound"
+msgstr "Phát âm thanh"
+
+#: ../pan/filter-edit-ui.c:811 ../pan/rules/rule-edit-ui.c:151
+#: ../storage/sunone-add-permission-dialog.glade.h:7
+#: ../storage/sunone-permissions-dialog.c:579
+#, fuzzy
+msgid "Read"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Đã đọc\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"Đọc"
+
+#: ../mail/em-filter-i18n.h:48
+msgid "Recipients"
+msgstr "Người nhận"
+
+#: ../mail/em-filter-i18n.h:49
+msgid "Regex Match"
+msgstr "Khớp biểu thức chính quy"
+
+#: ../mail/em-filter-i18n.h:50
+msgid "Replied to"
+msgstr "Đã trả lời cho"
+
+#: ../mail/em-filter-i18n.h:51
+msgid "returns"
+msgstr "trả gởi"
+
+#: ../mail/em-filter-i18n.h:52
+msgid "returns greater than"
+msgstr "trả gởi trên"
+
+#: ../mail/em-filter-i18n.h:53
+msgid "returns less than"
+msgstr "trả gởi dưới"
+
+#: ../mail/em-filter-i18n.h:54 ../src/alleyoop.c:635 ../memprof.glade.h:40
+msgid "Run Program"
+msgstr "Chạy chương trình"
+
+#: ../libgnomeui/gnome-scores.c:94 ../pan/articlelist.c:1058 ../pan/gui.c:1175
+#: ../pan/prefs.c:1392 ../pan/score-add-ui.c:686 ../pan/score-add-ui.c:720
+msgid "Score"
+msgstr "Điểm"
+
+#: ../mail/em-filter-i18n.h:57
+msgid "Set Status"
+msgstr "Đặt trạng thái"
+
+#: ../mail/em-filter-i18n.h:58
+msgid "Size (kB)"
+msgstr "Cỡ (kB)"
+
+#: ../mail/em-filter-i18n.h:59
+msgid "sounds like"
+msgstr "giống như"
+
+#: ../mail/em-filter-i18n.h:60
+msgid "Source Account"
+msgstr "Tài khoản nguồn"
+
+#: ../mail/em-filter-i18n.h:61
+msgid "Specific header"
+msgstr "Dòng đầu xác định"
+
+#: ../mail/em-filter-i18n.h:62 ui/bookmarks.glade.h:52
+#: ../pan/filter-edit-ui.c:776 ../pan/filters/filter-phrase.c:240
+#: ../pan/score-add-ui.c:140
+msgid "starts with"
+msgstr "bắt đầu bằng"
+
+#: ../mail/em-filter-i18n.h:64
+msgid "Stop Processing"
+msgstr "Dừng xử lý"
+
+#: ../mail/em-filter-i18n.h:66
+msgid "Unset Status"
+msgstr "Bỏ đặt trạng thái"
+
+#. and now for the action area
+#: ../mail/em-filter-rule.c:488
+msgid "Then"
+msgstr "Rồi"
+
+#: ../mail/em-folder-browser.c:143 ../mail/em-folder-browser.c:134
+msgid "C_reate Search Folder From Search..."
+msgstr "Tạo thư mục tìm kiếm từ kết quả tìm kiếm..."
+
+#. TODO: can this be done in a loop?
+#: ../mail/em-folder-properties.c:144
+msgid "Total message:"
+msgid_plural "Total message:"
+msgstr[0] "Tổng số thư :"
+
+#: ../mail/em-folder-properties.c:156
+msgid "Unread message:"
+msgid_plural "Unread message:"
+msgstr[0] "Thư chưa đọc:"
+
+#: ../mail/em-folder-properties.c:278
+#: ../plugins/groupwise-features/properties.glade.h:3
+msgid "Folder Properties"
+msgstr "Thuộc tính thư mục"
+
+#: ../mail/em-folder-selection-button.c:123
+msgid ""
+msgstr ""
+
+#: ../plug-ins/imagemap/imap_cmd_gimp_guides.c:151
+#: web/template/newaccount_bottom.tpl:2 ../objects/UML/message.c:136
+#: ../glom/glom.glade.h:80 ../glom/mode_design/users/dialog_groups_list.cc:76
+msgid "Create"
+msgstr "Tạo"
+
+#: ../mail/em-folder-selector.c:265
+#: ../shell/e-folder-creation-dialog.glade.h:2
+msgid "Folder _name:"
+msgstr "_Tên thư mục:"
+
+#: ../mail/mail-vfolder.c:937 ../mail/mail-vfolder.c:1007
+msgid "Search Folders"
+msgstr "Thư mục tìm kiếm"
+
+#: ../mail/em-folder-tree-model.c:207 ../mail/em-folder-tree-model.c:209
+msgid "UNMATCHED"
+msgstr "KHÔNG KHỚP"
+
+#: ../storage/exchange-hierarchy-foreign.c:253 ../src/journal.c:821
+#: ../src/journal.c:1091
+msgid "Drafts"
+msgstr "Nháp"
+
+#: ../src/history.c:111 ../src/history.c:154
+msgid "Sent"
+msgstr "Đã gởi"
+
+#: ../mail/em-folder-tree.c:694 ../mail/em-folder-tree.c:689
+msgid "Mail Folder Tree"
+msgstr "Cây thư mục thư"
+
+#: ../mail/em-folder-tree.c:854
+#, c-format
+msgid "Moving folder %s"
+msgstr "Đang chuyển thư mục « %s »"
+
+#: ../mail/em-folder-tree.c:856 ../mail/em-folder-tree.c:851
+#, c-format
+msgid "Copying folder %s"
+msgstr "Đang sao chép thư mục « %s »"
+
+#: ../mail/em-folder-tree.c:858 ../mail/message-list.c:1613
+#, c-format
+msgid "Moving messages into folder %s"
+msgstr "Đang chuyển thư vào thư mục « %s »"
+
+#: ../mail/em-folder-tree.c:865 ../mail/message-list.c:1623
+#: ../mail/em-folder-tree.c:860 ../mail/message-list.c:1615
+#, c-format
+msgid "Copying messages into folder %s"
+msgstr "Đang sao chép thư vào thư mục « %s »"
+
+#: ../mail/em-folder-tree.c:881 ../mail/em-folder-tree.c:876
+msgid "Cannot drop message(s) into toplevel store"
+msgstr "Không thả được thư vào kho mức độ đỉnh"
+
+#: ../mail/em-folder-tree.c:977 ../ui/evolution-mail-message.xml.h:105
+#: ../mail/em-folder-tree.c:972 ../ui/evolution-mail-message.xml.h:101
+msgid "_Copy to Folder"
+msgstr "_Chép vào thư mục"
+
+#: ../mail/em-folder-tree.c:978 ../ui/evolution-mail-message.xml.h:116
+#: ../mail/em-folder-tree.c:973 ../ui/evolution-mail-message.xml.h:111
+msgid "_Move to Folder"
+msgstr "_Chuyển vào thư mục"
+
+#: ../mail/em-folder-tree.c:1683 ../mail/mail-ops.c:1058
+#: ../mail/em-folder-tree.c:1678 ../mail/mail-ops.c:1057
+#, c-format
+msgid "Scanning folders in \"%s\""
+msgstr "Đang quét các thư mục trong « %s »"
+
+#: ../Pyblio/GnomeUI/Document.py:146 po/silky.glade.h:221
+msgid "_View"
+msgstr "_Xem"
+
+#: ../mail/em-folder-tree.c:2043 ../mail/em-folder-tree.c:2032
+msgid "Open in _New Window"
+msgstr "Mở trong cửa sổ mớ_i"
+
+#: ../mail/em-folder-tree.c:2047 ../mail/em-folder-tree.c:2036
+msgid "_Copy..."
+msgstr "_Chép..."
+
+#: ../mail/em-folder-tree.c:2048 ../mail/em-folder-tree.c:2037
+msgid "_Move..."
+msgstr "_Di chuyển..."
+
+#: ../storage/xc-commands.c:391
+msgid "_New Folder..."
+msgstr "Thư mục _mới..."
+
+#: ../mail/em-folder-tree.c:2055 ../ui/evolution-mail-list.xml.h:30
+msgid "_Rename..."
+msgstr "Đổi _tên..."
+
+#: ../libnautilus-private/nautilus-file-operations.c:2639
+msgid "_Empty Trash"
+msgstr "Đổ _Rác"
+
+#: ../mail/em-folder-utils.c:104
+#, c-format
+msgid "Copying `%s' to `%s'"
+msgstr "Đang sao chép « %s » vào « %s »..."
+
+#: ../mail/importers/evolution-outlook-importer.c:134
+msgid "Select folder"
+msgstr "Chọn thư mục"
+
+#: ../mail/em-folder-utils.c:368 ../mail/em-folder-view.c:982
+#: ../mail/em-folder-view.c:946
+msgid "C_opy"
+msgstr "_Chép"
+
+#: ../mail/em-folder-utils.c:503 ../shell/e-folder-misc-dialogs.c:188
+#: ../storage/sunone-folder-tree.c:1058
+#, c-format
+msgid "Rename the \"%s\" folder to:"
+msgstr "Đổi tên thư mục « %s » thành:"
+
+#: ../mail/em-folder-utils.c:505 ../shell/e-folder-misc-dialogs.c:191
+msgid "Rename Folder"
+msgstr "Đổi tên thư mục"
+
+#: ../mail/em-folder-utils.c:511
+msgid "Folder names cannot contain '/'"
+msgstr "Tên thư mục không thể chứa ký tự sổ chéo."
+
+#: ../plugins/groupwise-features/share-folder-common.c:140
+#, c-format
+msgid "Creating folder `%s'"
+msgstr "Đang tạo thư mục « %s »"
+
+#: ../plugins/groupwise-features/share-folder-common.c:384
+msgid "Create folder"
+msgstr "Tạo thư mục"
+
+#: ../shell/e-folder-creation-dialog.glade.h:4
+msgid "Specify where to create the folder:"
+msgstr "Xác định nơi tạo thư mục đó :"
+
+#: ../mail/em-folder-view.c:1075 ../ui/evolution-mail-message.xml.h:125
+#: ../mail/em-folder-view.c:1040 ../ui/evolution-mail-message.xml.h:119
+msgid "_Reply to Sender"
+msgstr "T_rả lời người gởi"
+
+#: ../mail/em-popup.c:546 ../mail/em-popup.c:557
+msgid "Reply to _All"
+msgstr "Trả lời _mọi người"
+
+#: ui/galeon-ui.xml.in.h:177
+msgid "_Forward"
+msgstr "_Chuyển tiếp"
+
+#: ../mail/em-folder-view.c:1080 ../ui/evolution-mail-message.xml.h:107
+#: ../mail/em-folder-view.c:1044 ../ui/evolution-mail-message.xml.h:103
+msgid "_Edit as New Message..."
+msgstr "_Hiệu chỉnh dạng thư mới..."
+
+#: ../mail/em-folder-view.c:1086 ../mail/em-folder-view.c:1050
+msgid "U_ndelete"
+msgstr "_Hủy xoá bỏ"
+
+#: ../mail/em-folder-view.c:1087 ../ui/evolution-addressbook.xml.h:37
+#: ../mail/em-folder-view.c:1051 ../ui/evolution-addressbook.xml.h:36
+msgid "_Move to Folder..."
+msgstr "_Chuyển sang thư mục..."
+
+#: ../mail/em-folder-view.c:1088 ../ui/evolution-addressbook.xml.h:33
+#: ../mail/em-folder-view.c:1052 ../ui/evolution-addressbook.xml.h:32
+msgid "_Copy to Folder..."
+msgstr "_Chép vào thư mục..."
+
+#: ../mail/em-folder-view.c:1091 ../mail/em-folder-view.c:1055
+msgid "Mar_k as Read"
+msgstr "Đánh dấu Đã đọ_c"
+
+#: ../mail/em-folder-view.c:1092 ../mail/em-folder-view.c:1056
+msgid "Mark as _Unread"
+msgstr "Đánh dấu C_hưa đọc"
+
+#: ../mail/em-folder-view.c:1093 ../mail/em-folder-view.c:1057
+msgid "Mark as _Important"
+msgstr "Đánh dấu _Quan trọng"
+
+#: ../mail/em-folder-view.c:1094 ../mail/em-folder-view.c:1058
+msgid "_Mark as Unimportant"
+msgstr "Đánh dấu _Không quan trọng"
+
+#: ../mail/em-folder-view.c:1095 ../mail/em-folder-view.c:1059
+msgid "Mark as _Junk"
+msgstr "Đánh dấu _Rác"
+
+#: ../mail/em-folder-view.c:1096 ../mail/em-folder-view.c:1060
+msgid "Mark as _Not Junk"
+msgstr "Đánh dấu Không Rá_c"
+
+#: ../mail/em-folder-view.c:1097 ../mail/em-folder-view.c:1061
+msgid "Mark for Follo_w Up..."
+msgstr "Đánh dấu Cần theo _dõi tiếp..."
+
+#: ../mail/em-folder-view.c:1105 ../mail/em-folder-view.c:1069
+msgid "Fla_g Completed"
+msgstr "Cờ _hoàn tất"
+
+#: ../mail/em-folder-view.c:1106 ../mail/em-folder-view.c:1070
+msgid "Cl_ear Flag"
+msgstr "Xó_a cờ"
+
+#: ../mail/em-folder-view.c:1109 ../mail/em-folder-view.c:1073
+msgid "Crea_te Rule From Message"
+msgstr "Tạo _quy tắc từ thư"
+
+#: ../mail/em-folder-view.c:1110 ../mail/em-folder-view.c:1074
+msgid "Search Folder from _Subject"
+msgstr "Thư mục tìm kiếm trên _Chủ đề"
+
+#: ../mail/em-folder-view.c:1111 ../mail/em-folder-view.c:1075
+msgid "Search Folder from Se_nder"
+msgstr "Thư mục tìm kiếm trên _Người gởi"
+
+#: ../mail/em-folder-view.c:1112 ../mail/em-folder-view.c:1076
+msgid "Search Folder from _Recipients"
+msgstr "Thư mục tìm kiếm trên N_gười nhận"
+
+#: ../mail/em-folder-view.c:1113 ../mail/em-folder-view.c:1077
+msgid "Search Folder from Mailing _List"
+msgstr "Thư mục tìm kiếm trên _Hộp thư chung"
+
+#: ../mail/em-folder-view.c:1117 ../mail/em-folder-view.c:1081
+msgid "Filter on Sub_ject"
+msgstr "Lọc theo _Chủ đề"
+
+#: ../mail/em-folder-view.c:1118 ../mail/em-folder-view.c:1082
+msgid "Filter on Sen_der"
+msgstr "Lọc theo _Người gởi"
+
+#: ../mail/em-folder-view.c:1119 ../mail/em-folder-view.c:1083
+msgid "Filter on Re_cipients"
+msgstr "Lọc theo N_gười nhận"
+
+#: ../mail/em-folder-view.c:1120 ../mail/em-folder-view.c:1084
+msgid "Filter on _Mailing List"
+msgstr "Lọc theo _Hộp thư chung"
+
+#: ../plugins/print-message/print-message.c:83
+msgid "Print Message"
+msgstr "In thư"
+
+#: ../mail/em-folder-view.c:2257 ../mail/em-folder-view.c:2220
+msgid "Unable to retrieve message"
+msgstr "Không nhận được thư."
+
+#: ../mail/em-folder-view.c:2450 ../mail/em-folder-view.c:2413
+msgid "_Copy Link Location"
+msgstr "Sao chép địa chỉ _liên kết"
+
+#: ../mail/em-folder-view.c:2452 ../mail/em-folder-view.c:2415
+msgid "Create _Search Folder"
+msgstr "Tạo Thư mục tìm _kiếm"
+
+#: ../mail/em-folder-view.c:2453 ../mail/em-folder-view.c:2416
+msgid "_From this Address"
+msgstr "_Từ địa chỉ này"
+
+#: ../mail/em-folder-view.c:2454 ../mail/em-folder-view.c:2417
+msgid "_To this Address"
+msgstr "_Cho địa chỉ này"
+
+#: ../mail/em-folder-view.c:2790 ../mail/em-folder-view.c:2698
+#, c-format
+msgid "Click to mail %s"
+msgstr "Nhấn để gởi thư cho « %s »"
+
+#. message-search popup match count string
+#: ../mail/em-format-html-display.c:472 ../mail/em-format-html-display.c:442
+#, c-format
+msgid "Matches: %d"
+msgstr "Khớp: %d"
+
+#: ../mail/em-format-html-display.c:729 ../mail/em-format-html.c:607
+#: ../mail/em-format-html-display.c:694 ../mail/em-format-html.c:596
+#: app/sample-editor.c:1541
+msgid "Unsigned"
+msgstr "Chưa ký"
+
+#: ../mail/em-format-html-display.c:729 ../mail/em-format-html-display.c:694
+msgid ""
+"This message is not signed. There is no guarantee that this message is "
+"authentic."
+msgstr "Thư này không có chữ ký nên không thể đảm bảo thư này do người đó gởi."
+
+#: ../mail/em-format-html-display.c:730 ../mail/em-format-html.c:608
+#: ../mail/em-format-html-display.c:695 ../mail/em-format-html.c:597
+msgid "Valid signature"
+msgstr "Chữ ký hợp lệ"
+
+#: ../mail/em-format-html-display.c:730 ../mail/em-format-html-display.c:695
+msgid ""
+"This message is signed and is valid meaning that it is very likely that this "
+"message is authentic."
+msgstr "Thư này có chữ ký và hợp lệ nên rất có thể là thư đó do người đó gởi."
+
+#: ../mail/em-format-html-display.c:731 ../mail/em-format-html.c:609
+#: ../mail/em-format-html-display.c:696 ../mail/em-format-html.c:598
+msgid "Invalid signature"
+msgstr "Chữ ký không hợp lệ"
+
+#: ../mail/em-format-html-display.c:731 ../mail/em-format-html-display.c:696
+msgid ""
+"The signature of this message cannot be verified, it may have been altered "
+"in transit."
+msgstr ""
+"Không thể xác minh chữ ký của thư này: có lẽ nó bị giả trong khi truyền."
+
+#: ../mail/em-format-html-display.c:732 ../mail/em-format-html-display.c:697
+msgid "Valid signature, cannot verify sender"
+msgstr "Chữ ký hợp lệ nhưng mà không thể xác minh người gởi."
+
+#: ../mail/em-format-html-display.c:732 ../mail/em-format-html-display.c:697
+msgid ""
+"This message is signed with a valid signature, but the sender of the message "
+"cannot be verified."
+msgstr "Thư này có chữ ký hợp lệ, nhưng mà không thể xác minh người gởi thư."
+
+#: ../mail/em-format-html-display.c:738 ../mail/em-format-html.c:616
+#: ../mail/em-format-html-display.c:703 ../mail/em-format-html.c:605
+msgid "Unencrypted"
+msgstr "Không mật mã"
+
+#: ../mail/em-format-html-display.c:738 ../mail/em-format-html-display.c:703
+msgid ""
+"This message is not encrypted. Its content may be viewed in transit across "
+"the Internet."
+msgstr ""
+"Thư này không mật mã nên bất cứ người nào có xem được nội dung nó trong khi "
+"truyền qua Mạng."
+
+#: ../mail/em-format-html-display.c:739 ../mail/em-format-html.c:617
+#: ../mail/em-format-html-display.c:704 ../mail/em-format-html.c:606
+msgid "Encrypted, weak"
+msgstr "Mật mã yếu"
+
+#: ../mail/em-format-html-display.c:739 ../mail/em-format-html-display.c:704
+msgid ""
+"This message is encrypted, but with a weak encryption algorithm. It would "
+"be difficult, but not impossible for an outsider to view the content of this "
+"message in a practical amount of time."
+msgstr ""
+"Thư này mật mã, nhưng mà dùng thuật toán mật mã yếu. Người khác sẽ gặp khó "
+"khăn đọc thư này được một thời gian hữu ích, nhưng mà có thể làm như thế."
+
+#: ../xpdf/pdf-info-dict-util.cc:203
+msgid "Encrypted"
+msgstr "Đã mật mã"
+
+#: ../mail/em-format-html-display.c:740 ../mail/em-format-html-display.c:705
+msgid ""
+"This message is encrypted. It would be difficult for an outsider to view "
+"the content of this message."
+msgstr "Thư này mật mã. Người khác sẽ gặp khó khăn nhiều đọc nội dung thư."
+
+#: ../mail/em-format-html-display.c:741 ../mail/em-format-html.c:619
+#: ../mail/em-format-html-display.c:706 ../mail/em-format-html.c:608
+msgid "Encrypted, strong"
+msgstr "Đã mật mã mạnh"
+
+#: ../mail/em-format-html-display.c:741 ../mail/em-format-html-display.c:706
+msgid ""
+"This message is encrypted, with a strong encryption algorithm. It would be "
+"very difficult for an outsider to view the content of this message in a "
+"practical amount of time."
+msgstr ""
+"Thư này mật mã dùng thuật toán mật mã mạnh. Người khác sẽ gặp khó khăn rất "
+"nhiều đọc nội dung thư được thời gian hữu ích. "
+
+#: mozilla/GtkNSSDialogs.cpp:175 mozilla/GtkNSSDialogs.cpp:459
+msgid "_View Certificate"
+msgstr "_Xem Chứng nhận"
+
+#: ../mail/em-format-html-display.c:857 ../mail/em-format-html-display.c:822
+msgid "This certificate is not viewable"
+msgstr "Chứng nhận này không thể xem"
+
+#: ../mail/em-format-html-display.c:1145 ../mail/em-format-html-display.c:1104
+msgid "Completed on %B %d, %Y, %l:%M %p"
+msgstr "Hoàn thành lúc %d %B, %Y, %l:%M %p"
+
+#: ../mail/em-format-html-display.c:1153 ../mail/em-format-html-display.c:1112
+msgid "Overdue:"
+msgstr "Quá hạn:"
+
+#: ../mail/em-format-html-display.c:1156 ../mail/em-format-html-display.c:1115
+msgid "by %B %d, %Y, %l:%M %p"
+msgstr "trước %d %B, %Y, %l:%M %p"
+
+#: ../mail/em-format-html-display.c:1216 ../mail/em-format-html-display.c:1175
+msgid "_View Inline"
+msgstr "_Xem trực tiếp"
+
+#: ../mail/em-format-html-display.c:1218 ../mail/em-format-html-display.c:1177
+msgid "_Fit to Width"
+msgstr "_Vừa khít độ rộng"
+
+#: ../mail/em-format-html-display.c:1219 ../mail/em-format-html-display.c:1178
+msgid "Show _Original Size"
+msgstr "Hiển thị kích thước _gốc"
+
+#: ../mail/em-format-html-display.c:1596 ../mail/em-format-html-display.c:1546
+msgid "Attachment Button"
+msgstr "Nút đính kèm"
+
+#: ../mail/em-format-html-display.c:1803 ../mail/em-format-html-display.c:1753
+msgid "Select folder to save all attachments..."
+msgstr "Chọn thư mục nơi cần lưu mọi đính kèm..."
+
+#: ../mail/em-format-html-display.c:1850 ../mail/em-format-html-display.c:1799
+msgid "Save Selected..."
+msgstr "Lưu các điều chọn..."
+
+#. Cant i put in the number of attachments here ?
+#: ../mail/em-format-html-display.c:1917 ../mail/em-format-html-display.c:1866
+#, c-format
+msgid "%d attachment"
+msgid_plural "%d attachment"
+msgstr[0] "%d đính kèm"
+
+#: ../mail/em-format-html-display.c:1980
+msgid "Toggle Attachment Bar"
+msgstr "Bật/tắt thanh đính kèm"
+
+#: ../mail/em-format-html-display.c:1982 ../mail/em-format-html-display.c:1920
+msgid "No Attachment"
+msgstr "Không có đính kèm"
+
+#: ../mail/em-format-html-display.c:1985 ../mail/em-format-html-display.c:1923
+msgid "Save All"
+msgstr "Lưu tất cả"
+
+#: ../mail/em-format-html-print.c:130
+#, c-format
+msgid "Page %d of %d"
+msgstr "Trang %d trên %d"
+
+#: ../mail/em-format-html.c:483 ../mail/em-format-html.c:485
+#: ../mail/em-format-html.c:474 ../mail/em-format-html.c:476
+#, c-format
+msgid "Retrieving `%s'"
+msgstr "Đang nhận « %s »"
+
+#: ../mail/em-format-html.c:610 ../mail/em-format-html.c:599
+msgid "Valid signature but cannot verify sender"
+msgstr "Chữ ký hợp lệ nhưng mà không thể xác minh người gởi."
+
+#: ../mail/em-format-html.c:880 ../mail/em-format-html.c:967
+msgid "Malformed external-body part."
+msgstr "Phần thân ở ngoại dạng sai."
+
+#: ../mail/em-format-html.c:910 ../mail/em-format-html.c:997
+#, c-format
+msgid "Pointer to FTP site (%s)"
+msgstr "Trỏ tới địa chỉ FTP (%s)"
+
+#: ../mail/em-format-html.c:921
+#, c-format
+msgid "Pointer to local file (%s) valid at site \"%s\""
+msgstr "Trỏ tới tập tin cục bộ (%s) hợp lệ tại nơi Mạng « %s »"
+
+#: ../mail/em-format-html.c:923 ../mail/em-format-html.c:1010
+#, c-format
+msgid "Pointer to local file (%s)"
+msgstr "Trỏ tới tập tin cục bộ (%s)"
+
+#: ../mail/em-format-html.c:944 ../mail/em-format-html.c:1031
+#, c-format
+msgid "Pointer to remote data (%s)"
+msgstr "Trỏ tới dữ liệu ở xa (%s)"
+
+#: ../mail/em-format-html.c:955 ../mail/em-format-html.c:1042
+#, c-format
+msgid "Pointer to unknown external data (\"%s\" type)"
+msgstr "Trỏ tới dữ liệu lạ bên ngoài (kiểu « %s »)"
+
+#: ../mail/em-format-html.c:1181 ../mail/em-format-html.c:1270
+msgid "Formatting message"
+msgstr "Đang định dạng thư"
+
+#: ../pan/prefs.c:1635 ../pan/text.c:713
+msgid "Reply-To"
+msgstr "Trả lời"
+
+#: ../mail/em-mailer-prefs.c:86
+msgid "Cc"
+msgstr "Cc"
+
+#: ../mail/em-mailer-prefs.c:87
+msgid "Bcc"
+msgstr "Bcc"
+
+#: ../mail/em-mailer-prefs.c:987 ../addressbook/libebook/e-contact.c:152
+#: ../mimedir/mimedir-vcard.c:397
+msgid "Mailer"
+msgstr "Trình thư"
+
+#. translators: strftime format for local time equivalent in Date header display, with day
+#: ../mail/em-format-html.c:1613 ../mail/em-format-html.c:1702
+msgid " (%a, %R %Z)"
+msgstr " (%a, %R %Z)"
+
+#. translators: strftime format for local time equivalent in Date header display, without day
+#: ../mail/em-format-html.c:1616 ../mail/em-format-html.c:1705
+msgid " (%R %Z)"
+msgstr " (%R %Z)"
+
+#: ../pan/rules/rule-edit-ui.c:757 ../pan/score-add-ui.c:595 ../pan/text.c:717
+msgid "Newsgroups"
+msgstr "Nhóm tin"
+
+#: ../mail/em-format.c:1102 ../mail/em-format.c:1101
+#, c-format
+msgid "%s attachment"
+msgstr "%s đính kèm"
+
+#: ../mail/em-format.c:1141 ../mail/em-format.c:1288 ../mail/em-format.c:1575
+#: ../mail/em-format.c:1140 ../mail/em-format.c:1279
+msgid "Could not parse S/MIME message: Unknown error"
+msgstr "Không thể phân tách thư S/MIME. Không biết sao."
+
+#: ../mail/em-format.c:1270 ../mail/em-format.c:1426 ../mail/em-format.c:1417
+msgid "Could not parse MIME message. Displaying as source."
+msgstr "Không thể phân tách thư MIME nên hiển thị mã nguồn."
+
+#: ../mail/em-format.c:1278 ../mail/em-format.c:1269
+msgid "Unsupported encryption type for multipart/encrypted"
+msgstr "Kiểu mật mã không được hỗ trợ cho: đa phần/mật mã"
+
+#: ../mail/em-format.c:1445 ../mail/em-format.c:1436
+msgid "Unsupported signature format"
+msgstr "Dạng thức chữ ký không hỗ trợ"
+
+#: ../mail/em-format.c:1453 ../mail/em-format.c:1517 ../mail/em-format.c:1444
+msgid "Error verifying signature"
+msgstr "Gặp lỗi khi xác minh chữ ký"
+
+#: ../mail/em-format.c:1453 ../mail/em-format.c:1517 ../mail/em-format.c:1444
+msgid "Unknown error verifying signature"
+msgstr "Gặp lỗi lạ khi xác minh chữ ký."
+
+#: ../mail/em-mailer-prefs.c:103 ../mail/em-mailer-prefs.c:101
+msgid "Every time"
+msgstr "Mọi lần"
+
+#: ../mail/em-mailer-prefs.c:104 ../mail/em-mailer-prefs.c:102
+msgid "Once per day"
+msgstr "Một lần mỗi ngày"
+
+#: ../mail/em-mailer-prefs.c:105 ../mail/em-mailer-prefs.c:103
+msgid "Once per week"
+msgstr "Một lần mỗi tuần"
+
+#: ../mail/em-mailer-prefs.c:106 ../mail/em-mailer-prefs.c:104
+msgid "Once per month"
+msgstr "Một lần mỗi tháng"
+
+#: ../mail/em-migrate.c:1208 ../mail/em-migrate.c:1198
+msgid ""
+"The location and hierarchy of the Evolution mailbox folders has changed "
+"since Evolution 1.x.\n"
+"\n"
+"Please be patient while Evolution migrates your folders..."
+msgstr ""
+"Vị trí và cây của các thư mục hộp thư Evolution đã thay đổi so sánh với "
+"trình Evolution phiên bản 1.x.\n"
+"\n"
+"Hãy kiên nhẫn trong khi Evolution chuyển đổi các thư mục của bạn..."
+
+#: ../mail/em-migrate.c:1647 ../mail/em-migrate.c:1632
+#, c-format
+msgid "Unable to create new folder `%s': %s"
+msgstr "Không thể tạo thư mục mới « %s »: %s"
+
+#: ../mail/em-migrate.c:1673 ../mail/em-migrate.c:1658
+#, c-format
+msgid "Unable to copy folder `%s' to `%s': %s"
+msgstr "Không sao chép được thư mục « %s » thành « %s »: %s"
+
+#: ../mail/em-migrate.c:1858 ../mail/em-migrate.c:1843
+#, c-format
+msgid "Unable to scan for existing mailboxes at `%s': %s"
+msgstr "Không quét được tìm hộp thư đã có tại « %s »: %s"
+
+#: ../mail/em-migrate.c:2062 ../mail/em-migrate.c:2047
+#, c-format
+msgid "Unable to open old POP keep-on-server data `%s': %s"
+msgstr "Không thể mở dữ liệu giữ-trên-máy-chủ POP cũ « %s »: %s"
+
+#: ../mail/em-migrate.c:2076
+#, c-format
+msgid "Unable to create POP3 keep-on-server data directory `%s': %s"
+msgstr "Không thể tạo thư mục dữ liệu giữ-trên-máy-chủ POP3 « %s »: %s"
+
+#: ../mail/em-migrate.c:2105 ../mail/em-migrate.c:2090
+#, c-format
+msgid "Unable to copy POP3 keep-on-server data `%s': %s"
+msgstr "Không sao chép được dữ liệu giữ-trên-máy-chủ POP3 « %s »: %s"
+
+#: ../mail/em-migrate.c:2576 ../mail/em-migrate.c:2588
+#: ../mail/em-migrate.c:2561 ../mail/em-migrate.c:2573
+#, c-format
+msgid "Failed to create local mail storage `%s': %s"
+msgstr "Không tạo kho thư địa phương được « %s »: %s"
+
+#: ../mail/em-migrate.c:2711 ../mail/em-migrate.c:2693
+#, c-format
+msgid "Unable to create local mail folders at `%s': %s"
+msgstr "Không thể tạo những thư mục thư địa phương tại « %s »: %s"
+
+#: ../mail/em-migrate.c:2730 ../mail/em-migrate.c:2711
+msgid ""
+"Unable to read settings from previous Evolution install, `evolution/config."
+"xmldb' does not exist or is corrupt."
+msgstr ""
+"Không thể đọc thiết lập từ bản cài đặt Evolution cũ : tập tin bị hỏng hay không tồn tại."
+
+#: ../mail/em-popup.c:556 ../mail/em-popup.c:567 ../mail/em-popup.c:544
+#: ../mail/em-popup.c:555
+msgid "_Reply to sender"
+msgstr "T_rả lời người gởi"
+
+#: ../mail/em-popup.c:556 ../ui/evolution-mail-message.xml.h:81
+msgid "Reply to _List"
+msgstr "Trả lời _Hộp thư chung"
+
+#: ../mail/em-popup.c:619 ../mail/em-popup.c:607
+msgid "_Open Link in Browser"
+msgstr "_Mở liên kết bằng trình duyệt"
+
+#: ../mail/em-popup.c:620 ../mail/em-popup.c:608
+msgid "_Send New Message To..."
+msgstr "_Gởi thư mới cho..."
+
+#: ../mail/em-popup.c:621 ../mail/em-popup.c:609
+msgid "_Add to Addressbook"
+msgstr "_Thêm vào Sổ địa chỉ"
+
+#: ../mail/em-subscribe-editor.c:615 ../mail/em-subscribe-editor.c:614
+msgid "This store does not support subscriptions, or they are not enabled."
+msgstr "Kho này không hỗ trợ đăng ký, hay chưa hiệu lực khả năng đó."
+
+#: ../mail/em-subscribe-editor.c:644 ../mail/em-subscribe-editor.c:643
+#: ../src/red_subscriptions.py:57 ../pan/grouplist.c:382
+#: ../pan/grouplist.c:957
+msgid "Subscribed"
+msgstr "Đã đăng ký"
+
+#: ../mail/em-subscribe-editor.c:648 ../libgimpwidgets/gimppatheditor.c:252
+#: ../app/core/gimpimagefile.c:575 ../app/dialogs/preferences-dialog.c:1593
+#: src/gtkam-tree.c:1318 ../sheets/Misc.sheet.in.h:2
+msgid "Folder"
+msgstr "Thư mục"
+
+#. FIXME: This is just to get the shadow, is there a better way?
+#: ../mail/em-subscribe-editor.c:869 ../mail/em-subscribe-editor.c:862
+msgid "Please select a server."
+msgstr "Hãy chọn máy phục vụ."
+
+#: ../mail/em-subscribe-editor.c:890 ../mail/em-subscribe-editor.c:883
+msgid "No server has been selected"
+msgstr "Chưa chọn máy phục vụ."
+
+#: ../mail/em-utils.c:122 ../mail/em-utils.c:105
+msgid "Don't show this message again."
+msgstr "Đừng hiện thông điệp này lần nữa."
+
+#: ../pan/filter-ui.c:364 ../pan/gui.c:1156 ../pan/rules/rule-edit-ui.c:760
+msgid "Filters"
+msgstr "Lọc"
+
+#: ../mail/em-utils.c:479
+msgid "message"
+msgstr "thư"
+
+#: ../mail/em-utils.c:670 ../mail/em-utils.c:614
+msgid "Save Message..."
+msgstr "Lưu thư..."
+
+#: ../mail/em-utils.c:719 ../mail/em-utils.c:663
+msgid "Add address"
+msgstr "Thêm địa chỉ"
+
+#: ../mail/em-utils.c:1198 ../mail/em-utils.c:1142
+#, c-format
+msgid "Messages from %s"
+msgstr "Thư từ « %s »"
+
+#: ../mail/em-vfolder-editor.c:112 ../mail/em-vfolder-editor.c:104
+msgid "Search _Folders"
+msgstr "_Thư mục tìm kiếm"
+
+#: ../mail/em-vfolder-rule.c:576 ../mail/em-vfolder-rule.c:574
+msgid "Search Folder source"
+msgstr "Nguồn thư mục tìm kiếm"
+
+#: ../mail/evolution-mail.schemas.in.in.h:1
+msgid "Automatic link recognition"
+msgstr "Tự động nhận dạng liên kết"
+
+#: ../mail/evolution-mail.schemas.in.in.h:2
+msgid "Automatic smiley recognition"
+msgstr "Tự động nhận dạng biểu tượng xúc cảm"
+
+#: ../mail/evolution-mail.schemas.in.in.h:3
+msgid "Check incoming mail being junk"
+msgstr "Kiểm thư mới nhận là thư rác"
+
+#: ../mail/evolution-mail.schemas.in.in.h:4
+msgid "Citation highlight color"
+msgstr "Màu tô sáng trích dẫn"
+
+#: ../mail/evolution-mail.schemas.in.in.h:5
+msgid "Citation highlight color."
+msgstr "Màu tô sáng trích dẫn."
+
+#: ../mail/evolution-mail.schemas.in.in.h:6
+msgid "Composer Window default height"
+msgstr "Độ cao mặc định cửa sổ soạn"
+
+#: ../mail/evolution-mail.schemas.in.in.h:7
+msgid "Composer Window default width"
+msgstr "Độ rộng mặc định cửa sổ soạn"
+
+#: ../mail/evolution-mail.schemas.in.in.h:8
+msgid "Default charset in which to compose messages"
+msgstr "Bộ ký tự mặc định để soạn thảo thư."
+
+#: ../mail/evolution-mail.schemas.in.in.h:9
+msgid "Default charset in which to compose messages."
+msgstr "Bộ ký tự mặc định để soạn thảo thư."
+
+#: ../mail/evolution-mail.schemas.in.in.h:10
+msgid "Default charset in which to display messages"
+msgstr "Bộ ký tự mặc định để hiển thị thư."
+
+#: ../mail/evolution-mail.schemas.in.in.h:11
+msgid "Default charset in which to display messages."
+msgstr "Bộ ký tự mặc định để hiển thị thư."
+ +#: ../mail/evolution-mail.schemas.in.in.h:12 +msgid "Default forward style" +msgstr "Kiểu chuyển tiếp mặc định" + +#: ../mail/evolution-mail.schemas.in.in.h:13 +msgid "Default height of the Composer Window" +msgstr "Äá»™ cao mặc định cá»­a Cá»­a sổ Soạn" + +#: ../mail/evolution-mail.schemas.in.in.h:14 +msgid "Default height of the Message Window" +msgstr "Äá»™ cao mặc định cá»­a Cá»­a sổ ThÆ°" + +#: ../mail/evolution-mail.schemas.in.in.h:15 +msgid "Default height of the Subscribe dialog" +msgstr "Äá»™ cao mặc định cá»­a há»™p thoại Äăng ký" + +#: ../mail/evolution-mail.schemas.in.in.h:16 +msgid "Default reply style" +msgstr "Kiểu trả lá»i mặc định" + +#: ../mail/evolution-mail.schemas.in.in.h:17 +msgid "Default width of the Composer Window" +msgstr "Äá»™ rá»™ng mặc định cá»­a Cá»­a sổ Soạn" + +#: ../mail/evolution-mail.schemas.in.in.h:18 +msgid "Default width of the Message Window" +msgstr "Äá»™ rá»™ng mặc định cá»­a Cá»­a sổ ThÆ°" + +#: ../mail/evolution-mail.schemas.in.in.h:19 +msgid "Default width of the Subscribe dialog" +msgstr "Äá»™ rá»™ng mặc định cá»­a há»™p thoại Äăng ký" + +#: ../mail/evolution-mail.schemas.in.in.h:20 +msgid "Draw spelling error indicators on words as you type." +msgstr "Vẽ chỉ báo lá»—i chính tả trên từ khi gõ." + +#: ../mail/evolution-mail.schemas.in.in.h:21 +msgid "Empty Trash folders on exit" +msgstr "Äổ các thÆ° mục Sá»t rác khi thoát" + +#: ../mail/evolution-mail.schemas.in.in.h:22 +msgid "Empty all Trash folders when exiting Evolution." +msgstr "Äổ các thÆ° mục Sá»t rác khi thoát trình Evolution." + +#: ../mail/evolution-mail.schemas.in.in.h:23 +msgid "Enable caret mode, so that you can see a cursor when reading mail." +msgstr "Hiệu lá»±c chế Ä‘á»™ con nháy, để bạn xem con chạy khi Ä‘á»c thÆ°." + +#: ../mail/evolution-mail.schemas.in.in.h:24 +msgid "Enable/disable caret mode" +msgstr "Bật/tắt chế Ä‘á»™ con nháy" + +#: ../mail/evolution-mail.schemas.in.in.h:25 +msgid "Height of the message-list pane" +msgstr "Äá»™ cao ô cá»­a sổ danh sách thÆ°" + +#: ../mail/evolution-mail.schemas.in.in.h:26 +msgid "Height of the message-list pane." +msgstr "Äá»™ cao ô cá»­a sổ danh sách thÆ°." + +#: ../mail/evolution-mail.schemas.in.in.h:27 +msgid "" +"If a user tries to open 10 or more messages at one time, ask the user if " +"they really want to do it." +msgstr "" +"Nếu ngÆ°á»i dùng cố mở hÆ¡n 9 thÆ° cùng lúc thì há»i nếu há» thật muốn làm nhÆ° thế." + +#: ../mail/evolution-mail.schemas.in.in.h:28 +msgid "" +"If there isn't a builtin viewer for a particular mime-type inside Evolution, " +"any mime-types appearing in this list which map to a bonobo-component viewer " +"in GNOME's mime-type database may be used for displaying content." +msgstr "" +"Nếu Evolution không có bá»™ xem có sẵn cho má»™t kiểu MIME nào đó thì sẽ dùng " +"bất cứ bá»™ xem tÆ°Æ¡ng thích vá»›i Bonobo nào sẵn sàng trong cÆ¡ sở dữ liệu kiểu " +"MIME của GNOME." + +#: ../mail/evolution-mail.schemas.in.in.h:29 +msgid "Last time empty trash was run" +msgstr "Lần cuối cùng đã đổ Sá»t Rác" + +#: ../mail/evolution-mail.schemas.in.in.h:30 +msgid "List of Labels and their associated colors" +msgstr "Danh sách Nhãn và màu sắc liên quan" + +#: ../mail/evolution-mail.schemas.in.in.h:31 +msgid "List of accepted licenses" +msgstr "Danh sách các quyá»n đã chấp nhận" + +#: ../mail/evolution-mail.schemas.in.in.h:32 +msgid "List of accounts" +msgstr "Danh sách các tài khoản" + +#: ../mail/evolution-mail.schemas.in.in.h:33 +msgid "" +"List of accounts known to the mail component of Evolution. 
+"strings naming subdirectories relative to /apps/evolution/mail/accounts."
+msgstr ""
+"Danh sách các tài khoản mà thành phần thư của Evolution biết được. Danh sách "
+"ấy chứa chuỗi lập tên của các thư mục con liên quan với /apps/evolution/mail/"
+"accounts."
+
+#: ../mail/evolution-mail.schemas.in.in.h:34
+msgid "List of custom headers and whether they are enabled."
+msgstr "Danh sách các dòng đầu tự chọn và có bật hay chưa."
+
+#: ../mail/evolution-mail.schemas.in.in.h:35
+msgid ""
+"List of labels known to the mail component of Evolution. The list contains "
+"strings containing name:color where color uses the HTML hex encoding."
+msgstr ""
+"Danh sách các nhãn mà thành phần thư của Evolution biết được. Danh sách đó "
+"chứa chuỗi name:color (tên:màu) mà màu đó dạng hệ thập lục phân HTML."
+
+#: ../mail/evolution-mail.schemas.in.in.h:36
+msgid "List of mime types to check for bonobo component viewers"
+msgstr ""
+"Danh sách các kiểu MIME cần kiểm tra khớp với bộ xem thành phần Bonobo."
+
+#: ../mail/evolution-mail.schemas.in.in.h:37
+msgid "List of protocol names whose license has been accepted."
+msgstr "Danh sách các tên giao thức có quyền đã chấp nhận."
+
+#: ../mail/evolution-mail.schemas.in.in.h:38
+msgid "Load images for HTML messages over http"
+msgstr "Tải các ảnh cho thư HTML bằng giao thức HTTP"
+
+#: ../mail/evolution-mail.schemas.in.in.h:39
+msgid ""
+"Load images for HTML messages over http(s). Possible values are: 0 - Never "
+"load images off the net 1 - Load images in mail from contacts 2 - Always "
+"load images off the net"
+msgstr ""
+"Tải các ảnh cho thư HTML bằng giao thức HTTP hay HTTPS. Giá trị có thể là:\n"
+"0 - không bao giờ tải ảnh từ Mạng\n"
+"1 - tải ảnh nếu người gởi có trong Sổ địa chỉ\n"
+"2 - luôn luôn tải ảnh từ Mạng (không an toàn)"
+
+#: ../mail/evolution-mail.schemas.in.in.h:40
+msgid "Log filter actions"
+msgstr "Ghi lưu các hành động lọc"
+
+#: ../mail/evolution-mail.schemas.in.in.h:41
+msgid "Log filter actions to the specified log file."
+msgstr "Ghi lưu các hành động lọc vào tập tin bản ghi đã ghi rõ."
+
+#: ../mail/evolution-mail.schemas.in.in.h:42
+msgid "Logfile to log filter actions"
+msgstr "Tập tin bản ghi để ghi lưu các hành động lọc"
+
+#: ../mail/evolution-mail.schemas.in.in.h:43
+msgid "Logfile to log filter actions."
+msgstr "Tập tin bản ghi để ghi lưu các hành động lọc."
+
+#: ../mail/evolution-mail.schemas.in.in.h:44
+msgid "Mark as Seen after specified timeout"
+msgstr "Đánh dấu là Đã xem, sau thời hạn đã ghi rõ"
+
+#: ../mail/evolution-mail.schemas.in.in.h:45
+msgid "Mark as Seen after specified timeout."
+msgstr "Đánh dấu là Đã xem, sau thời hạn đã ghi rõ."
+
+#: ../mail/evolution-mail.schemas.in.in.h:46
+msgid "Mark citations in the message \"Preview\""
+msgstr "Đánh dấu các trích dẫn trong thư « Xem thử »"
+
+#: ../mail/evolution-mail.schemas.in.in.h:47
+msgid "Mark citations in the message \"Preview\"."
+msgstr "Đánh dấu các trích dẫn trong thư « Xem thử »."
+
+#: ../mail/evolution-mail.schemas.in.in.h:48
+msgid "Message Window default height"
+msgstr "Độ cao mặc định của Cửa sổ thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:49
+msgid "Message Window default width"
+msgstr "Độ rộng mặc định của Cửa sổ thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:50
+msgid "Message-display style (normal, full headers, source)"
+msgstr "Kiểu hiển thị thư (bình thường, dòng đầu đầy đủ, mã nguồn)"
+
+#: ../mail/evolution-mail.schemas.in.in.h:51
+msgid "Minimum days between emptying the trash on exit"
+msgstr "Số ngày tối thiểu giữa hai lần đổ Sọt Rác khi thoát"
+
+#: ../mail/evolution-mail.schemas.in.in.h:52
+msgid "Minimum time between emptying the trash on exit, in days."
+msgstr "Thời gian tối thiểu giữa hai lần đổ Sọt Rác khi thoát, theo ngày."
+
+#: ../mail/evolution-mail.schemas.in.in.h:53
+msgid "New Mail Notify sound file"
+msgstr "Tập tin âm thanh Thông báo Thư Mới"
+
+#: ../mail/evolution-mail.schemas.in.in.h:54
+msgid "New Mail Notify type"
+msgstr "Kiểu Thông báo Thư Mới"
+
+#: ../mail/evolution-mail.schemas.in.in.h:55
+msgid "Prompt on empty subject"
+msgstr "Nhắc khi chủ đề rỗng"
+
+#: ../mail/evolution-mail.schemas.in.in.h:56
+msgid "Prompt the user when he or she tries to expunge a folder."
+msgstr "Nhắc người dùng khi họ cố xoá hẳn thư mục."
+
+#: ../mail/evolution-mail.schemas.in.in.h:57
+msgid ""
+"Prompt the user when he or she tries to send a message without a Subject."
+msgstr "Nhắc người dùng khi họ cố gởi thư chưa có Chủ đề."
+
+#: ../mail/evolution-mail.schemas.in.in.h:58
+msgid "Prompt when user expunges"
+msgstr "Nhắc khi người dùng xoá hẳn"
+
+#: ../mail/evolution-mail.schemas.in.in.h:59
+msgid "Prompt when user only fills Bcc"
+msgstr "Nhắc khi người dùng chỉ nhập Bcc"
+
+#: ../mail/evolution-mail.schemas.in.in.h:60
+msgid "Prompt when user tries to open 10 or more messages at once"
+msgstr "Nhắc khi người dùng cố mở hơn 9 thư cùng lúc"
+
+#: ../mail/evolution-mail.schemas.in.in.h:61
+msgid ""
+"Prompt when user tries to send HTML mail to recipients that may not want to "
+"receive HTML mail."
+msgstr "Nhắc khi người dùng cố gởi thư HTML cho liên lạc không muốn nhận HTML."
+
+#: ../mail/evolution-mail.schemas.in.in.h:62
+msgid "Prompt when user tries to send a message with no To or Cc recipients."
+msgstr ""
+"Nhắc khi người dùng cố gởi thư mà không có người nhận Cho (To) hay Chép Cho "
+"(CC)."
+
+#: ../mail/evolution-mail.schemas.in.in.h:63
+msgid "Prompt when user tries to send unwanted HTML"
+msgstr "Nhắc khi người dùng cố gởi thư dạng HTML không được muốn"
+
+#: ../mail/evolution-mail.schemas.in.in.h:64
+msgid "Recognize links in text and replace them."
+msgstr "Nhận ra mọi liên kết trong văn bản và thay thế suốt."
+
+#: ../mail/evolution-mail.schemas.in.in.h:65
+msgid "Recognize smileys in text and replace them with images."
+msgstr "Nhận dạng biểu tượng xúc cảm trong văn bản và thay thế bằng ảnh."
+
+#: ../mail/evolution-mail.schemas.in.in.h:66
+msgid "Run junk test on incoming mail"
+msgstr "Chạy kiểm tra thư Rác khi mọi thư gởi Đến"
+
+#: ../mail/evolution-mail.schemas.in.in.h:67
+msgid "Send HTML mail by default"
+msgstr "Gởi thư mặc định dùng dạng HTML (không đề nghị)"
+
+#: ../mail/evolution-mail.schemas.in.in.h:68
+msgid "Send HTML mail by default."
+msgstr "Gởi thư mặc định dùng dạng HTML (không đề nghị)."
+
+#: ../mail/evolution-mail.schemas.in.in.h:69
+msgid "Show Animations"
+msgstr "Hiện hoạt cảnh"
+
+#: ../mail/evolution-mail.schemas.in.in.h:70
+msgid "Show animated images as animations."
+msgstr "Hiện ảnh kiểu hoạt cảnh."
+
+#: ../mail/evolution-mail.schemas.in.in.h:71
+msgid "Show deleted messages (with a strike-through) in the message-list."
+msgstr ""
+"Hiển thị mọi thư bị xoá bỏ (kiểu gạch xuyên qua) trong danh sách các thư."
+
+#: ../mail/evolution-mail.schemas.in.in.h:72
+msgid "Show deleted messages in the message-list"
+msgstr "Hiển thị thư bị xoá bỏ trong danh sách các thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:75
+msgid "Sound file to play when new mail arrives."
+msgstr "Tập tin âm thanh cần phát khi nhận thư mới."
+
+#: ../mail/evolution-mail.schemas.in.in.h:76
+msgid "Specifies the type of New Mail Notification the user wishes to use."
+msgstr "Ghi rõ kiểu Thông báo Thư Mới người dùng muốn dùng."
+
+#: ../mail/evolution-mail.schemas.in.in.h:77
+msgid "Spell check inline"
+msgstr "Kiểm tra chính tả trực tiếp"
+
+#: ../mail/evolution-mail.schemas.in.in.h:78
+msgid "Subscribe dialog default height"
+msgstr "Độ cao mặc định của hộp thoại đăng ký"
+
+#: ../mail/evolution-mail.schemas.in.in.h:79
+msgid "Subscribe dialog default width"
+msgstr "Độ rộng mặc định của hộp thoại đăng ký"
+
+#: ../mail/evolution-mail.schemas.in.in.h:80
+msgid "Terminal font"
+msgstr "Phông chữ thiết bị cuối"
+
+#: ../mail/evolution-mail.schemas.in.in.h:81
+msgid "The last time empty trash was run, in days since the epoch."
+msgstr "Lần cuối cùng đã chạy đổ Sọt Rác, theo ngày từ kỷ nguyên bắt đầu."
+
+#: ../mail/evolution-mail.schemas.in.in.h:82
+msgid "The terminal font for mail display"
+msgstr "Phông chữ thiết bị cuối để hiển thị thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:83
+msgid "The variable width font for mail display"
+msgstr "Phông chữ độ rộng thay đổi để hiển thị thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:84
+msgid ""
+"This key should contain a list of XML structures specifying custom headers, "
+"and whether they are to be displayed. The format of the XML structure is <"
+"header enabled> - set enabled if the header is to be displayed in the "
+"mail view."
+msgstr ""
+"Khoá này nên chứa danh sách các cấu trúc XML ghi rõ dòng đầu tự chọn, và nếu "
+"sẽ hiển thị chúng. Dạng thức của cấu trúc XML là:\n"
+"<header enabled>\n"
+"(lập đã bật dòng đầu, nếu sẽ hiển thị dòng đầu đó trong khung xem thư)."
+
+#: ../mail/evolution-mail.schemas.in.in.h:85
+msgid "Thread the message list."
+msgstr "Hiển thị mạch trong danh sách thư."
+
+#: ../mail/evolution-mail.schemas.in.in.h:86
+msgid "Thread the message-list"
+msgstr "Hiển thị mạch trong danh sách thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:87
+msgid "Thread the message-list based on Subject"
+msgstr "Hiển thị mạch trong danh sách thư, theo Chủ đề"
+
+#: ../mail/evolution-mail.schemas.in.in.h:88
+msgid "Timeout for marking message as Seen"
+msgstr "Thời hạn Đánh dấu thư Đã xem"
+
+#: ../mail/evolution-mail.schemas.in.in.h:89
+msgid "Timeout for marking message as Seen."
+msgstr "Thời hạn Đánh dấu thư Đã xem."
+
+#: ../mail/evolution-mail.schemas.in.in.h:90
+msgid "UID string of the default account."
+msgstr "Chuỗi UID của tài khoản mặc định."
+
+#: ../mail/evolution-mail.schemas.in.in.h:91
+msgid "Use Spamassassin daemon and client"
+msgstr "Sử dụng trình nền và khách Spamassassin"
+
+#: ../mail/evolution-mail.schemas.in.in.h:92
+msgid "Use Spamassassin daemon and client (spamc/spamd)"
+msgstr "Sử dụng trình nền và khách Spamassassin (spamc/spamd)"
+
+#: ../mail/evolution-mail.schemas.in.in.h:93
+msgid "Use custom fonts"
+msgstr "Sử dụng phông chữ tự chọn"
+
+#: ../mail/evolution-mail.schemas.in.in.h:94
+msgid "Use custom fonts for displaying mail"
+msgstr "Sử dụng phông chữ tự chọn để hiển thị thư"
+
+#: ../mail/evolution-mail.schemas.in.in.h:95
+msgid "Use only local spam tests."
+msgstr "Chỉ kiểm tra địa phương nếu thư là rác."
+
+#: ../mail/evolution-mail.schemas.in.in.h:96
+msgid "Use only the local spam tests (no DNS)."
+msgstr "Chỉ kiểm tra địa phương nếu thư là rác (không có DNS)."
+
+#: ../mail/evolution-mail.schemas.in.in.h:97
+msgid "Variable width font"
+msgstr "Phông chữ độ rộng thay đổi"
+
+#: ../mail/evolution-mail.schemas.in.in.h:98
+msgid "View/Bcc menu item is checked"
+msgstr "Đã chọn mục trình đơn Xem/Bcc"
+
+#: ../mail/evolution-mail.schemas.in.in.h:99
+msgid "View/Bcc menu item is checked."
+msgstr "Đã chọn mục trình đơn Xem/Bcc."
+
+#: ../mail/evolution-mail.schemas.in.in.h:100
+msgid "View/Cc menu item is checked"
+msgstr "Đã chọn mục trình đơn Xem/Cc"
+
+#: ../mail/evolution-mail.schemas.in.in.h:101
+msgid "View/Cc menu item is checked."
+msgstr "Đã chọn mục trình đơn Xem/Cc."
+
+#: ../mail/evolution-mail.schemas.in.in.h:102
+msgid "View/From menu item is checked"
+msgstr "Đã chọn mục trình đơn Xem/Từ"
+
+#: ../mail/evolution-mail.schemas.in.in.h:103
+msgid "View/From menu item is checked."
+msgstr "Đã chọn mục trình đơn Xem/Từ."
+
+#: ../mail/evolution-mail.schemas.in.in.h:104
+msgid "View/PostTo menu item is checked"
+msgstr "Đã chọn mục trình đơn Xem/Gởi tới"
+
+#: ../mail/evolution-mail.schemas.in.in.h:105
+msgid "View/PostTo menu item is checked."
+msgstr "Đã chọn mục trình đơn Xem/Gởi tới."
+
+#: ../mail/evolution-mail.schemas.in.in.h:106
+msgid "View/ReplyTo menu item is checked"
+msgstr "Đã chọn mục trình đơn Xem/Trả lời cho"
+
+#: ../mail/evolution-mail.schemas.in.in.h:107
+msgid "View/ReplyTo menu item is checked."
+msgstr "Đã chọn mục trình đơn Xem/Trả lời cho."
+
+#: ../mail/evolution-mail.schemas.in.in.h:108
+msgid ""
+"Whether or not to fall back on threading by subjects when the messages do "
+"not contain In-Reply-To or References headers."
+msgstr ""
+"Có nên trở về xâu thư theo chủ đề khi thư không chứa dòng đầu In-Reply-To "
+"(trả lời theo thư trước) hay References (tham chiếu đến)."
+
+#: ../mail/importers/elm-importer.c:192 ../mail/importers/elm-importer.c:193
+msgid "Importing Elm data"
+msgstr "Đang nhập dữ liệu Elm"
+
+#: ../mail/importers/elm-importer.c:381 ../mail/importers/elm-importer.c:382
+msgid "Evolution Elm importer"
+msgstr "Bộ nhập Elm vào Evolution"
+
+#: ../mail/importers/elm-importer.c:382 ../mail/importers/elm-importer.c:383
+msgid "Import mail from Elm."
+msgstr "Äang nhập thÆ° từ Elm" + +#: ../mail/importers/evolution-outlook-importer.c:131 +msgid "Destination folder:" +msgstr "ThÆ° mục đích:" + +#: ../mail/importers/evolution-outlook-importer.c:134 +msgid "Select folder to import into" +msgstr "Chá»n thÆ° mục để nhập vào" + +#: ../mail/importers/evolution-mbox-importer.c:216 +msgid "Berkeley Mailbox (mbox)" +msgstr "Berkeley Mailbox (mbox)" + +#: ../mail/importers/evolution-mbox-importer.c:217 +msgid "Importer Berkeley Mailbox format folders" +msgstr "Bá»™ nhập thÆ° mục dạng Berkeley Mailbox" + +#: ../mail/importers/mail-importer.c:230 ../shell/e-shell-importer.c:516 +#, c-format +msgid "Importing `%s'" +msgstr "Äang nhập « %s »" + +#: ../mail/importers/netscape-importer.c:1251 +msgid "Importing..." +msgstr "Äang nhập..." + +#: ../mail/importers/netscape-importer.c:1253 ../shell/e-shell-importer.c:523 +msgid "Please wait" +msgstr "Vui lòng chá»" + +#: ../mail/importers/mail-importer.c:144 +msgid "Importing mailbox" +msgstr "Äang nhập há»™p thÆ°..." + +#: ../mail/importers/mail-importer.c:377 ../mail/importers/mail-importer.c:373 +#, c-format +msgid "Scanning %s" +msgstr "Äang quét %s..." + +#: ../mail/importers/netscape-importer.c:73 +#, c-format +msgid "Priority Filter \"%s\"" +msgstr "Bá»™ lá»c Æ°u tiên « %s »" + +#: ../mail/importers/netscape-importer.c:662 +msgid "" +"Some of your Netscape email filters are based on\n" +"email priorities, which are not used in Evolution.\n" +"Instead, Evolution provides scores in the range of\n" +"-3 to 3 that can be assigned to emails and filtered\n" +"accordingly.\n" +"\n" +"As a workaround, a set of filters called \"Priority Filter\"\n" +"was added that converts Netscape's email priorities into\n" +"Evolution's scores, and the affected filters use scores instead\n" +"of priorities. Check the imported filters to make sure\n" +"everything still works as intended." +msgstr "" +"Má»™t số bá»™ lá»c Netscape của bạn Ä‘á»±a vào Ä‘á»™ Æ°u tiên thÆ°,\n" +"mà Evolution không dùng. Thay vào đó, trình Evolution\n" +"cung cấp Ä‘iểm từ -3 đến 3, thì có thể lá»c thÆ° theo Ä‘iểm\n" +"đã gán vào nó.\n" +"\n" +"Äể chỉnh sá»­a, đã thêm má»™t tập hợp bá»™ lá»c được gá»i là\n" +"« Bá»™ lá»c Æ°u tiên » mà chuyển đổi các Æ°u tiên Netscape\n" +"sang Ä‘iểm Evolution, để các bá»™ lá»c kiểu đó sẽ dùng Ä‘iểm\n" +"thay vào Æ°u tiên. Hãy kiểm tra xem má»—i bá»™ lá»c đã nhập\n" +"vẫn còn hoạt Ä‘á»™ng cho đúng." + +#: ../mail/importers/netscape-importer.c:687 +#: ../mail/importers/netscape-importer.c:686 +msgid "" +"Some of your Netscape email filters use\n" +"the \"Ignore Thread\" or \"Watch Thread\"\n" +"feature, which is not supported in Evolution.\n" +"These filters will be dropped." +msgstr "" +"Má»™t số bá»™ lá»c Netscape của bạn dùng tín năng\n" +"« Bá» qua mạch » hay « Theo dõi mạch » mà trình\n" +"Evolution không há»— trợ. Sẽ bá» qua những bá»™ lá»c này." + +#: ../mail/importers/netscape-importer.c:704 +#: ../mail/importers/netscape-importer.c:703 +msgid "" +"Some of your Netscape email filters test the\n" +"body of emails for (in)equality to a given string,\n" +"which is not supported in Evolution. Those filters\n" +"were modified to test whether that string is or is not\n" +"contained in the message body." +msgstr "" +"Má»™t số bá»™ lá»c Netscape của bạn kiểm tra\n" +"nếu thân thÆ° khá»›p (hay không khá»›p) má»™t chuá»—i đã cho,\n" +"mà trình Evolution không há»— trợ. Äã sá»­a đổi\n" +"các bá»™ lá»c đó để kiểm tra nếu thân thÆ°\n" +"chứa chuá»—i đó hay không." 
+
+#: ../mail/importers/netscape-importer.c:1251
+#: ../mail/importers/netscape-importer.c:1250
+msgid "Evolution is importing your old Netscape data"
+msgstr "Evolution đang nhập các dữ liệu cũ từ Netscape"
+
+#: ../mail/importers/netscape-importer.c:1708
+#: ../mail/importers/netscape-importer.c:1707
+msgid "Importing Netscape data"
+msgstr "Đang nhập dữ liệu Netscape"
+
+#. #-#-#-#-# silky-0.5.3pre1.vi.po (silky-0.5.3pre1) #-#-#-#-#
+#. I18N This is a window title
+#: ../plug-ins/common/mosaic.c:722 ../glade/pyblio.glade.in.h:14
+#: src/settings.c:652 src/settings.c:657 prefs_gui.c:366 src/gui.c:362
+#: po/silky-channel.glade.h:23
+msgid "Settings"
+msgstr "Thiết lập"
+
+#: ../mail/importers/netscape-importer.c:1913
+#: ../mail/importers/netscape-importer.c:1912
+msgid "Mail Filters"
+msgstr "Bộ lọc thư"
+
+#: ../mail/importers/netscape-importer.c:1934
+#: ../mail/importers/netscape-importer.c:1933
+msgid ""
+"Evolution has found Netscape mail files.\n"
+"Would you like them to be imported into Evolution?"
+msgstr ""
+"Evolution đã tìm thấy những tập tin thư dạng Netscape.\n"
+"Bạn có muốn nhập chúng vào Evolution không?"
+
+#: ../mail/importers/pine-importer.c:229 ../mail/importers/pine-importer.c:230
+msgid "Importing Pine data"
+msgstr "Đang nhập dữ liệu Pine"
+
+#: ../mail/importers/pine-importer.c:433 ../mail/importers/pine-importer.c:434
+msgid "Evolution Pine importer"
+msgstr "Bộ nhập Pine vào Evolution"
+
+#: ../mail/importers/pine-importer.c:434 ../mail/importers/pine-importer.c:435
+msgid "Import mail from Pine."
+msgstr "Nhập thư từ Pine."
+
+#: ../mail/mail-autofilter.c:79 ../mail/mail-autofilter.c:78
+#, c-format
+msgid "Mail to %s"
+msgstr "Gởi thư cho « %s »"
+
+#: ../mail/mail-autofilter.c:243 ../mail/mail-autofilter.c:282
+#: ../mail/mail-autofilter.c:242 ../mail/mail-autofilter.c:281
+#, c-format
+msgid "Mail from %s"
+msgstr "Thư từ « %s »"
+
+#: ../mail/mail-autofilter.c:266 ../mail/mail-autofilter.c:265
+#, c-format
+msgid "Subject is %s"
+msgstr "Chủ đề là « %s »"
+
+#: ../mail/mail-autofilter.c:301 ../mail/mail-autofilter.c:300
+#, c-format
+msgid "%s mailing list"
+msgstr "Hộp thư chung « %s »"
+
+#: ../mail/mail-autofilter.c:372 ../mail/mail-autofilter.c:369
+msgid "Add Filter Rule"
+msgstr "Thêm quy tắc lọc"
+
+#: ../mail/mail-component.c:510 ../mail/mail-component.c:508
+#, c-format
+msgid "%d deleted"
+msgid_plural "%d deleted"
+msgstr[0] "%d bị xoá bỏ"
+
+#: ../mail/mail-component.c:512 ../mail/mail-component.c:510
+#, c-format
+msgid "%d junk"
+msgid_plural "%d junk"
+msgstr[0] "%d rác"
+
+#: ../mail/mail-component.c:535 ../mail/mail-component.c:533
+#, c-format
+msgid "%d draft"
+msgid_plural "%d draft"
+msgstr[0] "%d nháp"
+
+#: ../mail/mail-component.c:537 ../mail/mail-component.c:535
+#, c-format
+msgid "%d sent"
+msgid_plural "%d sent"
+msgstr[0] "%d đã gởi"
+
+#: ../mail/mail-component.c:539 ../mail/mail-component.c:537
+#, c-format
+msgid "%d unsent"
+msgid_plural "%d unsent"
+msgstr[0] "%d chưa gởi"
+
+#: ../mail/mail-component.c:543 ../mail/mail-component.c:541
+#, c-format
+msgid "%d total"
+msgid_plural "%d total"
+msgstr[0] "%d tổng cộng"
+
+#: ../mail/mail-component.c:545 ../mail/mail-component.c:543
+#, c-format
+msgid ", %d unread"
+msgid_plural ", %d unread"
+msgstr[0] ", %d chưa đọc"
+
+#: ../mail/mail-component.c:766 ../mail/mail-component.c:764
+msgid "New Mail Message"
+msgstr "Thư mới"
+
+#: ../mail/mail-component.c:767 ../mail/mail-component.c:765
+msgid "_Mail Message"
+msgstr "_Thư"
+
+#: ../mail/mail-component.c:768 ../mail/mail-component.c:766
+msgid "Compose a new mail message"
+msgstr "Biên soạn thư mới"
+
+#: ../mail/mail-component.c:774 ../mail/mail-component.c:772
+msgid "New Mail Folder"
+msgstr "Hộp thư mới"
+
+#: ../mail/mail-component.c:775 ../mail/mail-component.c:773
+msgid "Mail _Folder"
+msgstr "_Hộp thư"
+
+#: ../mail/mail-component.c:776 ../mail/mail-component.c:774
+msgid "Create a new mail folder"
+msgstr "Tạo hộp thư mới"
+
+#: ../mail/mail-component.c:920 ../mail/mail-component.c:918
+msgid "Failed upgrading Mail settings or folders."
+msgstr "Không cập nhật thiết lập hay thư mục Thư được."
+
+#: ../mail/mail-config.glade.h:2
+msgid " Ch_eck for Supported Types "
+msgstr "_Kiểm tra kiểu được hỗ trợ "
+
+#: ../mail/mail-config.glade.h:4
+msgid "SSL is not supported in this build of Evolution"
+msgstr "Phiên bản Evolution đang dùng không hỗ trợ SSL"
+
+#: ../mail/mail-config.glade.h:5
+msgid "Sig_natures"
+msgstr "Chữ _ký"
+
+#: ../mail/mail-config.glade.h:6
+msgid "_Languages"
+msgstr "_Ngôn ngữ"
+
+#: ../mail/mail-config.glade.h:7
+msgid "This will make the the filter more reliable, but slower"
+msgstr "Việc này giúp bộ lọc đáng tin hơn, nhưng chậm hơn"
+
+#: ../mail/mail-config.glade.h:8
+msgid "Account Information"
+msgstr "Thông tin tài khoản"
+
+#: ../mail/mail-config.glade.h:10
+msgid "Authentication Type"
+msgstr "Kiểu xác thực"
+
+#: ../mail/mail-config.glade.h:11
+msgid "Authentication"
+msgstr "Xác thực"
+
+#: ../mail/mail-config.glade.h:12
+msgid "Composing Messages"
+msgstr "Soạn thư"
+
+#: ../mail/mail-config.glade.h:13
+msgid "Configuration"
+msgstr "Cấu hình"
+
+#: ../mail/mail-config.glade.h:14
+msgid "Default Behavior"
+msgstr "Hành vi mặc định"
+
+#: ../mail/mail-config.glade.h:15
+msgid "Delete Mail"
+msgstr "Xoá bỏ thư"
+
+#: ../mail/mail-config.glade.h:16
+msgid "Displayed Mail _Headers"
+msgstr "_Dòng đầu thư được hiển thị"
+
+#: ../mail/mail-config.glade.h:17
+msgid "Filter Options"
+msgstr "Tùy chọn bộ lọc"
+
+#: ../mail/mail-config.glade.h:19
+msgid "Labels and Colors"
+msgstr "Nhãn và màu"
+
+#: ../mail/mail-config.glade.h:20
+msgid "Loading Images"
+msgstr "Tải ảnh"
+
+#: ../mail/mail-config.glade.h:21
+msgid "Message Display"
+msgstr "Hiển thị thư"
+
+#: ../mail/mail-config.glade.h:22
+msgid "Message Fonts"
+msgstr "Phông chữ thư"
+
+#: ../mail/mail-config.glade.h:23
+msgid "Message Receipts"
+msgstr "Thông báo đã đọc thư"
+
+#: ../mail/mail-config.glade.h:24
+msgid "New Mail Notification"
+msgstr "Thông báo nhận thư mới"
+
+#: ../mail/mail-config.glade.h:25
+msgid "Optional Information"
+msgstr "Thông tin tùy chọn"
+
+#: ../mail/mail-config.glade.h:26
+msgid "Options"
+msgstr "Tùy chọn"
+
+#: ../mail/mail-config.glade.h:27
+msgid "Pretty Good Privacy (PGP/GPG)"
+msgstr "Riêng tư hơi tốt (PGP/GPG)"
+
+#: ../mail/mail-config.glade.h:28
+msgid "Printed Fonts"
+msgstr "Phông chữ in"
+
+#: ../mail/mail-config.glade.h:29
+msgid "Required Information"
+msgstr "Thông tin bắt buộc"
+
+#: ../mail/mail-config.glade.h:30
+msgid "Secure MIME (S/MIME)"
+msgstr "MIME an toàn (S/MIME)"
+
+#: ../mail/mail-config.glade.h:31
+msgid "Security"
+msgstr "Bảo mật"
+
+#: ../mail/mail-config.glade.h:32
+msgid "Sent and Draft Messages"
+msgstr "Thư đã gởi và thư nháp"
+
+#: ../mail/mail-config.glade.h:33
+msgid "Server Configuration"
+msgstr "Cấu hình máy phục vụ"
+
+#: ../mail/mail-config.glade.h:35
+msgid "Account Management"
+msgstr "Quản lý tài khoản"
+
+#: ../mail/mail-config.glade.h:36
+msgid "Add Ne_w Signature..." +msgstr "_Thêm chữ ký má»›i..." + +#: ../mail/mail-config.glade.h:37 +msgid "Add _Script" +msgstr "Thêm tập _lệnh" + +#: ../mail/mail-config.glade.h:38 +msgid "Al_ways sign outgoing messages when using this account" +msgstr "_Luôn ký tên lên các thÆ° cần gởi Ä‘i khi dùng tài khoản này" + +#: ../mail/mail-config.glade.h:39 +msgid "Also encrypt to sel_f when sending encrypted mail" +msgstr "_CÅ©ng tá»± mật mã khi gởi thÆ° mật mã" + +#: ../mail/mail-config.glade.h:40 +msgid "Alway_s carbon-copy (cc) to:" +msgstr "Luôn _Chép Cho (Cc) tá»›i:" + +#: ../mail/mail-config.glade.h:41 +msgid "Always _blind carbon-copy (bcc) to:" +msgstr "Luôn _Bí mật Chép Cho (Bcc) tá»›i:" + +#: ../mail/mail-config.glade.h:42 +msgid "Always _trust keys in my keyring when encrypting" +msgstr "Luôn _tin khoá trong dây khoá tôi khi mật mã hóa" + +#: ../mail/mail-config.glade.h:43 +msgid "Always encrypt to _myself when sending encrypted mail" +msgstr "_Luôn tá»± mật mã khi gởi thÆ° được mật mã" + +#: ../mail/mail-config.glade.h:44 +msgid "Attach original message" +msgstr "Äính kèm thÆ° gốc" + +#: ../mail/mail-config.glade.h:46 +msgid "Automatically insert _smiley images" +msgstr "Tá»± Ä‘á»™ng chèn _biểu tượng xúc cảm" + +#: ../mail/mail-config.glade.h:47 +msgid "Baltic (ISO-8859-13)" +msgstr "Ban-tích (ISO-8859-13)" + +#: ../mail/mail-config.glade.h:48 +msgid "Baltic (ISO-8859-4)" +msgstr "Ban-tích (ISO-8859-4)" + +#: ../mail/mail-config.glade.h:49 +msgid "Beep w_hen new mail arrives" +msgstr "_Kêu bíp khi nhận thÆ° má»›i" + +#: ../mail/mail-config.glade.h:50 +msgid "C_haracter set:" +msgstr "Bá»™ _ký tá»± :" + +#: ../mail/mail-config.glade.h:51 +msgid "Ch_eck for Supported Types " +msgstr "_Kiểm tra kiểu được há»— trợ " + +#: ../mail/mail-config.glade.h:52 +msgid "Check in_coming mail for junk" +msgstr "Kiểm tra thÆ° má»›i _đến là rác" + +#: ../mail/mail-config.glade.h:53 +msgid "Check spelling while I _type" +msgstr "Kiểm tra chính tả khi Ä‘ang _gõ" + +#: ../mail/mail-config.glade.h:54 +msgid "Checks incoming mail messages to be Junk" +msgstr "Kiểm tra nếu thÆ° đã gởi đến là thÆ° Rác" + +#: ../mail/mail-config.glade.h:55 +msgid "Cle_ar" +msgstr "_Xoá" + +#: ../mail/mail-config.glade.h:56 +msgid "Clea_r" +msgstr "Xó_a" + +#: ../mail/mail-config.glade.h:57 +msgid "Color for _misspelled words:" +msgstr "Màu từ gõ _sai:" + +#: ../mail/mail-config.glade.h:59 +msgid "Confirm _when expunging a folder" +msgstr "_Xác nhận khi xoá hẳn thÆ° mục" + +#: ../mail/mail-config.glade.h:60 +msgid "" +"Congratulations, your mail configuration is complete.\n" +"\n" +"You are now ready to send and receive email \n" +"using Evolution. \n" +"\n" +"Click \"Apply\" to save your settings." +msgstr "" +"Xin chúc mừng, bạn đã hoàn tất quá trình cấu hình thÆ°.\n" +"\n" +"Từ bây giá» bạn có thể gởi và nhận thÆ° bằng Evolution.\n" +"\n" +"Hãu nhấn « Ãp dụng » để lÆ°u các thiết lập." 
+
+#: ../mail/mail-config.glade.h:66
+msgid "De_fault"
+msgstr "_Mặc định"
+
+#: ../mail/mail-config.glade.h:67
+msgid "Default character e_ncoding:"
+msgstr "Bộ _ký tự mặc định:"
+
+#: ../mail/mail-config.glade.h:70
+msgid "Digitally _sign outgoing messages (by default)"
+msgstr "_Luôn ký số lên các thư cần gởi đi (theo mặc định)"
+
+#: ../mail/mail-config.glade.h:71
+msgid "Do not quote original message"
+msgstr "Không trích dẫn thư gốc"
+
+#: ../mail/mail-config.glade.h:73
+msgid "Drafts _Folder:"
+msgstr "Thư mục _Nháp:"
+
+#: ../mail/mail-config.glade.h:75
+msgid "Email Accounts"
+msgstr "Tài khoản thư"
+
+#: ../mail/mail-config.glade.h:76 ../pan/dialogs/dialog-newuser.c:273
+msgid "Email _Address:"
+msgstr "_Địa chỉ thư :"
+
+#: ../mail/mail-config.glade.h:77
+msgid "Empty trash folders on e_xit"
+msgstr "_Đổ các thư mục Rác khi thoát"
+
+#: ../mail/mail-config.glade.h:78
+msgid "Encry_ption certificate:"
+msgstr "Chứng nhận _mật mã:"
+
+#: ../mail/mail-config.glade.h:79
+msgid "Encrypt out_going messages (by default)"
+msgstr "_Mật mã mọi thư cần gởi đi (theo mặc định)"
+
+#: ../mail/mail-config.glade.h:81 ../mail/mail-config.glade.h:82
+msgid "Fi_xed-width:"
+msgstr "Độ rộng cố _định:"
+
+#: ../mail/mail-config.glade.h:82 ../mail/mail-config.glade.h:83
+msgid "Font Properties"
+msgstr "Thuộc tính phông chữ"
+
+#: ../mail/mail-config.glade.h:83 ../mail/mail-config.glade.h:84
+msgid "Format messages in _HTML"
+msgstr "Định dạng thư bằng _HTML"
+
+#: ../mail/mail-config.glade.h:85 ../mail/mail-config.glade.h:86
+msgid "HTML Mail"
+msgstr "Thư HTML"
+
+#: ../mail/mail-config.glade.h:86 ../mail/mail-config.glade.h:87
+#: ../pan/gui-notebook.c:56 ../pan/prefs.c:1623
+msgid "Headers"
+msgstr "Dòng đầu"
+
+#: ../mail/mail-config.glade.h:87 ../mail/mail-config.glade.h:88
+msgid "Highlight _quotations with"
+msgstr "Tô sáng _trích dẫn bằng"
+
+#: ../mail/mail-config.glade.h:88 ../mail/mail-config.glade.h:89
+msgid "I_nclude remote tests"
+msgstr "Cũ_ng thử từ xa"
+
+#: ../mail/mail-config.glade.h:93 ../mail/mail-config.glade.h:94
+msgid "Languages Table"
+msgstr "Bảng ngôn ngữ"
+
+#: ../mail/mail-config.glade.h:95 ../src/united-states-of-bug-buddy.c:59
+#: ../mail/mail-config.glade.h:96
+msgid "Mail Configuration"
+msgstr "Cấu hình thư"
+
+#: ../mail/mail-config.glade.h:96 ../mail/mail-config.glade.h:97
+msgid "Mail Headers Table"
+msgstr "Bảng dòng đầu thư"
+
+#: ../mail/mail-config.glade.h:98 ../mail/mail-config.glade.h:99
+msgid "Mailbox location"
+msgstr "Địa điểm hộp thư"
+
+#: ../mail/mail-config.glade.h:99 ../mail/mail-config.glade.h:100
+msgid "Message Composer"
+msgstr "Bộ soạn thảo thư"
+
+#: ../mail/mail-config.glade.h:100 ../mail/mail-config.glade.h:101
+msgid ""
+"Note: you will not be prompted for a password until you connect for the "
+"first time"
+msgstr "Ghi chú : sẽ không nhắc bạn nhập mật khẩu tới khi kết nối lần đầu."
+
+#: ../mail/mail-config.glade.h:101 ../mail/mail-config.glade.h:102
+msgid "Or_ganization:"
+msgstr "Tổ _chức:"
+
+#: ../mail/mail-config.glade.h:102 ../mail/mail-config.glade.h:103
+msgid "PGP/GPG _Key ID:"
+msgstr "ID _khoá PGP/GPG:"
+
+#: ../mail/mail-config.glade.h:105 ../mail/mail-config.glade.h:106
+msgid "Play sound file when new mail arri_ves"
+msgstr "_Phát tập tin âm thanh khi nhận thư mới"
+
+#: ../mail/mail-config.glade.h:106 ../mail/mail-config.glade.h:107
+msgid ""
+"Please enter a descriptive name for this account in the space below.\n"
+"This name will be used for display purposes only."
+msgstr "" +"Hãy nhập má»™t tên diá»…n tả cho tài khoản này vào trÆ°á»ng bên dÆ°á»›i.\n" +"Chỉ được dùng tên này vá»›i mục đích hiển thị thôi." + +#: ../mail/mail-config.glade.h:108 ../mail/mail-config.glade.h:109 +msgid "" +"Please enter information about the way you will send mail. If you are not " +"sure, ask your system administrator or Internet Service Provider." +msgstr "" +"Hãy nhập thông tin vá» cách bạn sẽ gởi thÆ°. Nếu bạn không chắc, hãy há»i quản " +"trị hệ thống hoặc ISP (nhà cung cấp dịch vụ Mạng) của bạn." + +#: ../mail/mail-config.glade.h:109 ../mail/mail-config.glade.h:110 +msgid "" +"Please enter your name and email address below. The \"optional\" fields " +"below do not need to be filled in, unless you wish to include this " +"information in email you send." +msgstr "" +"Vui lòng nhập tên và địa chỉ thÆ° Ä‘iện từ vào bên dÆ°á»›i. TrÆ°á»ng « tùy chá»n » " +"bên dÆ°á»›i không cần phải được chá»n, trừ khi bạn muốn gồm thông tin đó vào thÆ° " +"bạn cần gởi." + +#: ../mail/mail-config.glade.h:110 ../mail/mail-config.glade.h:111 +msgid "Please select among the following options" +msgstr "Hãy chá»n từ các tuỳ chá»n sau" + +#: ../mail/mail-config.glade.h:111 ../mail/mail-config.glade.h:112 +msgid "Pr_ompt when sending messages with only Bcc recipients defined" +msgstr "_Nhắc khi gởi thÆ° mà chỉ có ngÆ°á»i nhận _Bí mật Chép Cho (Bcc)" + +#: ../mail/mail-config.glade.h:112 ../mail/mail-config.glade.h:113 +msgid "Quote original message" +msgstr "Trích dẫn thÆ° gốc" + +#: ../mail/mail-config.glade.h:114 ../mail/mail-config.glade.h:115 +msgid "Re_member password" +msgstr "_Nhá»› mật khẩu" + +#: ../mail/mail-config.glade.h:115 ../mail/mail-config.glade.h:116 +msgid "Re_ply-To:" +msgstr "T_rả lá»i cho:" + +#: ../mail/mail-config.glade.h:117 ../mail/mail-config.glade.h:118 +msgid "Remember _password" +msgstr "_Nhá»› mật khẩu" + +#: ../mail/mail-config.glade.h:118 ../mail/mail-config.glade.h:119 +msgid "S_elect..." +msgstr "_Chá»n..." + +#: ../mail/mail-config.glade.h:119 ../mail/mail-config.glade.h:120 +msgid "S_tandard Font:" +msgstr "Phông chữ _chuẩn:" + +#: ../mail/mail-config.glade.h:120 ../mail/mail-config.glade.h:121 +msgid "Se_lect..." +msgstr "C_há»n..." 
+
+#: ../mail/mail-config.glade.h:122 ../mail/mail-config.glade.h:123
+msgid "Select HTML fixed width font"
+msgstr "Chọn phông chữ HTML độ rộng cố định"
+
+#: ../mail/mail-config.glade.h:123 ../mail/mail-config.glade.h:124
+msgid "Select HTML fixed width font for printing"
+msgstr "Chọn phông chữ HTML độ rộng cố định để in"
+
+#: ../mail/mail-config.glade.h:124 ../mail/mail-config.glade.h:125
+msgid "Select HTML variable width font"
+msgstr "Chọn phông chữ HTML độ rộng thay đổi"
+
+#: ../mail/mail-config.glade.h:125 ../mail/mail-config.glade.h:126
+msgid "Select HTML variable width font for printing"
+msgstr "Chọn phông chữ HTML độ rộng thay đổi để in"
+
+#: ../mail/mail-config.glade.h:126 ../gok/gok-page-feedbacks.c:875
+msgid "Select sound file"
+msgstr "Chọn tập tin âm thanh"
+
+#: ../mail/mail-config.glade.h:127
+msgid "Send message receipts:"
+msgstr "Gởi thông báo đã đọc thư :"
+
+#: ../mail/mail-config.glade.h:129 ../pan/prefs.c:1888
+msgid "Sending Mail"
+msgstr "Gởi thư"
+
+#: ../mail/mail-config.glade.h:130
+msgid "Sent _Messages Folder:"
+msgstr "Thư mục thư Đã _gởi:"
+
+#: ../mail/mail-config.glade.h:131
+msgid "Ser_ver requires authentication"
+msgstr "Máy phục vụ cần _xác thực"
+
+#: ../mail/mail-config.glade.h:132
+msgid "Server _Type: "
+msgstr "_Kiểu máy phục vụ : "
+
+#: ../mail/mail-config.glade.h:133
+msgid "Sig_ning certificate:"
+msgstr "Chứng nhận _ký tên:"
+
+#: ../mail/mail-config.glade.h:134
+msgid "Signat_ure:"
+msgstr "Chữ _ký:"
+
+#: ../mail/mail-config.glade.h:135
+msgid "Signatures"
+msgstr "Chữ ký"
+
+#: ../mail/mail-config.glade.h:136
+msgid "Signatures Table"
+msgstr "Bảng chữ ký"
+
+#: ../mail/mail-config.glade.h:137
+msgid "Specify _filename:"
+msgstr "Xác định _tên tập tin:"
+
+#: ../mail/mail-config.glade.h:138
+msgid "Spell Checking"
+msgstr "Kiểm tra chính tả"
+
+#: ../mail/mail-config.glade.h:139
+msgid "T_erminal Font:"
+msgstr "Phông chữ _thiết bị cuối:"
+
+#: ../mail/mail-config.glade.h:140
+msgid "T_ype: "
+msgstr "_Kiểu : "
+
+#: ../mail/mail-config.glade.h:141
+msgid ""
+"The list of languages here reflects only the languages for which you have a "
+"dictionary installed."
+msgstr ""
+"Danh sách ngôn ngữ chỉ phản ánh những ngôn ngữ mà bạn có cài đặt từ điển."
+
+#: ../mail/mail-config.glade.h:142
+msgid ""
+"The output of this script will be used as your\n"
+"signature. The name you specify will be used\n"
+"for display purposes only. "
+msgstr ""
+"Đầu ra của tập lệnh này sẽ được dùng\n"
+"như là chữ ký bạn.\n"
+"Tên bạn xác định sẽ chỉ được dùng\n"
+"cho mục đích hiển thị."
+
+#: ../mail/mail-config.glade.h:146
+msgid ""
+"Type the name by which you would like to refer to this account.\n"
+"For example: \"Work\" or \"Personal\""
+msgstr ""
+"Hãy gõ tên mà bạn muốn dùng cho tài khoản này.\n"
+"Ví dụ : « Chỗ làm » hoặc « Ở nhà »."
+
+#: ../mail/mail-config.glade.h:148
+msgid "User_name:"
+msgstr "T_ên người dùng:"
+
+#: ../mail/mail-config.glade.h:149
+msgid "V_ariable-width:"
+msgstr "Độ rộng th_ay đổi:"
+
+#: ../mail/mail-config.glade.h:150
+msgid ""
+"Welcome to the Evolution Mail Configuration Assistant.\n"
+"\n"
+"Click \"Forward\" to begin. "
+msgstr ""
+"Chào mừng dùng Phụ tá cấu hình thư Evolution.\n"
+"\n"
+"Hãy nhấn « Tiếp » để bắt đầu."
+
+#: ../mail/mail-config.glade.h:154
+msgid "_Add Signature"
+msgstr "Thêm chữ _ký"
+
+#: ../mail/mail-config.glade.h:155
+msgid "_Always load images from the Internet"
+msgstr "_Luôn tải ảnh từ Mạng (không đề nghị)"
+
+#: ../mail/mail-config.glade.h:156
+msgid "_Do not notify me when new mail arrives"
+msgstr "Đừn_g thông báo tôi khi nhận thư mới"
+
+#: ../mail/mail-config.glade.h:157
+msgid "_Don't sign meeting requests (for Outlook compatibility)"
+msgstr "_Không ký tên yêu cầu họp (để tương thích với trình Outlook)"
+
+#: ../mail/mail-config.glade.h:159
+msgid "_Forward style:"
+msgstr "Kiểu dáng _chuyển tiếp:"
+
+#: ../mail/mail-config.glade.h:161
+msgid "_Load images in mail from contacts"
+msgstr "_Tải ảnh trong thư từ liên lạc"
+
+#: ../mail/mail-config.glade.h:162
+msgid "_Make this my default account"
+msgstr "Chọn tài khoản này làm _mặc định"
+
+#: ../mail/mail-config.glade.h:163
+msgid "_Mark messages as read after"
+msgstr "Đánh dấu thư đã đọ_c sau"
+
+#: ../mail/mail-config.glade.h:165
+msgid "_Never load images from the Internet"
+msgstr "_Không bao giờ tải ảnh từ Mạng"
+
+#: ../mail/mail-config.glade.h:166
+msgid "_Path:"
+msgstr "Đường _dẫn:"
+
+#: ../mail/mail-config.glade.h:167
+msgid "_Prompt when sending HTML messages to contacts that don't want them"
+msgstr "_Nhắc khi gởi thư HTML cho các liên lạc không muốn nhận HTML"
+
+#: ../mail/mail-config.glade.h:168
+msgid "_Prompt when sending messages with an empty subject line"
+msgstr "_Nhắc khi gởi thư không có chủ đề"
+
+#: ../mail/mail-config.glade.h:169
+msgid "_Reply style:"
+msgstr "_Kiểu dáng trả lời:"
+
+#: ../mail/mail-config.glade.h:170
+msgid "_Script:"
+msgstr "Tập _lệnh:"
+
+#: ../mail/mail-config.glade.h:172
+msgid "_Show animated images"
+msgstr "_Hiện hoạt cảnh"
+
+#: ../mail/mail-config.glade.h:173
+msgid "_Use Secure Connection:"
+msgstr "Dùng kết nối _an toàn:"
+
+#: ../mail/mail-config.glade.h:174
+msgid "_Use the same fonts as other applications"
+msgstr "_Dùng cùng những phông chữ với các ứng dụng khác"
+
+#: ../mail/mail-config.glade.h:175
+msgid "color"
+msgstr "màu"
+
+#: ../providers/evolution/gda-calendar-model.c:68
+msgid "description"
+msgstr "mô tả"
+
+#: ../mail/mail-dialogs.glade.h:2
+msgid "Search Folder Sources"
+msgstr "Nguồn thư mục tìm kiếm"
+
+#: ../mail/mail-dialogs.glade.h:3
+msgid "Digital Signature"
+msgstr "Chữ ký số"
+
+#: ../mail/mail-dialogs.glade.h:4
+msgid "Encryption"
+msgstr "Mật mã"
+
+#: ../mail/mail-dialogs.glade.h:5 ../gnomecard/gnomecard.glade.h:2
+msgid "Case _sensitive"
+msgstr "_Phân biệt hoa/thường"
+
+#: ../mail/mail-dialogs.glade.h:6 ../mail/message-tags.glade.h:2
+msgid "Co_mpleted"
+msgstr "_Hoàn tất"
+
+#: ../mail/mail-dialogs.glade.h:8 ../shell/eggfindbar.c:300
+#: ../pan/grouplist.c:1033
+msgid "F_ind:"
+msgstr "_Tìm:"
+
+#: ../mail/mail-dialogs.glade.h:9
+msgid "Find in Message"
+msgstr "Tìm trong thư"
+
+#: ../mail/mail-dialogs.glade.h:10 ../mail/message-tag-followup.c:297
+#: ../mail/message-tags.glade.h:3 ../mail/message-tag-followup.c:295
+msgid "Flag to Follow Up"
+msgstr "Đặt cờ để theo dõi tiếp"
+
+#: ../mail/mail-dialogs.glade.h:11
+msgid "Folder Subscriptions"
+msgstr "Đăng ký thư mục"
+
+#: ../mail/mail-dialogs.glade.h:12
+msgid "License Agreement"
+msgstr "Điều kiện Quyền"
+
+#: ../mail/mail-dialogs.glade.h:13
+msgid "None Selected"
+msgstr "Chưa chọn"
+
+#: ../mail/mail-dialogs.glade.h:14
+msgid "S_erver:"
+msgstr "_Máy phục vụ :"
+
+#: ../mail/mail-dialogs.glade.h:15
+msgid "Security Information"
"Thông tin bảo mật" + +#: ../mail/mail-dialogs.glade.h:17 ../mail/message-tags.glade.h:4 +msgid "" +"The messages you have selected for follow up are listed below.\n" +"Please select a follow up action from the \"Flag\" menu." +msgstr "" +"Các thÆ° mà bạn đã chá»n để theo dõi tiếp thì được liệt kê bên dÆ°á»›i.\n" +"Hãy chá»n má»™t hành Ä‘á»™ng theo dõi tiếp từ trình Ä‘Æ¡n « CỠ»." + +#: ../mail/mail-dialogs.glade.h:19 +msgid "_Accept License" +msgstr "_Chấp nhận các Ä‘iá»u kiện này" + +#: ../mail/mail-dialogs.glade.h:20 ../mail/message-tags.glade.h:6 +msgid "_Due By:" +msgstr "Äến _hạn:" + +#: ../mail/mail-dialogs.glade.h:21 ../mail/message-tags.glade.h:7 +msgid "_Flag:" +msgstr "_Cá» :" + +#: ../mail/mail-dialogs.glade.h:23 +msgid "_Tick this to accept the license agreement" +msgstr "" +"Hãy _Äánh dấu trong há»™p chá»n này để chấp nhận các Ä‘iá»u kiện quyá»n phép." + +#: ../mail/mail-dialogs.glade.h:25 +msgid "specific folders only" +msgstr "chỉ những thÆ° mục dứt khoát thôi" + +#: ../mail/mail-dialogs.glade.h:26 +msgid "with all active remote folders" +msgstr "vá»›i má»i thÆ° mục hoạt Ä‘á»™ng từ xa" + +#: ../mail/mail-dialogs.glade.h:27 +msgid "with all local and active remote folders" +msgstr "vá»›i má»i thÆ° mục hoạt Ä‘á»™ng từ xa và cục bá»™ Ä‘á»u" + +#: ../mail/mail-dialogs.glade.h:28 +msgid "with all local folders" +msgstr "vá»›i má»i thÆ° mục cục bá»™" + +#: ../mail/mail-folder-cache.c:860 ../mail/mail-folder-cache.c:853 +#, c-format +msgid "Pinging %s" +msgstr "Äang « ping » %s..." + +#: ../mail/mail-ops.c:103 +msgid "Filtering Folder" +msgstr "Äang lá»c thÆ° mục..." + +#: ../mail/mail-ops.c:264 ../mail/mail-ops.c:263 +msgid "Fetching Mail" +msgstr "Äang lấy thÆ°..." + +#. sending mail, filtering failed +#: ../mail/mail-ops.c:564 ../mail/mail-ops.c:563 +#, c-format +msgid "Failed to apply outgoing filters: %s" +msgstr "Không áp dụng bá»™ lá»c gởi Ä‘i được: %s" + +#: ../mail/mail-ops.c:576 ../mail/mail-ops.c:605 ../mail/mail-ops.c:575 +#: ../mail/mail-ops.c:604 +#, c-format +msgid "" +"Failed to append to %s: %s\n" +"Appending to local `Sent' folder instead." +msgstr "" +"Lá»—i phụ thêm vào %s: %s\n" +"Thì phụ thêm vào thÆ° mục « Äã gởi » thay vào đó." + +#: ../mail/mail-ops.c:622 ../mail/mail-ops.c:621 +#, c-format +msgid "Failed to append to local `Sent' folder: %s" +msgstr "Lá»—i thêm vào thÆ° mục « Äã gởi » cục bá»™ : %s" + +#: ../mail/mail-ops.c:734 ../mail/mail-ops.c:733 +#, c-format +msgid "Sending message %d of %d" +msgstr "Äang gởi thÆ° %d trên %d..." + +#: ../mail/mail-ops.c:759 ../mail/mail-ops.c:758 +#, c-format +msgid "Failed to send %d of %d messages" +msgstr "Việc gởi %d trên %d thÆ° bị lá»—i." + +#: ../mail/mail-ops.c:761 ../mail/mail-send-recv.c:613 ../mail/mail-ops.c:760 +#: ../camel/camel-gpg-context.c:803 ../camel/camel-gpg-context.c:1000 +#: ../camel/providers/nntp/camel-nntp-store.c:1276 +msgid "Cancelled." +msgstr "Bị thôi" + +#: ../mail/mail-ops.c:763 ../mail/mail-ops.c:762 +msgid "Complete." +msgstr "Hoàn tất." + +#: ../mail/mail-ops.c:860 ../mail/mail-ops.c:859 +msgid "Saving message to folder" +msgstr "Äang lÆ°u thÆ° vào thÆ° mục..." + +#: ../mail/mail-ops.c:945 ../mail/mail-ops.c:944 +#, c-format +msgid "Moving messages to %s" +msgstr "Äang chuyển thÆ° tá»›i %s..." 
+
+#: ../mail/mail-ops.c:945 ../mail/mail-ops.c:944
+#, c-format
+msgid "Copying messages to %s"
+msgstr "Đang sao chép thư vào « %s »"
+
+#: ../mail/mail-ops.c:1168 ../mail/mail-ops.c:1167
+msgid "Forwarded messages"
+msgstr "Thư đã chuyển tiếp"
+
+#: ../mail/mail-ops.c:1211 ../mail/mail-ops.c:1210
+#, c-format
+msgid "Opening folder %s"
+msgstr "Đang mở thư mục « %s »"
+
+#: ../mail/mail-ops.c:1283 ../mail/mail-ops.c:1282
+#, c-format
+msgid "Opening store %s"
+msgstr "Đang mở kho « %s »"
+
+#: ../mail/mail-ops.c:1361 ../mail/mail-ops.c:1360
+#, c-format
+msgid "Removing folder %s"
+msgstr "Đang gỡ bỏ thư mục « %s »"
+
+#: ../mail/mail-ops.c:1455 ../mail/mail-ops.c:1454
+#, c-format
+msgid "Storing folder '%s'"
+msgstr "Đang cất giữ thư mục « %s »"
+
+#: ../mail/mail-ops.c:1520 ../mail/mail-ops.c:1519
+#, c-format
+msgid "Expunging and storing account '%s'"
+msgstr "Đang xoá hẳn và cất giữ tài khoản « %s »"
+
+#: ../mail/mail-ops.c:1521 ../mail/mail-ops.c:1520
+#, c-format
+msgid "Storing account '%s'"
+msgstr "Đang cất giữ tài khoản « %s »"
+
+#: ../mail/mail-ops.c:1576
+msgid "Refreshing folder"
+msgstr "Đang cập nhật thư mục"
+
+#: ../mail/mail-ops.c:1612 ../mail/mail-ops.c:1663 ../mail/mail-ops.c:1611
+#: ../mail/mail-ops.c:1662
+msgid "Expunging folder"
+msgstr "Đang xoá hẳn thư mục"
+
+#: ../mail/mail-ops.c:1660 ../mail/mail-ops.c:1659
+#, c-format
+msgid "Emptying trash in '%s'"
+msgstr "Đang đổ sọt rác trong « %s »"
+
+#: ../mail/mail-ops.c:1661 ../mail/mail-ops.c:1660
+msgid "Local Folders"
+msgstr "Thư mục cục bộ"
+
+#: ../mail/mail-ops.c:1744 ../mail/mail-ops.c:1743
+#, c-format
+msgid "Retrieving message %s"
+msgstr "Đang gọi thư « %s »"
+
+#: ../mail/mail-ops.c:1854 ../mail/mail-ops.c:1853
+#, c-format
+msgid "Retrieving %d message"
+msgid_plural "Retrieving %d message"
+msgstr[0] "Đang gọi %d thư"
+
+#: ../mail/mail-ops.c:1940 ../mail/mail-ops.c:1939
+#, c-format
+msgid "Saving %d message"
+msgid_plural "Saving %d message"
+msgstr[0] "Đang lưu %d thư"
+
+#: ../mail/mail-ops.c:1990 ../mail/mail-ops.c:1989
+#, c-format
+msgid ""
+"Unable to create output file: %s\n"
+" %s"
+msgstr ""
+"Không thể tạo tập tin xuất: %s\n"
+" %s"
+
+#: ../mail/mail-ops.c:2018 ../mail/mail-ops.c:2017
+#, c-format
+msgid ""
+"Error saving messages to: %s:\n"
+" %s"
+msgstr ""
+"Gặp lỗi khi lưu thư vào: %s:\n"
+" %s"
+
+#: ../mail/mail-ops.c:2089 ../mail/mail-ops.c:2088
+msgid "Saving attachment"
+msgstr "Đang lưu đính kèm"
+
+#: ../mail/mail-ops.c:2101 ../mail/mail-ops.c:2100
+#, c-format
+msgid ""
+"Cannot create output file: %s:\n"
+" %s"
+msgstr ""
+"Không thể tạo tập tin xuất: %s:\n"
+" %s"
+
+#: ../mail/mail-ops.c:2111 ../mail/mail-ops.c:2110
+#, c-format
+msgid "Could not write data: %s"
+msgstr "Không thể ghi dữ liệu : %s"
+
+#: ../mail/mail-ops.c:2261 ../mail/mail-ops.c:2260
+#, c-format
+msgid "Disconnecting from %s"
+msgstr "Đang ngắt kết nối từ %s..."
+
+#: ../mail/mail-ops.c:2261 ../mail/mail-ops.c:2260
+#, c-format
+msgid "Reconnecting to %s"
+msgstr "Đang tái kết nối tới %s..."
+
+#: ../mail/mail-ops.c:2377 ../mail/mail-ops.c:2376
+msgid "Checking Service"
+msgstr "Đang kiểm tra dịch vụ..."
+
+#: ../mail/mail-send-recv.c:158
+msgid "Cancelling..."
+msgstr "Đang hủy bỏ..."
+
+#: ../mail/mail-send-recv.c:265
+#, c-format
+msgid "Server: %s, Type: %s"
+msgstr ""
+"Máy phục vụ : %s\n"
+"Kiểu : %s"
+
+#: ../mail/mail-send-recv.c:267
+#, c-format
+msgid "Path: %s, Type: %s"
+msgstr ""
+"Đường dẫn: %s\n"
+"Kiểu : %s"
+
+#: ../mail/mail-send-recv.c:269 tools/interface.c:1876
+#, c-format
+msgid "Type: %s"
+msgstr "Kiểu : %s"
+
+#: ../mail/mail-send-recv.c:320
+msgid "Send & Receive Mail"
+msgstr "Gởi và Nhận Thư"
+
+#: ../mail/mail-send-recv.c:327
+msgid "Cancel _All"
+msgstr "Thôi _hết"
+
+#: ../mail/mail-send-recv.c:416 ../gtik/gtik.c:305
+#: ../gweather/gweather-applet.c:545
+msgid "Updating..."
+msgstr "Đang cập nhật..."
+
+#: ../mail/mail-send-recv.c:416 ../mail/mail-send-recv.c:468
+msgid "Waiting..."
+msgstr "Đang chờ..."
+
+#: ../mail/mail-send-recv.c:699
+#: ../camel/providers/groupwise/camel-groupwise-provider.c:51
+#: ../camel/providers/imap4/camel-imap4-provider.c:36
+msgid "Checking for new mail"
+msgstr "Đang kiểm tra tìm thư mới..."
+
+#: ../mail/mail-session.c:207
+#, c-format
+msgid "Enter Password for %s"
+msgstr "Nhập mật khẩu cho « %s »"
+
+#: ../mail/mail-session.c:206 ../mail/mail-session.c:209
+#: ../interfaces/common.glade.in.h:3 ../src/FlickrExport.cs:49
+msgid "Enter Password"
+msgstr "Nhập mật khẩu"
+
+#: ../mail/mail-session.c:241 ../mail/mail-session.c:244
+msgid "User canceled operation."
+msgstr "Người dùng đã hủy bỏ tác vụ."
+
+#: ../mail/mail-signature-editor.c:384 ../mail/mail-signature-editor.c:372
+msgid "Edit signature"
+msgstr "Sửa đổi chữ ký"
+
+#: ../mail/mail-signature-editor.c:431 ../mail/mail-signature-editor.c:412
+msgid "Enter a name for this signature."
+msgstr "Nhập tên cho chữ ký này."
+
+#: ../src/gtkfunc.c:269
+msgid "Name:"
+msgstr "Tên:"
+
+#: ../mail/mail-tools.c:120 ../mail/mail-tools.c:116
+#, c-format
+msgid "Could not create spool directory `%s': %s"
+msgstr "Không thể tạo thư mục ống chỉ « %s »: %s"
+
+#: ../mail/mail-tools.c:150 ../mail/mail-tools.c:143
+#, c-format
+msgid "Trying to movemail a non-mbox source `%s'"
+msgstr "Đang cố movemail (di chuyển thư) một nguồn không dạng mbox « %s »"
+
+#: ../mail/mail-tools.c:256 ../mail/mail-tools.c:242
+#, c-format
+msgid "Forwarded message - %s"
+msgstr "Thư đã chuyển tiếp - %s"
+
+#: ../mail/mail-tools.c:258 ../mail/mail-tools.c:244
+msgid "Forwarded message"
+msgstr "Thư đã chuyển tiếp"
+
+#: ../mail/mail-tools.c:298 ../mail/mail-tools.c:284
+#, c-format
+msgid "Invalid folder: `%s'"
+msgstr "Thư mục không hợp lệ: « %s »"
+
+#: ../mail/mail-vfolder.c:91
+#, c-format
+msgid "Setting up Search Folder: %s"
+msgstr "Đang thiết lập thư mục tìm kiếm: %s"
+
+#: ../mail/mail-vfolder.c:240
+#, c-format
+msgid "Updating Search Folders for '%s:%s'"
+msgstr "Đang cập nhật các thư mục tìm kiếm cho « %s:%s »..."
+
+#: ../mail/mail-vfolder.c:247
+#, c-format
+msgid "Updating Search Folders for '%s'"
+msgstr "Đang cập nhật các thư mục tìm kiếm cho « %s »..."
+
+#: ../mail/mail-vfolder.c:1050 ../mail/mail-vfolder.c:1046
+msgid "Edit Search Folder"
+msgstr "Hiệu chỉnh thư mục tìm kiếm"
+
+#: ../mail/mail-vfolder.c:1134 ../mail/mail-vfolder.c:1130
+msgid "New Search Folder"
+msgstr "Thư mục tìm kiếm mới"
+
+#: ../mail/mail.error.xml.h:1
+msgid ""
+"A folder named &quot;{1}&quot; already exists. Please use a different name."
+msgstr "Thư mục tên « {1} » đã có. Hãy sử dụng tên khác."
+
+#: ../mail/mail.error.xml.h:2
+msgid ""
+"A non-empty folder at &quot;{1}&quot; already exists.\n"
+"\n"
+"You can choose to ignore this folder, overwrite or append its contents, or "
+"quit.\n"
+msgstr ""
+"Một thư mục không rỗng tại « {1} » đã có.\n"
+"\n"
+"Bạn có thể chọn bỏ qua thư mục này, ghi đè lên nó, phụ thêm nội dung nó, "
+"hoặc thoát.\n"
+
+#: ../mail/mail.error.xml.h:6
+msgid ""
+"A read receipt notification has been requested for &quot;{1}&quot;. Send "
+"the reciept notification to {0}?"
+msgstr ""
+"Yêu cầu một thông báo đã đọc cho « {1} ». Gởi thông báo đó cho « {0} » không?"
+
+#: ../mail/mail.error.xml.h:7
+msgid ""
+"A signature already exists with the name &quot;{0}&quot;. Please specify a "
+"different name."
+msgstr "Chữ ký tên « {0} » đã có. Hãy gõ tên khác."
+
+#: ../mail/mail.error.xml.h:8 ../mail/mail.error.xml.h:7
+msgid ""
+"Adding a meaningful Subject line to your messages will give your recipients "
+"an idea of what your mail is about."
+msgstr ""
+"Thêm một Chủ đề có nghĩa vào thư bạn sẽ cho người nhận ý kiến về nội dung. "
+"Nhiều người sẽ bỏ qua thư không có Chủ đề (vì thường là thư rác)."
+
+#: ../mail/mail.error.xml.h:9 ../mail/mail.error.xml.h:8
+msgid "Are you sure you want to delete this account and all its proxies?"
+msgstr "Bạn có chắc muốn xoá bỏ tài khoản này và các ủy nhiệm của nó không?"
+
+#: ../mail/mail.error.xml.h:10 ../mail/mail.error.xml.h:9
+msgid "Are you sure you want to delete this account?"
+msgstr "Bạn có muốn xoá bỏ tài khoản này không?"
+
+#: ../mail/mail.error.xml.h:11 ../mail/mail.error.xml.h:10
+msgid "Are you sure you want to open {0} messages at once?"
+msgstr "Bạn có chắc muốn mở cả {0} thư cùng lúc không?"
+
+#: ../mail/mail.error.xml.h:12 ../mail/mail.error.xml.h:11
+msgid ""
+"Are you sure you want to permanently remove all the deleted messages in all "
+"folders?"
+msgstr ""
+"Bạn có chắc muốn gỡ bỏ hoàn toàn mọi thư đã xoá bỏ trong mọi thư mục không?"
+
+#: ../mail/mail.error.xml.h:13 ../mail/mail.error.xml.h:12
+msgid ""
+"Are you sure you want to permanently remove all the deleted messages in "
+"folder &quot;{0}&quot;?"
+msgstr ""
+"Bạn có chắc muốn xoá bỏ hoàn toàn mọi thư đã xoá bỏ trong thư mục « {0} » "
+"không?"
+
+#: ../mail/mail.error.xml.h:14 ../mail/mail.error.xml.h:13
+msgid "Are you sure you want to send a message in HTML format?"
+msgstr "Bạn có chắc muốn gởi thư theo dạng HTML không?"
+
+#: ../mail/mail.error.xml.h:15 ../mail/mail.error.xml.h:14
+msgid "Are you sure you want to send a message with only BCC recipients?"
+msgstr ""
+"Bạn có chắc muốn gởi thư chỉ có người nhận Bí mật Chép Cho (BCC) không?"
+
+#: ../mail/mail.error.xml.h:16 ../mail/mail.error.xml.h:15
+msgid "Are you sure you want to send a message without a subject?"
+msgstr "Bạn có chắc muốn gởi thư không có chủ đề không? (Không đề nghị)"
+
+#: ../mail/mail.error.xml.h:17 ../mail/mail.error.xml.h:16
+msgid "Because &quot;{0}&quot;."
+msgstr "Vì « {0} »."
+
+#: ../mail/mail.error.xml.h:19 ../mail/mail.error.xml.h:18
+msgid "Because &quot;{2}&quot;."
+msgstr "Vì « {2} »."
+
+#: ../mail/mail.error.xml.h:20
+msgid "Blank Signature"
+msgstr "Chữ ký rỗng"
+
+#: ../mail/mail.error.xml.h:21 ../mail/mail.error.xml.h:19
+msgid "Cannot add Search Folder &quot;{0}&quot;."
+msgstr "Không thể thêm thư mục tìm kiếm « {0} »."
+
+#: ../mail/mail.error.xml.h:22 ../mail/mail.error.xml.h:20
+msgid "Cannot copy folder &quot;{0}&quot; to &quot;{1}&quot;."
+msgstr "Không thể sao chép thư mục « {0} » vào « {1} »."
+
+#: ../mail/mail.error.xml.h:23 ../mail/mail.error.xml.h:21
+msgid "Cannot create folder &quot;{0}&quot;."
+msgstr "Không thể tạo thư mục « {0} »."
+
+#: ../mail/mail.error.xml.h:24
+msgid "Cannot create temporary save directory."
+msgstr "Không thể tạo thư mục lưu tạm."
+
+#: ../mail/mail.error.xml.h:25
+msgid "Cannot create the save directory, because &quot;{1}&quot;"
+msgstr "Không thể tạo thư mục lưu, vì « {1} »."
+
+#: ../mail/mail.error.xml.h:26 ../mail/mail.error.xml.h:24
+msgid "Cannot delete folder &quot;{0}&quot;."
+msgstr "Không thể xoá bỏ thư mục « {0} »."
+
+#: ../mail/mail.error.xml.h:27 ../mail/mail.error.xml.h:25
+msgid "Cannot delete system folder &quot;{0}&quot;."
+msgstr "Không thể xoá bỏ thư mục hệ thống « {0} »."
+
+#: ../mail/mail.error.xml.h:28 ../mail/mail.error.xml.h:26
+msgid "Cannot edit Search Folder &quot;{0}&quot; as it does not exist."
+msgstr "Không thể hiệu chỉnh thư mục tìm kiếm « {0} » vì nó không tồn tại."
+
+#: ../mail/mail.error.xml.h:29 ../mail/mail.error.xml.h:27
+msgid "Cannot move folder &quot;{0}&quot; to &quot;{1}&quot;."
+msgstr "Không thể di chuyển thư mục « {0} » đến « {1} »."
+
+#: ../mail/mail.error.xml.h:30 ../mail/mail.error.xml.h:28
+msgid "Cannot open source &quot;{1}&quot;"
+msgstr "Không thể mở nguồn « {1} »."
+
+#: ../mail/mail.error.xml.h:31 ../mail/mail.error.xml.h:29
+msgid "Cannot open source &quot;{2}&quot;."
+msgstr "Không thể mở nguồn « {2} »."
+
+#: ../mail/mail.error.xml.h:32 ../mail/mail.error.xml.h:30
+msgid "Cannot open target &quot;{2}&quot;."
+msgstr "Không thể mở đích « {2} »."
+
+#: ../mail/mail.error.xml.h:33 ../mail/mail.error.xml.h:31
+msgid ""
+"Cannot read the license file &quot;{0}&quot;, due to an installation "
+"problem. You will not be able to use this provider until you can accept its "
+"license."
+msgstr ""
+"Không thể đọc tập tin quyền « {0} » vì gặp lỗi cài đặt. Bạn sẽ không thể sử "
+"dụng nhà cung cấp này cho đến khi có thể chấp nhận quyền của nó."
+
+#: ../mail/mail.error.xml.h:34 ../mail/mail.error.xml.h:32
+msgid "Cannot rename &quot;{0}&quot; to &quot;{1}&quot;."
+msgstr "Không thể thay đổi tên thư mục « {0} » sang « {1} »."
+
+#: ../mail/mail.error.xml.h:35 ../mail/mail.error.xml.h:33
+msgid "Cannot rename or move system folder &quot;{0}&quot;."
+msgstr "Không thể thay đổi tên hoặc di chuyển thư mục hệ thống « {0} »."
+
+#: ../mail/mail.error.xml.h:36 ../mail/mail.error.xml.h:34
+msgid "Cannot save changes to account."
+msgstr "Không thể lưu các thay đổi trong tài khoản."
+
+#: ../mail/mail.error.xml.h:37
+msgid "Cannot save to directory &quot;{0}&quot;."
+msgstr "Không thể lưu vào thư mục « {0} »."
+
+#: ../mail/mail.error.xml.h:38 ../mail/mail.error.xml.h:36
+msgid "Cannot save to file &quot;{0}&quot;."
+msgstr "Không thể lưu vào tập tin « {0} »."
+
+#: ../mail/mail.error.xml.h:39 ../mail/mail.error.xml.h:37
+msgid "Cannot set signature script &quot;{0}&quot;."
+msgstr "Không thể lập tập lệnh chữ ký « {0} »."
+
+#: ../mail/mail.error.xml.h:40 ../mail/mail.error.xml.h:38
+msgid ""
+"Check to make sure your password is spelled correctly. Remember that many "
+"passwords are case sensitive; your caps lock might be on."
+msgstr ""
+"Hãy kiểm tra xem đã gõ mật khẩu cho đúng. Nhiều mật khẩu phân biệt chữ hoa, "
+"chữ thường; phím CapsLock (chữ hoa luôn) phải tắt."
+
+#: ../mail/mail.error.xml.h:41 ../mail/mail.error.xml.h:39
+msgid "Could not save signature file."
+msgstr "Không thể lưu tập tin chữ ký."
+
+#: ../mail/mail.error.xml.h:42 ../mail/mail.error.xml.h:40
+msgid "Delete &quot;{0}&quot;?"
+msgstr "Xoá bỏ « {0} » không?"
+
+#: ../mail/mail.error.xml.h:43 ../mail/mail.error.xml.h:41
+msgid "Delete account?"
+msgstr "Xoá bỏ tài khoản không?"
+
+#: ../mail/mail.error.xml.h:44
+msgid "Discard changes?"
+msgstr "Hủy các thay đổi không?"
+
+#: ../mail/mail.error.xml.h:45 ../mail/mail.error.xml.h:43
+msgid "Do you want the operation to be performed in the subfolders?"
+msgstr "Bạn có muốn thực hiện thao tác đó xuống những thư mục con không?"
+
+#: ../mail/mail.error.xml.h:46 ../mail/mail.error.xml.h:44
+msgid "Do you wish to save your changes?"
+msgstr "Bạn có muốn lưu các thay đổi không?"
+
+#: ../mail/mail.error.xml.h:47 ../mail/mail.error.xml.h:45
+msgid "Don't delete"
+msgstr "Không xoá bỏ"
+
+#: ../mail/mail.error.xml.h:48 ../mail/mail.error.xml.h:46
+msgid "Enter password."
+msgstr "Hãy gõ mật khẩu."
+
+#: ../mail/mail.error.xml.h:49 ../mail/mail.error.xml.h:47
+msgid "Error loading filter definitions."
+msgstr "Gặp lỗi khi tải lời định nghĩa bộ lọc."
+
+#: ../mail/mail.error.xml.h:50 ../mail/mail.error.xml.h:48
+msgid "Error while performing operation."
+msgstr "Gặp lỗi khi thực hiện thao tác."
+
+#: ../mail/mail.error.xml.h:51 ../mail/mail.error.xml.h:49
+msgid "Error while {0}."
+msgstr "Gặp lỗi khi « {0} »."
+
+#: ../mail/mail.error.xml.h:52 ../mail/mail.error.xml.h:50
+msgid "File exists but cannot overwrite it."
+msgstr "Tập tin đã tồn tại nhưng không thể ghi đè lên nó."
+
+#: ../mail/mail.error.xml.h:53 ../mail/mail.error.xml.h:51
+msgid "File exists but is not a regular file."
+msgstr "Tập tin tồn tại nhưng không phải là tập tin bình thường."
+
+#: ../mail/mail.error.xml.h:54 ../mail/mail.error.xml.h:52
+msgid "If you continue, you will not be able to recover these messages."
+msgstr "Nếu bạn tiếp tục, bạn sẽ không thể phục hồi những thư này."
+
+#: ../mail/mail.error.xml.h:55 ../mail/mail.error.xml.h:53
+msgid ""
+"If you delete the folder, all of its contents and its subfolders contents "
+"will be deleted permanently."
+msgstr ""
+"Nếu bạn xoá bỏ thư mục đó thì sẽ xoá bỏ hoàn toàn mọi nội dung và thư mục "
+"con của nó."
+
+#: ../mail/mail.error.xml.h:56 ../mail/mail.error.xml.h:54
+msgid ""
+"If you proceed, the account information and\n"
+"all proxy information will be deleted permanently."
+msgstr ""
+"Nếu bạn tiếp tục, sẽ xoá bỏ hoàn toàn thông tin tài khoản đó và các thông "
+"tin ủy nhiệm của nó."
+
+#: ../mail/mail.error.xml.h:58 ../mail/mail.error.xml.h:56
+msgid "If you proceed, the account information will be deleted permanently."
+msgstr "Nếu bạn tiếp tục, sẽ xoá bỏ hoàn toàn thông tin tài khoản đó."
+
+#: ../mail/mail.error.xml.h:59 ../mail/mail.error.xml.h:57
+msgid ""
+"If you quit, these messages will not be sent until Evolution is started "
+"again."
+msgstr ""
+"Nếu bạn thoát thì sẽ không gởi những thư này tới khi khởi chạy lại trình "
+"Evolution."
+
+#: ../mail/mail.error.xml.h:61 ../mail/mail.error.xml.h:59
+msgid "Invalid authentication"
+msgstr "Xác thực không hợp lệ"
+
+#: ../mail/mail.error.xml.h:62 ../mail/mail.error.xml.h:60
+msgid "Mail filters automatically updated."
+msgstr "Các bộ lọc thư đã được cập nhật tự động."
+
+#: ../mail/mail.error.xml.h:63 ../mail/mail.error.xml.h:61
+msgid ""
+"Many email systems add an Apparently-To header to messages that only have "
+"BCC recipients. This header, if added, will list all of your recipients to "
+"your message anyway. To avoid this, you should add at least one To: or CC: "
+"recipient."
+msgstr ""
+"Nhiều hệ thống thư điện tử thêm một dòng đầu « Hình như Cho » (Apparently-"
+"To) vào mọi thư chỉ có người nhận BCC (Bí mật Chép Cho). Nếu thêm dòng đầu "
Nếu thêm dòng đầu " +"đó, nó sẽ liệt kê má»i ngÆ°á»i nhận trong thÆ° của bạn. Äể tránh ngÆ°á»i gởi thÆ° " +"Rác ăn cấp các địa chỉ trong danh sách đó, bạn hãy thêm ít nhất má»™t ngÆ°á»i " +"nhận Cho (To) hay Chép Cho (Cc), v.d. địa chỉ mình." + +#: ../mail/mail.error.xml.h:64 ../mail/mail.error.xml.h:62 +msgid "Mark all messages as read" +msgstr "Äánh dấu má»i thÆ° Äã Ä‘á»c" + +#: ../mail/mail.error.xml.h:65 ../mail/mail.error.xml.h:63 +msgid "Mark all messages as read in the selected folder" +msgstr "Äánh dấu má»i thÆ° Äã Ä‘á»c trong thÆ° mục đã chá»n" + +#: ../mail/mail.error.xml.h:66 +msgid "Missing folder." +msgstr "Thiếu thÆ° mục." + +#: ../mail/mail.error.xml.h:68 ../mail/mail.error.xml.h:66 +msgid "No sources selected." +msgstr "ChÆ°a chá»n nguồn." + +#: ../mail/mail.error.xml.h:69 ../mail/mail.error.xml.h:67 +msgid "Opening too many messages at once may take a long time." +msgstr "Mở quá nhiá»u thÆ° cùng lúc có lẽ sẽ mất lâu." + +#: ../mail/mail.error.xml.h:70 ../mail/mail.error.xml.h:68 +msgid "Please check your account settings and try again." +msgstr "Hãy kiểm tra xem thiết lập tài khoản rồi thá»­ lại." + +#: ../mail/mail.error.xml.h:71 +msgid "Please enable the account or send using another account." +msgstr "Hãy bật tài khoản này hoặc gởi bằng tài khoản khác." + +#: ../mail/mail.error.xml.h:72 ../mail/mail.error.xml.h:69 +msgid "" +"Please enter a valid email address in the To: field. You can search for " +"email addresses by clicking on the To: button next to the entry box." +msgstr "" +"Hãy nhập má»™t địa chỉ thÆ° Ä‘iện tá»­ hợp lệ vào trÆ°á»ng Cho: (To). Có thể tìm " +"kiếm địa chỉ thÆ° bằng cách nhắp vào nút Cho: (To) ở cạnh há»™p nhập." + +#: ../mail/mail.error.xml.h:73 ../mail/mail.error.xml.h:70 +msgid "" +"Please make sure the following recipients are willing and able to receive " +"HTML email:\n" +"{0}\n" +"Send anyway?" +msgstr "" +"Hãy đảm bảo rằng những ngÆ°á»i nhận sau có thể và cÅ©ng muốn nhận thÆ° dạng " +"HTML:\n" +"{0}\n" +"Gởi bất chấp không?" + +#: ../mail/mail.error.xml.h:76 +msgid "Please provide an unique name to identify this signature." +msgstr "Hãy cung cấp tên duy nhất để nhận diện chữ ký này." + +#: ../mail/mail.error.xml.h:77 ../mail/mail.error.xml.h:73 +msgid "Please wait." +msgstr "Vui lòng chá»" + +#: ../mail/mail.error.xml.h:78 ../mail/mail.error.xml.h:74 +msgid "Problem migrating old mail folder "{0}"." +msgstr "Gặp lá»—i khi chuyển đổi thÆ° mục thÆ° cÅ© « {0} »." + +#: ../mail/mail.error.xml.h:79 ../mail/mail.error.xml.h:75 +msgid "Querying server" +msgstr "Äang truy vấn máy phục vụ..." + +#: ../mail/mail.error.xml.h:80 ../mail/mail.error.xml.h:76 +msgid "Querying server for a list of supported authentication mechanisms." +msgstr "" +"Äang truy vấn máy phục vụ có danh sách các cÆ¡ chế xác thá»±c được há»— trợ." + +#: ../mail/mail.error.xml.h:81 ../mail/mail.error.xml.h:77 +msgid "Read receipt requested." +msgstr "Thông báo đã Ä‘á»c đã được yêu cầu." + +#: ../mail/mail.error.xml.h:82 ../mail/mail.error.xml.h:78 +msgid "Really delete folder "{0}" and all of its subfolders?" +msgstr "" +"Bạn thật sá»± muốn xoá bá» thÆ° mục « {0} » và má»i thÆ° mục con của nó không?" + +#: ../mail/mail.error.xml.h:83 ../mail/mail.error.xml.h:79 +msgid "Search Folders automatically updated." +msgstr "Các thÆ° mục tìm kiếm đã được cập nhật tá»± Ä‘á»™ng." 
+ +#: ../mail/mail.error.xml.h:84 ../mail/mail.error.xml.h:80 +msgid "Send Receipt" +msgstr "Gởi thông báo đã Ä‘á»c" + +#: ../mail/mail.error.xml.h:85 +msgid "Signature Already Exists" +msgstr "Chữ ký đã có" + +#: ../mail/mail.error.xml.h:86 ../mail/mail.error.xml.h:81 +msgid "" +"System folders are required for Ximian Evolution to function correctly and " +"cannot be renamed, moved, or deleted." +msgstr "" +"Các thÆ° mục hệ thống có cần thiết để trình Ximian Evolution hoạt Ä‘á»™ng cho " +"đúng nên không thể thay đổi tên, di chuyển hay xoá bá» chúng." + +#: ../mail/mail.error.xml.h:87 ../mail/mail.error.xml.h:82 +msgid "" +"The contact list you are sending to is configured to hide list recipients.\n" +"\n" +"Many email systems add an Apparently-To header to messages that only have " +"BCC recipients. This header, if added, will list all of your recipients in " +"your message. To avoid this, you should add at least one To: or CC: " +"recipient. " +msgstr "" +"Bạn Ä‘ang gởi cho má»™t danh sách liên lạc có cấu hình ẩn má»i ngÆ°á»i nhận có " +"trong danh sách đó.\n" +"\n" +"Nhiá»u hệ thống thÆ° Ä‘iện tá»­ thêm má»™t dòng đầu « Hình nhÆ° Cho » (Apparently-" +"To) vào má»i thÆ° chỉ có ngÆ°á»i nhận BCC (Bí mật Chép Cho). Nếu thêm dòng đầu " +"đó, nó sẽ liệt kê má»i ngÆ°á»i nhận trong thÆ° của bạn. Äể tránh ngÆ°á»i gởi thÆ° " +"Rác ăn cấp các địa chỉ trong danh sách đó, bạn hãy thêm ít nhất má»™t ngÆ°á»i " +"nhận Cho (To) hay Chép Cho (Cc), v.d. địa chỉ mình." + +#: ../mail/mail.error.xml.h:90 ../mail/mail.error.xml.h:85 +msgid "" +"The following Search Folder(s):\n" +"{0}\n" +"Used the now removed folder:\n" +" « {1} »\n" +"And have been updated." +msgstr "" +"Những thÆ° mục tìm kiếm theo đây:\n" +"{0}\n" +"đã dùng thÆ° mục má»›i bị gỡ bá» :\n" +" « {1} »\n" +"và đã được cập nhật." + +#: ../mail/mail.error.xml.h:95 ../mail/mail.error.xml.h:90 +msgid "" +"The following filter rule(s):\n" +"{0}\n" +"Used the now removed folder:\n" +" "{1}"\n" +"And have been updated." +msgstr "" +"Những quy tắc lá»c theo đây:\n" +"{0}\n" +"đã dùng thÆ° mục má»›i bị gỡ bá» :\n" +" « {1} »\n" +"và đã được cập nhật." + +#: ../mail/mail.error.xml.h:100 ../mail/mail.error.xml.h:95 +msgid "" +"The message was sent via the "sendmail" external application. " +"Sendmail reports the following error: status 67: mail not sent.\n" +"The message is stored in the Outbox folder. Check the message for errors " +"and resend." +msgstr "" +"Äã cố gởi thÆ° đó thông qua ứng dụng ở ngoại sendmail. Trình sendmail thông " +"báo lá»—i này:\n" +"status 67: mail not sent (trạng thái 67, chÆ°a gởi thÆ°)\n" +"Äã cất giữ thÆ° đó vào thÆ° mục Há»™p Äi (Outbox). Hãy kiểm tra xem lá»—i trong " +"thÆ° đó và gởi lại." + +#: ../mail/mail.error.xml.h:102 ../mail/mail.error.xml.h:97 +msgid "The script file must exist and be executable." +msgstr "Tập tin tập lệnh phải tồn tại và có chạy được." + +#: ../mail/mail.error.xml.h:103 ../mail/mail.error.xml.h:98 +msgid "" +"This folder may have been added implicitly,\n" +"go to the Search Folder editor to add it explicitly, if required." +msgstr "" +"Có lẽ đã thêm thÆ° mục này má»™t cách ngầm; hãy dùng bá»™ hiệu chỉnh thÆ° mục tìm " +"kiếm để thêm nó má»™t cách dứt khoát, nếu cần thiết." 
+
+#: ../mail/mail.error.xml.h:105
+msgid ""
+"This message cannot be sent because the account you chose to send with is "
+"not enabled"
+msgstr ""
+"Không thể gởi thư này vì bạn đang gởi nó bằng một tài khoản chưa được bật"
+
+#: ../mail/mail.error.xml.h:106 ../mail/mail.error.xml.h:100
+msgid ""
+"This message cannot be sent because you have not specified any Recipients"
+msgstr "Không gởi được thư này vì bạn chưa ghi rõ người nhận nào."
+
+#: ../mail/mail.error.xml.h:107 ../mail/mail.error.xml.h:101
+msgid ""
+"This server does not support this type of authentication and may not support "
+"authentication at all."
+msgstr ""
+"Máy phục vụ này không hỗ trợ loại xác thực này và có lẽ hoàn toàn không hỗ "
+"trợ xác thực nào."
+
+#: ../mail/mail.error.xml.h:108 ../mail/mail.error.xml.h:102
+msgid "This signature has been changed, but has not been saved."
+msgstr "Chữ ký này đã thay đổi, nhưng vẫn chưa được lưu."
+
+#: ../mail/mail.error.xml.h:109 ../mail/mail.error.xml.h:103
+msgid "Unable to connect to the GroupWise server."
+msgstr "Không kết nối tới máy phục vụ Groupwise được."
+
+#: ../mail/mail.error.xml.h:110 ../mail/mail.error.xml.h:104
+msgid ""
+"Unable to open the drafts folder for this account. Use the system drafts "
+"folder instead?"
+msgstr ""
+"Không thể mở thư mục Nháp cho tài khoản này. Dùng thư mục Nháp của hệ thống "
+"chứ?"
+
+#: ../mail/mail.error.xml.h:111 ../mail/mail.error.xml.h:105
+msgid "Unable to read license file."
+msgstr "Không đọc được tập tin quyền."
+
+#: ../mail/mail.error.xml.h:112 ../mail/mail.error.xml.h:106
+#: ../glade/straw.glade.h:64
+msgid "Use _Default"
+msgstr "Dùng _mặc định"
+
+#: ../mail/mail.error.xml.h:113 ../mail/mail.error.xml.h:107
+msgid "Use default drafts folder?"
+msgstr "Dùng thư mục nháp mặc định chứ?"
+
+#: ../mail/mail.error.xml.h:114 ../mail/mail.error.xml.h:108
+msgid "You have not filled in all of the required information."
+msgstr "Bạn chưa điền đủ các thông tin yêu cầu."
+
+#: ../mail/mail.error.xml.h:115 ../mail/mail.error.xml.h:109
+msgid "You have unsent messages, do you wish to quit anyway?"
+msgstr "Bạn có vài thư chưa gởi, bạn vẫn muốn thoát sao?"
+
+#: ../mail/mail.error.xml.h:116 ../mail/mail.error.xml.h:110
+msgid "You may not create two accounts with the same name."
+msgstr "Không cho phép bạn tạo hai tài khoản trùng tên."
+
+#: ../mail/mail.error.xml.h:117 ../mail/mail.error.xml.h:111
+msgid "You must name this Search Folder."
+msgstr "Bạn phải đặt tên cho thư mục tìm kiếm này."
+
+#: ../mail/mail.error.xml.h:118
+msgid "You must specify a folder."
+msgstr "Bạn phải xác định thư mục."
+
+#: ../mail/mail.error.xml.h:119 ../mail/mail.error.xml.h:113
+msgid ""
+"You must specify at least one folder as a source.\n"
+"Either by selecting the folders individually, and/or by selecting all local "
+"folders, all remote folders, or both."
+msgstr ""
+"Bạn phải ghi rõ ít nhất một thư mục là nguồn,\n"
+"hoặc bằng cách chọn mỗi thư mục từng một cái,\n"
+"hoặc/và bằng cách chọn mọi thư mục địa phương,\n"
+"mọi thư mục ở xa, hoặc cả hai."
+
+#: ../mail/mail.error.xml.h:121 ../mail/mail.error.xml.h:115
+msgid "Your login to your server &quot;{0}&quot; as &quot;{0}&quot; failed."
+msgstr ""
+"Việc đăng nhập của bạn vào máy phục vụ « {0} » với tư cách « {0} » bị lỗi."
+
+#: ../mail/mail.error.xml.h:122 ../mail/mail.error.xml.h:117
+msgid "Your message with the subject &quot;{0}&quot; was not delivered."
+msgstr "Chưa phát thư của bạn có chủ đề « {0} »."
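Entries such as "Use _Default" (and "_Append" / "_Discard changes" just below) carry GTK mnemonic underscores: the character after "_" becomes the Alt accelerator, which is why each msgstr keeps an underscore of its own on a letter that exists in the translation. A minimal sketch, assuming the GTK 2-era API Evolution used at the time:

/* The '_' in the looked-up string becomes the keyboard mnemonic, so
 * "Dùng _mặc định" puts the accelerator on "m" instead of "D". */
#include <gtk/gtk.h>
#include <glib/gi18n.h>          /* provides the _() gettext macro */

GtkWidget *make_use_default_button(void)
{
    /* _() fetches the msgstr from the catalog above at runtime. */
    return gtk_button_new_with_mnemonic(_("Use _Default"));
}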
+ +#: ../mail/mail.error.xml.h:123 ../mail/mail.error.xml.h:117 +msgid "_Append" +msgstr "Phụ th_êm" + +#: ../mail/mail.error.xml.h:124 ../mail/mail.error.xml.h:118 +msgid "_Discard changes" +msgstr "_Hủy các thay đổi" + +#: ../mail/mail.error.xml.h:126 ../mail/mail.error.xml.h:120 +msgid "_Expunge" +msgstr "_Xoá hẳn" + +#: ../mail/mail.error.xml.h:127 ../mail/mail.error.xml.h:121 +msgid "_Open Messages" +msgstr "_Mở các thÆ°" + +#: ../mail/message-list.c:1004 ../mail/message-list.c:996 +msgid "Unseen" +msgstr "ChÆ°a xem" + +#: ../mail/message-list.c:1005 ../mail/message-list.c:997 +msgid "Seen" +msgstr "Äã xem" + +#: ../mail/message-list.c:1006 ../mail/message-list.c:998 +msgid "Answered" +msgstr "Äã trả lá»i" + +#: ../mail/message-list.c:1007 ../mail/message-list.c:999 +msgid "Multiple Unseen Messages" +msgstr "Nhiá»u thÆ° chÆ°a xem" + +#: ../mail/message-list.c:1008 ../mail/message-list.c:1000 +msgid "Multiple Messages" +msgstr "Nhiá»u thÆ°" + +#: ../mail/message-list.c:1012 ../mail/message-list.c:1004 +msgid "Lowest" +msgstr "Thấp nhất" + +#: ../mail/message-list.c:1013 ../mail/message-list.c:1005 +msgid "Lower" +msgstr "Thấp hÆ¡n" + +#: ../mail/message-list.c:1017 ../mail/message-list.c:1009 +msgid "Higher" +msgstr "Cao hÆ¡n" + +#: ../mail/message-list.c:1018 ../mail/message-list.c:1010 +msgid "Highest" +msgstr "Cao nhất" + +#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-# +#. strftime format of a time, +#. in 12-hour format, without seconds. +#: ../src/gnome-keyring-manager-util.c:219 ../storage/sunone-itip-view.c:152 +msgid "Today %l:%M %p" +msgstr "Hôm nay %l:%M %p" + +#: ../src/gnome-keyring-manager-util.c:231 +msgid "Yesterday %l:%M %p" +msgstr "Hôm qua %l:%M %p" + +#: ../src/gnome-keyring-manager-util.c:247 +msgid "%a %l:%M %p" +msgstr "%a %l:%M %p" + +#: ../mail/message-list.c:1382 ../widgets/table/e-cell-date.c:102 +#: ../src/gnome-keyring-manager-util.c:258 +msgid "%b %d %l:%M %p" +msgstr "%d %b %l:%M %p" + +#: ../mail/message-list.c:1384 ../widgets/table/e-cell-date.c:104 +#: ../src/gnome-keyring-manager-util.c:262 +msgid "%b %d %Y" +msgstr "%d %b %Y" + +#: ../mail/message-list.c:2055 ../mail/message-list.c:2042 +msgid "Message List" +msgstr "Danh sách thÆ°" + +#: ../mail/message-list.c:3400 ../mail/message-list.c:3387 +msgid "Generating message list" +msgstr "Äang tạo danh sách thÆ°" + +#: ../mail/message-list.etspec.h:3 +msgid "Due By" +msgstr "Äến hạn" + +#: ../mail/message-list.etspec.h:4 +msgid "Flag Status" +msgstr "Trạng thái cá»" + +#: ../mail/message-list.etspec.h:6 +msgid "Follow Up Flag" +msgstr "Cá» theo dõi tiếp" + +#: ../mail/message-list.etspec.h:8 +msgid "Original Location" +msgstr "Äịa Ä‘iểm gốc" + +#: ../mail/message-list.etspec.h:9 ../src/statusview.c:955 +#: ../Tiles/TileMailMessage.cs:116 ../src/history.c:111 ../src/history.c:154 +msgid "Received" +msgstr "Äã nhận" + +#: ../mail/message-tag-followup.c:74 ../objects/UML/message.c:135 +msgid "Call" +msgstr "Gá»i" + +#: ../mail/message-tag-followup.c:76 ../mail/message-tag-followup.c:75 +msgid "Do Not Forward" +msgstr "Không chuyển tiếp" + +#: ../mail/message-tag-followup.c:77 ../mail/message-tag-followup.c:76 +msgid "Follow-Up" +msgstr "Theo dõi tiếp" + +#: ../mail/message-tag-followup.c:78 ../mail/message-tag-followup.c:77 +msgid "For Your Information" +msgstr "Cho bạn biết tin tức này" + +#: ../mail/message-tag-followup.c:80 ../mail/message-tag-followup.c:79 +msgid "No Response Necessary" +msgstr "Không cần thiết trả lá»i" + +#: ../mail/message-tag-followup.c:83 
../ui/evolution-mail-message.xml.h:82 +#: ../mail/message-tag-followup.c:82 ../ui/evolution-mail-message.xml.h:80 +msgid "Reply to All" +msgstr "Trả lá»i tất cả" + +#: ../mail/message-tag-followup.c:84 src/ui/gtk/learningpref.c:236 +#: ../mail/message-tag-followup.c:83 +msgid "Review" +msgstr "Xem lại" + +#: ../mail/searchtypes.xml.h:1 +msgid "Body contains" +msgstr "Phần thân chứa" + +#: ../mail/searchtypes.xml.h:2 +msgid "Message contains" +msgstr "ThÆ° chứa" + +#: ../mail/searchtypes.xml.h:3 +msgid "Recipients contain" +msgstr "NgÆ°á»i nhận chứa" + +#: ../mail/searchtypes.xml.h:4 +msgid "Sender contains" +msgstr "NgÆ°á»i gởi chứa" + +#: ../mail/searchtypes.xml.h:5 +msgid "Subject contains" +msgstr "Chủ Ä‘á» chứa" + +#: ../mail/searchtypes.xml.h:6 +msgid "Subject or Sender contains" +msgstr "Chủ Ä‘á» hay ngÆ°á»i gởi chứa" + +#: ../plugins/audio-inline/org-gnome-audio-inline.eplug.xml.h:1 +msgid "" +"A formatter plugin which displays audio attachments inline and allows you to " +"play them directly from evolution." +msgstr "" +"Má»™t trình cầm phít định dạng mà hiển thị đính kèm âm thanh trong thÆ°, và cho " +"phép bạn phát chúng má»™t cách trá»±c tiếp từ trình Evolution." + +#: ../plugins/audio-inline/org-gnome-audio-inline.eplug.xml.h:2 +msgid "Audio inline plugin" +msgstr "Trình cầm phít trá»±c tiếp âm thanh" + +#: ../plugins/backup-restore/backup-restore.c:51 +msgid "Select name of Evolution archive" +msgstr "Chá»n tên của kho Evolution" + +#: ../plugins/backup-restore/backup-restore.c:61 +msgid "_Restart Evolution after backup" +msgstr "_Khởi chạy lại Evolution sau khi lÆ°u trữ" + +#: ../plugins/backup-restore/backup-restore.c:89 +msgid "Select Evolution archive to restore" +msgstr "Chá»n kho Evolution cần phục hồi" + +#: ../plugins/backup-restore/backup-restore.c:97 +msgid "_Restart Evolution after restore" +msgstr "_Khởi chạy lại Evolution sau khi phục hồi" + +#: ../plugins/backup-restore/backup.c:109 +msgid "Backup Evolution directory" +msgstr "ThÆ° mục lÆ°u trữ Evolution" + +#: ../plugins/backup-restore/backup.c:111 +msgid "Restore Evolution directory" +msgstr "ThÆ° mục phục hồi Evolution" + +#: ../plugins/backup-restore/backup.c:113 +msgid "Check Evolution archive" +msgstr "Kiểm tra kho Evolution" + +#: ../plugins/backup-restore/backup.c:115 +msgid "Restart Evolution" +msgstr "Khởi chạy lại Evolution" + +#: ../plugins/backup-restore/org-gnome-backup-restore.eplug.xml.h:1 +msgid "A plugin for backing up and restore Evolution data and settings." +msgstr "" +"Bá»™ cầm phít để lÆ°u trữ và phục hồi dữ liệu và thiết lập của trình Evolution." + +#: ../plugins/backup-restore/org-gnome-backup-restore.eplug.xml.h:2 +msgid "Backup and restore plugin" +msgstr "Bá»™ cầm phít lÆ°u trữ và phục hồi" + +#: ../plugins/backup-restore/org-gnome-backup-restore.xml.h:1 +msgid "Backup Settings..." +msgstr "Thiết lập lÆ°u trữ..." + +#: ../plugins/backup-restore/org-gnome-backup-restore.xml.h:2 +msgid "Backup and restore Evolution data and settings" +msgstr "LÆ°u trữ và phục hồi các dữ liệu và thiết lập Ä‘á»u của trình Evolution" + +#: ../plugins/backup-restore/org-gnome-backup-restore.xml.h:3 +msgid "Restore Settings..." +msgstr "Phục hồi thiết lập..." + +#: ../plugins/bbdb/bbdb.c:410 ../plugins/bbdb/bbdb.c:404 +msgid "Automatic Contacts" +msgstr "Liên lạc tá»± Ä‘á»™ng" + +#: ../plugins/bbdb/bbdb.c:419 ../plugins/bbdb/bbdb.c:413 +msgid "Automatic Contacts" +msgstr "Liên lạc tá»± Ä‘á»™ng" + +#. 
Enable BBDB checkbox +#: ../plugins/bbdb/bbdb.c:432 ../plugins/bbdb/bbdb.c:426 +msgid "" +"_Automatically create entries in the addressbook when responding to mail" +msgstr "_Tá»± Ä‘á»™ng tạo mục nhập trong sổ địa chỉ khi trả lá»i thÆ°" + +#: ../plugins/bbdb/bbdb.c:450 ../plugins/bbdb/bbdb.c:444 +msgid "Instant Messaging Contacts" +msgstr "Liên lạc tin nhắn tức khắc" + +#. Enable Gaim Checkbox +#: ../plugins/bbdb/bbdb.c:463 ../plugins/bbdb/bbdb.c:457 +msgid "" +"Periodically synchronize contact information and images from my _instant " +"messenger" +msgstr "" +"Äồng bá»™ hóa theo định ká»· các thông tin liên lạc và ảnh Ä‘á»u từ trình tin nhắn " +"tức khắc của tôi" + +#. Synchronize now button. +#: ../plugins/bbdb/bbdb.c:470 ../plugins/bbdb/bbdb.c:464 +msgid "Synchronize with _buddy list now" +msgstr "Äồng bá»™ hóa vá»›i danh sách ngÆ°á»i _bạn ngay bây giá»" + +#: ../plugins/bbdb/org-gnome-evolution-bbdb.eplug.xml.h:1 +msgid "Automatic contacts" +msgstr "Liên lạc tá»± Ä‘á»™ng" + +#: ../plugins/bbdb/org-gnome-evolution-bbdb.eplug.xml.h:2 +msgid "" +"Automatically fills your addressbook with names and email addresses as you " +"reply to mails. Also fills in IM contact information from your buddy lists." +msgstr "" +"Tá»± Ä‘á»™ng chèn vào sổ địa chỉ các tên và địa chỉ thÆ° Ä‘á»u khi bạn trả lá»i thÆ°. " +"CÅ©ng chèn thông tin vá» liên lặc tin nhắn tức khác từ các danh sách ngÆ°á»i bạn " +"của bạn." + +# Name: don't translate / Tên: đừng dịch +#: ../plugins/bbdb/org-gnome-evolution-bbdb.eplug.xml.h:3 +msgid "BBDB" +msgstr "BBDB" + +#: ../plugins/calendar-file/org-gnome-calendar-file.eplug.xml.h:1 +msgid "Local Calendars" +msgstr "Lịch địa phÆ°Æ¡ng" + +#: ../plugins/calendar-file/org-gnome-calendar-file.eplug.xml.h:2 +msgid "Provides core functionality for local calendars." +msgstr "Cung cấp chức năng lõi cho lịch địa phÆ°Æ¡ng." + +#: ../plugins/calendar-http/org-gnome-calendar-http.eplug.xml.h:1 +msgid "HTTP Calendars" +msgstr "Lịch HTTP" + +#: ../plugins/calendar-http/org-gnome-calendar-http.eplug.xml.h:2 +msgid "Provides core functionality for webcal and http calendars." +msgstr "Cung cấp chức năng lõi cho lịch webcal và HTTP." 
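The date strings a few entries back ("%a %l:%M %p", "%b %d %l:%M %p", "%b %d %Y") are strftime patterns, which is why the Vietnamese msgstr may legitimately reorder the fields ("%d %b %l:%M %p"): the translator controls field order, not the code. A small sketch; note %l (blank-padded 12-hour) is a glibc/BSD extension, so this assumes a GNU libc system:

#include <stdio.h>
#include <time.h>

int main(void)
{
    char buf[64];
    time_t now = time(NULL);
    struct tm *tm = localtime(&now);

    strftime(buf, sizeof buf, "%b %d %l:%M %p", tm);  /* English order    */
    printf("en: %s\n", buf);
    strftime(buf, sizeof buf, "%d %b %l:%M %p", tm);  /* translated order */
    printf("vi: %s\n", buf);
    return 0;
}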
+
+#: ../calendar/backends/weather/e-cal-backend-weather.c:239
+msgid "Weather: Cloudy"
+msgstr "Thời tiết: đầy mây"
+
+#: ../calendar/backends/weather/e-cal-backend-weather.c:248
+msgid "Weather: Fog"
+msgstr "Thời tiết: sương mù"
+
+#: ../plugins/calendar-weather/calendar-weather.c:62
+#: ../calendar/backends/weather/e-cal-backend-weather.c:231
+msgid "Weather: Partly Cloudy"
+msgstr "Thời tiết: phần mây"
+
+#: ../calendar/backends/weather/e-cal-backend-weather.c:252
+msgid "Weather: Rain"
+msgstr "Thời tiết: mưa"
+
+#: ../calendar/backends/weather/e-cal-backend-weather.c:249
+msgid "Weather: Snow"
+msgstr "Thời tiết: tuyết"
+
+#: ../calendar/backends/weather/e-cal-backend-weather.c:245
+msgid "Weather: Sunny"
+msgstr "Thời tiết: trời có nắng"
+
+#: ../plugins/calendar-weather/calendar-weather.c:66
+#: ../calendar/backends/weather/e-cal-backend-weather.c:233
+msgid "Weather: Thunderstorms"
+msgstr "Thời tiết: bão kèm sấm"
+
+#: ../plugins/calendar-weather/calendar-weather.c:267
+msgid "Select a location"
+msgstr "Chọn địa điểm"
+
+#: ../plugins/calendar-weather/calendar-weather.c:654
+msgid "_Units:"
+msgstr "_Đơn vị:"
+
+#: ../plugins/calendar-weather/calendar-weather.c:661
+msgid "Metric (Celsius, cm, etc)"
+msgstr "Mét (chia trăm độ, cm v.v.)"
+
+#: ../plugins/calendar-weather/calendar-weather.c:662
+msgid "Imperial (Fahrenheit, inches, etc)"
+msgstr "Mỹ (cái đo nhiệt Fa-ren-hét, insơ v.v.)"
+
+#: ../plugins/calendar-weather/org-gnome-calendar-weather.eplug.xml.h:1
+msgid "Provides core functionality for weather calendars."
+msgstr "Cung cấp chức năng lõi cho lịch thời tiết."
+
+#: ../plugins/calendar-weather/org-gnome-calendar-weather.eplug.xml.h:2
+msgid "Weather Calendars"
+msgstr "Lịch thời tiết"
+
+#: ../plugins/copy-tool/org-gnome-copy-tool.eplug.xml.h:1
+msgid ""
+"A test plugin which demonstrates a popup menu plugin which lets you copy "
+"things to the clipboard."
+msgstr ""
+"Một trình cầm phít thử ra mà biểu diễn một trình cầm phít trình đơn bật lên "
+"cho phép bạn sao chép điều vào bảng tạm."
+
+#: ../plugins/copy-tool/org-gnome-copy-tool.eplug.xml.h:2
+msgid "Copy _Email Address"
+msgstr "Chép địa chỉ th_ư"
+
+#: ../plugins/copy-tool/org-gnome-copy-tool.eplug.xml.h:3
+msgid "Copy tool"
+msgstr "Công cụ chép"
+
+#: ../plugins/default-mailer/apps-evolution-mail-prompts-checkdefault.schemas.in.in.h:1
+msgid "Check whether Evolution is the default mailer"
+msgstr "Kiểm tra nếu Evolution là trình thư mặc định."
+
+#: ../plugins/default-mailer/apps-evolution-mail-prompts-checkdefault.schemas.in.in.h:2
+msgid ""
+"Every time Evolution starts, check whether or not it is the default mailer."
+msgstr "Mỗi lần khởi chạy Evolution, kiểm tra nếu nó là trình thư mặc định."
+
+#: ../plugins/default-mailer/org-gnome-default-mailer.eplug.xml.h:1
+msgid "Checks whether Evolution is the default mail client on startup."
+msgstr "Kiểm tra nếu Evolution là trình thư mặc định, khi khởi chạy."
+
+#: ../plugins/default-mailer/org-gnome-default-mailer.eplug.xml.h:2
+msgid "Default Mail Client "
+msgstr "Trình khách thư mặc định"
+
+#: ../plugins/default-mailer/org-gnome-default-mailer.error.xml.h:1
+msgid "Do you want to make Evolution your default e-mail client?"
+msgstr ""
+"Bạn có muốn đặt Evolution là trình khách thư điện tử mặc định của bạn không?"
+ +#: ../plugins/default-source/default-source.c:108 +msgid "Mark as default folder" +msgstr "Äánh dấu là thÆ° mục mặc định" + +#: ../plugins/exchange-operations/e-foreign-folder-dialog.glade.h:1 +#: ../shell/e-foreign-folder-dialog.glade.h:1 +msgid "Open Other User's Folder" +msgstr "Mở thÆ° mục của ngÆ°á»i dùng khác" + +#: ../plugins/exchange-operations/e-foreign-folder-dialog.glade.h:2 +#: ../shell/e-foreign-folder-dialog.glade.h:2 +msgid "_Account:" +msgstr "Tài _khoản:" + +#: ../plugins/exchange-operations/e-foreign-folder-dialog.glade.h:3 +#: ../shell/e-foreign-folder-dialog.glade.h:3 +msgid "_Folder Name:" +msgstr "T_ên thÆ° mục:" + +#: ../plugins/exchange-operations/e-foreign-folder-dialog.glade.h:4 +#: ../src/gnome-schedule.glade.h:72 +msgid "_User:" +msgstr "_NgÆ°á»i dùng:" + +#: ../camel/camel-exchange-provider.c:94 +msgid "Secure Password" +msgstr "Mật khẩu bảo mật" + +#: ../camel/camel-exchange-provider.c:97 +msgid "" +"This option will connect to the Exchange server using secure password (NTLM) " +"authentication." +msgstr "" +"Tùy chá»n này sẽ kết nối đến máy phục vụ Exchange dùng cách xác thá»±c mặt khẩu " +"bảo mật (NTLM)." + +#: ../plugins/exchange-operations/exchange-account-setup.c:74 +#: ../camel/camel-exchange-provider.c:105 +msgid "Plaintext Password" +msgstr "Mật khẩu chữ thô" + +#: ../plugins/exchange-operations/exchange-account-setup.c:76 +#: ../camel/camel-exchange-provider.c:107 +msgid "" +"This option will connect to the Exchange server using standard plaintext " +"password authentication." +msgstr "" +"Tùy chá»n này sẽ kết nối tá»›i máy phục vụ Exchange dùng cách xác thá»±c mật khẩu " +"chữ thô (không mã hóa)." + +#: ../plugins/exchange-operations/exchange-account-setup.c:255 +#: ../plugins/exchange-operations/exchange-account-setup.c:257 +msgid "Out Of Office" +msgstr "Ở ngoại văn phòng" + +#: ../plugins/exchange-operations/exchange-account-setup.c:262 +#: ../plugins/exchange-operations/exchange-account-setup.c:264 +msgid "" +"The message specified below will be automatically sent to \n" +"each person who sends mail to you while you are out of the office." +msgstr "" +"Thông Ä‘iệp dÆ°á»›i đây sẽ được gởi tá»± Ä‘á»™ng\n" +"tá»›i má»—i ngÆ°á»i gởi thÆ° cho bạn khi bạn ở ngoài văn phòng." + +#: ../plugins/exchange-operations/exchange-account-setup.c:281 +msgid "I am out of the office" +msgstr "Tôi hiện thá»i ở ngoài văn phòng" + +#: ../plugins/exchange-operations/exchange-account-setup.c:280 +msgid "I am in the office" +msgstr "Tôi Ä‘ang ở trong văn phòng" + +#: ../plugins/exchange-operations/exchange-account-setup.c:327 +msgid "Change the password for Exchange account" +msgstr "Thay đổi mật khẩu cho tài khoản Exchange" + +#: users/interface.c:478 ../capplets/about-me/gnome-about-me.glade.h:20 +msgid "Change Password" +msgstr "Äổi mật khẩu" + +#: ../plugins/exchange-operations/exchange-account-setup.c:333 +msgid "Manage the delegate settings for Exchange account" +msgstr "Quản lý thiết lập ủy nhiệm cho tài khoản Exchange" + +#: ../plugins/exchange-operations/exchange-account-setup.c:335 +msgid "Delegation Assitant" +msgstr "Trợ tá ủy nhiệm" + +#. 
Miscelleneous settings +#: ../plugins/exchange-operations/exchange-account-setup.c:347 +#: ../plugins/exchange-operations/exchange-account-setup.c:345 +msgid "Miscelleneous" +msgstr "Linh tinh" + +#: ../plugins/exchange-operations/exchange-account-setup.c:355 +msgid "View the size of all Exchange folders" +msgstr "Xem kích thÆ°á»›c của má»i thÆ° mục Exchange" + +#: ../plugins/exchange-operations/exchange-account-setup.c:359 +#: ../plugins/exchange-operations/exchange-account-setup.c:357 +msgid "Folders Size" +msgstr "Cỡ thÆ° mục" + +#: ../plugins/exchange-operations/exchange-account-setup.c:364 +msgid "Exchange Settings" +msgstr "Thiết lập Exchange" + +#: ../plugins/exchange-operations/exchange-account-setup.c:607 +msgid "_OWA Url:" +msgstr "Äịa chỉ Mạng _OWA:" + +#: ../plugins/exchange-operations/exchange-account-setup.c:632 +msgid "A_uthenticate" +msgstr "_Xác thá»±c" + +#: ../libgnetwork/gnetwork-tcp-connection.c:1402 +msgid "Authentication Type" +msgstr "Kiểu xác thá»±c" + +#: ../plugins/exchange-operations/exchange-account-setup.c:838 +msgid "Ch_eck for Supported Types" +msgstr "_Kiểm tra có kiểu được há»— trợ " + +#: ../plugins/exchange-operations/exchange-contacts.c:162 +msgid "" +"Evolution is in offline mode. You cannot create or modify folders now.\n" +"Please switch to online mode for such operations." +msgstr "" +"Trình Evolution hiện thá»i trong chế Ä‘á»™ ngoại tuyện. NhÆ° thế thì bạn chÆ°a có " +"thể tạo hay sá»­a đổi thÆ° mục.\n" +"Hãy chuyển đổi sang chế Ä‘á»™ trá»±c tuyến cho thao tác nhÆ° vậy." + +#: ../storage/exchange-change-password.c:114 +msgid "" +"The current password does not match the existing password for your account. " +"Please enter the correct password" +msgstr "" +"Mật khẩu hiện thá»i không trùng vá»›i mật khẩu đã có của tài khoản. Hãy gõ mật " +"khẩu chính xác" + +#: ../storage/exchange-change-password.c:121 +msgid "The two passwords do not match. Please re-enter the passwords." +msgstr "Hai mật khẩu không trùng nhau. Hãy gõ lại mật khẩu." + +#: ../storage/exchange-change-password.glade.h:3 ../ui/user_info.glade.h:18 +msgid "Confirm Password:" +msgstr "Xác nhận mật khẩu:" + +#: ../storage/exchange-change-password.glade.h:4 +msgid "Current Password:" +msgstr "Mật khẩu hiện thá»i:" + +#: ../ui/user_info.glade.h:43 +msgid "New Password:" +msgstr "Mật khẩu má»›i:" + +#: ../storage/exchange-change-password.glade.h:6 +msgid "Your current password has expired. Please change your password now." +msgstr "" +"Mật khẩu hiện thá»i của bạn đã hết hạn. Hãy thay đổi mật khẩu bạn ngay bây " +"giá»." + +#: src/mine/cmine.cc:310 ../src/preferences.c:147 ../app/core/core-enums.c:390 +#: ../objects/custom/custom.c:117 libexif/canon/mnote-canon-entry.c:302 +#: libexif/pentax/mnote-pentax-entry.c:74 +msgid "Custom" +msgstr "Tá»± chá»n" + +#: ../storage/exchange-delegates.glade.h:4 +msgid "Delegate Permissions" +msgstr "Quyá»n hạn ủy nhiệm" + +#: ../storage/exchange-permissions-dialog.c:179 +#, c-format +msgid "Permissions for %s" +msgstr "Quyá»n hạn cho %s" + +#: ../plugins/exchange-operations/exchange-delegates.c:421 +#: ../storage/exchange-delegates.c:419 +msgid "Delegate To" +msgstr "Ủy nhiệm cho" + +#: ../storage/exchange-delegates.c:563 +#, c-format +msgid "Remove the delegate %s?" +msgstr "Gỡ bá» ngÆ°á»i được ủy nhiệm %s không?" + +#: ../plugins/exchange-operations/exchange-delegates.c:679 +msgid "Could not access Active Directory" +msgstr "Không thể truy cập thÆ° mục hoạt Ä‘á»™ng." 
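Entries flagged "#, c-format" in this file, such as "Permissions for %s" just above, are printf templates: the translation is fetched with gettext and then handed straight to printf, so msgid and msgstr must keep compatible conversions (msgfmt -c is what verifies this at build time). A short sketch; the "evolution" domain name and locale directory are assumptions for illustration:

#include <stdio.h>
#include <locale.h>
#include <libintl.h>
#define _(s) gettext(s)

int main(void)
{
    setlocale(LC_ALL, "");                      /* honour $LANG, e.g. vi_VN */
    bindtextdomain("evolution", "/usr/share/locale");
    textdomain("evolution");

    /* Under a Vietnamese locale with the catalog above installed this
     * prints: Quyền hạn cho alice */
    printf(_("Permissions for %s"), "alice");
    putchar('\n');
    return 0;
}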
+ +#: ../plugins/exchange-operations/exchange-delegates.c:694 +msgid "Could not find self in Active Directory" +msgstr "Không tìm thấy chính nó trong thÆ° mục hoạt Ä‘á»™ng." + +#: ../plugins/exchange-operations/exchange-delegates.c:707 +#, c-format +msgid "Could not find delegate %s in Active Directory" +msgstr "Không tìm thấy ngÆ°á»i được ủy nhiệm « %s » trong thÆ° mục hoạt Ä‘á»™ng." + +#: ../storage/exchange-delegates.c:720 +#, c-format +msgid "Could not remove delegate %s" +msgstr "Không thể gỡ bá» ngÆ°á»i được ủy nhiệm %s" + +#: ../plugins/exchange-operations/exchange-delegates.c:779 +#: ../storage/exchange-delegates.c:780 +msgid "Could not update list of delegates." +msgstr "Không thể cập nhật danh sách các ngÆ°á»i được ủy nhiệm." + +#: ../storage/exchange-delegates.c:798 +#, c-format +msgid "Could not add delegate %s" +msgstr "Không thể thêm ngÆ°á»i được ủy nhiệm %s" + +#: ../plugins/exchange-operations/exchange-delegates.c:965 +#: ../storage/exchange-delegates.c:967 +msgid "Error reading delegates list." +msgstr "Gặp lá»—i khi Ä‘á»c danh sách các ngÆ°á»i được ủy nhiệm." + +#: ../plugins/exchange-operations/exchange-delegates.glade.h:1 +#: ../storage/exchange-delegates.glade.h:1 +msgid "Author (read, create)" +msgstr "Tác giả (Ä‘á»c, tạo)" + +#. Translators: This is used for permissions for for the folder Calendar. +#: ../plugins/exchange-operations/exchange-delegates.glade.h:3 +#: ../src/zenity.glade.h:7 +msgid "C_alendar:" +msgstr "_Lịch:" + +#. Translators: This is used for permissions for for the folder Contacts. +#: ../storage/exchange-delegates.glade.h:3 +msgid "Co_ntacts:" +msgstr "_Liên lạc:" + +#: ../storage/exchange-delegates.glade.h:5 +msgid "Delegates" +msgstr "NgÆ°á»i được ủy nhiệm" + +#: ../storage/exchange-delegates.glade.h:6 +msgid "Editor (read, create, edit)" +msgstr "NgÆ°á»i sá»­a (Ä‘á»c, tạo, sá»­a đổi)" + +#: ../storage/exchange-delegates.glade.h:8 +msgid "Permissions for" +msgstr "Quyá»n hạn cho" + +#: ../storage/exchange-delegates.glade.h:9 +msgid "Reviewer (read-only)" +msgstr "NgÆ°á»i xem lại (chỉ Ä‘á»c)" + +#: ../storage/exchange-delegates.glade.h:10 +msgid "" +"These users will be able to send mail on your behalf\n" +"and access your folders with the permissions you give them." +msgstr "" +"Những ngÆ°á»i dùng này sẽ có thể gởi thÆ° Ä‘iện tá»­\n" +"thay mặt cho bạn, cÅ©ng có thể truy cập các thÆ° mục bạn,\n" +"dùng quyá»n hạn mà bạn đã cho há»." + +#: ../storage/exchange-delegates.glade.h:12 +msgid "_Delegate can see private items" +msgstr "NgÆ°á»i ủ_y nhiêm có thể thấy những mục tÆ° nhân" + +#: ../storage/exchange-delegates.glade.h:15 +msgid "_Tasks:" +msgstr "_Tác vụ" + +#: ../plugins/exchange-operations/exchange-folder-permission.c:58 +#: ../plugins/exchange-operations/org-gnome-folder-permissions.xml.h:2 +msgid "Permissions..." +msgstr "Quyá»n hạn..." 
+ +#: ../plugins/exchange-operations/exchange-folder-size-display.c:136 +#: ../storage/exchange-folder-size.c:322 +msgid "Folder Name" +msgstr "Tên thÆ° mục" + +#: ../plugins/exchange-operations/exchange-folder-size-display.c:140 +#: ../storage/exchange-folder-size.c:326 +msgid "Folder Size" +msgstr "Cỡ thÆ° mục" + +#: ../plugins/exchange-operations/exchange-folder-subscription.c:181 +#: ../plugins/exchange-operations/org-gnome-folder-subscription.xml.h:1 +msgid "Subscribe to Other User's Folder" +msgstr "Äăng ký vá»›i thÆ° mục của ngÆ°á»i dùng khác" + +#: ../plugins/exchange-operations/exchange-folder-tree.glade.h:1 +#: ../storage/exchange-folder-tree.glade.h:1 +msgid "Exchange Folder Tree" +msgstr "Cây thÆ° mục Exchange" + +#: ../plugins/exchange-operations/exchange-folder.c:110 +msgid "Unsubscribe Folder..." +msgstr "BỠđăng ký thÆ° mục..." + +#: ../shell/e-folder-misc-dialogs.c:766 +#, c-format +msgid "Really unsubscribe from folder \"%s\"?" +msgstr "Thật sá»± bỠđăng ký trên thÆ° mục « %s » không?" + +#: ../shell/e-folder-misc-dialogs.c:780 +#, c-format +msgid "Unsubscribe from \"%s\"" +msgstr "BỠđăng ký trên thÆ° mục « %s »" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.c:301 +#: ../storage/exchange-permissions-dialog.c:295 +msgid "(Permission denied.)" +msgstr "(Không đủ quyá»n truy cập.)" + +#: ../storage/exchange-permissions-dialog.c:402 +msgid "Add User:" +msgstr "Thêm ngÆ°á»i dùng:" + +#: ../storage/exchange-permissions-dialog.c:402 ../src/gtkfunc.c:101 +msgid "Add User" +msgstr "Thêm ngÆ°á»i dùng" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:2 +#: ../storage/exchange-permissions-dialog.glade.h:2 +msgid "Permissions" +msgstr "Quyá»n hạn" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:3 +#: ../storage/exchange-permissions-dialog.glade.h:3 +msgid "Cannot Delete" +msgstr "Không thể xoá bá»" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:4 +#: ../storage/exchange-permissions-dialog.glade.h:4 +msgid "Cannot Edit" +msgstr "Không thể sá»­a" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:5 +#: ../storage/exchange-permissions-dialog.glade.h:5 +msgid "Create items" +msgstr "Tạo mục" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:6 +#: ../storage/exchange-permissions-dialog.glade.h:6 +msgid "Create subfolders" +msgstr "Tạo thÆ° mục con" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:7 +#: ../storage/exchange-permissions-dialog.glade.h:7 +msgid "Delete Any Items" +msgstr "Xoá bá» má»i mục" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:8 +#: ../storage/exchange-permissions-dialog.glade.h:8 +msgid "Delete Own Items" +msgstr "Xoá bá» mục mình" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:9 +#: ../storage/exchange-permissions-dialog.glade.h:9 +msgid "Edit Any Items" +msgstr "Sá»­a má»i mục" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:10 +#: ../storage/exchange-permissions-dialog.glade.h:10 +msgid "Edit Own Items" +msgstr "Sá»­a mục mình" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:11 +#: ../storage/exchange-permissions-dialog.glade.h:11 +msgid "Folder contact" +msgstr "Liên lạc thÆ° mục" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:12 +#: ../storage/exchange-permissions-dialog.glade.h:12 +msgid "Folder owner" +msgstr "NgÆ°á»i sở hữu thÆ° mục" + +#: 
../plugins/exchange-operations/exchange-permissions-dialog.glade.h:13 +msgid "Folder visible" +msgstr "Hiển thị thÆ° mục" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:14 +#: ../storage/exchange-permissions-dialog.glade.h:14 +msgid "Read items" +msgstr "Mục đã Ä‘á»c" + +#: ../plugins/exchange-operations/exchange-permissions-dialog.glade.h:15 +#: ../storage/exchange-permissions-dialog.glade.h:15 +msgid "Role: " +msgstr "Vai trò : " + +#: ../plugins/exchange-operations/exchange-user-dialog.c:144 +#: ../servers/exchange/lib/e2k-user-dialog.c:144 ../lib/e2k-user-dialog.c:144 +msgid "Select User" +msgstr "Chá»n ngÆ°á»i dùng" + +#: ../lib/e2k-user-dialog.c:182 +msgid "Addressbook..." +msgstr "Sổ địa chỉ..." + +#: ../plugins/exchange-operations/org-gnome-exchange-ab-subscription.xml.h:1 +msgid "Subscribe to Other User's Contacts" +msgstr "Äăng ký vá»›i các liên lạc của ngÆ°á»i dùng khác" + +#: ../plugins/exchange-operations/org-gnome-exchange-cal-subscription.xml.h:1 +msgid "Subscribe to Other User's Calendar" +msgstr "Äăng ký vá»›i lịch của ngÆ°á»i dùng khác" + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:1 +msgid "Cannot change password due to configuration problems." +msgstr "Không thể thay đổi mật khẩu vì vấn Ä‘á» cấu hình." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:2 +msgid "Cannot display folders." +msgstr "Không thể hiển thị thÆ° mục." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:3 +msgid "" +"Changes to Exchange account configuration will take place after you quit and " +"restart Evolution." +msgstr "" +"Các thay đổi trong cấu hình tài khoản Evolution sẽ có tác Ä‘á»™ng sau khi bạn " +"thoát rồi khởi chạy lại trình Evolution." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:4 +msgid "Could not authenticate to server." +msgstr "Không thể xác thá»±c tá»›i máy phục vụ." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:5 +msgid "Could not change password." +msgstr "Không thể thay đổi mật khẩu." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:6 +msgid "" +"Could not configure Exchange account because \n" +"an unknown error occurred. Check the URL, \n" +"username, and password, and try again." +msgstr "" +"Không thể cấu hình tài khoản Exchange\n" +"vì gặp má»™t lá»—i lạ. Bạn hãy kiểm tra đã gõ đúng\n" +"địa chỉ Mạng, tên ngÆ°á»i dùng và mật khẩu\n" +"rồi thá»­ lại." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:9 +msgid "Could not connect to Exchange server." +msgstr "Không thể kết nối đến máy phục vụ Exchange." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:10 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:9 +msgid "Could not connect to server {0}." +msgstr "Không thể kết nối đến máy phục vụ {0}." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:11 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:10 +msgid "Could not determine folder permissions for delegates." +msgstr "Không thể xác định quyá»n truy cập thÆ° mục cho ngÆ°á»i được ủy nhiệm." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:12 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:11 +msgid "Could not find Exchange Web Storage System." +msgstr "Không tìm thấy Hệ thống Cất giữ Mạng Exchange." 
+ +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:13 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:12 +msgid "Could not locate server {0}." +msgstr "Không thể định vị máy phục vụ {0}." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:14 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:13 +msgid "Could not make {0} a delegate" +msgstr "Không thể ủy nhiệm cho « {0} »." + +#: ../storage/exchange-permissions-dialog.c:222 +msgid "Could not read folder permissions" +msgstr "Không thể Ä‘á»c quyá»n truy cập thÆ° mục." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:16 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:15 +msgid "Could not read folder permissions." +msgstr "Không thể Ä‘á»c quyá»n truy cập thÆ° mục." + +#: ../storage/exchange-oof.c:424 +msgid "Could not read out-of-office state" +msgstr "Không thể Ä‘á»c trạng thái ngoài-văn-phòng." + +#: ../storage/exchange-permissions-dialog.c:264 +msgid "Could not update folder permissions." +msgstr "Không thể cập nhật quyá»n truy cập thÆ° mục." + +#: ../storage/exchange-oof.c:199 ../storage/exchange-oof.c:406 +msgid "Could not update out-of-office state" +msgstr "Không thể cập nhật tính trạng ngoài-văn-phòng." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:20 +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:19 +msgid "Exchange Account is offline." +msgstr "Tài khoản Exchange ngoại tuyến." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:21 +msgid "" +"Exchange Connector requires access to certain\n" +"functionality on the Exchange Server that appears\n" +"to be disabled or blocked. (This is usually \n" +"unintentional.) Your Exchange Administrator will \n" +"need to enable this functionality in order for \n" +"you to be able to use Ximian Connector.\n" +"\n" +"For information to provide to your Exchange \n" +"administrator, please follow the link below:\n" +"\n" +"{0}\n" +" " +msgstr "" +"Trình Exchange Connector cần thiết truy cập\n" +"chức năng nào đó trên máy phục vụ Exchange\n" +"dÆ°Æ¡ng nhÆ° bị tắt hay bị trở ngại.\n" +"(ThÆ°á»ng không phải do chủ tâm.)\n" +"Quản trị Exchange của bạn sẽ cần phải hiệu lá»±c\n" +"chức năng này để cho phép bạn sá»­ dụng trình\n" +"Ximian Connector.\n" +"\n" +"Äể xem thông tin cần cung cấp cho quản trị Exchange,\n" +"hãy theo liên kết bên dÆ°á»›i đây:\n" +"\n" +"<{0}>\n" +" " + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:33 +msgid "Failed to update delegates:" +msgstr "Không thể cập nhật ngÆ°á»i được ủy nhiệm:" + +#: ../mail/mail-stub-exchange.c:2356 ../storage/xc-commands.c:313 +msgid "Folder already exists" +msgstr "ThÆ° mục đã có" + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:35 +#: ../storage/xc-commands.c:316 +msgid "Folder does not exist" +msgstr "Không có thÆ° mục này." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:36 +msgid "Folder offline" +msgstr "ThÆ° mục này ngoại tuyến." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:37 +#: ../shell/e-shell.c:1263 +msgid "Generic error" +msgstr "Lá»—i chung chung" + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:38 +msgid "" +"If OWA is running on a different path, you must specify that in the account " +"configuration dialog." 
+msgstr "" +"Nếu OWA Ä‘ang chạy trên Ä‘Æ°á»ng dẫn khác thì bạn cần phải ghi rõ nó trong há»™p " +"thoại cấu hình tài khoản." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:39 +msgid "Mailbox for {0} is not on this server." +msgstr "Không có há»™p thÆ° cho « {0} » trên máy phục vụ này." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:40 +msgid "Make sure the URL is correct and try again." +msgstr "Hãy kiểm tra xem gõ địa chỉ Mạng đúng, rồi thá»­ lại." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:41 +msgid "Make sure the server name is spelled correctly and try again." +msgstr "Hãy kiểm tra xem gõ tên phục vụ đúng, rồi thá»­ lại." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:42 +msgid "Make sure the username and password are correct and try again." +msgstr "Hãy kiểm tra xem gõ tên ngÆ°á»i dùng và mật khẩu đúng, rồi thá»­ lại." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:43 +msgid "No Global Catalog server configured for this account." +msgstr "" +"Không có trình phục vụ Phân loại Toàn cục được cấu hình cho tài khoản này." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:44 +msgid "No mailbox for user {0} on {1}." +msgstr "Không có há»™p thÆ° cho ngÆ°á»i dùng « {0} » trên « {1} »." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:45 +msgid "No such user {0}" +msgstr "Không có ngÆ°á»i dùng nhÆ° vậy « {0} »." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:46 +msgid "Password successfully changed." +msgstr "Mật khẩu đã được thay đổi." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:48 +msgid "Please restart Evolution" +msgstr "Hãy khởi chạy lại Evolution" + +#: ../shell/e-folder-misc-dialogs.c:451 +msgid "Please select a user." +msgstr "Hãy chá»n má»™t ngÆ°á»i dùng." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:50 +msgid "Server rejected password because it is too weak." +msgstr "Máy phục vụ đã từ chối mật khẩu vì quá yếu." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:51 +#: ../storage/exchange-config-listener.c:593 +msgid "The Exchange account will be disabled when you quit Evolution" +msgstr "Tài khoản Exchange sẽ bị tắt khi bạn thoát khá»i trình Evolution." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:52 +#: ../storage/exchange-config-listener.c:588 +msgid "The Exchange account will be removed when you quit Evolution" +msgstr "Tài khoản Exchange sẽ bị gỡ bá» khi bạn thoát khá»i trình Evolution" + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:53 +msgid "The Exchange server is not compatible with Exchange Connector." +msgstr "Tài khoản Exchange không tÆ°Æ¡ng thích vá»›i Exchange Connector." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:54 +msgid "" +"The server is runinng Exchange 5.5. Exchange Connector \n" +"supports Microsoft Exchange 2000 and 2003 only." +msgstr "" +"Máy phục vụ có chạy phần má»m Exchange phiên bản 5.5.\n" +"Exchange Connector há»— trợ chỉ Exchange phiên bản 2000 và 2003." 
+ +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:56 +msgid "" +"This probably means that your server requires \n" +"you to specify the Windows domain name \n" +"as part of your username (eg, "DOMAIN\\user").\n" +"\n" +"Or you might have just typed your password wrong." +msgstr "" +"Rất có thể có nghÄ©a là máy phục vụ cần thiết\n" +"bạn ghi rõi tên miá»n Windows là phần của tên ngÆ°á»i dùng\n" +"(v.d. «MIỀN\\ngÆ°á»i_dùng»).\n" +"\n" +"Hoạc có lẽ bạn đã gõ sai mật khẩu." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:61 +msgid "Try again with a different password." +msgstr "Hãy thá»­ lại vá»›i má»™t mật khẩu khác." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:62 +msgid "Unable to add user to access control list:" +msgstr "Không thể thêm ngÆ°á»i dùng vào danh sách Ä‘iá»u khiển truy cập." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:63 +msgid "Unable to edit delegates." +msgstr "Không thể hiệu chỉnh ngÆ°á»i được ủy nhiệm." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:64 +msgid "Unknown error looking up {0}" +msgstr "Gặp lá»—i lạ khi tra cứu « {0} »." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:65 +msgid "Unknown error." +msgstr "Không biết lá»—i đó." + +#: ../extensions/page-info/page-info-dialog.c:776 +msgid "Unknown type" +msgstr "Kiểu lạ" + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:67 +#: src/err-codes.h:152 +msgid "Unsupported operation" +msgstr "Thao tác không được há»— trợ" + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:68 +msgid "You are nearing your quota available for storing mails on this server." +msgstr "Bạn gần vượt quá giá»›i hạn lÆ°u thÆ° trên máy phục vụ này." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:69 +#: ../storage/exchange-delegates.c:445 +msgid "You cannot make yourself your own delegate" +msgstr "Bạn không thể ủy nhiệm cho mình." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:70 +msgid "You have exceeded your quota for storing mails on this server." +msgstr "Bạn đã vượt quá giá»›i hạn lÆ°u thÆ° trên máy phục vụ này." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:71 +msgid "You may only configure a single Exchange account." +msgstr "Bạn có thể cấu hình chỉ má»™t tài khoản Exchange riêng lẻ." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:72 +msgid "" +"Your current usage is : {0}KB. Try to clear up some space by deleting some " +"mails." +msgstr "" +"Hiện thá»i bạn Ä‘ang sá»­ dụng chá»— : {0}KB. Hãy cố giải phóng thêm chá»— trống " +"bằng cách xoá bá» má»™t số thÆ°." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:73 +msgid "" +"Your current usage is : {0}KB. You will not be able to either send or " +"recieve mails now." +msgstr "" +"Hiện thá»i bạn Ä‘ang sá»­ dụng chá»— : {0}KB. Vậy bạn sẽ không thể gởi hay nhận " +"thÆ°." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:74 +msgid "" +"Your current usage is : {0}KB. You will not be able to send mails till you " +"clear up some space by deleting some mails." +msgstr "" +"Hiện thá»i bạn Ä‘ang sá»­ dụng chá»— : {0}KB. Bạn sẽ không thể gởi thÆ° đến khi bạn " +"giải phóng thêm chá»— trống bằng cách xoá bá» má»™t số thÆ°." 
+ +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:75 +msgid "Your password has expired." +msgstr "Mật khẩu bạn đã hết hạn." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:77 +msgid "{0} cannot be added to an access control list" +msgstr "Không thể thêm « {0} » vào danh sách Ä‘iá»u khiển truy cập." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:78 +msgid "{0} is already a delegate" +msgstr "« {0} » đã má»™t ngÆ°á»i được ủy nhiệm." + +#: ../plugins/exchange-operations/org-gnome-exchange-operations.error.xml.h:79 +msgid "{0} is already in the list" +msgstr "« {0} » đã có trong danh sách." + +#: ../plugins/exchange-operations/org-gnome-exchange-tasks-subscription.xml.h:1 +msgid "Subscribe to Other User's Tasks" +msgstr "Äăng ký vá»›i các tác vụ của ngÆ°á»i dùng khác" + +#: ../plugins/exchange-operations/org-gnome-folder-permissions.xml.h:1 +msgid "Check folder permissions" +msgstr "Hãy kiểm tra quyá»n truy cập thÆ° mục là đúng." + +#: ../plugins/folder-unsubscribe/folder-unsubscribe.c:57 +#, c-format +msgid "Unsubscribing from folder \"%s\"" +msgstr "Äang bỠđăng ký trên thÆ° mục « %s »" + +#: ../plugins/folder-unsubscribe/org-gnome-mail-folder-unsubscribe.eplug.xml.h:1 +msgid "Allows unsubscribing of mail folders in the folder tree context menu." +msgstr "Cho phép bỠđăng ký thÆ° mục thÆ° trong trình Ä‘Æ¡n ngữ cảnh cây thÆ° mục." + +#: ../plugins/folder-unsubscribe/org-gnome-mail-folder-unsubscribe.eplug.xml.h:2 +msgid "Unsubscribe Folders" +msgstr "BỠđăng ký thÆ° mục" + +#: ../plugins/groupwise-account-setup/camel-gw-listener.c:414 +msgid "Checklist" +msgstr "Danh sách kiểm" + +#: ../plugins/groupwise-account-setup/org-gnome-gw-account-setup.eplug.xml.h:1 +msgid "Groupwise Account Setup" +msgstr "Thiết lập tài khoản Groupwise" + +#: ../plugins/groupwise-features/junk-mail-settings.c:77 +msgid "Junk Settings" +msgstr "Thiết lập Rác" + +#: ../plugins/groupwise-features/junk-mail-settings.c:90 +msgid "Junk Mail Settings" +msgstr "Thiết lập ThÆ° Rác" + +#: ../plugins/groupwise-features/junk-mail-settings.c:112 +msgid "Junk Mail Settings..." +msgstr "Thiết lập ThÆ° Rác.." + +#: ../plugins/groupwise-features/junk-settings.glade.h:1 +msgid "Junk List :" +msgstr "Danh sách Rác" + +#: ../plugins/groupwise-features/junk-settings.glade.h:2 +#: ../src/red_activation.py:57 +msgid "Email:" +msgstr "Äịa chỉ thÆ° :" + +#: ../plugins/groupwise-features/junk-settings.glade.h:3 +msgid "Junk Mail Settings" +msgstr "Thiết lập ThÆ° Rác" + +#: ../plugins/groupwise-features/junk-settings.glade.h:5 +#: ../plugins/mail-account-disable/mail-account-disable.c:46 +msgid "_Disable" +msgstr "_Tắt" + +#: ../plugins/groupwise-features/junk-settings.glade.h:6 +msgid "_Enable" +msgstr "_Bật" + +#: ../plugins/groupwise-features/junk-settings.glade.h:7 +msgid "_Junk List" +msgstr "Danh sách _Rác" + +#: ../plugins/groupwise-features/org-gnome-compose-send-options.xml.h:1 +msgid "Add Send Options to groupwise messages" +msgstr "Thêm Tùy chá»n Gởi vào má»i thÆ° Groupwise" + +#: ../widgets/misc/e-send-options.glade.h:17 +msgid "Send Options" +msgstr "Tùy chá»n gởi" + +#: ../plugins/groupwise-features/org-gnome-groupwise-features.eplug.xml.h:1 +msgid "A plugin for the features in Groupwise accounts." +msgstr "Má»™t trình cầm phít cho những tính năng trong tài khoản Groupwise." 
+ +#: ../plugins/groupwise-features/org-gnome-groupwise-features.eplug.xml.h:2 +msgid "Groupwise Features" +msgstr "Tính năng Groupwise" + +#: ../plugins/groupwise-features/process-meeting.c:49 +msgid "Accept Tentatively" +msgstr "Chấp nhận tạm" + +#: ../plugins/groupwise-features/properties.glade.h:2 +msgid "Users :" +msgstr "NgÆ°á»i dùng: " + +#: ../plugins/groupwise-features/properties.glade.h:6 +msgid "Shared Folder Notification" +msgstr "Thông báo thÆ° mục chung" + +#: ../plugins/groupwise-features/properties.glade.h:8 +msgid "The participants will receive the following notification.\n" +msgstr "Má»i ngÆ°á»i dá»± sẽ nhận thông báo theo đây.\n" + +#: ../plugins/groupwise-features/properties.glade.h:11 +msgid "_Contacts..." +msgstr "_Liên lạc..." + +#: ../plugins/groupwise-features/properties.glade.h:12 +msgid "_Cutomize notification message" +msgstr "Tá»± _chá»n thông Ä‘iệp thông báo" + +#: ../plugins/groupwise-features/properties.glade.h:13 +msgid "_Not Shared" +msgstr "_Không chung" + +#: ../plugins/groupwise-features/properties.glade.h:15 +msgid "_Shared With ..." +msgstr "_Chung vá»›i ..." + +#: ../plugins/groupwise-features/properties.glade.h:16 +msgid "_Sharing" +msgstr "Dùng _chung" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:1 +#: ../data/gnome-keyring-manager.glade.h:2 ../gnomecard/card-editor.glade.h:4 +msgid "Name" +msgstr "Tên" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:2 +msgid "Access Rights" +msgstr "Quyá»n truy cập" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:3 +msgid "Add/Edit" +msgstr "Thêm/Hiệu chỉnh" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:5 +msgid "Con_tacts" +msgstr "_Liên lạc" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:7 +msgid "Modify _folders/options/rules/" +msgstr "Sá»­a đổi _thÆ° mục/tùy chá»n/quy tắc/" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:8 +msgid "Read items marked _private" +msgstr "Äá»c mục có dấu _Riêng" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:9 +msgid "Reminder Notes" +msgstr "Chú thích nhắc nhở" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:10 +msgid "Subscribe to my _alarms" +msgstr "Äăng ký vá»›i _báo Ä‘á»™ng tôi" + +#: ../plugins/groupwise-features/proxy-add-dialog.glade.h:11 +msgid "Subscribe to my _notifications" +msgstr "Äăng ký vá»›i _thông báo tôi" + +#: ../src/dialogs.c:891 ../src/dialogs.c:1111 ../gmedia_slice/interface.c:433 +msgid "_Write" +msgstr "_Ghi" + +#: ../plugins/groupwise-features/proxy-listing.glade.h:1 network.c:1364 +#: ../src/red_prefs.py:210 +msgid "Proxy" +msgstr "Ủy nhiệm" + +#: ../src/f-spot.glade.h:183 ../gnomecard/gnomecard.glade.h:5 +msgid "dialog1" +msgstr "thoại1" + +#: ../plugins/groupwise-features/proxy-login-dialog.glade.h:1 +msgid "Account Name" +msgstr "Tên tài khoản" + +#: ../plugins/groupwise-features/proxy-login-dialog.glade.h:2 +msgid "Proxy Login" +msgstr "Äăng nhập ủy nhiệm" + +#: ../plugins/groupwise-features/proxy.c:490 +#, c-format +msgid "%sEnter password for %s (user %s)" +msgstr "%sNhập mật khẩu cho « %s » (ngÆ°á»i dùng « %s »)" + +#: ../plugins/groupwise-features/proxy-login.c:505 +#: ../plugins/groupwise-features/proxy-login.c:485 +msgid "_Proxy Login..." +msgstr "Äăng nhập ủ_y nhiệm.." + +#: ../plugins/groupwise-features/proxy.c:698 +msgid "The Proxy tab will be available only when the account is online." +msgstr "Thanh Ủy nhiệm sẽ sẵn sàng chỉ khi tài khoản trá»±c tuyến." 
+
+#: ../plugins/groupwise-features/proxy.c:703
+#: ../plugins/groupwise-features/proxy.c:672
+msgid "The Proxy tab will be available only when the account is enabled."
+msgstr "Thanh Ủy nhiệm sẽ sẵn sàng chỉ khi tài khoản được bật."
+
+#: src/fe-gtk/chanlist.c:600 ../glom/glom.glade.h:162
+#: ../providers/ibmdb2/gda-ibmdb2-provider.c:976
+#: ../providers/oracle/gda-oracle-provider.c:1848 po/silky-channel.glade.h:26
+msgid "Users"
+msgstr "Người dùng"
+
+#: ../plugins/groupwise-features/share-folder-common.c:319
+#: ../plugins/groupwise-features/share-folder-common.c:318
+msgid "Enter the users and set permissions"
+msgstr "Nhập những người dùng và lập quyền hạn"
+
+#: ../plugins/groupwise-features/share-folder-common.c:338
+#: ../plugins/groupwise-features/share-folder-common.c:337
+msgid "New _Shared Folder..."
+msgstr "Thư mục _chung mới..."
+
+#: ../plugins/groupwise-features/share-folder-common.c:446
+#: ../files/Nautilus_View_sharing_properties.server.in.in.h:2
+msgid "Sharing"
+msgstr "Chia sẻ"
+
+#: ../plugins/groupwise-features/status-track.c:235
+msgid "Track Message Status..."
+msgstr "Theo dõi trạng thái thư..."
+
+#: ../plugins/hula-account-setup/org-gnome-evolution-hula-account-setup.eplug.xml.h:1
+msgid "A plugin to setup hula calendar sources."
+msgstr "Bộ cầm phít có thể thiết lập nguồn lịch « hula »."
+
+#: ../plugins/hula-account-setup/org-gnome-evolution-hula-account-setup.eplug.xml.h:2
+msgid "Hula Account Setup"
+msgstr "Thiết lập tài khoản Hula"
+
+#: ../plugins/save-calendar/ical-format.c:136
+msgid "iCalendar format (.ics)"
+msgstr "Tập tin iCalendar (.ics)"
+
+#: ../plugins/ipod-sync/org-gnome-ipod-sync-evolution.eplug.xml.h:1
+msgid "Synchronize the selected task/calendar/addressbook with Apple iPod"
+msgstr "Đồng bộ hóa sổ địa chỉ/lịch/tác vụ đã chọn với Apple iPod"
+
+#: ../plugins/ipod-sync/org-gnome-ipod-sync-evolution.eplug.xml.h:2
+msgid "Synchronize to iPod"
+msgstr "Đồng bộ tới iPod"
+
+#: ../plugins/ipod-sync/org-gnome-ipod-sync-evolution.eplug.xml.h:3
+msgid "iPod Synchronization"
+msgstr "Đồng bộ iPod"
+
+#: ../plugins/ipod-sync/sync.c:158
+msgid "No output directory!"
+msgstr "• Không có thư mục xuất. •"
+
+#: ../plugins/ipod-sync/sync.c:159
+msgid ""
+"The output directory was not found on iPod! Please ensure that iPod has been "
+"correctly set up and try again."
+msgstr ""
+"Không tìm thấy thư mục xuất trên iPod. Hãy chắc đã thiết lập đúng iPod rồi "
+"thử lại."
+
+#: ../plugins/ipod-sync/sync.c:174 ../plugins/ipod-sync/sync.c:202
+msgid "Could not export data!"
+msgstr "• Không thể xuất dữ liệu. •"
+
+#: ../plugins/ipod-sync/sync.c:203
+msgid "Exporting data failed."
+msgstr "Lỗi xuất dữ liệu."
+
+#: ../plugins/ipod-sync/sync.c:231
+msgid "Could not open addressbook!"
+msgstr "• Không thể mở sổ địa chỉ. •"
+
+#: ../plugins/ipod-sync/sync.c:232
+msgid "Could not open the Evolution addressbook to export data."
+msgstr "Không thể mở Sổ địa chỉ Evolution để xuất dữ liệu."
+
+#: ../plugins/ipod-sync/sync.c:291
+msgid "Could not open calendar/todo!"
+msgstr "• Không thể mở lịch/cần làm. •"
+
+#: ../plugins/ipod-sync/sync.c:292
+msgid "Could not open the Evolution calendar/todo list to export data."
+msgstr "Không thể mở lịch/danh sách cần làm để xuất dữ liệu."
+
+#: ../plugins/itip-formatter/itip-formatter.c:724
+msgid "Unable to parse item"
+msgstr "Không thể phân tích mục"
+
+#: ../plugins/itip-formatter/itip-formatter.c:780
+#, c-format
+msgid "Unable to send item to calendar '%s'. %s"
+msgstr "Không gởi được mục cho lịch « %s ». %s"
+
+#: ../plugins/itip-formatter/itip-formatter.c:791
+#, c-format
+msgid "Sent to calendar '%s' as accepted"
+msgstr "Đã gởi cho lịch « %s »: đã chấp nhận"
+
+#: ../plugins/itip-formatter/itip-formatter.c:795
+#, c-format
+msgid "Sent to calendar '%s' as tentative"
+msgstr "Đã gởi cho lịch « %s »: dự định"
+
+#: ../plugins/itip-formatter/itip-formatter.c:800
+#, c-format
+msgid "Sent to calendar '%s' as declined"
+msgstr "Đã gởi cho lịch « %s »: bị từ chối"
+
+#: ../plugins/itip-formatter/itip-formatter.c:805
+#, c-format
+msgid "Sent to calendar '%s' as cancelled"
+msgstr "Đã gởi cho lịch « %s »: bị hủy bỏ"
+
+#: ../plugins/itip-formatter/itip-formatter.c:898
+#, c-format
+msgid "Organizer has removed the delegate %s "
+msgstr "Bộ tổ chức đã gỡ bỏ người được ủy nhiệm %s."
+
+#: ../plugins/itip-formatter/itip-formatter.c:905
+msgid "Sent a cancellation notice to the delegate"
+msgstr "Đã gởi một thông báo hủy bỏ cho người được ủy nhiệm."
+
+#: ../plugins/itip-formatter/itip-formatter.c:907
+msgid "Could not send the cancellation notice to the delegate"
+msgstr "Không gởi được thông báo hủy bỏ cho người được ủy nhiệm."
+
+#: ../plugins/itip-formatter/itip-formatter.c:991
+#: ../plugins/itip-formatter/itip-formatter.c:979
+msgid "Attendee status could not be updated because the status is invalid"
+msgstr "Không thể cập nhật trạng thái người dự vì trạng thái không hợp lệ."
+
+#: ../plugins/itip-formatter/itip-formatter.c:1017
+#: ../plugins/itip-formatter/itip-formatter.c:1005
+#, c-format
+msgid "Unable to update attendee. %s"
+msgstr "Không thể cập nhật người dự. %s"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1021
+#: ../plugins/itip-formatter/itip-formatter.c:1009
+msgid "Attendee status updated"
+msgstr "Trạng thái người dự đã được cập nhật"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1148
+#: ../plugins/itip-formatter/itip-formatter.c:1136
+msgid "The calendar attached is not valid"
+msgstr "Lịch đã đính kèm không hợp lệ"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1149
+#: ../plugins/itip-formatter/itip-formatter.c:1137
+msgid ""
+"The message claims to contain a calendar, but the calendar is not valid "
+"iCalendar."
+msgstr ""
+"Thư này tuyên bố chứa một lịch, nhưng mà lịch đó không phải là một iCalendar "
+"hợp lệ."
+
+#: ../plugins/itip-formatter/itip-formatter.c:1262
+msgid "The item in the calendar is not valid"
+msgstr "Mục đó trong lịch không hợp lệ."
+
+#: ../plugins/itip-formatter/itip-formatter.c:1263
+msgid ""
+"The message does contain a calendar, but the calendar contains no events, "
+"tasks or free/busy information"
+msgstr ""
+"Thư đó có phải chứa một lịch, nhưng mà lịch đó không chứa sự kiện nào, công "
+"việc nào hay thông tin rảnh/bận nào."
+
+#: ../plugins/itip-formatter/itip-formatter.c:1209
+#: ../plugins/itip-formatter/itip-formatter.c:1197
+msgid "The calendar attached contains multiple items"
+msgstr "Lịch đã đính kèm chứa nhiều mục"
+
+#: ../plugins/itip-formatter/itip-formatter.c:1210
+#: ../plugins/itip-formatter/itip-formatter.c:1198
+msgid ""
+"To process all of these items, the file should be saved and the calendar "
+"imported"
+msgstr "Để xử lý mọi mục này thì nên lưu tập tin này và nhập lịch đó."
+
+#: ../plugins/itip-formatter/itip-formatter.c:1977
+msgid "_Delete message after acting"
+msgstr "_Xoá bỏ thư sau hành động"
+
+#: ../plugins/itip-formatter/itip-formatter.c:2001
+#: ../plugins/itip-formatter/itip-formatter.c:1987
+msgid "Conflict Search"
+msgstr "Tìm kiếm xung đột"
+
+#: ../plugins/itip-formatter/itip-formatter.c:2000
+msgid "Select the calendars to search for meeting conflicts"
+msgstr "Chọn những lịch cần tìm kiếm cuộc họp có xung đột với nhau"
+
+#: ../plugins/itip-formatter/itip-formatter.c:2031
+#: ../plugins/itip-formatter/itip-formatter.c:2017
+msgid "Conflict Search Table"
+msgstr "Bảng tìm kiếm xung đột"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a date.
+#: ../gtk/gtkfilechooserdefault.c:7037 ../gncal/gnomecal-main-window.c:576
+#: ../storage/sunone-itip-view.c:138 utils.c:1089
+msgid "Today"
+msgstr "Hôm nay"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 24-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:187 ../storage/sunone-itip-view.c:143
+#, fuzzy
+msgid "Today %H:%M"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Hôm nay %l:%M %p\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"Hôm nay %H:%M"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 24-hour format.
+#: ../plugins/itip-formatter/itip-view.c:191 ../storage/sunone-itip-view.c:147
+#, fuzzy
+msgid "Today %H:%M:%S"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Hôm nay %l:%M %p\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"Hôm nay %H:%M:%S"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 12-hour format.
+#: ../plugins/itip-formatter/itip-view.c:200 ../storage/sunone-itip-view.c:156
+#, fuzzy
+msgid "Today %l:%M:%S %p"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Hôm nay %l:%M %p\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"Hôm nay %l:%M:%S %p"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a date.
+#: ../plugins/itip-formatter/itip-view.c:210 ../storage/sunone-itip-view.c:166
+msgid "Tomorrow"
+msgstr "Ngày mai"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 24-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:215 ../storage/sunone-itip-view.c:171
+msgid "Tomorrow %H:%M"
+msgstr "Ngày mai %H:%M"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 24-hour format.
+#: ../plugins/itip-formatter/itip-view.c:219 ../storage/sunone-itip-view.c:175
+msgid "Tomorrow %H:%M:%S"
+msgstr "Ngày mai %H:%M:%S"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 12-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:224 ../storage/sunone-itip-view.c:180
+msgid "Tomorrow %l:%M %p"
+msgstr "Ngày mai %l:%M %p"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a time,
+#. in 12-hour format.
+#: ../plugins/itip-formatter/itip-view.c:228 ../storage/sunone-itip-view.c:184
+msgid "Tomorrow %l:%M:%S %p"
+msgstr "Ngày mai %l:%M:%S %p"
+
+# Variable: don't translate / Biến: đừng dịch
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday.
+#: ../plugins/itip-formatter/itip-view.c:247 ../storage/sunone-itip-view.c:203
+#, c-format
+msgid "%A"
+msgstr "%A"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 24-hour format, without seconds.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 24-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:252 ../storage/sunone-itip-view.c:208
+msgid "%A %H:%M"
+msgstr "%A %H:%M"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 24-hour format.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 24-hour format.
+#: ../plugins/itip-formatter/itip-view.c:256 ../storage/sunone-itip-view.c:212
+msgid "%A %H:%M:%S"
+msgstr "%A %H:%M:%S"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 12-hour format, without seconds.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 12-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:261 ../storage/sunone-itip-view.c:217
+msgid "%A %l:%M %p"
+msgstr "%A %l:%M %p"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 12-hour format.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a
+#. time, in 12-hour format.
+#: ../plugins/itip-formatter/itip-view.c:266
+#: ../plugins/itip-formatter/itip-view.c:265 ../storage/sunone-itip-view.c:221
+msgid "%A %l:%M:%S %p"
+msgstr "%A %l:%M:%S %p"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday and a date
+#. without a year.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a date
+#. without a year.
+#: ../plugins/itip-formatter/itip-view.c:274 ../storage/sunone-itip-view.c:230
+msgid "%A, %B %e"
+msgstr "%A, %B %e"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date
+#. without a year and a time,
+#. in 24-hour format, without seconds.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date
+#. without a year and a time,
+#. in 24-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:280 ../storage/sunone-itip-view.c:236
+msgid "%A, %B %e %H:%M"
+msgstr "%A, %B %e %H:%M"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date without a year
+#. and a time, in 24-hour format.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date without a year
+#. and a time, in 24-hour format.
+#: ../plugins/itip-formatter/itip-view.c:284 ../storage/sunone-itip-view.c:240
+msgid "%A, %B %e %H:%M:%S"
+msgstr "%A, %B %e %H:%M:%S"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date without a year
+#. and a time, in 12-hour format, without seconds.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date without a year
+#. and a time, in 12-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:289 ../storage/sunone-itip-view.c:245
+msgid "%A, %B %e %l:%M %p"
+msgstr "%A, %B %e %l:%M %p"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date without a year
+#. and a time, in 12-hour format.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date without a year
+#. and a time, in 12-hour format.
+#: ../plugins/itip-formatter/itip-view.c:293 ../storage/sunone-itip-view.c:249
+msgid "%A, %B %e %l:%M:%S %p"
+msgstr "%A, %B %e %l:%M:%S %p"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday and a date.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday and a date.
+#: ../plugins/itip-formatter/itip-view.c:299 ../storage/sunone-itip-view.c:255
+msgid "%A, %B %e, %Y"
+msgstr "%A, %B %e, %Y"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 24-hour format, without seconds.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 24-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:305
+#: ../plugins/itip-formatter/itip-view.c:304 ../storage/sunone-itip-view.c:260
+msgid "%A, %B %e, %Y %H:%M"
+msgstr "%A, %B %e, %Y %H:%M"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 24-hour format.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 24-hour format.
+#: ../plugins/itip-formatter/itip-view.c:309
+#: ../plugins/itip-formatter/itip-view.c:308 ../storage/sunone-itip-view.c:264
+msgid "%A, %B %e, %Y %H:%M:%S"
+msgstr "%A, %B %e, %Y %H:%M:%S"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 12-hour format, without seconds.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 12-hour format, without seconds.
+#: ../plugins/itip-formatter/itip-view.c:314
+#: ../plugins/itip-formatter/itip-view.c:313 ../storage/sunone-itip-view.c:269
+msgid "%A, %B %e, %Y %l:%M %p"
+msgstr "%A, %B %e, %Y %l:%M %p"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 12-hour format.
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. strftime format of a weekday, a date and a
+#. time, in 12-hour format.
+#: ../plugins/itip-formatter/itip-view.c:318
+#: ../plugins/itip-formatter/itip-view.c:317 ../storage/sunone-itip-view.c:273
+msgid "%A, %B %e, %Y %l:%M:%S %p"
+msgstr "%A, %B %e, %Y %l:%M:%S %p"
+
+#: ../plugins/itip-formatter/itip-view.c:343
+#: ../plugins/itip-formatter/itip-view.c:342 ../storage/sunone-itip-view.c:299
+#, c-format
+msgid "%s through %s has published the following meeting information:"
+msgstr "%s thông qua « %s » đã công bố tin tức cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:345
+#: ../plugins/itip-formatter/itip-view.c:344 ../storage/sunone-itip-view.c:301
+#, c-format
+msgid "%s has published the following meeting information:"
+msgstr "%s đã công bố tin tức cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:350
+#: ../plugins/itip-formatter/itip-view.c:349 ../storage/sunone-itip-view.c:306
+#, c-format
+msgid "%s has delegated the following meeting to you:"
+msgstr "%s đã ủy nhiệm cuộc họp này cho bạn:"
+
+#: ../plugins/itip-formatter/itip-view.c:353
+#: ../plugins/itip-formatter/itip-view.c:352 ../storage/sunone-itip-view.c:309
+#, fuzzy, c-format
+msgid "%s through %s requests your presence at the following meeting:"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"%s thông qua « %s » yêu cầu sự hiện diện của bạn tại cuộc họp này:\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"%s thông qua %s yêu cầu sự hiện diện của bạn tại cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:355
+#: ../plugins/itip-formatter/itip-view.c:354 ../storage/sunone-itip-view.c:311
+#, c-format
+msgid "%s requests your presence at the following meeting:"
+msgstr "%s yêu cầu sự hiện diện của bạn tại cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:361
+#: ../plugins/itip-formatter/itip-view.c:360 ../storage/sunone-itip-view.c:320
+#, fuzzy, c-format
+msgid "%s through %s wishes to add to an existing meeting:"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"%s thông qua « %s » muốn thêm vào một cuộc họp đã có :\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"%s thông qua %s muốn thêm vào một cuộc họp đã có :"
+
+#: ../plugins/itip-formatter/itip-view.c:363
+#: ../plugins/itip-formatter/itip-view.c:362 ../storage/sunone-itip-view.c:322
+#, c-format
+msgid "%s wishes to add to an existing meeting:"
+msgstr "%s muốn thêm vào một cuộc họp đã có :"
+
+#: ../plugins/itip-formatter/itip-view.c:366
+#: ../plugins/itip-formatter/itip-view.c:365 ../storage/sunone-itip-view.c:325
+#, c-format
+msgid ""
+"%s wishes to receive the latest information for the following meeting:"
+msgstr "%s muốn nhận tin tức về cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:369
+#: ../plugins/itip-formatter/itip-view.c:368 ../storage/sunone-itip-view.c:328
+#, c-format
+msgid "%s has sent back the following meeting response:"
+msgstr "%s đã trả lời về cuộc họp:"
+
+#: ../plugins/itip-formatter/itip-view.c:373
+#: ../plugins/itip-formatter/itip-view.c:372 ../storage/sunone-itip-view.c:332
+#, fuzzy, c-format
+msgid "%s through %s has cancelled the following meeting:"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"%s thông qua « %s » đã hủy bỏ cuộc họp này:\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"%s thông qua %s đã hủy bỏ cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:375
+#: ../plugins/itip-formatter/itip-view.c:374 ../storage/sunone-itip-view.c:334
+#, c-format
+msgid "%s has cancelled the following meeting."
+msgstr "%s đã hủy bỏ cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:378
+#: ../plugins/itip-formatter/itip-view.c:377 ../storage/sunone-itip-view.c:337
+#, c-format
+msgid "%s has proposed the following meeting changes."
+msgstr "%s đã đề nghị những thay đổi cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:382
+#: ../plugins/itip-formatter/itip-view.c:381 ../storage/sunone-itip-view.c:341
+#, fuzzy, c-format
+msgid "%s through %s has declined the following meeting changes:"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"%s thông qua « %s » đã từ chối những thay đổi cuộc họp này:\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"%s thông qua %s đã từ chối những thay đổi cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:384
+#: ../plugins/itip-formatter/itip-view.c:383 ../storage/sunone-itip-view.c:343
+#, c-format
+msgid "%s has declined the following meeting changes."
+msgstr "%s đã từ chối những thay đổi cuộc họp này:"
+
+#: ../plugins/itip-formatter/itip-view.c:411
+#: ../plugins/itip-formatter/itip-view.c:410 ../storage/sunone-itip-view.c:370
+#, c-format
+msgid "%s through %s has published the following task:"
+msgstr "%s thông qua « %s » đã công bố tác vụ này:"
+
+#: ../plugins/itip-formatter/itip-view.c:413
+#: ../plugins/itip-formatter/itip-view.c:412 ../storage/sunone-itip-view.c:372
+#, c-format
+msgid "%s has published the following task:"
+msgstr "%s đã công bố tác vụ này:"
+
+#: ../plugins/itip-formatter/itip-view.c:418
+#: ../plugins/itip-formatter/itip-view.c:417 ../storage/sunone-itip-view.c:377
+#, c-format
+msgid "%s requests the assignment of %s to the following task:"
+msgstr "%s yêu cầu gán %s cho tác vụ này:"
+
+#: ../plugins/itip-formatter/itip-view.c:421
+#: ../plugins/itip-formatter/itip-view.c:420 ../storage/sunone-itip-view.c:380
+#, c-format
+msgid "%s through %s has assigned you a task:"
+msgstr "%s thông qua %s đã gán bạn cho tác vụ này:"
+
+#: ../plugins/itip-formatter/itip-view.c:423
+#: ../plugins/itip-formatter/itip-view.c:422 ../storage/sunone-itip-view.c:382
+#, c-format
+msgid "%s has assigned you a task:"
+msgstr "%s đã gán bạn cho tác vụ này:"
+
+#: ../plugins/itip-formatter/itip-view.c:429
+#: ../plugins/itip-formatter/itip-view.c:428 ../storage/sunone-itip-view.c:388
+#, c-format
+msgid "%s through %s wishes to add to an existing task:"
+msgstr "%s thông qua « %s » muốn thêm vào tác vụ đã có :"
+
+#: ../plugins/itip-formatter/itip-view.c:431
+#: ../plugins/itip-formatter/itip-view.c:430 ../storage/sunone-itip-view.c:390
+#, c-format
+msgid "%s wishes to add to an existing task:"
+msgstr "%s muốn thêm vào tác vụ đã có :"
+
+#: ../plugins/itip-formatter/itip-view.c:434
+#: ../plugins/itip-formatter/itip-view.c:433 ../storage/sunone-itip-view.c:393
+#, c-format
+msgid ""
+"%s wishes to receive the latest information for the following "
+"assigned task:"
+msgstr "%s muốn nhận tin tức về tác vụ đã gán này:"
+
+#: ../plugins/itip-formatter/itip-view.c:437
+#: ../plugins/itip-formatter/itip-view.c:436 ../storage/sunone-itip-view.c:396
+#, c-format
+msgid "%s has sent back the following assigned task response:"
+msgstr "%s đã trả lời tác vụ đã gán:"
+
+#: ../plugins/itip-formatter/itip-view.c:441
+#: ../plugins/itip-formatter/itip-view.c:440 ../storage/sunone-itip-view.c:400
+#, c-format
+msgid "%s through %s has cancelled the following assigned task:"
+msgstr "%s thông qua « %s » đã hủy bỏ tác vụ đã gán này:"
+
+#: ../plugins/itip-formatter/itip-view.c:443
+#: ../plugins/itip-formatter/itip-view.c:442 ../storage/sunone-itip-view.c:402
+#, c-format
+msgid "%s has cancelled the following assigned task:"
+msgstr "%s đã hủy bỏ tác vụ đã gán này:"
+
+#: ../plugins/itip-formatter/itip-view.c:446
+#: ../plugins/itip-formatter/itip-view.c:445 ../storage/sunone-itip-view.c:405
+#, c-format
+msgid "%s has proposed the following task assignment changes:"
+msgstr "%s đã đề nghị những thay đổi cách gán tác vụ này:"
+
+#: ../plugins/itip-formatter/itip-view.c:450
+#: ../plugins/itip-formatter/itip-view.c:449 ../storage/sunone-itip-view.c:409
+#, c-format
+msgid "%s through %s has declined the following assigned task:"
+msgstr "%s thông qua %s đã từ chối tác vụ đã gán này:"
+
+#: ../plugins/itip-formatter/itip-view.c:452
+#: ../plugins/itip-formatter/itip-view.c:451 ../storage/sunone-itip-view.c:411
+#, c-format
+msgid "%s has declined the following assigned task:"
+msgstr "%s đã từ chối tác vụ đã gán này:"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. Start time
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. Start time
+#: ../plugins/itip-formatter/itip-view.c:891
+#: ../plugins/itip-formatter/itip-view.c:890 ../storage/sunone-itip-view.c:735
+#, fuzzy
+msgid "Start time:"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Thời điểm đầu :\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"Giờ đầu :"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. End time
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. End time
+#: ../plugins/itip-formatter/itip-view.c:900
+#: ../plugins/itip-formatter/itip-view.c:899 ../storage/sunone-itip-view.c:744
+#, fuzzy
+msgid "End time:"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Thời điểm cuối:\n"
+"#-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#\n"
+"Giờ cuối:"
+
+#. #-#-#-#-# evolution-jescs.vi.po (evolution-jescs HEAD) #-#-#-#-#
+#. Comment
+#: ../plug-ins/common/xbm.c:1213 ../src/dialogs.c:322
+#: ogg123/oggvorbis_format.c:57 ogg123/oggvorbis_format.c:58
+#: ../objects/UML/class_dialog.c:308 ../objects/UML/class_dialog.c:997
+#: ../objects/UML/class_dialog.c:2057 ../objects/UML/class_dialog.c:2188
+#: ../storage/sunone-itip-view.c:760 ../storage/sunone-itip-view.c:805
+msgid "Comment:"
+msgstr "Ghi chú :"
+
+#: ../plugins/itip-formatter/itip-view.c:980
+#: ../plugins/itip-formatter/itip-view.c:979
+msgid "Send u_pdates to attendees"
+msgstr "Gởi thông báo cập nhật cho các người dự"
+
+#: ../plugins/itip-formatter/itip-view.c:989
+#: ../plugins/itip-formatter/itip-view.c:988
+msgid "A_pply to all instances"
+msgstr "Á_p dụng vào mọi lần"
+
+#: ../plugins/itip-formatter/org-gnome-itip-formatter.eplug.xml.h:1
+msgid "Displays text/calendar parts in messages."
+msgstr "Hiển thị phần văn bản/lịch trong thư."
+
+#: ../plugins/itip-formatter/org-gnome-itip-formatter.eplug.xml.h:2
+msgid "Itip Formatter"
+msgstr "Bộ định dạng Itip"
+
+#: ../plugins/itip-formatter/org-gnome-itip-formatter.error.xml.h:1
+msgid ""
+"&quot;{0}&quot; has delegated the meeting. Do you want to add the delegate "
+"&quot;{1}&quot; ?"
+msgstr ""
+"« {0} » đã ủy nhiệm cuộc họp này. Bạn có muốn thêm người ủy nhiệm « {1} » "
+"không?"
+
+#: ../plugins/itip-formatter/org-gnome-itip-formatter.error.xml.h:3
+msgid "This meeting has been delegated"
+msgstr "Cuộc họp này đã được ủy nhiệm."
+
+#: ../plugins/itip-formatter/org-gnome-itip-formatter.error.xml.h:4
+msgid ""
+"This response is not from a current attendee. Add the sender as an attendee?"
+msgstr ""
+"Hồi đáp này không phải đến từ một người dự hiện thời. Thêm người này như là "
+"người dự không?"
+
+#: ../plugins/mail-account-disable/mail-account-disable.c:47
+msgid "Proxy _Logout"
+msgstr "Đăng _xuất ủy nhiệm"
+
+#: ../plugins/mail-account-disable/org-gnome-mail-account-disable.eplug.xml.h:1
+msgid "Allows disabling of accounts."
+msgstr "Cho phép vô hiệu hóa tài khoản"
+
+#: ../plugins/mail-account-disable/org-gnome-mail-account-disable.eplug.xml.h:2
+msgid "Disable Account"
+msgstr "Vô hiệu hóa tài khoản"
+
+#: ../plugins/mail-remote/client.c:30
+#, c-format
+msgid "System error: %s"
+msgstr "Lỗi hệ thống: %s"
+
+#: ../plugins/mail-remote/client.c:32
+#, c-format
+msgid "Camel error: %s"
+msgstr "Lỗi Camel: %s"
+
+#: ../plugins/mail-remote/evolution-mail-store.c:476
+msgid "Account cannot send e-mail"
+msgstr "Tài khoản không gởi được thư điện tử."
+
+#: ../plugins/mail-remote/evolution-mail-store.c:605
+msgid "No store available"
+msgstr "Không có kho."
+
+#: ../plugins/mail-remote/org-gnome-evolution-mail-remote.eplug.xml.h:1
+msgid ""
+"A plugin which implements a CORBA interface for accessing mail data remotely."
+msgstr "Bộ cầm phít thực hiện giao diện CORBA để truy cập dữ liệu thư từ xa."
+
+#: ../plugins/mail-remote/org-gnome-evolution-mail-remote.eplug.xml.h:2
+msgid "Mail Remote"
+msgstr "Thư từ xa"
+
+#: ../plugins/mail-to-meeting/org-gnome-mail-to-meeting.eplug.xml.h:1
+msgid ""
+"A plugin which allows the creation of meetings from the contents of a mail "
+"message."
+msgstr ""
+"Một trình cầm phít cho phép tạo cuộc họp từ nội dung của một thư nào đó."
+
+#: ../plugins/mail-to-meeting/org-gnome-mail-to-meeting.eplug.xml.h:2
+msgid "Con_vert to Meeting"
+msgstr "_Chuyển đổi sang cuộc họp"
+
+#: ../plugins/mail-to-meeting/org-gnome-mail-to-meeting.eplug.xml.h:3
+msgid "Mail to meeting"
+msgstr "Gởi thư cho cuộc họp"
+
+#: ../plugins/mail-to-task/org-gnome-mail-to-task.eplug.xml.h:1
+msgid ""
+"A plugin which allows the creation of tasks from the contents of a mail "
+"message."
+msgstr "Bộ cầm phít cho phép tạo tác vụ từ nội dung thư."
+
+#: ../plugins/mail-to-task/org-gnome-mail-to-task.eplug.xml.h:2
+msgid "Con_vert to Task"
+msgstr "_Chuyển đổi sang Tác vụ"
+
+#: ../plugins/mail-to-task/org-gnome-mail-to-task.eplug.xml.h:3
+msgid "Mail to task"
+msgstr "Gởi thư cho tác vụ"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:1
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:1
+msgid "Contact list _owner"
+msgstr "Liên lạc với người _chủ hộp"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:2
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:5
+msgid "Get list _archive"
+msgstr "Gọi _kho hộp"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:3
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:6
+msgid "Get list _usage information"
+msgstr "Gọi thông tin _dùng hộp"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:4
+msgid "Mailing List Actions"
+msgstr "Hành động hộp thư chung"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:5
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:7
+msgid "Mailing _List"
+msgstr "_Hộp thư chung"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:6
+msgid ""
+"Provide actions for common mailing list commands (subscribe, "
+"unsubscribe, ...)."
+msgstr ""
+"Cung cấp hành động cho lệnh hộp thư chung thường (đăng ký, bỏ đăng ký ...)"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:7
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:11
+msgid "_Post message to list"
+msgstr "_Gởi thư cho hộp"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:8
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:12
+msgid "_Subscribe to list"
+msgstr "Đăng _ký với hộp"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.eplug.xml.h:9
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:13
+msgid "_Un-subscribe to list"
+msgstr "_Bỏ đăng ký với hộp"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:1
+msgid "Action not available"
+msgstr "Hành động không sẵn sàng"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:2
+msgid ""
+"An e-mail message will be sent to the URL \"{0}\". You can either send the "
+"message automatically, or see and change it first.\n"
+"\n"
+"You should receive an answer from the mailing list shortly after the message "
+"has been sent."
+msgstr ""
+"Một thư điện tử sẽ được gởi cho địa chỉ Mạng « {0} ». Bạn có thể hoặc tự "
+"động gởi thư đó, hoặc xem và sửa đổi nó trước tiên.\n"
+"\n"
+"Bạn nên nhận một trả lời từ hộp thư chung một chút sau khi gởi thư đó."
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:5
+msgid "Malformed header"
+msgstr "Dòng đầu sai dạng thức"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:6
+msgid "No e-mail action"
+msgstr "Không có hành động thư"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:7
+msgid "Posting not allowed"
+msgstr "Không cho phép gởi thư"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:8
+msgid ""
+"Posting to this mailing list is not allowed. Possibly, this is a read-only "
+"mailing list. Contact the list owner for details."
+msgstr ""
+"Không cho phép gởi thư cho hộp thư chung này. Có lẽ nó là hộp thư chung chỉ "
+"cho phép đọc. Hãy liên lạc với người chủ hộp thư chung, để tìm biết chi tiết."
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:9
+msgid "Send e-mail message to mailing list?"
+msgstr "Gởi thư cho hộp thư chung không?"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:10
+msgid ""
+"The action could not be performed. This means the header for this action did "
+"not contain any action we could handle.\n"
+"\n"
+"Header: {0}"
+msgstr ""
+"Không thực hiện được hành động đó. Có nghĩa là dòng đầu của hành động này "
+"không chứa hành động nào trình này có quản lý được.\n"
+"\n"
+"Dòng đầu : « {0} »"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:13
+msgid ""
+"The {0} header of this message is malformed and could not be processed.\n"
+"\n"
+"Header: {1}"
+msgstr ""
+"Dòng đầu « {0} » của thư này có dạng sai nên không xử lý được nó.\n"
+"\n"
+"Dòng đầu : « {1} »"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:16
+msgid ""
+"This message does not contain the header information required for this "
+"action."
+msgstr "Thư này không chứa thông tin đầu thư cần thiết cho hành động này."
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:17
+msgid "_Edit message"
+msgstr "_Sửa đổi thư"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.error.xml.h:18
+msgid "_Send message"
+msgstr "_Gởi thư"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:2
+msgid "Contact the owner of the mailing list this message belongs to"
+msgstr "Liên lạc với người chủ hộp thư chung của thư này."
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:3
+msgid "Get an archive of the list this message belongs to"
+msgstr "Gọi kho của hộp thư chung của thư này"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:4
+msgid "Get information about the usage of the list this message belongs to"
+msgstr "Gọi thông tin về cách sử dụng hộp thư chung của thư này"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:8
+msgid "Post a message to the mailing list this message belongs to"
+msgstr "Gởi thư cho hộp thư chung của thư này"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:9
+msgid "Subscribe to the mailing list this message belongs to"
+msgstr "Đăng ký với hộp thư chung của thư này"
+
+#: ../plugins/mailing-list-actions/org-gnome-mailing-list-actions.xml.h:10
+msgid "Unsubscribe to the mailing list this message belongs to"
+msgstr "Bỏ đăng ký với hộp thư chung của thư này"
+
+#: ../plugins/mark-calendar-offline/org-gnome-mark-calendar-offline.eplug.xml.h:1
+msgid "Mark calendar offline"
+msgstr "Nhãn lịch này ngoại tuyến"
+
+#: ../plugins/mark-calendar-offline/org-gnome-mark-calendar-offline.eplug.xml.h:2
+msgid "Marks the selected calendar for offline viewing."
+msgstr "Đánh dấu lịch đã chọn để xem khi ngoại tuyến."
+
+#: ../plugins/mark-calendar-offline/org-gnome-mark-calendar-offline.eplug.xml.h:3
+msgid "_Do not make this available offline"
+msgstr "_Không cho phép điều này sẵn sàng ngoại tuyến"
+
+#: ../plugins/mark-calendar-offline/org-gnome-mark-calendar-offline.eplug.xml.h:4
+msgid "_Mark Calendar for offline use"
+msgstr "_Nhãn lịch để dùng ngoại tuyến"
+
+#: ../plugins/mono/org-gnome-evolution-mono.eplug.xml.h:1
+msgid "A plugin which implements mono plugins."
+msgstr "Bộ cầm phít thực hiện bộ cầm phít một nguồn."
+
+#: ../plugins/mono/org-gnome-evolution-mono.eplug.xml.h:2
+msgid "Mono Loader"
+msgstr "Bộ tải điều một nguồn"
+
+#: ../plugins/new-mail-notify/org-gnome-new-mail-notify.eplug.xml.h:1
+msgid "Generates a D-BUS message when new mail arrives."
+msgstr "Tạo ra một thông điệp D-BUS khi nhận thư mới."
+
+#: ../plugins/new-mail-notify/org-gnome-new-mail-notify.eplug.xml.h:2
+msgid "New Mail Notification"
+msgstr "Thông báo Thư Mới"
+
+#: ../plugins/new-mail-notify/org-gnome-new-mail-notify.eplug.xml.h:3
+msgid "New mail notify"
+msgstr "Thông báo Thư Mới"
+
+#: ../plugins/plugin-manager/org-gnome-plugin-manager.eplug.xml.h:1
+msgid "A plugin for managing which plugins are enabled or disabled."
+msgstr "Một trình cầm phít quản lý trình cầm phít nào bật hay tắt."
+
+#: ../plugins/plugin-manager/org-gnome-plugin-manager.eplug.xml.h:2
+msgid "Plugin manager"
+msgstr "Bộ quản lý trình cầm phít"
+
+#: ../plugins/plugin-manager/org-gnome-plugin-manager.xml.h:1
+msgid "Enable and disable plugins"
+msgstr "Bật và tắt trình cầm phít"
+
+#: ../testing/test-handlers.c:482
+msgid "Plugins"
+msgstr "Bộ cầm phít"
+
+#: ../plugins/plugin-manager/plugin-manager.c:45
+msgid "Author(s)"
+msgstr "Tác giả"
+
+#: ../providers/odbc/gda-odbc-provider.c:1162
+msgid "Id"
+msgstr "ID"
+
+#: ../app/vectors/gimpvectors.c:229
+msgid "Path"
+msgstr "Đường dẫn"
+
+#: ../gnomeofficeui/go-plugin-manager-dialog.c:220
+msgid "Plugin Manager"
+msgstr "Bộ quản lý trình cầm phít"
+
+#: ../plugins/plugin-manager/plugin-manager.c:201
+msgid "Note: Some changes will not take effect until restart"
+msgstr ""
+"Ghi chú : một số thay đổi sẽ không hoạt động cho đến khi đã khởi động lại"
+
+#: ../gedit/dialogs/gedit-plugin-manager.c:55
+msgid "Plugin"
+msgstr "Bộ cầm phít"
+
+#: ../plugins/prefer-plain/org-gnome-prefer-plain.eplug.xml.h:1
+msgid ""
+"A test plugin which demonstrates a formatter plugin which lets you choose to "
+"disable HTML mails.\n"
+"\n"
+"This plugin is unsupported demonstration code only.\n"
+msgstr ""
+"Một trình cầm phít thử ra mà biểu diễn một trình cầm phít định dạng cho phép "
+"bạn chọn tắt thư HTML.\n"
+"\n"
+"Trình cầm phít này chỉ chứa mã biểu diễn không được hỗ trợ thôi.\n"
+
+#. but then we also need to create our own section frame
+#: ../plugins/prefer-plain/org-gnome-prefer-plain.eplug.xml.h:6
+msgid "Plain Text Mode"
+msgstr "Chế độ chữ thô"
+
+#: ../plugins/prefer-plain/org-gnome-prefer-plain.eplug.xml.h:7
+msgid "Prefer plain-text"
+msgstr "Thích chữ thô hơn"
+
+#: ../plugins/prefer-plain/prefer-plain.c:105
+msgid "Show HTML if present"
+msgstr "Hiển thị HTML nếu có"
+
+#: ../plugins/prefer-plain/prefer-plain.c:106
+msgid "Prefer PLAIN"
+msgstr "Thích chữ thô hơn"
+
+#: ../plugins/prefer-plain/prefer-plain.c:107
+msgid "Only ever show PLAIN"
+msgstr "Chỉ hiển thị chữ thô"
+
+#: ../plugins/prefer-plain/prefer-plain.c:150
+msgid "HTML Mode"
+msgstr "Chế độ HTML"
+
+#: ../plugins/print-message/org-gnome-print-message.eplug.xml.h:1
+msgid "Gives an option to print mail from composer"
+msgstr "Cung cấp tùy chọn để in thư từ bộ soạn"
+
+#: ../gtk/gtkstock.c:393
+msgid "Print Pre_view"
+msgstr "_Xem thử bản in"
+
+#: ../plugins/print-message/org-gnome-print-message.xml.h:2
+msgid "Prints the message"
+msgstr "In thư này"
+
+#: ../plugins/sa-junk-plugin/em-junk-filter.c:97
+msgid "Spamassassin (built-in)"
+msgstr "Spamassassin (sẵn có)"
+
+#: ../plugins/sa-junk-plugin/org-gnome-sa-junk-plugin.eplug.xml.h:1
+msgid "Sa junk-plugin"
+msgstr "Bộ cầm phít Thư rác SA"
+
+#: ../plugins/sa-junk-plugin/org-gnome-sa-junk-plugin.eplug.xml.h:2
+msgid "learns junk messages using spamd."
+msgstr "học biết phát hiện thư rác, dùng trình nền spamd"
+
+#: ../plugins/save-attachments/org-gnome-save-attachments.eplug.xml.h:1
+msgid "A plugin for saving all attachments or parts of a message at once."
+msgstr "Một trình cầm phít lưu mọi đính kèm hay phần thư đều cùng lúc."
+
+#: ../plugins/save-attachments/org-gnome-save-attachments.eplug.xml.h:2
+msgid "Save attachments"
+msgstr "Lưu đính kèm"
+
+#: ../plugins/save-attachments/org-gnome-save-attachments.xml.h:1
+msgid "Save Attachments ..."
+msgstr "Lưu các đính kèm..."
+
+#: ../plugins/save-attachments/org-gnome-save-attachments.xml.h:2
+msgid "Save all attachments"
+msgstr "Lưu mọi đính kèm"
+
+#: ../plugins/save-attachments/save-attachments.c:338
+#: ../plugins/save-attachments/save-attachments.c:331
+msgid "Select save base name"
+msgstr "Chọn tên cơ bản khi lưu"
+
+#: ../plugins/save-attachments/save-attachments.c:358
+msgid "MIME Type"
+msgstr "Kiểu MIME"
+
+#: ../plugins/save-calendar/csv-format.c:171
+msgid "%F %T"
+msgstr "%F %T"
+
+#: ../plugins/save-calendar/csv-format.c:385
+msgid "Uid"
+msgstr "UID"
+
+#: ../plugins/save-calendar/csv-format.c:387
+msgid "Description List"
+msgstr "Danh sách mô tả"
+
+#: ../plugins/save-calendar/csv-format.c:388
+msgid "Categories List"
+msgstr "Danh sách phân loại"
+
+#: ../plugins/save-calendar/csv-format.c:389
+msgid "Comment List"
+msgstr "Danh sách chú thích"
+
+#: ../plugins/save-calendar/csv-format.c:391
+#: ../mimedir/mimedir-vcomponent.c:438
+msgid "Created"
+msgstr "Đã tạo"
+
+#: ../plugins/save-calendar/csv-format.c:392
+msgid "Contact List"
+msgstr "Danh sách liên lạc"
+
+#: ../src/main-window.c:328 ../objects/FS/function.c:952
+#: ../widgets/gtk+.xml.in.h:170 app/envelope-box.c:1018
+#: app/sample-editor.c:261
+#, fuzzy
+msgid "Start"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Bắt đầu\n"
+"#-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#\n"
+"Bắt đầu\n"
+"#-#-#-#-# soundtracker-0.6.7.vi.po (soundtracker) #-#-#-#-#\n"
+"Đầu"
+
+#: ../partman-partitioning.templates:97 ../widgets/gtk+.xml.in.h:60
+#: app/envelope-box.c:1019 app/sample-editor.c:262
+#, fuzzy
+msgid "End"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Kết thúc\n"
+"#-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#\n"
+"Kết thúc\n"
+"#-#-#-#-# soundtracker-0.6.7.vi.po (soundtracker) #-#-#-#-#\n"
+"Cuối"
+
+#: ../plugins/save-calendar/csv-format.c:396
+msgid "percent Done"
+msgstr "Phần trăm xong"
+
+#: ../plugins/save-calendar/csv-format.c:398
+msgid "Url"
+msgstr "Địa chỉ Mạng"
+
+#: ../plugins/save-calendar/csv-format.c:399
+msgid "Attendees List"
+msgstr "Danh sách người dự"
+
+#: ../gtk/gtkfilechooserdefault.c:3803
+msgid "Modified"
+msgstr "Đã sửa đổi"
+
+#: ../plugins/save-calendar/csv-format.c:552
+msgid "Advanced options for the CSV format"
+msgstr "Tùy chọn cấp cao cho khuôn dạng CSV"
+
+#: ../plugins/save-calendar/csv-format.c:559
+msgid "Prepend a header"
+msgstr "Thêm dòng đầu vào đầu"
+
+#: ../plugins/save-calendar/csv-format.c:568
+msgid "Value delimiter:"
+msgstr "Điều định giới giá trị:"
+
+#: ../plugins/save-calendar/csv-format.c:574
+msgid "Record delimiter:"
+msgstr "Điều định giới mục ghi:"
+
+#: ../plugins/save-calendar/csv-format.c:580
+msgid "Encapsulate values with:"
+msgstr "Bao giá trị dùng:"
+
+#: ../plugins/save-calendar/csv-format.c:602
+msgid "Comma separated value format (.csv)"
+msgstr "Khuôn dạng giá trị định giới bằng dấu phẩy (.csv)"
+
+#: ../plugins/save-calendar/org-gnome-save-calendar.eplug.xml.h:1
+msgid "Save Selected"
+msgstr "Lưu các điều chọn"
+
+#: ../plugins/save-calendar/org-gnome-save-calendar.eplug.xml.h:2
+msgid "Save to _Disk"
+msgstr "Lưu vào _đĩa"
+
+#: ../plugins/save-calendar/org-gnome-save-calendar.eplug.xml.h:3
+msgid "Saves selected calendar or tasks list to disk."
+msgstr "Lưu các lịch hay tác vụ đều đã chọn vào đĩa."
+
+#: ../plugins/save-calendar/rdf-format.c:158
+msgid "%FT%T"
+msgstr "%FT%T"
+
+#: ../plugins/save-calendar/rdf-format.c:396
+msgid "RDF format (.rdf)"
+msgstr "Khuôn dạng RDF (.rdf)"
+
+#: ../plugins/save-calendar/save-calendar.c:181
+msgid "Select destination file"
+msgstr "Chọn tập tin đích"
+
+#: ../plugins/select-one-source/org-gnome-select-one-source.eplug.xml.h:1
+msgid "Select one source"
+msgstr "Chọn một nguồn"
+
+#: ../plugins/select-one-source/org-gnome-select-one-source.eplug.xml.h:2
+msgid "Selects a single calendar or task source for viewing."
+msgstr "Chọn chỉ một lịch hay nguồn tác vụ riêng lẻ để xem thôi."
+
+#: ../plugins/select-one-source/org-gnome-select-one-source.eplug.xml.h:3
+msgid "_Show only this Calendar"
+msgstr "_Hiện chỉ Lịch này"
+
+#: ../plugins/select-one-source/org-gnome-select-one-source.eplug.xml.h:4
+msgid "_Show only this Task List"
+msgstr "_Hiện chỉ danh sách tác vụ này"
+
+#: ../plugins/startup-wizard/org-gnome-evolution-startup-wizard.eplug.xml.h:1
+msgid "Startup wizard"
+msgstr "Phụ tá khởi động"
+
+#: ../plugins/startup-wizard/startup-wizard.c:85
+msgid "Evolution Setup Assistant"
+msgstr "Trợ tá thiết lập Evolution"
+
+#: ../plugins/startup-wizard/startup-wizard.c:88
+#: ../storage/exchange-autoconfig-wizard.glade.h:13 ../src/wizard.glade.h:29
+msgid "Welcome"
+msgstr "Chúc mừng bạn"
+
+#: ../plugins/startup-wizard/startup-wizard.c:89
+msgid ""
+"Welcome to Evolution. The next few screens will allow Evolution to connect "
+"to your email accounts, and to import files from other applications. \n"
+"\n"
+"Please click the \"Forward\" button to continue. "
+msgstr ""
+"Chào mừng bạn dùng Evolution. Những màn hình kế tiếp\n"
+"sẽ cho phép Evolution kết nối với các tài khoản thư của bạn,\n"
+"và để nhập các tập tin từ các ứng dụng khác.\n"
+"\n"
+"Vui lòng nhấn nút « Tiếp » để tiếp tục."
+
+#: ../plugins/startup-wizard/startup-wizard.c:140
+#: ../shell/e-shell-importer.c:147 ../shell/e-shell-importer.c:145
+msgid "Please select the information that you would like to import:"
+msgstr "Hãy chọn thông tin bạn muốn nhập:"
+
+#: ../plugins/startup-wizard/startup-wizard.c:154
+#: ../shell/e-shell-importer.c:400 ../shell/e-shell-importer.c:398
+#, c-format
+msgid "From %s:"
+msgstr "Từ %s:"
+
+#: ../plugins/startup-wizard/startup-wizard.c:234
+#: ../shell/e-shell-importer.c:511 ../shell/e-shell-importer.c:509
+msgid "Importing data."
+msgstr "Đang nhập dữ liệu."
+
+#: ../plugins/subject-thread/org-gnome-subject-thread.eplug.xml.h:1
+msgid "Indicates if threading of messages should fall back to subject."
+msgstr "Ngụ ý nếu cách tạo mạch thư nên trở về theo chủ đề"
+
+#: ../plugins/subject-thread/org-gnome-subject-thread.eplug.xml.h:2
+msgid "Subject Threading"
+msgstr "Tạo mạch theo chủ đề"
+
+#: ../plugins/subject-thread/org-gnome-subject-thread.eplug.xml.h:3
+msgid "Thread messages by subject"
+msgstr "Hiển thị mạch trong danh sách thư, theo Chủ đề"
+
+#. Create the checkbox we will display, complete with mnemonic that is unique in the dialog
+#: ../plugins/subject-thread/subject-thread.c:54
+msgid "Fall back to threading messages by sub_ject"
+msgstr "Trở về tạo mạch thư theo _chủ đề"
+
+#: ../shell/GNOME_Evolution_Shell.server.in.in.h:1
+msgid "Evolution Shell"
+msgstr "Hệ vỏ Evolution"
+
+#: ../shell/GNOME_Evolution_Shell.server.in.in.h:2
+msgid "Evolution Shell Config factory"
+msgstr "Bộ tạo cấu hình hệ vỏ Evolution"
+
+#: ../shell/GNOME_Evolution_Test.server.in.in.h:1
+msgid "Evolution Test"
+msgstr "Kiểm tra Evolution"
+
+#: ../shell/GNOME_Evolution_Test.server.in.in.h:2
+msgid "Evolution Test component"
+msgstr "Thành phần kiểm tra Evolution"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:1
+msgid "A GNOME Print description of the current printer settings"
+msgstr "Mô tả In GNOME của thiết lập máy in hiện có"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:2
+msgid "Configuration version"
+msgstr "Phiên bản cấu hình"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:3
+msgid "Default sidebar width"
+msgstr "Độ rộng thanh nách mặc định"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:4
+msgid "Default window height"
+msgstr "Độ cao cửa sổ mặc định"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:5
+msgid "Default window width"
+msgstr "Độ rộng cửa sổ mặc định"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:6
+msgid "ID or alias of the component to be shown by default at start-up."
+msgstr "Mặc định là hiển thị ID hay biệt hiệu của thành phần khi khởi động."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:7
+msgid "Last upgraded configuration version"
+msgstr "Phiên bản cấu hình Evolution đã cập nhật cuối cùng"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:8
+msgid ""
+"List of paths for the folders to be synchronized to disk for offline usage"
+msgstr ""
+"Danh sách đường dẫn cho những thư mục sẽ được đồng bộ với đĩa để sử dụng "
+"ngoại tuyến."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:9
+msgid "Printer settings"
+msgstr "Thiết lập máy in"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:10
+msgid "Skip development warning dialog"
+msgstr "Bỏ qua hộp thoại cảnh báo phát triển"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:11 ../shell/main.c:473
+#: ../shell/main.c:468
+msgid "Start in offline mode"
+msgstr "Khởi chạy trong chế độ ngoại tuyến"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:12
+msgid ""
+"The configuration version of Evolution, with major/minor/configuration level"
+msgstr "Phiên bản cấu hình của trình Evolution, với mức độ cấu hình lớn/nhỏ"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:13
+msgid "The default height for the main window, in pixels."
+msgstr "Độ cao mặc định của cửa sổ chính, theo điểm ảnh."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:14
+msgid "The default width for the main window, in pixels."
+msgstr "Độ rộng mặc định của cửa sổ chính, theo điểm ảnh."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:15
+msgid "The default width for the sidebar, in pixels."
+msgstr "Độ rộng mặc định của thanh nách, theo điểm ảnh."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:16
+msgid ""
+"The last upgraded configuration version of Evolution, with major/minor/"
+"configuration level"
+msgstr ""
+"Phiên bản cấu hình Evolution đã cập nhật cuối cùng, với mức độ cấu hình lớn/"
+"nhỏ"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:17
+msgid ""
+"The style of the window buttons. Can be \"text\", \"icons\", \"both\", "
+"\"toolbar\". If \"toolbar\" is set, the style of the buttons is determined "
+"by the GNOME toolbar setting."
+msgstr ""
+"Kiểu dáng mọi cái nút cửa sổ. Có thể là « chữ », « ảnh », « cả hai » hay « "
+"thanh công cụ ». Nếu lập « thanh công cụ » thì thiết lập thanh công cụ GNOME "
+"sẽ quyết định kiểu dáng các cái nút này."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:18
+msgid "Toolbar is visible"
+msgstr "Hiện thanh công cụ"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:19
+msgid "Whether Evolution will start up in offline mode instead of online mode."
+msgstr ""
+"Có nên khởi chạy trình Evolution trong chế độ ngoại tuyến thay vào chế độ "
+"trực tuyến hay không."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:20
+msgid "Whether the toolbar should be visible."
+msgstr "Có nên hiển thị thanh công cụ hay không."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:21
+msgid ""
+"Whether the warning dialog in development versions of Evolution is skipped."
+msgstr ""
+"Có nên bỏ qua hộp thoại cảnh báo trong phiên bản phát triển Evolution hay "
+"không."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:22
+msgid "Whether the window buttons should be visible."
+msgstr "Có nên hiển thị mọi cái nút trên cửa sổ hay không."
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:23
+msgid "Window button style"
+msgstr "Kiểu nút cửa sổ"
+
+#: ../shell/apps_evolution_shell.schemas.in.in.h:24
+msgid "Window buttons are visible"
+msgstr "Hiển thị nút cửa sổ"
+
+#: ../shell/e-active-connection-dialog.glade.h:1
+msgid "Active Connections"
+msgstr "Kết nối hoạt động"
+
+#: ../shell/e-active-connection-dialog.glade.h:2
+msgid "Active Connections"
+msgstr "Kết nối hoạt động"
+
+#: ../shell/e-active-connection-dialog.glade.h:3
+msgid "Click OK to close these connections and go offline"
+msgstr "Nhấn « Được » để đóng những kết nối này và chuyển sang ngoại tuyến."
+
+#: ../shell/e-shell-folder-title-bar.c:586
+#: ../shell/e-shell-folder-title-bar.c:587
+msgid "(Untitled)"
+msgstr "(Không tên)"
+
+#: ../shell/e-shell-importer.c:135 ../shell/e-shell-importer.c:133
+msgid "Choose the type of importer to run:"
+msgstr "Chọn kiểu bộ nhập cần chạy:"
+
+#: ../shell/e-shell-importer.c:138 ../shell/e-shell-importer.c:136
+msgid ""
+"Choose the file that you want to import into Evolution, and select what type "
+"of file it is from the list.\n"
+"\n"
+"You can select \"Automatic\" if you do not know, and Evolution will attempt "
+"to work it out."
+msgstr ""
+"Hãy chọn tập tin muốn nhập vào Evolution, và chọn kiểu tập tin từ danh sách "
+"dưới đây.\n"
+"\n"
+"Bạn có thể chọn « Tự động » nếu bạn không biết, và Evolution sẽ thử tự tìm "
+"cách hoạt động."
+
+#: ../shell/e-shell-importer.c:144 ../shell/e-shell-importer.c:142
+msgid "Choose the destination for this import"
+msgstr "Hãy chọn nhập vào đích nào"
+
+#: ../shell/e-shell-importer.c:150 ../shell/e-shell-importer.c:148
+msgid ""
+"Evolution checked for settings to import from the following\n"
+"applications: Pine, Netscape, Elm, iCalendar. No importable\n"
+"settings found. If you would like to\n"
+"try again, please click the \"Back\" button.\n"
+msgstr ""
+"Trình Evolution đã kiểm tra có thiết lập để nhập từ\n"
+"những ứng dụng theo đây: Pine, Netscape, Elm, iCalendar.\n"
+"Chưa tìm thấy thiết lập có thể nhập. Nếu bạn muốn thử lại,\n"
+"hãy nhắp vào cái nút « Lùi ».\n"
+
+#: ../shell/e-shell-importer.c:285 ../shell/e-shell-importer.c:283
+msgid "F_ilename:"
+msgstr "T_ên tập tin:"
+
+#: ../shell/e-shell-importer.c:290 ../shell/e-shell-importer.c:288
+#: ../src/zenity.glade.h:15
+msgid "Select a file"
+msgstr "Chọn tập tin"
+
+#: ../shell/e-shell-importer.c:302 ../shell/e-shell-importer.c:300
+msgid "File _type:"
+msgstr "_Kiểu tập tin:"
+
+#: ../shell/e-shell-importer.c:338 ../shell/e-shell-importer.c:336
+msgid "Import data and settings from _older programs"
+msgstr "_Nhập dữ liệu và thiết lập từ chương trình cũ"
+
+#: ../shell/e-shell-importer.c:341 ../shell/e-shell-importer.c:339
+msgid "Import a _single file"
+msgstr "Nhập một _tập tin đơn"
+
+#: ../plug-ins/common/postscript.c:3032 ../src/ImportDialog.cs:40
+msgid "_Import"
+msgstr "_Nhập"
+
+#: ../shell/e-shell-settings-dialog.c:318
+msgid "Evolution Settings"
+msgstr "Thiết lập Evolution"
+
+#: ../shell/e-shell-utils.c:118
+msgid "No folder name specified."
+msgstr "Chưa ghi rõ tên thư mục."
+
+#: ../shell/e-shell-utils.c:125
+msgid "Folder name cannot contain the Return character."
+msgstr "Tên thư mục không thể chứa ký tự Return."
+
+#: ../shell/e-shell-utils.c:131
+msgid "Folder name cannot contain the character \"/\"."
+msgstr "Tên thư mục không thể chứa ký tự sổ chéo « / »."
+
+#: ../shell/e-shell-utils.c:137
+msgid "Folder name cannot contain the character \"#\"."
+msgstr "Tên thư mục không thể chứa ký tự dấu thăng « # »."
+
+#: ../shell/e-shell-utils.c:143
+msgid "'.' and '..' are reserved folder names."
+msgstr ""
+"Dấu chấm « . » và hai dấu chấm tiếp tục « .. » là hai tên thư mục đặc biệt, "
+"được dành riêng."
+
+#: ../shell/e-shell-window-commands.c:71 ../shell/e-shell-window-commands.c:69
+msgid "The GNOME Pilot tools do not appear to be installed on this system."
+msgstr "Công cụ GNOME Pilot có lẽ chưa được cài đặt trên hệ thống này."
+
+#: ../shell/e-shell-window-commands.c:79 ../shell/e-shell-window-commands.c:77
+#, c-format
+msgid "Error executing %s."
+msgstr "Gặp lỗi khi thực hiện « %s »."
+
+#: ../shell/e-shell-window-commands.c:128
+#: ../shell/e-shell-window-commands.c:126
+msgid "Bug buddy is not installed."
+msgstr "Chưa cài đặt trình Bug Buddy (thông báo lỗi)."
+
+#: ../shell/e-shell-window-commands.c:136
+#: ../shell/e-shell-window-commands.c:134
+msgid "Bug buddy could not be run."
+msgstr "Không thể chạy trình Bug buddy."
+
+#: ../shell/e-shell-window-commands.c:547
+#: ../shell/e-shell-window-commands.c:521
+msgid "Groupware Suite"
+msgstr "Bộ phần mềm nhóm (Groupware)"
+
+#: ../shell/e-shell-window-commands.c:778
+#: ../shell/e-shell-window-commands.c:749
+msgid "_Work Online"
+msgstr "_Trực tuyến"
+
+#: ../ui/evolution.xml.h:47
+msgid "_Work Offline"
+msgstr "_Ngoại tuyến"
+
+#: ../shell/e-shell-window-commands.c:804
+#: ../shell/e-shell-window-commands.c:775
+msgid "Work Offline"
+msgstr "Ngoại tuyến"
+
+#: ../shell/e-shell-window.c:343
+msgid "Evolution is currently online. Click on this button to work offline."
+msgstr ""
+"Evolution hiện thời đang trực tuyến. Nhấn nút này để chuyển sang ngoại tuyến."
+
+#: ../shell/e-shell-window.c:351
+msgid "Evolution is in the process of going offline."
+msgstr "Evolution Ä‘ang chuyển sang ngoại tuyến." + +#: ../shell/e-shell-window.c:358 +msgid "Evolution is currently offline. Click on this button to work online." +msgstr "Evolution Ä‘ang ngoại tuyến. Nhấn nút này để chuyển sang trá»±c tuyến." + +#: ../shell/e-shell-window.c:735 ../shell/e-shell-window.c:724 +#, c-format +msgid "Switch to %s" +msgstr "Chuyển sang « %s »" + +#: ../shell/e-shell.c:625 ../shell/e-shell.c:620 +msgid "Uknown system error." +msgstr "Gặp lá»—i hệ thống lạ." + +# Variable and unit: do not translate/ biến và Ä‘Æ¡n vị: đừng dịch +#: ../shell/e-shell.c:823 ../shell/e-shell.c:824 ../shell/e-shell.c:822 +#, c-format +msgid "%ld KB" +msgstr "%ld KB" + +#: ../shell/e-shell.c:1257 ../shell/e-shell.c:1278 +msgid "Invalid arguments" +msgstr "Äối số không hợp lệ" + +#: ../shell/e-shell.c:1259 ../shell/e-shell.c:1280 +msgid "Cannot register on OAF" +msgstr "Không thể đăng ký vá»›i OAF" + +#: ../shell/e-shell.c:1261 ../shell/e-shell.c:1282 +msgid "Configuration Database not found" +msgstr "Không tìm thấy cÆ¡ sở dữ liệu cấu hình" + +#: ../shell/evolution-test-component.c:140 +msgid "New Test" +msgstr "Kiểm tra má»›i" + +#: ../plug-ins/script-fu/script-fu.c:282 +msgid "_Test" +msgstr "_Thá»­ ra" + +#: ../shell/evolution-test-component.c:142 +msgid "Create a new test item" +msgstr "Tạo mục kiểm tra má»›i" + +#: ../shell/import.glade.h:1 +msgid "Click \"Import\" to begin importing the file into Evolution. " +msgstr "Nhấn « Nhập » để bắt đầu nhập tập tin đó vào Evolution." + +#: ../shell/import.glade.h:2 +msgid "Evolution Import Assistant" +msgstr "Trợ tá nhập Evolution" + +#: ../shell/import.glade.h:3 +msgid "Import File" +msgstr "Nhập tập tin" + +#: ../shell/import.glade.h:4 +msgid "Import Location" +msgstr "Äịa Ä‘iểm nhập" + +#: ../shell/import.glade.h:5 +msgid "Importer Type" +msgstr "Loại bá»™ nhập" + +#: ../shell/import.glade.h:6 +msgid "Select Importers" +msgstr "Chá»n bá»™ nhập" + +#: ../shell/import.glade.h:7 +msgid "Select a File" +msgstr "Chá»n tập tin" + +#: ../shell/import.glade.h:8 +msgid "" +"Welcome to the Evolution Import Assistant.\n" +"With this assistant you will be guided through the process of\n" +"importing external files into Evolution." +msgstr "" +"Chào mừng dùng Trợ tá nhập Evolution.\n" +"Vá»›i trợ tá này, bạn sẽ được hÆ°á»›ng dẫn thông qua tiến trình\n" +"nhập các tập tin bên ngoài vào Evolution." + +#. Preview/Alpha/Beta version warning message +#: ../shell/main.c:230 +#, no-c-format +msgid "" +"Hi. Thanks for taking the time to download this preview release\n" +"of the Evolution groupware suite.\n" +"\n" +"This version of Evolution is not yet complete. It is getting close,\n" +"but some features are either unfinished or do not work properly.\n" +"\n" +"If you want a stable version of Evolution, we urge you to uninstall\n" +"this version, and install version %s instead.\n" +"\n" +"If you find bugs, please report them to us at bugzilla.gnome.org.\n" +"This product comes with no warranty and is not intended for\n" +"individuals prone to violent fits of anger.\n" +"\n" +"We hope that you enjoy the results of our hard work, and we\n" +"eagerly await your contributions!\n" +msgstr "" +"Xin chào. Xin cám Æ¡n đã mất thá»i gian để tải vá» bản dùng thá»­ này\n" +"của bá»™ phần má»m nhóm Evolution.\n" +"\n" +"Äây là phiên bản Evolution chÆ°a hoàn chỉnh. 
Nó gần hoàn chỉnh,\n" +"nhÆ°ng vẫn còn vài tính năng hoặc chÆ°a hoàn chỉnh,\n" +"hoặc chÆ°a làm việc đúng.\n" +"\n" +"Nếu bạn muốn dùng má»™t phiên bản ổn định của Evolution, chúng tôi thúc giục " +"bạn bá» cài đặt phiên bản này, và để cài đặt phiên bản %s thay vào đó.\n" +"\n" +"Nếu bạn tìm thấy lá»—i, vui lòng thông báo cho chúng tôi tại .\n" +"Sản phầm này không bảo đảm gì cả.\n" +"\n" +"Chúng tôi hy vá»ng bạn thích kết quả của quá trình làm việc của chúng tôi,\n" +"và chúng tôi háo hức chá» Ä‘á»i sá»± đóng góp của bạn!\n" + +#: ../shell/main.c:254 +msgid "" +"Thanks\n" +"The Evolution Team\n" +msgstr "" +"Xin cám Æ¡n\n" +"Nhóm Evolution\n" + +#: ../shell/main.c:261 +msgid "Don't tell me again" +msgstr "Äừng nói Ä‘iá»u này lần nữa" + +#: ../shell/main.c:471 ../shell/main.c:466 +msgid "Start Evolution activating the specified component" +msgstr "Báo trình Evolution hoạt hóa thành phần đã ghi rõ" + +#: ../shell/main.c:475 ../shell/main.c:470 +msgid "Start in online mode" +msgstr "Khởi chạy trong chế Ä‘á»™ trá»±c tuyến" + +#: ../shell/main.c:478 ../shell/main.c:473 +msgid "Forcibly shut down all Evolution components" +msgstr "Buá»™c kết thúc má»i thành phần Evolution" + +#: ../shell/main.c:482 ../shell/main.c:477 +msgid "Forcibly re-migrate from Evolution 1.4" +msgstr "Buá»™c tái nâng cấp từ Evolution 1.4" + +#: ../shell/main.c:485 ../shell/main.c:480 +msgid "Send the debugging output of all components to a file." +msgstr "Gởi thông tin gỡ lá»—i của má»i thành phần vào tập tin." + +#: ../shell/main.c:487 ../shell/main.c:482 +msgid "Disable loading of any plugins." +msgstr "Tắt tải trình cầm phít nào." + +#: ../shell/main.c:518 ../shell/main.c:513 +#, c-format +msgid "" +"%s: --online and --offline cannot be used together.\n" +" Use %s --help for more information.\n" +msgstr "" +"%s: hai tùy chá»n « --online » (trá»±c tuyến) và « --offline » (ngoại tuyến)\n" +"thì không thể được dùng chung.\n" +" Hãy dùng lệnh « %s --help » (trợ giúp) để biết thêm thông tin.\n" + +#: ../shell/shell.error.xml.h:1 +msgid "Are you sure you want to forget all remembered passwords?" +msgstr "Bạn có chắc muốn quên các mật khẩu đã nhá»› không?" + +#: ../shell/shell.error.xml.h:3 +msgid "Delete old data from version {0}?" +msgstr "Xoá bá» dữ liệu cÅ© từ phiên bản {0} không?" + +#: ../shell/shell.error.xml.h:4 +msgid "Evolution can not start." +msgstr "Evolution không khởi chạy được." + +#: ../shell/shell.error.xml.h:5 +msgid "" +"Forgetting your passwords will clear all remembered passwords. You will be " +"reprompted next time they are needed. " +msgstr "" +"Quên Ä‘i các mật khẩu đã nhá»› sẽ xoá hết mật khẩu đã nhá»›,. NhÆ° vậy bạn sẽ lại " +"được nhắc nhập mật khẩu lần sau cần thiết." + +#: ../shell/shell.error.xml.h:7 +msgid "Insufficient disk space for upgrade." +msgstr "Không có đủ sức chứa trên Ä‘Ä©a để nâng cấp." + +#: ../shell/shell.error.xml.h:8 +msgid "Really delete old data?" +msgstr "Bạn thật sá»± muốn xoá bá» dữ liệu cÅ© không?" 
+
+#: ../shell/shell.error.xml.h:9
+msgid ""
+"The entire contents of the &quot;evolution&quot; directory is about to be be "
+"permanently removed.\n"
+"\n"
+"It is suggested you manually verify that all of your mail, contact, and "
+"calendar data is present, and that this version of Evolution operates "
+"correctly before deleting this old data.\n"
+"\n"
+"Once deleted, you cannot downgrade to the previous version of Evolution "
+"without manual intervention.\n"
+msgstr ""
+"Sắp gỡ bỏ hoàn toàn toàn bộ nội dung của thư mục « evolution ».\n"
+"\n"
+"Đề nghị bạn tự kiểm chứng rằng tất cả dữ liệu thư, liên lạc và lịch đều có "
+"mặt, và phiên bản Evolution này hoạt động đúng, trước khi xoá bỏ dữ liệu cũ "
+"này.\n"
+"\n"
+"Một khi đã xoá bỏ, bạn không thể trở về phiên bản Evolution trước nếu không "
+"tự can thiệp.\n"
+
+#: ../shell/shell.error.xml.h:15
+msgid ""
+"The previous version of evolution stored its data in a different location.\n"
+"\n"
+"If you choose to remove this data, the entire contents of the &quot;"
+"evolution&quot; directory will be removed permanently. If you choose to "
+"keep this data, then you may manually remove the contents of &quot;"
+"evolution&quot; at your convenience.\n"
+msgstr ""
+"Phiên bản Evolution trước đã cất giữ dữ liệu tại vị trí khác.\n"
+"\n"
+"Nếu bạn chọn gỡ bỏ dữ liệu này thì sẽ gỡ bỏ hoàn toàn toàn bộ nội dung của "
+"thư mục « evolution ». Nếu bạn chọn giữ dữ liệu này thì có thể tự gỡ bỏ nội "
+"dung « evolution » lúc nào thuận tiện cho bạn.\n"
+
+#: ../shell/shell.error.xml.h:19
+msgid "Upgrade from previous version failed: {0}"
+msgstr "Việc nâng cấp từ phiên bản trước bị lỗi: {0}"
+
+#: ../shell/shell.error.xml.h:20
+msgid ""
+"Upgrading your data and settings will require upto {0} of disk space, but "
+"you only have {1} available.\n"
+"\n"
+"You will need to make more space available in your home directory before you "
+"can continue."
+msgstr ""
+"Nâng cấp các dữ liệu và thiết lập của bạn sẽ cần thiết đến {0} sức chứa trên "
+"đĩa, nhưng mà hiện thời bạn chỉ có {1} sẵn sàng.\n"
+"\n"
+"Như thế thì bạn sẽ phải giải phóng thêm chỗ trống trong thư mục chính của "
+"bạn trước khi có thể tiếp tục."
+
+#: ../shell/shell.error.xml.h:23
+msgid ""
+"Your system configuration does not match your Evolution configuration.\n"
+"\n"
+"Click help for details"
+msgstr ""
+"Cấu hình hệ thống bạn không khớp với cấu hình Evolution.\n"
+"\n"
+"Hãy nhắp vào « Trợ giúp » để xem chi tiết."
+
+#: ../shell/shell.error.xml.h:26
+msgid ""
+"Your system configuration does not match your Evolution configuration:\n"
+"\n"
+"{0}\n"
+"\n"
+"Click help for details."
+msgstr ""
+"Cấu hình hệ thống bạn không khớp với cấu hình Evolution:\n"
+"\n"
+"{0}\n"
+"\n"
+"Hãy nhắp vào « Trợ giúp » để xem chi tiết."
+
+#: ../shell/shell.error.xml.h:31
+msgid "_Forget"
+msgstr "_Quên"
+
+#: ../shell/shell.error.xml.h:32
+msgid "_Keep Data"
+msgstr "_Giữ dữ liệu"
+
+#: ../shell/shell.error.xml.h:33
+msgid "_Remind Me Later"
+msgstr "_Nhắc nhở lần sau"
+
+#: ../shell/shell.error.xml.h:34
+msgid ""
+"{1}\n"
+"\n"
+"If you choose to continue, you may not have access to some of your old "
+"data.\n"
+msgstr ""
+"{1}\n"
+"\n"
+"Nếu bạn chọn tiếp tục thì có lẽ sẽ không thể truy cập một phần dữ liệu cũ.\n"
+
+#: ../smime/gui/ca-trust-dialog.c:104 ../smime/gui/ca-trust-dialog.c:96
+#, c-format
+msgid ""
+"Certificate '%s' is a CA certificate.\n"
+"\n"
+"Edit trust settings:"
+msgstr ""
+"Chứng nhận « %s » là một chứng nhận CA (nhà cầm quyền chứng nhận).\n"
+"\n"
+"Sửa đổi thiết lập tin cậy:"
+
+#: ../smime/gui/cert-trust-dialog.c:153 ../smime/gui/cert-trust-dialog.c:145
+msgid ""
+"Because you trust the certificate authority that issued this certificate, "
+"then you trust the authenticity of this certificate unless otherwise "
+"indicated here"
+msgstr ""
+"Vì bạn tin cậy nhà cầm quyền đã phát hành chứng nhận này, nên bạn tin cậy "
+"tính xác thực của chứng nhận này, trừ khi có chỉ thị cách khác ở đây."
+
+#: ../smime/gui/cert-trust-dialog.c:157 ../smime/gui/cert-trust-dialog.c:149
+msgid ""
+"Because you do not trust the certificate authority that issued this "
+"certificate, then you do not trust the authenticity of this certificate "
+"unless otherwise indicated here"
+msgstr ""
+"Vì bạn không tin cậy nhà cầm quyền đã phát hành chứng nhận này, nên bạn "
+"không tin cậy tính xác thực của chứng nhận này, trừ khi có chỉ thị cách "
+"khác ở đây."
+
+#: ../smime/gui/certificate-manager.c:605
+msgid "Select a certificate to import..."
+msgstr "Hãy chọn chứng nhận cần nhập..."
+
+#: ../smime/gui/certificate-manager.c:692
+msgid "Certificate Name"
+msgstr "Tên chứng nhận"
+
+#: ../smime/gui/certificate-manager.c:492
+msgid "Purposes"
+msgstr "Mục đích"
+
+#: ../smime/lib/e-cert.c:569 ../smime/gui/certificate-manager.c:283
+msgid "Serial Number"
+msgstr "Số sản xuất"
+
+#: ../smime/gui/certificate-manager.c:293
+msgid "Expires"
+msgstr "Hết hạn"
+
+#: ../smime/gui/certificate-viewer.c:342 ../smime/gui/certificate-viewer.c:334
+#, c-format
+msgid "Certificate Viewer: %s"
+msgstr "Bộ xem chứng nhận: %s"
+
+#: ../smime/gui/component.c:45
+#, c-format
+msgid "Enter the password for `%s'"
+msgstr "Nhập mật khẩu cho « %s »"
+
+#. we're setting the password initially
+#: ../smime/gui/component.c:68
+msgid "Enter new password for certificate database"
+msgstr "Hãy nhập mật khẩu mới cho cơ sở dữ liệu chứng nhận"
+
+#: ../smime/gui/component.c:70
+msgid "Enter new password"
+msgstr "Hãy nhập mật khẩu mới"
+
+#. FIXME: add serial no, validity date, uses
+#: ../smime/gui/e-cert-selector.c:121 ../smime/gui/e-cert-selector.c:119
+#, c-format
+msgid ""
+"Issued to:\n"
+" Subject: %s\n"
+msgstr ""
+"Phát hành cho:\n"
+" Chủ đề: %s\n"
+
+#: ../smime/gui/e-cert-selector.c:122 ../smime/gui/e-cert-selector.c:120
+#, c-format
+msgid ""
+"Issued by:\n"
+" Subject: %s\n"
+msgstr ""
+"Phát hành bởi:\n"
+" Chủ đề: %s\n"
+
+#: ../smime/gui/e-cert-selector.c:174 ../smime/gui/e-cert-selector.c:167
+msgid "Select certificate"
+msgstr "Chọn chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:1
+msgid ""
+msgstr ""
+
+#: ../smime/gui/smime-ui.glade.h:2
+msgid "Certificate Fields"
+msgstr "Trường chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:3
+msgid "Certificate Hierarchy"
+msgstr "Cây chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:4
+msgid "Field Value"
+msgstr "Giá trị trường"
+
+#: ../smime/gui/smime-ui.glade.h:5
+msgid "Fingerprints"
+msgstr "Dấu điểm chỉ"
+
+#: ../smime/gui/smime-ui.glade.h:6
+msgid "Issued By"
+msgstr "Phát hành bởi"
+
+#: ../smime/gui/smime-ui.glade.h:7
+msgid "Issued To"
+msgstr "Phát hành cho"
+
+#: ../smime/gui/smime-ui.glade.h:8
+msgid "This certificate has been verified for the following uses:"
+msgstr "Đã xác minh chứng nhận này cho những cách sử dụng theo đây:"
+
+#: ../smime/gui/smime-ui.glade.h:9
+msgid "Validity"
+msgstr "Hợp lệ"
+
+#: ../smime/gui/smime-ui.glade.h:10
+msgid "Authorities"
+msgstr "Nhà cầm quyền"
+
+#. #-#-#-#-# Compendium04.po (NAME) #-#-#-#-#
+#. "A duplicate copy of a program, a disk, or data, made either for archiving purposes or for safeguarding valuable files from loss should the active copy be damaged or destroyed."
+#: ../smime/gui/smime-ui.glade.h:11
+msgid "Backup"
+msgstr "Sao lưu"
+
+#: ../smime/gui/smime-ui.glade.h:12
+msgid "Backup All"
+msgstr "Sao lưu tất cả"
+
+#: ../smime/gui/smime-ui.glade.h:13
+msgid ""
+"Before trusting this CA for any purpose, you should examine its certificate "
+"and its policy and procedures (if available)."
+msgstr ""
+"Trước khi tin cậy nhà cầm quyền này để làm gì thì bạn nên kiểm tra chứng "
+"nhận của nó, và chính sách và thủ tục của nó (nếu công bố)."
+
+#: ../smime/gui/smime-ui.glade.h:14 ../smime/lib/e-cert.c:1076
+msgid "Certificate"
+msgstr "Chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:15
+msgid "Certificate Authority Trust"
+msgstr "Độ tin cậy nhà cầm quyền chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:16
+msgid "Certificate details"
+msgstr "Chi tiết chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:17
+msgid "Certificates Table"
+msgstr "Bảng chứng nhận"
+
+#: ../smime/gui/smime-ui.glade.h:18
+msgid "Common Name (CN)"
+msgstr "Tên chung (TC)"
+
+#: ../smime/gui/smime-ui.glade.h:19
+msgid "Contact Certificates"
+msgstr "Chứng nhận liên lạc"
+
+#: ../smime/gui/smime-ui.glade.h:21
+msgid "Do not trust the authenticity of this certificate"
+msgstr "Đừng tin cậy tính xác thực của chứng nhận này."
+
+#: ../smime/gui/smime-ui.glade.h:22
+msgid "Dummy window only"
+msgstr "Chỉ cửa sổ giả"
+
+#: ../glom/utility_widgets/adddel/adddel.cc:205
+msgid "Edit"
+msgstr "Hiệu chỉnh"
+
+#: ../smime/gui/smime-ui.glade.h:24
+msgid "Email Certificate Trust Settings"
+msgstr "Thiết lập Tin cậy Chứng nhận Thư điện tử"
+
+#: ../smime/gui/smime-ui.glade.h:25
+msgid "Email Recipient Certificate"
+msgstr "Chứng nhận Người nhận Thư điện tử"
+
+#: ../smime/gui/smime-ui.glade.h:26
+msgid "Email Signer Certificate"
+msgstr "Chứng nhận Ký tên Thư điện tử"
+
+#: ../smime/gui/smime-ui.glade.h:27
+msgid "Expires On"
+msgstr "Hết hạn vào ngày"
+
+#: ../objects/FS/function.c:684 ../objects/FS/function.c:682 import_gui.c:265
+#: import_gui.c:304 import_gui.c:417 import_gui.c:497 jpilot.c:510
+msgid "Import"
+msgstr "Nhập"
+
+#: ../smime/gui/smime-ui.glade.h:30
+msgid "Issued On"
+msgstr "Phát hành vào ngày"
+
+#: ../smime/gui/smime-ui.glade.h:31
+msgid "MD5 Fingerprint"
+msgstr "Dấu điểm chỉ MD5"
+
+#: ../smime/gui/smime-ui.glade.h:32
+msgid "Organization (O)"
+msgstr "Tổ chức (T)"
+
+#: ../smime/gui/smime-ui.glade.h:33
+msgid "Organizational Unit (OU)"
+msgstr "Đơn vị Tổ chức (ĐT)"
+
+#: ../smime/gui/smime-ui.glade.h:34
+msgid "SHA1 Fingerprint"
+msgstr "Dấu điểm chỉ SHA1"
+
+#: ../smime/gui/smime-ui.glade.h:35 ../smime/lib/e-cert.c:818
+msgid "SSL Client Certificate"
+msgstr "Chứng nhận khách SSL"
+
+#: ../smime/gui/smime-ui.glade.h:36 ../smime/lib/e-cert.c:822
+msgid "SSL Server Certificate"
+msgstr "Chứng nhận máy phục vụ SSL"
+
+#: ../smime/gui/smime-ui.glade.h:38
+msgid "Trust the authenticity of this certificate"
+msgstr "Tin cậy tính xác thực của chứng nhận này"
+
+#: ../smime/gui/smime-ui.glade.h:39
+msgid "Trust this CA to identify email users."
+msgstr ""
+"Tin cậy nhà cầm quyền chứng nhận này để nhận diện người dùng thư điện tử."
+
+#: ../smime/gui/smime-ui.glade.h:40
+msgid "Trust this CA to identify software developers."
+msgstr ""
+"Tin cậy nhà cầm quyền chứng nhận này để nhận diện người phát triển phần mềm."
+
+#: ../smime/gui/smime-ui.glade.h:41
+msgid "Trust this CA to identify web sites."
+msgstr "Tin cậy nhà cầm quyền chứng nhận này để nhận diện nơi Mạng."
+
+#: ../src/f-spot.glade.h:148 ../app/actions/actions.c:199 ../list-ui.c:538
+#: ../glom/mode_design/users/dialog_groups_list.cc:70
+#: ../libgda/gda-server-provider-extra.c:164
+msgid "View"
+msgstr "Xem"
+
+#: ../smime/gui/smime-ui.glade.h:43
+msgid "You have certificates from these organizations that identify you:"
+msgstr "Bạn có chứng nhận từ những tổ chức này có nhận diện bạn:"
+
+#: ../smime/gui/smime-ui.glade.h:44
+msgid ""
+"You have certificates on file that identify these certificate authorities:"
+msgstr "Bạn đã lưu chứng nhận có nhận diện những nhà cầm quyền chứng nhận này:"
+
+#: ../smime/gui/smime-ui.glade.h:45
+msgid "You have certificates on file that identify these people:"
+msgstr "Bạn đã lưu chứng nhận có nhận diện những người này:"
+
+#: ../smime/gui/smime-ui.glade.h:46
+msgid "Your Certificates"
+msgstr "Chứng nhận của bạn"
+
+#: ../smime/gui/smime-ui.glade.h:47
+msgid "_Edit CA Trust"
+msgstr "_Sửa đổi tính tin cậy CA"
+
+#. XXX we shouldn't be popping up dialogs in this code.
+#: ../smime/lib/e-cert-db.c:654 ../smime/lib/e-cert-db.c:651
+msgid "Certificate already exists"
+msgstr "Chứng nhận này đã có"
+
+#: ../smime/lib/e-cert.c:238 ../smime/lib/e-cert.c:248
+msgid "%d/%m/%Y"
+msgstr "%d/%m/%Y"
+
+#: src/fe-gtk/plugingui.c:74 src/query.c:164
+msgid "Version"
+msgstr "Phiên bản"
+
+#: ../smime/lib/e-cert.c:545
+msgid "Version 1"
+msgstr "Phiên bản 1"
+
+#: ../smime/lib/e-cert.c:548
+msgid "Version 2"
+msgstr "Phiên bản 2"
+
+#: ../smime/lib/e-cert.c:551
+msgid "Version 3"
+msgstr "Phiên bản 3"
+
+#: ../smime/lib/e-cert.c:633
+msgid "PKCS #1 MD2 With RSA Encryption"
+msgstr "PKCS #1 MD2 với mật mã RSA"
+
+#: ../smime/lib/e-cert.c:636
+msgid "PKCS #1 MD5 With RSA Encryption"
+msgstr "PKCS #1 MD5 với mật mã RSA"
+
+#: ../smime/lib/e-cert.c:639
+msgid "PKCS #1 SHA-1 With RSA Encryption"
+msgstr "PKCS #1 SHA-1 với mật mã RSA"
+
+#: ../src/red_appwindow.py:92
+msgid "C"
+msgstr "C"
+
+#: ../smime/lib/e-cert.c:645
+msgid "CN"
+msgstr "TC"
+
+#: ../smime/lib/e-cert.c:648
+msgid "OU"
+msgstr "ĐT"
+
+#: ../smime/lib/e-cert.c:651
+msgid "O"
+msgstr "T"
+
+#: ../smime/lib/e-cert.c:654 ../gnopi/cmdmapui.c:154
+msgid "L"
+msgstr "L"
+
+#: ../smime/lib/e-cert.c:657
+msgid "DN"
+msgstr "TP"
+
+#: ../smime/lib/e-cert.c:660
+msgid "DC"
+msgstr "DC"
+
+#: ../smime/lib/e-cert.c:663
+msgid "ST"
+msgstr "ST"
+
+#: ../smime/lib/e-cert.c:666
+msgid "PKCS #1 RSA Encryption"
+msgstr "Mật mã RSA PKCS #1"
+
+#: ../smime/lib/e-cert.c:669
+msgid "Certificate Key Usage"
+msgstr "Cách dùng khoá chứng nhận"
+
+#: ../smime/lib/e-cert.c:672
+msgid "Netscape Certificate Type"
+msgstr "Loại chứng nhận Netscape"
+
+#: ../smime/lib/e-cert.c:675
+msgid "Certificate Authority Key Identifier"
+msgstr "Dấu hiệu nhận diện khoá nhà cầm quyền chứng nhận"
+
+#: ../providers/evolution/gda-calendar-model.c:60
+msgid "UID"
+msgstr "UID"
+
+#: ../smime/lib/e-cert.c:687
+#, c-format
+msgid "Object Identifier (%s)"
+msgstr "Dấu hiệu nhận diện đối tượng (%s)"
+
+#: ../smime/lib/e-cert.c:738
+msgid "Algorithm Identifier"
+msgstr "Dấu hiệu nhận diện thuật toán"
+
+#: ../smime/lib/e-cert.c:746
+msgid "Algorithm Parameters"
+msgstr "Tham số thuật toán"
+
+#: ../smime/lib/e-cert.c:768
+msgid "Subject Public Key Info"
+msgstr "Thông tin khoá công nhà nhận"
+
+#: ../smime/lib/e-cert.c:773
+msgid "Subject Public Key Algorithm"
+msgstr "Thuật toán khoá công nhà nhận"
+
+#: ../smime/lib/e-cert.c:788
+msgid "Subject's Public Key"
+msgstr "Khoá công nhà nhận"
+
+#: ../smime/lib/e-cert.c:809 ../smime/lib/e-cert.c:858
+msgid "Error: Unable to process extension"
+msgstr "Lỗi: không thể xử lý phần mở rộng"
+
+#: ../smime/lib/e-cert.c:830 ../smime/lib/e-cert.c:842
+msgid "Object Signer"
+msgstr "Bộ ký nhận đối tượng"
+
+#: ../smime/lib/e-cert.c:834
+msgid "SSL Certificate Authority"
+msgstr "Nhà cầm quyền chứng nhận SSL"
+
+#: ../smime/lib/e-cert.c:838
+msgid "Email Certificate Authority"
+msgstr "Nhà cầm quyền chứng nhận thư điện tử"
+
+#: ../smime/lib/e-cert.c:866
+msgid "Signing"
+msgstr "Ký nhận"
+
+#: ../smime/lib/e-cert.c:870
+msgid "Non-repudiation"
+msgstr "Không từ chối"
+
+#: ../smime/lib/e-cert.c:874
+msgid "Key Encipherment"
+msgstr "Mật mã hóa khoá"
+
+#: ../smime/lib/e-cert.c:878
+msgid "Data Encipherment"
+msgstr "Mật mã hóa dữ liệu"
+
+#: ../smime/lib/e-cert.c:882
+msgid "Key Agreement"
+msgstr "Chấp thuận khoá"
+
+#: ../smime/lib/e-cert.c:886
+msgid "Certificate Signer"
+msgstr "Người ký chứng nhận"
+
+#: ../smime/lib/e-cert.c:890
+msgid "CRL Signer"
+msgstr "Người ký CRL"
+
+#: ../smime/lib/e-cert.c:938
+msgid "Critical"
+msgstr "Nghiêm trọng"
+
+#: ../smime/lib/e-cert.c:940 ../smime/lib/e-cert.c:943
+msgid "Not Critical"
+msgstr "Không nghiêm trọng"
+
+#: ../smime/lib/e-cert.c:964 ../app/widgets/gimpfileprocview.c:253
+#: ../extensions/extensions-manager-ui/extensions-manager-ui.glade.h:2
+#: ../ui/mlview-plugins-window.glade.h:2
+msgid "Extensions"
+msgstr "Phần mở rộng"
+
+# Variable: do not translate/ biến: đừng dịch
+#: ../smime/lib/e-cert.c:1035
+#, c-format
+msgid "%s = %s"
+msgstr "%s = %s"
+
+#: ../smime/lib/e-cert.c:1091 ../smime/lib/e-cert.c:1211
+msgid "Certificate Signature Algorithm"
+msgstr "Thuật toán chữ ký chứng nhận"
+
+#: ../smime/lib/e-cert.c:1100
+msgid "Issuer"
+msgstr "Nhà phát hành"
+
+#: ../smime/lib/e-cert.c:1154
+msgid "Issuer Unique ID"
+msgstr "Thông tin độc nhất nhận biết nhà phát hành"
+
+#: ../smime/lib/e-cert.c:1173
+msgid "Subject Unique ID"
+msgstr "Thông tin độc nhất nhận biết nhà nhận"
+
+#: ../smime/lib/e-cert.c:1216
+msgid "Certificate Signature Value"
+msgstr "Giá trị chữ ký chứng nhận"
+
+#: ../smime/lib/e-pkcs12.c:266 ../smime/lib/e-pkcs12.c:264
+msgid "PKCS12 File Password"
+msgstr "Mật khẩu tập tin PKCS12"
+
+#: ../smime/lib/e-pkcs12.c:266 ../smime/lib/e-pkcs12.c:264
+msgid "Enter password for PKCS12 file:"
+msgstr "Nhập mật khẩu cho tập tin PKCS12:"
+
+#: ../smime/lib/e-pkcs12.c:365 ../smime/lib/e-pkcs12.c:363
+msgid "Imported Certificate"
+msgstr "Chứng nhận đã nhập"
+
+#: ../tools/evolution-launch-composer.c:324
+msgid "An attachment to add."
+msgstr "Đính kèm cần thêm."
+
+#: ../tools/evolution-launch-composer.c:325
+msgid "Content type of the attachment."
+msgstr "Kiểu nội dung của đính kèm."
+
+#: ../tools/evolution-launch-composer.c:326
+msgid "The filename to display in the mail."
+msgstr "Tên tập tin cần hiển thị trong thư."
+
+#: ../tools/evolution-launch-composer.c:327
+msgid "Description of the attachment."
+msgstr "Mô tả đính kèm."
+
+#: ../tools/evolution-launch-composer.c:328
+msgid "Mark attachment to be shown inline by default."
+msgstr "Mặc định là đánh dấu đính kèm sẽ được hiển thị trực tiếp."
+
+#: ../tools/evolution-launch-composer.c:329
+msgid "Default subject for the message."
+msgstr "Chủ đề mặc định cho thư đó."
+
+#: ../tools/killev.c:61
+#, c-format
+msgid "Could not execute '%s': %s\n"
+msgstr "Không thể thực hiện « %s »: %s\n"
+
+#: ../tools/killev.c:76
+#, c-format
+msgid "Shutting down %s (%s)\n"
+msgstr "Đang tắt %s (%s)\n"
+
+#: ../ui/evolution-addressbook.xml.h:2
+msgid "Contact _Preview"
+msgstr "_Xem thử liên lạc"
+
+#: ../ui/evolution-addressbook.xml.h:4 ../ui/evolution-addressbook.xml.h:3
+msgid "Copy Selected Contacts to Another Folder..."
+msgstr "Chép các liên lạc được chọn sang thư mục khác..."
+
+#: ../ui/evolution-addressbook.xml.h:5 ../ui/evolution-calendar.xml.h:2
+#: ../libgnomeui/gnome-app-helper.c:161
+msgid "Copy the selection"
+msgstr "Chép đoạn đã chọn"
+
+#: ../ui/evolution-addressbook.xml.h:6
+msgid "Copy to Folder..."
+msgstr "Chép vào thư mục..."
+
+#: ../glade/gbwidget.c:1859 po/silky.glade.h:87 app/sample-editor.c:449
+msgid "Cut"
+msgstr "Cắt"
+
+#: ../ui/evolution-addressbook.xml.h:8 ../ui/evolution-calendar.xml.h:3
+msgid "Cut the selection"
+msgstr "Cắt vùng chọn"
+
+#: ../ui/evolution-addressbook.xml.h:10 ../ui/evolution-addressbook.xml.h:9
+msgid "Delete selected contacts"
+msgstr "Xoá bỏ các liên lạc được chọn"
+
+#: ../ui/evolution-addressbook.xml.h:10
+msgid "Forward Contact"
+msgstr "Chuyển tiếp liên lạc"
+
+#: ../ui/evolution-addressbook.xml.h:12 ../ui/evolution-addressbook.xml.h:11
+msgid "Move Selected Contacts to Another Folder..."
+msgstr "Chuyển các liên lạc được chọn sang thư mục khác..."
+
+#: ../ui/evolution-addressbook.xml.h:13 ../ui/evolution-addressbook.xml.h:12
+msgid "Move to Folder..."
+msgstr "Chuyển sang thư mục..."
+
+#: ../plug-ins/imagemap/imap_cmd_paste.c:51 ../glade/gbwidget.c:1875
+#: ../glade/property.c:904 po/silky.glade.h:139 app/sample-editor.c:467
+msgid "Paste"
+msgstr "Dán"
+
+#: ../ui/evolution-addressbook.xml.h:15 ../ui/evolution-calendar.xml.h:16
+#: ../libgnomeui/gnome-app-helper.c:166
+msgid "Paste the clipboard"
+msgstr "Dán bảng tạm"
+
+#: ../ui/evolution-addressbook.xml.h:16 ../ui/evolution-addressbook.xml.h:15
+msgid "Previews the contacts to be printed"
+msgstr "Xem trước liên lạc cần in"
+
+#: ../ui/evolution-addressbook.xml.h:19 ../ui/evolution-addressbook.xml.h:18
+msgid "Print selected contacts"
+msgstr "In các liên lạc được chọn"
+
+#: ../ui/evolution-addressbook.xml.h:21 ../ui/evolution-addressbook.xml.h:20
+msgid "Save selected contacts as a VCard."
+msgstr "Lưu các liên lạc được chọn là vCard."
+
+#: ../plug-ins/imagemap/imap_cmd_select_all.c:51
+#: ../plug-ins/rcm/rcm_stock.c:41
+msgid "Select All"
+msgstr "Chọn hết"
+
+#: ../ui/evolution-addressbook.xml.h:23 ../ui/evolution-addressbook.xml.h:22
+msgid "Select all contacts"
+msgstr "Chọn mọi liên lạc"
+
+#: ../ui/evolution-addressbook.xml.h:24 ../ui/evolution-addressbook.xml.h:23
+msgid "Send a message to the selected contacts."
+msgstr "Gởi thư cho các liên lạc được chọn."
+
+#: ../ui/evolution-addressbook.xml.h:25 ../ui/evolution-addressbook.xml.h:24
+msgid "Send message to contact"
+msgstr "Gởi thư cho liên lạc"
+
+#: ../ui/evolution-addressbook.xml.h:26 ../ui/evolution-addressbook.xml.h:25
+msgid "Send selected contacts to another person."
+msgstr "Gởi các liên lạc được chọn cho người khác."
+
+#: ../ui/evolution-addressbook.xml.h:27 ../ui/evolution-addressbook.xml.h:26
+msgid "Show contact preview window"
+msgstr "Hiện khung xem trước liên lạc"
+
+#: ../sheets/SDL.sheet.in.h:19 app/gui.c:1959
+msgid "Stop"
+msgstr "Dừng"
+
+#: ../ui/evolution-addressbook.xml.h:29 ../ui/evolution-addressbook.xml.h:28
+msgid "Stop Loading"
+msgstr "Ngưng tải"
+
+#: ../ui/evolution-addressbook.xml.h:30 ../ui/evolution-addressbook.xml.h:29
+msgid "View the current contact"
+msgstr "Xem liên lạc hiện thời"
+
+#: ../extensions/actions/ephy-actions-extension.c:112
+msgid "_Actions"
+msgstr "_Hành động"
+
+#: ../ui/evolution-addressbook.xml.h:36 ../ui/evolution-addressbook.xml.h:35
+msgid "_Forward Contact..."
+msgstr "_Chuyển tiếp liên lạc..."
+
+#: ../ui/evolution-addressbook.xml.h:43
+msgid "_Send Message to Contact..."
+msgstr "_Gởi thư tới liên lạc..."
+
+#: ../ui/evolution-calendar.xml.h:4 ../gtk/gtkcalendar.c:433
+#: ../gncal/calendar-month-item.c:285 ../gncal/calendar-year-item.c:223
+#: ../libegg/egg-datetime.c:305 ../src/libegg/egg-datetime.c:305
+#: ../Pyblio/GnomeUI/Editor.py:312 src/settings.c:1305 datebook_gui.c:4627
+msgid "Day"
+msgstr "Ngày"
+
+#: ../ui/evolution-calendar.xml.h:6
+msgid "Delete All Occurrences"
+msgstr "Xoá bỏ mọi lần"
+
+#: ../ui/evolution-calendar.xml.h:7
+msgid "Delete the appointment"
+msgstr "Xoá bỏ cuộc hẹn"
+
+#: ../ui/evolution-calendar.xml.h:8
+msgid "Delete this Occurrence"
+msgstr "Xoá bỏ lần này"
+
+#: ../ui/evolution-calendar.xml.h:9
+msgid "Delete this occurrence"
+msgstr "Xoá bỏ lần này"
+
+#: ../ui/evolution-calendar.xml.h:10
+msgid "Go To"
+msgstr "Đi tới"
+
+#: ../ui/evolution-calendar.xml.h:11 ../src/ephy-toolbar.c:267
+#: src/galeon-navigation-button.c:159
+msgid "Go back"
+msgstr "Lùi lại"
+
+#: ../ui/evolution-calendar.xml.h:12
+msgid "Go forward"
+msgstr "Đi tiếp"
+
+#: ../glom/mode_data/notebook_data.cc:28 ../glom/mode_find/notebook_find.cc:27
+#: ../widgets/gtk+.xml.in.h:116 ../src/form-editor/palette.cc:92
+#: ../src/form-editor/widget-util.cc:209 ../src/orca/rolenames.py:298
+msgid "List"
+msgstr "Danh sách"
+
+#: ../libegg/egg-datetime.c:299 ../src/libegg/egg-datetime.c:299
+#: ../Pyblio/GnomeUI/Editor.py:321 datebook_gui.c:4195 datebook_gui.c:4629
+msgid "Month"
+msgstr "Tháng"
+
+#: ../ui/evolution-calendar.xml.h:17
+msgid "Previews the calendar to be printed"
+msgstr "Xem trước lịch cần in"
+
+#: ../ui/evolution-calendar.xml.h:21
+msgid "Print this calendar"
+msgstr "In lịch này"
+
+#: ../ui/evolution-calendar.xml.h:22 ../ui/evolution-tasks.xml.h:17
+#: ../ui/evolution-calendar.xml.h:23
+msgid "Purg_e"
+msgstr "_Tẩy"
+
+#: ../ui/evolution-calendar.xml.h:23 ../ui/evolution-calendar.xml.h:24
+msgid "Purge old appointments and meetings"
+msgstr "Tẩy các cuộc hẹn và cuộc họp cũ"
+
+#: ../ui/evolution-calendar.xml.h:24 ../ui/evolution-calendar.xml.h:25
+msgid "Select _Date"
+msgstr "Chọn _ngày"
+
+#: ../ui/evolution-calendar.xml.h:25 ../calendar/gui/e-calendar-view.c:1519
+#: ../ui/evolution-calendar.xml.h:26
+msgid "Select _Today"
+msgstr "Chọn _hôm nay"
+
+#: ../ui/evolution-calendar.xml.h:26 ../ui/evolution-calendar.xml.h:27
+msgid "Select a specific date"
+msgstr "Chọn ngày xác định"
+
+#: ../ui/evolution-calendar.xml.h:27 ../ui/evolution-calendar.xml.h:28
+msgid "Select today"
+msgstr "Chọn hôm nay"
+
+#: ../ui/evolution-calendar.xml.h:28 ../ui/evolution-calendar.xml.h:29
+msgid "Show as list"
+msgstr "Xem kiểu danh sách"
+
+#: ../ui/evolution-calendar.xml.h:29 ../ui/evolution-calendar.xml.h:30
+msgid "Show one day"
+msgstr "Xem một ngày"
+
+#: ../ui/evolution-calendar.xml.h:30 ../ui/evolution-calendar.xml.h:31
+msgid "Show one month"
+msgstr "Xem một tháng"
+
+#: ../ui/evolution-calendar.xml.h:31 ../ui/evolution-calendar.xml.h:32
+msgid "Show one week"
+msgstr "Xem một tuần"
+
+#: ../ui/evolution-calendar.xml.h:32 ../ui/evolution-calendar.xml.h:33
+msgid "Show the working week"
+msgstr "Xem tuần làm việc"
+
+#: ../ui/evolution-calendar.xml.h:34 ../ui/evolution-calendar.xml.h:35
+msgid "View the current appointment"
+msgstr "Xem cuộc hẹn hiện thời"
+
+#: ../ui/evolution-calendar.xml.h:35 main.c:292
+#: ../ui/evolution-calendar.xml.h:36 ui/galeon.glade.h:54 datebook_gui.c:4184
+#: datebook_gui.c:4628
+msgid "Week"
+msgstr "Tuần"
+
+#: ../ui/evolution-calendar.xml.h:36 ../ui/evolution-calendar.xml.h:37
+msgid "Work Week"
+msgstr "Tuần làm việc"
+
+#: ../ui/evolution-calendar.xml.h:41 ../ui/evolution-calendar.xml.h:42
+msgid "_Open Appointment"
+msgstr "Mở _Cuộc hẹn"
+
+#: ../ui/evolution-composer-entries.xml.h:1 ../ui/evolution-editor.xml.h:7
+msgid "Copy selected text to the clipboard"
+msgstr "Sao chép đoạn đã chọn sang bảng tạm"
+
+#: ../ui/evolution-composer-entries.xml.h:3 ../ui/evolution-editor.xml.h:9
+msgid "Cut selected text to the clipboard"
+msgstr "Cắt đoạn đã chọn vào bảng tạm"
+
+#: ../ui/evolution-composer-entries.xml.h:4 ../ui/evolution-editor.xml.h:10
+msgid "Paste text from the clipboard"
+msgstr "Dán đoạn từ bảng tạm"
+
+#: ../plug-ins/ifscompose/ifscompose.c:1060 ../src/journal.c:3388
+msgid "Select _All"
+msgstr "Chọn _hết"
+
+#: ../ui/evolution-composer-entries.xml.h:6 ../ui/evolution-editor.xml.h:13
+msgid "Select all text"
+msgstr "Chọn toàn bộ văn bản"
+
+#: ../ui/evolution-editor.xml.h:2
+msgid "Click here to attach a file"
+msgstr "Nhấn đây để đính kèm tập tin"
+
+#: ../ui/evolution-editor.xml.h:3
+msgid "Click here to close the current window"
+msgstr "Nhấn đây để đóng cửa sổ hiện thời"
+
+#: ../ui/evolution-editor.xml.h:4
+msgid "Click here to save the current window"
+msgstr "Nhấn đây để lưu cửa sổ hiện thời"
+
+#: ../ui/evolution-editor.xml.h:5
+msgid "Click here to view help availabe"
+msgstr "Nhấn đây để xem trợ giúp có sẵn"
+
+#: ../ui/evolution-editor.xml.h:14 ../ui/evolution-message-composer.xml.h:40
+#: ../ui/evolution-message-composer.xml.h:39
+msgid "_Attachment..."
+msgstr "Đính _kèm..."
+
+#. #-#-#-#-# glade3vi..po (glade3 HEAD) #-#-#-#-#
+#. File
+#: ../src/mlview-app.cc:277 ../Pyblio/GnomeUI/Document.py:144
+#: ../src/glade-gtk.c:2312 po/silky.glade.h:215 app/menubar.c:685
+msgid "_File"
+msgstr "_Tập tin"
+
+#: ../plug-ins/imagemap/imap_polygon.c:521 ../src/main.c:595
+msgid "_Insert"
+msgstr "_Chèn"
+
+#: ../ui/evolution-event-editor.xml.h:2
+msgid "All day Event"
+msgstr "Sự kiện nguyên ngày"
+
+#: ../ui/evolution-event-editor.xml.h:3 ../ui/evolution-task-editor.xml.h:1
+msgid "Classify as Confidential"
+msgstr "Phân loại là Tin tưởng"
+
+#: ../ui/evolution-event-editor.xml.h:4 ../ui/evolution-task-editor.xml.h:2
+msgid "Classify as Private"
+msgstr "Phân loại là Riêng"
+
+#: ../ui/evolution-event-editor.xml.h:5 ../ui/evolution-task-editor.xml.h:3
+msgid "Classify as public"
+msgstr "Phân loại là Công"
+
+#: ../ui/evolution-event-editor.xml.h:6
+msgid "Click here to set or unset alarms for this event"
+msgstr "Nhấn đây để lập hay bỏ lập báo động cho sự kiện này"
+
+#: ../ui/evolution-event-editor.xml.h:8 ../ui/evolution-task-editor.xml.h:5
+msgid "Insert advanced send options"
+msgstr "Chèn tùy chọn gởi cấp cao"
+
+#: ../ui/evolution-event-editor.xml.h:9
+msgid "Make this a recurring event"
+msgstr "Đặt là sự kiện lặp"
+
+#: ../ui/evolution-event-editor.xml.h:10 ../ui/evolution-task-editor.xml.h:6
+msgid "Pu_blic"
+msgstr "_Công"
+
+#: ../ui/evolution-event-editor.xml.h:11
+msgid "Query free / busy information for the attendees"
+msgstr "Truy vấn thông tin Rảnh/Bận cho các người dự"
+
+#: ../ui/evolution-event-editor.xml.h:12 ../ui/evolution-task-editor.xml.h:7
+msgid "R_ole Field"
+msgstr "Trường _Vai trò"
+
+#: ../ui/evolution-event-editor.xml.h:15 ../ui/evolution-task-editor.xml.h:9
+msgid "Show Time _Zone"
+msgstr "Hiện múi _giờ"
+
+#: ../ui/evolution-event-editor.xml.h:16
+msgid "Show time as b_usy"
+msgstr "Hiện giờ là _bận"
+
+#: ../ui/evolution-event-editor.xml.h:17 ../ui/evolution-task-editor.xml.h:12
+msgid "Toggles whether the Attendee Type field is displayed"
+msgstr "Bật tắt hiển thị trường Kiểu người dự"
+
+#: ../ui/evolution-event-editor.xml.h:18 ../ui/evolution-task-editor.xml.h:13
+msgid "Toggles whether the RSVP field is displayed"
+msgstr "Bật tắt hiển thị trường RSVP"
+
+#: ../ui/evolution-event-editor.xml.h:19 ../ui/evolution-task-editor.xml.h:14
+msgid "Toggles whether the Role field is displayed"
+msgstr "Bật tắt hiển thị trường Vai trò"
+
+#: ../ui/evolution-event-editor.xml.h:20 ../ui/evolution-task-editor.xml.h:15
+msgid "Toggles whether the Status field is displayed"
+msgstr "Bật tắt hiển thị trường Trạng thái"
+
+#: ../ui/evolution-event-editor.xml.h:21 ../ui/evolution-task-editor.xml.h:16
+msgid "Toggles whether the time zone is displayed"
+msgstr "Bật tắt hiển thị múi giờ"
+
+#: ../ui/evolution-event-editor.xml.h:22 ../ui/evolution-task-editor.xml.h:17
+msgid "Toggles whether to display categories"
+msgstr "Bật tắt hiển thị các phân loại"
+
+#: ../ui/evolution-event-editor.xml.h:23
+msgid "Toggles whether to have All day Event"
+msgstr "Bật tắt có sự kiện nguyên ngày"
+
+#: ../ui/evolution-event-editor.xml.h:24
+msgid "Toggles whether to show time as busy"
+msgstr "Bật tắt hiển thị giờ là bận"
+
+#: ../ui/evolution-event-editor.xml.h:25
+msgid "_Alarms"
+msgstr "_Báo động"
+
+#: ../ui/evolution-event-editor.xml.h:26
+msgid "_All day Event"
+msgstr "Sự kiện _nguyên ngày"
+
+#: ../ui/evolution-event-editor.xml.h:28 ../ui/evolution-task-editor.xml.h:19
+msgid "_Classification"
+msgstr "_Phân loại"
+
+#: ../ui/evolution-event-editor.xml.h:29 ../ui/evolution-task-editor.xml.h:20
+msgid "_Confidential"
+msgstr "_Tin tưởng"
+
+#: ../ui/evolution-event-editor.xml.h:30 ../ui/evolution-task-editor.xml.h:21
+#: ../gnomecard/card-editor.glade.h:60
+msgid "_Private"
+msgstr "_Riêng"
+
+#: ../ui/evolution-event-editor.xml.h:31 ../ui/evolution-task-editor.xml.h:22
+msgid "_RSVP"
+msgstr "_RSVP"
+
+#: ../ui/evolution-event-editor.xml.h:32 ../ui/evolution-task-editor.xml.h:24
+msgid "_Status Field"
+msgstr "Trường _Trạng thái"
+
+#: ../ui/evolution-event-editor.xml.h:33 ../ui/evolution-task-editor.xml.h:25
+msgid "_Type Field"
+msgstr "Trường _Kiểu"
+
+#: ../ui/evolution-executive-summary.xml.h:1
+msgid "Customize My Evolution"
+msgstr "Tùy biến Evolution của tôi"
+
+#: ../ui/evolution-mail-global.xml.h:2
+msgid "Cancel the current mail operation"
+msgstr "Hủy tác vụ thư tín hiện thời"
+
+#: ../ui/evolution-mail-global.xml.h:3
+msgid "Copy the selected folder into another folder"
+msgstr "Sao chép thư mục được chọn sang thư mục khác"
+
+#: ../ui/evolution-mail-global.xml.h:4
+msgid "Create a new folder for storing mail"
+msgstr "Tạo thư mục mới để lưu thư"
+
+#: ../ui/evolution-mail-global.xml.h:5
+msgid "Create or edit Search Folder definitions"
+msgstr "Tạo hoặc sửa lời định nghĩa thư mục tìm kiếm"
+
+#: ../ui/evolution-mail-global.xml.h:6
+msgid "Create or edit rules for filtering new mail"
+msgstr "Tạo hoặc sửa đổi quy tắc lọc thư mới"
+
+#: ../ui/evolution-mail-global.xml.h:8 ../ui/evolution-mail-list.xml.h:7
+#: ../ui/evolution-subscribe.xml.h:2 ../ui/evolution-mail-list.xml.h:6
+msgid "F_older"
+msgstr "Thư _mục"
+
+#: ../ui/evolution-mail-global.xml.h:9
+msgid "Message F_ilters"
+msgstr "Bộ _lọc thư"
+
+#: ../ui/evolution-mail-global.xml.h:10 ../ui/evolution-mail-global.xml.h:11
+msgid "Message _Preview"
+msgstr "Xem thư _trước"
+
+#: ../ui/evolution-mail-global.xml.h:11 ../ui/evolution-mail-global.xml.h:12
+msgid "Move the selected folder into another folder"
+msgstr "Chuyển thÆ° mục được chá»n tá»›i thÆ° mục khác" + +#. Alphabetical by name, yo +#: ../ui/evolution-mail-global.xml.h:13 +msgid "Permanently remove all deleted messages from all folders" +msgstr "Gỡ bá» hoàn toàn má»i thÆ° đã xoá bá» ra má»i thÆ° mục" + +#: ../ui/evolution-mail-global.xml.h:14 ../ui/evolution-mail-global.xml.h:15 +msgid "Search F_olders" +msgstr "Tìm kiếm trong _thÆ° mục" + +#: ../ui/evolution-mail-global.xml.h:15 ../ui/evolution-mail-global.xml.h:16 +msgid "Show message preview window" +msgstr "Hiện khung xem thÆ° trÆ°á»›c" + +#: ../ui/evolution-mail-global.xml.h:16 +msgid "Subscribe or unsubscribe to folders on remote servers" +msgstr "Äăng ký hoặc hủy đăng ký thÆ° mục trên máy chủ từ xa" + +#: ../ui/evolution-mail-global.xml.h:17 ../ui/evolution-mail-global.xml.h:18 +msgid "_Copy Folder To..." +msgstr "_Chép thÆ° mục vào..." + +#: ../ui/evolution-mail-global.xml.h:18 ../ui/evolution-mail-global.xml.h:19 +msgid "_Move Folder To..." +msgstr "_Chuyển thÆ° mục sang..." + +#: ../ui/evolution-mail-global.xml.h:23 ../ui/evolution-mail-global.xml.h:22 +msgid "_Subscriptions" +msgstr "_Mục đăng ký" + +#: ../ui/evolution-mail-list.xml.h:1 +msgid "Change the name of this folder" +msgstr "Thay đổi tên thÆ° mục này" + +#: ../ui/evolution-mail-list.xml.h:2 +msgid "Change the properties of this folder" +msgstr "Thay đổi thuá»™c tính thÆ° mục này" + +#: ../ui/evolution-mail-list.xml.h:3 ../ui/evolution-mail-message.xml.h:12 +msgid "Copy selected message(s) to the clipboard" +msgstr "Sao chép các thÆ° đã chá»n sang bảng tạm" + +#: ../ui/evolution-mail-list.xml.h:4 +msgid "Cut selected message(s) to the clipboard" +msgstr "Cắt các thÆ° đã chá»n vào bảng tạm" + +#: ../ui/evolution-mail-list.xml.h:6 ../ui/evolution-mail-list.xml.h:5 +msgid "E_xpunge" +msgstr "_Xoá hẳn" + +#: ../ui/evolution-mail-list.xml.h:8 ../ui/evolution-mail-list.xml.h:7 +msgid "Group By _Threads" +msgstr "Nhóm lại theo _mạch" + +#: ../ui/evolution-mail-list.xml.h:9 ../ui/evolution-mail-list.xml.h:8 +msgid "Hide S_elected Messages" +msgstr "Ẩn các thÆ° đã _chá»n" + +#: ../ui/evolution-mail-list.xml.h:10 ../ui/evolution-mail-list.xml.h:9 +msgid "Hide _Deleted Messages" +msgstr "Ẩn các thÆ° đã _xoá bá»" + +#: ../ui/evolution-mail-list.xml.h:11 ../ui/evolution-mail-list.xml.h:10 +msgid "Hide _Read Messages" +msgstr "Ẩn các thÆ° đã _Ä‘á»c" + +#: ../ui/evolution-mail-list.xml.h:12 ../ui/evolution-mail-list.xml.h:11 +msgid "" +"Hide deleted messages rather than displaying them with a line through them" +msgstr "Ẩn các thÆ° đã xoá bá» thay vì hiển thị chúng dạng gạch đè" + +#: ../ui/evolution-mail-list.xml.h:13 +msgid "Mar_k Messages as Read" +msgstr "Äánh dấu thÆ° Äã Ä‘á»_c" + +#: ../ui/evolution-mail-list.xml.h:14 ../ui/evolution-mail-message.xml.h:68 +#: ../ui/evolution-mail-list.xml.h:13 +msgid "Paste message(s) from the clipboard" +msgstr "Dán các thÆ° từ bảng tạm" + +#: ../ui/evolution-mail-list.xml.h:15 ../ui/evolution-mail-list.xml.h:14 +msgid "Permanently remove all deleted messages from this folder" +msgstr "Gỡ bá» hoàn toàn má»i thÆ° đã xoá bá» trong thÆ° mục này" + +#: ../ui/evolution-mail-list.xml.h:16 ../ui/evolution-mail-list.xml.h:15 +msgid "Permanently remove this folder" +msgstr "Gỡ bá» hoàn toàn thÆ° mục này" + +#: ../ui/evolution-mail-list.xml.h:17 +msgid "Select Message _Thread" +msgstr "Chá»n _nhánh thÆ°" + +#: ../ui/evolution-mail-list.xml.h:18 ../ui/evolution-mail-list.xml.h:16 +msgid "Select _All Messages" +msgstr "Chá»n _má»i thÆ°" + +#: ../ui/evolution-mail-list.xml.h:19 
../ui/evolution-mail-list.xml.h:17 +msgid "Select all and only the messages that are not currently selected" +msgstr "Chá»n tất cả và chỉ những thÆ° hiện thá»i không được chá»n" + +#: ../ui/evolution-mail-list.xml.h:20 ../ui/evolution-mail-list.xml.h:18 +msgid "Select all messages in the same thread as the selected message" +msgstr "Chá»n tất cả nhÆ°ng thÆ° trong cùng mạch vá»›i thÆ° đã chá»n" + +#: ../ui/evolution-mail-list.xml.h:21 ../ui/evolution-mail-list.xml.h:19 +msgid "Select all visible messages" +msgstr "Chá»n má»i thÆ° có thể thấy" + +#: ../ui/evolution-mail-list.xml.h:22 ../ui/evolution-mail-list.xml.h:20 +msgid "Sh_ow Hidden Messages" +msgstr "_Hiển thị thÆ° bị ẩn" + +#: ../ui/evolution-mail-list.xml.h:23 ../ui/evolution-mail-list.xml.h:21 +msgid "Show messages that have been temporarily hidden" +msgstr "Hiển thị các thÆ° Ä‘ang bị giấu tạm thá»i" + +#: ../ui/evolution-mail-list.xml.h:24 ../ui/evolution-mail-list.xml.h:22 +msgid "Temporarily hide all messages that have already been read" +msgstr "Ẩn tạm thá»i má»i thÆ° đã Ä‘á»c" + +#: ../ui/evolution-mail-list.xml.h:25 ../ui/evolution-mail-list.xml.h:23 +msgid "Temporarily hide the selected messages" +msgstr "Ẩn tạm thá»i những thÆ° được chá»n" + +#: ../ui/evolution-mail-list.xml.h:26 ../ui/evolution-mail-list.xml.h:24 +msgid "Threaded Message list" +msgstr "Danh sách thÆ° theo mạch" + +#: ../ui/evolution-mail-message.xml.h:1 +msgid "A_dd Sender to Address Book" +msgstr "Thêm ngÆ°á»i _gởi vào Sổ địa chỉ" + +#: ../ui/evolution-mail-message.xml.h:2 +msgid "A_pply Filters" +msgstr "Ã_p dụng bá»™ lá»c" + +#: ../ui/evolution-mail-message.xml.h:4 ../ui/evolution-mail-message.xml.h:3 +msgid "Add Sender to Address Book" +msgstr "Thêm ngÆ°á»i gởi vào Sổ địa chỉ" + +#: ../ui/evolution-mail-message.xml.h:5 ../ui/evolution-mail-message.xml.h:4 +msgid "All Message _Headers" +msgstr "Các dòng đầu thÆ°" + +#: ../ui/evolution-mail-message.xml.h:6 ../ui/evolution-mail-message.xml.h:5 +msgid "Apply filter rules to the selected messages" +msgstr "Ãp dụng bá»™ lá»c vào các thÆ° đã chá»n" + +#: ../ui/evolution-mail-message.xml.h:7 ../ui/evolution-mail-message.xml.h:6 +msgid "Check for _Junk" +msgstr "Kiểm tra tìm thÆ° rác" + +#: ../ui/evolution-mail-message.xml.h:8 ../ui/evolution-mail-message.xml.h:7 +msgid "Compose _New Message" +msgstr "Soạn thÆ° _má»›i" + +#: ../ui/evolution-mail-message.xml.h:9 ../ui/evolution-mail-message.xml.h:8 +msgid "Compose a reply to all of the recipients of the selected message" +msgstr "Soạn thÆ° trả lá»i cho má»i ngÆ°á»i nhận thÆ° được chá»n" + +#: ../ui/evolution-mail-message.xml.h:10 ../ui/evolution-mail-message.xml.h:9 +msgid "Compose a reply to the mailing list of the selected message" +msgstr "Soạn thÆ° trả lá»i cho há»™p thÆ° chung của thÆ° được chá»n" + +#: ../ui/evolution-mail-message.xml.h:11 ../ui/evolution-mail-message.xml.h:10 +msgid "Compose a reply to the sender of the selected message" +msgstr "Soạn thÆ° trả lá»i cho ngÆ°á»i gởi thÆ° được chá»n" + +#: ../ui/evolution-mail-message.xml.h:13 +msgid "Copy selected messages to another folder" +msgstr "Sao chép các thÆ° được chá»n sang thÆ° mục khác" + +#: ../ui/evolution-mail-message.xml.h:14 +msgid "Create R_ule" +msgstr "Tạo _quy tắc" + +#: ../ui/evolution-mail-message.xml.h:15 +msgid "Create a Search Folder for these recipients" +msgstr "Tạo thÆ° mục tìm kiếm cho những ngÆ°á»i nhận này" + +#: ../ui/evolution-mail-message.xml.h:16 +msgid "Create a Search Folder for this mailing list" +msgstr "Tạo thÆ° mục tìm kiếm cho há»™p thÆ° chung này" + +#: 
../ui/evolution-mail-message.xml.h:17 +msgid "Create a Search Folder for this sender" +msgstr "Tạo thÆ° mục tìm kiếm cho ngÆ°á»i gởi này" + +#: ../ui/evolution-mail-message.xml.h:18 +msgid "Create a Search Folder for this subject" +msgstr "Tạo thÆ° mục tìm kiếm cho chủ Ä‘á» này" + +#: ../ui/evolution-mail-message.xml.h:19 +msgid "Create a rule to filter messages from this sender" +msgstr "Tạo quy tắc để lá»c má»i thÆ° từ ngÆ°á»i gởi này" + +#: ../ui/evolution-mail-message.xml.h:20 +msgid "Create a rule to filter messages to these recipients" +msgstr "Tạo quy tắc để lá»c má»i thÆ° được gởi cho những ngÆ°á»i nhận này" + +#: ../ui/evolution-mail-message.xml.h:21 +msgid "Create a rule to filter messages to this mailing list" +msgstr "Tạo quy tắc để lá»c má»i thÆ° được gởi cho há»™p thÆ° chung này" + +#: ../ui/evolution-mail-message.xml.h:22 +msgid "Create a rule to filter messages with this subject" +msgstr "Tạo quy tắc để lá»c má»i thÆ° có chủ Ä‘á» này" + +#: ../ui/evolution-mail-message.xml.h:24 ../src/ephy-window.c:214 +msgid "Decrease the text size" +msgstr "Giảm cỡ chữ" + +#: ../ui/evolution-mail-message.xml.h:26 +msgid "Display the next important message" +msgstr "Hiển thị thÆ° quan trá»ng kế tiếp" + +#: ../ui/evolution-mail-message.xml.h:27 +msgid "Display the next message" +msgstr "Hiển thị thÆ° kế tiếp" + +#: ../ui/evolution-mail-message.xml.h:28 +msgid "Display the next unread message" +msgstr "Hiển thị thÆ° chÆ°a Ä‘á»c kế tiếp" + +#: ../ui/evolution-mail-message.xml.h:29 +msgid "Display the next unread thread" +msgstr "Hiển thị mạch chÆ°a Ä‘á»c kế tiếp" + +#: ../ui/evolution-mail-message.xml.h:30 +msgid "Display the previous important message" +msgstr "Hiển thị thÆ° quan trá»ng trÆ°á»›c đó" + +#: ../ui/evolution-mail-message.xml.h:31 +msgid "Display the previous message" +msgstr "Hiển thị thÆ° trÆ°á»›c đó" + +#: ../ui/evolution-mail-message.xml.h:32 +msgid "Display the previous unread message" +msgstr "Hiển thị thÆ° chÆ°a Ä‘á»c trÆ°á»›c đó" + +#: ../ui/evolution-mail-message.xml.h:33 +msgid "F_orward As..." +msgstr "_Chuyển tiếp dạng..." + +#: ../ui/evolution-mail-message.xml.h:34 ../ui/evolution-mail-message.xml.h:33 +msgid "Filter on Mailing _List..." +msgstr "Lá»c theo _há»™p thÆ° chung..." + +#: ../ui/evolution-mail-message.xml.h:35 ../ui/evolution-mail-message.xml.h:34 +msgid "Filter on Se_nder..." +msgstr "Lá»c theo NgÆ°á»i _gởi..." + +#: ../ui/evolution-mail-message.xml.h:36 ../ui/evolution-mail-message.xml.h:35 +msgid "Filter on _Recipients..." +msgstr "Lá»c theo _NgÆ°á»i nhận..." + +#: ../ui/evolution-mail-message.xml.h:37 ../ui/evolution-mail-message.xml.h:36 +msgid "Filter on _Subject..." +msgstr "Lá»c theo _Chủ Ä‘á»..." + +#: ../ui/evolution-mail-message.xml.h:38 ../ui/evolution-mail-message.xml.h:37 +msgid "Filter the selected messages for junk status" +msgstr "Lá»c các thÆ° được chá»n để quyết định trạng thái rác" + +#: ../ui/evolution-mail-message.xml.h:39 ../ui/evolution-mail-message.xml.h:38 +msgid "Flag selected message(s) for follow-up" +msgstr "Äặt cá» trên các thÆ° được chá»n để theo dõi tiếp" + +#: ../ui/evolution-mail-message.xml.h:40 ../ui/evolution-mail-message.xml.h:39 +msgid "Follow _Up..." +msgstr "Th_eo dõi tiếp..." 
+
+#: ../ui/evolution-mail-message.xml.h:41 ../ui/evolution-mail-message.xml.h:40
+msgid "Force images in HTML mail to be loaded"
+msgstr "Ép tải ảnh trong thư HTML"
+
+#: ../ui/evolution-mail-message.xml.h:43 ../ui/evolution-mail-message.xml.h:42
+msgid "Forward the selected message in the body of a new message"
+msgstr "Chuyển tiếp thư được chọn trong thân thư mới"
+
+#: ../ui/evolution-mail-message.xml.h:44 ../ui/evolution-mail-message.xml.h:43
+msgid "Forward the selected message quoted like a reply"
+msgstr "Chuyển tiếp thư được chọn, được trích dẫn như trả lời"
+
+#: ../ui/evolution-mail-message.xml.h:45 ../ui/evolution-mail-message.xml.h:44
+msgid "Forward the selected message to someone"
+msgstr "Chuyển tiếp thông điệp được chọn tới người khác"
+
+#: ../ui/evolution-mail-message.xml.h:46 ../ui/evolution-mail-message.xml.h:45
+msgid "Forward the selected message to someone as an attachment"
+msgstr "Chuyển tiếp thông điệp được chọn tới người khác như là đính kèm"
+
+#: ../ui/evolution-mail-message.xml.h:47 ../src/ephy-window.c:211
+#: ../ui/evolution-mail-message.xml.h:46
+msgid "Increase the text size"
+msgstr "Tăng cỡ chữ"
+
+#: ../ui/evolution-mail-message.xml.h:49 ../ui/evolution-mail-message.xml.h:48
+msgid "Mar_k as"
+msgstr "_Nhãn là"
+
+#: ../ui/evolution-mail-message.xml.h:50 ../ui/evolution-mail-message.xml.h:49
+msgid "Mark the selected message(s) as having been read"
+msgstr "Đánh dấu các thư được chọn là đã đọc"
+
+#: ../ui/evolution-mail-message.xml.h:51 ../ui/evolution-mail-message.xml.h:50
+msgid "Mark the selected message(s) as important"
+msgstr "Đánh dấu các thư được chọn là quan trọng"
+
+#: ../ui/evolution-mail-message.xml.h:52 ../ui/evolution-mail-message.xml.h:51
+msgid "Mark the selected message(s) as junk"
+msgstr "Đánh dấu các thư được chọn là rác"
+
+#: ../ui/evolution-mail-message.xml.h:53 ../ui/evolution-mail-message.xml.h:52
+msgid "Mark the selected message(s) as not being junk"
+msgstr "Đánh dấu các thư được chọn không phải là rác"
+
+#: ../ui/evolution-mail-message.xml.h:54 ../ui/evolution-mail-message.xml.h:53
+msgid "Mark the selected message(s) as not having been read"
+msgstr "Đánh dấu các thư được chọn là chưa đọc"
+
+#: ../ui/evolution-mail-message.xml.h:55 ../ui/evolution-mail-message.xml.h:54
+msgid "Mark the selected message(s) as unimportant"
+msgstr "Đánh dấu các thư được chọn là không quan trọng"
+
+#: ../ui/evolution-mail-message.xml.h:56 ../ui/evolution-mail-message.xml.h:55
+msgid "Mark the selected messages for deletion"
+msgstr "Đánh dấu các thư được chọn cần xoá bỏ"
+
+#: ../ui/evolution-mail-message.xml.h:58
+msgid "Move selected message(s) to another folder"
+msgstr "Di chuyển các thư được chọn sang thư mục khác"
+
+#: ../ui/evolution-mail-message.xml.h:60
+msgid "Next _Important Message"
+msgstr "Thư _quan trọng kế"
+
+#: ../ui/evolution-mail-message.xml.h:61
+msgid "Next _Thread"
+msgstr "_Mạch kế"
+
+#: ../ui/evolution-mail-message.xml.h:62
+msgid "Next _Unread Message"
+msgstr "Thư _chưa đọc kế"
+
+#: ../ui/evolution-mail-message.xml.h:63
+msgid "Not Junk"
+msgstr "Không phải rác"
+
+#: ../ui/evolution-mail-message.xml.h:64
+msgid "Open a window for composing a mail message"
+msgstr "Mở cửa sổ soạn thư"
+
+#: ../ui/evolution-mail-message.xml.h:65
+msgid "Open the selected message in a new window"
+msgstr "Mở thông điệp được chọn trong cửa sổ mới"
+
+#: ../ui/evolution-mail-message.xml.h:66
+msgid "Open the selected message in the composer for editing"
+msgstr "Mở thông điệp được chọn trong bộ soạn thảo để hiệu chỉnh"
+
+#: ../ui/evolution-mail-message.xml.h:67
+msgid "P_revious Unread Message"
+msgstr "Thư chưa đọc t_rước"
+
+#: ../ui/evolution-mail-message.xml.h:69
+msgid "Pos_t New Message to Folder"
+msgstr "Gởi thư mới _tới thư mục"
+
+#: ../ui/evolution-mail-message.xml.h:70 ../ui/evolution-mail-message.xml.h:69
+msgid "Post a Repl_y"
+msgstr "Gởi t_rả lời"
+
+#: ../ui/evolution-mail-message.xml.h:71 ../ui/evolution-mail-global.xml.h:14
+msgid "Post a message to a Public folder"
+msgstr "Gởi thư tới thư mục Công cộng"
+
+#: ../ui/evolution-mail-message.xml.h:72
+msgid "Post a reply to a message in a Public folder"
+msgstr "Gởi trả lời thông điệp trong thư mục Công cộng"
+
+#: ../ui/evolution-mail-message.xml.h:73 ../ui/evolution-mail-message.xml.h:71
+msgid "Pr_evious Important Message"
+msgstr "Thư quan trọng t_rước"
+
+#: ../ui/evolution-mail-message.xml.h:74 ../ui/evolution-mail-message.xml.h:72
+msgid "Preview the message to be printed"
+msgstr "Xem trước thông điệp cần in"
+
+#: ../ui/evolution-mail-message.xml.h:78 ../ui/evolution-mail-message.xml.h:76
+msgid "Print this message"
+msgstr "In thư này"
+
+#: ../ui/evolution-mail-message.xml.h:79 ../ui/evolution-mail-message.xml.h:77
+msgid "Re_direct"
+msgstr "Chuyển _hướng"
+
+#: ../ui/evolution-mail-message.xml.h:80 ../ui/evolution-mail-message.xml.h:78
+msgid "Redirect (bounce) the selected message to someone"
+msgstr "Chuyển hướng (bounce: nảy lên) thư được chọn tới người khác"
+
+#: ../ui/evolution-mail-message.xml.h:85 ../ui/evolution-mail-message.xml.h:82
+msgid "Reset the text to its original size"
+msgstr "Phục hồi kích thước chữ gốc"
+
+#: ../ui/evolution-mail-message.xml.h:86 ../ui/evolution-mail-message.xml.h:83
+msgid "Save the message as a text file"
+msgstr "Lưu thư là tập tin văn bản"
+
+#: ../ui/evolution-mail-message.xml.h:87 ../ui/evolution-mail-message.xml.h:84
+msgid "Search Folder from Mailing _List..."
+msgstr "Thư mục tìm kiếm trên _Hộp thư chung..."
+
+#: ../ui/evolution-mail-message.xml.h:88 ../ui/evolution-mail-message.xml.h:85
+msgid "Search Folder from Recipients..."
+msgstr "Thư mục tìm kiếm trên Người nhận..."
+
+#: ../ui/evolution-mail-message.xml.h:89 ../ui/evolution-mail-message.xml.h:86
+msgid "Search Folder from S_ubject..."
+msgstr "Thư mục tìm kiếm trên _Chủ đề..."
+
+#: ../ui/evolution-mail-message.xml.h:90 ../ui/evolution-mail-message.xml.h:87
+msgid "Search Folder from Sen_der..."
+msgstr "Thư mục tìm kiếm trên Người _gởi..."
+
+#: ../ui/evolution-mail-message.xml.h:91 ../ui/evolution-mail-message.xml.h:88
+msgid "Search for text in the body of the displayed message"
+msgstr "Tìm đoạn trong thân thư đã hiển thị"
+
+#: ../ui/evolution-mail-message.xml.h:92 ../ui/evolution-mail-message.xml.h:89
+msgid "Select _All Text"
+msgstr "Chọn toàn bộ v_ăn bản"
+
+#: ../ui/evolution-mail-message.xml.h:93 ../ui/evolution-mail-message.xml.h:90
+msgid "Select all the text in a message"
+msgstr "Chọn mọi văn bản trong thư"
+
+#: ../ui/evolution-mail-message.xml.h:94 ../ui/evolution-mail-message.xml.h:91
+msgid "Set up the page settings for your current printer"
+msgstr "Thiết lập trang cho máy in hiện thời"
+
+#: ../ui/evolution-mail-message.xml.h:95
+msgid "Show a blinking cursor in the body of displayed messages"
+msgstr "Hiển thị con chạy nháy trong phần thân các thư đã hiển thị"
+
+#: ../ui/evolution-mail-message.xml.h:96 ../ui/evolution-mail-message.xml.h:93
+msgid "Show message in the normal style"
+msgstr "Hiện thông điệp theo cách bình thường"
+
+#: ../ui/evolution-mail-message.xml.h:97 ../ui/evolution-mail-message.xml.h:94
+msgid "Show message with all email headers"
+msgstr "Hiện thư với mọi dòng đầu thư"
+
+#: ../ui/evolution-mail-message.xml.h:98 ../ui/evolution-mail-message.xml.h:95
+msgid "Show the raw email source of the message"
+msgstr "Hiện mã nguồn thô của thư"
+
+#: ../ui/evolution-mail-message.xml.h:99 ../ui/evolution-mail-message.xml.h:96
+msgid "Un-delete the selected messages"
+msgstr "Hủy xoá bỏ những thư được chọn"
+
+#: ../ui/evolution-mail-message.xml.h:100
+#: ../ui/evolution-mail-message.xml.h:97
+msgid "Uni_mportant"
+msgstr "_Không quan trọng"
+
+#: ../ui/evolution-mail-message.xml.h:102
+msgid "_Attached"
+msgstr "Gởi _kèm"
+
+#: ../ui/evolution-mail-message.xml.h:103
+#: ../ui/evolution-mail-message.xml.h:99
+msgid "_Caret Mode"
+msgstr "Chế độ con _nháy"
+
+#: ../ui/evolution-mail-message.xml.h:106
+#: ../ui/evolution-mail-message.xml.h:102
+msgid "_Delete Message"
+msgstr "_Xoá bỏ thư"
+
+#: ../ui/evolution-mail-message.xml.h:108
+#: ../ui/evolution-mail-message.xml.h:104
+msgid "_Find in Message..."
+msgstr "_Tìm trong thư..."
+
+#: ../ui/evolution-mail-message.xml.h:110
+#: ../ui/evolution-mail-message.xml.h:106
+msgid "_Go To"
+msgstr "Đ_i tới"
+
+#: ../ui/evolution-mail-message.xml.h:111
+#: ../ui/evolution-mail-message.xml.h:107
+msgid "_Important"
+msgstr "_Quan trọng"
+
+#: ../ui/evolution-mail-message.xml.h:112
+msgid "_Inline"
+msgstr "Trực t_iếp"
+
+#: ../ui/evolution-mail-message.xml.h:113
+#: ../ui/evolution-mail-message.xml.h:108
+msgid "_Junk"
+msgstr "_Rác"
+
+#: ../ui/evolution-mail-message.xml.h:114
+#: ../ui/evolution-mail-message.xml.h:109
+msgid "_Load Images"
+msgstr "Tải ả_nh"
+
+#: ../ui/evolution-mail-message.xml.h:118 ../gtk/gtkstock.c:413
+msgid "_Normal Size"
+msgstr "_Cỡ thường"
+
+#: ../ui/evolution-mail-message.xml.h:119
+#: ../ui/evolution-mail-message.xml.h:114
+msgid "_Not Junk"
+msgstr "Không _phải rác"
+
+#: ../ui/evolution-mail-message.xml.h:120
+#: ../ui/evolution-mail-message.xml.h:115
+msgid "_Open in New Window"
+msgstr "Mở trong cửa sổ _mới"
+
+#: ../ui/evolution-mail-message.xml.h:123
+msgid "_Quoted"
+msgstr "Trích _dẫn"
+
+#: ../ui/evolution-mail-message.xml.h:126
+#: ../ui/evolution-mail-message.xml.h:120
+msgid "_Save Message..."
+msgstr "_Lưu thư..."
+
+#: ../ui/evolution-mail-message.xml.h:127
+#: ../ui/evolution-mail-message.xml.h:121
+msgid "_Undelete Message"
+msgstr "_Hủy xoá bỏ thư"
+
+#: ../ui/evolution-mail-message.xml.h:129 ../src/planner-gantt-view.c:164
+msgid "_Zoom In"
+msgstr "_Phóng to"
+
+#: ../src/ggv-ui.xml.h:3 ../src/widgets/cria-main-window.c:1576
+msgid "Close this window"
+msgstr "Đóng cửa sổ này"
+
+#: ../ui/evolution-mail-messagedisplay.xml.h:3 ../ui/evolution.xml.h:16
+#: ../ui/evolution.xml.h:17 ../gedit/gedit-ui.xml.h:24
+msgid "Main toolbar"
+msgstr "Thanh công cụ chính"
+
+#: ../ui/evolution-memos.xml.h:3
+msgid "Copy selected memo"
+msgstr "Chép ghi nhớ đã chọn"
+
+#: ../ui/evolution-memos.xml.h:5
+msgid "Cut selected memo"
+msgstr "Cắt ghi nhớ đã chọn"
+
+#: ../ui/evolution-memos.xml.h:7
+msgid "Delete selected memos"
+msgstr "Xoá bỏ các ghi nhớ đã chọn"
+
+#: ../ui/evolution-memos.xml.h:9
+msgid "Paste memo from the clipboard"
+msgstr "Dán ghi nhớ từ bảng tạm"
+
+#: ../ui/evolution-memos.xml.h:10
+msgid "Previews the list of memos to be printed"
+msgstr "Xem thử danh sách các ghi nhớ cần in"
+
+#: ../ui/evolution-memos.xml.h:13
+msgid "Print the list of memos"
+msgstr "In danh sách các ghi nhớ"
+
+#: ../ui/evolution-memos.xml.h:14
+msgid "View the selected memo"
+msgstr "Xem ghi nhớ đã chọn"
+
+#: ../ui/evolution-memos.xml.h:18
+msgid "_Open Memo"
+msgstr "_Mở ghi nhớ"
+
+#: ../ui/evolution-message-composer.xml.h:2
+msgid "Attach a file"
+msgstr "Đính kèm tập tin"
+
+#: ../ui/evolution-signature-editor.xml.h:13
+msgid "Close the current file"
+msgstr "Đóng tập tin hiện thời"
+
+#: ../ui/evolution-message-composer.xml.h:5
+#: ../ui/evolution-message-composer.xml.h:6
+msgid "Delete all but signature"
+msgstr "Xoá bỏ mọi thứ trừ chữ ký"
+
+#: ../ui/evolution-message-composer.xml.h:6
+#: ../ui/evolution-message-composer.xml.h:7
+msgid "Encrypt this message with PGP"
+msgstr "Mật mã hóa thư này, dùng PGP"
+
+#: ../ui/evolution-message-composer.xml.h:7
+#: ../ui/evolution-message-composer.xml.h:8
+msgid "Encrypt this message with your S/MIME Encryption Certificate"
+msgstr "Mật mã hoá thư này, dùng Chứng nhận Mật mã hóa S/MIME của bạn"
+
+#: ../src/widgets/cria-main-window.c:1581
+msgid "For_mat"
+msgstr "_Định dạng"
+
+#: ../ui/evolution-message-composer.xml.h:9
+msgid "Get delivery notification when your message is read"
+msgstr "Chọn để nhận thông báo khi người nhận đã đọc thư bạn"
+
+#: ../ui/evolution-message-composer.xml.h:10
+msgid "HT_ML"
+msgstr "HT_ML"
+
+#: ui/galeon-ui.xml.in.h:94 ../libgnomeui/gnome-app-helper.c:109
+msgid "Open a file"
+msgstr "Mở tập tin"
+
+#: ../ui/evolution-message-composer.xml.h:13
+msgid "PGP Encrypt"
+msgstr "Mật mã hóa PGP"
+
+#: ../ui/evolution-message-composer.xml.h:14
+msgid "PGP Sign"
+msgstr "Chữ ký PGP"
+
+#: ../ui/evolution-message-composer.xml.h:15
+msgid "R_equest Read Receipt"
+msgstr "_Yêu cầu thông báo đã đọc"
+
+#: ../ui/evolution-message-composer.xml.h:16
+msgid "S/MIME Encrypt"
+msgstr "Mật mã hoá S/MIME"
+
+#: ../ui/evolution-message-composer.xml.h:17
+msgid "S/MIME Sign"
+msgstr "Chữ ký S/MIME"
+
+#: ui/galeon-ui.xml.in.h:120
+msgid "Save As"
+msgstr "Lưu dạng"
+
+#: ../ui/evolution-message-composer.xml.h:20
+msgid "Save Draft"
+msgstr "Lưu nháp"
+
+#: ../libgnomeui/gnome-app-helper.c:119 ../app/actions/file-actions.c:91
+msgid "Save _As..."
+msgstr "Lưu _dạng..."
+ +#: ../ui/evolution-message-composer.xml.h:22 ../src/main.c:623 +msgid "Save _Draft" +msgstr "LÆ°u _nháp" + +#: ../ui/evolution-message-composer.xml.h:23 +msgid "Save as draft" +msgstr "LÆ°u dạng nháp" + +#: ../ui/evolution-message-composer.xml.h:24 +msgid "Save in folder..." +msgstr "LÆ°u vào thÆ° mục..." + +#: ../ui/evolution-message-composer.xml.h:25 +msgid "Save the current file" +msgstr "LÆ°u tập tin hiện thá»i" + +#: ../ui/evolution-message-composer.xml.h:26 +msgid "Save the current file with a different name" +msgstr "LÆ°u tập tin hiện thá»i vá»›i tên khác" + +#: ../ui/evolution-message-composer.xml.h:27 +msgid "Save the message in a specified folder" +msgstr "LÆ°u thông Ä‘iệp vào thÆ° mục xác định" + +#: ../ui/evolution-signature-editor.xml.h:11 +msgid "Send the mail in HTML format" +msgstr "Gởi thông Ä‘iệp vá»›i dạng thức HTML" + +#: ../ui/evolution-message-composer.xml.h:31 +msgid "Set the message priority to high" +msgstr "Äặt Æ°u tiên thÆ° là cao" + +#: ../ui/evolution-message-composer.xml.h:32 +#: ../ui/evolution-message-composer.xml.h:31 +msgid "Sign this message with your PGP key" +msgstr "Ký tên vào thÆ° này, dùng khoá PGP của bạn" + +#: ../ui/evolution-message-composer.xml.h:33 +#: ../ui/evolution-message-composer.xml.h:32 +msgid "Sign this message with your S/MIME Signature Certificate" +msgstr "Ký tên vào thÆ° này, dùng Chứng nhận Chữ ký S/MIME của bạn" + +#: ../ui/evolution-message-composer.xml.h:34 +#: ../ui/evolution-message-composer.xml.h:33 +msgid "Toggles whether the BCC field is displayed" +msgstr "Bật tắt hiển thị trÆ°á»ng Bí mật Chép Cho (BCC)" + +#: ../ui/evolution-message-composer.xml.h:35 +#: ../ui/evolution-message-composer.xml.h:34 +msgid "Toggles whether the CC field is displayed" +msgstr "Bật tắt hiển thị trÆ°á»ng Chép Cho (CC)" + +#: ../ui/evolution-message-composer.xml.h:36 +#: ../ui/evolution-message-composer.xml.h:35 +msgid "Toggles whether the From chooser is displayed" +msgstr "Bật tắt hiển thị bá»™ chá»n From (Từ)" + +#: ../ui/evolution-message-composer.xml.h:37 +#: ../ui/evolution-message-composer.xml.h:36 +msgid "Toggles whether the Post-To field is displayed" +msgstr "Bật tắt hiển thị trÆ°á»ng Post-To (Gởi cho nhóm tin tức)" + +#: ../ui/evolution-message-composer.xml.h:38 +#: ../ui/evolution-message-composer.xml.h:37 +msgid "Toggles whether the Reply-To field is displayed" +msgstr "Bật tắt hiển thị trÆ°á»ng Reply-To (Trả lá»i)" + +#: ../ui/evolution-message-composer.xml.h:39 +#: ../ui/evolution-message-composer.xml.h:38 +msgid "Toggles whether the To field is displayed" +msgstr "Bật tắt hiển thị trÆ°á»ng To (Cho)" + +#: ../ui/evolution-message-composer.xml.h:41 +#: ../ui/evolution-message-composer.xml.h:40 +msgid "_Bcc Field" +msgstr "TrÆ°á»ng _BCC" + +#: ../ui/evolution-message-composer.xml.h:42 +#: ../ui/evolution-message-composer.xml.h:41 +msgid "_Cc Field" +msgstr "TrÆ°á»ng _CC" + +#: ../ui/evolution-message-composer.xml.h:44 +#: ../ui/evolution-message-composer.xml.h:43 +msgid "_Delete all" +msgstr "_Xoá bá» tất cả" + +#: ../ui/evolution-message-composer.xml.h:47 +#: ../ui/evolution-message-composer.xml.h:46 +msgid "_From Field" +msgstr "TrÆ°á»ng _From (Từ)" + +#: ../app/actions/file-actions.c:71 ../src/Actions.cs:52 app/menubar.c:413 +msgid "_Open..." +msgstr "_Mở..." 
+ +#: ../ui/evolution-message-composer.xml.h:50 +#: ../ui/evolution-message-composer.xml.h:49 +msgid "_Post-To Field" +msgstr "TrÆ°á»ng _Post-To (Gởi cho nhóm tin tức)" + +#: ../ui/evolution-message-composer.xml.h:51 +msgid "_Prioritise Message" +msgstr "_Ưu tiên hóa thÆ°" + +#: ../ui/evolution-message-composer.xml.h:52 +#: ../ui/evolution-message-composer.xml.h:50 +msgid "_Reply-To Field" +msgstr "TrÆ°á»ng _Reply-To (Trả lá»i)" + +#: ../ui/evolution-message-composer.xml.h:54 +#: ../ui/evolution-message-composer.xml.h:52 +msgid "_Security" +msgstr "_Bảo mật" + +#: ../ui/evolution-message-composer.xml.h:55 +#: ../ui/evolution-message-composer.xml.h:53 +msgid "_To Field" +msgstr "TrÆ°á»ng To (Cho)" + +#: ../ui/evolution-signature-editor.xml.h:1 +msgid "C_lose" +msgstr "Äón_g" + +#: ../ui/evolution-signature-editor.xml.h:15 +#: ../ui/evolution-signature-editor.xml.h:5 +msgid "H_TML" +msgstr "H_TML" + +#: ../ui/evolution-signature-editor.xml.h:16 +#: ../ui/evolution-signature-editor.xml.h:7 +msgid "Save and Close" +msgstr "LÆ°u và Äóng" + +#: ../ui/evolution-signature-editor.xml.h:20 +msgid "Save and _Close" +msgstr "LÆ°u và Äón_g" + +#: ../ui/evolution-signature-editor.xml.h:21 +#: ../ui/evolution-signature-editor.xml.h:10 +msgid "Save the current file and close the window" +msgstr "LÆ°u tập tin hiện thá»i và đóng cá»­a sổ" + +#: ../ui/evolution-subscribe.xml.h:1 +msgid "Add folder to your list of subscribed folders" +msgstr "Thêm thÆ° mục vào danh sách những thÆ° mục đăng ký" + +#: ../ui/evolution-subscribe.xml.h:3 +msgid "Refresh List" +msgstr "Cập nhật danh sách" + +#: ../ui/evolution-subscribe.xml.h:4 +msgid "Refresh List of Folders" +msgstr "Cập nhật danh sách các thÆ° mục" + +#: ../ui/evolution-subscribe.xml.h:5 +msgid "Remove folder from your list of subscribed folders" +msgstr "Gỡ bá» thÆ° mục khá»i danh sách các thÆ° mục đã đăng ký" + +#: ../ui/evolution-subscribe.xml.h:7 ../extensions/rss/rss-ui.c:591 +#: ../glade/straw.glade.h:60 ../src/lib/subscribe.py:173 +msgid "Subscribe" +msgstr "Äăng ký" + +#: ../ui/evolution-subscribe.xml.h:8 +msgid "Unsubscribe" +msgstr "BỠđăng ký" + +#: ../ui/evolution-subscribe.xml.h:12 +msgid "_Invert Selection" +msgstr "_Äảo vùng chá»n" + +#: ../ui/evolution-task-editor.xml.h:4 +msgid "Click change / view the status details of the task" +msgstr "Nhấn để thay đổi/xem chi tiết trạng thái của tác vụ" + +#: ../ui/evolution-task-editor.xml.h:10 +msgid "Status Details" +msgstr "Chi tiết trạng thái" + +#: ../ui/evolution-task-editor.xml.h:11 +msgid "Time Zone" +msgstr "Múi giá»" + +#: ../ui/evolution-task-editor.xml.h:23 +msgid "_Status Details" +msgstr "_Chi tiết trạng thái" + +#: ../ui/evolution-tasks.xml.h:3 +msgid "Copy selected task" +msgstr "Chép tác vụ đã chá»n" + +#: ../ui/evolution-tasks.xml.h:5 +msgid "Cut selected task" +msgstr "Cắt tác vụ đã chá»n" + +#: ../ui/evolution-tasks.xml.h:7 +msgid "Delete completed tasks" +msgstr "Xoá bá» má»i tác vụ hoàn tất" + +#: ../ui/evolution-tasks.xml.h:8 +msgid "Delete selected tasks" +msgstr "Xoá bá» các tác vụ được chá»n" + +#: ../ui/evolution-tasks.xml.h:9 +msgid "Mar_k as Complete" +msgstr "Nhãn _hoàn tất" + +#: ../ui/evolution-tasks.xml.h:10 +msgid "Mark selected tasks as complete" +msgstr "Äánh dấu các tác vụ được chá»n là hoàn tất" + +#: ../ui/evolution-tasks.xml.h:12 +msgid "Paste task from the clipboard" +msgstr "Dán tác vụ từ bảng tạm" + +#: ../ui/evolution-tasks.xml.h:13 +msgid "Previews the list of tasks to be printed" +msgstr "Xem thá»­ danh sách các tác vụ cần in" + +#: ../ui/evolution-tasks.xml.h:16 
+msgid "Print the list of tasks" +msgstr "In danh sách các tác vụ" + +#: ../ui/evolution-tasks.xml.h:18 +msgid "Show task preview window" +msgstr "Hiện khung xem thá»­ tác vụ" + +#: ../ui/evolution-tasks.xml.h:19 +msgid "Task _Preview" +msgstr "_Xem thá»­ tác vụ" + +#: ../ui/evolution-tasks.xml.h:20 ../ui/evolution-tasks.xml.h:18 +msgid "View the selected task" +msgstr "Xem tác vụ được chá»n" + +#: ../ui/evolution-tasks.xml.h:27 ../ui/evolution-tasks.xml.h:25 +msgid "_Open Task" +msgstr "_Mở tác vụ" + +#: ../ui/evolution.xml.h:1 +msgid "About Evolution..." +msgstr "Giá»›i thiệu vá» Evolution..." + +#: ../ui/evolution.xml.h:2 +msgid "Change Evolution's settings" +msgstr "Äổi thiết lập Evolution" + +#: ../ui/evolution.xml.h:3 +msgid "Change the visibility of the toolbar" +msgstr "Hiện/Ẩn thanh công cụ" + +#: ../ui/evolution.xml.h:5 +msgid "Create a new window displaying this folder" +msgstr "Tạo cá»­a sổ má»›i hiển thị thÆ° mục này" + +#: ../ui/evolution.xml.h:6 +msgid "Display window buttons using the desktop toolbar setting" +msgstr "" +"Hiển thị má»i cái nút cá»­a sổ dùng thiết lập thanh công cụ của màn hình ná»n" + +#: ../ui/evolution.xml.h:7 +msgid "Display window buttons with icons and text" +msgstr "Hiển thị má»i cái nút cá»­a sổ dùng ảnh và chữ Ä‘á»u" + +#: ../ui/evolution.xml.h:8 +msgid "Display window buttons with icons only" +msgstr "Hiển thị má»i cái nút cá»­a sổ dùng chỉ ảnh thôi" + +#: ../ui/evolution.xml.h:9 +msgid "Display window buttons with text only" +msgstr "Hiển thị má»i cái nút cá»­a sổ dùng chỉ chữ thôi" + +#: ../ui/evolution.xml.h:10 src/ui.cc:1450 src/ui.cc:1385 +#: ../src/ghex-ui.xml.h:20 +msgid "Exit the program" +msgstr "Thoát khá»i chÆ°Æ¡ng trình" + +#: ../ui/evolution.xml.h:11 ../ui/evolution.xml.h:12 +msgid "Forget _Passwords" +msgstr "Quên các _mật khẩu" + +#: ../ui/evolution.xml.h:12 ../ui/evolution.xml.h:13 +msgid "Forget remembered passwords so you will be prompted for them again" +msgstr "" +"Quên Ä‘i các mật khẩu đã nhá»›, nhÆ° vậy bạn sẽ lại được nhắc nhập mật khẩu" + +#: ../ui/evolution.xml.h:13 ../ui/evolution.xml.h:14 +msgid "Hide window buttons" +msgstr "Ẩn má»i nút cá»­a sổ" + +#: ../ui/evolution.xml.h:14 ../ui/evolution.xml.h:15 +msgid "Icons _and text" +msgstr "Ảnh _và chữ" + +#: ../ui/evolution.xml.h:15 ../ui/evolution.xml.h:16 +msgid "Import data from other programs" +msgstr "Nhập dữ liệu từ chÆ°Æ¡ng trình khác" + +#: ../ui/evolution.xml.h:17 +msgid "New _Window" +msgstr "Cá»­a sổ má»›_i" + +#: ../ui/evolution.xml.h:18 ../shell/rb-shell.c:386 +msgid "Prefere_nces" +msgstr "Tù_y thích" + +#: ../ui/evolution.xml.h:19 ../ui/evolution.xml.h:20 +msgid "Send / Receive" +msgstr "Gởi / Nhận" + +#: ../ui/evolution.xml.h:20 +msgid "Send / _Receive" +msgstr "Gởi / _Nhận" + +#: ../ui/evolution.xml.h:21 +msgid "Send queued items and retrieve new items" +msgstr "Gởi các mục Ä‘ang đợi gởi và nhận các mục má»›i" + +#: ../ui/evolution.xml.h:22 +msgid "Set up Pilot configuration" +msgstr "Thiết lập cấu hình Pilot" + +#: ../ui/evolution.xml.h:23 +msgid "Show information about Evolution" +msgstr "Hiện thông tin vá» Evolution" + +#: ../ui/evolution.xml.h:24 +msgid "Submit Bug Report" +msgstr "Gởi báo cáo lá»—i" + +#: ../ui/evolution.xml.h:29 ../ui/evolution.xml.h:25 +msgid "Submit _Bug Report" +msgstr "_Gởi báo cáo lá»—i" + +#: ../ui/evolution.xml.h:30 ../ui/evolution.xml.h:26 +msgid "Submit a bug report using Bug Buddy" +msgstr "Báo cáo lá»—i, dùng Bug Buddy" + +#: ../ui/evolution.xml.h:31 ../ui/evolution.xml.h:27 +msgid "Toggle whether we are working offline." 
+msgstr "Bật tắt hoạt Ä‘á»™ng ngoại tuyến" + +#: ../ui/evolution.xml.h:32 ../ui/evolution.xml.h:28 +msgid "Tool_bar" +msgstr "_Thanh công cụ" + +#: ../ui/evolution.xml.h:33 ../ui/evolution.xml.h:29 +msgid "Tool_bar style" +msgstr "_Kiểu thanh công cụ" + +#: ../ui/evolution.xml.h:34 +msgid "View/Hide the Status Bar" +msgstr "Xem/Ẩn thanh trạng thái" + +#: ../ui/evolution.xml.h:35 ../ui/evolution.xml.h:30 +msgid "_About Evolution..." +msgstr "_Giá»›i thiệu Evolution..." + +#: ../ui/evolution.xml.h:36 src/fe-gtk/menu.c:1828 +msgid "_Close Window" +msgstr "_Äóng cá»­a sổ" + +#: ../ui/evolution.xml.h:40 ../ui/evolution.xml.h:35 +msgid "_Hide buttons" +msgstr "Ẩ_n nút" + +#: ../ui/evolution.xml.h:41 ../ui/evolution.xml.h:36 +msgid "_Icons only" +msgstr "_Chỉ ảnh thôi" + +#: ../ui/evolution.xml.h:42 ../ui/evolution.xml.h:37 ../src/f-spot.glade.h:167 +#: src/interface.c:517 src/interface.c:168 +msgid "_Import..." +msgstr "_Nhập..." + +#: ../ui/evolution.xml.h:44 ../ui/evolution.xml.h:39 +msgid "_Quick Reference" +msgstr "_Tham khảo nhanh" + +#: ../ui/evolution.xml.h:46 ../ui/evolution.xml.h:42 +msgid "_Switcher Appearance" +msgstr "Hình thức bá»™ _chuyển đổi" + +#: ../ui/evolution.xml.h:47 ../ui/evolution.xml.h:43 +msgid "_Synchronization Options..." +msgstr "Tùy chá»n _đồng bá»™..." + +#: ../ui/evolution.xml.h:48 ../ui/evolution.xml.h:44 +msgid "_Text only" +msgstr "Chỉ _chữ thôi" + +#: ../ui/evolution.xml.h:50 +msgid "_View Status Bar" +msgstr "_Xem thanh trạng thái" + +#: ../ui/evolution.xml.h:51 src/fe-gtk/menu.c:1305 ../ui/evolution.xml.h:46 +#: ../gnopi/gnopi_files/Find/find.glade2.h:35 src/fe-gtk/menu.c:1438 +msgid "_Window" +msgstr "_Cá»­a sổ" + +#: ../views/addressbook/galview.xml.h:1 +msgid "By _Company" +msgstr "Theo Công t_y" + +#: ../views/addressbook/galview.xml.h:2 +msgid "_Address Cards" +msgstr "_Thẻ địa chỉ" + +#: ../views/addressbook/galview.xml.h:3 +msgid "_Phone List" +msgstr "Danh sách Ä‘iện th_oại" + +#: ../views/calendar/galview.xml.h:1 +msgid "W_eek View" +msgstr "Khung _tuần" + +#: ../views/calendar/galview.xml.h:2 +msgid "_Day View" +msgstr "Khung n_gày" + +#: ../views/calendar/galview.xml.h:3 +msgid "_List View" +msgstr "Khung xem _danh sách" + +#: ../views/calendar/galview.xml.h:4 +msgid "_Month View" +msgstr "Khung t_háng" + +#: ../views/calendar/galview.xml.h:5 +msgid "_Work Week View" +msgstr "Khung _tuần làm việc" + +#: ../views/mail/galview.xml.h:1 +msgid "As _Sent Folder" +msgstr "Theo _thÆ° mục gởi" + +#: ../views/mail/galview.xml.h:2 +msgid "By S_tatus" +msgstr "Theo t_rạng thái" + +#: ../views/mail/galview.xml.h:3 +msgid "By Se_nder" +msgstr "Theo ngÆ°á»i _gởi" + +#: ../views/mail/galview.xml.h:4 +msgid "By Su_bject" +msgstr "Theo _chủ Ä‘á»" + +#: ../views/mail/galview.xml.h:5 +msgid "By _Follow Up Flag" +msgstr "Theo _cá» theo dõi tiếp" + +#: ../views/mail/galview.xml.h:6 po/silky.glade.h:217 +#, fuzzy +msgid "_Messages" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"_ThÆ°\n" +"#-#-#-#-# silky-0.5.3pre1.vi.po (silky-0.5.3pre1) #-#-#-#-#\n" +"Tin _nhẳn" + +#: ../views/memos/galview.xml.h:1 +msgid "_Memos" +msgstr "Ghi _nhá»›" + +#: ../views/tasks/galview.xml.h:1 +msgid "With _Due Date" +msgstr "Vá»›i ngày đến _hạn" + +#: ../views/tasks/galview.xml.h:2 +msgid "With _Status" +msgstr "Vá»›i _trạng thái" + +#: ../Sensors/Clock/__init__.py:48 ../src/util.c:301 +msgid "UTC" +msgstr "Giá» thế giá»›i" + +#: ../widgets/e-timezone-dialog/e-timezone-dialog.glade.h:2 +msgid "Time Zones" +msgstr "Múi giá»" + +#: 
../widgets/e-timezone-dialog/e-timezone-dialog.glade.h:3 +msgid "_Selection" +msgstr "Äiá»u _chá»n" + +#: ../widgets/e-timezone-dialog/e-timezone-dialog.glade.h:5 +msgid "Select a Time Zone" +msgstr "Chá»n múi giá»" + +#: ../widgets/e-timezone-dialog/e-timezone-dialog.glade.h:6 +msgid "TimeZone Combobox" +msgstr "Há»™p tổ hợp múi giá»" + +#: ../widgets/e-timezone-dialog/e-timezone-dialog.glade.h:7 +msgid "" +"Use the left mouse button to zoom in on an area of the map and select a time " +"zone.\n" +"Use the right mouse button to zoom out." +msgstr "" +"Hãy dùng nút chuá»™t trái để phóng to vùng trên bản đồ và chá»n múi giá».\n" +"Dùng nút chuá»™t phải để thu nhá»." + +#: gal/menus/gal-define-views-model.c:185 +msgid "Collection" +msgstr "Tập hợp" + +#: gal/menus/gal-define-views.glade.h:4 +#, no-c-format +msgid "Define Views for %s" +msgstr "Äịnh nghÄ©a khung xem cho « %s »" + +#: gal/menus/gal-define-views-dialog.c:322 +msgid "Define Views" +msgstr "Äịnh nghÄ©a khung xem" + +#: gal/menus/gal-define-views.glade.h:2 +#, no-c-format +msgid "Define Views for \"%s\"" +msgstr "Äịnh nghÄ©a khung xem cho « %s »" + +#: ../providers/odbc/gda-odbc-provider.c:1271 ../widgets/gtk+.xml.in.h:178 +#: ../src/form-editor/widget-util.cc:224 ../src/orca/rolenames.py:423 +msgid "Table" +msgstr "Bảng" + +#: ../widgets/menus/gal-view-instance-save-as-dialog.c:182 +#: gal/menus/gal-view-instance-save-as-dialog.c:183 +msgid "Instance" +msgstr "tức thá»i" + +#: ../widgets/menus/gal-view-instance-save-as-dialog.c:232 +#: gal/menus/gal-view-instance-save-as-dialog.c:229 +msgid "Save Current View" +msgstr "LÆ°u khung xem hiện có" + +#: ../widgets/menus/gal-view-instance-save-as-dialog.glade.h:3 +#: gal/menus/gal-view-instance-save-as-dialog.glade.h:3 +msgid "_Create new view" +msgstr "Tạo khung xem _má»›i" + +#: ../widgets/menus/gal-view-instance-save-as-dialog.glade.h:5 +#: gal/menus/gal-view-instance-save-as-dialog.glade.h:5 +msgid "_Replace existing view" +msgstr "Tha_y thế khung xem đã có" + +#: ../widgets/menus/gal-view-instance.c:585 gal/menus/gal-view-instance.c:574 +msgid "Custom View" +msgstr "Khung xem tá»± chá»n" + +#: ../widgets/menus/gal-view-instance.c:587 ../calendar/gui/gnome-cal.c:2210 +#: ../widgets/menus/gal-view-instance.c:586 gal/menus/gal-view-instance.c:575 +msgid "Save Custom View" +msgstr "LÆ°u khung xem tá»± chá»n" + +#: ../widgets/menus/gal-view-instance.c:590 gal/menus/gal-view-instance.c:579 +msgid "Define Views..." +msgstr "Äịnh nghÄ©a khung xem" + +#: ../widgets/menus/gal-view-menus.c:359 +msgid "Save Custom View..." +msgstr "LÆ°u khung xem tá»± chá»n..." + +#: ../widgets/menus/gal-view-new-dialog.c:80 +#: gal/menus/gal-view-new-dialog.c:77 ../sheets/IsometricMap.sheet.in.h:14 +msgid "Factory" +msgstr "Bá»™ tạo" + +#: ../widgets/menus/gal-view-new-dialog.c:115 +#: gal/menus/gal-view-new-dialog.c:108 +msgid "Define New View" +msgstr "Äịnh nghÄ©a khung xem má»›i" + +#: ../widgets/menus/gal-view-new-dialog.glade.h:2 +#: gal/menus/gal-view-new-dialog.glade.h:2 +msgid "Name of new view:" +msgstr "Tên khung xem má»›i:" + +#: ../widgets/menus/gal-view-new-dialog.glade.h:3 +msgid "Type of View" +msgstr "Loại khung xem" + +#: ../widgets/menus/gal-view-new-dialog.glade.h:4 +#: gal/menus/gal-view-new-dialog.glade.h:3 +msgid "Type of view:" +msgstr "Loại khung xem:" + +#. Translators: These are the first characters of each day of the +#. week, 'M' for 'Monday', 'T' for Tuesday etc. 
+#: ../widgets/misc/e-calendar-item.c:415
+msgid "MTWTFSS"
+msgstr "HBTNSBC"
+
+#: ../widgets/misc/e-calendar-item.c:1099 ../gncal/calendar-month.c:204
+msgid "%B %Y"
+msgstr "%B %Y"
+
+#: ../widgets/misc/e-calendar.c:177 ../widgets/misc/e-calendar.c:201
+msgid "Previous Button"
+msgstr "Nút trước"
+
+#: ../widgets/misc/e-calendar.c:226
+msgid "Month Calendar"
+msgstr "Lịch tháng"
+
+#: ../lib/properties.h:503 ../lib/properties.c:75 ../lib/properties.h:499
+#: ../lib/properties.h:502
+msgid "Fill color"
+msgstr "Màu tô"
+
+#: gal/e-text/e-text.c:3531 gal/e-text/e-text.c:3538 gal/e-text/e-text.c:3539
+msgid "GDK fill color"
+msgstr "Màu tô đầy GDK"
+
+#: gal/e-text/e-text.c:3546
+msgid "Fill stipple"
+msgstr "Tô đầy dùng thuật vẽ bằng chấm"
+
+#: ../widgets/misc/e-canvas-background.c:484
+#: ../widgets/misc/e-canvas-background.c:485
+msgid "X1"
+msgstr "X1"
+
+#: ../widgets/misc/e-canvas-background.c:491
+#: ../widgets/misc/e-canvas-background.c:492
+msgid "X2"
+msgstr "X2"
+
+#: ../widgets/misc/e-canvas-background.c:498
+#: ../widgets/misc/e-canvas-background.c:499
+msgid "Y1"
+msgstr "Y1"
+
+#: ../widgets/misc/e-canvas-background.c:505
+#: ../widgets/misc/e-canvas-background.c:506
+msgid "Y2"
+msgstr "Y2"
+
+#: gal/e-table/e-table-group-container.c:983
+msgid "Minimum width"
+msgstr "Độ rộng tối thiểu"
+
+#: gal/e-table/e-table-group-container.c:984 ../gtk/gtktreeviewcolumn.c:279
+msgid "Minimum Width"
+msgstr "Độ rộng tối thiểu"
+
+#: ../gtk/gtktreeviewcolumn.c:251 ../app/widgets/gimpgrideditor.c:242
+#: ../app/dia-props.c:136
+msgid "Spacing"
+msgstr "Khoảng cách"
+
+#: ../widgets/misc/e-cell-date-edit.c:233 ../widgets/misc/e-dateedit.c:460
+#: ../widgets/misc/e-cell-date-edit.c:232 ../widgets/misc/e-dateedit.c:451
+#: ../src/personal_info.c:269
+msgid "Now"
+msgstr "Bây giờ"
+
+#: ../widgets/misc/e-cell-date-edit.c:802
+#: ../widgets/misc/e-cell-date-edit.c:801
+#, c-format
+msgid "The time must be in the format: %s"
+msgstr "Thời gian phải theo dạng thức: %s"
+
+#: ../widgets/misc/e-cell-percent.c:76
+msgid "The percent value must be between 0 and 100, inclusive"
+msgstr "Giá trị phần trăm phải nằm giữa 0 và 100, kể cả hai số đó"
+
+#: ../widgets/misc/e-charset-picker.c:63 ../pan/pan-charset-picker.c:38
+#: ../pan/pan-charset-picker.c:39 ../pan/pan-charset-picker.c:40
+msgid "Baltic"
+msgstr "Ban-tích"
+
+#: ../pan/pan-charset-picker.c:42
+msgid "Central European"
+msgstr "Trung Âu"
+
+#: ../src/util.c:394
+msgid "Chinese"
+msgstr "Trung Quốc"
+
+#: ../pan/pan-charset-picker.c:46 ../pan/pan-charset-picker.c:47
+msgid "Cyrillic"
+msgstr "Ki-rin"
+
+#: ../pan/pan-charset-picker.c:49
+msgid "Greek"
+msgstr "Hy-lạp"
+
+#: ../src/languages.c:153 ../src/util.c:408
+msgid "Hebrew"
+msgstr "Do-thái"
+
+#: ../pan/pan-charset-picker.c:50
+msgid "Japanese"
+msgstr "Nhật-bản"
+
+#: ../pan/pan-charset-picker.c:51
+msgid "Korean"
+msgstr "Hàn Quốc"
+
+#: ../src/languages.c:297 ../src/util.c:436 ../pan/pan-charset-picker.c:52
+msgid "Turkish"
+msgstr "Thổ-nhĩ-kỳ"
+
+#: src/galeon-prefs-dialog.c:647
+msgid "Unicode"
+msgstr "Unicode"
+
+#: ../widgets/misc/e-charset-picker.c:73
+msgid "Western European"
+msgstr "Tây Âu"
+
+#: ../widgets/misc/e-charset-picker.c:74
+msgid "Western European, New"
+msgstr "Tây Âu, Mới"
+
+#: ../desktop-themes/Traditional/index.theme.in.h:1
+msgid "Traditional"
+msgstr "Truyền thống"
+
+#: ../widgets/misc/e-charset-picker.c:94 ../widgets/misc/e-charset-picker.c:95
+#: ../widgets/misc/e-charset-picker.c:96 ../widgets/misc/e-charset-picker.c:97
+msgid 
"Simplified" +msgstr "ÄÆ¡n giản" + +#: ../widgets/misc/e-charset-picker.c:100 ../src/languages.c:301 +#: ../src/util.c:437 +msgid "Ukrainian" +msgstr "U-cợ-rainh" + +#: ../widgets/misc/e-charset-picker.c:103 +msgid "Visual" +msgstr "Trá»±c quan" + +#: ../widgets/misc/e-charset-picker.c:171 +#, c-format +msgid "Unknown character set: %s" +msgstr "Bá»™ ký tá»± lạ: %s" + +#: ../widgets/misc/e-charset-picker.c:216 +#: ../widgets/misc/e-charset-picker.c:479 +msgid "Character Encoding" +msgstr "Mã ký tá»±" + +#: ../widgets/misc/e-charset-picker.c:231 +msgid "Enter the character set to use" +msgstr "Hãy gõ bá»™ ký tá»± cần dùng" + +#: ../src/f-spot.glade.h:4 +msgid "..." +msgstr "..." + +#: ../widgets/misc/e-dateedit.c:320 ../widgets/misc/e-dateedit.c:316 +msgid "Date and Time Entry" +msgstr "Nhập Ngày và Giá»" + +#: ../widgets/misc/e-dateedit.c:339 ../widgets/misc/e-dateedit.c:335 +msgid "Text entry to input date" +msgstr "TrÆ°á»ng nhập để gõ ngày" + +#: ../widgets/misc/e-dateedit.c:340 ../widgets/misc/e-dateedit.c:336 +msgid "Text Date Entry" +msgstr "Chá»— gõ ngày" + +#: ../widgets/misc/e-dateedit.c:361 ../widgets/misc/e-dateedit.c:353 +msgid "Click this button to show a calendar" +msgstr "Nhắp vào cái nút này để hiển thị má»™t lịch" + +#: ../widgets/misc/e-dateedit.c:362 ../widgets/misc/e-dateedit.c:354 +msgid "Date Button" +msgstr "Nút ngày" + +#: ../widgets/misc/e-dateedit.c:383 ../widgets/misc/e-dateedit.c:374 +msgid "Combo box to select time" +msgstr "Há»™p tổ hợp để chá»n giá»" + +#: ../widgets/misc/e-dateedit.c:384 ../widgets/misc/e-dateedit.c:375 +msgid "Time Combo Box" +msgstr "Há»™p tổ hợp giá»" + +#: ../gncal/utils-time.c:48 +msgid "%H:%M" +msgstr "%H:%M" + +#: ../applets/clock/clock.c:1725 +msgid "%I:%M %p" +msgstr "%I:%M %p" + +#: ../widgets/misc/e-expander.c:181 ../gtk/gtkexpander.c:198 +msgid "Expanded" +msgstr "Äã mở rá»™ng" + +#: ../widgets/misc/e-expander.c:182 +msgid "Whether or not the expander is expanded" +msgstr "Có bung mÅ©i tên bung hay không" + +#: ../widgets/misc/e-expander.c:190 +msgid "Text of the expander's label" +msgstr "Chữ trong nhãn mÅ©i tên bung" + +#: ../widgets/misc/e-expander.c:197 +msgid "Use underline" +msgstr "Dùng gạch chân" + +#: ../widgets/misc/e-expander.c:198 +msgid "" +"If set, an underline in the text indicates the next character should be used " +"for the mnemonic accelerator key" +msgstr "Nếu lập thì gạch chân ngú ý ký tá»± sau nó là phím tắt." + +#: ../glade/gbwidgets/gbexpander.c:58 +msgid "Space to put between the label and the child" +msgstr "Số Ä‘iểm ảnh giữa nhãn và con." + +#: ../gtk/gtkframe.c:170 ../gtk/gtktoolbutton.c:201 +msgid "Label widget" +msgstr "Ô Ä‘iá»u khiển nhãn" + +#: ../widgets/misc/e-expander.c:216 +msgid "A widget to display in place of the usual expander label" +msgstr "Má»™t ô Ä‘iá»u khiển để hiển thị trong chá»— nhãn của mÅ©i tên bung thÆ°á»ng" + +#: ../gtk/gtktreeview.c:716 +msgid "Expander Size" +msgstr "Cỡ mÅ©i tên bung" + +#: ../gtk/gtktreeview.c:717 +msgid "Size of the expander arrow" +msgstr "Cỡ mÅ©i tên bung" + +#: ../widgets/misc/e-expander.c:231 +msgid "Indicator Spacing" +msgstr "Dấu cách chỉ báo" + +#: ../widgets/misc/e-expander.c:232 +msgid "Spacing around expander arrow" +msgstr "Dấu cách ở quanh mÅ©i tên bung" + +#: ../widgets/misc/e-filter-bar.c:201 ../widgets/misc/e-filter-bar.c:195 +msgid "_Searches" +msgstr "Việc tìm _kiếm" + +#: ../widgets/misc/e-filter-bar.c:203 +msgid "Searches" +msgstr "Việc tìm kiếm" + +#. FIXME: get the toplevel window... 
+#: ../widgets/misc/e-filter-bar.c:226 ../widgets/misc/e-filter-bar.c:220 +msgid "Save Search" +msgstr "LÆ°u việc tìm kiếm" + +#: ../widgets/misc/e-filter-bar.h:92 ../widgets/misc/e-filter-bar.h:99 +msgid "_Save Search..." +msgstr "_LÆ°u việc tìm kiếm..." + +#: ../widgets/misc/e-filter-bar.h:93 ../widgets/misc/e-filter-bar.h:100 +msgid "_Edit Saved Searches..." +msgstr "_Sá»­a đổi việc tìm kiếm đã lÆ°u..." + +#: ../widgets/misc/e-filter-bar.h:94 ../widgets/misc/e-filter-bar.h:101 +msgid "_Advanced Search..." +msgstr "Tìm kiếm cấp c_ao" + +#: ../widgets/misc/e-image-chooser.c:172 +msgid "Choose Image" +msgstr "Chá»n ảnh" + +#: ../widgets/misc/e-map.c:651 ../widgets/misc/e-map.c:647 +msgid "World Map" +msgstr "Bản đồ thế giá»›i" + +#: ../widgets/misc/e-map.c:653 ../widgets/misc/e-map.c:649 +msgid "" +"Mouse-based interactive map widget for selecting timezone. Keyboard users " +"should select the timezone from the below combo box instead." +msgstr "" +"Ô Ä‘iá»u khiển bản đồ tÆ°Æ¡ng tác Ä‘á»±a vào con chuá»™t để chá»n múi giá». NgÆ°á»i thích " +"dùng bàn phím thì nên chá»n múi giá» trong há»™p tổ hợp bên dÆ°á»›i thay vào đó." + +#: ../widgets/misc/e-pilot-settings.c:103 +msgid "Sync with:" +msgstr "Äồng bá»™ hóa vá»›i:" + +#: ../widgets/misc/e-pilot-settings.c:111 +msgid "Sync Private Records:" +msgstr "Äồng bá»™ hóa mục ghi riêng:" + +#: ../widgets/misc/e-pilot-settings.c:120 +msgid "Sync Categories:" +msgstr "Phân loại đồng bá»™ :" + +#: ../widgets/misc/e-reflow.c:1452 ../widgets/misc/e-reflow.c:1453 +msgid "Empty message" +msgstr "ThÆ° rá»—ng" + +#: ../widgets/misc/e-reflow.c:1459 ../widgets/misc/e-reflow.c:1460 +msgid "Reflow model" +msgstr "Mẫu thông lượng lại" + +#: ../widgets/misc/e-reflow.c:1466 ../widgets/misc/e-reflow.c:1467 +msgid "Column width" +msgstr "Rá»™ng cá»™t" + +#: ../widgets/misc/e-search-bar.c:357 ../widgets/misc/e-search-bar.c:345 +msgid "Search Text Entry" +msgstr "Chá»— gõ chữ tìm kiếm" + +#: ../Tomboy/NoteWindow.cs:419 +msgid "_Search" +msgstr "Tìm _kiếm" + +#: ../widgets/misc/e-search-bar.c:582 ../widgets/misc/e-search-bar.c:555 +#: ../storage/sunone-subscription-dialog.glade.h:2 +msgid "_Find Now" +msgstr "Tìm n_gay" + +#: ../src/menus.c:60 ../gtk/gtkstock.c:322 ../plug-ins/gfig/gfig-dialog.c:880 +#: ../src/Actions.cs:96 +msgid "_Clear" +msgstr "_Xoá" + +#: ../widgets/misc/e-search-bar.c:674 ../widgets/misc/e-search-bar.c:647 +msgid "Search Type" +msgstr "Kiểu tìm kiếm" + +#: ../widgets/misc/e-search-bar.c:878 ../widgets/misc/e-search-bar.c:851 +msgid "Item ID" +msgstr "ID mục" + +#: ../widgets/misc/e-search-bar.c:885 ../widgets/misc/e-search-bar.c:858 +msgid "Subitem ID" +msgstr "ID mục con" + +#: ../widgets/misc/e-search-bar.c:972 ../widgets/misc/e-search-bar.c:945 +msgid "Find _Now" +msgstr "Tìm _ngay" + +#: gal/widgets/e-selection-model-array.c:543 +msgid "Cursor Row" +msgstr "Hàng con trá»" + +#: gal/widgets/e-selection-model-array.c:550 +msgid "Cursor Column" +msgstr "Cá»™t con trá»" + +#: ../widgets/misc/e-selection-model.c:214 gal/widgets/e-selection-model.c:210 +msgid "Sorter" +msgstr "Bá»™ sắp xếp" + +#: ../widgets/misc/e-selection-model.c:221 gal/widgets/e-selection-model.c:217 +msgid "Selection Mode" +msgstr "Chế Ä‘á»™ lá»±a chá»n" + +#: ../widgets/misc/e-selection-model.c:229 gal/widgets/e-selection-model.c:225 +msgid "Cursor Mode" +msgstr "Chế Ä‘á»™ con trá»" + +#: ../widgets/misc/e-send-options.c:524 ../widgets/misc/e-send-options.c:521 +msgid "When de_leted:" +msgstr "Khi _xoá bá» :" + +#: ../widgets/misc/e-send-options.glade.h:1 +msgid "Delivery 
Options" +msgstr "Tùy chá»n gởi" + +#: ../widgets/misc/e-send-options.glade.h:2 +msgid "Replies" +msgstr "Trả lá»i" + +#: ../widgets/misc/e-send-options.glade.h:3 +msgid "Return Notification" +msgstr "Trở vá» thông báo" + +#: ../widgets/misc/e-send-options.glade.h:4 +msgid "Status Tracking" +msgstr "Theo dõi trạng thái" + +#: ../widgets/misc/e-send-options.glade.h:5 +msgid "A_uto-delete sent item" +msgstr "_Tá»± Ä‘á»™ng xoá bá» mục đã gởi" + +#: ../widgets/misc/e-send-options.glade.h:6 +msgid "C_lassification" +msgstr "_Phân loại" + +#: ../widgets/misc/e-send-options.glade.h:7 +msgid "Creat_e a sent item to track information" +msgstr "Tạ_o mục đã gởi để theo dõi thông tin" + +#: ../widgets/misc/e-send-options.glade.h:8 +msgid "Deli_vered and opened" +msgstr "Äã _phát và mở" + +#: ../widgets/misc/e-send-options.glade.h:9 +msgid "Gene_ral Options" +msgstr "Tùy chá»n ch_ung" + +#: ../widgets/misc/e-send-options.glade.h:10 +msgid "" +"None\n" +"Mail Receipt" +msgstr "" +"Không có\n" +"Thông báo đã Ä‘á»c" + +#: ../widgets/misc/e-send-options.glade.h:12 +msgid "" +"Public\n" +"Private\n" +"Confidential\n" +msgstr "" +"Công\n" +"Riêng\n" +"Tin tưởng\n" + +#: ../widgets/misc/e-send-options.glade.h:16 +msgid "R_eply requested" +msgstr "Yêu cầu t_rả lá»i" + +#: ../widgets/misc/e-send-options.glade.h:18 +msgid "Sta_tus Tracking" +msgstr "Theo dõi _trạng thái" + +#: ../widgets/misc/e-send-options.glade.h:19 +msgid "" +"Undefined\n" +"High\n" +"Standard\n" +"Low" +msgstr "" +"ChÆ°a định nghÄ©a\n" +"Cao\n" +"Chuẩn\n" +"Thấp" + +#: ../widgets/misc/e-send-options.glade.h:23 +msgid "W_ithin" +msgstr "Ở tr_ong" + +#: ../widgets/misc/e-send-options.glade.h:24 +msgid "When acce_pted:" +msgstr "Khi chấ_p nhận" + +#: ../widgets/misc/e-send-options.glade.h:25 +msgid "When co_mpleted:" +msgstr "Khi _hoàn tất:" + +#: ../widgets/misc/e-send-options.glade.h:26 +msgid "When decli_ned:" +msgstr "Khi bị từ chối:" + +#: ../widgets/misc/e-send-options.glade.h:27 +msgid "_After:" +msgstr "_Sau :" + +#: ../widgets/misc/e-send-options.glade.h:28 +msgid "_All information" +msgstr "_Má»i thông tin" + +#: ../widgets/misc/e-send-options.glade.h:29 +msgid "_Delay message delivery" +msgstr "Gởi t_rá»… thÆ°" + +#: ../widgets/misc/e-send-options.glade.h:30 +msgid "_Delivered" +msgstr "Äã _phát" + +#: ../widgets/misc/e-send-options.glade.h:32 +msgid "_Set expiration date" +msgstr "_Lập ngày hết hạn" + +#: ../widgets/misc/e-send-options.glade.h:33 +msgid "_Until:" +msgstr "_Äến khi:" + +#: ../widgets/misc/e-send-options.glade.h:34 +msgid "_When convenient" +msgstr "Khi _tiện" + +#: ../widgets/misc/e-send-options.glade.h:35 +msgid "_When opened:" +msgstr "Khi _mở :" + +# Variable: do not translate/ biến: đừng dịch +#: ../widgets/misc/e-task-widget.c:208 +#, c-format +msgid "%s (...)" +msgstr "%s (...)" + +#: ../widgets/misc/e-task-widget.c:213 +#, c-format +msgid "%s (%d%% complete)" +msgstr "%s (%d%% hoàn tất)" + +#: ../widgets/misc/e-url-entry.c:107 +msgid "click here to go to url" +msgstr "nhấn đây để Ä‘i tá»›i địa chỉ Mạng" + +#: ../widgets/misc/gal-categories.glade.h:2 +#: gal/widgets/gal-categories.glade.h:2 +msgid "Edit Master Category List..." +msgstr "Sá»­a đổi danh sách phân loại chính..." 
+ +#: ../widgets/misc/gal-categories.glade.h:3 +#: gal/widgets/gal-categories.glade.h:3 +msgid "Item(s) belong to these _categories:" +msgstr "Mục thuá»™c các loại này:" + +#: ../widgets/misc/gal-categories.glade.h:4 +#: gal/widgets/gal-categories.glade.h:4 +msgid "_Available Categories:" +msgstr "_Loại sẵn có :" + +#: ../widgets/misc/gal-categories.glade.h:5 +#: gal/widgets/gal-categories.glade.h:5 +msgid "categories" +msgstr "loại" + +#: ../widgets/table/e-cell-combo.c:177 +msgid "popup list" +msgstr "danh sách bật lên" + +#: ../widgets/table/e-cell-date.c:64 gal/e-table/e-cell-date.c:58 +#: ../applets/clock/clock.c:266 ../applets/clock/clock.c:591 +msgid "%l:%M %p" +msgstr "%l:%M %p" + +#: ../widgets/table/e-cell-pixbuf.c:397 gal/e-table/e-cell-pixbuf.c:392 +msgid "Selected Column" +msgstr "Cá»™t đã chá»n" + +#: ../widgets/table/e-cell-pixbuf.c:404 gal/e-table/e-cell-pixbuf.c:399 +msgid "Focused Column" +msgstr "Cá»™t có tiêu Ä‘iểm" + +#: ../widgets/table/e-cell-pixbuf.c:411 gal/e-table/e-cell-pixbuf.c:406 +msgid "Unselected Column" +msgstr "Cá»™t đã bá» chá»n" + +#: ../widgets/table/e-cell-text.c:1740 gal/e-table/e-cell-text.c:1703 +msgid "Strikeout Column" +msgstr "Cá»™t gạch ngang" + +#: ../widgets/table/e-cell-text.c:1747 gal/e-table/e-cell-text.c:1710 +msgid "Underline Column" +msgstr "Cá»™t gạch dÆ°á»›i" + +#: ../widgets/table/e-cell-text.c:1754 gal/e-table/e-cell-text.c:1717 +msgid "Bold Column" +msgstr "Cá»™t đậm" + +#: ../widgets/table/e-cell-text.c:1761 gal/e-table/e-cell-text.c:1724 +msgid "Color Column" +msgstr "Cá»™t màu" + +#: ../widgets/table/e-cell-text.c:1775 gal/e-table/e-cell-text.c:1738 +msgid "BG Color Column" +msgstr "Cá»™t màu ná»n" + +#: gal/e-table/e-table-config.glade.h:1 +msgid "<- _Remove" +msgstr "↠_Gỡ bá»" + +#: gal/e-table/e-table-config.glade.h:2 +msgid "A_vailable Fields:" +msgstr "TrÆ°á»ng có _sẵn:" + +#: gal/e-table/e-table-config.glade.h:3 +msgid "Ascending" +msgstr "Tăng dần" + +#: gal/e-table/e-table-config.glade.h:4 +msgid "Clear All" +msgstr "Xoá hết" + +#: gal/e-table/e-table-config.glade.h:5 +msgid "Descending" +msgstr "Giảm dần" + +#: gal/e-table/e-table-config.glade.h:8 +msgid "Group Items By" +msgstr "Nhóm lại mục theo" + +#: gal/e-table/e-table-config.glade.h:9 +msgid "Move _Down" +msgstr "Chuyển _xuống" + +#: ../libnautilus-private/nautilus-column-chooser.c:394 ../src/cd-lib.c:377 +msgid "Move _Up" +msgstr "Äem _lên" + +#: ../widgets/table/e-table-config-no-group.glade.h:11 +#: gal/e-table/e-table-config-no-group.glade.h:11 +msgid "Sh_ow these fields in order:" +msgstr "_Hiện những trÆ°á»ng này theo thứ tá»± :" + +#: gal/e-table/e-table-config.glade.h:11 +msgid "Show Fields" +msgstr "Hiện trÆ°á»ng" + +#: gal/e-table/e-table-config.glade.h:12 +msgid "Show field in View" +msgstr "Hiện trÆ°á»ng trong Khung xem" + +#: src/fe-gtk/editlist.c:376 ../gnomecard/gnomecard-main-window.c:656 +#: ../sheets/Flowchart.sheet.in.h:28 +msgid "Sort" +msgstr "Sắp xếp" + +#: gal/e-table/e-table-config.glade.h:14 +msgid "Sort Items By" +msgstr "Sắp xếp mục theo" + +#: gal/e-table/e-table-config.glade.h:15 +msgid "Then By" +msgstr "Rồi theo" + +#: gal/e-table/e-table-config.glade.h:16 +msgid "_Add ->" +msgstr "Th_êm →" + +#: gal/e-table/e-table-config.glade.h:17 +msgid "_Fields Shown..." +msgstr "T_rÆ°á»ng đã hiện..." + +#: gal/e-table/e-table-config.glade.h:20 +msgid "_Sort..." +msgstr "_Sắp xếp..." 
+
+#: ../extensions/permissions/ephy-permissions-dialog.c:527
+#: ../sheets/SDL.sheet.in.h:18 ../sheets/UML.sheet.in.h:28
+#: ../providers/sybase/utils.c:357
+msgid "State"
+msgstr "Tình trạng"
+
+#: ../widgets/table/e-table-config.c:307 ../widgets/table/e-table-config.c:349
+#: gal/e-table/e-table-config.c:309 gal/e-table/e-table-config.c:351
+msgid "(Ascending)"
+msgstr "(Tăng dần)"
+
+#: ../widgets/table/e-table-config.c:307 ../widgets/table/e-table-config.c:349
+#: gal/e-table/e-table-config.c:309 gal/e-table/e-table-config.c:351
+msgid "(Descending)"
+msgstr "(Giảm dần)"
+
+#: ../widgets/table/e-table-config.c:314 gal/e-table/e-table-config.c:316
+msgid "Not sorted"
+msgstr "Chưa sắp xếp"
+
+#: ../widgets/table/e-table-config.c:355 gal/e-table/e-table-config.c:357
+msgid "No grouping"
+msgstr "Chưa nhóm lại"
+
+#: ../widgets/table/e-table-config.c:584
+msgid "Available Fields"
+msgstr "Trường có sẵn"
+
+#: ../widgets/table/e-table-config.glade.h:17
+#: gal/e-table/e-table-config.glade.h:18
+msgid "_Group By..."
+msgstr "_Nhóm lại theo..."
+
+#: ../widgets/table/e-table-config.glade.h:19
+#: gal/e-table/e-table-config.glade.h:19
+msgid "_Show these fields in order:"
+msgstr "_Hiển thị những trường này theo thứ tự :"
+
+#: gal/e-table/e-table-header-item.c:1795
+msgid "DnD code"
+msgstr "Mã DnD"
+
+#: gal/e-table/e-table-header-item.c:1809
+msgid "Full Header"
+msgstr "Phần đầu đầy đủ"
+
+#: ../widgets/table/e-table-field-chooser-dialog.c:126
+#: gal/e-table/e-table-field-chooser-dialog.c:123
+msgid "Add a column..."
+msgstr "Thêm cột..."
+
+#: ../widgets/table/e-table-field-chooser.glade.h:1
+#: gal/e-table/e-table-field-chooser.glade.h:1
+msgid "Field Chooser"
+msgstr "Bộ chọn trường"
+
+#: ../widgets/table/e-table-field-chooser.glade.h:2
+#: gal/e-table/e-table-field-chooser.glade.h:2
+msgid ""
+"To add a column to your table, drag it into\n"
+"the location in which you want it to appear."
+msgstr ""
+"Để thêm một cột vào bảng,\n"
+"hãy kéo nó vào vị trí đã muốn."
+ +#: ../widgets/table/e-table-group-container.c:350 +#: gal/e-table/e-table-group-container.c:355 +#, c-format +msgid "%s : %s (%d item)" +msgstr "%s : %s (%d mục)" + +#: ../widgets/table/e-table-group-container.c:351 +#: gal/e-table/e-table-group-container.c:356 +#, c-format +msgid "%s : %s (%d items)" +msgstr "%s : %s (%d mục)" + +#: ../widgets/table/e-table-group-container.c:356 +#: gal/e-table/e-table-group-container.c:361 +#, c-format +msgid "%s (%d item)" +msgstr "%s (%d mục)" + +#: ../widgets/table/e-table-group-container.c:357 +#: gal/e-table/e-table-group-container.c:362 +#, c-format +msgid "%s (%d items)" +msgstr "%s (%d mục)" + +#: gal/e-table/e-table-group-container.c:907 +msgid "Alternating Row Colors" +msgstr "Màu hàng xen kẽ" + +#: gal/e-table/e-tree.c:3268 +msgid "Horizontal Draw Grid" +msgstr "LÆ°á»›i vẽ ngang" + +#: gal/e-table/e-tree.c:3274 +msgid "Vertical Draw Grid" +msgstr "LÆ°á»›i vẽ dá»c" + +#: gal/e-table/e-tree.c:3280 +msgid "Draw focus" +msgstr "Tiêu Ä‘iểm vẽ" + +#: gal/e-table/e-table-group-container.c:935 +msgid "Cursor mode" +msgstr "Chế Ä‘á»™ con trá»" + +#: gal/e-table/e-table-group-container.c:942 +msgid "Selection model" +msgstr "Mô hình lá»±a chá»n" + +#: gal/e-table/e-tree.c:3261 gal/e-table/e-tree.c:3262 +msgid "Length Threshold" +msgstr "Ngưỡng dài" + +#: gal/e-table/e-tree.c:3293 gal/e-table/e-tree.c:3294 +msgid "Uniform row height" +msgstr "Äá»™ cao hàng không đổi" + +#: gal/e-table/e-table-group-container.c:963 +msgid "Frozen" +msgstr "Äông cứng" + +#: ../widgets/table/e-table-header-item.c:1472 +#: gal/e-table/e-table-header-item.c:1457 +msgid "Customize Current View" +msgstr "Tùy biến khung xem hiện thá»i" + +#: ../widgets/table/e-table-header-item.c:1492 +#: gal/e-table/e-table-header-item.c:1477 +msgid "Sort Ascending" +msgstr "Sắp xếp tăng dần" + +#: ../widgets/table/e-table-header-item.c:1493 +#: gal/e-table/e-table-header-item.c:1478 +msgid "Sort Descending" +msgstr "Sắp xếp giảm dần" + +#: ../widgets/table/e-table-header-item.c:1494 +#: gal/e-table/e-table-header-item.c:1479 +msgid "Unsort" +msgstr "Hủy sắp xếp" + +#: ../widgets/table/e-table-header-item.c:1496 +#: gal/e-table/e-table-header-item.c:1481 +msgid "Group By This Field" +msgstr "Nhóm lại theo trÆ°á»ng này" + +#: ../widgets/table/e-table-header-item.c:1497 +#: gal/e-table/e-table-header-item.c:1482 +msgid "Group By Box" +msgstr "Nhóm lại theo há»™p" + +#: ../widgets/table/e-table-header-item.c:1499 +#: gal/e-table/e-table-header-item.c:1484 +msgid "Remove This Column" +msgstr "Bá» cá»™t này" + +#: ../widgets/table/e-table-header-item.c:1500 +#: gal/e-table/e-table-header-item.c:1485 +msgid "Add a Column..." +msgstr "Thêm cá»™t..." + +#: ../app/tools/gimpclonetool.c:338 ../glade/gbwidgets/gbalignment.c:255 +#: ../widgets/gtk+.xml.in.h:7 +msgid "Alignment" +msgstr "Canh lá»" + +#: gal/e-table/e-table-header-item.c:1488 ../xpdf/gpdf-control-ui.xml.h:2 +msgid "Best Fit" +msgstr "Vừa nhất" + +#: gal/e-table/e-table-header-item.c:1489 +msgid "Format Columns..." +msgstr "Äịnh dạng cá»™t..." + +#: gal/e-table/e-table-header-item.c:1491 +msgid "Customize Current View..." +msgstr "Tùy biến khung xem hiện thá»i..." 
+ +#: ../widgets/text/e-entry.c:1264 gal/e-table/e-table-header-item.c:1802 +msgid "Fontset" +msgstr "Bá»™ phông chữ" + +#: gal/e-table/e-table-header-item.c:1823 gal/e-table/e-table-sorter.c:172 +msgid "Sort Info" +msgstr "Sắp xếp thông tin" + +#: ../src/file-manager/fm-tree-view.c:1488 ../src/orca/rolenames.py:473 +msgid "Tree" +msgstr "Cây" + +#: ../plugins/taglist/HTML.tags.xml.in.h:235 +msgid "Table header" +msgstr "Äầu bảng" + +#: ../widgets/table/e-table-item.c:2949 ../widgets/table/e-table-item.c:2950 +#: ../widgets/table/e-table-item.c:2945 ../widgets/table/e-table-item.c:2946 +msgid "Table model" +msgstr "Mẫu bảng" + +#: ../widgets/table/e-table-item.c:3025 ../widgets/table/e-table-item.c:3026 +#: ../widgets/table/e-table-item.c:3021 ../widgets/table/e-table-item.c:3022 +msgid "Cursor row" +msgstr "Hàng con trá»" + +#: ../widgets/table/e-table.c:3330 gal/e-table/e-table.c:3313 +msgid "Always Search" +msgstr "Luôn tìm kiếm" + +#: ../widgets/table/e-table.c:3337 gal/e-table/e-table.c:3320 +msgid "Use click to add" +msgstr "Nhấn chuá»™t để thêm" + +#: ../widgets/table/e-tree.c:3290 ../widgets/table/e-tree.c:3291 +#: gal/e-table/e-tree.c:3286 gal/e-table/e-tree.c:3287 +msgid "ETree table adapter" +msgstr "Bá»™ tiếp hợp ETree (bảng cây Ä‘iện)" + +#: ../widgets/table/e-tree.c:3304 ../widgets/table/e-tree.c:3305 +#: gal/e-table/e-tree.c:3300 gal/e-table/e-tree.c:3301 +msgid "Always search" +msgstr "Luôn tìm kiếm" + +#: ../widgets/table/e-tree.c:3311 gal/e-table/e-tree.c:3307 +msgid "Retro Look" +msgstr "Vẻ cÅ©" + +#: ../widgets/table/e-tree.c:3312 gal/e-table/e-tree.c:3308 +msgid "Draw lines and +/- expanders." +msgstr "Vẽ Ä‘Æ°á»ng và mÅ©i tên bung +/-" + +#: ../widgets/text/e-entry-test.c:49 +msgid "Minicard Test" +msgstr "Kiểm tra thẻ tí tị" + +#: ../widgets/text/e-entry-test.c:50 +msgid "Copyright (C) 2000, Helix Code, Inc." +msgstr "Bản quyá»n © năm 2000, Helix Code, Inc." 
+
+#: ../widgets/text/e-entry-test.c:52
+msgid "This should test the minicard canvas item"
+msgstr "Hành động này nên thử ra mục vải căng thẻ tí tị"
+
+#: gal/e-text/e-text.c:3438 gal/e-text/e-text.c:3439
+msgid "Event Processor"
+msgstr "Bộ xử lý sự kiện"
+
+#: src/prefsdlg.cpp:44 jpilot.c:2620
+msgid "Font"
+msgstr "Phông chữ"
+
+#: ../widgets/text/e-entry.c:1270 ../widgets/text/e-entry.c:1271
+msgid "GDKFont"
+msgstr "Phông chữ GDK"
+
+#: ../gtk/gtktexttag.c:380 ../gtk/gtktextview.c:595
+msgid "Justification"
+msgstr "Canh đều"
+
+#: gal/e-text/e-text.c:3574 gal/e-text/e-text.c:3575
+msgid "Use ellipsis"
+msgstr "Dùng dấu chấm lửng"
+
+#: gal/e-text/e-text.c:3581 gal/e-text/e-text.c:3582
+msgid "Ellipsis"
+msgstr "Dấu chấm lửng"
+
+#: gal/e-text/e-text.c:3588 gal/e-text/e-text.c:3589 ../gtk/gtklabel.c:368
+msgid "Line wrap"
+msgstr "Ngắt dòng"
+
+#: gal/e-text/e-text.c:3595 gal/e-text/e-text.c:3596
+msgid "Break characters"
+msgstr "Ngắt ký tự"
+
+#: gal/e-text/e-text.c:3602 gal/e-text/e-text.c:3603
+msgid "Max lines"
+msgstr "Số dòng tối đa"
+
+#: gal/e-text/e-text.c:3631 gal/e-text/e-text.c:3632
+msgid "Allow newlines"
+msgstr "Cho phép ký tự dòng mới"
+
+#: gal/e-text/e-text.c:3624 gal/e-text/e-text.c:3625
+msgid "Draw borders"
+msgstr "Viền vẽ"
+
+#: gal/e-text/e-text.c:3638 gal/e-text/e-text.c:3639 ../lib/properties.c:76
+#: ../lib/properties.h:505 ../lib/properties.h:508
+msgid "Draw background"
+msgstr "Nền vẽ"
+
+#: gal/e-text/e-text.c:3645 gal/e-text/e-text.c:3646
+msgid "Draw button"
+msgstr "Nút vẽ"
+
+#: gal/e-text/e-text.c:3652 gal/e-text/e-text.c:3653
+msgid "Cursor position"
+msgstr "Vị trí con trỏ"
+
+#: ../widgets/text/e-entry.c:1389 ../widgets/text/e-entry.c:1390
+msgid "Emulate label resize"
+msgstr "Mô phỏng đổi cỡ nhãn"
+
+#: ../widgets/text/e-text.c:2696 gal/e-text/e-text.c:2679
+#: ../components/html-editor/popup.c:553
+msgid "Input Methods"
+msgstr "Cách nhập"
+
+#: ../components/html-editor/toolbar.c:551
+#: ../src/form-editor/button-prop.cc:146 ../src/widgets/font-combo.cc:47
+msgid "Bold"
+msgstr "Đậm"
+
+#: ../components/html-editor/toolbar.c:557
+msgid "Strikeout"
+msgstr "Gạch xoá"
+
+#: ../libgimpwidgets/gimpstock.c:113
+msgid "Anchor"
+msgstr "Neo"
+
+#: ../widgets/text/e-text.c:3498 ../widgets/text/e-text.c:3499
+#: gal/e-text/e-text.c:3481 gal/e-text/e-text.c:3482
+msgid "Clip Width"
+msgstr "Rộng trích đoạn"
+
+#: ../widgets/text/e-text.c:3505 ../widgets/text/e-text.c:3506
+#: gal/e-text/e-text.c:3488 gal/e-text/e-text.c:3489
+msgid "Clip Height"
+msgstr "Cao trích đoạn"
+
+#: ../widgets/text/e-text.c:3512 ../widgets/text/e-text.c:3513
+#: gal/e-text/e-text.c:3495 gal/e-text/e-text.c:3496
+msgid "Clip"
+msgstr "Trích đoạn"
+
+#: ../widgets/text/e-text.c:3519 ../widgets/text/e-text.c:3520
+#: gal/e-text/e-text.c:3502 gal/e-text/e-text.c:3503
+msgid "Fill clip rectangle"
+msgstr "Tô đầy hình chữ nhật trích đoạn"
+
+#: ../widgets/text/e-text.c:3526 ../widgets/text/e-text.c:3527
+#: gal/e-text/e-text.c:3509 gal/e-text/e-text.c:3510
+msgid "X Offset"
+msgstr "Hiệu số X"
+
+#: ../widgets/text/e-text.c:3533 ../widgets/text/e-text.c:3534
+#: gal/e-text/e-text.c:3516 gal/e-text/e-text.c:3517
+msgid "Y Offset"
+msgstr "Hiệu số Y"
+
+#: ../widgets/text/e-text.c:3569 ../widgets/text/e-text.c:3570
+msgid "Text width"
+msgstr "Rộng văn bản"
+
+#: ../widgets/text/e-text.c:3576 ../widgets/text/e-text.c:3577
+msgid "Text height"
+msgstr "Cao văn bản"
+
+#: 
gal/e-text/e-text.c:3659 gal/e-text/e-text.c:3660
+msgid "IM Context"
+msgstr "Ngữ cảnh IM"
+
+#: ../widgets/text/e-text.c:3683 ../widgets/text/e-text.c:3684
+#: gal/e-text/e-text.c:3666 gal/e-text/e-text.c:3667
+msgid "Handle Popup"
+msgstr "Bộ bật lên móc kéo"
+
+#: emultempl/armcoff.em:72
+#, c-format
+msgid " --support-old-code Support interworking with old code\n"
+msgstr " --support-old-code _Hỗ trợ_ dệt vào với _mã cũ_\n"
+
+#: emultempl/armcoff.em:73
+#, c-format
+msgid " --thumb-entry= Set the entry point to be Thumb symbol \n"
+msgstr ""
+" --thumb-entry= Lập điểm _vào_ là ký hiệu _Hình Nhỏ_ Thumb này\n"
+
+#: emultempl/armcoff.em:121
+#, c-format
+msgid "Errors encountered processing file %s"
+msgstr "Gặp lỗi khi xử lý tập tin %s"
+
+#: emultempl/armcoff.em:188 emultempl/pe.em:1455
+msgid "%P: warning: '--thumb-entry %s' is overriding '-e %s'\n"
+msgstr "%P: cảnh báo : « --thumb-entry %s » đang lấy quyền cao hơn « -e %s »\n"
+
+#: emultempl/armcoff.em:193 emultempl/pe.em:1460
+msgid "%P: warning: connot find thumb start symbol %s\n"
+msgstr "%P: cảnh báo : không tìm thấy ký hiệu bắt đầu thumb %s\n"
+
+#: emultempl/pe.em:301
+#, c-format
+msgid ""
+" --base_file Generate a base file for relocatable "
+"DLLs\n"
+msgstr ""
+" --base_file Tạo ra một _tập tin cơ bản_ cho các DLL\n"
+"\t\t\t\t\t\t\tcó thể định vị lại\n"
+
+#: emultempl/pe.em:302
+#, c-format
+msgid ""
+" --dll Set image base to the default for DLLs\n"
+msgstr ""
+" --dll Lập cơ bản ảnh là mặc định cho các DLL\n"
+
+#: emultempl/pe.em:303
+#, c-format
+msgid " --file-alignment Set file alignment\n"
+msgstr " --file-alignment Lập cách _canh lề tập tin_\n"
+
+#: emultempl/pe.em:304
+#, c-format
+msgid " --heap Set initial size of the heap\n"
+msgstr ""
+" --heap Lập kích cỡ _miền nhớ_ ban đầu\n"
+
+#: emultempl/pe.em:305
+#, c-format
+msgid ""
+" --image-base
 Set start address of the executable\n"
+msgstr ""
+" --image-base <địa_chỉ> Lập địa chỉ bắt đầu của ứng dụng chạy "
+"được\n"
+"\t\t\t\t\t\t\t\t (_cơ bản ảnh_)\n"
+
+#: emultempl/pe.em:306
+#, c-format
+msgid ""
+" --major-image-version Set version number of the executable\n"
+msgstr ""
+" --major-image-version \tLập số thứ tự _phiên bản_\n"
+"\t\t\t\t\tcủa ứng dụng chạy được (_ảnh lớn_)\n"
+
+#: emultempl/pe.em:307
+#, c-format
+msgid " --major-os-version Set minimum required OS version\n"
+msgstr ""
+" --major-os-version \t\tLập số thứ tự _phiên bản\n"
+"\t\t\t\thệ điều hành_ tối thiểu cần thiết (_lớn_)\n"
+
+#: emultempl/pe.em:308
+#, c-format
+msgid ""
+" --major-subsystem-version Set minimum required OS subsystem "
+"version\n"
+msgstr ""
+" --major-subsystem-version \t Lập số thứ tự _phiên bản\n"
+"\t\t\t\thệ điều hành con_ tối thiểu cần thiết (_lớn_)\n"
+
+#: emultempl/pe.em:309
+#, c-format
+msgid ""
+" --minor-image-version Set revision number of the executable\n"
+msgstr ""
+" --minor-image-version \tLập số thứ tự bản sửa đổi\n"
+"\tcủa ứng dụng chạy được (_phiên bản ảnh nhỏ_)\n"
+
+#: emultempl/pe.em:310
+#, c-format
+msgid " --minor-os-version Set minimum required OS revision\n"
+msgstr ""
+" --minor-os-version \t\tLập số thứ tự bản sửa đổi\n"
+"\t\tcủa hệ điều hành cần thiết (_phiên bản hệ điều hành nhỏ_)\n"
+
+#: emultempl/pe.em:311
+#, c-format
+msgid ""
+" --minor-subsystem-version Set minimum required OS subsystem "
+"revision\n"
+msgstr ""
+" --minor-subsystem-version \t Lập số thứ tự bản sửa đổi\n"
+"\t\tcủa hệ điều hành con cần thiết (_phiên bản hệ điều hành con nhỏ_)\n"
+
+#: emultempl/pe.em:312
+#, c-format
+msgid " --section-alignment Set section alignment\n"
+msgstr " --section-alignment Lập cách _canh lề phần_\n"
+
+#: emultempl/pe.em:313
+#, c-format
+msgid " --stack Set size of the initial stack\n"
+msgstr ""
+" --stack Lập kích cỡ của _đống_ ban đầu\n"
+
+#: emultempl/pe.em:314
+#, c-format
+msgid ""
+" --subsystem [:] Set required OS subsystem [& version]\n"
+msgstr ""
+" --subsystem [:] Lập _hệ điều hành con_ [và phiên bản] "
+"cần thiết\n"
+
+#: emultempl/pe.em:315
+#, c-format
+msgid ""
+" --support-old-code Support interworking with old code\n"
+msgstr " --support-old-code _Hỗ trợ_ dệt vào với _mã cũ_\n"
+
+#: emultempl/pe.em:316
+#, c-format
+msgid ""
+" --thumb-entry= Set the entry point to be Thumb "
+"\n"
+msgstr " --thumb-entry= Lập điểm _vào_ là ký hiệu _Hình Nhỏ_ này\n"
+
+#: emultempl/pe.em:318
+#, c-format
+msgid ""
+" --add-stdcall-alias Export symbols with and without @nn\n"
+msgstr ""
+" --add-stdcall-alias Xuất ký hiệu với và không với « @nn » (_thêm "
+"bí danh gọi chuẩn_)\n"
+
+#: emultempl/pe.em:319
+#, c-format
+msgid " --disable-stdcall-fixup Don't link _sym to _sym@nn\n"
+msgstr ""
+" --disable-stdcall-fixup Đừng liên kết « _sym » đến « _sym@nn "
+"» (_tắt sửa gọi chuẩn_)\n"
+
+#: emultempl/pe.em:320
+#, c-format
+msgid ""
+" --enable-stdcall-fixup Link _sym to _sym@nn without warnings\n"
+msgstr ""
+" --enable-stdcall-fixup Liên kết « _sym » đến « _sym@nn », "
+"không có cảnh báo\n"
+" \t\t\t\t\t\t\t(_bật sửa gọi chuẩn_)\n"
+
+#: emultempl/pe.em:321
+#, c-format
+msgid ""
+" --exclude-symbols sym,sym,... Exclude symbols from automatic export\n"
+msgstr ""
+" --exclude-symbols ký_hiệu,ký_hiệu,... _Loại trừ những ký hiệu_ này ra "
+"việc xuất tự động\n"
+
+#: emultempl/pe.em:322
+#, c-format
+msgid ""
+" --exclude-libs lib,lib,... 
Exclude libraries from automatic "
+"export\n"
+msgstr ""
+" --exclude-libs thư_viện,thư_viện,... _Loại trừ những thư viện_ này "
+"ra việc xuất tự động\n"
+
+#: emultempl/pe.em:323
+#, c-format
+msgid ""
+" --export-all-symbols Automatically export all globals to "
+"DLL\n"
+msgstr ""
+" --export-all-symbols Tự động _xuất mọi_ điều toàn cục vào "
+"DLL (_ký hiệu_)\n"
+
+#: emultempl/pe.em:324
+#, c-format
+msgid " --kill-at Remove @nn from exported symbols\n"
+msgstr ""
+" --kill-at Gỡ bỏ « @nn » ra những ký hiệu đã xuất "
+"(_buộc kết thúc tại_)\n"
+
+#: emultempl/pe.em:325
+#, c-format
+msgid " --out-implib Generate import library\n"
+msgstr " --out-implib Tạo _ra thư viện nhập_\n"
+
+#: emultempl/pe.em:326
+#, c-format
+msgid ""
+" --output-def Generate a .DEF file for the built DLL\n"
+msgstr ""
+" --output-def Tạo _ra_ một tập tin .DEF cho DLL đã "
+"xây dựng\n"
+
+#: emultempl/pe.em:327
+#, c-format
+msgid " --warn-duplicate-exports Warn about duplicate exports.\n"
+msgstr ""
+" --warn-duplicate-exports _Cảnh báo_ về _việc xuất trùng_ nào.\n"
+
+#: emultempl/pe.em:328
+#, c-format
+msgid ""
+" --compat-implib Create backward compatible import "
+"libs;\n"
+" create __imp_ as well.\n"
+msgstr ""
+" --compat-implib Tạo các _thư viện nhập tương thích_ "
+"ngược;\n"
+"\t\t\t\t\tcũng tạo « __imp_ ».\n"
+
+#: emultempl/pe.em:330
+#, c-format
+msgid ""
+" --enable-auto-image-base Automatically choose image base for "
+"DLLs\n"
+" unless user specifies one\n"
+msgstr ""
+" --enable-auto-image-base Tự động chọn cơ bản ảnh cho mọi DLL\n"
+"\t\t\t\t\t\t\ttrừ khi người dùng gõ nó\n"
+
+#: emultempl/pe.em:332
+#, c-format
+msgid ""
+" --disable-auto-image-base Do not auto-choose image base. "
+"(default)\n"
+msgstr ""
+" --disable-auto-image-base Đừng _tự động_ chọn _cơ bản ảnh_ (mặc "
+"định) (_tắt_)\n"
+
+#: emultempl/pe.em:333
+#, c-format
+msgid ""
+" --dll-search-prefix= When linking dynamically to a dll "
+"without\n"
+" an importlib, use ."
+"dll\n"
+" in preference to lib.dll \n"
+msgstr ""
+" --dll-search-prefix= Khi liên kết động đến DLL không có thư "
+"viện nhập,\n"
+"\thãy dùng « .dll » hơn « .dll »\n"
+"\t(_tiền_tố_tìm_kiếm_)\n"
+
+#: emultempl/pe.em:336
+#, c-format
+msgid ""
+" --enable-auto-import Do sophistcated linking of _sym to\n"
+" __imp_sym for DATA references\n"
+msgstr ""
+" --enable-auto-import Liên kết một cách tinh tế\n"
+"\t« _sym » đến « __imp_sym » cho các tham chiếu DATA (dữ liệu)\n"
+"\t(_bật nhập tự động)\n"
+
+#: emultempl/pe.em:338
+#, c-format
+msgid ""
+" --disable-auto-import Do not auto-import DATA items from "
+"DLLs\n"
+msgstr ""
+" --disable-auto-import Đừng _tự động nhập_ mục DATA từ DLL (_tắt_)\n"
+
+#: emultempl/pe.em:339
+#, c-format
+msgid ""
+" --enable-runtime-pseudo-reloc Work around auto-import limitations by\n"
+" adding pseudo-relocations resolved "
+"at\n"
+" runtime.\n"
+msgstr ""
+" --enable-runtime-pseudo-reloc Chỉnh sửa các hạn chế nhập tự động,\n"
+"\tbằng cách thêm các việc _định vị lại giả_ được tháo gỡ vào _lúc chạy_. "
+"(_bật_)\n"
+
+#: emultempl/pe.em:342
+#, c-format
+msgid ""
+" --disable-runtime-pseudo-reloc Do not add runtime pseudo-relocations "
+"for\n"
+" auto-imported DATA.\n"
+msgstr ""
+" --disable-runtime-pseudo-reloc Đừng thêm việc _định vị lại giả_\n"
+"\tvào _lúc chạy_ cho DATA (dữ liệu) được nhập tự động. 
(_tắt_)\n" + +#: emultempl/pe.em:344 +#, c-format +msgid "" +" --enable-extra-pe-debug Enable verbose debug output when " +"building\n" +" or linking to DLLs (esp. auto-" +"import)\n" +msgstr "" +" --enable-extra-pe-debug _Bật_ xuất dữ liệu _gỡ lá»—i_ chi tiết\n" +"\ttrong khi xây dụng hay liên kết đến DLL nào (nhất là việc tá»± Ä‘á»™ng nhập) " +"(_thêm_)\n" + +#: emultempl/pe.em:347 +#, c-format +msgid "" +" --large-address-aware Executable supports virtual addresses\n" +" greater than 2 gigabytes\n" +msgstr "" +" --large-address-aware Ứng dụng chạy có há»— trợ _địa chỉ_ ảo _lá»›n_ " +"hÆ¡n 2 GB\n" +"\t\t\t\t\t\t\t(_kiến thức_)\n" + +#: emultempl/pe.em:414 +msgid "%P: warning: bad version number in -subsystem option\n" +msgstr "" +"%P: cảnh báo : gặp số thứ tá»± phiên bản sai trong tùy chá»n « -subsystem » (hệ " +"thống con)\n" + +#: emultempl/pe.em:445 +msgid "%P%F: invalid subsystem type %s\n" +msgstr "%P%F: kiểu hệ thống con không hợp lệ %s\n" + +#: emultempl/pe.em:484 +msgid "%P%F: invalid hex number for PE parameter '%s'\n" +msgstr "%P%F: số thập lục không hợp lệ cho tham số « %s »\n" + +#: emultempl/pe.em:501 +msgid "%P%F: strange hex info for PE parameter '%s'\n" +msgstr "%P%F: thông tin thập lục lạ cho tham số PE « %s »\n" + +#: emultempl/pe.em:518 +#, c-format +msgid "%s: Can't open base file %s\n" +msgstr "%s: Không thể mở tập tin cÆ¡ bản %s\n" + +#: emultempl/pe.em:734 +msgid "%P: warning, file alignment > section alignment.\n" +msgstr "%P: cảnh báo, canh lá» tập tin > canh lá» phần.\n" + +#: emultempl/pe.em:821 emultempl/pe.em:848 +#, c-format +msgid "Warning: resolving %s by linking to %s\n" +msgstr "Cảnh báo : Ä‘ang tháo gỡ %s bằng cách liên kết đến %s\n" + +#: emultempl/pe.em:826 emultempl/pe.em:853 +msgid "Use --enable-stdcall-fixup to disable these warnings\n" +msgstr "" +"Hãy dùng « --enable-stdcall-fixup » (bật sá»­a gá»i chuẩn) để tắt các cảnh báo " +"này\n" + +#: emultempl/pe.em:827 emultempl/pe.em:854 +msgid "Use --disable-stdcall-fixup to disable these fixups\n" +msgstr "" +"Hãy dùng « --disable-stdcall-fixup » (tắt sá»­a gá»i chuẩn) để tắt các việc sá»­a " +"này\n" + +#: emultempl/pe.em:873 +#, c-format +msgid "%C: Cannot get section contents - auto-import exception\n" +msgstr "%C: Không thể lấy ná»™i dung phần: ngoài lệ nhập tá»± Ä‘á»™ng\n" + +#: emultempl/pe.em:910 +#, c-format +msgid "Info: resolving %s by linking to %s (auto-import)\n" +msgstr "Thông tin: Ä‘ang tháo gỡ %s bằng cách liên kết đến %s (tá»± Ä‘á»™ng nhập)\n" + +#: emultempl/pe.em:983 +msgid "%F%P: PE operations on non PE file.\n" +msgstr "%F%P: thao tác PE vá»›i tập tin không phải PE.\n" + +#: emultempl/pe.em:1258 +#, c-format +msgid "Errors encountered processing file %s\n" +msgstr "Gặp lá»—i trong khi xá»­ lý tập tin %s\n" + +#: emultempl/pe.em:1281 +#, c-format +msgid "Errors encountered processing file %s for interworking" +msgstr "Gặp lá»—i trong khi xá»­ lý tập tin %s để dệt vào vá»›i nhau" + +#: emultempl/pe.em:1340 ldexp.c:570 ldlang.c:2408 ldlang.c:5135 ldlang.c:5166 +#: ldmain.c:1161 +msgid "%P%F: bfd_link_hash_lookup failed: %E\n" +msgstr "" +"%P%F: « bfd_link_hash_lookup » (bfd liên kết băm tra cứu) thất bại: %E\n" + +#: ldcref.c:153 +msgid "%X%P: bfd_hash_table_init of cref table failed: %E\n" +msgstr "" +"%X%P: « bfd_hash_table_init » (bfd băm bảng khởi Ä‘á»™ng) vá»›i bảng cref thất " +"bại: %E\n" + +#: ldcref.c:159 +msgid "%X%P: cref_hash_lookup failed: %E\n" +msgstr "%X%P: việc « cref_hash_lookup » bị lá»—i: %E\n" + +#: ldcref.c:225 +#, c-format +msgid "" +"\n" +"Cross Reference 
Table\n" +"\n" +msgstr "" +"\n" +"Bảng Tham Chiếu Chéo\n" +"\n" + +#: ldcref.c:226 ../plug-ins/common/uniteditor.c:104 +#: ../src/widgets/font-combo.cc:49 +msgid "Symbol" +msgstr "Ký hiệu" + +#: ldcref.c:234 +#, c-format +msgid "File\n" +msgstr "Tập tin\n" + +#: ldcref.c:238 +#, c-format +msgid "No symbols\n" +msgstr "Không có ký hiệu\n" + +#: ldcref.c:359 ldcref.c:478 +msgid "%B%F: could not read symbols; %E\n" +msgstr "%B%F: không thể đọc các ký hiệu ; %E\n" + +#: ldcref.c:363 ldcref.c:482 ldmain.c:1226 ldmain.c:1230 +msgid "%B%F: could not read symbols: %E\n" +msgstr "%B%F: không thể đọc các ký hiệu : %E\n" + +#: ldcref.c:414 +msgid "%P: symbol `%T' missing from main hash table\n" +msgstr "%P: thiếu ký hiệu « %T » trong bảng băm chính\n" + +#: ldcref.c:547 ldcref.c:554 ldmain.c:1273 ldmain.c:1280 +msgid "%B%F: could not read relocs: %E\n" +msgstr "%B%F: không thể đọc các điều định vị lại : %E\n" + +#: ldcref.c:573 +msgid "%X%C: prohibited cross reference from %s to `%T' in %s\n" +msgstr "%X%C: không cho phép tham chiếu chéo từ %s đến « %T » trong %s\n" + +#: ldctor.c:84 +msgid "%P%X: Different relocs used in set %s\n" +msgstr "%P%X: Sử dụng sự định vị lại khác nhau trong tập hợp %s\n" + +#: ldctor.c:102 +msgid "%P%X: Different object file formats composing set %s\n" +msgstr "%P%X: Có gồm khuôn dạng tập tin đối tượng khác nhau trong %s\n" + +#: ldctor.c:281 ldctor.c:295 +msgid "%P%X: %s does not support reloc %s for set %s\n" +msgstr "%P%X: %s không hỗ trợ định vị lại %s cho tập hợp %s\n" + +#: ldctor.c:316 +msgid "%P%X: Unsupported size %d for set %s\n" +msgstr "%P%X: Không hỗ trợ kích cỡ %d cho tập hợp %s\n" + +#: ldctor.c:337 +msgid "" +"\n" +"Set Symbol\n" +"\n" +msgstr "" +"\n" +"Tập hợp Ký hiệu\n" +"\n" + +#: ldemul.c:227 +#, c-format +msgid "%S SYSLIB ignored\n" +msgstr "%S SYSLIB bị bỏ qua\n" + +#: ldemul.c:233 +#, c-format +msgid "%S HLL ignored\n" +msgstr "%S HLL bị bỏ qua\n" + +#: ldemul.c:253 +msgid "%P: unrecognised emulation mode: %s\n" +msgstr "%P: không nhận ra chế độ mô phỏng: %s\n" + +#: ldemul.c:254 +msgid "Supported emulations: " +msgstr "Mô phỏng đã hỗ trợ :" + +#: ldemul.c:296 +#, c-format +msgid " no emulation specific options.\n" +msgstr " không có tùy chọn đặc trưng cho mô phỏng.\n" + +#: ldexp.c:379 +#, c-format +msgid "%F%S %% by zero\n" +msgstr "%F%S %% cho số không\n" + +#: ldexp.c:386 +#, c-format +msgid "%F%S / by zero\n" +msgstr "%F%S / cho số không\n" + +#: ldexp.c:583 +#, c-format +msgid "%X%S: unresolvable symbol `%s' referenced in expression\n" +msgstr "" +"%X%S: ký hiệu không tháo gỡ được « %s » được tham chiếu trong biểu thức\n" + +#: ldexp.c:604 +#, c-format +msgid "%F%S: undefined symbol `%s' referenced in expression\n" +msgstr "%F%S: ký hiệu chưa định nghĩa « %s » được tham chiếu trong biểu thức\n" + +#: ldexp.c:665 ldexp.c:678 +#, c-format +msgid "%F%S: undefined MEMORY region `%s' referenced in expression\n" +msgstr "" +"%F%S: miền MEMORY (nhớ) chưa định nghĩa « %s » được tham chiếu trong biểu " +"thức\n" + +#: ldexp.c:757 +#, c-format +msgid "%F%S can not PROVIDE assignment to location counter\n" +msgstr "%F%S không thể PROVIDE (cung cấp) việc gán cho bộ đếm địa điểm\n" + +#: ldexp.c:770 +#, c-format +msgid "%F%S invalid assignment to location counter\n" +msgstr "%F%S việc gán không hợp lệ cho bộ đếm địa điểm\n" + +#: ldexp.c:774 +#, c-format +msgid "%F%S assignment to location counter invalid outside of SECTION\n" +msgstr "" +"%F%S việc gán cho bộ đếm địa điểm không phải hợp lệ bên ngoài SECTION " 
+"(phần)\n" + +#: ldexp.c:783 +msgid "%F%S cannot move location counter backwards (from %V to %V)\n" +msgstr "%F%S không thể chạy ngược bá»™ đếm địa Ä‘iểm (từ %V vá» %V)\n" + +#: ldexp.c:810 +msgid "%P%F:%s: hash creation failed\n" +msgstr "%P%F:%s: việc tạo băm bị lá»—i\n" + +#: ldexp.c:1077 ldexp.c:1109 +#, c-format +msgid "%F%S nonconstant expression for %s\n" +msgstr "%F%S biểu thức thay đổi cho %s\n" + +#: ldexp.c:1163 +#, c-format +msgid "%F%S non constant expression for %s\n" +msgstr "%F%S biểu thức thay đổi cho %s\n" + +#: ldfile.c:139 +#, c-format +msgid "attempt to open %s failed\n" +msgstr "việc cố mở %s bị lá»—i\n" + +#: ldfile.c:141 +#, c-format +msgid "attempt to open %s succeeded\n" +msgstr "việc cố mở %s đã thành công\n" + +#: ldfile.c:147 +msgid "%F%P: invalid BFD target `%s'\n" +msgstr "%F%P: đích BFD không hợp lệ « %s »\n" + +#: ldfile.c:255 ldfile.c:282 +msgid "%P: skipping incompatible %s when searching for %s\n" +msgstr "%P: Ä‘ang nhảy qua %s không tÆ°Æ¡ng thích trong khi tìm kiếm %s\n" + +#: ldfile.c:267 +msgid "%F%P: attempted static link of dynamic object `%s'\n" +msgstr "%F%P: đã cố liên kết tÄ©nh đối tượng Ä‘á»™ng « %s »\n" + +#: ldfile.c:384 +msgid "%F%P: %s (%s): No such file: %E\n" +msgstr "%F%P: %s (%s): Không có tập tin nhÆ° vậy: %E\n" + +#: ldfile.c:387 +msgid "%F%P: %s: No such file: %E\n" +msgstr "%F%P: %s: Không có tập tin nhÆ° vậy: %E\n" + +#: ldfile.c:417 +msgid "%F%P: cannot find %s inside %s\n" +msgstr "%F%P: không tìm thấy được %s ở trong %s\n" + +#: ldfile.c:420 +msgid "%F%P: cannot find %s\n" +msgstr "%F%P: không tìm thấy được %s\n" + +#: ldfile.c:437 ldfile.c:453 +#, c-format +msgid "cannot find script file %s\n" +msgstr "không tìm thấy tập tin tập lệnh %s\n" + +#: ldfile.c:439 ldfile.c:455 +#, c-format +msgid "opened script file %s\n" +msgstr "đã mở tập tin tập lệnh %s\n" + +#: ldfile.c:499 +msgid "%P%F: cannot open linker script file %s: %E\n" +msgstr "%P%F: không thể mở tập tin tập lệnh liên kết %s: %E\n" + +#: ldfile.c:546 +msgid "%P%F: cannot represent machine `%s'\n" +msgstr "%P%F: không thể miêu tả máy « %s »\n" + +#: ldlang.c:511 +msgid "%P%F: out of memory during initialization" +msgstr "%P%F: hết bá»™ nhá»› trong khi cài đặt" + +#: ldlang.c:551 +msgid "%P:%S: warning: redeclaration of memory region '%s'\n" +msgstr "%P:%S: cảnh báo : miá»n nhá»› « %s » được khai báo lại\n" + +#: ldlang.c:557 +msgid "%P:%S: warning: memory region %s not declared\n" +msgstr "%P:%S: cảnh báo : chÆ°a khai báo miá»n bá»™ nhá»› %s\n" + +#: ldlang.c:1073 +msgid "" +"\n" +"Memory Configuration\n" +"\n" +msgstr "" +"\n" +"Cấu hình bá»™ nhá»›\n" +"\n" + +#: ../plug-ins/metadata/interface.c:347 ../providers/sybase/utils.c:475 +msgid "Origin" +msgstr "Gốc" + +#: ../gtk/gtkcellrenderertext.c:235 ../gtk/gtklabel.c:329 +#: ../objects/UML/class.c:205 ../objects/UML/object.c:154 +#: ../src/mlview-icon-tree.cc:1148 +msgid "Attributes" +msgstr "Thuá»™c tính" + +#: ldlang.c:1115 +#, c-format +msgid "" +"\n" +"Linker script and memory map\n" +"\n" +msgstr "" +"\n" +"Tập lệnh liên kết và bản đồ bá»™ nhá»›\n" +"\n" + +#: ldlang.c:1183 +msgid "%P%F: Illegal use of `%s' section\n" +msgstr "%P%F: Không cho phép cách sá»­ dụng phần « %s »\n" + +#: ldlang.c:1193 +msgid "%P%F: output format %s cannot represent section called %s\n" +msgstr "%P%F: khuôn dạng %s không thể miêu tả phần được gá»i là %s\n" + +#: ldlang.c:1775 +msgid "%B: file not recognized: %E\n" +msgstr "%B: không nhận ra tập tin: %E\n" + +#: ldlang.c:1776 +msgid "%B: matching formats:" +msgstr "%B: các dạng thức khá»›p 
với nhau :" + +#: ldlang.c:1783 +msgid "%F%B: file not recognized: %E\n" +msgstr "%F%B: không nhận ra tập tin: %E\n" + +#: ldlang.c:1847 +msgid "%F%B: member %B in archive is not an object\n" +msgstr "%F%B: bộ phạn kho %B không phải là đối tượng\n" + +#: ldlang.c:1858 ldlang.c:1872 +msgid "%F%B: could not read symbols: %E\n" +msgstr "%F%B: không thể đọc các ký hiệu : %E\n" + +#: ldlang.c:2127 +msgid "" +"%P: warning: could not find any targets that match endianness requirement\n" +msgstr "" +"%P: cảnh báo : không tìm thấy đích nào khớp với kiểu endian đã cần thiết\n" + +#: ldlang.c:2141 +msgid "%P%F: target %s not found\n" +msgstr "%P%F: không tìm thấy đích %s\n" + +#: ldlang.c:2143 +msgid "%P%F: cannot open output file %s: %E\n" +msgstr "%P%F: không thể mở tập tin xuất %s: %E\n" + +#: ldlang.c:2149 +msgid "%P%F:%s: can not make object file: %E\n" +msgstr "%P%F:%s: không thể tạo tập tin đối tượng: %E\n" + +#: ldlang.c:2153 +msgid "%P%F:%s: can not set architecture: %E\n" +msgstr "%P%F:%s: không thể lập kiến trúc: %E\n" + +#: ldlang.c:2157 +msgid "%P%F: can not create link hash table: %E\n" +msgstr "%P%F: không thể tạo bảng băm liên kết: %E\n" + +#: ldlang.c:2301 +msgid "%P%F: bfd_hash_lookup failed creating symbol %s\n" +msgstr "" +"%P%F: việc « bfd_hash_lookup » (bfd băm tra cứu) bị lỗi, tạo ký hiệu %s\n" + +#: ldlang.c:2319 +msgid "%P%F: bfd_hash_allocate failed creating symbol %s\n" +msgstr "" +"%P%F: « bfd_hash_allocate » (bfd băm cấp cho) thất bại, tạo ký hiệu %s\n" + +#: ldlang.c:2710 +msgid " load address 0x%V" +msgstr " tải địa chỉ 0x%V" + +#: ldlang.c:2874 +msgid "%W (size before relaxing)\n" +msgstr "%W (kích cỡ trước khi lơi ra)\n" + +#: ldlang.c:2961 +#, c-format +msgid "Address of section %s set to " +msgstr "Địa chỉ của phần %s được lập thành " + +#: ldlang.c:3114 +#, c-format +msgid "Fail with %d\n" +msgstr "Thất bại với %d\n" + +#: ldlang.c:3351 +msgid "%X%P: section %s [%V -> %V] overlaps section %s [%V -> %V]\n" +msgstr "%X%P: phần %s [%V → %V] đè lên phần %s [%V → %V]\n" + +#: ldlang.c:3379 +msgid "%X%P: address 0x%v of %B section %s is not within region %s\n" +msgstr "%X%P: địa chỉ 0x%v cửa %B phần %s không phải ở trong miền %s\n" + +#: ldlang.c:3388 +msgid "%X%P: region %s is full (%B section %s)\n" +msgstr "%X%P: miền %s đầy (%B phần %s)\n" + +#: ldlang.c:3439 +msgid "%P%X: Internal error on COFF shared library section %s\n" +msgstr "%P%X: Lỗi nội bộ trên phần thư viên dùng chung COFF %s\n" + +#: ldlang.c:3493 +msgid "%P%F: error: no memory region specified for loadable section `%s'\n" +msgstr "%P%F: lỗi: chưa ghi rõ miền bộ nhớ cho phần tải được « %s »\n" + +#: ldlang.c:3498 +msgid "%P: warning: no memory region specified for loadable section `%s'\n" +msgstr "%P: lỗi: chưa ghi rõ miền bộ nhớ cho phần tải được « %s »\n" + +#: ldlang.c:3515 +msgid "%P: warning: changing start of section %s by %u bytes\n" +msgstr "%P: cảnh báo : đang thay đổi đầu phần %s bằng %u byte\n" + +#: ldlang.c:3532 +#, c-format +msgid "" +"%F%S: non constant or forward reference address expression for section %s\n" +msgstr "%F%S: biểu thức địa chỉ tham chiếu thay đổi hay tiếp lên %s\n" + +#: ldlang.c:3703 +msgid "%P%F: can't relax section: %E\n" +msgstr "%P%F: không thể lơi ra phần: %E\n" + +#: ldlang.c:3960 +msgid "%F%P: invalid data statement\n" +msgstr "%F%P: câu dữ liệu không hợp lệ\n" + +#: ldlang.c:3999 +msgid "%F%P: invalid reloc statement\n" +msgstr "%F%P: câu định vị lại không hợp lệ\n" + +#: ldlang.c:4141 +msgid "%P%F:%s: can't set start 
address\n" +msgstr "%P%F:%s: không thể lập địa chỉ đầu\n" + +#: ldlang.c:4154 ldlang.c:4173 +msgid "%P%F: can't set start address\n" +msgstr "%P%F: không thể lập địa chỉ đầu\n" + +#: ldlang.c:4166 +msgid "%P: warning: cannot find entry symbol %s; defaulting to %V\n" +msgstr "" +"%P: cảnh báo : không tìm thấy được ký hiệu vào %s; nên dùng mặc định %V\n" + +#: ldlang.c:4178 +msgid "%P: warning: cannot find entry symbol %s; not setting start address\n" +msgstr "" +"%P: cảnh báo : không tìm thấy ký hiệu vào %s; nên không lập địa chỉ bắt đầu\n" + +#: ldlang.c:4227 +msgid "" +"%P%F: Relocatable linking with relocations from format %s (%B) to format %s " +"(%B) is not supported\n" +msgstr "" +"%P%F: Không hỗ trợ liên kết định vị lại được có định vị lại từ khuôn dạng %s " +"(%B) sang khuôn dạng %s (%B)\n" + +#: ldlang.c:4237 +msgid "" +"%P: warning: %s architecture of input file `%B' is incompatible with %s " +"output\n" +msgstr "" +"%P: cảnh báo : kiến trức %s của tập tin nhập « %B » không tương thích với dữ " +"liệu xuất %s\n" + +#: ldlang.c:4259 +msgid "%P%X: failed to merge target specific data of file %B\n" +msgstr "%P%X: lỗi hợp nhất dữ liệu đặc trưng cho dữ liệu của tập tin %B\n" + +#: ldlang.c:4343 +msgid "" +"\n" +"Allocating common symbols\n" +msgstr "" +"\n" +"Đang cấp phát các ký hiệu dùng chung\n" + +#: ldlang.c:4344 +msgid "" +"Common symbol size file\n" +"\n" +msgstr "" +"Ký hiệu cùng dùng cỡ tập tin\n" +"\n" + +#: ldlang.c:4470 +msgid "%P%F: invalid syntax in flags\n" +msgstr "%P%F: cụ pháp không hợp lệ trong các cờ\n" + +#: ldlang.c:4740 +msgid "%P%F: Failed to create hash table\n" +msgstr "%P%F: Việc tạo bảng băm bị lỗi\n" + +#: ldlang.c:5057 +msgid "%P%Fmultiple STARTUP files\n" +msgstr "%P%Fcó nhiều tập tin STARTUP (khởi động)\n" + +#: ldlang.c:5105 +msgid "%X%P:%S: section has both a load address and a load region\n" +msgstr "%X%P:%S: phần có cả địa chỉ tải lẫn miền tải đều\n" + +#: ldlang.c:5345 +msgid "%F%P: bfd_record_phdr failed: %E\n" +msgstr "%F%P: việc « bfd_record_phdr » bị lỗi: %E\n" + +#: ldlang.c:5365 +msgid "%X%P: section `%s' assigned to non-existent phdr `%s'\n" +msgstr "%X%P: phần « %s » được gán cho phdr không có « %s »\n" + +#: ldlang.c:5751 +msgid "%X%P: unknown language `%s' in version information\n" +msgstr "%X%P: không biết ngôn ngữ « %s » trong thông tin phiên bản\n" + +#: ldlang.c:5893 +msgid "" +"%X%P: anonymous version tag cannot be combined with other version tags\n" +msgstr "" +"%X%P: thẻ phiên bản vô danh không kết hợp được với thẻ phiên bản khác\n" + +#: ldlang.c:5902 +msgid "%X%P: duplicate version tag `%s'\n" +msgstr "%X%P: thẻ phiên bản trùng « %s »\n" + +#: ldlang.c:5922 ldlang.c:5931 ldlang.c:5948 ldlang.c:5958 +msgid "%X%P: duplicate expression `%s' in version information\n" +msgstr "%X%P: biểu thức trùng « %s » trong thông tin phiên bản\n" + +#: ldlang.c:5998 +msgid "%X%P: unable to find version dependency `%s'\n" +msgstr "%X%P: không tìm thấy được cách phục thuộc vào phiên bản « %s »\n" + +#: ldlang.c:6020 +msgid "%X%P: unable to read .exports section contents\n" +msgstr "%X%P: không thể đọc nội dung của phần « .exports » (xuất)\n" + +#: ldmain.c:229 +msgid "%X%P: can't set BFD default target to `%s': %E\n" +msgstr "%X%P: không thể lập đích mặc định BFD thành « %s »: %E\n" + +#: ldmain.c:341 +msgid "%P%F: --relax and -r may not be used together\n" +msgstr "" +"%P%F: không cho phép sử dụng hai tùy chọn « --relax » (lơi ra) và « -r » với " +"nhau \n" + +#: 
ldmain.c:343 +msgid "%P%F: -r and -shared may not be used together\n" +msgstr "%P%F: không thể sử dụng cả « -r » lẫn « -shared » (dùng chung) đều\n" + +#: ldmain.c:347 +msgid "%P%F: -static and -shared may not be used together\n" +msgstr "" +"%P%F: không thể sử dụng cả « -static » (tĩnh) lẫn « -shared » (dùng chung) " +"đều\n" + +#: ldmain.c:352 +msgid "%P%F: -F may not be used without -shared\n" +msgstr "" +"%P%F: không thể sử dụng tùy chọn « -F » khi không có tùy chọn « -shared " +"» (dùng chung)\n" + +#: ldmain.c:354 +msgid "%P%F: -f may not be used without -shared\n" +msgstr "" +"%P%F: không thể sử dụng tùy chọn « -f » khi không có tùy chọn « -shared " +"» (dùng chung)\n" + +#: ldmain.c:396 +msgid "using external linker script:" +msgstr "đang dùng tập lệnh liên kết bên ngoài:" + +#: ldmain.c:398 +msgid "using internal linker script:" +msgstr "đang dùng tập lệnh liên kết bên trong:" + +#: ldmain.c:432 +msgid "%P%F: no input files\n" +msgstr "%P%F: không có tập tin nhập nào\n" + +#: ldmain.c:436 +msgid "%P: mode %s\n" +msgstr "%P: chế độ %s\n" + +#: ldmain.c:452 +msgid "%P%F: cannot open map file %s: %E\n" +msgstr "%P%F: không thể mở tập tin bản đồ %s: %E\n" + +#: ldmain.c:482 +msgid "%P: link errors found, deleting executable `%s'\n" +msgstr "%P: tìm thấy một số lỗi liên kết nên xoá bỏ tập tin chạy được « %s »\n" + +#: ldmain.c:491 +msgid "%F%B: final close failed: %E\n" +msgstr "%F%B: việc đóng cuối cùng bị lỗi: %E\n" + +#: ldmain.c:517 +msgid "%X%P: unable to open for source of copy `%s'\n" +msgstr "%X%P: không thể mở cho nguồn của bản sao « %s »\n" + +#: ldmain.c:520 +msgid "%X%P: unable to open for destination of copy `%s'\n" +msgstr "%X%P: không thể mở cho đích của bản sao « %s »\n" + +#: ldmain.c:527 +msgid "%P: Error writing file `%s'\n" +msgstr "%P: Gặp lỗi khi ghi tập tin « %s »\n" + +#: ldmain.c:532 pe-dll.c:1447 +#, c-format +msgid "%P: Error closing file `%s'\n" +msgstr "%P: Gặp lỗi khi đóng tập tin « %s »\n" + +#: ldmain.c:548 +#, c-format +msgid "%s: total time in link: %ld.%06ld\n" +msgstr "%s: thời gian tổng trong liên kết: %ld.%06ld\n" + +#: ldmain.c:551 +#, c-format +msgid "%s: data size %ld\n" +msgstr "%s: kích cỡ dữ liệu %ld\n" + +#: ldmain.c:634 +msgid "%P%F: missing argument to -m\n" +msgstr "%P%F: thiếu đối số tới « -m »\n" + +#: ldmain.c:780 ldmain.c:798 ldmain.c:828 +msgid "%P%F: bfd_hash_table_init failed: %E\n" +msgstr "" +"%P%F: việc « bfd_hash_table_init » (bfd băm bảng khởi động) bị lỗi: %E\n" + +#: ldmain.c:784 ldmain.c:802 +msgid "%P%F: bfd_hash_lookup failed: %E\n" +msgstr "%P%F: việc « bfd_hash_lookup » (tra tìm băm BFD) bị lỗi: %E\n" + +#: ldmain.c:816 +msgid "%X%P: error: duplicate retain-symbols-file\n" +msgstr "%X%P: lỗi: « retain-symbols-file » (giữ lại tập tin ký hiệu) trùng\n" + +#: ldmain.c:858 +msgid "%P%F: bfd_hash_lookup for insertion failed: %E\n" +msgstr "" +"%P%F: việc « bfd_hash_lookup » (bfd băm tra cứu) cho sự chèn bị lỗi: %E\n" + +#: ldmain.c:863 +msgid "%P: `-retain-symbols-file' overrides `-s' and `-S'\n" +msgstr "" +"%P: tùy chọn « -retain-symbols-file » (giữ lại tập tin ký hiệu) đè lên « -s " +"» và « -S »\n" + +#: ldmain.c:938 +#, c-format +msgid "" +"Archive member included because of file (symbol)\n" +"\n" +msgstr "" +"Gồm bộ phạn kho vì tập tin (ký hiệu)\n" +"\n" + +#: ldmain.c:1008 +msgid "%X%C: multiple definition of `%T'\n" +msgstr "%X%C: « %T » đã được định nghĩa nhiều lần\n" + +#: ldmain.c:1011 +msgid "%D: first defined here\n" +msgstr "%D: đã được định nghĩa đầu tiên 
ở đây\n" + +#: ldmain.c:1015 +msgid "%P: Disabling relaxation: it will not work with multiple definitions\n" +msgstr "" +"%P: Tắt khả năng lơi ra: nó sẽ không hoạt động với nhiều lời định nghĩa\n" + +#: ldmain.c:1045 +msgid "%B: warning: definition of `%T' overriding common\n" +msgstr "%B: cảnh báo : lời định nghĩa « %T » đè lên điều dùng chung\n" + +#: ldmain.c:1048 +msgid "%B: warning: common is here\n" +msgstr "%B: cảnh báo : common (cùng dùng) là đây\n" + +#: ldmain.c:1055 +msgid "%B: warning: common of `%T' overridden by definition\n" +msgstr "%B: cảnh báo : lời định nghĩa đè lên điều cùng dùng của « %T »\n" + +#: ldmain.c:1058 +msgid "%B: warning: defined here\n" +msgstr "%B: cảnh báo : đã được định nghĩa ở đây\n" + +#: ldmain.c:1065 +msgid "%B: warning: common of `%T' overridden by larger common\n" +msgstr "" +"%B: cảnh báo : điều cùng dùng lớn hơn có đè lên điều cùng dùng « %T »\n" + +#: ldmain.c:1068 +msgid "%B: warning: larger common is here\n" +msgstr "%B: cảnh báo : điều dùng chung lớn hơn tại đây\n" + +#: ldmain.c:1072 +msgid "%B: warning: common of `%T' overriding smaller common\n" +msgstr "" +"%B: cảnh báo : điều « %T » dùng chung có đè lên điều dùng chung nhỏ hơn\n" + +#: ldmain.c:1075 +msgid "%B: warning: smaller common is here\n" +msgstr "%B: cảnh báo : điều cùng dùng nhỏ hơn ở đây\n" + +#: ldmain.c:1079 +msgid "%B: warning: multiple common of `%T'\n" +msgstr "%B: cảnh báo : nhiều điều cùng dùng của « %T »\n" + +#: ldmain.c:1081 +msgid "%B: warning: previous common is here\n" +msgstr "%B: cảnh báo : điều cùng dùng trước ở đây\n" + +#: ldmain.c:1101 ldmain.c:1139 +msgid "%P: warning: global constructor %s used\n" +msgstr "%P: cảnh báo : bộ cấu trúc toàn cục %s được dùng\n" + +#: ldmain.c:1149 +msgid "%P%F: BFD backend error: BFD_RELOC_CTOR unsupported\n" +msgstr "%P%F: lỗi hậu phương: « BFD_RELOC_CTOR » không được hỗ trợ\n" + +#: src/xgettext.c:2070 src/complain.c:51 src/complain.c:66 +#, c-format +msgid "warning: " +msgstr "cảnh báo : " + +#: ldmain.c:1327 +msgid "%F%P: bfd_hash_table_init failed: %E\n" +msgstr "" +"%F%P: việc « bfd_hash_table_init » (bfd băm bảng khởi động) bị lỗi: %E\n" + +#: ldmain.c:1334 +msgid "%F%P: bfd_hash_lookup failed: %E\n" +msgstr "%F%P: việc « bfd_hash_lookup » (tra tìm băm BFD) bị lỗi: %E\n" + +#: ldmain.c:1355 +msgid "%X%C: undefined reference to `%T'\n" +msgstr "%X%C: tham chiếu chưa định nghĩa đến « %T »\n" + +#: ldmain.c:1358 +msgid "%C: warning: undefined reference to `%T'\n" +msgstr "%C: cảnh báo : tham chiếu chưa định nghĩa đến « %T »\n" + +#: ldmain.c:1364 +msgid "%X%D: more undefined references to `%T' follow\n" +msgstr "%X%D: có tham chiếu chưa định nghĩa đến « %T » thêm nữa theo sau\n" + +#: ldmain.c:1367 +msgid "%D: warning: more undefined references to `%T' follow\n" +msgstr "" +"%D: chưa định nghĩa lời tham chiếu đến « %T » tại nhiều nơi nữa theo đây\n" + +#: ldmain.c:1378 +msgid "%X%B: undefined reference to `%T'\n" +msgstr "%X%B: tham chiếu chưa định nghĩa đến « %T »\n" + +#: ldmain.c:1381 +msgid "%B: warning: undefined reference to `%T'\n" +msgstr "%B: cảnh báo : chưa định nghĩa lời tham chiếu đến « %T »\n" + +#: ldmain.c:1387 +msgid "%X%B: more undefined references to `%T' follow\n" +msgstr "%X%B: có tham chiếu chưa định nghĩa đến « %T » thêm nữa theo sau\n" + +#: ldmain.c:1390 +msgid "%B: warning: more undefined references to `%T' follow\n" +msgstr "" +"%B: cảnh báo : chưa định nghĩa lời tham chiếu đến « %T » tại nhiều nơi nữa " +"theo 
đây\n" + +#: ldmain.c:1425 ldmain.c:1478 ldmain.c:1496 +msgid "%P%X: generated" +msgstr "%P%X: đã tạo ra" + +#: ldmain.c:1432 +msgid " additional relocation overflows omitted from the output\n" +msgstr "tràn định vị lại thêm bị bỏ đi khỏi dữ liệu xuất\n" + +#: ldmain.c:1445 +msgid " relocation truncated to fit: %s against undefined symbol `%T'" +msgstr "" +" sự định vị lại bị cắt xém để vừa: %s đối với ký hiệu chưa định nghĩa « %T »" + +#: ldmain.c:1450 +msgid "" +" relocation truncated to fit: %s against symbol `%T' defined in %A section " +"in %B" +msgstr "" +" sự định vị lại bị cắt xém để vừa: %s đối với ký hiệu « %T » đã định nghĩa " +"trong phần %A trong %B" + +#: ldmain.c:1460 +msgid " relocation truncated to fit: %s against `%T'" +msgstr "sự định vị lại bị cắt xém để vừa: %s đối với « %T »" + +#: ldmain.c:1481 +#, c-format +msgid "dangerous relocation: %s\n" +msgstr "sự định vị lại nguy hiểm: %s\n" + +#: ldmain.c:1499 +msgid " reloc refers to symbol `%T' which is not being output\n" +msgstr "" +" sự định vị lại tham chiếu đến ký hiệu « %T » mà không còn được xuất lại\n" + +#: ldmisc.c:149 +#, c-format +msgid "no symbol" +msgstr "không có ký hiệu" + +#: ldmisc.c:240 +#, c-format +msgid "built in linker script:%u" +msgstr "tập lệnh liên kết có sẵn:%u" + +#: ldmisc.c:289 ldmisc.c:293 +msgid "%B%F: could not read symbols\n" +msgstr "%B%F: không thể đọc các ký hiệu\n" + +#: ldmisc.c:329 +msgid "%B: In function `%T':\n" +msgstr "%B: trong hàm « %T »:\n" + +#: ldmisc.c:480 +msgid "%F%P: internal error %s %d\n" +msgstr "%F%P: lỗi nội bộ %s %d\n" + +#: ldmisc.c:526 +msgid "%P: internal error: aborting at %s line %d in %s\n" +msgstr "%P: lỗi nội bộ : đang hủy bỏ tại %s dòng %d trong %s\n" + +#: ldmisc.c:529 +msgid "%P: internal error: aborting at %s line %d\n" +msgstr "%P: lỗi nội bộ : đang hủy bỏ tại %s dòng %d\n" + +#: ldmisc.c:531 +msgid "%P%F: please report this bug\n" +msgstr "%P%F: vui lòng thông báo lỗi này\n" + +#. Output for noisy == 2 is intended to follow the GNU standards. 
+#: ldver.c:38 +#, c-format +msgid "GNU ld version %s\n" +msgstr "Trình ld phiên bản %s của GNU\n" + +#: ldver.c:52 +#, c-format +msgid " Supported emulations:\n" +msgstr " Mô phỏng đã hỗ trợ :\n" + +#: ldwrite.c:55 ldwrite.c:191 +msgid "%P%F: bfd_new_link_order failed\n" +msgstr "%P%F: việc « bfd_new_link_order » (bfd mới liên kết thứ tự) bị lỗi\n" + +#: ldwrite.c:341 +msgid "%F%P: cannot create split section name for %s\n" +msgstr "%F%P: không thể tạo tên phần đã chia tách cho %s\n" + +#: ldwrite.c:353 +msgid "%F%P: clone section failed: %E\n" +msgstr "%F%P: việc bắt chước phần bị lỗi: %E\n" + +#: ldwrite.c:391 +#, c-format +msgid "%8x something else\n" +msgstr "%8x cái gì khác\n" + +#: ldwrite.c:561 +msgid "%F%P: final link failed: %E\n" +msgstr "%F%P: liên kết cuối cùng bị lỗi: %E\n" + +#: lexsup.c:195 lexsup.c:327 +msgid "KEYWORD" +msgstr "TỪ_KHÓA" + +#: lexsup.c:195 +msgid "Shared library control for HP/UX compatibility" +msgstr "Điều khiển thư viên dùng chung để tương thích với HP/UX" + +#: lexsup.c:198 +msgid "ARCH" +msgstr "ARCH" + +#: lexsup.c:198 +msgid "Set architecture" +msgstr "Lập kiến trúc" + +#: lexsup.c:200 lexsup.c:421 +msgid "TARGET" +msgstr "ĐÍCH" + +#: lexsup.c:200 +msgid "Specify target for following input files" +msgstr "Ghi rõ đích cho những tập tin nhập theo đây" + +#: lexsup.c:203 +msgid "Read MRI format linker script" +msgstr "Đọc tập lệnh liên kết khuôn dạng MRI" + +#: lexsup.c:205 +msgid "Force common symbols to be defined" +msgstr "Ép buộc định nghĩa mọi ký hiệu dùng chung" + +#: lexsup.c:209 lexsup.c:475 lexsup.c:477 lexsup.c:479 +#: ../data/contact-lookup-applet.glade.h:5 +msgid "ADDRESS" +msgstr "ĐỊA CHỈ" + +#: lexsup.c:209 +msgid "Set start address" +msgstr "Lập địa chỉ bắt đầu" + +#: lexsup.c:211 +msgid "Export all dynamic symbols" +msgstr "Xuất mọi ký hiệu động" + +#: lexsup.c:213 +msgid "Link big-endian objects" +msgstr "Liên kết mọi đối tượng big-endian (cuối lớn)" + +#: lexsup.c:215 +msgid "Link little-endian objects" +msgstr "Liên kết mọi đối tượng little-endian (cuối nhỏ)" + +#: lexsup.c:217 lexsup.c:220 +msgid "SHLIB" +msgstr "SHLIB" + +#: lexsup.c:217 +msgid "Auxiliary filter for shared object symbol table" +msgstr "Bộ lọc phụ cho bảng ký hiệu đối tượng dùng chung" + +#: lexsup.c:220 +msgid "Filter for shared object symbol table" +msgstr "Bộ lọc cho bảng ký hiệu đối tượng dùng chung" + +#: lexsup.c:223 ../pan/filter-edit-ui.c:859 +msgid "Ignored" +msgstr "Bị bỏ qua" + +#: lexsup.c:225 ../gnotravex/gnotravex.c:245 +#: ../msearch/medusa-command-line-search.c:159 +msgid "SIZE" +msgstr "CỠ" + +#: lexsup.c:225 +msgid "Small data size (if no size, same as --shared)" +msgstr "Kích cỡ dữ liệu nhỏ (nếu không có, nó bằng tùy chọn « --shared »)" + +#: lexsup.c:228 ../gnome-stones/main.c:76 ../src/option.c:326 +#: ../src/option.c:600 +msgid "FILENAME" +msgstr "TÊN TẬP TIN" + +#: lexsup.c:228 +msgid "Set internal name of shared library" +msgstr "Lập tên nội bộ của thư viên dùng chung" + +#: lexsup.c:230 +msgid "PROGRAM" +msgstr "CHƯƠNG TRÌNH" + +#: lexsup.c:230 +msgid "Set PROGRAM as the dynamic linker to use" +msgstr "Lập CHƯƠNG TRÌNH là bộ liên kết động cần dùng" + +#: lexsup.c:233 +msgid "LIBNAME" +msgstr "TÊN THƯ VIÊN" + +#: lexsup.c:233 +msgid "Search for library LIBNAME" +msgstr "Tìm kiếm thư viên TÊN THƯ VIÊN" + +#: lexsup.c:235 src/fe-gtk/fe-gtk.c:172 ../utils/gpilotd-client.c:46 +#: ../activation-server/activation-server-main.c:84 +msgid "DIRECTORY" +msgstr "THƯ MỤC" + +#: lexsup.c:235 
+msgid "Add DIRECTORY to library search path" +msgstr "Thêm THƯ MỤC vào Ä‘Æ°á»ng dẫn tìm kiếm thÆ° viên" + +#: lexsup.c:238 +msgid "Override the default sysroot location" +msgstr "Äè lên địa Ä‘iểm sysroot (gốc hệ thống) mặc định" + +#: lexsup.c:240 +msgid "EMULATION" +msgstr "MÔ PHỎNG" + +#: lexsup.c:240 +msgid "Set emulation" +msgstr "Lập cách mô phá»ng" + +#: lexsup.c:242 +msgid "Print map file on standard output" +msgstr "In tập tin bản đồ ra thiết bị xuất chuẩn" + +#: lexsup.c:244 +msgid "Do not page align data" +msgstr "Äừng canh lá» trang dữ liệu" + +#: lexsup.c:246 +msgid "Do not page align data, do not make text readonly" +msgstr "Äừng canh lá» trang dữ liệu, đừng lập văn bản là chỉ Ä‘á»c" + +#: lexsup.c:249 +msgid "Page align data, make text readonly" +msgstr "Canh lá» trang dữ liệu, lập văn bản là chỉ Ä‘á»c" + +#: lexsup.c:252 +msgid "Set output file name" +msgstr "Lập tên tập tin xuất" + +#: lexsup.c:254 +msgid "Optimize output file" +msgstr "Ưu tiên hóa tập tin xuất" + +#: lexsup.c:256 +msgid "Ignored for SVR4 compatibility" +msgstr "Bị bá» qua để tÆ°Æ¡ng thích vá»›i SVR4" + +#: lexsup.c:260 +msgid "Generate relocatable output" +msgstr "Tạo ra dữ liệu có thể định vị lại" + +#: lexsup.c:264 +msgid "Just link symbols (if directory, same as --rpath)" +msgstr "Chỉ liên kết ký hiệu (nếu thÆ° mục, bằng tùy chá»n « --rpath »)" + +#: lexsup.c:267 +msgid "Strip all symbols" +msgstr "TÆ°á»›c má»i ký hiệu" + +#: lexsup.c:269 +msgid "Strip debugging symbols" +msgstr "TÆ°á»›c ký hiệu gỡ lối" + +#: lexsup.c:271 +msgid "Strip symbols in discarded sections" +msgstr "TÆ°á»›c ký hiệu trong phần bị hủy" + +#: lexsup.c:273 +msgid "Do not strip symbols in discarded sections" +msgstr "Äừng tÆ°á»›c ký hiệu trong phần bị hủy" + +#: lexsup.c:275 +msgid "Trace file opens" +msgstr "Tập tin vết có mở" + +#: lexsup.c:277 +msgid "Read linker script" +msgstr "Äá»c tập lệnh liên kết" + +#: lexsup.c:279 lexsup.c:297 lexsup.c:363 lexsup.c:378 lexsup.c:468 +#: lexsup.c:493 lexsup.c:520 +msgid "SYMBOL" +msgstr "Kà HIỆU" + +#: lexsup.c:279 +msgid "Start with undefined reference to SYMBOL" +msgstr "Bắt đầu vá»›i tham chiệu gạch chân đến Kà HIỆU" + +#: lexsup.c:282 +msgid "[=SECTION]" +msgstr "[=PHẦN]" + +#: lexsup.c:283 +msgid "Don't merge input [SECTION | orphan] sections" +msgstr "Äừng kết hợp phần nhập [PHẦN | mồ côi]" + +#: lexsup.c:285 +msgid "Build global constructor/destructor tables" +msgstr "Xây dụng bảng cấu tạo/phá toàn cục" + +#: lexsup.c:287 schroot/schroot.c:73 schroot/schroot-options.cc:64 +#: schroot/schroot-releaselock-options.cc:48 +msgid "Print version information" +msgstr "In ra thông tin phiên bản" + +#: lexsup.c:289 +msgid "Print version and emulation information" +msgstr "In ra thông tin phiên bản và mô phá»ng" + +#: lexsup.c:291 +msgid "Discard all local symbols" +msgstr "Hủy má»i ký hiệu cục bá»™" + +#: lexsup.c:293 +msgid "Discard temporary local symbols (default)" +msgstr "Hủy má»i ký hiệu cục bá»™ tạm thá»i (mặc định)" + +#: lexsup.c:295 +msgid "Don't discard any local symbols" +msgstr "Äừng hủy ký hiệu cục bá»™ nào" + +#: lexsup.c:297 +msgid "Trace mentions of SYMBOL" +msgstr "Vết nÆ¡i ghi Kà HIỆU" + +#: lexsup.c:299 +msgid "Default search path for Solaris compatibility" +msgstr "ÄÆ°á»ng dẫn tìm kiếm để tÆ°Æ¡ng thích vá»›i Solaris" + +#: lexsup.c:302 +msgid "Start a group" +msgstr "Bắt đầu nhóm" + +#: lexsup.c:304 +msgid "End a group" +msgstr "Kết thúc nhóm" + +#: lexsup.c:308 +msgid "Accept input files whose architecture cannot be determined" +msgstr "Chấp nhận tập tin nhập có kiến 
trức không thể được tháo gỡ" + +#: lexsup.c:312 +msgid "Reject input files whose architecture is unknown" +msgstr "Từ chối tập tin nhập có kiến trức lạ" + +#: lexsup.c:315 +msgid "" +"Set DT_NEEDED tags for DT_NEEDED entries in\n" +"\t\t\t\tfollowing dynamic libs" +msgstr "" +"Lập thẻ « DT_NEEDED » (cần thiết DT)\n" +"\tcho mục nhập « DT_NEEDED »\n" +"\ttrong những thư viên động theo đây" + +#: lexsup.c:318 +msgid "" +"Do not set DT_NEEDED tags for DT_NEEDED entries\n" +"\t\t\t\tin following dynamic libs" +msgstr "" +"Đừng lập thẻ « DT_NEEDED » (cần thiết DT)\n" +"\tcho mục nhập « DT_NEEDED »\n" +"\ttrong những thư viên động theo đây" + +#: lexsup.c:321 +msgid "Only set DT_NEEDED for following dynamic libs if used" +msgstr "" +"Chỉ lập thẻ « DT_NEEDED » (cần thiết DT)\n" +"\tcho những thư viên động theo đây nếu được dùng" + +#: lexsup.c:324 +msgid "Always set DT_NEEDED for following dynamic libs" +msgstr "" +"Luôn lập thẻ « DT_NEEDED » (cần thiết DT)\n" +"\tcho những thư viên động theo đây" + +#: lexsup.c:327 +msgid "Ignored for SunOS compatibility" +msgstr "Bị bỏ qua để tương thích với SunOS" + +#: lexsup.c:329 +msgid "Link against shared libraries" +msgstr "Liên kết đối với thư viên dùng chung" + +#: lexsup.c:335 +msgid "Do not link against shared libraries" +msgstr "Đừng liên kết đối với thư viên dùng chung" + +#: lexsup.c:343 +msgid "Bind global references locally" +msgstr "Đóng kết tham chiếu toàn cục một cách địa phương" + +#: lexsup.c:345 +msgid "Check section addresses for overlaps (default)" +msgstr "Kiểm tra địa chỉ phần có chồng chéo (mặc định)" + +msgid "Do not check section addresses for overlaps" +msgstr "Đừng kiểm tra địa chỉ phần có chồng chéo" + +#: lexsup.c:351 +msgid "Output cross reference table" +msgstr "Xuất bảng tham chiếu chéo" + +#: lexsup.c:353 +msgid "SYMBOL=EXPRESSION" +msgstr "KÝ HIỆU=BIỂU THỨC" + +#: lexsup.c:353 +msgid "Define a symbol" +msgstr "Định nghĩa ký hiệu" + +#: lexsup.c:355 +msgid "[=STYLE]" +msgstr "[=KIỂU DÁNG]" + +#: lexsup.c:355 +msgid "Demangle symbol names [using STYLE]" +msgstr "Tháo gỡ tên ký hiệu [bằng KIỂU DÁNG]" + +#: lexsup.c:358 +msgid "Generate embedded relocs" +msgstr "Tạo ra sự định vị lại nhúng" + +#: lexsup.c:360 +msgid "Treat warnings as errors" +msgstr "Xử lý cảnh báo là lỗi" + +#: lexsup.c:363 +msgid "Call SYMBOL at unload-time" +msgstr "Gọi KÝ HIỆU vào lúc bỏ tải" + +#: lexsup.c:365 +msgid "Force generation of file with .exe suffix" +msgstr "Ép buộc tạo ra tập tin có hậu tố « .exe »" + +#: lexsup.c:367 +msgid "Remove unused sections (on some targets)" +msgstr "Gỡ bỏ phần không dùng (trên một số đích)" + +#: lexsup.c:370 +msgid "Don't remove unused sections (default)" +msgstr "Đừng gỡ bỏ phần không dùng (mặc định)" + +#: lexsup.c:373 +msgid "Set default hash table size close to " +msgstr "Lập kích cỡ bảng băm mặc định là gần " + +#: lexsup.c:376 +msgid "Print option help" +msgstr "In ra trợ giúp về tùy chọn" + +#: lexsup.c:378 +msgid "Call SYMBOL at load-time" +msgstr "Gọi KÝ HIỆU vào lúc tải" + +#: lexsup.c:380 +msgid "Write a map file" +msgstr "Ghi tập tin bản đồ" + +#: lexsup.c:382 +msgid "Do not define Common storage" +msgstr "Đừng định nghĩa kho dùng chung" + +#: lexsup.c:384 +msgid "Do not demangle symbol names" +msgstr "Đừng tháo gỡ tên ký hiệu" + +#: lexsup.c:386 +msgid "Use less memory and more disk I/O" +msgstr "Chiếm ít bộ nhớ hơn, và nhiều nhập/xuất đĩa hơn" + +#: lexsup.c:388 +msgid "Do not allow unresolved references in object files" +msgstr "Đừng cho phép 
tham chiệu chưa tháo gỡ trong tập tin đối tượng" + +#: lexsup.c:391 +msgid "Allow unresolved references in shared libaries" +msgstr "Cho phép tham chiệu chưa tháo gỡ trong thư viên dùng chung" + +#: lexsup.c:395 +msgid "Do not allow unresolved references in shared libs" +msgstr "Đừng cho phép tham chiệu chưa tháo gỡ trong thư viên dùng chung" + +#: lexsup.c:399 +msgid "Allow multiple definitions" +msgstr "Cho phép nhiều lời định nghĩa" + +#: lexsup.c:401 +msgid "Disallow undefined version" +msgstr "Bỏ cho phép phiên bản chưa định nghĩa" + +#: lexsup.c:403 +msgid "Create default symbol version" +msgstr "Tạo phiên bản ký hiệu mặc định" + +#: lexsup.c:406 +msgid "Create default symbol version for imported symbols" +msgstr "Tạo phiên bản ký hiệu mặc định cho ký hiệu đã nhập" + +#: lexsup.c:409 +msgid "Don't warn about mismatched input files" +msgstr "Đừng cảnh báo về tập tin nhập không khớp với nhau" + +#: lexsup.c:411 +msgid "Turn off --whole-archive" +msgstr "Tắt tùy chọn « --whole-archive » (toàn kho)" + +#: lexsup.c:413 +msgid "Create an output file even if errors occur" +msgstr "Tạo tập tin xuất dù gặp lỗi" + +#: lexsup.c:418 +msgid "" +"Only use library directories specified on\n" +"\t\t\t\tthe command line" +msgstr "" +"Chỉ dùng thư mục thư viên\n" +"\tđược ghi rõ trên dòng lệnh" + +#: lexsup.c:421 +msgid "Specify target of output file" +msgstr "Ghi rõ đích của tập tin xuất" + +#: lexsup.c:424 +msgid "Ignored for Linux compatibility" +msgstr "Bị bỏ qua để tương thích với Linux" + +#: lexsup.c:427 +msgid "Reduce memory overheads, possibly taking much longer" +msgstr "Giảm bộ nhớ duy tu, có thể mất rất nhiều thời gian hơn" + +#: lexsup.c:430 +msgid "Relax branches on certain targets" +msgstr "Lơi ra nhánh trên một số đích nào đó" + +#: lexsup.c:433 +msgid "Keep only symbols listed in FILE" +msgstr "Giữ chỉ những ký hiệu được liệt kê trong TẬP TIN" + +#: lexsup.c:435 +msgid "Set runtime shared library search path" +msgstr "Lập đường dẫn tìm kiếm thư viên dùng chung vào lúc chạy" + +#: lexsup.c:437 +msgid "Set link time shared library search path" +msgstr "Lập đường dẫn tìm kiếm thư viên dùng chung vào lúc liên kết" + +#: lexsup.c:440 +msgid "Create a shared library" +msgstr "Tạo thư viên dùng chung" + +#: lexsup.c:444 +msgid "Create a position independent executable" +msgstr "Tạo ứng dụng chạy được không phụ thuộc vào vị trí" + +#: lexsup.c:448 +msgid "Sort common symbols by size" +msgstr "Sắp xếp ký hiệu dùng chung theo kích cỡ" + +#: lexsup.c:452 +msgid "name|alignment" +msgstr "tên|canh_hàng" + +#: lexsup.c:453 +msgid "Sort sections by name or maximum alignment" +msgstr "Sắp xếp phần theo tên hay canh lề tối đa" + +#: lexsup.c:455 +msgid "COUNT" +msgstr "SỐ_ĐẾM" + +#: lexsup.c:455 +msgid "How many tags to reserve in .dynamic section" +msgstr "Số thẻ cần giữ lại trong phần « .dynamic » (động)" + +#: lexsup.c:458 +msgid "[=SIZE]" +msgstr "[=CỠ]" + +#: lexsup.c:458 +msgid "Split output sections every SIZE octets" +msgstr "Chia tách phần xuất tại mỗi CỠ bộ tám" + +#: lexsup.c:461 +msgid "[=COUNT]" +msgstr "[=SỐ_ĐẾM]" + +#: lexsup.c:461 +msgid "Split output sections every COUNT relocs" +msgstr "Chia tách phần xuất tại mỗi SỐ_ĐẾM việc định vị lại" + +#: lexsup.c:464 +msgid "Print memory usage statistics" +msgstr "In ra thống kê cách sử dụng bộ nhớ" + +#: lexsup.c:466 +msgid "Display target specific options" +msgstr "Hiển thị tùy chọn đặc trưng cho đích" + +#: lexsup.c:468 +msgid "Do task level linking" +msgstr "Liên kết 
trong lớp tác vụ" + +#: lexsup.c:470 +msgid "Use same format as native linker" +msgstr "Dùng cùng khuôn dạng với bộ liên kết sở hữu" + +#: lexsup.c:472 +msgid "SECTION=ADDRESS" +msgstr "PHẦN=ĐỊA CHỈ" + +#: lexsup.c:472 +msgid "Set address of named section" +msgstr "Lập địa chỉ của phần có tên" + +#: lexsup.c:475 +msgid "Set address of .bss section" +msgstr "Lập địa chỉ của phần « .bss »" + +#: lexsup.c:477 +msgid "Set address of .data section" +msgstr "Lập địa chỉ của phần « .data » (dữ liệu)" + +#: lexsup.c:479 +msgid "Set address of .text section" +msgstr "Lập địa chỉ của phần « .text » (văn bản)" + +#: lexsup.c:482 +msgid "" +"How to handle unresolved symbols. is:\n" +"\t\t\t\tignore-all, report-all, ignore-in-object-files,\n" +"\t\t\t\tignore-in-shared-libs" +msgstr "" +"Cách quản lý ký hiệu chưa tháo gỡ.\n" +" \t là:\n" +" • ignore-all\t\t\t\tbỏ qua hết\n" +" • report-all\t\t\t\tthông báo hết\n" +" • ignore-in-object-files\tbỏ qua trong tập tin đối tượng\n" +" • ignore-in-shared-libs\tbỏ qua trong thư viên dùng chung" + +#: lexsup.c:486 +msgid "Output lots of information during link" +msgstr "Xuất nhiều thông tin trong khi liên kết" + +#: lexsup.c:490 +msgid "Read version information script" +msgstr "Đọc tập lệnh thông tin phiên bản" + +#: lexsup.c:493 +msgid "" +"Take export symbols list from .exports, using\n" +"\t\t\t\tSYMBOL as the version." +msgstr "" +"Lấy danh sách ký hiệu xuất từ « .exports » (xuất),\n" +"\t\tvới phiên bản là KÝ HIỆU" + +#: lexsup.c:496 +msgid "Warn about duplicate common symbols" +msgstr "Cảnh báo về ký hiệu dùng chung trùng" + +#: lexsup.c:498 +msgid "Warn if global constructors/destructors are seen" +msgstr "Cảnh báo nếu gặp bộ cấu tạo/phá toàn cục" + +#: lexsup.c:501 +msgid "Warn if the multiple GP values are used" +msgstr "Cảnh báo nếu sử dụng nhiều giá trị GP" + +#: lexsup.c:503 +msgid "Warn only once per undefined symbol" +msgstr "Cảnh báo chỉ một lần về mỗi ký hiệu chưa định nghĩa" + +#: lexsup.c:505 +msgid "Warn if start of section changes due to alignment" +msgstr "Cảnh báo nếu đầu phần thay đổi vì canh lề" + +#: lexsup.c:508 +msgid "Warn if shared object has DT_TEXTREL" +msgstr "Cảnh báo nếu đối tượng dùng chung có « DT_TEXTREL »" + +#: lexsup.c:512 +msgid "Report unresolved symbols as warnings" +msgstr "Thông báo ký hiệu chưa tháo gỡ là cảnh báo" + +#: lexsup.c:515 +msgid "Report unresolved symbols as errors" +msgstr "Thông báo ký hiệu chưa tháo gỡ là lỗi" + +#: lexsup.c:517 +msgid "Include all objects from following archives" +msgstr "Gồm mọi đối tượng từ những kho theo đây" + +#: lexsup.c:520 +msgid "Use wrapper functions for SYMBOL" +msgstr "Sử dụng hàm cuốn cho KÝ HIỆU" + +#: lexsup.c:667 +msgid "%P: unrecognized option '%s'\n" +msgstr "%P: không nhận ra tùy chọn « %s »\n" + +#: lexsup.c:669 +msgid "%P%F: use the --help option for usage information\n" +msgstr "" +"%P%F: hãy sử dụng tùy chọn « --help » để xem thông tin về cách sử dụng\n" + +#: lexsup.c:687 +msgid "%P%F: unrecognized -a option `%s'\n" +msgstr "%P%F: không nhận ra tùy chọn kiểu « -a » là « %s »\n" + +#: lexsup.c:700 +msgid "%P%F: unrecognized -assert option `%s'\n" +msgstr "%P%F: không nhận ra tùy chọn kiểu « -assert » (khẳng định) là « %s »\n" + +#: lexsup.c:743 +msgid "%F%P: unknown demangling style `%s'" +msgstr "%F%P: không biết kiểu dáng tháo gỡ « %s »" + +#: lexsup.c:805 +msgid "%P%F: invalid number `%s'\n" +msgstr "%P%F: số không hợp lệ « %s »\n" + +#: lexsup.c:897 +msgid "%P%F: bad --unresolved-symbols option: %s\n" +msgstr "" 
+"%P%F: tùy chá»n « --unresolved-symbols » (các ký hiệu chÆ°a tháo gỡ) sai : %s\n" + +#: lexsup.c:968 +msgid "%P%F: bad -rpath option\n" +msgstr "%P%F: tùy chá»n « -rpath » (Ä‘Æ°á»ng dẫn r) sai\n" + +#: lexsup.c:1080 +msgid "%P%F: -shared not supported\n" +msgstr "%P%F: không há»— trợ tùy chá»n « -shared » (dùng chung)\n" + +#: lexsup.c:1089 +msgid "%P%F: -pie not supported\n" +msgstr "%P%F: không há»— trợ tùy chá»n « -pie » (bánh)\n" + +#: lexsup.c:1099 gphoto2/main.c:195 gphoto2/main.c:196 cg_print.c:98 +#: hist.c:385 ui/bookmarks.glade.h:49 plugins/dbus/xchat-remote.c:47 +msgid "name" +msgstr "tên" + +#: lexsup.c:1104 +msgid "%P%F: invalid section sorting option: %s\n" +msgstr "%P%F: tùy chá»n sắp xếp phần không hợp lệ: %s\n" + +#: lexsup.c:1130 +msgid "%P%F: invalid argument to option \"--section-start\"\n" +msgstr "%P%F: đối số không hợp lệ đối vá»›i tùy chá»n « --section-start »\n" + +#: lexsup.c:1137 +msgid "%P%F: missing argument(s) to option \"--section-start\"\n" +msgstr "" +"%P%F: thiếu đối số đối vá»›i tùy chá»n « --section-start » (bắt đầu phần)\n" + +#: lexsup.c:1311 +msgid "%P%F: may not nest groups (--help for usage)\n" +msgstr "" +"%P%F: không cho phép lồng nhóm vá»›i nhau (« --help » để xem cách sá»­ dụng " +"đúng)\n" + +#: lexsup.c:1318 +msgid "%P%F: group ended before it began (--help for usage)\n" +msgstr "" +"%P%F: nhóm kết thúc trÆ°á»›c bắt đầu (« --help » để xem cách sá»­ dụng đúng)\n" + +#: lexsup.c:1346 +msgid "%P%X: --hash-size needs a numeric argument\n" +msgstr "" +"%P%X: tùy chá»n « --hash-size » (kích cỡ băm) cần thiết đối số thuá»™c số\n" + +#: lexsup.c:1397 lexsup.c:1410 +msgid "%P%F: invalid hex number `%s'\n" +msgstr "%P%F: số thập lục không hợp lệ « %s »\n" + +#: lexsup.c:1445 +#, c-format +msgid "Usage: %s [options] file...\n" +msgstr "Cách sá»­ dụng: %s tập_tin...\n" + +#: lexsup.c:1447 main.c:292 +#, c-format +msgid "Options:\n" +msgstr "Tùy chá»n:\n" + +#: lexsup.c:1538 +#, c-format +msgid "%s: supported emulations: " +msgstr "%s: mô phá»ng há»— trợ :" + +#: lexsup.c:1543 +#, c-format +msgid "%s: emulation specific options:\n" +msgstr "%s: tùy chá»n đặc trÆ°ng cho mô phá»ng:\n" + +#: mri.c:291 +msgid "%P%F: unknown format type %s\n" +msgstr "%P%F: không biết kiểu khuôn dạng %s\n" + +#: pe-dll.c:303 +#, c-format +msgid "%XUnsupported PEI architecture: %s\n" +msgstr "%XChÆ°a há»— trợ kiến trúc PEI: %s\n" + +#: pe-dll.c:652 +#, c-format +msgid "%XError, duplicate EXPORT with ordinals: %s (%d vs %d)\n" +msgstr "%XLá»—i: XUẤT trùng vá»›i Ä‘iá»u thứ tá»± : %s (%d so vá»›i %d)\n" + +#: pe-dll.c:659 +#, c-format +msgid "Warning, duplicate EXPORT: %s\n" +msgstr "Cảnh báo, XUẤT trùng: %s\n" + +#: pe-dll.c:725 +#, c-format +msgid "%XCannot export %s: symbol not defined\n" +msgstr "%XKhông thể xuất %s: chÆ°a định nghÄ©a ký hiệu\n" + +#: pe-dll.c:731 +#, c-format +msgid "%XCannot export %s: symbol wrong type (%d vs %d)\n" +msgstr "%XKhông thể xuất %s: ký hiệu sai kiểu (%d so vá»›i %d)\n" + +#: pe-dll.c:738 +#, c-format +msgid "%XCannot export %s: symbol not found\n" +msgstr "%XKhông thể xuất %s: không tìm thấy ký hiệu\n" + +#: pe-dll.c:850 +#, c-format +msgid "%XError, ordinal used twice: %d (%s vs %s)\n" +msgstr "%XLá»—i, Ä‘iá»u thứ tá»± được dùng hai lần: %d (%s so vá»›i %s)\n" + +#: pe-dll.c:1172 +#, c-format +msgid "%XError: %d-bit reloc in dll\n" +msgstr "%xLá»—i: định vị lại %d-bit trong DLL\n" + +#: pe-dll.c:1300 +#, c-format +msgid "%s: Can't open output def file %s\n" +msgstr "%s: Không thể mở tập tin xuất def (định nghÄ©a) %s\n" + +#: pe-dll.c:1443 +#, 
c-format +msgid "; no contents available\n" +msgstr "; không có nội dung sẵn sàng\n" + +#: pe-dll.c:2205 +msgid "" +"%C: variable '%T' can't be auto-imported. Please read the documentation for " +"ld's --enable-auto-import for details.\n" +msgstr "" +"%C: không thể tự động nhập biến « %T ». Hãy đọc tài liệu hướng dẫn về tùy " +"chọn « --enable-auto-import » (bật nhập tự động) của trình ld, để xem chi " +"tiết.\n" + +#: pe-dll.c:2235 +#, c-format +msgid "%XCan't open .lib file: %s\n" +msgstr "%XKhông thể mở tập tin « .lib » (thư viên): %s\n" + +#: pe-dll.c:2240 +#, c-format +msgid "Creating library file: %s\n" +msgstr "Đang tạo tập tin thư viên: %s\n" + +#: src/plugins/language/language-compiler.c:37 +#, c-format +msgid "Please provide a list of klp files as arguments.\n" +msgstr "Hãy cung cấp danh sách các tập tin kiểu « klp » dạng đối số.\n" + +#: src/plugins/printable/dictionary-builder.c:113 +#, c-format +msgid "Error opening file `%s': %s\n" +msgstr "Gặp lỗi khi mở tập tin « %s »: %s\n" + +#: src/plugins/printable/dictionary-builder.c:74 +#, c-format +msgid "" +"Error allocating: %s\n" +"." +msgstr "" +"Gặp lỗi khi cấp phát: %s\n" +"." + +#: src/plugins/printable/dictionary-builder.c:86 +#, c-format +msgid "Increase ALLOCSIZE (in %s).\n" +msgstr "Tăng lên ALLOCSIZE (kích cỡ cấp phát, theo %s).\n" + +#: src/plugins/rpm/rpmextractor.c:3048 +#, c-format +msgid "Source RPM %d.%d" +msgstr "RPM nguồn %d.%d" + +#: src/plugins/rpm/rpmextractor.c:3053 +#, c-format +msgid "Binary RPM %d.%d" +msgstr "RPM nhị phân %d.%d" + +#: src/plugins/printable/dictionary-builder.c:53 +#, c-format +msgid "" +"Please provide the name of the language you are building\n" +"a dictionary for. For example:\n" +msgstr "" +"Hãy cung cấp tên ngôn ngữ mà bạn đang xây dụng từ điển cho nó. Lấy thí dụ :\n" + +#: ../gnopi/cmdmapui.c:1588 +msgid "Commands" +msgstr "Lệnh" + +#: src/plugins/manextractor.c:147 src/plugins/manextractor.c:133 +msgid "System calls" +msgstr "Cuộc gọi hệ thống" + +#: src/plugins/manextractor.c:152 src/plugins/manextractor.c:138 +msgid "Library calls" +msgstr "Cuộc gọi thư viên" + +#: src/plugins/manextractor.c:157 src/plugins/manextractor.c:143 +msgid "Special files" +msgstr "Tập tin đặc biệt" + +#: src/plugins/manextractor.c:162 src/plugins/manextractor.c:148 +msgid "File formats and conventions" +msgstr "Khuôn dang tập tin và quy ước" + +#: ../data/toc.xml.in.h:7 ../src/red_searchbox.py:179 ../src/util.c:339 +msgid "Games" +msgstr "Trò chơi" + +#: src/plugins/manextractor.c:172 src/plugins/manextractor.c:158 +msgid "Conventions and miscellaneous" +msgstr "Quy ước và linh tinh" + +#: src/plugins/manextractor.c:177 src/plugins/manextractor.c:163 +msgid "System management commands" +msgstr "Lệnh quản lý hệ thống" + +#: src/plugins/manextractor.c:182 src/plugins/manextractor.c:168 +msgid "Kernel routines" +msgstr "Thao tác hạt nhân" + +#: src/plugins/wavextractor.c:113 src/plugins/mp3extractor.c:434 +#: src/plugins/wavextractor.c:114 src/plugins/mp3extractor.c:438 +msgid "mono" +msgstr "một nguồn" + +#: ../audio-properties-view/audio-properties-view.c:171 +msgid "stereo" +msgstr "âm lập thể" + +#: src/plugins/jpegextractor.c:178 +#, c-format +msgid "%ux%u dots per inch" +msgstr "%ux%u chấm trên mỗi insơ" + +#: src/plugins/jpegextractor.c:188 +#, c-format +msgid "%ux%u dots per cm" +msgstr "%ux%u chấm trên mỗi cm" + +#: src/plugins/jpegextractor.c:198 +#, c-format +msgid "%ux%u dots per inch?" +msgstr "%ux%u chấm trên mỗi insơ?" 
+ +#: src/plugins/riffextractor.c:167 +#, c-format +msgid "codec: %s, %u fps, %u ms" +msgstr "codec: %s, %u khung/giây, %u miligiây" + +#: src/plugins/mp3extractor.c:49 ../cddb-slave2/cddb-track-editor.c:78 +msgid "Blues" +msgstr "Blu" + +#: src/plugins/mp3extractor.c:50 +msgid "Classic Rock" +msgstr "Rốc cổ điển" + +#: ../src/Database.cs:813 ../src/Database.cs:833 ../glom/glom.glade.h:79 +#: ../mimedir/mimedir-vcard-address.c:216 +#: ../mimedir/mimedir-vcard-address.c:217 +msgid "Country" +msgstr "Quốc gia" + +#: src/plugins/mp3extractor.c:52 ../cddb-slave2/cddb-track-editor.c:81 +msgid "Dance" +msgstr "Khiêu vũ" + +#: src/plugins/mp3extractor.c:53 ../cddb-slave2/cddb-track-editor.c:82 +msgid "Disco" +msgstr "Đít-xcô" + +#: src/plugins/mp3extractor.c:54 ../cddb-slave2/cddb-track-editor.c:83 +msgid "Funk" +msgstr "Sôi nổi" + +#: src/plugins/mp3extractor.c:55 ../cddb-slave2/cddb-track-editor.c:84 +msgid "Grunge" +msgstr "Vỡ mộng" + +#: src/plugins/mp3extractor.c:56 ../cddb-slave2/cddb-track-editor.c:85 +msgid "Hip-Hop" +msgstr "Hít-họt" + +#: src/plugins/mp3extractor.c:57 ../cddb-slave2/cddb-track-editor.c:86 +msgid "Jazz" +msgstr "Ja" + +#: src/plugins/mp3extractor.c:58 ../cddb-slave2/cddb-track-editor.c:87 +msgid "Metal" +msgstr "Kim" + +#: src/plugins/mp3extractor.c:59 ../cddb-slave2/cddb-track-editor.c:88 +msgid "New Age" +msgstr "Thời kỳ mới" + +#: src/plugins/mp3extractor.c:60 ../cddb-slave2/cddb-track-editor.c:89 +msgid "Oldies" +msgstr "Cũ" + +#: src/plugins/mp3extractor.c:62 ../cddb-slave2/cddb-track-editor.c:91 +msgid "Pop" +msgstr "Pốp" + +#: src/plugins/mp3extractor.c:63 ../cddb-slave2/cddb-track-editor.c:92 +msgid "R&B" +msgstr "Nhịp điệu và blu" + +#: src/plugins/mp3extractor.c:64 ../cddb-slave2/cddb-track-editor.c:93 +msgid "Rap" +msgstr "Rap" + +#: src/plugins/mp3extractor.c:65 ../cddb-slave2/cddb-track-editor.c:94 +msgid "Reggae" +msgstr "Re-gê" + +#: src/plugins/mp3extractor.c:66 ../cddb-slave2/cddb-track-editor.c:95 +msgid "Rock" +msgstr "Rốc" + +#: src/plugins/mp3extractor.c:67 ../cddb-slave2/cddb-track-editor.c:96 +msgid "Techno" +msgstr "Kỹ thuật" + +#: src/plugins/mp3extractor.c:68 ../cddb-slave2/cddb-track-editor.c:97 +msgid "Industrial" +msgstr "Công nghiệp" + +#: src/plugins/mp3extractor.c:69 +msgid "Alternative" +msgstr "Sự chọn khác" + +#: src/plugins/mp3extractor.c:70 ../cddb-slave2/cddb-track-editor.c:99 +msgid "Ska" +msgstr "Ska" + +#: src/plugins/mp3extractor.c:71 ../cddb-slave2/cddb-track-editor.c:100 +msgid "Death Metal" +msgstr "Kim chết" + +#: src/plugins/mp3extractor.c:72 ../cddb-slave2/cddb-track-editor.c:101 +msgid "Pranks" +msgstr "Trò chơi ác" + +#: src/plugins/mp3extractor.c:73 ../cddb-slave2/cddb-track-editor.c:102 +msgid "Soundtrack" +msgstr "Nhạc của phím" + +#: src/plugins/mp3extractor.c:74 ../cddb-slave2/cddb-track-editor.c:103 +msgid "Euro-Techno" +msgstr "Kỹ thuật Âu" + +#: src/plugins/mp3extractor.c:75 ../cddb-slave2/cddb-track-editor.c:104 +msgid "Ambient" +msgstr "Chung quanh" + +#: src/plugins/mp3extractor.c:76 ../cddb-slave2/cddb-track-editor.c:105 +msgid "Trip-Hop" +msgstr "Tợ-rít-Hot" + +#: src/plugins/mp3extractor.c:77 ../cddb-slave2/cddb-track-editor.c:106 +msgid "Vocal" +msgstr "Thanh nhạc" + +#: src/plugins/mp3extractor.c:78 ../cddb-slave2/cddb-track-editor.c:107 +msgid "Jazz+Funk" +msgstr "Ja và Sôi nổi" + +#: src/plugins/mp3extractor.c:79 ../cddb-slave2/cddb-track-editor.c:108 +msgid "Fusion" +msgstr "Nóng chảy" + +#: src/plugins/mp3extractor.c:80 ../cddb-slave2/cddb-track-editor.c:109 +msgid "Trance" +msgstr "Hôn 
mê" + +#: src/plugins/mp3extractor.c:81 ../cddb-slave2/cddb-track-editor.c:110 +msgid "Classical" +msgstr "Cổ Ä‘iển" + +#: src/plugins/mp3extractor.c:82 ../cddb-slave2/cddb-track-editor.c:111 +msgid "Instrumental" +msgstr "Bằng nhạc khí" + +#: src/plugins/mp3extractor.c:83 ../cddb-slave2/cddb-track-editor.c:112 +msgid "Acid" +msgstr "Axit" + +#: src/plugins/mp3extractor.c:84 ../sheets/ciscomisc.sheet.in.h:16 +#: ../cddb-slave2/cddb-track-editor.c:113 +msgid "House" +msgstr "Nhà" + +#: src/plugins/mp3extractor.c:85 ../src/ui/keyboard-properties.c:124 +msgid "Game" +msgstr "Trò chÆ¡i" + +#: src/plugins/mp3extractor.c:86 ../cddb-slave2/cddb-track-editor.c:115 +msgid "Sound Clip" +msgstr "Trích Ä‘oạn âm thanh" + +#: src/plugins/mp3extractor.c:87 ../cddb-slave2/cddb-track-editor.c:116 +msgid "Gospel" +msgstr "Phúc âm" + +#: src/plugins/mp3extractor.c:88 ../cddb-slave2/cddb-track-editor.c:117 +#: ../plug-ins/common/spheredesigner.c:296 +msgid "Noise" +msgstr "á»’n" + +#: src/plugins/mp3extractor.c:89 +msgid "Alt. Rock" +msgstr "Rốc thay thế" + +#: src/plugins/mp3extractor.c:90 sys/oss/gstossmixer.c:100 +#: ../cddb-slave2/cddb-track-editor.c:119 ext/alsa/gstalsamixertrack.c:84 +msgid "Bass" +msgstr "Trầm" + +#: src/plugins/mp3extractor.c:91 ../cddb-slave2/cddb-track-editor.c:120 +msgid "Soul" +msgstr "Hồn" + +#: src/plugins/mp3extractor.c:92 ../cddb-slave2/cddb-track-editor.c:121 +msgid "Punk" +msgstr "Rốc dữ dá»™i" + +#: src/plugins/mp3extractor.c:93 ../src/util.c:361 +msgid "Space" +msgstr "Khoảng" + +#: src/plugins/mp3extractor.c:94 ../cddb-slave2/cddb-track-editor.c:123 +msgid "Meditative" +msgstr "Tĩnh toÌ£a" + +#: src/plugins/mp3extractor.c:95 ../cddb-slave2/cddb-track-editor.c:124 +msgid "Instrumental Pop" +msgstr "Pốp bằng nhac khí" + +#: src/plugins/mp3extractor.c:96 ../cddb-slave2/cddb-track-editor.c:125 +msgid "Instrumental Rock" +msgstr "Rốc bằng nhạc khí" + +#: src/plugins/mp3extractor.c:97 ../cddb-slave2/cddb-track-editor.c:126 +msgid "Ethnic" +msgstr "Dân tá»™c" + +#: src/plugins/mp3extractor.c:98 ../cddb-slave2/cddb-track-editor.c:127 +msgid "Gothic" +msgstr "Gô-tích" + +#: src/plugins/mp3extractor.c:99 ../cddb-slave2/cddb-track-editor.c:128 +msgid "Darkwave" +msgstr "Sóng bóng" + +#: src/plugins/mp3extractor.c:100 ../cddb-slave2/cddb-track-editor.c:129 +msgid "Techno-Industrial" +msgstr "Kỹ thuật - Công nghiệp" + +#: src/plugins/mp3extractor.c:101 ../cddb-slave2/cddb-track-editor.c:130 +msgid "Electronic" +msgstr "Äiện" + +#: src/plugins/mp3extractor.c:102 ../cddb-slave2/cddb-track-editor.c:131 +msgid "Pop-Folk" +msgstr "Pốp - Dân ca" + +#: src/plugins/mp3extractor.c:103 ../cddb-slave2/cddb-track-editor.c:132 +msgid "Eurodance" +msgstr "Khiêu vũ Âu" + +#: src/plugins/mp3extractor.c:104 ../cddb-slave2/cddb-track-editor.c:133 +msgid "Dream" +msgstr "MÆ¡ má»™ng " + +#: src/plugins/mp3extractor.c:105 ../cddb-slave2/cddb-track-editor.c:134 +msgid "Southern Rock" +msgstr "Rốc Nam" + +#: src/plugins/mp3extractor.c:106 ../cddb-slave2/cddb-track-editor.c:135 +msgid "Comedy" +msgstr "Kịch vui" + +#: src/plugins/mp3extractor.c:107 ../cddb-slave2/cddb-track-editor.c:136 +msgid "Cult" +msgstr "Giáo phái" + +#: src/plugins/mp3extractor.c:108 ../cddb-slave2/cddb-track-editor.c:137 +msgid "Gangsta Rap" +msgstr "Rap Kẻ cÆ°á»›p" + +#: src/plugins/mp3extractor.c:109 ../cddb-slave2/cddb-track-editor.c:138 +msgid "Top 40" +msgstr "40 tốt nhất" + +#: src/plugins/mp3extractor.c:110 ../cddb-slave2/cddb-track-editor.c:139 +msgid "Christian Rap" +msgstr "Ráp CÆ¡-đốc" + +#: src/plugins/mp3extractor.c:111 
../cddb-slave2/cddb-track-editor.c:140 +msgid "Pop/Funk" +msgstr "Pốp/Sôi nổi" + +#: src/plugins/mp3extractor.c:112 ../cddb-slave2/cddb-track-editor.c:141 +msgid "Jungle" +msgstr "Rừng" + +#: src/plugins/mp3extractor.c:113 ../cddb-slave2/cddb-track-editor.c:142 +msgid "Native American" +msgstr "Mỹ bản xứ" + +#: src/plugins/mp3extractor.c:114 ../cddb-slave2/cddb-track-editor.c:143 +msgid "Cabaret" +msgstr "Ca-ba-rê" + +#: src/plugins/mp3extractor.c:115 ../cddb-slave2/cddb-track-editor.c:144 +msgid "New Wave" +msgstr "Sóng mới" + +#: src/plugins/mp3extractor.c:116 ../cddb-slave2/cddb-track-editor.c:145 +msgid "Psychedelic" +msgstr "Tạo ảo giác" + +#: src/plugins/mp3extractor.c:117 ../cddb-slave2/cddb-track-editor.c:146 +msgid "Rave" +msgstr "Rít" + +#: src/plugins/mp3extractor.c:118 ../cddb-slave2/cddb-track-editor.c:147 +msgid "Showtunes" +msgstr "Điệu kịch" + +#: src/plugins/mp3extractor.c:119 ../cddb-slave2/cddb-track-editor.c:148 +msgid "Trailer" +msgstr "Quảng cáo trước phím" + +#: src/plugins/mp3extractor.c:120 ../cddb-slave2/cddb-track-editor.c:149 +msgid "Lo-Fi" +msgstr "Độ trung thực thấp" + +#: src/plugins/mp3extractor.c:121 ../cddb-slave2/cddb-track-editor.c:150 +msgid "Tribal" +msgstr "Bộ lạc" + +#: src/plugins/mp3extractor.c:122 ../cddb-slave2/cddb-track-editor.c:151 +msgid "Acid Punk" +msgstr "Rốc dữ dội axit" + +#: src/plugins/mp3extractor.c:123 ../cddb-slave2/cddb-track-editor.c:152 +msgid "Acid Jazz" +msgstr "Ja axit" + +#: src/plugins/mp3extractor.c:124 ../cddb-slave2/cddb-track-editor.c:153 +msgid "Polka" +msgstr "Pôn-ca" + +#: src/plugins/mp3extractor.c:125 ../cddb-slave2/cddb-track-editor.c:154 +msgid "Retro" +msgstr "Lại sau" + +#: src/plugins/mp3extractor.c:126 ../cddb-slave2/cddb-track-editor.c:155 +msgid "Musical" +msgstr "Kịch nhạc" + +#: src/plugins/mp3extractor.c:127 ../cddb-slave2/cddb-track-editor.c:156 +msgid "Rock & Roll" +msgstr "Rốc en rôn" + +#: src/plugins/mp3extractor.c:128 ../cddb-slave2/cddb-track-editor.c:157 +msgid "Hard Rock" +msgstr "Rốc cứng" + +#: src/plugins/mp3extractor.c:129 ../cddb-slave2/cddb-track-editor.c:158 +msgid "Folk" +msgstr "Dân ca" + +#: src/plugins/mp3extractor.c:130 ../cddb-slave2/cddb-track-editor.c:159 +msgid "Folk/Rock" +msgstr "Dân ca/Rốc" + +#: src/plugins/mp3extractor.c:131 ../cddb-slave2/cddb-track-editor.c:160 +msgid "National Folk" +msgstr "Dân ca quốc gia" + +#: src/plugins/mp3extractor.c:132 ../cddb-slave2/cddb-track-editor.c:161 +msgid "Swing" +msgstr "Xuynh" + +#: src/plugins/mp3extractor.c:133 ../cddb-slave2/cddb-track-editor.c:162 +msgid "Fast-Fusion" +msgstr "Nóng chạy nhanh" + +#: src/plugins/mp3extractor.c:134 +msgid "Bebob" +msgstr "Bí-bọt" + +#: src/plugins/mp3extractor.c:135 +msgid "Latin" +msgstr "Dân tộc Tây-ban-nha" + +#: src/plugins/mp3extractor.c:136 ../cddb-slave2/cddb-track-editor.c:165 +msgid "Revival" +msgstr "Phục âm nhấn mạnh" + +#: src/plugins/mp3extractor.c:137 ../gedit/gedit-encodings.c:174 +#: ../cddb-slave2/cddb-track-editor.c:166 ../src/encoding.c:82 +msgid "Celtic" +msgstr "Xen-tơ" + +#: src/plugins/mp3extractor.c:138 ../cddb-slave2/cddb-track-editor.c:167 +msgid "Bluegrass" +msgstr "Cỏ xanh" + +#: src/plugins/mp3extractor.c:139 ../cddb-slave2/cddb-track-editor.c:168 +msgid "Avantgarde" +msgstr "Đi tiên phong" + +#: src/plugins/mp3extractor.c:140 ../cddb-slave2/cddb-track-editor.c:169 +msgid "Gothic Rock" +msgstr "Rốc Gô-tích" + +#: src/plugins/mp3extractor.c:141 ../cddb-slave2/cddb-track-editor.c:170 +msgid "Progressive Rock" +msgstr "Rốc tiến lên" + +#: 
+msgid "Psychedelic Rock"
+msgstr "Rốc tạo ảo giác"
+
+#: src/plugins/mp3extractor.c:143 ../cddb-slave2/cddb-track-editor.c:172
+msgid "Symphonic Rock"
+msgstr "Rốc giao hưởng"
+
+#: src/plugins/mp3extractor.c:144 ../cddb-slave2/cddb-track-editor.c:173
+msgid "Slow Rock"
+msgstr "Rốc chậm"
+
+#: src/plugins/mp3extractor.c:145 ../cddb-slave2/cddb-track-editor.c:174
+msgid "Big Band"
+msgstr "Dàn nhạc To"
+
+#: src/plugins/mp3extractor.c:146 ../cddb-slave2/cddb-track-editor.c:175
+msgid "Chorus"
+msgstr "Hợp xướng"
+
+#: src/plugins/mp3extractor.c:147 ../cddb-slave2/cddb-track-editor.c:176
+msgid "Easy Listening"
+msgstr "Nghe dễ dàng"
+
+#: src/plugins/mp3extractor.c:148 ../cddb-slave2/cddb-track-editor.c:177
+msgid "Acoustic"
+msgstr "Độ trung thực âm thanh"
+
+#: src/plugins/mp3extractor.c:149 ../cddb-slave2/cddb-track-editor.c:178
+msgid "Humour"
+msgstr "Hài hước"
+
+#: src/plugins/mp3extractor.c:150
+msgid "Speech"
+msgstr "Nói tiếng"
+
+#: src/plugins/mp3extractor.c:151 ../cddb-slave2/cddb-track-editor.c:180
+msgid "Chanson"
+msgstr "Bài hát kiểu Pháp"
+
+#: src/plugins/mp3extractor.c:152 ../cddb-slave2/cddb-track-editor.c:181
+msgid "Opera"
+msgstr "Hát kịch"
+
+#: src/plugins/mp3extractor.c:153 ../cddb-slave2/cddb-track-editor.c:182
+msgid "Chamber Music"
+msgstr "Nhạc phòng"
+
+#: src/plugins/mp3extractor.c:154 ../cddb-slave2/cddb-track-editor.c:183
+msgid "Sonata"
+msgstr "Bản xô-nat"
+
+#: src/plugins/mp3extractor.c:155 ../cddb-slave2/cddb-track-editor.c:184
+msgid "Symphony"
+msgstr "Giao hưởng"
+
+#: src/plugins/mp3extractor.c:156 ../cddb-slave2/cddb-track-editor.c:185
+msgid "Booty Bass"
+msgstr "Trầm Booty"
+
+#: src/plugins/mp3extractor.c:157 ../cddb-slave2/cddb-track-editor.c:186
+msgid "Primus"
+msgstr "Pri-mus"
+
+#: src/plugins/mp3extractor.c:158 ../cddb-slave2/cddb-track-editor.c:187
+msgid "Porn Groove"
+msgstr "Porn Groove"
+
+#: src/plugins/mp3extractor.c:159 ../cddb-slave2/cddb-track-editor.c:188
+msgid "Satire"
+msgstr "Châm biếm"
+
+#: src/plugins/mp3extractor.c:160 ../cddb-slave2/cddb-track-editor.c:189
+msgid "Slow Jam"
+msgstr "Ứng tác chậm"
+
+#: src/plugins/mp3extractor.c:161 ../cddb-slave2/cddb-track-editor.c:190
+msgid "Club"
+msgstr "Hội"
+
+#: src/plugins/mp3extractor.c:162 ../cddb-slave2/cddb-track-editor.c:191
+msgid "Tango"
+msgstr "Tan-gô"
+
+#: src/plugins/mp3extractor.c:163 ../cddb-slave2/cddb-track-editor.c:192
+msgid "Samba"
+msgstr "Sam-ba"
+
+#: src/plugins/mp3extractor.c:164 ../cddb-slave2/cddb-track-editor.c:193
+msgid "Folklore"
+msgstr "Truyền thống dân gian"
+
+#: src/plugins/mp3extractor.c:165 ../cddb-slave2/cddb-track-editor.c:194
+msgid "Ballad"
+msgstr "Khúc balat"
+
+#: src/plugins/mp3extractor.c:166 ../cddb-slave2/cddb-track-editor.c:195
+msgid "Power Ballad"
+msgstr "Khúc balat năng lực"
+
+#: src/plugins/mp3extractor.c:167 ../cddb-slave2/cddb-track-editor.c:196
+msgid "Rhythmic Soul"
+msgstr "Hồn nhịp nhàng"
+
+#: src/plugins/mp3extractor.c:168 ../cddb-slave2/cddb-track-editor.c:197
+msgid "Freestyle"
+msgstr "Kiểu tự do"
+
+#: src/plugins/mp3extractor.c:169 ../cddb-slave2/cddb-track-editor.c:198
+msgid "Duet"
+msgstr "Bản nhạc cho bộ đôi"
+
+#: src/plugins/mp3extractor.c:170 ../cddb-slave2/cddb-track-editor.c:199
+msgid "Punk Rock"
+msgstr "Rốc - rốc dữ dội"
+
+#: src/plugins/mp3extractor.c:171 ../cddb-slave2/cddb-track-editor.c:200
+msgid "Drum Solo"
+msgstr "Trống diễn đơn"
+
+#: src/plugins/mp3extractor.c:172 ../cddb-slave2/cddb-track-editor.c:201
+msgid "A Cappella"
+msgstr "Hát không có nhạc hỗ trợ"
+
+#: src/plugins/mp3extractor.c:173 ../cddb-slave2/cddb-track-editor.c:202
+msgid "Euro-House"
+msgstr "Nhà Âu"
+
+#: src/plugins/mp3extractor.c:174 ../cddb-slave2/cddb-track-editor.c:203
+msgid "Dance Hall"
+msgstr "Phòng khiêu vũ"
+
+#: src/plugins/mp3extractor.c:175 ../cddb-slave2/cddb-track-editor.c:204
+msgid "Goa"
+msgstr "Goa"
+
+#: src/plugins/mp3extractor.c:176 ../cddb-slave2/cddb-track-editor.c:205
+msgid "Drum & Bass"
+msgstr "Trống và Trầm"
+
+#: src/plugins/mp3extractor.c:177 ../cddb-slave2/cddb-track-editor.c:206
+msgid "Club-House"
+msgstr "Nhà hội"
+
+#: src/plugins/mp3extractor.c:178 ../cddb-slave2/cddb-track-editor.c:207
+msgid "Hardcore"
+msgstr "Lõi cứng"
+
+#: src/plugins/mp3extractor.c:179 ../cddb-slave2/cddb-track-editor.c:208
+msgid "Terror"
+msgstr "Kinh hãi"
+
+#: src/plugins/mp3extractor.c:180 ../cddb-slave2/cddb-track-editor.c:209
+msgid "Indie"
+msgstr "In-đi"
+
+#: src/plugins/mp3extractor.c:181 ../cddb-slave2/cddb-track-editor.c:210
+msgid "BritPop"
+msgstr "Pốp quốc Anh"
+
+#: src/plugins/mp3extractor.c:182 ../cddb-slave2/cddb-track-editor.c:211
+msgid "Negerpunk"
+msgstr "Rốc dữ dội đen"
+
+#: src/plugins/mp3extractor.c:183 ../cddb-slave2/cddb-track-editor.c:212
+msgid "Polsk Punk"
+msgstr "Rốc dữ dội Ba-lan"
+
+#: src/plugins/mp3extractor.c:184
+msgid "Beat"
+msgstr "Nhịp phách"
+
+#: src/plugins/mp3extractor.c:185 ../cddb-slave2/cddb-track-editor.c:214
+msgid "Christian Gangsta Rap"
+msgstr "Rap kẻ cướp Cơ đốc"
+
+#: src/plugins/mp3extractor.c:186 ../cddb-slave2/cddb-track-editor.c:215
+msgid "Heavy Metal"
+msgstr "Kim nặng"
+
+#: src/plugins/mp3extractor.c:187 ../cddb-slave2/cddb-track-editor.c:216
+msgid "Black Metal"
+msgstr "Kim đen"
+
+#: src/plugins/mp3extractor.c:188 ../cddb-slave2/cddb-track-editor.c:217
+msgid "Crossover"
+msgstr "Xuyên chéo"
+
+#: src/plugins/mp3extractor.c:189 ../cddb-slave2/cddb-track-editor.c:218
+msgid "Contemporary Christian"
+msgstr "Cơ-đốc đương thời"
+
+#: src/plugins/mp3extractor.c:190 ../cddb-slave2/cddb-track-editor.c:219
+msgid "Christian Rock"
+msgstr "Rốc Cơ-đốc"
+
+#: src/plugins/mp3extractor.c:191 ../cddb-slave2/cddb-track-editor.c:220
+msgid "Merengue"
+msgstr "Me-ren-gê"
+
+#: src/plugins/mp3extractor.c:192 ../cddb-slave2/cddb-track-editor.c:221
+msgid "Salsa"
+msgstr "San-sa"
+
+#: src/plugins/mp3extractor.c:193 ../cddb-slave2/cddb-track-editor.c:222
+msgid "Thrash Metal"
+msgstr "Kim quẫy đập"
+
+#: src/plugins/mp3extractor.c:194 ../cddb-slave2/cddb-track-editor.c:223
+msgid "Anime"
+msgstr "A-ni-mê"
+
+#: src/plugins/mp3extractor.c:195 ../cddb-slave2/cddb-track-editor.c:224
+msgid "JPop"
+msgstr "JPốp"
+
+#: src/plugins/mp3extractor.c:196 ../cddb-slave2/cddb-track-editor.c:225
+msgid "Synthpop"
+msgstr "Pốp tổng hợp"
+
+#: src/plugins/mp3extractor.c:435 src/plugins/mp3extractor.c:439
+msgid "(variable bps)"
+msgstr "(bit/giây thay đổi)"
+
+#: src/main/extract.c:49 src/doodle/help.c:51
+#, c-format
+msgid ""
+"Usage: %s\n"
+"%s\n"
+"\n"
+msgstr ""
+"Cách sử dụng: %s\n"
+"%s\n"
+"\n"
+
+#: src/main/extract.c:52 src/doodle/help.c:54
+#, c-format
+msgid ""
+"Arguments mandatory for long options are also mandatory for short options.\n"
+msgstr ""
+"Mọi đối số bắt buộc phải sử dụng với tùy chọn dài cũng bắt buộc với tùy chọn "
+"ngắn.\n"
+
+#: src/main/extract.c:126
+msgid "do not remove any duplicates"
+msgstr "đừng gỡ bỏ bản sao nào"
+
+#: src/main/extract.c:128
+msgid "print output in bibtex format"
+msgstr "hiển thị dữ liệu xuất có dạng bibtex"
+
+#: src/main/extract.c:130 src/doodle/doodled.c:60
+msgid ""
+"use the generic plaintext extractor for the language with the 2-letter "
+"language code LANG"
+msgstr ""
+"sử dụng trình rút văn bản thuần thuộc giống loại cho ngôn ngữ có mã ngôn ngữ "
+"bằng hai chữ là LANG"
+
+#: src/main/extract.c:132
+msgid "remove duplicates only if types match"
+msgstr "gỡ bỏ bản sao chỉ nếu kiểu khớp thôi"
+
+#: src/main/extract.c:134
+msgid "use the filename as a keyword (loads filename-extractor plugin)"
+msgstr ""
+"dùng tên tập tin là một từ khoá (thì tải bộ cầm phít « filename-extractor "
+"» [rút tên tập tin])"
+
+#: src/main/extract.c:136
+msgid "print this help"
+msgstr "hiển thị trợ giúp này"
+
+#: src/main/extract.c:138 src/doodle/doodle.c:81
+msgid "compute hash using the given ALGORITHM (currently sha1 or md5)"
+msgstr "tính băm bằng THUẬT TOÁN đã cho (hiện là sha1 hay md5)"
+
+#: src/main/extract.c:140 src/doodle/doodle.c:85 src/doodle/doodled.c:73
+msgid "load an extractor plugin named LIBRARY"
+msgstr "tải một trình cầm phít rút có tên LIBRARY (THƯ VIỆN)"
+
+#: src/main/extract.c:142
+msgid "list all keyword types"
+msgstr "liệt kê mọi kiểu từ khoá"
+
+#: src/main/extract.c:144
+msgid "do not use the default set of extractor plugins"
+msgstr "đừng dùng bộ trình rút mặc định"
+
+#: src/main/extract.c:146
+msgid "print only keywords of the given TYPE (use -L to get a list)"
+msgstr ""
+"hiển thị chỉ từ khoá KIỂU (TYPE) đã cho thôi (dùng « -L » để xem danh sách)"
+
+#: src/main/extract.c:148
+msgid "remove duplicates even if keyword types do not match"
+msgstr "gỡ bỏ bản sao thậm chí nếu kiểu từ khoá không khớp"
+
+#: src/main/extract.c:150
+msgid "use keyword splitting (loads split-extractor plugin)"
+msgstr ""
+"dùng khả năng xẻ từ khoá (thì tải bộ cầm phít « split-extractor » [rút xẻ])"
+
+#: src/main/extract.c:152 src/doodle/doodle.c:97 src/doodle/doodled.c:83
+msgid "print the version number"
+msgstr "hiển thị số thứ tự phiên bản"
+
+#: src/main/extract.c:154 src/doodle/doodle.c:99 src/doodle/doodled.c:85
+msgid "be verbose"
+msgstr "xuất chi tiết"
+
+#: src/main/extract.c:156
+msgid "do not print keywords of the given TYPE"
+msgstr "đừng hiển thị từ khoá KIỂU (TYPE) đã cho"
+
+#: src/main/extract.c:159
+msgid "extract [OPTIONS] [FILENAME]*"
+msgstr ""
+"extract [TÙY_CHỌN] [TÊN_TẬP_TIN]*\n"
+"[extract: rút]"
+
+#: src/main/extract.c:160
+msgid "Extract metadata from files."
+msgstr "Rút siêu dữ liệu ra tập tin."
+
+#: src/main/extract.c:198 src/main/extractor.c:1121 src/main/extractor.c:784
+#, c-format
+msgid "%s - (binary)\n"
+msgstr "%s - (nhị phân)\n"
+
+#: src/main/extract.c:204 src/main/extractor.c:1126 src/main/extractor.c:789
+#, c-format
+msgid "INVALID TYPE - %s\n"
+msgstr "KIỂU KHÔNG HỢP LỆ — %s\n"
+
+#: src/main/extract.c:270 src/main/extractor.c:47 gst/gsttag.c:83
+#: src/main/extractor.c:40
+msgid "title"
+msgstr "tựa"
+
+#: src/main/extract.c:272 src/main/extractor.c:45 gphoto2/main.c:1662
+#: src/main/extractor.c:38
+msgid "filename"
+msgstr "tên tập tin"
+
+#: src/main/extract.c:277 src/main/extractor.c:48 src/main/extractor.c:41
+msgid "author"
+msgstr "tác giả"
+
+#: src/main/extract.c:283 src/main/extractor.c:62 src/main/extractor.c:55
+msgid "keywords"
+msgstr "từ khoá"
+
+#: src/main/extract.c:285 src/main/extractor.c:51 gst/gsttag.c:102
+#: src/main/extractor.c:44
+msgid "comment"
+msgstr "chú thích"
+
+#: src/main/extract.c:289 src/main/extractor.c:52 gst/gsttag.c:94
+#: src/main/extractor.c:45
+msgid "date"
+msgstr "ngày"
+
+#: src/main/extract.c:291 src/main/extractor.c:74 src/main/extractor.c:67
+msgid "creation date"
+msgstr "ngày tạo"
+
+#: src/main/extract.c:319 src/main/extractor.c:53 src/main/extractor.c:46
+msgid "publisher"
+msgstr "nhà xuất bản"
+
+#: src/main/extract.c:323 src/main/extractor.c:59 gst/gsttag.c:140
+#: src/main/extractor.c:52
+msgid "organization"
+msgstr "tổ chức"
+
+#: src/main/extract.c:327 src/main/extractor.c:61 src/main/extractor.c:54
+msgid "subject"
+msgstr "chủ đề"
+
+#: src/main/extract.c:331 src/main/extractor.c:78 src/main/extractor.c:71
+msgid "page count"
+msgstr "tổng số trang"
+
+#: src/main/extract.c:474
+#, c-format
+msgid "You must specify an argument for the `%s' option (option ignored).\n"
+msgstr "Bạn phải ghi rõ một đối số cho tùy chọn « %s » (tùy chọn bị bỏ qua).\n"
+
+#: src/main/extract.c:541 src/main/extract.c:532
+#, c-format
+msgid "Use --help to get a list of options.\n"
+msgstr ""
+"Hãy sử dụng lệnh « --help » (trợ giúp) để xem một danh sách các tùy chọn.\n"
+
+#: src/main/extract.c:600 src/main/extract.c:585
+#, c-format
+msgid "%% BiBTeX file\n"
+msgstr "%% tập tin BiBTeX\n"
+
+#: src/main/extract.c:617 src/main/extract.c:592
+#, c-format
+msgid "Keywords for file %s:\n"
+msgstr "Từ khoá cho tập tin %s:\n"
+
+#: src/main/extractor.c:46 src/main/extractor.c:39
+msgid "mimetype"
+msgstr "kiểu MIME"
+
+#: src/main/extractor.c:49 gst/gsttag.c:86 src/main/extractor.c:42
+msgid "artist"
+msgstr "nhạc sĩ"
+
+#: src/main/extractor.c:54 src/main/extractor.c:47
+msgid "language"
+msgstr "ngôn ngữ"
+
+#: src/main/extractor.c:55 gst/gsttag.c:91 src/main/extractor.c:48
+msgid "album"
+msgstr "tập"
+
+#: src/main/extractor.c:56 gst/gsttag.c:98 src/main/extractor.c:49
+msgid "genre"
+msgstr "thể loại"
+
+#: ../providers/evolution/gda-calendar-model.c:40
+msgid "location"
+msgstr "địa điểm"
+
+#: src/main/extractor.c:58 gst/gsttag.c:133 src/init.c:120
+#: src/main/extractor.c:51
+msgid "version"
+msgstr "phiên bản"
+
+#: src/main/extractor.c:60 gst/gsttag.c:143 src/main/extractor.c:53
+msgid "copyright"
+msgstr "bản quyền"
+
+#: src/main/extractor.c:63 src/main/extractor.c:56
+msgid "contributor"
+msgstr "người đóng góp"
+
+#: src/main/extractor.c:64 src/main/extractor.c:57
+msgid "resource-type"
+msgstr "kiểu tài nguyên"
+
+#: ../partman-basicmethods.templates:42
+msgid "format"
+msgstr "định dạng"
+
+#: src/main/extractor.c:66 src/main/extractor.c:59
+msgid "resource-identifier"
+msgstr "điều nhận diện tài nguyên"
nguyên" + +#: src/main/extractor.c:67 src/main/extractor.c:60 +msgid "source" +msgstr "nguồn" + +#: src/main/extractor.c:68 src/main/extractor.c:61 +msgid "relation" +msgstr "liên quan" + +#: src/main/extractor.c:69 src/main/extractor.c:62 +msgid "coverage" +msgstr "phạm vị" + +#: src/main/extractor.c:70 src/main/extractor.c:63 +msgid "software" +msgstr "phần má»m" + +#: src/main/extractor.c:71 src/main/extractor.c:64 +msgid "disclaimer" +msgstr "từ chối trách nhiệm" + +#: src/main/extractor.c:72 src/errs.c:88 src/gram.c:321 src/reduce.c:394 +#: src/main/extractor.c:65 lib/parsehelp.c:40 +msgid "warning" +msgstr "cảnh báo" + +#: src/main/extractor.c:73 src/main/extractor.c:66 +msgid "translated" +msgstr "dịch" + +#: src/main/extractor.c:75 src/main/extractor.c:68 +msgid "modification date" +msgstr "ngày sá»­a đổi" + +#: src/main/extractor.c:76 src/main/extractor.c:69 +msgid "creator" +msgstr "ngÆ°á»i tạo" + +#: src/main/extractor.c:77 src/main/extractor.c:70 +msgid "producer" +msgstr "ngÆ°á»i cung cấp" + +#: src/main/extractor.c:79 src/main/extractor.c:72 +msgid "page orientation" +msgstr "hÆ°á»›ng trang" + +#: src/main/extractor.c:80 src/main/extractor.c:73 +msgid "paper size" +msgstr "cỡ giấy" + +#: src/main/extractor.c:81 src/main/extractor.c:74 +msgid "used fonts" +msgstr "phông chữ đã dùng" + +#: src/main/extractor.c:82 src/main/extractor.c:75 +msgid "page order" +msgstr "thứ tá»± trang" + +#: src/main/extractor.c:83 src/main/extractor.c:76 +msgid "created for" +msgstr "tạo cho" + +#: src/main/extractor.c:84 src/main/extractor.c:77 +msgid "magnification" +msgstr "phóng to" + +#: src/main/extractor.c:85 src/main/extractor.c:78 +msgid "release" +msgstr "bản phát hành" + +#: ../src/nautilus-file-management-properties.glade.h:82 +msgid "group" +msgstr "nhóm" + +#: ../providers/evolution/gda-calendar-model.c:62 +msgid "summary" +msgstr "tóm tắt" + +#: src/main/extractor.c:89 src/main/extractor.c:82 +msgid "packager" +msgstr "nhà đóng gói" + +#: lib/report.c:604 +msgid "vendor" +msgstr "nhà bán" + +#: src/main/extractor.c:91 gst/gsttag.c:148 src/main/extractor.c:84 +msgid "license" +msgstr "quyá»n phép" + +#: src/main/extractor.c:92 src/main/extractor.c:85 +msgid "distribution" +msgstr "bản phân phối" + +#: src/main/extractor.c:93 src/main/extractor.c:86 +msgid "build-host" +msgstr "máy há»— trợ xây dụng" + +#: src/main/extractor.c:94 src/main/extractor.c:87 +msgid "os" +msgstr "hệ Ä‘iá»u hành" + +#: src/main/extractor.c:95 src/main/extractor.c:88 +msgid "dependency" +msgstr "phụ thuá»™c" + +# Name: don't translate / Tên: đừng dịch +#: src/main/extractor.c:96 src/main/extractor.c:89 +msgid "MD4" +msgstr "MD4" + +# Name: don't translate / Tên: đừng dịch +#: src/main/extractor.c:97 src/main/extractor.c:90 +msgid "MD5" +msgstr "MD5" + +# Name: don't translate / Tên: đừng dịch +#: src/main/extractor.c:98 src/main/extractor.c:91 +msgid "SHA-0" +msgstr "SHA-0" + +# Name: don't translate / Tên: đừng dịch +#: src/main/extractor.c:99 src/main/extractor.c:92 +msgid "SHA-1" +msgstr "SHA-1" + +# Name: don't translate / Tên: đừng dịch +#: src/main/extractor.c:100 src/main/extractor.c:93 +msgid "RipeMD160" +msgstr "RipeMD160" + +#: src/main/extractor.c:101 src/main/extractor.c:94 +msgid "resolution" +msgstr "Ä‘á»™ phân giải" + +#: src/main/extractor.c:102 src/main/extractor.c:95 +msgid "category" +msgstr "phân loại" + +#: src/main/extractor.c:103 src/main/extractor.c:96 +msgid "book title" +msgstr "tên sách" + +#: src/main/extractor.c:104 src/main/extractor.c:97 +msgid "priority" +msgstr "Æ°u tiên" + +#: 
+msgid "conflicts"
+msgstr "xung đột"
+
+#: src/main/extractor.c:106 src/main/extractor.c:99 src/reason_fragment.cc:39
+#: dselect/pkgdisplay.cc:77
+msgid "replaces"
+msgstr "thay thế"
+
+#: src/main/extractor.c:107 src/main/extractor.c:100 dselect/pkgdisplay.cc:76
+msgid "provides"
+msgstr "cung cấp"
+
+#: src/main/extractor.c:108 src/main/extractor.c:101
+msgid "conductor"
+msgstr "người chỉ huy"
+
+#: src/main/extractor.c:109 src/main/extractor.c:102
+msgid "interpreter"
+msgstr "người dịch"
+
+#: src/main/extractor.c:110 src/main/extractor.c:103
+#: ../src/nautilus-file-management-properties.glade.h:88
+msgid "owner"
+msgstr "sở hữu"
+
+#: src/main/extractor.c:111 src/main/extractor.c:104
+msgid "lyrics"
+msgstr "lời bài hát"
+
+#: src/main/extractor.c:112 src/main/extractor.c:105
+msgid "media type"
+msgstr "kiểu vật chứa"
+
+#: src/main/extractor.c:114 src/main/extractor.c:107
+msgid "binary thumbnail data"
+msgstr "dữ liệu hình thu nhỏ nhị phân"
+
+#: src/main/extractor.c:115 src/main/extractor.c:108
+msgid "publication date"
+msgstr "ngày xuất bản"
+
+#: src/main/extractor.c:116
+msgid "camera make"
+msgstr "nhà chế tạo máy ảnh"
+
+#: src/main/extractor.c:117
+msgid "camera model"
+msgstr "mô hình máy ảnh"
+
+#: src/main/extractor.c:118
+msgid "exposure"
+msgstr "sự phơi nắng"
+
+#: src/main/extractor.c:119
+msgid "aperture"
+msgstr "lỗ ống kính"
+
+#: src/main/extractor.c:120
+msgid "exposure bias"
+msgstr "khuynh hướng phơi nắng"
+
+#: src/main/extractor.c:121 libexif/exif-entry.c:487
+msgid "flash"
+msgstr "đèn nháy"
+
+#: src/main/extractor.c:122
+msgid "flash bias"
+msgstr "khuynh hướng đèn nháy"
+
+#: src/main/extractor.c:123
+msgid "focal length"
+msgstr "tiêu cự"
+
+#: src/main/extractor.c:124
+msgid "focal length (35mm equivalent)"
+msgstr "tiêu cự (35mm tương đương)"
+
+#: src/main/extractor.c:125
+msgid "iso speed"
+msgstr "tốc độ ISO"
+
+#: src/main/extractor.c:126
+msgid "exposure mode"
+msgstr "chế độ phơi nắng"
+
+#: src/main/extractor.c:127
+msgid "metering mode"
+msgstr "chế độ đo"
+
+#: src/main/extractor.c:128
+msgid "macro mode"
+msgstr "chế độ macrô"
+
+#: src/main/extractor.c:129
+msgid "image quality"
+msgstr "chất lượng ảnh"
+
+#: src/main/extractor.c:130
+msgid "white balance"
+msgstr "cán cân trắng"
+
+#: src/main/extractor.c:131
+msgid "orientation"
+msgstr "hướng"
+
+#: src/main/extractor.c:132
+msgid "template"
+msgstr "mẫu"
+
+#: src/main/extractor.c:226 src/main/extractor.c:194
+#, c-format
+msgid "Initialization of plugin mechanism failed: %s!\n"
+msgstr "Việc khởi động cơ chế cầm phít bị lỗi: %s\n"
+
+#: src/main/extractor.c:375
+#, c-format
+msgid ""
+"Resolving symbol `%s' in library `%s' failed, so I tried `%s', but that "
+"failed also.  Errors are: `%s' and `%s'.\n"
+msgstr ""
+"Việc tháo gỡ ký hiệu « %s » trong thư viện « %s » bị lỗi, thì đã cố « %s », "
+"nhưng mà nó cũng không thành công. Gặp lỗi « %s » và « %s ».\n"
+
+#: src/main/extractor.c:404
+#, c-format
+msgid "Loading `%s' plugin failed: %s\n"
+msgstr "Việc tải bộ cầm phít « %s » bị lỗi: %s\n"
+
+#: src/main/extractor.c:609
+#, c-format
+msgid "Unloading plugin `%s' failed!\n"
+msgstr "Việc bỏ tải bộ cầm phít « %s » bị lỗi.\n"
+
+#: ../src/gam-app.c:95 ../src/ghex-ui.xml.h:18
+msgid "E_xit"
+msgstr "T_hoát"
+
+#: ../gnibbles/gnibbles.soundlist.in.h:5 ../gnometris/field.cpp:130
+msgid "Game Over"
+msgstr "Hết lượt chơi"
+
+#: doc/demux_nsf.c:320
+#, c-format
+msgid "demux_nsf.c: input not seekable, can not handle!\n"
+msgstr "demux_nsf.c: không thể tìm trong dữ liệu gõ nên không thể quản lý!\n"
+
+#: doc/demux_wc3movie.c:210
+#, c-format
+msgid "demux_wc3movie: SHOT chunk referenced invalid palette (%d >= %d)\n"
+msgstr ""
+"demux_wc3movie: phần riêng SHOT đã tham chiếu đến bảng chọn không hợp lệ (%d "
+"≥ %d)\n"
+
+#: doc/demux_wc3movie.c:300 doc/demux_wc3movie.c:538
+#, c-format
+msgid "demux_wc3movie: encountered unknown chunk: %c%c%c%c\n"
+msgstr "demux_wc3movie: gặp phần riêng lạ: %c%c%c%c\n"
+
+#: doc/demux_wc3movie.c:449
+msgid "demux_wc3movie: There was a problem while loading palette chunks\n"
+msgstr "demux_wc3movie: gặp lỗi trong khi tải các phần riêng bảng chọn\n"
+
+#: src/plugins/htmlextractor.c:130 src/plugins/htmlextractor.c:928
+#, c-format
+msgid "Fatal: could not allocate (%s at %s:%d).\n"
+msgstr "Nghiêm trọng: không thể cấp phát (%s lúc %s:%d).\n"
+
+#: src/buffer.c:67
+msgid "any type"
+msgstr "bất cứ kiểu nào"
+
+#: lib/routines.c:160 lib/xbackupfile.c:248 lib/xbackupfile.c:276
+#: lib/xbackupfile.c:284 src/delegate.c:260
+#, c-format
+msgid "cannot create file `%s'"
+msgstr "không thể tạo tập tin « %s »"
+
+#: lib/routines.c:190 lib/routines.c:196 src/delegate.c:269 src/select.c:159
+#, c-format
+msgid "cannot open a pipe on `%s'"
+msgstr "không thể mở ống dẫn trên « %s »"
+
+#. E.g.: Delegation `PsNup', from ps to ps
+#: src/delegate.c:389
+#, c-format
+msgid "Delegation `%s', from %s to %s\n"
+msgstr "Ủy quyền « %s », từ %s cho %s\n"
+
+#: src/delegate.c:408 src/delegate.c:430
+msgid "Applications configured for delegation"
+msgstr "Ứng dụng có cấu hình để ủy quyền"
+
+#: src/generate.c:88
+#, c-format
+msgid "`%s' is a directory"
+msgstr "« %s » là một thư mục"
+
+#: lib/confg.c:288 lib/confg.c:451 lib/routines.c:154 src/generate.c:96
+#: src/main.c:558 src/main.c:580 src/files.c:101
+#, c-format
+msgid "cannot open file `%s'"
+msgstr "không thể mở tập tin « %s »"
+
+#. Another kind of error occurred: exit
+#: lib/xbackupfile.c:224 src/generate.c:101
+#, c-format
+msgid "cannot get informations on file `%s'"
+msgstr "không thể lấy thông tin về tập tin « %s »"
+
+#: src/generate.c:168
+#, c-format
+msgid "[%s (%s): 1 page on 1 sheet]\n"
+msgstr "[%s (%s): 1 trang trên 1 lá]\n"
+
+#: src/generate.c:174
+#, c-format
+msgid "[%s (%s): %d pages on 1 sheet]\n"
+msgstr "[%s (%s): %d trang trên 1 lá]\n"
+
+#: src/generate.c:181
+#, c-format
+msgid "[%s (%s): %d pages on %d sheets]\n"
+msgstr "[%s (%s): %d trang trên %d lá]\n"
+
+#: src/generate.c:208
+#, c-format
+msgid "[Total: 1 page on 1 sheet] %s\n"
+msgstr "[Tổng số : 1 trang trên 1 lá] %s\n"
+
+#: src/generate.c:212
+#, c-format
+msgid "[Total: %d pages on 1 sheet] %s\n"
+msgstr "[Tổng số : %d trang trên 1 lá] %s\n"
+
+#: src/generate.c:217
+#, c-format
+msgid "[Total: %d pages on %d sheets] %s\n"
+msgstr "[Tổng số : %d trang trên %d lá] %s\n"
+
+#: src/generate.c:226
+msgid "[1 line wrapped]\n"
+msgstr "[1 dòng đã ngắt]\n"
+
+#: src/generate.c:229
+#, c-format
+msgid "[%d lines wrapped]\n"
+msgstr "[%d dòng đã ngắt]\n"
+
+#: src/generate.c:242
+msgid "[No output produced]\n"
+msgstr "[Chưa xuất gì]\n"
+
+#: src/generate.c:314
+#, c-format
+msgid "%s, delegated to %s"
+msgstr "%s, ủy quyền cho %s"
+
+#: src/generate.c:322
+#, c-format
+msgid "[%s (%s): failed. Ignored]\n"
+msgstr "[%s (%s): thất bại nên bị bỏ qua.]\n"
+
+#: src/generate.c:330
+#, c-format
+msgid "[%s (unprintable): ignored]\n"
+msgstr "[%s (không thể in ra được): nên bị bỏ qua]\n"
+
+#: src/generate.c:339
+#, c-format
+msgid "[%s (binary): ignored]\n"
+msgstr "[%s (nhị phân): nên bị bỏ qua]\n"
+
+#: src/generate.c:360
+msgid "plain"
+msgstr "thuần"
+
+#: /home/akim/src/a2ps-4.12/src/lexssh.l:348
+msgid "end-of-line in string constant"
+msgstr "kết thúc dòng trong hằng số chuỗi"
+
+#. TRANS: %s is ".." or <..> or /../ etc.
+#: src/sheets-map.l:191
+#, c-format
+msgid "end of line inside a %s"
+msgstr "kết thúc dòng ở trong %s"
+
+#: src/main.c:201
+#, c-format
+msgid "received signal %d: %s"
+msgstr "nhận tín hiệu %d: %s"
+
+#. TRANS: highlighting level = heavy (2/2)
+#: src/main.c:240
+msgid "heavy"
+msgstr "nặng"
+
+#: ../srcore/verbose.xml.in.h:41 libexif/canon/mnote-canon-entry.c:75
+#: libexif/canon/mnote-canon-entry.c:108 libexif/canon/mnote-canon-entry.c:111
+#: libexif/canon/mnote-canon-entry.c:114
+#: libexif/olympus/mnote-olympus-entry.c:378
+#, fuzzy
+msgid "normal"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"thường\n"
+"#-#-#-#-# libexif-0.6.13.vi.po (libexif-0.6.13) #-#-#-#-#\n"
+"chuẩn"
+
+#: src/main.c:333
+#, c-format
+msgid "Configuration status of %s %s\n"
+msgstr "Tình trạng cấu hình của %s %s\n"
+
+#: src/main.c:337 src/main.c:694
+msgid "Sheets:\n"
+msgstr "Tờ giấy:\n"
+
+#: src/main.c:338
+#, c-format
+msgid ""
+"  medium          = %s%s, %s\n"
+"  page layout     = %d x %d, %s\n"
+"  borders         = %s\n"
+"  file alignment  = %s\n"
+"  interior margin = %d\n"
+msgstr ""
+"  vật chứa        = %s%s, %s\n"
+"  bố cục trang    = %d x %d, %s\n"
+"  viền            = %s\n"
+"  canh lề tập tin = %s\n"
+"  lề ở trong      = %d\n"
+
+#: src/main.c:347
+msgid "portrait"
+msgstr "thẳng đứng"
+
+#: src/main.c:347 libexif/canon/mnote-canon-entry.c:98
+msgid "landscape"
+msgstr "nằm ngang"
+
+#: src/main.c:356
+#, c-format
+msgid "%d characters per line"
+msgstr "%d ký tự trong mỗi dòng"
+
+#: src/main.c:359
+#, c-format
+msgid "%d lines per page"
+msgstr "%d dòng trong mỗi trang"
+
+#: src/main.c:362
+#, c-format
+msgid "font size is %gpt"
+msgstr "cỡ phông chữ là %gpt"
+
+#. number line: each line
+#: src/main.c:371
+msgid "each line"
+msgstr "mỗi dòng"
+
+#. number line: each %d line
+#: src/main.c:375
+#, c-format
+msgid "each %d lines"
+msgstr "mỗi %d dòng"
+
+#: src/main.c:378 src/main.c:715
+msgid "Virtual pages:\n"
+msgstr "Trang ảo :\n"
+
+#: src/main.c:379
+#, c-format
+msgid ""
+"  number lines         = %s\n"
+"  format               = %s\n"
+"  tabulation size      = %d\n"
+"  non printable format = %s\n"
+msgstr ""
+"  dòng số           = %s\n"
+"  dạng              = %s\n"
+"  cỡ tab            = %d\n"
+"  dạng khi không in = %s\n"
+
+#: src/main.c:390
+msgid "Headers:\n"
+msgstr "Đầu trang:\n"
+
+#: src/main.c:391
+#, c-format
+msgid ""
+"  header       = %s\n"
+"  left footer  = %s\n"
+"  footer       = %s\n"
+"  right footer = %s\n"
+"  left title   = %s\n"
+"  center title = %s\n"
+"  right title  = %s\n"
+"  under lay    = %s\n"
+msgstr ""
+"  đầu trang            = %s\n"
+"  chân trang bên trái  = %s\n"
+"  chân trang           = %s\n"
+"  chân trang bên phải  = %s\n"
+"  đầu đề bên trái      = %s\n"
+"  đầu đề ở trung tâm   = %s\n"
+"  đầu đề bên phải      = %s\n"
+"  giấy lót             = %s\n"
+
+#: src/main.c:410 src/main.c:744
+msgid "Input:\n"
+msgstr "Nhập :\n"
+
+#: src/main.c:411
+#, c-format
+msgid ""
+"  truncate lines = %s\n"
+"  interpret      = %s\n"
+"  end of line    = %s\n"
+"  encoding       = %s\n"
+"  document title = %s\n"
+"  prologue       = %s\n"
+"  print anyway   = %s\n"
+"  delegating     = %s\n"
+msgstr ""
+"  cắt bớt dòng     = %s\n"
+"  giải thích       = %s\n"
+"  kết thúc dòng    = %s\n"
+"  mã ký tự         = %s\n"
+"  đầu đề tài liệu  = %s\n"
+"  đoạn mở đầu      = %s\n"
+"  in bất chấp      = %s\n"
+"  ủy quyền         = %s\n"
+
+#. TRANS: a2ps -E --list=options. Warning, this answer is also
+#. used for the PPD file. Make it compatible with both.
+#: src/main.c:436 src/main.c:502
+msgid "selected automatically"
+msgstr "tự động chọn"
+
+#: src/main.c:439 src/main.c:763
+msgid "Pretty-printing:\n"
+msgstr "In xinh:\n"
+
+#: src/main.c:440
+#, c-format
+msgid ""
+"  style sheet     = %s\n"
+"  highlight level = %s\n"
+"  strip level     = %d\n"
+msgstr ""
+"  tờ kiểu dáng = %s\n"
+"  mức nổi bật  = %s\n"
+"  mức tước     = %d\n"
+
+#: src/main.c:460
+msgid "never make backups"
+msgstr "không bao giờ sao lưu tập tin"
+
+#: src/main.c:464
+msgid "simple backups of every file"
+msgstr "bản sao lưu đơn giản của mọi tập tin"
+
+#. appears in a2ps --version-=existing --list=defaults
+#: src/main.c:469
+msgid ""
+"numbered backups of files already numbered,\n"
+"            and simple of others"
+msgstr ""
+"bản sao lưu đánh số của tập tin đã đánh số,\n"
+"            và bản sao đơn giản của các tập tin khác"
+
+#: src/main.c:474
+msgid "numbered backups of every file"
+msgstr "bản sao lưu đánh số của mọi tập tin"
+
+#: src/main.c:478 src/main.c:772
+msgid "Output:\n"
+msgstr "Xuất:\n"
+
+#: src/main.c:479
+#, c-format
+msgid ""
+"  destination     = %s\n"
+"  version control = %s\n"
+"  backup suffix   = %s\n"
+msgstr ""
+"  nơi nhận              = %s\n"
+"  điều khiển phiên bản  = %s\n"
+"  hậu tố sao lưu        = %s\n"
+
+#: src/main.c:492 src/main.c:782
+msgid "PostScript:\n"
+msgstr "PostScript:\n"
+
+#: src/main.c:493
+#, c-format
+msgid ""
+"  magic number              = %s\n"
+"  Printer Description (PPD) = %s\n"
+"  default PPD               = %s\n"
+"  page label format         = %s\n"
+"  number of copies          = %d\n"
+"  sides per sheet           = %s\n"
+"  page device definitions   = "
+msgstr ""
+"  số mã thuật                    = %s\n"
+"  Mô tả máy in (PPD)             = %s\n"
+"  (Mô tả máy in) PPD mặc định    = %s\n"
+"  dạng nhãn trang                = %s\n"
+"  số bản                         = %d\n"
+"  mặt của một tờ giấy            = %s\n"
+"  định nghĩa thiết bị trang      = "
+
+#: src/main.c:513
+msgid "  statusdict definitions      = "
+msgstr "  định nghĩa statusdict (từ điển tình trạng) = "
+
+#: src/main.c:516
+#, c-format
+msgid "  page prefeed                = %s\n"
+msgstr "  nạp giấy trước              = %s\n"
+
+#: src/main.c:525
+msgid "Internals:\n"
+msgstr "Chi tiết nội bộ :\n"
+
+#: src/main.c:526
+#, c-format
+msgid ""
+"  verbosity level     = %d\n"
+"  file command        = %s\n"
+"  library path        = \n"
+msgstr ""
+"  mức xuất chi tiết     = %d\n"
+"  lệnh tập tin          = %s\n"
+"  đường dẫn thư viện    = \n"
+
+#: src/main.c:651
+#, c-format
+msgid ""
+"Usage: %s [OPTION]... [FILE]...\n"
+"\n"
+"Convert FILE(s) or standard input to PostScript.\n"
+"\n"
+"Mandatory arguments to long options are mandatory for short options too.\n"
+"Long options marked with * require a yes/no argument, corresponding\n"
+"short options stand for `yes'.\n"
+msgstr ""
+"Cách sử dụng: %s [TÙY_CHỌN]... [TẬP_TIN]...\n"
+"\n"
+"Chuyển đổi TẬP_TIN hay dữ liệu gõ chuẩn sang PostScript.\n"
+"\n"
+"Mọi đối số phải sử dụng với tùy chọn dài cũng vậy với tùy chọn ngắn.\n"
+"Mọi tùy chọn dài có dấu * phải có đối số Có/Không (yes/no);\n"
+"tùy chọn ngắn tương thích thì có nghĩa Có (yes).\n"
+
+#: src/main.c:665
+msgid "Tasks:\n"
+msgstr "Việc:\n"
+
+#: src/main.c:666
+msgid ""
+"  --version        display version\n"
+"  --help           display this help\n"
+"  --guess          report guessed types of FILES\n"
+"  --which          report the full path of library files named FILES\n"
+"  --glob           report the full path of library files matching FILES\n"
+"  --list=defaults  display default settings and parameters\n"
+"  --list=TOPIC     detailed list on TOPIC (delegations, encodings, "
+"features,\n"
+"                   variables, media, ppd, printers, prologues, style-"
+"sheets,\n"
+"                   user-options)\n"
+msgstr ""
+"  --version        trình bày thông tin _phiên bản_\n"
+"  --help           trình bày _trợ giúp_ này\n"
+"  --guess          thông báo loại đã _đoán_ của TẬP_TIN\n"
+"  --which          thông báo đường dẫn đầy đủ của mọi tập tin thư viện có tên "
+"TẬP_TIN (_nào_)\n"
+"  --glob           thông báo đường dẫn đầy đủ của mọi tập tin thư viện khớp "
+"với TẬP_TIN\n"
+"  --list=defaults  _ghi danh sách_ các thiết lập và tham số _mặc định_\n"
+"  --list=ĐỀ_TÀI    _danh sách_ chi tiết về ĐỀ_TÀI đó (ủy quyền gì, mã ký "
+"tự, tính năng,\n"
+"\t\tbiến, vật chứa, mô tả máy in (PPD), máy in, đoạn mở đầu, tờ kiểu "
+"dáng,\n"
+"\t\ttùy chọn cho người dùng)\n"
+
+#: src/main.c:677
+msgid ""
+"After having performed the task, exit successfully.  Detailed lists may\n"
+"provide additional help on specific features.\n"
+msgstr ""
+"Sau khi thực hiện việc đó hãy thoát được. Danh sách chi tiết có lẽ\n"
+"bao gồm trợ giúp thêm về tính năng dứt khoát.\n"
+
+#: src/main.c:685
+msgid "Global:\n"
+msgstr "Toàn cục:\n"
+
+#: src/main.c:686
+msgid ""
+"  -q, --quiet, --silent      be really quiet\n"
+"  -v, --verbose[=LEVEL]      set verbosity on, or to LEVEL\n"
+"  -=, --user-option=OPTION   use the user defined shortcut OPTION\n"
+"      --debug                enable debugging features\n"
+"  -D, --define=KEY[:VALUE]   unset variable KEY or set to VALUE\n"
+msgstr ""
+"  -q, --quiet, --silent      hãy _im_ lắm (không xuất chi tiết)\n"
+"  -v, --verbose[=MỨC]        xuất _chi tiết_, hay xuất chi tiết MỨC đó\n"
+"  -=, --user-option=TÙY_CHỌN sử dụng _tùy chọn_ lối tắt định nghĩa do "
+"_người dùng_\n"
+"      --debug                hiệu lực tính năng _gỡ lỗi_\n"
+"  -D, --define=PHÍM[:GIÁ_TRỊ] bỏ lập PHÍM biến hay lập thành GIÁ TRỊ đó\n"
+
+#: src/main.c:695
+msgid ""
+"  -M, --medium=NAME      use output medium NAME\n"
+"  -r, --landscape        print in landscape mode\n"
+"  -R, --portrait         print in portrait mode\n"
+"      --columns=NUM      number of columns per sheet\n"
+"      --rows=NUM         number of rows per sheet\n"
+"      --major=DIRECTION  first fill (DIRECTION=) rows, or columns\n"
+"  -1, -2, ..., -9        predefined font sizes and layouts for 1..9 "
9 " +"virtuals\n" +" -A, --file-align=MODE align separate files according to MODE (fill, rank\n" +" page, sheet, or a number)\n" +" -j, --borders* print borders around columns\n" +" --margin[=NUM] define an interior margin of size NUM\n" +msgstr "" +" -M, --medium=TÊN sá»­ dụng _vật vật chứa_ có TÊN đó\n" +" -r, --landscape in bằng chế Ä‘á»™ _ngang_\n" +" -R, --portrait in bằng chế Ä‘á»™ _chân dung_\n" +" --columns=Sá» số _cá»™t_ trên má»™t tá» giấy\n" +" --rows=Sá» số _hàng_ trên má»™t tá» giấy\n" +" --major=HƯỚNG trÆ°á»›c hết tô đầy hàng hay cá»™t HƯỚNG đó (nhiá»u hÆ¡n)\n" +" -1, -2, ..., -9 cỡ phông chữ và bố trí định nghÄ©a trÆ°á»›c cho Ä‘iá»u ảo " +"1..9 \n" +" -A, --file-align=CHẾ_ÄỘ _canh lá»_ những _tập tin_ riêng theo CHẾ ÄỘ đó\n" +"\t\t\t(fill [tô đầy], rank page [sắp xếp trang], sheet [tá» giấy] hay số) -" +"j, --borders* in _viá»n_ chung quanh cá»™t\n" +" --margin[=Sá»] định nghÄ©a _lá» trang_ ná»™i bá»™ có kích thÆ°á»›c SỠđó\n" + +#: src/main.c:708 +msgid "" +"The options -1.. -9 affect several primitive parameters to set up " +"predefined\n" +"layouts with 80 columns. Therefore the order matters: `-R -f40 -2' is\n" +"equivalent to `-2'. To modify the layout, use `-2Rf40', or compose " +"primitive\n" +"options (`--columns', `--font-size' etc.).\n" +msgstr "" +"Những tùy chá»n -1.. -9 làm ảnh hưởng đến vài tham số nguyên thuá»·\n" +"để thiết lập bố trí định nghÄ©a trÆ°á»›c có 80 cá»™t. Vì thế thứ tá»± là quan " +"trá»ng:\n" +"`-R -f40 -2' bằng `-2'. Äể sá»­a đổi bố trí thì hãy sÆ° dụng `-2Rf40',\n" +"hay tạo tùy chá»n nguyên thuá»· (`--columns' [cá»™t], `--font-size' [cỡ phông " +"chữ] v.v.).\n" + +#: src/main.c:716 +msgid "" +" --line-numbers=NUM precede each NUM lines with its line number\n" +" -C alias for --line-numbers=5\n" +" -f, --font-size=SIZE use font SIZE (float) for the body text\n" +" -L, --lines-per-page=NUM scale the font to print NUM lines per virtual\n" +" -l, --chars-per-line=NUM scale the font to print NUM columns per " +"virtual\n" +" -m, --catman process FILE as a man page (same as -L66)\n" +" -T, --tabsize=NUM set tabulator size to NUM\n" +" --non-printable-format=FMT specify how non-printable chars are printed\n" +msgstr "" +" --line-numbers=Sá» chèn _số dòng_ trÆ°á»›c má»—i dòng thứ Sá»\n" +" -C\t\t\t\t\t biệt hiệu cho tùy chá»n --line-numbers=5\n" +" -f, --font-size=Cá»  sá»­ dụng _Cá»  phông chữ_ (nổi) khi in chữ ná»™i " +"dụng\n" +" -L, --lines-per-page=Sá» co giãn phông chữ để in Sá» _dòng trong má»—i " +"trang_ ảo\n" +" -l, --chars-per-line=Sá» cÆ¡ giãn phông chữ để in Sá» _cá»™t trong má»—i_ trang " +"ảo (_dòng_)\n" +" -m, --catman xá»­ lý TẬP_TIN dạng trang « man » (bằng tùy " +"chá»n -L66)\n" +" -T, --tabsize=Sá» lập _cỡ « tab»_ thành Sá»\n" +" --non-printable-format=DẠNG ghi rõ cách in má»i ký tá»± _không thể in_\n" + +#: src/main.c:727 +msgid "Headings:\n" +msgstr "Tá»±a Ä‘á»:\n" + +#: src/main.c:729 +#, no-c-format +msgid "" +" -B, --no-header no page headers at all\n" +" -b, --header[=TEXT] set page header\n" +" -u, --underlay[=TEXT] print TEXT under every page\n" +" --center-title[=TEXT] set page title to TITLE\n" +" --left-title[=TEXT] set left and right page title to TEXT\n" +" --right-title[=TEXT]\n" +" --left-footer[=TEXT] set sheet footers to TEXT\n" +" --footer[=TEXT]\n" +" --right-footer[=TEXT]\n" +msgstr "" +" -B, --no-header _không có đầu trang_ nào cả\n" +" -b, --header[=CHá»®] lập _đầu trang_\n" +" -u, --underlay[=CHá»®] in CHá»® dÆ°á»›i má»i trang (_giấy lót_)\n" +" --center-title[=CHá»®] lập _đầu Ä‘á»_ trang thành CHá»® (_trung 
+"      --left-title[=CHỮ]     lập _đầu đề bên trái_ trang thành CHỮ\n"
+"      --right-title[=CHỮ]    lập _đầu đề bên phải_ trang thành CHỮ\n"
+"      --left-footer[=CHỮ]    lập _chân bên trái_ trang thành CHỮ\n"
+"      --footer[=CHỮ]         lập _chân trang_ thành CHỮ\n"
+"      --right-footer[=CHỮ]   lập _chân bên phải_ trang thành CHỮ\n"
+
+#: src/main.c:740
+msgid "The TEXTs may use special escapes.\n"
+msgstr "CHỮ đó có thể sử dụng ký tự thoát đặc biệt.\n"
+
+#: src/main.c:745
+msgid ""
+"  -a, --pages[=RANGE]        select the pages to print\n"
+"  -c, --truncate-lines*      cut long lines\n"
+"  -i, --interpret*           interpret tab, bs and ff chars\n"
+"      --end-of-line=TYPE     specify the eol char (TYPE: r, n, nr, rn, any)\n"
+"  -X, --encoding=NAME        use input encoding NAME\n"
+"  -t, --title=NAME           set the name of the job\n"
+"      --stdin=NAME           set the name of the input file stdin\n"
+"      --print-anyway*        force binary printing\n"
+"  -Z, --delegate*            delegate files to another application\n"
+"      --toc[=TEXT]           generate a table of content\n"
+msgstr ""
+"  -a, --pages[=PHẠM_VI]      chọn _trang_ nào để in\n"
+"  -c, --truncate-lines*      _cắt bớt_ mọi _dòng_ dài\n"
+"  -i, --interpret*           _giải thích_ mọi ký tự tab, xoá lùi và nạp "
+"giấy\n"
+"      --end-of-line=LOẠI     ghi rõ ký tự _kết thúc dòng_\n"
+"\t\t\t\t(LOẠI: r, n, nr, rn, any [bất cứ ký tự nào])\n"
+"  -X, --encoding=TÊN         sử dụng _mã_ ký tự gõ TÊN này\n"
+"  -t, --title=TÊN            lập _tên_ của việc này\n"
+"      --stdin=TÊN            lập TÊN của tập tin _gõ chuẩn_\n"
+"      --print-anyway*        buộc in cách nhị phân\n"
+"  -Z, --delegate*            _ủy quyền_ tập tin cho ứng dụng khác\n"
+"      --toc[=CHỮ]            tạo _mục lục_\n"
+
+#: src/main.c:757
+msgid ""
+"When delegations are enabled, a2ps may use other applications to handle the\n"
+"processing of files that should not be printed as raw information, e.g., "
+"HTML\n"
+"PostScript, PDF etc.\n"
+msgstr ""
+"Khi tùy chọn « ủy quyền » là hoạt động, trình a2ps có lẽ sử dụng ứng dụng "
+"khác\n"
+"để xử lý tập tin không in được dạng dữ liệu thô, v.d. HTML, PostScript, "
+"PDF.\n"
+
+#: src/main.c:764
+msgid ""
+"  -E, --pretty-print[=LANG]  enable pretty-printing (set style to LANG)\n"
+"      --highlight-level=LEVEL  set pretty printing highlight LEVEL\n"
+"                             LEVEL can be none, normal or heavy\n"
+"  -g                         alias for --highlight-level=heavy\n"
+"      --strip-level=NUM      level of comments stripping\n"
+msgstr ""
+"  -E, --pretty-print[=NGÔN_NGỮ] hiệu lực _in xinh_ (lập kiểu thành NGÔN "
+"NGỮ)\n"
+"      --highlight-level=MỨC  lập _mức nổi_ khi in xinh\n"
+"                             MỨC có thể là none [không có], normal [thường] "
+"hay heavy [nặng]\n"
+"  -g                         biệt hiệu cho tùy chọn --highlight-level=heavy "
+"(mức nổi là nặng)\n"
+"      --strip-level=SỐ       _mức tước_ chú thích\n"
+
+#: src/main.c:773
+msgid ""
+"  -o, --output=FILE          leave output to file FILE.  If FILE is `-',\n"
+"                             leave output to stdout.\n"
+"      --version-control=WORD override the usual version control\n"
+"      --suffix=SUFFIX        override the usual backup suffix\n"
+"  -P, --printer=NAME         send output to printer NAME\n"
+"  -d                         send output to the default printer\n"
+msgstr ""
+"  -o, --output=TẬP_TIN       _xuất_ đến tập tin đó; nếu tập tin đó là "
+"`-',\n"
+"                             thì xuất đến thiết bị xuất chuẩn (stdout).\n"
+"      --version-control=TỪ   có quyền cao hơn _điều khiển phiên bản_ thường\n"
+"      --suffix=HẬU_TỐ        có quyền cao hơn _hậu tố_ sao lưu thường\n"
+"  -P, --printer=TÊN          xuất đến _máy in_ có tên đó\n"
+"  -d                         xuất đến máy in mặc định\n"
+
+#: src/main.c:783
+msgid ""
+"      --prologue=FILE        include FILE.pro as PostScript prologue\n"
+"      --ppd[=KEY]            automatic PPD selection or set to KEY\n"
+"  -n, --copies=NUM           print NUM copies of each page\n"
+"  -s, --sides=MODE           set the duplex MODE (`1' or `simplex',\n"
+"                             `2' or `duplex', `tumble')\n"
+"  -S, --setpagedevice=K[:V]  pass a page device definition to output\n"
+"      --statusdict=K[:[:]V]  pass a statusdict definition to the output\n"
+"  -k, --page-prefeed         enable page prefeed\n"
+"  -K, --no-page-prefeed      disable page prefeed\n"
+msgstr ""
+"      --prologue=TẬP_TIN     bao gồm TẬP_TIN.pro là _đoạn mở đầu_ "
+"PostScript\n"
+"      --ppd[=PHÍM]           tự động chọn mô tả máy in (PPD) hay lập thành "
+"PHÍM đó\n"
+"  -n, --copies=SỐ            in SỐ _bản sao_ của mỗi trang\n"
+"  -s, --sides=CHẾ_ĐỘ         lập chế độ _mặt trang_ (`1' hay `simplex',\n"
+"                             `2' hay `duplex', `tumble')\n"
+"  -S, --setpagedevice=K[:V]  xuất dữ liệu định nghĩa _thiết bị trang_ "
+"(_lập_)\n"
+"      --statusdict=K[:[:]V]  xuất dữ liệu định nghĩa statusdict (từ điển "
+"tình trạng)\n"
+"  -k, --page-prefeed         hiệu lực _nạp trang trước_\n"
+"  -K, --no-page-prefeed      vô hiệu hóa _nạp trang trước_ (_không_)\n"
+
+#: src/main.c:797
+msgid ""
+"By default a2ps is tuned to do what you want to, so trust it.  To pretty\n"
+"print the content of the `src' directory and a table of content, and send "
+"the\n"
+"result to the printer `lw',\n"
+"\n"
+"    $ a2ps -P lw --toc src/*\n"
+"\n"
+"To process the files `sample.ps' and `sample.html' and display the result,\n"
+"\n"
+"    $ a2ps -P display sample.ps sample.html\n"
+"\n"
+"To process a mailbox in 4 up,\n"
+"\n"
+"    $ a2ps -=mail -4 mailbox\n"
+"\n"
+"To print as a booklet on the default printer, which is Duplex capable,\n"
+"\n"
+"    $ a2ps -=book paper.dvi.gz -d\n"
+msgstr ""
+"Mặc định là trình a2ps sẽ giúp đỡ bạn làm việc thì hãy tin nó để làm việc "
+"cho đúng.\n"
+"\n"
+"Để « in xinh » nội dung của thư mục `src', và _mục lục_,\n"
+"\trồi gởi kết quả đó cho máy in `lw' hãy sử dụng lệnh này:\n"
+"\n"
+"    $ a2ps -P lw --toc src/*\n"
+"\n"
+"Để xử lý hai tập tin `sample.ps' và `sample.html' rồi _trình bày_ kết quả "
+"ấy\n"
+"\tthì hãy sử dụng lệnh này:\n"
+"\n"
+"    $ a2ps -P display sample.ps sample.html\n"
+"\n"
+"Để xử lý một _hộp thư_ để xuất bốn _thư_ trên mỗi tờ giấy (4 up)\n"
+"\tthì hãy sử dụng lệnh này:\n"
+"\n"
+"    $ a2ps -=mail -4 mailbox\n"
+"\n"
+"Để in dạng _cuốn sách_ nhỏ qua máy in mặc định mà có thể in hai mặt _tờ "
+"giấy_\n"
+"thì hãy sử dụng lệnh này:\n"
+"\n"
+"    $ a2ps -=book paper.dvi.gz -d\n"
+
+#: src/main.c:818
+msgid ""
+"News, updates and documentation: visit http://www.inf.enst.fr/~demaille/"
+"a2ps/.\n"
+msgstr ""
+"Để xem tin tức, trình cập nhật và tài liệu thì hãy tới thăm chỗ Mạng của "
+"chúng tôi nhé:\n"
+"\thttp://www.inf.enst.fr/~demaille/a2ps/.\n"
+
+#: src/main.c:820
+msgid "Report bugs to .\n"
+msgstr "Hãy thông báo lỗi cho .\n"
+
+#: src/main.c:938
+msgid ""
+"Copyright (c) 1988-1993 Miguel Santana\n"
+"Copyright (c) 1995-2000 Akim Demaille, Miguel Santana"
+msgstr ""
+"Bản quyền © năm 1988-1993 Miguel Santana\n"
+"Bản quyền © năm 1995-2000 Akim Demaille, Miguel Santana"
+
+#: src/main.c:1168
+msgid "Table of Content"
+msgstr "Mục lục"
+
+#: src/parsessh.y:236
+#, c-format
+msgid "cannot process `%s' which requires a2ps version %s"
+msgstr "không xử lý được « %s » mà cần đến trình a2ps phiên bản %s"
+
+#: src/sheets-map.l:110
+#, c-format
+msgid "unexpected character `%c'"
+msgstr "ký tự bất ngờ « %c »"
+
+#: /home/akim/src/a2ps-4.12/lib/lexppd.l:211 src/sheets-map.l:210
+msgid "too many includes"
+msgstr "quá nhiều tập tin bao gồm (include)"
+
+#: src/sheets-map.l:292 src/sheets-map.l:299
+#, c-format
+msgid "no key defined for `%s'"
+msgstr "chưa định nghĩa phím cho « %s »"
+
+#: lib/pathwalk.c:414 src/select.c:122 src/ssheet.c:866
+#, c-format
+msgid "cannot find file `%s'"
+msgstr "không tìm thấy tập tin « %s »"
+
+#. sheets.map can not be found: there is no automatic prettyprinting
+#: src/select.c:124
+msgid "automatic style selection cancelled"
+msgstr "việc tự động chọn kiểu dáng bị thôi"
+
+#: src/ssheet.c:295
+#, c-format
+msgid "cannot compile regular expression `%s': %s"
+msgstr "không thể biên dịch biểu thức chính quy « %s »: %s"
+
+#: src/ssheet.c:974 src/ssheet.c:995
+msgid "Known Style Sheets"
+msgstr "Biết tờ kiểu dáng"
+
+#: src/ssheet.c:1461
+#, c-format
+msgid "cannot find style sheet `%s': using plain style"
+msgstr "không tìm thấy tờ kiểu dáng « %s » nên sử dụng kiểu dáng thuần"
+
+#: src/sshread.c:453
+#, c-format
+msgid "unknown encoding `%s', ignored"
+msgstr "không biết mã ký tự « %s » nên bỏ qua nó"
+
+#: src/version-etc.c:38
+msgid "Copyright (C) 1999 Free Software Foundation, Inc."
+msgstr "" +"Bản quyá»n © năm 1999 Free Software Foundation, Inc. (Tổ chức Phần má»m Tá»± do)" + +#: lib/version-etc.c:90 +#, c-format +msgid "Written by %s.\n" +msgstr "Tác giả: %s.\n" + +#: src/wdiff.c:1225 src/getargs.c:275 schroot/schroot.c:108 +#: schroot/schroot.cc:50 schroot/schroot-releaselock.cc:58 +msgid "" +"This is free software; see the source for copying conditions. There is NO\n" +"warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" +msgstr "" +"Äây là phần má»m tá»± do; hãy xem mã nguồn để tìm thấy Ä‘iá»u kiện sao chép.\n" +"• Không bảo đảm gì cả, dù khă nang bán hay khả năng làm việc dứt khoát. •\n" + +#: src/versions.c:133 +#, c-format +msgid "invalid version number `%s'" +msgstr "số phiên bản không hợp lệ « %s »" + +#: lib/caret.c:43 +msgid "space (i.e., ` ')" +msgstr "ký tá»± cách « »" + +#: lib/caret.c:45 +msgid "octal (i.e., `\\001' etc.)" +msgstr "bát phân (v.d. \\001)" + +#: lib/caret.c:47 +msgid "hexadecimal (i.e., `\\x0a' etc.)" +msgstr "hệ thập lục phân (v.d. \\x0a)" + +#: lib/caret.c:49 +msgid "caret (i.e., `^C', `M-^C' etc.)" +msgstr "dấu sót (v.d. ^C, M-^C)" + +#: lib/caret.c:51 +msgid "emacs (i.e., `C-c', `M-C-c' etc.)" +msgstr "emacs (v.d. C-c, M-C-c)" + +#: lib/caret.c:53 +msgid "question-mark (i.e., `?')" +msgstr "dấu há»i (v.d. ?)" + +#: ../gettext-tools/lib/closeout.c:64 lib/closeout.c:64 +#: ../su-backend/closeout.c:71 ../su-backend/closeout.c:73 lib/closeout.c:94 +#: misc.c:853 +msgid "write error" +msgstr "lá»—i ghi" + +#: lib/confg.c:253 lib/encoding.c:639 +#, c-format +msgid "invalid option `%s'" +msgstr "tùy chá»n không hợp lệ `%s'" + +#: lib/confg.c:264 lib/encoding.c:577 lib/encoding.c:617 lib/prolog.c:146 +#, c-format +msgid "missing argument for `%s'" +msgstr "thiếu đối số cho `%s'" + +#: lib/confg.c:320 lib/confg.c:327 lib/confg.c:334 +#, c-format +msgid "invalid definition for printer `%s': %s" +msgstr "dữ liệu định nghÄ©a không hợp lệ cho máy in `%s': %s" + +#: lib/confg.c:325 lib/confg.c:328 +msgid "Unknown Printer" +msgstr "Không biết máy in" + +#: lib/confg.c:332 lib/confg.c:335 ../gnome-default-printer.c:231 +#: ../gnome-default-printer.desktop.in.h:1 +msgid "Default Printer" +msgstr "Máy in mặc định" + +#: lib/confg.c:388 lib/options.c:703 +#, c-format +msgid "invalid variable identifier `%s'" +msgstr "dấu hiệu nhận diện biến không hợp lệ `%s'" + +#: lib/confg.c:417 +#, c-format +msgid "obsolete `%s' entry. 
Ignored" +msgstr "mục « %s » quá thá»i nên bị bo qua" + +#: lib/encoding.c:1115 lib/encoding.c:1136 +msgid "Known Encodings" +msgstr "Biết mã ký tá»±" + +#: lib/faces.c:156 +msgid "incomplete knowledge of faces" +msgstr "chÆ°a biết đủ thông tin vá» mặt phông chữ" + +#: lib/filtdir.c:113 +#, c-format +msgid "cannot close directory `%s'" +msgstr "không thể đóng thÆ° mục « %s »" + +#: lib/getnum.c:63 lib/getnum.c:121 lib/getnum.c:153 lib/getnum.c:221 +#: lib/argmatch.c:134 +#, c-format +msgid "invalid argument `%s' for `%s'" +msgstr "đối số không hợp lệ « %s » cho « %s »" + +#: lib/getnum.c:123 +#, c-format +msgid "Valid arguments are integers n such that: %s\n" +msgstr "Äối số hợp lệ là số nguyên n để mà: %s\n" + +#: lib/getnum.c:223 +#, c-format +msgid "Valid arguments are floats f such that: %s\n" +msgstr "Äối số hợp lệ là Ä‘iá»u nổi f để mà: %s\n" + +#: lib/jobs.c:307 +#, c-format +msgid "unknown encoding `%s'" +msgstr "không biết mã ký tá»± « %s »" + +#: lib/madir.c:59 +msgid "rows first" +msgstr "hàng trÆ°á»›c" + +#: lib/madir.c:62 +msgid "columns first" +msgstr "cá»™t trÆ°á»›c" + +#: lib/media.c:173 +#, c-format +msgid "unknown medium `%s'" +msgstr "không biệt vật vật chứa « %s »" + +#: lib/media.c:208 lib/media.c:237 +msgid "Known Media" +msgstr "Vật chứa đã biết" + +#: lib/media.c:212 +msgid "dimensions" +msgstr "kích thÆ°á»›c" + +#. TRANS: Variables (formely called `macro meta sequences', eeeaerk) +#. are things such as #(psnup) which is substituted to a bigger strings, +#. e.g. -#v #?q|-q|| #?j|-d|| #?r||-c| -w#w -h#h +#: lib/metaseq.c:104 lib/metaseq.c:113 +msgid "Known Variables" +msgstr "Biết biến" + +#: lib/metaseq.c:288 lib/metaseq.c:300 +#, c-format +msgid "Printed by %s" +msgstr "In do %s" + +#: lib/metaseq.c:298 +#, c-format +msgid "Printed by %s from %s" +msgstr "In do %s từ %s" + +#: lib/metaseq.c:308 lib/metaseq.c:327 +msgid "cannot get current working directory" +msgstr "không thể lấy thÆ° mục hoặt Ä‘á»™ng hiện có" + +#: lib/metaseq.c:954 lib/metaseq.c:987 +#, c-format +msgid "%s: too long argument for %s escape" +msgstr "%s: đối số quá dài cho dãy thoát %s" + +#. Translators: please make a short date format +#. * according to the std form in your language, using +#. * the standard strftime(3) +#: lib/metaseq.c:364 lib/metaseq.c:675 +msgid "%b %d, %y" +msgstr "%d/%b/%y" + +#. Translators: please make a long date format +#. * according to the std form in your language, using +#. * GNU strftime(3) +#: lib/metaseq.c:373 lib/metaseq.c:684 +msgid "%A %B %d, %Y" +msgstr "%A, ngày %e, %B, năm %Y" + +#: lib/metaseq.c:1234 lib/metaseq.c:1246 lib/metaseq.c:1289 +#, c-format +msgid "%s: unknown `%s' escape `%c' (%d)" +msgstr "%s: không biết « %s » dãy thoát « %c » (%d)" + +#, c-format +msgid "Page %d" +msgstr "Trang %d" + +#. `%Q' localized `Page %d/%c' +#: lib/metaseq.c:431 lib/metaseq.c:826 +#, c-format +msgid "Page %d/%c" +msgstr "Trang %d/%c" + +#: lib/metaseq.c:972 +#, c-format +msgid "%s: missing `%c' for %s%c escape" +msgstr "%s: thiếu « %c »cho dãy thoát %s%c " + +#: lib/metaseq.c:593 lib/metaseq.c:1021 +#, c-format +msgid "%s: invalid separator `%s%c' for `%s' escape" +msgstr "%s: dấu ngân cách không hợp lệ « %s%c » cho dãy thoát « %s »." + +#: lib/metaseq.c:601 lib/metaseq.c:611 +#, c-format +msgid "%s: invalid argument for %s%c escape" +msgstr "%s: đối số không hợp lệ cho dãy thoát « %s%c »." 
+
+#: lib/metaseq.c:822
+#, c-format
+msgid "Page %d/%d"
+msgstr "Trang %d/%d"
+
+#: lib/metaseq.c:1266
+msgid "output command"
+msgstr "lệnh xuất"
+
+#: makeinfo/makeinfo.c:340
+#, c-format
+msgid "Try `%s --help' for more information.\n"
+msgstr "Hãy thử lệnh « %s --help » để tìm thông tin thêm (_trợ giúp_).\n"
+
+#: lib/output.c:466
+#, c-format
+msgid "invalid face `%s'"
+msgstr "mặt phông chữ không hợp lệ « %s »"
+
+#: lib/output.c:538
+#, c-format
+msgid "`%s' with no matching `%s'"
+msgstr "« %s » không có « %s » khớp với nhau"
+
+#: lib/ppd.c:108
+msgid "Known Fonts"
+msgstr "Biết phông chữ"
+
+#. TRANS: This `none' is an answer to `List of known fonts: None'
+#: lib/ppd.c:111
+msgid ""
+"\n"
+"    None.\n"
+msgstr ""
+"\n"
+"    Không có.\n"
+
+#: lib/ppd.c:149 lib/ppd.c:165
+msgid "Known PostScript Printer Descriptions"
+msgstr "Biết mô tả máy in PostScript"
+
+#: lib/prange.c:305 lib/prange.c:323
+#, c-format
+msgid "invalid interval `%s'"
+msgstr "khoảng không hợp lệ « %s »"
+
+#: lib/printers.c:396 lib/printers.c:413
+#, c-format
+msgid "no command for the `%s' (%s%s)"
+msgstr "không có lệnh cho « %s » (%s%s)"
+
+#: lib/printers.c:429
+msgid "sent to the standard output"
+msgstr "đã gởi cho thiết bị xuất chuẩn"
+
+#: lib/printers.c:430
+msgid "sent to the default printer"
+msgstr "đã gởi cho máy in mặc định"
+
+#: lib/printers.c:435
+#, c-format
+msgid "saved into the file `%s'"
+msgstr "đã lưu vào tập tin « %s »"
+
+#: lib/printers.c:436
+#, c-format
+msgid "sent to the printer `%s'"
+msgstr "đã gởi cho máy in « %s »"
+
+#: lib/printers.c:613 lib/printers.c:621
+msgid "Known Outputs (Printers, etc.)"
+msgstr "Thiết bị xuất đã biết (máy in v.v.)"
+
+#: lib/prolog.c:98 lib/prolog.c:181
+msgid "Known Prologues"
+msgstr "Biết đoạn mở đầu"
+
+#: lib/prolog.c:579
+#, c-format
+msgid "font %f too big"
+msgstr "phông chữ %f quá lớn"
+
+#: lib/psgen.c:662
+#, c-format
+msgid "`%s' is a binary file, printing aborted"
+msgstr "`%s' là tập tin nhị phân nên thôi in"
+
+#: lib/quotearg.c:259 lib/quotearg.c:245
+msgid "`"
+msgstr "« "
+
+#: lib/quotearg.c:203 lib/quotearg.c:246 gnulib/lib/quotearg.c:241
+#: lib/quotearg.c:260
+msgid "'"
+msgstr " »"
+
+#: lib/userdata.c:129
+msgid "user"
+msgstr "người dùng"
+
+#: lib/userdata.c:130 ../calendar/libecal/e-cal.c:5030
+#: ../servers/groupwise/e-gw-connection.c:168
+msgid "Unknown User"
+msgstr "Không biết người dùng"
+
+#: lib/useropt.c:75
+#, c-format
+msgid "unknown user option `%s'"
+msgstr "không biết tùy chọn cho người dùng « %s »"
+
+#: lib/useropt.c:86 lib/useropt.c:97
+msgid "Known User Options"
+msgstr "Tùy chọn cho người dùng đã biết"
+
+#: compat/regex.c:1008 lib/regcomp.c:167
+msgid "Memory exhausted"
+msgstr "Hết bộ nhớ hoàn toàn"
+
+#: lib/xbackupfile.c:240 lib/xbackupfile.c:252
+#, c-format
+msgid "cannot rename file `%s' as `%s'"
+msgstr "không thể thay đổi tên tập tin « %s » thành « %s »"
+
+#: lib/xbackupfile.c:255
+#, c-format
+msgid "restored file `%s'"
+msgstr "đã phục hồi tập tin « %s »"
+
+#: lib/argmatch.c:159
+#, c-format
+msgid "invalid argument %s for `%s'"
+msgstr "đối số không hợp lệ %s cho « %s »"
+
+#: lib/argmatch.c:160
+#, c-format
+msgid "ambiguous argument %s for `%s'"
+msgstr "đối số mơ hồ %s cho « %s »"
+
+#: gnulib/lib/argmatch.c:157 lib/argmatch.c:155 lib/argmatch.c:157
+#, c-format
+msgid "Valid arguments are:"
+msgstr "Các đối số hợp lệ:"
+
+#: src/ant-phone.c:172
+#, c-format
+msgid ""
+"Usage: %s [OPTION...]\n"
+"\n"
+"Options:\n"
+" -h, --help              Show this help message\n"
+" -v, --version           Print version information\n"
+" -r, --cleanup           Remove stale socket file (left by accident by\n"
+"                         previous run)\n"
+" -d, --debug[=level]     Print additional runtime debugging data to stdout\n"
+"                         level = 1..2\n"
+" -i, --soundin=DEVICE    OSS compatible device for input (recording),\n"
+"                         default: /dev/dsp\n"
+" -o, --soundout=DEVICE   OSS compatible device for output (playback),\n"
+"                         default: /dev/dsp\n"
+" -m, --msn=MSN           identifying MSN (for outgoing calls), 0 for master\n"
+"                         MSN of this termination/port\n"
+"                         default: 0\n"
+" -l, --msns=MSNS         MSNs to listen on, semicolon-separated list or '*'\n"
+"                         default: *\n"
+" -c, --call=NUMBER       Call specified number\n"
+"\n"
+"Note: If arguments of --soundin and --soundout are equal, a full duplex\n"
+"      sound device is needed.\n"
+msgstr ""
+"Cách sử dụng: %s [TÙY_CHỌN...]\n"
+"\n"
+"Tùy chọn:\n"
+" -h, --help              Hiển thị thông điệp _trợ giúp_ này\n"
+" -v, --version           Hiển thị thông tin _phiên bản_\n"
+" -r, --cleanup           Bỏ tập tin ổ cắm cũ (còn lại bất ngờ sau chạy "
+"trước) (_xoá_)\n"
+" -d, --debug[=MỨC]       In dữ liệu _gỡ lỗi_ thời chạy thêm xuất "
+"thiết bị xuất chuẩn (stdout)\n"
+"                         MỨC = 1..2\n"
+" -i, --soundin=THIẾT_BỊ  thiết bị gõ tương thích với phần mềm nguồn mở "
+"(ghi)\n"
+"\t\t\t\t(_âm thanh vào_) mặc định: /dev/dsp\n"
+" -o, --soundout=THIẾT_BỊ thiết bị xuất tương thích với phần mềm nguồn mở "
+"(phát)\n"
+"\t\t\t\t(_âm thanh ra_) mặc định: /dev/dsp\n"
+" -m, --msn=SỐ            số đa người ký tên (Multiple Subscriber Number: MSN) "
+"nhận biết để gọi qua điện thoại,\n"
+"\t\t\t0 cho MSN chính của cổng/thiết bị cuối này, mặc định: 0\n"
+" -l, --msns=NHỮNG_SỐ     Những MSN để nghe qua, danh sách phân cách bằng dấu "
+"chấm phẩy\n"
+"\t\t\thay dấu sao '*', mặc định: *\n"
+" -c, --call=SỐ           _gọi_ số điện thoại đó\n"
+"\n"
+"Ghi chú : nếu hai đối số --soundin (âm thanh vào) và --soundout (âm thanh "
+"xuất)\n"
+"là bằng nhau thì cần thiết bị âm thanh truyền dẫn hai chiều đầy đủ (full "
+"duplex).\n"
+
+#: src/ant-phone.c:223
+#, c-format
+msgid "Calling %s... "
+msgstr "Gọi %s..."
+
+#: src/ant-phone.c:227
+msgid "successful.\n"
+msgstr "thành công.\n"
+
+#: src/callerid.c:131
+msgid "Delete Entry"
+msgstr "Xoá bỏ mục"
+
+#: src/callerid.c:143
+msgid ""
+"Are you sure you want to\n"
+"delete this entry?"
+msgstr ""
+"Bạn có chắc muốn xoá bỏ\n"
+"mục này không?"
+
+#: src/callerid.c:148
+msgid "Delete recording"
+msgstr "Xoá bỏ mục ghi"
+
+#: src/callerid.c:266
+#, c-format
+msgid "Enter the base filename for %s file"
+msgstr "Hãy nhập tên tập tin cơ bản cho tập tin %s"
+
+#: src/callerid.c:312
+msgid "Do you really want to delete this recording?"
+msgstr "Bạn chắc muốn xoá bỏ mục ghi này không?"
+
+#: src/callerid.c:336 src/callerid.c:362
+msgid "/_Playback"
+msgstr "/_Phát lại"
+
+#: src/callerid.c:337 src/callerid.c:366
+msgid "/_Save as..."
+msgstr "/_Lưu là..."
+
+#: src/callerid.c:338 src/callerid.c:370
+msgid "/Delete _Recording"
+msgstr "/Xoá bỏ mục _ghi"
+
+#: src/callerid.c:340 src/callerid.c:374
+msgid "/_Delete Row"
+msgstr "/Xoá bỏ _hàng"
+
+#: src/callerid.c:423 src/gtksettings.c:455
+msgid "Caller ID"
+msgstr "Xem số người gọi"
+
+#: ../src/sj-main.c:1245 ../src/source-view.c:211
+#: ../mimedir/mimedir-vcomponent.c:340
+#, fuzzy
+msgid "Duration"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Thời gian\n"
+"#-#-#-#-# libmimedir.vi.po (libmimedir HEADnReport-Msgid-Bugs-To: ) #-#-#-"
+"#-#\n"
+"Thời lượng"
+
+#: src/callerid.c:748
+msgid "(UNKNOWN)"
+msgstr "• Không biết •"
+
+#: src/callerid.c:818
+#, c-format
+msgid "ANT: %d unanswered"
+msgstr "ANT: %d chưa trả lời"
+
+#: src/controlpad.c:101
+#, c-format
+msgid "Preset %c"
+msgstr "Đặt trước %c"
+
+#: src/controlpad.c:108
+#, c-format
+msgid "Please input new preset data for button %c:"
+msgstr "Hãy nhập dữ liệu đặt trước mới cho cái nút %c:"
+
+#: src/controlpad.c:124 src/gtk.c:553
+msgid "Number:"
+msgstr "Số đt:"
+
+#: ../gcalctool/calctool.c:196
+msgid "Backspace"
+msgstr "Xoá lùi"
+
+#: src/controlpad.c:315
+msgid "Preset 1"
+msgstr "Lập trước 1"
+
+#: src/controlpad.c:316
+msgid "Clear Number"
+msgstr "Xoá số"
+
+#: src/controlpad.c:316
+msgid "Preset 2"
+msgstr "Lập trước 2"
+
+#: src/controlpad.c:317
+msgid "Redial"
+msgstr "Quay số lại"
+
+#: src/controlpad.c:317
+msgid "Preset 3"
+msgstr "Lập trước 3"
+
+#: src/controlpad.c:318
+msgid "Mute Microphone"
+msgstr "Tắt máy vi âm"
+
+#: src/controlpad.c:318
+msgid "Preset 4"
+msgstr "Lập trước 4"
+
+#: ../objects/FS/function.c:1178 ../objects/UML/classicon.c:127
+msgid "Control"
+msgstr "Điều khiển"
+
+#: src/controlpad.c:423
+msgid "Recording"
+msgstr "Ghi"
+
+#: src/controlpad.c:434
+msgid "Record to file"
+msgstr "Ghi vào tập tin"
+
+#: src/controlpad.c:445
+msgid "Record local channel"
+msgstr "Ghi kênh địa phương"
+
+#: src/controlpad.c:456
+msgid "Record remote channel"
+msgstr "Ghi kênh từ xa"
+
+#: src/gtk.c:222 src/gtksettings.c:229
+msgid "ANT Note"
+msgstr "Ant: chú thích"
+
+#: src/gtk.c:223
+msgid ""
+"Can't open audio device.\n"
+"Please stop other applications using\n"
+"the audio device(s) or check your\n"
+"device settings and try again."
+msgstr ""
+"Không thể mở thiết bị âm thanh.\n"
+"Hãy thôi các ứng dụng khác sử dụng\n"
+"cùng thiết bị âm thanh đó hay kiểm tra\n"
+"thiết lập thiết bị và thử lại."
+
+#: src/gtk.c:252
+msgid "Sound input device:"
+msgstr "Thiết bị gõ âm thanh:"
+
+#: src/gtk.c:253
+msgid "Input speed:"
+msgstr "Tốc độ gõ :"
+
+#: src/gtk.c:253 src/gtk.c:256 src/gtk.c:257 src/gtk.c:259 src/gtk.c:263
+#: src/gtk.c:266 src/gtk.c:267 src/gtk.c:269
+msgid "[inactive]"
+msgstr "[không làm gì]"
+
+#: src/gtk.c:255
+msgid "Input sample size (bits):"
+msgstr "Cỡ mẫu gõ (theo bit):"
+
+#: src/gtk.c:257 src/gtk.c:267
+msgid "Input fragment size (samples):"
+msgstr "Cỡ mảnh gõ (theo mẫu):"
+
+#: src/gtk.c:259
+msgid "Input channels:"
+msgstr "Kênh gõ :"
+
+#: src/gtk.c:262
+msgid "Sound output device:"
+msgstr "Thiết bị âm thanh xuất:"
+
+#: src/gtk.c:263
+msgid "Output speed:"
+msgstr "Tốc độ xuất:"
+
+#: src/gtk.c:265
+msgid "Output sample size (bits):"
+msgstr "Cỡ mẫu xuất (theo bit):"
+
+#: src/gtk.c:269
+msgid "Output channels:"
+msgstr "Kênh xuất:"
+
+#: src/gtk.c:272
+msgid "ISDN device:"
+msgstr "Thiết bị ISDN:"
+
+#: src/gtk.c:273
+msgid "ISDN speed (samples):"
+msgstr "Tốc độ ISDN (theo mẫu):"
+
+#: src/gtk.c:274
+msgid "ISDN sample size (bits):"
+msgstr "Cỡ mẫu ISDN (theo bit):"
+
+#: src/gtk.c:275
+msgid "ISDN fragment size (bytes):"
+msgstr "Cỡ mảnh ISDN (theo byte):"
+
+#: src/gtk.c:287
+msgid "ANT Info"
+msgstr "Thông tin ANT"
+
+#: src/gtk.c:358
+msgid "About ANT"
+msgstr "Giới thiệu ANT"
+
+#: src/gtk.c:374
+#, c-format
+msgid ""
+"ANT (ANT is Not a Telephone) Version %s\n"
+"Copyright 2002, 2003 Roland Stigge\n"
+"\n"
+"This is an ISDN telephone application\n"
+"written for GNU/Linux and ISDN4Linux for\n"
+"communicating via a full duplex soundcard (or\n"
+"multiple sound devices if you like) and an\n"
+"audio capable ISDN4Linux ISDN device\n"
+"\n"
+"Contact:\n"
+"Roland Stigge, stigge@antcom.de\n"
+"http://www.antcom.de/\n"
+"Mailing list: ant-phone-devel@nongnu.org"
+msgstr ""
+"ANT (ANT is Not a Telephone) phiên bản %s\n"
+"(ANT không phải là một máy điện thoại)\n"
+"Bản quyền © năm 2002, 2003 Roland Stigge\n"
+"\n"
+"Đây là một ứng dụng điện thoại ISDN\n"
+"được tạo cho GNU/Linux và ISDN4Linux\n"
+"để truyền thông qua thẻ âm thanh truyền dẫn\n"
+"hai chiều đầy đủ (hay nhiều thiết bị âm thanh nếu muốn)\n"
+"và thiết bị ISDN ISDN4Linux có thể gởi âm thanh.\n"
+"\n"
+"Liên lạc:\n"
+"Roland Stigge, stigge@antcom.de\n"
+"http://www.antcom.de/\n"
+"Hộp thư chung: ant-phone-devel@nongnu.org"
+
+#: src/gtk.c:412
+msgid "ANT License"
+msgstr "Quyền ANT"
+
+#: src/gtk.c:413
+msgid ""
+"ANT (ANT is Not a Telephone)\n"
+"Copyright (C) 2002, 2003 Roland Stigge\n"
+"\n"
+"This program is free software; you can redistribute it and/or\n"
+"modify it under the terms of the GNU General Public License\n"
+"as published by the Free Software Foundation; either version 2\n"
+"of the License, or (at your option) any later version.\n"
+"\n"
+"This program is distributed in the hope that it will be useful,\n"
+"but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
+"GNU General Public License for more details.\n"
+"\n"
+"You should have received a copy of the GNU General Public License\n"
+"along with this program; if not, write to the Free Software\n"
+"Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA."
+msgstr "" +"ANT (ANT is Not a Telephone)\n" +"Bản quyá»n © năm 2002, 2003 Roland Stigge\n" +"\n" +"ChÆ°Æ¡ng trình này là phần má»m tá»± do; bạn có thể phân phối nó lại và/hay\n" +"sá»­a đổi nó theo Ä‘iá»u kiện của Quyá»n Công Chung Gnu (GPL)\n" +"nhÆ° xuất do Tổ chức Phần má»m Tá»± do (Free Software Foundation)\n" +"hoặc phiên bản 2 của quyá»n đó hoặc (tùy chá»n) bất cứ phiên bản sau nào.\n" +"\n" +"Chúng tôi phân phối chÆ°Æ¡ng trình này vì mong nó hữu ích,\n" +"nhÆ°ng nó không bảo đảm gì cả, không có bảo đảm ngụ ý ngay cả\n" +"khả năng bán hay khả năng làm việc dứt khoát.\n" +"Hãy xem Quyá»n Công Chung Gnu (GPL) để tìm chi tiết.\n" +"\n" +"Nếu bạn chÆ°a nhận má»™t bản Quyá»n Công Chung Gnu\n" +"(Gnu General Public Licence) cùng vá»›i chÆ°Æ¡ng trinh này thì hãy\n" +"viết cho Tổ chức Phần má»m Tá»± do:\n" +"Free SoftwareFoundation, Inc.,\n" +"59 Temple Place - Suite 330,\n" +"Boston, MA 02111-1307, USA (Mỹ)." + +#: src/gtk.c:446 +msgid "/Phon_e" +msgstr "/_Äiện thoại" + +#: src/gtk.c:447 +msgid "/Phone/_Info Window" +msgstr "/Äiện thoại/Cá»­a sổ thông t_in" + +#: src/gtk.c:448 src/gtk.c:496 +msgid "/Phone/_Line Level Check" +msgstr "/Äiện thoại/Kiểm tra mức _dòng" + +#: src/gtk.c:450 +msgid "/Phone/" +msgstr "/Äiện thoại/" + +#: src/gtk.c:451 +msgid "/Phone/_Quit" +msgstr "/Äiện thoại/_Thoát" + +#: src/gtk.c:453 ../app/menus.c:141 ../app/menus.c:142 ../pan/gui.c:1659 +#: jpilot.c:1396 +msgid "/_View" +msgstr "/_Xem" + +#: src/gtk.c:454 src/gtk.c:484 +msgid "/View/_Caller ID Monitor" +msgstr "/Xem/Theo dõi số ngÆ°á»i dùng" + +#: src/gtk.c:456 src/gtk.c:487 +msgid "/View/_Line Level Meters" +msgstr "/Xem/Äo mức _dòng" + +#: src/gtk.c:458 src/gtk.c:490 +msgid "/View/Control _Pad" +msgstr "/Xem/_Bảng Ä‘iá»u khiển" + +#: src/gtk.c:460 src/metro.c:978 +msgid "/_Options" +msgstr "/Tùy _chá»n" + +#: src/gtk.c:461 src/gtk.c:493 +msgid "/Options/_Settings" +msgstr "/Tùy chá»n/Thiết _lập" + +#: ../app/menus.c:73 ../app/menus.c:263 ../pan/gui.c:1782 jpilot.c:1437 +msgid "/_Help" +msgstr "/Trợ _giúp" + +#: src/gtk.c:464 src/metro.c:993 +msgid "/Help/_About" +msgstr "/Trợ giúp/_Giá»›i thiệu" + +#: src/gtk.c:465 src/metro.c:995 +msgid "/Help/_License" +msgstr "/Trợ giúp/_Quyá»n" + +#: src/gtk.c:543 src/gtksettings.c:431 src/session.c:68 +msgid "Dialing" +msgstr "Äang quay số..." + +#: src/gtk.c:759 src/gtk.c:760 +msgid "MUTED" +msgstr "• Câm •" + +#: src/gtksettings.c:230 +msgid "Bad isdn/sound device settings, please try again." +msgstr "Có thiết lập thiết bị ISDN/âm thanh sai nên hay thá»­ lại." 
+
+#: src/gtksettings.c:293
+msgid "ANT Settings"
+msgstr "Thiết lập ANT"
+
+#: ../src/dlg-pick-applications.c:355 ../src/session.c:630
+#: ../src/resource-tree-treedata.cc:72 ../src/orca/rolenames.py:513
+msgid "Application"
+msgstr "Ứng dụng"
+
+#: src/gtksettings.c:320
+msgid "Save options on exit"
+msgstr "Lưu tùy chọn khi thoát"
+
+#: src/gtksettings.c:327
+msgid "Popup main window on incoming call"
+msgstr "Bật lên cửa sổ chính khi nhận cuộc gọi"
+
+#: src/gtksettings.c:333
+msgid "Execute on incoming call:"
+msgstr "Thi hành khi nhận cuộc gọi:"
+
+#: src/gtksettings.c:344
+msgid "Recording Format"
+msgstr "Dạng ghi"
+
+#: src/gtksettings.c:354
+msgid "Microsoft WAV, uLaw"
+msgstr "Microsoft WAV, uLaw"
+
+#: src/gtksettings.c:364
+msgid "Microsoft WAV, 16-bit signed"
+msgstr "Microsoft WAV, 16-bit có dấu"
+
+#: src/gtksettings.c:374
+msgid "Apple/SGI AIFF, uLaw"
+msgstr "Apple/SGI AIFF, uLaw"
+
+#: src/gtksettings.c:384
+msgid "Apple/SGI AIFF, 16-bit signed"
+msgstr "Apple/SGI AIFF, 16-bit có dấu"
+
+#: src/gtksettings.c:399 ../addressbook/libebook/e-contact.c:136
+#: ../mimedir/mimedir-vcard-phone.c:220 ../mimedir/mimedir-vcard-phone.c:756
+msgid "ISDN"
+msgstr "ISDN"
+
+#: src/gtksettings.c:411
+msgid "Identifying MSN:"
+msgstr "Số đa người ký tên (Multiple Subscriber Number: MSN) nhận biết:"
+
+#: src/gtksettings.c:421
+msgid "Listen to MSNs:"
+msgstr "Nghe qua những số đa người ký tên (MSN) này:"
+
+#: src/gtksettings.c:443
+msgid "Dial history size:"
+msgstr "Cỡ lịch sử quay số :"
+
+#: src/gtksettings.c:467
+msgid "Maximum CID rows:"
+msgstr "Tối đa hàng số người gởi:"
+
+#: src/gtksettings.c:475 src/gtksettings.c:503
+msgid "[no limit]"
+msgstr "[không giới hạn]"
+
+#: src/gtksettings.c:484
+msgid "Read isdnlog data on startup"
+msgstr "Đọc dữ liệu isdnlog (bản ghi ISDN) khi khởi động"
+
+#: src/gtksettings.c:491
+msgid "Maximum days to read from isdnlog:"
+msgstr "Đọc từ isdnlog (bản ghi ISDN) được số ngày (tối đa):"
+
+#: src/gtksettings.c:520
+msgid "Sound Devices"
+msgstr "Thiết bị âm thanh"
+
+#: src/gtksettings.c:522
+msgid "OSS"
+msgstr "OSS (Open Sound System)"
+
+#: src/gtksettings.c:534
+msgid "Input sound device:"
+msgstr "Thiết bị âm thanh gõ :"
+
+#: src/gtksettings.c:546
+msgid "Output sound device:"
+msgstr "Thiết bị âm thanh xuất:"
+
+#: src/gtksettings.c:559
+msgid "Release unused devices"
+msgstr "Nhả các thiết bị chưa sử dụng lại"
+
+#: src/llcheck.c:371
+msgid "Line Level Check"
+msgstr "Kiểm tra mức dòng"
+
+#: src/llcheck.c:383
+msgid ""
+"Please check the line input level\n"
+"and adjust it using your favorite\n"
+"mixer application.\n"
+"You can also play a sound\n"
+"to test the sound output."
+msgstr ""
+"Hãy kiểm tra mức gõ dòng\n"
+"và điều chỉnh nó bằng ứng dụng\n"
+"hoà tiếng ưa thích của bạn.\n"
+"Bạn cũng có thể phát tiếng\n"
+"để thử xuất âm thanh ra."
+
+#: src/llcheck.c:405 ../grecord/src/gsr-window.c:1737
+msgid "Play sound"
+msgstr "Phát âm"
+
+#: ../src/bb_util.c:279
+msgid "Ready"
+msgstr "Sẵn sàng"
+
+#: src/session.c:65 ../srcore/srpres.c:876 address_gui.c:1929
+#: address_gui.c:1932 address_gui.c:2941 dialer.c:308 dialer.c:333
+#: ../src/orca/rolenames.py:223
+#, fuzzy
+msgid "Dial"
+msgstr ""
+"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n"
+"Quay số\n"
+"#-#-#-#-# jpilot-0.99.8-pre12.vi.po (jpilot-0.99.8-pre12) #-#-#-#-#\n"
+"Quay số\n"
+"#-#-#-#-# orca.vi.po (orca HEAD) #-#-#-#-#\n"
+"Thoại"
+
+#: src/session.c:65 src/session.c:69 src/session.c:70
+msgid "Hang up"
+msgstr "Ngừng nói"
+
+#: src/session.c:66 src/session.c:67
+msgid "RING"
+msgstr "REO"
+
+#: src/session.c:66 src/session.c:67
+msgid "Answer"
+msgstr "Trả lời"
+
+#: src/session.c:66 src/session.c:67
+msgid "Reject"
+msgstr "Loại ra"
+
+#: src/session.c:68 src/session.c:69 src/session.c:70 src/session.c:71
+msgid "Pick up"
+msgstr "Lấy"
+
+#: src/session.c:69
+msgid "B-Channel open"
+msgstr "Kênh-B mở"
+
+#: src/session.c:70 ../configure.c:463 ../gnometris/tetris.cpp:777
+msgid "Setup"
+msgstr "Thiết lập"
+
+#: src/session.c:71 ../gst-mixer/src/element.c:230
+#: ext/alsa/gstalsamixertrack.c:92
+msgid "Playback"
+msgstr "Phát lại"
+
+#: src/session.c:377
+#, c-format
+msgid "Preset %d"
+msgstr "Đặt trước %d"
+
+#: src/session.c:699
+msgid "(HW ERROR)"
+msgstr "(• Lỗi phần cứng •)"
+
+#: src/session.c:986
+msgid "Audio OFF"
+msgstr "TẮT âm thanh"
+
+#: src/session.c:986
+msgid "Audio ON"
+msgstr "MỞ âm thanh"
+
+#: src/session.c:1081
+msgid "(BUSY)"
+msgstr "(• Bận •)"
+
+#: src/session.c:1087
+msgid "(TIMEOUT)"
+msgstr "(• Hết thời •)"
+
+#: src/session.c:1121
+msgid "(RUNG)"
+msgstr "(• Đã rung •)"
+
+#: src/session.c:1361
+msgid "(ABORTED)"
+msgstr "(• Bị hủy bỏ •)"
+
+#: src/session.c:1369
+msgid "(REJECTED)"
+msgstr "(• Bị từ chối •)"
+
+#: src/authmode.c:469
+msgid "Entering XDB loop..."
+msgstr "Vào vòng lặp XDB..."
+
+#: src/authmode.c:481 src/authmode.c:503
+msgid "Exiting XDB loop..."
+msgstr "Xuất vòng lặp XDB..."
+
+#: src/authmode.c:526
+msgid "Database not specified"
+msgstr "Chưa ghi rõ cơ sở dữ liệu"
+
+#: src/authmode.c:534
+#, c-format
+msgid "Cannot open database %s: %s"
+msgstr "Không thể mở cơ sở dữ liệu %s: %s"
+
+#: src/authmode.c:541
+#, c-format
+msgid "Found record for `%s'."
+msgstr "Tìm thấy mục ghi cho « %s »."
+
+#: src/authmode.c:546
+#, c-format
+msgid "Cannot retrieve data from the SASL database: %s"
+msgstr "Không thể lấy dữ liệu từ cơ sở dữ liệu SASL: %s"
+
+#: src/authmode.c:551
+#, c-format
+msgid "Record for `%s' not found."
+msgstr "Không tìm thấy mục ghi cho « %s »."
+
+#: src/authmode.c:606
+msgid "MTA has not been specified. Set either REMOTE-MTA or LOCAL-MTA."
+msgstr ""
+"Chưa ghi rõ MTA (tác nhân chuyển giao thông điệp). Hãy lập hoặc REMOTE-MTA "
+"(MTA ở xa) hay LOCAL-MTA (MTA cục bộ)."
+
+#: src/authmode.c:637 src/map.c:164 src/net.c:150 src/net.c:217
+#: src/transmode.c:98
+#, c-format
+msgid "Illegal address length received for host %s"
+msgstr "Nhận độ dài địa chỉ sai cho máy %s"
+
+#: src/authmode.c:651 src/transmode.c:112
+msgid "Loop not allowed. Connection rejected."
+msgstr "Không cho phép vòng lặp nên từ chối kết nối."
+
+#: src/authmode.c:683 src/transmode.c:143
+msgid "Connection closed successfully."
+msgstr "Đã đóng kết nối."
+
+#: src/authmode.c:690 src/transmode.c:149
+msgid "PAM: Session closed."
+msgstr "PAM: đã đóng phiên chạy."
+
+#: src/authmode.c:694 src/transmode.c:153
+msgid "PAM: failed to release authenticator."
+msgstr "PAM: không nhả điều xác thực được."
+
+#: src/daemon.c:49
+msgid "daemon() failed"
+msgstr "daemon() (tập lệnh trình nền) không thành công"
+
+#: src/daemon.c:56
+msgid "Cannot fork."
+msgstr "Không thể tạo tiến trình con."
+
+#: src/daemon.c:64
+msgid "setsid() failed"
+msgstr "setsid() không thành công"
+
+#: src/daemon.c:76
+#, c-format
+msgid "%s daemon startup succeeded."
+msgstr "%s khởi động trình nền được."
+
+#: src/daemon.c:88
+#, c-format
+msgid "Exited successfully"
+msgstr "Đã thoát thành công"
+
+#: src/daemon.c:90
+#, c-format
+msgid "Failed with status %d"
+msgstr "Không thành công với trạng thái %d"
+
+#: src/daemon.c:95
+#, c-format
+msgid "Terminated on signal %d"
+msgstr "Bị kết thúc tại tín hiệu %d"
+
+#: src/daemon.c:98
+#, c-format
+msgid "Stopped on signal %d"
+msgstr "Ngừng tại tín hiệu %d"
+
+#: src/daemon.c:101
+#, c-format
+msgid "Dumped core"
+msgstr "Lõi bị đổ"
+
+#: src/daemon.c:104 signame.c:142
+#, c-format
+msgid "Terminated"
+msgstr "Bị kết thúc"
+
+#: src/daemon.c:121
+#, c-format
+msgid "Child [%lu] finished. %s. %d client left."
+msgid_plural "Child [%lu] finished. %s. %d clients left."
+msgstr[0] "Tiến trình con [%lu] đã xong. %s. %d trình/máy khách còn lại."
+
+#: src/daemon.c:169
+msgid "WARNING: An unprivileged user has not been specified!"
+msgstr "CẢNH BÁO : chưa ghi rõ người dùng không có quyền."
+
+#: src/daemon.c:214
+msgid "GNU Anubis is running..."
+msgstr "Trình Anubis của GNU đang chạy..."
+
+#: src/daemon.c:226 src/exec.c:122
+msgid "accept() failed"
+msgstr "accept() không thành công"
+
+#: src/daemon.c:244
+#, c-format
+msgid "TCP wrappers: connection from %s:%u rejected."
+msgstr "Lớp bọc cho TCP: kết nối từ %s:%u bị từ chối."
+
+#: src/daemon.c:269
+#, c-format
+msgid "Too many clients. Connection from %s:%u rejected."
+msgstr "Quá nhiều máy khách: kết nối từ %s:%u bị từ chối."
+
+#: src/daemon.c:276
+#, c-format
+msgid "Connection from %s:%u"
+msgstr "Kết nối từ %s:%u"
+
+#: src/daemon.c:281
+msgid "daemon: cannot fork"
+msgstr "trình nền: không thể tạo tiến trình con"
+
+#: src/daemon.c:362 src/transmode.c:68
+msgid "The MTA has not been specified. Set the REMOTE-MTA or LOCAL-MTA."
+msgstr ""
+"Chưa ghi rõ MTA (tác nhân chuyển giao thông điệp). Hãy lập hoặc REMOTE-MTA "
+"(MTA ở xa) hay LOCAL-MTA (MTA cục bộ)."
+
+#: src/env.c:172
+#, c-format
+msgid "Try '%s --help' for more information."
+msgstr "Hãy thử lệnh '%s --help' (trợ giúp) để xem thông tin thêm."
+
+#: src/env.c:273
+#, c-format
+msgid "setgroups(1, %lu) failed"
+msgstr "setgroups(1, %lu) không thành công"
+
+#: src/env.c:283
+#, c-format
+msgid "setegid(%lu) failed"
+msgstr "setegid(%lu) không thành công"
+
+#: src/env.c:286
+#, c-format
+msgid "setregid(%lu,%lu) failed"
+msgstr "setregid(%lu,%lu) không thành công"
+
+#: src/env.c:290
+#, c-format
+msgid "setresgid(%lu,%lu,%lu) failed"
+msgstr "setresgid(%lu,%lu,%lu) không thành công"
+
+#: src/env.c:299
+#, c-format
+msgid "setgid(%lu) failed"
+msgstr "setgid(%lu) không thành công"
+
+#: src/env.c:302
+#, c-format
+msgid "cannot set effective gid to %lu"
+msgstr "không thể lập GID hữu hiệu cho %lu"
+
+#: src/env.c:324
+#, c-format
+msgid "setreuid(%lu,-1) failed"
+msgstr "setreuid(%lu,-1) không thành công"
+
+#: src/env.c:330
+#, c-format
+msgid "second setuid(%lu) failed"
+msgstr "second setuid(%lu) không thành công"
+
+#: src/env.c:338
+#, c-format
+msgid "setuid(%lu) failed"
+msgstr "setuid(%lu) không thành công"
+
+#: src/env.c:347
+msgid "seteuid(0) succeeded when it should not"
+msgstr "seteuid(0) thành công khi không nên."
+
+#: src/env.c:352
+msgid "cannot drop non-root setuid privileges"
+msgstr "không bỏ được quyền truy cập setuid không phải của người chủ"
+
+#: src/env.c:379
+msgid "PAM: Session opened (restrictions applied)."
+msgstr "PAM: đã mở phiên chạy (đã áp dụng các hạn chế)."
+
+#: src/env.c:382
+msgid "PAM: Not authenticated to use GNU Anubis."
+msgstr "PAM: không có xác thực để sử dụng trình Anubis của GNU."
+
+#: src/env.c:394 src/main.c:156
+#, c-format
+msgid "UID:%d (%s), GID:%d, EUID:%d, EGID:%d"
+msgstr "UID:%d (%s), GID:%d, EUID:%d, EGID:%d"
+
+#: src/env.c:429
+#, c-format
+msgid "Invalid user ID: %s"
+msgstr "UID (thông tin nhận biết người dùng) không hợp lệ: %s"
+
+#: src/env.c:435
+#, c-format
+msgid "Invalid user name: %s"
+msgstr "Tên người dùng không hợp lệ: %s"
+
+#: src/env.c:458
+#, c-format
+msgid "Wrong permissions on %s. Set 0600."
+msgstr "Quyền truy cập sai với %s. Lập 0600."
+
+#: src/env.c:484
+#, c-format
+msgid "%s is not a regular file or a symbolic link."
+msgstr "%s không phải là một tập tin bình thường hay một liên kết mềm."
+
+#: src/env.c:507
+#, c-format
+msgid "Unknown mode: %s"
+msgstr "Không biết chế độ : %s"
+
+#: src/env.c:522
+#, c-format
+msgid "Cannot open pid file '%s'"
+msgstr "Không thể mở tập tin PID '%s'"
+
+#: src/errs.c:96
+#, c-format
+msgid "Could not write to socket: %s"
+msgstr "Không thể ghi vào ổ cắm: %s"
+
+#: src/errs.c:98
+msgid "Could not write to socket"
+msgstr "Không thể ghi vào ổ cắm"
+
+#: src/errs.c:108
+#, c-format
+msgid "Unknown host %s."
+msgstr "Không biết máy %s."
+
+#: src/errs.c:111
+#, c-format
+msgid "%s: host name is valid but does not have an IP address."
+msgstr "%s: tên máy là hợp lệ nhưng mà không có địa chỉ IP."
+
+#: src/errs.c:115
+#, c-format
+msgid "%s: unrecoverable name server error occurred."
+msgstr "%s: gặp lỗi máy phục vụ tên không thể phục hồi."
+
+#: src/errs.c:118
+#, c-format
+msgid "%s: a temporary name server error occurred. Try again later."
+msgstr "%s: gặp lỗi máy phục vụ tên tạm thời nên hãy thử lại lần sau."
+
+#: src/errs.c:121
+#, c-format
+msgid "%s: unknown DNS error %d."
+msgstr "%s: không biết lỗi DNS %d."
+
+#: src/esmtp.c:165
+msgid "Malformed or unexpected reply"
+msgstr "Trả lời dạng sai hay bất ngờ."
+
+#: src/esmtp.c:189
+#, c-format
+msgid "SASL gsasl_client_start: %s"
+msgstr "SASL gsasl_client_start: %s"
+
+#: src/esmtp.c:198 src/esmtp.c:216
+#, c-format
+msgid "GSASL handshake aborted: %d %s"
+msgstr "Việc bắt tay GSASL bị hủy bỏ : %d %s"
+
+#: src/esmtp.c:227
+#, c-format
+msgid "GSASL error: %s"
+msgstr "Lỗi GSASL: %s"
+
+#: src/esmtp.c:241
+#, c-format
+msgid "Authentication failed: %d %s"
+msgstr "Không xác thực được: %d %s"
+
+#: src/esmtp.c:244
+msgid "Authentication successful."
+msgstr "Đã xác thực thành công."
+
+#: src/esmtp.c:263
+msgid "Got empty list of authentication methods"
+msgstr "Đã nhận danh sách phương pháp xác thực rỗng"
+
+#: src/esmtp.c:288
+msgid "Server did not offer any feasible authentication mechanism"
+msgstr "Máy phục vụ chưa đưa ra cơ chế xác thực nào có thể dùng"
+
+#: src/esmtp.c:298
+msgid "INTERNAL ERROR"
+msgstr "LỖI NỘI BỘ"
+
+#: src/esmtp.c:306
+#, c-format
+msgid ""
+"Selected authentication mechanism %s requires TLS encryption. Not using "
+"ESMTP authentication"
+msgstr ""
+"Cơ chế xác thực đã chọn %s thì cần thiết mật mã TLS. Không xác thực loại "
+"ESMTP."
+
+#: src/esmtp.c:313
+#, c-format
+msgid "Selected authentication mechanism %s"
+msgstr "Cơ chế xác thực đã chọn %s"
+
+#: src/esmtp.c:319
+#, c-format
+msgid "Cannot initialize libgsasl: %s"
+msgstr "Không khởi chạy được libgsasl: %s"
+
+#: src/esmtp.c:340
+msgid "ESMTP AUTH is not supported"
+msgstr "Không hỗ trợ cách xác thực (AUTH) ESMTP"
+
+#: src/exec.c:82
+msgid "#1 socket() failed."
+msgstr "#1 socket() (ổ cắm) không thành công."
+
+#: src/exec.c:87
+msgid "#2 socket() failed."
+msgstr "#2 socket() (ổ cắm) không thành công."
+
+#: src/exec.c:97
+msgid "#1 bind() failed"
+msgstr "#1 bind() (đóng kết) không thành công."
+
+#: src/exec.c:102
+msgid "#2 bind() failed"
+msgstr "#2 bind() (đóng kết) không thành công."
+
+#: src/exec.c:107 src/net.c:237
+msgid "listen() failed"
+msgstr "listen() (nghe) không thành công."
+
+#: src/exec.c:112
+#, c-format
+msgid "getsockname() failed: %s."
+msgstr "getsockname() (gọi tên ổ cắm) không thành công: %s."
+
+#: src/exec.c:117
+msgid "connect() failed"
+msgstr "connect() (kết nối) không thành công."
+
+#: src/exec.c:129
+msgid "socketpair() failed"
+msgstr "socketpair() (cặp ổ cắm) không thành công."
+
+#: src/exec.c:143
+#, c-format
+msgid "Local program [%lu] finished."
+msgstr "Chương trình cục bộ [%lu] đã xong."
+
+#: src/exec.c:179
+#, c-format
+msgid "Executing %s %s..."
+msgstr "Thực hiện %s %s..."
+
+#: src/exec.c:187
+msgid "fork() failed"
+msgstr "fork() (tạo tiến trình con) không thành công."
+
+#: src/exec.c:201
+msgid "execvp() failed"
+msgstr "execvp() không thành công."
+
+# Name of a program: do not translate/ tên chương trình: đừng dịch
+#: src/gpg.c:54
+#, c-format
+msgid "GPGME: %s."
+msgstr "GPGME: %s."
+
+#: src/gpg.c:100
+#, c-format
+msgid "Install GPGME version %s or later."
+msgstr "Cài đặt trình GPGME phiên bản %s hay sau."
+
+#: src/gpg.c:107
+#, c-format
+msgid "GPGME: failed. %s."
+msgstr "GPGME: không thành công. %s."
+
+#: src/gpg.c:159 src/gpg.c:407
+#, c-format
+msgid "GPGME: Cannot list keys: %s"
+msgstr "GPGME: không thể liệt kê các khoá: %s"
+
+#: src/gpg.c:288 src/gpg.c:430
+#, c-format
+msgid "GPGME: Invalid recipient encountered: %s"
+msgstr "GPGME: gặp người nhận không hợp lệ: %s"
+
+#: src/gpg.c:333
+#, c-format
+msgid "GPGME: Invalid signer found: %s"
+msgstr "GPGME: tìm thấy người ký tên không hợp lệ: %s"
+
+#: src/gpg.c:340
+msgid "GPGME: Unexpected number of signatures created"
+msgstr "GPGME: đã tạo số lượng chữ ký bất ngờ"
+
+#: src/gpg.c:347
+msgid "GPGME: Wrong type of signature created"
+msgstr "GPGME: đã tạo chữ ký kiểu không đúng"
+
+#: src/gpg.c:352
+#, c-format
+msgid "GPGME: Wrong pubkey algorithm reported: %i"
+msgstr "GPGME: đã thông báo thuật toán khoá công không đúng: %i"
+
+#: src/gpg.c:359
+#, c-format
+msgid "GPGME: Wrong hash algorithm reported: %i"
+msgstr "GPGME: đã thông báo thuật toán băm không đúng: %i"
+
+#: src/gpg.c:366
+#, c-format
+msgid "GPGME: Wrong signature class reported: %u"
+msgstr "GPGME: đã thông báo loại chữ ký không đúng: %u"
+
+#: src/guile.c:79
+#, c-format
+msgid "cannot open guile output file %s"
+msgstr "không thể mở tập tin xuất của guile %s"
+
+#: src/guile.c:266
+msgid "missing procedure name"
+msgstr "thiếu tên thủ tục"
+
+#: src/guile.c:281
+#, c-format
+msgid "%s not a procedure object"
+msgstr "%s không phải là một đối tượng thủ tục"
+
+#: src/guile.c:323
+#, c-format
+msgid "Bad car type in return from %s"
+msgstr "%s đã gởi trả kiểu car sai"
+
+#: src/guile.c:342
+#, c-format
+msgid "Bad cdr type in return from %s"
+msgstr "%s đã gởi trả kiểu cdr sai"
+
+#: src/guile.c:345
+#, c-format
+msgid "Bad return type from %s"
+msgstr "Kiểu trả về sai từ %s"
+
+#: src/help.c:85
+msgid ""
+"\n"
+"GNU Anubis is free software; you can redistribute it and/or modify\n"
+"it under the terms of the GNU General Public License as published by\n"
+"the Free Software Foundation; either version 2 of the License, or\n"
+"(at your option) any later version."
+msgstr ""
+"\n"
+"Trình Anubis của GNU là phần mềm tự do: bạn có thể phân phối lại nó\n"
+"và/hay sửa đổi nó với điều kiện của Quyền Công Chung GNU (GPL)\n"
+"như đã xuất bởi Tổ chức Phần mềm Tự do, hoặc phiên bản 2\n"
+"của Quyền đó, hay (tùy chọn) bất cứ phiên bản sau nào."
+
+#: src/help.c:89
+msgid ""
+"\n"
+"GNU Anubis is distributed in the hope that it will be useful,\n"
+"but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
+"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n"
+"GNU General Public License for more details."
+msgstr ""
+"\n"
+"Trình Anubis của GNU được phân phối vì chúng tôi mong nó có ích,\n"
+"nhưng mà không bảo đảm gì cả,\n"
+"dù khả năng bán hay khả năng làm việc dứt khoát.\n"
+"Hãy xem Quyền Công Chung GNU để tìm chi tiết."
+
+#: src/help.c:93
+msgid ""
+"\n"
+"You should have received a copy of the GNU General Public License\n"
+"along with GNU Anubis; if not, write to the Free Software\n"
+"Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA"
+msgstr ""
+"\n"
+"Bạn nên đã nhận một bản sao của Quyền Công Chung GNU (GPL)\n"
+"cùng với trình Anubis của GNU; nếu không thì hãy viết thư cho:\n"
+"Free Software Foundation, Inc.,\n"
+"59 Temple Place, Suite 330,\n"
+"Boston, MA 02111-1307 USA (Mỹ)"
+
+#: src/help.c:96
+msgid ""
+"\n"
+"GNU Anubis is released under the GPL with the additional exemption that\n"
+"compiling, linking, and/or using OpenSSL is allowed.\n"
+msgstr ""
+"\n"
+"Trình Anubis của GNU được phát hành với điều kiện của GPL\n"
+"và cũng với sự miễn trừ là cho phép biên dịch, liên kết và/hay sử dụng OpenSSL.\n"
+
+#: src/help.c:104
+msgid "Usage: anubis [options]\n"
+msgstr "Cách sử dụng: anubis [tùy_chọn]\n"
+
+#: src/help.c:105
+msgid ""
+" -b, --bind [HOST:]PORT Specify the TCP port on which GNU Anubis "
+"listens\n"
+" for connections. The default HOST is "
+"INADDR_ANY,\n"
+" and default PORT is 24 (private mail system)."
+msgstr ""
+" -b, --bind [MÁY:]CỔNG Ghi rõ cổng TCP nơi trình Anubis của GNU\n"
+"\t\t\t\tlắng nghe kết nối. Máy mặc định là INADDR_ANY,\n"
+"\t\t\t\tvà cổng mặc định là 24 (hệ thống thư cá nhân)"
+
+#: src/help.c:108
+msgid ""
+" -r, --remote-mta HOST[:PORT] Specify a remote SMTP host name or IP "
+"address.\n"
+" The default PORT number is 25."
+msgstr ""
+" -r, --remote-mta MÁY[:CỔNG] \t\tGhi rõ tên máy SMTP _ở xa_ hay địa chỉ IP.\n"
+"\t\t\t\t\t\t\t\tSố cổng mặc định là 25."
+
+#: src/help.c:110
+msgid ""
+" -l, --local-mta FILE Execute a local SMTP server, which works on\n"
+" standard input and output (inetd-type "
+"program).\n"
+" This option excludes the '--remote-mta' option."
+msgstr ""
+" -l, --local-mta FILE Thực hiện trình hỗ trợ SMTP _địa phương_\n"
+"\t\t\t\tmà làm việc với thiết bị gõ/xuất chuẩn (trình kiểu inetd).\n"
+"\t\t\t\tTùy chọn này loại trừ tùy chọn '--remote-mta'."
+
+#: src/help.c:113
+msgid " -m, --mode=MODE Select operation mode."
+msgstr " -m, --mode=CHẾ_ĐỘ Chọn _chế độ_ thao tác."
+
+#: src/help.c:114
+msgid ""
+" MODE is either \"transparent\" or \"auth\""
+msgstr ""
+" CHẾ ĐỘ đó là hoặc \"transparent\" (trong "
+"suốt)\n"
+"\t\t\t\t\t\t\thay \"auth\" (xác thực)"
+
+#: src/help.c:115
+msgid " -f, --foreground Foreground mode."
+msgstr " -f, --foreground Chế độ _nền trước_."
+
+#: src/help.c:116
+msgid ""
+" -i, --stdio Use the SMTP protocol (OMP/Tunnel) as "
+"described\n"
+" in RFC 821 on standard input and output."
+msgstr ""
+" -i, --stdio Sử dụng giao thức SMTP (OMP/Tunnel) như được "
+"diễn tả\n"
+"\t\t\t\t\ttrong RFC 821, với _thiết bị gõ/xuất chuẩn_."
+
+#: src/help.c:118
+msgid "Output options:\n"
+msgstr "Tùy chọn xuất:\n"
+
+#: src/help.c:119
+msgid " -s, --silent Work silently."
+msgstr " -s, --silent Không xuất chi tiết (_im_)."
+
+#: src/help.c:120
+msgid " -v, --verbose Work noisily."
+msgstr " -v, --verbose Xuất _chi tiết_."
+
+#: src/help.c:121
+msgid " -D, --debug Debug mode."
+msgstr " -D, --debug Chế độ _gỡ lỗi_."
+
+#: src/help.c:122
+msgid ""
+"\n"
+"Miscellaneous options:\n"
+msgstr ""
+"\n"
+"Tùy chọn thêm:\n"
+
+#: src/help.c:123
+msgid ""
+" -c, --check-config Run the configuration file syntax checker."
+msgstr ""
+" -c, --check-config Chạy trình _kiểm tra_ cú pháp trong tập tin "
+"_cấu hình_."
+
+#: src/help.c:124
+msgid ""
+" --show-config-options Print a list of configuration options used\n"
+" to build GNU Anubis."
+msgstr ""
+" --show-config-options _Hiển thị_ danh sách _tùy chọn cấu hình_\n"
+"\t\t\t\t\t\tđược dùng để xây dựng trình Anubis của GNU."
+
+#: src/help.c:126
+msgid ""
+" --relax-perm-check Do not check user configuration file "
+"permissions."
+msgstr ""
+" --relax-perm-check Không _kiểm tra quyền truy cập_ tập tin cấu "
+"hình\n"
+"\t\t\t\t\t\tngười dùng (_nới lỏng_)."
+
+#: src/help.c:127
+msgid ""
+" --altrc FILE Specify alternate system configuration file."
+msgstr ""
+" --altrc TẬP_TIN Ghi rõ tập tin đó là tập tin cấu hình hệ thống "
+"_thay thế_."
+
+#: src/help.c:128
+msgid " --norc Ignore system configuration file."
+msgstr " --norc Bỏ qua tập tin cấu hình hệ thống."
+
+#: src/help.c:129
+msgid " --version Print version number and copyright."
+msgstr ""
+" --version Hiển thị số _phiên bản_ và thông tin quyền."
+
+#: src/help.c:130
+msgid " --help It's obvious..."
+msgstr " --help trợ _giúp_"
+
+#: src/help.c:131
+#, c-format
+msgid ""
+"\n"
+"Report bugs to <%s>.\n"
+msgstr ""
+"\n"
+"Hãy thông báo lỗi cho <%s>.\n"
+
+#: src/ident.c:105
+msgid "IDENT: socket() failed"
+msgstr "IDENT: socket() (ổ cắm) không thành công"
+
+#: src/ident.c:117
+msgid "IDENT: connect() failed"
+msgstr "IDENT: connect() (kết nối) không thành công"
+
+#: src/ident.c:123
+#, c-format
+msgid "IDENT: connected to %s:%u"
+msgstr "IDENT: hiện có kết nối đến %s:%u"
+
+#: src/ident.c:132
+#, c-format
+msgid "IDENT: stream_write() failed: %s."
+msgstr "IDENT: stream_write() (ghi dòng) không thành công: %s."
+
+#: src/ident.c:140
+#, c-format
+msgid "IDENT: recvline() failed: %s."
+msgstr "IDENT: recvline() (dòng nhận) không thành công: %s."
+
+#: src/ident.c:151
+msgid "IDENT: incorrect data."
+msgstr "IDENT: dữ liệu không đúng."
+
+#: src/ident.c:162
+msgid "IDENT: data probably encrypted with DES..."
+msgstr "IDENT: rất có thể là dữ liệu được mật mã bằng DES..."
+
+#: src/ident.c:170
+msgid "IDENT: incorrect data (DES deciphered)."
+msgstr "IDENT: dữ liệu không đúng (đã giải mật mã DES)."
+
+#: src/ident.c:188
+#, c-format
+msgid "IDENT: resolved remote user to %s."
+msgstr "IDENT: đã quyết định người dùng ở xa là %s."
+
+#: src/map.c:71
+#, c-format
+msgid "%s remapped to %s@localhost."
+msgstr "Đã ảnh xạ lại %s thành %s@localhost."
+
+#: src/map.c:123
+msgid "Translation map: incorrect syntax."
+msgstr "Bản đồ thông dịch: cú pháp không đúng."
+
+#: src/mem.c:37
+msgid "malloc() failed. Cannot allocate enough memory."
+msgstr "malloc() (phân chia bộ nhớ) không thể phân chia đủ bộ nhớ."
+
+#: src/mem.c:52
+msgid "realloc() failed. Cannot reallocate enough memory."
+msgstr "realloc() (phân chia lại bộ nhớ) không thể phân chia lại đủ bộ nhớ."
+
+#: src/misc.c:332
+msgid "Can't find out my own hostname"
+msgstr "Không tìm thấy tên máy mình"
+
+#: src/net.c:50
+msgid "SERVER"
+msgstr "MÁY CHỦ"
+
+#: src/net.c:53
+msgid "CLIENT"
+msgstr "MÁY KHÁCH"
+
+#: src/net.c:131
+msgid "Getting remote host information..."
+msgstr "Gọi thông tin máy ở xa..."
+
+#: src/net.c:168
+msgid "Cannot create stream socket."
+msgstr "Không thể tạo ổ cắm dòng."
+
+#: src/net.c:173
+#, c-format
+msgid "Couldn't connect to %s:%u. %s."
+msgstr "Không thể kết nối đến %s:%u. %s."
+
+#: src/net.c:178
+#, c-format
+msgid "Connected to %s:%u"
+msgstr "Hiện có kết nối đến %s:%u"
+
+#: src/net.c:200
+msgid "Cannot create stream socket"
+msgstr "Không thể tạo ổ cắm dòng."
+
+#: src/net.c:233
+msgid "bind() failed"
+msgstr "bind() (đóng kết) không thành công."
+
+#: src/net.c:234
+#, c-format
+msgid "GNU Anubis bound to %s:%u"
+msgstr "Trình Anubis của GNU đã đóng kết đến %s:%u"
+
+#: src/net.c:261
+msgid "Short write"
+msgstr "Ghi vắn"
+
+#: src/net.c:339
+msgid "INTERNAL ERROR (get_response_smtp): buffer exhausted. Please report."
+msgstr ""
+"LỖI NỘI BỘ (get_response_smtp): (gọi trả lời SMTP) hết bộ đệm hoàn toàn. Hãy "
+"thông báo lỗi này."
+
+#: src/socks.c:53
+msgid "Using SOCKS Proxy..."
+msgstr "Dùng máy ủy nhiệm SOCKS..."
+
+#: src/socks.c:71
+#, c-format
+msgid "SOCKS proxy: %s"
+msgstr "Máy ủy nhiệm SOCKS: %s"
+
+#: src/socks.c:143
+msgid "Address must be an IP, not a domain name."
+msgstr ""
+"Địa chỉ phải là địa chỉ IP (v.d. 127.0.0.0), không phải tên miền (v.d. www."
+"miềnnày.com)."
+
+#: src/socks.c:180 src/socks.c:399
+msgid "SOCKS Proxy Connection: succeeded."
+msgstr "Kết nối ủy nhiệm SOCKS: thành công."
+
+#: src/socks.c:183
+msgid "Request rejected or failed."
+msgstr "Yêu cầu bị từ chối hay không thành công."
+
+#: src/socks.c:186
+msgid "Request rejected."
+msgstr "Yêu cầu bị từ chối."
+
+#: src/socks.c:189
+msgid ""
+"Request rejected, because the client program and identd reported different "
+"User-IDs."
+msgstr ""
+"Yêu cầu bị từ chối, vì chương trình khách và identd đã thông báo thông tin "
+"nhận biết người dùng (UID) khác nhau."
+
+#: src/socks.c:193 src/socks.c:426
+msgid "Server reply is not valid."
+msgstr "Máy phục vụ trả lời không hợp lệ."
+
+#: src/socks.c:234
+msgid "Possibly not a SOCKS proxy service."
+msgstr "Có lẽ không phải một dịch vụ ủy nhiệm SOCKS."
+
+#: src/socks.c:246
+msgid "SOCKS Proxy AUTH method: NO AUTHENTICATION REQUIRED"
+msgstr "Phương pháp xác thực (AUTH) ủy nhiệm SOCKS: KHÔNG CẦN PHẢI XÁC THỰC"
+
+#: src/socks.c:249
+msgid "SOCKS Proxy AUTH method: USER NAME/PASSWORD"
+msgstr "Phương pháp xác thực (AUTH) ủy nhiệm SOCKS: TÊN DÙNG/MẬT KHẨU"
+
+#: src/socks.c:253
+msgid "Cannot send null user name or password."
+msgstr "Không gởi được tên dùng hay mật khẩu rỗng."
+
+#: src/socks.c:295
+msgid "Bad user name or password."
+msgstr "Tên dùng hay mật khẩu sai."
+
+#: src/socks.c:299
+msgid "SOCKS Proxy AUTH: succeeded."
+msgstr "Xác thực (AUTH) ủy nhiệm SOCKS: thành công."
+
+#: src/socks.c:302
+msgid "Server does not accept any method."
+msgstr "Máy phục vụ không chấp nhận phương pháp nào."
+
+#: src/socks.c:305
+msgid "Server does not accept an AUTH method."
+msgstr "Máy phục vụ không chấp nhận phương pháp AUTH (xác thực)."
+
+#: src/socks.c:402
+msgid "General SOCKS server failure."
+msgstr "Lỗi máy phục vụ SOCKS chung."
+
+#: src/socks.c:405
+msgid "Connection not allowed by a ruleset."
+msgstr "Một bộ quy tắc không cho phép kết nối đó."
+
+#: src/socks.c:408
+msgid "Network unreachable."
+msgstr "Không tới được mạng."
+
+#: src/socks.c:411
+msgid "Host unreachable."
+msgstr "Không tới được máy."
+
+#: src/socks.c:414
+msgid "Connection refused."
+msgstr "Kết nối bị từ chối."
+
+#: src/socks.c:417
+msgid "TTL expired."
+msgstr "Thời gian sống đã hết hạn."
+
+#: src/socks.c:420
+msgid "Command not supported."
+msgstr "Không hỗ trợ lệnh đó."
+
+#: src/socks.c:423
+msgid "Address type not supported."
+msgstr "Không hỗ trợ kiểu địa chỉ đó."
+
+#: src/quit.c:31
+msgid "Signal Caught. Exiting Cleanly..."
+msgstr "Đã bắt tín hiệu, thoát được..."
+
+#: src/quit.c:38
+msgid "Timeout! Exiting..."
+msgstr "Hết thời hạn! Thoát..."
+
+#: src/rcfile.c:121
+#, c-format
+msgid "cannot stat file `%s'"
+msgstr "không thể stat (gọi các thông tin về) tập tin `%s'"
+
+#: src/rcfile.c:131
+#, c-format
+msgid "File `%s' has already been read.\n"
+msgstr "Tập tin « %s » đã được đọc.\n"
+
+#: src/rcfile.c:149
+#, c-format
+msgid "Welcome user %s !"
+msgstr "Chào mừng người dùng %s !"
+
+#: src/rcfile.c:185
+#, c-format
+msgid "Reading system configuration file %s..."
+msgstr "Đọc tập tin cấu hình hệ thống %s..."
+
+#: src/rcfile.c:197
+#, c-format
+msgid "Reading user configuration file %s..."
+msgstr "Đọc tập tin cấu hình người dùng %s..."
+
+#: src/rcfile.c:341
+msgid "`logfile' directive is ignored in main configuration file"
+msgstr ""
+"Chỉ thị `logfile' (tập tin bản ghi) bị bỏ qua trong tập tin cấu hình chính."
+
+#: src/rcfile.c:789
+#, c-format
+msgid "No such section: %s"
+msgstr "Không có phần như vậy: %s"
+
+#: src/rcfile.l:181
+#, c-format
+msgid ""
+"Stray character in config: \\%03o. Possibly missing quotes around the string"
+msgstr ""
+"Gặp ký tự rải rác trong cấu hình: \\%03o. Có lẽ thiếu dấu trích dẫn ở quanh "
+"chuỗi."
+
+#: src/rcfile.l:385
+msgid "Anubis RC file error"
+msgstr "Lỗi tập tin RC Anubis."
+
+#: src/rcfile.y:170 src/rcfile.y:178
+#, c-format
+msgid "Section %s already defined"
+msgstr "Phần %s đã được định nghĩa."
+
+#: src/rcfile.y:230
+#, c-format
+msgid "unknown keyword: %s"
+msgstr "không biết từ khoá: %s"
+
+#: src/rcfile.y:532
+msgid "missing replacement value"
+msgstr "thiếu giá trị thay thế"
+
+#: src/rcfile.y:629
+#, c-format
+msgid "Not a valid debugging level: %s"
+msgstr "Không phải là mức độ gỡ lỗi hợp lệ: %s"
+
+#: src/rcfile.y:1179
+msgid "Unknown regexp modifier"
+msgstr "Không biết ký tự sửa đổi biểu thức chính quy"
+
+#: src/rcfile.y:1277
+msgid "STOP"
+msgstr "NGỪNG"
+
+#: src/rcfile.y:1282
+#, c-format
+msgid "Calling %s"
+msgstr "Đang gọi %s"
+
+#: src/rcfile.y:1288
+#, c-format
+msgid "ADD %s [%s] %s"
+msgstr "THÊM %s [%s] %s"
+
+#: src/rcfile.y:1298
+#, c-format
+msgid "MODIFY %s [%s] [%s] %s"
+msgstr "SỬA ĐỔI %s [%s] [%s] %s"
+
+#: src/rcfile.y:1311
+#, c-format
+msgid "REMOVE HEADER [%s]"
+msgstr "GỠ BỎ DÒNG ĐẦU [%s]"
+
+#: src/rcfile.y:1334
+#, c-format
+msgid "Executing %s"
+msgstr "Đang thực hiện %s"
+
+#: src/rcfile.y:1414
+#, c-format
+msgid "Matched trigger \"%s\""
+msgstr "Thủ tục lẫy đã khớp « %s »"
+
+#: src/rcfile.y:1418
+#, c-format
+msgid "Matched condition %s[%s] \"%s\""
+msgstr "Điều kiện đã khớp %s[%s] « %s »"
+
+#: src/rcfile.y:1528
+#, c-format
+msgid "Section %s"
+msgstr "Phần %s"
+
+#: src/rcfile.y:1552
+#, c-format
+msgid "Unknown section: %s"
+msgstr "Không biết phần: %s"
+
+#: src/rcfile.y:1599
+msgid "program is not allowed in this section"
+msgstr "không cho phép chương trình đó trong phần này"
+
+#: src/regex.c:113
+#, c-format
+msgid "INTERNAL ERROR at %s:%d: missing or invalid regex"
+msgstr "LỖI NỘI BỘ tại %s:%d: thiếu biểu thức chính quy hay nó không hợp lệ"
+
+#: src/regex.c:332
+#, c-format
+msgid "regcomp() failed at %s: %s."
+msgstr "regcomp() không thành công tại %s: %s."
+
+#: src/regex.c:408
+#, c-format
+msgid "pcre_compile() failed at offset %d: %s."
+msgstr "pcre_compile() không thành công tại hiệu số %d: %s."
+
+#: src/regex.c:433
+#, c-format
+msgid "pcre_fullinfo() failed: %d."
+msgstr "pcre_fullinfo() không thành công: %d."
+
+#: src/regex.c:445
+msgid "Matched, but too many substrings."
+msgstr "Khớp được, nhưng mà có quá nhiều chuỗi phụ."
+
+#: src/regex.c:458
+#, c-format
+msgid "Get substring %d failed (%d)."
+msgstr "Gọi chuỗi phụ %d không thành công (%d)."
+
+#: src/ssl.c:59
+msgid "Seeding random number generator..."
+msgstr "Gieo bộ tạo số ngẫu nhiên..."
+
+#: src/ssl.c:63
+msgid "Unable to seed random number generator."
+msgstr "Không gieo được bộ tạo số ngẫu nhiên."
+
+#: src/ssl.c:80
+#, c-format
+msgid "Write error: %s"
+msgstr "Lỗi ghi: %s"
+
+#: src/ssl.c:98
+#, c-format
+msgid "Read error: %s"
+msgstr "Lỗi đọc: %s"
+
+#: src/ssl.c:277
+msgid "SSLv23_client_method() failed."
+msgstr "SSLv23_client_method() (phương pháp máy khách) không thành công."
+
+#: src/ssl.c:282 src/ssl.c:363
+msgid "Can't create SSL_CTX object."
+msgstr "Không thể tạo đối tượng SSL_CTX."
+
+#: src/ssl.c:288 src/ssl.c:383
+msgid "SSL_CTX_set_cipher_list() failed."
+msgstr "SSL_CTX_set_cipher_list() (lập danh sách mật mã) không thành công."
+
+#: src/ssl.c:309 src/tls.c:168
+msgid "Initializing the TLS/SSL connection with MTA..."
+msgstr "Khởi động kết nối TLS/SSL với MTA..."
+
+#: src/ssl.c:313 src/ssl.c:404
+msgid "Can't create a new SSL structure for a connection."
+msgstr "Không thể tạo cấu trúc SSL mới để kết nối."
+
+#: src/ssl.c:329 src/tls.c:202
+#, c-format
+msgid "TLS/SSL handshake failed: %s"
+msgstr "Việc bắt tay TLS/SSL không thành công: %s"
+
+#: src/ssl.c:358
+msgid "SSLv23_server_method() failed."
+msgstr "SSLv23_server_method() (phương pháp máy phục vụ) không thành công."
+
+#: src/ssl.c:368
+msgid "SSL_CTX_use_certificate_file() failed."
+msgstr ""
+"SSL_CTX_use_certificate_file() (dùng tập tin chứng nhận) không thành công."
+
+#: src/ssl.c:373
+msgid "SSL_CTX_use_PrivateKey_file() failed."
+msgstr ""
+"SSL_CTX_use_PrivateKey_file() (dùng tập tin khoá riêng) không thành công."
+
+#: src/ssl.c:378
+msgid "Private key does not match the certificate public key."
+msgstr "Khoá riêng không khớp khoá công của chứng nhận."
+
+#: src/ssl.c:400 src/tls.c:252
+msgid "Initializing the TLS/SSL connection with MUA..."
+msgstr "Khởi động kết nối TLS/SSL với MUA..."
+
+#: src/ssl.c:417 src/tls.c:284
+msgid "TLS/SSL handshake failed!"
+msgstr "Việc bắt tay TLS/SSL không thành công."
+
+#: src/ssl.c:450
+#, c-format
+msgid "%s connection using %s (%u bit)"
+msgid_plural "%s connection using %s (%u bits)"
+msgstr[0] "%s kết nối dùng %s (%u bit)"
+
+#: src/ssl.c:462
+#, c-format
+msgid "Server public key is %d bit"
+msgid_plural "Server public key is %d bits"
+msgstr[0] "Khoá công máy phục vụ là %d bit"
+
+#: src/ssl.c:467
+msgid "Certificate:"
+msgstr "Chứng nhận:"
+
+#: src/ssl.c:472
+msgid "X509_NAME_oneline [subject] failed!"
+msgstr "X509_NAME_oneline [subject] (tên dòng đơn [chủ đề]) không thành công."
+
+#: src/ssl.c:475
+#, c-format
+msgid "Subject: %s"
+msgstr "Chủ đề: %s"
+
+#: src/ssl.c:479
+msgid "X509_NAME_oneline [issuer] failed!"
+msgstr ""
+"X509_NAME_oneline [issuer] (tên dòng đơn [nhà phát hành]) không thành công."
+
+#: src/ssl.c:482
+#, c-format
+msgid "Issuer: %s"
+msgstr "Nhà phát hành: %s"
+
+#: src/tls.c:185 src/tls.c:263
+#, c-format
+msgid "TLS error reading `%s': %s"
+msgstr "Gặp lỗi TLS khi đọc `%s': %s"
+
+#: src/tls.c:305
+msgid "No certificate was sent."
+msgstr "Chưa gởi chứng nhận."
+
+#: src/tls.c:310
+msgid "The certificate is not trusted."
+msgstr "Không tin chứng nhận đó."
+
+#: src/tls.c:315
+msgid "The certificate has expired."
+msgstr "Chứng nhận đó đã hết hạn."
+
+#: src/tls.c:320
+msgid "The certificate is not yet activated."
+msgstr "ChÆ°a hoạt hóa chứng nhận đó." + +#: src/tls.c:330 +msgid "No certificate was found!" +msgstr "ChÆ°a tìm thấy chứng nhận." + +#: src/tls.c:335 +msgid "The certificate is trusted." +msgstr "Tin chứng nhận đó." + +#: src/tls.c:365 +#, c-format +msgid "- Anonymous DH using prime of %d bit.\n" +msgid_plural "- Anonymous DH using prime of %d bits.\n" +msgstr[0] "- DH vô danh dùng số nguyên tố của %d bit.\n" + +#: src/tls.c:373 +#, c-format +msgid "- Ephemeral DH using prime of %d bit.\n" +msgid_plural "- Ephemeral DH using prime of %d bits.\n" +msgstr[0] "- DH phù du dùng số nguyên tố của %d bit.\n" + +#: src/tls.c:384 +#, c-format +msgid "- Protocol: %s\n" +msgstr "- Giao thức: %s\n" + +#: src/tls.c:388 +#, c-format +msgid "- Certificate Type: %s\n" +msgstr "- Kiểu chứng nhận: %s\n" + +#: src/tls.c:391 +#, c-format +msgid "- Compression: %s\n" +msgstr "- Nén: %s\n" + +#: src/tls.c:394 +#, c-format +msgid "- Cipher: %s\n" +msgstr "- Mật mã: %s\n" + +#: src/tls.c:397 +#, c-format +msgid "- MAC: %s\n" +msgstr "- MAC: %s\n" + +#: src/tls.c:425 +#, c-format +msgid "- Certificate info:\n" +msgstr "- Thông tin chứng nhận:\n" + +#: src/tls.c:429 +#, c-format +msgid "- Certificate is valid since: %s" +msgstr "- Chứng nhận đó hợp lệ sau: %s" + +#: src/tls.c:431 +#, c-format +msgid "- Certificate expires: %s" +msgstr "- Chứng nhận đó hết hạn: %s" + +#: src/tls.c:436 +#, c-format +msgid "- Certificate fingerprint: " +msgstr "- Dấu Ä‘iá»m chỉ chứng nhận: " + +#: src/tls.c:446 +#, c-format +msgid "- Certificate serial number: " +msgstr "- Số sản xuất chứng nhận: " + +#: src/tls.c:455 +#, c-format +msgid "- Certificate public key: " +msgstr "- Khoá công chứng nhận: " + +#: src/tls.c:458 +#, c-format +msgid "RSA\n" +msgstr "RSA\n" + +#: src/tls.c:459 +#, c-format +msgid "- Modulus: %d bit\n" +msgid_plural "- Modulus: %d bits\n" +msgstr[0] "- Giá trị tuyệt đối: %d bit\n" + +#: src/tls.c:464 +#, c-format +msgid "DSA\n" +msgstr "DSA\n" + +#: src/tls.c:465 +#, c-format +msgid "- Exponent: %d bit\n" +msgid_plural "- Exponent: %d bits\n" +msgstr[0] "- Số mÅ© : %d bit\n" + +#: src/tls.c:469 +#, c-format +msgid "UNKNOWN\n" +msgstr "KHÔNG BIẾT\n" + +#: src/tls.c:471 +#, c-format +msgid "- Certificate version: #%d\n" +msgstr "- Phiên bản chứng nhận: #%d\n" + +#: src/tls.c:478 +#, c-format +msgid "- Certificate Issuer's DN: %s\n" +msgstr "- Tên miá»n của nhà phát hành chứng nhận: %s\n" + +#: src/tunnel.c:318 src/tunnel.c:387 +msgid "Transferring message(s)..." +msgstr "Truyá»n thông Ä‘iệp..." + +#: src/tunnel.c:385 +msgid "Starting SMTP session..." +msgstr "Bắt đầu phiên chạy SMTP..." + +#: src/tunnel.c:468 +msgid "Using the TLS/SSL encryption..." +msgstr "Dùng mật mã TLS/SSL..." + +#: src/tunnel.c:482 src/tunnel.c:626 +#, c-format +msgid "WARNING: %s" +msgstr "CẢNH BÃO : %s" + +#: src/tunnel.c:483 +msgid "STARTTLS command failed." +msgstr "Lệnh STARTTLS không thành công." + +#: src/tunnel.c:617 +msgid "Using TLS/SSL encryption between Anubis and remote MTA only..." +msgstr "Dùng mật mã TLS/SSL chỉ giữa trình Anubis và MTA ở xa thôi..." + +#: src/tunnel.c:627 +msgid "STARTTLS (ONEWAY) command failed." +msgstr "Lệnh STARTTLS (ONEWAY) (chỉ má»™t chiá»u) không thành công." + +#: ap-gl/ap-gl.c:41 ap-gl/bridge.c:178 src/ap-config.c:41 src/bridge.c:182 +msgid "Bridging" +msgstr "Cầu dẫn" + +#: ap-gl/ap-gl.c:41 src/ap-config.c:41 +msgid "Set bridging and IP-related options" +msgstr "" +"Lập các tùy chá»n cầu dẫn và các tùy chá»n liên quân đến giao thức Mạng (IP)." 
+
+#: ap-gl/ap-gl.c:43 ap-gl/ap-gl.c:81 ap-gl/bridge.c:132 src/ap-config.c:43
+#: src/ap-config.c:112 src/ap-config.c:120 src/bridge.c:117 src/bridge.c:121
+#: sysinfo.c:466 sysinfo.c:467 src/ui.glade.h:69 src/monitor-impls.cpp:709
+#: ../sheets/ciscocomputer.sheet.in.h:50
+#, c-format
+msgid "Wireless"
+msgstr "Vô tuyến"
+
+#: ap-gl/ap-gl.c:43 src/ap-config.c:43
+msgid "Set wireless options"
+msgstr "Lập các tùy chọn vô tuyến."
+
+#: ui/prefs-dialog.glade.h:54 ../ui/user_info.glade.h:58
+msgid "Privacy"
+msgstr "Riêng tư"
+
+#: ap-gl/ap-gl.c:45 src/ap-config.c:45 src/ap-config.c:57
+msgid "MAC auth"
+msgstr "Xác thực MAC"
+
+#: ap-gl/ap-gl.c:46 src/ap-config.c:46 src/ap-config.c:58 ../src/dialogs.c:774
+msgid "Community"
+msgstr "Cộng đồng"
+
+#: ap-gl/ap-gl.c:47 src/ap-config.c:47
+msgid "Set radio signal power and antenna options"
+msgstr "Lập năng lượng tín hiệu thu thanh và các tùy chọn ăngten."
+
+#: ap-gl/ap-gl.c:62 src/ap-config.c:80
+msgid "Upload"
+msgstr "Tải lên"
+
+#: ap-gl/ap-gl.c:62 src/ap-config.c:80
+msgid "Activate current configuration"
+msgstr "Hoạt hóa cấu hình hiện có"
+
+#: ap-gl/ap-gl.c:63 src/ap-config.c:81
+msgid "Restore factory default settings"
+msgstr "Phục hồi các thiết lập mặc định của hãng"
+
+#: web/template/keywords_view_bottom.tpl:2 ../src/glade-editor.c:766
+#: src/settings.c:1506
+msgid "Reset"
+msgstr "Đặt lại"
+
+#: ap-gl/ap-gl.c:65 src/ap-config.c:83
+msgid "Reset AP. All not uploaded configuration will be lost"
+msgstr ""
+"Lập lại Điểm Truy cập. Như thế thì mọi cấu hình chưa tải lên sẽ bị mất."
+
+#: ap-gl/ap-gl.c:66 src/ap-config.c:84
+msgid "TestMode"
+msgstr "Chế độ thử"
+
+#: ap-gl/ap-gl.c:66 src/ap-config.c:84
+msgid "Put Access Point in test mode"
+msgstr "Đặt Điểm Truy cập vào chế độ thử"
+
+#: ap-gl/ap-gl.c:79 src/ap-config.c:110 src/ap-config.c:119
+msgid "SysInfo"
+msgstr "Thông tin hệ thống"
+
+#: ../src/netstatus-iface.c:880
+msgid "Ethernet"
+msgstr "Ethernet"
+
+#: ap-gl/ap-gl.c:80 src/ap-config.c:111
+msgid "Get ethernet port statistics"
+msgstr "Gọi thống kê cổng Ethernet"
+
+#: ap-gl/ap-gl.c:82 src/ap-config.c:113 src/ap-config.c:121
+msgid "Stations"
+msgstr "Trạm"
+
+#: ap-gl/ap-gl.c:83 src/ap-config.c:114
+msgid "KnownAPs"
+msgstr "Điểm TC đã biết"
+
+#: ap-gl/ap-gl.c:83 src/ap-config.c:114
+msgid "Get info about known Access Points"
+msgstr "Gọi thông tin về các Điểm Truy cập được biết"
+
+#: src/fe-gtk/dccgui.c:586 src/fe-gtk/dccgui.c:744
+#: ../widgets/gtk+.xml.in.h:105 libexif/olympus/mnote-olympus-tag.c:113
+msgid "Info"
+msgstr "Thông tin"
+
+#: ap-gl/ap-gl.c:101 src/ap-config.c:148
+msgid "Config"
+msgstr "Cấu hình"
+
+#: ap-gl/ap-gl.c:102 src/ap-config.c:149
+msgid "Execute commands on Access Point"
+msgstr "Thực hiện lệnh với Điểm Truy cập"
+
+#: ../glom/glom.glade.h:78 ../ui/connect.glade.h:5
+msgid "Connect"
+msgstr "Kết nối"
+
+#: info/session.c:3672 info/session.c:3678 ../ui/mlview-search-node.glade.h:3
+#: ../scripts/test.c:309 ../glade/search.glade.h:7 ../glade/straw.glade.h:54
+#: search_gui.c:526 search_gui.c:579 po/silky.glade.h:160
+msgid "Search"
+msgstr "Tìm kiếm"
+
+#: ../src/Win_GParted.cc:112 src/interface.c:733 ../ui/user_info.glade.h:7
+#: ../pan/dialogs/pan-about.c:167
+msgid "About"
+msgstr "Giới thiệu"
+
+#: ap-gl/ap-gl.c:107 src/ap-config.c:154 ../src/users/users-table.c:65
+msgid "Shell"
+msgstr "Hệ vỏ"
+
+#: web/template/auth.tpl:3
+msgid "Exit"
+msgstr "Thoát"
+
+#: ap-gl/ap-gl.c:158 src/ap-config.c:204
+#, c-format
+msgid "Wireless Access Point Configurator 
ver. %s" +msgstr "Bá»™ cấu hình Äiểm Truy cập Vô tuyến phiên bản %s" + +#: ap-gl/auth_mac.c:27 src/auth_mac.c:27 +msgid "AuthorizedMacTableString packet error" +msgstr "Lá»—i gói tin AuthorizedMacTableString (chuá»—i bảng MAC đã xác thá»±c)" + +#: ap-gl/auth_mac.c:29 src/auth_mac.c:29 +msgid "[A] MAC authorization: " +msgstr "[A] Xác thá»±c MAC: " + +#: ap-gl/auth_mac.c:30 src/auth_mac.c:30 +msgid "Enter MAC: " +msgstr "Hãy nhập MAC: " + +#: ap-gl/auth_mac.c:31 src/auth_mac.c:31 +msgid "Delete Num: " +msgstr "Xoá bá» số :" + +#: ap-gl/auth_mac.c:32 src/auth_mac.c:32 +msgid "Authorized MAC addresses" +msgstr "Các địa chỉ MAC đã xác thá»±c" + +#: ap-gl/auth_mac.c:33 +msgid "NUM MAC address" +msgstr "SỠđịa chỉ MAC" + +#: ap-gl/auth_mac.c:34 src/auth_mac.c:34 +msgid "A - auth; N - new; D - del; arrows - scroll; W - write conf; Q - quit" +msgstr "" +"A - xác thá»±c; N - má»›i; D - xoá bá»; mÅ©i tên - cuá»™n; W - ghi cấu hình; Q - " +"thoát" + +#: ap-gl/auth_mac.c:35 +msgid "A - auth; IPSTF - set; W - write conf; Q - quit" +msgstr "A - xác thá»±c; IPSTF - lập; W - ghi cấu hình; Q - thoát" + +#: ap-gl/auth_mac.c:36 +msgid "A - auth; W - write conf; Q - quit" +msgstr "A - xác thá»±c; W - ghi cấu hình; Q - thoát" + +#: ap-gl/auth_mac.c:38 +msgid "[I] RADIUS SERVER IP: " +msgstr "[I] ÄỊA CHỈ IP MÃY CHỦ RADIUS: " + +#: ap-gl/auth_mac.c:39 +msgid "[P] RADIUS SERVER PORT: " +msgstr "[P] Cá»”NG MÃY CHỦ RADIUS: " + +#: ap-gl/auth_mac.c:40 +msgid "[S] RADIUS SERVER SECRET: " +msgstr "[S] BỊ MẤT MÃY CHỦ RADIUS: " + +#: ap-gl/auth_mac.c:41 +msgid "[T] REAUTHORIZATION TIME: " +msgstr "[T] THỜI GIAN XÃC THỨC LẠI: " + +#: ap-gl/auth_mac.c:42 +msgid "[F] RADIUS SOURCE PORT: " +msgstr "[F] Cá»”NG NGUá»’N RADIUS: " + +#: ap-gl/auth_mac.c:87 +msgid "Internal" +msgstr "Nộị bá»™" + +#: ap-gl/auth_mac.c:95 +msgid "" +msgstr "" + +#: ap-gl/bridge.c:26 src/bridge.c:26 +msgid "[I] IP: " +msgstr "[I] Äịa chỉ IP: " + +#: ap-gl/bridge.c:27 src/bridge.c:27 +msgid "[N] Netmask: " +msgstr "[N] Mặt nạ mạng: " + +#: ap-gl/bridge.c:28 src/bridge.c:28 +msgid "[G] Gateway: " +msgstr "[G] Cổng ra: " + +#: ap-gl/bridge.c:29 src/bridge.c:29 +msgid "[F] Filter non-IP traffic: " +msgstr "[F] Lá»c các tải khác IP: " + +#: ap-gl/bridge.c:30 src/bridge.c:30 +msgid "[P] Primary port: " +msgstr "[P] Cổng chính:" + +#: ap-gl/bridge.c:31 src/bridge.c:31 +msgid "Attached station MAC: " +msgstr "MAC trạm đã gắn:" + +#: ap-gl/bridge.c:32 src/bridge.c:32 +msgid "[D] DHCP client: " +msgstr "[D] Máy khách DHCP:" + +#: ap-gl/bridge.c:33 src/bridge.c:33 +msgid "[O] Operational mode: " +msgstr "[O] Chế đô thao tác:" + +#: ap-gl/bridge.c:34 src/bridge.c:34 +msgid "[M] Preferred BSSID (remote MAC addr.): " +msgstr "[M] BSSID Æ°a thích (địa chỉ MAC ở xa): " + +#: ap-gl/bridge.c:36 src/bridge.c:36 +msgid "[T] Trap-sending port(s): " +msgstr "[T] Cổng bắt gởi:" + +#: ap-gl/bridge.c:37 src/bridge.c:37 +msgid "[R] Forward broadcast traffic: " +msgstr "[R] Chuyển tiếp tải phát thanh:" + +#: ap-gl/bridge.c:39 +msgid "[U] Isolate wireless clients: " +msgstr "[U] Cách các máy/trình khách vô tuyến:" + +#: ap-gl/bridge.c:40 src/bridge.c:40 +msgid "INGFPDOMSCTRBU - set; W - write conf; Q - quit to menu" +msgstr "INGFPDOMSCTRBU - lập; W - ghi cấu hình Q - thoát vào trình Ä‘Æ¡n" + +#: ap-gl/bridge.c:124 src/bridge.c:109 +msgid "Wireless Bridge Point to MultiPoint" +msgstr "Äiểm Cấu dẫn Vô tuyến đến Äa Äiểm" + +#: ap-gl/bridge.c:125 src/bridge.c:110 +msgid "Access Point" +msgstr "Äiểm Truy cập" + +#: ap-gl/bridge.c:126 src/bridge.c:111 +msgid "Access Point client" 
+msgstr "Máy/trình khách Äiểm Truy cập" + +#: ap-gl/bridge.c:127 src/bridge.c:112 +msgid "Wireless Bridge Point to Point" +msgstr "Äiểm-đến-Äiểm Cẫu dẫn Vô tuyến" + +#: ap-gl/bridge.c:128 src/bridge.c:113 ../sheets/cisconetwork.sheet.in.h:76 +msgid "Repeater" +msgstr "Bá»™ lặp lại" + +#: ap-gl/stations.c:77 src/stations.c:100 +msgid "AP is currently in AP Client Mode => no associated STAtions." +msgstr "" +"Äiểm TC hiện có trong chế Ä‘á»™ khách Äiểm TC → không có trạm nào liên quân." + +#: ap-gl/stations.c:102 +msgid "# MAC LQ RSSI Status Port IP" +msgstr "# MAC LQ RSSI Trạngt Cổng IP" + +#: ap-gl/stations.c:128 src/stations.c:159 +msgid "AssociatedSTAsInfo packet error" +msgstr "Lá»—i gói tin AssociatedSTAsInfo (thông tin cac trạm liên quan)" + +#: ap-gl/stations.c:160 +msgid "Arrows - scroll; S - save to file; Q - quit to menu." +msgstr "MÅ©i tên - cuá»™n; S - lÆ°u vào tập tin; Q - thoát vào trình Ä‘Æ¡n." + +#: lib/aps.c:29 +msgid "Known Access Points" +msgstr "Các Äiểm TC đã biết" + +#: lib/aps.c:129 +msgid "Your Access Point is not in \"AP client\" mode => getting" +msgstr "" +"Äiểm Truy cập của bạn không phải trong chế Ä‘á»™ « trình khách Äiểm TC » → gá»i" + +#: lib/aps.c:132 +msgid "up-to-date \"Known APs\" info requires your AP to be" +msgstr "tin tức « Các Äiểm TC đã biết » cần thiết Äiểm TC bạn được" + +#: lib/aps.c:135 +msgid "temporarily configured into \"AP client\" mode and rebooted." +msgstr "" +"cấu hình tạm thá»i vào chế Ä‘á»™ « trình khách Äiểm TC » rồi được khởi Ä‘á»™ng lại." + +#: lib/aps.c:138 +msgid "Your AP will be reconfigured back to original mode by this" +msgstr "Tiện ích này sẽ cấu hình lại Äiểm TC bạn vào chế Ä‘á»™ trÆ°á»›c" + +#: lib/aps.c:141 +msgid "utility once you quit the \"KnownAP\" view. This, in turn, may" +msgstr "má»™t khi bạn thoát khung xem « Các Äiểm TC đã biết». Hành Ä‘á»™ng này" + +#: lib/aps.c:144 +msgid "cause loss of Access Point's current configuration." +msgstr "có lẽ sẽ làm cho Äiểm TC mất cấu hình hiện có." + +#: lib/aps.c:148 +msgid "Do NOT answer \"Yes\" if you're connected to the Access Point" +msgstr "ÄỪNG trả lá»i « Có » nếu bạn Ä‘ang kết nối đến Äiêm Truy cập" + +#: lib/aps.c:151 +msgid "via its wireless port." +msgstr "qua cổng vô tuyến của nó." + +#: lib/aps.c:153 lib/cmd.c:45 lib/cmd.c:75 lib/test.c:91 +msgid "Do you want to continue? " +msgstr "Bạn có muốn tiếp tục không?" + +#: lib/aps.c:207 +msgid "NetworkType" +msgstr "Kiểu mạng" + +#: lib/aps.c:283 +msgid "Infrastructure" +msgstr "HaÌ£ tầng cÆ¡ sở" + +#: lib/aps.c:343 lib/aps.c:351 lib/aps.c:359 +msgid "CN: Channel Name; P: Preambule Type (S: Short; L: Long);" +msgstr "CN: Tên kênh; P: Kiểu lá»i mở đầu (S: Vắn; L: Dài);" + +#: lib/aps.c:345 +msgid "RSSI: Radio Signal Strength Indicator [%]" +msgstr "RSSI: Chỉ báo Ä‘á»™ mạnh tín hiệu raÄ‘iô [%]" + +#: lib/aps.c:347 lib/aps.c:355 +msgid "; LQ: Link Quality [%]" +msgstr "; LQ: chất lượng liên kết [%]" + +#: lib/aps.c:353 +msgid "RSSI: Radio Signal Strength Indicator [dBm]" +msgstr "RSSI: Chỉ báo Ä‘á»™ mạnh tín hiệu raÄ‘iô [dBm]" + +#: lib/aps.c:361 +msgid "RSSI: Radio Signal Strength Indicator [raw]" +msgstr "RSSI: Chỉ báo Ä‘á»™ mạnh tín hiệu raÄ‘iô [thô]" + +#: lib/aps.c:363 +msgid "; LQ: Link Q. [raw]" +msgstr "; LQ: Chất lÆ°Æ¡ng liên kết [thô]" + +#: lib/aps.c:369 +msgid "" +"# con. to AP #; R refresh with reset; T toggle; Q quit; Other = refr. w/o " +"reset" +msgstr "" +"# kết nối đến ÄTC #; R cập nhật có lập lại; T bật/tắt; Q thoát; Other = cập " +"nhật không có lập lại" + +#: lib/aps.c:371 +msgid "" +"# con. 
+"view"
+msgstr ""
+"# kết nối đến ĐTC #; R khởi chạy quét ĐTC; T bật/tắt khung xem; Q thoát; "
+"Other = cập nhật khung xem"
+
+#: lib/aps.c:500 lib/ap_search.c:167
+#, c-format
+msgid "Failure in sendto(): %s. Press any key."
+msgstr "Lỗi trong sendto() (gởi cho): %s. Hãy bấm bất cứ phím nào."
+
+#: lib/aps.c:509
+msgid "You have just initiated the AP scan. Be advised that it may"
+msgstr "Bạn mới khởi chạy quét tìm Điểm Truy cập. Ghi chú là"
+
+#: lib/aps.c:512
+msgid "take a few seconds for your Access Point to find out some"
+msgstr "Điểm Truy cập có lẽ sẽ mất vài giây để tìm một số giá trị,"
+
+#: lib/aps.c:515
+msgid "values, so expect finishing the scan in about 5 seconds."
+msgstr "thì sẽ quét xong được trong khoảng 5 giây."
+
+#: lib/aps.c:518
+msgid "Also note that your Access Point stops forwarding the network"
+msgstr "Cũng hãy ghi chú là Điểm Truy cập bạn ngừng chuyển tiếp tải"
+
+#: lib/aps.c:521
+msgid "traffic while the scan is in progress, but restores itself"
+msgstr "mạng trong khi quét, nhưng mà phục hồi tự nó đến"
+
+#: lib/aps.c:524
+msgid "to normal operation in time ranging up to 1 minute."
+msgstr "thao tác bình thường trong thời gian đến 1 phút."
+
+#: lib/aps.c:527
+msgid "Hence, if you are connected to target Access Point via its"
+msgstr "Vì vậy, nếu bạn đang kết nối đến Điểm Truy cập đích qua"
+
+#: lib/aps.c:530
+msgid "wireless port, you need to wait a bit longer"
+msgstr "cổng vô tuyến của nó, thì cần phải chờ dài hơn một chút"
+
+#: lib/aps.c:533
+msgid "after pressing 'S'."
+msgstr "sau khi bấm phím S."
+
+#: lib/ap_search.c:48
+msgid "Community name: "
+msgstr "Tên cộng đồng: "
+
+#: lib/ap_search.c:49
+msgid " NUM IP ADDRESS MIB TYPE NAME"
+msgstr " SỐ ĐỊA CHỈ IP KIỂU MIB TÊN"
+
+#: lib/ap_search.c:100
+msgid "Please wait while scanning, or press 'Q' to quit."
+msgstr "Hãy đợi trong khi quét, hay bấm phím Q để thoát."
+
+#: lib/ap_search.c:116
+msgid "Can't set broadcast option on socket. Press any key."
+msgstr "Không thể lập tùy chọn phát thanh trên ổ cắm. Hãy bấm bất cứ phím nào."
+
+#: lib/ap_search.c:125
+msgid "Can't set multicast membership on socket. Press any key."
+msgstr ""
+"Không lập địa vị hội viên truyền một-nhiều trên ổ cắm. Hãy bấm bất cứ phím "
+"nào."
+
+#: lib/ap_search.c:132
+msgid "Scanning via network interface:"
+msgstr "Đang quét qua giao diện mạng:"
+
+#: lib/ap_search.c:133
+#, c-format
+msgid " Index: %i"
+msgstr " Chỉ mục: %i"
+
+#: lib/ap_search.c:135
+#, c-format
+msgid " Name: %s"
+msgstr " Tên: %s"
+
+#: lib/ap_search.c:137
+#, c-format
+msgid " IP: %s"
+msgstr " Địa chỉ IP: %s"
+
+#: lib/ap_search.c:146
+#, c-format
+msgid "Scanning for AP with MIB type: %s"
+msgstr "Đang quét tìm Điểm TC có kiểu MIB: %s"
+
+#: lib/ap_search.c:334
+msgid "Please enter SNMP community name that will be used for AP detection."
+msgstr "Hãy nhập tên cộng đồng SNMP sẽ dùng để phát hiện Điểm TC."
+
+#: lib/ap_search.c:345
+msgid "Access Points Search"
+msgstr "Tìm kiếm Điểm TC"
+
+#: lib/ap_search.c:363
+msgid "realloc() error."
+msgstr "Lỗi realloc()."
+
+#: lib/ap_search.c:375
+msgid "Network interface discovery error."
+msgstr "Lỗi phát hiện giao diện mạng."
+
+#: lib/ap_search.c:450
+msgid "No local network interfaces found. Press any key."
+msgstr "Chưa tìm thấy giao diện mạng địa phương. Hãy bấm bất cứ phím nào."
+
+#: lib/ap_search.c:452
+msgid "No directly reachable Access Points found. Press any key."
+msgstr "" +"ChÆ°a tìm thấy Äiểm Truy cập có thể tá»›i trá»±c tiếp. Hãy bấm bất cứ phím nào." + +#: lib/ap_search.c:457 +msgid "Single-screen maximum number of APs found." +msgstr "Tìm thấy số tối Ä‘a Äiểm TC cho má»™t màn hinh riêng lẻ." + +#: lib/ap_search.c:460 +msgid "# - connect to AP; Q - quit" +msgstr "# - kết nối đến ÄTC; Q - thoát" + +#: lib/ap-utils.h:79 +msgid "MAC address: " +msgstr "Äịa chỉ MAC:" + +#: lib/ap-utils.h:80 +msgid "[S] SNMP traps: " +msgstr "[S] NÆ¡i bắt SNMP:" + +#: lib/ap-utils.h:82 +msgid "[C] Frequency channel: " +msgstr "[C] Kênh tần số :" + +#: lib/ap-utils.h:84 +msgid "Receive antenna:" +msgstr "Ä‚ngten nhận:" + +#: lib/ap-utils.h:85 +msgid "[U] Left" +msgstr "[U] Trái" + +#: lib/ap-utils.h:86 +msgid "[I] Right" +msgstr "[I] Phải" + +#: lib/ap-utils.h:87 +msgid "Transmit antenna:" +msgstr "Ä‚ngten gởi:" + +#: lib/ap-utils.h:88 +msgid "[O] Left" +msgstr "[O] Trái" + +#: lib/ap-utils.h:89 +msgid "[P] Right" +msgstr "[P] Phải" + +#: lib/ap-utils.h:90 +msgid "Diversity select:" +msgstr "Chá»n tính nhiá»u dạng:" + +#: lib/ap-utils.h:91 +msgid "[T] Left" +msgstr "[T] Trái" + +#: lib/ap-utils.h:92 +msgid "[Y] Right" +msgstr "[Y] Phải" + +#: lib/ap-utils.h:97 ../storage/sunone-permissions-dialog.glade.h:22 +#: src/settings.c:727 +msgid "On" +msgstr "Bật" + +#: lib/ap-utils.h:98 src/fe-gtk/setup.c:140 src/galeon-prefs-dialog.c:434 +#: src/fe-gtk/menu.c:1408 src/settings.c:732 +#: libexif/olympus/mnote-olympus-entry.c:145 +msgid "Off" +msgstr "Tắt" + +#: lib/ap-utils.h:102 +msgid "Press any key to continue." +msgstr "Bấm bất cứ phím nào để tiếp tục." + +#: lib/ap-utils.h:103 +msgid "Q - quit to menu. T - toggle polling mode, Other key - force update." +msgstr "" +"Q - thoát vào trình Ä‘Æ¡n. T - bật/tắt chế Ä‘á»™ kiểm soát vòng, Phím khác - buá»™c " +"cập nhật.." + +#: lib/ap-utils.h:105 +msgid "Unable to write data to AP. Press any key to continue." +msgstr "Không thể ghi dữ liệu vào Äiểm TC. Bấm bất cứ phím nào để tiếp tục." + +#: lib/ap-utils.h:106 +msgid "Unable to retrieve (valid) data from AP. Press any key to continue." +msgstr "" +"Không thể lấy dữ liệu (hợp lệ) từ Äiểm TC. Bấm bất cứ phím nào để tiếp tục." + +#: lib/ap-utils.h:107 +msgid "Trying to retrieve data from AP. Please wait..." +msgstr "Cố gá»i dữ liệu từ Äiểm TC. Hãy Ä‘á»i...." + +#: lib/ap-utils.h:108 +msgid "Writing data to AP. Please wait..." +msgstr "Äang ghi dữ liệu vào Äiểm TC. Hãy Ä‘á»i..." + +#: lib/ap-utils.h:109 +msgid "Configuration written to the AP. Press any key to continue." +msgstr "Cấu hình đã được ghi vào Äiểm TC. Bấm bất cứ phím nào để tiếp tục." + +#: lib/ap-utils.h:110 +msgid "select() function error. Press any key." +msgstr "Lá»—i chức năng select(). Bấm bất cứ phím nào." + +#: lib/ap-utils.h:112 +msgid "Create socket error. Press any key." +msgstr "Lá»—i tạo ổ cắm. Bấm bất cứ phím nào." + +#: lib/ap-utils.h:113 +msgid "Bind socket error. Press any key." +msgstr "Lá»—i đóng kết ổ cắm. Bấm bất cứ phím nào." + +#: lib/ap-utils.h:115 +msgid "Back to main menu" +msgstr "Trở vá» trình Ä‘Æ¡n chính" + +#: lib/ap-utils.h:116 +msgid "Exit program" +msgstr "Thoát khá»i chÆ°Æ¡ng trình" + +#: lib/ap-utils.h:117 +msgid "Run subshell. To return type 'exit'." +msgstr "Chạy hệ vá» con. Äể trở vá» thì gõ 'exit' (thoát)." 
+
+#: lib/ap-utils.h:118
+msgid "Short info about program"
+msgstr "Thông tin vắn về chương trình"
+
+#: lib/ap-utils.h:119
+msgid "Find connected Access Points"
+msgstr "Tìm các Điểm Truy cập đã kết nối"
+
+#: lib/ap-utils.h:120
+msgid "Set connection options: ip and community"
+msgstr "Lập tùy chọn kết nối: giao thức Mạng (IP) và cộng đồng"
+
+#: lib/ap-utils.h:121
+msgid "Set encryption; edit WEP keys"
+msgstr "Lập mật mã; sửa đổi khoá WEP"
+
+#: lib/ap-utils.h:122
+msgid "Set MAC authorization; edit MAC authorization table"
+msgstr "Lập xác thực MAC; sửa đổi bảng xác thực MAC"
+
+#: lib/ap-utils.h:123
+msgid "Set SNMP community/password for access to the AP"
+msgstr "Lập cộng đồng/mật khẩu SNMP để truy cập Điểm TC"
+
+#: lib/ap-utils.h:124
+msgid "Get info about AP hardware and firmware"
+msgstr "Gọi thông tin về phần cứng Điểm TC và phần vững"
+
+#: lib/ap-utils.h:125
+msgid "Get wireless port statistics"
+msgstr "Gọi thống kê cổng vô tuyến"
+
+#: lib/ap-utils.h:126
+msgid "Get list of currently associated stations (Access Point clients)"
+msgstr "Gọi danh sách các trạm liên quan hiện có (máy khách Điểm Truy cập)"
+
+#: lib/ap-utils.h:127
+msgid "Get info and statistics from AP"
+msgstr "Gọi thông tin và thống kê từ Điểm TC"
+
+#: lib/ap-utils.h:128
+msgid "Set various configuration options"
+msgstr "Lập nhiều tùy chọn cấu hình khác nhau"
+
+#: lib/ap-utils.h:130
+msgid "Associated stations"
+msgstr "Trạm liên quan"
+
+#: lib/ap-utils.h:132
+msgid "Polling: on"
+msgstr "Kiểm soát vòng: bật"
+
+#: lib/ap-utils.h:133
+msgid "Polling: off"
+msgstr "Kiểm soát vòng: tắt"
+
+#: lib/cmd.c:40
+msgid "Restore factory default configuration"
+msgstr "Phục hồi cấu hình mặc định của hãng"
+
+#: lib/cmd.c:43
+msgid "After restoring factory defaults your current configuration"
+msgstr "Sau khi phục hồi mặc định của hãng thì cấu hình hiện có"
+
+#: lib/cmd.c:44
+msgid "will be lost."
+msgstr "sẽ bị mất."
+
+#: lib/cmd.c:63
+msgid "Factory default settings loaded. Press any key to continue."
+msgstr ""
+"Đã tải các thiết lập mặc định của hãng. Bấm bất cứ phím nào để tiếp tục."
+
+#: lib/cmd.c:71
+msgid "Reset Access Point"
+msgstr "Lập lại Điểm Truy cập"
+
+#: lib/cmd.c:74
+msgid "By reset you'll lose all non-uploaded configuration."
+msgstr "Khi lập lại thì sẽ mất các cấu hình chưa tải lên."
+
+#: lib/cmd.c:85
+msgid "Access Point reset. Press any key to continue."
+msgstr "Điểm Truy cập đã được đặt lại. Hãy bấm bất cứ phím nào để tiếp tục."
+
+#: lib/cmd.c:117
+msgid "Upload configuration"
+msgstr "Tải lên cấu hình"
+
+#: lib/cmd.c:119
+msgid "You may need to upload the configuration only if you've"
+msgstr "Thường bạn cần tải lên cấu hình chỉ nếu đã thay đổi"
+
+#: lib/cmd.c:121
+msgid "changed some option values before. Using this option may"
+msgstr "một số tùy chọn sau lần tải lên cuối cùng. Dùng tùy chọn"
+
+#: lib/cmd.c:123
+msgid "cause loss of your current configuration."
+msgstr "này có lẽ sẽ làm cho cấu hình hiện có bị mất."
+
+#: lib/cmd.c:135
+msgid "Configuration uploaded. Press any key to continue."
+msgstr "Cấu hình đã được tải lên. Hãy bấm bất cứ phím nào để tiếp tục."
+
+#: lib/common.c:30
+msgid "Access Point IP-address: "
+msgstr "Địa chỉ IP của Điểm Truy cập: "
+
+#: lib/common.c:31
+msgid "Password (community): "
+msgstr "Mật khẩu (cộng đồng): "
+
+#: lib/common.c:32
+msgid "Autodetect AP MIB properties? "
+msgstr "Tự động phát hiện các thuộc tính MIB của Điểm Truy cập không? "
+
+#: lib/common.c:33
+msgid "AP MIB type: "
+msgstr "Kiểu MIB của Điểm TC: "
+
+#: lib/common.c:34
+msgid "AP MIB vendor extensions: "
+msgstr "Phần mở rộng của nhà bán MIB Điểm TC: "
+
+#: lib/common.c:35
+msgid "Do you want to use AP's name as its label? "
+msgstr "Bạn có muốn đặt tên Điểm TC là nhãn nó chứ? "
+
+#: lib/common.c:36
+msgid "Access Point label: "
+msgstr "Nhãn Điểm Truy cập: "
+
+#: lib/common.c:37
+msgid "Save connect-settings: "
+msgstr "Lưu các thiết lập kết nối: "
+
+#: lib/common.c:100
+#, c-format
+msgid "From %s"
+msgstr "Từ %s"
+
+#: lib/common.c:102 src/Controller.cc:83 ../main/__init__.py:85
+#, c-format, python-format
+msgid "Version %s"
+msgstr "Phiên bản %s"
+
+#: lib/common.c:105
+msgid "Written by Roman Festchook roma@polesye.net"
+msgstr "Tác giả: Roman Festchook roma@polesye.net"
+
+#: lib/common.c:107
+msgid "Portions by Jan Rafaj aputils@cedric.unob.cz"
+msgstr "Một số phần bởi Jan Rafaj aputils@cedric.unob.cz"
+
+#: lib/common.c:109
+msgid "Copyright (c) 2001-2004"
+msgstr "Bản quyền © năm 2001-2004"
+
+#: lib/common.c:111
+msgid "Roman Festchook and Jan Rafaj"
+msgstr "Roman Festchook và Jan Rafaj"
+
+#: lib/common.c:114
+msgid "This program is distributed under the terms"
+msgstr "Chương trình này được phát hành với điều kiện"
+
+#: lib/common.c:116
+msgid "of the GNU General Public License version 2."
+msgstr "của Quyền Công Chung GNU (GPL) phiên bản 2."
+
+#: lib/common.c:118
+msgid "See the included COPYING file for details."
+msgstr "Để tìm chi tiết thì hãy xem tập tin COPYING (chép) đã gồm."
+
+#: lib/common.c:175
+msgid "Connect options"
+msgstr "Thiết lập kết nối"
+
+#: lib/common.c:183
+msgid "Enter IP address of your Access Point."
+msgstr "Nhập địa chỉ IP của Điểm Truy cập bạn."
+
+#: lib/common.c:192
+msgid "Entered characters will not be displayed for security reason."
+msgstr "Sẽ không hiển thị ký tự đã nhập, vì lý do bảo mật."
+
+#: lib/common.c:266
+msgid "This label will be stored on HDD (independently on AP name!)."
+msgstr "Sẽ cất giữ nhãn này vào đĩa cứng (không phụ thuộc vào tên Điểm TC)."
+
+#: lib/common.c:361
+msgid "Trying to probe AP for MIB properties. Please wait..."
+msgstr "Cố dò Điểm TC để tìm thuộc tính MIB. Hãy đợi..."
+
+#: lib/common.c:390
+msgid ""
+"Unable to determine AP MIB properties (no response from AP). Press any key."
+msgstr ""
+"Không thể quyết định thuộc tính MIB của Điểm TC (Điểm TC không trả lời). Hãy "
+"bấm bất cứ phím nào."
+
+#: lib/file.c:205
+msgid "NUM IP ADDRESS MIB TYPE MIB EXT. LABEL"
+msgstr "SỐ ĐỊA CHỈ IP KIỂU MIB PHẦN MIB NHÃN"
+
+#: lib/file.c:206
+msgid "Choose an AP to connect to"
+msgstr "Hãy chọn Điểm TC cần kết nối đến nó."
+
+#: lib/file.c:212
+msgid "1-9,C: connect; N: new; D: delete; W: save; Q: quit; arrows: scroll"
+msgstr "1-9,C: kết nối; N: mới; D: xoá bỏ; W: lưu; Q: thoát; mũi tên: cuộn"
+
+#: lib/file.c:268
+msgid "Connect to AP num:"
+msgstr "Kết nối đến Điểm TC số:"
+
+#: lib/file.c:304
+msgid "Delete num:"
+msgstr "Xoá bỏ số:"
+
+#: lib/file.c:372
+msgid "AP list file ~/.ap-config successfully written. Press any key."
+msgstr ""
+"Tập tin danh sách Điểm TC <~/.ap-config> đã được ghi thành công. Hãy bấm bất "
+"cứ phím nào."
+
+#: lib/file.c:376
+msgid "Unable to write AP list file ~/.ap-config. Press any key."
+msgstr ""
+"Không thể ghi tập tin danh sách Điểm TC ~/.ap-config. Hãy bấm bất cứ phím "
+"nào."
+
+#: lib/file.c:426
+msgid "Unable to write stations file. Press any key."
+msgstr "Không thể ghi tập tin trạm. Hãy bấm bất cứ phím nào."
+
+#: lib/file.c:428
+msgid "Stations file succesfully written. Press any key."
+msgstr "Tập tin trạm đã được ghi thành công. Hãy bấm bất cứ phím nào."
+
+#: lib/input.c:33
+msgid "Invalid value. Press any key to continue."
+msgstr "Giá trị không hợp lệ. Hãy bấm bất cứ phím nào để tiếp tục."
+
+#: lib/input.c:34
+#, c-format
+msgid "Value must be in range %u - %u. Press any key to continue."
+msgstr ""
+"Giá trị phải ở trong phạm vi %u - %u. Hãy bấm bất cứ phím nào để tiếp tục."
+
+#: lib/input.c:412
+msgid "Y - Yes; Any other key - No (it's safer to answer No)"
+msgstr "Y - Có; bất cứ phím khác nào - Không (an toàn hơn để trả lời Không)."
+
+#: lib/oui.c:6056
+msgid "Unknown or Private"
+msgstr "Lạ hay Riêng"
+
+#: lib/radio.c:26
+msgid ""
+"[key] - power level; UIOP or LR - antenna; W - write config; Q - quit to menu"
+msgstr ""
+"[key] - mức độ năng lượng; UIOP hay LR - ăngten; W - ghi cấu hình; Q - thoát "
+"vào trình đơn"
+
+#: lib/radio.c:28
+msgid "Antenna:"
+msgstr "Ăngten:"
+
+#: lib/radio.c:29
+msgid "[L] Left:"
+msgstr "[L] Trái:"
+
+#: lib/radio.c:30
+msgid "[R] Right:"
+msgstr "[R] Phải:"
+
+#: lib/radio.c:120
+msgid "Radio Configuration"
+msgstr "Cấu hình rađiô"
+
+#: lib/radio.c:121
+msgid "Output RF signal power level (CR31 register values)"
+msgstr "Mức độ năng lượng tín hiệu (giá trị thanh ghi CR31)"
+
+#: lib/radio.c:123
+msgid "Key Channel Level"
+msgstr "Mức độ kênh khoá"
+
+#: lib/radio.c:279
+msgid ""
+"You can't disable both antennas; unable to save antenna-config. Press any "
+"key."
+msgstr ""
+"Không thể vô hiệu hóa cả hai ăngten; không thể lưu cấu hình ăngten (antenna-"
+"config). Hãy bấm bất cứ phím nào."
+
+#: lib/scr.c:168
+#, c-format
+msgid "Current AP: %s Type: %s Ext: %s"
+msgstr "Điểm TC hiện có: %s Kiểu: %s Phần: %s"
+
+#: lib/set_community.c:27
+msgid "Set community/password"
+msgstr "Lập cộng đồng/mật khẩu"
+
+#: lib/set_community.c:28
+msgid "Key Access level"
+msgstr "Mức độ truy cập khoá"
+
+#: lib/set_community.c:29
+msgid "Community/Password"
+msgstr "Cộng đồng/mật khẩu"
+
+#: lib/set_community.c:30
+msgid "[U] User "
+msgstr "[U] Người dùng "
+
+#: lib/set_community.c:31
+msgid "[A] Administrator "
+msgstr "[A] Quản trị "
+
+#: lib/set_community.c:32
+msgid "[M] Manufacturer "
+msgstr "[M] Hãng chế tạo "
+
+#: lib/set_community.c:33
+msgid ""
+"[key] - set community/password; W - write config to AP; Q - quit to menu"
+msgstr ""
+"[key] - lập cộng đồng/mật khẩu; W - ghi cấu hình vào Điểm TC; Q - thoát vào "
+"trình đơn"
+
+#: lib/stat.c:29
+msgid "Ethernet Statistics"
+msgstr "Thống kê Ethernet"
+
+#: lib/stat.c:30
+msgid "Wireless Statistics"
+msgstr "Thống kê vô tuyến"
+
+#: lib/stat.c:98
+msgid "EthRxStat packet error. Press any key."
+msgstr "Lỗi gói tin EthRxStat. Hãy bấm bất cứ phím nào."
+
+#: lib/stat.c:111
+msgid "EthTxStat packet error. Press any key."
+msgstr "Lỗi gói tin EthTxStat. Hãy bấm bất cứ phím nào."
+
+#: lib/stat.c:115 ../src/gnome-netstatus.glade.h:16
+msgid "Received:"
+msgstr "Đã nhận:"
+
+#: lib/stat.c:116
+msgid "Transmitted:"
+msgstr "Đã gởi:"
+
+#: lib/stat.c:270
+msgid "WirelessStat packet error. Press any key."
+msgstr "Lỗi gói tin WirelessStat. Hãy bấm bất cứ phím nào."
+
+#: lib/sysinfo.c:29
+msgid "System Description: "
+msgstr "Mô tả hệ thống: "
+
+#: lib/sysinfo.c:30 gpe-conf-sysinfo.desktop.in.h:1
+msgid "System Info"
+msgstr "Thông tin hệ thống"
+
+#: lib/sysinfo.c:120
+msgid "Device hardware/software/name info:"
+msgstr "Thông tin phần cứng/phần mềm/tên thiết bị:"
+
+#: lib/sysinfo.c:154
+msgid "Product name:"
+msgstr "Tên sản phẩm:"
+
+#: lib/sysinfo.c:158
+#, c-format
+msgid "Product type: %u"
+msgstr "Kiểu sản phẩm: %u"
+
+#: lib/sysinfo.c:161
+msgid "OEM name:"
+msgstr "Tên hãng chế tạo thiết bị gốc:"
+
+#: lib/sysinfo.c:168
+#, c-format
+msgid "Hardware revision: %u"
+msgstr "Phiên bản phần cứng: %u"
+
+#: lib/sysinfo.c:176
+#, c-format
+msgid "Info structure version: %u"
+msgstr "Phiên bản cấu trúc thông tin: %u"
+
+#: lib/sysinfo.c:179 lib/sysinfo.c:355
+#, c-format
+msgid "Manufacturer OUI: %02X %02X %02X (%s)"
+msgstr "Hãng chế tạo OUI: %02X %02X %02X (%s)"
+
+#: lib/sysinfo.c:196
+#, c-format
+msgid "Uptime: %u days, %02u:%02u:%02u hours:mins:secs"
+msgstr "Thời gian chạy: %u ngày, %02u:%02u:%02u giờ:phút:giây"
+
+# Name: do not translate/ tên: đừng dịch
+#: lib/sysinfo.c:284
+msgid "FHSS 2.4 GHz"
+msgstr "FHSS 2.4 GHz"
+
+# Name: do not translate/ tên: đừng dịch
+#: lib/sysinfo.c:284
+msgid "DSSS 2.4 GHz"
+msgstr "DSSS 2.4 GHz"
+
+#: lib/sysinfo.c:284
+msgid "IR Baseband"
+msgstr "Dải tần cơ sở hồng ngoại"
+
+#: lib/sysinfo.c:285
+msgid "Commercial range 0..40 C"
+msgstr "Phạm vi thương mại 0º..40º C"
+
+#: lib/sysinfo.c:286
+msgid "Industrial range -30..70 C"
+msgstr "Phạm vi công nghiệp -30º..70º C"
+
+#: lib/sysinfo.c:289 ../src/gui.c:1050 libexif/canon/mnote-canon-entry.c:97
+#: libexif/canon/mnote-canon-entry.c:134
+msgid "manual"
+msgstr "thủ công"
+
+#: lib/sysinfo.c:289
+msgid "notsupported"
+msgstr "không hỗ trợ"
+
+#: lib/sysinfo.c:289
+msgid "dynamic"
+msgstr "động"
+
+#: lib/sysinfo.c:345 ../driverdialog.glade.h:5 src/interface.c:90
+msgid "Manufacturer:"
+msgstr "Hãng chế tạo:"
+
+#: lib/sysinfo.c:350
+msgid "Manufacturer ID:"
+msgstr "ID hãng chế tạo:"
+
+#: lib/sysinfo.c:361
+msgid "Product Name:"
+msgstr "Tên sản phẩm:"
+
+#: lib/sysinfo.c:367
+msgid "Product ID:"
+msgstr "ID sản phẩm:"
+
+#: lib/sysinfo.c:373
+msgid "Product Version:"
+msgstr "Phiên bản sản phẩm:"
+
+#: lib/sysinfo.c:379
+#, c-format
+msgid "PHYType: %s"
+msgstr "Kiểu PHY: %s"
+
+#: lib/sysinfo.c:382
+#, c-format
+msgid "Temperature: %s"
+msgstr "Nhiệt độ: %s"
+
+#: lib/sysinfo.c:390
+#, c-format
+msgid "Regulatory Domain: %s"
+msgstr "Miền điều tiết: %s"
+
+#: lib/sysinfo.c:391
+msgid "FCC (USA)"
+msgstr "FCC (Mỹ)"
+
+#: lib/sysinfo.c:392
+msgid "DOC (Canada)"
+msgstr "DOC (Ca-na-đa)"
+
+#: lib/sysinfo.c:393
+msgid "ETSI (Europe)"
+msgstr "ETSI (Châu Âu)"
+
+#: lib/sysinfo.c:396
+msgid "MKK (Japan)"
+msgstr "MKK (Nhật Bản)"
+
+#: lib/sysinfo.c:399
+#, c-format
+msgid "Transmit Power: %u mW"
+msgstr "Năng lượng gởi: %u mW"
+
+#: lib/sysinfo.c:429
+#, c-format
+msgid "WEP implemented: %s"
+msgstr "Đã thực hiện WEP: %s"
+
+#: lib/sysinfo.c:432
+#, c-format
+msgid "Diversity: %s"
+msgstr "Độ nhiều dạng: %s"
+
+#: lib/sysinfo.c:460
+#, c-format
+msgid "Uptime: %u:%02u:%02u.%02u"
+msgstr "Thời gian chạy: %u:%02u:%02u.%02u"
+
+#: lib/sysinfo.c:480
+#, c-format
+msgid "IP Address: %s"
+msgstr "Địa chỉ IP: %s"
+
+#: lib/test.c:26
+msgid "[T] Test mode: "
+msgstr "[T] Chế độ thử: "
+
+#: lib/test.c:27
+msgid "[A] Antenna: "
+msgstr "[A] Ăngten: "
+
+#: lib/test.c:28
+msgid "[S] Signal level: "
+msgstr "[S] Mức độ tín hiệu: "
hiệu :" + +#: lib/test.c:29 +msgid "[R] Rate: " +msgstr "[R] Tá»· lệ:" + +#: lib/test.c:30 +msgid "[F] TxFiler: " +msgstr "[F] TxFiler:" + +#: lib/test.c:31 +msgid "[O] Command: " +msgstr "[O] Lệnh:" + +#: lib/test.c:32 +msgid "T - Test mode On/Off; CASRFO - set options; Q - quit to menu" +msgstr "T — Bật/tắt chế Ä‘á»™ thá»­; CASRFO — lập tùy chá»n; Q — thoát vào trình Ä‘Æ¡n" + +#: src/fe-gtk/setup.c:190 ../plug-ins/gfig/gfig-dialog.c:1714 +#: ../lib/properties.c:64 ../lib/properties.h:468 ../lib/widgets.c:638 +#: ../glade/editor.c:508 ../glade/gbwidgets/gbtextview.c:49 +#: ../widgets/gtk+.xml.in.h:113 app/sample-editor.c:1461 +msgid "Left" +msgstr "Trái" + +#: src/fe-gtk/setup.c:191 ../plug-ins/gfig/gfig-dialog.c:1713 +#: ../lib/properties.c:66 ../lib/properties.h:470 ../lib/widgets.c:650 +#: ../glade/editor.c:517 ../glade/gbwidgets/gbtextview.c:50 +#: ../widgets/gtk+.xml.in.h:154 app/sample-editor.c:1473 +msgid "Right" +msgstr "Phải" + +#: lib/test.c:86 +msgid "Test mode" +msgstr "Chế Ä‘á»™ thá»­" + +#: lib/test.c:89 +msgid "Using the \"Test mode\" may cause loss of your current" +msgstr "Dùng « Chế Ä‘á»™ thá»­ » có lẽ sẽ làm cho cấu hình hiện có" + +#: lib/test.c:90 +msgid "configuration." +msgstr "cấu hình." + +#: lib/test.c:102 ../src/nautilus-cvs.c:581 +msgid "Options:" +msgstr "Tùy chá»n:" + +#: lib/test.c:128 +msgid "Statistics:" +msgstr "Thống kê:" + +#: lib/test.c:129 +msgid "Success Frames: 0 Failed Frames: 0" +msgstr "Khung được: 0 Khung không được: 0" + +#: lib/test.c:190 +#, c-format +msgid "Success Frames: %lu Failed Frames: %lu" +msgstr "Khung được: %lu Khung không được: %lu" + +#: lib/wep.c:28 +msgid "Privacy Settings" +msgstr "Thiết lập riêng tÆ°" + +#: lib/wep.c:29 +msgid "[E] Standard encryption mechanism: " +msgstr "[E] CÆ¡ chế mật mã chuẩn:" + +#: lib/wep.c:30 +msgid "[A] Allow unencrypted: " +msgstr "[A} Cho phép không mật mã:" + +#: lib/wep.c:31 +msgid "[K] Default WEP key: " +msgstr "[K] Khoá WEP mặc định:" + +#: lib/wep.c:32 +msgid "[P] Public key: " +msgstr "[P] Khoá công:" + +#: lib/wep.c:85 +msgid "EK1234 - set; W - write conf; Q - quit to menu" +msgstr "EK1234 - lập; W - ghi cấu hình; Q - thoát vào trình Ä‘Æ¡n" + +#: lib/wep.c:94 lib/wep.c:263 +msgid "Key WEP" +msgstr "Khoá WEP" + +#: lib/wep.c:101 +msgid "Hint! Confused by WEP key values? See man ap-config for info..." +msgstr "" +"Gợi ý! Bạn có lẫn lá»™n giữa nhÆ°ng giá trị khoá WEP khác nhau không? 
Hãy dùng " +"lệnh:\n" +"man ap-config" + +#: lib/wep.c:247 +msgid "AEPK1234 - set options; W - write conf; Q - quit to menu" +msgstr "AEPK1234 - lập tùy chá»n; W - ghi cấu hình; Q - thoát vào trình Ä‘Æ¡n" + +# Name: do not translate/ tên: đừng dịch +#: lib/wlan.c:27 +msgid "[E] ESSID: " +msgstr "[E] ESSID: " + +#: lib/wlan.c:28 +msgid "[N] AP name: " +msgstr "[N] Tên Äiểm TC: " + +#: lib/wlan.c:30 +msgid "[K] AP contact: " +msgstr "[K] Liên lạc Äiểm TC: " + +#: lib/wlan.c:31 +msgid "[L] AP location: " +msgstr "[L] vị trí Äiểm TC: " + +#: lib/wlan.c:33 +msgid "[R] RTS threshold: " +msgstr "[R] ngưỡng RTS: " + +#: lib/wlan.c:34 +msgid "[F] Fragmentation threshold: " +msgstr "[F] ngưỡng tế phân:" + +#: lib/wlan.c:35 +msgid "[P] Preambule type: " +msgstr "[P] Kiểu lá»i mở đầu :" + +#: lib/wlan.c:36 +msgid "[A] Auth type: " +msgstr "[A] Kiểu xác thá»±c:" + +#: lib/wlan.c:37 +msgid "Open system" +msgstr "Hệ thống mở" + +#: lib/wlan.c:38 +msgid "Shared key" +msgstr "Khoá dùng chung" + +#: lib/wlan.c:39 +msgid "Both types" +msgstr "Cả hai kiểu" + +#: lib/wlan.c:40 +msgid "[U] Auto rate fallback: " +msgstr "[U] rút lui tá»· lệ tá»± Ä‘á»™ng:" + +#: lib/wlan.c:41 +msgid "[S] Insert ESSID in broadcast packets: " +msgstr "[S] Chèn ESSID vào gói tin phát thanh:" + +#: lib/wlan.c:42 +msgid "Basic and Supported rates:" +msgstr "Tá»· lệ cÆ¡ ban và đã há»— trợ :" + +#: lib/wlan.c:43 +msgid "Key Rate Status" +msgstr "Khoá Tá»· lệ Trang thái" + +#: lib/wlan.c:45 +msgid "[I] International roaming: " +msgstr "[I] Ä‘i lang thang khắp thế giá»›i:" + +#: lib/wlan.c:46 +msgid "[B] Beacon period (msec): " +msgstr "[B] chu kỳ máy tín hiệu (miligiây):" + +#: lib/wlan.c:47 +msgid "[D] DTIM sending interval (beacons): " +msgstr "[D] thá»i gian giữa lần gởi DTIM (máy tín hiệu)" + +#: lib/wlan.c:48 +msgid "[T] SIFS time (msec): " +msgstr "[T] Thá»i gian SIFS (miligiây):" + +#: lib/wlan.c:49 +msgid "[key] - set option; W - write conf; Q - quit to menu" +msgstr "[key] - lập tùy chá»n; W - ghi cấu hình; Q - thoát vào trình Ä‘Æ¡n" + +#: lib/wlan.c:125 libexif/exif-format.c:35 +msgid "Short" +msgstr "Ngắn" + +#: lib/wlan.c:125 libexif/exif-format.c:36 +#, fuzzy +msgid "Long" +msgstr "" +"#-#-#-#-# Compendium04.po (NAME) #-#-#-#-#\n" +"Lâu\n" +"#-#-#-#-# libexif-0.6.13.vi.po (libexif-0.6.13) #-#-#-#-#\n" +"Dài" + +#: lib/wlan.c:237 +msgid "Wireless Settings" +msgstr "Thiết lập vô tuyến" + +#: lib/wlan.c:731 +msgid "Antenna Configuration:" +msgstr "Cấu hình ăngten:" + +#: ../plug-ins/MapObject/mapobject_ui.c:473 +msgid "General Options" +msgstr "Tùy chá»n chung" + +#: lib/wlan.c:750 +msgid "" +"UIOPTY - antenna; SCANLEDFR1234 - options; W - write conf; Q - quit to menu" +msgstr "" +"UIOPTY - ăngten; SCANLEDFR1234 - tùy chá»n; W - ghi cấu hình; Q - thoát vào " +"trình Ä‘Æ¡n" + +#: src/ap-config.c:54 +msgid "Set general options" +msgstr "Lập tùy chá»n chung" + +#: src/ap-config.c:55 +msgid "Set advanced options" +msgstr "Lập tùy chá»n nâng cao" + +#: src/ap-config.c:89 +msgid "Reset AP." 
+msgstr "Lập lại Äiểm TC" + +#: src/ap-config.c:122 +msgid "Latest" +msgstr "Má»›i nhất" + +#: src/ap-config.c:122 +msgid "Get info about latest events" +msgstr "Gá»i tin tức vá» sá»± kiện má»›i nhất" + +#: src/ap-mrtg.c:42 +msgid "" +"\n" +"Usage:\n" +msgstr "" +"\n" +"Cách sá»­ dụng:\n" + +#: src/ap-mrtg.c:44 +msgid "" +"\tap-mrtg -i ip -c community -t type [-b bssid] [-n name] [-a aptype] [-v] [-" +"h] [-r]\n" +"\n" +msgstr "" +"\tap-mrtg -i ip -c cá»™ng đồng -t kiểu [-b bssid] [-n tên] [-a kiểu Äiểm TC] [-" +"v] [-h] [-r]\n" +"\n" + +#: src/ap-mrtg.c:46 +msgid "" +"Get stats from AP and return it in MRTG parsable format\n" +"\n" +msgstr "" +"Gá»i thống kê từ Äiểm TC và gởi trả nó trong dạng mà MRTG có phân tách được\n" + +#: src/ap-mrtg.c:47 +msgid "-i ip - AP ip address\n" +msgstr "-i ip - địa chỉ IP của Äiểm TC\n" + +#: src/ap-mrtg.c:48 +msgid "-c community - SNMP community string\n" +msgstr "-c community - chuá»—i _cá»™ng đồng_ SNMP\n" + +#: src/ap-mrtg.c:50 +msgid "" +"-t type - statistics type ireless, thernet, associated tations " +"or ink quality in client mode\n" +msgstr "" +"-t type - _kiểu_ thống kê:\n" +"w - vô tuyến\n" +" e - Ethernet\n" +"s - trạm liên quan\n" +" l - chất lượng liên kết trong chế Ä‘á»™ máy khách\n" + +#: src/ap-mrtg.c:52 +msgid "" +"-b bssid - mac address of the AP to which get link quality, only if " +"type=l\n" +msgstr "" +"-b bssid - địa chỉ MAC của Äiểm TC mà cần goi chất lượng liên kết đến " +"nó, chỉ nếu kiểu=l\n" + +#: src/ap-mrtg.c:53 +msgid "-n name - AP name - for check only\n" +msgstr "-n name - _tên_ Äiểm TC - chỉ để kiểm tra\n" + +#: src/ap-mrtg.c:54 +msgid "" +"-a aptype - AP type - 410 (default) or 510 for ATMEL12350's, like the " +"ME-102\n" +msgstr "" +"-a aptype - _kiểu Äiểm TC_ - 410 (mặc định) hay 510 cho các máy " +"ATMEL12350, nhÆ°ME-102\n" + +#: src/ap-mrtg.c:56 +msgid "-v - report MRTG about problems connecting to AP\n" +msgstr "-v - thông báo MRTG vá» vấn Ä‘á» khi kết nối đến Äiểm TC\n" + +#: src/ap-mrtg.c:57 +msgid "-r - reset AP when getting LinkQuality stats\n" +msgstr "" +"-r - _lập lại_ Äiểm TC khi gá»i thống kê chất lượng liên kết\n" + +#: src/ap-mrtg.c:58 +msgid "" +"-h - print this help screen\n" +"\n" +msgstr "" +"-h - hiển thị _trợ giúp_ nàỳ\n" +"\n" + +#: src/ap-mrtg.c:59 +#, c-format +msgid "" +"ap-mrtg %s Copyright (c) 2002-2003 Roman Festchook\n" +"\n" +msgstr "" +"ap-mrtg %s Bản quyá»n © năm 2002-2003 Roman Festchook\n" +"\n" + +#: src/ap-mrtg.c:143 +msgid "Invalid IP-address\n" +msgstr "Äịa chỉ IP không hợp lệ\n" + +#: src/ap-mrtg.c:188 +#, c-format +msgid "Invalid AP-Type '%s' - valid types are 510 or 410\n" +msgstr "Kiểu Äiểm TC không hợp lệ '%s' - kiểu hợp lệ là 510 hay 410\n" + +#: src/ap-mrtg.c:207 +msgid "Create socket error" +msgstr "Lá»—i tạo ổ cắm" + +#: src/ap-mrtg.c:211 +msgid "Bind socket error" +msgstr "Lá»—i đóng kết ổ cắm" + +#: src/ap-trapd.c:148 +#, c-format +msgid "ap-trapd %s started%s%s." +msgstr "ap-trapd %s đã khởi chạy%s%s." + +#: src/ap-trapd.c:149 +msgid " on " +msgstr " bật" + +#: src/ap-trapd.c:155 +msgid "Unable to fork. Exiting." +msgstr "Không thể tạo tiến trình con nên thoát." + +#: src/ap-trapd.c:159 +msgid "Can't create socket. Exiting." +msgstr "Không tạo ổ cắm nên thoát." + +#: src/ap-trapd.c:165 +msgid "Can't bind socket. Exiting." +msgstr "Không thể đóng kết ổ cắm nên thoát." + +#: src/ap-trapd.c:172 +#, c-format +msgid "Can't bind to device %s. Exiting." +msgstr "Không thể đóng kết thiết bị %s nên thoát." 
+
+#: src/ap-trapd.c:183
+#, c-format
+msgid "Unable to process username %s. Error: %m."
+msgstr "Không thể xử lý tên người dùng %s. Lỗi: %m."
+
+#: src/ap-trapd.c:188
+#, c-format
+msgid "Unable to change to uid %d."
+msgstr "Không thể chuyển đổi sang UID %d."
+
+#: src/ap-trapd.c:235
+#, c-format
+msgid ""
+"Received unknown SNMP ver %d trap. From %s:%d. Agent: %s. Community: %s."
+msgstr ""
+"Đã nhận sự bắt SNMP phiên bản %d lạ. Từ %s:%d. Tác nhân: %s. Cộng đồng: %s."
+
+#: src/ap-trapd.c:307
+#, c-format
+msgid "Agent:v%d %s (%s@%s:%d) %s%s%s. SysUptime %d:%02d:%02d.%02d"
+msgstr ""
+"Tác nhân:v%d %s (%s@%s:%d) %s%s%s. Thời gian chạy hệ thống %d:%02d:%02d.%02d"
+
+#: src/auth_mac.c:33
+msgid "NUM MAC address"
+msgstr "Số Địa chỉ MAC"
+
+#: src/bridge.c:35
+msgid "[C] Configuration-enabled port(s): "
+msgstr "[C] Cổng đã bật trong cấu hình: "
+
+#: src/bridge.c:38
+msgid "[B] Isolate wireless clients (broadcast traffic): "
+msgstr "[B] Cách máy khách vô tuyến (tải phát thanh): "
+
+#: src/bridge.c:39
+msgid "[U] Isolate wireless clients (unicast traffic): "
+msgstr "[U] Cách máy khách vô tuyến (tải một-một): "
+
+#: src/nwn_advanced.c:60
+#, c-format
+msgid "[D] DB Station Timeout: %d"
+msgstr "[D] Thời hạn trạm DB: %d"
+
+#: src/nwn_advanced.c:62
+#, c-format
+msgid "[A] ACK Window: %d"
+msgstr "[A] Cửa sổ ACK: %d"
+
+#: pppconfig:323 ../plug-ins/common/warp.c:552
+msgid "Advanced Options"
+msgstr "Tùy chọn cấp cao"
+
+#: src/nwn_advanced.c:66
+msgid "DA - options; W - write conf; Q - quit to menu"
+msgstr "DA - tùy chọn; W - ghi cấu hình; Q - thoát vào trình đơn"
+
+#: src/nwn_latest.c:30
+#, c-format
+msgid "Reason: %u Station: %02X%02X%02X%02X%02X%02X"
+msgstr "Lý do: %u Trạm: %02X%02X%02X%02X%02X%02X"
+
+#: src/nwn_latest.c:88
+msgid "Latest Events"
+msgstr "Sự kiện mới nhất"
+
+#: src/nwn_latest.c:89
+msgid "Disassociate:"
+msgstr "Phân ra:"
+
+#: src/nwn_latest.c:96
+msgid "Deauthenticate:"
+msgstr "Bỏ xác thực:"
+
+#: src/nwn_latest.c:103
+msgid "Authenticate Fail:"
+msgstr "Không xác thực được:"
+
+#: src/nwn_latest.c:109 ../app/dialogs/module-dialog.c:506
+msgid "Last error:"
+msgstr "Lỗi cuối cùng:"
+
+#: src/nwn_latest.c:110
+msgid "Error:"
+msgstr "Lỗi:"
+
+#: src/stations.c:33
+msgid "AP link state"
+msgstr "Tình trạng liên kết Điểm TC"
+
+# Name: do not translate/ tên: đừng dịch
+#: src/stations.c:37
+msgid " # MAC "
+msgstr " # MAC "
+
+#: src/stations.c:40
+msgid " # MAC Parent MAC RSSI Status MACn IP "
+msgstr " # MAC MAC cha RSSI Trạngt MACn IP "
+
+#: src/stations.c:195 src/stations.c:367
+msgid ""
+"Arrows - scroll; S - save to file; Q - return; T - toggle view; Other - "
+"refresh"
+msgstr ""
+"Mũi tên - cuộn; S - lưu vào tập tin; Q - trở về; T - bật/tắt khung xem; Khác "
+"- cập nhật"
+
+#: src/stations.c:200
+msgid "Arrows - scroll; S - save to file; Q - return; Other key - refresh"
+msgstr "Mũi tên - cuộn; S - lưu vào tập tin; Q - trở về; Phím khác - cập nhật"
+
+#: src/stations.c:279
+msgid "Id MAC address Quality Age RSSI"
+msgstr "Id Địa chỉ MAC Chất lượng Cũ RSSI"
+
+#: common/info.cpp:232
+msgid "a number between 0 and 1"
+msgstr "một số giữa 0 và 1"
+
+#: common/info.cpp:569
+msgid "in the form \"<name> <value>\""
+msgstr "kiểu \"<name> <value>\""
+
+#. TRANSLATORS: "true" and "false" are literal
+#. * values and should not be translated.
+#: common/config.cpp:977
+msgid "either \"true\" or \"false\""
+msgstr "hoặc \"true\" (thật) hoặc \"false\" (không thật)"
+
+#: common/config.cpp:996
+msgid "a positive integer"
+msgstr "số nguyên dương"
+
+#: common/config.cpp:1124
+msgid "# default: "
+msgstr "# mặc định: "
+
+#: common/config.cpp:1187
+#, c-format
+msgid ""
+"\n"
+"#######################################################################\n"
+"#\n"
+"# Filter: %s\n"
+"# %s\n"
+"#\n"
+"# configured as follows:\n"
+"\n"
+msgstr ""
+"\n"
+"#######################################################################\n"
+"#\n"
+"# Bộ lọc: %s\n"
+"# %s\n"
+"#\n"
+"# có cấu hình như theo đây:\n"
+"\n"
+
+#: common/config.cpp:1285
+msgid "ASPELL_CONF env var"
+msgstr "ASPELL_CONF env var (biến môi trường cấu hình trình Aspell)"
+
+#: common/config.cpp:1359
+msgid "main configuration file"
+msgstr "tập tin cấu hình chính"
+
+#: common/config.cpp:1361
+msgid "location of main configuration file"
+msgstr "vị trí của tập tin cấu hình chính"
+
+#: common/config.cpp:1364
+msgid "location of language data files"
+msgstr "vị trí của tập tin dữ liệu ngôn ngữ"
+
+#: common/config.cpp:1366
+msgid "create dictionary aliases"
+msgstr "tạo biệt hiệu từ điển"
+
+#: common/config.cpp:1368
+msgid "location of the main word list"
+msgstr "vị trí danh sách từ chính"
+
+#: common/config.cpp:1370
+msgid "encoding to expect data to be in"
+msgstr "ngờ dữ liệu bằng mã hóa này"
+
+#: common/config.cpp:1372
+msgid "add or removes a filter"
+msgstr "thêm hay bỏ bộ lọc"
+
+#: common/config.cpp:1374
+msgid "path(s) aspell looks for filters"
+msgstr "trình aspell tìm bộ lọc theo đường dẫn này"
+
+#: common/config.cpp:1378
+msgid "filter mode"
+msgstr "chế độ lọc"
+
+#: common/config.cpp:1380
+msgid "extra dictionaries to use"
+msgstr "từ điển thêm để sử dụng"
+
+#: common/config.cpp:1382
+msgid "location for personal files"
+msgstr "vị trí của tập tin cá nhân"
+
+#: common/config.cpp:1384
+msgid "ignore words <= n chars"
+msgstr "bỏ qua từ <= n ký tự"
+
+#. TRANSLATORS: It is OK if this is longer than 50 chars
+#: common/config.cpp:1387
+msgid "ignore accents when checking words -- CURRENTLY IGNORED"
+msgstr "bỏ qua dấu khi kiểm tra từ -- HIỆN BỎ QUA"
+
+#: common/config.cpp:1389
+msgid "ignore case when checking words"
+msgstr "bỏ qua hoa/thường khi kiểm tra từ"
+
+#: common/config.cpp:1391
+msgid "ignore commands to store replacement pairs"
+msgstr "bỏ qua lệnh để cất giữ đôi từ thay thế"
+
+#: common/config.cpp:1393 common/config.cpp:1460
+msgid "extra information for the word list"
+msgstr "thông tin thêm cho danh sách từ"
+
+#: common/config.cpp:1395
+msgid "keyboard definition to use for typo analysis"
+msgstr "cấu hình bàn phím để sử dụng để phân tích lỗi đánh máy"
+
+#: common/config.cpp:1397
+msgid "language code"
+msgstr "mã ngôn ngữ (Việt ngữ là vi)"
+
+#: common/config.cpp:1399
+msgid "deprecated, use lang instead"
+msgstr "bị phản đối nên hãy sử dụng đối số lang thay thế"
+
+#: common/config.cpp:1401
+msgid "location of local language data files"
+msgstr "vị trí của tập tin dữ liệu ngôn ngữ địa phương"
+
+#: common/config.cpp:1403
+msgid "base name of the main dictionary to use"
+msgstr "tên cơ sở của từ điển chính để sử dụng"
+
+#: common/config.cpp:1407
+msgid "set module name"
+msgstr "lập tên mô-đun"
+
+#: common/config.cpp:1409
+msgid "search order for modules"
+msgstr "thứ tự tìm kiếm mô-đun"
+
+#: common/config.cpp:1411
+msgid "enable Unicode normalization"
+msgstr "hiệu lực việc tiêu chuẩn hóa Unicode (Chỉ một mã)"
+
+#: common/config.cpp:1413
+msgid "Unicode normalization required for current lang"
+msgstr "Ngôn ngữ hiện cần đến việc tiêu chuẩn hóa Unicode"
+
+#. TRANSLATORS: the values after the ':' are literal
+#. values and should not be translated.
+#: common/config.cpp:1417
+msgid "Unicode normalization form: none, nfd, nfc, comp"
+msgstr "kiểu tiêu chuẩn hóa Unicode: none (không có), nfd, nfc, comp"
+
+#: common/config.cpp:1419
+msgid "avoid lossy conversions when normalization"
+msgstr "tránh việc chuyển đổi thiếu gì khi tiêu chuẩn hóa"
+
+#: common/config.cpp:1421
+msgid "personal configuration file"
+msgstr "tập tin cấu hình cá nhân"
+
+#: common/config.cpp:1424
+msgid "personal dictionary file name"
+msgstr "tên tập tin từ điển cá nhân"
+
+#: common/config.cpp:1427
+msgid "prefix directory"
+msgstr "thư mục tiền tố"
+
+#: common/config.cpp:1429
+msgid "replacements list file name"
+msgstr "tên tập tin danh sách từ thay thế"
+
+#: common/config.cpp:1432
+msgid "consider run-together words legal"
+msgstr "cho phép từ được kết hợp"
+
+#: common/config.cpp:1434
+msgid "maximum number that can be strung together"
+msgstr "tối đa số từ có thể kết hợp nhau"
+
+#: common/config.cpp:1436
+msgid "minimal length of interior words"
+msgstr "tối thiểu độ dài từ nội bộ"
+
+#: common/config.cpp:1438
+msgid "save replacement pairs on save all"
+msgstr "lưu đôi từ thay thế khi Lưu tất cả"
+
+#: common/config.cpp:1440
+msgid "set the prefix based on executable location"
+msgstr "lập tiền tố trên cơ sở vị trí trình chạy"
+
+#: common/config.cpp:1442
+msgid "size of the word list"
+msgstr "cỡ danh sách từ"
+
+#: common/config.cpp:1444
+msgid "no longer used"
+msgstr "không còn sử dụng lại"
+
+#: common/config.cpp:1446
+msgid "suggestion mode"
+msgstr "chế độ góp ý"
+
+#. TRANSLATORS: "sug-mode" is a literal value and should not be
+#. translated.
+#: common/config.cpp:1450
+msgid "edit distance to use, override sug-mode default"
+msgstr ""
+"hiệu chỉnh tầm để sử dụng, có quyền cao hơn sug-mode (chế độ góp ý) mặc định"
+
+#: common/config.cpp:1452
+msgid "use typo analysis, override sug-mode default"
+msgstr ""
+"phân tích lỗi đánh máy, có quyền cao hơn sug-mode (chế độ góp ý) mặc định"
+
+#: common/config.cpp:1454
+msgid "use replacement tables, override sug-mode default"
+msgstr ""
+"sử dụng bảng thay thế, có quyền cao hơn sug-mode (chế độ góp ý) mặc định"
+
+#: common/config.cpp:1456
+msgid "characters to insert when a word is split"
+msgstr "ký tự để chèn khi chia tách từ"
+
+#: common/config.cpp:1458
+msgid "use personal, replacement & session dictionaries"
+msgstr "sử dụng các từ điển loại cá nhân, thay thế và phiên hợp"
+
+#: common/config.cpp:1462
+msgid "search path for word list information files"
+msgstr "đường dẫn tìm kiếm đối với tập tin thông tin danh sách từ"
+
+#: common/config.cpp:1464
+msgid "enable warnings"
+msgstr "hiệu lực lời cảnh báo"
+
+#. TRANSLATORS: It is OK if this is longer than 50 chars
+#: common/config.cpp:1474
+msgid "indicator for affix flags in word lists -- CURRENTLY IGNORED"
+msgstr "cái chỉ cờ phụ tố trong danh sách từ -- HIỆN BỎ QUA"
+
+#: common/config.cpp:1476
+msgid "use affix compression when creating dictionaries"
+msgstr "sử dụng cách nén loại affix (thêm vào) khi tạo từ điển"
+
+#: common/config.cpp:1478
+msgid "remove invalid affix flags"
+msgstr "bỏ cờ affix (thêm vào) không hợp lệ"
+
+#: common/config.cpp:1480
+msgid "attempts to clean words so that they are valid"
+msgstr "lần cố sửa từ để làm hợp lệ"
+
+#: common/config.cpp:1482
+msgid "compute soundslike on demand rather than storing"
+msgstr "tính soundslike (nghe như) khi lệnh, không cất giữ"
+
+#: common/config.cpp:1484
+msgid "partially expand affixes for better suggestions"
+msgstr "mở rộng affix (thêm vào) cục bộ để góp ý tốt hơn"
+
+#: common/config.cpp:1486
+msgid "skip invalid words"
+msgstr "bỏ qua từ không hợp lệ"
+
+#: common/config.cpp:1488
+msgid "check if affix flags are valid"
+msgstr "kiểm tra cờ affix (thêm vào) là hợp lệ"
+
+#: common/config.cpp:1490
+msgid "check if words are valid"
+msgstr "kiểm tra từ là hợp lệ"
+
+#: common/config.cpp:1497
+msgid "create a backup file by appending \".bak\""
+msgstr "tạo tập tin lưu trữ bằng cách thêm vào phần cuối \".bak\""
+
+#: common/config.cpp:1499
+msgid "use byte offsets instead of character offsets"
+msgstr "sử dụng hiệu số byte thay thế hiệu số ký tự"
+
+#: common/config.cpp:1501
+msgid "create missing root/affix combinations"
+msgstr "tạo sự kết hợp gốc/phụ tố thiếu"
+
+#: common/config.cpp:1503
+msgid "keymapping for check mode: \"aspell\" or \"ispell\""
+msgstr "ảnh xạ khoá đối với chế độ kiểm tra: \"aspell\" hay \"ispell\""
+
+#: common/config.cpp:1505
+msgid "reverse the order of the suggest list"
+msgstr "đổi chiều thứ tự danh sách góp ý"
+
+#: common/config.cpp:1507
+msgid "suggest possible replacements"
+msgstr "góp ý từ thay thế có thể"
+
+#: common/config.cpp:1509
+msgid "time load time and suggest time in pipe mode"
+msgstr "ghi thời gian tải và thời gian góp ý khi trong chế độ ống"
+
+#: common/convert.cpp:303 common/convert.cpp:429
+#, c-format
+msgid ""
+"This could also mean that the file \"%s\" could not be opened for reading or "
+"does not exist."
+msgstr ""
+"Cũng có thể nghĩa là không thể mở tập tin \"%s\" để đọc, hay tập tin đó "
+"không tồn tại."
+
+#: common/convert.cpp:552 common/convert.cpp:659 common/convert.cpp:705
+#, c-format
+msgid "The Unicode code point U+%04X is unsupported."
+msgstr "Không hỗ trợ điểm mã Unicode U+%04X."
+
+#: common/convert.cpp:829
+#, c-format
+msgid "Invalid UTF-8 sequence at position %d."
+msgstr "Dãy UTF-8 không hợp lệ tại vị trí %d."
+
+#: common/errors.cpp:27
+msgid "Operation Not Supported: %what:1"
+msgstr "Không hỗ trợ thi hành: %what:1."
+
+#: common/errors.cpp:43
+msgid "The method \"%what:1\" is unimplemented in \"%where:2\"."
+msgstr "Không thi hành phương pháp « %what:1 » trong « %where:2 »."
+
+#: common/errors.cpp:51
+#, c-format
+msgid "%file:1:"
+msgstr "%file:1:"
+
+#: common/errors.cpp:59
+#, c-format
+msgid "The file \"%file:1\" can not be opened"
+msgstr "Không thể mở tập tin « %file:1 »."
+
+#: common/errors.cpp:67
+#, c-format
+msgid "The file \"%file:1\" can not be opened for reading."
+msgstr "Không thể mở tập tin « %file:1 » để đọc."
+
+#: common/errors.cpp:75
+#, c-format
+msgid "The file \"%file:1\" can not be opened for writing."
+msgstr "Không thể mở tập tin « %file:1 » để ghi."
+
+#: common/errors.cpp:83
+#, c-format
+msgid "The file name \"%file:1\" is invalid."
+msgstr "Tên tập tin « %file:1 » không hợp lệ."
+
+#: common/errors.cpp:91
+#, c-format
+msgid "The file \"%file:1\" is not in the proper format."
+msgstr "Tập tin « %file:1 » không có dạng đúng."
+
+#: common/errors.cpp:107
+#, c-format
+msgid "The directory \"%dir:1\" can not be opened for reading."
+msgstr "Không thể mở thư mục « %dir:1 » để đọc."
+
+#: common/errors.cpp:123
+msgid "The key \"%key:1\" is unknown."
+msgstr "Không biết khoá « %key:1 »."
+
+#: common/errors.cpp:131
+msgid "The value for option \"%key:1\" can not be changed."
+msgstr "Không thay đổi được trị số cho tùy chọn « %key:1 »."
+
+#: common/errors.cpp:139
+msgid "The key \"%key:1\" is not %accepted:2 and is thus invalid."
+msgstr "Khoá « %key:1 » không %accepted:2 thì không hợp lệ."
+
+#: common/errors.cpp:147
+msgid ""
+"The value \"%value:2\" is not %accepted:3 and is thus invalid for the key \"%"
+"key:1\"."
+msgstr ""
+"Trị số « %value:2 » không %accepted:3 thì không hợp lệ đối với khoá « %key:1 "
+"»."
+
+#: common/errors.cpp:163
+msgid "The key \"%key:1\" is not a string."
+msgstr "Khoá « %key:1 » không là chuỗi."
+
+#: common/errors.cpp:171
+msgid "The key \"%key:1\" is not an integer."
+msgstr "Khoá « %key:1 » không là số nguyên."
+
+#: common/errors.cpp:179
+msgid "The key \"%key:1\" is not a boolean."
+msgstr "Khoá « %key:1 » không là bun."
+
+#: common/errors.cpp:187
+msgid "The key \"%key:1\" is not a list."
+msgstr "Khoá « %key:1 » không là danh sách."
+
+#: common/errors.cpp:195
+msgid ""
+"The key \"%key:1\" does not take any parameters when prefixed by a \"reset-"
+"\"."
+msgstr ""
+"Khoá « %key:1 » không nhận tham số khi có tiền tố « reset- » (lập lại)."
+
+#: common/errors.cpp:203
+msgid ""
+"The key \"%key:1\" does not take any parameters when prefixed by a \"enable-"
+"\"."
+msgstr ""
+"Khoá « %key:1 » không nhận tham số khi có tiền tố « enable- » (hiệu lực)."
+
+#: common/errors.cpp:211
+msgid ""
+"The key \"%key:1\" does not take any parameters when prefixed by a \"dont-\" "
+"or \"disable-\"."
+msgstr ""
+"Khoá « %key:1 » không nhận tham số khi có tiền tố « dont- » hay « disable- "
+"» (không hay vô hiệu hóa)."
+
+#: common/errors.cpp:219
+msgid ""
+"The key \"%key:1\" does not take any parameters when prefixed by a \"clear-"
+"\"."
+msgstr "Khoá « %key:1 » không nhận tham số khi có tiá»n tố « clear- » (xoá)." + +#: common/errors.cpp:235 +#, c-format +msgid "The language \"%lang:1\" is not known." +msgstr "Không biết ngôn ngữ « %lang:1 »." + +#: common/errors.cpp:243 +#, c-format +msgid "The soundslike \"%sl:2\" is not known." +msgstr "Không biết Ä‘iá»u soundslike (nghe nhÆ°) « %sl:2 »." + +#: common/errors.cpp:251 +#, c-format +msgid "The language \"%lang:1\" is not supported." +msgstr "Không há»— trợ ngôn ngữ « %lang:1 »." + +#: common/errors.cpp:259 +#, c-format +msgid "No word lists can be found for the language \"%lang:1\"." +msgstr "Không tìm được danh sách từ đối vá»›i ngôn ngữ « %lang:1 »." + +#: common/errors.cpp:267 +#, c-format +msgid "Expected language \"%lang:1\" but got \"%prev:2\"." +msgstr "Ngá» ngôn ngữ « %lang:1 » nhÆ°ng có « %prev:2 »." + +#: common/errors.cpp:283 +#, c-format +msgid "Affix '%aff:1' is corrupt." +msgstr "Phụ tố « %aff:1 » là há»ng." + +#: common/errors.cpp:291 +#, c-format +msgid "The condition \"%cond:1\" is invalid." +msgstr "Äiá»u khiển « %cond:1 » không hợp lệ." + +#: common/errors.cpp:299 +#, c-format +msgid "" +"The condition \"%cond:1\" does not guarantee that \"%strip:2\" can always be " +"stripped." +msgstr "Äiá»u khiển « %cond:1 » không bảo đảm luôn có thể tÆ°á»›c « %strip:2 »." + +#: common/errors.cpp:307 +#, c-format +msgid "" +"The file \"%file:1\" is not in the proper format. Expected the file to be in " +"\"%exp:2\" not \"%got:3\"." +msgstr "" +"Tập tin « %file:1 » không co dạng đúng. Ngá» tập tin dạng « %exp:2 » không " +"phải « %got:3 »." + +#: common/errors.cpp:323 +#, c-format +msgid "The encoding \"%encod:1\" is not known." +msgstr "Không biết mã « %encod:1 »." + +#: common/errors.cpp:331 +#, c-format +msgid "The encoding \"%encod:1\" is not supported." +msgstr "Không há»— trợ mã « %encod:1 »." + +#: common/errors.cpp:339 +#, c-format +msgid "The conversion from \"%encod:1\" to \"%encod2:2\" is not supported." +msgstr "Không há»— trợ việc chuyển đổi từ « %encod:1 » sang « %encod2:2 »." + +#: common/errors.cpp:379 +#, c-format +msgid "The string \"%str:1\" is invalid." +msgstr "Chuá»—i « %str:1 » không hợp lệ." + +#: common/errors.cpp:387 +msgid "The word \"%word:1\" is invalid." +msgstr "Từ « %word:1 » không hợp lệ." + +#: common/errors.cpp:395 +msgid "The affix flag '%aff:1' is invalid for word \"%word:2\"." +msgstr "Cá» affix (thêm vào) « %aff:1 » không hợp lệ đối vá»›i « %word:2 »." + +#: common/errors.cpp:403 +msgid "The affix flag '%aff:1' can not be applied to word \"%word:2\"." +msgstr "Không áp dụng cá» affix (thêm vào) '%aff:1' vào từ « %word:2 »." + +#: common/errors.cpp:451 +msgid "not a version number" +msgstr "không là số phiên bản" + +#: common/errors.cpp:467 +msgid "dlopen returned \"%return:1\"." +msgstr "lệnh dlopen đã gá»i « %return:1 »." + +#: common/errors.cpp:475 +#, c-format +msgid "The file \"%filter:1\" does not contain any filters." +msgstr "Tập tin « %filter:1 » không có bá»™ lá»c nào." + +#: common/errors.cpp:483 +#, c-format +msgid "The filter \"%filter:1\" does not exist." +msgstr "Không có bá»™ lá»c « %filter:1 »." + +#: common/errors.cpp:491 common/errors.cpp:587 +msgid "Confused by version control." +msgstr "Äiá»u khiển phiên bản bối rối." + +#: common/errors.cpp:499 +msgid "Aspell version does not match filter's requirement." +msgstr "Phiên bản trình Aspell không khá»›p Ä‘iá»u mà bá»™ lá»c cần đến." + +#: common/errors.cpp:507 +msgid "Filter option already exists." +msgstr "Tùy chá»n lá»c đó đã có." 
+
+#: common/errors.cpp:515
+msgid "Use option modifiers only within named option."
+msgstr "Hãy sử dụng cờ sửa đổi tùy chọn chỉ ở trong tùy chọn đó."
+
+#: common/errors.cpp:523
+msgid "Option modifier unknown."
+msgstr "Không biết cờ sửa đổi tùy chọn đó."
+
+#: common/errors.cpp:531
+msgid "Error setting filter description."
+msgstr "Gặp lỗi khi lập mô tả bộ lọc."
+
+#: common/errors.cpp:547
+msgid "Empty option specifier."
+msgstr "Điều ghi rõ tùy chọn trống."
+
+#: common/errors.cpp:555
+#, c-format
+msgid "Option \"%option:1\" possibly specified prior to filter."
+msgstr "Có lẽ đã ghi rõ tùy chọn \"%option:1\" trước bộ lọc."
+
+#: common/errors.cpp:563
+msgid "Unknown mode description key \"%key:1\"."
+msgstr "Không biết khoá diễn tả chế độ \"%key:1\"."
+
+#: common/errors.cpp:571
+#, c-format
+msgid "Expecting \"%modekey:1\" key."
+msgstr "Ngờ khoá \"%modekey:1\"."
+
+#: common/errors.cpp:579
+msgid "Version specifier missing key: \"aspell\"."
+msgstr "Thiếu khoá \"aspell\" khi ghi rõ phiên bản."
+
+#: common/errors.cpp:595
+msgid "Aspell version does not match mode's requirement."
+msgstr "Phiên bản trình Aspell không khớp điều mà chế độ cần đến."
+
+#: common/errors.cpp:603
+msgid "Missing magic mode expression."
+msgstr "Thiếu biểu thức chế độ ma thuật."
+
+#: common/errors.cpp:611
+#, c-format
+msgid "Empty extension at char %char:1."
+msgstr "Điều mở rộng trống tại ký tự %char:1."
+
+#: common/errors.cpp:619
+#, c-format
+msgid "\"%mode:1\" error"
+msgstr "Lỗi \"%mode:1\""

=== added file 'src/github.com/chai2010/gettext-go/testdata/poedit-1.5.7-zh_CN.mo'
Binary files src/github.com/chai2010/gettext-go/testdata/poedit-1.5.7-zh_CN.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/poedit-1.5.7-zh_CN.mo 2016-03-22 15:18:22 +0000 differ
=== added file 'src/github.com/chai2010/gettext-go/testdata/poedit-1.5.7-zh_CN.po'
--- src/github.com/chai2010/gettext-go/testdata/poedit-1.5.7-zh_CN.po 1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/testdata/poedit-1.5.7-zh_CN.po 2016-03-22 15:18:22 +0000
@@ -0,0 +1,1591 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
+# This file is distributed under the same license as the PACKAGE package.
+# FIRST AUTHOR , YEAR.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: Poedit 1.5\n"
+"Report-Msgid-Bugs-To: poedit@googlegroups.com\n"
+"POT-Creation-Date: 2012-07-30 10:34+0200\n"
+"PO-Revision-Date: 2013-02-24 21:00+0800\n"
+"Last-Translator: Christopher Meng \n"
+"Language-Team: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"X-Generator: Poedit 1.5.5\n"
+
+#: ../src/edframe.cpp:2060
+msgid " (modified)"
+msgstr " (已修改)"
+
+#. TRANSLATORS: This is version information in about dialog, it is followed
+#. by version number when used (wxWidgets 2.8)
+#: ../src/edframe.cpp:2431
+msgid " Version "
+msgstr " 版本 "
+
+#: ../src/edframe.cpp:1367
+#, c-format
+msgid "%d issue with the translation found."
+msgid_plural "%d issues with the translation found."
+msgstr[0] "在翻译中å‘现了 %d 个问题。" + +#: ../src/edframe.cpp:2024 +#, c-format +msgid "%i %% translated, %i string" +msgid_plural "%i %% translated, %i strings" +msgstr[0] "%i%% 已翻译,%i 个字串" + +#: ../src/edframe.cpp:2029 +#, c-format +msgid "%i %% translated, %i string (%s)" +msgid_plural "%i %% translated, %i strings (%s)" +msgstr[0] "%i%% 已翻译,%i 个字串 (%s)" + +#: ../src/export_html.cpp:134 +#, c-format +msgid "" +"%i %% translated, %i strings (%i fuzzy, %i bad tokens, %i not translated)" +msgstr "%i%% 已翻译,%i 个字串 (%i 个模糊翻译,%i 个错误的标记,%i 个未翻译)" + +#: ../src/edframe.cpp:2014 +#, c-format +msgid "%i bad token" +msgid_plural "%i bad tokens" +msgstr[0] "%i 个错误的标记" + +#: ../src/edframe.cpp:2008 +#, c-format +msgid "%i fuzzy" +msgid_plural "%i fuzzy" +msgstr[0] "%i 个模糊翻译" + +#: ../src/catalog.cpp:132 +#, c-format +msgid "%i lines of file '%s' were not loaded correctly." +msgstr "%i 行在文件 '%s' 中未正确加载。" + +#: ../src/edframe.cpp:2020 +#, c-format +msgid "%i not translated" +msgid_plural "%i not translated" +msgstr[0] "%i 个未翻译" + +#: ../src/resources/menus.xrc:213 ../src/resources/menus.xrc:214 +msgid "&About" +msgstr "关于(&A)" + +#: ../src/resources/menus.xrc:212 +msgid "&About Poedit" +msgstr "关于 Poedit(&A)" + +#: ../src/resources/menus.xrc:107 +msgid "&Automatically Translate Using TM" +msgstr "使用 TM 自动翻译(&A)" + +#: ../src/resources/menus.xrc:106 +msgid "&Automatically translate using TM" +msgstr "使用 TM 自动翻译(&A)" + +#: ../src/edframe.cpp:2786 +msgid "&Bookmarks" +msgstr "书签(&B)" + +#: ../src/resources/manager.xrc:132 ../src/resources/menus.xrc:26 +msgid "&Close" +msgstr "关闭(&C)" + +#: ../src/resources/menus.xrc:176 +msgid "&Comment Window" +msgstr "注释窗å£(&C)" + +#: ../src/resources/menus.xrc:175 +msgid "&Comment window" +msgstr "注释窗å£(&C)" + +#: ../src/resources/menus.xrc:130 +msgid "&Done and Next" +msgstr "完æˆå¹¶è½¬åˆ°ä¸‹ä¸€ä¸ª(&D)" + +#: ../src/resources/menus.xrc:129 +msgid "&Done and next" +msgstr "完æˆå¹¶è½¬åˆ°ä¸‹ä¸€ä¸ª(&D)" + +#: ../src/resources/menus.xrc:56 +msgid "&Edit" +msgstr "编辑(&E)" + +#: ../src/edframe.cpp:471 ../src/resources/manager.xrc:125 +#: ../src/resources/menus.xrc:5 +msgid "&File" +msgstr "文件(&F)" + +#: ../src/resources/menus.xrc:73 +msgid "&Find..." +msgstr "查找(&F)..." + +#: ../src/edframe.cpp:480 ../src/resources/menus.xrc:126 +msgid "&Go" +msgstr "转到(&G)" + +#: ../src/edapp.cpp:183 ../src/resources/menus.xrc:216 +msgid "&Help" +msgstr "帮助(&H)" + +#: ../src/resources/menus.xrc:14 +msgid "&New Catalog..." +msgstr "新建编目(&N)..." + +#: ../src/resources/menus.xrc:13 +msgid "&New catalog..." +msgstr "新建编目(&N)..." + +#: ../src/resources/menus.xrc:142 +msgid "&Next Message" +msgstr "下一æ¡æ¶ˆæ¯(&N)" + +#: ../src/resources/menus.xrc:141 +msgid "&Next message" +msgstr "下一æ¡æ¶ˆæ¯(&N)" + +#: ../src/resources/menus.xrc:206 +msgid "&Online Help" +msgstr "在线帮助(&O)" + +#: ../src/resources/menus.xrc:205 +msgid "&Online help" +msgstr "在线帮助(&O)" + +#: ../src/resources/menus.xrc:21 +msgid "&Open..." +msgstr "打开(&O)..." + +#: ../src/resources/menus.xrc:90 +msgid "&Preferences" +msgstr "首选项(&P)" + +#: ../src/resources/manager.xrc:128 ../src/resources/menus.xrc:44 +msgid "&Preferences..." +msgstr "首选项(&P)..." + +#: ../src/resources/menus.xrc:137 +msgid "&Previous Message" +msgstr "上一æ¡æ¶ˆæ¯(&P)" + +#: ../src/resources/menus.xrc:136 +msgid "&Previous message" +msgstr "上一æ¡æ¶ˆæ¯(&P)" + +#: ../src/resources/menus.xrc:119 +msgid "&Properties..." +msgstr "属性(&P)..." 
+
+#: ../src/resources/menus.xrc:111
+msgid "&Purge Deleted Translations"
+msgstr "清除已删除的翻译(&P)"
+
+#: ../src/resources/menus.xrc:110
+msgid "&Purge deleted translations"
+msgstr "清除已删除的翻译(&P)"
+
+#: ../src/resources/menus.xrc:30
+msgid "&Save"
+msgstr "保存(&S)"
+
+#: ../src/resources/menus.xrc:70
+msgid "&Show References"
+msgstr "显示引用(&S)"
+
+#: ../src/resources/menus.xrc:69
+msgid "&Show references"
+msgstr "显示引用(&S)"
+
+#: ../src/resources/menus.xrc:198
+msgid "&Untranslated Entries First"
+msgstr "未翻译条目优先(&U)"
+
+#: ../src/resources/menus.xrc:197
+msgid "&Untranslated entries first"
+msgstr "未翻译条目优先(&U)"
+
+#: ../src/resources/menus.xrc:99
+msgid "&Update from Sources"
+msgstr "从源文更新(&U)"
+
+#: ../src/resources/menus.xrc:98
+msgid "&Update from sources"
+msgstr "从源文更新(&U)"
+
+#: ../src/resources/menus.xrc:115
+msgid "&Validate Translations"
+msgstr "验证翻译(&V)"
+
+#: ../src/resources/menus.xrc:114
+msgid "&Validate translations"
+msgstr "验证翻译(&V)"
+
+#: ../src/resources/menus.xrc:157
+msgid "&View"
+msgstr "查看(&V)"
+
+#: ../src/catalog.cpp:1572
+#, c-format
+msgid "'%s' is not a valid POT file."
+msgstr "'%s' 不是有效的 POT 文件。"
+
+#: ../src/summarydlg.cpp:58
+#, c-format
+msgid "(%i new, %i obsolete)"
+msgstr "(%i 个新建,%i 个已废弃)"
+
+#: ../src/resources/summary.xrc:63
+msgid "(0 new, 0 obsolete)"
+msgstr "(0 个新建,0 个已废弃)"
+
+#: ../src/chooselang.cpp:79
+msgid "(Use default language)"
+msgstr "(使用默认语言)"
+
+#: ../src/edframe.cpp:916
+msgid "(none of these)"
+msgstr "(这些都不是)"
+
+#: ../src/resources/find.xrc:94
+msgid "< Previous"
+msgstr "< 上一个"
+
+#: ../src/manager.cpp:377
+msgid "<unnamed>"
+msgstr "<未命名>"
+
+#. TRANSLATORS: This is titlebar of about dialog, the string ends with space
+#. and is followed by application name when used ("Poedit",
+#. but don't add it to this translation yourself) (wxWidgets 2.8)
+#: ../src/edframe.cpp:2435
+msgid "About "
+msgstr "关于 "
+
+#. TRANSLATORS: This is titlebar of about dialog, "%s" is application name
+#. ("Poedit" here, but please use "%s")
+#: ../src/edframe.cpp:2425
+#, c-format
+msgid "About %s"
+msgstr "关于 %s"
+
+#: ../src/resources/prefs.xrc:270
+msgid "Add"
+msgstr "添加"
+
+#: ../src/resources/manager.xrc:91
+msgid "Add directory to the list"
+msgstr "添加目录到列表"
+
+#: ../src/transmemupd_wizard.cpp:134 ../src/resources/tm_update.xrc:85
+msgid "Add files"
+msgstr "添加文件"
+
+#: ../src/resources/prefs.xrc:589
+msgid "Add path to the list of directories where catalogs lie."
+msgstr "将路径添加到编目所在目录的列表"
+
+#: ../src/resources/prefs.xrc:111
+msgid "Always change focus to text input field"
+msgstr "总是将焦点更改到文本输入字段"
+
+#: ../src/resources/prefs.xrc:514
+msgid "An item in input files list:"
+msgstr "在输入文件列表中的项:"
+
+#: ../src/resources/prefs.xrc:495
+msgid "An item in keywords list:"
+msgstr "在关键字列表中的项:"
+
+#: ../src/edframe.cpp:2367 ../src/edframe.cpp:2379
+msgid "Automatic Translations:"
+msgstr "自动翻译:"
+
+#: ../src/resources/prefs.xrc:128
+msgid "Automatic spellchecking"
+msgstr "自动检查拼写"
+
+#: ../src/export_html.cpp:173
+msgid "Automatic translation"
+msgstr "自动翻译"
+
+#: ../src/edframe.cpp:2365 ../src/edframe.cpp:2377
+msgid "Automatic translations:"
+msgstr "自动翻译:"
+
+#: ../src/resources/prefs.xrc:70
+msgid "Automatically check for new version of Poedit"
+msgstr "自动检查 Poedit 的新版本"
+
+#: ../src/resources/prefs.xrc:88
+msgid "Automatically compile .mo file on save"
+msgstr "保存时自动编译 .mo 文件"
+
+#: ../src/resources/prefs.xrc:342
+msgid "Automatically translate when updating catalog"
+msgstr "当更新编目时自动翻译"
+
+#: ../src/edframe.cpp:2301
+#, c-format
+msgid "Automatically translated %u strings"
+msgstr "已自动翻译 %u 个字串"
+
+#: ../src/edframe.cpp:2286
+msgid "Automatically translating..."
+msgstr "正在自动翻译..."
+
+#: ../src/manager.cpp:248
+msgid "Bad Tokens"
+msgstr "错误的标记"
+
+#: ../src/resources/properties.xrc:136
+msgid "Base path:"
+msgstr "基础路径:"
+
+#: ../src/resources/prefs.xrc:84
+msgid "Behavior"
+msgstr "行为"
+
+#: ../src/catalog.cpp:680
+msgid "Broken catalog file: plural form msgstr used without msgid_plural"
+msgstr "已损坏的编目文件: 在没有 msgid_plural 的情况下使用了复数形式的 msgstr"
+
+#: ../src/catalog.cpp:642
+msgid ""
+"Broken catalog file: singular form msgstr used together with msgid_plural"
+msgstr "已损坏的编目文件: 和 msgid_plural 一起使用了单数形式的 msgstr"
+
+#: ../src/resources/manager.xrc:90 ../src/resources/prefs.xrc:588
+#: ../src/resources/tm_update.xrc:37
+msgid "Browse"
+msgstr "浏览"
+
+#: ../src/resources/menus.xrc:95
+msgid "C&atalog"
+msgstr "编目(&A)"
+
+#: ../src/resources/comment.xrc:45
+msgid "C&lear"
+msgstr "清除(&L)"
+
+#: ../src/resources/prefs.xrc:141
+msgid "CR/LF conversion"
+msgstr "CR/LF 转换"
+
+#: ../src/resources/comment.xrc:38 ../src/resources/manager.xrc:113
+#: ../src/resources/prefs.xrc:424 ../src/resources/prefs.xrc:566
+#: ../src/resources/prefs.xrc:611 ../src/resources/progress.xrc:32
+#: ../src/resources/properties.xrc:198
+msgid "Cancel"
+msgstr "取消"
+
+#: ../src/transmem.cpp:732
+msgid "Cannot create TM database directory!"
+msgstr "不能创建 TM 数据库目录!"
+
+#: ../src/utility.cpp:57 ../src/utility.cpp:67
+msgid "Cannot create temporary directory."
+msgstr "不能创建临时目录。"
+
+#: ../src/gexecute.cpp:100
+#, c-format
+msgid "Cannot execute program: %s"
+msgstr "不能执行程序: %s"
+
+#: ../src/transmemupd.cpp:199
+msgid "Cannot extract catalogs from RPM file."
+msgstr "不能从 RPM 文件提取编目。"
+
+#: ../src/resources/find.xrc:36
+msgid "Case sensitive"
+msgstr "区分大小写"
+
+#: ../src/manager.cpp:244
+msgid "Catalog"
+msgstr "编目"
+
+#: ../src/edframe.cpp:987
+msgid "Catalog modified. Do you want to save changes?"
+msgstr "编目已修改。您想要保存更改吗?"
+
+#: ../src/resources/properties.xrc:4
+msgid "Catalog properties"
+msgstr "编目属性"
+
+#: ../src/resources/menus.xrc:9
+msgid "Catalogs &Manager"
+msgstr "编目管理器(&M)"
+
+#: ../src/resources/menus.xrc:8
+msgid "Catalogs &manager"
+msgstr "编目管理器(&M)"
+
+#: ../src/resources/prefs.xrc:63
+msgid "Change UI language"
+msgstr "更改 UI 语言"
+
+#: ../src/export_html.cpp:119 ../src/resources/properties.xrc:74
+msgid "Charset:"
+msgstr "字符集:"
+
+#: ../src/edframe.cpp:483
+msgid "Check for Updates..."
+msgstr "检查更新..."
+
+#: ../src/resources/toolbar.xrc:39
+msgid "Check for errors in the translation"
+msgstr "检查翻译中的错误"
+
+#: ../src/resources/prefs.xrc:204 ../src/resources/prefs.xrc:230
+msgid "Choose"
+msgstr "选择"
+
+#: ../src/edframe.cpp:2339 ../src/resources/menus.xrc:64
+msgid "Clear Translation"
+msgstr "清除翻译"
+
+#: ../src/resources/comment.xrc:46
+msgid "Clear the comment"
+msgstr "清除注释"
+
+#: ../src/edframe.cpp:2337 ../src/resources/menus.xrc:63
+msgid "Clear translation"
+msgstr "清除翻译"
+
+#: ../src/resources/find.xrc:87
+msgid "Close"
+msgstr "关闭"
+
+#: ../src/resources/toolbar.xrc:57
+msgid "Comment"
+msgstr "注释"
+
+#: ../src/resources/prefs.xrc:120
+msgid "Comment window is editable"
+msgstr "注释窗口可编辑"
+
+#: ../src/edframe.cpp:547 ../src/resources/comment.xrc:10
+msgid "Comment:"
+msgstr "注释:"
+
+#: ../src/resources/prefs.xrc:292
+msgid "Configuration"
+msgstr "配置"
+
+#: ../src/manager.cpp:407 ../src/manager.cpp:427
+msgid "Confirmation"
+msgstr "确认"
+
+#: ../src/edframe.cpp:1812
+msgid "Context:"
+msgstr "上下文:"
+
+#: ../src/edframe.cpp:2332 ../src/resources/menus.xrc:59
+msgid "Copy from Source Text"
+msgstr "从源文文本中复制"
+
+#: ../src/edframe.cpp:2330 ../src/resources/menus.xrc:58
+msgid "Copy from source text"
+msgstr "从源文文本中复制"
+
+#: ../src/catalog.cpp:1038
+#, c-format
+msgid "Couldn't load file %s, it is probably corrupted."
+msgstr "不能加载文件 %s，可能已损坏。"
+
+#: ../src/catalog.cpp:1307
+#, c-format
+msgid "Couldn't save file %s."
+msgstr "不能保存文件 %s。"
+
+#: ../src/resources/manager.xrc:44
+msgid "Create new translations project"
+msgstr "创建新的翻译项目"
+
+#: ../src/resources/prefs.xrc:252
+msgid "Database"
+msgstr "数据库"
+
+#: ../src/resources/manager.xrc:53 ../src/resources/prefs.xrc:390
+msgid "Delete"
+msgstr "删除"
+
+#: ../src/editlbox/editlbox.cpp:171
+msgid "Delete item"
+msgstr "删除项"
+
+#: ../src/resources/manager.xrc:54
+msgid "Delete the project"
+msgstr "删除项目"
+
+#: ../src/manager.cpp:300
+msgid "Directories:"
+msgstr "目录:"
+
+#: ../src/resources/menus.xrc:165
+msgid "Display &Line Numbers"
+msgstr "显示行号(&L)"
+
+#: ../src/resources/menus.xrc:170
+msgid "Display &Notes for Translators"
+msgstr "显示给译员的附注(&N)"
+
+#: ../src/resources/menus.xrc:160
+msgid "Display &Quotes"
+msgstr "显示引号(&Q)"
+
+#: ../src/resources/menus.xrc:164
+msgid "Display &line numbers"
+msgstr "显示行号(&L)"
+
+#: ../src/resources/menus.xrc:169
+msgid "Display &notes for translators"
+msgstr "显示给译员的附注(&N)"
+
+#: ../src/resources/menus.xrc:159
+msgid "Display &quotes"
+msgstr "显示引号(&Q)"
+
+#: ../src/manager.cpp:426
+msgid ""
+"Do you really want to do mass update of\n"
+"all catalogs in this project?"
+msgstr ""
+"您确实想要执行这个项目中所有编目的\n"
+"大量更新吗?"
+
+#: ../src/manager.cpp:406
+msgid "Do you want to delete the project?"
+msgstr "您想要删除项目吗?"
+
+#: ../src/edframe.cpp:2227
+msgid "Do you want to remove all translations that are no longer used?"
+msgstr "您想要移除不再使用的所有翻译吗?"
+
+#: ../src/edframe.cpp:999
+msgid "Don't Save"
+msgstr "不保存"
+
+#: ../src/resources/prefs.xrc:170
+msgid "Don't change format of existing catalogs"
+msgstr "不更改现有编目的格式"
+
+#: ../src/edframe.cpp:997
+msgid "Don't save"
+msgstr "不保存"
+
+#: ../src/attentionbar.cpp:195
+msgid "Don't show again"
+msgstr "不再显示"
+
+#: ../src/resources/manager.xrc:136 ../src/resources/menus.xrc:51
+msgid "E&xit"
+msgstr "退出(&X)"
+
+#: ../src/resources/menus.xrc:39
+msgid "E&xport..."
+msgstr "导出(&X)..."
+
+#: ../src/resources/manager.xrc:48 ../src/resources/prefs.xrc:383
+msgid "Edit"
+msgstr "编辑"
+
+#: ../src/resources/menus.xrc:85
+msgid "Edit &Comment"
+msgstr "编辑注释(&C)"
+
+#: ../src/resources/menus.xrc:84
+msgid "Edit &comment"
+msgstr "编辑注释(&C)"
+
+#: ../src/edframe.cpp:2346
+msgid "Edit Comment"
+msgstr "编辑注释"
+
+#: ../src/edframe.cpp:2344 ../src/resources/comment.xrc:4
+#: ../src/resources/toolbar.xrc:58
+msgid "Edit comment"
+msgstr "编辑注释"
+
+#: ../src/editlbox/editlbox.cpp:169
+msgid "Edit item"
+msgstr "编辑项"
+
+#: ../src/resources/manager.xrc:64
+msgid "Edit project"
+msgstr "编辑项目"
+
+#: ../src/resources/manager.xrc:49
+msgid "Edit the project"
+msgstr "编辑项目"
+
+#: ../src/resources/prefs.xrc:78
+msgid "Editor"
+msgstr "编辑器"
+
+#: ../src/resources/prefs.xrc:129
+msgid "Enables on-the-fly spellchecking"
+msgstr "启用运行中拼写检查"
+
+#: ../src/edframe.cpp:1309
+msgid "Entries in the catalog are probably incorrect."
+msgstr "编目中的条目可能不正确。"
+
+#: ../src/edframe.cpp:1911
+msgid ""
+"Entries in this catalog have different plural forms count from what "
+"catalog's Plural-Forms header says"
+msgstr "这个编目中的条目的复数形式数量与编目的 Plural-Forms 头所说明的不同"
+
+#: ../src/edframe.cpp:1376
+msgid ""
+"Entries with errors were marked in red in the list. Details of the error "
+"will be shown when you select such an entry."
+msgstr ""
+"有错误的条目在列表中被标记为红色。当您选择这样的条目时将显示错误的详细信息。"
+
+#: ../src/edframe.cpp:1960
+#, c-format
+msgid "Error loading message catalog file '%s'."
+msgstr "加载消息编目文件 '%s' 时存在错误。"
+
+#: ../src/fileviewer.cpp:226
+#, c-format
+msgid "Error opening file %s!"
+msgstr "打开文件 %s 时存在错误!"
+
+#: ../src/catalog.cpp:1354
+msgid "Error saving catalog"
+msgstr "保存编目时存在错误"
+
+#: ../src/errorbar.cpp:60
+msgid "Error:"
+msgstr "错误:"
+
+#: ../src/edframe.cpp:1138
+msgid "Export as..."
+msgstr "导出为..."
+
+#: ../src/resources/properties.xrc:127
+msgid "Extract text from source files in the following directories:"
+msgstr "从下列目录中的源文件提取文本:"
+
+#: ../src/digger.cpp:60
+#, c-format
+msgid "Failed command: %s"
+msgstr "失败的命令: %s"
+
+#: ../src/digger.cpp:114
+msgid "Failed to load extracted catalog."
+msgstr "未能加载已提取的编目。"
+
+#: ../src/digger.cpp:61
+msgid "Failed to merge gettext catalogs."
+msgstr "未能合并 gettext 编目。"
+
+#: ../src/edframe.cpp:393 ../src/edframe.cpp:1061
+#, c-format
+msgid "File '%s' doesn't exist."
+msgstr "文件 '%s' 不存在。"
+
+#: ../src/edframe.cpp:386
+#, c-format
+msgid "File '%s' is not a message catalog."
+msgstr "文件 '%s' 不是消息编目。"
+
+#: ../src/catalog.cpp:1268
+#, c-format
+msgid ""
+"File '%s' is read-only and cannot be saved.\n"
+"Please save it under different name."
+msgstr ""
+"文件 '%s' 为只读，不能保存。\n"
+"请用不同的名称保存。"
+
+#: ../src/transmemupd_wizard.cpp:59
+msgid "Files List"
+msgstr "文件列表"
+
+#: ../src/resources/find.xrc:78
+msgid "Find in automatic comments"
+msgstr "在自动注释中查找"
+
+#: ../src/resources/find.xrc:71
+msgid "Find in comments"
+msgstr "在注释中查找"
+
+#: ../src/resources/find.xrc:57
+msgid "Find in original strings"
+msgstr "在原始字串中查找"
+
+#: ../src/resources/find.xrc:64
+msgid "Find in translations"
+msgstr "在翻译中查找"
+
+#: ../src/resources/find.xrc:4
+msgid "Find..."
+msgstr "查找..."
+
+#: ../src/edframe.cpp:1941
+msgid "Fix the header"
+msgstr "修补头"
+
+#: ../src/resources/prefs.xrc:181
+msgid "Fonts"
+msgstr "字体"
+
+#: ../src/edframe.cpp:2719
+#, c-format
+msgid "Form %i"
+msgstr "形式 %i"
+
+#: ../src/edframe.cpp:2721
+#, c-format
+msgid "Form %i (e.g. \"%u\")"
+msgstr "形式 %i (例如 \"%u\")"
+
+#: ../src/manager.cpp:247 ../src/resources/toolbar.xrc:51
+msgid "Fuzzy"
+msgstr "模糊"
+
+#: ../src/export_html.cpp:179
+msgid "Fuzzy translation"
+msgstr "模糊翻译"
+
+#: ../src/edframe.cpp:1043 ../src/edframe.cpp:1102
+msgid "GNU gettext catalogs (*.po)|*.po|All files (*.*)|*.*"
+msgstr "GNU gettext 编目(*.po)|*.po|所有文件(*.*)|*.*"
+
+#: ../src/edframe.cpp:1176 ../src/edframe.cpp:1327
+msgid "GNU gettext templates (*.pot)|*.pot|All files (*.*)|*.*"
+msgstr "GNU gettext 模板(*.pot)|*.pot|所有文件(*.*)|*.*"
+
+#: ../src/resources/prefs.xrc:621
+msgid "Generate TM database"
+msgstr "生成 TM 数据库"
+
+#: ../src/resources/prefs.xrc:278
+msgid "Generate database"
+msgstr "生成数据库"
+
+#: ../src/edframe.cpp:2798
+#, c-format
+msgid "Go to Bookmark %i\tCtrl+%i"
+msgstr "转到书签 %i\tCtrl+%i"
+
+#: ../src/edframe.cpp:2792
+#, c-format
+msgid "Go to Bookmark %i\tCtrl+Alt+%i"
+msgstr "转到书签 %i\tCtrl+Alt+%i"
+
+#: ../src/edframe.cpp:2795
+#, c-format
+msgid "Go to bookmark %i\tCtrl+%i"
+msgstr "转到书签 %i\tCtrl+%i"
+
+#: ../src/edframe.cpp:1140
+msgid "HTML file (*.html)|*.html"
+msgstr "HTML 文件(*.html)|*.html"
+
+#: ../src/attentionbar.cpp:78
+msgid "Hide this notification message"
+msgstr "隐藏这个通知消息"
+
+#: ../src/resources/prefs.xrc:18
+msgid "Identity"
+msgstr "身份"
+
+#: ../src/resources/prefs.xrc:121
+msgid "If checked, the comment window will be editable."
+msgstr "如果选中，则注释窗口将可编辑。"
+
+#: ../src/edframe.cpp:2229
+msgid ""
+"If you continue with purging, all translations marked as deleted will be "
+"permanently removed. You will have to translate them again if they are added "
+"back in the future."
+msgstr ""
+"如果您继续清除，则所有被标记为已删除的翻译都将被永久移除。如果将来它们被添加"
+"回来，则您必须再翻译一遍。"
+
+#: ../src/resources/prefs.xrc:472
+msgid "Invocation:"
+msgstr "调用:"
+
+#: ../src/edframe.cpp:2234 ../src/transmem.cpp:1202
+msgid "Keep"
+msgstr "保持"
+
+#: ../src/propertiesdlg.cpp:58
+msgid "Keywords"
+msgstr "关键字"
+
+#: ../src/chooselang.cpp:159
+msgid "Language selection"
+msgstr "语言选择"
+
+#: ../src/export_html.cpp:108 ../src/resources/prefs.xrc:440
+#: ../src/resources/prefs.xrc:444 ../src/resources/properties.xrc:62
+msgid "Language:"
+msgstr "语言:"
+
+#: ../src/manager.cpp:249
+msgid "Last modified"
+msgstr "最后修改"
+
+#: ../src/resources/properties.xrc:111
+msgid "Learn about plural forms"
+msgstr "学习复数形式"
+
+#: ../src/edframe.cpp:889
+msgid "Learn more"
+msgstr "学习更多"
+
+#: ../src/edlistctrl.cpp:328
+msgid "Line"
+msgstr "行"
+
+#: ../src/catalog.cpp:145
+#, c-format
+msgid "Line %u of file '%s' is corrupted (not valid %s data)."
+msgstr "行 %u 在文件 '%s' 中已损坏(不是有效的 %s 数据)。"
+
+#: ../src/resources/prefs.xrc:149
+msgid "Line endings format:"
+msgstr "行结束格式:"
+
+#: ../src/resources/prefs.xrc:456
+msgid "List of extensions separated by semicolons (e.g. *.cpp;*.h):"
+msgstr "用分号分隔的扩展名列表(例如 *.cpp;*.h):"
+
+#: ../src/catalog.cpp:220
+#, c-format
+msgid "Malformed header: '%s'"
+msgstr "错误的头: '%s'"
+
+#: ../src/resources/prefs.xrc:300
+msgid "Max. # of missing words:"
+msgstr "缺少单词的最大数量:"
+
+#: ../src/resources/prefs.xrc:322
+msgid "Max. difference in sentence length:"
+msgstr "句子长度的最大差异:"
+
+#: ../src/catalog.cpp:1543
+msgid "Merging differences..."
+msgstr "合并差异..."
+
+#: ../src/editlbox/editlbox.cpp:173
+msgid "Move down"
+msgstr "下移"
+
+#: ../src/editlbox/editlbox.cpp:172
+msgid "Move up"
+msgstr "上移"
+
+#: ../src/prefsdlg.cpp:55
+msgid "My Languages"
+msgstr "我的语言"
+
+#: ../src/resources/menus.xrc:152
+msgid "Ne&xt Unfinished"
+msgstr "下一个未完成(&X)"
+
+#: ../src/resources/menus.xrc:151
+msgid "Ne&xt unfinished"
+msgstr "下一个未完成(&X)"
+
+#: ../src/resources/prefs.xrc:113
+msgid ""
+"Never let the list of strings take focus. If enabled, you must use Ctrl-"
+"arrows for keyboard navigation but you can also type text immediately, "
+"without having to press Tab to change focus."
+msgstr ""
+"从不让字串列表取得焦点。如果启用，您必须使用 “Ctrl-方向键” 进行键盘导航，但您"
+"也可以立即输入文本，不用按 Tab 改变焦点。"
+
+#: ../src/resources/manager.xrc:43 ../src/resources/prefs.xrc:378
+msgid "New"
+msgstr "新建"
+
+#: ../src/resources/menus.xrc:18
+msgid "New Catalog from POT File..."
+msgstr "从 POT 文件新建编目..."
+
+#: ../src/resources/menus.xrc:17
+msgid "New catalog from POT file..."
+msgstr "从 POT 文件新建编目..."
+
+#: ../src/editlbox/editlbox.cpp:170
+msgid "New item"
+msgstr "新建项"
+
+#: ../src/resources/summary.xrc:30
+msgid "New strings"
+msgstr "新建字串"
+
+#: ../src/resources/find.xrc:102
+msgid "Next >"
+msgstr "下一个 >"
+
+#: ../src/digger.cpp:201
+msgid "No files found in: "
+msgstr "在此位置中未找到文件: "
+
+#: ../src/edframe.cpp:1391
+msgid "No problems with the translation found."
+msgstr "翻译中未发现问题。"
+
+#: ../src/edframe.cpp:1433
+msgid "No references to this string found."
+msgstr "未找到对这个字串的引用。"
+
+#: ../src/export_html.cpp:151
+msgid "Notes"
+msgstr "附注"
+
+#: ../src/edframe.cpp:550
+msgid "Notes for translators:"
+msgstr "给译员的附注:"
+
+#: ../src/resources/comment.xrc:30 ../src/resources/manager.xrc:105
+#: ../src/resources/prefs.xrc:416 ../src/resources/prefs.xrc:558
+#: ../src/resources/properties.xrc:190 ../src/resources/summary.xrc:71
+msgid "OK"
+msgstr "确定"
+
+#: ../src/resources/summary.xrc:51
+msgid "Obsolete strings"
+msgstr "已废弃的字串"
+
+#: ../src/resources/toolbar.xrc:15 ../src/resources/toolbar.xrc:20
+msgid "Open"
+msgstr "打开"
+
+#: ../src/edframe.cpp:1041 ../src/resources/toolbar.xrc:16
+#: ../src/resources/toolbar.xrc:21
+msgid "Open catalog"
+msgstr "打开编目"
+
+#: ../src/edframe.cpp:1174 ../src/edframe.cpp:1325
+msgid "Open catalog template"
+msgstr "打开编目模板"
+
+#: ../src/resources/prefs.xrc:104
+msgid "Open catalogs manager on Poedit startup"
+msgstr "在 Poedit 启动时打开编目管理器"
+
+#: ../src/resources/menus.xrc:147
+msgid "P&revious Unfinished"
+msgstr "上一个未完成(&R)"
+
+#: ../src/resources/menus.xrc:146
+msgid "P&revious unfinished"
+msgstr "上一个未完成(&R)"
+
+#: ../src/resources/prefs.xrc:476
+msgid "Parser command:"
+msgstr "分析器命令:"
+
+#: ../src/resources/prefs.xrc:435
+msgid "Parser setup"
+msgstr "分析器设置"
+
+#: ../src/resources/prefs.xrc:353
+msgid "Parsers"
+msgstr "分析器"
+
+#: ../src/digger.cpp:92
+#, c-format
+msgid "Parsing %s files..."
+msgstr "正在分析 %s 文件..."
+
+#: ../src/propertiesdlg.cpp:60
+msgid "Paths"
+msgstr "路径"
+
+#: ../src/resources/prefs.xrc:12
+msgid "Personalize"
+msgstr "个人化"
+
+#: ../src/resources/prefs.xrc:271
+msgid "Pick language from the list of known languages"
+msgstr "从已知语言的列表中挑选语言"
+
+#: ../src/resources/tm_update.xrc:21
+msgid "Please add directories where locale files are stored on your system:"
+msgstr "请添加地区(locale)文件在您的系统上存储的目录:"
+
+#: ../src/edframe.cpp:1441
+msgid "Please choose the reference you want to show:"
+msgstr "请选择您想要显示的引用:"
+
+#: ../src/prefsdlg.cpp:334
+msgid "Please select language ISO code:"
+msgstr "请选择语言 ISO 代码:"
+
+#: ../src/edframe.cpp:925
+msgid "Please select language code:"
+msgstr "请选择语言代码:"
+
+#: ../src/transmem.cpp:1196
+#, c-format
+msgid ""
+"Please verify that all files were moved to the new location or do it "
+"manually if they weren't.\n"
+"\n"
+"Old location: %s\n"
+"New location: %s"
+msgstr ""
+"请核实所有的文件都被移动到新位置，如果没有，则手动执行。\n"
+"\n"
+"旧位置: %s\n"
+"新位置: %s"
+
+#: ../src/resources/properties.xrc:98
+msgid "Plural Forms:"
+msgstr "复数形式:"
+
+#: ../src/edframe.cpp:564
+msgid "Plural:"
+msgstr "复数:"
+
+#: ../src/edframe.cpp:410
+msgid "Poedit"
+msgstr "Poedit"
+
+#: ../src/manager.cpp:69
+msgid "Poedit - Catalogs manager"
+msgstr "Poedit - 编目管理器"
+
+#: ../src/digger.cpp:202
+msgid "Poedit did not find any files in scanned directories."
+msgstr "Poedit 在已扫描的目录中未找到任何文件。"
+
+#: ../src/edframe.cpp:2443
+msgid "Poedit is an easy to use translations editor."
+msgstr "Poedit 是一个易于使用的翻译编辑器。"
+
+#: ../src/transmem.cpp:1191
+msgid "Poedit translation memory error"
+msgstr "Poedit 翻译记忆错误"
+
+#: ../src/resources/prefs.xrc:4
+msgid "Preferences"
+msgstr "首选项"
+
+#: ../src/resources/prefs.xrc:603
+msgid "Proceed"
+msgstr "继续进行"
+
+#: ../src/export_html.cpp:101
+msgid "Project info"
+msgstr "项目信息"
+
+#: ../src/export_html.cpp:105 ../src/resources/properties.xrc:19
+msgid "Project name and version:"
+msgstr "项目名称和版本:"
+
+#: ../src/resources/manager.xrc:69
+msgid "Project name:"
+msgstr "项目名称:"
+
+#: ../src/edframe.cpp:2234 ../src/transmem.cpp:1202
+msgid "Purge"
+msgstr "清除"
+
+#: ../src/edframe.cpp:2225
+msgid "Purge deleted translations"
+msgstr "清除已删除的翻译"
+
+#: ../src/resources/manager.xrc:137 ../src/resources/menus.xrc:52
+msgid "Quit"
+msgstr "退出"
+
+#: ../src/edframe.cpp:1441
+msgid "References"
+msgstr "引用"
+
+#: ../src/edframe.cpp:2401 ../src/edframe.cpp:2405
+msgid "References:"
+msgstr "引用:"
+
+#: ../src/resources/prefs.xrc:279
+msgid "Regenerate translation memory from catalogs in paths listed above."
+msgstr "从上面列出的路径中的编目重新生成翻译记忆。"
+
+#: ../src/edframe.cpp:1922
+msgid "Required header Plural-Forms is missing."
+msgstr "缺少必需的头 Plural-Forms。"
+
+#: ../src/resources/tm_update.xrc:42
+msgid "Reset to defaults"
+msgstr "重置为默认值"
+
+#: ../src/edframe.cpp:995 ../src/resources/toolbar.xrc:25
+#: ../src/resources/toolbar.xrc:30
+msgid "Save"
+msgstr "保存"
+
+#: ../src/resources/menus.xrc:35
+msgid "Save &As..."
+msgstr "另存为(&A)..."
+
+#: ../src/resources/menus.xrc:34
+msgid "Save &as..."
+msgstr "另存为(&A)..."
+
+#: ../src/edframe.cpp:1101
+msgid "Save as..."
+msgstr "另存为..."
+
+#: ../src/resources/toolbar.xrc:26 ../src/resources/toolbar.xrc:31
+msgid "Save catalog"
+msgstr "保存编目"
+
+#: ../src/edframe.cpp:988
+msgid "Save changes"
+msgstr "保存更改"
+
+#: ../src/transmemupd.cpp:149
+msgid "Scanning file: "
+msgstr "正在扫描文件: "
+
+#: ../src/digger.cpp:77
+msgid "Scanning files..."
+msgstr "正在扫描文件..."
+
+#: ../src/transmemupd_wizard.cpp:55
+msgid "Search Paths"
+msgstr "搜索路径"
+
+#: ../src/edframe.cpp:924
+msgid "Select catalog's language"
+msgstr "选择编目的语言"
+
+#: ../src/manager.cpp:280 ../src/transmemupd_wizard.cpp:108
+msgid "Select directory"
+msgstr "选择目录"
+
+#: ../src/prefsdlg.cpp:333
+msgid "Select language"
+msgstr "选择语言"
+
+#: ../src/chooselang.cpp:158
+msgid "Select your prefered language"
+msgstr "选择您的首选语言"
+
+#: ../src/edframe.cpp:2797
+#, c-format
+msgid "Set Bookmark %i\tAlt+%i"
+msgstr "设置书签 %i\tAlt+%i"
+
+#: ../src/edframe.cpp:2791
+#, c-format
+msgid "Set Bookmark %i\tCtrl+%i"
+msgstr "设置书签 %i\tCtrl+%i"
+
+#: ../src/edframe.cpp:2794
+#, c-format
+msgid "Set bookmark %i\tAlt+%i"
+msgstr "设置书签 %i\tAlt+%i"
+
+#: ../src/edframe.cpp:1893
+msgid "Set email"
+msgstr "设置电子邮件"
+
+#: ../src/resources/prefs.xrc:97
+msgid "Show summary after catalog update"
+msgstr "在编目更新之后显示摘要"
+
+#: ../src/edframe.cpp:562
+msgid "Singular:"
+msgstr "单数:"
+
+#: ../src/resources/menus.xrc:182
+msgid "Sort by &File Order"
+msgstr "按文件顺序排序(&F)"
+
+#: ../src/resources/menus.xrc:187
+msgid "Sort by &Source"
+msgstr "按源文排序(&S)"
+
+#: ../src/resources/menus.xrc:192
+msgid "Sort by &Translation"
+msgstr "按翻译排序(&T)"
+
+#: ../src/resources/menus.xrc:181
+msgid "Sort by &file order"
+msgstr "按文件顺序排序(&F)"
+
+#: ../src/resources/menus.xrc:186
+msgid "Sort by &source"
+msgstr "按源文排序(&S)"
+
+#: ../src/resources/menus.xrc:191
+msgid "Sort by &translation"
+msgstr "按翻译排序(&T)"
+
+#: ../src/export_html.cpp:145
+msgid "Source"
+msgstr "源文"
+
+#: ../src/resources/prefs.xrc:533 ../src/resources/properties.xrc:86
+msgid "Source code charset:"
+msgstr "源代码字符集:"
+
+#: ../src/resources/prefs.xrc:359
+msgid "Source code parsers:"
+msgstr "源代码分析器:"
+
+#: ../src/fileviewer.cpp:46
+msgid "Source file"
+msgstr "源文件"
+
+#: ../src/fileviewer.cpp:61
+msgid "Source file occurrence:"
+msgstr "源文件出现:"
+
+#: ../src/edlistctrl.cpp:325
+msgid "Source text"
+msgstr "源文文本"
+
+#: ../src/edframe.cpp:536
+msgid "Source text:"
+msgstr "源文文本:"
+
+#: ../src/resources/properties.xrc:179
+msgid "Sources keywords"
+msgstr "源关键字"
+
+#: ../src/resources/properties.xrc:158
+msgid "Sources paths"
+msgstr "源路径"
+
+#. TRANSLATORS: %s is language name in its basic form (as you
+#. would see e.g. in a list of supported languages).
+#: ../src/edframe.cpp:885
+#, c-format
+msgid "Spellchecker dictionary for %s isn't available, you need to install it."
+msgstr "供 %s 使用的拼写检查器字典不可用，您需要安装。"
+
+#: ../src/resources/find.xrc:43
+msgid "Start from the first item"
+msgstr "从第一项开始"
+
+#: ../src/resources/find.xrc:21
+msgid "String to find:"
+msgstr "要查找的字串:"
+
+#: ../src/edframe.cpp:1927
+#, c-format
+msgid "Syntax error in Plural-Forms header (\"%s\")."
+msgstr "在 Plural-Forms 头中存在语法错误(\"%s\")。"
+
+#: ../src/export_html.cpp:115 ../src/resources/properties.xrc:48
+msgid "Team's email address:"
+msgstr "团队的电子邮件地址:"
+
+#: ../src/export_html.cpp:111 ../src/resources/properties.xrc:34
+msgid "Team:"
+msgstr "团队:"
+
+#: ../src/catalog.cpp:1353
+#, c-format
+msgid ""
+"The catalog couldn't be saved in '%s' charset as\n"
+"specified in catalog settings. It was saved in UTF-8 instead\n"
+"and the setting was modified accordingly."
+msgstr ""
+"不能按在编目设置中所指定的将编目保存为 '%s' 字符集。\n"
+"改为将其保存为 UTF-8，\n"
+"设置也相应地被修改。"
+
+#: ../src/edframe.cpp:1380
+msgid ""
+"The file was saved safely, but it cannot be compiled into the MO format and "
+"used."
+msgstr "文件已安全地保存，但它不能被编译成 MO 格式并使用。"
+
+#: ../src/edframe.cpp:1396
+msgid "The translation is ready for use."
+msgstr "翻译为使用准备就绪。"
+
+#: ../src/catalog.cpp:1311
+msgid ""
+"There was a problem formatting the file nicely (but it was saved all right)."
+msgstr "在精确格式化文件时有一个问题(但文件保存正确)。"
+
+#: ../src/transmem.cpp:1193
+msgid "There was a problem moving your translation memory."
+msgstr "在移动您的翻译记忆时有一个问题。"
+
+#: ../src/catalog.cpp:1030
+msgid ""
+"There were errors when loading the catalog. Some data may be missing or "
+"corrupted as the result."
+msgstr "当加载编目时有错误。因此有些数据可能缺少或损坏。"
+
+#: ../src/resources/summary.xrc:38
+msgid ""
+"These strings are no longer in the sources.\n"
+"Poedit will remove them from the catalog now."
+msgstr ""
+"源文件中不再有这些字串。\n"
+"Poedit 现在将从编目中移除这些字串。"
+
+#: ../src/resources/summary.xrc:17
+msgid ""
+"These strings were found in the sources but were not in the catalog.\n"
+"Poedit will add them to the catalog now."
+msgstr ""
+"在源文中找到这些字串，但编目中没有。\n"
+"Poedit 现在将把这些字串添加到编目。"
+
+#: ../src/edframe.cpp:1907
+msgid ""
+"This catalog has entries with plural forms, but doesn't have Plural-Forms "
+"header configured."
+msgstr "这个编目有带有复数形式的条目，但没有已配置的 Plural-Forms 头。"
+
+#: ../src/resources/prefs.xrc:488
+msgid ""
+"This is the command used to launch the parser.\n"
+"%o expands to the name of output file, %K to list\n"
+"of keywords, %F to list of input files,\n"
+"%C to charset flag (see below)."
+msgstr ""
+"这是用来启动分析器的命令。\n"
+"%o 展开成输出文件的名称，%K 展开成关键字的列表，\n"
+"%F 展开成输入文件的列表，%C 展开成字符集标记(见下面的设置)。"
+
+#: ../src/resources/prefs.xrc:545
+#, c-format
+msgid ""
+"This will be attached to the command line\n"
+"only if source codecharset was given. %c expands to charset value."
+msgstr ""
+"仅在指定了源代码字符集时，这才被附加到命令行。\n"
+"%c 展开成字符集值。"
+
+#: ../src/resources/prefs.xrc:526
+#, c-format
+msgid ""
+"This will be attached to the command line once\n"
+"for each input file. %f expands to the filename."
+msgstr ""
+"对于每个输入文件，这将被附加到命令行一次。\n"
+"%f 展开成文件名。"
+
+#: ../src/resources/prefs.xrc:507
+msgid ""
+"This will be attached to the command line once\n"
+"for each keyword. %k expands to the keyword."
+msgstr ""
+"对于每个关键字，这将被附加到命令行一次。\n"
+"%k 展开成关键字。"
+
+#: ../src/resources/toolbar.xrc:52
+msgid "Toggled if selected string has fuzzy translation"
+msgstr "切换是否选择的字串有模糊翻译"
+
+#: ../src/manager.cpp:245
+msgid "Total"
+msgstr "总计"
+
+#: ../src/edlistctrl.cpp:326 ../src/export_html.cpp:148
+msgid "Translation"
+msgstr "翻译"
+
+#: ../src/resources/menus.xrc:79
+msgid "Translation Is &Fuzzy"
+msgstr "翻译是模糊翻译(&F)"
+
+#: ../src/resources/prefs.xrc:245
+msgid "Translation Memory"
+msgstr "翻译记忆"
+
+#: ../src/transmemupd_wizard.cpp:140
+msgid "Translation files (*.po;*.mo)|*.po;*.mo"
+msgstr "翻译文件(*.po;*.mo)|*.po;*.mo"
+
+#: ../src/transmemupd_wizard.cpp:138
+msgid "Translation files (*.po;*.mo;*.rpm)|*.po;*.mo;*.rpm"
+msgstr "翻译文件(*.po;*.mo;*.rpm)|*.po;*.mo;*.rpm"
+
+#: ../src/resources/menus.xrc:78
+msgid "Translation is &fuzzy"
+msgstr "翻译是模糊翻译(&F)"
+
+#: ../src/transmem.cpp:661
+#, c-format
+msgid "Translation memory database error: %s"
+msgstr "翻译记忆数据库错误: %s"
+
+#: ../src/resources/tm_update.xrc:68
+msgid ""
+"Translation memory will be built from the files listed below.\n"
+"You can add more files to the list now."
+msgstr ""
+"翻译记忆将从下面列出的文件构建。\n"
+"您现在可以将更多文件添加到列表。"
+
+#: ../src/resources/properties.xrc:119
+msgid "Translation properties"
+msgstr "翻译属性"
+
+#: ../src/edframe.cpp:544
+msgid "Translation:"
+msgstr "翻译:"
+
+#: ../src/propertiesdlg.cpp:73
+msgid "UTF-8 (recommended)"
+msgstr "UTF-8 (推荐)"
+
+#: ../src/resources/summary.xrc:79
+msgid "Undo"
+msgstr "撤销"
+
+#: ../src/resources/prefs.xrc:157
+msgid "Unix (recommended)"
+msgstr "Unix (推荐)"
+
+#: ../src/chooselang.cpp:60
+#, c-format
+msgid "Unknown locale code '%s' in registry."
+msgstr "注册表中存在未知的地区(locale)代码 '%s'。"
+
+#: ../src/manager.cpp:246
+msgid "Untrans"
+msgstr "未翻译"
+
+#: ../src/resources/toolbar.xrc:44
+msgid "Update"
+msgstr "更新"
+
+#: ../src/resources/manager.xrc:59
+msgid "Update all"
+msgstr "更新全部"
+
+#: ../src/resources/manager.xrc:60
+msgid "Update all catalogs in the project"
+msgstr "更新项目中的所有编目"
+
+#: ../src/resources/toolbar.xrc:45
+msgid "Update catalog - synchronize it with sources"
+msgstr "更新编目 - 将其与源文同步"
+
+#: ../src/resources/menus.xrc:103
+msgid "Update from &POT File..."
+msgstr "从 POT 文件更新(&P)..."
+
+#: ../src/resources/menus.xrc:102
+msgid "Update from &POT file..."
+msgstr "从 POT 文件更新(&P)..."
+
+#: ../src/resources/summary.xrc:4
+msgid "Update summary"
+msgstr "更新摘要"
+
+#: ../src/resources/tm_update.xrc:4
+msgid "Update translation memory"
+msgstr "更新翻译记忆"
+
+#: ../src/edframe.cpp:1297 ../src/manager.cpp:435
+msgid "Updating catalog"
+msgstr "更新编目"
+
+#: ../src/edframe.cpp:1311
+msgid "Updating the catalog failed. Click on 'Details >>' for details."
+msgstr "更新编目失败。单击 '详细资料 >>' 了解详细信息。"
+
+#: ../src/transmemupd_wizard.cpp:194
+msgid "Updating translation memory"
+msgstr "更新翻译记忆"
+
+#: ../src/resources/prefs.xrc:213
+msgid "Use custom font for text fields"
+msgstr "为文本字段使用自定义字体"
+
+#: ../src/resources/prefs.xrc:187
+msgid "Use custom font for translations list"
+msgstr "为翻译列表使用自定义字体"
+
+#: ../src/resources/properties.xrc:166
+msgid ""
+"Use these keywords (function names) to recognize translatable strings\n"
+"in source files:"
+msgstr "使用这些关键字(函数名)来识别源文件中的可翻译字串:"
+
+#: ../src/resources/toolbar.xrc:38
+msgid "Validate"
+msgstr "验证"
+
+#: ../src/edframe.cpp:1372 ../src/edframe.cpp:1392
+msgid "Validation results"
+msgstr "验证结果"
+
+#. TRANSLATORS: This is version information in about dialog, "%s" will be
+#. version number when used
+#: ../src/edframe.cpp:2428
+#, c-format
+msgid "Version %s"
+msgstr "版本 %s"
+
+#: ../src/resources/prefs.xrc:255
+msgid "What languages do you want to use the TM with?"
+msgstr "您希望 TM 使用什么语言?"
+
+#: ../src/resources/find.xrc:50
+msgid "Whole words only"
+msgstr "仅整个单词"
+
+#: ../src/resources/prefs.xrc:158
+msgid "Windows"
+msgstr "Windows"
+
+#: ../src/edframe.cpp:378
+msgid "You can't drop more than one file on Poedit window."
+msgstr "您不能在 Poedit 窗口上放下一个以上的文件。"
+
+#: ../src/chooselang.cpp:173
+msgid "You must restart Poedit for this change to take effect."
+msgstr "您必须重新启动 Poedit 才能使这个更改生效。"
+
+#: ../src/edframe.cpp:1891
+msgid ""
+"You should set your email address in Preferences so that it can be used for "
+"Last-Translator header in GNU gettext files."
+msgstr ""
+"您应该在 “首选项” 中设置您的电子邮件地址，以便它可以供在 GNU gettext 文件中"
+"的 Last-Translator 头使用。"
+
+#: ../src/edframe.cpp:992
+msgid "Your changes will be lost if you don't save them."
+msgstr "如果您不保存，则您的更改将丢失。"
+
+#: ../src/resources/prefs.xrc:44
+msgid "Your email address:"
+msgstr "您的电子邮件地址:"
+
+#: ../src/resources/prefs.xrc:22
+msgid ""
+"Your name and email set below are only used\n"
+"to set the Last-Translator header of GNU gettext files."
+msgstr ""
+"下面设置的您的名字和电子邮件仅用于设置\n"
+" GNU gettext 文件的 Last-Translator 头。"
+
+#: ../src/resources/prefs.xrc:30
+msgid "Your name:"
+msgstr "您的名字:"
+
+#: ../src/edapp.cpp:372
+msgid "don't delete temporary files (for debugging)"
+msgstr "不删除临时文件(供调试使用)"
=== added file 'src/github.com/chai2010/gettext-go/testdata/qttest2_de.mo'
Binary files src/github.com/chai2010/gettext-go/testdata/qttest2_de.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/qttest2_de.mo 2016-03-22 15:18:22 +0000 differ
=== added file 'src/github.com/chai2010/gettext-go/testdata/qttest2_de.po'
--- src/github.com/chai2010/gettext-go/testdata/qttest2_de.po 1970-01-01 00:00:00 +0000
+++ src/github.com/chai2010/gettext-go/testdata/qttest2_de.po 2016-03-22 15:18:22 +0000
@@ -0,0 +1,36 @@
+# German translations for hello-cplusplus-qt package.
+# Copyright (C) 2005 Yoyodyne, Inc.
+# This file is distributed under the same license as the hello-cplusplus-qt package.
+# Bruno Haible , 2005.
+# +msgid "" +msgstr "" +"Project-Id-Version: hello-cplusplus-qt 0\n" +"Report-Msgid-Bugs-To: bug-gnu-gettext@gnu.org\n" +"POT-Creation-Date: 2003-10-20 10:14+0200\n" +"PO-Revision-Date: 2003-10-20 10:13+0200\n" +"Last-Translator: Bruno Haible \n" +"Language-Team: German \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: main.cc:17 +msgctxt "Menu" +msgid "File" +msgstr "Datei" + +#: main.cc:19 +msgctxt "Menu" +msgid "Edit" +msgstr "Bearbeiten" + +#: main.cc:21 +msgctxt "Menu" +msgid "Help" +msgstr "" + +#: data.cc:45 +msgctxt "Database" +msgid "File" +msgstr "Archiv" === added file 'src/github.com/chai2010/gettext-go/testdata/qttest_pl.mo' Binary files src/github.com/chai2010/gettext-go/testdata/qttest_pl.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/qttest_pl.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/qttest_pl.po' --- src/github.com/chai2010/gettext-go/testdata/qttest_pl.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/qttest_pl.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +# Polish translations for hello-cplusplus-qt package. +# Copyright (C) 2003 Yoyodyne, Inc. +# This file is distributed under the same license as the hello-cplusplus-qt package. +# Bruno Haible , 2003. +# +msgid "" +msgstr "" +"Project-Id-Version: hello-cplusplus-qt 0\n" +"Report-Msgid-Bugs-To: bug-gnu-gettext@gnu.org\n" +"POT-Creation-Date: 2003-10-20 10:14+0200\n" +"PO-Revision-Date: 2003-10-20 10:13+0200\n" +"Last-Translator: Bruno Haible \n" +"Language-Team: Polish \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=ISO-8859-2\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 " +"|| n%100>=20) ? 1 : 2);\n" + +#: hello.cc:45 +msgid "Written by François Pinard." +msgstr "Program napisa³ François Pinard." + +#: hello.cc:52 +msgid "error %1." +msgstr "b³±d %1." === added file 'src/github.com/chai2010/gettext-go/testdata/test.mo' Binary files src/github.com/chai2010/gettext-go/testdata/test.mo 1970-01-01 00:00:00 +0000 and src/github.com/chai2010/gettext-go/testdata/test.mo 2016-03-22 15:18:22 +0000 differ === added file 'src/github.com/chai2010/gettext-go/testdata/test.po' --- src/github.com/chai2010/gettext-go/testdata/test.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/test.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: Test\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2011-12-12 20:03+0000\n" +"PO-Revision-Date: 2013-12-02 17:05+0800\n" +"Last-Translator: chai2010 \n" +"Language-Team: chai2010(团队) \n" +"Language: zh_CN\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.5.7\n" +"X-Poedit-SourceCharset: UTF-8\n" + +msgid "Title" +msgstr "Título" + +# test: bad comment +# test: good comment +#. 
test: extracted-comments +#: src/test.cc:9527 +#, fuzzy +#| msgctxt "" +#| "previous-context-1\n" +#| "previous-context-2" +#| msgid "" +#| "previous-untranslated-string\n" +#| "previous-untranslated-string-2" +msgid "%d topic" +msgid_plural "%d topics" +msgstr[0] "%d tema" +msgstr[1] "%d temas" === added file 'src/github.com/chai2010/gettext-go/testdata/xg-c-1.ok.po' --- src/github.com/chai2010/gettext-go/testdata/xg-c-1.ok.po 1970-01-01 00:00:00 +0000 +++ src/github.com/chai2010/gettext-go/testdata/xg-c-1.ok.po 2016-03-22 15:18:22 +0000 @@ -0,0 +1,727 @@ +#, c-format, no-wrap +msgid "" +"Copyright (C) %s Free Software Foundation, Inc.\n" +"License GPLv3+: GNU GPL version 3 or later \n" +"This is free software: you are free to change and redistribute it.\n" +"There is NO WARRANTY, to the extent permitted by law.\n" +msgstr "" + +#, c-format +msgid "Written by %s.\n" +msgstr "" + +#, c-format +msgid "%s and %s are mutually exclusive" +msgstr "" + +msgid "--join-existing cannot be used when output is written to stdout" +msgstr "" + +msgid "xgettext cannot work without keywords to look for" +msgstr "" + +msgid "no input file given" +msgstr "" + +#, c-format +msgid "" +"Cannot convert from \"%s\" to \"%s\". %s relies on iconv(), and iconv() does " +"not support this conversion." +msgstr "" + +#, c-format +msgid "" +"Cannot convert from \"%s\" to \"%s\". %s relies on iconv(). This version was " +"built without iconv()." +msgstr "" + +#, c-format +msgid "warning: file '%s' extension '%s' is unknown; will try C" +msgstr "" + +#, c-format +msgid "Try '%s --help' for more information.\n" +msgstr "" + +#, c-format +msgid "Usage: %s [OPTION] [INPUTFILE]...\n" +msgstr "" + +#, c-format +msgid "Extract translatable strings from given input files.\n" +msgstr "" + +#, c-format, no-wrap +msgid "" +"Mandatory arguments to long options are mandatory for short options too.\n" +"Similarly for optional arguments.\n" +msgstr "" + +#, c-format +msgid "Input file location:\n" +msgstr "" + +#, c-format +msgid " INPUTFILE ... input files\n" +msgstr "" + +#, c-format +msgid " -f, --files-from=FILE get list of input files from FILE\n" +msgstr "" + +#, c-format +msgid "" +" -D, --directory=DIRECTORY add DIRECTORY to list for input files search\n" +msgstr "" + +#, c-format +msgid "If input file is -, standard input is read.\n" +msgstr "" + +#, c-format +msgid "Output file location:\n" +msgstr "" + +#, c-format +msgid "" +" -d, --default-domain=NAME use NAME.po for output (instead of messages." 
+"po)\n" +msgstr "" + +#, c-format +msgid " -o, --output=FILE write output to specified file\n" +msgstr "" + +#, c-format +msgid "" +" -p, --output-dir=DIR output files will be placed in directory DIR\n" +msgstr "" + +#, c-format +msgid "If output file is -, output is written to standard output.\n" +msgstr "" + +#, c-format +msgid "Choice of input file language:\n" +msgstr "" + +#, c-format +msgid "" +" -L, --language=NAME recognise the specified language\n" +" (C, C++, ObjectiveC, PO, Shell, Python, " +"Lisp,\n" +" EmacsLisp, librep, Scheme, Smalltalk, Java,\n" +" JavaProperties, C#, awk, YCP, Tcl, Perl, " +"PHP,\n" +" GCC-source, NXStringTable, RST, Glade, Lua,\n" +" JavaScript, Vala)\n" +msgstr "" + +#, c-format +msgid " -C, --c++ shorthand for --language=C++\n" +msgstr "" + +#, c-format +msgid "" +"By default the language is guessed depending on the input file name " +"extension.\n" +msgstr "" + +#, c-format +msgid "Input file interpretation:\n" +msgstr "" + +#, c-format +msgid "" +" --from-code=NAME encoding of input files\n" +" (except for Python, Tcl, Glade)\n" +msgstr "" + +#, c-format +msgid "By default the input files are assumed to be in ASCII.\n" +msgstr "" + +#, c-format +msgid "Operation mode:\n" +msgstr "" + +#, c-format +msgid " -j, --join-existing join messages with existing file\n" +msgstr "" + +#, c-format +msgid " -x, --exclude-file=FILE.po entries from FILE.po are not extracted\n" +msgstr "" + +#, c-format +msgid "" +" -cTAG, --add-comments=TAG place comment blocks starting with TAG and\n" +" preceding keyword lines in output file\n" +" -c, --add-comments place all comment blocks preceding keyword " +"lines\n" +" in output file\n" +msgstr "" + +#, c-format +msgid "Language specific options:\n" +msgstr "" + +#, c-format +msgid " -a, --extract-all extract all strings\n" +msgstr "" + +#, c-format +msgid "" +" (only languages C, C++, ObjectiveC, Shell,\n" +" Python, Lisp, EmacsLisp, librep, Scheme, " +"Java,\n" +" C#, awk, Tcl, Perl, PHP, GCC-source, Glade,\n" +" Lua, JavaScript, Vala)\n" +msgstr "" + +#, c-format +msgid "" +" -kWORD, --keyword=WORD look for WORD as an additional keyword\n" +" -k, --keyword do not to use default keywords\n" +msgstr "" + +#, c-format +msgid "" +" --flag=WORD:ARG:FLAG additional flag for strings inside the " +"argument\n" +" number ARG of keyword WORD\n" +msgstr "" + +#, c-format +msgid "" +" (only languages C, C++, ObjectiveC, Shell,\n" +" Python, Lisp, EmacsLisp, librep, Scheme, " +"Java,\n" +" C#, awk, YCP, Tcl, Perl, PHP, GCC-source,\n" +" Lua, JavaScript, Vala)\n" +msgstr "" + +#, c-format +msgid " -T, --trigraphs understand ANSI C trigraphs for input\n" +msgstr "" + +#, c-format +msgid " (only languages C, C++, ObjectiveC)\n" +msgstr "" + +#, c-format +msgid " --qt recognize Qt format strings\n" +msgstr "" + +#, c-format +msgid " (only language C++)\n" +msgstr "" + +#, c-format +msgid " --kde recognize KDE 4 format strings\n" +msgstr "" + +#, c-format +msgid " --boost recognize Boost format strings\n" +msgstr "" + +#, c-format +msgid "" +" --debug more detailed formatstring recognition result\n" +msgstr "" + +#, c-format +msgid "Output details:\n" +msgstr "" + +#, c-format +msgid "" +" --color use colors and other text attributes always\n" +" --color=WHEN use colors and other text attributes if WHEN.\n" +" WHEN may be 'always', 'never', 'auto', or " +"'html'.\n" +msgstr "" + +#, c-format +msgid " --style=STYLEFILE specify CSS style rule file for --color\n" +msgstr "" + +#, c-format +msgid "" +" -e, --no-escape do not use C escapes in 
output (default)\n" +msgstr "" + +#, c-format +msgid "" +" -E, --escape use C escapes in output, no extended chars\n" +msgstr "" + +#, c-format +msgid " --force-po write PO file even if empty\n" +msgstr "" + +#, c-format +msgid " -i, --indent write the .po file using indented style\n" +msgstr "" + +#, c-format +msgid " --no-location do not write '#: filename:line' lines\n" +msgstr "" + +#, c-format +msgid "" +" -n, --add-location generate '#: filename:line' lines (default)\n" +msgstr "" + +#, c-format +msgid "" +" --strict write out strict Uniforum conforming .po file\n" +msgstr "" + +#, c-format +msgid " --properties-output write out a Java .properties file\n" +msgstr "" + +#, c-format +msgid "" +" --stringtable-output write out a NeXTstep/GNUstep .strings file\n" +msgstr "" + +#, c-format +msgid " -w, --width=NUMBER set output page width\n" +msgstr "" + +#, c-format +msgid "" +" --no-wrap do not break long message lines, longer than\n" +" the output page width, into several lines\n" +msgstr "" + +#, c-format +msgid " -s, --sort-output generate sorted output\n" +msgstr "" + +#, c-format +msgid " -F, --sort-by-file sort output by file location\n" +msgstr "" + +#, c-format +msgid "" +" --omit-header don't write header with 'msgid \"\"' entry\n" +msgstr "" + +#, c-format +msgid " --copyright-holder=STRING set copyright holder in output\n" +msgstr "" + +#, c-format +msgid "" +" --foreign-user omit FSF copyright in output for foreign user\n" +msgstr "" + +#, c-format +msgid " --package-name=PACKAGE set package name in output\n" +msgstr "" + +#, c-format +msgid " --package-version=VERSION set package version in output\n" +msgstr "" + +#, c-format +msgid "" +" --msgid-bugs-address=EMAIL@ADDRESS set report address for msgid bugs\n" +msgstr "" + +#, c-format +msgid "" +" -m[STRING], --msgstr-prefix[=STRING] use STRING or \"\" as prefix for " +"msgstr\n" +" values\n" +msgstr "" + +#, c-format +msgid "" +" -M[STRING], --msgstr-suffix[=STRING] use STRING or \"\" as suffix for " +"msgstr\n" +" values\n" +msgstr "" + +#, c-format +msgid "Informative output:\n" +msgstr "" + +#, c-format +msgid " -h, --help display this help and exit\n" +msgstr "" + +#, c-format +msgid " -V, --version output version information and exit\n" +msgstr "" + +msgid "Report bugs to .\n" +msgstr "" + +msgid "this file may not contain domain directives" +msgstr "" + +#, c-format +msgid "" +"A --flag argument doesn't have the ::[pass-] syntax: " +"%s" +msgstr "" + +msgid "standard input" +msgstr "" + +#, c-format +msgid "error while opening \"%s\" for reading" +msgstr "" + +#, c-format +msgid "Non-ASCII character at %s%s." +msgstr "" + +#, c-format +msgid "Non-ASCII comment at or before %s%s." +msgstr "" + +#, c-format +msgid "Non-ASCII string at %s%s." +msgstr "" + +msgid "Please specify the source encoding through --from-code." +msgstr "" + +#, c-format +msgid "%s%s: warning: " +msgstr "" + +#, c-format +msgid "" +"Although being used in a format string position, the %s is not a valid %s " +"format string. Reason: %s\n" +msgstr "" + +#, c-format +msgid "" +"Although declared as such, the %s is not a valid %s format string. Reason: " +"%s\n" +msgstr "" + +#, c-format +msgid "" +"'%s' format string with unnamed arguments cannot be properly localized:\n" +"The translator cannot reorder the arguments.\n" +"Please consider using a format string with named arguments,\n" +"and a mapping instead of a tuple for the arguments.\n" +msgstr "" + +msgid "" +"Empty msgid. 
It is reserved by GNU gettext:\n" +"gettext(\"\") returns the header entry with\n" +"meta information, not the empty string.\n" +msgstr "" + +#, c-format +msgid "ambiguous argument specification for keyword '%.*s'" +msgstr "" + +#, c-format +msgid "warning: missing context for keyword '%.*s'" +msgstr "" + +#, c-format +msgid "warning: missing context for plural argument of keyword '%.*s'" +msgstr "" + +msgid "context mismatch between singular and plural form" +msgstr "" + +msgid "warning: " +msgstr "" + +msgid "" +"The option --msgid-bugs-address was not specified.\n" +"If you are using a 'Makevars' file, please specify\n" +"the MSGID_BUGS_ADDRESS variable there; otherwise please\n" +"specify an --msgid-bugs-address command line option.\n" +msgstr "" + +#, c-format +msgid "language '%s' unknown" +msgstr "" + +#, c-format +msgid "the argument to %s should be a single punctuation character" +msgstr "" + +#, c-format +msgid "invalid endianness: %s" +msgstr "" + +#, c-format +msgid "%s requires a \"-d directory\" specification" +msgstr "" + +#, c-format +msgid "%s requires a \"-l locale\" specification" +msgstr "" + +#, c-format +msgid "%s is only valid with %s or %s" +msgstr "" + +#, c-format +msgid "%s is only valid with %s, %s or %s" +msgstr "" + +#, c-format +msgid "found %d fatal error" +msgid_plural "found %d fatal errors" +msgstr[0] "" +msgstr[1] "" + +#, c-format +msgid "%s: " +msgstr "" + +#, c-format +msgid "%d translated message" +msgid_plural "%d translated messages" +msgstr[0] "" +msgstr[1] "" + +#, c-format +msgid ", %d fuzzy translation" +msgid_plural ", %d fuzzy translations" +msgstr[0] "" +msgstr[1] "" + +#, c-format +msgid ", %d untranslated message" +msgid_plural ", %d untranslated messages" +msgstr[0] "" +msgstr[1] "" + +#, c-format +msgid "Usage: %s [OPTION] filename.po ...\n" +msgstr "" + +#, c-format +msgid "Generate binary message catalog from textual translation description.\n" +msgstr "" + +#, c-format +msgid " filename.po ... input files\n" +msgstr "" + +#, c-format +msgid "" +" -j, --java Java mode: generate a Java ResourceBundle " +"class\n" +msgstr "" + +#, c-format +msgid "" +" --java2 like --java, and assume Java2 (JDK 1.2 or " +"higher)\n" +msgstr "" + +#, c-format +msgid " --csharp C# mode: generate a .NET .dll file\n" +msgstr "" + +#, c-format +msgid "" +" --csharp-resources C# resources mode: generate a .NET .resources " +"file\n" +msgstr "" + +#, c-format +msgid "" +" --tcl Tcl mode: generate a tcl/msgcat .msg file\n" +msgstr "" + +#, c-format +msgid " --qt Qt mode: generate a Qt .qm file\n" +msgstr "" + +#, c-format +msgid " -o, --output-file=FILE write output to specified file\n" +msgstr "" + +#, c-format +msgid " --strict enable strict Uniforum mode\n" +msgstr "" + +#, c-format +msgid "Output file location in Java mode:\n" +msgstr "" + +#, c-format +msgid " -r, --resource=RESOURCE resource name\n" +msgstr "" + +#, c-format +msgid "" +" -l, --locale=LOCALE locale name, either language or " +"language_COUNTRY\n" +msgstr "" + +#, c-format +msgid "" +" -d DIRECTORY base directory of classes directory hierarchy\n" +msgstr "" + +#, c-format +msgid "" +"The class name is determined by appending the locale name to the resource " +"name,\n" +"separated with an underscore. The -d option is mandatory. 
The class is\n" +"written under the specified directory.\n" +msgstr "" + +#, c-format +msgid "Output file location in C# mode:\n" +msgstr "" + +#, c-format +msgid "" +" -d DIRECTORY base directory for locale dependent .dll " +"files\n" +msgstr "" + +#, c-format +msgid "" +"The -l and -d options are mandatory. The .dll file is written in a\n" +"subdirectory of the specified directory whose name depends on the locale.\n" +msgstr "" + +#, c-format +msgid "Output file location in Tcl mode:\n" +msgstr "" + +#, c-format +msgid " -d DIRECTORY base directory of .msg message catalogs\n" +msgstr "" + +#, c-format +msgid "" +"The -l and -d options are mandatory. The .msg file is written in the\n" +"specified directory.\n" +msgstr "" + +#, c-format +msgid "Input file syntax:\n" +msgstr "" + +#, c-format +msgid "" +" -P, --properties-input input files are in Java .properties syntax\n" +msgstr "" + +#, c-format +msgid "" +" --stringtable-input input files are in NeXTstep/GNUstep .strings\n" +" syntax\n" +msgstr "" + +#, c-format +msgid "" +" -c, --check perform all the checks implied by\n" +" --check-format, --check-header, --check-" +"domain\n" +msgstr "" + +#, c-format +msgid " --check-format check language dependent format strings\n" +msgstr "" + +#, c-format +msgid "" +" --check-header verify presence and contents of the header " +"entry\n" +msgstr "" + +#, c-format +msgid "" +" --check-domain check for conflicts between domain directives\n" +" and the --output-file option\n" +msgstr "" + +#, c-format +msgid "" +" -C, --check-compatibility check that GNU msgfmt behaves like X/Open " +"msgfmt\n" +msgstr "" + +#, c-format +msgid "" +" --check-accelerators[=CHAR] check presence of keyboard accelerators " +"for\n" +" menu items\n" +msgstr "" + +#, c-format +msgid " -f, --use-fuzzy use fuzzy entries in output\n" +msgstr "" + +#, c-format +msgid "" +" -a, --alignment=NUMBER align strings to NUMBER bytes (default: %d)\n" +msgstr "" + +#, c-format +msgid "" +" --endianness=BYTEORDER write out 32-bit numbers in the given byte " +"order\n" +" (big or little, default depends on " +"platform)\n" +msgstr "" + +#, c-format +msgid "" +" --no-hash binary file will not include the hash table\n" +msgstr "" + +#, c-format +msgid " --statistics print statistics about translations\n" +msgstr "" + +#, c-format +msgid " -v, --verbose increase verbosity level\n" +msgstr "" + +msgid "warning: PO file header missing or invalid\n" +msgstr "" + +msgid "warning: charset conversion will not work\n" +msgstr "" + +msgid "warning: PO file header fuzzy\n" +msgstr "" + +msgid "warning: older versions of msgfmt will give an error on this\n" +msgstr "" + +#, c-format +msgid "domain name \"%s\" not suitable as file name" +msgstr "" + +#, c-format +msgid "domain name \"%s\" not suitable as file name: will use prefix" +msgstr "" + +#, c-format +msgid "'domain %s' directive ignored" +msgstr "" + +msgid "empty 'msgstr' entry ignored" +msgstr "" + +msgid "fuzzy 'msgstr' entry ignored" +msgstr "" + +#, c-format +msgid "%s: warning: source file contains fuzzy translation" +msgstr "" === added directory 'src/github.com/gorilla' === added directory 'src/github.com/gorilla/websocket' === added file 'src/github.com/gorilla/websocket/.gitignore' --- src/github.com/gorilla/websocket/.gitignore 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific 
extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe === added file 'src/github.com/gorilla/websocket/.travis.yml' --- src/github.com/gorilla/websocket/.travis.yml 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/.travis.yml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +language: go + +go: + - 1.1 + - 1.2 + - tip === added file 'src/github.com/gorilla/websocket/AUTHORS' --- src/github.com/gorilla/websocket/AUTHORS 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/AUTHORS 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Joachim Bauch + === added file 'src/github.com/gorilla/websocket/LICENSE' --- src/github.com/gorilla/websocket/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. === added file 'src/github.com/gorilla/websocket/README.md' --- src/github.com/gorilla/websocket/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,60 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. 
+
+### Installation
+
+    go get github.com/gorilla/websocket
+
+### Protocol Compliance
+
+The Gorilla WebSocket package passes the server tests in the [Autobahn Test
+Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn
+subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).
+
+### Gorilla WebSocket compared with other packages
+
+|                                         | github.com/gorilla | golang.org/x/net |
+| --------------------------------------- | ------------------ | ---------------- |
+| **RFC 6455 Features**                   |                    |                  |
+| Passes Autobahn Test Suite              | Yes                | No               |
+| Receive fragmented message              | Yes                | No, see note 1   |
+| Send close message                      | Yes                | No               |
+| Send pings and receive pongs            | Yes                | No               |
+| Get the type of a received data message | Yes                | Yes, see note 2  |
+| **Other Features**                      |                    |                  |
+| Limit size of received message          | Yes                | No               |
+| Read message using io.Reader            | Yes                | No, see note 3   |
+| Write message using io.WriteCloser      | Yes                | No, see note 3   |
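
[Editor's note: the following sketch is an editorial illustration, not part of the vendored README. It is a minimal client under these assumptions: an echo server listening at ws://localhost:8080/echo (a hypothetical address), Dialer.Dial as defined in the vendored client.go below, and the Conn.ReadMessage / Conn.WriteMessage helpers that gorilla/websocket provides elsewhere in the package. Note that DefaultDialer is declared as a nil *Dialer in this version; that is valid because Dial treats a nil receiver the same as a zero-value Dialer.]

    package main

    import (
        "log"

        "github.com/gorilla/websocket"
    )

    func main() {
        // Dial performs the opening handshake described in client.go.
        conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
        if err != nil {
            log.Fatal("dial:", err)
        }
        defer conn.Close()

        // WriteMessage sends one complete WebSocket message per call,
        // matching the message-oriented rows in the table above.
        if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
            log.Fatal("write:", err)
        }

        // ReadMessage returns the next complete message, reassembling
        // any fragments (note 1 above) before returning.
        _, msg, err := conn.ReadMessage()
        if err != nil {
            log.Fatal("read:", err)
        }
        log.Printf("recv: %s", msg)
    }
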
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + === added file 'src/github.com/gorilla/websocket/bench_test.go' --- src/github.com/gorilla/websocket/bench_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/bench_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "testing" +) + +func BenchmarkMaskBytes(b *testing.B) { + var key [4]byte + data := make([]byte, 1024) + pos := 0 + for i := 0; i < b.N; i++ { + pos = maskBytes(key, pos, data) + } + b.SetBytes(int64(len(data))) +} === added file 'src/github.com/gorilla/websocket/client.go' --- src/github.com/gorilla/websocket/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,269 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + acceptKey := computeAcceptKey(challengeKey) + + c = newConn(netConn, false, readBufSize, writeBufSize) + p := c.writeBuf[:0] + p = append(p, "GET "...) + p = append(p, u.RequestURI()...) + p = append(p, " HTTP/1.1\r\nHost: "...) + p = append(p, u.Host...) + // "Upgrade" is capitalized for servers that do not use case insensitive + // comparisons on header tokens. + p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) + p = append(p, challengeKey...) + p = append(p, "\r\n"...) + for k, vs := range requestHeader { + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + p = append(p, v...) + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) 
+ + if _, err := netConn.Write(p); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != acceptKey { + return nil, resp, ErrBadHandshake + } + c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + return c, resp, nil +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // Input and output buffer sizes. If the buffer size is zero, then a + // default value of 4096 is used. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +// parseURL parses the URL. The url.Parse function is not used here because +// url.Parse mangles the path. +func parseURL(s string) (*url.URL, error) { + // From the RFC: + // + // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] + // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] + // + // We don't use the net/url parser here because the dialer interface does + // not provide a way for applications to work around percent deocding in + // the net/url parser. + + var u url.URL + switch { + case strings.HasPrefix(s, "ws://"): + u.Scheme = "ws" + s = s[len("ws://"):] + case strings.HasPrefix(s, "wss://"): + u.Scheme = "wss" + s = s[len("wss://"):] + default: + return nil, errMalformedURL + } + + u.Host = s + u.Opaque = "/" + if i := strings.Index(s, "/"); i >= 0 { + u.Host = s[:i] + u.Opaque = s[i:] + } + + if strings.Contains(u.Host, "@") { + // WebSocket URIs do not contain user information. + return nil, errMalformedURL + } + + return &u, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + if u.Scheme == "wss" { + hostPort += ":443" + } else { + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default zero values. +var DefaultDialer *Dialer + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. 
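+//
+// A minimal usage sketch (the URL is illustrative; the enclosing function is
+// assumed to return an error):
+//
+//	var d Dialer
+//	conn, resp, err := d.Dial("ws://localhost:8080/echo", nil)
+//	if err != nil {
+//		// On ErrBadHandshake, resp is non-nil and resp.StatusCode can be
+//		// inspected for diagnostics.
+//		return err
+//	}
+//	defer conn.Close()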
+func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + u, err := parseURL(urlStr) + if err != nil { + return nil, nil, err + } + + hostPort, hostNoPort := hostPortNoPort(u) + + if d == nil { + d = &Dialer{} + } + + var deadline time.Time + if d.HandshakeTimeout != 0 { + deadline = time.Now().Add(d.HandshakeTimeout) + } + + netDial := d.NetDial + if netDial == nil { + netDialer := &net.Dialer{Deadline: deadline} + netDial = netDialer.Dial + } + + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if err := netConn.SetDeadline(deadline); err != nil { + return nil, nil, err + } + + if u.Scheme == "wss" { + cfg := d.TLSClientConfig + if cfg == nil { + cfg = &tls.Config{ServerName: hostNoPort} + } else if cfg.ServerName == "" { + shallowCopy := *cfg + cfg = &shallowCopy + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + if len(d.Subprotocols) > 0 { + h := http.Header{} + for k, v := range requestHeader { + h[k] = v + } + h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", ")) + requestHeader = h + } + + if len(requestHeader["Host"]) > 0 { + // This can be used to supply a Host: header which is different from + // the dial address. + u.Host = requestHeader.Get("Host") + + // Drop "Host" header + h := http.Header{} + for k, v := range requestHeader { + if k == "Host" { + continue + } + h[k] = v + } + requestHeader = h + } + + conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize) + + if err != nil { + if err == ErrBadHandshake { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + } + return nil, resp, err + } + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} === added file 'src/github.com/gorilla/websocket/client_server_test.go' --- src/github.com/gorilla/websocket/client_server_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/client_server_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,323 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
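+
+// The tests below dial httptest-based servers to exercise the client
+// handshake end to end: subprotocol negotiation, TLS, timeouts, bad schemes
+// and origins, and error responses.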
+ +package websocket + +import ( + "crypto/tls" + "crypto/x509" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" + "time" +) + +var cstUpgrader = Upgrader{ + Subprotocols: []string{"p0", "p1"}, + ReadBufferSize: 1024, + WriteBufferSize: 1024, + Error: func(w http.ResponseWriter, r *http.Request, status int, reason error) { + http.Error(w, reason.Error(), status) + }, +} + +var cstDialer = Dialer{ + Subprotocols: []string{"p1", "p2"}, + ReadBufferSize: 1024, + WriteBufferSize: 1024, +} + +type cstHandler struct{ *testing.T } + +type cstServer struct { + *httptest.Server + URL string +} + +func newServer(t *testing.T) *cstServer { + var s cstServer + s.Server = httptest.NewServer(cstHandler{t}) + s.URL = makeWsProto(s.Server.URL) + return &s +} + +func newTLSServer(t *testing.T) *cstServer { + var s cstServer + s.Server = httptest.NewTLSServer(cstHandler{t}) + s.URL = makeWsProto(s.Server.URL) + return &s +} + +func (t cstHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + t.Logf("method %s not allowed", r.Method) + http.Error(w, "method not allowed", 405) + return + } + subprotos := Subprotocols(r) + if !reflect.DeepEqual(subprotos, cstDialer.Subprotocols) { + t.Logf("subprotols=%v, want %v", subprotos, cstDialer.Subprotocols) + http.Error(w, "bad protocol", 400) + return + } + ws, err := cstUpgrader.Upgrade(w, r, http.Header{"Set-Cookie": {"sessionID=1234"}}) + if err != nil { + t.Logf("Upgrade: %v", err) + return + } + defer ws.Close() + + if ws.Subprotocol() != "p1" { + t.Logf("Subprotocol() = %s, want p1", ws.Subprotocol()) + ws.Close() + return + } + op, rd, err := ws.NextReader() + if err != nil { + t.Logf("NextReader: %v", err) + return + } + wr, err := ws.NextWriter(op) + if err != nil { + t.Logf("NextWriter: %v", err) + return + } + if _, err = io.Copy(wr, rd); err != nil { + t.Logf("NextWriter: %v", err) + return + } + if err := wr.Close(); err != nil { + t.Logf("Close: %v", err) + return + } +} + +func makeWsProto(s string) string { + return "ws" + strings.TrimPrefix(s, "http") +} + +func sendRecv(t *testing.T, ws *Conn) { + const message = "Hello World!" 
+ if err := ws.SetWriteDeadline(time.Now().Add(time.Second)); err != nil { + t.Fatalf("SetWriteDeadline: %v", err) + } + if err := ws.WriteMessage(TextMessage, []byte(message)); err != nil { + t.Fatalf("WriteMessage: %v", err) + } + if err := ws.SetReadDeadline(time.Now().Add(time.Second)); err != nil { + t.Fatalf("SetReadDeadline: %v", err) + } + _, p, err := ws.ReadMessage() + if err != nil { + t.Fatalf("ReadMessage: %v", err) + } + if string(p) != message { + t.Fatalf("message=%s, want %s", p, message) + } +} + +func TestDial(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, _, err := cstDialer.Dial(s.URL, nil) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + sendRecv(t, ws) +} + +func TestDialTLS(t *testing.T) { + s := newTLSServer(t) + defer s.Close() + + certs := x509.NewCertPool() + for _, c := range s.TLS.Certificates { + roots, err := x509.ParseCertificates(c.Certificate[len(c.Certificate)-1]) + if err != nil { + t.Fatalf("error parsing server's root cert: %v", err) + } + for _, root := range roots { + certs.AddCert(root) + } + } + + u, _ := url.Parse(s.URL) + d := cstDialer + d.NetDial = func(network, addr string) (net.Conn, error) { return net.Dial(network, u.Host) } + d.TLSClientConfig = &tls.Config{RootCAs: certs} + ws, _, err := d.Dial("wss://example.com/", nil) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + sendRecv(t, ws) +} + +func xTestDialTLSBadCert(t *testing.T) { + // This test is deactivated because of noisy logging from the net/http package. + s := newTLSServer(t) + defer s.Close() + + ws, _, err := cstDialer.Dial(s.URL, nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } +} + +func xTestDialTLSNoVerify(t *testing.T) { + s := newTLSServer(t) + defer s.Close() + + d := cstDialer + d.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + ws, _, err := d.Dial(s.URL, nil) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + sendRecv(t, ws) +} + +func TestDialTimeout(t *testing.T) { + s := newServer(t) + defer s.Close() + + d := cstDialer + d.HandshakeTimeout = -1 + ws, _, err := d.Dial(s.URL, nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } +} + +func TestDialBadScheme(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, _, err := cstDialer.Dial(s.Server.URL, nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } +} + +func TestDialBadOrigin(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {"bad"}}) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } + if resp == nil { + t.Fatalf("resp=nil, err=%v", err) + } + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("status=%d, want %d", resp.StatusCode, http.StatusForbidden) + } +} + +func TestHandshake(t *testing.T) { + s := newServer(t) + defer s.Close() + + ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Origin": {s.URL}}) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + + var sessionID string + for _, c := range resp.Cookies() { + if c.Name == "sessionID" { + sessionID = c.Value + } + } + if sessionID != "1234" { + t.Error("Set-Cookie not received from the server.") + } + + if ws.Subprotocol() != "p1" { + t.Errorf("ws.Subprotocol() = %s, want p1", ws.Subprotocol()) + } + sendRecv(t, ws) +} + +func TestRespOnBadHandshake(t *testing.T) { + const expectedStatus = http.StatusGone + const expectedBody = "This is the response body." 
+ + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(expectedStatus) + io.WriteString(w, expectedBody) + })) + defer s.Close() + + ws, resp, err := cstDialer.Dial(makeWsProto(s.URL), nil) + if err == nil { + ws.Close() + t.Fatalf("Dial: nil") + } + + if resp == nil { + t.Fatalf("resp=nil, err=%v", err) + } + + if resp.StatusCode != expectedStatus { + t.Errorf("resp.StatusCode=%d, want %d", resp.StatusCode, expectedStatus) + } + + p, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("ReadFull(resp.Body) returned error %v", err) + } + + if string(p) != expectedBody { + t.Errorf("resp.Body=%s, want %s", p, expectedBody) + } +} + +// If the Host header is specified in `Dial()`, the server must receive it as +// the `Host:` header. +func TestHostHeader(t *testing.T) { + s := newServer(t) + defer s.Close() + + specifiedHost := make(chan string, 1) + origHandler := s.Server.Config.Handler + + // Capture the request Host header. + s.Server.Config.Handler = http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + specifiedHost <- r.Host + origHandler.ServeHTTP(w, r) + }) + + ws, resp, err := cstDialer.Dial(s.URL, http.Header{"Host": {"testhost"}}) + if err != nil { + t.Fatalf("Dial: %v", err) + } + defer ws.Close() + + if resp.StatusCode != http.StatusSwitchingProtocols { + t.Fatalf("resp.StatusCode = %v, want http.StatusSwitchingProtocols", resp.StatusCode) + } + + if gotHost := <-specifiedHost; gotHost != "testhost" { + t.Fatalf("gotHost = %q, want \"testhost\"", gotHost) + } + + sendRecv(t, ws) +} === added file 'src/github.com/gorilla/websocket/client_test.go' --- src/github.com/gorilla/websocket/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,64 @@ +// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "net/url" + "reflect" + "testing" +) + +var parseURLTests = []struct { + s string + u *url.URL +}{ + {"ws://example.com/", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}}, + {"ws://example.com", &url.URL{Scheme: "ws", Host: "example.com", Opaque: "/"}}, + {"ws://example.com:7777/", &url.URL{Scheme: "ws", Host: "example.com:7777", Opaque: "/"}}, + {"wss://example.com/", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/"}}, + {"wss://example.com/a/b", &url.URL{Scheme: "wss", Host: "example.com", Opaque: "/a/b"}}, + {"ss://example.com/a/b", nil}, + {"ws://webmaster@example.com/", nil}, +} + +func TestParseURL(t *testing.T) { + for _, tt := range parseURLTests { + u, err := parseURL(tt.s) + if tt.u != nil && err != nil { + t.Errorf("parseURL(%q) returned error %v", tt.s, err) + continue + } + if tt.u == nil && err == nil { + t.Errorf("parseURL(%q) did not return error", tt.s) + continue + } + if !reflect.DeepEqual(u, tt.u) { + t.Errorf("parseURL(%q) returned %v, want %v", tt.s, u, tt.u) + continue + } + } +} + +var hostPortNoPortTests = []struct { + u *url.URL + hostPort, hostNoPort string +}{ + {&url.URL{Scheme: "ws", Host: "example.com"}, "example.com:80", "example.com"}, + {&url.URL{Scheme: "wss", Host: "example.com"}, "example.com:443", "example.com"}, + {&url.URL{Scheme: "ws", Host: "example.com:7777"}, "example.com:7777", "example.com"}, + {&url.URL{Scheme: "wss", Host: "example.com:7777"}, "example.com:7777", "example.com"}, +} + +func TestHostPortNoPort(t *testing.T) { + for _, tt := range hostPortNoPortTests { + hostPort, hostNoPort := hostPortNoPort(tt.u) + if hostPort != tt.hostPort { + t.Errorf("hostPortNoPort(%v) returned hostPort %q, want %q", tt.u, hostPort, tt.hostPort) + } + if hostNoPort != tt.hostNoPort { + t.Errorf("hostPortNoPort(%v) returned hostNoPort %q, want %q", tt.u, hostNoPort, tt.hostNoPort) + } + } +} === added file 'src/github.com/gorilla/websocket/conn.go' --- src/github.com/gorilla/websocket/conn.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/conn.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,826 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "time" +) + +const ( + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + finalBit = 1 << 7 + maskBit = 1 << 7 + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. 
The optional message
+	// payload contains a numeric code and text. Use the FormatCloseMessage
+	// function to format a close message payload.
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// CloseError represents a close frame.
+type CloseError struct {
+
+	// Code is defined in RFC 6455, section 11.7.
+	Code int
+
+	// Text is the optional text payload.
+	Text string
+}
+
+func (e *CloseError) Error() string {
+	return "websocket: close " + strconv.Itoa(e.Code) + " " + e.Text
+}
+
+var (
+	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true}
+	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+	errBadWriteOpCode      = errors.New("websocket: bad write message type")
+	errWriteClosed         = errors.New("websocket: write closed")
+	errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+func isControl(frameType int) bool {
+	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+	return frameType == TextMessage || frameType == BinaryMessage
+}
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+	return pos & 3
+}
+
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+// Conn represents a WebSocket connection.
+type Conn struct {
+	conn        net.Conn
+	isServer    bool
+	subprotocol string
+
+	// Write fields
+	mu        chan bool // used as mutex to protect write to conn and closeSent
+	closeSent bool      // true if close message was sent
+
+	// Message writer fields.
+	writeErr       error
+	writeBuf       []byte // frame is constructed in this buffer.
+	writePos       int    // end of data in writeBuf.
+	writeFrameType int    // type of the current frame.
+	writeSeq       int    // incremented to invalidate message writers.
+	writeDeadline  time.Time
+
+	// Read fields
+	readErr       error
+	br            *bufio.Reader
+	readRemaining int64 // bytes remaining in current frame.
+	readFinal     bool  // true once the final frame of the current message has been read.
+	readSeq       int   // incremented to invalidate message readers.
+	readLength    int64 // Message size.
+	readLimit     int64 // Maximum message size.
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + mu := make(chan bool, 1) + mu <- true + + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + + c := &Conn{ + isServer: isServer, + br: bufio.NewReaderSize(conn, readBufferSize), + conn: conn, + mu: mu, + readFinal: true, + writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), + writeFrameType: noFrame, + writePos: maxFrameHeaderSize, + } + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting for a close frame. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { + <-c.mu + defer func() { c.mu <- true }() + + if c.closeSent { + return ErrCloseSent + } else if frameType == CloseMessage { + c.closeSent = true + } + + c.conn.SetWriteDeadline(deadline) + for _, buf := range bufs { + if len(buf) > 0 { + n, err := c.conn.Write(buf) + if n != len(buf) { + // Close on partial write. + c.conn.Close() + } + if err != nil { + return err + } + } + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + if c.closeSent { + return ErrCloseSent + } else if messageType == CloseMessage { + c.closeSent = true + } + + c.conn.SetWriteDeadline(deadline) + n, err := c.conn.Write(buf) + if n != 0 && n != len(buf) { + c.conn.Close() + } + return err +} + +// NextWriter returns a writer for the next message to send. The writer's +// Close method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// The NextWriter method and the writers returned from the method cannot be +// accessed by more than one goroutine at a time. 
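+//
+// A minimal sketch of sending one text message (assuming c is an open *Conn;
+// Close flushes the final frame to the network):
+//
+//	w, err := c.NextWriter(TextMessage)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := w.Write([]byte("hello")); err != nil {
+//		return err
+//	}
+//	if err := w.Close(); err != nil {
+//		return err
+//	}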
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if c.writeErr != nil { + return nil, c.writeErr + } + + if c.writeFrameType != noFrame { + if err := c.flushFrame(true, nil); err != nil { + return nil, err + } + } + + if !isControl(messageType) && !isData(messageType) { + return nil, errBadWriteOpCode + } + + c.writeFrameType = messageType + return messageWriter{c, c.writeSeq}, nil +} + +func (c *Conn) flushFrame(final bool, extra []byte) error { + length := c.writePos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(c.writeFrameType) && + (!final || length > maxControlFramePayloadSize) { + c.writeSeq++ + c.writeFrameType = noFrame + c.writePos = maxFrameHeaderSize + return errInvalidControlFrame + } + + b0 := byte(c.writeFrameType) + if final { + b0 |= finalBit + } + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + if len(extra) > 0 { + c.writeErr = errors.New("websocket: internal error, extra used in client mode") + return c.writeErr + } + } + + // Write the buffers to the connection. + c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + + // Setup for next frame. + c.writePos = maxFrameHeaderSize + c.writeFrameType = continuationFrame + if final { + c.writeSeq++ + c.writeFrameType = noFrame + } + return c.writeErr +} + +type messageWriter struct { + c *Conn + seq int +} + +func (w messageWriter) err() error { + c := w.c + if c.writeSeq != w.seq { + return errWriteClosed + } + if c.writeErr != nil { + return c.writeErr + } + return nil +} + +func (w messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.c.writePos + if n <= 0 { + if err := w.c.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.c.writePos + } + if n > max { + n = max + } + return n, nil +} + +func (w messageWriter) write(final bool, p []byte) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
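+		// Handing p to flushFrame as its "extra" argument writes the payload
+		// directly to the connection instead of copying it through writeBuf.
+		// This path is server-only: client frames must be masked in place in
+		// writeBuf, and flushFrame reports an error if extra data is supplied
+		// in client mode.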
+ err := w.c.flushFrame(final, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) Write(p []byte) (int, error) { + return w.write(false, p) +} + +func (w messageWriter) WriteString(p string) (int, error) { + if err := w.err(); err != nil { + return 0, err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.c.writePos:], p[:n]) + w.c.writePos += n + p = p[n:] + } + return nn, nil +} + +func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if err := w.err(); err != nil { + return 0, err + } + for { + if w.c.writePos == len(w.c.writeBuf) { + err = w.c.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.c.writePos:]) + w.c.writePos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w messageWriter) Close() error { + if err := w.err(); err != nil { + return err + } + return w.c.flushFrame(true, nil) +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + wr, err := c.NextWriter(messageType) + if err != nil { + return err + } + w := wr.(messageWriter) + if _, err := w.write(true, data); err != nil { + return err + } + if c.writeSeq == w.seq { + if err := c.flushFrame(true, nil); err != nil { + return err + } + } + return nil +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +// readFull is like io.ReadFull except that io.EOF is never returned. +func (c *Conn) readFull(p []byte) (err error) { + var n int + for n < len(p) && err == nil { + var nn int + nn, err = c.br.Read(p[n:]) + n += nn + } + if n == len(p) { + err = nil + } else if err == io.EOF { + err = errUnexpectedEOF + } + return +} + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
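+	//
+	// Frame header layout (RFC 6455, section 5.2):
+	//   byte 0: FIN (1 bit), RSV1-3 (3 bits), opcode (4 bits)
+	//   byte 1: MASK (1 bit), payload length (7 bits); lengths 126 and 127
+	//   select the 16- and 64-bit extended lengths parsed in step 3.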
+ + var b [8]byte + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + + final := b[0]&finalBit != 0 + frameType := int(b[0] & 0xf) + reserved := int((b[0] >> 4) & 0x7) + mask := b[1]&maskBit != 0 + c.readRemaining = int64(b[1] & 0x7f) + + if reserved != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + if err := c.readFull(b[:2]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + case 127: + if err := c.readFull(b[:8]); err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + if err := c.readFull(c.readMaskKey[:]); err != nil { + return noFrame, err + } + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload = make([]byte, c.readRemaining) + c.readRemaining = 0 + if err := c.readFull(payload); err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + closeText = string(payload[2:]) + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. NextReader discards +// the previous message if the application has not already consumed it. 
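+//
+// A minimal read-loop sketch (assuming c is an open *Conn):
+//
+//	for {
+//		mt, r, err := c.NextReader()
+//		if err != nil {
+//			break
+//		}
+//		log.Println("message type:", mt)
+//		io.Copy(ioutil.Discard, r)
+//	}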
+// +// The NextReader method and the readers returned from the method cannot be +// accessed by more than one goroutine at a time. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + + c.readSeq++ + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + return frameType, messageReader{c, c.readSeq}, nil + } + } + return noFrame, nil, c.readErr +} + +type messageReader struct { + c *Conn + seq int +} + +func (r messageReader) Read(b []byte) (int, error) { + + if r.seq != r.c.readSeq { + return 0, io.EOF + } + + for r.c.readErr == nil { + + if r.c.readRemaining > 0 { + if int64(len(b)) > r.c.readRemaining { + b = b[:r.c.readRemaining] + } + n, err := r.c.br.Read(b) + r.c.readErr = hideTempErr(err) + if r.c.isServer { + r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n]) + } + r.c.readRemaining -= int64(n) + return n, r.c.readErr + } + + if r.c.readFinal { + r.c.readSeq++ + return 0, io.EOF + } + + frameType, err := r.c.advanceFrame() + switch { + case err != nil: + r.c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := r.c.readErr + if err == io.EOF && r.seq == r.c.readSeq { + err = errUnexpectedEOF + } + return 0, err +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close frame to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING frame application data. The default +// ping handler sends a pong to the peer. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + return nil + } + } + c.handlePing = h +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG frame application data. The default +// pong handler does nothing. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. 
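+//
+// A minimal sketch of starting the closing handshake (assuming c is an open
+// *Conn):
+//
+//	msg := FormatCloseMessage(CloseNormalClosure, "bye")
+//	c.WriteControl(CloseMessage, msg, time.Now().Add(time.Second))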
+func FormatCloseMessage(closeCode int, text string) []byte { + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} === added file 'src/github.com/gorilla/websocket/conn_test.go' --- src/github.com/gorilla/websocket/conn_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/conn_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,241 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "reflect" + "testing" + "testing/iotest" + "time" +) + +var _ net.Error = errWriteTimeout + +type fakeNetConn struct { + io.Reader + io.Writer +} + +func (c fakeNetConn) Close() error { return nil } +func (c fakeNetConn) LocalAddr() net.Addr { return nil } +func (c fakeNetConn) RemoteAddr() net.Addr { return nil } +func (c fakeNetConn) SetDeadline(t time.Time) error { return nil } +func (c fakeNetConn) SetReadDeadline(t time.Time) error { return nil } +func (c fakeNetConn) SetWriteDeadline(t time.Time) error { return nil } + +func TestFraming(t *testing.T) { + frameSizes := []int{0, 1, 2, 124, 125, 126, 127, 128, 129, 65534, 65535, 65536, 65537} + var readChunkers = []struct { + name string + f func(io.Reader) io.Reader + }{ + {"half", iotest.HalfReader}, + {"one", iotest.OneByteReader}, + {"asis", func(r io.Reader) io.Reader { return r }}, + } + + writeBuf := make([]byte, 65537) + for i := range writeBuf { + writeBuf[i] = byte(i) + } + + for _, isServer := range []bool{true, false} { + for _, chunker := range readChunkers { + + var connBuf bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) + rc := newConn(fakeNetConn{Reader: chunker.f(&connBuf), Writer: nil}, !isServer, 1024, 1024) + + for _, n := range frameSizes { + for _, iocopy := range []bool{true, false} { + name := fmt.Sprintf("s:%v, r:%s, n:%d c:%v", isServer, chunker.name, n, iocopy) + + w, err := wc.NextWriter(TextMessage) + if err != nil { + t.Errorf("%s: wc.NextWriter() returned %v", name, err) + continue + } + var nn int + if iocopy { + var n64 int64 + n64, err = io.Copy(w, bytes.NewReader(writeBuf[:n])) + nn = int(n64) + } else { + nn, err = w.Write(writeBuf[:n]) + } + if err != nil || nn != n { + t.Errorf("%s: w.Write(writeBuf[:n]) returned %d, %v", name, nn, err) + continue + } + err = w.Close() + if err != nil { + t.Errorf("%s: w.Close() returned %v", name, err) + continue + } + + opCode, r, err := rc.NextReader() + if err != nil || opCode != TextMessage { + t.Errorf("%s: NextReader() returned %d, r, %v", name, opCode, err) + continue + } + rbuf, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("%s: ReadFull() returned rbuf, %v", name, err) + continue + } + + if len(rbuf) != n { + t.Errorf("%s: len(rbuf) is %d, want %d", name, len(rbuf), n) + continue + } + + for i, b := range rbuf { + if byte(i) != b { + t.Errorf("%s: bad byte at offset %d", name, i) + break + } + } + } + } + } + } +} + +func TestControl(t *testing.T) { + const message = "this is a ping/pong messsage" + for _, isServer := range []bool{true, false} { + for _, isWriteControl := range []bool{true, false} { + name := fmt.Sprintf("s:%v, wc:%v", isServer, isWriteControl) + var connBuf bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &connBuf}, isServer, 1024, 1024) + rc := newConn(fakeNetConn{Reader: &connBuf, Writer: nil}, 
!isServer, 1024, 1024) + if isWriteControl { + wc.WriteControl(PongMessage, []byte(message), time.Now().Add(time.Second)) + } else { + w, err := wc.NextWriter(PongMessage) + if err != nil { + t.Errorf("%s: wc.NextWriter() returned %v", name, err) + continue + } + if _, err := w.Write([]byte(message)); err != nil { + t.Errorf("%s: w.Write() returned %v", name, err) + continue + } + if err := w.Close(); err != nil { + t.Errorf("%s: w.Close() returned %v", name, err) + continue + } + var actualMessage string + rc.SetPongHandler(func(s string) error { actualMessage = s; return nil }) + rc.NextReader() + if actualMessage != message { + t.Errorf("%s: pong=%q, want %q", name, actualMessage, message) + continue + } + } + } + } +} + +func TestCloseBeforeFinalFrame(t *testing.T) { + const bufSize = 512 + + expectedErr := &CloseError{Code: CloseNormalClosure, Text: "hello"} + + var b1, b2 bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) + rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) + + w, _ := wc.NextWriter(BinaryMessage) + w.Write(make([]byte, bufSize+bufSize/2)) + wc.WriteControl(CloseMessage, FormatCloseMessage(expectedErr.Code, expectedErr.Text), time.Now().Add(10*time.Second)) + w.Close() + + op, r, err := rc.NextReader() + if op != BinaryMessage || err != nil { + t.Fatalf("NextReader() returned %d, %v", op, err) + } + _, err = io.Copy(ioutil.Discard, r) + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("io.Copy() returned %v, want %v", err, expectedErr) + } + _, _, err = rc.NextReader() + if !reflect.DeepEqual(err, expectedErr) { + t.Fatalf("NextReader() returned %v, want %v", err, expectedErr) + } +} + +func TestEOFBeforeFinalFrame(t *testing.T) { + const bufSize = 512 + + var b1, b2 bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, bufSize) + rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) + + w, _ := wc.NextWriter(BinaryMessage) + w.Write(make([]byte, bufSize+bufSize/2)) + + op, r, err := rc.NextReader() + if op != BinaryMessage || err != nil { + t.Fatalf("NextReader() returned %d, %v", op, err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != errUnexpectedEOF { + t.Fatalf("io.Copy() returned %v, want %v", err, errUnexpectedEOF) + } + _, _, err = rc.NextReader() + if err != errUnexpectedEOF { + t.Fatalf("NextReader() returned %v, want %v", err, errUnexpectedEOF) + } +} + +func TestReadLimit(t *testing.T) { + + const readLimit = 512 + message := make([]byte, readLimit+1) + + var b1, b2 bytes.Buffer + wc := newConn(fakeNetConn{Reader: nil, Writer: &b1}, false, 1024, readLimit-2) + rc := newConn(fakeNetConn{Reader: &b1, Writer: &b2}, true, 1024, 1024) + rc.SetReadLimit(readLimit) + + // Send message at the limit with interleaved pong. + w, _ := wc.NextWriter(BinaryMessage) + w.Write(message[:readLimit-1]) + wc.WriteControl(PongMessage, []byte("this is a pong"), time.Now().Add(10*time.Second)) + w.Write(message[:1]) + w.Close() + + // Send message larger than the limit. 
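+	// The limit is enforced in advanceFrame: readLength accumulates the
+	// declared lengths of the message's frames, and once it exceeds
+	// readLimit the connection sends a CloseMessageTooBig close frame and
+	// the read returns ErrReadLimit.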
+	wc.WriteMessage(BinaryMessage, message[:readLimit+1])
+
+	op, _, err := rc.NextReader()
+	if op != BinaryMessage || err != nil {
+		t.Fatalf("1: NextReader() returned %d, %v", op, err)
+	}
+	op, r, err := rc.NextReader()
+	if op != BinaryMessage || err != nil {
+		t.Fatalf("2: NextReader() returned %d, %v", op, err)
+	}
+	_, err = io.Copy(ioutil.Discard, r)
+	if err != ErrReadLimit {
+		t.Fatalf("io.Copy() returned %v", err)
+	}
+}
+
+func TestUnderlyingConn(t *testing.T) {
+	var b1, b2 bytes.Buffer
+	fc := fakeNetConn{Reader: &b1, Writer: &b2}
+	c := newConn(fc, true, 1024, 1024)
+	ul := c.UnderlyingConn()
+	if ul != fc {
+		t.Fatalf("Underlying conn is not what it should be.")
+	}
+}
=== added file 'src/github.com/gorilla/websocket/doc.go'
--- src/github.com/gorilla/websocket/doc.go	1970-01-01 00:00:00 +0000
+++ src/github.com/gorilla/websocket/doc.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,148 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application uses
+// the Upgrade function from an Upgrader object with an HTTP request handler
+// to get a pointer to a Conn:
+//
+//  var upgrader = websocket.Upgrader{
+//      ReadBufferSize:  1024,
+//      WriteBufferSize: 1024,
+//  }
+//
+//  func handler(w http.ResponseWriter, r *http.Request) {
+//      conn, err := upgrader.Upgrade(w, r, nil)
+//      if err != nil {
+//          log.Println(err)
+//          return
+//      }
+//      ... Use conn to send and receive messages.
+//  }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+//  for {
+//      messageType, p, err := conn.ReadMessage()
+//      if err != nil {
+//          return
+//      }
+//      if err = conn.WriteMessage(messageType, p); err != nil {
+//          return err
+//      }
+//  }
+//
+// In the snippet above, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+//  for {
+//      messageType, r, err := conn.NextReader()
+//      if err != nil {
+//          return
+//      }
+//      w, err := conn.NextWriter(messageType)
+//      if err != nil {
+//          return err
+//      }
+//      if _, err := io.Copy(w, r); err != nil {
+//          return err
+//      }
+//      if err := w.Close(); err != nil {
+//          return err
+//      }
+//  }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received ping and pong messages by invoking a callback
+// function set with SetPingHandler and SetPongHandler methods. These callback
+// functions can be invoked from the ReadMessage method, the NextReader method
+// or from a call to the data message reader returned from NextReader.
+//
+// Connections handle received close messages by returning an error from the
+// ReadMessage method, the NextReader method or from a call to the data message
+// reader returned from NextReader.
+//
+// Concurrency
+//
+// Connections do not support concurrent calls to the write methods
+// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read
+// methods (NextReader, SetReadDeadline, ReadMessage). Connections do
+// support a concurrent reader and writer.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Read is Required
+//
+// The application must read the connection to process ping and close messages
+// sent from the peer. If the application is not otherwise interested in
+// messages from the peer, then the application should start a goroutine to read
+// and discard messages from the peer. A simple example is:
+//
+//  func readLoop(c *websocket.Conn) {
+//      for {
+//          if _, _, err := c.NextReader(); err != nil {
+//              c.Close()
+//              break
+//          }
+//      }
+//  }
+//
+// Origin Considerations
+//
+// Web browsers allow JavaScript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and not equal to the
+// Host request header.
+//
+// An application can allow connections from any origin by specifying a
+// function that always returns true:
+//
+//  var upgrader = websocket.Upgrader{
+//      CheckOrigin: func(r *http.Request) bool { return true },
+//  }
+//
+// The deprecated Upgrade function does not enforce an origin policy. It's the
+// application's responsibility to check the Origin header before calling
+// Upgrade.
+package websocket
=== added directory 'src/github.com/gorilla/websocket/examples'
=== added directory 'src/github.com/gorilla/websocket/examples/autobahn'
=== added file 'src/github.com/gorilla/websocket/examples/autobahn/README.md'
--- src/github.com/gorilla/websocket/examples/autobahn/README.md	1970-01-01 00:00:00 +0000
+++ src/github.com/gorilla/websocket/examples/autobahn/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,13 @@
+# Test Server
+
+This package contains a server for the [Autobahn WebSockets Test Suite](http://autobahn.ws/testsuite).
+ +To test the server, run + + go run server.go + +and start the client test driver + + wstest -m fuzzingclient -s fuzzingclient.json + +When the client completes, it writes a report to reports/clients/index.html. === added file 'src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json' --- src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/autobahn/fuzzingclient.json 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ + +{ + "options": {"failByDrop": false}, + "outdir": "./reports/clients", + "servers": [ + {"agent": "ReadAllWriteMessage", "url": "ws://localhost:9000/m", "options": {"version": 18}}, + {"agent": "ReadAllWrite", "url": "ws://localhost:9000/r", "options": {"version": 18}}, + {"agent": "CopyFull", "url": "ws://localhost:9000/f", "options": {"version": 18}}, + {"agent": "CopyWriterOnly", "url": "ws://localhost:9000/c", "options": {"version": 18}} + ], + "cases": ["*"], + "exclude-cases": [], + "exclude-agent-cases": {} +} === added file 'src/github.com/gorilla/websocket/examples/autobahn/server.go' --- src/github.com/gorilla/websocket/examples/autobahn/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/autobahn/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,246 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Command server is a test server for the Autobahn WebSockets Test Suite. +package main + +import ( + "errors" + "flag" + "github.com/gorilla/websocket" + "io" + "log" + "net/http" + "time" + "unicode/utf8" +) + +var upgrader = websocket.Upgrader{ + ReadBufferSize: 4096, + WriteBufferSize: 4096, + CheckOrigin: func(r *http.Request) bool { + return true + }, +} + +// echoCopy echoes messages from the client using io.Copy. +func echoCopy(w http.ResponseWriter, r *http.Request, writerOnly bool) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Upgrade:", err) + return + } + defer conn.Close() + for { + mt, r, err := conn.NextReader() + if err != nil { + if err != io.EOF { + log.Println("NextReader:", err) + } + return + } + if mt == websocket.TextMessage { + r = &validator{r: r} + } + w, err := conn.NextWriter(mt) + if err != nil { + log.Println("NextWriter:", err) + return + } + if mt == websocket.TextMessage { + r = &validator{r: r} + } + if writerOnly { + _, err = io.Copy(struct{ io.Writer }{w}, r) + } else { + _, err = io.Copy(w, r) + } + if err != nil { + if err == errInvalidUTF8 { + conn.WriteControl(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), + time.Time{}) + } + log.Println("Copy:", err) + return + } + err = w.Close() + if err != nil { + log.Println("Close:", err) + return + } + } +} + +func echoCopyWriterOnly(w http.ResponseWriter, r *http.Request) { + echoCopy(w, r, true) +} + +func echoCopyFull(w http.ResponseWriter, r *http.Request) { + echoCopy(w, r, false) +} + +// echoReadAll echoes messages from the client by reading the entire message +// with ioutil.ReadAll. 
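+// Buffering the whole message is the simplest approach; for large messages
+// the streaming NextReader/NextWriter path used by echoCopy above avoids
+// holding the full payload in memory.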
+func echoReadAll(w http.ResponseWriter, r *http.Request, writeMessage bool) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println("Upgrade:", err) + return + } + defer conn.Close() + for { + mt, b, err := conn.ReadMessage() + if err != nil { + if err != io.EOF { + log.Println("NextReader:", err) + } + return + } + if mt == websocket.TextMessage { + if !utf8.Valid(b) { + conn.WriteControl(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseInvalidFramePayloadData, ""), + time.Time{}) + log.Println("ReadAll: invalid utf8") + } + } + if writeMessage { + err = conn.WriteMessage(mt, b) + if err != nil { + log.Println("WriteMessage:", err) + } + } else { + w, err := conn.NextWriter(mt) + if err != nil { + log.Println("NextWriter:", err) + return + } + if _, err := w.Write(b); err != nil { + log.Println("Writer:", err) + return + } + if err := w.Close(); err != nil { + log.Println("Close:", err) + return + } + } + } +} + +func echoReadAllWriter(w http.ResponseWriter, r *http.Request) { + echoReadAll(w, r, false) +} + +func echoReadAllWriteMessage(w http.ResponseWriter, r *http.Request) { + echoReadAll(w, r, true) +} + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found.", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + io.WriteString(w, "Echo Server") +} + +var addr = flag.String("addr", ":9000", "http service address") + +func main() { + flag.Parse() + http.HandleFunc("/", serveHome) + http.HandleFunc("/c", echoCopyWriterOnly) + http.HandleFunc("/f", echoCopyFull) + http.HandleFunc("/r", echoReadAllWriter) + http.HandleFunc("/m", echoReadAllWriteMessage) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} + +type validator struct { + state int + x rune + r io.Reader +} + +var errInvalidUTF8 = errors.New("invalid utf8") + +func (r *validator) Read(p []byte) (int, error) { + n, err := r.r.Read(p) + state := r.state + x := r.x + for _, b := range p[:n] { + state, x = decode(state, x, b) + if state == utf8Reject { + break + } + } + r.state = state + r.x = x + if state == utf8Reject || (err == io.EOF && state != utf8Accept) { + return n, errInvalidUTF8 + } + return n, err +} + +// UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ +// +// Copyright (c) 2008-2009 Bjoern Hoehrmann +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to +// deal in the Software without restriction, including without limitation the +// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +// sell copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +// IN THE SOFTWARE. +var utf8d = [...]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 00..1f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20..3f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 40..5f + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 60..7f + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, // 80..9f + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, // a0..bf + 8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // c0..df + 0xa, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x3, 0x4, 0x3, 0x3, // e0..ef + 0xb, 0x6, 0x6, 0x6, 0x5, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, 0x8, // f0..ff + 0x0, 0x1, 0x2, 0x3, 0x5, 0x8, 0x7, 0x1, 0x1, 0x1, 0x4, 0x6, 0x1, 0x1, 0x1, 0x1, // s0..s0 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, // s1..s2 + 1, 2, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, // s3..s4 + 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, // s5..s6 + 1, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // s7..s8 +} + +const ( + utf8Accept = 0 + utf8Reject = 1 +) + +func decode(state int, x rune, b byte) (int, rune) { + t := utf8d[b] + if state != utf8Accept { + x = rune(b&0x3f) | (x << 6) + } else { + x = rune((0xff >> t) & b) + } + state = int(utf8d[256+state*16+int(t)]) + return state, x +} === added directory 'src/github.com/gorilla/websocket/examples/chat' === added file 'src/github.com/gorilla/websocket/examples/chat/README.md' --- src/github.com/gorilla/websocket/examples/chat/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/chat/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +# Chat Example + +This application shows how to use use the +[websocket](https://github.com/gorilla/websocket) package and +[jQuery](http://jquery.com) to implement a simple web chat application. + +## Running the example + +The example requires a working Go development environment. The [Getting +Started](http://golang.org/doc/install) page describes how to install the +development environment. + +Once you have Go up and running, you can download, build and run the example +using the following commands. + + $ go get github.com/gorilla/websocket + $ cd `go list -f '{{.Dir}}' github.com/gorilla/websocket/examples/chat` + $ go run *.go + +To use the chat example, open http://localhost:8080/ in your browser. === added file 'src/github.com/gorilla/websocket/examples/chat/conn.go' --- src/github.com/gorilla/websocket/examples/chat/conn.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/chat/conn.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,106 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package main
+
+import (
+	"github.com/gorilla/websocket"
+	"log"
+	"net/http"
+	"time"
+)
+
+const (
+	// Time allowed to write a message to the peer.
+	writeWait = 10 * time.Second
+
+	// Time allowed to read the next pong message from the peer.
+	pongWait = 60 * time.Second
+
+	// Send pings to peer with this period. Must be less than pongWait.
+	pingPeriod = (pongWait * 9) / 10
+
+	// Maximum message size allowed from peer.
+	maxMessageSize = 512
+)
+
+var upgrader = websocket.Upgrader{
+	ReadBufferSize:  1024,
+	WriteBufferSize: 1024,
+}
+
+// connection is a middleman between the websocket connection and the hub.
+type connection struct {
+	// The websocket connection.
+	ws *websocket.Conn
+
+	// Buffered channel of outbound messages.
+	send chan []byte
+}
+
+// readPump pumps messages from the websocket connection to the hub.
+func (c *connection) readPump() {
+	defer func() {
+		h.unregister <- c
+		c.ws.Close()
+	}()
+	c.ws.SetReadLimit(maxMessageSize)
+	c.ws.SetReadDeadline(time.Now().Add(pongWait))
+	c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
+	for {
+		_, message, err := c.ws.ReadMessage()
+		if err != nil {
+			break
+		}
+		h.broadcast <- message
+	}
+}
+
+// write writes a message with the given message type and payload.
+func (c *connection) write(mt int, payload []byte) error {
+	c.ws.SetWriteDeadline(time.Now().Add(writeWait))
+	return c.ws.WriteMessage(mt, payload)
+}
+
+// writePump pumps messages from the hub to the websocket connection.
+func (c *connection) writePump() {
+	ticker := time.NewTicker(pingPeriod)
+	defer func() {
+		ticker.Stop()
+		c.ws.Close()
+	}()
+	for {
+		select {
+		case message, ok := <-c.send:
+			if !ok {
+				c.write(websocket.CloseMessage, []byte{})
+				return
+			}
+			if err := c.write(websocket.TextMessage, message); err != nil {
+				return
+			}
+		case <-ticker.C:
+			if err := c.write(websocket.PingMessage, []byte{}); err != nil {
+				return
+			}
+		}
+	}
+}
+
+// serveWs handles websocket requests from the peer.
+func serveWs(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		http.Error(w, "Method not allowed", 405)
+		return
+	}
+	ws, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		log.Println(err)
+		return
+	}
+	c := &connection{send: make(chan []byte, 256), ws: ws}
+	h.register <- c
+	go c.writePump()
+	c.readPump()
+}
=== added file 'src/github.com/gorilla/websocket/examples/chat/home.html'
--- src/github.com/gorilla/websocket/examples/chat/home.html 1970-01-01 00:00:00 +0000
+++ src/github.com/gorilla/websocket/examples/chat/home.html 2016-03-22 15:18:22 +0000
@@ -0,0 +1,92 @@
+<!-- The 92 lines of home.html (a page titled "Chat Example" with the chat
+     form markup and the jQuery script that connects to /ws) were stripped
+     from this capture; only this placeholder remains. -->
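Because the page script above did not survive the capture, here is a minimal sketch, in Go rather than the original jQuery, of a client exercising the same `/ws` endpoint. The `websocket.Dialer` usage mirrors `examples/echo/client.go` later in this tree; the address and message text are illustrative assumptions.

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	var dialer websocket.Dialer // default options, as in examples/echo/client.go

	// serveWs (conn.go above) is registered on /ws by main.go, which
	// listens on :8080 by default.
	c, _, err := dialer.Dial("ws://localhost:8080/ws", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer c.Close()

	// A text message sent here reaches readPump, which forwards it to the
	// hub (hub.go, below); the hub broadcasts it to every registered
	// connection, including this one.
	if err := c.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}

	_, msg, err := c.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("recv: %s", msg)
}
```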
+ + === added file 'src/github.com/gorilla/websocket/examples/chat/hub.go' --- src/github.com/gorilla/websocket/examples/chat/hub.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/chat/hub.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,51 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// hub maintains the set of active connections and broadcasts messages to the +// connections. +type hub struct { + // Registered connections. + connections map[*connection]bool + + // Inbound messages from the connections. + broadcast chan []byte + + // Register requests from the connections. + register chan *connection + + // Unregister requests from connections. + unregister chan *connection +} + +var h = hub{ + broadcast: make(chan []byte), + register: make(chan *connection), + unregister: make(chan *connection), + connections: make(map[*connection]bool), +} + +func (h *hub) run() { + for { + select { + case c := <-h.register: + h.connections[c] = true + case c := <-h.unregister: + if _, ok := h.connections[c]; ok { + delete(h.connections, c) + close(c.send) + } + case m := <-h.broadcast: + for c := range h.connections { + select { + case c.send <- m: + default: + close(c.send) + delete(h.connections, c) + } + } + } + } +} === added file 'src/github.com/gorilla/websocket/examples/chat/main.go' --- src/github.com/gorilla/websocket/examples/chat/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/chat/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "log" + "net/http" + "text/template" +) + +var addr = flag.String("addr", ":8080", "http service address") +var homeTempl = template.Must(template.ParseFiles("home.html")) + +func serveHome(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + homeTempl.Execute(w, r.Host) +} + +func main() { + flag.Parse() + go h.run() + http.HandleFunc("/", serveHome) + http.HandleFunc("/ws", serveWs) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} === added directory 'src/github.com/gorilla/websocket/examples/echo' === added file 'src/github.com/gorilla/websocket/examples/echo/README.md' --- src/github.com/gorilla/websocket/examples/echo/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/echo/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +# Client and server example + +This example shows a simple client and server. + +The server echoes messages sent to it. The client sends a message every five +seconds and prints all messages received. + +To run the example, start the server: + + $ go run server.go + +Next, start the client: + + $ go run client.go + === added file 'src/github.com/gorilla/websocket/examples/echo/client.go' --- src/github.com/gorilla/websocket/examples/echo/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/echo/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2015 The Gorilla WebSocket Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + "net/url" + "time" + + "github.com/gorilla/websocket" +) + +var addr = flag.String("addr", "localhost:8081", "http service address") + +var dialer = websocket.Dialer{} // use default options + +func main() { + flag.Parse() + log.SetFlags(0) + + u := url.URL{Scheme: "ws", Host: *addr, Path: "/"} + log.Printf("connecting to %s", u.String()) + + c, _, err := dialer.Dial(u.String(), nil) + if err != nil { + log.Fatal("dial:", err) + } + defer c.Close() + + go func() { + defer c.Close() + for { + _, message, err := c.ReadMessage() + if err != nil { + log.Println("read:", err) + break + } + log.Printf("recv: %s", message) + } + }() + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for t := range ticker.C { + err := c.WriteMessage(websocket.TextMessage, []byte(t.String())) + if err != nil { + log.Println("write:", err) + break + } + } +} === added file 'src/github.com/gorilla/websocket/examples/echo/server.go' --- src/github.com/gorilla/websocket/examples/echo/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/echo/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,60 @@ +// Copyright 2015 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + "net/http" + + "github.com/gorilla/websocket" +) + +var addr = flag.String("addr", "localhost:8081", "http service address") + +var upgrader = websocket.Upgrader{} // use default options + +func echo(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/" { + http.Error(w, "Not found", 404) + return + } + if r.Method != "GET" { + http.Error(w, "Method not allowed", 405) + return + } + c, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Print("upgrade:", err) + return + } + defer c.Close() + for { + mt, message, err := c.ReadMessage() + if err != nil { + log.Println("read:", err) + break + } + log.Printf("recv: %s", message) + err = c.WriteMessage(mt, message) + if err != nil { + log.Println("write:", err) + break + } + } +} + +func main() { + flag.Parse() + log.SetFlags(0) + + http.HandleFunc("/", echo) + err := http.ListenAndServe(*addr, nil) + if err != nil { + log.Fatal("ListenAndServe: ", err) + } +} === added directory 'src/github.com/gorilla/websocket/examples/filewatch' === added file 'src/github.com/gorilla/websocket/examples/filewatch/README.md' --- src/github.com/gorilla/websocket/examples/filewatch/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/filewatch/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +# File Watch example. + +This example sends a file to the browser client for display whenever the file is modified. + + $ go get github.com/gorilla/websocket + $ cd `go list -f '{{.Dir}}' github.com/gorilla/websocket/examples/filewatch` + $ go run main.go + # Open http://localhost:8080/ . + # Modify the file to see it update in the browser. === added file 'src/github.com/gorilla/websocket/examples/filewatch/main.go' --- src/github.com/gorilla/websocket/examples/filewatch/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/examples/filewatch/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,193 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+	"flag"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"os"
+	"strconv"
+	"text/template"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+const (
+	// Time allowed to write the file to the client.
+	writeWait = 10 * time.Second
+
+	// Time allowed to read the next pong message from the client.
+	pongWait = 60 * time.Second
+
+	// Send pings to client with this period. Must be less than pongWait.
+	pingPeriod = (pongWait * 9) / 10
+
+	// Poll file for changes with this period.
+	filePeriod = 10 * time.Second
+)
+
+var (
+	addr      = flag.String("addr", ":8080", "http service address")
+	homeTempl = template.Must(template.New("").Parse(homeHTML))
+	filename  string
+	upgrader  = websocket.Upgrader{
+		ReadBufferSize:  1024,
+		WriteBufferSize: 1024,
+	}
+)
+
+func readFileIfModified(lastMod time.Time) ([]byte, time.Time, error) {
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return nil, lastMod, err
+	}
+	if !fi.ModTime().After(lastMod) {
+		return nil, lastMod, nil
+	}
+	p, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, fi.ModTime(), err
+	}
+	return p, fi.ModTime(), nil
+}
+
+func reader(ws *websocket.Conn) {
+	defer ws.Close()
+	ws.SetReadLimit(512)
+	ws.SetReadDeadline(time.Now().Add(pongWait))
+	ws.SetPongHandler(func(string) error { ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
+	for {
+		_, _, err := ws.ReadMessage()
+		if err != nil {
+			break
+		}
+	}
+}
+
+func writer(ws *websocket.Conn, lastMod time.Time) {
+	lastError := ""
+	pingTicker := time.NewTicker(pingPeriod)
+	fileTicker := time.NewTicker(filePeriod)
+	defer func() {
+		pingTicker.Stop()
+		fileTicker.Stop()
+		ws.Close()
+	}()
+	for {
+		select {
+		case <-fileTicker.C:
+			var p []byte
+			var err error
+
+			p, lastMod, err = readFileIfModified(lastMod)
+
+			if err != nil {
+				if s := err.Error(); s != lastError {
+					lastError = s
+					p = []byte(lastError)
+				}
+			} else {
+				lastError = ""
+			}
+
+			if p != nil {
+				ws.SetWriteDeadline(time.Now().Add(writeWait))
+				if err := ws.WriteMessage(websocket.TextMessage, p); err != nil {
+					return
+				}
+			}
+		case <-pingTicker.C:
+			ws.SetWriteDeadline(time.Now().Add(writeWait))
+			if err := ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {
+				return
+			}
+		}
+	}
+}
+
+func serveWs(w http.ResponseWriter, r *http.Request) {
+	ws, err := upgrader.Upgrade(w, r, nil)
+	if err != nil {
+		if _, ok := err.(websocket.HandshakeError); !ok {
+			log.Println(err)
+		}
+		return
+	}
+
+	// Use the client-supplied modification time if it parses; otherwise
+	// lastMod stays the zero time and the file is sent on the first poll.
+	var lastMod time.Time
+	if n, err := strconv.ParseInt(r.FormValue("lastMod"), 16, 64); err == nil {
+		lastMod = time.Unix(0, n)
+	}
+
+	go writer(ws, lastMod)
+	reader(ws)
+}
+
+func serveHome(w http.ResponseWriter, r *http.Request) {
+	if r.URL.Path != "/" {
+		http.Error(w, "Not found", 404)
+		return
+	}
+	if r.Method != "GET" {
+		http.Error(w, "Method not allowed", 405)
+		return
+	}
+	w.Header().Set("Content-Type", "text/html; charset=utf-8")
+	p, lastMod, err := readFileIfModified(time.Time{})
+	if err != nil {
+		p = []byte(err.Error())
+		lastMod = time.Unix(0, 0)
+	}
+	var v = struct {
+		Host    string
+		Data    string
+		LastMod string
+	}{
+		r.Host,
+		string(p),
+		strconv.FormatInt(lastMod.UnixNano(), 16),
+	}
+	homeTempl.Execute(w, &v)
+}
+
+func main() {
+	flag.Parse()
+	if flag.NArg() != 1 {
+		log.Fatal("filename not specified")
+	}
+	filename = flag.Args()[0]
+	http.HandleFunc("/", serveHome)
+	http.HandleFunc("/ws", serveWs)
+	if err := http.ListenAndServe(*addr, nil); err != nil {
+		log.Fatal(err)
+	}
+}
+
+const homeHTML = `<!-- The template body (an HTML page titled "WebSocket
+Example" whose script opens a WebSocket to ws://{{.Host}}/ws?lastMod={{.LastMod}}
+and replaces the displayed file contents on each message) was stripped from
+this capture; only the {{.Data}} placeholder below survives. -->
+{{.Data}}
+ + + +` === added file 'src/github.com/gorilla/websocket/json.go' --- src/github.com/gorilla/websocket/json.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/json.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,55 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON is deprecated, use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v to the connection. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON is deprecated, use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} === added file 'src/github.com/gorilla/websocket/json_test.go' --- src/github.com/gorilla/websocket/json_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/json_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "encoding/json" + "io" + "reflect" + "testing" +) + +func TestJSON(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var actual, expect struct { + A int + B string + } + expect.A = 1 + expect.B = "hello" + + if err := wc.WriteJSON(&expect); err != nil { + t.Fatal("write", err) + } + + if err := rc.ReadJSON(&actual); err != nil { + t.Fatal("read", err) + } + + if !reflect.DeepEqual(&actual, &expect) { + t.Fatal("equal", actual, expect) + } +} + +func TestPartialJSONRead(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var v struct { + A int + B string + } + v.A = 1 + v.B = "hello" + + messageCount := 0 + + // Partial JSON values. + + data, err := json.Marshal(v) + if err != nil { + t.Fatal(err) + } + for i := len(data) - 1; i >= 0; i-- { + if err := wc.WriteMessage(TextMessage, data[:i]); err != nil { + t.Fatal(err) + } + messageCount++ + } + + // Whitespace. + + if err := wc.WriteMessage(TextMessage, []byte(" ")); err != nil { + t.Fatal(err) + } + messageCount++ + + // Close. 
+ + if err := wc.WriteMessage(CloseMessage, FormatCloseMessage(CloseNormalClosure, "")); err != nil { + t.Fatal(err) + } + + for i := 0; i < messageCount; i++ { + err := rc.ReadJSON(&v) + if err != io.ErrUnexpectedEOF { + t.Error("read", i, err) + } + } + + err = rc.ReadJSON(&v) + if _, ok := err.(*CloseError); !ok { + t.Error("final", err) + } +} + +func TestDeprecatedJSON(t *testing.T) { + var buf bytes.Buffer + c := fakeNetConn{&buf, &buf} + wc := newConn(c, true, 1024, 1024) + rc := newConn(c, false, 1024, 1024) + + var actual, expect struct { + A int + B string + } + expect.A = 1 + expect.B = "hello" + + if err := WriteJSON(wc, &expect); err != nil { + t.Fatal("write", err) + } + + if err := ReadJSON(rc, &actual); err != nil { + t.Fatal("read", err) + } + + if !reflect.DeepEqual(&actual, &expect) { + t.Fatal("equal", actual, expect) + } +} === added file 'src/github.com/gorilla/websocket/server.go' --- src/github.com/gorilla/websocket/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,247 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a default value of 4096 is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is set, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, the host in the Origin header must not be set or + // must match the host of the request. + CheckOrigin func(r *http.Request) bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
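+// It is the policy Upgrade falls back on when Upgrader.CheckOrigin is nil
+// (see the checkOrigin assignment in Upgrade below).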
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + var ( + netConn net.Conn + br *bufio.Reader + err error + ) + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var rw *bufio.ReadWriter + netConn, rw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + br = rw.Reader + + if br.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) + c.subprotocol = subprotocol + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. 
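+	// (net/http arms read/write deadlines on the underlying connection when
+	// the server's ReadTimeout/WriteTimeout are set; the zero time.Time
+	// below removes them before the handshake response is written.)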
+ netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// This function is deprecated, use websocket.Upgrader instead. +// +// The application is responsible for checking the request origin before +// calling Upgrade. An example implementation of the same origin policy is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. +func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} === added file 'src/github.com/gorilla/websocket/server_test.go' --- src/github.com/gorilla/websocket/server_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/server_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,33 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "net/http" + "reflect" + "testing" +) + +var subprotocolTests = []struct { + h string + protocols []string +}{ + {"", nil}, + {"foo", []string{"foo"}}, + {"foo,bar", []string{"foo", "bar"}}, + {"foo, bar", []string{"foo", "bar"}}, + {" foo, bar", []string{"foo", "bar"}}, + {" foo, bar ", []string{"foo", "bar"}}, +} + +func TestSubprotocols(t *testing.T) { + for _, st := range subprotocolTests { + r := http.Request{Header: http.Header{"Sec-Websocket-Protocol": {st.h}}} + protocols := Subprotocols(&r) + if !reflect.DeepEqual(st.protocols, protocols) { + t.Errorf("SubProtocols(%q) returned %#v, want %#v", st.h, protocols, st.protocols) + } + } +} === added file 'src/github.com/gorilla/websocket/util.go' --- src/github.com/gorilla/websocket/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,44 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" +) + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. +func tokenListContainsValue(header http.Header, name string, value string) bool { + for _, v := range header[name] { + for _, s := range strings.Split(v, ",") { + if strings.EqualFold(value, strings.TrimSpace(s)) { + return true + } + } + } + return false +} + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} === added file 'src/github.com/gorilla/websocket/util_test.go' --- src/github.com/gorilla/websocket/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gorilla/websocket/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2014 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "net/http" + "testing" +) + +var tokenListContainsValueTests = []struct { + value string + ok bool +}{ + {"WebSocket", true}, + {"WEBSOCKET", true}, + {"websocket", true}, + {"websockets", false}, + {"x websocket", false}, + {"websocket x", false}, + {"other,websocket,more", true}, + {"other, websocket, more", true}, +} + +func TestTokenListContainsValue(t *testing.T) { + for _, tt := range tokenListContainsValueTests { + h := http.Header{"Upgrade": {tt.value}} + ok := tokenListContainsValue(h, "Upgrade", "websocket") + if ok != tt.ok { + t.Errorf("tokenListContainsValue(h, n, %q) = %v, want %v", tt.value, ok, tt.ok) + } + } +} === added directory 'src/github.com/gosuri' === added directory 'src/github.com/gosuri/uitable' === added file 'src/github.com/gosuri/uitable/.travis.yml' --- src/github.com/gosuri/uitable/.travis.yml 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/.travis.yml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +language: go +sudo: false +install: + - go get ./... 
+go:
+  - 1.4
+  - 1.5.2
+  - tip
=== added file 'src/github.com/gosuri/uitable/LICENSE'
--- src/github.com/gosuri/uitable/LICENSE 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/LICENSE 2016-03-22 15:18:22 +0000
@@ -0,0 +1,10 @@
+MIT License
+===========
+
+Copyright (c) 2015, Greg Osuri
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
=== added file 'src/github.com/gosuri/uitable/Makefile'
--- src/github.com/gosuri/uitable/Makefile 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/Makefile 2016-03-22 15:18:22 +0000
@@ -0,0 +1,4 @@
+test:
+	@go test ./...
+
+.PHONY: test
=== added file 'src/github.com/gosuri/uitable/README.md'
--- src/github.com/gosuri/uitable/README.md 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/README.md 2016-03-22 15:18:22 +0000
@@ -0,0 +1,67 @@
+# uitable [![GoDoc](https://godoc.org/github.com/gosuri/uitable?status.svg)](https://godoc.org/github.com/gosuri/uitable) [![Build Status](https://travis-ci.org/gosuri/uitable.svg?branch=master)](https://travis-ci.org/gosuri/uitable)
+
+uitable is a Go library for representing data as tables for terminal applications. It provides primitives for sizing and wrapping columns to improve readability.
+
+## Example Usage
+
+Full source code for the example is available at [example/main.go](example/main.go)
+
+```go
+table := uitable.New()
+table.MaxColWidth = 50
+
+table.AddRow("NAME", "BIRTHDAY", "BIO")
+for _, hacker := range hackers {
+	table.AddRow(hacker.Name, hacker.Birthday, hacker.Bio)
+}
+fmt.Println(table)
+```
+
+Will render the data as:
+
+```sh
+NAME         BIRTHDAY           BIO
+Ada Lovelace December 10, 1815  Ada was a British mathematician and writer, chi...
+Alan Turing  June 23, 1912      Alan was a British pioneering computer scientis...
+``` + +For wrapping in two columns: + +```go +table = uitable.New() +table.MaxColWidth = 80 +table.Wrap = true // wrap columns + +for _, hacker := range hackers { + table.AddRow("Name:", hacker.Name) + table.AddRow("Birthday:", hacker.Birthday) + table.AddRow("Bio:", hacker.Bio) + table.AddRow("") // blank +} +fmt.Println(table) +``` + +Will render the data as: + +``` +Name: Ada Lovelace +Birthday: December 10, 1815 +Bio: Ada was a British mathematician and writer, chiefly known for her work on + Charles Babbage's early mechanical general-purpose computer, the Analytical + Engine + +Name: Alan Turing +Birthday: June 23, 1912 +Bio: Alan was a British pioneering computer scientist, mathematician, logician, + cryptanalyst and theoretical biologist +``` + +## Installation + +``` +$ go get -v github.com/gosuri/uitable +``` + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/gosuri/uitable/trend.png)](https://bitdeli.com/free "Bitdeli Badge") + === added directory 'src/github.com/gosuri/uitable/example' === added file 'src/github.com/gosuri/uitable/example/main.go' --- src/github.com/gosuri/uitable/example/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/example/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + + "github.com/gosuri/uitable" +) + +type hacker struct { + Name, Birthday, Bio string +} + +var hackers = []hacker{ + {"Ada Lovelace", "December 10, 1815", "Ada was a British mathematician and writer, chiefly known for her work on Charles Babbage's early mechanical general-purpose computer, the Analytical Engine"}, + {"Alan Turing", "June 23, 1912", "Alan was a British pioneering computer scientist, mathematician, logician, cryptanalyst and theoretical biologist"}, +} + +func main() { + table := uitable.New() + table.MaxColWidth = 50 + + fmt.Println("==> List") + table.AddRow("NAME", "BIRTHDAY", "BIO") + for _, hacker := range hackers { + table.AddRow(hacker.Name, hacker.Birthday, hacker.Bio) + } + fmt.Println(table) + + fmt.Print("\n==> Details\n") + table = uitable.New() + table.MaxColWidth = 80 + table.Wrap = true + for _, hacker := range hackers { + table.AddRow("Name:", hacker.Name) + table.AddRow("Birthday:", hacker.Birthday) + table.AddRow("Bio:", hacker.Bio) + table.AddRow("") // blank + } + fmt.Println(table) +} === added file 'src/github.com/gosuri/uitable/example_test.go' --- src/github.com/gosuri/uitable/example_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/example_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,40 @@ +package uitable_test + +import ( + "fmt" + + "github.com/gosuri/uitable" +) + +type hacker struct { + Name, Birthday, Bio string +} + +var hackers = []hacker{ + {"Ada Lovelace", "December 10, 1815", "Ada was a British mathematician and writer, chiefly known for her work on Charles Babbage's early mechanical general-purpose computer, the Analytical Engine"}, + {"Alan Turing", "June 23, 1912", "Alan was a British pioneering computer scientist, mathematician, logician, cryptanalyst and theoretical biologist"}, +} + +func Example() { + table := uitable.New() + table.MaxColWidth = 50 + + fmt.Println("==> List") + table.AddRow("NAME", "BIRTHDAY", "BIO") + for _, hacker := range hackers { + table.AddRow(hacker.Name, hacker.Birthday, hacker.Bio) + } + fmt.Println(table) + + fmt.Print("\n==> Details\n") + table = uitable.New() + table.MaxColWidth = 80 + table.Wrap = true + for _, hacker := range hackers { + table.AddRow("Name:", hacker.Name) + table.AddRow("Birthday:", hacker.Birthday) + 
table.AddRow("Bio:", hacker.Bio) + table.AddRow("") // blank + } + fmt.Println(table) +} === added file 'src/github.com/gosuri/uitable/table.go' --- src/github.com/gosuri/uitable/table.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/table.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,190 @@ +// Package uitable provides a decorator for formating data as a table +package uitable + +import ( + "fmt" + "strings" + "sync" + + "github.com/gosuri/uitable/util/strutil" + "github.com/gosuri/uitable/util/wordwrap" + "github.com/mattn/go-runewidth" +) + +// Separator is the default column seperator +var Separator = "\t" + +// Table represents a decorator that renders the data in formatted in a table +type Table struct { + // Rows is the collection of rows in the table + Rows []*Row + + // MaxColWidth is the maximum allowed width for cells in the table + MaxColWidth uint + + // Wrap when set to true wraps the contents of the columns when the length exceeds the MaxColWidth + Wrap bool + + // Separator is the seperator for columns in the table. Default is "\t" + Separator string + + mtx *sync.RWMutex +} + +// New returns a new Table with default values +func New() *Table { + return &Table{ + Separator: Separator, + mtx: new(sync.RWMutex), + } +} + +// AddRow adds a new row to the table +func (t *Table) AddRow(data ...interface{}) *Table { + t.mtx.Lock() + defer t.mtx.Unlock() + r := NewRow(data...) + t.Rows = append(t.Rows, r) + return t +} + +// Bytes returns the []byte value of table +func (t *Table) Bytes() []byte { + return []byte(t.String()) +} + +// String returns the string value of table +func (t *Table) String() string { + t.mtx.RLock() + defer t.mtx.RUnlock() + + if len(t.Rows) == 0 { + return "" + } + + // determine the width for each column (cell in a row) + var colwidths []uint + for _, row := range t.Rows { + for i, cell := range row.Cells { + // resize colwidth array + if i+1 > len(colwidths) { + colwidths = append(colwidths, 0) + } + cellwidth := cell.LineWidth() + if t.MaxColWidth != 0 && cellwidth > t.MaxColWidth { + cellwidth = t.MaxColWidth + } + + if cellwidth > colwidths[i] { + colwidths[i] = cellwidth + } + } + } + + var lines []string + for _, row := range t.Rows { + row.Separator = t.Separator + for i, cell := range row.Cells { + cell.Width = colwidths[i] + cell.Wrap = t.Wrap + } + lines = append(lines, row.String()) + } + return strutil.Join(lines, "\n") +} + +// Row represents a row in a table +type Row struct { + // Cells is the group of cell for the row + Cells []*Cell + + // Separator for tabular columns + Separator string +} + +// NewRow returns a new Row and adds the data to the row +func NewRow(data ...interface{}) *Row { + r := &Row{Cells: make([]*Cell, len(data))} + for i, d := range data { + r.Cells[i] = &Cell{Data: d} + } + return r +} + +// String returns the string representation of the row +func (r *Row) String() string { + // get the max number of lines for each cell + var lc int // line count + for _, cell := range r.Cells { + if clc := len(strings.Split(cell.String(), "\n")); clc > lc { + lc = clc + } + } + + // allocate a two-dimentional array of cells for each line and add size them + cells := make([][]*Cell, lc) + for x := 0; x < lc; x++ { + cells[x] = make([]*Cell, len(r.Cells)) + for y := 0; y < len(r.Cells); y++ { + cells[x][y] = &Cell{Width: r.Cells[y].Width} + } + } + + // insert each line in a cell as new cell in the cells array + for y, cell := range r.Cells { + lines := strings.Split(cell.String(), "\n") + for x, line := range lines { + 
cells[x][y].Data = line
+		}
+	}
+
+	// format each line
+	lines := make([]string, lc)
+	for x := range lines {
+		line := make([]string, len(cells[x]))
+		for y := range cells[x] {
+			line[y] = cells[x][y].String()
+		}
+		lines[x] = strutil.Join(line, r.Separator)
+	}
+	return strutil.Join(lines, "\n")
+}
+
+// Cell represents a column in a row
+type Cell struct {
+	// Width is the width of the cell
+	Width uint
+
+	// Wrap when true wraps the contents of the cell when the length exceeds the width
+	Wrap bool
+
+	// Data is the cell data
+	Data interface{}
+}
+
+// LineWidth returns the max width of all the lines in a cell
+func (c *Cell) LineWidth() uint {
+	width := 0
+	for _, s := range strings.Split(c.String(), "\n") {
+		w := runewidth.StringWidth(s)
+		if w > width {
+			width = w
+		}
+	}
+	return uint(width)
+}
+
+// String returns the string formatted representation of the cell
+func (c *Cell) String() string {
+	if c.Data == nil {
+		return strutil.PadLeft(" ", int(c.Width), ' ')
+	}
+	s := fmt.Sprintf("%v", c.Data)
+	switch {
+	case c.Width > 0 && c.Wrap:
+		return wordwrap.WrapString(s, c.Width)
+	case c.Width > 0:
+		return strutil.Resize(s, c.Width)
+	}
+	return s
+}
=== added file 'src/github.com/gosuri/uitable/table_test.go'
--- src/github.com/gosuri/uitable/table_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/table_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,62 @@
+package uitable
+
+import (
+	"sync"
+	"testing"
+)
+
+func TestCell(t *testing.T) {
+	c := &Cell{
+		Data:  "foo bar",
+		Width: 5,
+	}
+
+	got := c.String()
+	if got != "fo..." {
+		t.Fatal("need", "fo...", "got", got)
+	}
+	if c.LineWidth() != 5 {
+		t.Fatal("need", 5, "got", c.LineWidth())
+	}
+
+	c.Wrap = true
+	got = c.String()
+	if got != "foo\nbar" {
+		t.Fatal("need", "foo\nbar", "got", got)
+	}
+	if c.LineWidth() != 3 {
+		t.Fatal("need", 3, "got", c.LineWidth())
+	}
+}
+
+func TestRow(t *testing.T) {
+	row := &Row{
+		Separator: "\t",
+		Cells: []*Cell{
+			{Data: "foo", Width: 3, Wrap: true},
+			{Data: "bar baz", Width: 3, Wrap: true},
+		},
+	}
+	got := row.String()
+	need := "foo\tbar\n   \tbaz"
+
+	if got != need {
+		t.Fatalf("need: %q | got: %q ", need, got)
+	}
+}
+
+func TestAddRow(t *testing.T) {
+	var wg sync.WaitGroup
+	table := New()
+	for i := 0; i < 100; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			table.AddRow("foo")
+		}()
+	}
+	wg.Wait()
+	if len(table.Rows) != 100 {
+		t.Fatal("want", 100, "got", len(table.Rows))
+	}
+}
=== added directory 'src/github.com/gosuri/uitable/util'
=== added directory 'src/github.com/gosuri/uitable/util/strutil'
=== added file 'src/github.com/gosuri/uitable/util/strutil/strutil.go'
--- src/github.com/gosuri/uitable/util/strutil/strutil.go 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/util/strutil/strutil.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,72 @@
+// Package strutil provides various utilities for manipulating strings
+package strutil
+
+import (
+	"bytes"
+	"github.com/mattn/go-runewidth"
+)
+
+// PadRight returns a new string of a specified length in which the end of the current string is padded with spaces or with a specified Unicode character.
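+// For example, PadRight("ab", 4, '.') returns "ab..".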
+func PadRight(str string, length int, pad byte) string { + slen := runewidth.StringWidth(str) + if slen >= length { + return str + } + buf := bytes.NewBufferString(str) + for i := 0; i < length-slen; i++ { + buf.WriteByte(pad) + } + return buf.String() +} + +// PadLeft returns a new string of a specified length in which the beginning of the current string is padded with spaces or with a specified Unicode character. +func PadLeft(str string, length int, pad byte) string { + slen := runewidth.StringWidth(str) + if slen >= length { + return str + } + var buf bytes.Buffer + for i := 0; i < length-slen; i++ { + buf.WriteByte(pad) + } + buf.WriteString(str) + return buf.String() +} + +// Resize resizes the string with the given length. It ellipses with '...' when the string's length exceeds +// the desired length or pads spaces to the right of the string when length is smaller than desired +func Resize(s string, length uint) string { + slen := runewidth.StringWidth(s) + n := int(length) + if slen == n { + return s + } + // Pads only when length of the string smaller than len needed + s = PadRight(s, n, ' ') + if slen > n { + rs := []rune(s) + var buf bytes.Buffer + w := 0 + for _, r := range rs { + buf.WriteRune(r) + rw := runewidth.RuneWidth(r) + if w+rw >= n-3 { + break + } + w += rw + } + buf.WriteString("...") + s = buf.String() + } + return s +} + +// Join joins the list of the string with the delim provided +func Join(list []string, delim string) string { + var buf bytes.Buffer + for i := 0; i < len(list)-1; i++ { + buf.WriteString(list[i] + delim) + } + buf.WriteString(list[len(list)-1]) + return buf.String() +} === added file 'src/github.com/gosuri/uitable/util/strutil/strutil_test.go' --- src/github.com/gosuri/uitable/util/strutil/strutil_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/util/strutil/strutil_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,40 @@ +package strutil + +import ( + "testing" +) + +func TestResize(t *testing.T) { + s := "foo" + got := Resize(s, 5) + if len(got) != 5 { + t.Fatal("want", 5, "got", len(got)) + } + s = "foobar" + got = Resize(s, 5) + + if got != "fo..." 
{
+		t.Fatal("want", "fo...", "got", got)
+	}
+}
+
+func TestJoin(t *testing.T) {
+	got := Join([]string{"foo", "bar"}, ",")
+	if got != "foo,bar" {
+		t.Fatal("want", "foo,bar", "got", got)
+	}
+}
+
+func TestPadRight(t *testing.T) {
+	got := PadRight("foo", 5, '-')
+	if got != "foo--" {
+		t.Fatal("want", "foo--", "got", got)
+	}
+}
+
+func TestPadLeft(t *testing.T) {
+	got := PadLeft("foo", 5, '-')
+	if got != "--foo" {
+		t.Fatal("want", "--foo", "got", got)
+	}
+}
=== added directory 'src/github.com/gosuri/uitable/util/wordwrap'
=== added file 'src/github.com/gosuri/uitable/util/wordwrap/LICENSE.md'
--- src/github.com/gosuri/uitable/util/wordwrap/LICENSE.md 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/util/wordwrap/LICENSE.md 2016-03-22 15:18:22 +0000
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
=== added file 'src/github.com/gosuri/uitable/util/wordwrap/README.md'
--- src/github.com/gosuri/uitable/util/wordwrap/README.md 1970-01-01 00:00:00 +0000
+++ src/github.com/gosuri/uitable/util/wordwrap/README.md 2016-03-22 15:18:22 +0000
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: it breaks lines only at whitespace and explicit
+linebreaks. The goal of this library is word wrapping for CLI output, so the
+input is typically well-controlled human language. Because of this, the naive
+approach typically works just fine.
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
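A small usage sketch of the behaviour described above (the standalone program is an assumption; the import path is taken from this vendored copy). Note that an over-long word is passed through rather than split:

```go
package main

import (
	"fmt"

	"github.com/gosuri/uitable/util/wordwrap"
)

func main() {
	// Breaks happen only at whitespace.
	fmt.Println(wordwrap.WrapString("foo bar baz", 4))
	// foo
	// bar
	// baz

	// A word longer than the limit is left intact and exceeds it.
	fmt.Println(wordwrap.WrapString("foobarbaz qux", 4))
	// foobarbaz
	// qux
}
```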
=== added file 'src/github.com/gosuri/uitable/util/wordwrap/wordwrap.go' --- src/github.com/gosuri/uitable/util/wordwrap/wordwrap.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/util/wordwrap/wordwrap.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,84 @@ +// Package wordwrap provides methods for wrapping the contents of a string +package wordwrap + +import ( + "bytes" + "github.com/mattn/go-runewidth" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + var wordWidth, spaceWidth int + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) > lim { + current = 0 + } else { + current += uint(spaceWidth) + spaceBuf.WriteTo(buf) + spaceWidth += runewidth.StringWidth(buf.String()) + } + spaceBuf.Reset() + spaceWidth = 0 + } else { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + + spaceBuf.WriteRune(char) + spaceWidth += runewidth.RuneWidth(char) + } else { + wordBuf.WriteRune(char) + wordWidth += runewidth.RuneWidth(char) + + if current+uint(spaceWidth+wordWidth) > lim && uint(wordWidth) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + spaceWidth = 0 + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} === added file 'src/github.com/gosuri/uitable/util/wordwrap/wordwrap_test.go' --- src/github.com/gosuri/uitable/util/wordwrap/wordwrap_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/gosuri/uitable/util/wordwrap/wordwrap_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,85 @@ +package wordwrap + +import ( + "testing" +) + +func TestWrapString(t *testing.T) { + cases := []struct { + Input, Output string + Lim uint + }{ + // A simple word passes through. + { + "foo", + "foo", + 4, + }, + // A single word that is too long passes through. + // We do not break words. + { + "foobarbaz", + "foobarbaz", + 4, + }, + // Lines are broken at whitespace. + { + "foo bar baz", + "foo\nbar\nbaz", + 4, + }, + // Lines are broken at whitespace, even if words + // are too long. We do not break words. + { + "foo bars bazzes", + "foo\nbars\nbazzes", + 4, + }, + // A word that would run beyond the width is wrapped. + { + "fo sop", + "fo\nsop", + 4, + }, + // Whitespace that trails a line and fits the width + // passes through, as does whitespace prefixing an + // explicit line break. A tab counts as one character. + { + "foo\nb\t r\n baz", + "foo\nb\t r\n baz", + 4, + }, + // Trailing whitespace is removed if it doesn't fit the width. + // Runs of whitespace on which a line is broken are removed. 
+ { + "foo \nb ar ", + "foo\nb\nar", + 4, + }, + // An explicit line break at the end of the input is preserved. + { + "foo bar baz\n", + "foo\nbar\nbaz\n", + 4, + }, + // Explicit break are always preserved. + { + "\nfoo bar\n\n\nbaz\n", + "\nfoo\nbar\n\n\nbaz\n", + 4, + }, + // Complete example: + { + " This is a list: \n\n\t* foo\n\t* bar\n\n\n\t* baz \nBAM ", + " This\nis a\nlist: \n\n\t* foo\n\t* bar\n\n\n\t* baz\nBAM", + 6, + }, + } + + for i, tc := range cases { + actual := WrapString(tc.Input, tc.Lim) + if actual != tc.Output { + t.Fatalf("Case %d Input:\n\n`%s`\n\nActual Output:\n\n`%s`", i, tc.Input, actual) + } + } +} === modified file 'src/github.com/juju/blobstore/gridfs.go' --- src/github.com/juju/blobstore/gridfs.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/blobstore/gridfs.go 2016-03-22 15:18:22 +0000 @@ -32,60 +32,25 @@ } func (g *gridFSStorage) db() *mgo.Database { - s := g.session.Copy() - return s.DB(g.dbName) -} - -func (g *gridFSStorage) gridFS() *gridFS { - db := g.db() - return &gridFS{ - GridFS: db.GridFS(g.namespace), - session: db.Session, - } -} - -// gridFS wraps a GridFS so that the session can be closed when finished -// with. -type gridFS struct { - *mgo.GridFS - session *mgo.Session -} - -func (g *gridFS) Close() { - g.session.Close() -} - -// gridfsFile wraps a GridFile so that the session can be closed when finished -// with. -type gridfsFile struct { - *mgo.GridFile - gfs *gridFS -} - -func (f *gridfsFile) Close() error { - defer f.gfs.Close() - return f.GridFile.Close() + return g.session.DB(g.dbName) +} + +func (g *gridFSStorage) gridFS() *mgo.GridFS { + return g.db().GridFS(g.namespace) } // Get is defined on ResourceStorage. func (g *gridFSStorage) Get(path string) (io.ReadCloser, error) { - gfs := g.gridFS() - file, err := gfs.Open(path) + file, err := g.gridFS().Open(path) if err != nil { - gfs.Close() return nil, errors.Annotatef(err, "failed to open GridFS file %q", path) } - return &gridfsFile{ - GridFile: file, - gfs: gfs, - }, nil + return file, nil } // Put is defined on ResourceStorage. func (g *gridFSStorage) Put(path string, r io.Reader, length int64) (checksum string, err error) { - gfs := g.gridFS() - defer gfs.Close() - file, err := gfs.Create(path) + file, err := g.gridFS().Create(path) if err != nil { return "", errors.Annotatef(err, "failed to create GridFS file %q", path) } @@ -108,7 +73,5 @@ // Remove is defined on ResourceStorage. func (g *gridFSStorage) Remove(path string) error { - gfs := g.gridFS() - defer gfs.Close() - return gfs.Remove(path) + return g.gridFS().Remove(path) } === modified file 'src/github.com/juju/blobstore/interface.go' --- src/github.com/juju/blobstore/interface.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/blobstore/interface.go 2016-03-22 15:18:22 +0000 @@ -61,10 +61,18 @@ GetForEnvironment(envUUID, path string) (r io.ReadCloser, length int64, err error) // PutForEnvironment stores data from reader at path, namespaced to the environment. + // + // PutForEnvironment is equivalent to PutForEnvironmentAndCheckHash with an empty + // hash string. PutForEnvironment(envUUID, path string, r io.Reader, length int64) error // PutForEnvironmentAndCheckHash is the same as PutForEnvironment - // except that it also checks that the content matches the provided hash. + // except that it also checks that the content matches the provided + // hash. The hash must be hex-encoded SHA-384. + // + // If checkHash is empty, then the hash check is elided. 
+ // + // If length is < 0, then the reader will be consumed until EOF. PutForEnvironmentAndCheckHash(envUUID, path string, r io.Reader, length int64, checkHash string) error // RemoveForEnvironment deletes data at path, namespaced to the environment. === modified file 'src/github.com/juju/blobstore/managedstorage.go' --- src/github.com/juju/blobstore/managedstorage.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/blobstore/managedstorage.go 2016-03-22 15:18:22 +0000 @@ -116,18 +116,18 @@ return storagePath, nil } -// preprocessUpload pulls in all the data from the reader, storing it in a temp file and +// preprocessUpload pulls in data from the reader, storing it in a temp file and // calculating the sha384 checksum. // The caller is expected to remove the temporary file if and only if we return a nil error. func (ms *managedStorage) preprocessUpload(r io.Reader, length int64) ( - f *os.File, hash string, err error, + f *os.File, n int64, hash string, err error, ) { sha384hash := sha512.New384() // Set up a chain of readers to pull in the data and calculate the checksum. rdr := io.TeeReader(r, sha384hash) f, err = ioutil.TempFile(os.TempDir(), "juju-resource") if err != nil { - return nil, "", err + return nil, -1, "", err } tempFilename := f.Name() // Add a cleanup function to remove the data file if we exit with an error. @@ -137,17 +137,20 @@ os.Remove(tempFilename) } }() + if length >= 0 { + rdr = &io.LimitedReader{rdr, length} + } // Write the data to a temp file. - _, err = io.CopyN(f, rdr, length) + length, err = io.Copy(f, rdr) if err != nil { - return nil, "", err + return nil, -1, "", err } // Reset the file so when we return it, it can be read from to get the data. _, err = f.Seek(0, 0) if err != nil { - return nil, "", err + return nil, -1, "", err } - return f, fmt.Sprintf("%x", sha384hash.Sum(nil)), nil + return f, length, fmt.Sprintf("%x", sha384hash.Sum(nil)), nil } // GetForEnvironment is defined on the ManagedStorage interface. @@ -157,9 +160,7 @@ return nil, 0, err } var doc managedResourceDoc - coll := ms.managedResourceCollection.With(ms.managedResourceCollection.Database.Session.Copy()) - defer coll.Database.Session.Close() - if err := coll.Find(bson.D{{"path", managedPath}}).One(&doc); err != nil { + if err := ms.managedResourceCollection.Find(bson.D{{"path", managedPath}}).One(&doc); err != nil { if err == mgo.ErrNotFound { return nil, 0, errors.NotFoundf("resource at path %q", managedPath) } @@ -219,7 +220,7 @@ // putForEnvironment is the internal implementation for both the above // methods. It checks the hash if checkHash is non-nil. 
func (ms *managedStorage) putForEnvironment(envUUID, path string, r io.Reader, length int64, checkHash string) (putError error) { - dataFile, hash, err := ms.preprocessUpload(r, length) + dataFile, length, hash, err := ms.preprocessUpload(r, length) if err != nil { return errors.Annotate(err, "cannot calculate data checksums") } @@ -323,9 +324,7 @@ return addManagedResourceOps, err } - db := ms.db.With(ms.db.Session.Copy()) - defer db.Session.Close() - txnRunner := txnRunner(db) + txnRunner := txnRunner(ms.db) if err = txnRunner.Run(buildTxn); err != nil { return "", errors.Annotate(err, "cannot update managed resource catalog") } @@ -352,9 +351,7 @@ resourceId, removeManagedResourceOps, err = ms.removeResourceTxn(managedPath) return removeManagedResourceOps, err } - db := ms.db.With(ms.db.Session.Copy()) - defer db.Session.Close() - txnRunner := txnRunner(db) + txnRunner := txnRunner(ms.db) if err := txnRunner.Run(buildTxn); err != nil { if err == mgo.ErrNotFound { return errors.NotFoundf("resource at path %q", managedPath) @@ -408,9 +405,7 @@ func (ms *managedStorage) removeResourceTxn(managedPath string) (string, []txn.Op, error) { var existingDoc managedResourceDoc - coll := ms.managedResourceCollection.With(ms.managedResourceCollection.Database.Session.Copy()) - defer coll.Database.Session.Close() - if err := coll.FindId(managedPath).One(&existingDoc); err != nil { + if err := ms.managedResourceCollection.FindId(managedPath).One(&existingDoc); err != nil { return "", nil, err } return existingDoc.ResourceId, []txn.Op{{ === modified file 'src/github.com/juju/blobstore/managedstorage_test.go' --- src/github.com/juju/blobstore/managedstorage_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/blobstore/managedstorage_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "fmt" "io/ioutil" "math/rand" + "strings" "sync" "time" @@ -268,6 +269,35 @@ c.Assert(err, gc.IsNil) } +func (s *managedStorageSuite) TestPutForEnvironmentAndCheckHashEmptyHash(c *gc.C) { + // Passing "" as the hash to PutForEnvironmentAndCheckHash will elide + // the hash check. + rdr := strings.NewReader("data") + err := s.managedStorage.PutForEnvironmentAndCheckHash("env", "/some/path", rdr, int64(rdr.Len()), "") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *managedStorageSuite) TestPutForEnvironmentUnknownLen(c *gc.C) { + // Passing -1 for the size of the data directs PutForEnvironment + // to read in the whole amount. + blob := []byte("data") + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForEnvironment("env", "/some/path", rdr, -1) + c.Assert(err, jc.ErrorIsNil) + s.assertGet(c, "/some/path", blob) +} + +func (s *managedStorageSuite) TestPutForEnvironmentOverLong(c *gc.C) { + // Passing a size to PutForEnvironment that exceeds the actual + // size of the data will result in metadata recording the actual + // size. 
+ blob := []byte("data") + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForEnvironment("env", "/some/path", rdr, int64(len(blob)+1)) + c.Assert(err, jc.ErrorIsNil) + s.assertGet(c, "/some/path", blob) +} + func (s *managedStorageSuite) assertGet(c *gc.C, path string, blob []byte) { r, length, err := s.managedStorage.GetForEnvironment("env", path) c.Assert(err, gc.IsNil) @@ -517,7 +547,6 @@ c.Check(err, gc.IsNil) if err == nil { r, length, err := s.managedStorage.GetForEnvironment("env", fmt.Sprintf("path/to/blob%d", index)) - defer r.Close() c.Check(err, gc.IsNil) if err == nil { data, err := ioutil.ReadAll(r) === modified file 'src/github.com/juju/blobstore/resourcecatalog.go' --- src/github.com/juju/blobstore/resourcecatalog.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/blobstore/resourcecatalog.go 2016-03-22 15:18:22 +0000 @@ -85,9 +85,7 @@ // Get is defined on the ResourceCatalog interface. func (rc *resourceCatalog) Get(id string) (*Resource, error) { var doc resourceDoc - coll := rc.collection.With(rc.collection.Database.Session.Copy()) - defer coll.Database.Session.Close() - if err := coll.FindId(id).One(&doc); err == mgo.ErrNotFound { + if err := rc.collection.FindId(id).One(&doc); err == mgo.ErrNotFound { return nil, errors.NotFoundf("resource with id %q", id) } else if err != nil { return nil, err @@ -101,9 +99,7 @@ // Find is defined on the ResourceCatalog interface. func (rc *resourceCatalog) Find(hash string) (string, error) { var doc resourceDoc - coll := rc.collection.With(rc.collection.Database.Session.Copy()) - defer coll.Database.Session.Close() - if err := coll.Find(checksumMatch(hash)).One(&doc); err == mgo.ErrNotFound { + if err := rc.collection.Find(checksumMatch(hash)).One(&doc); err == mgo.ErrNotFound { return "", errors.NotFoundf("resource with sha384=%q", hash) } else if err != nil { return "", err @@ -120,9 +116,7 @@ id, path, ops, err = rc.resourceIncRefOps(hash, length) return ops, err } - db := rc.collection.Database.With(rc.collection.Database.Session.Copy()) - defer db.Session.Close() - txnRunner := txnRunner(db) + txnRunner := txnRunner(rc.collection.Database) if err = txnRunner.Run(buildTxn); err != nil { return "", "", err } @@ -137,9 +131,7 @@ } return ops, err } - db := rc.collection.Database.With(rc.collection.Database.Session.Copy()) - defer db.Session.Close() - txnRunner := txnRunner(db) + txnRunner := txnRunner(rc.collection.Database) return txnRunner.Run(buildTxn) } @@ -151,9 +143,7 @@ } return ops, err } - db := rc.collection.Database.With(rc.collection.Database.Session.Copy()) - defer db.Session.Close() - txnRunner := txnRunner(db) + txnRunner := txnRunner(rc.collection.Database) return wasDeleted, path, txnRunner.Run(buildTxn) } @@ -167,9 +157,7 @@ var doc resourceDoc exists := false checksumMatchTerm := checksumMatch(hash) - coll := rc.collection.With(rc.collection.Database.Session.Copy()) - defer coll.Database.Session.Close() - err = coll.Find(checksumMatchTerm).One(&doc) + err = rc.collection.Find(checksumMatchTerm).One(&doc) if err != nil && err != mgo.ErrNotFound { return "", "", nil, err } else if err == nil { @@ -197,9 +185,7 @@ func (rc *resourceCatalog) uploadCompleteOps(id, path string) ([]txn.Op, error) { var doc resourceDoc - coll := rc.collection.With(rc.collection.Database.Session.Copy()) - defer coll.Database.Session.Close() - if err := coll.FindId(id).One(&doc); err != nil { + if err := rc.collection.FindId(id).One(&doc); err != nil { return nil, err } if doc.Path != "" { @@ -215,9 +201,7 @@ func (rc 
*resourceCatalog) resourceDecRefOps(id string) (wasDeleted bool, path string, ops []txn.Op, err error) { var doc resourceDoc - coll := rc.collection.With(rc.collection.Database.Session.Copy()) - defer coll.Database.Session.Close() - if err = coll.FindId(id).One(&doc); err != nil { + if err = rc.collection.FindId(id).One(&doc); err != nil { return false, "", nil, err } if doc.RefCount == 1 { === added directory 'src/github.com/juju/bundlechanges' === added file 'src/github.com/juju/bundlechanges/.gitignore' --- src/github.com/juju/bundlechanges/.gitignore 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof === added file 'src/github.com/juju/bundlechanges/LICENSE' --- src/github.com/juju/bundlechanges/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,166 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. + === added file 'src/github.com/juju/bundlechanges/Makefile' --- src/github.com/juju/bundlechanges/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,100 @@ +# Makefile for the bundlechanges library. + +PROJECT := github.com/juju/bundlechanges +PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) + +ifeq ($(shell uname -p | sed -r 's/.*(x86|armel|armhf).*/golang/'), golang) + GO_C := golang + INSTALL_FLAGS := +else + GO_C := gccgo-4.9 gccgo-go + INSTALL_FLAGS := -gccgoflags=-static-libgo +endif + +define DEPENDENCIES + build-essential + bzr + git + $(GO_C) +endef + +default: build + +$(GOPATH)/bin/godeps: + go get -v launchpad.net/godeps + +# Start of GOPATH-dependent targets. Some targets only make sense - +# and will only work - when this tree is found on the GOPATH. +ifeq ($(CURDIR),$(PROJECT_DIR)) + +build: + go build $(PROJECT)/... + +check: + go test $(PROJECT)/... + +install: + go install $(INSTALL_FLAGS) -v $(PROJECT)/... + +clean: + go clean $(PROJECT)/... + +else + +build: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +check: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +install: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +clean: + $(error Cannot $@; $(CURDIR) is not on GOPATH) + +endif +# End of GOPATH-dependent targets. + +# Reformat source files. +format: + gofmt -w -l . 
+ +# Update the project Go dependencies to the required revision. +deps: $(GOPATH)/bin/godeps + $(GOPATH)/bin/godeps -u dependencies.tsv + +# Generate the dependencies file. +create-deps: $(GOPATH)/bin/godeps + godeps -t $(shell go list $(PROJECT)/...) > dependencies.tsv || true + +# Install packages required to develop the bundlechanges lib and run tests. +APT_BASED := $(shell command -v apt-get >/dev/null; echo $$?) +sysdeps: +ifeq ($(APT_BASED),0) +ifeq ($(shell lsb_release -cs|sed -r 's/precise|quantal|raring/old/'),old) + @echo Adding PPAs for golang and mongodb + @sudo apt-add-repository --yes ppa:juju/golang + @sudo apt-add-repository --yes ppa:juju/stable +endif + @echo Installing dependencies + sudo apt-get update + @sudo apt-get --yes install $(strip $(DEPENDENCIES)) \ + $(shell apt-cache madison juju-mongodb mongodb-server | head -1 | cut -d '|' -f1) +else + @echo sysdeps runs only on systems with apt-get + @echo on OS X with homebrew try: brew install bazaar mongodb +endif + +help: + @echo -e 'Juju Bundle Changes - list of make targets:\n' + @echo 'make - Build the package.' + @echo 'make check - Run tests.' + @echo 'make install - Install the package.' + @echo 'make clean - Remove object files from package source directories.' + @echo 'make sysdeps - Install the development environment system packages.' + @echo 'make deps - Set up the project Go dependencies.' + @echo 'make create-deps - Generate the Go dependencies file.' + @echo 'make format - Format the source files.' + +.PHONY: build check clean format help install sysdeps === added file 'src/github.com/juju/bundlechanges/README.md' --- src/github.com/juju/bundlechanges/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +# Juju Bundle Changes + +A Go library to generate the list of changes required to deploy a bundle. === added file 'src/github.com/juju/bundlechanges/changes.go' --- src/github.com/juju/bundlechanges/changes.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/changes.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,386 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package bundlechanges + +import ( + "fmt" + + "gopkg.in/juju/charm.v6-unstable" +) + +// FromData generates and returns the list of changes required to deploy the +// given bundle data. The changes are sorted by requirements, so that they can +// be applied in order. The bundle data is assumed to be already verified. +func FromData(data *charm.BundleData) []Change { + cs := &changeset{} + addedServices := handleServices(cs.add, data.Services) + addedMachines := handleMachines(cs.add, data.Machines) + handleRelations(cs.add, data.Relations, addedServices) + handleUnits(cs.add, data.Services, addedServices, addedMachines) + return cs.sorted() +} + +// Change holds a single change required to deploy a bundle. +type Change interface { + // Id returns the unique identifier for this change. + Id() string + // Requires returns the ids of all the changes that must + // be applied before this one. + Requires() []string + // Method returns the action to be performed to apply this change. + Method() string + // GUIArgs returns positional arguments to pass to the method, suitable for + // being JSON-serialized and sent to the Juju GUI. + GUIArgs() []interface{} + // setId is used to set the identifier for the change. 
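+	// It is unexported so that only this package can implement Change.
+	// Ids are assigned by the changeset as "<method>-<position>" (for
+	// instance "addCharm-0"); later changes refer to earlier ones through
+	// "$<id>" placeholders such as "$addCharm-0".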
+	setId(string)
+}
+
+// changeInfo holds information on a change, suitable for embedding into a more
+// specific change type.
+type changeInfo struct {
+	id       string
+	requires []string
+	method   string
+}
+
+// Id implements Change.Id.
+func (ch *changeInfo) Id() string {
+	return ch.id
+}
+
+// Requires implements Change.Requires.
+func (ch *changeInfo) Requires() []string {
+	// Avoid returning a nil slice so that the requirements serialize to an
+	// empty JSON array rather than to JSON null.
+	if ch.requires == nil {
+		return []string{}
+	}
+	return ch.requires
+}
+
+// Method implements Change.Method.
+func (ch *changeInfo) Method() string {
+	return ch.method
+}
+
+// setId implements Change.setId.
+func (ch *changeInfo) setId(id string) {
+	ch.id = id
+}
+
+// newAddCharmChange creates a new change for adding a charm.
+func newAddCharmChange(params AddCharmParams, requires ...string) *AddCharmChange {
+	return &AddCharmChange{
+		changeInfo: changeInfo{
+			requires: requires,
+			method:   "addCharm",
+		},
+		Params: params,
+	}
+}
+
+// AddCharmChange holds a change for adding a charm to the environment.
+type AddCharmChange struct {
+	changeInfo
+	// Params holds parameters for adding a charm.
+	Params AddCharmParams
+}
+
+// GUIArgs implements Change.GUIArgs.
+func (ch *AddCharmChange) GUIArgs() []interface{} {
+	return []interface{}{ch.Params.Charm}
+}
+
+// AddCharmParams holds parameters for adding a charm to the environment.
+type AddCharmParams struct {
+	// Charm holds the URL of the charm to be added.
+	Charm string
+}
+
+// newAddMachineChange creates a new change for adding a machine or container.
+func newAddMachineChange(params AddMachineParams, requires ...string) *AddMachineChange {
+	return &AddMachineChange{
+		changeInfo: changeInfo{
+			requires: requires,
+			method:   "addMachines",
+		},
+		Params: params,
+	}
+}
+
+// AddMachineChange holds a change for adding a machine or container.
+type AddMachineChange struct {
+	changeInfo
+	// Params holds parameters for adding a machine.
+	Params AddMachineParams
+}
+
+// GUIArgs implements Change.GUIArgs.
+func (ch *AddMachineChange) GUIArgs() []interface{} {
+	options := AddMachineOptions{
+		Series:        ch.Params.Series,
+		Constraints:   ch.Params.Constraints,
+		ContainerType: ch.Params.ContainerType,
+		ParentId:      ch.Params.ParentId,
+	}
+	return []interface{}{options}
+}
+
+// AddMachineOptions holds GUI options for adding a machine or container.
+type AddMachineOptions struct {
+	// Series holds the machine OS series.
+	Series string `json:"series,omitempty"`
+	// Constraints holds the machine constraints.
+	Constraints string `json:"constraints,omitempty"`
+	// ContainerType holds the machine container type (like "lxc" or "kvm").
+	ContainerType string `json:"containerType,omitempty"`
+	// ParentId holds the id of the parent machine.
+	ParentId string `json:"parentId,omitempty"`
+}
+
+// AddMachineParams holds parameters for adding a machine or container.
+type AddMachineParams struct {
+	// Series holds the optional machine OS series.
+	Series string
+	// Constraints holds the optional machine constraints.
+	Constraints string
+	// ContainerType optionally holds the type of the container (for instance
+	// "lxc" or "kvm"). It is not specified for top level machines.
+	ContainerType string
+	// ParentId optionally holds a placeholder pointing to another machine
+	// change or to a unit change. This value is only specified in the case
+	// this machine is a container, in which case also ContainerType is set.
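+	// For example, "$addMachines-6" points at the machine created by the
+	// change with id "addMachines-6", while "$addUnit-7" places the
+	// container on the machine holding that unit.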
+ ParentId string +} + +// newAddRelationChange creates a new change for adding a relation. +func newAddRelationChange(params AddRelationParams, requires ...string) *AddRelationChange { + return &AddRelationChange{ + changeInfo: changeInfo{ + requires: requires, + method: "addRelation", + }, + Params: params, + } +} + +// AddRelationChange holds a change for adding a relation between two services. +type AddRelationChange struct { + changeInfo + // Params holds parameters for adding a relation. + Params AddRelationParams +} + +// GUIArgs implements Change.GUIArgs. +func (ch *AddRelationChange) GUIArgs() []interface{} { + return []interface{}{ch.Params.Endpoint1, ch.Params.Endpoint2} +} + +// AddRelationParams holds parameters for adding a relation between two services. +type AddRelationParams struct { + // Endpoint1 and Endpoint2 hold relation endpoints in the + // "service:interface" form, where the service is always a placeholder + // pointing to a service change, and the interface is optional. Examples + // are "$deploy-42:web" or just "$deploy-42". + Endpoint1 string + Endpoint2 string +} + +// newAddServiceChange creates a new change for adding a service. +func newAddServiceChange(params AddServiceParams, requires ...string) *AddServiceChange { + return &AddServiceChange{ + changeInfo: changeInfo{ + requires: requires, + method: "deploy", + }, + Params: params, + } +} + +// AddServiceChange holds a change for deploying a Juju service. +type AddServiceChange struct { + changeInfo + // Params holds parameters for adding a service. + Params AddServiceParams +} + +// GUIArgs implements Change.GUIArgs. +func (ch *AddServiceChange) GUIArgs() []interface{} { + options := ch.Params.Options + if options == nil { + options = make(map[string]interface{}, 0) + } + storage := ch.Params.Storage + if storage == nil { + storage = make(map[string]string, 0) + } + endpointBindings := ch.Params.EndpointBindings + if endpointBindings == nil { + endpointBindings = make(map[string]string, 0) + } + return []interface{}{ch.Params.Charm, ch.Params.Service, options, ch.Params.Constraints, storage, endpointBindings} +} + +// AddServiceParams holds parameters for deploying a Juju service. +type AddServiceParams struct { + // Charm holds the URL of the charm to be used to deploy this service. + Charm string + // Service holds the service name. + Service string + // Options holds service options. + Options map[string]interface{} + // Constraints holds the optional service constraints. + Constraints string + // Storage holds the optional storage constraints. + Storage map[string]string + // EndpointBindings holds the optional endpoint bindings + EndpointBindings map[string]string +} + +// newAddUnitChange creates a new change for adding a service unit. +func newAddUnitChange(params AddUnitParams, requires ...string) *AddUnitChange { + return &AddUnitChange{ + changeInfo: changeInfo{ + requires: requires, + method: "addUnit", + }, + Params: params, + } +} + +// AddUnitChange holds a change for adding a service unit. +type AddUnitChange struct { + changeInfo + // Params holds parameters for adding a unit. + Params AddUnitParams +} + +// GUIArgs implements Change.GUIArgs. +func (ch *AddUnitChange) GUIArgs() []interface{} { + args := []interface{}{ch.Params.Service, nil} + if ch.Params.To != "" { + args[1] = ch.Params.To + } + return args +} + +// AddUnitParams holds parameters for adding a service unit. +type AddUnitParams struct { + // Service holds the service placeholder name for which a unit is added. 
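+	// (for example "$deploy-1").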
+ Service string + // To holds the optional location where to add the unit, as a placeholder + // pointing to another unit change or to a machine change. + To string +} + +// newExposeChange creates a new change for exposing a service. +func newExposeChange(params ExposeParams, requires ...string) *ExposeChange { + return &ExposeChange{ + changeInfo: changeInfo{ + requires: requires, + method: "expose", + }, + Params: params, + } +} + +// ExposeChange holds a change for exposing a service. +type ExposeChange struct { + changeInfo + // Params holds parameters for exposing a service. + Params ExposeParams +} + +// GUIArgs implements Change.GUIArgs. +func (ch *ExposeChange) GUIArgs() []interface{} { + return []interface{}{ch.Params.Service} +} + +// ExposeParams holds parameters for exposing a service. +type ExposeParams struct { + // Service holds the placeholder name of the service that must be exposed. + Service string +} + +// newSetAnnotationsChange creates a new change for setting annotations. +func newSetAnnotationsChange(params SetAnnotationsParams, requires ...string) *SetAnnotationsChange { + return &SetAnnotationsChange{ + changeInfo: changeInfo{ + requires: requires, + method: "setAnnotations", + }, + Params: params, + } +} + +// SetAnnotationsChange holds a change for setting service and machine +// annotations. +type SetAnnotationsChange struct { + changeInfo + // Params holds parameters for setting annotations. + Params SetAnnotationsParams +} + +// GUIArgs implements Change.GUIArgs. +func (ch *SetAnnotationsChange) GUIArgs() []interface{} { + return []interface{}{ch.Params.Id, string(ch.Params.EntityType), ch.Params.Annotations} +} + +// EntityType holds entity types ("service" or "machine"). +type EntityType string + +const ( + ServiceType EntityType = "service" + MachineType EntityType = "machine" +) + +// SetAnnotationsParams holds parameters for setting annotations. +type SetAnnotationsParams struct { + // Id is the placeholder for the service or machine change corresponding to + // the entity to be annotated. + Id string + // EntityType holds the type of the entity, "service" or "machine". + EntityType EntityType + // Annotations holds the annotations as key/value pairs. + Annotations map[string]string +} + +// changeset holds the list of changes returned by FromData. +type changeset struct { + changes []Change +} + +// add adds the given change to this change set. +func (cs *changeset) add(change Change) { + change.setId(fmt.Sprintf("%s-%d", change.Method(), len(cs.changes))) + cs.changes = append(cs.changes, change) +} + +// sorted returns the changes sorted by requirements, required first. +func (cs *changeset) sorted() []Change { + numChanges := len(cs.changes) + records := make(map[string]bool, numChanges) + sorted := make([]Change, 0, numChanges) + changes := make([]Change, numChanges, numChanges*2) + copy(changes, cs.changes) +mainloop: + for len(changes) != 0 { + // Note that all valid bundles have at least two changes + // (add one charm and deploy one service). + change := changes[0] + changes = changes[1:] + for _, r := range change.Requires() { + if !records[r] { + // This change requires a change which is not yet listed. + // Push this change at the end of the list and retry later. 
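+				// Termination is guaranteed because the requirements form a
+				// DAG: on every pass at least one pending change has all of
+				// its requirements already recorded.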
+ changes = append(changes, change) + continue mainloop + } + } + records[change.Id()] = true + sorted = append(sorted, change) + } + return sorted +} === added file 'src/github.com/juju/bundlechanges/changes_test.go' --- src/github.com/juju/bundlechanges/changes_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/changes_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1230 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package bundlechanges_test + +import ( + "encoding/json" + "reflect" + "strings" + "testing" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/bundlechanges" +) + +type changesSuite struct{} + +var _ = gc.Suite(&changesSuite{}) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +// record holds expected information about the contents of a change value. +type record struct { + Id string + Requires []string + Method string + Params interface{} + GUIArgs []interface{} +} + +var fromDataTests = []struct { + // about describes the test. + about string + // content is the YAML encoded bundle content. + content string + // expected holds the expected changes required to deploy the bundle. + expected []record +}{{ + about: "minimal bundle", + content: ` + services: + django: + charm: django + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "django", + }, + GUIArgs: []interface{}{"django"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }}, +}, { + about: "simple bundle", + content: ` + services: + mediawiki: + charm: cs:precise/mediawiki-10 + num_units: 1 + expose: true + options: + debug: false + annotations: + gui-x: "609" + gui-y: "-15" + mysql: + charm: cs:precise/mysql-28 + num_units: 1 + series: trusty + relations: + - - mediawiki:db + - mysql:db + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:precise/mediawiki-10", + }, + GUIArgs: []interface{}{"cs:precise/mediawiki-10"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "mediawiki", + Options: map[string]interface{}{"debug": false}, + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "mediawiki", + map[string]interface{}{"debug": false}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "expose-2", + Method: "expose", + Params: bundlechanges.ExposeParams{ + Service: "$deploy-1", + }, + GUIArgs: []interface{}{"$deploy-1"}, + Requires: []string{"deploy-1"}, + }, { + Id: "setAnnotations-3", + Method: "setAnnotations", + Params: bundlechanges.SetAnnotationsParams{ + Id: "$deploy-1", + EntityType: bundlechanges.ServiceType, + Annotations: map[string]string{"gui-x": "609", "gui-y": "-15"}, + }, + GUIArgs: []interface{}{ + "$deploy-1", + "service", + map[string]string{"gui-x": "609", "gui-y": "-15"}, + }, + Requires: []string{"deploy-1"}, + }, { + Id: "addCharm-4", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:precise/mysql-28", + }, + GUIArgs: []interface{}{"cs:precise/mysql-28"}, + }, { + Id: "deploy-5", + Method: "deploy", 
+ Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-4", + Service: "mysql", + }, + GUIArgs: []interface{}{ + "$addCharm-4", + "mysql", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-4"}, + }, { + Id: "addRelation-6", + Method: "addRelation", + Params: bundlechanges.AddRelationParams{ + Endpoint1: "$deploy-1:db", + Endpoint2: "$deploy-5:db", + }, + GUIArgs: []interface{}{"$deploy-1:db", "$deploy-5:db"}, + Requires: []string{"deploy-1", "deploy-5"}, + }, { + Id: "addUnit-7", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + }, + GUIArgs: []interface{}{"$deploy-1", nil}, + Requires: []string{"deploy-1"}, + }, { + Id: "addUnit-8", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-5", + }, + GUIArgs: []interface{}{"$deploy-5", nil}, + Requires: []string{"deploy-5"}, + }}, +}, { + about: "same charm reused", + content: ` + services: + mediawiki: + charm: precise/mediawiki-10 + num_units: 1 + otherwiki: + charm: precise/mediawiki-10 + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "precise/mediawiki-10", + }, + GUIArgs: []interface{}{"precise/mediawiki-10"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "mediawiki", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "mediawiki", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "deploy-2", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "otherwiki", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "otherwiki", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addUnit-3", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + }, + GUIArgs: []interface{}{"$deploy-1", nil}, + Requires: []string{"deploy-1"}, + }}, +}, { + about: "machines and units placement", + content: ` + services: + django: + charm: cs:trusty/django-42 + num_units: 2 + to: + - 1 + - lxc:2 + constraints: cpu-cores=4 cpu-power=42 + haproxy: + charm: cs:trusty/haproxy-47 + num_units: 2 + expose: yes + to: + - lxc:django/0 + - new + options: + bad: wolf + number: 42.47 + machines: + 1: + series: trusty + 2: + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/django-42", + }, + GUIArgs: []interface{}{"cs:trusty/django-42"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + Constraints: "cpu-cores=4 cpu-power=42", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "cpu-cores=4 cpu-power=42", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addCharm-2", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/haproxy-47", + }, + GUIArgs: []interface{}{"cs:trusty/haproxy-47"}, + }, { + Id: "deploy-3", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-2", + Service: "haproxy", + Options: map[string]interface{}{"bad": "wolf", "number": 42.47}, + }, + GUIArgs: []interface{}{ + "$addCharm-2", + "haproxy", + map[string]interface{}{"bad": "wolf", "number": 
42.47}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-2"}, + }, { + Id: "expose-4", + Method: "expose", + Params: bundlechanges.ExposeParams{ + Service: "$deploy-3", + }, + GUIArgs: []interface{}{"$deploy-3"}, + Requires: []string{"deploy-3"}, + }, { + Id: "addMachines-5", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + Series: "trusty", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{Series: "trusty"}, + }, + }, { + Id: "addMachines-6", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addUnit-7", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-5", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-5"}, + Requires: []string{"deploy-1", "addMachines-5"}, + }, { + Id: "addMachines-11", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "lxc", + ParentId: "$addMachines-6", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "lxc", + ParentId: "$addMachines-6", + }, + }, + Requires: []string{"addMachines-6"}, + }, { + Id: "addMachines-12", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "lxc", + ParentId: "$addUnit-7", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "lxc", + ParentId: "$addUnit-7", + }, + }, + Requires: []string{"addUnit-7"}, + }, { + Id: "addMachines-13", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addUnit-8", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-11", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-11"}, + Requires: []string{"deploy-1", "addMachines-11"}, + }, { + Id: "addUnit-9", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + To: "$addMachines-12", + }, + GUIArgs: []interface{}{"$deploy-3", "$addMachines-12"}, + Requires: []string{"deploy-3", "addMachines-12"}, + }, { + Id: "addUnit-10", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + To: "$addMachines-13", + }, + GUIArgs: []interface{}{"$deploy-3", "$addMachines-13"}, + Requires: []string{"deploy-3", "addMachines-13"}, + }}, +}, { + about: "machines with constraints and annotations", + content: ` + services: + django: + charm: cs:trusty/django-42 + num_units: 2 + to: + - 1 + - new + machines: + 1: + constraints: "cpu-cores=4" + annotations: + foo: bar + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/django-42", + }, + GUIArgs: []interface{}{"cs:trusty/django-42"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addMachines-2", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + Constraints: "cpu-cores=4", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{Constraints: "cpu-cores=4"}, + }, + }, { + Id: "setAnnotations-3", + Method: "setAnnotations", + Params: bundlechanges.SetAnnotationsParams{ + Id: 
"$addMachines-2", + EntityType: bundlechanges.MachineType, + Annotations: map[string]string{"foo": "bar"}, + }, + GUIArgs: []interface{}{ + "$addMachines-2", + "machine", + map[string]string{"foo": "bar"}, + }, + Requires: []string{"addMachines-2"}, + }, { + Id: "addUnit-4", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-2", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-2"}, + Requires: []string{"deploy-1", "addMachines-2"}, + }, { + Id: "addMachines-6", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addUnit-5", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-6", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-6"}, + Requires: []string{"deploy-1", "addMachines-6"}, + }}, +}, { + about: "endpoint without relation name", + content: ` + services: + mediawiki: + charm: cs:precise/mediawiki-10 + mysql: + charm: cs:precise/mysql-28 + constraints: mem=42G + relations: + - - mediawiki:db + - mysql + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:precise/mediawiki-10", + }, + GUIArgs: []interface{}{"cs:precise/mediawiki-10"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "mediawiki", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "mediawiki", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addCharm-2", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:precise/mysql-28", + }, + GUIArgs: []interface{}{"cs:precise/mysql-28"}, + }, { + Id: "deploy-3", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-2", + Service: "mysql", + Constraints: "mem=42G", + }, + GUIArgs: []interface{}{ + "$addCharm-2", + "mysql", + map[string]interface{}{}, + "mem=42G", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-2"}, + }, { + Id: "addRelation-4", + Method: "addRelation", + Params: bundlechanges.AddRelationParams{ + Endpoint1: "$deploy-1:db", + Endpoint2: "$deploy-3", + }, + GUIArgs: []interface{}{"$deploy-1:db", "$deploy-3"}, + Requires: []string{"deploy-1", "deploy-3"}, + }}, +}, { + about: "unit placed in service", + content: ` + services: + wordpress: + charm: wordpress + num_units: 3 + django: + charm: cs:trusty/django-42 + num_units: 2 + to: [wordpress] + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/django-42", + }, + GUIArgs: []interface{}{"cs:trusty/django-42"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addCharm-2", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "wordpress", + }, + GUIArgs: []interface{}{"wordpress"}, + }, { + Id: "deploy-3", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-2", + Service: "wordpress", + }, + GUIArgs: []interface{}{ + "$addCharm-2", + "wordpress", + map[string]interface{}{}, + "", + map[string]string{}, + 
map[string]string{}, + }, + Requires: []string{"addCharm-2"}, + }, { + Id: "addUnit-6", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + }, + GUIArgs: []interface{}{"$deploy-3", nil}, + Requires: []string{"deploy-3"}, + }, { + Id: "addUnit-7", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + }, + GUIArgs: []interface{}{"$deploy-3", nil}, + Requires: []string{"deploy-3"}, + }, { + Id: "addUnit-8", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + }, + GUIArgs: []interface{}{"$deploy-3", nil}, + Requires: []string{"deploy-3"}, + }, { + Id: "addUnit-4", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addUnit-6", + }, + GUIArgs: []interface{}{"$deploy-1", "$addUnit-6"}, + Requires: []string{"deploy-1", "addUnit-6"}, + }, { + Id: "addUnit-5", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addUnit-7", + }, + GUIArgs: []interface{}{"$deploy-1", "$addUnit-7"}, + Requires: []string{"deploy-1", "addUnit-7"}, + }}, +}, { + about: "unit co-location with other units", + content: ` + services: + memcached: + charm: cs:trusty/mem-47 + num_units: 3 + to: [1, new] + django: + charm: cs:trusty/django-42 + num_units: 5 + to: + - memcached/0 + - lxc:memcached/1 + - lxc:memcached/2 + - kvm:ror + ror: + charm: vivid/rails + num_units: 2 + to: + - new + - 1 + machines: + 1: + series: trusty + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/django-42", + }, + GUIArgs: []interface{}{"cs:trusty/django-42"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addCharm-2", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/mem-47", + }, + GUIArgs: []interface{}{"cs:trusty/mem-47"}, + }, { + Id: "deploy-3", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-2", + Service: "memcached", + }, + GUIArgs: []interface{}{ + "$addCharm-2", + "memcached", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-2"}, + }, { + Id: "addCharm-4", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "vivid/rails", + }, + GUIArgs: []interface{}{"vivid/rails"}, + }, { + Id: "deploy-5", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-4", + Service: "ror", + }, + GUIArgs: []interface{}{ + "$addCharm-4", + "ror", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-4"}, + }, { + Id: "addMachines-6", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + Series: "trusty", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + Series: "trusty", + Constraints: "", + }, + }, + }, { + Id: "addUnit-12", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + To: "$addMachines-6", + }, + GUIArgs: []interface{}{"$deploy-3", "$addMachines-6"}, + Requires: []string{"deploy-3", "addMachines-6"}, + }, { + Id: "addUnit-16", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-5", + To: 
"$addMachines-6", + }, + GUIArgs: []interface{}{"$deploy-5", "$addMachines-6"}, + Requires: []string{"deploy-5", "addMachines-6"}, + }, { + Id: "addMachines-20", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "kvm", + ParentId: "$addUnit-16", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "kvm", + ParentId: "$addUnit-16", + }, + }, + Requires: []string{"addUnit-16"}, + }, { + Id: "addMachines-21", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addMachines-22", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addMachines-23", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addUnit-7", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addUnit-12", + }, + GUIArgs: []interface{}{"$deploy-1", "$addUnit-12"}, + Requires: []string{"deploy-1", "addUnit-12"}, + }, { + Id: "addUnit-11", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-20", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-20"}, + Requires: []string{"deploy-1", "addMachines-20"}, + }, { + Id: "addUnit-13", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + To: "$addMachines-21", + }, + GUIArgs: []interface{}{"$deploy-3", "$addMachines-21"}, + Requires: []string{"deploy-3", "addMachines-21"}, + }, { + Id: "addUnit-14", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-3", + To: "$addMachines-22", + }, + GUIArgs: []interface{}{"$deploy-3", "$addMachines-22"}, + Requires: []string{"deploy-3", "addMachines-22"}, + }, { + Id: "addUnit-15", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-5", + To: "$addMachines-23", + }, + GUIArgs: []interface{}{"$deploy-5", "$addMachines-23"}, + Requires: []string{"deploy-5", "addMachines-23"}, + }, { + Id: "addMachines-17", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "lxc", + ParentId: "$addUnit-13", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "lxc", + ParentId: "$addUnit-13", + }, + }, + Requires: []string{"addUnit-13"}, + }, { + Id: "addMachines-18", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "lxc", + ParentId: "$addUnit-14", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "lxc", + ParentId: "$addUnit-14", + }, + }, + Requires: []string{"addUnit-14"}, + }, { + Id: "addMachines-19", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "kvm", + ParentId: "$addUnit-15", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "kvm", + ParentId: "$addUnit-15", + }, + }, + Requires: []string{"addUnit-15"}, + }, { + Id: "addUnit-8", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-17", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-17"}, + Requires: []string{"deploy-1", "addMachines-17"}, + }, { + Id: "addUnit-9", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-18", + }, + GUIArgs: []interface{}{"$deploy-1", 
"$addMachines-18"}, + Requires: []string{"deploy-1", "addMachines-18"}, + }, { + Id: "addUnit-10", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-19", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-19"}, + Requires: []string{"deploy-1", "addMachines-19"}, + }}, +}, { + about: "unit placed to machines", + content: ` + services: + django: + charm: cs:trusty/django-42 + num_units: 5 + to: + - new + - 4 + - kvm:8 + - lxc:new + machines: + 4: + constraints: "cpu-cores=4" + 8: + constraints: "cpu-cores=8" + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/django-42", + }, + GUIArgs: []interface{}{"cs:trusty/django-42"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addMachines-2", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + Constraints: "cpu-cores=4", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + Constraints: "cpu-cores=4", + }, + }, + }, { + Id: "addMachines-3", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + Constraints: "cpu-cores=8", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + Constraints: "cpu-cores=8", + }, + }, + }, { + Id: "addUnit-5", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-2", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-2"}, + Requires: []string{"deploy-1", "addMachines-2"}, + }, { + Id: "addMachines-9", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{}, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{}, + }, + }, { + Id: "addMachines-10", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "kvm", + ParentId: "$addMachines-3", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "kvm", + ParentId: "$addMachines-3", + }, + }, + Requires: []string{"addMachines-3"}, + }, { + Id: "addMachines-11", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "lxc", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "lxc", + }, + }, + }, { + Id: "addMachines-12", + Method: "addMachines", + Params: bundlechanges.AddMachineParams{ + ContainerType: "lxc", + }, + GUIArgs: []interface{}{ + bundlechanges.AddMachineOptions{ + ContainerType: "lxc", + }, + }, + }, { + Id: "addUnit-4", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-9", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-9"}, + Requires: []string{"deploy-1", "addMachines-9"}, + }, { + Id: "addUnit-6", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-10", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-10"}, + Requires: []string{"deploy-1", "addMachines-10"}, + }, { + Id: "addUnit-7", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-11", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-11"}, + Requires: []string{"deploy-1", "addMachines-11"}, + }, { + Id: "addUnit-8", + Method: "addUnit", + Params: 
bundlechanges.AddUnitParams{ + Service: "$deploy-1", + To: "$addMachines-12", + }, + GUIArgs: []interface{}{"$deploy-1", "$addMachines-12"}, + Requires: []string{"deploy-1", "addMachines-12"}, + }}, +}, { + about: "service with storage", + content: ` + services: + django: + charm: cs:trusty/django-42 + num_units: 2 + storage: + osd-devices: 3,30G + tmpfs: tmpfs,1G + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "cs:trusty/django-42", + }, + GUIArgs: []interface{}{"cs:trusty/django-42"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + Storage: map[string]string{ + "osd-devices": "3,30G", + "tmpfs": "tmpfs,1G", + }, + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{ + "osd-devices": "3,30G", + "tmpfs": "tmpfs,1G", + }, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addUnit-2", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + }, + GUIArgs: []interface{}{"$deploy-1", nil}, + Requires: []string{"deploy-1"}, + }, { + Id: "addUnit-3", + Method: "addUnit", + Params: bundlechanges.AddUnitParams{ + Service: "$deploy-1", + }, + GUIArgs: []interface{}{"$deploy-1", nil}, + Requires: []string{"deploy-1"}, + }}, +}, { + about: "service with endpoint bindings", + content: ` + services: + django: + charm: django + bindings: + foo: bar + `, + expected: []record{{ + Id: "addCharm-0", + Method: "addCharm", + Params: bundlechanges.AddCharmParams{ + Charm: "django", + }, + GUIArgs: []interface{}{"django"}, + }, { + Id: "deploy-1", + Method: "deploy", + Params: bundlechanges.AddServiceParams{ + Charm: "$addCharm-0", + Service: "django", + EndpointBindings: map[string]string{"foo": "bar"}, + }, + GUIArgs: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{"foo": "bar"}, + }, + Requires: []string{"addCharm-0"}, + }}, +}} + +func (s *changesSuite) TestFromData(c *gc.C) { + for i, test := range fromDataTests { + c.Logf("test %d: %s", i, test.about) + + // Retrieve and validate the bundle data. + data, err := charm.ReadBundleData(strings.NewReader(test.content)) + c.Assert(err, jc.ErrorIsNil) + err = data.Verify(nil, nil) + c.Assert(err, jc.ErrorIsNil) + + // Retrieve the changes, and convert them to a sequence of records. + changes := bundlechanges.FromData(data) + records := make([]record, len(changes)) + for i, change := range changes { + r := record{ + Id: change.Id(), + Requires: change.Requires(), + Method: change.Method(), + GUIArgs: change.GUIArgs(), + } + r.Params = reflect.ValueOf(change).Elem().FieldByName("Params").Interface() + records[i] = r + } + + // Output the records for debugging. + b, err := json.MarshalIndent(records, "", " ") + c.Assert(err, jc.ErrorIsNil) + c.Logf("obtained records: %s", b) + + // Check that the obtained records are what we expect. 
+		c.Assert(records, jc.DeepEquals, test.expected)
+	}
+}
=== added directory 'src/github.com/juju/bundlechanges/cmd'
=== added directory 'src/github.com/juju/bundlechanges/cmd/get-bundle-changes'
=== added file 'src/github.com/juju/bundlechanges/cmd/get-bundle-changes/main.go'
--- src/github.com/juju/bundlechanges/cmd/get-bundle-changes/main.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/bundlechanges/cmd/get-bundle-changes/main.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,92 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+
+	"gopkg.in/juju/charm.v6-unstable"
+
+	"github.com/juju/bundlechanges"
+)
+
+func main() {
+	flag.Usage = usage
+	flag.Parse()
+	if len(flag.Args()) > 1 {
+		fmt.Fprintln(os.Stderr, "need a bundle path as first and only argument")
+		os.Exit(2)
+	}
+	r := os.Stdin
+	if path := flag.Arg(0); path != "" {
+		var err error
+		if r, err = os.Open(path); err != nil {
+			fmt.Fprintf(os.Stderr, "invalid bundle path: %s\n", err)
+			os.Exit(2)
+		}
+		defer r.Close()
+	}
+	if err := process(r, os.Stdout); err != nil {
+		fmt.Fprintf(os.Stderr, "unable to parse bundle: %s\n", err)
+		os.Exit(1)
+	}
+}
+
+// usage outputs instructions on how to use this command.
+func usage() {
+	fmt.Fprintln(os.Stderr, "usage: get-bundle-changes [bundle]")
+	fmt.Fprintln(os.Stderr, "bundle can also be provided on stdin")
+	flag.PrintDefaults()
+	os.Exit(2)
+}
+
+// process generates and prints to w the set of changes required to deploy
+// the bundle data read from r.
+func process(r io.Reader, w io.Writer) error {
+	// Read the bundle data.
+	data, err := charm.ReadBundleData(r)
+	if err != nil {
+		return err
+	}
+	// Validate the bundle.
+	if err := data.Verify(nil, nil); err != nil {
+		return err
+	}
+	// Generate the changes and convert them to the standard form.
+	changes := bundlechanges.FromData(data)
+	records := make([]*record, len(changes))
+	for i, change := range changes {
+		records[i] = &record{
+			Id: change.Id(),
+			Requires: change.Requires(),
+			Method: change.Method(),
+			Args: change.GUIArgs(),
+		}
+	}
+	// Serialize and print the records.
+	content, err := json.MarshalIndent(records, "", " ")
+	if err != nil {
+		return err
+	}
+	fmt.Fprintln(w, string(content))
+	return nil
+}
+
+// record holds the JSON representation of a change.
+type record struct {
+	// Id is the unique identifier for this change.
+	Id string `json:"id"`
+	// Method is the action to be performed to apply this change.
+	Method string `json:"method"`
+	// Args holds a list of arguments to pass to the method.
+	Args []interface{} `json:"args"`
+	// Requires holds a list of dependencies for this change. Each dependency
+	// is represented by the corresponding change id, and must be applied
+	// before this change is applied.
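+	//
+	// As a purely illustrative sketch (the values mirror the test
+	// expectations earlier in this diff, not the output of a real run),
+	// a serialized record for an added unit might look like:
+	//
+	//	{
+	//		"id": "addUnit-2",
+	//		"method": "addUnit",
+	//		"args": ["$deploy-1", null],
+	//		"requires": ["deploy-1"]
+	//	}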
+ Requires []string `json:"requires"` +} === added file 'src/github.com/juju/bundlechanges/dependencies.tsv' --- src/github.com/juju/bundlechanges/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +github.com/juju/errors git 4567a5e69fd3130ca0d89f69478e7ac025b67452 2015-03-27T19:24:31Z +github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z +github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z +github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/names git a6a253b0a94cc79e99a68d284b970ffce2a11ecd 2015-07-09T13:59:32Z +github.com/juju/schema git 5fef1eb09944df7dea207cf14ffa8163268cc7cd 2015-06-16T19:52:11Z +github.com/juju/testing git f521911d9a79aeb62c051fe18e689796369c5564 2015-05-29T04:40:43Z +github.com/juju/utils git 6f48322bb574b8578e8ecccf689220eac7edad9d 2015-08-10T03:07:18Z +golang.org/x/crypto git c57d4a71915a248dbad846d60825145062b4c18e 2015-03-27T05:11:19Z +gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z +gopkg.in/juju/charm.v6-unstable git 0d22d549d60dbc5011064710a2913073ed951975 2016-01-06T15:03:58Z +gopkg.in/mgo.v2 git 3569c88678d88179dcbd68d02ab081cbca3cd4d0 2015-06-04T15:26:27Z +gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z === added file 'src/github.com/juju/bundlechanges/handlers.go' --- src/github.com/juju/bundlechanges/handlers.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/bundlechanges/handlers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,260 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package bundlechanges + +import ( + "fmt" + "sort" + "strings" + + "gopkg.in/juju/charm.v6-unstable" +) + +// handleServices populates the change set with "addCharm"/"addService" records. +// This function also handles adding service annotations. +func handleServices(add func(Change), services map[string]*charm.ServiceSpec) map[string]string { + charms := make(map[string]string, len(services)) + addedServices := make(map[string]string, len(services)) + // Iterate over the map using its sorted keys so that results are + // deterministic and easier to test. + names := make([]string, 0, len(services)) + for name, _ := range services { + names = append(names, name) + } + sort.Strings(names) + var change Change + for _, name := range names { + service := services[name] + // Add the addCharm record if one hasn't been added yet. + if charms[service.Charm] == "" { + change = newAddCharmChange(AddCharmParams{ + Charm: service.Charm, + }) + add(change) + charms[service.Charm] = change.Id() + } + + // Add the addService record for this service. + change = newAddServiceChange(AddServiceParams{ + Charm: "$" + charms[service.Charm], + Service: name, + Options: service.Options, + Constraints: service.Constraints, + Storage: service.Storage, + EndpointBindings: service.EndpointBindings, + }, charms[service.Charm]) + add(change) + id := change.Id() + addedServices[name] = id + + // Expose the service if required. + if service.Expose { + add(newExposeChange(ExposeParams{ + Service: "$" + id, + }, id)) + } + + // Add service annotations. 
+		if len(service.Annotations) > 0 {
+			add(newSetAnnotationsChange(SetAnnotationsParams{
+				EntityType: ServiceType,
+				Id: "$" + id,
+				Annotations: service.Annotations,
+			}, id))
+		}
+	}
+	return addedServices
+}
+
+// handleMachines populates the change set with "addMachines" records.
+// This function also handles adding machine annotations.
+func handleMachines(add func(Change), machines map[string]*charm.MachineSpec) map[string]string {
+	addedMachines := make(map[string]string, len(machines))
+	// Iterate over the map using its sorted keys so that results are
+	// deterministic and easier to test.
+	names := make([]string, 0, len(machines))
+	for name, _ := range machines {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	var change Change
+	for _, name := range names {
+		machine := machines[name]
+		if machine == nil {
+			machine = &charm.MachineSpec{}
+		}
+		// Add the addMachines record for this machine.
+		change = newAddMachineChange(AddMachineParams{
+			Series: machine.Series,
+			Constraints: machine.Constraints,
+		})
+		add(change)
+		addedMachines[name] = change.Id()
+
+		// Add machine annotations.
+		if len(machine.Annotations) > 0 {
+			add(newSetAnnotationsChange(SetAnnotationsParams{
+				EntityType: MachineType,
+				Id: "$" + change.Id(),
+				Annotations: machine.Annotations,
+			}, change.Id()))
+		}
+	}
+	return addedMachines
+}
+
+// handleRelations populates the change set with "addRelation" records.
+func handleRelations(add func(Change), relations [][]string, addedServices map[string]string) {
+	for _, relation := range relations {
+		// Add the addRelation record for this relation pair.
+		args := make([]string, 2)
+		requires := make([]string, 2)
+		for i, endpoint := range relation {
+			ep := parseEndpoint(endpoint)
+			service := addedServices[ep.service]
+			requires[i] = service
+			ep.service = service
+			args[i] = "$" + ep.String()
+		}
+		add(newAddRelationChange(AddRelationParams{
+			Endpoint1: args[0],
+			Endpoint2: args[1],
+		}, requires...))
+	}
+}
+
+// handleUnits populates the change set with "addUnit" records.
+// It also adds the machine containers in which to place units, when required.
+func handleUnits(add func(Change), services map[string]*charm.ServiceSpec, addedServices, addedMachines map[string]string) {
+	records := make(map[string]*AddUnitChange)
+	// Iterate over the map using its sorted keys so that results are
+	// deterministic and easier to test.
+	names := make([]string, 0, len(services))
+	for name, _ := range services {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	// Collect and add all unit changes. These records are likely to be
+	// modified later in order to handle unit placement.
+	for _, name := range names {
+		service := services[name]
+		for i := 0; i < service.NumUnits; i++ {
+			addedService := addedServices[name]
+			change := newAddUnitChange(AddUnitParams{
+				Service: "$" + addedService,
+			}, addedService)
+			add(change)
+			records[fmt.Sprintf("%s/%d", name, i)] = change
+		}
+	}
+	// Now handle unit placement for each added service unit.
+	for _, name := range names {
+		service := services[name]
+		numPlaced := len(service.To)
+		if numPlaced == 0 {
+			// If there are no placement directives it means that either the
+			// service has no units (in which case there is no need to
+			// proceed), or the units are not placed (in which case there is no
+			// need to modify the change already added above).
+			continue
+		}
+		// servicePlacedUnits holds, for each target service, the number of
+		// units of the current service already placed alongside units of
+		// that target service.
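+		// For example (an illustrative walk-through of the logic below,
+		// matching the "unit placed to machines" test case in this diff):
+		// with num_units: 5 and to: ["new", "4", "kvm:8", "lxc:new"], the
+		// first unit goes to a new machine, the second to bundle machine 4,
+		// the third to a new kvm container on bundle machine 8, and the
+		// fourth and fifth to new lxc containers, because the last
+		// directive is reused once the directives run out.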
+ servicePlacedUnits := make(map[string]int) + // At this point we know that we have at least one placement directive. + // Fill the other ones if required. + lastPlacement := service.To[numPlaced-1] + for i := 0; i < service.NumUnits; i++ { + p := lastPlacement + if i < numPlaced { + p = service.To[i] + } + // Generate the changes required in order to place this unit, and + // retrieve the identifier of the parent change. + parentId := unitParent(add, p, records, addedMachines, servicePlacedUnits) + // Retrieve and modify the original "addUnit" change to add the + // new parent requirement and placement target. + change := records[fmt.Sprintf("%s/%d", name, i)] + change.requires = append(change.requires, parentId) + change.Params.To = "$" + parentId + } + } +} + +func unitParent(add func(Change), p string, records map[string]*AddUnitChange, addedMachines map[string]string, servicePlacedUnits map[string]int) (parentId string) { + placement, err := charm.ParsePlacement(p) + if err != nil { + // Since the bundle is already verified, this should never happen. + panic(err) + } + if placement.Machine == "new" { + // The unit is placed to a new machine. + change := newAddMachineChange(AddMachineParams{ + ContainerType: placement.ContainerType, + }) + add(change) + return change.Id() + } + if placement.Machine != "" { + // The unit is placed to a machine declared in the bundle. + parentId = addedMachines[placement.Machine] + if placement.ContainerType != "" { + parentId = addContainer(add, placement.ContainerType, parentId) + } + return parentId + } + // The unit is placed to another unit or to a service. + number := placement.Unit + if number == -1 { + // The unit is placed to a service. Calculate the unit number to be + // used for unit co-location. + if n, ok := servicePlacedUnits[placement.Service]; ok { + number = n + 1 + } else { + number = 0 + } + servicePlacedUnits[placement.Service] = number + } + otherUnit := fmt.Sprintf("%s/%d", placement.Service, number) + parentId = records[otherUnit].Id() + if placement.ContainerType != "" { + parentId = addContainer(add, placement.ContainerType, parentId) + } + return parentId +} + +func addContainer(add func(Change), containerType, parentId string) string { + change := newAddMachineChange(AddMachineParams{ + ContainerType: containerType, + ParentId: "$" + parentId, + }, parentId) + add(change) + return change.Id() +} + +// parseEndpoint creates an endpoint from its string representation. +func parseEndpoint(e string) *endpoint { + parts := strings.SplitN(e, ":", 2) + ep := &endpoint{ + service: parts[0], + } + if len(parts) == 2 { + ep.relation = parts[1] + } + return ep +} + +// endpoint holds a relation endpoint. +type endpoint struct { + service string + relation string +} + +// String returns the string representation of an endpoint. +func (ep endpoint) String() string { + if ep.relation == "" { + return ep.service + } + return fmt.Sprintf("%s:%s", ep.service, ep.relation) +} === modified file 'src/github.com/juju/cmd/README.md' --- src/github.com/juju/cmd/README.md 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/cmd/README.md 2016-03-22 15:18:22 +0000 @@ -60,10 +60,19 @@ FormatYaml marshals value to a yaml-formatted []byte, unless value is nil. +## func IsErrSilent +``` go +func IsErrSilent(err error) bool +``` +IsErrSilent returns whether the error should be logged from cmd.Main. + + ## func IsRcPassthroughError ``` go func IsRcPassthroughError(err error) bool ``` +IsRcPassthroughError returns whether the error is an RcPassthroughError. 
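+
+As a usage sketch (hedged: the `exitCode` helper below is invented for
+this example and is not part of the package), these predicates can
+drive process exit handling:
+
+``` go
+package main
+
+import (
+	"os"
+
+	"github.com/juju/cmd"
+)
+
+// exitCode maps an error returned by a command to a process exit code,
+// passing through any explicit code carried by an RcPassthroughError.
+func exitCode(err error) int {
+	if err == nil {
+		return 0
+	}
+	if cmd.IsRcPassthroughError(err) {
+		return err.(*cmd.RcPassthroughError).Code
+	}
+	return 1
+}
+
+func main() {
+	// A plugin command that exited with code 42 would surface as:
+	os.Exit(exitCode(&cmd.RcPassthroughError{Code: 42}))
+}
+```
+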
+
## func Main
``` go

@@ -92,6 +101,16 @@ there is an error.
+
+## func ParseAliasFile
+``` go
+func ParseAliasFile(aliasFilename string) map[string][]string
+```
+ParseAliasFile will read the specified file and convert
+the content to a map of names to the command line arguments
+they relate to. The function will always return a valid map, even
+if it is empty.
+
+
## func ZeroOrOneArgs
``` go
func ZeroOrOneArgs(args []string) (string, error)

@@ -231,6 +250,7 @@
``` go
type Context struct {
	Dir string
+	Env map[string]string
	Stdin io.Reader
	Stdout io.Writer
	Stderr io.Writer

@@ -291,6 +311,15 @@
+### func (\*Context) Getenv
+``` go
+func (ctx *Context) Getenv(key string) string
+```
+Getenv looks up an environment variable in the context. It mirrors
+os.Getenv. An empty string is returned if the key is not set.
+
+
+
### func (\*Context) Infof
``` go
func (ctx *Context) Infof(format string, params ...interface{})

@@ -308,6 +337,14 @@
+
+### func (\*Context) Setenv
+``` go
+func (ctx *Context) Setenv(key, value string) error
+```
+Setenv sets an environment variable in the context. It mirrors os.Setenv.
+
+
+
### func (\*Context) StopInterruptNotify
``` go
func (ctx *Context) StopInterruptNotify(c chan<- os.Signal)

@@ -355,7 +392,12 @@
## type FileVar
``` go
type FileVar struct {
+	// Path is the path to the file.
	Path string
+
+	// StdinMarkers are the Path values that should be interpreted as
+	// stdin. If it is empty then stdin is not supported.
+	StdinMarkers []string
}
```
FileVar represents a path to a file.

@@ -370,6 +412,22 @@
+
+### func (FileVar) IsStdin
+``` go
+func (f FileVar) IsStdin() bool
+```
+IsStdin determines whether or not the path represents stdin.
+
+
+
+### func (\*FileVar) Open
+``` go
+func (f *FileVar) Open(ctx *Context) (io.ReadCloser, error)
+```
+Open opens the file.
+
+
+
### func (\*FileVar) Read
``` go
func (f *FileVar) Read(ctx *Context) ([]byte, error)

@@ -386,6 +444,15 @@
+
+### func (\*FileVar) SetStdin
+``` go
+func (f *FileVar) SetStdin(markers ...string)
+```
+SetStdin sets StdinMarkers to the provided strings. If none are
+provided then the default of "-" is used.
+
+
+
### func (\*FileVar) String
``` go
func (f *FileVar) String() string

@@ -567,6 +634,9 @@
	Code int
}
```
+RcPassthroughError indicates that a Juju plugin command exited with a
+non-zero exit code. This error is used to exit with the return code.
+

@@ -581,6 +651,8 @@
``` go
func (e *RcPassthroughError) Error() string
```
+Error implements error.
+

## type StringsValue

@@ -735,6 +807,16 @@
+
+### func (\*SuperCommand) RegisterDeprecated
+``` go
+func (c *SuperCommand) RegisterDeprecated(subcmd Command, check DeprecationCheck)
+```
+RegisterDeprecated makes a subcommand available for use on the command line if it
+is not obsolete. It inserts the command with the specified DeprecationCheck so
+that a warning is displayed if the command is deprecated.
+
+
+
### func (\*SuperCommand) RegisterSuperAlias
``` go
func (c *SuperCommand) RegisterSuperAlias(name, super, forName string, check DeprecationCheck)

@@ -793,6 +875,12 @@
	MissingCallback MissingCallback
	Aliases []string
	Version string
+
+	// UserAliasesFilename refers to the location of a file that contains
+	//     name = cmd [args...]
+	// values, used to change the default behaviour of commands in order
+	// to add flags, or to provide shortcuts to longer commands.
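+	// An illustrative alias file (the entries mirror the tests later in
+	// this diff) might contain:
+	//
+	//     def = defenestrate
+	//     be-firm = defenestrate --option firmly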
+ UserAliasesFilename string } ``` SuperCommandParams provides a way to have default parameter to the === added file 'src/github.com/juju/cmd/aliasfile.go' --- src/github.com/juju/cmd/aliasfile.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/cmd/aliasfile.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENSE file for details. + +package cmd + +import ( + "io/ioutil" + "strings" +) + +// ParseAliasFile will read the specified file and convert +// the content to a map of names to the command line arguments +// they relate to. The function will always return a valid map, even +// if it is empty. +func ParseAliasFile(aliasFilename string) map[string][]string { + result := map[string][]string{} + if aliasFilename == "" { + return result + } + + content, err := ioutil.ReadFile(aliasFilename) + if err != nil { + logger.Tracef("unable to read alias file %q: %s", aliasFilename, err) + return result + } + + lines := strings.Split(string(content), "\n") + for i, line := range lines { + line = strings.TrimSpace(line) + if line == "" || strings.HasPrefix(line, "#") { + // skip blank lines and comments + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + logger.Warningf("line %d bad in alias file: %s", i+1, line) + continue + } + name, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]) + if name == "" { + logger.Warningf("line %d missing alias name in alias file: %s", i+1, line) + continue + } + if value == "" { + logger.Warningf("line %d missing alias value in alias file: %s", i+1, line) + continue + } + + logger.Tracef("setting alias %q=%q", name, value) + result[name] = strings.Fields(value) + } + return result +} === added file 'src/github.com/juju/cmd/aliasfile_test.go' --- src/github.com/juju/cmd/aliasfile_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/cmd/aliasfile_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENSE file for details. 
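+
+// As an illustrative sketch (the path is hypothetical), ParseAliasFile
+// is typically used like:
+//
+//	aliases := cmd.ParseAliasFile("/home/user/.juju/aliases")
+//	args := aliases["status"]
+//
+// The function never returns a nil map, so the lookup above is always
+// safe, even when the file is missing or empty.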
+
+package cmd_test
+
+import (
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/juju/testing"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/cmd"
+)
+
+type ParseAliasFileSuite struct {
+	testing.LoggingSuite
+}
+
+var _ = gc.Suite(&ParseAliasFileSuite{})
+
+func (*ParseAliasFileSuite) TestMissing(c *gc.C) {
+	dir := c.MkDir()
+	filename := filepath.Join(dir, "missing")
+	aliases := cmd.ParseAliasFile(filename)
+	c.Assert(aliases, gc.NotNil)
+	c.Assert(aliases, gc.HasLen, 0)
+}
+
+func (*ParseAliasFileSuite) TestParse(c *gc.C) {
+	dir := c.MkDir()
+	filename := filepath.Join(dir, "aliases")
+	content := `
+# comments skipped, as are the blank lines, such as the line
+# at the start of this file
+  foo = trailing-space
+repeat = first
+flags = flags --with flag
+
+# if the same alias name is used more than once, last one wins
+repeat = second
+
+# badly formatted values are logged, but skipped
+no equals sign
+=
+key =
+= value
+`
+	err := ioutil.WriteFile(filename, []byte(content), 0644)
+	c.Assert(err, gc.IsNil)
+	aliases := cmd.ParseAliasFile(filename)
+	c.Assert(aliases, gc.DeepEquals, map[string][]string{
+		"foo":    []string{"trailing-space"},
+		"repeat": []string{"second"},
+		"flags":  []string{"flags", "--with", "flag"},
+	})
+}
=== modified file 'src/github.com/juju/cmd/output.go'
--- src/github.com/juju/cmd/output.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/cmd/output.go	2016-03-22 15:18:22 +0000
@@ -13,7 +13,7 @@
	"strconv"
	"strings"

-	goyaml "gopkg.in/yaml.v1"
+	goyaml "gopkg.in/yaml.v2"
	"launchpad.net/gnuflag"
)

=== modified file 'src/github.com/juju/cmd/supercommand.go'
--- src/github.com/juju/cmd/supercommand.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/cmd/supercommand.go	2016-03-22 15:18:22 +0000
@@ -13,7 +13,7 @@
	"launchpad.net/gnuflag"
)

-var logger = loggo.GetLogger("juju.cmd")
+var logger = loggo.GetLogger("cmd")

type topic struct {
	short string

@@ -55,21 +55,28 @@
	MissingCallback MissingCallback
	Aliases []string
	Version string
+
+	// UserAliasesFilename refers to the location of a file that contains
+	//     name = cmd [args...]
+	// values, used to change the default behaviour of commands in order
+	// to add flags, or to provide shortcuts to longer commands.
+	UserAliasesFilename string
}

// NewSuperCommand creates and initializes a new `SuperCommand`, and returns
// the fully initialized structure.
func NewSuperCommand(params SuperCommandParams) *SuperCommand {
	command := &SuperCommand{
-		Name: params.Name,
-		Purpose: params.Purpose,
-		Doc: params.Doc,
-		Log: params.Log,
-		usagePrefix: params.UsagePrefix,
-		missingCallback: params.MissingCallback,
-		Aliases: params.Aliases,
-		version: params.Version,
-		notifyRun: params.NotifyRun,
+		Name: params.Name,
+		Purpose: params.Purpose,
+		Doc: params.Doc,
+		Log: params.Log,
+		usagePrefix: params.UsagePrefix,
+		missingCallback: params.MissingCallback,
+		Aliases: params.Aliases,
+		version: params.Version,
+		notifyRun: params.NotifyRun,
+		userAliasesFilename: params.UserAliasesFilename,
	}
	command.init()
	return command

@@ -102,23 +109,26 @@
// its selected subcommand.
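+// As an illustrative sketch (mirroring the tests later in this diff;
+// the alias file path is hypothetical), a SuperCommand with user
+// aliases is constructed like:
+//
+//	jc := NewSuperCommand(SuperCommandParams{
+//		Name:                "jujutest",
+//		UserAliasesFilename: "/home/user/.juju/aliases",
+//	})
+//	jc.Register(mySubCommand) // any Command implementation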
type SuperCommand struct {
	CommandBase
-	Name string
-	Purpose string
-	Doc string
-	Log *Log
-	Aliases []string
-	version string
-	usagePrefix string
-	subcmds map[string]commandReference
-	help *helpCommand
-	commonflags *gnuflag.FlagSet
-	flags *gnuflag.FlagSet
-	action commandReference
-	showHelp bool
-	showDescription bool
-	showVersion bool
-	missingCallback MissingCallback
-	notifyRun func(string)
+	Name string
+	Purpose string
+	Doc string
+	Log *Log
+	Aliases []string
+	version string
+	usagePrefix string
+	userAliasesFilename string
+	userAliases map[string][]string
+	subcmds map[string]commandReference
+	help *helpCommand
+	commonflags *gnuflag.FlagSet
+	flags *gnuflag.FlagSet
+	action commandReference
+	showHelp bool
+	showDescription bool
+	showVersion bool
+	noAlias bool
+	missingCallback MissingCallback
+	notifyRun func(string)
}

// IsSuperCommand implements Command.IsSuperCommand

@@ -142,6 +152,8 @@
		command: newVersionCommand(c.version),
		}
	}
+
+	c.userAliases = ParseAliasFile(c.userAliasesFilename)
}

// AddHelpTopic adds a new help topic with the description being the short

@@ -335,7 +347,10 @@
	// Any flags added below only take effect when no subcommand is
	// specified (e.g. command --version).
	if c.version != "" {
-		f.BoolVar(&c.showVersion, "version", false, "Show the command's version and exit")
+		f.BoolVar(&c.showVersion, "version", false, "show the command's version and exit")
+	}
+	if c.userAliasesFilename != "" {
+		f.BoolVar(&c.noAlias, "no-alias", false, "do not process command aliases when running this command")
	}
	c.flags = f
}

@@ -358,6 +373,10 @@
		return c.action.command.Init(args)
	}

+	if userAlias, found := c.userAliases[args[0]]; found && !c.noAlias {
+		logger.Debugf("using alias %q=%q", args[0], strings.Join(userAlias, " "))
+		args = append(userAlias, args[1:]...)
+	}
	found := false
	// Look for the command.
	if c.action, found = c.subcmds[args[0]]; !found {

=== modified file 'src/github.com/juju/cmd/supercommand_test.go'
--- src/github.com/juju/cmd/supercommand_test.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/cmd/supercommand_test.go	2016-03-22 15:18:22 +0000
@@ -6,6 +6,8 @@
import (
	"bytes"
	"fmt"
+	"io/ioutil"
+	"path/filepath"
	"strings"

	gitjujutesting "github.com/juju/testing"

@@ -23,6 +25,21 @@
	return jc, tc, cmdtesting.InitCommand(jc, args)
}

+func initDefenestrateWithAliases(c *gc.C, args []string) (*cmd.SuperCommand, *TestCommand, error) {
+	dir := c.MkDir()
+	filename := filepath.Join(dir, "aliases")
+	err := ioutil.WriteFile(filename, []byte(`
+def = defenestrate
+be-firm = defenestrate --option firmly
+other = missing
+	`), 0644)
+	c.Assert(err, gc.IsNil)
+	jc := cmd.NewSuperCommand(cmd.SuperCommandParams{Name: "jujutest", UserAliasesFilename: filename})
+	tc := &TestCommand{Name: "defenestrate"}
+	jc.Register(tc)
+	return jc, tc, cmdtesting.InitCommand(jc, args)
+}
+
type SuperCommandSuite struct {
	gitjujutesting.IsolationSuite
}

@@ -64,6 +81,40 @@
	// --description must be used on its own.
	_, _, err = initDefenestrate([]string{"--description", "defenestrate"})
	c.Assert(err, gc.ErrorMatches, `unrecognized args: \["defenestrate"\]`)
+
+	// --no-alias is not a valid option if there is no alias file specified.
+	_, _, err = initDefenestrate([]string{"--no-alias", "defenestrate"})
+	c.Assert(err, gc.ErrorMatches, `flag provided but not defined: --no-alias`)
+}
+
+func (s *SuperCommandSuite) TestUserAliasDispatch(c *gc.C) {
+	// Can still use the full name.
+ jc, tc, err := initDefenestrateWithAliases(c, []string{"defenestrate"}) + c.Assert(err, gc.IsNil) + c.Assert(tc.Option, gc.Equals, "") + info := jc.Info() + c.Assert(info.Name, gc.Equals, "jujutest defenestrate") + c.Assert(info.Args, gc.Equals, "") + c.Assert(info.Doc, gc.Equals, "defenestrate-doc") + + jc, tc, err = initDefenestrateWithAliases(c, []string{"def"}) + c.Assert(err, gc.IsNil) + c.Assert(tc.Option, gc.Equals, "") + info = jc.Info() + c.Assert(info.Name, gc.Equals, "jujutest defenestrate") + + jc, tc, err = initDefenestrateWithAliases(c, []string{"be-firm"}) + c.Assert(err, gc.IsNil) + c.Assert(tc.Option, gc.Equals, "firmly") + info = jc.Info() + c.Assert(info.Name, gc.Equals, "jujutest defenestrate") + + _, _, err = initDefenestrateWithAliases(c, []string{"--no-alias", "def"}) + c.Assert(err, gc.ErrorMatches, "unrecognized command: jujutest def") + + // Aliases to missing values are converted before lookup. + _, _, err = initDefenestrateWithAliases(c, []string{"other"}) + c.Assert(err, gc.ErrorMatches, "unrecognized command: jujutest missing") } func (s *SuperCommandSuite) TestRegister(c *gc.C) { === modified file 'src/github.com/juju/errors/errortypes.go' --- src/github.com/juju/errors/errortypes.go 2015-04-14 14:11:54 +0000 +++ src/github.com/juju/errors/errortypes.go 2016-03-22 15:18:22 +0000 @@ -233,3 +233,52 @@ _, ok := err.(*notAssigned) return ok } + +// badRequest represents an error when a request has bad parameters. +type badRequest struct { + Err +} + +// BadRequestf returns an error which satisfies IsBadRequest(). +func BadRequestf(format string, args ...interface{}) error { + return &badRequest{wrap(nil, format, "", args...)} +} + +// NewBadRequest returns an error which wraps err that satisfies +// IsBadRequest(). +func NewBadRequest(err error, msg string) error { + return &badRequest{wrap(err, msg, "")} +} + +// IsBadRequest reports whether err was created with BadRequestf() or +// NewBadRequest(). +func IsBadRequest(err error) bool { + err = Cause(err) + _, ok := err.(*badRequest) + return ok +} + +// methodNotAllowed represents an error when an HTTP request +// is made with an inappropriate method. +type methodNotAllowed struct { + Err +} + +// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed(). +func MethodNotAllowedf(format string, args ...interface{}) error { + return &methodNotAllowed{wrap(nil, format, "", args...)} +} + +// NewMethodNotAllowed returns an error which wraps err that satisfies +// IsMethodNotAllowed(). +func NewMethodNotAllowed(err error, msg string) error { + return &methodNotAllowed{wrap(err, msg, "")} +} + +// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or +// NewMethodNotAllowed(). 
+func IsMethodNotAllowed(err error) bool { + err = Cause(err) + _, ok := err.(*methodNotAllowed) + return ok +} === modified file 'src/github.com/juju/errors/errortypes_test.go' --- src/github.com/juju/errors/errortypes_test.go 2015-04-14 14:11:54 +0000 +++ src/github.com/juju/errors/errortypes_test.go 2016-03-22 15:18:22 +0000 @@ -36,6 +36,8 @@ &errorInfo{errors.IsNotValid, errors.NotValidf, errors.NewNotValid, " not valid"}, &errorInfo{errors.IsNotProvisioned, errors.NotProvisionedf, errors.NewNotProvisioned, " not provisioned"}, &errorInfo{errors.IsNotAssigned, errors.NotAssignedf, errors.NewNotAssigned, " not assigned"}, + &errorInfo{errors.IsMethodNotAllowed, errors.MethodNotAllowedf, errors.NewMethodNotAllowed, ""}, + &errorInfo{errors.IsBadRequest, errors.BadRequestf, errors.NewBadRequest, ""}, } type errorTypeSuite struct{} === added directory 'src/github.com/juju/gomaasapi' === added file 'src/github.com/juju/gomaasapi/.gitignore' --- src/github.com/juju/gomaasapi/.gitignore 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +*.sw[nop] +example/[^.]* === added file 'src/github.com/juju/gomaasapi/COPYING' --- src/github.com/juju/gomaasapi/COPYING 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/COPYING 2016-03-22 15:18:22 +0000 @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.

=== added file 'src/github.com/juju/gomaasapi/COPYING.LESSER'
--- src/github.com/juju/gomaasapi/COPYING.LESSER	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/COPYING.LESSER	2016-03-22 15:18:22 +0000
@@ -0,0 +1,165 @@
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+ + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
=== added file 'src/github.com/juju/gomaasapi/LICENSE'
--- src/github.com/juju/gomaasapi/LICENSE	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/LICENSE	2016-03-22 15:18:22 +0000
@@ -0,0 +1,15 @@
+Gomaasapi - Go bindings for talking to MAAS
+
+Copyright 2012-2014, Canonical Ltd.
+
+This program is free software: you can redistribute it and/or modify it under
+the terms of the GNU Lesser General Public License as published by the Free
+Software Foundation, either version 3 of the License, or (at your option) any
+later version.
+
+This program is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
+
+See both COPYING and COPYING.LESSER for the full terms of the GNU Lesser
+General Public License.
\ No newline at end of file

=== added file 'src/github.com/juju/gomaasapi/Makefile'
--- src/github.com/juju/gomaasapi/Makefile	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/Makefile	2016-03-22 15:18:22 +0000
@@ -0,0 +1,26 @@
+# Build, and run tests.
+check: examples
+	go test ./...
+
+example_source := $(wildcard example/*.go)
+example_binaries := $(patsubst %.go,%,$(example_source))
+
+# Clean up binaries.
+clean:
+	$(RM) $(example_binaries)
+
+# Reformat the source files to match our layout standards.
+format:
+	gofmt -w .
+
+# Invoke gofmt's "simplify" option to streamline the source code.
+simplify:
+	gofmt -w -s .
+
+# Build the examples (we have no tests for them).
+examples: $(example_binaries)
+
+%: %.go
+	go build -o $@ $<
+
+.PHONY: check clean format examples simplify

=== added file 'src/github.com/juju/gomaasapi/README.rst'
--- src/github.com/juju/gomaasapi/README.rst	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/README.rst	2016-03-22 15:18:22 +0000
@@ -0,0 +1,12 @@
+.. -*- mode: rst -*-
+
+******************************
+MAAS API client library for Go
+******************************
+
+This library serves as a minimal client for communicating with the MAAS web
+API in Go programs.
+
+For more information see the `project homepage`_.
+
+.. _project homepage: https://github.com/juju/gomaasapi

=== added file 'src/github.com/juju/gomaasapi/client.go'
--- src/github.com/juju/gomaasapi/client.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/client.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,303 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// Number of retries performed when the server returns a 503
+	// response with a 'Retry-after' header. A request will be issued
+	// at most NumberOfRetries + 1 times.
+	NumberOfRetries = 4
+
+	RetryAfterHeaderName = "Retry-After"
+)
+
+// Client represents a way of communicating with a MAAS API instance.
+// It is stateless, so it can have concurrent requests in progress.
+type Client struct {
+	APIURL *url.URL
+	Signer OAuthSigner
+}
+
+// ServerError is an HTTP error (or at least, a non-2xx result) received from
+// the server. It contains the numerical HTTP status code as well as an error
+// string and the response's headers.
+type ServerError struct {
+	error
+	StatusCode int
+	Header     http.Header
+}
+
+// readAndClose reads and closes the given ReadCloser.
+//
+// Trying to read from a nil stream simply returns nil, with no error.
+func readAndClose(stream io.ReadCloser) ([]byte, error) {
+	if stream == nil {
+		return nil, nil
+	}
+	defer stream.Close()
+	return ioutil.ReadAll(stream)
+}
+
+// dispatchRequest sends a request to the server, and interprets the response.
+// Client-side errors will return an empty response and a non-nil error. For
+// server-side errors however (i.e. responses with a non-2xx status code), the
+// returned error will be a ServerError and the returned body will reflect the
+// server's response. If the server returns a 503 response with a 'Retry-after'
+// header, the request will be transparently retried.
+func (client Client) dispatchRequest(request *http.Request) ([]byte, error) {
+	// First, store the request's body in a []byte so that it can be
+	// restored before each request attempt.
+	bodyContent, err := readAndClose(request.Body)
+	if err != nil {
+		return nil, err
+	}
+	for retry := 0; retry < NumberOfRetries; retry++ {
+		// Restore body before issuing request.
+		newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+		request.Body = newBody
+		body, err := client.dispatchSingleRequest(request)
+		// If this is a 503 response with a non-void "Retry-After" header: wait
+		// as instructed and retry the request.
+		if err != nil {
+			serverError, ok := err.(ServerError)
+			if ok && serverError.StatusCode == http.StatusServiceUnavailable {
+				retryAfterSeconds, errConv := strconv.Atoi(serverError.Header.Get(RetryAfterHeaderName))
+				if errConv == nil {
+					time.Sleep(time.Duration(retryAfterSeconds) * time.Second)
+					continue
+				}
+			}
+		}
+		return body, err
+	}
+	// Restore body before issuing the final request.
+	newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+	request.Body = newBody
+	return client.dispatchSingleRequest(request)
+}
+
+func (client Client) dispatchSingleRequest(request *http.Request) ([]byte, error) {
+	if err := client.Signer.OAuthSign(request); err != nil {
+		return nil, err
+	}
+	httpClient := http.Client{}
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	request.Close = true
+	response, err := httpClient.Do(request)
+	if err != nil {
+		return nil, err
+	}
+	body, err := readAndClose(response.Body)
+	if err != nil {
+		return nil, err
+	}
+	if response.StatusCode < 200 || response.StatusCode > 299 {
+		msg := fmt.Errorf("gomaasapi: got error back from server: %v (%v)", response.Status, string(body))
+		return body, ServerError{error: msg, StatusCode: response.StatusCode, Header: response.Header}
+	}
+	return body, nil
+}
+
+// GetURL returns the URL to a given resource on the API, based on its URI.
+// The resource URI may be absolute or relative; either way the result is a
+// full absolute URL including the network part.
+func (client Client) GetURL(uri *url.URL) *url.URL {
+	return client.APIURL.ResolveReference(uri)
+}
+
+// Get performs an HTTP "GET" to the API. This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
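+//
+// As a quick illustration (an editorial sketch, not part of the original
+// source; the server URL is a placeholder and error handling is elided):
+//
+//	client, err := NewAnonymousClient("http://example.com/MAAS/", "1.0")
+//	uri, _ := url.Parse("nodes/")
+//	listing, err := client.Get(uri, "list", url.Values{})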
+func (client Client) Get(uri *url.URL, operation string, parameters url.Values) ([]byte, error) {
+	if parameters == nil {
+		parameters = make(url.Values)
+	}
+	opParameter := parameters.Get("op")
+	if opParameter != "" {
+		msg := fmt.Errorf("reserved parameter 'op' passed (with value '%s')", opParameter)
+		return nil, msg
+	}
+	if operation != "" {
+		parameters.Set("op", operation)
+	}
+	queryURL := client.GetURL(uri)
+	queryURL.RawQuery = parameters.Encode()
+	request, err := http.NewRequest("GET", queryURL.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	return client.dispatchRequest(request)
+}
+
+// writeMultiPartFiles writes the given files as parts of a multipart message
+// using the given writer.
+func writeMultiPartFiles(writer *multipart.Writer, files map[string][]byte) error {
+	for fileName, fileContent := range files {
+		fw, err := writer.CreateFormFile(fileName, fileName)
+		if err != nil {
+			return err
+		}
+		if _, err := io.Copy(fw, bytes.NewBuffer(fileContent)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// writeMultiPartParams writes the given parameters as parts of a multipart
+// message using the given writer.
+func writeMultiPartParams(writer *multipart.Writer, parameters url.Values) error {
+	for key, values := range parameters {
+		for _, value := range values {
+			fw, err := writer.CreateFormField(key)
+			if err != nil {
+				return err
+			}
+			buffer := bytes.NewBufferString(value)
+			if _, err := io.Copy(fw, buffer); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// nonIdempotentRequestFiles implements the common functionality of PUT and
+// POST requests (but not GET or DELETE requests) when uploading files is
+// needed.
+func (client Client) nonIdempotentRequestFiles(method string, uri *url.URL, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	writer := multipart.NewWriter(buf)
+	err := writeMultiPartFiles(writer, files)
+	if err != nil {
+		return nil, err
+	}
+	err = writeMultiPartParams(writer, parameters)
+	if err != nil {
+		return nil, err
+	}
+	writer.Close()
+	requestURL := client.GetURL(uri)
+	request, err := http.NewRequest(method, requestURL.String(), buf)
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", writer.FormDataContentType())
+	return client.dispatchRequest(request)
+}
+
+// nonIdempotentRequest implements the common functionality of PUT and POST
+// requests (but not GET or DELETE requests).
+func (client Client) nonIdempotentRequest(method string, uri *url.URL, parameters url.Values) ([]byte, error) {
+	requestURL := client.GetURL(uri)
+	request, err := http.NewRequest(method, requestURL.String(), strings.NewReader(parameters.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	return client.dispatchRequest(request)
+}
+
+// Post performs an HTTP "POST" to the API. This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Post(uri *url.URL, operation string, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	queryParams := url.Values{"op": {operation}}
+	uri.RawQuery = queryParams.Encode()
+	if files != nil {
+		return client.nonIdempotentRequestFiles("POST", uri, parameters, files)
+	}
+	return client.nonIdempotentRequest("POST", uri, parameters)
+}
+
+// Put updates an object on the API, using an HTTP "PUT" request.
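+//
+// A minimal sketch of use (an editorial addition; the URI and values shown
+// are placeholders, and "client" is any Client built by NewAnonymousClient
+// or NewAuthenticatedClient):
+//
+//	uri, _ := url.Parse("nodes/node-123/")
+//	_, err := client.Put(uri, url.Values{"hostname": {"newname"}})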
+func (client Client) Put(uri *url.URL, parameters url.Values) ([]byte, error) {
+	return client.nonIdempotentRequest("PUT", uri, parameters)
+}
+
+// Delete deletes an object on the API, using an HTTP "DELETE" request.
+func (client Client) Delete(uri *url.URL) error {
+	requestURL := client.GetURL(uri)
+	request, err := http.NewRequest("DELETE", requestURL.String(), strings.NewReader(""))
+	if err != nil {
+		return err
+	}
+	_, err = client.dispatchRequest(request)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Anonymous "signature method" implementation.
+type anonSigner struct{}
+
+func (signer anonSigner) OAuthSign(request *http.Request) error {
+	return nil
+}
+
+// anonSigner implements the OAuthSigner interface.
+var _ OAuthSigner = anonSigner{}
+
+func composeAPIURL(baseURL string, apiVersion string) (*url.URL, error) {
+	base := EnsureTrailingSlash(baseURL)
+	apiurl := fmt.Sprintf("%sapi/%s/", base, apiVersion)
+	return url.Parse(apiurl)
+}
+
+// NewAnonymousClient creates a client that issues anonymous requests.
+// baseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAnonymousClient(baseURL string, apiVersion string) (*Client, error) {
+	parsedBaseURL, err := composeAPIURL(baseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: &anonSigner{}, APIURL: parsedBaseURL}, nil
+}
+
+// NewAuthenticatedClient parses the given MAAS API key into the individual
+// OAuth tokens and creates a Client that will use these tokens to sign the
+// requests it issues.
+// baseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAuthenticatedClient(baseURL string, apiKey string, apiVersion string) (*Client, error) {
+	elements := strings.Split(apiKey, ":")
+	if len(elements) != 3 {
+		errString := "invalid API key %q; expected \"<consumer>:<token>:<secret>\""
+		return nil, fmt.Errorf(errString, apiKey)
+	}
+	token := &OAuthToken{
+		ConsumerKey: elements[0],
+		// The consumer secret is the empty string in MAAS' authentication.
+		ConsumerSecret: "",
+		TokenKey:       elements[1],
+		TokenSecret:    elements[2],
+	}
+	signer, err := NewPlainTestOAuthSigner(token, "MAAS API")
+	if err != nil {
+		return nil, err
+	}
+	parsedBaseURL, err := composeAPIURL(baseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: signer, APIURL: parsedBaseURL}, nil
+}

=== added file 'src/github.com/juju/gomaasapi/client_test.go'
--- src/github.com/juju/gomaasapi/client_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/client_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,312 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	. "gopkg.in/check.v1"
+)
+
+type ClientSuite struct{}
+
+var _ = Suite(&ClientSuite{})
+
+func (*ClientSuite) TestReadAndCloseReturnsEmptyStringForNil(c *C) {
+	data, err := readAndClose(nil)
+	c.Assert(err, IsNil)
+	c.Check(string(data), Equals, "")
+}
+
+func (*ClientSuite) TestReadAndCloseReturnsContents(c *C) {
+	content := "Stream contents."
+ stream := ioutil.NopCloser(strings.NewReader(content)) + + data, err := readAndClose(stream) + c.Assert(err, IsNil) + + c.Check(string(data), Equals, content) +} + +func (suite *ClientSuite) TestClientdispatchRequestReturnsServerError(c *C) { + URI := "/some/url/?param1=test" + expectedResult := "expected:result" + server := newSingleServingServer(URI, expectedResult, http.StatusBadRequest) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Assert(err, IsNil) + request, err := http.NewRequest("GET", server.URL+URI, nil) + + result, err := client.dispatchRequest(request) + + expectedErrorString := fmt.Sprintf("gomaasapi: got error back from server: 400 Bad Request (%v)", expectedResult) + c.Check(err.Error(), Equals, expectedErrorString) + c.Check(err.(ServerError).StatusCode, Equals, 400) + c.Check(string(result), Equals, expectedResult) +} + +func (suite *ClientSuite) TestClientdispatchRequestRetries503(c *C) { + URI := "/some/url/?param1=test" + server := newFlakyServer(URI, 503, NumberOfRetries) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Assert(err, IsNil) + content := "content" + request, err := http.NewRequest("GET", server.URL+URI, ioutil.NopCloser(strings.NewReader(content))) + + _, err = client.dispatchRequest(request) + + c.Check(err, IsNil) + c.Check(*server.nbRequests, Equals, NumberOfRetries+1) + expectedRequestsContent := make([][]byte, NumberOfRetries+1) + for i := 0; i < NumberOfRetries+1; i++ { + expectedRequestsContent[i] = []byte(content) + } + c.Check(*server.requests, DeepEquals, expectedRequestsContent) +} + +func (suite *ClientSuite) TestClientdispatchRequestDoesntRetry200(c *C) { + URI := "/some/url/?param1=test" + server := newFlakyServer(URI, 200, 10) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Assert(err, IsNil) + + request, err := http.NewRequest("GET", server.URL+URI, nil) + + _, err = client.dispatchRequest(request) + + c.Check(err, IsNil) + c.Check(*server.nbRequests, Equals, 1) +} + +func (suite *ClientSuite) TestClientdispatchRequestRetriesIsLimited(c *C) { + URI := "/some/url/?param1=test" + // Make the server return 503 responses NumberOfRetries + 1 times. + server := newFlakyServer(URI, 503, NumberOfRetries+1) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Assert(err, IsNil) + request, err := http.NewRequest("GET", server.URL+URI, nil) + + _, err = client.dispatchRequest(request) + + c.Check(*server.nbRequests, Equals, NumberOfRetries+1) + c.Check(err.(ServerError).StatusCode, Equals, 503) +} + +func (suite *ClientSuite) TestClientDispatchRequestReturnsNonServerError(c *C) { + client, err := NewAnonymousClient("/foo", "1.0") + c.Assert(err, IsNil) + // Create a bad request that will fail to dispatch. + request, err := http.NewRequest("GET", "/", nil) + c.Assert(err, IsNil) + + result, err := client.dispatchRequest(request) + + // This type of failure is an error, but not a ServerError. + c.Check(err, NotNil) + c.Check(err, Not(FitsTypeOf), ServerError{}) + // For this kind of error, result is guaranteed to be nil. 
+ c.Check(result, IsNil) +} + +func (suite *ClientSuite) TestClientdispatchRequestSignsRequest(c *C) { + URI := "/some/url/?param1=test" + expectedResult := "expected:result" + server := newSingleServingServer(URI, expectedResult, http.StatusOK) + defer server.Close() + client, err := NewAuthenticatedClient(server.URL, "the:api:key", "1.0") + c.Assert(err, IsNil) + request, err := http.NewRequest("GET", server.URL+URI, nil) + c.Assert(err, IsNil) + + result, err := client.dispatchRequest(request) + + c.Check(err, IsNil) + c.Check(string(result), Equals, expectedResult) + c.Check((*server.requestHeader)["Authorization"][0], Matches, "^OAuth .*") +} + +func (suite *ClientSuite) TestClientGetFormatsGetParameters(c *C) { + URI, err := url.Parse("/some/url") + c.Assert(err, IsNil) + expectedResult := "expected:result" + params := url.Values{"test": {"123"}} + fullURI := URI.String() + "?test=123" + server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Assert(err, IsNil) + + result, err := client.Get(URI, "", params) + + c.Check(err, IsNil) + c.Check(string(result), Equals, expectedResult) +} + +func (suite *ClientSuite) TestClientGetFormatsOperationAsGetParameter(c *C) { + URI, err := url.Parse("/some/url") + c.Assert(err, IsNil) + expectedResult := "expected:result" + fullURI := URI.String() + "?op=list" + server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Assert(err, IsNil) + + result, err := client.Get(URI, "list", nil) + + c.Check(err, IsNil) + c.Check(string(result), Equals, expectedResult) +} + +func (suite *ClientSuite) TestClientPostSendsRequestWithParams(c *C) { + URI, err := url.Parse("/some/url") + c.Check(err, IsNil) + expectedResult := "expected:result" + fullURI := URI.String() + "?op=list" + params := url.Values{"test": {"123"}} + server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) + defer server.Close() + client, err := NewAnonymousClient(server.URL, "1.0") + c.Check(err, IsNil) + + result, err := client.Post(URI, "list", params, nil) + + c.Check(err, IsNil) + c.Check(string(result), Equals, expectedResult) + postedValues, err := url.ParseQuery(*server.requestContent) + c.Check(err, IsNil) + expectedPostedValues, err := url.ParseQuery("test=123") + c.Check(err, IsNil) + c.Check(postedValues, DeepEquals, expectedPostedValues) +} + +// extractFileContent extracts from the request built using 'requestContent', +// 'requestHeader' and 'requestURL', the file named 'filename'. +func extractFileContent(requestContent string, requestHeader *http.Header, requestURL string, filename string) ([]byte, error) { + // Recreate the request from server.requestContent to use the parsing + // utility from the http package (http.Request.FormFile). 
+	request, err := http.NewRequest("POST", requestURL, bytes.NewBufferString(requestContent))
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", requestHeader.Get("Content-Type"))
+	file, _, err := request.FormFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	fileContent, err := ioutil.ReadAll(file)
+	if err != nil {
+		return nil, err
+	}
+	return fileContent, nil
+}
+
+func (suite *ClientSuite) TestClientPostSendsMultipartRequest(c *C) {
+	URI, err := url.Parse("/some/url")
+	c.Assert(err, IsNil)
+	expectedResult := "expected:result"
+	fullURI := URI.String() + "?op=add"
+	server := newSingleServingServer(fullURI, expectedResult, http.StatusOK)
+	defer server.Close()
+	client, err := NewAnonymousClient(server.URL, "1.0")
+	c.Assert(err, IsNil)
+	fileContent := []byte("content")
+	files := map[string][]byte{"testfile": fileContent}
+
+	result, err := client.Post(URI, "add", nil, files)
+
+	c.Check(err, IsNil)
+	c.Check(string(result), Equals, expectedResult)
+	receivedFileContent, err := extractFileContent(*server.requestContent, server.requestHeader, fullURI, "testfile")
+	c.Assert(err, IsNil)
+	c.Check(receivedFileContent, DeepEquals, fileContent)
+}
+
+func (suite *ClientSuite) TestClientPutSendsRequest(c *C) {
+	URI, err := url.Parse("/some/url")
+	c.Assert(err, IsNil)
+	expectedResult := "expected:result"
+	params := url.Values{"test": {"123"}}
+	server := newSingleServingServer(URI.String(), expectedResult, http.StatusOK)
+	defer server.Close()
+	client, err := NewAnonymousClient(server.URL, "1.0")
+	c.Assert(err, IsNil)
+
+	result, err := client.Put(URI, params)
+
+	c.Check(err, IsNil)
+	c.Check(string(result), Equals, expectedResult)
+	c.Check(*server.requestContent, Equals, "test=123")
+}
+
+func (suite *ClientSuite) TestClientDeleteSendsRequest(c *C) {
+	URI, err := url.Parse("/some/url")
+	c.Assert(err, IsNil)
+	expectedResult := "expected:result"
+	server := newSingleServingServer(URI.String(), expectedResult, http.StatusOK)
+	defer server.Close()
+	client, err := NewAnonymousClient(server.URL, "1.0")
+	c.Assert(err, IsNil)
+
+	err = client.Delete(URI)
+
+	c.Check(err, IsNil)
+}
+
+func (suite *ClientSuite) TestNewAnonymousClientEnsuresTrailingSlash(c *C) {
+	client, err := NewAnonymousClient("http://example.com/", "1.0")
+	c.Check(err, IsNil)
+	expectedURL, err := url.Parse("http://example.com/api/1.0/")
+	c.Assert(err, IsNil)
+	c.Check(client.APIURL, DeepEquals, expectedURL)
+}
+
+func (suite *ClientSuite) TestNewAuthenticatedClientEnsuresTrailingSlash(c *C) {
+	client, err := NewAuthenticatedClient("http://example.com/", "a:b:c", "1.0")
+	c.Check(err, IsNil)
+	expectedURL, err := url.Parse("http://example.com/api/1.0/")
+	c.Assert(err, IsNil)
+	c.Check(client.APIURL, DeepEquals, expectedURL)
+}
+
+func (suite *ClientSuite) TestNewAuthenticatedClientParsesApiKey(c *C) {
+	// NewAuthenticatedClient returns a plainTextOAuthSigner configured
+	// to use the given API key.
+	consumerKey := "consumerKey"
+	tokenKey := "tokenKey"
+	tokenSecret := "tokenSecret"
+	keyElements := []string{consumerKey, tokenKey, tokenSecret}
+	apiKey := strings.Join(keyElements, ":")
+
+	client, err := NewAuthenticatedClient("http://example.com/", apiKey, "1.0")
+
+	c.Check(err, IsNil)
+	signer := client.Signer.(*plainTextOAuthSigner)
+	c.Check(signer.token.ConsumerKey, Equals, consumerKey)
+	c.Check(signer.token.TokenKey, Equals, tokenKey)
+	c.Check(signer.token.TokenSecret, Equals, tokenSecret)
+}
+
+func (suite *ClientSuite) TestNewAuthenticatedClientFailsIfInvalidKey(c *C) {
+	client, err := NewAuthenticatedClient("", "invalid-key", "1.0")
+
+	c.Check(err, ErrorMatches, "invalid API key.*")
+	c.Check(client, IsNil)
+}
+
+func (suite *ClientSuite) TestcomposeAPIURLReturnsURL(c *C) {
+	apiurl, err := composeAPIURL("http://example.com/MAAS", "1.0")
+	c.Assert(err, IsNil)
+	expectedURL, err := url.Parse("http://example.com/MAAS/api/1.0/")
+	c.Assert(err, IsNil)
+	c.Check(expectedURL, DeepEquals, apiurl)
+}

=== added file 'src/github.com/juju/gomaasapi/enum.go'
--- src/github.com/juju/gomaasapi/enum.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/enum.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,57 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+const (
+	// NodeStatus* values represent the vocabulary of a Node's possible statuses.
+
+	// The node has been created and has a system ID assigned to it.
+	NodeStatusDeclared = "0"
+
+	// Testing and other commissioning steps are taking place.
+	NodeStatusCommissioning = "1"
+
+	// Smoke or burn-in testing has found a problem.
+	NodeStatusFailedTests = "2"
+
+	// The node can't be contacted.
+	NodeStatusMissing = "3"
+
+	// The node is in the general pool ready to be deployed.
+	NodeStatusReady = "4"
+
+	// The node is ready for named deployment.
+	NodeStatusReserved = "5"
+
+	// The node is powering a service from a charm or is ready for use with a fresh Ubuntu install.
+	NodeStatusDeployed = "6"
+
+	// The node has been removed from service manually until an admin overrides the retirement.
+	NodeStatusRetired = "7"
+
+	// The node is broken: a step in the node lifecycle failed. More details
+	// can be found in the node's event log.
+	NodeStatusBroken = "8"
+
+	// The node is being installed.
+	NodeStatusDeploying = "9"
+
+	// The node has been allocated to a user and is ready for deployment.
+	NodeStatusAllocated = "10"
+
+	// The deployment of the node failed.
+	NodeStatusFailedDeployment = "11"
+
+	// The node is powering down after a release request.
+	NodeStatusReleasing = "12"
+
+	// The releasing of the node failed.
+	NodeStatusFailedReleasing = "13"
+
+	// The node is erasing its disks.
+	NodeStatusDiskErasing = "14"
+
+	// The node failed to erase its disks.
+	NodeStatusFailedDiskErasing = "15"
+)

=== added directory 'src/github.com/juju/gomaasapi/example'
=== added file 'src/github.com/juju/gomaasapi/example/live_example.go'
--- src/github.com/juju/gomaasapi/example/live_example.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/example/live_example.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,171 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+/*
+This is an example of how the Go library gomaasapi can be used to interact with
+a real MAAS server.
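+
+As a rough editorial summary (not part of the original source), the flow of
+the program below reduces to:
+
+	authClient, err := gomaasapi.NewAuthenticatedClient(apiURL, apiKey, apiVersion)
+	maas := gomaasapi.NewMAAS(*authClient)
+	nodeListing := maas.GetSubObject("nodes")
+	listNodeObjects, err := nodeListing.CallGet("list", url.Values{})
+
+with apiURL, apiKey and apiVersion supplied interactively at startup.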
+Note that this is provided only as an example and that real code should
+probably do something more sensible with errors than ignoring them or
+panicking.
+*/
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+
+	"github.com/juju/gomaasapi"
+)
+
+var apiKey string
+var apiURL string
+var apiVersion string
+
+func getParams() {
+	fmt.Println("Warning: this will create a node on the MAAS server; it should be deleted at the end of the run but if something goes wrong, that test node might be left over. You've been warned.")
+	fmt.Print("Enter API key: ")
+	_, err := fmt.Scanf("%s", &apiKey)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Print("Enter API URL: ")
+	_, err = fmt.Scanf("%s", &apiURL)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Print("Enter API version: ")
+	_, err = fmt.Scanf("%s", &apiVersion)
+	if err != nil {
+		panic(err)
+	}
+}
+
+func checkError(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+func main() {
+	getParams()
+
+	// Create API server endpoint.
+	authClient, err := gomaasapi.NewAuthenticatedClient(apiURL, apiKey, apiVersion)
+	checkError(err)
+	maas := gomaasapi.NewMAAS(*authClient)
+
+	// Exercise the API.
+	ManipulateNodes(maas)
+	ManipulateFiles(maas)
+
+	fmt.Println("All done.")
+}
+
+// ManipulateFiles exercises the /api/1.0/files/ API endpoint. More precisely,
+// it uploads a file and then fetches it, making sure the received content
+// is the same as the one that was sent.
+func ManipulateFiles(maas *gomaasapi.MAASObject) {
+	files := maas.GetSubObject("files")
+	fileContent := []byte("test file content")
+	fileName := "filename"
+	filesToUpload := map[string][]byte{"file": fileContent}
+
+	// Upload a file.
+	fmt.Println("Uploading a file...")
+	_, err := files.CallPostFiles("add", url.Values{"filename": {fileName}}, filesToUpload)
+	checkError(err)
+	fmt.Println("File sent.")
+
+	// Fetch the file.
+	fmt.Println("Fetching the file...")
+	fileResult, err := files.CallGet("get", url.Values{"filename": {fileName}})
+	checkError(err)
+	receivedFileContent, err := fileResult.GetBytes()
+	checkError(err)
+	if !bytes.Equal(receivedFileContent, fileContent) {
+		panic("Received content differs from the content sent!")
+	}
+	fmt.Println("Got file.")
+
+	// Fetch list of files.
+	listFiles, err := files.CallGet("list", url.Values{})
+	checkError(err)
+	listFilesArray, err := listFiles.GetArray()
+	checkError(err)
+	fmt.Printf("We've got %v file(s)\n", len(listFilesArray))
+
+	// Delete the file.
+	fmt.Println("Deleting the file...")
+	fileObject, err := listFilesArray[0].GetMAASObject()
+	checkError(err)
+	errDelete := fileObject.Delete()
+	checkError(errDelete)
+
+	// Count the files.
+	listFiles, err = files.CallGet("list", url.Values{})
+	checkError(err)
+	listFilesArray, err = listFiles.GetArray()
+	checkError(err)
+	fmt.Printf("We've got %v file(s)\n", len(listFilesArray))
+}
+
+// ManipulateNodes exercises the /api/1.0/nodes/ API endpoint. More precisely,
+// it lists the existing nodes, creates a new node, updates it and then
+// deletes it.
+func ManipulateNodes(maas *gomaasapi.MAASObject) {
+	nodeListing := maas.GetSubObject("nodes")
+
+	// List nodes.
+ fmt.Println("Fetching list of nodes...") + listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) + checkError(err) + listNodes, err := listNodeObjects.GetArray() + checkError(err) + fmt.Printf("Got list of %v nodes\n", len(listNodes)) + for index, nodeObj := range listNodes { + node, err := nodeObj.GetMAASObject() + checkError(err) + hostname, err := node.GetField("hostname") + checkError(err) + fmt.Printf("Node #%d is named '%v' (%v)\n", index, hostname, node.URL()) + } + + // Create a node. + fmt.Println("Creating a new node...") + params := url.Values{"architecture": {"i386/generic"}, "mac_addresses": {"AA:BB:CC:DD:EE:FF"}} + newNodeObj, err := nodeListing.CallPost("new", params) + checkError(err) + newNode, err := newNodeObj.GetMAASObject() + checkError(err) + newNodeName, err := newNode.GetField("hostname") + checkError(err) + fmt.Printf("New node created: %s (%s)\n", newNodeName, newNode.URL()) + + // Update the new node. + fmt.Println("Updating the new node...") + updateParams := url.Values{"hostname": {"mynewname"}} + newNodeObj2, err := newNode.Update(updateParams) + checkError(err) + newNodeName2, err := newNodeObj2.GetField("hostname") + checkError(err) + fmt.Printf("New node updated, now named: %s\n", newNodeName2) + + // Count the nodes. + listNodeObjects2, err := nodeListing.CallGet("list", url.Values{}) + checkError(err) + listNodes2, err := listNodeObjects2.GetArray() + checkError(err) + fmt.Printf("We've got %v nodes\n", len(listNodes2)) + + // Delete the new node. + fmt.Println("Deleting the new node...") + errDelete := newNode.Delete() + checkError(errDelete) + + // Count the nodes. + listNodeObjects3, err := nodeListing.CallGet("list", url.Values{}) + checkError(err) + listNodes3, err := listNodeObjects3.GetArray() + checkError(err) + fmt.Printf("We've got %v nodes\n", len(listNodes3)) +} === added file 'src/github.com/juju/gomaasapi/gomaasapi.go' --- src/github.com/juju/gomaasapi/gomaasapi.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/gomaasapi.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,4 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi === added file 'src/github.com/juju/gomaasapi/gomaasapi_test.go' --- src/github.com/juju/gomaasapi/gomaasapi_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/gomaasapi_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "testing" + + . "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + TestingT(t) +} + +type GomaasapiTestSuite struct { +} + +var _ = Suite(&GomaasapiTestSuite{}) === added file 'src/github.com/juju/gomaasapi/jsonobject.go' --- src/github.com/juju/gomaasapi/jsonobject.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/jsonobject.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,215 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "encoding/json" + "errors" + "fmt" +) + +// JSONObject is a wrapper around a JSON structure which provides +// methods to extract data from that structure. +// A JSONObject provides a simple structure consisting of the data types +// defined in JSON: string, number, object, list, and bool. 
To get the
+// value you want out of a JSONObject, you must know (or figure out) which
+// kind of value you have, and then call the appropriate Get*() method to
+// get at it. Reading an item as the wrong type will return an error.
+// For instance, if your JSONObject consists of a number, call GetFloat64()
+// to get the value as a float64. If it's a list, call GetArray() to get
+// a slice of JSONObjects. To read any given item from the slice, you'll
+// need to "Get" that as the right type as well.
+// There is one exception: a MAASObject is really a special kind of map,
+// so you can read it as either.
+// Reading a null item is also an error. So before you try obj.Get*(),
+// first check obj.IsNil().
+type JSONObject struct {
+	// Parsed value. May actually be any of the types a JSONObject can
+	// wrap, except raw bytes. If the object can only be interpreted
+	// as raw bytes, this will be nil.
+	value interface{}
+	// Raw bytes, if this object was parsed directly from an API response.
+	// Is nil for sub-objects found within other objects. An object that
+	// was parsed directly from a response can be both raw bytes and some
+	// other value at the same time.
+	// For example, "[]" looks like a JSON list, so you can read it as an
+	// array. But it may also be the raw contents of a file that just
+	// happens to look like JSON, and so you can read it as raw bytes as
+	// well.
+	bytes []byte
+	// Client for further communication with the API.
+	client Client
+	// Is this a JSON null?
+	isNull bool
+}
+
+// Our JSON processor distinguishes a MAASObject from a jsonMap by the fact
+// that it contains a key "resource_uri". (A regular map might contain the
+// same key through sheer coincidence, but never mind: you can still treat it
+// as a jsonMap and never notice the difference.)
+const resourceURI = "resource_uri"
+
+// maasify turns a completely untyped json.Unmarshal result into a JSONObject
+// (with the appropriate implementation of course). This function is
+// recursive. Maps and arrays are deep-copied, with each individual value
+// being converted to a JSONObject type.
+func maasify(client Client, value interface{}) JSONObject {
+	if value == nil {
+		return JSONObject{isNull: true}
+	}
+	switch value.(type) {
+	case string, float64, bool:
+		return JSONObject{value: value}
+	case map[string]interface{}:
+		original := value.(map[string]interface{})
+		result := make(map[string]JSONObject, len(original))
+		for key, value := range original {
+			result[key] = maasify(client, value)
+		}
+		return JSONObject{value: result, client: client}
+	case []interface{}:
+		original := value.([]interface{})
+		result := make([]JSONObject, len(original))
+		for index, value := range original {
+			result[index] = maasify(client, value)
+		}
+		return JSONObject{value: result}
+	}
+	msg := fmt.Sprintf("Unknown JSON type, can't be converted to JSONObject: %v", value)
+	panic(msg)
+}
+
+// Parse a JSON blob into a JSONObject.
+func Parse(client Client, input []byte) (JSONObject, error) {
+	var obj JSONObject
+	if input == nil {
+		panic(errors.New("Parse() called with nil input"))
+	}
+	var parsed interface{}
+	err := json.Unmarshal(input, &parsed)
+	if err == nil {
+		obj = maasify(client, parsed)
+		obj.bytes = input
+	} else {
+		switch err.(type) {
+		case *json.InvalidUTF8Error:
+		case *json.SyntaxError:
+			// This isn't JSON. Treat it as raw binary data.
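+			// (Editorial note: the tests in jsonobject_test.go exercise this
+			// path; e.g. Parse(Client{}, []byte{220, 8, 129}) produces an
+			// object whose GetBytes() returns the original blob while the
+			// typed Get*() conversions fail.)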
+ default: + return obj, err + } + obj = JSONObject{value: nil, client: client, bytes: input} + } + return obj, nil +} + +// JSONObjectFromStruct takes a struct and converts it to a JSONObject +func JSONObjectFromStruct(client Client, input interface{}) (JSONObject, error) { + j, err := json.MarshalIndent(input, "", " ") + if err != nil { + return JSONObject{}, err + } + return Parse(client, j) +} + +// Return error value for failed type conversion. +func failConversion(wantedType string, obj JSONObject) error { + msg := fmt.Sprintf("Requested %v, got %T.", wantedType, obj.value) + return errors.New(msg) +} + +// MarshalJSON tells the standard json package how to serialize a JSONObject +// back to JSON. +func (obj JSONObject) MarshalJSON() ([]byte, error) { + if obj.IsNil() { + return json.Marshal(nil) + } + return json.MarshalIndent(obj.value, "", " ") +} + +// With MarshalJSON, JSONObject implements json.Marshaler. +var _ json.Marshaler = (*JSONObject)(nil) + +// IsNil tells you whether a JSONObject is a JSON "null." +// There is one irregularity. If the original JSON blob was actually raw +// data, not JSON, then its IsNil will return false because the object +// contains the binary data as a non-nil value. But, if the original JSON +// blob consisted of a null, then IsNil returns true even though you can +// still retrieve binary data from it. +func (obj JSONObject) IsNil() bool { + if obj.value != nil { + return false + } + if obj.bytes == nil { + return true + } + // This may be a JSON null. We can't expect every JSON null to look + // the same; there may be leading or trailing space. + return obj.isNull +} + +// GetString retrieves the object's value as a string. If the value wasn't +// a JSON string, that's an error. +func (obj JSONObject) GetString() (value string, err error) { + value, ok := obj.value.(string) + if !ok { + err = failConversion("string", obj) + } + return +} + +// GetFloat64 retrieves the object's value as a float64. If the value wasn't +// a JSON number, that's an error. +func (obj JSONObject) GetFloat64() (value float64, err error) { + value, ok := obj.value.(float64) + if !ok { + err = failConversion("float64", obj) + } + return +} + +// GetMap retrieves the object's value as a map. If the value wasn't a JSON +// object, that's an error. +func (obj JSONObject) GetMap() (value map[string]JSONObject, err error) { + value, ok := obj.value.(map[string]JSONObject) + if !ok { + err = failConversion("map", obj) + } + return +} + +// GetArray retrieves the object's value as an array. If the value wasn't a +// JSON list, that's an error. +func (obj JSONObject) GetArray() (value []JSONObject, err error) { + value, ok := obj.value.([]JSONObject) + if !ok { + err = failConversion("array", obj) + } + return +} + +// GetBool retrieves the object's value as a bool. If the value wasn't a JSON +// bool, that's an error. +func (obj JSONObject) GetBool() (value bool, err error) { + value, ok := obj.value.(bool) + if !ok { + err = failConversion("bool", obj) + } + return +} + +// GetBytes retrieves the object's value as raw bytes. A JSONObject that was +// parsed from the original input (as opposed to one that's embedded in +// another JSONObject) can contain both the raw bytes and the parsed JSON +// value, but either can be the case without the other. +// If this object wasn't parsed directly from the original input, that's an +// error. 
+// If the object was parsed from an original input that just said "null", then +// IsNil will return true but the raw bytes are still available from GetBytes. +func (obj JSONObject) GetBytes() ([]byte, error) { + if obj.bytes == nil { + return nil, failConversion("bytes", obj) + } + return obj.bytes, nil +} === added file 'src/github.com/juju/gomaasapi/jsonobject_test.go' --- src/github.com/juju/gomaasapi/jsonobject_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/jsonobject_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,463 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "encoding/json" + "fmt" + + . "gopkg.in/check.v1" +) + +type JSONObjectSuite struct { +} + +var _ = Suite(&JSONObjectSuite{}) + +// maasify() converts nil. +func (suite *JSONObjectSuite) TestMaasifyConvertsNil(c *C) { + c.Check(maasify(Client{}, nil).IsNil(), Equals, true) +} + +// maasify() converts strings. +func (suite *JSONObjectSuite) TestMaasifyConvertsString(c *C) { + const text = "Hello" + out, err := maasify(Client{}, text).GetString() + c.Assert(err, IsNil) + c.Check(out, Equals, text) +} + +// maasify() converts float64 numbers. +func (suite *JSONObjectSuite) TestMaasifyConvertsNumber(c *C) { + const number = 3.1415926535 + num, err := maasify(Client{}, number).GetFloat64() + c.Assert(err, IsNil) + c.Check(num, Equals, number) +} + +// maasify() converts array slices. +func (suite *JSONObjectSuite) TestMaasifyConvertsArray(c *C) { + original := []interface{}{3.0, 2.0, 1.0} + output, err := maasify(Client{}, original).GetArray() + c.Assert(err, IsNil) + c.Check(len(output), Equals, len(original)) +} + +// When maasify() converts an array slice, the result contains JSONObjects. +func (suite *JSONObjectSuite) TestMaasifyArrayContainsJSONObjects(c *C) { + arr, err := maasify(Client{}, []interface{}{9.9}).GetArray() + c.Assert(err, IsNil) + var _ JSONObject = arr[0] + entry, err := arr[0].GetFloat64() + c.Assert(err, IsNil) + c.Check(entry, Equals, 9.9) +} + +// maasify() converts maps. +func (suite *JSONObjectSuite) TestMaasifyConvertsMap(c *C) { + original := map[string]interface{}{"1": "one", "2": "two", "3": "three"} + output, err := maasify(Client{}, original).GetMap() + c.Assert(err, IsNil) + c.Check(len(output), Equals, len(original)) +} + +// When maasify() converts a map, the result contains JSONObjects. +func (suite *JSONObjectSuite) TestMaasifyMapContainsJSONObjects(c *C) { + jsonobj := maasify(Client{}, map[string]interface{}{"key": "value"}) + mp, err := jsonobj.GetMap() + var _ JSONObject = mp["key"] + c.Assert(err, IsNil) + entry, err := mp["key"].GetString() + c.Check(entry, Equals, "value") +} + +// maasify() converts MAAS objects. +func (suite *JSONObjectSuite) TestMaasifyConvertsMAASObject(c *C) { + original := map[string]interface{}{ + "resource_uri": "http://example.com/foo", + "size": "3", + } + obj, err := maasify(Client{}, original).GetMAASObject() + c.Assert(err, IsNil) + c.Check(len(obj.GetMap()), Equals, len(original)) + size, err := obj.GetMap()["size"].GetString() + c.Assert(err, IsNil) + c.Check(size, Equals, "3") +} + +// maasify() passes its client to a MAASObject it creates. 
+func (suite *JSONObjectSuite) TestMaasifyPassesClientToMAASObject(c *C) { + client := Client{} + original := map[string]interface{}{"resource_uri": "/foo"} + output, err := maasify(client, original).GetMAASObject() + c.Assert(err, IsNil) + c.Check(output.client, Equals, client) +} + +// maasify() passes its client into an array of MAASObjects it creates. +func (suite *JSONObjectSuite) TestMaasifyPassesClientIntoArray(c *C) { + client := Client{} + obj := map[string]interface{}{"resource_uri": "/foo"} + list := []interface{}{obj} + jsonobj, err := maasify(client, list).GetArray() + c.Assert(err, IsNil) + out, err := jsonobj[0].GetMAASObject() + c.Assert(err, IsNil) + c.Check(out.client, Equals, client) +} + +// maasify() passes its client into a map of MAASObjects it creates. +func (suite *JSONObjectSuite) TestMaasifyPassesClientIntoMap(c *C) { + client := Client{} + obj := map[string]interface{}{"resource_uri": "/foo"} + mp := map[string]interface{}{"key": obj} + jsonobj, err := maasify(client, mp).GetMap() + c.Assert(err, IsNil) + out, err := jsonobj["key"].GetMAASObject() + c.Assert(err, IsNil) + c.Check(out.client, Equals, client) +} + +// maasify() passes its client all the way down into any MAASObjects in the +// object structure it creates. +func (suite *JSONObjectSuite) TestMaasifyPassesClientAllTheWay(c *C) { + client := Client{} + obj := map[string]interface{}{"resource_uri": "/foo"} + mp := map[string]interface{}{"key": obj} + list := []interface{}{mp} + jsonobj, err := maasify(client, list).GetArray() + c.Assert(err, IsNil) + outerMap, err := jsonobj[0].GetMap() + c.Assert(err, IsNil) + out, err := outerMap["key"].GetMAASObject() + c.Assert(err, IsNil) + c.Check(out.client, Equals, client) +} + +// maasify() converts Booleans. +func (suite *JSONObjectSuite) TestMaasifyConvertsBool(c *C) { + t, err := maasify(Client{}, true).GetBool() + c.Assert(err, IsNil) + f, err := maasify(Client{}, false).GetBool() + c.Assert(err, IsNil) + c.Check(t, Equals, true) + c.Check(f, Equals, false) +} + +// Parse takes you from a JSON blob to a JSONObject. +func (suite *JSONObjectSuite) TestParseMaasifiesJSONBlob(c *C) { + blob := []byte("[12]") + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + + arr, err := obj.GetArray() + c.Assert(err, IsNil) + out, err := arr[0].GetFloat64() + c.Assert(err, IsNil) + c.Check(out, Equals, 12.0) +} + +func (suite *JSONObjectSuite) TestParseKeepsBinaryOriginal(c *C) { + blob := []byte(`"Hi"`) + + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + + text, err := obj.GetString() + c.Assert(err, IsNil) + c.Check(text, Equals, "Hi") + binary, err := obj.GetBytes() + c.Assert(err, IsNil) + c.Check(binary, DeepEquals, blob) +} + +func (suite *JSONObjectSuite) TestParseTreatsInvalidJSONAsBinary(c *C) { + blob := []byte("?x]}y![{z") + + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + + c.Check(obj.IsNil(), Equals, false) + c.Check(obj.value, IsNil) + binary, err := obj.GetBytes() + c.Assert(err, IsNil) + c.Check(binary, DeepEquals, blob) +} + +func (suite *JSONObjectSuite) TestParseTreatsInvalidUTF8AsBinary(c *C) { + // Arbitrary data that is definitely not UTF-8. 
+ blob := []byte{220, 8, 129} + + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + + c.Check(obj.IsNil(), Equals, false) + c.Check(obj.value, IsNil) + binary, err := obj.GetBytes() + c.Assert(err, IsNil) + c.Check(binary, DeepEquals, blob) +} + +func (suite *JSONObjectSuite) TestParseTreatsEmptyJSONAsBinary(c *C) { + blob := []byte{} + + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + + c.Check(obj.IsNil(), Equals, false) + data, err := obj.GetBytes() + c.Assert(err, IsNil) + c.Check(data, DeepEquals, blob) +} + +func (suite *JSONObjectSuite) TestParsePanicsOnNilJSON(c *C) { + defer func() { + failure := recover() + c.Assert(failure, NotNil) + c.Check(failure.(error).Error(), Matches, ".*nil input") + }() + Parse(Client{}, nil) +} + +func (suite *JSONObjectSuite) TestParseNullProducesIsNil(c *C) { + blob := []byte("null") + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + c.Check(obj.IsNil(), Equals, true) +} + +func (suite *JSONObjectSuite) TestParseNonNullProducesNonIsNil(c *C) { + blob := []byte("1") + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + c.Check(obj.IsNil(), Equals, false) +} + +func (suite *JSONObjectSuite) TestParseSpacedNullProducesIsNil(c *C) { + blob := []byte(" null ") + obj, err := Parse(Client{}, blob) + c.Assert(err, IsNil) + c.Check(obj.IsNil(), Equals, true) +} + +// String-type JSONObjects convert only to string. +func (suite *JSONObjectSuite) TestConversionsString(c *C) { + obj := maasify(Client{}, "Test string") + + value, err := obj.GetString() + c.Check(err, IsNil) + c.Check(value, Equals, "Test string") + + _, err = obj.GetFloat64() + c.Check(err, NotNil) + _, err = obj.GetMap() + c.Check(err, NotNil) + _, err = obj.GetMAASObject() + c.Check(err, NotNil) + _, err = obj.GetArray() + c.Check(err, NotNil) + _, err = obj.GetBool() + c.Check(err, NotNil) +} + +// Number-type JSONObjects convert only to float64. +func (suite *JSONObjectSuite) TestConversionsFloat64(c *C) { + obj := maasify(Client{}, 1.1) + + value, err := obj.GetFloat64() + c.Check(err, IsNil) + c.Check(value, Equals, 1.1) + + _, err = obj.GetString() + c.Check(err, NotNil) + _, err = obj.GetMap() + c.Check(err, NotNil) + _, err = obj.GetMAASObject() + c.Check(err, NotNil) + _, err = obj.GetArray() + c.Check(err, NotNil) + _, err = obj.GetBool() + c.Check(err, NotNil) +} + +// Map-type JSONObjects convert only to map. +func (suite *JSONObjectSuite) TestConversionsMap(c *C) { + obj := maasify(Client{}, map[string]interface{}{"x": "y"}) + + value, err := obj.GetMap() + c.Check(err, IsNil) + text, err := value["x"].GetString() + c.Check(err, IsNil) + c.Check(text, Equals, "y") + + _, err = obj.GetString() + c.Check(err, NotNil) + _, err = obj.GetFloat64() + c.Check(err, NotNil) + _, err = obj.GetMAASObject() + c.Check(err, NotNil) + _, err = obj.GetArray() + c.Check(err, NotNil) + _, err = obj.GetBool() + c.Check(err, NotNil) +} + +// Array-type JSONObjects convert only to array. +func (suite *JSONObjectSuite) TestConversionsArray(c *C) { + obj := maasify(Client{}, []interface{}{"item"}) + + value, err := obj.GetArray() + c.Check(err, IsNil) + text, err := value[0].GetString() + c.Check(err, IsNil) + c.Check(text, Equals, "item") + + _, err = obj.GetString() + c.Check(err, NotNil) + _, err = obj.GetFloat64() + c.Check(err, NotNil) + _, err = obj.GetMap() + c.Check(err, NotNil) + _, err = obj.GetMAASObject() + c.Check(err, NotNil) + _, err = obj.GetBool() + c.Check(err, NotNil) +} + +// Boolean-type JSONObjects convert only to bool. 
+func (suite *JSONObjectSuite) TestConversionsBool(c *C) { + obj := maasify(Client{}, false) + + value, err := obj.GetBool() + c.Check(err, IsNil) + c.Check(value, Equals, false) + + _, err = obj.GetString() + c.Check(err, NotNil) + _, err = obj.GetFloat64() + c.Check(err, NotNil) + _, err = obj.GetMap() + c.Check(err, NotNil) + _, err = obj.GetMAASObject() + c.Check(err, NotNil) + _, err = obj.GetArray() + c.Check(err, NotNil) +} + +func (suite *JSONObjectSuite) TestNilSerializesToJSON(c *C) { + output, err := json.Marshal(maasify(Client{}, nil)) + c.Assert(err, IsNil) + c.Check(output, DeepEquals, []byte("null")) +} + +func (suite *JSONObjectSuite) TestEmptyStringSerializesToJSON(c *C) { + output, err := json.Marshal(maasify(Client{}, "")) + c.Assert(err, IsNil) + c.Check(string(output), Equals, `""`) +} + +func (suite *JSONObjectSuite) TestStringSerializesToJSON(c *C) { + text := "Text wrapped in JSON" + output, err := json.Marshal(maasify(Client{}, text)) + c.Assert(err, IsNil) + c.Check(output, DeepEquals, []byte(fmt.Sprintf(`"%s"`, text))) +} + +func (suite *JSONObjectSuite) TestStringIsEscapedInJSON(c *C) { + text := `\"Quote,\" \\backslash, and \'apostrophe\'.` + output, err := json.Marshal(maasify(Client{}, text)) + c.Assert(err, IsNil) + var deserialized string + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized, Equals, text) +} + +func (suite *JSONObjectSuite) TestFloat64SerializesToJSON(c *C) { + number := 3.1415926535 + output, err := json.Marshal(maasify(Client{}, number)) + c.Assert(err, IsNil) + var deserialized float64 + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized, Equals, number) +} + +func (suite *JSONObjectSuite) TestEmptyMapSerializesToJSON(c *C) { + mp := map[string]interface{}{} + output, err := json.Marshal(maasify(Client{}, mp)) + c.Assert(err, IsNil) + var deserialized interface{} + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized.(map[string]interface{}), DeepEquals, mp) +} + +func (suite *JSONObjectSuite) TestMapSerializesToJSON(c *C) { + // Sample data: counting in Japanese. + mp := map[string]interface{}{"one": "ichi", "two": "nii", "three": "san"} + output, err := json.Marshal(maasify(Client{}, mp)) + c.Assert(err, IsNil) + var deserialized interface{} + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized.(map[string]interface{}), DeepEquals, mp) +} + +func (suite *JSONObjectSuite) TestEmptyArraySerializesToJSON(c *C) { + arr := []interface{}{} + output, err := json.Marshal(maasify(Client{}, arr)) + c.Assert(err, IsNil) + var deserialized interface{} + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + // The deserialized value is a slice, and it contains no elements. + // Can't do a regular comparison here because at least in the current + // json implementation, an empty list deserializes as a nil slice, + // not as an empty slice! + // (It doesn't work that way for maps though, for some reason). 
+ c.Check(len(deserialized.([]interface{})), Equals, len(arr)) +} + +func (suite *JSONObjectSuite) TestArrayOfStringsSerializesToJSON(c *C) { + value := "item" + output, err := json.Marshal(maasify(Client{}, []interface{}{value})) + c.Assert(err, IsNil) + var deserialized []string + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized, DeepEquals, []string{value}) +} + +func (suite *JSONObjectSuite) TestArrayOfNumbersSerializesToJSON(c *C) { + value := 9.0 + output, err := json.Marshal(maasify(Client{}, []interface{}{value})) + c.Assert(err, IsNil) + var deserialized []float64 + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized, DeepEquals, []float64{value}) +} + +func (suite *JSONObjectSuite) TestArrayPreservesOrderInJSON(c *C) { + // Sample data: counting in Korean. + arr := []interface{}{"jong", "il", "ee", "sam"} + output, err := json.Marshal(maasify(Client{}, arr)) + c.Assert(err, IsNil) + + var deserialized []interface{} + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized, DeepEquals, arr) +} + +func (suite *JSONObjectSuite) TestBoolSerializesToJSON(c *C) { + f, err := json.Marshal(maasify(Client{}, false)) + c.Assert(err, IsNil) + t, err := json.Marshal(maasify(Client{}, true)) + c.Assert(err, IsNil) + + c.Check(f, DeepEquals, []byte("false")) + c.Check(t, DeepEquals, []byte("true")) +} === added file 'src/github.com/juju/gomaasapi/maas.go' --- src/github.com/juju/gomaasapi/maas.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/maas.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +// NewMAAS returns an interface to the MAAS API as a *MAASObject. +func NewMAAS(client Client) *MAASObject { + attrs := map[string]interface{}{resourceURI: client.APIURL.String()} + obj := newJSONMAASObject(attrs, client) + return &obj +} === added file 'src/github.com/juju/gomaasapi/maas_test.go' --- src/github.com/juju/gomaasapi/maas_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/maas_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "net/url" + + . "gopkg.in/check.v1" +) + +type MAASSuite struct{} + +var _ = Suite(&MAASSuite{}) + +func (suite *MAASSuite) TestNewMAASUsesBaseURLFromClient(c *C) { + baseURLString := "https://server.com:888/" + baseURL, _ := url.Parse(baseURLString) + client := Client{APIURL: baseURL} + maas := NewMAAS(client) + URL := maas.URL() + c.Check(URL, DeepEquals, baseURL) +} === added file 'src/github.com/juju/gomaasapi/maasobject.go' --- src/github.com/juju/gomaasapi/maasobject.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/maasobject.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,197 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// MAASObject represents a MAAS object as returned by the MAAS API, such as a +// Node or a Tag. +// You can extract a MAASObject out of a JSONObject using +// JSONObject.GetMAASObject. A MAAS API call will usually return either a +// MAASObject or a list of MAASObjects. 
The list itself would be wrapped in +// a JSONObject, so if an API call returns a list of objects "l," you first +// obtain the array using l.GetArray(). Then, for each item "i" in the array, +// obtain the matching MAASObject using i.GetMAASObject(). +type MAASObject struct { + values map[string]JSONObject + client Client + uri *url.URL +} + +// newJSONMAASObject creates a new MAAS object. It will panic if the given map +// does not contain a valid URL for the 'resource_uri' key. +func newJSONMAASObject(jmap map[string]interface{}, client Client) MAASObject { + obj, err := maasify(client, jmap).GetMAASObject() + if err != nil { + panic(err) + } + return obj +} + +// MarshalJSON tells the standard json package how to serialize a MAASObject. +func (obj MAASObject) MarshalJSON() ([]byte, error) { + return json.MarshalIndent(obj.GetMap(), "", " ") +} + +// With MarshalJSON, MAASObject implements json.Marshaler. +var _ json.Marshaler = (*MAASObject)(nil) + +func marshalNode(node MAASObject) string { + res, _ := json.MarshalIndent(node, "", " ") + return string(res) + +} + +var noResourceURI = errors.New("not a MAAS object: no 'resource_uri' key") + +// extractURI obtains the "resource_uri" string from a JSONObject map. +func extractURI(attrs map[string]JSONObject) (*url.URL, error) { + uriEntry, ok := attrs[resourceURI] + if !ok { + return nil, noResourceURI + } + uri, err := uriEntry.GetString() + if err != nil { + return nil, fmt.Errorf("invalid resource_uri: %v", uri) + } + resourceURL, err := url.Parse(uri) + if err != nil { + return nil, fmt.Errorf("resource_uri does not contain a valid URL: %v", uri) + } + return resourceURL, nil +} + +// JSONObject getter for a MAAS object. From a decoding perspective, a +// MAASObject is just like a map except it contains a key "resource_uri", and +// it keeps track of the Client you got it from so that you can invoke API +// methods directly on their MAAS objects. +func (obj JSONObject) GetMAASObject() (MAASObject, error) { + attrs, err := obj.GetMap() + if err != nil { + return MAASObject{}, err + } + uri, err := extractURI(attrs) + if err != nil { + return MAASObject{}, err + } + return MAASObject{values: attrs, client: obj.client, uri: uri}, nil +} + +// GetField extracts a string field from this MAAS object. +func (obj MAASObject) GetField(name string) (string, error) { + return obj.values[name].GetString() +} + +// URI is the resource URI for this MAAS object. It is an absolute path, but +// without a network part. +func (obj MAASObject) URI() *url.URL { + // Duplicate the URL. + uri, err := url.Parse(obj.uri.String()) + if err != nil { + panic(err) + } + return uri +} + +// URL returns a full absolute URL (including network part) for this MAAS +// object on the API. +func (obj MAASObject) URL() *url.URL { + return obj.client.GetURL(obj.URI()) +} + +// GetMap returns all of the object's attributes in the form of a map. +func (obj MAASObject) GetMap() map[string]JSONObject { + return obj.values +} + +// GetSubObject returns a new MAASObject representing the API resource found +// at a given sub-path of the current object's resource URI. 
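+//
+// For example (an illustrative sketch, not part of the API contract; the
+// "nodes" sub-path and the client value are assumptions):
+//
+//	maas := NewMAAS(client)
+//	nodes := maas.GetSubObject("nodes")
+//	// nodes.URL() now points at the API base URL plus "nodes/"
+//	// (a trailing slash is always ensured).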
+func (obj MAASObject) GetSubObject(name string) MAASObject { + uri := obj.URI() + newURL := url.URL{Path: name} + resUrl := uri.ResolveReference(&newURL) + resUrl.Path = EnsureTrailingSlash(resUrl.Path) + input := map[string]interface{}{resourceURI: resUrl.String()} + return newJSONMAASObject(input, obj.client) +} + +var NotImplemented = errors.New("Not implemented") + +// Get retrieves a fresh copy of this MAAS object from the API. +func (obj MAASObject) Get() (MAASObject, error) { + uri := obj.URI() + result, err := obj.client.Get(uri, "", url.Values{}) + if err != nil { + return MAASObject{}, err + } + jsonObj, err := Parse(obj.client, result) + if err != nil { + return MAASObject{}, err + } + return jsonObj.GetMAASObject() +} + +// Post overwrites this object's existing value on the API with those given +// in "params." It returns the object's new value as received from the API. +func (obj MAASObject) Post(params url.Values) (JSONObject, error) { + uri := obj.URI() + result, err := obj.client.Post(uri, "", params, nil) + if err != nil { + return JSONObject{}, err + } + return Parse(obj.client, result) +} + +// Update modifies this object on the API, based on the values given in +// "params." It returns the object's new value as received from the API. +func (obj MAASObject) Update(params url.Values) (MAASObject, error) { + uri := obj.URI() + result, err := obj.client.Put(uri, params) + if err != nil { + return MAASObject{}, err + } + jsonObj, err := Parse(obj.client, result) + if err != nil { + return MAASObject{}, err + } + return jsonObj.GetMAASObject() +} + +// Delete removes this object on the API. +func (obj MAASObject) Delete() error { + uri := obj.URI() + return obj.client.Delete(uri) +} + +// CallGet invokes an idempotent API method on this object. +func (obj MAASObject) CallGet(operation string, params url.Values) (JSONObject, error) { + uri := obj.URI() + result, err := obj.client.Get(uri, operation, params) + if err != nil { + return JSONObject{}, err + } + return Parse(obj.client, result) +} + +// CallPost invokes a non-idempotent API method on this object. +func (obj MAASObject) CallPost(operation string, params url.Values) (JSONObject, error) { + return obj.CallPostFiles(operation, params, nil) +} + +// CallPostFiles invokes a non-idempotent API method on this object. It is +// similar to CallPost but has an extra parameter, 'files', which should +// contain the files that will be uploaded to the API. +func (obj MAASObject) CallPostFiles(operation string, params url.Values, files map[string][]byte) (JSONObject, error) { + uri := obj.URI() + result, err := obj.client.Post(uri, operation, params, files) + if err != nil { + return JSONObject{}, err + } + return Parse(obj.client, result) +} === added file 'src/github.com/juju/gomaasapi/maasobject_test.go' --- src/github.com/juju/gomaasapi/maasobject_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/maasobject_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,206 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "encoding/json" + "fmt" + "math/rand" + "net/url" + + . "gopkg.in/check.v1" +) + +type MAASObjectSuite struct{} + +var _ = Suite(&MAASObjectSuite{}) + +func makeFakeResourceURI() string { + return "http://example.com/" + fmt.Sprint(rand.Int31()) +} + +// JSONObjects containing MAAS objects convert only to map or to MAASObject. 
+func (suite *MAASObjectSuite) TestConversionsMAASObject(c *C) { + input := map[string]interface{}{resourceURI: "someplace"} + obj := maasify(Client{}, input) + + mp, err := obj.GetMap() + c.Check(err, IsNil) + text, err := mp[resourceURI].GetString() + c.Check(err, IsNil) + c.Check(text, Equals, "someplace") + + var maasobj MAASObject + maasobj, err = obj.GetMAASObject() + c.Assert(err, IsNil) + c.Check(maasobj, NotNil) + + _, err = obj.GetString() + c.Check(err, NotNil) + _, err = obj.GetFloat64() + c.Check(err, NotNil) + _, err = obj.GetArray() + c.Check(err, NotNil) + _, err = obj.GetBool() + c.Check(err, NotNil) +} + +func (suite *MAASObjectSuite) TestNewJSONMAASObjectPanicsIfNoResourceURI(c *C) { + defer func() { + recoveredError := recover() + c.Check(recoveredError, NotNil) + msg := recoveredError.(error).Error() + c.Check(msg, Matches, ".*no 'resource_uri' key.*") + }() + + input := map[string]interface{}{"test": "test"} + newJSONMAASObject(input, Client{}) +} + +func (suite *MAASObjectSuite) TestNewJSONMAASObjectPanicsIfResourceURINotString(c *C) { + defer func() { + recoveredError := recover() + c.Check(recoveredError, NotNil) + msg := recoveredError.(error).Error() + c.Check(msg, Matches, ".*invalid resource_uri.*") + }() + + input := map[string]interface{}{resourceURI: 77.77} + newJSONMAASObject(input, Client{}) +} + +func (suite *MAASObjectSuite) TestNewJSONMAASObjectPanicsIfResourceURINotURL(c *C) { + defer func() { + recoveredError := recover() + c.Check(recoveredError, NotNil) + msg := recoveredError.(error).Error() + c.Check(msg, Matches, ".*resource_uri.*valid URL.*") + }() + + input := map[string]interface{}{resourceURI: "%z"} + newJSONMAASObject(input, Client{}) +} + +func (suite *MAASObjectSuite) TestNewJSONMAASObjectSetsUpURI(c *C) { + URI, err := url.Parse("http://example.com/a/resource") + c.Assert(err, IsNil) + attrs := map[string]interface{}{resourceURI: URI.String()} + obj := newJSONMAASObject(attrs, Client{}) + c.Check(obj.uri, DeepEquals, URI) +} + +func (suite *MAASObjectSuite) TestURL(c *C) { + baseURL, err := url.Parse("http://example.com/") + c.Assert(err, IsNil) + uri := "http://example.com/a/resource" + resourceURL, err := url.Parse(uri) + c.Assert(err, IsNil) + input := map[string]interface{}{resourceURI: uri} + client := Client{APIURL: baseURL} + obj := newJSONMAASObject(input, client) + + URL := obj.URL() + + c.Check(URL, DeepEquals, resourceURL) +} + +// makeFakeMAASObject creates a MAASObject for some imaginary resource. +// There is no actual HTTP service or resource attached. +// serviceURL is the base URL of the service, and resourceURI is the path for +// the object, relative to serviceURL. +func makeFakeMAASObject(serviceURL, resourcePath string) MAASObject { + baseURL, err := url.Parse(serviceURL) + if err != nil { + panic(fmt.Errorf("creation of fake object failed: %v", err)) + } + uri := serviceURL + resourcePath + input := map[string]interface{}{resourceURI: uri} + client := Client{APIURL: baseURL} + return newJSONMAASObject(input, client) +} + +// Passing GetSubObject a relative path effectively concatenates that path to +// the original object's resource URI. +func (suite *MAASObjectSuite) TestGetSubObjectRelative(c *C) { + obj := makeFakeMAASObject("http://example.com/", "a/resource/") + + subObj := obj.GetSubObject("test") + subURL := subObj.URL() + + // uri ends with a slash and subName starts with one, but the two paths + // should be concatenated as "http://example.com/a/resource/test/". 
+ expectedSubURL, err := url.Parse("http://example.com/a/resource/test/") + c.Assert(err, IsNil) + c.Check(subURL, DeepEquals, expectedSubURL) +} + +// Passing GetSubObject an absolute path effectively substitutes that path for +// the path component in the original object's resource URI. +func (suite *MAASObjectSuite) TestGetSubObjectAbsolute(c *C) { + obj := makeFakeMAASObject("http://example.com/", "a/resource/") + + subObj := obj.GetSubObject("/b/test") + subURL := subObj.URL() + + expectedSubURL, err := url.Parse("http://example.com/b/test/") + c.Assert(err, IsNil) + c.Check(subURL, DeepEquals, expectedSubURL) +} + +// An absolute path passed to GetSubObject is rooted at the server root, not +// at the service root. So every absolute resource URI must repeat the part +// of the path that leads to the service root. This does not double that part +// of the URI. +func (suite *MAASObjectSuite) TestGetSubObjectAbsoluteDoesNotDoubleServiceRoot(c *C) { + obj := makeFakeMAASObject("http://example.com/service", "a/resource/") + + subObj := obj.GetSubObject("/service/test") + subURL := subObj.URL() + + // The "/service" part is not repeated; it must be included. + expectedSubURL, err := url.Parse("http://example.com/service/test/") + c.Assert(err, IsNil) + c.Check(subURL, DeepEquals, expectedSubURL) +} + +// The argument to GetSubObject is a relative path, not a URL. So it won't +// take a query part. The special characters that mark a query are escaped +// so they are recognized as parts of the path. +func (suite *MAASObjectSuite) TestGetSubObjectTakesPathNotURL(c *C) { + obj := makeFakeMAASObject("http://example.com/", "x/") + + subObj := obj.GetSubObject("/y?z") + + c.Check(subObj.URL().String(), Equals, "http://example.com/y%3Fz/") +} + +func (suite *MAASObjectSuite) TestGetField(c *C) { + uri := "http://example.com/a/resource" + fieldName := "field name" + fieldValue := "a value" + input := map[string]interface{}{ + resourceURI: uri, fieldName: fieldValue, + } + obj := newJSONMAASObject(input, Client{}) + value, err := obj.GetField(fieldName) + c.Check(err, IsNil) + c.Check(value, Equals, fieldValue) +} + +func (suite *MAASObjectSuite) TestSerializesToJSON(c *C) { + attrs := map[string]interface{}{ + resourceURI: "http://maas.example.com/", + "counter": 5.0, + "active": true, + "macs": map[string]interface{}{"eth0": "AA:BB:CC:DD:EE:FF"}, + } + obj := maasify(Client{}, attrs) + output, err := json.Marshal(obj) + c.Assert(err, IsNil) + + var deserialized map[string]interface{} + err = json.Unmarshal(output, &deserialized) + c.Assert(err, IsNil) + c.Check(deserialized, DeepEquals, attrs) +} === added file 'src/github.com/juju/gomaasapi/oauth.go' --- src/github.com/juju/gomaasapi/oauth.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/oauth.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,80 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). 
+
+package gomaasapi
+
+import (
+	"crypto/rand"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// generateNonce is not a true UUID generator, but it produces a random
+// string of the same length.
+func generateNonce() (string, error) {
+	randBytes := make([]byte, 16)
+	_, err := rand.Read(randBytes)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%16x", randBytes), nil
+}
+
+func generateTimestamp() string {
+	return strconv.Itoa(int(time.Now().Unix()))
+}
+
+type OAuthSigner interface {
+	OAuthSign(request *http.Request) error
+}
+
+type OAuthToken struct {
+	ConsumerKey    string
+	ConsumerSecret string
+	TokenKey       string
+	TokenSecret    string
+}
+
+// Trick to ensure *plainTextOAuthSigner implements the OAuthSigner interface.
+var _ OAuthSigner = (*plainTextOAuthSigner)(nil)
+
+type plainTextOAuthSigner struct {
+	token *OAuthToken
+	realm string
+}
+
+func NewPlainTestOAuthSigner(token *OAuthToken, realm string) (OAuthSigner, error) {
+	return &plainTextOAuthSigner{token, realm}, nil
+}
+
+// OAuthSign signs the provided request using the OAuth PLAINTEXT
+// method: http://oauth.net/core/1.0/#anchor22.
+func (signer plainTextOAuthSigner) OAuthSign(request *http.Request) error {
+
+	signature := signer.token.ConsumerSecret + `&` + signer.token.TokenSecret
+	nonce, err := generateNonce()
+	if err != nil {
+		return err
+	}
+	authData := map[string]string{
+		"realm":                  signer.realm,
+		"oauth_consumer_key":     signer.token.ConsumerKey,
+		"oauth_token":            signer.token.TokenKey,
+		"oauth_signature_method": "PLAINTEXT",
+		"oauth_signature":        signature,
+		"oauth_timestamp":        generateTimestamp(),
+		"oauth_nonce":            nonce,
+		"oauth_version":          "1.0",
+	}
+	// Build OAuth header.
+	var authHeader []string
+	for key, value := range authData {
+		authHeader = append(authHeader, fmt.Sprintf(`%s="%s"`, key, url.QueryEscape(value)))
+	}
+	strHeader := "OAuth " + strings.Join(authHeader, ", ")
+	request.Header.Add("Authorization", strHeader)
+	return nil
+}
=== added directory 'src/github.com/juju/gomaasapi/templates'
=== added file 'src/github.com/juju/gomaasapi/templates/source.go'
--- src/github.com/juju/gomaasapi/templates/source.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/templates/source.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,4 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
=== added file 'src/github.com/juju/gomaasapi/templates/source_test.go'
--- src/github.com/juju/gomaasapi/templates/source_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/templates/source_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,17 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+import (
+	. "gopkg.in/check.v1"
+)
+
+type MyTestSuite struct{}
+
+var _ = Suite(&MyTestSuite{})
+
+// TODO: Replace with real test functions. Give them real names.
+func (suite *MyTestSuite) TestXXX(c *C) {
+	c.Check(2+2, Equals, 4)
+}
=== added file 'src/github.com/juju/gomaasapi/testing.go'
--- src/github.com/juju/gomaasapi/testing.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/testing.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,82 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+ +package gomaasapi + +import ( + "fmt" + "net/http" + "net/http/httptest" +) + +type singleServingServer struct { + *httptest.Server + requestContent *string + requestHeader *http.Header +} + +// newSingleServingServer creates a single-serving test http server which will +// return only one response as defined by the passed arguments. +func newSingleServingServer(uri string, response string, code int) *singleServingServer { + var requestContent string + var requestHeader http.Header + var requested bool + handler := func(writer http.ResponseWriter, request *http.Request) { + if requested { + http.Error(writer, "Already requested", http.StatusServiceUnavailable) + } + res, err := readAndClose(request.Body) + if err != nil { + panic(err) + } + requestContent = string(res) + requestHeader = request.Header + if request.URL.String() != uri { + errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String()) + http.Error(writer, errorMsg, http.StatusNotFound) + } else { + writer.WriteHeader(code) + fmt.Fprint(writer, response) + } + requested = true + } + server := httptest.NewServer(http.HandlerFunc(handler)) + return &singleServingServer{server, &requestContent, &requestHeader} +} + +type flakyServer struct { + *httptest.Server + nbRequests *int + requests *[][]byte +} + +// newFlakyServer creates a "flaky" test http server which will +// return `nbFlakyResponses` responses with the given code and then a 200 response. +func newFlakyServer(uri string, code int, nbFlakyResponses int) *flakyServer { + nbRequests := 0 + requests := make([][]byte, nbFlakyResponses+1) + handler := func(writer http.ResponseWriter, request *http.Request) { + nbRequests += 1 + body, err := readAndClose(request.Body) + if err != nil { + panic(err) + } + requests[nbRequests-1] = body + if request.URL.String() != uri { + errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String()) + http.Error(writer, errorMsg, http.StatusNotFound) + } else if nbRequests <= nbFlakyResponses { + if code == http.StatusServiceUnavailable { + writer.Header().Set("Retry-After", "0") + } + writer.WriteHeader(code) + fmt.Fprint(writer, "flaky") + } else { + writer.WriteHeader(http.StatusOK) + fmt.Fprint(writer, "ok") + } + + } + server := httptest.NewServer(http.HandlerFunc(handler)) + return &flakyServer{server, &nbRequests, &requests} +} === added file 'src/github.com/juju/gomaasapi/testservice.go' --- src/github.com/juju/gomaasapi/testservice.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/testservice.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1623 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "mime/multipart" + "net" + "net/http" + "net/http/httptest" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + + "gopkg.in/mgo.v2/bson" +) + +// TestMAASObject is a fake MAAS server MAASObject. +type TestMAASObject struct { + MAASObject + TestServer *TestServer +} + +// checkError is a shorthand helper that panics if err is not nil. +func checkError(err error) { + if err != nil { + panic(err) + } +} + +// NewTestMAAS returns a TestMAASObject that implements the MAASObject +// interface and thus can be used as a test object instead of the one returned +// by gomaasapi.NewMAAS(). 
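+//
+// A minimal sketch of typical use in a test (the version string "1.0" is an
+// assumed example):
+//
+//	maas := NewTestMAAS("1.0")
+//	defer maas.Close()
+//	maas.TestServer.NewNode(`{"system_id": "mysystemid"}`)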
+func NewTestMAAS(version string) *TestMAASObject {
+	server := NewTestServer(version)
+	authClient, err := NewAnonymousClient(server.URL, version)
+	checkError(err)
+	maas := NewMAAS(*authClient)
+	return &TestMAASObject{*maas, server}
+}
+
+// Close shuts down the test server.
+func (testMAASObject *TestMAASObject) Close() {
+	testMAASObject.TestServer.Close()
+}
+
+// A TestServer is an HTTP server listening on a system-chosen port on the
+// local loopback interface, which simulates the behavior of a MAAS server.
+// It is intended for use in end-to-end HTTP tests using the gomaasapi
+// library.
+type TestServer struct {
+	*httptest.Server
+	serveMux   *http.ServeMux
+	client     Client
+	nodes      map[string]MAASObject
+	ownedNodes map[string]bool
+	// mapping system_id -> list of operations performed.
+	nodeOperations map[string][]string
+	// list of operations performed at the /nodes/ level.
+	nodesOperations []string
+	// mapping system_id -> list of Values passed when performing
+	// operations.
+	nodeOperationRequestValues map[string][]url.Values
+	// list of Values passed when performing operations at the
+	// /nodes/ level.
+	nodesOperationRequestValues []url.Values
+	nodeMetadata                map[string]Node
+	files                       map[string]MAASObject
+	networks                    map[string]MAASObject
+	networksPerNode             map[string][]string
+	ipAddressesPerNetwork       map[string][]string
+	version                     string
+	macAddressesPerNetwork      map[string]map[string]JSONObject
+	nodeDetails                 map[string]string
+	zones                       map[string]JSONObject
+	// bootImages is a map of nodegroup UUIDs to boot-image objects.
+	bootImages map[string][]JSONObject
+	// nodegroupsInterfaces is a map of nodegroup UUIDs to interface
+	// objects.
+	nodegroupsInterfaces map[string][]JSONObject
+
+	// versionJSON is the response to the /version/ endpoint listing the
+	// capabilities of the MAAS server.
+	versionJSON string
+
+	// devices is a map of device UUIDs to devices.
+	devices map[string]*device
+
+	subnets        map[uint]Subnet
+	subnetNameToID map[string]uint
+	nextSubnet     uint
+	spaces         map[uint]Space
+	spaceNameToID  map[string]uint
+	nextSpace      uint
+	vlans          map[int]VLAN
+	nextVLAN       int
+}
+
+type device struct {
+	IPAddresses []string
+	SystemId    string
+	MACAddress  string
+	Parent      string
+	Hostname    string
+
+	// Not part of the device definition but used by the template.
+ APIVersion string +} + +func getNodesEndpoint(version string) string { + return fmt.Sprintf("/api/%s/nodes/", version) +} + +func getNodeURL(version, systemId string) string { + return fmt.Sprintf("/api/%s/nodes/%s/", version, systemId) +} + +func getNodeURLRE(version string) *regexp.Regexp { + reString := fmt.Sprintf("^/api/%s/nodes/([^/]*)/$", regexp.QuoteMeta(version)) + return regexp.MustCompile(reString) +} + +func getDevicesEndpoint(version string) string { + return fmt.Sprintf("/api/%s/devices/", version) +} + +func getDeviceURL(version, systemId string) string { + return fmt.Sprintf("/api/%s/devices/%s/", version, systemId) +} + +func getDeviceURLRE(version string) *regexp.Regexp { + reString := fmt.Sprintf("^/api/%s/devices/([^/]*)/$", regexp.QuoteMeta(version)) + return regexp.MustCompile(reString) +} + +func getFilesEndpoint(version string) string { + return fmt.Sprintf("/api/%s/files/", version) +} + +func getFileURL(version, filename string) string { + // Uses URL object so filename is correctly percent-escaped + url := url.URL{} + url.Path = fmt.Sprintf("/api/%s/files/%s/", version, filename) + return url.String() +} + +func getFileURLRE(version string) *regexp.Regexp { + reString := fmt.Sprintf("^/api/%s/files/(.*)/$", regexp.QuoteMeta(version)) + return regexp.MustCompile(reString) +} + +func getNetworksEndpoint(version string) string { + return fmt.Sprintf("/api/%s/networks/", version) +} + +func getNetworkURL(version, name string) string { + return fmt.Sprintf("/api/%s/networks/%s/", version, name) +} + +func getNetworkURLRE(version string) *regexp.Regexp { + reString := fmt.Sprintf("^/api/%s/networks/(.*)/$", regexp.QuoteMeta(version)) + return regexp.MustCompile(reString) +} + +func getIPAddressesEndpoint(version string) string { + return fmt.Sprintf("/api/%s/ipaddresses/", version) +} + +func getMACAddressURL(version, systemId, macAddress string) string { + return fmt.Sprintf("/api/%s/nodes/%s/macs/%s/", version, systemId, url.QueryEscape(macAddress)) +} + +func getVersionURL(version string) string { + return fmt.Sprintf("/api/%s/version/", version) +} + +func getNodegroupsEndpoint(version string) string { + return fmt.Sprintf("/api/%s/nodegroups/", version) +} + +func getNodegroupURL(version, uuid string) string { + return fmt.Sprintf("/api/%s/nodegroups/%s/", version, uuid) +} + +func getNodegroupsInterfacesURLRE(version string) *regexp.Regexp { + reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/interfaces/$", regexp.QuoteMeta(version)) + return regexp.MustCompile(reString) +} + +func getBootimagesURLRE(version string) *regexp.Regexp { + reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/boot-images/$", regexp.QuoteMeta(version)) + return regexp.MustCompile(reString) +} + +func getZonesEndpoint(version string) string { + return fmt.Sprintf("/api/%s/zones/", version) +} + +// Clear clears all the fake data stored and recorded by the test server +// (nodes, recorded operations, etc.). 
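+//
+// A typical pattern (a sketch, assuming one TestServer shared by a gocheck
+// suite) is to call Clear from each test's set-up so every case starts from
+// a clean slate:
+//
+//	func (suite *MySuite) SetUpTest(c *C) {
+//		suite.server.Clear()
+//	}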
+func (server *TestServer) Clear() { + server.nodes = make(map[string]MAASObject) + server.ownedNodes = make(map[string]bool) + server.nodesOperations = make([]string, 0) + server.nodeOperations = make(map[string][]string) + server.nodesOperationRequestValues = make([]url.Values, 0) + server.nodeOperationRequestValues = make(map[string][]url.Values) + server.nodeMetadata = make(map[string]Node) + server.files = make(map[string]MAASObject) + server.networks = make(map[string]MAASObject) + server.networksPerNode = make(map[string][]string) + server.ipAddressesPerNetwork = make(map[string][]string) + server.macAddressesPerNetwork = make(map[string]map[string]JSONObject) + server.nodeDetails = make(map[string]string) + server.bootImages = make(map[string][]JSONObject) + server.nodegroupsInterfaces = make(map[string][]JSONObject) + server.zones = make(map[string]JSONObject) + server.versionJSON = `{"capabilities": ["networks-management","static-ipaddresses","devices-management","network-deployment-ubuntu"]}` + server.devices = make(map[string]*device) + server.subnets = make(map[uint]Subnet) + server.subnetNameToID = make(map[string]uint) + server.nextSubnet = 1 + server.spaces = make(map[uint]Space) + server.spaceNameToID = make(map[string]uint) + server.nextSpace = 1 + server.vlans = make(map[int]VLAN) + server.nextVLAN = 1 +} + +// SetVersionJSON sets the JSON response (capabilities) returned from the +// /version/ endpoint. +func (server *TestServer) SetVersionJSON(json string) { + server.versionJSON = json +} + +// NodesOperations returns the list of operations performed at the /nodes/ +// level. +func (server *TestServer) NodesOperations() []string { + return server.nodesOperations +} + +// NodeOperations returns the map containing the list of the operations +// performed for each node. +func (server *TestServer) NodeOperations() map[string][]string { + return server.nodeOperations +} + +// NodesOperationRequestValues returns the list of url.Values extracted +// from the request used when performing operations at the /nodes/ level. +func (server *TestServer) NodesOperationRequestValues() []url.Values { + return server.nodesOperationRequestValues +} + +// NodeOperationRequestValues returns the map containing the list of the +// url.Values extracted from the request used when performing operations +// on nodes. 
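+//
+// For example (a sketch; "node1" is illustrative and assumes an operation
+// has already been performed on that node), the form values sent with the
+// first recorded request can be inspected with:
+//
+//	values := server.NodeOperationRequestValues()["node1"][0]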
+func (server *TestServer) NodeOperationRequestValues() map[string][]url.Values { + return server.nodeOperationRequestValues +} + +func parseRequestValues(request *http.Request) url.Values { + var requestValues url.Values + if request.Header.Get("Content-Type") == "application/x-www-form-urlencoded" { + if request.PostForm == nil { + if err := request.ParseForm(); err != nil { + panic(err) + } + } + requestValues = request.PostForm + } + return requestValues +} + +func (server *TestServer) addNodesOperation(operation string, request *http.Request) url.Values { + requestValues := parseRequestValues(request) + server.nodesOperations = append(server.nodesOperations, operation) + server.nodesOperationRequestValues = append(server.nodesOperationRequestValues, requestValues) + return requestValues +} + +func (server *TestServer) addNodeOperation(systemId, operation string, request *http.Request) url.Values { + operations, present := server.nodeOperations[systemId] + operationRequestValues, present2 := server.nodeOperationRequestValues[systemId] + if present != present2 { + panic("inconsistent state: nodeOperations and nodeOperationRequestValues don't have the same keys.") + } + requestValues := parseRequestValues(request) + if !present { + operations = []string{operation} + operationRequestValues = []url.Values{requestValues} + } else { + operations = append(operations, operation) + operationRequestValues = append(operationRequestValues, requestValues) + } + server.nodeOperations[systemId] = operations + server.nodeOperationRequestValues[systemId] = operationRequestValues + return requestValues +} + +// NewNode creates a MAAS node. The provided string should be a valid json +// string representing a map and contain a string value for the key +// 'system_id'. e.g. `{"system_id": "mysystemid"}`. +// If one of these conditions is not met, NewNode panics. +func (server *TestServer) NewNode(jsonText string) MAASObject { + var attrs map[string]interface{} + err := json.Unmarshal([]byte(jsonText), &attrs) + checkError(err) + systemIdEntry, hasSystemId := attrs["system_id"] + if !hasSystemId { + panic("The given map json string does not contain a 'system_id' value.") + } + systemId := systemIdEntry.(string) + attrs[resourceURI] = getNodeURL(server.version, systemId) + if _, hasStatus := attrs["status"]; !hasStatus { + attrs["status"] = NodeStatusDeployed + } + obj := newJSONMAASObject(attrs, server.client) + server.nodes[systemId] = obj + return obj +} + +// Nodes returns a map associating all the nodes' system ids with the nodes' +// objects. +func (server *TestServer) Nodes() map[string]MAASObject { + return server.nodes +} + +// OwnedNodes returns a map whose keys represent the nodes that are currently +// allocated. +func (server *TestServer) OwnedNodes() map[string]bool { + return server.ownedNodes +} + +// NewFile creates a file in the test MAAS server. +func (server *TestServer) NewFile(filename string, filecontent []byte) MAASObject { + attrs := make(map[string]interface{}) + attrs[resourceURI] = getFileURL(server.version, filename) + base64Content := base64.StdEncoding.EncodeToString(filecontent) + attrs["content"] = base64Content + attrs["filename"] = filename + + // Allocate an arbitrary URL here. It would be nice if the caller + // could do this, but that would change the API and require many + // changes. 
+	escapedName := url.QueryEscape(filename)
+	attrs["anon_resource_uri"] = "/maas/1.0/files/?op=get_by_key&key=" + escapedName + "_key"
+
+	obj := newJSONMAASObject(attrs, server.client)
+	server.files[filename] = obj
+	return obj
+}
+
+func (server *TestServer) Files() map[string]MAASObject {
+	return server.files
+}
+
+// ChangeNode updates a node with the given key/value.
+func (server *TestServer) ChangeNode(systemId, key, value string) {
+	node, found := server.nodes[systemId]
+	if !found {
+		panic("No node with such 'system_id'.")
+	}
+	node.GetMap()[key] = maasify(server.client, value)
+}
+
+// NewIPAddress creates a new static IP address reservation for the
+// given network/subnet and ipAddress. While networks are being deprecated,
+// try the given name as both a network and a subnet.
+func (server *TestServer) NewIPAddress(ipAddress, networkOrSubnet string) {
+	_, foundNetwork := server.networks[networkOrSubnet]
+	subnetID, foundSubnet := server.subnetNameToID[networkOrSubnet]
+
+	if !foundNetwork && !foundSubnet {
+		panic("No such network or subnet: " + networkOrSubnet)
+	}
+	if foundNetwork {
+		ips, found := server.ipAddressesPerNetwork[networkOrSubnet]
+		if found {
+			ips = append(ips, ipAddress)
+		} else {
+			ips = []string{ipAddress}
+		}
+		server.ipAddressesPerNetwork[networkOrSubnet] = ips
+	} else {
+		subnet := server.subnets[subnetID]
+		netIp := net.ParseIP(ipAddress)
+		if netIp == nil {
+			panic(ipAddress + " is invalid")
+		}
+		ip := IPFromNetIP(netIp)
+		ip.Purpose = []string{"assigned-ip"}
+		subnet.InUseIPAddresses = append(subnet.InUseIPAddresses, ip)
+		server.subnets[subnetID] = subnet
+	}
+}
+
+// RemoveIPAddress removes the given existing ipAddress and returns
+// whether it was actually removed.
+func (server *TestServer) RemoveIPAddress(ipAddress string) bool {
+	for network, ips := range server.ipAddressesPerNetwork {
+		for i, ip := range ips {
+			if ip == ipAddress {
+				ips = append(ips[:i], ips[i+1:]...)
+				server.ipAddressesPerNetwork[network] = ips
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IPAddresses returns the map with network names as keys and slices
+// of IP addresses belonging to each network as values.
+func (server *TestServer) IPAddresses() map[string][]string {
+	return server.ipAddressesPerNetwork
+}
+
+// NewNetwork creates a network in the test MAAS server.
+func (server *TestServer) NewNetwork(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	nameEntry, hasName := attrs["name"]
+	_, hasIP := attrs["ip"]
+	_, hasNetmask := attrs["netmask"]
+	if !hasName || !hasIP || !hasNetmask {
+		panic("The given map json string does not contain a 'name', 'ip', or 'netmask' value.")
+	}
+	// TODO(gz): Sanity checking done on other fields
+	name := nameEntry.(string)
+	attrs[resourceURI] = getNetworkURL(server.version, name)
+	obj := newJSONMAASObject(attrs, server.client)
+	server.networks[name] = obj
+	return obj
+}
+
+// NewNodegroupInterface adds a nodegroup-interface, for the specified
+// nodegroup, in the test MAAS server.
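+//
+// A hedged sketch of a call (all values are illustrative; the nodegroup
+// "my-uuid" must already be known to the server, e.g. via AddBootImage):
+//
+//	server.NewNodegroupInterface("my-uuid", `{
+//	    "name": "eth0", "interface": "eth0", "management": 2,
+//	    "ip": "10.0.0.2", "subnet_mask": "255.255.255.0",
+//	    "broadcast_ip": "10.0.0.255",
+//	    "ip_range_low": "10.0.0.10", "ip_range_high": "10.0.0.99",
+//	    "static_ip_range_low": "10.0.0.100",
+//	    "static_ip_range_high": "10.0.0.200"}`)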
+func (server *TestServer) NewNodegroupInterface(uuid, jsonText string) JSONObject { + _, ok := server.bootImages[uuid] + if !ok { + panic("no nodegroup with the given UUID") + } + var attrs map[string]interface{} + err := json.Unmarshal([]byte(jsonText), &attrs) + checkError(err) + requiredMembers := []string{"ip_range_high", "ip_range_low", "broadcast_ip", "static_ip_range_low", "static_ip_range_high", "name", "ip", "subnet_mask", "management", "interface"} + for _, member := range requiredMembers { + _, hasMember := attrs[member] + if !hasMember { + panic(fmt.Sprintf("The given map json string does not contain a required %q", member)) + } + } + obj := maasify(server.client, attrs) + server.nodegroupsInterfaces[uuid] = append(server.nodegroupsInterfaces[uuid], obj) + return obj +} + +func (server *TestServer) ConnectNodeToNetwork(systemId, name string) { + _, hasNode := server.nodes[systemId] + if !hasNode { + panic("no node with the given system id") + } + _, hasNetwork := server.networks[name] + if !hasNetwork { + panic("no network with the given name") + } + networkNames, _ := server.networksPerNode[systemId] + server.networksPerNode[systemId] = append(networkNames, name) +} + +func (server *TestServer) ConnectNodeToNetworkWithMACAddress(systemId, networkName, macAddress string) { + node, hasNode := server.nodes[systemId] + if !hasNode { + panic("no node with the given system id") + } + if _, hasNetwork := server.networks[networkName]; !hasNetwork { + panic("no network with the given name") + } + networkNames, _ := server.networksPerNode[systemId] + server.networksPerNode[systemId] = append(networkNames, networkName) + attrs := make(map[string]interface{}) + attrs[resourceURI] = getMACAddressURL(server.version, systemId, macAddress) + attrs["mac_address"] = macAddress + array := []JSONObject{} + if set, ok := node.GetMap()["macaddress_set"]; ok { + var err error + array, err = set.GetArray() + if err != nil { + panic(err) + } + } + array = append(array, maasify(server.client, attrs)) + node.GetMap()["macaddress_set"] = JSONObject{value: array, client: server.client} + if _, ok := server.macAddressesPerNetwork[networkName]; !ok { + server.macAddressesPerNetwork[networkName] = map[string]JSONObject{} + } + server.macAddressesPerNetwork[networkName][systemId] = maasify(server.client, attrs) +} + +// AddBootImage adds a boot-image object to the specified nodegroup. +func (server *TestServer) AddBootImage(nodegroupUUID string, jsonText string) { + var attrs map[string]interface{} + err := json.Unmarshal([]byte(jsonText), &attrs) + checkError(err) + if _, ok := attrs["architecture"]; !ok { + panic("The boot-image json string does not contain an 'architecture' value.") + } + if _, ok := attrs["release"]; !ok { + panic("The boot-image json string does not contain a 'release' value.") + } + obj := maasify(server.client, attrs) + server.bootImages[nodegroupUUID] = append(server.bootImages[nodegroupUUID], obj) +} + +// AddZone adds a physical zone to the server. +func (server *TestServer) AddZone(name, description string) { + attrs := map[string]interface{}{ + "name": name, + "description": description, + } + obj := maasify(server.client, attrs) + server.zones[name] = obj +} + +// NewTestServer starts and returns a new MAAS test server. The caller should call Close when finished, to shut it down. 
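+//
+// For example (a sketch; "1.0" is an assumed API version string):
+//
+//	server := NewTestServer("1.0")
+//	defer server.Close()
+//	server.AddZone("zone1", "an example zone")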
+func NewTestServer(version string) *TestServer {
+	server := &TestServer{version: version}
+
+	serveMux := http.NewServeMux()
+	devicesURL := getDevicesEndpoint(server.version)
+	// Register handler for '/api/<version>/devices/*'.
+	serveMux.HandleFunc(devicesURL, func(w http.ResponseWriter, r *http.Request) {
+		devicesHandler(server, w, r)
+	})
+	nodesURL := getNodesEndpoint(server.version)
+	// Register handler for '/api/<version>/nodes/*'.
+	serveMux.HandleFunc(nodesURL, func(w http.ResponseWriter, r *http.Request) {
+		nodesHandler(server, w, r)
+	})
+	filesURL := getFilesEndpoint(server.version)
+	// Register handler for '/api/<version>/files/*'.
+	serveMux.HandleFunc(filesURL, func(w http.ResponseWriter, r *http.Request) {
+		filesHandler(server, w, r)
+	})
+	networksURL := getNetworksEndpoint(server.version)
+	// Register handler for '/api/<version>/networks/'.
+	serveMux.HandleFunc(networksURL, func(w http.ResponseWriter, r *http.Request) {
+		networksHandler(server, w, r)
+	})
+	ipAddressesURL := getIPAddressesEndpoint(server.version)
+	// Register handler for '/api/<version>/ipaddresses/'.
+	serveMux.HandleFunc(ipAddressesURL, func(w http.ResponseWriter, r *http.Request) {
+		ipAddressesHandler(server, w, r)
+	})
+	versionURL := getVersionURL(server.version)
+	// Register handler for '/api/<version>/version/'.
+	serveMux.HandleFunc(versionURL, func(w http.ResponseWriter, r *http.Request) {
+		versionHandler(server, w, r)
+	})
+	// Register handler for '/api/<version>/nodegroups/*'.
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	serveMux.HandleFunc(nodegroupsURL, func(w http.ResponseWriter, r *http.Request) {
+		nodegroupsHandler(server, w, r)
+	})
+
+	// Register handler for '/api/<version>/zones/*'.
+	zonesURL := getZonesEndpoint(server.version)
+	serveMux.HandleFunc(zonesURL, func(w http.ResponseWriter, r *http.Request) {
+		zonesHandler(server, w, r)
+	})
+
+	subnetsURL := getSubnetsEndpoint(server.version)
+	serveMux.HandleFunc(subnetsURL, func(w http.ResponseWriter, r *http.Request) {
+		subnetsHandler(server, w, r)
+	})
+
+	spacesURL := getSpacesEndpoint(server.version)
+	serveMux.HandleFunc(spacesURL, func(w http.ResponseWriter, r *http.Request) {
+		spacesHandler(server, w, r)
+	})
+
+	vlansURL := getVLANsEndpoint(server.version)
+	serveMux.HandleFunc(vlansURL, func(w http.ResponseWriter, r *http.Request) {
+		vlansHandler(server, w, r)
+	})
+
+	var mu sync.Mutex
+	singleFile := func(w http.ResponseWriter, req *http.Request) {
+		mu.Lock()
+		defer mu.Unlock()
+		serveMux.ServeHTTP(w, req)
+	}
+
+	newServer := httptest.NewServer(http.HandlerFunc(singleFile))
+	client, err := NewAnonymousClient(newServer.URL, "1.0")
+	checkError(err)
+	server.Server = newServer
+	server.serveMux = serveMux
+	server.client = *client
+	server.Clear()
+	return server
+}
+
+// devicesHandler handles requests for '/api/<version>/devices/*'.
+func devicesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	deviceURLRE := getDeviceURLRE(server.version)
+	deviceURLMatch := deviceURLRE.FindStringSubmatch(r.URL.Path)
+	devicesURL := getDevicesEndpoint(server.version)
+	switch {
+	case r.URL.Path == devicesURL:
+		devicesTopLevelHandler(server, w, r, op)
+	case deviceURLMatch != nil:
+		// Request for a single device.
+		deviceHandler(server, w, r, deviceURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// devicesTopLevelHandler handles a request for /api/<version>/devices/
+// (with no device id following as part of the path).
+func devicesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Device listing operation.
+		deviceListingHandler(server, w, r)
+	case r.Method == "POST" && op == "new":
+		newDeviceHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+func macMatches(device *device, macs []string, hasMac bool) bool {
+	if !hasMac {
+		return true
+	}
+	return contains(macs, device.MACAddress)
+}
+
+// deviceListingHandler handles requests for '/devices/'.
+func deviceListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	// TODO(mfoord): support filtering by hostname and id
+	macs, hasMac := values["mac_address"]
+	var matchedDevices []string
+	for _, device := range server.devices {
+		if macMatches(device, macs, hasMac) {
+			matchedDevices = append(matchedDevices, renderDevice(device))
+		}
+	}
+	json := fmt.Sprintf("[%v]", strings.Join(matchedDevices, ", "))
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, json)
+}
+
+var templateFuncs = template.FuncMap{
+	"quotedList": func(items []string) string {
+		var pieces []string
+		for _, item := range items {
+			pieces = append(pieces, fmt.Sprintf("%q", item))
+		}
+		return strings.Join(pieces, ", ")
+	},
+}
+
+const (
+	// The json template for generating new devices.
+	// TODO(mfoord): set resource_uri in MAC addresses
+	deviceTemplate = `{
+	"macaddress_set": [
+	    {
+		"mac_address": "{{.MACAddress}}"
+	    }
+	],
+	"zone": {
+	    "resource_uri": "/MAAS/api/{{.APIVersion}}/zones/default/",
+	    "name": "default",
+	    "description": ""
+	},
+	"parent": "{{.Parent}}",
+	"ip_addresses": [{{.IPAddresses | quotedList }}],
+	"hostname": "{{.Hostname}}",
+	"tag_names": [],
+	"owner": "maas-admin",
+	"system_id": "{{.SystemId}}",
+	"resource_uri": "/MAAS/api/{{.APIVersion}}/devices/{{.SystemId}}/"
+}`
+)
+
+func renderDevice(device *device) string {
+	t := template.New("Device template")
+	t = t.Funcs(templateFuncs)
+	t, err := t.Parse(deviceTemplate)
+	checkError(err)
+	var buf bytes.Buffer
+	err = t.Execute(&buf, device)
+	checkError(err)
+	return buf.String()
+}
+
+func getValue(values url.Values, value string) (string, bool) {
+	result, hasResult := values[value]
+	if !hasResult || len(result) != 1 || result[0] == "" {
+		return "", false
+	}
+	return result[0], true
+}
+
+// newDeviceHandler creates, stores and returns new devices.
+func newDeviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.PostForm
+
+	// TODO(mfoord): generate a "proper" uuid for the system Id.
+	uuid, err := generateNonce()
+	checkError(err)
+	systemId := fmt.Sprintf("node-%v", uuid)
+	// At least one MAC address must be specified.
+	// TODO(mfoord): we only support a single MAC in the test server.
+	mac, hasMac := getValue(values, "mac_addresses")
+
+	// hostname and parent are optional.
+	// TODO(mfoord): we require both to be set in the test server.
+	hostname, hasHostname := getValue(values, "hostname")
+	parent, hasParent := getValue(values, "parent")
+	if !hasHostname || !hasMac || !hasParent {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	device := &device{
+		MACAddress: mac,
+		APIVersion: server.version,
+		Parent:     parent,
+		Hostname:   hostname,
+		SystemId:   systemId,
+	}
+
+	deviceJSON := renderDevice(device)
+	server.devices[systemId] = device
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, deviceJSON)
+	return
+}
+
+// deviceHandler handles requests for '/api/<version>/devices/<system_id>/'.
+func deviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	device, ok := server.devices[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if r.Method == "GET" {
+		deviceJSON := renderDevice(device)
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		if operation == "claim_sticky_ip_address" {
+			err := r.ParseForm()
+			checkError(err)
+			values := r.PostForm
+			// TODO(mfoord): support optional mac_address parameter
+			// TODO(mfoord): requested_address should be optional
+			// and we should generate one if it isn't provided.
+			address, hasAddress := getValue(values, "requested_address")
+			if !hasAddress {
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+			checkError(err)
+			device.IPAddresses = append(device.IPAddresses, address)
+			deviceJSON := renderDevice(device)
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	} else if r.Method == "DELETE" {
+		delete(server.devices, systemId)
+		w.WriteHeader(http.StatusNoContent)
+		return
+
+	}
+
+	// TODO(mfoord): support PUT method for updating device
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+// nodesHandler handles requests for '/api/<version>/nodes/*'.
+func nodesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	nodeURLRE := getNodeURLRE(server.version)
+	nodeURLMatch := nodeURLRE.FindStringSubmatch(r.URL.Path)
+	nodesURL := getNodesEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodesURL:
+		nodesTopLevelHandler(server, w, r, op)
+	case nodeURLMatch != nil:
+		// Request for a single node.
+		nodeHandler(server, w, r, nodeURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodeHandler handles requests for '/api/<version>/nodes/<system_id>/'.
+func nodeHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) { + node, ok := server.nodes[systemId] + if !ok { + http.NotFoundHandler().ServeHTTP(w, r) + return + } + UUID, UUIDError := node.values["system_id"].GetString() + + if r.Method == "GET" { + if operation == "" { + w.WriteHeader(http.StatusOK) + if UUIDError == nil { + i, err := JSONObjectFromStruct(server.client, server.nodeMetadata[UUID].Interfaces) + checkError(err) + if err == nil { + node.values["interface_set"] = i + } + } + fmt.Fprint(w, marshalNode(node)) + return + } else if operation == "details" { + if UUIDError == nil { + i, err := JSONObjectFromStruct(server.client, server.nodeMetadata[UUID].Interfaces) + if err == nil { + node.values["interface_set"] = i + } + } + nodeDetailsHandler(server, w, r, systemId) + return + } else { + w.WriteHeader(http.StatusBadRequest) + return + } + } + if r.Method == "POST" { + // The only operations supported are "start", "stop" and "release". + if operation == "start" || operation == "stop" || operation == "release" { + // Record operation on node. + server.addNodeOperation(systemId, operation, r) + + if operation == "release" { + delete(server.OwnedNodes(), systemId) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, marshalNode(node)) + return + } + + w.WriteHeader(http.StatusBadRequest) + return + } + if r.Method == "DELETE" { + delete(server.nodes, systemId) + w.WriteHeader(http.StatusOK) + return + } + http.NotFoundHandler().ServeHTTP(w, r) +} + +func contains(slice []string, val string) bool { + for _, item := range slice { + if item == val { + return true + } + } + return false +} + +// nodeListingHandler handles requests for '/nodes/'. +func nodeListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + values, err := url.ParseQuery(r.URL.RawQuery) + checkError(err) + ids, hasId := values["id"] + var convertedNodes = []map[string]JSONObject{} + for systemId, node := range server.nodes { + if !hasId || contains(ids, systemId) { + convertedNodes = append(convertedNodes, node.GetMap()) + } + } + res, err := json.MarshalIndent(convertedNodes, "", " ") + checkError(err) + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(res)) +} + +// nodeDeploymentStatusHandler handles requests for '/nodes/?op=deployment_status'. +func nodeDeploymentStatusHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + values, err := url.ParseQuery(r.URL.RawQuery) + checkError(err) + nodes, _ := values["nodes"] + var nodeStatus = make(map[string]interface{}) + for _, systemId := range nodes { + node := server.nodes[systemId] + field, err := node.GetField("status") + if err != nil { + continue + } + switch field { + case NodeStatusDeployed: + nodeStatus[systemId] = "Deployed" + case NodeStatusFailedDeployment: + nodeStatus[systemId] = "Failed deployment" + default: + nodeStatus[systemId] = "Not in Deployment" + } + } + obj := maasify(server.client, nodeStatus) + res, err := json.MarshalIndent(obj, "", " ") + checkError(err) + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(res)) +} + +// findFreeNode looks for a node that is currently available, and +// matches the specified filter. 
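A sketch of querying the deployment_status operation handled above, assuming a node "mysystemid" was registered via server.NewNode; the "nodes" query parameter may be repeated once per system id. The helper name is illustrative only.

func exampleDeploymentStatus(server *TestServer) {
	// GET /api/1.0/nodes/?op=deployment_status&nodes=<id>&nodes=<id>...
	query := url.Values{"op": {"deployment_status"}, "nodes": {"mysystemid"}}
	resp, err := http.Get(server.Server.URL + "/api/1.0/nodes/?" + query.Encode())
	if err != nil {
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	// Prints e.g. {"mysystemid": "Not in Deployment"} for a node whose
	// status is neither NodeStatusDeployed nor NodeStatusFailedDeployment.
	fmt.Println(string(body))
}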
+func findFreeNode(server *TestServer, filter url.Values) *MAASObject { + for systemID, node := range server.Nodes() { + _, present := server.OwnedNodes()[systemID] + if !present { + var agentName, nodeName, zoneName, mem, cpuCores, arch string + for k := range filter { + switch k { + case "agent_name": + agentName = filter.Get(k) + case "name": + nodeName = filter.Get(k) + case "zone": + zoneName = filter.Get(k) + case "mem": + mem = filter.Get(k) + case "arch": + arch = filter.Get(k) + case "cpu-cores": + cpuCores = filter.Get(k) + } + } + if nodeName != "" && !matchField(node, "hostname", nodeName) { + continue + } + if zoneName != "" && !matchField(node, "zone", zoneName) { + continue + } + if mem != "" && !matchNumericField(node, "memory", mem) { + continue + } + if arch != "" && !matchArchitecture(node, "architecture", arch) { + continue + } + if cpuCores != "" && !matchNumericField(node, "cpu_count", cpuCores) { + continue + } + if agentName != "" { + agentNameObj := maasify(server.client, agentName) + node.GetMap()["agent_name"] = agentNameObj + } else { + delete(node.GetMap(), "agent_name") + } + return &node + } + } + return nil +} + +func matchArchitecture(node MAASObject, k, v string) bool { + field, err := node.GetField(k) + if err != nil { + return false + } + baseArch := strings.Split(field, "/") + return v == baseArch[0] +} + +func matchNumericField(node MAASObject, k, v string) bool { + field, ok := node.GetMap()[k] + if !ok { + return false + } + nodeVal, err := field.GetFloat64() + if err != nil { + return false + } + constraintVal, err := strconv.ParseFloat(v, 64) + if err != nil { + return false + } + return constraintVal <= nodeVal +} + +func matchField(node MAASObject, k, v string) bool { + field, err := node.GetField(k) + if err != nil { + return false + } + return field == v +} + +// nodesAcquireHandler simulates acquiring a node. +func nodesAcquireHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + requestValues := server.addNodesOperation("acquire", r) + node := findFreeNode(server, requestValues) + if node == nil { + w.WriteHeader(http.StatusConflict) + } else { + systemId, err := node.GetField("system_id") + checkError(err) + server.OwnedNodes()[systemId] = true + res, err := json.MarshalIndent(node, "", " ") + checkError(err) + // Record operation. + server.addNodeOperation(systemId, "acquire", r) + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(res)) + } +} + +// nodesReleaseHandler simulates releasing multiple nodes. 
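A sketch of acquiring a node under the matching rules implemented by findFreeNode and its helpers; the helper name is invented here. Note that matchNumericField treats "mem" and "cpu-cores" as lower bounds (constraint <= node value), while matchArchitecture compares only the part before "/" ("amd64" matches "amd64/generic").

func exampleAcquireWithConstraints(server *TestServer) {
	form := url.Values{"mem": {"2048"}, "arch": {"amd64"}}
	resp, err := http.PostForm(server.Server.URL+"/api/1.0/nodes/?op=acquire", form)
	if err == nil {
		// 200 with the node JSON when a free node matches; 409 otherwise.
		fmt.Println(resp.StatusCode)
	}
}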
+func nodesReleaseHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	server.addNodesOperation("release", r)
+	values := server.NodesOperationRequestValues()
+	systemIds := values[len(values)-1]["nodes"]
+	var unknown []string
+	for _, systemId := range systemIds {
+		if _, ok := server.Nodes()[systemId]; !ok {
+			unknown = append(unknown, systemId)
+		}
+	}
+	if len(unknown) > 0 {
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(w, "Unknown node(s): %s.", strings.Join(unknown, ", "))
+		return
+	}
+	var releasedNodes = []map[string]JSONObject{}
+	for _, systemId := range systemIds {
+		if _, ok := server.OwnedNodes()[systemId]; !ok {
+			continue
+		}
+		delete(server.OwnedNodes(), systemId)
+		node := server.Nodes()[systemId]
+		releasedNodes = append(releasedNodes, node.GetMap())
+	}
+	res, err := json.MarshalIndent(releasedNodes, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodesTopLevelHandler handles a request for /api/<version>/nodes/
+// (with no node id following as part of the path).
+func nodesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Node listing operation.
+		nodeListingHandler(server, w, r)
+	case r.Method == "GET" && op == "deployment_status":
+		// Node deployment_status operation.
+		nodeDeploymentStatusHandler(server, w, r)
+	case r.Method == "POST" && op == "acquire":
+		nodesAcquireHandler(server, w, r)
+	case r.Method == "POST" && op == "release":
+		nodesReleaseHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// AddNodeDetails stores node details, expected in XML format.
+func (server *TestServer) AddNodeDetails(systemId, xmlText string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	server.nodeDetails[systemId] = xmlText
+}
+
+const lldpXML = `
+<?xml version="1.0" encoding="UTF-8"?>
+<lldp label="LLDP neighbors"/>`
+
+// nodeDetailsHandler handles requests for '/api/<version>/nodes/<system_id>/?op=details'.
+func nodeDetailsHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string) {
+	attrs := make(map[string]interface{})
+	attrs["lldp"] = lldpXML
+	xmlText := server.nodeDetails[systemId]
+	attrs["lshw"] = []byte(xmlText)
+	res, err := bson.Marshal(attrs)
+	checkError(err)
+	w.Header().Set("Content-Type", "application/bson")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// filesHandler handles requests for '/api/<version>/files/*'.
+func filesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	fileURLRE := getFileURLRE(server.version)
+	fileURLMatch := fileURLRE.FindStringSubmatch(r.URL.Path)
+	fileListingURL := getFilesEndpoint(server.version)
+	switch {
+	case r.Method == "GET" && op == "list" && r.URL.Path == fileListingURL:
+		// File listing operation.
+		fileListingHandler(server, w, r)
+	case op == "get" && r.Method == "GET" && r.URL.Path == fileListingURL:
+		getFileHandler(server, w, r)
+	case op == "add" && r.Method == "POST" && r.URL.Path == fileListingURL:
+		addFileHandler(server, w, r)
+	case fileURLMatch != nil:
+		// Request for a single file.
+		fileHandler(server, w, r, fileURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// listFilenames returns the names of those uploaded files whose names start
+// with the given prefix, sorted lexicographically.
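A sketch of decoding the BSON payload that nodeDetailsHandler above produces; gopkg.in/mgo.v2/bson is the encoder this file already uses, and the helper name here is invented for illustration.

func decodeNodeDetails(body []byte) (lshw []byte, lldp string, err error) {
	var attrs map[string]interface{}
	if err = bson.Unmarshal(body, &attrs); err != nil {
		return nil, "", err
	}
	// The types mirror what nodeDetailsHandler stored: "lshw" was written
	// as []byte and "lldp" as a string.
	lshw, _ = attrs["lshw"].([]byte)
	lldp, _ = attrs["lldp"].(string)
	return lshw, lldp, nil
}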
+func listFilenames(server *TestServer, prefix string) []string {
+	var filenames = make([]string, 0)
+	for filename := range server.files {
+		if strings.HasPrefix(filename, prefix) {
+			filenames = append(filenames, filename)
+		}
+	}
+	sort.Strings(filenames)
+	return filenames
+}
+
+// stripContent copies a map of attributes representing an uploaded file,
+// but with the "content" attribute removed.
+func stripContent(original map[string]JSONObject) map[string]JSONObject {
+	newMap := make(map[string]JSONObject, len(original)-1)
+	for key, value := range original {
+		if key != "content" {
+			newMap[key] = value
+		}
+	}
+	return newMap
+}
+
+// fileListingHandler handles requests for '/api/<version>/files/?op=list'.
+func fileListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	prefix := values.Get("prefix")
+	filenames := listFilenames(server, prefix)
+
+	// Build a sorted list of the files as map[string]JSONObject objects.
+	convertedFiles := make([]map[string]JSONObject, 0)
+	for _, filename := range filenames {
+		// The "content" attribute is not in the listing.
+		fileMap := stripContent(server.files[filename].GetMap())
+		convertedFiles = append(convertedFiles, fileMap)
+	}
+	res, err := json.MarshalIndent(convertedFiles, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// fileHandler handles requests for '/api/<version>/files/<filename>/'.
+func fileHandler(server *TestServer, w http.ResponseWriter, r *http.Request, filename string, operation string) {
+	switch {
+	case r.Method == "DELETE":
+		delete(server.files, filename)
+		w.WriteHeader(http.StatusOK)
+	case r.Method == "GET":
+		// Retrieve a file's information (including content) as a JSON
+		// object.
+		file, ok := server.files[filename]
+		if !ok {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+		jsonText, err := json.MarshalIndent(file, "", " ")
+		if err != nil {
+			panic(err)
+		}
+		w.WriteHeader(http.StatusOK)
+		w.Write(jsonText)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// InternalError replies to the request with an HTTP 500 internal error.
+func InternalError(w http.ResponseWriter, r *http.Request, err error) {
+	http.Error(w, err.Error(), http.StatusInternalServerError)
+}
+
+// getFileHandler handles requests for
+// '/api/<version>/files/?op=get&filename=filename'.
+func getFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	filename := values.Get("filename")
+	file, found := server.files[filename]
+	if !found {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	base64Content, err := file.GetField("content")
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	content, err := base64.StdEncoding.DecodeString(base64Content)
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	w.Write(content)
+}
+
+func readMultipart(upload *multipart.FileHeader) ([]byte, error) {
+	file, err := upload.Open()
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	reader := bufio.NewReader(file)
+	return ioutil.ReadAll(reader)
+}
+
+// addFileHandler handles requests for '/api/<version>/files/?op=add&filename=filename'.
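A sketch contrasting the two read paths above: getFileHandler (op=get) streams the decoded bytes directly, whereas the per-file GET returns a JSON object whose "content" field is base64-encoded. The helper name is invented for illustration.

func fetchRawFile(serverURL, version, filename string) ([]byte, error) {
	u := fmt.Sprintf("%s/api/%s/files/?op=get&filename=%s",
		serverURL, version, url.QueryEscape(filename))
	resp, err := http.Get(u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// The body is the raw file content, already base64-decoded by the server.
	return ioutil.ReadAll(resp.Body)
}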
+func addFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseMultipartForm(10000000)
+	checkError(err)
+
+	filename := r.Form.Get("filename")
+	if filename == "" {
+		panic("upload has no filename")
+	}
+
+	uploads := r.MultipartForm.File
+	if len(uploads) != 1 {
+		panic("the payload should contain one file and one file only")
+	}
+	var upload *multipart.FileHeader
+	for _, uploadContent := range uploads {
+		upload = uploadContent[0]
+	}
+	content, err := readMultipart(upload)
+	checkError(err)
+	server.NewFile(filename, content)
+	w.WriteHeader(http.StatusOK)
+}
+
+// networkListConnectedMACSHandler handles requests for '/api/<version>/networks/<name>/?op=list_connected_macs'.
+func networkListConnectedMACSHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	networkURLRE := getNetworkURLRE(server.version)
+	networkURLREMatch := networkURLRE.FindStringSubmatch(r.URL.Path)
+	if networkURLREMatch == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	networkName := networkURLREMatch[1]
+	convertedMacAddresses := []map[string]JSONObject{}
+	if macAddresses, ok := server.macAddressesPerNetwork[networkName]; ok {
+		for _, macAddress := range macAddresses {
+			m, err := macAddress.GetMap()
+			checkError(err)
+			convertedMacAddresses = append(convertedMacAddresses, m)
+		}
+	}
+	res, err := json.MarshalIndent(convertedMacAddresses, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// networksHandler handles requests for '/api/<version>/networks/?node=<system_id>'.
+func networksHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only networks GET operation implemented")
+	}
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	systemId := values.Get("node")
+	if op == "list_connected_macs" {
+		networkListConnectedMACSHandler(server, w, r)
+		return
+	}
+	if op != "" {
+		panic("only list_connected_macs and default operations implemented")
+	}
+	if systemId == "" {
+		panic("network missing associated node system id")
+	}
+	networks := []MAASObject{}
+	if networkNames, hasNetworks := server.networksPerNode[systemId]; hasNetworks {
+		networks = make([]MAASObject, len(networkNames))
+		for i, networkName := range networkNames {
+			networks[i] = server.networks[networkName]
+		}
+	}
+	res, err := json.MarshalIndent(networks, "", " ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// ipAddressesHandler handles requests for '/api/<version>/ipaddresses/'.
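Before the IP address handlers, a sketch of the multipart request shape addFileHandler above expects: exactly one file part, with the target name carried in the "filename" query parameter rather than in the part header. This mirrors the uploadTo test helper later in this diff; the function name here is invented for illustration.

func uploadFile(serverURL, version, name string, content []byte) (*http.Response, error) {
	var buf bytes.Buffer
	w := multipart.NewWriter(&buf)
	part, err := w.CreateFormFile("file", name)
	if err != nil {
		return nil, err
	}
	part.Write(content)
	w.Close()
	target := fmt.Sprintf("%s/api/%s/files/?op=add&filename=%s",
		serverURL, version, url.QueryEscape(name))
	req, err := http.NewRequest("POST", target, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	return http.DefaultClient.Do(req)
}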
+func ipAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + err := r.ParseForm() + checkError(err) + values := r.Form + op := values.Get("op") + + switch r.Method { + case "GET": + if op != "" { + panic("expected empty op for GET, got " + op) + } + listIPAddressesHandler(server, w, r) + return + case "POST": + switch op { + case "reserve": + reserveIPAddressHandler(server, w, r, values.Get("network"), values.Get("requested_address")) + return + case "release": + releaseIPAddressHandler(server, w, r, values.Get("ip")) + return + default: + panic("expected op=release|reserve for POST, got " + op) + } + } + http.NotFoundHandler().ServeHTTP(w, r) +} + +func marshalIPAddress(server *TestServer, ipAddress string) (JSONObject, error) { + jsonTemplate := `{"alloc_type": 4, "ip": %q, "resource_uri": %q, "created": %q}` + uri := getIPAddressesEndpoint(server.version) + now := time.Now().UTC().Format(time.RFC3339) + bytes := []byte(fmt.Sprintf(jsonTemplate, ipAddress, uri, now)) + return Parse(server.client, bytes) +} + +func badRequestError(w http.ResponseWriter, err error) { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprint(w, err.Error()) +} + +func listIPAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + results := []MAASObject{} + for _, ips := range server.IPAddresses() { + for _, ip := range ips { + jsonObj, err := marshalIPAddress(server, ip) + if err != nil { + badRequestError(w, err) + return + } + maasObj, err := jsonObj.GetMAASObject() + if err != nil { + badRequestError(w, err) + return + } + results = append(results, maasObj) + } + } + res, err := json.MarshalIndent(results, "", " ") + checkError(err) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, string(res)) +} + +func reserveIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, network, reqAddress string) { + _, ipNet, err := net.ParseCIDR(network) + if err != nil { + badRequestError(w, fmt.Errorf("Invalid network parameter %s", network)) + return + } + if reqAddress != "" { + // Validate "requested_address" parameter. + reqIP := net.ParseIP(reqAddress) + if reqIP == nil { + badRequestError(w, fmt.Errorf("failed to detect a valid IP address from u'%s'", reqAddress)) + return + } + if !ipNet.Contains(reqIP) { + badRequestError(w, fmt.Errorf("%s is not inside the range %s", reqAddress, ipNet.String())) + return + } + } + // Find the network name matching the parsed CIDR. + foundNetworkName := "" + for netName, netObj := range server.networks { + // Get the "ip" and "netmask" attributes of the network. + netIP, err := netObj.GetField("ip") + checkError(err) + netMask, err := netObj.GetField("netmask") + checkError(err) + + // Convert the netmask string to net.IPMask. + parts := strings.Split(netMask, ".") + ipMask := make(net.IPMask, len(parts)) + for i, part := range parts { + intPart, err := strconv.Atoi(part) + checkError(err) + ipMask[i] = byte(intPart) + } + netNet := &net.IPNet{IP: net.ParseIP(netIP), Mask: ipMask} + if netNet.String() == network { + // Exact match found. + foundNetworkName = netName + break + } + } + if foundNetworkName == "" { + badRequestError(w, fmt.Errorf("No network found matching %s", network)) + return + } + ips, found := server.ipAddressesPerNetwork[foundNetworkName] + if !found { + // This will be the first address. + ips = []string{} + } + reservedIP := "" + if reqAddress != "" { + // Use what the user provided. 
NOTE: Because this is testing
+		// code, no duplicates check is done.
+		reservedIP = reqAddress
+	} else {
+		// Generate an IP in the network range by incrementing the
+		// last byte of the network's IP.
+		firstIP := ipNet.IP
+		firstIP[len(firstIP)-1] += byte(len(ips) + 1)
+		reservedIP = firstIP.String()
+	}
+	ips = append(ips, reservedIP)
+	server.ipAddressesPerNetwork[foundNetworkName] = ips
+	jsonObj, err := marshalIPAddress(server, reservedIP)
+	checkError(err)
+	maasObj, err := jsonObj.GetMAASObject()
+	checkError(err)
+	res, err := json.MarshalIndent(maasObj, "", " ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+func releaseIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, ip string) {
+	if netIP := net.ParseIP(ip); netIP == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if server.RemoveIPAddress(ip) {
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+// versionHandler handles requests for '/api/<version>/version/'.
+func versionHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only version GET operation implemented")
+	}
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, server.versionJSON)
+}
+
+// nodegroupsHandler handles requests for '/api/<version>/nodegroups/*'.
+func nodegroupsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	bootimagesURLRE := getBootimagesURLRE(server.version)
+	bootimagesURLMatch := bootimagesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsInterfacesURLRE := getNodegroupsInterfacesURLRE(server.version)
+	nodegroupsInterfacesURLMatch := nodegroupsInterfacesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodegroupsURL:
+		nodegroupsTopLevelHandler(server, w, r, op)
+	case bootimagesURLMatch != nil:
+		bootimagesHandler(server, w, r, bootimagesURLMatch[1], op)
+	case nodegroupsInterfacesURLMatch != nil:
+		nodegroupsInterfacesHandler(server, w, r, nodegroupsInterfacesURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodegroupsTopLevelHandler handles requests for '/api/<version>/nodegroups/'.
+func nodegroupsTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	if r.Method != "GET" || op != "list" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	nodegroups := []JSONObject{}
+	for uuid := range server.bootImages {
+		attrs := map[string]interface{}{
+			"uuid":      uuid,
+			resourceURI: getNodegroupURL(server.version, uuid),
+		}
+		obj := maasify(server.client, attrs)
+		nodegroups = append(nodegroups, obj)
+	}
+
+	res, err := json.MarshalIndent(nodegroups, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// bootimagesHandler handles requests for '/api/<version>/nodegroups/<uuid>/boot-images/'.
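A sketch of the dotted-quad netmask conversion that reserveIPAddressHandler above performs when matching a posted "network" against the registered test networks. This version leans on net.ParseIP instead of the byte-by-byte strconv.Atoi loop; the helper name is invented and the snippet assumes IPv4 masks, as the handler does.

func ipNetFromParts(ip, mask string) *net.IPNet {
	// A dotted-quad mask such as "255.255.255.0" is just four bytes, so
	// parsing it as an IPv4 address yields the mask directly.
	return &net.IPNet{
		IP:   net.ParseIP(ip).To4(),
		Mask: net.IPMask(net.ParseIP(mask).To4()),
	}
}

// ipNetFromParts("192.168.1.0", "255.255.255.0").String() == "192.168.1.0/24"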
+func bootimagesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	bootImages, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	res, err := json.MarshalIndent(bootImages, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodegroupsInterfacesHandler handles requests for '/api/<version>/nodegroups/<uuid>/interfaces/'.
+func nodegroupsInterfacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	_, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	interfaces, ok := server.nodegroupsInterfaces[nodegroupUUID]
+	if !ok {
+		// we already checked the nodegroup exists, so return an empty list
+		interfaces = []JSONObject{}
+	}
+	res, err := json.MarshalIndent(interfaces, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// zonesHandler handles requests for '/api/<version>/zones/'.
+func zonesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	if len(server.zones) == 0 {
+		// Until a zone is registered, behave as if the endpoint
+		// does not exist. This way we can simulate older MAAS
+		// servers that do not support zones.
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	zones := make([]JSONObject, 0, len(server.zones))
+	for _, zone := range server.zones {
+		zones = append(zones, zone)
+	}
+	res, err := json.MarshalIndent(zones, "", " ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}

=== added file 'src/github.com/juju/gomaasapi/testservice_spaces.go'
--- src/github.com/juju/gomaasapi/testservice_spaces.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/testservice_spaces.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,81 @@
+// Copyright 2015 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"regexp"
+)
+
+func getSpacesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/spaces/", version)
+}
+
+// Space is the MAAS API space representation
+type Space struct {
+	Name        string   `json:"name"`
+	Subnets     []Subnet `json:"subnets"`
+	ResourceURI string   `json:"resource_uri"`
+	ID          uint     `json:"id"`
+}
+
+// spacesHandler handles requests for '/api/<version>/spaces/'.
+func spacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	var err error
+	spacesURLRE := regexp.MustCompile(`/spaces/(.+?)/`)
+	spacesURLMatch := spacesURLRE.FindStringSubmatch(r.URL.Path)
+	spacesURL := getSpacesEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if spacesURLMatch != nil {
+		ID, err = NameOrIDToID(spacesURLMatch[1], server.spaceNameToID, 1, uint(len(server.spaces)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.spaces) == 0 {
+			// Until a space is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support spaces.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == spacesURL {
+			var spaces []Space
+			for i := uint(1); i < server.nextSpace; i++ {
+				s, ok := server.spaces[i]
+				if ok {
+					spaces = append(spaces, s)
+				}
+			}
+			err = json.NewEncoder(w).Encode(spaces)
+		} else if gotID == false {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			err = json.NewEncoder(w).Encode(server.spaces[ID])
+		}
+		checkError(err)
+	case "POST":
+		//server.NewSpace(r.Body)
+	case "PUT":
+		//server.UpdateSpace(r.Body)
+	case "DELETE":
+		delete(server.spaces, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}

=== added file 'src/github.com/juju/gomaasapi/testservice_subnets.go'
--- src/github.com/juju/gomaasapi/testservice_subnets.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/testservice_subnets.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,393 @@
+// Copyright 2015 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+func getSubnetsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/subnets/", version)
+}
+
+// CreateSubnet is used to receive new subnets via the MAAS API
type CreateSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	// VLAN this subnet belongs to. Currently ignored.
+	// TODO: Defaults to the default VLAN
+	// for the provided fabric or defaults to the default VLAN
+	// in the default fabric.
+	VLAN *uint `json:"vlan"`
+
+	// Fabric for the subnet. Currently ignored.
+	// TODO: Defaults to the fabric the provided
+	// VLAN belongs to or defaults to the default fabric.
+	Fabric *uint `json:"fabric"`
+
+	// VID of the VLAN this subnet belongs to. Currently ignored.
+	// TODO: Only used when vlan
+	// is not provided. Picks the VLAN with this VID in the provided
+	// fabric or the default fabric if one is not given.
+	VID *uint `json:"vid"`
+
+	// This is used for updates (PUT) and is ignored by create (POST)
+	ID uint `json:"id"`
+}
+
+// Subnet is the MAAS API subnet representation
+type Subnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	VLAN       VLAN     `json:"vlan"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	ResourceURI        string         `json:"resource_uri"`
+	ID                 uint           `json:"id"`
+	InUseIPAddresses   []IP           `json:"-"`
+	FixedAddressRanges []AddressRange `json:"-"`
+}
+
+// AddFixedAddressRange adds an AddressRange to the list of fixed address ranges
+// that subnet stores.
+func (server *TestServer) AddFixedAddressRange(subnetID uint, ar AddressRange) {
+	subnet := server.subnets[subnetID]
+	ar.startUint = IPFromString(ar.Start).UInt64()
+	ar.endUint = IPFromString(ar.End).UInt64()
+	subnet.FixedAddressRanges = append(subnet.FixedAddressRanges, ar)
+	server.subnets[subnetID] = subnet
+}
+
+// subnetsHandler handles requests for '/api/<version>/subnets/'.
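Before the handler itself, a sketch of seeding the test server so the subnet operations below have something to work on; the JSON fields mirror CreateSubnet, and the helper name is invented for illustration.

func seedSubnet(server *TestServer) {
	subnetJSON := strings.NewReader(`{
		"name": "maas-eth0",
		"space": "space-0",
		"cidr": "192.168.1.0/24",
		"gateway_ip": "192.168.1.1",
		"dns_servers": ["192.168.1.2"]
	}`)
	// NewSubnet assigns the next free subnet ID, starting at 1.
	server.NewSubnet(subnetJSON)
	// Mark 192.168.1.100-200 as a fixed range with purpose "dynamic".
	server.AddFixedAddressRange(1, AddressRange{
		Start:   "192.168.1.100",
		End:     "192.168.1.200",
		Purpose: []string{"dynamic"},
	})
}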
+func subnetsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + var err error + values, err := url.ParseQuery(r.URL.RawQuery) + checkError(err) + op := values.Get("op") + includeRangesString := strings.ToLower(values.Get("include_ranges")) + subnetsURLRE := regexp.MustCompile(`/subnets/(.+?)/`) + subnetsURLMatch := subnetsURLRE.FindStringSubmatch(r.URL.Path) + subnetsURL := getSubnetsEndpoint(server.version) + + var ID uint + var gotID bool + if subnetsURLMatch != nil { + ID, err = NameOrIDToID(subnetsURLMatch[1], server.subnetNameToID, 1, uint(len(server.subnets))) + + if err != nil { + http.NotFoundHandler().ServeHTTP(w, r) + return + } + + gotID = true + } + + var includeRanges bool + switch includeRangesString { + case "true", "yes", "1": + includeRanges = true + } + + switch r.Method { + case "GET": + w.Header().Set("Content-Type", "application/vnd.api+json") + if len(server.subnets) == 0 { + // Until a subnet is registered, behave as if the endpoint + // does not exist. This way we can simulate older MAAS + // servers that do not support subnets. + http.NotFoundHandler().ServeHTTP(w, r) + return + } + + if r.URL.Path == subnetsURL { + var subnets []Subnet + for i := uint(1); i < server.nextSubnet; i++ { + s, ok := server.subnets[i] + if ok { + subnets = append(subnets, s) + } + } + PrettyJsonWriter(subnets, w) + } else if gotID == false { + w.WriteHeader(http.StatusBadRequest) + } else { + switch op { + case "unreserved_ip_ranges": + PrettyJsonWriter(server.subnetUnreservedIPRanges(server.subnets[ID]), w) + case "reserved_ip_ranges": + PrettyJsonWriter(server.subnetReservedIPRanges(server.subnets[ID]), w) + case "statistics": + PrettyJsonWriter(server.subnetStatistics(server.subnets[ID], includeRanges), w) + default: + PrettyJsonWriter(server.subnets[ID], w) + } + } + checkError(err) + case "POST": + server.NewSubnet(r.Body) + case "PUT": + server.UpdateSubnet(r.Body) + case "DELETE": + delete(server.subnets, ID) + w.WriteHeader(http.StatusOK) + default: + w.WriteHeader(http.StatusBadRequest) + } +} + +type addressList []IP + +func (a addressList) Len() int { return len(a) } +func (a addressList) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a addressList) Less(i, j int) bool { return a[i].UInt64() < a[j].UInt64() } + +// AddressRange is used to generate reserved IP address range lists +type AddressRange struct { + Start string `json:"start"` + startUint uint64 + End string `json:"end"` + endUint uint64 + Purpose []string `json:"purpose,omitempty"` + NumAddresses uint `json:"num_addresses"` +} + +// AddressRangeList is a list of AddressRange +type AddressRangeList struct { + ar []AddressRange +} + +// Append appends a new AddressRange to an AddressRangeList +func (ranges *AddressRangeList) Append(startIP, endIP IP) { + var i AddressRange + i.Start, i.End = startIP.String(), endIP.String() + i.startUint, i.endUint = startIP.UInt64(), endIP.UInt64() + i.NumAddresses = uint(1 + endIP.UInt64() - startIP.UInt64()) + i.Purpose = startIP.Purpose + ranges.ar = append(ranges.ar, i) +} + +func appendRangesToIPList(subnet Subnet, ipAddresses *[]IP) { + for _, r := range subnet.FixedAddressRanges { + for v := r.startUint; v <= r.endUint; v++ { + ip := IPFromInt64(v) + ip.Purpose = r.Purpose + *ipAddresses = append(*ipAddresses, ip) + } + } +} + +func (server *TestServer) subnetUnreservedIPRanges(subnet Subnet) []AddressRange { + // Make a sorted copy of subnet.InUseIPAddresses + ipAddresses := make([]IP, len(subnet.InUseIPAddresses)) + copy(ipAddresses, 
subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+
+	// We need the first and last address in the subnet
+	var ranges AddressRangeList
+	var startIP, endIP, lastUsableIP IP
+
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+	startIP = IPFromNetIP(ipNet.IP)
+	// Start with the lowest usable address in the range, which is 1 above
+	// what net.ParseCIDR will give back.
+	startIP.SetUInt64(startIP.UInt64() + 1)
+
+	ones, bits := ipNet.Mask.Size()
+	set := ^((^uint64(0)) << uint(bits-ones))
+
+	// The last usable address is one below the broadcast address, which is
+	// what you get by bitwise ORing 'set' with any IP address in the subnet.
+	lastUsableIP.SetUInt64((startIP.UInt64() | set) - 1)
+
+	for _, endIP = range ipAddresses {
+		end := endIP.UInt64()
+
+		if endIP.UInt64() == startIP.UInt64() {
+			if endIP.UInt64() != lastUsableIP.UInt64() {
+				startIP.SetUInt64(end + 1)
+			}
+			continue
+		}
+
+		if end == lastUsableIP.UInt64() {
+			continue
+		}
+
+		ranges.Append(startIP, IPFromInt64(end-1))
+		startIP.SetUInt64(end + 1)
+	}
+
+	if startIP.UInt64() != lastUsableIP.UInt64() {
+		ranges.Append(startIP, lastUsableIP)
+	}
+
+	return ranges.ar
+}
+
+func (server *TestServer) subnetReservedIPRanges(subnet Subnet) []AddressRange {
+	var ranges AddressRangeList
+	var startIP, thisIP IP
+
+	// Make a sorted copy of subnet.InUseIPAddresses
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+
+	// Check for an empty list before indexing into it.
+	if len(ipAddresses) == 0 {
+		return ranges.ar
+	}
+	startIP = ipAddresses[0]
+	lastIP := ipAddresses[0]
+
+	for _, thisIP = range ipAddresses {
+		var purposeMismatch bool
+		if len(thisIP.Purpose) != len(startIP.Purpose) {
+			purposeMismatch = true
+		} else {
+			for i, p := range thisIP.Purpose {
+				if startIP.Purpose[i] != p {
+					purposeMismatch = true
+				}
+			}
+		}
+		if (thisIP.UInt64() != lastIP.UInt64() && thisIP.UInt64() != lastIP.UInt64()+1) || purposeMismatch {
+			ranges.Append(startIP, lastIP)
+			startIP = thisIP
+		}
+		lastIP = thisIP
+	}
+
+	if len(ranges.ar) == 0 || ranges.ar[len(ranges.ar)-1].endUint != lastIP.UInt64() {
+		ranges.Append(startIP, lastIP)
+	}
+
+	return ranges.ar
+}
+
+// SubnetStats holds statistics about a subnet
+type SubnetStats struct {
+	NumAvailable     uint           `json:"num_available"`
+	LargestAvailable uint           `json:"largest_available"`
+	NumUnavailable   uint           `json:"num_unavailable"`
+	TotalAddresses   uint           `json:"total_addresses"`
+	Usage            float32        `json:"usage"`
+	UsageString      string         `json:"usage_string"`
+	Ranges           []AddressRange `json:"ranges"`
+}
+
+func (server *TestServer) subnetStatistics(subnet Subnet, includeRanges bool) SubnetStats {
+	var stats SubnetStats
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+
+	ones, bits := ipNet.Mask.Size()
+	stats.TotalAddresses = (1 << uint(bits-ones)) - 2
+	stats.NumUnavailable = uint(len(subnet.InUseIPAddresses))
+	stats.NumAvailable = stats.TotalAddresses - stats.NumUnavailable
+	stats.Usage = float32(stats.NumUnavailable) / float32(stats.TotalAddresses)
+	stats.UsageString = fmt.Sprintf("%0.1f%%", stats.Usage*100)
+
+	// Calculate stats.LargestAvailable: the largest contiguous block of
+	// available IP addresses, taken from the unreserved ranges.
+	unreserved := server.subnetUnreservedIPRanges(subnet)
+	for _, addressRange := range unreserved {
+		if addressRange.NumAddresses > stats.LargestAvailable {
+			stats.LargestAvailable = addressRange.NumAddresses
+		}
+	}
+
+	if includeRanges {
+		stats.Ranges = unreserved
+	}
+
+	return stats
+}
+
+func
decodePostedSubnet(subnetJSON io.Reader) CreateSubnet {
+	var postedSubnet CreateSubnet
+	decoder := json.NewDecoder(subnetJSON)
+	err := decoder.Decode(&postedSubnet)
+	checkError(err)
+	if postedSubnet.DNSServers == nil {
+		postedSubnet.DNSServers = []string{}
+	}
+	return postedSubnet
+}
+
+// UpdateSubnet updates an existing subnet in the test server
+func (server *TestServer) UpdateSubnet(subnetJSON io.Reader) Subnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	updatedSubnet := subnetFromCreateSubnet(postedSubnet)
+	server.subnets[updatedSubnet.ID] = updatedSubnet
+	return updatedSubnet
+}
+
+// NewSubnet creates a subnet in the test server
+func (server *TestServer) NewSubnet(subnetJSON io.Reader) *Subnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	newSubnet := subnetFromCreateSubnet(postedSubnet)
+	newSubnet.ID = server.nextSubnet
+	server.subnets[server.nextSubnet] = newSubnet
+	server.subnetNameToID[newSubnet.Name] = newSubnet.ID
+
+	server.nextSubnet++
+	return &newSubnet
+}
+
+// NodeNetworkInterface represents a network interface attached to a node
+type NodeNetworkInterface struct {
+	Name  string        `json:"name"`
+	Links []NetworkLink `json:"links"`
+}
+
+// Node represents a node
+type Node struct {
+	SystemID   string                 `json:"system_id"`
+	Interfaces []NodeNetworkInterface `json:"interface_set"`
+}
+
+// NetworkLink represents a MAAS network link
+type NetworkLink struct {
+	ID     uint    `json:"id"`
+	Mode   string  `json:"mode"`
+	Subnet *Subnet `json:"subnet"`
+}
+
+// SetNodeNetworkLink records that the given node + interface are in subnet
+func (server *TestServer) SetNodeNetworkLink(SystemID string, nodeNetworkInterface NodeNetworkInterface) {
+	for i, ni := range server.nodeMetadata[SystemID].Interfaces {
+		if ni.Name == nodeNetworkInterface.Name {
+			server.nodeMetadata[SystemID].Interfaces[i] = nodeNetworkInterface
+			return
+		}
+	}
+	n := server.nodeMetadata[SystemID]
+	n.Interfaces = append(n.Interfaces, nodeNetworkInterface)
+	server.nodeMetadata[SystemID] = n
+}
+
+// subnetFromCreateSubnet converts a posted CreateSubnet into the Subnet the
+// test server stores.
+func subnetFromCreateSubnet(postedSubnet CreateSubnet) Subnet {
+	var newSubnet Subnet
+	newSubnet.DNSServers = postedSubnet.DNSServers
+	newSubnet.Name = postedSubnet.Name
+	newSubnet.Space = postedSubnet.Space
+	//TODO: newSubnet.VLAN = server.postedSubnetVLAN
+	newSubnet.GatewayIP = postedSubnet.GatewayIP
+	newSubnet.CIDR = postedSubnet.CIDR
+	newSubnet.ID = postedSubnet.ID
+	return newSubnet
+}

=== added file 'src/github.com/juju/gomaasapi/testservice_test.go'
--- src/github.com/juju/gomaasapi/testservice_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/gomaasapi/testservice_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1783 @@
+// Copyright 2013 Canonical Ltd. This software is licensed under the
+// GNU Lesser General Public License version 3 (see the file COPYING).
+
+package gomaasapi
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/rand"
+	"mime/multipart"
+	"net"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+
+	.
"gopkg.in/check.v1" + "gopkg.in/mgo.v2/bson" +) + +type TestServerSuite struct { + server *TestServer +} + +var _ = Suite(&TestServerSuite{}) + +func (suite *TestServerSuite) SetUpTest(c *C) { + server := NewTestServer("1.0") + suite.server = server +} + +func (suite *TestServerSuite) TearDownTest(c *C) { + suite.server.Close() +} + +func (suite *TestServerSuite) TestNewTestServerReturnsTestServer(c *C) { + handler := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + } + suite.server.serveMux.HandleFunc("/test/", handler) + resp, err := http.Get(suite.server.Server.URL + "/test/") + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusAccepted) +} + +func (suite *TestServerSuite) TestGetResourceURI(c *C) { + c.Check(getNodeURL("0.1", "test"), Equals, "/api/0.1/nodes/test/") +} + +func (suite *TestServerSuite) TestSetVersionJSON(c *C) { + capabilities := `{"capabilities": ["networks-management","static-ipaddresses", "devices-management"]}` + suite.server.SetVersionJSON(capabilities) + + url := fmt.Sprintf("/api/%s/version/", suite.server.version) + resp, err := http.Get(suite.server.Server.URL + url) + c.Assert(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + c.Assert(string(content), Equals, capabilities) +} + +func (suite *TestServerSuite) createDevice(c *C, mac, hostname, parent string) string { + devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" + values := url.Values{} + values.Add("mac_addresses", mac) + values.Add("hostname", hostname) + values.Add("parent", parent) + result := suite.post(c, devicesURL, values) + resultMap, err := result.GetMap() + c.Assert(err, IsNil) + systemId, err := resultMap["system_id"].GetString() + c.Assert(err, IsNil) + return systemId +} + +func getString(c *C, object map[string]JSONObject, key string) string { + value, err := object[key].GetString() + c.Assert(err, IsNil) + return value +} + +func (suite *TestServerSuite) post(c *C, url string, values url.Values) JSONObject { + resp, err := http.Post(suite.server.Server.URL+url, "application/x-www-form-urlencoded", strings.NewReader(values.Encode())) + c.Assert(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + result, err := Parse(suite.server.client, content) + c.Assert(err, IsNil) + return result +} + +func (suite *TestServerSuite) get(c *C, url string) JSONObject { + resp, err := http.Get(suite.server.Server.URL + url) + c.Assert(err, IsNil) + c.Assert(resp.StatusCode, Equals, http.StatusOK) + + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + + result, err := Parse(suite.server.client, content) + c.Assert(err, IsNil) + return result +} + +func checkDevice(c *C, device map[string]JSONObject, mac, hostname, parent string) { + macArray, err := device["macaddress_set"].GetArray() + c.Assert(err, IsNil) + c.Assert(macArray, HasLen, 1) + macMap, err := macArray[0].GetMap() + c.Assert(err, IsNil) + + actualMac := getString(c, macMap, "mac_address") + c.Assert(actualMac, Equals, mac) + + actualParent := getString(c, device, "parent") + c.Assert(actualParent, Equals, parent) + actualHostname := getString(c, device, "hostname") + c.Assert(actualHostname, Equals, hostname) +} + +func (suite *TestServerSuite) TestNewDeviceRequiredParameters(c *C) { + devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" + values := url.Values{} + 
values.Add("mac_addresses", "foo") + values.Add("hostname", "bar") + post := func(values url.Values) int { + resp, err := http.Post(suite.server.Server.URL+devicesURL, "application/x-www-form-urlencoded", strings.NewReader(values.Encode())) + c.Assert(err, IsNil) + return resp.StatusCode + } + c.Check(post(values), Equals, http.StatusBadRequest) + values.Del("hostname") + values.Add("parent", "baz") + c.Check(post(values), Equals, http.StatusBadRequest) + values.Del("mac_addresses") + values.Add("hostname", "bam") + c.Check(post(values), Equals, http.StatusBadRequest) +} + +func (suite *TestServerSuite) TestNewDevice(c *C) { + devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" + + values := url.Values{} + values.Add("mac_addresses", "foo") + values.Add("hostname", "bar") + values.Add("parent", "baz") + result := suite.post(c, devicesURL, values) + + resultMap, err := result.GetMap() + c.Assert(err, IsNil) + + macArray, err := resultMap["macaddress_set"].GetArray() + c.Assert(err, IsNil) + c.Assert(macArray, HasLen, 1) + macMap, err := macArray[0].GetMap() + c.Assert(err, IsNil) + + mac := getString(c, macMap, "mac_address") + c.Assert(mac, Equals, "foo") + + parent := getString(c, resultMap, "parent") + c.Assert(parent, Equals, "baz") + hostname := getString(c, resultMap, "hostname") + c.Assert(hostname, Equals, "bar") + + addresses, err := resultMap["ip_addresses"].GetArray() + c.Assert(err, IsNil) + c.Assert(addresses, HasLen, 0) + + systemId := getString(c, resultMap, "system_id") + resourceURI := getString(c, resultMap, "resource_uri") + c.Assert(resourceURI, Equals, fmt.Sprintf("/MAAS/api/%v/devices/%v/", suite.server.version, systemId)) +} + +func (suite *TestServerSuite) TestGetDevice(c *C) { + systemId := suite.createDevice(c, "foo", "bar", "baz") + deviceURL := fmt.Sprintf("/api/%v/devices/%v/", suite.server.version, systemId) + + result := suite.get(c, deviceURL) + resultMap, err := result.GetMap() + c.Assert(err, IsNil) + checkDevice(c, resultMap, "foo", "bar", "baz") + actualId, err := resultMap["system_id"].GetString() + c.Assert(actualId, Equals, systemId) +} + +func (suite *TestServerSuite) TestDevicesList(c *C) { + firstId := suite.createDevice(c, "foo", "bar", "baz") + c.Assert(firstId, Not(Equals), "") + secondId := suite.createDevice(c, "bam", "bing", "bong") + c.Assert(secondId, Not(Equals), "") + + devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=list" + result := suite.get(c, devicesURL) + + devicesArray, err := result.GetArray() + c.Assert(err, IsNil) + c.Assert(devicesArray, HasLen, 2) + + for _, device := range devicesArray { + deviceMap, err := device.GetMap() + c.Assert(err, IsNil) + systemId, err := deviceMap["system_id"].GetString() + c.Assert(err, IsNil) + switch systemId { + case firstId: + checkDevice(c, deviceMap, "foo", "bar", "baz") + case secondId: + checkDevice(c, deviceMap, "bam", "bing", "bong") + default: + c.Fatalf("unknown system id %q", systemId) + } + } +} + +func (suite *TestServerSuite) TestDevicesListMacFiltering(c *C) { + firstId := suite.createDevice(c, "foo", "bar", "baz") + c.Assert(firstId, Not(Equals), "") + secondId := suite.createDevice(c, "bam", "bing", "bong") + c.Assert(secondId, Not(Equals), "") + + op := fmt.Sprintf("?op=list&mac_address=%v", "foo") + devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + op + result := suite.get(c, devicesURL) + + devicesArray, err := result.GetArray() + c.Assert(err, IsNil) + c.Assert(devicesArray, HasLen, 1) + deviceMap, err 
:= devicesArray[0].GetMap() + c.Assert(err, IsNil) + checkDevice(c, deviceMap, "foo", "bar", "baz") +} + +func (suite *TestServerSuite) TestDeviceClaimStickyIPRequiresAddress(c *C) { + systemId := suite.createDevice(c, "foo", "bar", "baz") + op := "?op=claim_sticky_ip_address" + deviceURL := fmt.Sprintf("/api/%s/devices/%s/%s", suite.server.version, systemId, op) + values := url.Values{} + resp, err := http.Post(suite.server.Server.URL+deviceURL, "application/x-www-form-urlencoded", strings.NewReader(values.Encode())) + c.Assert(err, IsNil) + c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) +} + +func (suite *TestServerSuite) TestDeviceClaimStickyIP(c *C) { + systemId := suite.createDevice(c, "foo", "bar", "baz") + op := "?op=claim_sticky_ip_address" + deviceURL := fmt.Sprintf("/api/%s/devices/%s/", suite.server.version, systemId) + values := url.Values{} + values.Add("requested_address", "127.0.0.1") + result := suite.post(c, deviceURL+op, values) + resultMap, err := result.GetMap() + c.Assert(err, IsNil) + + addresses, err := resultMap["ip_addresses"].GetArray() + c.Assert(err, IsNil) + c.Assert(addresses, HasLen, 1) + address, err := addresses[0].GetString() + c.Assert(err, IsNil) + c.Assert(address, Equals, "127.0.0.1") +} + +func (suite *TestServerSuite) TestDeleteDevice(c *C) { + systemId := suite.createDevice(c, "foo", "bar", "baz") + deviceURL := fmt.Sprintf("/api/%s/devices/%s/", suite.server.version, systemId) + req, err := http.NewRequest("DELETE", suite.server.Server.URL+deviceURL, nil) + c.Assert(err, IsNil) + resp, err := http.DefaultClient.Do(req) + c.Assert(err, IsNil) + c.Assert(resp.StatusCode, Equals, http.StatusNoContent) + + resp, err = http.Get(suite.server.Server.URL + deviceURL) + c.Assert(err, IsNil) + c.Assert(resp.StatusCode, Equals, http.StatusNotFound) +} + +func (suite *TestServerSuite) TestInvalidOperationOnNodesIsBadRequest(c *C) { + badURL := getNodesEndpoint(suite.server.version) + "?op=procrastinate" + + response, err := http.Get(suite.server.Server.URL + badURL) + c.Assert(err, IsNil) + + c.Check(response.StatusCode, Equals, http.StatusBadRequest) +} + +func (suite *TestServerSuite) TestHandlesNodeListingUnknownPath(c *C) { + invalidPath := fmt.Sprintf("/api/%s/nodes/invalid/path/", suite.server.version) + resp, err := http.Get(suite.server.Server.URL + invalidPath) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusNotFound) +} + +func (suite *TestServerSuite) TestHandlesNodegroupsInterfacesListingUnknownNodegroup(c *C) { + invalidPath := fmt.Sprintf("/api/%s/nodegroups/unknown/interfaces/", suite.server.version) + resp, err := http.Get(suite.server.Server.URL + invalidPath) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusNotFound) +} + +func (suite *TestServerSuite) TestNewNode(c *C) { + input := `{"system_id": "mysystemid"}` + + newNode := suite.server.NewNode(input) + + c.Check(len(suite.server.nodes), Equals, 1) + c.Check(suite.server.nodes["mysystemid"], DeepEquals, newNode) +} + +func (suite *TestServerSuite) TestNodesReturnsNodes(c *C) { + input := `{"system_id": "mysystemid"}` + newNode := suite.server.NewNode(input) + + nodesMap := suite.server.Nodes() + + c.Check(len(nodesMap), Equals, 1) + c.Check(nodesMap["mysystemid"], DeepEquals, newNode) +} + +func (suite *TestServerSuite) TestChangeNode(c *C) { + input := `{"system_id": "mysystemid"}` + suite.server.NewNode(input) + suite.server.ChangeNode("mysystemid", "newfield", "newvalue") + + node, _ := suite.server.nodes["mysystemid"] + field, err := 
node.GetField("newfield") + c.Assert(err, IsNil) + c.Check(field, Equals, "newvalue") +} + +func (suite *TestServerSuite) TestClearClearsData(c *C) { + input := `{"system_id": "mysystemid"}` + suite.server.NewNode(input) + suite.server.addNodeOperation("mysystemid", "start", &http.Request{}) + + suite.server.Clear() + + c.Check(len(suite.server.nodes), Equals, 0) + c.Check(len(suite.server.nodeOperations), Equals, 0) + c.Check(len(suite.server.nodeOperationRequestValues), Equals, 0) +} + +func (suite *TestServerSuite) TestAddNodeOperationPopulatesOperations(c *C) { + input := `{"system_id": "mysystemid"}` + suite.server.NewNode(input) + + suite.server.addNodeOperation("mysystemid", "start", &http.Request{}) + suite.server.addNodeOperation("mysystemid", "stop", &http.Request{}) + + nodeOperations := suite.server.NodeOperations() + operations := nodeOperations["mysystemid"] + c.Check(operations, DeepEquals, []string{"start", "stop"}) +} + +func (suite *TestServerSuite) TestAddNodeOperationPopulatesOperationRequestValues(c *C) { + input := `{"system_id": "mysystemid"}` + suite.server.NewNode(input) + reader := strings.NewReader("key=value") + request, err := http.NewRequest("POST", "http://example.com/", reader) + request.Header.Set("Content-Type", "application/x-www-form-urlencoded") + c.Assert(err, IsNil) + + suite.server.addNodeOperation("mysystemid", "start", request) + + values := suite.server.NodeOperationRequestValues() + value := values["mysystemid"] + c.Check(len(value), Equals, 1) + c.Check(value[0], DeepEquals, url.Values{"key": []string{"value"}}) +} + +func (suite *TestServerSuite) TestNewNodeRequiresJSONString(c *C) { + input := `invalid:json` + defer func() { + recoveredError := recover().(*json.SyntaxError) + c.Check(recoveredError, NotNil) + c.Check(recoveredError.Error(), Matches, ".*invalid character.*") + }() + suite.server.NewNode(input) +} + +func (suite *TestServerSuite) TestNewNodeRequiresSystemIdKey(c *C) { + input := `{"test": "test"}` + defer func() { + recoveredError := recover() + c.Check(recoveredError, NotNil) + c.Check(recoveredError, Matches, ".*does not contain a 'system_id' value.") + }() + suite.server.NewNode(input) +} + +func (suite *TestServerSuite) TestHandlesNodeRequestNotFound(c *C) { + getURI := fmt.Sprintf("/api/%s/nodes/test/", suite.server.version) + resp, err := http.Get(suite.server.Server.URL + getURI) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusNotFound) +} + +func (suite *TestServerSuite) TestHandlesNodeUnknownOperation(c *C) { + input := `{"system_id": "mysystemid"}` + suite.server.NewNode(input) + postURI := fmt.Sprintf("/api/%s/nodes/mysystemid/?op=unknown/", suite.server.version) + respStart, err := http.Post(suite.server.Server.URL+postURI, "", nil) + + c.Check(err, IsNil) + c.Check(respStart.StatusCode, Equals, http.StatusBadRequest) +} + +func (suite *TestServerSuite) TestHandlesNodeDelete(c *C) { + input := `{"system_id": "mysystemid"}` + suite.server.NewNode(input) + deleteURI := fmt.Sprintf("/api/%s/nodes/mysystemid/?op=mysystemid", suite.server.version) + req, err := http.NewRequest("DELETE", suite.server.Server.URL+deleteURI, nil) + var client http.Client + resp, err := client.Do(req) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + c.Check(len(suite.server.nodes), Equals, 0) +} + +func uploadTo(url, fileName string, fileContent []byte) (*http.Response, error) { + buf := new(bytes.Buffer) + w := multipart.NewWriter(buf) + fw, err := w.CreateFormFile(fileName, fileName) + if err 
!= nil { + panic(err) + } + io.Copy(fw, bytes.NewBuffer(fileContent)) + w.Close() + req, err := http.NewRequest("POST", url, buf) + if err != nil { + panic(err) + } + req.Header.Set("Content-Type", w.FormDataContentType()) + client := &http.Client{} + return client.Do(req) +} + +func (suite *TestServerSuite) TestHandlesUploadFile(c *C) { + fileContent := []byte("test file content") + postURL := suite.server.Server.URL + fmt.Sprintf("/api/%s/files/?op=add&filename=filename", suite.server.version) + + resp, err := uploadTo(postURL, "upload", fileContent) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + c.Check(len(suite.server.files), Equals, 1) + file, ok := suite.server.files["filename"] + c.Assert(ok, Equals, true) + field, err := file.GetField("content") + c.Assert(err, IsNil) + c.Check(field, Equals, base64.StdEncoding.EncodeToString(fileContent)) +} + +func (suite *TestServerSuite) TestNewFileEscapesName(c *C) { + obj := suite.server.NewFile("aa?bb", []byte("bytes")) + resourceURI := obj.URI() + c.Check(strings.Contains(resourceURI.String(), "aa?bb"), Equals, false) + c.Check(strings.Contains(resourceURI.Path, "aa?bb"), Equals, true) + anonURI, err := obj.GetField("anon_resource_uri") + c.Assert(err, IsNil) + c.Check(strings.Contains(anonURI, "aa?bb"), Equals, false) + c.Check(strings.Contains(anonURI, url.QueryEscape("aa?bb")), Equals, true) +} + +func (suite *TestServerSuite) TestHandlesFile(c *C) { + const filename = "my-file" + const fileContent = "test file content" + file := suite.server.NewFile(filename, []byte(fileContent)) + getURI := fmt.Sprintf("/api/%s/files/%s/", suite.server.version, filename) + fileURI, err := file.GetField("anon_resource_uri") + c.Assert(err, IsNil) + + resp, err := http.Get(suite.server.Server.URL + getURI) + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + var obj map[string]interface{} + err = json.Unmarshal(content, &obj) + c.Assert(err, IsNil) + anon_url, ok := obj["anon_resource_uri"] + c.Check(ok, Equals, true) + c.Check(anon_url.(string), Equals, fileURI) + base64Content, ok := obj["content"] + c.Check(ok, Equals, true) + decodedContent, err := base64.StdEncoding.DecodeString(base64Content.(string)) + c.Assert(err, IsNil) + c.Check(string(decodedContent), Equals, fileContent) +} + +func (suite *TestServerSuite) TestHandlesGetFile(c *C) { + fileContent := []byte("test file content") + fileName := "filename" + suite.server.NewFile(fileName, fileContent) + getURI := fmt.Sprintf("/api/%s/files/?op=get&filename=filename", suite.server.version) + + resp, err := http.Get(suite.server.Server.URL + getURI) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + content, err := readAndClose(resp.Body) + c.Check(err, IsNil) + c.Check(string(content), Equals, string(fileContent)) + c.Check(content, DeepEquals, fileContent) +} + +func (suite *TestServerSuite) TestHandlesListReturnsSortedFilenames(c *C) { + fileName1 := "filename1" + suite.server.NewFile(fileName1, []byte("test file content")) + fileName2 := "filename2" + suite.server.NewFile(fileName2, []byte("test file content")) + getURI := fmt.Sprintf("/api/%s/files/?op=list", suite.server.version) + + resp, err := http.Get(suite.server.Server.URL + getURI) + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + var files []map[string]string + err = json.Unmarshal(content, &files) + 
c.Assert(err, IsNil) + c.Check(len(files), Equals, 2) + c.Check(files[0]["filename"], Equals, fileName1) + c.Check(files[1]["filename"], Equals, fileName2) +} + +func (suite *TestServerSuite) TestHandlesListFiltersFiles(c *C) { + fileName1 := "filename1" + suite.server.NewFile(fileName1, []byte("test file content")) + fileName2 := "prefixFilename" + suite.server.NewFile(fileName2, []byte("test file content")) + getURI := fmt.Sprintf("/api/%s/files/?op=list&prefix=prefix", suite.server.version) + + resp, err := http.Get(suite.server.Server.URL + getURI) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + var files []map[string]string + err = json.Unmarshal(content, &files) + c.Assert(err, IsNil) + c.Check(len(files), Equals, 1) + c.Check(files[0]["filename"], Equals, fileName2) +} + +func (suite *TestServerSuite) TestHandlesListOmitsContent(c *C) { + const filename = "myfile" + fileContent := []byte("test file content") + suite.server.NewFile(filename, fileContent) + getURI := fmt.Sprintf("/api/%s/files/?op=list", suite.server.version) + + resp, err := http.Get(suite.server.Server.URL + getURI) + c.Assert(err, IsNil) + + content, err := readAndClose(resp.Body) + c.Assert(err, IsNil) + var files []map[string]string + err = json.Unmarshal(content, &files) + + // The resulting dict does not have a "content" entry. + file := files[0] + _, ok := file["content"] + c.Check(ok, Equals, false) + + // But the original as stored in the test service still has it. + contentAfter, err := suite.server.files[filename].GetField("content") + c.Assert(err, IsNil) + bytes, err := base64.StdEncoding.DecodeString(contentAfter) + c.Assert(err, IsNil) + c.Check(string(bytes), Equals, string(fileContent)) +} + +func (suite *TestServerSuite) TestDeleteFile(c *C) { + fileName1 := "filename1" + suite.server.NewFile(fileName1, []byte("test file content")) + deleteURI := fmt.Sprintf("/api/%s/files/filename1/", suite.server.version) + + req, err := http.NewRequest("DELETE", suite.server.Server.URL+deleteURI, nil) + c.Check(err, IsNil) + var client http.Client + resp, err := client.Do(req) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + c.Check(suite.server.Files(), DeepEquals, map[string]MAASObject{}) +} + +func (suite *TestServerSuite) TestListZonesNotSupported(c *C) { + // Older versions of MAAS do not support zones. We simulate + // this behaviour by returning 404 if no zones are defined. 
+ zonesURL := getZonesEndpoint(suite.server.version) + resp, err := http.Get(suite.server.Server.URL + zonesURL) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusNotFound) +} + +func defaultSubnet() CreateSubnet { + var s CreateSubnet + s.DNSServers = []string{"192.168.1.2"} + s.Name = "maas-eth0" + s.Space = "space-0" + s.GatewayIP = "192.168.1.1" + s.CIDR = "192.168.1.0/24" + s.ID = 1 + return s +} + +func (suite *TestServerSuite) subnetJSON(subnet CreateSubnet) *bytes.Buffer { + var out bytes.Buffer + err := json.NewEncoder(&out).Encode(subnet) + if err != nil { + panic(err) + } + return &out +} + +func (suite *TestServerSuite) subnetURL(ID int) string { + return suite.subnetsURL() + strconv.Itoa(ID) + "/" +} + +func (suite *TestServerSuite) subnetsURL() string { + return suite.server.Server.URL + getSubnetsEndpoint(suite.server.version) +} + +func (suite *TestServerSuite) getSubnets(c *C) []Subnet { + resp, err := http.Get(suite.subnetsURL()) + + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + + var subnets []Subnet + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&subnets) + c.Check(err, IsNil) + return subnets +} + +func (suite *TestServerSuite) TestSubnetAdd(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + + subnets := suite.getSubnets(c) + c.Check(subnets, HasLen, 1) + s := subnets[0] + c.Check(s.DNSServers, DeepEquals, []string{"192.168.1.2"}) + c.Check(s.Name, Equals, "maas-eth0") + c.Check(s.Space, Equals, "space-0") + c.Check(s.VLAN.ID, Equals, uint(0)) + c.Check(s.CIDR, Equals, "192.168.1.0/24") +} + +func (suite *TestServerSuite) TestSubnetGet(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + + subnet2 := defaultSubnet() + subnet2.Name = "maas-eth1" + subnet2.CIDR = "192.168.2.0/24" + suite.server.NewSubnet(suite.subnetJSON(subnet2)) + + subnets := suite.getSubnets(c) + c.Check(subnets, HasLen, 2) + c.Check(subnets[0].CIDR, Equals, "192.168.1.0/24") + c.Check(subnets[1].CIDR, Equals, "192.168.2.0/24") +} + +func (suite *TestServerSuite) TestSubnetPut(c *C) { + subnet1 := defaultSubnet() + suite.server.NewSubnet(suite.subnetJSON(subnet1)) + + subnets := suite.getSubnets(c) + c.Check(subnets, HasLen, 1) + c.Check(subnets[0].DNSServers, DeepEquals, []string{"192.168.1.2"}) + + subnet1.DNSServers = []string{"192.168.1.2", "192.168.1.3"} + suite.server.UpdateSubnet(suite.subnetJSON(subnet1)) + + subnets = suite.getSubnets(c) + c.Check(subnets, HasLen, 1) + c.Check(subnets[0].DNSServers, DeepEquals, []string{"192.168.1.2", "192.168.1.3"}) +} + +func (suite *TestServerSuite) TestSubnetDelete(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + + subnets := suite.getSubnets(c) + c.Check(subnets, HasLen, 1) + c.Check(subnets[0].DNSServers, DeepEquals, []string{"192.168.1.2"}) + + req, err := http.NewRequest("DELETE", suite.subnetURL(1), nil) + c.Check(err, IsNil) + resp, err := http.DefaultClient.Do(req) + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusOK) + + resp, err = http.Get(suite.subnetsURL()) + c.Check(err, IsNil) + c.Check(resp.StatusCode, Equals, http.StatusNotFound) +} + +func (suite *TestServerSuite) reserveSomeAddresses() map[int]bool { + reserved := make(map[int]bool) + rand.Seed(6) + + // Insert 200 distinct random host addresses in 192.168.1.1-254, + // probing forward (and wrapping around) on collision. + for i := 0; i < 200; i++ { + r := rand.Intn(253) + 1 + _, ok := reserved[r] + for ok { + r++ + if r == 255 { + r = 1 + } + _, ok = reserved[r] + } + reserved[r] = true + addr := fmt.Sprintf("192.168.1.%d", r) + 
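// Mark the address as reserved on the test server's maas-eth0 subnet. +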
suite.server.NewIPAddress(addr, "maas-eth0") + } + + return reserved +} + +func (suite *TestServerSuite) TestSubnetReservedIPRanges(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + reserved := suite.reserveSomeAddresses() + + // Fetch from the server + reservedIPRangeURL := suite.subnetURL(1) + "?op=reserved_ip_ranges" + resp, err := http.Get(reservedIPRangeURL) + c.Check(err, IsNil) + + var reservedFromAPI []AddressRange + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&reservedFromAPI) + c.Check(err, IsNil) + + // Check that anything in a reserved range was an address we allocated + // with NewIPAddress + for _, addressRange := range reservedFromAPI { + var start, end int + fmt.Sscanf(addressRange.Start, "192.168.1.%d", &start) + fmt.Sscanf(addressRange.End, "192.168.1.%d", &end) + c.Check(addressRange.NumAddresses, Equals, uint(1+end-start)) + c.Check(start <= end, Equals, true) + c.Check(start < 255, Equals, true) + c.Check(end < 255, Equals, true) + for i := start; i <= end; i++ { + _, ok := reserved[i] + c.Check(ok, Equals, true) + delete(reserved, i) + } + } + c.Check(reserved, HasLen, 0) +} + +func (suite *TestServerSuite) TestSubnetUnreservedIPRanges(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + reserved := suite.reserveSomeAddresses() + unreserved := make(map[int]bool) + + // Fetch from the server + reservedIPRangeURL := suite.subnetURL(1) + "?op=unreserved_ip_ranges" + resp, err := http.Get(reservedIPRangeURL) + c.Check(err, IsNil) + + var unreservedFromAPI []AddressRange + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&unreservedFromAPI) + c.Check(err, IsNil) + + // Check that anything in an unreserved range wasn't an address we allocated + // with NewIPAddress + for _, addressRange := range unreservedFromAPI { + var start, end int + fmt.Sscanf(addressRange.Start, "192.168.1.%d", &start) + fmt.Sscanf(addressRange.End, "192.168.1.%d", &end) + c.Check(addressRange.NumAddresses, Equals, uint(1+end-start)) + c.Check(start <= end, Equals, true) + c.Check(start < 255, Equals, true) + c.Check(end < 255, Equals, true) + for i := start; i <= end; i++ { + _, ok := reserved[i] + c.Check(ok, Equals, false) + unreserved[i] = true + } + } + // Every address must be in exactly one of the two sets. + for i := 1; i < 255; i++ { + _, r := reserved[i] + _, u := unreserved[i] + c.Check(r || u, Equals, true) + } + c.Check(len(reserved)+len(unreserved), Equals, 254) +} + +func (suite *TestServerSuite) TestSubnetReserveRange(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + suite.server.NewIPAddress("192.168.1.10", "maas-eth0") + + var ar AddressRange + ar.Start = "192.168.1.100" + ar.End = "192.168.1.200" + ar.Purpose = []string{"dynamic"} + + suite.server.AddFixedAddressRange(1, ar) + + // Fetch from the server + reservedIPRangeURL := suite.subnetURL(1) + "?op=reserved_ip_ranges" + resp, err := http.Get(reservedIPRangeURL) + c.Check(err, IsNil) + + var reservedFromAPI []AddressRange + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&reservedFromAPI) + c.Check(err, IsNil) + + // Check that the address ranges we got back were as expected + addressRange := reservedFromAPI[0] + c.Check(addressRange.Start, Equals, "192.168.1.10") + c.Check(addressRange.End, Equals, "192.168.1.10") + c.Check(addressRange.NumAddresses, Equals, uint(1)) + c.Check(addressRange.Purpose[0], Equals, "assigned-ip") + c.Check(addressRange.Purpose, HasLen, 1) + + addressRange = reservedFromAPI[1] + 
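// The dynamic range added via AddFixedAddressRange should be reported second. +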
c.Check(addressRange.Start, Equals, "192.168.1.100") + c.Check(addressRange.End, Equals, "192.168.1.200") + c.Check(addressRange.NumAddresses, Equals, uint(101)) + c.Check(addressRange.Purpose[0], Equals, "dynamic") + c.Check(addressRange.Purpose, HasLen, 1) +} + +func (suite *TestServerSuite) getSubnetStats(c *C, subnetID int) SubnetStats { + URL := suite.subnetURL(subnetID) + "?op=statistics" + resp, err := http.Get(URL) + c.Check(err, IsNil) + + var s SubnetStats + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&s) + c.Check(err, IsNil) + return s +} + +func (suite *TestServerSuite) TestSubnetStats(c *C) { + suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + + stats := suite.getSubnetStats(c, 1) + // There are 254 usable addresses in a class C subnet, so these + // stats are fixed + expected := SubnetStats{ + NumAvailable: 254, + LargestAvailable: 254, + NumUnavailable: 0, + TotalAddresses: 254, + Usage: 0, + UsageString: "0.0%", + Ranges: nil, + } + c.Check(stats, DeepEquals, expected) + + suite.reserveSomeAddresses() + stats = suite.getSubnetStats(c, 1) + // We have reserved 200 addresses so parts of these + // stats are fixed. + expected = SubnetStats{ + NumAvailable: 54, + NumUnavailable: 200, + TotalAddresses: 254, + Usage: 0.787401556968689, + UsageString: "78.7%", + Ranges: nil, + } + + unreservedRanges := suite.server.subnetUnreservedIPRanges(suite.server.subnets[1]) + var largestAvailable uint + for _, addressRange := range unreservedRanges { + if addressRange.NumAddresses > largestAvailable { + largestAvailable = addressRange.NumAddresses + } + } + + expected.LargestAvailable = largestAvailable + c.Check(stats, DeepEquals, expected) +} + +func (suite *TestServerSuite) TestSubnetsInNodes(c *C) { + // Create a subnet + subnet := suite.server.NewSubnet(suite.subnetJSON(defaultSubnet())) + + // Create a node + var node Node + node.SystemID = "node-89d832ca-8877-11e5-b5a5-00163e86022b" + suite.server.NewNode(fmt.Sprintf(`{"system_id": "%s"}`, node.SystemID)) + + // Put the node in the subnet + var nni NodeNetworkInterface + nni.Name = "eth0" + nni.Links = append(nni.Links, NetworkLink{uint(1), "auto", subnet}) + suite.server.SetNodeNetworkLink(node.SystemID, nni) + + // Fetch the node details + URL := suite.server.Server.URL + getNodesEndpoint(suite.server.version) + node.SystemID + "/" + resp, err := http.Get(URL) + c.Check(err, IsNil) + + var n Node + decoder := json.NewDecoder(resp.Body) + err = decoder.Decode(&n) + c.Check(err, IsNil) + c.Check(n.SystemID, Equals, node.SystemID) + c.Check(n.Interfaces, HasLen, 1) + i := n.Interfaces[0] + c.Check(i.Name, Equals, "eth0") + c.Check(i.Links, HasLen, 1) + c.Check(i.Links[0].ID, Equals, uint(1)) + c.Check(i.Links[0].Subnet.Name, Equals, "maas-eth0") +} + +type IPSuite struct { +} + +var _ = Suite(&IPSuite{}) + +func (suite *IPSuite) TestIPFromNetIP(c *C) { + ip := IPFromNetIP(net.ParseIP("1.2.3.4")) + c.Check(ip.String(), Equals, "1.2.3.4") +} + +func (suite *IPSuite) TestIPUInt64(c *C) { + ip := IPFromNetIP(net.ParseIP("1.2.3.4")) + v := ip.UInt64() + c.Check(v, Equals, uint64(0x01020304)) +} + +func (suite *IPSuite) TestIPSetUInt64(c *C) { + var ip IP + ip.SetUInt64(0x01020304) + c.Check(ip.String(), Equals, "1.2.3.4") +} + +// TestMAASObjectSuite validates that the object created by +// NewTestMAAS can be used by the gomaasapi library as if it were a real +// MAAS server.
+type TestMAASObjectSuite struct { + TestMAASObject *TestMAASObject +} + +var _ = Suite(&TestMAASObjectSuite{}) + +func (suite *TestMAASObjectSuite) SetUpSuite(c *C) { + suite.TestMAASObject = NewTestMAAS("1.0") +} + +func (suite *TestMAASObjectSuite) TearDownSuite(c *C) { + suite.TestMAASObject.Close() +} + +func (suite *TestMAASObjectSuite) TearDownTest(c *C) { + suite.TestMAASObject.TestServer.Clear() +} + +func (suite *TestMAASObjectSuite) TestListNodes(c *C) { + input := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(input) + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + + listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) + + c.Check(err, IsNil) + listNodes, err := listNodeObjects.GetArray() + c.Assert(err, IsNil) + c.Check(len(listNodes), Equals, 1) + node, err := listNodes[0].GetMAASObject() + c.Assert(err, IsNil) + systemId, err := node.GetField("system_id") + c.Assert(err, IsNil) + c.Check(systemId, Equals, "mysystemid") + resourceURI, _ := node.GetField(resourceURI) + apiVersion := suite.TestMAASObject.TestServer.version + expectedResourceURI := fmt.Sprintf("/api/%s/nodes/mysystemid/", apiVersion) + c.Check(resourceURI, Equals, expectedResourceURI) +} + +func (suite *TestMAASObjectSuite) TestListNodesNoNodes(c *C) { + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) + c.Check(err, IsNil) + + listNodes, err := listNodeObjects.GetArray() + + c.Check(err, IsNil) + c.Check(listNodes, DeepEquals, []JSONObject{}) +} + +func (suite *TestMAASObjectSuite) TestListNodesSelectedNodes(c *C) { + input := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(input) + input2 := `{"system_id": "mysystemid2"}` + suite.TestMAASObject.TestServer.NewNode(input2) + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + + listNodeObjects, err := nodeListing.CallGet("list", url.Values{"id": {"mysystemid2"}}) + + c.Check(err, IsNil) + listNodes, err := listNodeObjects.GetArray() + c.Check(err, IsNil) + c.Check(len(listNodes), Equals, 1) + node, _ := listNodes[0].GetMAASObject() + systemId, _ := node.GetField("system_id") + c.Check(systemId, Equals, "mysystemid2") +} + +func (suite *TestMAASObjectSuite) TestDeleteNode(c *C) { + input := `{"system_id": "mysystemid"}` + node := suite.TestMAASObject.TestServer.NewNode(input) + + err := node.Delete() + + c.Check(err, IsNil) + c.Check(suite.TestMAASObject.TestServer.Nodes(), DeepEquals, map[string]MAASObject{}) +} + +func (suite *TestMAASObjectSuite) TestOperationsOnNode(c *C) { + input := `{"system_id": "mysystemid"}` + node := suite.TestMAASObject.TestServer.NewNode(input) + operations := []string{"start", "stop", "release"} + for _, operation := range operations { + _, err := node.CallPost(operation, url.Values{}) + c.Check(err, IsNil) + } +} + +func (suite *TestMAASObjectSuite) TestOperationsOnNodeGetRecorded(c *C) { + input := `{"system_id": "mysystemid"}` + node := suite.TestMAASObject.TestServer.NewNode(input) + + _, err := node.CallPost("start", url.Values{}) + + c.Check(err, IsNil) + nodeOperations := suite.TestMAASObject.TestServer.NodeOperations() + operations := nodeOperations["mysystemid"] + c.Check(operations, DeepEquals, []string{"start"}) +} + +func (suite *TestMAASObjectSuite) TestAcquireOperationGetsRecorded(c *C) { + input := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(input) + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + params := 
url.Values{"key": []string{"value"}} + + jsonResponse, err := nodesObj.CallPost("acquire", params) + c.Assert(err, IsNil) + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + systemId, err := acquiredNode.GetField("system_id") + c.Assert(err, IsNil) + + // The 'acquire' operation has been recorded. + nodeOperations := suite.TestMAASObject.TestServer.NodeOperations() + operations := nodeOperations[systemId] + c.Check(operations, DeepEquals, []string{"acquire"}) + + // The parameters used to 'acquire' the node have been recorded as well. + values := suite.TestMAASObject.TestServer.NodeOperationRequestValues() + value := values[systemId] + c.Check(len(value), Equals, 1) + c.Check(value[0], DeepEquals, params) +} + +func (suite *TestMAASObjectSuite) TestNodesRelease(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "mysystemid1"}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "mysystemid2"}`) + suite.TestMAASObject.TestServer.OwnedNodes()["mysystemid2"] = true + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + params := url.Values{"nodes": []string{"mysystemid1", "mysystemid2"}} + + // release should only release mysystemid2, as it is the only one allocated. + jsonResponse, err := nodesObj.CallPost("release", params) + c.Assert(err, IsNil) + releasedNodes, err := jsonResponse.GetArray() + c.Assert(err, IsNil) + c.Assert(releasedNodes, HasLen, 1) + releasedNode, err := releasedNodes[0].GetMAASObject() + c.Assert(err, IsNil) + systemId, err := releasedNode.GetField("system_id") + c.Assert(err, IsNil) + c.Assert(systemId, Equals, "mysystemid2") + + // The 'release' operation has been recorded. + nodesOperations := suite.TestMAASObject.TestServer.NodesOperations() + c.Check(nodesOperations, DeepEquals, []string{"release"}) + nodesOperationRequestValues := suite.TestMAASObject.TestServer.NodesOperationRequestValues() + expectedValues := make(url.Values) + expectedValues.Add("nodes", "mysystemid1") + expectedValues.Add("nodes", "mysystemid2") + c.Check(nodesOperationRequestValues, DeepEquals, []url.Values{expectedValues}) +} + +func (suite *TestMAASObjectSuite) TestNodesReleaseUnknown(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "mysystemid"}`) + suite.TestMAASObject.TestServer.OwnedNodes()["mysystemid"] = true + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + params := url.Values{"nodes": []string{"mysystemid", "what"}} + + // if there are any unknown nodes, none are released. + _, err := nodesObj.CallPost("release", params) + c.Assert(err, ErrorMatches, `gomaasapi: got error back from server: 400 Bad Request \(Unknown node\(s\): what.\)`) + c.Assert(suite.TestMAASObject.TestServer.OwnedNodes()["mysystemid"], Equals, true) +} + +func (suite *TestMAASObjectSuite) TestUploadFile(c *C) { + const filename = "myfile.txt" + const fileContent = "uploaded contents" + files := suite.TestMAASObject.GetSubObject("files") + params := url.Values{"filename": {filename}} + filesMap := map[string][]byte{"file": []byte(fileContent)} + + // Upload a file. + _, err := files.CallPostFiles("add", params, filesMap) + c.Assert(err, IsNil) + + // The file can now be downloaded. 
+ downloadedFile, err := files.CallGet("get", params) + c.Assert(err, IsNil) + bytes, err := downloadedFile.GetBytes() + c.Assert(err, IsNil) + c.Check(string(bytes), Equals, fileContent) +} + +func (suite *TestMAASObjectSuite) TestFileNamesMayContainSlashes(c *C) { + const filename = "filename/with/slashes/in/it" + const fileContent = "file contents" + files := suite.TestMAASObject.GetSubObject("files") + params := url.Values{"filename": {filename}} + filesMap := map[string][]byte{"file": []byte(fileContent)} + + _, err := files.CallPostFiles("add", params, filesMap) + c.Assert(err, IsNil) + + file, err := files.GetSubObject(filename).Get() + c.Assert(err, IsNil) + field, err := file.GetField("content") + c.Assert(err, IsNil) + c.Check(field, Equals, base64.StdEncoding.EncodeToString([]byte(fileContent))) +} + +func (suite *TestMAASObjectSuite) TestAcquireNodeGrabsAvailableNode(c *C) { + input := `{"system_id": "nodeid"}` + suite.TestMAASObject.TestServer.NewNode(input) + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + + jsonResponse, err := nodesObj.CallPost("acquire", nil) + c.Assert(err, IsNil) + + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + systemID, err := acquiredNode.GetField("system_id") + c.Assert(err, IsNil) + c.Check(systemID, Equals, "nodeid") + _, owned := suite.TestMAASObject.TestServer.OwnedNodes()[systemID] + c.Check(owned, Equals, true) +} + +func (suite *TestMAASObjectSuite) TestAcquireNodeNeedsANode(c *C) { + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + _, err := nodesObj.CallPost("acquire", nil) + c.Check(err.(ServerError).StatusCode, Equals, http.StatusConflict) +} + +func (suite *TestMAASObjectSuite) TestAcquireNodeIgnoresOwnedNodes(c *C) { + input := `{"system_id": "nodeid"}` + suite.TestMAASObject.TestServer.NewNode(input) + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + // Ensure that the one node in the MAAS is not available. 
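+ // Acquiring it once marks it as owned.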
+ _, err := nodesObj.CallPost("acquire", nil) + c.Assert(err, IsNil) + + _, err = nodesObj.CallPost("acquire", nil) + c.Check(err.(ServerError).StatusCode, Equals, http.StatusConflict) +} + +func (suite *TestMAASObjectSuite) TestReleaseNodeReleasesAcquiredNode(c *C) { + input := `{"system_id": "nodeid"}` + suite.TestMAASObject.TestServer.NewNode(input) + nodesObj := suite.TestMAASObject.GetSubObject("nodes/") + jsonResponse, err := nodesObj.CallPost("acquire", nil) + c.Assert(err, IsNil) + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + systemID, err := acquiredNode.GetField("system_id") + c.Assert(err, IsNil) + nodeObj := nodesObj.GetSubObject(systemID) + + _, err = nodeObj.CallPost("release", nil) + c.Assert(err, IsNil) + _, owned := suite.TestMAASObject.TestServer.OwnedNodes()[systemID] + c.Check(owned, Equals, false) +} + +func (suite *TestMAASObjectSuite) TestGetNetworks(c *C) { + nodeJSON := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(nodeJSON) + networkJSON := `{"name": "mynetworkname", "ip": "0.1.2.0", "netmask": "255.255.255.0"}` + suite.TestMAASObject.TestServer.NewNetwork(networkJSON) + suite.TestMAASObject.TestServer.ConnectNodeToNetwork("mysystemid", "mynetworkname") + + networkMethod := suite.TestMAASObject.GetSubObject("networks") + params := url.Values{"node": []string{"mysystemid"}} + listNetworkObjects, err := networkMethod.CallGet("", params) + c.Assert(err, IsNil) + + networkJSONArray, err := listNetworkObjects.GetArray() + c.Assert(err, IsNil) + c.Check(networkJSONArray, HasLen, 1) + + listNetworks, err := networkJSONArray[0].GetMAASObject() + c.Assert(err, IsNil) + + networkName, err := listNetworks.GetField("name") + c.Assert(err, IsNil) + ip, err := listNetworks.GetField("ip") + c.Assert(err, IsNil) + netmask, err := listNetworks.GetField("netmask") + c.Assert(err, IsNil) + c.Check(networkName, Equals, "mynetworkname") + c.Check(ip, Equals, "0.1.2.0") + c.Check(netmask, Equals, "255.255.255.0") +} + +func (suite *TestMAASObjectSuite) TestGetNetworksNone(c *C) { + nodeJSON := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(nodeJSON) + + networkMethod := suite.TestMAASObject.GetSubObject("networks") + params := url.Values{"node": []string{"mysystemid"}} + listNetworkObjects, err := networkMethod.CallGet("", params) + c.Assert(err, IsNil) + + networkJSONArray, err := listNetworkObjects.GetArray() + c.Assert(err, IsNil) + c.Check(networkJSONArray, HasLen, 0) +} + +func (suite *TestMAASObjectSuite) TestListNodesWithNetworks(c *C) { + nodeJSON := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(nodeJSON) + networkJSON := `{"name": "mynetworkname", "ip": "0.1.2.0", "netmask": "255.255.255.0"}` + suite.TestMAASObject.TestServer.NewNetwork(networkJSON) + suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("mysystemid", "mynetworkname", "aa:bb:cc:dd:ee:ff") + + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) + c.Assert(err, IsNil) + + listNodes, err := listNodeObjects.GetArray() + c.Assert(err, IsNil) + c.Check(listNodes, HasLen, 1) + + node, err := listNodes[0].GetMAASObject() + c.Assert(err, IsNil) + systemId, err := node.GetField("system_id") + c.Assert(err, IsNil) + c.Check(systemId, Equals, "mysystemid") + + gotResourceURI, err := node.GetField(resourceURI) + c.Assert(err, IsNil) + apiVersion := suite.TestMAASObject.TestServer.version + expectedResourceURI := 
fmt.Sprintf("/api/%s/nodes/mysystemid/", apiVersion) + c.Check(gotResourceURI, Equals, expectedResourceURI) + + macAddressSet, err := node.GetMap()["macaddress_set"].GetArray() + c.Assert(err, IsNil) + c.Check(macAddressSet, HasLen, 1) + + macAddress, err := macAddressSet[0].GetMap() + c.Assert(err, IsNil) + macAddressString, err := macAddress["mac_address"].GetString() + c.Check(macAddressString, Equals, "aa:bb:cc:dd:ee:ff") + + gotResourceURI, err = macAddress[resourceURI].GetString() + c.Assert(err, IsNil) + expectedResourceURI = fmt.Sprintf("/api/%s/nodes/mysystemid/macs/%s/", apiVersion, url.QueryEscape("aa:bb:cc:dd:ee:ff")) + c.Check(gotResourceURI, Equals, expectedResourceURI) +} + +func (suite *TestMAASObjectSuite) TestListNetworkConnectedMACAddresses(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "node_1"}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "node_2"}`) + suite.TestMAASObject.TestServer.NewNetwork( + `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, + ) + suite.TestMAASObject.TestServer.NewNetwork( + `{"name": "net_2", "ip": "0.2.2.0", "netmask": "255.255.255.0"}`, + ) + suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_2", "net_2", "aa:bb:cc:dd:ee:22") + suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_1", "net_1", "aa:bb:cc:dd:ee:11") + suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_2", "net_1", "aa:bb:cc:dd:ee:21") + suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_1", "net_2", "aa:bb:cc:dd:ee:12") + + nodeListing := suite.TestMAASObject.GetSubObject("networks").GetSubObject("net_1") + listNodeObjects, err := nodeListing.CallGet("list_connected_macs", url.Values{}) + c.Assert(err, IsNil) + + listNodes, err := listNodeObjects.GetArray() + c.Assert(err, IsNil) + c.Check(listNodes, HasLen, 2) + + node, err := listNodes[0].GetMAASObject() + c.Assert(err, IsNil) + macAddress, err := node.GetField("mac_address") + c.Assert(err, IsNil) + c.Check(macAddress == "aa:bb:cc:dd:ee:11" || macAddress == "aa:bb:cc:dd:ee:21", Equals, true) + node1_idx := 0 + if macAddress == "aa:bb:cc:dd:ee:21" { + node1_idx = 1 + } + + node, err = listNodes[node1_idx].GetMAASObject() + c.Assert(err, IsNil) + macAddress, err = node.GetField("mac_address") + c.Assert(err, IsNil) + c.Check(macAddress, Equals, "aa:bb:cc:dd:ee:11") + nodeResourceURI, err := node.GetField(resourceURI) + c.Assert(err, IsNil) + apiVersion := suite.TestMAASObject.TestServer.version + expectedResourceURI := fmt.Sprintf("/api/%s/nodes/node_1/macs/%s/", apiVersion, url.QueryEscape("aa:bb:cc:dd:ee:11")) + c.Check(nodeResourceURI, Equals, expectedResourceURI) + + node, err = listNodes[1-node1_idx].GetMAASObject() + c.Assert(err, IsNil) + macAddress, err = node.GetField("mac_address") + c.Assert(err, IsNil) + c.Check(macAddress, Equals, "aa:bb:cc:dd:ee:21") + nodeResourceURI, err = node.GetField(resourceURI) + c.Assert(err, IsNil) + expectedResourceURI = fmt.Sprintf("/api/%s/nodes/node_2/macs/%s/", apiVersion, url.QueryEscape("aa:bb:cc:dd:ee:21")) + c.Check(nodeResourceURI, Equals, expectedResourceURI) +} + +func (suite *TestMAASObjectSuite) TestGetVersion(c *C) { + networkMethod := suite.TestMAASObject.GetSubObject("version") + params := url.Values{"node": []string{"mysystemid"}} + versionObject, err := networkMethod.CallGet("", params) + c.Assert(err, IsNil) + + versionMap, err := versionObject.GetMap() + c.Assert(err, IsNil) + jsonArray, ok := versionMap["capabilities"] + c.Check(ok, 
Equals, true) + capArray, err := jsonArray.GetArray() + c.Assert(err, IsNil) + for _, capJSONName := range capArray { + capName, err := capJSONName.GetString() + c.Assert(err, IsNil) + switch capName { + case "networks-management": + case "static-ipaddresses": + case "devices-management": + case "network-deployment-ubuntu": + default: + c.Fatalf("unknown capability %q", capName) + } + } +} + +func (suite *TestMAASObjectSuite) assertIPAmong(c *C, jsonObjIP JSONObject, expectIPs ...string) { + apiVersion := suite.TestMAASObject.TestServer.version + expectedURI := getIPAddressesEndpoint(apiVersion) + + maasObj, err := jsonObjIP.GetMAASObject() + c.Assert(err, IsNil) + attrs := maasObj.GetMap() + uri, err := attrs["resource_uri"].GetString() + c.Assert(err, IsNil) + c.Assert(uri, Equals, expectedURI) + allocType, err := attrs["alloc_type"].GetFloat64() + c.Assert(err, IsNil) + c.Assert(allocType, Equals, 4.0) + created, err := attrs["created"].GetString() + c.Assert(err, IsNil) + c.Assert(created, Not(Equals), "") + ip, err := attrs["ip"].GetString() + c.Assert(err, IsNil) + if !contains(expectIPs, ip) { + c.Fatalf("expected IP in %v, got %q", expectIPs, ip) + } +} + +func (suite *TestMAASObjectSuite) TestListIPAddresses(c *C) { + ipAddresses := suite.TestMAASObject.GetSubObject("ipaddresses") + + // First try without any networks and IPs. + listIPObjects, err := ipAddresses.CallGet("", url.Values{}) + c.Assert(err, IsNil) + items, err := listIPObjects.GetArray() + c.Assert(err, IsNil) + c.Assert(items, HasLen, 0) + + // Add two networks and some addresses to each one. + suite.TestMAASObject.TestServer.NewNetwork( + `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, + ) + suite.TestMAASObject.TestServer.NewNetwork( + `{"name": "net_2", "ip": "0.2.2.0", "netmask": "255.255.255.0"}`, + ) + suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.3", "net_1") + suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.4", "net_1") + suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.5", "net_1") + suite.TestMAASObject.TestServer.NewIPAddress("0.2.2.3", "net_2") + suite.TestMAASObject.TestServer.NewIPAddress("0.2.2.4", "net_2") + + // List all addresses and verify the needed response fields are set. + listIPObjects, err = ipAddresses.CallGet("", url.Values{}) + c.Assert(err, IsNil) + items, err = listIPObjects.GetArray() + c.Assert(err, IsNil) + c.Assert(items, HasLen, 5) + + for _, ipObj := range items { + suite.assertIPAmong( + c, ipObj, + "0.1.2.3", "0.1.2.4", "0.1.2.5", "0.2.2.3", "0.2.2.4", + ) + } + + // Remove all net_1 IPs. + removed := suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.3") + c.Assert(removed, Equals, true) + removed = suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.4") + c.Assert(removed, Equals, true) + removed = suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.5") + c.Assert(removed, Equals, true) + // Remove the last IP twice, should be OK and return false. + removed = suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.5") + c.Assert(removed, Equals, false) + + // List again.
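+ // Only the two net_2 addresses should remain.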
+ listIPObjects, err = ipAddresses.CallGet("", url.Values{}) + c.Assert(err, IsNil) + items, err = listIPObjects.GetArray() + c.Assert(err, IsNil) + c.Assert(items, HasLen, 2) + for _, ipObj := range items { + suite.assertIPAmong( + c, ipObj, + "0.2.2.3", "0.2.2.4", + ) + } +} + +func (suite *TestMAASObjectSuite) TestReserveIPAddress(c *C) { + suite.TestMAASObject.TestServer.NewNetwork( + `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, + ) + ipAddresses := suite.TestMAASObject.GetSubObject("ipaddresses") + // First try "reserve" with requested_address set. + params := url.Values{"network": []string{"0.1.2.0/24"}, "requested_address": []string{"0.1.2.42"}} + res, err := ipAddresses.CallPost("reserve", params) + c.Assert(err, IsNil) + suite.assertIPAmong(c, res, "0.1.2.42") + + // Now try "reserve" without requested_address. + delete(params, "requested_address") + res, err = ipAddresses.CallPost("reserve", params) + c.Assert(err, IsNil) + suite.assertIPAmong(c, res, "0.1.2.2") +} + +func (suite *TestMAASObjectSuite) TestReleaseIPAddress(c *C) { + suite.TestMAASObject.TestServer.NewNetwork( + `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, + ) + suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.3", "net_1") + ipAddresses := suite.TestMAASObject.GetSubObject("ipaddresses") + + // Try with non-existing address - should return 404. + params := url.Values{"ip": []string{"0.2.2.1"}} + _, err := ipAddresses.CallPost("release", params) + c.Assert(err, ErrorMatches, `(\n|.)*404 Not Found(\n|.)*`) + + // Now with existing one - all OK. + params = url.Values{"ip": []string{"0.1.2.3"}} + _, err = ipAddresses.CallPost("release", params) + c.Assert(err, IsNil) + + // Ensure it got removed. + c.Assert(suite.TestMAASObject.TestServer.ipAddressesPerNetwork["net_1"], HasLen, 0) + + // Try again, should return 404. 
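+ // The address was already released above, so a second release must fail.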
+ _, err = ipAddresses.CallPost("release", params) + c.Assert(err, ErrorMatches, `(\n|.)*404 Not Found(\n|.)*`) +} + +// nodeDetailsXML is a minimal lshw-style fixture (reconstructed here; the +// original markup was lost in transit). The test only compares it verbatim. +const nodeDetailsXML = ` +<list> + <node>Computer</node> +</list> +` + +func (suite *TestMAASObjectSuite) TestNodeDetails(c *C) { + nodeJSON := `{"system_id": "mysystemid"}` + suite.TestMAASObject.TestServer.NewNode(nodeJSON) + suite.TestMAASObject.TestServer.AddNodeDetails("mysystemid", nodeDetailsXML) + + obj := suite.TestMAASObject.GetSubObject("nodes").GetSubObject("mysystemid") + uri := obj.URI() + result, err := obj.client.Get(uri, "details", nil) + c.Assert(err, IsNil) + + bsonObj := map[string]interface{}{} + err = bson.Unmarshal(result, &bsonObj) + c.Assert(err, IsNil) + + _, ok := bsonObj["lldp"] + c.Check(ok, Equals, true) + gotXMLText, ok := bsonObj["lshw"] + c.Check(ok, Equals, true) + c.Check(string(gotXMLText.([]byte)), Equals, string(nodeDetailsXML)) +} + +func (suite *TestMAASObjectSuite) TestListNodegroups(c *C) { + suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) + suite.TestMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "precise"}`) + + nodegroupListing := suite.TestMAASObject.GetSubObject("nodegroups") + result, err := nodegroupListing.CallGet("list", nil) + c.Assert(err, IsNil) + + nodegroups, err := result.GetArray() + c.Assert(err, IsNil) + c.Check(nodegroups, HasLen, 2) + + for _, obj := range nodegroups { + nodegroup, err := obj.GetMAASObject() + c.Assert(err, IsNil) + uuid, err := nodegroup.GetField("uuid") + c.Assert(err, IsNil) + + nodegroupResourceURI, err := nodegroup.GetField(resourceURI) + c.Assert(err, IsNil) + apiVersion := suite.TestMAASObject.TestServer.version + expectedResourceURI := fmt.Sprintf("/api/%s/nodegroups/%s/", apiVersion, uuid) + c.Check(nodegroupResourceURI, Equals, expectedResourceURI) + } +} + +func (suite *TestMAASObjectSuite) TestListNodegroupsEmptyList(c *C) { + nodegroupListing := suite.TestMAASObject.GetSubObject("nodegroups") + result, err := nodegroupListing.CallGet("list", nil) + c.Assert(err, IsNil) + + nodegroups, err := result.GetArray() + c.Assert(err, IsNil) + c.Check(nodegroups, HasLen, 0) +} + +func (suite *TestMAASObjectSuite) TestListNodegroupInterfaces(c *C) { + suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) + jsonText := `{ + "ip_range_high": "172.16.0.128", + "ip_range_low": "172.16.0.2", + "broadcast_ip": "172.16.0.255", + "static_ip_range_low": "172.16.0.129", + "name": "eth0", + "ip": "172.16.0.2", + "subnet_mask": "255.255.255.0", + "management": 2, + "static_ip_range_high": "172.16.0.255", + "interface": "eth0" + }` + + suite.TestMAASObject.TestServer.NewNodegroupInterface("uuid-0", jsonText) + nodegroupsInterfacesListing := suite.TestMAASObject.GetSubObject("nodegroups").GetSubObject("uuid-0").GetSubObject("interfaces") + result, err := nodegroupsInterfacesListing.CallGet("list", nil) + c.Assert(err, IsNil) + + nodegroupsInterfaces, err := result.GetArray() + c.Assert(err, IsNil) + c.Check(nodegroupsInterfaces, HasLen, 1) + + nodegroupsInterface, err := nodegroupsInterfaces[0].GetMap() + c.Assert(err, IsNil) + + checkMember := func(member, expectedValue string) { + value, err := nodegroupsInterface[member].GetString() + c.Assert(err, IsNil) + c.Assert(value, Equals, expectedValue) + } + checkMember("ip_range_high", "172.16.0.128") + checkMember("ip_range_low", "172.16.0.2") + checkMember("broadcast_ip", "172.16.0.255") + checkMember("static_ip_range_low", "172.16.0.129") + 
checkMember("static_ip_range_high", "172.16.0.255") + checkMember("name", "eth0") + checkMember("ip", "172.16.0.2") + checkMember("subnet_mask", "255.255.255.0") + checkMember("interface", "eth0") + + value, err := nodegroupsInterface["management"].GetFloat64() + c.Assert(err, IsNil) + c.Assert(value, Equals, 2.0) +} + +func (suite *TestMAASObjectSuite) TestListNodegroupsInterfacesEmptyList(c *C) { + suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) + nodegroupsInterfacesListing := suite.TestMAASObject.GetSubObject("nodegroups").GetSubObject("uuid-0").GetSubObject("interfaces") + result, err := nodegroupsInterfacesListing.CallGet("list", nil) + c.Assert(err, IsNil) + + interfaces, err := result.GetArray() + c.Assert(err, IsNil) + c.Check(interfaces, HasLen, 0) +} + +func (suite *TestMAASObjectSuite) TestListBootImages(c *C) { + suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) + suite.TestMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "precise"}`) + suite.TestMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "ppc64el", "release": "precise"}`) + + bootImageListing := suite.TestMAASObject.GetSubObject("nodegroups").GetSubObject("uuid-1").GetSubObject("boot-images") + result, err := bootImageListing.CallGet("", nil) + c.Assert(err, IsNil) + + bootImageObjects, err := result.GetArray() + c.Assert(err, IsNil) + c.Check(bootImageObjects, HasLen, 2) + + expectedBootImages := []string{"amd64.precise", "ppc64el.precise"} + bootImages := make([]string, len(bootImageObjects)) + for i, obj := range bootImageObjects { + bootimage, err := obj.GetMap() + c.Assert(err, IsNil) + architecture, err := bootimage["architecture"].GetString() + c.Assert(err, IsNil) + release, err := bootimage["release"].GetString() + c.Assert(err, IsNil) + bootImages[i] = fmt.Sprintf("%s.%s", architecture, release) + } + sort.Strings(bootImages) + c.Assert(bootImages, DeepEquals, expectedBootImages) +} + +func (suite *TestMAASObjectSuite) TestListZones(c *C) { + expected := map[string]string{ + "zone0": "zone0 is very nice", + "zone1": "zone1 is much nicer than zone0", + } + for name, desc := range expected { + suite.TestMAASObject.TestServer.AddZone(name, desc) + } + + result, err := suite.TestMAASObject.GetSubObject("zones").CallGet("", nil) + c.Assert(err, IsNil) + c.Assert(result, NotNil) + + list, err := result.GetArray() + c.Assert(err, IsNil) + c.Assert(list, HasLen, len(expected)) + + m := make(map[string]string) + for _, item := range list { + itemMap, err := item.GetMap() + c.Assert(err, IsNil) + name, err := itemMap["name"].GetString() + c.Assert(err, IsNil) + desc, err := itemMap["description"].GetString() + c.Assert(err, IsNil) + m[name] = desc + } + c.Assert(m, DeepEquals, expected) +} + +func (suite *TestMAASObjectSuite) TestAcquireNodeZone(c *C) { + suite.TestMAASObject.TestServer.AddZone("z0", "rox") + suite.TestMAASObject.TestServer.AddZone("z1", "sux") + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "zone": "z0"}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "zone": "z1"}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n2", "zone": "z1"}`) + nodesObj := suite.TestMAASObject.GetSubObject("nodes") + + acquire := func(zone string) (string, string, error) { + var params url.Values + if zone != "" { + params = url.Values{"zone": []string{zone}} + } + jsonResponse, err := nodesObj.CallPost("acquire", params) + if 
err != nil { + return "", "", err + } + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + systemId, err := acquiredNode.GetField("system_id") + c.Assert(err, IsNil) + assignedZone, err := acquiredNode.GetField("zone") + c.Assert(err, IsNil) + if zone != "" { + c.Assert(assignedZone, Equals, zone) + } + return systemId, assignedZone, nil + } + + id, _, err := acquire("z0") + c.Assert(err, IsNil) + c.Assert(id, Equals, "n0") + id, _, err = acquire("z0") + c.Assert(err.(ServerError).StatusCode, Equals, http.StatusConflict) + + id, zone, err := acquire("") + c.Assert(err, IsNil) + c.Assert(id, Not(Equals), "n0") + c.Assert(zone, Equals, "z1") +} + +func (suite *TestMAASObjectSuite) TestAcquireFilterMemory(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "memory": 1024}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "memory": 2048}`) + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + jsonResponse, err := nodeListing.CallPost("acquire", url.Values{"mem": []string{"2048"}}) + c.Assert(err, IsNil) + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + mem, err := acquiredNode.GetMap()["memory"].GetFloat64() + c.Assert(err, IsNil) + c.Assert(mem, Equals, float64(2048)) +} + +func (suite *TestMAASObjectSuite) TestAcquireFilterCpuCores(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "cpu_count": 1}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "cpu_count": 2}`) + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + jsonResponse, err := nodeListing.CallPost("acquire", url.Values{"cpu-cores": []string{"2"}}) + c.Assert(err, IsNil) + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + cpucount, err := acquiredNode.GetMap()["cpu_count"].GetFloat64() + c.Assert(err, IsNil) + c.Assert(cpucount, Equals, float64(2)) +} + +func (suite *TestMAASObjectSuite) TestAcquireFilterArch(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "architecture": "amd64"}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "architecture": "arm/generic"}`) + nodeListing := suite.TestMAASObject.GetSubObject("nodes") + jsonResponse, err := nodeListing.CallPost("acquire", url.Values{"arch": []string{"arm"}}) + c.Assert(err, IsNil) + acquiredNode, err := jsonResponse.GetMAASObject() + c.Assert(err, IsNil) + arch, _ := acquiredNode.GetField("architecture") + c.Assert(arch, Equals, "arm/generic") +} + +func (suite *TestMAASObjectSuite) TestDeploymentStatus(c *C) { + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "status": "6"}`) + suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "status": "1"}`) + nodes := suite.TestMAASObject.GetSubObject("nodes") + jsonResponse, err := nodes.CallGet("deployment_status", url.Values{"nodes": []string{"n0", "n1"}}) + c.Assert(err, IsNil) + deploymentStatus, err := jsonResponse.GetMap() + c.Assert(err, IsNil) + c.Assert(deploymentStatus, HasLen, 2) + expectedStatus := map[string]string{ + "n0": "Deployed", "n1": "Not in Deployment", + } + for systemId, status := range expectedStatus { + nodeStatus, err := deploymentStatus[systemId].GetString() + c.Assert(err, IsNil) + c.Assert(nodeStatus, Equals, status) + } +} === added file 'src/github.com/juju/gomaasapi/testservice_utils.go' --- src/github.com/juju/gomaasapi/testservice_utils.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/testservice_utils.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +// Copyright 2015 
Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "errors" + "net" + "net/http" + "strconv" +) + +// NameOrIDToID takes a string that contains either an integer ID or the +// name of a thing. It returns the integer ID contained in the string, or the +// ID the name maps to; it returns an error if the value cannot be resolved +// or the resulting ID falls outside [minID, maxID]. +func NameOrIDToID(v string, nameToID map[string]uint, minID, maxID uint) (ID uint, err error) { + ID, ok := nameToID[v] + if !ok { + intID, err := strconv.Atoi(v) + if err != nil { + return 0, err + } + ID = uint(intID) + } + + if ID < minID || ID > maxID { + return 0, errors.New("ID out of range") + } + + return ID, nil +} + +// IP is an enhanced net.IP +type IP struct { + netIP net.IP + Purpose []string +} + +// IPFromNetIP creates an IP from a net.IP. +func IPFromNetIP(netIP net.IP) IP { + var ip IP + ip.netIP = netIP + return ip +} + +// IPFromString creates a new IP from a string IP address representation +func IPFromString(v string) IP { + return IPFromNetIP(net.ParseIP(v)) +} + +// IPFromInt64 creates a new IP from a uint64 IP address representation +func IPFromInt64(v uint64) IP { + var ip IP + ip.SetUInt64(v) + return ip +} + +// To4 converts the IPv4 address ip to a 4-byte representation. If ip is not +// an IPv4 address, To4 returns nil. +func (ip IP) To4() net.IP { + return ip.netIP.To4() +} + +// To16 converts the IP address ip to a 16-byte representation. If ip is not +// an IP address (it is the wrong length), To16 returns nil. +func (ip IP) To16() net.IP { + return ip.netIP.To16() +} + +func (ip IP) String() string { + return ip.netIP.String() +} + +// UInt64 returns a uint64 holding the IP address +func (ip IP) UInt64() uint64 { + if len(ip.netIP) == 0 { + return uint64(0) + } + + if ip.To4() != nil { + return uint64(binary.BigEndian.Uint32([]byte(ip.To4()))) + } + + return binary.BigEndian.Uint64([]byte(ip.To16())) +} + +// SetUInt64 sets the IP value to v +func (ip *IP) SetUInt64(v uint64) { + if len(ip.netIP) == 0 { + // If we don't have allocated storage, make an educated guess + // at whether the address we received is an IPv4 or IPv6 address. + if v == (v & 0x00000000ffffFFFF) { + // Guessing IPv4 + ip.netIP = net.ParseIP("0.0.0.0") + } else { + ip.netIP = net.ParseIP("2001:4860:0:2001::68") + } + } + + bb := new(bytes.Buffer) + var first int + if ip.To4() != nil { + binary.Write(bb, binary.BigEndian, uint32(v)) + first = len(ip.netIP) - 4 + } else { + binary.Write(bb, binary.BigEndian, v) + } + copy(ip.netIP[first:], bb.Bytes()) +} + +func PrettyJsonWriter(thing interface{}, w http.ResponseWriter) { + var out bytes.Buffer + b, err := json.MarshalIndent(thing, "", " ") + checkError(err) + out.Write(b) + out.WriteTo(w) +} === added file 'src/github.com/juju/gomaasapi/testservice_vlan.go' --- src/github.com/juju/gomaasapi/testservice_vlan.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/testservice_vlan.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,33 @@ +// Copyright 2015 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING).
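+// +// This file declares the VLAN types used by the MAAS test service; the +// vlans endpoint handler below is still a stub.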
+ +package gomaasapi + +import ( + "fmt" + "net/http" +) + +func getVLANsEndpoint(version string) string { + return fmt.Sprintf("/api/%s/vlans/", version) +} + +// VLAN is the MAAS API VLAN representation +type VLAN struct { + Name string `json:"name"` + Fabric string `json:"fabric"` + VID uint `json:"vid"` + + ResourceURI string `json:"resource_uri"` + ID uint `json:"id"` +} + +// PostedVLAN is the MAAS API posted VLAN representation +type PostedVLAN struct { + Name string `json:"name"` + VID uint `json:"vid"` +} + +func vlansHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { + //TODO +} === added file 'src/github.com/juju/gomaasapi/util.go' --- src/github.com/juju/gomaasapi/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + "strings" +) + +// JoinURLs joins a base URL and a subpath together. +// Regardless of whether baseURL ends in a trailing slash (or even multiple +// trailing slashes), or whether there are any leading slashes at the beginning +// of path, the two will always be joined together by a single slash. +func JoinURLs(baseURL, path string) string { + return strings.TrimRight(baseURL, "/") + "/" + strings.TrimLeft(path, "/") +} + +// EnsureTrailingSlash appends a slash at the end of the given string unless +// there already is one. +// This is used to create the kind of normalized URLs that Django expects +// (it avoids Django's redirect when a URL does not end with a slash). +func EnsureTrailingSlash(URL string) string { + if strings.HasSuffix(URL, "/") { + return URL + } + return URL + "/" +} === added file 'src/github.com/juju/gomaasapi/util_test.go' --- src/github.com/juju/gomaasapi/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/gomaasapi/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,32 @@ +// Copyright 2013 Canonical Ltd. This software is licensed under the +// GNU Lesser General Public License version 3 (see the file COPYING). + +package gomaasapi + +import ( + . "gopkg.in/check.v1" +) + +func (suite *GomaasapiTestSuite) TestJoinURLsAppendsPathToBaseURL(c *C) { + c.Check(JoinURLs("http://example.com/", "foo"), Equals, "http://example.com/foo") +} + +func (suite *GomaasapiTestSuite) TestJoinURLsAddsSlashIfNeeded(c *C) { + c.Check(JoinURLs("http://example.com/foo", "bar"), Equals, "http://example.com/foo/bar") +} + +func (suite *GomaasapiTestSuite) TestJoinURLsNormalizesDoubleSlash(c *C) { + c.Check(JoinURLs("http://example.com/base/", "/szot"), Equals, "http://example.com/base/szot") +} + +func (suite *GomaasapiTestSuite) TestEnsureTrailingSlashAppendsSlashIfMissing(c *C) { + c.Check(EnsureTrailingSlash("test"), Equals, "test/") +} + +func (suite *GomaasapiTestSuite) TestEnsureTrailingSlashDoesNotAppendIfPresent(c *C) { + c.Check(EnsureTrailingSlash("test/"), Equals, "test/") +} + +func (suite *GomaasapiTestSuite) TestEnsureTrailingSlashReturnsSlashIfEmpty(c *C) { + c.Check(EnsureTrailingSlash(""), Equals, "/") +} === added directory 'src/github.com/juju/httprequest' === added file 'src/github.com/juju/httprequest/LICENSE' --- src/github.com/juju/httprequest/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,185 @@ +This software is licensed under the LGPLv3, included below.
+ +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. === added file 'src/github.com/juju/httprequest/README.md' --- src/github.com/juju/httprequest/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,217 @@ +# httprequest +-- + import "github.com/juju/httprequest" + +Package httprequest provides functionality for unmarshaling HTTP request +parameters into a struct type. + +Please note that the API is not considered stable at this point and may be +changed in a backwardly incompatible manner at any time. + +## Usage + +```go +var ( + ErrUnmarshal = errgo.New("httprequest unmarshal error") + ErrBadUnmarshalType = errgo.New("httprequest bad unmarshal type") +) +``` + +#### func Marshal + +```go +func Marshal(baseURL, method string, x interface{}) (*http.Request, error) +``` +Marshal is the counterpart of Unmarshal. It takes information from x, which must +be a pointer to a struct, and returns an HTTP request using the given method +that holds all of the information + +The HTTP request will use the given method. Named fields in the given baseURL +will be filled out from "path"-tagged fields in x to form the URL path in the +returned request. These are specified as for httprouter. + +If a field in baseURL is a suffix of the form "*var" (a trailing wildcard +element that holds the rest of the path), the marshaled string must begin with a +"/". 
This matches the httprouter convention that it always returns such fields
+with a "/" prefix.
+
+If a field is of type string or []string, the value of the field will be used
+directly; otherwise, if it implements encoding.TextMarshaler, that will be
+used to marshal the field; otherwise fmt.Sprint will be used.
+
+For example, this code:
+
+    type UserDetails struct {
+        Age int
+    }
+
+    type Test struct {
+        Username string `httprequest:"user,path"`
+        ContextId int64 `httprequest:"context,form"`
+        Details UserDetails `httprequest:",body"`
+    }
+    req, err := Marshal("http://example.com/users/:user/details", "GET", &Test{
+        Username: "bob",
+        ContextId: 1234,
+        Details: UserDetails{
+            Age: 36,
+        },
+    })
+    if err != nil {
+        ...
+    }
+
+will produce an HTTP request req with a URL of
+http://example.com/users/bob/details?context=1234 and a JSON-encoded body
+holding `{"Age":36}`.
+
+It is an error if there is a field specified in the URL that is not found in x.
+
+#### func ToHTTP
+
+```go
+func ToHTTP(h httprouter.Handle) http.Handler
+```
+ToHTTP converts an httprouter.Handle into an http.Handler. It will pass no path
+variables to h.
+
+#### func Unmarshal
+
+```go
+func Unmarshal(p Params, x interface{}) error
+```
+Unmarshal takes values from the given parameters and fills out fields in x,
+which must be a pointer to a struct.
+
+Tags on the struct's fields determine where each field is filled in from.
+Similar to encoding/json and other encoding packages, the tag holds a
+comma-separated list. The first item in the list is an alternative name for the
+field (the field name itself will be used if this is empty). The next item
+specifies where the field is filled in from. It may be:
+
+    "path" - the field is taken from a parameter in p.PathVar
+        with a matching field name.
+
+    "form" - the field is taken from the given name in p.Form
+        (note that this covers both URL query parameters and
+        POST form parameters)
+
+    "body" - the field is filled in by parsing the request body
+        as JSON.
+
+For path and form parameters, the field will be filled out from the field in
+p.PathVar or p.Form using one of the following methods (in descending order of
+preference):
+
+- if the type is string, it will be set from the first value.
+
+- if the type is []string, it will be filled out using all values for that
+field (allowed only for form).
+
+- if the type implements encoding.TextUnmarshaler, its UnmarshalText method
+will be used.
+
+- otherwise fmt.Sscan will be used to set the value.
+
+When the unmarshaling fails, Unmarshal returns an error with an ErrUnmarshal
+cause. If the type of x is inappropriate, it returns an error with an
+ErrBadUnmarshalType cause.
+
+#### func WriteJSON
+
+```go
+func WriteJSON(w http.ResponseWriter, code int, val interface{}) error
+```
+WriteJSON writes the given value to the ResponseWriter and sets the HTTP status
+to the given code.
+
+#### type ErrorHandler
+
+```go
+type ErrorHandler func(http.ResponseWriter, Params) error
+```
+
+ErrorHandler is like httprouter.Handle except that it returns an error which
+may be returned as the error body of the response. An ErrorHandler function
+should not itself write to the ResponseWriter if it returns an error.
+
+#### type ErrorMapper
+
+```go
+type ErrorMapper func(err error) (httpStatus int, errorBody interface{})
+```
+
+ErrorMapper holds a function that can convert a Go error into a form that can
+be returned as a JSON body from an HTTP request. The httpStatus value reports
+the desired HTTP status.
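+
+For example, a minimal error mapper might look like this (an illustrative
+sketch only; reporting every error as a 500 and reusing the RemoteError
+shape for the body are just one possible choice):
+
+```go
+var errorMapper httprequest.ErrorMapper = func(err error) (int, interface{}) {
+	// Report any error as an internal server error, with a
+	// simple JSON body holding the error's message.
+	return http.StatusInternalServerError, &httprequest.RemoteError{
+		Message: err.Error(),
+	}
+}
+```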
+ +#### func (ErrorMapper) Handle + +```go +func (e ErrorMapper) Handle(f interface{}) httprouter.Handle +``` +Handle converts a function into an httprouter.Handle. The argument f must be a +function of one of the following three forms, where ArgT must be a struct type +acceptable to Unmarshal and ResultT is a type that can be marshaled as JSON: + + func(w http.ResponseWriter, p Params, arg *ArgT) + func(w http.ResponseWriter, p Params, arg *ArgT) error + func(header http.Header, p Params, arg *ArgT) (ResultT, error) + +When processing a call to the returned handler, the provided parameters are +unmarshaled into a new ArgT value using Unmarshal, then f is called with this +value. If the unmarshaling fails, f will not be called and the unmarshal error +will be written as a JSON response. + +If an error is returned from f, it is passed through the error mapper before +writing as a JSON response. + +In the third form, when no error is returned, the result is written as a JSON +response with status http.StatusOK. + +Handle will panic if the provided function is not in one of the above forms. + +#### func (ErrorMapper) HandleErrors + +```go +func (e ErrorMapper) HandleErrors(handle ErrorHandler) httprouter.Handle +``` +HandleErrors returns a handler that passes any non-nil error returned by handle +through the error mapper and writes it as a JSON response. + +#### func (ErrorMapper) HandleJSON + +```go +func (e ErrorMapper) HandleJSON(handle JSONHandler) httprouter.Handle +``` +HandleJSON returns a handler that writes the return value of handle as a JSON +response. If handle returns an error, it is passed through the error mapper. + +#### func (ErrorMapper) WriteError + +```go +func (e ErrorMapper) WriteError(w http.ResponseWriter, err error) +``` +WriteError writes an error to a ResponseWriter and sets the HTTP status code. + +#### type JSONHandler + +```go +type JSONHandler func(http.Header, Params) (interface{}, error) +``` + +JSONHandler is like httprouter.Handle except that it returns a body (to be +converted to JSON) and an error. The Header parameter can be used to set custom +headers on the response. + +#### type Params + +```go +type Params struct { + *http.Request + PathVar httprouter.Params +} +``` + +Params holds request parameters that can be unmarshaled into a struct. === added file 'src/github.com/juju/httprequest/bench_test.go' --- src/github.com/juju/httprequest/bench_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/bench_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,456 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
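+//
+// This file benchmarks tag-based unmarshaling (httprequest.Unmarshal and
+// the ErrorMapper handler wrappers) against hand-written ("Trad") parsing
+// of the same request parameters.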
+ +package httprequest_test + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strconv" + "testing" + "time" + + "github.com/julienschmidt/httprouter" + "gopkg.in/errgo.v1" + + "github.com/juju/httprequest" +) + +const dateFormat = "2006-01-02" + +type testResult struct { + Key string `json:",omitempty"` + Date string `json:",omitempty"` + Count int64 +} + +type testParams2Fields struct { + Id string `httprequest:"id,path"` + Limit int `httprequest:"limit,form"` +} + +type testParams4Fields struct { + Id string `httprequest:"id,path"` + Limit int `httprequest:"limit,form"` + From dateTime `httprequest:"from,form"` + To dateTime `httprequest:"to,form"` +} + +type dateTime struct { + time.Time +} + +func (dt *dateTime) UnmarshalText(b []byte) (err error) { + dt.Time, err = time.Parse(dateFormat, string(b)) + return +} + +type testParams2StringFields struct { + Field0 string `httprequest:",form"` + Field1 string `httprequest:",form"` +} + +type testParams4StringFields struct { + Field0 string `httprequest:",form"` + Field1 string `httprequest:",form"` + Field2 string `httprequest:",form"` + Field3 string `httprequest:",form"` +} + +type testParams8StringFields struct { + Field0 string `httprequest:",form"` + Field1 string `httprequest:",form"` + Field2 string `httprequest:",form"` + Field3 string `httprequest:",form"` + Field4 string `httprequest:",form"` + Field5 string `httprequest:",form"` + Field6 string `httprequest:",form"` + Field7 string `httprequest:",form"` +} + +type testParams16StringFields struct { + Field0 string `httprequest:",form"` + Field1 string `httprequest:",form"` + Field2 string `httprequest:",form"` + Field3 string `httprequest:",form"` + Field4 string `httprequest:",form"` + Field5 string `httprequest:",form"` + Field6 string `httprequest:",form"` + Field7 string `httprequest:",form"` + Field8 string `httprequest:",form"` + Field9 string `httprequest:",form"` + Field10 string `httprequest:",form"` + Field11 string `httprequest:",form"` + Field12 string `httprequest:",form"` + Field13 string `httprequest:",form"` + Field14 string `httprequest:",form"` + Field15 string `httprequest:",form"` +} + +func BenchmarkUnmarshal2Fields(b *testing.B) { + params := httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "limit": {"2000"}, + }, + }, + PathVar: httprouter.Params{{ + Key: "id", + Value: "someid", + }}, + } + var arg testParams2Fields + + b.ResetTimer() + for i := 0; i < b.N; i++ { + arg = testParams2Fields{} + err := httprequest.Unmarshal(params, &arg) + if err != nil { + b.Fatalf("unmarshal failed: %v", err) + } + } + b.StopTimer() + if !reflect.DeepEqual(arg, testParams2Fields{ + Id: "someid", + Limit: 2000, + }) { + b.Errorf("unexpected result: got %#v", arg) + } +} + +func BenchmarkHandle2FieldsTrad(b *testing.B) { + results := []testResult{} + benchmarkHandle2Fields(b, errorMapper.HandleJSON(func(p httprequest.Params) (interface{}, error) { + limit := -1 + if limitStr := p.Request.Form.Get("limit"); limitStr != "" { + var err error + limit, err = strconv.Atoi(limitStr) + if err != nil || limit <= 0 { + panic("unreachable") + } + } + if id := p.PathVar.ByName("id"); id == "" { + panic("unreachable") + } + return results, nil + })) +} + +func BenchmarkHandle2Fields(b *testing.B) { + results := []testResult{} + benchmarkHandle2Fields(b, errorMapper.Handle(func(p httprequest.Params, arg *testParams2Fields) ([]testResult, error) { + if arg.Limit <= 0 { + panic("unreachable") + } + return results, nil + }).Handle) +} + +func 
BenchmarkHandle2FieldsUnmarshalOnly(b *testing.B) { + results := []testResult{} + benchmarkHandle2Fields(b, errorMapper.HandleJSON(func(p httprequest.Params) (interface{}, error) { + var arg testParams2Fields + if err := httprequest.Unmarshal(p, &arg); err != nil { + return nil, err + } + if arg.Limit <= 0 { + panic("unreachable") + } + return results, nil + })) +} + +func benchmarkHandle2Fields(b *testing.B, handle func(w http.ResponseWriter, req *http.Request, pvar httprouter.Params)) { + rec := httptest.NewRecorder() + params := httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "limit": {"2000"}, + }, + }, + PathVar: httprouter.Params{{ + Key: "id", + Value: "someid", + }}, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + rec.Body.Reset() + handle(rec, params.Request, params.PathVar) + } +} + +func BenchmarkUnmarshal4Fields(b *testing.B) { + fromDate, err1 := time.Parse(dateFormat, "2010-10-10") + toDate, err2 := time.Parse(dateFormat, "2011-11-11") + if err1 != nil || err2 != nil { + b.Fatalf("bad times") + } + type P testParams4Fields + params := httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "limit": {"2000"}, + "from": {fromDate.Format(dateFormat)}, + "to": {toDate.Format(dateFormat)}, + }, + }, + PathVar: httprouter.Params{{ + Key: "id", + Value: "someid", + }}, + } + var args P + + b.ResetTimer() + for i := 0; i < b.N; i++ { + args = P{} + err := httprequest.Unmarshal(params, &args) + if err != nil { + b.Fatalf("unmarshal failed: %v", err) + } + } + b.StopTimer() + if !reflect.DeepEqual(args, P{ + Id: "someid", + Limit: 2000, + From: dateTime{fromDate}, + To: dateTime{toDate}, + }) { + b.Errorf("unexpected result: got %#v", args) + } +} + +func BenchmarkHandle4FieldsTrad(b *testing.B) { + results := []testResult{} + benchmarkHandle4Fields(b, errorMapper.HandleJSON(func(p httprequest.Params) (interface{}, error) { + start, stop, err := parseDateRange(p.Request.Form) + if err != nil { + panic("unreachable") + } + _ = start + _ = stop + limit := -1 + if limitStr := p.Request.Form.Get("limit"); limitStr != "" { + limit, err = strconv.Atoi(limitStr) + if err != nil || limit <= 0 { + panic("unreachable") + } + } + if id := p.PathVar.ByName("id"); id == "" { + panic("unreachable") + } + return results, nil + })) +} + +// parseDateRange parses a date range as specified in an http +// request. The returned times will be zero if not specified. +func parseDateRange(form url.Values) (start, stop time.Time, err error) { + if v := form.Get("start"); v != "" { + var err error + start, err = time.Parse(dateFormat, v) + if err != nil { + return time.Time{}, time.Time{}, errgo.Newf("invalid 'start' value %q", v) + } + } + if v := form.Get("stop"); v != "" { + var err error + stop, err = time.Parse(dateFormat, v) + if err != nil { + return time.Time{}, time.Time{}, errgo.Newf("invalid 'stop' value %q", v) + } + // Cover all timestamps within the stop day. 
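+		// That is, move stop forward to the last second of that day.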
+ stop = stop.Add(24*time.Hour - 1*time.Second) + } + return +} + +func BenchmarkHandle4Fields(b *testing.B) { + results := []testResult{} + benchmarkHandle4Fields(b, errorMapper.Handle(func(p httprequest.Params, arg *testParams4Fields) ([]testResult, error) { + if arg.To.Before(arg.From.Time) { + panic("unreachable") + } + if arg.Limit <= 0 { + panic("unreachable") + } + return results, nil + }).Handle) +} + +func BenchmarkHandle4FieldsUnmarshalOnly(b *testing.B) { + results := []testResult{} + benchmarkHandle4Fields(b, errorMapper.HandleJSON(func(p httprequest.Params) (interface{}, error) { + var arg testParams4Fields + if err := httprequest.Unmarshal(p, &arg); err != nil { + return nil, err + } + if arg.To.Before(arg.From.Time) { + panic("unreachable") + } + if arg.Limit <= 0 { + panic("unreachable") + } + return results, nil + })) +} + +func benchmarkHandle4Fields(b *testing.B, handle func(w http.ResponseWriter, req *http.Request, pvar httprouter.Params)) { + // example taken from charmstore changes/published endpoint + fromDate, err1 := time.Parse(dateFormat, "2010-10-10") + toDate, err2 := time.Parse(dateFormat, "2011-11-11") + if err1 != nil || err2 != nil { + b.Fatalf("bad times") + } + rec := httptest.NewRecorder() + params := httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "limit": {"2000"}, + "from": {fromDate.Format(dateFormat)}, + "to": {toDate.Format(dateFormat)}, + }, + }, + PathVar: httprouter.Params{{ + Key: "id", + Value: "someid", + }}, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + rec.Body.Reset() + handle(rec, params.Request, params.PathVar) + } +} + +func BenchmarkHandle2StringFields(b *testing.B) { + benchmarkHandleNFields(b, 2, errorMapper.Handle(func(p httprequest.Params, arg *testParams2StringFields) error { + return nil + }).Handle) +} + +func BenchmarkHandle2StringFieldsUnmarshalOnly(b *testing.B) { + benchmarkHandleNFields(b, 2, errorMapper.HandleErrors(func(p httprequest.Params) error { + var arg testParams2StringFields + return httprequest.Unmarshal(p, &arg) + })) +} + +func BenchmarkHandle2StringFieldsTrad(b *testing.B) { + benchmarkHandleNFields(b, 2, errorMapper.HandleErrors(func(p httprequest.Params) error { + var arg testParams2StringFields + arg.Field0 = p.Request.Form.Get("Field0") + arg.Field1 = p.Request.Form.Get("Field1") + return nil + })) +} + +func BenchmarkHandle4StringFields(b *testing.B) { + benchmarkHandleNFields(b, 4, errorMapper.Handle(func(p httprequest.Params, arg *testParams4StringFields) error { + return nil + }).Handle) +} + +func BenchmarkHandle4StringFieldsUnmarshalOnly(b *testing.B) { + benchmarkHandleNFields(b, 4, errorMapper.HandleErrors(func(p httprequest.Params) error { + var arg testParams4StringFields + return httprequest.Unmarshal(p, &arg) + })) +} + +func BenchmarkHandle4StringFieldsTrad(b *testing.B) { + benchmarkHandleNFields(b, 4, errorMapper.HandleErrors(func(p httprequest.Params) error { + var arg testParams4StringFields + arg.Field0 = p.Request.Form.Get("Field0") + arg.Field1 = p.Request.Form.Get("Field1") + arg.Field2 = p.Request.Form.Get("Field2") + arg.Field3 = p.Request.Form.Get("Field3") + return nil + })) +} + +func BenchmarkHandle8StringFields(b *testing.B) { + benchmarkHandleNFields(b, 8, errorMapper.Handle(func(p httprequest.Params, arg *testParams8StringFields) error { + return nil + }).Handle) +} + +func BenchmarkHandle8StringFieldsUnmarshalOnly(b *testing.B) { + benchmarkHandleNFields(b, 8, errorMapper.HandleErrors(func(p httprequest.Params) error { + var arg 
testParams8StringFields
+		return httprequest.Unmarshal(p, &arg)
+	}))
+}
+
+func BenchmarkHandle8StringFieldsTrad(b *testing.B) {
+	benchmarkHandleNFields(b, 8, errorMapper.HandleErrors(func(p httprequest.Params) error {
+		var arg testParams8StringFields
+		arg.Field0 = p.Request.Form.Get("Field0")
+		arg.Field1 = p.Request.Form.Get("Field1")
+		arg.Field2 = p.Request.Form.Get("Field2")
+		arg.Field3 = p.Request.Form.Get("Field3")
+		arg.Field4 = p.Request.Form.Get("Field4")
+		arg.Field5 = p.Request.Form.Get("Field5")
+		arg.Field6 = p.Request.Form.Get("Field6")
+		arg.Field7 = p.Request.Form.Get("Field7")
+		return nil
+	}))
+}
+
+func BenchmarkHandle16StringFields(b *testing.B) {
+	benchmarkHandleNFields(b, 16, errorMapper.Handle(func(p httprequest.Params, arg *testParams16StringFields) error {
+		return nil
+	}).Handle)
+}
+
+func BenchmarkHandle16StringFieldsUnmarshalOnly(b *testing.B) {
+	benchmarkHandleNFields(b, 16, errorMapper.HandleErrors(func(p httprequest.Params) error {
+		var arg testParams16StringFields
+		return httprequest.Unmarshal(p, &arg)
+	}))
+}
+
+func BenchmarkHandle16StringFieldsTrad(b *testing.B) {
+	benchmarkHandleNFields(b, 16, errorMapper.HandleErrors(func(p httprequest.Params) error {
+		var arg testParams16StringFields
+		arg.Field0 = p.Request.Form.Get("Field0")
+		arg.Field1 = p.Request.Form.Get("Field1")
+		arg.Field2 = p.Request.Form.Get("Field2")
+		arg.Field3 = p.Request.Form.Get("Field3")
+		arg.Field4 = p.Request.Form.Get("Field4")
+		arg.Field5 = p.Request.Form.Get("Field5")
+		arg.Field6 = p.Request.Form.Get("Field6")
+		arg.Field7 = p.Request.Form.Get("Field7")
+		arg.Field8 = p.Request.Form.Get("Field8")
+		arg.Field9 = p.Request.Form.Get("Field9")
+		arg.Field10 = p.Request.Form.Get("Field10")
+		arg.Field11 = p.Request.Form.Get("Field11")
+		arg.Field12 = p.Request.Form.Get("Field12")
+		arg.Field13 = p.Request.Form.Get("Field13")
+		arg.Field14 = p.Request.Form.Get("Field14")
+		arg.Field15 = p.Request.Form.Get("Field15")
+		return nil
+	}))
+}
+
+func benchmarkHandleNFields(b *testing.B, n int, handle func(w http.ResponseWriter, req *http.Request, pvar httprouter.Params)) {
+	form := make(url.Values)
+	for i := 0; i < n; i++ {
+		form[fmt.Sprint("Field", i)] = []string{fmt.Sprintf("field %d", i)}
+	}
+	rec := httptest.NewRecorder()
+	params := httprequest.Params{
+		Request: &http.Request{
+			Form: form,
+		},
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		rec.Body.Reset()
+		handle(rec, params.Request, params.PathVar)
+	}
+}
=== added file 'src/github.com/juju/httprequest/checkisjson.go'
--- src/github.com/juju/httprequest/checkisjson.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/httprequest/checkisjson.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,155 @@
+package httprequest
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"unicode"
+
+	"golang.org/x/net/html"
+	"golang.org/x/net/html/atom"
+	"gopkg.in/errgo.v1"
+)
+
+// maxErrorBodySize holds the maximum amount of body that
+// we try to read for an error before extracting text from it.
+// It's reasonably large because:
+// a) HTML often has large embedded scripts which we want
+// to skip, and
+// b) it should be a relatively unusual case, so the large
+// size shouldn't hurt.
+//
+// It's defined as a variable so that it can be redefined in tests.
+var maxErrorBodySize = 200 * 1024
+
+// checkIsJSON checks that the content type of the given header implies
+// that the content is JSON. If it is not, it reads from the body to
+// try to make a useful error message.
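+// The body is left unread when the content type is JSON.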
+func checkIsJSON(header http.Header, body io.Reader) error { + contentType := header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if mediaType == "application/json" { + return nil + } + if err != nil { + // Even if there's no media type, we want to see something useful. + mediaType = fmt.Sprintf("%q", contentType) + } + // TODO use charset.NewReader to convert from non-utf8 content? + // Read the body ignoring any errors - we'll just make do with what we've got. + bodyData, _ := ioutil.ReadAll(io.LimitReader(noErrorReader{body}, int64(maxErrorBodySize))) + switch mediaType { + case "text/html": + text, err := htmlToText(bytes.NewReader(bodyData)) + if err != nil { + // Note: it seems that this can never actually + // happen - the only way that the HTML parser + // can fail is if there's a read error and we've + // removed that possibility by using + // noErrorReader above. + return errgo.Notef(err, "unexpected (and invalid) content text/html; want application/json; content: %q", sizeLimit(bodyData)) + } + if len(text) == 0 { + return errgo.Newf(`unexpected content type text/html; want application/json; content: %q`, sizeLimit(bodyData)) + } + return errgo.Newf(`unexpected content type text/html; want application/json; content: %s`, sizeLimit(text)) + case "text/plain": + return errgo.Newf(`unexpected content type text/plain; want application/json; content: %s`, sizeLimit(sanitizeText(string(bodyData), true))) + default: + return errgo.Newf(`unexpected content type %s; want application/json; content: %q`, mediaType, sizeLimit(bodyData)) + } +} + +// noErrorReader wraps a reader, turning any errors into io.EOF +// so that we can extract some content even if we get an io error. +type noErrorReader struct { + r io.Reader +} + +func (r noErrorReader) Read(buf []byte) (int, error) { + n, err := r.r.Read(buf) + if err != nil { + err = io.EOF + } + return n, err +} + +func sizeLimit(data []byte) []byte { + const max = 1024 + if len(data) < max { + return data + } + return append(data[0:max], fmt.Sprintf(" ... [%d bytes omitted]", len(data)-max)...) +} + +// htmlToText attempts to return some relevant textual content +// from the HTML content in the given reader, formatted +// as a single line. +func htmlToText(r io.Reader) ([]byte, error) { + n, err := html.Parse(r) + if err != nil { + return nil, err + } + var buf bytes.Buffer + htmlNodeToText(&buf, n) + return buf.Bytes(), nil +} + +func htmlNodeToText(w *bytes.Buffer, n *html.Node) { + for ; n != nil; n = n.NextSibling { + switch n.Type { + case html.TextNode: + data := sanitizeText(n.Data, false) + if len(data) == 0 { + break + } + if w.Len() > 0 { + w.WriteString("; ") + } + w.Write(data) + case html.ElementNode: + if n.DataAtom != atom.Script { + htmlNodeToText(w, n.FirstChild) + } + case html.DocumentNode: + htmlNodeToText(w, n.FirstChild) + } + } +} + +// sanitizeText tries to make the given string easier to read when presented +// as a single line. It squashes each run of white space into a single +// space, trims leading and trailing white space and trailing full +// stops. If newlineSemi is true, any newlines will be replaced with a +// semicolon. 
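+// For example, with newlineSemi true, "  a\n b.. " becomes "a; b".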
+func sanitizeText(s string, newlineSemi bool) []byte { + out := make([]byte, 0, len(s)) + prevWhite := false + for _, r := range s { + if newlineSemi && r == '\n' && len(out) > 0 { + out = append(out, ';') + prevWhite = true + continue + } + if unicode.IsSpace(r) { + if len(out) > 0 { + prevWhite = true + } + continue + } + if prevWhite { + out = append(out, ' ') + prevWhite = false + } + out = append(out, string(r)...) + } + // Remove final space, any full stops and any final semicolon + // we might have added. + out = bytes.TrimRightFunc(out, func(r rune) bool { + return r == '.' || r == ' ' || r == ';' + }) + return out +} === added file 'src/github.com/juju/httprequest/checkisjson_test.go' --- src/github.com/juju/httprequest/checkisjson_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/checkisjson_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,455 @@ +package httprequest_test + +import ( + "net/http" + "strings" + + gc "gopkg.in/check.v1" + + "github.com/juju/httprequest" +) + +type checkIsJSONSuite struct{} + +var _ = gc.Suite(&checkIsJSONSuite{}) + +var checkIsJSONTests = []struct { + about string + contentType string + body string + expectError string +}{{ + about: "simple json", + contentType: "application/json", + body: "not json but unread", +}, { + about: "simple json with charset", + contentType: "application/json; charset=UTF-8", + body: "not json but unread", +}, { + about: "plain text", + contentType: "text/plain; charset=UTF-8", + body: " some\n text\t\n", + expectError: `unexpected content type text/plain; want application/json; content: some; text`, +}, { + about: "plain text with leading newline", + contentType: "text/plain; charset=UTF-8", + body: "\nsome text", + expectError: `unexpected content type text/plain; want application/json; content: some text`, +}, { + about: "unknown content type", + contentType: "something", + body: "some \nstuff", + expectError: `unexpected content type something; want application/json; content: "some \\nstuff"`, +}, { + about: "bad content type", + contentType: "/; charset=foo", + body: `some stuff`, + expectError: `unexpected content type "/; charset=foo"; want application/json; content: "some stuff"`, +}, { + about: "large text body", + contentType: "text/plain", + body: strings.Repeat("x", 1024+300), + expectError: `unexpected content type text/plain; want application/json; content: ` + strings.Repeat("x", 1024) + ` \.\.\. \[300 bytes omitted]`, +}, { + about: "html with no text", + contentType: "text/html", + body: "\n", + expectError: `unexpected content type text/html; want application/json; content: "\\n"`, +}, { + about: "non-utf8 text", + contentType: "text/plain; charset=iso8859-1", + body: "Pepp\xe9\n", + // It would be nice to make this better, but we don't + // really want to drag in all the charsets for this. + expectError: "unexpected content type text/plain; want application/json; content: Pepp\uFFFD", +}, { + about: "actual html error message from proxy", + contentType: "text/html; charset=UTF-8", + body: ` + +502 Proxy Error + +

Proxy Error

+

The proxy server received an invalid +response from an upstream server.
+The proxy server could not handle the request GET /identity/v1/wait.

+Reason: Error reading from remote server

+
+
Apache/2.4.7 (Ubuntu) Server at api.jujucharms.com Port 443
+`, + expectError: `unexpected content type text/html; want application/json; content: 502 Proxy Error; Proxy Error; The proxy server received an invalid response from an upstream server; The proxy server could not handle the request; GET /identity/v1/wait; Reason:; Error reading from remote server; Apache/2\.4\.7 \(Ubuntu\) Server at api.jujucharms.com Port 443`, +}, { + about: "actual html error message web page", + contentType: "text/html; charset=UTF-8", + body: ` + + + + + + +Page not found | Juju + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + +
+ + + +
+ + +
+ +
+
+ + + + + + +
+
+

404: Sorry, we couldn’t find the page.

+ +

Try a different URL, try searching for solutions or learn how to create your own solution.

+ + +
+
+ + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + +`, + expectError: `unexpected content type text/html; want application/json; content: Page not found | Juju; Jump to content; Store; Demo; About; Features; Community; Docs; Get started; ☰; Create; \+; 404: Sorry, we couldn’t find the page; Try a different URL, try searching for solutions or learn how to; create your own solution; Browse the store; All bundles; All charms; Submit a bug; Browse the store ›; Back to the top; Demo; About; Features; Docs; Get Started; Juju on Google+; Ubuntu Cloud on Twitter; Ubuntu Cloud on Facebook; © 2015 Canonical Ltd. Ubuntu and Canonical are registered trademarks of Canonical Ltd; Legal information; Report a bug on this site; Got to the top of the page`, +}} + +func (checkIsJSONSuite) TestCheckIsJSON(c *gc.C) { + *httprequest.MaxErrorBodySize = 16 * 1024 + for i, test := range checkIsJSONTests { + c.Logf("test %d: %s", i, test.about) + r := strings.NewReader(test.body) + err := httprequest.CheckIsJSON(http.Header{ + "Content-Type": {test.contentType}, + }, r) + if test.expectError == "" { + c.Assert(err, gc.IsNil) + c.Assert(r.Len(), gc.Equals, len(test.body)) + continue + } + c.Assert(err, gc.ErrorMatches, test.expectError) + if len(test.body) > *httprequest.MaxErrorBodySize { + c.Assert(r.Len(), gc.Equals, *httprequest.MaxErrorBodySize-len(test.body)) + } + } +} === added file 'src/github.com/juju/httprequest/client.go' --- src/github.com/juju/httprequest/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,333 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package httprequest + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + + "gopkg.in/errgo.v1" +) + +// Doer is implemented by HTTP client packages +// to make an HTTP request. It is notably implemented +// by http.Client and httpbakery.Client. +// +// When httprequest uses a Doer value for requests +// with a non-empty body, it will use DoWithBody if +// the value implements it (see DoerWithBody). +// This enables httpbakery.Client to be used correctly. +type Doer interface { + Do(req *http.Request) (*http.Response, error) +} + +// DoerWithBody is implemented by HTTP clients that need +// to be able to retry HTTP requests with a body. +// It is notably implemented by httpbakery.Client. +type DoerWithBody interface { + DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) +} + +// Client represents a client that can invoke httprequest endpoints. +type Client struct { + // BaseURL holds the base URL to use when making + // HTTP requests. + BaseURL string + + // Doer holds a value that will be used to actually + // make the HTTP request. If it is nil, http.DefaultClient + // will be used instead. If the request has a non-empty body + // and Doer implements DoerWithBody, DoWithBody + // will be used instead. + Doer Doer + + // If a request returns an HTTP response that signifies an + // error, UnmarshalError is used to unmarshal the response into + // an appropriate error. See ErrorUnmarshaler for a convenient + // way to create an UnmarshalError function for a given type. If + // this is nil, DefaultErrorUnmarshaler will be used. + UnmarshalError func(resp *http.Response) error +} + +// DefaultErrorUnmarshaler is the default error unmarshaler +// used by Client. 
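+// It unmarshals JSON error responses into values of type *RemoteError.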
+var DefaultErrorUnmarshaler = ErrorUnmarshaler(new(RemoteError)) + +// Call invokes the endpoint implied by the given params, +// which should be of the form accepted by the ArgT +// argument to a function passed to Handle, and +// unmarshals the response into the given response parameter, +// which should be a pointer to the response value. +// +// If params implements the HeaderSetter interface, its SetHeader method +// will be called to add additional headers to the HTTP request. +// +// If resp is nil, the response will be ignored if the +// request was successful. +// +// If resp is of type **http.Response, instead of unmarshaling +// into it, its element will be set to the returned HTTP +// response directly and the caller is responsible for +// closing its Body field. +// +// Any error that c.UnmarshalError or c.Doer returns will not +// have its cause masked. +func (c *Client) Call(params, resp interface{}) error { + return c.CallURL(c.BaseURL, params, resp) +} + +// CallURL is like Call except that the given URL is used instead of +// c.BaseURL. +func (c *Client) CallURL(url string, params, resp interface{}) error { + rt, err := getRequestType(reflect.TypeOf(params)) + if err != nil { + return errgo.Mask(err) + } + if rt.method == "" { + return errgo.Newf("type %T has no httprequest.Route field", params) + } + reqURL, err := appendURL(url, rt.path) + if err != nil { + return errgo.Mask(err) + } + req, err := Marshal(reqURL.String(), rt.method, params) + if err != nil { + return errgo.Mask(err) + } + + // Actually make the request. + doer := c.Doer + if doer == nil { + doer = http.DefaultClient + } + var httpResp *http.Response + body := req.Body.(BytesReaderCloser) + // Always use DoWithBody when available. + if doer1, ok := doer.(DoerWithBody); ok { + req.Body = nil + httpResp, err = doer1.DoWithBody(req, body) + } else { + httpResp, err = doer.Do(req) + } + if err != nil { + return errgo.Mask(err, errgo.Any) + } + return c.unmarshalResponse(httpResp, resp) +} + +// Do sends the given request and unmarshals its JSON +// result into resp, which should be a pointer to the response value. +// If an error status is returned, the error will be unmarshaled +// as in Client.Call. The req.Body field must be nil - any request +// body should be provided in the body parameter. +// +// If resp is nil, the response will be ignored if the response was +// successful. +// +// If resp is of type **http.Response, instead of unmarshaling +// into it, its element will be set to the returned HTTP +// response directly and the caller is responsible for +// closing its Body field. +// +// Any error that c.UnmarshalError or c.Doer returns will not +// have its cause masked. +// +// If req.URL does not have a host part it will be treated as relative to +// c.BaseURL. req.URL will be updated to the actual URL used. +func (c *Client) Do(req *http.Request, body io.ReadSeeker, resp interface{}) error { + if req.URL.Host == "" { + var err error + req.URL, err = appendURL(c.BaseURL, req.URL.String()) + if err != nil { + return errgo.Mask(err) + } + } + if req.Body != nil { + return errgo.Newf("%s %s: request body supplied unexpectedly", req.Method, req.URL) + } + inferContentLength(req, body) + doer := c.Doer + if doer == nil { + doer = http.DefaultClient + } + var httpResp *http.Response + var err error + // Use DoWithBody when it's available and body is not nil. 
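+	// (Doers such as httpbakery.Client need the seekable body
+	// so that they can retry the request.)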
+	doer1, ok := doer.(DoerWithBody)
+	if ok && body != nil {
+		httpResp, err = doer1.DoWithBody(req, body)
+	} else {
+		if body != nil {
+			req.Body = ioutil.NopCloser(body)
+		}
+		httpResp, err = doer.Do(req)
+	}
+	if err != nil {
+		return errgo.NoteMask(err, fmt.Sprintf("%s %s", req.Method, req.URL), errgo.Any)
+	}
+	return c.unmarshalResponse(httpResp, resp)
+}
+
+// Get is a convenience method that uses c.Do to issue a GET request to
+// the given URL. If the given URL does not have a host part then it will
+// be treated as relative to c.BaseURL.
+func (c *Client) Get(url string, resp interface{}) error {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return errgo.Notef(err, "cannot make request")
+	}
+	return c.Do(req, nil, resp)
+}
+
+func inferContentLength(req *http.Request, body io.ReadSeeker) {
+	if body == nil {
+		return
+	}
+	switch v := body.(type) {
+	case *bytes.Reader:
+		req.ContentLength = int64(v.Len())
+	case *strings.Reader:
+		req.ContentLength = int64(v.Len())
+	}
+}
+
+// unmarshalResponse unmarshals the given HTTP response into resp,
+// applying the client's error unmarshaler when the response
+// signifies an error.
+func (c *Client) unmarshalResponse(httpResp *http.Response, resp interface{}) error {
+	if 200 <= httpResp.StatusCode && httpResp.StatusCode < 300 {
+		if respPt, ok := resp.(**http.Response); ok {
+			*respPt = httpResp
+			return nil
+		}
+		defer httpResp.Body.Close()
+		return UnmarshalJSONResponse(httpResp, resp)
+	}
+	defer httpResp.Body.Close()
+	errUnmarshaler := c.UnmarshalError
+	if errUnmarshaler == nil {
+		errUnmarshaler = DefaultErrorUnmarshaler
+	}
+	err := errUnmarshaler(httpResp)
+	if err == nil {
+		err = errgo.Newf("unexpected HTTP response status: %s", httpResp.Status)
+	}
+	return errgo.NoteMask(err, httpResp.Request.Method+" "+httpResp.Request.URL.String(), errgo.Any)
+}
+
+// ErrorUnmarshaler returns a function which will unmarshal error
+// responses into new values of the same type as template. The argument
+// must be a pointer. A new instance of it is created every time the
+// returned function is called.
+func ErrorUnmarshaler(template error) func(*http.Response) error {
+	t := reflect.TypeOf(template)
+	if t.Kind() != reflect.Ptr {
+		panic(errgo.Newf("cannot unmarshal errors into value of type %T", template))
+	}
+	t = t.Elem()
+	return func(resp *http.Response) error {
+		if 300 <= resp.StatusCode && resp.StatusCode < 400 {
+			// It's a redirection error.
+			loc, _ := resp.Location()
+			return fmt.Errorf("unexpected redirect (status %s) from %q to %q", resp.Status, resp.Request.URL, loc)
+		}
+		if err := checkIsJSON(resp.Header, resp.Body); err != nil {
+			// TODO consider including some or all of the body
+			// in the error.
+			return fmt.Errorf("cannot unmarshal error response (status %s): %v", resp.Status, err)
+		}
+		errv := reflect.New(t)
+		if err := UnmarshalJSONResponse(resp, errv.Interface()); err != nil {
+			return fmt.Errorf("cannot unmarshal error response (status %s): %v", resp.Status, err)
+		}
+		return errv.Interface().(error)
+	}
+}
+
+// UnmarshalJSONResponse unmarshals the given HTTP response
+// into x, which should be a pointer to the result to be
+// unmarshaled into.
+func UnmarshalJSONResponse(resp *http.Response, x interface{}) error {
+	// Try to read all the body so that we can reuse the
+	// connection, but don't try *too* hard.
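+	// Draining at most 8KB bounds the extra work when the
+	// body is unexpectedly large.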
+ defer io.Copy(ioutil.Discard, io.LimitReader(resp.Body, 8*1024)) + if x == nil { + return nil + } + if err := checkIsJSON(resp.Header, resp.Body); err != nil { + return errgo.Notef(err, "%s %s", resp.Request.Method, resp.Request.URL) + } + // Decode only a single JSON value, and then + // discard the rest of the body so that we can + // reuse the connection even if some foolish server + // has put garbage on the end. + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(x); err != nil { + return errgo.Notef(err, "%s %s", resp.Request.Method, resp.Request.URL) + } + return nil +} + +// RemoteError holds the default type of a remote error +// used by Client when no custom error unmarshaler +// is set. +type RemoteError struct { + // Message holds the error message. + Message string + + // Code may hold a code that classifies the error. + Code string `json:",omitempty"` + + // Info holds any other information associated with the error. + Info *json.RawMessage `json:",omitempty"` +} + +// Error implements the error interface. +func (e *RemoteError) Error() string { + if e.Message == "" { + return "httprequest: no error message found" + } + return "httprequest: " + e.Message +} + +// appendURL returns the result of combining the +// given base URL and relative URL. +// +// The path of the relative URL will be appended +// to the base URL, separated by a slash (/) if +// needed. +// +// Any query parameters will be concatenated together. +// +// appendURL will return an error if relURLStr contains +// a host name. +func appendURL(baseURLStr, relURLStr string) (*url.URL, error) { + b, err := url.Parse(baseURLStr) + if err != nil { + return nil, errgo.Notef(err, "cannot parse %q", baseURLStr) + } + r, err := url.Parse(relURLStr) + if err != nil { + return nil, errgo.Notef(err, "cannot parse %q", relURLStr) + } + if r.Host != "" { + return nil, errgo.Newf("relative URL specifies a host") + } + if r.Path != "" { + b.Path = strings.TrimSuffix(b.Path, "/") + "/" + strings.TrimPrefix(r.Path, "/") + } + if r.RawQuery != "" { + if b.RawQuery != "" { + b.RawQuery += "&" + r.RawQuery + } else { + b.RawQuery = r.RawQuery + } + } + return b, nil +} === added file 'src/github.com/juju/httprequest/client_test.go' --- src/github.com/juju/httprequest/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,606 @@ +package httprequest_test + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "reflect" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/julienschmidt/httprouter" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "github.com/juju/httprequest" +) + +type clientSuite struct{} + +var _ = gc.Suite(&clientSuite{}) + +var callTests = []struct { + about string + client httprequest.Client + req interface{} + expectError string + expectCause interface{} + expectResp interface{} +}{{ + about: "GET success", + req: &chM1Req{ + P: "hello", + }, + expectResp: &chM1Resp{"hello"}, +}, { + about: "GET with nil response", + req: &chM1Req{ + P: "hello", + }, +}, { + about: "POST success", + req: &chM2Req{ + P: "hello", + Body: struct{ I int }{999}, + }, + expectResp: &chM2Resp{"hello", 999}, +}, { + about: "GET marshal error", + req: 123, + expectError: `type is not pointer to struct`, +}, { + about: "error response", + req: &chInvalidM2Req{ + P: "hello", + Body: struct{ I bool }{true}, + }, + expectError: `POST http://.*/m2/hello: httprequest: cannot unmarshal parameters: cannot unmarshal into 
field: cannot unmarshal request body: json: cannot unmarshal bool into Go value of type int`, + expectCause: &httprequest.RemoteError{ + Message: `cannot unmarshal parameters: cannot unmarshal into field: cannot unmarshal request body: json: cannot unmarshal bool into Go value of type int`, + Code: "bad request", + }, +}, { + about: "error unmarshaler returns nil", + client: httprequest.Client{ + UnmarshalError: func(*http.Response) error { + return nil + }, + }, + req: &chM3Req{}, + expectError: `GET http://.*/m3: unexpected HTTP response status: 500 Internal Server Error`, +}, { + about: "unexpected redirect", + req: &chM2RedirectM2Req{}, + expectError: `POST http://.*/m2/foo//: unexpected redirect \(status 307 Temporary Redirect\) from "http://.*/m2/foo//" to "http://.*/m2/foo"`, +}, { + about: "doer with body", + client: httprequest.Client{ + Doer: doerFunc(func(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + if body == nil { + panic("Do called when DoWithBody expected") + } + req.Body = ioutil.NopCloser(body) + return http.DefaultClient.Do(req) + }), + }, + req: &chM2Req{ + P: "hello", + Body: struct{ I int }{999}, + }, + expectResp: &chM2Resp{"hello", 999}, +}, { + about: "doer that implements DoWithBody but no body", + client: httprequest.Client{ + Doer: doerFunc(func(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + if body == nil { + panic("Do called but DoWithBody should always be called") + } + return http.DefaultClient.Do(req) + }), + }, + req: &chM1Req{ + P: "hello", + }, + expectResp: &chM1Resp{"hello"}, +}} + +func (s *clientSuite) TestCall(c *gc.C) { + srv := s.newServer() + defer srv.Close() + + for i, test := range callTests { + c.Logf("test %d: %s", i, test.about) + var resp interface{} + if test.expectResp != nil { + resp = reflect.New(reflect.TypeOf(test.expectResp).Elem()).Interface() + } + client := test.client + client.BaseURL = srv.URL + err := client.Call(test.req, resp) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + if test.expectCause != nil { + c.Assert(errgo.Cause(err), jc.DeepEquals, test.expectCause) + } + continue + } + c.Assert(err, gc.IsNil) + c.Assert(resp, jc.DeepEquals, test.expectResp) + } +} + +func (s *clientSuite) TestCallURLNoRequestPath(c *gc.C) { + srv := s.newServer() + defer srv.Close() + + var client httprequest.Client + req := struct { + httprequest.Route `httprequest:"GET"` + chM1Req + }{ + chM1Req: chM1Req{ + P: "hello", + }, + } + var resp chM1Resp + err := client.CallURL(srv.URL+"/m1/:P", &req, &resp) + c.Assert(err, gc.IsNil) + c.Assert(resp, jc.DeepEquals, chM1Resp{"hello"}) +} + +func mustNewRequest(url string, method string, body io.Reader) *http.Request { + req, err := http.NewRequest(method, url, body) + if err != nil { + panic(err) + } + req.Header.Set("Content-Type", "application/json") + return req +} + +var doTests = []struct { + about string + client httprequest.Client + request *http.Request + body io.ReadSeeker + + expectError string + expectCause interface{} + expectResp interface{} +}{{ + about: "GET success", + request: mustNewRequest("/m1/hello", "GET", nil), + expectResp: &chM1Resp{"hello"}, +}, { + about: "appendURL error", + request: mustNewRequest("/m1/hello", "GET", nil), + client: httprequest.Client{ + BaseURL: ":::", + }, + expectError: `cannot parse ":::": parse :::: missing protocol scheme`, +}, { + about: "body supplied in request", + request: mustNewRequest("/m1/hello", "GET", strings.NewReader("")), + expectError: `GET http://.*/m1/hello: 
request body supplied unexpectedly`,
+}, {
+	about:      "content length is inferred from strings.Reader",
+	request:    mustNewRequest("/content-length", "PUT", nil),
+	body:       strings.NewReader("hello"),
+	expectResp: newInt64(int64(len("hello"))),
+}, {
+	about:      "content length is inferred from bytes.Reader",
+	request:    mustNewRequest("/content-length", "PUT", nil),
+	body:       bytes.NewReader([]byte("hello")),
+	expectResp: newInt64(int64(len("hello"))),
+}, {
+	about: "DoWithBody implemented but no body",
+	client: httprequest.Client{
+		Doer: doerFunc(func(req *http.Request, body io.ReadSeeker) (*http.Response, error) {
+			if body != nil {
+				panic("DoWithBody called when Do expected")
+			}
+			return http.DefaultClient.Do(req)
+		}),
+	},
+	request:    mustNewRequest("/m1/hello", "GET", nil),
+	expectResp: &chM1Resp{"hello"},
+}, {
+	about: "DoWithBody not implemented and body present",
+	client: httprequest.Client{
+		Doer: doerOnlyFunc(func(req *http.Request) (*http.Response, error) {
+			return http.DefaultClient.Do(req)
+		}),
+	},
+	request: mustNewRequest("/m2/foo", "POST", nil),
+	body:    strings.NewReader(`{"I": 999}`),
+	expectResp: &chM2Resp{
+		P:   "foo",
+		Arg: 999,
+	},
}, {
+	about: "DoWithBody implemented and body present",
+	client: httprequest.Client{
+		Doer: doerFunc(func(req *http.Request, body io.ReadSeeker) (*http.Response, error) {
+			if body == nil {
+				panic("Do called when DoWithBody expected")
+			}
+			req.Body = ioutil.NopCloser(body)
+			return http.DefaultClient.Do(req)
+		}),
+	},
+	request: mustNewRequest("/m2/foo", "POST", nil),
+	body:    strings.NewReader(`{"I": 999}`),
+	expectResp: &chM2Resp{
+		P:   "foo",
+		Arg: 999,
+	},
+}, {
+	about: "Do returns error",
+	client: httprequest.Client{
+		Doer: doerOnlyFunc(func(req *http.Request) (*http.Response, error) {
+			return nil, errgo.Newf("an error")
+		}),
+	},
+	request:     mustNewRequest("/m2/foo", "POST", nil),
+	body:        strings.NewReader(`{"I": 999}`),
+	expectError: "POST http://.*/m2/foo: an error",
+}}

+func newInt64(i int64) *int64 {
+	return &i
+}
+
+func (s *clientSuite) TestDo(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	for i, test := range doTests {
+		c.Logf("test %d: %s", i, test.about)
+		var resp interface{}
+		if test.expectResp != nil {
+			resp = reflect.New(reflect.TypeOf(test.expectResp).Elem()).Interface()
+		}
+		client := test.client
+		if client.BaseURL == "" {
+			client.BaseURL = srv.URL
+		}
+		err := client.Do(test.request, test.body, resp)
+		if test.expectError != "" {
+			c.Assert(err, gc.ErrorMatches, test.expectError)
+			if test.expectCause != nil {
+				c.Assert(errgo.Cause(err), jc.DeepEquals, test.expectCause)
+			}
+			continue
+		}
+		c.Assert(err, gc.IsNil)
+		c.Assert(resp, jc.DeepEquals, test.expectResp)
+	}
+}
+
+func (s *clientSuite) TestDoWithHTTPResponse(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+	}
+	var resp *http.Response
+	err := client.Get("/m1/foo", &resp)
+	c.Assert(err, gc.IsNil)
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	c.Assert(err, gc.IsNil)
+	c.Assert(string(data), gc.Equals, `{"P":"foo"}`)
+}
+
+func (s *clientSuite) TestDoWithHTTPResponseAndError(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	var doer closeCountingDoer // Also check the body is closed.
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+		Doer:    &doer,
+	}
+	var resp *http.Response
+	err := client.Get("/m3", &resp)
+	c.Assert(resp, gc.IsNil)
+	c.Assert(err, gc.ErrorMatches, `GET http://.*/m3: httprequest: m3 error`)
+	c.Assert(doer.openedBodies, gc.Equals, 1)
+	c.Assert(doer.closedBodies, gc.Equals, 1)
+}
+
+func (s *clientSuite) TestCallWithHTTPResponse(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+	}
+	var resp *http.Response
+	err := client.Call(&chM1Req{
+		P: "foo",
+	}, &resp)
+	c.Assert(err, gc.IsNil)
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	c.Assert(err, gc.IsNil)
+	c.Assert(string(data), gc.Equals, `{"P":"foo"}`)
+}
+
+func (s *clientSuite) TestCallClosesResponseBodyOnSuccess(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	var doer closeCountingDoer
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+		Doer:    &doer,
+	}
+	var resp chM1Resp
+	err := client.Call(&chM1Req{
+		P: "foo",
+	}, &resp)
+	c.Assert(err, gc.IsNil)
+	c.Assert(resp, jc.DeepEquals, chM1Resp{"foo"})
+	c.Assert(doer.openedBodies, gc.Equals, 1)
+	c.Assert(doer.closedBodies, gc.Equals, 1)
+}
+
+func (s *clientSuite) TestCallClosesResponseBodyOnError(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	var doer closeCountingDoer
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+		Doer:    &doer,
+	}
+	err := client.Call(&chM3Req{}, nil)
+	c.Assert(err, gc.ErrorMatches, ".*m3 error")
+	c.Assert(doer.openedBodies, gc.Equals, 1)
+	c.Assert(doer.closedBodies, gc.Equals, 1)
+}
+
+func (s *clientSuite) TestDoClosesResponseBodyOnSuccess(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	var doer closeCountingDoer
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+		Doer:    &doer,
+	}
+	req, err := http.NewRequest("GET", "/m1/foo", nil)
+	c.Assert(err, gc.IsNil)
+	var resp chM1Resp
+	err = client.Do(req, nil, &resp)
+	c.Assert(err, gc.IsNil)
+	c.Assert(resp, jc.DeepEquals, chM1Resp{"foo"})
+	c.Assert(doer.openedBodies, gc.Equals, 1)
+	c.Assert(doer.closedBodies, gc.Equals, 1)
+}
+
+func (s *clientSuite) TestDoClosesResponseBodyOnError(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	var doer closeCountingDoer
+	client := &httprequest.Client{
+		BaseURL: srv.URL,
+		Doer:    &doer,
+	}
+	req, err := http.NewRequest("GET", "/m3", nil)
+	c.Assert(err, gc.IsNil)
+	err = client.Do(req, nil, nil)
+	c.Assert(err, gc.ErrorMatches, ".*m3 error")
+	c.Assert(doer.openedBodies, gc.Equals, 1)
+	c.Assert(doer.closedBodies, gc.Equals, 1)
+}
+
+func (s *clientSuite) TestGet(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	client := httprequest.Client{
+		BaseURL: srv.URL,
+	}
+	var resp chM1Resp
+	err := client.Get("/m1/foo", &resp)
+	c.Assert(err, gc.IsNil)
+	c.Assert(resp, jc.DeepEquals, chM1Resp{"foo"})
+}
+
+func (s *clientSuite) TestGetNoBaseURL(c *gc.C) {
+	srv := s.newServer()
+	defer srv.Close()
+	client := httprequest.Client{}
+	var resp chM1Resp
+	err := client.Get(srv.URL+"/m1/foo", &resp)
+	c.Assert(err, gc.IsNil)
+	c.Assert(resp, jc.DeepEquals, chM1Resp{"foo"})
+}
+
+func (*clientSuite) newServer() *httptest.Server {
+	f := func(p httprequest.Params) (clientHandlers, error) {
+		return clientHandlers{}, nil
+	}
+	handlers := errorMapper.Handlers(f)
+	router := httprouter.New()
+	for _, h := range handlers {
+		router.Handle(h.Method, h.Path, h.Handle)
+	}
+
+	return httptest.NewServer(router)
+}
+
+var appendURLTests = []struct {
+	u           string
+	p           string
+	expect      string
+	expectError string
+}{{
+	u:      "http://foo",
+	p:      "bar",
expect: "http://foo/bar", +}, { + u: "http://foo", + p: "/bar", + expect: "http://foo/bar", +}, { + u: "http://foo/", + p: "bar", + expect: "http://foo/bar", +}, { + u: "http://foo/", + p: "/bar", + expect: "http://foo/bar", +}, { + u: "", + p: "bar", + expect: "/bar", +}, { + u: "http://xxx", + p: "", + expect: "http://xxx", +}, { + u: "http://xxx.com", + p: "http://foo.com", + expectError: "relative URL specifies a host", +}, { + u: "http://xxx.com/a/b", + p: "foo?a=45&b=c", + expect: "http://xxx.com/a/b/foo?a=45&b=c", +}, { + u: "http://xxx.com", + p: "?a=45&b=c", + expect: "http://xxx.com?a=45&b=c", +}, { + u: "http://xxx.com/a?z=w", + p: "foo?a=45&b=c", + expect: "http://xxx.com/a/foo?z=w&a=45&b=c", +}, { + u: "http://xxx.com?z=w", + p: "/a/b/c", + expect: "http://xxx.com/a/b/c?z=w", +}} + +func (*clientSuite) TestAppendURL(c *gc.C) { + for i, test := range appendURLTests { + c.Logf("test %d: %s %s", i, test.u, test.p) + u, err := httprequest.AppendURL(test.u, test.p) + if test.expectError != "" { + c.Assert(u, gc.IsNil) + c.Assert(err, gc.ErrorMatches, test.expectError) + } else { + c.Assert(err, gc.IsNil) + c.Assert(u.String(), gc.Equals, test.expect) + } + } +} + +type clientHandlers struct{} + +type chM1Req struct { + httprequest.Route `httprequest:"GET /m1/:P"` + P string `httprequest:",path"` +} + +type chM1Resp struct { + P string +} + +func (clientHandlers) M1(p *chM1Req) (*chM1Resp, error) { + return &chM1Resp{p.P}, nil +} + +type chM2Req struct { + httprequest.Route `httprequest:"POST /m2/:P"` + P string `httprequest:",path"` + Body struct { + I int + } `httprequest:",body"` +} + +type chInvalidM2Req struct { + httprequest.Route `httprequest:"POST /m2/:P"` + P string `httprequest:",path"` + Body struct { + I bool + } `httprequest:",body"` +} + +type chM2RedirectM2Req struct { + httprequest.Route `httprequest:"POST /m2/foo//"` +} + +type chM2Resp struct { + P string + Arg int +} + +func (clientHandlers) M2(p *chM2Req) (*chM2Resp, error) { + return &chM2Resp{p.P, p.Body.I}, nil +} + +type chM3Req struct { + httprequest.Route `httprequest:"GET /m3"` +} + +func (clientHandlers) M3(p *chM3Req) error { + return errgo.New("m3 error") +} + +type chContentLengthReq struct { + httprequest.Route `httprequest:"PUT /content-length"` +} + +func (clientHandlers) ContentLength(rp httprequest.Params, p *chContentLengthReq) (int64, error) { + return rp.Request.ContentLength, nil +} + +type doerFunc func(req *http.Request, body io.ReadSeeker) (*http.Response, error) + +func (f doerFunc) Do(req *http.Request) (*http.Response, error) { + return f(req, nil) +} + +func (f doerFunc) DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + if req.Body != nil { + panic("unexpected non-nil body in request") + } + if body == nil { + panic("unexpected nil body argument") + } + return f(req, body) +} + +type doerOnlyFunc func(req *http.Request) (*http.Response, error) + +func (f doerOnlyFunc) Do(req *http.Request) (*http.Response, error) { + return f(req) +} + +type closeCountingDoer struct { + // openBodies records the number of response bodies + // that have been returned. + openedBodies int + + // closedBodies records the number of response bodies + // that have been closed. 
+	closedBodies int
+}
+
+func (doer *closeCountingDoer) Do(req *http.Request) (*http.Response, error) {
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	resp.Body = &closeCountingReader{
+		doer:       doer,
+		ReadCloser: resp.Body,
+	}
+	doer.openedBodies++
+	return resp, nil
+}
+
+type closeCountingReader struct {
+	doer *closeCountingDoer
+	io.ReadCloser
+}
+
+func (r *closeCountingReader) Close() error {
+	r.doer.closedBodies++
+	return r.ReadCloser.Close()
+}
=== added directory 'src/github.com/juju/httprequest/cmd'
=== added directory 'src/github.com/juju/httprequest/cmd/httprequest-generate-client'
=== added file 'src/github.com/juju/httprequest/cmd/httprequest-generate-client/main.go'
--- src/github.com/juju/httprequest/cmd/httprequest-generate-client/main.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/httprequest/cmd/httprequest-generate-client/main.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,290 @@
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/format"
+	"go/parser"
+	"io/ioutil"
+	"os"
+	"strings"
+	"text/template"
+
+	"golang.org/x/tools/go/loader"
+	"golang.org/x/tools/go/types"
+	"gopkg.in/errgo.v1"
+)
+
+// TODO:
+// - generate exported types if the parameter/response types aren't exported?
+// - deal with literal interface and struct types.
+// - copy doc comments from server methods.
+
+var currentDir string
+
+func main() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "usage: httprequest-generate-client server-package server-type client-type\n")
+		os.Exit(2)
+	}
+	flag.Parse()
+	if flag.NArg() != 3 {
+		flag.Usage()
+	}
+
+	serverPkg, serverType, clientType := flag.Arg(0), flag.Arg(1), flag.Arg(2)
+
+	if err := generate(serverPkg, serverType, clientType); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+type templateArg struct {
+	CommandLine string
+	PkgName     string
+	Imports     []string
+	Methods     []method
+	ClientType  string
+}
+
+var code = template.Must(template.New("").Parse(`
+// The code in this file was automatically generated by running
+// httprequest-generate-client {{.CommandLine}}
+// DO NOT EDIT
+
+package {{.PkgName}}
+import (
+	{{range .Imports}}{{printf "%q" .}}
+	{{end}}
+)
+
+type {{.ClientType}} struct {
+	Client httprequest.Client
+}
+
+{{range .Methods}}
+{{if .RespType}}
+	{{.Doc}}
+	func (c *{{$.ClientType}}) {{.Name}}(p *{{.ParamType}}) ({{.RespType}}, error) {
+		var r {{.RespType}}
+		err := c.Client.Call(p, &r)
+		return r, err
+	}
+{{else}}
+	{{.Doc}}
+	func (c *{{$.ClientType}}) {{.Name}}(p *{{.ParamType}}) (error) {
+		return c.Client.Call(p, nil)
+	}
+{{end}}
+{{end}}
+`))
+
+func generate(serverPkg, serverType, clientType string) error {
+	cwd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+	currentDir = cwd
+
+	methods, imports, err := serverMethods(serverPkg, serverType)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	localPkg, err := build.Import(".", currentDir, 0)
+	if err != nil {
+		return errgo.Notef(err, "cannot open package in current directory")
+	}
+	arg := templateArg{
+		CommandLine: strings.Join(flag.Args(), " "),
+		Imports:     imports,
+		Methods:     methods,
+		PkgName:     localPkg.Name,
+		ClientType:  clientType,
+	}
+	var buf bytes.Buffer
+	if err := code.Execute(&buf, arg); err != nil {
+		return errgo.Mask(err)
+	}
+	data, err := format.Source(buf.Bytes())
+	if err != nil {
+		return errgo.Notef(err, "cannot format source")
+	}
+	if err := writeOutput(data, clientType); err != nil {
+		return errgo.Mask(err)
+	}
+	return nil
+}
+
+func 
writeOutput(data []byte, clientType string) error { + filename := strings.ToLower(clientType) + "_generated.go" + if err := ioutil.WriteFile(filename, data, 0644); err != nil { + return errgo.Mask(err) + } + return nil +} + +type method struct { + Name string + Doc string + ParamType string + RespType string +} + +func serverMethods(serverPkg, serverType string) ([]method, []string, error) { + cfg := loader.Config{ + TypeCheckFuncBodies: func(string) bool { + return false + }, + ImportPkgs: map[string]bool{ + serverPkg: false, // false means don't load tests. + }, + ParserMode: parser.ParseComments, + } + prog, err := cfg.Load() + if err != nil { + return nil, nil, errgo.Notef(err, "cannot load %q", serverPkg) + } + pkgInfo := prog.Imported[serverPkg] + if pkgInfo == nil { + return nil, nil, errgo.Newf("cannot find %q in imported code", serverPkg) + } + pkg := pkgInfo.Pkg + obj := pkg.Scope().Lookup(serverType) + if obj == nil { + return nil, nil, errgo.Newf("type %s not found in %s", serverType, serverPkg) + } + objTypeName, ok := obj.(*types.TypeName) + if !ok { + return nil, nil, errgo.Newf("%s is not a type", serverType) + } + // Use the pointer type to get as many methods as possible. + ptrObjType := types.NewPointer(objTypeName.Type()) + + imports := map[string]string{ + "github.com/juju/httprequest": "httprequest", + } + var methods []method + mset := types.NewMethodSet(ptrObjType) + for i := 0; i < mset.Len(); i++ { + sel := mset.At(i) + if !sel.Obj().Exported() { + continue + } + name := sel.Obj().Name() + if name == "Close" { + continue + } + ptype, rtype, err := parseMethodType(sel.Type().(*types.Signature)) + if err != nil { + fmt.Fprintf(os.Stderr, "ignoring method %s: %v\n", name, err) + continue + } + comment := docComment(prog, sel) + methods = append(methods, method{ + Name: name, + Doc: comment, + ParamType: typeStr(ptype, imports), + RespType: typeStr(rtype, imports), + }) + } + var allImports []string + for path := range imports { + allImports = append(allImports, path) + } + return methods, allImports, nil +} + +// docComment returns the doc comment for the method referred to +// by the given selection. +func docComment(prog *loader.Program, sel *types.Selection) string { + obj := sel.Obj() + tokFile := prog.Fset.File(obj.Pos()) + if tokFile == nil { + panic("no file found for method") + } + filename := tokFile.Name() + for _, pkgInfo := range prog.AllPackages { + for _, f := range pkgInfo.Files { + if tokFile := prog.Fset.File(f.Pos()); tokFile == nil || tokFile.Name() != filename { + continue + } + // We've found the file we're looking for. Now traverse all + // top level declarations looking for the right function declaration. + for _, decl := range f.Decls { + fdecl, ok := decl.(*ast.FuncDecl) + if ok && fdecl.Name.Pos() == obj.Pos() { + // Found it! + return commentStr(fdecl.Doc) + } + } + } + } + panic("method declaration not found") +} + +func commentStr(c *ast.CommentGroup) string { + if c == nil { + return "" + } + var b []byte + for i, cc := range c.List { + if i > 0 { + b = append(b, '\n') + } + b = append(b, cc.Text...) + } + return string(b) +} + +// typeStr returns the type string to be used when using the +// given type. It adds any needed import paths to the given +// imports map (map from package path to package id). 
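+//
+// For example (an illustrative sketch with hypothetical package names):
+// given a type Foo defined in package example.com/api, typeStr returns
+// "api.Foo" and records "example.com/api" -> "api" in imports so that
+// the generated file can import the package.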
+func typeStr(t types.Type, imports map[string]string) string { + if t == nil { + return "" + } + qualify := func(pkg *types.Package) string { + if name := imports[pkg.Path()]; name != "" { + return name + } + name := pkg.Name() + // Make sure we're not duplicating the name. + // TODO if we are, make a new non-duplicated version. + for oldPkg, oldName := range imports { + if oldName == name { + panic(errgo.Newf("duplicate package name %s vs %s", pkg.Path(), oldPkg)) + } + } + imports[pkg.Path()] = name + return name + } + return types.TypeString(t, qualify) +} + +func parseMethodType(t *types.Signature) (ptype, rtype types.Type, err error) { + mp := t.Params() + if mp.Len() != 1 && mp.Len() != 2 { + return nil, nil, errgo.New("wrong argument count") + } + ptype0 := mp.At(mp.Len() - 1).Type() + ptype1, ok := ptype0.(*types.Pointer) + if !ok { + return nil, nil, errgo.New("parameter is not a pointer") + } + ptype = ptype1.Elem() + if _, ok := ptype.Underlying().(*types.Struct); !ok { + return nil, nil, errgo.Newf("parameter is %s, not a pointer to struct", ptype1.Elem()) + } + rp := t.Results() + if rp.Len() > 2 { + return nil, nil, errgo.New("wrong result count") + } + if rp.Len() == 2 { + rtype = rp.At(0).Type() + } + return ptype, rtype, nil +} === added file 'src/github.com/juju/httprequest/dependencies.tsv' --- src/github.com/juju/httprequest/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/testing git f521911d9a79aeb62c051fe18e689796369c5564 2015-05-29T04:40:43Z +github.com/julienschmidt/httprouter git b59a38004596b696aca7aa2adccfa68760864d86 2015-04-08T17:04:29Z +golang.org/x/net git 446d52dd4018303a13b36097e26d0888aca5d6ef 2015-07-21T02:27:38Z +golang.org/x/tools git c5ca59aab8c27791ce3f820caad760cff360cfc8 2015-07-14T18:01:18Z +gopkg.in/check.v1 git 64131543e7896d5bcc6bd5a76287eb75ea96c673 2014-10-24T13:38:53Z +gopkg.in/errgo.v1 git 81357a83344ddd9f7772884874e5622c2a3da21c 2014-10-13T17:33:38Z +gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z === added file 'src/github.com/juju/httprequest/example_handlers_test.go' --- src/github.com/juju/httprequest/example_handlers_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/example_handlers_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,65 @@ +package httprequest_test + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + + "github.com/julienschmidt/httprouter" + + "github.com/juju/httprequest" +) + +type arithHandler struct { +} + +type number struct { + N int +} + +func (arithHandler) Add(arg *struct { + httprequest.Route `httprequest:"GET /:A/add/:B"` + A int `httprequest:",path"` + B int `httprequest:",path"` +}) (number, error) { + return number{ + N: arg.A + arg.B, + }, nil +} + +func ExampleErrorMapper_Handlers() { + f := func(p httprequest.Params) (arithHandler, error) { + fmt.Printf("handle %s %s\n", p.Request.Method, p.Request.URL) + return arithHandler{}, nil + } + router := httprouter.New() + for _, h := range exampleErrorMapper.Handlers(f) { + router.Handle(h.Method, h.Path, h.Handle) + } + srv := httptest.NewServer(router) + resp, err := http.Get(srv.URL + "/123/add/11") + if err != nil { + panic(err) + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + panic("status " + resp.Status) + } + fmt.Println("result:") + io.Copy(os.Stdout, resp.Body) + // Output: 
handle GET /123/add/11 + // result: + // {"N":134} +} + +type exampleErrorResponse struct { + Message string +} + +var exampleErrorMapper httprequest.ErrorMapper = func(err error) (int, interface{}) { + return http.StatusInternalServerError, &exampleErrorResponse{ + Message: err.Error(), + } +} === added file 'src/github.com/juju/httprequest/export_test.go' --- src/github.com/juju/httprequest/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +package httprequest + +var AppendURL = appendURL +var CheckIsJSON = checkIsJSON +var MaxErrorBodySize = &maxErrorBodySize === added file 'src/github.com/juju/httprequest/fields_test.go' --- src/github.com/juju/httprequest/fields_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/fields_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,263 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package httprequest + +import ( + "reflect" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +type fieldsSuite struct{} + +var _ = gc.Suite(&fieldsSuite{}) + +type structField struct { + name string + index []int +} + +var fieldsTests = []struct { + about string + val interface{} + expect []structField +}{{ + about: "simple struct", + val: struct { + A int + B string + C bool + }{}, + expect: []structField{{ + name: "A", + index: []int{0}, + }, { + name: "B", + index: []int{1}, + }, { + name: "C", + index: []int{2}, + }}, +}, { + about: "non-embedded struct member", + val: struct { + A struct { + X int + } + }{}, + expect: []structField{{ + name: "A", + index: []int{0}, + }}, +}, { + about: "embedded exported struct", + val: struct { + SFG + }{}, + expect: []structField{{ + name: "SFG", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }, { + name: "G", + index: []int{0, 1}, + }}, +}, { + about: "embedded unexported struct", + val: struct { + sFG + }{}, + expect: []structField{{ + name: "sFG", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }, { + name: "G", + index: []int{0, 1}, + }}, +}, { + about: "two embedded structs with cancelling members", + val: struct { + SFG + SF + }{}, + expect: []structField{{ + name: "SFG", + index: []int{0}, + }, { + name: "G", + index: []int{0, 1}, + }, { + name: "SF", + index: []int{1}, + }}, +}, { + about: "embedded structs with same fields at different depths", + val: struct { + SFGH3 + SG1 + SFG2 + SF2 + L int + }{}, + expect: []structField{{ + name: "SFGH3", + index: []int{0}, + }, { + name: "SFGH2", + index: []int{0, 0}, + }, { + name: "SFGH1", + index: []int{0, 0, 0}, + }, { + name: "SFGH", + index: []int{0, 0, 0, 0}, + }, { + name: "H", + index: []int{0, 0, 0, 0, 2}, + }, { + name: "SG1", + index: []int{1}, + }, { + name: "SG", + index: []int{1, 0}, + }, { + name: "G", + index: []int{1, 0, 0}, + }, { + name: "SFG2", + index: []int{2}, + }, { + name: "SFG1", + index: []int{2, 0}, + }, { + name: "SFG", + index: []int{2, 0, 0}, + }, { + name: "SF2", + index: []int{3}, + }, { + name: "SF1", + index: []int{3, 0}, + }, { + name: "SF", + index: []int{3, 0, 0}, + }, { + name: "L", + index: []int{4}, + }}, +}, { + about: "embedded pointer struct", + val: struct { + *SF + }{}, + expect: []structField{{ + name: "SF", + index: []int{0}, + }, { + name: "F", + index: []int{0, 0}, + }}, +}, { + about: "embedded not a pointer", + val: struct { + M + }{}, + expect: []structField{{ + name: "M", + index: []int{0}, + }}, +}} + +type SFG struct { + F 
int `httprequest:",form"` + G int `httprequest:",form"` +} + +type SFG1 struct { + SFG +} + +type SFG2 struct { + SFG1 +} + +type SFGH struct { + F int `httprequest:",form"` + G int `httprequest:",form"` + H int `httprequest:",form"` +} + +type SFGH1 struct { + SFGH +} + +type SFGH2 struct { + SFGH1 +} + +type SFGH3 struct { + SFGH2 +} + +type SF struct { + F int `httprequest:",form"` +} + +type SF1 struct { + SF +} + +type SF2 struct { + SF1 +} + +type SG struct { + G int `httprequest:",form"` +} + +type SG1 struct { + SG +} + +type sFG struct { + F int `httprequest:",form"` + G int `httprequest:",form"` +} + +type M map[string]interface{} + +func (*fieldsSuite) TestFields(c *gc.C) { + for i, test := range fieldsTests { + c.Logf("%d: %s", i, test.about) + t := reflect.TypeOf(test.val) + got := fields(t) + c.Assert(got, gc.HasLen, len(test.expect)) + for j, field := range got { + expect := test.expect[j] + c.Logf("field %d: %s", j, expect.name) + gotField := t.FieldByIndex(field.Index) + // Unfortunately, FieldByIndex does not return + // a field with the same index that we passed in, + // so we set it to the expected value so that + // it can be compared later with the result of FieldByName. + gotField.Index = field.Index + expectField := t.FieldByIndex(expect.index) + // ditto. + expectField.Index = expect.index + c.Assert(gotField, jc.DeepEquals, expectField) + + // Sanity check that we can actually access the field by the + // expected name. + expectField1, ok := t.FieldByName(expect.name) + c.Assert(ok, jc.IsTrue) + c.Assert(expectField1, jc.DeepEquals, expectField) + } + } +} === added file 'src/github.com/juju/httprequest/handler.go' --- src/github.com/juju/httprequest/handler.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/handler.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,541 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package httprequest + +import ( + "encoding/json" + "io" + "net/http" + "reflect" + + "github.com/julienschmidt/httprouter" + "gopkg.in/errgo.v1" +) + +// ErrorMapper holds a function that can convert a Go error +// into a form that can be returned as a JSON body from an HTTP request. +// +// The httpStatus value reports the desired HTTP status. +// +// If the returned errorBody implements HeaderSetter, then +// that method will be called to add custom headers to the request. +type ErrorMapper func(err error) (httpStatus int, errorBody interface{}) + +var ( + paramsType = reflect.TypeOf(Params{}) + errorType = reflect.TypeOf((*error)(nil)).Elem() + httpResponseWriterType = reflect.TypeOf((*http.ResponseWriter)(nil)).Elem() + httpHeaderType = reflect.TypeOf(http.Header(nil)) + httpRequestType = reflect.TypeOf((*http.Request)(nil)) + ioCloserType = reflect.TypeOf((*io.Closer)(nil)).Elem() +) + +// Handle converts a function into a Handler. The argument f +// must be a function of one of the following six forms, where ArgT +// must be a struct type acceptable to Unmarshal and ResultT is a type +// that can be marshaled as JSON: +// +// func(p Params, arg *ArgT) +// func(p Params, arg *ArgT) error +// func(p Params, arg *ArgT) (ResultT, error) +// +// func(arg *ArgT) +// func(arg *ArgT) error +// func(arg *ArgT) (ResultT, error) +// +// When processing a call to the returned handler, the provided +// parameters are unmarshaled into a new ArgT value using Unmarshal, +// then f is called with this value. 
If the unmarshaling fails, f will +// not be called and the unmarshal error will be written as a JSON +// response. +// +// As an additional special case to the rules defined in Unmarshal, +// the tag on an anonymous field of type Route +// specifies the method and path to use in the HTTP request. +// It should hold two space-separated fields; the first specifies +// the HTTP method, the second the URL path to use for the request. +// If this is given, the returned handler will hold that +// method and path, otherwise they will be empty. +// +// If an error is returned from f, it is passed through the error mapper before +// writing as a JSON response. +// +// In the third form, when no error is returned, the result is written +// as a JSON response with status http.StatusOK. Also in this case, +// any calls to Params.Response.Write or Params.Response.WriteHeader +// will be ignored, as the response code and data should be defined +// entirely by the returned result and error. +// +// Handle will panic if the provided function is not in one +// of the above forms. +func (e ErrorMapper) Handle(f interface{}) Handler { + fv := reflect.ValueOf(f) + hf, rt, err := e.handlerFunc(fv.Type()) + if err != nil { + panic(errgo.Notef(err, "bad handler function")) + } + return Handler{ + Method: rt.method, + Path: rt.path, + Handle: func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { + hf(fv, Params{ + Response: w, + Request: req, + PathVar: p, + }) + }, + } +} + +// Handlers returns a list of handlers that will be handled by the value +// returned by the given argument, which must be a function of the form: +// +// func(httprequest.Params) (T, error) +// +// for some type T. Each exported method defined on T defines a handler, +// and should be in one of the forms accepted by ErrorMapper.Handle. +// +// Handlers will panic if f is not of the required form, no methods are +// defined on T or any method defined on T is not suitable for Handle. +// +// When any of the returned handlers is invoked, f will be called and +// then the appropriate method will be called on the value it returns. +// +// If T implements io.Closer, its Close method will be called +// after the request is completed. +func (e ErrorMapper) Handlers(f interface{}) []Handler { + fv := reflect.ValueOf(f) + wt, err := checkHandlersWrapperFunc(fv) + if err != nil { + panic(errgo.Notef(err, "bad handler function")) + } + hasClose := wt.Implements(ioCloserType) + hs := make([]Handler, 0, wt.NumMethod()) + numMethod := 0 + for i := 0; i < wt.NumMethod(); i++ { + i := i + m := wt.Method(i) + if m.PkgPath != "" { + continue + } + if m.Name == "Close" { + if !hasClose { + panic(errgo.Newf("bad type for Close method (got %v want func(%v) error", m.Type, wt)) + } + continue + } + // The type in the Method struct includes the receiver type, + // which we don't want to look at (and we won't see when + // we get the method from the actual value at dispatch time), + // so we hide it. 
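+		//
+		// For example (sketch): a method declared as
+		//	func (h *T) M(arg *ArgT) error
+		// appears here with type func(*T, *ArgT) error; withoutReceiver
+		// presents it as func(*ArgT) error, the same shape that
+		// handlerFunc accepts for an ordinary function.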
+ mt := withoutReceiver(m.Type) + hf, rt, err := e.handlerFunc(mt) + if err != nil { + panic(errgo.Notef(err, "bad type for method %s", m.Name)) + } + if rt.method == "" || rt.path == "" { + panic(errgo.Notef(err, "method %s does not specify route method and path", m.Name)) + } + handler := func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { + terrv := fv.Call([]reflect.Value{ + reflect.ValueOf(Params{ + Response: w, + Request: req, + PathVar: p, + }), + }) + tv, errv := terrv[0], terrv[1] + if !errv.IsNil() { + e.WriteError(w, errv.Interface().(error)) + return + } + if hasClose { + defer tv.Interface().(io.Closer).Close() + } + hf(tv.Method(i), Params{ + Response: w, + Request: req, + PathVar: p, + }) + + } + hs = append(hs, Handler{ + Method: rt.method, + Path: rt.path, + Handle: handler, + }) + numMethod++ + } + if numMethod == 0 { + panic(errgo.Newf("no exported methods defined on %s", wt)) + } + return hs +} + +func checkHandlersWrapperFunc(fv reflect.Value) (reflect.Type, error) { + ft := fv.Type() + if ft.Kind() != reflect.Func { + return nil, errgo.Newf("expected function, got %v", ft) + } + if fv.IsNil() { + return nil, errgo.Newf("function is nil") + } + if n := ft.NumIn(); n != 1 { + return nil, errgo.Newf("got %d arguments, want 1", n) + } + if n := ft.NumOut(); n != 2 { + return nil, errgo.Newf("function returns %d values, want 2", n) + } + if ft.In(0) != paramsType || + ft.Out(1) != errorType { + return nil, errgo.Newf("invalid argument or return values, want func(httprequest.Params) (any, error), got %v", ft) + } + return ft.Out(0), nil +} + +// Handler defines a HTTP handler that will handle the +// given HTTP method at the given httprouter path +type Handler struct { + Method string + Path string + Handle httprouter.Handle +} + +func checkHandleType(t reflect.Type) (*requestType, error) { + if t.Kind() != reflect.Func { + return nil, errgo.New("not a function") + } + if n := t.NumIn(); n != 1 && n != 2 { + return nil, errgo.Newf("has %d parameters, need 1 or 2", t.NumIn()) + } + if t.NumOut() > 2 { + return nil, errgo.Newf("has %d result parameters, need 0, 1 or 2", t.NumOut()) + } + if t.NumIn() == 2 { + if t.In(0) != paramsType { + return nil, errgo.Newf("first argument is %v, need httprequest.Params", t.In(0)) + } + } else { + if t.In(0) == paramsType { + return nil, errgo.Newf("no argument parameter after Params argument") + } + } + pt, err := getRequestType(t.In(t.NumIn() - 1)) + if err != nil { + return nil, errgo.Notef(err, "last argument cannot be used for Unmarshal") + } + if t.NumOut() > 0 { + // func(p Params, arg *ArgT) error + // func(p Params, arg *ArgT) (ResultT, error) + if et := t.Out(t.NumOut() - 1); et != errorType { + return nil, errgo.Newf("final result parameter is %s, need error", et) + } + } + return pt, nil +} + +// handlerFunc returns a function that will call a function of the given type, +// unmarshaling request parameters and marshaling the response as +// appropriate. +func (e ErrorMapper) handlerFunc(ft reflect.Type) (func(fv reflect.Value, p Params), *requestType, error) { + rt, err := checkHandleType(ft) + if err != nil { + return nil, nil, errgo.Mask(err) + } + return e.handleResult(ft, handleParams(ft, rt)), rt, nil +} + +// handleParams handles unmarshaling the parameters to be passed to +// a function of type ft. The rt parameter describes ft (as determined by +// checkHandleType). 
The returned function accepts the actual function +// value to use in the call as well as the request parameters and returns +// the result value to use for marshaling. +func handleParams( + ft reflect.Type, + rt *requestType, +) func(fv reflect.Value, p Params) ([]reflect.Value, error) { + returnJSON := ft.NumOut() > 1 + needsParams := ft.In(0) == paramsType + if needsParams { + argStructType := ft.In(1).Elem() + return func(fv reflect.Value, p Params) ([]reflect.Value, error) { + if err := p.Request.ParseForm(); err != nil { + return nil, errgo.WithCausef(err, ErrUnmarshal, "cannot parse HTTP request form") + } + if returnJSON { + p.Response = headerOnlyResponseWriter{p.Response.Header()} + } + argv := reflect.New(argStructType) + if err := unmarshal(p, argv, rt); err != nil { + return nil, errgo.NoteMask(err, "cannot unmarshal parameters", errgo.Is(ErrUnmarshal)) + } + return fv.Call([]reflect.Value{ + reflect.ValueOf(p), + argv, + }), nil + } + } + argStructType := ft.In(0).Elem() + return func(fv reflect.Value, p Params) ([]reflect.Value, error) { + if err := p.Request.ParseForm(); err != nil { + return nil, errgo.WithCausef(err, ErrUnmarshal, "cannot parse HTTP request form") + } + argv := reflect.New(argStructType) + if err := unmarshal(p, argv, rt); err != nil { + return nil, errgo.NoteMask(err, "cannot unmarshal parameters", errgo.Is(ErrUnmarshal)) + } + return fv.Call([]reflect.Value{argv}), nil + } + +} + +// handleResult handles the marshaling of the result values from the call to a function +// of type ft. The returned function accepts the actual function value to use in the +// call as well as the request parameters. +func (e ErrorMapper) handleResult( + ft reflect.Type, + f func(fv reflect.Value, p Params) ([]reflect.Value, error), +) func(fv reflect.Value, p Params) { + switch ft.NumOut() { + case 0: + // func(w http.ResponseWriter, p Params, arg *ArgT) + return func(fv reflect.Value, p Params) { + _, err := f(fv, p) + if err != nil { + e.WriteError(p.Response, err) + } + } + case 1: + // func(w http.ResponseWriter, p Params, arg *ArgT) error + return func(fv reflect.Value, p Params) { + out, err := f(fv, p) + if err != nil { + e.WriteError(p.Response, err) + return + } + herr := out[0].Interface() + if herr != nil { + e.WriteError(p.Response, herr.(error)) + } + } + case 2: + // func(header http.Header, p Params, arg *ArgT) (ResultT, error) + return func(fv reflect.Value, p Params) { + out, err := f(fv, p) + if err != nil { + e.WriteError(p.Response, err) + return + } + herr := out[1].Interface() + if herr != nil { + e.WriteError(p.Response, herr.(error)) + return + } + err = WriteJSON(p.Response, http.StatusOK, out[0].Interface()) + if err != nil { + e.WriteError(p.Response, err) + } + } + default: + panic("unreachable") + } +} + +// ToHTTP converts an httprouter.Handle into an http.Handler. +// It will pass no path variables to h. +func ToHTTP(h httprouter.Handle) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h(w, req, nil) + }) +} + +// JSONHandler is like httprouter.Handle except that it returns a +// body (to be converted to JSON) and an error. +// The Header parameter can be used to set +// custom headers on the response. +type JSONHandler func(Params) (interface{}, error) + +// ErrorHandler is like httprouter.Handle except it returns an error +// which may be returned as the error body of the response. +// An ErrorHandler function should not itself write to the ResponseWriter +// if it returns an error. 
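+//
+// For example (a minimal sketch; the error mapper em, the route, and
+// the removeItem helper are hypothetical):
+//
+//	router.Handle("DELETE", "/item/:id", em.HandleErrors(
+//		func(p Params) error {
+//			return removeItem(p.PathVar.ByName("id"))
+//		},
+//	))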
+type ErrorHandler func(Params) error + +// HandleJSON returns a handler that writes the return value of handle +// as a JSON response. If handle returns an error, it is passed through +// the error mapper. +func (e ErrorMapper) HandleJSON(handle JSONHandler) httprouter.Handle { + return func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { + val, err := handle(Params{ + Response: headerOnlyResponseWriter{w.Header()}, + Request: req, + PathVar: p, + }) + if err == nil { + if err = WriteJSON(w, http.StatusOK, val); err == nil { + return + } + } + e.WriteError(w, err) + } +} + +// HandleErrors returns a handler that passes any non-nil error returned +// by handle through the error mapper and writes it as a JSON response. +func (e ErrorMapper) HandleErrors(handle ErrorHandler) httprouter.Handle { + return func(w http.ResponseWriter, req *http.Request, p httprouter.Params) { + w1 := responseWriter{ + ResponseWriter: w, + } + if err := handle(Params{ + Response: &w1, + Request: req, + PathVar: p, + }); err != nil { + // We write the error only if the header hasn't + // already been written, because if it has, then + // we will not be able to set the appropriate error + // response code, and there's a danger that we + // may be corrupting output by appending + // a JSON error message to it. + if !w1.headerWritten { + e.WriteError(w, err) + } + // TODO log the error? + } + } +} + +// WriteError writes an error to a ResponseWriter +// and sets the HTTP status code. +// +// It uses WriteJSON to write the error body returned from +// the ErrorMapper so it is possible to add custom +// headers to the HTTP error response by implementing +// HeaderSetter. +func (e ErrorMapper) WriteError(w http.ResponseWriter, err error) { + status, resp := e(err) + WriteJSON(w, status, resp) +} + +// WriteJSON writes the given value to the ResponseWriter +// and sets the HTTP status to the given code. +// +// If val implements the HeaderSetter interface, the SetHeader +// method will be called to add additional headers to the +// HTTP response. It is called after the Content-Type header +// has been added, so can be used to override the content type +// if required. +func WriteJSON(w http.ResponseWriter, code int, val interface{}) error { + // TODO consider marshalling directly to w using json.NewEncoder. + // pro: this will not require a full buffer allocation. + // con: if there's an error after the first write, it will be lost. + data, err := json.Marshal(val) + if err != nil { + // TODO(rog) log an error if this fails and lose the + // error return, because most callers will need + // to do that anyway. + return errgo.Mask(err) + } + w.Header().Set("content-type", "application/json") + if headerSetter, ok := val.(HeaderSetter); ok { + headerSetter.SetHeader(w.Header()) + } + w.WriteHeader(code) + w.Write(data) + return nil +} + +// HeaderSetter is the interface checked for by WriteJSON. +// If implemented on a value passed to WriteJSON, the SetHeader +// method will be called to allow it to set custom headers +// on the response. +type HeaderSetter interface { + SetHeader(http.Header) +} + +// CustomHeader is a type that allows a JSON value to +// set custom HTTP headers associated with the +// HTTP response. +type CustomHeader struct { + // Body holds the JSON-marshaled body of the response. + Body interface{} + + // SetHeaderFunc holds a function that will be called + // to set any custom headers on the response. 
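+	//
+	// For example (sketch, where errBody is any JSON-marshalable
+	// error body), an ErrorMapper can end with:
+	//
+	//	return http.StatusNotAcceptable, CustomHeader{
+	//		Body:          errBody,
+	//		SetHeaderFunc: func(h http.Header) { h.Set("Some-Header", "value") },
+	//	}
+	//
+	// to attach extra headers to the response it produces.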
+ SetHeaderFunc func(http.Header) +} + +// MarshalJSON implements json.Marshaler by marshaling +// h.Body. +func (h CustomHeader) MarshalJSON() ([]byte, error) { + return json.Marshal(h.Body) +} + +// SetHeader implements HeaderSetter by calling +// h.SetHeaderFunc. +func (h CustomHeader) SetHeader(header http.Header) { + h.SetHeaderFunc(header) +} + +// Ensure statically that responseWriter does implement http.Flusher. +var _ http.Flusher = (*responseWriter)(nil) + +// responseWriter wraps http.ResponseWriter but allows us +// to find out whether any body has already been written. +type responseWriter struct { + headerWritten bool + http.ResponseWriter +} + +func (w *responseWriter) Write(data []byte) (int, error) { + w.headerWritten = true + return w.ResponseWriter.Write(data) +} + +func (w *responseWriter) WriteHeader(code int) { + w.headerWritten = true + w.ResponseWriter.WriteHeader(code) +} + +// Flush implements http.Flusher.Flush. +func (w *responseWriter) Flush() { + w.headerWritten = true + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +type headerOnlyResponseWriter struct { + h http.Header +} + +func (w headerOnlyResponseWriter) Header() http.Header { + return w.h +} + +func (w headerOnlyResponseWriter) Write([]byte) (int, error) { + // TODO log or panic when this happens? + return 0, errgo.New("inappropriate call to ResponseWriter.Write in JSON-returning handler") +} + +func (w headerOnlyResponseWriter) WriteHeader(code int) { + // TODO log or panic when this happens? +} + +func withoutReceiver(t reflect.Type) reflect.Type { + return withoutReceiverType{t} +} + +type withoutReceiverType struct { + reflect.Type +} + +func (t withoutReceiverType) NumIn() int { + return t.Type.NumIn() - 1 +} + +func (t withoutReceiverType) In(i int) reflect.Type { + return t.Type.In(i + 1) +} === added file 'src/github.com/juju/httprequest/handler_test.go' --- src/github.com/juju/httprequest/handler_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/handler_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,981 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
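+
+// The tests below exercise ErrorMapper.Handle and related helpers
+// using the errorMapper defined later in this file. As a minimal
+// sketch of the pattern under test (myArg and myResult are
+// hypothetical, with myArg assumed to embed an httprequest.Route
+// tag that supplies the method and path):
+//
+//	h := errorMapper.Handle(func(p httprequest.Params, arg *myArg) (*myResult, error) { ... })
+//	router := httprouter.New()
+//	router.Handle(h.Method, h.Path, h.Handle)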
+ +package httprequest_test + +import ( + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "net/url" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/julienschmidt/httprouter" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "github.com/juju/httprequest" +) + +type handlerSuite struct{} + +var _ = gc.Suite(&handlerSuite{}) + +var handleTests = []struct { + about string + f func(c *gc.C) interface{} + req *http.Request + pathVar httprouter.Params + expectMethod string + expectPath string + expectBody interface{} + expectStatus int +}{{ + about: "function with no return", + f: func(c *gc.C) interface{} { + type testStruct struct { + A string `httprequest:"a,path"` + B map[string]int `httprequest:",body"` + C int `httprequest:"c,form"` + } + return func(p httprequest.Params, s *testStruct) { + c.Assert(s, jc.DeepEquals, &testStruct{ + A: "A", + B: map[string]int{"hello": 99}, + C: 43, + }) + c.Assert(p.PathVar, jc.DeepEquals, httprouter.Params{{ + Key: "a", + Value: "A", + }}) + c.Assert(p.Request.Form, jc.DeepEquals, url.Values{ + "c": {"43"}, + }) + p.Response.Header().Set("Content-Type", "application/json") + p.Response.Write([]byte("true")) + } + }, + req: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Form: url.Values{ + "c": {"43"}, + }, + Body: body(`{"hello": 99}`), + }, + pathVar: httprouter.Params{{ + Key: "a", + Value: "A", + }}, + expectBody: true, +}, { + about: "function with error return that returns no error", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) error { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + p.Response.Header().Set("Content-Type", "application/json") + p.Response.Write([]byte("true")) + return nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: true, +}, { + about: "function with error return that returns an error", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) error { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return errUnauth + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }, + expectStatus: http.StatusUnauthorized, +}, { + about: "function with value return that returns a value", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) (int, error) { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return 1234, nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: 1234, +}, { + about: "function with value return that returns an error", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) (int, error) { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return 0, errUnauth + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }, + expectStatus: http.StatusUnauthorized, +}, { + about: "function with value return that writes to p.Response", + f: func(c *gc.C) interface{} { + type 
testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) (int, error) { + _, err := p.Response.Write(nil) + c.Assert(err, gc.ErrorMatches, "inappropriate call to ResponseWriter.Write in JSON-returning handler") + p.Response.WriteHeader(http.StatusTeapot) + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return 1234, nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: 1234, +}, { + about: "function with no Params and no return", + f: func(c *gc.C) interface{} { + type testStruct struct { + A string `httprequest:"a,path"` + B map[string]int `httprequest:",body"` + C int `httprequest:"c,form"` + } + return func(s *testStruct) { + c.Assert(s, jc.DeepEquals, &testStruct{ + A: "A", + B: map[string]int{"hello": 99}, + C: 43, + }) + } + }, + req: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Form: url.Values{ + "c": {"43"}, + }, + Body: body(`{"hello": 99}`), + }, + pathVar: httprouter.Params{{ + Key: "a", + Value: "A", + }}, +}, { + about: "function with no Params with error return that returns no error", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(s *testStruct) error { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, +}, { + about: "function with no Params with error return that returns an error", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(s *testStruct) error { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return errUnauth + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }, + expectStatus: http.StatusUnauthorized, +}, { + about: "function with no Params with value return that returns a value", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(s *testStruct) (int, error) { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return 1234, nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: 1234, +}, { + about: "function with no Params with value return that returns an error", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(s *testStruct) (int, error) { + c.Assert(s, jc.DeepEquals, &testStruct{123}) + return 0, errUnauth + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "123", + }}, + expectBody: httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }, + expectStatus: http.StatusUnauthorized, +}, { + about: "error when unmarshaling", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) (int, error) { + c.Errorf("function should not have been called") + return 0, nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "not a number", + }}, + expectBody: httprequest.RemoteError{ + Message: `cannot unmarshal parameters: cannot unmarshal into field: cannot parse "not a number" into int: expected integer`, + Code: "bad request", + }, + expectStatus: http.StatusBadRequest, +}, { + about: "error when unmarshaling, no 
Params", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(s *testStruct) (int, error) { + c.Errorf("function should not have been called") + return 0, nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "not a number", + }}, + expectBody: httprequest.RemoteError{ + Message: `cannot unmarshal parameters: cannot unmarshal into field: cannot parse "not a number" into int: expected integer`, + Code: "bad request", + }, + expectStatus: http.StatusBadRequest, +}, { + about: "error when unmarshaling single value return", + f: func(c *gc.C) interface{} { + type testStruct struct { + A int `httprequest:"a,path"` + } + return func(p httprequest.Params, s *testStruct) error { + c.Errorf("function should not have been called") + return nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "a", + Value: "not a number", + }}, + expectBody: httprequest.RemoteError{ + Message: `cannot unmarshal parameters: cannot unmarshal into field: cannot parse "not a number" into int: expected integer`, + Code: "bad request", + }, + expectStatus: http.StatusBadRequest, +}, { + about: "return type that can't be marshaled as JSON", + f: func(c *gc.C) interface{} { + return func(p httprequest.Params, s *struct{}) (map[int]int, error) { + return map[int]int{0: 1}, nil + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{}, + expectBody: httprequest.RemoteError{ + Message: "json: unsupported type: map[int]int", + }, + expectStatus: http.StatusInternalServerError, +}, { + about: "argument with route", + f: func(c *gc.C) interface{} { + type testStruct struct { + httprequest.Route `httprequest:"GET /foo/:bar"` + A string `httprequest:"bar,path"` + } + return func(s *testStruct) { + c.Check(s.A, gc.Equals, "val") + } + }, + req: &http.Request{}, + pathVar: httprouter.Params{{ + Key: "bar", + Value: "val", + }}, + expectMethod: "GET", + expectPath: "/foo/:bar", +}} + +func (*handlerSuite) TestHandle(c *gc.C) { + for i, test := range handleTests { + c.Logf("%d: %s", i, test.about) + h := errorMapper.Handle(test.f(c)) + c.Assert(h.Method, gc.Equals, test.expectMethod) + c.Assert(h.Path, gc.Equals, test.expectPath) + rec := httptest.NewRecorder() + h.Handle(rec, test.req, test.pathVar) + if test.expectStatus == 0 { + test.expectStatus = http.StatusOK + } + httptesting.AssertJSONResponse(c, rec, test.expectStatus, test.expectBody) + } +} + +var handlePanicTests = []struct { + f interface{} + expect string +}{{ + f: 42, + expect: "bad handler function: not a function", +}, { + f: func(httprequest.Params) {}, + expect: "bad handler function: no argument parameter after Params argument", +}, { + f: func(httprequest.Params, *struct{}, struct{}) {}, + expect: "bad handler function: has 3 parameters, need 1 or 2", +}, { + f: func(httprequest.Params, *struct{}) struct{} { return struct{}{} }, + expect: "bad handler function: final result parameter is struct {}, need error", +}, { + f: func(http.ResponseWriter, httprequest.Params) (struct{}, error) { + return struct{}{}, nil + }, + expect: "bad handler function: first argument is http.ResponseWriter, need httprequest.Params", +}, { + f: func(httprequest.Params, *struct{}) (struct{}, struct{}) { + return struct{}{}, struct{}{} + }, + expect: "bad handler function: final result parameter is struct {}, need error", +}, { + f: func(*http.Request, *struct{}) {}, + expect: `bad handler function: first argument is \*http.Request, need httprequest.Params`, +}, { + f: 
func(httprequest.Params, struct{}) {}, + expect: "bad handler function: last argument cannot be used for Unmarshal: type is not pointer to struct", +}, { + f: func(httprequest.Params, *struct { + A int `httprequest:"a,the-ether"` + }) { + }, + expect: `bad handler function: last argument cannot be used for Unmarshal: bad tag "httprequest:\\"a,the-ether\\"" in field A: unknown tag flag "the-ether"`, +}, { + f: func(httprequest.Params, *struct{}) (a, b, c struct{}) { return }, + expect: `bad handler function: has 3 result parameters, need 0, 1 or 2`, +}, { + f: func(*struct { + httprequest.Route + }) { + }, + expect: `bad handler function: last argument cannot be used for Unmarshal: bad route tag "": no httprequest tag`, +}, { + f: func(*struct { + httprequest.Route `othertag:"foo"` + }) { + }, + expect: `bad handler function: last argument cannot be used for Unmarshal: bad route tag "othertag:\\"foo\\"": no httprequest tag`, +}, { + f: func(*struct { + httprequest.Route `httprequest:""` + }) { + }, + expect: `bad handler function: last argument cannot be used for Unmarshal: bad route tag "httprequest:\\"\\"": no httprequest tag`, +}, { + f: func(*struct { + httprequest.Route `httprequest:"GET /foo /bar"` + }) { + }, + expect: `bad handler function: last argument cannot be used for Unmarshal: bad route tag "httprequest:\\"GET /foo /bar\\"": wrong field count`, +}, { + f: func(*struct { + httprequest.Route `httprequest:"BAD /foo"` + }) { + }, + expect: `bad handler function: last argument cannot be used for Unmarshal: bad route tag "httprequest:\\"BAD /foo\\"": invalid method`, +}} + +func (*handlerSuite) TestHandlePanicsWithBadFunctions(c *gc.C) { + for i, test := range handlePanicTests { + c.Logf("%d: %s", i, test.expect) + c.Check(func() { + errorMapper.Handle(test.f) + }, gc.PanicMatches, test.expect) + } +} + +var handlersTests = []struct { + calledMethod string + callParams httptesting.JSONCallParams +}{{ + calledMethod: "M1", + callParams: httptesting.JSONCallParams{ + URL: "/m1/99", + }, +}, { + calledMethod: "M2", + callParams: httptesting.JSONCallParams{ + URL: "/m2/99", + ExpectBody: 999, + }, +}, { + calledMethod: "M3", + callParams: httptesting.JSONCallParams{ + URL: "/m3/99", + ExpectBody: &httprequest.RemoteError{ + Message: "m3 error", + }, + ExpectStatus: http.StatusInternalServerError, + }, +}, { + calledMethod: "M3Post", + callParams: httptesting.JSONCallParams{ + Method: "POST", + URL: "/m3/99", + }, +}} + +func (*handlerSuite) TestHandlers(c *gc.C) { + handleVal := testHandlers{ + c: c, + } + f := func(p httprequest.Params) (*testHandlers, error) { + handleVal.p = p + return &handleVal, nil + } + handlers := errorMapper.Handlers(f) + handlers1 := make([]httprequest.Handler, len(handlers)) + copy(handlers1, handlers) + for i := range handlers1 { + handlers1[i].Handle = nil + } + expectHandlers := []httprequest.Handler{{ + Method: "GET", + Path: "/m1/:p", + }, { + Method: "GET", + Path: "/m2/:p", + }, { + Method: "GET", + Path: "/m3/:p", + }, { + Method: "POST", + Path: "/m3/:p", + }} + c.Assert(handlers1, jc.DeepEquals, expectHandlers) + c.Assert(handlersTests, gc.HasLen, len(expectHandlers)) + + router := httprouter.New() + for _, h := range handlers { + c.Logf("adding %s %s", h.Method, h.Path) + router.Handle(h.Method, h.Path, h.Handle) + } + for i, test := range handlersTests { + c.Logf("test %d: %s", i, test.calledMethod) + handleVal.calledMethod = "" + test.callParams.Handler = router + httptesting.AssertJSONCall(c, test.callParams) + c.Assert(handleVal.calledMethod, 
gc.Equals, test.calledMethod) + } +} + +type testHandlers struct { + calledMethod string + c *gc.C + p httprequest.Params +} + +func (h *testHandlers) M1(p httprequest.Params, arg *struct { + httprequest.Route `httprequest:"GET /m1/:p"` + P int `httprequest:"p,path"` +}) { + h.calledMethod = "M1" + h.c.Check(arg.P, gc.Equals, 99) + h.c.Check(p.Response, gc.Equals, h.p.Response) + h.c.Check(p.Request, gc.Equals, h.p.Request) + h.c.Check(p.PathVar, gc.DeepEquals, h.p.PathVar) +} + +func (h *testHandlers) M2(arg *struct { + httprequest.Route `httprequest:"GET /m2/:p"` + P int `httprequest:"p,path"` +}) (int, error) { + h.calledMethod = "M2" + h.c.Check(arg.P, gc.Equals, 99) + return 999, nil +} + +func (h *testHandlers) unexported() { +} + +func (h *testHandlers) M3(arg *struct { + httprequest.Route `httprequest:"GET /m3/:p"` + P int `httprequest:"p,path"` +}) (int, error) { + h.calledMethod = "M3" + h.c.Check(arg.P, gc.Equals, 99) + return 0, errgo.New("m3 error") +} + +func (h *testHandlers) M3Post(arg *struct { + httprequest.Route `httprequest:"POST /m3/:p"` + P int `httprequest:"p,path"` +}) { + h.calledMethod = "M3Post" + h.c.Check(arg.P, gc.Equals, 99) +} + +var badHandlersFuncTests = []struct { + f interface{} + expectPanic string +}{{ + f: 123, + expectPanic: "bad handler function: expected function, got int", +}, { + f: (func())(nil), + expectPanic: "bad handler function: function is nil", +}, { + f: func() {}, + expectPanic: "bad handler function: got 0 arguments, want 1", +}, { + f: func(http.ResponseWriter, *http.Request) {}, + expectPanic: "bad handler function: got 2 arguments, want 1", +}, { + f: func(httprequest.Params) {}, + expectPanic: "bad handler function: function returns 0 values, want 2", +}, { + f: func(httprequest.Params) string { return "" }, + expectPanic: "bad handler function: function returns 1 values, want 2", +}, { + f: func(httprequest.Params) (string, error, error) { return "", nil, nil }, + expectPanic: "bad handler function: function returns 3 values, want 2", +}, { + f: func(string) (string, error) { return "", nil }, + expectPanic: `bad handler function: invalid argument or return values, want func\(httprequest.Params\) \(any, error\), got func\(string\) \(string, error\)`, +}, { + f: func(httprequest.Params) (string, string) { return "", "" }, + expectPanic: `bad handler function: invalid argument or return values, want func\(httprequest.Params\) \(any, error\), got func\(httprequest.Params\) \(string, string\)`, +}, { + f: func(httprequest.Params) (string, error) { return "", nil }, + expectPanic: `no exported methods defined on string`, +}, { + f: func(httprequest.Params) (a badHandlersType1, b error) { return }, + expectPanic: `bad type for method M: has 3 parameters, need 1 or 2`, +}, { + f: func(httprequest.Params) (a badHandlersType2, b error) { return }, + expectPanic: `method M does not specify route method and path`, +}, { + f: func(httprequest.Params) (a badHandlersType3, b error) { return }, + expectPanic: `bad type for Close method \(got func\(httprequest_test\.badHandlersType3\) want func\(httprequest_test.badHandlersType3\) error`, +}} + +type badHandlersType1 struct{} + +func (badHandlersType1) M(a, b, c int) { +} + +type badHandlersType2 struct{} + +func (badHandlersType2) M(*struct { + P int `httprequest:",path"` +}) { +} + +type badHandlersType3 struct{} + +func (badHandlersType3) M(arg *struct { + httprequest.Route `httprequest:"GET /m1/:P"` + P int `httprequest:",path"` +}) { +} + +func (badHandlersType3) Close() { +} + +func 
(*handlerSuite) TestBadHandlersFunc(c *gc.C) { + for i, test := range badHandlersFuncTests { + c.Logf("test %d: %s", i, test.expectPanic) + c.Check(func() { + errorMapper.Handlers(test.f) + }, gc.PanicMatches, test.expectPanic) + } +} + +func (*handlerSuite) TestHandlersFuncReturningError(c *gc.C) { + handlers := errorMapper.Handlers(func(httprequest.Params) (*testHandlers, error) { + return nil, errgo.WithCausef(errgo.New("failure"), errUnauth, "something") + }) + router := httprouter.New() + for _, h := range handlers { + router.Handle(h.Method, h.Path, h.Handle) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + URL: "/m1/p", + Handler: router, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: &httprequest.RemoteError{ + Message: "something: failure", + Code: "unauthorized", + }, + }) +} + +type closeHandlersType struct { + p int + closed bool +} + +func (h *closeHandlersType) M(arg *struct { + httprequest.Route `httprequest:"GET /m1/:P"` + P int `httprequest:",path"` +}) { + h.p = arg.P +} + +func (h *closeHandlersType) Close() error { + h.closed = true + return nil +} + +func (*handlerSuite) TestHandlersWithTypeThatImplementsIOCloser(c *gc.C) { + var v closeHandlersType + handlers := errorMapper.Handlers(func(httprequest.Params) (*closeHandlersType, error) { + return &v, nil + }) + router := httprouter.New() + for _, h := range handlers { + router.Handle(h.Method, h.Path, h.Handle) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + URL: "/m1/99", + Handler: router, + }) + c.Assert(v.closed, gc.Equals, true) + c.Assert(v.p, gc.Equals, 99) +} + +func (*handlerSuite) TestBadForm(c *gc.C) { + h := errorMapper.Handle(func(p httprequest.Params, _ *struct{}) { + c.Fatalf("shouldn't be called") + }) + testBadForm(c, h.Handle) +} + +func (*handlerSuite) TestBadFormNoParams(c *gc.C) { + h := errorMapper.Handle(func(_ *struct{}) { + c.Fatalf("shouldn't be called") + }) + testBadForm(c, h.Handle) +} + +func testBadForm(c *gc.C, h httprouter.Handle) { + rec := httptest.NewRecorder() + req := &http.Request{ + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/x-www-form-urlencoded"}, + }, + Body: body("%6"), + } + h(rec, req, httprouter.Params{}) + httptesting.AssertJSONResponse(c, rec, http.StatusBadRequest, httprequest.RemoteError{ + Message: `cannot parse HTTP request form: invalid URL escape "%6"`, + Code: "bad request", + }) +} + +func (*handlerSuite) TestToHTTP(c *gc.C) { + var h http.Handler + h = httprequest.ToHTTP(errorMapper.Handle(func(p httprequest.Params, s *struct{}) { + c.Assert(p.PathVar, gc.IsNil) + p.Response.WriteHeader(http.StatusOK) + }).Handle) + rec := httptest.NewRecorder() + req := &http.Request{ + Body: body(""), + } + h.ServeHTTP(rec, req) + c.Assert(rec.Code, gc.Equals, http.StatusOK) +} + +func (*handlerSuite) TestWriteJSON(c *gc.C) { + rec := httptest.NewRecorder() + type Number struct { + N int + } + err := httprequest.WriteJSON(rec, http.StatusTeapot, Number{1234}) + c.Assert(err, gc.IsNil) + c.Assert(rec.Code, gc.Equals, http.StatusTeapot) + c.Assert(rec.Body.String(), gc.Equals, `{"N":1234}`) + c.Assert(rec.Header().Get("content-type"), gc.Equals, "application/json") +} + +var ( + errUnauth = errors.New("unauth") + errBadReq = errors.New("bad request") + errOther = errors.New("other") + errCustomHeaders = errors.New("custom headers") + errNil = errors.New("nil result") +) + +type HeaderNumber struct { + N int +} + +func (HeaderNumber) SetHeader(h http.Header) { + h.Add("some-custom-header", "yes") +} + +func 
(*handlerSuite) TestSetHeader(c *gc.C) { + rec := httptest.NewRecorder() + err := httprequest.WriteJSON(rec, http.StatusTeapot, HeaderNumber{1234}) + c.Assert(err, gc.IsNil) + c.Assert(rec.Code, gc.Equals, http.StatusTeapot) + c.Assert(rec.Body.String(), gc.Equals, `{"N":1234}`) + c.Assert(rec.Header().Get("content-type"), gc.Equals, "application/json") + c.Assert(rec.Header().Get("some-custom-header"), gc.Equals, "yes") +} + +func (*handlerSuite) TestSetHeaderOnErrorMapper(c *gc.C) { + +} + +var errorMapper httprequest.ErrorMapper = func(err error) (int, interface{}) { + resp := &httprequest.RemoteError{ + Message: err.Error(), + } + status := http.StatusInternalServerError + switch errgo.Cause(err) { + case errUnauth: + status = http.StatusUnauthorized + resp.Code = "unauthorized" + case errBadReq, httprequest.ErrUnmarshal: + status = http.StatusBadRequest + resp.Code = "bad request" + case errCustomHeaders: + return http.StatusNotAcceptable, httprequest.CustomHeader{ + Body: resp, + SetHeaderFunc: func(h http.Header) { + h.Set("Acceptability", "not at all") + }, + } + case errNil: + return status, nil + } + return status, &resp +} + +var writeErrorTests = []struct { + err error + expectStatus int + expectResp *httprequest.RemoteError + expectHeader http.Header +}{{ + err: errUnauth, + expectStatus: http.StatusUnauthorized, + expectResp: &httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }, +}, { + err: errBadReq, + expectStatus: http.StatusBadRequest, + expectResp: &httprequest.RemoteError{ + Message: errBadReq.Error(), + Code: "bad request", + }, +}, { + err: errOther, + expectStatus: http.StatusInternalServerError, + expectResp: &httprequest.RemoteError{ + Message: errOther.Error(), + }, +}, { + err: errNil, + expectStatus: http.StatusInternalServerError, +}, { + err: errCustomHeaders, + expectStatus: http.StatusNotAcceptable, + expectResp: &httprequest.RemoteError{ + Message: errCustomHeaders.Error(), + }, + expectHeader: http.Header{ + "Acceptability": {"not at all"}, + }, +}} + +func (s *handlerSuite) TestWriteError(c *gc.C) { + for i, test := range writeErrorTests { + c.Logf("%d: %s", i, test.err) + rec := httptest.NewRecorder() + errorMapper.WriteError(rec, test.err) + resp := parseErrorResponse(c, rec.Body.Bytes()) + c.Assert(resp, gc.DeepEquals, test.expectResp) + c.Assert(rec.Code, gc.Equals, test.expectStatus) + for name, vals := range test.expectHeader { + c.Assert(rec.HeaderMap[name], jc.DeepEquals, vals) + } + } +} + +func parseErrorResponse(c *gc.C, body []byte) *httprequest.RemoteError { + var errResp *httprequest.RemoteError + err := json.Unmarshal(body, &errResp) + c.Assert(err, gc.IsNil) + return errResp +} + +func (s *handlerSuite) TestHandleErrors(c *gc.C) { + req := new(http.Request) + params := httprouter.Params{} + // Test when handler returns an error. + handler := errorMapper.HandleErrors(func(p httprequest.Params) error { + c.Assert(p.Request, jc.DeepEquals, req) + c.Assert(p.PathVar, jc.DeepEquals, params) + return errUnauth + }) + rec := httptest.NewRecorder() + handler(rec, req, params) + c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) + resp := parseErrorResponse(c, rec.Body.Bytes()) + c.Assert(resp, gc.DeepEquals, &httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }) + + // Test when handler returns nil. 
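+	// The handler below writes its own header and body before
+	// returning nil, so HandleErrors must pass that output through
+	// untouched.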
+ handler = errorMapper.HandleErrors(func(p httprequest.Params) error { + c.Assert(p.Request, jc.DeepEquals, req) + c.Assert(p.PathVar, jc.DeepEquals, params) + p.Response.WriteHeader(http.StatusCreated) + p.Response.Write([]byte("something")) + return nil + }) + rec = httptest.NewRecorder() + handler(rec, req, params) + c.Assert(rec.Code, gc.Equals, http.StatusCreated) + c.Assert(rec.Body.String(), gc.Equals, "something") +} + +var handleErrorsWithErrorAfterWriteHeaderTests = []struct { + about string + causeWriteHeader func(w http.ResponseWriter) +}{{ + about: "write", + causeWriteHeader: func(w http.ResponseWriter) { + w.Write([]byte("")) + }, +}, { + about: "write header", + causeWriteHeader: func(w http.ResponseWriter) { + w.WriteHeader(http.StatusOK) + }, +}, { + about: "flush", + causeWriteHeader: func(w http.ResponseWriter) { + w.(http.Flusher).Flush() + }, +}} + +func (s *handlerSuite) TestHandleErrorsWithErrorAfterWriteHeader(c *gc.C) { + for i, test := range handleErrorsWithErrorAfterWriteHeaderTests { + c.Logf("test %d: %s", i, test.about) + handler := errorMapper.HandleErrors(func(p httprequest.Params) error { + test.causeWriteHeader(p.Response) + return errgo.New("unexpected") + }) + rec := httptest.NewRecorder() + handler(rec, new(http.Request), nil) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.String(), gc.Equals, "") + } +} + +func (s *handlerSuite) TestHandleJSON(c *gc.C) { + req := new(http.Request) + params := httprouter.Params{} + // Test when handler returns an error. + handler := errorMapper.HandleJSON(func(p httprequest.Params) (interface{}, error) { + c.Assert(p.Request, jc.DeepEquals, req) + c.Assert(p.PathVar, jc.DeepEquals, params) + return nil, errUnauth + }) + rec := httptest.NewRecorder() + handler(rec, new(http.Request), params) + resp := parseErrorResponse(c, rec.Body.Bytes()) + c.Assert(resp, gc.DeepEquals, &httprequest.RemoteError{ + Message: errUnauth.Error(), + Code: "unauthorized", + }) + c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) + + // Test when handler returns a body. + handler = errorMapper.HandleJSON(func(p httprequest.Params) (interface{}, error) { + c.Assert(p.Request, jc.DeepEquals, req) + c.Assert(p.PathVar, jc.DeepEquals, params) + p.Response.Header().Set("Some-Header", "value") + return "something", nil + }) + rec = httptest.NewRecorder() + handler(rec, req, params) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.String(), gc.Equals, `"something"`) + c.Assert(rec.Header().Get("Some-Header"), gc.Equals, "value") +} === added file 'src/github.com/juju/httprequest/marshal.go' --- src/github.com/juju/httprequest/marshal.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/marshal.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,334 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package httprequest + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/julienschmidt/httprouter" + "gopkg.in/errgo.v1" +) + +var emptyReader = bytes.NewReader(nil) + +// Marshal is the counterpart of Unmarshal. It takes information from +// x, which must be a pointer to a struct, and returns an HTTP request +// using the given method that holds all of the information. +// +// The Body field in the returned request will always be of type +// BytesReaderCloser. 
+//
+// If x implements the HeaderSetter interface, its SetHeader method will
+// be called to add additional headers to the HTTP request after it has
+// been marshaled. If x is a pointer to a CustomHeader object then Marshal
+// will use its Body member to create the HTTP request.
+//
+// The HTTP request will use the given method. Named fields in the given
+// baseURL will be filled out from "path"-tagged fields in x to form the
+// URL path in the returned request. These are specified as for httprouter.
+//
+// If a field in baseURL is a suffix of the form "*var" (a trailing wildcard element
+// that holds the rest of the path), the marshaled string must begin with a "/".
+// This matches the httprouter convention that it always returns such fields
+// with a "/" prefix.
+//
+// If a field is of type string or []string, the value of the field will
+// be used directly; otherwise, if it implements encoding.TextMarshaler, that
+// will be used to marshal the field, otherwise fmt.Sprint will be used.
+//
+// For example, this code:
+//
+//    type UserDetails struct {
+//        Age int
+//    }
+//
+//    type Test struct {
+//        Username string `httprequest:"user,path"`
+//        ContextId int64 `httprequest:"context,form"`
+//        Details UserDetails `httprequest:",body"`
+//    }
+//    req, err := Marshal("http://example.com/users/:user/details", "GET", &Test{
+//        Username: "bob",
+//        ContextId: 1234,
+//        Details: UserDetails{
+//            Age: 36,
+//        },
+//    })
+//    if err != nil {
+//        ...
+//    }
+//
+// will produce an HTTP request req with a URL of
+// http://example.com/users/bob/details?context=1234 and a JSON-encoded
+// body holding `{"Age":36}`.
+//
+// It is an error if there is a field specified in the URL that is not
+// found in x.
+func Marshal(baseURL, method string, x interface{}) (*http.Request, error) {
+    var xv reflect.Value
+    if ch, ok := x.(*CustomHeader); ok {
+        xv = reflect.ValueOf(ch.Body)
+    } else {
+        xv = reflect.ValueOf(x)
+    }
+    pt, err := getRequestType(xv.Type())
+    if err != nil {
+        return nil, errgo.WithCausef(err, ErrBadUnmarshalType, "bad type %s", xv.Type())
+    }
+    req, err := http.NewRequest(method, baseURL, BytesReaderCloser{emptyReader})
+    if err != nil {
+        return nil, errgo.Mask(err)
+    }
+    req.Form = url.Values{}
+    p := &Params{
+        Request: req,
+    }
+    if err := marshal(p, xv, pt); err != nil {
+        return nil, errgo.Mask(err, errgo.Is(ErrUnmarshal))
+    }
+    if headerSetter, ok := x.(HeaderSetter); ok {
+        headerSetter.SetHeader(p.Request.Header)
+    }
+    return p.Request, nil
+}
+
+// marshal is the internal version of Marshal.
+func marshal(p *Params, xv reflect.Value, pt *requestType) error {
+    xv = xv.Elem()
+    for _, f := range pt.fields {
+        fv := xv.FieldByIndex(f.index)
+        if f.isPointer {
+            if fv.IsNil() {
+                continue
+            }
+            fv = fv.Elem()
+        }
+        // TODO store the field name in the field so
+        // that we can produce a nice error message.
+        if err := f.marshal(fv, p); err != nil {
+            return errgo.WithCausef(err, ErrUnmarshal, "cannot marshal field")
+        }
+    }
+    path, err := buildPath(p.Request.URL.Path, p.PathVar)
+    if err != nil {
+        return errgo.Mask(err)
+    }
+    p.Request.URL.Path = path
+    if q := p.Request.Form.Encode(); q != "" && p.Request.URL.RawQuery != "" {
+        p.Request.URL.RawQuery += "&" + q
+    } else {
+        p.Request.URL.RawQuery += q
+    }
+    return nil
+}
+
+func buildPath(path string, p httprouter.Params) (string, error) {
+    pathBytes := make([]byte, 0, len(path)*2)
+    for {
+        s, rest := nextPathSegment(path)
+        if s == "" {
+            break
+        }
+        if s[0] != ':' && s[0] != '*' {
+            pathBytes = append(pathBytes, s...)
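+            // At this point s is a constant segment (it starts with
+            // neither ':' nor '*') and has been copied through
+            // verbatim; advance past it and keep scanning.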
+            path = rest
+            continue
+        }
+        if s[0] == '*' && rest != "" {
+            return "", errgo.New("star path parameter is not at end of path")
+        }
+        if len(s) == 1 {
+            return "", errgo.New("empty path parameter")
+        }
+        val := p.ByName(s[1:])
+        if val == "" {
+            return "", errgo.Newf("missing value for path parameter %q", s[1:])
+        }
+        if s[0] == '*' {
+            if !strings.HasPrefix(val, "/") {
+                return "", errgo.Newf("value %q for path parameter %q does not start with required /", val, s)
+            }
+            val = val[1:]
+        }
+        pathBytes = append(pathBytes, val...)
+        path = rest
+    }
+    return string(pathBytes), nil
+}
+
+// nextPathSegment returns the next wildcard or constant
+// segment of the given path and everything after that
+// segment.
+func nextPathSegment(s string) (string, string) {
+    if s == "" {
+        return "", ""
+    }
+    if s[0] == ':' || s[0] == '*' {
+        if i := strings.Index(s, "/"); i != -1 {
+            return s[0:i], s[i:]
+        }
+        return s, ""
+    }
+    if i := strings.IndexAny(s, ":*"); i != -1 {
+        return s[0:i], s[i:]
+    }
+    return s, ""
+}
+
+// getMarshaler returns a marshaler function suitable for marshaling
+// a field with the given tag into an HTTP request.
+func getMarshaler(tag tag, t reflect.Type) (marshaler, error) {
+    switch {
+    case tag.source == sourceNone:
+        return marshalNop, nil
+    case tag.source == sourceBody:
+        return marshalBody, nil
+    case t == reflect.TypeOf([]string(nil)):
+        switch tag.source {
+        default:
+            return nil, errgo.New("invalid target type []string for path parameter")
+        case sourceForm:
+            return marshalAllField(tag.name), nil
+        case sourceHeader:
+            return marshalAllHeader(tag.name), nil
+        }
+    case t == reflect.TypeOf(""):
+        return marshalString(tag), nil
+    case implementsTextMarshaler(t):
+        return marshalWithMarshalText(t, tag), nil
+    default:
+        return marshalWithSprint(tag), nil
+    }
+}
+
+// marshalNop does nothing with the value.
+func marshalNop(v reflect.Value, p *Params) error {
+    return nil
+}
+
+// marshalBody marshals the specified value into the body of the http request.
+func marshalBody(v reflect.Value, p *Params) error {
+    // TODO allow body types that aren't necessarily JSON.
+    data, err := json.Marshal(v.Addr().Interface())
+    if err != nil {
+        return errgo.Notef(err, "cannot marshal request body")
+    }
+    p.Request.Body = BytesReaderCloser{bytes.NewReader(data)}
+    p.Request.ContentLength = int64(len(data))
+    p.Request.Header.Set("Content-Type", "application/json")
+    return nil
+}
+
+// marshalAllField marshals a []string slice into form fields.
+func marshalAllField(name string) marshaler {
+    return func(v reflect.Value, p *Params) error {
+        p.Request.Form[name] = v.Interface().([]string)
+        return nil
+    }
+}
+
+// marshalAllHeader marshals a []string slice into a header.
+func marshalAllHeader(name string) marshaler {
+    return func(v reflect.Value, p *Params) error {
+        p.Request.Header[name] = v.Interface().([]string)
+        return nil
+    }
+}
+
+// marshalString marshals a string field.
+func marshalString(tag tag) marshaler {
+    formSet := formSetters[tag.source]
+    if formSet == nil {
+        panic("unexpected source")
+    }
+    return func(v reflect.Value, p *Params) error {
+        formSet(tag.name, v.String(), p)
+        return nil
+    }
+}
+
+// encodingTextMarshaler is the same as encoding.TextMarshaler
+// but avoids us importing the encoding package, which some
+// broken gccgo installations do not allow.
+// TODO remove this and use encoding.TextMarshaler instead.
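+//
+// For illustration only (UserID is a hypothetical type, not part of
+// this package), a field declared as
+//
+//    type UserID int
+//
+//    func (u UserID) MarshalText() ([]byte, error) {
+//        return []byte(fmt.Sprintf("user-%d", int(u))), nil
+//    }
+//
+//    type request struct {
+//        ID UserID `httprequest:"id,form"`
+//    }
+//
+// is marshaled via MarshalText, so UserID(99) produces the form value
+// id=user-99 rather than falling back to fmt.Sprint.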
+type encodingTextMarshaler interface {
+    MarshalText() (text []byte, err error)
+}
+
+var textMarshalerType = reflect.TypeOf((*encodingTextMarshaler)(nil)).Elem()
+
+func implementsTextMarshaler(t reflect.Type) bool {
+    // Use the pointer type, because a pointer
+    // type will implement a superset of the methods
+    // of a non-pointer type.
+    return reflect.PtrTo(t).Implements(textMarshalerType)
+}
+
+// marshalWithMarshalText returns a marshaler
+// that marshals the given type for the given tag
+// using its MarshalText method.
+func marshalWithMarshalText(t reflect.Type, tag tag) marshaler {
+    formSet := formSetters[tag.source]
+    if formSet == nil {
+        panic("unexpected source")
+    }
+    return func(v reflect.Value, p *Params) error {
+        m := v.Addr().Interface().(encodingTextMarshaler)
+        data, err := m.MarshalText()
+        if err != nil {
+            return errgo.Mask(err)
+        }
+        formSet(tag.name, string(data), p)
+
+        return nil
+    }
+}
+
+// marshalWithSprint returns a marshaler
+// that marshals the given tag's value using fmt.Sprint.
+func marshalWithSprint(tag tag) marshaler {
+    formSet := formSetters[tag.source]
+    if formSet == nil {
+        panic("unexpected source")
+    }
+    return func(v reflect.Value, p *Params) error {
+        valueString := fmt.Sprint(v.Interface())
+
+        formSet(tag.name, valueString, p)
+
+        return nil
+    }
+}
+
+// formSetters maps from source to a function that
+// sets the value for a given key.
+var formSetters = []func(string, string, *Params){
+    sourceForm: func(name, value string, p *Params) {
+        p.Request.Form.Set(name, value)
+    },
+    sourcePath: func(name, value string, p *Params) {
+        p.PathVar = append(p.PathVar, httprouter.Param{Key: name, Value: value})
+    },
+    sourceBody: nil,
+    sourceHeader: func(name, value string, p *Params) {
+        p.Request.Header.Set(name, value)
+    },
+}
+
+// BytesReaderCloser is a bytes.Reader which
+// implements io.Closer with a no-op Close method.
+type BytesReaderCloser struct {
+    *bytes.Reader
+}
+
+// Close implements io.Closer.Close.
+func (BytesReaderCloser) Close() error {
+    return nil
+}

=== added file 'src/github.com/juju/httprequest/marshal_test.go'
--- src/github.com/juju/httprequest/marshal_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/httprequest/marshal_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,431 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package httprequest_test
+
+import (
+    "io/ioutil"
+    "net/http"
+
+    gc "gopkg.in/check.v1"
+    "gopkg.in/errgo.v1"
+
+    "github.com/juju/httprequest"
+)
+
+type marshalSuite struct{}
+
+var _ = gc.Suite(&marshalSuite{})
+
+type embedded struct {
+    F1 string  `json:"name"`
+    F2 int     `json:"age"`
+    F3 *string `json:"address"`
+}
+
+var marshalTests = []struct {
+    about           string
+    urlString       string
+    method          string
+    val             interface{}
+    expectURLString string
+    expectBody      *string
+    expectHeader    http.Header
+    expectError     string
+}{{
+    about:     "struct with simple fields",
+    urlString: "http://localhost:8081/:F1",
+    val: &struct {
+        F1 int    `httprequest:",path"`
+        F2 string `httprequest:",form"`
+    }{
+        F1: 99,
+        F2: "some text",
+    },
+    expectURLString: "http://localhost:8081/99?F2=some+text",
+}, {
+    about:     "struct with renamed fields",
+    urlString: "http://localhost:8081/:name",
+    val: &struct {
+        F1 string `httprequest:"name,path"`
+        F2 int    `httprequest:"age,form"`
+    }{
+        F1: "some random user",
+        F2: 42,
+    },
+    expectURLString: "http://localhost:8081/some%20random%20user?age=42",
+}, {
+    about:     "fields without httprequest tags are ignored",
+    urlString: "http://localhost:8081/:name",
+    val: &struct {
+        F1 string `httprequest:"name,path"`
+        F2 int    `httprequest:"age,form"`
+        F3 string
+    }{
+        F1: "some random user",
+        F2: 42,
+        F3: "some more random text",
+    },
+    expectURLString: "http://localhost:8081/some%20random%20user?age=42",
}, {
+    about:     "pointer fields are correctly handled",
+    urlString: "http://localhost:8081/:name",
+    val: &struct {
+        F1 *string `httprequest:"name,path"`
+        F2 *string `httprequest:"age,form"`
+        F3 *string `httprequest:"address,form"`
+    }{
+        F1: newString("some random user"),
+        F2: newString("42"),
+    },
+    expectURLString: "http://localhost:8081/some%20random%20user?age=42",
+}, {
+    about:     "MarshalText called on TextMarshalers",
+    urlString: "http://localhost:8081/:param1/:param2",
+    val: &struct {
+        F1 testMarshaler  `httprequest:"param1,path"`
+        F2 *testMarshaler `httprequest:"param2,path"`
+        F3 testMarshaler  `httprequest:"param3,form"`
+        F4 *testMarshaler `httprequest:"param4,form"`
+    }{
+        F1: "test1",
+        F2: (*testMarshaler)(newString("test2")),
+        F3: "test3",
+        F4: (*testMarshaler)(newString("test4")),
+    },
+    expectURLString: "http://localhost:8081/test_test1/test_test2?param3=test_test3&param4=test_test4",
+}, {
+    about:     "MarshalText not called on values that do not implement TextMarshaler",
+    urlString: "http://localhost:8081/user/:name/:surname",
+    val: &struct {
+        F1 notTextMarshaler  `httprequest:"name,path"`
+        F2 *notTextMarshaler `httprequest:"surname,path"`
+    }{
+        F1: "name",
+        F2: (*notTextMarshaler)(newString("surname")),
+    },
+    expectURLString: "http://localhost:8081/user/name/surname",
+}, {
+    about:     "MarshalText returns an error",
+    urlString: "http://localhost:8081/user/:name/:surname",
+    val: &struct {
+        F1 testMarshaler  `httprequest:"name,path"`
+        F2 *testMarshaler `httprequest:"surname,path"`
+    }{
+        F1: "",
+        F2: (*testMarshaler)(newString("surname")),
+    },
+    expectError: "cannot marshal field: empty string",
+}, {
+    about:     "[]string field form value",
+    urlString: "http://localhost:8081/user",
+    val: &struct {
+        F1 []string `httprequest:"users,form"`
+    }{
+        F1: []string{"user1", "user2", "user3"},
+    },
+    expectURLString: "http://localhost:8081/user?users=user1&users=user2&users=user3",
+}, {
+    about:     "nil []string field form value",
+    urlString: "http://localhost:8081/user",
+    val: &struct {
+        F1 *[]string `httprequest:"users,form"`
+    }{
+        F1: nil,
+    },
+    expectURLString: "http://localhost:8081/user",
+}, {
+    about:     "cannot marshal []string field to path",
+    urlString: "http://localhost:8081/:users",
+    val: &struct {
+        F1 []string `httprequest:"users,path"`
+    }{
+        F1: []string{"user1", "user2"},
+    },
+    expectError: `bad type \*struct { F1 \[\]string "httprequest:\\"users,path\\"" }: invalid target type \[\]string for path parameter`,
+}, {
+    about:     "[]string field fails to marshal to path",
+    urlString: "http://localhost:8081/user/:users",
+    val: &struct {
+        F1 []string `httprequest:"users,path"`
+    }{
+        F1: []string{"user1", "user2", "user3"},
+    },
+    expectError: "bad type .*: invalid target type.*",
+}, {
+    about:     "more than one field with body tag",
+    urlString: "http://localhost:8081/user",
+    method:    "POST",
+    val: &struct {
+        F1 string `httprequest:"user,body"`
+        F2 int    `httprequest:"age,body"`
+    }{
+        F1: "test user",
+        F2: 42,
+    },
+    expectError: "bad type .*: more than one body field specified",
+}, {
+    about:     "required path parameter, but not specified",
+    urlString: "http://localhost:8081/u/:username",
+    method:    "POST",
+    val: &struct {
+        F1 string `httprequest:"user,body"`
+    }{
+        F1: "test user",
+    },
+    expectError: `missing value for path parameter "username"`,
+}, {
+    about:     "marshal to body",
+    urlString: "http://localhost:8081/u",
+    method:    "POST",
+    val: &struct {
+        F1 embedded `httprequest:"info,body"`
+    }{
+        F1: embedded{
+            F1: "test user",
+            F2: 42,
+            F3: newString("test address"),
+        },
+    },
+    expectBody: newString(`{"name":"test user","age":42,"address":"test address"}`),
+}, {
+    about:     "empty path wildcard",
+    urlString: "http://localhost:8081/u/:",
+    method:    "POST",
+    val: &struct {
+        F1 string `httprequest:"user,body"`
+    }{
+        F1: "test user",
+    },
+    expectError: "empty path parameter",
+}, {
+    about:     "nil field to form",
+    urlString: "http://localhost:8081/u",
+    val: &struct {
+        F1 *string `httprequest:"user,form"`
+    }{},
+    expectURLString: "http://localhost:8081/u",
+}, {
+    about:     "nil field to path",
+    urlString: "http://localhost:8081/u",
+    val: &struct {
+        F1 *string `httprequest:"user,path"`
+    }{},
+    expectURLString: "http://localhost:8081/u",
+}, {
+    about:     "marshal to body of a GET request",
+    urlString: "http://localhost:8081/u",
+    val: &struct {
+        F1 string `httprequest:",body"`
+    }{
+        F1: "hello test",
+    },
+    // Providing a body to a GET request is unusual but
+    // some people do it anyway.
+ + expectBody: newString(`"hello test"`), +}, { + about: "marshal to nil value to body", + urlString: "http://localhost:8081/u", + val: &struct { + F1 *string `httprequest:",body"` + }{ + F1: nil, + }, + expectBody: newString(""), +}, { + about: "nil TextMarshaler", + urlString: "http://localhost:8081/u", + val: &struct { + F1 *testMarshaler `httprequest:"surname,form"` + }{ + F1: (*testMarshaler)(nil), + }, + expectURLString: "http://localhost:8081/u", +}, { + about: "marshal nil with Sprint", + urlString: "http://localhost:8081/u", + val: &struct { + F1 *int `httprequest:"surname,form"` + }{ + F1: (*int)(nil), + }, + expectURLString: "http://localhost:8081/u", +}, { + about: "marshal to path with * placeholder", + urlString: "http://localhost:8081/u/*name", + val: &struct { + F1 string `httprequest:"name,path"` + }{ + F1: "/test", + }, + expectURLString: "http://localhost:8081/u/test", +}, { + about: "marshal to path with * placeholder, but the marshaled value does not start with /", + urlString: "http://localhost:8081/u/*name", + val: &struct { + F1 string `httprequest:"name,path"` + }{ + F1: "test", + }, + expectError: `value \"test\" for path parameter \"\*name\" does not start with required /`, +}, { + about: "* placeholder allowed only at the end", + urlString: "http://localhost:8081/u/*name/document", + val: &struct { + F1 string `httprequest:"name,path"` + }{ + F1: "test", + }, + expectError: "star path parameter is not at end of path", +}, { + about: "unparsable base url string", + urlString: "%%", + val: &struct { + F1 string `httprequest:"name,form"` + }{ + F1: "test", + }, + expectError: `parse %%: invalid URL escape \"%%\"`, +}, { + about: "value cannot be marshaled to json", + urlString: "http://localhost", + method: "POST", + val: &struct { + F1 failJSONMarshaler `httprequest:"field,body"` + }{ + F1: "test", + }, + expectError: `cannot marshal field: cannot marshal request body: json: error calling MarshalJSON for type \*httprequest_test.failJSONMarshaler: marshal error`, +}, { + about: "url with query parameters", + urlString: "http://localhost?a=b", + method: "POST", + val: &struct { + F1 failJSONMarshaler `httprequest:"f1,form"` + }{ + F1: "test", + }, + expectURLString: "http://localhost?a=b&f1=test", +}, { + about: "url with query parameters no form", + urlString: "http://localhost?a=b", + method: "POST", + val: &struct{}{}, + expectURLString: "http://localhost?a=b", +}, { + about: "struct with headers", + urlString: "http://localhost:8081/", + val: &struct { + F1 string `httprequest:",header"` + F2 int `httprequest:",header"` + F3 bool `httprequest:",header"` + }{ + F1: "some text", + F2: 99, + F3: true, + }, + expectURLString: "http://localhost:8081/", + expectHeader: http.Header{ + "F1": []string{"some text"}, + "F2": []string{"99"}, + "F3": []string{"true"}, + }, +}, { + about: "struct with header slice", + urlString: "http://localhost:8081/:F1", + val: &struct { + F1 int `httprequest:",path"` + F2 string `httprequest:",form"` + F3 []string `httprequest:",header"` + }{ + F1: 99, + F2: "some text", + F3: []string{"A", "B", "C"}, + }, + expectURLString: "http://localhost:8081/99?F2=some+text", + expectHeader: http.Header{"F3": []string{"A", "B", "C"}}, +}, { + about: "SetHeader called after marshaling", + urlString: "http://localhost:8081/", + val: &httprequest.CustomHeader{ + Body: &struct { + F1 string `httprequest:",header"` + F2 int `httprequest:",header"` + F3 bool `httprequest:",header"` + }{ + F1: "some text", + F2: 99, + F3: false, + }, + SetHeaderFunc: 
func(h http.Header) { + h.Set("F2", "some other text") + }, + }, + expectURLString: "http://localhost:8081/", + expectHeader: http.Header{ + "F1": []string{"some text"}, + "F2": []string{"some other text"}, + "F3": []string{"false"}, + }, +}} + +func getStruct() interface{} { + return &struct { + F1 string + }{ + F1: "hello", + } +} + +func (*marshalSuite) TestMarshal(c *gc.C) { + for i, test := range marshalTests { + c.Logf("%d: %s", i, test.about) + method := "GET" + if test.method != "" { + method = test.method + } + req, err := httprequest.Marshal(test.urlString, method, test.val) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + continue + } + c.Assert(err, gc.IsNil) + if test.expectURLString != "" { + c.Assert(req.URL.String(), gc.DeepEquals, test.expectURLString) + } + if test.expectBody != nil { + data, err := ioutil.ReadAll(req.Body) + c.Assert(err, gc.IsNil) + if *test.expectBody != "" { + c.Assert(req.Header.Get("Content-Type"), gc.Equals, "application/json") + } + c.Assert(string(data), gc.DeepEquals, *test.expectBody) + } + for k, v := range test.expectHeader { + c.Assert(req.Header[k], gc.DeepEquals, v) + } + } +} + +type testMarshaler string + +func (t *testMarshaler) MarshalText() ([]byte, error) { + if len(*t) == 0 { + return nil, errgo.New("empty string") + } + return []byte("test_" + *t), nil +} + +type notTextMarshaler string + +// MarshalText does *not* implement encoding.TextMarshaler +func (t *notTextMarshaler) MarshalText() { + panic("unexpected call") +} + +type failJSONMarshaler string + +func (*failJSONMarshaler) MarshalJSON() ([]byte, error) { + return nil, errgo.New("marshal error") +} === added file 'src/github.com/juju/httprequest/package_test.go' --- src/github.com/juju/httprequest/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package httprequest_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/httprequest/type.go' --- src/github.com/juju/httprequest/type.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/httprequest/type.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,353 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// Package httprequest provides functionality for unmarshaling +// HTTP request parameters into a struct type. +// +// Please note that the API is not considered stable at this +// point and may be changed in a backwardly incompatible +// manner at any time. +package httprequest + +import ( + "fmt" + "net/http" + "reflect" + "sort" + "strings" + "sync" + + "github.com/julienschmidt/httprouter" + "gopkg.in/errgo.v1" +) + +// TODO include field name and source in error messages. + +var ( + typeMutex sync.RWMutex + typeMap = make(map[reflect.Type]*requestType) +) + +// Route is the type of a field that specifies a routing +// path and HTTP method. See Marshal and Unmarshal +// for details. +type Route struct{} + +// Params holds the parameters provided to an HTTP request. +type Params struct { + Response http.ResponseWriter + Request *http.Request + PathVar httprouter.Params +} + +// resultMaker is provided to the unmarshal functions. +// When called with the value passed to the unmarshaler, +// it returns the field value to be assigned to, +// creating it if necessary. 
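+//
+// For example (an illustrative sketch, not code from this package):
+// given a field declared as
+//
+//    F *int `httprequest:",form"`
+//
+// the resultMaker allocates a fresh int the first time the field is
+// filled in, so the unmarshaler always receives an addressable,
+// non-nil value to write through.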
+type resultMaker func(reflect.Value) reflect.Value
+
+// unmarshaler unmarshals some value from params into
+// the given value. The value should not be assigned to directly,
+// but passed to makeResult and then updated.
+type unmarshaler func(v reflect.Value, p Params, makeResult resultMaker) error
+
+// marshaler marshals the specified value into params.
+// The value is always the value type, even if the field type
+// is a pointer.
+type marshaler func(reflect.Value, *Params) error
+
+// requestType holds information derived from a request
+// type, preprocessed so that it's quick to unmarshal.
+type requestType struct {
+    method string
+    path   string
+    fields []field
+}
+
+// field holds preprocessed information on an individual field
+// in the result.
+type field struct {
+    // index holds the index slice of the field.
+    index []int
+
+    // unmarshal is used to unmarshal the value into
+    // the given field. The value passed as its first
+    // argument is not a pointer type, but is addressable.
+    unmarshal unmarshaler
+
+    // marshal is used to marshal the value into the
+    // given field. The value passed as its first argument is not
+    // a pointer type, but it is addressable.
+    marshal marshaler
+
+    // makeResult is the resultMaker that will be
+    // passed into the unmarshaler.
+    makeResult resultMaker
+
+    // isPointer is true if the field is a pointer to the underlying type.
+    isPointer bool
+}
+
+// getRequestType is like parseRequestType except that
+// it returns the cached requestType when possible,
+// adding the type to the cache otherwise.
+func getRequestType(t reflect.Type) (*requestType, error) {
+    typeMutex.RLock()
+    pt := typeMap[t]
+    typeMutex.RUnlock()
+    if pt != nil {
+        return pt, nil
+    }
+    typeMutex.Lock()
+    defer typeMutex.Unlock()
+    if pt = typeMap[t]; pt != nil {
+        // The type has been parsed after we dropped
+        // the read lock, so use it.
+        return pt, nil
+    }
+    pt, err := parseRequestType(t)
+    if err != nil {
+        return nil, errgo.Mask(err)
+    }
+    typeMap[t] = pt
+    return pt, nil
+}
+
+// parseRequestType preprocesses the given type
+// into a form that can be efficiently interpreted
+// by Unmarshal.
+func parseRequestType(t reflect.Type) (*requestType, error) {
+    if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
+        return nil, fmt.Errorf("type is not pointer to struct")
+    }
+
+    hasBody := false
+    var pt requestType
+    foundRoute := false
+    for _, f := range fields(t.Elem()) {
+        if f.PkgPath != "" {
+            // Ignore unexported fields (note that this
+            // does not apply to anonymous fields).
+            continue
+        }
+        if !foundRoute && f.Anonymous && f.Type == reflect.TypeOf(Route{}) {
+            var err error
+            pt.method, pt.path, err = parseRouteTag(f.Tag)
+            if err != nil {
+                return nil, errgo.Notef(err, "bad route tag %q", f.Tag)
+            }
+            foundRoute = true
+            continue
+        }
+        tag, err := parseTag(f.Tag, f.Name)
+        if err != nil {
+            return nil, errgo.Notef(err, "bad tag %q in field %s", f.Tag, f.Name)
+        }
+        if tag.source == sourceBody {
+            if hasBody {
+                return nil, errgo.New("more than one body field specified")
+            }
+            hasBody = true
+        }
+        field := field{
+            index: f.Index,
+        }
+        if f.Type.Kind() == reflect.Ptr {
+            // The field is a pointer, so when the value is set,
+            // we need to create a new pointer to put
+            // it into.
+ field.makeResult = makePointerResult + field.isPointer = true + f.Type = f.Type.Elem() + } else { + field.makeResult = makeValueResult + field.isPointer = false + } + + field.unmarshal, err = getUnmarshaler(tag, f.Type) + if err != nil { + return nil, errgo.Mask(err) + } + + field.marshal, err = getMarshaler(tag, f.Type) + if err != nil { + return nil, errgo.Mask(err) + } + + if f.Anonymous { + if tag.source != sourceBody && tag.source != sourceNone { + return nil, errgo.New("httprequest tag not yet supported on anonymous fields") + } + } + pt.fields = append(pt.fields, field) + } + return &pt, nil +} + +// Note: we deliberately omit HEAD and OPTIONS +// from this list. HEAD will be routed through GET handlers +// and OPTIONS is handled separately. +var validMethod = map[string]bool{ + "PUT": true, + "POST": true, + "DELETE": true, + "GET": true, + "PATCH": true, +} + +func parseRouteTag(tag reflect.StructTag) (method, path string, err error) { + tagStr := tag.Get("httprequest") + if tagStr == "" { + return "", "", errgo.New("no httprequest tag") + } + f := strings.Fields(tagStr) + switch len(f) { + case 2: + path = f[1] + fallthrough + case 1: + method = f[0] + default: + return "", "", errgo.New("wrong field count") + } + if !validMethod[method] { + return "", "", errgo.Newf("invalid method") + } + // TODO check that path looks valid + return method, path, nil +} + +func makePointerResult(v reflect.Value) reflect.Value { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return v.Elem() +} + +func makeValueResult(v reflect.Value) reflect.Value { + return v +} + +type tagSource uint8 + +const ( + sourceNone = iota + sourcePath + sourceForm + sourceBody + sourceHeader +) + +type tag struct { + name string + source tagSource +} + +// parseTag parses the given struct tag attached to the given +// field name into a tag structure. +func parseTag(rtag reflect.StructTag, fieldName string) (tag, error) { + t := tag{ + name: fieldName, + } + tagStr := rtag.Get("httprequest") + if tagStr == "" { + return t, nil + } + fields := strings.Split(tagStr, ",") + if fields[0] != "" { + t.name = fields[0] + } + for _, f := range fields[1:] { + switch f { + case "path": + t.source = sourcePath + case "form": + t.source = sourceForm + case "body": + t.source = sourceBody + case "header": + t.source = sourceHeader + default: + return tag{}, fmt.Errorf("unknown tag flag %q", f) + } + } + return t, nil +} + +// fields returns all the fields in the given struct type +// including fields inside anonymous struct members. +// The fields are ordered with top level fields first +// followed by the members of those fields +// for anonymous fields. +func fields(t reflect.Type) []reflect.StructField { + byName := make(map[string]reflect.StructField) + addFields(t, byName, nil) + fields := make(fieldsByIndex, 0, len(byName)) + for _, f := range byName { + if f.Name != "" { + fields = append(fields, f) + } + } + sort.Sort(fields) + return fields +} + +func addFields(t reflect.Type, byName map[string]reflect.StructField, index []int) { + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + index := append(index, i) + var add bool + old, ok := byName[f.Name] + switch { + case ok && len(old.Index) == len(index): + // Fields with the same name at the same depth + // cancel one another out. Set the field name + // to empty to signify that has happened. + old.Name = "" + byName[f.Name] = old + add = false + case ok: + // Fields at less depth win. 
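+            // For example (illustrative only): with
+            //    type Inner struct{ N int }
+            //    type Outer struct {
+            //        Inner
+            //        N int `httprequest:",form"`
+            //    }
+            // Outer's N (depth 1) wins over Inner's N (depth 2),
+            // matching the usual Go rules for promoted fields.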
+            add = len(index) < len(old.Index)
+        default:
+            // The field did not previously exist.
+            add = true
+        }
+        if add {
+            // copy the index so that it's not overwritten
+            // by the other appends.
+            f.Index = append([]int(nil), index...)
+            byName[f.Name] = f
+        }
+        if f.Anonymous {
+            if f.Type.Kind() == reflect.Ptr {
+                f.Type = f.Type.Elem()
+            }
+            if f.Type.Kind() == reflect.Struct {
+                addFields(f.Type, byName, index)
+            }
+        }
+    }
+}
+
+type fieldsByIndex []reflect.StructField
+
+func (f fieldsByIndex) Len() int {
+    return len(f)
+}
+
+func (f fieldsByIndex) Swap(i, j int) {
+    f[i], f[j] = f[j], f[i]
+}
+
+func (f fieldsByIndex) Less(i, j int) bool {
+    indexi, indexj := f[i].Index, f[j].Index
+    for len(indexi) != 0 && len(indexj) != 0 {
+        ii, ij := indexi[0], indexj[0]
+        if ii != ij {
+            return ii < ij
+        }
+        indexi, indexj = indexi[1:], indexj[1:]
+    }
+    return len(indexi) < len(indexj)
+}

=== added file 'src/github.com/juju/httprequest/unmarshal.go'
--- src/github.com/juju/httprequest/unmarshal.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/httprequest/unmarshal.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,255 @@
+package httprequest
+
+import (
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "reflect"
+
+    "gopkg.in/errgo.v1"
+)
+
+var (
+    ErrUnmarshal        = errgo.New("httprequest unmarshal error")
+    ErrBadUnmarshalType = errgo.New("httprequest bad unmarshal type")
+)
+
+// Unmarshal takes values from the given parameters and fills
+// out fields in x, which must be a pointer to a struct.
+//
+// Tags on the struct's fields determine where each field is filled in
+// from. Similar to encoding/json and other encoding packages, the tag
+// holds a comma-separated list. The first item in the list is an
+// alternative name for the field (the field name itself will be used if
+// this is empty). The next item specifies where the field is filled in
+// from. It may be:
+//
+//    "path" - the field is taken from a parameter in p.PathVar
+//        with a matching field name.
+//
+//    "form" - the field is taken from the given name in p.Request.Form
+//        (note that this covers both URL query parameters and
+//        POST form parameters).
+//
+//    "header" - the field is taken from the given name in
+//        p.Request.Header.
+//
+//    "body" - the field is filled in by parsing the request body
+//        as JSON.
+//
+// For path and form parameters, the field will be filled out from
+// the field in p.PathVar or p.Request.Form using one of the following
+// methods (in descending order of preference):
+//
+// - if the type is string, it will be set from the first value.
+//
+// - if the type is []string, it will be filled out using all values for that field
+// (allowed only for form and header parameters)
+//
+// - if the type implements encoding.TextUnmarshaler, its
+// UnmarshalText method will be used
+//
+// - otherwise fmt.Sscan will be used to set the value.
+//
+// When the unmarshaling fails, Unmarshal returns an error with an
+// ErrUnmarshal cause. If the type of x is inappropriate,
+// it returns an error with an ErrBadUnmarshalType cause.
+func Unmarshal(p Params, x interface{}) error {
+    xv := reflect.ValueOf(x)
+    pt, err := getRequestType(xv.Type())
+    if err != nil {
+        return errgo.WithCausef(err, ErrBadUnmarshalType, "bad type %s", xv.Type())
+    }
+    if err := unmarshal(p, xv, pt); err != nil {
+        return errgo.Mask(err, errgo.Is(ErrUnmarshal))
+    }
+    return nil
+}
+
+// unmarshal is the internal version of Unmarshal.
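+// It relies on the per-field unmarshalers prepared by parseRequestType
+// and wraps any per-field failure with an ErrUnmarshal cause.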
+func unmarshal(p Params, xv reflect.Value, pt *requestType) error { + xv = xv.Elem() + for _, f := range pt.fields { + fv := xv.FieldByIndex(f.index) + // TODO store the field name in the field so + // that we can produce a nice error message. + if err := f.unmarshal(fv, p, f.makeResult); err != nil { + return errgo.WithCausef(err, ErrUnmarshal, "cannot unmarshal into field") + } + } + return nil +} + +// getUnmarshaler returns an unmarshaler function +// suitable for unmarshaling a field with the given tag +// into a value of the given type. +func getUnmarshaler(tag tag, t reflect.Type) (unmarshaler, error) { + switch { + case tag.source == sourceNone: + return unmarshalNop, nil + case tag.source == sourceBody: + return unmarshalBody, nil + case t == reflect.TypeOf([]string(nil)): + switch tag.source { + default: + return nil, errgo.New("invalid target type []string for path parameter") + case sourceForm: + return unmarshalAllField(tag.name), nil + case sourceHeader: + return unmarshalAllHeader(tag.name), nil + } + case t == reflect.TypeOf(""): + return unmarshalString(tag), nil + case implementsTextUnmarshaler(t): + return unmarshalWithUnmarshalText(t, tag), nil + default: + return unmarshalWithScan(tag), nil + } +} + +// unmarshalNop just creates the result value but does not +// fill it out with anything. This is used to create pointers +// to new anonymous field members. +func unmarshalNop(v reflect.Value, p Params, makeResult resultMaker) error { + makeResult(v) + return nil +} + +// unmarshalAllField unmarshals all the form fields for a given +// attribute into a []string slice. +func unmarshalAllField(name string) unmarshaler { + return func(v reflect.Value, p Params, makeResult resultMaker) error { + vals := p.Request.Form[name] + if len(vals) > 0 { + makeResult(v).Set(reflect.ValueOf(vals)) + } + return nil + } +} + +// unmarshalAllHeader unmarshals all the header fields for a given +// attribute into a []string slice. +func unmarshalAllHeader(name string) unmarshaler { + return func(v reflect.Value, p Params, makeResult resultMaker) error { + vals := p.Request.Header[name] + if len(vals) > 0 { + makeResult(v).Set(reflect.ValueOf(vals)) + } + return nil + } +} + +// unmarshalString unmarshals into a string field. +func unmarshalString(tag tag) unmarshaler { + getVal := formGetters[tag.source] + if getVal == nil { + panic("unexpected source") + } + return func(v reflect.Value, p Params, makeResult resultMaker) error { + val, ok := getVal(tag.name, p) + if ok { + makeResult(v).SetString(val) + } + return nil + } +} + +// unmarshalBody unmarshals the http request body +// into the given value. +func unmarshalBody(v reflect.Value, p Params, makeResult resultMaker) error { + if err := checkIsJSON(p.Request.Header, p.Request.Body); err != nil { + return errgo.Mask(err) + } + data, err := ioutil.ReadAll(p.Request.Body) + if err != nil { + return errgo.Notef(err, "cannot read request body") + } + // TODO allow body types that aren't necessarily JSON. + result := makeResult(v) + if err := json.Unmarshal(data, result.Addr().Interface()); err != nil { + return errgo.Notef(err, "cannot unmarshal request body") + } + return nil +} + +// formGetters maps from source to a function that +// returns the value for a given key and reports +// whether the value was found. 
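+// The sourceBody entry is deliberately nil: body fields are decoded by
+// unmarshalBody rather than being looked up by key, so sourceBody is
+// never used to index this slice.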
+var formGetters = []func(name string, p Params) (string, bool){
+    sourceForm: func(name string, p Params) (string, bool) {
+        vs := p.Request.Form[name]
+        if len(vs) == 0 {
+            return "", false
+        }
+        return vs[0], true
+    },
+    sourcePath: func(name string, p Params) (string, bool) {
+        for _, pv := range p.PathVar {
+            if pv.Key == name {
+                return pv.Value, true
+            }
+        }
+        return "", false
+    },
+    sourceBody: nil,
+    sourceHeader: func(name string, p Params) (string, bool) {
+        vs := p.Request.Header[name]
+        if len(vs) == 0 {
+            return "", false
+        }
+        return vs[0], true
+    },
+}
+
+// encodingTextUnmarshaler is the same as encoding.TextUnmarshaler
+// but avoids us importing the encoding package, which some
+// broken gccgo installations do not allow.
+// TODO remove this and use encoding.TextUnmarshaler instead.
+type encodingTextUnmarshaler interface {
+    UnmarshalText(text []byte) error
+}
+
+var textUnmarshalerType = reflect.TypeOf((*encodingTextUnmarshaler)(nil)).Elem()
+
+func implementsTextUnmarshaler(t reflect.Type) bool {
+    // Use the pointer type, because a pointer
+    // type will implement a superset of the methods
+    // of a non-pointer type.
+    return reflect.PtrTo(t).Implements(textUnmarshalerType)
+}
+
+// unmarshalWithUnmarshalText returns an unmarshaler
+// that unmarshals the given type from the given tag
+// using its UnmarshalText method.
+func unmarshalWithUnmarshalText(t reflect.Type, tag tag) unmarshaler {
+    getVal := formGetters[tag.source]
+    if getVal == nil {
+        panic("unexpected source")
+    }
+    return func(v reflect.Value, p Params, makeResult resultMaker) error {
+        val, _ := getVal(tag.name, p)
+        uv := makeResult(v).Addr().Interface().(encodingTextUnmarshaler)
+        return uv.UnmarshalText([]byte(val))
+    }
+}
+
+// unmarshalWithScan returns an unmarshaler
+// that unmarshals the given tag using fmt.Sscan.
+func unmarshalWithScan(tag tag) unmarshaler {
+    formGet := formGetters[tag.source]
+    if formGet == nil {
+        panic("unexpected source")
+    }
+    return func(v reflect.Value, p Params, makeResult resultMaker) error {
+        val, ok := formGet(tag.name, p)
+        if !ok {
+            // TODO allow specifying that a field is mandatory?
+            return nil
+        }
+        _, err := fmt.Sscan(val, makeResult(v).Addr().Interface())
+        if err != nil {
+            return errgo.Notef(err, "cannot parse %q into %s", val, v.Type())
+        }
+        return nil
+    }
+}

=== added file 'src/github.com/juju/httprequest/unmarshal_test.go'
--- src/github.com/juju/httprequest/unmarshal_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/httprequest/unmarshal_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,468 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+ +package httprequest_test + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/julienschmidt/httprouter" + gc "gopkg.in/check.v1" + + "github.com/juju/httprequest" +) + +type unmarshalSuite struct{} + +var _ = gc.Suite(&unmarshalSuite{}) + +var unmarshalTests = []struct { + about string + val interface{} + expect interface{} + params httprequest.Params + expectError string + // TODO expectErrorCause func(error) bool +}{{ + about: "struct with simple fields", + val: struct { + F1 int `httprequest:",form"` + F2 int `httprequest:",form"` + G1 string `httprequest:",path"` + G2 string `httprequest:",path"` + H string `httprequest:",body"` + UnknownForm string `httprequest:",form"` + UnknownPath string `httprequest:",path"` + }{ + F1: 99, + F2: -35, + G1: "g1 val", + G2: "g2 val", + H: "h val", + }, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Form: url.Values{ + "F1": {"99"}, + "F2": {"-35", "not a number"}, + }, + Body: body(`"h val"`), + }, + PathVar: httprouter.Params{{ + Key: "G2", + Value: "g2 val", + }, { + Key: "G1", + Value: "g1 val", + }, { + Key: "G1", + Value: "g1 wrong val", + }}, + }, +}, { + about: "struct with renamed fields", + val: struct { + F1 int `httprequest:"x1,form"` + F2 int `httprequest:"x2,form"` + G1 string `httprequest:"g1,path"` + G2 string `httprequest:"g2,path"` + }{ + F1: 99, + F2: -35, + G1: "g1 val", + G2: "g2 val", + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "x1": {"99"}, + "x2": {"-35", "not a number"}, + }, + }, + PathVar: httprouter.Params{{ + Key: "g2", + Value: "g2 val", + }, { + Key: "g1", + Value: "g1 val", + }, { + Key: "g1", + Value: "g1 wrong val", + }}, + }, +}, { + about: "unexported fields are ignored", + val: struct { + f int `httprequest:",form"` + G int `httprequest:",form"` + }{ + G: 99, + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "G": {"99"}, + "f": {"100"}, + }, + }, + }, +}, { + about: "unexported embedded type works ok", + val: struct { + sFG + }{ + sFG: sFG{ + F: 99, + G: 100, + }, + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "F": {"99"}, + "G": {"100"}, + }, + }, + }, +}, { + about: "unexported embedded type for body works ok", + val: struct { + sFG `httprequest:",body"` + }{ + sFG: sFG{ + F: 99, + G: 100, + }, + }, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Body: body(`{"F": 99, "G": 100}`), + }, + }, +}, { + about: "unexported type for body is ignored", + val: struct { + foo sFG `httprequest:",body"` + }{}, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Body: body(`{"F": 99, "G": 100}`), + }, + }, +}, { + about: "fields without httprequest tags are ignored", + val: struct { + F int + }{}, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "F": {"foo"}, + }, + }, + PathVar: httprouter.Params{{ + Key: "F", + Value: "foo", + }}, + }, +}, { + about: "pointer fields are filled out", + val: struct { + F *int `httprequest:",form"` + *SFG + S *string `httprequest:",form"` + T *string `httprequest:",form"` + }{ + F: newInt(99), + SFG: &SFG{ + F: 0, + G: 534, + }, + S: newString("s val"), + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "F": {"99"}, + "G": 
{"534"}, + "S": {"s val"}, + }, + }, + }, +}, { + about: "UnmarshalText called on TextUnmarshalers", + val: struct { + F exclamationUnmarshaler `httprequest:",form"` + G exclamationUnmarshaler `httprequest:",path"` + FP *exclamationUnmarshaler `httprequest:",form"` + }{ + F: "yes!", + G: "no!", + FP: (*exclamationUnmarshaler)(newString("maybe!")), + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "F": {"yes"}, + "FP": {"maybe"}, + }, + }, + PathVar: httprouter.Params{{ + Key: "G", + Value: "no", + }}, + }, +}, { + about: "UnmarshalText not called on values with a non-TextUnmarshaler UnmarshalText method", + val: struct { + F notTextUnmarshaler `httprequest:",form"` + }{ + F: "hello", + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "F": {"hello"}, + }, + }, + }, +}, { + about: "UnmarshalText returning an error", + val: struct { + F exclamationUnmarshaler `httprequest:",form"` + }{}, + params: httprequest.Params{ + Request: &http.Request{}, + }, + expectError: "cannot unmarshal into field: empty string!", +}, { + about: "all field form values", + val: struct { + A []string `httprequest:",form"` + B *[]string `httprequest:",form"` + C []string `httprequest:",form"` + D *[]string `httprequest:",form"` + }{ + A: []string{"a1", "a2"}, + B: func() *[]string { + x := []string{"b1", "b2", "b3"} + return &x + }(), + }, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "A": {"a1", "a2"}, + "B": {"b1", "b2", "b3"}, + }, + }, + }, +}, { + about: "invalid scan field", + val: struct { + A int `httprequest:",form"` + }{}, + params: httprequest.Params{ + Request: &http.Request{ + Form: url.Values{ + "A": {"not an int"}, + }, + }, + }, + expectError: `cannot unmarshal into field: cannot parse "not an int" into int: expected integer`, +}, { + about: "scan field not present", + val: struct { + A int `httprequest:",form"` + }{}, + params: httprequest.Params{ + Request: &http.Request{}, + }, +}, { + about: "invalid JSON body", + val: struct { + A string `httprequest:",body"` + }{}, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Body: body("invalid JSON"), + }, + }, + expectError: "cannot unmarshal into field: cannot unmarshal request body: invalid character 'i' looking for beginning of value", +}, { + about: "body with read error", + val: struct { + A string `httprequest:",body"` + }{}, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Body: errorReader("some error"), + }, + }, + expectError: "cannot unmarshal into field: cannot read request body: some error", +}, { + about: "[]string not allowed for URL source", + val: struct { + A []string `httprequest:",path"` + }{}, + expectError: `bad type .*: invalid target type \[]string for path parameter`, +}, { + about: "duplicated body", + val: struct { + B1 int `httprequest:",body"` + B2 string `httprequest:",body"` + }{}, + expectError: "bad type .*: more than one body field specified", +}, { + about: "body tag name is ignored", + val: struct { + B string `httprequest:"foo,body"` + }{ + B: "hello", + }, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"application/json"}}, + Body: body(`"hello"`), + }, + }, +}, { + about: "tag with invalid source", + val: struct { + B1 int `httprequest:",xxx"` + }{}, + expectError: `bad type .*: bad tag "httprequest:\\",xxx\\"" in field B1: unknown tag 
flag "xxx"`, +}, { + about: "non-struct pointer", + val: 0, + expectError: `bad type \*int: type is not pointer to struct`, +}, { + about: "unmarshaling with wrong request content type", + val: struct { + A string `httprequest:",body"` + }{}, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{"Content-Type": {"text/html"}}, + Body: body("invalid JSON"), + }, + }, + expectError: `cannot unmarshal into field: unexpected content type text/html; want application/json; content: invalid JSON`, +}, { + about: "struct with header fields", + val: struct { + F1 int `httprequest:"x1,header"` + G1 string `httprequest:"g1,header"` + }{ + F1: 99, + G1: "g1 val", + }, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{ + "x1": {"99"}, + "g1": {"g1 val"}, + }, + }, + }, +}, { + about: "all field header values", + val: struct { + A []string `httprequest:",header"` + B *[]string `httprequest:",header"` + C []string `httprequest:",header"` + D *[]string `httprequest:",header"` + }{ + A: []string{"a1", "a2"}, + B: func() *[]string { + x := []string{"b1", "b2", "b3"} + return &x + }(), + }, + params: httprequest.Params{ + Request: &http.Request{ + Header: http.Header{ + "A": {"a1", "a2"}, + "B": {"b1", "b2", "b3"}, + }, + }, + }, +}} + +type SFG struct { + F int `httprequest:",form"` + G int `httprequest:",form"` +} + +type sFG struct { + F int `httprequest:",form"` + G int `httprequest:",form"` +} + +func (*unmarshalSuite) TestUnmarshal(c *gc.C) { + for i, test := range unmarshalTests { + c.Logf("%d: %s", i, test.about) + t := reflect.TypeOf(test.val) + fillv := reflect.New(t) + err := httprequest.Unmarshal(test.params, fillv.Interface()) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + continue + } + c.Assert(fillv.Elem().Interface(), jc.DeepEquals, test.val) + } +} + +// TODO non-pointer struct + +type notTextUnmarshaler string + +// UnmarshalText does *not* implement encoding.TextUnmarshaler +// (it has no arguments or error return value) +func (t *notTextUnmarshaler) UnmarshalText() { + panic("unexpected call") +} + +type exclamationUnmarshaler string + +func (t *exclamationUnmarshaler) UnmarshalText(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("empty string!") + } + *t = exclamationUnmarshaler(b) + "!" + return nil +} + +func newInt(i int) *int { + return &i +} + +func newString(s string) *string { + return &s +} + +type errorReader string + +func (r errorReader) Read([]byte) (int, error) { + return 0, fmt.Errorf("%s", r) +} + +func (r errorReader) Close() error { + return nil +} + +func body(s string) io.ReadCloser { + return ioutil.NopCloser(strings.NewReader(s)) +} === modified file 'src/github.com/juju/juju/.reviewboardrc' --- src/github.com/juju/juju/.reviewboardrc 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/.reviewboardrc 2016-03-22 15:18:22 +0000 @@ -1,4 +1,4 @@ -REVIEWBOARD_URL = "https://reviews.vapour.ws/" +REVIEWBOARD_URL = "http://reviews.vapour.ws/" REPOSITORY = "juju (core)" BRANCH = "master" TRACKING_BRANCH = "upstream/master" === modified file 'src/github.com/juju/juju/README.md' --- src/github.com/juju/juju/README.md 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/README.md 2016-03-22 15:18:22 +0000 @@ -5,10 +5,10 @@ Juju enables you to use [Charms](http://juju.ubuntu.com/charms) to deploy your application architectures to EC2, OpenStack, Azure, HP your data center and even your own Ubuntu based laptop. 
-Moving between environments is simple giving you the flexibility to switch hosts +Moving between models is simple giving you the flexibility to switch hosts whenever you want — for free. -For more information, see the [docs](https://juju.ubuntu.com/docs/). +For more information, see the [docs](https://jujucharms.com/docs/stable/getting-started). Getting started =============== @@ -110,7 +110,7 @@ which juju -You should be able to bootstrap a local environment now with the following +You should be able to bootstrap a local model now with the following (Note: the use of sudo for bootstrap here is only required for the local provider because it uses LXC, which requires root privileges) @@ -127,10 +127,10 @@ number, and implies that no tools are available for the next, development, version of juju. Therefore, when using the development version of juju you will need to pass an additional flag, `--upload-tools` to instruct the `juju` client to build -a set of tools from source and upload them to the environment as part of the +a set of tools from source and upload them to the model as part of the bootstrap process. - juju bootstrap -e your-environment --upload-tools {--debug} + juju bootstrap -m your-model --upload-tools {--debug} Installing bash completion for juju === modified file 'src/github.com/juju/juju/agent/agent.go' --- src/github.com/juju/juju/agent/agent.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/agent/agent.go 2016-03-22 15:18:22 +0000 @@ -13,12 +13,12 @@ "path/filepath" "regexp" "strconv" - "strings" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" "github.com/juju/utils" + "github.com/juju/utils/series" "github.com/juju/utils/shell" "github.com/juju/juju/api" @@ -41,9 +41,10 @@ // These are base values used for the corresponding defaults. var ( - logDir = paths.MustSucceed(paths.LogDir(version.Current.Series)) - dataDir = paths.MustSucceed(paths.DataDir(version.Current.Series)) - confDir = paths.MustSucceed(paths.ConfDir(version.Current.Series)) + logDir = paths.MustSucceed(paths.LogDir(series.HostSeries())) + dataDir = paths.MustSucceed(paths.DataDir(series.HostSeries())) + confDir = paths.MustSucceed(paths.ConfDir(series.HostSeries())) + metricsSpoolDir = paths.MustSucceed(paths.MetricsSpoolDir(series.HostSeries())) ) // Agent exposes the agent's configuration to other components. This @@ -74,7 +75,7 @@ }) } -// SetStateServingInfo trivially wraps an Agent to implement +// StateServingInfoSetter trivially wraps an Agent to implement // worker/certupdater/SetStateServingInfo. type StateServingInfoSetter struct { Agent @@ -88,19 +89,64 @@ }) } +// Paths holds the directory paths used by the agent. +type Paths struct { + // DataDir is the data directory where each agent has a subdirectory + // containing the configuration files. + DataDir string + // LogDir is the log directory where all logs from all agents on + // the machine are written. + LogDir string + // MetricsSpoolDir is the spool directory where workloads store + // collected metrics. + MetricsSpoolDir string + // ConfDir is the directory where all config file for + // Juju agents are stored. + ConfDir string +} + +// Migrate assigns the directory locations specified from the new path configuration. 
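+// Only the non-empty fields of newPaths are applied; empty fields
+// leave the corresponding existing path unchanged.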
+func (p *Paths) Migrate(newPaths Paths) { + if newPaths.DataDir != "" { + p.DataDir = newPaths.DataDir + } + if newPaths.LogDir != "" { + p.LogDir = newPaths.LogDir + } + if newPaths.MetricsSpoolDir != "" { + p.MetricsSpoolDir = newPaths.MetricsSpoolDir + } + if newPaths.ConfDir != "" { + p.ConfDir = newPaths.ConfDir + } +} + +// NewPathsWithDefaults returns a Paths struct initialized with default locations if not otherwise specified. +func NewPathsWithDefaults(p Paths) Paths { + paths := DefaultPaths + if p.DataDir != "" { + paths.DataDir = p.DataDir + } + if p.LogDir != "" { + paths.LogDir = p.LogDir + } + if p.MetricsSpoolDir != "" { + paths.MetricsSpoolDir = p.MetricsSpoolDir + } + if p.ConfDir != "" { + paths.ConfDir = p.ConfDir + } + return paths +} + var ( - // DefaultLogDir defines the default log directory for juju agents. - // It's defined as a variable so it could be overridden in tests. - DefaultLogDir = path.Join(logDir, "juju") - - // DefaultDataDir defines the default data directory for juju agents. - // It's defined as a variable so it could be overridden in tests. - DefaultDataDir = dataDir - - // DefaultConfDir defines the default config file directory for - // Juju agents. - // It's defined as a variable so it could be overridden in tests. - DefaultConfDir = confDir + // DefaultPaths defines the default paths for an agent. + DefaultPaths = Paths{ + DataDir: dataDir, + LogDir: path.Join(logDir, "juju"), + MetricsSpoolDir: metricsSpoolDir, + ConfDir: confDir, + } ) // SystemIdentity is the name of the file where the environment SSH key is kept. @@ -111,12 +157,10 @@ ProviderType = "PROVIDER_TYPE" ContainerType = "CONTAINER_TYPE" Namespace = "NAMESPACE" - StorageDir = "STORAGE_DIR" - StorageAddr = "STORAGE_ADDR" AgentServiceName = "AGENT_SERVICE_NAME" MongoOplogSize = "MONGO_OPLOG_SIZE" NumaCtlPreference = "NUMA_CTL_PREFERENCE" - AllowsSecureConnection = "SECURE_STATESERVER_CONNECTION" + AllowsSecureConnection = "SECURE_CONTROLLER_CONNECTION" ) // The Config interface is the sole way that the agent gets access to the @@ -169,7 +213,7 @@ WriteCommands(renderer shell.Renderer) ([]string, error) // StateServingInfo returns the details needed to run - // a state server and reports whether those details + // a controller and reports whether those details // are available StateServingInfo() (params.StateServingInfo, bool) @@ -177,7 +221,7 @@ // reports whether the details are available. APIInfo() (*api.Info, bool) - // MongoInfo returns details for connecting to the state server's mongo + // MongoInfo returns details for connecting to the controller's mongo // database and reports whether those details are available MongoInfo() (*mongo.MongoInfo, bool) @@ -197,9 +241,16 @@ // available) when connecting to the state or API server. PreferIPv6() bool - // Environment returns the tag for the environment that the agent belongs - // to. - Environment() names.EnvironTag + // Model returns the tag for the model that the agent belongs to. + Model() names.ModelTag + + // MetricsSpoolDir returns the spool directory where workloads store + // collected metrics. + MetricsSpoolDir() string + + // MongoVersion returns the version of mongo that the state server + // is using. 
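+    // If the version stored in the agent config cannot be parsed,
+    // the implementation in this package falls back to mongo.Mongo24.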
+ MongoVersion() mongo.Version } type configSetterOnly interface { @@ -244,8 +295,11 @@ Migrate(MigrateParams) error // SetStateServingInfo sets the information needed - // to run a state server + // to run a controller SetStateServingInfo(info params.StateServingInfo) + + // SetMongoVersion sets the passed version as currently in use. + SetMongoVersion(mongo.Version) } // LogFileName returns the filename for the Agent's log file. @@ -275,12 +329,11 @@ // Migrate call. Empty fields will be ignored. DeleteValues // specifies a list of keys to delete. type MigrateParams struct { - DataDir string - LogDir string + Paths Paths Jobs []multiwatcher.MachineJob DeleteValues []string Values map[string]string - Environment names.EnvironTag + Model names.ModelTag } // Ensure that the configInternal struct implements the Config interface. @@ -302,11 +355,10 @@ type configInternal struct { configFilePath string - dataDir string - logDir string + paths Paths tag names.Tag nonce string - environment names.EnvironTag + model names.ModelTag jobs []multiwatcher.MachineJob upgradedToVersion version.Number caCert string @@ -316,34 +368,33 @@ servingInfo *params.StateServingInfo values map[string]string preferIPv6 bool + mongoVersion string } +// AgentConfigParams holds the parameters required to create +// a new AgentConfig. type AgentConfigParams struct { - DataDir string - LogDir string + Paths Paths Jobs []multiwatcher.MachineJob UpgradedToVersion version.Number Tag names.Tag Password string Nonce string - Environment names.EnvironTag + Model names.ModelTag StateAddresses []string APIAddresses []string CACert string Values map[string]string PreferIPv6 bool + MongoVersion mongo.Version } // NewAgentConfig returns a new config object suitable for use for a // machine or unit agent. func NewAgentConfig(configParams AgentConfigParams) (ConfigSetterWriter, error) { - if configParams.DataDir == "" { + if configParams.Paths.DataDir == "" { return nil, errors.Trace(requiredError("data directory")) } - logDir := DefaultLogDir - if configParams.LogDir != "" { - logDir = configParams.LogDir - } if configParams.Tag == nil { return nil, errors.Trace(requiredError("entity tag")) } @@ -359,10 +410,10 @@ if configParams.Password == "" { return nil, errors.Trace(requiredError("password")) } - if uuid := configParams.Environment.Id(); uuid == "" { - return nil, errors.Trace(requiredError("environment")) - } else if !names.IsValidEnvironment(uuid) { - return nil, errors.Errorf("%q is not a valid environment uuid", uuid) + if uuid := configParams.Model.Id(); uuid == "" { + return nil, errors.Trace(requiredError("model")) + } else if !names.IsValidModel(uuid) { + return nil, errors.Errorf("%q is not a valid model uuid", uuid) } if len(configParams.CACert) == 0 { return nil, errors.Trace(requiredError("CA certificate")) @@ -370,18 +421,19 @@ // Note that the password parts of the state and api information are // blank. This is by design. 
config := &configInternal{ - logDir: logDir, - dataDir: configParams.DataDir, + paths: NewPathsWithDefaults(configParams.Paths), jobs: configParams.Jobs, upgradedToVersion: configParams.UpgradedToVersion, tag: configParams.Tag, nonce: configParams.Nonce, - environment: configParams.Environment, + model: configParams.Model, caCert: configParams.CACert, oldPassword: configParams.Password, values: configParams.Values, preferIPv6: configParams.PreferIPv6, + mongoVersion: configParams.MongoVersion.String(), } + if len(configParams.StateAddresses) > 0 { config.stateDetails = &connectionDetails{ addresses: configParams.StateAddresses, @@ -398,18 +450,18 @@ if config.values == nil { config.values = make(map[string]string) } - config.configFilePath = ConfigPath(config.dataDir, config.tag) + config.configFilePath = ConfigPath(config.paths.DataDir, config.tag) return config, nil } // NewStateMachineConfig returns a configuration suitable for -// a machine running the state server. +// a machine running the controller. func NewStateMachineConfig(configParams AgentConfigParams, serverInfo params.StateServingInfo) (ConfigSetterWriter, error) { if serverInfo.Cert == "" { - return nil, errors.Trace(requiredError("state server cert")) + return nil, errors.Trace(requiredError("controller cert")) } if serverInfo.PrivateKey == "" { - return nil, errors.Trace(requiredError("state server key")) + return nil, errors.Trace(requiredError("controller key")) } if serverInfo.CAPrivateKey == "" { return nil, errors.Trace(requiredError("ca cert key")) @@ -428,11 +480,19 @@ return config, nil } +// BaseDir returns the directory containing the data directories for +// all the agents. +func BaseDir(dataDir string) string { + // Note: must use path, not filepath, as this function is + // (indirectly) used by the client on Windows. + return path.Join(dataDir, "agents") +} + // Dir returns the agent-specific data directory. func Dir(dataDir string, tag names.Tag) string { // Note: must use path, not filepath, as this // function is used by the client on Windows. - return path.Join(dataDir, "agents", tag.String()) + return path.Join(BaseDir(dataDir), tag.String()) } // ConfigPath returns the full path to the agent config file. @@ -452,47 +512,12 @@ if err != nil { return nil, fmt.Errorf("cannot read agent config %q: %v", configFilePath, err) } - - // Try to read the legacy format file. - dir := filepath.Dir(configFilePath) - legacyFormatPath := filepath.Join(dir, legacyFormatFilename) - formatBytes, err := ioutil.ReadFile(legacyFormatPath) - if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("cannot read format file: %v", err) - } - formatData := string(formatBytes) - if err == nil { - // It exists, so unmarshal with a legacy formatter. - // Drop the format prefix to leave the version only. - if !strings.HasPrefix(formatData, legacyFormatPrefix) { - return nil, fmt.Errorf("malformed agent config format %q", formatData) - } - format, err = getFormatter(strings.TrimPrefix(formatData, legacyFormatPrefix)) - if err != nil { - return nil, err - } - config, err = format.unmarshal(configData) - } else { - // Does not exist, just parse the data. - format, config, err = parseConfigData(configData) - } + format, config, err = parseConfigData(configData) if err != nil { return nil, err } logger.Debugf("read agent config, format %q", format.version()) config.configFilePath = configFilePath - if format != currentFormat { - // Migrate from a legacy format to the new one. 
- err := config.Write() - if err != nil { - return nil, fmt.Errorf("cannot migrate %s agent config to %s: %v", format.version(), currentFormat.version(), err) - } - logger.Debugf("migrated agent config from %s to %s", format.version(), currentFormat.version()) - err = os.Remove(legacyFormatPath) - if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("cannot remove legacy format file %q: %v", legacyFormatPath, err) - } - } return config, nil } @@ -511,13 +536,8 @@ } func (config *configInternal) Migrate(newParams MigrateParams) error { - if newParams.DataDir != "" { - config.dataDir = newParams.DataDir - config.configFilePath = ConfigPath(config.dataDir, config.tag) - } - if newParams.LogDir != "" { - config.logDir = newParams.LogDir - } + config.paths.Migrate(newParams.Paths) + config.configFilePath = ConfigPath(config.paths.DataDir, config.tag) if len(newParams.Jobs) > 0 { config.jobs = make([]multiwatcher.MachineJob, len(newParams.Jobs)) copy(config.jobs, newParams.Jobs) @@ -531,8 +551,8 @@ } config.values[key] = value } - if newParams.Environment.Id() != "" { - config.environment = newParams.Environment + if newParams.Model.Id() != "" { + config.model = newParams.Model } if err := config.check(); err != nil { return fmt.Errorf("migrated agent config is invalid: %v", err) @@ -550,7 +570,15 @@ } var addrs []string for _, serverHostPorts := range servers { - addrs = append(addrs, network.SelectInternalHostPorts(serverHostPorts, false)...) + // Try the preferred approach first. + serverHP, ok := network.SelectHostPortBySpace(serverHostPorts, network.DefaultSpace) + if ok { + addrs = append(addrs, serverHP.NetAddr()) + } else { + // Fallback to the legacy approach. + hps := network.SelectInternalHostPorts(serverHostPorts, false) + addrs = append(addrs, hps...) + } } c.apiDetails.addresses = addrs logger.Infof("API server address details %q written to agent config as %q", servers, addrs) @@ -599,15 +627,19 @@ } func (c *configInternal) DataDir() string { - return c.dataDir + return c.paths.DataDir +} + +func (c *configInternal) MetricsSpoolDir() string { + return c.paths.MetricsSpoolDir } func (c *configInternal) LogDir() string { - return c.logDir + return c.paths.LogDir } func (c *configInternal) SystemIdentityPath() string { - return filepath.Join(c.dataDir, SystemIdentity) + return filepath.Join(c.paths.DataDir, SystemIdentity) } func (c *configInternal) Jobs() []multiwatcher.MachineJob { @@ -660,12 +692,12 @@ return c.tag } -func (c *configInternal) Environment() names.EnvironTag { - return c.environment +func (c *configInternal) Model() names.ModelTag { + return c.model } func (c *configInternal) Dir() string { - return Dir(c.dataDir, c.tag) + return Dir(c.paths.DataDir, c.tag) } func (c *configInternal) check() error { @@ -673,7 +705,7 @@ return errors.Trace(requiredError("state or API addresses")) } if c.stateDetails != nil { - if err := checkAddrs(c.stateDetails.addresses, "state server address"); err != nil { + if err := checkAddrs(c.stateDetails.addresses, "controller address"); err != nil { return err } } @@ -685,6 +717,20 @@ return nil } +// MongoVersion implements Config. +func (c *configInternal) MongoVersion() mongo.Version { + v, err := mongo.NewVersion(c.mongoVersion) + if err != nil { + return mongo.Mongo24 + } + return v +} + +// SetMongoVersion implements configSetterOnly. 
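A round-trip sketch for the new mongo version accessors (illustrative; assumes a cfg built with NewAgentConfig as above):

	cfg.SetMongoVersion(mongo.Mongo24) // stored internally as its string form
	v := cfg.MongoVersion()            // yields mongo.Mongo24 again

MongoVersion falls back to mongo.Mongo24 whenever nothing was stored or the stored string fails to parse, so configs written before this change keep working.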
+func (c *configInternal) SetMongoVersion(v mongo.Version) { + c.mongoVersion = v.String() +} + var validAddr = regexp.MustCompile("^.+:[0-9]+$") func checkAddrs(addrs []string, what string) error { @@ -728,9 +774,9 @@ if c.apiDetails == nil || c.apiDetails.addresses == nil { return nil, false } - servingInfo, isStateServer := c.StateServingInfo() + servingInfo, isController := c.StateServingInfo() addrs := c.apiDetails.addresses - if isStateServer { + if isController { port := servingInfo.APIPort localAPIAddr := net.JoinHostPort("localhost", strconv.Itoa(port)) if c.preferIPv6 { @@ -748,12 +794,12 @@ } } return &api.Info{ - Addrs: addrs, - Password: c.apiDetails.password, - CACert: c.caCert, - Tag: c.tag, - Nonce: c.nonce, - EnvironTag: c.environment, + Addrs: addrs, + Password: c.apiDetails.password, + CACert: c.caCert, + Tag: c.tag, + Nonce: c.nonce, + ModelTag: c.model, }, true } === modified file 'src/github.com/juju/juju/agent/agent_test.go' --- src/github.com/juju/juju/agent/agent_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/agent/agent_test.go 2016-03-22 15:18:22 +0000 @@ -40,131 +40,131 @@ }, { about: "missing tag", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, }, checkErr: "entity tag not found in configuration", }, { about: "missing upgraded to version", params: agent.AgentConfigParams{ - DataDir: "/data/dir", - Tag: names.NewMachineTag("1"), + Paths: agent.Paths{DataDir: "/data/dir"}, + Tag: names.NewMachineTag("1"), }, checkErr: "upgradedToVersion not found in configuration", }, { about: "missing password", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, }, checkErr: "password not found in configuration", }, { - about: "missing environment tag", + about: "missing model tag", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", }, - checkErr: "environment not found in configuration", + checkErr: "model not found in configuration", }, { - about: "invalid environment tag", + about: "invalid model tag", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", - Environment: names.NewEnvironTag("uuid"), + Model: names.NewModelTag("uuid"), }, - checkErr: `"uuid" is not a valid environment uuid`, + checkErr: `"uuid" is not a valid model uuid`, }, { about: "missing CA cert", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, }, checkErr: "CA certificate not found in configuration", }, { about: "need either state or api addresses", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, }, 
checkErr: "state or API addresses not found in configuration", }, { about: "invalid state address", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:8080", "bad-address"}, }, - checkErr: `invalid state server address "bad-address"`, + checkErr: `invalid controller address "bad-address"`, }, { about: "invalid api address", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, APIAddresses: []string{"localhost:8080", "bad-address"}, }, checkErr: `invalid API server address "bad-address"`, }, { about: "good state addresses", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, }, }, { about: "good api addresses", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, APIAddresses: []string{"localhost:1234"}, }, }, { about: "both state and api addresses", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, }, }, { about: "everything...", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), Password: "sekrit", - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", @@ -172,36 +172,71 @@ }, { about: "missing logDir sets default", params: agent.AgentConfigParams{ - DataDir: "/data/dir", - Tag: names.NewMachineTag("1"), - Password: "sekrit", - UpgradedToVersion: version.Current.Number, - CACert: "ca cert", - Environment: testing.EnvironmentTag, - StateAddresses: []string{"localhost:1234"}, - APIAddresses: []string{"localhost:1235"}, - Nonce: "a nonce", - }, - inspectConfig: func(c *gc.C, cfg agent.Config) { - c.Check(cfg.LogDir(), gc.Equals, agent.DefaultLogDir) + Paths: agent.Paths{DataDir: "/data/dir"}, + Tag: names.NewMachineTag("1"), + Password: "sekrit", + UpgradedToVersion: version.Current, + CACert: "ca cert", + Model: testing.ModelTag, + StateAddresses: []string{"localhost:1234"}, + APIAddresses: []string{"localhost:1235"}, + 
Nonce: "a nonce", + }, + inspectConfig: func(c *gc.C, cfg agent.Config) { + c.Check(cfg.LogDir(), gc.Equals, agent.DefaultPaths.LogDir) + }, +}, { + about: "missing metricsSpoolDir sets default", + params: agent.AgentConfigParams{ + Paths: agent.Paths{DataDir: "/data/dir"}, + Tag: names.NewMachineTag("1"), + Password: "sekrit", + UpgradedToVersion: version.Current, + CACert: "ca cert", + Model: testing.ModelTag, + StateAddresses: []string{"localhost:1234"}, + APIAddresses: []string{"localhost:1235"}, + Nonce: "a nonce", + }, + inspectConfig: func(c *gc.C, cfg agent.Config) { + c.Check(cfg.MetricsSpoolDir(), gc.Equals, agent.DefaultPaths.MetricsSpoolDir) + }, +}, { + about: "setting a custom metricsSpoolDir", + params: agent.AgentConfigParams{ + Paths: agent.Paths{ + DataDir: "/data/dir", + MetricsSpoolDir: "/tmp/nowhere", + }, + Tag: names.NewMachineTag("1"), + Password: "sekrit", + UpgradedToVersion: version.Current, + CACert: "ca cert", + Model: testing.ModelTag, + StateAddresses: []string{"localhost:1234"}, + APIAddresses: []string{"localhost:1235"}, + Nonce: "a nonce", + }, + inspectConfig: func(c *gc.C, cfg agent.Config) { + c.Check(cfg.MetricsSpoolDir(), gc.Equals, "/tmp/nowhere") }, }, { about: "agentConfig must not be a User tag", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewUserTag("admin"), // this is a joke, the admin user is nil. - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", }, checkErr: "entity tag must be MachineTag or UnitTag, got names.UserTag", }, { about: "agentConfig accepts a Unit tag", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewUnitTag("ubuntu/1"), Password: "sekrit", - UpgradedToVersion: version.Current.Number, - Environment: testing.EnvironmentTag, + UpgradedToVersion: version.Current, + Model: testing.ModelTag, CACert: "ca cert", StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, @@ -212,12 +247,12 @@ }, { about: "prefer-ipv6 parsed when set", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), Password: "sekrit", - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", @@ -229,12 +264,12 @@ }, { about: "missing prefer-ipv6 defaults to false", params: agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{DataDir: "/data/dir"}, Tag: names.NewMachineTag("1"), Password: "sekrit", - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, CACert: "ca cert", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", @@ -261,18 +296,20 @@ func (*suite) TestMigrate(c *gc.C) { initialParams := agent.AgentConfigParams{ - DataDir: c.MkDir(), - LogDir: c.MkDir(), + Paths: agent.Paths{ + DataDir: c.MkDir(), + LogDir: c.MkDir(), + }, Tag: names.NewMachineTag("1"), Nonce: "nonce", Password: "secret", UpgradedToVersion: version.MustParse("1.16.5"), Jobs: []multiwatcher.MachineJob{ - multiwatcher.JobManageEnviron, + multiwatcher.JobManageModel, multiwatcher.JobHostUnits, }, CACert: "ca cert", - Environment: 
testing.EnvironmentTag, + Model: testing.ModelTag, StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:4321"}, Values: map[string]string{ @@ -293,15 +330,17 @@ fields: nil, newParams: agent.MigrateParams{}, }, { - fields: []string{"DataDir"}, + fields: []string{"Paths"}, newParams: agent.MigrateParams{ - DataDir: c.MkDir(), + Paths: agent.Paths{DataDir: c.MkDir()}, }, }, { - fields: []string{"DataDir", "LogDir"}, + fields: []string{"Paths"}, newParams: agent.MigrateParams{ - DataDir: c.MkDir(), - LogDir: c.MkDir(), + Paths: agent.Paths{ + DataDir: c.MkDir(), + LogDir: c.MkDir(), + }, }, }, { fields: []string{"Jobs"}, @@ -355,6 +394,7 @@ // Make sure we can read it back successfully and it // matches what we wrote. configPath := agent.ConfigPath(newConfig.DataDir(), newConfig.Tag()) + c.Logf("new config path: %v", configPath) readConfig, err := agent.ReadConfig(configPath) c.Check(err, jc.ErrorIsNil) c.Check(newConfig, jc.DeepEquals, readConfig) @@ -405,14 +445,14 @@ inspectConfig func(*gc.C, agent.Config) } var tests = []testStruct{{ - about: "missing state server cert", - checkErr: "state server cert not found in configuration", + about: "missing controller cert", + checkErr: "controller cert not found in configuration", }, { - about: "missing state server key", + about: "missing controller key", servingInfo: params.StateServingInfo{ Cert: "server cert", }, - checkErr: "state server key not found in configuration", + checkErr: "controller key not found in configuration", }, { about: "missing ca cert key", servingInfo: params.StateServingInfo{ @@ -462,15 +502,17 @@ } var attributeParams = agent.AgentConfigParams{ - DataDir: "/data/dir", + Paths: agent.Paths{ + DataDir: "/data/dir", + }, Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, } func (*suite) TestAttributes(c *gc.C) { @@ -483,7 +525,7 @@ c.Assert(conf.Tag(), gc.Equals, names.NewMachineTag("1")) c.Assert(conf.Dir(), gc.Equals, "/data/dir/agents/machine-1") c.Assert(conf.Nonce(), gc.Equals, "a nonce") - c.Assert(conf.UpgradedToVersion(), jc.DeepEquals, version.Current.Number) + c.Assert(conf.UpgradedToVersion(), jc.DeepEquals, version.Current) } func (*suite) TestStateServingInfo(c *gc.C) { @@ -531,8 +573,8 @@ func (*suite) TestWriteAndRead(c *gc.C) { testParams := attributeParams - testParams.DataDir = c.MkDir() - testParams.LogDir = c.MkDir() + testParams.Paths.DataDir = c.MkDir() + testParams.Paths.LogDir = c.MkDir() conf, err := agent.NewAgentConfig(testParams) c.Assert(err, jc.ErrorIsNil) @@ -632,12 +674,12 @@ c.Assert(err, jc.ErrorIsNil) expectAPIInfo := &api.Info{ - Addrs: attrParams.APIAddresses, - CACert: attrParams.CACert, - Tag: attrParams.Tag, - Password: "", - Nonce: attrParams.Nonce, - EnvironTag: attrParams.Environment, + Addrs: attrParams.APIAddresses, + CACert: attrParams.CACert, + Tag: attrParams.Tag, + Password: "", + Nonce: attrParams.Nonce, + ModelTag: attrParams.Model, } apiInfo, ok := conf.APIInfo() c.Assert(ok, jc.IsTrue) @@ -681,7 +723,7 @@ conf, err := agent.NewAgentConfig(attributeParams) c.Assert(err, jc.ErrorIsNil) - c.Assert(conf.UpgradedToVersion(), gc.Equals, version.Current.Number) + c.Assert(conf.UpgradedToVersion(), gc.Equals, version.Current) expectVers := version.MustParse("3.4.5") 
conf.SetUpgradedToVersion(expectVers)

=== modified file 'src/github.com/juju/juju/agent/bootstrap.go'
--- src/github.com/juju/juju/agent/bootstrap.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/agent/bootstrap.go	2016-03-22 15:18:22 +0000
@@ -7,6 +7,7 @@
 	"github.com/juju/errors"
 	"github.com/juju/names"
 	"github.com/juju/utils"
+	"github.com/juju/utils/series"

 	"github.com/juju/juju/apiserver/params"
 	"github.com/juju/juju/constraints"
@@ -14,14 +15,12 @@
 	"github.com/juju/juju/instance"
 	"github.com/juju/juju/mongo"
 	"github.com/juju/juju/network"
-	"github.com/juju/juju/provider"
 	"github.com/juju/juju/state"
 	"github.com/juju/juju/state/multiwatcher"
-	"github.com/juju/juju/version"
 )

 const (
-	// BootstrapNonce is used as a nonce for the state server machine.
+	// BootstrapNonce is used as a nonce for the controller machine.
 	BootstrapNonce = "user-admin:bootstrap"
 )

@@ -31,9 +30,11 @@
 	// Addresses holds the bootstrap machine's addresses.
 	Addresses []network.Address

-	// Constraints holds the bootstrap machine's constraints.
-	// This value is also used for the environment-level constraints.
-	Constraints constraints.Value
+	// BootstrapConstraints holds the bootstrap machine's constraints.
+	BootstrapConstraints constraints.Value
+
+	// ModelConstraints holds the model-level constraints.
+	ModelConstraints constraints.Value

 	// Jobs holds the jobs that the machine agent will run.
 	Jobs []multiwatcher.MachineJob
@@ -52,14 +53,14 @@
 const BootstrapMachineId = "0"

 // InitializeState should be called on the bootstrap machine's agent
-// configuration. It uses that information to create the state server, dial the
-// state server, and initialize it. It also generates a new password for the
+// configuration. It uses that information to create the controller, dial the
+// controller, and initialize it. It also generates a new password for the
 // bootstrap machine and calls Write to save the configuration.
 //
-// The envCfg values will be stored in the state's EnvironConfig; the
+// The envCfg values will be stored in the state's ModelConfig; the
 // machineCfg values will be used to configure the bootstrap Machine,
-// and its constraints will be also be used for the environment-level
-// constraints. The connection to the state server will respect the
+// and its constraints will also be used for the model-level
+// constraints. The connection to the controller will respect the
 // given timeout parameter.
 //
 // InitializeState returns the newly initialized state and bootstrap
@@ -99,13 +100,8 @@
 	servingInfo.SharedSecret = machineCfg.SharedSecret
 	c.SetStateServingInfo(servingInfo)

-	// Filter out any LXC bridge addresses from the machine addresses,
-	// except for local environments. See LP bug #1416928.
-	if !isLocalEnv(envCfg) {
-		machineCfg.Addresses = network.FilterLXCAddresses(machineCfg.Addresses)
-	} else {
-		logger.Debugf("local environment - not filtering addresses from %v", machineCfg.Addresses)
-	}
+	// Filter out any LXC bridge addresses from the machine addresses.
+	machineCfg.Addresses = network.FilterLXCAddresses(machineCfg.Addresses)

 	if err = initAPIHostPorts(c, st, machineCfg.Addresses, servingInfo.APIPort); err != nil {
 		return nil, nil, err
@@ -121,12 +117,6 @@
 	return st, m, nil
 }

-// isLocalEnv returns true if the given config is for a local
-// environment. Defined like this for testing.
-var isLocalEnv = func(cfg *config.Config) bool {
-	return cfg.Type() == provider.Local
-}
-
 func paramsStateServingInfoToStateStateServingInfo(i params.StateServingInfo) state.StateServingInfo {
 	return state.StateServingInfo{
 		APIPort: i.APIPort,
@@ -140,7 +130,7 @@
 }

 func initConstraintsAndBootstrapMachine(c ConfigSetter, st *state.State, cfg BootstrapMachineConfig) (*state.Machine, error) {
-	if err := st.SetEnvironConstraints(cfg.Constraints); err != nil {
+	if err := st.SetModelConstraints(cfg.ModelConstraints); err != nil {
 		return nil, errors.Errorf("cannot set initial environ constraints: %v", err)
 	}
 	m, err := initBootstrapMachine(c, st, cfg)
@@ -175,9 +165,9 @@
 	}
 	m, err := st.AddOneMachine(state.MachineTemplate{
 		Addresses:               cfg.Addresses,
-		Series:                  version.Current.Series,
+		Series:                  series.HostSeries(),
 		Nonce:                   BootstrapNonce,
-		Constraints:             cfg.Constraints,
+		Constraints:             cfg.BootstrapConstraints,
 		InstanceId:              cfg.InstanceId,
 		HardwareCharacteristics: cfg.Characteristics,
 		Jobs:                    jobs,
@@ -209,7 +199,18 @@
 // initAPIHostPorts sets the initial API host/port addresses in state.
 func initAPIHostPorts(c ConfigSetter, st *state.State, addrs []network.Address, apiPort int) error {
-	hostPorts := network.AddressesWithPort(addrs, apiPort)
+	var hostPorts []network.HostPort
+	// First try to select the correct address from the default space, on which
+	// all API servers should be accessible.
+	spaceAddr, ok := network.SelectAddressBySpace(addrs, network.DefaultSpace)
+	if ok {
+		logger.Debugf("selected %q as API address, using space %q", spaceAddr.Value, network.DefaultSpace)
+		hostPorts = network.AddressesWithPort([]network.Address{spaceAddr}, apiPort)
+	} else {
+		// Fall back to using all the addresses instead.
+		hostPorts = network.AddressesWithPort(addrs, apiPort)
+	}
+
 	return st.SetAPIHostPorts([][]network.HostPort{hostPorts})
 }

@@ -220,13 +221,10 @@
 	switch job {
 	case multiwatcher.JobHostUnits:
 		return state.JobHostUnits, nil
-	case multiwatcher.JobManageEnviron:
-		return state.JobManageEnviron, nil
+	case multiwatcher.JobManageModel:
+		return state.JobManageModel, nil
 	case multiwatcher.JobManageNetworking:
 		return state.JobManageNetworking, nil
-	case multiwatcher.JobManageStateDeprecated:
-		// Deprecated in 1.18.
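A bootstrap assembly sketch tying the pieces above together (illustrative; the field values are hypothetical and mirror the tests below): the bootstrap machine's own constraints and the model-wide constraints now travel separately, with initConstraintsAndBootstrapMachine applying ModelConstraints to state and initBootstrapMachine applying BootstrapConstraints to machine 0.

	mcfg := agent.BootstrapMachineConfig{
		Addresses:            network.NewAddresses("0.1.2.3"),
		BootstrapConstraints: constraints.MustParse("mem=1024M"), // machine 0 only
		ModelConstraints:     constraints.MustParse("mem=512M"),  // model-wide default
		Jobs:                 []multiwatcher.MachineJob{multiwatcher.JobManageModel},
		InstanceId:           "i-bootstrap",
		SharedSecret:         "abc123",
	}
	// InitializeState consumes mcfg when seeding the new controller's state.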
- return state.JobManageStateDeprecated, nil default: return -1, errors.Errorf("invalid machine job %q", job) } === modified file 'src/github.com/juju/juju/agent/bootstrap_test.go' --- src/github.com/juju/juju/agent/bootstrap_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/agent/bootstrap_test.go 2016-03-22 15:18:22 +0000 @@ -12,6 +12,7 @@ gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/agent" @@ -50,15 +51,7 @@ s.BaseSuite.TearDownTest(c) } -func (s *bootstrapSuite) TestInitializeStateNonLocal(c *gc.C) { - s.testInitializeState(c, false) -} - -func (s *bootstrapSuite) TestInitializeStateLocal(c *gc.C) { - s.testInitializeState(c, true) -} - -func (s *bootstrapSuite) testInitializeState(c *gc.C, fakeLocalEnv bool) { +func (s *bootstrapSuite) TestInitializeState(c *gc.C) { dataDir := c.MkDir() lxcFakeNetConfig := filepath.Join(c.MkDir(), "lxc-net") @@ -79,20 +72,16 @@ }, nil }) s.PatchValue(&network.LXCNetDefaultConfig, lxcFakeNetConfig) - s.PatchValue(agent.IsLocalEnv, func(*config.Config) bool { - c.Logf("fakeLocalEnv=%v", fakeLocalEnv) - return fakeLocalEnv - }) pwHash := utils.UserPasswordHash(testing.DefaultMongoPassword, utils.CompatSalt) configParams := agent.AgentConfigParams{ - DataDir: dataDir, + Paths: agent.Paths{DataDir: dataDir}, Tag: names.NewMachineTag("0"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, StateAddresses: []string{s.mgoInst.Addr()}, CACert: testing.CACert, Password: pwHash, - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, } servingInfo := params.StateServingInfo{ Cert: testing.ServerCert, @@ -108,34 +97,32 @@ _, available := cfg.StateServingInfo() c.Assert(available, jc.IsTrue) - expectConstraints := constraints.MustParse("mem=1024M") + expectBootstrapConstraints := constraints.MustParse("mem=1024M") + expectModelConstraints := constraints.MustParse("mem=512M") expectHW := instance.MustParseHardware("mem=2048M") initialAddrs := network.NewAddresses( "zeroonetwothree", "0.1.2.3", - "10.0.3.1", // lxc bridge address filtered (when fakeLocalEnv=false). + "10.0.3.1", // lxc bridge address filtered. "10.0.3.4", // lxc bridge address filtered (-"-). "10.0.3.3", // not a lxc bridge address ) mcfg := agent.BootstrapMachineConfig{ - Addresses: initialAddrs, - Constraints: expectConstraints, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobManageEnviron}, - InstanceId: "i-bootstrap", - Characteristics: expectHW, - SharedSecret: "abc123", + Addresses: initialAddrs, + BootstrapConstraints: expectBootstrapConstraints, + ModelConstraints: expectModelConstraints, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobManageModel}, + InstanceId: "i-bootstrap", + Characteristics: expectHW, + SharedSecret: "abc123", } filteredAddrs := network.NewAddresses( "zeroonetwothree", "0.1.2.3", "10.0.3.3", ) - if fakeLocalEnv { - // For local environments - no filtering. - filteredAddrs = append([]network.Address{}, initialAddrs...) - } envAttrs := dummy.SampleConfig().Delete("admin-secret").Merge(testing.Attrs{ - "agent-version": version.Current.Number.String(), + "agent-version": version.Current.String(), "state-id": "1", // needed so policy can Open config }) envCfg, err := config.New(config.NoDefaults, envAttrs) @@ -150,33 +137,37 @@ c.Assert(err, jc.ErrorIsNil) // Check that the environment has been set up. 
- env, err := st.Environment() + env, err := st.Model() c.Assert(err, jc.ErrorIsNil) uuid, ok := envCfg.UUID() c.Assert(ok, jc.IsTrue) c.Assert(env.UUID(), gc.Equals, uuid) // Check that initial admin user has been set up correctly. - envTag := env.Tag().(names.EnvironTag) - s.assertCanLogInAsAdmin(c, envTag, pwHash) + modelTag := env.Tag().(names.ModelTag) + s.assertCanLogInAsAdmin(c, modelTag, pwHash) user, err := st.User(env.Owner()) c.Assert(err, jc.ErrorIsNil) c.Assert(user.PasswordValid(testing.DefaultMongoPassword), jc.IsTrue) - // Check that environment configuration has been added. - newEnvCfg, err := st.EnvironConfig() + // Check that model configuration has been added, and + // model constraints set. + newEnvCfg, err := st.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(newEnvCfg.AllAttrs(), gc.DeepEquals, envCfg.AllAttrs()) + gotModelConstraints, err := st.ModelConstraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(gotModelConstraints, gc.DeepEquals, expectModelConstraints) // Check that the bootstrap machine looks correct. c.Assert(m.Id(), gc.Equals, "0") - c.Assert(m.Jobs(), gc.DeepEquals, []state.MachineJob{state.JobManageEnviron}) - c.Assert(m.Series(), gc.Equals, version.Current.Series) + c.Assert(m.Jobs(), gc.DeepEquals, []state.MachineJob{state.JobManageModel}) + c.Assert(m.Series(), gc.Equals, series.HostSeries()) c.Assert(m.CheckProvisioned(agent.BootstrapNonce), jc.IsTrue) c.Assert(m.Addresses(), jc.DeepEquals, filteredAddrs) - gotConstraints, err := m.Constraints() + gotBootstrapConstraints, err := m.Constraints() c.Assert(err, jc.ErrorIsNil) - c.Assert(gotConstraints, gc.DeepEquals, expectConstraints) + c.Assert(gotBootstrapConstraints, gc.DeepEquals, expectBootstrapConstraints) c.Assert(err, jc.ErrorIsNil) gotHW, err := m.HardwareCharacteristics() c.Assert(err, jc.ErrorIsNil) @@ -212,20 +203,20 @@ c.Assert(agent.Password(newCfg), gc.Not(gc.Equals), testing.DefaultMongoPassword) info, ok := cfg.MongoInfo() c.Assert(ok, jc.IsTrue) - st1, err := state.Open(newCfg.Environment(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) + st1, err := state.Open(newCfg.Model(), info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) c.Assert(err, jc.ErrorIsNil) defer st1.Close() } func (s *bootstrapSuite) TestInitializeStateWithStateServingInfoNotAvailable(c *gc.C) { configParams := agent.AgentConfigParams{ - DataDir: c.MkDir(), + Paths: agent.Paths{DataDir: c.MkDir()}, Tag: names.NewMachineTag("0"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, StateAddresses: []string{s.mgoInst.Addr()}, CACert: testing.CACert, Password: "fake", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, } cfg, err := agent.NewAgentConfig(configParams) c.Assert(err, jc.ErrorIsNil) @@ -244,13 +235,13 @@ pwHash := utils.UserPasswordHash(testing.DefaultMongoPassword, utils.CompatSalt) configParams := agent.AgentConfigParams{ - DataDir: dataDir, + Paths: agent.Paths{DataDir: dataDir}, Tag: names.NewMachineTag("0"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, StateAddresses: []string{s.mgoInst.Addr()}, CACert: testing.CACert, Password: pwHash, - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, } cfg, err := agent.NewAgentConfig(configParams) c.Assert(err, jc.ErrorIsNil) @@ -262,16 +253,15 @@ SharedSecret: "baz", SystemIdentity: "qux", }) - expectConstraints := constraints.MustParse("mem=1024M") expectHW := instance.MustParseHardware("mem=2048M") mcfg := agent.BootstrapMachineConfig{ - 
Constraints: expectConstraints, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobManageEnviron}, - InstanceId: "i-bootstrap", - Characteristics: expectHW, + BootstrapConstraints: constraints.MustParse("mem=1024M"), + Jobs: []multiwatcher.MachineJob{multiwatcher.JobManageModel}, + InstanceId: "i-bootstrap", + Characteristics: expectHW, } envAttrs := dummy.SampleConfig().Delete("admin-secret").Merge(testing.Attrs{ - "agent-version": version.Current.Number.String(), + "agent-version": version.Current.String(), "state-id": "1", // needed so policy can Open config }) envCfg, err := config.New(config.NoDefaults, envAttrs) @@ -298,15 +288,12 @@ name: multiwatcher.JobHostUnits, want: state.JobHostUnits, }, { - name: multiwatcher.JobManageEnviron, - want: state.JobManageEnviron, + name: multiwatcher.JobManageModel, + want: state.JobManageModel, }, { name: multiwatcher.JobManageNetworking, want: state.JobManageNetworking, }, { - name: multiwatcher.JobManageStateDeprecated, - want: state.JobManageStateDeprecated, - }, { name: "invalid", want: -1, err: `invalid machine job "invalid"`, @@ -320,7 +307,7 @@ } } -func (s *bootstrapSuite) assertCanLogInAsAdmin(c *gc.C, environTag names.EnvironTag, password string) { +func (s *bootstrapSuite) assertCanLogInAsAdmin(c *gc.C, modelTag names.ModelTag, password string) { info := &mongo.MongoInfo{ Info: mongo.Info{ Addrs: []string{s.mgoInst.Addr()}, @@ -329,7 +316,7 @@ Tag: nil, // admin user Password: password, } - st, err := state.Open(environTag, info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) + st, err := state.Open(modelTag, info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) c.Assert(err, jc.ErrorIsNil) defer st.Close() _, err = st.Machine("0") === modified file 'src/github.com/juju/juju/agent/export_test.go' --- src/github.com/juju/juju/agent/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/agent/export_test.go 2016-03-22 15:18:22 +0000 @@ -21,10 +21,14 @@ func PatchConfig(config Config, fieldName string, value interface{}) error { conf := config.(*configInternal) switch fieldName { - case "DataDir": - conf.dataDir = value.(string) - case "LogDir": - conf.logDir = value.(string) + case "Paths": + paths := value.(Paths) + if paths.DataDir != "" { + conf.paths.DataDir = paths.DataDir + } + if paths.LogDir != "" { + conf.paths.LogDir = paths.LogDir + } case "Jobs": conf.jobs = value.([]multiwatcher.MachineJob)[:] case "DeleteValues": @@ -41,7 +45,7 @@ default: return fmt.Errorf("unknown field %q", fieldName) } - conf.configFilePath = ConfigPath(conf.dataDir, conf.tag) + conf.configFilePath = ConfigPath(conf.paths.DataDir, conf.tag) return nil } @@ -53,7 +57,6 @@ var ( MachineJobFromParams = machineJobFromParams - IsLocalEnv = &isLocalEnv ) func EmptyConfig() Config { === removed file 'src/github.com/juju/juju/agent/format-1.16.go' --- src/github.com/juju/juju/agent/format-1.16.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/agent/format-1.16.go 1970-01-01 00:00:00 +0000 @@ -1,145 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package agent - -import ( - "encoding/base64" - "fmt" - "net" - "strconv" - - "github.com/juju/names" - goyaml "gopkg.in/yaml.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/version" -) - -var format_1_16 = formatter_1_16{} - -// formatter_1_16 is the formatter for the 1.16 format. -type formatter_1_16 struct { -} - -// Ensure that the formatter_1_16 struct implements the formatter interface. 
-var _ formatter = formatter_1_16{}
-
-// format_1_16Serialization holds information for a given agent.
-type format_1_16Serialization struct {
-	Tag               string
-	Nonce             string
-	UpgradedToVersion *version.Number `yaml:"upgradedToVersion"`
-
-	CACert         string
-	StateAddresses []string `yaml:",omitempty"`
-	StatePassword  string   `yaml:",omitempty"`
-
-	APIAddresses []string `yaml:",omitempty"`
-	APIPassword  string   `yaml:",omitempty"`
-
-	OldPassword string
-	Values      map[string]string
-
-	// Only state server machines have these next three items
-	StateServerCert string `yaml:",omitempty"`
-	StateServerKey  string `yaml:",omitempty"`
-	APIPort         int    `yaml:",omitempty"`
-}
-
-func init() {
-	registerFormat(format_1_16)
-}
-
-const legacyFormatFilename = "format"
-
-// legacyFormatPrefix is the prefix of the legacy format file.
-const legacyFormatPrefix = "format "
-
-// decode64 makes sure that for an empty string we have a nil slice, not an
-// empty slice, which is what the base64 DecodeString function returns.
-func decode64(value string) (result []byte, err error) {
-	if value != "" {
-		result, err = base64.StdEncoding.DecodeString(value)
-	}
-	return
-}
-
-func (formatter_1_16) version() string {
-	return "1.16"
-}
-
-func (formatter_1_16) unmarshal(data []byte) (*configInternal, error) {
-	var format format_1_16Serialization
-	if err := goyaml.Unmarshal(data, &format); err != nil {
-		return nil, err
-	}
-	caCert, err := decode64(format.CACert)
-	if err != nil {
-		return nil, err
-	}
-	stateServerCert, err := decode64(format.StateServerCert)
-	if err != nil {
-		return nil, err
-	}
-	stateServerKey, err := decode64(format.StateServerKey)
-	if err != nil {
-		return nil, err
-	}
-	if format.UpgradedToVersion == nil {
-		// Assume it's 1.16.0.
-		upgradedToVersion := version.MustParse("1.16.0")
-		format.UpgradedToVersion = &upgradedToVersion
-	}
-	tag, err := names.ParseTag(format.Tag)
-	if err != nil {
-		return nil, err
-	}
-	config := &configInternal{
-		tag:               tag,
-		nonce:             format.Nonce,
-		dataDir:           DefaultDataDir,
-		logDir:            DefaultLogDir,
-		upgradedToVersion: *format.UpgradedToVersion,
-		caCert:            string(caCert),
-		oldPassword:       format.OldPassword,
-		values:            format.Values,
-	}
-	if len(format.StateAddresses) > 0 {
-		config.stateDetails = &connectionDetails{
-			format.StateAddresses,
-			format.StatePassword,
-		}
-	}
-
-	if len(stateServerKey) != 0 {
-		config.servingInfo = &params.StateServingInfo{
-			Cert:       string(stateServerCert),
-			PrivateKey: string(stateServerKey),
-			APIPort:    format.APIPort,
-		}
-		// There's a private key, then we need the state
-		// port, which wasn't directly available in the 1.16 format,
-		// but we can infer it from the ports in the state addresses.
-		if len(format.StateAddresses) > 0 {
-			_, portString, err := net.SplitHostPort(format.StateAddresses[0])
-			if err != nil {
-				return nil, err
-			}
-			statePort, err := strconv.Atoi(portString)
-			if err != nil {
-				return nil, err
-			}
-			config.servingInfo.StatePort = statePort
-		} else {
-			return nil, fmt.Errorf("server key found but no state port")
-		}
-	}
-	if len(format.APIAddresses) > 0 {
-		config.apiDetails = &connectionDetails{
-			format.APIAddresses,
-			format.APIPassword,
-		}
-	}
-	return config, nil
-}

=== removed file 'src/github.com/juju/juju/agent/format-1.16_whitebox_test.go'
--- src/github.com/juju/juju/agent/format-1.16_whitebox_test.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/agent/format-1.16_whitebox_test.go	1970-01-01 00:00:00 +0000
@@ -1,160 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -// The format tests are white box tests, meaning that the tests are in the -// same package as the code, as all the format details are internal to the -// package. - -package agent - -import ( - "io/ioutil" - "path/filepath" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/juju/paths" - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type format_1_16Suite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&format_1_16Suite{}) - -func (s *format_1_16Suite) TestMissingAttributes(c *gc.C) { - logDir, err := paths.LogDir(version.Current.Series) - c.Assert(err, jc.ErrorIsNil) - realDataDir, err := paths.DataDir(version.Current.Series) - c.Assert(err, jc.ErrorIsNil) - - realDataDir = filepath.FromSlash(realDataDir) - logPath := filepath.Join(logDir, "juju") - logPath = filepath.FromSlash(logPath) - - dataDir := c.MkDir() - formatPath := filepath.Join(dataDir, legacyFormatFilename) - err = utils.AtomicWriteFile(formatPath, []byte(legacyFormatFileContents), 0600) - c.Assert(err, jc.ErrorIsNil) - configPath := filepath.Join(dataDir, agentConfigFilename) - - err = utils.AtomicWriteFile(configPath, []byte(configDataWithoutNewAttributes), 0600) - c.Assert(err, jc.ErrorIsNil) - readConfig, err := ReadConfig(configPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(readConfig.UpgradedToVersion(), gc.Equals, version.MustParse("1.16.0")) - configLogDir := filepath.FromSlash(readConfig.LogDir()) - configDataDir := filepath.FromSlash(readConfig.DataDir()) - - c.Assert(configLogDir, gc.Equals, logPath) - c.Assert(configDataDir, gc.Equals, realDataDir) - // Test data doesn't include a StateServerKey so StateServingInfo - // should *not* be available - _, available := readConfig.StateServingInfo() - c.Assert(available, jc.IsFalse) -} - -func (*format_1_16Suite) TestStatePortParsed(c *gc.C) { - dataDir := c.MkDir() - formatPath := filepath.Join(dataDir, legacyFormatFilename) - err := utils.AtomicWriteFile(formatPath, []byte(legacyFormatFileContents), 0600) - c.Assert(err, jc.ErrorIsNil) - configPath := filepath.Join(dataDir, agentConfigFilename) - err = utils.AtomicWriteFile(configPath, []byte(stateMachineConfigData), 0600) - c.Assert(err, jc.ErrorIsNil) - readConfig, err := ReadConfig(configPath) - c.Assert(err, jc.ErrorIsNil) - info, available := readConfig.StateServingInfo() - c.Assert(available, jc.IsTrue) - c.Assert(info.StatePort, gc.Equals, 37017) -} - -func (*format_1_16Suite) TestReadConfReadsLegacyFormatAndWritesNew(c *gc.C) { - dataDir := c.MkDir() - formatPath := filepath.Join(dataDir, legacyFormatFilename) - err := utils.AtomicWriteFile(formatPath, []byte(legacyFormatFileContents), 0600) - c.Assert(err, jc.ErrorIsNil) - configPath := filepath.Join(dataDir, agentConfigFilename) - err = utils.AtomicWriteFile(configPath, []byte(agentConfig1_16Contents), 0600) - c.Assert(err, jc.ErrorIsNil) - - config, err := ReadConfig(configPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(config, gc.NotNil) - // Test we wrote a currently valid config. - config, err = ReadConfig(configPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(config, gc.NotNil) - c.Assert(config.UpgradedToVersion(), jc.DeepEquals, version.MustParse("1.16.0")) - c.Assert(config.Jobs(), gc.HasLen, 0) - - // Old format was deleted. - assertFileNotExist(c, formatPath) - // And new contents were written. 
- data, err := ioutil.ReadFile(configPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Not(gc.Equals), agentConfig1_16Contents) -} - -const legacyFormatFileContents = "format 1.16" - -var agentConfig1_16Contents = ` -tag: machine-0 -nonce: user-admin:bootstrap -cacert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNXekNDQWNhZ0F3SUJBZ0lCQURBTEJna3Foa2lHOXcwQkFRVXdRekVOTUFzR0ExVUVDaE1FYW5WcWRURXkKTURBR0ExVUVBd3dwYW5WcWRTMW5aVzVsY21GMFpXUWdRMEVnWm05eUlHVnVkbWx5YjI1dFpXNTBJQ0pzYjJOaApiQ0l3SGhjTk1UUXdNekExTVRJeE9ERTJXaGNOTWpRd016QTFNVEl5TXpFMldqQkRNUTB3Q3dZRFZRUUtFd1JxCmRXcDFNVEl3TUFZRFZRUUREQ2xxZFdwMUxXZGxibVZ5WVhSbFpDQkRRU0JtYjNJZ1pXNTJhWEp2Ym0xbGJuUWcKSW14dlkyRnNJakNCbnpBTkJna3Foa2lHOXcwQkFRRUZBQU9CalFBd2dZa0NnWUVBd3NaVUg3NUZGSW1QUWVGSgpaVnVYcmlUWmNYdlNQMnk0VDJaSU5WNlVrY2E5VFdXb01XaWlPYm4yNk03MjNGQllPczh3WHRYNEUxZ2l1amxYCmZGeHNFckloczEyVXQ1S3JOVkkyMlEydCtVOGViakZMUHJiUE5Fb3pzdnU3UzFjZklFbjBXTVg4MWRBaENOMnQKVkxGaC9hS3NqSHdDLzJ5Y3Z0VSttTngyVG5FQ0F3RUFBYU5qTUdFd0RnWURWUjBQQVFIL0JBUURBZ0NrTUE4RwpBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZKVUxKZVlIbERsdlJ3T0owcWdyemcwclZGZUVNQjhHCkExVWRJd1FZTUJhQUZKVUxKZVlIbERsdlJ3T0owcWdyemcwclZGZUVNQXNHQ1NxR1NJYjNEUUVCQlFPQmdRQ2UKRlRZbThsWkVYZUp1cEdPc3pwc2pjaHNSMEFxeXROZ1dxQmE1cWUyMS9xS2R3TUFSQkNFMTU3eUxGVnl6MVoycQp2YVhVNy9VKzdKbGNiWmtHRHJ5djE2S2UwK2RIY3NEdG5jR2FOVkZKMTAxYnNJNG1sVEkzQWpQNDErNG5mQ0VlCmhwalRvYm1YdlBhOFN1NGhQYTBFc1E4bXFaZGFabmdwRU0vb1JiZ0RMdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K -stateaddresses: -- localhost:37017 -statepassword: OlUMkte5J3Ss0CH9yxedilIC -apiaddresses: -- localhost:17070 -apipassword: OlUMkte5J3Ss0CH9yxedilIC -oldpassword: oBlMbFUGvCb2PMFgYVzjS6GD -values: - PROVIDER_TYPE: local - SHARED_STORAGE_ADDR: 10.0.3.1:8041 - SHARED_STORAGE_DIR: /home/user/.juju/local/shared-storage - STORAGE_ADDR: 10.0.3.1:8040 - STORAGE_DIR: /home/user/.juju/local/storage -stateservercert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNJakNDQVkyZ0F3SUJBZ0lCQURBTEJna3Foa2lHOXcwQkFRVXdRekVOTUFzR0ExVUVDaE1FYW5WcWRURXkKTURBR0ExVUVBd3dwYW5WcWRTMW5aVzVsY21GMFpXUWdRMEVnWm05eUlHVnVkbWx5YjI1dFpXNTBJQ0pzYjJOaApiQ0l3SGhjTk1UUXdNekExTVRJeE9ESXlXaGNOTWpRd016QTFNVEl5TXpJeVdqQWJNUTB3Q3dZRFZRUUtFd1JxCmRXcDFNUW93Q0FZRFZRUURFd0VxTUlHZk1BMEdDU3FHU0liM0RRRUJBUVVBQTRHTkFEQ0JpUUtCZ1FDdVA0dTAKQjZtbGs0V0g3SHFvOXhkSFp4TWtCUVRqV2VLTkhERzFMb21SWmc2RHA4Z0VQK0ZNVm5IaUprZW1pQnJNSEk3OAo5bG4zSVRBT0NJT0xna0NkN3ZsaDJub2FheTlSeXpUaG9PZ0RMSzVpR0VidmZDeEFWZThhWDQvbThhOGNLWE9TCmJJZTZFNnVtb0wza0JNaEdiL1QrYW1xbHRjaHVNRXJhanJSVit3SURBUUFCbzFJd1VEQU9CZ05WSFE4QkFmOEUKQkFNQ0FCQXdIUVlEVlIwT0JCWUVGRTV1RFg3UlRjckF2ajFNcWpiU2w1M21pR0NITUI4R0ExVWRJd1FZTUJhQQpGSlVMSmVZSGxEbHZSd09KMHFncnpnMHJWRmVFTUFzR0NTcUdTSWIzRFFFQkJRT0JnUUJUNC8vZkpESUcxM2dxClBiamNnUTN6eHh6TG12STY5Ty8zMFFDbmIrUGZObDRET0U1SktwVE5OTjhkOEJEQWZPYStvWE5neEM3VTZXdjUKZjBYNzEyRnlNdUc3VXJEVkNDY0kxS3JSQ0F0THlPWUREL0ZPblBwSWdVQjF1bFRnOGlRUzdlTjM2d0NEL21wVApsUVVUS2FuU00yMnhnWWJKazlRY1dBSzQ0ZjA4SEE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== -stateserverkey: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlDV3dJQkFBS0JnUUN1UDR1MEI2bWxrNFdIN0hxbzl4ZEhaeE1rQlFUaldlS05IREcxTG9tUlpnNkRwOGdFClArRk1WbkhpSmtlbWlCck1ISTc4OWxuM0lUQU9DSU9MZ2tDZDd2bGgybm9hYXk5Unl6VGhvT2dETEs1aUdFYnYKZkN4QVZlOGFYNC9tOGE4Y0tYT1NiSWU2RTZ1bW9MM2tCTWhHYi9UK2FtcWx0Y2h1TUVyYWpyUlYrd0lEQVFBQgpBb0dBRERJZ2FoSmJPbDZQNndxUEwwSlVHOGhJRzY1S1FFdHJRdXNsUTRRbFZzcm8yeWdrSkwvLzJlTDNCNWdjClRiaWEvNHhFS2Nwb1U1YThFVTloUGFONU9EYnlkVEsxQ1I3R2JXSGkwWm1LbGZCUlR4bUpxakdKVU1CSmI4a0QKNStpMzlvcXdQS3dnaXoyTVR5SHZKZFFJVHB0ZDVrbEQyYjU1by9YWFRCTnk2NGtDUVFEbXRFWHNTL2kxTm5pSwozZVJkeHM4UVFGN0pKVG5SR042ZUh6ZHlXb242Zjl2ZkxrSDROWUdxcFUydjVBNUl1Nno3K3NJdXVHU2ZSeEI1CktrZVFXdlVQQWtFQXdWcVdlczdmc3NLbUFCZGxER3ozYzNxMjI2eVVaUE00R3lTb1cxYXZsYzJ1VDVYRm9vVUsKNjRpUjJuU2I1OHZ2bGY1RjRRMnJuRjh2cFRLcFJwK0lWUUpBTlcwZ0dFWEx0ZU9FYk54UUMydUQva1o1N09rRApCNnBUdTVpTkZaMWtBSy9sY2p6YktDanorMW5Hc09vR2FNK1ZrdEVTY1JGZ3RBWVlDWWRDQldzYS93SkFROWJXCnlVdmdMTVlpbkJHWlFKelN6VStXN01oR1lJejllSGlLSVZIdTFTNlBKQmsyZUdrWmhiNHEvbXkvYnJxYzJ4R1YKenZxTzVaUjRFUXdQWEZvSTZRSkFkeVdDMllOTTF2a1BuWnJqZzNQQXFHRHJQMHJwNEZ0bFV4alh0ay8vcW9hNgpRcXVYcE9vNjd4THRieW1PTlJTdDFiZGE5ZE5tbGljMFVNZ0JQRHgrYnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= -apiport: 17070 -`[1:] - -const configDataWithoutNewAttributes = ` -tag: user-omg -nonce: a nonce -cacert: Y2EgY2VydA== -stateaddresses: -- localhost:1234 -apiaddresses: -- localhost:1235 -oldpassword: sekrit -values: {} -` - -const stateMachineConfigData = ` -tag: machine-0 -nonce: user-admin:bootstrap -cacert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNXekNDQWNhZ0F3SUJBZ0lCQURBTEJna3Foa2lHOXcwQkFRVXdRekVOTUFzR0ExVUVDaE1FYW5WcWRURXkKTURBR0ExVUVBd3dwYW5WcWRTMW5aVzVsY21GMFpXUWdRMEVnWm05eUlHVnVkbWx5YjI1dFpXNTBJQ0pzYjJOaApiQ0l3SGhjTk1UTXdPVEkzTURZME1ERTFXaGNOTWpNd09USTNNRFkwTlRFMVdqQkRNUTB3Q3dZRFZRUUtFd1JxCmRXcDFNVEl3TUFZRFZRUUREQ2xxZFdwMUxXZGxibVZ5WVhSbFpDQkRRU0JtYjNJZ1pXNTJhWEp2Ym0xbGJuUWcKSW14dlkyRnNJakNCbnpBTkJna3Foa2lHOXcwQkFRRUZBQU9CalFBd2dZa0NnWUVBcWRhYWFVWE9YTFNtcTdhVApKUTNzckFIb3dFUjJnTFcyd1g5dHptMGdqVkZEVVBkdjNQQ3N1b1R6THdkaXhaQ2dJMFpMaGY5cWllYkZkSmpZCjAxOHUrVHovTkJuMzJLdDYzZWM3YmtRWnR3T09jSEZOWDhHZUdRRkVGOVVJcjYzeGxhUnNaMnJybTFlZCszZTgKdDdwendHY2YvdlB0ZmxldlJXRUpIT1l6MVZVQ0F3RUFBYU5qTUdFd0RnWURWUjBQQVFIL0JBUURBZ0NrTUE4RwpBMVVkRXdFQi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZDQlhnaXFpSkhBWVZ5RlA3R1hSS3NkcVlEVzhNQjhHCkExVWRJd1FZTUJhQUZDQlhnaXFpSkhBWVZ5RlA3R1hSS3NkcVlEVzhNQXNHQ1NxR1NJYjNEUUVCQlFPQmdRQWgKTy9JcWRjYnhsNzBpcUMzcHVqNGswbnV6ZFNoOXFlTzZVVktaYkVIWmtLV2J1ejVHK2tBdldaQ0QwcVhjb0JFcgpLc2dKZlNLdDVKWXZUQW1uUnF2dEdLVWN6SGN0WHMyQVBkWWcrRnkvdGd2THFSNGdaeXN4NWs3cVV1MVNITWZhCk5CUlo4YkdBbGZsOXF2Rlo5TkR4NElKUnQzUGh3S1FRWlpmcTkzQm5SQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K -stateaddresses: -- localhost:37017 -statepassword: +PCNyLFNAg2f5SN3ig6uHHum -apiaddresses: -- localhost:17070 -apipassword: +PCNyLFNAg2f5SN3ig6uHHum -oldpassword: Jc1GMZX/d35BgbQ6F9nxrTY4 -values: - PROVIDER_TYPE: local - SHARED_STORAGE_ADDR: 10.0.3.1:8041 - SHARED_STORAGE_DIR: /home/rog/.juju/local/shared-storage - STORAGE_ADDR: 10.0.3.1:8040 - STORAGE_DIR: /home/rog/.juju/local/storage -stateservercert: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNJakNDQVkyZ0F3SUJBZ0lCQURBTEJna3Foa2lHOXcwQkFRVXdRekVOTUFzR0ExVUVDaE1FYW5WcWRURXkKTURBR0ExVUVBd3dwYW5WcWRTMW5aVzVsY21GMFpXUWdRMEVnWm05eUlHVnVkbWx5YjI1dFpXNTBJQ0pzYjJOaApiQ0l3SGhjTk1UUXdOREF4TVRNd016SXlXaGNOTWpRd05EQXhNVE13T0RJeVdqQWJNUTB3Q3dZRFZRUUtFd1JxCmRXcDFNUW93Q0FZRFZRUURFd0VxTUlHZk1BMEdDU3FHU0liM0RRRUJBUVVBQTRHTkFEQ0JpUUtCZ1FDa1E1RzEKbUFuQU0wb3REVzVwREo3R3pQbTg5OUtySlVlR0NIZytGV2l0d1RETnJiK0NhYk1TYWRsc3JYb0crYjdETDFIcApXNTdnQXZoNjBTeUFLWHJCVW9tMG1pdVI1QkhYeitpWkZsZDZHS0UySTFIMUlON0pldUdmTURyVUN4WlVYNkdkCjVlcStUU3JvQ3ZPVGxDYWFtNDRkaHd0S1JHMlFQQ2RYbTNSbWxRSURBUUFCbzFJd1VEQU9CZ05WSFE4QkFmOEUKQkFNQ0FCQXdIUVlEVlIwT0JCWUVGTElWeDdmUVJFUkRGZ3hCcWh4b3puMHZueUlXTUI4R0ExVWRJd1FZTUJhQQpGQ0JYZ2lxaUpIQVlWeUZQN0dYUktzZHFZRFc4TUFzR0NTcUdTSWIzRFFFQkJRT0JnUUFKeW9yaEtLa20ySEFBCmNtS2RyRFNyRlZraElxUFlnc0p6STVTOXRBb0lxRDYwMUZ2eVh1aE50STlwR21ZS2tEd1J0Q2JXNy9nL1RMYVIKbVhXcEpqSDRMNlNLbEFkRFFVMVpPejMwRTdlR3F6aXp3dUdTUHB1VDdjUm5wOVVYdEwrRGZPc2N4WDNwNXMvMwpobmJGdFZGVWllejJRVDNoemo4VTRocXlWTENNZkE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== -stateserverkey: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlDWEFJQkFBS0JnUUNrUTVHMW1BbkFNMG90RFc1cERKN0d6UG04OTlLckpVZUdDSGcrRldpdHdURE5yYitDCmFiTVNhZGxzclhvRytiN0RMMUhwVzU3Z0F2aDYwU3lBS1hyQlVvbTBtaXVSNUJIWHoraVpGbGQ2R0tFMkkxSDEKSU43SmV1R2ZNRHJVQ3haVVg2R2Q1ZXErVFNyb0N2T1RsQ2FhbTQ0ZGh3dEtSRzJRUENkWG0zUm1sUUlEQVFBQgpBb0dBY3ZvODFxQTZTd2RicDFkY2JqbUFOZVVwOWNSOStIL2FwWTN1Skg2MXk5R0xTSnlTalVWUks5VmRkRDJsClNaYXNtVkRaQS8rMm9GUlQreHZKQzFoOWJBNm51NzBxczZXUXBQczQ5WGxhSFdNWXJ0dEV5UDVXeVE4ZWNPWlkKazJZeWJsN3ZQVnhOS1VXdk85L0N3MDgyU2FWZUJGbktvSkRxM1NZZHAzYnhWOEVDUVFEQW85cnBibTFnaENkcApIUFNIQU1SY0lOZUpLcHoxM3QxS3ErN1E3YUZOOWsxYnZvWm8yV2FZZ1pRbXBRL1RoNnl3dy9teWJscmxpMUxGCm5Vc25HZzV4QWtFQTJrcDNnV1B1aXN1bHoyMU1hQmtaN0pLUzVKUXkyQlFUM2ZuM0Nua3hFa2xRdGZ2VnFBN1cKMndPbG9acUFBM2ZCRVUycWEyVmptejg1WGZKUVZYbjBaUUpCQUljaTZ2Q1NESnlHV0hjK1hyTk44SEdJZ0dxeQp3QVVpNEMzL3lybzUyTXd1R2pwZnZ6NVNNOHlNS2ZlcUZ4NFdzU2dYY2xTZllaaGhVaUZhcEZ1N3hhRUNRR2lWCmV2SWtGYnFyM1RJbk5JOC9UM3RYc2tjUGRkaXVyZUlSQzdvWjNGZmRobXphVGtBcGMra1VzenRjMFc1WDVzbEsKZzViV3ljVXNvbWlQV3N2SkZUMENRQTRseEVjN0ZKd0xmRTVRMGpoUkc0d0Jjdll5YUtNRzNiQi9YYzlzZU1uUwpjU3RqM2ZzZkIwYTNldENMZW1PTnpaWkV1YVlFUjZiblR6R3BqdFhwQ3lBPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= -apiport: 17070 -` === modified file 'src/github.com/juju/juju/agent/format-1.18.go' --- src/github.com/juju/juju/agent/format-1.18.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/agent/format-1.18.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" "github.com/juju/names" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state/multiwatcher" @@ -31,6 +31,7 @@ Tag string DataDir string LogDir string + MetricsSpoolDir string Nonce string Jobs []multiwatcher.MachineJob `yaml:",omitempty"` UpgradedToVersion *version.Number `yaml:"upgradedToVersion"` @@ -39,7 +40,7 @@ StateAddresses []string `yaml:",omitempty"` StatePassword string `yaml:",omitempty"` - Environment string `yaml:",omitempty"` + Model string `yaml:",omitempty"` APIAddresses []string `yaml:",omitempty"` APIPassword string `yaml:",omitempty"` @@ -48,14 +49,15 @@ PreferIPv6 bool `yaml:"prefer-ipv6,omitempty"` - // Only state server machines have these next items set. 
-	StateServerCert string `yaml:",omitempty"`
-	StateServerKey  string `yaml:",omitempty"`
-	CAPrivateKey    string `yaml:",omitempty"`
-	APIPort         int    `yaml:",omitempty"`
-	StatePort       int    `yaml:",omitempty"`
-	SharedSecret    string `yaml:",omitempty"`
-	SystemIdentity  string `yaml:",omitempty"`
+	// Only controller machines have these next items set.
+	ControllerCert string `yaml:",omitempty"`
+	ControllerKey  string `yaml:",omitempty"`
+	CAPrivateKey   string `yaml:",omitempty"`
+	APIPort        int    `yaml:",omitempty"`
+	StatePort      int    `yaml:",omitempty"`
+	SharedSecret   string `yaml:",omitempty"`
+	SystemIdentity string `yaml:",omitempty"`
+	MongoVersion   string `yaml:",omitempty"`
 }

 func init() {
@@ -82,32 +84,29 @@
 	if err != nil {
 		return nil, err
 	}
-	var envTag names.EnvironTag
-	if format.Environment != "" {
-		envTag, err = names.ParseEnvironTag(format.Environment)
+	var modelTag names.ModelTag
+	if format.Model != "" {
+		modelTag, err = names.ParseModelTag(format.Model)
 		if err != nil {
 			return nil, errors.Trace(err)
 		}
 	}
 	config := &configInternal{
-		tag:     tag,
-		dataDir: format.DataDir,
-		logDir:  format.LogDir,
+		tag: tag,
+		paths: NewPathsWithDefaults(Paths{
+			DataDir:         format.DataDir,
+			LogDir:          format.LogDir,
+			MetricsSpoolDir: format.MetricsSpoolDir,
+		}),
 		jobs:              format.Jobs,
 		upgradedToVersion: *format.UpgradedToVersion,
 		nonce:             format.Nonce,
-		environment:       envTag,
+		model:             modelTag,
 		caCert:            format.CACert,
 		oldPassword:       format.OldPassword,
 		values:            format.Values,
 		preferIPv6:        format.PreferIPv6,
 	}
-	if config.logDir == "" {
-		config.logDir = DefaultLogDir
-	}
-	if config.dataDir == "" {
-		config.dataDir = DefaultDataDir
-	}
 	if len(format.StateAddresses) > 0 {
 		config.stateDetails = &connectionDetails{
 			format.StateAddresses,
@@ -120,10 +119,10 @@
 			format.APIPassword,
 		}
 	}
-	if len(format.StateServerKey) != 0 {
+	if len(format.ControllerKey) != 0 {
 		config.servingInfo = &params.StateServingInfo{
-			Cert:       format.StateServerCert,
-			PrivateKey: format.StateServerKey,
+			Cert:         format.ControllerCert,
+			PrivateKey:   format.ControllerKey,
 			CAPrivateKey: format.CAPrivateKey,
 			APIPort:      format.APIPort,
 			StatePort:    format.StatePort,
@@ -150,30 +149,35 @@
 		}
 	}

+	// If the mongo version is set, we might be running a version other than the default.
+ if format.MongoVersion != "" { + config.mongoVersion = format.MongoVersion + } return config, nil } func (formatter_1_18) marshal(config *configInternal) ([]byte, error) { - var envTag string - if config.environment.Id() != "" { - envTag = config.environment.String() + var modelTag string + if config.model.Id() != "" { + modelTag = config.model.String() } format := &format_1_18Serialization{ Tag: config.tag.String(), - DataDir: config.dataDir, - LogDir: config.logDir, + DataDir: config.paths.DataDir, + LogDir: config.paths.LogDir, + MetricsSpoolDir: config.paths.MetricsSpoolDir, Jobs: config.jobs, UpgradedToVersion: &config.upgradedToVersion, Nonce: config.nonce, - Environment: envTag, + Model: modelTag, CACert: string(config.caCert), OldPassword: config.oldPassword, Values: config.values, PreferIPv6: config.preferIPv6, } if config.servingInfo != nil { - format.StateServerCert = config.servingInfo.Cert - format.StateServerKey = config.servingInfo.PrivateKey + format.ControllerCert = config.servingInfo.Cert + format.ControllerKey = config.servingInfo.PrivateKey format.CAPrivateKey = config.servingInfo.CAPrivateKey format.APIPort = config.servingInfo.APIPort format.StatePort = config.servingInfo.StatePort @@ -188,5 +192,8 @@ format.APIAddresses = config.apiDetails.addresses format.APIPassword = config.apiDetails.password } + if config.mongoVersion != "" { + format.MongoVersion = string(config.mongoVersion) + } return goyaml.Marshal(format) } === modified file 'src/github.com/juju/juju/agent/format-1.18_whitebox_test.go' --- src/github.com/juju/juju/agent/format-1.18_whitebox_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/agent/format-1.18_whitebox_test.go 2016-03-22 15:18:22 +0000 @@ -15,7 +15,6 @@ "github.com/juju/utils" gc "gopkg.in/check.v1" - "github.com/juju/juju/juju/paths" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/testing" "github.com/juju/juju/version" @@ -27,36 +26,6 @@ var _ = gc.Suite(&format_1_18Suite{}) -var configData1_18WithoutUpgradedToVersion = "# format 1.18\n" + configDataWithoutNewAttributes - -func (s *format_1_18Suite) TestMissingAttributes(c *gc.C) { - logDir, err := paths.LogDir(version.Current.Series) - c.Assert(err, jc.ErrorIsNil) - realDataDir, err := paths.DataDir(version.Current.Series) - c.Assert(err, jc.ErrorIsNil) - - realDataDir = filepath.FromSlash(realDataDir) - logPath := filepath.Join(logDir, "juju") - logPath = filepath.FromSlash(logPath) - - dataDir := c.MkDir() - configPath := filepath.Join(dataDir, agentConfigFilename) - err = utils.AtomicWriteFile(configPath, []byte(configData1_18WithoutUpgradedToVersion), 0600) - c.Assert(err, jc.ErrorIsNil) - readConfig, err := ReadConfig(configPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(readConfig.UpgradedToVersion(), gc.Equals, version.MustParse("1.16.0")) - configLogDir := filepath.FromSlash(readConfig.LogDir()) - configDataDir := filepath.FromSlash(readConfig.DataDir()) - c.Assert(configLogDir, gc.Equals, logPath) - c.Assert(configDataDir, gc.Equals, realDataDir) - c.Assert(readConfig.PreferIPv6(), jc.IsFalse) - // The api info doesn't have the environment tag set. 
- apiInfo, ok := readConfig.APIInfo() - c.Assert(ok, jc.IsTrue) - c.Assert(apiInfo.EnvironTag.Id(), gc.Equals, "") -} - func (s *format_1_18Suite) TestStatePortNotParsedWithoutSecret(c *gc.C) { dataDir := c.MkDir() configPath := filepath.Join(dataDir, agentConfigFilename) @@ -77,18 +46,18 @@ config, err := ReadConfig(configPath) c.Assert(err, jc.ErrorIsNil) c.Assert(config.UpgradedToVersion(), jc.DeepEquals, version.MustParse("1.17.5.1")) - c.Assert(config.Jobs(), jc.DeepEquals, []multiwatcher.MachineJob{multiwatcher.JobManageEnviron}) + c.Assert(config.Jobs(), jc.DeepEquals, []multiwatcher.MachineJob{multiwatcher.JobManageModel}) c.Assert(config.PreferIPv6(), jc.IsTrue) } var agentConfig1_18Contents = ` # format 1.18 tag: machine-0 -datadir: /home/user/.juju/local +datadir: /home/user/.local/share/juju/local logdir: /var/log/juju-user-local nonce: user-admin:bootstrap jobs: -- JobManageEnviron +- JobManageModel upgradedToVersion: 1.17.5.1 cacert: '-----BEGIN CERTIFICATE----- @@ -134,8 +103,8 @@ NAMESPACE: user-local PROVIDER_TYPE: local STORAGE_ADDR: 10.0.3.1:8040 - STORAGE_DIR: /home/user/.juju/local/storage -stateservercert: '-----BEGIN CERTIFICATE----- + STORAGE_DIR: /home/user/.local/share/juju/local/storage +controllercert: '-----BEGIN CERTIFICATE----- MIICNzCCAaKgAwIBAgIBADALBgkqhkiG9w0BAQUwQzENMAsGA1UEChMEanVqdTEy @@ -164,7 +133,7 @@ -----END CERTIFICATE----- ' -stateserverkey: '-----BEGIN RSA PRIVATE KEY----- +controllerkey: '-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQDJnbuNL3m/oY7Er2lEF6ye1SodepvpI0CLCdLwrYP52cRxbVzo @@ -233,11 +202,11 @@ var agentConfig1_18NotStateMachine = ` # format 1.18 tag: machine-1 -datadir: /home/user/.juju/local +datadir: /home/user/.local/share/juju/local logdir: /var/log/juju-user-local nonce: user-admin:bootstrap jobs: -- JobManageEnviron +- JobManageModel upgradedToVersion: 1.17.5.1 cacert: '-----BEGIN CERTIFICATE----- @@ -284,7 +253,7 @@ NAMESPACE: user-local PROVIDER_TYPE: local STORAGE_ADDR: 10.0.3.1:8040 - STORAGE_DIR: /home/user/.juju/local/storage + STORAGE_DIR: /home/user/.local/share/juju/local/storage apiport: 17070 prefer-ipv6: true `[1:] === modified file 'src/github.com/juju/juju/agent/format.go' --- src/github.com/juju/juju/agent/format.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/agent/format.go 2016-03-22 15:18:22 +0000 @@ -16,13 +16,6 @@ // // All of this is saved in a single agent.conf file. // -// Historically the format file in the agent config directory was used -// to identify the method of serialization. This was used by -// individual legacy (pre 1.18) format readers and writers to be able -// to translate from the file format to the in-memory structure. From -// version 1.18, the format is part of the agent configuration file, -// so there is only a single source of truth. -// // Juju only supports upgrading from single steps, so Juju only needs // to know about the current format and the format of the previous // stable release. For convenience, the format name includes the === modified file 'src/github.com/juju/juju/agent/format_whitebox_test.go' --- src/github.com/juju/juju/agent/format_whitebox_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/agent/format_whitebox_test.go 2016-03-22 15:18:22 +0000 @@ -30,7 +30,7 @@ // located here for easy reuse. 
var agentParams = AgentConfigParams{ Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, Password: "sekrit", CACert: "ca cert", @@ -38,13 +38,13 @@ APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", PreferIPv6: false, - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, } func newTestConfig(c *gc.C) *configInternal { params := agentParams - params.DataDir = c.MkDir() - params.LogDir = c.MkDir() + params.Paths.DataDir = c.MkDir() + params.Paths.LogDir = c.MkDir() config, err := NewAgentConfig(params) c.Assert(err, jc.ErrorIsNil) return config.(*configInternal) @@ -81,7 +81,7 @@ c.Assert(err, jc.ErrorIsNil) configPath := ConfigPath(config.DataDir(), config.Tag()) - formatPath := filepath.Join(config.Dir(), legacyFormatFilename) + formatPath := filepath.Join(config.Dir(), "format") assertFileExists(c, configPath) assertFileNotExist(c, formatPath) } @@ -100,7 +100,7 @@ APIPort: 23456, } params := agentParams - params.DataDir = c.MkDir() + params.Paths.DataDir = c.MkDir() params.Values = map[string]string{"foo": "bar", "wibble": "wobble"} configInterface, err := NewStateMachineConfig(params, servingInfo) c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/agent/identity_test.go' --- src/github.com/juju/juju/agent/identity_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/agent/identity_test.go 2016-03-22 15:18:22 +0000 @@ -28,13 +28,13 @@ var attributeParams = AgentConfigParams{ Tag: names.NewMachineTag("1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "sekrit", CACert: "ca cert", StateAddresses: []string{"localhost:1234"}, APIAddresses: []string{"localhost:1235"}, Nonce: "a nonce", - Environment: testing.EnvironmentTag, + Model: testing.ModelTag, } var servingInfo = params.StateServingInfo{ @@ -49,7 +49,7 @@ func (s *identitySuite) TestWriteSystemIdentityFile(c *gc.C) { params := attributeParams - params.DataDir = c.MkDir() + params.Paths.DataDir = c.MkDir() conf, err := NewStateMachineConfig(params, servingInfo) c.Assert(err, jc.ErrorIsNil) err = WriteSystemIdentityFile(conf) === modified file 'src/github.com/juju/juju/agent/tools/tools_test.go' --- src/github.com/juju/juju/agent/tools/tools_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/agent/tools/tools_test.go 2016-03-22 15:18:22 +0000 @@ -39,7 +39,7 @@ // resulting slice has that prefix removed to keep the output short. 
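These test files reuse the shared agentParams value and then set per-test directories on a copy (params := agentParams; params.Paths.DataDir = c.MkDir()). That is safe because Go struct assignment copies the whole value, nested Paths included. A self-contained sketch of the pattern, with hypothetical type names:

    package main

    import "fmt"

    type paths struct{ DataDir, LogDir string }

    type agentConfigParams struct {
        Tag   string
        Paths paths
    }

    // shared plays the role of agentParams: a template value reused across tests.
    var shared = agentConfigParams{Tag: "machine-1"}

    func main() {
        params := shared // assignment copies the struct, nested Paths included
        params.Paths.DataDir = "/tmp/test-data"

        fmt.Println(shared.Paths.DataDir == "") // true: the template is untouched
        fmt.Println(params.Paths.DataDir)       // /tmp/test-data
    }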
c.Assert(testing.FindJujuCoreImports(c, "github.com/juju/juju/agent/tools"), gc.DeepEquals, - []string{"juju/arch", "tools", "version"}) + []string{"tools", "version"}) } const toolsFile = "downloaded-tools.txt" === modified file 'src/github.com/juju/juju/api/action/client.go' --- src/github.com/juju/juju/api/action/client.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/action/client.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" === modified file 'src/github.com/juju/juju/api/action/client_test.go' --- src/github.com/juju/juju/api/action/client_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/action/client_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/action" "github.com/juju/juju/apiserver/params" === modified file 'src/github.com/juju/juju/api/addresser/addresser.go' --- src/github.com/juju/juju/api/addresser/addresser.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/addresser/addresser.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,9 @@ "github.com/juju/loggo" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) var logger = loggo.GetLogger("juju.api.addresser") @@ -31,7 +32,7 @@ } } -// CanDeallocateAddresses checks if the current environment can +// CanDeallocateAddresses checks if the current model can // deallocate IP addresses. func (api *API) CanDeallocateAddresses() (bool, error) { var result params.BoolResult @@ -58,20 +59,20 @@ return errors.Trace(result.Error) } -var newEntityWatcher = watcher.NewEntityWatcher +var newEntitiesWatcher = apiwatcher.NewEntitiesWatcher -// WatchIPAddresses returns a EntityWatcher for observing the +// WatchIPAddresses returns a EntitiesWatcher for observing the // tags of IP addresses with changes in life cycle. // The initial event will contain the tags of any IP addresses // which are no longer Alive. 
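The addresser change here imports two packages that are both named watcher, so the client-side one is aliased to apiwatcher. The same aliasing technique, shown with two colliding standard-library packages so the sketch runs on its own:

    package main

    import (
        htmltemplate "html/template"
        "os"
        "text/template"
    )

    // Both packages are named "template"; the alias keeps them apart, just as
    // the addresser code aliases api/watcher to apiwatcher so that the core
    // watcher package can keep the bare name.
    func main() {
        t := template.Must(template.New("t").Parse("text: {{.}}\n"))
        _ = t.Execute(os.Stdout, "<b>hi</b>") // written verbatim

        h := htmltemplate.Must(htmltemplate.New("h").Parse("html: {{.}}\n"))
        _ = h.Execute(os.Stdout, "<b>hi</b>") // written escaped
    }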
-func (api *API) WatchIPAddresses() (watcher.EntityWatcher, error) { - var result params.EntityWatchResult +func (api *API) WatchIPAddresses() (watcher.EntitiesWatcher, error) { + var result params.EntitiesWatchResult err := api.facade.FacadeCall("WatchIPAddresses", nil, &result) if err != nil { return nil, errors.Trace(err) } if result.Error == nil { - w := newEntityWatcher(api.facade.RawAPICaller(), result) + w := newEntitiesWatcher(api.facade.RawAPICaller(), result) return w, nil } return nil, errors.Trace(result.Error) === modified file 'src/github.com/juju/juju/api/addresser/addresser_test.go' --- src/github.com/juju/juju/api/addresser/addresser_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/addresser/addresser_test.go 2016-03-22 15:18:22 +0000 @@ -12,10 +12,10 @@ "github.com/juju/juju/api/addresser" "github.com/juju/juju/api/base" apitesting "github.com/juju/juju/api/base/testing" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" ) type AddresserSuite struct { @@ -97,20 +97,20 @@ func (s *AddresserSuite) TestWatchIPAddressesSuccess(c *gc.C) { var numFacadeCalls int var numWatcherCalls int - expectedResult := params.EntityWatchResult{ - EntityWatcherId: "42", + expectedResult := params.EntitiesWatchResult{ + EntitiesWatcherId: "42", Changes: []string{ "ipaddress-11111111-0000-0000-0000-000000000000", "ipaddress-22222222-0000-0000-0000-000000000000", }, } - watcherFunc := func(caller base.APICaller, result params.EntityWatchResult) watcher.EntityWatcher { + watcherFunc := func(caller base.APICaller, result params.EntitiesWatchResult) watcher.EntitiesWatcher { numWatcherCalls++ c.Check(caller, gc.NotNil) c.Check(result, jc.DeepEquals, expectedResult) return nil } - s.PatchValue(addresser.NewEntityWatcher, watcherFunc) + s.PatchValue(addresser.NewEntitiesWatcher, watcherFunc) apiCaller := successAPICaller(c, "WatchIPAddresses", nil, expectedResult, &numFacadeCalls) api := addresser.NewAPI(apiCaller) @@ -136,7 +136,7 @@ func (s *AddresserSuite) TestWatchIPAddressesServerError(c *gc.C) { var called int - expectedResult := params.EntityWatchResult{ + expectedResult := params.EntitiesWatchResult{ Error: apiservertesting.ServerError("server boom!"), } apiCaller := successAPICaller(c, "WatchIPAddresses", nil, expectedResult, &called) === modified file 'src/github.com/juju/juju/api/addresser/export_test.go' --- src/github.com/juju/juju/api/addresser/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/addresser/export_test.go 2016-03-22 15:18:22 +0000 @@ -3,4 +3,4 @@ package addresser -var NewEntityWatcher = &newEntityWatcher +var NewEntitiesWatcher = &newEntitiesWatcher === modified file 'src/github.com/juju/juju/api/agent/machine_test.go' --- src/github.com/juju/juju/api/agent/machine_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/agent/machine_test.go 2016-03-22 15:18:22 +0000 @@ -19,6 +19,7 @@ "github.com/juju/juju/environs" "github.com/juju/juju/juju/testing" "github.com/juju/juju/mongo" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" coretesting "github.com/juju/juju/testing" @@ -35,7 +36,7 @@ var _ = gc.Suite(&servingInfoSuite{}) func (s *servingInfoSuite) TestStateServingInfo(c *gc.C) { - st, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + st, _ := s.OpenAPIAsNewMachine(c, state.JobManageModel) ssi := 
state.StateServingInfo{ PrivateKey: "some key", @@ -61,7 +62,10 @@ st, _ := s.OpenAPIAsNewMachine(c) _, err := st.Agent().StateServingInfo() - c.Assert(err, gc.ErrorMatches, "permission denied") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "permission denied", + Code: "unauthorized access", + }) } func (s *servingInfoSuite) TestIsMaster(c *gc.C) { @@ -72,7 +76,7 @@ } s.PatchValue(&apiserveragent.MongoIsMaster, fakeMongoIsMaster) - st, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + st, _ := s.OpenAPIAsNewMachine(c, state.JobManageModel) expected := true result, err := st.Agent().IsMaster() @@ -84,7 +88,10 @@ func (s *servingInfoSuite) TestIsMasterPermission(c *gc.C) { st, _ := s.OpenAPIAsNewMachine(c) _, err := st.Agent().IsMaster() - c.Assert(err, gc.ErrorMatches, "permission denied") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "permission denied", + Code: "unauthorized access", + }) } type machineSuite struct { @@ -149,7 +156,7 @@ c.Assert(err, jc.ErrorIsNil) info.Tag = tag info.Password = "foo-12345678901234567890" - err = tryOpenState(s.State.EnvironTag(), info) + err = tryOpenState(s.State.ModelTag(), info) c.Assert(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) } @@ -171,8 +178,8 @@ c.Assert(rFlag, jc.IsFalse) } -func tryOpenState(envTag names.EnvironTag, info *mongo.MongoInfo) error { - st, err := state.Open(envTag, info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) +func tryOpenState(modelTag names.ModelTag, info *mongo.MongoInfo) error { + st, err := state.Open(modelTag, info, mongo.DefaultDialOpts(), environs.NewStatePolicy()) if err == nil { st.Close() } === added file 'src/github.com/juju/juju/api/agent/model_test.go' --- src/github.com/juju/juju/api/agent/model_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/agent/model_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,31 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agent_test + +import ( + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/agent" + apitesting "github.com/juju/juju/api/testing" + jujutesting "github.com/juju/juju/juju/testing" +) + +type modelSuite struct { + jujutesting.JujuConnSuite + *apitesting.ModelWatcherTests +} + +var _ = gc.Suite(&modelSuite{}) + +func (s *modelSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + stateAPI, _ := s.OpenAPIAsNewMachine(c) + + agentAPI := agent.NewState(stateAPI) + c.Assert(agentAPI, gc.NotNil) + + s.ModelWatcherTests = apitesting.NewModelWatcherTests( + agentAPI, s.BackingState, apitesting.NoSecrets) +} === modified file 'src/github.com/juju/juju/api/agent/state.go' --- src/github.com/juju/juju/api/agent/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/agent/state.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "github.com/juju/names" "github.com/juju/juju/api/base" + "github.com/juju/juju/api/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" "github.com/juju/juju/state/multiwatcher" @@ -17,13 +18,17 @@ // State provides access to an agent's view of the state. type State struct { facade base.FacadeCaller + *common.ModelWatcher } // NewState returns a version of the state that provides functionality // required by agent code. 
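The updated assertions compare errors.Cause(err) against a concrete *rpc.RequestError rather than matching message text. The point of Cause is that annotations stacked on an error do not disturb the value underneath. A sketch using github.com/juju/errors and a stand-in error type:

    package main

    import (
        "fmt"

        "github.com/juju/errors"
    )

    // requestError stands in for rpc.RequestError.
    type requestError struct{ Message, Code string }

    func (e *requestError) Error() string { return e.Message + " (" + e.Code + ")" }

    func main() {
        base := &requestError{Message: "permission denied", Code: "unauthorized access"}
        err := errors.Annotate(base, "calling StateServingInfo") // context added on the way up

        // The original value survives annotation, so a test can compare
        // errors.Cause(err) against the concrete error it expects.
        fmt.Println(errors.Cause(err) == base) // true
        fmt.Println(err)                       // calling StateServingInfo: permission denied (unauthorized access)
    }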
func NewState(caller base.APICaller) *State { facadeCaller := base.NewFacadeCaller(caller, "Agent") - return &State{facadeCaller} + return &State{ + facade: facadeCaller, + ModelWatcher: common.NewModelWatcher(facadeCaller), + } } func (st *State) getEntity(tag names.Tag) (*params.AgentGetEntitiesResult, error) { @@ -54,7 +59,7 @@ // agent lives at the same network address as the primary // mongo server for the replica set. // This call will return an error if the connected -// agent is not a machine agent with environment-manager +// agent is not a machine agent with model-manager // privileges. func (st *State) IsMaster() (bool, error) { var results params.IsMasterResult === added directory 'src/github.com/juju/juju/api/agenttools' === added file 'src/github.com/juju/juju/api/agenttools/agenttools.go' --- src/github.com/juju/juju/api/agenttools/agenttools.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/agenttools/agenttools.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agenttools + +import ( + "github.com/juju/juju/api/base" +) + +const apiName = "AgentTools" + +// Facade provides access to an api used for manipulating agent tools. +type Facade struct { + facade base.FacadeCaller +} + +// NewFacade returns a new api client facade instance. +func NewFacade(caller base.APICaller) *Facade { + facadeCaller := base.NewFacadeCaller(caller, apiName) + return &Facade{facadeCaller} +} + +// UpdateToolsVersion calls UpdateToolsAvailable in the server. +func (f *Facade) UpdateToolsVersion() error { + return f.facade.FacadeCall("UpdateToolsAvailable", nil, nil) +} === added file 'src/github.com/juju/juju/api/agenttools/agenttools_test.go' --- src/github.com/juju/juju/api/agenttools/agenttools_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/agenttools/agenttools_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,41 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agenttools_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/agenttools" + "github.com/juju/juju/api/base/testing" + coretesting "github.com/juju/juju/testing" +) + +type AgentToolsSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&AgentToolsSuite{}) + +func (s *AgentToolsSuite) TestUpdateToolsVersion(c *gc.C) { + called := false + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "AgentTools") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "UpdateToolsAvailable") + + c.Assert(a, gc.IsNil) + return nil + }) + client := agenttools.NewFacade(apiCaller) + err := client.UpdateToolsVersion() + c.Check(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} === added file 'src/github.com/juju/juju/api/agenttools/package_test.go' --- src/github.com/juju/juju/api/agenttools/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/agenttools/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
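The new agenttools client shows the shape most facades in this tree take: hold a FacadeCaller, expose one exported method per server-side RPC, and keep no other state. A reduced runnable sketch with a fake caller; the interface is trimmed to the single method used here:

    package main

    import "fmt"

    // facadeCaller is a stand-in for base.FacadeCaller, reduced to the one
    // call the agenttools facade needs.
    type facadeCaller interface {
        FacadeCall(request string, args, response interface{}) error
    }

    type loggingCaller struct{}

    func (loggingCaller) FacadeCall(request string, args, response interface{}) error {
        fmt.Println("facade call:", request)
        return nil
    }

    // facade mirrors the new agenttools client: a thin wrapper whose methods
    // each map to one server-side request.
    type facade struct{ caller facadeCaller }

    func (f *facade) UpdateToolsVersion() error {
        return f.caller.FacadeCall("UpdateToolsAvailable", nil, nil)
    }

    func main() {
        f := &facade{caller: loggingCaller{}}
        if err := f.UpdateToolsVersion(); err != nil {
            panic(err)
        }
    }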
+ +package agenttools_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === modified file 'src/github.com/juju/juju/api/allwatcher.go' --- src/github.com/juju/juju/api/allwatcher.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/allwatcher.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ ) // AllWatcher holds information allowing us to get Deltas describing -// changes to the entire environment or all environments (depending on +// changes to the entire model or all models (depending on // the watcher type). type AllWatcher struct { objType string @@ -27,14 +27,14 @@ return newAllWatcher("AllWatcher", caller, id) } -// NewAllEnvWatcher returns an AllWatcher instance which interacts -// with a watcher created by the WatchAllEnvs API call. +// NewAllModelWatcher returns an AllWatcher instance which interacts +// with a watcher created by the WatchAllModels API call. // // There should be no need to call this from outside of the api -// package. It is only used by Client.WatchAllEnvs in -// api/systemmanager. -func NewAllEnvWatcher(caller base.APICaller, id *string) *AllWatcher { - return newAllWatcher("AllEnvWatcher", caller, id) +// package. It is only used by Client.WatchAllModels in +// api/controller. +func NewAllModelWatcher(caller base.APICaller, id *string) *AllWatcher { + return newAllWatcher("AllModelWatcher", caller, id) } func newAllWatcher(objType string, caller base.APICaller, id *string) *AllWatcher { @@ -46,7 +46,7 @@ } // Next returns a new set of deltas from a watcher previously created -// by the WatchAll or WatchAllEnvs API calls. It will block until +// by the WatchAll or WatchAllModels API calls. It will block until // there are deltas to return. func (watcher *AllWatcher) Next() ([]multiwatcher.Delta, error) { var info params.AllWatcherNextResults @@ -61,7 +61,7 @@ } // Stop shutdowns down a watcher previously created by the WatchAll or -// WatchAllEnvs API calls +// WatchAllModels API calls func (watcher *AllWatcher) Stop() error { return watcher.caller.APICall( watcher.objType, === modified file 'src/github.com/juju/juju/api/apiclient.go' --- src/github.com/juju/juju/api/apiclient.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/apiclient.go 2016-03-22 15:18:22 +0000 @@ -4,11 +4,16 @@ package api import ( + "bufio" "crypto/tls" "crypto/x509" + "encoding/json" + "fmt" "io" "net/http" + "net/url" "strings" + "sync/atomic" "time" "github.com/juju/errors" @@ -17,7 +22,9 @@ "github.com/juju/utils" "github.com/juju/utils/parallel" "golang.org/x/net/websocket" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" "github.com/juju/juju/rpc" @@ -27,24 +34,37 @@ var logger = loggo.GetLogger("juju.api") -// PingPeriod defines how often the internal connection health check -// will run. It's a variable so it can be changed in tests. -var PingPeriod = 1 * time.Minute - -type State struct { +// TODO(fwereade): we should be injecting a Clock; and injecting these values; +// across the board, instead of using these global variables. +var ( + // PingPeriod defines how often the internal connection health check + // will run. + PingPeriod = 1 * time.Minute + + // PingTimeout defines how long a health check can take before we + // consider it to have failed. + PingTimeout = 30 * time.Second +) + +// state is the internal implementation of the Connection interface. 
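The AllWatcher doc comments say Next blocks until deltas arrive and Stop ends the stream, so consumers loop on Next until it fails. A channel-backed sketch of that consumption pattern; the real Next blocks on an RPC round trip, not a channel:

    package main

    import (
        "errors"
        "fmt"
    )

    // delta and allWatcher are reduced stand-ins for multiwatcher.Delta and
    // api.AllWatcher.
    type delta struct{ Entity string }

    type allWatcher struct{ deltas chan []delta }

    func (w *allWatcher) Next() ([]delta, error) {
        d, ok := <-w.deltas
        if !ok {
            return nil, errors.New("watcher stopped")
        }
        return d, nil
    }

    func (w *allWatcher) Stop() { close(w.deltas) }

    func main() {
        w := &allWatcher{deltas: make(chan []delta, 1)}
        w.deltas <- []delta{{Entity: "machine-0"}}
        w.Stop()

        // Loop on Next until Stop (or an error) ends the stream.
        for {
            ds, err := w.Next()
            if err != nil {
                fmt.Println("done:", err)
                return
            }
            fmt.Println("got", ds)
        }
    }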
+type state struct { client *rpc.Conn conn *websocket.Conn // addr is the address used to connect to the API server. addr string - // environTag holds the environment tag once we're connected - environTag string - - // serverTag holds the server tag once we're connected. + // cookieURL is the URL that HTTP cookies for the API + // will be associated with (specifically macaroon auth cookies). + cookieURL *url.URL + + // modelTag holds the model tag once we're connected + modelTag string + + // controllerTag holds the controller tag once we're connected. // This is only set with newer apiservers where they are using // the v1 login mechansim. - serverTag string + controllerTag string // serverVersion holds the version of the API server that we are // connected to. It is possible that this version is 0 if the @@ -69,9 +89,16 @@ // closed is a channel that gets closed when State.Close is called. closed chan struct{} - // tag and password hold the cached login credentials. + // loggedIn holds whether the client has successfully logged + // in. It's a int32 so that the atomic package can be used to + // access it safely. + loggedIn int32 + + // tag and password and nonce hold the cached login credentials. + // These are only valid if loggedIn is 1. tag string password string + nonce string // serverRootAddress holds the cached API server address and port used // to login. @@ -80,9 +107,17 @@ // serverScheme is the URI scheme of the API Server serverScheme string + // tlsConfig holds the TLS config appropriate for making SSL + // connections to the API endpoints. + tlsConfig *tls.Config + // certPool holds the cert pool that is used to authenticate the tls // connections to the API. certPool *x509.CertPool + + // bakeryClient holds the client that will be used to + // authorize macaroon based login requests. + bakeryClient *httpbakery.Client } // Open establishes a connection to the API server using the Info @@ -91,34 +126,65 @@ // // See Connect for details of the connection mechanics. func Open(info *Info, opts DialOpts) (Connection, error) { - return open(info, opts, (*State).Login) + return open(info, opts, (*state).Login) } // This unexported open method is used both directly above in the Open // function, and also the OpenWithVersion function below to explicitly cause // the API server to think that the client is older than it really is. -func open(info *Info, opts DialOpts, loginFunc func(st *State, tag, pwd, nonce string) error) (Connection, error) { - conn, err := Connect(info, "", nil, opts) +func open(info *Info, opts DialOpts, loginFunc func(st *state, tag names.Tag, pwd, nonce string) error) (Connection, error) { + if info.UseMacaroons { + if info.Tag != nil || info.Password != "" { + return nil, errors.New("open should specifiy UseMacaroons or a username & password. 
Not both") + } + } + conn, tlsConfig, err := connectWebsocket(info, opts) if err != nil { return nil, errors.Trace(err) } client := rpc.NewConn(jsoncodec.NewWebsocket(conn), nil) client.Start() - st := &State{ - client: client, - conn: conn, - addr: conn.Config().Location.Host, + + bakeryClient := opts.BakeryClient + if bakeryClient == nil { + bakeryClient = httpbakery.NewClient() + } else { + // Make a copy of the bakery client and its + // HTTP client + c := *opts.BakeryClient + bakeryClient = &c + httpc := *bakeryClient.Client + bakeryClient.Client = &httpc + } + apiHost := conn.Config().Location.Host + bakeryClient.Client.Transport = &hostSwitchingTransport{ + primaryHost: apiHost, + primary: utils.NewHttpTLSTransport(tlsConfig), + fallback: http.DefaultTransport, + } + + st := &state{ + client: client, + conn: conn, + addr: apiHost, + cookieURL: &url.URL{ + Scheme: "https", + Host: conn.Config().Location.Host, + Path: "/", + }, serverScheme: "https", serverRootAddress: conn.Config().Location.Host, // why are the contents of the tag (username and password) written into the // state structure BEFORE login ?!? - tag: toString(info.Tag), - password: info.Password, - certPool: conn.Config().TlsConfig.RootCAs, + tag: tagToString(info.Tag), + password: info.Password, + nonce: info.Nonce, + tlsConfig: tlsConfig, + bakeryClient: bakeryClient, } - if info.Tag != nil || info.Password != "" { - if err := loginFunc(st, info.Tag.String(), info.Password, info.Nonce); err != nil { + if info.Tag != nil || info.Password != "" || info.UseMacaroons { + if err := loginFunc(st, info.Tag, info.Password, info.Nonce); err != nil { conn.Close() return nil, err } @@ -129,51 +195,92 @@ return st, nil } +// hostSwitchingTransport provides an http.RoundTripper +// that chooses an actual RoundTripper to use +// depending on the destination host. +// +// This makes it possible to use a different set of root +// CAs for the API and all other hosts. +type hostSwitchingTransport struct { + primaryHost string + primary http.RoundTripper + fallback http.RoundTripper +} + +// RoundTrip implements http.RoundTripper.RoundTrip. +func (t *hostSwitchingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL.Host == t.primaryHost { + return t.primary.RoundTrip(req) + } + return t.fallback.RoundTrip(req) +} + // OpenWithVersion uses an explicit version of the Admin facade to call Login // on. This allows the caller to pretend to be an older client, and is used // only in testing. func OpenWithVersion(info *Info, opts DialOpts, loginVersion int) (Connection, error) { - var loginFunc func(st *State, tag, pwd, nonce string) error + var loginFunc func(st *state, tag names.Tag, pwd, nonce string) error switch loginVersion { - case 0: - loginFunc = (*State).loginV0 - case 1: - loginFunc = (*State).loginV1 case 2: - loginFunc = (*State).loginV2 + loginFunc = (*state).loginV2 + case 3: + loginFunc = (*state).loginV3 default: return nil, errors.NotSupportedf("loginVersion %d", loginVersion) } return open(info, opts, loginFunc) } -// Connect establishes a websocket connection to the API server using -// the Info, API path tail and (optional) request headers provided. If -// multiple API addresses are provided in Info they will be tried -// concurrently - the first successful connection wins. +// connectWebsocket establishes a websocket connection to the RPC +// API websocket on the API server using Info. 
If multiple API addresses +// are provided in Info they will be tried concurrently - the first successful +// connection wins. // -// The path tail may be blank, in which case the default value will be -// used. Otherwise, it must start with a "/". -func Connect(info *Info, pathTail string, header http.Header, opts DialOpts) (*websocket.Conn, error) { +// It also returns the TLS configuration that it has derived from the Info. +func connectWebsocket(info *Info, opts DialOpts) (*websocket.Conn, *tls.Config, error) { if len(info.Addrs) == 0 { - return nil, errors.New("no API addresses to connect to") - } - if pathTail != "" && !strings.HasPrefix(pathTail, "/") { - return nil, errors.New(`path tail must start with "/"`) - } + return nil, nil, errors.New("no API addresses to connect to") + } + tlsConfig, err := tlsConfigForCACert(info.CACert) + if err != nil { + return nil, nil, errors.Annotatef(err, "cannot make TLS configuration") + } + tlsConfig.InsecureSkipVerify = opts.InsecureSkipVerify + path := "/" + if info.ModelTag.Id() != "" { + path = apiPath(info.ModelTag, "/api") + } + conn, err := dialWebSocket(info.Addrs, path, tlsConfig, opts) + if err != nil { + return nil, nil, errors.Trace(err) + } + logger.Infof("connection established to %q", conn.RemoteAddr()) + return conn, tlsConfig, nil +} - pool, err := CreateCertPool(info.CACert) +func tlsConfigForCACert(caCert string) (*tls.Config, error) { + certPool, err := CreateCertPool(caCert) if err != nil { return nil, errors.Annotate(err, "cert pool creation failed") } - - path := makeAPIPath(info.EnvironTag.Id(), pathTail) - + return &tls.Config{ + RootCAs: certPool, + // We want to be specific here (rather than just using "anything". + // See commit 7fc118f015d8480dfad7831788e4b8c0432205e8 (PR 899). + ServerName: "juju-apiserver", + }, nil +} + +// dialWebSocket dials a websocket with one of the provided addresses, the +// specified URL path, TLS configuration, and dial options. Each of the +// specified addresses will be attempted concurrently, and the first +// successful connection will be returned. +func dialWebSocket(addrs []string, path string, tlsConfig *tls.Config, opts DialOpts) (*websocket.Conn, error) { // Dial all addresses at reasonable intervals. try := parallel.NewTry(0, nil) defer try.Kill() - for _, addr := range info.Addrs { - err := dialWebsocket(addr, path, header, opts, pool, try) + for _, addr := range addrs { + err := dialWebsocket(addr, path, opts, tlsConfig, try) if err == parallel.ErrStopped { break } @@ -190,57 +297,181 @@ if err != nil { return nil, errors.Trace(err) } - conn := result.(*websocket.Conn) - logger.Infof("connection established to %q", conn.RemoteAddr()) + return result.(*websocket.Conn), nil +} + +// ConnectStream implements Connection.ConnectStream. +func (st *state) ConnectStream(path string, attrs url.Values) (base.Stream, error) { + if !st.isLoggedIn() { + return nil, errors.New("cannot use ConnectStream without logging in") + } + // We use the standard "macaraq" macaroon authentication dance here. + // That is, we attach any macaroons we have to the initial request, + // and if that succeeds, all's good. If it fails with a DischargeRequired + // error, the response will contain a macaroon that, when discharged, + // may allow access, so we discharge it (using bakery.Client.HandleError) + // and try the request again. 
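tlsConfigForCACert above trusts exactly one CA and pins ServerName to "juju-apiserver" instead of relying on the system CA bundle and DNS names. A standalone sketch of constructing such a config with crypto/tls and crypto/x509; the PEM input is a placeholder:

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "fmt"
    )

    // tlsConfigFor mirrors tlsConfigForCACert: trust exactly one CA and pin
    // the server name the controller's certificate is issued for.
    func tlsConfigFor(caPEM []byte) (*tls.Config, error) {
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(caPEM) {
            return nil, fmt.Errorf("no certificates found in CA PEM")
        }
        return &tls.Config{
            RootCAs:    pool,
            ServerName: "juju-apiserver", // fixed name baked into the server cert
        }, nil
    }

    func main() {
        _, err := tlsConfigFor([]byte("not a certificate"))
        fmt.Println(err) // the helper fails closed on bad input
    }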
+ conn, err := st.connectStream(path, attrs) + if err == nil { + return conn, err + } + if params.ErrCode(err) != params.CodeDischargeRequired { + return nil, errors.Trace(err) + } + if err := st.bakeryClient.HandleError(st.cookieURL, bakeryError(err)); err != nil { + return nil, errors.Trace(err) + } + // Try again with the discharged macaroon. + conn, err = st.connectStream(path, attrs) + if err != nil { + return nil, errors.Trace(err) + } return conn, nil } -// makeAPIPath builds the path to connect to based on the tail given -// and whether the environment UUID is set. -func makeAPIPath(envUUID, tail string) string { - if envUUID == "" { - if tail == "" { - tail = "/" - } - return tail - } - if tail == "" { - tail = "/api" - } - return "/environment/" + envUUID + tail -} - -// toString returns the value of a tag's String method, or "" if the tag is nil. -func toString(tag names.Tag) string { +// connectStream is the internal version of ConnectStream. It differs from +// ConnectStream only in that it will not retry the connection if it encounters +// discharge-required error. +func (st *state) connectStream(path string, attrs url.Values) (base.Stream, error) { + if !strings.HasPrefix(path, "/") { + return nil, errors.New(`path must start with "/"`) + } + if _, ok := st.ServerVersion(); ok { + // If the server version is set, then we know the server is capable of + // serving streams at the model path. We also fully expect + // that the server has returned a valid model tag. + modelTag, err := st.ModelTag() + if err != nil { + return nil, errors.Annotate(err, "cannot get model tag, perhaps connected to system not model") + } + path = apiPath(modelTag, path) + } + target := url.URL{ + Scheme: "wss", + Host: st.addr, + Path: path, + RawQuery: attrs.Encode(), + } + cfg, err := websocket.NewConfig(target.String(), "http://localhost/") + if st.tag != "" { + cfg.Header = utils.BasicAuthHeader(st.tag, st.password) + } + if st.nonce != "" { + cfg.Header.Set(params.MachineNonceHeader, st.nonce) + } + // Add any cookies because they will not be sent to websocket + // connections by default. + st.addCookiesToHeader(cfg.Header) + + cfg.TlsConfig = st.tlsConfig + connection, err := websocketDialConfig(cfg) + if err != nil { + return nil, err + } + if err := readInitialStreamError(connection); err != nil { + return nil, errors.Trace(err) + } + return connection, nil +} + +// readInitialStreamError reads the initial error response +// from a stream connection and returns it. +func readInitialStreamError(conn io.Reader) error { + // We can use bufio here because the websocket guarantees that a + // single read will not read more than a single frame; there is + // no guarantee that a single read might not read less than the + // whole frame though, so using a single Read call is not + // correct. By using ReadSlice rather than ReadBytes, we + // guarantee that the error can't be too big (>4096 bytes). + line, err := bufio.NewReader(conn).ReadSlice('\n') + if err != nil { + return errors.Annotate(err, "unable to read initial response") + } + var errResult params.ErrorResult + if err := json.Unmarshal(line, &errResult); err != nil { + return errors.Annotate(err, "unable to unmarshal initial response") + } + if errResult.Error != nil { + return errResult.Error + } + return nil +} + +// addCookiesToHeader adds any cookies associated with the +// API host to the given header. This is necessary because +// otherwise cookies are not sent to websocket endpoints. 
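readInitialStreamError's comment explains the bufio.ReadSlice choice: a single websocket read may deliver less than a whole frame, and ReadSlice caps the line at the buffer size (4096 bytes by default), so a misbehaving server cannot force an unbounded read. The same decode step over an in-memory reader:

    package main

    import (
        "bufio"
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    type errorResult struct {
        Error *struct {
            Message string
        } `json:"error"`
    }

    // readInitialError follows readInitialStreamError above: read a single
    // newline-terminated line, bounded by the reader's buffer, and decode it
    // as a JSON error envelope.
    func readInitialError(r io.Reader) error {
        line, err := bufio.NewReader(r).ReadSlice('\n')
        if err != nil {
            return fmt.Errorf("unable to read initial response: %v", err)
        }
        var res errorResult
        if err := json.Unmarshal(line, &res); err != nil {
            return fmt.Errorf("unable to unmarshal initial response: %v", err)
        }
        if res.Error != nil {
            return fmt.Errorf("server error: %s", res.Error.Message)
        }
        return nil
    }

    func main() {
        fmt.Println(readInitialError(strings.NewReader(`{"error":{"message":"boom"}}` + "\n")))
        fmt.Println(readInitialError(strings.NewReader("{}\n"))) // <nil>: healthy stream
    }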
+func (st *state) addCookiesToHeader(h http.Header) { + // net/http only allows adding cookies to a request, + // but when it sends a request to a non-http endpoint, + // it doesn't add the cookies, so make a request, starting + // with the given header, add the cookies to use, then + // throw away the request but keep the header. + req := &http.Request{ + Header: h, + } + cookies := st.bakeryClient.Client.Jar.Cookies(st.cookieURL) + for _, c := range cookies { + req.AddCookie(c) + } +} + +// apiEndpoint returns a URL that refers to the given API slash-prefixed +// endpoint path and query parameters. Note that the caller +// is responsible for ensuring that the path *is* prefixed with a slash. +func (st *state) apiEndpoint(path, query string) (*url.URL, error) { + if _, err := st.ControllerTag(); err == nil { + // The controller tag is set, so the agent version is >= 1.23, + // so we can use the model endpoint. + modelTag, err := st.ModelTag() + if err != nil { + return nil, errors.Annotate(err, "cannot get API endpoint address") + } + path = apiPath(modelTag, path) + } + return &url.URL{ + Scheme: st.serverScheme, + Host: st.Addr(), + Path: path, + RawQuery: query, + }, nil +} + +// apiPath returns the given API endpoint path relative +// to the given model tag. The caller is responsible +// for ensuring that the model tag is valid and +// that the path is slash-prefixed. +func apiPath(modelTag names.ModelTag, path string) string { + if !strings.HasPrefix(path, "/") { + panic(fmt.Sprintf("apiPath called with non-slash-prefixed path %q", path)) + } + if modelTag.Id() == "" { + panic("apiPath called with empty model tag") + } + if modelUUID := modelTag.Id(); modelUUID != "" { + return "/model/" + modelUUID + path + } + return path +} + +// tagToString returns the value of a tag's String method, or "" if the tag is nil. +func tagToString(tag names.Tag) string { if tag == nil { return "" } return tag.String() } -func dialWebsocket(addr, path string, header http.Header, opts DialOpts, rootCAs *x509.CertPool, try *parallel.Try) error { - cfg, err := setUpWebsocket(addr, path, header, rootCAs) - if err != nil { - return err - } - return try.Start(newWebsocketDialer(cfg, opts)) -} - -func setUpWebsocket(addr, path string, header http.Header, rootCAs *x509.CertPool) (*websocket.Config, error) { +func dialWebsocket(addr, path string, opts DialOpts, tlsConfig *tls.Config, try *parallel.Try) error { // origin is required by the WebSocket API, used for "origin policy" // in websockets. We pass localhost to satisfy the API; it is // inconsequential to us. 
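apiPath above is deliberately strict, panicking on a non-slash-prefixed path or an empty model tag, since both indicate a programming error. The same routing rule as a standalone sketch, returning errors instead of panicking so it runs on its own; the UUID is a placeholder:

    package main

    import (
        "fmt"
        "strings"
    )

    // apiPath reproduces the routing rule: prefix a slash-rooted endpoint
    // path with the model UUID segment.
    func apiPath(modelUUID, path string) (string, error) {
        if !strings.HasPrefix(path, "/") {
            return "", fmt.Errorf("path %q is not slash-prefixed", path)
        }
        if modelUUID == "" {
            return "", fmt.Errorf("empty model UUID")
        }
        return "/model/" + modelUUID + path, nil
    }

    func main() {
        p, err := apiPath("deadbeef-0000-0000-0000-000000000000", "/api")
        fmt.Println(p, err) // /model/deadbeef-.../api <nil>
    }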
const origin = "http://localhost/" cfg, err := websocket.NewConfig("wss://"+addr+path, origin) if err != nil { - return nil, errors.Trace(err) - } - cfg.TlsConfig = &tls.Config{ - RootCAs: rootCAs, - ServerName: "juju-apiserver", - } - cfg.Header = header - return cfg, nil + return errors.Trace(err) + } + cfg.TlsConfig = tlsConfig + return try.Start(newWebsocketDialer(cfg, opts)) } // newWebsocketDialer returns a function that @@ -268,16 +499,35 @@ logger.Debugf("error dialing %q, will retry: %v", cfg.Location, err) } else { logger.Infof("error dialing %q: %v", cfg.Location, err) - return nil, errors.Errorf("unable to connect to %q", cfg.Location) + return nil, errors.Annotatef(err, "unable to connect to API") } } panic("unreachable") } } -func (s *State) heartbeatMonitor() { +func callWithTimeout(f func() error, timeout time.Duration) bool { + result := make(chan error, 1) + go func() { + // Note that result is buffered so that we don't leak this + // goroutine when a timeout happens. + result <- f() + }() + select { + case err := <-result: + if err != nil { + logger.Debugf("health ping failed: %v", err) + } + return err == nil + case <-time.After(timeout): + logger.Errorf("health ping timed out after %s", timeout) + return false + } +} + +func (s *state) heartbeatMonitor() { for { - if err := s.Ping(); err != nil { + if !callWithTimeout(s.Ping, PingTimeout) { close(s.broken) return } @@ -288,7 +538,7 @@ } } -func (s *State) Ping() error { +func (s *state) Ping() error { return s.APICall("Pinger", s.BestFacadeVersion("Pinger"), "", "Ping", nil, nil) } @@ -297,17 +547,17 @@ // This fills out the rpc.Request on the given facade, version for a given // object id, and the specific RPC method. It marshalls the Arguments, and will // unmarshall the result into the response object that is supplied. -func (s *State) APICall(facade string, version int, id, method string, args, response interface{}) error { +func (s *state) APICall(facade string, version int, id, method string, args, response interface{}) error { err := s.client.Call(rpc.Request{ Type: facade, Version: version, Id: id, Action: method, }, args, response) - return params.ClientError(err) + return errors.Trace(err) } -func (s *State) Close() error { +func (s *state) Close() error { err := s.client.Close() select { case <-s.closed: @@ -319,30 +569,30 @@ } // Broken returns a channel that's closed when the connection is broken. -func (s *State) Broken() <-chan struct{} { +func (s *state) Broken() <-chan struct{} { return s.broken } // RPCClient returns the RPC client for the state, so that testing // functions can tickle parts of the API that the conventional entry // points don't reach. This is exported for testing purposes only. -func (s *State) RPCClient() *rpc.Conn { +func (s *state) RPCClient() *rpc.Conn { return s.client } // Addr returns the address used to connect to the API server. -func (s *State) Addr() string { +func (s *state) Addr() string { return s.addr } -// EnvironTag returns the tag of the environment we are connected to. -func (s *State) EnvironTag() (names.EnvironTag, error) { - return names.ParseEnvironTag(s.environTag) +// ModelTag returns the tag of the model we are connected to. +func (s *state) ModelTag() (names.ModelTag, error) { + return names.ParseModelTag(s.modelTag) } -// ServerTag returns the tag of the server we are connected to. -func (s *State) ServerTag() (names.EnvironTag, error) { - return names.ParseEnvironTag(s.serverTag) +// ControllerTag returns the tag of the server we are connected to. 
+func (s *state) ControllerTag() (names.ModelTag, error) { + return names.ParseModelTag(s.controllerTag) } // APIHostPorts returns addresses that may be used to connect @@ -351,9 +601,9 @@ // The addresses are scoped (public, cloud-internal, etc.), so // the client may choose which addresses to attempt. For the // Juju CLI, all addresses must be attempted, as the CLI may -// be invoked both within and outside the environment (think +// be invoked both within and outside the model (think // private clouds). -func (s *State) APIHostPorts() [][]network.HostPort { +func (s *state) APIHostPorts() [][]network.HostPort { // NOTE: We're making a copy of s.hostPorts before returning it, // for safety. hostPorts := make([][]network.HostPort, len(s.hostPorts)) @@ -364,7 +614,7 @@ } // AllFacadeVersions returns what versions we know about for all facades -func (s *State) AllFacadeVersions() map[string][]int { +func (s *state) AllFacadeVersions() map[string][]int { facades := make(map[string][]int, len(s.facadeVersions)) for name, versions := range s.facadeVersions { facades[name] = append([]int{}, versions...) @@ -378,12 +628,20 @@ // TODO(jam) this is the eventual implementation of what version of a given // Facade we will want to use. It needs to line up the versions that the server // reports to us, with the versions that our client knows how to use. -func (s *State) BestFacadeVersion(facade string) int { +func (s *state) BestFacadeVersion(facade string) int { return bestVersion(facadeVersions[facade], s.facadeVersions[facade]) } // serverRoot returns the cached API server address and port used // to login, prefixed with "://" (usually https). -func (s *State) serverRoot() string { +func (s *state) serverRoot() string { return s.serverScheme + "://" + s.serverRootAddress } + +func (s *state) isLoggedIn() bool { + return atomic.LoadInt32(&s.loggedIn) == 1 +} + +func (s *state) setLoggedIn() { + atomic.StoreInt32(&s.loggedIn, 1) +} === modified file 'src/github.com/juju/juju/api/apiclient_test.go' --- src/github.com/juju/juju/api/apiclient_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/apiclient_test.go 2016-03-22 15:18:22 +0000 @@ -4,23 +4,21 @@ package api_test import ( - "errors" - "fmt" - "io" "net" - "strconv" - - "golang.org/x/net/websocket" - + "sync/atomic" + + "github.com/juju/errors" "github.com/juju/names" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" - "github.com/juju/utils" "github.com/juju/utils/parallel" + "golang.org/x/net/websocket" gc "gopkg.in/check.v1" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/params" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/rpc" "github.com/juju/juju/version" ) @@ -30,146 +28,77 @@ var _ = gc.Suite(&apiclientSuite{}) -func (s *apiclientSuite) TestConnectToEnv(c *gc.C) { - info := s.APIInfo(c) - conn, err := api.Connect(info, "", nil, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - defer conn.Close() - assertConnAddrForEnv(c, conn, info.Addrs[0], s.State.EnvironUUID(), "/api") -} - -func (s *apiclientSuite) TestConnectToEnvWithPathTail(c *gc.C) { - info := s.APIInfo(c) - conn, err := api.Connect(info, "/log", nil, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - defer conn.Close() - assertConnAddrForEnv(c, conn, info.Addrs[0], s.State.EnvironUUID(), "/log") -} - -func (s *apiclientSuite) TestConnectToRoot(c *gc.C) { - info := s.APIInfo(c) - info.EnvironTag = names.NewEnvironTag("") - conn, err := api.Connect(info, "", nil, api.DialOpts{}) +func (s *apiclientSuite) 
TestOpenFailsIfUsernameAndUseMacaroon(c *gc.C) { + info := s.APIInfo(c) + info.Tag = names.NewUserTag("foobar") + info.UseMacaroons = true + _, err := api.Open(info, api.DialOpts{}) + c.Assert(err, gc.ErrorMatches, "open should specifiy UseMacaroons or a username & password. Not both") +} + +func (s *apiclientSuite) TestConnectWebsocketToEnv(c *gc.C) { + info := s.APIInfo(c) + conn, _, err := api.ConnectWebsocket(info, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + defer conn.Close() + assertConnAddrForEnv(c, conn, info.Addrs[0], s.State.ModelUUID(), "/api") +} + +func (s *apiclientSuite) TestConnectWebsocketToRoot(c *gc.C) { + info := s.APIInfo(c) + info.ModelTag = names.NewModelTag("") + conn, _, err := api.ConnectWebsocket(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) defer conn.Close() assertConnAddrForRoot(c, conn, info.Addrs[0]) } -func (s *apiclientSuite) TestConnectWithHeader(c *gc.C) { - var seenCfg *websocket.Config - fakeNewDialer := func(cfg *websocket.Config, _ api.DialOpts) func(<-chan struct{}) (io.Closer, error) { - seenCfg = cfg - return func(<-chan struct{}) (io.Closer, error) { - return nil, errors.New("fake") - } - } - s.PatchValue(api.NewWebsocketDialerPtr, fakeNewDialer) - - header := utils.BasicAuthHeader("foo", "bar") - api.Connect(s.APIInfo(c), "", header, api.DialOpts{}) // Return values not important here - c.Assert(seenCfg, gc.NotNil) - c.Assert(seenCfg.Header, gc.DeepEquals, header) -} - -func (s *apiclientSuite) TestConnectRequiresTailStartsWithSlash(c *gc.C) { - _, err := api.Connect(s.APIInfo(c), "foo", nil, api.DialOpts{}) - c.Assert(err, gc.ErrorMatches, `path tail must start with "/"`) -} - -func (s *apiclientSuite) TestConnectPrefersLocalhostIfPresent(c *gc.C) { - // Create a socket that proxies to the API server though our localhost address. - info := s.APIInfo(c) - serverAddr := info.Addrs[0] - server, err := net.Dial("tcp", serverAddr) - c.Assert(err, jc.ErrorIsNil) - defer server.Close() - listener, err := net.Listen("tcp", "localhost:0") - c.Assert(err, jc.ErrorIsNil) - defer listener.Close() - go func() { - for { - client, err := listener.Accept() - if err != nil { - return - } - go io.Copy(client, server) - go io.Copy(server, client) - } - }() - - // Check that we are using our working address to connect - listenerAddress := listener.Addr().String() - // listenAddress contains the actual IP address, but APIHostPorts - // is going to report localhost, so just find the port - _, port, err := net.SplitHostPort(listenerAddress) - c.Check(err, jc.ErrorIsNil) - portNum, err := strconv.Atoi(port) - c.Check(err, jc.ErrorIsNil) - expectedHostPort := fmt.Sprintf("localhost:%d", portNum) - info.Addrs = []string{"fakeAddress:1", "fakeAddress:1", expectedHostPort} - conn, err := api.Connect(info, "/api", nil, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - defer conn.Close() - assertConnAddrForEnv(c, conn, expectedHostPort, s.State.EnvironUUID(), "/api") -} - -func (s *apiclientSuite) TestConnectMultiple(c *gc.C) { +func (s *apiclientSuite) TestConnectWebsocketMultiple(c *gc.C) { // Create a socket that proxies to the API server. 
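TestConnectWebsocketMultiple swaps the hand-rolled listener plus io.Copy loop for testing.NewTCPProxy, but the essence of such a proxy is unchanged: two io.Copy goroutines per accepted connection, and closing the listener is how a test breaks the first address. A reduced sketch of that helper (not the juju/testing implementation):

    package main

    import (
        "fmt"
        "io"
        "net"
    )

    // newProxy forwards every accepted connection to dst with an io.Copy in
    // each direction. A test breaks the proxied address simply by closing
    // the returned listener.
    func newProxy(dst string) (net.Listener, error) {
        ln, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            return nil, err
        }
        go func() {
            for {
                client, err := ln.Accept()
                if err != nil {
                    return // listener closed: the proxy is "broken"
                }
                server, err := net.Dial("tcp", dst)
                if err != nil {
                    client.Close()
                    continue
                }
                go io.Copy(server, client)
                go io.Copy(client, server)
            }
        }()
        return ln, nil
    }

    func main() {
        backend, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            panic(err)
        }
        defer backend.Close()

        proxy, err := newProxy(backend.Addr().String())
        if err != nil {
            panic(err)
        }
        defer proxy.Close()
        fmt.Println("proxying", proxy.Addr(), "->", backend.Addr())
    }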
info := s.APIInfo(c) serverAddr := info.Addrs[0] - server, err := net.Dial("tcp", serverAddr) - c.Assert(err, jc.ErrorIsNil) - defer server.Close() - listener, err := net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, jc.ErrorIsNil) - defer listener.Close() - go func() { - for { - client, err := listener.Accept() - if err != nil { - return - } - go io.Copy(client, server) - go io.Copy(server, client) - } - }() + proxy := testing.NewTCPProxy(c, serverAddr) + defer proxy.Close() // Check that we can use the proxy to connect. - proxyAddr := listener.Addr().String() - info.Addrs = []string{proxyAddr} - conn, err := api.Connect(info, "/api", nil, api.DialOpts{}) + info.Addrs = []string{proxy.Addr()} + conn, _, err := api.ConnectWebsocket(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) conn.Close() - assertConnAddrForEnv(c, conn, proxyAddr, s.State.EnvironUUID(), "/api") + assertConnAddrForEnv(c, conn, proxy.Addr(), s.State.ModelUUID(), "/api") // Now break Addrs[0], and ensure that Addrs[1] // is successfully connected to. - info.Addrs = []string{proxyAddr, serverAddr} - listener.Close() - conn, err = api.Connect(info, "/api", nil, api.DialOpts{}) + proxy.Close() + info.Addrs = []string{proxy.Addr(), serverAddr} + conn, _, err = api.ConnectWebsocket(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) conn.Close() - assertConnAddrForEnv(c, conn, serverAddr, s.State.EnvironUUID(), "/api") + assertConnAddrForEnv(c, conn, serverAddr, s.State.ModelUUID(), "/api") } -func (s *apiclientSuite) TestConnectMultipleError(c *gc.C) { +func (s *apiclientSuite) TestConnectWebsocketMultipleError(c *gc.C) { listener, err := net.Listen("tcp", "127.0.0.1:0") c.Assert(err, jc.ErrorIsNil) defer listener.Close() + // count holds the number of times we've accepted a connection. + var count int32 go func() { for { client, err := listener.Accept() if err != nil { return } + atomic.AddInt32(&count, 1) client.Close() } }() info := s.APIInfo(c) addr := listener.Addr().String() info.Addrs = []string{addr, addr, addr} - _, err = api.Connect(info, "/api", nil, api.DialOpts{}) - c.Assert(err, gc.ErrorMatches, `unable to connect to "wss://.*/environment/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/api"`) + _, _, err = api.ConnectWebsocket(info, api.DialOpts{}) + c.Assert(err, gc.ErrorMatches, `unable to connect to API: websocket.Dial wss://.*/model/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}/api: .*`) + c.Assert(atomic.LoadInt32(&count), gc.Equals, int32(3)) } func (s *apiclientSuite) TestOpen(c *gc.C) { @@ -179,39 +108,42 @@ defer st.Close() c.Assert(st.Addr(), gc.Equals, info.Addrs[0]) - envTag, err := st.EnvironTag() + modelTag, err := st.ModelTag() c.Assert(err, jc.ErrorIsNil) - c.Assert(envTag, gc.Equals, s.State.EnvironTag()) + c.Assert(modelTag, gc.Equals, s.State.ModelTag()) remoteVersion, versionSet := st.ServerVersion() c.Assert(versionSet, jc.IsTrue) - c.Assert(remoteVersion, gc.Equals, version.Current.Number) + c.Assert(remoteVersion, gc.Equals, version.Current) } -func (s *apiclientSuite) TestOpenHonorsEnvironTag(c *gc.C) { +func (s *apiclientSuite) TestOpenHonorsModelTag(c *gc.C) { info := s.APIInfo(c) // TODO(jam): 2014-06-05 http://pad.lv/1326802 // we want to test this eventually, but for now s.APIInfo uses - // conn.StateInfo() which doesn't know about EnvironTag. - // c.Check(info.EnvironTag, gc.Equals, env.Tag()) - // c.Assert(info.EnvironTag, gc.Not(gc.Equals), "") + // conn.StateInfo() which doesn't know about ModelTag. 
+ // c.Check(info.ModelTag, gc.Equals, env.Tag()) + // c.Assert(info.ModelTag, gc.Not(gc.Equals), "") // We start by ensuring we have an invalid tag, and Open should fail. - info.EnvironTag = names.NewEnvironTag("bad-tag") + info.ModelTag = names.NewModelTag("bad-tag") _, err := api.Open(info, api.DialOpts{}) - c.Check(err, gc.ErrorMatches, `unknown environment: "bad-tag"`) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown model: "bad-tag"`, + Code: "not found", + }) c.Check(params.ErrCode(err), gc.Equals, params.CodeNotFound) // Now set it to the right tag, and we should succeed. - info.EnvironTag = s.State.EnvironTag() + info.ModelTag = s.State.ModelTag() st, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) st.Close() // Backwards compatibility, we should succeed if we do not set an - // environ tag - info.EnvironTag = names.NewEnvironTag("") + // model tag + info.ModelTag = names.NewModelTag("") st, err = api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) st.Close() @@ -231,8 +163,8 @@ c.Assert(result, gc.IsNil) } -func assertConnAddrForEnv(c *gc.C, conn *websocket.Conn, addr, envUUID, tail string) { - c.Assert(conn.RemoteAddr(), gc.Matches, "^wss://"+addr+"/environment/"+envUUID+tail+"$") +func assertConnAddrForEnv(c *gc.C, conn *websocket.Conn, addr, modelUUID, tail string) { + c.Assert(conn.RemoteAddr(), gc.Matches, "^wss://"+addr+"/model/"+modelUUID+tail+"$") } func assertConnAddrForRoot(c *gc.C, conn *websocket.Conn, addr string) { === added file 'src/github.com/juju/juju/api/backups/base_test.go' --- src/github.com/juju/juju/api/backups/base_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/backups/base_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,60 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package backups_test + +import ( + "time" + + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/backups" + apiserverbackups "github.com/juju/juju/apiserver/backups" + "github.com/juju/juju/apiserver/params" + jujutesting "github.com/juju/juju/juju/testing" + stbackups "github.com/juju/juju/state/backups" + backupstesting "github.com/juju/juju/state/backups/testing" +) + +type baseSuite struct { + jujutesting.JujuConnSuite + backupstesting.BaseSuite + client *backups.Client +} + +func (s *baseSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.JujuConnSuite.SetUpTest(c) + client, err := backups.NewClient(s.APIState) + c.Assert(err, gc.IsNil) + s.client = client +} + +func (s *baseSuite) metadataResult() *params.BackupsMetadataResult { + result := apiserverbackups.ResultFromMetadata(s.Meta) + return &result +} + +func (s *baseSuite) checkMetadataResult(c *gc.C, result *params.BackupsMetadataResult, meta *stbackups.Metadata) { + var finished, stored time.Time + if meta.Finished != nil { + finished = *meta.Finished + } + if meta.Stored() != nil { + stored = *(meta.Stored()) + } + + c.Check(result.ID, gc.Equals, meta.ID()) + c.Check(result.Started, gc.Equals, meta.Started) + c.Check(result.Finished, gc.Equals, finished) + c.Check(result.Checksum, gc.Equals, meta.Checksum()) + c.Check(result.ChecksumFormat, gc.Equals, meta.ChecksumFormat()) + c.Check(result.Size, gc.Equals, meta.Size()) + c.Check(result.Stored, gc.Equals, stored) + c.Check(result.Notes, gc.Equals, meta.Notes) + + c.Check(result.Model, gc.Equals, meta.Origin.Model) + c.Check(result.Machine, gc.Equals, meta.Origin.Machine) + c.Check(result.Hostname, gc.Equals, meta.Origin.Hostname) + c.Check(result.Version, gc.Equals, meta.Origin.Version) +} === modified file 'src/github.com/juju/juju/api/backups/client.go' --- src/github.com/juju/juju/api/backups/client.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/backups/client.go 2016-03-22 15:18:22 +0000 @@ -4,10 +4,8 @@ package backups import ( - "io" - "net/http" - "strings" - + "github.com/juju/errors" + "github.com/juju/httprequest" "github.com/juju/loggo" "github.com/juju/juju/api/base" @@ -15,40 +13,23 @@ var logger = loggo.GetLogger("juju.api.backups") -// httpClient represents the methods of api.State (see api/http.go) -// needed by backups for direct HTTP requests. -type httpClient interface { - // SendHTTPRequest sends an HTTP GET request relative to the client. - SendHTTPRequest(path string, args interface{}) (*http.Request, *http.Response, error) - // SendHTTPRequestReader sends an HTTP PUT request relative to the client. - SendHTTPRequestReader(path string, attached io.Reader, meta interface{}, name string) (*http.Request, *http.Response, error) -} - -type apiState interface { - base.APICallCloser - httpClient - - // Addr returns the address used to connect to the API server. - Addr() string -} - // Client wraps the backups API for the client. type Client struct { base.ClientFacade - facade base.FacadeCaller - http httpClient - baseFacade base.FacadeCaller - publicAddress string + facade base.FacadeCaller + client *httprequest.Client } // NewClient returns a new backups API client. 
-func NewClient(st apiState) *Client { - publicAddress := strings.SplitN(st.Addr(), ":", 2)[0] +func NewClient(st base.APICallCloser) (*Client, error) { frontend, backend := base.NewClientFacade(st, "Backups") + client, err := st.HTTPClient() + if err != nil { + return nil, errors.Trace(err) + } return &Client{ - ClientFacade: frontend, - facade: backend, - http: st, - publicAddress: publicAddress, - } + ClientFacade: frontend, + facade: backend, + client: client, + }, nil } === modified file 'src/github.com/juju/juju/api/backups/download.go' --- src/github.com/juju/juju/api/backups/download.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/backups/download.go 2016-03-22 15:18:22 +0000 @@ -8,30 +8,30 @@ "net/http" "github.com/juju/errors" + "github.com/juju/httprequest" - apihttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" ) +type downloadParams struct { + httprequest.Route `httprequest:"GET /backups"` + Body params.BackupsDownloadArgs `httprequest:",body"` +} + // Download returns an io.ReadCloser for the given backup id. func (c *Client) Download(id string) (io.ReadCloser, error) { // Send the request. - args := params.BackupsDownloadArgs{ - ID: id, - } - _, resp, err := c.http.SendHTTPRequest("backups", &args) + var resp *http.Response + err := c.client.Call( + &downloadParams{ + Body: params.BackupsDownloadArgs{ + ID: id, + }, + }, + &resp, + ) if err != nil { - return nil, errors.Annotate(err, "while sending HTTP request") - } - - // Handle the response. - if resp.StatusCode != http.StatusOK { - failure, err := apihttp.ExtractAPIError(resp) - if err != nil { - return nil, errors.Annotate(err, "while extracting failure") - } - return nil, errors.Trace(failure) - } - + return nil, errors.Trace(err) + } return resp.Body, nil } === modified file 'src/github.com/juju/juju/api/backups/download_test.go' --- src/github.com/juju/juju/api/backups/download_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/backups/download_test.go 2016-03-22 15:18:22 +0000 @@ -5,31 +5,35 @@ import ( "io/ioutil" - "net/http" + "strings" - "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - apiserverhttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state/backups" ) type downloadSuite struct { - httpSuite + baseSuite } var _ = gc.Suite(&downloadSuite{}) -func (s *downloadSuite) setSuccess(c *gc.C, data string) { - body := []byte(data) - s.setResponse(c, http.StatusOK, body, apiserverhttp.CTypeRaw) -} - func (s *downloadSuite) TestSuccessfulRequest(c *gc.C) { - s.setSuccess(c, "") + store := backups.NewStorage(s.State) + defer store.Close() + backupsState := backups.NewBackups(store) - resultArchive, err := s.client.Download("spam") + r := strings.NewReader("") + meta, err := backups.NewMetadataState(s.State, "0") + c.Assert(err, jc.ErrorIsNil) + // The Add method requires the length to be set + // otherwise the content is assumed to have length 0. 
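The rewritten Download no longer hand-rolls the HTTP request: the route lives in a struct tag (httprequest:"GET /backups") and httprequest.Client does the rest. The tag is ordinary struct metadata; a sketch of reading such a declaration back with reflection, illustrating the mechanism rather than the library's internals (the route type below is a local placeholder, not httprequest.Route):

    package main

    import (
        "fmt"
        "reflect"
    )

    type route struct{}

    // downloadParams mirrors the new request type: the Route field's struct
    // tag declares the HTTP method and path.
    type downloadParams struct {
        Route route  `httprequest:"GET /backups"`
        ID    string `httprequest:",body"`
    }

    // routeOf shows the mechanism such libraries rely on: the tag is plain
    // struct metadata, recovered by reflection when the call is made.
    func routeOf(v interface{}) string {
        f, ok := reflect.TypeOf(v).FieldByName("Route")
        if !ok {
            return ""
        }
        return f.Tag.Get("httprequest")
    }

    func main() {
        fmt.Println(routeOf(downloadParams{})) // GET /backups
    }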
+ meta.Raw.Size = int64(r.Len()) + id, err := backupsState.Add(r, meta) + c.Assert(err, jc.ErrorIsNil) + resultArchive, err := s.client.Download(id) c.Assert(err, jc.ErrorIsNil) resultData, err := ioutil.ReadAll(resultArchive) @@ -38,19 +42,8 @@ } func (s *downloadSuite) TestFailedRequest(c *gc.C) { - s.setFailure(c, "something went wrong!", http.StatusInternalServerError) - - _, err := s.client.Download("spam") - - c.Check(errors.Cause(err), gc.FitsTypeOf, ¶ms.Error{}) - c.Check(err, gc.ErrorMatches, "something went wrong!") -} - -func (s *downloadSuite) TestErrorRequest(c *gc.C) { - s.setError(c, "something went wrong!", -1) - - _, err := s.client.Download("spam") - - c.Check(errors.Cause(err), gc.FitsTypeOf, ¶ms.Error{}) - c.Check(err, gc.ErrorMatches, "something went wrong!") + resultArchive, err := s.client.Download("unknown") + c.Assert(err, gc.ErrorMatches, `GET https://.*/model/.*/backups: backup metadata "unknown" not found`) + c.Assert(err, jc.Satisfies, params.IsCodeNotFound) + c.Assert(resultArchive, gc.Equals, nil) } === modified file 'src/github.com/juju/juju/api/backups/export_test.go' --- src/github.com/juju/juju/api/backups/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/backups/export_test.go 2016-03-22 15:18:22 +0000 @@ -12,11 +12,6 @@ return c.facade } -// SetHTTP sets the HTTP caller on the client. -func SetHTTP(c *Client, http httpClient) { - c.http = http -} - // PatchClientFacadeCall changes the internal FacadeCaller to one that lets // you mock out the FacadeCall method. The function returned by // PatchClientFacadeCall is a cleanup function that returns the client to its @@ -29,18 +24,6 @@ } } -// PatchClientFacadeCall changes the internal FacadeCaller to one that lets -// you mock out the FacadeCall method. The function returned by -// PatchClientFacadeCall is a cleanup function that returns the client to its -// original state. -func PatchBaseFacadeCall(c *Client, mockCall func(request string, params interface{}, response interface{}) error) func() { - orig := c.baseFacade - c.baseFacade = &resultCaller{mockCall} - return func() { - c.facade = orig - } -} - type resultCaller struct { mockCall func(request string, params interface{}, response interface{}) error } === modified file 'src/github.com/juju/juju/api/backups/package_test.go' --- src/github.com/juju/juju/api/backups/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/backups/package_test.go 2016-03-22 15:18:22 +0000 @@ -1,22 +1,9 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- package backups_test import ( "runtime" "testing" - "time" - - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api/backups" - httptesting "github.com/juju/juju/api/http/testing" - apiserverbackups "github.com/juju/juju/apiserver/backups" - "github.com/juju/juju/apiserver/params" - jujutesting "github.com/juju/juju/juju/testing" - stbackups "github.com/juju/juju/state/backups" - backupstesting "github.com/juju/juju/state/backups/testing" + coretesting "github.com/juju/juju/testing" ) @@ -27,89 +14,3 @@ } coretesting.MgoTestPackage(t) } - -type baseSuite struct { - jujutesting.JujuConnSuite - backupstesting.BaseSuite - client *backups.Client -} - -func (s *baseSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - s.JujuConnSuite.SetUpTest(c) - s.client = backups.NewClient(s.APIState) -} - -func (s *baseSuite) metadataResult() *params.BackupsMetadataResult { - result := apiserverbackups.ResultFromMetadata(s.Meta) - return &result -} - -func (s *baseSuite) checkMetadataResult(c *gc.C, result *params.BackupsMetadataResult, meta *stbackups.Metadata) { - var finished, stored time.Time - if meta.Finished != nil { - finished = *meta.Finished - } - if meta.Stored() != nil { - stored = *(meta.Stored()) - } - - c.Check(result.ID, gc.Equals, meta.ID()) - c.Check(result.Started, gc.Equals, meta.Started) - c.Check(result.Finished, gc.Equals, finished) - c.Check(result.Checksum, gc.Equals, meta.Checksum()) - c.Check(result.ChecksumFormat, gc.Equals, meta.ChecksumFormat()) - c.Check(result.Size, gc.Equals, meta.Size()) - c.Check(result.Stored, gc.Equals, stored) - c.Check(result.Notes, gc.Equals, meta.Notes) - - c.Check(result.Environment, gc.Equals, meta.Origin.Environment) - c.Check(result.Machine, gc.Equals, meta.Origin.Machine) - c.Check(result.Hostname, gc.Equals, meta.Origin.Hostname) - c.Check(result.Version, gc.Equals, meta.Origin.Version) -} - -type httpSuite struct { - baseSuite - httptesting.APIHTTPClientSuite -} - -func (s *httpSuite) SetUpSuite(c *gc.C) { - s.baseSuite.SetUpSuite(c) - s.APIHTTPClientSuite.SetUpSuite(c) -} - -func (s *httpSuite) TearDownSuite(c *gc.C) { - s.APIHTTPClientSuite.TearDownSuite(c) - s.baseSuite.TearDownSuite(c) -} - -func (s *httpSuite) SetUpTest(c *gc.C) { - s.baseSuite.SetUpTest(c) - s.APIHTTPClientSuite.SetUpTest(c) -} - -func (s *httpSuite) TearDownTest(c *gc.C) { - s.APIHTTPClientSuite.TearDownTest(c) - s.baseSuite.TearDownTest(c) -} - -func (s *httpSuite) setResponse(c *gc.C, status int, data []byte, ctype string) { - s.APIHTTPClientSuite.SetResponse(c, status, data, ctype) - backups.SetHTTP(s.client, &s.FakeClient) -} - -func (s *httpSuite) setJSONSuccess(c *gc.C, result interface{}) { - s.APIHTTPClientSuite.SetJSONSuccess(c, result) - backups.SetHTTP(s.client, &s.FakeClient) -} - -func (s *httpSuite) setFailure(c *gc.C, msg string, status int) { - s.APIHTTPClientSuite.SetFailure(c, msg, status) - backups.SetHTTP(s.client, &s.FakeClient) -} - -func (s *httpSuite) setError(c *gc.C, msg string, status int) { - s.APIHTTPClientSuite.SetError(c, msg, status) - backups.SetHTTP(s.client, &s.FakeClient) -} === modified file 'src/github.com/juju/juju/api/backups/restore.go' --- src/github.com/juju/juju/api/backups/restore.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/backups/restore.go 2016-03-22 15:18:22 +0000 @@ -5,15 +5,20 @@ import ( "io" + "strings" "time" "github.com/juju/errors" "github.com/juju/utils" + "github.com/juju/juju/apiserver" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/rpc" ) +// TODO: There are no unit tests 
for this file.
+// lp1545568 opened to track their addition.
+
 var (
 	// restoreStrategy is the attempt strategy for api server calls re-attempts in case
 	// the server is upgrading.
@@ -23,6 +28,15 @@
 	}
 )
 
+// isUpgradeInProgressErr returns whether or not the error
+// is an "upgrade in progress" error. This is necessary as
+// the error type returned from a facade call is rpc.RequestError
+// and we cannot use params.IsCodeUpgradeInProgress.
+func isUpgradeInProgressErr(err error) bool {
+	errorMessage := err.Error()
+	return strings.Contains(errorMessage, apiserver.UpgradeInProgressError.Error())
+}
+
 // ClientConnection type represents a function capable of spawning a new Client connection
 // it is used to pass around connection factories when necessary.
 // TODO(perrito666) This is a workaround for lp:1399722 .
@@ -51,10 +65,11 @@
 		if clientErr != nil {
 			return errors.Trace(clientErr)
 		}
-		if err, remoteError = prepareAttempt(client, clientCloser); err == nil {
+		err, remoteError = prepareAttempt(client, clientCloser)
+		if err == nil && remoteError == nil {
 			return nil
 		}
-		if !params.IsCodeUpgradeInProgress(remoteError) {
+		if !isUpgradeInProgressErr(err) || remoteError != nil {
 			return errors.Annotatef(err, "could not start prepare restore mode, server returned: %v", remoteError)
 		}
 	}
@@ -62,7 +77,7 @@
 }
 
 // RestoreReader restores the contents of backupFile as backup.
-func (c *Client) RestoreReader(r io.Reader, meta *params.BackupsMetadataResult, newClient ClientConnection) error {
+func (c *Client) RestoreReader(r io.ReadSeeker, meta *params.BackupsMetadataResult, newClient ClientConnection) error {
 	if err := prepareRestore(newClient); err != nil {
 		return errors.Trace(err)
 	}
@@ -108,27 +123,31 @@
 		BackupId: backupId,
 	}
 
+	cleanExit := false
 	for a := restoreStrategy.Start(); a.Next(); {
 		logger.Debugf("Attempting Restore of %q", backupId)
-		restoreClient, restoreClientCloser, err := newClient()
+		var restoreClient *Client
+		var restoreClientCloser func() error
+		restoreClient, restoreClientCloser, err = newClient()
 		if err != nil {
 			return errors.Trace(err)
 		}
 		err, remoteError = restoreAttempt(restoreClient, restoreClientCloser, restoreArgs)
-		// This signals that Restore almost certainly finished and
+		// A ShutdownErr signals that Restore almost certainly finished and
 		// triggered Exit.
-		if err == rpc.ErrShutdown && remoteError == nil {
+		if (err == nil || rpc.IsShutdownErr(err)) && remoteError == nil {
+			cleanExit = true
 			break
 		}
-		if err != nil && !params.IsCodeUpgradeInProgress(remoteError) {
+		if remoteError != nil || !isUpgradeInProgressErr(err) {
 			finishErr := finishRestore(newClient)
 			logger.Errorf("could not exit restoring status: %v", finishErr)
 			return errors.Annotatef(err, "cannot perform restore: %v", remoteError)
 		}
 	}
-	if err != rpc.ErrShutdown {
+	if !cleanExit {
 		finishErr := finishRestore(newClient)
 		if finishErr != nil {
 			logger.Errorf("could not exit restoring status: %v", finishErr)
@@ -151,22 +170,26 @@
 }
 
 // finishRestore since Restore call will end up with a reset
-// state server, finish restore will check that the the newly
-// placed state server has the mark of restore complete.
+// controller, finish restore will check that the newly
+// placed controller has the mark of restore complete.
 // upstart should have restarted the api server so we reconnect.
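 //
 // An illustrative ClientConnection factory (sketch: conn stands for an
 // assumed, already-open API connection):
 //
 //	newClient := func() (*Client, func() error, error) {
 //		client, err := NewClient(conn)
 //		if err != nil {
 //			return nil, nil, errors.Trace(err)
 //		}
 //		return client, client.Close, nil
 //	}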
func finishRestore(newClient ClientConnection) error { var err, remoteError error for a := restoreStrategy.Start(); a.Next(); { logger.Debugf("Attempting finishRestore") - finishClient, finishClientCloser, err := newClient() + var finishClient *Client + var finishClientCloser func() error + finishClient, finishClientCloser, err = newClient() if err != nil { return errors.Trace(err) } - if err, remoteError = finishAttempt(finishClient, finishClientCloser); err == nil { + err, remoteError = finishAttempt(finishClient, finishClientCloser) + if err == nil && remoteError == nil { return nil } - if !params.IsCodeUpgradeInProgress(remoteError) { + + if !isUpgradeInProgressErr(err) || remoteError != nil { return errors.Annotatef(err, "cannot complete restore: %v", remoteError) } } === modified file 'src/github.com/juju/juju/api/backups/upload.go' --- src/github.com/juju/juju/api/backups/upload.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/backups/upload.go 2016-03-22 15:18:22 +0000 @@ -10,34 +10,28 @@ "github.com/juju/errors" - apihttp "github.com/juju/juju/apiserver/http" + "github.com/juju/juju/apiserver/httpattachment" "github.com/juju/juju/apiserver/params" ) // Upload sends the backup archive to remote storage. -func (c *Client) Upload(archive io.Reader, meta params.BackupsMetadataResult) (string, error) { +func (c *Client) Upload(archive io.ReadSeeker, meta params.BackupsMetadataResult) (string, error) { // Empty out some of the metadata. meta.ID = "" meta.Stored = time.Time{} - // Send the request. - _, resp, err := c.http.SendHTTPRequestReader("backups", archive, &meta, "juju-backup.tar.gz") - if err != nil { - return "", errors.Annotate(err, "while sending HTTP request") - } - - // Handle the response. - if resp.StatusCode == http.StatusOK { - var result params.BackupsMetadataResult - if err := apihttp.ExtractJSONResult(resp, &result); err != nil { - return "", errors.Annotate(err, "while extracting result") - } - return result.ID, nil - } else { - failure, err := apihttp.ExtractAPIError(resp) - if err != nil { - return "", errors.Annotate(err, "while extracting failure") - } - return "", errors.Trace(failure) - } + req, err := http.NewRequest("PUT", "/backups", nil) + if err != nil { + return "", errors.Trace(err) + } + body, contentType, err := httpattachment.NewBody(archive, meta, "juju-backup.tar.gz") + if err != nil { + return "", errors.Annotatef(err, "cannot create multipart body") + } + req.Header.Set("Content-Type", contentType) + var result params.BackupsMetadataResult + if err := c.client.Do(req, body, &result); err != nil { + return "", errors.Trace(err) + } + return result.ID, nil } === modified file 'src/github.com/juju/juju/api/backups/upload_test.go' --- src/github.com/juju/juju/api/backups/upload_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/backups/upload_test.go 2016-03-22 15:18:22 +0000 @@ -4,48 +4,25 @@ package backups_test import ( - "bytes" "io/ioutil" + "strings" "time" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" apiserverbackups "github.com/juju/juju/apiserver/backups" - "github.com/juju/juju/apiserver/params" ) type uploadSuite struct { - httpSuite + baseSuite } var _ = gc.Suite(&uploadSuite{}) -func (s *uploadSuite) setSuccess(c *gc.C, id string) { - result := params.BackupsUploadResult{ID: id} - s.setJSONSuccess(c, &result) -} - -func (s *uploadSuite) TestSuccess(c *gc.C) { - s.setSuccess(c, "") - - data := "" - archive := ioutil.NopCloser(bytes.NewBufferString(data)) - - meta := 
apiserverbackups.ResultFromMetadata(s.Meta) - meta.ID = "" - meta.Stored = time.Time{} - - id, err := s.client.Upload(archive, meta) - c.Assert(err, jc.ErrorIsNil) - - c.Check(id, gc.Equals, "") - s.FakeClient.CheckCalledReader(c, "backups", archive, &meta, "juju-backup.tar.gz", "SendHTTPRequestReader") -} - -func (s *uploadSuite) TestFunctional(c *gc.C) { - data := "" - archive := ioutil.NopCloser(bytes.NewBufferString(data)) +func (s *uploadSuite) TestSuccessfulRequest(c *gc.C) { + data := "" + archive := strings.NewReader(data) meta := apiserverbackups.ResultFromMetadata(s.Meta) meta.ID = "" @@ -60,7 +37,6 @@ // Check the stored contents. stored, err := s.client.Download(id) c.Assert(err, jc.ErrorIsNil) - defer archive.Close() storedData, err := ioutil.ReadAll(stored) c.Assert(err, jc.ErrorIsNil) c.Check(string(storedData), gc.Equals, data) @@ -72,3 +48,19 @@ meta.Stored = storedMeta.Stored c.Check(storedMeta, gc.DeepEquals, &meta) } + +func (s *uploadSuite) TestFailedRequest(c *gc.C) { + data := "" + archive := strings.NewReader(data) + + meta := apiserverbackups.ResultFromMetadata(s.Meta) + meta.ID = "" + meta.Size = int64(len(data)) + // The Model field is required, so zero it so that + // we'll get an error from the endpoint. + meta.Model = "" + + id, err := s.client.Upload(archive, meta) + c.Assert(err, gc.ErrorMatches, `PUT https://.*/model/.*/backups: while storing backup archive: missing Model`) + c.Assert(id, gc.Equals, "") +} === modified file 'src/github.com/juju/juju/api/base/caller.go' --- src/github.com/juju/juju/api/base/caller.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/base/caller.go 2016-03-22 15:18:22 +0000 @@ -4,10 +4,27 @@ package base import ( + "fmt" + "io" + "net/url" + + "github.com/juju/errors" + "github.com/juju/httprequest" "github.com/juju/names" ) +// OldAgentError is returned when an api call is not supported +// by the Juju agent. +func OldAgentError(operation string, vers string) error { + return errors.NewNotSupported( + nil, fmt.Sprintf("%s not supported. Please upgrade API server to Juju %v or later", operation, vers)) +} + // APICaller is implemented by the client-facing State object. +// It defines the lowest level of API calls and is used by +// the various API implementations to actually make +// the calls to the API. It should not be used outside +// of tests or the api/* hierarchy. type APICaller interface { // APICall makes a call to the API server with the given object type, // id, request and parameters. The response is filled in with the @@ -18,9 +35,45 @@ // client can use with the current API server. BestFacadeVersion(facade string) int - // EnvironTag returns the tag of the environment the client is + // ModelTag returns the tag of the model the client is // connected to. - EnvironTag() (names.EnvironTag, error) + ModelTag() (names.ModelTag, error) + + // HTTPClient returns an httprequest.Client that can be used + // to make HTTP requests to the API. URLs passed to the client + // will be made relative to the API host and the current model. + // + // Note that the URLs in HTTP requests passed to the Client.Do + // method should not include a host part. + HTTPClient() (*httprequest.Client, error) + + StreamConnector +} + +// StreamConnector is implemented by the client-facing State object. +type StreamConnector interface { + // ConnectStream connects to the given HTTP websocket + // endpoint path (interpreted relative to the receiver's + // model) and returns the resulting connection. 
+	// The given parameters are used as URL query values
+	// when making the initial HTTP request.
+	//
+	// The path must start with a "/".
+	ConnectStream(path string, attrs url.Values) (Stream, error)
+}
+
+// Stream represents a streaming connection to the API.
+type Stream interface {
+	io.ReadWriteCloser
+
+	// WriteJSON encodes the given value as JSON
+	// and writes it to the connection.
+	WriteJSON(v interface{}) error
+
+	// ReadJSON reads a JSON value from the stream
+	// and decodes it into the element pointed to by
+	// the given value, which should be a pointer.
+	ReadJSON(v interface{}) error
 }
 
 // FacadeCaller is a wrapper for the common paradigm that a given client just
=== modified file 'src/github.com/juju/juju/api/base/testing/apicaller.go'
--- src/github.com/juju/juju/api/base/testing/apicaller.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/api/base/testing/apicaller.go	2016-03-22 15:18:22 +0000
@@ -4,15 +4,22 @@
 package testing
 
 import (
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/httprequest"
+	"github.com/juju/names"
+	"github.com/juju/testing"
 	jc "github.com/juju/testing/checkers"
 	gc "gopkg.in/check.v1"
 
 	"github.com/juju/juju/api/base"
-	"github.com/juju/names"
-	"github.com/juju/testing"
+	coretesting "github.com/juju/juju/testing"
 )
 
 // APICallerFunc is a function type that implements APICaller.
+// The only method that actually does anything is APICall itself,
+// which calls the function. The other methods are just stubs.
 type APICallerFunc func(objType string, version int, id, request string, params, response interface{}) error
 
 func (f APICallerFunc) APICall(objType string, version int, id, request string, params, response interface{}) error {
@@ -23,14 +30,22 @@
 	return 0
 }
 
-func (APICallerFunc) EnvironTag() (names.EnvironTag, error) {
-	return names.NewEnvironTag(""), nil
+func (APICallerFunc) ModelTag() (names.ModelTag, error) {
+	return coretesting.ModelTag, nil
 }
 
 func (APICallerFunc) Close() error {
 	return nil
 }
 
+func (APICallerFunc) HTTPClient() (*httprequest.Client, error) {
+	return nil, errors.New("no HTTP client available in this test")
+}
+
+func (APICallerFunc) ConnectStream(path string, attrs url.Values) (base.Stream, error) {
+	return nil, errors.New("stream connection unimplemented")
+}
+
 // CheckArgs holds the possible arguments to CheckingAPICaller(). Any
 // non-empty fields will be checked to match the arguments
 // received by the APICall() method of the returned APICallerFunc. If
@@ -115,3 +130,51 @@
 	},
 	)
 }
+
+// StubFacadeCaller is a testing stub implementation of api/base.FacadeCaller.
+type StubFacadeCaller struct {
+	// Stub is the raw stub used to track calls and errors.
+	Stub *testing.Stub
+	// These control the values returned by the stub's methods.
+	FacadeCallFn         func(name string, params, response interface{}) error
+	ReturnName           string
+	ReturnBestAPIVersion int
+	ReturnRawAPICaller   base.APICaller
+}
+
+// FacadeCall implements api/base.FacadeCaller.
+func (s *StubFacadeCaller) FacadeCall(request string, params, response interface{}) error {
+	s.Stub.AddCall("FacadeCall", request, params, response)
+	if err := s.Stub.NextErr(); err != nil {
+		return errors.Trace(err)
+	}
+
+	if s.FacadeCallFn != nil {
+		return s.FacadeCallFn(request, params, response)
+	}
+	return nil
+}
+
+// Name implements api/base.FacadeCaller.
+func (s *StubFacadeCaller) Name() string {
+	s.Stub.AddCall("Name")
+	s.Stub.PopNoErr()
+
+	return s.ReturnName
+}
+
+// BestAPIVersion implements api/base.FacadeCaller.
+func (s *StubFacadeCaller) BestAPIVersion() int { + s.Stub.AddCall("BestAPIVersion") + s.Stub.PopNoErr() + + return s.ReturnBestAPIVersion +} + +// RawAPICaller implements api/base.FacadeCaller. +func (s *StubFacadeCaller) RawAPICaller() base.APICaller { + s.Stub.AddCall("RawAPICaller") + s.Stub.PopNoErr() + + return s.ReturnRawAPICaller +} === modified file 'src/github.com/juju/juju/api/base/types.go' --- src/github.com/juju/juju/api/base/types.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/base/types.go 2016-03-22 15:18:22 +0000 @@ -5,14 +5,25 @@ import ( "time" + + "github.com/juju/juju/apiserver/params" ) -// UserEnvironment holds information about an environment and the last -// time the environment was accessed for a particular user. This is a client +// UserModel holds information about a model and the last +// time the model was accessed for a particular user. This is a client // side structure that translates the owner tag into a user facing string. -type UserEnvironment struct { +type UserModel struct { Name string UUID string Owner string LastConnection *time.Time } + +// ModelStatus holds information about the status of a juju model. +type ModelStatus struct { + UUID string + Life params.Life + Owner string + HostedMachineCount int + ServiceCount int +} === modified file 'src/github.com/juju/juju/api/block/client.go' --- src/github.com/juju/juju/api/block/client.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/block/client.go 2016-03-22 15:18:22 +0000 @@ -22,20 +22,18 @@ // NewClient creates a new client for accessing the block API. func NewClient(st base.APICallCloser) *Client { frontend, backend := base.NewClientFacade(st, "Block") - logger.Debugf("\nSTORAGE FRONT-END: %#v", frontend) - logger.Debugf("\nSTORAGE BACK-END: %#v", backend) return &Client{ClientFacade: frontend, facade: backend} } -// List returns blocks that are switched on for current environment. +// List returns blocks that are switched on for current model. func (c *Client) List() ([]params.Block, error) { - blocks := params.BlockResults{} + var blocks params.BlockResults if err := c.facade.FacadeCall("List", nil, &blocks); err != nil { return nil, errors.Trace(err) } - all := []params.Block{} - allErr := params.ErrorResults{} + var all []params.Block + var allErr params.ErrorResults for _, result := range blocks.Results { if result.Error != nil { allErr.Results = append(allErr.Results, params.ErrorResult{result.Error}) @@ -46,35 +44,37 @@ return all, allErr.Combine() } -// SwitchBlockOn switches desired block on for the current environment. +// SwitchBlockOn switches desired block on for the current model. // Valid block types are "BlockDestroy", "BlockRemove" and "BlockChange". func (c *Client) SwitchBlockOn(blockType, msg string) error { args := params.BlockSwitchParams{ Type: blockType, Message: msg, } - result := params.ErrorResult{} + var result params.ErrorResult if err := c.facade.FacadeCall("SwitchBlockOn", args, &result); err != nil { return errors.Trace(err) } if result.Error != nil { - return result.Error + // cope with typed error + return errors.Trace(result.Error) } return nil } -// SwitchBlockOff switches desired block off for the current environment. +// SwitchBlockOff switches desired block off for the current model. // Valid block types are "BlockDestroy", "BlockRemove" and "BlockChange". 
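 // For example (illustrative sketch):
 //
 //	if err := client.SwitchBlockOff("BlockChange"); err != nil {
 //		return errors.Trace(err)
 //	}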
func (c *Client) SwitchBlockOff(blockType string) error { args := params.BlockSwitchParams{ Type: blockType, } - result := params.ErrorResult{} + var result params.ErrorResult if err := c.facade.FacadeCall("SwitchBlockOff", args, &result); err != nil { return errors.Trace(err) } if result.Error != nil { - return result.Error + // cope with typed error + return errors.Trace(result.Error) } return nil } === modified file 'src/github.com/juju/juju/api/certpool.go' --- src/github.com/juju/juju/api/certpool.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/certpool.go 2016-03-22 15:18:22 +0000 @@ -10,13 +10,13 @@ "path/filepath" "github.com/juju/errors" + "github.com/juju/utils/series" "github.com/juju/juju/cert" "github.com/juju/juju/juju/paths" - "github.com/juju/juju/version" ) -var certDir = filepath.FromSlash(paths.MustSucceed(paths.CertDir(version.Current.Series))) +var certDir = filepath.FromSlash(paths.MustSucceed(paths.CertDir(series.HostSeries()))) // CreateCertPool creates a new x509.CertPool and adds in the caCert passed // in. All certs from the cert directory (/etc/juju/cert.d on ubuntu) are === modified file 'src/github.com/juju/juju/api/charmrevisionupdater/updater_test.go' --- src/github.com/juju/juju/api/charmrevisionupdater/updater_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/charmrevisionupdater/updater_test.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/charmrevisionupdater" "github.com/juju/juju/apiserver/charmrevisionupdater/testing" @@ -38,7 +38,7 @@ s.JujuConnSuite.SetUpTest(c) s.CharmSuite.SetUpTest(c) - machine, err := s.State.AddMachine("quantal", state.JobManageEnviron) + machine, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) password, err := utils.RandomPassword() c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/api/charms/client.go' --- src/github.com/juju/juju/api/charms/client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/charms/client.go 2016-03-22 15:18:22 +0000 @@ -1,12 +1,10 @@ // Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. +// charms provides a client for accessing the charms API. package charms import ( - "github.com/juju/errors" - "gopkg.in/juju/charm.v5" - "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" ) @@ -23,42 +21,11 @@ return &Client{ClientFacade: frontend, facade: backend} } -// CharmInfo holds information about a charm. -type CharmInfo struct { - Revision int - URL string - Config *charm.Config - Meta *charm.Meta - Actions *charm.Actions -} - -// CharmInfo returns information about the requested charm. -func (c *Client) CharmInfo(charmURL string) (*CharmInfo, error) { - args := params.CharmInfo{CharmURL: charmURL} - info := &CharmInfo{} - if err := c.facade.FacadeCall("CharmInfo", args, info); err != nil { - return nil, err - } - return info, nil -} - -// List returns a list of charm URLs currently in the state. -// If supplied parameter contains any names, the result will be filtered -// to return only the charms with supplied names. 
-func (c *Client) List(names []string) ([]string, error) { - charms := ¶ms.CharmsListResult{} - args := params.CharmsList{Names: names} - if err := c.facade.FacadeCall("List", args, charms); err != nil { - return nil, errors.Trace(err) - } - return charms.CharmURLs, nil -} - // IsMetered returns whether or not the charm is metered. func (c *Client) IsMetered(charmURL string) (bool, error) { args := params.CharmInfo{CharmURL: charmURL} - metered := ¶ms.IsMeteredResult{} - if err := c.facade.FacadeCall("IsMetered", args, metered); err != nil { + var metered params.IsMeteredResult + if err := c.facade.FacadeCall("IsMetered", args, &metered); err != nil { return false, err } return metered.Metered, nil === modified file 'src/github.com/juju/juju/api/charms/client_test.go' --- src/github.com/juju/juju/api/charms/client_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/charms/client_test.go 2016-03-22 15:18:22 +0000 @@ -22,71 +22,6 @@ var _ = gc.Suite(&charmsMockSuite{}) -func (s *charmsMockSuite) TestCharmInfo(c *gc.C) { - var called bool - curl := "local:quantal/dummy-1" - - apiCaller := basetesting.APICallerFunc( - func(objType string, - version int, - id, request string, - a, result interface{}, - ) error { - called = true - c.Check(objType, gc.Equals, "Charms") - c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "CharmInfo") - - args, ok := a.(params.CharmInfo) - c.Assert(ok, jc.IsTrue) - c.Assert(args.CharmURL, gc.DeepEquals, curl) - if wanted, k := result.(*charms.CharmInfo); k { - wanted.URL = curl - } - return nil - }) - charmsClient := charms.NewClient(apiCaller) - charmResult, err := charmsClient.CharmInfo(curl) - c.Assert(err, jc.ErrorIsNil) - c.Assert(called, jc.IsTrue) - c.Assert(charmResult.URL, gc.DeepEquals, curl) -} - -func (s *charmsMockSuite) TestList(c *gc.C) { - var called bool - charmName := "dummy-1" - curl := "local:quantal/dummy-1" - - apiCaller := basetesting.APICallerFunc( - func(objType string, - version int, - id, request string, - a, result interface{}, - ) error { - called = true - c.Check(objType, gc.Equals, "Charms") - c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "List") - - args, ok := a.(params.CharmsList) - c.Assert(ok, jc.IsTrue) - - c.Assert(args.Names, gc.HasLen, 1) - c.Assert(args.Names[0], gc.DeepEquals, charmName) - - if wanted, k := result.(*params.CharmsListResult); k { - wanted.CharmURLs = []string{curl} - } - return nil - }) - charmsClient := charms.NewClient(apiCaller) - listResult, err := charmsClient.List([]string{charmName}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(called, jc.IsTrue) - c.Assert(listResult, gc.HasLen, 1) - c.Assert(listResult[0], gc.DeepEquals, curl) -} - func (s *charmsMockSuite) TestIsMeteredFalse(c *gc.C) { var called bool curl := "local:quantal/dummy-1" @@ -104,9 +39,6 @@ args, ok := a.(params.CharmInfo) c.Assert(ok, jc.IsTrue) c.Assert(args.CharmURL, gc.DeepEquals, curl) - if wanted, k := result.(*charms.CharmInfo); k { - wanted.URL = curl - } return nil }) charmsClient := charms.NewClient(apiCaller) === modified file 'src/github.com/juju/juju/api/cleaner/cleaner.go' --- src/github.com/juju/juju/api/cleaner/cleaner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/cleaner/cleaner.go 2016-03-22 15:18:22 +0000 @@ -5,8 +5,9 @@ import ( "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) const cleanerFacade = "Cleaner" @@ -37,6 +38,6 @@ if err 
:= result.Error; err != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(api.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(api.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/client.go' --- src/github.com/juju/juju/api/client.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/client.go 2016-03-22 15:18:22 +0000 @@ -4,31 +4,25 @@ package api import ( - "bytes" - "crypto/tls" - "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" - "path" "strings" "time" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "github.com/juju/utils" "golang.org/x/net/websocket" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/macaroon.v1" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/constraints" - "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/tools" "github.com/juju/juju/version" @@ -38,10 +32,10 @@ type Client struct { base.ClientFacade facade base.FacadeCaller - st *State + st *state } -// Status returns the status of the juju environment. +// Status returns the status of the juju model. func (c *Client) Status(patterns []string) (*params.FullStatus, error) { var result params.FullStatus p := params.StatusParams{Patterns: patterns} @@ -62,9 +56,6 @@ } err := c.facade.FacadeCall("UnitStatusHistory", args, &results) if err != nil { - if params.IsCodeNotImplemented(err) { - return ¶ms.UnitStatusHistory{}, errors.NotImplementedf("UnitStatusHistory") - } return ¶ms.UnitStatusHistory{}, errors.Trace(err) } return &results, nil @@ -80,26 +71,6 @@ return &result, nil } -// ServiceSet sets configuration options on a service. -func (c *Client) ServiceSet(service string, options map[string]string) error { - p := params.ServiceSet{ - ServiceName: service, - Options: options, - } - // TODO(Nate): Put this back to ServiceSet when the GUI stops expecting - // ServiceSet to unset values set to an empty string. - return c.facade.FacadeCall("NewServiceSetForClientAPI", p, nil) -} - -// ServiceUnset resets configuration options on a service. -func (c *Client) ServiceUnset(service string, options []string) error { - p := params.ServiceUnset{ - ServiceName: service, - Options: options, - } - return c.facade.FacadeCall("ServiceUnset", p, nil) -} - // Resolved clears errors on a unit. func (c *Client) Resolved(unit string, retry bool) error { p := params.Resolved{ @@ -140,61 +111,6 @@ return results.PrivateAddress, err } -// ServiceSetYAML sets configuration options on a service -// given options in YAML format. -func (c *Client) ServiceSetYAML(service string, yaml string) error { - p := params.ServiceSetYAML{ - ServiceName: service, - Config: yaml, - } - return c.facade.FacadeCall("ServiceSetYAML", p, nil) -} - -// ServiceGet returns the configuration for the named service. -func (c *Client) ServiceGet(service string) (*params.ServiceGetResults, error) { - var results params.ServiceGetResults - params := params.ServiceGet{ServiceName: service} - err := c.facade.FacadeCall("ServiceGet", params, &results) - return &results, err -} - -// AddRelation adds a relation between the specified endpoints and returns the relation info. 
-func (c *Client) AddRelation(endpoints ...string) (*params.AddRelationResults, error) { - var addRelRes params.AddRelationResults - params := params.AddRelation{Endpoints: endpoints} - err := c.facade.FacadeCall("AddRelation", params, &addRelRes) - return &addRelRes, err -} - -// DestroyRelation removes the relation between the specified endpoints. -func (c *Client) DestroyRelation(endpoints ...string) error { - params := params.DestroyRelation{Endpoints: endpoints} - return c.facade.FacadeCall("DestroyRelation", params, nil) -} - -// ServiceCharmRelations returns the service's charms relation names. -func (c *Client) ServiceCharmRelations(service string) ([]string, error) { - var results params.ServiceCharmRelationsResults - params := params.ServiceCharmRelations{ServiceName: service} - err := c.facade.FacadeCall("ServiceCharmRelations", params, &results) - return results.CharmRelations, err -} - -// AddMachines1dot18 adds new machines with the supplied parameters. -// -// TODO(axw) 2014-04-11 #XXX -// This exists for backwards compatibility; -// We cannot remove this code while clients > 1.20 need to talk to 1.18 -// servers (which is something we need for an undetermined amount of time). -func (c *Client) AddMachines1dot18(machineParams []params.AddMachineParams) ([]params.AddMachinesResult, error) { - args := params.AddMachines{ - MachineParams: machineParams, - } - results := new(params.AddMachinesResults) - err := c.facade.FacadeCall("AddMachines", args, results) - return results.Machines, err -} - // AddMachines adds new machines with the supplied parameters. func (c *Client) AddMachines(machineParams []params.AddMachineParams) ([]params.AddMachinesResult, error) { args := params.AddMachines{ @@ -227,156 +143,19 @@ return c.facade.FacadeCall("DestroyMachines", params, nil) } -// ServiceExpose changes the juju-managed firewall to expose any ports that -// were also explicitly marked by units as open. -func (c *Client) ServiceExpose(service string) error { - params := params.ServiceExpose{ServiceName: service} - return c.facade.FacadeCall("ServiceExpose", params, nil) -} - -// ServiceUnexpose changes the juju-managed firewall to unexpose any ports that -// were also explicitly marked by units as open. -func (c *Client) ServiceUnexpose(service string) error { - params := params.ServiceUnexpose{ServiceName: service} - return c.facade.FacadeCall("ServiceUnexpose", params, nil) -} - -// ServiceDeployWithNetworks works exactly like ServiceDeploy, but -// allows the specification of requested networks that must be present -// on the machines where the service is deployed. Another way to specify -// networks to include/exclude is using constraints. -func (c *Client) ServiceDeployWithNetworks( - charmURL string, - serviceName string, - numUnits int, - configYAML string, - cons constraints.Value, - toMachineSpec string, - networks []string, -) error { - params := params.ServiceDeploy{ - ServiceName: serviceName, - CharmUrl: charmURL, - NumUnits: numUnits, - ConfigYAML: configYAML, - Constraints: cons, - ToMachineSpec: toMachineSpec, - Networks: networks, - } - return c.facade.FacadeCall("ServiceDeployWithNetworks", params, nil) -} - -// ServiceDeploy obtains the charm, either locally or from the charm store, -// and deploys it. 
-func (c *Client) ServiceDeploy(charmURL string, serviceName string, numUnits int, configYAML string, cons constraints.Value, toMachineSpec string) error { - params := params.ServiceDeploy{ - ServiceName: serviceName, - CharmUrl: charmURL, - NumUnits: numUnits, - ConfigYAML: configYAML, - Constraints: cons, - ToMachineSpec: toMachineSpec, - } - return c.facade.FacadeCall("ServiceDeploy", params, nil) -} - -// ServiceUpdate updates the service attributes, including charm URL, -// minimum number of units, settings and constraints. -// TODO(frankban) deprecate redundant API calls that this supercedes. -func (c *Client) ServiceUpdate(args params.ServiceUpdate) error { - return c.facade.FacadeCall("ServiceUpdate", args, nil) -} - -// ServiceSetCharm sets the charm for a given service. -func (c *Client) ServiceSetCharm(serviceName string, charmUrl string, force bool) error { - args := params.ServiceSetCharm{ - ServiceName: serviceName, - CharmUrl: charmUrl, - Force: force, - } - return c.facade.FacadeCall("ServiceSetCharm", args, nil) -} - -// ServiceGetCharmURL returns the charm URL the given service is -// running at present. -func (c *Client) ServiceGetCharmURL(serviceName string) (*charm.URL, error) { - result := new(params.StringResult) - args := params.ServiceGet{ServiceName: serviceName} - err := c.facade.FacadeCall("ServiceGetCharmURL", args, &result) - if err != nil { - return nil, err - } - return charm.ParseURL(result.Result) -} - -// AddServiceUnits adds a given number of units to a service. -func (c *Client) AddServiceUnits(service string, numUnits int, machineSpec string) ([]string, error) { - args := params.AddServiceUnits{ - ServiceName: service, - NumUnits: numUnits, - ToMachineSpec: machineSpec, - } - results := new(params.AddServiceUnitsResults) - err := c.facade.FacadeCall("AddServiceUnits", args, results) - return results.Units, err -} - -// AddServiceUnitsWithPlacement adds a given number of units to a service using the specified -// placement directives to assign units to machines. -func (c *Client) AddServiceUnitsWithPlacement(service string, numUnits int, placement []*instance.Placement) ([]string, error) { - args := params.AddServiceUnits{ - ServiceName: service, - NumUnits: numUnits, - Placement: placement, - } - results := new(params.AddServiceUnitsResults) - err := c.facade.FacadeCall("AddServiceUnitsWithPlacement", args, results) - return results.Units, err -} - -// DestroyServiceUnits decreases the number of units dedicated to a service. -func (c *Client) DestroyServiceUnits(unitNames ...string) error { - params := params.DestroyServiceUnits{unitNames} - return c.facade.FacadeCall("DestroyServiceUnits", params, nil) -} - -// ServiceDestroy destroys a given service. -func (c *Client) ServiceDestroy(service string) error { - params := params.ServiceDestroy{ - ServiceName: service, - } - return c.facade.FacadeCall("ServiceDestroy", params, nil) -} - -// GetServiceConstraints returns the constraints for the given service. -func (c *Client) GetServiceConstraints(service string) (constraints.Value, error) { - results := new(params.GetConstraintsResults) - err := c.facade.FacadeCall("GetServiceConstraints", params.GetServiceConstraints{service}, results) - return results.Constraints, err -} - -// GetEnvironmentConstraints returns the constraints for the environment. 
-func (c *Client) GetEnvironmentConstraints() (constraints.Value, error) {
-	results := new(params.GetConstraintsResults)
-	err := c.facade.FacadeCall("GetEnvironmentConstraints", nil, results)
-	return results.Constraints, err
-}
-
-// SetServiceConstraints specifies the constraints for the given service.
-func (c *Client) SetServiceConstraints(service string, constraints constraints.Value) error {
-	params := params.SetConstraints{
-		ServiceName: service,
-		Constraints: constraints,
-	}
-	return c.facade.FacadeCall("SetServiceConstraints", params, nil)
-}
-
-// SetEnvironmentConstraints specifies the constraints for the environment.
-func (c *Client) SetEnvironmentConstraints(constraints constraints.Value) error {
-	params := params.SetConstraints{
-		Constraints: constraints,
-	}
-	return c.facade.FacadeCall("SetEnvironmentConstraints", params, nil)
+// GetModelConstraints returns the constraints for the model.
+func (c *Client) GetModelConstraints() (constraints.Value, error) {
+	results := new(params.GetConstraintsResults)
+	err := c.facade.FacadeCall("GetModelConstraints", nil, results)
+	return results.Constraints, err
+}
+
+// SetModelConstraints specifies the constraints for the model.
+func (c *Client) SetModelConstraints(constraints constraints.Value) error {
+	params := params.SetConstraints{
+		Constraints: constraints,
+	}
+	return c.facade.FacadeCall("SetModelConstraints", params, nil)
 }
 
 // CharmInfo holds information about a charm.
@@ -398,68 +177,59 @@
 	return info, nil
 }
 
-// EnvironmentInfo holds information about the Juju environment.
-type EnvironmentInfo struct {
-	DefaultSeries string
-	ProviderType  string
-	Name          string
-	UUID          string
-	ServerUUID    string
-}
-
-// EnvironmentInfo returns details about the Juju environment.
-func (c *Client) EnvironmentInfo() (*EnvironmentInfo, error) {
-	info := new(EnvironmentInfo)
-	err := c.facade.FacadeCall("EnvironmentInfo", nil, info)
+// ModelInfo returns details about the Juju model.
+func (c *Client) ModelInfo() (params.ModelInfo, error) {
+	var info params.ModelInfo
+	err := c.facade.FacadeCall("ModelInfo", nil, &info)
 	return info, err
 }
 
-// EnvironmentUUID returns the environment UUID from the client connection.
-func (c *Client) EnvironmentUUID() string {
-	tag, err := c.st.EnvironTag()
+// ModelUUID returns the model UUID from the client connection.
+func (c *Client) ModelUUID() string {
+	tag, err := c.st.ModelTag()
 	if err != nil {
-		logger.Warningf("environ tag not an environ: %v", err)
+		logger.Warningf("model tag not a model: %v", err)
 		return ""
 	}
 	return tag.Id()
 }
 
-// ShareEnvironment allows the given users access to the environment.
-func (c *Client) ShareEnvironment(users ...names.UserTag) error {
-	var args params.ModifyEnvironUsers
+// ShareModel allows the given users access to the model.
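+// For example (sketch; the user tag is hypothetical):
+//
+//	err := client.ShareModel(names.NewUserTag("bob@local"))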
+func (c *Client) ShareModel(users ...names.UserTag) error { + var args params.ModifyModelUsers for _, user := range users { if &user != nil { - args.Changes = append(args.Changes, params.ModifyEnvironUser{ + args.Changes = append(args.Changes, params.ModifyModelUser{ UserTag: user.String(), - Action: params.AddEnvUser, + Action: params.AddModelUser, }) } } var result params.ErrorResults - err := c.facade.FacadeCall("ShareEnvironment", args, &result) + err := c.facade.FacadeCall("ShareModel", args, &result) if err != nil { return errors.Trace(err) } for i, r := range result.Results { if r.Error != nil && r.Error.Code == params.CodeAlreadyExists { - logger.Warningf("environment is already shared with %s", users[i].Canonical()) + logger.Warningf("model is already shared with %s", users[i].Canonical()) result.Results[i].Error = nil } } return result.Combine() } -// EnvironmentUserInfo returns information on all users in the environment. -func (c *Client) EnvironmentUserInfo() ([]params.EnvUserInfo, error) { - var results params.EnvUserInfoResults - err := c.facade.FacadeCall("EnvUserInfo", nil, &results) +// ModelUserInfo returns information on all users in the model. +func (c *Client) ModelUserInfo() ([]params.ModelUserInfo, error) { + var results params.ModelUserInfoResults + err := c.facade.FacadeCall("ModelUserInfo", nil, &results) if err != nil { return nil, errors.Trace(err) } - info := []params.EnvUserInfo{} + info := []params.ModelUserInfo{} for i, result := range results.Results { if result.Result == nil { return nil, errors.Errorf("unexpected nil result at position %d", i) @@ -469,34 +239,34 @@ return info, nil } -// UnshareEnvironment removes access to the environment for the given users. -func (c *Client) UnshareEnvironment(users ...names.UserTag) error { - var args params.ModifyEnvironUsers +// UnshareModel removes access to the model for the given users. +func (c *Client) UnshareModel(users ...names.UserTag) error { + var args params.ModifyModelUsers for _, user := range users { if &user != nil { - args.Changes = append(args.Changes, params.ModifyEnvironUser{ + args.Changes = append(args.Changes, params.ModifyModelUser{ UserTag: user.String(), - Action: params.RemoveEnvUser, + Action: params.RemoveModelUser, }) } } var result params.ErrorResults - err := c.facade.FacadeCall("ShareEnvironment", args, &result) + err := c.facade.FacadeCall("ShareModel", args, &result) if err != nil { return errors.Trace(err) } for i, r := range result.Results { if r.Error != nil && r.Error.Code == params.CodeNotFound { - logger.Warningf("environment was not previously shared with user %s", users[i].Canonical()) + logger.Warningf("model was not previously shared with user %s", users[i].Canonical()) result.Results[i].Error = nil } } return result.Combine() } -// WatchAll holds the id of the newly-created AllWatcher/AllEnvWatcher. +// WatchAll holds the id of the newly-created AllWatcher/AllModelWatcher. type WatchAll struct { AllWatcherId string } @@ -511,26 +281,6 @@ return NewAllWatcher(c.st, &info.AllWatcherId), nil } -// GetAnnotations returns annotations that have been set on the given entity. -// This API is now deprecated - "Annotations" client should be used instead. 
-// TODO(anastasiamac) remove for Juju 2.x -func (c *Client) GetAnnotations(tag string) (map[string]string, error) { - args := params.GetAnnotations{tag} - ann := new(params.GetAnnotationsResults) - err := c.facade.FacadeCall("GetAnnotations", args, ann) - return ann.Annotations, err -} - -// SetAnnotations sets the annotation pairs on the given entity. -// Currently annotations are supported on machines, services, -// units and the environment itself. -// This API is now deprecated - "Annotations" client should be used instead. -// TODO(anastasiamac) remove for Juju 2.x -func (c *Client) SetAnnotations(tag string, pairs map[string]string) error { - args := params.SetAnnotations{tag, pairs} - return c.facade.FacadeCall("SetAnnotations", args, nil) -} - // Close closes the Client's underlying State connection // Client is unique among the api.State facades in closing its own State // connection, but it is conventional to use a Client object without any access @@ -539,30 +289,30 @@ return c.st.Close() } -// EnvironmentGet returns all environment settings. -func (c *Client) EnvironmentGet() (map[string]interface{}, error) { - result := params.EnvironmentConfigResults{} - err := c.facade.FacadeCall("EnvironmentGet", nil, &result) +// ModelGet returns all model settings. +func (c *Client) ModelGet() (map[string]interface{}, error) { + result := params.ModelConfigResults{} + err := c.facade.FacadeCall("ModelGet", nil, &result) return result.Config, err } -// EnvironmentSet sets the given key-value pairs in the environment. -func (c *Client) EnvironmentSet(config map[string]interface{}) error { - args := params.EnvironmentSet{Config: config} - return c.facade.FacadeCall("EnvironmentSet", args, nil) -} - -// EnvironmentUnset sets the given key-value pairs in the environment. -func (c *Client) EnvironmentUnset(keys ...string) error { - args := params.EnvironmentUnset{Keys: keys} - return c.facade.FacadeCall("EnvironmentUnset", args, nil) -} - -// SetEnvironAgentVersion sets the environment agent-version setting +// ModelSet sets the given key-value pairs in the model. +func (c *Client) ModelSet(config map[string]interface{}) error { + args := params.ModelSet{Config: config} + return c.facade.FacadeCall("ModelSet", args, nil) +} + +// ModelUnset sets the given key-value pairs in the model. +func (c *Client) ModelUnset(keys ...string) error { + args := params.ModelUnset{Keys: keys} + return c.facade.FacadeCall("ModelUnset", args, nil) +} + +// SetModelAgentVersion sets the model agent-version setting // to the given value. -func (c *Client) SetEnvironAgentVersion(version version.Number) error { - args := params.SetEnvironAgentVersion{Version: version} - return c.facade.FacadeCall("SetEnvironAgentVersion", args, nil) +func (c *Client) SetModelAgentVersion(version version.Number) error { + args := params.SetModelAgentVersion{Version: version} + return c.facade.FacadeCall("SetModelAgentVersion", args, nil) } // AbortCurrentUpgrade aborts and archives the current upgrade @@ -572,10 +322,7 @@ } // FindTools returns a List containing all tools matching the specified parameters. 
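 // For example (sketch; passing -1 for minorVersion is assumed to
 // match any minor version):
 //
 //	result, err := client.FindTools(2, -1, "trusty", "amd64")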
-func (c *Client) FindTools( - majorVersion, minorVersion int, - series, arch string, -) (result params.FindToolsResult, err error) { +func (c *Client) FindTools(majorVersion, minorVersion int, series, arch string) (result params.FindToolsResult, err error) { args := params.FindToolsParams{ MajorVersion: majorVersion, MinorVersion: minorVersion, @@ -603,22 +350,25 @@ return results.Results, err } -// DestroyEnvironment puts the environment into a "dying" state, -// and removes all non-manager machine instances. DestroyEnvironment +// DestroyModel puts the model into a "dying" state, +// and removes all non-manager machine instances. DestroyModel // will fail if there are any manually-provisioned non-manager machines // in state. -func (c *Client) DestroyEnvironment() error { - return c.facade.FacadeCall("DestroyEnvironment", nil, nil) +func (c *Client) DestroyModel() error { + return c.facade.FacadeCall("DestroyModel", nil, nil) } // AddLocalCharm prepares the given charm with a local: schema in its // URL, and uploads it via the API server, returning the assigned -// charm URL. If the API server does not support charm uploads, an -// error satisfying params.IsCodeNotImplemented() is returned. +// charm URL. func (c *Client) AddLocalCharm(curl *charm.URL, ch charm.Charm) (*charm.URL, error) { if curl.Schema != "local" { return nil, errors.Errorf("expected charm URL with local: schema, got %q", curl.String()) } + httpClient, err := c.st.HTTPClient() + if err != nil { + return nil, errors.Trace(err) + } // Package the charm for uploading. var archive *os.File switch ch := ch.(type) { @@ -645,106 +395,25 @@ return nil, errors.Errorf("unknown charm type %T", ch) } - endPoint, err := c.apiEndpoint("charms", "series="+curl.Series) - if err != nil { - return nil, errors.Trace(err) - } - - // wrap archive in a noopCloser to prevent the underlying transport closing - // the request body. This is neccessary to prevent a data race on the underlying - // *os.File as the http transport _may_ issue Close once the body is sent, or it - // may not if there is an error. - noop := &noopCloser{archive} - req, err := http.NewRequest("POST", endPoint, noop) + req, err := http.NewRequest("POST", "/charms?series="+curl.Series, nil) if err != nil { return nil, errors.Annotate(err, "cannot create upload request") } - req.SetBasicAuth(c.st.tag, c.st.password) req.Header.Set("Content-Type", "application/zip") - // Send the request. - - // BUG(dimitern) 2013-12-17 bug #1261780 - // Due to issues with go 1.1.2, fixed later, we cannot use a - // regular TLS client with the CACert here, because we get "x509: - // cannot validate certificate for 127.0.0.1 because it doesn't - // contain any IP SANs". Once we use a later go version, this - // should be changed to connect to the API server with a regular - // HTTP+TLS enabled client, using the CACert (possily cached, like - // the tag and password) passed in api.Open()'s info argument. - resp, err := utils.GetNonValidatingHTTPClient().Do(req) - if err != nil { - return nil, errors.Annotate(err, "cannot upload charm") - } - defer resp.Body.Close() - - // Now parse the response & return. 
- body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errors.Annotate(err, "cannot read charm upload response") - } - if resp.StatusCode != http.StatusOK { - return nil, errors.Errorf("charm upload failed: %v (%s)", resp.StatusCode, bytes.TrimSpace(body)) - } - - var jsonResponse params.CharmsResponse - if err := json.Unmarshal(body, &jsonResponse); err != nil { - return nil, errors.Annotate(err, "cannot unmarshal upload response") - } - if jsonResponse.Error != "" { - return nil, errors.Errorf("error uploading charm: %v", jsonResponse.Error) - } - return charm.MustParseURL(jsonResponse.CharmURL), nil -} - -// noopCloser implements io.ReadCloser, but does not close the underlying io.ReadCloser. -// This is necessary to ensure the ownership of io.ReadCloser implementations that are -// passed to the net/http Transport which may (under some circumstances), call Close on -// the body passed to a request. -type noopCloser struct { - io.ReadCloser -} - -func (n *noopCloser) Close() error { - - // do not propogate the Close method to the underlying ReadCloser. - return nil -} - -func (c *Client) apiEndpoint(destination, query string) (string, error) { - root, err := c.apiRoot() - if err != nil { - return "", errors.Trace(err) - } - - upURL := url.URL{ - Scheme: c.st.serverScheme, - Host: c.st.Addr(), - Path: path.Join(root, destination), - RawQuery: query, - } - return upURL.String(), nil -} - -func (c *Client) apiRoot() (string, error) { - var apiRoot string - if _, err := c.st.ServerTag(); err == nil { - envTag, err := c.st.EnvironTag() - if err != nil { - return "", errors.Annotate(err, "cannot get API endpoint address") - } - - apiRoot = fmt.Sprintf("/environment/%s/", envTag.Id()) - } else { - // If the server tag is not set, then the agent version is < 1.23. We - // use the old API endpoint for backwards compatibility. - apiRoot = "/" - } - return apiRoot, nil + var resp params.CharmsResponse + if err := httpClient.Do(req, archive, &resp); err != nil { + return nil, errors.Trace(err) + } + curl, err = charm.ParseURL(resp.CharmURL) + if err != nil { + return nil, errors.Annotatef(err, "bad charm URL in response") + } + return curl, nil } // AddCharm adds the given charm URL (which must include revision) to -// the environment, if it does not exist yet. Local charms are not +// the model, if it does not exist yet. Local charms are not // supported, only charm store URLs. See also AddLocalCharm() in the // client-side API. // @@ -777,8 +446,8 @@ // ResolveCharm resolves the best available charm URLs with series, for charm // locations without a series specified. -func (c *Client) ResolveCharm(ref *charm.Reference) (*charm.URL, error) { - args := params.ResolveCharms{References: []charm.Reference{*ref}} +func (c *Client) ResolveCharm(ref *charm.URL) (*charm.URL, error) { + args := params.ResolveCharms{References: []charm.URL{*ref}} result := new(params.ResolveCharmResults) if err := c.facade.FacadeCall("ResolveCharms", args, result); err != nil { return nil, err @@ -794,63 +463,35 @@ } // UploadTools uploads tools at the specified location to the API server over HTTPS. -func (c *Client) UploadTools(r io.Reader, vers version.Binary, additionalSeries ...string) (*tools.Tools, error) { - // Prepare the upload request. 
- query := fmt.Sprintf("binaryVersion=%s&series=%s", - vers, - strings.Join(additionalSeries, ","), - ) - - endPoint, err := c.apiEndpoint("tools", query) - if err != nil { - return nil, errors.Trace(err) - } - - req, err := http.NewRequest("POST", endPoint, r) +func (c *Client) UploadTools(r io.ReadSeeker, vers version.Binary, additionalSeries ...string) (*tools.Tools, error) { + endpoint := fmt.Sprintf("/tools?binaryVersion=%s&series=%s", vers, strings.Join(additionalSeries, ",")) + + req, err := http.NewRequest("POST", endpoint, nil) if err != nil { return nil, errors.Annotate(err, "cannot create upload request") } - req.SetBasicAuth(c.st.tag, c.st.password) req.Header.Set("Content-Type", "application/x-tar-gz") - // Send the request. - - // BUG(dimitern) 2013-12-17 bug #1261780 - // Due to issues with go 1.1.2, fixed later, we cannot use a - // regular TLS client with the CACert here, because we get "x509: - // cannot validate certificate for 127.0.0.1 because it doesn't - // contain any IP SANs". Once we use a later go version, this - // should be changed to connect to the API server with a regular - // HTTP+TLS enabled client, using the CACert (possily cached, like - // the tag and password) passed in api.Open()'s info argument. - resp, err := utils.GetNonValidatingHTTPClient().Do(req) - if err != nil { - return nil, errors.Annotate(err, "cannot upload tools") - } - defer resp.Body.Close() - - // Now parse the response & return. - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errors.Annotate(err, "cannot read tools upload response") - } - if resp.StatusCode != http.StatusOK { - message := fmt.Sprintf("%s", bytes.TrimSpace(body)) - if resp.StatusCode == http.StatusBadRequest && strings.Contains(message, params.CodeOperationBlocked) { - // Operation Blocked errors must contain correct error code and message. - return nil, ¶ms.Error{Code: params.CodeOperationBlocked, Message: message} + httpClient, err := c.st.HTTPClient() + if err != nil { + return nil, errors.Trace(err) + } + var resp params.ToolsResult + err = httpClient.Do(req, r, &resp) + if err != nil { + msg := err.Error() + if params.ErrCode(err) == "" && strings.Contains(msg, params.CodeOperationBlocked) { + // We're probably talking to an old version of the API server + // that doesn't provide error codes. + // See https://bugs.launchpad.net/juju-core/+bug/1499277 + err = ¶ms.Error{ + Code: params.CodeOperationBlocked, + Message: msg, + } } - return nil, errors.Errorf("tools upload failed: %v (%s)", resp.StatusCode, message) - } - - var jsonResponse params.ToolsResult - if err := json.Unmarshal(body, &jsonResponse); err != nil { - return nil, errors.Annotate(err, "cannot unmarshal upload response") - } - if err := jsonResponse.Error; err != nil { - return nil, errors.Annotate(err, "error uploading tools") - } - return jsonResponse.Tools, nil + return nil, errors.Trace(err) + } + return resp.Tools, nil } // APIHostPorts returns a slice of network.HostPort for each API server. @@ -862,36 +503,6 @@ return result.NetworkHostsPorts(), nil } -// EnsureAvailability ensures the availability of Juju state servers. -// DEPRECATED: remove when we stop supporting 1.20 and earlier servers. -// This API is now on the HighAvailability facade. 
-func (c *Client) EnsureAvailability(numStateServers int, cons constraints.Value, series string) (params.StateServersChanges, error) { - var results params.StateServersChangeResults - envTag, err := c.st.EnvironTag() - if err != nil { - return params.StateServersChanges{}, errors.Trace(err) - } - arg := params.StateServersSpecs{ - Specs: []params.StateServersSpec{{ - EnvironTag: envTag.String(), - NumStateServers: numStateServers, - Constraints: cons, - Series: series, - }}} - err = c.facade.FacadeCall("EnsureAvailability", arg, &results) - if err != nil { - return params.StateServersChanges{}, err - } - if len(results.Results) != 1 { - return params.StateServersChanges{}, errors.Errorf("expected 1 result, got %d", len(results.Results)) - } - result := results.Results[0] - if result.Error != nil { - return params.StateServersChanges{}, result.Error - } - return result.Result, nil -} - // AgentVersion reports the version number of the api server. func (c *Client) AgentVersion() (version.Number, error) { var result params.AgentVersionResult @@ -903,8 +514,24 @@ // websocketDialConfig is called instead of websocket.DialConfig so we can // override it in tests. -var websocketDialConfig = func(config *websocket.Config) (io.ReadCloser, error) { - return websocket.DialConfig(config) +var websocketDialConfig = func(config *websocket.Config) (base.Stream, error) { + c, err := websocket.DialConfig(config) + if err != nil { + return nil, errors.Trace(err) + } + return websocketStream{c}, nil +} + +type websocketStream struct { + *websocket.Conn +} + +func (c websocketStream) ReadJSON(v interface{}) error { + return websocket.JSON.Receive(c.Conn, v) +} + +func (c websocketStream) WriteJSON(v interface{}) error { + return websocket.JSON.Send(c.Conn, v) } // DebugLogParams holds parameters for WatchDebugLog that control the @@ -939,6 +566,9 @@ // Replay tells the server to start at the start of the log file rather // than the end. If replay is true, backlog is ignored. Replay bool + // NoTail tells the server to only return the logs it has now, and not + // to wait for new logs to arrive. + NoTail bool } // WatchDebugLog returns a ReadCloser that the caller can read the log @@ -957,11 +587,19 @@ if err != nil { return nil, errors.NotSupportedf("WatchDebugLog") } - // Prepare URL. - attrs := url.Values{} + // Prepare URL query attributes. + attrs := url.Values{ + "includeEntity": args.IncludeEntity, + "includeModule": args.IncludeModule, + "excludeEntity": args.ExcludeEntity, + "excludeModule": args.ExcludeModule, + } if args.Replay { attrs.Set("replay", fmt.Sprint(args.Replay)) } + if args.NoTail { + attrs.Set("noTail", fmt.Sprint(args.NoTail)) + } if args.Limit > 0 { attrs.Set("maxLines", fmt.Sprint(args.Limit)) } @@ -971,54 +609,10 @@ if args.Level != loggo.UNSPECIFIED { attrs.Set("level", fmt.Sprint(args.Level)) } - attrs["includeEntity"] = args.IncludeEntity - attrs["includeModule"] = args.IncludeModule - attrs["excludeEntity"] = args.ExcludeEntity - attrs["excludeModule"] = args.ExcludeModule - - path := "/log" - if _, ok := c.st.ServerVersion(); ok { - // If the server version is set, then we know the server is capable of - // serving debug log at the environment path. We also fully expect - // that the server has returned a valid environment tag. 
- envTag, err := c.st.EnvironTag() - if err != nil { - return nil, errors.Annotate(err, "very unexpected") - } - path = fmt.Sprintf("/environment/%s/log", envTag.Id()) - } - - target := url.URL{ - Scheme: "wss", - Host: c.st.addr, - Path: path, - RawQuery: attrs.Encode(), - } - cfg, err := websocket.NewConfig(target.String(), "http://localhost/") - cfg.Header = utils.BasicAuthHeader(c.st.tag, c.st.password) - cfg.TlsConfig = &tls.Config{RootCAs: c.st.certPool, ServerName: "juju-apiserver"} - connection, err := websocketDialConfig(cfg) - if err != nil { - return nil, err - } - // Read the initial error and translate to a real error. - // Read up to the first new line character. We can't use bufio here as it - // reads too much from the reader. - line := make([]byte, 4096) - n, err := connection.Read(line) - if err != nil { - return nil, errors.Annotate(err, "unable to read initial response") - } - line = line[0:n] - - logger.Debugf("initial line: %q", line) - var errResult params.ErrorResult - err = json.Unmarshal(line, &errResult) - if err != nil { - return nil, errors.Annotate(err, "unable to unmarshal initial response") - } - if errResult.Error != nil { - return nil, errResult.Error + + connection, err := c.st.ConnectStream("/log", attrs) + if err != nil { + return nil, errors.Trace(err) } return connection, nil } === added file 'src/github.com/juju/juju/api/client_macaroon_test.go' --- src/github.com/juju/juju/api/client_macaroon_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/client_macaroon_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,78 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package api_test + +import ( + "fmt" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/api" + apitesting "github.com/juju/juju/api/testing" + "github.com/juju/juju/testcharms" +) + +var _ = gc.Suite(&clientMacaroonSuite{}) + +// clientMacaroonSuite tests that Client endpoints that are +// independent of the RPC-based API work with +// macaroon authentication. +type clientMacaroonSuite struct { + apitesting.MacaroonSuite + client *api.Client + cookieJar *apitesting.ClearableCookieJar +} + +func (s *clientMacaroonSuite) SetUpTest(c *gc.C) { + s.MacaroonSuite.SetUpTest(c) + s.AddModelUser(c, "testuser@somewhere") + s.cookieJar = apitesting.NewClearableCookieJar() + s.DischargerLogin = func() string { return "testuser@somewhere" } + s.client = s.OpenAPI(c, nil, s.cookieJar).Client() + + // Even though we've logged into the API, we want + // the tests below to exercise the discharging logic + // so we clear the cookies. 
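+ // With an empty jar the next API request has no discharge macaroon
+ // cached, so it must go back to the discharger; each test's
+ // s.DischargerLogin setting therefore decides whether that request
+ // is authorized.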
+ s.cookieJar.Clear() +} + +func (s *clientMacaroonSuite) TearDownTest(c *gc.C) { + s.client.Close() + s.MacaroonSuite.TearDownTest(c) +} + +func (s *clientMacaroonSuite) TestAddLocalCharmWithFailedDischarge(c *gc.C) { + s.DischargerLogin = func() string { return "" } + charmArchive := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + curl := charm.MustParseURL( + fmt.Sprintf("local:quantal/%s-%d", charmArchive.Meta().Name, charmArchive.Revision()), + ) + savedURL, err := s.client.AddLocalCharm(curl, charmArchive) + c.Assert(err, gc.ErrorMatches, `POST https://.*/model/deadbeef-0bad-400d-8000-4b1d0d06f00d/charms\?series=quantal: cannot get discharge from "https://.*": third party refused discharge: cannot discharge: login denied by discharger`) + c.Assert(savedURL, gc.IsNil) +} + +func (s *clientMacaroonSuite) TestAddLocalCharmSuccess(c *gc.C) { + charmArchive := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + curl := charm.MustParseURL( + fmt.Sprintf("local:quantal/%s-%d", charmArchive.Meta().Name, charmArchive.Revision()), + ) + // Upload an archive with its original revision. + savedURL, err := s.client.AddLocalCharm(curl, charmArchive) + c.Assert(err, jc.ErrorIsNil) + c.Assert(savedURL.String(), gc.Equals, curl.String()) +} + +func (s *clientMacaroonSuite) TestAddLocalCharmUnauthorized(c *gc.C) { + s.DischargerLogin = func() string { return "baduser" } + charmArchive := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + curl := charm.MustParseURL( + fmt.Sprintf("local:quantal/%s-%d", charmArchive.Meta().Name, charmArchive.Revision()), + ) + // Upload an archive with its original revision. + _, err := s.client.AddLocalCharm(curl, charmArchive) + c.Assert(err, gc.ErrorMatches, `POST https://.*/model/deadbeef-0bad-400d-8000-4b1d0d06f00d/charms\?series=quantal: invalid entity name or password`) +} === modified file 'src/github.com/juju/juju/api/client_test.go' --- src/github.com/juju/juju/api/client_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/client_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,6 @@ import ( "bufio" "bytes" - "encoding/json" "fmt" "io" "io/ioutil" @@ -17,17 +16,20 @@ "strings" "github.com/juju/errors" + "github.com/juju/httprequest" "github.com/juju/loggo" "github.com/juju/names" jc "github.com/juju/testing/checkers" "golang.org/x/net/websocket" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" + "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" jujunames "github.com/juju/juju/juju/names" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" "github.com/juju/juju/testcharms" coretesting "github.com/juju/juju/testing" @@ -138,9 +140,9 @@ } func (s *clientSuite) otherEnviron(c *gc.C) (*state.State, api.Connection) { - otherSt := s.Factory.MakeEnvironment(c, nil) + otherSt := s.Factory.MakeModel(c, nil) info := s.APIInfo(c) - info.EnvironTag = otherSt.EnvironTag() + info.ModelTag = otherSt.ModelTag() apiState, err := api.Open(info, api.DefaultDialOpts()) c.Assert(err, jc.ErrorIsNil) return otherSt, apiState @@ -153,7 +155,10 @@ // facade call, we set up a fake endpoint to test. 
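// The fake handler below replies with a typed params.CharmsResponse rather
// than a plain-text HTTP error, so the test exercises the client's decoding
// of the Error and ErrorCode fields from the JSON body.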
defer fakeAPIEndpoint(c, client, envEndpoint(c, s.APIState, "charms"), "POST", func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + httprequest.WriteJSON(w, http.StatusMethodNotAllowed, &params.CharmsResponse{ + Error: "the POST method is not allowed", + ErrorCode: params.CodeMethodNotAllowed, + }) }, ).Close() @@ -163,7 +168,7 @@ ) _, err := client.AddLocalCharm(curl, charmArchive) - c.Assert(err, gc.ErrorMatches, "charm upload failed: 405 \\(Method Not Allowed\\)") + c.Assert(err, gc.ErrorMatches, `POST http://.*/model/deadbeef-0bad-400d-8000-4b1d0d06f00d/charms\?series=quantal: the POST method is not allowed`) } func fakeAPIEndpoint(c *gc.C, client *api.Client, address, method string, handle func(http.ResponseWriter, *http.Request)) net.Listener { @@ -182,19 +187,19 @@ return lis } -// envEndpoint returns "/environment/<environ-uuid>/<destination>" +// envEndpoint returns "/model/<model-uuid>/<destination>" func envEndpoint(c *gc.C, apiState api.Connection, destination string) string { - envTag, err := apiState.EnvironTag() + modelTag, err := apiState.ModelTag() c.Assert(err, jc.ErrorIsNil) - return path.Join("/environment", envTag.Id(), destination) + return path.Join("/model", modelTag.Id(), destination) } func (s *clientSuite) TestClientEnvironmentUUID(c *gc.C) { - environ, err := s.State.Environment() + environ, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) client := s.APIState.Client() - c.Assert(client.EnvironmentUUID(), gc.Equals, environ.Tag().Id()) + c.Assert(client.ModelUUID(), gc.Equals, environ.Tag().Id()) } func (s *clientSuite) TestClientEnvironmentUsers(c *gc.C) { @@ -202,11 +207,11 @@ cleanup := api.PatchClientFacadeCall(client, func(request string, paramsIn interface{}, response interface{}) error { c.Assert(paramsIn, gc.IsNil) - if response, ok := response.(*params.EnvUserInfoResults); ok { - response.Results = []params.EnvUserInfoResult{ - {Result: &params.EnvUserInfo{UserName: "one"}}, - {Result: &params.EnvUserInfo{UserName: "two"}}, - {Result: &params.EnvUserInfo{UserName: "three"}}, + if response, ok := response.(*params.ModelUserInfoResults); ok { + response.Results = []params.ModelUserInfoResult{ + {Result: &params.ModelUserInfo{UserName: "one"}}, + {Result: &params.ModelUserInfo{UserName: "two"}}, + {Result: &params.ModelUserInfo{UserName: "three"}}, } } else { c.Log("wrong output structure") @@ -217,10 +222,10 @@ ) defer cleanup() - obtained, err := client.EnvironmentUserInfo() + obtained, err := client.ModelUserInfo() c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, jc.DeepEquals, []params.EnvUserInfo{ + c.Assert(obtained, jc.DeepEquals, []params.ModelUserInfo{ {UserName: "one"}, {UserName: "two"}, {UserName: "three"}, @@ -229,12 +234,12 @@ func (s *clientSuite) TestShareEnvironmentExistingUser(c *gc.C) { client := s.APIState.Client() - user := s.Factory.MakeEnvUser(c, nil) + user := s.Factory.MakeModelUser(c, nil) cleanup := api.PatchClientFacadeCall(client, func(request string, paramsIn interface{}, response interface{}) error { - if users, ok := paramsIn.(params.ModifyEnvironUsers); ok { + if users, ok := paramsIn.(params.ModifyModelUsers); ok { c.Assert(users.Changes, gc.HasLen, 1) - c.Logf(string(users.Changes[0].Action), gc.Equals, string(params.AddEnvUser)) + c.Logf(string(users.Changes[0].Action), gc.Equals, string(params.AddModelUser)) c.Logf(users.Changes[0].UserTag, gc.Equals, user.UserTag().String()) } else { c.Fatalf("wrong input structure") @@ -253,9 +258,9 @@ ) defer cleanup() - err := client.ShareEnvironment(user.UserTag()) + err :=
client.ShareModel(user.UserTag()) c.Assert(err, jc.ErrorIsNil) - logMsg := fmt.Sprintf("WARNING juju.api environment is already shared with %s", user.UserName()) + logMsg := fmt.Sprintf("WARNING juju.api model is already shared with %s", user.UserName()) c.Assert(c.GetTestLog(), jc.Contains, logMsg) } @@ -264,31 +269,31 @@ var called bool cleanup := api.PatchClientFacadeCall(client, func(req string, args interface{}, resp interface{}) error { - c.Assert(req, gc.Equals, "DestroyEnvironment") + c.Assert(req, gc.Equals, "DestroyModel") called = true return nil }) defer cleanup() - err := client.DestroyEnvironment() + err := client.DestroyModel() c.Assert(err, jc.ErrorIsNil) c.Assert(called, jc.IsTrue) } func (s *clientSuite) TestShareEnvironmentThreeUsers(c *gc.C) { client := s.APIState.Client() - existingUser := s.Factory.MakeEnvUser(c, nil) + existingUser := s.Factory.MakeModelUser(c, nil) localUser := s.Factory.MakeUser(c, nil) newUserTag := names.NewUserTag("foo@bar") cleanup := api.PatchClientFacadeCall(client, func(request string, paramsIn interface{}, response interface{}) error { - if users, ok := paramsIn.(params.ModifyEnvironUsers); ok { + if users, ok := paramsIn.(params.ModifyModelUsers); ok { c.Assert(users.Changes, gc.HasLen, 3) - c.Assert(string(users.Changes[0].Action), gc.Equals, string(params.AddEnvUser)) + c.Assert(string(users.Changes[0].Action), gc.Equals, string(params.AddModelUser)) c.Assert(users.Changes[0].UserTag, gc.Equals, existingUser.UserTag().String()) - c.Assert(string(users.Changes[1].Action), gc.Equals, string(params.AddEnvUser)) + c.Assert(string(users.Changes[1].Action), gc.Equals, string(params.AddModelUser)) c.Assert(users.Changes[1].UserTag, gc.Equals, localUser.UserTag().String()) - c.Assert(string(users.Changes[2].Action), gc.Equals, string(params.AddEnvUser)) + c.Assert(string(users.Changes[2].Action), gc.Equals, string(params.AddModelUser)) c.Assert(users.Changes[2].UserTag, gc.Equals, newUserTag.String()) } else { c.Log("wrong input structure") @@ -306,24 +311,24 @@ ) defer cleanup() - err := client.ShareEnvironment(existingUser.UserTag(), localUser.UserTag(), newUserTag) + err := client.ShareModel(existingUser.UserTag(), localUser.UserTag(), newUserTag) c.Assert(err, gc.ErrorMatches, `existing user`) } func (s *clientSuite) TestUnshareEnvironmentThreeUsers(c *gc.C) { client := s.APIState.Client() - missingUser := s.Factory.MakeEnvUser(c, nil) + missingUser := s.Factory.MakeModelUser(c, nil) localUser := s.Factory.MakeUser(c, nil) newUserTag := names.NewUserTag("foo@bar") cleanup := api.PatchClientFacadeCall(client, func(request string, paramsIn interface{}, response interface{}) error { - if users, ok := paramsIn.(params.ModifyEnvironUsers); ok { + if users, ok := paramsIn.(params.ModifyModelUsers); ok { c.Assert(users.Changes, gc.HasLen, 3) - c.Assert(string(users.Changes[0].Action), gc.Equals, string(params.RemoveEnvUser)) + c.Assert(string(users.Changes[0].Action), gc.Equals, string(params.RemoveModelUser)) c.Assert(users.Changes[0].UserTag, gc.Equals, missingUser.UserTag().String()) - c.Assert(string(users.Changes[1].Action), gc.Equals, string(params.RemoveEnvUser)) + c.Assert(string(users.Changes[1].Action), gc.Equals, string(params.RemoveModelUser)) c.Assert(users.Changes[1].UserTag, gc.Equals, localUser.UserTag().String()) - c.Assert(string(users.Changes[2].Action), gc.Equals, string(params.RemoveEnvUser)) + c.Assert(string(users.Changes[2].Action), gc.Equals, string(params.RemoveModelUser)) c.Assert(users.Changes[2].UserTag, gc.Equals, 
newUserTag.String()) } else { c.Log("wrong input structure") @@ -341,7 +346,7 @@ ) defer cleanup() - err := client.UnshareEnvironment(missingUser.UserTag(), localUser.UserTag(), newUserTag) + err := client.UnshareModel(missingUser.UserTag(), localUser.UserTag(), newUserTag) c.Assert(err, gc.ErrorMatches, "error unsharing user") } @@ -350,9 +355,9 @@ user := names.NewUserTag("bob@local") cleanup := api.PatchClientFacadeCall(client, func(request string, paramsIn interface{}, response interface{}) error { - if users, ok := paramsIn.(params.ModifyEnvironUsers); ok { + if users, ok := paramsIn.(params.ModifyModelUsers); ok { c.Assert(users.Changes, gc.HasLen, 1) - c.Logf(string(users.Changes[0].Action), gc.Equals, string(params.RemoveEnvUser)) + c.Logf(string(users.Changes[0].Action), gc.Equals, string(params.RemoveModelUser)) c.Logf(users.Changes[0].UserTag, gc.Equals, user.String()) } else { c.Fatalf("wrong input structure") @@ -371,64 +376,67 @@ ) defer cleanup() - err := client.UnshareEnvironment(user) + err := client.UnshareModel(user) c.Assert(err, jc.ErrorIsNil) - logMsg := fmt.Sprintf("WARNING juju.api environment was not previously shared with user %s", user.Canonical()) + logMsg := fmt.Sprintf("WARNING juju.api model was not previously shared with user %s", user.Canonical()) c.Assert(c.GetTestLog(), jc.Contains, logMsg) } func (s *clientSuite) TestWatchDebugLogConnected(c *gc.C) { - // Shows both the unmarshalling of a real error, and - // that the api server is connected. client := s.APIState.Client() - reader, err := client.WatchDebugLog(api.DebugLogParams{}) - c.Assert(err, gc.ErrorMatches, "cannot open log file: .*") - c.Assert(reader, gc.IsNil) -} - -func (s *clientSuite) TestConnectionErrorBadConnection(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (io.ReadCloser, error) { + // Use the no tail option so we don't try to start a tailing cursor + // on the oplog when there is no oplog configured in mongo as the tests + // don't set up mongo in replicaset mode. 
+ reader, err := client.WatchDebugLog(api.DebugLogParams{NoTail: true}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(reader, gc.NotNil) + reader.Close() +} + +func (s *clientSuite) TestConnectStreamRequiresSlashPathPrefix(c *gc.C) { + reader, err := s.APIState.ConnectStream("foo", nil) + c.Assert(err, gc.ErrorMatches, `path must start with "/"`) + c.Assert(reader, gc.Equals, nil) +} + +func (s *clientSuite) TestConnectStreamErrorBadConnection(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (base.Stream, error) { return nil, fmt.Errorf("bad connection") }) - client := s.APIState.Client() - reader, err := client.WatchDebugLog(api.DebugLogParams{}) + reader, err := s.APIState.ConnectStream("/", nil) c.Assert(err, gc.ErrorMatches, "bad connection") c.Assert(reader, gc.IsNil) } -func (s *clientSuite) TestConnectionErrorNoData(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (io.ReadCloser, error) { - return ioutil.NopCloser(&bytes.Buffer{}), nil +func (s *clientSuite) TestConnectStreamErrorNoData(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (base.Stream, error) { + return fakeStreamReader{&bytes.Buffer{}}, nil }) - client := s.APIState.Client() - reader, err := client.WatchDebugLog(api.DebugLogParams{}) + reader, err := s.APIState.ConnectStream("/", nil) c.Assert(err, gc.ErrorMatches, "unable to read initial response: EOF") c.Assert(reader, gc.IsNil) } -func (s *clientSuite) TestConnectionErrorBadData(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (io.ReadCloser, error) { - junk := strings.NewReader("junk\n") - return ioutil.NopCloser(junk), nil +func (s *clientSuite) TestConnectStreamErrorBadData(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (base.Stream, error) { + return fakeStreamReader{strings.NewReader("junk\n")}, nil }) - client := s.APIState.Client() - reader, err := client.WatchDebugLog(api.DebugLogParams{}) + reader, err := s.APIState.ConnectStream("/", nil) c.Assert(err, gc.ErrorMatches, "unable to unmarshal initial response: .*") c.Assert(reader, gc.IsNil) } -func (s *clientSuite) TestConnectionErrorReadError(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (io.ReadCloser, error) { +func (s *clientSuite) TestConnectStreamErrorReadError(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, func(_ *websocket.Config) (base.Stream, error) { err := fmt.Errorf("bad read") - return ioutil.NopCloser(&badReader{err}), nil + return fakeStreamReader{&badReader{err}}, nil }) - client := s.APIState.Client() - reader, err := client.WatchDebugLog(api.DebugLogParams{}) + reader, err := s.APIState.ConnectStream("/", nil) c.Assert(err, gc.ErrorMatches, "unable to read initial response: bad read") c.Assert(reader, gc.IsNil) } -func (s *clientSuite) TestParamsEncoded(c *gc.C) { +func (s *clientSuite) TestWatchDebugLogParamsEncoded(c *gc.C) { s.PatchValue(api.WebsocketDialConfig, echoURL(c)) params := api.DebugLogParams{ @@ -440,6 +448,7 @@ Backlog: 200, Level: loggo.ERROR, Replay: true, + NoTail: true, } client := s.APIState.Client() @@ -457,60 +466,44 @@ "backlog": {"200"}, "level": {"ERROR"}, "replay": {"true"}, + "noTail": {"true"}, }) } -func (s *clientSuite) TestDebugLogRootPath(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, echoURL(c)) - - // If the server is old, we log at "/log" - info := s.APIInfo(c) - info.EnvironTag = names.NewEnvironTag("") - apistate, err := api.OpenWithVersion(info, api.DialOpts{}, 1) - 
c.Assert(err, jc.ErrorIsNil) - defer apistate.Close() - reader, err := apistate.Client().WatchDebugLog(api.DebugLogParams{}) - c.Assert(err, jc.ErrorIsNil) - connectURL := connectURLFromReader(c, reader) - c.Assert(connectURL.Path, gc.Matches, "/log") -} - -func (s *clientSuite) TestDebugLogAtUUIDLogPath(c *gc.C) { - s.PatchValue(api.WebsocketDialConfig, echoURL(c)) - // If the server supports it, we should log at "/environment/UUID/log" - environ, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - info := s.APIInfo(c) - info.EnvironTag = environ.EnvironTag() +func (s *clientSuite) TestConnectStreamAtUUIDPath(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, echoURL(c)) + // If the server supports it, we should log at "/model/UUID/log" + environ, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + info := s.APIInfo(c) + info.ModelTag = environ.ModelTag() apistate, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) defer apistate.Close() - reader, err := apistate.Client().WatchDebugLog(api.DebugLogParams{}) + reader, err := apistate.ConnectStream("/path", nil) c.Assert(err, jc.ErrorIsNil) connectURL := connectURLFromReader(c, reader) - c.Assert(connectURL.Path, gc.Matches, fmt.Sprintf("/environment/%s/log", environ.UUID())) + c.Assert(connectURL.Path, gc.Matches, fmt.Sprintf("/model/%s/path", environ.UUID())) } func (s *clientSuite) TestOpenUsesEnvironUUIDPaths(c *gc.C) { info := s.APIInfo(c) - // Backwards compatibility, passing EnvironTag = "" should just work - info.EnvironTag = names.NewEnvironTag("") + + // Passing in the correct model UUID should work + environ, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + info.ModelTag = environ.ModelTag() apistate, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) apistate.Close() - // Passing in the correct environment UUID should also work - environ, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - info.EnvironTag = environ.EnvironTag() - apistate, err = api.Open(info, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - apistate.Close() - - // Passing in a bad environment UUID should fail with a known error - info.EnvironTag = names.NewEnvironTag("dead-beef-123456") - apistate, err = api.Open(info, api.DialOpts{}) - c.Check(err, gc.ErrorMatches, `unknown environment: "dead-beef-123456"`) + // Passing in a bad model UUID should fail with a known error + info.ModelTag = names.NewModelTag("dead-beef-123456") + apistate, err = api.Open(info, api.DialOpts{}) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown model: "dead-beef-123456"`, + Code: "not found", + }) c.Check(err, jc.Satisfies, params.IsCodeNotFound) c.Assert(apistate, gc.IsNil) } @@ -518,22 +511,21 @@ func (s *clientSuite) TestSetEnvironAgentVersionDuringUpgrade(c *gc.C) { // This is an integration test which ensure that a test with the // correct error code is seen by the client from the - // SetEnvironAgentVersion call when an upgrade is in progress. - envConfig, err := s.State.EnvironConfig() + // SetModelAgentVersion call when an upgrade is in progress. 
+ envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) agentVersion, ok := envConfig.AgentVersion() c.Assert(ok, jc.IsTrue) machine := s.Factory.MakeMachine(c, &factory.MachineParams{ - Jobs: []state.MachineJob{state.JobManageEnviron}, + Jobs: []state.MachineJob{state.JobManageModel}, }) err = machine.SetAgentVersion(version.MustParseBinary(agentVersion.String() + "-quantal-amd64")) c.Assert(err, jc.ErrorIsNil) - nextVersion := version.Current.Number - nextVersion.Minor++ + nextVersion := version.MustParse("9.8.7") _, err = s.State.EnsureUpgradeInfo(machine.Id(), agentVersion, nextVersion) c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().SetEnvironAgentVersion(nextVersion) + err = s.APIState.Client().SetModelAgentVersion(nextVersion) // Expect an error with a error code that indicates this specific // situation. The client needs to be able to reliably identify @@ -560,7 +552,7 @@ func (s *clientSuite) TestEnvironmentGet(c *gc.C) { client := s.APIState.Client() - env, err := client.EnvironmentGet() + env, err := client.ModelGet() c.Assert(err, jc.ErrorIsNil) // Check a known value, just checking that there is something there. c.Assert(env["type"], gc.Equals, "dummy") @@ -568,13 +560,13 @@ func (s *clientSuite) TestEnvironmentSet(c *gc.C) { client := s.APIState.Client() - err := client.EnvironmentSet(map[string]interface{}{ + err := client.ModelSet(map[string]interface{}{ "some-name": "value", "other-name": true, }) c.Assert(err, jc.ErrorIsNil) - // Check them using EnvironmentGet. - env, err := client.EnvironmentGet() + // Check them using ModelGet. + env, err := client.ModelGet() c.Assert(err, jc.ErrorIsNil) c.Assert(env["some-name"], gc.Equals, "value") c.Assert(env["other-name"], gc.Equals, true) @@ -582,16 +574,16 @@ func (s *clientSuite) TestEnvironmentUnset(c *gc.C) { client := s.APIState.Client() - err := client.EnvironmentSet(map[string]interface{}{ + err := client.ModelSet(map[string]interface{}{ "some-name": "value", }) c.Assert(err, jc.ErrorIsNil) // Now unset it and make sure it isn't there. 
- err = client.EnvironmentUnset("some-name") + err = client.ModelUnset("some-name") c.Assert(err, jc.ErrorIsNil) - env, err := client.EnvironmentGet() + env, err := client.ModelGet() c.Assert(err, jc.ErrorIsNil) _, found := env["some-name"] c.Assert(found, jc.IsFalse) @@ -606,17 +598,14 @@ return 0, r.err } -func echoURL(c *gc.C) func(*websocket.Config) (io.ReadCloser, error) { - response := ¶ms.ErrorResult{} - message, err := json.Marshal(response) - c.Assert(err, jc.ErrorIsNil) - return func(config *websocket.Config) (io.ReadCloser, error) { +func echoURL(c *gc.C) func(*websocket.Config) (base.Stream, error) { + return func(config *websocket.Config) (base.Stream, error) { pr, pw := io.Pipe() go func() { - fmt.Fprintf(pw, "%s\n", message) + fmt.Fprintf(pw, "null\n") fmt.Fprintf(pw, "%s\n", config.Location) }() - return pr, nil + return fakeStreamReader{pr}, nil } } @@ -629,3 +618,26 @@ rc.Close() return connectURL } + +type fakeStreamReader struct { + io.Reader +} + +func (s fakeStreamReader) Close() error { + if c, ok := s.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +func (s fakeStreamReader) Write([]byte) (int, error) { + panic("not implemented") +} + +func (s fakeStreamReader) ReadJSON(v interface{}) error { + panic("not implemented") +} + +func (s fakeStreamReader) WriteJSON(v interface{}) error { + panic("not implemented") +} === modified file 'src/github.com/juju/juju/api/common/apiaddresser.go' --- src/github.com/juju/juju/api/common/apiaddresser.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/common/apiaddresser.go 2016-03-22 15:18:22 +0000 @@ -5,9 +5,10 @@ import ( "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" ) // APIAddresser provides common client-side API @@ -38,11 +39,11 @@ return result.Result, nil } -// EnvironUUID returns the environment UUID to connect to the environment +// ModelUUID returns the model UUID to connect to the model // that the current connection is for. -func (a *APIAddresser) EnvironUUID() (string, error) { +func (a *APIAddresser) ModelUUID() (string, error) { var result params.StringResult - err := a.facade.FacadeCall("EnvironUUID", nil, &result) + err := a.facade.FacadeCall("ModelUUID", nil, &result) if err != nil { return "", err } @@ -76,5 +77,5 @@ if err != nil { return nil, err } - return watcher.NewNotifyWatcher(a.facade.RawAPICaller(), result), nil + return apiwatcher.NewNotifyWatcher(a.facade.RawAPICaller(), result), nil } === removed file 'src/github.com/juju/juju/api/common/environwatcher.go' --- src/github.com/juju/juju/api/common/environwatcher.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/common/environwatcher.go 1970-01-01 00:00:00 +0000 @@ -1,48 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" -) - -// EnvironWatcher provides common client-side API functions -// to call into apiserver.common.EnvironWatcher. -type EnvironWatcher struct { - facade base.FacadeCaller -} - -// NewEnvironWatcher creates a EnvironWatcher on the specified facade, -// and uses this name when calling through the caller. 
-func NewEnvironWatcher(facade base.FacadeCaller) *EnvironWatcher { - return &EnvironWatcher{facade} -} - -// WatchForEnvironConfigChanges return a NotifyWatcher waiting for the -// environment configuration to change. -func (e *EnvironWatcher) WatchForEnvironConfigChanges() (watcher.NotifyWatcher, error) { - var result params.NotifyWatchResult - err := e.facade.FacadeCall("WatchForEnvironConfigChanges", nil, &result) - if err != nil { - return nil, err - } - return watcher.NewNotifyWatcher(e.facade.RawAPICaller(), result), nil -} - -// EnvironConfig returns the current environment configuration. -func (e *EnvironWatcher) EnvironConfig() (*config.Config, error) { - var result params.EnvironConfigResult - err := e.facade.FacadeCall("EnvironConfig", nil, &result) - if err != nil { - return nil, err - } - conf, err := config.New(config.NoDefaults, result.Config) - if err != nil { - return nil, err - } - return conf, nil -} === added file 'src/github.com/juju/juju/api/common/modelwatcher.go' --- src/github.com/juju/juju/api/common/modelwatcher.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/common/modelwatcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "github.com/juju/juju/api/base" + apiwatcher "github.com/juju/juju/api/watcher" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/watcher" +) + +// ModelWatcher provides common client-side API functions +// to call into apiserver.common.ModelWatcher. +type ModelWatcher struct { + facade base.FacadeCaller +} + +// NewModelWatcher creates a ModelWatcher on the specified facade, +// and uses this name when calling through the caller. +func NewModelWatcher(facade base.FacadeCaller) *ModelWatcher { + return &ModelWatcher{facade} +} + +// WatchForModelConfigChanges return a NotifyWatcher waiting for the +// model configuration to change. +func (e *ModelWatcher) WatchForModelConfigChanges() (watcher.NotifyWatcher, error) { + var result params.NotifyWatchResult + err := e.facade.FacadeCall("WatchForModelConfigChanges", nil, &result) + if err != nil { + return nil, err + } + return apiwatcher.NewNotifyWatcher(e.facade.RawAPICaller(), result), nil +} + +// ModelConfig returns the current model configuration. +func (e *ModelWatcher) ModelConfig() (*config.Config, error) { + var result params.ModelConfigResult + err := e.facade.FacadeCall("ModelConfig", nil, &result) + if err != nil { + return nil, err + } + conf, err := config.New(config.NoDefaults, result.Config) + if err != nil { + return nil, err + } + return conf, nil +} === modified file 'src/github.com/juju/juju/api/common/watch.go' --- src/github.com/juju/juju/api/common/watch.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/common/watch.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,9 @@ "github.com/juju/names" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // Watch starts a NotifyWatcher for the entity with the specified tag. 
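// Editorial sketch, not part of the patch: the intended worker-side use of
// the ModelWatcher helper added above. The loop shape and the Kill/Wait stop
// semantics of the core watcher are our assumptions, based on how the tests
// in this branch drive the same interfaces. (Imports: fmt,
// github.com/juju/errors, github.com/juju/juju/api/base,
// github.com/juju/juju/api/common.)
func watchModelConfig(facade base.FacadeCaller) error {
	mw := common.NewModelWatcher(facade)
	w, err := mw.WatchForModelConfigChanges()
	if err != nil {
		return errors.Trace(err)
	}
	defer w.Kill() // core watchers are workers: Kill, then Wait, stops them
	for range w.Changes() {
		// Each event coalesces one or more changes; re-read the full
		// model config to pick them up.
		cfg, err := mw.ModelConfig()
		if err != nil {
			return errors.Trace(err)
		}
		fmt.Printf("model %q config changed\n", cfg.Name())
	}
	return w.Wait()
}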
@@ -30,5 +31,5 @@ if result.Error != nil { return nil, result.Error } - return watcher.NewNotifyWatcher(facade.RawAPICaller(), result), nil + return apiwatcher.NewNotifyWatcher(facade.RawAPICaller(), result), nil } === added directory 'src/github.com/juju/juju/api/controller' === added file 'src/github.com/juju/juju/api/controller/controller.go' --- src/github.com/juju/juju/api/controller/controller.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/controller/controller.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,135 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" +) + +var logger = loggo.GetLogger("juju.api.controller") + +// Client provides methods that the Juju client command uses to interact +// with the Juju controller. +type Client struct { + base.ClientFacade + facade base.FacadeCaller +} + +// NewClient creates a new `Client` based on an existing authenticated API +// connection. +func NewClient(st base.APICallCloser) *Client { + frontend, backend := base.NewClientFacade(st, "Controller") + logger.Tracef("%#v", frontend) + return &Client{ClientFacade: frontend, facade: backend} +} + +// AllModels allows controller administrators to get the list of all the +// models in the controller. +func (c *Client) AllModels() ([]base.UserModel, error) { + var models params.UserModelList + err := c.facade.FacadeCall("AllModels", nil, &models) + if err != nil { + return nil, errors.Trace(err) + } + result := make([]base.UserModel, len(models.UserModels)) + for i, model := range models.UserModels { + owner, err := names.ParseUserTag(model.OwnerTag) + if err != nil { + return nil, errors.Annotatef(err, "OwnerTag %q at position %d", model.OwnerTag, i) + } + result[i] = base.UserModel{ + Name: model.Name, + UUID: model.UUID, + Owner: owner.Canonical(), + LastConnection: model.LastConnection, + } + } + return result, nil +} + +// ModelConfig returns all model settings for the +// controller model. +func (c *Client) ModelConfig() (map[string]interface{}, error) { + result := params.ModelConfigResults{} + err := c.facade.FacadeCall("ModelConfig", nil, &result) + return result.Config, err +} + +// DestroyController puts the controller model into a "dying" state, +// and removes all non-manager machine instances. Underlying DestroyModel +// calls will fail if there are any manually-provisioned non-manager machines +// in state. +func (c *Client) DestroyController(destroyModels bool) error { + args := params.DestroyControllerArgs{ + DestroyModels: destroyModels, + } + return c.facade.FacadeCall("DestroyController", args, nil) +} + +// ListBlockedModels returns a list of all models within the controller +// which have at least one block in place. +func (c *Client) ListBlockedModels() ([]params.ModelBlockInfo, error) { + result := params.ModelBlockInfoList{} + err := c.facade.FacadeCall("ListBlockedModels", nil, &result) + return result.Models, err +} + +// RemoveBlocks removes all the blocks in the controller. +func (c *Client) RemoveBlocks() error { + args := params.RemoveBlocksArgs{All: true} + return c.facade.FacadeCall("RemoveBlocks", args, nil) +} + +// WatchAllModels returns an AllWatcher, from which you can request +// the Next collection of Deltas (for all models). 
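+// Callers own the returned watcher: Next blocks until a batch of deltas is
+// available, and batches keep arriving until Stop is called (see the
+// controller tests below for the usual Next-in-a-goroutine pattern).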
+func (c *Client) WatchAllModels() (*api.AllWatcher, error) { + info := new(api.WatchAll) + if err := c.facade.FacadeCall("WatchAllModels", nil, info); err != nil { + return nil, err + } + return api.NewAllModelWatcher(c.facade.RawAPICaller(), &info.AllWatcherId), nil +} + +// ModelStatus returns a status summary for each model tag passed in. +func (c *Client) ModelStatus(tags ...names.ModelTag) ([]base.ModelStatus, error) { + result := params.ModelStatusResults{} + models := make([]params.Entity, len(tags)) + for i, tag := range tags { + models[i] = params.Entity{Tag: tag.String()} + } + req := params.Entities{ + Entities: models, + } + if err := c.facade.FacadeCall("ModelStatus", req, &result); err != nil { + return nil, err + } + + results := make([]base.ModelStatus, len(result.Results)) + for i, r := range result.Results { + model, err := names.ParseModelTag(r.ModelTag) + if err != nil { + return nil, errors.Annotatef(err, "ModelTag %q at position %d", r.ModelTag, i) + } + owner, err := names.ParseUserTag(r.OwnerTag) + if err != nil { + return nil, errors.Annotatef(err, "OwnerTag %q at position %d", r.OwnerTag, i) + } + + results[i] = base.ModelStatus{ + UUID: model.Id(), + Life: r.Life, + Owner: owner.Canonical(), + HostedMachineCount: r.HostedMachineCount, + ServiceCount: r.ServiceCount, + } + + } + return results, nil +} === added file 'src/github.com/juju/juju/api/controller/controller_test.go' --- src/github.com/juju/juju/api/controller/controller_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/controller/controller_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,162 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller_test + +import ( + "fmt" + "time" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/controller" + commontesting "github.com/juju/juju/apiserver/common/testing" + "github.com/juju/juju/apiserver/params" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type controllerSuite struct { + jujutesting.JujuConnSuite + commontesting.BlockHelper +} + +var _ = gc.Suite(&controllerSuite{}) + +func (s *controllerSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) +} + +func (s *controllerSuite) OpenAPI(c *gc.C) *controller.Client { + return controller.NewClient(s.APIState) +} + +func (s *controllerSuite) TestAllModels(c *gc.C) { + owner := names.NewUserTag("user@remote") + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "first", Owner: owner}).Close() + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "second", Owner: owner}).Close() + + sysManager := s.OpenAPI(c) + envs, err := sysManager.AllModels() + c.Assert(err, jc.ErrorIsNil) + c.Assert(envs, gc.HasLen, 3) + + var obtained []string + for _, env := range envs { + obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner, env.Name)) + } + expected := []string{ + "admin@local/dummymodel", + "user@remote/first", + "user@remote/second", + } + c.Assert(obtained, jc.SameContents, expected) +} + +func (s *controllerSuite) TestModelConfig(c *gc.C) { + sysManager := s.OpenAPI(c) + env, err := sysManager.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env["name"], gc.Equals, "dummymodel") +} + +func (s *controllerSuite) TestDestroyController(c *gc.C) { + 
s.Factory.MakeModel(c, &factory.ModelParams{Name: "foo"}).Close() + + sysManager := s.OpenAPI(c) + err := sysManager.DestroyController(false) + c.Assert(err, gc.ErrorMatches, "controller model cannot be destroyed before all other models are destroyed") +} + +func (s *controllerSuite) TestListBlockedModels(c *gc.C) { + err := s.State.SwitchBlockOn(state.ChangeBlock, "change block for controller") + err = s.State.SwitchBlockOn(state.DestroyBlock, "destroy block for controller") + c.Assert(err, jc.ErrorIsNil) + + sysManager := s.OpenAPI(c) + results, err := sysManager.ListBlockedModels() + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, jc.DeepEquals, []params.ModelBlockInfo{ + params.ModelBlockInfo{ + Name: "dummymodel", + UUID: s.State.ModelUUID(), + OwnerTag: s.AdminUserTag(c).String(), + Blocks: []string{ + "BlockChange", + "BlockDestroy", + }, + }, + }) +} + +func (s *controllerSuite) TestRemoveBlocks(c *gc.C) { + s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + + sysManager := s.OpenAPI(c) + err := sysManager.RemoveBlocks() + c.Assert(err, jc.ErrorIsNil) + + blocks, err := s.State.AllBlocksForController() + c.Assert(err, jc.ErrorIsNil) + c.Assert(blocks, gc.HasLen, 0) +} + +func (s *controllerSuite) TestWatchAllModels(c *gc.C) { + // The WatchAllModels infrastructure is comprehensively tested + // elsewhere. This test just ensures that the API calls work end-to-end. + sysManager := s.OpenAPI(c) + + w, err := sysManager.WatchAllModels() + c.Assert(err, jc.ErrorIsNil) + defer func() { + err := w.Stop() + c.Assert(err, jc.ErrorIsNil) + }() + + deltasC := make(chan []multiwatcher.Delta) + go func() { + deltas, err := w.Next() + c.Assert(err, jc.ErrorIsNil) + deltasC <- deltas + }() + + select { + case deltas := <-deltasC: + c.Assert(deltas, gc.HasLen, 1) + modelInfo := deltas[0].Entity.(*multiwatcher.ModelInfo) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + c.Assert(modelInfo.ModelUUID, gc.Equals, env.UUID()) + c.Assert(modelInfo.Name, gc.Equals, env.Name()) + c.Assert(modelInfo.Life, gc.Equals, multiwatcher.Life("alive")) + c.Assert(modelInfo.Owner, gc.Equals, env.Owner().Id()) + c.Assert(modelInfo.ServerUUID, gc.Equals, env.ControllerUUID()) + case <-time.After(testing.LongWait): + c.Fatal("timed out") + } +} + +func (s *controllerSuite) TestModelStatus(c *gc.C) { + controller := s.OpenAPI(c) + modelTag := s.State.ModelTag() + results, err := controller.ModelStatus(modelTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, jc.DeepEquals, []base.ModelStatus{{ + UUID: modelTag.Id(), + HostedMachineCount: 0, + ServiceCount: 0, + Owner: "admin@local", + Life: params.Alive, + }}) +} === added file 'src/github.com/juju/juju/api/controller/package_test.go' --- src/github.com/juju/juju/api/controller/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/controller/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package controller_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/api/deployer/deployer_test.go' --- src/github.com/juju/juju/api/deployer/deployer_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/deployer/deployer_test.go 2016-03-22 15:18:22 +0000 @@ -17,8 +17,8 @@ "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher/watchertest" ) func TestAll(t *stdtesting.T) { @@ -46,7 +46,7 @@ func (s *deployerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.stateAPI, s.machine = s.OpenAPIAsNewMachine(c, state.JobManageEnviron, state.JobHostUnits) + s.stateAPI, s.machine = s.OpenAPIAsNewMachine(c, state.JobManageModel, state.JobHostUnits) err := s.machine.SetProviderAddresses(network.NewAddress("0.1.2.3")) c.Assert(err, jc.ErrorIsNil) @@ -104,8 +104,8 @@ c.Assert(err, jc.ErrorIsNil) w, err := machine.WatchUnits() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertChange("mysql/0", "logging/0") @@ -126,9 +126,6 @@ c.Assert(err, jc.ErrorIsNil) wc.AssertChange("logging/0") wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *deployerSuite) TestUnit(c *gc.C) { === modified file 'src/github.com/juju/juju/api/deployer/machine.go' --- src/github.com/juju/juju/api/deployer/machine.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/deployer/machine.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,9 @@ "github.com/juju/names" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // Machine represents a juju machine as seen by the deployer worker. @@ -37,6 +38,6 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) return w, nil } === added directory 'src/github.com/juju/juju/api/discoverspaces' === added file 'src/github.com/juju/juju/api/discoverspaces/discoverspaces.go' --- src/github.com/juju/juju/api/discoverspaces/discoverspaces.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/discoverspaces/discoverspaces.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,81 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package discoverspaces + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" +) + +var logger = loggo.GetLogger("juju.api.discoverspaces") + +const discoverspacesFacade = "DiscoverSpaces" + +// API provides access to the DiscoverSpaces API facade. +type API struct { + facade base.FacadeCaller +} + +// NewAPI creates a new facade. 
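+// Note that NewAPI panics rather than returning an error when caller is nil:
+// construction happens once at worker start-up, where a nil caller is a
+// programming bug rather than a runtime condition (the tests below assert
+// exactly this with PanicMatches).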
+func NewAPI(caller base.APICaller) *API { + if caller == nil { + panic("caller is nil") + } + facadeCaller := base.NewFacadeCaller(caller, discoverspacesFacade) + return &API{ + facade: facadeCaller, + } +} + +func (api *API) ListSpaces() (params.DiscoverSpacesResults, error) { + var result params.DiscoverSpacesResults + if err := api.facade.FacadeCall("ListSpaces", nil, &result); err != nil { + return result, errors.Trace(err) + } + return result, nil +} + +func (api *API) AddSubnets(args params.AddSubnetsParams) (params.ErrorResults, error) { + var result params.ErrorResults + err := api.facade.FacadeCall("AddSubnets", args, &result) + if err != nil { + return result, errors.Trace(err) + } + return result, nil +} + +func (api *API) CreateSpaces(args params.CreateSpacesParams) (results params.ErrorResults, err error) { + var result params.ErrorResults + err = api.facade.FacadeCall("CreateSpaces", args, &result) + if err != nil { + return result, errors.Trace(err) + } + return result, nil +} + +func (api *API) ListSubnets(args params.SubnetsFilters) (params.ListSubnetsResults, error) { + var result params.ListSubnetsResults + if err := api.facade.FacadeCall("ListSubnets", args, &result); err != nil { + return result, errors.Trace(err) + } + return result, nil +} + +// ModelConfig returns the current model configuration. +func (api *API) ModelConfig() (*config.Config, error) { + var result params.ModelConfigResult + err := api.facade.FacadeCall("ModelConfig", nil, &result) + if err != nil { + return nil, err + } + conf, err := config.New(config.NoDefaults, result.Config) + if err != nil { + return nil, err + } + return conf, nil +} === added file 'src/github.com/juju/juju/api/discoverspaces/discoverspaces_test.go' --- src/github.com/juju/juju/api/discoverspaces/discoverspaces_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/discoverspaces/discoverspaces_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,129 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package discoverspaces_test + +import ( + "errors" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + apitesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/discoverspaces" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + coretesting "github.com/juju/juju/testing" +) + +type DiscoverSpacesSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&DiscoverSpacesSuite{}) + +func (s *DiscoverSpacesSuite) TestNewAPI(c *gc.C) { + var called int + apiCaller := clientErrorAPICaller(c, "ListSpaces", nil, &called) + api := discoverspaces.NewAPI(apiCaller) + c.Check(api, gc.NotNil) + c.Check(called, gc.Equals, 0) + + // Make a call so that an error will be returned. 
+ _, err := api.ListSpaces() + c.Assert(err, gc.ErrorMatches, "client error!") + c.Assert(called, gc.Equals, 1) +} + +func clientErrorAPICaller(c *gc.C, method string, expectArgs interface{}, numCalls *int) base.APICaller { + args := &apitesting.CheckArgs{ + Facade: "DiscoverSpaces", + VersionIsZero: true, + IdIsEmpty: true, + Method: method, + Args: expectArgs, + } + return apitesting.CheckingAPICaller(c, args, numCalls, errors.New("client error!")) +} + +func successAPICaller(c *gc.C, method string, expectArgs, useResults interface{}, numCalls *int) base.APICaller { + args := &apitesting.CheckArgs{ + Facade: "DiscoverSpaces", + VersionIsZero: true, + IdIsEmpty: true, + Method: method, + Args: expectArgs, + Results: useResults, + } + return apitesting.CheckingAPICaller(c, args, numCalls, nil) +} + +func (s *DiscoverSpacesSuite) TestNewAPIWithNilCaller(c *gc.C) { + panicFunc := func() { discoverspaces.NewAPI(nil) } + c.Assert(panicFunc, gc.PanicMatches, "caller is nil") +} + +func (s *DiscoverSpacesSuite) TestListSpaces(c *gc.C) { + var called int + expectedResult := params.DiscoverSpacesResults{ + Results: []params.ProviderSpace{{Name: "foobar"}}, + } + apiCaller := successAPICaller(c, "ListSpaces", nil, expectedResult, &called) + api := discoverspaces.NewAPI(apiCaller) + + result, err := api.ListSpaces() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, expectedResult) + c.Assert(called, gc.Equals, 1) +} + +func (s *DiscoverSpacesSuite) TestAddSubnets(c *gc.C) { + var called int + expectedResult := params.ErrorResults{ + Results: []params.ErrorResult{{}}, + } + expectedArgs := params.AddSubnetsParams{ + Subnets: []params.AddSubnetParams{{SubnetTag: "foo"}}, + } + apiCaller := successAPICaller(c, "AddSubnets", expectedArgs, expectedResult, &called) + api := discoverspaces.NewAPI(apiCaller) + + result, err := api.AddSubnets(expectedArgs) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, expectedResult) + c.Assert(called, gc.Equals, 1) +} + +func (s *DiscoverSpacesSuite) TestCreateSpaces(c *gc.C) { + var called int + expectedResult := params.ErrorResults{ + Results: []params.ErrorResult{{}}, + } + expectedArgs := params.CreateSpacesParams{ + Spaces: []params.CreateSpaceParams{{SpaceTag: "foo"}}, + } + apiCaller := successAPICaller(c, "CreateSpaces", expectedArgs, expectedResult, &called) + api := discoverspaces.NewAPI(apiCaller) + + result, err := api.CreateSpaces(expectedArgs) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, expectedResult) + c.Assert(called, gc.Equals, 1) +} + +func (s *DiscoverSpacesSuite) TestModelConfig(c *gc.C) { + var called int + cfg, err := config.New(config.UseDefaults, coretesting.FakeConfig()) + c.Assert(err, jc.ErrorIsNil) + expectedResult := params.ModelConfigResult{ + Config: cfg.AllAttrs(), + } + apiCaller := successAPICaller(c, "ModelConfig", nil, expectedResult, &called) + api := discoverspaces.NewAPI(apiCaller) + + result, err := api.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, cfg) + c.Assert(called, gc.Equals, 1) +} === added file 'src/github.com/juju/juju/api/discoverspaces/package_test.go' --- src/github.com/juju/juju/api/discoverspaces/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/discoverspaces/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package discoverspaces_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === removed directory 'src/github.com/juju/juju/api/environment' === removed file 'src/github.com/juju/juju/api/environment/environment.go' --- src/github.com/juju/juju/api/environment/environment.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/environment/environment.go 1970-01-01 00:00:00 +0000 @@ -1,26 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment - -import ( - "github.com/juju/juju/api/base" - "github.com/juju/juju/api/common" -) - -const apiName = "Environment" - -// Facade provides access to a machine environment worker's view of the world. -type Facade struct { - *common.EnvironWatcher - *ToolsVersionUpdater -} - -// NewFacade returns a new api client facade instance. -func NewFacade(caller base.APICaller) *Facade { - facadeCaller := base.NewFacadeCaller(caller, apiName) - return &Facade{ - EnvironWatcher: common.NewEnvironWatcher(facadeCaller), - ToolsVersionUpdater: NewToolsVersionUpdater(facadeCaller), - } -} === removed file 'src/github.com/juju/juju/api/environment/environment_test.go' --- src/github.com/juju/juju/api/environment/environment_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/environment/environment_test.go 1970-01-01 00:00:00 +0000 @@ -1,30 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment_test - -import ( - gc "gopkg.in/check.v1" - - apitesting "github.com/juju/juju/api/testing" - jujutesting "github.com/juju/juju/juju/testing" -) - -type environmentSuite struct { - jujutesting.JujuConnSuite - *apitesting.EnvironWatcherTests -} - -var _ = gc.Suite(&environmentSuite{}) - -func (s *environmentSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - stateAPI, _ := s.OpenAPIAsNewMachine(c) - - environmentAPI := stateAPI.Environment() - c.Assert(environmentAPI, gc.NotNil) - - s.EnvironWatcherTests = apitesting.NewEnvironWatcherTests( - environmentAPI, s.BackingState, apitesting.NoSecrets) -} === removed file 'src/github.com/juju/juju/api/environment/package_test.go' --- src/github.com/juju/juju/api/environment/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/environment/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/api/environment/toolsversion.go' --- src/github.com/juju/juju/api/environment/toolsversion.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/environment/toolsversion.go 1970-01-01 00:00:00 +0000 @@ -1,24 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment - -import ( - "github.com/juju/juju/api/base" -) - -// ToolsVersionUpdater allows api calls to update available tool version. -type ToolsVersionUpdater struct { - facade base.FacadeCaller -} - -// NewToolsVersionUpdater returns a new ToolsVersionUpdater pointer. 
-func NewToolsVersionUpdater(facade base.FacadeCaller) *ToolsVersionUpdater { - return &ToolsVersionUpdater{facade} -} - -// UpdateToolsVersion calls UpdateToolsAvailable in the server with -// the provided version. -func (t *ToolsVersionUpdater) UpdateToolsVersion() error { - return t.facade.FacadeCall("UpdateToolsAvailable", nil, nil) -} === removed directory 'src/github.com/juju/juju/api/environmentmanager' === removed file 'src/github.com/juju/juju/api/environmentmanager/environmentmanager.go' --- src/github.com/juju/juju/api/environmentmanager/environmentmanager.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/environmentmanager/environmentmanager.go 1970-01-01 00:00:00 +0000 @@ -1,97 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environmentmanager - -import ( - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - - "github.com/juju/juju/api/base" - "github.com/juju/juju/apiserver/params" -) - -var logger = loggo.GetLogger("juju.api.environmentmanager") - -// Client provides methods that the Juju client command uses to interact -// with environments stored in the Juju Server. -type Client struct { - base.ClientFacade - facade base.FacadeCaller -} - -// NewClient creates a new `Client` based on an existing authenticated API -// connection. -func NewClient(st base.APICallCloser) *Client { - frontend, backend := base.NewClientFacade(st, "EnvironmentManager") - logger.Debugf("%#v", frontend) - return &Client{ClientFacade: frontend, facade: backend} -} - -// ConfigSkeleton returns config values to be used as a starting point for the -// API caller to construct a valid environment specific config. The provider -// and region params are there for future use, and current behaviour expects -// both of these to be empty. -func (c *Client) ConfigSkeleton(provider, region string) (params.EnvironConfig, error) { - var result params.EnvironConfigResult - args := params.EnvironmentSkeletonConfigArgs{ - Provider: provider, - Region: region, - } - err := c.facade.FacadeCall("ConfigSkeleton", args, &result) - if err != nil { - return nil, errors.Trace(err) - } - return result.Config, nil -} - -// CreateEnvironment creates a new environment using the account and -// environment config specified in the args. -func (c *Client) CreateEnvironment(owner string, account, config map[string]interface{}) (params.Environment, error) { - var result params.Environment - if !names.IsValidUser(owner) { - return result, errors.Errorf("invalid owner name %q", owner) - } - createArgs := params.EnvironmentCreateArgs{ - OwnerTag: names.NewUserTag(owner).String(), - Account: account, - Config: config, - } - err := c.facade.FacadeCall("CreateEnvironment", createArgs, &result) - if err != nil { - return result, errors.Trace(err) - } - logger.Infof("created environment %s (%s)", result.Name, result.UUID) - return result, nil -} - -// ListEnvironments returns the environments that the specified user -// has access to in the current server. Only that state server owner -// can list environments for any user (at this stage). Other users -// can only ask about their own environments. 
-func (c *Client) ListEnvironments(user string) ([]base.UserEnvironment, error) { - var environments params.UserEnvironmentList - if !names.IsValidUser(user) { - return nil, errors.Errorf("invalid user name %q", user) - } - entity := params.Entity{names.NewUserTag(user).String()} - err := c.facade.FacadeCall("ListEnvironments", entity, &environments) - if err != nil { - return nil, errors.Trace(err) - } - result := make([]base.UserEnvironment, len(environments.UserEnvironments)) - for i, env := range environments.UserEnvironments { - owner, err := names.ParseUserTag(env.OwnerTag) - if err != nil { - return nil, errors.Annotatef(err, "OwnerTag %q at position %d", env.OwnerTag, i) - } - result[i] = base.UserEnvironment{ - Name: env.Name, - UUID: env.UUID, - Owner: owner.Canonical(), - LastConnection: env.LastConnection, - } - } - return result, nil -} === removed file 'src/github.com/juju/juju/api/environmentmanager/environmentmanager_test.go' --- src/github.com/juju/juju/api/environmentmanager/environmentmanager_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/environmentmanager/environmentmanager_test.go 1970-01-01 00:00:00 +0000 @@ -1,119 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environmentmanager_test - -import ( - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/environmentmanager" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/feature" - "github.com/juju/juju/juju" - jujutesting "github.com/juju/juju/juju/testing" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type environmentmanagerSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&environmentmanagerSuite{}) - -func (s *environmentmanagerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) -} - -func (s *environmentmanagerSuite) OpenAPI(c *gc.C) *environmentmanager.Client { - conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - s.AddCleanup(func(*gc.C) { conn.Close() }) - return environmentmanager.NewClient(conn) -} - -func (s *environmentmanagerSuite) TestConfigSkeleton(c *gc.C) { - s.SetFeatureFlags(feature.JES) - envManager := s.OpenAPI(c) - result, err := envManager.ConfigSkeleton("", "") - c.Assert(err, jc.ErrorIsNil) - - // The apiPort changes every test run as the dummy provider - // looks for a random open port. - apiPort := s.Environ.Config().APIPort() - - // Numbers coming over the api are floats, not ints. 
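// An aside on that comment, independent of this diff: encoding/json decodes
// every JSON number into float64 when the unmarshal target is an interface{}
// value, which is why the expected ports below are written as float64(...)
// rather than int. A minimal sketch:
//
//	var v map[string]interface{}
//	_ = json.Unmarshal([]byte(`{"state-port": 1234}`), &v)
//	fmt.Printf("%T\n", v["state-port"]) // prints: float64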
- c.Assert(result, jc.DeepEquals, params.EnvironConfig{ - "type": "dummy", - "ca-cert": coretesting.CACert, - "state-port": float64(1234), - "api-port": float64(apiPort), - "syslog-port": float64(2345), - }) - -} - -func (s *environmentmanagerSuite) TestCreateEnvironmentBadUser(c *gc.C) { - envManager := s.OpenAPI(c) - _, err := envManager.CreateEnvironment("not a user", nil, nil) - c.Assert(err, gc.ErrorMatches, `invalid owner name "not a user"`) -} - -func (s *environmentmanagerSuite) TestCreateEnvironmentFeatureNotEnabled(c *gc.C) { - envManager := s.OpenAPI(c) - _, err := envManager.CreateEnvironment("owner", nil, nil) - c.Assert(err, gc.ErrorMatches, `unknown object type "EnvironmentManager"`) -} - -func (s *environmentmanagerSuite) TestCreateEnvironmentMissingConfig(c *gc.C) { - s.SetFeatureFlags(feature.JES) - envManager := s.OpenAPI(c) - _, err := envManager.CreateEnvironment("owner", nil, nil) - c.Assert(err, gc.ErrorMatches, `creating config from values failed: name: expected string, got nothing`) -} - -func (s *environmentmanagerSuite) TestCreateEnvironment(c *gc.C) { - s.SetFeatureFlags(feature.JES) - envManager := s.OpenAPI(c) - user := s.Factory.MakeUser(c, nil) - owner := user.UserTag().Canonical() - newEnv, err := envManager.CreateEnvironment(owner, nil, map[string]interface{}{ - "name": "new-env", - "authorized-keys": "ssh-key", - // dummy needs state-server - "state-server": false, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(newEnv.Name, gc.Equals, "new-env") - c.Assert(newEnv.OwnerTag, gc.Equals, user.Tag().String()) - c.Assert(utils.IsValidUUIDString(newEnv.UUID), jc.IsTrue) -} - -func (s *environmentmanagerSuite) TestListEnvironmentsBadUser(c *gc.C) { - envManager := s.OpenAPI(c) - _, err := envManager.ListEnvironments("not a user") - c.Assert(err, gc.ErrorMatches, `invalid user name "not a user"`) -} - -func (s *environmentmanagerSuite) TestListEnvironments(c *gc.C) { - s.SetFeatureFlags(feature.JES) - owner := names.NewUserTag("user@remote") - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "first", Owner: owner}).Close() - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "second", Owner: owner}).Close() - - envManager := s.OpenAPI(c) - envs, err := envManager.ListEnvironments("user@remote") - c.Assert(err, jc.ErrorIsNil) - c.Assert(envs, gc.HasLen, 2) - - envNames := []string{envs[0].Name, envs[1].Name} - c.Assert(envNames, jc.DeepEquals, []string{"first", "second"}) - ownerNames := []string{envs[0].Owner, envs[1].Owner} - c.Assert(ownerNames, jc.DeepEquals, []string{"user@remote", "user@remote"}) -} === removed file 'src/github.com/juju/juju/api/environmentmanager/export_test.go' --- src/github.com/juju/juju/api/environmentmanager/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/environmentmanager/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,20 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environmentmanager - -import ( - "github.com/juju/juju/api/base/testing" -) - -// PatchResponses changes the internal FacadeCaller to one that lets you return -// canned results. The responseFunc will get the 'response' interface object, -// and can set attributes of it to fix the response to the caller. -// It can also return an error to have the FacadeCall return an error. -// The function returned by PatchResponses is a cleanup function that returns -// the client to its original state. 
-func PatchResponses(p testing.Patcher, client *Client, responseFunc func(interface{}) error) { - testing.PatchFacadeCall(p, &client.facade, func(request string, params, response interface{}) error { - return responseFunc(response) - }) -} === removed file 'src/github.com/juju/juju/api/environmentmanager/package_test.go' --- src/github.com/juju/juju/api/environmentmanager/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/environmentmanager/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environmentmanager_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === modified file 'src/github.com/juju/juju/api/export_test.go' --- src/github.com/juju/juju/api/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/export_test.go 2016-03-22 15:18:22 +0000 @@ -16,7 +16,7 @@ SlideAddressToFront = slideAddressToFront BestVersion = bestVersion FacadeVersions = &facadeVersions - NewHTTPClient = &newHTTPClient + ConnectWebsocket = connectWebsocket ) // SetServerAddress allows changing the URL to the internal API server @@ -31,21 +31,11 @@ return c.st.serverRoot() } -// PatchEnvironTag patches the value of the environment tag. -// It returns a function that reverts the change. -func PatchEnvironTag(st *State, envTag string) func() { - originalTag := st.environTag - st.environTag = envTag - return func() { - st.environTag = originalTag - } -} - // TestingStateParams is the parameters for NewTestingState, so that you can // only set the bits that you actually want to test. type TestingStateParams struct { Address string - EnvironTag string + ModelTag string APIHostPorts [][]network.HostPort FacadeVersions map[string][]int ServerScheme string @@ -55,10 +45,10 @@ // NewTestingState creates an api.State object that can be used for testing. It // isn't backed onto an actual API server, so actual RPC methods can't be // called on it. But it can be used for testing general behavior. -func NewTestingState(params TestingStateParams) *State { - st := &State{ +func NewTestingState(params TestingStateParams) Connection { + st := &state{ addr: params.Address, - environTag: params.EnvironTag, + modelTag: params.ModelTag, hostPorts: params.APIHostPorts, facadeVersions: params.FacadeVersions, serverScheme: params.ServerScheme, === modified file 'src/github.com/juju/juju/api/facadeversions.go' --- src/github.com/juju/juju/api/facadeversions.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/facadeversions.go 2016-03-22 15:18:22 +0000 @@ -3,6 +3,10 @@ package api +import ( + "github.com/juju/errors" +) + // facadeVersions lists the best version of facades that we know about. This // will be used to pick out a default version for communication, given the list // of known versions that the API server tells us it is capable of supporting. @@ -11,55 +15,72 @@ // New facades should start at 1. // Facades that existed before versioning start at 0. 
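// To illustrate the negotiation with made-up numbers (a sketch, not tied to
// the table below): an exact match wins, and otherwise the client falls back
// to the best lower version the server still advertises:
//
//	best := bestVersion(2, []int{0, 1, 2, 3}) // == 2 (exact match)
//	best = bestVersion(2, []int{0, 1})        // == 1 (fallback)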
var facadeVersions = map[string]int{ - "Action": 0, - "Addresser": 1, - "Agent": 1, - "AllWatcher": 0, - "AllEnvWatcher": 1, - "Annotations": 1, - "Backups": 0, - "Block": 1, - "Charms": 1, - "CharmRevisionUpdater": 0, - "Client": 0, - "Cleaner": 1, - "Deployer": 0, - "DiskManager": 1, - "EntityWatcher": 1, - "Environment": 0, - "EnvironmentManager": 1, - "FilesystemAttachmentsWatcher": 1, - "Firewaller": 1, - "HighAvailability": 1, - "ImageManager": 1, - "ImageMetadata": 1, - "InstancePoller": 1, - "KeyManager": 0, - "KeyUpdater": 0, - "LeadershipService": 1, - "Logger": 0, - "MachineManager": 1, - "Machiner": 0, - "MetricsManager": 0, - "Networker": 0, - "NotifyWatcher": 0, - "Pinger": 0, - "Provisioner": 1, - "Reboot": 1, - "RelationUnitsWatcher": 0, - "Resumer": 1, - "Rsyslog": 0, - "Service": 1, - "Storage": 1, - "Spaces": 1, - "Subnets": 1, - "StorageProvisioner": 1, - "StringsWatcher": 0, - "SystemManager": 1, - "Upgrader": 0, - "Uniter": 2, - "UserManager": 0, - "VolumeAttachmentsWatcher": 1, + "Action": 1, + "Addresser": 2, + "Agent": 2, + "AgentTools": 1, + "AllWatcher": 1, + "AllModelWatcher": 2, + "Annotations": 2, + "Backups": 1, + "Block": 2, + "Charms": 2, + "CharmRevisionUpdater": 1, + "Client": 1, + "Cleaner": 2, + "Controller": 2, + "Deployer": 1, + "DiscoverSpaces": 2, + "DiskManager": 2, + "EntityWatcher": 2, + "FilesystemAttachmentsWatcher": 2, + "Firewaller": 2, + "HighAvailability": 2, + "ImageManager": 2, + "ImageMetadata": 2, + "InstancePoller": 2, + "KeyManager": 1, + "KeyUpdater": 1, + "LeadershipService": 2, + "Logger": 1, + "MachineManager": 2, + "Machiner": 1, + "MetricsDebug": 1, + "MetricsManager": 1, + "MeterStatus": 1, + "MetricsAdder": 2, + "ModelManager": 2, + "NotifyWatcher": 1, + "Pinger": 1, + "Provisioner": 2, + "ProxyUpdater": 1, + "Reboot": 2, + "RelationUnitsWatcher": 1, + "Resumer": 2, + "RetryStrategy": 1, + "Service": 3, + "Storage": 2, + "Spaces": 2, + "Subnets": 2, + "StatusHistory": 2, + "StorageProvisioner": 2, + "StringsWatcher": 1, + "Upgrader": 1, + "UnitAssigner": 1, + "Uniter": 3, + "UserManager": 1, + "VolumeAttachmentsWatcher": 2, + "Undertaker": 1, +} + +// RegisterFacadeVersion sets the API client to prefer the given version +// for the facade. +func RegisterFacadeVersion(name string, version int) error { + if ver, ok := facadeVersions[name]; ok && ver != version { + return errors.Errorf("facade %q already registered", name) + } + facadeVersions[name] = version + return nil } // bestVersion tries to find the newest version in the version list that we can === modified file 'src/github.com/juju/juju/api/facadeversions_test.go' --- src/github.com/juju/juju/api/facadeversions_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/facadeversions_test.go 2016-03-22 15:18:22 +0000 @@ -4,14 +4,12 @@ package api_test import ( - "strings" - + jc "github.com/juju/testing/checkers" "github.com/juju/utils/set" gc "gopkg.in/check.v1" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/feature" coretesting "github.com/juju/juju/testing" ) @@ -22,9 +20,6 @@ var _ = gc.Suite(&facadeVersionSuite{}) func (s *facadeVersionSuite) TestFacadeVersionsMatchServerVersions(c *gc.C) { - // Enable feature flags so we can see them all. - devFeatures := []string{feature.JES} - s.SetFeatureFlags(strings.Join(devFeatures, ",")) // The client side code doesn't want to directly import the server side // code just to list out what versions are available. 
However, we do // want to make sure that the two sides are kept in sync. @@ -43,7 +38,7 @@ c.Check(serverFacadeNames.Difference(clientFacadeNames).SortedValues(), gc.HasLen, 0) c.Check(clientFacadeNames.Difference(serverFacadeNames).SortedValues(), gc.HasLen, 0) // Next check that the best versions match - c.Check(*api.FacadeVersions, gc.DeepEquals, serverFacadeBestVersions) + c.Check(*api.FacadeVersions, jc.DeepEquals, serverFacadeBestVersions) } func checkBestVersion(c *gc.C, desiredVersion int, versions []int, expectedVersion int) { === modified file 'src/github.com/juju/juju/api/firewaller/firewaller.go' --- src/github.com/juju/juju/api/firewaller/firewaller.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/firewaller/firewaller.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,9 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) const firewallerFacade = "Firewaller" @@ -18,15 +19,15 @@ // State provides access to the Firewaller API facade. type State struct { facade base.FacadeCaller - *common.EnvironWatcher + *common.ModelWatcher } // NewState creates a new client-side Firewaller API facade. func NewState(caller base.APICaller) *State { facadeCaller := base.NewFacadeCaller(caller, firewallerFacade) return &State{ - facade: facadeCaller, - EnvironWatcher: common.NewEnvironWatcher(facadeCaller), + facade: facadeCaller, + ModelWatcher: common.NewModelWatcher(facadeCaller), } } @@ -36,9 +37,9 @@ return st.facade.BestAPIVersion() } -// EnvironTag returns the current environment's tag. -func (st *State) EnvironTag() (names.EnvironTag, error) { - return st.facade.RawAPICaller().EnvironTag() +// ModelTag returns the current model's tag. +func (st *State) ModelTag() (names.ModelTag, error) { + return st.facade.RawAPICaller().ModelTag() } // life requests the life cycle of the given entity from the server. @@ -73,32 +74,32 @@ }, nil } -// WatchEnvironMachines returns a StringsWatcher that notifies of +// WatchModelMachines returns a StringsWatcher that notifies of // changes to the life cycles of the top level machines in the current -// environment. -func (st *State) WatchEnvironMachines() (watcher.StringsWatcher, error) { +// model. +func (st *State) WatchModelMachines() (watcher.StringsWatcher, error) { var result params.StringsWatchResult - err := st.facade.FacadeCall("WatchEnvironMachines", nil, &result) + err := st.facade.FacadeCall("WatchModelMachines", nil, &result) if err != nil { return nil, err } if err := result.Error; err != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(st.facade.RawAPICaller(), result) return w, nil } // WatchOpenedPorts returns a StringsWatcher that notifies of -// changes to the opened ports for the current environment. +// changes to the opened ports for the current model. 
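// A rough consumption sketch for the watcher methods below (names and error
// handling assumed; the "machine:subnet" id format matches the tests further
// down, e.g. "0:juju-public"):
//
//	w, err := st.WatchOpenedPorts()
//	if err != nil {
//		return errors.Trace(err)
//	}
//	defer func() { w.Kill(); _ = w.Wait() }()
//	for change := range w.Changes() {
//		// each change is a batch of ids, e.g. []string{"0:juju-public"}
//	}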
func (st *State) WatchOpenedPorts() (watcher.StringsWatcher, error) { - envTag, err := st.EnvironTag() + modelTag, err := st.ModelTag() if err != nil { - return nil, errors.Annotatef(err, "invalid environ tag") + return nil, errors.Annotatef(err, "invalid model tag") } var results params.StringsWatchResults args := params.Entities{ - Entities: []params.Entity{{Tag: envTag.String()}}, + Entities: []params.Entity{{Tag: modelTag.String()}}, } err = st.facade.FacadeCall("WatchOpenedPorts", args, &results) if err != nil { @@ -111,6 +112,6 @@ if err := result.Error; err != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(st.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/firewaller/firewaller_test.go' --- src/github.com/juju/juju/api/firewaller/firewaller_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/firewaller/firewaller_test.go 2016-03-22 15:18:22 +0000 @@ -40,7 +40,7 @@ s.units = make([]*state.Unit, 3) var err error - s.machines[0], err = s.State.AddMachine("quantal", state.JobManageEnviron, state.JobHostUnits) + s.machines[0], err = s.State.AddMachine("quantal", state.JobManageModel, state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) password, err := utils.RandomPassword() c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/api/firewaller/machine.go' --- src/github.com/juju/juju/api/firewaller/machine.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/firewaller/machine.go 2016-03-22 15:18:22 +0000 @@ -7,10 +7,11 @@ "github.com/juju/names" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" ) // Machine represents a juju machine as seen by the firewaller worker. @@ -43,7 +44,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/firewaller/machine_test.go' --- src/github.com/juju/juju/api/firewaller/machine_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/firewaller/machine_test.go 2016-03-22 15:18:22 +0000 @@ -13,7 +13,7 @@ "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher/watchertest" ) type machineSuite struct { @@ -70,8 +70,8 @@ func (s *machineSuite) TestWatchUnits(c *gc.C) { w, err := s.apiMachine.WatchUnits() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. 
wc.AssertChange("wordpress/0") @@ -92,9 +92,6 @@ c.Assert(err, jc.ErrorIsNil) wc.AssertChange("wordpress/0") wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *machineSuite) TestActiveNetworks(c *gc.C) { === modified file 'src/github.com/juju/juju/api/firewaller/service.go' --- src/github.com/juju/juju/api/firewaller/service.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/firewaller/service.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,8 @@ "github.com/juju/names" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // Service represents the state of a service. === modified file 'src/github.com/juju/juju/api/firewaller/service_test.go' --- src/github.com/juju/juju/api/firewaller/service_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/firewaller/service_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/juju/api/firewaller" "github.com/juju/juju/apiserver/params" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher/watchertest" ) type serviceSuite struct { @@ -47,8 +47,8 @@ w, err := s.apiService.Watch() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertOneChange() @@ -62,9 +62,6 @@ err = s.service.Destroy() c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *serviceSuite) TestRefresh(c *gc.C) { === modified file 'src/github.com/juju/juju/api/firewaller/state_test.go' --- src/github.com/juju/juju/api/firewaller/state_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/firewaller/state_test.go 2016-03-22 15:18:22 +0000 @@ -10,30 +10,30 @@ apitesting "github.com/juju/juju/api/testing" "github.com/juju/juju/instance" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher/watchertest" ) type stateSuite struct { firewallerSuite - *apitesting.EnvironWatcherTests + *apitesting.ModelWatcherTests } var _ = gc.Suite(&stateSuite{}) func (s *stateSuite) SetUpTest(c *gc.C) { s.firewallerSuite.SetUpTest(c) - s.EnvironWatcherTests = apitesting.NewEnvironWatcherTests(s.firewaller, s.BackingState, true) + s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.firewaller, s.BackingState, true) } func (s *stateSuite) TearDownTest(c *gc.C) { s.firewallerSuite.TearDownTest(c) } -func (s *stateSuite) TestWatchEnvironMachines(c *gc.C) { - w, err := s.firewaller.WatchEnvironMachines() +func (s *stateSuite) TestWatchModelMachines(c *gc.C) { + w, err := s.firewaller.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. 
wc.AssertChange(s.machines[0].Id(), s.machines[1].Id(), s.machines[2].Id()) @@ -56,9 +56,6 @@ _, err = s.State.AddMachineInsideMachine(template, s.machines[0].Id(), instance.LXC) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *stateSuite) TestWatchOpenedPorts(c *gc.C) { @@ -70,8 +67,8 @@ w, err := s.firewaller.WatchOpenedPorts() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() expectChanges := []string{ "0:juju-public", @@ -102,7 +99,4 @@ c.Assert(err, jc.ErrorIsNil) wc.AssertChange("1:juju-public") wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } === modified file 'src/github.com/juju/juju/api/highavailability/client.go' --- src/github.com/juju/juju/api/highavailability/client.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/highavailability/client.go 2016-03-22 15:18:22 +0000 @@ -7,67 +7,82 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" + "github.com/juju/replicaset" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/constraints" + "github.com/juju/juju/mongo" ) var logger = loggo.GetLogger("juju.api.highavailability") -// Client provides access to the high availability service, used to manage state servers. +// Client provides access to the high availability service, used to manage controllers. type Client struct { base.ClientFacade - facade base.FacadeCaller - environTag names.EnvironTag + facade base.FacadeCaller + modelTag names.ModelTag } // NewClient returns a new HighAvailability client. func NewClient(caller base.APICallCloser) *Client { - environTag, err := caller.EnvironTag() + modelTag, err := caller.ModelTag() if err != nil { - logger.Errorf("ignoring invalid environment tag: %v", err) + logger.Errorf("ignoring invalid model tag: %v", err) } frontend, backend := base.NewClientFacade(caller, "HighAvailability") - return &Client{ClientFacade: frontend, facade: backend, environTag: environTag} + return &Client{ClientFacade: frontend, facade: backend, modelTag: modelTag} } -// EnsureAvailability ensures the availability of Juju state servers. -func (c *Client) EnsureAvailability( - numStateServers int, cons constraints.Value, series string, placement []string, -) (params.StateServersChanges, error) { +// EnableHA ensures the availability of Juju controllers. +func (c *Client) EnableHA( + numControllers int, cons constraints.Value, series string, placement []string, +) (params.ControllersChanges, error) { - var results params.StateServersChangeResults - arg := params.StateServersSpecs{ - Specs: []params.StateServersSpec{{ - EnvironTag: c.environTag.String(), - NumStateServers: numStateServers, - Constraints: cons, - Series: series, - Placement: placement, + var results params.ControllersChangeResults + arg := params.ControllersSpecs{ + Specs: []params.ControllersSpec{{ + ModelTag: c.modelTag.String(), + NumControllers: numControllers, + Constraints: cons, + Series: series, + Placement: placement, }}} - var err error - // We need to retain compatibility with older Juju deployments without the new HighAvailability facade. 
- if c.facade.BestAPIVersion() < 1 { - if len(placement) > 0 { - return params.StateServersChanges{}, errors.Errorf("placement directives not supported with this version of Juju") - } - caller := c.facade.RawAPICaller() - err = caller.APICall("Client", caller.BestFacadeVersion("Client"), "", "EnsureAvailability", arg, &results) - } else { - err = c.facade.FacadeCall("EnsureAvailability", arg, &results) - } - + err := c.facade.FacadeCall("EnableHA", arg, &results) if err != nil { - return params.StateServersChanges{}, err + return params.ControllersChanges{}, err } if len(results.Results) != 1 { - return params.StateServersChanges{}, errors.Errorf("expected 1 result, got %d", len(results.Results)) + return params.ControllersChanges{}, errors.Errorf("expected 1 result, got %d", len(results.Results)) } result := results.Results[0] if result.Error != nil { - return params.StateServersChanges{}, result.Error + return params.ControllersChanges{}, result.Error } return result.Result, nil } + +// MongoUpgradeMode makes all Slave members of the HA +// shut down their mongo server. +func (c *Client) MongoUpgradeMode(v mongo.Version) (params.MongoUpgradeResults, error) { + arg := params.UpgradeMongoParams{ + Target: v, + } + results := params.MongoUpgradeResults{} + if err := c.facade.FacadeCall("StopHAReplicationForUpgrade", arg, &results); err != nil { + return results, errors.Annotate(err, "cannot enter mongo upgrade mode") + } + return results, nil +} + +// ResumeHAReplicationAfterUpgrade makes all members part of HA again. +func (c *Client) ResumeHAReplicationAfterUpgrade(members []replicaset.Member) error { + arg := params.ResumeReplicationParams{ + Members: members, + } + if err := c.facade.FacadeCall("ResumeHAReplicationAfterUpgrade", arg, nil); err != nil { + return errors.Annotate(err, "cannot resume HA") + } + return nil +} === modified file 'src/github.com/juju/juju/api/highavailability/client_test.go' --- src/github.com/juju/juju/api/highavailability/client_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/highavailability/client_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,6 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/api/highavailability" - "github.com/juju/juju/apiserver/common" "github.com/juju/juju/constraints" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" @@ -47,17 +46,17 @@ return pinger } -func assertEnsureAvailability(c *gc.C, s *jujutesting.JujuConnSuite) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) +func assertEnableHA(c *gc.C, s *jujutesting.JujuConnSuite) { + _, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) - // We have to ensure the agents are alive, or EnsureAvailability will + // We have to ensure the agents are alive, or EnableHA will // create more to replace them. 
pingerA := setAgentPresence(c, s, "0") defer assertKill(c, pingerA) emptyCons := constraints.Value{} client := highavailability.NewClient(s.APIState) - result, err := client.EnsureAvailability(3, emptyCons, "", nil) + result, err := client.EnableHA(3, emptyCons, "", nil) c.Assert(err, jc.ErrorIsNil) c.Assert(result.Maintained, gc.DeepEquals, []string{"machine-0"}) @@ -72,32 +71,11 @@ c.Assert(machines[2].Series(), gc.Equals, "quantal") } -func (s *clientSuite) TestClientEnsureAvailability(c *gc.C) { - assertEnsureAvailability(c, &s.JujuConnSuite) -} - -func (s *clientSuite) TestClientEnsureAvailabilityVersion(c *gc.C) { - client := highavailability.NewClient(s.APIState) - c.Assert(client.BestAPIVersion(), gc.Equals, 1) -} - -type clientLegacySuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&clientLegacySuite{}) - -func (s *clientLegacySuite) SetUpTest(c *gc.C) { - common.Facades.Discard("HighAvailability", 1) - s.JujuConnSuite.SetUpTest(c) -} - -func (s *clientLegacySuite) TestEnsureAvailabilityLegacy(c *gc.C) { - assertEnsureAvailability(c, &s.JujuConnSuite) -} - -func (s *clientLegacySuite) TestEnsureAvailabilityLegacyRejectsPlacement(c *gc.C) { - client := highavailability.NewClient(s.APIState) - _, err := client.EnsureAvailability(3, constraints.Value{}, "", []string{"machine"}) - c.Assert(err, gc.ErrorMatches, "placement directives not supported with this version of Juju") +func (s *clientSuite) TestClientEnableHA(c *gc.C) { + assertEnableHA(c, &s.JujuConnSuite) +} + +func (s *clientSuite) TestClientEnableHAVersion(c *gc.C) { + client := highavailability.NewClient(s.APIState) + c.Assert(client.BestAPIVersion(), gc.Equals, 2) } === modified file 'src/github.com/juju/juju/api/http.go' --- src/github.com/juju/juju/api/http.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/http.go 2016-03-22 15:18:22 +0000 @@ -4,92 +4,149 @@ package api import ( - "crypto/tls" + "bytes" + "encoding/json" "io" "net/http" - "net/url" "github.com/juju/errors" - "github.com/juju/utils" + "github.com/juju/httprequest" + "gopkg.in/macaroon-bakery.v1/httpbakery" - apihttp "github.com/juju/juju/api/http" - apiserverhttp "github.com/juju/juju/apiserver/http" + "github.com/juju/juju/apiserver/params" ) -var newHTTPClient = func(s Connection) apihttp.HTTPClient { - return s.NewHTTPClient() -} - -// NewHTTPClient returns an HTTP client initialized based on State. -func (s *State) NewHTTPClient() *http.Client { - httpclient := utils.GetValidatingHTTPClient() - tlsconfig := tls.Config{ - RootCAs: s.certPool, - // We want to be specific here (rather than just using "anything". - // See commit 7fc118f015d8480dfad7831788e4b8c0432205e8 (PR 899). - ServerName: "juju-apiserver", - } - httpclient.Transport = utils.NewHttpTLSTransport(&tlsconfig) - return httpclient -} - -// NewHTTPRequest returns a new API-supporting HTTP request based on State. -func (s *State) NewHTTPRequest(method, path string) (*http.Request, error) { - baseURL, err := url.Parse(s.serverRoot()) - if err != nil { - return nil, errors.Annotatef(err, "while parsing base URL (%s)", s.serverRoot()) - } - - tag, err := s.EnvironTag() - if err != nil { - return nil, errors.Annotate(err, "while extracting environment UUID") - } - uuid := tag.Id() - - req, err := apiserverhttp.NewRequest(method, baseURL, path, uuid, s.tag, s.password) - return req, errors.Trace(err) -} - -// SendHTTPRequest sends a GET request using the HTTP client derived from State. 
-func (s *State) SendHTTPRequest(path string, args interface{}) (*http.Request, *http.Response, error) { - req, err := s.NewHTTPRequest("GET", path) - if err != nil { - return nil, nil, errors.Trace(err) - } - - err = apiserverhttp.SetRequestArgs(req, args) - if err != nil { - return nil, nil, errors.Annotate(err, "while setting request body") - } - - httpclient := newHTTPClient(s) - resp, err := httpclient.Do(req) - if err != nil { - return nil, nil, errors.Annotate(err, "while sending HTTP request") - } - return req, resp, nil -} - -// SendHTTPRequestReader sends a PUT request using the HTTP client derived -// from State. The provided io.Reader and associated JSON metadata are -// attached to the request body as multi-part data. The name parameter -// identifies the attached data's part in the multi-part data. That name -// doesn't have any semantic significance in juju, so the provided value -// is strictly informational. -func (s *State) SendHTTPRequestReader(path string, attached io.Reader, meta interface{}, name string) (*http.Request, *http.Response, error) { - req, err := s.NewHTTPRequest("PUT", path) - if err != nil { - return nil, nil, errors.Trace(err) - } - - if err := apiserverhttp.AttachToRequest(req, attached, meta, name); err != nil { - return nil, nil, errors.Trace(err) - } - - httpclient := newHTTPClient(s) - resp, err := httpclient.Do(req) - if err != nil { - return nil, nil, errors.Annotate(err, "while sending HTTP request") - } - return req, resp, nil +// HTTPClient implements Connection.APICaller.HTTPClient. +func (s *state) HTTPClient() (*httprequest.Client, error) { + if !s.isLoggedIn() { + return nil, errors.New("no HTTP client available without logging in") + } + baseURL, err := s.apiEndpoint("/", "") + if err != nil { + return nil, errors.Trace(err) + } + return &httprequest.Client{ + BaseURL: baseURL.String(), + Doer: httpRequestDoer{ + st: s, + }, + UnmarshalError: unmarshalHTTPErrorResponse, + }, nil +} + +// httpRequestDoer implements httprequest.Doer and httprequest.DoerWithBody +// by using httpbakery and the state to make authenticated requests to +// the API server. +type httpRequestDoer struct { + st *state +} + +var _ httprequest.Doer = httpRequestDoer{} + +var _ httprequest.DoerWithBody = httpRequestDoer{} + +// Do implements httprequest.Doer.Do. +func (doer httpRequestDoer) Do(req *http.Request) (*http.Response, error) { + return doer.DoWithBody(req, nil) +} + +// DoWithBody implements httprequest.DoerWithBody.DoWithBody. +func (doer httpRequestDoer) DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + // Add basic auth if appropriate + // Call doer.bakeryClient.DoWithBodyAndCustomError + if doer.st.tag != "" { + req.SetBasicAuth(doer.st.tag, doer.st.password) + } + return doer.st.bakeryClient.DoWithBodyAndCustomError(req, body, func(resp *http.Response) error { + // At this point we are only interested in errors that + // the bakery cares about, and the CodeDischargeRequired + // error is the only one, and that always comes with a + // response code StatusUnauthorized. + if resp.StatusCode != http.StatusUnauthorized { + return nil + } + return bakeryError(unmarshalHTTPErrorResponse(resp)) + }) +} + +// unmarshalHTTPErrorResponse unmarshals an error response from +// an HTTP endpoint. For historical reasons, these endpoints +// return several different incompatible error response formats. +// We cope with this by accepting all of the possible formats +// and unmarshaling accordingly. +// +// It always returns a non-nil error. 
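// For instance (illustrative payloads only), each of these response bodies
// is decoded by the function below into a *params.Error with Message "boom":
//
//	{"Message": "boom"}             // params.Error directly in the body
//	{"Error": {"Message": "boom"}}  // params.ErrorResponse wrapper
//	{"Error": "boom"}               // params.CharmsResponse form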
+func unmarshalHTTPErrorResponse(resp *http.Response) error { + var body json.RawMessage + if err := httprequest.UnmarshalJSONResponse(resp, &body); err != nil { + return errors.Trace(err) + } + // genericErrorResponse defines a struct that is compatible with all the + // known error types, so that we can know which of the + // possible error types has been returned. + // + // Another possible approach might be to look at resp.Request.URL.Path + // and determine the expected error type from that, but that + // seems more fragile than this approach. + type genericErrorResponse struct { + Error json.RawMessage + } + var generic genericErrorResponse + if err := json.Unmarshal(body, &generic); err != nil { + return errors.Annotatef(err, "incompatible error response") + } + if bytes.HasPrefix(generic.Error, []byte(`"`)) { + // The error message is in a string, which means that + // the error must be in a params.CharmsResponse + var resp params.CharmsResponse + if err := json.Unmarshal(body, &resp); err != nil { + return errors.Annotatef(err, "incompatible error response") + } + return ¶ms.Error{ + Message: resp.Error, + Code: resp.ErrorCode, + Info: resp.ErrorInfo, + } + } + var errorBody []byte + if len(generic.Error) > 0 { + // We have an Error field, therefore the error must be in that. + // (it's a params.ErrorResponse) + errorBody = generic.Error + } else { + // There wasn't an Error field, so the error must be directly + // in the body of the response. + errorBody = body + } + var perr params.Error + if err := json.Unmarshal(errorBody, &perr); err != nil { + return errors.Annotatef(err, "incompatible error response") + } + if perr.Message == "" { + return errors.Errorf("error response with no message") + } + return &perr +} + +// bakeryError translates any discharge-required error into +// an error value that the httpbakery package will recognize. +// Other errors are returned unchanged. +func bakeryError(err error) error { + if params.ErrCode(err) != params.CodeDischargeRequired { + return err + } + errResp := errors.Cause(err).(*params.Error) + if errResp.Info == nil { + return errors.Annotatef(err, "no error info found in discharge-required response error") + } + // It's a discharge-required error, so make an appropriate httpbakery + // error from it. + return &httpbakery.Error{ + Message: err.Error(), + Code: httpbakery.ErrDischargeRequired, + Info: &httpbakery.ErrorInfo{ + Macaroon: errResp.Info.Macaroon, + MacaroonPath: errResp.Info.MacaroonPath, + }, + } } === removed directory 'src/github.com/juju/juju/api/http/testing' === removed file 'src/github.com/juju/juju/api/http/testing/fakes.go' --- src/github.com/juju/juju/api/http/testing/fakes.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/http/testing/fakes.go 1970-01-01 00:00:00 +0000 @@ -1,101 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - "io" - "net/http" - - gc "gopkg.in/check.v1" - - apihttptesting "github.com/juju/juju/apiserver/http/testing" -) - -// FakeHTTPClient is used in testing in place of an actual http.Client. -type FakeHTTPClient struct { - // Error is the error that will be returned for any calls. - Error error - - // Response is the response returned from calls. - Response *http.Response - - // Calls is the record of which methods were called. - Calls []string - - // RequestArg is the request that was passed to a call. 
- RequestArg *http.Request -} - -// NewFakeHTTPClient returns a fake with Response set to an OK status, -// no headers, and no body. -func NewFakeHTTPClient() *FakeHTTPClient { - resp := apihttptesting.NewHTTPResponse() - fake := FakeHTTPClient{ - Response: &resp.Response, - } - return &fake -} - -// CheckCalled checks that the Do was called once with the request and -// returned the correct value. -func (f *FakeHTTPClient) CheckCalled(c *gc.C, req *http.Request, resp *http.Response) { - c.Check(f.Calls, gc.DeepEquals, []string{"Do"}) - c.Check(f.RequestArg, gc.Equals, req) - c.Check(resp, gc.Equals, f.Response) -} - -// Do fakes the behavior of http.Client.Do(). -func (f *FakeHTTPClient) Do(req *http.Request) (*http.Response, error) { - f.Calls = append(f.Calls, "Do") - f.RequestArg = req - return f.Response, f.Error -} - -type FakeClient struct { - calls []string - pathArg string - argsArg interface{} - attachedArg io.Reader - metaArg interface{} - nameArg string - - // Error is the error that will be returned for any calls. - Error error - // Request is the request returned from calls. - Request *http.Request - // Response is the response returned from calls. - Response *http.Response -} - -func (f *FakeClient) SendHTTPRequest(path string, args interface{}) (*http.Request, *http.Response, error) { - f.calls = append(f.calls, "SendHTTPRequest") - f.pathArg = path - f.argsArg = args - return f.Request, f.Response, f.Error -} - -func (f *FakeClient) SendHTTPRequestReader(path string, attached io.Reader, meta interface{}, name string) (*http.Request, *http.Response, error) { - f.calls = append(f.calls, "SendHTTPRequestReader") - f.pathArg = path - f.attachedArg = attached - f.metaArg = meta - f.nameArg = name - return f.Request, f.Response, f.Error -} - -// CheckCalled checks that the fake was called properly. -func (f *FakeClient) CheckCalled(c *gc.C, path string, args interface{}, calls ...string) { - c.Check(f.calls, gc.DeepEquals, calls) - c.Check(f.pathArg, gc.Equals, path) - c.Check(f.argsArg, gc.Equals, args) -} - -// CheckCalledReader checks that the fake was called properly. -func (f *FakeClient) CheckCalledReader(c *gc.C, path string, attached io.Reader, meta interface{}, name string, calls ...string) { - c.Check(f.calls, gc.DeepEquals, calls) - c.Check(f.pathArg, gc.Equals, path) - c.Check(f.attachedArg, gc.Equals, attached) - c.Check(f.metaArg, gc.DeepEquals, meta) - c.Check(f.nameArg, gc.Equals, name) -} === removed file 'src/github.com/juju/juju/api/http/testing/suite.go' --- src/github.com/juju/juju/api/http/testing/suite.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/http/testing/suite.go 1970-01-01 00:00:00 +0000 @@ -1,132 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - apiserverhttp "github.com/juju/juju/apiserver/http" - apihttptesting "github.com/juju/juju/apiserver/http/testing" - "github.com/juju/juju/apiserver/params" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/testing" -) - -// HTTPSuite provides basic testing capability for API HTTP tests. -type HTTPSuite struct { - testing.BaseSuite - - // Fake is the fake HTTP client used in tests. - Fake *FakeHTTPClient - - // Hostname is the API server's hostname. 
- Hostname string - - // Username is the username to use for API connections. - Username string - - // Password is the password to use for API connections. - Password string -} - -func (s *HTTPSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - - s.Fake = NewFakeHTTPClient() - s.Hostname = "localhost" - s.Username = dummy.AdminUserTag().String() - s.Password = jujutesting.AdminSecret -} - -// CheckRequest verifies that the HTTP request matches the args -// as an API request should. We only check API-related request fields. -func (s *HTTPSuite) CheckRequest(c *gc.C, req *http.Request, method, path string) { - apihttptesting.CheckRequest(c, req, method, s.Username, s.Password, s.Hostname, path) -} - -// APIMethodSuite provides testing functionality for specific API methods. -type APIMethodSuite struct { - HTTPSuite - - // HTTPMethod is the HTTP method to use for the suite. - HTTPMethod string - - // Name is the name of the API method. - Name string -} - -// CheckRequest verifies that the HTTP request matches the args -// as an API request should. We only check API-related request fields. -func (s *APIMethodSuite) CheckRequest(c *gc.C, req *http.Request) { - s.HTTPSuite.CheckRequest(c, req, s.HTTPMethod, s.Name) -} - -// APIHTTPClientSuite wraps a fake API HTTP client (see api/http.go). -// It provides methods for setting the response the client will return. -type APIHTTPClientSuite struct { - testing.BaseSuite - - // FakeClient is the fake API HTTP Client that may be used in testing. - FakeClient FakeClient -} - -// SetResponse sets the HTTP response on the fake client using the -// provided values. The data is set as the body of the response. -func (s *APIHTTPClientSuite) SetResponse(c *gc.C, status int, data []byte, ctype string) { - resp := http.Response{ - StatusCode: status, - Header: make(http.Header), - } - - resp.Header.Set("Content-Type", ctype) - resp.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - - s.FakeClient.Response = &resp -} - -// SetJSONSuccess sets a success response on the fake client. The -// provided result is JSON-encoded and set as the body of the response. -// The content-type is thus application/json. A status code of -// http.StatusOK (200) is used. -func (s *APIHTTPClientSuite) SetJSONSuccess(c *gc.C, result interface{}) { - status := http.StatusOK - data, err := json.Marshal(result) - c.Assert(err, jc.ErrorIsNil) - - s.SetResponse(c, status, data, apiserverhttp.CTypeJSON) -} - -// SetFailure sets a failure response on the fake client. The provided -// message is packed into an apiserver/params.Error. That error is then -// set as the body of the response. The content-type is thus -// application/json. -func (s *APIHTTPClientSuite) SetFailure(c *gc.C, msg string, status int) { - failure := params.Error{ - Message: msg, - } - data, err := json.Marshal(&failure) - c.Assert(err, jc.ErrorIsNil) - - s.SetResponse(c, status, data, apiserverhttp.CTypeJSON) -} - -// SetError sets an error response on the fake client. A content-type -// of application/octet-stream is used. The provided message is set as -// the body of the response. Any status code less than 0 is replaced -// with http.StatusInternalServerError (500). 
-func (s *APIHTTPClientSuite) SetError(c *gc.C, msg string, status int) { - if status < 0 { - status = http.StatusInternalServerError - } - - data := []byte(msg) - s.SetResponse(c, status, data, apiserverhttp.CTypeRaw) -} === modified file 'src/github.com/juju/juju/api/http_test.go' --- src/github.com/juju/juju/api/http_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/http_test.go 2016-03-22 15:18:22 +0000 @@ -1,83 +1,175 @@ -// Copyright 2014 Canonical Ltd. +// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package api_test import ( "net/http" + "net/http/httptest" + "reflect" + "github.com/juju/errors" + "github.com/juju/httprequest" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/api" - apihttp "github.com/juju/juju/api/http" - apihttptesting "github.com/juju/juju/api/http/testing" + "github.com/juju/juju/apiserver/params" jujutesting "github.com/juju/juju/juju/testing" ) type httpSuite struct { - apihttptesting.HTTPSuite jujutesting.JujuConnSuite + + client *httprequest.Client } var _ = gc.Suite(&httpSuite{}) -func (s *httpSuite) SetUpSuite(c *gc.C) { - s.HTTPSuite.SetUpSuite(c) - s.JujuConnSuite.SetUpSuite(c) -} - -func (s *httpSuite) TearDownSuite(c *gc.C) { - s.HTTPSuite.TearDownSuite(c) - s.JujuConnSuite.TearDownSuite(c) -} - func (s *httpSuite) SetUpTest(c *gc.C) { - s.HTTPSuite.SetUpTest(c) s.JujuConnSuite.SetUpTest(c) - // This determines the client used in SendHTTPRequest(). - s.PatchValue(api.NewHTTPClient, - func(api.Connection) apihttp.HTTPClient { - return s.Fake - }, - ) -} - -func (s *httpSuite) TearDownTest(c *gc.C) { - s.HTTPSuite.TearDownTest(c) - s.JujuConnSuite.TearDownTest(c) -} - -func (s *httpSuite) TestNewHTTPRequestSuccess(c *gc.C) { - req, err := s.APIState.NewHTTPRequest("GET", "somefacade") - c.Assert(err, jc.ErrorIsNil) - - s.CheckRequest(c, req, "GET", "somefacade") -} - -func (s *httpSuite) TestNewHTTPClientCorrectTransport(c *gc.C) { - httpClient := s.APIState.NewHTTPClient() - - c.Assert(httpClient.Transport, gc.NotNil) - c.Assert(httpClient.Transport, gc.FitsTypeOf, (*http.Transport)(nil)) - config := httpClient.Transport.(*http.Transport).TLSClientConfig - - c.Check(config.RootCAs, gc.NotNil) -} - -func (s *httpSuite) TestNewHTTPClientValidatesCert(c *gc.C) { - req, err := s.APIState.NewHTTPRequest("GET", "somefacade") - httpClient := s.APIState.NewHTTPClient() - resp, err := httpClient.Do(req) - c.Assert(err, jc.ErrorIsNil) - - c.Check(resp.StatusCode, gc.Equals, http.StatusNotFound) -} - -func (s *httpSuite) TestSendHTTPRequestSuccess(c *gc.C) { - req, resp, err := s.APIState.SendHTTPRequest("somefacade", nil) - c.Assert(err, jc.ErrorIsNil) - - s.Fake.CheckCalled(c, req, resp) + client, err := s.APIState.HTTPClient() + c.Assert(err, gc.IsNil) + s.client = client +} + +var httpClientTests = []struct { + about string + handler http.HandlerFunc + expectResponse interface{} + expectError string + expectErrorCode string + expectErrorInfo *params.ErrorInfo +}{{ + about: "success", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusOK, "hello, world") + }, + expectResponse: newString("hello, world"), +}, { + about: "unauthorized status without discharge-required error", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusUnauthorized, params.Error{ + Message: "something", + }) + }, + expectError: `GET http://.*/: something`, +}, { + about: "non-JSON error response", + 
handler: http.NotFound, + expectError: `GET http://.*/: unexpected content type text/plain; want application/json; content: 404 page not found`, +}, { + about: "bad error response", + handler: func(w http.ResponseWriter, req *http.Request) { + type badResponse struct { + Message map[string]int + } + httprequest.WriteJSON(w, http.StatusUnauthorized, badResponse{ + Message: make(map[string]int), + }) + }, + expectError: `GET http://.*/: incompatible error response: json: cannot unmarshal object into Go value of type string`, +}, { + about: "bad charms error response", + handler: func(w http.ResponseWriter, req *http.Request) { + type badResponse struct { + Error string + CharmURL map[string]int + } + httprequest.WriteJSON(w, http.StatusUnauthorized, badResponse{ + Error: "something", + CharmURL: make(map[string]int), + }) + }, + expectError: `GET http://.*/: incompatible error response: json: cannot unmarshal object into Go value of type string`, +}, { + about: "no message in ErrorResponse", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusUnauthorized, params.ErrorResult{ + Error: ¶ms.Error{}, + }) + }, + expectError: `GET http://.*/: error response with no message`, +}, { + about: "no message in Error", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusUnauthorized, params.Error{}) + }, + expectError: `GET http://.*/: error response with no message`, +}, { + about: "charms error response", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusBadRequest, params.CharmsResponse{ + Error: "some error", + ErrorCode: params.CodeBadRequest, + ErrorInfo: ¶ms.ErrorInfo{ + MacaroonPath: "foo", + }, + }) + }, + expectError: `GET http://.*/: some error`, + expectErrorCode: params.CodeBadRequest, + expectErrorInfo: ¶ms.ErrorInfo{ + MacaroonPath: "foo", + }, +}, { + about: "discharge-required response with no error info", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusUnauthorized, params.Error{ + Message: "some error", + Code: params.CodeDischargeRequired, + }) + }, + expectError: `GET http://.*/: no error info found in discharge-required response error: some error`, + expectErrorCode: params.CodeDischargeRequired, +}, { + about: "discharge-required response with no macaroon", + handler: func(w http.ResponseWriter, req *http.Request) { + httprequest.WriteJSON(w, http.StatusUnauthorized, params.Error{ + Message: "some error", + Code: params.CodeDischargeRequired, + Info: ¶ms.ErrorInfo{}, + }) + }, + expectError: `GET http://.*/: no macaroon found in discharge-required response`, +}} + +func (s *httpSuite) TestHTTPClient(c *gc.C) { + var handler http.HandlerFunc + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + handler(w, req) + })) + defer srv.Close() + s.client.BaseURL = srv.URL + for i, test := range httpClientTests { + c.Logf("test %d: %s", i, test.about) + handler = test.handler + var resp interface{} + if test.expectResponse != nil { + resp = reflect.New(reflect.TypeOf(test.expectResponse).Elem()).Interface() + } + err := s.client.Get("/", resp) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(params.ErrCode(err), gc.Equals, test.expectErrorCode) + if err, ok := errors.Cause(err).(*params.Error); ok { + c.Assert(err.Info, jc.DeepEquals, test.expectErrorInfo) + } else if test.expectErrorInfo != nil { + c.Fatalf("no error 
info found in error") + } + continue + } + c.Assert(err, gc.IsNil) + c.Assert(resp, jc.DeepEquals, test.expectResponse) + } +} + +// Note: the fact that the code works against the actual API server is +// well tested by some of the other API tests. +// This suite focuses on less reachable paths by changing +// the BaseURL of the httprequest.Client so that +// we can use our own custom servers. + +func newString(s string) *string { + return &s } === modified file 'src/github.com/juju/juju/api/imagemetadata/client.go' --- src/github.com/juju/juju/api/imagemetadata/client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/imagemetadata/client.go 2016-03-22 15:18:22 +0000 @@ -31,14 +31,14 @@ func (c *Client) List( stream, region string, series, arches []string, - virtualType, rootStorageType string, + virtType, rootStorageType string, ) ([]params.CloudImageMetadata, error) { in := params.ImageMetadataFilter{ Region: region, Series: series, Arches: arches, Stream: stream, - VirtualType: virtualType, + VirtType: virtType, RootStorageType: rootStorageType, } out := params.ListCloudImageMetadataResult{} @@ -48,12 +48,49 @@ // Save saves specified image metadata. // Supports bulk saves for scenarios like cloud image metadata caching at bootstrap. -func (c *Client) Save(metadata []params.CloudImageMetadata) ([]params.ErrorResult, error) { - in := params.MetadataSaveParams{Metadata: metadata} +func (c *Client) Save(metadata []params.CloudImageMetadata) error { + in := params.MetadataSaveParams{ + Metadata: []params.CloudImageMetadataList{{metadata}}, + } out := params.ErrorResults{} err := c.facade.FacadeCall("Save", in, &out) if err != nil { - return nil, errors.Trace(err) - } - return out.Results, nil + return errors.Trace(err) + } + if len(out.Results) != 1 { + return errors.Errorf("exected 1 result, got %d", len(out.Results)) + } + if out.Results[0].Error != nil { + return errors.Trace(out.Results[0].Error) + } + return nil +} + +// UpdateFromPublishedImages retrieves currently published image metadata and +// updates stored ones accordingly. +// This method is primarily intended for a worker. +func (c *Client) UpdateFromPublishedImages() error { + return errors.Trace( + c.facade.FacadeCall("UpdateFromPublishedImages", nil, nil)) +} + +// Delete removes image metadata for given image id from stored metadata. 
+func (c *Client) Delete(imageId string) error { + in := params.MetadataImageIds{[]string{imageId}} + out := params.ErrorResults{} + err := c.facade.FacadeCall("Delete", in, &out) + if err != nil { + return errors.Trace(err) + } + + result := out.Results + if len(result) != 1 { + return errors.Errorf("expected to find one result for image id %q but found %d", imageId, len(result)) + } + + theOne := result[0] + if theOne.Error != nil { + return errors.Trace(theOne.Error) + } + return nil } === modified file 'src/github.com/juju/juju/api/imagemetadata/client_test.go' --- src/github.com/juju/juju/api/imagemetadata/client_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/imagemetadata/client_test.go 2016-03-22 15:18:22 +0000 @@ -4,8 +4,11 @@ package imagemetadata_test import ( + "regexp" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/api/base/testing" @@ -25,9 +28,14 @@ imageId := "imageid" stream := "stream" region := "region" - series := "series" + + // This is used by the filters in the search function. + testSeries := "trusty" + version, err := series.SeriesVersion(testSeries) + c.Assert(err, jc.ErrorIsNil) + arch := "arch" - virtualType := "virtual-type" + virtType := "virt-type" rootStorageType := "root-storage-type" rootStorageSize := uint64(1024) source := "source" @@ -53,9 +61,10 @@ ImageId: imageId, Stream: args.Stream, Region: args.Region, + Version: versionFromSeries(args.Series[0]), Series: args.Series[0], Arch: args.Arches[0], - VirtualType: args.VirtualType, + VirtType: args.VirtType, RootStorageType: args.RootStorageType, RootStorageSize: &rootStorageSize, Source: source, @@ -69,8 +78,8 @@ client := imagemetadata.NewClient(apiCaller) found, err := client.List( stream, region, - []string{series}, []string{arch}, - virtualType, rootStorageType, + []string{testSeries}, []string{arch}, + virtType, rootStorageType, ) c.Check(err, jc.ErrorIsNil) @@ -80,9 +89,10 @@ ImageId: imageId, Stream: stream, Region: region, - Series: series, + Version: version, + Series: testSeries, Arch: arch, - VirtualType: virtualType, + VirtType: virtType, RootStorageType: rootStorageType, RootStorageSize: &rootStorageSize, Source: source, @@ -118,11 +128,188 @@ m := params.CloudImageMetadata{} called := false + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "ImageMetadata") + c.Check(id, gc.Equals, "") + 
c.Check(request, gc.Equals, "Save") + return errors.New(msg) + }) + client := imagemetadata.NewClient(apiCaller) + err := client.Save(m) + c.Assert(errors.Cause(err), gc.ErrorMatches, msg) +} + +func (s *imagemetadataSuite) TestSaveFacadeCallErrorResult(c *gc.C) { + m := []params.CloudImageMetadata{{}} + msg := "facade failure" + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "ImageMetadata") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "Save") + *(result.(*params.ErrorResults)) = params.ErrorResults{ + Results: []params.ErrorResult{ + {Error: ¶ms.Error{Message: msg}}, + }, + } + return nil + }) + client := imagemetadata.NewClient(apiCaller) + err := client.Save(m) + c.Assert(errors.Cause(err), gc.ErrorMatches, msg) +} + +func (s *imagemetadataSuite) TestUpdateFromPublishedImages(c *gc.C) { + called := false + + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "ImageMetadata") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "UpdateFromPublishedImages") + return nil + }) + + client := imagemetadata.NewClient(apiCaller) + err := client.UpdateFromPublishedImages() + c.Check(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *imagemetadataSuite) TestUpdateFromPublishedImagesFacadeCallError(c *gc.C) { + called := false + msg := "facade failure" + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "ImageMetadata") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "UpdateFromPublishedImages") + return errors.New(msg) + }) + client := imagemetadata.NewClient(apiCaller) + err := client.UpdateFromPublishedImages() + c.Assert(errors.Cause(err), gc.ErrorMatches, msg) + c.Assert(called, jc.IsTrue) +} + +var versionFromSeries = func(s string) string { + // For testing purposes only, there will not be an error :D + v, _ := series.SeriesVersion(s) + return v +} + +func (s *imagemetadataSuite) TestDelete(c *gc.C) { + imageId := "tst12345" + called := false + + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "ImageMetadata") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "Delete") + + c.Assert(a, gc.FitsTypeOf, params.MetadataImageIds{}) + c.Assert(a.(params.MetadataImageIds).Ids, gc.DeepEquals, []string{imageId}) + + results := result.(*params.ErrorResults) + results.Results = []params.ErrorResult{{}} + return nil + }) + + client := imagemetadata.NewClient(apiCaller) + err := client.Delete(imageId) + c.Check(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *imagemetadataSuite) TestDeleteMultipleResult(c *gc.C) { + imageId := "tst12345" + called := false + + apiCaller := testing.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "ImageMetadata") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "Delete") + + results := result.(*params.ErrorResults) + results.Results = []params.ErrorResult{{}, {}} + return nil + }) + + client := imagemetadata.NewClient(apiCaller) + err := client.Delete(imageId) + c.Assert(err, gc.ErrorMatches, 
regexp.QuoteMeta(`expected to find one result for image id "tst12345" but found 2`)) + c.Assert(called, jc.IsTrue) +} + +func (s *imagemetadataSuite) TestDeleteFailure(c *gc.C) { + called := false msg := "save failure" - expected := []params.ErrorResult{ - params.ErrorResult{}, - params.ErrorResult{¶ms.Error{Message: msg}}, - } apiCaller := testing.APICallerFunc( func(objType string, @@ -133,29 +320,22 @@ called = true c.Check(objType, gc.Equals, "ImageMetadata") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "Save") - - args, ok := a.(params.MetadataSaveParams) - c.Assert(ok, jc.IsTrue) - c.Assert(args.Metadata, gc.HasLen, 2) - c.Assert(args.Metadata, gc.DeepEquals, []params.CloudImageMetadata{m, m}) - - if results, k := result.(*params.ErrorResults); k { - results.Results = expected + c.Check(request, gc.Equals, "Delete") + + results := result.(*params.ErrorResults) + results.Results = []params.ErrorResult{ + {¶ms.Error{Message: msg}}, } - return nil }) client := imagemetadata.NewClient(apiCaller) - errs, err := client.Save([]params.CloudImageMetadata{m, m}) - c.Check(err, jc.ErrorIsNil) + err := client.Delete("tst12345") + c.Assert(err, gc.ErrorMatches, msg) c.Assert(called, jc.IsTrue) - c.Assert(errs, jc.DeepEquals, expected) } -func (s *imagemetadataSuite) TestSaveFacadeCallError(c *gc.C) { - m := []params.CloudImageMetadata{{}} +func (s *imagemetadataSuite) TestDeleteFacadeCallError(c *gc.C) { called := false msg := "facade failure" apiCaller := testing.APICallerFunc( @@ -167,12 +347,11 @@ called = true c.Check(objType, gc.Equals, "ImageMetadata") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "Save") + c.Check(request, gc.Equals, "Delete") return errors.New(msg) }) client := imagemetadata.NewClient(apiCaller) - found, err := client.Save(m) - c.Assert(errors.Cause(err), gc.ErrorMatches, msg) - c.Assert(found, gc.HasLen, 0) + err := client.Delete("tst12345") + c.Assert(err, gc.ErrorMatches, msg) c.Assert(called, jc.IsTrue) } === modified file 'src/github.com/juju/juju/api/instancepoller/instancepoller.go' --- src/github.com/juju/juju/api/instancepoller/instancepoller.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/instancepoller/instancepoller.go 2016-03-22 15:18:22 +0000 @@ -9,15 +9,16 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) const instancePollerFacade = "InstancePoller" // API provides access to the InstancePoller API facade. type API struct { - *common.EnvironWatcher + *common.ModelWatcher facade base.FacadeCaller } @@ -29,8 +30,8 @@ } facadeCaller := base.NewFacadeCaller(caller, instancePollerFacade) return &API{ - EnvironWatcher: common.NewEnvironWatcher(facadeCaller), - facade: facadeCaller, + ModelWatcher: common.NewModelWatcher(facadeCaller), + facade: facadeCaller, } } @@ -44,13 +45,13 @@ return &Machine{api.facade, tag, life}, nil } -var newStringsWatcher = watcher.NewStringsWatcher +var newStringsWatcher = apiwatcher.NewStringsWatcher -// WatchEnvironMachines return a StringsWatcher reporting waiting for the -// environment configuration to change. -func (api *API) WatchEnvironMachines() (watcher.StringsWatcher, error) { +// WatchModelMachines return a StringsWatcher reporting waiting for the +// model configuration to change. 
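Editor's note: a consumer sketch (not part of the diff) for the watcher
returned by WatchModelMachines, defined just below; pollMachines and
handleMachineChanges are hypothetical, and the Changes/Kill/Wait shape is
assumed from the github.com/juju/juju/watcher.StringsWatcher interface
imported above.

	func pollMachines(api *instancepoller.API, handleMachineChanges func([]string)) error {
		w, err := api.WatchModelMachines()
		if err != nil {
			return err
		}
		defer w.Kill()
		// Each event carries the ids of machines whose state changed.
		for ids := range w.Changes() {
			handleMachineChanges(ids)
		}
		return w.Wait()
	}

+// WatchModelMachines returns a StringsWatcher reporting changes
+// to the machines in the model.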
+func (api *API) WatchModelMachines() (watcher.StringsWatcher, error) { var result params.StringsWatchResult - err := api.facade.FacadeCall("WatchEnvironMachines", nil, &result) + err := api.facade.FacadeCall("WatchModelMachines", nil, &result) if err != nil { return nil, err } === modified file 'src/github.com/juju/juju/api/instancepoller/instancepoller_test.go' --- src/github.com/juju/juju/api/instancepoller/instancepoller_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/instancepoller/instancepoller_test.go 2016-03-22 15:18:22 +0000 @@ -13,10 +13,10 @@ "github.com/juju/juju/api/base" apitesting "github.com/juju/juju/api/base/testing" "github.com/juju/juju/api/instancepoller" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" ) type InstancePollerSuite struct { @@ -60,7 +60,7 @@ c.Assert(m.Id(), gc.Equals, "42") } -func (s *InstancePollerSuite) TestWatchEnvironMachinesSuccess(c *gc.C) { +func (s *InstancePollerSuite) TestWatchModelMachinesSuccess(c *gc.C) { // We're not testing the watcher logic here as it's already tested elsewhere. var numFacadeCalls int var numWatcherCalls int @@ -76,87 +76,87 @@ } s.PatchValue(instancepoller.NewStringsWatcher, watcherFunc) - apiCaller := successAPICaller(c, "WatchEnvironMachines", nil, expectResult, &numFacadeCalls) + apiCaller := successAPICaller(c, "WatchModelMachines", nil, expectResult, &numFacadeCalls) api := instancepoller.NewAPI(apiCaller) - w, err := api.WatchEnvironMachines() + w, err := api.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) c.Assert(numFacadeCalls, gc.Equals, 1) c.Assert(numWatcherCalls, gc.Equals, 1) c.Assert(w, gc.IsNil) } -func (s *InstancePollerSuite) TestWatchEnvironMachinesClientError(c *gc.C) { +func (s *InstancePollerSuite) TestWatchModelMachinesClientError(c *gc.C) { var called int - apiCaller := clientErrorAPICaller(c, "WatchEnvironMachines", nil, &called) + apiCaller := clientErrorAPICaller(c, "WatchModelMachines", nil, &called) api := instancepoller.NewAPI(apiCaller) - w, err := api.WatchEnvironMachines() + w, err := api.WatchModelMachines() c.Assert(err, gc.ErrorMatches, "client error!") c.Assert(w, gc.IsNil) c.Assert(called, gc.Equals, 1) } -func (s *InstancePollerSuite) TestWatchEnvironMachinesServerError(c *gc.C) { +func (s *InstancePollerSuite) TestWatchModelMachinesServerError(c *gc.C) { var called int expectedResults := params.StringsWatchResult{ Error: apiservertesting.ServerError("server boom!"), } - apiCaller := successAPICaller(c, "WatchEnvironMachines", nil, expectedResults, &called) + apiCaller := successAPICaller(c, "WatchModelMachines", nil, expectedResults, &called) api := instancepoller.NewAPI(apiCaller) - w, err := api.WatchEnvironMachines() + w, err := api.WatchModelMachines() c.Assert(err, gc.ErrorMatches, "server boom!") c.Assert(called, gc.Equals, 1) c.Assert(w, gc.IsNil) } -func (s *InstancePollerSuite) TestWatchForEnvironConfigChangesClientError(c *gc.C) { +func (s *InstancePollerSuite) TestWatchForModelConfigChangesClientError(c *gc.C) { // We're not testing the success case as we're not patching the - // NewNotifyWatcher call the embedded EnvironWatcher is calling. + // NewNotifyWatcher call the embedded ModelWatcher is calling. 
var called int - apiCaller := clientErrorAPICaller(c, "WatchForEnvironConfigChanges", nil, &called) + apiCaller := clientErrorAPICaller(c, "WatchForModelConfigChanges", nil, &called) api := instancepoller.NewAPI(apiCaller) - w, err := api.WatchForEnvironConfigChanges() + w, err := api.WatchForModelConfigChanges() c.Assert(err, gc.ErrorMatches, "client error!") c.Assert(called, gc.Equals, 1) c.Assert(w, gc.IsNil) } -func (s *InstancePollerSuite) TestEnvironConfigSuccess(c *gc.C) { +func (s *InstancePollerSuite) TestModelConfigSuccess(c *gc.C) { var called int - expectedConfig := coretesting.EnvironConfig(c) - expectedResults := params.EnvironConfigResult{ - Config: params.EnvironConfig(expectedConfig.AllAttrs()), + expectedConfig := coretesting.ModelConfig(c) + expectedResults := params.ModelConfigResult{ + Config: params.ModelConfig(expectedConfig.AllAttrs()), } - apiCaller := successAPICaller(c, "EnvironConfig", nil, expectedResults, &called) + apiCaller := successAPICaller(c, "ModelConfig", nil, expectedResults, &called) api := instancepoller.NewAPI(apiCaller) - cfg, err := api.EnvironConfig() + cfg, err := api.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(called, gc.Equals, 1) c.Assert(cfg, jc.DeepEquals, expectedConfig) } -func (s *InstancePollerSuite) TestEnvironConfigClientError(c *gc.C) { +func (s *InstancePollerSuite) TestModelConfigClientError(c *gc.C) { var called int - apiCaller := clientErrorAPICaller(c, "EnvironConfig", nil, &called) + apiCaller := clientErrorAPICaller(c, "ModelConfig", nil, &called) api := instancepoller.NewAPI(apiCaller) - cfg, err := api.EnvironConfig() + cfg, err := api.ModelConfig() c.Assert(err, gc.ErrorMatches, "client error!") c.Assert(cfg, gc.IsNil) c.Assert(called, gc.Equals, 1) } -func (s *InstancePollerSuite) TestEnvironConfigServerError(c *gc.C) { +func (s *InstancePollerSuite) TestModelConfigServerError(c *gc.C) { var called int - expectResults := params.EnvironConfigResult{ - Config: params.EnvironConfig{"type": "foo"}, + expectResults := params.ModelConfigResult{ + Config: params.ModelConfig{"type": "foo"}, } - apiCaller := successAPICaller(c, "EnvironConfig", nil, expectResults, &called) + apiCaller := successAPICaller(c, "ModelConfig", nil, expectResults, &called) api := instancepoller.NewAPI(apiCaller) - cfg, err := api.EnvironConfig() + cfg, err := api.ModelConfig() c.Assert(err, gc.NotNil) // the actual error doesn't matter c.Assert(called, gc.Equals, 1) c.Assert(cfg, gc.IsNil) === modified file 'src/github.com/juju/juju/api/interface.go' --- src/github.com/juju/juju/api/interface.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/interface.go 2016-03-22 15:18:22 +0000 @@ -4,30 +4,25 @@ package api import ( - "io" - "net/http" "time" "github.com/juju/names" + "gopkg.in/macaroon-bakery.v1/httpbakery" "github.com/juju/juju/api/addresser" "github.com/juju/juju/api/agent" + "github.com/juju/juju/api/base" "github.com/juju/juju/api/charmrevisionupdater" "github.com/juju/juju/api/cleaner" "github.com/juju/juju/api/deployer" - "github.com/juju/juju/api/diskmanager" - "github.com/juju/juju/api/environment" + "github.com/juju/juju/api/discoverspaces" "github.com/juju/juju/api/firewaller" + "github.com/juju/juju/api/imagemetadata" "github.com/juju/juju/api/instancepoller" - "github.com/juju/juju/api/keyupdater" - apilogger "github.com/juju/juju/api/logger" "github.com/juju/juju/api/machiner" - "github.com/juju/juju/api/networker" "github.com/juju/juju/api/provisioner" "github.com/juju/juju/api/reboot" - 
"github.com/juju/juju/api/resumer" - "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/api/storageprovisioner" + "github.com/juju/juju/api/unitassigner" "github.com/juju/juju/api/uniter" "github.com/juju/juju/api/upgrader" "github.com/juju/juju/network" @@ -41,20 +36,24 @@ // This block of fields is sufficient to connect: - // Addrs holds the addresses of the state servers. + // Addrs holds the addresses of the controllers. Addrs []string // CACert holds the CA certificate that will be used - // to validate the state server's certificate, in PEM format. + // to validate the controller's certificate, in PEM format. CACert string - // EnvironTag holds the environ tag for the environment we are + // ModelTag holds the model tag for the model we are // trying to connect to. - EnvironTag names.EnvironTag + ModelTag names.ModelTag // ...but this block of fields is all about the authentication mechanism // to use after connecting -- if any -- and should probably be extracted. + // UseMacaroons, when true, enables macaroon-based login and ignores + // the provided username and password. + UseMacaroons bool `yaml:"use-macaroons,omitempty"` + // Tag holds the name of the entity that is connecting. // If this is nil, and the password is empty, no login attempt will be made. // (this is to allow tests to access the API to check that operations @@ -70,23 +69,36 @@ } // DialOpts holds configuration parameters that control the -// Dialing behavior when connecting to a state server. +// Dialing behavior when connecting to a controller. type DialOpts struct { // DialAddressInterval is the amount of time to wait // before starting to dial another address. DialAddressInterval time.Duration // Timeout is the amount of time to wait contacting - // a state server. + // a controller. Timeout time.Duration // RetryDelay is the amount of time to wait between // unsucssful connection attempts. RetryDelay time.Duration + + // BakeryClient is the httpbakery Client, which + // is used to do the macaroon-based authorization. + // This and the *http.Client inside it are copied + // by Open, and any RoundTripper field + // the HTTP client is ignored. + BakeryClient *httpbakery.Client + + // InsecureSkipVerify skips TLS certificate verification + // when connecting to the controller. This should only + // be used in tests, or when verification cannot be + // performed and the communication need not be secure. + InsecureSkipVerify bool } // DefaultDialOpts returns a DialOpts representing the default -// parameters for contacting a state server. +// parameters for contacting a controller. func DefaultDialOpts() DialOpts { return DialOpts{ DialAddressInterval: 50 * time.Millisecond, @@ -101,6 +113,8 @@ // Connection exists purely to make api-opening funcs mockable. It's just a // dumb copy of all the methods on api.Connection; we can and should be extracting // smaller and more relevant interfaces (and dropping some of them too). + +// Connection represents a connection to a Juju API server. type Connection interface { // This first block of methods is pretty close to a sane Connection interface. @@ -111,22 +125,18 @@ // These are a bit off -- ServerVersion is apparently not known until after // Login()? Maybe evidence of need for a separate AuthenticatedConnection..? - Login(name, password, nonce string) error + Login(name names.Tag, password, nonce string) error ServerVersion() (version.Number, bool) - // These are either part of base.APICaller or look like they probably should - // be (ServerTag in particular). 
It's fine and good for Connection to be an - // APICaller. - APICall(facade string, version int, id, method string, args, response interface{}) error - BestFacadeVersion(string) int - EnvironTag() (names.EnvironTag, error) - ServerTag() (names.EnvironTag, error) + // APICaller provides the facility to make API calls directly. + // This should not be used outside the api/* packages or tests. + base.APICaller - // These HTTP methods should probably be separated out somehow. - NewHTTPClient() *http.Client - NewHTTPRequest(method, path string) (*http.Request, error) - SendHTTPRequest(path string, args interface{}) (*http.Request, *http.Response, error) - SendHTTPRequestReader(path string, attached io.Reader, meta interface{}, name string) (*http.Request, *http.Response, error) + // ControllerTag returns the model tag of the controller + // (as opposed to the model tag of the currently connected + // model inside that controller). + // This could be defined on base.APICaller. + ControllerTag() (names.ModelTag, error) // All the rest are strange and questionable and deserve extra attention // and/or discussion. @@ -152,23 +162,18 @@ // prohibitively ugly to do so. Client() *Client Machiner() *machiner.State - Resumer() *resumer.API - Networker() networker.State Provisioner() *provisioner.State Uniter() (*uniter.State, error) - DiskManager() (*diskmanager.State, error) - StorageProvisioner(scope names.Tag) *storageprovisioner.State Firewaller() *firewaller.State Agent() *agent.State Upgrader() *upgrader.State Reboot() (reboot.State, error) Deployer() *deployer.State - Environment() *environment.Facade - Logger() *apilogger.State - KeyUpdater() *keyupdater.State Addresser() *addresser.API + DiscoverSpaces() *discoverspaces.API InstancePoller() *instancepoller.API CharmRevisionUpdater() *charmrevisionupdater.State Cleaner() *cleaner.API - Rsyslog() *rsyslog.State + MetadataUpdater() *imagemetadata.Client + UnitAssigner() unitassigner.API } === modified file 'src/github.com/juju/juju/api/keymanager/client.go' --- src/github.com/juju/juju/api/keymanager/client.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/keymanager/client.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,10 @@ package keymanager import ( + "github.com/juju/utils/ssh" + "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/utils/ssh" ) // Client provides access to the keymanager, used to add/delete/list authorised ssh keys. 
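Editor's note: a minimal connection sketch (not part of the diff) tying
together the api.Info and DialOpts changes shown in interface.go above;
connect is hypothetical, caCert and modelUUID are placeholder inputs, and
api.Open's signature is assumed from the surrounding codebase.

	func connect(caCert, modelUUID string) (api.Connection, error) {
		info := &api.Info{
			Addrs:        []string{"10.0.3.1:17070"}, // placeholder controller address
			CACert:       caCert,
			ModelTag:     names.NewModelTag(modelUUID),
			UseMacaroons: true, // macaroon login; Tag and Password left unset
		}
		opts := api.DefaultDialOpts()
		opts.BakeryClient = httpbakery.NewClient() // copied by Open, per the doc comment above
		return api.Open(info, opts)
	}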
=== modified file 'src/github.com/juju/juju/api/keymanager/client_test.go' --- src/github.com/juju/juju/api/keymanager/client_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/keymanager/client_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,10 @@ import ( "strings" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/ssh" + sshtesting "github.com/juju/utils/ssh/testing" gc "gopkg.in/check.v1" "github.com/juju/juju/api/keymanager" @@ -14,9 +17,8 @@ keymanagertesting "github.com/juju/juju/apiserver/keymanager/testing" "github.com/juju/juju/apiserver/params" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" - "github.com/juju/juju/utils/ssh" - sshtesting "github.com/juju/juju/utils/ssh/testing" ) type keymanagerSuite struct { @@ -35,7 +37,7 @@ } func (s *keymanagerSuite) setAuthorisedKeys(c *gc.C, keys string) { - err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) + err := s.BackingState.UpdateModelConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) c.Assert(err, jc.ErrorIsNil) } @@ -69,8 +71,8 @@ } } -func (s *keymanagerSuite) assertEnvironKeys(c *gc.C, expected []string) { - envConfig, err := s.State.EnvironConfig() +func (s *keymanagerSuite) assertModelKeys(c *gc.C, expected []string) { + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) keys := envConfig.AuthorizedKeys() c.Assert(keys, gc.Equals, strings.Join(expected, "\n")) @@ -88,14 +90,14 @@ {Error: nil}, {Error: clientError("invalid ssh key: invalid")}, }) - s.assertEnvironKeys(c, append([]string{key1}, newKeys[:2]...)) + s.assertModelKeys(c, append([]string{key1}, newKeys[:2]...)) } func (s *keymanagerSuite) TestAddSystemKey(c *gc.C) { key1 := sshtesting.ValidKeyOne.Key + " user@host" s.setAuthorisedKeys(c, key1) - apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageModel) keyManager := keymanager.NewClient(apiState) defer keyManager.Close() newKey := sshtesting.ValidKeyTwo.Key @@ -104,20 +106,23 @@ c.Assert(errResults, gc.DeepEquals, []params.ErrorResult{ {Error: nil}, }) - s.assertEnvironKeys(c, []string{key1, newKey}) + s.assertModelKeys(c, []string{key1, newKey}) } func (s *keymanagerSuite) TestAddSystemKeyWrongUser(c *gc.C) { key1 := sshtesting.ValidKeyOne.Key + " user@host" s.setAuthorisedKeys(c, key1) - apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageModel) keyManager := keymanager.NewClient(apiState) defer keyManager.Close() newKey := sshtesting.ValidKeyTwo.Key _, err := keyManager.AddKeys("some-user", newKey) - c.Assert(err, gc.ErrorMatches, "permission denied") - s.assertEnvironKeys(c, []string{key1}) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "permission denied", + Code: "unauthorized access", + }) + s.assertModelKeys(c, []string{key1}) } func (s *keymanagerSuite) TestDeleteKeys(c *gc.C) { @@ -134,7 +139,7 @@ {Error: nil}, {Error: clientError("invalid ssh key: missing")}, }) - s.assertEnvironKeys(c, []string{"invalid", key3}) + s.assertModelKeys(c, []string{"invalid", key3}) } func (s *keymanagerSuite) TestImportKeys(c *gc.C) { @@ -150,7 +155,7 @@ {Error: nil}, {Error: clientError("invalid ssh key id: invalid-key")}, }) - s.assertEnvironKeys(c, []string{key1, sshtesting.ValidKeyThree.Key}) + s.assertModelKeys(c, []string{key1, sshtesting.ValidKeyThree.Key}) } func 
(s *keymanagerSuite) assertInvalidUserOperation(c *gc.C, test func(user string, keys []string) error) { @@ -162,8 +167,8 @@ err := test("invalid", keys) c.Assert(err, gc.ErrorMatches, `permission denied`) - // No environ changes. - s.assertEnvironKeys(c, []string{key1}) + // No model changes. + s.assertModelKeys(c, []string{key1}) } func (s *keymanagerSuite) TestAddKeysInvalidUser(c *gc.C) { @@ -191,5 +196,5 @@ } func (s *keymanagerSuite) TestExposesBestAPIVersion(c *gc.C) { - c.Check(s.keymanager.BestAPIVersion(), gc.Equals, 0) + c.Check(s.keymanager.BestAPIVersion(), gc.Equals, 1) } === modified file 'src/github.com/juju/juju/api/keyupdater/authorisedkeys.go' --- src/github.com/juju/juju/api/keyupdater/authorisedkeys.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/keyupdater/authorisedkeys.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,9 @@ "github.com/juju/names" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // State provides access to a worker's view of the state. @@ -65,6 +66,6 @@ // TODO: Not directly tested return nil, result.Error } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/keyupdater/authorisedkeys_test.go' --- src/github.com/juju/juju/api/keyupdater/authorisedkeys_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/keyupdater/authorisedkeys_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/juju/api/keyupdater" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" - "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher/watchertest" ) type keyupdaterSuite struct { @@ -32,7 +32,7 @@ var stateAPI api.Connection stateAPI, s.rawMachine = s.OpenAPIAsNewMachine(c) c.Assert(stateAPI, gc.NotNil) - s.keyupdater = stateAPI.KeyUpdater() + s.keyupdater = keyupdater.NewState(stateAPI) c.Assert(s.keyupdater, gc.NotNil) } @@ -56,15 +56,16 @@ } func (s *keyupdaterSuite) setAuthorisedKeys(c *gc.C, keys string) { - err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) + err := s.BackingState.UpdateModelConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) c.Assert(err, jc.ErrorIsNil) } func (s *keyupdaterSuite) TestWatchAuthorisedKeys(c *gc.C) { watcher, err := s.keyupdater.WatchAuthorisedKeys(s.rawMachine.Tag().(names.MachineTag)) c.Assert(err, jc.ErrorIsNil) - defer testing.AssertStop(c, watcher) - wc := testing.NewNotifyWatcherC(c, s.BackingState, watcher) + wc := watchertest.NewNotifyWatcherC(c, watcher, s.BackingState.StartSync) + defer wc.AssertStops() + // Initial event wc.AssertOneChange() @@ -77,6 +78,4 @@ s.setAuthorisedKeys(c, "key1\nkey2\nkey3") wc.AssertOneChange() - testing.AssertStop(c, watcher) - wc.AssertClosed() } === modified file 'src/github.com/juju/juju/api/leadership/client.go' --- src/github.com/juju/juju/api/leadership/client.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/leadership/client.go 2016-03-22 15:18:22 +0000 @@ -16,7 +16,7 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" ) var logger = loggo.GetLogger("juju.api.leadership") === modified file 'src/github.com/juju/juju/api/leadership/client_test.go' --- 
src/github.com/juju/juju/api/leadership/client_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/api/leadership/client_test.go	2016-03-22 15:18:22 +0000
@@ -16,7 +16,7 @@
 	apitesting "github.com/juju/juju/api/base/testing"
 	"github.com/juju/juju/api/leadership"
 	"github.com/juju/juju/apiserver/params"
-	coreleadership "github.com/juju/juju/leadership"
+	coreleadership "github.com/juju/juju/core/leadership"
 )
 
 /*

=== modified file 'src/github.com/juju/juju/api/logger/logger.go'
--- src/github.com/juju/juju/api/logger/logger.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/api/logger/logger.go	2016-03-22 15:18:22 +0000
@@ -9,8 +9,9 @@
 	"github.com/juju/names"
 
 	"github.com/juju/juju/api/base"
-	"github.com/juju/juju/api/watcher"
+	apiwatcher "github.com/juju/juju/api/watcher"
 	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/watcher"
 )
 
 // State provides access to a logger worker's view of the state.
@@ -68,6 +69,6 @@
 		// TODO: Not directly tested
 		return nil, result.Error
 	}
-	w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result)
+	w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result)
 	return w, nil
 }

=== modified file 'src/github.com/juju/juju/api/logger/logger_test.go'
--- src/github.com/juju/juju/api/logger/logger_test.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/api/logger/logger_test.go	2016-03-22 15:18:22 +0000
@@ -12,7 +12,7 @@
 	"github.com/juju/juju/api/logger"
 	jujutesting "github.com/juju/juju/juju/testing"
 	"github.com/juju/juju/state"
-	"github.com/juju/juju/state/testing"
+	"github.com/juju/juju/watcher/watchertest"
 )
 
 type loggerSuite struct {
@@ -35,7 +35,7 @@
 	var stateAPI api.Connection
 	stateAPI, s.rawMachine = s.OpenAPIAsNewMachine(c)
 	// Create the logger facade.
-	s.logger = stateAPI.Logger()
+	s.logger = logger.NewState(stateAPI)
 	c.Assert(s.logger, gc.NotNil)
 }
 
@@ -52,15 +52,16 @@
 }
 
 func (s *loggerSuite) setLoggingConfig(c *gc.C, loggingConfig string) {
-	err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"logging-config": loggingConfig}, nil, nil)
+	err := s.BackingState.UpdateModelConfig(map[string]interface{}{"logging-config": loggingConfig}, nil, nil)
 	c.Assert(err, jc.ErrorIsNil)
 }
 
 func (s *loggerSuite) TestWatchLoggingConfig(c *gc.C) {
 	watcher, err := s.logger.WatchLoggingConfig(s.rawMachine.Tag())
 	c.Assert(err, jc.ErrorIsNil)
-	defer testing.AssertStop(c, watcher)
-	wc := testing.NewNotifyWatcherC(c, s.BackingState, watcher)
+	wc := watchertest.NewNotifyWatcherC(c, watcher, s.BackingState.StartSync)
+	defer wc.AssertStops()
+
 	// Initial event
 	wc.AssertOneChange()
 
@@ -75,6 +76,4 @@
 	loggingConfig = loggingConfig + ";wibble=DEBUG"
 	s.setLoggingConfig(c, loggingConfig)
 	wc.AssertOneChange()
-	testing.AssertStop(c, watcher)
-	wc.AssertClosed()
 }

=== added directory 'src/github.com/juju/juju/api/logsender'
=== added file 'src/github.com/juju/juju/api/logsender/logsender.go'
--- src/github.com/juju/juju/api/logsender/logsender.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/logsender/logsender.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,63 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package logsender implements the API for storing log
+// messages on the API server.
+package logsender
+
+import (
+	"io"
+
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/api/base"
+	"github.com/juju/juju/apiserver/params"
+)
+
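Editor's note: a caller-side sketch (not part of the diff) for this new
package, mirroring its tests further below; sendOneRecord is hypothetical,
any api.Connection satisfies base.StreamConnector, and the LogRecord fields
are elided here.

	func sendOneRecord(conn base.StreamConnector) error {
		w, err := logsender.NewAPI(conn).LogWriter()
		if err != nil {
			return err
		}
		defer w.Close()
		// Fire-and-forget: a dropped connection can lose in-flight
		// records (see the note inside WriteLog below).
		return w.WriteLog(new(params.LogRecord))
	}

+// LogWriter is the interface that allows sending log
+// messages to the server for storage.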
+type LogWriter interface { + // WriteLog writes the given log record. + WriteLog(*params.LogRecord) error + + io.Closer +} + +// API provides access to the LogSender API. +type API struct { + connector base.StreamConnector +} + +// NewAPI creates a new client-side logsender API. +func NewAPI(connector base.StreamConnector) *API { + return &API{connector: connector} +} + +// LogWriter returns a new log writer interface value +// which must be closed when finished with. +func (api *API) LogWriter() (LogWriter, error) { + conn, err := api.connector.ConnectStream("/logsink", nil) + if err != nil { + return nil, errors.Annotatef(err, "cannot connect to /logsink") + } + return writer{conn}, nil +} + +type writer struct { + conn base.Stream +} + +func (w writer) WriteLog(m *params.LogRecord) error { + // Note: due to the fire-and-forget nature of the + // logsink API, it is possible that when the + // connection dies, any logs that were "in-flight" + // will not be recorded on the server side. + if err := w.conn.WriteJSON(m); err != nil { + return errors.Annotatef(err, "cannot send log message") + } + return nil +} + +func (w writer) Close() error { + return w.conn.Close() +} === added file 'src/github.com/juju/juju/api/logsender/logsender_test.go' --- src/github.com/juju/juju/api/logsender/logsender_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/logsender/logsender_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package logsender_test + +import ( + "errors" + "net/url" + + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/logsender" + "github.com/juju/juju/apiserver/params" + coretesting "github.com/juju/juju/testing" +) + +type LogSenderSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&LogSenderSuite{}) + +func (s *LogSenderSuite) TestNewAPI(c *gc.C) { + conn := &mockConnector{ + c: c, + } + a := logsender.NewAPI(conn) + w, err := a.LogWriter() + c.Assert(err, gc.IsNil) + + msg := new(params.LogRecord) + err = w.WriteLog(msg) + c.Assert(err, gc.IsNil) + + c.Assert(conn.written, gc.HasLen, 1) + c.Assert(conn.written[0], gc.Equals, msg) + + err = w.Close() + c.Assert(err, gc.IsNil) + c.Assert(conn.closeCount, gc.Equals, 1) +} + +func (s *LogSenderSuite) TestNewAPIWriteLogError(c *gc.C) { + conn := &mockConnector{ + c: c, + connectError: errors.New("foo"), + } + a := logsender.NewAPI(conn) + w, err := a.LogWriter() + c.Assert(err, gc.ErrorMatches, "cannot connect to /logsink: foo") + c.Assert(w, gc.Equals, nil) +} + +func (s *LogSenderSuite) TestNewAPIWriteError(c *gc.C) { + conn := &mockConnector{ + c: c, + writeError: errors.New("foo"), + } + a := logsender.NewAPI(conn) + w, err := a.LogWriter() + c.Assert(err, gc.IsNil) + + err = w.WriteLog(new(params.LogRecord)) + c.Assert(err, gc.ErrorMatches, "cannot send log message: foo") + c.Assert(conn.written, gc.HasLen, 0) +} + +type mockConnector struct { + c *gc.C + + connectError error + writeError error + written []interface{} + + closeCount int +} + +func (c *mockConnector) ConnectStream(path string, values url.Values) (base.Stream, error) { + c.c.Assert(path, gc.Equals, "/logsink") + c.c.Assert(values, gc.HasLen, 0) + if c.connectError != nil { + return nil, c.connectError + } + return mockStream{c}, nil +} + +type mockStream struct { + conn *mockConnector +} + +func (s mockStream) WriteJSON(v interface{}) error { + if s.conn.writeError != nil { + return s.conn.writeError + } 
+ s.conn.written = append(s.conn.written, v) + return nil +} + +func (s mockStream) ReadJSON(v interface{}) error { + s.conn.c.Errorf("ReadJSON called unexpectedly") + return nil +} + +func (s mockStream) Read([]byte) (int, error) { + s.conn.c.Errorf("Read called unexpectedly") + return 0, nil +} + +func (s mockStream) Write([]byte) (int, error) { + s.conn.c.Errorf("Write called unexpectedly") + return 0, nil +} + +func (s mockStream) Close() error { + s.conn.closeCount++ + return nil +} === added file 'src/github.com/juju/juju/api/logsender/package_test.go' --- src/github.com/juju/juju/api/logsender/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/logsender/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package logsender_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === modified file 'src/github.com/juju/juju/api/machiner/machine.go' --- src/github.com/juju/juju/api/machiner/machine.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/machiner/machine.go 2016-03-22 15:18:22 +0000 @@ -8,9 +8,9 @@ "github.com/juju/names" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" ) // Machine represents a juju machine as seen by a machiner worker. === modified file 'src/github.com/juju/juju/api/machiner/machiner_test.go' --- src/github.com/juju/juju/api/machiner/machiner_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/machiner/machiner_test.go 2016-03-22 15:18:22 +0000 @@ -18,8 +18,8 @@ "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher/watchertest" ) func TestAll(t *stdtesting.T) { @@ -40,7 +40,7 @@ func (s *machinerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - m, err := s.State.AddMachine("quantal", state.JobManageEnviron) + m, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) err = m.SetProviderAddresses(network.NewAddress("10.0.0.1")) c.Assert(err, jc.ErrorIsNil) @@ -184,8 +184,8 @@ w, err := machine.Watch() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertOneChange() @@ -200,7 +200,4 @@ err = machine.EnsureDead() c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } === added directory 'src/github.com/juju/juju/api/meterstatus' === added file 'src/github.com/juju/juju/api/meterstatus/client.go' --- src/github.com/juju/juju/api/meterstatus/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/meterstatus/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,83 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package meterstatus contains an implementation of the api facade to +// watch the meter status of a unit for changes and return the current meter status. 
+package meterstatus + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/api/base" + apiwatcher "github.com/juju/juju/api/watcher" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" +) + +// MeterStatusClient defines the methods on the MeterStatus API end point. +type MeterStatusClient interface { + // MeterStatus returns the meter status and additional information for the + // API client. + MeterStatus() (string, string, error) + // WatchMeterStatus returns a watcher for observing changes to the unit's meter + // status. + WatchMeterStatus() (watcher.NotifyWatcher, error) +} + +// NewClient creates a new client for accessing the MeterStatus API. +func NewClient(caller base.APICaller, tag names.UnitTag) MeterStatusClient { + return &Client{ + facade: base.NewFacadeCaller(caller, "MeterStatus"), + tag: tag, + } +} + +var _ MeterStatusClient = (*Client)(nil) + +// Client provides access to the meter status API. +type Client struct { + facade base.FacadeCaller + tag names.UnitTag +} + +// MeterStatus is part of the MeterStatusClient interface. +func (c *Client) MeterStatus() (statusCode, statusInfo string, rErr error) { + var results params.MeterStatusResults + args := params.Entities{ + Entities: []params.Entity{{Tag: c.tag.String()}}, + } + err := c.facade.FacadeCall("GetMeterStatus", args, &results) + if err != nil { + return "", "", errors.Trace(err) + } + if len(results.Results) != 1 { + return "", "", errors.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return "", "", errors.Trace(result.Error) + } + return result.Code, result.Info, nil +} + +// WatchMeterStatus is part of the MeterStatusClient interface. +func (c *Client) WatchMeterStatus() (watcher.NotifyWatcher, error) { + var results params.NotifyWatchResults + args := params.Entities{ + Entities: []params.Entity{{Tag: c.tag.String()}}, + } + err := c.facade.FacadeCall("WatchMeterStatus", args, &results) + if err != nil { + return nil, err + } + if len(results.Results) != 1 { + return nil, errors.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return nil, result.Error + } + w := apiwatcher.NewNotifyWatcher(c.facade.RawAPICaller(), result) + return w, nil +} === added file 'src/github.com/juju/juju/api/meterstatus/client_test.go' --- src/github.com/juju/juju/api/meterstatus/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/meterstatus/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,221 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package meterstatus_test
+
+import (
+	"fmt"
+
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/api/base/testing"
+	"github.com/juju/juju/api/meterstatus"
+	"github.com/juju/juju/apiserver/params"
+	coretesting "github.com/juju/juju/testing"
+)
+
+type meterStatusSuite struct {
+	coretesting.BaseSuite
+}
+
+var _ = gc.Suite(&meterStatusSuite{})
+
+func (s *meterStatusSuite) SetUpTest(c *gc.C) {
+	s.BaseSuite.SetUpTest(c)
+}
+
+func (s *meterStatusSuite) TestGetMeterStatus(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "GetMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.MeterStatusResults{})
+		result := response.(*params.MeterStatusResults)
+		result.Results = []params.MeterStatusResult{{
+			Code: "GREEN",
+			Info: "All ok.",
+		}}
+		called = true
+		return nil
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+
+	statusCode, statusInfo, err := status.MeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(statusCode, gc.Equals, "GREEN")
+	c.Assert(statusInfo, gc.Equals, "All ok.")
+}
+
+func (s *meterStatusSuite) TestGetMeterStatusResultError(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "GetMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.MeterStatusResults{})
+		result := response.(*params.MeterStatusResults)
+		result.Results = []params.MeterStatusResult{{
+			Error: &params.Error{
+				Message: "An error in the meter status.",
+				Code:    params.CodeNotAssigned,
+			},
+		}}
+		called = true
+		return nil
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+
+	statusCode, statusInfo, err := status.MeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, gc.ErrorMatches, "An error in the meter status.")
+	c.Assert(statusCode, gc.Equals, "")
+	c.Assert(statusInfo, gc.Equals, "")
+}
+
+func (s *meterStatusSuite) TestGetMeterStatusReturnsError(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "GetMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.MeterStatusResults{})
+		called = true
+		return fmt.Errorf("could not retrieve meter status")
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+
+	statusCode, statusInfo, err := status.MeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, gc.ErrorMatches, "could not retrieve meter status")
+	c.Assert(statusCode, gc.Equals, "")
+	c.Assert(statusInfo, gc.Equals, "")
+}
+
+func (s *meterStatusSuite) TestGetMeterStatusMoreResults(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "GetMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.MeterStatusResults{})
+		result := response.(*params.MeterStatusResults)
+		result.Results = make([]params.MeterStatusResult, 2)
+		called = true
+		return nil
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+	statusCode, statusInfo, err := status.MeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, gc.ErrorMatches, "expected 1 result, got 2")
+	c.Assert(statusCode, gc.Equals, "")
+	c.Assert(statusInfo, gc.Equals, "")
+}
+
+func (s *meterStatusSuite) TestWatchMeterStatusError(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "WatchMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.NotifyWatchResults{})
+		result := response.(*params.NotifyWatchResults)
+		result.Results = make([]params.NotifyWatchResult, 1)
+		called = true
+		return fmt.Errorf("could not retrieve meter status watcher")
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+	w, err := status.WatchMeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, gc.ErrorMatches, "could not retrieve meter status watcher")
+	c.Assert(w, gc.IsNil)
+}
+
+func (s *meterStatusSuite) TestWatchMeterStatusMoreResults(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "WatchMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.NotifyWatchResults{})
+		result := response.(*params.NotifyWatchResults)
+		result.Results = make([]params.NotifyWatchResult, 2)
+		called = true
+		return nil
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+	w, err := status.WatchMeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, gc.ErrorMatches, "expected 1 result, got 2")
+	c.Assert(w, gc.IsNil)
+}
+
+func (s *meterStatusSuite) TestWatchMeterStatusResultError(c *gc.C) {
+	tag := names.NewUnitTag("wp/1")
+	var called bool
+	apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error {
+		c.Check(objType, gc.Equals, "MeterStatus")
+		c.Check(version, gc.Equals, 0)
+		c.Check(id, gc.Equals, "")
+		c.Check(request, gc.Equals, "WatchMeterStatus")
+		c.Check(arg, gc.DeepEquals, params.Entities{
+			Entities: []params.Entity{{Tag: tag.String()}},
+		})
+		c.Assert(response, gc.FitsTypeOf, &params.NotifyWatchResults{})
+		result := response.(*params.NotifyWatchResults)
+		result.Results = []params.NotifyWatchResult{{
+			Error: &params.Error{
+				Message: "error",
+				Code:    params.CodeNotAssigned,
+			},
+		}}
+
+		called = true
+		return nil
+	})
+	status := meterstatus.NewClient(apiCaller, tag)
+	c.Assert(status, gc.NotNil)
+	w, err := status.WatchMeterStatus()
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, gc.ErrorMatches, "error")
+	c.Assert(w, gc.IsNil)
+}

=== added file 'src/github.com/juju/juju/api/meterstatus/export_test.go'
--- src/github.com/juju/juju/api/meterstatus/export_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/meterstatus/export_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,15 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package meterstatus
+
+import (
+	"github.com/juju/juju/api/base/testing"
+)
+
+// PatchFacadeCall patches the State's facade such that
+// FacadeCall method calls are diverted to the provided
+// function.
+func PatchFacadeCall(p testing.Patcher, client *Client, f func(request string, params, response interface{}) error) {
+	testing.PatchFacadeCall(p, &client.facade, f)
+}

=== added file 'src/github.com/juju/juju/api/meterstatus/package_test.go'
--- src/github.com/juju/juju/api/meterstatus/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/meterstatus/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package meterstatus_test
+
+import (
+	stdtesting "testing"
+
+	"github.com/juju/juju/testing"
+)
+
+func TestAll(t *stdtesting.T) {
+	testing.MgoTestPackage(t)
+}

=== added directory 'src/github.com/juju/juju/api/metricsadder'
=== added file 'src/github.com/juju/juju/api/metricsadder/client.go'
--- src/github.com/juju/juju/api/metricsadder/client.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/metricsadder/client.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,48 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package metricsadder contains an implementation of the api facade to
+// add metrics to the state.
+package metricsadder
+
+import (
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/api/base"
+	"github.com/juju/juju/apiserver/params"
+)
+
+// MetricsAdderClient defines the methods on the metricsadder API end point.
+type MetricsAdderClient interface {
+	// AddMetricBatches stores specified metric batches in the state.
+	AddMetricBatches(batches []params.MetricBatchParam) (map[string]error, error)
+}
+
+// NewClient creates a new client for accessing the metricsadder API.
+func NewClient(caller base.APICaller) *Client {
+	return &Client{facade: base.NewFacadeCaller(caller, "MetricsAdder")}
+}
+
+var _ MetricsAdderClient = (*Client)(nil)
+
+// Client provides access to the metrics adder API.
+type Client struct {
+	facade base.FacadeCaller
+}
+
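Editor's note: a hedged caller sketch (not part of the diff) for
AddMetricBatches, defined just below; sendBatches and its logging are
hypothetical. The returned map keys each batch UUID to its per-batch error,
nil meaning that batch was stored.

	func sendBatches(client *metricsadder.Client, batches []params.MetricBatchParam) error {
		results, err := client.AddMetricBatches(batches)
		if err != nil {
			// transport or facade-level failure
			return errors.Trace(err)
		}
		for uuid, batchErr := range results {
			if batchErr != nil {
				fmt.Printf("batch %s rejected: %v\n", uuid, batchErr)
			}
		}
		return nil
	}

+// AddMetricBatches implements the MetricsAdderClient interface.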
+func (c *Client) AddMetricBatches(batches []params.MetricBatchParam) (map[string]error, error) { + parameters := params.MetricBatchParams{ + Batches: batches, + } + results := new(params.ErrorResults) + err := c.facade.FacadeCall("AddMetricBatches", parameters, results) + if err != nil { + return nil, errors.Trace(err) + } + resultMap := make(map[string]error) + for i, result := range results.Results { + resultMap[batches[i].Batch.UUID] = result.Error + } + return resultMap, nil +} === added file 'src/github.com/juju/juju/api/metricsadder/client_test.go' --- src/github.com/juju/juju/api/metricsadder/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/metricsadder/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,157 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package metricsadder_test + +import ( + "time" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/metricsadder" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + factory "github.com/juju/juju/testing/factory" +) + +type metricsAdderSuite struct { + jujutesting.JujuConnSuite + + adder *metricsadder.Client +} + +var _ = gc.Suite(&metricsAdderSuite{}) + +func (s *metricsAdderSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + s.adder = metricsadder.NewClient(s.APIState) + c.Assert(s.adder, gc.NotNil) +} + +func (s *metricsAdderSuite) TestAddMetricBatches(c *gc.C) { + var called bool + var callParams params.MetricBatchParams + metricsadder.PatchFacadeCall(s, s.adder, func(request string, args, response interface{}) error { + p, ok := args.(params.MetricBatchParams) + c.Assert(ok, jc.IsTrue) + callParams = p + called = true + c.Assert(request, gc.Equals, "AddMetricBatches") + result := response.(*params.ErrorResults) + result.Results = make([]params.ErrorResult, 1) + return nil + }) + + batches := []params.MetricBatchParam{{ + Tag: names.NewUnitTag("test-unit/0").String(), + Batch: params.MetricBatch{ + UUID: utils.MustNewUUID().String(), + CharmURL: "test-charm-url", + Created: time.Now(), + Metrics: []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}}, + }, + }} + + _, err := s.adder.AddMetricBatches(batches) + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) + c.Assert(callParams.Batches, gc.DeepEquals, batches) +} + +func (s *metricsAdderSuite) TestAddMetricBatchesFails(c *gc.C) { + var called bool + metricsadder.PatchFacadeCall(s, s.adder, func(request string, args, response interface{}) error { + _, ok := args.(params.MetricBatchParams) + c.Assert(ok, jc.IsTrue) + called = true + c.Assert(request, gc.Equals, "AddMetricBatches") + result := response.(*params.ErrorResults) + result.Results = make([]params.ErrorResult, 1) + result.Results[0].Error = common.ServerError(common.ErrPerm) + return nil + }) + + batches := []params.MetricBatchParam{{ + Tag: names.NewUnitTag("test-unit/0").String(), + Batch: params.MetricBatch{ + UUID: utils.MustNewUUID().String(), + CharmURL: "test-charm-url", + Created: time.Now(), + Metrics: []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}}, + }, + }} + + results, err := s.adder.AddMetricBatches(batches) + c.Assert(err, jc.ErrorIsNil) + result, ok := results[batches[0].Batch.UUID] + c.Assert(ok, jc.IsTrue) + c.Assert(result.Error(), gc.Equals, "permission denied") + 
c.Assert(called, jc.IsTrue) +} + +type metricsAdderIntegrationSuite struct { + jujutesting.JujuConnSuite + + adder *metricsadder.Client + unitTag names.Tag +} + +var _ = gc.Suite(&metricsAdderIntegrationSuite{}) + +func (s *metricsAdderIntegrationSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + f := factory.NewFactory(s.State) + machine0 := f.MakeMachine(c, &factory.MachineParams{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + }) + + meteredCharm := f.MakeCharm(c, &factory.CharmParams{ + Name: "metered", + URL: "cs:quantal/metered", + }) + meteredService := f.MakeService(c, &factory.ServiceParams{ + Charm: meteredCharm, + }) + meteredUnit := f.MakeUnit(c, &factory.UnitParams{ + Service: meteredService, + SetCharmURL: true, + Machine: machine0, + }) + + state, _ := s.OpenAPIAsNewMachine(c) + s.adder = metricsadder.NewClient(state) + s.unitTag = meteredUnit.Tag() +} + +func (s *metricsAdderIntegrationSuite) TestAddMetricBatches(c *gc.C) { + batches := []params.MetricBatchParam{{ + Tag: s.unitTag.String(), + Batch: params.MetricBatch{ + UUID: utils.MustNewUUID().String(), + CharmURL: "cs:quantal/metered", + Created: time.Now(), + Metrics: []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}}, + }, + }} + + results, err := s.adder.AddMetricBatches(batches) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.HasLen, 1) + result, ok := results[batches[0].Batch.UUID] + c.Assert(ok, jc.IsTrue) + c.Assert(result, gc.IsNil) + + stateBatches, err := s.State.AllMetricBatches() + c.Assert(err, jc.ErrorIsNil) + c.Assert(stateBatches, gc.HasLen, 1) + c.Assert(stateBatches[0].CharmURL(), gc.Equals, batches[0].Batch.CharmURL) + c.Assert(stateBatches[0].UUID(), gc.Equals, batches[0].Batch.UUID) + c.Assert(stateBatches[0].ModelUUID(), gc.Equals, s.State.ModelUUID()) + c.Assert(stateBatches[0].Unit(), gc.Equals, s.unitTag.Id()) +} === added file 'src/github.com/juju/juju/api/metricsadder/export_test.go' --- src/github.com/juju/juju/api/metricsadder/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/metricsadder/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package metricsadder + +import ( + "github.com/juju/juju/api/base/testing" +) + +// PatchFacadeCall patches the State's facade such that +// FacadeCall method calls are diverted to the provided +// function. +func PatchFacadeCall(p testing.Patcher, client *Client, f func(request string, params, response interface{}) error) { + testing.PatchFacadeCall(p, &client.facade, f) +} === added file 'src/github.com/juju/juju/api/metricsadder/package_test.go' --- src/github.com/juju/juju/api/metricsadder/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/metricsadder/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package metricsadder_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === added directory 'src/github.com/juju/juju/api/metricsdebug' === added file 'src/github.com/juju/juju/api/metricsdebug/client.go' --- src/github.com/juju/juju/api/metricsdebug/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/metricsdebug/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,81 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +// Package metricsdebug contains the implementation of a client to +// access metrics debug functions within state. +package metricsdebug + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" +) + +// Client provides access to the metrics debug API. +type Client struct { + base.ClientFacade + st api.Connection + facade base.FacadeCaller +} + +// MetricsDebugClient defines the methods on the metricsdebug API end point. +type MetricsDebugClient interface { + // GetMetrics retrieves metrics collected by the given entity tag. + GetMetrics(tag string) ([]params.MetricResult, error) +} + +// MeterStatusClient defines methods on the metricsdebug API end point. +type MeterStatusClient interface { + // SetMeterStatus will set the meter status on the given entity tag. + SetMeterStatus(tag, code, info string) error +} + +var _ MetricsDebugClient = (*Client)(nil) +var _ MeterStatusClient = (*Client)(nil) + +// NewClient creates a new client for accessing the metricsdebug API. +func NewClient(st base.APICallCloser) *Client { + frontend, backend := base.NewClientFacade(st, "MetricsDebug") + return &Client{ClientFacade: frontend, facade: backend} +} + +// GetMetrics retrieves metrics collected by the given entity. +func (c *Client) GetMetrics(tag string) ([]params.MetricResult, error) { + p := params.Entities{Entities: []params.Entity{ + {tag}, + }} + results := new(params.MetricResults) + if err := c.facade.FacadeCall("GetMetrics", p, results); err != nil { + return nil, errors.Trace(err) + } + if err := results.OneError(); err != nil { + return nil, errors.Trace(err) + } + metrics := []params.MetricResult{} + for _, r := range results.Results { + metrics = append(metrics, r.Metrics...) + } + return metrics, nil +} + +// SetMeterStatus will set the meter status on the given entity tag. +func (c *Client) SetMeterStatus(tag, code, info string) error { + args := params.MeterStatusParams{ + Statuses: []params.MeterStatusParam{{ + Tag: tag, + Code: code, + Info: info, + }, + }, + } + results := new(params.ErrorResults) + if err := c.facade.FacadeCall("SetMeterStatus", args, results); err != nil { + return errors.Trace(err) + } + if err := results.OneError(); err != nil { + return errors.Trace(err) + } + return nil +} === added file 'src/github.com/juju/juju/api/metricsdebug/client_test.go' --- src/github.com/juju/juju/api/metricsdebug/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/metricsdebug/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,342 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package metricsdebug_test + +import ( + "errors" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + basetesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/metricsdebug" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type metricsdebugSuiteMock struct { + testing.BaseSuite + manager *metricsdebug.Client +} + +var _ = gc.Suite(&metricsdebugSuiteMock{}) + +func (s *metricsdebugSuiteMock) TestGetMetrics(c *gc.C) { + var called bool + now := time.Now() + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, response interface{}, + ) error { + c.Assert(request, gc.Equals, "GetMetrics") + result := response.(*params.MetricResults) + result.Results = []params.EntityMetrics{{ + Metrics: []params.MetricResult{{ + Key: "pings", + Value: "5", + Time: now, + }}, + Error: nil, + }} + called = true + return nil + }) + client := metricsdebug.NewClient(apiCaller) + metrics, err := client.GetMetrics("unit-wordpress/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) + c.Assert(metrics, gc.HasLen, 1) + c.Assert(metrics[0].Key, gc.Equals, "pings") + c.Assert(metrics[0].Value, gc.Equals, "5") + c.Assert(metrics[0].Time, gc.Equals, now) +} + +func (s *metricsdebugSuiteMock) TestGetMetricsFails(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, response interface{}, + ) error { + c.Assert(request, gc.Equals, "GetMetrics") + result := response.(*params.MetricResults) + result.Results = []params.EntityMetrics{{ + Error: common.ServerError(errors.New("an error")), + }} + called = true + return nil + }) + client := metricsdebug.NewClient(apiCaller) + metrics, err := client.GetMetrics("unit-wordpress/0") + c.Assert(err, gc.ErrorMatches, "an error") + c.Assert(metrics, gc.IsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *metricsdebugSuiteMock) TestGetMetricsFacadeCallError(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + return errors.New("an error") + }) + client := metricsdebug.NewClient(apiCaller) + metrics, err := client.GetMetrics("unit-wordpress/0") + c.Assert(err, gc.ErrorMatches, "an error") + c.Assert(metrics, gc.IsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *metricsdebugSuiteMock) TestSetMeterStatus(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, response interface{}, + ) error { + c.Assert(request, gc.Equals, "SetMeterStatus") + c.Assert(a, gc.DeepEquals, params.MeterStatusParams{ + Statuses: []params.MeterStatusParam{{ + Tag: "unit-metered/0", + Code: "RED", + Info: "test"}, + }, + }) + result := response.(*params.ErrorResults) + result.Results = []params.ErrorResult{{ + Error: nil, + }} + called = true + return nil + }) + client := metricsdebug.NewClient(apiCaller) + err := client.SetMeterStatus("unit-metered/0", "RED", "test") + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *metricsdebugSuiteMock) TestSetMeterStatusAPIServerError(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request 
string, + a, response interface{}, + ) error { + c.Assert(request, gc.Equals, "SetMeterStatus") + c.Assert(a, gc.DeepEquals, params.MeterStatusParams{ + Statuses: []params.MeterStatusParam{{ + Tag: "unit-metered/0", + Code: "RED", + Info: "test"}, + }, + }) + result := response.(*params.ErrorResults) + result.Results = []params.ErrorResult{{ + Error: common.ServerError(errors.New("an error")), + }} + called = true + return nil + }) + client := metricsdebug.NewClient(apiCaller) + err := client.SetMeterStatus("unit-metered/0", "RED", "test") + c.Assert(err, gc.ErrorMatches, "an error") + c.Assert(called, jc.IsTrue) +} + +func (s *metricsdebugSuiteMock) TestSetMeterStatusFacadeCallError(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, response interface{}, + ) error { + called = true + return errors.New("an error") + }) + client := metricsdebug.NewClient(apiCaller) + err := client.SetMeterStatus("unit-metered/0", "RED", "test") + c.Assert(err, gc.ErrorMatches, "an error") + c.Assert(called, jc.IsTrue) +} + +type metricsdebugSuite struct { + jujutesting.JujuConnSuite + manager *metricsdebug.Client +} + +var _ = gc.Suite(&metricsdebugSuite{}) + +func (s *metricsdebugSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + s.manager = metricsdebug.NewClient(s.APIState) + c.Assert(s.manager, gc.NotNil) +} + +func assertSameMetric(c *gc.C, a params.MetricResult, b *state.MetricBatch) { + c.Assert(a.Key, gc.Equals, b.Metrics()[0].Key) + c.Assert(a.Value, gc.Equals, b.Metrics()[0].Value) + c.Assert(a.Time, jc.TimeBetween(b.Metrics()[0].Time, b.Metrics()[0].Time)) +} + +func (s *metricsdebugSuite) TestFeatureGetMetrics(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeService(c, &factory.ServiceParams{Charm: meteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + metric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit}) + metrics, err := s.manager.GetMetrics("unit-metered/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics, gc.HasLen, 1) + assertSameMetric(c, metrics[0], metric) +} + +func (s *metricsdebugSuite) TestFeatureGetMultipleMetrics(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeService(c, &factory.ServiceParams{ + Charm: meteredCharm, + }) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + + metricUnit0 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit0, + }) + metricUnit1 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit1, + }) + + metrics0, err := s.manager.GetMetrics("unit-metered/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics0, gc.HasLen, 1) + assertSameMetric(c, metrics0[0], metricUnit0) + + metrics1, err := s.manager.GetMetrics("unit-metered/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics1, gc.HasLen, 1) + assertSameMetric(c, metrics1[0], metricUnit1) +} + +func (s *metricsdebugSuite) TestFeatureGetMultipleMetricsWithService(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeService(c, &factory.ServiceParams{ + Charm: meteredCharm, 
+ }) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + + metricUnit0 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit0, + }) + metricUnit1 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit1, + }) + + metrics, err := s.manager.GetMetrics("service-metered") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics, gc.HasLen, 2) + assertSameMetric(c, metrics[0], metricUnit0) + assertSameMetric(c, metrics[1], metricUnit1) +} + +func (s *metricsdebugSuite) TestSetMeterStatus(c *gc.C) { + testCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + testService := s.Factory.MakeService(c, &factory.ServiceParams{Charm: testCharm}) + testUnit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: testService, SetCharmURL: true}) + testUnit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: testService, SetCharmURL: true}) + + csCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + csService := s.Factory.MakeService(c, &factory.ServiceParams{Name: "cs-service", Charm: csCharm}) + csUnit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: csService, SetCharmURL: true}) + + tests := []struct { + about string + tag string + code string + info string + err string + assert func(*gc.C) + }{{ + about: "set service meter status", + tag: testService.Tag().String(), + code: "RED", + info: "test", + assert: func(c *gc.C) { + ms1, err := testUnit1.GetMeterStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ms1, gc.DeepEquals, state.MeterStatus{ + Code: state.MeterRed, + Info: "test", + }) + ms2, err := testUnit2.GetMeterStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ms2, gc.DeepEquals, state.MeterStatus{ + Code: state.MeterRed, + Info: "test", + }) + }, + }, { + about: "set unit meter status", + tag: testUnit1.Tag().String(), + code: "AMBER", + info: "test", + assert: func(c *gc.C) { + ms1, err := testUnit1.GetMeterStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ms1, gc.DeepEquals, state.MeterStatus{ + Code: state.MeterAmber, + Info: "test", + }) + }, + }, { + about: "not a local charm - service", + tag: csService.Tag().String(), + code: "AMBER", + info: "test", + err: "not a local charm", + }, { + about: "not a local charm - unit", + tag: csUnit1.Tag().String(), + code: "AMBER", + info: "test", + err: "not a local charm", + }, { + about: "invalid meter status", + tag: testUnit1.Tag().String(), + code: "WRONG", + info: "test", + err: "invalid meter status \"NOT AVAILABLE\"", + }, { + about: "no such service", + tag: "service-missing", + code: "AMBER", + info: "test", + err: "service \"missing\" not found", + }, + } + + for i, test := range tests { + c.Logf("running test %d: %v", i, test.about) + err := s.manager.SetMeterStatus(test.tag, test.code, test.info) + if test.err == "" { + c.Assert(err, jc.ErrorIsNil) + test.assert(c) + } else { + c.Assert(err, gc.ErrorMatches, test.err) + } + } +} === added file 'src/github.com/juju/juju/api/metricsdebug/package_test.go' --- src/github.com/juju/juju/api/metricsdebug/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/metricsdebug/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
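A minimal driver for the metricsdebug client added above might read as follows (a sketch, not part of the changeset; inspectUnit is a hypothetical name, and the unit tag and status values mirror the tests):

    import (
        "fmt"

        "github.com/juju/juju/api/base"
        "github.com/juju/juju/api/metricsdebug"
    )

    // inspectUnit dumps a unit's collected metrics, then marks its meter RED,
    // using only the GetMetrics and SetMeterStatus methods shown above.
    func inspectUnit(api base.APICallCloser) error {
        client := metricsdebug.NewClient(api)
        metrics, err := client.GetMetrics("unit-metered/0")
        if err != nil {
            return err
        }
        for _, m := range metrics {
            fmt.Printf("%s=%s at %s\n", m.Key, m.Value, m.Time)
        }
        // Status codes are validated server-side, as the table test above shows.
        return client.SetMeterStatus("unit-metered/0", "RED", "too many pings")
    }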
+ +package metricsdebug_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/api/metricsmanager/client.go' --- src/github.com/juju/juju/api/metricsmanager/client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/metricsmanager/client.go 2016-03-22 15:18:22 +0000 @@ -7,17 +7,16 @@ import ( "github.com/juju/errors" + "github.com/juju/names" - "github.com/juju/juju/api" "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" ) // Client provides access to the metrics manager api type Client struct { - base.ClientFacade - st api.Connection - facade base.FacadeCaller + modelTag names.ModelTag + facade base.FacadeCaller } // MetricsManagerClient defines the methods on the metricsmanager API end point. @@ -29,23 +28,26 @@ var _ MetricsManagerClient = (*Client)(nil) // NewClient creates a new client for accessing the metricsmanager api -func NewClient(st api.Connection) *Client { - frontend, backend := base.NewClientFacade(st, "MetricsManager") - return &Client{ClientFacade: frontend, st: st, facade: backend} +func NewClient(apiCaller base.APICaller) (*Client, error) { + modelTag, err := apiCaller.ModelTag() + if err != nil { + return nil, errors.Trace(err) + } + facade := base.NewFacadeCaller(apiCaller, "MetricsManager") + return &Client{ + modelTag: modelTag, + facade: facade, + }, nil } // CleanupOldMetrics looks for metrics that are 24 hours old (or older) // and have been sent. Any metrics it finds are deleted. func (c *Client) CleanupOldMetrics() error { - envTag, err := c.st.EnvironTag() - if err != nil { - return errors.Trace(err) - } p := params.Entities{Entities: []params.Entity{ - {envTag.String()}, + {c.modelTag.String()}, }} - results := new(params.ErrorResults) - err = c.facade.FacadeCall("CleanupOldMetrics", p, results) + var results params.ErrorResults + err := c.facade.FacadeCall("CleanupOldMetrics", p, &results) if err != nil { return errors.Trace(err) } @@ -54,15 +56,11 @@ // SendMetrics will send any unsent metrics to the collection service. 
func (c *Client) SendMetrics() error { - envTag, err := c.st.EnvironTag() - if err != nil { - return errors.Trace(err) - } p := params.Entities{Entities: []params.Entity{ - {envTag.String()}, + {c.modelTag.String()}, }} - results := new(params.ErrorResults) - err = c.facade.FacadeCall("SendMetrics", p, results) + var results params.ErrorResults + err := c.facade.FacadeCall("SendMetrics", p, &results) if err != nil { return errors.Trace(err) } === modified file 'src/github.com/juju/juju/api/metricsmanager/client_test.go' --- src/github.com/juju/juju/api/metricsmanager/client_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/metricsmanager/client_test.go 2016-03-22 15:18:22 +0000 @@ -23,8 +23,10 @@ func (s *metricsManagerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.manager = metricsmanager.NewClient(s.APIState) - c.Assert(s.manager, gc.NotNil) + manager, err := metricsmanager.NewClient(s.APIState) + c.Assert(err, jc.ErrorIsNil) + c.Assert(manager, gc.NotNil) + s.manager = manager } func (s *metricsManagerSuite) TestCleanupOldMetrics(c *gc.C) { === added directory 'src/github.com/juju/juju/api/modelmanager' === added file 'src/github.com/juju/juju/api/modelmanager/modelmanager.go' --- src/github.com/juju/juju/api/modelmanager/modelmanager.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/modelmanager/modelmanager.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,102 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelmanager + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" +) + +var logger = loggo.GetLogger("juju.api.modelmanager") + +// Client provides methods that the Juju client command uses to interact +// with models stored in the Juju Server. +type Client struct { + base.ClientFacade + facade base.FacadeCaller +} + +// NewClient creates a new `Client` based on an existing authenticated API +// connection. +func NewClient(st base.APICallCloser) *Client { + frontend, backend := base.NewClientFacade(st, "ModelManager") + logger.Debugf("%#v", frontend) + return &Client{ClientFacade: frontend, facade: backend} +} + +// Close closes the api connection. +func (c *Client) Close() error { + return c.ClientFacade.Close() +} + +// ConfigSkeleton returns config values to be used as a starting point for the +// API caller to construct a valid model specific config. The provider +// and region params are there for future use, and current behaviour expects +// both of these to be empty. +func (c *Client) ConfigSkeleton(provider, region string) (params.ModelConfig, error) { + var result params.ModelConfigResult + args := params.ModelSkeletonConfigArgs{ + Provider: provider, + Region: region, + } + err := c.facade.FacadeCall("ConfigSkeleton", args, &result) + if err != nil { + return nil, errors.Trace(err) + } + return result.Config, nil +} + +// CreateModel creates a new model using the account and +// model config specified in the args. 
+func (c *Client) CreateModel(owner string, account, config map[string]interface{}) (params.Model, error) { + var result params.Model + if !names.IsValidUser(owner) { + return result, errors.Errorf("invalid owner name %q", owner) + } + createArgs := params.ModelCreateArgs{ + OwnerTag: names.NewUserTag(owner).String(), + Account: account, + Config: config, + } + err := c.facade.FacadeCall("CreateModel", createArgs, &result) + if err != nil { + return result, errors.Trace(err) + } + logger.Infof("created model %s (%s)", result.Name, result.UUID) + return result, nil +} + +// ListModels returns the models that the specified user +// has access to in the current server. Only the controller owner +// can list models for any user (at this stage). Other users +// can only ask about their own models. +func (c *Client) ListModels(user string) ([]base.UserModel, error) { + var models params.UserModelList + if !names.IsValidUser(user) { + return nil, errors.Errorf("invalid user name %q", user) + } + entity := params.Entity{names.NewUserTag(user).String()} + err := c.facade.FacadeCall("ListModels", entity, &models) + if err != nil { + return nil, errors.Trace(err) + } + result := make([]base.UserModel, len(models.UserModels)) + for i, model := range models.UserModels { + owner, err := names.ParseUserTag(model.OwnerTag) + if err != nil { + return nil, errors.Annotatef(err, "OwnerTag %q at position %d", model.OwnerTag, i) + } + result[i] = base.UserModel{ + Name: model.Name, + UUID: model.UUID, + Owner: owner.Canonical(), + LastConnection: model.LastConnection, + } + } + return result, nil +} === added file 'src/github.com/juju/juju/api/modelmanager/modelmanager_test.go' --- src/github.com/juju/juju/api/modelmanager/modelmanager_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/modelmanager/modelmanager_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,102 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelmanager_test + +import ( + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/modelmanager" + "github.com/juju/juju/apiserver/params" + jujutesting "github.com/juju/juju/juju/testing" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type modelmanagerSuite struct { + jujutesting.JujuConnSuite +} + +var _ = gc.Suite(&modelmanagerSuite{}) + +func (s *modelmanagerSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) +} + +func (s *modelmanagerSuite) OpenAPI(c *gc.C) *modelmanager.Client { + return modelmanager.NewClient(s.APIState) +} + +func (s *modelmanagerSuite) TestConfigSkeleton(c *gc.C) { + modelManager := s.OpenAPI(c) + result, err := modelManager.ConfigSkeleton("", "") + c.Assert(err, jc.ErrorIsNil) + + // The apiPort changes every test run as the dummy provider + // looks for a random open port. + apiPort := s.Environ.Config().APIPort() + + // Numbers coming over the API are floats, not ints.
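As a sketch of the two modelmanager calls just defined (not part of the changeset; createAndList is hypothetical, the owner name is illustrative, and a real config needs more keys, as the tests below demonstrate):

    import (
        "fmt"

        "github.com/juju/juju/api/base"
        "github.com/juju/juju/api/modelmanager"
    )

    // createAndList is a hypothetical helper: both calls validate the
    // user name client-side before the ModelManager facade is invoked.
    func createAndList(api base.APICallCloser) error {
        client := modelmanager.NewClient(api)
        defer client.Close()

        model, err := client.CreateModel("bob@local", nil, map[string]interface{}{
            "name": "sketch-model", // real callers supply a fuller config
        })
        if err != nil {
            return err
        }
        fmt.Println("created model", model.UUID)

        models, err := client.ListModels("bob@local")
        if err != nil {
            return err
        }
        for _, m := range models {
            fmt.Printf("%s/%s (%s)\n", m.Owner, m.Name, m.UUID)
        }
        return nil
    }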
+ c.Assert(result, jc.DeepEquals, params.ModelConfig{ + "type": "dummy", + "ca-cert": coretesting.CACert, + "state-port": float64(1234), + "api-port": float64(apiPort), + }) + +} + +func (s *modelmanagerSuite) TestCreateModelBadUser(c *gc.C) { + modelManager := s.OpenAPI(c) + _, err := modelManager.CreateModel("not a user", nil, nil) + c.Assert(err, gc.ErrorMatches, `invalid owner name "not a user"`) +} + +func (s *modelmanagerSuite) TestCreateModelMissingConfig(c *gc.C) { + modelManager := s.OpenAPI(c) + _, err := modelManager.CreateModel("owner", nil, nil) + c.Assert(err, gc.ErrorMatches, `creating config from values failed: name: expected string, got nothing`) +} + +func (s *modelmanagerSuite) TestCreateModel(c *gc.C) { + modelManager := s.OpenAPI(c) + user := s.Factory.MakeUser(c, nil) + owner := user.UserTag().Canonical() + newEnv, err := modelManager.CreateModel(owner, nil, map[string]interface{}{ + "name": "new-model", + "authorized-keys": "ssh-key", + // dummy needs controller + "controller": false, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(newEnv.Name, gc.Equals, "new-model") + c.Assert(newEnv.OwnerTag, gc.Equals, user.Tag().String()) + c.Assert(utils.IsValidUUIDString(newEnv.UUID), jc.IsTrue) +} + +func (s *modelmanagerSuite) TestListModelsBadUser(c *gc.C) { + modelManager := s.OpenAPI(c) + _, err := modelManager.ListModels("not a user") + c.Assert(err, gc.ErrorMatches, `invalid user name "not a user"`) +} + +func (s *modelmanagerSuite) TestListModels(c *gc.C) { + owner := names.NewUserTag("user@remote") + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "first", Owner: owner}).Close() + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "second", Owner: owner}).Close() + + modelManager := s.OpenAPI(c) + models, err := modelManager.ListModels("user@remote") + c.Assert(err, jc.ErrorIsNil) + c.Assert(models, gc.HasLen, 2) + + envNames := []string{models[0].Name, models[1].Name} + c.Assert(envNames, jc.DeepEquals, []string{"first", "second"}) + ownerNames := []string{models[0].Owner, models[1].Owner} + c.Assert(ownerNames, jc.DeepEquals, []string{"user@remote", "user@remote"}) +} === added file 'src/github.com/juju/juju/api/modelmanager/package_test.go' --- src/github.com/juju/juju/api/modelmanager/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/modelmanager/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelmanager_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === removed directory 'src/github.com/juju/juju/api/networker' === removed file 'src/github.com/juju/juju/api/networker/export_test.go' --- src/github.com/juju/juju/api/networker/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/networker/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker - -import ( - "github.com/juju/juju/api/base/testing" -) - -// PatchFacadeCall patches the State's facade such that -// FacadeCall method calls are diverted to the provided -// function. -func PatchFacadeCall(p testing.Patcher, st State, f func(request string, params, response interface{}) error) { - st0 := st.(*state) // *state is the only implementation of State. 
- testing.PatchFacadeCall(p, &st0.facade, f) -} === removed file 'src/github.com/juju/juju/api/networker/networker.go' --- src/github.com/juju/juju/api/networker/networker.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/networker/networker.go 1970-01-01 00:00:00 +0000 @@ -1,107 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" -) - -const networkerFacade = "Networker" - -// State provides access to an networker worker's view of the state. -// -// NOTE: This is defined as an interface due to PPC64 bug #1424669 - -// if it were a type build errors happen (due to a linker bug). -type State interface { - MachineNetworkConfig(names.MachineTag) ([]network.InterfaceInfo, error) - WatchInterfaces(names.MachineTag) (watcher.NotifyWatcher, error) -} - -var _ State = (*state)(nil) - -// state implements State. -type state struct { - facade base.FacadeCaller -} - -// NewState creates a new client-side Machiner facade. -func NewState(caller base.APICaller) State { - return &state{base.NewFacadeCaller(caller, networkerFacade)} -} - -// MachineNetworkConfig returns information about network interfaces to -// setup only for a single machine. -func (st *state) MachineNetworkConfig(tag names.MachineTag) ([]network.InterfaceInfo, error) { - args := params.Entities{ - Entities: []params.Entity{{Tag: tag.String()}}, - } - var results params.MachineNetworkConfigResults - err := st.facade.FacadeCall("MachineNetworkConfig", args, &results) - if err != nil { - if params.IsCodeNotImplemented(err) { - // Fallback to former name. - err = st.facade.FacadeCall("MachineNetworkInfo", args, &results) - } - if err != nil { - // TODO: Not directly tested. - return nil, err - } - } - if len(results.Results) != 1 { - // TODO: Not directly tested - err = errors.Errorf("expected one result, got %d", len(results.Results)) - return nil, err - } - result := results.Results[0] - if result.Error != nil { - return nil, result.Error - } - interfaceInfo := make([]network.InterfaceInfo, len(result.Config)) - for i, ifaceInfo := range result.Config { - interfaceInfo[i].DeviceIndex = ifaceInfo.DeviceIndex - interfaceInfo[i].MACAddress = ifaceInfo.MACAddress - interfaceInfo[i].CIDR = ifaceInfo.CIDR - interfaceInfo[i].NetworkName = ifaceInfo.NetworkName - interfaceInfo[i].ProviderId = network.Id(ifaceInfo.ProviderId) - interfaceInfo[i].VLANTag = ifaceInfo.VLANTag - interfaceInfo[i].InterfaceName = ifaceInfo.InterfaceName - interfaceInfo[i].Disabled = ifaceInfo.Disabled - // TODO(dimitern) Once we store all the information from - // network.InterfaceInfo in state, change this as needed to - // return it. - } - - return interfaceInfo, nil -} - -// WatchInterfaces returns a NotifyWatcher that notifies of changes to network -// interfaces on the machine. 
-func (st *state) WatchInterfaces(tag names.MachineTag) (watcher.NotifyWatcher, error) { - args := params.Entities{ - Entities: []params.Entity{{Tag: tag.String()}}, - } - var results params.NotifyWatchResults - err := st.facade.FacadeCall("WatchInterfaces", args, &results) - if err != nil { - // TODO: Not directly tested - return nil, err - } - if len(results.Results) != 1 { - // TODO: Not directly tested - err = errors.Errorf("expected one result, got %d", len(results.Results)) - return nil, err - } - result := results.Results[0] - if result.Error != nil { - return nil, result.Error - } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) - return w, nil -} === removed file 'src/github.com/juju/juju/api/networker/networker_test.go' --- src/github.com/juju/juju/api/networker/networker_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/networker/networker_test.go 1970-01-01 00:00:00 +0000 @@ -1,413 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - "runtime" - "sort" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/networker" - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/instance" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" -) - -type networkerSuite struct { - testing.JujuConnSuite - - networks []state.NetworkInfo - - machine *state.Machine - container *state.Machine - nestedContainer *state.Machine - - machineIfaces []state.NetworkInterfaceInfo - containerIfaces []state.NetworkInterfaceInfo - nestedContainerIfaces []state.NetworkInterfaceInfo - - st api.Connection - networker networker.State -} - -var _ = gc.Suite(&networkerSuite{}) - -// Create several networks. -func (s *networkerSuite) setUpNetworks(c *gc.C) { - s.networks = []state.NetworkInfo{{ - Name: "net1", - ProviderId: "net1", - CIDR: "0.1.2.0/24", - VLANTag: 0, - }, { - Name: "vlan42", - ProviderId: "vlan42", - CIDR: "0.2.2.0/24", - VLANTag: 42, - }, { - Name: "vlan69", - ProviderId: "vlan69", - CIDR: "0.3.2.0/24", - VLANTag: 69, - }, { - Name: "vlan123", - ProviderId: "vlan123", - CIDR: "0.4.2.0/24", - VLANTag: 123, - }, { - Name: "net2", - ProviderId: "net2", - CIDR: "0.5.2.0/24", - VLANTag: 0, - }} -} - -// Create a machine and login to it. 
-func (s *networkerSuite) setUpMachine(c *gc.C) { - var err error - s.machine, err = s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = s.machine.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - hwChars := instance.MustParseHardware("cpu-cores=123", "mem=4G") - s.machineIfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0.69", - NetworkName: "vlan69", - IsVirtual: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f2", - InterfaceName: "eth2", - NetworkName: "net2", - IsVirtual: false, - Disabled: true, - }} - err = s.machine.SetInstanceInfo("i-am", "fake_nonce", &hwChars, s.networks, s.machineIfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) - s.st = s.OpenAPIAsMachine(c, s.machine.Tag(), password, "fake_nonce") - c.Assert(s.st, gc.NotNil) -} - -// Create and provision a container and a nested container. -func (s *networkerSuite) setUpContainers(c *gc.C) { - template := state.MachineTemplate{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - } - var err error - s.container, err = s.State.AddMachineInsideMachine(template, s.machine.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - s.containerIfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:e0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }} - hwChars := instance.MustParseHardware("arch=i386", "mem=4G") - err = s.container.SetInstanceInfo("i-container", "fake_nonce", &hwChars, s.networks[:2], - s.containerIfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - s.nestedContainer, err = s.State.AddMachineInsideMachine(template, s.container.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - s.nestedContainerIfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:d0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }} - err = s.nestedContainer.SetInstanceInfo("i-too", "fake_nonce", &hwChars, s.networks[:1], - s.nestedContainerIfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *networkerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - s.setUpNetworks(c) - s.setUpMachine(c) - s.setUpContainers(c) - - // Create the networker API facade. 
- s.networker = s.st.Networker() - c.Assert(s.networker, gc.NotNil) -} - -func (s *networkerSuite) TestMachineNetworkConfigPermissionDenied(c *gc.C) { - info, err := s.networker.MachineNetworkConfig(names.NewMachineTag("1")) - c.Assert(err, gc.ErrorMatches, "permission denied") - c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) - c.Assert(info, gc.IsNil) -} - -func (s *networkerSuite) TestMachineNetworkConfigNameChange(c *gc.C) { - var called bool - networker.PatchFacadeCall(s, s.networker, func(request string, args, response interface{}) error { - if !called { - called = true - c.Assert(request, gc.Equals, "MachineNetworkConfig") - return &params.Error{"MachineNetworkConfig", params.CodeNotImplemented} - } - c.Assert(request, gc.Equals, "MachineNetworkInfo") - expected := params.Entities{ - Entities: []params.Entity{{Tag: names.NewMachineTag("42").String()}}, - } - c.Assert(args, gc.DeepEquals, expected) - result := response.(*params.MachineNetworkConfigResults) - result.Results = make([]params.MachineNetworkConfigResult, 1) - result.Results[0].Error = common.ServerError(common.ErrPerm) - return nil - }) - // Make a call, in this case result is "permission denied". - info, err := s.networker.MachineNetworkConfig(names.NewMachineTag("42")) - c.Assert(err, gc.ErrorMatches, "permission denied") - c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) - c.Assert(info, gc.IsNil) -} - -type orderedIfc []network.InterfaceInfo - -func (o orderedIfc) Len() int { - return len(o) -} - -func (o orderedIfc) Less(i, j int) bool { - if o[i].MACAddress < o[j].MACAddress { - return true - } - if o[i].MACAddress > o[j].MACAddress { - return false - } - if o[i].CIDR < o[j].CIDR { - return true - } - if o[i].CIDR > o[j].CIDR { - return false - } - if o[i].NetworkName < o[j].NetworkName { - return true - } - if o[i].NetworkName > o[j].NetworkName { - return false - } - return o[i].VLANTag < o[j].VLANTag -} - -func (o orderedIfc) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} - -func (s *networkerSuite) TestMachineNetworkConfig(c *gc.C) { - // TODO(bogdanteleaga): Find out what's the problem with this test - // It seems to work on some machines - if runtime.GOOS == "windows" { - c.Skip("bug 1403084: currently does not work on windows") - } - // Expected results of MachineNetworkInfo for a machine and containers - expectedMachineInfo := []network.InterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:f0", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth0", - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth1", - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "0.2.2.0/24", - NetworkName: "vlan42", - ProviderId: "vlan42", - VLANTag: 42, - InterfaceName: "eth1", - }, { - MACAddress: "aa:bb:cc:dd:ee:f0", - CIDR: "0.3.2.0/24", - NetworkName: "vlan69", - ProviderId: "vlan69", - VLANTag: 69, - InterfaceName: "eth0", - }, { - MACAddress: "aa:bb:cc:dd:ee:f2", - CIDR: "0.5.2.0/24", - NetworkName: "net2", - ProviderId: "net2", - VLANTag: 0, - InterfaceName: "eth2", - Disabled: true, - }} - sort.Sort(orderedIfc(expectedMachineInfo)) - - expectedContainerInfo := []network.InterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:e0", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth0", - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth1", - }, { - 
MACAddress: "aa:bb:cc:dd:ee:e1", - CIDR: "0.2.2.0/24", - NetworkName: "vlan42", - ProviderId: "vlan42", - VLANTag: 42, - InterfaceName: "eth1", - }} - sort.Sort(orderedIfc(expectedContainerInfo)) - - expectedNestedContainerInfo := []network.InterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:d0", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth0", - }} - sort.Sort(orderedIfc(expectedNestedContainerInfo)) - - results, err := s.networker.MachineNetworkConfig(names.NewMachineTag("0")) - c.Assert(err, jc.ErrorIsNil) - sort.Sort(orderedIfc(results)) - c.Assert(results, gc.DeepEquals, expectedMachineInfo) - - results, err = s.networker.MachineNetworkConfig(names.NewMachineTag("0/lxc/0")) - c.Assert(err, jc.ErrorIsNil) - sort.Sort(orderedIfc(results)) - c.Assert(results, gc.DeepEquals, expectedContainerInfo) - - results, err = s.networker.MachineNetworkConfig(names.NewMachineTag("0/lxc/0/lxc/0")) - c.Assert(err, jc.ErrorIsNil) - sort.Sort(orderedIfc(results)) - c.Assert(results, gc.DeepEquals, expectedNestedContainerInfo) -} - -func (s *networkerSuite) TestWatchInterfacesPermissionDenied(c *gc.C) { - w, err := s.networker.WatchInterfaces(names.NewMachineTag("1")) - c.Assert(err, gc.ErrorMatches, "permission denied") - c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) - c.Assert(w, gc.IsNil) -} - -func (s *networkerSuite) TestWatchInterfaces(c *gc.C) { - // Read dynamically generated document Ids. - ifaces, err := s.machine.NetworkInterfaces() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ifaces, gc.HasLen, 5) - - // Start network interface watcher. - w, err := s.networker.WatchInterfaces(names.NewMachineTag("0")) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) - wc.AssertOneChange() - - // Disable the first interface. - err = ifaces[0].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Disable the first interface again, should not report. - err = ifaces[0].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Enable the first interface. - err = ifaces[0].Enable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Enable the first interface again, should not report. - err = ifaces[0].Enable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Remove the network interface. - err = ifaces[0].Remove() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Add the new interface. - _, err = s.machine.AddNetworkInterface(state.NetworkInterfaceInfo{ - MACAddress: "aa:bb:cc:dd:ee:f3", - InterfaceName: "eth3", - NetworkName: "net2", - }) - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Add the new interface on the container, should not report. - _, err = s.container.AddNetworkInterface(state.NetworkInterfaceInfo{ - MACAddress: "aa:bb:cc:dd:ee:e3", - InterfaceName: "eth3", - NetworkName: "net2", - }) - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Read dynamically generated document Ids. - containerIfaces, err := s.container.NetworkInterfaces() - c.Assert(err, jc.ErrorIsNil) - c.Assert(containerIfaces, gc.HasLen, 4) - - // Disable the first interface on the second machine, should not report. - err = containerIfaces[0].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Remove the network interface on the second machine, should not report. - err = containerIfaces[0].Remove() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Stop watcher; check Changes chan closed. 
- statetesting.AssertStop(c, w) - wc.AssertClosed() -} === removed file 'src/github.com/juju/juju/api/networker/package_test.go' --- src/github.com/juju/juju/api/networker/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/networker/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === modified file 'src/github.com/juju/juju/api/provisioner/machine.go' --- src/github.com/juju/juju/api/provisioner/machine.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/provisioner/machine.go 2016-03-22 15:18:22 +0000 @@ -8,9 +8,10 @@ "github.com/juju/names" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" + "github.com/juju/juju/watcher" ) // Machine represents a juju machine as seen by the provisioner worker. @@ -274,7 +275,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) return w, nil } @@ -298,7 +299,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(m.st.facade.RawAPICaller(), result) return w, nil } === added file 'src/github.com/juju/juju/api/provisioner/package_test.go' --- src/github.com/juju/juju/api/provisioner/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/provisioner/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner_test + +import ( + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/api/provisioner/provisioner.go' --- src/github.com/juju/juju/api/provisioner/provisioner.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/provisioner/provisioner.go 2016-03-22 15:18:22 +0000 @@ -9,16 +9,17 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher" ) // State provides access to the Machiner API facade. type State struct { - *common.EnvironWatcher + *common.ModelWatcher *common.APIAddresser facade base.FacadeCaller @@ -30,9 +31,9 @@ func NewState(caller base.APICaller) *State { facadeCaller := base.NewFacadeCaller(caller, provisionerFacade) return &State{ - EnvironWatcher: common.NewEnvironWatcher(facadeCaller), - APIAddresser: common.NewAPIAddresser(facadeCaller), - facade: facadeCaller} + ModelWatcher: common.NewModelWatcher(facadeCaller), + APIAddresser: common.NewAPIAddresser(facadeCaller), + facade: facadeCaller} } // machineLife requests the lifecycle of the given machine from the server. 
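The hunk that follows renames WatchEnvironMachines to WatchModelMachines but keeps the StringsWatcher contract, so consumption is unchanged. A minimal sketch (not part of the changeset; the process callback is hypothetical, the bare Kill/Wait shutdown is sketch-level only, and it assumes the watcher carries the usual worker Kill/Wait methods — real workers wire this into their lifecycle plumbing instead):

    import "github.com/juju/juju/api/provisioner"

    // watchMachines drains machine-lifecycle events until the watcher dies.
    func watchMachines(st *provisioner.State, process func(ids []string)) error {
        w, err := st.WatchModelMachines()
        if err != nil {
            return err
        }
        defer w.Kill() // sketch-level cleanup only
        for ids := range w.Changes() {
            process(ids) // ids of machines (not containers) whose lifecycles changed
        }
        return w.Wait()
    }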
@@ -53,19 +54,19 @@ }, nil } -// WatchEnvironMachines returns a StringsWatcher that notifies of +// WatchModelMachines returns a StringsWatcher that notifies of // changes to the lifecycles of the machines (but not containers) in -// the current environment. -func (st *State) WatchEnvironMachines() (watcher.StringsWatcher, error) { +// the current model. +func (st *State) WatchModelMachines() (watcher.StringsWatcher, error) { var result params.StringsWatchResult - err := st.facade.FacadeCall("WatchEnvironMachines", nil, &result) + err := st.facade.FacadeCall("WatchModelMachines", nil, &result) if err != nil { return nil, err } if err := result.Error; err != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(st.facade.RawAPICaller(), result) return w, nil } @@ -78,7 +79,7 @@ if err := result.Error; err != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) return w, nil } @@ -92,14 +93,14 @@ return result.Result, nil } -// ContainerManagerConfig returns information from the environment config that is +// ContainerManagerConfig returns information from the model config that is // needed for configuring the container manager. func (st *State) ContainerManagerConfig(args params.ContainerManagerConfigParams) (result params.ContainerManagerConfig, err error) { err = st.facade.FacadeCall("ContainerManagerConfig", args, &result) return result, err } -// ContainerConfig returns information from the environment config that is +// ContainerConfig returns information from the model config that is // needed for container cloud-init. func (st *State) ContainerConfig() (result params.ContainerConfig, err error) { err = st.facade.FacadeCall("ContainerConfig", nil, &result) @@ -129,16 +130,16 @@ } // FindTools returns a list of tools matching the specified version number and -// series, and, if non-empty, arch. -func (st *State) FindTools(v version.Number, series string, arch *string) (tools.List, error) { +// series, and arch. If arch is blank, a default will be used.
+func (st *State) FindTools(v version.Number, series string, arch string) (tools.List, error) { args := params.FindToolsParams{ Number: v, Series: series, MajorVersion: -1, MinorVersion: -1, } - if arch != nil { - args.Arch = *arch + if arch != "" { + args.Arch = arch } var result params.FindToolsResult if err := st.facade.FacadeCall("FindTools", args, &result); err != nil { === modified file 'src/github.com/juju/juju/api/provisioner/provisioner_test.go' --- src/github.com/juju/juju/api/provisioner/provisioner_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/provisioner/provisioner_test.go 2016-03-22 15:18:22 +0000 @@ -10,12 +10,13 @@ import ( "fmt" - stdtesting "testing" "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/api" @@ -27,26 +28,20 @@ "github.com/juju/juju/container" "github.com/juju/juju/feature" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/juju/testing" "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/storage/poolmanager" "github.com/juju/juju/storage/provider" - coretesting "github.com/juju/juju/testing" coretools "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher/watchertest" ) -func TestAll(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - type provisionerSuite struct { testing.JujuConnSuite - *apitesting.EnvironWatcherTests + *apitesting.ModelWatcherTests *apitesting.APIAddresserTests st api.Connection @@ -65,7 +60,7 @@ s.SetFeatureFlags(feature.AddressAllocation) var err error - s.machine, err = s.State.AddMachine("quantal", state.JobManageEnviron) + s.machine, err = s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) password, err := utils.RandomPassword() c.Assert(err, jc.ErrorIsNil) @@ -82,22 +77,26 @@ s.provisioner = s.st.Provisioner() c.Assert(s.provisioner, gc.NotNil) - s.EnvironWatcherTests = apitesting.NewEnvironWatcherTests(s.provisioner, s.BackingState, apitesting.HasSecrets) + s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.provisioner, s.BackingState, apitesting.HasSecrets) s.APIAddresserTests = apitesting.NewAPIAddresserTests(s.provisioner, s.BackingState) } func (s *provisionerSuite) TestPrepareContainerInterfaceInfoNoFeatureFlag(c *gc.C) { s.SetFeatureFlags() // clear the flag ifaceInfo, err := s.provisioner.PrepareContainerInterfaceInfo(names.NewMachineTag("42")) - c.Assert(err, gc.ErrorMatches, "address allocation not supported") + // We'll still attempt to reserve an address, in case we're running on MAAS + // 1.8+ and have registered the container as a device. + c.Assert(err, gc.ErrorMatches, "machine 42 not found") c.Assert(ifaceInfo, gc.HasLen, 0) } func (s *provisionerSuite) TestReleaseContainerAddressNoFeatureFlag(c *gc.C) { s.SetFeatureFlags() // clear the flag err := s.provisioner.ReleaseContainerAddresses(names.NewMachineTag("42")) + // We'll still attempt to release all addresses, in case we're running on + // MAAS 1.8+ and have registered the container as a device. 
c.Assert(err, gc.ErrorMatches, - `cannot release static addresses for "42": address allocation not supported`, + `cannot release static addresses for "42": machine 42 not found`, ) } @@ -207,7 +206,7 @@ apiMachine, err = s.provisioner.Machine(s.machine.Tag().(names.MachineTag)) c.Assert(err, jc.ErrorIsNil) err = apiMachine.EnsureDead() - c.Assert(err, gc.ErrorMatches, "machine 0 is required by the environment") + c.Assert(err, gc.ErrorMatches, "machine 0 is required by the model") } func (s *provisionerSuite) TestRefreshAndLife(c *gc.C) { @@ -472,9 +471,9 @@ func (s *provisionerSuite) TestProvisioningInfo(c *gc.C) { // Add a couple of spaces. - _, err := s.State.AddSpace("space1", nil, true) + _, err := s.State.AddSpace("space1", "", nil, true) c.Assert(err, jc.ErrorIsNil) - _, err = s.State.AddSpace("space2", nil, false) + _, err = s.State.AddSpace("space2", "", nil, false) c.Assert(err, jc.ErrorIsNil) // Add 2 subnets into each space. // Only the first subnet of space2 has AllocatableIPLow|High set. @@ -540,8 +539,8 @@ w, err := apiMachine.WatchContainers(instance.LXC) c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertChange(container.Id()) @@ -561,9 +560,6 @@ container, err = s.State.AddMachineInsideMachine(template, s.machine.Id(), instance.LXC) c.Assert(err, jc.ErrorIsNil) wc.AssertChange(container.Id()) - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *provisionerSuite) TestWatchContainersAcceptsSupportedContainers(c *gc.C) { @@ -588,11 +584,11 @@ c.Assert(err, gc.ErrorMatches, "container type must be specified") } -func (s *provisionerSuite) TestWatchEnvironMachines(c *gc.C) { - w, err := s.provisioner.WatchEnvironMachines() +func (s *provisionerSuite) TestWatchModelMachines(c *gc.C) { + w, err := s.provisioner.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertChange(s.machine.Id()) @@ -617,9 +613,6 @@ _, err = s.State.AddMachineInsideMachine(template, s.machine.Id(), instance.LXC) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *provisionerSuite) TestStateAddresses(c *gc.C) { @@ -665,7 +658,7 @@ func (s *provisionerSuite) TestContainerManagerConfigLXC(c *gc.C) { args := params.ContainerManagerConfigParams{Type: instance.LXC} - st, err := state.Open(s.State.EnvironTag(), s.MongoInfo(c), mongo.DefaultDialOpts(), state.Policy(nil)) + st, err := state.Open(s.State.ModelTag(), s.MongoInfo(c), mongo.DefaultDialOpts(), state.Policy(nil)) c.Assert(err, jc.ErrorIsNil) defer st.Close() @@ -701,7 +694,7 @@ // Change lxc-clone, and ensure it gets picked up. 
for i, t := range tests { c.Logf("test %d: %+v", i, t) - err = st.UpdateEnvironConfig(map[string]interface{}{ + err = st.UpdateModelConfig(map[string]interface{}{ "lxc-clone": t.lxcUseClone, "lxc-clone-aufs": t.lxcUseCloneAufs, }, nil, nil) @@ -805,20 +798,30 @@ } func (s *provisionerSuite) testFindTools(c *gc.C, matchArch bool, apiError, logicError error) { - var toolsList = coretools.List{&coretools.Tools{Version: version.Current}} + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + var toolsList = coretools.List{&coretools.Tools{Version: current}} var called bool + var a string + if matchArch { + // If matchArch is true, this will be overwritten with the host's arch; + // otherwise, leave it blank. + a = arch.HostArch() + } + provisioner.PatchFacadeCall(s, s.provisioner, func(request string, args, response interface{}) error { called = true c.Assert(request, gc.Equals, "FindTools") expected := params.FindToolsParams{ - Number: version.Current.Number, - Series: version.Current.Series, + Number: version.Current, + Series: series.HostSeries(), + Arch: a, MinorVersion: -1, MajorVersion: -1, } - if matchArch { - expected.Arch = arch.HostArch() - } c.Assert(args, gc.Equals, expected) result := response.(*params.FindToolsResult) result.List = toolsList @@ -827,13 +830,7 @@ } return apiError }) - - var a *string - if matchArch { - arch := arch.HostArch() - a = &arch - } - apiList, err := s.provisioner.FindTools(version.Current.Number, version.Current.Series, a) + apiList, err := s.provisioner.FindTools(version.Current, series.HostSeries(), a) c.Assert(called, jc.IsTrue) if apiError != nil { c.Assert(err, gc.Equals, apiError) @@ -870,12 +867,13 @@ Disabled: false, NoAutoStart: false, ConfigType: network.ConfigStatic, - // Overwrite the Address field below with the actual one, as - // it's chosen randomly. - Address: network.Address{}, - DNSServers: network.NewAddresses("ns1.dummy", "ns2.dummy"), - GatewayAddress: network.NewAddress("0.10.0.2"), - ExtraConfig: nil, + DNSServers: network.NewAddresses("ns1.dummy", "ns2.dummy"), + GatewayAddress: network.NewAddress("0.10.0.2"), + ExtraConfig: nil, + // Overwrite Address and MACAddress fields below with the actual ones, + // as they are chosen randomly. + Address: network.Address{}, + MACAddress: "", }} c.Assert(ifaceInfo[0].Address, gc.Not(gc.DeepEquals), network.Address{}) c.Assert(ifaceInfo[0].MACAddress, gc.Not(gc.DeepEquals), "") @@ -907,7 +905,7 @@ addr := network.NewAddress(fmt.Sprintf("0.10.0.%d", i)) ipaddr, err := s.State.AddIPAddress(addr, sub.ID()) c.Check(err, jc.ErrorIsNil) - err = ipaddr.AllocateTo(container.Id(), "", "") + err = ipaddr.AllocateTo(container.Id(), "nic42", "aa:bb:cc:dd:ee:f0") c.Check(err, jc.ErrorIsNil) } c.Assert(err, jc.ErrorIsNil) === added directory 'src/github.com/juju/juju/api/proxyupdater' === added file 'src/github.com/juju/juju/api/proxyupdater/package_test.go' --- src/github.com/juju/juju/api/proxyupdater/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/proxyupdater/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package proxyupdater_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === added file 'src/github.com/juju/juju/api/proxyupdater/proxyupdater.go' --- src/github.com/juju/juju/api/proxyupdater/proxyupdater.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/proxyupdater/proxyupdater.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,28 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package proxyupdater + +import ( + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/common" +) + +const apiName = "ProxyUpdater" + +// Facade provides access to a machine model worker's view of the world. +type Facade struct { + *common.ModelWatcher +} + +// NewFacade returns a new api client facade instance. +func NewFacade(caller base.APICaller) *Facade { + facadeCaller := base.NewFacadeCaller(caller, apiName) + return &Facade{ + ModelWatcher: common.NewModelWatcher(facadeCaller), + } +} + +// TODO(wallyworld) - add methods for getting proxy settings specifically, +// rather than the entire model config. +// Also WatchProxySettings instead of WatchForModelConfigChanges. === added file 'src/github.com/juju/juju/api/proxyupdater/proxyupdater_test.go' --- src/github.com/juju/juju/api/proxyupdater/proxyupdater_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/proxyupdater/proxyupdater_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package proxyupdater_test + +import ( + gc "gopkg.in/check.v1" + + apitesting "github.com/juju/juju/api/testing" + jujutesting "github.com/juju/juju/juju/testing" +) + +type modelSuite struct { + jujutesting.JujuConnSuite + *apitesting.ModelWatcherTests +} + +var _ = gc.Suite(&modelSuite{}) + +func (s *modelSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + stateAPI, _ := s.OpenAPIAsNewMachine(c) + + agentAPI := stateAPI.Agent() + c.Assert(agentAPI, gc.NotNil) + + s.ModelWatcherTests = apitesting.NewModelWatcherTests( + agentAPI, s.BackingState, apitesting.NoSecrets) +} === modified file 'src/github.com/juju/juju/api/reboot/reboot.go' --- src/github.com/juju/juju/api/reboot/reboot.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/reboot/reboot.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,9 @@ "github.com/juju/names" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // State provides access to an reboot worker's view of the state. @@ -59,7 +60,7 @@ return nil, result.Error } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) return w, nil } === added directory 'src/github.com/juju/juju/api/retrystrategy' === added file 'src/github.com/juju/juju/api/retrystrategy/package_test.go' --- src/github.com/juju/juju/api/retrystrategy/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/retrystrategy/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +// Copyright 2016 Canonical Ltd. +// Copyright 2016 Cloudbase Solutions SRL +// Licensed under the AGPLv3, see LICENCE file for details. 
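One observation on the proxyupdater facade added above: it currently only embeds common.ModelWatcher, so per its TODO a caller has to pull proxy values out of the full model config. A hedged sketch, not part of the changeset; it assumes ModelWatcher exposes a ModelConfig() method and that the returned environs/config.Config has a ProxySettings accessor:

    import (
        "github.com/juju/utils/proxy"

        "github.com/juju/juju/api/base"
        "github.com/juju/juju/api/proxyupdater"
    )

    // modelProxySettings is a hypothetical helper bridging the gap the TODO
    // describes: fetch the whole model config, keep only the proxy values.
    func modelProxySettings(caller base.APICaller) (proxy.Settings, error) {
        facade := proxyupdater.NewFacade(caller)
        cfg, err := facade.ModelConfig() // assumed to come from common.ModelWatcher
        if err != nil {
            return proxy.Settings{}, err
        }
        return cfg.ProxySettings(), nil // assumed accessor on environs/config.Config
    }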
+ +package retrystrategy_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/api/retrystrategy/retrystrategy.go' --- src/github.com/juju/juju/api/retrystrategy/retrystrategy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/retrystrategy/retrystrategy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,70 @@ +// Copyright 2016 Canonical Ltd. +// Copyright 2016 Cloudbase Solutions +// Licensed under the AGPLv3, see LICENCE file for details. + +package retrystrategy + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/api/base" + apiwatcher "github.com/juju/juju/api/watcher" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" +) + +// Client provides access to the retry strategy api +type Client struct { + facade base.FacadeCaller +} + +// NewClient creates a client for accessing the retry strategy api +func NewClient(apiCaller base.APICaller) *Client { + return &Client{base.NewFacadeCaller(apiCaller, "RetryStrategy")} +} + +// RetryStrategy returns the configuration for the agent specified by the agentTag. +func (c *Client) RetryStrategy(agentTag names.Tag) (params.RetryStrategy, error) { + var results params.RetryStrategyResults + args := params.Entities{ + Entities: []params.Entity{{Tag: agentTag.String()}}, + } + err := c.facade.FacadeCall("RetryStrategy", args, &results) + if err != nil { + return params.RetryStrategy{}, errors.Trace(err) + } + if len(results.Results) != 1 { + return params.RetryStrategy{}, fmt.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return params.RetryStrategy{}, errors.Trace(result.Error) + } + return *result.Result, nil +} + +// WatchRetryStrategy returns a notify watcher that looks for changes in the +// retry strategy config for the agent specified by agentTag +// Right now only the boolean that decides whether we retry can be modified. +func (c *Client) WatchRetryStrategy(agentTag names.Tag) (watcher.NotifyWatcher, error) { + var results params.NotifyWatchResults + args := params.Entities{ + Entities: []params.Entity{{Tag: agentTag.String()}}, + } + err := c.facade.FacadeCall("WatchRetryStrategy", args, &results) + if err != nil { + return nil, errors.Trace(err) + } + if len(results.Results) != 1 { + return nil, fmt.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return nil, errors.Trace(result.Error) + } + w := apiwatcher.NewNotifyWatcher(c.facade.RawAPICaller(), result) + return w, nil +} === added file 'src/github.com/juju/juju/api/retrystrategy/retrystrategy_test.go' --- src/github.com/juju/juju/api/retrystrategy/retrystrategy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/retrystrategy/retrystrategy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,233 @@ +// Copyright 2016 Canonical Ltd. +// Copyright 2016 Cloudbase Solutions +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package retrystrategy_test + +import ( + "fmt" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/retrystrategy" + "github.com/juju/juju/apiserver/params" + coretesting "github.com/juju/juju/testing" +) + +type retryStrategySuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&retryStrategySuite{}) + +func (s *retryStrategySuite) TestRetryStrategyOk(c *gc.C) { + tag := names.NewUnitTag("wp/1") + expectedRetryStrategy := params.RetryStrategy{ + ShouldRetry: true, + } + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.RetryStrategyResults{}) + result := response.(*params.RetryStrategyResults) + result.Results = []params.RetryStrategyResult{{ + Result: &expectedRetryStrategy, + }} + return nil + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + retryStrategy, err := client.RetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, jc.ErrorIsNil) + c.Assert(retryStrategy, jc.DeepEquals, expectedRetryStrategy) +} + +func (s *retryStrategySuite) TestRetryStrategyResultError(c *gc.C) { + tag := names.NewUnitTag("wp/1") + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.RetryStrategyResults{}) + result := response.(*params.RetryStrategyResults) + result.Results = []params.RetryStrategyResult{{ + Error: ¶ms.Error{ + Message: "splat", + Code: params.CodeNotAssigned, + }, + }} + return nil + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + retryStrategy, err := client.RetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "splat") + c.Assert(retryStrategy, jc.DeepEquals, params.RetryStrategy{}) +} + +func (s *retryStrategySuite) TestRetryStrategyMoreResults(c *gc.C) { + tag := names.NewUnitTag("wp/1") + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.RetryStrategyResults{}) + result := response.(*params.RetryStrategyResults) + result.Results = make([]params.RetryStrategyResult, 2) + return nil + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + retryStrategy, err := client.RetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "expected 1 result, got 2") + c.Assert(retryStrategy, jc.DeepEquals, 
params.RetryStrategy{}) +} + +func (s *retryStrategySuite) TestRetryStrategyError(c *gc.C) { + tag := names.NewUnitTag("wp/1") + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "RetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.RetryStrategyResults{}) + return fmt.Errorf("impossibru") + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + retryStrategy, err := client.RetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "impossibru") + c.Assert(retryStrategy, jc.DeepEquals, params.RetryStrategy{}) +} + +func (s *retryStrategySuite) TestWatchRetryStrategyError(c *gc.C) { + tag := names.NewUnitTag("wp/1") + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "WatchRetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.NotifyWatchResults{}) + result := response.(*params.NotifyWatchResults) + result.Results = make([]params.NotifyWatchResult, 1) + return fmt.Errorf("sosorry") + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + w, err := client.WatchRetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "sosorry") + c.Assert(w, gc.IsNil) +} + +func (s *retryStrategySuite) TestWatchRetryStrategyResultError(c *gc.C) { + tag := names.NewUnitTag("wp/1") + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "WatchRetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.NotifyWatchResults{}) + result := response.(*params.NotifyWatchResults) + result.Results = []params.NotifyWatchResult{{ + Error: ¶ms.Error{ + Message: "rigged", + Code: params.CodeNotAssigned, + }, + }} + return nil + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + w, err := client.WatchRetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "rigged") + c.Assert(w, gc.IsNil) +} + +func (s *retryStrategySuite) TestWatchRetryStrategyMoreResults(c *gc.C) { + tag := names.NewUnitTag("wp/1") + var called bool + apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, response interface{}) error { + called = true + + c.Check(objType, gc.Equals, "RetryStrategy") + c.Check(version, gc.Equals, 0) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "WatchRetryStrategy") + c.Check(arg, gc.DeepEquals, params.Entities{ + Entities: []params.Entity{{Tag: tag.String()}}, + }) + c.Assert(response, gc.FitsTypeOf, ¶ms.NotifyWatchResults{}) + result := response.(*params.NotifyWatchResults) + 
result.Results = make([]params.NotifyWatchResult, 2) + return nil + }) + + client := retrystrategy.NewClient(apiCaller) + c.Assert(client, gc.NotNil) + + w, err := client.WatchRetryStrategy(tag) + c.Assert(called, jc.IsTrue) + c.Assert(err, gc.ErrorMatches, "expected 1 result, got 2") + c.Assert(w, gc.IsNil) +} === removed directory 'src/github.com/juju/juju/api/rsyslog' === removed file 'src/github.com/juju/juju/api/rsyslog/package_test.go' --- src/github.com/juju/juju/api/rsyslog/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/rsyslog/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/api/rsyslog/rsyslog.go' --- src/github.com/juju/juju/api/rsyslog/rsyslog.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/rsyslog/rsyslog.go 1970-01-01 00:00:00 +0000 @@ -1,103 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog - -import ( - "fmt" - - "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" -) - -const rsyslogAPI = "Rsyslog" - -// RsyslogConfig holds the values needed for the rsyslog worker -type RsyslogConfig struct { - CACert string - CAKey string - // Port is only used by state servers as the port to listen on. - Port int - HostPorts []network.HostPort -} - -// State provides access to the Rsyslog API facade. -type State struct { - facade base.FacadeCaller -} - -// NewState creates a new client-side Rsyslog facade. -func NewState(caller base.APICaller) *State { - return &State{facade: base.NewFacadeCaller(caller, rsyslogAPI)} -} - -// SetRsyslogCert sets the rsyslog CA and Key certificates. -// The CA cert is used to verify the server's identify and establish -// a TLS session. The Key is used to allow us to properly regenerate -// rsyslog server certificates when adding and removing -// state servers with ensure-availability. -func (st *State) SetRsyslogCert(caCert, caKey string) error { - var result params.ErrorResult - args := params.SetRsyslogCertParams{ - CACert: []byte(caCert), - CAKey: []byte(caKey), - } - err := st.facade.FacadeCall("SetRsyslogCert", args, &result) - if err != nil { - return err - } - if result.Error != nil { - return result.Error - } - return nil -} - -// WatchForRsyslogChanges returns a new NotifyWatcher. -func (st *State) WatchForRsyslogChanges(agentTag string) (watcher.NotifyWatcher, error) { - var results params.NotifyWatchResults - args := params.Entities{ - Entities: []params.Entity{{Tag: agentTag}}, - } - - err := st.facade.FacadeCall("WatchForRsyslogChanges", args, &results) - if err != nil { - // TODO: Not directly tested - return nil, err - } - if len(results.Results) != 1 { - // TODO: Not directly tested - return nil, fmt.Errorf("expected 1 result, got %d", len(results.Results)) - } - result := results.Results[0] - if result.Error != nil { - // TODO: Not directly tested - return nil, result.Error - } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) - return w, nil -} - -// GetRsyslogConfig returns a RsyslogConfig. 
-func (st *State) GetRsyslogConfig(agentTag string) (*RsyslogConfig, error) { - var results params.RsyslogConfigResults - args := params.Entities{ - Entities: []params.Entity{{Tag: agentTag}}, - } - err := st.facade.FacadeCall("GetRsyslogConfig", args, &results) - if err != nil { - return nil, err - } - result := results.Results[0] - if result.Error != nil { - // TODO: Not directly tested - return nil, result.Error - } - return &RsyslogConfig{ - CACert: result.CACert, - CAKey: result.CAKey, - Port: result.Port, - HostPorts: params.NetworkHostPorts(result.HostPorts), - }, nil -} === removed file 'src/github.com/juju/juju/api/rsyslog/rsyslog_test.go' --- src/github.com/juju/juju/api/rsyslog/rsyslog_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/rsyslog/rsyslog_test.go 1970-01-01 00:00:00 +0000 @@ -1,82 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" - coretesting "github.com/juju/juju/testing" -) - -type rsyslogSuite struct { - testing.JujuConnSuite - - st api.Connection - machine *state.Machine - rsyslog *rsyslog.State -} - -var _ = gc.Suite(&rsyslogSuite{}) - -func (s *rsyslogSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - s.st, s.machine = s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - err := s.machine.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - // Create the rsyslog API facade - s.rsyslog = s.st.Rsyslog() - c.Assert(s.rsyslog, gc.NotNil) -} - -func (s *rsyslogSuite) TestGetRsyslogConfig(c *gc.C) { - err := s.APIState.Client().EnvironmentSet(map[string]interface{}{ - "rsyslog-ca-cert": coretesting.CACert, - "rsyslog-ca-key": coretesting.CAKey, - }) - c.Assert(err, jc.ErrorIsNil) - - cfg, err := s.rsyslog.GetRsyslogConfig(s.machine.Tag().String()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cfg, gc.NotNil) - - c.Assert(cfg.CACert, gc.Equals, coretesting.CACert) - c.Assert(cfg.HostPorts, gc.HasLen, 1) - hostPort := cfg.HostPorts[0] - c.Assert(hostPort.Address.Value, gc.Equals, "0.1.2.3") - - // the rsyslog port is set by the provider/dummy/environs.go - c.Assert(hostPort.Port, gc.Equals, 2345) -} - -func (s *rsyslogSuite) TestWatchForRsyslogChanges(c *gc.C) { - w, err := s.rsyslog.WatchForRsyslogChanges(s.machine.Tag().String()) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) - // Initial event - wc.AssertOneChange() - - // change the API HostPorts - newHostPorts := network.NewHostPorts(6541, "127.0.0.1") - err = s.State.SetAPIHostPorts([][]network.HostPort{newHostPorts}) - c.Assert(err, jc.ErrorIsNil) - - // assert we get notified - wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() -} - -// SetRsyslogCACert is tested in apiserver/rsyslog === modified file 'src/github.com/juju/juju/api/service/client.go' --- src/github.com/juju/juju/api/service/client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/service/client.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,7 @@ import ( "github.com/juju/errors" "github.com/juju/loggo" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" "github.com/juju/juju/api/base" @@ -48,61 
+49,228 @@ return errors.Trace(results.OneError()) } -// EnvironmentUUID returns the environment UUID from the client connection. -func (c *Client) EnvironmentUUID() string { - tag, err := c.st.EnvironTag() +// ModelUUID returns the model UUID from the client connection. +func (c *Client) ModelUUID() string { + tag, err := c.st.ModelTag() if err != nil { - logger.Warningf("environ tag not an environ: %v", err) + logger.Warningf("model tag not a model: %v", err) return "" } return tag.Id() } -// ServiceDeploy obtains the charm, either locally or from -// the charm store, and deploys it. It allows the specification of -// requested networks that must be present on the machines where the -// service is deployed. Another way to specify networks to include/exclude -// is using constraints. Placement directives, if provided, specify the +// DeployArgs holds the arguments to be sent to Client.Deploy. +type DeployArgs struct { + // CharmURL is the URL of the charm to deploy. + CharmURL string + // ServiceName is the name to give the service. + ServiceName string + // Series to be used for the machine. + Series string + // NumUnits is the number of units to deploy. + NumUnits int + // ConfigYAML is a string that overrides the default config.yml. + ConfigYAML string + // Cons contains constraints on where units of this service may be + // placed. + Cons constraints.Value + // Placement directives on where the machines for the unit must be + // created. + Placement []*instance.Placement + // Networks contains names of networks to deploy on. + Networks []string + // Storage contains Constraints specifying how storage should be + // handled. + Storage map[string]storage.Constraints + // EndpointBindings maps endpoint names to space names. + EndpointBindings map[string]string + // Resources is a collection of resource names for the service, with the + // value being the unique ID of a pre-uploaded resource in storage. + Resources map[string]string +} + +// Deploy obtains the charm, either locally or from the charm store, +// and deploys it. It allows the specification of requested networks +// that must be present on the machines where the service is +// deployed. Another way to specify networks to include/exclude is +// using constraints. Placement directives, if provided, specify the // machine on which the charm is deployed.
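+// +// A minimal call-site sketch (illustrative only: the charm URL, service name, +// and series are hypothetical values, not part of this change): +// +// err := client.Deploy(service.DeployArgs{ +// CharmURL: "cs:trusty/wordpress-42", +// ServiceName: "wordpress", +// Series: "trusty", +// NumUnits: 1, +// })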
-func (c *Client) ServiceDeploy( - charmURL string, - serviceName string, - numUnits int, - configYAML string, - cons constraints.Value, - toMachineSpec string, - placement []*instance.Placement, - networks []string, - storage map[string]storage.Constraints, -) error { - args := params.ServicesDeploy{ +func (c *Client) Deploy(args DeployArgs) error { + deployArgs := params.ServicesDeploy{ Services: []params.ServiceDeploy{{ - ServiceName: serviceName, - CharmUrl: charmURL, - NumUnits: numUnits, - ConfigYAML: configYAML, - Constraints: cons, - ToMachineSpec: toMachineSpec, - Placement: placement, - Networks: networks, - Storage: storage, + ServiceName: args.ServiceName, + Series: args.Series, + CharmUrl: args.CharmURL, + NumUnits: args.NumUnits, + ConfigYAML: args.ConfigYAML, + Constraints: args.Cons, + Placement: args.Placement, + Networks: args.Networks, + Storage: args.Storage, + EndpointBindings: args.EndpointBindings, + Resources: args.Resources, }}, } var results params.ErrorResults var err error - if len(placement) > 0 { - err = c.facade.FacadeCall("ServicesDeployWithPlacement", args, &results) - if err != nil { - if params.IsCodeNotImplemented(err) { - return errors.Errorf("unsupported --to parameter %q", toMachineSpec) - } - return err - } - } else { - err = c.facade.FacadeCall("ServicesDeploy", args, &results) - } + err = c.facade.FacadeCall("Deploy", deployArgs, &results) if err != nil { return err } return results.OneError() } + +// GetCharmURL returns the charm URL the given service is +// running at present. +func (c *Client) GetCharmURL(serviceName string) (*charm.URL, error) { + result := new(params.StringResult) + args := params.ServiceGet{ServiceName: serviceName} + err := c.facade.FacadeCall("GetCharmURL", args, result) + if err != nil { + return nil, err + } + if result.Error != nil { + return nil, result.Error + } + return charm.ParseURL(result.Result) +} + +// SetCharmConfig holds the configuration for setting a new revision of a charm +// on a service. +type SetCharmConfig struct { + // ServiceName is the name of the service to set the charm on. + ServiceName string + // CharmUrl is the url for the charm. + CharmUrl string + // ForceSeries forces the use of the charm even if it doesn't match the + // series of the unit. + ForceSeries bool + // ForceUnits forces the upgrade on units in an error state. + ForceUnits bool + // ResourceIDs is a map of resource names to resource IDs to activate during + // the upgrade. + ResourceIDs map[string]string +} + +// SetCharm sets the charm for a given service. +func (c *Client) SetCharm(cfg SetCharmConfig) error { + args := params.ServiceSetCharm{ + ServiceName: cfg.ServiceName, + CharmUrl: cfg.CharmUrl, + ForceSeries: cfg.ForceSeries, + ForceUnits: cfg.ForceUnits, + ResourceIDs: cfg.ResourceIDs, + } + return c.facade.FacadeCall("SetCharm", args, nil) +} + +// Update updates the service attributes, including charm URL, +// minimum number of units, settings and constraints. +func (c *Client) Update(args params.ServiceUpdate) error { + return c.facade.FacadeCall("Update", args, nil) +} + +// AddUnits adds a given number of units to a service using the specified +// placement directives to assign units to machines. 
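+// +// A hedged example of a call site (the service name and placement values are +// hypothetical): +// +// unitNames, err := client.AddUnits("wordpress", 2, +// []*instance.Placement{{Scope: "lxc", Directive: "1"}}, +// )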
+func (c *Client) AddUnits(service string, numUnits int, placement []*instance.Placement) ([]string, error) { + args := params.AddServiceUnits{ + ServiceName: service, + NumUnits: numUnits, + Placement: placement, + } + results := new(params.AddServiceUnitsResults) + err := c.facade.FacadeCall("AddUnits", args, results) + return results.Units, err +} + +// DestroyUnits decreases the number of units dedicated to a service. +func (c *Client) DestroyUnits(unitNames ...string) error { + params := params.DestroyServiceUnits{unitNames} + return c.facade.FacadeCall("DestroyUnits", params, nil) +} + +// Destroy destroys a given service. +func (c *Client) Destroy(service string) error { + params := params.ServiceDestroy{ + ServiceName: service, + } + return c.facade.FacadeCall("Destroy", params, nil) +} + +// GetConstraints returns the constraints for the given service. +func (c *Client) GetConstraints(service string) (constraints.Value, error) { + results := new(params.GetConstraintsResults) + err := c.facade.FacadeCall("GetConstraints", params.GetServiceConstraints{service}, results) + return results.Constraints, err +} + +// SetConstraints specifies the constraints for the given service. +func (c *Client) SetConstraints(service string, constraints constraints.Value) error { + params := params.SetConstraints{ + ServiceName: service, + Constraints: constraints, + } + return c.facade.FacadeCall("SetConstraints", params, nil) +} + +// Expose changes the juju-managed firewall to expose any ports that +// were also explicitly marked by units as open. +func (c *Client) Expose(service string) error { + params := params.ServiceExpose{ServiceName: service} + return c.facade.FacadeCall("Expose", params, nil) +} + +// Unexpose changes the juju-managed firewall to unexpose any ports that +// were also explicitly marked by units as open. +func (c *Client) Unexpose(service string) error { + params := params.ServiceUnexpose{ServiceName: service} + return c.facade.FacadeCall("Unexpose", params, nil) +} + +// Get returns the configuration for the named service. +func (c *Client) Get(service string) (*params.ServiceGetResults, error) { + var results params.ServiceGetResults + params := params.ServiceGet{ServiceName: service} + err := c.facade.FacadeCall("Get", params, &results) + return &results, err +} + +// Set sets configuration options on a service. +func (c *Client) Set(service string, options map[string]string) error { + p := params.ServiceSet{ + ServiceName: service, + Options: options, + } + return c.facade.FacadeCall("Set", p, nil) +} + +// Unset resets configuration options on a service. +func (c *Client) Unset(service string, options []string) error { + p := params.ServiceUnset{ + ServiceName: service, + Options: options, + } + return c.facade.FacadeCall("Unset", p, nil) +} + +// CharmRelations returns the service's charm's relation names. +func (c *Client) CharmRelations(service string) ([]string, error) { + var results params.ServiceCharmRelationsResults + params := params.ServiceCharmRelations{ServiceName: service} + err := c.facade.FacadeCall("CharmRelations", params, &results) + return results.CharmRelations, err +} + +// AddRelation adds a relation between the specified endpoints and returns the relation info.
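+// +// A brief usage sketch (the endpoint names are illustrative, not from this +// change): +// +// rel, err := client.AddRelation("wordpress:db", "mysql:server")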
+func (c *Client) AddRelation(endpoints ...string) (*params.AddRelationResults, error) { + var addRelRes params.AddRelationResults + params := params.AddRelation{Endpoints: endpoints} + err := c.facade.FacadeCall("AddRelation", params, &addRelRes) + return &addRelRes, err +} + +// DestroyRelation removes the relation between the specified endpoints. +func (c *Client) DestroyRelation(endpoints ...string) error { + params := params.DestroyRelation{Endpoints: endpoints} + return c.facade.FacadeCall("DestroyRelation", params, nil) +} === modified file 'src/github.com/juju/juju/api/service/client_test.go' --- src/github.com/juju/juju/api/service/client_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/service/client_test.go 2016-03-22 15:18:22 +0000 @@ -6,11 +6,13 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/service" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/constraints" + "github.com/juju/juju/instance" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/storage" ) @@ -77,25 +79,83 @@ var called bool service.PatchFacadeCall(s, s.client, func(request string, a, response interface{}) error { called = true - c.Assert(request, gc.Equals, "ServicesDeploy") + c.Assert(request, gc.Equals, "Deploy") args, ok := a.(params.ServicesDeploy) c.Assert(ok, jc.IsTrue) c.Assert(args.Services, gc.HasLen, 1) c.Assert(args.Services[0].CharmUrl, gc.Equals, "charmURL") c.Assert(args.Services[0].ServiceName, gc.Equals, "serviceA") + c.Assert(args.Services[0].Series, gc.Equals, "series") c.Assert(args.Services[0].NumUnits, gc.Equals, 2) c.Assert(args.Services[0].ConfigYAML, gc.Equals, "configYAML") c.Assert(args.Services[0].Constraints, gc.DeepEquals, constraints.MustParse("mem=4G")) - c.Assert(args.Services[0].ToMachineSpec, gc.Equals, "machineSpec") - c.Assert(args.Services[0].Networks, gc.DeepEquals, []string{"neta"}) + c.Assert(args.Services[0].Placement, gc.DeepEquals, []*instance.Placement{{"scope", "directive"}}) + c.Assert(args.Services[0].EndpointBindings, gc.DeepEquals, map[string]string{"foo": "bar"}) c.Assert(args.Services[0].Storage, gc.DeepEquals, map[string]storage.Constraints{"data": storage.Constraints{Pool: "pool"}}) + c.Assert(args.Services[0].Resources, gc.DeepEquals, map[string]string{"foo": "bar"}) result := response.(*params.ErrorResults) result.Results = make([]params.ErrorResult, 1) return nil }) - err := s.client.ServiceDeploy("charmURL", "serviceA", 2, "configYAML", constraints.MustParse("mem=4G"), - "machineSpec", nil, []string{"neta"}, map[string]storage.Constraints{"data": storage.Constraints{Pool: "pool"}}) + + args := service.DeployArgs{ + CharmURL: "charmURL", + ServiceName: "serviceA", + Series: "series", + NumUnits: 2, + ConfigYAML: "configYAML", + Cons: constraints.MustParse("mem=4G"), + Placement: []*instance.Placement{{"scope", "directive"}}, + Networks: []string{"neta"}, + Storage: map[string]storage.Constraints{"data": storage.Constraints{Pool: "pool"}}, + Resources: map[string]string{"foo": "bar"}, + EndpointBindings: map[string]string{"foo": "bar"}, + } + err := s.client.Deploy(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *serviceSuite) TestServiceGetCharmURL(c *gc.C) { + var called bool + service.PatchFacadeCall(s, s.client, func(request string, a, response interface{}) error { + called = true + c.Assert(request, gc.Equals, "GetCharmURL") + args, ok := 
a.(params.ServiceGet) + c.Assert(ok, jc.IsTrue) + c.Assert(args.ServiceName, gc.Equals, "service") + + result := response.(*params.StringResult) + result.Result = "curl" + return nil + }) + curl, err := s.client.GetCharmURL("service") + c.Assert(err, jc.ErrorIsNil) + c.Assert(curl, gc.DeepEquals, charm.MustParseURL("curl")) + c.Assert(called, jc.IsTrue) +} + +func (s *serviceSuite) TestServiceSetCharm(c *gc.C) { + var called bool + service.PatchFacadeCall(s, s.client, func(request string, a, response interface{}) error { + called = true + c.Assert(request, gc.Equals, "SetCharm") + args, ok := a.(params.ServiceSetCharm) + c.Assert(ok, jc.IsTrue) + c.Assert(args.ServiceName, gc.Equals, "service") + c.Assert(args.CharmUrl, gc.Equals, "charmURL") + c.Assert(args.ForceSeries, gc.Equals, true) + c.Assert(args.ForceUnits, gc.Equals, true) + return nil + }) + cfg := service.SetCharmConfig{ + ServiceName: "service", + CharmUrl: "charmURL", + ForceSeries: true, + ForceUnits: true, + } + err := s.client.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) c.Assert(called, jc.IsTrue) } === added directory 'src/github.com/juju/juju/api/singular' === added file 'src/github.com/juju/juju/api/singular/api.go' --- src/github.com/juju/juju/api/singular/api.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/singular/api.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,88 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package singular + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/lease" +) + +// NewAPI returns a new API client for the Singular facade. It exposes methods +// for claiming and observing administration responsibility for the apiCaller's +// model, on behalf of the supplied controller machine. +func NewAPI(apiCaller base.APICaller, controllerTag names.MachineTag) (*API, error) { + controllerId := controllerTag.Id() + if !names.IsValidMachine(controllerId) { + return nil, errors.NotValidf("controller tag") + } + modelTag, err := apiCaller.ModelTag() + if err != nil { + return nil, errors.Trace(err) + } + facadeCaller := base.NewFacadeCaller(apiCaller, "Singular") + return &API{ + modelTag: modelTag, + controllerTag: controllerTag, + facadeCaller: facadeCaller, + }, nil +} + +// API allows controller machines to claim responsibility for, or to wait for +// no other machine to have responsibility for, administration of some model. +type API struct { + modelTag names.ModelTag + controllerTag names.MachineTag + facadeCaller base.FacadeCaller +} + +// Claim attempts to claim responsibility for model administration for the +// supplied duration. If the claim is denied, it will return +// lease.ErrClaimDenied. +func (api *API) Claim(duration time.Duration) error { + args := params.SingularClaims{ + Claims: []params.SingularClaim{{ + ModelTag: api.modelTag.String(), + ControllerTag: api.controllerTag.String(), + Duration: duration, + }}, + } + var results params.ErrorResults + err := api.facadeCaller.FacadeCall("Claim", args, &results) + if err != nil { + return errors.Trace(err) + } + + err = results.OneError() + if err != nil { + if params.IsCodeLeaseClaimDenied(err) { + return lease.ErrClaimDenied + } + return errors.Trace(err) + } + return nil +} + +// Wait blocks until nobody has responsibility for model administration.
It +// should probably be doing something watchy rather than blocky, but it's +// following the lease manager implementation underlying the original +// leadership approach and it doesn't seem worth rewriting all that. +func (api *API) Wait() error { + args := params.Entities{ + Entities: []params.Entity{{ + Tag: api.modelTag.String(), + }}, + } + var results params.ErrorResults + err := api.facadeCaller.FacadeCall("Wait", args, &results) + if err != nil { + return errors.Trace(err) + } + return results.OneError() +} === added file 'src/github.com/juju/juju/api/singular/api_test.go' --- src/github.com/juju/juju/api/singular/api_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/singular/api_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,186 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package singular_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + basetesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/singular" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/lease" +) + +type APISuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&APISuite{}) + +var machine123 = names.NewMachineTag("123") + +func (s *APISuite) TestBadControllerTag(c *gc.C) { + apiCaller := apiCaller(c, nil, nil) + badTag := names.NewMachineTag("") + api, err := singular.NewAPI(apiCaller, badTag) + c.Check(api, gc.IsNil) + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, "controller tag not valid") +} + +func (s *APISuite) TestBadModelTag(c *gc.C) { + api, err := singular.NewAPI(mockAPICaller{}, machine123) + c.Check(api, gc.IsNil) + c.Check(err, gc.ErrorMatches, "no tags for you") +} + +func (s *APISuite) TestNoCalls(c *gc.C) { + stub := &testing.Stub{} + apiCaller := apiCaller(c, nil, nil) + _, err := singular.NewAPI(apiCaller, machine123) + c.Check(err, jc.ErrorIsNil) + stub.CheckCallNames(c) +} + +func (s *APISuite) TestClaimSuccess(c *gc.C) { + stub := &testing.Stub{} + apiCaller := apiCaller(c, stub, func(result *params.ErrorResults) error { + result.Results = []params.ErrorResult{{}} + return nil + }) + api, err := singular.NewAPI(apiCaller, machine123) + c.Assert(err, jc.ErrorIsNil) + + err = api.Claim(time.Minute) + c.Check(err, jc.ErrorIsNil) + checkCall(c, stub, "Claim", params.SingularClaims{ + Claims: []params.SingularClaim{{ + ModelTag: "model-deadbeef-0bad-400d-8000-4b1d0d06f00d", + ControllerTag: "machine-123", + Duration: time.Minute, + }}, + }) +} + +func (s *APISuite) TestClaimDenied(c *gc.C) { + stub := &testing.Stub{} + apiCaller := apiCaller(c, stub, func(result *params.ErrorResults) error { + result.Results = []params.ErrorResult{{ + Error: common.ServerError(lease.ErrClaimDenied), + }} + return nil + }) + api, err := singular.NewAPI(apiCaller, machine123) + c.Assert(err, jc.ErrorIsNil) + + err = api.Claim(time.Hour) + c.Check(err, gc.Equals, lease.ErrClaimDenied) + checkCall(c, stub, "Claim", params.SingularClaims{ + Claims: []params.SingularClaim{{ + ModelTag: "model-deadbeef-0bad-400d-8000-4b1d0d06f00d", + ControllerTag: "machine-123", + Duration: time.Hour, + }}, + }) +} + +func (s *APISuite) TestClaimError(c *gc.C) { + stub := &testing.Stub{} + apiCaller := apiCaller(c, stub, func(result *params.ErrorResults) error { + result.Results = 
[]params.ErrorResult{{ + Error: common.ServerError(errors.New("zap pow splat oof")), + }} + return nil + }) + api, err := singular.NewAPI(apiCaller, machine123) + c.Assert(err, jc.ErrorIsNil) + + err = api.Claim(time.Second) + c.Check(err, gc.ErrorMatches, "zap pow splat oof") + checkCall(c, stub, "Claim", params.SingularClaims{ + Claims: []params.SingularClaim{{ + ModelTag: "model-deadbeef-0bad-400d-8000-4b1d0d06f00d", + ControllerTag: "machine-123", + Duration: time.Second, + }}, + }) +} + +func (s *APISuite) TestWaitSuccess(c *gc.C) { + stub := &testing.Stub{} + apiCaller := apiCaller(c, stub, func(result *params.ErrorResults) error { + result.Results = []params.ErrorResult{{}} + return nil + }) + api, err := singular.NewAPI(apiCaller, machine123) + c.Assert(err, jc.ErrorIsNil) + + err = api.Wait() + c.Check(err, jc.ErrorIsNil) + checkCall(c, stub, "Wait", params.Entities{ + Entities: []params.Entity{{ + Tag: "model-deadbeef-0bad-400d-8000-4b1d0d06f00d", + }}, + }) +} + +func (s *APISuite) TestWaitError(c *gc.C) { + stub := &testing.Stub{} + apiCaller := apiCaller(c, stub, func(result *params.ErrorResults) error { + result.Results = []params.ErrorResult{{ + Error: common.ServerError(errors.New("crunch squelch")), + }} + return nil + }) + api, err := singular.NewAPI(apiCaller, machine123) + c.Assert(err, jc.ErrorIsNil) + + err = api.Wait() + c.Check(err, gc.ErrorMatches, "crunch squelch") + checkCall(c, stub, "Wait", params.Entities{ + Entities: []params.Entity{{ + Tag: "model-deadbeef-0bad-400d-8000-4b1d0d06f00d", + }}, + }) +} + +type setResultFunc func(result *params.ErrorResults) error + +func apiCaller(c *gc.C, stub *testing.Stub, setResult setResultFunc) base.APICaller { + return basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + args, response interface{}, + ) error { + stub.AddCall(objType, version, id, request, args) + result, ok := response.(*params.ErrorResults) + c.Assert(ok, jc.IsTrue) + return setResult(result) + }, + ) +} + +func checkCall(c *gc.C, stub *testing.Stub, method string, args interface{}) { + stub.CheckCalls(c, []testing.StubCall{{ + FuncName: "Singular", + Args: []interface{}{0, "", method, args}, + }}) +} + +type mockAPICaller struct { + base.APICaller +} + +func (mockAPICaller) ModelTag() (names.ModelTag, error) { + return names.ModelTag{}, errors.New("no tags for you") +} === added file 'src/github.com/juju/juju/api/singular/package_test.go' --- src/github.com/juju/juju/api/singular/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/singular/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package singular_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === modified file 'src/github.com/juju/juju/api/state.go' --- src/github.com/juju/juju/api/state.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/state.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "github.com/juju/errors" "github.com/juju/names" + "gopkg.in/macaroon-bakery.v1/httpbakery" "github.com/juju/juju/api/addresser" "github.com/juju/juju/api/agent" @@ -16,19 +17,15 @@ "github.com/juju/juju/api/charmrevisionupdater" "github.com/juju/juju/api/cleaner" "github.com/juju/juju/api/deployer" - "github.com/juju/juju/api/diskmanager" - "github.com/juju/juju/api/environment" + "github.com/juju/juju/api/discoverspaces" "github.com/juju/juju/api/firewaller" + "github.com/juju/juju/api/imagemetadata" "github.com/juju/juju/api/instancepoller" "github.com/juju/juju/api/keyupdater" - apilogger "github.com/juju/juju/api/logger" "github.com/juju/juju/api/machiner" - "github.com/juju/juju/api/networker" "github.com/juju/juju/api/provisioner" "github.com/juju/juju/api/reboot" - "github.com/juju/juju/api/resumer" - "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/api/storageprovisioner" + "github.com/juju/juju/api/unitassigner" "github.com/juju/juju/api/uniter" "github.com/juju/juju/api/upgrader" "github.com/juju/juju/apiserver/params" @@ -40,45 +37,67 @@ // Subsequent requests on the state will act as that entity. This // method is usually called automatically by Open. The machine nonce // should be empty unless logging in as a machine agent. -func (st *State) Login(tag, password, nonce string) error { - err := st.loginV2(tag, password, nonce) - if params.IsCodeNotImplemented(err) { - err = st.loginV1(tag, password, nonce) - if params.IsCodeNotImplemented(err) { - // TODO (cmars): remove fallback once we can drop v0 compatibility - return st.loginV0(tag, password, nonce) - } - } - return err -} - -func (st *State) loginV2(tag, password, nonce string) error { +func (st *state) Login(tag names.Tag, password, nonce string) error { + err := st.loginV3(tag, password, nonce) + return errors.Trace(err) +} + +// loginV2 is retained for testing logins from older clients. +func (st *state) loginV2(tag names.Tag, password, nonce string) error { + return st.loginForVersion(tag, password, nonce, 2) +} + +func (st *state) loginV3(tag names.Tag, password, nonce string) error { + return st.loginForVersion(tag, password, nonce, 3) +} + +func (st *state) loginForVersion(tag names.Tag, password, nonce string, vers int) error { var result params.LoginResultV1 request := ¶ms.LoginRequest{ - AuthTag: tag, + AuthTag: tagToString(tag), Credentials: password, Nonce: nonce, } - err := st.APICall("Admin", 2, "", "Login", request, &result) + if tag == nil { + // Add any macaroons that might work for authenticating the login request. + request.Macaroons = httpbakery.MacaroonsForURL(st.bakeryClient.Client.Jar, st.cookieURL) + } + err := st.APICall("Admin", vers, "", "Login", request, &result) if err != nil { - // If the server complains about an empty tag it may be that we are - // talking to an older server version that does not understand facades and - // expects a params.Creds request instead of a params.LoginRequest. We - // return a CodNotImplemented error to force login down to V1, which - // supports older server logins. This may mask an actual empty tag in - // params.LoginRequest, but that would be picked up in loginV1. 
V1 will - // also produce a warning that we are ignoring an invalid API, so we do not - // need to add one here. - if err.Error() == `"" is not a valid tag` { - return ¶ms.Error{ - Message: err.Error(), - Code: params.CodeNotImplemented, - } - } return errors.Trace(err) } + if result.DischargeRequired != nil { + // The result contains a discharge-required + // macaroon. We discharge it and retry + // the login request with the original macaroon + // and its discharges. + if result.DischargeRequiredReason == "" { + result.DischargeRequiredReason = "no reason given for discharge requirement" + } + if err := st.bakeryClient.HandleError(st.cookieURL, &httpbakery.Error{ + Message: result.DischargeRequiredReason, + Code: httpbakery.ErrDischargeRequired, + Info: &httpbakery.ErrorInfo{ + Macaroon: result.DischargeRequired, + MacaroonPath: "/", + }, + }); err != nil { + return errors.Trace(err) + } + // Add the macaroons that have been saved by HandleError to our login request. + request.Macaroons = httpbakery.MacaroonsForURL(st.bakeryClient.Client.Jar, st.cookieURL) + result = params.LoginResultV1{} // zero result + err = st.APICall("Admin", vers, "", "Login", request, &result) + if err != nil { + return errors.Trace(err) + } + if result.DischargeRequired != nil { + return errors.Errorf("login with discharged macaroons failed: %s", result.DischargeRequiredReason) + } + } + servers := params.NetworkHostsPorts(result.Servers) - err = st.setLoginResult(tag, result.EnvironTag, result.ServerTag, servers, result.Facades) + err = st.setLoginResult(tag, result.ModelTag, result.ControllerTag, servers, result.Facades) if err != nil { return errors.Trace(err) } @@ -89,68 +108,10 @@ return nil } -func (st *State) loginV1(tag, password, nonce string) error { - var result struct { - // TODO (cmars): remove once we can drop 1.18 login compatibility - params.LoginResult - - params.LoginResultV1 - } - err := st.APICall("Admin", 1, "", "Login", ¶ms.LoginRequestCompat{ - LoginRequest: params.LoginRequest{ - AuthTag: tag, - Credentials: password, - Nonce: nonce, - }, - // TODO (cmars): remove once we can drop 1.18 login compatibility - Creds: params.Creds{ - AuthTag: tag, - Password: password, - Nonce: nonce, - }, - }, &result) - if err != nil { - return err - } - - // We've either logged into an Admin v1 facade, or a pre-facade (1.18) API - // server. The JSON field names between the structures are disjoint, so only - // one should have an environ tag set. - - var environTag string - var serverTag string - var servers [][]network.HostPort - var facades []params.FacadeVersions - // For quite old servers, it is possible that they don't send down - // the environTag. - if result.LoginResult.EnvironTag != "" { - environTag = result.LoginResult.EnvironTag - // If the server doesn't support login v1, it doesn't support - // multiple environments, so don't store a server tag. 
- servers = params.NetworkHostsPorts(result.LoginResult.Servers) - facades = result.LoginResult.Facades - } else if result.LoginResultV1.EnvironTag != "" { - environTag = result.LoginResultV1.EnvironTag - serverTag = result.LoginResultV1.ServerTag - servers = params.NetworkHostsPorts(result.LoginResultV1.Servers) - facades = result.LoginResultV1.Facades - } - - err = st.setLoginResult(tag, environTag, serverTag, servers, facades) - if err != nil { - return err - } - return nil -} - -func (st *State) setLoginResult(tag, environTag, serverTag string, servers [][]network.HostPort, facades []params.FacadeVersions) error { - authtag, err := names.ParseTag(tag) - if err != nil { - return err - } - st.authTag = authtag - st.environTag = environTag - st.serverTag = serverTag +func (st *state) setLoginResult(tag names.Tag, modelTag, controllerTag string, servers [][]network.HostPort, facades []params.FacadeVersions) error { + st.authTag = tag + st.modelTag = modelTag + st.controllerTag = controllerTag hostPorts, err := addAddress(servers, st.addr) if err != nil { @@ -165,24 +126,8 @@ for _, facade := range facades { st.facadeVersions[facade.Name] = facade.Versions } - return nil -} -func (st *State) loginV0(tag, password, nonce string) error { - var result params.LoginResult - err := st.APICall("Admin", 0, "", "Login", ¶ms.Creds{ - AuthTag: tag, - Password: password, - Nonce: nonce, - }, &result) - if err != nil { - return err - } - servers := params.NetworkHostsPorts(result.Servers) - // Don't set a server tag. - if err = st.setLoginResult(tag, result.EnvironTag, "", servers, result.Facades); err != nil { - return err - } + st.setLoggedIn() return nil } @@ -230,38 +175,32 @@ // Client returns an object that can be used // to access client-specific functionality. -func (st *State) Client() *Client { +func (st *state) Client() *Client { frontend, backend := base.NewClientFacade(st, "Client") return &Client{ClientFacade: frontend, facade: backend, st: st} } // Machiner returns a version of the state that provides functionality // required by the machiner worker. -func (st *State) Machiner() *machiner.State { +func (st *state) Machiner() *machiner.State { return machiner.NewState(st) } -// Resumer returns a version of the state that provides functionality -// required by the resumer worker. -func (st *State) Resumer() *resumer.API { - return resumer.NewAPI(st) -} - -// Networker returns a version of the state that provides functionality -// required by the networker worker. -func (st *State) Networker() networker.State { - return networker.NewState(st) +// UnitAssigner returns a version of the state that provides functionality +// required by the unitassigner worker. +func (st *state) UnitAssigner() unitassigner.API { + return unitassigner.New(st) } // Provisioner returns a version of the state that provides functionality // required by the provisioner worker. -func (st *State) Provisioner() *provisioner.State { +func (st *state) Provisioner() *provisioner.State { return provisioner.NewState(st) } // Uniter returns a version of the state that provides functionality // required by the uniter worker. 
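+// Note (an editorial sketch inferred from the type assertion in the body +// below): the connection must have been authenticated as a unit agent, or +// Uniter returns an error: +// +// uniterFacade, err := st.Uniter() // errors for machine or user logins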
-func (st *State) Uniter() (*uniter.State, error) { +func (st *state) Uniter() (*uniter.State, error) { unitTag, ok := st.authTag.(names.UnitTag) if !ok { return nil, errors.Errorf("expected UnitTag, got %T %v", st.authTag, st.authTag) @@ -269,45 +208,25 @@ return uniter.NewState(st, unitTag), nil } -// DiskManager returns a version of the state that provides functionality -// required by the diskmanager worker. -func (st *State) DiskManager() (*diskmanager.State, error) { - machineTag, ok := st.authTag.(names.MachineTag) - if !ok { - return nil, errors.Errorf("expected MachineTag, got %#v", st.authTag) - } - return diskmanager.NewState(st, machineTag), nil -} - -// StorageProvisioner returns a version of the state that provides -// functionality required by the storageprovisioner worker. -// The scope tag defines the type of storage that is provisioned, either -// either attached directly to a specified machine (machine scoped), -// or provisioned on the underlying cloud for use by any machine in a -// specified environment (environ scoped). -func (st *State) StorageProvisioner(scope names.Tag) *storageprovisioner.State { - return storageprovisioner.NewState(st, scope) -} - // Firewaller returns a version of the state that provides functionality // required by the firewaller worker. -func (st *State) Firewaller() *firewaller.State { +func (st *state) Firewaller() *firewaller.State { return firewaller.NewState(st) } // Agent returns a version of the state that provides // functionality required by the agent code. -func (st *State) Agent() *agent.State { +func (st *state) Agent() *agent.State { return agent.NewState(st) } // Upgrader returns access to the Upgrader API -func (st *State) Upgrader() *upgrader.State { +func (st *state) Upgrader() *upgrader.State { return upgrader.NewState(st) } // Reboot returns access to the Reboot API -func (st *State) Reboot() (reboot.State, error) { +func (st *state) Reboot() (reboot.State, error) { switch tag := st.authTag.(type) { case names.MachineTag: return reboot.NewState(st, tag), nil @@ -317,54 +236,49 @@ } // Deployer returns access to the Deployer API -func (st *State) Deployer() *deployer.State { +func (st *state) Deployer() *deployer.State { return deployer.NewState(st) } // Addresser returns access to the Addresser API. -func (st *State) Addresser() *addresser.API { +func (st *state) Addresser() *addresser.API { return addresser.NewAPI(st) } -// Environment returns access to the Environment API -func (st *State) Environment() *environment.Facade { - return environment.NewFacade(st) -} - -// Logger returns access to the Logger API -func (st *State) Logger() *apilogger.State { - return apilogger.NewState(st) +// DiscoverSpaces returns access to the DiscoverSpacesAPI. 
+func (st *state) DiscoverSpaces() *discoverspaces.API { + return discoverspaces.NewAPI(st) } // KeyUpdater returns access to the KeyUpdater API -func (st *State) KeyUpdater() *keyupdater.State { +func (st *state) KeyUpdater() *keyupdater.State { return keyupdater.NewState(st) } // InstancePoller returns access to the InstancePoller API -func (st *State) InstancePoller() *instancepoller.API { +func (st *state) InstancePoller() *instancepoller.API { return instancepoller.NewAPI(st) } // CharmRevisionUpdater returns access to the CharmRevisionUpdater API -func (st *State) CharmRevisionUpdater() *charmrevisionupdater.State { +func (st *state) CharmRevisionUpdater() *charmrevisionupdater.State { return charmrevisionupdater.NewState(st) } // Cleaner returns a version of the state that provides access to the cleaner API -func (st *State) Cleaner() *cleaner.API { +func (st *state) Cleaner() *cleaner.API { return cleaner.NewAPI(st) } -// Rsyslog returns access to the Rsyslog API -func (st *State) Rsyslog() *rsyslog.State { - return rsyslog.NewState(st) -} - // ServerVersion holds the version of the API server that we are connected to. // It is possible that this version is Zero if the server does not report this // during login. The second result argument indicates if the version number is // set. -func (st *State) ServerVersion() (version.Number, bool) { +func (st *state) ServerVersion() (version.Number, bool) { return st.serverVersion, st.serverVersion != version.Zero } + +// MetadataUpdater returns access to the imageMetadata API +func (st *state) MetadataUpdater() *imagemetadata.Client { + return imagemetadata.NewClient(st) +} === added file 'src/github.com/juju/juju/api/state_macaroon_test.go' --- src/github.com/juju/juju/api/state_macaroon_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/state_macaroon_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,130 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package api_test + +import ( + "net/url" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api" + apitesting "github.com/juju/juju/api/testing" + "github.com/juju/juju/rpc" +) + +var _ = gc.Suite(&macaroonLoginSuite{}) + +type macaroonLoginSuite struct { + apitesting.MacaroonSuite + client api.Connection +} + +const testUserName = "testuser@somewhere" + +func (s *macaroonLoginSuite) SetUpTest(c *gc.C) { + s.MacaroonSuite.SetUpTest(c) + s.AddModelUser(c, testUserName) + info := s.APIInfo(c) + // Don't log in. 
+ info.UseMacaroons = false + s.client = s.OpenAPI(c, info, nil) +} + +func (s *macaroonLoginSuite) TearDownTest(c *gc.C) { + s.client.Close() + s.MacaroonSuite.TearDownTest(c) +} + +func (s *macaroonLoginSuite) TestSuccessfulLogin(c *gc.C) { + s.DischargerLogin = func() string { return testUserName } + err := s.client.Login(nil, "", "") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *macaroonLoginSuite) TestFailedToObtainDischargeLogin(c *gc.C) { + err := s.client.Login(nil, "", "") + c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://.*": third party refused discharge: cannot discharge: login denied by discharger`) +} + +func (s *macaroonLoginSuite) TestUnknownUserLogin(c *gc.C) { + s.DischargerLogin = func() string { + return "testUnknown" + } + err := s.client.Login(nil, "", "") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }) +} + +func (s *macaroonLoginSuite) TestConnectStream(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, echoURL(c)) + + dischargeCount := 0 + s.DischargerLogin = func() string { + dischargeCount++ + return testUserName + } + // First log into the regular API. + err := s.client.Login(nil, "", "") + c.Assert(err, jc.ErrorIsNil) + c.Assert(dischargeCount, gc.Equals, 1) + + // Then check that ConnectStream works OK and that it doesn't need + // to discharge again. + conn, err := s.client.ConnectStream("/path", nil) + c.Assert(err, gc.IsNil) + defer conn.Close() + connectURL := connectURLFromReader(c, conn) + c.Assert(connectURL.Path, gc.Equals, "/model/"+s.State.ModelTag().Id()+"/path") + c.Assert(dischargeCount, gc.Equals, 1) +} + +func (s *macaroonLoginSuite) TestConnectStreamWithoutLogin(c *gc.C) { + s.PatchValue(api.WebsocketDialConfig, echoURL(c)) + + conn, err := s.client.ConnectStream("/path", nil) + c.Assert(err, gc.ErrorMatches, `cannot use ConnectStream without logging in`) + c.Assert(conn, gc.Equals, nil) +} + +func (s *macaroonLoginSuite) TestConnectStreamFailedDischarge(c *gc.C) { + // This is really a test for ConnectStream, but to test ConnectStream's + // discharge failing logic, we need an actual endpoint to test against, + // and the debug-log endpoint makes a convenient example. + + var dischargeError bool + s.DischargerLogin = func() string { + if dischargeError { + return "" + } + return testUserName + } + + // Make an API connection that uses a cookie jar + // that allows us to remove all cookies. + jar := apitesting.NewClearableCookieJar() + client := s.OpenAPI(c, nil, jar) + + // Ensure that the discharger won't discharge and try + // logging in again. We should succeed in getting past + // authorization because we have the cookies (but + // the actual debug-log endpoint will return an error). + dischargeError = true + logArgs := url.Values{"noTail": []string{"true"}} + conn, err := client.ConnectStream("/log", logArgs) + c.Assert(err, jc.ErrorIsNil) + c.Assert(conn, gc.NotNil) + conn.Close() + + // Then delete all the cookies by deleting the cookie jar + // and try again. The login should fail. 
+ jar.Clear() + + conn, err = client.ConnectStream("/log", logArgs) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://.*": third party refused discharge: cannot discharge: login denied by discharger`) + c.Assert(conn, gc.IsNil) +} === modified file 'src/github.com/juju/juju/api/state_test.go' --- src/github.com/juju/juju/api/state_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/state_test.go 2016-03-22 15:18:22 +0000 @@ -41,7 +41,7 @@ // OpenAPIWithoutLogin connects to the API and returns an api.State without // actually calling st.Login already. The returned strings are the "tag" and // "password" that we would have used to login. -func (s *stateSuite) OpenAPIWithoutLogin(c *gc.C) (api.Connection, string, string) { +func (s *stateSuite) OpenAPIWithoutLogin(c *gc.C) (api.Connection, names.Tag, string) { info := s.APIInfo(c) tag := info.Tag password := info.Password @@ -49,7 +49,7 @@ info.Password = "" apistate, err := api.Open(info, api.DialOpts{}) c.Assert(err, jc.ErrorIsNil) - return apistate, tag.String(), password + return apistate, tag, password } func (s *stateSuite) TestAPIHostPortsAlwaysIncludesTheConnection(c *gc.C) { @@ -73,26 +73,26 @@ }) } -func (s *stateSuite) TestLoginSetsEnvironTag(c *gc.C) { - env, err := s.State.Environment() +func (s *stateSuite) TestLoginSetsModelTag(c *gc.C) { + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) apistate, tag, password := s.OpenAPIWithoutLogin(c) defer apistate.Close() - // We haven't called Login yet, so the EnvironTag shouldn't be set. - envTag, err := apistate.EnvironTag() + // We haven't called Login yet, so the ModelTag shouldn't be set. + modelTag, err := apistate.ModelTag() c.Check(err, gc.ErrorMatches, `"" is not a valid tag`) - c.Check(envTag, gc.Equals, names.EnvironTag{}) + c.Check(modelTag, gc.Equals, names.ModelTag{}) err = apistate.Login(tag, password, "") c.Assert(err, jc.ErrorIsNil) - // Now that we've logged in, EnvironTag should be updated correctly. - envTag, err = apistate.EnvironTag() - c.Check(err, jc.ErrorIsNil) - c.Check(envTag, gc.Equals, env.EnvironTag()) - // The server tag is also set, and since the environment is the - // state server environment, the uuid is the same. - srvTag, err := apistate.ServerTag() - c.Check(err, jc.ErrorIsNil) - c.Check(srvTag, gc.Equals, env.EnvironTag()) + // Now that we've logged in, ModelTag should be updated correctly. + modelTag, err = apistate.ModelTag() + c.Check(err, jc.ErrorIsNil) + c.Check(modelTag, gc.Equals, env.ModelTag()) + // The controller tag is also set, and since the model is the + // controller model, the uuid is the same. 
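For orientation, a minimal sketch of the client-side calls these assertions exercise; this is illustrative only (not part of the patch) and assumes an already-open api.Connection on which Login has succeeded:

    import (
        "fmt"

        "github.com/juju/juju/api"
    )

    // tagsAfterLogin shows how ModelTag and ControllerTag become
    // available once Login has succeeded. Editor's sketch only.
    func tagsAfterLogin(conn api.Connection) error {
        modelTag, err := conn.ModelTag()
        if err != nil {
            // Before Login this fails: `"" is not a valid tag`.
            return err
        }
        controllerTag, err := conn.ControllerTag()
        if err != nil {
            return err
        }
        // For the controller model, both tags carry the same UUID.
        fmt.Printf("model %s on controller %s\n", modelTag.Id(), controllerTag.Id())
        return nil
    }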
+ controllerTag, err := apistate.ControllerTag() + c.Check(err, jc.ErrorIsNil) + c.Check(controllerTag, gc.Equals, env.ModelTag()) } func (s *stateSuite) TestLoginTracksFacadeVersions(c *gc.C) { @@ -107,7 +107,7 @@ c.Check(allVersions, gc.Not(gc.HasLen), 0) // For sanity checking, ensure that we have a v2 of the Client facade c.Assert(allVersions["Client"], gc.Not(gc.HasLen), 0) - c.Check(allVersions["Client"][0], gc.Equals, 0) + c.Check(allVersions["Client"][0], gc.Equals, 1) } func (s *stateSuite) TestAllFacadeVersionsSafeFromMutation(c *gc.C) { @@ -124,7 +124,7 @@ } func (s *stateSuite) TestBestFacadeVersion(c *gc.C) { - c.Check(s.APIState.BestFacadeVersion("Client"), gc.Equals, 0) + c.Check(s.APIState.BestFacadeVersion("Client"), gc.Equals, 1) } func (s *stateSuite) TestAPIHostPortsMovesConnectedValueFirst(c *gc.C) { === added directory 'src/github.com/juju/juju/api/statushistory' === added file 'src/github.com/juju/juju/api/statushistory/pruner.go' --- src/github.com/juju/juju/api/statushistory/pruner.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/statushistory/pruner.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package statushistory + +import ( + "github.com/juju/juju/api/base" + "github.com/juju/juju/apiserver/params" +) + +const apiName = "StatusHistory" + +// Facade allows calls to "StatusHistory" endpoints +type Facade struct { + facade base.FacadeCaller +} + +// NewFacade returns a status "StatusHistory" Facade. +func NewFacade(caller base.APICaller) *Facade { + facadeCaller := base.NewFacadeCaller(caller, apiName) + return &Facade{facadeCaller} +} + +// Prune calls "StatusHistory.Prune" +func (s *Facade) Prune(maxLogsPerEntity int) error { + p := params.StatusHistoryPruneArgs{ + MaxLogsPerEntity: maxLogsPerEntity, + } + return s.facade.FacadeCall("Prune", p, nil) +} === modified file 'src/github.com/juju/juju/api/storage/client.go' --- src/github.com/juju/juju/api/storage/client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/storage/client.go 2016-03-22 15:18:22 +0000 @@ -23,45 +23,63 @@ // NewClient creates a new client for accessing the storage API. func NewClient(st base.APICallCloser) *Client { frontend, backend := base.NewClientFacade(st, "Storage") - logger.Debugf("\nSTORAGE FRONT-END: %#v", frontend) - logger.Debugf("\nSTORAGE BACK-END: %#v", backend) return &Client{ClientFacade: frontend, facade: backend} } -// Show retrieves information about desired storage instances. -func (c *Client) Show(tags []names.StorageTag) ([]params.StorageDetailsResult, error) { +// StorageDetails retrieves details about desired storage instances. +func (c *Client) StorageDetails(tags []names.StorageTag) ([]params.StorageDetailsResult, error) { found := params.StorageDetailsResults{} entities := make([]params.Entity, len(tags)) for i, tag := range tags { entities[i] = params.Entity{Tag: tag.String()} } - if err := c.facade.FacadeCall("Show", params.Entities{Entities: entities}, &found); err != nil { + if err := c.facade.FacadeCall("StorageDetails", params.Entities{Entities: entities}, &found); err != nil { return nil, errors.Trace(err) } return found.Results, nil } -// List lists all storage. -func (c *Client) List() ([]params.StorageDetailsResult, error) { - found := params.StorageDetailsResults{} - if err := c.facade.FacadeCall("List", nil, &found); err != nil { +// ListStorageDetails lists all storage. 
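As an aside on the StatusHistory facade added above: it is a single-call wrapper, so typical use is one constructor call followed by one Prune call. A minimal sketch, assuming an authenticated api.Connection; illustrative only, not part of the patch:

    import (
        "github.com/juju/juju/api"
        "github.com/juju/juju/api/statushistory"
    )

    // pruneStatusHistory caps retained status history at max entries
    // per entity. Editor's sketch; the connection wiring is assumed.
    func pruneStatusHistory(conn api.Connection, max int) error {
        return statushistory.NewFacade(conn).Prune(max)
    }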
+func (c *Client) ListStorageDetails() ([]params.StorageDetails, error) { + args := params.StorageFilters{ + []params.StorageFilter{{}}, // one empty filter + } + var results params.StorageDetailsListResults + if err := c.facade.FacadeCall("ListStorageDetails", args, &results); err != nil { return nil, errors.Trace(err) } - return found.Results, nil + if len(results.Results) != 1 { + return nil, errors.Errorf( + "expected 1 result, got %d", + len(results.Results), + ) + } + if results.Results[0].Error != nil { + return nil, errors.Trace(results.Results[0].Error) + } + return results.Results[0].Result, nil } // ListPools returns a list of pools that matches given filter. // If no filter was provided, a list of all pools is returned. func (c *Client) ListPools(providers, names []string) ([]params.StoragePool, error) { - args := params.StoragePoolFilter{ - Names: names, - Providers: providers, + args := params.StoragePoolFilters{ + Filters: []params.StoragePoolFilter{{ + Names: names, + Providers: providers, + }}, } - found := params.StoragePoolsResult{} - if err := c.facade.FacadeCall("ListPools", args, &found); err != nil { + var results params.StoragePoolsResults + if err := c.facade.FacadeCall("ListPools", args, &results); err != nil { return nil, errors.Trace(err) } - return found.Results, nil + if len(results.Results) != 1 { + return nil, errors.Errorf("expected 1 result, got %d", len(results.Results)) + } + if err := results.Results[0].Error; err != nil { + return nil, err + } + return results.Results[0].Result, nil } // CreatePool creates pool with specified parameters. @@ -76,17 +94,50 @@ // ListVolumes lists volumes for desired machines. // If no machines provided, a list of all volumes is returned. -func (c *Client) ListVolumes(machines []string) ([]params.VolumeDetailsResult, error) { - tags := make([]string, len(machines)) - for i, one := range machines { - tags[i] = names.NewMachineTag(one).String() - } - args := params.VolumeFilter{Machines: tags} - found := params.VolumeDetailsResults{} - if err := c.facade.FacadeCall("ListVolumes", args, &found); err != nil { - return nil, errors.Trace(err) - } - return found.Results, nil +func (c *Client) ListVolumes(machines []string) ([]params.VolumeDetailsListResult, error) { + filters := make([]params.VolumeFilter, len(machines)) + for i, machine := range machines { + filters[i].Machines = []string{names.NewMachineTag(machine).String()} + } + if len(filters) == 0 { + filters = []params.VolumeFilter{{}} + } + args := params.VolumeFilters{filters} + var results params.VolumeDetailsListResults + if err := c.facade.FacadeCall("ListVolumes", args, &results); err != nil { + return nil, errors.Trace(err) + } + if len(results.Results) != len(filters) { + return nil, errors.Errorf( + "expected %d result(s), got %d", + len(filters), len(results.Results), + ) + } + return results.Results, nil +} + +// ListFilesystems lists filesystems for desired machines. +// If no machines provided, a list of all filesystems is returned. 
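The rewritten list methods share a bulk-call convention: with no machines given, the client sends a single empty filter and unwraps the single result; with machines given, it sends one filter per machine and expects the results to align index-for-index (ListFilesystems, defined next, follows the same shape). A caller-side sketch under those assumptions; illustrative only, not part of the patch:

    import (
        "fmt"

        "github.com/juju/juju/api"
        "github.com/juju/juju/api/storage"
    )

    // listStorageSummary prints every known storage instance, then the
    // per-machine volume result groups. Editor's sketch only.
    func listStorageSummary(conn api.Connection) error {
        client := storage.NewClient(conn)
        details, err := client.ListStorageDetails() // one empty filter under the hood
        if err != nil {
            return err
        }
        for _, d := range details {
            fmt.Printf("%s: %s\n", d.StorageTag, d.Status.Status)
        }
        volumes, err := client.ListVolumes([]string{"0", "1"}) // one filter per machine
        if err != nil {
            return err
        }
        fmt.Println(len(volumes), "volume result groups") // aligned with the machines
        return nil
    }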
+func (c *Client) ListFilesystems(machines []string) ([]params.FilesystemDetailsListResult, error) { + filters := make([]params.FilesystemFilter, len(machines)) + for i, machine := range machines { + filters[i].Machines = []string{names.NewMachineTag(machine).String()} + } + if len(filters) == 0 { + filters = []params.FilesystemFilter{{}} + } + args := params.FilesystemFilters{filters} + var results params.FilesystemDetailsListResults + if err := c.facade.FacadeCall("ListFilesystems", args, &results); err != nil { + return nil, errors.Trace(err) + } + if len(results.Results) != len(filters) { + return nil, errors.Errorf( + "expected %d result(s), got %d", + len(filters), len(results.Results), + ) + } + return results.Results, nil } // AddToUnit adds specified storage to desired units. === modified file 'src/github.com/juju/juju/api/storage/client_test.go' --- src/github.com/juju/juju/api/storage/client_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/storage/client_test.go 2016-03-22 15:18:22 +0000 @@ -25,7 +25,7 @@ var _ = gc.Suite(&storageMockSuite{}) -func (s *storageMockSuite) TestShow(c *gc.C) { +func (s *storageMockSuite) TestStorageDetails(c *gc.C) { one := "shared-fs/0" oneTag := names.NewStorageTag(one) two := "db-dir/1000" @@ -41,7 +41,7 @@ ) error { c.Check(objType, gc.Equals, "Storage") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "Show") + c.Check(request, gc.Equals, "StorageDetails") args, ok := a.(params.Entities) c.Assert(ok, jc.IsTrue) @@ -72,7 +72,7 @@ }) storageClient := storage.NewClient(apiCaller) tags := []names.StorageTag{oneTag, twoTag} - found, err := storageClient.Show(tags) + found, err := storageClient.StorageDetails(tags) c.Assert(err, jc.ErrorIsNil) c.Assert(found, gc.HasLen, 3) c.Assert(expected.Contains(found[0].Result.StorageTag), jc.IsTrue) @@ -80,7 +80,7 @@ c.Assert(found[2].Error, gc.ErrorMatches, msg) } -func (s *storageMockSuite) TestShowFacadeCallError(c *gc.C) { +func (s *storageMockSuite) TestStorageDetailsFacadeCallError(c *gc.C) { one := "shared-fs/0" oneTag := names.NewStorageTag(one) @@ -93,19 +93,18 @@ ) error { c.Check(objType, gc.Equals, "Storage") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "Show") + c.Check(request, gc.Equals, "StorageDetails") return errors.New(msg) }) storageClient := storage.NewClient(apiCaller) - found, err := storageClient.Show([]names.StorageTag{oneTag}) + found, err := storageClient.StorageDetails([]names.StorageTag{oneTag}) c.Assert(errors.Cause(err), gc.ErrorMatches, msg) c.Assert(found, gc.HasLen, 0) } -func (s *storageMockSuite) TestList(c *gc.C) { +func (s *storageMockSuite) TestListStorageDetails(c *gc.C) { storageTag := names.NewStorageTag("db-dir/1000") - msg := "call failure" apiCaller := basetesting.APICallerFunc( func(objType string, @@ -115,46 +114,42 @@ ) error { c.Check(objType, gc.Equals, "Storage") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "List") - c.Check(a, gc.IsNil) + c.Check(request, gc.Equals, "ListStorageDetails") + c.Check(a, jc.DeepEquals, params.StorageFilters{ + []params.StorageFilter{{}}, + }) - if results, k := result.(*params.StorageDetailsResults); k { - instances := []params.StorageDetailsResult{{ - Error: common.ServerError(errors.New(msg)), - }, { - Result: ¶ms.StorageDetails{ - StorageTag: storageTag.String(), - Status: params.EntityStatus{ - Status: "attached", - }, - Persistent: true, + c.Assert(result, gc.FitsTypeOf, ¶ms.StorageDetailsListResults{}) + results := result.(*params.StorageDetailsListResults) + results.Results 
= []params.StorageDetailsListResult{{ + Result: []params.StorageDetails{{ + StorageTag: storageTag.String(), + Status: params.EntityStatus{ + Status: "attached", }, - }} - results.Results = instances - } + Persistent: true, + }}, + }} return nil - }) + }, + ) storageClient := storage.NewClient(apiCaller) - found, err := storageClient.List() + found, err := storageClient.ListStorageDetails() c.Check(err, jc.ErrorIsNil) - c.Assert(found, gc.HasLen, 2) - expected := []params.StorageDetailsResult{{ - Error: ¶ms.Error{Message: msg}, - }, { - Result: ¶ms.StorageDetails{ - StorageTag: "storage-db-dir-1000", - Status: params.EntityStatus{ - Status: "attached", - }, - Persistent: true, + c.Assert(found, gc.HasLen, 1) + expected := []params.StorageDetails{{ + StorageTag: "storage-db-dir-1000", + Status: params.EntityStatus{ + Status: "attached", }, + Persistent: true, }} c.Assert(found, jc.DeepEquals, expected) } -func (s *storageMockSuite) TestListFacadeCallError(c *gc.C) { +func (s *storageMockSuite) TestListStorageDetailsFacadeCallError(c *gc.C) { msg := "facade failure" apiCaller := basetesting.APICallerFunc( func(objType string, @@ -164,12 +159,12 @@ ) error { c.Check(objType, gc.Equals, "Storage") c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "List") + c.Check(request, gc.Equals, "ListStorageDetails") return errors.New(msg) }) storageClient := storage.NewClient(apiCaller) - found, err := storageClient.List() + found, err := storageClient.ListStorageDetails() c.Assert(errors.Cause(err), gc.ErrorMatches, msg) c.Assert(found, gc.HasLen, 0) } @@ -192,21 +187,22 @@ c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "ListPools") - args, ok := a.(params.StoragePoolFilter) - c.Assert(ok, jc.IsTrue) - c.Assert(args.Names, gc.HasLen, 2) - c.Assert(args.Providers, gc.HasLen, 1) + args := a.(params.StoragePoolFilters) + c.Assert(args.Filters, gc.HasLen, 1) + c.Assert(args.Filters[0].Names, gc.HasLen, 2) + c.Assert(args.Filters[0].Providers, gc.HasLen, 1) - if results, k := result.(*params.StoragePoolsResult); k { - instances := make([]params.StoragePool, want) - for i := 0; i < want; i++ { - instances[i] = params.StoragePool{ - Name: fmt.Sprintf("name%v", i), - Provider: fmt.Sprintf("type%v", i), - } + results := result.(*params.StoragePoolsResults) + pools := make([]params.StoragePool, want) + for i := 0; i < want; i++ { + pools[i] = params.StoragePool{ + Name: fmt.Sprintf("name%v", i), + Provider: fmt.Sprintf("type%v", i), } - results.Results = instances } + results.Results = []params.StoragePoolsResult{{ + Result: pools, + }} return nil }) @@ -294,11 +290,7 @@ func (s *storageMockSuite) TestListVolumes(c *gc.C) { var called bool - machines := []string{"one", "two"} - machineTags := set.NewStrings( - names.NewMachineTag(machines[0]).String(), - names.NewMachineTag(machines[1]).String(), - ) + machines := []string{"0", "1"} apiCaller := basetesting.APICallerFunc( func(objType string, version int, @@ -310,30 +302,43 @@ c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "ListVolumes") - c.Assert(a, gc.FitsTypeOf, params.VolumeFilter{}) - args := a.(params.VolumeFilter) - c.Assert(args.Machines, gc.HasLen, 2) - - c.Assert(result, gc.FitsTypeOf, ¶ms.VolumeDetailsResults{}) - results := result.(*params.VolumeDetailsResults) - attachments := make([]params.VolumeAttachment, len(args.Machines)) - for i, m := range args.Machines { - attachments[i] = params.VolumeAttachment{ - MachineTag: m} - } - results.Results = []params.VolumeDetailsResult{ - params.VolumeDetailsResult{LegacyAttachments: 
attachments}, - } + c.Assert(a, gc.FitsTypeOf, params.VolumeFilters{}) + args := a.(params.VolumeFilters) + c.Assert(args.Filters, gc.HasLen, 2) + c.Assert(args.Filters[0].Machines, jc.DeepEquals, []string{"machine-0"}) + c.Assert(args.Filters[1].Machines, jc.DeepEquals, []string{"machine-1"}) + + c.Assert(result, gc.FitsTypeOf, ¶ms.VolumeDetailsListResults{}) + results := result.(*params.VolumeDetailsListResults) + + details := params.VolumeDetails{ + VolumeTag: "volume-0", + MachineAttachments: map[string]params.VolumeAttachmentInfo{ + "machine-0": params.VolumeAttachmentInfo{}, + "machine-1": params.VolumeAttachmentInfo{}, + }, + } + results.Results = []params.VolumeDetailsListResult{{ + Result: []params.VolumeDetails{details}, + }, { + Result: []params.VolumeDetails{details}, + }} return nil }) storageClient := storage.NewClient(apiCaller) found, err := storageClient.ListVolumes(machines) c.Assert(called, jc.IsTrue) c.Assert(err, jc.ErrorIsNil) - c.Assert(found, gc.HasLen, 1) - c.Assert(found[0].LegacyAttachments, gc.HasLen, len(machines)) - c.Assert(machineTags.Contains(found[0].LegacyAttachments[0].MachineTag), jc.IsTrue) - c.Assert(machineTags.Contains(found[0].LegacyAttachments[1].MachineTag), jc.IsTrue) + c.Assert(found, gc.HasLen, 2) + for i := 0; i < 2; i++ { + c.Assert(found[i].Result, jc.DeepEquals, []params.VolumeDetails{{ + VolumeTag: "volume-0", + MachineAttachments: map[string]params.VolumeAttachmentInfo{ + "machine-0": params.VolumeAttachmentInfo{}, + "machine-1": params.VolumeAttachmentInfo{}, + }, + }}) + } } func (s *storageMockSuite) TestListVolumesEmptyFilter(c *gc.C) { @@ -350,23 +355,26 @@ c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "ListVolumes") - c.Assert(a, gc.FitsTypeOf, params.VolumeFilter{}) - args := a.(params.VolumeFilter) - c.Assert(args.IsEmpty(), jc.IsTrue) + c.Assert(a, gc.FitsTypeOf, params.VolumeFilters{}) + args := a.(params.VolumeFilters) + c.Assert(args.Filters, gc.HasLen, 1) + c.Assert(args.Filters[0].IsEmpty(), jc.IsTrue) - c.Assert(result, gc.FitsTypeOf, ¶ms.VolumeDetailsResults{}) - results := result.(*params.VolumeDetailsResults) - results.Results = []params.VolumeDetailsResult{ - {LegacyVolume: ¶ms.LegacyVolumeDetails{VolumeTag: tag}}, + c.Assert(result, gc.FitsTypeOf, ¶ms.VolumeDetailsListResults{}) + results := result.(*params.VolumeDetailsListResults) + results.Results = []params.VolumeDetailsListResult{ + {Result: []params.VolumeDetails{{VolumeTag: tag}}}, } return nil - }) + }, + ) storageClient := storage.NewClient(apiCaller) found, err := storageClient.ListVolumes(nil) c.Assert(called, jc.IsTrue) c.Assert(err, jc.ErrorIsNil) c.Assert(found, gc.HasLen, 1) - c.Assert(found[0].LegacyVolume.VolumeTag, gc.Equals, tag) + c.Assert(found[0].Result, gc.HasLen, 1) + c.Assert(found[0].Result[0].VolumeTag, gc.Equals, tag) } func (s *storageMockSuite) TestListVolumesFacadeCallError(c *gc.C) { @@ -388,6 +396,108 @@ c.Assert(errors.Cause(err), gc.ErrorMatches, msg) } +func (s *storageMockSuite) TestListFilesystems(c *gc.C) { + expected := params.FilesystemDetails{ + FilesystemTag: "filesystem-1", + Info: params.FilesystemInfo{ + FilesystemId: "fs-id", + Size: 4096, + }, + Status: params.EntityStatus{ + Status: "attached", + }, + MachineAttachments: map[string]params.FilesystemAttachmentInfo{ + "0": params.FilesystemAttachmentInfo{ + MountPoint: "/mnt/kinabalu", + ReadOnly: false, + }, + }, + } + + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + 
c.Check(objType, gc.Equals, "Storage") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "ListFilesystems") + + c.Assert(a, gc.FitsTypeOf, params.FilesystemFilters{}) + args := a.(params.FilesystemFilters) + c.Assert(args.Filters, jc.DeepEquals, []params.FilesystemFilter{{ + Machines: []string{"machine-1"}, + }, { + Machines: []string{"machine-2"}, + }}) + + c.Assert(result, gc.FitsTypeOf, ¶ms.FilesystemDetailsListResults{}) + results := result.(*params.FilesystemDetailsListResults) + results.Results = []params.FilesystemDetailsListResult{{ + Result: []params.FilesystemDetails{expected}, + }, {}} + return nil + }, + ) + storageClient := storage.NewClient(apiCaller) + found, err := storageClient.ListFilesystems([]string{"1", "2"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found, gc.HasLen, 2) + c.Assert(found[0].Result, jc.DeepEquals, []params.FilesystemDetails{expected}) + c.Assert(found[1].Result, jc.DeepEquals, []params.FilesystemDetails{}) +} + +func (s *storageMockSuite) TestListFilesystemsEmptyFilter(c *gc.C) { + var called bool + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + called = true + c.Check(objType, gc.Equals, "Storage") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "ListFilesystems") + + c.Assert(a, gc.FitsTypeOf, params.FilesystemFilters{}) + args := a.(params.FilesystemFilters) + c.Assert(args.Filters, gc.HasLen, 1) + c.Assert(args.Filters[0].IsEmpty(), jc.IsTrue) + + c.Assert(result, gc.FitsTypeOf, ¶ms.FilesystemDetailsListResults{}) + results := result.(*params.FilesystemDetailsListResults) + results.Results = []params.FilesystemDetailsListResult{{}} + + return nil + }, + ) + storageClient := storage.NewClient(apiCaller) + _, err := storageClient.ListFilesystems(nil) + c.Assert(called, jc.IsTrue) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *storageMockSuite) TestListFilesystemsFacadeCallError(c *gc.C) { + msg := "facade failure" + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, result interface{}, + ) error { + c.Check(objType, gc.Equals, "Storage") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "ListFilesystems") + + return errors.New(msg) + }) + storageClient := storage.NewClient(apiCaller) + _, err := storageClient.ListFilesystems(nil) + c.Assert(errors.Cause(err), gc.ErrorMatches, msg) +} + func (s *storageMockSuite) TestAddToUnit(c *gc.C) { size := uint64(42) cons := params.StorageConstraints{ === modified file 'src/github.com/juju/juju/api/storageprovisioner/provisioner.go' --- src/github.com/juju/juju/api/storageprovisioner/provisioner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/storageprovisioner/provisioner.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,9 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) const storageProvisionerFacade = "StorageProvisioner" @@ -20,23 +21,23 @@ facade base.FacadeCaller scope names.Tag - *common.EnvironWatcher + *common.ModelWatcher } // NewState creates a new client-side StorageProvisioner facade. 
-func NewState(caller base.APICaller, scope names.Tag) *State { +func NewState(caller base.APICaller, scope names.Tag) (*State, error) { switch scope.(type) { - case names.EnvironTag: + case names.ModelTag: case names.MachineTag: default: - panic(errors.Errorf("expected EnvironTag or MachineTag, got %T", scope)) + return nil, errors.Errorf("expected ModelTag or MachineTag, got %T", scope) } facadeCaller := base.NewFacadeCaller(caller, storageProvisionerFacade) return &State{ facadeCaller, scope, - common.NewEnvironWatcher(facadeCaller), - } + common.NewModelWatcher(facadeCaller), + }, nil } // WatchBlockDevices watches for changes to the specified machine's block devices. @@ -56,7 +57,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) return w, nil } @@ -77,7 +78,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) return w, nil } @@ -109,20 +110,20 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(st.facade.RawAPICaller(), result) return w, nil } // WatchVolumeAttachments watches for changes to volume attachments // scoped to the entity with the tag passed to NewState. func (st *State) WatchVolumeAttachments() (watcher.MachineStorageIdsWatcher, error) { - return st.watchAttachments("WatchVolumeAttachments", watcher.NewVolumeAttachmentsWatcher) + return st.watchAttachments("WatchVolumeAttachments", apiwatcher.NewVolumeAttachmentsWatcher) } // WatchFilesystemAttachments watches for changes to filesystem attachments // scoped to the entity with the tag passed to NewState. 
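Since NewState now reports an unsupported scope as an error instead of panicking (the constructor change above), call sites gain one explicit check. A minimal sketch, assuming an authenticated api.Connection; illustrative only, not part of the patch:

    import (
        "github.com/juju/names"

        "github.com/juju/juju/api"
        "github.com/juju/juju/api/storageprovisioner"
    )

    // machineProvisioner builds a machine-scoped facade. Only ModelTag
    // and MachineTag scopes are accepted; any other tag kind surfaces
    // here as an error rather than a panic. Editor's sketch only.
    func machineProvisioner(conn api.Connection, machineId string) (*storageprovisioner.State, error) {
        return storageprovisioner.NewState(conn, names.NewMachineTag(machineId))
    }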
func (st *State) WatchFilesystemAttachments() (watcher.MachineStorageIdsWatcher, error) { - return st.watchAttachments("WatchFilesystemAttachments", watcher.NewFilesystemAttachmentsWatcher) + return st.watchAttachments("WatchFilesystemAttachments", apiwatcher.NewFilesystemAttachmentsWatcher) } func (st *State) watchAttachments( === modified file 'src/github.com/juju/juju/api/storageprovisioner/provisioner_test.go' --- src/github.com/juju/juju/api/storageprovisioner/provisioner_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/storageprovisioner/provisioner_test.go 2016-03-22 15:18:22 +0000 @@ -23,18 +23,28 @@ coretesting.BaseSuite } -func (s *provisionerSuite) TestNewState(c *gc.C) { - apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { +var nullAPICaller = testing.APICallerFunc( + func(objType string, version int, id, request string, arg, result interface{}) error { return nil - }) - - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - c.Assert(st, gc.NotNil) - st = storageprovisioner.NewState(apiCaller, names.NewEnvironTag("87927ace-9e41-4fd5-8103-1a6fb5ff7eb4")) - c.Assert(st, gc.NotNil) - c.Assert(func() { - storageprovisioner.NewState(apiCaller, names.NewUnitTag("mysql/0")) - }, gc.PanicMatches, "expected EnvironTag or MachineTag, got names.UnitTag") + }, +) + +func (s *provisionerSuite) TestNewStateMachineScope(c *gc.C) { + st, err := storageprovisioner.NewState(nullAPICaller, names.NewMachineTag("123")) + c.Check(err, jc.ErrorIsNil) + c.Check(st, gc.NotNil) +} + +func (s *provisionerSuite) TestNewStateModelScope(c *gc.C) { + st, err := storageprovisioner.NewState(nullAPICaller, names.NewModelTag("87927ace-9e41-4fd5-8103-1a6fb5ff7eb4")) + c.Check(err, jc.ErrorIsNil) + c.Check(st, gc.NotNil) +} + +func (s *provisionerSuite) TestNewStateBadScope(c *gc.C) { + st, err := storageprovisioner.NewState(nullAPICaller, names.NewUnitTag("mysql/0")) + c.Check(st, gc.IsNil) + c.Check(err, gc.ErrorMatches, "expected ModelTag or MachineTag, got names.UnitTag") } func (s *provisionerSuite) TestWatchVolumes(c *gc.C) { @@ -54,8 +64,9 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchVolumes() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchVolumes() c.Check(err, gc.ErrorMatches, "FAIL") c.Check(callCount, gc.Equals, 1) } @@ -77,8 +88,9 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchFilesystems() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchFilesystems() c.Check(err, gc.ErrorMatches, "FAIL") c.Check(callCount, gc.Equals, 1) } @@ -100,8 +112,9 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchVolumeAttachments() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchVolumeAttachments() c.Check(err, gc.ErrorMatches, "FAIL") c.Check(callCount, gc.Equals, 1) } @@ -123,8 +136,9 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchFilesystemAttachments() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchFilesystemAttachments() c.Check(err, 
gc.ErrorMatches, "FAIL") c.Check(callCount, gc.Equals, 1) } @@ -147,8 +161,9 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchBlockDevices(names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchBlockDevices(names.NewMachineTag("123")) c.Check(err, gc.ErrorMatches, "FAIL") } @@ -177,7 +192,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumes, err := st.Volumes([]names.VolumeTag{names.NewVolumeTag("100")}) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) @@ -217,7 +233,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) filesystems, err := st.Filesystems([]names.FilesystemTag{names.NewFilesystemTag("100")}) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) @@ -262,7 +279,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumes, err := st.VolumeAttachments([]params.MachineStorageId{{ MachineTag: "machine-100", AttachmentTag: "volume-100", }}) @@ -297,7 +315,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumes, err := st.VolumeBlockDevices([]params.MachineStorageId{{ MachineTag: "machine-100", AttachmentTag: "volume-100", }}) @@ -335,7 +354,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) filesystems, err := st.FilesystemAttachments([]params.MachineStorageId{{ MachineTag: "machine-100", AttachmentTag: "filesystem-100", }}) @@ -366,7 +386,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumeParams, err := st.VolumeParams([]names.VolumeTag{names.NewVolumeTag("100")}) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) @@ -399,7 +420,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) filesystemParams, err := st.FilesystemParams([]names.FilesystemTag{names.NewFilesystemTag("100")}) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) @@ -439,7 +461,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumeParams, err := st.VolumeAttachmentParams([]params.MachineStorageId{{ MachineTag: "machine-100", AttachmentTag: "volume-100", }}) @@ -478,7 +501,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) filesystemParams, err 
:= st.FilesystemAttachmentParams([]params.MachineStorageId{{ MachineTag: "machine-100", AttachmentTag: "filesystem-100", }}) @@ -513,7 +537,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumes := []params.Volume{{ VolumeTag: "volume-100", Info: params.VolumeInfo{ @@ -551,7 +576,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) filesystems := []params.Filesystem{{ FilesystemTag: "filesystem-100", Info: params.FilesystemInfo{ @@ -590,7 +616,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) errorResults, err := st.SetVolumeAttachmentInfo(volumeAttachments) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) @@ -622,7 +649,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) errorResults, err := st.SetFilesystemAttachmentInfo(filesystemAttachments) c.Check(err, jc.ErrorIsNil) c.Check(callCount, gc.Equals, 1) @@ -648,7 +676,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumes := []names.Tag{names.NewVolumeTag("100")} errorResults, err := apiCall(st, volumes) c.Check(err, jc.ErrorIsNil) @@ -684,7 +713,8 @@ return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) volumes := []names.Tag{names.NewVolumeTag("100")} lifeResults, err := st.Life(volumes) c.Check(err, jc.ErrorIsNil) @@ -696,8 +726,9 @@ apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { return errors.New("blargh") }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - err := apiCall(st) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + err = apiCall(st) c.Check(err, gc.ErrorMatches, "blargh") } @@ -773,8 +804,9 @@ } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchVolumes() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchVolumes() c.Check(err, gc.ErrorMatches, "MSG") } @@ -787,7 +819,8 @@ } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) results, err := st.Volumes([]names.VolumeTag{names.NewVolumeTag("100")}) c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.HasLen, 1) @@ -803,7 +836,8 @@ } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) results, err := st.VolumeParams([]names.VolumeTag{names.NewVolumeTag("100")}) c.Assert(err, 
jc.ErrorIsNil) c.Assert(results, gc.HasLen, 1) @@ -819,7 +853,8 @@ } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) results, err := st.SetVolumeInfo([]params.Volume{{ VolumeTag: names.NewVolumeTag("100").String(), }}) @@ -837,7 +872,8 @@ } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) tags := []names.Tag{ names.NewVolumeTag("100"), } @@ -868,7 +904,8 @@ } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) tags := []names.Tag{ names.NewVolumeTag("100"), } @@ -878,38 +915,40 @@ c.Check(results[0].Error, gc.ErrorMatches, "MSG") } -func (s *provisionerSuite) TestWatchForEnvironConfigChanges(c *gc.C) { +func (s *provisionerSuite) TestWatchForModelConfigChanges(c *gc.C) { apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "StorageProvisioner") c.Check(version, gc.Equals, 0) c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "WatchForEnvironConfigChanges") + c.Check(request, gc.Equals, "WatchForModelConfigChanges") c.Assert(result, gc.FitsTypeOf, ¶ms.NotifyWatchResult{}) *(result.(*params.NotifyWatchResult)) = params.NotifyWatchResult{ NotifyWatcherId: "abc", } return errors.New("FAIL") }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - _, err := st.WatchForEnvironConfigChanges() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + _, err = st.WatchForModelConfigChanges() c.Assert(err, gc.ErrorMatches, "FAIL") } -func (s *provisionerSuite) TestEnvironConfig(c *gc.C) { - inputCfg := coretesting.EnvironConfig(c) +func (s *provisionerSuite) TestModelConfig(c *gc.C) { + inputCfg := coretesting.ModelConfig(c) apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "StorageProvisioner") c.Check(version, gc.Equals, 0) c.Check(id, gc.Equals, "") - c.Check(request, gc.Equals, "EnvironConfig") - c.Assert(result, gc.FitsTypeOf, ¶ms.EnvironConfigResult{}) - *(result.(*params.EnvironConfigResult)) = params.EnvironConfigResult{ + c.Check(request, gc.Equals, "ModelConfig") + c.Assert(result, gc.FitsTypeOf, ¶ms.ModelConfigResult{}) + *(result.(*params.ModelConfigResult)) = params.ModelConfigResult{ Config: inputCfg.AllAttrs(), } return nil }) - st := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) - outputCfg, err := st.EnvironConfig() + st, err := storageprovisioner.NewState(apiCaller, names.NewMachineTag("123")) + c.Assert(err, jc.ErrorIsNil) + outputCfg, err := st.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(outputCfg.AllAttrs(), jc.DeepEquals, inputCfg.AllAttrs()) } === modified file 'src/github.com/juju/juju/api/subnets/subnets.go' --- src/github.com/juju/juju/api/subnets/subnets.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/subnets/subnets.go 2016-03-22 15:18:22 +0000 @@ -35,7 +35,7 @@ } } -// AddSubnet adds an existing subnet to the environment. +// AddSubnet adds an existing subnet to the model. 
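A caller-side sketch of the method defined next; the subnets.NewAPI constructor name and the connection are assumptions for illustration, not part of the patch:

    import (
        "github.com/juju/names"

        "github.com/juju/juju/api"
        "github.com/juju/juju/api/subnets"
        "github.com/juju/juju/network"
    )

    // addKnownSubnet registers an existing subnet with a space.
    // Editor's sketch only.
    func addKnownSubnet(conn api.Connection) error {
        client := subnets.NewAPI(conn)
        return client.AddSubnet(
            names.NewSubnetTag("10.20.0.0/24"),
            network.Id(""), // empty: fall back to the tag's CIDR
            names.NewSpaceTag("internal"),
            nil, // zones
        )
    }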
func (api *API) AddSubnet(subnet names.SubnetTag, providerId network.Id, space names.SpaceTag, zones []string) error { var response params.ErrorResults // Prefer ProviderId when set over CIDR. @@ -77,7 +77,7 @@ return response.OneError() } -// ListSubnets fetches all the subnets known by the environment. +// ListSubnets fetches all the subnets known by the model. func (api *API) ListSubnets(spaceTag *names.SpaceTag, zone string) ([]params.Subnet, error) { var response params.ListSubnetsResults var space string === modified file 'src/github.com/juju/juju/api/subnets/subnets_test.go' --- src/github.com/juju/juju/api/subnets/subnets_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/subnets/subnets_test.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,7 @@ import ( "errors" + "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -15,7 +16,6 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" coretesting "github.com/juju/juju/testing" - "github.com/juju/names" ) // SubnetsSuite tests the client side subnets API === removed directory 'src/github.com/juju/juju/api/systemmanager' === removed file 'src/github.com/juju/juju/api/systemmanager/package_test.go' --- src/github.com/juju/juju/api/systemmanager/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/systemmanager/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package systemmanager_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/api/systemmanager/systemmanager.go' --- src/github.com/juju/juju/api/systemmanager/systemmanager.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/api/systemmanager/systemmanager.go 1970-01-01 00:00:00 +0000 @@ -1,99 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package systemmanager - -import ( - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/base" - "github.com/juju/juju/apiserver/params" -) - -var logger = loggo.GetLogger("juju.api.systemmanager") - -// Client provides methods that the Juju client command uses to interact -// with systems stored in the Juju Server. -type Client struct { - base.ClientFacade - facade base.FacadeCaller -} - -// NewClient creates a new `Client` based on an existing authenticated API -// connection. -func NewClient(st base.APICallCloser) *Client { - frontend, backend := base.NewClientFacade(st, "SystemManager") - logger.Tracef("%#v", frontend) - return &Client{ClientFacade: frontend, facade: backend} -} - -// AllEnvironments allows system administrators to get the list of all the -// environments in the system. 
-func (c *Client) AllEnvironments() ([]base.UserEnvironment, error) { - var environments params.UserEnvironmentList - err := c.facade.FacadeCall("AllEnvironments", nil, &environments) - if err != nil { - return nil, errors.Trace(err) - } - result := make([]base.UserEnvironment, len(environments.UserEnvironments)) - for i, env := range environments.UserEnvironments { - owner, err := names.ParseUserTag(env.OwnerTag) - if err != nil { - return nil, errors.Annotatef(err, "OwnerTag %q at position %d", env.OwnerTag, i) - } - result[i] = base.UserEnvironment{ - Name: env.Name, - UUID: env.UUID, - Owner: owner.Canonical(), - LastConnection: env.LastConnection, - } - } - return result, nil -} - -// EnvironmentConfig returns all environment settings for the -// system environment. -func (c *Client) EnvironmentConfig() (map[string]interface{}, error) { - result := params.EnvironmentConfigResults{} - err := c.facade.FacadeCall("EnvironmentConfig", nil, &result) - return result.Config, err -} - -// DestroySystem puts the system environment into a "dying" state, -// and removes all non-manager machine instances. Underlying DestroyEnvironment -// calls will fail if there are any manually-provisioned non-manager machines -// in state. -func (c *Client) DestroySystem(destroyEnvs bool, ignoreBlocks bool) error { - args := params.DestroySystemArgs{ - DestroyEnvironments: destroyEnvs, - IgnoreBlocks: ignoreBlocks, - } - return c.facade.FacadeCall("DestroySystem", args, nil) -} - -// ListBlockedEnvironments returns a list of all environments within the system -// which have at least one block in place. -func (c *Client) ListBlockedEnvironments() ([]params.EnvironmentBlockInfo, error) { - result := params.EnvironmentBlockInfoList{} - err := c.facade.FacadeCall("ListBlockedEnvironments", nil, &result) - return result.Environments, err -} - -// RemoveBlocks removes all the blocks in the system. -func (c *Client) RemoveBlocks() error { - args := params.RemoveBlocksArgs{All: true} - return c.facade.FacadeCall("RemoveBlocks", args, nil) -} - -// WatchAllEnv returns an AllEnvWatcher, from which you can request -// the Next collection of Deltas (for all environments). -func (c *Client) WatchAllEnvs() (*api.AllWatcher, error) { - info := new(api.WatchAll) - if err := c.facade.FacadeCall("WatchAllEnvs", nil, info); err != nil { - return nil, err - } - return api.NewAllEnvWatcher(c.facade.RawAPICaller(), &info.AllWatcherId), nil -} === removed file 'src/github.com/juju/juju/api/systemmanager/systemmanager_test.go' --- src/github.com/juju/juju/api/systemmanager/systemmanager_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/systemmanager/systemmanager_test.go 1970-01-01 00:00:00 +0000 @@ -1,154 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package systemmanager_test - -import ( - "fmt" - "time" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/systemmanager" - commontesting "github.com/juju/juju/apiserver/common/testing" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/feature" - "github.com/juju/juju/juju" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type systemManagerSuite struct { - jujutesting.JujuConnSuite - commontesting.BlockHelper -} - -var _ = gc.Suite(&systemManagerSuite{}) - -func (s *systemManagerSuite) SetUpTest(c *gc.C) { - s.SetInitialFeatureFlags(feature.JES) - s.JujuConnSuite.SetUpTest(c) -} - -func (s *systemManagerSuite) OpenAPI(c *gc.C) *systemmanager.Client { - conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - s.AddCleanup(func(*gc.C) { conn.Close() }) - return systemmanager.NewClient(conn) -} - -func (s *systemManagerSuite) TestAllEnvironments(c *gc.C) { - owner := names.NewUserTag("user@remote") - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "first", Owner: owner}).Close() - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "second", Owner: owner}).Close() - - sysManager := s.OpenAPI(c) - envs, err := sysManager.AllEnvironments() - c.Assert(err, jc.ErrorIsNil) - c.Assert(envs, gc.HasLen, 3) - - var obtained []string - for _, env := range envs { - obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner, env.Name)) - } - expected := []string{ - "dummy-admin@local/dummyenv", - "user@remote/first", - "user@remote/second", - } - c.Assert(obtained, jc.SameContents, expected) -} - -func (s *systemManagerSuite) TestEnvironmentConfig(c *gc.C) { - sysManager := s.OpenAPI(c) - env, err := sysManager.EnvironmentConfig() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env["name"], gc.Equals, "dummyenv") -} - -func (s *systemManagerSuite) TestDestroySystem(c *gc.C) { - s.Factory.MakeEnvironment(c, &factory.EnvParams{Name: "foo"}).Close() - - sysManager := s.OpenAPI(c) - err := sysManager.DestroySystem(false, false) - c.Assert(err, gc.ErrorMatches, "state server environment cannot be destroyed before all other environments are destroyed") -} - -func (s *systemManagerSuite) TestListBlockedEnvironments(c *gc.C) { - err := s.State.SwitchBlockOn(state.ChangeBlock, "change block for state server") - err = s.State.SwitchBlockOn(state.DestroyBlock, "destroy block for state server") - c.Assert(err, jc.ErrorIsNil) - - sysManager := s.OpenAPI(c) - results, err := sysManager.ListBlockedEnvironments() - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, jc.DeepEquals, []params.EnvironmentBlockInfo{ - params.EnvironmentBlockInfo{ - Name: "dummyenv", - UUID: s.State.EnvironUUID(), - OwnerTag: s.AdminUserTag(c).String(), - Blocks: []string{ - "BlockChange", - "BlockDestroy", - }, - }, - }) -} - -func (s *systemManagerSuite) TestRemoveBlocks(c *gc.C) { - s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - sysManager := s.OpenAPI(c) - err := sysManager.RemoveBlocks() - c.Assert(err, jc.ErrorIsNil) - - blocks, err := s.State.AllBlocksForSystem() - c.Assert(err, jc.ErrorIsNil) - c.Assert(blocks, gc.HasLen, 0) -} - -func (s *systemManagerSuite) TestWatchAllEnvs(c *gc.C) { - // The WatchAllEnvs 
infrastructure is comprehensively tested - // else. This test just ensure that the API calls work end-to-end. - sysManager := s.OpenAPI(c) - - w, err := sysManager.WatchAllEnvs() - c.Assert(err, jc.ErrorIsNil) - defer func() { - err := w.Stop() - c.Assert(err, jc.ErrorIsNil) - }() - - deltasC := make(chan []multiwatcher.Delta) - go func() { - deltas, err := w.Next() - c.Assert(err, jc.ErrorIsNil) - deltasC <- deltas - }() - - select { - case deltas := <-deltasC: - c.Assert(deltas, gc.HasLen, 1) - envInfo := deltas[0].Entity.(*multiwatcher.EnvironmentInfo) - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - c.Assert(envInfo.EnvUUID, gc.Equals, env.UUID()) - c.Assert(envInfo.Name, gc.Equals, env.Name()) - c.Assert(envInfo.Life, gc.Equals, multiwatcher.Life("alive")) - c.Assert(envInfo.Owner, gc.Equals, env.Owner().Id()) - c.Assert(envInfo.ServerUUID, gc.Equals, env.ServerUUID()) - case <-time.After(testing.LongWait): - c.Fatal("timed out") - } -} === modified file 'src/github.com/juju/juju/api/testing/apiaddresser.go' --- src/github.com/juju/juju/api/testing/apiaddresser.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/testing/apiaddresser.go 2016-03-22 15:18:22 +0000 @@ -7,10 +7,10 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/watcher/watchertest" ) type APIAddresserTests struct { @@ -79,9 +79,8 @@ w, err := s.facade.WatchAPIHostPorts() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - - wc := statetesting.NewNotifyWatcherC(c, s.state, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.state.StartSync) + defer wc.AssertStops() // Initial event. 
wc.AssertOneChange() @@ -93,7 +92,4 @@ c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } === modified file 'src/github.com/juju/juju/api/testing/environwatcher.go' --- src/github.com/juju/juju/api/testing/environwatcher.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/testing/environwatcher.go 2016-03-22 15:18:22 +0000 @@ -7,11 +7,11 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/watcher/watchertest" ) const ( @@ -19,37 +19,37 @@ NoSecrets = false ) -type EnvironWatcherFacade interface { - WatchForEnvironConfigChanges() (watcher.NotifyWatcher, error) - EnvironConfig() (*config.Config, error) +type ModelWatcherFacade interface { + WatchForModelConfigChanges() (watcher.NotifyWatcher, error) + ModelConfig() (*config.Config, error) } -type EnvironWatcherTests struct { - facade EnvironWatcherFacade +type ModelWatcherTests struct { + facade ModelWatcherFacade state *state.State hasSecrets bool } -func NewEnvironWatcherTests( - facade EnvironWatcherFacade, +func NewModelWatcherTests( + facade ModelWatcherFacade, st *state.State, - hasSecrets bool) *EnvironWatcherTests { - return &EnvironWatcherTests{ + hasSecrets bool) *ModelWatcherTests { + return &ModelWatcherTests{ facade: facade, state: st, hasSecrets: hasSecrets, } } -func (s *EnvironWatcherTests) TestEnvironConfig(c *gc.C) { - envConfig, err := s.state.EnvironConfig() +func (s *ModelWatcherTests) TestModelConfig(c *gc.C) { + envConfig, err := s.state.ModelConfig() c.Assert(err, jc.ErrorIsNil) - conf, err := s.facade.EnvironConfig() + conf, err := s.facade.ModelConfig() c.Assert(err, jc.ErrorIsNil) // If the facade doesn't have secrets, we need to replace the config - // values in our environment to compare against with the secrets replaced. + // values in our model to compare against with the secrets replaced. if !s.hasSecrets { env, err := environs.New(envConfig) c.Assert(err, jc.ErrorIsNil) @@ -66,41 +66,38 @@ c.Assert(conf, jc.DeepEquals, envConfig) } -func (s *EnvironWatcherTests) TestWatchForEnvironConfigChanges(c *gc.C) { - envConfig, err := s.state.EnvironConfig() +func (s *ModelWatcherTests) TestWatchForModelConfigChanges(c *gc.C) { + envConfig, err := s.state.ModelConfig() c.Assert(err, jc.ErrorIsNil) - w, err := s.facade.WatchForEnvironConfigChanges() + w, err := s.facade.WatchForModelConfigChanges() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.state, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.state.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertOneChange() - // Change the environment configuration by updating an existing attribute, check it's detected. + // Change the model configuration by updating an existing attribute, check it's detected. newAttrs := map[string]interface{}{"logging-config": "juju=ERROR"} - err = s.state.UpdateEnvironConfig(newAttrs, nil, nil) + err = s.state.UpdateModelConfig(newAttrs, nil, nil) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - // Change the environment configuration by adding a new attribute, check it's detected. + // Change the model configuration by adding a new attribute, check it's detected. 
newAttrs = map[string]interface{}{"foo": "bar"} - err = s.state.UpdateEnvironConfig(newAttrs, nil, nil) + err = s.state.UpdateModelConfig(newAttrs, nil, nil) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - // Change the environment configuration by removing an attribute, check it's detected. - err = s.state.UpdateEnvironConfig(map[string]interface{}{}, []string{"foo"}, nil) + // Change the model configuration by removing an attribute, check it's detected. + err = s.state.UpdateModelConfig(map[string]interface{}{}, []string{"foo"}, nil) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() // Change it back to the original config. oldAttrs := map[string]interface{}{"logging-config": envConfig.AllAttrs()["logging-config"]} - err = s.state.UpdateEnvironConfig(oldAttrs, nil, nil) + err = s.state.UpdateModelConfig(oldAttrs, nil, nil) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } === added file 'src/github.com/juju/juju/api/testing/macaroonsuite.go' --- src/github.com/juju/juju/api/testing/macaroonsuite.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/testing/macaroonsuite.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,143 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "net/http" + "net/http/cookiejar" + "net/url" + + "github.com/juju/errors" + "github.com/juju/names" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + "github.com/juju/juju/api" + "github.com/juju/juju/environs/config" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +// MacaroonSuite wraps a JujuConnSuite with macaroon authentication +// enabled. +type MacaroonSuite struct { + jujutesting.JujuConnSuite + + // discharger holds the third-party discharger used + // for authentication. + discharger *bakerytest.Discharger + + // DischargerLogin is called by the discharger when an + // API macaroon is discharged. It should either return + // the chosen username or an empty string, in which case + // the discharge is denied. + // If this is nil, func() {return ""} is implied. + DischargerLogin func() string +} + +func (s *MacaroonSuite) SetUpTest(c *gc.C) { + s.discharger = bakerytest.NewDischarger(nil, func(req *http.Request, cond, arg string) ([]checkers.Caveat, error) { + if cond != "is-authenticated-user" { + return nil, errors.New("unknown caveat") + } + var username string + if s.DischargerLogin != nil { + username = s.DischargerLogin() + } + if username == "" { + return nil, errors.New("login denied by discharger") + } + return []checkers.Caveat{checkers.DeclaredCaveat("username", username)}, nil + }) + s.JujuConnSuite.ConfigAttrs = map[string]interface{}{ + config.IdentityURL: s.discharger.Location(), + } + s.JujuConnSuite.SetUpTest(c) +} + +func (s *MacaroonSuite) TearDownTest(c *gc.C) { + s.discharger.Close() + s.JujuConnSuite.TearDownTest(c) +} + +// AddModelUser is a convenience function that adds an external +// user to the current model. It will panic +// if the user name is local. +func (s *MacaroonSuite) AddModelUser(c *gc.C, username string) { + if names.NewUserTag(username).IsLocal() { + panic("cannot use MacaroonSuite.AddModelUser to add a local name") + } + s.Factory.MakeModelUser(c, &factory.ModelUserParams{ + User: username, + }) +} + +// OpenAPI opens a connection to the API using the given information. 
+// and empty DialOpts. If info is nil, s.APIInfo(c) is used. +// If jar is non-nil, it will be used as the store for the cookies created +// as a result of API interaction. +func (s *MacaroonSuite) OpenAPI(c *gc.C, info *api.Info, jar http.CookieJar) api.Connection { + if info == nil { + info = s.APIInfo(c) + } + bakeryClient := httpbakery.NewClient() + if jar != nil { + bakeryClient.Client.Jar = jar + } + conn, err := api.Open(info, api.DialOpts{ + BakeryClient: bakeryClient, + }) + c.Assert(err, gc.IsNil) + return conn +} + +// APIInfo returns API connection info suitable for +// connecting to the API using macaroon authentication. +func (s *MacaroonSuite) APIInfo(c *gc.C) *api.Info { + info := s.JujuConnSuite.APIInfo(c) + info.Tag = nil + info.Password = "" + info.UseMacaroons = true + return info +} + +// NewClearableCookieJar returns a new ClearableCookieJar. +func NewClearableCookieJar() *ClearableCookieJar { + jar, err := cookiejar.New(nil) + if err != nil { + panic(err) + } + return &ClearableCookieJar{ + jar: jar, + } +} + +// ClearableCookieJar implements a cookie jar +// that can be cleared of all cookies for testing purposes. +type ClearableCookieJar struct { + jar http.CookieJar +} + +// Clear clears all the cookies in the jar. +// It is not OK to call Clear concurrently +// with the other methods. +func (jar *ClearableCookieJar) Clear() { + newJar, err := cookiejar.New(nil) + if err != nil { + panic(err) + } + jar.jar = newJar +} + +// Cookies implements http.CookieJar.Cookies. +func (jar *ClearableCookieJar) Cookies(u *url.URL) []*http.Cookie { + return jar.jar.Cookies(u) +} + +// Cookies implements http.CookieJar.SetCookies. +func (jar *ClearableCookieJar) SetCookies(u *url.URL, cookies []*http.Cookie) { + jar.jar.SetCookies(u, cookies) +} === added directory 'src/github.com/juju/juju/api/undertaker' === added file 'src/github.com/juju/juju/api/undertaker/package_test.go' --- src/github.com/juju/juju/api/undertaker/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/undertaker/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker_test + +import ( + "testing" + + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" +) + +func TestAll(t *testing.T) { + gc.TestingT(t) +} + +type undertakerSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&undertakerSuite{}) === added file 'src/github.com/juju/juju/api/undertaker/undertaker.go' --- src/github.com/juju/juju/api/undertaker/undertaker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/undertaker/undertaker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + apiwatcher "github.com/juju/juju/api/watcher" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/watcher" +) + +// Client provides access to the undertaker API +type Client struct { + base.ClientFacade + st base.APICallCloser + facade base.FacadeCaller +} + +// UndertakerClient defines the methods on the undertaker API end point. 
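// The test file added below pins the concrete client to this interface at
// compile time (var _ undertaker.UndertakerClient = (*undertaker.Client)(nil)),
// so code that depends on UndertakerClient can be exercised against a fake.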
+type UndertakerClient interface { + ModelInfo() (params.UndertakerModelInfoResult, error) + ProcessDyingModel() error + RemoveModel() error + WatchModelResources() (watcher.NotifyWatcher, error) + ModelConfig() (*config.Config, error) +} + +// NewClient creates a new client for accessing the undertaker API. +func NewClient(st base.APICallCloser) *Client { + frontend, backend := base.NewClientFacade(st, "Undertaker") + return &Client{ClientFacade: frontend, st: st, facade: backend} +} + +// ModelInfo returns information on the model needed by the undertaker worker. +func (c *Client) ModelInfo() (params.UndertakerModelInfoResult, error) { + result := params.UndertakerModelInfoResult{} + p, err := c.params() + if err != nil { + return params.UndertakerModelInfoResult{}, errors.Trace(err) + } + err = c.facade.FacadeCall("ModelInfo", p, &result) + return result, errors.Trace(err) +} + +// ProcessDyingModel checks if a dying model has any machines or services. +// If there are none, the model's life is changed from dying to dead. +func (c *Client) ProcessDyingModel() error { + p, err := c.params() + if err != nil { + return errors.Trace(err) + } + + return c.facade.FacadeCall("ProcessDyingModel", p, nil) +} + +// RemoveModel removes any records of this model from Juju. +func (c *Client) RemoveModel() error { + p, err := c.params() + if err != nil { + return errors.Trace(err) + } + return c.facade.FacadeCall("RemoveModel", p, nil) +} + +func (c *Client) params() (params.Entities, error) { + modelTag, err := c.st.ModelTag() + if err != nil { + return params.Entities{}, errors.Trace(err) + } + return params.Entities{Entities: []params.Entity{{modelTag.String()}}}, nil +} + +// WatchModelResources starts a watcher for changes to the model's +// machines and services. +func (c *Client) WatchModelResources() (watcher.NotifyWatcher, error) { + var results params.NotifyWatchResults + + p, err := c.params() + if err != nil { + return nil, errors.Trace(err) + } + err = c.facade.FacadeCall("WatchModelResources", p, &results) + if err != nil { + return nil, err + } + if len(results.Results) != 1 { + return nil, errors.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return nil, result.Error + } + w := apiwatcher.NewNotifyWatcher(c.facade.RawAPICaller(), result) + return w, nil +} + +// ModelConfig returns configuration information on the model needed +// by the undertaker worker. +func (c *Client) ModelConfig() (*config.Config, error) { + p, err := c.params() + if err != nil { + return nil, errors.Trace(err) + } + var result params.ModelConfigResult + err = c.facade.FacadeCall("ModelConfig", p, &result) + if err != nil { + return nil, err + } + conf, err := config.New(config.NoDefaults, result.Config) + if err != nil { + return nil, err + } + return conf, nil +} === added file 'src/github.com/juju/juju/api/undertaker/undertaker_test.go' --- src/github.com/juju/juju/api/undertaker/undertaker_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/undertaker/undertaker_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,145 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package undertaker_test + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + basetesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/undertaker" + "github.com/juju/juju/apiserver/params" + coretesting "github.com/juju/juju/testing" +) + +var _ undertaker.UndertakerClient = (*undertaker.Client)(nil) + +func (s *undertakerSuite) TestEnvironInfo(c *gc.C) { + var called bool + client := s.mockClient(c, "ModelInfo", func(response interface{}) { + called = true + result := response.(*params.UndertakerModelInfoResult) + result.Result = params.UndertakerModelInfo{} + }) + + result, err := client.ModelInfo() + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) + c.Assert(result, gc.Equals, params.UndertakerModelInfoResult{}) +} + +func (s *undertakerSuite) TestProcessDyingEnviron(c *gc.C) { + var called bool + client := s.mockClient(c, "ProcessDyingModel", func(response interface{}) { + called = true + c.Assert(response, gc.IsNil) + }) + + c.Assert(client.ProcessDyingModel(), jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *undertakerSuite) TestRemoveModel(c *gc.C) { + var called bool + client := s.mockClient(c, "RemoveModel", func(response interface{}) { + called = true + c.Assert(response, gc.IsNil) + }) + + err := client.RemoveModel() + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +func (s *undertakerSuite) mockClient(c *gc.C, expectedRequest string, callback func(response interface{})) *undertaker.Client { + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + args, response interface{}, + ) error { + c.Check(objType, gc.Equals, "Undertaker") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, expectedRequest) + + a, ok := args.(params.Entities) + c.Check(ok, jc.IsTrue) + c.Check(a.Entities, gc.DeepEquals, []params.Entity{{Tag: coretesting.ModelTag.String()}}) + + callback(response) + return nil + }) + + return undertaker.NewClient(apiCaller) +} + +func (s *undertakerSuite) TestWatchModelResourcesGetsChange(c *gc.C) { + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + args, response interface{}, + ) error { + if resp, ok := response.(*params.NotifyWatchResults); ok { + c.Check(objType, gc.Equals, "Undertaker") + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, "WatchModelResources") + + a, ok := args.(params.Entities) + c.Check(ok, jc.IsTrue) + c.Check(a.Entities, gc.DeepEquals, []params.Entity{{Tag: coretesting.ModelTag.String()}}) + + resp.Results = []params.NotifyWatchResult{{NotifyWatcherId: "1"}} + } else { + c.Check(objType, gc.Equals, "NotifyWatcher") + c.Check(id, gc.Equals, "1") + c.Check(request, gc.Equals, "Next") + } + return nil + }) + + client := undertaker.NewClient(apiCaller) + w, err := client.WatchModelResources() + c.Assert(err, jc.ErrorIsNil) + + select { + case <-w.Changes(): + case <-time.After(coretesting.ShortWait): + c.Fatalf("timed out waiting for change") + } +} + +func (s *undertakerSuite) TestWatchModelResourcesError(c *gc.C) { + var called bool + + // The undertaker feature tests ensure WatchModelResources is connected + // correctly end to end. This test just ensures that the API calls work. 
+	client := s.mockClient(c, "WatchModelResources", func(response interface{}) {
+		called = true
+		c.Check(response, gc.DeepEquals, &params.NotifyWatchResults{Results: []params.NotifyWatchResult(nil)})
+	})
+
+	w, err := client.WatchModelResources()
+	c.Assert(err, gc.ErrorMatches, "expected 1 result, got 0")
+	c.Assert(w, gc.IsNil)
+	c.Assert(called, jc.IsTrue)
+}
+
+func (s *undertakerSuite) TestModelConfig(c *gc.C) {
+	var called bool
+
+	// The undertaker feature tests ensure ModelConfig is connected
+	// correctly end to end. This test just ensures that the API calls work.
+	client := s.mockClient(c, "ModelConfig", func(response interface{}) {
+		called = true
+		c.Check(response, gc.DeepEquals, &params.ModelConfigResult{Config: params.ModelConfig(nil)})
+	})
+
+	// We intentionally don't test the error here. We are only interested that
+	// the ModelConfig endpoint was called.
+	client.ModelConfig()
+	c.Assert(called, jc.IsTrue)
+}

=== added directory 'src/github.com/juju/juju/api/unitassigner'
=== added file 'src/github.com/juju/juju/api/unitassigner/package_test.go'
--- src/github.com/juju/juju/api/unitassigner/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/unitassigner/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package unitassigner
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/juju/api/unitassigner/unitassigner.go'
--- src/github.com/juju/juju/api/unitassigner/unitassigner.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/unitassigner/unitassigner.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,83 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package unitassigner
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/names"
+
+	"github.com/juju/juju/api/base"
+	apiwatcher "github.com/juju/juju/api/watcher"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/watcher"
+)
+
+const uaFacade = "UnitAssigner"
+
+// API provides access to the UnitAssigner API facade.
+type API struct {
+	facade base.FacadeCaller
+}
+
+// New creates a new client-side UnitAssigner facade.
+func New(caller base.APICaller) API {
+	fc := base.NewFacadeCaller(caller, uaFacade)
+	return API{facade: fc}
+}
+
+// AssignUnits tells the controller to run whatever unit assignments it has.
+// Unit assignments for units that no longer exist will return an error that
+// satisfies errors.IsNotFound.
+func (a API) AssignUnits(tags []names.UnitTag) ([]error, error) {
+	entities := make([]params.Entity, len(tags))
+	for i, tag := range tags {
+		entities[i] = params.Entity{Tag: tag.String()}
+	}
+	args := params.Entities{Entities: entities}
+	var result params.ErrorResults
+	if err := a.facade.FacadeCall("AssignUnits", args, &result); err != nil {
+		return nil, err
+	}
+
+	errs := make([]error, len(result.Results))
+	for i, e := range result.Results {
+		if e.Error != nil {
+			errs[i] = convertNotFound(e.Error)
+		}
+	}
+	return errs, nil
+}
+
+// convertNotFound converts param notfound errors into errors.notfound values.
+func convertNotFound(err error) error {
+	if params.IsCodeNotFound(err) {
+		return errors.NewNotFound(err, "")
+	}
+	return err
+}
+
+// WatchUnitAssignments watches the server for new unit assignments to be
+// created.
+func (a API) WatchUnitAssignments() (watcher.StringsWatcher, error) {
+	var result params.StringsWatchResult
+	err := a.facade.FacadeCall("WatchUnitAssignments", nil, &result)
+	if err != nil {
+		return nil, err
+	}
+	if result.Error != nil {
+		return nil, result.Error
+	}
+	w := apiwatcher.NewStringsWatcher(a.facade.RawAPICaller(), result)
+	return w, nil
+}
+
+// SetAgentStatus sets the status of the unit agents.
+func (a API) SetAgentStatus(args params.SetStatus) error {
+	var result params.ErrorResults
+	err := a.facade.FacadeCall("SetAgentStatus", args, &result)
+	if err != nil {
+		return err
+	}
+	return result.Combine()
+}

=== added file 'src/github.com/juju/juju/api/unitassigner/unitassigner_test.go'
--- src/github.com/juju/juju/api/unitassigner/unitassigner_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/api/unitassigner/unitassigner_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,129 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package unitassigner
+
+import (
+	"sync"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/api/base"
+	"github.com/juju/juju/apiserver/params"
+)
+
+var _ = gc.Suite(testsuite{})
+
+type testsuite struct{}
+
+func (testsuite) TestAssignUnits(c *gc.C) {
+	f := &fakeAssignCaller{c: c, response: params.ErrorResults{
+		Results: []params.ErrorResult{
+			{},
+			{},
+		}}}
+	api := New(f)
+	ids := []names.UnitTag{names.NewUnitTag("mysql/0"), names.NewUnitTag("mysql/1")}
+	errs, err := api.AssignUnits(ids)
+	c.Assert(f.request, gc.Equals, "AssignUnits")
+	c.Assert(f.params, gc.DeepEquals,
+		params.Entities{[]params.Entity{
+			{Tag: "unit-mysql-0"},
+			{Tag: "unit-mysql-1"},
+		}},
+	)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(errs, gc.DeepEquals, []error{nil, nil})
+}
+
+func (testsuite) TestAssignUnitsNotFound(c *gc.C) {
+	f := &fakeAssignCaller{c: c, response: params.ErrorResults{
+		Results: []params.ErrorResult{
+			{Error: &params.Error{Code: params.CodeNotFound}},
+		}}}
+	api := New(f)
+	ids := []names.UnitTag{names.NewUnitTag("mysql/0")}
+	errs, err := api.AssignUnits(ids)
+	f.Lock()
+	c.Assert(f.request, gc.Equals, "AssignUnits")
+	c.Assert(f.params, gc.DeepEquals,
+		params.Entities{[]params.Entity{
+			{Tag: "unit-mysql-0"},
+		}},
+	)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(errs, gc.HasLen, 1)
+	c.Assert(errs[0], jc.Satisfies, errors.IsNotFound)
+}
+
+func (testsuite) TestWatchUnitAssignment(c *gc.C) {
+	f := &fakeWatchCaller{
+		c: c,
+		response: params.StringsWatchResult{},
+	}
+	api := New(f)
+	w, err := api.WatchUnitAssignments()
+	f.Lock()
+	c.Assert(f.request, gc.Equals, "WatchUnitAssignments")
+	c.Assert(f.params, gc.IsNil)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(w, gc.NotNil)
+}
+
+type fakeAssignCaller struct {
+	base.APICaller
+	sync.Mutex
+	request string
+	params interface{}
+	response params.ErrorResults
+	err error
+	c *gc.C
+}
+
+func (f *fakeAssignCaller) APICall(objType string, version int, id, request string, param, response interface{}) error {
+	f.Lock()
+	defer f.Unlock()
+	f.request = request
+	f.params = param
+	res, ok := response.(*params.ErrorResults)
+	if !ok {
+		f.c.Errorf("Expected *params.ErrorResults as response, but was %#v", response)
+	} else {
+		*res = f.response
+	}
+	return f.err

}
+
+func (*fakeAssignCaller) BestFacadeVersion(facade string) int {
+	return 1
+}
+
+type fakeWatchCaller struct {
+	base.APICaller
+	sync.Mutex
+	request string
+	params interface{}
+
response params.StringsWatchResult + err error + c *gc.C +} + +func (f *fakeWatchCaller) APICall(objType string, version int, id, request string, param, response interface{}) error { + f.Lock() + defer f.Unlock() + f.request = request + f.params = param + _, ok := response.(*params.StringsWatchResult) + if !ok { + f.c.Errorf("Expected *params.StringsWatchResult as response, but was %#v", response) + } + return f.err +} + +func (*fakeWatchCaller) BestFacadeVersion(facade string) int { + return 1 +} === modified file 'src/github.com/juju/juju/api/uniter/charm.go' --- src/github.com/juju/juju/api/uniter/charm.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/charm.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "net/url" "github.com/juju/errors" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/apiserver/params" ) @@ -16,7 +16,7 @@ // This module implements a subset of the interface provided by // state.Charm, as needed by the uniter API. -// Charm represents the state of a charm in the environment. +// Charm represents the state of a charm in the model. type Charm struct { st *State curl *charm.URL @@ -33,7 +33,7 @@ } // ArchiveURLs returns the URLs to the charm archive (bundle) in the -// environment storage. Each URL should be tried until one succeeds. +// model storage. Each URL should be tried until one succeeds. func (c *Charm) ArchiveURLs() ([]*url.URL, error) { var results params.StringsResults args := params.CharmURLs{ === modified file 'src/github.com/juju/juju/api/uniter/charm_test.go' --- src/github.com/juju/juju/api/uniter/charm_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/uniter/charm_test.go 2016-03-22 15:18:22 +0000 @@ -50,8 +50,8 @@ func (s *charmSuite) TestArchiveURLs(c *gc.C) { apiInfo := s.APIInfo(c) url, err := url.Parse(fmt.Sprintf( - "https://0.1.2.3:1234/environment/%s/charms?file=%s&url=%s", - apiInfo.EnvironTag.Id(), + "https://0.1.2.3:1234/model/%s/charms?file=%s&url=%s", + apiInfo.ModelTag.Id(), url.QueryEscape("*"), url.QueryEscape(s.apiCharm.URL().String()), )) === modified file 'src/github.com/juju/juju/api/uniter/endpoint.go' --- src/github.com/juju/juju/api/uniter/endpoint.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/endpoint.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,7 @@ package uniter import ( - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) // Endpoint represents one endpoint of a relation. It is just a wrapper === modified file 'src/github.com/juju/juju/api/uniter/environ.go' --- src/github.com/juju/juju/api/uniter/environ.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/uniter/environ.go 2016-03-22 15:18:22 +0000 @@ -4,20 +4,20 @@ package uniter // This module implements a subset of the interface provided by -// state.Environment, as needed by the uniter API. +// state.Model, as needed by the uniter API. -// Environment represents the state of an environment. -type Environment struct { +// Model represents the state of a model. +type Model struct { name string uuid string } -// UUID returns the universally unique identifier of the environment. -func (e Environment) UUID() string { +// UUID returns the universally unique identifier of the model. +func (e Model) UUID() string { return e.uuid } -// Name returns the human friendly name of the environment. -func (e Environment) Name() string { +// Name returns the human friendly name of the model. 
+func (e Model) Name() string { return e.name } === modified file 'src/github.com/juju/juju/api/uniter/environ_test.go' --- src/github.com/juju/juju/api/uniter/environ_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/uniter/environ_test.go 2016-03-22 15:18:22 +0000 @@ -13,8 +13,8 @@ type environSuite struct { uniterSuite - apiEnviron *uniter.Environment - stateEnviron *state.Environment + apiEnviron *uniter.Model + stateEnviron *state.Model } var _ = gc.Suite(&environSuite{}) @@ -22,9 +22,9 @@ func (s *environSuite) SetUpTest(c *gc.C) { s.uniterSuite.SetUpTest(c) var err error - s.apiEnviron, err = s.uniter.Environment() + s.apiEnviron, err = s.uniter.Model() c.Assert(err, jc.ErrorIsNil) - s.stateEnviron, err = s.State.Environment() + s.stateEnviron, err = s.State.Model() c.Assert(err, jc.ErrorIsNil) } === modified file 'src/github.com/juju/juju/api/uniter/export_test.go' --- src/github.com/juju/juju/api/uniter/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/export_test.go 2016-03-22 15:18:22 +0000 @@ -1,5 +1,6 @@ // Copyright 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. + package uniter import ( @@ -13,11 +14,9 @@ var ( NewSettings = newSettings - NewStateV0 = newStateV0 - NewStateV1 = newStateV1 ) -// PatchResponses changes the internal FacadeCaller to one that lets you return +// PatchUnitResponse changes the internal FacadeCaller to one that lets you return // canned results. The responseFunc will get the 'response' interface object, // and can set attributes of it to fix the response to the caller. // It can also return an error to have the FacadeCall return an error. The expected === modified file 'src/github.com/juju/juju/api/uniter/leadership.go' --- src/github.com/juju/juju/api/uniter/leadership.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/leadership.go 2016-03-22 15:18:22 +0000 @@ -7,8 +7,8 @@ "github.com/juju/errors" "github.com/juju/names" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // NewLeadershipSettingsAccessor returns a new LeadershipSettingsAccessor. 
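The dominant pattern in the test diffs above and below is the move from the statetesting watcher helpers to the watchertest checkers: the checker is now built from the watcher itself plus a state-sync callback, and the old AssertStop/AssertClosed teardown pair collapses into a single deferred AssertStops. A minimal sketch of the new shape, assuming only the watchertest API exactly as it is used in these hunks (the exampleSuite, its facade, and its state fields are placeholders, not part of the diff):

    func (s *exampleSuite) TestWatchConfig(c *gc.C) {
        w, err := s.facade.WatchForModelConfigChanges()
        c.Assert(err, jc.ErrorIsNil)

        // The checker owns the watcher from here on; the deferred
        // AssertStops replaces the old statetesting.AssertStop(c, w)
        // plus wc.AssertClosed() pair.
        wc := watchertest.NewNotifyWatcherC(c, w, s.state.StartSync)
        defer wc.AssertStops()

        // Initial event, then exactly one event per config change.
        wc.AssertOneChange()
    }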
=== modified file 'src/github.com/juju/juju/api/uniter/leadership_test.go' --- src/github.com/juju/juju/api/uniter/leadership_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/leadership_test.go 2016-03-22 15:18:22 +0000 @@ -10,8 +10,8 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/api/uniter" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) type leadershipSuite struct { === modified file 'src/github.com/juju/juju/api/uniter/relation_test.go' --- src/github.com/juju/juju/api/uniter/relation_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/relation_test.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" === modified file 'src/github.com/juju/juju/api/uniter/relationunit.go' --- src/github.com/juju/juju/api/uniter/relationunit.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/relationunit.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,8 @@ "github.com/juju/errors" "github.com/juju/names" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // This module implements a subset of the interface provided by @@ -165,24 +165,32 @@ // Watch returns a watcher that notifies of changes to counterpart // units in the relation. func (ru *RelationUnit) Watch() (watcher.RelationUnitsWatcher, error) { - var results params.RelationUnitsWatchResults + return ru.st.WatchRelationUnits(ru.relation.tag, ru.unit.tag) +} + +// NetworkConfig requests network config information from the server. +func (ru *RelationUnit) NetworkConfig() ([]params.NetworkConfig, error) { + var results params.UnitNetworkConfigResults args := params.RelationUnits{ RelationUnits: []params.RelationUnit{{ Relation: ru.relation.tag.String(), Unit: ru.unit.tag.String(), }}, } - err := ru.st.facade.FacadeCall("WatchRelationUnits", args, &results) + + err := ru.st.facade.FacadeCall("NetworkConfig", args, &results) if err != nil { - return nil, err + return nil, errors.Trace(err) } + if len(results.Results) != 1 { return nil, fmt.Errorf("expected 1 result, got %d", len(results.Results)) } + result := results.Results[0] if result.Error != nil { return nil, result.Error } - w := watcher.NewRelationUnitsWatcher(ru.st.facade.RawAPICaller(), result) - return w, nil + + return result.Config, nil } === modified file 'src/github.com/juju/juju/api/uniter/relationunit_test.go' --- src/github.com/juju/juju/api/uniter/relationunit_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/relationunit_test.go 2016-03-22 15:18:22 +0000 @@ -7,13 +7,13 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher/watchertest" ) // commonRelationSuiteMixin contains fields used by both relationSuite @@ -74,6 +74,29 @@ c.Assert(apiRel.String(), gc.Equals, "wordpress:db mysql:server") } +func (s *relationUnitSuite) TestNetworkConfig(c *gc.C) { + // Set some provider addresses bound to both "public" and "internal" + // spaces. 
+ addresses := []network.Address{ + network.NewAddressOnSpace("public", "8.8.8.8"), + network.NewAddressOnSpace("", "8.8.4.4"), + network.NewAddressOnSpace("internal", "10.0.0.1"), + network.NewAddressOnSpace("internal", "10.0.0.2"), + network.NewAddressOnSpace("public", "fc00::1"), + } + err := s.wordpressMachine.SetProviderAddresses(addresses...) + c.Assert(err, jc.ErrorIsNil) + + _, apiRelUnit := s.getRelationUnits(c) + + netConfig, err := apiRelUnit.NetworkConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(netConfig, jc.DeepEquals, []params.NetworkConfig{ + {Address: "10.0.0.1"}, + {Address: "10.0.0.2"}, + }) +} + func (s *relationUnitSuite) TestEndpoint(c *gc.C) { _, apiRelUnit := s.getRelationUnits(c) @@ -272,8 +295,8 @@ c.Assert(err, jc.ErrorIsNil) w, err := apiRelUnit.Watch() - defer statetesting.AssertStop(c, w) - wc := statetesting.NewRelationUnitsWatcherC(c, s.BackingState, w) + wc := watchertest.NewRelationUnitsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertChange([]string{"mysql/0"}, nil) @@ -288,12 +311,4 @@ err = myRelUnit.LeaveScope() c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() - - // NOTE: This test is not as exhaustive as the one in state, - // because the watcher is already tested there. Here we just - // ensure we get the events when we expect them and don't get - // them when they're not expected. - - statetesting.AssertStop(c, w) - wc.AssertClosed() } === modified file 'src/github.com/juju/juju/api/uniter/service.go' --- src/github.com/juju/juju/api/uniter/service.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/uniter/service.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,12 @@ "github.com/juju/errors" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // This module implements a subset of the interface provided by @@ -63,7 +64,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(s.st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(s.st.facade.RawAPICaller(), result) return w, nil } @@ -83,6 +84,29 @@ return nil } +// CharmModifiedVersion increments every time the charm, or any part of it, is +// changed in some way. +func (s *Service) CharmModifiedVersion() (int, error) { + var results params.IntResults + args := params.Entities{ + Entities: []params.Entity{{Tag: s.tag.String()}}, + } + err := s.st.facade.FacadeCall("CharmModifiedVersion", args, &results) + if err != nil { + return -1, err + } + + if len(results.Results) != 1 { + return -1, fmt.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return -1, result.Error + } + + return result.Result, nil +} + // CharmURL returns the service's charm URL, and whether units should // upgrade to the charm with that URL even if they are in an error // state (force flag). @@ -117,10 +141,7 @@ // OwnerTag returns the service's owner user tag. 
func (s *Service) OwnerTag() (names.UserTag, error) { - if s.st.BestAPIVersion() > 0 { - return s.serviceOwnerTag() - } - return s.ownerTag() + return s.serviceOwnerTag() } func (s *Service) serviceOwnerTag() (names.UserTag, error) { @@ -143,29 +164,9 @@ return names.ParseUserTag(result.Result) } -func (s *Service) ownerTag() (names.UserTag, error) { - var invalidTag names.UserTag - var result params.StringResult - args := params.Entities{ - Entities: []params.Entity{{Tag: s.tag.String()}}, - } - err := s.st.facade.FacadeCall("GetOwnerTag", args, &result) - if err != nil { - return invalidTag, err - } - if result.Error != nil { - return invalidTag, result.Error - } - return names.ParseUserTag(result.Result) -} - // SetStatus sets the status of the service if the passed unitName, // corresponding to the calling unit, is of the leader. func (s *Service) SetStatus(unitName string, status params.Status, info string, data map[string]interface{}) error { - //TODO(perrito666) bump api version for this? - if s.st.facade.BestAPIVersion() < 2 { - return errors.NotImplementedf("SetStatus") - } tag := names.NewUnitTag(unitName) var result params.ErrorResults args := params.SetStatus{ @@ -180,15 +181,12 @@ } err := s.st.facade.FacadeCall("SetServiceStatus", args, &result) if err != nil { - if params.IsCodeNotImplemented(err) { - return errors.NotImplementedf("SetServiceStatus") - } return errors.Trace(err) } return result.OneError() } -// ServiceStatus returns the status of the service if the passed unitName, +// Status returns the status of the service if the passed unitName, // corresponding to the calling unit, is of the leader. func (s *Service) Status(unitName string) (params.ServiceStatusResult, error) { tag := names.NewUnitTag(unitName) @@ -202,9 +200,6 @@ } err := s.st.facade.FacadeCall("ServiceStatus", args, &results) if err != nil { - if params.IsCodeNotImplemented(err) { - return params.ServiceStatusResult{}, errors.NotImplementedf("ServiceStatus") - } return params.ServiceStatusResult{}, errors.Trace(err) } result := results.Results[0] @@ -213,3 +208,9 @@ } return result, nil } + +// WatchLeadershipSettings returns a watcher which can be used to wait +// for leadership settings changes to be made for the service. +func (s *Service) WatchLeadershipSettings() (watcher.NotifyWatcher, error) { + return s.st.LeadershipSettings.WatchLeadershipSettings(s.tag.Id()) +} === modified file 'src/github.com/juju/juju/api/uniter/service_test.go' --- src/github.com/juju/juju/api/uniter/service_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/service_test.go 2016-03-22 15:18:22 +0000 @@ -10,11 +10,10 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/api/base" "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/watcher/watchertest" ) type serviceSuite struct { @@ -44,8 +43,8 @@ w, err := s.apiService.Watch() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. 
wc.AssertOneChange() @@ -59,16 +58,13 @@ err = s.wordpressService.Destroy() c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *serviceSuite) TestWatchRelations(c *gc.C) { w, err := s.apiService.WatchRelations() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertChange() @@ -91,9 +87,6 @@ c.Assert(err, jc.ErrorIsNil) wc.AssertChange(rel.String()) wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *serviceSuite) TestRefresh(c *gc.C) { @@ -121,30 +114,17 @@ c.Assert(force, jc.IsFalse) } -func (s *serviceSuite) TestOwnerTagV0(c *gc.C) { - s.patchNewState(c, uniter.NewStateV0) - - tag, err := s.apiService.OwnerTag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(tag, gc.Equals, s.AdminUserTag(c)) -} - -func (s *serviceSuite) TestOwnerTagV1(c *gc.C) { - s.patchNewState(c, uniter.NewStateV1) - - tag, err := s.apiService.OwnerTag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(tag, gc.Equals, s.AdminUserTag(c)) -} - -func (s *serviceSuite) patchNewState( - c *gc.C, - patchFunc func(_ base.APICaller, _ names.UnitTag) *uniter.State, -) { - s.uniterSuite.patchNewState(c, patchFunc) - var err error - s.apiService, err = s.uniter.Service(s.wordpressService.Tag().(names.ServiceTag)) - c.Assert(err, jc.ErrorIsNil) +func (s *serviceSuite) TestCharmModifiedVersion(c *gc.C) { + // Get the charm URL through state calls. + ver, err := s.apiService.CharmModifiedVersion() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ver, gc.Equals, s.wordpressService.CharmModifiedVersion()) +} + +func (s *serviceSuite) TestOwnerTag(c *gc.C) { + tag, err := s.apiService.OwnerTag() + c.Assert(err, jc.ErrorIsNil) + c.Assert(tag, gc.Equals, s.AdminUserTag(c)) } func (s *serviceSuite) TestSetServiceStatus(c *gc.C) { === modified file 'src/github.com/juju/juju/api/uniter/state_test.go' --- src/github.com/juju/juju/api/uniter/state_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/state_test.go 2016-03-22 15:18:22 +0000 @@ -4,13 +4,11 @@ package uniter_test import ( - "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" apitesting "github.com/juju/juju/api/testing" - "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" ) @@ -18,7 +16,7 @@ type stateSuite struct { uniterSuite *apitesting.APIAddresserTests - *apitesting.EnvironWatcherTests + *apitesting.ModelWatcherTests } var _ = gc.Suite(&stateSuite{}) @@ -26,11 +24,11 @@ func (s *stateSuite) SetUpTest(c *gc.C) { s.uniterSuite.SetUpTest(c) s.APIAddresserTests = apitesting.NewAPIAddresserTests(s.uniter, s.BackingState) - s.EnvironWatcherTests = apitesting.NewEnvironWatcherTests(s.uniter, s.BackingState, apitesting.NoSecrets) + s.ModelWatcherTests = apitesting.NewModelWatcherTests(s.uniter, s.BackingState, apitesting.NoSecrets) } func (s *stateSuite) TestProviderType(c *gc.C) { - cfg, err := s.State.EnvironConfig() + cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) providerType, err := s.uniter.ProviderType() @@ -38,18 +36,7 @@ c.Assert(providerType, gc.DeepEquals, cfg.Type()) } -func (s *stateSuite) TestAllMachinePortsV0NotImplemented(c *gc.C) { - s.patchNewState(c, uniter.NewStateV0) - - ports, err := 
s.uniter.AllMachinePorts(s.wordpressMachine.Tag().(names.MachineTag)) - c.Assert(err, jc.Satisfies, errors.IsNotImplemented) - c.Assert(err.Error(), gc.Equals, "AllMachinePorts() (need V1+) not implemented") - c.Assert(ports, gc.IsNil) -} - -func (s *stateSuite) TestAllMachinePortsV1(c *gc.C) { - s.patchNewState(c, uniter.NewStateV1) - +func (s *stateSuite) TestAllMachinePorts(c *gc.C) { // Verify no ports are opened yet on the machine or unit. machinePorts, err := s.wordpressMachine.AllPorts() c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/api/uniter/storage.go' --- src/github.com/juju/juju/api/uniter/storage.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/storage.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,9 @@ "github.com/juju/names" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) type StorageAccessor struct { @@ -88,7 +89,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(sa.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(sa.facade.RawAPICaller(), result) return w, nil } @@ -158,7 +159,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(sa.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(sa.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/uniter/storage_test.go' --- src/github.com/juju/juju/api/uniter/storage_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/storage_test.go 2016-03-22 15:18:22 +0000 @@ -30,7 +30,7 @@ var called bool apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "UnitStorageAttachments") c.Check(arg, gc.DeepEquals, params.Entities{ @@ -57,7 +57,7 @@ var called bool apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "DestroyUnitStorageAttachments") c.Check(arg, gc.DeepEquals, params.Entities{ @@ -103,7 +103,7 @@ var called bool apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "WatchUnitStorageAttachments") c.Check(arg, gc.DeepEquals, params.Entities{ @@ -129,7 +129,7 @@ var called bool apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "WatchStorageAttachments") c.Check(arg, gc.DeepEquals, params.StorageAttachmentIds{ @@ -166,7 +166,7 @@ var called bool apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, 
"") c.Check(request, gc.Equals, "StorageAttachments") c.Check(arg, gc.DeepEquals, params.StorageAttachmentIds{ @@ -195,7 +195,7 @@ func (s *storageSuite) TestStorageAttachmentLife(c *gc.C) { apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "StorageAttachmentLife") c.Check(arg, gc.DeepEquals, params.StorageAttachmentIds{ @@ -225,7 +225,7 @@ func (s *storageSuite) TestRemoveStorageAttachment(c *gc.C) { apiCaller := testing.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Check(objType, gc.Equals, "Uniter") - c.Check(version, gc.Equals, 2) + c.Check(version, gc.Equals, 3) c.Check(id, gc.Equals, "") c.Check(request, gc.Equals, "RemoveStorageAttachments") c.Check(arg, gc.DeepEquals, params.StorageAttachmentIds{ === modified file 'src/github.com/juju/juju/api/uniter/unit.go' --- src/github.com/juju/juju/api/uniter/unit.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/uniter/unit.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,12 @@ "github.com/juju/errors" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) // Unit represents a juju unit as seen by a uniter worker. @@ -80,9 +81,6 @@ } err := u.st.facade.FacadeCall("UnitStatus", args, &results) if err != nil { - if params.IsCodeNotImplemented(err) { - return params.StatusResult{}, errors.NotImplementedf("UnitStatus") - } return params.StatusResult{}, errors.Trace(err) } if len(results.Results) != 1 { @@ -146,15 +144,7 @@ } results := new(params.ErrorResults) err := u.st.facade.FacadeCall("AddMetricBatches", p, results) - if params.IsCodeNotImplemented(err) { - for _, batch := range batches { - err = u.AddMetrics(batch.Metrics) - if err != nil { - batchResults[batch.UUID] = errors.Annotate(err, "failed to send metric batch") - } - } - return batchResults, nil - } else if err != nil { + if err != nil { return nil, errors.Annotate(err, "failed to send metric batches") } for i, result := range results.Results { @@ -469,44 +459,6 @@ return result.OneError() } -// OpenPort sets the policy of the port with protocol and number to be -// opened. -// -// TODO(dimitern): This is deprecated and is kept for -// backwards-compatibility. Use OpenPorts instead. -func (u *Unit) OpenPort(protocol string, number int) error { - var result params.ErrorResults - args := params.EntitiesPorts{ - Entities: []params.EntityPort{ - {Tag: u.tag.String(), Protocol: protocol, Port: number}, - }, - } - err := u.st.facade.FacadeCall("OpenPort", args, &result) - if err != nil { - return err - } - return result.OneError() -} - -// ClosePort sets the policy of the port with protocol and number to -// be closed. -// -// TODO(dimitern): This is deprecated and is kept for -// backwards-compatibility. Use ClosePorts instead. 
-func (u *Unit) ClosePort(protocol string, number int) error { - var result params.ErrorResults - args := params.EntitiesPorts{ - Entities: []params.EntityPort{ - {Tag: u.tag.String(), Protocol: protocol, Port: number}, - }, - } - err := u.st.facade.FacadeCall("ClosePort", args, &result) - if err != nil { - return err - } - return result.OneError() -} - var ErrNoCharmURLSet = errors.New("unit has no charm url set") // CharmURL returns the charm URL this unit is currently using. @@ -591,7 +543,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(u.st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(u.st.facade.RawAPICaller(), result) return w, nil } @@ -615,7 +567,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(u.st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(u.st.facade.RawAPICaller(), result) return w, nil } @@ -638,7 +590,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewStringsWatcher(u.st.facade.RawAPICaller(), result) + w := apiwatcher.NewStringsWatcher(u.st.facade.RawAPICaller(), result) return w, nil } @@ -725,7 +677,7 @@ if result.Error != nil { return nil, result.Error } - w := watcher.NewNotifyWatcher(u.st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(u.st.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/uniter/unit_test.go' --- src/github.com/juju/juju/api/uniter/unit_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/uniter/unit_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" "github.com/juju/juju/api/base" @@ -22,8 +22,8 @@ "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" jujufactory "github.com/juju/juju/testing/factory" + "github.com/juju/juju/watcher/watchertest" ) type unitSuite struct { @@ -121,33 +121,6 @@ c.Assert(agentStatusInfo.Data, gc.HasLen, 0) } -func (s *unitSuite) TestSetUnitStatusOldServer(c *gc.C) { - s.patchNewState(c, uniter.NewStateV1) - - err := s.apiUnit.SetUnitStatus(params.StatusActive, "blah", nil) - c.Assert(err, jc.Satisfies, errors.IsNotImplemented) - c.Assert(err.Error(), gc.Equals, "SetUnitStatus not implemented") -} - -func (s *unitSuite) TestSetAgentStatusOldServer(c *gc.C) { - s.patchNewState(c, uniter.NewStateV1) - - statusInfo, err := s.wordpressUnit.Status() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusUnknown) - c.Assert(statusInfo.Message, gc.Equals, "Waiting for agent initialization to finish") - c.Assert(statusInfo.Data, gc.HasLen, 0) - - err = s.apiUnit.SetAgentStatus(params.StatusIdle, "blah", nil) - c.Assert(err, jc.ErrorIsNil) - - statusInfo, err = s.wordpressUnit.AgentStatus() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusIdle) - c.Assert(statusInfo.Message, gc.Equals, "blah") - c.Assert(statusInfo.Data, gc.HasLen, 0) -} - func (s *unitSuite) TestUnitStatus(c *gc.C) { err := s.wordpressUnit.SetStatus(state.StatusMaintenance, "blah", nil) c.Assert(err, jc.ErrorIsNil) @@ -241,8 +214,8 @@ w, err := s.apiUnit.Watch() c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := 
watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertOneChange() @@ -257,9 +230,6 @@ err = s.apiUnit.EnsureDead() c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *unitSuite) TestResolve(c *gc.C) { @@ -278,17 +248,7 @@ c.Assert(mode, gc.Equals, params.ResolvedNone) } -func (s *unitSuite) TestAssignedMachineV0NotImplemented(c *gc.C) { - s.patchNewState(c, uniter.NewStateV0) - - _, err := s.apiUnit.AssignedMachine() - c.Assert(err, jc.Satisfies, errors.IsNotImplemented) - c.Assert(err.Error(), gc.Equals, "unit.AssignedMachine() (need V1+) not implemented") -} - -func (s *unitSuite) TestAssignedMachineV1(c *gc.C) { - s.patchNewState(c, uniter.NewStateV1) - +func (s *unitSuite) TestAssignedMachine(c *gc.C) { machineTag, err := s.apiUnit.AssignedMachine() c.Assert(err, jc.ErrorIsNil) c.Assert(machineTag, gc.Equals, s.wordpressMachine.Tag()) @@ -396,42 +356,6 @@ c.Assert(ports, gc.HasLen, 0) } -func (s *unitSuite) TestOpenClosePort(c *gc.C) { - ports, err := s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ports, gc.HasLen, 0) - - err = s.apiUnit.OpenPort("tcp", 1234) - c.Assert(err, jc.ErrorIsNil) - err = s.apiUnit.OpenPort("tcp", 4321) - c.Assert(err, jc.ErrorIsNil) - - ports, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - // OpenedPorts returns a sorted slice. - c.Assert(ports, gc.DeepEquals, []network.PortRange{ - {Protocol: "tcp", FromPort: 1234, ToPort: 1234}, - {Protocol: "tcp", FromPort: 4321, ToPort: 4321}, - }) - - err = s.apiUnit.ClosePort("tcp", 4321) - c.Assert(err, jc.ErrorIsNil) - - ports, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - // OpenedPorts returns a sorted slice. - c.Assert(ports, gc.DeepEquals, []network.PortRange{ - {Protocol: "tcp", FromPort: 1234, ToPort: 1234}, - }) - - err = s.apiUnit.ClosePort("tcp", 1234) - c.Assert(err, jc.ErrorIsNil) - - ports, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ports, gc.HasLen, 0) -} - func (s *unitSuite) TestGetSetCharmURL(c *gc.C) { // No charm URL set yet. curl, ok := s.wordpressUnit.CharmURL() @@ -491,8 +415,8 @@ c.Assert(err, jc.ErrorIsNil) w, err = s.apiUnit.WatchConfigSettings() - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. wc.AssertOneChange() @@ -514,22 +438,13 @@ }) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() - - // NOTE: This test is not as exhaustive as the one in state, - // because the watcher is already tested there. Here we just - // ensure we get the events when we expect them and don't get - // them when they're not expected. - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *unitSuite) TestWatchActionNotifications(c *gc.C) { w, err := s.apiUnit.WatchActionNotifications() c.Assert(err, jc.ErrorIsNil) - - defer statetesting.AssertStop(c, w) - wc := statetesting.NewStringsWatcherC(c, s.BackingState, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Initial event. 
	wc.AssertChange()
@@ -550,9 +465,6 @@
 	})
 	c.Assert(err, jc.ErrorIsNil)
 	wc.AssertChange(action.Id())
-
-	statetesting.AssertStop(c, w)
-	wc.AssertClosed()
 }
 
 func (s *unitSuite) TestWatchActionNotificationsError(c *gc.C) {
@@ -639,8 +551,9 @@
 
 func (s *unitSuite) TestWatchAddresses(c *gc.C) {
 	w, err := s.apiUnit.WatchAddresses()
-	defer statetesting.AssertStop(c, w)
-	wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w)
+	c.Assert(err, jc.ErrorIsNil)
+	wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync)
+	defer wc.AssertStops()
 
 	// Initial event.
 	wc.AssertOneChange()
@@ -666,14 +579,6 @@
 	err = s.wordpressMachine.SetMachineAddresses()
 	c.Assert(err, jc.ErrorIsNil)
 	wc.AssertOneChange()
-
-	// NOTE: This test is not as exhaustive as the one in state,
-	// because the watcher is already tested there. Here we just
-	// ensure we get the events when we expect them and don't get
-	// them when they're not expected.
-
-	statetesting.AssertStop(c, w)
-	wc.AssertClosed()
 }
 
 func (s *unitSuite) TestWatchAddressesErrors(c *gc.C) {
@@ -776,8 +681,8 @@
 
 func (s *unitSuite) TestWatchMeterStatus(c *gc.C) {
 	w, err := s.apiUnit.WatchMeterStatus()
-	defer statetesting.AssertStop(c, w)
-	wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w)
+	wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync)
+	defer wc.AssertStops()
 
 	// Initial event.
 	wc.AssertOneChange()
@@ -804,9 +709,6 @@
 	status := mm.MeterStatus()
 	c.Assert(status.Code, gc.Equals, state.MeterAmber) // Confirm meter status has changed
 	wc.AssertOneChange()
-
-	statetesting.AssertStop(c, w)
-	wc.AssertClosed()
 }
 
 func (s *unitSuite) patchNewState(
@@ -912,40 +814,6 @@
 	c.Assert(called, jc.IsTrue)
 }
 
-func (s *unitMetricBatchesSuite) TestSendMetricBatchNotImplemented(c *gc.C) {
-	var called bool
-	uniter.PatchUnitFacadeCall(s, s.apiUnit, func(request string, args, response interface{}) error {
-		switch request {
-		case "AddMetricBatches":
-			result := response.(*params.ErrorResults)
-			result.Results = make([]params.ErrorResult, 1)
-			return &params.Error{"not implemented", params.CodeNotImplemented}
-		case "AddMetrics":
-			called = true
-			result := response.(*params.ErrorResults)
-			result.Results = make([]params.ErrorResult, 1)
-			return nil
-		default:
-			panic(fmt.Errorf("unexpected request %q received", request))
-		}
-	})
-
-	metrics := []params.Metric{{"pings", "5", time.Now().UTC()}}
-	uuid := utils.MustNewUUID().String()
-	batch := params.MetricBatch{
-		UUID: uuid,
-		CharmURL: s.charm.URL().String(),
-		Created: time.Now(),
-		Metrics: metrics,
-	}
-
-	results, err := s.apiUnit.AddMetricBatches([]params.MetricBatch{batch})
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(called, jc.IsTrue)
-	c.Assert(results, gc.HasLen, 1)
-	c.Assert(results[batch.UUID], gc.IsNil)
-}
-
 func (s *unitMetricBatchesSuite) TestSendMetricBatch(c *gc.C) {
 	uuid := utils.MustNewUUID().String()
 	now := time.Now().Round(time.Second).UTC()
@@ -962,7 +830,7 @@
 	c.Assert(results, gc.HasLen, 1)
 	c.Assert(results[batch.UUID], gc.IsNil)
 
-	batches, err := s.State.MetricBatches()
+	batches, err := s.State.AllMetricBatches()
 	c.Assert(err, gc.IsNil)
 	c.Assert(batches, gc.HasLen, 1)
 	c.Assert(batches[0].UUID(), gc.Equals, uuid)

=== modified file 'src/github.com/juju/juju/api/uniter/uniter.go'
--- src/github.com/juju/juju/api/uniter/uniter.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/api/uniter/uniter.go	2016-03-22 15:18:22 +0000
@@ -8,20 +8,21 @@
 	"github.com/juju/errors"
 	"github.com/juju/names"
-	"gopkg.in/juju/charm.v5"
+	"gopkg.in/juju/charm.v6-unstable"
 
"github.com/juju/juju/api/base" "github.com/juju/juju/api/common" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" ) const uniterFacade = "Uniter" // State provides access to the Uniter API facade. type State struct { - *common.EnvironWatcher + *common.ModelWatcher *common.APIAddresser *StorageAccessor @@ -44,24 +45,21 @@ version, ) state := &State{ - EnvironWatcher: common.NewEnvironWatcher(facadeCaller), + ModelWatcher: common.NewModelWatcher(facadeCaller), APIAddresser: common.NewAPIAddresser(facadeCaller), StorageAccessor: NewStorageAccessor(facadeCaller), facade: facadeCaller, unitTag: authTag, } - if version >= 2 { - newWatcher := func(result params.NotifyWatchResult) watcher.NotifyWatcher { - return watcher.NewNotifyWatcher(caller, result) - } - state.LeadershipSettings = NewLeadershipSettingsAccessor( - facadeCaller.FacadeCall, - newWatcher, - ErrIfNotVersionFn(2, state.BestAPIVersion()), - ) + newWatcher := func(result params.NotifyWatchResult) watcher.NotifyWatcher { + return apiwatcher.NewNotifyWatcher(caller, result) } - + state.LeadershipSettings = NewLeadershipSettingsAccessor( + facadeCaller.FacadeCall, + newWatcher, + ErrIfNotVersionFn(2, state.BestAPIVersion()), + ) return state } @@ -71,18 +69,12 @@ } } -// newStateV0 creates a new client-side Uniter facade, version 0. -var newStateV0 = newStateForVersionFn(0) - -// newStateV1 creates a new client-side Uniter facade, version 1. -var newStateV1 = newStateForVersionFn(1) - -// newStateV2 creates a new client-side Uniter facade, version 2. -var newStateV2 = newStateForVersionFn(2) +// newStateV3 creates a new client-side Uniter facade, version 3. +var newStateV3 = newStateForVersionFn(3) // NewState creates a new client-side Uniter facade. // Defined like this to allow patching during tests. -var NewState = newStateV2 +var NewState = newStateV3 // BestAPIVersion returns the API version that we were able to // determine is supported by both the client and the API Server. @@ -122,7 +114,7 @@ return result.Results[0], nil } -// getOneAction retrieves a single Action from the state server. +// getOneAction retrieves a single Action from the controller. func (st *State) getOneAction(tag *names.ActionTag) (params.ActionsQueryResult, error) { nothing := params.ActionsQueryResult{} @@ -177,8 +169,7 @@ }, nil } -// ProviderType returns a provider type used by the current juju -// environment. +// ProviderType returns a provider type used by the current juju model. // // TODO(dimitern): We might be able to drop this, once we have machine // addresses implemented fully. See also LP bug 1221798. @@ -310,21 +301,17 @@ }, nil } -// Environment returns the environment entity. -func (st *State) Environment() (*Environment, error) { - var result params.EnvironmentResult - err := st.facade.FacadeCall("CurrentEnvironment", nil, &result) - if params.IsCodeNotImplemented(err) { - // Fall back to using the 1.16 API. - return st.environment1dot16() - } +// Model returns the model entity. 
+func (st *State) Model() (*Model, error) { + var result params.ModelResult + err := st.facade.FacadeCall("CurrentModel", nil, &result) if err != nil { return nil, err } if err := result.Error; err != nil { return nil, err } - return &Environment{ + return &Model{ name: result.Name, uuid: result.UUID, }, nil @@ -364,20 +351,32 @@ return portsMap, nil } -// environment1dot16 requests just the UUID of the current environment, when -// using an older API server that does not support CurrentEnvironment API call. -func (st *State) environment1dot16() (*Environment, error) { - var result params.StringResult - err := st.facade.FacadeCall("CurrentEnvironUUID", nil, &result) +// WatchRelationUnits returns a watcher that notifies of changes to the +// counterpart units in the relation for the given unit. +func (st *State) WatchRelationUnits( + relationTag names.RelationTag, + unitTag names.UnitTag, +) (watcher.RelationUnitsWatcher, error) { + var results params.RelationUnitsWatchResults + args := params.RelationUnits{ + RelationUnits: []params.RelationUnit{{ + Relation: relationTag.String(), + Unit: unitTag.String(), + }}, + } + err := st.facade.FacadeCall("WatchRelationUnits", args, &results) if err != nil { return nil, err } - if err := result.Error; err != nil { - return nil, err - } - return &Environment{ - uuid: result.Result, - }, nil + if len(results.Results) != 1 { + return nil, fmt.Errorf("expected 1 result, got %d", len(results.Results)) + } + result := results.Results[0] + if result.Error != nil { + return nil, result.Error + } + w := apiwatcher.NewRelationUnitsWatcher(st.facade.RawAPICaller(), result) + return w, nil } // ErrIfNotVersionFn returns a function which can be used to check for === modified file 'src/github.com/juju/juju/api/uniter/uniter_test.go' --- src/github.com/juju/juju/api/uniter/uniter_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/uniter/uniter_test.go 2016-03-22 15:18:22 +0000 @@ -22,12 +22,12 @@ type uniterSuite struct { testing.JujuConnSuite - st api.Connection - stateServerMachine *state.Machine - wordpressMachine *state.Machine - wordpressService *state.Service - wordpressCharm *state.Charm - wordpressUnit *state.Unit + st api.Connection + controllerMachine *state.Machine + wordpressMachine *state.Machine + wordpressService *state.Service + wordpressCharm *state.Charm + wordpressUnit *state.Unit uniter *uniter.State } @@ -38,16 +38,25 @@ s.setUpTest(c, true) } -func (s *uniterSuite) setUpTest(c *gc.C, addStateServer bool) { +func (s *uniterSuite) setUpTest(c *gc.C, addController bool) { s.JujuConnSuite.SetUpTest(c) - if addStateServer { - s.stateServerMachine = testing.AddStateServerMachine(c, s.State) - } + if addController { + s.controllerMachine = testing.AddControllerMachine(c, s.State) + } + + // Bind wordpress:db to space "internal" + bindings := map[string]string{ + "db": "internal", + } + _, err := s.State.AddSpace("internal", "", nil, false) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddSpace("public", "", nil, true) + c.Assert(err, jc.ErrorIsNil) // Create a machine, a service and add a unit so we can log in as // its agent. 
- s.wordpressMachine, s.wordpressService, s.wordpressCharm, s.wordpressUnit = s.addMachineServiceCharmAndUnit(c, "wordpress") + s.wordpressMachine, s.wordpressService, s.wordpressCharm, s.wordpressUnit = s.addMachineBoundServiceCharmAndUnit(c, "wordpress", bindings) password, err := utils.RandomPassword() c.Assert(err, jc.ErrorIsNil) err = s.wordpressUnit.SetPassword(password) @@ -60,18 +69,33 @@ c.Assert(s.uniter, gc.NotNil) } -func (s *uniterSuite) addMachineServiceCharmAndUnit(c *gc.C, serviceName string) (*state.Machine, *state.Service, *state.Charm, *state.Unit) { +func (s *uniterSuite) addMachineBoundServiceCharmAndUnit(c *gc.C, serviceName string, bindings map[string]string) (*state.Machine, *state.Service, *state.Charm, *state.Unit) { machine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) charm := s.AddTestingCharm(c, serviceName) - service := s.AddTestingService(c, serviceName, charm) + + owner := s.AdminUserTag(c).String() + service, err := s.State.AddService(state.AddServiceArgs{ + Name: serviceName, + Owner: owner, + Charm: charm, + EndpointBindings: bindings, + }) + c.Assert(err, jc.ErrorIsNil) + unit, err := service.AddUnit() c.Assert(err, jc.ErrorIsNil) + err = unit.AssignToMachine(machine) c.Assert(err, jc.ErrorIsNil) + return machine, service, charm, unit } +func (s *uniterSuite) addMachineServiceCharmAndUnit(c *gc.C, serviceName string) (*state.Machine, *state.Service, *state.Charm, *state.Unit) { + return s.addMachineBoundServiceCharmAndUnit(c, serviceName, nil) +} + func (s *uniterSuite) addRelation(c *gc.C, first, second string) *state.Relation { eps, err := s.State.InferEndpoints(first, second) c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/api/uniter/unitstorage_test.go' --- src/github.com/juju/juju/api/uniter/unitstorage_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/api/uniter/unitstorage_test.go 2016-03-22 15:18:22 +0000 @@ -42,7 +42,7 @@ apiCaller := basetesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Assert(objType, gc.Equals, "Uniter") - c.Assert(version, gc.Equals, 2) + c.Assert(version, gc.Equals, 3) c.Assert(id, gc.Equals, "") c.Assert(request, gc.Equals, "AddUnitStorage") c.Assert(arg, gc.DeepEquals, expected) @@ -75,7 +75,7 @@ msg := "yoink" apiCaller := basetesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { c.Assert(objType, gc.Equals, "Uniter") - c.Assert(version, gc.Equals, 2) + c.Assert(version, gc.Equals, 3) c.Assert(id, gc.Equals, "") c.Assert(request, gc.Equals, "AddUnitStorage") c.Assert(arg, gc.DeepEquals, expected) === added file 'src/github.com/juju/juju/api/upgrader/package_test.go' --- src/github.com/juju/juju/api/upgrader/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/upgrader/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package upgrader_test + +import ( + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/api/upgrader/unitupgrader_test.go' --- src/github.com/juju/juju/api/upgrader/unitupgrader_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/upgrader/unitupgrader_test.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,8 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/api" @@ -14,9 +16,9 @@ "github.com/juju/juju/apiserver/params" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher/watchertest" ) type unitUpgraderSuite struct { @@ -34,6 +36,12 @@ var _ = gc.Suite(&unitUpgraderSuite{}) +var current = version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), +} + func (s *unitUpgraderSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) @@ -62,28 +70,27 @@ } func (s *unitUpgraderSuite) TestSetVersionWrongUnit(c *gc.C) { - err := s.st.SetVersion("unit-wordpress-42", version.Current) + err := s.st.SetVersion("unit-wordpress-42", current) c.Assert(err, gc.ErrorMatches, "permission denied") c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) } func (s *unitUpgraderSuite) TestSetVersionNotUnit(c *gc.C) { - err := s.st.SetVersion("foo-42", version.Current) + err := s.st.SetVersion("foo-42", current) c.Assert(err, gc.ErrorMatches, "permission denied") c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) } func (s *unitUpgraderSuite) TestSetVersion(c *gc.C) { - cur := version.Current agentTools, err := s.rawUnit.AgentTools() c.Assert(err, jc.Satisfies, errors.IsNotFound) c.Assert(agentTools, gc.IsNil) - err = s.st.SetVersion(s.rawUnit.Tag().String(), cur) + err = s.st.SetVersion(s.rawUnit.Tag().String(), current) c.Assert(err, jc.ErrorIsNil) s.rawUnit.Refresh() agentTools, err = s.rawUnit.AgentTools() c.Assert(err, jc.ErrorIsNil) - c.Check(agentTools.Version, gc.Equals, cur) + c.Check(agentTools.Version, gc.Equals, current) } func (s *unitUpgraderSuite) TestToolsWrongUnit(c *gc.C) { @@ -101,36 +108,35 @@ } func (s *unitUpgraderSuite) TestTools(c *gc.C) { - cur := version.Current - curTools := &tools.Tools{Version: cur, URL: ""} + curTools := &tools.Tools{Version: current, URL: ""} curTools.Version.Minor++ - s.rawMachine.SetAgentVersion(cur) + s.rawMachine.SetAgentVersion(current) // UnitUpgrader.Tools returns the *desired* set of tools, not the currently // running set. 
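[Editor's note] The new package-level `current` value above assembles a full binary version (number, architecture, host series) once, now that version.Current is a bare number. A rough stand-alone sketch of the same shape; the binary type and the values are placeholders, not juju's real version package:

    package main

    import (
        "fmt"
        "runtime"
    )

    // binary mirrors the shape of a version.Binary: a release number
    // qualified by host series and architecture. Hypothetical stand-in.
    type binary struct {
        Number string
        Series string
        Arch   string
    }

    func (b binary) String() string {
        return fmt.Sprintf("%s-%s-%s", b.Number, b.Series, b.Arch)
    }

    // The tests above build one value like this at package level instead
    // of reconstructing it in every test body.
    var current = binary{
        Number: "1.26.0",
        Series: "quantal",
        Arch:   runtime.GOARCH,
    }

    func main() {
        fmt.Println(current) // e.g. 1.26.0-quantal-amd64
    }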
We want to be upgraded to cur.Version stateTools, err := s.st.Tools(s.rawUnit.Tag().String()) c.Assert(err, jc.ErrorIsNil) - c.Check(stateTools.Version.Number, gc.DeepEquals, version.Current.Number) + c.Check(stateTools.Version.Number, gc.DeepEquals, current.Number) c.Assert(stateTools.URL, gc.NotNil) } func (s *unitUpgraderSuite) TestWatchAPIVersion(c *gc.C) { w, err := s.st.WatchAPIVersion(s.rawUnit.Tag().String()) c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() + // Initial event wc.AssertOneChange() vers := version.MustParseBinary("10.20.34-quantal-amd64") err = s.rawMachine.SetAgentVersion(vers) c.Assert(err, jc.ErrorIsNil) + // One change noticing the new version wc.AssertOneChange() vers = version.MustParseBinary("10.20.35-quantal-amd64") err = s.rawMachine.SetAgentVersion(vers) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *unitUpgraderSuite) TestWatchAPIVersionWrongUnit(c *gc.C) { @@ -146,15 +152,14 @@ } func (s *unitUpgraderSuite) TestDesiredVersion(c *gc.C) { - cur := version.Current - curTools := &tools.Tools{Version: cur, URL: ""} + curTools := &tools.Tools{Version: current, URL: ""} curTools.Version.Minor++ - s.rawMachine.SetAgentVersion(cur) + s.rawMachine.SetAgentVersion(current) // UnitUpgrader.DesiredVersion returns the *desired* set of tools, not the // currently running set. We want to be upgraded to cur.Version stateVersion, err := s.st.DesiredVersion(s.rawUnit.Tag().String()) c.Assert(err, jc.ErrorIsNil) - c.Assert(stateVersion, gc.Equals, cur.Number) + c.Assert(stateVersion, gc.Equals, current.Number) } func (s *unitUpgraderSuite) TestDesiredVersionWrongUnit(c *gc.C) { === modified file 'src/github.com/juju/juju/api/upgrader/upgrader.go' --- src/github.com/juju/juju/api/upgrader/upgrader.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/upgrader/upgrader.go 2016-03-22 15:18:22 +0000 @@ -7,10 +7,11 @@ "fmt" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/watcher" + apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher" ) // State provides access to an upgrader worker's view of the state. 
@@ -110,6 +111,6 @@ // TODO: Not directly tested return nil, result.Error } - w := watcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) + w := apiwatcher.NewNotifyWatcher(st.facade.RawAPICaller(), result) return w, nil } === modified file 'src/github.com/juju/juju/api/upgrader/upgrader_test.go' --- src/github.com/juju/juju/api/upgrader/upgrader_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/upgrader/upgrader_test.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,6 @@ import ( "fmt" - stdtesting "testing" "github.com/juju/errors" jc "github.com/juju/testing/checkers" @@ -20,12 +19,9 @@ coretesting "github.com/juju/juju/testing" "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher/watchertest" ) -func TestAll(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - type machineUpgraderSuite struct { testing.JujuConnSuite @@ -56,28 +52,27 @@ } func (s *machineUpgraderSuite) TestSetVersionWrongMachine(c *gc.C) { - err := s.st.SetVersion("machine-42", version.Current) + err := s.st.SetVersion("machine-42", current) c.Assert(err, gc.ErrorMatches, "permission denied") c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) } func (s *machineUpgraderSuite) TestSetVersionNotMachine(c *gc.C) { - err := s.st.SetVersion("foo-42", version.Current) + err := s.st.SetVersion("foo-42", current) c.Assert(err, gc.ErrorMatches, "permission denied") c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) } func (s *machineUpgraderSuite) TestSetVersion(c *gc.C) { - cur := version.Current agentTools, err := s.rawMachine.AgentTools() c.Assert(err, jc.Satisfies, errors.IsNotFound) c.Assert(agentTools, gc.IsNil) - err = s.st.SetVersion(s.rawMachine.Tag().String(), cur) + err = s.st.SetVersion(s.rawMachine.Tag().String(), current) c.Assert(err, jc.ErrorIsNil) s.rawMachine.Refresh() agentTools, err = s.rawMachine.AgentTools() c.Assert(err, jc.ErrorIsNil) - c.Check(agentTools.Version, gc.Equals, cur) + c.Check(agentTools.Version, gc.Equals, current) } func (s *machineUpgraderSuite) TestToolsWrongMachine(c *gc.C) { @@ -95,52 +90,53 @@ } func (s *machineUpgraderSuite) TestTools(c *gc.C) { - cur := version.Current - curTools := &tools.Tools{Version: cur, URL: ""} + curTools := &tools.Tools{Version: current, URL: ""} curTools.Version.Minor++ - s.rawMachine.SetAgentVersion(cur) + s.rawMachine.SetAgentVersion(current) // Upgrader.Tools returns the *desired* set of tools, not the currently // running set. 
We want to be upgraded to cur.Version stateTools, err := s.st.Tools(s.rawMachine.Tag().String()) c.Assert(err, jc.ErrorIsNil) - c.Assert(stateTools.Version, gc.Equals, cur) - url := fmt.Sprintf("https://%s/environment/%s/tools/%s", - s.stateAPI.Addr(), coretesting.EnvironmentTag.Id(), cur) + c.Assert(stateTools.Version, gc.Equals, current) + url := fmt.Sprintf("https://%s/model/%s/tools/%s", + s.stateAPI.Addr(), coretesting.ModelTag.Id(), current) c.Assert(stateTools.URL, gc.Equals, url) } func (s *machineUpgraderSuite) TestWatchAPIVersion(c *gc.C) { w, err := s.st.WatchAPIVersion(s.rawMachine.Tag().String()) c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, w) - wc := statetesting.NewNotifyWatcherC(c, s.BackingState, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() + // Initial event wc.AssertOneChange() + + // One change noticing the new version vers := version.MustParse("10.20.34") err = statetesting.SetAgentVersion(s.BackingState, vers) c.Assert(err, jc.ErrorIsNil) - // One change noticing the new version wc.AssertOneChange() + // Setting the version to the same value doesn't trigger a change err = statetesting.SetAgentVersion(s.BackingState, vers) c.Assert(err, jc.ErrorIsNil) wc.AssertNoChange() + + // Another change noticing another new version vers = version.MustParse("10.20.35") err = statetesting.SetAgentVersion(s.BackingState, vers) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *machineUpgraderSuite) TestDesiredVersion(c *gc.C) { - cur := version.Current - curTools := &tools.Tools{Version: cur, URL: ""} + curTools := &tools.Tools{Version: current, URL: ""} curTools.Version.Minor++ - s.rawMachine.SetAgentVersion(cur) + s.rawMachine.SetAgentVersion(current) // Upgrader.DesiredVersion returns the *desired* set of tools, not the // currently running set. We want to be upgraded to cur.Version stateVersion, err := s.st.DesiredVersion(s.rawMachine.Tag().String()) c.Assert(err, jc.ErrorIsNil) - c.Assert(stateVersion, gc.Equals, cur.Number) + c.Assert(stateVersion, gc.Equals, current.Number) } === modified file 'src/github.com/juju/juju/api/usermanager/client.go' --- src/github.com/juju/juju/api/usermanager/client.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/usermanager/client.go 2016-03-22 15:18:22 +0000 @@ -31,40 +31,49 @@ return &Client{ClientFacade: frontend, facade: backend} } -// AddUser creates a new local user in the juju server. -func (c *Client) AddUser(username, displayName, password string) (names.UserTag, error) { +// AddUser creates a new local user in the controller, sharing with that user any specified models. 
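[Editor's note] The AddUser change that follows threads trailing model UUIDs through to the server as SharedModelTags. A sketch of just that variadic-to-tags plumbing; addUserArgs and modelTag are hypothetical stand-ins for the params struct and names.NewModelTag:

    package main

    import "fmt"

    // modelTag stands in for names.NewModelTag(uuid).String().
    func modelTag(uuid string) string { return "model-" + uuid }

    // addUserArgs mirrors the wire fields named in the hunk below;
    // everything else in this sketch is illustrative.
    type addUserArgs struct {
        Username        string
        DisplayName     string
        Password        string
        SharedModelTags []string
    }

    // makeArgs shows the variadic extension: trailing model UUIDs become
    // tag strings carried alongside the user details.
    func makeArgs(username, display, password string, modelUUIDs ...string) addUserArgs {
        tags := make([]string, len(modelUUIDs))
        for i, uuid := range modelUUIDs {
            tags[i] = modelTag(uuid)
        }
        return addUserArgs{
            Username:        username,
            DisplayName:     display,
            Password:        password,
            SharedModelTags: tags,
        }
    }

    func main() {
        fmt.Printf("%+v\n", makeArgs("foobar", "Foo Bar", "password", "dead-beef"))
    }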
+func (c *Client) AddUser( + username, displayName, password string, modelUUIDs ...string, +) (_ names.UserTag, secretKey []byte, _ error) { if !names.IsValidUser(username) { - return names.UserTag{}, fmt.Errorf("invalid user name %q", username) + return names.UserTag{}, nil, fmt.Errorf("invalid user name %q", username) + } + modelTags := make([]string, len(modelUUIDs)) + for i, uuid := range modelUUIDs { + modelTags[i] = names.NewModelTag(uuid).String() } userArgs := params.AddUsers{ - Users: []params.AddUser{{Username: username, DisplayName: displayName, Password: password}}, + Users: []params.AddUser{{ + Username: username, + DisplayName: displayName, + Password: password, + SharedModelTags: modelTags}}, } var results params.AddUserResults err := c.facade.FacadeCall("AddUser", userArgs, &results) if err != nil { - return names.UserTag{}, errors.Trace(err) + return names.UserTag{}, nil, errors.Trace(err) } if count := len(results.Results); count != 1 { logger.Errorf("expected 1 result, got %#v", results) - return names.UserTag{}, errors.Errorf("expected 1 result, got %d", count) + return names.UserTag{}, nil, errors.Errorf("expected 1 result, got %d", count) } result := results.Results[0] if result.Error != nil { - return names.UserTag{}, errors.Trace(result.Error) + return names.UserTag{}, nil, errors.Trace(result.Error) } tag, err := names.ParseUserTag(result.Tag) if err != nil { - return names.UserTag{}, errors.Trace(err) + return names.UserTag{}, nil, errors.Trace(err) } - logger.Infof("created user %s", result.Tag) - return tag, nil + return tag, result.SecretKey, nil } func (c *Client) userCall(username string, methodCall string) error { - if !names.IsValidUserName(username) { + if !names.IsValidUser(username) { return errors.Errorf("%q is not a valid username", username) } - tag := names.NewLocalUserTag(username) + tag := names.NewUserTag(username) var results params.ErrorResults args := params.Entities{ @@ -108,10 +117,10 @@ var results params.UserInfoResults var entities []params.Entity for _, username := range usernames { - if !names.IsValidUserName(username) { + if !names.IsValidUser(username) { return nil, errors.Errorf("%q is not a valid username", username) } - tag := names.NewLocalUserTag(username) + tag := names.NewUserTag(username) entities = append(entities, params.Entity{Tag: tag.String()}) } args := params.UserInfoRequest{ @@ -150,10 +159,10 @@ // SetPassword changes the password for the specified user. 
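[Editor's note] userCall, UserInfo, and SetPassword all switch from IsValidUserName/NewLocalUserTag to IsValidUser/NewUserTag, so domain-qualified names like bob@remote are now accepted. A sketch of the validate-then-tag step; the regexp only loosely approximates the names package rules and is illustrative, not authoritative:

    package main

    import (
        "fmt"
        "regexp"
    )

    // validUser loosely approximates names.IsValidUser: local names
    // ("bob") and domain-qualified ones ("bob@remote") both pass.
    var validUser = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9.+-]*(@[a-z0-9-]+)?$`)

    // userTag validates any user name, not just local ones, then builds
    // a generic user tag, mirroring the client-side change above.
    func userTag(username string) (string, error) {
        if !validUser.MatchString(username) {
            return "", fmt.Errorf("%q is not a valid username", username)
        }
        return "user-" + username, nil
    }

    func main() {
        for _, name := range []string{"bob", "bob@remote", "not!good"} {
            tag, err := userTag(name)
            fmt.Printf("%s -> %q (err: %v)\n", name, tag, err)
        }
    }

This is also why the tests above change their bad-name fixture from "not@home" (now a legal remote user) to "not!good".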
func (c *Client) SetPassword(username, password string) error { - if !names.IsValidUserName(username) { + if !names.IsValidUser(username) { return errors.Errorf("%q is not a valid username", username) } - tag := names.NewLocalUserTag(username) + tag := names.NewUserTag(username) args := params.EntityPasswords{ Changes: []params.EntityPassword{{ Tag: tag.String(), === modified file 'src/github.com/juju/juju/api/usermanager/client_test.go' --- src/github.com/juju/juju/api/usermanager/client_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/api/usermanager/client_test.go 2016-03-22 15:18:22 +0000 @@ -5,6 +5,7 @@ import ( "github.com/juju/errors" + "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -29,7 +30,7 @@ } func (s *usermanagerSuite) TestAddUser(c *gc.C) { - tag, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") + tag, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, jc.ErrorIsNil) user, err := s.State.User(tag) @@ -39,10 +40,33 @@ c.Assert(user.PasswordValid("password"), jc.IsTrue) } +func (s *usermanagerSuite) TestAddUserWithSharedModel(c *gc.C) { + sharedModelState := s.Factory.MakeModel(c, nil) + defer sharedModelState.Close() + + tag, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password", sharedModelState.ModelUUID()) + c.Assert(err, jc.ErrorIsNil) + + // Check model is shared with expected users. + sharedModel, err := sharedModelState.Model() + c.Assert(err, jc.ErrorIsNil) + users, err := sharedModel.Users() + c.Assert(err, jc.ErrorIsNil) + c.Assert(users, gc.HasLen, 2) + var modelUserTags = make([]names.UserTag, len(users)) + for i, u := range users { + modelUserTags[i] = u.UserTag() + } + c.Assert(modelUserTags, jc.SameContents, []names.UserTag{ + tag, + names.NewLocalUserTag("admin"), + }) +} + func (s *usermanagerSuite) TestAddExistingUser(c *gc.C) { s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar"}) - _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") + _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, gc.ErrorMatches, "failed to create user: user already exists") } @@ -52,7 +76,7 @@ return errors.New("call error") }, ) - _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") + _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, gc.ErrorMatches, "call error") } @@ -66,7 +90,7 @@ return errors.New("wrong result type") }, ) - _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") + _, _, err := s.usermanager.AddUser("foobar", "Foo Bar", "password") c.Assert(err, gc.ErrorMatches, "expected 1 result, got 2") } @@ -82,8 +106,8 @@ } func (s *usermanagerSuite) TestDisableUserBadName(c *gc.C) { - err := s.usermanager.DisableUser("not@home") - c.Assert(err, gc.ErrorMatches, `"not@home" is not a valid username`) + err := s.usermanager.DisableUser("not!good") + c.Assert(err, gc.ErrorMatches, `"not!good" is not a valid username`) } func (s *usermanagerSuite) TestEnableUser(c *gc.C) { @@ -98,13 +122,13 @@ } func (s *usermanagerSuite) TestEnableUserBadName(c *gc.C) { - err := s.usermanager.EnableUser("not@home") - c.Assert(err, gc.ErrorMatches, `"not@home" is not a valid username`) + err := s.usermanager.EnableUser("not!good") + c.Assert(err, gc.ErrorMatches, `"not!good" is not a valid username`) } func (s *usermanagerSuite) TestCantRemoveAdminUser(c *gc.C) { err := s.usermanager.DisableUser(s.AdminUserTag(c).Name()) - c.Assert(err, gc.ErrorMatches, "failed to disable user: cannot 
disable state server environment owner") + c.Assert(err, gc.ErrorMatches, "failed to disable user: cannot disable controller model owner") } func (s *usermanagerSuite) TestUserInfo(c *gc.C) { @@ -173,7 +197,16 @@ c.Assert(user.PasswordValid("new-password"), jc.IsTrue) } +func (s *usermanagerSuite) TestSetUserPasswordCanonical(c *gc.C) { + tag := s.AdminUserTag(c) + err := s.usermanager.SetPassword(tag.Canonical(), "new-password") + c.Assert(err, jc.ErrorIsNil) + user, err := s.State.User(tag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(user.PasswordValid("new-password"), jc.IsTrue) +} + func (s *usermanagerSuite) TestSetUserPasswordBadName(c *gc.C) { - err := s.usermanager.SetPassword("not@home", "new-password") - c.Assert(err, gc.ErrorMatches, `"not@home" is not a valid username`) + err := s.usermanager.SetPassword("not!good", "new-password") + c.Assert(err, gc.ErrorMatches, `"not!good" is not a valid username`) } === removed file 'src/github.com/juju/juju/api/watcher/interfaces.go' --- src/github.com/juju/juju/api/watcher/interfaces.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/watcher/interfaces.go 1970-01-01 00:00:00 +0000 @@ -1,50 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package watcher - -import ( - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state/multiwatcher" -) - -// NotifyWatcher will send events when something changes. -// It does not send content for those changes. -type NotifyWatcher interface { - Changes() <-chan struct{} - Stop() error - Err() error -} - -// StringsWatcher will send events when something changes. -// The content for the changes is a list of strings. -type StringsWatcher interface { - Changes() <-chan []string - Stop() error - Err() error -} - -// EntityWatcher will send events when something changes. -// The content for the changes is a list of tag strings. -type EntityWatcher interface { - Changes() <-chan []string - Stop() error - Err() error -} - -// RelationUnitsWatcher will send events when something changes. -// The content for the changes is a params.RelationUnitsChange struct. -type RelationUnitsWatcher interface { - Changes() <-chan multiwatcher.RelationUnitsChange - Stop() error - Err() error -} - -// MachineStorageIdsWatcher will send events when the lifecycle states -// of machine/storage entities change. The content for the changes is a -// list of params.MachineStorageId. -type MachineStorageIdsWatcher interface { - Changes() <-chan []params.MachineStorageId - Stop() error - Err() error -} === added file 'src/github.com/juju/juju/api/watcher/package_test.go' --- src/github.com/juju/juju/api/watcher/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/api/watcher/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package watcher_test + +import ( + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/api/watcher/watcher.go' --- src/github.com/juju/juju/api/watcher/watcher.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/watcher/watcher.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "github.com/juju/juju/api/base" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/watcher" ) var logger = loggo.GetLogger("juju.api.watcher") @@ -119,15 +119,16 @@ wg.Wait() } -func (w *commonWatcher) Stop() error { +// Kill is part of the worker.Worker interface. +func (w *commonWatcher) Kill() { w.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (w *commonWatcher) Wait() error { return w.tomb.Wait() } -func (w *commonWatcher) Err() error { - return w.tomb.Err() -} - // notifyWatcher will send events when something changes. // It does not send content for those changes. type notifyWatcher struct { @@ -139,7 +140,7 @@ // If an API call returns a NotifyWatchResult, you can use this to turn it into // a local Watcher. -func NewNotifyWatcher(caller base.APICaller, result params.NotifyWatchResult) NotifyWatcher { +func NewNotifyWatcher(caller base.APICaller, result params.NotifyWatchResult) watcher.NotifyWatcher { w := ¬ifyWatcher{ caller: caller, notifyWatcherId: result.NotifyWatcherId, @@ -147,7 +148,6 @@ } go func() { defer w.tomb.Done() - defer close(w.out) w.tomb.Kill(w.loop()) }() return w @@ -178,7 +178,7 @@ // Changes returns a channel that receives a value when a given entity // changes in some way. -func (w *notifyWatcher) Changes() <-chan struct{} { +func (w *notifyWatcher) Changes() watcher.NotifyChannel { return w.out } @@ -191,7 +191,7 @@ out chan []string } -func NewStringsWatcher(caller base.APICaller, result params.StringsWatchResult) StringsWatcher { +func NewStringsWatcher(caller base.APICaller, result params.StringsWatchResult) watcher.StringsWatcher { w := &stringsWatcher{ caller: caller, stringsWatcherId: result.StringsWatcherId, @@ -199,7 +199,6 @@ } go func() { defer w.tomb.Done() - defer close(w.out) w.tomb.Kill(w.loop(result.Changes)) }() return w @@ -232,7 +231,7 @@ // Changes returns a channel that receives a list of strings of watched // entites with changes. 
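[Editor's note] The commonWatcher above trades the Stop/Err pair for the worker.Worker Kill/Wait contract, and the loops no longer close their output channels on the way out (the `defer close(w.out)` lines are removed). A self-contained sketch of that contract, using a quit/done channel pair where the real code uses a tomb:

    package main

    import (
        "fmt"
        "time"
    )

    // notifyWorker is a minimal sketch of the Kill/Wait surface the diff
    // moves the API watchers onto. Hypothetical stand-in types.
    type notifyWorker struct {
        changes chan struct{}
        quit    chan struct{}
        done    chan error
    }

    func newNotifyWorker() *notifyWorker {
        w := &notifyWorker{
            changes: make(chan struct{}),
            quit:    make(chan struct{}),
            done:    make(chan error, 1),
        }
        go func() { w.done <- w.loop() }()
        return w
    }

    func (w *notifyWorker) loop() error {
        for {
            select {
            case w.changes <- struct{}{}: // deliver an event
                time.Sleep(10 * time.Millisecond)
            case <-w.quit:
                // Deliberately do NOT close w.changes on the way out;
                // a stopped watcher simply goes quiet.
                return nil
            }
        }
    }

    func (w *notifyWorker) Changes() <-chan struct{} { return w.changes }
    func (w *notifyWorker) Kill()                    { close(w.quit) }
    func (w *notifyWorker) Wait() error              { return <-w.done }

    func main() {
        w := newNotifyWorker()
        <-w.Changes() // initial event
        w.Kill()
        fmt.Println("stopped:", w.Wait())
    }

Kill asks the loop to stop and returns immediately; Wait blocks until the loop has exited and reports its final error, which is exactly the split the tests below start relying on.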
-func (w *stringsWatcher) Changes() <-chan []string { +func (w *stringsWatcher) Changes() watcher.StringsChannel { return w.out } @@ -243,25 +242,39 @@ commonWatcher caller base.APICaller relationUnitsWatcherId string - out chan multiwatcher.RelationUnitsChange + out chan watcher.RelationUnitsChange } -func NewRelationUnitsWatcher(caller base.APICaller, result params.RelationUnitsWatchResult) RelationUnitsWatcher { +func NewRelationUnitsWatcher(caller base.APICaller, result params.RelationUnitsWatchResult) watcher.RelationUnitsWatcher { w := &relationUnitsWatcher{ caller: caller, relationUnitsWatcherId: result.RelationUnitsWatcherId, - out: make(chan multiwatcher.RelationUnitsChange), + out: make(chan watcher.RelationUnitsChange), } go func() { defer w.tomb.Done() - defer close(w.out) w.tomb.Kill(w.loop(result.Changes)) }() return w } -func (w *relationUnitsWatcher) loop(initialChanges multiwatcher.RelationUnitsChange) error { - changes := initialChanges +func copyRelationUnitsChanged(src params.RelationUnitsChange) watcher.RelationUnitsChange { + dst := watcher.RelationUnitsChange{ + Departed: src.Departed, + } + if src.Changed != nil { + dst.Changed = make(map[string]watcher.UnitSettings) + for name, unitSettings := range src.Changed { + dst.Changed[name] = watcher.UnitSettings{ + Version: unitSettings.Version, + } + } + } + return dst +} + +func (w *relationUnitsWatcher) loop(initialChanges params.RelationUnitsChange) error { + changes := copyRelationUnitsChanged(initialChanges) w.newResult = func() interface{} { return new(params.RelationUnitsWatchResult) } w.call = makeWatcherAPICaller(w.caller, "RelationUnitsWatcher", w.relationUnitsWatcherId) w.commonWatcher.init() @@ -281,14 +294,14 @@ // at this point, so just return. return nil } - changes = data.(*params.RelationUnitsWatchResult).Changes + changes = copyRelationUnitsChanged(data.(*params.RelationUnitsWatchResult).Changes) } } // Changes returns a channel that will receive the changes to // counterpart units in a relation. The first event on the channel // holds the initial state of the relation in its Changed field. -func (w *relationUnitsWatcher) Changes() <-chan multiwatcher.RelationUnitsChange { +func (w *relationUnitsWatcher) Changes() watcher.RelationUnitsChannel { return w.out } @@ -299,39 +312,49 @@ commonWatcher caller base.APICaller machineAttachmentsWatcherId string - out chan []params.MachineStorageId + out chan []watcher.MachineStorageId } // NewVolumeAttachmentsWatcher returns a MachineStorageIdsWatcher which // communicates with the VolumeAttachmentsWatcher API facade to watch // volume attachments. -func NewVolumeAttachmentsWatcher(caller base.APICaller, result params.MachineStorageIdsWatchResult) MachineStorageIdsWatcher { +func NewVolumeAttachmentsWatcher(caller base.APICaller, result params.MachineStorageIdsWatchResult) watcher.MachineStorageIdsWatcher { return newMachineStorageIdsWatcher("VolumeAttachmentsWatcher", caller, result) } // NewFilesystemAttachmentsWatcher returns a MachineStorageIdsWatcher which // communicates with the FilesystemAttachmentsWatcher API facade to watch // filesystem attachments. 
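[Editor's note] copyRelationUnitsChanged above (and copyMachineStorageIds below) deep-copy wire structs into watcher-side structs so that params types no longer leak through the watcher channels. A sketch of the field-by-field translation with hypothetical stand-in types on both sides:

    package main

    import "fmt"

    // Wire-side shapes, mirroring params.RelationUnitsChange in form only.
    type paramsUnitSettings struct{ Version int64 }
    type paramsChange struct {
        Changed  map[string]paramsUnitSettings
        Departed []string
    }

    // Worker-side shapes, mirroring watcher.RelationUnitsChange in form only.
    type unitSettings struct{ Version int64 }
    type change struct {
        Changed  map[string]unitSettings
        Departed []string
    }

    // copyChange rebuilds the map so the caller never aliases the wire
    // struct, the same translation the diff adds.
    func copyChange(src paramsChange) change {
        dst := change{Departed: src.Departed}
        if src.Changed != nil {
            dst.Changed = make(map[string]unitSettings, len(src.Changed))
            for name, s := range src.Changed {
                dst.Changed[name] = unitSettings{Version: s.Version}
            }
        }
        return dst
    }

    func main() {
        src := paramsChange{
            Changed:  map[string]paramsUnitSettings{"mysql/0": {Version: 2}},
            Departed: []string{"logging/0"},
        }
        fmt.Printf("%+v\n", copyChange(src))
    }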
-func NewFilesystemAttachmentsWatcher(caller base.APICaller, result params.MachineStorageIdsWatchResult) MachineStorageIdsWatcher { +func NewFilesystemAttachmentsWatcher(caller base.APICaller, result params.MachineStorageIdsWatchResult) watcher.MachineStorageIdsWatcher { return newMachineStorageIdsWatcher("FilesystemAttachmentsWatcher", caller, result) } -func newMachineStorageIdsWatcher(facade string, caller base.APICaller, result params.MachineStorageIdsWatchResult) MachineStorageIdsWatcher { +func newMachineStorageIdsWatcher(facade string, caller base.APICaller, result params.MachineStorageIdsWatchResult) watcher.MachineStorageIdsWatcher { w := &machineAttachmentsWatcher{ caller: caller, machineAttachmentsWatcherId: result.MachineStorageIdsWatcherId, - out: make(chan []params.MachineStorageId), + out: make(chan []watcher.MachineStorageId), } go func() { defer w.tomb.Done() - defer close(w.out) w.tomb.Kill(w.loop(facade, result.Changes)) }() return w } +func copyMachineStorageIds(src []params.MachineStorageId) []watcher.MachineStorageId { + dst := make([]watcher.MachineStorageId, len(src)) + for i, msi := range src { + dst[i] = watcher.MachineStorageId{ + MachineTag: msi.MachineTag, + AttachmentTag: msi.AttachmentTag, + } + } + return dst +} + func (w *machineAttachmentsWatcher) loop(facade string, initialChanges []params.MachineStorageId) error { - changes := initialChanges + changes := copyMachineStorageIds(initialChanges) w.newResult = func() interface{} { return new(params.MachineStorageIdsWatchResult) } w.call = makeWatcherAPICaller(w.caller, facade, w.machineAttachmentsWatcherId) w.commonWatcher.init() @@ -351,43 +374,42 @@ // at this point, so just return. return nil } - changes = data.(*params.MachineStorageIdsWatchResult).Changes + changes = copyMachineStorageIds(data.(*params.MachineStorageIdsWatchResult).Changes) } } // Changes returns a channel that will receive the IDs of machine // storage entity attachments which have changed. -func (w *machineAttachmentsWatcher) Changes() <-chan []params.MachineStorageId { +func (w *machineAttachmentsWatcher) Changes() watcher.MachineStorageIdsChannel { return w.out } -// EntityWatcher will send events when something changes. +// EntitiesWatcher will send events when something changes. // The content for the changes is a list of tag strings. 
-type entityWatcher struct { +type entitiesWatcher struct { commonWatcher - caller base.APICaller - entityWatcherId string - out chan []string + caller base.APICaller + entitiesWatcherId string + out chan []string } -func NewEntityWatcher(caller base.APICaller, result params.EntityWatchResult) EntityWatcher { - w := &entityWatcher{ - caller: caller, - entityWatcherId: result.EntityWatcherId, - out: make(chan []string), +func NewEntitiesWatcher(caller base.APICaller, result params.EntitiesWatchResult) watcher.EntitiesWatcher { + w := &entitiesWatcher{ + caller: caller, + entitiesWatcherId: result.EntitiesWatcherId, + out: make(chan []string), } go func() { defer w.tomb.Done() - defer close(w.out) w.tomb.Kill(w.loop(result.Changes)) }() return w } -func (w *entityWatcher) loop(initialChanges []string) error { +func (w *entitiesWatcher) loop(initialChanges []string) error { changes := initialChanges - w.newResult = func() interface{} { return new(params.EntityWatchResult) } - w.call = makeWatcherAPICaller(w.caller, "EntityWatcher", w.entityWatcherId) + w.newResult = func() interface{} { return new(params.EntitiesWatchResult) } + w.call = makeWatcherAPICaller(w.caller, "EntityWatcher", w.entitiesWatcherId) w.commonWatcher.init() go w.commonLoop() @@ -406,13 +428,13 @@ return nil } // Changes have been transformed at the server side already. - changes = data.(*params.EntityWatchResult).Changes + changes = data.(*params.EntitiesWatchResult).Changes } } // Changes returns a channel that receives a list of changes // as tags (converted to strings) of the watched entities // with changes. -func (w *entityWatcher) Changes() <-chan []string { +func (w *entitiesWatcher) Changes() watcher.StringsChannel { return w.out } === modified file 'src/github.com/juju/juju/api/watcher/watcher_test.go' --- src/github.com/juju/juju/api/watcher/watcher_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/api/watcher/watcher_test.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,6 @@ package watcher_test import ( - stdtesting "testing" "time" jc "github.com/juju/testing/checkers" @@ -15,18 +14,15 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/storage" "github.com/juju/juju/storage/provider/dummy" "github.com/juju/juju/storage/provider/registry" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" + corewatcher "github.com/juju/juju/watcher" + "github.com/juju/juju/watcher/watchertest" ) -func TestAll(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - type watcherSuite struct { testing.JujuConnSuite @@ -41,7 +37,7 @@ func (s *watcherSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.stateAPI, s.rawMachine = s.OpenAPIAsNewMachine(c, state.JobManageEnviron, state.JobHostUnits) + s.stateAPI, s.rawMachine = s.OpenAPIAsNewMachine(c, state.JobManageModel, state.JobHostUnits) } func (s *watcherSuite) TestWatchInitialEventConsumed(c *gc.C) { @@ -79,12 +75,10 @@ result := results.Results[0] c.Assert(result.Error, gc.IsNil) - // params.NotifyWatcher conforms to the state.NotifyWatcher interface w := watcher.NewNotifyWatcher(s.stateAPI, result) - wc := statetesting.NewNotifyWatcherC(c, s.State, w) + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() wc.AssertOneChange() - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *watcherSuite) TestNotifyWatcherStopsWithPendingSend(c *gc.C) { @@ -96,13 +90,10 
@@ result := results.Results[0] c.Assert(result.Error, gc.IsNil) - // params.NotifyWatcher conforms to the state.NotifyWatcher interface + // params.NotifyWatcher conforms to the watcher.NotifyWatcher interface w := watcher.NewNotifyWatcher(s.stateAPI, result) - wc := statetesting.NewNotifyWatcherC(c, s.State, w) - - // Now, without reading any changes try stopping the watcher. - statetesting.AssertCanStopWhenSending(c, w) - wc.AssertClosed() + wc := watchertest.NewNotifyWatcherC(c, w, s.BackingState.StartSync) + wc.AssertStops() } func (s *watcherSuite) TestWatchUnitsKeepsEvents(c *gc.C) { @@ -136,7 +127,9 @@ // Start a StringsWatcher and check the initial event. w := watcher.NewStringsWatcher(s.stateAPI, result) - wc := statetesting.NewStringsWatcherC(c, s.State, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() + wc.AssertChange("mysql/0", "logging/0") wc.AssertNoChange() @@ -157,9 +150,6 @@ wc.AssertChange("logging/0") wc.AssertChange("mysql/0") wc.AssertNoChange() - - statetesting.AssertStop(c, w) - wc.AssertClosed() } func (s *watcherSuite) TestStringsWatcherStopsWithPendingSend(c *gc.C) { @@ -174,7 +164,8 @@ // Start a StringsWatcher and check the initial event. w := watcher.NewStringsWatcher(s.stateAPI, result) - wc := statetesting.NewStringsWatcherC(c, s.State, w) + wc := watchertest.NewStringsWatcherC(c, w, s.BackingState.StartSync) + defer wc.AssertStops() // Create a service, deploy a unit of it on the machine. mysql := s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql")) @@ -182,14 +173,9 @@ c.Assert(err, jc.ErrorIsNil) err = principal.AssignToMachine(s.rawMachine) c.Assert(err, jc.ErrorIsNil) - - // Ensure the initial event is delivered. Then test the watcher - // can be stopped cleanly without reading the pending change. - s.BackingState.StartSync() - statetesting.AssertCanStopWhenSending(c, w) - wc.AssertClosed() } +// TODO(fwereade): 2015-11-18 lp:1517391 func (s *watcherSuite) TestWatchMachineStorage(c *gc.C) { registry.RegisterProvider( "envscoped", @@ -212,7 +198,7 @@ var results params.MachineStorageIdsWatchResults args := params.Entities{Entities: []params.Entity{{ - Tag: s.State.EnvironTag().String(), + Tag: s.State.ModelTag().String(), }}} err := s.stateAPI.APICall( "StorageProvisioner", @@ -224,27 +210,49 @@ c.Assert(result.Error, gc.IsNil) w := watcher.NewVolumeAttachmentsWatcher(s.stateAPI, result) + defer func() { + + // Check we can stop the watcher... + w.Kill() + wait := make(chan error) + go func() { + wait <- w.Wait() + }() + select { + case err := <-wait: + c.Assert(err, jc.ErrorIsNil) + case <-time.After(coretesting.LongWait): + c.Fatalf("watcher never stopped") + } + + // ...and that its channel hasn't been closed. + s.BackingState.StartSync() + select { + case change, ok := <-w.Changes(): + c.Fatalf("watcher sent unexpected change: (%#v, %v)", change, ok) + default: + } + + }() + + // Check initial event; + s.BackingState.StartSync() select { case changes, ok := <-w.Changes(): c.Assert(ok, jc.IsTrue) - c.Assert(changes, jc.SameContents, []params.MachineStorageId{{ + c.Assert(changes, jc.SameContents, []corewatcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-0", }}) case <-time.After(coretesting.LongWait): c.Fatalf("timed out waiting for change") } + + // check no subsequent event. 
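[Editor's note] The rewritten tests stop a watcher by killing it, waiting with a timeout, and then asserting that the Changes channel has gone quiet rather than closed, since the loops no longer close it. A sketch of that assertion against a trivial hypothetical worker; the watchertest helpers are assumed to behave roughly like this:

    package main

    import (
        "fmt"
        "time"
    )

    type worker interface {
        Kill()
        Wait() error
        Changes() <-chan struct{}
    }

    // stubWorker is a trivial stand-in that dies on Kill and never
    // closes its output channel.
    type stubWorker struct {
        out  chan struct{}
        dead chan struct{}
    }

    func (w *stubWorker) Kill()                    { close(w.dead) }
    func (w *stubWorker) Wait() error              { <-w.dead; return nil }
    func (w *stubWorker) Changes() <-chan struct{} { return w.out }

    // assertStops kills, waits with a timeout, then checks the channel
    // is open but quiet, mirroring the test pattern in the hunks above.
    func assertStops(w worker) error {
        w.Kill()
        done := make(chan error, 1)
        go func() { done <- w.Wait() }()
        select {
        case err := <-done:
            if err != nil {
                return err
            }
        case <-time.After(5 * time.Second):
            return fmt.Errorf("watcher never stopped")
        }
        select {
        case change, ok := <-w.Changes():
            return fmt.Errorf("unexpected change after stop: (%#v, %v)", change, ok)
        default:
            return nil // still open, just quiet
        }
    }

    func main() {
        w := &stubWorker{out: make(chan struct{}), dead: make(chan struct{})}
        fmt.Println("assertStops:", assertStops(w))
    }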
+ s.BackingState.StartSync() select { case <-w.Changes(): c.Fatalf("received unexpected change") case <-time.After(coretesting.ShortWait): } - - statetesting.AssertStop(c, w) - select { - case _, ok := <-w.Changes(): - c.Assert(ok, jc.IsFalse) - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting for watcher channel to be closed") - } } === modified file 'src/github.com/juju/juju/apiserver/action/action.go' --- src/github.com/juju/juju/apiserver/action/action.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/action/action.go 2016-03-22 15:18:22 +0000 @@ -15,7 +15,7 @@ var logger = loggo.GetLogger("juju.apiserver.action") func init() { - common.RegisterStandardFacade("Action", 0, NewActionAPI) + common.RegisterStandardFacade("Action", 1, NewActionAPI) } // ActionAPI implements the client API for interacting with Actions === modified file 'src/github.com/juju/juju/apiserver/action/action_test.go' --- src/github.com/juju/juju/apiserver/action/action_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/action/action_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/apiserver/action" "github.com/juju/juju/apiserver/common" @@ -75,7 +75,7 @@ }) s.machine0 = factory.MakeMachine(c, &jujuFactory.MachineParams{ Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits, state.JobManageEnviron}, + Jobs: []state.MachineJob{state.JobHostUnits, state.JobManageModel}, }) s.wordpressUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ Service: s.wordpress, === modified file 'src/github.com/juju/juju/apiserver/addresser/addresser.go' --- src/github.com/juju/juju/apiserver/addresser/addresser.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/addresser/addresser.go 2016-03-22 15:18:22 +0000 @@ -17,7 +17,7 @@ ) func init() { - common.RegisterStandardFacade("Addresser", 1, NewAddresserAPI) + common.RegisterStandardFacade("Addresser", 2, NewAddresserAPI) } var logger = loggo.GetLogger("juju.apiserver.addresser") @@ -35,9 +35,9 @@ resources *common.Resources, authorizer common.Authorizer, ) (*AddresserAPI, error) { - isEnvironManager := authorizer.AuthEnvironManager() - if !isEnvironManager { - // Addresser must run as environment manager. + isModelManager := authorizer.AuthModelManager() + if !isModelManager { + // Addresser must run as model manager. return nil, common.ErrPerm } sti := getState(st) @@ -51,13 +51,13 @@ // getNetworkingEnviron checks if the environment implements NetworkingEnviron // and also if it supports IP address allocation. func (api *AddresserAPI) getNetworkingEnviron() (environs.NetworkingEnviron, bool, error) { - config, err := api.st.EnvironConfig() + config, err := api.st.ModelConfig() if err != nil { - return nil, false, errors.Annotate(err, "getting environment config") + return nil, false, errors.Annotate(err, "getting model config") } env, err := environs.New(config) if err != nil { - return nil, false, errors.Annotate(err, "validating environment config") + return nil, false, errors.Annotate(err, "validating model config") } netEnv, ok := environs.SupportsNetworking(env) if !ok { @@ -132,8 +132,8 @@ // netEnvReleaseAddress is used for testability. 
var netEnvReleaseAddress = func(env environs.NetworkingEnviron, - instId instance.Id, subnetId network.Id, addr network.Address, macAddress string) error { - return env.ReleaseAddress(instId, subnetId, addr, macAddress) + instId instance.Id, subnetId network.Id, addr network.Address, macAddress, hostname string) error { + return env.ReleaseAddress(instId, subnetId, addr, macAddress, hostname) } // releaseIPAddress releases one IP address. @@ -146,7 +146,7 @@ } // Now release the IP address. subnetId := network.Id(ipAddress.SubnetId()) - err = netEnvReleaseAddress(netEnv, ipAddress.InstanceId(), subnetId, ipAddress.Address(), ipAddress.MACAddress()) + err = netEnvReleaseAddress(netEnv, ipAddress.InstanceId(), subnetId, ipAddress.Address(), ipAddress.MACAddress(), "") if err != nil { return errors.Trace(err) } @@ -154,18 +154,18 @@ } // WatchIPAddresses observes changes to the IP addresses. -func (api *AddresserAPI) WatchIPAddresses() (params.EntityWatchResult, error) { +func (api *AddresserAPI) WatchIPAddresses() (params.EntitiesWatchResult, error) { watch := &ipAddressesWatcher{api.st.WatchIPAddresses(), api.st} if changes, ok := <-watch.Changes(); ok { mappedChanges, err := watch.MapChanges(changes) if err != nil { - return params.EntityWatchResult{}, errors.Trace(err) + return params.EntitiesWatchResult{}, errors.Trace(err) } - return params.EntityWatchResult{ - EntityWatcherId: api.resources.Register(watch), - Changes: mappedChanges, + return params.EntitiesWatchResult{ + EntitiesWatcherId: api.resources.Register(watch), + Changes: mappedChanges, }, nil } - return params.EntityWatchResult{}, watcher.EnsureErr(watch) + return params.EntitiesWatchResult{}, watcher.EnsureErr(watch) } === modified file 'src/github.com/juju/juju/apiserver/addresser/addresser_test.go' --- src/github.com/juju/juju/apiserver/addresser/addresser_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/addresser/addresser_test.go 2016-03-22 15:18:22 +0000 @@ -5,6 +5,7 @@ import ( "github.com/juju/errors" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -12,12 +13,13 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/cmd/envcmd" + "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/configstore" "github.com/juju/juju/feature" "github.com/juju/juju/instance" + "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/network" "github.com/juju/juju/provider/dummy" "github.com/juju/juju/state" @@ -94,7 +96,7 @@ s.st.stub.SetErrors(errors.New("ouch")) result := s.api.CanDeallocateAddresses() - c.Assert(result.Error, gc.ErrorMatches, "getting environment config: ouch") + c.Assert(result.Error, gc.ErrorMatches, "getting model config: ouch") c.Assert(result.Result, jc.IsFalse) } @@ -103,7 +105,7 @@ s.st.setConfig(c, config) result := s.api.CanDeallocateAddresses() - c.Assert(result.Error, gc.ErrorMatches, `validating environment config: no registered provider for "nonex"`) + c.Assert(result.Error, gc.ErrorMatches, `validating model config: no registered provider for "nonex"`) c.Assert(result.Result, jc.IsFalse) } @@ -152,13 +154,19 @@ // Prepare tests. 
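[Editor's note] netEnvReleaseAddress above stays a package-level function variable precisely so the tests can PatchValue it, and its signature gains the trailing hostname argument. A sketch of the same patch-for-testability pattern; the names and stub bodies here are hypothetical:

    package main

    import "fmt"

    // release is a package-level function variable standing in for
    // netEnvReleaseAddress; keeping the call behind a var lets tests
    // swap in a recording stub without extra interface plumbing.
    var release = func(addr, mac, hostname string) error {
        fmt.Printf("releasing %s (mac %s, host %q)\n", addr, mac, hostname)
        return nil
    }

    func cleanup(addrs []string) error {
        for _, a := range addrs {
            // Callers with no hostname pass "", as the diff does.
            if err := release(a, "fff3", ""); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        // A test does the moral equivalent of s.PatchValue(&release, stub).
        called := 0
        saved := release
        release = func(addr, mac, hostname string) error {
            called++
            return nil
        }
        defer func() { release = saved }()

        if err := cleanup([]string{"0.1.2.3"}); err != nil {
            panic(err)
        }
        fmt.Println("stub called:", called)
    }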
called := 0 - s.PatchValue(addresser.NetEnvReleaseAddress, func(env environs.NetworkingEnviron, - instId instance.Id, subnetId network.Id, addr network.Address, macAddress string) error { + s.PatchValue(addresser.NetEnvReleaseAddress, func( + env environs.NetworkingEnviron, + instId instance.Id, + subnetId network.Id, + addr network.Address, + macAddress, hostname string, + ) error { called++ c.Assert(instId, gc.Equals, instance.Id("a3")) c.Assert(subnetId, gc.Equals, network.Id("a")) c.Assert(addr, gc.Equals, network.NewAddress("0.1.2.3")) c.Assert(macAddress, gc.Equals, "fff3") + c.Assert(hostname, gc.Equals, "") return nil }) @@ -191,7 +199,7 @@ // First action is getting the environment configuration, // so the injected error is returned here. apiErr := s.api.CleanupIPAddresses() - c.Assert(apiErr.Error, gc.ErrorMatches, "getting environment config: ouch") + c.Assert(apiErr.Error, gc.ErrorMatches, "getting model config: ouch") // Still has two dead addresses. dead, err = s.st.DeadIPAddresses() @@ -209,7 +217,7 @@ // Validation of configuration fails due to illegal provider. apiErr := s.api.CleanupIPAddresses() - c.Assert(apiErr.Error, gc.ErrorMatches, `validating environment config: no registered provider for "nonex"`) + c.Assert(apiErr.Error, gc.ErrorMatches, `validating model config: no registered provider for "nonex"`) // Still has two dead addresses. dead, err = s.st.DeadIPAddresses() @@ -242,8 +250,8 @@ result, err := s.api.WatchIPAddresses() c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.EntityWatchResult{ - EntityWatcherId: "1", + c.Assert(result, gc.DeepEquals, params.EntitiesWatchResult{ + EntitiesWatcherId: "1", Changes: []string{ "ipaddress-00000000-1111-2222-3333-0123456789ab", "ipaddress-00000000-1111-2222-4444-0123456789ab", @@ -268,7 +276,11 @@ func testingEnvConfig(c *gc.C) *config.Config { cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) c.Assert(err, jc.ErrorIsNil) - env, err := environs.Prepare(cfg, envcmd.BootstrapContext(coretesting.Context(c)), configstore.NewMem()) + env, err := environs.Prepare( + modelcmd.BootstrapContext(coretesting.Context(c)), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) c.Assert(err, jc.ErrorIsNil) return env.Config() } @@ -289,7 +301,11 @@ func mockTestingEnvConfig(c *gc.C) *config.Config { cfg, err := config.New(config.NoDefaults, mockConfig()) c.Assert(err, jc.ErrorIsNil) - env, err := environs.Prepare(cfg, envcmd.BootstrapContext(coretesting.Context(c)), configstore.NewMem()) + env, err := environs.Prepare( + modelcmd.BootstrapContext(coretesting.Context(c)), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) c.Assert(err, jc.ErrorIsNil) return env.Config() } === modified file 'src/github.com/juju/juju/apiserver/addresser/mock_test.go' --- src/github.com/juju/juju/apiserver/addresser/mock_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/addresser/mock_test.go 2016-03-22 15:18:22 +0000 @@ -7,12 +7,11 @@ "sort" "sync" - gc "gopkg.in/check.v1" - "github.com/juju/errors" "github.com/juju/names" "github.com/juju/testing" jujutxn "github.com/juju/txn" + gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/addresser" "github.com/juju/juju/environs" @@ -80,12 +79,12 @@ var _ addresser.StateInterface = (*mockState)(nil) -// EnvironConfig implements StateInterface. 
-func (mst *mockState) EnvironConfig() (*config.Config, error) { +// ModelConfig implements StateInterface. +func (mst *mockState) ModelConfig() (*config.Config, error) { mst.mu.Lock() defer mst.mu.Unlock() - mst.stub.MethodCall(mst, "EnvironConfig") + mst.stub.MethodCall(mst, "ModelConfig") if err := mst.stub.NextErr(); err != nil { return nil, err @@ -391,7 +390,7 @@ environs.EnvironProvider } -func (p mockEnvironProvider) PrepareForBootstrap(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) { +func (p mockEnvironProvider) PrepareForBootstrap(environs.BootstrapContext, environs.PrepareForBootstrapParams) (environs.Environ, error) { return &mockEnviron{}, nil } === modified file 'src/github.com/juju/juju/apiserver/addresser/state.go' --- src/github.com/juju/juju/apiserver/addresser/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/addresser/state.go 2016-03-22 15:18:22 +0000 @@ -28,8 +28,8 @@ // StateInterface defines the needed methods of state.State // for the work of the Addresser API. type StateInterface interface { - // EnvironConfig retrieves the environment configuration. - EnvironConfig() (*config.Config, error) + // ModelConfig retrieves the model configuration. + ModelConfig() (*config.Config, error) // DeadIPAddresses retrieves all dead IP addresses. DeadIPAddresses() ([]StateIPAddress, error) === modified file 'src/github.com/juju/juju/apiserver/admin.go' --- src/github.com/juju/juju/apiserver/admin.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/admin.go 2016-03-22 15:18:22 +0000 @@ -5,6 +5,7 @@ import ( "sync" + "sync/atomic" "time" "github.com/juju/errors" @@ -14,6 +15,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/rpc" + "github.com/juju/juju/rpc/rpcreflect" "github.com/juju/juju/state" "github.com/juju/juju/state/presence" "github.com/juju/juju/version" @@ -64,7 +66,7 @@ case nil: // in this case no need to wrap authed api so we do nothing default: - return fail, err + return fail, errors.Trace(err) } } @@ -82,10 +84,18 @@ isUser = true } - serverOnlyLogin := loginVersion > 1 && a.root.envUUID == "" + serverOnlyLogin := a.root.modelUUID == "" - entity, lastConnection, err := doCheckCreds(a.root.state, req, !serverOnlyLogin) + entity, lastConnection, err := doCheckCreds(a.root.state, req, !serverOnlyLogin, a.srv.authCtxt) if err != nil { + if err, ok := errors.Cause(err).(*common.DischargeRequiredError); ok { + loginResult := params.LoginResultV1{ + DischargeRequired: err.Macaroon, + DischargeRequiredReason: err.Error(), + } + logger.Infof("login failed with discharge-required error: %v", err) + return loginResult, nil + } if a.maintenanceInProgress() { // An upgrade, restore or similar operation is in // progress. It is possible for logins to fail until this @@ -95,25 +105,25 @@ return fail, MaintenanceNoLoginError } // Here we have a special case. The machine agents that manage - // environments in the state server environment need to be able to - // open API connections to other environments. In those cases, we - // need to look in the state server database to check the creds + // models in the controller model need to be able to + // open API connections to other models. 
In those cases, we + // need to look in the controller database to check the creds // against the machine if and only if the entity tag is a machine tag, - // and the machine exists in the state server environment, and the + // and the machine exists in the controller model, and the // machine has the manage state job. If all those parts are valid, we - // can then check the credentials against the state server environment + // can then check the credentials against the controller model // machine. if kind != names.MachineTagKind { - return fail, err + return fail, errors.Trace(err) } - entity, err = a.checkCredsOfStateServerMachine(req) + entity, err = a.checkCredsOfControllerMachine(req) if err != nil { - return fail, err + return fail, errors.Trace(err) } - // If we are here, then the entity will refer to a state server - // machine in the state server environment, and we don't need a pinger + // If we are here, then the entity will refer to a controller + // machine in the controller model, and we don't need a pinger // for it as we already have one running in the machine agent api - // worker for the state server environment. + // worker for the controller model. agentPingerNeeded = false } a.root.entity = entity @@ -128,47 +138,55 @@ if agentPingerNeeded { if err := startPingerIfAgent(a.root, entity); err != nil { - return fail, err + return fail, errors.Trace(err) } } var maybeUserInfo *params.AuthUserInfo + var envUser *state.ModelUser // Send back user info if user - if isUser { + if isUser && !serverOnlyLogin { maybeUserInfo = ¶ms.AuthUserInfo{ Identity: entity.Tag().String(), LastConnection: lastConnection, } + envUser, err = a.root.state.ModelUser(entity.Tag().(names.UserTag)) + if err != nil { + return fail, errors.Annotatef(err, "missing ModelUser for logged in user %s", entity.Tag()) + } + if envUser.ReadOnly() { + logger.Debugf("model user %s is READ ONLY", entity.Tag()) + } } // Fetch the API server addresses from state. hostPorts, err := a.root.state.APIHostPorts() if err != nil { - return fail, err + return fail, errors.Trace(err) } logger.Debugf("hostPorts: %v", hostPorts) - environ, err := a.root.state.Environment() + environ, err := a.root.state.Model() if err != nil { - return fail, err + return fail, errors.Trace(err) } loginResult := params.LoginResultV1{ Servers: params.FromNetworkHostsPorts(hostPorts), - EnvironTag: environ.Tag().String(), - ServerTag: environ.ServerTag().String(), + ModelTag: environ.Tag().String(), + ControllerTag: environ.ControllerTag().String(), Facades: DescribeFacades(), UserInfo: maybeUserInfo, - ServerVersion: version.Current.Number.String(), + ServerVersion: version.Current.String(), } // For sufficiently modern login versions, stop serving the - // state server environment at the root of the API. + // controller model at the root of the API. if serverOnlyLogin { authedApi = newRestrictedRoot(authedApi) - // Remove the EnvironTag from the response as there is no - // environment here. - loginResult.EnvironTag = "" + // Remove the ModelTag from the response as there is no + // model here. + loginResult.ModelTag = "" // Strip out the facades that are not supported from the result. 
var facades []params.FacadeVersions for _, facade := range loginResult.Facades { @@ -179,33 +197,36 @@ loginResult.Facades = facades } + if envUser != nil { + authedApi = newClientAuthRoot(authedApi, envUser) + } + a.root.rpcConn.ServeFinder(authedApi, serverError) return loginResult, nil } -// checkCredsOfStateServerMachine checks the special case of a state server -// machine creating an API connection for a different environment so it can -// run API workers for that environment to do things like provisioning +// checkCredsOfControllerMachine checks the special case of a controller +// machine creating an API connection for a different model so it can +// run API workers for that model to do things like provisioning // machines. -func (a *admin) checkCredsOfStateServerMachine(req params.LoginRequest) (state.Entity, error) { - // Check the credentials against the state server environment. - entity, _, err := doCheckCreds(a.srv.state, req, false) +func (a *admin) checkCredsOfControllerMachine(req params.LoginRequest) (state.Entity, error) { + entity, _, err := doCheckCreds(a.srv.state, req, false, a.srv.authCtxt) if err != nil { - return nil, err + return nil, errors.Trace(err) } machine, ok := entity.(*state.Machine) if !ok { return nil, errors.Errorf("entity should be a machine, but is %T", entity) } for _, job := range machine.Jobs() { - if job == state.JobManageEnviron { + if job == state.JobManageModel { return entity, nil } } - // The machine does exist in the state server environment, but it - // doesn't manage environments, so reject it. - return nil, common.ErrBadCreds + // The machine does exist in the controller model, but it + // doesn't manage models, so reject it. + return nil, errors.Trace(common.ErrBadCreds) } func (a *admin) maintenanceInProgress() bool { @@ -229,72 +250,161 @@ var doCheckCreds = checkCreds -// checkCreds validates the entities credentials in the current environment. -// If the entity is a user, and lookForEnvUser is true, an env user must exist -// for the environment. In the case of a user logging in to the server, but -// not an environment, there is no env user needed. While we have the env +// checkCreds validates the entities credentials in the current model. +// If the entity is a user, and lookForModelUser is true, a model user must exist +// for the model. In the case of a user logging in to the server, but +// not a model, there is no env user needed. While we have the env // user, if we do have it, update the last login time. -func checkCreds(st *state.State, req params.LoginRequest, lookForEnvUser bool) (state.Entity, *time.Time, error) { - tag, err := names.ParseTag(req.AuthTag) - if err != nil { - return nil, nil, err - } - entity, err := st.FindEntity(tag) - if errors.IsNotFound(err) { - // We return the same error when an entity does not exist as for a bad - // password, so that we don't allow unauthenticated users to find - // information about existing entities. - logger.Debugf("entity %q not found", tag) - return nil, nil, common.ErrBadCreds - } +// +// Note that when logging in with lookForModelUser true, the returned +// entity will be modelUserEntity, not *state.User (external users +// don't have user entries) or *state.ModelUser (we +// don't want to lose the local user information associated with that). 
+func checkCreds(st *state.State, req params.LoginRequest, lookForModelUser bool, authenticator authentication.EntityAuthenticator) (state.Entity, *time.Time, error) { + var tag names.Tag + if req.AuthTag != "" { + var err error + tag, err = names.ParseTag(req.AuthTag) + if err != nil { + return nil, nil, errors.Trace(err) + } + } + var entityFinder authentication.EntityFinder = st + if lookForModelUser { + // When looking up model users, use a custom + // entity finder that looks up both the local user (if the user + // tag is in the local domain) and the model user. + entityFinder = modelUserEntityFinder{st} + } + entity, err := authenticator.Authenticate(entityFinder, tag, req) if err != nil { return nil, nil, errors.Trace(err) } - authenticator, err := authentication.FindEntityAuthenticator(entity) - if err != nil { - return nil, nil, err - } - - if err = authenticator.Authenticate(entity, req.Credentials, req.Nonce); err != nil { - logger.Debugf("bad credentials") - return nil, nil, err - } - // For user logins, update the last login time. - // NOTE: this code path is only for local users. When we support remote - // user logins with bearer tokens, we will need to make sure that we also - // update the last connection times for the environment users there. var lastLogin *time.Time - if user, ok := entity.(*state.User); ok { - userLastLogin, err := user.LastLogin() + if entity, ok := entity.(loginEntity); ok { + userLastLogin, err := entity.LastLogin() if err != nil && !state.IsNeverLoggedInError(err) { return nil, nil, errors.Trace(err) } - if lookForEnvUser { - envUser, err := st.EnvironmentUser(user.UserTag()) - if err != nil { - return nil, nil, errors.Wrap(err, common.ErrBadCreds) - } - // The last connection for the environment takes precedence over - // the local user last login time. - userLastLogin, err = envUser.LastConnection() - if err != nil && !state.IsNeverConnectedError(err) { - return nil, nil, errors.Trace(err) - } - envUser.UpdateLastConnection() - } - // Only update the user's last login time if it is a successful - // login, meaning that if we are logging into an environment, make - // sure that there is an environment user in that environment for - // this user. - user.UpdateLastLogin() + entity.UpdateLastLogin() lastLogin = &userLastLogin } - return entity, lastLogin, nil } +// loginEntity defines the interface needed to log in as a user. +// Notable implementations are *state.User and *modelUserEntity. +type loginEntity interface { + state.Entity + state.Authenticator + LastLogin() (time.Time, error) + UpdateLastLogin() error +} + +// modelUserEntityFinder implements EntityFinder by returning a +// loginEntity value for users, ensuring that the user exists in the +// state's current model as well as retrieving more global +// authentication details such as the password. +type modelUserEntityFinder struct { + st *state.State +} + +// FindEntity implements authentication.EntityFinder.FindEntity. 
+func (f modelUserEntityFinder) FindEntity(tag names.Tag) (state.Entity, error) { + utag, ok := tag.(names.UserTag) + if !ok { + return f.st.FindEntity(tag) + } + modelUser, err := f.st.ModelUser(utag) + if err != nil { + return nil, err + } + u := &modelUserEntity{ + modelUser: modelUser, + } + if utag.IsLocal() { + user, err := f.st.User(utag) + if err != nil { + return nil, err + } + u.user = user + } + return u, nil +} + +var _ loginEntity = &modelUserEntity{} + +// modelUserEntity encapsulates a model user +// and, if the user is local, the local state user +// as well. This enables us to implement FindEntity +// in such a way that the authentication mechanisms +// can work without knowing these details. +type modelUserEntity struct { + modelUser *state.ModelUser + user *state.User +} + +// Refresh implements state.Authenticator.Refresh. +func (u *modelUserEntity) Refresh() error { + if u.user == nil { + return nil + } + return u.user.Refresh() +} + +// SetPassword implements state.Authenticator.SetPassword +// by setting the password on the local user. +func (u *modelUserEntity) SetPassword(pass string) error { + if u.user == nil { + return errors.New("cannot set password on external user") + } + return u.user.SetPassword(pass) +} + +// PasswordValid implements state.Authenticator.PasswordValid. +func (u *modelUserEntity) PasswordValid(pass string) bool { + if u.user == nil { + return false + } + return u.user.PasswordValid(pass) +} + +// Tag implements state.Entity.Tag. +func (u *modelUserEntity) Tag() names.Tag { + return u.modelUser.UserTag() +} + +// LastLogin implements loginEntity.LastLogin. +func (u *modelUserEntity) LastLogin() (time.Time, error) { + // The last connection for the model takes precedence over + // the local user last login time. + t, err := u.modelUser.LastConnection() + if state.IsNeverConnectedError(err) { + if u.user != nil { + // There's a global user, so use that login time instead. + return u.user.LastLogin() + } + // Since we're implementing LastLogin, we need + // to implement LastLogin error semantics too. + err = state.NeverLoggedInError(err.Error()) + } + return t, err +} + +// UpdateLastLogin implements loginEntity.UpdateLastLogin. +func (u *modelUserEntity) UpdateLastLogin() error { + err := u.modelUser.UpdateLastConnection() + if u.user != nil { + err1 := u.user.UpdateLastLogin() + if err == nil { + err = err1 + } + } + return err +} + func checkForValidMachineAgent(entity state.Entity, req params.LoginRequest) error { // If this is a machine agent connecting, we need to check the // nonce matches, otherwise the wrong agent might be trying to @@ -310,6 +420,7 @@ // machinePinger wraps a presence.Pinger. type machinePinger struct { *presence.Pinger + mongoUnavailable *uint32 } // Stop implements Pinger.Stop() as Pinger.Kill(), needed at @@ -318,6 +429,14 @@ if err := p.Pinger.Stop(); err != nil { return err } + if atomic.LoadUint32(p.mongoUnavailable) > 0 { + // Kill marks the agent as not-present. If the + // Mongo server is known to be unavailable, then + // we do not perform this operation; the agent + // will naturally become "not present" when its + // presence expires.
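// (Editor's assumption, for illustration: the mongoUnavailable flag is
// expected to be raised elsewhere, e.g. via
// atomic.StoreUint32(mongoUnavailable, 1), when the server loses its
// Mongo connection; that is what makes the atomic.LoadUint32 check above
// a safe cross-goroutine signal.)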
+ return nil + } return p.Pinger.Kill() } @@ -336,7 +455,7 @@ return err } - root.getResources().Register(&machinePinger{pinger}) + root.getResources().Register(&machinePinger{pinger, root.mongoUnavailable}) action := func() { if err := root.getRpcConn().Close(); err != nil { logger.Errorf("error closing the RPC connection: %v", err) } @@ -353,7 +472,7 @@ err error } -// Admin conforms to the same API as initialRoot, but we'll always return (nil, err) -func (r *errRoot) Admin(id string) (*adminV0, error) { +// FindMethod conforms to the same API as initialRoot, but we'll always return (nil, err) +func (r *errRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { return nil, r.err } === modified file 'src/github.com/juju/juju/apiserver/admin_test.go' --- src/github.com/juju/juju/apiserver/admin_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/admin_test.go 2016-03-22 15:18:22 +0000 @@ -18,10 +18,12 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/api" + apitesting "github.com/juju/juju/api/testing" "github.com/juju/juju/apiserver" "github.com/juju/juju/apiserver/params" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" @@ -39,7 +41,7 @@ var _ = gc.Suite(&loginSuite{ baseLoginSuite{ setAdminApi: func(srv *apiserver.Server) { - apiserver.SetAdminApiVersions(srv, 0, 1, 2) + apiserver.SetAdminApiVersions(srv, 3) }, }, }) @@ -49,52 +51,12 @@ loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) } -type loginV0Suite struct { - loginSuite -} - -var _ = gc.Suite(&loginV0Suite{ - loginSuite{ - baseLoginSuite{ - setAdminApi: func(srv *apiserver.Server) { - apiserver.SetAdminApiVersions(srv, 0) - }, - }, - }, -}) - -type loginV1Suite struct { - loginSuite -} - -var _ = gc.Suite(&loginV1Suite{ - loginSuite{ - baseLoginSuite{ - setAdminApi: func(srv *apiserver.Server) { - apiserver.SetAdminApiVersions(srv, 1) - }, - }, - }, -}) - -type loginAncientSuite struct { - baseLoginSuite -} - -var _ = gc.Suite(&loginAncientSuite{ - baseLoginSuite{ - setAdminApi: func(srv *apiserver.Server) { - apiserver.SetPreFacadeAdminApi(srv) - }, - }, -}) - func (s *baseLoginSuite) setupServer(c *gc.C) (api.Connection, func()) { - return s.setupServerForEnvironment(c, s.State.EnvironTag()) + return s.setupServerForEnvironment(c, s.State.ModelTag()) } -func (s *baseLoginSuite) setupServerForEnvironment(c *gc.C, envTag names.EnvironTag) (api.Connection, func()) { - info, cleanup := s.setupServerForEnvironmentWithValidator(c, envTag, nil) +func (s *baseLoginSuite) setupServerForEnvironment(c *gc.C, modelTag names.ModelTag) (api.Connection, func()) { + info, cleanup := s.setupServerForEnvironmentWithValidator(c, modelTag, nil) st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) return st, func() { @@ -113,6 +75,23 @@ return info, cleanup } +func (s *loginSuite) TestLoginWithInvalidTag(c *gc.C) { + info := s.APIInfo(c) + info.Tag = nil + info.Password = "" + st, err := api.Open(info, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + + request := &params.LoginRequest{ + AuthTag: "bar", + Credentials: "password", + } + + var response params.LoginResult + err = st.APICall("Admin", 3, "", "Login", request, &response) + c.Assert(err, gc.ErrorMatches, `.*"bar" is not a valid tag.*`) +} + func (s *loginSuite) TestBadLogin(c *gc.C) { // Start our own server so we can control when the first
login // happens. Otherwise in JujuConnSuite.SetUpTest api.Open is @@ -123,24 +102,26 @@ adminUser := s.AdminUserTag(c) for i, t := range []struct { - tag string + tag names.Tag password string - err string + err error code string }{{ - tag: adminUser.String(), + tag: adminUser, password: "wrong password", - err: "invalid entity name or password", - code: params.CodeUnauthorized, - }, { - tag: "user-unknown", - password: "password", - err: "invalid entity name or password", - code: params.CodeUnauthorized, - }, { - tag: "bar", - password: "password", - err: `"bar" is not a valid tag`, + err: &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }, + code: params.CodeUnauthorized, + }, { + tag: names.NewUserTag("unknown"), + password: "password", + err: &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }, + code: params.CodeUnauthorized, }} { c.Logf("test %d; entity %q; password %q", i, t.tag, t.password) // Note that Open does not log in if the tag and password @@ -155,15 +136,21 @@ defer st.Close() _, err = st.Machiner().Machine(names.NewMachineTag("0")) - c.Assert(err, gc.ErrorMatches, `.*unknown object type "Machiner"`) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown object type "Machiner"`, + Code: "not implemented", + }) // Since these are user login tests, the nonce is empty. err = st.Login(t.tag, t.password, "") - c.Assert(err, gc.ErrorMatches, t.err) + c.Assert(errors.Cause(err), gc.DeepEquals, t.err) c.Assert(params.ErrCode(err), gc.Equals, t.code) _, err = st.Machiner().Machine(names.NewMachineTag("0")) - c.Assert(err, gc.ErrorMatches, `.*unknown object type "Machiner"`) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown object type "Machiner"`, + Code: "not implemented", + }) }() } } @@ -181,22 +168,23 @@ u := s.Factory.MakeUser(c, &factory.UserParams{Password: password, Disabled: true}) _, err = st.Client().Status([]string{}) - c.Assert(err, gc.ErrorMatches, `.*unknown object type "Client"`) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown object type "Client"`, + Code: "not implemented", + }) // Since these are user login tests, the nonce is empty. 
- err = st.Login(u.Tag().String(), password, "") - c.Assert(err, gc.ErrorMatches, "invalid entity name or password") + err = st.Login(u.Tag(), password, "") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }) _, err = st.Client().Status([]string{}) - c.Assert(err, gc.ErrorMatches, `.*unknown object type "Client"`) -} - -func (s *loginV0Suite) TestLoginSetsLogIdentifier(c *gc.C) { - s.runLoginSetsLogIdentifier(c) -} - -func (s *loginV1Suite) TestLoginSetsLogIdentifier(c *gc.C) { - s.runLoginSetsLogIdentifier(c) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown object type "Client"`, + Code: "not implemented", + }) } func (s *baseLoginSuite) runLoginSetsLogIdentifier(c *gc.C) { @@ -491,77 +479,14 @@ func (s *loginSuite) TestNonEnvironUserLoginFails(c *gc.C) { info, cleanup := s.setupServerWithValidator(c, nil) defer cleanup() - user := s.Factory.MakeUser(c, &factory.UserParams{Password: "dummy-password", NoEnvUser: true}) + user := s.Factory.MakeUser(c, &factory.UserParams{Password: "dummy-password", NoModelUser: true}) info.Password = "dummy-password" info.Tag = user.UserTag() _, err := api.Open(info, fastDialOpts) - c.Assert(err, gc.ErrorMatches, "invalid entity name or password") -} - -func (s *loginV0Suite) TestLoginReportsEnvironTag(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - // If we call api.Open without giving a username and password, then it - // won't call Login, so we can call it ourselves. - // We Login without passing an EnvironTag, to show that it still lets - // us in, and that we can find out the real EnvironTag from the - // response. - adminUser := s.AdminUserTag(c) - var result params.LoginResult - creds := &params.Creds{ - AuthTag: adminUser.String(), - Password: "dummy-secret", - } - err := st.APICall("Admin", 0, "", "Login", creds, &result) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.EnvironTag, gc.Equals, s.State.EnvironTag().String()) -} - -func (s *loginV1Suite) TestLoginReportsEnvironAndServerTag(c *gc.C) { - otherState := s.Factory.MakeEnvironment(c, nil) - defer otherState.Close() - newEnvTag := otherState.EnvironTag() - - st, cleanup := s.setupServerForEnvironment(c, newEnvTag) - defer cleanup() - var result params.LoginResultV1 - creds := &params.LoginRequest{ - AuthTag: s.AdminUserTag(c).String(), - Credentials: "dummy-secret", - } - err := st.APICall("Admin", 1, "", "Login", creds, &result) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.EnvironTag, gc.Equals, newEnvTag.String()) - c.Assert(result.ServerTag, gc.Equals, s.State.EnvironTag().String()) -} - -func (s *loginV1Suite) TestLoginV1Valid(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - var result params.LoginResultV1 - userTag := s.AdminUserTag(c) - creds := &params.LoginRequest{ - AuthTag: userTag.String(), - Credentials: "dummy-secret", - } - err := st.APICall("Admin", 1, "", "Login", creds, &result) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.UserInfo, gc.NotNil) - c.Assert(result.UserInfo.LastConnection, gc.NotNil) - c.Assert(result.UserInfo.Identity, gc.Equals, userTag.String()) - c.Assert(time.Now().Unix()-result.UserInfo.LastConnection.Unix() < 300, jc.IsTrue) -} - -func (s *loginV1Suite) TestLoginRejectV0(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - var result params.LoginResultV1 - req := &params.LoginRequest{ - AuthTag: s.AdminUserTag(c).String(), - Credentials: "dummy-secret", - } - err := st.APICall("Admin", 0, "",
"Login", req, &result) - c.Assert(err, gc.NotNil) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }) } func (s *loginSuite) TestLoginValidationSuccess(c *gc.C) { @@ -573,7 +498,7 @@ // Ensure an API call that would be restricted during // upgrades works after a normal login. - err := st.APICall("Client", 0, "", "DestroyEnvironment", nil, nil) + err := st.APICall("Client", 1, "", "DestroyModel", nil, nil) c.Assert(err, jc.ErrorIsNil) } s.checkLoginWithValidator(c, validator, checker) @@ -598,10 +523,10 @@ c.Assert(loginErr, gc.IsNil) var statusResult params.FullStatus - err := st.APICall("Client", 0, "", "FullStatus", params.StatusParams{}, &statusResult) + err := st.APICall("Client", 1, "", "FullStatus", params.StatusParams{}, &statusResult) c.Assert(err, jc.ErrorIsNil) - err = st.APICall("Client", 0, "", "DestroyEnvironment", nil, nil) + err = st.APICall("Client", 1, "", "DestroyModel", nil, nil) c.Assert(err, gc.ErrorMatches, ".*upgrade in progress - Juju functionality is limited.*") } s.checkLoginWithValidator(c, validator, checker) @@ -617,7 +542,7 @@ checkLogin := func(tag names.Tag) { st := s.openAPIWithoutLogin(c, info) defer st.Close() - err := st.Login(tag.String(), "dummy-secret", "nonce") + err := st.Login(tag, "dummy-secret", "nonce") c.Assert(err, gc.ErrorMatches, "something") } checkLogin(names.NewUserTag("definitelywontexist")) @@ -635,22 +560,25 @@ // Ensure not already logged in. _, err := st.Machiner().Machine(names.NewMachineTag("0")) - c.Assert(err, gc.ErrorMatches, `*.unknown object type "Machiner"`) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown object type "Machiner"`, + Code: "not implemented", + }) adminUser := s.AdminUserTag(c) // Since these are user login tests, the nonce is empty. 
- err = st.Login(adminUser.String(), "dummy-secret", "") + err = st.Login(adminUser, "dummy-secret", "") checker(c, err, st) } func (s *baseLoginSuite) setupServerWithValidator(c *gc.C, validator apiserver.LoginValidator) (*api.Info, func()) { - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - return s.setupServerForEnvironmentWithValidator(c, env.EnvironTag(), validator) + return s.setupServerForEnvironmentWithValidator(c, env.ModelTag(), validator) } -func (s *baseLoginSuite) setupServerForEnvironmentWithValidator(c *gc.C, envTag names.EnvironTag, validator apiserver.LoginValidator) (*api.Info, func()) { +func (s *baseLoginSuite) setupServerForEnvironmentWithValidator(c *gc.C, modelTag names.ModelTag, validator apiserver.LoginValidator) (*api.Info, func()) { listener, err := net.Listen("tcp", "127.0.0.1:0") c.Assert(err, jc.ErrorIsNil) srv, err := apiserver.NewServer( @@ -661,20 +589,18 @@ Key: []byte(coretesting.ServerKey), Validator: validator, Tag: names.NewMachineTag("0"), + LogDir: c.MkDir(), }, ) c.Assert(err, jc.ErrorIsNil) - if s.setAdminApi != nil { - s.setAdminApi(srv) - } else { - panic(nil) - } + c.Assert(s.setAdminApi, gc.NotNil) + s.setAdminApi(srv) info := &api.Info{ - Tag: nil, - Password: "", - EnvironTag: envTag, - Addrs: []string{srv.Addr().String()}, - CACert: coretesting.CACert, + Tag: nil, + Password: "", + ModelTag: modelTag, + Addrs: []string{srv.Addr().String()}, + CACert: coretesting.CACert, } return info, func() { err := srv.Stop() @@ -690,103 +616,37 @@ return st } -func (s *loginV0Suite) TestLoginReportsAvailableFacadeVersions(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - var result params.LoginResult - adminUser := s.AdminUserTag(c) - creds := &params.Creds{ - AuthTag: adminUser.String(), - Password: "dummy-secret", - } - err := st.APICall("Admin", 0, "", "Login", creds, &result) - c.Assert(err, jc.ErrorIsNil) - c.Check(result.Facades, gc.Not(gc.HasLen), 0) - // as a sanity check, ensure that we have Client v0 - asMap := make(map[string][]int, len(result.Facades)) - for _, facade := range result.Facades { - asMap[facade.Name] = facade.Versions - } - clientVersions := asMap["Client"] - c.Assert(len(clientVersions), jc.GreaterThan, 0) - c.Check(clientVersions[0], gc.Equals, 0) -} - -func (s *loginV0Suite) TestLoginRejectV1(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - var result params.LoginResultV1 - creds := &params.LoginRequest{ - AuthTag: s.AdminUserTag(c).String(), - Credentials: "dummy-secret", - } - err := st.APICall("Admin", 1, "", "Login", creds, &result) - // You shouldn't be able to log into a V0 server with V1 client call - // This should fail & API client will degrade to a V0 login attempt.
- c.Assert(err, gc.NotNil) -} - -func (s *loginV1Suite) TestLoginReportsAvailableFacadeVersions(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - var result params.LoginResultV1 - adminUser := s.AdminUserTag(c) - creds := &params.LoginRequest{ - AuthTag: adminUser.String(), - Credentials: "dummy-secret", - } - err := st.APICall("Admin", 1, "", "Login", creds, &result) - c.Assert(err, jc.ErrorIsNil) - c.Check(result.Facades, gc.Not(gc.HasLen), 0) - // as a sanity check, ensure that we have Client v0 - asMap := make(map[string][]int, len(result.Facades)) - for _, facade := range result.Facades { - asMap[facade.Name] = facade.Versions - } - clientVersions := asMap["Client"] - c.Assert(len(clientVersions), jc.GreaterThan, 0) - c.Check(clientVersions[0], gc.Equals, 0) -} - -func (s *loginAncientSuite) TestAncientLoginDegrades(c *gc.C) { - st, cleanup := s.setupServer(c) - defer cleanup() - adminUser := s.AdminUserTag(c) - err := st.Login(adminUser.String(), "dummy-secret", "") - c.Assert(err, jc.ErrorIsNil) - envTag, err := st.EnvironTag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(envTag.String(), gc.Equals, apiserver.PreFacadeEnvironTag.String()) -} - -func (s *loginSuite) TestStateServerEnvironment(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - - c.Assert(info.EnvironTag, gc.Equals, s.State.EnvironTag()) - st, err := api.Open(info, fastDialOpts) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - - adminUser := s.AdminUserTag(c) - err = st.Login(adminUser.String(), "dummy-secret", "") - c.Assert(err, jc.ErrorIsNil) - - s.assertRemoteEnvironment(c, st, s.State.EnvironTag()) -} - -func (s *loginSuite) TestStateServerEnvironmentBadCreds(c *gc.C) { - info, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - - c.Assert(info.EnvironTag, gc.Equals, s.State.EnvironTag()) - st, err := api.Open(info, fastDialOpts) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - - adminUser := s.AdminUserTag(c) - err = st.Login(adminUser.String(), "bad-password", "") - c.Assert(err, gc.ErrorMatches, `invalid entity name or password`) +func (s *loginSuite) TestControllerModel(c *gc.C) { + info, cleanup := s.setupServerWithValidator(c, nil) + defer cleanup() + + c.Assert(info.ModelTag, gc.Equals, s.State.ModelTag()) + st, err := api.Open(info, fastDialOpts) + c.Assert(err, jc.ErrorIsNil) + defer st.Close() + + adminUser := s.AdminUserTag(c) + err = st.Login(adminUser, "dummy-secret", "") + c.Assert(err, jc.ErrorIsNil) + + s.assertRemoteEnvironment(c, st, s.State.ModelTag()) +} + +func (s *loginSuite) TestControllerModelBadCreds(c *gc.C) { + info, cleanup := s.setupServerWithValidator(c, nil) + defer cleanup() + + c.Assert(info.ModelTag, gc.Equals, s.State.ModelTag()) + st, err := api.Open(info, fastDialOpts) + c.Assert(err, jc.ErrorIsNil) + defer st.Close() + + adminUser := s.AdminUserTag(c) + err = st.Login(adminUser, "bad-password", "") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `invalid entity name or password`, + Code: "unauthorized access", + }) } func (s *loginSuite) TestNonExistentEnvironment(c *gc.C) { @@ -795,29 +655,34 @@ uuid, err := utils.NewUUID() c.Assert(err, jc.ErrorIsNil) - info.EnvironTag = names.NewEnvironTag(uuid.String()) + info.ModelTag = names.NewModelTag(uuid.String()) st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) defer st.Close() adminUser := s.AdminUserTag(c) - err = st.Login(adminUser.String(), "dummy-secret", "") - expectedError := fmt.Sprintf("unknown environment: %q",
uuid) - c.Assert(err, gc.ErrorMatches, expectedError) + err = st.Login(adminUser, "dummy-secret", "") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: fmt.Sprintf("unknown model: %q", uuid), + Code: "not found", + }) } func (s *loginSuite) TestInvalidEnvironment(c *gc.C) { info, cleanup := s.setupServerWithValidator(c, nil) defer cleanup() - info.EnvironTag = names.NewEnvironTag("rubbish") + info.ModelTag = names.NewModelTag("rubbish") st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) defer st.Close() adminUser := s.AdminUserTag(c) - err = st.Login(adminUser.String(), "dummy-secret", "") - c.Assert(err, gc.ErrorMatches, `unknown environment: "rubbish"`) + err = st.Login(adminUser, "dummy-secret", "") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `unknown model: "rubbish"`, + Code: "not found", + }) } func (s *loginSuite) TestOtherEnvironment(c *gc.C) { @@ -825,18 +690,18 @@ defer cleanup() envOwner := s.Factory.MakeUser(c, nil) - envState := s.Factory.MakeEnvironment(c, &factory.EnvParams{ + envState := s.Factory.MakeModel(c, &factory.ModelParams{ Owner: envOwner.UserTag(), }) defer envState.Close() - info.EnvironTag = envState.EnvironTag() + info.ModelTag = envState.ModelTag() st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) defer st.Close() - err = st.Login(envOwner.UserTag().String(), "password", "") + err = st.Login(envOwner.UserTag(), "password", "") c.Assert(err, jc.ErrorIsNil) - s.assertRemoteEnvironment(c, st, envState.EnvironTag()) + s.assertRemoteEnvironment(c, st, envState.ModelTag()) } func (s *loginSuite) TestMachineLoginOtherEnvironment(c *gc.C) { @@ -848,10 +713,10 @@ defer cleanup() envOwner := s.Factory.MakeUser(c, nil) - envState := s.Factory.MakeEnvironment(c, &factory.EnvParams{ + envState := s.Factory.MakeModel(c, &factory.ModelParams{ Owner: envOwner.UserTag(), ConfigAttrs: map[string]interface{}{ - "state-server": false, + "controller": false, }, Prepare: true, }) @@ -862,64 +727,67 @@ Nonce: "nonce", }) - info.EnvironTag = envState.EnvironTag() + info.ModelTag = envState.ModelTag() st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) defer st.Close() - err = st.Login(machine.Tag().String(), password, "nonce") + err = st.Login(machine.Tag(), password, "nonce") c.Assert(err, jc.ErrorIsNil) } -func (s *loginSuite) TestOtherEnvironmentFromStateServer(c *gc.C) { +func (s *loginSuite) TestOtherEnvironmentFromController(c *gc.C) { info, cleanup := s.setupServerWithValidator(c, nil) defer cleanup() machine, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ - Jobs: []state.MachineJob{state.JobManageEnviron}, + Jobs: []state.MachineJob{state.JobManageModel}, }) - envState := s.Factory.MakeEnvironment(c, nil) + envState := s.Factory.MakeModel(c, nil) defer envState.Close() - info.EnvironTag = envState.EnvironTag() + info.ModelTag = envState.ModelTag() st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) defer st.Close() - err = st.Login(machine.Tag().String(), password, "nonce") + err = st.Login(machine.Tag(), password, "nonce") c.Assert(err, jc.ErrorIsNil) } -func (s *loginSuite) TestOtherEnvironmentWhenNotStateServer(c *gc.C) { +func (s *loginSuite) TestOtherEnvironmentWhenNotController(c *gc.C) { info, cleanup := s.setupServerWithValidator(c, nil) defer cleanup() machine, password := s.Factory.MakeMachineReturningPassword(c, nil) - envState := s.Factory.MakeEnvironment(c, nil) + envState := s.Factory.MakeModel(c, nil) 
defer envState.Close() - info.EnvironTag = envState.EnvironTag() + info.ModelTag = envState.ModelTag() st, err := api.Open(info, fastDialOpts) c.Assert(err, jc.ErrorIsNil) defer st.Close() - err = st.Login(machine.Tag().String(), password, "nonce") - c.Assert(err, gc.ErrorMatches, `invalid entity name or password`) + err = st.Login(machine.Tag(), password, "nonce") + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }) } -func (s *loginSuite) assertRemoteEnvironment(c *gc.C, st api.Connection, expected names.EnvironTag) { +func (s *loginSuite) assertRemoteEnvironment(c *gc.C, st api.Connection, expected names.ModelTag) { // Look at what the api thinks it has. - tag, err := st.EnvironTag() + tag, err := st.ModelTag() c.Assert(err, jc.ErrorIsNil) c.Assert(tag, gc.Equals, expected) // Look at what the api Client thinks it has. client := st.Client() - // EnvironmentUUID looks at the env tag on the api state connection. - c.Assert(client.EnvironmentUUID(), gc.Equals, expected.Id()) + // ModelUUID looks at the env tag on the api state connection. + c.Assert(client.ModelUUID(), gc.Equals, expected.Id()) - // EnvironmentInfo calls a remote method that looks up the environment. - info, err := client.EnvironmentInfo() + // ModelInfo calls a remote method that looks up the environment. + info, err := client.ModelInfo() c.Assert(err, jc.ErrorIsNil) c.Assert(info.UUID, gc.Equals, expected.Id()) } @@ -953,10 +821,69 @@ c.Assert(lastLogin.After(startTime), jc.IsTrue) // The env user is also updated. - envUser, err := s.State.EnvironmentUser(user.UserTag()) + modelUser, err := s.State.ModelUser(user.UserTag()) c.Assert(err, jc.ErrorIsNil) - when, err := envUser.LastConnection() + when, err := modelUser.LastConnection() c.Assert(err, jc.ErrorIsNil) c.Assert(when, gc.NotNil) c.Assert(when.After(startTime), jc.IsTrue) } + +var _ = gc.Suite(&macaroonLoginSuite{}) + +type macaroonLoginSuite struct { + apitesting.MacaroonSuite +} + +func (s *macaroonLoginSuite) TestLoginToController(c *gc.C) { + // Note that currently we cannot use macaroon auth + // to log into the controller rather than an environment + // because there's no place to store the fact that + // a given external user is allowed access to the controller. + s.DischargerLogin = func() string { + return "test@somewhere" + } + info := s.APIInfo(c) + + // Zero the environment tag so that we log into the controller + // not the environment. 
+ info.ModelTag = names.ModelTag{} + + client, err := api.Open(info, api.DialOpts{}) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }) + c.Assert(client, gc.Equals, nil) +} + +func (s *macaroonLoginSuite) TestLoginToEnvironmentSuccess(c *gc.C) { + s.AddModelUser(c, "test@somewhere") + s.DischargerLogin = func() string { + return "test@somewhere" + } + client, err := api.Open(s.APIInfo(c), api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + client.Close() +} + +func (s *macaroonLoginSuite) TestFailedToObtainDischargeLogin(c *gc.C) { + s.DischargerLogin = func() string { + return "" + } + client, err := api.Open(s.APIInfo(c), api.DialOpts{}) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://.*": third party refused discharge: cannot discharge: login denied by discharger`) + c.Assert(client, gc.Equals, nil) +} + +func (s *macaroonLoginSuite) TestUnknownUserLogin(c *gc.C) { + s.DischargerLogin = func() string { + return "testUnknown@somewhere" + } + client, err := api.Open(s.APIInfo(c), api.DialOpts{}) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "invalid entity name or password", + Code: "unauthorized access", + }) + c.Assert(client, gc.Equals, nil) +} === removed file 'src/github.com/juju/juju/apiserver/adminv0.go' --- src/github.com/juju/juju/apiserver/adminv0.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/adminv0.go 1970-01-01 00:00:00 +0000 @@ -1,66 +0,0 @@ -// Copyright 2013, 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" -) - -// adminApiV0 implements the API that a client first sees when connecting to -// the API. We start serving a different API once the user has logged in. -type adminApiV0 struct { - admin *adminV0 -} - -type adminV0 struct { - *admin -} - -func newAdminApiV0(srv *Server, root *apiHandler, reqNotifier *requestNotifier) interface{} { - return &adminApiV0{ - admin: &adminV0{ - &admin{ - srv: srv, - root: root, - reqNotifier: reqNotifier, - }, - }, - } -} - -// Admin returns an object that provides API access to methods that can be -// called even when not authenticated. -func (r *adminApiV0) Admin(id string) (*adminV0, error) { - if id != "" { - // Safeguard id for possible future use. - return nil, common.ErrBadId - } - return r.admin, nil -} - -// Login logs in with the provided credentials. All subsequent requests on the -// connection will act as the authenticated user. -func (a *adminV0) Login(c params.Creds) (params.LoginResult, error) { - var fail params.LoginResult - - resultV1, err := a.doLogin(params.LoginRequest{ - AuthTag: c.AuthTag, - Credentials: c.Password, - Nonce: c.Nonce, - }, 0) - if err != nil { - return fail, err - } - - resultV0 := params.LoginResult{ - Servers: resultV1.Servers, - EnvironTag: resultV1.EnvironTag, - Facades: resultV1.Facades, - } - if resultV1.UserInfo != nil { - resultV0.LastConnection = resultV1.UserInfo.LastConnection - } - return resultV0, nil -} === removed file 'src/github.com/juju/juju/apiserver/adminv1.go' --- src/github.com/juju/juju/apiserver/adminv1.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/adminv1.go 1970-01-01 00:00:00 +0000 @@ -1,47 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package apiserver - -import ( - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" -) - -type adminApiV1 struct { - admin *adminV1 -} - -// adminV1 is the only object that unlogged-in clients can access. It holds any -// methods that are needed to log in. -type adminV1 struct { - *admin -} - -func newAdminApiV1(srv *Server, root *apiHandler, reqNotifier *requestNotifier) interface{} { - return &adminApiV1{ - admin: &adminV1{ - &admin{ - srv: srv, - root: root, - reqNotifier: reqNotifier, - }, - }, - } -} - -// Admin returns an object that provides API access to methods that can be -// called even when not authenticated. -func (r *adminApiV1) Admin(id string) (*adminV1, error) { - if id != "" { - // Safeguard id for possible future use. - return nil, common.ErrBadId - } - return r.admin, nil -} - -// Login logs in with the provided credentials. All subsequent requests on the -// connection will act as the authenticated user. -func (a *adminV1) Login(req params.LoginRequest) (params.LoginResultV1, error) { - return a.doLogin(req, 1) -} === removed file 'src/github.com/juju/juju/apiserver/adminv2.go' --- src/github.com/juju/juju/apiserver/adminv2.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/adminv2.go 1970-01-01 00:00:00 +0000 @@ -1,39 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" -) - -type adminApiV2 struct { - *admin -} - -func newAdminApiV2(srv *Server, root *apiHandler, reqNotifier *requestNotifier) interface{} { - return &adminApiV2{ - &admin{ - srv: srv, - root: root, - reqNotifier: reqNotifier, - }, - } -} - -// Admin returns an object that provides API access to methods that can be -// called even when not authenticated. -func (r *adminApiV2) Admin(id string) (*adminApiV2, error) { - if id != "" { - // Safeguard id for possible future use. - return nil, common.ErrBadId - } - return r, nil -} - -// Login logs in with the provided credentials. All subsequent requests on the -// connection will act as the authenticated user. -func (a *adminApiV2) Login(req params.LoginRequest) (params.LoginResultV1, error) { - return a.doLogin(req, 2) -} === removed file 'src/github.com/juju/juju/apiserver/adminv2_test.go' --- src/github.com/juju/juju/apiserver/adminv2_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/adminv2_test.go 1970-01-01 00:00:00 +0000 @@ -1,97 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package apiserver_test - -import ( - "github.com/juju/juju/api" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver" - "github.com/juju/juju/testing/factory" -) - -type loginV2Suite struct { - loginSuite -} - -var _ = gc.Suite(&loginV2Suite{ - loginSuite{ - baseLoginSuite{ - setAdminApi: func(srv *apiserver.Server) { - apiserver.SetAdminApiVersions(srv, 2) - }, - }, - }, -}) - -func (s *loginV2Suite) TestClientLoginToEnvironment(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - - info := s.APIInfo(c) - apiState, err := api.Open(info, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - defer apiState.Close() - - client := apiState.Client() - _, err = client.GetEnvironmentConstraints() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *loginV2Suite) TestClientLoginToServer(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - - info := s.APIInfo(c) - info.EnvironTag = names.EnvironTag{} - apiState, err := api.Open(info, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - defer apiState.Close() - - client := apiState.Client() - _, err = client.GetEnvironmentConstraints() - c.Assert(err, gc.ErrorMatches, `logged in to server, no environment, "Client" not supported`) -} - -func (s *loginV2Suite) TestClientLoginToServerNoAccessToStateServerEnv(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - - password := "shhh..." - user := s.Factory.MakeUser(c, &factory.UserParams{ - NoEnvUser: true, - Password: password, - }) - - info := s.APIInfo(c) - info.Tag = user.Tag() - info.Password = password - info.EnvironTag = names.EnvironTag{} - apiState, err := api.Open(info, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - defer apiState.Close() - // The user now has last login updated. - err = user.Refresh() - c.Assert(err, jc.ErrorIsNil) - lastLogin, err := user.LastLogin() - c.Assert(err, jc.ErrorIsNil) - c.Assert(lastLogin, gc.NotNil) -} - -func (s *loginV2Suite) TestClientLoginToRootOldClient(c *gc.C) { - _, cleanup := s.setupServerWithValidator(c, nil) - defer cleanup() - - info := s.APIInfo(c) - info.EnvironTag = names.EnvironTag{} - apiState, err := api.OpenWithVersion(info, api.DialOpts{}, 1) - c.Assert(err, jc.ErrorIsNil) - defer apiState.Close() - - client := apiState.Client() - _, err = client.GetEnvironmentConstraints() - c.Assert(err, jc.ErrorIsNil) -} === added file 'src/github.com/juju/juju/apiserver/adminv3.go' --- src/github.com/juju/juju/apiserver/adminv3.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/adminv3.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" +) + +type adminApiV3 struct { + *admin +} + +func newAdminApiV3(srv *Server, root *apiHandler, reqNotifier *requestNotifier) interface{} { + return &adminApiV3{ + &admin{ + srv: srv, + root: root, + reqNotifier: reqNotifier, + }, + } +} + +// Admin returns an object that provides API access to methods that can be +// called even when not authenticated. +func (r *adminApiV3) Admin(id string) (*adminApiV3, error) { + if id != "" { + // Safeguard id for possible future use. + return nil, common.ErrBadId + } + return r, nil +} + +// Login logs in with the provided credentials. All subsequent requests on the +// connection will act as the authenticated user. 
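// Editor's illustration (not part of the patch): tests elsewhere in this
// diff drive the facade with a raw call of the form
//
//	var result params.LoginResult
//	err := st.APICall("Admin", 3, "", "Login", &params.LoginRequest{
//		AuthTag:     "user-admin",    // placeholder value
//		Credentials: "some-password", // placeholder value
//	}, &result)
//
// compare TestLoginWithInvalidTag in admin_test.go above.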
+func (a *adminApiV3) Login(req params.LoginRequest) (params.LoginResultV1, error) { + return a.doLogin(req, 3) +} === added file 'src/github.com/juju/juju/apiserver/adminv3_test.go' --- src/github.com/juju/juju/apiserver/adminv3_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/adminv3_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver_test + +import ( + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api" + "github.com/juju/juju/apiserver" + "github.com/juju/juju/rpc" + "github.com/juju/juju/testing/factory" +) + +type loginV3Suite struct { + loginSuite +} + +var _ = gc.Suite(&loginV3Suite{ + loginSuite{ + baseLoginSuite{ + setAdminApi: func(srv *apiserver.Server) { + apiserver.SetAdminApiVersions(srv, 3) + }, + }, + }, +}) + +func (s *loginV3Suite) TestClientLoginToEnvironment(c *gc.C) { + _, cleanup := s.setupServerWithValidator(c, nil) + defer cleanup() + + info := s.APIInfo(c) + apiState, err := api.Open(info, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + defer apiState.Close() + + client := apiState.Client() + _, err = client.GetModelConstraints() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *loginV3Suite) TestClientLoginToServer(c *gc.C) { + _, cleanup := s.setupServerWithValidator(c, nil) + defer cleanup() + + info := s.APIInfo(c) + info.ModelTag = names.ModelTag{} + apiState, err := api.Open(info, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + defer apiState.Close() + + client := apiState.Client() + _, err = client.GetModelConstraints() + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: `logged in to server, no model, "Client" not supported`, + Code: "not supported", + }) +} + +func (s *loginV3Suite) TestClientLoginToServerNoAccessToControllerEnv(c *gc.C) { + _, cleanup := s.setupServerWithValidator(c, nil) + defer cleanup() + + password := "shhh..." + user := s.Factory.MakeUser(c, &factory.UserParams{ + NoModelUser: true, + Password: password, + }) + + info := s.APIInfo(c) + info.Tag = user.Tag() + info.Password = password + info.ModelTag = names.ModelTag{} + apiState, err := api.Open(info, api.DialOpts{}) + c.Assert(err, jc.ErrorIsNil) + defer apiState.Close() + // The user now has last login updated. + err = user.Refresh() + c.Assert(err, jc.ErrorIsNil) + lastLogin, err := user.LastLogin() + c.Assert(err, jc.ErrorIsNil) + c.Assert(lastLogin, gc.NotNil) +} + +func (s *loginV3Suite) TestClientLoginToRootOldClient(c *gc.C) { + _, cleanup := s.setupServerWithValidator(c, nil) + defer cleanup() + + info := s.APIInfo(c) + info.ModelTag = names.ModelTag{} + _, err := api.OpenWithVersion(info, api.DialOpts{}, 2) + c.Assert(err, gc.ErrorMatches, ".*this version of Juju does not support login from old clients.*") +} === modified file 'src/github.com/juju/juju/apiserver/agent/agent.go' --- src/github.com/juju/juju/apiserver/agent/agent.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/agent/agent.go 2016-03-22 15:18:22 +0000 @@ -1,15 +1,134 @@ // Copyright 2013 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. -// The machine package implements the API interfaces +// Package agent implements the API interfaces // used by the machine agent. 
+ package agent import ( + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/mongo" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" ) func init() { - common.RegisterStandardFacade("Agent", 0, NewAgentAPIV0) - common.RegisterStandardFacade("Agent", 1, NewAgentAPIV1) + common.RegisterStandardFacade("Agent", 2, NewAgentAPIV2) +} + +// AgentAPIV2 implements the version 2 of the API provided to an agent. +type AgentAPIV2 struct { + *common.PasswordChanger + *common.RebootFlagClearer + *common.ModelWatcher + + st *state.State + auth common.Authorizer +} + +// NewAgentAPIV2 returns an object implementing version 2 of the Agent API +// with the given authorizer representing the currently logged in client. +func NewAgentAPIV2(st *state.State, resources *common.Resources, auth common.Authorizer) (*AgentAPIV2, error) { + // Agents are defined to be any user that's not a client user. + if !auth.AuthMachineAgent() && !auth.AuthUnitAgent() { + return nil, common.ErrPerm + } + getCanChange := func() (common.AuthFunc, error) { + return auth.AuthOwner, nil + } + return &AgentAPIV2{ + PasswordChanger: common.NewPasswordChanger(st, getCanChange), + RebootFlagClearer: common.NewRebootFlagClearer(st, getCanChange), + ModelWatcher: common.NewModelWatcher(st, resources, auth), + st: st, + auth: auth, + }, nil +} + +func (api *AgentAPIV2) GetEntities(args params.Entities) params.AgentGetEntitiesResults { + results := params.AgentGetEntitiesResults{ + Entities: make([]params.AgentGetEntitiesResult, len(args.Entities)), + } + for i, entity := range args.Entities { + tag, err := names.ParseTag(entity.Tag) + if err != nil { + results.Entities[i].Error = common.ServerError(err) + continue + } + result, err := api.getEntity(tag) + result.Error = common.ServerError(err) + results.Entities[i] = result + } + return results +} + +func (api *AgentAPIV2) getEntity(tag names.Tag) (result params.AgentGetEntitiesResult, err error) { + // Allow only for the owner agent. + // Note: having a bulk API call for this is utter madness, given that + // this check means we can only ever return a single object. + if !api.auth.AuthOwner(tag) { + err = common.ErrPerm + return + } + entity0, err := api.st.FindEntity(tag) + if err != nil { + return + } + entity, ok := entity0.(state.Lifer) + if !ok { + err = common.NotSupportedError(tag, "life cycles") + return + } + result.Life = params.Life(entity.Life().String()) + if machine, ok := entity.(*state.Machine); ok { + result.Jobs = stateJobsToAPIParamsJobs(machine.Jobs()) + result.ContainerType = machine.ContainerType() + } + return +} + +func (api *AgentAPIV2) StateServingInfo() (result state.StateServingInfo, err error) { + if !api.auth.AuthModelManager() { + err = common.ErrPerm + return + } + return api.st.StateServingInfo() +} + +// MongoIsMaster is called by the IsMaster API call +// instead of mongo.IsMaster. It exists so it can +// be overridden by tests. 
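// Editor's sketch (assumptions flagged): a test could stub this variable
// out along the lines of
//
//	s.PatchValue(&agent.MongoIsMaster, func(session *mgo.Session, m mongo.WithAddresses) (bool, error) {
//		return true, nil // pretend this machine holds the Mongo primary
//	})
//
// PatchValue is the juju/testing helper, and the signature here is
// inferred from the MongoIsMaster(session, machine) call site below, so
// treat both as assumptions rather than as part of this change.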
+var MongoIsMaster = mongo.IsMaster + +func (api *AgentAPIV2) IsMaster() (params.IsMasterResult, error) { + if !api.auth.AuthModelManager() { + return params.IsMasterResult{}, common.ErrPerm + } + + switch tag := api.auth.GetAuthTag().(type) { + case names.MachineTag: + machine, err := api.st.Machine(tag.Id()) + if err != nil { + return params.IsMasterResult{}, common.ErrPerm + } + + session := api.st.MongoSession() + isMaster, err := MongoIsMaster(session, machine) + return params.IsMasterResult{Master: isMaster}, err + default: + return params.IsMasterResult{}, errors.Errorf("authenticated entity is not a Machine") + } +} + +func stateJobsToAPIParamsJobs(jobs []state.MachineJob) []multiwatcher.MachineJob { + pjobs := make([]multiwatcher.MachineJob, len(jobs)) + for i, job := range jobs { + pjobs[i] = multiwatcher.MachineJob(job.String()) + } + return pjobs } === modified file 'src/github.com/juju/juju/apiserver/agent/agent_test.go' --- src/github.com/juju/juju/apiserver/agent/agent_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/agent/agent_test.go 2016-03-22 15:18:22 +0000 @@ -3,14 +3,18 @@ import ( stdtesting "testing" + "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/apiserver/agent" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/instance" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" coretesting "github.com/juju/juju/testing" ) @@ -18,8 +22,9 @@ coretesting.MgoTestPackage(t) } -// baseSuite contains the information need for all tests. -type baseSuite struct { +var _ = gc.Suite(&agentSuite{}) + +type agentSuite struct { jujutesting.JujuConnSuite resources *common.Resources @@ -30,11 +35,11 @@ container *state.Machine } -func (s *baseSuite) SetUpTest(c *gc.C) { +func (s *agentSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) var err error - s.machine0, err = s.State.AddMachine("quantal", state.JobManageEnviron) + s.machine0, err = s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) s.machine1, err = s.State.AddMachine("quantal", state.JobHostUnits) @@ -56,3 +61,177 @@ Tag: s.machine1.Tag(), } } + +func (s *agentSuite) TestAgentFailsWithNonAgent(c *gc.C) { + auth := s.authorizer + auth.Tag = names.NewUserTag("admin") + api, err := agent.NewAgentAPIV2(s.State, s.resources, auth) + c.Assert(err, gc.NotNil) + c.Assert(api, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *agentSuite) TestAgentSucceedsWithUnitAgent(c *gc.C) { + auth := s.authorizer + auth.Tag = names.NewUnitTag("foosball/1") + _, err := agent.NewAgentAPIV2(s.State, s.resources, auth) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *agentSuite) TestGetEntities(c *gc.C) { + err := s.container.Destroy() + c.Assert(err, jc.ErrorIsNil) + args := params.Entities{ + Entities: []params.Entity{ + {Tag: "machine-1"}, + {Tag: "machine-0"}, + {Tag: "machine-1-lxc-0"}, + {Tag: "machine-42"}, + }, + } + api, err := agent.NewAgentAPIV2(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + results := api.GetEntities(args) + c.Assert(results, gc.DeepEquals, params.AgentGetEntitiesResults{ + Entities: []params.AgentGetEntitiesResult{ + { + Life: "alive", + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + }, + {Error: apiservertesting.ErrUnauthorized}, + {Error: 
apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *agentSuite) TestGetEntitiesContainer(c *gc.C) { + auth := s.authorizer + auth.Tag = s.container.Tag() + err := s.container.Destroy() + c.Assert(err, jc.ErrorIsNil) + + api, err := agent.NewAgentAPIV2(s.State, s.resources, auth) + c.Assert(err, jc.ErrorIsNil) + args := params.Entities{ + Entities: []params.Entity{ + {Tag: "machine-1"}, + {Tag: "machine-0"}, + {Tag: "machine-1-lxc-0"}, + {Tag: "machine-42"}, + }, + } + results := api.GetEntities(args) + c.Assert(results, gc.DeepEquals, params.AgentGetEntitiesResults{ + Entities: []params.AgentGetEntitiesResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + { + Life: "dying", + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + ContainerType: instance.LXC, + }, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *agentSuite) TestGetEntitiesNotFound(c *gc.C) { + // Destroy the container first, so we can destroy its parent. + err := s.container.Destroy() + c.Assert(err, jc.ErrorIsNil) + err = s.container.EnsureDead() + c.Assert(err, jc.ErrorIsNil) + err = s.container.Remove() + c.Assert(err, jc.ErrorIsNil) + + err = s.machine1.Destroy() + c.Assert(err, jc.ErrorIsNil) + err = s.machine1.EnsureDead() + c.Assert(err, jc.ErrorIsNil) + err = s.machine1.Remove() + c.Assert(err, jc.ErrorIsNil) + + api, err := agent.NewAgentAPIV2(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + results := api.GetEntities(params.Entities{ + Entities: []params.Entity{{Tag: "machine-1"}}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, params.AgentGetEntitiesResults{ + Entities: []params.AgentGetEntitiesResult{{ + Error: &params.Error{ + Code: params.CodeNotFound, + Message: "machine 1 not found", + }, + }}, + }) +} + +func (s *agentSuite) TestSetPasswords(c *gc.C) { + api, err := agent.NewAgentAPIV2(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + results, err := api.SetPasswords(params.EntityPasswords{ + Changes: []params.EntityPassword{ + {Tag: "machine-0", Password: "xxx-12345678901234567890"}, + {Tag: "machine-1", Password: "yyy-12345678901234567890"}, + {Tag: "machine-42", Password: "zzz-12345678901234567890"}, + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + err = s.machine1.Refresh() + c.Assert(err, jc.ErrorIsNil) + changed := s.machine1.PasswordValid("yyy-12345678901234567890") + c.Assert(changed, jc.IsTrue) +} + +func (s *agentSuite) TestSetPasswordsShort(c *gc.C) { + api, err := agent.NewAgentAPIV2(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + results, err := api.SetPasswords(params.EntityPasswords{ + Changes: []params.EntityPassword{ + {Tag: "machine-1", Password: "yyy"}, + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.ErrorMatches, + "password is only 3 bytes long, and is not a valid Agent password") +} + +func (s *agentSuite) TestClearReboot(c *gc.C) { + api, err := agent.NewAgentAPIV2(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + + err = s.machine1.SetRebootFlag(true) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: s.machine0.Tag().String()}, + {Tag: s.machine1.Tag().String()}, + }} +
+ rFlag, err := s.machine1.GetRebootFlag() + c.Assert(err, jc.ErrorIsNil) + c.Assert(rFlag, jc.IsTrue) + + result, err := api.ClearReboot(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + }, + }) + + rFlag, err = s.machine1.GetRebootFlag() + c.Assert(err, jc.ErrorIsNil) + c.Assert(rFlag, jc.IsFalse) +} === removed file 'src/github.com/juju/juju/apiserver/agent/agent_v0.go' --- src/github.com/juju/juju/apiserver/agent/agent_v0.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/agent/agent_v0.go 1970-01-01 00:00:00 +0000 @@ -1,125 +0,0 @@ -// Copyright 2013, 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package agent - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/mongo" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" -) - -// AgentAPIV0 implements the version 0 of the API provided to an agent. -type AgentAPIV0 struct { - *common.PasswordChanger - *common.RebootFlagClearer - - st *state.State - auth common.Authorizer -} - -// NewAgentAPIV0 returns an object implementing version 0 of the Agent API -// with the given authorizer representing the currently logged in client. -func NewAgentAPIV0(st *state.State, resources *common.Resources, auth common.Authorizer) (*AgentAPIV0, error) { - // Agents are defined to be any user that's not a client user. - if !auth.AuthMachineAgent() && !auth.AuthUnitAgent() { - return nil, common.ErrPerm - } - getCanChange := func() (common.AuthFunc, error) { - return auth.AuthOwner, nil - } - return &AgentAPIV0{ - PasswordChanger: common.NewPasswordChanger(st, getCanChange), - RebootFlagClearer: common.NewRebootFlagClearer(st, getCanChange), - st: st, - auth: auth, - }, nil -} - -func (api *AgentAPIV0) GetEntities(args params.Entities) params.AgentGetEntitiesResults { - results := params.AgentGetEntitiesResults{ - Entities: make([]params.AgentGetEntitiesResult, len(args.Entities)), - } - for i, entity := range args.Entities { - tag, err := names.ParseTag(entity.Tag) - if err != nil { - results.Entities[i].Error = common.ServerError(err) - continue - } - result, err := api.getEntity(tag) - result.Error = common.ServerError(err) - results.Entities[i] = result - } - return results -} - -func (api *AgentAPIV0) getEntity(tag names.Tag) (result params.AgentGetEntitiesResult, err error) { - // Allow only for the owner agent. - // Note: having a bulk API call for this is utter madness, given that - // this check means we can only ever return a single object. - if !api.auth.AuthOwner(tag) { - err = common.ErrPerm - return - } - entity0, err := api.st.FindEntity(tag) - if err != nil { - return - } - entity, ok := entity0.(state.Lifer) - if !ok { - err = common.NotSupportedError(tag, "life cycles") - return - } - result.Life = params.Life(entity.Life().String()) - if machine, ok := entity.(*state.Machine); ok { - result.Jobs = stateJobsToAPIParamsJobs(machine.Jobs()) - result.ContainerType = machine.ContainerType() - } - return -} - -func (api *AgentAPIV0) StateServingInfo() (result state.StateServingInfo, err error) { - if !api.auth.AuthEnvironManager() { - err = common.ErrPerm - return - } - return api.st.StateServingInfo() -} - -// MongoIsMaster is called by the IsMaster API call -// instead of mongo.IsMaster. 
It exists so it can -// be overridden by tests. -var MongoIsMaster = mongo.IsMaster - -func (api *AgentAPIV0) IsMaster() (params.IsMasterResult, error) { - if !api.auth.AuthEnvironManager() { - return params.IsMasterResult{}, common.ErrPerm - } - - switch tag := api.auth.GetAuthTag().(type) { - case names.MachineTag: - machine, err := api.st.Machine(tag.Id()) - if err != nil { - return params.IsMasterResult{}, common.ErrPerm - } - - session := api.st.MongoSession() - isMaster, err := MongoIsMaster(session, machine) - return params.IsMasterResult{Master: isMaster}, err - default: - return params.IsMasterResult{}, errors.Errorf("authenticated entity is not a Machine") - } -} - -func stateJobsToAPIParamsJobs(jobs []state.MachineJob) []multiwatcher.MachineJob { - pjobs := make([]multiwatcher.MachineJob, len(jobs)) - for i, job := range jobs { - pjobs[i] = multiwatcher.MachineJob(job.String()) - } - return pjobs -} === removed file 'src/github.com/juju/juju/apiserver/agent/agent_v0_test.go' --- src/github.com/juju/juju/apiserver/agent/agent_v0_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/agent/agent_v0_test.go 1970-01-01 00:00:00 +0000 @@ -1,207 +0,0 @@ -package agent_test - -import ( - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/agent" - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/instance" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" -) - -// Definition of reusable V0 tests. - -type factoryV0 func(st *state.State, resources *common.Resources, auth common.Authorizer) (interface{}, error) - -func (s *baseSuite) testAgentFailsWithNonAgentV0(c *gc.C, factory factoryV0) { - auth := s.authorizer - auth.Tag = names.NewUserTag("admin") - api, err := factory(s.State, s.resources, auth) - c.Assert(err, gc.NotNil) - c.Assert(api, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *baseSuite) testAgentSucceedsWithUnitAgentV0(c *gc.C, factory factoryV0) { - auth := s.authorizer - auth.Tag = names.NewUnitTag("foosball/1") - _, err := factory(s.State, s.resources, auth) - c.Assert(err, jc.ErrorIsNil) -} - -type getEntitiesV0 interface { - GetEntities(args params.Entities) params.AgentGetEntitiesResults -} - -func (s *baseSuite) testGetEntitiesV0(c *gc.C, api getEntitiesV0) { - err := s.container.Destroy() - c.Assert(err, jc.ErrorIsNil) - args := params.Entities{ - Entities: []params.Entity{ - {Tag: "machine-1"}, - {Tag: "machine-0"}, - {Tag: "machine-1-lxc-0"}, - {Tag: "machine-42"}, - }, - } - results := api.GetEntities(args) - c.Assert(results, gc.DeepEquals, params.AgentGetEntitiesResults{ - Entities: []params.AgentGetEntitiesResult{ - { - Life: "alive", - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - }, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *baseSuite) testGetEntitiesContainerV0(c *gc.C, api getEntitiesV0) { - err := s.container.Destroy() - c.Assert(err, jc.ErrorIsNil) - args := params.Entities{ - Entities: []params.Entity{ - {Tag: "machine-1"}, - {Tag: "machine-0"}, - {Tag: "machine-1-lxc-0"}, - {Tag: "machine-42"}, - }, - } - results := api.GetEntities(args) - c.Assert(results, gc.DeepEquals, params.AgentGetEntitiesResults{ - Entities: []params.AgentGetEntitiesResult{ - {Error: 
apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - { - Life: "dying", - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - ContainerType: instance.LXC, - }, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *baseSuite) testGetEntitiesNotFoundV0(c *gc.C, api getEntitiesV0) { - // Destroy the container first, so we can destroy its parent. - err := s.container.Destroy() - c.Assert(err, jc.ErrorIsNil) - err = s.container.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - err = s.container.Remove() - c.Assert(err, jc.ErrorIsNil) - - err = s.machine1.Destroy() - c.Assert(err, jc.ErrorIsNil) - err = s.machine1.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - err = s.machine1.Remove() - c.Assert(err, jc.ErrorIsNil) - results := api.GetEntities(params.Entities{ - Entities: []params.Entity{{Tag: "machine-1"}}, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, params.AgentGetEntitiesResults{ - Entities: []params.AgentGetEntitiesResult{{ - Error: &params.Error{ - Code: params.CodeNotFound, - Message: "machine 1 not found", - }, - }}, - }) -} - -type setPasswordsV0 interface { - SetPasswords(args params.EntityPasswords) (params.ErrorResults, error) -} - -func (s *baseSuite) testSetPasswordsV0(c *gc.C, api setPasswordsV0) { - results, err := api.SetPasswords(params.EntityPasswords{ - Changes: []params.EntityPassword{ - {Tag: "machine-0", Password: "xxx-12345678901234567890"}, - {Tag: "machine-1", Password: "yyy-12345678901234567890"}, - {Tag: "machine-42", Password: "zzz-12345678901234567890"}, - }, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - err = s.machine1.Refresh() - c.Assert(err, jc.ErrorIsNil) - changed := s.machine1.PasswordValid("yyy-12345678901234567890") - c.Assert(changed, jc.IsTrue) -} - -func (s *baseSuite) testSetPasswordsShortV0(c *gc.C, api setPasswordsV0) { - results, err := api.SetPasswords(params.EntityPasswords{ - Changes: []params.EntityPassword{ - {Tag: "machine-1", Password: "yyy"}, - }, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results.Results, gc.HasLen, 1) - c.Assert(results.Results[0].Error, gc.ErrorMatches, - "password is only 3 bytes long, and is not a valid Agent password") -} - -// V0 test suite.
- -func factoryWrapperV0(st *state.State, resources *common.Resources, auth common.Authorizer) (interface{}, error) { - return agent.NewAgentAPIV0(st, resources, auth) -} - -type agentSuiteV0 struct { - baseSuite -} - -var _ = gc.Suite(&agentSuiteV0{}) - -func (s *agentSuiteV0) TestAgentFailsWithNonAgent(c *gc.C) { - s.testAgentFailsWithNonAgentV0(c, factoryWrapperV0) -} - -func (s *agentSuiteV0) TestAgentSucceedsWithUnitAgent(c *gc.C) { - s.testAgentSucceedsWithUnitAgentV0(c, factoryWrapperV0) -} - -func (s *agentSuiteV0) TestGetEntities(c *gc.C) { - s.testGetEntitiesV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV0) TestGetEntitiesContainer(c *gc.C) { - auth := s.authorizer - auth.Tag = s.container.Tag() - api, err := agent.NewAgentAPIV0(s.State, s.resources, auth) - c.Assert(err, jc.ErrorIsNil) - s.testGetEntitiesContainerV0(c, api) -} - -func (s *agentSuiteV0) TestGetEntitiesNotFound(c *gc.C) { - s.testGetEntitiesNotFoundV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV0) TestSetPasswords(c *gc.C) { - s.testSetPasswordsV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV0) TestSetPasswordsShort(c *gc.C) { - s.testSetPasswordsShortV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV0) newAPI(c *gc.C) *agent.AgentAPIV0 { - api, err := agent.NewAgentAPIV0(s.State, s.resources, s.authorizer) - c.Assert(err, jc.ErrorIsNil) - return api -} === removed file 'src/github.com/juju/juju/apiserver/agent/agent_v1.go' --- src/github.com/juju/juju/apiserver/agent/agent_v1.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/agent/agent_v1.go 1970-01-01 00:00:00 +0000 @@ -1,28 +0,0 @@ -// Copyright 2013, 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package agent - -import ( - "github.com/juju/errors" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/state" -) - -// AgentAPIV1 implements the version 1 of the API provided to an agent. -type AgentAPIV1 struct { - *AgentAPIV0 -} - -// NewAgentAPIV1 returns an object implementing version 1 of the Agent API -// with the given authorizer representing the currently logged in client. -// The functionality is like V0, except that it also knows about the additional -// JobManageNetworking. -func NewAgentAPIV1(st *state.State, resources *common.Resources, auth common.Authorizer) (*AgentAPIV1, error) { - apiV0, err := NewAgentAPIV0(st, resources, auth) - if err != nil { - return nil, errors.Trace(err) - } - return &AgentAPIV1{apiV0}, nil -} === removed file 'src/github.com/juju/juju/apiserver/agent/agent_v1_test.go' --- src/github.com/juju/juju/apiserver/agent/agent_v1_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/agent/agent_v1_test.go 1970-01-01 00:00:00 +0000 @@ -1,92 +0,0 @@ -package agent_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/agent" - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/state" -) - -// V1 test suite, no additional or changed tests. 
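The V1 facade can reuse every V0 test because of the embedding shown in the removed agent_v1.go above: AgentAPIV1 wraps *AgentAPIV0, so all V0 methods are promoted onto V1. A minimal sketch of how the shared helpers exploit that (the type and interface names come from the removed files; newV1ForTest itself is hypothetical):

    // newV1ForTest shows that *AgentAPIV1 satisfies the V0 test
    // interfaces (e.g. getEntitiesV0) purely via Go method promotion.
    func newV1ForTest(st *state.State, res *common.Resources, auth common.Authorizer) (getEntitiesV0, error) {
        api, err := agent.NewAgentAPIV1(st, res, auth)
        if err != nil {
            return nil, err
        }
        return api, nil // GetEntities is promoted from the embedded *AgentAPIV0
    }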
- -func factoryWrapperV1(st *state.State, resources *common.Resources, auth common.Authorizer) (interface{}, error) { - return agent.NewAgentAPIV1(st, resources, auth) -} - -type agentSuiteV1 struct { - baseSuite -} - -var _ = gc.Suite(&agentSuiteV1{}) - -func (s *agentSuiteV1) TestClearReboot(c *gc.C) { - api, err := agent.NewAgentAPIV1(s.State, s.resources, s.authorizer) - c.Assert(err, jc.ErrorIsNil) - - err = s.machine1.SetRebootFlag(true) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{Entities: []params.Entity{ - {Tag: s.machine0.Tag().String()}, - {Tag: s.machine1.Tag().String()}, - }} - - rFlag, err := s.machine1.GetRebootFlag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rFlag, jc.IsTrue) - - result, err := api.ClearReboot(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - }, - }) - - rFlag, err = s.machine1.GetRebootFlag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rFlag, jc.IsFalse) -} - -func (s *agentSuiteV1) TestAgentFailsWithNonAgent(c *gc.C) { - s.testAgentFailsWithNonAgentV0(c, factoryWrapperV1) -} - -func (s *agentSuiteV1) TestAgentSucceedsWithUnitAgent(c *gc.C) { - s.testAgentSucceedsWithUnitAgentV0(c, factoryWrapperV1) -} - -func (s *agentSuiteV1) TestGetEntities(c *gc.C) { - s.testGetEntitiesV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV1) TestGetEntitiesContainer(c *gc.C) { - auth := s.authorizer - auth.Tag = s.container.Tag() - api, err := agent.NewAgentAPIV1(s.State, s.resources, auth) - c.Assert(err, jc.ErrorIsNil) - s.testGetEntitiesContainerV0(c, api) -} - -func (s *agentSuiteV1) TestGetEntitiesNotFound(c *gc.C) { - s.testGetEntitiesNotFoundV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV1) TestSetPasswords(c *gc.C) { - s.testSetPasswordsV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV1) TestSetPasswordsShort(c *gc.C) { - s.testSetPasswordsShortV0(c, s.newAPI(c)) -} - -func (s *agentSuiteV1) newAPI(c *gc.C) *agent.AgentAPIV1 { - api, err := agent.NewAgentAPIV1(s.State, s.resources, s.authorizer) - c.Assert(err, jc.ErrorIsNil) - return api -} === added file 'src/github.com/juju/juju/apiserver/agent/model_test.go' --- src/github.com/juju/juju/apiserver/agent/model_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/agent/model_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agent_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/agent" + "github.com/juju/juju/apiserver/common" + commontesting "github.com/juju/juju/apiserver/common/testing" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" +) + +type modelSuite struct { + testing.JujuConnSuite + *commontesting.ModelWatcherTest + + authorizer apiservertesting.FakeAuthorizer + resources *common.Resources + + machine0 *state.Machine + api *agent.AgentAPIV2 +} + +var _ = gc.Suite(&modelSuite{}) + +func (s *modelSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + var err error + s.machine0, err = s.State.AddMachine("quantal", state.JobHostUnits, state.JobManageModel) + c.Assert(err, jc.ErrorIsNil) + + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.machine0.Tag(), + } + s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + s.api, err = agent.NewAgentAPIV2( + s.State, + s.resources, + s.authorizer, + ) + c.Assert(err, jc.ErrorIsNil) + s.ModelWatcherTest = commontesting.NewModelWatcherTest( + s.api, s.State, s.resources, commontesting.NoSecrets) +} === added directory 'src/github.com/juju/juju/apiserver/agenttools' === added file 'src/github.com/juju/juju/apiserver/agenttools/agenttools.go' --- src/github.com/juju/juju/apiserver/agenttools/agenttools.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/agenttools/agenttools.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,127 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agenttools + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/tools" + "github.com/juju/juju/state" + coretools "github.com/juju/juju/tools" + "github.com/juju/juju/version" +) + +func init() { + common.RegisterStandardFacade("AgentTools", 1, NewAgentToolsAPI) +} + +var logger = loggo.GetLogger("juju.apiserver.model") + +var ( + findTools = tools.FindTools +) + +// AgentToolsAPI implements the API used by the machine model worker. +type AgentToolsAPI struct { + st EnvironGetter + authorizer common.Authorizer + // tools lookup + findTools toolsFinder + envVersionUpdate envVersionUpdater +} + +// NewAgentToolsAPI creates a new instance of the AgentTools API. +func NewAgentToolsAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*AgentToolsAPI, error) { + return &AgentToolsAPI{ + st: st, + authorizer: authorizer, + findTools: findTools, + envVersionUpdate: envVersionUpdate, + }, nil +} + +// EnvironGetter represents a struct that can provide a state.Model.
+type EnvironGetter interface { + Model() (*state.Model, error) +} + +type toolsFinder func(environs.Environ, int, int, string, coretools.Filter) (coretools.List, error) +type envVersionUpdater func(*state.Model, version.Number) error + +var newEnvirons = environs.New + +func checkToolsAvailability(cfg *config.Config, finder toolsFinder) (version.Number, error) { + currentVersion, ok := cfg.AgentVersion() + if !ok || currentVersion == version.Zero { + return version.Zero, nil + } + + env, err := newEnvirons(cfg) + if err != nil { + return version.Zero, errors.Annotatef(err, "cannot make model") + } + + // finder receives major and minor as parameters as it uses them to filter versions and + // only return patches for the passed major.minor (from major.minor.patch). + // We'll try the released stream first, then fall back to the current configured stream + // if no released tools are found. + vers, err := finder(env, currentVersion.Major, currentVersion.Minor, tools.ReleasedStream, coretools.Filter{}) + preferredStream := tools.PreferredStream(&currentVersion, cfg.Development(), cfg.AgentStream()) + if preferredStream != tools.ReleasedStream && errors.Cause(err) == coretools.ErrNoMatches { + vers, err = finder(env, currentVersion.Major, currentVersion.Minor, preferredStream, coretools.Filter{}) + } + if err != nil { + return version.Zero, errors.Annotatef(err, "cannot find available tools") + } + // Newest returns the newest version and also the items in the list + // matching that version; only the version number is needed here. + newest, _ := vers.Newest() + return newest, nil +} + +var envConfig = func(e *state.Model) (*config.Config, error) { + return e.Config() +} + +// envVersionUpdate is the base implementation of envVersionUpdater. +func envVersionUpdate(env *state.Model, ver version.Number) error { + return env.UpdateLatestToolsVersion(ver) +} + +func updateToolsAvailability(st EnvironGetter, finder toolsFinder, update envVersionUpdater) error { + env, err := st.Model() + if err != nil { + return errors.Annotate(err, "cannot get model") + } + cfg, err := envConfig(env) + if err != nil { + return errors.Annotate(err, "cannot get config") + } + ver, err := checkToolsAvailability(cfg, finder) + if err != nil { + if errors.IsNotFound(err) { + // No newer tools, so exit silently. + return nil + } + return errors.Annotate(err, "cannot get latest version") + } + if ver == version.Zero { + logger.Debugf("tools lookup returned version Zero, this should only happen during bootstrap.") + return nil + } + return update(env, ver) +} + +// UpdateToolsAvailable looks up new patch versions of the current tools and +// records the newest available one in the model. +func (api *AgentToolsAPI) UpdateToolsAvailable() error { + if !api.authorizer.AuthModelManager() { + return common.ErrPerm + } + return updateToolsAvailability(api.st, api.findTools, api.envVersionUpdate) +} === added file 'src/github.com/juju/juju/apiserver/agenttools/agenttools_test.go' --- src/github.com/juju/juju/apiserver/agenttools/agenttools_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/agenttools/agenttools_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,167 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
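checkToolsAvailability above encodes a two-step stream fallback: query the released simplestreams stream first, and only when that returns ErrNoMatches and the model prefers a different stream (for example "proposed" for a 2.5-alpha1 agent) query the preferred stream. A hedged re-expression of just that ordering decision, assuming only the tools package names used above (pickStreams is not part of the change):

    // pickStreams mirrors the fallback order in checkToolsAvailability:
    // "released" is always tried first; a differing preferred stream is
    // consulted only if "released" yields no matches.
    func pickStreams(preferred string) []string {
        if preferred == tools.ReleasedStream {
            return []string{tools.ReleasedStream}
        }
        return []string{tools.ReleasedStream, preferred}
    }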
+ +package agenttools + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" + coretools "github.com/juju/juju/tools" + "github.com/juju/juju/version" +) + +var _ = gc.Suite(&AgentToolsSuite{}) + +type AgentToolsSuite struct { + coretesting.BaseSuite +} + +type dummyEnviron struct { + environs.Environ +} + +func (s *AgentToolsSuite) TestCheckTools(c *gc.C) { + sConfig := coretesting.FakeConfig() + sConfig = sConfig.Merge(coretesting.Attrs{ + "agent-version": "2.5.0", + }) + cfg, err := config.New(config.NoDefaults, sConfig) + c.Assert(err, jc.ErrorIsNil) + fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { + return dummyEnviron{}, nil + } + s.PatchValue(&newEnvirons, fakeNewEnvirons) + var ( + calledWithMajor, calledWithMinor int + ) + fakeToolFinder := func(e environs.Environ, maj int, min int, stream string, filter coretools.Filter) (coretools.List, error) { + calledWithMajor = maj + calledWithMinor = min + ver := version.Binary{Number: version.Number{Major: maj, Minor: min}} + t := coretools.Tools{Version: ver, URL: "http://example.com", Size: 1} + c.Assert(calledWithMajor, gc.Equals, 2) + c.Assert(calledWithMinor, gc.Equals, 5) + c.Assert(stream, gc.Equals, "released") + return coretools.List{&t}, nil + } + + ver, err := checkToolsAvailability(cfg, fakeToolFinder) + c.Assert(err, jc.ErrorIsNil) + c.Assert(ver, gc.Not(gc.Equals), version.Zero) + c.Assert(ver, gc.Equals, version.Number{Major: 2, Minor: 5, Patch: 0}) +} + +func (s *AgentToolsSuite) TestCheckToolsNonReleasedStream(c *gc.C) { + sConfig := coretesting.FakeConfig() + sConfig = sConfig.Merge(coretesting.Attrs{ + "agent-version": "2.5-alpha1", + "agent-stream": "proposed", + }) + cfg, err := config.New(config.NoDefaults, sConfig) + c.Assert(err, jc.ErrorIsNil) + fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { + return dummyEnviron{}, nil + } + s.PatchValue(&newEnvirons, fakeNewEnvirons) + var ( + calledWithMajor, calledWithMinor int + calledWithStreams []string + ) + fakeToolFinder := func(e environs.Environ, maj int, min int, stream string, filter coretools.Filter) (coretools.List, error) { + calledWithMajor = maj + calledWithMinor = min + calledWithStreams = append(calledWithStreams, stream) + if stream == "released" { + return nil, coretools.ErrNoMatches + } + ver := version.Binary{Number: version.Number{Major: maj, Minor: min}} + t := coretools.Tools{Version: ver, URL: "http://example.com", Size: 1} + c.Assert(calledWithMajor, gc.Equals, 2) + c.Assert(calledWithMinor, gc.Equals, 5) + return coretools.List{&t}, nil + } + ver, err := checkToolsAvailability(cfg, fakeToolFinder) + c.Assert(err, jc.ErrorIsNil) + c.Assert(calledWithStreams, gc.DeepEquals, []string{"released", "proposed"}) + c.Assert(ver, gc.Not(gc.Equals), version.Zero) + c.Assert(ver, gc.Equals, version.Number{Major: 2, Minor: 5, Patch: 0}) +} + +type modelGetter struct { +} + +func (e *modelGetter) Model() (*state.Model, error) { + return &state.Model{}, nil +} + +func (s *AgentToolsSuite) TestUpdateToolsAvailability(c *gc.C) { + fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { + return dummyEnviron{}, nil + } + s.PatchValue(&newEnvirons, fakeNewEnvirons) + + fakeEnvConfig := func(_ *state.Model) (*config.Config, error) { + sConfig := coretesting.FakeConfig() + sConfig = sConfig.Merge(coretesting.Attrs{ + 
"agent-version": "2.5.0", + }) + return config.New(config.NoDefaults, sConfig) + } + s.PatchValue(&envConfig, fakeEnvConfig) + + fakeToolFinder := func(_ environs.Environ, _ int, _ int, _ string, _ coretools.Filter) (coretools.List, error) { + ver := version.Binary{Number: version.Number{Major: 2, Minor: 5, Patch: 2}} + olderVer := version.Binary{Number: version.Number{Major: 2, Minor: 5, Patch: 1}} + t := coretools.Tools{Version: ver, URL: "http://example.com", Size: 1} + tOld := coretools.Tools{Version: olderVer, URL: "http://example.com", Size: 1} + return coretools.List{&t, &tOld}, nil + } + + var ver version.Number + fakeUpdate := func(_ *state.Model, v version.Number) error { + ver = v + return nil + } + + err := updateToolsAvailability(&modelGetter{}, fakeToolFinder, fakeUpdate) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(ver, gc.Not(gc.Equals), version.Zero) + c.Assert(ver, gc.Equals, version.Number{Major: 2, Minor: 5, Patch: 2}) +} + +func (s *AgentToolsSuite) TestUpdateToolsAvailabilityNoMatches(c *gc.C) { + fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { + return dummyEnviron{}, nil + } + s.PatchValue(&newEnvirons, fakeNewEnvirons) + + fakeEnvConfig := func(_ *state.Model) (*config.Config, error) { + sConfig := coretesting.FakeConfig() + sConfig = sConfig.Merge(coretesting.Attrs{ + "agent-version": "2.5.0", + }) + return config.New(config.NoDefaults, sConfig) + } + s.PatchValue(&envConfig, fakeEnvConfig) + + // No new tools available. + fakeToolFinder := func(_ environs.Environ, _ int, _ int, _ string, _ coretools.Filter) (coretools.List, error) { + return nil, errors.NotFoundf("tools") + } + + // Update should never be called. + fakeUpdate := func(_ *state.Model, v version.Number) error { + c.Fail() + return nil + } + + err := updateToolsAvailability(&modelGetter{}, fakeToolFinder, fakeUpdate) + c.Assert(err, jc.ErrorIsNil) +} === added file 'src/github.com/juju/juju/apiserver/agenttools/package_test.go' --- src/github.com/juju/juju/apiserver/agenttools/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/agenttools/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agenttools_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/apiserver/allfacades.go' --- src/github.com/juju/juju/apiserver/allfacades.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/allfacades.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,7 @@ _ "github.com/juju/juju/apiserver/action" _ "github.com/juju/juju/apiserver/addresser" _ "github.com/juju/juju/apiserver/agent" + _ "github.com/juju/juju/apiserver/agenttools" _ "github.com/juju/juju/apiserver/annotations" _ "github.com/juju/juju/apiserver/backups" _ "github.com/juju/juju/apiserver/block" @@ -17,11 +18,12 @@ _ "github.com/juju/juju/apiserver/charms" _ "github.com/juju/juju/apiserver/cleaner" _ "github.com/juju/juju/apiserver/client" + _ "github.com/juju/juju/apiserver/controller" _ "github.com/juju/juju/apiserver/deployer" + _ "github.com/juju/juju/apiserver/discoverspaces" _ "github.com/juju/juju/apiserver/diskmanager" - _ "github.com/juju/juju/apiserver/environment" - _ "github.com/juju/juju/apiserver/environmentmanager" _ "github.com/juju/juju/apiserver/firewaller" + _ "github.com/juju/juju/apiserver/highavailability" _ "github.com/juju/juju/apiserver/imagemanager" _ "github.com/juju/juju/apiserver/imagemetadata" _ "github.com/juju/juju/apiserver/instancepoller" @@ -30,18 +32,24 @@ _ "github.com/juju/juju/apiserver/logger" _ "github.com/juju/juju/apiserver/machine" _ "github.com/juju/juju/apiserver/machinemanager" + _ "github.com/juju/juju/apiserver/meterstatus" + _ "github.com/juju/juju/apiserver/metricsadder" + _ "github.com/juju/juju/apiserver/metricsdebug" _ "github.com/juju/juju/apiserver/metricsmanager" - _ "github.com/juju/juju/apiserver/networker" + _ "github.com/juju/juju/apiserver/modelmanager" _ "github.com/juju/juju/apiserver/provisioner" + _ "github.com/juju/juju/apiserver/proxyupdater" _ "github.com/juju/juju/apiserver/reboot" _ "github.com/juju/juju/apiserver/resumer" - _ "github.com/juju/juju/apiserver/rsyslog" + _ "github.com/juju/juju/apiserver/retrystrategy" _ "github.com/juju/juju/apiserver/service" _ "github.com/juju/juju/apiserver/spaces" + _ "github.com/juju/juju/apiserver/statushistory" _ "github.com/juju/juju/apiserver/storage" _ "github.com/juju/juju/apiserver/storageprovisioner" _ "github.com/juju/juju/apiserver/subnets" - _ "github.com/juju/juju/apiserver/systemmanager" + _ "github.com/juju/juju/apiserver/undertaker" + _ "github.com/juju/juju/apiserver/unitassigner" _ "github.com/juju/juju/apiserver/uniter" _ "github.com/juju/juju/apiserver/upgrader" _ "github.com/juju/juju/apiserver/usermanager" === modified file 'src/github.com/juju/juju/apiserver/annotations/client.go' --- src/github.com/juju/juju/apiserver/annotations/client.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/annotations/client.go 2016-03-22 15:18:22 +0000 @@ -13,7 +13,7 @@ ) func init() { - common.RegisterStandardFacade("Annotations", 1, NewAPI) + common.RegisterStandardFacade("Annotations", 2, NewAPI) } var getState = func(st *state.State) annotationAccess { === modified file 'src/github.com/juju/juju/apiserver/annotations/client_test.go' --- src/github.com/juju/juju/apiserver/annotations/client_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/annotations/client_test.go 2016-03-22 15:18:22 +0000 @@ -39,7 +39,7 @@ } func (s *annotationSuite) TestEnvironmentAnnotations(c *gc.C) { - env, err := s.State.Environment() + env, err := 
s.State.Model() c.Assert(err, jc.ErrorIsNil) s.testSetGetEntitiesAnnotations(c, env.Tag()) } === modified file 'src/github.com/juju/juju/apiserver/apiserver.go' --- src/github.com/juju/juju/apiserver/apiserver.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/apiserver.go 2016-03-22 15:18:22 +0000 @@ -23,7 +23,6 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/feature" "github.com/juju/juju/rpc" "github.com/juju/juju/rpc/jsoncodec" "github.com/juju/juju/state" @@ -41,16 +40,17 @@ wg sync.WaitGroup state *state.State statePool *state.StatePool - addr *net.TCPAddr + lis net.Listener tag names.Tag dataDir string logDir string limiter utils.Limiter validator LoginValidator adminApiFactories map[int]adminApiFactory - - mu sync.Mutex // protects the fields that follow - environUUID string + mongoUnavailable uint32 // non-zero if Mongo is unavailable + modelUUID string + authCtxt *authContext + connections int32 // count of active websocket connections } // LoginValidator functions are used to decide whether login requests @@ -83,10 +83,10 @@ certChanged <-chan params.StateServingInfo // The config to update with any new certificate. - config tls.Config + config *tls.Config } -func newChangeCertListener(lis net.Listener, certChanged <-chan params.StateServingInfo, config tls.Config) *changeCertListener { +func newChangeCertListener(lis net.Listener, certChanged <-chan params.StateServingInfo, config *tls.Config) *changeCertListener { cl := &changeCertListener{ Listener: lis, certChanged: certChanged, @@ -108,7 +108,7 @@ cl.m.Lock() defer cl.m.Unlock() config := cl.config - return tls.Server(conn, &config), nil + return tls.Server(conn, config), nil } // Close closes the listener. @@ -156,43 +156,53 @@ // NewServer serves the given state by accepting requests on the given // listener, using the given certificate and key (in PEM format) for // authentication. +// +// The Server will close the listener when it exits, even if it returns an error. func NewServer(s *state.State, lis net.Listener, cfg ServerConfig) (*Server, error) { + // Important note: + // Do not manipulate the state within NewServer as the API + // server needs to run before mongo upgrades have happened and + // any state manipulation may be relying on features of the + // database added by upgrades. Here be dragons. l, ok := lis.(*net.TCPListener) if !ok { return nil, errors.Errorf("listener is not of type *net.TCPListener: %T", lis) } - return newServer(s, l, cfg) + srv, err := newServer(s, l, cfg) + if err != nil { + // There is no running server around to close the listener. + lis.Close() + return nil, errors.Trace(err) + } + return srv, nil } -func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (*Server, error) { - logger.Infof("listening on %q", lis.Addr()) +func newServer(s *state.State, lis *net.TCPListener, cfg ServerConfig) (_ *Server, err error) { + tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key) + if err != nil { + return nil, err + } + // TODO(rog) check that *srvRoot is a valid type for using + // as an RPC server.
+ tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + MinVersion: tls.VersionTLS10, + } srv := &Server{ state: s, statePool: state.NewStatePool(s), - addr: lis.Addr().(*net.TCPAddr), // cannot fail + lis: newChangeCertListener(lis, cfg.CertChanged, tlsConfig), tag: cfg.Tag, dataDir: cfg.DataDir, logDir: cfg.LogDir, limiter: utils.NewLimiter(loginRateLimit), validator: cfg.Validator, adminApiFactories: map[int]adminApiFactory{ - 0: newAdminApiV0, - 1: newAdminApiV1, - 2: newAdminApiV2, + 3: newAdminApiV3, }, } - tlsCert, err := tls.X509KeyPair(cfg.Cert, cfg.Key) - if err != nil { - return nil, err - } - // TODO(rog) check that *srvRoot is a valid type for using - // as an RPC server. - tlsConfig := tls.Config{ - Certificates: []tls.Certificate{tlsCert}, - MinVersion: tls.VersionTLS10, - } - changeCertListener := newChangeCertListener(lis, cfg.CertChanged, tlsConfig) - go srv.run(changeCertListener) + srv.authCtxt = newAuthContext(srv) + go srv.run() return srv, nil } @@ -224,15 +234,20 @@ mu sync.Mutex tag_ string + + // count is incremented by calls to join, and decremented + // by calls to leave. + count *int32 } var globalCounter int64 -func newRequestNotifier() *requestNotifier { +func newRequestNotifier(count *int32) *requestNotifier { return &requestNotifier{ id: atomic.AddInt64(&globalCounter, 1), tag_: "", start: time.Now(), + count: count, } } @@ -278,11 +293,13 @@ } func (n *requestNotifier) join(req *http.Request) { - logger.Infof("[%X] API connection from %s", n.id, req.RemoteAddr) + active := atomic.AddInt32(n.count, 1) + logger.Infof("[%X] API connection from %s, active connections: %d", n.id, req.RemoteAddr, active) } func (n *requestNotifier) leave() { - logger.Infof("[%X] %s API connection terminated after %v", n.id, n.tag(), time.Since(n.start)) + active := atomic.AddInt32(n.count, -1) + logger.Infof("[%X] %s API connection terminated after %v, active connections: %d", n.id, n.tag(), time.Since(n.start), active) } func (n *requestNotifier) ClientRequest(hdr *rpc.Header, body interface{}) { @@ -291,6 +308,31 @@ func (n *requestNotifier) ClientReply(req rpc.Request, hdr *rpc.Header, body interface{}) { } +// trackRequests wraps a http.Handler, incrementing and decrementing +// the apiserver's WaitGroup and blocking requests when the apiserver +// is shutting down. +// +// Note: It is only safe to use trackRequests with API handlers which +// are interruptible (i.e. they pay attention to the apiserver tomb) +// or are guaranteed to be short-lived. If it's used with long running +// API handlers which don't watch the apiserver's tomb, apiserver +// shutdown will be blocked until the API handler returns. +func (srv *Server) trackRequests(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + srv.wg.Add(1) + defer srv.wg.Done() + // If we've got to this stage and the tomb is still + // alive, we know that any tomb.Kill must occur after we + // have called wg.Add, so we avoid the possibility of a + // handler goroutine running after Stop has returned.
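+ // Put differently: either tomb.Kill ran before our wg.Add, in which + // case the check below sees a dying tomb and we return without serving, + // or it ran after, in which case Stop's wg.Wait is guaranteed to observe + // this request and block until ServeHTTP below returns.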
+ if srv.tomb.Err() != tomb.ErrStillAlive { + return + } + + handler.ServeHTTP(w, r) + }) +} + func handleAll(mux *pat.PatternServeMux, pattern string, handler http.Handler) { mux.Get(pattern, handler) mux.Post(pattern, handler) @@ -300,8 +342,14 @@ mux.Options(pattern, handler) } -func (srv *Server) run(lis net.Listener) { +func (srv *Server) run() { + logger.Infof("listening on %q", srv.lis.Addr()) + defer func() { + addr := srv.lis.Addr().String() // Addr not valid after close + err := srv.lis.Close() + logger.Infof("closed listening socket %q with final error: %v", addr, err) + srv.state.HackLeadership() // Break deadlocks caused by BlockUntil... calls. srv.wg.Wait() // wait for any outstanding requests to complete. srv.tomb.Done() @@ -312,6 +360,11 @@ srv.wg.Add(1) go func() { err := srv.mongoPinger() + // Before killing the tomb, inform the API handlers that + // Mongo is unavailable. API handlers can use this to decide + // not to perform non-critical Mongo-related operations when + // tearing down. + atomic.AddUint32(&srv.mongoUnavailable, 1) srv.tomb.Kill(err) srv.wg.Done() }() @@ -321,104 +374,106 @@ // registered first. mux := pat.New() - srvDying := srv.tomb.Dying() - - if feature.IsDbLogEnabled() { - handleAll(mux, "/environment/:envuuid/logsink", - newLogSinkHandler(httpHandler{statePool: srv.statePool}, srv.logDir)) - handleAll(mux, "/environment/:envuuid/log", - newDebugLogDBHandler(srv.statePool, srvDying)) - } else { - handleAll(mux, "/environment/:envuuid/log", - newDebugLogFileHandler(srv.statePool, srvDying, srv.logDir)) + httpCtxt := httpContext{ + srv: srv, } - handleAll(mux, "/environment/:envuuid/charms", + + mainAPIHandler := srv.trackRequests(http.HandlerFunc(srv.apiHandler)) + logSinkHandler := srv.trackRequests(newLogSinkHandler(httpCtxt, srv.logDir)) + debugLogHandler := srv.trackRequests(newDebugLogDBHandler(httpCtxt)) + + handleAll(mux, "/model/:modeluuid/services/:service/resources/:resource", + newResourceHandler(httpCtxt), + ) + handleAll(mux, "/model/:modeluuid/units/:unit/resources/:resource", + newUnitResourceHandler(httpCtxt), + ) + handleAll(mux, "/model/:modeluuid/logsink", logSinkHandler) + handleAll(mux, "/model/:modeluuid/log", debugLogHandler) + handleAll(mux, "/model/:modeluuid/charms", &charmsHandler{ - httpHandler: httpHandler{statePool: srv.statePool}, - dataDir: srv.dataDir}, + ctxt: httpCtxt, + dataDir: srv.dataDir}, ) // TODO: We can switch from handleAll to mux.Post/Get/etc for entries // where we only want to support specific request methods. However, our // tests currently assert that errors come back as application/json and // pat only does "text/plain" responses. 
- handleAll(mux, "/environment/:envuuid/tools", - &toolsUploadHandler{toolsHandler{ - httpHandler{statePool: srv.statePool}, - }}, - ) - handleAll(mux, "/environment/:envuuid/tools/:version", - &toolsDownloadHandler{toolsHandler{ - httpHandler{statePool: srv.statePool}, - }}, - ) - handleAll(mux, "/environment/:envuuid/backups", - &backupHandler{httpHandler{ - statePool: srv.statePool, - strictValidation: true, - stateServerEnvOnly: true, - }}, - ) - handleAll(mux, "/environment/:envuuid/api", http.HandlerFunc(srv.apiHandler)) + handleAll(mux, "/model/:modeluuid/tools", + &toolsUploadHandler{ + ctxt: httpCtxt, + }, + ) + handleAll(mux, "/model/:modeluuid/tools/:version", + &toolsDownloadHandler{ + ctxt: httpCtxt, + }, + ) + strictCtxt := httpCtxt + strictCtxt.strictValidation = true + strictCtxt.controllerModelOnly = true + handleAll(mux, "/model/:modeluuid/backups", + &backupHandler{ + ctxt: strictCtxt, + }, + ) + handleAll(mux, "/model/:modeluuid/api", mainAPIHandler) - handleAll(mux, "/environment/:envuuid/images/:kind/:series/:arch/:filename", + handleAll(mux, "/model/:modeluuid/images/:kind/:series/:arch/:filename", &imagesDownloadHandler{ - httpHandler: httpHandler{statePool: srv.statePool}, - dataDir: srv.dataDir, - state: srv.state}, + ctxt: httpCtxt, + dataDir: srv.dataDir, + state: srv.state, + }, ) // For backwards compatibility we register all the old paths - - if feature.IsDbLogEnabled() { - handleAll(mux, "/log", newDebugLogDBHandler(srv.statePool, srvDying)) - } else { - handleAll(mux, "/log", newDebugLogFileHandler(srv.statePool, srvDying, srv.logDir)) - } + handleAll(mux, "/log", debugLogHandler) handleAll(mux, "/charms", &charmsHandler{ - httpHandler: httpHandler{statePool: srv.statePool}, - dataDir: srv.dataDir, + ctxt: httpCtxt, + dataDir: srv.dataDir, }, ) handleAll(mux, "/tools", - &toolsUploadHandler{toolsHandler{ - httpHandler{statePool: srv.statePool}, - }}, + &toolsUploadHandler{ + ctxt: httpCtxt, + }, ) handleAll(mux, "/tools/:version", - &toolsDownloadHandler{toolsHandler{ - httpHandler{statePool: srv.statePool}, - }}, - ) - handleAll(mux, "/", http.HandlerFunc(srv.apiHandler)) + &toolsDownloadHandler{ + ctxt: httpCtxt, + }, + ) + handleAll(mux, "/register", + &registerUserHandler{ + ctxt: httpCtxt, + }, + ) + handleAll(mux, "/", mainAPIHandler) go func() { - // The error from http.Serve is not interesting. - http.Serve(lis, mux) + addr := srv.lis.Addr() // not valid after addr closed + logger.Debugf("Starting API http server on address %q", addr) + err := http.Serve(srv.lis, mux) + // normally logging an error at debug level would be grounds for a beating, + // however in this case the error is *expected* to be non-nil, and does not + // affect the operation of the apiserver, but for completeness log it anyway. + logger.Debugf("API http server exited, final error was: %v", err) }() <-srv.tomb.Dying() - lis.Close() } func (srv *Server) apiHandler(w http.ResponseWriter, req *http.Request) { - reqNotifier := newRequestNotifier() + reqNotifier := newRequestNotifier(&srv.connections) reqNotifier.join(req) defer reqNotifier.leave() wsServer := websocket.Server{ Handler: func(conn *websocket.Conn) { - srv.wg.Add(1) - defer srv.wg.Done() - // If we've got to this stage and the tomb is still - // alive, we know that any tomb.Kill must occur after we - // have called wg.Add, so we avoid the possibility of a - // handler goroutine running after Stop has returned.
- if srv.tomb.Err() != tomb.ErrStillAlive { - return - } - envUUID := req.URL.Query().Get(":envuuid") - logger.Tracef("got a request for env %q", envUUID) - if err := srv.serveConn(conn, reqNotifier, envUUID); err != nil { + modelUUID := req.URL.Query().Get(":modeluuid") + logger.Tracef("got a request for model %q", modelUUID) + if err := srv.serveConn(conn, reqNotifier, modelUUID); err != nil { logger.Errorf("error serving RPCs: %v", err) } }, @@ -426,12 +481,7 @@ wsServer.ServeHTTP(w, req) } -// Addr returns the address that the server is listening on. -func (srv *Server) Addr() *net.TCPAddr { - return srv.addr -} - -func (srv *Server) serveConn(wsConn *websocket.Conn, reqNotifier *requestNotifier, envUUID string) error { +func (srv *Server) serveConn(wsConn *websocket.Conn, reqNotifier *requestNotifier, modelUUID string) error { codec := jsoncodec.NewWebsocket(wsConn) if loggo.GetLogger("juju.rpc.jsoncodec").EffectiveLogLevel() <= loggo.TRACE { codec.SetLogging(true) @@ -444,13 +494,9 @@ } conn := rpc.NewConn(codec, notifier) - var h *apiHandler - st, err := validateEnvironUUID(validateArgs{statePool: srv.statePool, envUUID: envUUID}) - if err == nil { - h, err = newApiHandler(srv, st, conn, reqNotifier, envUUID) - } + h, err := srv.newAPIHandler(conn, reqNotifier, modelUUID) if err != nil { - conn.Serve(&errRoot{err}, serverError) + conn.ServeFinder(&errRoot{err}, serverError) } else { adminApis := make(map[int]interface{}) for apiVersion, factory := range srv.adminApiFactories { @@ -466,9 +512,28 @@ return conn.Close() } +func (srv *Server) newAPIHandler(conn *rpc.Conn, reqNotifier *requestNotifier, modelUUID string) (*apiHandler, error) { + // Note that we don't overwrite modelUUID here because + // newAPIHandler treats an empty modelUUID as signifying + // the API version used. + resolvedModelUUID, err := validateModelUUID(validateArgs{ + statePool: srv.statePool, + modelUUID: modelUUID, + }) + if err != nil { + return nil, errors.Trace(err) + } + st, err := srv.statePool.Get(resolvedModelUUID) + if err != nil { + return nil, errors.Trace(err) + } + return newApiHandler(srv, st, conn, reqNotifier, modelUUID) +} + func (srv *Server) mongoPinger() error { timer := time.NewTimer(0) - session := srv.state.MongoSession() + session := srv.state.MongoSession().Copy() + defer session.Close() for { select { case <-timer.C: @@ -489,5 +554,3 @@ } return nil } - -var logRequests = true === added file 'src/github.com/juju/juju/apiserver/authcontext.go' --- src/github.com/juju/juju/apiserver/authcontext.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/authcontext.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,131 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "net/http" + "sync" + + "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + "github.com/juju/juju/apiserver/authentication" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +// authContext holds authentication context shared +// between all API endpoints. +type authContext struct { + srv *Server + + agentAuth authentication.AgentAuthenticator + userAuth authentication.UserAuthenticator + + // macaroonAuthOnce guards the fields below it. 
+ macaroonAuthOnce sync.Once + _macaroonAuth *authentication.MacaroonAuthenticator + _macaroonAuthError error +} + +// newAuthContext creates a new authentication context for srv. +func newAuthContext(srv *Server) *authContext { + return &authContext{ + srv: srv, + } +} + +// Authenticate implements authentication.EntityAuthenticator +// by choosing the right kind of authentication for the given +// tag. +func (ctxt *authContext) Authenticate(entityFinder authentication.EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) { + auth, err := ctxt.authenticatorForTag(tag) + if err != nil { + return nil, errors.Trace(err) + } + return auth.Authenticate(entityFinder, tag, req) +} + +// authenticatorForTag returns the authenticator appropriate +// to use for a login with the given possibly-nil tag. +func (ctxt *authContext) authenticatorForTag(tag names.Tag) (authentication.EntityAuthenticator, error) { + if tag == nil { + auth, err := ctxt.macaroonAuth() + if errors.Cause(err) == errMacaroonAuthNotConfigured { + // Make a friendlier error message. + err = errors.New("no credentials provided") + } + if err != nil { + return nil, errors.Trace(err) + } + return auth, nil + } + switch tag.Kind() { + case names.UnitTagKind, names.MachineTagKind: + return &ctxt.agentAuth, nil + case names.UserTagKind: + return &ctxt.userAuth, nil + default: + return nil, errors.Annotatef(common.ErrBadRequest, "unexpected login entity tag") + } +} + +// macaroonAuth returns an authenticator that can authenticate macaroon-based +// logins. If it fails once, it will always fail. +func (ctxt *authContext) macaroonAuth() (authentication.EntityAuthenticator, error) { + ctxt.macaroonAuthOnce.Do(func() { + ctxt._macaroonAuth, ctxt._macaroonAuthError = newMacaroonAuth(ctxt.srv.statePool.SystemState()) + }) + if ctxt._macaroonAuth == nil { + return nil, errors.Trace(ctxt._macaroonAuthError) + } + return ctxt._macaroonAuth, nil +} + +var errMacaroonAuthNotConfigured = errors.New("macaroon authentication is not configured") + +// newMacaroonAuth returns an authenticator that can authenticate +// macaroon-based logins. This is just a helper function for authCtxt.macaroonAuth. +func newMacaroonAuth(st *state.State) (*authentication.MacaroonAuthenticator, error) { + envCfg, err := st.ModelConfig() + if err != nil { + return nil, errors.Annotate(err, "cannot get model config") + } + idURL := envCfg.IdentityURL() + if idURL == "" { + return nil, errMacaroonAuthNotConfigured + } + // The identity server has been configured, + // so configure the bakery service appropriately. + idPK := envCfg.IdentityPublicKey() + if idPK == nil { + // No public key supplied - retrieve it from the identity manager. 
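+ // (This fetches the key over HTTP from the identity location; it runs + // at most once per server process, guarded by macaroonAuthOnce above.)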
+ idPK, err = httpbakery.PublicKeyForLocation(http.DefaultClient, idURL) + if err != nil { + return nil, errors.Annotate(err, "cannot get identity public key") + } + } + svc, err := bakery.NewService( + bakery.NewServiceParams{ + Location: "juju model " + st.ModelUUID(), + Locator: bakery.PublicKeyLocatorMap{ + idURL: idPK, + }, + }, + ) + if err != nil { + return nil, errors.Annotate(err, "cannot make bakery service") + } + var auth authentication.MacaroonAuthenticator + auth.Service = svc + auth.Macaroon, err = svc.NewMacaroon("api-login", nil, nil) + if err != nil { + return nil, errors.Annotate(err, "cannot make macaroon") + } + auth.IdentityLocation = idURL + return &auth, nil +} === modified file 'src/github.com/juju/juju/apiserver/authentication/agent.go' --- src/github.com/juju/juju/apiserver/authentication/agent.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/agent.go 2016-03-22 15:18:22 +0000 @@ -5,8 +5,10 @@ import ( "github.com/juju/errors" + "github.com/juju/names" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) @@ -20,24 +22,32 @@ state.Authenticator } -// Authenticate authenticates the provided entity and returns an error on authentication failure. -func (*AgentAuthenticator) Authenticate(entity state.Entity, password, nonce string) error { +// Authenticate authenticates the provided entity. +// It takes an entityfinder and the tag used to find the entity that requires authentication. +func (*AgentAuthenticator) Authenticate(entityFinder EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) { + entity, err := entityFinder.FindEntity(tag) + if errors.IsNotFound(err) { + return nil, errors.Trace(common.ErrBadCreds) + } + if err != nil { + return nil, errors.Trace(err) + } authenticator, ok := entity.(taggedAuthenticator) if !ok { - return common.ErrBadRequest + return nil, errors.Trace(common.ErrBadRequest) } - if !authenticator.PasswordValid(password) { - return common.ErrBadCreds + if !authenticator.PasswordValid(req.Credentials) { + return nil, errors.Trace(common.ErrBadCreds) } // If this is a machine agent connecting, we need to check the // nonce matches, otherwise the wrong agent might be trying to // connect. 
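// (The nonce was recorded when the machine was provisioned; a mismatch // means an unprovisioned or impostor agent is presenting this machine's // credentials.)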
if machine, ok := authenticator.(*state.Machine); ok { - if !machine.CheckProvisioned(nonce) { - return errors.NotProvisionedf("machine %v", machine.Id()) + if !machine.CheckProvisioned(req.Nonce) { + return nil, errors.NotProvisionedf("machine %v", machine.Id()) } } - return nil + return entity, nil } === modified file 'src/github.com/juju/juju/apiserver/authentication/agent_test.go' --- src/github.com/juju/juju/apiserver/authentication/agent_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/agent_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/authentication" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testing/factory" @@ -101,8 +102,12 @@ for i, t := range testCases { c.Logf("test %d: %s", i, t.about) var authenticator authentication.AgentAuthenticator - err := authenticator.Authenticate(t.entity, t.credentials, t.nonce) - c.Check(err, jc.ErrorIsNil) + entity, err := authenticator.Authenticate(s.State, t.entity.Tag(), params.LoginRequest{ + Credentials: t.credentials, + Nonce: t.nonce, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(entity.Tag(), gc.DeepEquals, t.entity.Tag()) } } @@ -133,7 +138,11 @@ for i, t := range testCases { c.Logf("test %d: %s", i, t.about) var authenticator authentication.AgentAuthenticator - err := authenticator.Authenticate(t.entity, t.credentials, t.nonce) - c.Check(err, gc.ErrorMatches, t.errorMessage) + entity, err := authenticator.Authenticate(s.State, t.entity.Tag(), params.LoginRequest{ + Credentials: t.credentials, + Nonce: t.nonce, + }) + c.Assert(err, gc.ErrorMatches, t.errorMessage) + c.Assert(entity, gc.IsNil) } } === removed file 'src/github.com/juju/juju/apiserver/authentication/authenticator.go' --- src/github.com/juju/juju/apiserver/authentication/authenticator.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/authenticator.go 1970-01-01 00:00:00 +0000 @@ -1,21 +0,0 @@ -// Copyright 2014 Canonical Ltd. All rights reserved. -// Licensed under the AGPLv3, see LICENCE file for details. - -package authentication - -import ( - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/state" -) - -// FindEntityAuthenticator looks up the authenticator for the entity identified tag. -func FindEntityAuthenticator(entity state.Entity) (EntityAuthenticator, error) { - switch entity.(type) { - case *state.Machine, *state.Unit: - return &AgentAuthenticator{}, nil - case *state.User: - return &UserAuthenticator{}, nil - } - - return nil, common.ErrBadRequest -} === removed file 'src/github.com/juju/juju/apiserver/authentication/authenticator_test.go' --- src/github.com/juju/juju/apiserver/authentication/authenticator_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/authenticator_test.go 1970-01-01 00:00:00 +0000 @@ -1,43 +0,0 @@ -// Copyright 2014 Canonical Ltd. All rights reserved. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package authentication_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/authentication" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type AgentAuthenticatorSuite struct { - testing.JujuConnSuite -} - -func (s *AgentAuthenticatorSuite) TestFindEntityAuthenticatorFails(c *gc.C) { - // add relation - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - wordpressEP, err := wordpress.Endpoint("db") - c.Assert(err, jc.ErrorIsNil) - mysql := s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql")) - mysqlEP, err := mysql.Endpoint("server") - c.Assert(err, jc.ErrorIsNil) - relation, err := s.State.AddRelation(wordpressEP, mysqlEP) - c.Assert(err, jc.ErrorIsNil) - - _, err = authentication.FindEntityAuthenticator(relation) - c.Assert(err, gc.ErrorMatches, "invalid request") -} - -func (s *AgentAuthenticatorSuite) TestFindEntityAuthenticator(c *gc.C) { - fact := factory.NewFactory(s.State) - user := fact.MakeUser(c, &factory.UserParams{Password: "password"}) - authenticator, err := authentication.FindEntityAuthenticator(user) - c.Assert(err, jc.ErrorIsNil) - c.Assert(authenticator, gc.NotNil) - - err = authenticator.Authenticate(user, "password", "nonce") - c.Assert(err, jc.ErrorIsNil) -} === modified file 'src/github.com/juju/juju/apiserver/authentication/interfaces.go' --- src/github.com/juju/juju/apiserver/authentication/interfaces.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/interfaces.go 2016-03-22 15:18:22 +0000 @@ -4,6 +4,9 @@ package authentication import ( + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) @@ -11,5 +14,10 @@ // to authenticate juju entities. type EntityAuthenticator interface { // Authenticate authenticates the given entity - Authenticate(entity state.Entity, password, nonce string) error + Authenticate(entityFinder EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) +} + +// EntityFinder finds the entity described by the tag. +type EntityFinder interface { + FindEntity(tag names.Tag) (state.Entity, error) } === modified file 'src/github.com/juju/juju/apiserver/authentication/package_test.go' --- src/github.com/juju/juju/apiserver/authentication/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/package_test.go 2016-03-22 15:18:22 +0000 @@ -3,13 +3,9 @@ import ( "testing" - gc "gopkg.in/check.v1" - coretesting "github.com/juju/juju/testing" ) -var _ = gc.Suite(&AgentAuthenticatorSuite{}) - func TestAll(t *testing.T) { coretesting.MgoTestPackage(t) } === modified file 'src/github.com/juju/juju/apiserver/authentication/user.go' --- src/github.com/juju/juju/apiserver/authentication/user.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/authentication/user.go 2016-03-22 15:18:22 +0000 @@ -4,22 +4,120 @@ package authentication import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon.v1" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) -// UserIdentityProvider performs authentication for users. +// UserAuthenticator performs password based authentication for users. 
type UserAuthenticator struct { AgentAuthenticator } +const usernameKey = "username" + var _ EntityAuthenticator = (*UserAuthenticator)(nil) // Authenticate authenticates the provided entity and returns an error on authentication failure. -func (u *UserAuthenticator) Authenticate(entity state.Entity, password, nonce string) error { - if _, ok := entity.(*state.User); ok { - return u.AgentAuthenticator.Authenticate(entity, password, nonce) - } - - return common.ErrBadRequest +func (u *UserAuthenticator) Authenticate(entityFinder EntityFinder, tag names.Tag, req params.LoginRequest) (state.Entity, error) { + if tag.Kind() != names.UserTagKind { + return nil, errors.Errorf("invalid request") + } + return u.AgentAuthenticator.Authenticate(entityFinder, tag, req) +} + +// MacaroonAuthenticator performs authentication for users using macaroons. +// If the authentication fails because the provided macaroons are invalid, +// and macaroon authentication is enabled, it will return a +// *common.DischargeRequiredError holding a macaroon to be +// discharged. +type MacaroonAuthenticator struct { + // Service holds the service that is + // used to verify macaroon authorization. + Service *bakery.Service + + // Macaroon guards macaroon-authentication-based access + // to the APIs. Appropriate caveats will be added before + // sending it to a client. + Macaroon *macaroon.Macaroon + + // IdentityLocation holds the URL of the trusted third party + // that is used to address the is-authenticated-user + // third party caveat to. + IdentityLocation string +} + +var _ EntityAuthenticator = (*MacaroonAuthenticator)(nil) + +func (m *MacaroonAuthenticator) newDischargeRequiredError(cause error) error { + if m.Service == nil || m.Macaroon == nil { + return errors.Trace(cause) + } + mac := m.Macaroon.Clone() + err := m.Service.AddCaveat(mac, checkers.TimeBeforeCaveat(time.Now().Add(time.Hour))) + if err != nil { + return errors.Annotatef(err, "cannot create macaroon") + } + err = m.Service.AddCaveat(mac, checkers.NeedDeclaredCaveat( + checkers.Caveat{ + Location: m.IdentityLocation, + Condition: "is-authenticated-user", + }, + usernameKey, + )) + if err != nil { + return errors.Annotatef(err, "cannot create macaroon") + } + return &common.DischargeRequiredError{ + Cause: cause, + Macaroon: mac, + } +} + +// Authenticate authenticates the provided entity. If no macaroon is provided, it will +// return a *DischargeRequiredError containing a macaroon that can be used to grant access. +func (m *MacaroonAuthenticator) Authenticate(entityFinder EntityFinder, _ names.Tag, req params.LoginRequest) (state.Entity, error) { + declared, err := m.Service.CheckAny(req.Macaroons, nil, checkers.New(checkers.TimeBefore)) + if _, ok := errors.Cause(err).(*bakery.VerificationError); ok { + return nil, m.newDischargeRequiredError(err) + } + if err != nil { + return nil, errors.Trace(err) + } + username := declared[usernameKey] + var tag names.UserTag + if names.IsValidUserName(username) { + // The name is a local name without an explicit @local suffix. + // In this case, for compatibility with 3rd parties that don't + // care to add their own domain, we add an @external domain + // to ensure there is no confusion between local and external + // users. + // TODO(rog) remove this logic when deployed dischargers + // always add an @ domain. + tag = names.NewLocalUserTag(username).WithDomain("external") + } else { + // We have a name with an explicit domain (or an invalid user name).
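+ // For example: "bob" was handled above and became bob@external; + // "bob@elsewhere" is accepted verbatim; "bob@local" is rejected just + // below, since an external identity provider must never mint local names.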
+		if !names.IsValidUser(username) {
+			return nil, errors.Errorf("%q is an invalid user name", username)
+		}
+		tag = names.NewUserTag(username)
+		if tag.IsLocal() {
+			return nil, errors.Errorf("external identity provider has provided ostensibly local name %q", username)
+		}
+	}
+	entity, err := entityFinder.FindEntity(tag)
+	if errors.IsNotFound(err) {
+		return nil, errors.Trace(common.ErrBadCreds)
+	} else if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return entity, nil
+}

=== modified file 'src/github.com/juju/juju/apiserver/authentication/user_test.go'
--- src/github.com/juju/juju/apiserver/authentication/user_test.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/apiserver/authentication/user_test.go 2016-03-22 15:18:22 +0000
@@ -4,20 +4,42 @@
 package authentication_test
 
 import (
+	"net/http"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/names"
 	jc "github.com/juju/testing/checkers"
 	"github.com/juju/utils"
 	gc "gopkg.in/check.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/bakerytest"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/macaroon.v1"
 
 	"github.com/juju/juju/apiserver/authentication"
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
 	jujutesting "github.com/juju/juju/juju/testing"
 	"github.com/juju/juju/state"
 	"github.com/juju/juju/testing/factory"
 )
 
+var logger = loggo.GetLogger("juju.apiserver.authentication")
+
 type userAuthenticatorSuite struct {
 	jujutesting.JujuConnSuite
 }
+type entityFinder struct {
+	entity state.Entity
+}
+
+func (f entityFinder) FindEntity(tag names.Tag) (state.Entity, error) {
+	return f.entity, nil
+}
+
 var _ = gc.Suite(&userAuthenticatorSuite{})
 
 func (s *userAuthenticatorSuite) TestMachineLoginFails(c *gc.C) {
@@ -36,7 +58,10 @@
 
 	// attempt machine login
 	authenticator := &authentication.UserAuthenticator{}
-	err = authenticator.Authenticate(machine, machinePassword, nonce)
+	_, err = authenticator.Authenticate(nil, machine.Tag(), params.LoginRequest{
+		Credentials: machinePassword,
+		Nonce:       nonce,
+	})
 	c.Assert(err, gc.ErrorMatches, "invalid request")
 }
 
@@ -53,7 +78,10 @@
 
 	// Attempt unit login
 	authenticator := &authentication.UserAuthenticator{}
-	err = authenticator.Authenticate(unit, unitPassword, "")
+	_, err = authenticator.Authenticate(nil, unit.Tag(), params.LoginRequest{
+		Credentials: unitPassword,
+		Nonce:       "",
+	})
 	c.Assert(err, gc.ErrorMatches, "invalid request")
 }
 
@@ -66,7 +94,10 @@
 
 	// User login
 	authenticator := &authentication.UserAuthenticator{}
-	err := authenticator.Authenticate(user, "password", "")
+	_, err := authenticator.Authenticate(s.State, user.Tag(), params.LoginRequest{
+		Credentials: "password",
+		Nonce:       "",
+	})
 	c.Assert(err, jc.ErrorIsNil)
 }
 
@@ -79,7 +110,10 @@
 
 	// User login
 	authenticator := &authentication.UserAuthenticator{}
-	err := authenticator.Authenticate(user, "wrongpassword", "")
+	_, err := authenticator.Authenticate(s.State, user.Tag(), params.LoginRequest{
+		Credentials: "wrongpassword",
+		Nonce:       "",
+	})
 	c.Assert(err, gc.ErrorMatches, "invalid entity name or password")
 }
 
@@ -98,7 +132,150 @@
 
 	// Attempt relation login
 	authenticator := &authentication.UserAuthenticator{}
-	err = authenticator.Authenticate(relation, "dummy-secret", "")
+	_, err = authenticator.Authenticate(nil, relation.Tag(), params.LoginRequest{
+		Credentials: "dummy-secret",
+		Nonce:       "",
+	})
 	c.Assert(err, gc.ErrorMatches, "invalid request")
 }
+
+type macaroonAuthenticatorSuite struct {
+	jujutesting.JujuConnSuite
+	discharger *bakerytest.Discharger
+	// username holds the username that will be
+	// declared in the discharger's caveats.
+	username string
+}
+
+var _ = gc.Suite(&macaroonAuthenticatorSuite{})
+
+func (s *macaroonAuthenticatorSuite) SetUpTest(c *gc.C) {
+	s.discharger = bakerytest.NewDischarger(nil, s.Checker)
+}
+
+func (s *macaroonAuthenticatorSuite) TearDownTest(c *gc.C) {
+	s.discharger.Close()
+}
+
+func (s *macaroonAuthenticatorSuite) Checker(req *http.Request, cond, arg string) ([]checkers.Caveat, error) {
+	return []checkers.Caveat{checkers.DeclaredCaveat("username", s.username)}, nil
+}
+
+var authenticateSuccessTests = []struct {
+	about              string
+	dischargedUsername string
+	finder             authentication.EntityFinder
+	expectTag          string
+	expectError        string
+}{{
+	about:              "user that can be found",
+	dischargedUsername: "bobbrown@somewhere",
+	expectTag:          "user-bobbrown@somewhere",
+	finder: simpleEntityFinder{
+		"user-bobbrown@somewhere": true,
+	},
+}, {
+	about:              "user with no @ domain",
+	dischargedUsername: "bobbrown",
+	finder: simpleEntityFinder{
+		"user-bobbrown@external": true,
+	},
+	expectTag: "user-bobbrown@external",
+}, {
+	about:              "user not found in database",
+	dischargedUsername: "bobbrown@nowhere",
+	finder:             simpleEntityFinder{},
+	expectError:        "invalid entity name or password",
+}, {
+	about:              "invalid user name",
+	dischargedUsername: "--",
+	finder:             simpleEntityFinder{},
+	expectError:        `"--" is an invalid user name`,
+}, {
+	about:              "ostensibly local name",
+	dischargedUsername: "cheat@local",
+	finder: simpleEntityFinder{
+		"cheat@local": true,
+	},
+	expectError: `external identity provider has provided ostensibly local name "cheat@local"`,
+}, {
+	about:              "FindEntity error",
+	dischargedUsername: "bobbrown@nowhere",
+	finder:             errorEntityFinder("lost in space"),
+	expectError:        "lost in space",
+}}
+
+func (s *macaroonAuthenticatorSuite) TestMacaroonAuthentication(c *gc.C) {
+	for i, test := range authenticateSuccessTests {
+		c.Logf("\ntest %d; %s", i, test.about)
+		s.username = test.dischargedUsername
+
+		svc, err := bakery.NewService(bakery.NewServiceParams{
+			Locator: s.discharger,
+		})
+		c.Assert(err, jc.ErrorIsNil)
+		mac, err := svc.NewMacaroon("", nil, nil)
+		c.Assert(err, jc.ErrorIsNil)
+		authenticator := &authentication.MacaroonAuthenticator{
+			Service:          svc,
+			IdentityLocation: s.discharger.Location(),
+			Macaroon:         mac,
+		}
+
+		// Authenticate once to obtain the macaroon to be discharged.
+		_, err = authenticator.Authenticate(test.finder, nil, params.LoginRequest{
+			Credentials: "",
+			Nonce:       "",
+			Macaroons:   nil,
+		})
+
+		// Discharge the macaroon.
+		dischargeErr := errors.Cause(err).(*common.DischargeRequiredError)
+		client := httpbakery.NewClient()
+		ms, err := client.DischargeAll(dischargeErr.Macaroon)
+		c.Assert(err, jc.ErrorIsNil)
+
+		// Authenticate again with the discharged macaroon.
+		entity, err := authenticator.Authenticate(test.finder, nil, params.LoginRequest{
+			Credentials: "",
+			Nonce:       "",
+			Macaroons:   []macaroon.Slice{ms},
+		})
+		if test.expectError != "" {
+			c.Assert(err, gc.ErrorMatches, test.expectError)
+			c.Assert(entity, gc.Equals, nil)
+		} else {
+			c.Assert(err, jc.ErrorIsNil)
+			c.Assert(entity.Tag().String(), gc.Equals, test.expectTag)
+		}
+	}
+}
+
+type errorEntityFinder string
+
+func (f errorEntityFinder) FindEntity(tag names.Tag) (state.Entity, error) {
+	return nil, errors.New(string(f))
+}
+
+type simpleEntityFinder map[string]bool
+
+func (f simpleEntityFinder) FindEntity(tag names.Tag) (state.Entity, error) {
+	if utag, ok := tag.(names.UserTag); ok {
+		// It's a user tag which we need to be in canonical form
+		// so we can look it up unambiguously.
+		tag = names.NewUserTag(utag.Canonical())
+	}
+	if f[tag.String()] {
+		return &simpleEntity{tag}, nil
+	}
+	return nil, errors.NotFoundf("entity %q", tag)
+}
+
+type simpleEntity struct {
+	tag names.Tag
+}
+
+func (e *simpleEntity) Tag() names.Tag {
+	return e.tag
+}

=== added file 'src/github.com/juju/juju/apiserver/authenticator_test.go'
--- src/github.com/juju/juju/apiserver/authenticator_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/authenticator_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,74 @@
+// Copyright 2014 Canonical Ltd. All rights reserved.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package apiserver_test
+
+import (
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/apiserver"
+	"github.com/juju/juju/apiserver/authentication"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/testing/factory"
+)
+
+type agentAuthenticatorSuite struct {
+	testing.JujuConnSuite
+}
+type userFinder struct {
+	user state.Entity
+}
+
+func (u userFinder) FindEntity(tag names.Tag) (state.Entity, error) {
+	return u.user, nil
+}
+
+var _ = gc.Suite(&agentAuthenticatorSuite{})
+
+func (s *agentAuthenticatorSuite) TestAuthenticatorForTag(c *gc.C) {
+	fact := factory.NewFactory(s.State)
+	user := fact.MakeUser(c, &factory.UserParams{Password: "password"})
+	srv := newServer(c, s.State)
+	defer srv.Stop()
+	authenticator, err := apiserver.ServerAuthenticatorForTag(srv, user.Tag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(authenticator, gc.NotNil)
+	userFinder := userFinder{user}
+
+	entity, err := authenticator.Authenticate(userFinder, user.Tag(), params.LoginRequest{
+		Credentials: "password",
+		Nonce:       "nonce",
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(entity, gc.DeepEquals, user)
+}
+
+func (s *agentAuthenticatorSuite) TestMachineGetsAgentAuthenticator(c *gc.C) {
+	srv := newServer(c, s.State)
+	defer srv.Stop()
+	authenticator, err := apiserver.ServerAuthenticatorForTag(srv, names.NewMachineTag("0"))
+	c.Assert(err, jc.ErrorIsNil)
+	_, ok := authenticator.(*authentication.AgentAuthenticator)
+	c.Assert(ok, jc.IsTrue)
+}
+
+func (s *agentAuthenticatorSuite) TestUnitGetsAgentAuthenticator(c *gc.C) {
+	srv := newServer(c, s.State)
+	defer srv.Stop()
+	authenticator, err := apiserver.ServerAuthenticatorForTag(srv, names.NewUnitTag("wordpress/0"))
+	c.Assert(err, jc.ErrorIsNil)
+	_, ok := authenticator.(*authentication.AgentAuthenticator)
+	c.Assert(ok, jc.IsTrue)
+}
+
+func (s *agentAuthenticatorSuite) TestNotSupportedTag(c *gc.C) {
+	srv := newServer(c, s.State)
+	defer srv.Stop()
+	authenticator, err := apiserver.ServerAuthenticatorForTag(srv, names.NewServiceTag("not-support"))
+	c.Assert(err, gc.ErrorMatches, "unexpected login entity tag: invalid request")
+	c.Assert(authenticator, gc.IsNil)
+}

=== modified file 'src/github.com/juju/juju/apiserver/authhttp_test.go'
--- src/github.com/juju/juju/apiserver/authhttp_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/authhttp_test.go 2016-03-22 15:18:22 +0000
@@ -9,19 +9,22 @@
 	"crypto/x509"
 	"encoding/json"
 	"io"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
 
+	"github.com/juju/errors"
 	"github.com/juju/names"
 	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
 	"github.com/juju/utils"
 	"golang.org/x/net/websocket"
 	gc "gopkg.in/check.v1"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
 
-	apihttp "github.com/juju/juju/apiserver/http"
+	apitesting "github.com/juju/juju/api/testing"
 	"github.com/juju/juju/apiserver/params"
-	jujutesting "github.com/juju/juju/juju/testing"
 	"github.com/juju/juju/state"
 	"github.com/juju/juju/testing"
 	"github.com/juju/juju/testing/factory"
@@ -29,13 +32,57 @@
 
 // authHttpSuite provides helpers for testing HTTP "streaming" style APIs.
 type authHttpSuite struct {
-	jujutesting.JujuConnSuite
-	envUUID string
+	// macaroonAuthEnabled may be set by a test suite
+	// before SetUpTest is called. If it is true, macaroon
+	// authentication will be enabled for the duration
+	// of the suite.
+	macaroonAuthEnabled bool
+
+	// MacaroonSuite is embedded because we need
+	// it when macaroonAuthEnabled is true.
+	// When macaroonAuthEnabled is false,
+	// only the JujuConnSuite in it will be initialized;
+	// all other fields will be zero.
+	apitesting.MacaroonSuite
+
+	modelUUID string
+
+	// userTag and password hold the user tag and password
+	// to use in authRequest. When macaroonAuthEnabled
+	// is true, password will be empty.
+	userTag  names.UserTag
+	password string
 }
 
 func (s *authHttpSuite) SetUpTest(c *gc.C) {
-	s.JujuConnSuite.SetUpTest(c)
-	s.envUUID = s.State.EnvironUUID()
+	if s.macaroonAuthEnabled {
+		s.MacaroonSuite.SetUpTest(c)
+	} else {
+		// No macaroons, so don't enable them.
+		s.JujuConnSuite.SetUpTest(c)
+	}
+
+	s.modelUUID = s.State.ModelUUID()
+
+	if s.macaroonAuthEnabled {
+		// When macaroon authentication is enabled, we must use
+		// an external user.
+		s.userTag = names.NewUserTag("bob@authhttpsuite")
+		s.AddModelUser(c, s.userTag.Id())
+	} else {
+		// Make a user in the state.
+		s.password = "password"
+		user := s.Factory.MakeUser(c, &factory.UserParams{Password: s.password})
+		s.userTag = user.UserTag()
+	}
+}
+
+func (s *authHttpSuite) TearDownTest(c *gc.C) {
+	if s.macaroonAuthEnabled {
+		s.MacaroonSuite.TearDownTest(c)
+	} else {
+		s.JujuConnSuite.TearDownTest(c)
+	}
 }
 
 func (s *authHttpSuite) baseURL(c *gc.C) *url.URL {
@@ -47,11 +94,6 @@
 	}
 }
 
-func (s *authHttpSuite) assertErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) {
-	body := assertResponse(c, resp, expCode, apihttp.CTypeJSON)
-	c.Check(jsonResponse(c, body).Error, gc.Matches, expError)
-}
-
 func (s *authHttpSuite) dialWebsocketFromURL(c *gc.C, server string, header http.Header) *websocket.Conn {
 	config := s.makeWebsocketConfigFromURL(c, server, header)
 	c.Logf("dialing %v", server)
@@ -87,66 +129,138 @@
 	return url
 }
 
-// userAuthHttpSuite extends authHttpSuite with helpers for testing
-// HTTP "streaming" style APIs which only accept user logins (not
-// agents).
-type userAuthHttpSuite struct {
-	authHttpSuite
-	userTag            names.UserTag
-	password           string
-	archiveContentType string
-}
-
-func (s *userAuthHttpSuite) SetUpTest(c *gc.C) {
-	s.authHttpSuite.SetUpTest(c)
-	s.password = "password"
-	user := s.Factory.MakeUser(c, &factory.UserParams{Password: s.password})
-	s.userTag = user.UserTag()
-}
-
-func (s *userAuthHttpSuite) sendRequest(c *gc.C, tag, password, method, uri, contentType string, body io.Reader) (*http.Response, error) {
-	c.Logf("sendRequest: %s", uri)
-	req, err := http.NewRequest(method, uri, body)
-	c.Assert(err, jc.ErrorIsNil)
-	if tag != "" && password != "" {
-		req.SetBasicAuth(tag, password)
-	}
-	if contentType != "" {
-		req.Header.Set("Content-Type", contentType)
-	}
-	return utils.GetNonValidatingHTTPClient().Do(req)
-}
-
-func (s *userAuthHttpSuite) setupOtherEnvironment(c *gc.C) *state.State {
-	envState := s.Factory.MakeEnvironment(c, nil)
+// httpRequestParams holds parameters for the authRequest and sendRequest
+// methods.
+type httpRequestParams struct {
+	// do is used to make the HTTP request.
+	// If it is nil, utils.GetNonValidatingHTTPClient().Do will be used.
+	// If the body reader implements io.Seeker,
+	// req.Body will also implement that interface.
+	do func(req *http.Request) (*http.Response, error)
+
+	// expectError holds the error regexp to match
+	// against the error returned from the HTTP Do
+	// request. If it is empty, the error is expected to be
+	// nil.
+	expectError string
+
+	// tag holds the tag to authenticate as.
+	tag string
+
+	// password holds the password associated with the tag.
+	password string
+
+	// method holds the HTTP method to use for the request.
+	method string
+
+	// url holds the URL to send the HTTP request to.
+	url string
+
+	// contentType holds the content type of the request.
+	contentType string
+
+	// body holds the body of the request.
+	body io.Reader
+
+	// jsonBody holds an object to be marshaled as JSON
+	// as the body of the request. If this is specified, body will
+	// be ignored and the Content-Type header will
+	// be set to application/json.
+	jsonBody interface{}
+
+	// nonce holds the machine nonce to provide in the header.
+	nonce string
+}
+
+func (s *authHttpSuite) sendRequest(c *gc.C, p httpRequestParams) *http.Response {
+	c.Logf("sendRequest: %s", p.url)
+	hp := httptesting.DoRequestParams{
+		Do:          p.do,
+		Method:      p.method,
+		URL:         p.url,
+		Body:        p.body,
+		JSONBody:    p.jsonBody,
+		Header:      make(http.Header),
+		Username:    p.tag,
+		Password:    p.password,
+		ExpectError: p.expectError,
+	}
+	if p.contentType != "" {
+		hp.Header.Set("Content-Type", p.contentType)
+	}
+	if p.nonce != "" {
+		hp.Header.Set(params.MachineNonceHeader, p.nonce)
+	}
+	if hp.Do == nil {
+		hp.Do = utils.GetNonValidatingHTTPClient().Do
+	}
+	return httptesting.Do(c, hp)
+}
+
+// bakeryDo provides a function suitable for using in httpRequestParams.Do
+// that will use the given http client (or utils.GetNonValidatingHTTPClient()
+// if client is nil) and use the given getBakeryError function
+// to translate errors in responses.
+func bakeryDo(client *http.Client, getBakeryError func(*http.Response) error) func(*http.Request) (*http.Response, error) {
+	bclient := httpbakery.NewClient()
+	if client != nil {
+		bclient.Client = client
+	} else {
+		// Configure the default client to skip verification.
+		bclient.Client.Transport = utils.NewHttpTLSTransport(&tls.Config{
+			InsecureSkipVerify: true,
+		})
+	}
+	return func(req *http.Request) (*http.Response, error) {
+		var body io.ReadSeeker
+		if req.Body != nil {
+			body = req.Body.(io.ReadSeeker)
+			req.Body = nil
+		}
+		return bclient.DoWithBodyAndCustomError(req, body, getBakeryError)
+	}
+}
+
+// authRequest is like sendRequest but fills out p.tag and p.password
+// from the userTag and password fields in the suite.
+func (s *authHttpSuite) authRequest(c *gc.C, p httpRequestParams) *http.Response {
+	p.tag = s.userTag.String()
+	p.password = s.password
+	return s.sendRequest(c, p)
+}
+
+func (s *authHttpSuite) setupOtherModel(c *gc.C) *state.State {
+	envState := s.Factory.MakeModel(c, nil)
 	s.AddCleanup(func(*gc.C) { envState.Close() })
 	user := s.Factory.MakeUser(c, nil)
-	_, err := envState.AddEnvironmentUser(user.UserTag(), s.userTag, "")
+	_, err := envState.AddModelUser(state.ModelUserSpec{
+		User:      user.UserTag(),
+		CreatedBy: s.userTag})
 	c.Assert(err, jc.ErrorIsNil)
 	s.userTag = user.UserTag()
 	s.password = "password"
-	s.envUUID = envState.EnvironUUID()
+	s.modelUUID = envState.ModelUUID()
 	return envState
 }
 
-func (s *userAuthHttpSuite) authRequest(c *gc.C, method, uri, contentType string, body io.Reader) (*http.Response, error) {
-	return s.sendRequest(c, s.userTag.String(), s.password, method, uri, contentType, body)
-}
-
-func (s *userAuthHttpSuite) uploadRequest(c *gc.C, uri string, asZip bool, path string) (*http.Response, error) {
-	contentType := apihttp.CTypeRaw
-	if asZip {
-		contentType = s.archiveContentType
-	}
-
+func (s *authHttpSuite) uploadRequest(c *gc.C, uri string, contentType, path string) *http.Response {
 	if path == "" {
-		return s.authRequest(c, "POST", uri, contentType, nil)
+		return s.authRequest(c, httpRequestParams{
+			method:      "POST",
+			url:         uri,
+			contentType: contentType,
+		})
 	}
 	file, err := os.Open(path)
 	c.Assert(err, jc.ErrorIsNil)
 	defer file.Close()
-	return s.authRequest(c, "POST", uri, contentType, file)
+	return s.authRequest(c, httpRequestParams{
+		method:      "POST",
+		url:         uri,
+		contentType: contentType,
+		body:        file,
+	})
 }
 
 // assertJSONError checks the JSON encoded error returned by the log
@@ -167,3 +281,49 @@
 	c.Assert(err, jc.ErrorIsNil)
 	return errResult
 }
+
+func assertResponse(c *gc.C, resp *http.Response, expHTTPStatus int, expContentType string) []byte {
+	body, err := ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Check(resp.StatusCode, gc.Equals, expHTTPStatus, gc.Commentf("body: %s", body))
+	ctype := resp.Header.Get("Content-Type")
+	c.Assert(ctype, gc.Equals, expContentType)
+	return body
+}
+
+// bakeryGetError implements a getError function
+// appropriate for passing to httpbakery.Client.DoWithBodyAndCustomError
+// for any endpoint that returns the error in a top level Error field.
+func bakeryGetError(resp *http.Response) error {
+	if resp.StatusCode != http.StatusUnauthorized {
+		return nil
+	}
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return errors.Annotatef(err, "cannot read body")
+	}
+	var errResp params.ErrorResult
+	if err := json.Unmarshal(data, &errResp); err != nil {
+		return errors.Annotatef(err, "cannot unmarshal body")
+	}
+	if errResp.Error == nil {
+		return errors.New("no error found in error response body")
+	}
+	if errResp.Error.Code != params.CodeDischargeRequired {
+		return errResp.Error
+	}
+	if errResp.Error.Info == nil {
+		return errors.New("no error info found in discharge-required response error")
+	}
+	// It's a discharge-required error, so make an appropriate httpbakery
+	// error from it.
+	return &httpbakery.Error{
+		Message: errResp.Error.Message,
+		Code:    httpbakery.ErrDischargeRequired,
+		Info: &httpbakery.ErrorInfo{
+			Macaroon:     errResp.Error.Info.Macaroon,
+			MacaroonPath: errResp.Error.Info.MacaroonPath,
		},
+	}
+}

=== modified file 'src/github.com/juju/juju/apiserver/backup.go'
--- src/github.com/juju/juju/apiserver/backup.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/backup.go 2016-03-22 15:18:22 +0000
@@ -13,7 +13,8 @@
 	"github.com/juju/errors"
 
 	apiserverbackups "github.com/juju/juju/apiserver/backups"
-	apihttp "github.com/juju/juju/apiserver/http"
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/httpattachment"
 	"github.com/juju/juju/apiserver/params"
 	"github.com/juju/juju/state"
 	"github.com/juju/juju/state/backups"
@@ -26,24 +27,19 @@
 
 // backupHandler handles backup requests.
 type backupHandler struct {
-	httpHandler
+	ctxt httpContext
 }
 
 func (h *backupHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
 	// Validate before authenticate because the authentication is dependent
 	// on the state connection that is determined during the validation.
-	stateWrapper, err := h.validateEnvironUUID(req)
+	st, _, err := h.ctxt.stateForRequestAuthenticatedUser(req)
 	if err != nil {
-		h.sendError(resp, http.StatusNotFound, err.Error())
-		return
-	}
-
-	if err := stateWrapper.authenticateUser(req); err != nil {
-		h.authError(resp, h)
-		return
-	}
-
-	backups, closer := newBackups(stateWrapper.state)
+		h.sendError(resp, err)
+		return
+	}
+
+	backups, closer := newBackups(st)
 	defer closer.Close()
 
 	switch req.Method {
@@ -51,7 +47,7 @@
 		logger.Infof("handling backups download request")
 		id, err := h.download(backups, resp, req)
 		if err != nil {
-			h.sendError(resp, http.StatusInternalServerError, err.Error())
+			h.sendError(resp, err)
 			return
 		}
 		logger.Infof("backups download request successful for %q", id)
@@ -59,12 +55,12 @@
 		logger.Infof("handling backups upload request")
 		id, err := h.upload(backups, resp, req)
 		if err != nil {
-			h.sendError(resp, http.StatusInternalServerError, err.Error())
+			h.sendError(resp, err)
 			return
 		}
 		logger.Infof("backups upload request successful for %q", id)
 	default:
-		h.sendError(resp, http.StatusMethodNotAllowed, fmt.Sprintf("unsupported method: %q", req.Method))
+		h.sendError(resp, errors.MethodNotAllowedf("unsupported method: %q", req.Method))
 	}
 }
 
@@ -81,7 +77,7 @@
 	}
 	defer archive.Close()
 
-	err = h.sendFile(archive, meta.Checksum(), apihttp.DigestSHA, resp)
+	err = h.sendFile(archive, meta.Checksum(), resp)
 	return args.ID, err
 }
 
@@ -91,7 +87,7 @@
 	defer req.Body.Close()
 
 	var metaResult params.BackupsMetadataResult
-	archive, err := apihttp.ExtractRequestAttachment(req, &metaResult)
+	archive, err := httpattachment.Get(req, &metaResult)
 	if err != nil {
 		return "", err
 	}
@@ -106,7 +102,7 @@
 		return "", err
 	}
 
-	h.sendJSON(resp, http.StatusOK, &params.BackupsUploadResult{ID: id})
+	sendStatusAndJSON(resp, http.StatusOK, &params.BackupsUploadResult{ID: id})
 	return id, nil
 }
 
@@ -137,7 +133,7 @@
 }
 
 func (h *backupHandler) parseGETArgs(req *http.Request) (*params.BackupsDownloadArgs, error) {
-	body, err := h.read(req, apihttp.CTypeJSON)
+	body, err := h.read(req, params.ContentTypeJSON)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
@@ -150,10 +146,10 @@
 	return &args, nil
 }
 
-func (h *backupHandler) sendFile(file io.Reader, checksum string, algorithm apihttp.DigestAlgorithm, resp http.ResponseWriter) error {
+func (h *backupHandler) sendFile(file io.Reader, checksum string, resp http.ResponseWriter) error {
 	// We don't set the Content-Length header, leaving it at -1.
-	resp.Header().Set("Content-Type", apihttp.CTypeRaw)
-	resp.Header().Set("Digest", fmt.Sprintf("%s=%s", algorithm, checksum))
+	resp.Header().Set("Content-Type", params.ContentTypeRaw)
+	resp.Header().Set("Digest", fmt.Sprintf("%s=%s", params.DigestSHA, checksum))
 	resp.WriteHeader(http.StatusOK)
 	if _, err := io.Copy(resp, file); err != nil {
 		return errors.Annotate(err, "while streaming archive")
@@ -161,27 +157,12 @@
 	return nil
 }
 
-// sendJSON sends a JSON-encoded result.
-func (h *backupHandler) sendJSON(w http.ResponseWriter, statusCode int, result interface{}) {
-	body, err := json.Marshal(result)
-	if err != nil {
-		logger.Errorf("failed to serialize the result (%v): %v", result, err)
-		return
-	}
-
-	w.Header().Set("Content-Type", apihttp.CTypeJSON)
-	w.WriteHeader(statusCode)
-	w.Write(body)
-
-	logger.Infof("backups request successful")
-}
-
 // sendError sends a JSON-encoded error response.
-func (h *backupHandler) sendError(w http.ResponseWriter, statusCode int, message string) {
-	failure := params.Error{
-		Message: message,
-		// Leave Code empty.
-	}
+// Note the difference from the error response sent by
+// the sendError function - the error is encoded directly
+// rather than in the Error field.
+func (h *backupHandler) sendError(w http.ResponseWriter, err error) {
+	err, status := common.ServerErrorAndStatus(err)
 
-	h.sendJSON(w, statusCode, &failure)
+	sendStatusAndJSON(w, status, err)
 }

=== modified file 'src/github.com/juju/juju/apiserver/backup_test.go'
--- src/github.com/juju/juju/apiserver/backup_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/backup_test.go 2016-03-22 15:18:22 +0000
@@ -17,23 +17,23 @@
 	jc "github.com/juju/testing/checkers"
 	"github.com/juju/utils"
 	gc "gopkg.in/check.v1"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
 
 	"github.com/juju/juju/apiserver"
 	apiserverbackups "github.com/juju/juju/apiserver/backups"
-	apihttp "github.com/juju/juju/apiserver/http"
 	"github.com/juju/juju/apiserver/params"
 	"github.com/juju/juju/state"
 	"github.com/juju/juju/state/backups"
 	backupstesting "github.com/juju/juju/state/backups/testing"
 )
 
-type baseBackupsSuite struct {
-	userAuthHttpSuite
+type backupsCommonSuite struct {
+	authHttpSuite
 
 	fake *backupstesting.FakeBackups
 }
 
-func (s *baseBackupsSuite) SetUpTest(c *gc.C) {
-	s.userAuthHttpSuite.SetUpTest(c)
+func (s *backupsCommonSuite) SetUpTest(c *gc.C) {
+	s.authHttpSuite.SetUpTest(c)
 
 	s.fake = &backupstesting.FakeBackups{}
 	s.PatchValue(apiserver.NewBackups,
@@ -43,43 +43,42 @@
 	)
 }
 
-func (s *baseBackupsSuite) backupURL(c *gc.C) string {
-	environ, err := s.State.Environment()
+func (s *backupsCommonSuite) backupURL(c *gc.C) string {
+	environ, err := s.State.Model()
 	c.Assert(err, jc.ErrorIsNil)
 	uri := s.baseURL(c)
-	uri.Path = fmt.Sprintf("/environment/%s/backups", environ.UUID())
+	uri.Path = fmt.Sprintf("/model/%s/backups", environ.UUID())
 	return uri.String()
 }
 
-func (s *baseBackupsSuite) checkErrorResponse(c *gc.C, resp *http.Response, statusCode int, msg string) {
-	c.Check(resp.StatusCode, gc.Equals, statusCode)
-	c.Check(resp.Header.Get("Content-Type"), gc.Equals, apihttp.CTypeJSON)
-
+func (s *backupsCommonSuite) assertErrorResponse(c *gc.C, resp *http.Response, statusCode int, msg string) *params.Error {
 	body, err := ioutil.ReadAll(resp.Body)
 	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(resp.StatusCode, gc.Equals, statusCode, gc.Commentf("body: %s", body))
+	c.Assert(resp.Header.Get("Content-Type"), gc.Equals, params.ContentTypeJSON)
+
 	var failure params.Error
 	err = json.Unmarshal(body, &failure)
 	c.Assert(err, jc.ErrorIsNil)
-	c.Check(&failure, gc.ErrorMatches, msg)
+	c.Assert(&failure, gc.ErrorMatches, msg, gc.Commentf("body: %s", body))
+	return &failure
 }
 
 type backupsSuite struct {
-	baseBackupsSuite
+	backupsCommonSuite
 }
 
 var _ = gc.Suite(&backupsSuite{})
 
 func (s *backupsSuite) TestRequiresAuth(c *gc.C) {
-	resp, err := s.sendRequest(c, "", "", "GET", s.backupURL(c), "", nil)
-	c.Assert(err, jc.ErrorIsNil)
-	s.checkErrorResponse(c, resp, http.StatusUnauthorized, "unauthorized")
+	resp := s.sendRequest(c, httpRequestParams{method: "GET", url: s.backupURL(c)})
+	s.assertErrorResponse(c, resp, http.StatusUnauthorized, "no credentials provided")
 }
 
 func (s *backupsSuite) checkInvalidMethod(c *gc.C, method, url string) {
-	resp, err := s.authRequest(c, method, url, "", nil)
-	c.Assert(err, jc.ErrorIsNil)
-	s.checkErrorResponse(c, resp, http.StatusMethodNotAllowed, `unsupported method: "`+method+`"`)
+	resp := s.authRequest(c, httpRequestParams{method: method, url: url})
+	s.assertErrorResponse(c, resp, http.StatusMethodNotAllowed, `unsupported method: "`+method+`"`)
 }
 
 func (s *backupsSuite) TestInvalidHTTPMethods(c *gc.C) {
@@ -101,49 +100,129 @@
 	err = machine.SetPassword(password)
 	c.Assert(err, jc.ErrorIsNil)
 
-	resp, err := s.sendRequest(c, machine.Tag().String(), password, "GET", s.backupURL(c), "", nil)
-	c.Assert(err, jc.ErrorIsNil)
-	s.checkErrorResponse(c, resp, http.StatusUnauthorized, "unauthorized")
+	resp := s.sendRequest(c, httpRequestParams{
+		tag:      machine.Tag().String(),
+		password: password,
+		method:   "GET",
+		url:      s.backupURL(c),
+		nonce:    "fake_nonce",
+	})
+	s.assertErrorResponse(c, resp, http.StatusUnauthorized, "invalid entity name or password")
 
 	// Now try a user login.
-	resp, err = s.authRequest(c, "POST", s.backupURL(c), "", nil)
-	c.Assert(err, jc.ErrorIsNil)
-	s.checkErrorResponse(c, resp, http.StatusMethodNotAllowed, `unsupported method: "POST"`)
+	resp = s.authRequest(c, httpRequestParams{method: "POST", url: s.backupURL(c)})
+	s.assertErrorResponse(c, resp, http.StatusMethodNotAllowed, `unsupported method: "POST"`)
+}
+
+type backupsWithMacaroonsSuite struct {
+	backupsCommonSuite
+}
+
+var _ = gc.Suite(&backupsWithMacaroonsSuite{})
+
+func (s *backupsWithMacaroonsSuite) SetUpTest(c *gc.C) {
+	s.macaroonAuthEnabled = true
+	s.backupsCommonSuite.SetUpTest(c)
+}
+
+func (s *backupsWithMacaroonsSuite) TestWithNoBasicAuthReturnsDischargeRequiredError(c *gc.C) {
+	resp := s.sendRequest(c, httpRequestParams{
+		method:   "GET",
+		jsonBody: &params.BackupsDownloadArgs{"bad-id"},
+		url:      s.backupURL(c),
+	})
+
+	errResp := s.assertErrorResponse(c, resp, http.StatusUnauthorized, "verification failed: no macaroons")
+	c.Assert(errResp.Code, gc.Equals, params.CodeDischargeRequired)
+	c.Assert(errResp.Info, gc.NotNil)
+	c.Assert(errResp.Info.Macaroon, gc.NotNil)
+}
+
+func (s *backupsWithMacaroonsSuite) TestCanGetWithDischargedMacaroon(c *gc.C) {
+	checkCount := 0
+	s.DischargerLogin = func() string {
+		checkCount++
+		return s.userTag.Id()
+	}
+	s.fake.Error = errors.New("failed!")
+	resp := s.sendRequest(c, httpRequestParams{
+		do:       s.doer(),
+		method:   "GET",
+		jsonBody: &params.BackupsDownloadArgs{"bad-id"},
+		url:      s.backupURL(c),
+	})
+	s.assertErrorResponse(c, resp, http.StatusInternalServerError, "failed!")
+	c.Assert(checkCount, gc.Equals, 1)
+}
+
+// doer returns a Do function that can make a bakery request
+// appropriate for a backups endpoint.
+func (s *backupsWithMacaroonsSuite) doer() func(*http.Request) (*http.Response, error) {
+	return bakeryDo(nil, backupsBakeryGetError)
+}
+
+// backupsBakeryGetError implements a getError function
+// appropriate for passing to httpbakery.Client.DoWithBodyAndCustomError
+// for the backups endpoint.
+func backupsBakeryGetError(resp *http.Response) error {
+	if resp.StatusCode != http.StatusUnauthorized {
+		return nil
+	}
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return errors.Annotatef(err, "cannot read body")
+	}
+	var errResp params.Error
+	if err := json.Unmarshal(data, &errResp); err != nil {
+		return errors.Annotatef(err, "cannot unmarshal body")
+	}
+	if errResp.Code != params.CodeDischargeRequired {
+		return &errResp
+	}
+	if errResp.Info == nil {
+		return errors.New("no error info found in discharge-required response error")
+	}
+	// It's a discharge-required error, so make an appropriate httpbakery
+	// error from it.
+	return &httpbakery.Error{
+		Message: errResp.Message,
+		Code:    httpbakery.ErrDischargeRequired,
+		Info: &httpbakery.ErrorInfo{
+			Macaroon:     errResp.Info.Macaroon,
+			MacaroonPath: errResp.Info.MacaroonPath,
+		},
+	}
 }
 
 type backupsDownloadSuite struct {
-	baseBackupsSuite
-	body []byte
+	backupsCommonSuite
 }
 
 var _ = gc.Suite(&backupsDownloadSuite{})
 
-func (s *backupsDownloadSuite) newBody(c *gc.C, id string) *bytes.Buffer {
-	args := params.BackupsDownloadArgs{
-		ID: id,
-	}
-	body, err := json.Marshal(args)
-	c.Assert(err, jc.ErrorIsNil)
-	return bytes.NewBuffer(body)
-}
-
-func (s *backupsDownloadSuite) sendValid(c *gc.C) *http.Response {
+// sendValid sends a valid GET request to the backups endpoint
+// and returns the response and the expected contents of the
+// archive if the request succeeds.
+func (s *backupsDownloadSuite) sendValidGet(c *gc.C) (resp *http.Response, archiveBytes []byte) {
 	meta := backupstesting.NewMetadata()
 	archive, err := backupstesting.NewArchiveBasic(meta)
 	c.Assert(err, jc.ErrorIsNil)
+	archiveBytes = archive.Bytes()
 	s.fake.Meta = meta
 	s.fake.Archive = ioutil.NopCloser(archive)
-	s.body = archive.Bytes()
 
-	ctype := apihttp.CTypeJSON
-	body := s.newBody(c, meta.ID())
-	resp, err := s.authRequest(c, "GET", s.backupURL(c), ctype, body)
-	c.Assert(err, jc.ErrorIsNil)
-	return resp
+	return s.authRequest(c, httpRequestParams{
+		method:      "GET",
+		url:         s.backupURL(c),
+		contentType: params.ContentTypeJSON,
+		jsonBody: params.BackupsDownloadArgs{
+			ID: meta.ID(),
+		},
+	}), archiveBytes
 }
 
 func (s *backupsDownloadSuite) TestCalls(c *gc.C) {
-	resp := s.sendValid(c)
+	resp, _ := s.sendValidGet(c)
 	defer resp.Body.Close()
 
 	c.Check(s.fake.Calls, gc.DeepEquals, []string{"Get"})
@@ -151,34 +230,34 @@
 }
 
 func (s *backupsDownloadSuite) TestResponse(c *gc.C) {
-	resp := s.sendValid(c)
+	resp, _ := s.sendValidGet(c)
 	defer resp.Body.Close()
 	meta := s.fake.Meta
 
 	c.Check(resp.StatusCode, gc.Equals, http.StatusOK)
-	c.Check(resp.Header.Get("Digest"), gc.Equals, string(apihttp.DigestSHA)+"="+meta.Checksum())
-	c.Check(resp.Header.Get("Content-Type"), gc.Equals, apihttp.CTypeRaw)
+	c.Check(resp.Header.Get("Digest"), gc.Equals, string(params.DigestSHA)+"="+meta.Checksum())
+	c.Check(resp.Header.Get("Content-Type"), gc.Equals, params.ContentTypeRaw)
 }
 
 func (s *backupsDownloadSuite) TestBody(c *gc.C) {
-	resp := s.sendValid(c)
+	resp, archiveBytes := s.sendValidGet(c)
 	defer resp.Body.Close()
 
 	body, err := ioutil.ReadAll(resp.Body)
 	c.Assert(err, jc.ErrorIsNil)
-	c.Check(body, jc.DeepEquals, s.body)
+	c.Check(body, jc.DeepEquals, archiveBytes)
 }
 
 func (s *backupsDownloadSuite) TestErrorWhenGetFails(c *gc.C) {
 	s.fake.Error = errors.New("failed!")
-	resp := s.sendValid(c)
+	resp, _ := s.sendValidGet(c)
 	defer resp.Body.Close()
 
-	s.checkErrorResponse(c, resp, http.StatusInternalServerError, "failed!")
+	s.assertErrorResponse(c, resp, http.StatusInternalServerError, "failed!")
 }
 
 type backupsUploadSuite struct {
-	baseBackupsSuite
+	backupsCommonSuite
 	meta *backups.Metadata
 }
@@ -196,7 +275,7 @@
 	metaResult := apiserverbackups.ResultFromMetadata(s.meta)
 	header := make(textproto.MIMEHeader)
 	header.Set("Content-Disposition", `form-data; name="metadata"`)
-	header.Set("Content-Type", apihttp.CTypeJSON)
+	header.Set("Content-Type", params.ContentTypeJSON)
 	part, err := writer.CreatePart(header)
 	c.Assert(err, jc.ErrorIsNil)
 	err = json.NewEncoder(part).Encode(metaResult)
@@ -211,9 +290,7 @@
 
 	// Send the request.
 	ctype := writer.FormDataContentType()
-	resp, err := s.authRequest(c, "PUT", s.backupURL(c), ctype, &parts)
-	c.Assert(err, jc.ErrorIsNil)
-	return resp
+	return s.authRequest(c, httpRequestParams{method: "PUT", url: s.backupURL(c), contentType: ctype, body: &parts})
 }
 
 func (s *backupsUploadSuite) TestCalls(c *gc.C) {
@@ -230,7 +307,7 @@
 	defer resp.Body.Close()
 
 	c.Check(resp.StatusCode, gc.Equals, http.StatusOK)
-	c.Check(resp.Header.Get("Content-Type"), gc.Equals, apihttp.CTypeJSON)
+	c.Check(resp.Header.Get("Content-Type"), gc.Equals, params.ContentTypeJSON)
 }
 
 func (s *backupsUploadSuite) TestBody(c *gc.C) {
@@ -250,5 +327,5 @@
 	resp := s.sendValid(c, "")
 	defer resp.Body.Close()
 
-	s.checkErrorResponse(c, resp, http.StatusInternalServerError, "failed!")
+	s.assertErrorResponse(c, resp, http.StatusInternalServerError, "failed!")
 }

=== modified file 'src/github.com/juju/juju/apiserver/backups/backups.go'
--- src/github.com/juju/juju/apiserver/backups/backups.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/backups/backups.go 2016-03-22 15:18:22 +0000
@@ -16,7 +16,7 @@
 )
 
 func init() {
-	common.RegisterStandardFacade("Backups", 0, NewAPI)
+	common.RegisterStandardFacade("Backups", 1, NewAPI)
 }
 
 var logger = loggo.GetLogger("juju.apiserver.backups")
@@ -36,9 +36,9 @@
 		return nil, errors.Trace(common.ErrPerm)
 	}
 
-	// For now, backup operations are only permitted on the system environment.
-	if !st.IsStateServer() {
-		return nil, errors.New("backups are not supported for hosted environments")
+	// For now, backup operations are only permitted on the controller environment.
+	if !st.IsController() {
+		return nil, errors.New("backups are not supported for hosted models")
 	}
 
 	// Get the backup paths.
@@ -106,7 +106,7 @@
 	}
 
 	result.Notes = meta.Notes
-	result.Environment = meta.Origin.Environment
+	result.Model = meta.Origin.Model
 	result.Machine = meta.Origin.Machine
 	result.Hostname = meta.Origin.Hostname
 	result.Version = meta.Origin.Version
@@ -123,7 +123,7 @@
 	if !result.Finished.IsZero() {
 		meta.Finished = &result.Finished
 	}
-	meta.Origin.Environment = result.Environment
+	meta.Origin.Model = result.Model
 	meta.Origin.Machine = result.Machine
 	meta.Origin.Hostname = result.Hostname
 	meta.Origin.Version = result.Version

=== modified file 'src/github.com/juju/juju/apiserver/backups/backups_test.go'
--- src/github.com/juju/juju/apiserver/backups/backups_test.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/backups/backups_test.go 2016-03-22 15:18:22 +0000
@@ -63,7 +63,7 @@
 }
 
 func (s *backupsSuite) TestRegistered(c *gc.C) {
-	_, err := common.Facades.GetType("Backups", 0)
+	_, err := common.Facades.GetType("Backups", 1)
 	c.Check(err, jc.ErrorIsNil)
 }
 
@@ -80,8 +80,8 @@
 }
 
 func (s *backupsSuite) TestNewAPIHostedEnvironmentFails(c *gc.C) {
-	otherState := factory.NewFactory(s.State).MakeEnvironment(c, nil)
+	otherState := factory.NewFactory(s.State).MakeModel(c, nil)
 	defer otherState.Close()
 	_, err := backupsAPI.NewAPI(otherState, s.resources, s.authorizer)
-	c.Check(err, gc.ErrorMatches, "backups are not supported for hosted environments")
+	c.Check(err, gc.ErrorMatches, "backups are not supported for hosted models")
 }

=== modified file 'src/github.com/juju/juju/apiserver/block/client.go'
--- src/github.com/juju/juju/apiserver/block/client.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/block/client.go 2016-03-22 15:18:22 +0000
@@ -12,7 +12,7 @@
 )
 
 func init() {
-	common.RegisterStandardFacade("Block", 1, NewAPI)
+	common.RegisterStandardFacade("Block", 2, NewAPI)
 }
 
 // Block defines the methods on the block API end point.

=== modified file 'src/github.com/juju/juju/apiserver/charmrevisionupdater/testing/suite.go'
--- src/github.com/juju/juju/apiserver/charmrevisionupdater/testing/suite.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/charmrevisionupdater/testing/suite.go 2016-03-22 15:18:22 +0000
@@ -5,13 +5,14 @@
 
 import (
 	"fmt"
+	"net/http/httptest"
 
 	jc "github.com/juju/testing/checkers"
 	gc "gopkg.in/check.v1"
-	"gopkg.in/juju/charm.v5"
-	"gopkg.in/juju/charm.v5/charmrepo"
-	"gopkg.in/juju/charmstore.v4"
-	"gopkg.in/juju/charmstore.v4/charmstoretesting"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient"
+	"gopkg.in/juju/charmstore.v5-unstable"
 
 	"github.com/juju/juju/apiserver/charmrevisionupdater"
 	jujutesting "github.com/juju/juju/juju/testing"
@@ -25,8 +26,10 @@
 type CharmSuite struct {
 	jcSuite *jujutesting.JujuConnSuite
 
-	Server *charmstoretesting.Server
-	charms map[string]*state.Charm
+	Handler charmstore.HTTPCloseHandler
+	Server  *httptest.Server
+	Client  *csclient.Client
+	charms  map[string]*state.Charm
 }
 
 func (s *CharmSuite) SetUpSuite(c *gc.C, jcSuite *jujutesting.JujuConnSuite) {
@@ -36,33 +39,42 @@
 func (s *CharmSuite) TearDownSuite(c *gc.C) {}
 
 func (s *CharmSuite) SetUpTest(c *gc.C) {
-	s.Server = charmstoretesting.OpenServer(c, s.jcSuite.Session, charmstore.ServerParams{
+	db := s.jcSuite.Session.DB("juju-testing")
+	params := charmstore.ServerParams{
 		AuthUsername: "test-user",
 		AuthPassword: "test-password",
+	}
+	handler, err := charmstore.NewServer(db, nil, "", params, charmstore.V4)
+	c.Assert(err, jc.ErrorIsNil)
+	s.Handler = handler
+	s.Server = httptest.NewServer(handler)
+	s.Client = csclient.New(csclient.Params{
+		URL:      s.Server.URL,
+		User:     params.AuthUsername,
+		Password: params.AuthPassword,
 	})
-	urls := []string{
-		"~who/quantal/mysql-23",
-		"~who/quantal/dummy-24",
-		"~who/quantal/riak-25",
-		"~who/quantal/wordpress-26",
-		"~who/quantal/logging-27",
+	urls := map[string]string{
+		"mysql":     "quantal/mysql-23",
+		"dummy":     "quantal/dummy-24",
+		"riak":      "quantal/riak-25",
+		"wordpress": "quantal/wordpress-26",
+		"logging":   "quantal/logging-27",
 	}
-	for _, url := range urls {
-		id := charm.MustParseReference(url)
-		ch := testcharms.Repo.CharmArchive(c.MkDir(), id.Name)
-		s.Server.UploadCharm(c, ch, id, true)
+	for name, url := range urls {
+		testcharms.UploadCharm(c, s.Client, url, name)
 	}
 	s.jcSuite.PatchValue(&charmrepo.CacheDir, c.MkDir())
 	// Patch the charm repo initializer function: it is replaced with a charm
 	// store repo pointing to the testing server.
-	s.jcSuite.PatchValue(&charmrevisionupdater.NewCharmStore, func(p charmrepo.NewCharmStoreParams) charmrepo.Interface {
-		p.URL = s.Server.URL()
+	s.jcSuite.PatchValue(&charmrevisionupdater.NewCharmStore, func(p charmrepo.NewCharmStoreParams) *charmrepo.CharmStore {
+		p.URL = s.Server.URL
 		return charmrepo.NewCharmStore(p)
 	})
 	s.charms = make(map[string]*state.Charm)
 }
 
 func (s *CharmSuite) TearDownTest(c *gc.C) {
+	s.Handler.Close()
 	s.Server.Close()
 }
 
@@ -79,7 +91,6 @@
 	inst, hc := jujutesting.AssertStartInstanceWithConstraints(c, s.jcSuite.Environ, m.Id(), cons)
 	err = m.SetProvisioned(inst.Id(), "fake_nonce", hc)
 	c.Assert(err, jc.ErrorIsNil)
-
 }
 
 // AddCharmWithRevision adds a charm with the specified revision to state.
@@ -98,7 +109,7 @@
 	ch, ok := s.charms[charmName]
 	c.Assert(ok, jc.IsTrue)
 	owner := s.jcSuite.AdminUserTag(c)
-	_, err := s.jcSuite.State.AddService(serviceName, owner.String(), ch, networks, nil, nil)
+	_, err := s.jcSuite.State.AddService(state.AddServiceArgs{Name: serviceName, Owner: owner.String(), Charm: ch, Networks: networks})
 	c.Assert(err, jc.ErrorIsNil)
 }
 
@@ -126,7 +137,7 @@
 }
 
 // SetupScenario adds some machines and services to state.
-// It assumes a state server machine has already been created.
+// It assumes a controller machine has already been created.
 func (s *CharmSuite) SetupScenario(c *gc.C) {
 	s.AddMachine(c, "1", state.JobHostUnits)
 	s.AddMachine(c, "2", state.JobHostUnits)

=== modified file 'src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go'
--- src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/charmrevisionupdater/updater.go 2016-03-22 15:18:22 +0000
@@ -6,8 +6,8 @@
 import (
 	"github.com/juju/errors"
 	"github.com/juju/loggo"
-	"gopkg.in/juju/charm.v5"
-	"gopkg.in/juju/charm.v5/charmrepo"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable"
 
 	"github.com/juju/juju/apiserver/common"
 	"github.com/juju/juju/apiserver/params"
@@ -17,7 +17,7 @@
 var logger = loggo.GetLogger("juju.apiserver.charmrevisionupdater")
 
 func init() {
-	common.RegisterStandardFacade("CharmRevisionUpdater", 0, NewCharmRevisionUpdaterAPI)
+	common.RegisterStandardFacade("CharmRevisionUpdater", 1, NewCharmRevisionUpdaterAPI)
 }
 
 // CharmRevisionUpdater defines the methods on the charmrevisionupdater API end point.
@@ -41,7 +41,7 @@
 	resources *common.Resources,
 	authorizer common.Authorizer,
 ) (*CharmRevisionUpdaterAPI, error) {
-	if !authorizer.AuthMachineAgent() && !authorizer.AuthEnvironManager() {
+	if !authorizer.AuthMachineAgent() && !authorizer.AuthModelManager() {
 		return nil, common.ErrPerm
 	}
 	return &CharmRevisionUpdaterAPI{
@@ -52,7 +52,7 @@
 // and records this information in state.
 func (api *CharmRevisionUpdaterAPI) UpdateLatestRevisions() (params.ErrorResult, error) {
 	// First get the uuid for the environment to use when querying the charm store.
-	env, err := api.state.Environment()
+	env, err := api.state.Model()
 	if err != nil {
 		return params.ErrorResult{Error: common.ServerError(err)}, nil
 	}
@@ -115,7 +115,7 @@
 	// Do a bulk call to get the revision info for all charms.
 	logger.Infof("retrieving revision information for %d charms", len(curls))
 	repo := NewCharmStore(charmrepo.NewCharmStoreParams{})
-	repo = repo.(*charmrepo.CharmStore).WithJujuAttrs(map[string]string{
+	repo = repo.WithJujuAttrs(map[string]string{
		"environment_uuid": uuid,
	})
 	revInfo, err := repo.Latest(curls...)
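A note on the hunk above: UpdateLatestRevisions wraps the charm store repository with WithJujuAttrs so every revision query carries the model UUID (sent as "environment_uuid=<uuid>" in the Juju-Metadata HTTP header, which TestEnvironmentUUIDUsed in the test file below asserts). The following standalone sketch shows the same pattern; it is illustrative only, the store URL and UUID are placeholders, and it assumes the charmrepo.v2-unstable API used in this diff (NewCharmStore, WithJujuAttrs, Latest), with Latest returning one revision record per requested charm URL:

    package main

    import (
    	"fmt"

    	"gopkg.in/juju/charm.v6-unstable"
    	"gopkg.in/juju/charmrepo.v2-unstable"
    )

    func main() {
    	// Hypothetical charm store endpoint; the tests in this diff point
    	// this at an httptest.Server instead.
    	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
    		URL: "https://charmstore.example.com",
    	})
    	// Attach the model UUID, as UpdateLatestRevisions does above; the
    	// attrs travel in the Juju-Metadata header of each store request.
    	repo = repo.WithJujuAttrs(map[string]string{
    		"environment_uuid": "00000000-0000-0000-0000-000000000000",
    	})
    	curl := charm.MustParseURL("cs:quantal/mysql")
    	revisions, err := repo.Latest(curl)
    	if err != nil {
    		fmt.Println("Latest failed:", err)
    		return
    	}
    	// One result per requested URL, in the same order.
    	fmt.Printf("%s latest revision: %d\n", curl, revisions[0].Revision)
    }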
=== modified file 'src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go' --- src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/charmrevisionupdater/updater_test.go 2016-03-22 15:18:22 +0000 @@ -10,8 +10,8 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" "github.com/juju/juju/apiserver/charmrevisionupdater" "github.com/juju/juju/apiserver/charmrevisionupdater/testing" @@ -75,7 +75,7 @@ } func (s *charmVersionSuite) TestUpdateRevisions(c *gc.C) { - s.AddMachine(c, "0", state.JobManageEnviron) + s.AddMachine(c, "0", state.JobManageModel) s.SetupScenario(c) curl := charm.MustParseURL("cs:quantal/mysql") @@ -109,7 +109,11 @@ svc, err := s.State.Service("mysql") c.Assert(err, jc.ErrorIsNil) ch := s.AddCharmWithRevision(c, "mysql", 23) - err = svc.SetCharm(ch, true) + cfg := state.SetCharmConfig{ + Charm: ch, + ForceUnits: true, + } + err = svc.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) result, err = s.charmrevisionupdater.UpdateLatestRevisions() @@ -123,11 +127,11 @@ } func (s *charmVersionSuite) TestWordpressCharmNoReadAccessIsntVisible(c *gc.C) { - s.AddMachine(c, "0", state.JobManageEnviron) + s.AddMachine(c, "0", state.JobManageModel) s.SetupScenario(c) // Disallow read access to the wordpress charm in the charm store. - err := s.Server.NewClient().Put("/quantal/wordpress/meta/perm/read", nil) + err := s.Client.Put("/quantal/wordpress/meta/perm/read", nil) c.Assert(err, jc.ErrorIsNil) // Run the revision updater and check that the public charm updates are @@ -148,19 +152,19 @@ } func (s *charmVersionSuite) TestEnvironmentUUIDUsed(c *gc.C) { - s.AddMachine(c, "0", state.JobManageEnviron) + s.AddMachine(c, "0", state.JobManageModel) s.SetupScenario(c) // Set up a charm store server that stores the request header. var header http.Header srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { header = r.Header - s.Server.Handler().ServeHTTP(w, r) + s.Handler.ServeHTTP(w, r) })) defer srv.Close() // Point the charm repo initializer to the testing server. 
- s.PatchValue(&charmrevisionupdater.NewCharmStore, func(p charmrepo.NewCharmStoreParams) charmrepo.Interface { + s.PatchValue(&charmrevisionupdater.NewCharmStore, func(p charmrepo.NewCharmStoreParams) *charmrepo.CharmStore { p.URL = srv.URL return charmrepo.NewCharmStore(p) }) @@ -169,7 +173,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result.Error, gc.IsNil) - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "environment_uuid="+env.UUID()) } === modified file 'src/github.com/juju/juju/apiserver/charms.go' --- src/github.com/juju/juju/apiserver/charms.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/charms.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,6 @@ "bytes" "crypto/sha256" "encoding/hex" - "encoding/json" "fmt" "io" "io/ioutil" @@ -23,9 +22,9 @@ "github.com/juju/errors" ziputil "github.com/juju/utils/zip" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" - apihttp "github.com/juju/juju/apiserver/http" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/apiserver/service" "github.com/juju/juju/state" @@ -34,115 +33,132 @@ // charmsHandler handles charm upload through HTTPS in the API server. type charmsHandler struct { - httpHandler + ctxt httpContext dataDir string } // bundleContentSenderFunc functions are responsible for sending a // response related to a charm bundle. -type bundleContentSenderFunc func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) +type bundleContentSenderFunc func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error func (h *charmsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - stateWrapper, err := h.validateEnvironUUID(r) - if err != nil { - h.sendError(w, http.StatusNotFound, err.Error()) - return - } - + var err error switch r.Method { case "POST": - if err := stateWrapper.authenticateUser(r); err != nil { - h.authError(w, h) - return - } - // Add a local charm to the store provider. - // Requires a "series" query specifying the series to use for the charm. - charmURL, err := h.processPost(r, stateWrapper.state) - if err != nil { - h.sendError(w, http.StatusBadRequest, err.Error()) - return - } - h.sendJSON(w, http.StatusOK, ¶ms.CharmsResponse{CharmURL: charmURL.String()}) + err = h.servePost(w, r) case "GET": - // Retrieve or list charm files. - // Requires "url" (charm URL) and an optional "file" (the path to the - // charm file) to be included in the query. - if charmArchivePath, filePath, err := h.processGet(r, stateWrapper.state); err != nil { - // An error occurred retrieving the charm bundle. - if errors.IsNotFound(err) { - h.sendError(w, http.StatusNotFound, err.Error()) - } else { - h.sendError(w, http.StatusBadRequest, err.Error()) - } - } else if filePath == "" { - // The client requested the list of charm files. - sendBundleContent(w, r, charmArchivePath, h.manifestSender) - } else if filePath == "*" { - // The client requested the archive. - sendBundleContent(w, r, charmArchivePath, h.archiveSender) - } else { - // The client requested a specific file. 
- sendBundleContent(w, r, charmArchivePath, h.archiveEntrySender(filePath)) + err = h.serveGet(w, r) + default: + err = errors.MethodNotAllowedf("unsupported method: %q", r.Method) + } + if err != nil { + h.sendError(w, r, err) + } +} + +func (h *charmsHandler) servePost(w http.ResponseWriter, r *http.Request) error { + st, _, err := h.ctxt.stateForRequestAuthenticatedUser(r) + if err != nil { + return errors.Trace(err) + } + // Add a local charm to the store provider. + // Requires a "series" query specifying the series to use for the charm. + charmURL, err := h.processPost(r, st) + if err != nil { + return errors.NewBadRequest(err, "") + } + sendStatusAndJSON(w, http.StatusOK, ¶ms.CharmsResponse{CharmURL: charmURL.String()}) + return nil +} + +func (h *charmsHandler) serveGet(w http.ResponseWriter, r *http.Request) error { + // TODO (bug #1499338 2015/09/24) authenticate this. + st, err := h.ctxt.stateForRequestUnauthenticated(r) + if err != nil { + return errors.Trace(err) + } + // Retrieve or list charm files. + // Requires "url" (charm URL) and an optional "file" (the path to the + // charm file) to be included in the query. + charmArchivePath, filePath, err := h.processGet(r, st) + if err != nil { + // An error occurred retrieving the charm bundle. + if errors.IsNotFound(err) { + return errors.Trace(err) } + return errors.NewBadRequest(err, "") + } + var sender bundleContentSenderFunc + switch filePath { + case "": + // The client requested the list of charm files. + sender = h.manifestSender + case "*": + // The client requested the archive. + sender = h.archiveSender default: - h.sendError(w, http.StatusMethodNotAllowed, fmt.Sprintf("unsupported method: %q", r.Method)) - } -} - -// sendJSON sends a JSON-encoded response to the client. -func (h *charmsHandler) sendJSON(w http.ResponseWriter, statusCode int, response *params.CharmsResponse) error { - w.Header().Set("Content-Type", apihttp.CTypeJSON) - w.WriteHeader(statusCode) - body, err := json.Marshal(response) - if err != nil { - return err - } - w.Write(body) + // The client requested a specific file. + sender = h.archiveEntrySender(filePath) + } + if err := h.sendBundleContent(w, r, charmArchivePath, sender); err != nil { + return errors.Trace(err) + } return nil } +// sendError sends a JSON-encoded error response. +// Note the difference from the error response sent by +// the sendError function - the error is encoded in the +// Error field as a string, not an Error object. +func (h *charmsHandler) sendError(w http.ResponseWriter, req *http.Request, err error) { + logger.Errorf("returning error from %s %s: %s", req.Method, req.URL, errors.Details(err)) + perr, status := common.ServerErrorAndStatus(err) + sendStatusAndJSON(w, status, ¶ms.CharmsResponse{ + Error: perr.Message, + ErrorCode: perr.Code, + ErrorInfo: perr.Info, + }) +} + // sendBundleContent uses the given bundleContentSenderFunc to send a response // related to the charm archive located in the given archivePath. 
-func sendBundleContent(w http.ResponseWriter, r *http.Request, archivePath string, sender bundleContentSenderFunc) { +func (h *charmsHandler) sendBundleContent(w http.ResponseWriter, r *http.Request, archivePath string, sender bundleContentSenderFunc) error { bundle, err := charm.ReadCharmArchive(archivePath) if err != nil { - http.Error( - w, fmt.Sprintf("unable to read archive in %q: %v", archivePath, err), - http.StatusInternalServerError) - return + return errors.Annotatef(err, "unable to read archive in %q", archivePath) } // The bundleContentSenderFunc will set up and send an appropriate response. - sender(w, r, bundle) + if err := sender(w, r, bundle); err != nil { + return errors.Trace(err) + } + return nil } // manifestSender sends a JSON-encoded response to the client including the // list of files contained in the charm bundle. -func (h *charmsHandler) manifestSender(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) { +func (h *charmsHandler) manifestSender(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error { manifest, err := bundle.Manifest() if err != nil { - http.Error( - w, fmt.Sprintf("unable to read archive in %q: %v", bundle.Path, err), - http.StatusInternalServerError) - return + return errors.Annotatef(err, "unable to read manifest in %q", bundle.Path) } - h.sendJSON(w, http.StatusOK, ¶ms.CharmsResponse{Files: manifest.SortedValues()}) + sendStatusAndJSON(w, http.StatusOK, ¶ms.CharmsResponse{ + Files: manifest.SortedValues(), + }) + return nil } // archiveEntrySender returns a bundleContentSenderFunc which is responsible for // sending the contents of filePath included in the given charm bundle. If filePath // does not identify a file or a symlink, a 403 forbidden error is returned. func (h *charmsHandler) archiveEntrySender(filePath string) bundleContentSenderFunc { - return func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) { + return func(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error { // TODO(fwereade) 2014-01-27 bug #1285685 // This doesn't handle symlinks helpfully, and should be talking in // terms of bundles rather than zip readers; but this demands thought // and design and is not amenable to a quick fix. zipReader, err := zip.OpenReader(bundle.Path) if err != nil { - http.Error( - w, fmt.Sprintf("unable to read charm: %v", err), - http.StatusInternalServerError) - return + return errors.Annotatef(err, "unable to read charm") } defer zipReader.Close() for _, file := range zipReader.File { @@ -151,47 +167,45 @@ } fileInfo := file.FileInfo() if fileInfo.IsDir() { - http.Error(w, "directory listing not allowed", http.StatusForbidden) - return + return ¶ms.Error{ + Message: "directory listing not allowed", + Code: params.CodeForbidden, + } } contents, err := file.Open() if err != nil { - http.Error( - w, fmt.Sprintf("unable to read file %q: %v", filePath, err), - http.StatusInternalServerError) - return + return errors.Annotatef(err, "unable to read file %q", filePath) } defer contents.Close() ctype := mime.TypeByExtension(filepath.Ext(filePath)) if ctype != "" { // Older mime.types may map .js to x-javascript. // Map it to javascript for consistency. 
- if ctype == apihttp.CTypeXJS { - ctype = apihttp.CTypeJS + if ctype == params.ContentTypeXJS { + ctype = params.ContentTypeJS } w.Header().Set("Content-Type", ctype) } w.Header().Set("Content-Length", strconv.FormatInt(fileInfo.Size(), 10)) w.WriteHeader(http.StatusOK) io.Copy(w, contents) - return + return nil } - http.NotFound(w, r) - return + return errors.NotFoundf("charm") } } // archiveSender is a bundleContentSenderFunc which is responsible for sending // the contents of the given charm bundle. -func (h *charmsHandler) archiveSender(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) { +func (h *charmsHandler) archiveSender(w http.ResponseWriter, r *http.Request, bundle *charm.CharmArchive) error { + // Note that http.ServeFile's error responses are not our standard JSON + // responses (they are the usual textual error messages as produced + // by http.Error), but there's not a great deal we can do about that, + // except accept non-JSON error responses in the client, because + // http.ServeFile does not provide a way of customizing its + // error responses. http.ServeFile(w, r, bundle.Path) -} - -// sendError sends a JSON-encoded error response. -func (h *charmsHandler) sendError(w http.ResponseWriter, statusCode int, message string) { - if err := h.sendJSON(w, statusCode, ¶ms.CharmsResponse{Error: message}); err != nil { - logger.Errorf("failed to send error: %v", err) - } + return nil } // processPost handles a charm upload POST request after authentication. @@ -415,7 +429,7 @@ // downloadCharm downloads the given charm name from the provider storage and // saves the corresponding zip archive to the given charmArchivePath. func (h *charmsHandler) downloadCharm(st *state.State, curl *charm.URL, charmArchivePath string) error { - storage := storage.NewStorage(st.EnvironUUID(), st.MongoSession()) + storage := storage.NewStorage(st.ModelUUID(), st.MongoSession()) ch, err := st.Charm(curl) if err != nil { return errors.Annotate(err, "cannot get charm from state") @@ -439,7 +453,7 @@ reader, _, err := storage.Get(ch.StoragePath()) if err != nil { defer cleanupFile(tempCharmArchive) - return errors.Annotate(err, "cannot get charm from environment storage") + return errors.Annotate(err, "cannot get charm from model storage") } defer reader.Close() === modified file 'src/github.com/juju/juju/apiserver/charms/client.go' --- src/github.com/juju/juju/apiserver/charms/client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/charms/client.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/errors" "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/common" @@ -15,7 +15,7 @@ ) func init() { - common.RegisterStandardFacade("Charms", 1, NewAPI) + common.RegisterStandardFacade("Charms", 2, NewAPI) } var getState = func(st *state.State) charmsAccess { === modified file 'src/github.com/juju/juju/apiserver/charms/client_test.go' --- src/github.com/juju/juju/apiserver/charms/client_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/charms/client_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/charms" @@ -93,25 +93,22 @@ url: "local:quantal/wordpress-3", }, { - about: "invalid URL", - charm: "wordpress", - expectedActions: 
&charm.Actions{ActionSpecs: nil}, - url: "not-valid", - err: "charm url series is not resolved", - }, - { - about: "invalid schema", - charm: "wordpress", - expectedActions: &charm.Actions{ActionSpecs: nil}, - url: "not-valid:your-arguments", - err: `charm URL has invalid schema: "not-valid:your-arguments"`, - }, - { - about: "unknown charm", - charm: "wordpress", - expectedActions: &charm.Actions{ActionSpecs: nil}, - url: "cs:missing/one-1", - err: `charm "cs:missing/one-1" not found`, + about: "invalid URL", + charm: "wordpress", + url: "not-valid!", + err: `URL has invalid charm or bundle name: "not-valid!"`, + }, + { + about: "invalid schema", + charm: "wordpress", + url: "not-valid:your-arguments", + err: `charm or bundle URL has invalid schema: "not-valid:your-arguments"`, + }, + { + about: "unknown charm", + charm: "wordpress", + url: "cs:missing/one-1", + err: `charm "cs:missing/one-1" not found \(not found\)`, }, } === modified file 'src/github.com/juju/juju/apiserver/charms/state.go' --- src/github.com/juju/juju/apiserver/charms/state.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/charms/state.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,7 @@ package charms import ( - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/state" ) === modified file 'src/github.com/juju/juju/apiserver/charms_test.go' --- src/github.com/juju/juju/apiserver/charms_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/charms_test.go 2016-03-22 15:18:22 +0000 @@ -14,20 +14,76 @@ "path/filepath" "runtime" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/macaroon-bakery.v1/httpbakery" - apihttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" "github.com/juju/juju/state/storage" "github.com/juju/juju/testcharms" ) +// charmsCommonSuite wraps authHttpSuite and adds +// some helper methods suitable for working with the +// charms endpoint. +type charmsCommonSuite struct { + authHttpSuite +} + +func (s *charmsCommonSuite) charmsURL(c *gc.C, query string) *url.URL { + uri := s.baseURL(c) + if s.modelUUID == "" { + uri.Path = "/charms" + } else { + uri.Path = fmt.Sprintf("/model/%s/charms", s.modelUUID) + } + uri.RawQuery = query + return uri +} + +func (s *charmsCommonSuite) charmsURI(c *gc.C, query string) string { + if query != "" && query[0] == '?' 
{ + query = query[1:] + } + return s.charmsURL(c, query).String() +} + +func (s *charmsCommonSuite) assertUploadResponse(c *gc.C, resp *http.Response, expCharmURL string) { + charmResponse := s.assertResponse(c, resp, http.StatusOK) + c.Check(charmResponse.Error, gc.Equals, "") + c.Check(charmResponse.CharmURL, gc.Equals, expCharmURL) +} + +func (s *charmsCommonSuite) assertGetFileResponse(c *gc.C, resp *http.Response, expBody, expContentType string) { + body := assertResponse(c, resp, http.StatusOK, expContentType) + c.Check(string(body), gc.Equals, expBody) +} + +func (s *charmsCommonSuite) assertGetFileListResponse(c *gc.C, resp *http.Response, expFiles []string) { + charmResponse := s.assertResponse(c, resp, http.StatusOK) + c.Check(charmResponse.Error, gc.Equals, "") + c.Check(charmResponse.Files, gc.DeepEquals, expFiles) +} + +func (s *charmsCommonSuite) assertErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) { + charmResponse := s.assertResponse(c, resp, expCode) + c.Check(charmResponse.Error, gc.Matches, expError) +} + +func (s *charmsCommonSuite) assertResponse(c *gc.C, resp *http.Response, expStatus int) params.CharmsResponse { + body := assertResponse(c, resp, expStatus, params.ContentTypeJSON) + var charmResponse params.CharmsResponse + err := json.Unmarshal(body, &charmResponse) + c.Assert(err, jc.ErrorIsNil, gc.Commentf("body: %s", body)) + return charmResponse +} + type charmsSuite struct { - userAuthHttpSuite + charmsCommonSuite } var _ = gc.Suite(&charmsSuite{}) @@ -37,32 +93,31 @@ if runtime.GOOS == "windows" { c.Skip("bug 1403084: Skipping this on windows for now") } - s.userAuthHttpSuite.SetUpSuite(c) - s.archiveContentType = "application/zip" + s.charmsCommonSuite.SetUpSuite(c) } func (s *charmsSuite) TestCharmsServedSecurely(c *gc.C) { info := s.APIInfo(c) uri := "http://" + info.Addrs[0] + "/charms" - _, err := s.sendRequest(c, "", "", "GET", uri, "", nil) - c.Assert(err, gc.ErrorMatches, `.*malformed HTTP response.*`) + s.sendRequest(c, httpRequestParams{ + method: "GET", + url: uri, + expectError: `.*malformed HTTP response.*`, + }) } func (s *charmsSuite) TestPOSTRequiresAuth(c *gc.C) { - resp, err := s.sendRequest(c, "", "", "POST", s.charmsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusUnauthorized, "unauthorized") + resp := s.sendRequest(c, httpRequestParams{method: "POST", url: s.charmsURI(c, "")}) + s.assertErrorResponse(c, resp, http.StatusUnauthorized, "no credentials provided") } func (s *charmsSuite) TestGETDoesNotRequireAuth(c *gc.C) { - resp, err := s.sendRequest(c, "", "", "GET", s.charmsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.sendRequest(c, httpRequestParams{method: "GET", url: s.charmsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected url=CharmURL query argument") } func (s *charmsSuite) TestRequiresPOSTorGET(c *gc.C) { - resp, err := s.authRequest(c, "PUT", s.charmsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "PUT", url: s.charmsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusMethodNotAllowed, `unsupported method: "PUT"`) } @@ -77,19 +132,22 @@ err = machine.SetPassword(password) c.Assert(err, jc.ErrorIsNil) - resp, err := s.sendRequest(c, machine.Tag().String(), password, "POST", s.charmsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusUnauthorized, "unauthorized") + resp := s.sendRequest(c, httpRequestParams{ + 
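+ // A machine agent login is attempted first; the endpoint rejects
+ // it, and a user login is tried next.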
tag: machine.Tag().String(), + password: password, + method: "POST", + url: s.charmsURI(c, ""), + nonce: "fake_nonce", + }) + s.assertErrorResponse(c, resp, http.StatusUnauthorized, "invalid entity name or password") // Now try a user login. - resp, err = s.authRequest(c, "POST", s.charmsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp = s.authRequest(c, httpRequestParams{method: "POST", url: s.charmsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected series=URL argument") } func (s *charmsSuite) TestUploadRequiresSeries(c *gc.C) { - resp, err := s.authRequest(c, "POST", s.charmsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "POST", url: s.charmsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected series=URL argument") } @@ -100,13 +158,11 @@ // Pretend we upload a zip by setting the Content-Type, so we can // check the error at extraction time later. - resp, err := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), true, tempFile.Name()) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", tempFile.Name()) s.assertErrorResponse(c, resp, http.StatusBadRequest, "cannot open charm archive: zip: not a valid zip file") // Now try with the default Content-Type. - resp, err = s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), false, tempFile.Name()) - c.Assert(err, jc.ErrorIsNil) + resp = s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/octet-stream", tempFile.Name()) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected Content-Type: application/zip, got: application/octet-stream") } @@ -121,8 +177,7 @@ // Now try uploading the same revision and verify it gets bumped, // and the BundleSha256 is calculated. - resp, err := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) expectedURL := charm.MustParseURL("local:quantal/dummy-2") s.assertUploadResponse(c, resp, expectedURL.String()) sch, err := s.State.Charm(expectedURL) @@ -148,8 +203,7 @@ c.Assert(err, jc.ErrorIsNil) // Now try uploading it and ensure the revision persists. 
- resp, err := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), true, tempFile.Name()) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", tempFile.Name()) expectedURL := charm.MustParseURL("local:quantal/dummy-123") s.assertUploadResponse(c, resp, expectedURL.String()) sch, err := s.State.Charm(expectedURL) @@ -168,7 +222,7 @@ c.Assert(sch.BundleSha256(), gc.Equals, expectedSHA256) - storage := storage.NewStorage(s.State.EnvironUUID(), s.State.MongoSession()) + storage := storage.NewStorage(s.State.ModelUUID(), s.State.MongoSession()) reader, _, err := storage.Get(sch.StoragePath()) c.Assert(err, jc.ErrorIsNil) defer reader.Close() @@ -183,42 +237,38 @@ // https://host:port/charms url := s.charmsURL(c, "series=quantal") url.Path = "/charms" - resp, err := s.uploadRequest(c, url.String(), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) - expectedURL := charm.MustParseURL("local:quantal/dummy-1") - s.assertUploadResponse(c, resp, expectedURL.String()) -} - -func (s *charmsSuite) TestUploadAllowsEnvUUIDPath(c *gc.C) { - // Check that we can upload charms to https://host:port/ENVUUID/charms - ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - url := s.charmsURL(c, "series=quantal") - url.Path = fmt.Sprintf("/environment/%s/charms", s.envUUID) - resp, err := s.uploadRequest(c, url.String(), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) - expectedURL := charm.MustParseURL("local:quantal/dummy-1") - s.assertUploadResponse(c, resp, expectedURL.String()) -} - -func (s *charmsSuite) TestUploadAllowsOtherEnvUUIDPath(c *gc.C) { - envState := s.setupOtherEnvironment(c) - // Check that we can upload charms to https://host:port/ENVUUID/charms - ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - url := s.charmsURL(c, "series=quantal") - url.Path = fmt.Sprintf("/environment/%s/charms", envState.EnvironUUID()) - resp, err := s.uploadRequest(c, url.String(), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) - expectedURL := charm.MustParseURL("local:quantal/dummy-1") - s.assertUploadResponse(c, resp, expectedURL.String()) -} - -func (s *charmsSuite) TestUploadRejectsWrongEnvUUIDPath(c *gc.C) { - // Check that we cannot upload charms to https://host:port/BADENVUUID/charms - url := s.charmsURL(c, "series=quantal") - url.Path = "/environment/dead-beef-123456/charms" - resp, err := s.authRequest(c, "POST", url.String(), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown environment: "dead-beef-123456"`) + resp := s.uploadRequest(c, url.String(), "application/zip", ch.Path) + expectedURL := charm.MustParseURL("local:quantal/dummy-1") + s.assertUploadResponse(c, resp, expectedURL.String()) +} + +func (s *charmsSuite) TestUploadAllowsModelUUIDPath(c *gc.C) { + // Check that we can upload charms to https://host:port/ModelUUID/charms + ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + url := s.charmsURL(c, "series=quantal") + url.Path = fmt.Sprintf("/model/%s/charms", s.modelUUID) + resp := s.uploadRequest(c, url.String(), "application/zip", ch.Path) + expectedURL := charm.MustParseURL("local:quantal/dummy-1") + s.assertUploadResponse(c, resp, expectedURL.String()) +} + +func (s *charmsSuite) TestUploadAllowsOtherModelUUIDPath(c *gc.C) { + envState := s.setupOtherModel(c) + // Check that we can upload charms to https://host:port/ModelUUID/charms + ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") + url := s.charmsURL(c, "series=quantal") + url.Path = fmt.Sprintf("/model/%s/charms", 
envState.ModelUUID()) + resp := s.uploadRequest(c, url.String(), "application/zip", ch.Path) + expectedURL := charm.MustParseURL("local:quantal/dummy-1") + s.assertUploadResponse(c, resp, expectedURL.String()) +} + +func (s *charmsSuite) TestUploadRejectsWrongModelUUIDPath(c *gc.C) { + // Check that we cannot upload charms to https://host:port/BADModelUUID/charms + url := s.charmsURL(c, "series=quantal") + url.Path = "/model/dead-beef-123456/charms" + resp := s.authRequest(c, httpRequestParams{method: "POST", url: url.String()}) + s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown model: "dead-beef-123456"`) } func (s *charmsSuite) TestUploadRepackagesNestedArchives(c *gc.C) { @@ -242,8 +292,7 @@ c.Assert(err, gc.ErrorMatches, `archive file "metadata.yaml" not found`) // Now try uploading it - should succeeed and be repackaged. - resp, err := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), true, tempFile.Name()) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", tempFile.Name()) expectedURL := charm.MustParseURL("local:quantal/dummy-1") s.assertUploadResponse(c, resp, expectedURL.String()) sch, err := s.State.Charm(expectedURL) @@ -255,7 +304,7 @@ // Get it from the storage and try to read it as a bundle - it // should succeed, because it was repackaged during upload to // strip nested dirs. - storage := storage.NewStorage(s.State.EnvironUUID(), s.State.MongoSession()) + storage := storage.NewStorage(s.State.ModelUUID(), s.State.MongoSession()) reader, _, err := storage.Get(sch.StoragePath()) c.Assert(err, jc.ErrorIsNil) defer reader.Close() @@ -278,8 +327,7 @@ func (s *charmsSuite) TestGetRequiresCharmURL(c *gc.C) { uri := s.charmsURI(c, "?file=hooks/install") - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) s.assertErrorResponse( c, resp, http.StatusBadRequest, "expected url=CharmURL query argument", @@ -288,8 +336,7 @@ func (s *charmsSuite) TestGetFailsWithInvalidCharmURL(c *gc.C) { uri := s.charmsURI(c, "?url=local:precise/no-such") - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) s.assertErrorResponse( c, resp, http.StatusNotFound, `unable to retrieve and save the charm: cannot get charm from state: charm "local:precise/no-such" not found`, @@ -299,9 +346,7 @@ func (s *charmsSuite) TestGetReturnsNotFoundWhenMissing(c *gc.C) { // Add the dummy charm. ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) // Ensure a 404 is returned for files not included in the charm. for i, file := range []string{ @@ -309,8 +354,7 @@ } { c.Logf("test %d: %s", i, file) uri := s.charmsURI(c, "?url=local:quantal/dummy-1&file="+file) - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) c.Assert(resp.StatusCode, gc.Equals, http.StatusNotFound) } } @@ -318,23 +362,18 @@ func (s *charmsSuite) TestGetReturnsForbiddenWithDirectory(c *gc.C) { // Add the dummy charm. 
ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) // Ensure a 403 is returned if the requested file is a directory. uri := s.charmsURI(c, "?url=local:quantal/dummy-1&file=hooks") - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) c.Assert(resp.StatusCode, gc.Equals, http.StatusForbidden) } func (s *charmsSuite) TestGetReturnsFileContents(c *gc.C) { // Add the dummy charm. ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) // Ensure the file contents are properly returned. for i, t := range []struct { @@ -357,8 +396,7 @@ } { c.Logf("test %d: %s", i, t.summary) uri := s.charmsURI(c, "?url=local:quantal/dummy-1&file="+t.file) - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) s.assertGetFileResponse(c, resp, t.response, "text/plain; charset=utf-8") } } @@ -366,16 +404,13 @@ func (s *charmsSuite) TestGetStarReturnsArchiveBytes(c *gc.C) { // Add the dummy charm. ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) data, err := ioutil.ReadFile(ch.Path) c.Assert(err, jc.ErrorIsNil) uri := s.charmsURI(c, "?url=local:quantal/dummy-1&file=*") - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) s.assertGetFileResponse(c, resp, string(data), "application/zip") } @@ -383,67 +418,54 @@ // Backwards compatibility check, that we can GET from charms at // https://host:port/charms ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) url := s.charmsURL(c, "url=local:quantal/dummy-1&file=revision") url.Path = "/charms" - resp, err := s.authRequest(c, "GET", url.String(), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: url.String()}) s.assertGetFileResponse(c, resp, "1", "text/plain; charset=utf-8") } -func (s *charmsSuite) TestGetAllowsEnvUUIDPath(c *gc.C) { +func (s *charmsSuite) TestGetAllowsModelUUIDPath(c *gc.C) { ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) url := s.charmsURL(c, "url=local:quantal/dummy-1&file=revision") - url.Path = fmt.Sprintf("/environment/%s/charms", s.envUUID) - resp, err := s.authRequest(c, "GET", url.String(), "", nil) - c.Assert(err, jc.ErrorIsNil) + url.Path = fmt.Sprintf("/model/%s/charms", s.modelUUID) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: url.String()}) s.assertGetFileResponse(c, resp, "1", 
"text/plain; charset=utf-8") } func (s *charmsSuite) TestGetAllowsOtherEnvironment(c *gc.C) { - envState := s.setupOtherEnvironment(c) + envState := s.setupOtherModel(c) ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) url := s.charmsURL(c, "url=local:quantal/dummy-1&file=revision") - url.Path = fmt.Sprintf("/environment/%s/charms", envState.EnvironUUID()) - resp, err := s.authRequest(c, "GET", url.String(), "", nil) - c.Assert(err, jc.ErrorIsNil) + url.Path = fmt.Sprintf("/model/%s/charms", envState.ModelUUID()) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: url.String()}) s.assertGetFileResponse(c, resp, "1", "text/plain; charset=utf-8") } -func (s *charmsSuite) TestGetRejectsWrongEnvUUIDPath(c *gc.C) { +func (s *charmsSuite) TestGetRejectsWrongModelUUIDPath(c *gc.C) { url := s.charmsURL(c, "url=local:quantal/dummy-1&file=revision") - url.Path = "/environment/dead-beef-123456/charms" - resp, err := s.authRequest(c, "GET", url.String(), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown environment: "dead-beef-123456"`) + url.Path = "/model/dead-beef-123456/charms" + resp := s.authRequest(c, httpRequestParams{method: "GET", url: url.String()}) + s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown model: "dead-beef-123456"`) } func (s *charmsSuite) TestGetReturnsManifest(c *gc.C) { // Add the dummy charm. ch := testcharms.Repo.CharmArchive(c.MkDir(), "dummy") - _, err := s.uploadRequest( - c, s.charmsURI(c, "?series=quantal"), true, ch.Path) - c.Assert(err, jc.ErrorIsNil) + s.uploadRequest(c, s.charmsURI(c, "?series=quantal"), "application/zip", ch.Path) // Ensure charm files are properly listed. uri := s.charmsURI(c, "?url=local:quantal/dummy-1") - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) manifest, err := ch.Manifest() c.Assert(err, jc.ErrorIsNil) expectedFiles := manifest.SortedValues() s.assertGetFileListResponse(c, resp, expectedFiles) ctype := resp.Header.Get("content-type") - c.Assert(ctype, gc.Equals, apihttp.CTypeJSON) + c.Assert(ctype, gc.Equals, params.ContentTypeJSON) } func (s *charmsSuite) TestGetUsesCache(c *gc.C) { @@ -468,60 +490,89 @@ // Ensure the cached contents are properly retrieved. uri := s.charmsURI(c, "?url=local:trusty/django-42&file=utils.js") - resp, err := s.authRequest(c, "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertGetFileResponse(c, resp, contents, apihttp.CTypeJS) -} - -func (s *charmsSuite) charmsURL(c *gc.C, query string) *url.URL { - uri := s.baseURL(c) - if s.envUUID == "" { - uri.Path = "/charms" - } else { - uri.Path = fmt.Sprintf("/environment/%s/charms", s.envUUID) - } - uri.RawQuery = query - return uri -} - -func (s *charmsSuite) charmsURI(c *gc.C, query string) string { - if query != "" && query[0] == '?' 
{ - query = query[1:] - } - return s.charmsURL(c, query).String() -} - -func (s *charmsSuite) assertUploadResponse(c *gc.C, resp *http.Response, expCharmURL string) { - body := assertResponse(c, resp, http.StatusOK, apihttp.CTypeJSON) - charmResponse := jsonResponse(c, body) - c.Check(charmResponse.Error, gc.Equals, "") - c.Check(charmResponse.CharmURL, gc.Equals, expCharmURL) -} - -func (s *charmsSuite) assertGetFileResponse(c *gc.C, resp *http.Response, expBody, expContentType string) { - body := assertResponse(c, resp, http.StatusOK, expContentType) - c.Check(string(body), gc.Equals, expBody) -} - -func (s *charmsSuite) assertGetFileListResponse(c *gc.C, resp *http.Response, expFiles []string) { - body := assertResponse(c, resp, http.StatusOK, apihttp.CTypeJSON) - charmResponse := jsonResponse(c, body) - c.Check(charmResponse.Error, gc.Equals, "") - c.Check(charmResponse.Files, gc.DeepEquals, expFiles) -} - -func assertResponse(c *gc.C, resp *http.Response, expCode int, expContentType string) []byte { - c.Check(resp.StatusCode, gc.Equals, expCode) - body, err := ioutil.ReadAll(resp.Body) - defer resp.Body.Close() - c.Assert(err, jc.ErrorIsNil) - ctype := resp.Header.Get("Content-Type") - c.Assert(ctype, gc.Equals, expContentType) - return body -} - -func jsonResponse(c *gc.C, body []byte) (jsonResponse params.CharmsResponse) { - err := json.Unmarshal(body, &jsonResponse) - c.Assert(err, jc.ErrorIsNil) - return + resp := s.authRequest(c, httpRequestParams{method: "GET", url: uri}) + s.assertGetFileResponse(c, resp, contents, params.ContentTypeJS) +} + +type charmsWithMacaroonsSuite struct { + charmsCommonSuite +} + +var _ = gc.Suite(&charmsWithMacaroonsSuite{}) + +func (s *charmsWithMacaroonsSuite) SetUpTest(c *gc.C) { + s.macaroonAuthEnabled = true + s.authHttpSuite.SetUpTest(c) +} + +func (s *charmsWithMacaroonsSuite) TestWithNoBasicAuthReturnsDischargeRequiredError(c *gc.C) { + resp := s.sendRequest(c, httpRequestParams{ + method: "POST", + url: s.charmsURI(c, ""), + }) + + charmResponse := s.assertResponse(c, resp, http.StatusUnauthorized) + c.Assert(charmResponse.Error, gc.Equals, "verification failed: no macaroons") + c.Assert(charmResponse.ErrorCode, gc.Equals, params.CodeDischargeRequired) + c.Assert(charmResponse.ErrorInfo, gc.NotNil) + c.Assert(charmResponse.ErrorInfo.Macaroon, gc.NotNil) +} + +func (s *charmsWithMacaroonsSuite) TestCanPostWithDischargedMacaroon(c *gc.C) { + checkCount := 0 + s.DischargerLogin = func() string { + checkCount++ + return s.userTag.Id() + } + resp := s.sendRequest(c, httpRequestParams{ + do: s.doer(), + method: "POST", + url: s.charmsURI(c, ""), + }) + s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected series=URL argument") + c.Assert(checkCount, gc.Equals, 1) +} + +// doer returns a Do function that can make a bakery request +// appropriate for a charms endpoint. +func (s *charmsWithMacaroonsSuite) doer() func(*http.Request) (*http.Response, error) { + return bakeryDo(nil, charmsBakeryGetError) +} + +// charmsBakeryGetError implements a getError function +// appropriate for passing to httpbakery.Client.DoWithBodyAndCustomError +// for the charms endpoint. 
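+// Responses other than StatusUnauthorized are passed through with a nil
+// error; unauthorized responses carrying a code other than
+// CodeDischargeRequired come back as a plain *params.Error, and
+// discharge-required errors are converted to *httpbakery.Error so that
+// the bakery client can discharge the macaroon and retry the request.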
+func charmsBakeryGetError(resp *http.Response) error { + if resp.StatusCode != http.StatusUnauthorized { + return nil + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return errors.Annotatef(err, "cannot read body") + } + var charmResp params.CharmsResponse + if err := json.Unmarshal(data, &charmResp); err != nil { + return errors.Annotatef(err, "cannot unmarshal body") + } + errResp := &params.Error{ + Message: charmResp.Error, + Code: charmResp.ErrorCode, + Info: charmResp.ErrorInfo, + } + if errResp.Code != params.CodeDischargeRequired { + return errResp + } + if errResp.Info == nil { + return errors.Annotatef(err, "no error info found in discharge-required response error") + } + // It's a discharge-required error, so make an appropriate httpbakery + // error from it. + return &httpbakery.Error{ + Message: errResp.Message, + Code: httpbakery.ErrDischargeRequired, + Info: &httpbakery.ErrorInfo{ + Macaroon: errResp.Info.Macaroon, + MacaroonPath: errResp.Info.MacaroonPath, + }, + } } === modified file 'src/github.com/juju/juju/apiserver/cleaner/cleaner.go' --- src/github.com/juju/juju/apiserver/cleaner/cleaner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/cleaner/cleaner.go 2016-03-22 15:18:22 +0000 @@ -16,7 +16,7 @@ ) func init() { - common.RegisterStandardFacade("Cleaner", 1, NewCleanerAPI) + common.RegisterStandardFacade("Cleaner", 2, NewCleanerAPI) } var logger = loggo.GetLogger("juju.apiserver.cleaner") @@ -33,7 +33,7 @@ res *common.Resources, authorizer common.Authorizer, ) (*CleanerAPI, error) { - if !authorizer.AuthEnvironManager() { + if !authorizer.AuthModelManager() { return nil, common.ErrPerm } return &CleanerAPI{ === modified file 'src/github.com/juju/juju/apiserver/client/api_test.go' --- src/github.com/juju/juju/apiserver/client/api_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/client/api_test.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,6 @@ import ( "fmt" - stdtesting "testing" "time" "github.com/juju/errors" @@ -30,10 +29,6 @@ "github.com/juju/juju/testing/factory" ) -func TestAll(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - type baseSuite struct { testing.JujuConnSuite commontesting.BlockHelper @@ -124,7 +119,7 @@ stateInfo := s.MongoInfo(c) stateInfo.Tag = e.Tag() stateInfo.Password = password - st, err := state.Open(s.State.EnvironTag(), stateInfo, mongo.DialOpts{ + st, err := state.Open(s.State.ModelTag(), stateInfo, mongo.DialOpts{ Timeout: 25 * time.Millisecond, }, environs.NewStatePolicy()) if err == nil { @@ -158,7 +153,7 @@ // but this behavior is already tested in cmd/juju/status_test.go and // also tested live and it works. 
var scenarioStatus = &params.FullStatus{ - EnvironmentName: "dummyenv", + ModelName: "dummymodel", Machines: map[string]params.MachineStatus{ "0": { Id: "0", @@ -167,13 +162,11 @@ Status: "started", Data: make(map[string]interface{}), }, - AgentState: "down", - AgentStateInfo: "(started)", - Series: "quantal", - Containers: map[string]params.MachineStatus{}, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobManageEnviron}, - HasVote: false, - WantsVote: true, + Series: "quantal", + Containers: map[string]params.MachineStatus{}, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobManageModel}, + HasVote: false, + WantsVote: true, }, "1": { Id: "1", @@ -182,13 +175,11 @@ Status: "started", Data: make(map[string]interface{}), }, - AgentState: "down", - AgentStateInfo: "(started)", - Series: "quantal", - Containers: map[string]params.MachineStatus{}, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - HasVote: false, - WantsVote: false, + Series: "quantal", + Containers: map[string]params.MachineStatus{}, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + HasVote: false, + WantsVote: false, }, "2": { Id: "2", @@ -197,13 +188,11 @@ Status: "started", Data: make(map[string]interface{}), }, - AgentState: "down", - AgentStateInfo: "(started)", - Series: "quantal", - Containers: map[string]params.MachineStatus{}, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - HasVote: false, - WantsVote: false, + Series: "quantal", + Containers: map[string]params.MachineStatus{}, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + HasVote: false, + WantsVote: false, }, }, Services: map[string]params.ServiceStatus{ @@ -380,7 +369,7 @@ setDefaultPassword(c, u) add(u) - m, err := s.State.AddMachine("quantal", state.JobManageEnviron) + m, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) c.Assert(m.Tag(), gc.Equals, names.NewMachineTag("0")) err = m.SetProvisioned(instance.Id("i-"+m.Tag().String()), "fake_nonce", nil) @@ -461,7 +450,7 @@ pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) - err = s.State.UpdateEnvironConfig(map[string]interface{}{ + err = s.State.UpdateModelConfig(map[string]interface{}{ "storage-default-block-source": "loop-pool", }, nil, nil) c.Assert(err, jc.ErrorIsNil) === added file 'src/github.com/juju/juju/apiserver/client/bundles.go' --- src/github.com/juju/juju/apiserver/client/bundles.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/client/bundles.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client + +import ( + "strings" + + "github.com/juju/bundlechanges" + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/constraints" + "github.com/juju/juju/storage" +) + +// GetBundleChanges returns the list of changes required to deploy the given +// bundle data. The changes are sorted by requirements, so that they can be +// applied in order. 
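+//
+// Bundle verification problems are reported in the Errors field of the
+// result rather than as a call error, so clients can surface every
+// problem with a bundle at once. For example (a sketch only; the
+// client-side wiring is assumed):
+//
+//	r, err := client.GetBundleChanges(params.GetBundleChangesParams{
+//		BundleDataYAML: bundleYAML,
+//	})
+//	if err == nil && len(r.Errors) > 0 {
+//		// The bundle is invalid; r.Errors lists each problem.
+//	}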
+func (c *Client) GetBundleChanges(args params.GetBundleChangesParams) (params.GetBundleChangesResults, error) { + var results params.GetBundleChangesResults + data, err := charm.ReadBundleData(strings.NewReader(args.BundleDataYAML)) + if err != nil { + return results, errors.Annotate(err, "cannot read bundle YAML") + } + verifyConstraints := func(s string) error { + _, err := constraints.Parse(s) + return err + } + verifyStorage := func(s string) error { + _, err := storage.ParseConstraints(s) + return err + } + if err := data.Verify(verifyConstraints, verifyStorage); err != nil { + if err, ok := err.(*charm.VerificationError); ok { + results.Errors = make([]string, len(err.Errors)) + for i, e := range err.Errors { + results.Errors[i] = e.Error() + } + return results, nil + } + // This should never happen as Verify only returns verification errors. + return results, errors.Annotate(err, "cannot verify bundle") + } + changes := bundlechanges.FromData(data) + results.Changes = make([]*params.BundleChangesChange, len(changes)) + for i, c := range changes { + results.Changes[i] = ¶ms.BundleChangesChange{ + Id: c.Id(), + Method: c.Method(), + Args: c.GUIArgs(), + Requires: c.Requires(), + } + } + return results, nil +} === added file 'src/github.com/juju/juju/apiserver/client/bundles_test.go' --- src/github.com/juju/juju/apiserver/client/bundles_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/client/bundles_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,169 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" +) + +func (s *serverSuite) TestGetBundleChangesBundleContentError(c *gc.C) { + args := params.GetBundleChangesParams{ + BundleDataYAML: ":", + } + r, err := s.client.GetBundleChanges(args) + c.Assert(err, gc.ErrorMatches, `cannot read bundle YAML: cannot unmarshal bundle data: YAML error: did not find expected key`) + c.Assert(r, gc.DeepEquals, params.GetBundleChangesResults{}) +} + +func (s *serverSuite) TestGetBundleChangesBundleVerificationErrors(c *gc.C) { + args := params.GetBundleChangesParams{ + BundleDataYAML: ` + services: + django: + charm: django + to: [1] + haproxy: + charm: 42 + num_units: -1 + `, + } + r, err := s.client.GetBundleChanges(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, gc.IsNil) + c.Assert(r.Errors, jc.SameContents, []string{ + `placement "1" refers to a machine not defined in this bundle`, + `too many units specified in unit placement for service "django"`, + `invalid charm URL in service "haproxy": URL has invalid charm or bundle name: "42"`, + `negative number of units specified on service "haproxy"`, + }) +} + +func (s *serverSuite) TestGetBundleChangesBundleConstraintsError(c *gc.C) { + args := params.GetBundleChangesParams{ + BundleDataYAML: ` + services: + django: + charm: django + num_units: 1 + constraints: bad=wolf + `, + } + r, err := s.client.GetBundleChanges(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, gc.IsNil) + c.Assert(r.Errors, jc.SameContents, []string{ + `invalid constraints "bad=wolf" in service "django": unknown constraint "bad"`, + }) +} + +func (s *serverSuite) TestGetBundleChangesBundleStorageError(c *gc.C) { + args := params.GetBundleChangesParams{ + BundleDataYAML: ` + services: + django: + charm: django + num_units: 1 + storage: + bad: 0,100M + `, + } + r, err := s.client.GetBundleChanges(args) + 
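+ // The invalid storage constraint is reported via r.Errors below;
+ // err itself stays nil.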
c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, gc.IsNil) + c.Assert(r.Errors, jc.SameContents, []string{ + `invalid storage "bad" in service "django": cannot parse count: count must be greater than zero, got "0"`, + }) +} + +func (s *serverSuite) TestGetBundleChangesSuccess(c *gc.C) { + args := params.GetBundleChangesParams{ + BundleDataYAML: ` + services: + django: + charm: django + options: + debug: true + storage: + tmpfs: tmpfs,1G + haproxy: + charm: cs:trusty/haproxy-42 + relations: + - - django:web + - haproxy:web + `, + } + r, err := s.client.GetBundleChanges(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Changes, jc.DeepEquals, []*params.BundleChangesChange{{ + Id: "addCharm-0", + Method: "addCharm", + Args: []interface{}{"django"}, + }, { + Id: "deploy-1", + Method: "deploy", + Args: []interface{}{ + "$addCharm-0", "django", + map[string]interface{}{"debug": true}, "", + map[string]string{"tmpfs": "tmpfs,1G"}, + map[string]string{}, + }, + Requires: []string{"addCharm-0"}, + }, { + Id: "addCharm-2", + Method: "addCharm", + Args: []interface{}{"cs:trusty/haproxy-42"}, + }, { + Id: "deploy-3", + Method: "deploy", + Args: []interface{}{ + "$addCharm-2", "haproxy", + map[string]interface{}{}, "", + map[string]string{}, + map[string]string{}, + }, + Requires: []string{"addCharm-2"}, + }, { + Id: "addRelation-4", + Method: "addRelation", + Args: []interface{}{"$deploy-1:web", "$deploy-3:web"}, + Requires: []string{"deploy-1", "deploy-3"}, + }}) + c.Assert(r.Errors, gc.IsNil) +} + +func (s *serverSuite) TestGetBundleChangesBundleEndpointBindingsSuccess(c *gc.C) { + args := params.GetBundleChangesParams{ + BundleDataYAML: ` + services: + django: + charm: django + num_units: 1 + bindings: + url: public + `, + } + r, err := s.client.GetBundleChanges(args) + c.Assert(err, jc.ErrorIsNil) + + for _, change := range r.Changes { + if change.Method == "deploy" { + c.Assert(change, jc.DeepEquals, ¶ms.BundleChangesChange{ + Id: "deploy-1", + Method: "deploy", + Args: []interface{}{ + "$addCharm-0", + "django", + map[string]interface{}{}, + "", + map[string]string{}, + map[string]string{"url": "public"}, + }, + Requires: []string{"addCharm-0"}, + }) + } + } +} === modified file 'src/github.com/juju/juju/apiserver/client/client.go' --- src/github.com/juju/juju/apiserver/client/client.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/client/client.go 2016-03-22 15:18:22 +0000 @@ -5,161 +5,93 @@ import ( "fmt" - "strings" "time" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/highavailability" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/apiserver/service" + "github.com/juju/juju/apiserver/usermanager" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/manual" "github.com/juju/juju/instance" - jjj "github.com/juju/juju/juju" "github.com/juju/juju/network" "github.com/juju/juju/state" "github.com/juju/juju/version" ) func init() { - common.RegisterStandardFacade("Client", 0, NewClient) + common.RegisterStandardFacade("Client", 1, NewClient) } var logger = loggo.GetLogger("juju.apiserver.client") type API struct { - state *state.State - auth common.Authorizer - resources *common.Resources - client *Client + stateAccessor stateInterface + auth common.Authorizer + resources *common.Resources + client *Client // statusSetter 
provides common methods for updating an entity's provisioning status. statusSetter *common.StatusSetter toolsFinder *common.ToolsFinder } +// TODO(wallyworld) - remove this method +// state returns a state.State instance for this API. +// Until all code is refactored to use interfaces, we +// need this helper to keep older code happy. +func (api *API) state() *state.State { + return api.stateAccessor.(*stateShim).State +} + // Client serves client-specific API methods. type Client struct { api *API check *common.BlockChecker } +var getState = func(st *state.State) stateInterface { + return &stateShim{st} +} + // NewClient creates a new instance of the Client Facade. func NewClient(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*Client, error) { if !authorizer.AuthClient() { return nil, common.ErrPerm } - urlGetter := common.NewToolsURLGetter(st.EnvironUUID(), st) - return &Client{ + apiState := getState(st) + urlGetter := common.NewToolsURLGetter(apiState.ModelUUID(), apiState) + client := &Client{ api: &API{ - state: st, - auth: authorizer, - resources: resources, - statusSetter: common.NewStatusSetter(st, common.AuthAlways()), - toolsFinder: common.NewToolsFinder(st, st, urlGetter), + stateAccessor: apiState, + auth: authorizer, + resources: resources, + statusSetter: common.NewStatusSetter(st, common.AuthAlways()), + toolsFinder: common.NewToolsFinder(st, st, urlGetter), }, - check: common.NewBlockChecker(st)}, nil + check: common.NewBlockChecker(st)} + return client, nil } func (c *Client) WatchAll() (params.AllWatcherId, error) { - w := c.api.state.Watch() + w := c.api.stateAccessor.Watch() return params.AllWatcherId{ AllWatcherId: c.api.resources.Register(w), }, nil } -// ServiceSet implements the server side of Client.ServiceSet. Values set to an -// empty string will be unset. -// -// (Deprecated) Use NewServiceSetForClientAPI instead, to preserve values set to -// an empty string, and use ServiceUnset to unset values. -func (c *Client) ServiceSet(p params.ServiceSet) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(p.ServiceName) - if err != nil { - return err - } - return service.ServiceSetSettingsStrings(svc, p.Options) -} - -// NewServiceSetForClientAPI implements the server side of -// Client.NewServiceSetForClientAPI. This is exactly like ServiceSet except that -// it does not unset values that are set to an empty string. ServiceUnset -// should be used for that. -// -// TODO(Nate): rename this to ServiceSet (and remove the deprecated ServiceSet) -// when the GUI handles the new behavior. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) NewServiceSetForClientAPI(p params.ServiceSet) error { - svc, err := c.api.state.Service(p.ServiceName) - if err != nil { - return err - } - return newServiceSetSettingsStringsForClientAPI(svc, p.Options) -} - -// ServiceUnset implements the server side of Client.ServiceUnset. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. 
-func (c *Client) ServiceUnset(p params.ServiceUnset) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(p.ServiceName) - if err != nil { - return err - } - settings := make(charm.Settings) - for _, option := range p.Options { - settings[option] = nil - } - return svc.UpdateConfigSettings(settings) -} - -// ServiceSetYAML implements the server side of Client.ServerSetYAML. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) ServiceSetYAML(p params.ServiceSetYAML) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(p.ServiceName) - if err != nil { - return err - } - return serviceSetSettingsYAML(svc, p.Config) -} - -// ServiceCharmRelations implements the server side of Client.ServiceCharmRelations. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) ServiceCharmRelations(p params.ServiceCharmRelations) (params.ServiceCharmRelationsResults, error) { - var results params.ServiceCharmRelationsResults - service, err := c.api.state.Service(p.ServiceName) - if err != nil { - return results, err - } - endpoints, err := service.Endpoints() - if err != nil { - return results, err - } - results.CharmRelations = make([]string, len(endpoints)) - for i, endpoint := range endpoints { - results.CharmRelations[i] = endpoint.Relation.Name - } - return results, nil -} - // Resolved implements the server side of Client.Resolved. func (c *Client) Resolved(p params.Resolved) error { if err := c.check.ChangeAllowed(); err != nil { return errors.Trace(err) } - unit, err := c.api.state.Unit(p.UnitName) + unit, err := c.api.stateAccessor.Unit(p.UnitName) if err != nil { return err } @@ -170,7 +102,7 @@ func (c *Client) PublicAddress(p params.PublicAddress) (results params.PublicAddressResults, err error) { switch { case names.IsValidMachine(p.Target): - machine, err := c.api.state.Machine(p.Target) + machine, err := c.api.stateAccessor.Machine(p.Target) if err != nil { return results, err } @@ -181,7 +113,7 @@ return params.PublicAddressResults{PublicAddress: addr.Value}, nil case names.IsValidUnit(p.Target): - unit, err := c.api.state.Unit(p.Target) + unit, err := c.api.stateAccessor.Unit(p.Target) if err != nil { return results, err } @@ -198,7 +130,7 @@ func (c *Client) PrivateAddress(p params.PrivateAddress) (results params.PrivateAddressResults, err error) { switch { case names.IsValidMachine(p.Target): - machine, err := c.api.state.Machine(p.Target) + machine, err := c.api.stateAccessor.Machine(p.Target) if err != nil { return results, err } @@ -209,7 +141,7 @@ return params.PrivateAddressResults{PrivateAddress: addr.Value}, nil case names.IsValidUnit(p.Target): - unit, err := c.api.state.Unit(p.Target) + unit, err := c.api.stateAccessor.Unit(p.Target) if err != nil { return results, err } @@ -220,359 +152,24 @@ return params.PrivateAddressResults{PrivateAddress: addr.Value}, nil } return results, fmt.Errorf("unknown unit or machine %q", p.Target) -} - -// ServiceExpose changes the juju-managed firewall to expose any ports that -// were also explicitly marked by units as open. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. 
-func (c *Client) ServiceExpose(args params.ServiceExpose) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(args.ServiceName) - if err != nil { - return err - } - return svc.SetExposed() -} - -// ServiceUnexpose changes the juju-managed firewall to unexpose any ports that -// were also explicitly marked by units as open. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) ServiceUnexpose(args params.ServiceUnexpose) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(args.ServiceName) - if err != nil { - return err - } - return svc.ClearExposed() -} - -// ServiceDeploy fetches the charm from the charm store and deploys it. -// AddCharm or AddLocalCharm should be called to add the charm -// before calling ServiceDeploy, although for backward compatibility -// this is not necessary until 1.16 support is removed. -func (c *Client) ServiceDeploy(args params.ServiceDeploy) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - return service.DeployService(c.api.state, c.api.auth.GetAuthTag().String(), args) -} - -// ServiceDeployWithNetworks works exactly like ServiceDeploy, but -// allows specifying networks to include or exclude on the machine -// where the charm gets deployed (either with args.Network or with -// constraints). -// -// TODO(dimitern): Drop the special handling of networks in favor of -// spaces constraints, once possible. -func (c *Client) ServiceDeployWithNetworks(args params.ServiceDeploy) error { - return c.ServiceDeploy(args) -} - -// ServiceUpdate updates the service attributes, including charm URL, -// minimum number of units, settings and constraints. -// All parameters in params.ServiceUpdate except the service name are optional. -func (c *Client) ServiceUpdate(args params.ServiceUpdate) error { - if !args.ForceCharmUrl { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - } - svc, err := c.api.state.Service(args.ServiceName) - if err != nil { - return err - } - // Set the charm for the given service. - if args.CharmUrl != "" { - if err = c.serviceSetCharm(svc, args.CharmUrl, args.ForceCharmUrl); err != nil { - return err - } - } - // Set the minimum number of units for the given service. - if args.MinUnits != nil { - if err = svc.SetMinUnits(*args.MinUnits); err != nil { - return err - } - } - // Set up service's settings. - if args.SettingsYAML != "" { - if err = serviceSetSettingsYAML(svc, args.SettingsYAML); err != nil { - return err - } - } else if len(args.SettingsStrings) > 0 { - if err = service.ServiceSetSettingsStrings(svc, args.SettingsStrings); err != nil { - return err - } - } - // Update service's constraints. - if args.Constraints != nil { - return svc.SetConstraints(*args.Constraints) - } - return nil -} - -// serviceSetCharm sets the charm for the given service. -func (c *Client) serviceSetCharm(service *state.Service, url string, force bool) error { - curl, err := charm.ParseURL(url) - if err != nil { - return err - } - sch, err := c.api.state.Charm(curl) - if errors.IsNotFound(err) { - // Charms should be added before trying to use them, with - // AddCharm or AddLocalCharm API calls. When they're not, - // we're reverting to 1.16 compatibility mode. 
- return c.serviceSetCharm1dot16(service, curl, force) - } - if err != nil { - return err - } - return service.SetCharm(sch, force) -} - -// serviceSetCharm1dot16 sets the charm for the given service in 1.16 -// compatibility mode. Remove this when support for 1.16 is dropped. -func (c *Client) serviceSetCharm1dot16(service *state.Service, curl *charm.URL, force bool) error { - if curl.Schema != "cs" { - return fmt.Errorf(`charm url has unsupported schema %q`, curl.Schema) - } - if curl.Revision < 0 { - return fmt.Errorf("charm url must include revision") - } - err := c.AddCharm(params.CharmURL{ - URL: curl.String(), - }) - if err != nil { - return err - } - ch, err := c.api.state.Charm(curl) - if err != nil { - return err - } - return service.SetCharm(ch, force) -} - -// serviceSetSettingsYAML updates the settings for the given service, -// taking the configuration from a YAML string. -func serviceSetSettingsYAML(service *state.Service, settings string) error { - ch, _, err := service.Charm() - if err != nil { - return err - } - changes, err := ch.Config().ParseSettingsYAML([]byte(settings), service.Name()) - if err != nil { - return err - } - return service.UpdateConfigSettings(changes) -} - -// newServiceSetSettingsStringsForClientAPI updates the settings for the given -// service, taking the configuration from a map of strings. -// -// TODO(Nate): replace serviceSetSettingsStrings with this onces the GUI no -// longer expects to be able to unset values by sending an empty string. -func newServiceSetSettingsStringsForClientAPI(service *state.Service, settings map[string]string) error { - ch, _, err := service.Charm() - if err != nil { - return err - } - - // Validate the settings. - changes, err := ch.Config().ParseSettingsStrings(settings) - if err != nil { - return err - } - - return service.UpdateConfigSettings(changes) -} - -// ServiceSetCharm sets the charm for a given service. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) ServiceSetCharm(args params.ServiceSetCharm) error { - // when forced, don't block - if !args.Force { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - } - service, err := c.api.state.Service(args.ServiceName) - if err != nil { - return err - } - return c.serviceSetCharm(service, args.CharmUrl, args.Force) -} - -// addServiceUnits adds a given number of units to a service. -func addServiceUnits(state *state.State, args params.AddServiceUnits) ([]*state.Unit, error) { - service, err := state.Service(args.ServiceName) - if err != nil { - return nil, err - } - if args.NumUnits < 1 { - return nil, fmt.Errorf("must add at least one unit") - } - - // New API uses placement directives. - if len(args.Placement) > 0 { - return jjj.AddUnitsWithPlacement(state, service, args.NumUnits, args.Placement) - } - - // Otherwise we use the older machine spec. - if args.NumUnits > 1 && args.ToMachineSpec != "" { - return nil, fmt.Errorf("cannot use NumUnits with ToMachineSpec") - } - - if args.ToMachineSpec != "" && names.IsValidMachine(args.ToMachineSpec) { - _, err = state.Machine(args.ToMachineSpec) - if err != nil { - return nil, errors.Annotatef(err, `cannot add units for service "%v" to machine %v`, args.ServiceName, args.ToMachineSpec) - } - } - return jjj.AddUnits(state, service, args.NumUnits, args.ToMachineSpec) -} - -// AddServiceUnits adds a given number of units to a service. 
-func (c *Client) AddServiceUnits(args params.AddServiceUnits) (params.AddServiceUnitsResults, error) { - return c.AddServiceUnitsWithPlacement(args) -} - -// AddServiceUnits adds a given number of units to a service. -func (c *Client) AddServiceUnitsWithPlacement(args params.AddServiceUnits) (params.AddServiceUnitsResults, error) { - if err := c.check.ChangeAllowed(); err != nil { - return params.AddServiceUnitsResults{}, errors.Trace(err) - } - units, err := addServiceUnits(c.api.state, args) - if err != nil { - return params.AddServiceUnitsResults{}, err - } - unitNames := make([]string, len(units)) - for i, unit := range units { - unitNames[i] = unit.String() - } - return params.AddServiceUnitsResults{Units: unitNames}, nil -} - -// DestroyServiceUnits removes a given set of service units. -func (c *Client) DestroyServiceUnits(args params.DestroyServiceUnits) error { - if err := c.check.RemoveAllowed(); err != nil { - return errors.Trace(err) - } - var errs []string - for _, name := range args.UnitNames { - unit, err := c.api.state.Unit(name) - switch { - case errors.IsNotFound(err): - err = fmt.Errorf("unit %q does not exist", name) - case err != nil: - case unit.Life() != state.Alive: - continue - case unit.IsPrincipal(): - err = unit.Destroy() - default: - err = fmt.Errorf("unit %q is a subordinate", name) - } - if err != nil { - errs = append(errs, err.Error()) - } - } - return destroyErr("units", args.UnitNames, errs) -} - -// ServiceDestroy destroys a given service. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) ServiceDestroy(args params.ServiceDestroy) error { - if err := c.check.RemoveAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(args.ServiceName) - if err != nil { - return err - } - return svc.Destroy() -} - -// GetServiceConstraints returns the constraints for a given service. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) GetServiceConstraints(args params.GetServiceConstraints) (params.GetConstraintsResults, error) { - svc, err := c.api.state.Service(args.ServiceName) - if err != nil { - return params.GetConstraintsResults{}, err - } - cons, err := svc.Constraints() - return params.GetConstraintsResults{cons}, err -} - -// GetEnvironmentConstraints returns the constraints for the environment. -func (c *Client) GetEnvironmentConstraints() (params.GetConstraintsResults, error) { - cons, err := c.api.state.EnvironConstraints() + +} + +// GetModelConstraints returns the constraints for the model. +func (c *Client) GetModelConstraints() (params.GetConstraintsResults, error) { + cons, err := c.api.stateAccessor.ModelConstraints() if err != nil { return params.GetConstraintsResults{}, err } return params.GetConstraintsResults{cons}, nil } -// SetServiceConstraints sets the constraints for a given service. -// TODO(mattyw, all): This api call should be move to the new service facade. The client api version will then need bumping. -func (c *Client) SetServiceConstraints(args params.SetConstraints) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - svc, err := c.api.state.Service(args.ServiceName) - if err != nil { - return err - } - return svc.SetConstraints(args.Constraints) -} - -// SetEnvironmentConstraints sets the constraints for the environment. 
-func (c *Client) SetEnvironmentConstraints(args params.SetConstraints) error { - if err := c.check.ChangeAllowed(); err != nil { - return errors.Trace(err) - } - return c.api.state.SetEnvironConstraints(args.Constraints) -} - -// AddRelation adds a relation between the specified endpoints and returns the relation info. -func (c *Client) AddRelation(args params.AddRelation) (params.AddRelationResults, error) { - if err := c.check.ChangeAllowed(); err != nil { - return params.AddRelationResults{}, errors.Trace(err) - } - inEps, err := c.api.state.InferEndpoints(args.Endpoints...) - if err != nil { - return params.AddRelationResults{}, err - } - rel, err := c.api.state.AddRelation(inEps...) - if err != nil { - return params.AddRelationResults{}, err - } - outEps := make(map[string]charm.Relation) - for _, inEp := range inEps { - outEp, err := rel.Endpoint(inEp.ServiceName) - if err != nil { - return params.AddRelationResults{}, err - } - outEps[inEp.ServiceName] = outEp.Relation - } - return params.AddRelationResults{Endpoints: outEps}, nil -} - -// DestroyRelation removes the relation between the specified endpoints. -func (c *Client) DestroyRelation(args params.DestroyRelation) error { - if err := c.check.RemoveAllowed(); err != nil { - return errors.Trace(err) - } - eps, err := c.api.state.InferEndpoints(args.Endpoints...) - if err != nil { - return err - } - rel, err := c.api.state.EndpointsRelation(eps...) - if err != nil { - return err - } - return rel.Destroy() +// SetModelConstraints sets the constraints for the model. +func (c *Client) SetModelConstraints(args params.SetConstraints) error { + if err := c.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + return c.api.stateAccessor.SetModelConstraints(args.Constraints) } // AddMachines adds new machines with the supplied parameters. @@ -631,7 +228,7 @@ } if p.Series == "" { - conf, err := c.api.state.EnvironConfig() + conf, err := c.api.stateAccessor.ModelConfig() if err != nil { return nil, err } @@ -640,14 +237,14 @@ var placementDirective string if p.Placement != nil { - env, err := c.api.state.Environment() + env, err := c.api.stateAccessor.Model() if err != nil { return nil, err } // For 1.21 we should support both UUID and name, and with 1.22 // just support UUID if p.Placement.Scope != env.Name() && p.Placement.Scope != env.UUID() { - return nil, fmt.Errorf("invalid environment name %q", p.Placement.Scope) + return nil, fmt.Errorf("invalid model name %q", p.Placement.Scope) } placementDirective = p.Placement.Directive } @@ -667,33 +264,33 @@ Placement: placementDirective, } if p.ContainerType == "" { - return c.api.state.AddOneMachine(template) + return c.api.stateAccessor.AddOneMachine(template) } if p.ParentId != "" { - return c.api.state.AddMachineInsideMachine(template, p.ParentId, p.ContainerType) + return c.api.stateAccessor.AddMachineInsideMachine(template, p.ParentId, p.ContainerType) } - return c.api.state.AddMachineInsideNewMachine(template, template, p.ContainerType) + return c.api.stateAccessor.AddMachineInsideNewMachine(template, template, p.ContainerType) } // ProvisioningScript returns a shell script that, when run, // provisions a machine agent on the machine executing the script. 
func (c *Client) ProvisioningScript(args params.ProvisioningScriptParams) (params.ProvisioningScriptResult, error) { var result params.ProvisioningScriptResult - icfg, err := InstanceConfig(c.api.state, args.MachineId, args.Nonce, args.DataDir) + icfg, err := InstanceConfig(c.api.state(), args.MachineId, args.Nonce, args.DataDir) if err != nil { return result, err } // Until DisablePackageCommands is retired, for backwards // compatibility, we must respect the client's request and - // override any environment settings the user may have specified. + // override any model settings the user may have specified. // If the client does specify this setting, it will only ever be // true. False indicates the client doesn't care and we should use - // what's specified in the environments.yaml file. + // what's specified in the environment config. if args.DisablePackageCommands { icfg.EnableOSRefreshUpdate = false icfg.EnableOSUpgrade = false - } else if cfg, err := c.api.state.EnvironConfig(); err != nil { + } else if cfg, err := c.api.stateAccessor.ModelConfig(); err != nil { return result, err } else { icfg.EnableOSUpgrade = cfg.EnableOSUpgrade() @@ -706,30 +303,11 @@ // DestroyMachines removes a given set of machines. func (c *Client) DestroyMachines(args params.DestroyMachines) error { - var errs []string - for _, id := range args.MachineNames { - machine, err := c.api.state.Machine(id) - switch { - case errors.IsNotFound(err): - err = fmt.Errorf("machine %s does not exist", id) - case err != nil: - case args.Force: - err = machine.ForceDestroy() - case machine.Life() != state.Alive: - continue - default: - { - if err := c.check.RemoveAllowed(); err != nil { - return errors.Trace(err) - } - err = machine.Destroy() - } - } - if err != nil { - errs = append(errs, err.Error()) - } + if err := c.check.RemoveAllowed(); !args.Force && err != nil { + return errors.Trace(err) } - return destroyErr("machines", args.MachineNames, errs) + + return common.DestroyMachines(c.api.stateAccessor, args.Force, args.MachineNames...) } // CharmInfo returns information about the requested charm. @@ -738,7 +316,7 @@ if err != nil { return api.CharmInfo{}, err } - charm, err := c.api.state.Charm(curl) + charm, err := c.api.stateAccessor.Charm(curl) if err != nil { return api.CharmInfo{}, err } @@ -752,31 +330,31 @@ return info, nil } -// EnvironmentInfo returns information about the current environment (default +// ModelInfo returns information about the current model (default // series and type). -func (c *Client) EnvironmentInfo() (api.EnvironmentInfo, error) { - state := c.api.state - conf, err := state.EnvironConfig() +func (c *Client) ModelInfo() (params.ModelInfo, error) { + state := c.api.stateAccessor + conf, err := state.ModelConfig() if err != nil { - return api.EnvironmentInfo{}, err + return params.ModelInfo{}, err } - env, err := state.Environment() + env, err := state.Model() if err != nil { - return api.EnvironmentInfo{}, err + return params.ModelInfo{}, err } - info := api.EnvironmentInfo{ - DefaultSeries: config.PreferredSeries(conf), - ProviderType: conf.Type(), - Name: conf.Name(), - UUID: env.UUID(), - ServerUUID: env.ServerUUID(), + info := params.ModelInfo{ + DefaultSeries: config.PreferredSeries(conf), + ProviderType: conf.Type(), + Name: conf.Name(), + UUID: env.UUID(), + ControllerUUID: env.ControllerUUID(), } return info, nil } -// ShareEnvironment manages allowing and denying the given user(s) access to the environment. 
-func (c *Client) ShareEnvironment(args params.ModifyEnvironUsers) (result params.ErrorResults, err error) { +// ShareModel manages allowing and denying the given user(s) access to the model. +func (c *Client) ShareModel(args params.ModifyModelUsers) (result params.ErrorResults, err error) { var createdBy names.UserTag var ok bool if createdBy, ok = c.api.auth.GetAuthTag().(names.UserTag); !ok { @@ -794,33 +372,19 @@ userTagString := arg.UserTag user, err := names.ParseUserTag(userTagString) if err != nil { - result.Results[i].Error = common.ServerError(errors.Annotate(err, "could not share environment")) + result.Results[i].Error = common.ServerError(errors.Annotate(err, "could not share model")) continue } - switch arg.Action { - case params.AddEnvUser: - _, err := c.api.state.AddEnvironmentUser(user, createdBy, "") - if err != nil { - err = errors.Annotate(err, "could not share environment") - result.Results[i].Error = common.ServerError(err) - } - case params.RemoveEnvUser: - err := c.api.state.RemoveEnvironmentUser(user) - if err != nil { - err = errors.Annotate(err, "could not unshare environment") - result.Results[i].Error = common.ServerError(err) - } - default: - result.Results[i].Error = common.ServerError(errors.Errorf("unknown action %q", arg.Action)) - } + result.Results[i].Error = common.ServerError( + usermanager.ShareModelAction(c.api.stateAccessor, c.api.stateAccessor.ModelTag(), createdBy, user, arg.Action)) } return result, nil } -// EnvUserInfo returns information on all users in the environment. -func (c *Client) EnvUserInfo() (params.EnvUserInfoResults, error) { - var results params.EnvUserInfoResults - env, err := c.api.state.Environment() +// ModelUserInfo returns information on all users in the model. +func (c *Client) ModelUserInfo() (params.ModelUserInfoResults, error) { + var results params.ModelUserInfoResults + env, err := c.api.stateAccessor.Model() if err != nil { return results, errors.Trace(err) } @@ -839,8 +403,8 @@ } else { lastConn = &userLastConn } - results.Results = append(results.Results, params.EnvUserInfoResult{ - Result: ¶ms.EnvUserInfo{ + results.Results = append(results.Results, params.ModelUserInfoResult{ + Result: ¶ms.ModelUserInfo{ UserName: user.UserName(), DisplayName: user.DisplayName(), CreatedBy: user.CreatedBy(), @@ -852,75 +416,17 @@ return results, nil } -// GetAnnotations returns annotations about a given entity. -// This API is now deprecated - "Annotations" client should be used instead. 
-// TODO(anastasiamac) remove for Juju 2.x
-func (c *Client) GetAnnotations(args params.GetAnnotations) (params.GetAnnotationsResults, error) {
-	nothing := params.GetAnnotationsResults{}
-	tag, err := c.parseEntityTag(args.Tag)
-	if err != nil {
-		return nothing, errors.Trace(err)
-	}
-	entity, err := c.findEntity(tag)
-	if err != nil {
-		return nothing, errors.Trace(err)
-	}
-	ann, err := c.api.state.Annotations(entity)
-	if err != nil {
-		return nothing, errors.Trace(err)
-	}
-	return params.GetAnnotationsResults{Annotations: ann}, nil
-}
-
-func (c *Client) parseEntityTag(tag0 string) (names.Tag, error) {
-	tag, err := names.ParseTag(tag0)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-	if tag.Kind() == names.CharmTagKind {
-		return nil, common.NotSupportedError(tag, "client.annotations")
-	}
-	return tag, nil
-}
-
-func (c *Client) findEntity(tag names.Tag) (state.GlobalEntity, error) {
-	entity0, err := c.api.state.FindEntity(tag)
-	if err != nil {
-		return nil, err
-	}
-	entity, ok := entity0.(state.GlobalEntity)
-	if !ok {
-		return nil, common.NotSupportedError(tag, "annotations")
-	}
-	return entity, nil
-}
-
-// SetAnnotations stores annotations about a given entity.
-// This API is now deprecated - "Annotations" client should be used instead.
-// TODO(anastasiamac) remove for Juju 2.x
-func (c *Client) SetAnnotations(args params.SetAnnotations) error {
-	tag, err := c.parseEntityTag(args.Tag)
-	if err != nil {
-		return errors.Trace(err)
-	}
-	entity, err := c.findEntity(tag)
-	if err != nil {
-		return errors.Trace(err)
-	}
-	return c.api.state.SetAnnotations(entity, args.Pairs)
-}
-
 // AgentVersion returns the current version that the API server is running.
 func (c *Client) AgentVersion() (params.AgentVersionResult, error) {
-	return params.AgentVersionResult{Version: version.Current.Number}, nil
+	return params.AgentVersionResult{Version: version.Current}, nil
 }

-// EnvironmentGet implements the server-side part of the
-// get-environment CLI command.
-func (c *Client) EnvironmentGet() (params.EnvironmentConfigResults, error) {
-	result := params.EnvironmentConfigResults{}
+// ModelGet implements the server-side part of the
+// get-model-config CLI command.
+func (c *Client) ModelGet() (params.ModelConfigResults, error) {
+	result := params.ModelConfigResults{}
 	// Get the existing model config from the state.
-	config, err := c.api.state.EnvironConfig()
+	config, err := c.api.stateAccessor.ModelConfig()
 	if err != nil {
 		return result, err
 	}
@@ -928,9 +434,9 @@
 	return result, nil
 }

-// EnvironmentSet implements the server-side part of the
-// set-environment CLI command.
-func (c *Client) EnvironmentSet(args params.EnvironmentSet) error {
+// ModelSet implements the server-side part of the
+// set-model-config CLI command.
+func (c *Client) ModelSet(args params.ModelSet) error {
 	if err := c.check.ChangeAllowed(); err != nil {
 		return errors.Trace(err)
 	}
@@ -949,31 +455,48 @@
 	// TODO(waigani) 2014-3-11 #1167616
 	// Add a txn retry loop to ensure that the settings on disk have not
 	// changed underneath us.
-	return c.api.state.UpdateEnvironConfig(attrs, nil, checkAgentVersion)
+	return c.api.stateAccessor.UpdateModelConfig(attrs, nil, checkAgentVersion)
 }

-// EnvironmentUnset implements the server-side part of the
-// set-environment CLI command.
+// ModelUnset implements the server-side part of the
+// unset-model-config CLI command.
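+//
+// A small usage sketch (the attribute name is illustrative only, not part
+// of this change):
+//
+//	err := c.ModelUnset(params.ModelUnset{Keys: []string{"some-attr"}})
+//
+// removes the named attributes from the model configuration.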
+func (c *Client) ModelUnset(args params.ModelUnset) error {
 	if err := c.check.ChangeAllowed(); err != nil {
 		return errors.Trace(err)
 	}
 	// TODO(waigani) 2014-3-11 #1167616
 	// Add a txn retry loop to ensure that the settings on disk have not
 	// changed underneath us.
-	return c.api.state.UpdateEnvironConfig(nil, args.Keys, nil)
+	return c.api.stateAccessor.UpdateModelConfig(nil, args.Keys, nil)
 }

-// SetEnvironAgentVersion sets the environment agent version.
-func (c *Client) SetEnvironAgentVersion(args params.SetEnvironAgentVersion) error {
+// SetModelAgentVersion sets the model agent version.
+func (c *Client) SetModelAgentVersion(args params.SetModelAgentVersion) error {
 	if err := c.check.ChangeAllowed(); err != nil {
 		return errors.Trace(err)
 	}
-	if args.Version.Major > version.Current.Major && !args.MajorUpgradeAllowed {
-		return fmt.Errorf("major version upgrades must be initiated through a compatible client")
-	}
+	// Before changing the agent version to trigger an upgrade or downgrade,
+	// we'll do a very basic check to ensure the provider API is accessible.
+	cfg, err := c.api.stateAccessor.ModelConfig()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	env, err := getEnvironment(cfg)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	// CheckProviderAPI makes a cheap call against the provider to verify
+	// that it can be reached at all before committing to the version change.
+	if err := environs.CheckProviderAPI(env); err != nil {
+		return err
+	}
+	return c.api.stateAccessor.SetModelAgentVersion(args.Version)
+}

-	return c.api.state.SetEnvironAgentVersion(args.Version)
+var getEnvironment = func(cfg *config.Config) (environs.Environ, error) {
+	env, err := environs.New(cfg)
+	if err != nil {
+		return nil, err
+	}
+	return env, nil
 }

 // AbortCurrentUpgrade aborts and archives the current upgrade
@@ -982,7 +505,7 @@
 	if err := c.check.ChangeAllowed(); err != nil {
 		return errors.Trace(err)
 	}
-	return c.api.state.AbortCurrentUpgrade()
+	return c.api.stateAccessor.AbortCurrentUpgrade()
 }

 // FindTools returns a List containing all tools matching the given parameters.
@@ -990,38 +513,26 @@
 	return c.api.toolsFinder.FindTools(args)
 }

-func destroyErr(desc string, ids, errs []string) error {
-	if len(errs) == 0 {
-		return nil
-	}
-	msg := "some %s were not destroyed"
-	if len(errs) == len(ids) {
-		msg = "no %s were destroyed"
-	}
-	msg = fmt.Sprintf(msg, desc)
-	return fmt.Errorf("%s: %s", msg, strings.Join(errs, "; "))
-}
-
 func (c *Client) AddCharm(args params.CharmURL) error {
-	return service.AddCharmWithAuthorization(c.api.state, params.AddCharmWithAuthorization{
+	return service.AddCharmWithAuthorization(c.api.state(), params.AddCharmWithAuthorization{
 		URL: args.URL,
 	})
 }

 // AddCharmWithAuthorization adds the given charm URL (which must include revision) to
-// the environment, if it does not exist yet. Local charms are not
+// the model, if it does not exist yet. Local charms are not
 // supported, only charm store URLs. See also AddLocalCharm().
 //
 // The authorization macaroon, args.CharmStoreMacaroon, may be
 // omitted, in which case this call is equivalent to AddCharm.
 func (c *Client) AddCharmWithAuthorization(args params.AddCharmWithAuthorization) error {
-	return service.AddCharmWithAuthorization(c.api.state, args)
+	return service.AddCharmWithAuthorization(c.api.state(), args)
 }

 // ResolveCharms resolves the best available charm URLs with series, for charm
 // locations without a series specified.
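//
// For example (illustrative only; the result depends on what the charm
// store holds), a series-less URL such as "cs:wordpress" might resolve
// to "cs:trusty/wordpress-42".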
func (c *Client) ResolveCharms(args params.ResolveCharms) (params.ResolveCharmResults, error) { - return service.ResolveCharms(c.api.state, args) + return service.ResolveCharms(c.api.state(), args) } // RetryProvisioning marks a provisioning error as transient on the machines. @@ -1041,36 +552,20 @@ // APIHostPorts returns the API host/port addresses stored in state. func (c *Client) APIHostPorts() (result params.APIHostPortsResult, err error) { var servers [][]network.HostPort - if servers, err = c.api.state.APIHostPorts(); err != nil { + if servers, err = c.api.stateAccessor.APIHostPorts(); err != nil { return params.APIHostPortsResult{}, err } result.Servers = params.FromNetworkHostsPorts(servers) return result, nil } -// EnsureAvailability ensures the availability of Juju state servers. -// DEPRECATED: remove when we stop supporting 1.20 and earlier clients. -// This API is now on the HighAvailability facade. -func (c *Client) EnsureAvailability(args params.StateServersSpecs) (params.StateServersChangeResults, error) { - if err := c.check.ChangeAllowed(); err != nil { - return params.StateServersChangeResults{}, errors.Trace(err) - } - results := params.StateServersChangeResults{Results: make([]params.StateServersChangeResult, len(args.Specs))} - for i, stateServersSpec := range args.Specs { - result, err := highavailability.EnsureAvailabilitySingle(c.api.state, stateServersSpec) - results.Results[i].Result = result - results.Results[i].Error = common.ServerError(err) - } - return results, nil -} - -// DestroyEnvironment will try to destroy the current environment. +// DestroyModel will try to destroy the current model. // If there is a block on destruction, this method will return an error. -func (c *Client) DestroyEnvironment() (err error) { +func (c *Client) DestroyModel() (err error) { if err := c.check.DestroyAllowed(); err != nil { return errors.Trace(err) } - environTag := c.api.state.EnvironTag() - return errors.Trace(common.DestroyEnvironment(c.api.state, environTag)) + modelTag := c.api.stateAccessor.ModelTag() + return errors.Trace(common.DestroyModel(c.api.state(), modelTag)) } === modified file 'src/github.com/juju/juju/apiserver/client/client_test.go' --- src/github.com/juju/juju/apiserver/client/client_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/client/client_test.go 2016-03-22 15:18:22 +0000 @@ -15,24 +15,22 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/agent" "github.com/juju/juju/api" "github.com/juju/juju/apiserver/client" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/apiserver/service" "github.com/juju/juju/apiserver/testing" - apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/constraints" + "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/manual" toolstesting "github.com/juju/juju/environs/tools/testing" "github.com/juju/juju/instance" "github.com/juju/juju/network" - "github.com/juju/juju/provider/dummy" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/state/presence" @@ -75,98 +73,50 @@ return pinger } -func (s *serverSuite) TestEnsureAvailabilityDeprecated(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) 
-	// We have to ensure the agents are alive, or EnsureAvailability will
-	// create more to replace them.
-	pingerA := s.setAgentPresence(c, "0")
-	defer assertKill(c, pingerA)
-
-	machines, err := s.State.AllMachines()
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(machines, gc.HasLen, 1)
-	c.Assert(machines[0].Series(), gc.Equals, "quantal")
-
-	arg := params.StateServersSpecs{[]params.StateServersSpec{{NumStateServers: 3}}}
-	results, err := s.client.EnsureAvailability(arg)
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(results.Results, gc.HasLen, 1)
-	result := results.Results[0]
-	c.Assert(result.Error, gc.IsNil)
-	ensureAvailabilityResult := result.Result
-	c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"})
-	c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"})
-	c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0)
-
-	machines, err = s.State.AllMachines()
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(machines, gc.HasLen, 3)
-	c.Assert(machines[0].Series(), gc.Equals, "quantal")
-	c.Assert(machines[1].Series(), gc.Equals, "quantal")
-	c.Assert(machines[2].Series(), gc.Equals, "quantal")
-}
-
-func (s *serverSuite) TestBlockEnsureAvailabilityDeprecated(c *gc.C) {
-	_, err := s.State.AddMachine("quantal", state.JobManageEnviron)
-	c.Assert(err, jc.ErrorIsNil)
-
-	s.BlockAllChanges(c, "TestBlockEnsureAvailabilityDeprecated")
-
-	arg := params.StateServersSpecs{[]params.StateServersSpec{{NumStateServers: 3}}}
-	results, err := s.client.EnsureAvailability(arg)
-	s.AssertBlocked(c, err, "TestBlockEnsureAvailabilityDeprecated")
-	c.Assert(results.Results, gc.HasLen, 0)
-
-	machines, err := s.State.AllMachines()
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(machines, gc.HasLen, 1)
-}
-
-func (s *serverSuite) TestEnvUsersInfo(c *gc.C) {
+func (s *serverSuite) TestModelUsersInfo(c *gc.C) {
 	testAdmin := s.AdminUserTag(c)
-	owner, err := s.State.EnvironmentUser(testAdmin)
-	c.Assert(err, jc.ErrorIsNil)
-
-	localUser1 := s.makeLocalEnvUser(c, "ralphdoe", "Ralph Doe")
-	localUser2 := s.makeLocalEnvUser(c, "samsmith", "Sam Smith")
-	remoteUser1 := s.Factory.MakeEnvUser(c, &factory.EnvUserParams{User: "bobjohns@ubuntuone", DisplayName: "Bob Johns"})
-	remoteUser2 := s.Factory.MakeEnvUser(c, &factory.EnvUserParams{User: "nicshaw@idprovider", DisplayName: "Nic Shaw"})
-
-	results, err := s.client.EnvUserInfo()
-	c.Assert(err, jc.ErrorIsNil)
-	var expected params.EnvUserInfoResults
+	owner, err := s.State.ModelUser(testAdmin)
+	c.Assert(err, jc.ErrorIsNil)
+
+	localUser1 := s.makeLocalModelUser(c, "ralphdoe", "Ralph Doe")
+	localUser2 := s.makeLocalModelUser(c, "samsmith", "Sam Smith")
+	remoteUser1 := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "bobjohns@ubuntuone", DisplayName: "Bob Johns"})
+	remoteUser2 := s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "nicshaw@idprovider", DisplayName: "Nic Shaw"})
+
+	results, err := s.client.ModelUserInfo()
+	c.Assert(err, jc.ErrorIsNil)
+	var expected params.ModelUserInfoResults
 	for _, r := range []struct {
-		user *state.EnvironmentUser
-		info *params.EnvUserInfo
+		user *state.ModelUser
+		info *params.ModelUserInfo
 	}{
 		{
 			owner,
-			&params.EnvUserInfo{
+			&params.ModelUserInfo{
 				UserName:    owner.UserName(),
 				DisplayName: owner.DisplayName(),
 			},
 		}, {
 			localUser1,
-			&params.EnvUserInfo{
+			&params.ModelUserInfo{
 				UserName:    "ralphdoe@local",
 				DisplayName: "Ralph Doe",
 			},
 		}, {
 			localUser2,
-			&params.EnvUserInfo{
+			&params.ModelUserInfo{
 				UserName:    "samsmith@local",
 				DisplayName: "Sam Smith",
 			},
 		}, {
 			remoteUser1,
-			&params.EnvUserInfo{
+			&params.ModelUserInfo{
 				UserName:    "bobjohns@ubuntuone",
 				DisplayName: "Bob Johns",
 			},
 		}, {
 			remoteUser2,
-			&params.EnvUserInfo{
+			&params.ModelUserInfo{
 				UserName:    "nicshaw@idprovider",
 				DisplayName: "Nic Shaw",
 			},
@@ -175,7 +125,7 @@
 		r.info.CreatedBy = owner.UserName()
 		r.info.DateCreated = r.user.DateCreated()
 		r.info.LastConnection = lastConnPointer(c, r.user)
-		expected.Results = append(expected.Results, params.EnvUserInfoResult{Result: r.info})
+		expected.Results = append(expected.Results, params.ModelUserInfoResult{Result: r.info})
 	}

 	sort.Sort(ByUserName(expected.Results))
@@ -183,8 +133,8 @@
 	c.Assert(results, jc.DeepEquals, expected)
 }

-func lastConnPointer(c *gc.C, envUser *state.EnvironmentUser) *time.Time {
-	lastConn, err := envUser.LastConnection()
+func lastConnPointer(c *gc.C, modelUser *state.ModelUser) *time.Time {
+	lastConn, err := modelUser.LastConnection()
 	if err != nil {
 		if state.IsNeverConnectedError(err) {
 			return nil
@@ -194,147 +144,147 @@
 	return &lastConn
 }

-// ByUserName implements sort.Interface for []params.EnvUserInfoResult based on
+// ByUserName implements sort.Interface for []params.ModelUserInfoResult based on
 // the UserName field.
-type ByUserName []params.EnvUserInfoResult
+type ByUserName []params.ModelUserInfoResult

 func (a ByUserName) Len() int           { return len(a) }
 func (a ByUserName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
 func (a ByUserName) Less(i, j int) bool { return a[i].Result.UserName < a[j].Result.UserName }

-func (s *serverSuite) makeLocalEnvUser(c *gc.C, username, displayname string) *state.EnvironmentUser {
-	// factory.MakeUser will create an EnvUser for a local user by defalut
+func (s *serverSuite) makeLocalModelUser(c *gc.C, username, displayname string) *state.ModelUser {
+	// factory.MakeUser will create a ModelUser for a local user by default
 	user := s.Factory.MakeUser(c, &factory.UserParams{Name: username, DisplayName: displayname})
-	envUser, err := s.State.EnvironmentUser(user.UserTag())
+	modelUser, err := s.State.ModelUser(user.UserTag())
 	c.Assert(err, jc.ErrorIsNil)
-	return envUser
+	return modelUser
 }

-func (s *serverSuite) TestShareEnvironmentAddMissingLocalFails(c *gc.C) {
-	args := params.ModifyEnvironUsers{
-		Changes: []params.ModifyEnvironUser{{
+func (s *serverSuite) TestShareModelAddMissingLocalFails(c *gc.C) {
+	args := params.ModifyModelUsers{
+		Changes: []params.ModifyModelUser{{
 			UserTag: names.NewLocalUserTag("foobar").String(),
-			Action:  params.AddEnvUser,
+			Action:  params.AddModelUser,
 		}}}

-	result, err := s.client.ShareEnvironment(args)
+	result, err := s.client.ShareModel(args)
 	c.Assert(err, jc.ErrorIsNil)
-	expectedErr := `could not share environment: user "foobar" does not exist locally: user "foobar" not found`
+	expectedErr := `could not share model: user "foobar" does not exist locally: user "foobar" not found`
 	c.Assert(result.OneError(), gc.ErrorMatches, expectedErr)
 	c.Assert(result.Results, gc.HasLen, 1)
 	c.Assert(result.Results[0].Error, gc.ErrorMatches, expectedErr)
 }

-func (s *serverSuite) TestUnshareEnvironment(c *gc.C) {
-	user := s.Factory.MakeEnvUser(c, nil)
-	_, err := s.State.EnvironmentUser(user.UserTag())
+func (s *serverSuite) TestUnshareModel(c *gc.C) {
+	user := s.Factory.MakeModelUser(c, nil)
+	_, err := s.State.ModelUser(user.UserTag())
 	c.Assert(err, jc.ErrorIsNil)

-	args := params.ModifyEnvironUsers{
-		Changes: []params.ModifyEnvironUser{{
+	args := params.ModifyModelUsers{
+		Changes: []params.ModifyModelUser{{
 			UserTag: user.UserTag().String(),
-			Action:  params.RemoveEnvUser,
+			Action:
params.RemoveModelUser, }}} - result, err := s.client.ShareEnvironment(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result.OneError(), gc.IsNil) c.Assert(result.Results, gc.HasLen, 1) c.Assert(result.Results[0].Error, gc.IsNil) - _, err = s.State.EnvironmentUser(user.UserTag()) + _, err = s.State.ModelUser(user.UserTag()) c.Assert(errors.IsNotFound(err), jc.IsTrue) } -func (s *serverSuite) TestUnshareEnvironmentMissingUser(c *gc.C) { +func (s *serverSuite) TestUnshareModelMissingUser(c *gc.C) { user := names.NewUserTag("bob") - args := params.ModifyEnvironUsers{ - Changes: []params.ModifyEnvironUser{{ + args := params.ModifyModelUsers{ + Changes: []params.ModifyModelUser{{ UserTag: user.String(), - Action: params.RemoveEnvUser, + Action: params.RemoveModelUser, }}} - result, err := s.client.ShareEnvironment(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) - c.Assert(result.OneError(), gc.ErrorMatches, `could not unshare environment: env user "bob@local" does not exist: transaction aborted`) + c.Assert(result.OneError(), gc.ErrorMatches, `could not unshare model: env user "bob@local" does not exist: transaction aborted`) c.Assert(result.Results, gc.HasLen, 1) c.Assert(result.Results[0].Error, gc.NotNil) - _, err = s.State.EnvironmentUser(user) + _, err = s.State.ModelUser(user) c.Assert(errors.IsNotFound(err), jc.IsTrue) } -func (s *serverSuite) TestShareEnvironmentAddLocalUser(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar", NoEnvUser: true}) - args := params.ModifyEnvironUsers{ - Changes: []params.ModifyEnvironUser{{ +func (s *serverSuite) TestShareModelAddLocalUser(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar", NoModelUser: true}) + args := params.ModifyModelUsers{ + Changes: []params.ModifyModelUser{{ UserTag: user.Tag().String(), - Action: params.AddEnvUser, + Action: params.AddModelUser, }}} - result, err := s.client.ShareEnvironment(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result.OneError(), gc.IsNil) c.Assert(result.Results, gc.HasLen, 1) c.Assert(result.Results[0].Error, gc.IsNil) - envUser, err := s.State.EnvironmentUser(user.UserTag()) + modelUser, err := s.State.ModelUser(user.UserTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(envUser.UserName(), gc.Equals, user.UserTag().Canonical()) - c.Assert(envUser.CreatedBy(), gc.Equals, dummy.AdminUserTag().Canonical()) - lastConn, err := envUser.LastConnection() + c.Assert(modelUser.UserName(), gc.Equals, user.UserTag().Canonical()) + c.Assert(modelUser.CreatedBy(), gc.Equals, "admin@local") + lastConn, err := modelUser.LastConnection() c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) c.Assert(lastConn, gc.Equals, time.Time{}) } -func (s *serverSuite) TestShareEnvironmentAddRemoteUser(c *gc.C) { +func (s *serverSuite) TestShareModelAddRemoteUser(c *gc.C) { user := names.NewUserTag("foobar@ubuntuone") - args := params.ModifyEnvironUsers{ - Changes: []params.ModifyEnvironUser{{ + args := params.ModifyModelUsers{ + Changes: []params.ModifyModelUser{{ UserTag: user.String(), - Action: params.AddEnvUser, + Action: params.AddModelUser, }}} - result, err := s.client.ShareEnvironment(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result.OneError(), gc.IsNil) c.Assert(result.Results, gc.HasLen, 1) c.Assert(result.Results[0].Error, gc.IsNil) - envUser, err := s.State.EnvironmentUser(user) + modelUser, err := 
s.State.ModelUser(user) c.Assert(err, jc.ErrorIsNil) - c.Assert(envUser.UserName(), gc.Equals, user.Canonical()) - c.Assert(envUser.CreatedBy(), gc.Equals, dummy.AdminUserTag().Canonical()) - lastConn, err := envUser.LastConnection() + c.Assert(modelUser.UserName(), gc.Equals, user.Canonical()) + c.Assert(modelUser.CreatedBy(), gc.Equals, "admin@local") + lastConn, err := modelUser.LastConnection() c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) c.Assert(lastConn.IsZero(), jc.IsTrue) } -func (s *serverSuite) TestShareEnvironmentAddUserTwice(c *gc.C) { +func (s *serverSuite) TestShareModelAddUserTwice(c *gc.C) { user := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar"}) - args := params.ModifyEnvironUsers{ - Changes: []params.ModifyEnvironUser{{ + args := params.ModifyModelUsers{ + Changes: []params.ModifyModelUser{{ UserTag: user.Tag().String(), - Action: params.AddEnvUser, + Action: params.AddModelUser, }}} - _, err := s.client.ShareEnvironment(args) + _, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) - result, err := s.client.ShareEnvironment(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) - c.Assert(result.OneError(), gc.ErrorMatches, "could not share environment: environment user \"foobar@local\" already exists") + c.Assert(result.OneError(), gc.ErrorMatches, "could not share model: model user \"foobar@local\" already exists") c.Assert(result.Results, gc.HasLen, 1) - c.Assert(result.Results[0].Error, gc.ErrorMatches, "could not share environment: environment user \"foobar@local\" already exists") + c.Assert(result.Results[0].Error, gc.ErrorMatches, "could not share model: model user \"foobar@local\" already exists") c.Assert(result.Results[0].Error.Code, gc.Matches, params.CodeAlreadyExists) - envUser, err := s.State.EnvironmentUser(user.UserTag()) + modelUser, err := s.State.ModelUser(user.UserTag()) c.Assert(err, jc.ErrorIsNil) - c.Assert(envUser.UserName(), gc.Equals, user.UserTag().Canonical()) + c.Assert(modelUser.UserName(), gc.Equals, user.UserTag().Canonical()) } -func (s *serverSuite) TestShareEnvironmentInvalidTags(c *gc.C) { +func (s *serverSuite) TestShareModelInvalidTags(c *gc.C) { for _, testParam := range []struct { tag string validTag bool @@ -377,7 +327,7 @@ }, } { var expectedErr string - errPart := `could not share environment: "` + regexp.QuoteMeta(testParam.tag) + `" is not a valid ` + errPart := `could not share model: "` + regexp.QuoteMeta(testParam.tag) + `" is not a valid ` if testParam.validTag { @@ -389,14 +339,14 @@ expectedErr = errPart + `tag` } - args := params.ModifyEnvironUsers{ - Changes: []params.ModifyEnvironUser{{ + args := params.ModifyModelUsers{ + Changes: []params.ModifyModelUser{{ UserTag: testParam.tag, - Action: params.AddEnvUser, + Action: params.AddModelUser, }}} - _, err := s.client.ShareEnvironment(args) - result, err := s.client.ShareEnvironment(args) + _, err := s.client.ShareModel(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) c.Assert(result.Results, gc.HasLen, 1) @@ -404,28 +354,28 @@ } } -func (s *serverSuite) TestShareEnvironmentZeroArgs(c *gc.C) { - args := params.ModifyEnvironUsers{Changes: []params.ModifyEnvironUser{{}}} +func (s *serverSuite) TestShareModelZeroArgs(c *gc.C) { + args := params.ModifyModelUsers{Changes: []params.ModifyModelUser{{}}} - _, err := s.client.ShareEnvironment(args) - result, err := s.client.ShareEnvironment(args) + _, err := s.client.ShareModel(args) + 
result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) - expectedErr := `could not share environment: "" is not a valid tag` + expectedErr := `could not share model: "" is not a valid tag` c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) c.Assert(result.Results, gc.HasLen, 1) c.Assert(result.Results[0].Error, gc.ErrorMatches, expectedErr) } -func (s *serverSuite) TestShareEnvironmentInvalidAction(c *gc.C) { - var dance params.EnvironAction = "dance" - args := params.ModifyEnvironUsers{ - Changes: []params.ModifyEnvironUser{{ +func (s *serverSuite) TestShareModelInvalidAction(c *gc.C) { + var dance params.ModelAction = "dance" + args := params.ModifyModelUsers{ + Changes: []params.ModifyModelUser{{ UserTag: "user-user@local", Action: dance, }}} - _, err := s.client.ShareEnvironment(args) - result, err := s.client.ShareEnvironment(args) + _, err := s.client.ShareModel(args) + result, err := s.client.ShareModel(args) c.Assert(err, jc.ErrorIsNil) expectedErr := `unknown action "dance"` c.Assert(result.OneError(), gc.ErrorMatches, expectedErr) @@ -433,76 +383,87 @@ c.Assert(result.Results[0].Error, gc.ErrorMatches, expectedErr) } -func (s *serverSuite) getAgentVersion(c *gc.C) string { - envConfig, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - agentVersion, found := envConfig.AllAttrs()["agent-version"] - c.Assert(found, jc.IsTrue) - vers, ok := agentVersion.(string) - c.Assert(ok, jc.IsTrue) - return vers -} - -func (s *serverSuite) TestSetEnvironAgentVersionMajorFailsWithOldClient(c *gc.C) { - newVersNum := version.Current.Number - newVersNum.Major++ - - args := params.SetEnvironAgentVersion{ - Version: newVersNum, - } - err := s.client.SetEnvironAgentVersion(args) - c.Assert(err, gc.ErrorMatches, "major version upgrades must be initiated through a compatible client") - c.Assert(s.getAgentVersion(c), gc.Equals, version.Current.Number.String()) -} - -func (s *serverSuite) TestSetEnvironAgentVersionMajorUpgrade(c *gc.C) { - versNum := version.Current.Number - versNum.Major++ - - args := params.SetEnvironAgentVersion{ - Version: versNum, - MajorUpgradeAllowed: true, - } - err := s.client.SetEnvironAgentVersion(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.getAgentVersion(c), gc.Equals, versNum.String()) -} - func (s *serverSuite) TestSetEnvironAgentVersion(c *gc.C) { - versNum := version.Current.Number - versNum.Minor++ - s.assertSetEnvironAgentVersion(c, versNum) -} - -func (s *serverSuite) assertSetEnvironAgentVersion(c *gc.C, vers version.Number) { - args := params.SetEnvironAgentVersion{ - Version: vers, - } - err := s.client.SetEnvironAgentVersion(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.getAgentVersion(c), gc.Equals, vers.String()) + args := params.SetModelAgentVersion{ + Version: version.MustParse("9.8.7"), + } + err := s.client.SetModelAgentVersion(args) + c.Assert(err, jc.ErrorIsNil) + + envConfig, err := s.State.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + agentVersion, found := envConfig.AllAttrs()["agent-version"] + c.Assert(found, jc.IsTrue) + c.Assert(agentVersion, gc.Equals, "9.8.7") +} + +type mockEnviron struct { + environs.Environ + allInstancesCalled bool + err error +} + +func (m *mockEnviron) AllInstances() ([]instance.Instance, error) { + m.allInstancesCalled = true + return nil, m.err +} + +func (s *serverSuite) assertCheckProviderAPI(c *gc.C, envError error, expectErr string) { + env := &mockEnviron{err: envError} + s.PatchValue(client.GetEnvironment, func(cfg *config.Config) (environs.Environ, error) { + return 
env, nil + }) + args := params.SetModelAgentVersion{ + Version: version.MustParse("9.8.7"), + } + err := s.client.SetModelAgentVersion(args) + c.Assert(env.allInstancesCalled, jc.IsTrue) + if expectErr != "" { + c.Assert(err, gc.ErrorMatches, expectErr) + } else { + c.Assert(err, jc.ErrorIsNil) + } +} + +func (s *serverSuite) TestCheckProviderAPISuccess(c *gc.C) { + s.assertCheckProviderAPI(c, nil, "") + s.assertCheckProviderAPI(c, environs.ErrPartialInstances, "") + s.assertCheckProviderAPI(c, environs.ErrNoInstances, "") +} + +func (s *serverSuite) TestCheckProviderAPIFail(c *gc.C) { + s.assertCheckProviderAPI(c, fmt.Errorf("instances error"), "cannot make API call to provider: instances error") +} + +func (s *serverSuite) assertSetEnvironAgentVersion(c *gc.C) { + args := params.SetModelAgentVersion{ + Version: version.MustParse("9.8.7"), + } + err := s.client.SetModelAgentVersion(args) + c.Assert(err, jc.ErrorIsNil) + envConfig, err := s.State.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + agentVersion, found := envConfig.AllAttrs()["agent-version"] + c.Assert(found, jc.IsTrue) + c.Assert(agentVersion, gc.Equals, "9.8.7") } func (s *serverSuite) assertSetEnvironAgentVersionBlocked(c *gc.C, msg string) { - args := params.SetEnvironAgentVersion{ + args := params.SetModelAgentVersion{ Version: version.MustParse("9.8.7"), } - err := s.client.SetEnvironAgentVersion(args) + err := s.client.SetModelAgentVersion(args) s.AssertBlocked(c, err, msg) } func (s *serverSuite) TestBlockDestroySetEnvironAgentVersion(c *gc.C) { - versNum := version.Current.Number - versNum.Minor++ - s.BlockDestroyEnvironment(c, "TestBlockDestroySetEnvironAgentVersion") - s.assertSetEnvironAgentVersion(c, versNum) + s.BlockDestroyModel(c, "TestBlockDestroySetEnvironAgentVersion") + s.assertSetEnvironAgentVersion(c) } func (s *serverSuite) TestBlockRemoveSetEnvironAgentVersion(c *gc.C) { - versNum := version.Current.Number - versNum.Minor++ s.BlockRemoveObject(c, "TestBlockRemoveSetEnvironAgentVersion") - s.assertSetEnvironAgentVersion(c, versNum) + s.assertSetEnvironAgentVersion(c) } func (s *serverSuite) TestBlockChangesSetEnvironAgentVersion(c *gc.C) { @@ -511,8 +472,8 @@ } func (s *serverSuite) TestAbortCurrentUpgrade(c *gc.C) { - // Create a provisioned state server. - machine, err := s.State.AddMachine("series", state.JobManageEnviron) + // Create a provisioned controller. + machine, err := s.State.AddMachine("series", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) err = machine.SetProvisioned(instance.Id("i-blah"), "fake-nonce", nil) c.Assert(err, jc.ErrorIsNil) @@ -551,8 +512,8 @@ } func (s *serverSuite) setupAbortCurrentUpgradeBlocked(c *gc.C) { - // Create a provisioned state server. - machine, err := s.State.AddMachine("series", state.JobManageEnviron) + // Create a provisioned controller. 
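+	// The fake instance id and nonce passed to SetProvisioned below are
+	// enough for state to consider the machine provisioned; no real
+	// instance is created in this test.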
+ machine, err := s.State.AddMachine("series", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) err = machine.SetProvisioned(instance.Id("i-blah"), "fake-nonce", nil) c.Assert(err, jc.ErrorIsNil) @@ -571,7 +532,7 @@ func (s *serverSuite) TestBlockDestroyAbortCurrentUpgrade(c *gc.C) { s.setupAbortCurrentUpgradeBlocked(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyAbortCurrentUpgrade") + s.BlockDestroyModel(c, "TestBlockDestroyAbortCurrentUpgrade") s.assertAbortCurrentUpgrade(c) } @@ -624,405 +585,6 @@ c.Assert(status, jc.DeepEquals, scenarioStatus) } -var ( - validSetTestValue = "a value with spaces\nand newline\nand UTF-8 characters: \U0001F604 / \U0001F44D" - invalidSetTestValue = "a value with an invalid UTF-8 sequence: " + string([]byte{0xFF, 0xFF}) - correctedSetTestValue = "a value with an invalid UTF-8 sequence: \ufffd\ufffd" -) - -func (s *clientSuite) TestClientServiceSet(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - err := s.APIState.Client().ServiceSet("dummy", map[string]string{ - "title": "foobar", - "username": validSetTestValue, - }) - c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": validSetTestValue, - }) - - // Test doesn't fail because Go JSON marshalling converts invalid - // UTF-8 sequences transparently to U+FFFD. The test demonstrates - // this behavior. It's a currently accepted behavior as it never has - // been a real-life issue. - err = s.APIState.Client().ServiceSet("dummy", map[string]string{ - "title": "foobar", - "username": invalidSetTestValue, - }) - c.Assert(err, jc.ErrorIsNil) - settings, err = dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": correctedSetTestValue, - }) - - err = s.APIState.Client().ServiceSet("dummy", map[string]string{ - "title": "barfoo", - "username": "", - }) - c.Assert(err, jc.ErrorIsNil) - settings, err = dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "barfoo", - "username": "", - }) -} - -func (s *serverSuite) assertServiceSetBlocked(c *gc.C, dummy *state.Service, msg string) { - err := s.client.ServiceSet(params.ServiceSet{ - ServiceName: "dummy", - Options: map[string]string{ - "title": "foobar", - "username": validSetTestValue}}) - s.AssertBlocked(c, err, msg) -} - -func (s *serverSuite) assertServiceSet(c *gc.C, dummy *state.Service) { - err := s.client.ServiceSet(params.ServiceSet{ - ServiceName: "dummy", - Options: map[string]string{ - "title": "foobar", - "username": validSetTestValue}}) - c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": validSetTestValue, - }) -} - -func (s *serverSuite) TestBlockDestroyServiceSet(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceSet") - s.assertServiceSet(c, dummy) -} - -func (s *serverSuite) TestBlockRemoveServiceSet(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockRemoveObject(c, "TestBlockRemoveServiceSet") - s.assertServiceSet(c, dummy) -} - -func (s *serverSuite) TestBlockChangesServiceSet(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, 
"dummy")) - s.BlockAllChanges(c, "TestBlockChangesServiceSet") - s.assertServiceSetBlocked(c, dummy, "TestBlockChangesServiceSet") -} - -func (s *clientSuite) TestClientServerUnset(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - err := s.APIState.Client().ServiceSet("dummy", map[string]string{ - "title": "foobar", - "username": "user name", - }) - c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": "user name", - }) - - err = s.APIState.Client().ServiceUnset("dummy", []string{"username"}) - c.Assert(err, jc.ErrorIsNil) - settings, err = dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - }) -} - -func (s *serverSuite) setupServerUnsetBlocked(c *gc.C) *state.Service { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - err := s.client.ServiceSet(params.ServiceSet{ - ServiceName: "dummy", - Options: map[string]string{ - "title": "foobar", - "username": "user name", - }}) - c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": "user name", - }) - return dummy -} - -func (s *serverSuite) assertServerUnset(c *gc.C, dummy *state.Service) { - err := s.client.ServiceUnset(params.ServiceUnset{ - ServiceName: "dummy", - Options: []string{"username"}, - }) - c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - }) -} - -func (s *serverSuite) assertServerUnsetBlocked(c *gc.C, dummy *state.Service, msg string) { - err := s.client.ServiceUnset(params.ServiceUnset{ - ServiceName: "dummy", - Options: []string{"username"}, - }) - s.AssertBlocked(c, err, msg) -} - -func (s *serverSuite) TestBlockDestroyServerUnset(c *gc.C) { - dummy := s.setupServerUnsetBlocked(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyServerUnset") - s.assertServerUnset(c, dummy) -} - -func (s *serverSuite) TestBlockRemoveServerUnset(c *gc.C) { - dummy := s.setupServerUnsetBlocked(c) - s.BlockRemoveObject(c, "TestBlockRemoveServerUnset") - s.assertServerUnset(c, dummy) -} - -func (s *serverSuite) TestBlockChangesServerUnset(c *gc.C) { - dummy := s.setupServerUnsetBlocked(c) - s.BlockAllChanges(c, "TestBlockChangesServerUnset") - s.assertServerUnsetBlocked(c, dummy, "TestBlockChangesServerUnset") -} - -func (s *clientSuite) TestClientServiceSetYAML(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - err := s.APIState.Client().ServiceSetYAML("dummy", "dummy:\n title: foobar\n username: user name\n") - c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": "user name", - }) - - err = s.APIState.Client().ServiceSetYAML("dummy", "dummy:\n title: barfoo\n username: \n") - c.Assert(err, jc.ErrorIsNil) - settings, err = dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "barfoo", - }) -} - -func (s *clientSuite) assertServiceSetYAML(c *gc.C, dummy *state.Service) { - err := s.APIState.Client().ServiceSetYAML("dummy", "dummy:\n title: foobar\n username: user name\n") - 
c.Assert(err, jc.ErrorIsNil) - settings, err := dummy.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{ - "title": "foobar", - "username": "user name", - }) -} - -func (s *clientSuite) assertServiceSetYAMLBlocked(c *gc.C, dummy *state.Service, msg string) { - err := s.APIState.Client().ServiceSetYAML("dummy", "dummy:\n title: foobar\n username: user name\n") - s.AssertBlocked(c, err, msg) -} - -func (s *clientSuite) TestBlockDestroyServiceSetYAML(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceSetYAML") - s.assertServiceSetYAML(c, dummy) -} - -func (s *clientSuite) TestBlockRemoveServiceSetYAML(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockRemoveObject(c, "TestBlockRemoveServiceSetYAML") - s.assertServiceSetYAML(c, dummy) -} - -func (s *clientSuite) TestBlockChangesServiceSetYAML(c *gc.C) { - dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockAllChanges(c, "TestBlockChangesServiceSetYAML") - s.assertServiceSetYAMLBlocked(c, dummy, "TestBlockChangesServiceSetYAML") -} - -var clientAddServiceUnitsTests = []struct { - about string - service string // if not set, defaults to 'dummy' - expected []string - to string - err string -}{ - { - about: "returns unit names", - expected: []string{"dummy/0", "dummy/1", "dummy/2"}, - }, - { - about: "fails trying to add zero units", - err: "must add at least one unit", - }, - { - about: "cannot mix to when adding multiple units", - err: "cannot use NumUnits with ToMachineSpec", - expected: []string{"dummy/0", "dummy/1"}, - to: "0", - }, - { - // Note: chained-state, we add 1 unit here, but the 3 units - // from the first condition still exist - about: "force the unit onto bootstrap machine", - expected: []string{"dummy/3"}, - to: "0", - }, - { - about: "unknown service name", - service: "unknown-service", - err: `service "unknown-service" not found`, - }, -} - -func (s *clientSuite) TestClientAddServiceUnits(c *gc.C) { - s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - for i, t := range clientAddServiceUnitsTests { - c.Logf("test %d. 
%s", i, t.about) - serviceName := t.service - if serviceName == "" { - serviceName = "dummy" - } - units, err := s.APIState.Client().AddServiceUnits(serviceName, len(t.expected), t.to) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - continue - } - c.Assert(err, jc.ErrorIsNil) - c.Assert(units, gc.DeepEquals, t.expected) - } - // Test that we actually assigned the unit to machine 0 - forcedUnit, err := s.BackingState.Unit("dummy/3") - c.Assert(err, jc.ErrorIsNil) - assignedMachine, err := forcedUnit.AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - c.Assert(assignedMachine, gc.Equals, "0") -} - -func (s *clientSuite) TestClientAddServiceUnitsToNewContainer(c *gc.C) { - svc := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - machine, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - - _, err = s.APIState.Client().AddServiceUnits("dummy", 1, "lxc:"+machine.Id()) - c.Assert(err, jc.ErrorIsNil) - - units, err := svc.AllUnits() - c.Assert(err, jc.ErrorIsNil) - mid, err := units[0].AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - c.Assert(mid, gc.Equals, machine.Id()+"/lxc/0") -} - -var clientAddServiceUnitsWithPlacementTests = []struct { - about string - service string // if not set, defaults to 'dummy' - expected []string - machineIds []string - placement []*instance.Placement - err string -}{ - { - about: "valid placement directives", - expected: []string{"dummy/0"}, - placement: []*instance.Placement{{"deadbeef-0bad-400d-8000-4b1d0d06f00d", "valid"}}, - machineIds: []string{"1"}, - }, { - about: "direct machine assignment placement directive", - expected: []string{"dummy/1", "dummy/2"}, - placement: []*instance.Placement{{"#", "1"}, {"lxc", "1"}}, - machineIds: []string{"1", "1/lxc/0"}, - }, { - about: "invalid placement directive", - err: ".* invalid placement is invalid", - expected: []string{"dummy/3"}, - placement: []*instance.Placement{{"deadbeef-0bad-400d-8000-4b1d0d06f00d", "invalid"}}, - }, -} - -func (s *clientSuite) TestClientAddServiceUnitsWithPlacement(c *gc.C) { - s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - // Add a machine for the units to be placed on. - _, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - for i, t := range clientAddServiceUnitsWithPlacementTests { - c.Logf("test %d. 
%s", i, t.about) - serviceName := t.service - if serviceName == "" { - serviceName = "dummy" - } - units, err := s.APIState.Client().AddServiceUnitsWithPlacement(serviceName, len(t.expected), t.placement) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - continue - } - c.Assert(err, jc.ErrorIsNil) - c.Assert(units, gc.DeepEquals, t.expected) - for i, unitName := range units { - u, err := s.BackingState.Unit(unitName) - c.Assert(err, jc.ErrorIsNil) - assignedMachine, err := u.AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - c.Assert(assignedMachine, gc.Equals, t.machineIds[i]) - } - } -} - -func (s *clientSuite) assertAddServiceUnits(c *gc.C) { - units, err := s.APIState.Client().AddServiceUnits("dummy", 3, "") - c.Assert(err, jc.ErrorIsNil) - c.Assert(units, gc.DeepEquals, []string{"dummy/0", "dummy/1", "dummy/2"}) - - // Test that we actually assigned the unit to machine 0 - forcedUnit, err := s.BackingState.Unit("dummy/0") - c.Assert(err, jc.ErrorIsNil) - assignedMachine, err := forcedUnit.AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - c.Assert(assignedMachine, gc.Equals, "0") -} - -func (s *clientSuite) assertAddServiceUnitsBlocked(c *gc.C, msg string) { - _, err := s.APIState.Client().AddServiceUnits("dummy", 3, "") - s.AssertBlocked(c, err, msg) -} - -func (s *clientSuite) TestBlockDestroyAddServiceUnits(c *gc.C) { - s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockDestroyEnvironment(c, "TestBlockDestroyAddServiceUnits") - s.assertAddServiceUnits(c) -} - -func (s *clientSuite) TestBlockRemoveAddServiceUnits(c *gc.C) { - s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockRemoveObject(c, "TestBlockRemoveAddServiceUnits") - s.assertAddServiceUnits(c) -} - -func (s *clientSuite) TestBlockChangeAddServiceUnits(c *gc.C) { - s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - s.BlockAllChanges(c, "TestBlockChangeAddServiceUnits") - s.assertAddServiceUnitsBlocked(c, "TestBlockChangeAddServiceUnits") -} - -func (s *clientSuite) TestClientAddUnitToMachineNotFound(c *gc.C) { - s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - _, err := s.APIState.Client().AddServiceUnits("dummy", 1, "42") - c.Assert(err, gc.ErrorMatches, `cannot add units for service "dummy" to machine 42: machine 42 not found`) -} - func (s *clientSuite) TestClientCharmInfo(c *gc.C) { var clientCharmInfoTests = []struct { about string @@ -1073,25 +635,22 @@ url: "local:quantal/wordpress-3", }, { - about: "invalid URL", - charm: "wordpress", - expectedActions: &charm.Actions{ActionSpecs: nil}, - url: "not-valid", - err: "charm url series is not resolved", - }, - { - about: "invalid schema", - charm: "wordpress", - expectedActions: &charm.Actions{ActionSpecs: nil}, - url: "not-valid:your-arguments", - err: `charm URL has invalid schema: "not-valid:your-arguments"`, - }, - { - about: "unknown charm", - charm: "wordpress", - expectedActions: &charm.Actions{ActionSpecs: nil}, - url: "cs:missing/one-1", - err: `charm "cs:missing/one-1" not found`, + about: "invalid URL", + charm: "wordpress", + url: "not-valid!", + err: `URL has invalid charm or bundle name: "not-valid!"`, + }, + { + about: "invalid schema", + charm: "wordpress", + url: "not-valid:your-arguments", + err: `charm or bundle URL has invalid schema: "not-valid:your-arguments"`, + }, + { + about: "unknown charm", + charm: "wordpress", + url: "cs:missing/one-1", + err: `charm "cs:missing/one-1" not found \(not found\)`, }, } @@ -1116,360 +675,17 @@ } } -func (s *clientSuite) 
TestClientEnvironmentInfo(c *gc.C) { - conf, _ := s.State.EnvironConfig() - info, err := s.APIState.Client().EnvironmentInfo() +func (s *clientSuite) TestClientModelInfo(c *gc.C) { + conf, _ := s.State.ModelConfig() + info, err := s.APIState.Client().ModelInfo() c.Assert(err, jc.ErrorIsNil) - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(info.DefaultSeries, gc.Equals, config.PreferredSeries(conf)) c.Assert(info.ProviderType, gc.Equals, conf.Type()) c.Assert(info.Name, gc.Equals, conf.Name()) c.Assert(info.UUID, gc.Equals, env.UUID()) - c.Assert(info.ServerUUID, gc.Equals, env.ServerUUID()) -} - -var clientAnnotationsTests = []struct { - about string - initial map[string]string - input map[string]string - expected map[string]string - err string -}{ - { - about: "test setting an annotation", - input: map[string]string{"mykey": "myvalue"}, - expected: map[string]string{"mykey": "myvalue"}, - }, - { - about: "test setting multiple annotations", - input: map[string]string{"key1": "value1", "key2": "value2"}, - expected: map[string]string{"key1": "value1", "key2": "value2"}, - }, - { - about: "test overriding annotations", - initial: map[string]string{"mykey": "myvalue"}, - input: map[string]string{"mykey": "another-value"}, - expected: map[string]string{"mykey": "another-value"}, - }, - { - about: "test setting an invalid annotation", - input: map[string]string{"invalid.key": "myvalue"}, - err: `cannot update annotations on .*: invalid key "invalid.key"`, - }, -} - -func (s *clientSuite) TestClientAnnotations(c *gc.C) { - // Set up entities. - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - unit, err := service.AddUnit() - c.Assert(err, jc.ErrorIsNil) - machine, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - environment, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - type taggedAnnotator interface { - state.Entity - } - entities := []taggedAnnotator{service, unit, machine, environment} - for i, t := range clientAnnotationsTests { - for _, entity := range entities { - id := entity.Tag().String() // this is WRONG, it should be Tag().Id() but the code is wrong. - c.Logf("test %d. %s. entity %s", i, t.about, id) - // Set initial entity annotations. - err := s.APIState.Client().SetAnnotations(id, t.initial) - c.Assert(err, jc.ErrorIsNil) - // Add annotations using the API call. - err = s.APIState.Client().SetAnnotations(id, t.input) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - continue - } - // Retrieve annotations using the API call. - ann, err := s.APIState.Client().GetAnnotations(id) - c.Assert(err, jc.ErrorIsNil) - // Check annotations are correctly returned. - c.Assert(ann, gc.DeepEquals, t.input) - // Clean up annotations on the current entity. - cleanup := make(map[string]string) - for key := range ann { - cleanup[key] = "" - } - err = s.APIState.Client().SetAnnotations(id, cleanup) - c.Assert(err, jc.ErrorIsNil) - } - } -} - -func (s *clientSuite) TestCharmAnnotationsUnsupported(c *gc.C) { - // Set up charm. - charm := s.AddTestingCharm(c, "dummy") - id := charm.Tag().Id() - for i, t := range clientAnnotationsTests { - c.Logf("test %d. %s. entity %s", i, t.about, id) - // Add annotations using the API call. - err := s.APIState.Client().SetAnnotations(id, t.input) - // Should not be able to annotate charm with this client - c.Assert(err.Error(), gc.Matches, ".*is not a valid tag.*") - - // Retrieve annotations using the API call. 
- ann, err := s.APIState.Client().GetAnnotations(id) - // Should not be able to get annotations from charm using this client - c.Assert(err.Error(), gc.Matches, ".*is not a valid tag.*") - c.Assert(ann, gc.IsNil) - } -} - -func (s *clientSuite) TestClientAnnotationsBadEntity(c *gc.C) { - bad := []string{"", "machine", "-foo", "foo-", "---", "machine-jim", "unit-123", "unit-foo", "service-", "service-foo/bar"} - expected := `".*" is not a valid( [a-z]+)? tag` - for _, id := range bad { - err := s.APIState.Client().SetAnnotations(id, map[string]string{"mykey": "myvalue"}) - c.Assert(err, gc.ErrorMatches, expected) - _, err = s.APIState.Client().GetAnnotations(id) - c.Assert(err, gc.ErrorMatches, expected) - } -} - -var serviceExposeTests = []struct { - about string - service string - err string - exposed bool -}{ - { - about: "unknown service name", - service: "unknown-service", - err: `service "unknown-service" not found`, - }, - { - about: "expose a service", - service: "dummy-service", - exposed: true, - }, - { - about: "expose an already exposed service", - service: "exposed-service", - exposed: true, - }, -} - -func (s *clientSuite) TestClientServiceExpose(c *gc.C) { - charm := s.AddTestingCharm(c, "dummy") - serviceNames := []string{"dummy-service", "exposed-service"} - svcs := make([]*state.Service, len(serviceNames)) - var err error - for i, name := range serviceNames { - svcs[i] = s.AddTestingService(c, name, charm) - c.Assert(svcs[i].IsExposed(), jc.IsFalse) - } - err = svcs[1].SetExposed() - c.Assert(err, jc.ErrorIsNil) - c.Assert(svcs[1].IsExposed(), jc.IsTrue) - for i, t := range serviceExposeTests { - c.Logf("test %d. %s", i, t.about) - err = s.APIState.Client().ServiceExpose(t.service) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - } else { - c.Assert(err, jc.ErrorIsNil) - service, err := s.State.Service(t.service) - c.Assert(err, jc.ErrorIsNil) - c.Assert(service.IsExposed(), gc.Equals, t.exposed) - } - } -} - -func (s *clientSuite) setupServiceExpose(c *gc.C) { - charm := s.AddTestingCharm(c, "dummy") - serviceNames := []string{"dummy-service", "exposed-service"} - svcs := make([]*state.Service, len(serviceNames)) - var err error - for i, name := range serviceNames { - svcs[i] = s.AddTestingService(c, name, charm) - c.Assert(svcs[i].IsExposed(), jc.IsFalse) - } - err = svcs[1].SetExposed() - c.Assert(err, jc.ErrorIsNil) - c.Assert(svcs[1].IsExposed(), jc.IsTrue) -} - -func (s *clientSuite) assertServiceExpose(c *gc.C) { - for i, t := range serviceExposeTests { - c.Logf("test %d. %s", i, t.about) - err := s.APIState.Client().ServiceExpose(t.service) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - } else { - c.Assert(err, jc.ErrorIsNil) - service, err := s.State.Service(t.service) - c.Assert(err, jc.ErrorIsNil) - c.Assert(service.IsExposed(), gc.Equals, t.exposed) - } - } -} - -func (s *clientSuite) assertServiceExposeBlocked(c *gc.C, msg string) { - for i, t := range serviceExposeTests { - c.Logf("test %d. 
%s", i, t.about) - err := s.APIState.Client().ServiceExpose(t.service) - s.AssertBlocked(c, err, msg) - } -} - -func (s *clientSuite) TestBlockDestroyServiceExpose(c *gc.C) { - s.setupServiceExpose(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceExpose") - s.assertServiceExpose(c) -} - -func (s *clientSuite) TestBlockRemoveServiceExpose(c *gc.C) { - s.setupServiceExpose(c) - s.BlockRemoveObject(c, "TestBlockRemoveServiceExpose") - s.assertServiceExpose(c) -} - -func (s *clientSuite) TestBlockChangesServiceExpose(c *gc.C) { - s.setupServiceExpose(c) - s.BlockAllChanges(c, "TestBlockChangesServiceExpose") - s.assertServiceExposeBlocked(c, "TestBlockChangesServiceExpose") -} - -var serviceUnexposeTests = []struct { - about string - service string - err string - initial bool - expected bool -}{ - { - about: "unknown service name", - service: "unknown-service", - err: `service "unknown-service" not found`, - }, - { - about: "unexpose a service", - service: "dummy-service", - initial: true, - expected: false, - }, - { - about: "unexpose an already unexposed service", - service: "dummy-service", - initial: false, - expected: false, - }, -} - -func (s *clientSuite) TestClientServiceUnexpose(c *gc.C) { - charm := s.AddTestingCharm(c, "dummy") - for i, t := range serviceUnexposeTests { - c.Logf("test %d. %s", i, t.about) - svc := s.AddTestingService(c, "dummy-service", charm) - if t.initial { - svc.SetExposed() - } - c.Assert(svc.IsExposed(), gc.Equals, t.initial) - err := s.APIState.Client().ServiceUnexpose(t.service) - if t.err == "" { - c.Assert(err, jc.ErrorIsNil) - svc.Refresh() - c.Assert(svc.IsExposed(), gc.Equals, t.expected) - } else { - c.Assert(err, gc.ErrorMatches, t.err) - } - err = svc.Destroy() - c.Assert(err, jc.ErrorIsNil) - } -} - -func (s *clientSuite) setupServiceUnexpose(c *gc.C) *state.Service { - charm := s.AddTestingCharm(c, "dummy") - svc := s.AddTestingService(c, "dummy-service", charm) - svc.SetExposed() - c.Assert(svc.IsExposed(), gc.Equals, true) - return svc -} - -func (s *clientSuite) assertServiceUnexpose(c *gc.C, svc *state.Service) { - err := s.APIState.Client().ServiceUnexpose("dummy-service") - c.Assert(err, jc.ErrorIsNil) - svc.Refresh() - c.Assert(svc.IsExposed(), gc.Equals, false) - err = svc.Destroy() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *clientSuite) assertServiceUnexposeBlocked(c *gc.C, svc *state.Service, msg string) { - err := s.APIState.Client().ServiceUnexpose("dummy-service") - s.AssertBlocked(c, err, msg) - err = svc.Destroy() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *clientSuite) TestBlockDestroyServiceUnexpose(c *gc.C) { - svc := s.setupServiceUnexpose(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceUnexpose") - s.assertServiceUnexpose(c, svc) -} - -func (s *clientSuite) TestBlockRemoveServiceUnexpose(c *gc.C) { - svc := s.setupServiceUnexpose(c) - s.BlockRemoveObject(c, "TestBlockRemoveServiceUnexpose") - s.assertServiceUnexpose(c, svc) -} - -func (s *clientSuite) TestBlockChangesServiceUnexpose(c *gc.C) { - svc := s.setupServiceUnexpose(c) - s.BlockAllChanges(c, "TestBlockChangesServiceUnexpose") - s.assertServiceUnexposeBlocked(c, svc, "TestBlockChangesServiceUnexpose") -} - -var serviceDestroyTests = []struct { - about string - service string - err string -}{ - { - about: "unknown service name", - service: "unknown-service", - err: `service "unknown-service" not found`, - }, - { - about: "destroy a service", - service: "dummy-service", - }, - { - about: "destroy an already destroyed service", - service: 
"dummy-service", - err: `service "dummy-service" not found`, - }, -} - -func (s *clientSuite) TestClientServiceDestroy(c *gc.C) { - s.AddTestingService(c, "dummy-service", s.AddTestingCharm(c, "dummy")) - for i, t := range serviceDestroyTests { - c.Logf("test %d. %s", i, t.about) - err := s.APIState.Client().ServiceDestroy(t.service) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - } else { - c.Assert(err, jc.ErrorIsNil) - } - } - - // Now do ServiceDestroy on a service with units. Destroy will - // cause the service to be not-Alive, but will not remove its - // document. - s.setUpScenario(c) - serviceName := "wordpress" - service, err := s.State.Service(serviceName) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDestroy(serviceName) - c.Assert(err, jc.ErrorIsNil) - err = service.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(service.Life(), gc.Not(gc.Equals), state.Alive) + c.Assert(info.ControllerUUID, gc.Equals, env.ControllerUUID()) } func assertLife(c *gc.C, entity state.Living, life state.Life) { @@ -1488,7 +704,7 @@ } func (s *clientSuite) setupDestroyMachinesTest(c *gc.C) (*state.Machine, *state.Machine, *state.Machine, *state.Unit) { - m0, err := s.State.AddMachine("quantal", state.JobManageEnviron) + m0, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) m1, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -1514,43 +730,6 @@ s.assertForceDestroyMachines(c) } -func (s *clientSuite) TestDestroyPrincipalUnits(c *gc.C) { - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - units := make([]*state.Unit, 5) - for i := range units { - unit, err := wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = unit.SetAgentStatus(state.StatusIdle, "", nil) - c.Assert(err, jc.ErrorIsNil) - units[i] = unit - } - s.assertDestroyPrincipalUnits(c, units) -} - -func (s *clientSuite) TestDestroySubordinateUnits(c *gc.C) { - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - wordpress0, err := wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) - eps, err := s.State.InferEndpoints("logging", "wordpress") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - ru, err := rel.Unit(wordpress0) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - logging0, err := s.State.Unit("logging/0") - c.Assert(err, jc.ErrorIsNil) - - // Try to destroy the subordinate alone; check it fails. 
- err = s.APIState.Client().DestroyServiceUnits("logging/0") - c.Assert(err, gc.ErrorMatches, `no units were destroyed: unit "logging/0" is a subordinate`) - assertLife(c, logging0, state.Alive) - - s.assertDestroySubordinateUnits(c, wordpress0, logging0) -} - func (s *clientSuite) testClientUnitResolved(c *gc.C, retry bool, expectedResolvedMode state.ResolvedMode) { // Setup: s.setUpScenario(c) @@ -1606,7 +785,7 @@ func (s *clientSuite) TestBlockDestroyUnitResolved(c *gc.C) { u := s.setupResolved(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyUnitResolved") + s.BlockDestroyModel(c, "TestBlockDestroyUnitResolved") s.assertResolved(c, u) } @@ -1624,7 +803,7 @@ type clientRepoSuite struct { baseSuite - apiservertesting.CharmStoreSuite + testing.CharmStoreSuite } var _ = gc.Suite(&clientRepoSuite{}) @@ -1632,6 +811,7 @@ func (s *clientRepoSuite) SetUpSuite(c *gc.C) { s.CharmStoreSuite.SetUpSuite(c) s.baseSuite.SetUpSuite(c) + } func (s *clientRepoSuite) TearDownSuite(c *gc.C) { @@ -1643,6 +823,8 @@ s.baseSuite.SetUpTest(c) s.CharmStoreSuite.Session = s.baseSuite.Session s.CharmStoreSuite.SetUpTest(c) + + c.Assert(s.APIState, gc.NotNil) } func (s *clientRepoSuite) TearDownTest(c *gc.C) { @@ -1650,790 +832,10 @@ s.baseSuite.TearDownTest(c) } -func (s *clientRepoSuite) TestClientServiceDeployCharmErrors(c *gc.C) { - for url, expect := range map[string]string{ - "wordpress": "charm url series is not resolved", - "cs:wordpress": "charm url series is not resolved", - "cs:precise/wordpress": "charm url must include revision", - "cs:precise/wordpress-999999": `.* charm "cs:precise/wordpress-999999".* not found`, - } { - c.Logf("test %s", url) - err := s.APIState.Client().ServiceDeploy( - url, "service", 1, "", constraints.Value{}, "", - ) - c.Check(err, gc.ErrorMatches, expect) - _, err = s.State.Service("service") - c.Assert(err, jc.Satisfies, errors.IsNotFound) - } -} - -func (s *clientRepoSuite) TestClientServiceDeployWithNetworks(c *gc.C) { - curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse("mem=4G networks=^net3") - - // Check for invalid network tags handling. - err = s.APIState.Client().ServiceDeployWithNetworks( - curl.String(), "service", 3, "", cons, "", - []string{"net1", "net2"}, - ) - c.Assert(err, gc.ErrorMatches, `"net1" is not a valid tag`) - - err = s.APIState.Client().ServiceDeployWithNetworks( - curl.String(), "service", 3, "", cons, "", - []string{"network-net1", "network-net2"}, - ) - c.Assert(err, gc.ErrorMatches, "use of --networks is deprecated. Please use spaces") -} - -func (s *clientRepoSuite) setupServiceDeploy(c *gc.C, args string) (*charm.URL, charm.Charm, constraints.Value) { - curl, ch := s.UploadCharm(c, "precise/dummy-42", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - cons := constraints.MustParse(args) - return curl, ch, cons -} - -func (s *clientRepoSuite) TestClientServiceDeployPrincipal(c *gc.C) { - // TODO(fwereade): test ToMachineSpec directly on srvClient, when we - // manage to extract it as a package and can thus do it conveniently. 
- curl, ch := s.UploadCharm(c, "trusty/dummy-1", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - mem4g := constraints.MustParse("mem=4G") - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", mem4g, "", - ) - c.Assert(err, jc.ErrorIsNil) - apiservertesting.AssertPrincipalServiceDeployed(c, s.State, "service", curl, false, ch, mem4g) -} - -func (s *clientRepoSuite) assertServiceDeployPrincipal(c *gc.C, curl *charm.URL, ch charm.Charm, mem4g constraints.Value) { - err := s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", mem4g, "", - ) - c.Assert(err, jc.ErrorIsNil) - apiservertesting.AssertPrincipalServiceDeployed(c, s.State, "service", curl, false, ch, mem4g) -} - -func (s *clientRepoSuite) assertServiceDeployPrincipalBlocked(c *gc.C, msg string, curl *charm.URL, mem4g constraints.Value) { - err := s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", mem4g, "", - ) - s.AssertBlocked(c, err, msg) -} - -func (s *clientRepoSuite) TestBlockDestroyServiceDeployPrincipal(c *gc.C) { - curl, bundle, cons := s.setupServiceDeploy(c, "mem=4G") - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceDeployPrincipal") - s.assertServiceDeployPrincipal(c, curl, bundle, cons) -} - -func (s *clientRepoSuite) TestBlockRemoveServiceDeployPrincipal(c *gc.C) { - curl, bundle, cons := s.setupServiceDeploy(c, "mem=4G") - s.BlockRemoveObject(c, "TestBlockRemoveServiceDeployPrincipal") - s.assertServiceDeployPrincipal(c, curl, bundle, cons) -} - -func (s *clientRepoSuite) TestBlockChangesServiceDeployPrincipal(c *gc.C) { - curl, _, cons := s.setupServiceDeploy(c, "mem=4G") - s.BlockAllChanges(c, "TestBlockChangesServiceDeployPrincipal") - s.assertServiceDeployPrincipalBlocked(c, "TestBlockChangesServiceDeployPrincipal", curl, cons) -} - -func (s *clientRepoSuite) TestClientServiceDeploySubordinate(c *gc.C) { - curl, ch := s.UploadCharm(c, "utopic/logging-47", "logging") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service-name", 0, "", constraints.Value{}, "", - ) - service, err := s.State.Service("service-name") - c.Assert(err, jc.ErrorIsNil) - charm, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(force, jc.IsFalse) - c.Assert(charm.URL(), gc.DeepEquals, curl) - c.Assert(charm.Meta(), gc.DeepEquals, ch.Meta()) - c.Assert(charm.Config(), gc.DeepEquals, ch.Config()) - - units, err := service.AllUnits() - c.Assert(err, jc.ErrorIsNil) - c.Assert(units, gc.HasLen, 0) -} - -func (s *clientRepoSuite) TestClientServiceDeployConfig(c *gc.C) { - // TODO(fwereade): test Config/ConfigYAML handling directly on srvClient. - // Can't be done cleanly until it's extracted similarly to Machiner. 
- curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service-name", 1, "service-name:\n username: fred", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) - service, err := s.State.Service("service-name") - c.Assert(err, jc.ErrorIsNil) - settings, err := service.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{"username": "fred"}) -} - -func (s *clientRepoSuite) TestClientServiceDeployConfigError(c *gc.C) { - // TODO(fwereade): test Config/ConfigYAML handling directly on srvClient. - // Can't be done cleanly until it's extracted similarly to Machiner. - curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service-name", 1, "service-name:\n skill-level: fred", constraints.Value{}, "", - ) - c.Assert(err, gc.ErrorMatches, `option "skill-level" expected int, got "fred"`) - _, err = s.State.Service("service-name") - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *clientRepoSuite) TestClientServiceDeployToMachine(c *gc.C) { - curl, ch := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - - machine, err := s.State.AddMachine("precise", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service-name", 1, "service-name:\n username: fred", constraints.Value{}, machine.Id(), - ) - c.Assert(err, jc.ErrorIsNil) - - service, err := s.State.Service("service-name") - c.Assert(err, jc.ErrorIsNil) - charm, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(force, jc.IsFalse) - c.Assert(charm.URL(), gc.DeepEquals, curl) - c.Assert(charm.Meta(), gc.DeepEquals, ch.Meta()) - c.Assert(charm.Config(), gc.DeepEquals, ch.Config()) - - units, err := service.AllUnits() - c.Assert(err, jc.ErrorIsNil) - c.Assert(units, gc.HasLen, 1) - mid, err := units[0].AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - c.Assert(mid, gc.Equals, machine.Id()) -} - -func (s *clientSuite) TestClientServiceDeployToMachineNotFound(c *gc.C) { - err := s.APIState.Client().ServiceDeploy( - "cs:precise/service-name-1", "service-name", 1, "", constraints.Value{}, "42", - ) - c.Assert(err, gc.ErrorMatches, `cannot deploy "service-name" to machine 42: machine 42 not found`) - - _, err = s.State.Service("service-name") - c.Assert(err, gc.ErrorMatches, `service "service-name" not found`) -} - -func (s *clientRepoSuite) TestClientServiceDeployServiceOwner(c *gc.C) { - curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - - user := s.Factory.MakeUser(c, &factory.UserParams{Password: "password"}) - s.APIState = s.OpenAPIAs(c, user.Tag(), "password") - - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) - - service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - c.Assert(service.GetOwnerTag(), gc.Equals, user.Tag().String()) 
-} - -func (s *clientRepoSuite) deployServiceForTests(c *gc.C) { - curl, _ := s.UploadCharm(c, "precise/dummy-1", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy(curl.String(), - "service", 1, "", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *clientRepoSuite) checkClientServiceUpdateSetCharm(c *gc.C, forceCharmUrl bool) { - s.deployServiceForTests(c) - s.UploadCharm(c, "precise/wordpress-3", "wordpress") - - // Update the charm for the service. - args := params.ServiceUpdate{ - ServiceName: "service", - CharmUrl: "cs:precise/wordpress-3", - ForceCharmUrl: forceCharmUrl, - } - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the charm has been updated and the force flag correctly set. - service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - ch, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ch.URL().String(), gc.Equals, "cs:precise/wordpress-3") - c.Assert(force, gc.Equals, forceCharmUrl) -} - -func (s *clientRepoSuite) TestClientServiceUpdateSetCharm(c *gc.C) { - s.checkClientServiceUpdateSetCharm(c, false) -} - -func (s *clientRepoSuite) TestBlockDestroyServiceUpdate(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceUpdate") - s.checkClientServiceUpdateSetCharm(c, false) -} - -func (s *clientRepoSuite) TestBlockRemoveServiceUpdate(c *gc.C) { - s.BlockRemoveObject(c, "TestBlockRemoveServiceUpdate") - s.checkClientServiceUpdateSetCharm(c, false) -} - -func (s *clientRepoSuite) setupServiceUpdate(c *gc.C) { - s.deployServiceForTests(c) - s.UploadCharm(c, "precise/wordpress-3", "wordpress") -} - -func (s *clientRepoSuite) TestBlockChangeServiceUpdate(c *gc.C) { - s.setupServiceUpdate(c) - s.BlockAllChanges(c, "TestBlockChangeServiceUpdate") - // Update the charm for the service. - args := params.ServiceUpdate{ - ServiceName: "service", - CharmUrl: "cs:precise/wordpress-3", - ForceCharmUrl: false, - } - err := s.APIState.Client().ServiceUpdate(args) - s.AssertBlocked(c, err, "TestBlockChangeServiceUpdate") -} - -func (s *clientRepoSuite) TestClientServiceUpdateForceSetCharm(c *gc.C) { - s.checkClientServiceUpdateSetCharm(c, true) -} - -func (s *clientRepoSuite) TestBlockServiceUpdateForced(c *gc.C) { - s.setupServiceUpdate(c) - - // block all changes. Force should ignore block :) - s.BlockAllChanges(c, "TestBlockServiceUpdateForced") - s.BlockDestroyEnvironment(c, "TestBlockServiceUpdateForced") - s.BlockRemoveObject(c, "TestBlockServiceUpdateForced") - - // Update the charm for the service. - args := params.ServiceUpdate{ - ServiceName: "service", - CharmUrl: "cs:precise/wordpress-3", - ForceCharmUrl: true, - } - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the charm has been updated and the force flag correctly set. 
- service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - ch, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ch.URL().String(), gc.Equals, "cs:precise/wordpress-3") - c.Assert(force, jc.IsTrue) -} - -func (s *clientRepoSuite) TestClientServiceUpdateSetCharmErrors(c *gc.C) { - s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - for charmUrl, expect := range map[string]string{ - "wordpress": "charm url series is not resolved", - "cs:wordpress": "charm url series is not resolved", - "cs:precise/wordpress": "charm url must include revision", - "cs:precise/wordpress-999999": `cannot retrieve charm "cs:precise/wordpress-999999": charm not found`, - } { - c.Logf("test %s", charmUrl) - args := params.ServiceUpdate{ - ServiceName: "wordpress", - CharmUrl: charmUrl, - } - err := s.APIState.Client().ServiceUpdate(args) - c.Check(err, gc.ErrorMatches, expect) - } -} - -func (s *clientSuite) TestClientServiceUpdateSetMinUnits(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Set minimum units for the service. - minUnits := 2 - args := params.ServiceUpdate{ - ServiceName: "dummy", - MinUnits: &minUnits, - } - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the minimum number of units has been set. - c.Assert(service.Refresh(), gc.IsNil) - c.Assert(service.MinUnits(), gc.Equals, minUnits) -} - -func (s *clientSuite) TestClientServiceUpdateSetMinUnitsError(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Set a negative minimum number of units for the service. - minUnits := -1 - args := params.ServiceUpdate{ - ServiceName: "dummy", - MinUnits: &minUnits, - } - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, gc.ErrorMatches, - `cannot set minimum units for service "dummy": cannot set a negative minimum number of units`) - - // Ensure the minimum number of units has not been set. - c.Assert(service.Refresh(), gc.IsNil) - c.Assert(service.MinUnits(), gc.Equals, 0) -} - -func (s *clientSuite) TestClientServiceUpdateSetSettingsStrings(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Update settings for the service. - args := params.ServiceUpdate{ - ServiceName: "dummy", - SettingsStrings: map[string]string{"title": "s-title", "username": "s-user"}, - } - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the settings have been correctly updated. - expected := charm.Settings{"title": "s-title", "username": "s-user"} - obtained, err := service.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, expected) -} - -func (s *clientSuite) TestClientServiceUpdateSetSettingsYAML(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Update settings for the service. - args := params.ServiceUpdate{ - ServiceName: "dummy", - SettingsYAML: "dummy:\n title: y-title\n username: y-user", - } - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the settings have been correctly updated. 
- expected := charm.Settings{"title": "y-title", "username": "y-user"} - obtained, err := service.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, expected) -} - -func (s *clientSuite) TestClientServiceUpdateSetConstraints(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Update constraints for the service. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - args := params.ServiceUpdate{ - ServiceName: "dummy", - Constraints: &cons, - } - err = s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the constraints have been correctly updated. - obtained, err := service.Constraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientRepoSuite) TestClientServiceUpdateAllParams(c *gc.C) { - s.deployServiceForTests(c) - s.UploadCharm(c, "precise/wordpress-3", "wordpress") - - // Update all the service attributes. - minUnits := 3 - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - args := params.ServiceUpdate{ - ServiceName: "service", - CharmUrl: "cs:precise/wordpress-3", - ForceCharmUrl: true, - MinUnits: &minUnits, - SettingsStrings: map[string]string{"blog-title": "string-title"}, - SettingsYAML: "service:\n blog-title: yaml-title\n", - Constraints: &cons, - } - err = s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the service has been correctly updated. - service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - - // Check the charm. - ch, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ch.URL().String(), gc.Equals, "cs:precise/wordpress-3") - c.Assert(force, jc.IsTrue) - - // Check the minimum number of units. - c.Assert(service.MinUnits(), gc.Equals, minUnits) - - // Check the settings: also ensure the YAML settings take precedence - over string ones. - expectedSettings := charm.Settings{"blog-title": "yaml-title"} - obtainedSettings, err := service.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtainedSettings, gc.DeepEquals, expectedSettings) - - // Check the constraints. - obtainedConstraints, err := service.Constraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtainedConstraints, gc.DeepEquals, cons) -} - -func (s *clientSuite) TestClientServiceUpdateNoParams(c *gc.C) { - s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - - // Calling ServiceUpdate with no parameters set is a no-op. 
- args := params.ServiceUpdate{ServiceName: "wordpress"} - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *clientSuite) TestClientServiceUpdateNoService(c *gc.C) { - err := s.APIState.Client().ServiceUpdate(params.ServiceUpdate{}) - c.Assert(err, gc.ErrorMatches, `"" is not a valid service name`) -} - -func (s *clientSuite) TestClientServiceUpdateInvalidService(c *gc.C) { - args := params.ServiceUpdate{ServiceName: "no-such-service"} - err := s.APIState.Client().ServiceUpdate(args) - c.Assert(err, gc.ErrorMatches, `service "no-such-service" not found`) -} - -func (s *clientRepoSuite) TestClientServiceSetCharm(c *gc.C) { - curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) - s.UploadCharm(c, "precise/wordpress-3", "wordpress") - err = s.APIState.Client().ServiceSetCharm( - "service", "cs:precise/wordpress-3", false, - ) - c.Assert(err, jc.ErrorIsNil) - - // Ensure that the charm is not marked as forced. - service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - charm, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(charm.URL().String(), gc.Equals, "cs:precise/wordpress-3") - c.Assert(force, jc.IsFalse) -} - -func (s *clientRepoSuite) setupServiceSetCharm(c *gc.C) { - curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) - s.UploadCharm(c, "precise/wordpress-3", "wordpress") -} - -func (s *clientRepoSuite) assertServiceSetCharm(c *gc.C, force bool) { - err := s.APIState.Client().ServiceSetCharm( - "service", "cs:precise/wordpress-3", force, - ) - c.Assert(err, jc.ErrorIsNil) - // Ensure that the charm is not marked as forced. 
- service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - charm, _, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(charm.URL().String(), gc.Equals, "cs:precise/wordpress-3") -} - -func (s *clientRepoSuite) assertServiceSetCharmBlocked(c *gc.C, force bool, msg string) { - err := s.APIState.Client().ServiceSetCharm( - "service", "cs:precise/wordpress-3", force, - ) - s.AssertBlocked(c, err, msg) -} - -func (s *clientRepoSuite) TestBlockDestroyServiceSetCharm(c *gc.C) { - s.setupServiceSetCharm(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyServiceSetCharm") - s.assertServiceSetCharm(c, false) -} - -func (s *clientRepoSuite) TestBlockRemoveServiceSetCharm(c *gc.C) { - s.setupServiceSetCharm(c) - s.BlockRemoveObject(c, "TestBlockRemoveServiceSetCharm") - s.assertServiceSetCharm(c, false) -} - -func (s *clientRepoSuite) TestBlockChangesServiceSetCharm(c *gc.C) { - s.setupServiceSetCharm(c) - s.BlockAllChanges(c, "TestBlockChangesServiceSetCharm") - s.assertServiceSetCharmBlocked(c, false, "TestBlockChangesServiceSetCharm") -} - -func (s *clientRepoSuite) TestClientServiceSetCharmForce(c *gc.C) { - curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") - err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) - s.UploadCharm(c, "precise/wordpress-3", "wordpress") - err = s.APIState.Client().ServiceSetCharm( - "service", "cs:precise/wordpress-3", true, - ) - c.Assert(err, jc.ErrorIsNil) - - // Ensure that the charm is marked as forced. - service, err := s.State.Service("service") - c.Assert(err, jc.ErrorIsNil) - charm, force, err := service.Charm() - c.Assert(err, jc.ErrorIsNil) - c.Assert(charm.URL().String(), gc.Equals, "cs:precise/wordpress-3") - c.Assert(force, jc.IsTrue) -} - -func (s *clientRepoSuite) TestBlockServiceSetCharmForce(c *gc.C) { - s.setupServiceSetCharm(c) - - // block all changes - s.BlockAllChanges(c, "TestBlockServiceSetCharmForce") - s.BlockRemoveObject(c, "TestBlockServiceSetCharmForce") - s.BlockDestroyEnvironment(c, "TestBlockServiceSetCharmForce") - - s.assertServiceSetCharm(c, true) -} - -func (s *clientSuite) TestClientServiceSetCharmInvalidService(c *gc.C) { - err := s.APIState.Client().ServiceSetCharm( - "badservice", "cs:precise/wordpress-3", true, - ) - c.Assert(err, gc.ErrorMatches, `service "badservice" not found`) -} - -func (s *clientRepoSuite) TestClientServiceSetCharmErrors(c *gc.C) { - s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - for url, expect := range map[string]string{ - // TODO(fwereade,Makyo) make these errors consistent one day. 
- "wordpress": "charm url series is not resolved", - "cs:wordpress": "charm url series is not resolved", - "cs:precise/wordpress": "charm url must include revision", - "cs:precise/wordpress-999999": `cannot retrieve charm "cs:precise/wordpress-999999": charm not found`, - } { - c.Logf("test %s", url) - err := s.APIState.Client().ServiceSetCharm( - "wordpress", url, false, - ) - c.Check(err, gc.ErrorMatches, expect) - } -} - -func (s *clientSuite) checkEndpoints(c *gc.C, endpoints map[string]charm.Relation) { - c.Assert(endpoints["wordpress"], gc.DeepEquals, charm.Relation{ - Name: "db", - Role: charm.RelationRole("requirer"), - Interface: "mysql", - Optional: false, - Limit: 1, - Scope: charm.RelationScope("global"), - }) - c.Assert(endpoints["mysql"], gc.DeepEquals, charm.Relation{ - Name: "server", - Role: charm.RelationRole("provider"), - Interface: "mysql", - Optional: false, - Limit: 0, - Scope: charm.RelationScope("global"), - }) -} - -func (s *clientSuite) assertAddRelation(c *gc.C, endpoints []string) { - s.setUpScenario(c) - res, err := s.APIState.Client().AddRelation(endpoints...) - c.Assert(err, jc.ErrorIsNil) - s.checkEndpoints(c, res.Endpoints) - // Show that the relation was added. - wpSvc, err := s.State.Service("wordpress") - c.Assert(err, jc.ErrorIsNil) - rels, err := wpSvc.Relations() - // There are 2 relations - the logging-wordpress one set up in the - // scenario and the one created in this test. - c.Assert(len(rels), gc.Equals, 2) - mySvc, err := s.State.Service("mysql") - c.Assert(err, jc.ErrorIsNil) - rels, err = mySvc.Relations() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(rels), gc.Equals, 1) -} - -func (s *clientSuite) TestSuccessfullyAddRelation(c *gc.C) { - endpoints := []string{"wordpress", "mysql"} - s.assertAddRelation(c, endpoints) -} - -func (s *clientSuite) TestBlockDestroyAddRelation(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyAddRelation") - s.assertAddRelation(c, []string{"wordpress", "mysql"}) -} -func (s *clientSuite) TestBlockRemoveAddRelation(c *gc.C) { - s.BlockRemoveObject(c, "TestBlockRemoveAddRelation") - s.assertAddRelation(c, []string{"wordpress", "mysql"}) -} - -func (s *clientSuite) TestBlockChangesAddRelation(c *gc.C) { - s.setUpScenario(c) - s.BlockAllChanges(c, "TestBlockChangesAddRelation") - _, err := s.APIState.Client().AddRelation([]string{"wordpress", "mysql"}...) - s.AssertBlocked(c, err, "TestBlockChangesAddRelation") -} - -func (s *clientSuite) TestSuccessfullyAddRelationSwapped(c *gc.C) { - // Show that the order of the services listed in the AddRelation call - // does not matter. This is a repeat of the previous test with the service - // names swapped. - endpoints := []string{"mysql", "wordpress"} - s.assertAddRelation(c, endpoints) -} - -func (s *clientSuite) TestCallWithOnlyOneEndpoint(c *gc.C) { - s.setUpScenario(c) - endpoints := []string{"wordpress"} - _, err := s.APIState.Client().AddRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, "no relations found") -} - -func (s *clientSuite) TestCallWithOneEndpointTooMany(c *gc.C) { - s.setUpScenario(c) - endpoints := []string{"wordpress", "mysql", "logging"} - _, err := s.APIState.Client().AddRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, "cannot relate 3 endpoints") -} - -func (s *clientSuite) TestAddAlreadyAddedRelation(c *gc.C) { - s.setUpScenario(c) - // Add a relation between wordpress and mysql. - endpoints := []string{"wordpress", "mysql"} - eps, err := s.State.InferEndpoints(endpoints...) 
- c.Assert(err, jc.ErrorIsNil) - _, err = s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - // And try to add it again. - _, err = s.APIState.Client().AddRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, `cannot add relation "wordpress:db mysql:server": relation already exists`) -} - -func (s *clientSuite) setupRelationScenario(c *gc.C, endpoints []string) *state.Relation { - s.setUpScenario(c) - // Add a relation between the endpoints. - eps, err := s.State.InferEndpoints(endpoints...) - c.Assert(err, jc.ErrorIsNil) - relation, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - return relation -} - -func (s *clientSuite) assertDestroyRelation(c *gc.C, endpoints []string) { - s.assertDestroyRelationSuccess( - c, - s.setupRelationScenario(c, endpoints), - endpoints) -} - -func (s *clientSuite) assertDestroyRelationSuccess(c *gc.C, relation *state.Relation, endpoints []string) { - err := s.APIState.Client().DestroyRelation(endpoints...) - c.Assert(err, jc.ErrorIsNil) - // Show that the relation was removed. - c.Assert(relation.Refresh(), jc.Satisfies, errors.IsNotFound) -} - -func (s *clientSuite) TestSuccessfulDestroyRelation(c *gc.C) { - endpoints := []string{"wordpress", "mysql"} - s.assertDestroyRelation(c, endpoints) -} - -func (s *clientSuite) TestSuccessfullyDestroyRelationSwapped(c *gc.C) { - // Show that the order of the services listed in the DestroyRelation call - // does not matter. This is a repeat of the previous test with the service - // names swapped. - endpoints := []string{"mysql", "wordpress"} - s.assertDestroyRelation(c, endpoints) -} - -func (s *clientSuite) TestNoRelation(c *gc.C) { - s.setUpScenario(c) - endpoints := []string{"wordpress", "mysql"} - err := s.APIState.Client().DestroyRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, `relation "wordpress:db mysql:server" not found`) -} - -func (s *clientSuite) TestAttemptDestroyingNonExistentRelation(c *gc.C) { - s.setUpScenario(c) - s.AddTestingService(c, "riak", s.AddTestingCharm(c, "riak")) - endpoints := []string{"riak", "wordpress"} - err := s.APIState.Client().DestroyRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, "no relations found") -} - -func (s *clientSuite) TestAttemptDestroyingWithOnlyOneEndpoint(c *gc.C) { - s.setUpScenario(c) - endpoints := []string{"wordpress"} - err := s.APIState.Client().DestroyRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, "no relations found") -} - -func (s *clientSuite) TestAttemptDestroyingPeerRelation(c *gc.C) { - s.setUpScenario(c) - s.AddTestingService(c, "riak", s.AddTestingCharm(c, "riak")) - - endpoints := []string{"riak:ring"} - err := s.APIState.Client().DestroyRelation(endpoints...) - c.Assert(err, gc.ErrorMatches, `cannot destroy relation "riak:ring": is a peer relation`) -} - -func (s *clientSuite) TestAttemptDestroyingAlreadyDestroyedRelation(c *gc.C) { - s.setUpScenario(c) - - // Add a relation between wordpress and mysql. - eps, err := s.State.InferEndpoints("wordpress", "mysql") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - - endpoints := []string{"wordpress", "mysql"} - err = s.APIState.Client().DestroyRelation(endpoints...) - // Show that the relation was removed. - c.Assert(rel.Refresh(), jc.Satisfies, errors.IsNotFound) - - // And try to destroy it again. - err = s.APIState.Client().DestroyRelation(endpoints...) 
- c.Assert(err, gc.ErrorMatches, `relation "wordpress:db mysql:server" not found`) -} - func (s *clientSuite) TestClientWatchAll(c *gc.C) { // A very simple end-to-end test, because // all the logic is tested elsewhere. - m, err := s.State.AddMachine("quantal", state.JobManageEnviron) + m, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) err = m.SetProvisioned("i-0", agent.BootstrapNonce, nil) c.Assert(err, jc.ErrorIsNil) @@ -2447,14 +849,14 @@ c.Assert(err, jc.ErrorIsNil) if !c.Check(deltas, gc.DeepEquals, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.State.EnvironUUID(), + ModelUUID: s.State.ModelUUID(), Id: m.Id(), InstanceId: "i-0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, Life: multiwatcher.Life("alive"), Series: "quantal", - Jobs: []multiwatcher.MachineJob{state.JobManageEnviron.ToParams()}, + Jobs: []multiwatcher.MachineJob{state.JobManageModel.ToParams()}, Addresses: []network.Address{}, HardwareCharacteristics: &instance.HardwareCharacteristics{}, HasVote: false, @@ -2468,147 +870,65 @@ } } -func (s *clientSuite) TestClientSetServiceConstraints(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Update constraints for the service. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().SetServiceConstraints("dummy", cons) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the constraints have been correctly updated. - obtained, err := service.Constraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientSuite) setupSetServiceConstraints(c *gc.C) (*state.Service, constraints.Value) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - // Update constraints for the service. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - return service, cons -} - -func (s *clientSuite) assertSetServiceConstraints(c *gc.C, service *state.Service, cons constraints.Value) { - err := s.APIState.Client().SetServiceConstraints("dummy", cons) - c.Assert(err, jc.ErrorIsNil) - // Ensure the constraints have been correctly updated. - obtained, err := service.Constraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientSuite) assertSetServiceConstraintsBlocked(c *gc.C, msg string, service *state.Service, cons constraints.Value) { - err := s.APIState.Client().SetServiceConstraints("dummy", cons) - s.AssertBlocked(c, err, msg) -} - -func (s *clientSuite) TestBlockDestroySetServiceConstraints(c *gc.C) { - svc, cons := s.setupSetServiceConstraints(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroySetServiceConstraints") - s.assertSetServiceConstraints(c, svc, cons) -} - -func (s *clientSuite) TestBlockRemoveSetServiceConstraints(c *gc.C) { - svc, cons := s.setupSetServiceConstraints(c) - s.BlockRemoveObject(c, "TestBlockRemoveSetServiceConstraints") - s.assertSetServiceConstraints(c, svc, cons) -} - -func (s *clientSuite) TestBlockChangesSetServiceConstraints(c *gc.C) { - svc, cons := s.setupSetServiceConstraints(c) - s.BlockAllChanges(c, "TestBlockChangesSetServiceConstraints") - s.assertSetServiceConstraintsBlocked(c, "TestBlockChangesSetServiceConstraints", svc, cons) -} - -func (s *clientSuite) TestClientGetServiceConstraints(c *gc.C) { - service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) - - // Set constraints for the service. 
- cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - err = service.SetConstraints(cons) - c.Assert(err, jc.ErrorIsNil) - - // Check we can get the constraints. - obtained, err := s.APIState.Client().GetServiceConstraints("dummy") - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientSuite) TestClientSetEnvironmentConstraints(c *gc.C) { - // Set constraints for the environment. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().SetEnvironmentConstraints(cons) - c.Assert(err, jc.ErrorIsNil) - - // Ensure the constraints have been correctly updated. - obtained, err := s.State.EnvironConstraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientSuite) assertSetEnvironmentConstraints(c *gc.C) { - // Set constraints for the environment. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().SetEnvironmentConstraints(cons) - c.Assert(err, jc.ErrorIsNil) - // Ensure the constraints have been correctly updated. - obtained, err := s.State.EnvironConstraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientSuite) assertSetEnvironmentConstraintsBlocked(c *gc.C, msg string) { - // Set constraints for the environment. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().SetEnvironmentConstraints(cons) - s.AssertBlocked(c, err, msg) -} - -func (s *clientSuite) TestBlockDestroyClientSetEnvironmentConstraints(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyClientSetEnvironmentConstraints") - s.assertSetEnvironmentConstraints(c) -} - -func (s *clientSuite) TestBlockRemoveClientSetEnvironmentConstraints(c *gc.C) { - s.BlockRemoveObject(c, "TestBlockRemoveClientSetEnvironmentConstraints") - s.assertSetEnvironmentConstraints(c) -} - -func (s *clientSuite) TestBlockChangesClientSetEnvironmentConstraints(c *gc.C) { - s.BlockAllChanges(c, "TestBlockChangesClientSetEnvironmentConstraints") - s.assertSetEnvironmentConstraintsBlocked(c, "TestBlockChangesClientSetEnvironmentConstraints") -} - -func (s *clientSuite) TestClientGetEnvironmentConstraints(c *gc.C) { - // Set constraints for the environment. - cons, err := constraints.Parse("mem=4096", "cpu-cores=2") - c.Assert(err, jc.ErrorIsNil) - err = s.State.SetEnvironConstraints(cons) - c.Assert(err, jc.ErrorIsNil) - - // Check we can get the constraints. - obtained, err := s.APIState.Client().GetEnvironmentConstraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtained, gc.DeepEquals, cons) -} - -func (s *clientSuite) TestClientServiceCharmRelations(c *gc.C) { - s.setUpScenario(c) - _, err := s.APIState.Client().ServiceCharmRelations("blah") - c.Assert(err, gc.ErrorMatches, `service "blah" not found`) - - relations, err := s.APIState.Client().ServiceCharmRelations("wordpress") - c.Assert(err, jc.ErrorIsNil) - c.Assert(relations, gc.DeepEquals, []string{ - "cache", "db", "juju-info", "logging-dir", "monitoring-port", "url", - }) +func (s *clientSuite) TestClientSetModelConstraints(c *gc.C) { + // Set constraints for the model. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + err = s.APIState.Client().SetModelConstraints(cons) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the constraints have been correctly updated. 
+ obtained, err := s.State.ModelConstraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, cons) +} + +func (s *clientSuite) assertSetModelConstraints(c *gc.C) { + // Set constraints for the model. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + err = s.APIState.Client().SetModelConstraints(cons) + c.Assert(err, jc.ErrorIsNil) + // Ensure the constraints have been correctly updated. + obtained, err := s.State.ModelConstraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, cons) +} + +func (s *clientSuite) assertSetModelConstraintsBlocked(c *gc.C, msg string) { + // Set constraints for the model. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + err = s.APIState.Client().SetModelConstraints(cons) + s.AssertBlocked(c, err, msg) +} + +func (s *clientSuite) TestBlockDestroyClientSetModelConstraints(c *gc.C) { + s.BlockDestroyModel(c, "TestBlockDestroyClientSetModelConstraints") + s.assertSetModelConstraints(c) +} + +func (s *clientSuite) TestBlockRemoveClientSetModelConstraints(c *gc.C) { + s.BlockRemoveObject(c, "TestBlockRemoveClientSetModelConstraints") + s.assertSetModelConstraints(c) +} + +func (s *clientSuite) TestBlockChangesClientSetModelConstraints(c *gc.C) { + s.BlockAllChanges(c, "TestBlockChangesClientSetModelConstraints") + s.assertSetModelConstraintsBlocked(c, "TestBlockChangesClientSetModelConstraints") +} + +func (s *clientSuite) TestClientGetModelConstraints(c *gc.C) { + // Set constraints for the model. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + err = s.State.SetModelConstraints(cons) + c.Assert(err, jc.ErrorIsNil) + + // Check we can get the constraints. + obtained, err := s.APIState.Client().GetModelConstraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, cons) } func (s *clientSuite) TestClientPublicAddressErrors(c *gc.C) { @@ -2623,7 +943,7 @@ func (s *clientSuite) TestClientPublicAddressMachine(c *gc.C) { s.setUpScenario(c) - network.ResetGlobalPreferIPv6() + network.SetPreferIPv6(false) // Internally, network.SelectPublicAddress is used; the "most public" // address is returned. @@ -2666,7 +986,7 @@ func (s *clientSuite) TestClientPrivateAddress(c *gc.C) { s.setUpScenario(c) - network.ResetGlobalPreferIPv6() + network.SetPreferIPv6(false) // Internally, network.SelectInternalAddress is used; the public // address if no cloud-local one is available. 
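A minimal, self-contained sketch of the scope-preference rule the two address tests above rely on. The types and helper below are hypothetical illustrations only, not the juju/network implementation (whose SelectPublicAddress/SelectInternalAddress signatures are not shown in this diff):

package addrscope

// scope is a hypothetical stand-in for juju/network's address scope.
type scope int

const (
	scopePublic scope = iota
	scopeCloudLocal
	scopeMachineLocal
)

// address pairs a value with its scope, mirroring the idea of
// network.Address without depending on it.
type address struct {
	value string
	scope scope
}

// selectByScope returns the first address whose scope matches the
// earliest entry in prefs. A public-first preference list therefore
// yields the "most public" address, while a cloud-local-first list
// falls back to a public address only when no cloud-local one exists,
// which is the behaviour the comments above describe.
func selectByScope(addrs []address, prefs []scope) (address, bool) {
	for _, p := range prefs {
		for _, a := range addrs {
			if a.scope == p {
				return a, true
			}
		}
	}
	return address{}, false
}

For example, given {"10.0.0.1", cloud-local} and {"8.8.8.8", public}, a public-first preference selects "8.8.8.8" while a cloud-local-first preference selects "10.0.0.1".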
@@ -2697,16 +1017,16 @@ c.Assert(addr, gc.Equals, "private") } -func (s *serverSuite) TestClientEnvironmentGet(c *gc.C) { - envConfig, err := s.State.EnvironConfig() +func (s *serverSuite) TestClientModelGet(c *gc.C) { + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) - result, err := s.client.EnvironmentGet() + result, err := s.client.ModelGet() c.Assert(err, jc.ErrorIsNil) c.Assert(result.Config, gc.DeepEquals, envConfig.AllAttrs()) } func (s *serverSuite) assertEnvValue(c *gc.C, key string, expected interface{}) { - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) value, found := envConfig.AllAttrs()[key] c.Assert(found, jc.IsTrue) @@ -2714,133 +1034,133 @@ } func (s *serverSuite) assertEnvValueMissing(c *gc.C, key string) { - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) _, found := envConfig.AllAttrs()[key] c.Assert(found, jc.IsFalse) } -func (s *serverSuite) TestClientEnvironmentSet(c *gc.C) { - envConfig, err := s.State.EnvironConfig() +func (s *serverSuite) TestClientModelSet(c *gc.C) { + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) _, found := envConfig.AllAttrs()["some-key"] c.Assert(found, jc.IsFalse) - params := params.EnvironmentSet{ + params := params.ModelSet{ Config: map[string]interface{}{ "some-key": "value", "other-key": "other value"}, } - err = s.client.EnvironmentSet(params) + err = s.client.ModelSet(params) c.Assert(err, jc.ErrorIsNil) s.assertEnvValue(c, "some-key", "value") s.assertEnvValue(c, "other-key", "other value") } -func (s *serverSuite) TestClientEnvironmentSetImmutable(c *gc.C) { +func (s *serverSuite) TestClientModelSetImmutable(c *gc.C) { // The various immutable config values are tested in // environs/config/config_test.go, so just choosing one here. 
- params := params.EnvironmentSet{ + params := params.ModelSet{ Config: map[string]interface{}{"state-port": "1"}, } - err := s.client.EnvironmentSet(params) + err := s.client.ModelSet(params) c.Check(err, gc.ErrorMatches, `cannot change state-port from .* to 1`) } -func (s *serverSuite) assertEnvironmentSetBlocked(c *gc.C, args map[string]interface{}, msg string) { - err := s.client.EnvironmentSet(params.EnvironmentSet{args}) +func (s *serverSuite) assertModelSetBlocked(c *gc.C, args map[string]interface{}, msg string) { + err := s.client.ModelSet(params.ModelSet{args}) s.AssertBlocked(c, err, msg) } -func (s *serverSuite) TestBlockChangesClientEnvironmentSet(c *gc.C) { - s.BlockAllChanges(c, "TestBlockChangesClientEnvironmentSet") +func (s *serverSuite) TestBlockChangesClientModelSet(c *gc.C) { + s.BlockAllChanges(c, "TestBlockChangesClientModelSet") args := map[string]interface{}{"some-key": "value"} - s.assertEnvironmentSetBlocked(c, args, "TestBlockChangesClientEnvironmentSet") + s.assertModelSetBlocked(c, args, "TestBlockChangesClientModelSet") } -func (s *serverSuite) TestClientEnvironmentSetDeprecated(c *gc.C) { - envConfig, err := s.State.EnvironConfig() +func (s *serverSuite) TestClientModelSetDeprecated(c *gc.C) { + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) url := envConfig.AllAttrs()["agent-metadata-url"] c.Assert(url, gc.Equals, "") - args := params.EnvironmentSet{ + args := params.ModelSet{ Config: map[string]interface{}{"tools-metadata-url": "value"}, } - err = s.client.EnvironmentSet(args) + err = s.client.ModelSet(args) c.Assert(err, jc.ErrorIsNil) s.assertEnvValue(c, "agent-metadata-url", "value") s.assertEnvValue(c, "tools-metadata-url", "value") } -func (s *serverSuite) TestClientEnvironmentSetCannotChangeAgentVersion(c *gc.C) { - args := params.EnvironmentSet{ +func (s *serverSuite) TestClientModelSetCannotChangeAgentVersion(c *gc.C) { + args := params.ModelSet{ map[string]interface{}{"agent-version": "9.9.9"}, } - err := s.client.EnvironmentSet(args) + err := s.client.ModelSet(args) c.Assert(err, gc.ErrorMatches, "agent-version cannot be changed") // It's okay to pass env back with the same agent-version. 
- result, err := s.client.EnvironmentGet() + result, err := s.client.ModelGet() c.Assert(err, jc.ErrorIsNil) c.Assert(result.Config["agent-version"], gc.NotNil) args.Config["agent-version"] = result.Config["agent-version"] - err = s.client.EnvironmentSet(args) + err = s.client.ModelSet(args) c.Assert(err, jc.ErrorIsNil) } -func (s *serverSuite) TestClientEnvironmentUnset(c *gc.C) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{"abc": 123}, nil, nil) +func (s *serverSuite) TestClientModelUnset(c *gc.C) { + err := s.State.UpdateModelConfig(map[string]interface{}{"abc": 123}, nil, nil) c.Assert(err, jc.ErrorIsNil) - args := params.EnvironmentUnset{[]string{"abc"}} - err = s.client.EnvironmentUnset(args) + args := params.ModelUnset{[]string{"abc"}} + err = s.client.ModelUnset(args) c.Assert(err, jc.ErrorIsNil) s.assertEnvValueMissing(c, "abc") } -func (s *serverSuite) TestBlockClientEnvironmentUnset(c *gc.C) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{"abc": 123}, nil, nil) +func (s *serverSuite) TestBlockClientModelUnset(c *gc.C) { + err := s.State.UpdateModelConfig(map[string]interface{}{"abc": 123}, nil, nil) c.Assert(err, jc.ErrorIsNil) - s.BlockAllChanges(c, "TestBlockClientEnvironmentUnset") + s.BlockAllChanges(c, "TestBlockClientModelUnset") - args := params.EnvironmentUnset{[]string{"abc"}} - err = s.client.EnvironmentUnset(args) - s.AssertBlocked(c, err, "TestBlockClientEnvironmentUnset") + args := params.ModelUnset{[]string{"abc"}} + err = s.client.ModelUnset(args) + s.AssertBlocked(c, err, "TestBlockClientModelUnset") } -func (s *serverSuite) TestClientEnvironmentUnsetMissing(c *gc.C) { +func (s *serverSuite) TestClientModelUnsetMissing(c *gc.C) { // It's okay to unset a non-existent attribute. - args := params.EnvironmentUnset{[]string{"not_there"}} - err := s.client.EnvironmentUnset(args) + args := params.ModelUnset{[]string{"not_there"}} + err := s.client.ModelUnset(args) c.Assert(err, jc.ErrorIsNil) } -func (s *serverSuite) TestClientEnvironmentUnsetError(c *gc.C) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{"abc": 123}, nil, nil) +func (s *serverSuite) TestClientModelUnsetError(c *gc.C) { + err := s.State.UpdateModelConfig(map[string]interface{}{"abc": 123}, nil, nil) c.Assert(err, jc.ErrorIsNil) // "type" may not be removed, and this will cause an error. // If any one attribute's removal causes an error, there // should be no change. 
- args := params.EnvironmentUnset{[]string{"abc", "type"}} - err = s.client.EnvironmentUnset(args) + args := params.ModelUnset{[]string{"abc", "type"}} + err = s.client.ModelUnset(args) c.Assert(err, gc.ErrorMatches, "type: expected string, got nothing") s.assertEnvValue(c, "abc", 123) } func (s *clientSuite) TestClientFindTools(c *gc.C) { - result, err := s.APIState.Client().FindTools(2, -1, "", "") + result, err := s.APIState.Client().FindTools(99, -1, "", "") c.Assert(err, jc.ErrorIsNil) c.Assert(result.Error, jc.Satisfies, params.IsCodeNotFound) - toolstesting.UploadToStorage(c, s.DefaultToolsStorage, "released", version.MustParseBinary("2.12.0-precise-amd64")) - result, err = s.APIState.Client().FindTools(2, 12, "precise", "amd64") + toolstesting.UploadToStorage(c, s.DefaultToolsStorage, "released", version.MustParseBinary("2.99.0-precise-amd64")) + result, err = s.APIState.Client().FindTools(2, 99, "precise", "amd64") c.Assert(err, jc.ErrorIsNil) c.Assert(result.Error, gc.IsNil) c.Assert(result.List, gc.HasLen, 1) - c.Assert(result.List[0].Version, gc.Equals, version.MustParseBinary("2.12.0-precise-amd64")) - url := fmt.Sprintf("https://%s/environment/%s/tools/%s", - s.APIState.Addr(), coretesting.EnvironmentTag.Id(), result.List[0].Version) + c.Assert(result.List[0].Version, gc.Equals, version.MustParseBinary("2.99.0-precise-amd64")) + url := fmt.Sprintf("https://%s/model/%s/tools/%s", + s.APIState.Addr(), coretesting.ModelTag.Id(), result.List[0].Version) c.Assert(result.List[0].URL, gc.Equals, url) } @@ -2899,7 +1219,7 @@ } func (s *clientSuite) TestBlockDestroyClientAddMachinesDefaultSeries(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyClientAddMachinesDefaultSeries") + s.BlockDestroyModel(c, "TestBlockDestroyClientAddMachinesDefaultSeries") s.assertAddMachines(c) } @@ -2948,7 +1268,7 @@ // updateConfig sets the config variable with the given key to the given value. // Asserts that no errors were encountered. 
func (s *baseSuite) updateConfig(c *gc.C, key string, block bool) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{key: block}, nil, nil) + err := s.State.UpdateModelConfig(map[string]interface{}{key: block}, nil, nil) c.Assert(err, jc.ErrorIsNil) } @@ -2980,8 +1300,8 @@ apiParams[0].Placement = instance.MustParsePlacement("lxc") apiParams[1].Placement = instance.MustParsePlacement("lxc:0") apiParams[1].ContainerType = instance.LXC - apiParams[2].Placement = instance.MustParsePlacement("dummyenv:invalid") - apiParams[3].Placement = instance.MustParsePlacement("dummyenv:valid") + apiParams[2].Placement = instance.MustParsePlacement("dummymodel:invalid") + apiParams[3].Placement = instance.MustParsePlacement("dummymodel:valid") machines, err := s.APIState.Client().AddMachines(apiParams) c.Assert(err, jc.ErrorIsNil) c.Assert(len(machines), gc.Equals, 4) @@ -2995,33 +1315,6 @@ c.Assert(m.Placement(), gc.DeepEquals, apiParams[3].Placement.Directive) } -func (s *clientSuite) TestClientAddMachines1dot18(c *gc.C) { - apiParams := make([]params.AddMachineParams, 2) - for i := range apiParams { - apiParams[i] = params.AddMachineParams{ - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - } - } - apiParams[1].ContainerType = instance.LXC - apiParams[1].ParentId = "0" - machines, err := s.APIState.Client().AddMachines1dot18(apiParams) - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(machines), gc.Equals, 2) - c.Assert(machines[0].Machine, gc.Equals, "0") - c.Assert(machines[1].Machine, gc.Equals, "0/lxc/0") -} - -func (s *clientSuite) TestClientAddMachines1dot18SomeErrors(c *gc.C) { - apiParams := []params.AddMachineParams{{ - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - ParentId: "123", - }} - machines, err := s.APIState.Client().AddMachines1dot18(apiParams) - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(machines), gc.Equals, 1) - c.Check(machines[0].Error, gc.ErrorMatches, "parent machine specified without container type") -} - func (s *clientSuite) TestClientAddMachinesSomeErrors(c *gc.C) { // Here we check that adding a number of containers correctly handles the // case that some adds succeed and others fail and report the errors @@ -3044,7 +1337,7 @@ Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, } } - // This will cause a machine add to fail due to an unsupported container. + // This will cause an add-machine to fail due to an unsupported container. apiParams[2].ContainerType = instance.KVM apiParams[2].ParentId = host.Id() machines, err := s.APIState.Client().AddMachines(apiParams) @@ -3072,7 +1365,7 @@ Addrs: params.FromNetworkAddresses(addrs), } } - // This will cause the last machine add to fail. + // This will cause the last add-machine to fail. apiParams[2].Nonce = "" machines, err := s.APIState.Client().AddMachines(apiParams) c.Assert(err, jc.ErrorIsNil) @@ -3116,7 +1409,7 @@ Nonce: "nonce", }}, } - err := s.APIState.APICall("Client", 0, "", "AddMachines", args, &results) + err := s.APIState.APICall("Client", 1, "", "AddMachines", args, &results) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Machines, gc.HasLen, 1) } @@ -3181,7 +1474,7 @@ } setUpdateBehavior := func(update, upgrade bool) { - s.State.UpdateEnvironConfig( + s.State.UpdateModelConfig( map[string]interface{}{ "enable-os-upgrade": upgrade, "enable-os-refresh-update": update, @@ -3217,7 +1510,7 @@ c.Check(script, gc.Not(jc.Contains), "apt-get upgrade") // Test that in the absence of a client-specified
+ // DisablePackageCommands we use what's set in environment config. provParams.DisablePackageCommands = false setUpdateBehavior(false, false) //provParams.UpdateBehavior = ¶ms.UpdateBehavior{false, false} @@ -3227,52 +1520,6 @@ c.Check(script, gc.Not(jc.Contains), "apt-get upgrade") } -type testModeCharmRepo struct { - *charmrepo.CharmStore - testMode bool -} - -// WithTestMode returns a repository Interface where test mode is enabled. -func (s *testModeCharmRepo) WithTestMode() charmrepo.Interface { - s.testMode = true - return s.CharmStore.WithTestMode() -} - -func (s *clientRepoSuite) TestClientSpecializeStoreOnDeployServiceSetCharmAndAddCharm(c *gc.C) { - repo := &testModeCharmRepo{} - s.PatchValue(&service.NewCharmStore, func(p charmrepo.NewCharmStoreParams) charmrepo.Interface { - p.URL = s.Srv.URL() - repo.CharmStore = charmrepo.NewCharmStore(p).(*charmrepo.CharmStore) - return repo - }) - attrs := map[string]interface{}{"test-mode": true} - err := s.State.UpdateEnvironConfig(attrs, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - // Check that the store's test mode is enabled when calling ServiceDeploy. - curl, _ := s.UploadCharm(c, "trusty/dummy-1", "dummy") - err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().ServiceDeploy( - curl.String(), "service", 3, "", constraints.Value{}, "", - ) - c.Assert(err, jc.ErrorIsNil) - c.Assert(repo.testMode, jc.IsTrue) - - // Check that the store's test mode is enabled when calling ServiceSetCharm. - curl, _ = s.UploadCharm(c, "trusty/wordpress-2", "wordpress") - err = s.APIState.Client().ServiceSetCharm( - "service", curl.String(), false, - ) - c.Assert(repo.testMode, jc.IsTrue) - - // Check that the store's test mode is enabled when calling AddCharm. 
- curl, _ = s.UploadCharm(c, "utopic/riak-42", "riak") - err = s.APIState.Client().AddCharm(curl) - c.Assert(err, jc.ErrorIsNil) - c.Assert(repo.testMode, jc.IsTrue) -} - var resolveCharmTests = []struct { about string url string @@ -3302,15 +1549,15 @@ }, { about: "fully qualified reference not found", url: "cs:utopic/riak-42", - resolveErr: `cannot resolve charm URL "cs:utopic/riak-42": charm not found`, + resolveErr: `cannot resolve URL "cs:utopic/riak-42": charm not found`, }, { about: "reference not found", url: "cs:no-such", - resolveErr: `cannot resolve charm URL "cs:no-such": charm not found`, + resolveErr: `cannot resolve URL "cs:no-such": charm or bundle not found`, }, { about: "invalid charm name", url: "cs:", - parseErr: `charm URL has invalid charm name: "cs:"`, + parseErr: `URL has invalid charm or bundle name: "cs:"`, }, { about: "local charm", url: "local:wordpress", @@ -3334,7 +1581,7 @@ c.Logf("test %d: %s", i, test.about) client := s.APIState.Client() - ref, err := charm.ParseReference(test.url) + ref, err := charm.ParseURL(test.url) if test.parseErr == "" { if !c.Check(err, jc.ErrorIsNil) { continue @@ -3396,7 +1643,7 @@ func (s *clientSuite) TestBlockDestroyRetryProvisioning(c *gc.C) { m := s.setupRetryProvisioning(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyRetryProvisioning") + s.BlockDestroyModel(c, "TestBlockDestroyRetryProvisioning") s.assertRetryProvisioning(c, m) } @@ -3443,30 +1690,15 @@ func (s *clientSuite) TestClientAgentVersion(c *gc.C) { current := version.MustParse("1.2.0") - s.PatchValue(&version.Current.Number, current) + s.PatchValue(&version.Current, current) result, err := s.APIState.Client().AgentVersion() c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.Equals, current) } -func (s *serverSuite) TestBlockServiceDestroy(c *gc.C) { - s.AddTestingService(c, "dummy-service", s.AddTestingCharm(c, "dummy")) - - // block remove-objects - s.BlockRemoveObject(c, "TestBlockServiceDestroy") - err := s.APIState.Client().ServiceDestroy("dummy-service") - s.AssertBlocked(c, err, "TestBlockServiceDestroy") - // Tests may have invalid service names. 
- service, err := s.State.Service("dummy-service") - if err == nil { - // For valid service names, check that service is alive :-) - assertLife(c, service, state.Alive) - } -} - func (s *clientSuite) assertDestroyMachineSuccess(c *gc.C, u *state.Unit, m0, m1, m2 *state.Machine) { err := s.APIState.Client().DestroyMachines("0", "1", "2") - c.Assert(err, gc.ErrorMatches, `some machines were not destroyed: machine 0 is required by the environment; machine 1 has unit "wordpress/0" assigned`) + c.Assert(err, gc.ErrorMatches, `some machines were not destroyed: machine 0 is required by the model; machine 1 has unit "wordpress/0" assigned`) assertLife(c, m0, state.Alive) assertLife(c, m1, state.Alive) assertLife(c, m2, state.Dying) @@ -3474,7 +1706,7 @@ err = u.UnassignFromMachine() c.Assert(err, jc.ErrorIsNil) err = s.APIState.Client().DestroyMachines("0", "1", "2") - c.Assert(err, gc.ErrorMatches, `some machines were not destroyed: machine 0 is required by the environment`) + c.Assert(err, gc.ErrorMatches, `some machines were not destroyed: machine 0 is required by the model`) assertLife(c, m0, state.Alive) assertLife(c, m1, state.Dying) assertLife(c, m2, state.Dying) @@ -3496,6 +1728,14 @@ assertLife(c, living4, state.Alive) } +func (s *clientSuite) AssertBlocked(c *gc.C, err error, msg string) { + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue, gc.Commentf("error: %#v", err)) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: msg, + Code: "operation is blocked", + }) +} + func (s *clientSuite) TestBlockRemoveDestroyMachines(c *gc.C) { m0, m1, m2, u := s.setupDestroyMachinesTest(c) s.BlockRemoveObject(c, "TestBlockRemoveDestroyMachines") @@ -3512,14 +1752,14 @@ func (s *clientSuite) TestBlockDestoryDestroyMachines(c *gc.C) { m0, m1, m2, u := s.setupDestroyMachinesTest(c) - s.BlockDestroyEnvironment(c, "TestBlockDestoryDestroyMachines") + s.BlockDestroyModel(c, "TestBlockDestoryDestroyMachines") s.assertDestroyMachineSuccess(c, u, m0, m1, m2) } func (s *clientSuite) TestAnyBlockForceDestroyMachines(c *gc.C) { // force bypasses all blocks s.BlockAllChanges(c, "TestAnyBlockForceDestroyMachines") - s.BlockDestroyEnvironment(c, "TestAnyBlockForceDestroyMachines") + s.BlockDestroyModel(c, "TestAnyBlockForceDestroyMachines") s.BlockRemoveObject(c, "TestAnyBlockForceDestroyMachines") s.assertForceDestroyMachines(c) } @@ -3528,7 +1768,7 @@ m0, m1, m2, u := s.setupDestroyMachinesTest(c) err := s.APIState.Client().ForceDestroyMachines("0", "1", "2") - c.Assert(err, gc.ErrorMatches, `some machines were not destroyed: machine 0 is required by the environment`) + c.Assert(err, gc.ErrorMatches, `some machines were not destroyed: machine is required by the model`) assertLife(c, m0, state.Alive) assertLife(c, m1, state.Alive) assertLife(c, m2, state.Alive) @@ -3542,201 +1782,14 @@ assertRemoved(c, u) } -func (s *clientSuite) assertDestroyPrincipalUnits(c *gc.C, units []*state.Unit) { - // Destroy 2 of them; check they become Dying. - err := s.APIState.Client().DestroyServiceUnits("wordpress/0", "wordpress/1") - c.Assert(err, jc.ErrorIsNil) - assertLife(c, units[0], state.Dying) - assertLife(c, units[1], state.Dying) - - // Try to destroy an Alive one and a Dying one; check - // it destroys the Alive one and ignores the Dying one. 
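The AssertBlocked helper added above pins down how block enforcement reaches clients: as an *rpc.RequestError whose Code is "operation is blocked" and whose Message carries the user-supplied block message. A small sketch of the corresponding client-side check, using only names that appear in this diff:

    package client_test

    import (
    	"fmt"

    	"github.com/juju/errors"

    	"github.com/juju/juju/apiserver/params"
    	"github.com/juju/juju/rpc"
    )

    // describeBlocked mirrors AssertBlocked: first classify the error by
    // its code, then unwrap the cause to reach the block message.
    func describeBlocked(err error) string {
    	if !params.IsCodeOperationBlocked(err) {
    		return "not blocked"
    	}
    	if reqErr, ok := errors.Cause(err).(*rpc.RequestError); ok {
    		return fmt.Sprintf("blocked: %s", reqErr.Message)
    	}
    	return "blocked (message unavailable)"
    }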
- err = s.APIState.Client().DestroyServiceUnits("wordpress/2", "wordpress/0") - c.Assert(err, jc.ErrorIsNil) - assertLife(c, units[2], state.Dying) - - // Try to destroy an Alive one along with a nonexistent one; check that - // the valid instruction is followed but the invalid one is warned about. - err = s.APIState.Client().DestroyServiceUnits("boojum/123", "wordpress/3") - c.Assert(err, gc.ErrorMatches, `some units were not destroyed: unit "boojum/123" does not exist`) - assertLife(c, units[3], state.Dying) - - // Make one Dead, and destroy an Alive one alongside it; check no errors. - wp0, err := s.State.Unit("wordpress/0") - c.Assert(err, jc.ErrorIsNil) - err = wp0.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - err = s.APIState.Client().DestroyServiceUnits("wordpress/0", "wordpress/4") - c.Assert(err, jc.ErrorIsNil) - assertLife(c, units[0], state.Dead) - assertLife(c, units[4], state.Dying) -} - -func (s *clientSuite) setupDestroyPrincipalUnits(c *gc.C) []*state.Unit { - units := make([]*state.Unit, 5) - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - for i := range units { - unit, err := wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = unit.SetAgentStatus(state.StatusIdle, "", nil) - c.Assert(err, jc.ErrorIsNil) - units[i] = unit - } - return units -} -func (s *clientSuite) TestBlockChangesDestroyPrincipalUnits(c *gc.C) { - units := s.setupDestroyPrincipalUnits(c) - s.BlockAllChanges(c, "TestBlockChangesDestroyPrincipalUnits") - err := s.APIState.Client().DestroyServiceUnits("wordpress/0", "wordpress/1") - s.assertBlockedErrorAndLiveliness(c, err, "TestBlockChangesDestroyPrincipalUnits", units[0], units[1], units[2], units[3]) -} - -func (s *clientSuite) TestBlockRemoveDestroyPrincipalUnits(c *gc.C) { - units := s.setupDestroyPrincipalUnits(c) - s.BlockRemoveObject(c, "TestBlockRemoveDestroyPrincipalUnits") - err := s.APIState.Client().DestroyServiceUnits("wordpress/0", "wordpress/1") - s.assertBlockedErrorAndLiveliness(c, err, "TestBlockRemoveDestroyPrincipalUnits", units[0], units[1], units[2], units[3]) -} - -func (s *clientSuite) TestBlockDestroyDestroyPrincipalUnits(c *gc.C) { - units := s.setupDestroyPrincipalUnits(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyDestroyPrincipalUnits") - err := s.APIState.Client().DestroyServiceUnits("wordpress/0", "wordpress/1") - c.Assert(err, jc.ErrorIsNil) - assertLife(c, units[0], state.Dying) - assertLife(c, units[1], state.Dying) -} - -func (s *clientSuite) assertDestroySubordinateUnits(c *gc.C, wordpress0, logging0 *state.Unit) { - // Try to destroy the principal and the subordinate together; check it warns - // about the subordinate, but destroys the one it can. (The principal unit - // agent will be resposible for destroying the subordinate.) - err := s.APIState.Client().DestroyServiceUnits("wordpress/0", "logging/0") - c.Assert(err, gc.ErrorMatches, `some units were not destroyed: unit "logging/0" is a subordinate`) - assertLife(c, wordpress0, state.Dying) - assertLife(c, logging0, state.Alive) -} - -func (s *clientSuite) TestBlockRemoveDestroySubordinateUnits(c *gc.C) { - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - wordpress0, err := wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) - eps, err := s.State.InferEndpoints("logging", "wordpress") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) 
- c.Assert(err, jc.ErrorIsNil) - ru, err := rel.Unit(wordpress0) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - logging0, err := s.State.Unit("logging/0") - c.Assert(err, jc.ErrorIsNil) - - s.BlockRemoveObject(c, "TestBlockRemoveDestroySubordinateUnits") - // Try to destroy the subordinate alone; check it fails. - err = s.APIState.Client().DestroyServiceUnits("logging/0") - s.AssertBlocked(c, err, "TestBlockRemoveDestroySubordinateUnits") - assertLife(c, rel, state.Alive) - assertLife(c, wordpress0, state.Alive) - assertLife(c, logging0, state.Alive) - - err = s.APIState.Client().DestroyServiceUnits("wordpress/0", "logging/0") - s.AssertBlocked(c, err, "TestBlockRemoveDestroySubordinateUnits") - assertLife(c, wordpress0, state.Alive) - assertLife(c, logging0, state.Alive) - assertLife(c, rel, state.Alive) -} - -func (s *clientSuite) TestBlockChangesDestroySubordinateUnits(c *gc.C) { - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - wordpress0, err := wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) - eps, err := s.State.InferEndpoints("logging", "wordpress") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - ru, err := rel.Unit(wordpress0) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - logging0, err := s.State.Unit("logging/0") - c.Assert(err, jc.ErrorIsNil) - - s.BlockAllChanges(c, "TestBlockChangesDestroySubordinateUnits") - // Try to destroy the subordinate alone; check it fails. - err = s.APIState.Client().DestroyServiceUnits("logging/0") - s.AssertBlocked(c, err, "TestBlockChangesDestroySubordinateUnits") - assertLife(c, rel, state.Alive) - assertLife(c, wordpress0, state.Alive) - assertLife(c, logging0, state.Alive) - - err = s.APIState.Client().DestroyServiceUnits("wordpress/0", "logging/0") - s.AssertBlocked(c, err, "TestBlockChangesDestroySubordinateUnits") - assertLife(c, wordpress0, state.Alive) - assertLife(c, logging0, state.Alive) - assertLife(c, rel, state.Alive) -} - -func (s *clientSuite) TestBlockDestroyDestroySubordinateUnits(c *gc.C) { - wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) - wordpress0, err := wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) - eps, err := s.State.InferEndpoints("logging", "wordpress") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - ru, err := rel.Unit(wordpress0) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - logging0, err := s.State.Unit("logging/0") - c.Assert(err, jc.ErrorIsNil) - - s.BlockDestroyEnvironment(c, "TestBlockDestroyDestroySubordinateUnits") - // Try to destroy the subordinate alone; check it fails. - err = s.APIState.Client().DestroyServiceUnits("logging/0") - c.Assert(err, gc.ErrorMatches, `no units were destroyed: unit "logging/0" is a subordinate`) - assertLife(c, logging0, state.Alive) - - s.assertDestroySubordinateUnits(c, wordpress0, logging0) -} - -func (s *clientSuite) TestBlockRemoveDestroyRelation(c *gc.C) { - endpoints := []string{"wordpress", "mysql"} - relation := s.setupRelationScenario(c, endpoints) - // block remove-objects - s.BlockRemoveObject(c, "TestBlockRemoveDestroyRelation") - err := s.APIState.Client().DestroyRelation(endpoints...) 
- s.AssertBlocked(c, err, "TestBlockRemoveDestroyRelation") - assertLife(c, relation, state.Alive) -} - -func (s *clientSuite) TestBlockChangeDestroyRelation(c *gc.C) { - endpoints := []string{"wordpress", "mysql"} - relation := s.setupRelationScenario(c, endpoints) - s.BlockAllChanges(c, "TestBlockChangeDestroyRelation") - err := s.APIState.Client().DestroyRelation(endpoints...) - s.AssertBlocked(c, err, "TestBlockChangeDestroyRelation") - assertLife(c, relation, state.Alive) -} - -func (s *clientSuite) TestBlockDestroyDestroyRelation(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyDestroyRelation") - endpoints := []string{"wordpress", "mysql"} - s.assertDestroyRelation(c, endpoints) -} - -func (s *clientSuite) TestDestroyEnvironment(c *gc.C) { - // The full tests for DestroyEnvironment are in environmentmanager. +func (s *clientSuite) TestDestroyModel(c *gc.C) { + // The full tests for DestroyModel are in modelmanager. // Here we just test that things are hooked up such that we can destroy - // the environment through the client endpoint to support older juju clients. - err := s.APIState.Client().DestroyEnvironment() + // the model through the client endpoint to support older juju clients. + err := s.APIState.Client().DestroyModel() c.Assert(err, jc.ErrorIsNil) - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) c.Assert(env.Life(), gc.Equals, state.Dying) } === modified file 'src/github.com/juju/juju/apiserver/client/export_test.go' --- src/github.com/juju/juju/apiserver/client/export_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/client/export_test.go 2016-03-22 15:18:22 +0000 @@ -3,6 +3,8 @@ package client +import "github.com/juju/juju/state" + var ( RemoteParamsForMachine = remoteParamsForMachine GetAllUnitNames = getAllUnitNames @@ -22,4 +24,19 @@ type MachineAndContainers machineAndContainers -var StartSerialWaitParallel = startSerialWaitParallel +var ( + StartSerialWaitParallel = startSerialWaitParallel + GetEnvironment = &getEnvironment +) + +type StateInterface stateInterface + +type Patcher interface { + PatchValue(ptr, value interface{}) +} + +func PatchState(p Patcher, st StateInterface) { + p.PatchValue(&getState, func(*state.State) stateInterface { + return st + }) +} === modified file 'src/github.com/juju/juju/apiserver/client/filtering.go' --- src/github.com/juju/juju/apiserver/client/filtering.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/client/filtering.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,7 @@ "strings" "github.com/juju/errors" + "github.com/juju/names" "github.com/juju/juju/network" "github.com/juju/juju/state" @@ -125,6 +126,22 @@ // matches some criteria. type closurePredicate func() (matches bool, formatOK bool, _ error) +func matchMachineId(m *state.Machine, patterns []string) (bool, bool, error) { + var anyValid bool + for _, p := range patterns { + if !names.IsValidMachine(p) { + continue + } + anyValid = true + if m.Id() == p || strings.HasPrefix(m.Id(), p+"/") { + // Pattern matches the machine, or container's + // host machine. + return true, true, nil + } + } + return false, anyValid, nil +} + func unitMatchUnitName(u *state.Unit, patterns []string) (bool, bool, error) { um, err := NewUnitMatcher(patterns) if err != nil { @@ -238,6 +255,9 @@ } func buildMachineMatcherShims(m *state.Machine, patterns []string) (shims []closurePredicate, _ error) { + // Look at machine ID. 
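The machine-ID matching added to filtering.go above has one subtlety the comment hints at: a pattern matches a machine either exactly or as the host prefix of a container ID, so "1" matches machine 1 and its container 1/lxc/0, but never machine 10. A runnable reduction of that check:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // matches mirrors the core of matchMachineId: an exact ID match, or
    // the pattern names the host machine of a container.
    func matches(machineID, pattern string) bool {
    	return machineID == pattern || strings.HasPrefix(machineID, pattern+"/")
    }

    func main() {
    	for _, id := range []string{"1", "1/lxc/0", "10"} {
    		fmt.Printf("pattern %q vs machine %q: %v\n", "1", id, matches(id, "1"))
    	}
    	// true, true, false: the trailing "/" in the prefix keeps "10" out.
    }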
+ shims = append(shims, func() (bool, bool, error) { return matchMachineId(m, patterns) }) + // Look at machine status. statusInfo, err := m.Status() if err != nil { @@ -255,19 +275,9 @@ } shims = append(shims, func() (bool, bool, error) { return matchSubnet(patterns, addrs...) }) - // If the machine hosts a unit that matches any of the given - // criteria, consider the machine a match as well. - unitShims, err := buildShimsForUnit(m.Units, patterns...) - if err != nil { - return nil, err - } - shims = append(shims, unitShims...) - // Units may be able to match the pattern. Ultimately defer to // that logic, and guard against breaking the predicate-chain. - if len(unitShims) <= 0 { - shims = append(shims, func() (bool, bool, error) { return false, true, nil }) - } + shims = append(shims, func() (bool, bool, error) { return false, true, nil }) return } @@ -281,7 +291,6 @@ closeOver(unitMatchAgentStatus), closeOver(unitMatchWorkloadStatus), closeOver(unitMatchExposure), - closeOver(unitMatchSubnet), closeOver(unitMatchPort), } } @@ -301,24 +310,17 @@ oneValidPattern := false for _, p := range patterns { for _, a := range addresses { - ip, err := net.ResolveIPAddr("ip", a) - if err != nil { - errors.Trace(errors.Annotate(err, "could not parse machine's address")) - continue - } else if pip, err := net.ResolveIPAddr("ip", p); err == nil { - oneValidPattern = true - if ip.IP.Equal(pip.IP) { - return true, true, nil - } - } else if pip := net.ParseIP(p); pip != nil { - oneValidPattern = true - if ip.IP.Equal(pip) { - return true, true, nil - } - } else if _, ipNet, err := net.ParseCIDR(p); err == nil { - oneValidPattern = true - if ipNet.Contains(ip.IP) { - return true, true, nil + if p == a { + return true, true, nil + } + } + if _, ipNet, err := net.ParseCIDR(p); err == nil { + oneValidPattern = true + for _, a := range addresses { + if ip := net.ParseIP(a); ip != nil { + if ipNet.Contains(ip) { + return true, true, nil + } } } } === modified file 'src/github.com/juju/juju/apiserver/client/filtering_test.go' --- src/github.com/juju/juju/apiserver/client/filtering_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/client/filtering_test.go 2016-03-22 15:18:22 +0000 @@ -36,10 +36,11 @@ func (s *filteringUnitTests) TestMatchSubnet(c *gc.C) { + // We do not resolve hostnames. match, ok, err := client.MatchSubnet([]string{"localhost"}, "127.0.0.1") c.Check(err, jc.ErrorIsNil) - c.Check(ok, jc.IsTrue) - c.Check(match, jc.IsTrue) + c.Check(ok, jc.IsFalse) + c.Check(match, jc.IsFalse) match, ok, err = client.MatchSubnet([]string{"127.0.0.1"}, "127.0.0.1") c.Check(err, jc.ErrorIsNil) @@ -48,6 +49,11 @@ match, ok, err = client.MatchSubnet([]string{"localhost"}, "10.0.0.1") c.Check(err, jc.ErrorIsNil) + c.Check(ok, jc.IsFalse) + c.Check(match, jc.IsFalse) + + match, ok, err = client.MatchSubnet([]string{"testing.local"}, "testing.local") + c.Check(err, jc.ErrorIsNil) c.Check(ok, jc.IsTrue) - c.Check(match, jc.IsFalse) + c.Check(match, jc.IsTrue) } === removed file 'src/github.com/juju/juju/apiserver/client/get.go' --- src/github.com/juju/juju/apiserver/client/get.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/client/get.go 1970-01-01 00:00:00 +0000 @@ -1,72 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package client - -import ( - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/constraints" -) - -// ServiceGet returns the configuration for a service. 
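The matchSubnet rewrite above, together with the updated TestMatchSubnet, narrows the semantics: a pattern matches by exact string equality or, when it parses as a CIDR, by containment of the address; hostnames are no longer resolved. A standalone sketch of the new behaviour:

    package main

    import (
    	"fmt"
    	"net"
    )

    // matchAddr mirrors the reworked matchSubnet logic: exact equality,
    // or CIDR containment when the pattern parses as a network. No DNS
    // lookups are performed.
    func matchAddr(pattern, addr string) bool {
    	if pattern == addr {
    		return true
    	}
    	if _, ipNet, err := net.ParseCIDR(pattern); err == nil {
    		if ip := net.ParseIP(addr); ip != nil {
    			return ipNet.Contains(ip)
    		}
    	}
    	return false
    }

    func main() {
    	fmt.Println(matchAddr("127.0.0.1", "127.0.0.1")) // true: exact match
    	fmt.Println(matchAddr("10.0.0.0/8", "10.3.2.1")) // true: CIDR containment
    	fmt.Println(matchAddr("localhost", "127.0.0.1")) // false: no resolution
    }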
-func (c *Client) ServiceGet(args params.ServiceGet) (params.ServiceGetResults, error) { - service, err := c.api.state.Service(args.ServiceName) - if err != nil { - return params.ServiceGetResults{}, err - } - settings, err := service.ConfigSettings() - if err != nil { - return params.ServiceGetResults{}, err - } - charm, _, err := service.Charm() - if err != nil { - return params.ServiceGetResults{}, err - } - configInfo := describe(settings, charm.Config()) - var constraints constraints.Value - if service.IsPrincipal() { - constraints, err = service.Constraints() - if err != nil { - return params.ServiceGetResults{}, err - } - } - return params.ServiceGetResults{ - Service: args.ServiceName, - Charm: charm.Meta().Name, - Config: configInfo, - Constraints: constraints, - }, nil -} - -func describe(settings charm.Settings, config *charm.Config) map[string]interface{} { - results := make(map[string]interface{}) - for name, option := range config.Options { - info := map[string]interface{}{ - "description": option.Description, - "type": option.Type, - } - if value := settings[name]; value != nil { - info["value"] = value - } else { - if option.Default != nil { - info["value"] = option.Default - } - info["default"] = true - } - results[name] = info - } - return results -} - -// ServiceGetCharmURL returns the charm URL the given service is -// running at present. -func (c *Client) ServiceGetCharmURL(args params.ServiceGet) (params.StringResult, error) { - service, err := c.api.state.Service(args.ServiceName) - if err != nil { - return params.StringResult{}, err - } - charmURL, _ := service.CharmURL() - return params.StringResult{Result: charmURL.String()}, nil -} === removed file 'src/github.com/juju/juju/apiserver/client/get_test.go' --- src/github.com/juju/juju/apiserver/client/get_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/client/get_test.go 1970-01-01 00:00:00 +0000 @@ -1,198 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package client_test - -import ( - "fmt" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/constraints" -) - -type getSuite struct { - baseSuite -} - -var _ = gc.Suite(&getSuite{}) - -func (s *getSuite) TestClientServiceGetSmoketest(c *gc.C) { - s.setUpScenario(c) - results, err := s.APIState.Client().ServiceGet("wordpress") - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, ¶ms.ServiceGetResults{ - Service: "wordpress", - Charm: "wordpress", - Config: map[string]interface{}{ - "blog-title": map[string]interface{}{ - "type": "string", - "value": "My Title", - "description": "A descriptive title used for the blog.", - "default": true, - }, - }, - }) -} - -func (s *getSuite) TestServiceGetUnknownService(c *gc.C) { - apiclient := s.APIState.Client() - _, err := apiclient.ServiceGet("unknown") - c.Assert(err, gc.ErrorMatches, `service "unknown" not found`) -} - -var getTests = []struct { - about string - charm string - constraints string - config charm.Settings - expect params.ServiceGetResults -}{{ - about: "deployed service", - charm: "dummy", - constraints: "mem=2G cpu-power=400", - config: charm.Settings{ - // Different from default. - "title": "Look To Windward", - // Same as default. - "username": "admin001", - // Use default (but there's no charm default) - "skill-level": nil, - // Outlook is left unset. 
- }, - expect: params.ServiceGetResults{ - Config: map[string]interface{}{ - "title": map[string]interface{}{ - "description": "A descriptive title used for the service.", - "type": "string", - "value": "Look To Windward", - }, - "outlook": map[string]interface{}{ - "description": "No default outlook.", - "type": "string", - "default": true, - }, - "username": map[string]interface{}{ - "description": "The name of the initial account (given admin permissions).", - "type": "string", - "value": "admin001", - }, - "skill-level": map[string]interface{}{ - "description": "A number indicating skill.", - "type": "int", - "default": true, - }, - }, - }, -}, { - about: "deployed service #2", - charm: "dummy", - config: charm.Settings{ - // Set title to default. - "title": nil, - // Value when there's a default. - "username": "foobie", - // Numeric value. - "skill-level": 0, - // String value. - "outlook": "phlegmatic", - }, - expect: params.ServiceGetResults{ - Config: map[string]interface{}{ - "title": map[string]interface{}{ - "description": "A descriptive title used for the service.", - "type": "string", - "value": "My Title", - "default": true, - }, - "outlook": map[string]interface{}{ - "description": "No default outlook.", - "type": "string", - "value": "phlegmatic", - }, - "username": map[string]interface{}{ - "description": "The name of the initial account (given admin permissions).", - "type": "string", - "value": "foobie", - }, - "skill-level": map[string]interface{}{ - "description": "A number indicating skill.", - "type": "int", - // TODO(jam): 2013-08-28 bug #1217742 - // we have to use float64() here, because the - // API does not preserve int types. This used - // to be int64() but we end up with a type - // mismatch when comparing the content - "value": float64(0), - }, - }, - }, -}, { - about: "subordinate service", - charm: "logging", - expect: params.ServiceGetResults{ - Config: map[string]interface{}{}, - }, -}} - -func (s *getSuite) TestServiceGet(c *gc.C) { - for i, t := range getTests { - c.Logf("test %d. %s", i, t.about) - ch := s.AddTestingCharm(c, t.charm) - svc := s.AddTestingService(c, fmt.Sprintf("test%d", i), ch) - - var constraintsv constraints.Value - if t.constraints != "" { - constraintsv = constraints.MustParse(t.constraints) - err := svc.SetConstraints(constraintsv) - c.Assert(err, jc.ErrorIsNil) - } - if t.config != nil { - err := svc.UpdateConfigSettings(t.config) - c.Assert(err, jc.ErrorIsNil) - } - expect := t.expect - expect.Constraints = constraintsv - expect.Service = svc.Name() - expect.Charm = ch.Meta().Name - apiclient := s.APIState.Client() - got, err := apiclient.ServiceGet(svc.Name()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(*got, gc.DeepEquals, expect) - } -} - -func (s *getSuite) TestServiceGetMaxResolutionInt(c *gc.C) { - // See the bug http://pad.lv/1217742 - // ServiceGet ends up pushing a map[string]interface{} which containts - // an int64 through a JSON Marshal & Unmarshal which ends up changing - // the int64 into a float64. We will fix it if we find it is actually a - // problem. 
- const nonFloatInt = (int64(1) << 54) + 1 - const asFloat = float64(nonFloatInt) - c.Assert(int64(asFloat), gc.Not(gc.Equals), nonFloatInt) - c.Assert(int64(asFloat)+1, gc.Equals, nonFloatInt) - - ch := s.AddTestingCharm(c, "dummy") - svc := s.AddTestingService(c, "test-service", ch) - - err := svc.UpdateConfigSettings(map[string]interface{}{"skill-level": nonFloatInt}) - c.Assert(err, jc.ErrorIsNil) - got, err := s.APIState.Client().ServiceGet(svc.Name()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(got.Config["skill-level"], gc.DeepEquals, map[string]interface{}{ - "description": "A number indicating skill.", - "type": "int", - "value": asFloat, - }) -} - -func (s *getSuite) TestServiceGetCharmURL(c *gc.C) { - s.setUpScenario(c) - charmURL, err := s.APIState.Client().ServiceGetCharmURL("wordpress") - c.Assert(err, jc.ErrorIsNil) - c.Assert(charmURL.String(), gc.Equals, "local:quantal/wordpress-3") -} === modified file 'src/github.com/juju/juju/apiserver/client/instanceconfig.go' --- src/github.com/juju/juju/apiserver/client/instanceconfig.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/client/instanceconfig.go 2016-03-22 15:18:22 +0000 @@ -10,17 +10,17 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cloudconfig/instancecfg" - "github.com/juju/juju/environmentserver/authentication" + "github.com/juju/juju/controller/authentication" "github.com/juju/juju/environs" "github.com/juju/juju/state" ) // InstanceConfig returns information from the environment config that -// is needed for machine cloud-init (for non-state servers only). It +// is needed for machine cloud-init (for non-controllers only). It // is exposed for testing purposes. // TODO(rog) fix environs/manual tests so they do not need to call this, or move this elsewhere. func InstanceConfig(st *state.State, machineId, nonce, dataDir string) (*instancecfg.InstanceConfig, error) { - environConfig, err := st.EnvironConfig() + environConfig, err := st.ModelConfig() if err != nil { return nil, err } @@ -43,9 +43,9 @@ // Find the appropriate tools information. 
agentVersion, ok := environConfig.AgentVersion() if !ok { - return nil, errors.New("no agent version set in environment configuration") + return nil, errors.New("no agent version set in model configuration") } - environment, err := st.Environment() + environment, err := st.Model() if err != nil { return nil, err } === modified file 'src/github.com/juju/juju/apiserver/client/instanceconfig_test.go' --- src/github.com/juju/juju/apiserver/client/instanceconfig_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/client/instanceconfig_test.go 2016-03-22 15:18:22 +0000 @@ -48,15 +48,15 @@ instanceConfig, err := client.InstanceConfig(s.State, machineId, apiParams.Nonce, "") c.Assert(err, jc.ErrorIsNil) - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) mongoAddrs := s.State.MongoConnectionInfo().Addrs apiAddrs := []string{net.JoinHostPort("localhost", strconv.Itoa(envConfig.APIPort()))} c.Check(instanceConfig.MongoInfo.Addrs, gc.DeepEquals, mongoAddrs) c.Check(instanceConfig.APIInfo.Addrs, gc.DeepEquals, apiAddrs) - toolsURL := fmt.Sprintf("https://%s/environment/%s/tools/%s", - apiAddrs[0], jujutesting.EnvironmentTag.Id(), instanceConfig.Tools.Version) + toolsURL := fmt.Sprintf("https://%s/model/%s/tools/%s", + apiAddrs[0], jujutesting.ModelTag.Id(), instanceConfig.Tools.Version) c.Assert(instanceConfig.Tools.URL, gc.Equals, toolsURL) c.Assert(instanceConfig.AgentEnvironment[agent.AllowsSecureConnection], gc.Equals, "true") } === added file 'src/github.com/juju/juju/apiserver/client/package_test.go' --- src/github.com/juju/juju/apiserver/client/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/client/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package client_test + +import ( + stdtesting "testing" + + "github.com/juju/testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + if testing.RaceEnabled { + t.Skip("skipping package under -race, see LP 1518807") + } + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/apiserver/client/perm_test.go' --- src/github.com/juju/juju/apiserver/client/perm_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/client/perm_test.go 2016-03-22 15:18:22 +0000 @@ -6,14 +6,18 @@ import ( "strings" + "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" + "github.com/juju/juju/api/annotations" + "github.com/juju/juju/api/service" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/constraints" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" "github.com/juju/juju/version" ) @@ -72,15 +76,11 @@ op: opClientStatus, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceSet", + about: "Service.Set", op: opClientServiceSet, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceSetYAML", - op: opClientServiceSetYAML, - allow: []names.Tag{userAdmin, userOther}, - }, { - about: "Client.ServiceGet", + about: "Service.Get", op: opClientServiceGet, allow: []names.Tag{userAdmin, userOther}, }, { @@ -88,71 +88,63 @@ op: opClientResolved, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceExpose", + about: "Service.Expose", op: opClientServiceExpose, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceUnexpose", + about: "Service.Unexpose", op: opClientServiceUnexpose, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceDeploy", - op: opClientServiceDeploy, - allow: []names.Tag{userAdmin, userOther}, - }, { - about: "Client.ServiceDeployWithNetworks", - op: opClientServiceDeployWithNetworks, - allow: []names.Tag{userAdmin, userOther}, - }, { - about: "Client.ServiceUpdate", + about: "Service.Update", op: opClientServiceUpdate, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceSetCharm", + about: "Service.SetCharm", op: opClientServiceSetCharm, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.GetAnnotations", + about: "Annotations.GetAnnotations", op: opClientGetAnnotations, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.SetAnnotations", + about: "Annotations.SetAnnotations", op: opClientSetAnnotations, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.AddServiceUnits", + about: "Service.AddUnits", op: opClientAddServiceUnits, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.DestroyServiceUnits", + about: "Service.DestroyUnits", op: opClientDestroyServiceUnits, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.ServiceDestroy", + about: "Service.Destroy", op: opClientServiceDestroy, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.GetServiceConstraints", + about: "Service.GetConstraints", op: opClientGetServiceConstraints, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.SetServiceConstraints", + about: "Service.SetConstraints", op: opClientSetServiceConstraints, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.SetEnvironmentConstraints", + about: "Client.SetModelConstraints", op: opClientSetEnvironmentConstraints, allow: 
[]names.Tag{userAdmin, userOther}, }, { - about: "Client.EnvironmentGet", + about: "Client.ModelGet", op: opClientEnvironmentGet, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.EnvironmentSet", + about: "Client.ModelSet", op: opClientEnvironmentSet, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.SetEnvironAgentVersion", + about: "Client.SetModelAgentVersion", op: opClientSetEnvironAgentVersion, allow: []names.Tag{userAdmin, userOther}, }, { @@ -164,11 +156,11 @@ op: opClientCharmInfo, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.AddRelation", + about: "Service.AddRelation", op: opClientAddRelation, allow: []names.Tag{userAdmin, userOther}, }, { - about: "Client.DestroyRelation", + about: "Service.DestroyRelation", op: opClientDestroyRelation, allow: []names.Tag{userAdmin, userOther}, }} { @@ -180,7 +172,10 @@ if allow[e] { c.Check(err, jc.ErrorIsNil) } else { - c.Check(err, gc.ErrorMatches, "permission denied") + c.Check(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: "permission denied", + Code: "unauthorized access", + }) c.Check(err, jc.Satisfies, params.IsCodeUnauthorized) } reset() @@ -215,7 +210,7 @@ } func opClientAddRelation(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - _, err := st.Client().AddRelation("nosuch1", "nosuch2") + _, err := service.NewClient(st).AddRelation("nosuch1", "nosuch2") if params.IsCodeNotFound(err) { err = nil } @@ -223,7 +218,7 @@ } func opClientDestroyRelation(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().DestroyRelation("nosuch1", "nosuch2") + err := service.NewClient(st).DestroyRelation("nosuch1", "nosuch2") if params.IsCodeNotFound(err) { err = nil } @@ -243,7 +238,7 @@ func resetBlogTitle(c *gc.C, st api.Connection) func() { return func() { - err := st.Client().ServiceSet("wordpress", map[string]string{ + err := service.NewClient(st).Set("wordpress", map[string]string{ "blog-title": "", }) c.Assert(err, jc.ErrorIsNil) @@ -251,7 +246,7 @@ } func opClientServiceSet(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceSet("wordpress", map[string]string{ + err := service.NewClient(st).Set("wordpress", map[string]string{ "blog-title": "foo", }) if err != nil { @@ -260,16 +255,8 @@ return resetBlogTitle(c, st), nil } -func opClientServiceSetYAML(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceSetYAML("wordpress", `"wordpress": {"blog-title": "foo"}`) - if err != nil { - return func() {}, err - } - return resetBlogTitle(c, st), nil -} - func opClientServiceGet(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - _, err := st.Client().ServiceGet("wordpress") + _, err := service.NewClient(st).Get("wordpress") if err != nil { return func() {}, err } @@ -277,7 +264,7 @@ } func opClientServiceExpose(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceExpose("wordpress") + err := service.NewClient(st).Expose("wordpress") if err != nil { return func() {}, err } @@ -289,7 +276,7 @@ } func opClientServiceUnexpose(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceUnexpose("wordpress") + err := service.NewClient(st).Unexpose("wordpress") if err != nil { return func() {}, err } @@ -315,42 +302,35 @@ } func opClientGetAnnotations(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - ann, err := st.Client().GetAnnotations("service-wordpress") + ann, err := 
annotations.NewClient(st).Get([]string{"service-wordpress"}) if err != nil { return func() {}, err } - c.Assert(ann, gc.DeepEquals, make(map[string]string)) + c.Assert(ann, gc.DeepEquals, []params.AnnotationsGetResult{{ + EntityTag: "service-wordpress", + Annotations: map[string]string{}, + }}) return func() {}, nil } func opClientSetAnnotations(c *gc.C, st api.Connection, mst *state.State) (func(), error) { pairs := map[string]string{"key1": "value1", "key2": "value2"} - err := st.Client().SetAnnotations("service-wordpress", pairs) + setParams := map[string]map[string]string{ + "service-wordpress": pairs, + } + _, err := annotations.NewClient(st).Set(setParams) if err != nil { return func() {}, err } return func() { pairs := map[string]string{"key1": "", "key2": ""} - st.Client().SetAnnotations("service-wordpress", pairs) + setParams := map[string]map[string]string{ + "service-wordpress": pairs, + } + annotations.NewClient(st).Set(setParams) }, nil } -func opClientServiceDeploy(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceDeploy("mad:bad/url-1", "x", 1, "", constraints.Value{}, "") - if err.Error() == `charm URL has invalid schema: "mad:bad/url-1"` { - err = nil - } - return func() {}, err -} - -func opClientServiceDeployWithNetworks(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceDeployWithNetworks("mad:bad/url-1", "x", 1, "", constraints.Value{}, "", nil) - if err.Error() == `charm URL has invalid schema: "mad:bad/url-1"` { - err = nil - } - return func() {}, err -} - func opClientServiceUpdate(c *gc.C, st api.Connection, mst *state.State) (func(), error) { args := params.ServiceUpdate{ ServiceName: "no-such-charm", @@ -359,7 +339,7 @@ SettingsStrings: map[string]string{"blog-title": "foo"}, SettingsYAML: `"wordpress": {"blog-title": "foo"}`, } - err := st.Client().ServiceUpdate(args) + err := service.NewClient(st).Update(args) if params.IsCodeNotFound(err) { err = nil } @@ -367,7 +347,11 @@ } func opClientServiceSetCharm(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceSetCharm("nosuch", "local:quantal/wordpress", false) + cfg := service.SetCharmConfig{ + ServiceName: "nosuch", + CharmUrl: "local:quantal/wordpress", + } + err := service.NewClient(st).SetCharm(cfg) if params.IsCodeNotFound(err) { err = nil } @@ -375,7 +359,7 @@ } func opClientAddServiceUnits(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - _, err := st.Client().AddServiceUnits("nosuch", 1, "") + _, err := service.NewClient(st).AddUnits("nosuch", 1, nil) if params.IsCodeNotFound(err) { err = nil } @@ -383,7 +367,7 @@ } func opClientDestroyServiceUnits(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().DestroyServiceUnits("wordpress/99") + err := service.NewClient(st).DestroyUnits("wordpress/99") if err != nil && strings.HasPrefix(err.Error(), "no units were destroyed") { err = nil } @@ -391,7 +375,7 @@ } func opClientServiceDestroy(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - err := st.Client().ServiceDestroy("non-existent") + err := service.NewClient(st).Destroy("non-existent") if params.IsCodeNotFound(err) { err = nil } @@ -399,13 +383,13 @@ } func opClientGetServiceConstraints(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - _, err := st.Client().GetServiceConstraints("wordpress") + _, err := service.NewClient(st).GetConstraints("wordpress") return func() {}, err } func 
opClientSetServiceConstraints(c *gc.C, st api.Connection, mst *state.State) (func(), error) { nullConstraints := constraints.Value{} - err := st.Client().SetServiceConstraints("wordpress", nullConstraints) + err := service.NewClient(st).SetConstraints("wordpress", nullConstraints) if err != nil { return func() {}, err } @@ -414,7 +398,7 @@ func opClientSetEnvironmentConstraints(c *gc.C, st api.Connection, mst *state.State) (func(), error) { nullConstraints := constraints.Value{} - err := st.Client().SetEnvironmentConstraints(nullConstraints) + err := st.Client().SetModelConstraints(nullConstraints) if err != nil { return func() {}, err } @@ -422,7 +406,7 @@ } func opClientEnvironmentGet(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - _, err := st.Client().EnvironmentGet() + _, err := st.Client().ModelGet() if err != nil { return func() {}, err } @@ -431,22 +415,22 @@ func opClientEnvironmentSet(c *gc.C, st api.Connection, mst *state.State) (func(), error) { args := map[string]interface{}{"some-key": "some-value"} - err := st.Client().EnvironmentSet(args) + err := st.Client().ModelSet(args) if err != nil { return func() {}, err } return func() { args["some-key"] = nil - st.Client().EnvironmentSet(args) + st.Client().ModelSet(args) }, nil } func opClientSetEnvironAgentVersion(c *gc.C, st api.Connection, mst *state.State) (func(), error) { - attrs, err := st.Client().EnvironmentGet() + attrs, err := st.Client().ModelGet() if err != nil { return func() {}, err } - err = st.Client().SetEnvironAgentVersion(version.Current.Number) + err = st.Client().SetModelAgentVersion(version.Current) if err != nil { return func() {}, err } @@ -455,7 +439,7 @@ oldAgentVersion, found := attrs["agent-version"] if found { versionString := oldAgentVersion.(string) - st.Client().SetEnvironAgentVersion(version.MustParse(versionString)) + st.Client().SetModelAgentVersion(version.MustParse(versionString)) } }, nil } === modified file 'src/github.com/juju/juju/apiserver/client/run.go' --- src/github.com/juju/juju/apiserver/client/run.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/client/run.go 2016-03-22 15:18:22 +0000 @@ -14,13 +14,13 @@ "github.com/juju/utils" "github.com/juju/utils/clock" "github.com/juju/utils/set" + "github.com/juju/utils/ssh" "github.com/juju/juju/agent" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" "github.com/juju/juju/state" - "github.com/juju/juju/utils/ssh" ) // remoteParamsForMachine returns a filled in RemoteExec instance @@ -88,7 +88,7 @@ if err := c.check.ChangeAllowed(); err != nil { return params.RunResults{}, errors.Trace(err) } - units, err := getAllUnitNames(c.api.state, run.Units, run.Services) + units, err := getAllUnitNames(c.api.state(), run.Units, run.Services) if err != nil { return results, err } @@ -102,7 +102,7 @@ // We know that the unit is both a principal unit, and that it has an // assigned machine. 
machineId, _ := unit.AssignedMachineId() - machine, err := c.api.state.Machine(machineId) + machine, err := c.api.stateAccessor.Machine(machineId) if err != nil { return results, err } @@ -112,7 +112,7 @@ params = append(params, execParam) } for _, machineId := range run.Machines { - machine, err := c.api.state.Machine(machineId) + machine, err := c.api.stateAccessor.Machine(machineId) if err != nil { return results, err } @@ -128,7 +128,7 @@ if err := c.check.ChangeAllowed(); err != nil { return params.RunResults{}, errors.Trace(err) } - machines, err := c.api.state.AllMachines() + machines, err := c.api.stateAccessor.AllMachines() if err != nil { return params.RunResults{}, err } === modified file 'src/github.com/juju/juju/apiserver/client/run_test.go' --- src/github.com/juju/juju/apiserver/client/run_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/client/run_test.go 2016-03-22 15:18:22 +0000 @@ -8,17 +8,19 @@ "sync" "time" + "github.com/juju/errors" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/exec" + "github.com/juju/utils/ssh" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/client" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/rpc" "github.com/juju/juju/state" "github.com/juju/juju/testing" - "github.com/juju/juju/utils/ssh" ) type runSuite struct { @@ -77,22 +79,22 @@ func (s *runSuite) TestGetAllUnitNames(c *gc.C) { charm := s.AddTestingCharm(c, "dummy") owner := s.AdminUserTag(c) - magic, err := s.State.AddService("magic", owner.String(), charm, nil, nil, nil) + magic, err := s.State.AddService(state.AddServiceArgs{Name: "magic", Owner: owner.String(), Charm: charm}) s.addUnit(c, magic) s.addUnit(c, magic) - notAssigned, err := s.State.AddService("not-assigned", owner.String(), charm, nil, nil, nil) + notAssigned, err := s.State.AddService(state.AddServiceArgs{Name: "not-assigned", Owner: owner.String(), Charm: charm}) c.Assert(err, jc.ErrorIsNil) _, err = notAssigned.AddUnit() c.Assert(err, jc.ErrorIsNil) - _, err = s.State.AddService("no-units", owner.String(), charm, nil, nil, nil) + _, err = s.State.AddService(state.AddServiceArgs{Name: "no-units", Owner: owner.String(), Charm: charm}) c.Assert(err, jc.ErrorIsNil) - wordpress, err := s.State.AddService("wordpress", owner.String(), s.AddTestingCharm(c, "wordpress"), nil, nil, nil) + wordpress, err := s.State.AddService(state.AddServiceArgs{Name: "wordpress", Owner: owner.String(), Charm: s.AddTestingCharm(c, "wordpress")}) c.Assert(err, jc.ErrorIsNil) wordpress0 := s.addUnit(c, wordpress) - _, err = s.State.AddService("logging", owner.String(), s.AddTestingCharm(c, "logging"), nil, nil, nil) + _, err = s.State.AddService(state.AddServiceArgs{Name: "logging", Owner: owner.String(), Charm: s.AddTestingCharm(c, "logging")}) c.Assert(err, jc.ErrorIsNil) eps, err := s.State.InferEndpoints("logging", "wordpress") @@ -260,6 +262,14 @@ c.Check(string(results[2].Stdout), gc.Equals, expectedCommand[0]) } +func (s *runSuite) AssertBlocked(c *gc.C, err error, msg string) { + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue, gc.Commentf("error: %#v", err)) + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ + Message: msg, + Code: "operation is blocked", + }) +} + func (s *runSuite) TestBlockRunOnAllMachines(c *gc.C) { // Make three machines. 
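The run_test.go changes above also track an API change in state: AddService now takes a single state.AddServiceArgs struct instead of a growing positional parameter list. A sketch of why that shape scales better, using hypothetical stand-ins (AddThingArgs, addThing) rather than the real state types:

    package main

    import "fmt"

    // AddThingArgs plays the role of state.AddServiceArgs: callers name
    // the fields they set, so new optional fields can be added later
    // without breaking existing call sites.
    type AddThingArgs struct {
    	Name  string
    	Owner string
    	Charm string // the real API takes a *state.Charm
    }

    func addThing(args AddThingArgs) (string, error) {
    	if args.Name == "" {
    		return "", fmt.Errorf("name is required")
    	}
    	return args.Name, nil
    }

    func main() {
    	name, err := addThing(AddThingArgs{Name: "magic", Owner: "user-admin", Charm: "dummy"})
    	fmt.Println(name, err)
    }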
s.addMachineWithAddress(c, "10.3.2.1") @@ -280,7 +290,7 @@ charm := s.AddTestingCharm(c, "dummy") owner := s.Factory.MakeUser(c, nil).Tag() - magic, err := s.State.AddService("magic", owner.String(), charm, nil, nil, nil) + magic, err := s.State.AddService(state.AddServiceArgs{Name: "magic", Owner: owner.String(), Charm: charm}) c.Assert(err, jc.ErrorIsNil) s.addUnit(c, magic) s.addUnit(c, magic) @@ -327,7 +337,7 @@ charm := s.AddTestingCharm(c, "dummy") owner := s.Factory.MakeUser(c, nil).Tag() - magic, err := s.State.AddService("magic", owner.String(), charm, nil, nil, nil) + magic, err := s.State.AddService(state.AddServiceArgs{Name: "magic", Owner: owner.String(), Charm: charm}) c.Assert(err, jc.ErrorIsNil) s.addUnit(c, magic) s.addUnit(c, magic) === added file 'src/github.com/juju/juju/apiserver/client/state.go' --- src/github.com/juju/juju/apiserver/client/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/client/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client + +import ( + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/constraints" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/instance" + "github.com/juju/juju/network" + "github.com/juju/juju/state" + "github.com/juju/juju/version" +) + +// Unit represents a state.Unit. +type Unit interface { + state.StatusHistoryGetter + Life() state.Life + Destroy() (err error) + IsPrincipal() bool + PublicAddress() (network.Address, error) + PrivateAddress() (network.Address, error) + Resolve(retryHooks bool) error + AgentHistory() state.StatusHistoryGetter +} + +// stateInterface contains the state.State methods used in this package, +// allowing stubs to be created for testing. 
+type stateInterface interface { + FindEntity(names.Tag) (state.Entity, error) + Unit(string) (Unit, error) + Service(string) (*state.Service, error) + Machine(string) (*state.Machine, error) + AllMachines() ([]*state.Machine, error) + AllServices() ([]*state.Service, error) + AllRelations() ([]*state.Relation, error) + AllNetworks() ([]*state.Network, error) + AddOneMachine(state.MachineTemplate) (*state.Machine, error) + AddMachineInsideMachine(state.MachineTemplate, string, instance.ContainerType) (*state.Machine, error) + AddMachineInsideNewMachine(template, parentTemplate state.MachineTemplate, containerType instance.ContainerType) (*state.Machine, error) + ModelConstraints() (constraints.Value, error) + ModelConfig() (*config.Config, error) + UpdateModelConfig(map[string]interface{}, []string, state.ValidateConfigFunc) error + SetModelConstraints(constraints.Value) error + ModelUUID() string + ModelTag() names.ModelTag + Model() (*state.Model, error) + ForModel(tag names.ModelTag) (*state.State, error) + SetModelAgentVersion(version.Number) error + SetAnnotations(state.GlobalEntity, map[string]string) error + Annotations(state.GlobalEntity) (map[string]string, error) + InferEndpoints(...string) ([]state.Endpoint, error) + EndpointsRelation(...state.Endpoint) (*state.Relation, error) + Charm(*charm.URL) (*state.Charm, error) + LatestPlaceholderCharm(*charm.URL) (*state.Charm, error) + AddRelation(...state.Endpoint) (*state.Relation, error) + AddModelUser(state.ModelUserSpec) (*state.ModelUser, error) + RemoveModelUser(names.UserTag) error + Watch() *state.Multiwatcher + AbortCurrentUpgrade() error + APIHostPorts() ([][]network.HostPort, error) +} + +type stateShim struct { + *state.State +} + +func (s *stateShim) Unit(name string) (Unit, error) { + u, err := s.State.Unit(name) + if err != nil { + return nil, err + } + return u, nil +} === modified file 'src/github.com/juju/juju/apiserver/client/status.go' --- src/github.com/juju/juju/apiserver/client/status.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/client/status.go 2016-03-22 15:18:22 +0000 @@ -10,8 +10,8 @@ "github.com/juju/errors" "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/constraints" @@ -48,45 +48,28 @@ return s[i].Since.Before(*s[j].Since) } -// TODO(perrito666) this client method requires more testing, only its parts are unittested. // UnitStatusHistory returns a slice of past statuses for a given unit. 
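The stateInterface and stateShim just defined are the heart of this refactor: a narrow interface capturing exactly the state.State methods the package uses, with a thin shim adapting the concrete type. UnitStatusHistory below is one of the first consumers, reaching state through the interface rather than *state.State directly. A sketch of how a consumer and the production wiring look under that scheme; modelName is a hypothetical consumer, and Model().Name() is assumed to exist on state.Model:

    package client

    import "github.com/juju/juju/state"

    // modelName compiles against the narrow interface, so tests can hand
    // it any stateInterface implementation (such as the mockState used in
    // the tests below) while production code wraps a real *state.State.
    func modelName(st stateInterface) (string, error) {
    	m, err := st.Model()
    	if err != nil {
    		return "", err
    	}
    	return m.Name(), nil
    }

    // fromState shows the production wiring via the shim.
    func fromState(st *state.State) (string, error) {
    	return modelName(&stateShim{st})
    }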
func (c *Client) UnitStatusHistory(args params.StatusHistory) (params.UnitStatusHistory, error) { - size := args.Size - 1 - if size < 1 { + if args.Size < 1 { return params.UnitStatusHistory{}, errors.Errorf("invalid history size: %d", args.Size) } - unit, err := c.api.state.Unit(args.Name) + unit, err := c.api.stateAccessor.Unit(args.Name) if err != nil { return params.UnitStatusHistory{}, errors.Trace(err) } statuses := params.UnitStatusHistory{} if args.Kind == params.KindCombined || args.Kind == params.KindWorkload { - unitStatuses, err := unit.StatusHistory(size) - if err != nil { - return params.UnitStatusHistory{}, errors.Trace(err) - } - - current, err := unit.Status() - if err != nil { - return params.UnitStatusHistory{}, errors.Trace(err) - } - unitStatuses = append(unitStatuses, current) - + unitStatuses, err := unit.StatusHistory(args.Size) + if err != nil { + return params.UnitStatusHistory{}, errors.Trace(err) + } statuses.Statuses = append(statuses.Statuses, agentStatusFromStatusInfo(unitStatuses, params.KindWorkload)...) } if args.Kind == params.KindCombined || args.Kind == params.KindAgent { - agent := unit.Agent() - agentStatuses, err := agent.StatusHistory(size) - if err != nil { - return params.UnitStatusHistory{}, errors.Trace(err) - } - - current, err := agent.Status() - if err != nil { - return params.UnitStatusHistory{}, errors.Trace(err) - } - agentStatuses = append(agentStatuses, current) - + agentStatuses, err := unit.AgentHistory().StatusHistory(args.Size) + if err != nil { + return params.UnitStatusHistory{}, errors.Trace(err) + } statuses.Statuses = append(statuses.Statuses, agentStatusFromStatusInfo(agentStatuses, params.KindAgent)...) } @@ -103,20 +86,20 @@ // FullStatus gives the information needed for juju status over the api func (c *Client) FullStatus(args params.StatusParams) (params.FullStatus, error) { - cfg, err := c.api.state.EnvironConfig() + cfg, err := c.api.stateAccessor.ModelConfig() if err != nil { return params.FullStatus{}, errors.Annotate(err, "could not get environ config") } var noStatus params.FullStatus var context statusContext if context.services, context.units, context.latestCharms, err = - fetchAllServicesAndUnits(c.api.state, len(args.Patterns) <= 0); err != nil { + fetchAllServicesAndUnits(c.api.stateAccessor, len(args.Patterns) <= 0); err != nil { return noStatus, errors.Annotate(err, "could not fetch services and units") - } else if context.machines, err = fetchMachines(c.api.state, nil); err != nil { + } else if context.machines, err = fetchMachines(c.api.stateAccessor, nil); err != nil { return noStatus, errors.Annotate(err, "could not fetch machines") - } else if context.relations, err = fetchRelations(c.api.state); err != nil { + } else if context.relations, err = fetchRelations(c.api.stateAccessor); err != nil { return noStatus, errors.Annotate(err, "could not fetch relations") - } else if context.networks, err = fetchNetworks(c.api.state); err != nil { + } else if context.networks, err = fetchNetworks(c.api.stateAccessor); err != nil { return noStatus, errors.Annotate(err, "could not fetch networks") } @@ -125,12 +108,37 @@ if len(args.Patterns) > 0 { predicate := BuildPredicateFor(args.Patterns) + // First, attempt to match machines. Any units on those + // machines are implicitly matched. 
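The filtering logic being introduced here inverts the old flow: machines are matched first, and any unit assigned to a matched machine is matched implicitly, without re-running the predicate. A toy reduction of that two-phase idea, with plain maps standing in for the set.Strings used in the real code:

    package main

    import "fmt"

    func main() {
    	// Phase one has already matched machine "1".
    	matchedMachines := map[string]bool{"1": true}
    	unitToMachine := map[string]string{
    		"wordpress/0": "1",
    		"mysql/0":     "2",
    	}
    	// Phase two: units on matched machines ride along for free.
    	for unit, machine := range unitToMachine {
    		if matchedMachines[machine] {
    			fmt.Printf("%s matched implicitly via machine %s\n", unit, machine)
    			continue
    		}
    		fmt.Printf("%s must match the predicate on its own\n", unit)
    	}
    }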
+ matchedMachines := make(set.Strings) + for _, machineList := range context.machines { + for _, m := range machineList { + matches, err := predicate(m) + if err != nil { + return noStatus, errors.Annotate( + err, "could not filter machines", + ) + } + if matches { + matchedMachines.Add(m.Id()) + } + } + } + // Filter units - unfilteredSvcs := make(set.Strings) - unfilteredMachines := make(set.Strings) + matchedSvcs := make(set.Strings) unitChainPredicate := UnitChainPredicateFn(predicate, context.unitByName) for _, unitMap := range context.units { for name, unit := range unitMap { + machineId, err := unit.AssignedMachineId() + if err != nil { + machineId = "" + } else if matchedMachines.Contains(machineId) { + // Unit is on a matching machine. + matchedSvcs.Add(unit.ServiceName()) + continue + } + // Always start examining at the top-level. This // prevents a situation where we filter a subordinate // before we discover its parent is a match. @@ -142,23 +150,17 @@ delete(unitMap, name) continue } - - // Track which services are utilized by the units so - // that we can be sure to not filter that service out. - unfilteredSvcs.Add(unit.ServiceName()) - machineId, err := unit.AssignedMachineId() - if err != nil { - return noStatus, err + matchedSvcs.Add(unit.ServiceName()) + if machineId != "" { + matchedMachines.Add(machineId) } - unfilteredMachines.Add(machineId) } } // Filter services for svcName, svc := range context.services { - if unfilteredSvcs.Contains(svcName) { - // Don't filter services which have units that were - // not filtered. + if matchedSvcs.Contains(svcName) { + // There are matched units for this service. continue } else if matches, err := predicate(svc); err != nil { return noStatus, errors.Annotate(err, "could not filter services") @@ -169,7 +171,7 @@ // Filter machines for status, machineList := range context.machines { - filteredList := make([]*state.Machine, 0, len(machineList)) + matched := make([]*state.Machine, 0, len(machineList)) for _, m := range machineList { machineContainers, err := m.Containers() if err != nil { @@ -177,19 +179,15 @@ } machineContainersSet := set.NewStrings(machineContainers...) - if unfilteredMachines.Contains(m.Id()) || !unfilteredMachines.Intersection(machineContainersSet).IsEmpty() { - // Don't filter machines which have an unfiltered - // unit running on them. - logger.Debugf("mid %s is hosting something.", m.Id()) - filteredList = append(filteredList, m) + if matchedMachines.Contains(m.Id()) || !matchedMachines.Intersection(machineContainersSet).IsEmpty() { + // The machine is matched directly, or contains a unit + // or container that matches. 
+ logger.Tracef("machine %s is hosting something.", m.Id()) + matched = append(matched, m) continue - } else if matches, err := predicate(m); err != nil { - return noStatus, errors.Annotate(err, "could not filter machines") - } else if matches { - filteredList = append(filteredList, m) } } - context.machines[status] = filteredList + context.machines[status] = matched } } @@ -197,9 +195,11 @@ if err != nil { return noStatus, errors.Annotate(err, "cannot determine if there is a new tools version available") } - + if err != nil { + return noStatus, errors.Annotate(err, "cannot determine mongo information") + } return params.FullStatus{ - EnvironmentName: cfg.Name(), + ModelName: cfg.Name(), AvailableVersion: newToolsVersion, Machines: processMachines(context.machines), Services: context.processServices(), @@ -211,14 +211,14 @@ // newToolsVersionAvailable will return a string representing a tools // version only if the latest check is newer than current tools. func (c *Client) newToolsVersionAvailable() (string, error) { - env, err := c.api.state.Environment() + env, err := c.api.stateAccessor.Model() if err != nil { - return "", errors.Annotate(err, "cannot get environment") + return "", errors.Annotate(err, "cannot get model") } latestVersion := env.LatestToolsVersion() - envConfig, err := c.api.state.EnvironConfig() + envConfig, err := c.api.stateAccessor.ModelConfig() if err != nil { return "", errors.Annotate(err, "cannot obtain current environ config") } @@ -265,7 +265,7 @@ // machine and machines[1..n] are any containers (including nested ones). // // If machineIds is non-nil, only machines whose IDs are in the set are returned. -func fetchMachines(st *state.State, machineIds set.Strings) (map[string][]*state.Machine, error) { +func fetchMachines(st stateInterface, machineIds set.Strings) (map[string][]*state.Machine, error) { v := make(map[string][]*state.Machine) machines, err := st.AllMachines() if err != nil { @@ -296,7 +296,7 @@ // fetchAllServicesAndUnits returns a map from service name to service, // a map from service name to unit name to unit, and a map from base charm URL to latest URL. func fetchAllServicesAndUnits( - st *state.State, + st stateInterface, matchAny bool, ) (map[string]*state.Service, map[string]map[string]*state.Unit, map[charm.URL]string, error) { @@ -368,7 +368,7 @@ // to have the relations for each service. Reading them once here // avoids the repeated DB hits to retrieve the relations for each // service that used to happen in processServiceRelations(). -func fetchRelations(st *state.State) (map[string][]*state.Relation, error) { +func fetchRelations(st stateInterface) (map[string][]*state.Relation, error) { relations, err := st.AllRelations() if err != nil { return nil, err @@ -383,7 +383,7 @@ } // fetchNetworks returns a map from network name to network. -func fetchNetworks(st *state.State) (map[string]*state.Network, error) { +func fetchNetworks(st stateInterface) (map[string]*state.Network, error) { networks, err := st.AllNetworks() if err != nil { return nil, err @@ -416,7 +416,8 @@ } // Element 0 is assumed to be the top-level machine. 
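Note that fetchMachines, fetchAllServicesAndUnits, fetchRelations and fetchNetworks above now accept the package's stateInterface rather than a concrete *state.State, which is what lets the new status tests run against a stub. The same refactor in miniature, with toy names and a deliberately simplified signature:

    package main

    import "fmt"

    // machineLister is the smallest interface the function needs; the
    // real code returns []*state.Machine rather than []string.
    type machineLister interface {
    	AllMachines() ([]string, error)
    }

    type stubState struct{ ids []string }

    func (s stubState) AllMachines() ([]string, error) { return s.ids, nil }

    // countMachines accepts the interface, so any stub will do in tests.
    func countMachines(st machineLister) (int, error) {
    	ms, err := st.AllMachines()
    	if err != nil {
    		return 0, err
    	}
    	return len(ms), nil
    }

    func main() {
    	n, _ := countMachines(stubState{ids: []string{"0", "1"}})
    	fmt.Println(n) // 2
    }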
- hostStatus := makeMachineStatus(machines[0]) + tlMachine := machines[0] + hostStatus := makeMachineStatus(tlMachine) machinesMap[id] = hostStatus cache[id] = hostStatus @@ -436,16 +437,9 @@ func makeMachineStatus(machine *state.Machine) (status params.MachineStatus) { status.Id = machine.Id() - agentStatus, compatStatus := processMachine(machine) + agentStatus := processMachine(machine) status.Agent = agentStatus - // These legacy status values will be deprecated for Juju 2.0. - status.AgentState = compatStatus.Status - status.AgentStateInfo = compatStatus.Info - status.AgentVersion = compatStatus.Version - status.Life = compatStatus.Life - status.Err = compatStatus.Err - status.Series = machine.Series() status.Jobs = paramsJobsFromJobs(machine.Jobs()) status.WantsVote = machine.WantsVote() @@ -462,7 +456,7 @@ // Usually this indicates that no addresses have been set on the // machine yet. addr = network.Address{} - logger.Warningf("error fetching public address: %q", err) + logger.Debugf("error fetching public address: %q", err) } status.DNSName = addr.Value } else { @@ -471,11 +465,6 @@ } else { status.InstanceId = "error" } - // There's no point in reporting a pending agent state - // if the machine hasn't been provisioned. This - // also makes unprovisioned machines visually distinct - // in the output. - status.AgentState = "" } hc, err := machine.HardwareCharacteristics() if err != nil { @@ -674,7 +663,7 @@ // Usually this indicates that no addresses have been set on the // machine yet. addr = network.Address{} - logger.Warningf("error fetching public address: %v", err) + logger.Debugf("error fetching public address: %v", err) } result.PublicAddress = addr.Value unitPorts, _ := unit.OpenedPorts() @@ -782,7 +771,7 @@ // processMachine retrieves version and status information for the given machine. // It also returns deprecated legacy status information. -func processMachine(machine *state.Machine) (out params.AgentStatus, compat params.AgentStatus) { +func processMachine(machine *state.Machine) (out params.AgentStatus) { out.Life = processLife(machine) if t, err := machine.AgentTools(); err == nil { @@ -790,7 +779,6 @@ } populateStatusFromGetter(&out, machine) - compat = out if out.Err != nil { return @@ -800,34 +788,6 @@ // in enquiring about the agent liveness. return } - agentAlive, err := machine.AgentPresence() - if err != nil { - return - } - - if machine.Life() != state.Dead && !agentAlive { - // The agent *should* be alive but is not. Set status to - // StatusDown and munge Info to indicate the previous status and - // info. This is unfortunately making presentation decisions - // on behalf of the client (crappy). - // - // This is munging is only being left in place for - // compatibility with older clients. TODO: At some point we - // should change this so that Info left alone. API version may - // help here. - // - // Better yet, Status shouldn't be changed here in the API at - // all! Status changes should only happen in State. One - // problem caused by this is that this status change won't be - // seen by clients using a watcher because it didn't happen in - // State. - if out.Info != "" { - compat.Info = fmt.Sprintf("(%s: %s)", out.Status, out.Info) - } else { - compat.Info = fmt.Sprintf("(%s)", out.Status) - } - compat.Status = params.StatusDown - } return } @@ -848,10 +808,8 @@ } func canBeLost(status *params.UnitStatus) bool { - // Pending and Installing are deprecated. - // Need to still check pending for existing deployments. 
switch status.UnitAgent.Status { - case params.StatusPending, params.StatusInstalling, params.StatusAllocating: + case params.StatusAllocating: return false case params.StatusExecuting: return status.UnitAgent.Info != operation.RunningHookMessage(string(hooks.Install)) === modified file 'src/github.com/juju/juju/apiserver/client/status_test.go' --- src/github.com/juju/juju/apiserver/client/status_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/client/status_test.go 2016-03-22 15:18:22 +0000 @@ -34,7 +34,7 @@ client := s.APIState.Client() status, err := client.Status(nil) c.Assert(err, jc.ErrorIsNil) - c.Check(status.EnvironmentName, gc.Equals, "dummyenv") + c.Check(status.ModelName, gc.Equals, "dummymodel") c.Check(status.Services, gc.HasLen, 0) c.Check(status.Machines, gc.HasLen, 1) c.Check(status.Networks, gc.HasLen, 0) === added file 'src/github.com/juju/juju/apiserver/client/statushistory_test.go' --- src/github.com/juju/juju/apiserver/client/statushistory_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/client/statushistory_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,218 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/client" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&statusHistoryTestSuite{}) + +type statusHistoryTestSuite struct { + testing.BaseSuite + st *mockState + api *client.Client +} + +func (s *statusHistoryTestSuite) SetUpTest(c *gc.C) { + s.st = &mockState{} + client.PatchState(s, s.st) + tag := names.NewUserTag("user") + authorizer := &apiservertesting.FakeAuthorizer{Tag: tag} + var err error + s.api, err = client.NewClient(nil, nil, authorizer) + c.Assert(err, jc.ErrorIsNil) +} + +func statusInfoWithDates(si []state.StatusInfo) []state.StatusInfo { + // Add timestamps to input status info records. + // Timestamps will be in descending order so that we can + // check that sorting has occurred and the output should + // be in ascending order. 
+ result := make([]state.StatusInfo, len(si)) + for i, s := range si { + t := time.Unix(int64(1000-i), 0) + s.Since = &t + result[i] = s + } + return result +} + +func reverseStatusInfo(si []state.StatusInfo) []state.StatusInfo { + result := make([]state.StatusInfo, len(si)) + for i, s := range si { + result[len(si)-i-1] = s + } + return result +} + +func checkStatusInfo(c *gc.C, obtained []params.AgentStatus, expected []state.StatusInfo) { + c.Assert(len(obtained), gc.Equals, len(expected)) + lastTimestamp := int64(0) + for i, obtainedInfo := range obtained { + thisTimeStamp := obtainedInfo.Since.Unix() + c.Assert(thisTimeStamp >= lastTimestamp, jc.IsTrue) + lastTimestamp = thisTimeStamp + obtainedInfo.Since = nil + c.Assert(obtainedInfo.Status, gc.Equals, params.Status(expected[i].Status)) + c.Assert(obtainedInfo.Info, gc.Equals, expected[i].Message) + } +} + +func (s *statusHistoryTestSuite) TestSizeRequired(c *gc.C) { + _, err := s.api.UnitStatusHistory(params.StatusHistory{ + Name: "unit", + Kind: params.KindCombined, + Size: 0, + }) + c.Assert(err, gc.ErrorMatches, "invalid history size: 0") +} + +func (s *statusHistoryTestSuite) TestStatusHistoryUnitOnly(c *gc.C) { + s.st.unitHistory = statusInfoWithDates([]state.StatusInfo{ + { + Status: state.StatusMaintenance, + Message: "working", + }, + { + Status: state.StatusActive, + Message: "running", + }, + }) + s.st.agentHistory = statusInfoWithDates([]state.StatusInfo{ + { + Status: state.StatusIdle, + }, + }) + h, err := s.api.UnitStatusHistory(params.StatusHistory{ + Name: "unit/0", + Kind: params.KindWorkload, + Size: 10, + }) + c.Assert(err, jc.ErrorIsNil) + checkStatusInfo(c, h.Statuses, reverseStatusInfo(s.st.unitHistory)) +} + +func (s *statusHistoryTestSuite) TestStatusHistoryAgentOnly(c *gc.C) { + s.st.unitHistory = statusInfoWithDates([]state.StatusInfo{ + { + Status: state.StatusMaintenance, + Message: "working", + }, + { + Status: state.StatusActive, + Message: "running", + }, + }) + s.st.agentHistory = statusInfoWithDates([]state.StatusInfo{ + { + Status: state.StatusExecuting, + }, + { + Status: state.StatusIdle, + }, + }) + h, err := s.api.UnitStatusHistory(params.StatusHistory{ + Name: "unit/0", + Kind: params.KindAgent, + Size: 10, + }) + c.Assert(err, jc.ErrorIsNil) + checkStatusInfo(c, h.Statuses, reverseStatusInfo(s.st.agentHistory)) +} + +func (s *statusHistoryTestSuite) TestStatusHistoryCombined(c *gc.C) { + s.st.unitHistory = statusInfoWithDates([]state.StatusInfo{ + { + Status: state.StatusMaintenance, + Message: "working", + }, + { + Status: state.StatusActive, + Message: "running", + }, + { + Status: state.StatusBlocked, + Message: "waiting", + }, + }) + s.st.agentHistory = statusInfoWithDates([]state.StatusInfo{ + { + Status: state.StatusExecuting, + }, + { + Status: state.StatusIdle, + }, + }) + h, err := s.api.UnitStatusHistory(params.StatusHistory{ + Name: "unit/0", + Kind: params.KindCombined, + Size: 3, + }) + c.Assert(err, jc.ErrorIsNil) + expected := []state.StatusInfo{ + s.st.agentHistory[1], + s.st.unitHistory[0], + s.st.agentHistory[0], + } + checkStatusInfo(c, h.Statuses, expected) +} + +type mockState struct { + client.StateInterface + unitHistory []state.StatusInfo + agentHistory []state.StatusInfo +} + +func (m *mockState) ModelUUID() string { + return "uuid" +} + +func (m *mockState) Unit(name string) (client.Unit, error) { + if name != "unit/0" { + return nil, errors.NotFoundf("%v", name) + } + return &mockUnit{ + status: m.unitHistory, + agent: &mockUnitAgent{m.agentHistory}, + }, nil +} + 
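Aside: TestStatusHistoryCombined above expects the combined kind to interleave workload and agent records, keep only the Size most recent, and return them oldest-first (checkStatusInfo asserts ascending timestamps). A sketch of that merge under those assumptions, with a cut-down record type standing in for state.StatusInfo:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // record is a cut-down stand-in for state.StatusInfo.
    type record struct {
        Status string
        Since  time.Time
    }

    // byTimeDesc sorts newest-first so truncation keeps the most recent records.
    type byTimeDesc []record

    func (r byTimeDesc) Len() int           { return len(r) }
    func (r byTimeDesc) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
    func (r byTimeDesc) Less(i, j int) bool { return r[i].Since.After(r[j].Since) }

    // combined merges workload and agent histories, keeps the size most
    // recent records, and returns them oldest-first, which is the ordering
    // checkStatusInfo asserts on.
    func combined(workload, agent []record, size int) []record {
        all := append(append([]record{}, workload...), agent...)
        sort.Sort(byTimeDesc(all))
        if len(all) > size {
            all = all[:size]
        }
        // Reverse in place into ascending time order.
        for i, j := 0, len(all)-1; i < j; i, j = i+1, j-1 {
            all[i], all[j] = all[j], all[i]
        }
        return all
    }

    func main() {
        at := func(sec int64) time.Time { return time.Unix(sec, 0) }
        workload := []record{{"active", at(1000)}, {"maintenance", at(998)}}
        agent := []record{{"idle", at(999)}, {"executing", at(997)}}
        for _, r := range combined(workload, agent, 3) {
            fmt.Println(r.Since.Unix(), r.Status)
        }
        // Prints 998 maintenance, 999 idle, 1000 active.
    }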
+type mockUnit struct {
+ status statuses
+ agent *mockUnitAgent
+ client.Unit
+}
+
+func (m *mockUnit) StatusHistory(size int) ([]state.StatusInfo, error) {
+ return m.status.StatusHistory(size)
+}
+
+func (m *mockUnit) AgentHistory() state.StatusHistoryGetter {
+ return m.agent
+}
+
+type mockUnitAgent struct {
+ statuses
+}
+
+type statuses []state.StatusInfo
+
+func (s statuses) StatusHistory(size int) ([]state.StatusInfo, error) {
+ if size > len(s) {
+ size = len(s)
+ }
+ return s[:size], nil
+} === added file 'src/github.com/juju/juju/apiserver/client_auth_root.go'
--- src/github.com/juju/juju/apiserver/client_auth_root.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/client_auth_root.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,56 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package apiserver
+
+import (
+ "github.com/juju/errors"
+
+ "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/rpc"
+ "github.com/juju/juju/rpc/rpcreflect"
+ "github.com/juju/juju/state"
+)
+
+// clientAuthRoot restricts API calls for users of a model. Initially the
+// authorisation checks are only for read only access to the model, but in the
+// near future, full ACL support is desirable.
+type clientAuthRoot struct {
+ finder rpc.MethodFinder
+ user *state.ModelUser
+}
+
+// newClientAuthRoot returns a new clientAuthRoot.
+func newClientAuthRoot(finder rpc.MethodFinder, user *state.ModelUser) *clientAuthRoot {
+ return &clientAuthRoot{finder, user}
+}
+
+// FindMethod looks up the method in the underlying finder first, so unknown
+// methods are still reported as not found; for a read only user it then
+// returns ErrPerm unless the call is permitted for read only access.
+func (r *clientAuthRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) {
+ // The lookup of the name is done first to return a not found error if the
+ // user is looking for a method that we just don't have.
+ caller, err := r.finder.FindMethod(rootName, version, methodName)
+ if err != nil {
+ return nil, err
+ }
+ if r.user.ReadOnly() {
+ canCall := isCallAllowableByReadOnlyUser(rootName, methodName) ||
+ isCallReadOnly(rootName, methodName)
+ if !canCall {
+ return nil, errors.Trace(common.ErrPerm)
+ }
+ }
+
+ return caller, nil
+}
+
+// isCallAllowableByReadOnlyUser returns whether or not the method on the facade
+// can be called by a read only user.
+func isCallAllowableByReadOnlyUser(facade, _ /*method*/ string) bool {
+ // At this stage, any facade that is part of the restricted root (those
+ // that are accessible outside of models) is OK because the user would
+ // have access to those facades if they went through the controller API
+ // endpoint rather than a model oriented one.
+ return restrictedRootNames.Contains(facade)
+} === added file 'src/github.com/juju/juju/apiserver/client_auth_root_test.go'
--- src/github.com/juju/juju/apiserver/client_auth_root_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/client_auth_root_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,98 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
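Aside: clientAuthRoot.FindMethod above resolves the method before checking permissions, so an unknown method is still reported as not implemented rather than as a permission error. The shape of that pattern, reduced to a sketch with invented names (no juju types are used):

    package main

    import (
        "errors"
        "fmt"
    )

    var errPerm = errors.New("permission denied")

    // readOnlyCalls stands in for isCallAllowableByReadOnlyUser/isCallReadOnly;
    // the real allow-lists live elsewhere in the apiserver package.
    var readOnlyCalls = map[string]bool{
        "Client.FullStatus": true,
    }

    type finder func(facade, method string) (func() error, error)

    // restrictReadOnly wraps a finder with the same shape as FindMethod above:
    // resolve the method first, so unknown calls still fail as not found,
    // then apply the permission check.
    func restrictReadOnly(find finder) finder {
        return func(facade, method string) (func() error, error) {
            caller, err := find(facade, method)
            if err != nil {
                return nil, err
            }
            if !readOnlyCalls[facade+"."+method] {
                return nil, errPerm
            }
            return caller, nil
        }
    }

    func main() {
        base := func(facade, method string) (func() error, error) {
            if facade != "Client" {
                return nil, fmt.Errorf("unknown facade %q", facade)
            }
            return func() error { return nil }, nil
        }
        find := restrictReadOnly(base)
        _, err := find("Client", "Deploy")
        fmt.Println(err) // permission denied: lookup succeeded, permission did not
        _, err = find("Unknown", "Method")
        fmt.Println(err) // unknown facade "Unknown": reported before any permission check
    }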
+
+package apiserver
+
+import (
+ "reflect"
+
+ "github.com/juju/errors"
+ "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/testing/factory"
+
+ jc "github.com/juju/testing/checkers"
+ gc "gopkg.in/check.v1"
+
+ "github.com/juju/juju/rpc/rpcreflect"
+ "github.com/juju/juju/state/testing"
+)
+
+type clientAuthRootSuite struct {
+ testing.StateSuite
+}
+
+var _ = gc.Suite(&clientAuthRootSuite{})
+
+func (*clientAuthRootSuite) AssertCallGood(c *gc.C, client *clientAuthRoot, rootName string, version int, methodName string) {
+ caller, err := client.FindMethod(rootName, version, methodName)
+ c.Check(err, jc.ErrorIsNil)
+ c.Assert(caller, gc.NotNil)
+}
+
+func (*clientAuthRootSuite) AssertCallNotImplemented(c *gc.C, client *clientAuthRoot, rootName string, version int, methodName string) {
+ caller, err := client.FindMethod(rootName, version, methodName)
+ c.Check(errors.Cause(err), jc.Satisfies, isCallNotImplementedError)
+ c.Assert(caller, gc.IsNil)
+}
+
+func (s *clientAuthRootSuite) AssertCallErrPerm(c *gc.C, client *clientAuthRoot, rootName string, version int, methodName string) {
+ caller, err := client.FindMethod(rootName, version, methodName)
+ c.Check(errors.Cause(err), gc.Equals, common.ErrPerm)
+ c.Assert(caller, gc.IsNil)
+}
+
+func (s *clientAuthRootSuite) TestNormalUser(c *gc.C) {
+ envUser := s.Factory.MakeModelUser(c, nil)
+ client := newClientAuthRoot(&fakeFinder{}, envUser)
+ s.AssertCallGood(c, client, "Service", 3, "Deploy")
+ s.AssertCallGood(c, client, "UserManager", 1, "UserInfo")
+ s.AssertCallNotImplemented(c, client, "Client", 1, "Unknown")
+ s.AssertCallNotImplemented(c, client, "Unknown", 1, "Method")
+}
+
+func (s *clientAuthRootSuite) TestReadOnlyUser(c *gc.C) {
+ envUser := s.Factory.MakeModelUser(c, &factory.ModelUserParams{ReadOnly: true})
+ client := newClientAuthRoot(&fakeFinder{}, envUser)
+ // deploys are bad
+ s.AssertCallErrPerm(c, client, "Service", 3, "Deploy")
+ // read only commands are fine
+ s.AssertCallGood(c, client, "Client", 1, "FullStatus")
+ // calls on the restricted root are also fine
+ s.AssertCallGood(c, client, "UserManager", 1, "AddUser")
+ s.AssertCallNotImplemented(c, client, "Client", 1, "Unknown")
+ s.AssertCallNotImplemented(c, client, "Unknown", 1, "Method")
+}
+
+func isCallNotImplementedError(err error) bool {
+ _, ok := err.(*rpcreflect.CallNotImplementedError)
+ return ok
+}
+
+type fakeFinder struct{}
+
+// FindMethod is the only thing we need to implement rpc.MethodFinder.
+func (f *fakeFinder) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) {
+ _, _, err := lookupMethod(rootName, version, methodName)
+ if err != nil {
+ return nil, err
+ }
+ // Just return a valid caller.
+ return &fakeCaller{}, nil
+}
+
+// fakeCaller implements an rpcreflect.MethodCaller. We don't care what the
+// reflect.Types or values actually are; the caller just has to be
+// valid.
+type fakeCaller struct{} + +func (*fakeCaller) ParamsType() reflect.Type { + return reflect.TypeOf("") +} + +func (*fakeCaller) ResultType() reflect.Type { + return reflect.TypeOf("") +} + +func (*fakeCaller) Call(_ /*objId*/ string, _ /*arg*/ reflect.Value) (reflect.Value, error) { + return reflect.ValueOf(""), nil +} === modified file 'src/github.com/juju/juju/apiserver/common/addresses.go' --- src/github.com/juju/juju/apiserver/common/addresses.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/addresses.go 2016-03-22 15:18:22 +0000 @@ -11,12 +11,12 @@ ) // AddressAndCertGetter can be used to find out -// state server addresses and the CA public certificate. +// controller addresses and the CA public certificate. type AddressAndCertGetter interface { Addresses() ([]string, error) APIAddressesFromMachines() ([]string, error) CACert() string - EnvironUUID() string + ModelUUID() string APIHostPorts() ([][]network.HostPort, error) WatchAPIHostPorts() state.NotifyWatcher } @@ -83,10 +83,10 @@ } } -// EnvironUUID returns the environment UUID to connect to the environment +// ModelUUID returns the model UUID to connect to the environment // that the current connection is for. -func (a *APIAddresser) EnvironUUID() params.StringResult { - return params.StringResult{Result: a.getter.EnvironUUID()} +func (a *APIAddresser) ModelUUID() params.StringResult { + return params.StringResult{Result: a.getter.ModelUUID()} } // StateAddresser implements a common set of methods for getting state === modified file 'src/github.com/juju/juju/apiserver/common/addresses_test.go' --- src/github.com/juju/juju/apiserver/common/addresses_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/addresses_test.go 2016-03-22 15:18:22 +0000 @@ -52,7 +52,7 @@ } func (s *apiAddresserSuite) TestEnvironUUID(c *gc.C) { - result := s.addresser.EnvironUUID() + result := s.addresser.ModelUUID() c.Assert(string(result.Result), gc.Equals, "the environ uuid") } @@ -72,7 +72,7 @@ return "a cert" } -func (fakeAddresses) EnvironUUID() string { +func (fakeAddresses) ModelUUID() string { return "the environ uuid" } === modified file 'src/github.com/juju/juju/apiserver/common/block.go' --- src/github.com/juju/juju/apiserver/common/block.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/block.go 2016-03-22 15:18:22 +0000 @@ -63,7 +63,7 @@ return errors.Trace(err) } if isEnabled { - return ErrOperationBlocked(aBlock.Message()) + return OperationBlockedError(aBlock.Message()) } return nil } === modified file 'src/github.com/juju/juju/apiserver/common/block_test.go' --- src/github.com/juju/juju/apiserver/common/block_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/block_test.go 2016-03-22 15:18:22 +0000 @@ -23,16 +23,16 @@ func (m mockBlock) Id() string { return "" } -func (m mockBlock) Tag() (names.Tag, error) { return names.NewEnvironTag("mocktesting"), nil } +func (m mockBlock) Tag() (names.Tag, error) { return names.NewModelTag("mocktesting"), nil } func (m mockBlock) Type() state.BlockType { return m.t } func (m mockBlock) Message() string { return m.m } -func (m mockBlock) EnvUUID() string { return "" } +func (m mockBlock) ModelUUID() string { return "" } type blockCheckerSuite struct { - testing.FakeJujuHomeSuite + testing.FakeJujuXDGDataHomeSuite aBlock state.Block destroy, remove, change state.Block @@ -42,7 +42,7 @@ var _ = gc.Suite(&blockCheckerSuite{}) func (s *blockCheckerSuite) SetUpTest(c *gc.C) { - 
s.FakeJujuHomeSuite.SetUpTest(c) + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) s.destroy = mockBlock{t: state.DestroyBlock, m: "Mock BLOCK testing: DESTROY"} s.remove = mockBlock{t: state.RemoveBlock, m: "Mock BLOCK testing: REMOVE"} s.change = mockBlock{t: state.ChangeBlock, m: "Mock BLOCK testing: CHANGE"} === removed file 'src/github.com/juju/juju/apiserver/common/blockdevices.go' --- src/github.com/juju/juju/apiserver/common/blockdevices.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/blockdevices.go 1970-01-01 00:00:00 +0000 @@ -1,61 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/juju/state" - "github.com/juju/juju/storage" -) - -// BlockDeviceFromState translates a state.BlockDeviceInfo to a -// storage.BlockDevice. -func BlockDeviceFromState(in state.BlockDeviceInfo) storage.BlockDevice { - return storage.BlockDevice{ - in.DeviceName, - in.DeviceLinks, - in.Label, - in.UUID, - in.HardwareId, - in.BusAddress, - in.Size, - in.FilesystemType, - in.InUse, - in.MountPoint, - } -} - -// MatchingBlockDevice finds the block device that matches the -// provided volume info and volume attachment info. -func MatchingBlockDevice( - blockDevices []state.BlockDeviceInfo, - volumeInfo state.VolumeInfo, - attachmentInfo state.VolumeAttachmentInfo, -) (*state.BlockDeviceInfo, bool) { - for _, dev := range blockDevices { - if volumeInfo.HardwareId != "" { - if volumeInfo.HardwareId == dev.HardwareId { - return &dev, true - } - continue - } - if attachmentInfo.BusAddress != "" { - if attachmentInfo.BusAddress == dev.BusAddress { - return &dev, true - } - continue - } - if attachmentInfo.DeviceLink != "" { - for _, link := range dev.DeviceLinks { - if attachmentInfo.DeviceLink == link { - return &dev, true - } - } - continue - } - if attachmentInfo.DeviceName == dev.DeviceName { - return &dev, true - } - } - return nil, false -} === modified file 'src/github.com/juju/juju/apiserver/common/common_test.go' --- src/github.com/juju/juju/apiserver/common/common_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/common_test.go 2016-03-22 15:18:22 +0000 @@ -129,7 +129,7 @@ nil, // invalid tag names.NewActionTag(uuid.String()), names.NewCharmTag("cs:precise/missing"), - names.NewEnvironTag(uuid.String()), + names.NewModelTag(uuid.String()), names.NewFilesystemTag("20/20"), names.NewLocalUserTag("user"), names.NewMachineTag("42"), === removed file 'src/github.com/juju/juju/apiserver/common/environdestroy.go' --- src/github.com/juju/juju/apiserver/common/environdestroy.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/environdestroy.go 1970-01-01 00:00:00 +0000 @@ -1,116 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/metricsender" - "github.com/juju/juju/environs" - "github.com/juju/juju/instance" - "github.com/juju/juju/state" -) - -var sendMetrics = func(st *state.State) error { - err := metricsender.SendMetrics(st, metricsender.DefaultMetricSender(), metricsender.DefaultMaxBatchesPerSend()) - return errors.Trace(err) -} - -// DestroyEnvironment destroys all services and non-manager machine -// instances in the specified environment. This function assumes that all -// necessary authentication checks have been done. 
-func DestroyEnvironment(st *state.State, environTag names.EnvironTag) error { - var err error - if environTag != st.EnvironTag() { - if st, err = st.ForEnviron(environTag); err != nil { - return errors.Trace(err) - } - defer st.Close() - } - - check := NewBlockChecker(st) - if err = check.DestroyAllowed(); err != nil { - return errors.Trace(err) - } - - env, err := st.Environment() - if err != nil { - return errors.Trace(err) - } - - if err = env.Destroy(); err != nil { - return errors.Trace(err) - } - - machines, err := st.AllMachines() - if err != nil { - return errors.Trace(err) - } - - err = sendMetrics(st) - if err != nil { - logger.Warningf("failed to send leftover metrics: %v", err) - } - - // We must destroy instances server-side to support JES (Juju Environment - // Server), as there's no CLI to fall back on. In that case, we only ever - // destroy non-state machines; we leave destroying state servers in non- - // hosted environments to the CLI, as otherwise the API server may get cut - // off. - if err := destroyNonManagerMachines(st, machines); err != nil { - return errors.Trace(err) - } - - // If this is not the state server environment, remove all documents from - // state associated with the environment. - if env.EnvironTag() != env.ServerTag() { - return errors.Trace(st.RemoveAllEnvironDocs()) - } - - // Return to the caller. If it's the CLI, it will finish up - // by calling the provider's Destroy method, which will - // destroy the state servers, any straggler instances, and - // other provider-specific resources. - return nil -} - -// destroyNonManagerMachines directly destroys all non-manager, non-manual -// machine instances. -func destroyNonManagerMachines(st *state.State, machines []*state.Machine) error { - var ids []instance.Id - for _, m := range machines { - if m.IsManager() { - continue - } - if _, isContainer := m.ParentId(); isContainer { - continue - } - manual, err := m.IsManual() - if err != nil { - return err - } else if manual { - continue - } - // There is a possible race here if a machine is being - // provisioned, but hasn't yet come up. - id, err := m.InstanceId() - if err != nil { - continue - } - ids = append(ids, id) - } - if len(ids) == 0 { - return nil - } - envcfg, err := st.EnvironConfig() - if err != nil { - return err - } - env, err := environs.New(envcfg) - if err != nil { - return err - } - return env.StopInstances(ids...) -} === removed file 'src/github.com/juju/juju/apiserver/common/environdestroy_test.go' --- src/github.com/juju/juju/apiserver/common/environdestroy_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/environdestroy_test.go 1970-01-01 00:00:00 +0000 @@ -1,312 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
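Aside: destroyNonManagerMachines above stops only machines that are not managers, not containers, not manually provisioned, and that already have an instance id. The selection rules, restated as a dependency-free sketch in which the machine fields are assumptions standing in for the state.Machine accessors:

    package main

    import "fmt"

    type machine struct {
        id        string
        manager   bool   // m.IsManager()
        container bool   // m.ParentId() reports a parent
        manual    bool   // m.IsManual()
        instance  string // "" until provisioned
    }

    // stoppableInstances mirrors the skip rules in destroyNonManagerMachines:
    // managers, containers, manual machines, and machines without an
    // instance id (not yet provisioned) are left alone.
    func stoppableInstances(machines []machine) []string {
        var ids []string
        for _, m := range machines {
            if m.manager || m.container || m.manual || m.instance == "" {
                continue
            }
            ids = append(ids, m.instance)
        }
        return ids
    }

    func main() {
        ms := []machine{
            {id: "0", manager: true, instance: "i-0"},
            {id: "1", instance: "i-1"},
            {id: "1/lxc/0", container: true, instance: "i-2"},
            {id: "2", manual: true, instance: "manual:2"},
            {id: "3"}, // provisioning race: no instance id yet
        }
        fmt.Println(stoppableInstances(ms)) // [i-1]
    }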
- -package common_test - -import ( - "fmt" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/client" - "github.com/juju/juju/apiserver/common" - commontesting "github.com/juju/juju/apiserver/common/testing" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/environs" - "github.com/juju/juju/instance" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/state" - jujutesting "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" - jtesting "github.com/juju/testing" -) - -type destroyEnvironmentSuite struct { - testing.JujuConnSuite - commontesting.BlockHelper - metricSender *testMetricSender -} - -var _ = gc.Suite(&destroyEnvironmentSuite{}) - -func (s *destroyEnvironmentSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.BlockHelper = commontesting.NewBlockHelper(s.APIState) - s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() }) - - s.metricSender = &testMetricSender{} - s.PatchValue(common.SendMetrics, s.metricSender.SendMetrics) -} - -// setUpManual adds "manually provisioned" machines to state: -// one manager machine, and one non-manager. -func (s *destroyEnvironmentSuite) setUpManual(c *gc.C) (m0, m1 *state.Machine) { - m0, err := s.State.AddMachine("precise", state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) - err = m0.SetProvisioned(instance.Id("manual:0"), "manual:0:fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - m1, err = s.State.AddMachine("precise", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - err = m1.SetProvisioned(instance.Id("manual:1"), "manual:1:fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - return m0, m1 -} - -// setUpInstances adds machines to state backed by instances: -// one manager machine, one non-manager, and a container in the -// non-manager. -func (s *destroyEnvironmentSuite) setUpInstances(c *gc.C) (m0, m1, m2 *state.Machine) { - m0, err := s.State.AddMachine("precise", state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) - inst, _ := testing.AssertStartInstance(c, s.Environ, m0.Id()) - err = m0.SetProvisioned(inst.Id(), "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - - m1, err = s.State.AddMachine("precise", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - inst, _ = testing.AssertStartInstance(c, s.Environ, m1.Id()) - err = m1.SetProvisioned(inst.Id(), "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - - m2, err = s.State.AddMachineInsideMachine(state.MachineTemplate{ - Series: "precise", - Jobs: []state.MachineJob{state.JobHostUnits}, - }, m1.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - err = m2.SetProvisioned("container0", "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - - return m0, m1, m2 -} - -func (s *destroyEnvironmentSuite) TestDestroyEnvironmentManual(c *gc.C) { - _, nonManager := s.setUpManual(c) - - // If there are any non-manager manual machines in state, DestroyEnvironment will - // error. It will not set the Dying flag on the environment. - err := common.DestroyEnvironment(s.State, s.State.EnvironTag()) - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("failed to destroy environment: manually provisioned machines must first be destroyed with `juju destroy-machine %s`", nonManager.Id())) - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Alive) - - // If we remove the non-manager machine, it should pass. - // Manager machines will remain. 
- err = nonManager.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - err = nonManager.Remove() - c.Assert(err, jc.ErrorIsNil) - err = common.DestroyEnvironment(s.State, s.State.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - err = env.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Dying) - - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) -} - -func (s *destroyEnvironmentSuite) TestDestroyEnvironment(c *gc.C) { - manager, nonManager, _ := s.setUpInstances(c) - managerId, _ := manager.InstanceId() - nonManagerId, _ := nonManager.InstanceId() - - instances, err := s.Environ.Instances([]instance.Id{managerId, nonManagerId}) - c.Assert(err, jc.ErrorIsNil) - for _, inst := range instances { - c.Assert(inst, gc.NotNil) - } - - services, err := s.State.AllServices() - c.Assert(err, jc.ErrorIsNil) - - err = common.DestroyEnvironment(s.State, s.State.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) - - // After DestroyEnvironment returns, we should have: - // - all non-manager instances stopped - instances, err = s.Environ.Instances([]instance.Id{managerId, nonManagerId}) - c.Assert(err, gc.Equals, environs.ErrPartialInstances) - c.Assert(instances[0], gc.NotNil) - c.Assert(instances[1], jc.ErrorIsNil) - // - all services in state are Dying or Dead (or removed altogether), - // after running the state Cleanups. - needsCleanup, err := s.State.NeedsCleanup() - c.Assert(err, jc.ErrorIsNil) - c.Assert(needsCleanup, jc.IsTrue) - err = s.State.Cleanup() - c.Assert(err, jc.ErrorIsNil) - for _, s := range services { - err = s.Refresh() - if err != nil { - c.Assert(err, jc.Satisfies, errors.IsNotFound) - } else { - c.Assert(s.Life(), gc.Not(gc.Equals), state.Alive) - } - } - // - environment is Dying - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Dying) -} - -func (s *destroyEnvironmentSuite) TestDestroyEnvironmentWithContainers(c *gc.C) { - ops := make(chan dummy.Operation, 500) - dummy.Listen(ops) - - _, nonManager, _ := s.setUpInstances(c) - nonManagerId, _ := nonManager.InstanceId() - - err := common.DestroyEnvironment(s.State, s.State.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - for op := range ops { - if op, ok := op.(dummy.OpStopInstances); ok { - c.Assert(op.Ids, jc.SameContents, []instance.Id{nonManagerId}) - break - } - } - - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) -} - -func (s *destroyEnvironmentSuite) TestBlockDestroyDestroyEnvironment(c *gc.C) { - // Setup environment - s.setUpInstances(c) - s.BlockDestroyEnvironment(c, "TestBlockDestroyDestroyEnvironment") - err := common.DestroyEnvironment(s.State, s.State.EnvironTag()) - s.AssertBlocked(c, err, "TestBlockDestroyDestroyEnvironment") - s.metricSender.CheckCalls(c, []jtesting.StubCall{}) -} - -func (s *destroyEnvironmentSuite) TestBlockRemoveDestroyEnvironment(c *gc.C) { - // Setup environment - s.setUpInstances(c) - s.BlockRemoveObject(c, "TestBlockRemoveDestroyEnvironment") - err := common.DestroyEnvironment(s.State, s.State.EnvironTag()) - s.AssertBlocked(c, err, "TestBlockRemoveDestroyEnvironment") - s.metricSender.CheckCalls(c, []jtesting.StubCall{}) -} - -func (s *destroyEnvironmentSuite) TestBlockChangesDestroyEnvironment(c *gc.C) { - // Setup environment - s.setUpInstances(c) - // lock environment: can't destroy locked environment - s.BlockAllChanges(c, "TestBlockChangesDestroyEnvironment") - err := 
common.DestroyEnvironment(s.State, s.State.EnvironTag()) - s.AssertBlocked(c, err, "TestBlockChangesDestroyEnvironment") - s.metricSender.CheckCalls(c, []jtesting.StubCall{}) -} - -type destroyTwoEnvironmentsSuite struct { - testing.JujuConnSuite - otherState *state.State - otherEnvOwner names.UserTag - otherEnvClient *client.Client - metricSender *testMetricSender -} - -var _ = gc.Suite(&destroyTwoEnvironmentsSuite{}) - -func (s *destroyTwoEnvironmentsSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - _, err := s.State.AddUser("jess", "jess", "", "test") - c.Assert(err, jc.ErrorIsNil) - s.otherEnvOwner = names.NewUserTag("jess") - s.otherState = factory.NewFactory(s.State).MakeEnvironment(c, &factory.EnvParams{ - Owner: s.otherEnvOwner, - Prepare: true, - ConfigAttrs: jujutesting.Attrs{ - "state-server": false, - }, - }) - s.AddCleanup(func(*gc.C) { s.otherState.Close() }) - - // get the client for the other environment - auth := apiservertesting.FakeAuthorizer{ - Tag: s.otherEnvOwner, - EnvironManager: false, - } - s.otherEnvClient, err = client.NewClient(s.otherState, common.NewResources(), auth) - c.Assert(err, jc.ErrorIsNil) - - s.metricSender = &testMetricSender{} - s.PatchValue(common.SendMetrics, s.metricSender.SendMetrics) -} - -func (s *destroyTwoEnvironmentsSuite) TestCleanupEnvironDocs(c *gc.C) { - otherFactory := factory.NewFactory(s.otherState) - otherFactory.MakeMachine(c, nil) - m := otherFactory.MakeMachine(c, nil) - otherFactory.MakeMachineNested(c, m.Id(), nil) - - err := common.DestroyEnvironment(s.otherState, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - _, err = s.otherState.Environment() - c.Assert(errors.IsNotFound(err), jc.IsTrue) - - _, err = s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.otherState.EnsureEnvironmentRemoved(), jc.ErrorIsNil) - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) -} - -func (s *destroyTwoEnvironmentsSuite) TestDifferentStateEnv(c *gc.C) { - otherFactory := factory.NewFactory(s.otherState) - otherFactory.MakeMachine(c, nil) - m := otherFactory.MakeMachine(c, nil) - otherFactory.MakeMachineNested(c, m.Id(), nil) - - // NOTE: pass in the main test State instance, which is 'bound' - // to the state server environment. 
- err := common.DestroyEnvironment(s.State, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - _, err = s.otherState.Environment() - c.Assert(errors.IsNotFound(err), jc.IsTrue) - - _, err = s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.otherState.EnsureEnvironmentRemoved(), jc.ErrorIsNil) - - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) -} - -func (s *destroyTwoEnvironmentsSuite) TestDestroyStateServerAfterNonStateServerIsDestroyed(c *gc.C) { - err := common.DestroyEnvironment(s.State, s.State.EnvironTag()) - c.Assert(err, gc.ErrorMatches, "failed to destroy environment: hosting 1 other environments") - err = common.DestroyEnvironment(s.State, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - err = common.DestroyEnvironment(s.State, s.State.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}, {FuncName: "SendMetrics"}}) -} - -func (s *destroyTwoEnvironmentsSuite) TestCanDestroyNonBlockedEnv(c *gc.C) { - bh := commontesting.NewBlockHelper(s.APIState) - defer bh.Close() - - bh.BlockDestroyEnvironment(c, "TestBlockDestroyDestroyEnvironment") - - err := common.DestroyEnvironment(s.State, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - err = common.DestroyEnvironment(s.State, s.State.EnvironTag()) - bh.AssertBlocked(c, err, "TestBlockDestroyDestroyEnvironment") - - s.metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) -} - -type testMetricSender struct { - jtesting.Stub -} - -func (t *testMetricSender) SendMetrics(st *state.State) error { - t.AddCall("SendMetrics") - return nil -} === removed file 'src/github.com/juju/juju/apiserver/common/environmachineswatcher.go' --- src/github.com/juju/juju/apiserver/common/environmachineswatcher.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/common/environmachineswatcher.go 1970-01-01 00:00:00 +0000 @@ -1,51 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "fmt" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state" - "github.com/juju/juju/state/watcher" -) - -// EnvironMachinesWatcher implements a common WatchEnvironMachines -// method for use by various facades. -type EnvironMachinesWatcher struct { - st state.EnvironMachinesWatcher - resources *Resources - authorizer Authorizer -} - -// NewEnvironMachinesWatcher returns a new EnvironMachinesWatcher. The -// GetAuthFunc will be used on each invocation of WatchUnits to -// determine current permissions. -func NewEnvironMachinesWatcher(st state.EnvironMachinesWatcher, resources *Resources, authorizer Authorizer) *EnvironMachinesWatcher { - return &EnvironMachinesWatcher{ - st: st, - resources: resources, - authorizer: authorizer, - } -} - -// WatchEnvironMachines returns a StringsWatcher that notifies of -// changes to the life cycles of the top level machines in the current -// environment. -func (e *EnvironMachinesWatcher) WatchEnvironMachines() (params.StringsWatchResult, error) { - result := params.StringsWatchResult{} - if !e.authorizer.AuthEnvironManager() { - return result, ErrPerm - } - watch := e.st.WatchEnvironMachines() - // Consume the initial event and forward it to the result. 
- if changes, ok := <-watch.Changes(); ok { - result.StringsWatcherId = e.resources.Register(watch) - result.Changes = changes - } else { - err := watcher.EnsureErr(watch) - return result, fmt.Errorf("cannot obtain initial environment machines: %v", err) - } - return result, nil -} === removed file 'src/github.com/juju/juju/apiserver/common/environmachineswatcher_test.go' --- src/github.com/juju/juju/apiserver/common/environmachineswatcher_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/common/environmachineswatcher_test.go 1970-01-01 00:00:00 +0000 @@ -1,69 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common_test - -import ( - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/testing" -) - -type environMachinesWatcherSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&environMachinesWatcherSuite{}) - -type fakeEnvironMachinesWatcher struct { - state.EnvironMachinesWatcher - initial []string -} - -func (f *fakeEnvironMachinesWatcher) WatchEnvironMachines() state.StringsWatcher { - changes := make(chan []string, 1) - // Simulate initial event. - changes <- f.initial - return &fakeStringsWatcher{changes} -} - -func (s *environMachinesWatcherSuite) TestWatchEnvironMachines(c *gc.C) { - authorizer := apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("0"), - EnvironManager: true, - } - resources := common.NewResources() - s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) - e := common.NewEnvironMachinesWatcher( - &fakeEnvironMachinesWatcher{initial: []string{"foo"}}, - resources, - authorizer, - ) - result, err := e.WatchEnvironMachines() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, params.StringsWatchResult{"1", []string{"foo"}, nil}) - c.Assert(resources.Count(), gc.Equals, 1) -} - -func (s *environMachinesWatcherSuite) TestWatchAuthError(c *gc.C) { - authorizer := apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("1"), - EnvironManager: false, - } - resources := common.NewResources() - s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) - e := common.NewEnvironMachinesWatcher( - &fakeEnvironMachinesWatcher{}, - resources, - authorizer, - ) - _, err := e.WatchEnvironMachines() - c.Assert(err, gc.ErrorMatches, "permission denied") - c.Assert(resources.Count(), gc.Equals, 0) -} === removed file 'src/github.com/juju/juju/apiserver/common/environwatcher.go' --- src/github.com/juju/juju/apiserver/common/environwatcher.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/environwatcher.go 1970-01-01 00:00:00 +0000 @@ -1,84 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" - "github.com/juju/juju/state" - "github.com/juju/juju/state/watcher" -) - -// EnvironWatcher implements two common methods for use by various -// facades - WatchForEnvironConfigChanges and EnvironConfig. -type EnvironWatcher struct { - st state.EnvironAccessor - resources *Resources - authorizer Authorizer -} - -// NewEnvironWatcher returns a new EnvironWatcher. Active watchers -// will be stored in the provided Resources. 
The two GetAuthFunc -// callbacks will be used on each invocation of the methods to -// determine current permissions. -// Right now, environment tags are not used, so both created AuthFuncs -// are called with "" for tag, which means "the current environment". -func NewEnvironWatcher(st state.EnvironAccessor, resources *Resources, authorizer Authorizer) *EnvironWatcher { - return &EnvironWatcher{ - st: st, - resources: resources, - authorizer: authorizer, - } -} - -// WatchForEnvironConfigChanges returns a NotifyWatcher that observes -// changes to the environment configuration. -// Note that although the NotifyWatchResult contains an Error field, -// it's not used because we are only returning a single watcher, -// so we use the regular error return. -func (e *EnvironWatcher) WatchForEnvironConfigChanges() (params.NotifyWatchResult, error) { - result := params.NotifyWatchResult{} - watch := e.st.WatchForEnvironConfigChanges() - // Consume the initial event. Technically, API - // calls to Watch 'transmit' the initial event - // in the Watch response. But NotifyWatchers - // have no state to transmit. - if _, ok := <-watch.Changes(); ok { - result.NotifyWatcherId = e.resources.Register(watch) - } else { - return result, watcher.EnsureErr(watch) - } - return result, nil -} - -// EnvironConfig returns the current environment's configuration. -func (e *EnvironWatcher) EnvironConfig() (params.EnvironConfigResult, error) { - result := params.EnvironConfigResult{} - - config, err := e.st.EnvironConfig() - if err != nil { - return result, err - } - allAttrs := config.AllAttrs() - - if !e.authorizer.AuthEnvironManager() { - // Mask out any secrets in the environment configuration - // with values of the same type, so it'll pass validation. - // - // TODO(dimitern) 201309-26 bug #1231384 - // Delete the code below and mark the bug as fixed, - // once it's live tested on MAAS and 1.16 compatibility - // is dropped. - provider, err := environs.Provider(config.Type()) - if err != nil { - return result, err - } - secretAttrs, err := provider.SecretAttrs(config) - for k := range secretAttrs { - allAttrs[k] = "not available" - } - } - result.Config = allAttrs - return result, nil -} === removed file 'src/github.com/juju/juju/apiserver/common/environwatcher_test.go' --- src/github.com/juju/juju/apiserver/common/environwatcher_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/environwatcher_test.go 1970-01-01 00:00:00 +0000 @@ -1,131 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common_test - -import ( - "fmt" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/cmd/envcmd" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/configstore" - "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/state" - "github.com/juju/juju/testing" -) - -type environWatcherSuite struct { - testing.BaseSuite - - testingEnvConfig *config.Config -} - -var _ = gc.Suite(&environWatcherSuite{}) - -type fakeEnvironAccessor struct { - envConfig *config.Config - envConfigError error -} - -func (*fakeEnvironAccessor) WatchForEnvironConfigChanges() state.NotifyWatcher { - changes := make(chan struct{}, 1) - // Simulate initial event. 
- changes <- struct{}{} - return &fakeNotifyWatcher{changes: changes} -} - -func (f *fakeEnvironAccessor) EnvironConfig() (*config.Config, error) { - if f.envConfigError != nil { - return nil, f.envConfigError - } - return f.envConfig, nil -} - -func (s *environWatcherSuite) TearDownTest(c *gc.C) { - dummy.Reset() - s.BaseSuite.TearDownTest(c) -} - -func (s *environWatcherSuite) TestWatchSuccess(c *gc.C) { - resources := common.NewResources() - s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) - e := common.NewEnvironWatcher( - &fakeEnvironAccessor{}, - resources, - nil, - ) - result, err := e.WatchForEnvironConfigChanges() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResult{"1", nil}) - c.Assert(resources.Count(), gc.Equals, 1) -} - -func (*environWatcherSuite) TestEnvironConfigSuccess(c *gc.C) { - authorizer := apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("0"), - EnvironManager: true, - } - testingEnvConfig := testingEnvConfig(c) - e := common.NewEnvironWatcher( - &fakeEnvironAccessor{envConfig: testingEnvConfig}, - nil, - authorizer, - ) - result, err := e.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - // Make sure we can read the secret attribute (i.e. it's not masked). - c.Check(result.Config["secret"], gc.Equals, "pork") - c.Check(map[string]interface{}(result.Config), jc.DeepEquals, testingEnvConfig.AllAttrs()) -} - -func (*environWatcherSuite) TestEnvironConfigFetchError(c *gc.C) { - authorizer := apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("0"), - EnvironManager: true, - } - e := common.NewEnvironWatcher( - &fakeEnvironAccessor{ - envConfigError: fmt.Errorf("pow"), - }, - nil, - authorizer, - ) - _, err := e.EnvironConfig() - c.Assert(err, gc.ErrorMatches, "pow") -} - -func (*environWatcherSuite) TestEnvironConfigMaskedSecrets(c *gc.C) { - authorizer := apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("0"), - EnvironManager: false, - } - testingEnvConfig := testingEnvConfig(c) - e := common.NewEnvironWatcher( - &fakeEnvironAccessor{envConfig: testingEnvConfig}, - nil, - authorizer, - ) - result, err := e.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - // Make sure the secret attribute is masked. - c.Check(result.Config["secret"], gc.Equals, "not available") - // And only that is masked. 
- result.Config["secret"] = "pork" - c.Check(map[string]interface{}(result.Config), jc.DeepEquals, testingEnvConfig.AllAttrs()) -} - -func testingEnvConfig(c *gc.C) *config.Config { - cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) - c.Assert(err, jc.ErrorIsNil) - env, err := environs.Prepare(cfg, envcmd.BootstrapContext(testing.Context(c)), configstore.NewMem()) - c.Assert(err, jc.ErrorIsNil) - return env.Config() -} === modified file 'src/github.com/juju/juju/apiserver/common/errors.go' --- src/github.com/juju/juju/apiserver/common/errors.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/common/errors.go 2016-03-22 15:18:22 +0000 @@ -4,29 +4,23 @@ package common import ( - stderrors "errors" "fmt" + "net/http" + "strings" "github.com/juju/errors" "github.com/juju/names" "github.com/juju/txn" + "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/core/lease" "github.com/juju/juju/state" ) -type notSupportedError struct { - tag names.Tag - operation string -} - -func (e *notSupportedError) Error() string { - return fmt.Sprintf("entity %q does not support %s", e.tag, e.operation) -} - func NotSupportedError(tag names.Tag, operation string) error { - return ¬SupportedError{tag, operation} + return errors.Errorf("entity %q does not support %s", tag, operation) } type noAddressSetError struct { @@ -42,51 +36,73 @@ return &noAddressSetError{unitTag, addressName} } -func IsNoAddressSetError(err error) bool { +func isNoAddressSetError(err error) bool { _, ok := err.(*noAddressSetError) return ok } -type unknownEnvironmentError struct { +type unknownModelError struct { uuid string } -func (e *unknownEnvironmentError) Error() string { - return fmt.Sprintf("unknown environment: %q", e.uuid) -} - -func UnknownEnvironmentError(uuid string) error { - return &unknownEnvironmentError{uuid: uuid} -} - -func IsUnknownEnviromentError(err error) bool { - _, ok := err.(*unknownEnvironmentError) +func (e *unknownModelError) Error() string { + return fmt.Sprintf("unknown model: %q", e.uuid) +} + +func UnknownModelError(uuid string) error { + return &unknownModelError{uuid: uuid} +} + +func isUnknownModelError(err error) bool { + _, ok := err.(*unknownModelError) + return ok +} + +// DischargeRequiredError is the error returned when a macaroon requires discharging +// to complete authentication. +type DischargeRequiredError struct { + Cause error + Macaroon *macaroon.Macaroon +} + +// Error implements the error interface. +func (e *DischargeRequiredError) Error() string { + return e.Cause.Error() +} + +// IsDischargeRequiredError reports whether the cause +// of the error is a *DischargeRequiredError. 
+func IsDischargeRequiredError(err error) bool { + _, ok := errors.Cause(err).(*DischargeRequiredError) return ok } var ( - ErrBadId = stderrors.New("id not found") - ErrBadCreds = stderrors.New("invalid entity name or password") - ErrPerm = stderrors.New("permission denied") - ErrNotLoggedIn = stderrors.New("not logged in") - ErrUnknownWatcher = stderrors.New("unknown watcher id") - ErrUnknownPinger = stderrors.New("unknown pinger id") - ErrStoppedWatcher = stderrors.New("watcher has been stopped") - ErrBadRequest = stderrors.New("invalid request") - ErrTryAgain = stderrors.New("try again") - ErrActionNotAvailable = stderrors.New("action no longer available") - - ErrOperationBlocked = func(msg string) *params.Error { - if msg == "" { - msg = "The operation has been blocked." - } - return ¶ms.Error{ - Code: params.CodeOperationBlocked, - Message: msg, - } - } + ErrBadId = errors.New("id not found") + ErrBadCreds = errors.New("invalid entity name or password") + ErrPerm = errors.New("permission denied") + ErrNotLoggedIn = errors.New("not logged in") + ErrUnknownWatcher = errors.New("unknown watcher id") + ErrUnknownPinger = errors.New("unknown pinger id") + ErrStoppedWatcher = errors.New("watcher has been stopped") + ErrBadRequest = errors.New("invalid request") + ErrTryAgain = errors.New("try again") + ErrActionNotAvailable = errors.New("action no longer available") ) +// OperationBlockedError returns an error which signifies that +// an operation has been blocked; the message should describe +// what has been blocked. +func OperationBlockedError(msg string) error { + if msg == "" { + msg = "the operation has been blocked" + } + return ¶ms.Error{ + Message: msg, + Code: params.CodeOperationBlocked, + } +} + var singletonErrorCodes = map[error]string{ state.ErrCannotEnterScopeYet: params.CodeCannotEnterScopeYet, state.ErrCannotEnterScope: params.CodeCannotEnterScope, @@ -94,6 +110,7 @@ state.ErrDead: params.CodeDead, txn.ErrExcessiveContention: params.CodeExcessiveContention, leadership.ErrClaimDenied: params.CodeLeadershipClaimDenied, + lease.ErrClaimDenied: params.CodeLeaseClaimDenied, ErrBadId: params.CodeNotFound, ErrBadCreds: params.CodeUnauthorized, ErrPerm: params.CodeUnauthorized, @@ -125,6 +142,36 @@ return nil, false } +// ServerErrorAndStatus is like ServerError but also +// returns an HTTP status code appropriate for using +// in a response holding the given error. +func ServerErrorAndStatus(err error) (*params.Error, int) { + err1 := ServerError(err) + if err1 == nil { + return nil, http.StatusOK + } + status := http.StatusInternalServerError + switch err1.Code { + case params.CodeUnauthorized: + status = http.StatusUnauthorized + case params.CodeNotFound: + status = http.StatusNotFound + case params.CodeBadRequest: + status = http.StatusBadRequest + case params.CodeMethodNotAllowed: + status = http.StatusMethodNotAllowed + case params.CodeOperationBlocked: + // This should really be http.StatusForbidden but earlier versions + // of juju clients rely on the 400 status, so we leave it like that. + status = http.StatusBadRequest + case params.CodeForbidden: + status = http.StatusForbidden + case params.CodeDischargeRequired: + status = http.StatusUnauthorized + } + return err1, status +} + // ServerError returns an error suitable for returning to an API // client, with an error code suitable for various kinds of errors // generated in packages outside the API. @@ -136,6 +183,7 @@ // Skip past annotations when looking for the code. 
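Aside: ServerErrorAndStatus above maps a handful of params error codes to HTTP statuses and falls back to 500 for everything else. The same mapping as a stand-alone sketch; the code strings are assumptions based on the params package, and the real function works on a *params.Error rather than a bare string:

    package main

    import (
        "fmt"
        "net/http"
    )

    // codeToStatus mirrors the switch in ServerErrorAndStatus: anything not
    // explicitly mapped is an internal server error.
    var codeToStatus = map[string]int{
        "unauthorized access": http.StatusUnauthorized,
        "not found":           http.StatusNotFound,
        "bad request":         http.StatusBadRequest,
        "method not allowed":  http.StatusMethodNotAllowed,
        // Kept as 400 for compatibility with older juju clients.
        "operation is blocked": http.StatusBadRequest,
    }

    func statusFor(code string) int {
        if s, ok := codeToStatus[code]; ok {
            return s
        }
        return http.StatusInternalServerError
    }

    func main() {
        fmt.Println(statusFor("not found"))         // 404
        fmt.Println(statusFor("some unknown code")) // 500
    }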
err = errors.Cause(err) code, ok := singletonCode(err) + var info *params.ErrorInfo switch { case ok: case errors.IsUnauthorized(err): @@ -148,7 +196,7 @@ code = params.CodeNotAssigned case state.IsHasAssignedUnitsError(err): code = params.CodeHasAssignedUnits - case IsNoAddressSetError(err): + case isNoAddressSetError(err): code = params.CodeNoAddressSet case errors.IsNotProvisioned(err): code = params.CodeNotProvisioned @@ -156,72 +204,105 @@ code = params.CodeUpgradeInProgress case state.IsHasAttachmentsError(err): code = params.CodeMachineHasAttachedStorage - case IsUnknownEnviromentError(err): + case isUnknownModelError(err): code = params.CodeNotFound case errors.IsNotSupported(err): code = params.CodeNotSupported + case errors.IsBadRequest(err): + code = params.CodeBadRequest + case errors.IsMethodNotAllowed(err): + code = params.CodeMethodNotAllowed default: + if err, ok := err.(*DischargeRequiredError); ok { + code = params.CodeDischargeRequired + info = ¶ms.ErrorInfo{ + Macaroon: err.Macaroon, + // One macaroon fits all. + MacaroonPath: "/", + } + break + } code = params.ErrCode(err) } return ¶ms.Error{ Message: msg, Code: code, - } + Info: info, + } +} + +func DestroyErr(desc string, ids, errs []string) error { + // TODO(waigani) refactor DestroyErr to take a map of ids to errors. + if len(errs) == 0 { + return nil + } + msg := "some %s were not destroyed" + if len(errs) == len(ids) { + msg = "no %s were destroyed" + } + msg = fmt.Sprintf(msg, desc) + return errors.Errorf("%s: %s", msg, strings.Join(errs, "; ")) } // RestoreError makes a best effort at converting the given error -// back into an error originally converted by ServerError(). If the -// error could not be converted then false is returned. -func RestoreError(err error) (error, bool) { +// back into an error originally converted by ServerError(). +func RestoreError(err error) error { err = errors.Cause(err) if apiErr, ok := err.(*params.Error); !ok { - return err, false + return err } else if apiErr == nil { - return nil, true + return nil } if params.ErrCode(err) == "" { - return err, false + return err } msg := err.Error() if singleton, ok := singletonError(err); ok { - return singleton, true + return singleton } // TODO(ericsnow) Support the other error types handled by ServerError(). switch { case params.IsCodeUnauthorized(err): - return errors.NewUnauthorized(nil, msg), true + return errors.NewUnauthorized(nil, msg) case params.IsCodeNotFound(err): - // TODO(ericsnow) unknownEnvironmentError should be handled here too. + // TODO(ericsnow) UnknownModelError should be handled here too. // ...by parsing msg? - return errors.NewNotFound(nil, msg), true + return errors.NewNotFound(nil, msg) case params.IsCodeAlreadyExists(err): - return errors.NewAlreadyExists(nil, msg), true + return errors.NewAlreadyExists(nil, msg) case params.IsCodeNotAssigned(err): - return errors.NewNotAssigned(nil, msg), true + return errors.NewNotAssigned(nil, msg) case params.IsCodeHasAssignedUnits(err): // TODO(ericsnow) Handle state.HasAssignedUnitsError here. // ...by parsing msg? - return err, false + return err case params.IsCodeNoAddressSet(err): // TODO(ericsnow) Handle isNoAddressSetError here. // ...by parsing msg? - return err, false + return err case params.IsCodeNotProvisioned(err): - return errors.NewNotProvisioned(nil, msg), true + return errors.NewNotProvisioned(nil, msg) case params.IsCodeUpgradeInProgress(err): // TODO(ericsnow) Handle state.UpgradeInProgressError here. // ...by parsing msg? 
- return err, false + return err case params.IsCodeMachineHasAttachedStorage(err): // TODO(ericsnow) Handle state.HasAttachmentsError here. // ...by parsing msg? - return err, false + return err case params.IsCodeNotSupported(err): - return errors.NewNotSupported(nil, msg), true + return errors.NewNotSupported(nil, msg) + case params.IsBadRequest(err): + return errors.NewBadRequest(nil, msg) + case params.IsMethodNotAllowed(err): + return errors.NewMethodNotAllowed(nil, msg) + case params.ErrCode(err) == params.CodeDischargeRequired: + // TODO(ericsnow) Handle DischargeRequiredError here. + return err default: - return err, false + return err } } === modified file 'src/github.com/juju/juju/apiserver/common/errors_test.go' --- src/github.com/juju/juju/apiserver/common/errors_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/common/errors_test.go 2016-03-22 15:18:22 +0000 @@ -5,16 +5,19 @@ import ( stderrors "errors" + "net/http" "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/txn" gc "gopkg.in/check.v1" + "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/core/lease" "github.com/juju/juju/state" "github.com/juju/juju/testing" ) @@ -28,114 +31,174 @@ var errorTransformTests = []struct { err error code string + status int helperFunc func(error) bool }{{ err: errors.NotFoundf("hello"), code: params.CodeNotFound, + status: http.StatusNotFound, helperFunc: params.IsCodeNotFound, }, { err: errors.Unauthorizedf("hello"), code: params.CodeUnauthorized, + status: http.StatusUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: state.ErrCannotEnterScopeYet, code: params.CodeCannotEnterScopeYet, + status: http.StatusInternalServerError, helperFunc: params.IsCodeCannotEnterScopeYet, }, { err: state.ErrCannotEnterScope, code: params.CodeCannotEnterScope, + status: http.StatusInternalServerError, helperFunc: params.IsCodeCannotEnterScope, }, { err: state.ErrDead, code: params.CodeDead, + status: http.StatusInternalServerError, helperFunc: params.IsCodeDead, }, { err: txn.ErrExcessiveContention, code: params.CodeExcessiveContention, + status: http.StatusInternalServerError, helperFunc: params.IsCodeExcessiveContention, }, { err: state.ErrUnitHasSubordinates, code: params.CodeUnitHasSubordinates, + status: http.StatusInternalServerError, helperFunc: params.IsCodeUnitHasSubordinates, }, { err: common.ErrBadId, code: params.CodeNotFound, + status: http.StatusNotFound, helperFunc: params.IsCodeNotFound, }, { err: common.NoAddressSetError(names.NewUnitTag("mysql/0"), "public"), code: params.CodeNoAddressSet, + status: http.StatusInternalServerError, helperFunc: params.IsCodeNoAddressSet, }, { err: common.ErrBadCreds, code: params.CodeUnauthorized, + status: http.StatusUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: common.ErrPerm, code: params.CodeUnauthorized, + status: http.StatusUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: common.ErrNotLoggedIn, code: params.CodeUnauthorized, + status: http.StatusUnauthorized, helperFunc: params.IsCodeUnauthorized, }, { err: errors.NotProvisionedf("machine 0"), code: params.CodeNotProvisioned, + status: http.StatusInternalServerError, helperFunc: params.IsCodeNotProvisioned, }, { err: errors.AlreadyExistsf("blah"), code: params.CodeAlreadyExists, + status: http.StatusInternalServerError, helperFunc: 
params.IsCodeAlreadyExists, }, { err: common.ErrUnknownWatcher, code: params.CodeNotFound, + status: http.StatusNotFound, helperFunc: params.IsCodeNotFound, }, { err: errors.NotAssignedf("unit mysql/0"), code: params.CodeNotAssigned, + status: http.StatusInternalServerError, helperFunc: params.IsCodeNotAssigned, }, { err: common.ErrStoppedWatcher, code: params.CodeStopped, + status: http.StatusInternalServerError, helperFunc: params.IsCodeStopped, }, { err: &state.HasAssignedUnitsError{"42", []string{"a"}}, code: params.CodeHasAssignedUnits, + status: http.StatusInternalServerError, helperFunc: params.IsCodeHasAssignedUnits, }, { err: common.ErrTryAgain, code: params.CodeTryAgain, + status: http.StatusInternalServerError, helperFunc: params.IsCodeTryAgain, }, { - err: state.UpgradeInProgressError, - code: params.CodeUpgradeInProgress, - helperFunc: params.IsCodeUpgradeInProgress, -}, { err: leadership.ErrClaimDenied, code: params.CodeLeadershipClaimDenied, + status: http.StatusInternalServerError, helperFunc: params.IsCodeLeadershipClaimDenied, }, { - err: common.ErrOperationBlocked("test"), + err: lease.ErrClaimDenied, + code: params.CodeLeaseClaimDenied, + status: http.StatusInternalServerError, + helperFunc: params.IsCodeLeaseClaimDenied, +}, { + err: common.OperationBlockedError("test"), code: params.CodeOperationBlocked, + status: http.StatusBadRequest, helperFunc: params.IsCodeOperationBlocked, }, { err: errors.NotSupportedf("needed feature"), code: params.CodeNotSupported, + status: http.StatusInternalServerError, helperFunc: params.IsCodeNotSupported, }, { - err: stderrors.New("an error"), - code: "", -}, { - err: unhashableError{"foo"}, - code: "", -}, { - err: common.UnknownEnvironmentError("dead-beef-123456"), + err: errors.BadRequestf("something"), + code: params.CodeBadRequest, + status: http.StatusBadRequest, + helperFunc: params.IsBadRequest, +}, { + err: errors.MethodNotAllowedf("something"), + code: params.CodeMethodNotAllowed, + status: http.StatusMethodNotAllowed, + helperFunc: params.IsMethodNotAllowed, +}, { + err: stderrors.New("an error"), + status: http.StatusInternalServerError, + code: "", +}, { + err: &common.DischargeRequiredError{ + Cause: errors.New("something"), + Macaroon: sampleMacaroon, + }, + status: http.StatusUnauthorized, + code: params.CodeDischargeRequired, + helperFunc: func(err error) bool { + err1, ok := err.(*params.Error) + if !ok || err1.Info == nil || err1.Info.Macaroon != sampleMacaroon { + return false + } + return true + }, +}, { + err: unhashableError{"foo"}, + status: http.StatusInternalServerError, + code: "", +}, { + err: common.UnknownModelError("dead-beef-123456"), code: params.CodeNotFound, + status: http.StatusNotFound, helperFunc: params.IsCodeNotFound, }, { - err: nil, - code: "", + err: nil, + code: "", + status: http.StatusOK, }} +var sampleMacaroon = func() *macaroon.Macaroon { + m, err := macaroon.New([]byte("key"), "id", "loc") + if err != nil { + panic(err) + } + return m +}() + type unhashableError []string func (err unhashableError) Error() string { @@ -145,16 +208,22 @@ func (s *errorsSuite) TestErrorTransform(c *gc.C) { for i, t := range errorTransformTests { c.Logf("running test %d: %T{%q}", i, t.err, t.err) - - err1 := common.ServerError(t.err) + err1, status := common.ServerErrorAndStatus(t.err) + + // Sanity check that ServerError returns the same thing. 
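RestoreError is the inverse of the transformation this table drives: ServerError flattens typed errors into codes, and RestoreError maps the supported codes back into typed errors. A round-trip sketch, assuming the github.com/juju/errors package and these common helpers:

    original := errors.NotFoundf("machine 42")
    wire := common.ServerError(original)  // *params.Error with CodeNotFound
    restored := common.RestoreError(wire) // *params.Error satisfies error
    errors.IsNotFound(restored)           // true: the code survives the trip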
+ err2 := common.ServerError(t.err) + c.Assert(err2, gc.DeepEquals, err1) + c.Assert(status, gc.Equals, t.status) + if t.err == nil { c.Assert(err1, gc.IsNil) - } else { - c.Assert(err1.Message, gc.Equals, t.err.Error()) - c.Assert(err1.Code, gc.Equals, t.code) - if t.helperFunc != nil { - c.Assert(err1, jc.Satisfies, t.helperFunc) - } + c.Assert(status, gc.Equals, http.StatusOK) + continue + } + c.Assert(err1.Message, gc.Equals, t.err.Error()) + c.Assert(err1.Code, gc.Equals, t.code) + if t.helperFunc != nil { + c.Assert(err1, jc.Satisfies, t.helperFunc) } // TODO(ericsnow) Remove this switch once the other error types are supported. @@ -162,10 +231,11 @@ case params.CodeHasAssignedUnits, params.CodeNoAddressSet, params.CodeUpgradeInProgress, - params.CodeMachineHasAttachedStorage: + params.CodeMachineHasAttachedStorage, + params.CodeDischargeRequired: continue case params.CodeNotFound: - if common.IsUnknownEnviromentError(t.err) { + if common.IsUnknownModelError(t.err) { continue } case params.CodeOperationBlocked: @@ -174,15 +244,12 @@ } c.Logf(" checking restore (%#v)", err1) - restored, ok := common.RestoreError(err1) + restored := common.RestoreError(err1) if t.err == nil { - c.Check(ok, jc.IsTrue) c.Check(restored, jc.ErrorIsNil) } else if t.code == "" { - c.Check(ok, jc.IsFalse) c.Check(restored.Error(), gc.Equals, t.err.Error()) } else { - c.Check(ok, jc.IsTrue) // TODO(ericsnow) Use a stricter DeepEquals check. c.Check(errors.Cause(restored), gc.FitsTypeOf, t.err) c.Check(restored.Error(), gc.Equals, t.err.Error()) @@ -190,7 +257,28 @@ } } -func (s *errorsSuite) TestUnknownEnvironment(c *gc.C) { - err := common.UnknownEnvironmentError("dead-beef") - c.Check(err, gc.ErrorMatches, `unknown environment: "dead-beef"`) +func (s *errorsSuite) TestUnknownModel(c *gc.C) { + err := common.UnknownModelError("dead-beef") + c.Check(err, gc.ErrorMatches, `unknown model: "dead-beef"`) +} + +func (s *errorsSuite) TestDestroyErr(c *gc.C) { + errs := []string{ + "error one", + "error two", + "error three", + } + ids := []string{ + "id1", + "id2", + "id3", + } + + c.Assert(common.DestroyErr("entities", ids, nil), jc.ErrorIsNil) + + err := common.DestroyErr("entities", ids, errs) + c.Assert(err, gc.ErrorMatches, "no entities were destroyed: error one; error two; error three") + + err = common.DestroyErr("entities", ids, errs[1:]) + c.Assert(err, gc.ErrorMatches, "some entities were not destroyed: error two; error three") } === modified file 'src/github.com/juju/juju/apiserver/common/export_test.go' --- src/github.com/juju/juju/apiserver/common/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/export_test.go 2016-03-22 15:18:22 +0000 @@ -3,15 +3,15 @@ package common -import "github.com/juju/juju/state" - var ( - MachineJobFromParams = machineJobFromParams - ValidateNewFacade = validateNewFacade - WrapNewFacade = wrapNewFacade - NilFacadeRecord = facadeRecord{} - EnvtoolsFindTools = &envtoolsFindTools - SendMetrics = &sendMetrics + MachineJobFromParams = machineJobFromParams + ValidateNewFacade = validateNewFacade + WrapNewFacade = wrapNewFacade + NilFacadeRecord = facadeRecord{} + EnvtoolsFindTools = &envtoolsFindTools + SendMetrics = &sendMetrics + MockableDestroyMachines = destroyMachines + IsUnknownModelError = isUnknownModelError ) type Patcher interface { @@ -31,8 +31,3 @@ func DescriptionFromVersions(name string, vers Versions) FacadeDescription { return descriptionFromVersions(name, versions(vers)) } - -func NewMultiNotifyWatcher(w ...state.NotifyWatcher) 
state.NotifyWatcher { - mw := newMultiNotifyWatcher(w...) - return mw -} === removed file 'src/github.com/juju/juju/apiserver/common/filesystems.go' --- src/github.com/juju/juju/apiserver/common/filesystems.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/filesystems.go 1970-01-01 00:00:00 +0000 @@ -1,167 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" - "github.com/juju/juju/storage/poolmanager" -) - -// FilesystemParams returns the parameters for creating or destroying the -// given filesystem. -func FilesystemParams( - f state.Filesystem, - storageInstance state.StorageInstance, - environConfig *config.Config, - poolManager poolmanager.PoolManager, -) (params.FilesystemParams, error) { - - var pool string - var size uint64 - if stateFilesystemParams, ok := f.Params(); ok { - pool = stateFilesystemParams.Pool - size = stateFilesystemParams.Size - } else { - filesystemInfo, err := f.Info() - if err != nil { - return params.FilesystemParams{}, errors.Trace(err) - } - pool = filesystemInfo.Pool - size = filesystemInfo.Size - } - - filesystemTags, err := storageTags(storageInstance, environConfig) - if err != nil { - return params.FilesystemParams{}, errors.Annotate(err, "computing storage tags") - } - - providerType, cfg, err := StoragePoolConfig(pool, poolManager) - if err != nil { - return params.FilesystemParams{}, errors.Trace(err) - } - result := params.FilesystemParams{ - f.Tag().String(), - "", // volume tag - size, - string(providerType), - cfg.Attrs(), - filesystemTags, - nil, // attachment params set by the caller - } - - volumeTag, err := f.Volume() - if err == nil { - result.VolumeTag = volumeTag.String() - } else if err != state.ErrNoBackingVolume { - return params.FilesystemParams{}, errors.Trace(err) - } - - return result, nil -} - -// FilesystemsToState converts a slice of params.Filesystem to a mapping -// of filesystem tags to state.FilesystemInfo. -func FilesystemsToState(in []params.Filesystem) (map[names.FilesystemTag]state.FilesystemInfo, error) { - m := make(map[names.FilesystemTag]state.FilesystemInfo) - for _, v := range in { - tag, filesystemInfo, err := FilesystemToState(v) - if err != nil { - return nil, errors.Trace(err) - } - m[tag] = filesystemInfo - } - return m, nil -} - -// FilesystemToState converts a params.Filesystem to state.FilesystemInfo -// and names.FilesystemTag. -func FilesystemToState(v params.Filesystem) (names.FilesystemTag, state.FilesystemInfo, error) { - filesystemTag, err := names.ParseFilesystemTag(v.FilesystemTag) - if err != nil { - return names.FilesystemTag{}, state.FilesystemInfo{}, errors.Trace(err) - } - return filesystemTag, state.FilesystemInfo{ - v.Info.Size, - "", // pool is set by state - v.Info.FilesystemId, - }, nil -} - -// FilesystemFromState converts a state.Filesystem to params.Filesystem. 
-func FilesystemFromState(f state.Filesystem) (params.Filesystem, error) { - info, err := f.Info() - if err != nil { - return params.Filesystem{}, errors.Trace(err) - } - result := params.Filesystem{ - f.FilesystemTag().String(), - "", - params.FilesystemInfo{ - info.FilesystemId, - info.Size, - }, - } - volumeTag, err := f.Volume() - if err == nil { - result.VolumeTag = volumeTag.String() - } else if err != state.ErrNoBackingVolume { - return params.Filesystem{}, errors.Trace(err) - } - return result, nil -} - -// FilesystemAttachmentToState converts a storage.FilesystemAttachment -// to a state.FilesystemAttachmentInfo. -func FilesystemAttachmentToState(in params.FilesystemAttachment) (names.MachineTag, names.FilesystemTag, state.FilesystemAttachmentInfo, error) { - machineTag, err := names.ParseMachineTag(in.MachineTag) - if err != nil { - return names.MachineTag{}, names.FilesystemTag{}, state.FilesystemAttachmentInfo{}, err - } - filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) - if err != nil { - return names.MachineTag{}, names.FilesystemTag{}, state.FilesystemAttachmentInfo{}, err - } - info := state.FilesystemAttachmentInfo{ - in.Info.MountPoint, - in.Info.ReadOnly, - } - return machineTag, filesystemTag, info, nil -} - -// FilesystemAttachmentFromState converts a state.FilesystemAttachment to params.FilesystemAttachment. -func FilesystemAttachmentFromState(v state.FilesystemAttachment) (params.FilesystemAttachment, error) { - info, err := v.Info() - if err != nil { - return params.FilesystemAttachment{}, errors.Trace(err) - } - return params.FilesystemAttachment{ - v.Filesystem().String(), - v.Machine().String(), - params.FilesystemAttachmentInfo{ - info.MountPoint, - info.ReadOnly, - }, - }, nil -} - -// ParseFilesystemAttachmentIds parses the strings, returning machine storage IDs. -func ParseFilesystemAttachmentIds(stringIds []string) ([]params.MachineStorageId, error) { - ids := make([]params.MachineStorageId, len(stringIds)) - for i, s := range stringIds { - m, f, err := state.ParseFilesystemAttachmentId(s) - if err != nil { - return nil, err - } - ids[i] = params.MachineStorageId{ - MachineTag: m.String(), - AttachmentTag: f.String(), - } - } - return ids, nil -} === modified file 'src/github.com/juju/juju/apiserver/common/interfaces.go' --- src/github.com/juju/juju/apiserver/common/interfaces.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/interfaces.go 2016-03-22 15:18:22 +0000 @@ -31,9 +31,9 @@ // as the given entity. AuthOwner(tag names.Tag) bool - // AuthEnvironManager returns whether the authenticated entity is + // AuthModelManager returns whether the authenticated entity is // a machine running the environment manager job. - AuthEnvironManager() bool + AuthModelManager() bool // AuthClient returns whether the authenticated entity // is a client user. === modified file 'src/github.com/juju/juju/apiserver/common/machine.go' --- src/github.com/juju/juju/apiserver/common/machine.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/machine.go 2016-03-22 15:18:22 +0000 @@ -28,14 +28,59 @@ switch job { case multiwatcher.JobHostUnits: return state.JobHostUnits, nil - case multiwatcher.JobManageEnviron: - return state.JobManageEnviron, nil + case multiwatcher.JobManageModel: + return state.JobManageModel, nil case multiwatcher.JobManageNetworking: return state.JobManageNetworking, nil - case multiwatcher.JobManageStateDeprecated: - // Deprecated in 1.18. 
- return state.JobManageStateDeprecated, nil default: return -1, errors.Errorf("invalid machine job %q", job) } } + +type origStateInterface interface { + Machine(string) (*state.Machine, error) +} + +type stateInterface interface { + Machine(string) (Machine, error) +} + +type stateShim struct { + origStateInterface +} + +func (st *stateShim) Machine(id string) (Machine, error) { + return st.origStateInterface.Machine(id) +} + +type Machine interface { + Life() state.Life + ForceDestroy() error + Destroy() error +} + +func DestroyMachines(st origStateInterface, force bool, ids ...string) error { + return destroyMachines(&stateShim{st}, force, ids...) +} + +func destroyMachines(st stateInterface, force bool, ids ...string) error { + var errs []string + for _, id := range ids { + machine, err := st.Machine(id) + switch { + case errors.IsNotFound(err): + err = errors.Errorf("machine %s does not exist", id) + case err != nil: + case force: + err = machine.ForceDestroy() + case machine.Life() != state.Alive: + continue + default: + err = machine.Destroy() + } + if err != nil { + errs = append(errs, err.Error()) + } + } + return DestroyErr("machines", ids, errs) +} === modified file 'src/github.com/juju/juju/apiserver/common/machine_test.go' --- src/github.com/juju/juju/apiserver/common/machine_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/machine_test.go 2016-03-22 15:18:22 +0000 @@ -4,6 +4,8 @@ package common_test import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" @@ -24,15 +26,12 @@ name: multiwatcher.JobHostUnits, want: state.JobHostUnits, }, { - name: multiwatcher.JobManageEnviron, - want: state.JobManageEnviron, + name: multiwatcher.JobManageModel, + want: state.JobManageModel, }, { name: multiwatcher.JobManageNetworking, want: state.JobManageNetworking, }, { - name: multiwatcher.JobManageStateDeprecated, - want: state.JobManageStateDeprecated, - }, { name: "invalid", want: -1, err: `invalid machine job "invalid"`, @@ -45,3 +44,84 @@ c.Check(got, gc.Equals, test.want) } } + +func (s *machineSuite) TestDestroyMachines(c *gc.C) { + st := mockState{ + machines: map[string]*mockMachine{ + "1": {}, + "2": {destroyErr: errors.New("unit exists error")}, + "3": {life: state.Dying}, + }, + } + err := common.MockableDestroyMachines(&st, false, "1", "2", "3", "4") + + c.Assert(st.machines["1"].Life(), gc.Equals, state.Dying) + c.Assert(st.machines["1"].forceDestroyCalled, jc.IsFalse) + + c.Assert(st.machines["2"].Life(), gc.Equals, state.Alive) + c.Assert(st.machines["2"].forceDestroyCalled, jc.IsFalse) + + c.Assert(st.machines["3"].forceDestroyCalled, jc.IsFalse) + c.Assert(st.machines["3"].destroyCalled, jc.IsFalse) + + c.Assert(err, gc.ErrorMatches, "some machines were not destroyed: unit exists error; machine 4 does not exist") +} + +func (s *machineSuite) TestForceDestroyMachines(c *gc.C) { + st := mockState{ + machines: map[string]*mockMachine{ + "1": {}, + "2": {life: state.Dying}, + }, + } + err := common.MockableDestroyMachines(&st, true, "1", "2") + + c.Assert(st.machines["1"].Life(), gc.Equals, state.Dying) + c.Assert(st.machines["1"].forceDestroyCalled, jc.IsTrue) + c.Assert(st.machines["2"].forceDestroyCalled, jc.IsTrue) + + c.Assert(err, jc.ErrorIsNil) +} + +type mockState struct { + state.State + machines map[string]*mockMachine +} + +func (st *mockState) Machine(id string) (common.Machine, error) { + if m, ok := st.machines[id]; ok { + return m, nil + } + return 
nil, errors.Errorf("machine %s does not exist", id) +} + +type mockMachine struct { + state.Machine + life state.Life + destroyErr error + forceDestroyErr error + forceDestroyCalled bool + destroyCalled bool +} + +func (m *mockMachine) Life() state.Life { + return m.life +} + +func (m *mockMachine) ForceDestroy() error { + m.forceDestroyCalled = true + if m.forceDestroyErr != nil { + return m.forceDestroyErr + } + m.life = state.Dying + return nil +} + +func (m *mockMachine) Destroy() error { + m.destroyCalled = true + if m.destroyErr != nil { + return m.destroyErr + } + m.life = state.Dying + return nil +} === added file 'src/github.com/juju/juju/apiserver/common/modeldestroy.go' --- src/github.com/juju/juju/apiserver/common/modeldestroy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/modeldestroy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,95 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/metricsender" + "github.com/juju/juju/state" +) + +var sendMetrics = func(st *state.State) error { + err := metricsender.SendMetrics(st, metricsender.DefaultMetricSender(), metricsender.DefaultMaxBatchesPerSend()) + return errors.Trace(err) +} + +// DestroyModelIncludingHosted sets the model to dying. Cleanup jobs then destroy +// all services and non-manager, non-manual machine instances in the specified +// model. This function assumes that all necessary authentication checks +// have been done. If the model is a controller hosting other +// models, they will also be destroyed. +func DestroyModelIncludingHosted(st *state.State, modelTag names.ModelTag) error { + return destroyModel(st, modelTag, true) +} + +// DestroyModel sets the environment to dying. Cleanup jobs then destroy +// all services and non-manager, non-manual machine instances in the specified +// model. This function assumes that all necessary authentication checks +// have been done. An error will be returned if this model is a +// controller hosting other model. +func DestroyModel(st *state.State, modelTag names.ModelTag) error { + return destroyModel(st, modelTag, false) +} + +func destroyModel(st *state.State, modelTag names.ModelTag, destroyHostedModels bool) error { + var err error + if modelTag != st.ModelTag() { + if st, err = st.ForModel(modelTag); err != nil { + return errors.Trace(err) + } + defer st.Close() + } + + if destroyHostedModels { + envs, err := st.AllModels() + if err != nil { + return errors.Trace(err) + } + for _, env := range envs { + envSt, err := st.ForModel(env.ModelTag()) + defer envSt.Close() + if err != nil { + return errors.Trace(err) + } + check := NewBlockChecker(envSt) + if err = check.DestroyAllowed(); err != nil { + return errors.Trace(err) + } + } + } else { + check := NewBlockChecker(st) + if err = check.DestroyAllowed(); err != nil { + return errors.Trace(err) + } + } + + env, err := st.Model() + if err != nil { + return errors.Trace(err) + } + + if destroyHostedModels { + if err := env.DestroyIncludingHosted(); err != nil { + return err + } + } else { + if err = env.Destroy(); err != nil { + return errors.Trace(err) + } + } + + err = sendMetrics(st) + if err != nil { + logger.Warningf("failed to send leftover metrics: %v", err) + } + + // Return to the caller. 
If it's the CLI, it will finish up by calling the + // provider's Destroy method, which will destroy the controllers, any + // straggler instances, and other provider-specific resources. Once all + // resources are torn down, the Undertaker worker handles the removal of + // the environment. + return nil +} === added file 'src/github.com/juju/juju/apiserver/common/modeldestroy_test.go' --- src/github.com/juju/juju/apiserver/common/modeldestroy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/modeldestroy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,403 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common_test + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/juju/names" + jtesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api" + "github.com/juju/juju/apiserver/client" + "github.com/juju/juju/apiserver/common" + commontesting "github.com/juju/juju/apiserver/common/testing" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/instance" + "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + jujutesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type destroyModelSuite struct { + testing.JujuConnSuite + commontesting.BlockHelper +} + +var _ = gc.Suite(&destroyModelSuite{}) + +func (s *destroyModelSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + s.BlockHelper = commontesting.NewBlockHelper(s.APIState) + s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() }) +} + +// setUpManual adds "manually provisioned" machines to state: +// one manager machine, and one non-manager. +func (s *destroyModelSuite) setUpManual(c *gc.C) (m0, m1 *state.Machine) { + m0, err := s.State.AddMachine("precise", state.JobManageModel) + c.Assert(err, jc.ErrorIsNil) + err = m0.SetProvisioned(instance.Id("manual:0"), "manual:0:fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + m1, err = s.State.AddMachine("precise", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + err = m1.SetProvisioned(instance.Id("manual:1"), "manual:1:fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + return m0, m1 +} + +// setUpInstances adds machines to state backed by instances: +// one manager machine, one non-manager, and a container in the +// non-manager. 
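DestroyModel and DestroyModelIncludingHosted differ only in the hosted-models flag they pass to destroyModel. A caller-side sketch; tearDown is hypothetical, and the state and names imports are assumed:

    func tearDown(st *state.State, tag names.ModelTag, withHosted bool) error {
        if withHosted {
            // Controller case: hosted models are destroyed too.
            return common.DestroyModelIncludingHosted(st, tag)
        }
        // Fails if the model is a controller hosting other models.
        return common.DestroyModel(st, tag)
    }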
+func (s *destroyModelSuite) setUpInstances(c *gc.C) (m0, m1, m2 *state.Machine) { + m0, err := s.State.AddMachine("precise", state.JobManageModel) + c.Assert(err, jc.ErrorIsNil) + inst, _ := testing.AssertStartInstance(c, s.Environ, m0.Id()) + err = m0.SetProvisioned(inst.Id(), "fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + + m1, err = s.State.AddMachine("precise", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + inst, _ = testing.AssertStartInstance(c, s.Environ, m1.Id()) + err = m1.SetProvisioned(inst.Id(), "fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + + m2, err = s.State.AddMachineInsideMachine(state.MachineTemplate{ + Series: "precise", + Jobs: []state.MachineJob{state.JobHostUnits}, + }, m1.Id(), instance.LXC) + c.Assert(err, jc.ErrorIsNil) + err = m2.SetProvisioned("container0", "fake_nonce", nil) + c.Assert(err, jc.ErrorIsNil) + + return m0, m1, m2 +} + +type testMetricSender struct { + jtesting.Stub +} + +func (t *testMetricSender) SendMetrics(st *state.State) error { + t.AddCall("SendMetrics") + return nil +} + +func (s *destroyModelSuite) TestMetrics(c *gc.C) { + metricSender := &testMetricSender{} + s.PatchValue(common.SendMetrics, metricSender.SendMetrics) + + err := common.DestroyModel(s.State, s.State.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + metricSender.CheckCalls(c, []jtesting.StubCall{{FuncName: "SendMetrics"}}) +} + +func (s *destroyModelSuite) TestDestroyModelManual(c *gc.C) { + _, nonManager := s.setUpManual(c) + + // If there are any non-manager manual machines in state, DestroyModel will + // error. It will not set the Dying flag on the environment. + err := common.DestroyModel(s.State, s.State.ModelTag()) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("failed to destroy model: manually provisioned machines must first be destroyed with `juju destroy-machine %s`", nonManager.Id())) + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Alive) + + // If we remove the non-manager machine, it should pass. + // Manager machines will remain. + err = nonManager.EnsureDead() + c.Assert(err, jc.ErrorIsNil) + err = nonManager.Remove() + c.Assert(err, jc.ErrorIsNil) + err = common.DestroyModel(s.State, s.State.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + err = env.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Dying) + +} + +func (s *destroyModelSuite) TestDestroyModel(c *gc.C) { + manager, nonManager, _ := s.setUpInstances(c) + managerId, _ := manager.InstanceId() + nonManagerId, _ := nonManager.InstanceId() + + instances, err := s.Environ.Instances([]instance.Id{managerId, nonManagerId}) + c.Assert(err, jc.ErrorIsNil) + for _, inst := range instances { + c.Assert(inst, gc.NotNil) + } + + services, err := s.State.AllServices() + c.Assert(err, jc.ErrorIsNil) + + err = common.DestroyModel(s.State, s.State.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + runAllCleanups(c, s.State) + + // After DestroyModel returns and all cleanup jobs have run, we should have: + // - all non-manager machines dying + assertLife(c, manager, state.Alive) + // Note: we leave the machine in a dead state and rely on the provisioner + // to stop the backing instances, remove the dead machines and finally + // remove all environment docs from state. + assertLife(c, nonManager, state.Dead) + + // - all services in state are Dying or Dead (or removed altogether), + // after running the state Cleanups. 
+ for _, s := range services { + err = s.Refresh() + if err != nil { + c.Assert(err, jc.Satisfies, errors.IsNotFound) + } else { + c.Assert(s.Life(), gc.Not(gc.Equals), state.Alive) + } + } + // - environment is Dying or Dead. + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Not(gc.Equals), state.Alive) +} + +func assertLife(c *gc.C, entity state.Living, life state.Life) { + err := entity.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(entity.Life(), gc.Equals, life) +} + +func (s *destroyModelSuite) TestBlockDestroyDestroyEnvironment(c *gc.C) { + // Setup environment + s.setUpInstances(c) + s.BlockDestroyModel(c, "TestBlockDestroyDestroyModel") + err := common.DestroyModel(s.State, s.State.ModelTag()) + s.AssertBlocked(c, err, "TestBlockDestroyDestroyModel") +} + +func (s *destroyModelSuite) TestBlockDestroyDestroyHostedModel(c *gc.C) { + otherSt := s.Factory.MakeModel(c, nil) + defer otherSt.Close() + info := s.APIInfo(c) + info.ModelTag = otherSt.ModelTag() + apiState, err := api.Open(info, api.DefaultDialOpts()) + + block := commontesting.NewBlockHelper(apiState) + defer block.Close() + + block.BlockDestroyModel(c, "TestBlockDestroyDestroyModel") + err = common.DestroyModelIncludingHosted(s.State, s.State.ModelTag()) + s.AssertBlocked(c, err, "TestBlockDestroyDestroyModel") +} + +func (s *destroyModelSuite) TestBlockRemoveDestroyModel(c *gc.C) { + // Setup model + s.setUpInstances(c) + s.BlockRemoveObject(c, "TestBlockRemoveDestroyModel") + err := common.DestroyModel(s.State, s.State.ModelTag()) + s.AssertBlocked(c, err, "TestBlockRemoveDestroyModel") +} + +func (s *destroyModelSuite) TestBlockChangesDestroyModel(c *gc.C) { + // Setup model + s.setUpInstances(c) + // lock model: can't destroy locked model + s.BlockAllChanges(c, "TestBlockChangesDestroyModel") + err := common.DestroyModel(s.State, s.State.ModelTag()) + s.AssertBlocked(c, err, "TestBlockChangesDestroyModel") +} + +type destroyTwoModelsSuite struct { + testing.JujuConnSuite + otherState *state.State + otherEnvOwner names.UserTag + otherEnvClient *client.Client +} + +var _ = gc.Suite(&destroyTwoModelsSuite{}) + +func (s *destroyTwoModelsSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + _, err := s.State.AddUser("jess", "jess", "", "test") + c.Assert(err, jc.ErrorIsNil) + s.otherEnvOwner = names.NewUserTag("jess") + s.otherState = factory.NewFactory(s.State).MakeModel(c, &factory.ModelParams{ + Owner: s.otherEnvOwner, + Prepare: true, + ConfigAttrs: jujutesting.Attrs{ + "controller": false, + }, + }) + s.AddCleanup(func(*gc.C) { s.otherState.Close() }) + + // get the client for the other model + auth := apiservertesting.FakeAuthorizer{ + Tag: s.otherEnvOwner, + EnvironManager: false, + } + s.otherEnvClient, err = client.NewClient(s.otherState, common.NewResources(), auth) + c.Assert(err, jc.ErrorIsNil) + +} + +func (s *destroyTwoModelsSuite) TestCleanupModelResources(c *gc.C) { + otherFactory := factory.NewFactory(s.otherState) + m := otherFactory.MakeMachine(c, nil) + otherFactory.MakeMachineNested(c, m.Id(), nil) + + err := common.DestroyModel(s.otherState, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + // Assert that the machines are not removed until the cleanup runs. 
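The teardown tests in this file all follow the same three beats: DestroyModel marks the model Dying, draining the cleanup queue destroys its contents, and ProcessDyingModel flips it to Dead. Condensed into one hedged helper; destroyAndReap is not part of the source, and it assumes dead machines are removed out of band, as assertAllMachinesDeadAndRemove does below:

    func destroyAndReap(st *state.State) error {
        if err := common.DestroyModel(st, st.ModelTag()); err != nil {
            return err // the model is left Alive on failure
        }
        // Drain pending cleanups, as runAllCleanups does in these tests.
        for {
            needed, err := st.NeedsCleanup()
            if err != nil {
                return err
            }
            if !needed {
                break
            }
            if err := st.Cleanup(); err != nil {
                return err
            }
        }
        // Dead machines must already be gone (the provisioner's job in
        // production) before the model can move from Dying to Dead.
        return st.ProcessDyingModel()
    }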
+ c.Assert(m.Refresh(), jc.ErrorIsNil) + assertMachineCount(c, s.otherState, 2) + runAllCleanups(c, s.otherState) + assertAllMachinesDeadAndRemove(c, s.otherState) + + otherEnv, err := s.otherState.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(otherEnv.Life(), gc.Equals, state.Dying) + + c.Assert(s.otherState.ProcessDyingModel(), jc.ErrorIsNil) + c.Assert(otherEnv.Refresh(), jc.ErrorIsNil) + c.Assert(otherEnv.Life(), gc.Equals, state.Dead) + +} + +// The provisioner will remove dead machines once their backing instances are +// stopped. For the tests, we remove them directly. +func assertAllMachinesDeadAndRemove(c *gc.C, st *state.State) { + machines, err := st.AllMachines() + c.Assert(err, jc.ErrorIsNil) + for _, m := range machines { + if m.IsManager() { + continue + } + if _, isContainer := m.ParentId(); isContainer { + continue + } + manual, err := m.IsManual() + c.Assert(err, jc.ErrorIsNil) + if manual { + continue + } + + c.Assert(m.Life(), gc.Equals, state.Dead) + c.Assert(m.Remove(), jc.ErrorIsNil) + } +} + +func (s *destroyTwoModelsSuite) TestDifferentStateModel(c *gc.C) { + otherFactory := factory.NewFactory(s.otherState) + otherFactory.MakeMachine(c, nil) + m := otherFactory.MakeMachine(c, nil) + otherFactory.MakeMachineNested(c, m.Id(), nil) + + // NOTE: pass in the main test State instance, which is 'bound' + // to the controller model. + err := common.DestroyModel(s.State, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + runAllCleanups(c, s.otherState) + assertAllMachinesDeadAndRemove(c, s.otherState) + + otherEnv, err := s.otherState.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.otherState.ProcessDyingModel(), jc.ErrorIsNil) + c.Assert(otherEnv.Refresh(), jc.ErrorIsNil) + c.Assert(otherEnv.Life(), gc.Equals, state.Dead) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Alive) +} + +func (s *destroyTwoModelsSuite) TestDestroyControllerAfterNonControllerIsDestroyed(c *gc.C) { + otherFactory := factory.NewFactory(s.otherState) + otherFactory.MakeMachine(c, nil) + m := otherFactory.MakeMachine(c, nil) + otherFactory.MakeMachineNested(c, m.Id(), nil) + + err := common.DestroyModel(s.State, s.State.ModelTag()) + c.Assert(err, gc.ErrorMatches, "failed to destroy model: hosting 1 other models") + + needsCleanup, err := s.State.NeedsCleanup() + c.Assert(err, jc.ErrorIsNil) + c.Assert(needsCleanup, jc.IsFalse) + + err = common.DestroyModel(s.State, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + err = common.DestroyModel(s.State, s.State.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + // Make sure we can continue to take the hosted model down while the + // controller environ is dying. 
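assertAllMachinesDeadAndRemove skips exactly the machines that model teardown does not reap: managers, containers, and manually provisioned machines. The predicate on its own, extracted as a hedged helper (reapable is not an exported name in the source):

    // reapable reports whether teardown is expected to have marked this
    // machine Dead.
    func reapable(m *state.Machine) (bool, error) {
        if m.IsManager() {
            return false, nil
        }
        if _, isContainer := m.ParentId(); isContainer {
            return false, nil
        }
        manual, err := m.IsManual()
        if err != nil {
            return false, err
        }
        return !manual, nil
    }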
+ runAllCleanups(c, s.otherState) + assertAllMachinesDeadAndRemove(c, s.otherState) + c.Assert(s.otherState.ProcessDyingModel(), jc.ErrorIsNil) + + otherEnv, err := s.otherState.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(otherEnv.Life(), gc.Equals, state.Dead) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Dying) + c.Assert(s.State.ProcessDyingModel(), jc.ErrorIsNil) + c.Assert(env.Refresh(), jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Dead) +} + +func (s *destroyTwoModelsSuite) TestDestroyControllerAndNonController(c *gc.C) { + otherFactory := factory.NewFactory(s.otherState) + otherFactory.MakeMachine(c, nil) + m := otherFactory.MakeMachine(c, nil) + otherFactory.MakeMachineNested(c, m.Id(), nil) + + err := common.DestroyModelIncludingHosted(s.State, s.State.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + runAllCleanups(c, s.State) + runAllCleanups(c, s.otherState) + assertAllMachinesDeadAndRemove(c, s.otherState) + + // Make sure we can continue to take the hosted model down while the + // controller model is dying. + c.Assert(s.otherState.ProcessDyingModel(), jc.ErrorIsNil) +} + +func (s *destroyTwoModelsSuite) TestCanDestroyNonBlockedModel(c *gc.C) { + bh := commontesting.NewBlockHelper(s.APIState) + defer bh.Close() + + bh.BlockDestroyModel(c, "TestBlockDestroyDestroyModel") + + err := common.DestroyModel(s.State, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + err = common.DestroyModel(s.State, s.State.ModelTag()) + bh.AssertBlocked(c, err, "TestBlockDestroyDestroyModel") +} + +func runAllCleanups(c *gc.C, st *state.State) { + needCleanup, err := st.NeedsCleanup() + c.Assert(err, jc.ErrorIsNil) + + for needCleanup { + err := st.Cleanup() + c.Assert(err, jc.ErrorIsNil) + needCleanup, err = st.NeedsCleanup() + c.Assert(err, jc.ErrorIsNil) + } +} + +func assertMachineCount(c *gc.C, st *state.State, count int) { + otherMachines, err := st.AllMachines() + c.Assert(err, jc.ErrorIsNil) + c.Assert(otherMachines, gc.HasLen, count) +} === added file 'src/github.com/juju/juju/apiserver/common/modelmachineswatcher.go' --- src/github.com/juju/juju/apiserver/common/modelmachineswatcher.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/modelmachineswatcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,51 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "fmt" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" +) + +// ModelMachinesWatcher implements a common WatchModelMachines +// method for use by various facades. +type ModelMachinesWatcher struct { + st state.ModelMachinesWatcher + resources *Resources + authorizer Authorizer +} + +// NewModelMachinesWatcher returns a new ModelMachinesWatcher. The +// GetAuthFunc will be used on each invocation of WatchUnits to +// determine current permissions. +func NewModelMachinesWatcher(st state.ModelMachinesWatcher, resources *Resources, authorizer Authorizer) *ModelMachinesWatcher { + return &ModelMachinesWatcher{ + st: st, + resources: resources, + authorizer: authorizer, + } +} + +// WatchModelMachines returns a StringsWatcher that notifies of +// changes to the life cycles of the top level machines in the current +// model. 
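WatchModelMachines is gated on AuthModelManager before any watcher is created, which is what the TestWatchAuthError case below exercises. The gate in isolation; guard is a hypothetical name:

    func guard(auth common.Authorizer) error {
        if !auth.AuthModelManager() {
            return common.ErrPerm
        }
        return nil
    }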
+func (e *ModelMachinesWatcher) WatchModelMachines() (params.StringsWatchResult, error) { + result := params.StringsWatchResult{} + if !e.authorizer.AuthModelManager() { + return result, ErrPerm + } + watch := e.st.WatchModelMachines() + // Consume the initial event and forward it to the result. + if changes, ok := <-watch.Changes(); ok { + result.StringsWatcherId = e.resources.Register(watch) + result.Changes = changes + } else { + err := watcher.EnsureErr(watch) + return result, fmt.Errorf("cannot obtain initial model machines: %v", err) + } + return result, nil +} === added file 'src/github.com/juju/juju/apiserver/common/modelmachineswatcher_test.go' --- src/github.com/juju/juju/apiserver/common/modelmachineswatcher_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/modelmachineswatcher_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,69 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common_test + +import ( + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" +) + +type modelMachinesWatcherSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&modelMachinesWatcherSuite{}) + +type fakeModelMachinesWatcher struct { + state.ModelMachinesWatcher + initial []string +} + +func (f *fakeModelMachinesWatcher) WatchModelMachines() state.StringsWatcher { + changes := make(chan []string, 1) + // Simulate initial event. + changes <- f.initial + return &fakeStringsWatcher{changes} +} + +func (s *modelMachinesWatcherSuite) TestWatchModelMachines(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("0"), + EnvironManager: true, + } + resources := common.NewResources() + s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) + e := common.NewModelMachinesWatcher( + &fakeModelMachinesWatcher{initial: []string{"foo"}}, + resources, + authorizer, + ) + result, err := e.WatchModelMachines() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, params.StringsWatchResult{"1", []string{"foo"}, nil}) + c.Assert(resources.Count(), gc.Equals, 1) +} + +func (s *modelMachinesWatcherSuite) TestWatchAuthError(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("1"), + EnvironManager: false, + } + resources := common.NewResources() + s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) + e := common.NewModelMachinesWatcher( + &fakeModelMachinesWatcher{}, + resources, + authorizer, + ) + _, err := e.WatchModelMachines() + c.Assert(err, gc.ErrorMatches, "permission denied") + c.Assert(resources.Count(), gc.Equals, 0) +} === added file 'src/github.com/juju/juju/apiserver/common/modelwatcher.go' --- src/github.com/juju/juju/apiserver/common/modelwatcher.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/modelwatcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,84 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package common + +import ( + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" +) + +// ModelWatcher implements two common methods for use by various +// facades - WatchForModelConfigChanges and ModelConfig. 
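Both watcher facades here share one contract: consume the initial event, register the live watcher as a resource, and return its id; a closed channel is converted into an error via watcher.EnsureErr. The shared shape for the notify case, condensed; registerNotify is hypothetical, with the common, state, and state/watcher imports assumed:

    func registerNotify(res *common.Resources, w state.NotifyWatcher) (string, error) {
        // An initial event is always pending; a closed channel means the
        // watcher is already dead.
        if _, ok := <-w.Changes(); !ok {
            return "", watcher.EnsureErr(w)
        }
        return res.Register(w), nil
    }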
+type ModelWatcher struct { + st state.ModelAccessor + resources *Resources + authorizer Authorizer +} + +// NewModelWatcher returns a new ModelWatcher. Active watchers +// will be stored in the provided Resources. The two GetAuthFunc +// callbacks will be used on each invocation of the methods to +// determine current permissions. +// Right now, environment tags are not used, so both created AuthFuncs +// are called with "" for tag, which means "the current environment". +func NewModelWatcher(st state.ModelAccessor, resources *Resources, authorizer Authorizer) *ModelWatcher { + return &ModelWatcher{ + st: st, + resources: resources, + authorizer: authorizer, + } +} + +// WatchForModelConfigChanges returns a NotifyWatcher that observes +// changes to the environment configuration. +// Note that although the NotifyWatchResult contains an Error field, +// it's not used because we are only returning a single watcher, +// so we use the regular error return. +func (e *ModelWatcher) WatchForModelConfigChanges() (params.NotifyWatchResult, error) { + result := params.NotifyWatchResult{} + watch := e.st.WatchForModelConfigChanges() + // Consume the initial event. Technically, API + // calls to Watch 'transmit' the initial event + // in the Watch response. But NotifyWatchers + // have no state to transmit. + if _, ok := <-watch.Changes(); ok { + result.NotifyWatcherId = e.resources.Register(watch) + } else { + return result, watcher.EnsureErr(watch) + } + return result, nil +} + +// ModelConfig returns the current environment's configuration. +func (e *ModelWatcher) ModelConfig() (params.ModelConfigResult, error) { + result := params.ModelConfigResult{} + + config, err := e.st.ModelConfig() + if err != nil { + return result, err + } + allAttrs := config.AllAttrs() + + if !e.authorizer.AuthModelManager() { + // Mask out any secrets in the environment configuration + // with values of the same type, so it'll pass validation. + // + // TODO(dimitern) 201309-26 bug #1231384 + // Delete the code below and mark the bug as fixed, + // once it's live tested on MAAS and 1.16 compatibility + // is dropped. + provider, err := environs.Provider(config.Type()) + if err != nil { + return result, err + } + secretAttrs, err := provider.SecretAttrs(config) + for k := range secretAttrs { + allAttrs[k] = "not available" + } + } + result.Config = allAttrs + return result, nil +} === added file 'src/github.com/juju/juju/apiserver/common/modelwatcher_test.go' --- src/github.com/juju/juju/apiserver/common/modelwatcher_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/modelwatcher_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,136 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
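The masking step in ModelConfig swaps each secret attribute for a placeholder of the same type, so the result still validates. Isolated here as a hedged helper, with the SecretAttrs error checked for completeness (the shipped code leaves it unchecked):

    func maskSecrets(p environs.EnvironProvider, cfg *config.Config, attrs map[string]interface{}) error {
        secret, err := p.SecretAttrs(cfg)
        if err != nil {
            return err
        }
        for k := range secret {
            attrs[k] = "not available" // placeholder keeps the attribute present
        }
        return nil
    }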
+ +package common_test + +import ( + "fmt" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/configstore" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/provider/dummy" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" +) + +type environWatcherSuite struct { + testing.BaseSuite + + testingEnvConfig *config.Config +} + +var _ = gc.Suite(&environWatcherSuite{}) + +type fakeModelAccessor struct { + modelConfig *config.Config + modelConfigError error +} + +func (*fakeModelAccessor) WatchForModelConfigChanges() state.NotifyWatcher { + changes := make(chan struct{}, 1) + // Simulate initial event. + changes <- struct{}{} + return &fakeNotifyWatcher{changes: changes} +} + +func (f *fakeModelAccessor) ModelConfig() (*config.Config, error) { + if f.modelConfigError != nil { + return nil, f.modelConfigError + } + return f.modelConfig, nil +} + +func (s *environWatcherSuite) TearDownTest(c *gc.C) { + dummy.Reset() + s.BaseSuite.TearDownTest(c) +} + +func (s *environWatcherSuite) TestWatchSuccess(c *gc.C) { + resources := common.NewResources() + s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) + e := common.NewModelWatcher( + &fakeModelAccessor{}, + resources, + nil, + ) + result, err := e.WatchForModelConfigChanges() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.NotifyWatchResult{"1", nil}) + c.Assert(resources.Count(), gc.Equals, 1) +} + +func (*environWatcherSuite) TestModelConfigSuccess(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("0"), + EnvironManager: true, + } + testingEnvConfig := testingEnvConfig(c) + e := common.NewModelWatcher( + &fakeModelAccessor{modelConfig: testingEnvConfig}, + nil, + authorizer, + ) + result, err := e.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + // Make sure we can read the secret attribute (i.e. it's not masked). + c.Check(result.Config["secret"], gc.Equals, "pork") + c.Check(map[string]interface{}(result.Config), jc.DeepEquals, testingEnvConfig.AllAttrs()) +} + +func (*environWatcherSuite) TestModelConfigFetchError(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("0"), + EnvironManager: true, + } + e := common.NewModelWatcher( + &fakeModelAccessor{ + modelConfigError: fmt.Errorf("pow"), + }, + nil, + authorizer, + ) + _, err := e.ModelConfig() + c.Assert(err, gc.ErrorMatches, "pow") +} + +func (*environWatcherSuite) TestModelConfigMaskedSecrets(c *gc.C) { + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("0"), + EnvironManager: false, + } + testingEnvConfig := testingEnvConfig(c) + e := common.NewModelWatcher( + &fakeModelAccessor{modelConfig: testingEnvConfig}, + nil, + authorizer, + ) + result, err := e.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + // Make sure the secret attribute is masked. + c.Check(result.Config["secret"], gc.Equals, "not available") + // And only that is masked. 
+ result.Config["secret"] = "pork" + c.Check(map[string]interface{}(result.Config), jc.DeepEquals, testingEnvConfig.AllAttrs()) +} + +func testingEnvConfig(c *gc.C) *config.Config { + cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) + c.Assert(err, jc.ErrorIsNil) + env, err := environs.Prepare( + modelcmd.BootstrapContext(testing.Context(c)), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) + c.Assert(err, jc.ErrorIsNil) + return env.Config() +} === removed file 'src/github.com/juju/juju/apiserver/common/networking.go' --- src/github.com/juju/juju/apiserver/common/networking.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/networking.go 1970-01-01 00:00:00 +0000 @@ -1,126 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/network" - providercommon "github.com/juju/juju/provider/common" -) - -// BackingSubnet defines the methods supported by a Subnet entity -// stored persistently. -// -// TODO(dimitern): Once the state backing is implemented, remove this -// and just use *state.Subnet. -type BackingSubnet interface { - CIDR() string - VLANTag() int - ProviderId() string - AvailabilityZones() []string - Status() string - SpaceName() string - Life() params.Life -} - -// BackingSubnetInfo describes a single subnet to be added in the -// backing store. -// -// TODO(dimitern): Replace state.SubnetInfo with this and remove -// BackingSubnetInfo, once the rest of state backing methods and the -// following pre-reqs are done: -// * subnetDoc.AvailabilityZone becomes subnetDoc.AvailabilityZones, -// adding an upgrade step to migrate existing non empty zones on -// subnet docs. Also change state.Subnet.AvailabilityZone to -// * add subnetDoc.SpaceName - no upgrade step needed, as it will only -// be used for new space-aware subnets. -// * Subnets need a reference count to calculate Status. -// * ensure EC2 and MAAS providers accept empty IDs as Subnets() args -// and return all subnets, including the AvailabilityZones (for EC2; -// empty for MAAS as zones are orthogonal to networks). -type BackingSubnetInfo struct { - // ProviderId is a provider-specific network id. This may be empty. - ProviderId string - - // CIDR of the network, in 123.45.67.89/24 format. - CIDR string - - // VLANTag needs to be between 1 and 4094 for VLANs and 0 for normal - // networks. It's defined by IEEE 802.1Q standard. - VLANTag int - - // AllocatableIPHigh and Low describe the allocatable portion of the - // subnet. The remainder, if any, is reserved by the provider. - // Either both of these must be set or neither, if they're empty it - // means that none of the subnet is allocatable. If present they must - // be valid IP addresses within the subnet CIDR. - AllocatableIPHigh string - AllocatableIPLow string - - // AvailabilityZones describes which availability zone(s) this - // subnet is in. It can be empty if the provider does not support - // availability zones. - AvailabilityZones []string - - // SpaceName holds the juju network space this subnet is - // associated with. Can be empty if not supported. - SpaceName string - - // Status holds the status of the subnet. Normally this will be - // calculated from the reference count and Life of a subnet. 
- Status string - - // Live holds the life of the subnet - Life params.Life -} - -// BackingSpace defines the methods supported by a Space entity stored -// persistently. -type BackingSpace interface { - // Name returns the space name. - Name() string - - // Subnets returns the subnets in the space - Subnets() ([]BackingSubnet, error) - - // ProviderId returns the network ID of the provider - ProviderId() network.Id - - // Zones returns a list of availability zone(s) that this - // space is in. It can be empty if the provider does not support - // availability zones. - Zones() []string - - // Life returns the lifecycle state of the space - Life() params.Life -} - -// Backing defines the methods needed by the API facade to store and -// retrieve information from the underlying persistency layer (state -// DB). -type NetworkBacking interface { - // EnvironConfig returns the current environment config. - EnvironConfig() (*config.Config, error) - - // AvailabilityZones returns all cached availability zones (i.e. - // not from the provider, but in state). - AvailabilityZones() ([]providercommon.AvailabilityZone, error) - - // SetAvailabilityZones replaces the cached list of availability - // zones with the given zones. - SetAvailabilityZones([]providercommon.AvailabilityZone) error - - // AddSpace creates a space - AddSpace(Name string, Subnets []string, Public bool) error - - // AllSpaces returns all known Juju network spaces. - AllSpaces() ([]BackingSpace, error) - - // AddSubnet creates a backing subnet for an existing subnet. - AddSubnet(BackingSubnetInfo) (BackingSubnet, error) - - // AllSubnets returns all backing subnets. - AllSubnets() ([]BackingSubnet, error) -} === added directory 'src/github.com/juju/juju/apiserver/common/networkingcommon' === added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/package_test.go' --- src/github.com/juju/juju/apiserver/common/networkingcommon/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/networkingcommon/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package networkingcommon_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/shims.go' --- src/github.com/juju/juju/apiserver/common/networkingcommon/shims.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/networkingcommon/shims.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,156 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package networkingcommon + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/network" + providercommon "github.com/juju/juju/provider/common" + "github.com/juju/juju/state" +) + +// NOTE: All of the following code is only tested with a feature test. + +// subnetShim forwards and adapts state.Subnets methods to BackingSubnet. 
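The shims below all use the same Go idiom: embed the target interface to inherit its method set, hold the concrete state value in a field, and override only what is needed. The pattern in miniature, with hypothetical names; note that calling an unoverridden method on the nil embedded interface panics, so a shim must override everything its callers use:

    type Namer interface {
        Name() string
        Size() int
    }

    // concrete stands in for a *state.* type that holds the data but does
    // not satisfy the interface directly.
    type concrete struct{ name string }

    func (c *concrete) RawName() string { return c.name }

    // namerShim satisfies Namer: Size is promoted from the embedded
    // interface, while Name delegates to the concrete value.
    type namerShim struct {
        Namer
        impl *concrete
    }

    func (s *namerShim) Name() string { return s.impl.RawName() }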
+type subnetShim struct { + BackingSubnet + subnet *state.Subnet +} + +func (s *subnetShim) CIDR() string { + return s.subnet.CIDR() +} + +func (s *subnetShim) VLANTag() int { + return s.subnet.VLANTag() +} + +func (s *subnetShim) ProviderId() network.Id { + return s.subnet.ProviderId() +} + +func (s *subnetShim) AvailabilityZones() []string { + // TODO(dimitern): Add multiple zones to state.Subnet. + return []string{s.subnet.AvailabilityZone()} +} + +func (s *subnetShim) Life() params.Life { + return params.Life(s.subnet.Life().String()) +} + +func (s *subnetShim) Status() string { + // TODO(dimitern): This should happen in a cleaner way. + if s.Life() != params.Alive { + return "terminating" + } + return "in-use" +} + +func (s *subnetShim) SpaceName() string { + return s.subnet.SpaceName() +} + +// spaceShim forwards and adapts state.Space methods to BackingSpace. +type spaceShim struct { + BackingSpace + space *state.Space +} + +func (s *spaceShim) Name() string { + return s.space.Name() +} + +func (s *spaceShim) ProviderId() network.Id { + return s.space.ProviderId() +} + +func (s *spaceShim) Subnets() ([]BackingSubnet, error) { + results, err := s.space.Subnets() + if err != nil { + return nil, errors.Trace(err) + } + subnets := make([]BackingSubnet, len(results)) + for i, result := range results { + subnets[i] = &subnetShim{subnet: result} + } + return subnets, nil +} + +func NewStateShim(st *state.State) *stateShim { + return &stateShim{st: st} +} + +// stateShim forwards and adapts state.State methods to Backing +// method. +type stateShim struct { + NetworkBacking + st *state.State +} + +func (s *stateShim) ModelConfig() (*config.Config, error) { + return s.st.ModelConfig() +} + +func (s *stateShim) AddSpace(name string, providerId network.Id, subnetIds []string, public bool) error { + _, err := s.st.AddSpace(name, providerId, subnetIds, public) + return err +} + +func (s *stateShim) AllSpaces() ([]BackingSpace, error) { + // TODO(dimitern): Make this ListSpaces() instead. + results, err := s.st.AllSpaces() + if err != nil { + return nil, errors.Trace(err) + } + spaces := make([]BackingSpace, len(results)) + for i, result := range results { + spaces[i] = &spaceShim{space: result} + } + return spaces, nil +} + +func (s *stateShim) AddSubnet(info BackingSubnetInfo) (BackingSubnet, error) { + // TODO(dimitern): Add multiple AZs per subnet in state. + var firstZone string + if len(info.AvailabilityZones) > 0 { + firstZone = info.AvailabilityZones[0] + } + _, err := s.st.AddSubnet(state.SubnetInfo{ + CIDR: info.CIDR, + VLANTag: info.VLANTag, + ProviderId: info.ProviderId, + AvailabilityZone: firstZone, + SpaceName: info.SpaceName, + }) + return nil, err // Drop the first result, as it's unused. +} + +func (s *stateShim) AllSubnets() ([]BackingSubnet, error) { + results, err := s.st.AllSubnets() + if err != nil { + return nil, errors.Trace(err) + } + subnets := make([]BackingSubnet, len(results)) + for i, result := range results { + subnets[i] = &subnetShim{subnet: result} + } + return subnets, nil +} + +type availZoneShim struct{} + +func (availZoneShim) Name() string { return "not-set" } +func (availZoneShim) Available() bool { return true } + +func (s *stateShim) AvailabilityZones() ([]providercommon.AvailabilityZone, error) { + // TODO(dimitern): Fix this to get them from state when available! 
+ return nil, nil +} + +func (s *stateShim) SetAvailabilityZones(zones []providercommon.AvailabilityZone) error { + return nil +} === added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/spaces.go' --- src/github.com/juju/juju/apiserver/common/networkingcommon/spaces.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/networkingcommon/spaces.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,85 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package networkingcommon + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/network" +) + +// SupportsSpaces checks if the environment implements NetworkingEnviron +// and also if it supports spaces. +func SupportsSpaces(backing NetworkBacking) error { + config, err := backing.ModelConfig() + if err != nil { + return errors.Annotate(err, "getting model config") + } + env, err := environs.New(config) + if err != nil { + return errors.Annotate(err, "validating model config") + } + netEnv, ok := environs.SupportsNetworking(env) + if !ok { + return errors.NotSupportedf("networking") + } + ok, err = netEnv.SupportsSpaces() + if !ok { + if err != nil && !errors.IsNotSupported(err) { + logger.Warningf("checking model spaces support failed with: %v", err) + } + return errors.NotSupportedf("spaces") + } + return nil +} + +// CreateSpaces creates a new Juju network space, associating the +// specified subnets with it (optional; can be empty). +func CreateSpaces(backing NetworkBacking, args params.CreateSpacesParams) (results params.ErrorResults, err error) { + err = SupportsSpaces(backing) + if err != nil { + return results, common.ServerError(errors.Trace(err)) + } + + results.Results = make([]params.ErrorResult, len(args.Spaces)) + + for i, space := range args.Spaces { + err := createOneSpace(backing, space) + if err == nil { + continue + } + results.Results[i].Error = common.ServerError(errors.Trace(err)) + } + + return results, nil +} + +func createOneSpace(backing NetworkBacking, args params.CreateSpaceParams) error { + // Validate the args, assemble information for api.backing.AddSpaces + var subnets []string + + spaceTag, err := names.ParseSpaceTag(args.SpaceTag) + if err != nil { + return errors.Trace(err) + } + + for _, tag := range args.SubnetTags { + subnetTag, err := names.ParseSubnetTag(tag) + if err != nil { + return errors.Trace(err) + } + subnets = append(subnets, subnetTag.Id()) + } + + // Add the validated space. + err = backing.AddSpace(spaceTag.Id(), network.Id(args.ProviderId), subnets, args.Public) + if err != nil { + return errors.Trace(err) + } + return nil +} === added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/spaces_test.go' --- src/github.com/juju/juju/apiserver/common/networkingcommon/spaces_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/networkingcommon/spaces_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,234 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
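+//
+// The suite below exercises CreateSpaces, which follows the apiserver's
+// usual bulk-call convention: one result slot per input, with per-item
+// errors rather than a single failure aborting the whole batch. A
+// stdlib-only sketch of that shape (all names illustrative, not part of
+// the API):
+//
+//	func createAll(items []string, createOne func(string) error) []error {
+//		errs := make([]error, len(items))
+//		for i, item := range items {
+//			errs[i] = createOne(item) // record the error and keep going
+//		}
+//		return errs
+//	}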
+ +package networkingcommon_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common/networkingcommon" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/network" + coretesting "github.com/juju/juju/testing" +) + +type SpacesSuite struct { + coretesting.BaseSuite + apiservertesting.StubNetwork +} + +var _ = gc.Suite(&SpacesSuite{}) + +func (s *SpacesSuite) SetUpSuite(c *gc.C) { + s.StubNetwork.SetUpSuite(c) + s.BaseSuite.SetUpSuite(c) +} + +func (s *SpacesSuite) TearDownSuite(c *gc.C) { + s.BaseSuite.TearDownSuite(c) +} + +func (s *SpacesSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedNetworkingEnvironName, + apiservertesting.WithZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) +} + +func (s *SpacesSuite) TearDownTest(c *gc.C) { + s.BaseSuite.TearDownTest(c) +} + +type checkCreateSpacesParams struct { + Name string + Subnets []string + Error string + Public bool + ProviderId string +} + +func (s *SpacesSuite) checkCreateSpaces(c *gc.C, p checkCreateSpacesParams) { + args := params.CreateSpaceParams{} + if p.Name != "" { + args.SpaceTag = "space-" + p.Name + } + if len(p.Subnets) > 0 { + for _, cidr := range p.Subnets { + args.SubnetTags = append(args.SubnetTags, "subnet-"+cidr) + } + } + args.Public = p.Public + args.ProviderId = p.ProviderId + + spaces := params.CreateSpacesParams{} + spaces.Spaces = append(spaces.Spaces, args) + results, err := networkingcommon.CreateSpaces(apiservertesting.BackingInstance, spaces) + + c.Assert(len(results.Results), gc.Equals, 1) + c.Assert(err, gc.IsNil) + if p.Error == "" { + c.Assert(results.Results[0].Error, gc.IsNil) + } else { + c.Assert(results.Results[0].Error, gc.NotNil) + c.Assert(results.Results[0].Error, gc.ErrorMatches, p.Error) + } + + baseCalls := []apiservertesting.StubMethodCall{ + apiservertesting.BackingCall("ModelConfig"), + apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), + apiservertesting.ZonedNetworkingEnvironCall("SupportsSpaces"), + } + + addSpaceCalls := append(baseCalls, apiservertesting.BackingCall("AddSpace", p.Name, network.Id(p.ProviderId), p.Subnets, p.Public)) + + if p.Error == "" { + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, addSpaceCalls...) + } else { + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, baseCalls...) 
+	}
+}
+
+func (s *SpacesSuite) TestCreateInvalidSpace(c *gc.C) {
+	p := checkCreateSpacesParams{
+		Name:    "-",
+		Subnets: []string{"10.0.0.0/24"},
+		Error:   `"space--" is not a valid space tag`,
+	}
+	s.checkCreateSpaces(c, p)
+}
+
+func (s *SpacesSuite) TestCreateInvalidSubnet(c *gc.C) {
+	p := checkCreateSpacesParams{
+		Name:    "foo",
+		Subnets: []string{"bar"},
+		Error:   `"subnet-bar" is not a valid subnet tag`,
+	}
+	s.checkCreateSpaces(c, p)
+}
+
+func (s *SpacesSuite) TestPublic(c *gc.C) {
+	p := checkCreateSpacesParams{
+		Name:    "foo",
+		Subnets: []string{"10.0.0.0/24"},
+		Public:  true,
+	}
+	s.checkCreateSpaces(c, p)
+}
+
+func (s *SpacesSuite) TestProviderId(c *gc.C) {
+	p := checkCreateSpacesParams{
+		Name:       "foo",
+		Subnets:    []string{"10.0.0.0/24"},
+		ProviderId: "foobar",
+	}
+	s.checkCreateSpaces(c, p)
+}
+
+func (s *SpacesSuite) TestEmptySpaceName(c *gc.C) {
+	p := checkCreateSpacesParams{
+		Subnets: []string{"10.0.0.0/24"},
+		Error:   `"" is not a valid tag`,
+	}
+	s.checkCreateSpaces(c, p)
+}
+
+func (s *SpacesSuite) TestNoSubnets(c *gc.C) {
+	p := checkCreateSpacesParams{
+		Name:    "foo",
+		Subnets: nil,
+	}
+	s.checkCreateSpaces(c, p)
+}
+
+func (s *SpacesSuite) TestCreateSpacesModelConfigError(c *gc.C) {
+	apiservertesting.SharedStub.SetErrors(
+		errors.New("boom"), // Backing.ModelConfig()
+	)
+
+	spaces := params.CreateSpacesParams{}
+	_, err := networkingcommon.CreateSpaces(apiservertesting.BackingInstance, spaces)
+	c.Assert(err, gc.ErrorMatches, "getting model config: boom")
+}
+
+func (s *SpacesSuite) TestCreateSpacesProviderOpenError(c *gc.C) {
+	apiservertesting.SharedStub.SetErrors(
+		nil,                // Backing.ModelConfig()
+		errors.New("boom"), // Provider.Open()
+	)
+
+	spaces := params.CreateSpacesParams{}
+	_, err := networkingcommon.CreateSpaces(apiservertesting.BackingInstance, spaces)
+	c.Assert(err, gc.ErrorMatches, "validating model config: boom")
+}
+
+func (s *SpacesSuite) TestCreateSpacesNotSupportedError(c *gc.C) {
+	apiservertesting.SharedStub.SetErrors(
+		nil, // Backing.ModelConfig()
+		nil, // Provider.Open()
+		errors.NotSupportedf("spaces"), // ZonedNetworkingEnviron.SupportsSpaces()
+	)
+
+	spaces := params.CreateSpacesParams{}
+	_, err := networkingcommon.CreateSpaces(apiservertesting.BackingInstance, spaces)
+	c.Assert(err, gc.ErrorMatches, "spaces not supported")
+}
+
+func (s *SpacesSuite) TestSupportsSpacesModelConfigError(c *gc.C) {
+	apiservertesting.SharedStub.SetErrors(
+		errors.New("boom"), // Backing.ModelConfig()
+	)
+
+	err := networkingcommon.SupportsSpaces(apiservertesting.BackingInstance)
+	c.Assert(err, gc.ErrorMatches, "getting model config: boom")
+}
+
+func (s *SpacesSuite) TestSupportsSpacesEnvironNewError(c *gc.C) {
+	apiservertesting.SharedStub.SetErrors(
+		nil,                // Backing.ModelConfig()
+		errors.New("boom"), // environs.New()
+	)
+
+	err := networkingcommon.SupportsSpaces(apiservertesting.BackingInstance)
+	c.Assert(err, gc.ErrorMatches, "validating model config: boom")
+}
+
+func (s *SpacesSuite) TestSupportsSpacesWithoutNetworking(c *gc.C) {
+	apiservertesting.BackingInstance.SetUp(
+		c,
+		apiservertesting.StubEnvironName,
+		apiservertesting.WithoutZones,
+		apiservertesting.WithoutSpaces,
+		apiservertesting.WithoutSubnets)
+
+	err := networkingcommon.SupportsSpaces(apiservertesting.BackingInstance)
+	c.Assert(err, jc.Satisfies, errors.IsNotSupported)
+}
+
+func (s *SpacesSuite) TestSupportsSpacesWithoutSpaces(c *gc.C) {
+	apiservertesting.BackingInstance.SetUp(
+		c,
+		apiservertesting.StubNetworkingEnvironName,
+		apiservertesting.WithoutZones,
+		apiservertesting.WithoutSpaces,
+		apiservertesting.WithoutSubnets)
+
+	apiservertesting.SharedStub.SetErrors(
+		nil, // Backing.ModelConfig()
+		nil, // environs.New()
+		errors.New("boom"), // Backing.SupportsSpaces()
+	)
+
+	err := networkingcommon.SupportsSpaces(apiservertesting.BackingInstance)
+	c.Assert(err, jc.Satisfies, errors.IsNotSupported)
+}
+
+func (s *SpacesSuite) TestSupportsSpaces(c *gc.C) {
+	err := networkingcommon.SupportsSpaces(apiservertesting.BackingInstance)
+	c.Assert(err, jc.ErrorIsNil)
+}

=== added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/subnets.go'
--- src/github.com/juju/juju/apiserver/common/networkingcommon/subnets.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/common/networkingcommon/subnets.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,533 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package networkingcommon
+
+import (
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/names"
+	"github.com/juju/utils/set"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/instance"
+	"github.com/juju/juju/network"
+	providercommon "github.com/juju/juju/provider/common"
+)
+
+var logger = loggo.GetLogger("juju.apiserver.common.networkingcommon.subnets")
+
+// addSubnetsCache holds cached lists of spaces, zones, and subnets, used for
+// fast lookups while adding subnets.
+type addSubnetsCache struct {
+	api            NetworkBacking
+	allSpaces      set.Strings          // all defined backing spaces
+	allZones       set.Strings          // all known provider zones
+	availableZones set.Strings          // all the available zones
+	allSubnets     []network.SubnetInfo // all (valid) provider subnets
+	// providerIdsByCIDR maps possibly duplicated CIDRs to one or more ids.
+	providerIdsByCIDR map[string]set.Strings
+	// subnetsByProviderId maps unique subnet ProviderIds to pointers
+	// to entries in allSubnets.
+	subnetsByProviderId map[string]*network.SubnetInfo
+}
+
+func NewAddSubnetsCache(api NetworkBacking) *addSubnetsCache {
+	// Empty cache initially.
+	return &addSubnetsCache{
+		api:                 api,
+		allSpaces:           nil,
+		allZones:            nil,
+		availableZones:      nil,
+		allSubnets:          nil,
+		providerIdsByCIDR:   nil,
+		subnetsByProviderId: nil,
+	}
+}
+
+// validateSpace parses the given spaceTag and verifies it exists by looking it
+// up in the cache (or populates the cache if empty).
+func (cache *addSubnetsCache) validateSpace(spaceTag string) (*names.SpaceTag, error) {
+	if spaceTag == "" {
+		return nil, errors.Errorf("SpaceTag is required")
+	}
+	tag, err := names.ParseSpaceTag(spaceTag)
+	if err != nil {
+		return nil, errors.Annotate(err, "given SpaceTag is invalid")
+	}
+
+	// Otherwise we need the cache to validate.
+	if cache.allSpaces == nil {
+		// Not yet cached.
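+		// This nil-means-uncached idiom recurs throughout addSubnetsCache:
+		// populate on first use, then answer later lookups from memory.
+		// Roughly, with plain maps (a sketch; names are illustrative):
+		//
+		//	if c.names == nil { // not populated yet
+		//		c.names = make(map[string]bool)
+		//		for _, n := range load() {
+		//			c.names[n] = true
+		//		}
+		//	}
+		//	_, found := c.names[name]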
+ logger.Debugf("caching known spaces") + + allSpaces, err := cache.api.AllSpaces() + if err != nil { + return nil, errors.Annotate(err, "cannot validate given SpaceTag") + } + cache.allSpaces = set.NewStrings() + for _, space := range allSpaces { + if cache.allSpaces.Contains(space.Name()) { + logger.Warningf("ignoring duplicated space %q", space.Name()) + continue + } + cache.allSpaces.Add(space.Name()) + } + } + if cache.allSpaces.IsEmpty() { + return nil, errors.Errorf("no spaces defined") + } + logger.Tracef("using cached spaces: %v", cache.allSpaces.SortedValues()) + + if !cache.allSpaces.Contains(tag.Id()) { + return nil, errors.NotFoundf("space %q", tag.Id()) // " not found" + } + return &tag, nil +} + +// cacheZones populates the allZones and availableZones cache, if it's +// empty. +func (cache *addSubnetsCache) cacheZones() error { + if cache.allZones != nil { + // Already cached. + logger.Tracef("using cached zones: %v", cache.allZones.SortedValues()) + return nil + } + + allZones, err := AllZones(cache.api) + if err != nil { + return errors.Annotate(err, "given Zones cannot be validated") + } + cache.allZones = set.NewStrings() + cache.availableZones = set.NewStrings() + for _, zone := range allZones.Results { + // AllZones() does not use the Error result field, so no + // need to check it here. + if cache.allZones.Contains(zone.Name) { + logger.Warningf("ignoring duplicated zone %q", zone.Name) + continue + } + + if zone.Available { + cache.availableZones.Add(zone.Name) + } + cache.allZones.Add(zone.Name) + } + logger.Debugf( + "%d known and %d available zones cached: %v", + cache.allZones.Size(), cache.availableZones.Size(), cache.allZones.SortedValues(), + ) + if cache.allZones.IsEmpty() { + cache.allZones = nil + // Cached an empty list. + return errors.Errorf("no zones defined") + } + return nil +} + +// validateZones ensures givenZones are valid. When providerZones are also set, +// givenZones must be a subset of them or match exactly. With non-empty +// providerZones and empty givenZones, it returns the providerZones (i.e. trusts +// the provider to know better). When no providerZones and only givenZones are +// set, only then the cache is used to validate givenZones. +func (cache *addSubnetsCache) validateZones(providerZones, givenZones []string) ([]string, error) { + givenSet := set.NewStrings(givenZones...) + providerSet := set.NewStrings(providerZones...) + + // First check if we can validate without using the cache. + switch { + case providerSet.IsEmpty() && givenSet.IsEmpty(): + return nil, errors.Errorf("Zones cannot be discovered from the provider and must be set") + case !providerSet.IsEmpty() && givenSet.IsEmpty(): + // Use provider zones when none given. + return providerSet.SortedValues(), nil + case !providerSet.IsEmpty() && !givenSet.IsEmpty(): + // Ensure givenZones either match providerZones or are a + // subset of them. + extraGiven := givenSet.Difference(providerSet) + if !extraGiven.IsEmpty() { + extra := `"` + strings.Join(extraGiven.SortedValues(), `", "`) + `"` + msg := fmt.Sprintf("Zones contain zones not allowed by the provider: %s", extra) + return nil, errors.Errorf(msg) + } + } + + // Otherwise we need the cache to validate. 
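+	// Both the check above and the ones below reduce to set difference:
+	// anything present in givenZones but missing from the allowed set is an
+	// error. With plain maps the same test reads (sketch; zone names are
+	// illustrative):
+	//
+	//	allowed := map[string]bool{"zone1": true, "zone2": true}
+	//	var extra []string
+	//	for _, z := range []string{"zone1", "zone9"} {
+	//		if !allowed[z] {
+	//			extra = append(extra, z) // extra == ["zone9"]
+	//		}
+	//	}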
+ if err := cache.cacheZones(); err != nil { + return nil, errors.Trace(err) + } + + diffAvailable := givenSet.Difference(cache.availableZones) + diffAll := givenSet.Difference(cache.allZones) + + if !diffAll.IsEmpty() { + extra := `"` + strings.Join(diffAll.SortedValues(), `", "`) + `"` + return nil, errors.Errorf("Zones contain unknown zones: %s", extra) + } + if !diffAvailable.IsEmpty() { + extra := `"` + strings.Join(diffAvailable.SortedValues(), `", "`) + `"` + return nil, errors.Errorf("Zones contain unavailable zones: %s", extra) + } + // All good - given zones are a subset and none are + // unavailable. + return givenSet.SortedValues(), nil +} + +// cacheSubnets tries to get and cache once all known provider subnets. It +// handles the case when subnets have duplicated CIDRs but distinct ProviderIds. +// It also handles weird edge cases, like no CIDR and/or ProviderId set for a +// subnet. +func (cache *addSubnetsCache) cacheSubnets() error { + if cache.allSubnets != nil { + // Already cached. + logger.Tracef("using %d cached subnets", len(cache.allSubnets)) + return nil + } + + netEnv, err := networkingEnviron(cache.api) + if err != nil { + return errors.Trace(err) + } + subnetInfo, err := netEnv.Subnets(instance.UnknownId, nil) + if err != nil { + return errors.Annotate(err, "cannot get provider subnets") + } + logger.Debugf("got %d subnets to cache from the provider", len(subnetInfo)) + + if len(subnetInfo) > 0 { + // Trying to avoid reallocations. + cache.allSubnets = make([]network.SubnetInfo, 0, len(subnetInfo)) + } + cache.providerIdsByCIDR = make(map[string]set.Strings) + cache.subnetsByProviderId = make(map[string]*network.SubnetInfo) + + for i, _ := range subnetInfo { + subnet := subnetInfo[i] + cidr := subnet.CIDR + providerId := string(subnet.ProviderId) + logger.Debugf( + "caching subnet with CIDR %q, ProviderId %q, Zones: %q", + cidr, providerId, subnet.AvailabilityZones, + ) + + if providerId == "" && cidr == "" { + logger.Warningf("found subnet with empty CIDR and ProviderId") + // But we still save it for lookups, which will probably fail anyway. + } else if providerId == "" { + logger.Warningf("found subnet with CIDR %q and empty ProviderId", cidr) + // But we still save it for lookups. + } else { + _, ok := cache.subnetsByProviderId[providerId] + if ok { + logger.Warningf( + "found subnet with CIDR %q and duplicated ProviderId %q", + cidr, providerId, + ) + // We just overwrite what's there for the same id. + // It's a weird case and it shouldn't happen with + // properly written providers, but anyway.. + } + } + cache.subnetsByProviderId[providerId] = &subnet + + if ids, ok := cache.providerIdsByCIDR[cidr]; !ok { + cache.providerIdsByCIDR[cidr] = set.NewStrings(providerId) + } else { + ids.Add(providerId) + logger.Debugf( + "duplicated subnet CIDR %q; collected ProviderIds so far: %v", + cidr, ids.SortedValues(), + ) + cache.providerIdsByCIDR[cidr] = ids + } + + cache.allSubnets = append(cache.allSubnets, subnet) + } + logger.Debugf("%d provider subnets cached", len(cache.allSubnets)) + if len(cache.allSubnets) == 0 { + // Cached an empty list. + return errors.Errorf("no subnets defined") + } + return nil +} + +// validateSubnet ensures either subnetTag or providerId is valid (not both), +// then uses the cache to validate and lookup the provider SubnetInfo for the +// subnet, if found. 
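+//
+// The last-call CIDR check below leans on net.ParseCIDR, which accepts a
+// CIDR with host bits set but reports the canonical network, letting the
+// error message show the expected form. For example (matching the
+// "sn-awesome" case in the tests):
+//
+//	_, ipnet, err := net.ParseCIDR("0.1.2.3/4")
+//	// err == nil, yet ipnet.String() == "0.0.0.0/4", not "0.1.2.3/4"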
+func (cache *addSubnetsCache) validateSubnet(subnetTag, providerId string) (*network.SubnetInfo, error) { + haveTag := subnetTag != "" + haveProviderId := providerId != "" + + if !haveTag && !haveProviderId { + return nil, errors.Errorf("either SubnetTag or SubnetProviderId is required") + } else if haveTag && haveProviderId { + return nil, errors.Errorf("SubnetTag and SubnetProviderId cannot be both set") + } + var tag names.SubnetTag + if haveTag { + var err error + tag, err = names.ParseSubnetTag(subnetTag) + if err != nil { + return nil, errors.Annotate(err, "given SubnetTag is invalid") + } + } + + // Otherwise we need the cache to validate. + if err := cache.cacheSubnets(); err != nil { + return nil, errors.Trace(err) + } + + if haveTag { + providerIds, ok := cache.providerIdsByCIDR[tag.Id()] + if !ok || providerIds.IsEmpty() { + return nil, errors.NotFoundf("subnet with CIDR %q", tag.Id()) + } + if providerIds.Size() > 1 { + ids := `"` + strings.Join(providerIds.SortedValues(), `", "`) + `"` + return nil, errors.Errorf( + "multiple subnets with CIDR %q: retry using ProviderId from: %s", + tag.Id(), ids, + ) + } + // A single CIDR matched. + providerId = providerIds.Values()[0] + } + + info, ok := cache.subnetsByProviderId[providerId] + if !ok || info == nil { + return nil, errors.NotFoundf( + "subnet with CIDR %q and ProviderId %q", + tag.Id(), providerId, + ) + } + // Do last-call validation. + if !names.IsValidSubnet(info.CIDR) { + _, ipnet, err := net.ParseCIDR(info.CIDR) + if err != nil && info.CIDR != "" { + // The underlying error is not important here, just that + // the CIDR is invalid. + return nil, errors.Errorf( + "subnet with CIDR %q and ProviderId %q: invalid CIDR", + info.CIDR, providerId, + ) + } + if info.CIDR == "" { + return nil, errors.Errorf( + "subnet with ProviderId %q: empty CIDR", providerId, + ) + } + return nil, errors.Errorf( + "subnet with ProviderId %q: incorrect CIDR format %q, expected %q", + providerId, info.CIDR, ipnet.String(), + ) + } + return info, nil +} + +// addOneSubnet validates the given arguments, using cache for lookups +// (initialized on first use), then adds it to the backing store, if successful. +func addOneSubnet(api NetworkBacking, args params.AddSubnetParams, cache *addSubnetsCache) error { + subnetInfo, err := cache.validateSubnet(args.SubnetTag, args.SubnetProviderId) + if err != nil { + return errors.Trace(err) + } + spaceTag, err := cache.validateSpace(args.SpaceTag) + if err != nil { + return errors.Trace(err) + } + zones, err := cache.validateZones(subnetInfo.AvailabilityZones, args.Zones) + if err != nil { + return errors.Trace(err) + } + + // Try adding the subnet. + backingInfo := BackingSubnetInfo{ + ProviderId: subnetInfo.ProviderId, + CIDR: subnetInfo.CIDR, + VLANTag: subnetInfo.VLANTag, + AvailabilityZones: zones, + SpaceName: spaceTag.Id(), + } + if subnetInfo.AllocatableIPLow != nil { + backingInfo.AllocatableIPLow = subnetInfo.AllocatableIPLow.String() + } + if subnetInfo.AllocatableIPHigh != nil { + backingInfo.AllocatableIPHigh = subnetInfo.AllocatableIPHigh.String() + } + if _, err := api.AddSubnet(backingInfo); err != nil { + return errors.Trace(err) + } + return nil +} + +// AddSubnets adds. 
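+// Each entry of args.Subnets is validated independently, using cached
+// provider and state lookups, and the valid ones are stored via the backing.
+// The returned error is reserved for systemic failures (always nil here);
+// per-subnet outcomes land in the matching Results slot. A usage sketch
+// (backing stands for any NetworkBacking; the tags echo the tests):
+//
+//	results, _ := AddSubnets(backing, params.AddSubnetsParams{
+//		Subnets: []params.AddSubnetParams{{
+//			SubnetTag: "subnet-10.30.1.0/24",
+//			SpaceTag:  "space-private",
+//		}},
+//	})
+//	// Check results.Results[0].Error for this subnet's outcome.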
+func AddSubnets(api NetworkBacking, args params.AddSubnetsParams) (params.ErrorResults, error) { + results := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Subnets)), + } + + if len(args.Subnets) == 0 { + return results, nil + } + + cache := NewAddSubnetsCache(api) + for i, arg := range args.Subnets { + err := addOneSubnet(api, arg, cache) + if err != nil { + results.Results[i].Error = common.ServerError(err) + } + } + return results, nil +} + +// ListSubnets lists all the available subnets or only those matching +// all given optional filters. +func ListSubnets(api NetworkBacking, args params.SubnetsFilters) (results params.ListSubnetsResults, err error) { + subnets, err := api.AllSubnets() + if err != nil { + return results, errors.Trace(err) + } + + var spaceFilter string + if args.SpaceTag != "" { + tag, err := names.ParseSpaceTag(args.SpaceTag) + if err != nil { + return results, errors.Trace(err) + } + spaceFilter = tag.Id() + } + zoneFilter := args.Zone + + for _, subnet := range subnets { + if spaceFilter != "" && subnet.SpaceName() != spaceFilter { + logger.Tracef( + "filtering subnet %q from space %q not matching filter %q", + subnet.CIDR(), subnet.SpaceName(), spaceFilter, + ) + continue + } + zoneSet := set.NewStrings(subnet.AvailabilityZones()...) + if zoneFilter != "" && !zoneSet.IsEmpty() && !zoneSet.Contains(zoneFilter) { + logger.Tracef( + "filtering subnet %q with zones %v not matching filter %q", + subnet.CIDR(), subnet.AvailabilityZones(), zoneFilter, + ) + continue + } + result := params.Subnet{ + CIDR: subnet.CIDR(), + ProviderId: string(subnet.ProviderId()), + VLANTag: subnet.VLANTag(), + Life: subnet.Life(), + SpaceTag: names.NewSpaceTag(subnet.SpaceName()).String(), + Zones: subnet.AvailabilityZones(), + Status: subnet.Status(), + } + results.Results = append(results.Results, result) + } + return results, nil +} + +// networkingEnviron returns a environs.NetworkingEnviron instance from the +// current model config, if supported. If the model does not support +// environs.Networking, an error satisfying errors.IsNotSupported() will be +// returned. +func networkingEnviron(api NetworkBacking) (environs.NetworkingEnviron, error) { + envConfig, err := api.ModelConfig() + if err != nil { + return nil, errors.Annotate(err, "getting model config") + } + + env, err := environs.New(envConfig) + if err != nil { + return nil, errors.Annotate(err, "opening model") + } + if netEnv, ok := environs.SupportsNetworking(env); ok { + return netEnv, nil + } + return nil, errors.NotSupportedf("model networking features") // " not supported" +} + +// AllZones is defined on the API interface. +func AllZones(api NetworkBacking) (params.ZoneResults, error) { + var results params.ZoneResults + + zonesAsString := func(zones []providercommon.AvailabilityZone) string { + results := make([]string, len(zones)) + for i, zone := range zones { + results[i] = zone.Name() + } + return `"` + strings.Join(results, `", "`) + `"` + } + + // Try fetching cached zones first. + zones, err := api.AvailabilityZones() + if err != nil { + return results, errors.Trace(err) + } + + if len(zones) == 0 { + // This is likely the first time we're called. + // Fetch all zones from the provider and update. 
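+		// This is the cache-aside read path: serve zones from state when
+		// present, otherwise fetch from the provider and write them back.
+		// In outline (a sketch; store and provider are illustrative):
+		//
+		//	zs, err := store.Zones()
+		//	if err != nil || len(zs) > 0 {
+		//		return zs, err
+		//	}
+		//	zs, err = provider.Zones() // miss: ask the provider
+		//	if err == nil {
+		//		err = store.SetZones(zs) // write back for next time
+		//	}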
+ zones, err = updateZones(api) + if err != nil { + return results, errors.Annotate(err, "cannot update known zones") + } + logger.Debugf( + "updated the list of known zones from the model: %s", zonesAsString(zones), + ) + } else { + logger.Debugf("using cached list of known zones: %s", zonesAsString(zones)) + } + + results.Results = make([]params.ZoneResult, len(zones)) + for i, zone := range zones { + results.Results[i].Name = zone.Name() + results.Results[i].Available = zone.Available() + } + return results, nil +} + +// updateZones attempts to retrieve all availability zones from the environment +// provider (if supported) and then updates the persisted list of zones in +// state, returning them as well on success. +func updateZones(api NetworkBacking) ([]providercommon.AvailabilityZone, error) { + zoned, err := zonedEnviron(api) + if err != nil { + return nil, errors.Trace(err) + } + zones, err := zoned.AvailabilityZones() + if err != nil { + return nil, errors.Trace(err) + } + + if err := api.SetAvailabilityZones(zones); err != nil { + return nil, errors.Trace(err) + } + return zones, nil +} + +// zonedEnviron returns a providercommon.ZonedEnviron instance from the current +// model config. If the model does not support zones, an error satisfying +// errors.IsNotSupported() will be returned. +func zonedEnviron(api NetworkBacking) (providercommon.ZonedEnviron, error) { + envConfig, err := api.ModelConfig() + if err != nil { + return nil, errors.Annotate(err, "getting model config") + } + + env, err := environs.New(envConfig) + if err != nil { + return nil, errors.Annotate(err, "opening model") + } + if zonedEnv, ok := env.(providercommon.ZonedEnviron); ok { + return zonedEnv, nil + } + return nil, errors.NotSupportedf("availability zones") +} === added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/subnets_test.go' --- src/github.com/juju/juju/apiserver/common/networkingcommon/subnets_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/networkingcommon/subnets_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,787 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package networkingcommon_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common/networkingcommon" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/instance" + "github.com/juju/juju/network" + providercommon "github.com/juju/juju/provider/common" + coretesting "github.com/juju/juju/testing" +) + +type SubnetsSuite struct { + coretesting.BaseSuite + apiservertesting.StubNetwork +} + +var _ = gc.Suite(&SubnetsSuite{}) + +func (s *SubnetsSuite) SetUpSuite(c *gc.C) { + s.StubNetwork.SetUpSuite(c) + s.BaseSuite.SetUpSuite(c) +} + +func (s *SubnetsSuite) TearDownSuite(c *gc.C) { + s.BaseSuite.TearDownSuite(c) +} + +func (s *SubnetsSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) +} + +func (s *SubnetsSuite) TearDownTest(c *gc.C) { + s.BaseSuite.TearDownTest(c) +} + +// AssertAllZonesResult makes it easier to verify AllZones results. 
+func (s *SubnetsSuite) AssertAllZonesResult(c *gc.C, got params.ZoneResults, expected []providercommon.AvailabilityZone) { + results := make([]params.ZoneResult, len(expected)) + for i, zone := range expected { + results[i].Name = zone.Name() + results[i].Available = zone.Available() + } + c.Assert(got, jc.DeepEquals, params.ZoneResults{Results: results}) +} + +func (s *SubnetsSuite) TestAllZonesWhenBackingAvailabilityZonesFails(c *gc.C) { + apiservertesting.SharedStub.SetErrors(errors.NotSupportedf("zones")) + + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, gc.ErrorMatches, "zones not supported") + // Verify the cause is not obscured. + c.Assert(err, jc.Satisfies, errors.IsNotSupported) + c.Assert(results, jc.DeepEquals, params.ZoneResults{}) + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, + apiservertesting.BackingCall("AvailabilityZones"), + ) +} + +func (s *SubnetsSuite) TestAllZonesUsesBackingZonesWhenAvailable(c *gc.C) { + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, jc.ErrorIsNil) + s.AssertAllZonesResult(c, results, apiservertesting.BackingInstance.Zones) + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, + apiservertesting.BackingCall("AvailabilityZones"), + ) +} + +func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesUpdates(c *gc.C) { + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithoutZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) + + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, jc.ErrorIsNil) + s.AssertAllZonesResult(c, results, apiservertesting.ProviderInstance.Zones) + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, + apiservertesting.BackingCall("AvailabilityZones"), + apiservertesting.BackingCall("ModelConfig"), + apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), + apiservertesting.ZonedEnvironCall("AvailabilityZones"), + apiservertesting.BackingCall("SetAvailabilityZones", apiservertesting.ProviderInstance.Zones), + ) +} + +func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndSetFails(c *gc.C) { + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithoutZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) + + apiservertesting.SharedStub.SetErrors( + nil, // Backing.AvailabilityZones + nil, // Backing.ModelConfig + nil, // Provider.Open + nil, // ZonedEnviron.AvailabilityZones + errors.NotSupportedf("setting"), // Backing.SetAvailabilityZones + ) + + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, gc.ErrorMatches, + `cannot update known zones: setting not supported`, + ) + // Verify the cause is not obscured. 
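+	// This holds because juju/errors Trace and Annotate wrap the message
+	// while keeping the original error as the cause, e.g. (sketch):
+	//
+	//	err := errors.Annotate(errors.NotSupportedf("zones"), "oops")
+	//	errors.IsNotSupported(err) // true: the cause survives annotation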
+ c.Assert(err, jc.Satisfies, errors.IsNotSupported) + c.Assert(results, jc.DeepEquals, params.ZoneResults{}) + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, + apiservertesting.BackingCall("AvailabilityZones"), + apiservertesting.BackingCall("ModelConfig"), + apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), + apiservertesting.ZonedEnvironCall("AvailabilityZones"), + apiservertesting.BackingCall("SetAvailabilityZones", apiservertesting.ProviderInstance.Zones), + ) +} + +func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndFetchingZonesFails(c *gc.C) { + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithoutZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) + + apiservertesting.SharedStub.SetErrors( + nil, // Backing.AvailabilityZones + nil, // Backing.ModelConfig + nil, // Provider.Open + errors.NotValidf("foo"), // ZonedEnviron.AvailabilityZones + ) + + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, gc.ErrorMatches, + `cannot update known zones: foo not valid`, + ) + // Verify the cause is not obscured. + c.Assert(err, jc.Satisfies, errors.IsNotValid) + c.Assert(results, jc.DeepEquals, params.ZoneResults{}) + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, + apiservertesting.BackingCall("AvailabilityZones"), + apiservertesting.BackingCall("ModelConfig"), + apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), + apiservertesting.ZonedEnvironCall("AvailabilityZones"), + ) +} + +func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndModelConfigFails(c *gc.C) { + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithoutZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) + + apiservertesting.SharedStub.SetErrors( + nil, // Backing.AvailabilityZones + errors.NotFoundf("config"), // Backing.ModelConfig + ) + + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, gc.ErrorMatches, + `cannot update known zones: getting model config: config not found`, + ) + // Verify the cause is not obscured. + c.Assert(err, jc.Satisfies, errors.IsNotFound) + c.Assert(results, jc.DeepEquals, params.ZoneResults{}) + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, + apiservertesting.BackingCall("AvailabilityZones"), + apiservertesting.BackingCall("ModelConfig"), + ) +} + +func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndOpenFails(c *gc.C) { + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithoutZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) + + apiservertesting.SharedStub.SetErrors( + nil, // Backing.AvailabilityZones + nil, // Backing.ModelConfig + errors.NotValidf("config"), // Provider.Open + ) + + results, err := networkingcommon.AllZones(apiservertesting.BackingInstance) + c.Assert(err, gc.ErrorMatches, + `cannot update known zones: opening model: config not valid`, + ) + // Verify the cause is not obscured. 
+	c.Assert(err, jc.Satisfies, errors.IsNotValid)
+	c.Assert(results, jc.DeepEquals, params.ZoneResults{})
+
+	apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub,
+		apiservertesting.BackingCall("AvailabilityZones"),
+		apiservertesting.BackingCall("ModelConfig"),
+		apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+	)
+}
+
+func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndZonesNotSupported(c *gc.C) {
+	// ZonedEnviron not supported
+	apiservertesting.BackingInstance.SetUp(
+		c,
+		apiservertesting.StubEnvironName,
+		apiservertesting.WithoutZones,
+		apiservertesting.WithSpaces,
+		apiservertesting.WithSubnets)
+
+	results, err := networkingcommon.AllZones(apiservertesting.BackingInstance)
+	c.Assert(err, gc.ErrorMatches,
+		`cannot update known zones: availability zones not supported`,
+	)
+	// Verify the cause is not obscured.
+	c.Assert(err, jc.Satisfies, errors.IsNotSupported)
+	c.Assert(results, jc.DeepEquals, params.ZoneResults{})
+
+	apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub,
+		apiservertesting.BackingCall("AvailabilityZones"),
+		apiservertesting.BackingCall("ModelConfig"),
+		apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+	)
+}
+
+func (s *SubnetsSuite) TestAddSubnetsParamsCombinations(c *gc.C) {
+	apiservertesting.BackingInstance.SetUp(
+		c,
+		apiservertesting.StubNetworkingEnvironName,
+		apiservertesting.WithZones,
+		apiservertesting.WithSpaces,
+		apiservertesting.WithSubnets)
+
+	args := params.AddSubnetsParams{Subnets: []params.AddSubnetParams{{
+		// nothing set; early exit: no calls
+	}, {
+		// neither tag nor id set: the rest is ignored; same as above
+		SpaceTag: "any",
+		Zones:    []string{"any", "ignored"},
+	}, {
+		// both tag and id set; same as above
+		SubnetTag:        "any",
+		SubnetProviderId: "any",
+	}, {
+		// lookup by id needed, no cached subnets; ModelConfig(): error
+		SubnetProviderId: "any",
+	}, {
+		// same as above, need to cache subnets; ModelConfig(): ok; Open(): error
+		SubnetProviderId: "ignored",
+	}, {
+		// as above, caching again; ModelConfig(), Open(): ok; Subnets(): error
+		SubnetProviderId: "unimportant",
+	}, {
+		// exactly as above, except all 3 calls ok; cached lookup: id not found
+		SubnetProviderId: "missing",
+	}, {
+		// cached lookup by id (no calls): not found error
+		SubnetProviderId: "void",
+	}, {
+		// cached lookup by id: ok; parsing space tag: invalid tag error
+		SubnetProviderId: "sn-deadbeef",
+		SpaceTag:         "invalid",
+	}, {
+		// as above, but slightly different error: invalid space tag error
+		SubnetProviderId: "sn-zadf00d",
+		SpaceTag:         "unit-foo",
+	}, {
+		// as above; yet another similar error (valid tag with another kind)
+		SubnetProviderId: "vlan-42",
+		SpaceTag:         "unit-foo-0",
+	}, {
+		// invalid tag (no kind): error (no calls)
+		SubnetTag: "invalid",
+	}, {
+		// invalid subnet tag (another kind): same as above
+		SubnetTag: "service-bar",
+	}, {
+		// cached lookup by missing CIDR: not found error
+		SubnetTag: "subnet-1.2.3.0/24",
+	}, {
+		// cached lookup by duplicate CIDR: multiple choices error
+		SubnetTag: "subnet-10.10.0.0/24",
+	}, {
+		// cached lookup by CIDR with empty provider id: ok; space tag is required error
+		SubnetTag: "subnet-10.20.0.0/16",
+	}, {
+		// cached lookup by id with invalid CIDR: cannot be added error
+		SubnetProviderId: "sn-invalid",
+	}, {
+		// cached lookup by id with empty CIDR: cannot be added error
+		SubnetProviderId: "sn-empty",
+	}, {
+		// cached lookup by id with incorrectly specified CIDR: cannot be added error
+		SubnetProviderId: "sn-awesome",
+	}, {
+		// cached lookup by CIDR: ok; valid tag; caching spaces: AllSpaces(): error
+		SubnetTag: "subnet-10.30.1.0/24",
+		SpaceTag:  "space-unverified",
+	}, {
+		// exactly as above, except AllSpaces(): ok; cached lookup: space not found
+		SubnetTag: "subnet-2001:db8::/32",
+		SpaceTag:  "space-missing",
+	}, {
+		// both cached lookups (CIDR, space): ok; no provider or given zones: error
+		SubnetTag: "subnet-10.42.0.0/16",
+		SpaceTag:  "space-dmz",
+	}, {
+		// like above; with provider zones, extra given: error
+		SubnetProviderId: "vlan-42",
+		SpaceTag:         "space-private",
+		Zones: []string{
+			"zone2",   // not allowed, existing, unavailable
+			"zone3",   // allowed, existing, available
+			"missing", // not allowed, non-existing
+			"zone3",   // duplicates are ignored (should they ?)
+			"zone1",   // not allowed, existing, available
+		},
+	}, {
+		// like above; no provider, only given zones; caching: AllZones(): error
+		SubnetTag: "subnet-10.42.0.0/16",
+		SpaceTag:  "space-dmz",
+		Zones:     []string{"any", "ignored"},
+	}, {
+		// as above, but unknown zones given: cached: AllZones(): ok; unknown zones error
+		SubnetTag: "subnet-10.42.0.0/16",
+		SpaceTag:  "space-dmz",
+		Zones:     []string{"missing", "gone"},
+	}, {
+		// as above, but unknown and unavailable zones given: same error (no calls)
+		SubnetTag: "subnet-10.42.0.0/16",
+		SpaceTag:  "space-dmz",
+		Zones:     []string{"zone4", "missing", "zone2"},
+	}, {
+		// as above, but unavailable zones given: Zones contains unavailable error
+		SubnetTag: "subnet-10.42.0.0/16",
+		SpaceTag:  "space-dmz",
+		Zones:     []string{"zone2", "zone4"},
+	}, {
+		// as above, but available and unavailable zones given: same error as above
+		SubnetTag: "subnet-10.42.0.0/16",
+		SpaceTag:  "space-dmz",
+		Zones:     []string{"zone4", "zone3"},
+	}, {
+		// everything succeeds, using caches as needed, until: AddSubnet(): error
+		SubnetProviderId: "sn-ipv6",
+		SpaceTag:         "space-dmz",
+		Zones:            []string{"zone1"},
+		// restriction of provider zones [zone1, zone3]
+	}, {
+		// cached lookups by CIDR, space: ok; duplicated provider id: unavailable zone2
+		SubnetTag: "subnet-10.99.88.0/24",
+		SpaceTag:  "space-dmz",
+		Zones:     []string{"zone2"},
+		// due to the duplicate ProviderId provider zones from subnet
+		// with the last ProviderId=sn-deadbeef are used
+		// (10.10.0.0/24); [zone2], not the 10.99.88.0/24 provider
+		// zones: [zone1, zone2].
+	}, {
+		// same as above, but AddSubnet(): ok; success (backing verified later)
+		SubnetProviderId: "sn-ipv6",
+		SpaceTag:         "space-dmz",
+		Zones:            []string{"zone1"},
+		// restriction of provider zones [zone1, zone3]
+	}, {
+		// success (CIDR lookup; with provider (no given) zones): AddSubnet(): ok
+		SubnetTag: "subnet-10.30.1.0/24",
+		SpaceTag:  "space-private",
+		// Zones not given, so provider zones are used instead: [zone3]
+	}, {
+		// success (id lookup; given zones match provider zones) AddSubnet(): ok
+		SubnetProviderId: "sn-zadf00d",
+		SpaceTag:         "space-private",
+		Zones:            []string{"zone1"},
+	}}}
+	apiservertesting.SharedStub.SetErrors(
+		// caching subnets (1st attempt): fails
+		errors.NotFoundf("config"), // BackingInstance.ModelConfig (1st call)
+
+		// caching subnets (2nd attempt): fails
+		nil, // BackingInstance.ModelConfig (2nd call)
+		errors.NotFoundf("provider"), // ProviderInstance.Open (1st call)
+
+		// caching subnets (3rd attempt): fails
+		nil, // BackingInstance.ModelConfig (3rd call)
+		nil, // ProviderInstance.Open (2nd call)
+		errors.NotFoundf("subnets"), // NetworkingEnvironInstance.Subnets (1st call)
+
+		// caching subnets (4th attempt): succeeds
+		nil, // BackingInstance.ModelConfig (4th call)
+		nil, // ProviderInstance.Open (3rd call)
+		nil, // NetworkingEnvironInstance.Subnets (2nd call)
+
+		// caching spaces (1st and 2nd attempts)
+		errors.NotFoundf("spaces"), // BackingInstance.AllSpaces (1st call)
+		nil, // BackingInstance.AllSpaces (2nd call)
+
+		// caching zones (1st and 2nd attempts)
+		errors.NotFoundf("zones"), // BackingInstance.AvailabilityZones (1st call)
+		nil, // BackingInstance.AvailabilityZones (2nd call)
+
+		// validation done; adding subnets to backing store
+		errors.NotFoundf("state"), // BackingInstance.AddSubnet (1st call)
+		// the next 3 BackingInstance.AddSubnet calls succeed (2nd to 4th calls)
+	)
+
+	expectedErrors := []struct {
+		message   string
+		satisfier func(error) bool
+	}{
+		{"either SubnetTag or SubnetProviderId is required", nil},
+		{"either SubnetTag or SubnetProviderId is required", nil},
+		{"SubnetTag and SubnetProviderId cannot be both set", nil},
+		{"getting model config: config not found", params.IsCodeNotFound},
+		{"opening model: provider not found", params.IsCodeNotFound},
+		{"cannot get provider subnets: subnets not found", params.IsCodeNotFound},
+		{`subnet with CIDR "" and ProviderId "missing" not found`, params.IsCodeNotFound},
+		{`subnet with CIDR "" and ProviderId "void" not found`, params.IsCodeNotFound},
+		{`given SpaceTag is invalid: "invalid" is not a valid tag`, nil},
+		{`given SpaceTag is invalid: "unit-foo" is not a valid unit tag`, nil},
+		{`given SpaceTag is invalid: "unit-foo-0" is not a valid space tag`, nil},
+		{`given SubnetTag is invalid: "invalid" is not a valid tag`, nil},
+		{`given SubnetTag is invalid: "service-bar" is not a valid subnet tag`, nil},
+		{`subnet with CIDR "1.2.3.0/24" not found`, params.IsCodeNotFound},
+		{
+			`multiple subnets with CIDR "10.10.0.0/24": ` +
+				`retry using ProviderId from: "sn-deadbeef", "sn-zadf00d"`, nil,
+		},
+		{"SpaceTag is required", nil},
+		{`subnet with CIDR "invalid" and ProviderId "sn-invalid": invalid CIDR`, nil},
+		{`subnet with ProviderId "sn-empty": empty CIDR`, nil},
+		{
+			`subnet with ProviderId "sn-awesome": ` +
+				`incorrect CIDR format "0.1.2.3/4", expected "0.0.0.0/4"`, nil,
+		},
+		{"cannot validate given SpaceTag: spaces not found", params.IsCodeNotFound},
+		{`space "missing" not found`, params.IsCodeNotFound},
+		{"Zones cannot be discovered from the provider and must be set", nil},
+		{`Zones contain zones not allowed by the provider: "missing", "zone1", "zone2"`, nil},
+		{"given Zones cannot be validated: zones not found", params.IsCodeNotFound},
+		{`Zones contain unknown zones: "gone", "missing"`, nil},
+		{`Zones contain unknown zones: "missing"`, nil},
+		{`Zones contain unavailable zones: "zone2", "zone4"`, nil},
+		{`Zones contain unavailable zones: "zone4"`, nil},
+		{"state not found", params.IsCodeNotFound},
+		{`Zones contain unavailable zones: "zone2"`, nil},
+		{"", nil},
+		{"", nil},
+		{"", nil},
+	}
+	expectedBackingInfos := []networkingcommon.BackingSubnetInfo{{
+		ProviderId:        "sn-ipv6",
+		CIDR:              "2001:db8::/32",
+		VLANTag:           0,
+		AllocatableIPHigh: "",
+		AllocatableIPLow:  "",
+		AvailabilityZones: []string{"zone1"},
+		SpaceName:         "dmz",
+	}, {
+		ProviderId:        "vlan-42",
+		CIDR:              "10.30.1.0/24",
+		VLANTag:           42,
+		AllocatableIPHigh: "",
+		AllocatableIPLow:  "",
+		AvailabilityZones: []string{"zone3"},
+		SpaceName:         "private",
+	}, {
+		ProviderId:        "sn-zadf00d",
+		CIDR:              "10.10.0.0/24",
+		VLANTag:           0,
+		AllocatableIPHigh: "10.10.0.100",
+		AllocatableIPLow:  "10.10.0.10",
+		AvailabilityZones: []string{"zone1"},
+		SpaceName:         "private",
+	}}
+	c.Check(expectedErrors, gc.HasLen, len(args.Subnets))
+	results, err := networkingcommon.AddSubnets(apiservertesting.BackingInstance, args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(len(results.Results), gc.Equals, len(args.Subnets))
+	for i, result := range results.Results {
+		c.Logf("result #%d: expected: %q", i, expectedErrors[i].message)
+		if expectedErrors[i].message == "" {
+			if !c.Check(result.Error, gc.IsNil) {
+				c.Logf("unexpected error: %v; args: %#v", result.Error, args.Subnets[i])
+			}
+			continue
+		}
+		if !c.Check(result.Error, gc.NotNil) {
+			c.Logf("unexpected success; args: %#v", args.Subnets[i])
+			continue
+		}
+		c.Check(result.Error.Message, gc.Equals, expectedErrors[i].message)
+		if expectedErrors[i].satisfier != nil {
+			c.Check(result.Error, jc.Satisfies, expectedErrors[i].satisfier)
+		} else {
+			c.Check(result.Error.Code, gc.Equals, "")
+		}
+	}
+
+	apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub,
+		// caching subnets (1st attempt): fails
+		apiservertesting.BackingCall("ModelConfig"),
+
+		// caching subnets (2nd attempt): fails
+		apiservertesting.BackingCall("ModelConfig"),
+		apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+
+		// caching subnets (3rd attempt): fails
+		apiservertesting.BackingCall("ModelConfig"),
+		apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+		apiservertesting.NetworkingEnvironCall("Subnets", instance.UnknownId, []network.Id(nil)),
+
+		// caching subnets (4th attempt): succeeds
+		apiservertesting.BackingCall("ModelConfig"),
+		apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+		apiservertesting.NetworkingEnvironCall("Subnets", instance.UnknownId, []network.Id(nil)),
+
+		// caching spaces (1st and 2nd attempts)
+		apiservertesting.BackingCall("AllSpaces"),
+		apiservertesting.BackingCall("AllSpaces"),
+
+		// caching zones (1st and 2nd attempts)
+		apiservertesting.BackingCall("AvailabilityZones"),
+		apiservertesting.BackingCall("AvailabilityZones"),
+
+		// validation done; adding subnets to backing store
+		apiservertesting.BackingCall("AddSubnet", expectedBackingInfos[0]),
+		apiservertesting.BackingCall("AddSubnet", expectedBackingInfos[0]),
+		apiservertesting.BackingCall("AddSubnet", expectedBackingInfos[1]),
+		apiservertesting.BackingCall("AddSubnet", expectedBackingInfos[2]),
+	)
+	apiservertesting.ResetStub(apiservertesting.SharedStub)
+
+	// Finally, check that no params yields no results.
+	results, err = networkingcommon.AddSubnets(apiservertesting.BackingInstance, params.AddSubnetsParams{})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(results.Results, gc.NotNil)
+	c.Assert(results.Results, gc.HasLen, 0)
+
+	apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub)
+}
+
+func (s *SubnetsSuite) CheckAddSubnetsFails(
+	c *gc.C, envName string,
+	withZones, withSpaces, withSubnets apiservertesting.SetUpFlag,
+	expectedError string,
+	expectedSatisfies func(error) bool,
+) {
+	apiservertesting.BackingInstance.SetUp(c, envName, withZones, withSpaces, withSubnets)
+
+	// These calls always happen.
+	expectedCalls := []apiservertesting.StubMethodCall{
+		apiservertesting.BackingCall("ModelConfig"),
+		apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+	}
+
+	// Subnets is also always called, but the receiver is different.
+	switch envName {
+	case apiservertesting.StubNetworkingEnvironName:
+		expectedCalls = append(
+			expectedCalls,
+			apiservertesting.NetworkingEnvironCall("Subnets", instance.UnknownId, []network.Id(nil)),
+		)
+	case apiservertesting.StubZonedNetworkingEnvironName:
+		expectedCalls = append(
+			expectedCalls,
+			apiservertesting.ZonedNetworkingEnvironCall("Subnets", instance.UnknownId, []network.Id(nil)),
+		)
+	}
+
+	if !withSubnets {
+		// Set provider subnets to empty for this test.
+		originalSubnets := make([]network.SubnetInfo, len(apiservertesting.ProviderInstance.Subnets))
+		copy(originalSubnets, apiservertesting.ProviderInstance.Subnets)
+		apiservertesting.ProviderInstance.Subnets = []network.SubnetInfo{}
+
+		defer func() {
+			apiservertesting.ProviderInstance.Subnets = make([]network.SubnetInfo, len(originalSubnets))
+			copy(apiservertesting.ProviderInstance.Subnets, originalSubnets)
+		}()
+
+		if envName == apiservertesting.StubEnvironName || envName == apiservertesting.StubNetworkingEnvironName {
+			// networking is either not supported or no subnets are
+			// defined, so expect the same calls for each of the two
+			// arguments to AddSubnets() below.
+			expectedCalls = append(expectedCalls, expectedCalls...)
+		}
+	} else {
+		// Having subnets implies spaces will be cached as well.
+		expectedCalls = append(expectedCalls, apiservertesting.BackingCall("AllSpaces"))
+	}
+
+	if withSpaces && withSubnets {
+		// Having both subnets and spaces means we'll also cache zones.
+		expectedCalls = append(expectedCalls, apiservertesting.BackingCall("AvailabilityZones"))
+	}
+
+	if !withZones && withSpaces {
+		// Set provider zones to empty for this test.
+		originalZones := make([]providercommon.AvailabilityZone, len(apiservertesting.ProviderInstance.Zones))
+		copy(originalZones, apiservertesting.ProviderInstance.Zones)
+		apiservertesting.ProviderInstance.Zones = []providercommon.AvailabilityZone{}
+
+		defer func() {
+			apiservertesting.ProviderInstance.Zones = make([]providercommon.AvailabilityZone, len(originalZones))
+			copy(apiservertesting.ProviderInstance.Zones, originalZones)
+		}()
+
+		// updateZones tries to construct a ZonedEnviron with these calls.
+		zoneCalls := append([]apiservertesting.StubMethodCall{},
+			apiservertesting.BackingCall("ModelConfig"),
+			apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig),
+		)
+		// Receiver can differ according to envName, but
+		// AvailabilityZones() will be called on either receiver.
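+		// In the production code this receiver difference falls out of a
+		// plain type assertion: an environ advertises zone support simply
+		// by implementing providercommon.ZonedEnviron, as zonedEnviron()
+		// in subnets.go does:
+		//
+		//	if zonedEnv, ok := env.(providercommon.ZonedEnviron); ok {
+		//		return zonedEnv, nil
+		//	}
+		//	return nil, errors.NotSupportedf("availability zones")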
+ switch envName { + case apiservertesting.StubZonedEnvironName: + zoneCalls = append( + zoneCalls, + apiservertesting.ZonedEnvironCall("AvailabilityZones"), + ) + case apiservertesting.StubZonedNetworkingEnvironName: + zoneCalls = append( + zoneCalls, + apiservertesting.ZonedNetworkingEnvironCall("AvailabilityZones"), + ) + } + // Finally after caching provider zones backing zones are + // updated. + zoneCalls = append( + zoneCalls, + apiservertesting.BackingCall("SetAvailabilityZones", apiservertesting.ProviderInstance.Zones), + ) + + // Now, because we have 2 arguments to AddSubnets() below, we + // need to expect the same zoneCalls twice, with a + // AvailabilityZones backing lookup between them. + expectedCalls = append(expectedCalls, zoneCalls...) + expectedCalls = append(expectedCalls, apiservertesting.BackingCall("AvailabilityZones")) + expectedCalls = append(expectedCalls, zoneCalls...) + } + + // Pass 2 arguments covering all cases we need. + args := params.AddSubnetsParams{ + Subnets: []params.AddSubnetParams{{ + SubnetTag: "subnet-10.42.0.0/16", + SpaceTag: "space-dmz", + Zones: []string{"zone1"}, + }, { + SubnetProviderId: "vlan-42", + SpaceTag: "space-private", + Zones: []string{"zone3"}, + }}, + } + results, err := networkingcommon.AddSubnets(apiservertesting.BackingInstance, args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, len(args.Subnets)) + for _, result := range results.Results { + if !c.Check(result.Error, gc.NotNil) { + continue + } + c.Check(result.Error, gc.ErrorMatches, expectedError) + if expectedSatisfies != nil { + c.Check(result.Error, jc.Satisfies, expectedSatisfies) + } else { + c.Check(result.Error.Code, gc.Equals, "") + } + } + + apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, expectedCalls...) +} + +func (s *SubnetsSuite) TestAddSubnetsWithNoProviderSubnetsFails(c *gc.C) { + s.CheckAddSubnetsFails( + c, apiservertesting.StubNetworkingEnvironName, + apiservertesting.WithoutZones, apiservertesting.WithoutSpaces, apiservertesting.WithoutSubnets, + "no subnets defined", + nil, + ) +} + +func (s *SubnetsSuite) TestAddSubnetsWithNoBackingSpacesFails(c *gc.C) { + s.CheckAddSubnetsFails( + c, apiservertesting.StubNetworkingEnvironName, + apiservertesting.WithoutZones, apiservertesting.WithoutSpaces, apiservertesting.WithSubnets, + "no spaces defined", + nil, + ) +} + +func (s *SubnetsSuite) TestAddSubnetsWithNoProviderZonesFails(c *gc.C) { + s.CheckAddSubnetsFails( + c, apiservertesting.StubZonedNetworkingEnvironName, + apiservertesting.WithoutZones, apiservertesting.WithSpaces, apiservertesting.WithSubnets, + "no zones defined", + nil, + ) +} + +func (s *SubnetsSuite) TestAddSubnetsWhenNetworkingEnvironNotSupported(c *gc.C) { + s.CheckAddSubnetsFails( + c, apiservertesting.StubEnvironName, + apiservertesting.WithoutZones, apiservertesting.WithoutSpaces, apiservertesting.WithoutSubnets, + "model networking features not supported", + params.IsCodeNotSupported, + ) +} + +func (s *SubnetsSuite) TestListSubnetsAndFiltering(c *gc.C) { + expected := []params.Subnet{{ + CIDR: "10.10.0.0/24", + ProviderId: "sn-zadf00d", + VLANTag: 0, + Life: "", + SpaceTag: "space-private", + Zones: []string{"zone1"}, + Status: "", + }, { + CIDR: "2001:db8::/32", + ProviderId: "sn-ipv6", + VLANTag: 0, + Life: "", + SpaceTag: "space-dmz", + Zones: []string{"zone1", "zone3"}, + Status: "", + }} + // No filtering. 
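+	// The queries below walk through every filter combination. ListSubnets
+	// applies the space and zone filters conjunctively, with one wrinkle: a
+	// subnet with no recorded zones passes any zone filter. The predicate is
+	// roughly (sketch):
+	//
+	//	keep := (spaceFilter == "" || subnet.SpaceName() == spaceFilter) &&
+	//		(zoneFilter == "" || len(zones) == 0 || zoneSet.Contains(zoneFilter))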
+ args := params.SubnetsFilters{} + subnets, err := networkingcommon.ListSubnets(apiservertesting.BackingInstance, args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(subnets.Results, jc.DeepEquals, expected) + + // Filter by space only. + args.SpaceTag = "space-dmz" + subnets, err = networkingcommon.ListSubnets(apiservertesting.BackingInstance, args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(subnets.Results, jc.DeepEquals, expected[1:]) + + // Filter by zone only. + args.SpaceTag = "" + args.Zone = "zone3" + subnets, err = networkingcommon.ListSubnets(apiservertesting.BackingInstance, args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(subnets.Results, jc.DeepEquals, expected[1:]) + + // Filter by both space and zone. + args.SpaceTag = "space-private" + args.Zone = "zone1" + subnets, err = networkingcommon.ListSubnets(apiservertesting.BackingInstance, args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(subnets.Results, jc.DeepEquals, expected[:1]) +} + +func (s *SubnetsSuite) TestListSubnetsInvalidSpaceTag(c *gc.C) { + args := params.SubnetsFilters{SpaceTag: "invalid"} + _, err := networkingcommon.ListSubnets(apiservertesting.BackingInstance, args) + c.Assert(err, gc.ErrorMatches, `"invalid" is not a valid tag`) +} + +func (s *SubnetsSuite) TestListSubnetsAllSubnetError(c *gc.C) { + boom := errors.New("no subnets for you") + apiservertesting.BackingInstance.SetErrors(boom) + _, err := networkingcommon.ListSubnets(apiservertesting.BackingInstance, params.SubnetsFilters{}) + c.Assert(err, gc.ErrorMatches, "no subnets for you") +} === added file 'src/github.com/juju/juju/apiserver/common/networkingcommon/types.go' --- src/github.com/juju/juju/apiserver/common/networkingcommon/types.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/networkingcommon/types.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,150 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package networkingcommon + +import ( + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/network" + providercommon "github.com/juju/juju/provider/common" +) + +// BackingSubnet defines the methods supported by a Subnet entity +// stored persistently. +// +// TODO(dimitern): Once the state backing is implemented, remove this +// and just use *state.Subnet. +type BackingSubnet interface { + CIDR() string + VLANTag() int + ProviderId() network.Id + AvailabilityZones() []string + Status() string + SpaceName() string + Life() params.Life +} + +// BackingSubnetInfo describes a single subnet to be added in the +// backing store. +// +// TODO(dimitern): Replace state.SubnetInfo with this and remove +// BackingSubnetInfo, once the rest of state backing methods and the +// following pre-reqs are done: +// * subnetDoc.AvailabilityZone becomes subnetDoc.AvailabilityZones, +// adding an upgrade step to migrate existing non empty zones on +// subnet docs. Also change state.Subnet.AvailabilityZone to +// * add subnetDoc.SpaceName - no upgrade step needed, as it will only +// be used for new space-aware subnets. +// * Subnets need a reference count to calculate Status. +// * ensure EC2 and MAAS providers accept empty IDs as Subnets() args +// and return all subnets, including the AvailabilityZones (for EC2; +// empty for MAAS as zones are orthogonal to networks). +type BackingSubnetInfo struct { + // ProviderId is a provider-specific network id. This may be empty. 
+	ProviderId network.Id
+
+	// CIDR of the network, in 123.45.67.89/24 format.
+	CIDR string
+
+	// VLANTag needs to be between 1 and 4094 for VLANs and 0 for normal
+	// networks. It's defined by IEEE 802.1Q standard.
+	VLANTag int
+
+	// AllocatableIPHigh and Low describe the allocatable portion of the
+	// subnet. The remainder, if any, is reserved by the provider.
+	// Either both of these must be set or neither; if they're empty it
+	// means that none of the subnet is allocatable. If present they must
+	// be valid IP addresses within the subnet CIDR.
+	AllocatableIPHigh string
+	AllocatableIPLow  string
+
+	// AvailabilityZones describes which availability zone(s) this
+	// subnet is in. It can be empty if the provider does not support
+	// availability zones.
+	AvailabilityZones []string
+
+	// SpaceName holds the juju network space this subnet is
+	// associated with. Can be empty if not supported.
+	SpaceName string
+
+	// Status holds the status of the subnet. Normally this will be
+	// calculated from the reference count and Life of a subnet.
+	Status string
+
+	// Life holds the life of the subnet.
+	Life params.Life
+}
+
+// BackingSpace defines the methods supported by a Space entity stored
+// persistently.
+type BackingSpace interface {
+	// Name returns the space name.
+	Name() string
+
+	// Subnets returns the subnets in the space.
+	Subnets() ([]BackingSubnet, error)
+
+	// ProviderId returns the network ID of the provider.
+	ProviderId() network.Id
+
+	// Zones returns a list of availability zone(s) that this
+	// space is in. It can be empty if the provider does not support
+	// availability zones.
+	Zones() []string
+
+	// Life returns the lifecycle state of the space.
+	Life() params.Life
+}
+
+// NetworkBacking defines the methods needed by the API facade to store and
+// retrieve information from the underlying persistence layer (state
+// DB).
+type NetworkBacking interface {
+	// ModelConfig returns the current model config.
+	ModelConfig() (*config.Config, error)
+
+	// AvailabilityZones returns all cached availability zones (i.e.
+	// not from the provider, but in state).
+	AvailabilityZones() ([]providercommon.AvailabilityZone, error)
+
+	// SetAvailabilityZones replaces the cached list of availability
+	// zones with the given zones.
+	SetAvailabilityZones([]providercommon.AvailabilityZone) error
+
+	// AddSpace creates a space.
+	AddSpace(Name string, ProviderId network.Id, Subnets []string, Public bool) error
+
+	// AllSpaces returns all known Juju network spaces.
+	AllSpaces() ([]BackingSpace, error)
+
+	// AddSubnet creates a backing subnet for an existing subnet.
+	AddSubnet(BackingSubnetInfo) (BackingSubnet, error)
+
+	// AllSubnets returns all backing subnets.
+ AllSubnets() ([]BackingSubnet, error) +} + +func BackingSubnetToParamsSubnet(subnet BackingSubnet) params.Subnet { + cidr := subnet.CIDR() + vlantag := subnet.VLANTag() + providerid := subnet.ProviderId() + zones := subnet.AvailabilityZones() + status := subnet.Status() + var spaceTag names.SpaceTag + if subnet.SpaceName() != "" { + spaceTag = names.NewSpaceTag(subnet.SpaceName()) + } + + return params.Subnet{ + CIDR: cidr, + VLANTag: vlantag, + ProviderId: string(providerid), + Zones: zones, + Status: status, + SpaceTag: spaceTag.String(), + Life: subnet.Life(), + } +} === modified file 'src/github.com/juju/juju/apiserver/common/password_test.go' --- src/github.com/juju/juju/apiserver/common/password_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/common/password_test.go 2016-03-22 15:18:22 +0000 @@ -113,7 +113,7 @@ }, u("x/4"): &fakeUnitAuthenticator{}, u("x/5"): &fakeMachineAuthenticator{jobs: []state.MachineJob{state.JobHostUnits}}, - u("x/6"): &fakeMachineAuthenticator{jobs: []state.MachineJob{state.JobManageEnviron}}, + u("x/6"): &fakeMachineAuthenticator{jobs: []state.MachineJob{state.JobManageModel}}, }, } getCanChange := func() (common.AuthFunc, error) { === modified file 'src/github.com/juju/juju/apiserver/common/registry.go' --- src/github.com/juju/juju/apiserver/common/registry.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/registry.go 2016-03-22 15:18:22 +0000 @@ -220,6 +220,7 @@ } else { f.facades[name] = versions{version: record} } + logger.Tracef("Registered facade %q v%d", name, version) return nil } === modified file 'src/github.com/juju/juju/apiserver/common/setstatus.go' --- src/github.com/juju/juju/apiserver/common/setstatus.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/common/setstatus.go 2016-03-22 15:18:22 +0000 @@ -227,3 +227,27 @@ } return result, nil } + +// UnitAgentFinder is a state.EntityFinder that finds unit agents. +type UnitAgentFinder struct { + state.EntityFinder +} + +// FindEntity implements state.EntityFinder and returns unit agents. +func (ua *UnitAgentFinder) FindEntity(tag names.Tag) (state.Entity, error) { + _, ok := tag.(names.UnitTag) + if !ok { + return nil, errors.Errorf("unsupported tag %T", tag) + } + entity, err := ua.EntityFinder.FindEntity(tag) + if err != nil { + return nil, errors.Trace(err) + } + // this returns a state.Unit, but for testing we just cast to the minimal + // interface we need. 
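+	// Note: the type assertion below panics if the found entity does
+	// not implement hasAgent; for a unit tag the state implementation
+	// returns a *state.Unit, which does.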
+ return entity.(hasAgent).Agent(), nil +} + +type hasAgent interface { + Agent() *state.UnitAgent +} === modified file 'src/github.com/juju/juju/apiserver/common/setstatus_test.go' --- src/github.com/juju/juju/apiserver/common/setstatus_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/setstatus_test.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,7 @@ import ( "time" + "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -280,3 +281,50 @@ c.Assert(result.Results[1].Error, jc.Satisfies, params.IsCodeUnauthorized) c.Assert(result.Results[2].Error, gc.ErrorMatches, `"bad-tag" is not a valid tag`) } + +type unitAgentFinderSuite struct{} + +var _ = gc.Suite(&unitAgentFinderSuite{}) + +func (unitAgentFinderSuite) TestFindEntity(c *gc.C) { + f := fakeEntityFinder{ + unit: fakeUnit{ + agent: &state.UnitAgent{}, + }, + } + ua := &common.UnitAgentFinder{f} + entity, err := ua.FindEntity(names.NewUnitTag("unit/0")) + c.Assert(err, jc.ErrorIsNil) + c.Assert(entity, gc.DeepEquals, f.unit.agent) +} + +func (unitAgentFinderSuite) TestFindEntityBadTag(c *gc.C) { + ua := &common.UnitAgentFinder{fakeEntityFinder{}} + _, err := ua.FindEntity(names.NewServiceTag("foo")) + c.Assert(err, gc.ErrorMatches, "unsupported tag.*") +} + +func (unitAgentFinderSuite) TestFindEntityErr(c *gc.C) { + f := fakeEntityFinder{err: errors.Errorf("boo")} + ua := &common.UnitAgentFinder{f} + _, err := ua.FindEntity(names.NewUnitTag("unit/0")) + c.Assert(errors.Cause(err), gc.Equals, f.err) +} + +type fakeEntityFinder struct { + unit fakeUnit + err error +} + +func (f fakeEntityFinder) FindEntity(tag names.Tag) (state.Entity, error) { + return f.unit, f.err +} + +type fakeUnit struct { + state.Entity + agent *state.UnitAgent +} + +func (f fakeUnit) Agent() *state.UnitAgent { + return f.agent +} === removed file 'src/github.com/juju/juju/apiserver/common/storage.go' --- src/github.com/juju/juju/apiserver/common/storage.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/storage.go 1970-01-01 00:00:00 +0000 @@ -1,263 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/tags" - "github.com/juju/juju/state" - "github.com/juju/juju/storage" -) - -// StorageInterface is an interface for obtaining information about storage -// instances and related entities. -type StorageInterface interface { - // StorageInstance returns the state.StorageInstance corresponding - // to the specified storage tag. - StorageInstance(names.StorageTag) (state.StorageInstance, error) - - // StorageInstanceFilesystem returns the state.Filesystem assigned - // to the storage instance with the specified storage tag. - StorageInstanceFilesystem(names.StorageTag) (state.Filesystem, error) - - // StorageInstanceVolume returns the state.Volume assigned to the - // storage instance with the specified storage tag. - StorageInstanceVolume(names.StorageTag) (state.Volume, error) - - // FilesystemAttachment returns the state.FilesystemAttachment - // corresponding to the identified machine and filesystem. - FilesystemAttachment(names.MachineTag, names.FilesystemTag) (state.FilesystemAttachment, error) - - // VolumeAttachment returns the state.VolumeAttachment corresponding - // to the identified machine and volume. 
- VolumeAttachment(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) - - // WatchStorageAttachment watches for changes to the storage attachment - // corresponding to the identfified unit and storage instance. - WatchStorageAttachment(names.StorageTag, names.UnitTag) state.NotifyWatcher - - // WatchFilesystemAttachment watches for changes to the filesystem - // attachment corresponding to the identfified machine and filesystem. - WatchFilesystemAttachment(names.MachineTag, names.FilesystemTag) state.NotifyWatcher - - // WatchVolumeAttachment watches for changes to the volume attachment - // corresponding to the identfified machine and volume. - WatchVolumeAttachment(names.MachineTag, names.VolumeTag) state.NotifyWatcher - - // WatchBlockDevices watches for changes to block devices associated - // with the specified machine. - WatchBlockDevices(names.MachineTag) state.NotifyWatcher - - // BlockDevices returns information about block devices published - // for the specified machine. - BlockDevices(names.MachineTag) ([]state.BlockDeviceInfo, error) -} - -// StorageAttachmentInfo returns the StorageAttachmentInfo for the specified -// StorageAttachment by gathering information from related entities (volumes, -// filesystems). -// -// StorageAttachmentInfo returns an error satisfying errors.IsNotProvisioned -// if the storage attachment is not yet fully provisioned and ready for use -// by a charm. -func StorageAttachmentInfo( - st StorageInterface, - att state.StorageAttachment, - machineTag names.MachineTag, -) (*storage.StorageAttachmentInfo, error) { - storageInstance, err := st.StorageInstance(att.StorageInstance()) - if err != nil { - return nil, errors.Annotate(err, "getting storage instance") - } - switch storageInstance.Kind() { - case state.StorageKindBlock: - return volumeStorageAttachmentInfo(st, storageInstance, machineTag) - case state.StorageKindFilesystem: - return filesystemStorageAttachmentInfo(st, storageInstance, machineTag) - } - return nil, errors.Errorf("invalid storage kind %v", storageInstance.Kind()) -} - -func volumeStorageAttachmentInfo( - st StorageInterface, - storageInstance state.StorageInstance, - machineTag names.MachineTag, -) (*storage.StorageAttachmentInfo, error) { - storageTag := storageInstance.StorageTag() - volume, err := st.StorageInstanceVolume(storageTag) - if err != nil { - return nil, errors.Annotate(err, "getting volume") - } - volumeInfo, err := volume.Info() - if err != nil { - return nil, errors.Annotate(err, "getting volume info") - } - volumeAttachment, err := st.VolumeAttachment(machineTag, volume.VolumeTag()) - if err != nil { - return nil, errors.Annotate(err, "getting volume attachment") - } - volumeAttachmentInfo, err := volumeAttachment.Info() - if err != nil { - return nil, errors.Annotate(err, "getting volume attachment info") - } - blockDevices, err := st.BlockDevices(machineTag) - if err != nil { - return nil, errors.Annotate(err, "getting block devices") - } - blockDevice, ok := MatchingBlockDevice( - blockDevices, - volumeInfo, - volumeAttachmentInfo, - ) - if !ok { - // We must not say that a block-kind storage attachment is - // provisioned until its block device has shown up on the - // machine, otherwise the charm may attempt to use it and - // fail. 
- return nil, errors.NotProvisionedf("%v", names.ReadableString(storageTag)) - } - devicePath, err := volumeAttachmentDevicePath( - volumeInfo, - volumeAttachmentInfo, - *blockDevice, - ) - if err != nil { - return nil, errors.Trace(err) - } - return &storage.StorageAttachmentInfo{ - storage.StorageKindBlock, - devicePath, - }, nil -} - -func filesystemStorageAttachmentInfo( - st StorageInterface, - storageInstance state.StorageInstance, - machineTag names.MachineTag, -) (*storage.StorageAttachmentInfo, error) { - storageTag := storageInstance.StorageTag() - filesystem, err := st.StorageInstanceFilesystem(storageTag) - if err != nil { - return nil, errors.Annotate(err, "getting filesystem") - } - filesystemAttachment, err := st.FilesystemAttachment(machineTag, filesystem.FilesystemTag()) - if err != nil { - return nil, errors.Annotate(err, "getting filesystem attachment") - } - filesystemAttachmentInfo, err := filesystemAttachment.Info() - if err != nil { - return nil, errors.Annotate(err, "getting filesystem attachment info") - } - return &storage.StorageAttachmentInfo{ - storage.StorageKindFilesystem, - filesystemAttachmentInfo.MountPoint, - }, nil -} - -// WatchStorageAttachment returns a state.NotifyWatcher that reacts to changes -// to the VolumeAttachmentInfo or FilesystemAttachmentInfo corresponding to the -// tags specified. -func WatchStorageAttachment( - st StorageInterface, - storageTag names.StorageTag, - machineTag names.MachineTag, - unitTag names.UnitTag, -) (state.NotifyWatcher, error) { - storageInstance, err := st.StorageInstance(storageTag) - if err != nil { - return nil, errors.Annotate(err, "getting storage instance") - } - var watchers []state.NotifyWatcher - switch storageInstance.Kind() { - case state.StorageKindBlock: - volume, err := st.StorageInstanceVolume(storageTag) - if err != nil { - return nil, errors.Annotate(err, "getting storage volume") - } - // We need to watch both the volume attachment, and the - // machine's block devices. A volume attachment's block - // device could change (most likely, become present). - watchers = []state.NotifyWatcher{ - st.WatchVolumeAttachment(machineTag, volume.VolumeTag()), - // TODO(axw) 2015-09-30 #1501203 - // We should filter the events to only those relevant - // to the volume attachment. This means we would need - // to either start th block device watcher after we - // have provisioned the volume attachment (cleaner?), - // or have the filter ignore changes until the volume - // attachment is provisioned. - st.WatchBlockDevices(machineTag), - } - case state.StorageKindFilesystem: - filesystem, err := st.StorageInstanceFilesystem(storageTag) - if err != nil { - return nil, errors.Annotate(err, "getting storage filesystem") - } - watchers = []state.NotifyWatcher{ - st.WatchFilesystemAttachment(machineTag, filesystem.FilesystemTag()), - } - default: - return nil, errors.Errorf("invalid storage kind %v", storageInstance.Kind()) - } - watchers = append(watchers, st.WatchStorageAttachment(storageTag, unitTag)) - return newMultiNotifyWatcher(watchers...), nil -} - -// volumeAttachmentDevicePath returns the absolute device path for -// a volume attachment. The value is only meaningful in the context -// of the machine that the volume is attached to. 
-func volumeAttachmentDevicePath( - volumeInfo state.VolumeInfo, - volumeAttachmentInfo state.VolumeAttachmentInfo, - blockDevice state.BlockDeviceInfo, -) (string, error) { - if volumeInfo.HardwareId != "" || volumeAttachmentInfo.DeviceName != "" || volumeAttachmentInfo.DeviceLink != "" { - // Prefer the volume attachment's information over what is - // in the published block device information. - var deviceLinks []string - if volumeAttachmentInfo.DeviceLink != "" { - deviceLinks = []string{volumeAttachmentInfo.DeviceLink} - } - return storage.BlockDevicePath(storage.BlockDevice{ - HardwareId: volumeInfo.HardwareId, - DeviceName: volumeAttachmentInfo.DeviceName, - DeviceLinks: deviceLinks, - }) - } - return storage.BlockDevicePath(BlockDeviceFromState(blockDevice)) -} - -// MaybeAssignedStorageInstance calls the provided function to get a -// StorageTag, and returns the corresponding state.StorageInstance if -// it didn't return an errors.IsNotAssigned error, or nil if it did. -func MaybeAssignedStorageInstance( - getTag func() (names.StorageTag, error), - getStorageInstance func(names.StorageTag) (state.StorageInstance, error), -) (state.StorageInstance, error) { - tag, err := getTag() - if err == nil { - return getStorageInstance(tag) - } else if errors.IsNotAssigned(err) { - return nil, nil - } - return nil, errors.Trace(err) -} - -// storageTags returns the tags that should be set on a volume or filesystem, -// if the provider supports them. -func storageTags( - storageInstance state.StorageInstance, - cfg *config.Config, -) (map[string]string, error) { - uuid, _ := cfg.UUID() - storageTags := tags.ResourceTags(names.NewEnvironTag(uuid), cfg) - if storageInstance != nil { - storageTags[tags.JujuStorageInstance] = storageInstance.Tag().Id() - storageTags[tags.JujuStorageOwner] = storageInstance.Owner().Id() - } - return storageTags, nil -} === removed file 'src/github.com/juju/juju/apiserver/common/storage_test.go' --- src/github.com/juju/juju/apiserver/common/storage_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/storage_test.go 1970-01-01 00:00:00 +0000 @@ -1,241 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package common_test - -import ( - "path/filepath" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" - "github.com/juju/juju/storage" -) - -type storageAttachmentInfoSuite struct { - machineTag names.MachineTag - volumeTag names.VolumeTag - storageTag names.StorageTag - st *fakeStorage - storageInstance *fakeStorageInstance - storageAttachment *fakeStorageAttachment - volume *fakeVolume - volumeAttachment *fakeVolumeAttachment - blockDevices []state.BlockDeviceInfo -} - -var _ = gc.Suite(&storageAttachmentInfoSuite{}) - -func (s *storageAttachmentInfoSuite) SetUpTest(c *gc.C) { - s.machineTag = names.NewMachineTag("0") - s.volumeTag = names.NewVolumeTag("0") - s.storageTag = names.NewStorageTag("osd-devices/0") - s.storageInstance = &fakeStorageInstance{ - tag: s.storageTag, - owner: s.machineTag, - kind: state.StorageKindBlock, - } - s.storageAttachment = &fakeStorageAttachment{ - storageTag: s.storageTag, - } - s.volume = &fakeVolume{ - tag: s.volumeTag, - info: &state.VolumeInfo{ - VolumeId: "vol-ume", - Pool: "radiance", - Size: 1024, - }, - } - s.volumeAttachment = &fakeVolumeAttachment{ - info: &state.VolumeAttachmentInfo{}, - } - s.blockDevices = []state.BlockDeviceInfo{{ - DeviceName: "sda", - DeviceLinks: []string{"/dev/disk/by-id/verbatim"}, - HardwareId: "whatever", - }} - s.st = &fakeStorage{ - storageInstance: func(tag names.StorageTag) (state.StorageInstance, error) { - return s.storageInstance, nil - }, - storageInstanceVolume: func(tag names.StorageTag) (state.Volume, error) { - return s.volume, nil - }, - volumeAttachment: func(m names.MachineTag, v names.VolumeTag) (state.VolumeAttachment, error) { - return s.volumeAttachment, nil - }, - blockDevices: func(m names.MachineTag) ([]state.BlockDeviceInfo, error) { - return s.blockDevices, nil - }, - } -} - -func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoPersistentDeviceName(c *gc.C) { - s.volumeAttachment.info.DeviceName = "sda" - info, err := common.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) - c.Assert(err, jc.ErrorIsNil) - s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") - c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ - Kind: storage.StorageKindBlock, - Location: filepath.FromSlash("/dev/sda"), - }) -} - -func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoMissingBlockDevice(c *gc.C) { - // If the block device has not shown up yet, - // then we should get a NotProvisioned error. 
- s.blockDevices = nil - s.volumeAttachment.info.DeviceName = "sda" - _, err := common.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) - c.Assert(err, jc.Satisfies, errors.IsNotProvisioned) - s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") -} - -func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoPersistentDeviceLink(c *gc.C) { - s.volumeAttachment.info.DeviceLink = "/dev/disk/by-id/verbatim" - info, err := common.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) - c.Assert(err, jc.ErrorIsNil) - s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") - c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ - Kind: storage.StorageKindBlock, - Location: "/dev/disk/by-id/verbatim", - }) -} - -func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoPersistentHardwareId(c *gc.C) { - s.volume.info.HardwareId = "whatever" - info, err := common.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) - c.Assert(err, jc.ErrorIsNil) - s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") - c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ - Kind: storage.StorageKindBlock, - Location: filepath.FromSlash("/dev/disk/by-id/whatever"), - }) -} - -func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoMatchingBlockDevice(c *gc.C) { - // The bus address alone is not enough to produce a path to the block - // device; we need to find a published block device with the matching - // bus address. - s.volumeAttachment.info.BusAddress = "scsi@1:2.3.4" - s.blockDevices = []state.BlockDeviceInfo{{ - DeviceName: "sda", - }, { - DeviceName: "sdb", - BusAddress: s.volumeAttachment.info.BusAddress, - }} - info, err := common.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) - c.Assert(err, jc.ErrorIsNil) - s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") - c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ - Kind: storage.StorageKindBlock, - Location: filepath.FromSlash("/dev/sdb"), - }) -} - -func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoNoBlockDevice(c *gc.C) { - // Neither the volume nor the volume attachment has enough information - // to persistently identify the path, so we must enquire about block - // devices; there are none (yet), so NotProvisioned is returned. 
- s.volumeAttachment.info.BusAddress = "scsi@1:2.3.4" - _, err := common.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) - c.Assert(err, jc.Satisfies, errors.IsNotProvisioned) - s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") -} - -type watchStorageAttachmentSuite struct { - storageTag names.StorageTag - machineTag names.MachineTag - unitTag names.UnitTag - st *fakeStorage - storageInstance *fakeStorageInstance - volume *fakeVolume - volumeAttachmentWatcher *fakeNotifyWatcher - blockDevicesWatcher *fakeNotifyWatcher - storageAttachmentWatcher *fakeNotifyWatcher -} - -var _ = gc.Suite(&watchStorageAttachmentSuite{}) - -func (s *watchStorageAttachmentSuite) SetUpTest(c *gc.C) { - s.storageTag = names.NewStorageTag("osd-devices/0") - s.machineTag = names.NewMachineTag("0") - s.unitTag = names.NewUnitTag("ceph/0") - s.storageInstance = &fakeStorageInstance{ - tag: s.storageTag, - owner: s.machineTag, - kind: state.StorageKindBlock, - } - s.volume = &fakeVolume{tag: names.NewVolumeTag("0")} - s.volumeAttachmentWatcher = &fakeNotifyWatcher{changes: make(chan struct{}, 1)} - s.blockDevicesWatcher = &fakeNotifyWatcher{changes: make(chan struct{}, 1)} - s.storageAttachmentWatcher = &fakeNotifyWatcher{changes: make(chan struct{}, 1)} - s.volumeAttachmentWatcher.changes <- struct{}{} - s.blockDevicesWatcher.changes <- struct{}{} - s.storageAttachmentWatcher.changes <- struct{}{} - s.st = &fakeStorage{ - storageInstance: func(tag names.StorageTag) (state.StorageInstance, error) { - return s.storageInstance, nil - }, - storageInstanceVolume: func(tag names.StorageTag) (state.Volume, error) { - return s.volume, nil - }, - watchVolumeAttachment: func(names.MachineTag, names.VolumeTag) state.NotifyWatcher { - return s.volumeAttachmentWatcher - }, - watchBlockDevices: func(names.MachineTag) state.NotifyWatcher { - return s.blockDevicesWatcher - }, - watchStorageAttachment: func(names.StorageTag, names.UnitTag) state.NotifyWatcher { - return s.storageAttachmentWatcher - }, - } -} - -func (s *watchStorageAttachmentSuite) TestWatchStorageAttachmentVolumeAttachmentChanges(c *gc.C) { - s.testWatchBlockStorageAttachment(c, func() { - s.volumeAttachmentWatcher.changes <- struct{}{} - }) -} - -func (s *watchStorageAttachmentSuite) TestWatchStorageAttachmentStorageAttachmentChanges(c *gc.C) { - s.testWatchBlockStorageAttachment(c, func() { - s.storageAttachmentWatcher.changes <- struct{}{} - }) -} - -func (s *watchStorageAttachmentSuite) TestWatchStorageAttachmentBlockDevicesChange(c *gc.C) { - s.testWatchBlockStorageAttachment(c, func() { - s.blockDevicesWatcher.changes <- struct{}{} - }) -} - -func (s *watchStorageAttachmentSuite) testWatchBlockStorageAttachment(c *gc.C, change func()) { - s.testWatchStorageAttachment(c, change) - s.st.CheckCallNames(c, - "StorageInstance", - "StorageInstanceVolume", - "WatchVolumeAttachment", - "WatchBlockDevices", - "WatchStorageAttachment", - ) -} - -func (s *watchStorageAttachmentSuite) testWatchStorageAttachment(c *gc.C, change func()) { - w, err := common.WatchStorageAttachment( - s.st, - s.storageTag, - s.machineTag, - s.unitTag, - ) - c.Assert(err, jc.ErrorIsNil) - wc := statetesting.NewNotifyWatcherC(c, nopSyncStarter{}, w) - wc.AssertOneChange() - change() - wc.AssertOneChange() -} === added directory 'src/github.com/juju/juju/apiserver/common/storagecommon' === added file 'src/github.com/juju/juju/apiserver/common/storagecommon/blockdevices.go' --- 
src/github.com/juju/juju/apiserver/common/storagecommon/blockdevices.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/storagecommon/blockdevices.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storagecommon + +import ( + "github.com/juju/juju/state" + "github.com/juju/juju/storage" +) + +// BlockDeviceFromState translates a state.BlockDeviceInfo to a +// storage.BlockDevice. +func BlockDeviceFromState(in state.BlockDeviceInfo) storage.BlockDevice { + return storage.BlockDevice{ + in.DeviceName, + in.DeviceLinks, + in.Label, + in.UUID, + in.HardwareId, + in.BusAddress, + in.Size, + in.FilesystemType, + in.InUse, + in.MountPoint, + } +} + +// MatchingBlockDevice finds the block device that matches the +// provided volume info and volume attachment info. +func MatchingBlockDevice( + blockDevices []state.BlockDeviceInfo, + volumeInfo state.VolumeInfo, + attachmentInfo state.VolumeAttachmentInfo, +) (*state.BlockDeviceInfo, bool) { + for _, dev := range blockDevices { + if volumeInfo.HardwareId != "" { + if volumeInfo.HardwareId == dev.HardwareId { + return &dev, true + } + continue + } + if attachmentInfo.BusAddress != "" { + if attachmentInfo.BusAddress == dev.BusAddress { + return &dev, true + } + continue + } + if attachmentInfo.DeviceLink != "" { + for _, link := range dev.DeviceLinks { + if attachmentInfo.DeviceLink == link { + return &dev, true + } + } + continue + } + if attachmentInfo.DeviceName == dev.DeviceName { + return &dev, true + } + } + return nil, false +} === added file 'src/github.com/juju/juju/apiserver/common/storagecommon/filesystems.go' --- src/github.com/juju/juju/apiserver/common/storagecommon/filesystems.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/storagecommon/filesystems.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,178 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storagecommon + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" + "github.com/juju/juju/storage/poolmanager" +) + +// FilesystemParams returns the parameters for creating or destroying the +// given filesystem. 
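+//
+// The pool and size are taken from the filesystem's params if it is not
+// yet provisioned, and from its recorded info otherwise. A rough sketch
+// of a call site (fs, envConfig, and pm are placeholder values):
+//
+//	p, err := FilesystemParams(fs, storageInstance, envConfig, pm)
+//	if err != nil {
+//		// handle error
+//	}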
+func FilesystemParams(
+	f state.Filesystem,
+	storageInstance state.StorageInstance,
+	environConfig *config.Config,
+	poolManager poolmanager.PoolManager,
+) (params.FilesystemParams, error) {
+
+	var pool string
+	var size uint64
+	if stateFilesystemParams, ok := f.Params(); ok {
+		pool = stateFilesystemParams.Pool
+		size = stateFilesystemParams.Size
+	} else {
+		filesystemInfo, err := f.Info()
+		if err != nil {
+			return params.FilesystemParams{}, errors.Trace(err)
+		}
+		pool = filesystemInfo.Pool
+		size = filesystemInfo.Size
+	}
+
+	filesystemTags, err := storageTags(storageInstance, environConfig)
+	if err != nil {
+		return params.FilesystemParams{}, errors.Annotate(err, "computing storage tags")
+	}
+
+	providerType, cfg, err := StoragePoolConfig(pool, poolManager)
+	if err != nil {
+		return params.FilesystemParams{}, errors.Trace(err)
+	}
+	result := params.FilesystemParams{
+		f.Tag().String(),
+		"", // volume tag
+		size,
+		string(providerType),
+		cfg.Attrs(),
+		filesystemTags,
+		nil, // attachment params set by the caller
+	}
+
+	volumeTag, err := f.Volume()
+	if err == nil {
+		result.VolumeTag = volumeTag.String()
+	} else if err != state.ErrNoBackingVolume {
+		return params.FilesystemParams{}, errors.Trace(err)
+	}
+
+	return result, nil
+}
+
+// FilesystemsToState converts a slice of params.Filesystem to a mapping
+// of filesystem tags to state.FilesystemInfo.
+func FilesystemsToState(in []params.Filesystem) (map[names.FilesystemTag]state.FilesystemInfo, error) {
+	m := make(map[names.FilesystemTag]state.FilesystemInfo)
+	for _, v := range in {
+		tag, filesystemInfo, err := FilesystemToState(v)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		m[tag] = filesystemInfo
+	}
+	return m, nil
+}
+
+// FilesystemToState converts a params.Filesystem to state.FilesystemInfo
+// and names.FilesystemTag.
+func FilesystemToState(v params.Filesystem) (names.FilesystemTag, state.FilesystemInfo, error) {
+	filesystemTag, err := names.ParseFilesystemTag(v.FilesystemTag)
+	if err != nil {
+		return names.FilesystemTag{}, state.FilesystemInfo{}, errors.Trace(err)
+	}
+	return filesystemTag, state.FilesystemInfo{
+		v.Info.Size,
+		"", // pool is set by state
+		v.Info.FilesystemId,
+	}, nil
+}
+
+// FilesystemFromState converts a state.Filesystem to params.Filesystem.
+func FilesystemFromState(f state.Filesystem) (params.Filesystem, error) {
+	info, err := f.Info()
+	if err != nil {
+		return params.Filesystem{}, errors.Trace(err)
+	}
+	result := params.Filesystem{
+		f.FilesystemTag().String(),
+		"",
+		FilesystemInfoFromState(info),
+	}
+	volumeTag, err := f.Volume()
+	if err == nil {
+		result.VolumeTag = volumeTag.String()
+	} else if err != state.ErrNoBackingVolume {
+		return params.Filesystem{}, errors.Trace(err)
+	}
+	return result, nil
+}
+
+// FilesystemInfoFromState converts a state.FilesystemInfo to params.FilesystemInfo.
+func FilesystemInfoFromState(info state.FilesystemInfo) params.FilesystemInfo {
+	return params.FilesystemInfo{
+		info.FilesystemId,
+		info.Size,
+	}
+}
+
+// FilesystemAttachmentToState converts a params.FilesystemAttachment
+// to a state.FilesystemAttachmentInfo.
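+// The machine and filesystem tags parsed from the attachment are
+// returned alongside the info.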
+func FilesystemAttachmentToState(in params.FilesystemAttachment) (names.MachineTag, names.FilesystemTag, state.FilesystemAttachmentInfo, error) { + machineTag, err := names.ParseMachineTag(in.MachineTag) + if err != nil { + return names.MachineTag{}, names.FilesystemTag{}, state.FilesystemAttachmentInfo{}, err + } + filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) + if err != nil { + return names.MachineTag{}, names.FilesystemTag{}, state.FilesystemAttachmentInfo{}, err + } + info := state.FilesystemAttachmentInfo{ + in.Info.MountPoint, + in.Info.ReadOnly, + } + return machineTag, filesystemTag, info, nil +} + +// FilesystemAttachmentFromState converts a state.FilesystemAttachment to params.FilesystemAttachment. +func FilesystemAttachmentFromState(v state.FilesystemAttachment) (params.FilesystemAttachment, error) { + info, err := v.Info() + if err != nil { + return params.FilesystemAttachment{}, errors.Trace(err) + } + return params.FilesystemAttachment{ + v.Filesystem().String(), + v.Machine().String(), + FilesystemAttachmentInfoFromState(info), + }, nil +} + +// FilesystemAttachmentInfoFromState converts a state.FilesystemAttachmentInfo +// to params.FilesystemAttachmentInfo. +func FilesystemAttachmentInfoFromState(info state.FilesystemAttachmentInfo) params.FilesystemAttachmentInfo { + return params.FilesystemAttachmentInfo{ + info.MountPoint, + info.ReadOnly, + } +} + +// ParseFilesystemAttachmentIds parses the strings, returning machine storage IDs. +func ParseFilesystemAttachmentIds(stringIds []string) ([]params.MachineStorageId, error) { + ids := make([]params.MachineStorageId, len(stringIds)) + for i, s := range stringIds { + m, f, err := state.ParseFilesystemAttachmentId(s) + if err != nil { + return nil, err + } + ids[i] = params.MachineStorageId{ + MachineTag: m.String(), + AttachmentTag: f.String(), + } + } + return ids, nil +} === added file 'src/github.com/juju/juju/apiserver/common/storagecommon/mock_test.go' --- src/github.com/juju/juju/apiserver/common/storagecommon/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/storagecommon/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,167 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package storagecommon_test + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + "launchpad.net/tomb" + + "github.com/juju/juju/apiserver/common/storagecommon" + "github.com/juju/juju/state" + "github.com/juju/juju/storage" + "github.com/juju/juju/storage/poolmanager" +) + +type fakeStorage struct { + testing.Stub + storagecommon.StorageInterface + storageInstance func(names.StorageTag) (state.StorageInstance, error) + storageInstanceVolume func(names.StorageTag) (state.Volume, error) + volumeAttachment func(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) + blockDevices func(names.MachineTag) ([]state.BlockDeviceInfo, error) + watchVolumeAttachment func(names.MachineTag, names.VolumeTag) state.NotifyWatcher + watchBlockDevices func(names.MachineTag) state.NotifyWatcher + watchStorageAttachment func(names.StorageTag, names.UnitTag) state.NotifyWatcher +} + +func (s *fakeStorage) StorageInstance(tag names.StorageTag) (state.StorageInstance, error) { + s.MethodCall(s, "StorageInstance", tag) + return s.storageInstance(tag) +} + +func (s *fakeStorage) StorageInstanceVolume(tag names.StorageTag) (state.Volume, error) { + s.MethodCall(s, "StorageInstanceVolume", tag) + return s.storageInstanceVolume(tag) +} + +func (s *fakeStorage) VolumeAttachment(m names.MachineTag, v names.VolumeTag) (state.VolumeAttachment, error) { + s.MethodCall(s, "VolumeAttachment", m, v) + return s.volumeAttachment(m, v) +} + +func (s *fakeStorage) BlockDevices(m names.MachineTag) ([]state.BlockDeviceInfo, error) { + s.MethodCall(s, "BlockDevices", m) + return s.blockDevices(m) +} + +func (s *fakeStorage) WatchVolumeAttachment(m names.MachineTag, v names.VolumeTag) state.NotifyWatcher { + s.MethodCall(s, "WatchVolumeAttachment", m, v) + return s.watchVolumeAttachment(m, v) +} + +func (s *fakeStorage) WatchBlockDevices(m names.MachineTag) state.NotifyWatcher { + s.MethodCall(s, "WatchBlockDevices", m) + return s.watchBlockDevices(m) +} + +func (s *fakeStorage) WatchStorageAttachment(st names.StorageTag, u names.UnitTag) state.NotifyWatcher { + s.MethodCall(s, "WatchStorageAttachment", st, u) + return s.watchStorageAttachment(st, u) +} + +type fakeStorageInstance struct { + state.StorageInstance + tag names.StorageTag + owner names.Tag + kind state.StorageKind +} + +func (i *fakeStorageInstance) StorageTag() names.StorageTag { + return i.tag +} + +func (i *fakeStorageInstance) Tag() names.Tag { + return i.tag +} + +func (i *fakeStorageInstance) Owner() names.Tag { + return i.owner +} + +func (i *fakeStorageInstance) Kind() state.StorageKind { + return i.kind +} + +type fakeStorageAttachment struct { + state.StorageAttachment + storageTag names.StorageTag +} + +func (a *fakeStorageAttachment) StorageInstance() names.StorageTag { + return a.storageTag +} + +type fakeVolume struct { + state.Volume + tag names.VolumeTag + params *state.VolumeParams + info *state.VolumeInfo +} + +func (v *fakeVolume) VolumeTag() names.VolumeTag { + return v.tag +} + +func (v *fakeVolume) Tag() names.Tag { + return v.tag +} + +func (v *fakeVolume) Params() (state.VolumeParams, bool) { + if v.params == nil { + return state.VolumeParams{}, false + } + return *v.params, true +} + +func (v *fakeVolume) Info() (state.VolumeInfo, error) { + if v.info == nil { + return state.VolumeInfo{}, errors.NotProvisionedf("volume %v", v.tag.Id()) + } + return *v.info, nil +} + +type fakeVolumeAttachment struct { + state.VolumeAttachment + info *state.VolumeAttachmentInfo +} + +func (v 
*fakeVolumeAttachment) Info() (state.VolumeAttachmentInfo, error) {
+	if v.info == nil {
+		return state.VolumeAttachmentInfo{}, errors.NotProvisionedf("volume attachment")
+	}
+	return *v.info, nil
+}
+
+type fakePoolManager struct {
+	poolmanager.PoolManager
+}
+
+func (pm *fakePoolManager) Get(name string) (*storage.Config, error) {
+	return nil, errors.NotFoundf("pool")
+}
+
+type fakeNotifyWatcher struct {
+	tomb.Tomb
+	ch chan struct{}
+}
+
+func (w *fakeNotifyWatcher) Kill() {
+	w.Tomb.Kill(nil)
+	w.Tomb.Done()
+}
+
+func (w *fakeNotifyWatcher) Stop() error {
+	w.Kill()
+	return w.Wait()
+}
+
+func (w *fakeNotifyWatcher) Changes() <-chan struct{} {
+	return w.ch
+}
+
+type nopSyncStarter struct{}
+
+func (nopSyncStarter) StartSync() {}

=== added file 'src/github.com/juju/juju/apiserver/common/storagecommon/package_test.go'
--- src/github.com/juju/juju/apiserver/common/storagecommon/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/common/storagecommon/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package storagecommon_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestAll(t *stdtesting.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/juju/apiserver/common/storagecommon/storage.go'
--- src/github.com/juju/juju/apiserver/common/storagecommon/storage.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/common/storagecommon/storage.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,266 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package storagecommon provides common storage-related services
+// for API server facades.
+package storagecommon
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/names"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/environs/tags"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/storage"
+)
+
+// StorageInterface is an interface for obtaining information about storage
+// instances and related entities.
+type StorageInterface interface {
+	// StorageInstance returns the state.StorageInstance corresponding
+	// to the specified storage tag.
+	StorageInstance(names.StorageTag) (state.StorageInstance, error)
+
+	// StorageInstanceFilesystem returns the state.Filesystem assigned
+	// to the storage instance with the specified storage tag.
+	StorageInstanceFilesystem(names.StorageTag) (state.Filesystem, error)
+
+	// StorageInstanceVolume returns the state.Volume assigned to the
+	// storage instance with the specified storage tag.
+	StorageInstanceVolume(names.StorageTag) (state.Volume, error)
+
+	// FilesystemAttachment returns the state.FilesystemAttachment
+	// corresponding to the identified machine and filesystem.
+	FilesystemAttachment(names.MachineTag, names.FilesystemTag) (state.FilesystemAttachment, error)
+
+	// VolumeAttachment returns the state.VolumeAttachment corresponding
+	// to the identified machine and volume.
+	VolumeAttachment(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error)
+
+	// WatchStorageAttachment watches for changes to the storage attachment
+	// corresponding to the identified unit and storage instance.
+	WatchStorageAttachment(names.StorageTag, names.UnitTag) state.NotifyWatcher
+
+	// WatchFilesystemAttachment watches for changes to the filesystem
+	// attachment corresponding to the identified machine and filesystem.
+	WatchFilesystemAttachment(names.MachineTag, names.FilesystemTag) state.NotifyWatcher
+
+	// WatchVolumeAttachment watches for changes to the volume attachment
+	// corresponding to the identified machine and volume.
+	WatchVolumeAttachment(names.MachineTag, names.VolumeTag) state.NotifyWatcher
+
+	// WatchBlockDevices watches for changes to block devices associated
+	// with the specified machine.
+	WatchBlockDevices(names.MachineTag) state.NotifyWatcher
+
+	// BlockDevices returns information about block devices published
+	// for the specified machine.
+	BlockDevices(names.MachineTag) ([]state.BlockDeviceInfo, error)
+}
+
+// StorageAttachmentInfo returns the StorageAttachmentInfo for the specified
+// StorageAttachment by gathering information from related entities (volumes,
+// filesystems).
+//
+// StorageAttachmentInfo returns an error satisfying errors.IsNotProvisioned
+// if the storage attachment is not yet fully provisioned and ready for use
+// by a charm.
+func StorageAttachmentInfo(
+	st StorageInterface,
+	att state.StorageAttachment,
+	machineTag names.MachineTag,
+) (*storage.StorageAttachmentInfo, error) {
+	storageInstance, err := st.StorageInstance(att.StorageInstance())
+	if err != nil {
+		return nil, errors.Annotate(err, "getting storage instance")
+	}
+	switch storageInstance.Kind() {
+	case state.StorageKindBlock:
+		return volumeStorageAttachmentInfo(st, storageInstance, machineTag)
+	case state.StorageKindFilesystem:
+		return filesystemStorageAttachmentInfo(st, storageInstance, machineTag)
+	}
+	return nil, errors.Errorf("invalid storage kind %v", storageInstance.Kind())
+}
+
+func volumeStorageAttachmentInfo(
+	st StorageInterface,
+	storageInstance state.StorageInstance,
+	machineTag names.MachineTag,
+) (*storage.StorageAttachmentInfo, error) {
+	storageTag := storageInstance.StorageTag()
+	volume, err := st.StorageInstanceVolume(storageTag)
+	if err != nil {
+		return nil, errors.Annotate(err, "getting volume")
+	}
+	volumeInfo, err := volume.Info()
+	if err != nil {
+		return nil, errors.Annotate(err, "getting volume info")
+	}
+	volumeAttachment, err := st.VolumeAttachment(machineTag, volume.VolumeTag())
+	if err != nil {
+		return nil, errors.Annotate(err, "getting volume attachment")
+	}
+	volumeAttachmentInfo, err := volumeAttachment.Info()
+	if err != nil {
+		return nil, errors.Annotate(err, "getting volume attachment info")
+	}
+	blockDevices, err := st.BlockDevices(machineTag)
+	if err != nil {
+		return nil, errors.Annotate(err, "getting block devices")
+	}
+	blockDevice, ok := MatchingBlockDevice(
+		blockDevices,
+		volumeInfo,
+		volumeAttachmentInfo,
+	)
+	if !ok {
+		// We must not say that a block-kind storage attachment is
+		// provisioned until its block device has shown up on the
+		// machine, otherwise the charm may attempt to use it and
+		// fail.
+		return nil, errors.NotProvisionedf("%v", names.ReadableString(storageTag))
+	}
+	devicePath, err := volumeAttachmentDevicePath(
+		volumeInfo,
+		volumeAttachmentInfo,
+		*blockDevice,
+	)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return &storage.StorageAttachmentInfo{
+		storage.StorageKindBlock,
+		devicePath,
+	}, nil
+}
+
+func filesystemStorageAttachmentInfo(
+	st StorageInterface,
+	storageInstance state.StorageInstance,
+	machineTag names.MachineTag,
+) (*storage.StorageAttachmentInfo, error) {
+	storageTag := storageInstance.StorageTag()
+	filesystem, err := st.StorageInstanceFilesystem(storageTag)
+	if err != nil {
+		return nil, errors.Annotate(err, "getting filesystem")
+	}
+	filesystemAttachment, err := st.FilesystemAttachment(machineTag, filesystem.FilesystemTag())
+	if err != nil {
+		return nil, errors.Annotate(err, "getting filesystem attachment")
+	}
+	filesystemAttachmentInfo, err := filesystemAttachment.Info()
+	if err != nil {
+		return nil, errors.Annotate(err, "getting filesystem attachment info")
+	}
+	return &storage.StorageAttachmentInfo{
+		storage.StorageKindFilesystem,
+		filesystemAttachmentInfo.MountPoint,
+	}, nil
+}
+
+// WatchStorageAttachment returns a state.NotifyWatcher that reacts to changes
+// to the VolumeAttachmentInfo or FilesystemAttachmentInfo corresponding to the
+// tags specified.
+func WatchStorageAttachment(
+	st StorageInterface,
+	storageTag names.StorageTag,
+	machineTag names.MachineTag,
+	unitTag names.UnitTag,
+) (state.NotifyWatcher, error) {
+	storageInstance, err := st.StorageInstance(storageTag)
+	if err != nil {
+		return nil, errors.Annotate(err, "getting storage instance")
+	}
+	var watchers []state.NotifyWatcher
+	switch storageInstance.Kind() {
+	case state.StorageKindBlock:
+		volume, err := st.StorageInstanceVolume(storageTag)
+		if err != nil {
+			return nil, errors.Annotate(err, "getting storage volume")
+		}
+		// We need to watch both the volume attachment, and the
+		// machine's block devices. A volume attachment's block
+		// device could change (most likely, become present).
+		watchers = []state.NotifyWatcher{
+			st.WatchVolumeAttachment(machineTag, volume.VolumeTag()),
+			// TODO(axw) 2015-09-30 #1501203
+			// We should filter the events to only those relevant
+			// to the volume attachment. This means we would need
+			// to either start the block device watcher after we
+			// have provisioned the volume attachment (cleaner?),
+			// or have the filter ignore changes until the volume
+			// attachment is provisioned.
+			st.WatchBlockDevices(machineTag),
+		}
+	case state.StorageKindFilesystem:
+		filesystem, err := st.StorageInstanceFilesystem(storageTag)
+		if err != nil {
+			return nil, errors.Annotate(err, "getting storage filesystem")
+		}
+		watchers = []state.NotifyWatcher{
+			st.WatchFilesystemAttachment(machineTag, filesystem.FilesystemTag()),
+		}
+	default:
+		return nil, errors.Errorf("invalid storage kind %v", storageInstance.Kind())
+	}
+	watchers = append(watchers, st.WatchStorageAttachment(storageTag, unitTag))
+	return common.NewMultiNotifyWatcher(watchers...), nil
+}
+
+// volumeAttachmentDevicePath returns the absolute device path for
+// a volume attachment. The value is only meaningful in the context
+// of the machine that the volume is attached to.
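+// Information from the volume and volume attachment (hardware ID,
+// device name, device link) takes precedence over the published
+// block device details.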
+func volumeAttachmentDevicePath( + volumeInfo state.VolumeInfo, + volumeAttachmentInfo state.VolumeAttachmentInfo, + blockDevice state.BlockDeviceInfo, +) (string, error) { + if volumeInfo.HardwareId != "" || volumeAttachmentInfo.DeviceName != "" || volumeAttachmentInfo.DeviceLink != "" { + // Prefer the volume attachment's information over what is + // in the published block device information. + var deviceLinks []string + if volumeAttachmentInfo.DeviceLink != "" { + deviceLinks = []string{volumeAttachmentInfo.DeviceLink} + } + return storage.BlockDevicePath(storage.BlockDevice{ + HardwareId: volumeInfo.HardwareId, + DeviceName: volumeAttachmentInfo.DeviceName, + DeviceLinks: deviceLinks, + }) + } + return storage.BlockDevicePath(BlockDeviceFromState(blockDevice)) +} + +// MaybeAssignedStorageInstance calls the provided function to get a +// StorageTag, and returns the corresponding state.StorageInstance if +// it didn't return an errors.IsNotAssigned error, or nil if it did. +func MaybeAssignedStorageInstance( + getTag func() (names.StorageTag, error), + getStorageInstance func(names.StorageTag) (state.StorageInstance, error), +) (state.StorageInstance, error) { + tag, err := getTag() + if err == nil { + return getStorageInstance(tag) + } else if errors.IsNotAssigned(err) { + return nil, nil + } + return nil, errors.Trace(err) +} + +// storageTags returns the tags that should be set on a volume or filesystem, +// if the provider supports them. +func storageTags( + storageInstance state.StorageInstance, + cfg *config.Config, +) (map[string]string, error) { + uuid, _ := cfg.UUID() + storageTags := tags.ResourceTags(names.NewModelTag(uuid), cfg) + if storageInstance != nil { + storageTags[tags.JujuStorageInstance] = storageInstance.Tag().Id() + storageTags[tags.JujuStorageOwner] = storageInstance.Owner().Id() + } + return storageTags, nil +} === added file 'src/github.com/juju/juju/apiserver/common/storagecommon/storage_test.go' --- src/github.com/juju/juju/apiserver/common/storagecommon/storage_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/storagecommon/storage_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,241 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package storagecommon_test + +import ( + "path/filepath" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common/storagecommon" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/storage" +) + +type storageAttachmentInfoSuite struct { + machineTag names.MachineTag + volumeTag names.VolumeTag + storageTag names.StorageTag + st *fakeStorage + storageInstance *fakeStorageInstance + storageAttachment *fakeStorageAttachment + volume *fakeVolume + volumeAttachment *fakeVolumeAttachment + blockDevices []state.BlockDeviceInfo +} + +var _ = gc.Suite(&storageAttachmentInfoSuite{}) + +func (s *storageAttachmentInfoSuite) SetUpTest(c *gc.C) { + s.machineTag = names.NewMachineTag("0") + s.volumeTag = names.NewVolumeTag("0") + s.storageTag = names.NewStorageTag("osd-devices/0") + s.storageInstance = &fakeStorageInstance{ + tag: s.storageTag, + owner: s.machineTag, + kind: state.StorageKindBlock, + } + s.storageAttachment = &fakeStorageAttachment{ + storageTag: s.storageTag, + } + s.volume = &fakeVolume{ + tag: s.volumeTag, + info: &state.VolumeInfo{ + VolumeId: "vol-ume", + Pool: "radiance", + Size: 1024, + }, + } + s.volumeAttachment = &fakeVolumeAttachment{ + info: &state.VolumeAttachmentInfo{}, + } + s.blockDevices = []state.BlockDeviceInfo{{ + DeviceName: "sda", + DeviceLinks: []string{"/dev/disk/by-id/verbatim"}, + HardwareId: "whatever", + }} + s.st = &fakeStorage{ + storageInstance: func(tag names.StorageTag) (state.StorageInstance, error) { + return s.storageInstance, nil + }, + storageInstanceVolume: func(tag names.StorageTag) (state.Volume, error) { + return s.volume, nil + }, + volumeAttachment: func(m names.MachineTag, v names.VolumeTag) (state.VolumeAttachment, error) { + return s.volumeAttachment, nil + }, + blockDevices: func(m names.MachineTag) ([]state.BlockDeviceInfo, error) { + return s.blockDevices, nil + }, + } +} + +func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoPersistentDeviceName(c *gc.C) { + s.volumeAttachment.info.DeviceName = "sda" + info, err := storagecommon.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) + c.Assert(err, jc.ErrorIsNil) + s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") + c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ + Kind: storage.StorageKindBlock, + Location: filepath.FromSlash("/dev/sda"), + }) +} + +func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoMissingBlockDevice(c *gc.C) { + // If the block device has not shown up yet, + // then we should get a NotProvisioned error. 
+ s.blockDevices = nil + s.volumeAttachment.info.DeviceName = "sda" + _, err := storagecommon.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) + c.Assert(err, jc.Satisfies, errors.IsNotProvisioned) + s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") +} + +func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoPersistentDeviceLink(c *gc.C) { + s.volumeAttachment.info.DeviceLink = "/dev/disk/by-id/verbatim" + info, err := storagecommon.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) + c.Assert(err, jc.ErrorIsNil) + s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") + c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ + Kind: storage.StorageKindBlock, + Location: "/dev/disk/by-id/verbatim", + }) +} + +func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoPersistentHardwareId(c *gc.C) { + s.volume.info.HardwareId = "whatever" + info, err := storagecommon.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) + c.Assert(err, jc.ErrorIsNil) + s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") + c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ + Kind: storage.StorageKindBlock, + Location: filepath.FromSlash("/dev/disk/by-id/whatever"), + }) +} + +func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoMatchingBlockDevice(c *gc.C) { + // The bus address alone is not enough to produce a path to the block + // device; we need to find a published block device with the matching + // bus address. + s.volumeAttachment.info.BusAddress = "scsi@1:2.3.4" + s.blockDevices = []state.BlockDeviceInfo{{ + DeviceName: "sda", + }, { + DeviceName: "sdb", + BusAddress: s.volumeAttachment.info.BusAddress, + }} + info, err := storagecommon.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) + c.Assert(err, jc.ErrorIsNil) + s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") + c.Assert(info, jc.DeepEquals, &storage.StorageAttachmentInfo{ + Kind: storage.StorageKindBlock, + Location: filepath.FromSlash("/dev/sdb"), + }) +} + +func (s *storageAttachmentInfoSuite) TestStorageAttachmentInfoNoBlockDevice(c *gc.C) { + // Neither the volume nor the volume attachment has enough information + // to persistently identify the path, so we must enquire about block + // devices; there are none (yet), so NotProvisioned is returned. 
+ s.volumeAttachment.info.BusAddress = "scsi@1:2.3.4" + _, err := storagecommon.StorageAttachmentInfo(s.st, s.storageAttachment, s.machineTag) + c.Assert(err, jc.Satisfies, errors.IsNotProvisioned) + s.st.CheckCallNames(c, "StorageInstance", "StorageInstanceVolume", "VolumeAttachment", "BlockDevices") +} + +type watchStorageAttachmentSuite struct { + storageTag names.StorageTag + machineTag names.MachineTag + unitTag names.UnitTag + st *fakeStorage + storageInstance *fakeStorageInstance + volume *fakeVolume + volumeAttachmentWatcher *fakeNotifyWatcher + blockDevicesWatcher *fakeNotifyWatcher + storageAttachmentWatcher *fakeNotifyWatcher +} + +var _ = gc.Suite(&watchStorageAttachmentSuite{}) + +func (s *watchStorageAttachmentSuite) SetUpTest(c *gc.C) { + s.storageTag = names.NewStorageTag("osd-devices/0") + s.machineTag = names.NewMachineTag("0") + s.unitTag = names.NewUnitTag("ceph/0") + s.storageInstance = &fakeStorageInstance{ + tag: s.storageTag, + owner: s.machineTag, + kind: state.StorageKindBlock, + } + s.volume = &fakeVolume{tag: names.NewVolumeTag("0")} + s.volumeAttachmentWatcher = &fakeNotifyWatcher{ch: make(chan struct{}, 1)} + s.blockDevicesWatcher = &fakeNotifyWatcher{ch: make(chan struct{}, 1)} + s.storageAttachmentWatcher = &fakeNotifyWatcher{ch: make(chan struct{}, 1)} + s.volumeAttachmentWatcher.ch <- struct{}{} + s.blockDevicesWatcher.ch <- struct{}{} + s.storageAttachmentWatcher.ch <- struct{}{} + s.st = &fakeStorage{ + storageInstance: func(tag names.StorageTag) (state.StorageInstance, error) { + return s.storageInstance, nil + }, + storageInstanceVolume: func(tag names.StorageTag) (state.Volume, error) { + return s.volume, nil + }, + watchVolumeAttachment: func(names.MachineTag, names.VolumeTag) state.NotifyWatcher { + return s.volumeAttachmentWatcher + }, + watchBlockDevices: func(names.MachineTag) state.NotifyWatcher { + return s.blockDevicesWatcher + }, + watchStorageAttachment: func(names.StorageTag, names.UnitTag) state.NotifyWatcher { + return s.storageAttachmentWatcher + }, + } +} + +func (s *watchStorageAttachmentSuite) TestWatchStorageAttachmentVolumeAttachmentChanges(c *gc.C) { + s.testWatchBlockStorageAttachment(c, func() { + s.volumeAttachmentWatcher.ch <- struct{}{} + }) +} + +func (s *watchStorageAttachmentSuite) TestWatchStorageAttachmentStorageAttachmentChanges(c *gc.C) { + s.testWatchBlockStorageAttachment(c, func() { + s.storageAttachmentWatcher.ch <- struct{}{} + }) +} + +func (s *watchStorageAttachmentSuite) TestWatchStorageAttachmentBlockDevicesChange(c *gc.C) { + s.testWatchBlockStorageAttachment(c, func() { + s.blockDevicesWatcher.ch <- struct{}{} + }) +} + +func (s *watchStorageAttachmentSuite) testWatchBlockStorageAttachment(c *gc.C, change func()) { + s.testWatchStorageAttachment(c, change) + s.st.CheckCallNames(c, + "StorageInstance", + "StorageInstanceVolume", + "WatchVolumeAttachment", + "WatchBlockDevices", + "WatchStorageAttachment", + ) +} + +func (s *watchStorageAttachmentSuite) testWatchStorageAttachment(c *gc.C, change func()) { + w, err := storagecommon.WatchStorageAttachment( + s.st, + s.storageTag, + s.machineTag, + s.unitTag, + ) + c.Assert(err, jc.ErrorIsNil) + wc := statetesting.NewNotifyWatcherC(c, nopSyncStarter{}, w) + wc.AssertOneChange() + change() + wc.AssertOneChange() +} === added file 'src/github.com/juju/juju/apiserver/common/storagecommon/volumes.go' --- src/github.com/juju/juju/apiserver/common/storagecommon/volumes.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/storagecommon/volumes.go 
2016-03-22 15:18:22 +0000
@@ -0,0 +1,224 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package storagecommon
+
+import (
+    "github.com/juju/errors"
+    "github.com/juju/names"
+
+    "github.com/juju/juju/apiserver/params"
+    "github.com/juju/juju/environs/config"
+    "github.com/juju/juju/state"
+    "github.com/juju/juju/storage"
+    "github.com/juju/juju/storage/poolmanager"
+    "github.com/juju/juju/storage/provider/registry"
+)
+
+type volumeAlreadyProvisionedError struct {
+    error
+}
+
+// IsVolumeAlreadyProvisioned returns true if the specified error
+// is caused by a volume already being provisioned.
+func IsVolumeAlreadyProvisioned(err error) bool {
+    _, ok := err.(*volumeAlreadyProvisionedError)
+    return ok
+}
+
+// VolumeParams returns the parameters for creating or destroying
+// the given volume.
+func VolumeParams(
+    v state.Volume,
+    storageInstance state.StorageInstance,
+    environConfig *config.Config,
+    poolManager poolmanager.PoolManager,
+) (params.VolumeParams, error) {
+
+    var pool string
+    var size uint64
+    if stateVolumeParams, ok := v.Params(); ok {
+        pool = stateVolumeParams.Pool
+        size = stateVolumeParams.Size
+    } else {
+        volumeInfo, err := v.Info()
+        if err != nil {
+            return params.VolumeParams{}, errors.Trace(err)
+        }
+        pool = volumeInfo.Pool
+        size = volumeInfo.Size
+    }
+
+    volumeTags, err := storageTags(storageInstance, environConfig)
+    if err != nil {
+        return params.VolumeParams{}, errors.Annotate(err, "computing storage tags")
+    }
+
+    providerType, cfg, err := StoragePoolConfig(pool, poolManager)
+    if err != nil {
+        return params.VolumeParams{}, errors.Trace(err)
+    }
+    return params.VolumeParams{
+        v.Tag().String(),
+        size,
+        string(providerType),
+        cfg.Attrs(),
+        volumeTags,
+        nil, // attachment params set by the caller
+    }, nil
+}
+
+// StoragePoolConfig returns the storage provider type and
+// configuration for a named storage pool. If there is no
+// such pool with the specified name, but it identifies a
+// storage provider, then that type will be returned with an
+// empty configuration.
+func StoragePoolConfig(name string, poolManager poolmanager.PoolManager) (storage.ProviderType, *storage.Config, error) {
+    pool, err := poolManager.Get(name)
+    if errors.IsNotFound(err) {
+        // If not a storage pool, then maybe a provider type.
+        providerType := storage.ProviderType(name)
+        if _, err1 := registry.StorageProvider(providerType); err1 != nil {
+            return "", nil, errors.Trace(err)
+        }
+        return providerType, &storage.Config{}, nil
+    } else if err != nil {
+        return "", nil, errors.Annotatef(err, "getting pool %q", name)
+    }
+    return pool.Provider(), pool, nil
+}
+
+// VolumesToState converts a slice of params.Volume to a mapping
+// of volume tags to state.VolumeInfo.
+func VolumesToState(in []params.Volume) (map[names.VolumeTag]state.VolumeInfo, error) {
+    m := make(map[names.VolumeTag]state.VolumeInfo)
+    for _, v := range in {
+        tag, volumeInfo, err := VolumeToState(v)
+        if err != nil {
+            return nil, errors.Trace(err)
+        }
+        m[tag] = volumeInfo
+    }
+    return m, nil
+}
+
+// VolumeToState converts a params.Volume to state.VolumeInfo
+// and names.VolumeTag.
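A minimal round-trip sketch of the conversion helpers in this file (illustrative only: v stands for a params.Volume received over the API; every name used is defined in this file):

    tag, info, err := VolumeToState(v)
    if err != nil {
        return errors.Trace(err) // e.g. an empty or malformed volume tag
    }
    // ...and the reverse direction, when returning state data to API clients:
    result := params.Volume{tag.String(), VolumeInfoFromState(info)}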
+func VolumeToState(v params.Volume) (names.VolumeTag, state.VolumeInfo, error) { + if v.VolumeTag == "" { + return names.VolumeTag{}, state.VolumeInfo{}, errors.New("Tag is empty") + } + volumeTag, err := names.ParseVolumeTag(v.VolumeTag) + if err != nil { + return names.VolumeTag{}, state.VolumeInfo{}, errors.Trace(err) + } + return volumeTag, state.VolumeInfo{ + v.Info.HardwareId, + v.Info.Size, + "", // pool is set by state + v.Info.VolumeId, + v.Info.Persistent, + }, nil +} + +// VolumeFromState converts a state.Volume to params.Volume. +func VolumeFromState(v state.Volume) (params.Volume, error) { + info, err := v.Info() + if err != nil { + return params.Volume{}, errors.Trace(err) + } + return params.Volume{ + v.VolumeTag().String(), + VolumeInfoFromState(info), + }, nil +} + +// VolumeInfoFromState converts a state.VolumeInfo to params.VolumeInfo. +func VolumeInfoFromState(info state.VolumeInfo) params.VolumeInfo { + return params.VolumeInfo{ + info.VolumeId, + info.HardwareId, + info.Size, + info.Persistent, + } +} + +// VolumeAttachmentFromState converts a state.VolumeAttachment to params.VolumeAttachment. +func VolumeAttachmentFromState(v state.VolumeAttachment) (params.VolumeAttachment, error) { + info, err := v.Info() + if err != nil { + return params.VolumeAttachment{}, errors.Trace(err) + } + return params.VolumeAttachment{ + v.Volume().String(), + v.Machine().String(), + VolumeAttachmentInfoFromState(info), + }, nil +} + +// VolumeAttachmentInfoFromState converts a state.VolumeAttachmentInfo to params.VolumeAttachmentInfo. +func VolumeAttachmentInfoFromState(info state.VolumeAttachmentInfo) params.VolumeAttachmentInfo { + return params.VolumeAttachmentInfo{ + info.DeviceName, + info.DeviceLink, + info.BusAddress, + info.ReadOnly, + } +} + +// VolumeAttachmentInfosToState converts a map of volume tags to +// params.VolumeAttachmentInfo to a map of volume tags to +// state.VolumeAttachmentInfo. +func VolumeAttachmentInfosToState(in map[string]params.VolumeAttachmentInfo) (map[names.VolumeTag]state.VolumeAttachmentInfo, error) { + m := make(map[names.VolumeTag]state.VolumeAttachmentInfo) + for k, v := range in { + volumeTag, err := names.ParseVolumeTag(k) + if err != nil { + return nil, errors.Trace(err) + } + m[volumeTag] = VolumeAttachmentInfoToState(v) + } + return m, nil +} + +// VolumeAttachmentToState converts a params.VolumeAttachment +// to a state.VolumeAttachmentInfo and tags. +func VolumeAttachmentToState(in params.VolumeAttachment) (names.MachineTag, names.VolumeTag, state.VolumeAttachmentInfo, error) { + machineTag, err := names.ParseMachineTag(in.MachineTag) + if err != nil { + return names.MachineTag{}, names.VolumeTag{}, state.VolumeAttachmentInfo{}, err + } + volumeTag, err := names.ParseVolumeTag(in.VolumeTag) + if err != nil { + return names.MachineTag{}, names.VolumeTag{}, state.VolumeAttachmentInfo{}, err + } + info := VolumeAttachmentInfoToState(in.Info) + return machineTag, volumeTag, info, nil +} + +// VolumeAttachmentInfoToState converts a params.VolumeAttachmentInfo +// to a state.VolumeAttachmentInfo. +func VolumeAttachmentInfoToState(in params.VolumeAttachmentInfo) state.VolumeAttachmentInfo { + return state.VolumeAttachmentInfo{ + in.DeviceName, + in.DeviceLink, + in.BusAddress, + in.ReadOnly, + } +} + +// ParseVolumeAttachmentIds parses the strings, returning machine storage IDs. 
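To make the id format concrete, a hedged sketch of the parser defined next; the "0:1" literal is an assumed machine-id:volume-id pair for illustration, and the actual parsing is delegated to state.ParseVolumeAttachmentId:

    ids, err := ParseVolumeAttachmentIds([]string{"0:1"})
    if err != nil {
        return errors.Trace(err)
    }
    // Each entry pairs the two tags, e.g.
    // {MachineTag: "machine-0", AttachmentTag: "volume-1"}.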
+func ParseVolumeAttachmentIds(stringIds []string) ([]params.MachineStorageId, error) { + ids := make([]params.MachineStorageId, len(stringIds)) + for i, s := range stringIds { + m, v, err := state.ParseVolumeAttachmentId(s) + if err != nil { + return nil, err + } + ids[i] = params.MachineStorageId{ + MachineTag: m.String(), + AttachmentTag: v.String(), + } + } + return ids, nil +} === added file 'src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go' --- src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/storagecommon/volumes_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,82 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storagecommon_test + +import ( + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common/storagecommon" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/tags" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" +) + +type volumesSuite struct{} + +var _ = gc.Suite(&volumesSuite{}) + +func (s *volumesSuite) TestVolumeParams(c *gc.C) { + s.testVolumeParams(c, &state.VolumeParams{ + Pool: "loop", + Size: 1024, + }, nil) +} + +func (s *volumesSuite) TestVolumeParamsAlreadyProvisioned(c *gc.C) { + s.testVolumeParams(c, nil, &state.VolumeInfo{ + Pool: "loop", + Size: 1024, + }) +} + +func (*volumesSuite) testVolumeParams(c *gc.C, volumeParams *state.VolumeParams, info *state.VolumeInfo) { + tag := names.NewVolumeTag("100") + p, err := storagecommon.VolumeParams( + &fakeVolume{tag: tag, params: volumeParams, info: info}, + nil, // StorageInstance + testing.CustomModelConfig(c, testing.Attrs{ + "resource-tags": "a=b c=", + }), + &fakePoolManager{}, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(p, jc.DeepEquals, params.VolumeParams{ + VolumeTag: "volume-100", + Provider: "loop", + Size: 1024, + Tags: map[string]string{ + tags.JujuModel: testing.ModelTag.Id(), + "a": "b", + "c": "", + }, + }) +} + +func (*volumesSuite) TestVolumeParamsStorageTags(c *gc.C) { + volumeTag := names.NewVolumeTag("100") + storageTag := names.NewStorageTag("mystore/0") + unitTag := names.NewUnitTag("mysql/123") + p, err := storagecommon.VolumeParams( + &fakeVolume{tag: volumeTag, params: &state.VolumeParams{ + Pool: "loop", Size: 1024, + }}, + &fakeStorageInstance{tag: storageTag, owner: unitTag}, + testing.CustomModelConfig(c, nil), + &fakePoolManager{}, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(p, jc.DeepEquals, params.VolumeParams{ + VolumeTag: "volume-100", + Provider: "loop", + Size: 1024, + Tags: map[string]string{ + tags.JujuModel: testing.ModelTag.Id(), + tags.JujuStorageInstance: "mystore/0", + tags.JujuStorageOwner: "mysql/123", + }, + }) +} === removed file 'src/github.com/juju/juju/apiserver/common/storagemock_test.go' --- src/github.com/juju/juju/apiserver/common/storagemock_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/storagemock_test.go 1970-01-01 00:00:00 +0000 @@ -1,143 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package common_test - -import ( - "github.com/juju/errors" - "github.com/juju/names" - "github.com/juju/testing" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/state" - "github.com/juju/juju/storage" - "github.com/juju/juju/storage/poolmanager" -) - -type fakeStorage struct { - testing.Stub - common.StorageInterface - storageInstance func(names.StorageTag) (state.StorageInstance, error) - storageInstanceVolume func(names.StorageTag) (state.Volume, error) - volumeAttachment func(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) - blockDevices func(names.MachineTag) ([]state.BlockDeviceInfo, error) - watchVolumeAttachment func(names.MachineTag, names.VolumeTag) state.NotifyWatcher - watchBlockDevices func(names.MachineTag) state.NotifyWatcher - watchStorageAttachment func(names.StorageTag, names.UnitTag) state.NotifyWatcher -} - -func (s *fakeStorage) StorageInstance(tag names.StorageTag) (state.StorageInstance, error) { - s.MethodCall(s, "StorageInstance", tag) - return s.storageInstance(tag) -} - -func (s *fakeStorage) StorageInstanceVolume(tag names.StorageTag) (state.Volume, error) { - s.MethodCall(s, "StorageInstanceVolume", tag) - return s.storageInstanceVolume(tag) -} - -func (s *fakeStorage) VolumeAttachment(m names.MachineTag, v names.VolumeTag) (state.VolumeAttachment, error) { - s.MethodCall(s, "VolumeAttachment", m, v) - return s.volumeAttachment(m, v) -} - -func (s *fakeStorage) BlockDevices(m names.MachineTag) ([]state.BlockDeviceInfo, error) { - s.MethodCall(s, "BlockDevices", m) - return s.blockDevices(m) -} - -func (s *fakeStorage) WatchVolumeAttachment(m names.MachineTag, v names.VolumeTag) state.NotifyWatcher { - s.MethodCall(s, "WatchVolumeAttachment", m, v) - return s.watchVolumeAttachment(m, v) -} - -func (s *fakeStorage) WatchBlockDevices(m names.MachineTag) state.NotifyWatcher { - s.MethodCall(s, "WatchBlockDevices", m) - return s.watchBlockDevices(m) -} - -func (s *fakeStorage) WatchStorageAttachment(st names.StorageTag, u names.UnitTag) state.NotifyWatcher { - s.MethodCall(s, "WatchStorageAttachment", st, u) - return s.watchStorageAttachment(st, u) -} - -type fakeStorageInstance struct { - state.StorageInstance - tag names.StorageTag - owner names.Tag - kind state.StorageKind -} - -func (i *fakeStorageInstance) StorageTag() names.StorageTag { - return i.tag -} - -func (i *fakeStorageInstance) Tag() names.Tag { - return i.tag -} - -func (i *fakeStorageInstance) Owner() names.Tag { - return i.owner -} - -func (i *fakeStorageInstance) Kind() state.StorageKind { - return i.kind -} - -type fakeStorageAttachment struct { - state.StorageAttachment - storageTag names.StorageTag -} - -func (a *fakeStorageAttachment) StorageInstance() names.StorageTag { - return a.storageTag -} - -type fakeVolume struct { - state.Volume - tag names.VolumeTag - params *state.VolumeParams - info *state.VolumeInfo -} - -func (v *fakeVolume) VolumeTag() names.VolumeTag { - return v.tag -} - -func (v *fakeVolume) Tag() names.Tag { - return v.tag -} - -func (v *fakeVolume) Params() (state.VolumeParams, bool) { - if v.params == nil { - return state.VolumeParams{}, false - } - return *v.params, true -} - -func (v *fakeVolume) Info() (state.VolumeInfo, error) { - if v.info == nil { - return state.VolumeInfo{}, errors.NotProvisionedf("volume %v", v.tag.Id()) - } - return *v.info, nil -} - -type fakeVolumeAttachment struct { - state.VolumeAttachment - info *state.VolumeAttachmentInfo -} - -func (v *fakeVolumeAttachment) Info() (state.VolumeAttachmentInfo, error) 
{ - if v.info == nil { - return state.VolumeAttachmentInfo{}, errors.NotProvisionedf("volume attachment") - } - return *v.info, nil -} - -type fakePoolManager struct { - poolmanager.PoolManager -} - -func (pm *fakePoolManager) Get(name string) (*storage.Config, error) { - return nil, errors.NotFoundf("pool") -} === modified file 'src/github.com/juju/juju/apiserver/common/testing/block.go' --- src/github.com/juju/juju/apiserver/common/testing/block.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/testing/block.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,7 @@ import ( "fmt" + "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -35,11 +36,7 @@ // on switches on desired block and // asserts that no errors were encountered. func (s BlockHelper) on(c *gc.C, blockType multiwatcher.BlockType, msg string) { - c.Assert( - s.client.SwitchBlockOn( - fmt.Sprintf("%v", blockType), - msg), - gc.IsNil) + c.Assert(s.client.SwitchBlockOn(fmt.Sprintf("%v", blockType), msg), gc.IsNil) } // BlockAllChanges blocks all operations that could change environment. @@ -58,14 +55,17 @@ s.ApiState.Close() } -// BlockDestroyEnvironment blocks destroy-environment. -func (s BlockHelper) BlockDestroyEnvironment(c *gc.C, msg string) { +// BlockDestroyModel blocks destroy-model. +func (s BlockHelper) BlockDestroyModel(c *gc.C, msg string) { s.on(c, multiwatcher.BlockDestroy, msg) } // AssertBlocked checks if given error is // related to switched block. func (s BlockHelper) AssertBlocked(c *gc.C, err error, msg string) { - c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) - c.Assert(err, gc.ErrorMatches, msg) + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue, gc.Commentf("error: %#v", err)) + c.Assert(errors.Cause(err), gc.DeepEquals, ¶ms.Error{ + Message: msg, + Code: "operation is blocked", + }) } === removed file 'src/github.com/juju/juju/apiserver/common/testing/environwatcher.go' --- src/github.com/juju/juju/apiserver/common/testing/environwatcher.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/common/testing/environwatcher.go 1970-01-01 00:00:00 +0000 @@ -1,90 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" -) - -const ( - HasSecrets = true - NoSecrets = false -) - -type EnvironmentWatcher interface { - WatchForEnvironConfigChanges() (params.NotifyWatchResult, error) - EnvironConfig() (params.EnvironConfigResult, error) -} - -type EnvironWatcherTest struct { - envWatcher EnvironmentWatcher - st *state.State - resources *common.Resources - hasSecrets bool -} - -func NewEnvironWatcherTest( - envWatcher EnvironmentWatcher, - st *state.State, - resources *common.Resources, - hasSecrets bool) *EnvironWatcherTest { - return &EnvironWatcherTest{envWatcher, st, resources, hasSecrets} -} - -// AssertEnvironConfig provides a method to test the config from the -// envWatcher. This allows other tests that embed this type to have -// more than just the default test. 
-func (s *EnvironWatcherTest) AssertEnvironConfig(c *gc.C, envWatcher EnvironmentWatcher, hasSecrets bool) { - envConfig, err := s.st.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - - result, err := envWatcher.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - - configAttributes := envConfig.AllAttrs() - // If the implementor doesn't provide secrets, we need to replace the config - // values in our environment to compare against with the secrets replaced. - if !hasSecrets { - env, err := environs.New(envConfig) - c.Assert(err, jc.ErrorIsNil) - secretAttrs, err := env.Provider().SecretAttrs(envConfig) - c.Assert(err, jc.ErrorIsNil) - for key := range secretAttrs { - configAttributes[key] = "not available" - } - } - - c.Assert(result.Config, jc.DeepEquals, params.EnvironConfig(configAttributes)) -} - -func (s *EnvironWatcherTest) TestEnvironConfig(c *gc.C) { - s.AssertEnvironConfig(c, s.envWatcher, s.hasSecrets) -} - -func (s *EnvironWatcherTest) TestWatchForEnvironConfigChanges(c *gc.C) { - c.Assert(s.resources.Count(), gc.Equals, 0) - - result, err := s.envWatcher.WatchForEnvironConfigChanges() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResult{ - NotifyWatcherId: "1", - }) - - // Verify the resources were registered and stop them when done. - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" - // in the Watch call) - wc := statetesting.NewNotifyWatcherC(c, s.st, resource.(state.NotifyWatcher)) - wc.AssertNoChange() -} === added file 'src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go' --- src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/common/testing/modelwatcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,90 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" +) + +const ( + HasSecrets = true + NoSecrets = false +) + +type ModelWatcher interface { + WatchForModelConfigChanges() (params.NotifyWatchResult, error) + ModelConfig() (params.ModelConfigResult, error) +} + +type ModelWatcherTest struct { + modelWatcher ModelWatcher + st *state.State + resources *common.Resources + hasSecrets bool +} + +func NewModelWatcherTest( + modelWatcher ModelWatcher, + st *state.State, + resources *common.Resources, + hasSecrets bool) *ModelWatcherTest { + return &ModelWatcherTest{modelWatcher, st, resources, hasSecrets} +} + +// AssertModelConfig provides a method to test the config from the +// envWatcher. This allows other tests that embed this type to have +// more than just the default test. +func (s *ModelWatcherTest) AssertModelConfig(c *gc.C, envWatcher ModelWatcher, hasSecrets bool) { + envConfig, err := s.st.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + + result, err := envWatcher.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + + configAttributes := envConfig.AllAttrs() + // If the implementor doesn't provide secrets, we need to replace the config + // values in our environment to compare against with the secrets replaced. 
+ if !hasSecrets { + env, err := environs.New(envConfig) + c.Assert(err, jc.ErrorIsNil) + secretAttrs, err := env.Provider().SecretAttrs(envConfig) + c.Assert(err, jc.ErrorIsNil) + for key := range secretAttrs { + configAttributes[key] = "not available" + } + } + + c.Assert(result.Config, jc.DeepEquals, params.ModelConfig(configAttributes)) +} + +func (s *ModelWatcherTest) TestModelConfig(c *gc.C) { + s.AssertModelConfig(c, s.modelWatcher, s.hasSecrets) +} + +func (s *ModelWatcherTest) TestWatchForModelConfigChanges(c *gc.C) { + c.Assert(s.resources.Count(), gc.Equals, 0) + + result, err := s.modelWatcher.WatchForModelConfigChanges() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.NotifyWatchResult{ + NotifyWatcherId: "1", + }) + + // Verify the resources were registered and stop them when done. + c.Assert(s.resources.Count(), gc.Equals, 1) + resource := s.resources.Get("1") + defer statetesting.AssertStop(c, resource) + + // Check that the Watch has consumed the initial event ("returned" + // in the Watch call) + wc := statetesting.NewNotifyWatcherC(c, s.st, resource.(state.NotifyWatcher)) + wc.AssertNoChange() +} === modified file 'src/github.com/juju/juju/apiserver/common/tools.go' --- src/github.com/juju/juju/apiserver/common/tools.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/tools.go 2016-03-22 15:18:22 +0000 @@ -31,8 +31,8 @@ ToolsURL(v version.Binary) (string, error) } -type EnvironConfigGetter interface { - EnvironConfig() (*config.Config, error) +type ModelConfigGetter interface { + ModelConfig() (*config.Config, error) } // APIHostPortsGetter is an interface providing the APIHostPorts method. @@ -51,7 +51,7 @@ // facades. type ToolsGetter struct { entityFinder state.EntityFinder - configGetter EnvironConfigGetter + configGetter ModelConfigGetter toolsStorageGetter ToolsStorageGetter urlGetter ToolsURLGetter getCanRead GetAuthFunc @@ -59,7 +59,7 @@ // NewToolsGetter returns a new ToolsGetter. The GetAuthFunc will be // used on each invocation of Tools to determine current permissions. -func NewToolsGetter(f state.EntityFinder, c EnvironConfigGetter, s ToolsStorageGetter, t ToolsURLGetter, getCanRead GetAuthFunc) *ToolsGetter { +func NewToolsGetter(f state.EntityFinder, c ModelConfigGetter, s ToolsStorageGetter, t ToolsURLGetter, getCanRead GetAuthFunc) *ToolsGetter { return &ToolsGetter{f, c, s, t, getCanRead} } @@ -103,13 +103,13 @@ func (t *ToolsGetter) getGlobalAgentVersion() (version.Number, error) { // Get the Agent Version requested in the Environment Config nothing := version.Number{} - cfg, err := t.configGetter.EnvironConfig() + cfg, err := t.configGetter.ModelConfig() if err != nil { return nothing, err } agentVersion, ok := cfg.AgentVersion() if !ok { - return nothing, errors.New("agent version not set in environment config") + return nothing, errors.New("agent version not set in model config") } return agentVersion, nil } @@ -197,14 +197,14 @@ } type ToolsFinder struct { - configGetter EnvironConfigGetter + configGetter ModelConfigGetter toolsStorageGetter ToolsStorageGetter urlGetter ToolsURLGetter } // NewToolsFinder returns a new ToolsFinder, returning tools // with their URLs pointing at the API server. 
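As a usage note for the finder, a call sketch mirroring the tests further down; -1 acts as a wildcard for the major and minor versions, which is what the relaxed MajorVersion comparison in this change makes explicit (the toolsFinder value is assumed to come from NewToolsFinder):

    result, err := toolsFinder.FindTools(params.FindToolsParams{
        Number:       version.Current,     // exact agent version wanted
        MajorVersion: -1,                  // -1 matches any major version
        MinorVersion: -1,                  // -1 matches any minor version
        Series:       series.HostSeries(),
        Arch:         arch.HostArch(),
    })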
-func NewToolsFinder(c EnvironConfigGetter, s ToolsStorageGetter, t ToolsURLGetter) *ToolsFinder { +func NewToolsFinder(c ModelConfigGetter, s ToolsStorageGetter, t ToolsURLGetter) *ToolsFinder { return &ToolsFinder{c, s, t} } @@ -254,7 +254,7 @@ // Look for tools in simplestreams too, but don't replace // any versions found in storage. - cfg, err := f.configGetter.EnvironConfig() + cfg, err := f.configGetter.ModelConfig() if err != nil { return nil, err } @@ -311,7 +311,7 @@ } var matching coretools.List for _, tools := range list { - if args.MajorVersion > 0 && tools.Version.Major != args.MajorVersion { + if args.MajorVersion != -1 && tools.Version.Major != args.MajorVersion { continue } if args.MinorVersion != -1 && tools.Version.Minor != args.MinorVersion { @@ -334,14 +334,14 @@ } type toolsURLGetter struct { - envUUID string + modelUUID string apiHostPortsGetter APIHostPortsGetter } // NewToolsURLGetter creates a new ToolsURLGetter that // returns tools URLs pointing at an API server. -func NewToolsURLGetter(envUUID string, a APIHostPortsGetter) *toolsURLGetter { - return &toolsURLGetter{envUUID, a} +func NewToolsURLGetter(modelUUID string, a APIHostPortsGetter) *toolsURLGetter { + return &toolsURLGetter{modelUUID, a} } func (t *toolsURLGetter) ToolsURL(v version.Binary) (string, error) { @@ -366,7 +366,7 @@ if apiAddress == "" { return "", errors.Errorf("no suitable API server address to pick from %v", hostPorts) } - serverRoot := fmt.Sprintf("https://%s/environment/%s", apiAddress, t.envUUID) + serverRoot := fmt.Sprintf("https://%s/model/%s", apiAddress, t.modelUUID) return ToolsURL(serverRoot, v), nil } === modified file 'src/github.com/juju/juju/apiserver/common/tools_test.go' --- src/github.com/juju/juju/apiserver/common/tools_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/tools_test.go 2016-03-22 15:18:22 +0000 @@ -9,13 +9,14 @@ "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/environs" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" @@ -31,6 +32,12 @@ var _ = gc.Suite(&toolsSuite{}) +var current = version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), +} + func (s *toolsSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) var err error @@ -48,7 +55,7 @@ tg := common.NewToolsGetter(s.State, s.State, s.State, sprintfURLGetter("tools:%s"), getCanRead) c.Assert(tg, gc.NotNil) - err := s.machine0.SetAgentVersion(version.Current) + err := s.machine0.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) args := params.Entities{ @@ -62,8 +69,8 @@ c.Assert(result.Results, gc.HasLen, 3) c.Assert(result.Results[0].Error, gc.IsNil) c.Assert(result.Results[0].Tools, gc.NotNil) - c.Assert(result.Results[0].Tools.Version, gc.DeepEquals, version.Current) - c.Assert(result.Results[0].Tools.URL, gc.Equals, "tools:"+version.Current.String()) + c.Assert(result.Results[0].Tools.Version, gc.DeepEquals, current) + c.Assert(result.Results[0].Tools.URL, gc.Equals, "tools:"+current.String()) c.Assert(result.Results[0].DisableSSLHostnameVerification, jc.IsTrue) c.Assert(result.Results[1].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) 
c.Assert(result.Results[2].Error, gc.DeepEquals, apiservertesting.NotFoundError("machine 42")) @@ -91,24 +98,24 @@ ts := common.NewToolsSetter(s.State, getCanWrite) c.Assert(ts, gc.NotNil) - err := s.machine0.SetAgentVersion(version.Current) + err := s.machine0.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) args := params.EntitiesVersion{ AgentTools: []params.EntityVersion{{ Tag: "machine-0", Tools: ¶ms.Version{ - Version: version.Current, + Version: current, }, }, { Tag: "machine-1", Tools: ¶ms.Version{ - Version: version.Current, + Version: current, }, }, { Tag: "machine-42", Tools: ¶ms.Version{ - Version: version.Current, + Version: current, }, }}, } @@ -118,7 +125,7 @@ c.Assert(result.Results[0].Error, gc.IsNil) agentTools, err := s.machine0.AgentTools() c.Assert(err, jc.ErrorIsNil) - c.Assert(agentTools.Version, gc.DeepEquals, version.Current) + c.Assert(agentTools.Version, gc.DeepEquals, current) c.Assert(result.Results[1].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) c.Assert(result.Results[2].Error, gc.DeepEquals, apiservertesting.NotFoundError("machine 42")) } @@ -132,7 +139,7 @@ AgentTools: []params.EntityVersion{{ Tag: "machine-42", Tools: ¶ms.Version{ - Version: version.Current, + Version: current, }, }}, } @@ -205,17 +212,18 @@ } s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) - s.PatchValue(&version.Current, version.MustParseBinary("1.22-beta1-trusty-amd64")) + s.PatchValue(&series.HostSeries, func() string { return "trusty" }) + s.PatchValue(&version.Current, version.MustParseBinary("1.22-beta1-trusty-amd64").Number) s.testFindToolsExact(c, mockToolsStorage, true, true) - s.PatchValue(&version.Current, version.MustParseBinary("1.22.0-trusty-amd64")) + s.PatchValue(&version.Current, version.MustParseBinary("1.22.0-trusty-amd64").Number) s.testFindToolsExact(c, mockToolsStorage, true, false) } func (s *toolsSuite) TestFindToolsExactNotInStorage(c *gc.C) { mockToolsStorage := &mockToolsStorage{} - s.PatchValue(&version.Current.Number, version.MustParse("1.22-beta1")) + s.PatchValue(&version.Current, version.MustParse("1.22-beta1")) s.testFindToolsExact(c, mockToolsStorage, false, true) - s.PatchValue(&version.Current.Number, version.MustParse("1.22.0")) + s.PatchValue(&version.Current, version.MustParse("1.22.0")) s.testFindToolsExact(c, mockToolsStorage, false, false) } @@ -223,8 +231,8 @@ var called bool s.PatchValue(common.EnvtoolsFindTools, func(e environs.Environ, major, minor int, stream string, filter coretools.Filter) (list coretools.List, err error) { called = true - c.Assert(filter.Number, gc.Equals, version.Current.Number) - c.Assert(filter.Series, gc.Equals, version.Current.Series) + c.Assert(filter.Number, gc.Equals, version.Current) + c.Assert(filter.Series, gc.Equals, series.HostSeries()) c.Assert(filter.Arch, gc.Equals, arch.HostArch()) if develVersion { c.Assert(stream, gc.Equals, "devel") @@ -235,10 +243,10 @@ }) toolsFinder := common.NewToolsFinder(s.State, t, sprintfURLGetter("tools:%s")) result, err := toolsFinder.FindTools(params.FindToolsParams{ - Number: version.Current.Number, + Number: version.Current, MajorVersion: -1, MinorVersion: -1, - Series: version.Current.Series, + Series: series.HostSeries(), Arch: arch.HostArch(), }) c.Assert(err, jc.ErrorIsNil) @@ -274,13 +282,13 @@ func (s *toolsSuite) TestToolsURLGetterNoAPIHostPorts(c *gc.C) { g := common.NewToolsURLGetter("my-uuid", mockAPIHostPortsGetter{}) - _, err := g.ToolsURL(version.Current) + _, err := g.ToolsURL(current) c.Assert(err, gc.ErrorMatches, "no API 
host ports") } func (s *toolsSuite) TestToolsURLGetterAPIHostPortsError(c *gc.C) { g := common.NewToolsURLGetter("my-uuid", mockAPIHostPortsGetter{err: errors.New("oh noes")}) - _, err := g.ToolsURL(version.Current) + _, err := g.ToolsURL(current) c.Assert(err, gc.ErrorMatches, "oh noes") } @@ -290,9 +298,9 @@ network.NewHostPorts(1234, "0.1.2.3"), }, }) - url, err := g.ToolsURL(version.Current) + url, err := g.ToolsURL(current) c.Assert(err, jc.ErrorIsNil) - c.Assert(url, gc.Equals, "https://0.1.2.3:1234/environment/my-uuid/tools/"+version.Current.String()) + c.Assert(url, gc.Equals, "https://0.1.2.3:1234/model/my-uuid/tools/"+current.String()) } type sprintfURLGetter string === removed file 'src/github.com/juju/juju/apiserver/common/volumes.go' --- src/github.com/juju/juju/apiserver/common/volumes.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/volumes.go 1970-01-01 00:00:00 +0000 @@ -1,224 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" - "github.com/juju/juju/storage" - "github.com/juju/juju/storage/poolmanager" - "github.com/juju/juju/storage/provider/registry" -) - -type volumeAlreadyProvisionedError struct { - error -} - -// IsVolumeAlreadyProvisioned returns true if the specified error -// is caused by a volume already being provisioned. -func IsVolumeAlreadyProvisioned(err error) bool { - _, ok := err.(*volumeAlreadyProvisionedError) - return ok -} - -// VolumeParams returns the parameters for creating or destroying -// the given volume. -func VolumeParams( - v state.Volume, - storageInstance state.StorageInstance, - environConfig *config.Config, - poolManager poolmanager.PoolManager, -) (params.VolumeParams, error) { - - var pool string - var size uint64 - if stateVolumeParams, ok := v.Params(); ok { - pool = stateVolumeParams.Pool - size = stateVolumeParams.Size - } else { - volumeInfo, err := v.Info() - if err != nil { - return params.VolumeParams{}, errors.Trace(err) - } - pool = volumeInfo.Pool - size = volumeInfo.Size - } - - volumeTags, err := storageTags(storageInstance, environConfig) - if err != nil { - return params.VolumeParams{}, errors.Annotate(err, "computing storage tags") - } - - providerType, cfg, err := StoragePoolConfig(pool, poolManager) - if err != nil { - return params.VolumeParams{}, errors.Trace(err) - } - return params.VolumeParams{ - v.Tag().String(), - size, - string(providerType), - cfg.Attrs(), - volumeTags, - nil, // attachment params set by the caller - }, nil -} - -// StoragePoolConfig returns the storage provider type and -// configuration for a named storage pool. If there is no -// such pool with the specified name, but it identifies a -// storage provider, then that type will be returned with a -// nil configuration. -func StoragePoolConfig(name string, poolManager poolmanager.PoolManager) (storage.ProviderType, *storage.Config, error) { - pool, err := poolManager.Get(name) - if errors.IsNotFound(err) { - // If not a storage pool, then maybe a provider type. 
- providerType := storage.ProviderType(name) - if _, err1 := registry.StorageProvider(providerType); err1 != nil { - return "", nil, errors.Trace(err) - } - return providerType, &storage.Config{}, nil - } else if err != nil { - return "", nil, errors.Annotatef(err, "getting pool %q", name) - } - return pool.Provider(), pool, nil -} - -// VolumesToState converts a slice of params.Volume to a mapping -// of volume tags to state.VolumeInfo. -func VolumesToState(in []params.Volume) (map[names.VolumeTag]state.VolumeInfo, error) { - m := make(map[names.VolumeTag]state.VolumeInfo) - for _, v := range in { - tag, volumeInfo, err := VolumeToState(v) - if err != nil { - return nil, errors.Trace(err) - } - m[tag] = volumeInfo - } - return m, nil -} - -// VolumeToState converts a params.Volume to state.VolumeInfo -// and names.VolumeTag. -func VolumeToState(v params.Volume) (names.VolumeTag, state.VolumeInfo, error) { - if v.VolumeTag == "" { - return names.VolumeTag{}, state.VolumeInfo{}, errors.New("Tag is empty") - } - volumeTag, err := names.ParseVolumeTag(v.VolumeTag) - if err != nil { - return names.VolumeTag{}, state.VolumeInfo{}, errors.Trace(err) - } - return volumeTag, state.VolumeInfo{ - v.Info.HardwareId, - v.Info.Size, - "", // pool is set by state - v.Info.VolumeId, - v.Info.Persistent, - }, nil -} - -// VolumeFromState converts a state.Volume to params.Volume. -func VolumeFromState(v state.Volume) (params.Volume, error) { - info, err := v.Info() - if err != nil { - return params.Volume{}, errors.Trace(err) - } - return params.Volume{ - v.VolumeTag().String(), - VolumeInfoFromState(info), - }, nil -} - -// VolumeInfoFromState converts a state.VolumeInfo to params.VolumeInfo. -func VolumeInfoFromState(info state.VolumeInfo) params.VolumeInfo { - return params.VolumeInfo{ - info.VolumeId, - info.HardwareId, - info.Size, - info.Persistent, - } -} - -// VolumeAttachmentFromState converts a state.VolumeAttachment to params.VolumeAttachment. -func VolumeAttachmentFromState(v state.VolumeAttachment) (params.VolumeAttachment, error) { - info, err := v.Info() - if err != nil { - return params.VolumeAttachment{}, errors.Trace(err) - } - return params.VolumeAttachment{ - v.Volume().String(), - v.Machine().String(), - VolumeAttachmentInfoFromState(info), - }, nil -} - -// VolumeAttachmentInfoFromState converts a state.VolumeAttachmentInfo to params.VolumeAttachmentInfo. -func VolumeAttachmentInfoFromState(info state.VolumeAttachmentInfo) params.VolumeAttachmentInfo { - return params.VolumeAttachmentInfo{ - info.DeviceName, - info.DeviceLink, - info.BusAddress, - info.ReadOnly, - } -} - -// VolumeAttachmentInfosToState converts a map of volume tags to -// params.VolumeAttachmentInfo to a map of volume tags to -// state.VolumeAttachmentInfo. -func VolumeAttachmentInfosToState(in map[string]params.VolumeAttachmentInfo) (map[names.VolumeTag]state.VolumeAttachmentInfo, error) { - m := make(map[names.VolumeTag]state.VolumeAttachmentInfo) - for k, v := range in { - volumeTag, err := names.ParseVolumeTag(k) - if err != nil { - return nil, errors.Trace(err) - } - m[volumeTag] = VolumeAttachmentInfoToState(v) - } - return m, nil -} - -// VolumeAttachmentToState converts a params.VolumeAttachment -// to a state.VolumeAttachmentInfo and tags. 
-func VolumeAttachmentToState(in params.VolumeAttachment) (names.MachineTag, names.VolumeTag, state.VolumeAttachmentInfo, error) { - machineTag, err := names.ParseMachineTag(in.MachineTag) - if err != nil { - return names.MachineTag{}, names.VolumeTag{}, state.VolumeAttachmentInfo{}, err - } - volumeTag, err := names.ParseVolumeTag(in.VolumeTag) - if err != nil { - return names.MachineTag{}, names.VolumeTag{}, state.VolumeAttachmentInfo{}, err - } - info := VolumeAttachmentInfoToState(in.Info) - return machineTag, volumeTag, info, nil -} - -// VolumeAttachmentInfoToState converts a params.VolumeAttachmentInfo -// to a state.VolumeAttachmentInfo. -func VolumeAttachmentInfoToState(in params.VolumeAttachmentInfo) state.VolumeAttachmentInfo { - return state.VolumeAttachmentInfo{ - in.DeviceName, - in.DeviceLink, - in.BusAddress, - in.ReadOnly, - } -} - -// ParseVolumeAttachmentIds parses the strings, returning machine storage IDs. -func ParseVolumeAttachmentIds(stringIds []string) ([]params.MachineStorageId, error) { - ids := make([]params.MachineStorageId, len(stringIds)) - for i, s := range stringIds { - m, v, err := state.ParseVolumeAttachmentId(s) - if err != nil { - return nil, err - } - ids[i] = params.MachineStorageId{ - MachineTag: m.String(), - AttachmentTag: v.String(), - } - } - return ids, nil -} === removed file 'src/github.com/juju/juju/apiserver/common/volumes_test.go' --- src/github.com/juju/juju/apiserver/common/volumes_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/common/volumes_test.go 1970-01-01 00:00:00 +0000 @@ -1,82 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package common_test - -import ( - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/tags" - "github.com/juju/juju/state" - "github.com/juju/juju/testing" -) - -type volumesSuite struct{} - -var _ = gc.Suite(&volumesSuite{}) - -func (s *volumesSuite) TestVolumeParams(c *gc.C) { - s.testVolumeParams(c, &state.VolumeParams{ - Pool: "loop", - Size: 1024, - }, nil) -} - -func (s *volumesSuite) TestVolumeParamsAlreadyProvisioned(c *gc.C) { - s.testVolumeParams(c, nil, &state.VolumeInfo{ - Pool: "loop", - Size: 1024, - }) -} - -func (*volumesSuite) testVolumeParams(c *gc.C, volumeParams *state.VolumeParams, info *state.VolumeInfo) { - tag := names.NewVolumeTag("100") - p, err := common.VolumeParams( - &fakeVolume{tag: tag, params: volumeParams, info: info}, - nil, // StorageInstance - testing.CustomEnvironConfig(c, testing.Attrs{ - "resource-tags": "a=b c=", - }), - &fakePoolManager{}, - ) - c.Assert(err, jc.ErrorIsNil) - c.Assert(p, jc.DeepEquals, params.VolumeParams{ - VolumeTag: "volume-100", - Provider: "loop", - Size: 1024, - Tags: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), - "a": "b", - "c": "", - }, - }) -} - -func (*volumesSuite) TestVolumeParamsStorageTags(c *gc.C) { - volumeTag := names.NewVolumeTag("100") - storageTag := names.NewStorageTag("mystore/0") - unitTag := names.NewUnitTag("mysql/123") - p, err := common.VolumeParams( - &fakeVolume{tag: volumeTag, params: &state.VolumeParams{ - Pool: "loop", Size: 1024, - }}, - &fakeStorageInstance{tag: storageTag, owner: unitTag}, - testing.CustomEnvironConfig(c, nil), - &fakePoolManager{}, - ) - c.Assert(err, jc.ErrorIsNil) - c.Assert(p, jc.DeepEquals, params.VolumeParams{ - VolumeTag: "volume-100", - 
Provider: "loop",
- Size: 1024,
- Tags: map[string]string{
- tags.JujuEnv: testing.EnvironmentTag.Id(),
- tags.JujuStorageInstance: "mystore/0",
- tags.JujuStorageOwner: "mysql/123",
- },
- })
-}
=== modified file 'src/github.com/juju/juju/apiserver/common/watch.go'
--- src/github.com/juju/juju/apiserver/common/watch.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/common/watch.go 2016-03-22 15:18:22 +0000
@@ -84,20 +84,20 @@
 return result, nil
}
-// multiNotifyWatcher implements state.NotifyWatcher, combining
+// MultiNotifyWatcher implements state.NotifyWatcher, combining
// multiple NotifyWatchers.
-type multiNotifyWatcher struct {
+type MultiNotifyWatcher struct {
 tomb tomb.Tomb
 watchers []state.NotifyWatcher
 changes chan struct{}
}
-// newMultiNotifyWatcher creates a NotifyWatcher that combines
+// NewMultiNotifyWatcher creates a NotifyWatcher that combines
// each of the NotifyWatchers passed in. Each watcher's initial
// event is consumed, and a single initial event is sent.
// Subsequent events are not coalesced.
-func newMultiNotifyWatcher(w ...state.NotifyWatcher) *multiNotifyWatcher {
- m := &multiNotifyWatcher{
+func NewMultiNotifyWatcher(w ...state.NotifyWatcher) *MultiNotifyWatcher {
+ m := &MultiNotifyWatcher{
 watchers: w,
 changes: make(chan struct{}),
}
@@ -125,7 +125,7 @@
// loop copies events from the input channel to the output channel,
// coalescing events by waiting a short time between receiving and
// sending.
-func (w *multiNotifyWatcher) loop(in <-chan struct{}) {
+func (w *MultiNotifyWatcher) loop(in <-chan struct{}) {
 defer close(w.changes)
 // out is initialised to w.changes to send the initial event.
 out := w.changes
@@ -165,26 +165,26 @@
}
}
-func (w *multiNotifyWatcher) Kill() {
+func (w *MultiNotifyWatcher) Kill() {
 w.tomb.Kill(nil)
 for _, w := range w.watchers {
 w.Kill()
}
}
-func (w *multiNotifyWatcher) Wait() error {
+func (w *MultiNotifyWatcher) Wait() error {
 return w.tomb.Wait()
}
-func (w *multiNotifyWatcher) Stop() error {
+func (w *MultiNotifyWatcher) Stop() error {
 w.Kill()
 return w.Wait()
}
-func (w *multiNotifyWatcher) Err() error {
+func (w *MultiNotifyWatcher) Err() error {
 return w.tomb.Err()
}
-func (w *multiNotifyWatcher) Changes() <-chan struct{} {
+func (w *MultiNotifyWatcher) Changes() <-chan struct{} {
 return w.changes
}
=== modified file 'src/github.com/juju/juju/apiserver/common/watch_test.go'
--- src/github.com/juju/juju/apiserver/common/watch_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/common/watch_test.go 2016-03-22 15:18:22 +0000
@@ -6,11 +6,10 @@
 import (
 "fmt"
- "launchpad.net/tomb"
-
 "github.com/juju/names"
 jc "github.com/juju/testing/checkers"
 gc "gopkg.in/check.v1"
+ "launchpad.net/tomb"
 "github.com/juju/juju/apiserver/common"
 "github.com/juju/juju/apiserver/params"
=== added directory 'src/github.com/juju/juju/apiserver/controller'
=== added file 'src/github.com/juju/juju/apiserver/controller/controller.go'
--- src/github.com/juju/juju/apiserver/controller/controller.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/controller/controller.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,339 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// The controller package defines an API end point for functions dealing
+// with controllers as a whole.
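In outline, construction follows the other facades; a condensed sketch drawn from the registration and the test suite below (st, resources and authorizer stand for a *state.State, a *common.Resources and a client authorizer for a controller administrator):

    api, err := controller.NewControllerAPI(st, resources, authorizer)
    if err != nil {
        return errors.Trace(err) // non-clients and non-admins are refused
    }
    models, err := api.AllModels() // sorted by model name, then owner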
+package controller
+
+import (
+    "sort"
+
+    "github.com/juju/errors"
+    "github.com/juju/loggo"
+    "github.com/juju/names"
+    "github.com/juju/utils/set"
+
+    "github.com/juju/juju/apiserver/common"
+    "github.com/juju/juju/apiserver/params"
+    "github.com/juju/juju/state"
+)
+
+var logger = loggo.GetLogger("juju.apiserver.controller")
+
+func init() {
+    common.RegisterStandardFacade("Controller", 2, NewControllerAPI)
+}
+
+// Controller defines the methods on the controller API end point.
+type Controller interface {
+    AllModels() (params.UserModelList, error)
+    DestroyController(args params.DestroyControllerArgs) error
+    ModelConfig() (params.ModelConfigResults, error)
+    ListBlockedModels() (params.ModelBlockInfoList, error)
+    RemoveBlocks(args params.RemoveBlocksArgs) error
+    WatchAllModels() (params.AllWatcherId, error)
+    ModelStatus(req params.Entities) (params.ModelStatusResults, error)
+}
+
+// ControllerAPI implements the Controller interface and is
+// the concrete implementation of the API end point.
+type ControllerAPI struct {
+    state *state.State
+    authorizer common.Authorizer
+    apiUser names.UserTag
+    resources *common.Resources
+}
+
+var _ Controller = (*ControllerAPI)(nil)
+
+// NewControllerAPI creates a new API server endpoint for managing
+// models.
+func NewControllerAPI(
+    st *state.State,
+    resources *common.Resources,
+    authorizer common.Authorizer,
+) (*ControllerAPI, error) {
+    if !authorizer.AuthClient() {
+        return nil, errors.Trace(common.ErrPerm)
+    }
+
+    // Since we know this is a user tag (because AuthClient is true),
+    // we just do the type assertion to the UserTag.
+    apiUser, _ := authorizer.GetAuthTag().(names.UserTag)
+    isAdmin, err := st.IsControllerAdministrator(apiUser)
+    if err != nil {
+        return nil, errors.Trace(err)
+    }
+    // The entire end point is only accessible to controller administrators.
+    if !isAdmin {
+        return nil, errors.Trace(common.ErrPerm)
+    }
+
+    return &ControllerAPI{
+        state: st,
+        authorizer: authorizer,
+        apiUser: apiUser,
+        resources: resources,
+    }, nil
+}
+
+// AllModels allows controller administrators to get the list of all the
+// models in the controller.
+func (s *ControllerAPI) AllModels() (params.UserModelList, error) {
+    result := params.UserModelList{}
+
+    // Get all the models that the authenticated user can see, and
+    // supplement that with the other models that exist that the user
+    // cannot see. The reason we do this is to get the LastConnection time for
+    // the models that the user is able to see, so we have consistent
+    // output when listing with or without --all as an admin user.
+    environments, err := s.state.ModelsForUser(s.apiUser)
+    if err != nil {
+        return result, errors.Trace(err)
+    }
+    visibleEnvironments := set.NewStrings()
+    for _, env := range environments {
+        lastConn, err := env.LastConnection()
+        if err != nil && !state.IsNeverConnectedError(err) {
+            return result, errors.Trace(err)
+        }
+        visibleEnvironments.Add(env.UUID())
+        result.UserModels = append(result.UserModels, params.UserModel{
+            Model: params.Model{
+                Name: env.Name(),
+                UUID: env.UUID(),
+                OwnerTag: env.Owner().String(),
+            },
+            LastConnection: &lastConn,
+        })
+    }
+
+    allEnvs, err := s.state.AllModels()
+    if err != nil {
+        return result, errors.Trace(err)
+    }
+
+    for _, env := range allEnvs {
+        if !visibleEnvironments.Contains(env.UUID()) {
+            result.UserModels = append(result.UserModels, params.UserModel{
+                Model: params.Model{
+                    Name: env.Name(),
+                    UUID: env.UUID(),
+                    OwnerTag: env.Owner().String(),
+                },
+                // No LastConnection as this user hasn't connected.
+            })
+        }
+    }
+
+    // Sort the resulting sequence by model name, then owner.
+    sort.Sort(orderedUserModels(result.UserModels))
+
+    return result, nil
+}
+
+// ListBlockedModels returns a list of all models on the controller
+// which have a block in place. The resulting slice is sorted by model
+// name, then owner. Callers must be controller administrators to retrieve the
+// list.
+func (s *ControllerAPI) ListBlockedModels() (params.ModelBlockInfoList, error) {
+    results := params.ModelBlockInfoList{}
+
+    blocks, err := s.state.AllBlocksForController()
+    if err != nil {
+        return results, errors.Trace(err)
+    }
+
+    envBlocks := make(map[string][]string)
+    for _, block := range blocks {
+        uuid := block.ModelUUID()
+        types, ok := envBlocks[uuid]
+        if !ok {
+            types = []string{block.Type().String()}
+        } else {
+            types = append(types, block.Type().String())
+        }
+        envBlocks[uuid] = types
+    }
+
+    for uuid, blocks := range envBlocks {
+        envInfo, err := s.state.GetModel(names.NewModelTag(uuid))
+        if err != nil {
+            logger.Debugf("Unable to get name for model: %s", uuid)
+            continue
+        }
+        results.Models = append(results.Models, params.ModelBlockInfo{
+            UUID: envInfo.UUID(),
+            Name: envInfo.Name(),
+            OwnerTag: envInfo.Owner().String(),
+            Blocks: blocks,
+        })
+    }
+
+    // Sort the resulting sequence by model name, then owner.
+    sort.Sort(orderedBlockInfo(results.Models))
+
+    return results, nil
+}
+
+// ModelConfig returns the config for the controller
+// model. For information on the current model, use
+// client.ModelGet.
+func (s *ControllerAPI) ModelConfig() (params.ModelConfigResults, error) {
+    result := params.ModelConfigResults{}
+
+    controllerEnv, err := s.state.ControllerModel()
+    if err != nil {
+        return result, errors.Trace(err)
+    }
+
+    config, err := controllerEnv.Config()
+    if err != nil {
+        return result, errors.Trace(err)
+    }
+
+    result.Config = config.AllAttrs()
+    return result, nil
+}
+
+// RemoveBlocks removes all the blocks in the controller.
+func (s *ControllerAPI) RemoveBlocks(args params.RemoveBlocksArgs) error {
+    if !args.All {
+        return errors.New("not supported")
+    }
+    return errors.Trace(s.state.RemoveAllBlocksForController())
+}
+
+// WatchAllModels starts watching events for all models in the
+// controller. The returned AllWatcherId should be used with Next on the
+// AllModelWatcher endpoint to receive deltas.
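A consumption sketch for the watcher, following the pattern exercised by TestWatchAllModels in the test file below (the api, st, resources and authorizer values are assumed to be in scope):

    watcherID, err := api.WatchAllModels()
    if err != nil {
        return errors.Trace(err)
    }
    w, err := apiserver.NewAllWatcher(st, resources, authorizer, watcherID.AllWatcherId)
    if err != nil {
        return errors.Trace(err)
    }
    // Next blocks until deltas (e.g. *multiwatcher.ModelInfo) arrive.
    result, err := w.(*apiserver.SrvAllWatcher).Next()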
+func (c *ControllerAPI) WatchAllModels() (params.AllWatcherId, error) {
+    w := c.state.WatchAllModels()
+    return params.AllWatcherId{
+        AllWatcherId: c.resources.Register(w),
+    }, nil
+}
+
+type orderedBlockInfo []params.ModelBlockInfo
+
+func (o orderedBlockInfo) Len() int {
+    return len(o)
+}
+
+func (o orderedBlockInfo) Less(i, j int) bool {
+    if o[i].Name < o[j].Name {
+        return true
+    }
+    if o[i].Name > o[j].Name {
+        return false
+    }
+
+    if o[i].OwnerTag < o[j].OwnerTag {
+        return true
+    }
+    if o[i].OwnerTag > o[j].OwnerTag {
+        return false
+    }
+
+    // Unreachable based on the rules of there not being duplicate
+    // models of the same name for the same owner, but return false
+    // instead of panicking.
+    return false
+}
+
+// ModelStatus returns a summary of each requested model.
+func (c *ControllerAPI) ModelStatus(req params.Entities) (params.ModelStatusResults, error) {
+    envs := req.Entities
+    results := params.ModelStatusResults{}
+    status := make([]params.ModelStatus, len(envs))
+    for i, env := range envs {
+        envStatus, err := c.environStatus(env.Tag)
+        if err != nil {
+            return results, errors.Trace(err)
+        }
+        status[i] = envStatus
+    }
+    results.Results = status
+    return results, nil
+}
+
+func (c *ControllerAPI) environStatus(tag string) (params.ModelStatus, error) {
+    var status params.ModelStatus
+    modelTag, err := names.ParseModelTag(tag)
+    if err != nil {
+        return status, errors.Trace(err)
+    }
+    st, err := c.state.ForModel(modelTag)
+    if err != nil {
+        return status, errors.Trace(err)
+    }
+    defer st.Close()
+
+    machines, err := st.AllMachines()
+    if err != nil {
+        return status, errors.Trace(err)
+    }
+
+    var hostedMachines []*state.Machine
+    for _, m := range machines {
+        if !m.IsManager() {
+            hostedMachines = append(hostedMachines, m)
+        }
+    }
+
+    services, err := st.AllServices()
+    if err != nil {
+        return status, errors.Trace(err)
+    }
+
+    env, err := st.Model()
+    if err != nil {
+        return status, errors.Trace(err)
+    }
+
+    return params.ModelStatus{
+        ModelTag: tag,
+        OwnerTag: env.Owner().String(),
+        Life: params.Life(env.Life().String()),
+        HostedMachineCount: len(hostedMachines),
+        ServiceCount: len(services),
+    }, nil
+}
+
+func (o orderedBlockInfo) Swap(i, j int) {
+    o[i], o[j] = o[j], o[i]
+}
+
+type orderedUserModels []params.UserModel
+
+func (o orderedUserModels) Len() int {
+    return len(o)
+}
+
+func (o orderedUserModels) Less(i, j int) bool {
+    if o[i].Name < o[j].Name {
+        return true
+    }
+    if o[i].Name > o[j].Name {
+        return false
+    }
+
+    if o[i].OwnerTag < o[j].OwnerTag {
+        return true
+    }
+    if o[i].OwnerTag > o[j].OwnerTag {
+        return false
+    }
+
+    // Unreachable based on the rules of there not being duplicate
+    // models of the same name for the same owner, but return false
+    // instead of panicking.
+    return false
+}
+
+func (o orderedUserModels) Swap(i, j int) {
+    o[i], o[j] = o[j], o[i]
+}
=== added file 'src/github.com/juju/juju/apiserver/controller/controller_test.go'
--- src/github.com/juju/juju/apiserver/controller/controller_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/controller/controller_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,269 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package controller_test + +import ( + "time" + + "github.com/juju/loggo" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/controller" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type controllerSuite struct { + jujutesting.JujuConnSuite + + controller *controller.ControllerAPI + resources *common.Resources + authorizer apiservertesting.FakeAuthorizer +} + +var _ = gc.Suite(&controllerSuite{}) + +func (s *controllerSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.AdminUserTag(c), + } + + controller, err := controller.NewControllerAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + s.controller = controller + + loggo.GetLogger("juju.apiserver.controller").SetLogLevel(loggo.TRACE) +} + +func (s *controllerSuite) TestNewAPIRefusesNonClient(c *gc.C) { + anAuthoriser := apiservertesting.FakeAuthorizer{ + Tag: names.NewUnitTag("mysql/0"), + } + endPoint, err := controller.NewControllerAPI(s.State, s.resources, anAuthoriser) + c.Assert(endPoint, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *controllerSuite) TestNewAPIRefusesNonAdmins(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true}) + anAuthoriser := apiservertesting.FakeAuthorizer{ + Tag: user.Tag(), + } + endPoint, err := controller.NewControllerAPI(s.State, s.resources, anAuthoriser) + c.Assert(endPoint, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *controllerSuite) checkEnvironmentMatches(c *gc.C, env params.Model, expected *state.Model) { + c.Check(env.Name, gc.Equals, expected.Name()) + c.Check(env.UUID, gc.Equals, expected.UUID()) + c.Check(env.OwnerTag, gc.Equals, expected.Owner().String()) +} + +func (s *controllerSuite) TestAllModels(c *gc.C) { + admin := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar"}) + + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "owned", Owner: admin.UserTag()}).Close() + remoteUserTag := names.NewUserTag("user@remote") + st := s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "user", Owner: remoteUserTag}) + defer st.Close() + st.AddModelUser(state.ModelUserSpec{ + User: admin.UserTag(), + CreatedBy: remoteUserTag, + DisplayName: "Foo Bar"}) + + s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "no-access", Owner: remoteUserTag}).Close() + + response, err := s.controller.AllModels() + c.Assert(err, jc.ErrorIsNil) + // The results are sorted. 
+ expected := []string{"dummymodel", "no-access", "owned", "user"} + var obtained []string + for _, env := range response.UserModels { + obtained = append(obtained, env.Name) + stateEnv, err := s.State.GetModel(names.NewModelTag(env.UUID)) + c.Assert(err, jc.ErrorIsNil) + s.checkEnvironmentMatches(c, env.Model, stateEnv) + } + c.Assert(obtained, jc.DeepEquals, expected) +} + +func (s *controllerSuite) TestListBlockedModels(c *gc.C) { + st := s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "test"}) + defer st.Close() + + s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + st.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + + list, err := s.controller.ListBlockedModels() + c.Assert(err, jc.ErrorIsNil) + + c.Assert(list.Models, jc.DeepEquals, []params.ModelBlockInfo{ + params.ModelBlockInfo{ + Name: "dummymodel", + UUID: s.State.ModelUUID(), + OwnerTag: s.AdminUserTag(c).String(), + Blocks: []string{ + "BlockDestroy", + "BlockChange", + }, + }, + params.ModelBlockInfo{ + Name: "test", + UUID: st.ModelUUID(), + OwnerTag: s.AdminUserTag(c).String(), + Blocks: []string{ + "BlockDestroy", + "BlockChange", + }, + }, + }) + +} + +func (s *controllerSuite) TestListBlockedModelsNoBlocks(c *gc.C) { + list, err := s.controller.ListBlockedModels() + c.Assert(err, jc.ErrorIsNil) + c.Assert(list.Models, gc.HasLen, 0) +} + +func (s *controllerSuite) TestModelConfig(c *gc.C) { + env, err := s.controller.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Config["name"], gc.Equals, "dummymodel") +} + +func (s *controllerSuite) TestModelConfigFromNonController(c *gc.C) { + st := s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "test"}) + defer st.Close() + + authorizer := &apiservertesting.FakeAuthorizer{Tag: s.AdminUserTag(c)} + controller, err := controller.NewControllerAPI(st, common.NewResources(), authorizer) + c.Assert(err, jc.ErrorIsNil) + env, err := controller.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Config["name"], gc.Equals, "dummymodel") +} + +func (s *controllerSuite) TestRemoveBlocks(c *gc.C) { + st := s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "test"}) + defer st.Close() + + s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + st.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + + err := s.controller.RemoveBlocks(params.RemoveBlocksArgs{All: true}) + c.Assert(err, jc.ErrorIsNil) + + blocks, err := s.State.AllBlocksForController() + c.Assert(err, jc.ErrorIsNil) + c.Assert(blocks, gc.HasLen, 0) +} + +func (s *controllerSuite) TestRemoveBlocksNotAll(c *gc.C) { + err := s.controller.RemoveBlocks(params.RemoveBlocksArgs{}) + c.Assert(err, gc.ErrorMatches, "not supported") +} + +func (s *controllerSuite) TestWatchAllModels(c *gc.C) { + watcherId, err := s.controller.WatchAllModels() + c.Assert(err, jc.ErrorIsNil) + + watcherAPI_, err := apiserver.NewAllWatcher(s.State, s.resources, s.authorizer, watcherId.AllWatcherId) + c.Assert(err, jc.ErrorIsNil) + watcherAPI := watcherAPI_.(*apiserver.SrvAllWatcher) + defer func() { + err := watcherAPI.Stop() + c.Assert(err, jc.ErrorIsNil) + }() + + resultC := make(chan params.AllWatcherNextResults) + go func() { + result, err := watcherAPI.Next() + c.Assert(err, jc.ErrorIsNil) + resultC <- result + }() + + select { + case result 
:= <-resultC: + // Expect to see the initial environment be reported. + deltas := result.Deltas + c.Assert(deltas, gc.HasLen, 1) + envInfo := deltas[0].Entity.(*multiwatcher.ModelInfo) + c.Assert(envInfo.ModelUUID, gc.Equals, s.State.ModelUUID()) + case <-time.After(testing.LongWait): + c.Fatal("timed out") + } +} + +func (s *controllerSuite) TestModelStatus(c *gc.C) { + otherEnvOwner := s.Factory.MakeModelUser(c, nil) + otherSt := s.Factory.MakeModel(c, &factory.ModelParams{ + Name: "dummytoo", + Owner: otherEnvOwner.UserTag(), + Prepare: true, + ConfigAttrs: testing.Attrs{ + "controller": false, + }, + }) + defer otherSt.Close() + + s.Factory.MakeMachine(c, &factory.MachineParams{Jobs: []state.MachineJob{state.JobManageModel}}) + s.Factory.MakeMachine(c, &factory.MachineParams{Jobs: []state.MachineJob{state.JobHostUnits}}) + s.Factory.MakeService(c, &factory.ServiceParams{ + Charm: s.Factory.MakeCharm(c, nil), + }) + + otherFactory := factory.NewFactory(otherSt) + otherFactory.MakeMachine(c, nil) + otherFactory.MakeMachine(c, nil) + otherFactory.MakeService(c, &factory.ServiceParams{ + Charm: otherFactory.MakeCharm(c, nil), + }) + + controllerEnvTag := s.State.ModelTag().String() + hostedEnvTag := otherSt.ModelTag().String() + + req := params.Entities{ + Entities: []params.Entity{{Tag: controllerEnvTag}, {Tag: hostedEnvTag}}, + } + results, err := s.controller.ModelStatus(req) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.DeepEquals, []params.ModelStatus{{ + ModelTag: controllerEnvTag, + HostedMachineCount: 1, + ServiceCount: 1, + OwnerTag: "user-admin@local", + Life: params.Alive, + }, { + ModelTag: hostedEnvTag, + HostedMachineCount: 2, + ServiceCount: 1, + OwnerTag: otherEnvOwner.UserTag().String(), + Life: params.Alive, + }}) +} === added file 'src/github.com/juju/juju/apiserver/controller/destroy.go' --- src/github.com/juju/juju/apiserver/controller/destroy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/controller/destroy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +// DestroyController will attempt to destroy the controller. If the args +// specify the removal of blocks or the destruction of the models, this +// method will attempt to do so. +func (s *ControllerAPI) DestroyController(args params.DestroyControllerArgs) error { + controllerEnv, err := s.state.ControllerModel() + if err != nil { + return errors.Trace(err) + } + systemTag := controllerEnv.ModelTag() + + if err = s.ensureNotBlocked(args); err != nil { + return errors.Trace(err) + } + + // If we are destroying models, we need to tolerate living + // models but set the controller to dying to prevent new + // models sneaking in. If we are not destroying hosted models, + // this will fail if any hosted models are found. + if args.DestroyModels { + return errors.Trace(common.DestroyModelIncludingHosted(s.state, systemTag)) + } + if err = common.DestroyModel(s.state, systemTag); state.IsHasHostedModelsError(err) { + err = errors.New("controller model cannot be destroyed before all other models are destroyed") + } + return errors.Trace(err) +} + +func (s *ControllerAPI) ensureNotBlocked(args params.DestroyControllerArgs) error { + // If there are blocks let the user know. 
+ blocks, err := s.state.AllBlocksForController() + if err != nil { + logger.Debugf("Unable to get blocks for controller: %s", err) + return errors.Trace(err) + } + + if len(blocks) > 0 { + return common.OperationBlockedError("found blocks in controller models") + } + return nil +} === added file 'src/github.com/juju/juju/apiserver/controller/destroy_test.go' --- src/github.com/juju/juju/apiserver/controller/destroy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/controller/destroy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,169 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package controller_test + +import ( + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + commontesting "github.com/juju/juju/apiserver/common/testing" + "github.com/juju/juju/apiserver/controller" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +// NOTE: the testing of the general environment destruction code +// is found in apiserver/common/environdestroy_test.go. +// +// The tests here are around the validation and behaviour of +// the flags passed in to the destroy controller call. + +type destroyControllerSuite struct { + jujutesting.JujuConnSuite + commontesting.BlockHelper + + controller *controller.ControllerAPI + + otherState *state.State + otherEnvOwner names.UserTag + otherModelUUID string +} + +var _ = gc.Suite(&destroyControllerSuite{}) + +func (s *destroyControllerSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + s.BlockHelper = commontesting.NewBlockHelper(s.APIState) + s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() }) + + resources := common.NewResources() + s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) + + authoriser := apiservertesting.FakeAuthorizer{ + Tag: s.AdminUserTag(c), + } + controller, err := controller.NewControllerAPI(s.State, resources, authoriser) + c.Assert(err, jc.ErrorIsNil) + s.controller = controller + + s.otherEnvOwner = names.NewUserTag("jess@dummy") + s.otherState = factory.NewFactory(s.State).MakeModel(c, &factory.ModelParams{ + Name: "dummytoo", + Owner: s.otherEnvOwner, + Prepare: true, + ConfigAttrs: testing.Attrs{ + "controller": false, + }, + }) + s.AddCleanup(func(c *gc.C) { s.otherState.Close() }) + s.otherModelUUID = s.otherState.ModelUUID() +} + +func (s *destroyControllerSuite) TestDestroyControllerKillErrsOnHostedEnvsWithBlocks(c *gc.C) { + s.BlockDestroyModel(c, "TestBlockDestroyModel") + s.BlockRemoveObject(c, "TestBlockRemoveObject") + s.otherState.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.otherState.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + + err := s.controller.DestroyController(params.DestroyControllerArgs{ + DestroyModels: true, + }) + c.Assert(err, gc.ErrorMatches, "found blocks in controller models") + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Alive) +} + +func (s *destroyControllerSuite) TestDestroyControllerReturnsBlockedEnvironmentsErr(c *gc.C) { + s.BlockDestroyModel(c, "TestBlockDestroyModel") + s.BlockRemoveObject(c, "TestBlockRemoveObject") + s.otherState.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.otherState.SwitchBlockOn(state.ChangeBlock, 
"TestChangeBlock") + + err := s.controller.DestroyController(params.DestroyControllerArgs{ + DestroyModels: true, + }) + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) + + numBlocks, err := s.State.AllBlocksForController() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(numBlocks), gc.Equals, 4) + + _, err = s.otherState.Model() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *destroyControllerSuite) TestDestroyControllerKillsHostedEnvs(c *gc.C) { + err := s.controller.DestroyController(params.DestroyControllerArgs{ + DestroyModels: true, + }) + c.Assert(err, jc.ErrorIsNil) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Dying) +} + +func (s *destroyControllerSuite) TestDestroyControllerLeavesBlocksIfNotKillAll(c *gc.C) { + s.BlockDestroyModel(c, "TestBlockDestroyModel") + s.BlockRemoveObject(c, "TestBlockRemoveObject") + s.otherState.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyModel") + s.otherState.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") + + err := s.controller.DestroyController(params.DestroyControllerArgs{}) + c.Assert(err, gc.ErrorMatches, "found blocks in controller models") + + numBlocks, err := s.State.AllBlocksForController() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(numBlocks), gc.Equals, 4) +} + +func (s *destroyControllerSuite) TestDestroyControllerNoHostedEnvs(c *gc.C) { + err := common.DestroyModel(s.State, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + err = s.controller.DestroyController(params.DestroyControllerArgs{}) + c.Assert(err, jc.ErrorIsNil) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Dying) +} + +func (s *destroyControllerSuite) TestDestroyControllerErrsOnNoHostedEnvsWithBlock(c *gc.C) { + err := common.DestroyModel(s.State, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + s.BlockDestroyModel(c, "TestBlockDestroyModel") + s.BlockRemoveObject(c, "TestBlockRemoveObject") + + err = s.controller.DestroyController(params.DestroyControllerArgs{}) + c.Assert(err, gc.ErrorMatches, "found blocks in controller models") + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Alive) +} + +func (s *destroyControllerSuite) TestDestroyControllerNoHostedEnvsWithBlockFail(c *gc.C) { + err := common.DestroyModel(s.State, s.otherState.ModelTag()) + c.Assert(err, jc.ErrorIsNil) + + s.BlockDestroyModel(c, "TestBlockDestroyModel") + s.BlockRemoveObject(c, "TestBlockRemoveObject") + + err = s.controller.DestroyController(params.DestroyControllerArgs{}) + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) + + numBlocks, err := s.State.AllBlocksForController() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(numBlocks), gc.Equals, 2) +} === added file 'src/github.com/juju/juju/apiserver/controller/package_test.go' --- src/github.com/juju/juju/apiserver/controller/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/controller/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package controller_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/apiserver/debuglog.go' --- src/github.com/juju/juju/apiserver/debuglog.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog.go 2016-03-22 15:18:22 +0000 @@ -4,8 +4,6 @@ package apiserver import ( - "encoding/json" - "fmt" "io" "net" "net/http" @@ -17,6 +15,7 @@ "github.com/juju/loggo" "golang.org/x/net/websocket" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) @@ -27,8 +26,7 @@ // variants. The supplied handle func allows for varied handling of // requests. type debugLogHandler struct { - httpHandler - stop <-chan struct{} + ctxt httpContext handle debugLogHandlerFunc } @@ -40,14 +38,12 @@ ) error func newDebugLogHandler( - statePool *state.StatePool, - stop <-chan struct{}, + ctxt httpContext, handle debugLogHandlerFunc, ) *debugLogHandler { return &debugLogHandler{ - httpHandler: httpHandler{statePool: statePool}, - stop: stop, - handle: handle, + ctxt: ctxt, + handle: handle, } } @@ -69,6 +65,8 @@ // - has no meaning if 'replay' is true // level -> string one of [TRACE, DEBUG, INFO, WARNING, ERROR] // replay -> string - one of [true, false], if true, start the file from the start +// noTail -> string - one of [true, false], if true, existing logs are sent back, +// - but the command does not wait for new ones. func (h *debugLogHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { server := websocket.Server{ Handler: func(conn *websocket.Conn) { @@ -79,30 +77,26 @@ // Validate before authenticate because the authentication is // dependent on the state connection that is determined during the // validation. - stateWrapper, err := h.validateEnvironUUID(req) + st, _, err := h.ctxt.stateForRequestAuthenticatedUser(req) if err != nil { socket.sendError(err) return } - if err := stateWrapper.authenticateUser(req); err != nil { - socket.sendError(fmt.Errorf("auth failed: %v", err)) - return - } - params, err := readDebugLogParams(req.URL.Query()) if err != nil { socket.sendError(err) return } - if err := h.handle(stateWrapper.state, params, socket, h.stop); err != nil { + if err := h.handle(st, params, socket, h.ctxt.stop()); err != nil { if isBrokenPipe(err) { logger.Tracef("debug-log handler stopped (client disconnected)") } else { logger.Errorf("debug-log handler error: %v", err) } } - }} + }, + } server.ServeHTTP(w, req) } @@ -120,10 +114,10 @@ io.Writer // sendOk sends a nil error response, indicating there were no errors. - sendOk() error + sendOk() // sendError sends a JSON-encoded error response. - sendError(err error) error + sendError(err error) } // debugLogSocketImpl implements the debugLogSocket interface. It @@ -133,32 +127,23 @@ *websocket.Conn } -// sendOK implements debugLogSocket. -func (s *debugLogSocketImpl) sendOk() error { - return s.sendError(nil) +// sendOk implements debugLogSocket. +func (s *debugLogSocketImpl) sendOk() { + s.sendError(nil) } -// sendErr implements debugLogSocket. -func (s *debugLogSocketImpl) sendError(err error) error { - response := &params.ErrorResult{} - if err != nil { - response.Error = &params.Error{Message: fmt.Sprint(err)} - } - message, err := json.Marshal(response) - if err != nil { - // If we are having trouble marshalling the error, we are in big trouble.
- logger.Errorf("failure to marshal SimpleError: %v", err) - return err - } - message = append(message, []byte("\n")...) - _, err = s.Conn.Write(message) - return err +// sendError implements debugLogSocket. +func (s *debugLogSocketImpl) sendError(err error) { + sendJSON(s.Conn, ¶ms.ErrorResult{ + Error: common.ServerError(err), + }) } // debugLogParams contains the parsed debuglog API request parameters. type debugLogParams struct { maxLines uint fromTheStart bool + noTail bool backlog uint filterLevel loggo.Level includeEntity []string @@ -186,6 +171,14 @@ params.fromTheStart = replay } + if value := queryMap.Get("noTail"); value != "" { + noTail, err := strconv.ParseBool(value) + if err != nil { + return nil, errors.Errorf("noTail value %q is not a valid boolean", value) + } + params.noTail = noTail + } + if value := queryMap.Get("backlog"); value != "" { num, err := strconv.ParseUint(value, 10, 64) if err != nil { === modified file 'src/github.com/juju/juju/apiserver/debuglog_db.go' --- src/github.com/juju/juju/apiserver/debuglog_db.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_db.go 2016-03-22 15:18:22 +0000 @@ -13,8 +13,8 @@ "github.com/juju/juju/state" ) -func newDebugLogDBHandler(statePool *state.StatePool, stop <-chan struct{}) http.Handler { - return newDebugLogHandler(statePool, stop, handleDebugLogDBRequest) +func newDebugLogDBHandler(ctxt httpContext) http.Handler { + return newDebugLogHandler(ctxt, handleDebugLogDBRequest) } func handleDebugLogDBRequest( @@ -28,9 +28,7 @@ defer tailer.Stop() // Indicate that all is well. - if err := socket.sendOk(); err != nil { - return errors.Trace(err) - } + socket.sendOk() var lineCount uint for { @@ -54,13 +52,12 @@ } } } - - return nil } func makeLogTailerParams(reqParams *debugLogParams) *state.LogTailerParams { params := &state.LogTailerParams{ MinLevel: reqParams.filterLevel, + NoTail: reqParams.noTail, InitialLines: int(reqParams.backlog), IncludeEntity: reqParams.includeEntity, ExcludeEntity: reqParams.excludeEntity, === modified file 'src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go' --- src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_db_internal_test.go 2016-03-22 15:18:22 +0000 @@ -30,6 +30,7 @@ func (s *debugLogDBIntSuite) TestParamConversion(c *gc.C) { reqParams := &debugLogParams{ fromTheStart: false, + noTail: true, backlog: 11, filterLevel: loggo.INFO, includeEntity: []string{"foo"}, @@ -45,7 +46,7 @@ // Start time will be used once the client is extended to send // time range arguments. 
c.Assert(params.StartTime.IsZero(), jc.IsTrue) - + c.Assert(params.NoTail, jc.IsTrue) c.Assert(params.MinLevel, gc.Equals, loggo.INFO) c.Assert(params.InitialLines, gc.Equals, 11) c.Assert(params.IncludeEntity, jc.DeepEquals, []string{"foo"}) @@ -234,14 +235,12 @@ writes chan string } -func (s *fakeDebugLogSocket) sendOk() error { +func (s *fakeDebugLogSocket) sendOk() { s.writes <- "ok" - return nil } -func (s *fakeDebugLogSocket) sendError(err error) error { +func (s *fakeDebugLogSocket) sendError(err error) { s.writes <- fmt.Sprintf("err: %v", err) - return nil } func (s *fakeDebugLogSocket) Write(buf []byte) (int, error) { === modified file 'src/github.com/juju/juju/apiserver/debuglog_db_test.go' --- src/github.com/juju/juju/apiserver/debuglog_db_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_db_test.go 2016-03-22 15:18:22 +0000 @@ -14,10 +14,5 @@ var _ = gc.Suite(&debugLogDBSuite{}) -func (s *debugLogDBSuite) SetUpSuite(c *gc.C) { - s.SetInitialFeatureFlags("db-log") - s.debugLogBaseSuite.SetUpSuite(c) -} - // See debuglog_db_internal_test.go for DB specific unit tests and the // featuretests package for an end-to-end integration test. === removed file 'src/github.com/juju/juju/apiserver/debuglog_file.go' --- src/github.com/juju/juju/apiserver/debuglog_file.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_file.go 1970-01-01 00:00:00 +0000 @@ -1,253 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/juju/juju/state" - "github.com/juju/loggo" - "github.com/juju/names" - "github.com/juju/utils/tailer" -) - -func newDebugLogFileHandler(statePool *state.StatePool, stop <-chan struct{}, logDir string) http.Handler { - fileHandler := &debugLogFileHandler{logDir: logDir} - return newDebugLogHandler(statePool, stop, fileHandler.handle) -} - -// debugLogFileHandler handles requests to watch all-machines.log. -type debugLogFileHandler struct { - logDir string -} - -func (h *debugLogFileHandler) handle( - _ state.LoggingState, - params *debugLogParams, - socket debugLogSocket, - stop <-chan struct{}, -) error { - stream := newLogFileStream(params) - - // Open log file. - logLocation := filepath.Join(h.logDir, "all-machines.log") - logFile, err := os.Open(logLocation) - if err != nil { - socket.sendError(fmt.Errorf("cannot open log file: %v", err)) - return err - } - defer logFile.Close() - - if err := stream.positionLogFile(logFile); err != nil { - socket.sendError(fmt.Errorf("cannot position log file: %v", err)) - return err - } - - // If we get to here, no more errors to report. - if err := socket.sendOk(); err != nil { - return err - } - - stream.start(logFile, socket) - return stream.wait(stop) -} - -func newLogFileStream(params *debugLogParams) *logFileStream { - return &logFileStream{ - debugLogParams: params, - maxLinesReached: make(chan bool), - } -} - -type logFileLine struct { - line string - agentTag string - agentName string - level loggo.Level - module string -} - -func parseLogLine(line string) *logFileLine { - const ( - agentTagIndex = 0 - levelIndex = 3 - moduleIndex = 4 - ) - fields := strings.Fields(line) - result := &logFileLine{ - line: line, - } - if len(fields) > agentTagIndex { - agentTag := fields[agentTagIndex] - // Drop mandatory trailing colon (:). - // Since colon is mandatory, agentTag without it is invalid and will be empty (""). 
- if strings.HasSuffix(agentTag, ":") { - result.agentTag = agentTag[:len(agentTag)-1] - } - /* - Drop unit suffix. - In logs, unit information may be prefixed with either a unit_tag by itself or a unit_tag[nnnn]. - The code below caters for both scenarios. - */ - if bracketIndex := strings.Index(agentTag, "["); bracketIndex != -1 { - result.agentTag = agentTag[:bracketIndex] - } - // If, at this stage, result.agentTag is empty, we could not deduce the tag. No point getting the name... - if result.agentTag != "" { - // Entity Name deduced from entity tag - entityTag, err := names.ParseTag(result.agentTag) - if err != nil { - /* - Logging error but effectively swallowing it as there is nowhere to propagate. - We don't expect ParseTag to fail since the tag was generated by juju in the first place. - */ - logger.Errorf("Could not deduce name from tag %q: %v\n", result.agentTag, err) - } - result.agentName = entityTag.Id() - } - } - if len(fields) > moduleIndex { - if level, valid := loggo.ParseLevel(fields[levelIndex]); valid { - result.level = level - result.module = fields[moduleIndex] - } - } - - return result -} - -// logFileStream runs the tailer to read a log file and stream it via -// a web socket. -type logFileStream struct { - *debugLogParams - logTailer *tailer.Tailer - lineCount uint - maxLinesReached chan bool -} - -// positionLogFile will update the internal read position of the logFile to be -// at the end of the file or somewhere in the middle if backlog has been specified. -func (stream *logFileStream) positionLogFile(logFile io.ReadSeeker) error { - // Seek to the end, or lines back from the end if we need to. - if !stream.fromTheStart { - return tailer.SeekLastLines(logFile, stream.backlog, stream.filterLine) - } - return nil -} - -// start the tailer listening to the logFile, and sending the matching -// lines to the writer. -func (stream *logFileStream) start(logFile io.ReadSeeker, writer io.Writer) { - stream.logTailer = tailer.NewTailer(logFile, writer, stream.countedFilterLine) -} - -// wait blocks until the logTailer is done or the maximum line count -// has been reached or the stop channel is closed. -func (stream *logFileStream) wait(stop <-chan struct{}) error { - select { - case <-stream.logTailer.Dead(): - return stream.logTailer.Err() - case <-stream.maxLinesReached: - stream.logTailer.Stop() - case <-stop: - stream.logTailer.Stop() - } - return nil -} - -// filterLine checks the received line for one of the configured tags. -func (stream *logFileStream) filterLine(line []byte) bool { - log := parseLogLine(string(line)) - return stream.checkIncludeEntity(log) && - stream.checkIncludeModule(log) && - !stream.exclude(log) && - stream.checkLevel(log) -} - -// countedFilterLine checks the received line for one of the configured tags, -// and also checks to make sure the stream doesn't send more than the -// specified number of lines.
-func (stream *logFileStream) countedFilterLine(line []byte) bool { - result := stream.filterLine(line) - if result && stream.maxLines > 0 { - stream.lineCount++ - result = stream.lineCount <= stream.maxLines - if stream.lineCount == stream.maxLines { - close(stream.maxLinesReached) - } - } - return result -} - -func (stream *logFileStream) checkIncludeEntity(line *logFileLine) bool { - if len(stream.includeEntity) == 0 { - return true - } - for _, value := range stream.includeEntity { - if agentMatchesFilter(line, value) { - return true - } - } - return false -} - -// agentMatchesFilter checks if the agent tag or agent name matches the given filter -func agentMatchesFilter(line *logFileLine, aFilter string) bool { - return hasMatch(line.agentName, aFilter) || hasMatch(line.agentTag, aFilter) -} - -// hasMatch determines if value contains filter using regular expressions. -// All wildcard occurrences are changed to `.*` -// Currently, all match exceptions are logged and not propagated. -func hasMatch(value, aFilter string) bool { - /* Special handling: out of 12 regexp metacharacters \^$.|?+()[*{ - only the asterisk (*) can be legally used as a wildcard in this context. - Both machine and unit tag and name specifications do not allow any other metas. - Consequently, if aFilter contains wildcard (*), do not escape it - - transform it into a regexp "any character(s)" sequence. - */ - aFilter = strings.Replace(aFilter, "*", `.*`, -1) - matches, err := regexp.MatchString("^"+aFilter+"$", value) - if err != nil { - // logging errors here... but really should they be swallowed? - logger.Errorf("\nCould not match filter %q and regular expression %q\n.%v\n", value, aFilter, err) - } - return matches -} - -func (stream *logFileStream) checkIncludeModule(line *logFileLine) bool { - if len(stream.includeModule) == 0 { - return true - } - for _, value := range stream.includeModule { - if strings.HasPrefix(line.module, value) { - return true - } - } - return false -} - -func (stream *logFileStream) exclude(line *logFileLine) bool { - for _, value := range stream.excludeEntity { - if agentMatchesFilter(line, value) { - return true - } - } - for _, value := range stream.excludeModule { - if strings.HasPrefix(line.module, value) { - return true - } - } - return false -} - -func (stream *logFileStream) checkLevel(line *logFileLine) bool { - return line.level >= stream.filterLevel -} === removed file 'src/github.com/juju/juju/apiserver/debuglog_file_internal_test.go' --- src/github.com/juju/juju/apiserver/debuglog_file_internal_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_file_internal_test.go 1970-01-01 00:00:00 +0000 @@ -1,459 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// This is an internal package test.
- -package apiserver - -import ( - "bytes" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/juju/loggo" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" -) - -type debugLogFileIntSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&debugLogFileIntSuite{}) - -func (s *debugLogFileIntSuite) TestParseLogLine(c *gc.C) { - line := "machine-0: 2014-03-24 22:34:25 INFO juju.cmd.jujud machine.go:127 machine agent machine-0 start (1.17.7.1-trusty-amd64 [gc])" - logLine := parseLogLine(line) - c.Assert(logLine.line, gc.Equals, line) - c.Assert(logLine.agentTag, gc.Equals, "machine-0") - c.Assert(logLine.level, gc.Equals, loggo.INFO) - c.Assert(logLine.module, gc.Equals, "juju.cmd.jujud") -} - -func (s *debugLogFileIntSuite) TestParseLogLineMachineMultiline(c *gc.C) { - line := "machine-1: continuation line" - logLine := parseLogLine(line) - c.Assert(logLine.line, gc.Equals, line) - c.Assert(logLine.agentTag, gc.Equals, "machine-1") - c.Assert(logLine.level, gc.Equals, loggo.UNSPECIFIED) - c.Assert(logLine.module, gc.Equals, "") -} - -func (s *debugLogFileIntSuite) TestParseLogLineInvalid(c *gc.C) { - line := "not a full line" - logLine := parseLogLine(line) - c.Assert(logLine.line, gc.Equals, line) - c.Assert(logLine.agentTag, gc.Equals, "") - c.Assert(logLine.level, gc.Equals, loggo.UNSPECIFIED) - c.Assert(logLine.module, gc.Equals, "") -} - -func checkLevel(logValue, streamValue loggo.Level) bool { - line := &logFileLine{level: logValue} - params := debugLogParams{} - if streamValue != loggo.UNSPECIFIED { - params.filterLevel = streamValue - } - return newLogFileStream(¶ms).checkLevel(line) -} - -func (s *debugLogFileIntSuite) TestCheckLevel(c *gc.C) { - c.Check(checkLevel(loggo.UNSPECIFIED, loggo.UNSPECIFIED), jc.IsTrue) - c.Check(checkLevel(loggo.TRACE, loggo.UNSPECIFIED), jc.IsTrue) - c.Check(checkLevel(loggo.DEBUG, loggo.UNSPECIFIED), jc.IsTrue) - c.Check(checkLevel(loggo.INFO, loggo.UNSPECIFIED), jc.IsTrue) - c.Check(checkLevel(loggo.WARNING, loggo.UNSPECIFIED), jc.IsTrue) - c.Check(checkLevel(loggo.ERROR, loggo.UNSPECIFIED), jc.IsTrue) - c.Check(checkLevel(loggo.CRITICAL, loggo.UNSPECIFIED), jc.IsTrue) - - c.Check(checkLevel(loggo.UNSPECIFIED, loggo.TRACE), jc.IsFalse) - c.Check(checkLevel(loggo.TRACE, loggo.TRACE), jc.IsTrue) - c.Check(checkLevel(loggo.DEBUG, loggo.TRACE), jc.IsTrue) - c.Check(checkLevel(loggo.INFO, loggo.TRACE), jc.IsTrue) - c.Check(checkLevel(loggo.WARNING, loggo.TRACE), jc.IsTrue) - c.Check(checkLevel(loggo.ERROR, loggo.TRACE), jc.IsTrue) - c.Check(checkLevel(loggo.CRITICAL, loggo.TRACE), jc.IsTrue) - - c.Check(checkLevel(loggo.UNSPECIFIED, loggo.INFO), jc.IsFalse) - c.Check(checkLevel(loggo.TRACE, loggo.INFO), jc.IsFalse) - c.Check(checkLevel(loggo.DEBUG, loggo.INFO), jc.IsFalse) - c.Check(checkLevel(loggo.INFO, loggo.INFO), jc.IsTrue) - c.Check(checkLevel(loggo.WARNING, loggo.INFO), jc.IsTrue) - c.Check(checkLevel(loggo.ERROR, loggo.INFO), jc.IsTrue) - c.Check(checkLevel(loggo.CRITICAL, loggo.INFO), jc.IsTrue) -} - -func checkIncludeEntity(logValue string, agent ...string) bool { - stream := newLogFileStream(&debugLogParams{ - includeEntity: agent, - }) - line := &logFileLine{agentTag: logValue} - return stream.checkIncludeEntity(line) -} - -func (s *debugLogFileIntSuite) TestCheckIncludeEntity(c *gc.C) { - c.Check(checkIncludeEntity("machine-0"), jc.IsTrue) - c.Check(checkIncludeEntity("machine-0", "machine-0"), jc.IsTrue) - c.Check(checkIncludeEntity("machine-1", "machine-0"), jc.IsFalse) - 
c.Check(checkIncludeEntity("machine-1", "machine-0", "machine-1"), jc.IsTrue) - c.Check(checkIncludeEntity("machine-0-lxc-0", "machine-0"), jc.IsFalse) - c.Check(checkIncludeEntity("machine-0-lxc-0", "machine-0*"), jc.IsTrue) - c.Check(checkIncludeEntity("machine-0-lxc-0", "machine-0-lxc-*"), jc.IsTrue) -} - -func checkIncludeModule(logValue string, module ...string) bool { - stream := newLogFileStream(&debugLogParams{ - includeModule: module, - }) - line := &logFileLine{module: logValue} - return stream.checkIncludeModule(line) -} - -func (s *debugLogFileIntSuite) TestCheckIncludeModule(c *gc.C) { - c.Check(checkIncludeModule("juju"), jc.IsTrue) - c.Check(checkIncludeModule("juju", "juju"), jc.IsTrue) - c.Check(checkIncludeModule("juju", "juju.environ"), jc.IsFalse) - c.Check(checkIncludeModule("juju.provisioner", "juju"), jc.IsTrue) - c.Check(checkIncludeModule("juju.provisioner", "juju*"), jc.IsFalse) - c.Check(checkIncludeModule("juju.provisioner", "juju.environ"), jc.IsFalse) - c.Check(checkIncludeModule("unit.mysql/1", "juju", "unit"), jc.IsTrue) -} - -func checkExcludeEntity(logValue string, agent ...string) bool { - stream := newLogFileStream(&debugLogParams{ - excludeEntity: agent, - }) - line := &logFileLine{agentTag: logValue} - return stream.exclude(line) -} - -func (s *debugLogFileIntSuite) TestCheckExcludeEntity(c *gc.C) { - c.Check(checkExcludeEntity("machine-0"), jc.IsFalse) - c.Check(checkExcludeEntity("machine-0", "machine-0"), jc.IsTrue) - c.Check(checkExcludeEntity("machine-1", "machine-0"), jc.IsFalse) - c.Check(checkExcludeEntity("machine-1", "machine-0", "machine-1"), jc.IsTrue) - c.Check(checkExcludeEntity("machine-0-lxc-0", "machine-0"), jc.IsFalse) - c.Check(checkExcludeEntity("machine-0-lxc-0", "machine-0*"), jc.IsTrue) - c.Check(checkExcludeEntity("machine-0-lxc-0", "machine-0-lxc-*"), jc.IsTrue) -} - -func checkExcludeModule(logValue string, module ...string) bool { - stream := newLogFileStream(&debugLogParams{ - excludeModule: module, - }) - line := &logFileLine{module: logValue} - return stream.exclude(line) -} - -func (s *debugLogFileIntSuite) TestCheckExcludeModule(c *gc.C) { - c.Check(checkExcludeModule("juju"), jc.IsFalse) - c.Check(checkExcludeModule("juju", "juju"), jc.IsTrue) - c.Check(checkExcludeModule("juju", "juju.environ"), jc.IsFalse) - c.Check(checkExcludeModule("juju.provisioner", "juju"), jc.IsTrue) - c.Check(checkExcludeModule("juju.provisioner", "juju*"), jc.IsFalse) - c.Check(checkExcludeModule("juju.provisioner", "juju.environ"), jc.IsFalse) - c.Check(checkExcludeModule("unit.mysql/1", "juju", "unit"), jc.IsTrue) -} - -func (s *debugLogFileIntSuite) TestFilterLine(c *gc.C) { - stream := newLogFileStream(&debugLogParams{ - filterLevel: loggo.INFO, - includeEntity: []string{"machine-0", "unit-mysql*"}, - includeModule: []string{"juju"}, - excludeEntity: []string{"unit-mysql-2"}, - excludeModule: []string{"juju.foo"}, - }) - c.Check(stream.filterLine([]byte( - "machine-0: date time WARNING juju")), jc.IsTrue) - c.Check(stream.filterLine([]byte( - "machine-1: date time WARNING juju")), jc.IsFalse) - c.Check(stream.filterLine([]byte( - "unit-mysql-0: date time WARNING juju")), jc.IsTrue) - c.Check(stream.filterLine([]byte( - "unit-mysql-1: date time WARNING juju")), jc.IsTrue) - c.Check(stream.filterLine([]byte( - "unit-mysql-2: date time WARNING juju")), jc.IsFalse) - c.Check(stream.filterLine([]byte( - "unit-wordpress-0: date time WARNING juju")), jc.IsFalse) - c.Check(stream.filterLine([]byte( - "machine-0: date time DEBUG juju")), 
jc.IsFalse) - c.Check(stream.filterLine([]byte( - "machine-0: date time WARNING juju.foo.bar")), jc.IsFalse) -} - -func (s *debugLogFileIntSuite) TestCountedFilterLineWithLimit(c *gc.C) { - stream := newLogFileStream(&debugLogParams{ - filterLevel: loggo.INFO, - maxLines: 5, - }) - line := []byte("machine-0: date time WARNING juju") - c.Check(stream.countedFilterLine(line), jc.IsTrue) - c.Check(stream.countedFilterLine(line), jc.IsTrue) - c.Check(stream.countedFilterLine(line), jc.IsTrue) - c.Check(stream.countedFilterLine(line), jc.IsTrue) - c.Check(stream.countedFilterLine(line), jc.IsTrue) - c.Check(stream.countedFilterLine(line), jc.IsFalse) - c.Check(stream.countedFilterLine(line), jc.IsFalse) -} - -type chanWriter struct { - ch chan []byte -} - -func (w *chanWriter) Write(buf []byte) (n int, err error) { - bufcopy := append([]byte{}, buf...) - w.ch <- bufcopy - return len(buf), nil -} - -func (s *debugLogFileIntSuite) testStreamInternal(c *gc.C, fromTheStart bool, backlog, maxLines uint, expected, errMatch string) { - - dir := c.MkDir() - logPath := filepath.Join(dir, "logfile.txt") - logFile, err := os.Create(logPath) - c.Assert(err, jc.ErrorIsNil) - defer logFile.Close() - logFileReader, err := os.Open(logPath) - c.Assert(err, jc.ErrorIsNil) - defer logFileReader.Close() - - logFile.WriteString(`line 1 -line 2 -line 3 -`) - - stream := newLogFileStream(&debugLogParams{ - fromTheStart: fromTheStart, - backlog: backlog, - maxLines: maxLines, - }) - err = stream.positionLogFile(logFileReader) - c.Assert(err, jc.ErrorIsNil) - var output bytes.Buffer - writer := &chanWriter{make(chan []byte)} - stream.start(logFileReader, writer) - defer stream.logTailer.Stop() - - logFile.WriteString("line 4\n") - logFile.WriteString("line 5\n") - - timeout := time.After(testing.LongWait) - for output.String() != expected { - select { - case buf := <-writer.ch: - output.Write(buf) - case <-timeout: - c.Fatalf("expected data didn't arrive:\n\tobtained: %#v\n\texpected: %#v", output.String(), expected) - } - } - - stream.logTailer.Stop() - - err = stream.wait(nil) - if errMatch == "" { - c.Assert(err, jc.ErrorIsNil) - } else { - c.Assert(err, gc.ErrorMatches, errMatch) - } -} - -func (s *debugLogFileIntSuite) TestLogStreamLoopFromTheStart(c *gc.C) { - expected := `line 1 -line 2 -line 3 -line 4 -line 5 -` - s.testStreamInternal(c, true, 0, 0, expected, "") -} - -func (s *debugLogFileIntSuite) TestLogStreamLoopFromTheStartMaxLines(c *gc.C) { - expected := `line 1 -line 2 -line 3 -` - s.testStreamInternal(c, true, 0, 3, expected, "") -} - -func (s *debugLogFileIntSuite) TestLogStreamLoopJustTail(c *gc.C) { - expected := `line 4 -line 5 -` - s.testStreamInternal(c, false, 0, 0, expected, "") -} - -func (s *debugLogFileIntSuite) TestLogStreamLoopBackOneLimitTwo(c *gc.C) { - expected := `line 3 -line 4 -` - s.testStreamInternal(c, false, 1, 2, expected, "") -} - -func (s *debugLogFileIntSuite) TestLogStreamLoopTailMaxLinesNotYetReached(c *gc.C) { - expected := `line 4 -line 5 -` - s.testStreamInternal(c, false, 0, 3, expected, "") -} - -func assertStreamParams(c *gc.C, obtained, expected *logFileStream) { - c.Check(obtained.includeEntity, jc.DeepEquals, expected.includeEntity) - c.Check(obtained.includeModule, jc.DeepEquals, expected.includeModule) - c.Check(obtained.excludeEntity, jc.DeepEquals, expected.excludeEntity) - c.Check(obtained.excludeModule, jc.DeepEquals, expected.excludeModule) - c.Check(obtained.maxLines, gc.Equals, expected.maxLines) - c.Check(obtained.fromTheStart, gc.Equals, 
expected.fromTheStart) - c.Check(obtained.filterLevel, gc.Equals, expected.filterLevel) - c.Check(obtained.backlog, gc.Equals, expected.backlog) -} - -func (s *debugLogFileIntSuite) TestNewLogStream(c *gc.C) { - params, err := readDebugLogParams(url.Values{ - "includeEntity": []string{"machine-1*", "machine-2"}, - "includeModule": []string{"juju", "unit"}, - "excludeEntity": []string{"machine-1-lxc*"}, - "excludeModule": []string{"juju.provisioner"}, - "maxLines": []string{"300"}, - "backlog": []string{"100"}, - "level": []string{"INFO"}, - // OK, just a little nonsense - "replay": []string{"true"}, - }) - c.Assert(err, jc.ErrorIsNil) - - assertStreamParams(c, newLogFileStream(params), &logFileStream{ - debugLogParams: &debugLogParams{ - includeEntity: []string{"machine-1*", "machine-2"}, - includeModule: []string{"juju", "unit"}, - excludeEntity: []string{"machine-1-lxc*"}, - excludeModule: []string{"juju.provisioner"}, - maxLines: 300, - backlog: 100, - filterLevel: loggo.INFO, - fromTheStart: true, - }, - }) -} - -func (s *debugLogFileIntSuite) TestParamErrors(c *gc.C) { - - _, err := readDebugLogParams(url.Values{"maxLines": []string{"foo"}}) - c.Assert(err, gc.ErrorMatches, `maxLines value "foo" is not a valid unsigned number`) - - _, err = readDebugLogParams(url.Values{"backlog": []string{"foo"}}) - c.Assert(err, gc.ErrorMatches, `backlog value "foo" is not a valid unsigned number`) - - _, err = readDebugLogParams(url.Values{"replay": []string{"foo"}}) - c.Assert(err, gc.ErrorMatches, `replay value "foo" is not a valid boolean`) - - _, err = readDebugLogParams(url.Values{"level": []string{"foo"}}) - c.Assert(err, gc.ErrorMatches, `level value "foo" is not one of "TRACE", "DEBUG", "INFO", "WARNING", "ERROR"`) -} - -type agentMatchTest struct { - about string - line string - filter string - expected bool -} - -var agentMatchTests []agentMatchTest = []agentMatchTest{ - { - about: "Matching with wildcard - match everything", - line: "machine-1: sdscsc", - filter: "*", - expected: true, - }, { - about: "Matching with wildcard as suffix - match machine tag...", - line: "machine-1: sdscsc", - filter: "mach*", - expected: true, - }, { - about: "Matching with wildcard as prefix - match machine tag...", - line: "machine-1: sdscsc", - filter: "*ch*", - expected: true, - }, { - about: "Matching with wildcard in the middle - match machine tag...", - line: "machine-1: sdscsc", - filter: "mach*1", - expected: true, - }, { - about: "Matching with wildcard - match machine name", - line: "machine-1: sdscsc", - filter: "1*", - expected: true, - }, { - about: "Matching exact machine name", - line: "machine-1: sdscsc", - filter: "2", - expected: false, - }, { - about: "Matching invalid filter", - line: "machine-1: sdscsc", - filter: "my-service", - expected: false, - }, { - about: "Matching exact machine tag", - line: "machine-1: sdscsc", - filter: "machine-1", - expected: true, - }, { - about: "Matching exact machine tag = not equal", - line: "machine-1: sdscsc", - filter: "machine-3", - expected: false, - }, { - about: "Matching with wildcard - match unit tag...", - line: "unit-ubuntu-1: sdscsc", - filter: "un*", - expected: true, - }, { - about: "Matching with wildcard - match unit name", - line: "unit-ubuntu-1: sdscsc", - filter: "ubuntu*", - expected: true, - }, { - about: "Matching exact unit name", - line: "unit-ubuntu-1: sdscsc", - filter: "ubuntu/2", - expected: false, - }, { - about: "Matching exact unit tag", - line: "unit-ubuntu-1: sdscsc", - filter: "unit-ubuntu-1", - expected: true, - }, { 
- about: "Matching exact unit tag = not equal", - line: "unit-ubuntu-2: sdscsc", - filter: "unit-ubuntu-1", - expected: false, - }, -} - -// TestAgentMatchesFilter tests that line agent matches desired filter as expected -func (s *debugLogFileIntSuite) TestAgentMatchesFilter(c *gc.C) { - for i, test := range agentMatchTests { - c.Logf("test %d: %v\n", i, test.about) - matched := AgentMatchesFilter(ParseLogLine(test.line), test.filter) - c.Assert(matched, gc.Equals, test.expected) - } -} - -// TestAgentLineFragmentParsing tests that agent tag and name are parsed correctly from log line -func (s *debugLogFileIntSuite) TestAgentLineFragmentParsing(c *gc.C) { - checkAgentParsing(c, "Drop trailing colon", "machine-1: sdscsc", "machine-1", "1") - checkAgentParsing(c, "Drop unit specific [", "unit-ubuntu-1[blah777787]: scscdcdc", "unit-ubuntu-1", "ubuntu/1") - checkAgentParsing(c, "No colon in log line - invalid", "unit-ubuntu-1 scscdcdc", "", "") -} - -func checkAgentParsing(c *gc.C, about, line, tag, name string) { - c.Logf("test %q\n", about) - logLine := ParseLogLine(line) - c.Assert(logLine.LogLineAgentTag(), gc.Equals, tag) - c.Assert(logLine.LogLineAgentName(), gc.Equals, name) -} === removed file 'src/github.com/juju/juju/apiserver/debuglog_file_test.go' --- src/github.com/juju/juju/apiserver/debuglog_file_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_file_test.go 1970-01-01 00:00:00 +0000 @@ -1,377 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver_test - -import ( - "bufio" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" -) - -type debugLogFileSuite struct { - debugLogBaseSuite - logFile *os.File - last int -} - -var _ = gc.Suite(&debugLogFileSuite{}) - -func (s *debugLogFileSuite) TestNoLogfile(c *gc.C) { - reader := s.openWebsocket(c, nil) - assertJSONError(c, reader, "cannot open log file: .*: "+utils.NoSuchFileErrRegexp) - s.assertWebsocketClosed(c, reader) -} - -func (s *debugLogFileSuite) assertLogReader(c *gc.C, reader *bufio.Reader) { - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - - linesRead := s.readLogLines(c, reader, logLineCount) - c.Assert(linesRead, jc.DeepEquals, logLines) -} - -func (s *debugLogFileSuite) TestServesLog(c *gc.C) { - s.ensureLogFile(c) - reader := s.openWebsocket(c, nil) - s.assertLogReader(c, reader) -} - -func (s *debugLogFileSuite) TestReadFromTopLevelPath(c *gc.C) { - // Backwards compatibility check, that we can read the log file at - // https://host:port/log - s.ensureLogFile(c) - reader := s.openWebsocketCustomPath(c, "/log") - s.assertLogReader(c, reader) -} - -func (s *debugLogFileSuite) TestReadFromEnvUUIDPath(c *gc.C) { - // Check that we can read the log at https://host:port/ENVUUID/log - environ, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - s.ensureLogFile(c) - reader := s.openWebsocketCustomPath(c, fmt.Sprintf("/environment/%s/log", environ.UUID())) - s.assertLogReader(c, reader) -} - -func (s *debugLogFileSuite) TestReadRejectsWrongEnvUUIDPath(c *gc.C) { - // Check that we cannot pull logs from https://host:port/BADENVUUID/log - s.ensureLogFile(c) - reader := s.openWebsocketCustomPath(c, "/environment/dead-beef-123456/log") - assertJSONError(c, reader, `unknown environment: "dead-beef-123456"`) - s.assertWebsocketClosed(c, reader) -} - -func (s *debugLogFileSuite) TestReadsFromEnd(c *gc.C) { - 
s.writeLogLines(c, 10) - - reader := s.openWebsocket(c, nil) - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - - linesRead := s.readLogLines(c, reader, logLineCount-10) - c.Assert(linesRead, jc.DeepEquals, logLines[10:]) -} - -func (s *debugLogFileSuite) TestReplayFromStart(c *gc.C) { - s.writeLogLines(c, 10) - - reader := s.openWebsocket(c, url.Values{"replay": {"true"}}) - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - - linesRead := s.readLogLines(c, reader, logLineCount) - c.Assert(linesRead, jc.DeepEquals, logLines) -} - -func (s *debugLogFileSuite) TestBacklog(c *gc.C) { - s.writeLogLines(c, 10) - - reader := s.openWebsocket(c, url.Values{"backlog": {"5"}}) - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - - linesRead := s.readLogLines(c, reader, logLineCount-5) - c.Assert(linesRead, jc.DeepEquals, logLines[5:]) -} - -func (s *debugLogFileSuite) TestMaxLines(c *gc.C) { - s.writeLogLines(c, 10) - - reader := s.openWebsocket(c, url.Values{"maxLines": {"10"}}) - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - - linesRead := s.readLogLines(c, reader, 10) - c.Assert(linesRead, jc.DeepEquals, logLines[10:20]) - s.assertWebsocketClosed(c, reader) -} - -func (s *debugLogFileSuite) TestBacklogWithMaxLines(c *gc.C) { - s.writeLogLines(c, 10) - - reader := s.openWebsocket(c, url.Values{"backlog": {"5"}, "maxLines": {"10"}}) - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - - linesRead := s.readLogLines(c, reader, 10) - c.Assert(linesRead, jc.DeepEquals, logLines[5:15]) - s.assertWebsocketClosed(c, reader) -} - -type filterTest struct { - about string - filter url.Values - filtered []string -} - -var filterTests []filterTest = []filterTest{ - { - about: "Filter from original test", - filter: url.Values{ - "includeEntity": {"machine-0", "unit-ubuntu-0"}, - "includeModule": {"juju.cmd"}, - "excludeModule": {"juju.cmd.jujud"}, - }, - filtered: []string{logLines[0], logLines[40]}, - }, { - about: "Filter from original test inverted", - filter: url.Values{ - "excludeEntity": {"machine-1"}, - }, - filtered: []string{logLines[0], logLines[1]}, - }, { - about: "Include Entity Filter with only wildcard", - filter: url.Values{ - "includeEntity": {"*"}, - }, - filtered: []string{logLines[0], logLines[1]}, - }, { - about: "Exclude Entity Filter with only wildcard", - filter: url.Values{ - "excludeEntity": {"*"}, // exclude everything :-) - }, - filtered: []string{}, - }, { - about: "Include Entity Filter with 1 wildcard", - filter: url.Values{ - "includeEntity": {"unit-*"}, - }, - filtered: []string{logLines[40], logLines[41]}, - }, { - about: "Exclude Entity Filter with 1 wildcard", - filter: url.Values{ - "excludeEntity": {"machine-*"}, - }, - filtered: []string{logLines[40], logLines[41]}, - }, { - about: "Include Entity Filter using machine tag", - filter: url.Values{ - "includeEntity": {"machine-1"}, - }, - filtered: []string{logLines[27], logLines[28]}, - }, { - about: "Include Entity Filter using machine name", - filter: url.Values{ - "includeEntity": {"1"}, - }, - filtered: []string{logLines[27], logLines[28]}, - }, { - about: "Include Entity Filter using unit tag", - filter: url.Values{ - "includeEntity": {"unit-ubuntu-0"}, - }, - filtered: []string{logLines[40], logLines[41]}, - }, { - about: "Include Entity Filter using unit name", - filter: url.Values{ - "includeEntity": {"ubuntu/0"}, - }, - filtered: []string{logLines[40], logLines[41]}, - }, { - about: "Include Entity Filter using 
combination of machine tag and unit name", - filter: url.Values{ - "includeEntity": {"machine-1", "ubuntu/0"}, - "includeModule": {"juju.agent"}, - }, - filtered: []string{logLines[29], logLines[34], logLines[41]}, - }, { - about: "Exclude Entity Filter using machine tag", - filter: url.Values{ - "excludeEntity": {"machine-0"}, - }, - filtered: []string{logLines[27], logLines[28]}, - }, { - about: "Exclude Entity Filter using machine name", - filter: url.Values{ - "excludeEntity": {"0"}, - }, - filtered: []string{logLines[27], logLines[28]}, - }, { - about: "Exclude Entity Filter using unit tag", - filter: url.Values{ - "excludeEntity": {"machine-0", "machine-1", "unit-ubuntu-0"}, - }, - filtered: []string{logLines[54], logLines[55]}, - }, { - about: "Exclude Entity Filter using unit name", - filter: url.Values{ - "excludeEntity": {"machine-0", "machine-1", "ubuntu/0"}, - }, - filtered: []string{logLines[54], logLines[55]}, - }, { - about: "Exclude Entity Filter using combination of machine tag and unit name", - filter: url.Values{ - "excludeEntity": {"0", "1", "ubuntu/0"}, - }, - filtered: []string{logLines[54], logLines[55]}, - }, -} - -// TestFilter tests that filters are processed correctly given specific debug-log configuration. -func (s *debugLogFileSuite) TestFilter(c *gc.C) { - for i, test := range filterTests { - c.Logf("test %d: %v\n", i, test.about) - - // ensures log file - path := filepath.Join(s.LogDir, "all-machines.log") - var err error - s.logFile, err = os.Create(path) - c.Assert(err, jc.ErrorIsNil) - - // opens web socket - conn := s.dialWebsocket(c, test.filter) - reader := bufio.NewReader(conn) - - s.assertLogFollowing(c, reader) - s.writeLogLines(c, logLineCount) - /* - This will filter and return as many lines as filtered wanted to examine. - So, if specified filter can potentially return 40 lines from sample log but filtered only wanted 2, - then the first 2 lines that match the filter will be returned here. - */ - linesRead := s.readLogLines(c, reader, len(test.filtered)) - // compare retrieved lines with expected - c.Assert(linesRead, jc.DeepEquals, test.filtered) - - // release resources - conn.Close() - s.logFile.Close() - s.logFile = nil - s.last = 0 - } -} - -// readLogLines filters and returns as many lines as filtered wanted to examine. -// So, if specified filter can potentially return 40 lines from sample log but filtered only wanted 2, -// then the first 2 lines that match the filter will be returned here. 
-func (s *debugLogFileSuite) readLogLines(c *gc.C, reader *bufio.Reader, count int) (linesRead []string) { - for len(linesRead) < count { - line, err := reader.ReadString('\n') - c.Assert(err, jc.ErrorIsNil) - // Trim off the trailing \n - linesRead = append(linesRead, line[:len(line)-1]) - } - return linesRead -} - -func (s *debugLogFileSuite) ensureLogFile(c *gc.C) { - if s.logFile != nil { - return - } - path := filepath.Join(s.LogDir, "all-machines.log") - var err error - s.logFile, err = os.Create(path) - c.Assert(err, jc.ErrorIsNil) - s.AddCleanup(func(c *gc.C) { - s.logFile.Close() - s.logFile = nil - s.last = 0 - }) -} - -func (s *debugLogFileSuite) writeLogLines(c *gc.C, count int) { - s.ensureLogFile(c) - for i := 0; i < count && s.last < logLineCount; i++ { - s.logFile.WriteString(logLines[s.last] + "\n") - s.last++ - } -} - -func (s *debugLogFileSuite) assertLogFollowing(c *gc.C, reader *bufio.Reader) { - errResult := readJSONErrorLine(c, reader) - c.Assert(errResult.Error, gc.IsNil) -} - -var ( - logLines = strings.Split(` -machine-0: 2014-03-24 22:34:25 INFO juju.cmd supercommand.go:297 running juju-1.17.7.1-trusty-amd64 [gc] -machine-0: 2014-03-24 22:34:25 INFO juju.cmd.jujud machine.go:127 machine agent machine-0 start (1.17.7.1-trusty-amd64 [gc]) -machine-0: 2014-03-24 22:34:25 DEBUG juju.agent agent.go:384 read agent config, format "1.18" -machine-0: 2014-03-24 22:34:25 INFO juju.cmd.jujud machine.go:155 Starting StateWorker for machine-0 -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "state" -machine-0: 2014-03-24 22:34:25 INFO juju.state open.go:80 opening state; mongo addresses: ["localhost:37017"]; entity "machine-0" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "api" -machine-0: 2014-03-24 22:34:25 INFO juju apiclient.go:114 api: dialing "wss://localhost:17070/" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "termination" -machine-0: 2014-03-24 22:34:25 ERROR juju apiclient.go:119 api: websocket.Dial wss://localhost:17070/: dial tcp 127.0.0.1:17070: connection refused -machine-0: 2014-03-24 22:34:25 ERROR juju runner.go:220 worker: exited "api": websocket.Dial wss://localhost:17070/: dial tcp 127.0.0.1:17070: connection refused -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:254 worker: restarting "api" in 3s -machine-0: 2014-03-24 22:34:25 INFO juju.state open.go:118 connection established -machine-0: 2014-03-24 22:34:25 DEBUG juju.utils gomaxprocs.go:24 setting GOMAXPROCS to 8 -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "local-storage" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "instancepoller" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "apiserver" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "resumer" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "cleaner" -machine-0: 2014-03-24 22:34:25 INFO juju.apiserver apiserver.go:43 listening on "[::]:17070" -machine-0: 2014-03-24 22:34:25 INFO juju runner.go:262 worker: start "minunitsworker" -machine-0: 2014-03-24 22:34:28 INFO juju runner.go:262 worker: start "api" -machine-0: 2014-03-24 22:34:28 INFO juju apiclient.go:114 api: dialing "wss://localhost:17070/" -machine-0: 2014-03-24 22:34:28 INFO juju.apiserver apiserver.go:131 [1] API connection from 127.0.0.1:36491 -machine-0: 2014-03-24 22:34:28 INFO juju apiclient.go:124 api: connection established -machine-0: 2014-03-24 22:34:28 DEBUG juju.apiserver apiserver.go:120 
<- [1] {"RequestId":1,"Type":"Admin","Request":"Login","Params":{"AuthTag":"machine-0","Password":"ARbW7iCV4LuMugFEG+Y4e0yr","Nonce":"user-admin:bootstrap"}} -machine-0: 2014-03-24 22:34:28 DEBUG juju.apiserver apiserver.go:127 -> [1] machine-0 10.305679ms {"RequestId":1,"Response":{}} Admin[""].Login -machine-1: 2014-03-24 22:36:28 INFO juju.cmd supercommand.go:297 running juju-1.17.7.1-precise-amd64 [gc] -machine-1: 2014-03-24 22:36:28 INFO juju.cmd.jujud machine.go:127 machine agent machine-1 start (1.17.7.1-precise-amd64 [gc]) -machine-1: 2014-03-24 22:36:28 DEBUG juju.agent agent.go:384 read agent config, format "1.18" -machine-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "api" -machine-1: 2014-03-24 22:36:28 INFO juju apiclient.go:114 api: dialing "wss://10.0.3.1:17070/" -machine-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "termination" -machine-1: 2014-03-24 22:36:28 INFO juju apiclient.go:124 api: connection established -machine-1: 2014-03-24 22:36:28 DEBUG juju.agent agent.go:523 writing configuration file -machine-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "upgrader" -machine-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "upgrade-steps" -machine-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "machiner" -machine-1: 2014-03-24 22:36:28 INFO juju.cmd.jujud machine.go:458 upgrade to 1.17.7.1-precise-amd64 already completed. -machine-1: 2014-03-24 22:36:28 INFO juju.cmd.jujud machine.go:445 upgrade to 1.17.7.1-precise-amd64 completed. -unit-ubuntu-0[32423]: 2014-03-24 22:36:28 INFO juju.cmd supercommand.go:297 running juju-1.17.7.1-precise-amd64 [gc] -unit-ubuntu-0[34543]: 2014-03-24 22:36:28 DEBUG juju.agent agent.go:384 read agent config, format "1.18" -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju.jujud unit.go:76 unit agent unit-ubuntu-0 start (1.17.7.1-precise-amd64 [gc]) -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "api" -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju apiclient.go:114 api: dialing "wss://10.0.3.1:17070/" -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju apiclient.go:124 api: connection established -unit-ubuntu-0: 2014-03-24 22:36:28 DEBUG juju.agent agent.go:523 writing configuration file -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "upgrader" -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "logger" -unit-ubuntu-0: 2014-03-24 22:36:28 DEBUG juju.worker.logger logger.go:35 initial log config: "=DEBUG" -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "uniter" -unit-ubuntu-0: 2014-03-24 22:36:28 DEBUG juju.worker.logger logger.go:60 logger setup -unit-ubuntu-0: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "rsyslog" -unit-ubuntu-0: 2014-03-24 22:36:28 DEBUG juju.worker.rsyslog worker.go:76 starting rsyslog worker mode 1 for "unit-ubuntu-0" "tim-local" -unit-ubuntu-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "logger" -unit-ubuntu-1: 2014-03-24 22:36:28 DEBUG juju.worker.logger logger.go:35 initial log config: "=DEBUG" -unit-ubuntu-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "uniter" -unit-ubuntu-1: 2014-03-24 22:36:28 DEBUG juju.worker.logger logger.go:60 logger setup -unit-ubuntu-1: 2014-03-24 22:36:28 INFO juju runner.go:262 worker: start "rsyslog" -unit-ubuntu-1: 2014-03-24 22:36:28 DEBUG juju.worker.rsyslog worker.go:76 starting rsyslog worker mode 1 for "unit-ubuntu-0" "tim-local" -`[1:], "\n") - logLineCount = len(logLines) -) === 
modified file 'src/github.com/juju/juju/apiserver/debuglog_test.go' --- src/github.com/juju/juju/apiserver/debuglog_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/debuglog_test.go 2016-03-22 15:18:22 +0000 @@ -8,18 +8,18 @@ "net/http" "net/url" - jc "github.com/juju/testing/checkers" "github.com/juju/utils" "golang.org/x/net/websocket" gc "gopkg.in/check.v1" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/testing/factory" ) // debugLogBaseSuite has tests that should be run for both the file // and DB based variants of debuglog, as well as some test helpers. type debugLogBaseSuite struct { - userAuthHttpSuite + authHttpSuite } func (s *debugLogBaseSuite) TestBadParams(c *gc.C) { @@ -30,14 +30,16 @@ func (s *debugLogBaseSuite) TestWithHTTP(c *gc.C) { uri := s.logURL(c, "http", nil).String() - _, err := s.sendRequest(c, "", "", "GET", uri, "", nil) - c.Assert(err, gc.ErrorMatches, `.*malformed HTTP response.*`) + s.sendRequest(c, httpRequestParams{ + method: "GET", + url: uri, + expectError: `.*malformed HTTP response.*`, + }) } func (s *debugLogBaseSuite) TestWithHTTPS(c *gc.C) { uri := s.logURL(c, "https", nil).String() - response, err := s.sendRequest(c, "", "", "GET", uri, "", nil) - c.Assert(err, jc.ErrorIsNil) + response := s.sendRequest(c, httpRequestParams{method: "GET", url: uri}) c.Assert(response.StatusCode, gc.Equals, http.StatusBadRequest) } @@ -46,7 +48,7 @@ defer conn.Close() reader := bufio.NewReader(conn) - assertJSONError(c, reader, "auth failed: invalid request format") + assertJSONError(c, reader, "no credentials provided") s.assertWebsocketClosed(c, reader) } @@ -55,12 +57,12 @@ Nonce: "foo-nonce", }) header := utils.BasicAuthHeader(m.Tag().String(), password) - header.Add("X-Juju-Nonce", "foo-nonce") + header.Add(params.MachineNonceHeader, "foo-nonce") conn := s.dialWebsocketInternal(c, nil, header) defer conn.Close() reader := bufio.NewReader(conn) - assertJSONError(c, reader, "auth failed: invalid entity name or password") + assertJSONError(c, reader, "invalid entity name or password") s.assertWebsocketClosed(c, reader) } === modified file 'src/github.com/juju/juju/apiserver/deployer/deployer.go' --- src/github.com/juju/juju/apiserver/deployer/deployer.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/deployer/deployer.go 2016-03-22 15:18:22 +0000 @@ -14,7 +14,7 @@ ) func init() { - common.RegisterStandardFacade("Deployer", 0, NewDeployerAPI) + common.RegisterStandardFacade("Deployer", 1, NewDeployerAPI) } // DeployerAPI provides access to the Deployer API facade. 
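The debuglog.go and debuglog_db.go hunks above thread a new boolean noTail query parameter from the websocket request through readDebugLogParams into state.LogTailerParams. A minimal, standalone sketch of that parsing pattern, using only the standard library; parseNoTail is a hypothetical helper for illustration and is not part of the diff:

	package main

	import (
		"fmt"
		"net/url"
		"strconv"
	)

	// parseNoTail mirrors the validation style seen in readDebugLogParams:
	// an absent value defaults to false, and a value that strconv.ParseBool
	// rejects is reported as an error rather than silently ignored.
	func parseNoTail(query url.Values) (bool, error) {
		value := query.Get("noTail")
		if value == "" {
			return false, nil
		}
		noTail, err := strconv.ParseBool(value)
		if err != nil {
			return false, fmt.Errorf("noTail value %q is not a valid boolean", value)
		}
		return noTail, nil
	}

	func main() {
		// A client would append parameters like these to the debug-log URL.
		query, err := url.ParseQuery("replay=true&noTail=true&backlog=10")
		if err != nil {
			panic(err)
		}
		noTail, err := parseNoTail(query)
		if err != nil {
			panic(err)
		}
		fmt.Println("noTail:", noTail) // prints: noTail: true
	}

Defaulting an absent parameter to false keeps the API backwards compatible: older clients that never send noTail get the previous tail-forever behaviour, while new clients can opt out of waiting for fresh log records.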
=== modified file 'src/github.com/juju/juju/apiserver/deployer/deployer_test.go' --- src/github.com/juju/juju/apiserver/deployer/deployer_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/deployer/deployer_test.go 2016-03-22 15:18:22 +0000 @@ -53,7 +53,7 @@ // machine 1 (authorized): mysql/0 (principal0), logging/0 (subordinate0) var err error - s.machine0, err = s.State.AddMachine("quantal", state.JobManageEnviron, state.JobHostUnits) + s.machine0, err = s.State.AddMachine("quantal", state.JobManageModel, state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) s.machine1, err = s.State.AddMachine("quantal", state.JobHostUnits) === added directory 'src/github.com/juju/juju/apiserver/discoverspaces' === added file 'src/github.com/juju/juju/apiserver/discoverspaces/discoverspaces.go' --- src/github.com/juju/juju/apiserver/discoverspaces/discoverspaces.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/discoverspaces/discoverspaces.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,101 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package discoverspaces + +import ( + "github.com/juju/errors" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/networkingcommon" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +func init() { + common.RegisterStandardFacade("DiscoverSpaces", 2, NewDiscoverSpacesAPI) +} + +// DiscoverSpacesAPI implements the API used by the discoverspaces worker. +type DiscoverSpacesAPI struct { + st networkingcommon.NetworkBacking + resources *common.Resources + authorizer common.Authorizer +} + +// NewDiscoverSpacesAPI creates a new instance of the DiscoverSpaces API. +func NewDiscoverSpacesAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*DiscoverSpacesAPI, error) { + return NewDiscoverSpacesAPIWithBacking(networkingcommon.NewStateShim(st), resources, authorizer) +} + +func NewDiscoverSpacesAPIWithBacking(st networkingcommon.NetworkBacking, resources *common.Resources, authorizer common.Authorizer) (*DiscoverSpacesAPI, error) { + if !authorizer.AuthModelManager() { + return nil, common.ErrPerm + } + return &DiscoverSpacesAPI{ + st: st, + authorizer: authorizer, + resources: resources, + }, nil +} + +// ModelConfig returns the current model's configuration. +func (api *DiscoverSpacesAPI) ModelConfig() (params.ModelConfigResult, error) { + result := params.ModelConfigResult{} + + config, err := api.st.ModelConfig() + if err != nil { + return result, err + } + allAttrs := config.AllAttrs() + // No need to obscure any secrets as caller needs to be a ModelManager to + // call any api methods. + result.Config = allAttrs + return result, nil +} + +// CreateSpaces creates a new Juju network space, associating the +// specified subnets with it (optional; can be empty). +func (api *DiscoverSpacesAPI) CreateSpaces(args params.CreateSpacesParams) (results params.ErrorResults, err error) { + return networkingcommon.CreateSpaces(api.st, args) +} + +// ListSpaces lists all the available spaces and their associated subnets. 
+func (api *DiscoverSpacesAPI) ListSpaces() (results params.DiscoverSpacesResults, err error) { + spaces, err := api.st.AllSpaces() + if err != nil { + return results, errors.Trace(err) + } + + results.Results = make([]params.ProviderSpace, len(spaces)) + for i, space := range spaces { + result := params.ProviderSpace{} + result.ProviderId = string(space.ProviderId()) + result.Name = space.Name() + + subnets, err := space.Subnets() + if err != nil { + err = errors.Annotatef(err, "fetching subnets") + result.Error = common.ServerError(err) + results.Results[i] = result + continue + } + + result.Subnets = make([]params.Subnet, len(subnets)) + for i, subnet := range subnets { + result.Subnets[i] = networkingcommon.BackingSubnetToParamsSubnet(subnet) + } + results.Results[i] = result + } + return results, nil +} + +// AddSubnets is defined on the API interface. +func (api *DiscoverSpacesAPI) AddSubnets(args params.AddSubnetsParams) (params.ErrorResults, error) { + return networkingcommon.AddSubnets(api.st, args) +} + +// ListSubnets lists all the available subnets or only those matching +// all given optional filters. +func (api *DiscoverSpacesAPI) ListSubnets(args params.SubnetsFilters) (results params.ListSubnetsResults, err error) { + return networkingcommon.ListSubnets(api.st, args) +} === added file 'src/github.com/juju/juju/apiserver/discoverspaces/discoverspaces_test.go' --- src/github.com/juju/juju/apiserver/discoverspaces/discoverspaces_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/discoverspaces/discoverspaces_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,134 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package discoverspaces_test + +import ( + "errors" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/discoverspaces" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + coretesting "github.com/juju/juju/testing" +) + +type DiscoverSpacesSuite struct { + coretesting.BaseSuite + apiservertesting.StubNetwork + + resources *common.Resources + authorizer apiservertesting.FakeAuthorizer + facade *discoverspaces.DiscoverSpacesAPI +} + +var _ = gc.Suite(&DiscoverSpacesSuite{}) + +func (s *DiscoverSpacesSuite) SetUpSuite(c *gc.C) { + s.StubNetwork.SetUpSuite(c) + s.BaseSuite.SetUpSuite(c) +} + +func (s *DiscoverSpacesSuite) TearDownSuite(c *gc.C) { + s.BaseSuite.TearDownSuite(c) +} + +func (s *DiscoverSpacesSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + apiservertesting.BackingInstance.SetUp( + c, + apiservertesting.StubZonedEnvironName, + apiservertesting.WithZones, + apiservertesting.WithSpaces, + apiservertesting.WithSubnets) + + s.resources = common.NewResources() + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: names.NewUserTag("admin"), + EnvironManager: true, + } + + var err error + s.facade, err = discoverspaces.NewDiscoverSpacesAPIWithBacking( + apiservertesting.BackingInstance, s.resources, s.authorizer, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.facade, gc.NotNil) +} + +func (s *DiscoverSpacesSuite) TearDownTest(c *gc.C) { + if s.resources != nil { + s.resources.StopAll() + } + s.BaseSuite.TearDownTest(c) +} + +func (s *DiscoverSpacesSuite) TestModelConfigFailure(c *gc.C) { + apiservertesting.BackingInstance.SetErrors(errors.New("boom")) + + result, err := s.facade.ModelConfig() + c.Assert(err, 
gc.ErrorMatches, "boom") + c.Assert(result, jc.DeepEquals, params.ModelConfigResult{}) + + apiservertesting.BackingInstance.CheckCallNames(c, "ModelConfig") +} + +func (s *DiscoverSpacesSuite) TestModelConfigSuccess(c *gc.C) { + result, err := s.facade.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, params.ModelConfigResult{ + Config: apiservertesting.BackingInstance.EnvConfig.AllAttrs(), + }) + + apiservertesting.BackingInstance.CheckCallNames(c, "ModelConfig") +} + +func (s *DiscoverSpacesSuite) TestListSpaces(c *gc.C) { + result, err := s.facade.ListSpaces() + c.Assert(err, jc.ErrorIsNil) + + expectedResult := []params.ProviderSpace{{ + Name: "default", + Subnets: []params.Subnet{ + {CIDR: "192.168.0.0/24", + ProviderId: "provider-192.168.0.0/24", + SpaceTag: "space-default", + Zones: []string{"foo"}, + Status: "in-use"}, + {CIDR: "192.168.3.0/24", + ProviderId: "provider-192.168.3.0/24", + VLANTag: 23, + SpaceTag: "space-default", + Zones: []string{"bar", "bam"}}}}, { + Name: "dmz", + Subnets: []params.Subnet{ + {CIDR: "192.168.1.0/24", + ProviderId: "provider-192.168.1.0/24", + VLANTag: 23, + SpaceTag: "space-dmz", + Zones: []string{"bar", "bam"}}}}, { + Name: "private", + Subnets: []params.Subnet{ + {CIDR: "192.168.2.0/24", + ProviderId: "provider-192.168.2.0/24", + SpaceTag: "space-private", + Zones: []string{"foo"}, + Status: "in-use"}}, + }} + c.Assert(result.Results, jc.DeepEquals, expectedResult) + apiservertesting.BackingInstance.CheckCallNames(c, "AllSpaces") +} + +func (s *DiscoverSpacesSuite) TestListSpacesFailure(c *gc.C) { + apiservertesting.BackingInstance.SetErrors(errors.New("boom")) + + result, err := s.facade.ListSpaces() + c.Assert(err, gc.ErrorMatches, "boom") + c.Assert(result, jc.DeepEquals, params.DiscoverSpacesResults{}) + + apiservertesting.BackingInstance.CheckCallNames(c, "AllSpaces") +} === added file 'src/github.com/juju/juju/apiserver/discoverspaces/package_test.go' --- src/github.com/juju/juju/apiserver/discoverspaces/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/discoverspaces/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package discoverspaces_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/apiserver/diskmanager/diskmanager.go' --- src/github.com/juju/juju/apiserver/diskmanager/diskmanager.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/diskmanager/diskmanager.go 2016-03-22 15:18:22 +0000 @@ -14,7 +14,7 @@ ) func init() { - common.RegisterStandardFacade("DiskManager", 1, NewDiskManagerAPI) + common.RegisterStandardFacade("DiskManager", 2, NewDiskManagerAPI) } var logger = loggo.GetLogger("juju.apiserver.diskmanager") === modified file 'src/github.com/juju/juju/apiserver/diskmanager/diskmanager_test.go' --- src/github.com/juju/juju/apiserver/diskmanager/diskmanager_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/diskmanager/diskmanager_test.go 2016-03-22 15:18:22 +0000 @@ -84,9 +84,9 @@ Results: []params.ErrorResult{{ Error: nil, }, { - Error: &params.Error{"permission denied", "unauthorized access"}, + Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}, }, { - Error: &params.Error{"permission denied", "unauthorized access"}, + Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}, }}, }) c.Assert(s.st.calls, gc.Equals, 1) @@ -102,7 +102,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{{ - Error: &params.Error{"boom", ""}, + Error: &params.Error{Message: "boom", Code: ""}, }}, }) } === removed directory 'src/github.com/juju/juju/apiserver/environment' === removed file 'src/github.com/juju/juju/apiserver/environment/environment.go' --- src/github.com/juju/juju/apiserver/environment/environment.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/environment/environment.go 1970-01-01 00:00:00 +0000 @@ -1,27 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment - -import ( - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/state" -) - -func init() { - common.RegisterStandardFacade("Environment", 0, NewEnvironmentAPI) -} - -// EnvironmentAPI implements the API used by the machine environment worker. -type EnvironmentAPI struct { - *common.EnvironWatcher - *EnvironTools -} - -// NewEnvironmentAPI creates a new instance of the Environment API. -func NewEnvironmentAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*EnvironmentAPI, error) { - return &EnvironmentAPI{ - EnvironWatcher: common.NewEnvironWatcher(st, resources, authorizer), - EnvironTools: NewEnvironTools(st, authorizer), - }, nil -} === removed file 'src/github.com/juju/juju/apiserver/environment/environment_test.go' --- src/github.com/juju/juju/apiserver/environment/environment_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/environment/environment_test.go 1970-01-01 00:00:00 +0000 @@ -1,52 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.
- -package environment_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - commontesting "github.com/juju/juju/apiserver/common/testing" - "github.com/juju/juju/apiserver/environment" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" -) - -type environmentSuite struct { - testing.JujuConnSuite - *commontesting.EnvironWatcherTest - - authorizer apiservertesting.FakeAuthorizer - resources *common.Resources - - machine0 *state.Machine - api *environment.EnvironmentAPI -} - -var _ = gc.Suite(&environmentSuite{}) - -func (s *environmentSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - var err error - s.machine0, err = s.State.AddMachine("quantal", state.JobHostUnits, state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) - - s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: s.machine0.Tag(), - } - s.resources = common.NewResources() - s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) - - s.api, err = environment.NewEnvironmentAPI( - s.State, - s.resources, - s.authorizer, - ) - c.Assert(err, jc.ErrorIsNil) - s.EnvironWatcherTest = commontesting.NewEnvironWatcherTest( - s.api, s.State, s.resources, commontesting.NoSecrets) -} === removed file 'src/github.com/juju/juju/apiserver/environment/package_test.go' --- src/github.com/juju/juju/apiserver/environment/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/environment/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/apiserver/environment/toolsversionupdate.go' --- src/github.com/juju/juju/apiserver/environment/toolsversionupdate.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/environment/toolsversionupdate.go 1970-01-01 00:00:00 +0000 @@ -1,125 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment - -import ( - "github.com/juju/errors" - - "github.com/juju/loggo" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/tools" - "github.com/juju/juju/state" - coretools "github.com/juju/juju/tools" - "github.com/juju/juju/version" -) - -var logger = loggo.GetLogger("juju.apiserver.environment") - -var ( - findTools = tools.FindTools -) - -// EnvironGetter represents a struct that can provide a state.Environment. 
-type EnvironGetter interface { - Environment() (*state.Environment, error) -} - -type toolsFinder func(environs.Environ, int, int, string, coretools.Filter) (coretools.List, error) -type envVersionUpdater func(*state.Environment, version.Number) error - -var newEnvirons = environs.New - -func checkToolsAvailability(cfg *config.Config, finder toolsFinder) (version.Number, error) { - currentVersion, ok := cfg.AgentVersion() - if !ok || currentVersion == version.Zero { - return version.Zero, nil - } - - env, err := newEnvirons(cfg) - if err != nil { - return version.Zero, errors.Annotatef(err, "cannot make environ") - } - - // finder receives major and minor as parameters as it uses them to filter versions and - // only return patches for the passed major.minor (from major.minor.patch). - // We'll try the released stream first, then fall back to the current configured stream - // if no released tools are found. - vers, err := finder(env, currentVersion.Major, currentVersion.Minor, tools.ReleasedStream, coretools.Filter{}) - preferredStream := tools.PreferredStream(&currentVersion, cfg.Development(), cfg.AgentStream()) - if preferredStream != tools.ReleasedStream && errors.Cause(err) == coretools.ErrNoMatches { - vers, err = finder(env, currentVersion.Major, currentVersion.Minor, preferredStream, coretools.Filter{}) - } - if err != nil { - return version.Zero, errors.Annotatef(err, "cannot find available tools") - } - // Newest also returns a list of the items in this list matching with the - // newest version. - newest, _ := vers.Newest() - return newest, nil -} - -var envConfig = func(e *state.Environment) (*config.Config, error) { - return e.Config() -} - -// Base implementation of envVersionUpdater -func envVersionUpdate(env *state.Environment, ver version.Number) error { - return env.UpdateLatestToolsVersion(ver) -} - -func updateToolsAvailability(st EnvironGetter, finder toolsFinder, update envVersionUpdater) error { - env, err := st.Environment() - if err != nil { - return errors.Annotate(err, "cannot get environment") - } - cfg, err := envConfig(env) - if err != nil { - return errors.Annotate(err, "cannot get config") - } - ver, err := checkToolsAvailability(cfg, finder) - if err != nil { - if errors.IsNotFound(err) { - // No newer tools, so exit silently. - return nil - } - return errors.Annotate(err, "cannot get latest version") - } - if ver == version.Zero { - logger.Debugf("tools lookup returned version Zero, this should only happen during bootstrap.") - return nil - } - return update(env, ver) -} - -// EnvironTools holds the required tools for an environ facade. -type EnvironTools struct { - st EnvironGetter - authorizer common.Authorizer - // tools lookup - findTools toolsFinder - envVersionUpdate envVersionUpdater -} - -// NewEnvironTools returns a new environ tools pointer with the passed attributes -// and some defaults that are only for changed during tests. -func NewEnvironTools(st EnvironGetter, authorizer common.Authorizer) *EnvironTools { - return &EnvironTools{ - st: st, - authorizer: authorizer, - findTools: findTools, - envVersionUpdate: envVersionUpdate, - } -} - -// UpdateToolsAvailable invokes a lookup and further update in environ -// for new patches of the current tool versions.
-func (e *EnvironTools) UpdateToolsAvailable() error { - if !e.authorizer.AuthEnvironManager() { - return common.ErrPerm - } - return updateToolsAvailability(e.st, e.findTools, e.envVersionUpdate) -} === removed file 'src/github.com/juju/juju/apiserver/environment/toolsversionupdate_test.go' --- src/github.com/juju/juju/apiserver/environment/toolsversionupdate_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/environment/toolsversionupdate_test.go 1970-01-01 00:00:00 +0000 @@ -1,175 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environment - -import ( - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - coretools "github.com/juju/juju/tools" - "github.com/juju/juju/version" -) - -var _ = gc.Suite(&updaterSuite{}) - -type updaterSuite struct { - coretesting.BaseSuite -} - -type dummyEnviron struct { - environs.Environ -} - -func (s *updaterSuite) TestCheckTools(c *gc.C) { - sConfig := coretesting.FakeConfig() - sConfig = sConfig.Merge(coretesting.Attrs{ - "agent-version": "2.5.0", - }) - cfg, err := config.New(config.NoDefaults, sConfig) - c.Assert(err, jc.ErrorIsNil) - fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { - return dummyEnviron{}, nil - } - s.PatchValue(&newEnvirons, fakeNewEnvirons) - var ( - calledWithEnviron environs.Environ - calledWithMajor, calledWithMinor int - calledWithFilter coretools.Filter - ) - fakeToolFinder := func(e environs.Environ, maj int, min int, stream string, filter coretools.Filter) (coretools.List, error) { - calledWithEnviron = e - calledWithMajor = maj - calledWithMinor = min - calledWithFilter = filter - ver := version.Binary{Number: version.Number{Major: maj, Minor: min}} - t := coretools.Tools{Version: ver, URL: "http://example.com", Size: 1} - c.Assert(calledWithMajor, gc.Equals, 2) - c.Assert(calledWithMinor, gc.Equals, 5) - c.Assert(stream, gc.Equals, "released") - return coretools.List{&t}, nil - } - - ver, err := checkToolsAvailability(cfg, fakeToolFinder) - c.Assert(err, jc.ErrorIsNil) - c.Assert(ver, gc.Not(gc.Equals), version.Zero) - c.Assert(ver, gc.Equals, version.Number{Major: 2, Minor: 5, Patch: 0}) -} - -func (s *updaterSuite) TestCheckToolsNonReleasedStream(c *gc.C) { - sConfig := coretesting.FakeConfig() - sConfig = sConfig.Merge(coretesting.Attrs{ - "agent-version": "2.5-alpha1", - "agent-stream": "proposed", - }) - cfg, err := config.New(config.NoDefaults, sConfig) - c.Assert(err, jc.ErrorIsNil) - fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { - return dummyEnviron{}, nil - } - s.PatchValue(&newEnvirons, fakeNewEnvirons) - var ( - calledWithEnviron environs.Environ - calledWithMajor, calledWithMinor int - calledWithFilter coretools.Filter - calledWithStreams []string - ) - fakeToolFinder := func(e environs.Environ, maj int, min int, stream string, filter coretools.Filter) (coretools.List, error) { - calledWithEnviron = e - calledWithMajor = maj - calledWithMinor = min - calledWithFilter = filter - calledWithStreams = append(calledWithStreams, stream) - if stream == "released" { - return nil, coretools.ErrNoMatches - } - ver := version.Binary{Number: version.Number{Major: maj, Minor: min}} - t := coretools.Tools{Version: ver, URL: "http://example.com", Size: 1} - c.Assert(calledWithMajor, gc.Equals, 2) - 
c.Assert(calledWithMinor, gc.Equals, 5) - return coretools.List{&t}, nil - } - ver, err := checkToolsAvailability(cfg, fakeToolFinder) - c.Assert(err, jc.ErrorIsNil) - c.Assert(calledWithStreams, gc.DeepEquals, []string{"released", "proposed"}) - c.Assert(ver, gc.Not(gc.Equals), version.Zero) - c.Assert(ver, gc.Equals, version.Number{Major: 2, Minor: 5, Patch: 0}) -} - -type envGetter struct { -} - -func (e *envGetter) Environment() (*state.Environment, error) { - return &state.Environment{}, nil -} - -func (s *updaterSuite) TestUpdateToolsAvailability(c *gc.C) { - fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { - return dummyEnviron{}, nil - } - s.PatchValue(&newEnvirons, fakeNewEnvirons) - - fakeEnvConfig := func(_ *state.Environment) (*config.Config, error) { - sConfig := coretesting.FakeConfig() - sConfig = sConfig.Merge(coretesting.Attrs{ - "agent-version": "2.5.0", - }) - return config.New(config.NoDefaults, sConfig) - } - s.PatchValue(&envConfig, fakeEnvConfig) - - fakeToolFinder := func(_ environs.Environ, _ int, _ int, _ string, _ coretools.Filter) (coretools.List, error) { - ver := version.Binary{Number: version.Number{Major: 2, Minor: 5, Patch: 2}} - olderVer := version.Binary{Number: version.Number{Major: 2, Minor: 5, Patch: 1}} - t := coretools.Tools{Version: ver, URL: "http://example.com", Size: 1} - tOld := coretools.Tools{Version: olderVer, URL: "http://example.com", Size: 1} - return coretools.List{&t, &tOld}, nil - } - - var ver version.Number - fakeUpdate := func(_ *state.Environment, v version.Number) error { - ver = v - return nil - } - - err := updateToolsAvailability(&envGetter{}, fakeToolFinder, fakeUpdate) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(ver, gc.Not(gc.Equals), version.Zero) - c.Assert(ver, gc.Equals, version.Number{Major: 2, Minor: 5, Patch: 2}) -} - -func (s *updaterSuite) TestUpdateToolsAvailabilityNoMatches(c *gc.C) { - fakeNewEnvirons := func(*config.Config) (environs.Environ, error) { - return dummyEnviron{}, nil - } - s.PatchValue(&newEnvirons, fakeNewEnvirons) - - fakeEnvConfig := func(_ *state.Environment) (*config.Config, error) { - sConfig := coretesting.FakeConfig() - sConfig = sConfig.Merge(coretesting.Attrs{ - "agent-version": "2.5.0", - }) - return config.New(config.NoDefaults, sConfig) - } - s.PatchValue(&envConfig, fakeEnvConfig) - - // No new tools available. - fakeToolFinder := func(_ environs.Environ, _ int, _ int, _ string, _ coretools.Filter) (coretools.List, error) { - return nil, errors.NotFoundf("tools") - } - - // Update should never be called. - fakeUpdate := func(_ *state.Environment, v version.Number) error { - c.Fail() - return nil - } - - err := updateToolsAvailability(&envGetter{}, fakeToolFinder, fakeUpdate) - c.Assert(err, jc.ErrorIsNil) -} === removed directory 'src/github.com/juju/juju/apiserver/environmentmanager' === removed file 'src/github.com/juju/juju/apiserver/environmentmanager/environmentmanager.go' --- src/github.com/juju/juju/apiserver/environmentmanager/environmentmanager.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/environmentmanager/environmentmanager.go 1970-01-01 00:00:00 +0000 @@ -1,380 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The environmentmanager package defines an API end point for functions -// dealing with envionments. Creating, listing and sharing environments. 
-package environmentmanager - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - "github.com/juju/utils" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/feature" - "github.com/juju/juju/state" - "github.com/juju/juju/version" -) - -var logger = loggo.GetLogger("juju.apiserver.environmentmanager") - -func init() { - common.RegisterStandardFacadeForFeature("EnvironmentManager", 1, NewEnvironmentManagerAPI, feature.JES) -} - -// EnvironmentManager defines the methods on the environmentmanager API end -// point. -type EnvironmentManager interface { - ConfigSkeleton(args params.EnvironmentSkeletonConfigArgs) (params.EnvironConfigResult, error) - CreateEnvironment(args params.EnvironmentCreateArgs) (params.Environment, error) - ListEnvironments(user params.Entity) (params.UserEnvironmentList, error) -} - -// EnvironmentManagerAPI implements the environment manager interface and is -// the concrete implementation of the api end point. -type EnvironmentManagerAPI struct { - state stateInterface - authorizer common.Authorizer - toolsFinder *common.ToolsFinder -} - -var _ EnvironmentManager = (*EnvironmentManagerAPI)(nil) - -// NewEnvironmentManagerAPI creates a new api server endpoint for managing -// environments. -func NewEnvironmentManagerAPI( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, -) (*EnvironmentManagerAPI, error) { - if !authorizer.AuthClient() { - return nil, common.ErrPerm - } - - urlGetter := common.NewToolsURLGetter(st.EnvironUUID(), st) - return &EnvironmentManagerAPI{ - state: getState(st), - authorizer: authorizer, - toolsFinder: common.NewToolsFinder(st, st, urlGetter), - }, nil -} - -// authCheck checks if the user is acting on their own behalf, or if they -// are an administrator acting on behalf of another user. -func (em *EnvironmentManagerAPI) authCheck(user names.UserTag) error { - // Since we know this is a user tag (because AuthClient is true), - // we just do the type assertion to the UserTag. - apiUser, _ := em.authorizer.GetAuthTag().(names.UserTag) - isAdmin, err := em.state.IsSystemAdministrator(apiUser) - if err != nil { - return errors.Trace(err) - } - if isAdmin { - logger.Tracef("%q is a system admin", apiUser.Canonical()) - return nil - } - - // We can't just compare the UserTags themselves as the provider part - // may be unset, and gets replaced with 'local'. We must compare against - // the Username of the user tag. - if apiUser.Canonical() == user.Canonical() { - return nil - } - return common.ErrPerm -} - -// ConfigSource describes a type that is able to provide config. -// Abstracted primarily for testing. -type ConfigSource interface { - Config() (*config.Config, error) -} - -var configValuesFromStateServer = []string{ - "type", - "ca-cert", - "state-port", - "api-port", - "syslog-port", - "rsyslog-ca-cert", - "rsyslog-ca-key", -} - -// ConfigSkeleton returns config values to be used as a starting point for the -// API caller to construct a valid environment specific config. The provider -// and region params are there for future use, and current behaviour expects -// both of these to be empty. 
-func (em *EnvironmentManagerAPI) ConfigSkeleton(args params.EnvironmentSkeletonConfigArgs) (params.EnvironConfigResult, error) { - var result params.EnvironConfigResult - if args.Provider != "" { - return result, errors.NotValidf("provider value %q", args.Provider) - } - if args.Region != "" { - return result, errors.NotValidf("region value %q", args.Region) - } - - stateServerEnv, err := em.state.StateServerEnvironment() - if err != nil { - return result, errors.Trace(err) - } - - config, err := em.configSkeleton(stateServerEnv) - if err != nil { - return result, errors.Trace(err) - } - - result.Config = config - return result, nil -} - -func (em *EnvironmentManagerAPI) restrictedProviderFields(providerType string) ([]string, error) { - provider, err := environs.Provider(providerType) - if err != nil { - return nil, errors.Trace(err) - } - - var fields []string - fields = append(fields, configValuesFromStateServer...) - fields = append(fields, provider.RestrictedConfigAttributes()...) - return fields, nil -} - -func (em *EnvironmentManagerAPI) configSkeleton(source ConfigSource) (map[string]interface{}, error) { - baseConfig, err := source.Config() - if err != nil { - return nil, errors.Trace(err) - } - baseMap := baseConfig.AllAttrs() - - fields, err := em.restrictedProviderFields(baseConfig.Type()) - if err != nil { - return nil, errors.Trace(err) - } - - var result = make(map[string]interface{}) - for _, field := range fields { - if value, found := baseMap[field]; found { - result[field] = value - } - } - return result, nil -} - -func (em *EnvironmentManagerAPI) checkVersion(cfg map[string]interface{}) error { - // If there is no agent-version specified, use the current version. - // otherwise we need to check for tools - value, found := cfg["agent-version"] - if !found { - cfg["agent-version"] = version.Current.Number.String() - return nil - } - valuestr, ok := value.(string) - if !ok { - return errors.Errorf("agent-version must be a string but has type '%T'", value) - } - num, err := version.Parse(valuestr) - if err != nil { - return errors.Trace(err) - } - if comp := num.Compare(version.Current.Number); comp > 0 { - return errors.Errorf("agent-version cannot be greater than the server: %s", version.Current.Number) - } else if comp < 0 { - // Look to see if we have tools available for that version. - // Obviously if the version is the same, we have the tools available. 
- list, err := em.toolsFinder.FindTools(params.FindToolsParams{ - Number: num, - }) - if err != nil { - return errors.Trace(err) - } - logger.Tracef("found tools: %#v", list) - if len(list.List) == 0 { - return errors.Errorf("no tools found for version %s", num) - } - } - return nil -} - -func (em *EnvironmentManagerAPI) validConfig(attrs map[string]interface{}) (*config.Config, error) { - cfg, err := config.New(config.UseDefaults, attrs) - if err != nil { - return nil, errors.Annotate(err, "creating config from values failed") - } - provider, err := environs.Provider(cfg.Type()) - if err != nil { - return nil, errors.Trace(err) - } - cfg, err = provider.PrepareForCreateEnvironment(cfg) - if err != nil { - return nil, errors.Trace(err) - } - cfg, err = provider.Validate(cfg, nil) - if err != nil { - return nil, errors.Annotate(err, "provider validation failed") - } - return cfg, nil -} - -func (em *EnvironmentManagerAPI) newEnvironmentConfig(args params.EnvironmentCreateArgs, source ConfigSource) (*config.Config, error) { - // For now, we just smash to the two maps together as we store - // the account values and the environment config together in the - // *config.Config instance. - joint := make(map[string]interface{}) - for key, value := range args.Config { - joint[key] = value - } - // Account info overrides any config values. - for key, value := range args.Account { - joint[key] = value - } - if _, found := joint["uuid"]; found { - return nil, errors.New("uuid is generated, you cannot specify one") - } - baseConfig, err := source.Config() - if err != nil { - return nil, errors.Trace(err) - } - baseMap := baseConfig.AllAttrs() - fields, err := em.restrictedProviderFields(baseConfig.Type()) - if err != nil { - return nil, errors.Trace(err) - } - // Before comparing any values, we need to push the config through - // the provider validation code. One of the reasons for this is that - // numbers being serialized through JSON get turned into float64. The - // schema code used in config will convert these back into integers. - // However, before we can create a valid config, we need to make sure - // we copy across fields from the main config that aren't there. - for _, field := range fields { - if _, found := joint[field]; !found { - if baseValue, found := baseMap[field]; found { - joint[field] = baseValue - } - } - } - - cfg, err := em.validConfig(joint) - if err != nil { - return nil, errors.Trace(err) - } - attrs := cfg.AllAttrs() - // Any values that would normally be copied from the state server - // config can also be defined, but if they differ from the state server - // values, an error is returned. - for _, field := range fields { - if value, found := attrs[field]; found { - if serverValue := baseMap[field]; value != serverValue { - return nil, errors.Errorf( - "specified %s \"%v\" does not match apiserver \"%v\"", - field, value, serverValue) - } - } - } - if err := em.checkVersion(attrs); err != nil { - return nil, errors.Trace(err) - } - - // Generate the UUID for the server. - uuid, err := utils.NewUUID() - if err != nil { - return nil, errors.Annotate(err, "failed to generate environment uuid") - } - attrs["uuid"] = uuid.String() - - return em.validConfig(attrs) -} - -// CreateEnvironment creates a new environment using the account and -// environment config specified in the args. -func (em *EnvironmentManagerAPI) CreateEnvironment(args params.EnvironmentCreateArgs) (params.Environment, error) { - result := params.Environment{} - // Get the state server environment first. 
We need it both for the state - // server owner and the ability to get the config. - stateServerEnv, err := em.state.StateServerEnvironment() - if err != nil { - return result, errors.Trace(err) - } - - ownerTag, err := names.ParseUserTag(args.OwnerTag) - if err != nil { - return result, errors.Trace(err) - } - - // Any user is able to create themselves an environment (until real fine - // grain permissions are available), and admins (the creator of the state - // server environment) are able to create environments for other people. - err = em.authCheck(ownerTag) - if err != nil { - return result, errors.Trace(err) - } - - newConfig, err := em.newEnvironmentConfig(args, stateServerEnv) - if err != nil { - return result, errors.Trace(err) - } - // NOTE: check the agent-version of the config, and if it is > the current - // version, it is not supported, also check existing tools, and if we don't - // have tools for that version, also die. - env, st, err := em.state.NewEnvironment(newConfig, ownerTag) - if err != nil { - return result, errors.Annotate(err, "failed to create new environment") - } - defer st.Close() - - result.Name = env.Name() - result.UUID = env.UUID() - result.OwnerTag = env.Owner().String() - - return result, nil -} - -// ListEnvironments returns the environments that the specified user -// has access to in the current server. Only that state server owner -// can list environments for any user (at this stage). Other users -// can only ask about their own environments. -func (em *EnvironmentManagerAPI) ListEnvironments(user params.Entity) (params.UserEnvironmentList, error) { - result := params.UserEnvironmentList{} - - userTag, err := names.ParseUserTag(user.Tag) - if err != nil { - return result, errors.Trace(err) - } - - err = em.authCheck(userTag) - if err != nil { - return result, errors.Trace(err) - } - - environments, err := em.state.EnvironmentsForUser(userTag) - if err != nil { - return result, errors.Trace(err) - } - - for _, env := range environments { - var lastConn *time.Time - userLastConn, err := env.LastConnection() - if err != nil { - if !state.IsNeverConnectedError(err) { - return result, errors.Trace(err) - } - } else { - lastConn = &userLastConn - } - result.UserEnvironments = append(result.UserEnvironments, params.UserEnvironment{ - Environment: params.Environment{ - Name: env.Name(), - UUID: env.UUID(), - OwnerTag: env.Owner().String(), - }, - LastConnection: lastConn, - }) - logger.Debugf("list env: %s, %s, %s", env.Name(), env.UUID(), env.Owner()) - } - - return result, nil -} === removed file 'src/github.com/juju/juju/apiserver/environmentmanager/environmentmanager_test.go' --- src/github.com/juju/juju/apiserver/environmentmanager/environmentmanager_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/environmentmanager/environmentmanager_test.go 1970-01-01 00:00:00 +0000 @@ -1,379 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package environmentmanager_test - -import ( - "github.com/juju/loggo" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/environmentmanager" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - jujutesting "github.com/juju/juju/juju/testing" - // Register the providers for the field check test - _ "github.com/juju/juju/provider/azure" - _ "github.com/juju/juju/provider/ec2" - _ "github.com/juju/juju/provider/joyent" - _ "github.com/juju/juju/provider/local" - _ "github.com/juju/juju/provider/maas" - _ "github.com/juju/juju/provider/openstack" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type envManagerBaseSuite struct { - jujutesting.JujuConnSuite - - envmanager *environmentmanager.EnvironmentManagerAPI - resources *common.Resources - authoriser apiservertesting.FakeAuthorizer -} - -func (s *envManagerBaseSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.resources = common.NewResources() - s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) - - s.authoriser = apiservertesting.FakeAuthorizer{ - Tag: s.AdminUserTag(c), - } - - loggo.GetLogger("juju.apiserver.environmentmanager").SetLogLevel(loggo.TRACE) -} - -func (s *envManagerBaseSuite) setAPIUser(c *gc.C, user names.UserTag) { - s.authoriser.Tag = user - envmanager, err := environmentmanager.NewEnvironmentManagerAPI(s.State, s.resources, s.authoriser) - c.Assert(err, jc.ErrorIsNil) - s.envmanager = envmanager -} - -type envManagerSuite struct { - envManagerBaseSuite -} - -var _ = gc.Suite(&envManagerSuite{}) - -func (s *envManagerSuite) TestNewAPIAcceptsClient(c *gc.C) { - anAuthoriser := s.authoriser - anAuthoriser.Tag = names.NewUserTag("external@remote") - endPoint, err := environmentmanager.NewEnvironmentManagerAPI(s.State, s.resources, anAuthoriser) - c.Assert(err, jc.ErrorIsNil) - c.Assert(endPoint, gc.NotNil) -} - -func (s *envManagerSuite) TestNewAPIRefusesNonClient(c *gc.C) { - anAuthoriser := s.authoriser - anAuthoriser.Tag = names.NewUnitTag("mysql/0") - endPoint, err := environmentmanager.NewEnvironmentManagerAPI(s.State, s.resources, anAuthoriser) - c.Assert(endPoint, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *envManagerSuite) createArgs(c *gc.C, owner names.UserTag) params.EnvironmentCreateArgs { - return params.EnvironmentCreateArgs{ - OwnerTag: owner.String(), - Account: make(map[string]interface{}), - Config: map[string]interface{}{ - "name": "test-env", - "authorized-keys": "ssh-key", - // And to make it a valid dummy config - "state-server": false, - }, - } -} - -func (s *envManagerSuite) createArgsForVersion(c *gc.C, owner names.UserTag, ver interface{}) params.EnvironmentCreateArgs { - params := s.createArgs(c, owner) - params.Config["agent-version"] = ver - return params -} - -func (s *envManagerSuite) TestUserCanCreateEnvironment(c *gc.C) { - owner := names.NewUserTag("external@remote") - s.setAPIUser(c, owner) - env, err := s.envmanager.CreateEnvironment(s.createArgs(c, owner)) - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.OwnerTag, gc.Equals, owner.String()) - c.Assert(env.Name, gc.Equals, "test-env") -} - -func (s *envManagerSuite) TestAdminCanCreateEnvironmentForSomeoneElse(c *gc.C) { - s.setAPIUser(c, s.AdminUserTag(c)) - owner := 
names.NewUserTag("external@remote") - env, err := s.envmanager.CreateEnvironment(s.createArgs(c, owner)) - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.OwnerTag, gc.Equals, owner.String()) - c.Assert(env.Name, gc.Equals, "test-env") - // Make sure that the environment created does actually have the correct - // owner, and that owner is actually allowed to use the environment. - newState, err := s.State.ForEnviron(names.NewEnvironTag(env.UUID)) - c.Assert(err, jc.ErrorIsNil) - defer newState.Close() - - newEnv, err := newState.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(newEnv.Owner(), gc.Equals, owner) - _, err = newState.EnvironmentUser(owner) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *envManagerSuite) TestNonAdminCannotCreateEnvironmentForSomeoneElse(c *gc.C) { - s.setAPIUser(c, names.NewUserTag("non-admin@remote")) - owner := names.NewUserTag("external@remote") - _, err := s.envmanager.CreateEnvironment(s.createArgs(c, owner)) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *envManagerSuite) TestRestrictedProviderFields(c *gc.C) { - s.setAPIUser(c, names.NewUserTag("non-admin@remote")) - for i, test := range []struct { - provider string - expected []string - }{ - { - provider: "azure", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key", - "location"}, - }, { - provider: "dummy", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key"}, - }, { - provider: "joyent", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key"}, - }, { - provider: "local", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key", - "container", "network-bridge", "root-dir", "proxy-ssh"}, - }, { - provider: "maas", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key", - "maas-server"}, - }, { - provider: "openstack", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key", - "region", "auth-url", "auth-mode"}, - }, { - provider: "ec2", - expected: []string{ - "type", "ca-cert", "state-port", "api-port", "syslog-port", "rsyslog-ca-cert", "rsyslog-ca-key", - "region"}, - }, - } { - c.Logf("%d: %s provider", i, test.provider) - fields, err := environmentmanager.RestrictedProviderFields(s.envmanager, test.provider) - c.Check(err, jc.ErrorIsNil) - c.Check(fields, jc.SameContents, test.expected) - } -} - -func (s *envManagerSuite) TestConfigSkeleton(c *gc.C) { - s.setAPIUser(c, names.NewUserTag("non-admin@remote")) - - _, err := s.envmanager.ConfigSkeleton( - params.EnvironmentSkeletonConfigArgs{Provider: "ec2"}) - c.Check(err, gc.ErrorMatches, `provider value "ec2" not valid`) - _, err = s.envmanager.ConfigSkeleton( - params.EnvironmentSkeletonConfigArgs{Region: "the sun"}) - c.Check(err, gc.ErrorMatches, `region value "the sun" not valid`) - - skeleton, err := s.envmanager.ConfigSkeleton(params.EnvironmentSkeletonConfigArgs{}) - c.Assert(err, jc.ErrorIsNil) - - // The apiPort changes every test run as the dummy provider - // looks for a random open port. 
- apiPort := s.Environ.Config().APIPort() - - c.Assert(skeleton.Config, jc.DeepEquals, params.EnvironConfig{ - "type": "dummy", - "ca-cert": coretesting.CACert, - "state-port": 1234, - "api-port": apiPort, - "syslog-port": 2345, - }) -} - -func (s *envManagerSuite) TestCreateEnvironmentValidatesConfig(c *gc.C) { - admin := s.AdminUserTag(c) - s.setAPIUser(c, admin) - args := s.createArgs(c, admin) - delete(args.Config, "state-server") - _, err := s.envmanager.CreateEnvironment(args) - c.Assert(err, gc.ErrorMatches, "provider validation failed: state-server: expected bool, got nothing") -} - -func (s *envManagerSuite) TestCreateEnvironmentBadConfig(c *gc.C) { - owner := names.NewUserTag("external@remote") - s.setAPIUser(c, owner) - for i, test := range []struct { - key string - value interface{} - errMatch string - }{ - { - key: "uuid", - value: "anything", - errMatch: `uuid is generated, you cannot specify one`, - }, { - key: "type", - value: "fake", - errMatch: `specified type "fake" does not match apiserver "dummy"`, - }, { - key: "ca-cert", - value: coretesting.OtherCACert, - errMatch: `(?s)specified ca-cert ".*" does not match apiserver ".*"`, - }, { - key: "state-port", - value: 9876, - errMatch: `specified state-port "9876" does not match apiserver "1234"`, - }, { - // The api-port is dynamic, but always in user-space, so > 1024. - key: "api-port", - value: 123, - errMatch: `specified api-port "123" does not match apiserver ".*"`, - }, { - key: "syslog-port", - value: 1234, - errMatch: `specified syslog-port "1234" does not match apiserver "2345"`, - }, { - key: "rsyslog-ca-cert", - value: "some-cert", - errMatch: `specified rsyslog-ca-cert "some-cert" does not match apiserver ".*"`, - }, { - key: "rsyslog-ca-key", - value: "some-key", - errMatch: `specified rsyslog-ca-key "some-key" does not match apiserver ".*"`, - }, - } { - c.Logf("%d: %s", i, test.key) - args := s.createArgs(c, owner) - args.Config[test.key] = test.value - _, err := s.envmanager.CreateEnvironment(args) - c.Assert(err, gc.ErrorMatches, test.errMatch) - - } -} - -func (s *envManagerSuite) TestCreateEnvironmentSameAgentVersion(c *gc.C) { - admin := s.AdminUserTag(c) - s.setAPIUser(c, admin) - args := s.createArgsForVersion(c, admin, version.Current.Number.String()) - _, err := s.envmanager.CreateEnvironment(args) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *envManagerSuite) TestCreateEnvironmentBadAgentVersion(c *gc.C) { - admin := s.AdminUserTag(c) - s.setAPIUser(c, admin) - - bigger := version.Current.Number - bigger.Minor += 1 - - smaller := version.Current.Number - smaller.Minor -= 1 - - for i, test := range []struct { - value interface{} - errMatch string - }{ - { - value: 42, - errMatch: `creating config from values failed: agent-version: expected string, got int\(42\)`, - }, { - value: "not a number", - errMatch: `creating config from values failed: invalid agent version in environment configuration: "not a number"`, - }, { - value: bigger.String(), - errMatch: "agent-version cannot be greater than the server: .*", - }, { - value: smaller.String(), - errMatch: "no tools found for version .*", - }, - } { - c.Logf("test %d", i) - args := s.createArgsForVersion(c, admin, test.value) - _, err := s.envmanager.CreateEnvironment(args) - c.Check(err, gc.ErrorMatches, test.errMatch) - } -} - -func (s *envManagerSuite) TestListEnvironmentsForSelf(c *gc.C) { - user := names.NewUserTag("external@remote") - s.setAPIUser(c, user) - result, err := s.envmanager.ListEnvironments(params.Entity{user.String()}) - c.Assert(err, 
jc.ErrorIsNil) - c.Assert(result.UserEnvironments, gc.HasLen, 0) -} - -func (s *envManagerSuite) TestListEnvironmentsForSelfLocalUser(c *gc.C) { - // When the user's credentials cache stores the simple name, but the - // api server converts it to a fully qualified name. - user := names.NewUserTag("local-user") - s.setAPIUser(c, names.NewUserTag("local-user@local")) - result, err := s.envmanager.ListEnvironments(params.Entity{user.String()}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.UserEnvironments, gc.HasLen, 0) -} - -func (s *envManagerSuite) checkEnvironmentMatches(c *gc.C, env params.Environment, expected *state.Environment) { - c.Check(env.Name, gc.Equals, expected.Name()) - c.Check(env.UUID, gc.Equals, expected.UUID()) - c.Check(env.OwnerTag, gc.Equals, expected.Owner().String()) -} - -func (s *envManagerSuite) TestListEnvironmentsAdminSelf(c *gc.C) { - user := s.AdminUserTag(c) - s.setAPIUser(c, user) - result, err := s.envmanager.ListEnvironments(params.Entity{user.String()}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.UserEnvironments, gc.HasLen, 1) - expected, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - s.checkEnvironmentMatches(c, result.UserEnvironments[0].Environment, expected) -} - -func (s *envManagerSuite) TestListEnvironmentsAdminListsOther(c *gc.C) { - user := s.AdminUserTag(c) - s.setAPIUser(c, user) - other := names.NewUserTag("external@remote") - result, err := s.envmanager.ListEnvironments(params.Entity{other.String()}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.UserEnvironments, gc.HasLen, 0) -} - -func (s *envManagerSuite) TestListEnvironmentsDenied(c *gc.C) { - user := names.NewUserTag("external@remote") - s.setAPIUser(c, user) - other := names.NewUserTag("other@remote") - _, err := s.envmanager.ListEnvironments(params.Entity{other.String()}) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -type fakeProvider struct { - environs.EnvironProvider -} - -func (*fakeProvider) Validate(cfg, old *config.Config) (*config.Config, error) { - return cfg, nil -} - -func (*fakeProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) { - return cfg, nil -} - -func init() { - environs.RegisterProvider("fake", &fakeProvider{}) -} === removed file 'src/github.com/juju/juju/apiserver/environmentmanager/export_test.go' --- src/github.com/juju/juju/apiserver/environmentmanager/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/environmentmanager/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,10 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environmentmanager - -var ConfigValuesFromStateServer = configValuesFromStateServer - -func RestrictedProviderFields(em *EnvironmentManagerAPI, providerType string) ([]string, error) { - return em.restrictedProviderFields(providerType) -} === removed file 'src/github.com/juju/juju/apiserver/environmentmanager/package_test.go' --- src/github.com/juju/juju/apiserver/environmentmanager/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/environmentmanager/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package environmentmanager_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/apiserver/environmentmanager/state.go' --- src/github.com/juju/juju/apiserver/environmentmanager/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/environmentmanager/state.go 1970-01-01 00:00:00 +0000 @@ -1,26 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package environmentmanager - -import ( - "github.com/juju/names" - - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" -) - -var getState = func(st *state.State) stateInterface { - return stateShim{st} -} - -type stateInterface interface { - EnvironmentsForUser(names.UserTag) ([]*state.UserEnvironment, error) - IsSystemAdministrator(user names.UserTag) (bool, error) - NewEnvironment(*config.Config, names.UserTag) (*state.Environment, *state.State, error) - StateServerEnvironment() (*state.Environment, error) -} - -type stateShim struct { - *state.State -} === modified file 'src/github.com/juju/juju/apiserver/export_test.go' --- src/github.com/juju/juju/apiserver/export_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/export_test.go 2016-03-22 15:18:22 +0000 @@ -5,13 +5,16 @@ import ( "fmt" - "reflect" + "net" "time" "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon.v1" + "github.com/juju/juju/apiserver/authentication" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/rpc" @@ -19,18 +22,35 @@ ) var ( - RootType = reflect.TypeOf(&apiHandler{}) - NewPingTimeout = newPingTimeout - MaxClientPingInterval = &maxClientPingInterval - MongoPingInterval = &mongoPingInterval - NewBackups = &newBackups - ParseLogLine = parseLogLine - AgentMatchesFilter = agentMatchesFilter - NewLogTailer = &newLogTailer - + NewPingTimeout = newPingTimeout + MaxClientPingInterval = &maxClientPingInterval + MongoPingInterval = &mongoPingInterval + NewBackups = &newBackups AllowedMethodsDuringUpgrades = allowedMethodsDuringUpgrades ) +func ServerMacaroon(srv *Server) (*macaroon.Macaroon, error) { + auth, err := srv.authCtxt.macaroonAuth() + if err != nil { + return nil, err + } + return auth.(*authentication.MacaroonAuthenticator).Macaroon, nil +} + +func ServerBakeryService(srv *Server) (*bakery.Service, error) { + auth, err := srv.authCtxt.macaroonAuth() + if err != nil { + return nil, err + } + return auth.(*authentication.MacaroonAuthenticator).Service, nil +} + +// ServerAuthenticatorForTag calls the authenticatorForTag method +// of the server's authContext. 
+func ServerAuthenticatorForTag(srv *Server, tag names.Tag) (authentication.EntityAuthenticator, error) { + return srv.authCtxt.authenticatorForTag(tag) +} + func ApiHandlerWithEntity(entity state.Entity) *apiHandler { return &apiHandler{entity: entity} } @@ -47,9 +67,9 @@ cleanup = func() { doCheckCreds = checkCreds } - delayedCheckCreds := func(st *state.State, c params.LoginRequest, lookForEnvUser bool) (state.Entity, *time.Time, error) { + delayedCheckCreds := func(st *state.State, c params.LoginRequest, lookForModelUser bool, authenticator authentication.EntityAuthenticator) (state.Entity, *time.Time, error) { <-nextChan - return checkCreds(st, c, lookForEnvUser) + return checkCreds(st, c, lookForModelUser, authenticator) } doCheckCreds = delayedCheckCreds return @@ -73,12 +93,12 @@ state: srvSt, tag: names.NewMachineTag("0"), } - h, err := newApiHandler(srv, st, nil, nil, st.EnvironUUID()) + h, err := newApiHandler(srv, st, nil, nil, st.ModelUUID()) c.Assert(err, jc.ErrorIsNil) return h, h.getResources() } -// TestingUpgradingApiHandler returns a limited srvRoot +// TestingUpgradingRoot returns a limited srvRoot // in an upgrade scenario. func TestingUpgradingRoot(st *state.State) rpc.MethodFinder { r := TestingApiRoot(st) @@ -102,11 +122,11 @@ return r, nil } -var PreFacadeEnvironTag = names.NewEnvironTag("383c49f3-526d-4f9e-b50a-1e6fa4e9b3d9") +var PreFacadeModelTag = names.NewModelTag("383c49f3-526d-4f9e-b50a-1e6fa4e9b3d9") func (r *preFacadeAdminApi) Login(c params.Creds) (params.LoginResult, error) { return params.LoginResult{ - EnvironTag: PreFacadeEnvironTag.String(), + ModelTag: PreFacadeModelTag.String(), }, nil } @@ -139,12 +159,8 @@ factories := make(map[int]adminApiFactory) for _, n := range versions { switch n { - case 0: - factories[n] = newAdminApiV0 - case 1: - factories[n] = newAdminApiV1 - case 2: - factories[n] = newAdminApiV2 + case 3: + factories[n] = newAdminApiV3 default: panic(fmt.Errorf("unknown admin API version %d", n)) } @@ -166,12 +182,7 @@ return newAboutToRestoreRoot(r) } -// LogLineAgentTag gives tests access to an internal logFileLine attribute -func (logFileLine *logFileLine) LogLineAgentTag() string { - return logFileLine.agentTag -} - -// LogLineAgentName gives tests access to an internal logFileLine attribute -func (logFileLine *logFileLine) LogLineAgentName() string { - return logFileLine.agentName +// Addr returns the address that the server is listening on. +func (srv *Server) Addr() *net.TCPAddr { + return srv.lis.Addr().(*net.TCPAddr) // cannot fail } === modified file 'src/github.com/juju/juju/apiserver/firewaller/firewaller.go' --- src/github.com/juju/juju/apiserver/firewaller/firewaller.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/firewaller/firewaller.go 2016-03-22 15:18:22 +0000 @@ -16,16 +16,16 @@ func init() { // Version 0 is no longer supported. - common.RegisterStandardFacade("Firewaller", 1, NewFirewallerAPI) + common.RegisterStandardFacade("Firewaller", 2, NewFirewallerAPI) } // FirewallerAPI provides access to the Firewaller API facade. type FirewallerAPI struct { *common.LifeGetter - *common.EnvironWatcher + *common.ModelWatcher *common.AgentEntityWatcher *common.UnitsWatcher - *common.EnvironMachinesWatcher + *common.ModelMachinesWatcher *common.InstanceIdGetter st *state.State @@ -43,12 +43,12 @@ resources *common.Resources, authorizer common.Authorizer, ) (*FirewallerAPI, error) { - if !authorizer.AuthEnvironManager() { + if !authorizer.AuthModelManager() { // Firewaller must run as environment manager. 
 		return nil, common.ErrPerm
 	}
 	// Set up the various authorization checkers.
-	accessEnviron := common.AuthFuncForTagKind(names.EnvironTagKind)
+	accessEnviron := common.AuthFuncForTagKind(names.ModelTagKind)
 	accessUnit := common.AuthFuncForTagKind(names.UnitTagKind)
 	accessService := common.AuthFuncForTagKind(names.ServiceTagKind)
 	accessMachine := common.AuthFuncForTagKind(names.MachineTagKind)
@@ -60,9 +60,9 @@
 		st,
 		accessUnitServiceOrMachine,
 	)
-	// EnvironConfig() and WatchForEnvironConfigChanges() are allowed
+	// ModelConfig() and WatchForModelConfigChanges() are allowed
 	// with unrestricted access.
-	environWatcher := common.NewEnvironWatcher(
+	modelWatcher := common.NewModelWatcher(
 		st,
 		resources,
 		authorizer,
@@ -78,8 +78,8 @@
 		resources,
 		accessMachine,
 	)
-	// WatchEnvironMachines() is allowed with unrestricted access.
-	machinesWatcher := common.NewEnvironMachinesWatcher(
+	// WatchModelMachines() is allowed with unrestricted access.
+	machinesWatcher := common.NewModelMachinesWatcher(
 		st,
 		resources,
 		authorizer,
@@ -91,19 +91,19 @@
 	)

 	return &FirewallerAPI{
-		LifeGetter:             lifeGetter,
-		EnvironWatcher:         environWatcher,
-		AgentEntityWatcher:     entityWatcher,
-		UnitsWatcher:           unitsWatcher,
-		EnvironMachinesWatcher: machinesWatcher,
-		InstanceIdGetter:       instanceIdGetter,
-		st:                     st,
-		resources:              resources,
-		authorizer:             authorizer,
-		accessUnit:             accessUnit,
-		accessService:          accessService,
-		accessMachine:          accessMachine,
-		accessEnviron:          accessEnviron,
+		LifeGetter:           lifeGetter,
+		ModelWatcher:         modelWatcher,
+		AgentEntityWatcher:   entityWatcher,
+		UnitsWatcher:         unitsWatcher,
+		ModelMachinesWatcher: machinesWatcher,
+		InstanceIdGetter:     instanceIdGetter,
+		st:                   st,
+		resources:            resources,
+		authorizer:           authorizer,
+		accessUnit:           accessUnit,
+		accessService:        accessService,
+		accessMachine:        accessMachine,
+		accessEnviron:        accessEnviron,
 	}, nil
 }

=== modified file 'src/github.com/juju/juju/apiserver/firewaller/firewaller_base_test.go'
--- src/github.com/juju/juju/apiserver/firewaller/firewaller_base_test.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/apiserver/firewaller/firewaller_base_test.go	2016-03-22 15:18:22 +0000
@@ -176,15 +176,15 @@
 	})
 }

-func (s *firewallerBaseSuite) testWatchEnvironMachines(
+func (s *firewallerBaseSuite) testWatchModelMachines(
 	c *gc.C,
 	facade interface {
-		WatchEnvironMachines() (params.StringsWatchResult, error)
+		WatchModelMachines() (params.StringsWatchResult, error)
 	},
 ) {
 	c.Assert(s.resources.Count(), gc.Equals, 0)
-	got, err := facade.WatchEnvironMachines()
+	got, err := facade.WatchModelMachines()
 	c.Assert(err, jc.ErrorIsNil)
 	want := params.StringsWatchResult{
 		StringsWatcherId: "1",

=== modified file 'src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go'
--- src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/firewaller/firewaller_test.go	2016-03-22 15:18:22 +0000
@@ -22,7 +22,7 @@

 type firewallerSuite struct {
 	firewallerBaseSuite
-	*commontesting.EnvironWatcherTest
+	*commontesting.ModelWatcherTest

 	firewaller *firewaller.FirewallerAPI
 }
@@ -40,7 +40,7 @@
 	)
 	c.Assert(err, jc.ErrorIsNil)
 	s.firewaller = firewallerAPI
-	s.EnvironWatcherTest = commontesting.NewEnvironWatcherTest(s.firewaller, s.State, s.resources, commontesting.HasSecrets)
+	s.ModelWatcherTest = commontesting.NewModelWatcherTest(s.firewaller, s.State, s.resources, commontesting.HasSecrets)
 }

 func (s *firewallerSuite) TestFirewallerFailsWithNonEnvironManagerUser(c *gc.C) {
@@ -59,8 +59,8 @@
 	s.testInstanceId(c,
 		s.firewaller)
 }

-func (s *firewallerSuite) TestWatchEnvironMachines(c *gc.C) {
-	s.testWatchEnvironMachines(c, s.firewaller)
+func (s *firewallerSuite) TestWatchModelMachines(c *gc.C) {
+	s.testWatchModelMachines(c, s.firewaller)
 }

 func (s *firewallerSuite) TestWatch(c *gc.C) {
@@ -102,7 +102,7 @@
 		"2:juju-public",
 	}

-	fakeEnvTag := names.NewEnvironTag("deadbeef-deaf-face-feed-0123456789ab")
+	fakeEnvTag := names.NewModelTag("deadbeef-deaf-face-feed-0123456789ab")
 	args := addFakeEntities(params.Entities{Entities: []params.Entity{
 		{Tag: fakeEnvTag.String()},
 		{Tag: s.machines[0].Tag().String()},

=== modified file 'src/github.com/juju/juju/apiserver/highavailability/highavailability.go'
--- src/github.com/juju/juju/apiserver/highavailability/highavailability.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/highavailability/highavailability.go	2016-03-22 15:18:22 +0000
@@ -15,12 +15,12 @@
 )

 func init() {
-	common.RegisterStandardFacade("HighAvailability", 1, NewHighAvailabilityAPI)
+	common.RegisterStandardFacade("HighAvailability", 2, NewHighAvailabilityAPI)
 }

 // HighAvailability defines the methods on the highavailability API end point.
 type HighAvailability interface {
-	EnsureAvailability(args params.StateServersSpecs) (params.StateServersChangeResults, error)
+	EnableHA(args params.ControllersSpecs) (params.ControllersChangeResults, error)
 }

 // HighAvailabilityAPI implements the HighAvailability interface and is the concrete
@@ -36,7 +36,7 @@
 // NewHighAvailabilityAPI creates a new server-side highavailability API end point.
 func NewHighAvailabilityAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*HighAvailabilityAPI, error) {
 	// Only clients and environment managers can access the high availability service.
-	if !authorizer.AuthClient() && !authorizer.AuthEnvironManager() {
+	if !authorizer.AuthClient() && !authorizer.AuthModelManager() {
 		return nil, common.ErrPerm
 	}
 	return &HighAvailabilityAPI{
@@ -46,10 +46,10 @@
 	}, nil
 }

-func (api *HighAvailabilityAPI) EnsureAvailability(args params.StateServersSpecs) (params.StateServersChangeResults, error) {
-	results := params.StateServersChangeResults{Results: make([]params.StateServersChangeResult, len(args.Specs))}
-	for i, stateServersSpec := range args.Specs {
-		result, err := EnsureAvailabilitySingle(api.state, stateServersSpec)
+func (api *HighAvailabilityAPI) EnableHA(args params.ControllersSpecs) (params.ControllersChangeResults, error) {
+	results := params.ControllersChangeResults{Results: make([]params.ControllersChangeResult, len(args.Specs))}
+	for i, controllersServersSpec := range args.Specs {
+		result, err := EnableHASingle(api.state, controllersServersSpec)
 		results.Results[i].Result = result
 		results.Results[i].Error = common.ServerError(err)
 	}
@@ -65,9 +65,9 @@
 	return result
 }

-// Generate a StateServersChanges structure.
-func stateServersChanges(change state.StateServersChanges) params.StateServersChanges {
-	return params.StateServersChanges{
+// Generate a ControllersChanges structure.
+func controllersChanges(change state.ControllersChanges) params.ControllersChanges {
+	return params.ControllersChanges{
 		Added:      machineIdsToTags(change.Added...),
 		Maintained: machineIdsToTags(change.Maintained...),
 		Removed:    machineIdsToTags(change.Removed...),
@@ -77,33 +77,33 @@
 	}
 }

-// EnsureAvailabilitySingle applies a single StateServersSpec specification to the current environment.
+// EnableHASingle applies a single ControllersSpec specification to the current model.
 // Exported so it can be called by the legacy client API in the client package.
-func EnsureAvailabilitySingle(st *state.State, spec params.StateServersSpec) (params.StateServersChanges, error) {
-	if !st.IsStateServer() {
-		return params.StateServersChanges{}, errors.New("unsupported with hosted environments")
+func EnableHASingle(st *state.State, spec params.ControllersSpec) (params.ControllersChanges, error) {
+	if !st.IsController() {
+		return params.ControllersChanges{}, errors.New("unsupported with hosted models")
 	}
 	// Check if changes are allowed and the command may proceed.
 	blockChecker := common.NewBlockChecker(st)
 	if err := blockChecker.ChangeAllowed(); err != nil {
-		return params.StateServersChanges{}, errors.Trace(err)
+		return params.ControllersChanges{}, errors.Trace(err)
 	}
 	// Validate the environment tag if present.
-	if spec.EnvironTag != "" {
-		tag, err := names.ParseEnvironTag(spec.EnvironTag)
+	if spec.ModelTag != "" {
+		tag, err := names.ParseModelTag(spec.ModelTag)
 		if err != nil {
-			return params.StateServersChanges{}, errors.Errorf("invalid environment tag: %v", err)
+			return params.ControllersChanges{}, errors.Errorf("invalid model tag: %v", err)
 		}
 		if _, err := st.FindEntity(tag); err != nil {
-			return params.StateServersChanges{}, err
+			return params.ControllersChanges{}, err
 		}
 	}

 	series := spec.Series
 	if series == "" {
-		ssi, err := st.StateServerInfo()
+		ssi, err := st.ControllerInfo()
 		if err != nil {
-			return params.StateServersChanges{}, err
+			return params.ControllersChanges{}, err
 		}

 		// We should always have at least one voting machine
@@ -112,17 +112,49 @@
 		// the first one, then they'll stay in sync.
 		if len(ssi.VotingMachineIds) == 0 {
 			// Better than a panic()?
-			return params.StateServersChanges{}, fmt.Errorf("internal error, failed to find any voting machines")
+			return params.ControllersChanges{}, fmt.Errorf("internal error, failed to find any voting machines")
 		}
 		templateMachine, err := st.Machine(ssi.VotingMachineIds[0])
 		if err != nil {
-			return params.StateServersChanges{}, err
+			return params.ControllersChanges{}, err
 		}
 		series = templateMachine.Series()
 	}
-	changes, err := st.EnsureAvailability(spec.NumStateServers, spec.Constraints, series, spec.Placement)
-	if err != nil {
-		return params.StateServersChanges{}, err
-	}
-	return stateServersChanges(changes), nil
+	changes, err := st.EnableHA(spec.NumControllers, spec.Constraints, series, spec.Placement)
+	if err != nil {
+		return params.ControllersChanges{}, err
+	}
+	return controllersChanges(changes), nil
+}
+
+// StopHAReplicationForUpgrade will prompt the HA cluster to enter upgrade
+// mongo mode.
+func (api *HighAvailabilityAPI) StopHAReplicationForUpgrade(args params.UpgradeMongoParams) (params.MongoUpgradeResults, error) {
+	ha, err := api.state.SetUpgradeMongoMode(args.Target)
+	if err != nil {
+		return params.MongoUpgradeResults{}, errors.Annotate(err, "cannot stop HA for upgrade")
+	}
+	members := make([]params.HAMember, len(ha.Members))
+	for i, m := range ha.Members {
+		members[i] = params.HAMember{
+			Tag:           m.Tag,
+			PublicAddress: m.PublicAddress,
+			Series:        m.Series,
+		}
+	}
+	return params.MongoUpgradeResults{
+		Master: params.HAMember{
+			Tag:           ha.Master.Tag,
+			PublicAddress: ha.Master.PublicAddress,
+			Series:        ha.Master.Series,
+		},
+		Members:   members,
+		RsMembers: ha.RsMembers,
+	}, nil
+}
+
+// ResumeHAReplicationAfterUpgrade will add the upgraded members of HA
+// cluster to the upgraded master.
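
To see the renamed facade end to end, here is a hedged client-side sketch of calling EnableHA with the types introduced above; the three-controller spec, constraint string, and error handling are illustrative assumptions, and only the parameter and result shapes come from this diff:

    specs := params.ControllersSpecs{
        Specs: []params.ControllersSpec{{
            NumControllers: 3, // must be odd and non-negative
            Constraints:    constraints.MustParse("mem=4G"),
        }},
    }
    results, err := api.EnableHA(specs)
    if err != nil {
        return err
    }
    for _, r := range results.Results {
        if r.Error != nil {
            return r.Error
        }
        // r.Result reports the machines Added, Maintained, Removed
        // and Converted by the operation.
        logger.Infof("added: %v", r.Result.Added)
    }
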
+func (api *HighAvailabilityAPI) ResumeHAReplicationAfterUpgrade(args params.ResumeReplicationParams) error { + return api.state.ResumeReplication(args.Members) } === modified file 'src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go' --- src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/highavailability/highavailability_test.go 2016-03-22 15:18:22 +0000 @@ -67,9 +67,9 @@ s.haServer, err = highavailability.NewHighAvailabilityAPI(s.State, s.resources, s.authoriser) c.Assert(err, jc.ErrorIsNil) - _, err = s.State.AddMachine("quantal", state.JobManageEnviron) + _, err = s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) - // We have to ensure the agents are alive, or EnsureAvailability will + // We have to ensure the agents are alive, or EnableHA will // create more to replace them. s.pingers = []*presence.Pinger{s.setAgentPresence(c, "0")} s.BlockHelper = commontesting.NewBlockHelper(s.APIState) @@ -94,23 +94,23 @@ return pinger } -func (s *clientSuite) ensureAvailability( - c *gc.C, numStateServers int, cons constraints.Value, series string, placement []string, -) (params.StateServersChanges, error) { - return ensureAvailability(c, s.haServer, numStateServers, cons, series, placement) +func (s *clientSuite) enableHA( + c *gc.C, numControllers int, cons constraints.Value, series string, placement []string, +) (params.ControllersChanges, error) { + return enableHA(c, s.haServer, numControllers, cons, series, placement) } -func ensureAvailability( - c *gc.C, haServer *highavailability.HighAvailabilityAPI, numStateServers int, cons constraints.Value, series string, placement []string, -) (params.StateServersChanges, error) { - arg := params.StateServersSpecs{ - Specs: []params.StateServersSpec{{ - NumStateServers: numStateServers, - Constraints: cons, - Series: series, - Placement: placement, +func enableHA( + c *gc.C, haServer *highavailability.HighAvailabilityAPI, numControllers int, cons constraints.Value, series string, placement []string, +) (params.ControllersChanges, error) { + arg := params.ControllersSpecs{ + Specs: []params.ControllersSpec{{ + NumControllers: numControllers, + Constraints: cons, + Series: series, + Placement: placement, }}} - results, err := haServer.EnsureAvailability(arg) + results, err := haServer.EnableHA(arg) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Results, gc.HasLen, 1) result := results.Results[0] @@ -123,18 +123,18 @@ return result.Result, err } -func (s *clientSuite) TestEnsureAvailabilitySeries(c *gc.C) { +func (s *clientSuite) TestEnableHASeries(c *gc.C) { machines, err := s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) c.Assert(machines, gc.HasLen, 1) c.Assert(machines[0].Series(), gc.Equals, "quantal") - ensureAvailabilityResult, err := s.ensureAvailability(c, 3, emptyCons, defaultSeries, nil) + enableHAResult, err := s.enableHA(c, 3, emptyCons, defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + 
c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err = s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) @@ -149,12 +149,12 @@ pingerC := s.setAgentPresence(c, "2") defer assertKill(c, pingerC) - ensureAvailabilityResult, err = s.ensureAvailability(c, 5, emptyCons, "non-default", nil) + enableHAResult, err = s.enableHA(c, 5, emptyCons, "non-default", nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0", "machine-1", "machine-2"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-3", "machine-4"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0", "machine-1", "machine-2"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-3", "machine-4"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) c.Assert(err, jc.ErrorIsNil) machines, err = s.State.AllMachines() @@ -167,13 +167,13 @@ c.Assert(machines[4].Series(), gc.Equals, "non-default") } -func (s *clientSuite) TestEnsureAvailabilityConstraints(c *gc.C) { - ensureAvailabilityResult, err := s.ensureAvailability(c, 3, constraints.MustParse("mem=4G"), defaultSeries, nil) +func (s *clientSuite) TestEnableHAConstraints(c *gc.C) { + enableHAResult, err := s.enableHA(c, 3, constraints.MustParse("mem=4G"), defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err := s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) @@ -190,31 +190,31 @@ } } -func (s *clientSuite) TestBlockEnsureAvailability(c *gc.C) { +func (s *clientSuite) TestBlockMakeHA(c *gc.C) { // Block all changes. 
- s.BlockAllChanges(c, "TestBlockEnsureAvailability") - - ensureAvailabilityResult, err := s.ensureAvailability(c, 3, constraints.MustParse("mem=4G"), defaultSeries, nil) - s.AssertBlocked(c, err, "TestBlockEnsureAvailability") - - c.Assert(ensureAvailabilityResult.Maintained, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Added, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + s.BlockAllChanges(c, "TestBlockEnableHA") + + enableHAResult, err := s.enableHA(c, 3, constraints.MustParse("mem=4G"), defaultSeries, nil) + s.AssertBlocked(c, err, "TestBlockEnableHA") + + c.Assert(enableHAResult.Maintained, gc.HasLen, 0) + c.Assert(enableHAResult.Added, gc.HasLen, 0) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err := s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) c.Assert(machines, gc.HasLen, 1) } -func (s *clientSuite) TestEnsureAvailabilityPlacement(c *gc.C) { +func (s *clientSuite) TestEnableHAPlacement(c *gc.C) { placement := []string{"valid"} - ensureAvailabilityResult, err := s.ensureAvailability(c, 3, constraints.MustParse("mem=4G"), defaultSeries, placement) + enableHAResult, err := s.enableHA(c, 3, constraints.MustParse("mem=4G"), defaultSeries, placement) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err := s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) @@ -233,7 +233,7 @@ } } -func (s *clientSuite) TestEnsureAvailabilityPlacementTo(c *gc.C) { +func (s *clientSuite) TestEnableHAPlacementTo(c *gc.C) { _, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) s.pingers = append(s.pingers, s.setAgentPresence(c, "1")) @@ -243,12 +243,12 @@ s.pingers = append(s.pingers, s.setAgentPresence(c, "2")) placement := []string{"1", "2"} - ensureAvailabilityResult, err := s.ensureAvailability(c, 3, emptyCons, defaultSeries, placement) + enableHAResult, err := s.enableHA(c, 3, emptyCons, defaultSeries, placement) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.DeepEquals, []string{"machine-1", "machine-2"}) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.HasLen, 0) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.DeepEquals, []string{"machine-1", "machine-2"}) machines, err := s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) @@ -263,15 +263,15 @@ } } -func (s *clientSuite) TestEnsureAvailability0Preserves(c *gc.C) { +func (s *clientSuite) TestEnableHA0Preserves(c *gc.C) { // A value of 0 says either "if I'm not HA, make me HA" or "preserve my // current HA settings". 
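
As a hedged aside on the semantics exercised by these "0 preserves" tests (grounded in the EnableHA plumbing shown in highavailability.go above): a count of zero never shrinks a cluster; it either establishes the default of three controllers, as the first test above shows, or tops an existing cluster back up to its current size.

    // Zero preserves: replace dead controllers, never resize.
    changes, err := st.EnableHA(0, constraints.Value{}, "quantal", nil)
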
- ensureAvailabilityResult, err := s.ensureAvailability(c, 0, emptyCons, defaultSeries, nil) + enableHAResult, err := s.enableHA(c, 0, emptyCons, defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err := s.State.AllMachines() c.Assert(machines, gc.HasLen, 3) @@ -280,27 +280,27 @@ defer assertKill(c, pingerB) // Now, we keep agent 1 alive, but not agent 2, calling - // EnsureAvailability(0) again will cause us to start another machine - ensureAvailabilityResult, err = s.ensureAvailability(c, 0, emptyCons, defaultSeries, nil) + // EnableHA(0) again will cause us to start another machine + enableHAResult, err = s.enableHA(c, 0, emptyCons, defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0", "machine-1"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-3"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0", "machine-1"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-3"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err = s.State.AllMachines() c.Assert(err, jc.ErrorIsNil) c.Assert(machines, gc.HasLen, 4) } -func (s *clientSuite) TestEnsureAvailability0Preserves5(c *gc.C) { +func (s *clientSuite) TestEnableHA0Preserves5(c *gc.C) { // Start off with 5 servers - ensureAvailabilityResult, err := s.ensureAvailability(c, 5, emptyCons, defaultSeries, nil) + enableHAResult, err := s.enableHA(c, 5, emptyCons, defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2", "machine-3", "machine-4"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2", "machine-3", "machine-4"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err := s.State.AllMachines() c.Assert(machines, gc.HasLen, 5) @@ -313,48 +313,48 @@ pingerD := s.setAgentPresence(c, "3") defer assertKill(c, pingerD) // Keeping all alive but one, will bring up 1 more server to preserve 5 - ensureAvailabilityResult, err = s.ensureAvailability(c, 0, emptyCons, defaultSeries, nil) + enableHAResult, err = s.enableHA(c, 0, emptyCons, defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0", "machine-1", + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0", "machine-1", "machine-2", "machine-3"}) - 
c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-5"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-5"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err = s.State.AllMachines() c.Assert(machines, gc.HasLen, 6) c.Assert(err, jc.ErrorIsNil) } -func (s *clientSuite) TestEnsureAvailabilityErrors(c *gc.C) { - ensureAvailabilityResult, err := s.ensureAvailability(c, -1, emptyCons, defaultSeries, nil) - c.Assert(err, gc.ErrorMatches, "number of state servers must be odd and non-negative") +func (s *clientSuite) TestEnableHAErrors(c *gc.C) { + enableHAResult, err := s.enableHA(c, -1, emptyCons, defaultSeries, nil) + c.Assert(err, gc.ErrorMatches, "number of controllers must be odd and non-negative") - ensureAvailabilityResult, err = s.ensureAvailability(c, 3, emptyCons, defaultSeries, nil) + enableHAResult, err = s.enableHA(c, 3, emptyCons, defaultSeries, nil) c.Assert(err, jc.ErrorIsNil) - c.Assert(ensureAvailabilityResult.Maintained, gc.DeepEquals, []string{"machine-0"}) - c.Assert(ensureAvailabilityResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.DeepEquals, []string{"machine-0"}) + c.Assert(enableHAResult.Added, gc.DeepEquals, []string{"machine-1", "machine-2"}) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) - _, err = s.ensureAvailability(c, 1, emptyCons, defaultSeries, nil) - c.Assert(err, gc.ErrorMatches, "failed to create new state server machines: cannot reduce state server count") + _, err = s.enableHA(c, 1, emptyCons, defaultSeries, nil) + c.Assert(err, gc.ErrorMatches, "failed to create new controller machines: cannot reduce controller count") } -func (s *clientSuite) TestEnsureAvailabilityHostedEnvErrors(c *gc.C) { - st2 := s.Factory.MakeEnvironment(c, &factory.EnvParams{ConfigAttrs: coretesting.Attrs{"state-server": false}}) +func (s *clientSuite) TestEnableHAHostedEnvErrors(c *gc.C) { + st2 := s.Factory.MakeModel(c, &factory.ModelParams{ConfigAttrs: coretesting.Attrs{"controller": false}}) defer st2.Close() haServer, err := highavailability.NewHighAvailabilityAPI(st2, s.resources, s.authoriser) c.Assert(err, jc.ErrorIsNil) - ensureAvailabilityResult, err := ensureAvailability(c, haServer, 3, constraints.MustParse("mem=4G"), defaultSeries, nil) - c.Assert(errors.Cause(err), gc.ErrorMatches, "unsupported with hosted environments") + enableHAResult, err := enableHA(c, haServer, 3, constraints.MustParse("mem=4G"), defaultSeries, nil) + c.Assert(errors.Cause(err), gc.ErrorMatches, "unsupported with hosted models") - c.Assert(ensureAvailabilityResult.Maintained, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Added, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Removed, gc.HasLen, 0) - c.Assert(ensureAvailabilityResult.Converted, gc.HasLen, 0) + c.Assert(enableHAResult.Maintained, gc.HasLen, 0) + c.Assert(enableHAResult.Added, gc.HasLen, 0) + c.Assert(enableHAResult.Removed, gc.HasLen, 0) + c.Assert(enableHAResult.Converted, gc.HasLen, 0) machines, err := st2.AllMachines() c.Assert(err, jc.ErrorIsNil) === removed directory 'src/github.com/juju/juju/apiserver/http' === removed file 'src/github.com/juju/juju/apiserver/http/http.go' 
--- src/github.com/juju/juju/apiserver/http/http.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/http/http.go 1970-01-01 00:00:00 +0000 @@ -1,27 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package http - -// DigestAlgorithm is one of the values in the IANA registry. See -// RFC 3230 and 5843. -type DigestAlgorithm string - -const ( - // DigestSHA is the HTTP digest algorithm value used in juju's HTTP code. - DigestSHA DigestAlgorithm = "SHA" - - // The values used for content-type in juju's direct HTTP code: - - // CTypeJSON is the HTTP content-type value used for JSON content. - CTypeJSON = "application/json" - - // CTypeRaw is the HTTP content-type value used for raw, unformattedcontent. - CTypeRaw = "application/octet-stream" - - // CTypeJS is the HTTP content-type value used for javascript. - CTypeJS = "application/javascript" - - // CTypeXJS is the outdated HTTP content-type value used for javascript. - CTypeXJS = "application/x-javascript" -) === removed file 'src/github.com/juju/juju/apiserver/http/package_test.go' --- src/github.com/juju/juju/apiserver/http/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/http/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package http_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/github.com/juju/juju/apiserver/http/request.go' --- src/github.com/juju/juju/apiserver/http/request.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/http/request.go 1970-01-01 00:00:00 +0000 @@ -1,148 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package http - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/http" - "net/textproto" - "net/url" - "path" - "strings" - - "github.com/juju/errors" -) - -// NewRequest returns a new HTTP request suitable for the API. -func NewRequest(method string, baseURL *url.URL, pth, uuid, tag, pw string) (*http.Request, error) { - baseURL.Path = path.Join("/environment", uuid, pth) - - req, err := http.NewRequest(method, baseURL.String(), nil) - if err != nil { - return nil, errors.Annotate(err, "while building HTTP request") - } - - req.SetBasicAuth(tag, pw) - return req, nil -} - -// SetRequestArgs JSON-encodes the args and sets them as the request body. -func SetRequestArgs(req *http.Request, args interface{}) error { - data, err := json.Marshal(args) - if err != nil { - return errors.Annotate(err, "while serializing args") - } - - req.Header.Set("Content-Type", CTypeJSON) - req.Body = ioutil.NopCloser(bytes.NewBuffer(data)) - return nil -} - -// AttachToRequest attaches a reader's data to the request body as -// multi-part data, along with associated metadata. "name" is used to -// identify the attached "file", so a filename is an appropriate value. -func AttachToRequest(req *http.Request, attached io.Reader, meta interface{}, name string) error { - var parts bytes.Buffer - - // Set up the multi-part portion of the body. - writer := multipart.NewWriter(&parts) - req.Header.Set("Content-Type", writer.FormDataContentType()) - - // Initialize the request body. 
- req.Body = ioutil.NopCloser(io.MultiReader( - &parts, - attached, - bytes.NewBufferString("\r\n--"+writer.Boundary()+"--\r\n"), - )) - - // Set the metadata part. - header := make(textproto.MIMEHeader) - header.Set("Content-Disposition", `form-data; name="metadata"`) - header.Set("Content-Type", CTypeJSON) - part, err := writer.CreatePart(header) - if err != nil { - return errors.Trace(err) - } - if err := json.NewEncoder(part).Encode(meta); err != nil { - return errors.Trace(err) - } - - // Set the attached part. - _, err = writer.CreateFormFile("attached", name) - if err != nil { - return errors.Trace(err) - } - // We don't actually write the reader's data to the part. Instead We - // use a chained reader to facilitate streaming directly from the - // reader. - return nil -} - -// ExtractRequestAttachment extracts the attached file and its metadata -// from the multipart data in the request. -func ExtractRequestAttachment(req *http.Request, metaResult interface{}) (io.ReadCloser, error) { - ctype := req.Header.Get("Content-Type") - mediaType, cParams, err := mime.ParseMediaType(ctype) - if err != nil { - return nil, errors.Annotate(err, "while parsing content type header") - } - - if !strings.HasPrefix(mediaType, "multipart/") { - return nil, errors.Errorf("expected multipart Content-Type, got %q", mediaType) - } - reader := multipart.NewReader(req.Body, cParams["boundary"]) - - // Extract the metadata. - part, err := reader.NextPart() - if err != nil { - if err == io.EOF { - return nil, errors.New("missing metadata") - } - return nil, errors.Trace(err) - } - - if err := checkContentType(part.Header, CTypeJSON); err != nil { - return nil, errors.Trace(err) - } - if err := json.NewDecoder(part).Decode(metaResult); err != nil { - return nil, errors.Trace(err) - } - - // Extract the archive. - part, err = reader.NextPart() - if err != nil { - if err == io.EOF { - return nil, errors.New("missing archive") - } - return nil, errors.Trace(err) - } - if err := checkContentType(part.Header, CTypeRaw); err != nil { - return nil, errors.Trace(err) - } - // We're not going to worry about verifying that the file matches the - // metadata (e.g. size, checksum). - archive := part - - // We are going to trust that there aren't any more attachments after - // the file. If there are, we ignore them. - - return archive, nil -} - -type getter interface { - Get(string) string -} - -func checkContentType(header getter, expected string) error { - ctype := header.Get("Content-Type") - if ctype != expected { - return errors.Errorf("expected Content-Type %q, got %q", expected, ctype) - } - return nil -} === removed file 'src/github.com/juju/juju/apiserver/http/request_test.go' --- src/github.com/juju/juju/apiserver/http/request_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/http/request_test.go 1970-01-01 00:00:00 +0000 @@ -1,33 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package http_test - -import ( - "net/url" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - apihttp "github.com/juju/juju/apiserver/http" - apihttptesting "github.com/juju/juju/apiserver/http/testing" - "github.com/juju/juju/testing" -) - -type requestSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&requestSuite{}) - -func (s *requestSuite) TestNewRequestSuccess(c *gc.C) { - baseURL, err := url.Parse("https://localhost:8080/") - c.Assert(err, jc.ErrorIsNil) - uuid := "abcd-efedcb-012345-6789" - tag := "machine-0" - pw := "secure" - req, err := apihttp.NewRequest("GET", baseURL, "somefacade", uuid, tag, pw) - c.Assert(err, jc.ErrorIsNil) - - apihttptesting.CheckRequest(c, req, "GET", tag, pw, "localhost", "somefacade") -} === removed file 'src/github.com/juju/juju/apiserver/http/response.go' --- src/github.com/juju/juju/apiserver/http/response.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/http/response.go 1970-01-01 00:00:00 +0000 @@ -1,63 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package http - -import ( - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/juju/errors" - - "github.com/juju/juju/apiserver/params" -) - -// ExtractJSONResult unserializes the JSON-encoded result into the -// provided struct. -func ExtractJSONResult(resp *http.Response, result interface{}) error { - // We defer closing the body here because we want it closed whether - // or not the subsequent read fails. - defer resp.Body.Close() - - if resp.Header.Get("Content-Type") != CTypeJSON { - return errors.Errorf(`expected "application/json" content type, got %q`, resp.Header.Get("Content-Type")) - } - - err := json.NewDecoder(resp.Body).Decode(result) - return errors.Trace(err) -} - -// ExtractAPIError returns the failure serialized in the response -// body. If there is no failure (an OK status code), it simply returns -// nil. -func ExtractAPIError(resp *http.Response) (*params.Error, error) { - if resp.StatusCode == http.StatusOK { - return nil, nil - } - // We defer closing the body here because we want it closed whether - // or not the subsequent read fails. - defer resp.Body.Close() - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errors.Annotate(err, "while reading HTTP response") - } - - var failure params.Error - if resp.Header.Get("Content-Type") == CTypeJSON { - if err := json.Unmarshal(body, &failure); err != nil { - return nil, errors.Annotate(err, "while unserializing the error") - } - } else { - switch resp.StatusCode { - case http.StatusNotFound, http.StatusMethodNotAllowed: - failure.Code = params.CodeNotImplemented - default: - // Leave Code empty. - } - - failure.Message = string(body) - } - return &failure, nil -} === removed file 'src/github.com/juju/juju/apiserver/http/response_test.go' --- src/github.com/juju/juju/apiserver/http/response_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/http/response_test.go 1970-01-01 00:00:00 +0000 @@ -1,82 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package http_test - -import ( - "net/http" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - apihttp "github.com/juju/juju/apiserver/http" - apihttptesting "github.com/juju/juju/apiserver/http/testing" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/testing" -) - -type responseSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&responseSuite{}) - -func (s *responseSuite) TestExtractAPIErrorFailure(c *gc.C) { - original := ¶ms.Error{ - Message: "something went wrong!", - } - response := apihttptesting.NewFailureResponse(original) - failure, err := apihttp.ExtractAPIError(&response.Response) - c.Assert(err, jc.ErrorIsNil) - - c.Check(failure, gc.Not(gc.Equals), original) - c.Check(failure, gc.DeepEquals, original) -} - -func (s *responseSuite) TestExtractAPIErrorWrongContentType(c *gc.C) { - original := ¶ms.Error{ - Message: "something went wrong!", - } - response := apihttptesting.NewFailureResponse(original) - response.Header.Del("Content-Type") - failure, err := apihttp.ExtractAPIError(&response.Response) - c.Assert(err, jc.ErrorIsNil) - - c.Check(failure.Message, gc.Equals, `{"Message":"something went wrong!","Code":""}`+"\n") - c.Check(failure.Code, gc.Equals, "") -} - -func (s *responseSuite) TestExtractAPIErrorString(c *gc.C) { - response := apihttptesting.NewErrorResponse(http.StatusInternalServerError, "something went wrong!") - failure, err := apihttp.ExtractAPIError(&response.Response) - c.Assert(err, jc.ErrorIsNil) - - c.Check(failure.Message, gc.Equals, "something went wrong!") - c.Check(failure.Code, gc.Equals, "") -} - -func (s *responseSuite) TestExtractAPIErrorNotFound(c *gc.C) { - response := apihttptesting.NewErrorResponse(http.StatusNotFound, "something went wrong!") - failure, err := apihttp.ExtractAPIError(&response.Response) - c.Assert(err, jc.ErrorIsNil) - - c.Check(failure.Message, gc.Equals, "something went wrong!") - c.Check(failure.Code, gc.Equals, params.CodeNotImplemented) -} - -func (s *responseSuite) TestExtractAPIErrorMethodNotAllowed(c *gc.C) { - response := apihttptesting.NewErrorResponse(http.StatusMethodNotAllowed, "something went wrong!") - failure, err := apihttp.ExtractAPIError(&response.Response) - c.Assert(err, jc.ErrorIsNil) - - c.Check(failure.Message, gc.Equals, "something went wrong!") - c.Check(failure.Code, gc.Equals, params.CodeNotImplemented) -} - -func (s *responseSuite) TestExtractAPIErrorOK(c *gc.C) { - response := apihttptesting.NewHTTPResponse() - failure, err := apihttp.ExtractAPIError(&response.Response) - c.Assert(err, jc.ErrorIsNil) - - c.Check(failure, gc.IsNil) -} === removed directory 'src/github.com/juju/juju/apiserver/http/testing' === removed file 'src/github.com/juju/juju/apiserver/http/testing/request.go' --- src/github.com/juju/juju/apiserver/http/testing/request.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/http/testing/request.go 1970-01-01 00:00:00 +0000 @@ -1,24 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - "encoding/base64" - "net/http" - - gc "gopkg.in/check.v1" -) - -// CheckRequest verifies that the HTTP request matches the args -// as an API request should. We only check API-related request fields. 
-func CheckRequest(c *gc.C, req *http.Request, method, user, pw, host, pth string) { - c.Check(req.Method, gc.Equals, method) - - url := `https://` + host + `:\d+/environment/[-0-9a-f]+/` + pth - c.Check(req.URL.String(), gc.Matches, url) - - c.Assert(req.Header, gc.HasLen, 1) - auth := base64.StdEncoding.EncodeToString([]byte(user + ":" + pw)) - c.Check(req.Header.Get("Authorization"), gc.Equals, "Basic "+auth) -} === removed file 'src/github.com/juju/juju/apiserver/http/testing/response.go' --- src/github.com/juju/juju/apiserver/http/testing/response.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/http/testing/response.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - apihttp "github.com/juju/juju/apiserver/http" - "github.com/juju/juju/apiserver/params" -) - -// HTTPResponse is an HTTP response for use in testing. -type HTTPResponse struct { - http.Response - // Buffer is the file underlying Body. - Buffer bytes.Buffer -} - -// NewHTTPResponse returns an HTTP response with an OK status, -// no headers set, and an empty body. -func NewHTTPResponse() *HTTPResponse { - resp := HTTPResponse{ - Response: http.Response{ - StatusCode: http.StatusOK, - Header: make(http.Header), - }, - } - resp.Body = ioutil.NopCloser(&resp.Buffer) - return &resp -} - -// NewErrorResponse returns an HTTP response with the status and -// body set to the provided values. -func NewErrorResponse(statusCode int, msg string) *HTTPResponse { - resp := NewHTTPResponse() - resp.StatusCode = statusCode - if _, err := resp.Buffer.WriteString(msg); err != nil { - panic(fmt.Sprintf("could not write to buffer: %v", err)) - } - return resp -} - -// NewFailureResponse returns an HTTP response with the status set -// to 500 (Internal Server Error) and the body set to the JSON-encoded -// error. -func NewFailureResponse(failure *params.Error) *HTTPResponse { - resp := NewHTTPResponse() - resp.StatusCode = http.StatusInternalServerError - resp.Header.Set("Content-Type", apihttp.CTypeJSON) - if err := json.NewEncoder(&resp.Buffer).Encode(failure); err != nil { - panic(fmt.Sprintf("could not JSON-encode failure: %v", err)) - } - return resp -} === added directory 'src/github.com/juju/juju/apiserver/httpattachment' === added file 'src/github.com/juju/juju/apiserver/httpattachment/attachment.go' --- src/github.com/juju/juju/apiserver/httpattachment/attachment.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/httpattachment/attachment.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,171 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package httpattachment provides facilities for attaching a streaming +// blob of data and associated metadata to an HTTP API request, +// and for reading that blob on the server side. +package httpattachment + +import ( + "bytes" + "encoding/json" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strings" + + "github.com/juju/errors" + "github.com/juju/juju/apiserver/params" +) + +// NewBody returns an HTTP request body and content type +// suitable for using to make an HTTP request containing +// the given attached body data and JSON-marshaled metadata. +// +// The name parameter is used to identify the attached "file", so +// a filename is an appropriate value. 
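
A combined usage sketch for the two halves of this package, NewBody on the client and Get on the server; the file name, metadata values, and endpoint are hypothetical, and only the two signatures are taken from this diff:

    // Client side: build a streaming multipart body.
    meta := map[string]string{"kind": "backup"} // hypothetical metadata
    f, err := os.Open("payload.zip")
    if err != nil {
        return err
    }
    defer f.Close()
    body, contentType, err := httpattachment.NewBody(f, meta, "payload.zip")
    if err != nil {
        return err
    }
    req, err := http.NewRequest("PUT", uploadURL, body)
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", contentType)

    // Server side: decode the metadata part, then stream the blob.
    var gotMeta map[string]string
    blob, err := httpattachment.Get(serverReq, &gotMeta)
    if err != nil {
        return errors.Trace(err)
    }
    defer blob.Close()

Because the returned body is an io.ReadSeeker, retrying layers such as httpbakery.Client.DoWithBody can rewind it with Seek(0, 0) before resending.
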
+func NewBody(attached io.ReadSeeker, meta interface{}, name string) (body io.ReadSeeker, contentType string, err error) {
+	var parts bytes.Buffer
+
+	// Set up the multi-part portion of the body.
+	writer := multipart.NewWriter(&parts)
+
+	// Set the metadata part.
+	header := make(textproto.MIMEHeader)
+	header.Set("Content-Disposition", `form-data; name="metadata"`)
+	header.Set("Content-Type", params.ContentTypeJSON)
+	part, err := writer.CreatePart(header)
+	if err != nil {
+		return nil, "", errors.Trace(err)
+	}
+	if err := json.NewEncoder(part).Encode(meta); err != nil {
+		return nil, "", errors.Trace(err)
+	}
+
+	// Set the attached part.
+	_, err = writer.CreateFormFile("attached", name)
+	if err != nil {
+		return nil, "", errors.Trace(err)
+	}
+
+	// We don't actually write the reader's data to the part.
+	// Instead we use a chained reader to facilitate streaming
+	// directly from the reader.
+	//
+	// Technically this is boundary-breaking, as the knowledge of
+	// how to make multipart archives should be kept to the
+	// mime/multipart package, but doing it this way means we don't
+	// need to return a Writer which would be harder to turn into
+	// a ReadSeeker.
+	return newMultiReaderSeeker(
+		bytes.NewReader(parts.Bytes()),
+		attached,
+		strings.NewReader("\r\n--"+writer.Boundary()+"--\r\n"),
+	), writer.FormDataContentType(), nil
+}
+
+type multiReaderSeeker struct {
+	readers []io.ReadSeeker
+	index   int
+}
+
+// newMultiReaderSeeker returns an io.ReadSeeker implementation that
+// reads from all the given readers in turn. Its Seek method can be used
+// to seek to the start, but returns an error if used to seek anywhere
+// else (this corresponds with the needs of httpbakery.Client.DoWithBody
+// which needs to re-read the body when retrying the request).
+func newMultiReaderSeeker(readers ...io.ReadSeeker) *multiReaderSeeker {
+	return &multiReaderSeeker{
+		readers: readers,
+	}
+}
+
+// Read implements io.Reader.Read.
+func (r *multiReaderSeeker) Read(buf []byte) (int, error) {
+	if r.index >= len(r.readers) {
+		return 0, io.EOF
+	}
+	n, err := r.readers[r.index].Read(buf)
+	if err == io.EOF {
+		r.index++
+		err = nil
+	}
+	return n, err
+}
+
+// Seek implements io.Seeker.Seek. It can only be used to seek to the
+// start.
+func (r *multiReaderSeeker) Seek(offset int64, whence int) (int64, error) {
+	if offset != 0 || whence != 0 {
+		return 0, errors.New("can only seek to the start of multipart reader")
+	}
+	for _, reader := range r.readers {
+		if _, err := reader.Seek(0, 0); err != nil {
+			return 0, errors.Trace(err)
+		}
+	}
+	r.index = 0
+	return 0, nil
+}
+
+// Get extracts the attached file and its metadata from the multipart
+// data in the request. The metadata is JSON-unmarshaled into the value
+// pointed to by metaResult.
+func Get(req *http.Request, metaResult interface{}) (io.ReadCloser, error) {
+	ctype := req.Header.Get("Content-Type")
+	mediaType, cParams, err := mime.ParseMediaType(ctype)
+	if err != nil {
+		return nil, errors.Annotate(err, "while parsing content type header")
+	}
+
+	if !strings.HasPrefix(mediaType, "multipart/") {
+		return nil, errors.Errorf("expected multipart Content-Type, got %q", mediaType)
+	}
+	reader := multipart.NewReader(req.Body, cParams["boundary"])
+
+	// Extract the metadata.
+	part, err := reader.NextPart()
+	if err != nil {
+		if err == io.EOF {
+			return nil, errors.New("missing metadata")
+		}
+		return nil, errors.Trace(err)
+	}
+
+	if err := checkContentType(part.Header, params.ContentTypeJSON); err != nil {
+		return nil, errors.Trace(err)
+	}
+	if err := json.NewDecoder(part).Decode(metaResult); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Extract the archive.
+	part, err = reader.NextPart()
+	if err != nil {
+		if err == io.EOF {
+			return nil, errors.New("missing archive")
+		}
+		return nil, errors.Trace(err)
+	}
+	if err := checkContentType(part.Header, params.ContentTypeRaw); err != nil {
+		return nil, errors.Trace(err)
+	}
+	// We're not going to worry about verifying that the file matches the
+	// metadata (e.g. size, checksum).
+	archive := part
+
+	// We are going to trust that there aren't any more attachments after
+	// the file. If there are, we ignore them.
+
+	return archive, nil
+}
+
+func checkContentType(h textproto.MIMEHeader, expected string) error {
+	ctype := h.Get("Content-Type")
+	if ctype != expected {
+		return errors.Errorf("expected Content-Type %q, got %q", expected, ctype)
+	}
+	return nil
+}

=== added file 'src/github.com/juju/juju/apiserver/httpattachment/attachment_test.go'
--- src/github.com/juju/juju/apiserver/httpattachment/attachment_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/httpattachment/attachment_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,19 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package httpattachment_test
+
+import (
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/testing"
+)
+
+type requestSuite struct {
+	testing.BaseSuite
+}
+
+var _ = gc.Suite(&requestSuite{})
+
+// TODO the functions in this package should be tested directly.
+// https://bugs.launchpad.net/juju-core/+bug/1503990

=== added file 'src/github.com/juju/juju/apiserver/httpattachment/package_test.go'
--- src/github.com/juju/juju/apiserver/httpattachment/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/httpattachment/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package httpattachment_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/juju/apiserver/httpcontext.go'
--- src/github.com/juju/juju/apiserver/httpcontext.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/httpcontext.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,196 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package apiserver
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/state"
+)
+
+// httpContext provides context for HTTP handlers.
+type httpContext struct {
+	// strictValidation means that empty modelUUID values are not valid.
+	strictValidation bool
+	// controllerModelOnly only validates the controller model.
+	controllerModelOnly bool
+	// srv holds the API server instance.
+	srv *Server
+}
+
+type errorSender interface {
+	sendError(w http.ResponseWriter, code int, err error)
+}
+
+var errUnauthorized = errors.NewUnauthorized(nil, "unauthorized")
+
+// stateForRequestUnauthenticated returns a state instance appropriate for
+// using for the model implicit in the given request
+// without checking any authentication information.
+func (ctxt *httpContext) stateForRequestUnauthenticated(r *http.Request) (*state.State, error) {
+	modelUUID, err := validateModelUUID(validateArgs{
+		statePool:           ctxt.srv.statePool,
+		modelUUID:           r.URL.Query().Get(":modeluuid"),
+		strict:              ctxt.strictValidation,
+		controllerModelOnly: ctxt.controllerModelOnly,
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	st, err := ctxt.srv.statePool.Get(modelUUID)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return st, nil
+}
+
+// stateForRequestAuthenticated returns a state instance appropriate for
+// using for the model implicit in the given request.
+// It also returns the authenticated entity.
+func (ctxt *httpContext) stateForRequestAuthenticated(r *http.Request) (*state.State, state.Entity, error) {
+	st, err := ctxt.stateForRequestUnauthenticated(r)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+	req, err := ctxt.loginRequest(r)
+	if err != nil {
+		return nil, nil, errors.NewUnauthorized(err, "")
+	}
+	entity, _, err := checkCreds(st, req, true, ctxt.srv.authCtxt)
+	if err != nil {
+		// All errors other than a macaroon-discharge error count as
+		// unauthorized at this point.
+		if !common.IsDischargeRequiredError(err) {
+			err = errors.NewUnauthorized(err, "")
+		}
+		return nil, nil, errors.Trace(err)
+	}
+	return st, entity, nil
+}
+
+// stateForRequestAuthenticatedUser is like stateForRequestAuthenticated
+// except that it also verifies that the authenticated entity is a user.
+func (ctxt *httpContext) stateForRequestAuthenticatedUser(r *http.Request) (*state.State, state.Entity, error) {
+	st, entity, err := ctxt.stateForRequestAuthenticated(r)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+	switch entity.Tag().(type) {
+	case names.UserTag:
+		return st, entity, nil
+	default:
+		return nil, nil, errors.Trace(common.ErrBadCreds)
+	}
+}
+
+// stateForRequestAuthenticatedAgent is like stateForRequestAuthenticated
+// except that it also verifies that the authenticated entity is an agent
+// (machine or unit).
+func (ctxt *httpContext) stateForRequestAuthenticatedAgent(r *http.Request) (*state.State, state.Entity, error) {
+	st, entity, err := ctxt.stateForRequestAuthenticated(r)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+	switch entity.Tag().(type) {
+	case names.MachineTag, names.UnitTag:
+		return st, entity, nil
+	default:
+		logger.Errorf("attempt to log in as an agent by %v", entity.Tag())
+		return nil, nil, errors.Trace(common.ErrBadCreds)
+	}
+}
+
+// loginRequest forms a LoginRequest from the information
+// in the given HTTP request.
+func (ctxt *httpContext) loginRequest(r *http.Request) (params.LoginRequest, error) {
+	authHeader := r.Header.Get("Authorization")
+	if authHeader == "" {
+		// No authorization header implies an attempt
+		// to login with macaroon authentication.
+		return params.LoginRequest{
+			Macaroons: httpbakery.RequestMacaroons(r),
+		}, nil
+	}
+	parts := strings.Fields(authHeader)
+	if len(parts) != 2 || parts[0] != "Basic" {
+		// Invalid header format or no header provided.
+		return params.LoginRequest{}, errors.New("invalid request format")
+	}
+	// Challenge is a base64-encoded "tag:pass" string.
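
For reference, a sketch of the header a client would send to satisfy this parsing; the tag, password, and nonce values are made up, and only the "Basic" scheme and params.MachineNonceHeader come from the code here:

    password := "sekrit"   // illustrative
    nonce := "fake_nonce"  // illustrative
    auth := base64.StdEncoding.EncodeToString([]byte("machine-0:" + password))
    req.Header.Set("Authorization", "Basic "+auth)
    // Machine agents additionally send their provisioning nonce:
    req.Header.Set(params.MachineNonceHeader, nonce)
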
+ // See RFC 2617, Section 2. + challenge, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return params.LoginRequest{}, errors.New("invalid request format") + } + tagPass := strings.SplitN(string(challenge), ":", 2) + if len(tagPass) != 2 { + return params.LoginRequest{}, errors.New("invalid request format") + } + // Ensure that a sensible tag was passed. + _, err = names.ParseTag(tagPass[0]) + if err != nil { + return params.LoginRequest{}, errors.Trace(common.ErrBadCreds) + } + return params.LoginRequest{ + AuthTag: tagPass[0], + Credentials: tagPass[1], + Nonce: r.Header.Get(params.MachineNonceHeader), + }, nil +} + +// stop returns a channel which will be closed when a handler should +// exit. +func (ctxt *httpContext) stop() <-chan struct{} { + return ctxt.srv.tomb.Dying() +} + +// sendJSON writes a JSON-encoded response value +// to the given writer along with a trailing newline. +func sendJSON(w io.Writer, response interface{}) { + body, err := json.Marshal(response) + if err != nil { + logger.Errorf("cannot marshal JSON result %#v: %v", response, err) + return + } + body = append(body, '\n') + w.Write(body) +} + +// sendStatusAndJSON sends an HTTP status code and +// a JSON-encoded response to a client. +func sendStatusAndJSON(w http.ResponseWriter, statusCode int, response interface{}) { + body, err := json.Marshal(response) + if err != nil { + logger.Errorf("cannot marshal JSON result %#v: %v", response, err) + return + } + + if statusCode == http.StatusUnauthorized { + w.Header().Set("WWW-Authenticate", `Basic realm="juju"`) + } + w.Header().Set("Content-Type", params.ContentTypeJSON) + w.Header().Set("Content-Length", fmt.Sprint(len(body))) + w.WriteHeader(statusCode) + w.Write(body) +} + +// sendError sends a JSON-encoded error response +// for errors encountered during processing. +func sendError(w http.ResponseWriter, err error) { + err1, statusCode := common.ServerErrorAndStatus(err) + logger.Debugf("sending error: %d %v", statusCode, err1) + sendStatusAndJSON(w, statusCode, ¶ms.ErrorResult{ + Error: err1, + }) +} === removed file 'src/github.com/juju/juju/apiserver/httphandler.go' --- src/github.com/juju/juju/apiserver/httphandler.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/httphandler.go 1970-01-01 00:00:00 +0000 @@ -1,120 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package apiserver - -import ( - "encoding/base64" - "net/http" - "strings" - - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state" -) - -// errorSender implementations send errors back to the caller. -type errorSender interface { - sendError(w http.ResponseWriter, statusCode int, message string) -} - -// httpHandler handles http requests through HTTPS in the API server. -type httpHandler struct { - // A cache of State instances for different environments. - statePool *state.StatePool - // strictValidation means that empty envUUID values are not valid. - strictValidation bool - // stateServerEnvOnly only validates the state server environment - stateServerEnvOnly bool -} - -// httpStateWrapper reflects a state connection for a given http connection. -type httpStateWrapper struct { - state *state.State -} - -func (h *httpHandler) getEnvironUUID(r *http.Request) string { - return r.URL.Query().Get(":envuuid") -} - -// authError sends an unauthorized error. 
-func (h *httpHandler) authError(w http.ResponseWriter, sender errorSender) { - w.Header().Set("WWW-Authenticate", `Basic realm="juju"`) - sender.sendError(w, http.StatusUnauthorized, "unauthorized") -} - -func (h *httpHandler) validateEnvironUUID(r *http.Request) (*httpStateWrapper, error) { - envUUID := h.getEnvironUUID(r) - envState, err := validateEnvironUUID(validateArgs{ - statePool: h.statePool, - envUUID: envUUID, - strict: h.strictValidation, - stateServerEnvOnly: h.stateServerEnvOnly, - }) - if err != nil { - return nil, errors.Trace(err) - } - return &httpStateWrapper{state: envState}, nil -} - -// authenticate parses HTTP basic authentication and authorizes the -// request by looking up the provided tag and password against state. -func (h *httpStateWrapper) authenticate(r *http.Request) (names.Tag, error) { - parts := strings.Fields(r.Header.Get("Authorization")) - if len(parts) != 2 || parts[0] != "Basic" { - // Invalid header format or no header provided. - return nil, errors.New("invalid request format") - } - // Challenge is a base64-encoded "tag:pass" string. - // See RFC 2617, Section 2. - challenge, err := base64.StdEncoding.DecodeString(parts[1]) - if err != nil { - return nil, errors.New("invalid request format") - } - tagPass := strings.SplitN(string(challenge), ":", 2) - if len(tagPass) != 2 { - return nil, errors.New("invalid request format") - } - // Ensure that a sensible tag was passed. - tag, err := names.ParseTag(tagPass[0]) - if err != nil { - return nil, common.ErrBadCreds - } - _, _, err = checkCreds(h.state, params.LoginRequest{ - AuthTag: tagPass[0], - Credentials: tagPass[1], - Nonce: r.Header.Get("X-Juju-Nonce"), - }, true) - return tag, err -} - -func (h *httpStateWrapper) authenticateUser(r *http.Request) error { - tag, err := h.authenticate(r) - if err != nil { - return err - } - switch tag.(type) { - case names.UserTag: - return nil - default: - return common.ErrBadCreds - } -} - -func (h *httpStateWrapper) authenticateAgent(r *http.Request) (names.Tag, error) { - tag, err := h.authenticate(r) - if err != nil { - return nil, err - } - switch tag.(type) { - case names.MachineTag: - return tag, nil - case names.UnitTag: - return tag, nil - default: - return nil, common.ErrBadCreds - } -} === modified file 'src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go' --- src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/imagemanager/imagemanager.go 2016-03-22 15:18:22 +0000 @@ -16,7 +16,7 @@ var logger = loggo.GetLogger("juju.apiserver.imagemanager") func init() { - common.RegisterStandardFacade("ImageManager", 1, NewImageManagerAPI) + common.RegisterStandardFacade("ImageManager", 2, NewImageManagerAPI) } // ImageManager defines the methods on the imagemanager API end point. 
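The removed httphandler.go above and the new httpContext code at the top of this diff keep the same wire format for basic authentication: the Authorization header carries "Basic" followed by a base64-encoded "tag:pass" challenge, per RFC 2617, Section 2. Below is a minimal, self-contained sketch of the client side of that scheme; the tag, password, host, and path are illustrative values, not taken from this diff:

    package main

    import (
    	"encoding/base64"
    	"fmt"
    	"net/http"
    )

    // addBasicAuth sets the Authorization header in the form that
    // loginRequest parses: "Basic " + base64("tag:pass").
    func addBasicAuth(req *http.Request, tag, password string) {
    	challenge := base64.StdEncoding.EncodeToString([]byte(tag + ":" + password))
    	req.Header.Set("Authorization", "Basic "+challenge)
    }

    func main() {
    	// Hypothetical endpoint; real paths are routed by the API server.
    	req, err := http.NewRequest("GET", "https://controller.example:17070/model/some-uuid/images", nil)
    	if err != nil {
    		panic(err)
    	}
    	addBasicAuth(req, "machine-0", "secret-password")
    	fmt.Println(req.Header.Get("Authorization"))
    }

Machine agents additionally send their provisioning nonce in the header named by params.MachineNonceHeader, which loginRequest copies into LoginRequest.Nonce; requests with no Authorization header at all are treated as macaroon-based logins.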
=== modified file 'src/github.com/juju/juju/apiserver/imagemanager/imagemanager_test.go' --- src/github.com/juju/juju/apiserver/imagemanager/imagemanager_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/imagemanager/imagemanager_test.go 2016-03-22 15:18:22 +0000 @@ -67,7 +67,7 @@ func (s *imageManagerSuite) addImage(c *gc.C, content string) { var r io.Reader = bytes.NewReader([]byte(content)) addedMetadata := &imagestorage.Metadata{ - EnvUUID: s.State.EnvironUUID(), + ModelUUID: s.State.ModelUUID(), Kind: "lxc", Series: "trusty", Arch: "amd64", === modified file 'src/github.com/juju/juju/apiserver/imagemetadata/export_test.go' --- src/github.com/juju/juju/apiserver/imagemetadata/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/export_test.go 2016-03-22 15:18:22 +0000 @@ -3,7 +3,17 @@ package imagemetadata +import ( + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/state/cloudimagemetadata" +) + var ( - CreateAPI = createAPI - ParseMetadataFromParams = parseMetadataFromParams + CreateAPI = createAPI + ProcessErrors = processErrors ) + +func ParseMetadataFromParams(api *API, p params.CloudImageMetadata, env environs.Environ) (cloudimagemetadata.Metadata, error) { + return api.parseMetadataFromParams(p, env) +} === modified file 'src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go' --- src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/functions_test.go 2016-03-22 15:18:22 +0000 @@ -4,54 +4,67 @@ package imagemetadata_test import ( + "fmt" + + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/imagemetadata" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/configstore" + envtesting "github.com/juju/juju/environs/testing" + "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/testing" ) type funcSuite struct { - testing.BaseSuite + baseImageMetadataSuite + env environs.Environ expected cloudimagemetadata.Metadata } +var _ = gc.Suite(&funcSuite{}) + func (s *funcSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) + s.baseImageMetadataSuite.SetUpTest(c) + + cfg, err := config.New(config.NoDefaults, mockConfig()) + c.Assert(err, jc.ErrorIsNil) + s.env, err = environs.Prepare( + envtesting.BootstrapContext(c), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) + c.Assert(err, jc.ErrorIsNil) + s.state = s.constructState(cfg) s.expected = cloudimagemetadata.Metadata{ cloudimagemetadata.MetadataAttributes{ Stream: "released", - Source: cloudimagemetadata.Custom, + Source: "custom", + Series: config.LatestLtsSeries(), + Arch: "amd64", + Region: "dummy_region", }, + 0, "", } } -var _ = gc.Suite(&funcSuite{}) - -func (s *funcSuite) TestParseMetadataSourcePanic(c *gc.C) { - m := func() { imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{}) } - c.Assert(m, gc.PanicMatches, `unknown cloud image metadata source ""`) -} - -func (s *funcSuite) TestParseMetadataCustom(c *gc.C) { - m := imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{Source: "custom"}) - c.Assert(m, gc.DeepEquals, s.expected) - - m = 
imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{Source: "CusTOM"})
-	c.Assert(m, gc.DeepEquals, s.expected)
-}
-
-func (s *funcSuite) TestParseMetadataPublic(c *gc.C) {
-	s.expected.Source = cloudimagemetadata.Public
-
-	m := imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{Source: "public"})
-	c.Assert(m, gc.DeepEquals, s.expected)
-
-	m = imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{Source: "PubLic"})
+func (s *funcSuite) TestParseMetadataNoSource(c *gc.C) {
+	m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{}, s.env)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(m, gc.DeepEquals, s.expected)
+}
+
+func (s *funcSuite) TestParseMetadataAnySource(c *gc.C) {
+	s.expected.Source = "any"
+	m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{Source: "any"}, s.env)
+	c.Assert(err, jc.ErrorIsNil)
 	c.Assert(m, gc.DeepEquals, s.expected)
 }
 
@@ -59,16 +72,85 @@
 	stream := "happy stream"
 	s.expected.Stream = stream
 
-	m := imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{
-		Source: "custom",
-		Stream: stream,
-	})
+	m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{Stream: stream}, s.env)
+	c.Assert(err, jc.ErrorIsNil)
 	c.Assert(m, gc.DeepEquals, s.expected)
 }
 
 func (s *funcSuite) TestParseMetadataDefaultStream(c *gc.C) {
-	m := imagemetadata.ParseMetadataFromParams(params.CloudImageMetadata{
-		Source: "custom",
-	})
-	c.Assert(m, gc.DeepEquals, s.expected)
+	m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{}, s.env)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(m, gc.DeepEquals, s.expected)
+}
+
+func (s *funcSuite) TestParseMetadataAnyRegion(c *gc.C) {
+	region := "region"
+	s.expected.Region = region
+
+	m, err := imagemetadata.ParseMetadataFromParams(s.api, params.CloudImageMetadata{Region: region}, s.env)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(m, gc.DeepEquals, s.expected)
+}
+
+type funcMetadataSuite struct {
+	testing.BaseSuite
+}
+
+var _ = gc.Suite(&funcMetadataSuite{})
+
+func (s *funcMetadataSuite) TestProcessErrorsNil(c *gc.C) {
+	s.assertProcessErrorsNone(c, nil)
+}
+
+func (s *funcMetadataSuite) TestProcessErrorsEmpty(c *gc.C) {
+	s.assertProcessErrorsNone(c, []params.ErrorResult{})
+}
+
+func (s *funcMetadataSuite) TestProcessErrorsNilError(c *gc.C) {
+	s.assertProcessErrorsNone(c, []params.ErrorResult{{Error: nil}})
+}
+
+func (s *funcMetadataSuite) TestProcessErrorsEmptyMessageError(c *gc.C) {
+	s.assertProcessErrorsNone(c, []params.ErrorResult{{Error: &params.Error{Message: ""}}})
+}
+
+func (s *funcMetadataSuite) TestProcessErrorsFullError(c *gc.C) {
+	msg := "my bad"
+
+	errs := []params.ErrorResult{{Error: &params.Error{Message: msg}}}
+
+	expected := fmt.Sprintf(`
+saving some image metadata:
+%v`[1:], msg)
+
+	s.assertProcessErrors(c, errs, expected)
+}
+
+func (s *funcMetadataSuite) TestProcessErrorsMany(c *gc.C) {
+	msg1 := "my bad"
+	msg2 := "my good"
+
+	errs := []params.ErrorResult{
+		{Error: &params.Error{Message: msg1}},
+		{Error: &params.Error{Message: ""}},
+		{Error: nil},
+		{Error: &params.Error{Message: msg2}},
+	}
+
+	expected := fmt.Sprintf(`
+saving some image metadata:
+%v
+%v`[1:], msg1, msg2)
+
+	s.assertProcessErrors(c, errs, expected)
+}
+
+var process = imagemetadata.ProcessErrors
+
+func (s *funcMetadataSuite) assertProcessErrorsNone(c *gc.C, errs []params.ErrorResult) {
+	c.Assert(process(errs), jc.ErrorIsNil)
+}
+
+func (s *funcMetadataSuite) assertProcessErrors(c *gc.C, errs []params.ErrorResult,
expected string) { + c.Assert(process(errs), gc.ErrorMatches, expected) } === modified file 'src/github.com/juju/juju/apiserver/imagemetadata/metadata.go' --- src/github.com/juju/juju/apiserver/imagemetadata/metadata.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/metadata.go 2016-03-22 15:18:22 +0000 @@ -4,17 +4,27 @@ package imagemetadata import ( - "fmt" + "sort" "strings" + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/utils/series" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + envmetadata "github.com/juju/juju/environs/imagemetadata" + "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/state" "github.com/juju/juju/state/cloudimagemetadata" ) +var logger = loggo.GetLogger("juju.apiserver.imagemetadata") + func init() { - common.RegisterStandardFacade("ImageMetadata", 1, NewAPI) + common.RegisterStandardFacade("ImageMetadata", 2, NewAPI) } // API is the concrete implementation of the api end point @@ -30,7 +40,7 @@ resources *common.Resources, authorizer common.Authorizer, ) (*API, error) { - if !authorizer.AuthClient() && !authorizer.AuthEnvironManager() { + if !authorizer.AuthClient() && !authorizer.AuthModelManager() { return nil, common.ErrPerm } @@ -51,14 +61,14 @@ // List returns all found cloud image metadata that satisfy // given filter. -// Returned list contains metadata for custom images first, then public. +// Returned list contains metadata ordered by priority. func (api *API) List(filter params.ImageMetadataFilter) (params.ListCloudImageMetadataResult, error) { found, err := api.metadata.FindMetadata(cloudimagemetadata.MetadataFilter{ Region: filter.Region, Series: filter.Series, Arches: filter.Arches, Stream: filter.Stream, - VirtualType: filter.VirtualType, + VirtType: filter.VirtType, RootStorageType: filter.RootStorageType, }) if err != nil { @@ -72,12 +82,10 @@ } } - // First return metadata for custom images, then public. - // No other source for cloud images should exist at the moment. - // Once new source is identified, the order of returned metadata - // may need to be changed. - addAll(found[cloudimagemetadata.Custom]) - addAll(found[cloudimagemetadata.Public]) + for _, ms := range found { + addAll(ms) + } + sort.Sort(metadataList(all)) return params.ListCloudImageMetadataResult{Result: all}, nil } @@ -86,56 +94,235 @@ // It supports bulk calls. func (api *API) Save(metadata params.MetadataSaveParams) (params.ErrorResults, error) { all := make([]params.ErrorResult, len(metadata.Metadata)) + envCfg, err := api.metadata.ModelConfig() + if err != nil { + return params.ErrorResults{}, errors.Annotatef(err, "getting environ config") + } + env, err := environs.New(envCfg) + if err != nil { + return params.ErrorResults{}, errors.Annotatef(err, "getting environ") + } for i, one := range metadata.Metadata { - err := api.metadata.SaveMetadata(parseMetadataFromParams(one)) + md, err := api.parseMetadataListFromParams(one, env) + if err != nil { + all[i] = params.ErrorResult{Error: common.ServerError(err)} + continue + } + err = api.metadata.SaveMetadata(md) all[i] = params.ErrorResult{Error: common.ServerError(err)} } return params.ErrorResults{Results: all}, nil } +// Delete deletes cloud image metadata for given image ids. +// It supports bulk calls. 
+func (api *API) Delete(images params.MetadataImageIds) (params.ErrorResults, error) { + all := make([]params.ErrorResult, len(images.Ids)) + for i, imageId := range images.Ids { + err := api.metadata.DeleteMetadata(imageId) + all[i] = params.ErrorResult{common.ServerError(err)} + } + return params.ErrorResults{Results: all}, nil +} + func parseMetadataToParams(p cloudimagemetadata.Metadata) params.CloudImageMetadata { result := params.CloudImageMetadata{ ImageId: p.ImageId, Stream: p.Stream, Region: p.Region, + Version: p.Version, Series: p.Series, Arch: p.Arch, - VirtualType: p.VirtualType, + VirtType: p.VirtType, RootStorageType: p.RootStorageType, RootStorageSize: p.RootStorageSize, - Source: string(p.Source), + Source: p.Source, + Priority: p.Priority, } return result } -func parseMetadataFromParams(p params.CloudImageMetadata) cloudimagemetadata.Metadata { - - parseSource := func(s string) cloudimagemetadata.SourceType { - switch cloudimagemetadata.SourceType(strings.ToLower(s)) { - case cloudimagemetadata.Public: - return cloudimagemetadata.Public - case cloudimagemetadata.Custom: - return cloudimagemetadata.Custom - default: - panic(fmt.Sprintf("unknown cloud image metadata source %q", s)) +func (api *API) parseMetadataListFromParams( + p params.CloudImageMetadataList, env environs.Environ, +) ([]cloudimagemetadata.Metadata, error) { + results := make([]cloudimagemetadata.Metadata, len(p.Metadata)) + for i, metadata := range p.Metadata { + result, err := api.parseMetadataFromParams(metadata, env) + if err != nil { + return nil, errors.Trace(err) } + results[i] = result } + return results, nil +} +func (api *API) parseMetadataFromParams(p params.CloudImageMetadata, env environs.Environ) (cloudimagemetadata.Metadata, error) { result := cloudimagemetadata.Metadata{ cloudimagemetadata.MetadataAttributes{ Stream: p.Stream, Region: p.Region, + Version: p.Version, Series: p.Series, Arch: p.Arch, - VirtualType: p.VirtualType, + VirtType: p.VirtType, RootStorageType: p.RootStorageType, RootStorageSize: p.RootStorageSize, - Source: parseSource(p.Source), + Source: p.Source, }, + p.Priority, p.ImageId, } + + // Fill in any required default values. if p.Stream == "" { - result.Stream = "released" - } - return result + result.Stream = env.Config().ImageStream() + } + if p.Source == "" { + result.Source = "custom" + } + if result.Arch == "" { + result.Arch = "amd64" + } + if result.Series == "" { + result.Series = config.PreferredSeries(env.Config()) + } + if result.Region == "" { + // If the env supports regions, use the env default. + if r, ok := env.(simplestreams.HasRegion); ok { + spec, err := r.Region() + if err != nil { + return cloudimagemetadata.Metadata{}, errors.Annotatef(err, "getting cloud region") + } + result.Region = spec.Region + } + } + return result, nil +} + +// UpdateFromPublishedImages retrieves currently published image metadata and +// updates stored ones accordingly. 
+func (api *API) UpdateFromPublishedImages() error { + return api.retrievePublished() +} + +func (api *API) retrievePublished() error { + envCfg, err := api.metadata.ModelConfig() + if err != nil { + return errors.Annotatef(err, "getting environ config") + } + env, err := environs.New(envCfg) + if err != nil { + return errors.Annotatef(err, "getting environ") + } + + sources, err := environs.ImageMetadataSources(env) + if err != nil { + return errors.Annotatef(err, "getting cloud specific image metadata sources") + } + + cons := envmetadata.NewImageConstraint(simplestreams.LookupParams{}) + if inst, ok := env.(simplestreams.HasRegion); !ok { + // Published image metadata for some providers are in simple streams. + // Providers that do not rely on simplestreams, don't need to do anything here. + return nil + } else { + // If we can determine current region, + // we want only metadata specific to this region. + cloud, err := inst.Region() + if err != nil { + return errors.Annotatef(err, "getting cloud specific region information") + } + cons.CloudSpec = cloud + } + + // We want all relevant metadata from all data sources. + for _, source := range sources { + logger.Debugf("looking in data source %v", source.Description()) + metadata, info, err := envmetadata.Fetch([]simplestreams.DataSource{source}, cons) + if err != nil { + // Do not stop looking in other data sources if there is an issue here. + logger.Errorf("encountered %v while getting published images metadata from %v", err, source.Description()) + continue + } + err = api.saveAll(info, source.Priority(), metadata) + if err != nil { + // Do not stop looking in other data sources if there is an issue here. + logger.Errorf("encountered %v while saving published images metadata from %v", err, source.Description()) + } + } + return nil +} + +func (api *API) saveAll(info *simplestreams.ResolveInfo, priority int, published []*envmetadata.ImageMetadata) error { + metadata, parseErrs := convertToParams(info, priority, published) + + // Store converted metadata. + // Note that whether the metadata actually needs + // to be stored will be determined within this call. + errs, err := api.Save(metadata) + if err != nil { + return errors.Annotatef(err, "saving published images metadata") + } + + return processErrors(append(errs.Results, parseErrs...)) +} + +// convertToParams converts model-specific images metadata to structured metadata format. 
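+// Any published image whose version cannot be mapped to a series is
+// reported in the returned error results rather than aborting the
+// conversion of the remaining images.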
+var convertToParams = func(info *simplestreams.ResolveInfo, priority int, published []*envmetadata.ImageMetadata) (params.MetadataSaveParams, []params.ErrorResult) { + metadata := []params.CloudImageMetadataList{{}} + errs := []params.ErrorResult{} + for _, p := range published { + s, err := series.VersionSeries(p.Version) + if err != nil { + errs = append(errs, params.ErrorResult{Error: common.ServerError(err)}) + continue + } + + m := params.CloudImageMetadata{ + Source: info.Source, + ImageId: p.Id, + Stream: p.Stream, + Region: p.RegionName, + Arch: p.Arch, + VirtType: p.VirtType, + RootStorageType: p.Storage, + Series: s, + Priority: priority, + } + + metadata[0].Metadata = append(metadata[0].Metadata, m) + } + return params.MetadataSaveParams{Metadata: metadata}, errs +} + +func processErrors(errs []params.ErrorResult) error { + msgs := []string{} + for _, e := range errs { + if e.Error != nil && e.Error.Message != "" { + msgs = append(msgs, e.Error.Message) + } + } + if len(msgs) != 0 { + return errors.Errorf("saving some image metadata:\n%v", strings.Join(msgs, "\n")) + } + return nil +} + +// metadataList is a convenience type enabling to sort +// a collection of Metadata in order of priority. +type metadataList []params.CloudImageMetadata + +// Len implements sort.Interface +func (m metadataList) Len() int { + return len(m) +} + +// Less implements sort.Interface and sorts image metadata by priority. +func (m metadataList) Less(i, j int) bool { + return m[i].Priority < m[j].Priority +} + +// Swap implements sort.Interface +func (m metadataList) Swap(i, j int) { + m[i], m[j] = m[j], m[i] } === modified file 'src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go' --- src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/metadata_test.go 2016-03-22 15:18:22 +0000 @@ -22,47 +22,44 @@ found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, []string{findMetadata}) + s.assertCalls(c, findMetadata) } func (s *metadataSuite) TestFindEmpty(c *gc.C) { - s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { - s.calls = append(s.calls, findMetadata) - return map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata{}, nil + s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { + return map[string][]cloudimagemetadata.Metadata{}, nil } found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, []string{findMetadata}) + s.assertCalls(c, findMetadata) } func (s *metadataSuite) TestFindEmptyGroups(c *gc.C) { - s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { - s.calls = append(s.calls, findMetadata) - return map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata{ - cloudimagemetadata.Public: []cloudimagemetadata.Metadata{}, - cloudimagemetadata.Custom: []cloudimagemetadata.Metadata{}, + s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { + return map[string][]cloudimagemetadata.Metadata{ + "public": []cloudimagemetadata.Metadata{}, + "custom": []cloudimagemetadata.Metadata{}, }, nil } found, err := 
s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, []string{findMetadata}) + s.assertCalls(c, findMetadata) } func (s *metadataSuite) TestFindError(c *gc.C) { msg := "find error" - s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { - s.calls = append(s.calls, findMetadata) + s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { return nil, errors.New(msg) } found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, gc.ErrorMatches, msg) c.Assert(found.Result, gc.HasLen, 0) - s.assertCalls(c, []string{findMetadata}) + s.assertCalls(c, findMetadata) } func (s *metadataSuite) TestFindOrder(c *gc.C) { @@ -71,16 +68,15 @@ customImageId3 := "custom3" publicImageId := "public1" - s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { - s.calls = append(s.calls, findMetadata) - return map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata{ - cloudimagemetadata.Public: []cloudimagemetadata.Metadata{ - cloudimagemetadata.Metadata{ImageId: publicImageId}, + s.state.findMetadata = func(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { + return map[string][]cloudimagemetadata.Metadata{ + "public": []cloudimagemetadata.Metadata{ + cloudimagemetadata.Metadata{ImageId: publicImageId, Priority: 15}, }, - cloudimagemetadata.Custom: []cloudimagemetadata.Metadata{ - cloudimagemetadata.Metadata{ImageId: customImageId}, - cloudimagemetadata.Metadata{ImageId: customImageId2}, - cloudimagemetadata.Metadata{ImageId: customImageId3}, + "custom": []cloudimagemetadata.Metadata{ + cloudimagemetadata.Metadata{ImageId: customImageId, Priority: 87}, + cloudimagemetadata.Metadata{ImageId: customImageId2, Priority: 20}, + cloudimagemetadata.Metadata{ImageId: customImageId3, Priority: 56}, }, }, nil @@ -89,13 +85,14 @@ found, err := s.api.List(params.ImageMetadataFilter{}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Result, gc.HasLen, 4) + c.Assert(found.Result, jc.SameContents, []params.CloudImageMetadata{ - params.CloudImageMetadata{ImageId: customImageId}, - params.CloudImageMetadata{ImageId: customImageId2}, - params.CloudImageMetadata{ImageId: customImageId3}, - params.CloudImageMetadata{ImageId: publicImageId}, + params.CloudImageMetadata{ImageId: customImageId, Priority: 87}, + params.CloudImageMetadata{ImageId: customImageId3, Priority: 56}, + params.CloudImageMetadata{ImageId: customImageId2, Priority: 20}, + params.CloudImageMetadata{ImageId: publicImageId, Priority: 15}, }) - s.assertCalls(c, []string{findMetadata}) + s.assertCalls(c, findMetadata) } func (s *metadataSuite) TestSaveEmpty(c *gc.C) { @@ -103,7 +100,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(errs.Results, gc.HasLen, 0) // not expected to call state :D - s.assertCalls(c, []string{}) + s.assertCalls(c, environConfig) } func (s *metadataSuite) TestSave(c *gc.C) { @@ -112,19 +109,55 @@ } msg := "save error" - s.state.saveMetadata = func(m cloudimagemetadata.Metadata) error { - s.calls = append(s.calls, saveMetadata) - if len(s.calls) == 1 { + saveCalls := 0 + s.state.saveMetadata = func(m []cloudimagemetadata.Metadata) error { + saveCalls += 1 + c.Assert(m, gc.HasLen, saveCalls) + if saveCalls == 1 { // don't err on first call return nil } return errors.New(msg) } - errs, err := 
s.api.Save(params.MetadataSaveParams{Metadata: []params.CloudImageMetadata{m, m}}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(errs.Results, gc.HasLen, 2) - c.Assert(errs.Results[0].Error, gc.IsNil) - c.Assert(errs.Results[1].Error, gc.ErrorMatches, msg) - s.assertCalls(c, []string{saveMetadata, saveMetadata}) + errs, err := s.api.Save(params.MetadataSaveParams{ + Metadata: []params.CloudImageMetadataList{{ + Metadata: []params.CloudImageMetadata{m}, + }, { + Metadata: []params.CloudImageMetadata{m, m}, + }}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(errs.Results, gc.HasLen, 2) + c.Assert(errs.Results[0].Error, gc.IsNil) + c.Assert(errs.Results[1].Error, gc.ErrorMatches, msg) + s.assertCalls(c, environConfig, saveMetadata, saveMetadata) +} + +func (s *metadataSuite) TestDeleteEmpty(c *gc.C) { + errs, err := s.api.Delete(params.MetadataImageIds{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(errs.Results, gc.HasLen, 0) + // not expected to call state :D + s.assertCalls(c) +} + +func (s *metadataSuite) TestDelete(c *gc.C) { + idOk := "ok" + idFail := "fail" + msg := "delete error" + + s.state.deleteMetadata = func(imageId string) error { + if imageId == idFail { + return errors.New(msg) + } + return nil + } + + errs, err := s.api.Delete(params.MetadataImageIds{[]string{idOk, idFail}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(errs.Results, gc.HasLen, 2) + c.Assert(errs.Results[0].Error, gc.IsNil) + c.Assert(errs.Results[1].Error, gc.ErrorMatches, msg) + s.assertCalls(c, deleteMetadata, deleteMetadata) } === modified file 'src/github.com/juju/juju/apiserver/imagemetadata/package_test.go' --- src/github.com/juju/juju/apiserver/imagemetadata/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/package_test.go 2016-03-22 15:18:22 +0000 @@ -7,12 +7,19 @@ stdtesting "testing" "github.com/juju/names" + gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/imagemetadata" "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/configstore" + imagetesting "github.com/juju/juju/environs/imagemetadata/testing" + envtesting "github.com/juju/juju/environs/testing" + "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/state/cloudimagemetadata" coretesting "github.com/juju/juju/testing" ) @@ -21,6 +28,11 @@ gc.TestingT(t) } +func init() { + provider := mockEnvironProvider{} + environs.RegisterProvider("mock", provider) +} + type baseImageMetadataSuite struct { coretesting.BaseSuite @@ -29,8 +41,11 @@ api *imagemetadata.API state *mockState +} - calls []string +func (s *baseImageMetadataSuite) SetUpSuite(c *gc.C) { + s.BaseSuite.SetUpSuite(c) + imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "test:") } func (s *baseImageMetadataSuite) SetUpTest(c *gc.C) { @@ -38,45 +53,84 @@ s.resources = common.NewResources() s.authorizer = testing.FakeAuthorizer{names.NewUserTag("testuser"), true} - s.calls = []string{} - s.state = s.constructState() + s.state = s.constructState(testConfig(c)) var err error s.api, err = imagemetadata.CreateAPI(s.state, s.resources, s.authorizer) c.Assert(err, jc.ErrorIsNil) } -func (s *baseImageMetadataSuite) assertCalls(c *gc.C, expectedCalls []string) { - c.Assert(s.calls, jc.SameContents, expectedCalls) +func (s *baseImageMetadataSuite) assertCalls(c *gc.C, expectedCalls ...string) { + 
s.state.Stub.CheckCallNames(c, expectedCalls...) } const ( - findMetadata = "findMetadata" - saveMetadata = "saveMetadata" + findMetadata = "findMetadata" + saveMetadata = "saveMetadata" + deleteMetadata = "deleteMetadata" + environConfig = "environConfig" ) -func (s *baseImageMetadataSuite) constructState() *mockState { +func (s *baseImageMetadataSuite) constructState(cfg *config.Config) *mockState { return &mockState{ - findMetadata: func(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { - s.calls = append(s.calls, findMetadata) + Stub: &gitjujutesting.Stub{}, + findMetadata: func(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { return nil, nil }, - saveMetadata: func(m cloudimagemetadata.Metadata) error { - s.calls = append(s.calls, saveMetadata) - return nil + saveMetadata: func(m []cloudimagemetadata.Metadata) error { + return nil + }, + deleteMetadata: func(imageId string) error { + return nil + }, + environConfig: func() (*config.Config, error) { + return cfg, nil }, } } type mockState struct { - findMetadata func(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) - saveMetadata func(m cloudimagemetadata.Metadata) error + *gitjujutesting.Stub + + findMetadata func(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) + saveMetadata func(m []cloudimagemetadata.Metadata) error + deleteMetadata func(imageId string) error + environConfig func() (*config.Config, error) } -func (st *mockState) FindMetadata(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { +func (st *mockState) FindMetadata(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { + st.Stub.MethodCall(st, findMetadata, f) return st.findMetadata(f) } -func (st *mockState) SaveMetadata(m cloudimagemetadata.Metadata) error { +func (st *mockState) SaveMetadata(m []cloudimagemetadata.Metadata) error { + st.Stub.MethodCall(st, saveMetadata, m) return st.saveMetadata(m) } + +func (st *mockState) DeleteMetadata(imageId string) error { + st.Stub.MethodCall(st, deleteMetadata, imageId) + return st.deleteMetadata(imageId) +} + +func (st *mockState) ModelConfig() (*config.Config, error) { + st.Stub.MethodCall(st, environConfig) + return st.environConfig() +} + +func testConfig(c *gc.C) *config.Config { + attrs := coretesting.FakeConfig().Merge(coretesting.Attrs{ + "type": "mock", + "controller": true, + "state-id": "1", + }) + cfg, err := config.New(config.NoDefaults, attrs) + c.Assert(err, jc.ErrorIsNil) + _, err = environs.Prepare( + envtesting.BootstrapContext(c), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) + c.Assert(err, jc.ErrorIsNil) + return cfg +} === modified file 'src/github.com/juju/juju/apiserver/imagemetadata/state.go' --- src/github.com/juju/juju/apiserver/imagemetadata/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/state.go 2016-03-22 15:18:22 +0000 @@ -4,13 +4,16 @@ package imagemetadata import ( + "github.com/juju/juju/environs/config" "github.com/juju/juju/state" "github.com/juju/juju/state/cloudimagemetadata" ) type metadataAcess interface { - FindMetadata(cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) - SaveMetadata(cloudimagemetadata.Metadata) 
error + FindMetadata(cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) + SaveMetadata([]cloudimagemetadata.Metadata) error + DeleteMetadata(imageId string) error + ModelConfig() (*config.Config, error) } var getState = func(st *state.State) metadataAcess { @@ -21,10 +24,14 @@ *state.State } -func (s stateShim) FindMetadata(f cloudimagemetadata.MetadataFilter) (map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata, error) { +func (s stateShim) FindMetadata(f cloudimagemetadata.MetadataFilter) (map[string][]cloudimagemetadata.Metadata, error) { return s.State.CloudImageMetadataStorage.FindMetadata(f) } -func (s stateShim) SaveMetadata(m cloudimagemetadata.Metadata) error { +func (s stateShim) SaveMetadata(m []cloudimagemetadata.Metadata) error { return s.State.CloudImageMetadataStorage.SaveMetadata(m) } + +func (s stateShim) DeleteMetadata(imageId string) error { + return s.State.CloudImageMetadataStorage.DeleteMetadata(imageId) +} === added file 'src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go' --- src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/imagemetadata/updatefrompublished_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,439 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package imagemetadata_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/configstore" + "github.com/juju/juju/environs/imagemetadata" + imagetesting "github.com/juju/juju/environs/imagemetadata/testing" + "github.com/juju/juju/environs/simplestreams" + sstesting "github.com/juju/juju/environs/simplestreams/testing" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/provider/dummy" + "github.com/juju/juju/state/cloudimagemetadata" + "github.com/juju/juju/testing" +) + +// useTestImageData causes the given content to be served when published metadata is requested. +func useTestImageData(c *gc.C, files map[string]string) { + if files != nil { + sstesting.SetRoundTripperFiles(sstesting.AddSignedFiles(c, files), nil) + } else { + sstesting.SetRoundTripperFiles(nil, nil) + } +} + +// TODO (anastasiamac 2015-09-04) This metadata is so verbose. +// Need to generate the text by creating a struct and marshalling it. 
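+// testImagesData holds a simplestreams index and products document: the
+// index advertises image ids for two regions, and the products document
+// lists trusty and precise amd64 images for them.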
+var testImagesData = map[string]string{ + "/streams/v1/index.json": ` + { + "index": { + "com.ubuntu.cloud:released:aws": { + "updated": "Wed, 01 May 2013 13:31:26 +0000", + "clouds": [ + { + "region": "dummy_region", + "endpoint": "https://anywhere" + }, + { + "region": "another_dummy_region", + "endpoint": "" + } + ], + "cloudname": "aws", + "datatype": "image-ids", + "format": "products:1.0", + "products": [ + "com.ubuntu.cloud:server:12.04:amd64", + "com.ubuntu.cloud:server:14.04:amd64" + ], + "path": "streams/v1/image_metadata.json" + } + }, + "updated": "Wed, 27 May 2015 13:31:26 +0000", + "format": "index:1.0" + } +`, + "/streams/v1/image_metadata.json": ` +{ + "updated": "Wed, 27 May 2015 13:31:26 +0000", + "content_id": "com.ubuntu.cloud:released:aws", + "products": { + "com.ubuntu.cloud:server:14.04:amd64": { + "release": "trusty", + "version": "14.04", + "arch": "amd64", + "versions": { + "20140118": { + "items": { + "nzww1pe": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da1", + "id": "ami-36745463" + }, + "nzww1pe2": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da2", + "id": "ami-1136745463" + } + }, + "pubname": "ubuntu-trusty-14.04-amd64-server-20140118", + "label": "release" + } + } + }, + "com.ubuntu.cloud:server:12.04:amd64": { + "release": "precise", + "version": "12.04", + "arch": "amd64", + "versions": { + "20121218": { + "items": { + "usww1pe": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da1", + "id": "ami-26745463" + }, + "usww1pe2": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da2", + "id": "ami-1126745463" + } + }, + "pubname": "ubuntu-precise-12.04-amd64-server-20121218", + "label": "release" + } + } + } + }, + "_aliases": { + "crsn": { + "da1": { + "region": "dummy_region", + "endpoint": "https://anywhere" + }, + "da2": { + "region": "another_dummy_region", + "endpoint": "" + } + } + }, + "format": "products:1.0" +} +`, +} + +var _ = gc.Suite(&imageMetadataUpdateSuite{}) + +type imageMetadataUpdateSuite struct { + baseImageMetadataSuite +} + +func (s *imageMetadataUpdateSuite) SetUpSuite(c *gc.C) { + s.BaseSuite.SetUpSuite(c) + imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "test:") + useTestImageData(c, testImagesData) +} + +func (s *imageMetadataUpdateSuite) TearDownSuite(c *gc.C) { + useTestImageData(c, nil) + s.BaseSuite.TearDownSuite(c) +} + +func (s *imageMetadataUpdateSuite) TestUpdateFromPublishedImagesForProviderWithNoRegions(c *gc.C) { + // This will save all available image metadata. + saved := []cloudimagemetadata.Metadata{} + + // testingEnvConfig prepares an environment configuration using + // the dummy provider since it doesn't implement simplestreams.HasRegion. + s.state.environConfig = func() (*config.Config, error) { + cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) + c.Assert(err, jc.ErrorIsNil) + env, err := environs.Prepare( + modelcmd.BootstrapContext(testing.Context(c)), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) + c.Assert(err, jc.ErrorIsNil) + return env.Config(), err + } + + s.state.saveMetadata = func(m []cloudimagemetadata.Metadata) error { + saved = append(saved, m...) + return nil + } + + err := s.api.UpdateFromPublishedImages() + c.Assert(err, jc.ErrorIsNil) + s.assertCalls(c, environConfig) + c.Assert(saved, jc.SameContents, []cloudimagemetadata.Metadata{}) +} + +// mockConfig returns a configuration for the usage of the +// mock provider below. 
+func mockConfig() testing.Attrs {
+	return dummy.SampleConfig().Merge(testing.Attrs{
+		"type": "mock",
+	})
+}
+
+// mockEnviron is an environment without networking support.
+type mockEnviron struct {
+	environs.Environ
+}
+
+func (e mockEnviron) Config() *config.Config {
+	cfg, err := config.New(config.NoDefaults, mockConfig())
+	if err != nil {
+		panic("invalid configuration for testing")
+	}
+	return cfg
+}
+
+// Region is specified in the HasRegion interface.
+func (e *mockEnviron) Region() (simplestreams.CloudSpec, error) {
+	return simplestreams.CloudSpec{
+		Region:   "dummy_region",
+		Endpoint: "https://anywhere",
+	}, nil
+}
+
+// mockEnvironProvider is the smallest possible provider to
+// test image metadata retrieval with region support.
+type mockEnvironProvider struct {
+	environs.EnvironProvider
+}
+
+func (p mockEnvironProvider) PrepareForBootstrap(environs.BootstrapContext, environs.PrepareForBootstrapParams) (environs.Environ, error) {
+	return &mockEnviron{}, nil
+}
+
+func (p mockEnvironProvider) Open(*config.Config) (environs.Environ, error) {
+	return &mockEnviron{}, nil
+}
+
+var _ = gc.Suite(&regionMetadataSuite{})
+
+type regionMetadataSuite struct {
+	baseImageMetadataSuite
+
+	env *mockEnviron
+
+	saved    []cloudimagemetadata.Metadata
+	expected []cloudimagemetadata.Metadata
+}
+
+func (s *regionMetadataSuite) SetUpSuite(c *gc.C) {
+	s.baseImageMetadataSuite.SetUpSuite(c)
+
+	s.env = &mockEnviron{}
+
+	s.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey)
+	// Prepare mock http transport for overriding metadata and images output in tests.
+	useTestImageData(c, testImagesData)
+}
+
+func (s *regionMetadataSuite) TearDownSuite(c *gc.C) {
+	useTestImageData(c, nil)
+	s.baseImageMetadataSuite.TearDownSuite(c)
+}
+
+func (s *regionMetadataSuite) SetUpTest(c *gc.C) {
+	s.baseImageMetadataSuite.SetUpTest(c)
+
+	s.saved = nil
+	s.expected = nil
+}
+
+func (s *regionMetadataSuite) setExpectations(c *gc.C) {
+	// This will only save image metadata specific to provider cloud spec.
+	s.expected = []cloudimagemetadata.Metadata{
+		cloudimagemetadata.Metadata{
+			cloudimagemetadata.MetadataAttributes{
+				RootStorageType: "ebs",
+				VirtType:        "pv",
+				Arch:            "amd64",
+				Series:          "trusty",
+				Region:          "dummy_region",
+				Source:          "default cloud images",
+				Stream:          "released"},
+			10,
+			"ami-36745463",
+		},
+		cloudimagemetadata.Metadata{
+			cloudimagemetadata.MetadataAttributes{
+				RootStorageType: "ebs",
+				VirtType:        "pv",
+				Arch:            "amd64",
+				Series:          "precise",
+				Region:          "dummy_region",
+				Source:          "default cloud images",
+				Stream:          "released"},
+			10,
+			"ami-26745463",
+		},
+	}
+
+	// testingEnvConfig prepares an environment configuration using
+	// mock provider which implements simplestreams.HasRegion interface.
+	s.state.environConfig = func() (*config.Config, error) {
+		return s.env.Config(), nil
+	}
+
+	s.state.saveMetadata = func(m []cloudimagemetadata.Metadata) error {
+		s.saved = append(s.saved, m...)
+		return nil
+	}
+}
+
+func (s *regionMetadataSuite) checkStoredPublished(c *gc.C) {
+	err := s.api.UpdateFromPublishedImages()
+	c.Assert(err, jc.ErrorIsNil)
+	s.assertCalls(c, environConfig, environConfig, saveMetadata)
+	c.Assert(s.saved, jc.SameContents, s.expected)
+}
+
+func (s *regionMetadataSuite) TestUpdateFromPublishedImagesForProviderWithRegions(c *gc.C) {
+	// This will only save image metadata specific to provider cloud spec.
+ s.setExpectations(c) + s.checkStoredPublished(c) +} + +const ( + indexContent = `{ + "index": { + "com.ubuntu.cloud:%v": { + "updated": "Fri, 17 Jul 2015 13:42:48 +1000", + "format": "products:1.0", + "datatype": "image-ids", + "cloudname": "custom", + "clouds": [ + { + "region": "%v", + "endpoint": "%v" + } + ], + "path": "streams/v1/products.json", + "products": [ + "com.ubuntu.cloud:server:14.04:%v" + ] + } + }, + "updated": "Fri, 17 Jul 2015 13:42:48 +1000", + "format": "index:1.0" +}` + + productContent = `{ + "products": { + "com.ubuntu.cloud:server:14.04:%v": { + "version": "14.04", + "arch": "%v", + "versions": { + "20151707": { + "items": { + "%v": { + "id": "%v", + "root_store": "%v", + "virt": "%v", + "region": "%v", + "endpoint": "%v" + } + } + } + } + } + }, + "updated": "Fri, 17 Jul 2015 13:42:48 +1000", + "format": "products:1.0", + "content_id": "com.ubuntu.cloud:custom" +}` +) + +func writeTempFiles(c *gc.C, metadataDir string, expected []struct{ path, content string }) { + for _, pair := range expected { + path := filepath.Join(metadataDir, pair.path) + err := os.MkdirAll(filepath.Dir(path), 0755) + c.Assert(err, jc.ErrorIsNil) + err = ioutil.WriteFile(path, []byte(pair.content), 0644) + c.Assert(err, jc.ErrorIsNil) + } +} + +func (s *regionMetadataSuite) createTestDataSource(c *gc.C, dsID string, files []struct{ path, content string }) int { + metadataDir := c.MkDir() + writeTempFiles(c, metadataDir, files) + + ds := simplestreams.NewURLDataSource(dsID, "file://"+metadataDir, false, 20, false) + environs.RegisterImageDataSourceFunc(dsID, func(environs.Environ) (simplestreams.DataSource, error) { + return ds, nil + }) + s.AddCleanup(func(*gc.C) { + environs.UnregisterImageDataSourceFunc(dsID) + }) + return ds.Priority() +} + +func (s *regionMetadataSuite) setupMetadata(c *gc.C, dsID string, cloudSpec simplestreams.CloudSpec, metadata cloudimagemetadata.Metadata) int { + files := []struct{ path, content string }{{ + path: "streams/v1/index.json", + content: fmt.Sprintf(indexContent, metadata.Source, metadata.Region, cloudSpec.Endpoint, metadata.Arch), + }, { + path: "streams/v1/products.json", + content: fmt.Sprintf(productContent, metadata.Arch, metadata.Arch, metadata.ImageId, metadata.ImageId, metadata.RootStorageType, metadata.VirtType, metadata.Region, cloudSpec.Endpoint), + }, { + path: "wayward/file.txt", + content: "ghi", + }} + return s.createTestDataSource(c, dsID, files) +} + +func (s *regionMetadataSuite) TestUpdateFromPublishedImagesMultipleDS(c *gc.C) { + s.setExpectations(c) + + // register another data source + cloudSpec, err := s.env.Region() + c.Assert(err, jc.ErrorIsNil) + anotherDS := "second ds" + + m1 := s.expected[0] + priority := s.setupMetadata(c, anotherDS, cloudSpec, m1) + m1.Source = anotherDS + m1.Priority = priority + + s.expected = append(s.expected, m1) + + err = s.api.UpdateFromPublishedImages() + c.Assert(err, jc.ErrorIsNil) + s.assertCalls(c, environConfig, environConfig, saveMetadata, environConfig, saveMetadata) + c.Assert(s.saved, jc.SameContents, s.expected) +} + +func (s *regionMetadataSuite) TestUpdateFromPublishedImagesMultipleDSError(c *gc.C) { + s.setExpectations(c) + + // register another data source that would error + files := []struct{ path, content string }{{ + path: "wayward/file.txt", + content: "ghi", + }} + s.createTestDataSource(c, "error in ds", files) + + s.checkStoredPublished(c) +} === modified file 'src/github.com/juju/juju/apiserver/images.go' --- src/github.com/juju/juju/apiserver/images.go 2016-03-14 14:26:14 
+0000 +++ src/github.com/juju/juju/apiserver/images.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,6 @@ import ( "crypto/sha256" - "encoding/json" "fmt" "io" "io/ioutil" @@ -17,60 +16,36 @@ "github.com/juju/errors" "github.com/juju/utils/fslock" - "github.com/juju/juju/apiserver/common" - apihttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/container" "github.com/juju/juju/instance" "github.com/juju/juju/state" "github.com/juju/juju/state/imagestorage" - "github.com/juju/juju/utils" ) // imagesDownloadHandler handles image download through HTTPS in the API server. type imagesDownloadHandler struct { - httpHandler + ctxt httpContext dataDir string state *state.State } func (h *imagesDownloadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - stateWrapper, err := h.validateEnvironUUID(r) + st, err := h.ctxt.stateForRequestUnauthenticated(r) if err != nil { - h.sendError(w, http.StatusNotFound, err.Error()) + sendError(w, err) return } switch r.Method { case "GET": - err := h.processGet(r, w, stateWrapper.state) + err := h.processGet(r, w, st) if err != nil { logger.Errorf("GET(%s) failed: %v", r.URL, err) - h.sendError(w, http.StatusInternalServerError, err.Error()) + sendError(w, err) return } default: - h.sendError(w, http.StatusMethodNotAllowed, fmt.Sprintf("unsupported method: %q", r.Method)) - } -} - -// sendJSON sends a JSON-encoded response to the client. -func (h *imagesDownloadHandler) sendJSON(w http.ResponseWriter, statusCode int, response *params.ErrorResult) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(statusCode) - body, err := json.Marshal(response) - if err != nil { - return err - } - w.Write(body) - return nil -} - -// sendError sends a JSON-encoded error response. -func (h *imagesDownloadHandler) sendError(w http.ResponseWriter, statusCode int, message string) { - logger.Debugf("sending error: %v %v", statusCode, message) - err := common.ServerError(errors.New(message)) - if err := h.sendJSON(w, statusCode, ¶ms.ErrorResult{Error: err}); err != nil { - logger.Errorf("failed to send error: %v", err) + sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)) } } @@ -80,10 +55,10 @@ kind := r.URL.Query().Get(":kind") series := r.URL.Query().Get(":series") arch := r.URL.Query().Get(":arch") - envuuid := r.URL.Query().Get(":envuuid") + modelUUID := r.URL.Query().Get(":modeluuid") // Get the image details from storage. - metadata, imageReader, err := h.loadImage(st, envuuid, kind, series, arch) + metadata, imageReader, err := h.loadImage(st, modelUUID, kind, series, arch) if err != nil { return errors.Annotate(err, "error getting image from storage") } @@ -92,7 +67,7 @@ // Stream the image to the caller. logger.Debugf("streaming image from state blobstore: %+v", metadata) resp.Header().Set("Content-Type", "application/x-tar-gz") - resp.Header().Set("Digest", fmt.Sprintf("%s=%s", apihttp.DigestSHA, metadata.SHA256)) + resp.Header().Set("Digest", fmt.Sprintf("%s=%s", params.DigestSHA, metadata.SHA256)) resp.Header().Set("Content-Length", fmt.Sprint(metadata.Size)) resp.WriteHeader(http.StatusOK) if _, err := io.Copy(resp, imageReader); err != nil { @@ -103,14 +78,14 @@ // loadImage loads an os image from the blobstore, // downloading and caching it if necessary. 
-func (h *imagesDownloadHandler) loadImage(st *state.State, envuuid, kind, series, arch string) ( +func (h *imagesDownloadHandler) loadImage(st *state.State, modeluuid, kind, series, arch string) ( *imagestorage.Metadata, io.ReadCloser, error, ) { // We want to ensure that if an image needs to be downloaded and cached, // this only happens once. - imageIdent := fmt.Sprintf("image-%s-%s-%s-%s", envuuid, kind, series, arch) + imageIdent := fmt.Sprintf("image-%s-%s-%s-%s", modeluuid, kind, series, arch) lockDir := filepath.Join(h.dataDir, "locks") - lock, err := fslock.NewLock(lockDir, imageIdent) + lock, err := fslock.NewLock(lockDir, imageIdent, fslock.Defaults()) if err != nil { return nil, nil, errors.Trace(err) } @@ -120,11 +95,10 @@ metadata, imageReader, err := storage.Image(kind, series, arch) // Not in storage, so go fetch it. if errors.IsNotFound(err) { - err = h.fetchAndCacheLxcImage(storage, envuuid, series, arch) - if err != nil { + if err := h.fetchAndCacheLxcImage(storage, modeluuid, series, arch); err != nil { return nil, nil, errors.Annotate(err, "error fetching and caching image") } - err = utils.NetworkOperationWitDefaultRetries(func() error { + err = networkOperationWitDefaultRetries(func() error { metadata, imageReader, err = storage.Image(string(instance.LXC), series, arch) return err }, "streaming os image from blobstore")() @@ -137,8 +111,8 @@ // fetchAndCacheLxcImage fetches an lxc image tarball from http://cloud-images.ubuntu.com // and caches it in the state blobstore. -func (h *imagesDownloadHandler) fetchAndCacheLxcImage(storage imagestorage.Storage, envuuid, series, arch string) error { - cfg, err := h.state.EnvironConfig() +func (h *imagesDownloadHandler) fetchAndCacheLxcImage(storage imagestorage.Storage, modeluuid, series, arch string) error { + cfg, err := h.state.ModelConfig() if err != nil { return errors.Trace(err) } @@ -191,7 +165,7 @@ rdr := io.TeeReader(resp.Body, hash) metadata := &imagestorage.Metadata{ - EnvUUID: envuuid, + ModelUUID: modeluuid, Kind: string(instance.LXC), Series: series, Arch: arch, @@ -201,7 +175,7 @@ } // Stream the image to storage. - err = utils.NetworkOperationWitDefaultRetries(func() error { + err = networkOperationWitDefaultRetries(func() error { return storage.AddImage(rdr, metadata) }, "add os image to blobstore")() if err != nil { @@ -210,7 +184,7 @@ // Better check the downloaded image checksum. 
downloadChecksum := fmt.Sprintf("%x", hash.Sum(nil)) if downloadChecksum != checksum { - err = utils.NetworkOperationWitDefaultRetries(func() error { + err := networkOperationWitDefaultRetries(func() error { return storage.DeleteImage(metadata) }, "delete os image from blobstore")() if err != nil { === modified file 'src/github.com/juju/juju/apiserver/images_test.go' --- src/github.com/juju/juju/apiserver/images_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/images_test.go 2016-03-22 15:18:22 +0000 @@ -20,41 +20,36 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - apihttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" containertesting "github.com/juju/juju/container/testing" - "github.com/juju/juju/environs/jujutest" + sstesting "github.com/juju/juju/environs/simplestreams/testing" "github.com/juju/juju/state" "github.com/juju/juju/state/imagestorage" coretesting "github.com/juju/juju/testing" ) +const testImageData = "abc" + +var testImageChecksum = fmt.Sprintf("%x", sha256.Sum256([]byte(testImageData))) + type imageSuite struct { - userAuthHttpSuite - archiveContentType string - imageData string - imageChecksum string + authHttpSuite } var _ = gc.Suite(&imageSuite{}) func (s *imageSuite) SetUpSuite(c *gc.C) { - s.userAuthHttpSuite.SetUpSuite(c) - s.archiveContentType = "application/x-tar-gz" - s.imageData = "abc" - s.imageChecksum = "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad" - testRoundTripper.RegisterForScheme("test") + s.authHttpSuite.SetUpSuite(c) } -func (s *imageSuite) TestDownloadMissingEnvUUIDPath(c *gc.C) { +func (s *imageSuite) TestDownloadMissingModelUUIDPath(c *gc.C) { s.storeFakeImage(c, s.State, "lxc", "trusty", "amd64") - s.envUUID = "" + s.modelUUID = "" url := s.imageURL(c, "lxc", "trusty", "amd64") - c.Assert(url.Path, jc.HasPrefix, "/environment//images") + c.Assert(url.Path, jc.HasPrefix, "/model//images") - response, err := s.downloadRequest(c, url) - c.Assert(err, jc.ErrorIsNil) + response := s.downloadRequest(c, url) s.testDownload(c, response) } @@ -62,41 +57,33 @@ s.storeFakeImage(c, s.State, "lxc", "trusty", "amd64") url := s.imageURL(c, "lxc", "trusty", "amd64") - c.Assert(url.Path, jc.HasPrefix, fmt.Sprintf("/environment/%s/", s.State.EnvironUUID())) + c.Assert(url.Path, jc.HasPrefix, fmt.Sprintf("/model/%s/", s.State.ModelUUID())) - response, err := s.downloadRequest(c, url) - c.Assert(err, jc.ErrorIsNil) + response := s.downloadRequest(c, url) s.testDownload(c, response) } func (s *imageSuite) TestDownloadOtherEnvironmentPath(c *gc.C) { - envState := s.setupOtherEnvironment(c) + envState := s.setupOtherModel(c) s.storeFakeImage(c, envState, "lxc", "trusty", "amd64") url := s.imageURL(c, "lxc", "trusty", "amd64") - c.Assert(url.Path, jc.HasPrefix, fmt.Sprintf("/environment/%s/", envState.EnvironUUID())) + c.Assert(url.Path, jc.HasPrefix, fmt.Sprintf("/model/%s/", envState.ModelUUID())) - response, err := s.downloadRequest(c, url) - c.Assert(err, jc.ErrorIsNil) + response := s.downloadRequest(c, url) s.testDownload(c, response) } -func (s *imageSuite) TestDownloadRejectsWrongEnvUUIDPath(c *gc.C) { - s.envUUID = "dead-beef-123456" +func (s *imageSuite) TestDownloadRejectsWrongModelUUIDPath(c *gc.C) { + s.modelUUID = "dead-beef-123456" url := s.imageURL(c, "lxc", "trusty", "amd64") - response, err := s.downloadRequest(c, url) - c.Assert(err, jc.ErrorIsNil) - c.Assert(err, gc.IsNil) - s.assertErrorResponse(c, response, http.StatusNotFound, `unknown environment: 
"dead-beef-123456"`) + response := s.downloadRequest(c, url) + s.assertErrorResponse(c, response, http.StatusNotFound, `unknown model: "dead-beef-123456"`) } -// This provides the content for code accessing test:///... URLs. This allows -// us to set the responses for things like image queries. -var testRoundTripper = &jujutest.ProxyRoundTripper{} - type CountingRoundTripper struct { count int - *jujutest.CannedRoundTripper + *coretesting.CannedRoundTripper } func (v *CountingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -106,11 +93,11 @@ func useTestImageData(files map[string]string) { if files != nil { - testRoundTripper.Sub = &CountingRoundTripper{ - CannedRoundTripper: jujutest.NewCannedRoundTripper(files, nil), + sstesting.TestRoundTripper.Sub = &CountingRoundTripper{ + CannedRoundTripper: coretesting.NewCannedRoundTripper(files, nil), } } else { - testRoundTripper.Sub = nil + sstesting.TestRoundTripper.Sub = nil } } @@ -118,8 +105,8 @@ // Set up some image data for a fake server. testing.PatchExecutable(c, s, "ubuntu-cloudimg-query", containertesting.FakeLxcURLScript) useTestImageData(map[string]string{ - "/trusty-released-amd64-root.tar.gz": s.imageData, - "/SHA256SUMS": s.imageChecksum + " *trusty-released-amd64-root.tar.gz", + "/trusty-released-amd64-root.tar.gz": testImageData, + "/SHA256SUMS": testImageChecksum + " *trusty-released-amd64-root.tar.gz", }) defer func() { useTestImageData(nil) @@ -129,15 +116,14 @@ // the API server to search for the image on cloud-images, fetches it, // and then cache it in imagestorage. url := s.imageURL(c, "lxc", "trusty", "amd64") - response, err := s.downloadRequest(c, url) - c.Assert(err, jc.ErrorIsNil) + response := s.downloadRequest(c, url) data := s.testDownload(c, response) metadata, cachedData := s.getImageFromStorage(c, s.State, "lxc", "trusty", "amd64") - c.Assert(metadata.Size, gc.Equals, int64(len(s.imageData))) - c.Assert(metadata.SHA256, gc.Equals, s.imageChecksum) + c.Assert(metadata.Size, gc.Equals, int64(len(testImageData))) + c.Assert(metadata.SHA256, gc.Equals, testImageChecksum) c.Assert(metadata.SourceURL, gc.Equals, "test://cloud-images/trusty-released-amd64-root.tar.gz") - c.Assert(string(data), gc.Equals, string(s.imageData)) + c.Assert(string(data), gc.Equals, string(testImageData)) c.Assert(string(data), gc.Equals, string(cachedData)) } @@ -145,8 +131,8 @@ // Set up some image data for a fake server. testing.PatchExecutable(c, s, "ubuntu-cloudimg-query", containertesting.FakeLxcURLScript) useTestImageData(map[string]string{ - "/trusty-released-amd64-root.tar.gz": s.imageData, - "/SHA256SUMS": s.imageChecksum + " *trusty-released-amd64-root.tar.gz", + "/trusty-released-amd64-root.tar.gz": testImageData, + "/SHA256SUMS": testImageChecksum + " *trusty-released-amd64-root.tar.gz", }) defer func() { useTestImageData(nil) @@ -162,10 +148,9 @@ go func() { defer wg.Done() url := s.imageURL(c, "lxc", "trusty", "amd64") - response, err := s.downloadRequest(c, url) - c.Assert(err, jc.ErrorIsNil) + response := s.downloadRequest(c, url) data := s.testDownload(c, response) - c.Assert(string(data), gc.Equals, string(s.imageData)) + c.Assert(string(data), gc.Equals, string(testImageData)) }() } wg.Wait() @@ -178,30 +163,29 @@ } // Downloading an image is 2 requests - one for image, one for SA256. - c.Assert(testRoundTripper.Sub.(*CountingRoundTripper).count, gc.Equals, 2) + c.Assert(sstesting.TestRoundTripper.Sub.(*CountingRoundTripper).count, gc.Equals, 2) // Check that the image is correctly cached. 
metadata, cachedData := s.getImageFromStorage(c, s.State, "lxc", "trusty", "amd64") - c.Assert(metadata.Size, gc.Equals, int64(len(s.imageData))) - c.Assert(metadata.SHA256, gc.Equals, s.imageChecksum) + c.Assert(metadata.Size, gc.Equals, int64(len(testImageData))) + c.Assert(metadata.SHA256, gc.Equals, testImageChecksum) c.Assert(metadata.SourceURL, gc.Equals, "test://cloud-images/trusty-released-amd64-root.tar.gz") - c.Assert(s.imageData, gc.Equals, string(cachedData)) + c.Assert(testImageData, gc.Equals, string(cachedData)) } func (s *imageSuite) TestDownloadFetchChecksumMismatch(c *gc.C) { // Set up some image data for a fake server. testing.PatchExecutable(c, s, "ubuntu-cloudimg-query", containertesting.FakeLxcURLScript) useTestImageData(map[string]string{ - "/trusty-released-amd64-root.tar.gz": s.imageData, + "/trusty-released-amd64-root.tar.gz": testImageData, "/SHA256SUMS": "different-checksum *trusty-released-amd64-root.tar.gz", }) defer func() { useTestImageData(nil) }() - resp, err := s.downloadRequest(c, s.imageURL(c, "lxc", "trusty", "amd64")) + resp := s.downloadRequest(c, s.imageURL(c, "lxc", "trusty", "amd64")) defer resp.Body.Close() - c.Assert(err, gc.IsNil) s.assertErrorResponse(c, resp, http.StatusInternalServerError, ".* download checksum mismatch .*") } @@ -209,52 +193,51 @@ // Set up some image data for a fake server. testing.PatchExecutable(c, s, "ubuntu-cloudimg-query", containertesting.FakeLxcURLScript) useTestImageData(map[string]string{ - "/trusty-released-amd64-root.tar.gz": s.imageData, + "/trusty-released-amd64-root.tar.gz": testImageData, }) defer func() { useTestImageData(nil) }() - resp, err := s.downloadRequest(c, s.imageURL(c, "lxc", "trusty", "amd64")) + resp := s.downloadRequest(c, s.imageURL(c, "lxc", "trusty", "amd64")) defer resp.Body.Close() - c.Assert(err, gc.IsNil) s.assertErrorResponse(c, resp, http.StatusInternalServerError, ".* cannot find sha256 checksum .*") } func (s *imageSuite) testDownload(c *gc.C, resp *http.Response) []byte { c.Check(resp.StatusCode, gc.Equals, http.StatusOK) - c.Check(resp.Header.Get("Digest"), gc.Equals, string(apihttp.DigestSHA)+"="+s.imageChecksum) - c.Check(resp.Header.Get("Content-Type"), gc.Equals, s.archiveContentType) - c.Check(resp.Header.Get("Content-Length"), gc.Equals, fmt.Sprintf("%v", len(s.imageData))) + c.Check(resp.Header.Get("Digest"), gc.Equals, string(params.DigestSHA)+"="+testImageChecksum) + c.Check(resp.Header.Get("Content-Type"), gc.Equals, "application/x-tar-gz") + c.Check(resp.Header.Get("Content-Length"), gc.Equals, fmt.Sprintf("%v", len(testImageData))) defer resp.Body.Close() data, err := ioutil.ReadAll(resp.Body) c.Assert(err, gc.IsNil) - c.Assert(data, gc.HasLen, len(s.imageData)) + c.Assert(data, gc.HasLen, len(testImageData)) hash := sha256.New() hash.Write(data) - c.Assert(fmt.Sprintf("%x", hash.Sum(nil)), gc.Equals, s.imageChecksum) + c.Assert(fmt.Sprintf("%x", hash.Sum(nil)), gc.Equals, testImageChecksum) return data } -func (s *imageSuite) downloadRequest(c *gc.C, url *url.URL) (*http.Response, error) { - return s.sendRequest(c, "", "", "GET", url.String(), "", nil) +func (s *imageSuite) downloadRequest(c *gc.C, url *url.URL) *http.Response { + return s.sendRequest(c, httpRequestParams{method: "GET", url: url.String()}) } func (s *imageSuite) storeFakeImage(c *gc.C, st *state.State, kind, series, arch string) { storage := st.ImageStorage() metadata := &imagestorage.Metadata{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Kind: kind, Series: series, Arch: arch, - Size: 
int64(len(s.imageData)),
- SHA256: s.imageChecksum,
+ Size: int64(len(testImageData)),
+ SHA256: testImageChecksum,
SourceURL: "http://path",
}
- err := storage.AddImage(strings.NewReader(s.imageData), metadata)
+ err := storage.AddImage(strings.NewReader(testImageData), metadata)
c.Assert(err, gc.IsNil)
}
@@ -270,7 +253,7 @@
func (s *imageSuite) imageURL(c *gc.C, kind, series, arch string) *url.URL {
uri := s.baseURL(c)
- uri.Path = fmt.Sprintf("/environment/%s/images/%s/%s/%s/trusty-released-amd64-root.tar.gz", s.envUUID, kind, series, arch)
+ uri.Path = fmt.Sprintf("/model/%s/images/%s/%s/%s/trusty-released-amd64-root.tar.gz", s.modelUUID, kind, series, arch)
return uri
}

=== modified file 'src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go'
--- src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/instancepoller/instancepoller.go 2016-03-22 15:18:22 +0000
@@ -15,7 +15,7 @@
)
func init() {
- common.RegisterStandardFacade("InstancePoller", 1, NewInstancePollerAPI)
+ common.RegisterStandardFacade("InstancePoller", 2, NewInstancePollerAPI)
}
var logger = loggo.GetLogger("juju.apiserver.instancepoller")
@@ -23,8 +23,8 @@
// InstancePollerAPI provides access to the InstancePoller API facade.
type InstancePollerAPI struct {
*common.LifeGetter
- *common.EnvironWatcher
- *common.EnvironMachinesWatcher
+ *common.ModelWatcher
+ *common.ModelMachinesWatcher
*common.InstanceIdGetter
*common.StatusGetter
@@ -42,7 +42,7 @@
authorizer common.Authorizer,
) (*InstancePollerAPI, error) {
- if !authorizer.AuthEnvironManager() {
+ if !authorizer.AuthModelManager() {
// InstancePoller must run as a model manager.
return nil, common.ErrPerm
}
@@ -54,15 +54,15 @@
sti,
accessMachine,
)
- // EnvironConfig() and WatchForEnvironConfigChanges() are allowed
+ // ModelConfig() and WatchForModelConfigChanges() are allowed
// with unrestricted access.
- environWatcher := common.NewEnvironWatcher(
+ modelWatcher := common.NewModelWatcher(
sti,
resources,
authorizer,
)
- // WatchEnvironMachines() is allowed with unrestricted access.
+ // WatchModelMachines() is allowed with unrestricted access.
+ machinesWatcher := common.NewModelMachinesWatcher( sti, resources, authorizer, @@ -79,15 +79,15 @@ ) return &InstancePollerAPI{ - LifeGetter: lifeGetter, - EnvironWatcher: environWatcher, - EnvironMachinesWatcher: machinesWatcher, - InstanceIdGetter: instanceIdGetter, - StatusGetter: statusGetter, - st: sti, - resources: resources, - authorizer: authorizer, - accessMachine: accessMachine, + LifeGetter: lifeGetter, + ModelWatcher: modelWatcher, + ModelMachinesWatcher: machinesWatcher, + InstanceIdGetter: instanceIdGetter, + StatusGetter: statusGetter, + st: sti, + resources: resources, + authorizer: authorizer, + accessMachine: accessMachine, }, nil } === modified file 'src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go' --- src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/instancepoller/instancepoller_test.go 2016-03-22 15:18:22 +0000 @@ -98,44 +98,44 @@ c.Assert(err, gc.ErrorMatches, "permission denied") } -func (s *InstancePollerSuite) TestEnvironConfigFailure(c *gc.C) { +func (s *InstancePollerSuite) TestModelConfigFailure(c *gc.C) { s.st.SetErrors(errors.New("boom")) - result, err := s.api.EnvironConfig() + result, err := s.api.ModelConfig() c.Assert(err, gc.ErrorMatches, "boom") - c.Assert(result, jc.DeepEquals, params.EnvironConfigResult{}) + c.Assert(result, jc.DeepEquals, params.ModelConfigResult{}) - s.st.CheckCallNames(c, "EnvironConfig") + s.st.CheckCallNames(c, "ModelConfig") } -func (s *InstancePollerSuite) TestEnvironConfigSuccess(c *gc.C) { - envConfig := coretesting.EnvironConfig(c) +func (s *InstancePollerSuite) TestModelConfigSuccess(c *gc.C) { + envConfig := coretesting.ModelConfig(c) s.st.SetConfig(c, envConfig) - result, err := s.api.EnvironConfig() + result, err := s.api.ModelConfig() c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, params.EnvironConfigResult{ + c.Assert(result, jc.DeepEquals, params.ModelConfigResult{ Config: envConfig.AllAttrs(), }) - s.st.CheckCallNames(c, "EnvironConfig") + s.st.CheckCallNames(c, "ModelConfig") } -func (s *InstancePollerSuite) TestWatchForEnvironConfigChangesFailure(c *gc.C) { +func (s *InstancePollerSuite) TestWatchForModelConfigChangesFailure(c *gc.C) { // Force the Changes() method of the mock watcher to return a // closed channel by setting an error. s.st.SetErrors(errors.New("boom")) - result, err := s.api.WatchForEnvironConfigChanges() + result, err := s.api.WatchForModelConfigChanges() c.Assert(err, gc.ErrorMatches, "boom") c.Assert(result, jc.DeepEquals, params.NotifyWatchResult{}) c.Assert(s.resources.Count(), gc.Equals, 0) // no watcher registered - s.st.CheckCallNames(c, "WatchForEnvironConfigChanges") + s.st.CheckCallNames(c, "WatchForModelConfigChanges") } -func (s *InstancePollerSuite) TestWatchForEnvironConfigChangesSuccess(c *gc.C) { - result, err := s.api.WatchForEnvironConfigChanges() +func (s *InstancePollerSuite) TestWatchForModelConfigChangesSuccess(c *gc.C) { + result, err := s.api.WatchForModelConfigChanges() c.Assert(err, jc.ErrorIsNil) c.Assert(result, jc.DeepEquals, params.NotifyWatchResult{ Error: nil, NotifyWatcherId: "1", @@ -150,28 +150,28 @@ wc := statetesting.NewNotifyWatcherC(c, s.st, resource.(state.NotifyWatcher)) wc.AssertNoChange() - s.st.CheckCallNames(c, "WatchForEnvironConfigChanges") + s.st.CheckCallNames(c, "WatchForModelConfigChanges") // Try changing the config to verify an event is reported. 
- envConfig := coretesting.EnvironConfig(c) + envConfig := coretesting.ModelConfig(c) s.st.SetConfig(c, envConfig) wc.AssertOneChange() } -func (s *InstancePollerSuite) TestWatchEnvironMachinesFailure(c *gc.C) { +func (s *InstancePollerSuite) TestWatchModelMachinesFailure(c *gc.C) { // Force the Changes() method of the mock watcher to return a // closed channel by setting an error. s.st.SetErrors(errors.Errorf("boom")) - result, err := s.api.WatchEnvironMachines() - c.Assert(err, gc.ErrorMatches, "cannot obtain initial environment machines: boom") + result, err := s.api.WatchModelMachines() + c.Assert(err, gc.ErrorMatches, "cannot obtain initial model machines: boom") c.Assert(result, jc.DeepEquals, params.StringsWatchResult{}) c.Assert(s.resources.Count(), gc.Equals, 0) // no watcher registered - s.st.CheckCallNames(c, "WatchEnvironMachines") + s.st.CheckCallNames(c, "WatchModelMachines") } -func (s *InstancePollerSuite) TestWatchEnvironMachinesSuccess(c *gc.C) { +func (s *InstancePollerSuite) TestWatchModelMachinesSuccess(c *gc.C) { // Add a couple of machines. s.st.SetMachineInfo(c, machineInfo{id: "2"}) s.st.SetMachineInfo(c, machineInfo{id: "1"}) @@ -181,7 +181,7 @@ StringsWatcherId: "1", Changes: []string{"1", "2"}, // initial event (sorted ids) } - result, err := s.api.WatchEnvironMachines() + result, err := s.api.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) c.Assert(result, jc.DeepEquals, expectedResult) @@ -198,14 +198,14 @@ wc1 := statetesting.NewStringsWatcherC(c, s.st, resource1.(state.StringsWatcher)) wc1.AssertNoChange() - s.st.CheckCallNames(c, "WatchEnvironMachines") + s.st.CheckCallNames(c, "WatchModelMachines") // Add another watcher to verify events coalescence. - result, err = s.api.WatchEnvironMachines() + result, err = s.api.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) expectedResult.StringsWatcherId = "2" c.Assert(result, jc.DeepEquals, expectedResult) - s.st.CheckCallNames(c, "WatchEnvironMachines", "WatchEnvironMachines") + s.st.CheckCallNames(c, "WatchModelMachines", "WatchModelMachines") c.Assert(s.resources.Count(), gc.Equals, 2) resource2 := s.resources.Get("2") defer statetesting.AssertStop(c, resource2) === modified file 'src/github.com/juju/juju/apiserver/instancepoller/mock_test.go' --- src/github.com/juju/juju/apiserver/instancepoller/mock_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/instancepoller/mock_test.go 2016-03-22 15:18:22 +0000 @@ -58,24 +58,24 @@ m.CheckCall(c, index, "SetProviderAddresses", args...) } -// WatchForEnvironConfigChanges implements StateInterface. -func (m *mockState) WatchForEnvironConfigChanges() state.NotifyWatcher { +// WatchForModelConfigChanges implements StateInterface. +func (m *mockState) WatchForModelConfigChanges() state.NotifyWatcher { m.mu.Lock() defer m.mu.Unlock() - m.MethodCall(m, "WatchForEnvironConfigChanges") + m.MethodCall(m, "WatchForModelConfigChanges") w := NewMockConfigWatcher(m.NextErr()) m.configWatchers = append(m.configWatchers, w) return w } -// EnvironConfig implements StateInterface. -func (m *mockState) EnvironConfig() (*config.Config, error) { +// ModelConfig implements StateInterface. +func (m *mockState) ModelConfig() (*config.Config, error) { m.mu.Lock() defer m.mu.Unlock() - m.MethodCall(m, "EnvironConfig") + m.MethodCall(m, "ModelConfig") if err := m.NextErr(); err != nil { return nil, err @@ -97,12 +97,12 @@ } } -// WatchEnvironMachines implements StateInterface. 
-func (m *mockState) WatchEnvironMachines() state.StringsWatcher {
+// WatchModelMachines implements StateInterface.
+func (m *mockState) WatchModelMachines() state.StringsWatcher {
m.mu.Lock()
defer m.mu.Unlock()
- m.MethodCall(m, "WatchEnvironMachines")
+ m.MethodCall(m, "WatchModelMachines")
ids := make([]string, 0, len(m.machines))
// Initial event - all machine ids, sorted.

=== modified file 'src/github.com/juju/juju/apiserver/instancepoller/state.go'
--- src/github.com/juju/juju/apiserver/instancepoller/state.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/instancepoller/state.go 2016-03-22 15:18:22 +0000
@@ -26,8 +26,8 @@
}
type StateInterface interface {
- state.EnvironAccessor
- state.EnvironMachinesWatcher
+ state.ModelAccessor
+ state.ModelMachinesWatcher
state.EntityFinder
Machine(id string) (StateMachine, error)

=== modified file 'src/github.com/juju/juju/apiserver/keymanager/keymanager.go'
--- src/github.com/juju/juju/apiserver/keymanager/keymanager.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/keymanager/keymanager.go 2016-03-22 15:18:22 +0000
@@ -12,18 +12,18 @@
"github.com/juju/names"
"github.com/juju/utils"
"github.com/juju/utils/set"
+ "github.com/juju/utils/ssh"
"github.com/juju/juju/apiserver/common"
"github.com/juju/juju/apiserver/params"
"github.com/juju/juju/environs/config"
"github.com/juju/juju/state"
- "github.com/juju/juju/utils/ssh"
)
var logger = loggo.GetLogger("juju.apiserver.keymanager")
func init() {
- common.RegisterStandardFacade("KeyManager", 0, NewKeyManagerAPI)
+ common.RegisterStandardFacade("KeyManager", 1, NewKeyManagerAPI)
}
// KeyManager defines the methods on the keymanager API end point.
@@ -34,7 +34,7 @@
ImportKeys(arg params.ModifyUserSSHKeys) (params.ErrorResults, error)
}
-// KeyUpdaterAPI implements the KeyUpdater interface and is the concrete
+// KeyManagerAPI implements the KeyManager interface and is the concrete
// implementation of the api end point.
type KeyManagerAPI struct {
state *state.State
@@ -50,10 +50,10 @@
// NewKeyManagerAPI creates a new server-side keymanager API end point.
func NewKeyManagerAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*KeyManagerAPI, error) {
// Only clients and model managers can access the key manager service.
- if !authorizer.AuthClient() && !authorizer.AuthEnvironManager() {
+ if !authorizer.AuthClient() && !authorizer.AuthModelManager() {
return nil, common.ErrPerm
}
- env, err := st.Environment()
+ env, err := st.Model()
if err != nil {
return nil, errors.Trace(err)
}
@@ -101,7 +101,7 @@
// For now, authorised keys are global, common to all users.
var keyInfo []string
- cfg, configErr := api.state.EnvironConfig()
+ cfg, configErr := api.state.ModelConfig()
if configErr == nil {
keys := ssh.SplitAuthorisedKeys(cfg.AuthorizedKeys())
keyInfo = parseKeys(keys, arg.Mode)
@@ -149,7 +149,7 @@
// TODO(waigani) 2014-03-17 bug #1293324
// Pass in validation to ensure SSH keys
// have not changed underfoot
- err := api.state.UpdateEnvironConfig(attrs, nil, nil)
+ err := api.state.UpdateModelConfig(attrs, nil, nil)
if err != nil {
return fmt.Errorf("writing environ config: %v", err)
}
@@ -159,7 +159,7 @@
// currentKeyDataForAdd gathers data used when adding ssh keys.
func (api *KeyManagerAPI) currentKeyDataForAdd() (keys []string, fingerprints set.Strings, err error) { fingerprints = make(set.Strings) - cfg, err := api.state.EnvironConfig() + cfg, err := api.state.ModelConfig() if err != nil { return nil, nil, fmt.Errorf("reading current key data: %v", err) } @@ -319,7 +319,7 @@ func (api *KeyManagerAPI) currentKeyDataForDelete() ( keys map[string]string, invalidKeys []string, comments map[string]string, err error) { - cfg, err := api.state.EnvironConfig() + cfg, err := api.state.ModelConfig() if err != nil { return nil, nil, nil, fmt.Errorf("reading current key data: %v", err) } === modified file 'src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go' --- src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/keymanager/keymanager_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,8 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/ssh" + sshtesting "github.com/juju/utils/ssh/testing" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" @@ -18,8 +20,6 @@ "github.com/juju/juju/apiserver/params" apiservertesting "github.com/juju/juju/apiserver/testing" jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/utils/ssh" - sshtesting "github.com/juju/juju/utils/ssh/testing" ) type keyManagerSuite struct { @@ -83,9 +83,9 @@ } func (s *keyManagerSuite) setAuthorisedKeys(c *gc.C, keys string) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) + err := s.State.UpdateModelConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) c.Assert(err, jc.ErrorIsNil) - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(envConfig.AuthorizedKeys(), gc.Equals, keys) } @@ -113,7 +113,7 @@ } func (s *keyManagerSuite) assertEnvironKeys(c *gc.C, expected []string) { - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) keys := envConfig.AuthorizedKeys() c.Assert(keys, gc.Equals, strings.Join(expected, "\n")) === modified file 'src/github.com/juju/juju/apiserver/keymanager/testing/fakesshimport.go' --- src/github.com/juju/juju/apiserver/keymanager/testing/fakesshimport.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/keymanager/testing/fakesshimport.go 2016-03-22 15:18:22 +0000 @@ -6,18 +6,18 @@ import ( "strings" - sshtesting "github.com/juju/juju/utils/ssh/testing" + "github.com/juju/utils/ssh/testing" ) -var multiOneDup = sshtesting.ValidKeyFour.Key + "\n" + sshtesting.ValidKeyTwo.Key +var multiOneDup = testing.ValidKeyFour.Key + "\n" + testing.ValidKeyTwo.Key var importResponses = map[string]string{ - "lp:validuser": sshtesting.ValidKeyThree.Key, - "lp:existing": sshtesting.ValidKeyTwo.Key, - "lp:multi": sshtesting.ValidKeyMulti, - "lp:multipartial": sshtesting.PartValidKeyMulti, - "lp:multiempty": sshtesting.EmptyKeyMulti, - "lp:multiinvalid": sshtesting.MultiInvalid, + "lp:validuser": testing.ValidKeyThree.Key, + "lp:existing": testing.ValidKeyTwo.Key, + "lp:multi": testing.ValidKeyMulti, + "lp:multipartial": testing.PartValidKeyMulti, + "lp:multiempty": testing.EmptyKeyMulti, + "lp:multiinvalid": testing.MultiInvalid, "lp:multionedup": multiOneDup, } === modified file 'src/github.com/juju/juju/apiserver/keyupdater/authorisedkeys.go' --- src/github.com/juju/juju/apiserver/keyupdater/authorisedkeys.go 2015-03-26 15:54:39 +0000 
+++ src/github.com/juju/juju/apiserver/keyupdater/authorisedkeys.go 2016-03-22 15:18:22 +0000 @@ -6,16 +6,16 @@ import ( "github.com/juju/errors" "github.com/juju/names" + "github.com/juju/utils/ssh" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" - "github.com/juju/juju/utils/ssh" ) func init() { - common.RegisterStandardFacade("KeyUpdater", 0, NewKeyUpdaterAPI) + common.RegisterStandardFacade("KeyUpdater", 1, NewKeyUpdaterAPI) } // KeyUpdater defines the methods on the keyupdater API end point. @@ -84,7 +84,7 @@ continue } // 3. Watch for changes - watch := api.state.WatchForEnvironConfigChanges() + watch := api.state.WatchForModelConfigChanges() // Consume the initial event. if _, ok := <-watch.Changes(); ok { results[i].NotifyWatcherId = api.resources.Register(watch) @@ -107,7 +107,7 @@ // For now, authorised keys are global, common to all machines. var keys []string - config, configErr := api.state.EnvironConfig() + config, configErr := api.state.ModelConfig() if configErr == nil { keys = ssh.SplitAuthorisedKeys(config.AuthorizedKeys()) } === modified file 'src/github.com/juju/juju/apiserver/keyupdater/authorisedkeys_test.go' --- src/github.com/juju/juju/apiserver/keyupdater/authorisedkeys_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/keyupdater/authorisedkeys_test.go 2016-03-22 15:18:22 +0000 @@ -43,7 +43,7 @@ s.unrelatedMachine, err = s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) - // The default auth is as a state server + // The default auth is as a controller s.authoriser = apiservertesting.FakeAuthorizer{ Tag: s.rawMachine.Tag(), } @@ -51,7 +51,7 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *authorisedKeysSuite) TestNewKeyUpdaterAPIAcceptsStateServer(c *gc.C) { +func (s *authorisedKeysSuite) TestNewKeyUpdaterAPIAcceptsController(c *gc.C) { endPoint, err := keyupdater.NewKeyUpdaterAPI(s.State, s.resources, s.authoriser) c.Assert(err, jc.ErrorIsNil) c.Assert(endPoint, gc.NotNil) @@ -73,9 +73,9 @@ } func (s *authorisedKeysSuite) setAuthorizedKeys(c *gc.C, keys string) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) + err := s.State.UpdateModelConfig(map[string]interface{}{"authorized-keys": keys}, nil, nil) c.Assert(err, jc.ErrorIsNil) - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(envConfig.AuthorizedKeys(), gc.Equals, keys) } === modified file 'src/github.com/juju/juju/apiserver/leadership/leadership.go' --- src/github.com/juju/juju/apiserver/leadership/leadership.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/leadership/leadership.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" "github.com/juju/juju/state" ) @@ -38,7 +38,7 @@ func init() { common.RegisterStandardFacade( FacadeName, - 1, + 2, NewLeadershipServiceFacade, ) } === modified file 'src/github.com/juju/juju/apiserver/leadership/leadership_test.go' --- src/github.com/juju/juju/apiserver/leadership/leadership_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/leadership/leadership_test.go 2016-03-22 15:18:22 +0000 @@ -21,7 +21,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/leadership" 
"github.com/juju/juju/apiserver/params" - coreleadership "github.com/juju/juju/leadership" + coreleadership "github.com/juju/juju/core/leadership" ) type leadershipSuite struct { === modified file 'src/github.com/juju/juju/apiserver/leadership/settings.go' --- src/github.com/juju/juju/apiserver/leadership/settings.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/leadership/settings.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" ) // NewLeadershipSettingsAccessor creates a new === modified file 'src/github.com/juju/juju/apiserver/leadership/settings_test.go' --- src/github.com/juju/juju/apiserver/leadership/settings_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/leadership/settings_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/juju/apiserver/leadership" "github.com/juju/juju/apiserver/params" - coreleadership "github.com/juju/juju/leadership" + coreleadership "github.com/juju/juju/core/leadership" ) // TODO(fwereade): this is *severely* undertested. === modified file 'src/github.com/juju/juju/apiserver/logger/logger.go' --- src/github.com/juju/juju/apiserver/logger/logger.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/logger/logger.go 2016-03-22 15:18:22 +0000 @@ -16,7 +16,7 @@ var logger = loggo.GetLogger("juju.api.logger") func init() { - common.RegisterStandardFacade("Logger", 0, NewLoggerAPI) + common.RegisterStandardFacade("Logger", 1, NewLoggerAPI) } // Logger defines the methods on the logger API end point. Unfortunately, the @@ -64,7 +64,7 @@ } err = common.ErrPerm if api.authorizer.AuthOwner(tag) { - watch := api.state.WatchForEnvironConfigChanges() + watch := api.state.WatchForModelConfigChanges() // Consume the initial event. Technically, API calls to Watch // 'transmit' the initial event in the Watch response. But // NotifyWatchers have no state to transmit. 
@@ -86,7 +86,7 @@ return params.StringResults{} } results := make([]params.StringResult, len(arg.Entities)) - config, configErr := api.state.EnvironConfig() + config, configErr := api.state.ModelConfig() for i, entity := range arg.Entities { tag, err := names.ParseTag(entity.Tag) if err != nil { === modified file 'src/github.com/juju/juju/apiserver/logger/logger_test.go' --- src/github.com/juju/juju/apiserver/logger/logger_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/logger/logger_test.go 2016-03-22 15:18:22 +0000 @@ -73,9 +73,9 @@ } func (s *loggerSuite) setLoggingConfig(c *gc.C, loggingConfig string) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{"logging-config": loggingConfig}, nil, nil) + err := s.State.UpdateModelConfig(map[string]interface{}{"logging-config": loggingConfig}, nil, nil) c.Assert(err, jc.ErrorIsNil) - envConfig, err := s.State.EnvironConfig() + envConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(envConfig.LoggingConfig(), gc.Equals, loggingConfig) } === modified file 'src/github.com/juju/juju/apiserver/logsink.go' --- src/github.com/juju/juju/apiserver/logsink.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/logsink.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,6 @@ package apiserver import ( - "encoding/json" "io" "net/http" "os" @@ -13,16 +12,16 @@ "time" "github.com/juju/errors" - "github.com/juju/loggo" "github.com/juju/utils" "golang.org/x/net/websocket" "gopkg.in/natefinch/lumberjack.v2" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) -func newLogSinkHandler(h httpHandler, logDir string) http.Handler { +func newLogSinkHandler(h httpContext, logDir string) http.Handler { logPath := filepath.Join(logDir, "logsink.log") if err := primeLogFile(logPath); err != nil { @@ -32,11 +31,11 @@ } return &logSinkHandler{ - httpHandler: h, + ctxt: h, fileLogger: &lumberjack.Logger{ Filename: logPath, - MaxSize: 500, // MB - MaxBackups: 1, + MaxSize: 300, // MB + MaxBackups: 2, }, } } @@ -54,107 +53,103 @@ } type logSinkHandler struct { - httpHandler + ctxt httpContext fileLogger io.WriteCloser } -// LogMessage is used to transmit log messages to the logsink API -// endpoint. Single character field names are used for serialisation -// to keep the size down. These messages are going to be sent a lot. -type LogMessage struct { - Time time.Time `json:"t"` - Module string `json:"m"` - Location string `json:"l"` - Level loggo.Level `json:"v"` - Message string `json:"x"` -} - // ServeHTTP implements the http.Handler interface. func (h *logSinkHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { server := websocket.Server{ Handler: func(socket *websocket.Conn) { defer socket.Close() - // Validate before authenticate because the authentication is - // dependent on the state connection that is determined during the - // validation. - stateWrapper, err := h.validateEnvironUUID(req) - if err != nil { - if errErr := h.sendError(socket, err); errErr != nil { - // Log at DEBUG so that in a standard environment - // logs cant't fill up with auth errors for - // unauthenticated connections. - logger.Debugf("error sending logsink error: %v", errErr) - } - return - } - - tag, err := stateWrapper.authenticateAgent(req) - if err != nil { - if errErr := h.sendError(socket, errors.Errorf("auth failed: %v", err)); errErr != nil { - // DEBUG used as above. 
- logger.Debugf("error sending logsink error: %v", errErr) - } - return - } + + st, entity, err := h.ctxt.stateForRequestAuthenticatedAgent(req) + if err != nil { + h.sendError(socket, req, err) + return + } + tag := entity.Tag() + + filePrefix := st.ModelUUID() + " " + tag.String() + ":" + dbLogger := state.NewDbLogger(st, tag) + defer dbLogger.Close() // If we get to here, no more errors to report, so we report a nil // error. This way the first line of the socket is always a json // formatted simple error. - if err := h.sendError(socket, nil); err != nil { - logger.Errorf("failed to send nil error at start of connection") - return - } + h.sendError(socket, req, nil) - st := stateWrapper.state - filePrefix := st.EnvironUUID() + " " + tag.String() + ":" - dbLogger := state.NewDbLogger(st, tag) - defer dbLogger.Close() - m := new(LogMessage) + logCh := h.receiveLogs(socket) for { - if err := websocket.JSON.Receive(socket, m); err != nil { - if err != io.EOF { - logger.Errorf("error while receiving logs: %v", err) - } - break - } - - fileErr := h.logToFile(filePrefix, m) - if fileErr != nil { - logger.Errorf("logging to logsink.log failed: %v", fileErr) - } - - dbErr := dbLogger.Log(m.Time, m.Module, m.Location, m.Level, m.Message) - if dbErr != nil { - logger.Errorf("logging to DB failed: %v", err) - } - - if fileErr != nil || dbErr != nil { - break + select { + case <-h.ctxt.stop(): + return + case m := <-logCh: + fileErr := h.logToFile(filePrefix, m) + if fileErr != nil { + logger.Errorf("logging to logsink.log failed: %v", fileErr) + } + dbErr := dbLogger.Log(m.Time, m.Module, m.Location, m.Level, m.Message) + if dbErr != nil { + logger.Errorf("logging to DB failed: %v", err) + } + if fileErr != nil || dbErr != nil { + return + } } } - }} + }, + } server.ServeHTTP(w, req) } +func (h *logSinkHandler) receiveLogs(socket *websocket.Conn) <-chan params.LogRecord { + logCh := make(chan params.LogRecord) + + go func() { + var m params.LogRecord + for { + // Receive() blocks until data arrives but will also be + // unblocked when the API handler calls socket.Close as it + // finishes. + if err := websocket.JSON.Receive(socket, &m); err != nil { + logger.Debugf("logsink receive error: %v", err) + return + } + + // Send the log message. + select { + case <-h.ctxt.stop(): + return + case logCh <- m: + } + } + }() + + return logCh +} + +func (h *logSinkHandler) running() bool { + select { + case <-h.ctxt.stop(): + return false + default: + return true + } +} + // sendError sends a JSON-encoded error response. -func (h *logSinkHandler) sendError(w io.Writer, err error) error { - response := ¶ms.ErrorResult{} - if err != nil { - response.Error = ¶ms.Error{Message: err.Error()} - } - message, err := json.Marshal(response) - if err != nil { - // If we are having trouble marshalling the error, we are in big trouble. - logger.Errorf("failure to marshal SimpleError: %v", err) - return errors.Trace(err) - } - message = append(message, []byte("\n")...) - _, err = w.Write(message) - return errors.Trace(err) +func (h *logSinkHandler) sendError(w io.Writer, req *http.Request, err error) { + if err != nil { + logger.Errorf("returning error from %s %s: %s", req.Method, req.URL.Path, errors.Details(err)) + } + sendJSON(w, ¶ms.ErrorResult{ + Error: common.ServerError(err), + }) } // logToFile writes a single log message to the logsink log file. 
-func (h *logSinkHandler) logToFile(prefix string, m *LogMessage) error { +func (h *logSinkHandler) logToFile(prefix string, m params.LogRecord) error { _, err := h.fileLogger.Write([]byte(strings.Join([]string{ prefix, m.Time.In(time.UTC).Format("2006-01-02 15:04:05"), === modified file 'src/github.com/juju/juju/apiserver/logsink_test.go' --- src/github.com/juju/juju/apiserver/logsink_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/logsink_test.go 2016-03-22 15:18:22 +0000 @@ -21,7 +21,7 @@ gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" - "github.com/juju/juju/apiserver" + "github.com/juju/juju/apiserver/params" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" ) @@ -32,7 +32,7 @@ } func (s *logsinkBaseSuite) logsinkURL(c *gc.C, scheme string) *url.URL { - return s.makeURL(c, scheme, "/environment/"+s.State.EnvironUUID()+"/logsink", nil) + return s.makeURL(c, scheme, "/model/"+s.State.ModelUUID()+"/logsink", nil) } type logsinkSuite struct { @@ -46,7 +46,6 @@ var _ = gc.Suite(&logsinkSuite{}) func (s *logsinkSuite) SetUpTest(c *gc.C) { - s.SetInitialFeatureFlags("db-log") s.logsinkBaseSuite.SetUpTest(c) s.nonce = "nonce" m, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ @@ -60,13 +59,13 @@ } func (s *logsinkSuite) TestRejectsBadEnvironUUID(c *gc.C) { - reader := s.openWebsocketCustomPath(c, "/environment/does-not-exist/logsink") - assertJSONError(c, reader, `unknown environment: "does-not-exist"`) + reader := s.openWebsocketCustomPath(c, "/model/does-not-exist/logsink") + assertJSONError(c, reader, `unknown model: "does-not-exist"`) s.assertWebsocketClosed(c, reader) } func (s *logsinkSuite) TestNoAuth(c *gc.C) { - s.checkAuthFails(c, nil, "invalid request format") + s.checkAuthFails(c, nil, "no credentials provided") } func (s *logsinkSuite) TestRejectsUserLogins(c *gc.C) { @@ -77,13 +76,13 @@ func (s *logsinkSuite) TestRejectsBadPassword(c *gc.C) { header := utils.BasicAuthHeader(s.machineTag.String(), "wrong") - header.Add("X-Juju-Nonce", s.nonce) + header.Add(params.MachineNonceHeader, s.nonce) s.checkAuthFailsWithEntityError(c, header) } func (s *logsinkSuite) TestRejectsIncorrectNonce(c *gc.C) { header := utils.BasicAuthHeader(s.machineTag.String(), s.password) - header.Add("X-Juju-Nonce", "wrong") + header.Add(params.MachineNonceHeader, "wrong") s.checkAuthFails(c, header, "machine 0 not provisioned") } @@ -95,7 +94,7 @@ conn := s.dialWebsocketInternal(c, header) defer conn.Close() reader := bufio.NewReader(conn) - assertJSONError(c, reader, "auth failed: "+message) + assertJSONError(c, reader, message) s.assertWebsocketClosed(c, reader) } @@ -109,7 +108,7 @@ c.Assert(errResult.Error, gc.IsNil) t0 := time.Date(2015, time.June, 1, 23, 2, 1, 0, time.UTC) - err := websocket.JSON.Send(conn, &apiserver.LogMessage{ + err := websocket.JSON.Send(conn, ¶ms.LogRecord{ Time: t0, Module: "some.where", Location: "foo.go:42", @@ -119,7 +118,7 @@ c.Assert(err, jc.ErrorIsNil) t1 := time.Date(2015, time.June, 1, 23, 2, 2, 0, time.UTC) - err = websocket.JSON.Send(conn, &apiserver.LogMessage{ + err = websocket.JSON.Send(conn, ¶ms.LogRecord{ Time: t1, Module: "else.where", Location: "bar.go:99", @@ -146,9 +145,9 @@ } // Check the recorded logs are correct. 
- envUUID := s.State.EnvironUUID() + modelUUID := s.State.ModelUUID() c.Assert(docs[0]["t"].(time.Time).Sub(t0), gc.Equals, time.Duration(0)) - c.Assert(docs[0]["e"], gc.Equals, envUUID) + c.Assert(docs[0]["e"], gc.Equals, modelUUID) c.Assert(docs[0]["n"], gc.Equals, s.machineTag.String()) c.Assert(docs[0]["m"], gc.Equals, "some.where") c.Assert(docs[0]["l"], gc.Equals, "foo.go:42") @@ -156,7 +155,7 @@ c.Assert(docs[0]["x"], gc.Equals, "all is well") c.Assert(docs[1]["t"].(time.Time).Sub(t1), gc.Equals, time.Duration(0)) - c.Assert(docs[1]["e"], gc.Equals, envUUID) + c.Assert(docs[1]["e"], gc.Equals, modelUUID) c.Assert(docs[1]["n"], gc.Equals, s.machineTag.String()) c.Assert(docs[1]["m"], gc.Equals, "else.where") c.Assert(docs[1]["l"], gc.Equals, "bar.go:99") @@ -175,7 +174,7 @@ } for a := shortAttempt.Start(); a.Next(); { for _, log := range s.logs.Log() { - c.Assert(log, jc.LessThan, loggo.ERROR) + c.Assert(log.Level, jc.LessThan, loggo.ERROR, gc.Commentf("log: %#v", log)) } } @@ -183,8 +182,8 @@ logPath := filepath.Join(s.LogDir, "logsink.log") logContents, err := ioutil.ReadFile(logPath) c.Assert(err, jc.ErrorIsNil) - line0 := envUUID + " machine-0: 2015-06-01 23:02:01 INFO some.where foo.go:42 all is well\n" - line1 := envUUID + " machine-0: 2015-06-01 23:02:02 ERROR else.where bar.go:99 oh noes\n" + line0 := modelUUID + " machine-0: 2015-06-01 23:02:01 INFO some.where foo.go:42 all is well\n" + line1 := modelUUID + " machine-0: 2015-06-01 23:02:02 ERROR else.where bar.go:99 oh noes\n" c.Assert(string(logContents), gc.Equals, line0+line1) // Check the file mode is as expected. This doesn't work on @@ -216,19 +215,6 @@ func (s *logsinkSuite) makeAuthHeader() http.Header { header := utils.BasicAuthHeader(s.machineTag.String(), s.password) - header.Add("X-Juju-Nonce", s.nonce) + header.Add(params.MachineNonceHeader, s.nonce) return header } - -type logsinkNoFeatureSuite struct { - logsinkBaseSuite -} - -var _ = gc.Suite(&logsinkNoFeatureSuite{}) - -func (s *logsinkNoFeatureSuite) TestNoApiWithoutFeatureFlag(c *gc.C) { - server := s.logsinkURL(c, "wss").String() - config := s.makeWebsocketConfigFromURL(c, server, nil) - _, err := websocket.DialConfig(config) - c.Assert(err, gc.ErrorMatches, ".+/logsink: bad status$") -} === modified file 'src/github.com/juju/juju/apiserver/machine/common_test.go' --- src/github.com/juju/juju/apiserver/machine/common_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/machine/common_test.go 2016-03-22 15:18:22 +0000 @@ -29,7 +29,7 @@ s.JujuConnSuite.SetUpTest(c) var err error - s.machine0, err = s.State.AddMachine("quantal", state.JobManageEnviron) + s.machine0, err = s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) s.machine1, err = s.State.AddMachine("quantal", state.JobHostUnits) === modified file 'src/github.com/juju/juju/apiserver/machine/machiner.go' --- src/github.com/juju/juju/apiserver/machine/machiner.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/machine/machiner.go 2016-03-22 15:18:22 +0000 @@ -17,7 +17,7 @@ ) func init() { - common.RegisterStandardFacade("Machiner", 0, NewMachinerAPI) + common.RegisterStandardFacade("Machiner", 1, NewMachinerAPI) } var logger = loggo.GetLogger("juju.apiserver.machine") === modified file 'src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go' --- src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/machinemanager/machinemanager.go 2016-03-22 
15:18:22 +0000
@@ -16,7 +16,7 @@
)
func init() {
- common.RegisterStandardFacade("MachineManager", 1, NewMachineManagerAPI)
+ common.RegisterStandardFacade("MachineManager", 2, NewMachineManagerAPI)
}
// MachineManagerAPI provides access to the MachineManager API facade.
@@ -95,7 +95,7 @@
}
if p.Series == "" {
- conf, err := mm.st.EnvironConfig()
+ conf, err := mm.st.ModelConfig()
if err != nil {
return nil, err
}
@@ -104,14 +104,14 @@
var placementDirective string
if p.Placement != nil {
- env, err := mm.st.Environment()
+ env, err := mm.st.Model()
if err != nil {
return nil, err
}
// For 1.21 we should support both UUID and name, and with 1.22
// just support UUID
if p.Placement.Scope != env.Name() && p.Placement.Scope != env.UUID() {
- return nil, fmt.Errorf("invalid environment name %q", p.Placement.Scope)
+ return nil, fmt.Errorf("invalid model name %q", p.Placement.Scope)
}
placementDirective = p.Placement.Directive
}

=== modified file 'src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go'
--- src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/machinemanager/machinemanager_test.go 2016-03-22 15:18:22 +0000
@@ -112,7 +112,7 @@
c.Assert(err, jc.ErrorIsNil)
c.Assert(results, gc.DeepEquals, params.AddMachinesResults{
Machines: []params.AddMachinesResult{{
- Error: &params.Error{"boom", ""},
+ Error: &params.Error{Message: "boom", Code: ""},
}},
})
c.Assert(s.st.calls, gc.Equals, 1)
@@ -135,11 +135,11 @@
return &mockBlock{}, false, nil
}
-func (st *mockState) EnvironConfig() (*config.Config, error) {
+func (st *mockState) ModelConfig() (*config.Config, error) {
panic("not implemented")
}
-func (st *mockState) Environment() (*state.Environment, error) {
+func (st *mockState) Model() (*state.Model, error) {
panic("not implemented")
}
@@ -171,6 +171,6 @@
return "not allowed"
}
-func (st *mockBlock) EnvUUID() string {
+func (st *mockBlock) ModelUUID() string {
return "uuid"
}

=== modified file 'src/github.com/juju/juju/apiserver/machinemanager/state.go'
--- src/github.com/juju/juju/apiserver/machinemanager/state.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/machinemanager/state.go 2016-03-22 15:18:22 +0000
@@ -10,8 +10,8 @@
)
type stateInterface interface {
- EnvironConfig() (*config.Config, error)
- Environment() (*state.Environment, error)
+ ModelConfig() (*config.Config, error)
+ Model() (*state.Model, error)
GetBlockForType(t state.BlockType) (state.Block, bool, error)
AddOneMachine(template state.MachineTemplate) (*state.Machine, error)
AddMachineInsideNewMachine(template, parentTemplate state.MachineTemplate, containerType instance.ContainerType) (*state.Machine, error)
@@ -22,12 +22,12 @@
*state.State
}
-func (s stateShim) EnvironConfig() (*config.Config, error) {
- return s.State.EnvironConfig()
+func (s stateShim) ModelConfig() (*config.Config, error) {
+ return s.State.ModelConfig()
}
-func (s stateShim) Environment() (*state.Environment, error) {
- return s.State.Environment()
+func (s stateShim) Model() (*state.Model, error) {
+ return s.State.Model()
}
func (s stateShim) GetBlockForType(t state.BlockType) (state.Block, bool, error) {

=== modified file 'src/github.com/juju/juju/apiserver/meterstatus/meterstatus.go'
--- src/github.com/juju/juju/apiserver/meterstatus/meterstatus.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/meterstatus/meterstatus.go 2016-03-22 15:18:22 +0000
@@ -1,30 +1,127 @@
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details. -// Package meterstatus provides functions for getting meterstatus information -// about units. +// Package meterstatus provides the meter status API facade. package meterstatus import ( - "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" -) - -// MeterStatusWrapper takes a MeterStatus and converts it into an 'api friendly' form where -// Not Set and Not Available (which are important distinctions in state) are converted -// into Amber and Red respecitvely in the api. -func MeterStatusWrapper(getter func() (state.MeterStatus, error)) (state.MeterStatus, error) { - status, err := getter() - if err != nil { - return state.MeterStatus{}, errors.Trace(err) - } - if status.Code == state.MeterNotSet { - return state.MeterStatus{state.MeterAmber, "not set"}, nil - } - if status.Code == state.MeterNotAvailable { - - return state.MeterStatus{state.MeterRed, "not available"}, nil - } - return status, nil + "github.com/juju/juju/state/watcher" +) + +var ( + logger = loggo.GetLogger("juju.apiserver.meterstatus") +) + +func init() { + common.RegisterStandardFacade("MeterStatus", 1, NewMeterStatusAPI) +} + +// MeterStatus defines the methods exported by the meter status API facade. +type MeterStatus interface { + GetMeterStatus(args params.Entities) (params.MeterStatusResults, error) + WatchMeterStatus(args params.Entities) (params.NotifyWatchResults, error) +} + +// MeterStatusAPI implements the MeterStatus interface and is the concrete implementation +// of the API endpoint. +type MeterStatusAPI struct { + state *state.State + accessUnit common.GetAuthFunc + resources *common.Resources +} + +var _ MeterStatus = (*MeterStatusAPI)(nil) + +// NewMeterStatusAPI creates a new API endpoint for dealing with unit meter status. +func NewMeterStatusAPI( + st *state.State, + resources *common.Resources, + authorizer common.Authorizer, +) (*MeterStatusAPI, error) { + if !authorizer.AuthUnitAgent() { + return nil, common.ErrPerm + } + return &MeterStatusAPI{ + state: st, + accessUnit: func() (common.AuthFunc, error) { + return authorizer.AuthOwner, nil + }, + resources: resources, + }, nil +} + +// WatchMeterStatus returns a NotifyWatcher for observing changes +// to each unit's meter status. +func (m *MeterStatusAPI) WatchMeterStatus(args params.Entities) (params.NotifyWatchResults, error) { + result := params.NotifyWatchResults{ + Results: make([]params.NotifyWatchResult, len(args.Entities)), + } + canAccess, err := m.accessUnit() + if err != nil { + return params.NotifyWatchResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + watcherId := "" + if canAccess(tag) { + watcherId, err = m.watchOneUnitMeterStatus(tag) + } + result.Results[i].NotifyWatcherId = watcherId + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +func (m *MeterStatusAPI) watchOneUnitMeterStatus(tag names.UnitTag) (string, error) { + unit, err := m.state.Unit(tag.Id()) + if err != nil { + return "", err + } + watch := unit.WatchMeterStatus() + if _, ok := <-watch.Changes(); ok { + return m.resources.Register(watch), nil + } + return "", watcher.EnsureErr(watch) +} + +// GetMeterStatus returns meter status information for each unit. 
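+// A typical request names the unit entities to query; a unit whose meter
+// status has never been set reports code "AMBER" and info "not set" (see
+// MeterStatusWrapper). Illustrative call, with the API value and unit tag
+// assumed:
+//
+//	results, err := api.GetMeterStatus(params.Entities{
+//		Entities: []params.Entity{{Tag: "unit-mysql-0"}},
+//	})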
+func (m *MeterStatusAPI) GetMeterStatus(args params.Entities) (params.MeterStatusResults, error) { + result := params.MeterStatusResults{ + Results: make([]params.MeterStatusResult, len(args.Entities)), + } + canAccess, err := m.accessUnit() + if err != nil { + return params.MeterStatusResults{}, common.ErrPerm + } + for i, entity := range args.Entities { + unitTag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + var status state.MeterStatus + if canAccess(unitTag) { + var unit *state.Unit + unit, err = m.state.Unit(unitTag.Id()) + if err == nil { + status, err = MeterStatusWrapper(unit.GetMeterStatus) + } + result.Results[i].Code = status.Code.String() + result.Results[i].Info = status.Info + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil } === modified file 'src/github.com/juju/juju/apiserver/meterstatus/meterstatus_test.go' --- src/github.com/juju/juju/apiserver/meterstatus/meterstatus_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/meterstatus/meterstatus_test.go 2016-03-22 15:18:22 +0000 @@ -1,81 +1,99 @@ -// Copyright 2013 Canonical Ltd. +// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package meterstatus_test import ( - "errors" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/meterstatus" + meterstatustesting "github.com/juju/juju/apiserver/meterstatus/testing" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" + jujufactory "github.com/juju/juju/testing/factory" ) -type meterStatusSuite struct{} - var _ = gc.Suite(&meterStatusSuite{}) -func (s *meterStatusSuite) TestError(c *gc.C) { - _, err := meterstatus.MeterStatusWrapper(ErrorGetter) - c.Assert(err, gc.ErrorMatches, "an error") -} - -func (s *meterStatusSuite) TestWrapper(c *gc.C) { - tests := []struct { - about string - input func() (state.MeterStatus, error) - expectedOutput state.MeterStatus - }{{ - about: "notset in, amber out", - input: NotSetGetter, - expectedOutput: state.MeterStatus{state.MeterAmber, "not set"}, - }, { - about: "notavailable in, red out", - input: NotAvailableGetter, - expectedOutput: state.MeterStatus{state.MeterRed, "not available"}, - }, { - about: "red in, red out", - input: RedGetter, - expectedOutput: state.MeterStatus{state.MeterRed, "info"}, - }, { - about: "green in, green out", - input: GreenGetter, - expectedOutput: state.MeterStatus{state.MeterGreen, "info"}, - }, { - about: "amber in, amber out", - input: AmberGetter, - expectedOutput: state.MeterStatus{state.MeterAmber, "info"}, - }} - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - status, err := meterstatus.MeterStatusWrapper(test.input) - c.Assert(err, jc.ErrorIsNil) - c.Assert(status.Code, gc.Equals, test.expectedOutput.Code) - c.Assert(status.Info, gc.Equals, test.expectedOutput.Info) - } -} - -func ErrorGetter() (state.MeterStatus, error) { - return state.MeterStatus{}, errors.New("an error") -} - -func NotAvailableGetter() (state.MeterStatus, error) { - return state.MeterStatus{state.MeterNotAvailable, ""}, nil -} - -func NotSetGetter() (state.MeterStatus, error) { - return state.MeterStatus{state.MeterNotSet, ""}, nil -} - -func RedGetter() (state.MeterStatus, error) { - return 
state.MeterStatus{state.MeterRed, "info"}, nil -} - -func GreenGetter() (state.MeterStatus, error) { - return state.MeterStatus{state.MeterGreen, "info"}, nil -} -func AmberGetter() (state.MeterStatus, error) { - return state.MeterStatus{state.MeterAmber, "info"}, nil +type meterStatusSuite struct { + jujutesting.JujuConnSuite + + authorizer apiservertesting.FakeAuthorizer + resources *common.Resources + + factory *jujufactory.Factory + + unit *state.Unit + + status meterstatus.MeterStatus +} + +func (s *meterStatusSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + s.factory = jujufactory.NewFactory(s.State) + s.unit = s.factory.MakeUnit(c, nil) + + // Create a FakeAuthorizer so we can check permissions, + // set up assuming unit 0 has logged in. + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.unit.UnitTag(), + } + + // Create the resource registry separately to track invocations to + // Register. + s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + status, err := meterstatus.NewMeterStatusAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + s.status = status +} + +func (s *meterStatusSuite) TestGetMeterStatusUnauthenticated(c *gc.C) { + service, err := s.unit.Service() + c.Assert(err, jc.ErrorIsNil) + otherunit := s.factory.MakeUnit(c, &jujufactory.UnitParams{Service: service}) + args := params.Entities{Entities: []params.Entity{{otherunit.Tag().String()}}} + result, err := s.status.GetMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.ErrorMatches, "permission denied") + c.Assert(result.Results[0].Code, gc.Equals, "") + c.Assert(result.Results[0].Info, gc.Equals, "") +} + +func (s *meterStatusSuite) TestGetMeterStatusBadTag(c *gc.C) { + tags := []string{ + "user-admin", + "unit-nosuchunit", + "thisisnotatag", + "machine-0", + "model-blah", + } + args := params.Entities{Entities: make([]params.Entity, len(tags))} + for i, tag := range tags { + args.Entities[i] = params.Entity{Tag: tag} + } + result, err := s.status.GetMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, len(tags)) + for i, result := range result.Results { + c.Logf("checking result %d", i) + c.Assert(result.Code, gc.Equals, "") + c.Assert(result.Info, gc.Equals, "") + c.Assert(result.Error, gc.ErrorMatches, "permission denied") + } +} + +func (s *meterStatusSuite) TestGetMeterStatus(c *gc.C) { + meterstatustesting.TestGetMeterStatus(c, s.status, s.unit) +} + +func (s *meterStatusSuite) TestWatchMeterStatus(c *gc.C) { + meterstatustesting.TestWatchMeterStatus(c, s.status, s.unit, s.State, s.resources) } === modified file 'src/github.com/juju/juju/apiserver/meterstatus/package_test.go' --- src/github.com/juju/juju/apiserver/meterstatus/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/meterstatus/package_test.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,9 @@ import ( stdtesting "testing" - gc "gopkg.in/check.v1" + coretesting "github.com/juju/juju/testing" ) func TestAll(t *stdtesting.T) { - gc.TestingT(t) + coretesting.MgoTestPackage(t) } === added directory 'src/github.com/juju/juju/apiserver/meterstatus/testing' === added file 'src/github.com/juju/juju/apiserver/meterstatus/testing/tests.go' --- src/github.com/juju/juju/apiserver/meterstatus/testing/tests.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/meterstatus/testing/tests.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// 
Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/meterstatus" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + jujustate "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" +) + +// TestGetMeterStatus tests unit meter status retrieval. +func TestGetMeterStatus(c *gc.C, status meterstatus.MeterStatus, unit *jujustate.Unit) { + args := params.Entities{Entities: []params.Entity{{Tag: unit.Tag().String()}}} + result, err := status.GetMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.IsNil) + c.Assert(result.Results[0].Code, gc.Equals, "AMBER") + c.Assert(result.Results[0].Info, gc.Equals, "not set") + + newCode := "GREEN" + newInfo := "All is ok." + + err = unit.SetMeterStatus(newCode, newInfo) + c.Assert(err, jc.ErrorIsNil) + + result, err = status.GetMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.IsNil) + c.Assert(result.Results[0].Code, gc.DeepEquals, newCode) + c.Assert(result.Results[0].Info, gc.DeepEquals, newInfo) +} + +// TestWatchMeterStatus tests the meter status watcher functionality. +func TestWatchMeterStatus(c *gc.C, status meterstatus.MeterStatus, unit *jujustate.Unit, state *jujustate.State, resources *common.Resources) { + c.Assert(resources.Count(), gc.Equals, 0) + + args := params.Entities{Entities: []params.Entity{ + {Tag: unit.UnitTag().String()}, + {Tag: "unit-foo-42"}, + }} + result, err := status.WatchMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ + Results: []params.NotifyWatchResult{ + {NotifyWatcherId: "1"}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the resource was registered and stop when done + c.Assert(resources.Count(), gc.Equals, 1) + resource := resources.Get("1") + defer statetesting.AssertStop(c, resource) + + // Check that the Watch has consumed the initial event ("returned" in + // the Watch call) + wc := statetesting.NewNotifyWatcherC(c, state, resource.(jujustate.NotifyWatcher)) + wc.AssertNoChange() + + err = unit.SetMeterStatus("GREEN", "No additional information.") + c.Assert(err, jc.ErrorIsNil) + wc.AssertOneChange() +} === added file 'src/github.com/juju/juju/apiserver/meterstatus/wrapper.go' --- src/github.com/juju/juju/apiserver/meterstatus/wrapper.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/meterstatus/wrapper.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,28 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/state" +) + +// MeterStatusWrapper takes a MeterStatus and converts it into an 'api friendly' form where +// Not Set and Not Available (which are important distinctions in state) are converted +// into Amber and Red respectively in the api. 
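+//
+// A sketch of typical use, with the unit value assumed:
+//
+//	status, err := MeterStatusWrapper(unit.GetMeterStatus)
+//	// a stored MeterNotSet status comes back as {MeterAmber, "not set"}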
+func MeterStatusWrapper(getter func() (state.MeterStatus, error)) (state.MeterStatus, error) { + status, err := getter() + if err != nil { + return state.MeterStatus{}, errors.Trace(err) + } + if status.Code == state.MeterNotSet { + return state.MeterStatus{state.MeterAmber, "not set"}, nil + } + if status.Code == state.MeterNotAvailable { + + return state.MeterStatus{state.MeterRed, "not available"}, nil + } + return status, nil +} === added file 'src/github.com/juju/juju/apiserver/meterstatus/wrapper_test.go' --- src/github.com/juju/juju/apiserver/meterstatus/wrapper_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/meterstatus/wrapper_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,81 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus_test + +import ( + "errors" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/meterstatus" + "github.com/juju/juju/state" +) + +type meterStatusWrapperSuite struct{} + +var _ = gc.Suite(&meterStatusWrapperSuite{}) + +func (s *meterStatusWrapperSuite) TestError(c *gc.C) { + _, err := meterstatus.MeterStatusWrapper(ErrorGetter) + c.Assert(err, gc.ErrorMatches, "an error") +} + +func (s *meterStatusWrapperSuite) TestWrapper(c *gc.C) { + tests := []struct { + about string + input func() (state.MeterStatus, error) + expectedOutput state.MeterStatus + }{{ + about: "notset in, amber out", + input: NotSetGetter, + expectedOutput: state.MeterStatus{state.MeterAmber, "not set"}, + }, { + about: "notavailable in, red out", + input: NotAvailableGetter, + expectedOutput: state.MeterStatus{state.MeterRed, "not available"}, + }, { + about: "red in, red out", + input: RedGetter, + expectedOutput: state.MeterStatus{state.MeterRed, "info"}, + }, { + about: "green in, green out", + input: GreenGetter, + expectedOutput: state.MeterStatus{state.MeterGreen, "info"}, + }, { + about: "amber in, amber out", + input: AmberGetter, + expectedOutput: state.MeterStatus{state.MeterAmber, "info"}, + }} + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + status, err := meterstatus.MeterStatusWrapper(test.input) + c.Assert(err, jc.ErrorIsNil) + c.Assert(status.Code, gc.Equals, test.expectedOutput.Code) + c.Assert(status.Info, gc.Equals, test.expectedOutput.Info) + } +} + +func ErrorGetter() (state.MeterStatus, error) { + return state.MeterStatus{}, errors.New("an error") +} + +func NotAvailableGetter() (state.MeterStatus, error) { + return state.MeterStatus{state.MeterNotAvailable, ""}, nil +} + +func NotSetGetter() (state.MeterStatus, error) { + return state.MeterStatus{state.MeterNotSet, ""}, nil +} + +func RedGetter() (state.MeterStatus, error) { + return state.MeterStatus{state.MeterRed, "info"}, nil +} + +func GreenGetter() (state.MeterStatus, error) { + return state.MeterStatus{state.MeterGreen, "info"}, nil +} +func AmberGetter() (state.MeterStatus, error) { + return state.MeterStatus{state.MeterAmber, "info"}, nil +} === added directory 'src/github.com/juju/juju/apiserver/metricsadder' === added file 'src/github.com/juju/juju/apiserver/metricsadder/metricsadder.go' --- src/github.com/juju/juju/apiserver/metricsadder/metricsadder.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/metricsadder/metricsadder.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,84 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package metricsadder + +import ( + "github.com/juju/loggo" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +var ( + logger = loggo.GetLogger("juju.apiserver.metricsadder") +) + +func init() { + common.RegisterStandardFacade("MetricsAdder", 2, NewMetricsAdderAPI) +} + +// MetricsAdder defines methods that are used to store metric batches in the state. +type MetricsAdder interface { + // AddMetricBatches stores the specified metric batches in the state. + AddMetricBatches(batches params.MetricBatchParams) (params.ErrorResults, error) +} + +// MetricsAdderAPI implements the metrics adder interface and is the concrete +// implementation of the API end point. +type MetricsAdderAPI struct { + state *state.State +} + +var _ MetricsAdder = (*MetricsAdderAPI)(nil) + +// NewMetricsAdderAPI creates a new API endpoint for adding metrics to state. +func NewMetricsAdderAPI( + st *state.State, + resources *common.Resources, + authorizer common.Authorizer, +) (*MetricsAdderAPI, error) { + // TODO(cmars): remove unit agent auth, once worker/metrics/sender manifold + // can be righteously relocated to machine agent. + if !authorizer.AuthMachineAgent() && !authorizer.AuthUnitAgent() { + return nil, common.ErrPerm + } + return &MetricsAdderAPI{ + state: st, + }, nil +} + +// AddMetricBatches implements the MetricsAdder interface. +func (api *MetricsAdderAPI) AddMetricBatches(args params.MetricBatchParams) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Batches)), + } + for i, batch := range args.Batches { + tag, err := names.ParseUnitTag(batch.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(err) + continue + } + metrics := make([]state.Metric, len(batch.Batch.Metrics)) + for j, metric := range batch.Batch.Metrics { + metrics[j] = state.Metric{ + Key: metric.Key, + Value: metric.Value, + Time: metric.Time, + } + } + _, err = api.state.AddMetrics( + state.BatchParam{ + UUID: batch.Batch.UUID, + Created: batch.Batch.Created, + CharmURL: batch.Batch.CharmURL, + Metrics: metrics, + Unit: tag, + }, + ) + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} === added file 'src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go' --- src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/metricsadder/metricsadder_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,241 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package metricsadder_test
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/metricsadder"
+	"github.com/juju/juju/apiserver/params"
+	apiservertesting "github.com/juju/juju/apiserver/testing"
+	jujutesting "github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/state"
+	jujuFactory "github.com/juju/juju/testing/factory"
+)
+
+var _ = gc.Suite(&metricsAdderSuite{})
+
+type metricsAdderSuite struct {
+	jujutesting.JujuConnSuite
+
+	authorizer apiservertesting.FakeAuthorizer
+	resources  *common.Resources
+
+	factory *jujuFactory.Factory
+
+	machine0       *state.Machine
+	machine1       *state.Machine
+	mysqlService   *state.Service
+	mysql          *state.Service
+	mysqlUnit      *state.Unit
+	meteredService *state.Service
+	meteredCharm   *state.Charm
+	meteredUnit    *state.Unit
+
+	adder metricsadder.MetricsAdder
+}
+
+func (s *metricsAdderSuite) SetUpTest(c *gc.C) {
+	s.JujuConnSuite.SetUpTest(c)
+
+	s.factory = jujuFactory.NewFactory(s.State)
+	s.machine0 = s.factory.MakeMachine(c, &jujuFactory.MachineParams{
+		Series: "quantal",
+		Jobs:   []state.MachineJob{state.JobHostUnits, state.JobManageModel},
+	})
+	s.machine1 = s.factory.MakeMachine(c, &jujuFactory.MachineParams{
+		Series: "quantal",
+		Jobs:   []state.MachineJob{state.JobHostUnits},
+	})
+	mysqlCharm := s.factory.MakeCharm(c, &jujuFactory.CharmParams{
+		Name: "mysql",
+	})
+	s.mysql = s.factory.MakeService(c, &jujuFactory.ServiceParams{
+		Name:    "mysql",
+		Charm:   mysqlCharm,
+		Creator: s.AdminUserTag(c),
+	})
+	s.mysqlUnit = s.factory.MakeUnit(c, &jujuFactory.UnitParams{
+		Service: s.mysql,
+		Machine: s.machine0,
+	})
+
+	s.meteredCharm = s.factory.MakeCharm(c, &jujuFactory.CharmParams{
+		Name: "metered",
+		URL:  "cs:quantal/metered",
+	})
+	s.meteredService = s.factory.MakeService(c, &jujuFactory.ServiceParams{
+		Charm: s.meteredCharm,
+	})
+	s.meteredUnit = s.factory.MakeUnit(c, &jujuFactory.UnitParams{
+		Service:     s.meteredService,
+		SetCharmURL: true,
+		Machine:     s.machine1,
+	})
+
+	// Create a FakeAuthorizer so we can check permissions; it is
+	// set up assuming machine 1 (which hosts the metered unit) has logged in.
+	s.authorizer = apiservertesting.FakeAuthorizer{
+		Tag: names.NewMachineTag("1"),
+	}
+
+	// Create the resource registry separately to track invocations to
+	// Register.
+ s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + adder, err := metricsadder.NewMetricsAdderAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + s.adder = adder +} + +func (s *metricsAdderSuite) TestAddMetricsBatch(c *gc.C) { + metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} + uuid := utils.MustNewUUID().String() + + result, err := s.adder.AddMetricBatches(params.MetricBatchParams{ + Batches: []params.MetricBatchParam{{ + Tag: s.meteredUnit.Tag().String(), + Batch: params.MetricBatch{ + UUID: uuid, + CharmURL: s.meteredCharm.URL().String(), + Created: time.Now(), + Metrics: metrics, + }}}}, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{{nil}}, + }) + + batches, err := s.State.AllMetricBatches() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 1) + batch := batches[0] + c.Assert(batch.UUID(), gc.Equals, uuid) + c.Assert(batch.CharmURL(), gc.Equals, s.meteredCharm.URL().String()) + c.Assert(batch.Unit(), gc.Equals, s.meteredUnit.Name()) + storedMetrics := batch.Metrics() + c.Assert(storedMetrics, gc.HasLen, 1) + c.Assert(storedMetrics[0].Key, gc.Equals, metrics[0].Key) + c.Assert(storedMetrics[0].Value, gc.Equals, metrics[0].Value) +} + +func (s *metricsAdderSuite) TestAddMetricsBatchNoCharmURL(c *gc.C) { + metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} + uuid := utils.MustNewUUID().String() + + result, err := s.adder.AddMetricBatches(params.MetricBatchParams{ + Batches: []params.MetricBatchParam{{ + Tag: s.meteredUnit.Tag().String(), + Batch: params.MetricBatch{ + UUID: uuid, + CharmURL: s.meteredCharm.URL().String(), + Created: time.Now(), + Metrics: metrics, + }}}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{{nil}}, + }) + + batches, err := s.State.AllMetricBatches() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 1) + batch := batches[0] + c.Assert(batch.UUID(), gc.Equals, uuid) + c.Assert(batch.CharmURL(), gc.Equals, s.meteredCharm.URL().String()) + c.Assert(batch.Unit(), gc.Equals, s.meteredUnit.Name()) + storedMetrics := batch.Metrics() + c.Assert(storedMetrics, gc.HasLen, 1) + c.Assert(storedMetrics[0].Key, gc.Equals, metrics[0].Key) + c.Assert(storedMetrics[0].Value, gc.Equals, metrics[0].Value) +} + +func (s *metricsAdderSuite) TestAddMetricsBatchDiffTag(c *gc.C) { + metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} + uuid := utils.MustNewUUID().String() + + tests := []struct { + about string + tag string + expect string + }{{ + about: "unknown unit", + tag: names.NewUnitTag("unknownservice/11").String(), + expect: "unit \"unknownservice/11\" not found", + }, { + about: "user tag", + tag: names.NewLocalUserTag("admin").String(), + expect: `"user-admin@local" is not a valid unit tag`, + }, { + about: "machine tag", + tag: names.NewMachineTag("0").String(), + expect: `"machine-0" is not a valid unit tag`, + }} + + for i, test := range tests { + c.Logf("test %d: %s -> %s", i, test.about, test.tag) + result, err := s.adder.AddMetricBatches(params.MetricBatchParams{ + Batches: []params.MetricBatchParam{{ + Tag: test.tag, + Batch: params.MetricBatch{ + UUID: uuid, + CharmURL: s.meteredCharm.URL().String(), + Created: time.Now(), + Metrics: metrics, + }}}}) + c.Assert(err, jc.ErrorIsNil) + if test.expect == "" { + c.Assert(result.OneError(), jc.ErrorIsNil) 
+ } else { + c.Assert(result.OneError(), gc.ErrorMatches, test.expect) + } + + batches, err := s.State.AllMetricBatches() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 0) + + _, err = s.State.MetricBatch(uuid) + c.Assert(err, jc.Satisfies, errors.IsNotFound) + } +} + +func (s *metricsAdderSuite) TestNewMetricsAdderAPIRefusesNonAgent(c *gc.C) { + tests := []struct { + tag names.Tag + environManager bool + expectedError string + }{ + // TODO(cmars): unit agent should get permission denied when callers are + // moved to machine agent. + {names.NewUnitTag("mysql/0"), false, ""}, + + {names.NewLocalUserTag("admin"), true, "permission denied"}, + {names.NewMachineTag("0"), false, ""}, + {names.NewMachineTag("0"), true, ""}, + } + for i, test := range tests { + c.Logf("test %d", i) + + anAuthoriser := s.authorizer + anAuthoriser.EnvironManager = test.environManager + anAuthoriser.Tag = test.tag + endPoint, err := metricsadder.NewMetricsAdderAPI(s.State, nil, anAuthoriser) + if test.expectedError == "" { + c.Assert(err, jc.ErrorIsNil) + c.Assert(endPoint, gc.NotNil) + } else { + c.Assert(err, gc.ErrorMatches, test.expectedError) + c.Assert(endPoint, gc.IsNil) + } + } +} === added file 'src/github.com/juju/juju/apiserver/metricsadder/package_test.go' --- src/github.com/juju/juju/apiserver/metricsadder/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/metricsadder/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package metricsadder_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === added directory 'src/github.com/juju/juju/apiserver/metricsdebug' === added file 'src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go' --- src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package metricsdebug contains the implementation of an api endpoint +// for metrics debug functionality. +package metricsdebug + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +var ( + logger = loggo.GetLogger("juju.apiserver.metricsdebug") +) + +func init() { + common.RegisterStandardFacade("MetricsDebug", 1, NewMetricsDebugAPI) +} + +type metricsDebug interface { + // MetricBatchesForUnit returns metric batches for the given unit. + MetricBatchesForUnit(unit string) ([]state.MetricBatch, error) + + // MetricBatchesForService returns metric batches for the given service. + MetricBatchesForService(service string) ([]state.MetricBatch, error) + + // Unit returns the unit based on its name. + Unit(string) (*state.Unit, error) + + // Service returns the service based on its name. + Service(string) (*state.Service, error) +} + +// MetricsDebug defines the methods on the metricsdebug API end point. +type MetricsDebug interface { + // GetMetrics returns all metrics stored by the state server. + GetMetrics(arg params.Entities) (params.MetricResults, error) + + // SetMeterStatus will set the meter status on the given entity tag. 
+	SetMeterStatus(params.MeterStatusParams) (params.ErrorResults, error)
+}
+
+// MetricsDebugAPI implements the metricsdebug interface and is the concrete
+// implementation of the api end point.
+type MetricsDebugAPI struct {
+	state metricsDebug
+}
+
+var _ MetricsDebug = (*MetricsDebugAPI)(nil)
+
+// NewMetricsDebugAPI creates a new API endpoint for calling metrics debug functions.
+func NewMetricsDebugAPI(
+	st *state.State,
+	resources *common.Resources,
+	authorizer common.Authorizer,
+) (*MetricsDebugAPI, error) {
+	if !authorizer.AuthClient() {
+		return nil, common.ErrPerm
+	}
+
+	return &MetricsDebugAPI{
+		state: st,
+	}, nil
+}
+
+// GetMetrics returns all metrics stored by the state server.
+func (api *MetricsDebugAPI) GetMetrics(args params.Entities) (params.MetricResults, error) {
+	results := params.MetricResults{
+		Results: make([]params.EntityMetrics, len(args.Entities)),
+	}
+	if len(args.Entities) == 0 {
+		return results, nil
+	}
+	for i, arg := range args.Entities {
+		tag, err := names.ParseTag(arg.Tag)
+		if err != nil {
+			results.Results[i].Error = common.ServerError(err)
+			continue
+		}
+		var batches []state.MetricBatch
+		switch tag.Kind() {
+		case names.UnitTagKind:
+			batches, err = api.state.MetricBatchesForUnit(tag.Id())
+			if err != nil {
+				err = errors.Annotate(err, "failed to get metrics")
+				results.Results[i].Error = common.ServerError(err)
+				continue
+			}
+		case names.ServiceTagKind:
+			batches, err = api.state.MetricBatchesForService(tag.Id())
+			if err != nil {
+				err = errors.Annotate(err, "failed to get metrics")
+				results.Results[i].Error = common.ServerError(err)
+				continue
+			}
+		default:
+			err := errors.Errorf("invalid tag %v", arg.Tag)
+			results.Results[i].Error = common.ServerError(err)
+			continue
+		}
+		metricCount := 0
+		for _, b := range batches {
+			metricCount += len(b.Metrics())
+		}
+		metrics := make([]params.MetricResult, metricCount)
+		ix := 0
+		for _, mb := range batches {
+			for _, m := range mb.Metrics() {
+				metrics[ix] = params.MetricResult{
+					Key:   m.Key,
+					Value: m.Value,
+					Time:  m.Time,
+				}
+				ix++
+			}
+		}
+		results.Results[i].Metrics = metrics
+	}
+	return results, nil
+}
+
+// SetMeterStatus sets meter statuses for entities.
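+//
+// A request sketch (illustrative; the tag may name a unit or a service, and
+// the code must be a valid meter status name):
+//
+//	args := params.MeterStatusParams{
+//		Statuses: []params.MeterStatusParam{{
+//			Tag:  "unit-metered-0",
+//			Code: "RED",
+//			Info: "test",
+//		}},
+//	}
+//	results, err := api.SetMeterStatus(args)
+//	// results.Results[0].Error is nil on success.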
+func (api *MetricsDebugAPI) SetMeterStatus(args params.MeterStatusParams) (params.ErrorResults, error) { + results := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Statuses)), + } + for i, arg := range args.Statuses { + tag, err := names.ParseTag(arg.Tag) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + err = api.setEntityMeterStatus(tag, state.MeterStatus{ + Code: state.MeterStatusFromString(arg.Code), + Info: arg.Info, + }) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + } + return results, nil +} + +func (api *MetricsDebugAPI) setEntityMeterStatus(entity names.Tag, status state.MeterStatus) error { + switch entity := entity.(type) { + case names.UnitTag: + unit, err := api.state.Unit(entity.Id()) + if err != nil { + return errors.Trace(err) + } + chURL, found := unit.CharmURL() + if !found { + return errors.New("no charm url") + } + if chURL.Schema != "local" { + return errors.New("not a local charm") + } + err = unit.SetMeterStatus(status.Code.String(), status.Info) + if err != nil { + return errors.Trace(err) + } + case names.ServiceTag: + service, err := api.state.Service(entity.Id()) + if err != nil { + return errors.Trace(err) + } + chURL, _ := service.CharmURL() + if chURL.Schema != "local" { + return errors.New("not a local charm") + } + units, err := service.AllUnits() + if err != nil { + return errors.Trace(err) + } + for _, unit := range units { + err := unit.SetMeterStatus(status.Code.String(), status.Info) + if err != nil { + return errors.Trace(err) + } + } + default: + return errors.Errorf("expected service or unit tag, got %T", entity) + } + return nil +} === added file 'src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go' --- src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/metricsdebug/metricsdebug_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,277 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
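+
+// The tests below exercise the local-charm guard in setEntityMeterStatus
+// above: meter status may only be forced for charms with the "local"
+// schema, for example (illustrative):
+//
+//	"local:quantal/metered" -> status is set
+//	"cs:quantal/metered"    -> error "not a local charm"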
+
+package metricsdebug_test
+
+import (
+	"time"
+
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/apiserver/metricsdebug"
+	"github.com/juju/juju/apiserver/params"
+	apiservertesting "github.com/juju/juju/apiserver/testing"
+	jujutesting "github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/testing/factory"
+)
+
+type metricsDebugSuite struct {
+	jujutesting.JujuConnSuite
+
+	metricsdebug *metricsdebug.MetricsDebugAPI
+	authorizer   apiservertesting.FakeAuthorizer
+	unit         *state.Unit
+}
+
+var _ = gc.Suite(&metricsDebugSuite{})
+
+func (s *metricsDebugSuite) SetUpTest(c *gc.C) {
+	s.JujuConnSuite.SetUpTest(c)
+	s.authorizer = apiservertesting.FakeAuthorizer{
+		Tag: s.AdminUserTag(c),
+	}
+	debug, err := metricsdebug.NewMetricsDebugAPI(s.State, nil, s.authorizer)
+	c.Assert(err, jc.ErrorIsNil)
+	s.metricsdebug = debug
+}
+
+func (s *metricsDebugSuite) TestSetMeterStatus(c *gc.C) {
+	testCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"})
+	testService := s.Factory.MakeService(c, &factory.ServiceParams{Charm: testCharm})
+	testUnit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: testService, SetCharmURL: true})
+	testUnit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: testService, SetCharmURL: true})
+
+	csCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"})
+	csService := s.Factory.MakeService(c, &factory.ServiceParams{Name: "cs-service", Charm: csCharm})
+	csUnit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: csService, SetCharmURL: true})
+
+	tests := []struct {
+		about  string
+		params params.MeterStatusParams
+		err    string
+		assert func(*gc.C, params.ErrorResults)
+	}{{
+		about: "set service meter status",
+		params: params.MeterStatusParams{
+			Statuses: []params.MeterStatusParam{{
+				Tag:  testService.Tag().String(),
+				Code: "RED",
+				Info: "test",
+			},
+			},
+		},
+		assert: func(c *gc.C, results params.ErrorResults) {
+			err := results.OneError()
+			c.Assert(err, jc.ErrorIsNil)
+			ms1, err := testUnit1.GetMeterStatus()
+			c.Assert(err, jc.ErrorIsNil)
+			c.Assert(ms1, gc.DeepEquals, state.MeterStatus{
+				Code: state.MeterRed,
+				Info: "test",
+			})
+			ms2, err := testUnit2.GetMeterStatus()
+			c.Assert(err, jc.ErrorIsNil)
+			c.Assert(ms2, gc.DeepEquals, state.MeterStatus{
+				Code: state.MeterRed,
+				Info: "test",
+			})
+		},
+	}, {
+		about: "set unit meter status",
+		params: params.MeterStatusParams{
+			Statuses: []params.MeterStatusParam{{
+				Tag:  testUnit1.Tag().String(),
+				Code: "AMBER",
+				Info: "test",
+			},
+			},
+		},
+		assert: func(c *gc.C, results params.ErrorResults) {
+			err := results.OneError()
+			c.Assert(err, jc.ErrorIsNil)
+			ms1, err := testUnit1.GetMeterStatus()
+			c.Assert(err, jc.ErrorIsNil)
+			c.Assert(ms1, gc.DeepEquals, state.MeterStatus{
+				Code: state.MeterAmber,
+				Info: "test",
+			})
+		},
+	}, {
+		about: "not a local charm - service",
+		params: params.MeterStatusParams{
+			Statuses: []params.MeterStatusParam{{
+				Tag:  csService.Tag().String(),
+				Code: "AMBER",
+				Info: "test",
+			},
+			},
+		},
+		assert: func(c *gc.C, results params.ErrorResults) {
+			err := results.OneError()
+			c.Assert(err, gc.DeepEquals, &params.Error{Message: "not a local charm"})
+		},
+	}, {
+		about: "not a local charm - unit",
+		params: params.MeterStatusParams{
+			Statuses: []params.MeterStatusParam{{
+				Tag:  csUnit1.Tag().String(),
+				Code: "AMBER",
+				Info: "test",
+			},
+			},
+		},
+		assert: func(c *gc.C, results params.ErrorResults) {
+			err := results.OneError()
+			c.Assert(err, gc.DeepEquals, &params.Error{Message: "not a local charm"})
+		},
+	}, {
+		about: "invalid meter status",
+		params: params.MeterStatusParams{
+			Statuses: []params.MeterStatusParam{{
+				Tag:  testUnit1.Tag().String(),
+				Code: "WRONG",
+				Info: "test",
+			},
+			},
+		},
+		assert: func(c *gc.C, results params.ErrorResults) {
+			err := results.OneError()
+			c.Assert(err, gc.DeepEquals, &params.Error{Message: "invalid meter status \"NOT AVAILABLE\""})
+		},
+	}, {
+		about: "no such service",
+		params: params.MeterStatusParams{
+			Statuses: []params.MeterStatusParam{{
+				Tag:  "service-missing",
+				Code: "AMBER",
+				Info: "test",
+			},
+			},
+		},
+		assert: func(c *gc.C, results params.ErrorResults) {
+			err := results.OneError()
+			c.Assert(err, gc.DeepEquals, &params.Error{Message: "service \"missing\" not found", Code: "not found"})
+		},
+	},
+	}
+
+	for i, test := range tests {
+		c.Logf("running test %d: %v", i, test.about)
+		result, err := s.metricsdebug.SetMeterStatus(test.params)
+		if test.err == "" {
+			c.Assert(err, jc.ErrorIsNil)
+			test.assert(c, result)
+		} else {
+			c.Assert(err, gc.ErrorMatches, test.err)
+		}
+	}
+}
+
+func (s *metricsDebugSuite) TestGetMetrics(c *gc.C) {
+	meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"})
+	meteredService := s.Factory.MakeService(c, &factory.ServiceParams{Charm: meteredCharm})
+	unit := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true})
+	newTime := time.Now().Round(time.Second)
+	metricA := state.Metric{"pings", "5", newTime}
+	metricB := state.Metric{"pings", "10.5", newTime}
+	s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA}})
+	s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Metrics: []state.Metric{metricA, metricB}})
+	args := params.Entities{Entities: []params.Entity{
+		{"unit-metered/0"},
+	}}
+	result, err := s.metricsdebug.GetMetrics(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result.Results, gc.HasLen, 1)
+	c.Assert(result.Results[0].Metrics, gc.HasLen, 3)
+	c.Assert(result.Results[0], gc.DeepEquals, params.EntityMetrics{
+		Metrics: []params.MetricResult{
+			{
+				Key:   "pings",
+				Value: "5",
+				Time:  newTime,
+			},
+			{
+				Key:   "pings",
+				Value: "5",
+				Time:  newTime,
+			},
+			{
+				Key:   "pings",
+				Value: "10.5",
+				Time:  newTime,
+			},
+		},
+		Error: nil,
+	})
+}
+
+func (s *metricsDebugSuite) TestGetMultipleMetricsNoMocks(c *gc.C) {
+	meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"})
+	meteredService := s.Factory.MakeService(c, &factory.ServiceParams{
+		Charm: meteredCharm,
+	})
+	unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true})
+	unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true})
+
+	metricUnit0 := s.Factory.MakeMetric(c, &factory.MetricParams{
+		Unit: unit0,
+	})
+	metricUnit1 := s.Factory.MakeMetric(c, &factory.MetricParams{
+		Unit: unit1,
+	})
+
+	args0 := params.Entities{Entities: []params.Entity{
+		{"unit-metered/0"},
+	}}
+	args1 := params.Entities{Entities: []params.Entity{
+		{"unit-metered/1"},
+	}}
+
+	metrics0, err := s.metricsdebug.GetMetrics(args0)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(metrics0.Results, gc.HasLen, 1)
+	c.Assert(metrics0.Results[0].Metrics[0].Key, gc.Equals, metricUnit0.Metrics()[0].Key)
+	c.Assert(metrics0.Results[0].Metrics[0].Value, gc.Equals, metricUnit0.Metrics()[0].Value)
+
c.Assert(metrics0.Results[0].Metrics[0].Time, jc.TimeBetween(metricUnit0.Metrics()[0].Time, metricUnit0.Metrics()[0].Time)) + + metrics1, err := s.metricsdebug.GetMetrics(args1) + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics1.Results, gc.HasLen, 1) + c.Assert(metrics1.Results[0].Metrics[0].Key, gc.Equals, metricUnit1.Metrics()[0].Key) + c.Assert(metrics1.Results[0].Metrics[0].Value, gc.Equals, metricUnit1.Metrics()[0].Value) + c.Assert(metrics1.Results[0].Metrics[0].Time, jc.TimeBetween(metricUnit1.Metrics()[0].Time, metricUnit1.Metrics()[0].Time)) +} + +func (s *metricsDebugSuite) TestGetMultipleMetricsNoMocksWithService(c *gc.C) { + meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + meteredService := s.Factory.MakeService(c, &factory.ServiceParams{ + Charm: meteredCharm, + }) + unit0 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + unit1 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) + + metricUnit0 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit0, + }) + metricUnit1 := s.Factory.MakeMetric(c, &factory.MetricParams{ + Unit: unit1, + }) + + args := params.Entities{Entities: []params.Entity{ + {"service-metered"}, + }} + + metrics, err := s.metricsdebug.GetMetrics(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(metrics.Results, gc.HasLen, 1) + c.Assert(metrics.Results[0].Metrics, gc.HasLen, 2) + c.Assert(metrics.Results[0].Metrics[0].Key, gc.Equals, metricUnit0.Metrics()[0].Key) + c.Assert(metrics.Results[0].Metrics[0].Value, gc.Equals, metricUnit0.Metrics()[0].Value) + c.Assert(metrics.Results[0].Metrics[0].Time, jc.TimeBetween(metricUnit0.Metrics()[0].Time, metricUnit0.Metrics()[0].Time)) + + c.Assert(metrics.Results[0].Metrics[1].Key, gc.Equals, metricUnit1.Metrics()[0].Key) + c.Assert(metrics.Results[0].Metrics[1].Value, gc.Equals, metricUnit1.Metrics()[0].Value) + c.Assert(metrics.Results[0].Metrics[1].Time, jc.TimeBetween(metricUnit1.Metrics()[0].Time, metricUnit1.Metrics()[0].Time)) +} === added file 'src/github.com/juju/juju/apiserver/metricsdebug/package_test.go' --- src/github.com/juju/juju/apiserver/metricsdebug/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/metricsdebug/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package metricsdebug_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/apiserver/metricsender/export_test.go' --- src/github.com/juju/juju/apiserver/metricsender/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/export_test.go 2016-03-22 15:18:22 +0000 @@ -2,17 +2,11 @@ package metricsender -import ( - "crypto/x509" - - "github.com/juju/testing" -) - -func PatchHostAndCertPool(host string, certPool *x509.CertPool) func() { +import "github.com/juju/testing" + +func PatchHost(host string) func() { restoreHost := testing.PatchValue(&metricsHost, host) - restoreCertsPool := testing.PatchValue(&metricsCertsPool, certPool) return func() { restoreHost() - restoreCertsPool() } } === modified file 'src/github.com/juju/juju/apiserver/metricsender/metricsender.go' --- src/github.com/juju/juju/apiserver/metricsender/metricsender.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/metricsender.go 2016-03-22 15:18:22 +0000 @@ -2,7 +2,7 @@ // Licensed under the AGPLv3, see LICENCE file for details. // Package metricsender contains functions for sending -// metrics from a state server to a remote metric collector. +// metrics from a controller to a remote metric collector. package metricsender import ( @@ -10,8 +10,8 @@ "github.com/juju/errors" "github.com/juju/loggo" + wireformat "github.com/juju/romulus/wireformat/metrics" - "github.com/juju/juju/apiserver/metricsender/wireformat" "github.com/juju/juju/state" ) @@ -25,7 +25,7 @@ var ( defaultMaxBatchesPerSend = 10 - defaultSender MetricSender = &NopSender{} + defaultSender MetricSender = &HttpSender{} ) func handleResponse(mm *state.MetricsManager, st *state.State, response wireformat.Response) { @@ -62,18 +62,24 @@ if err != nil { return errors.Trace(err) } + sent := 0 for { metrics, err := st.MetricsToSend(batchSize) if err != nil { return errors.Trace(err) } - if len(metrics) == 0 { - logger.Infof("nothing to send") + lenM := len(metrics) + if lenM == 0 { + if sent == 0 { + logger.Infof("nothing to send") + } else { + logger.Infof("done sending") + } break } - wireData := make([]*wireformat.MetricBatch, len(metrics)) + wireData := make([]*wireformat.MetricBatch, lenM) for i, m := range metrics { - wireData[i] = wireformat.ToWire(m) + wireData[i] = ToWire(m) } response, err := sender.Send(wireData) if err != nil { @@ -93,17 +99,18 @@ return errors.Trace(err) } } + sent += lenM } unsent, err := st.CountOfUnsentMetrics() if err != nil { return errors.Trace(err) } - sent, err := st.CountOfSentMetrics() + sentStored, err := st.CountOfSentMetrics() if err != nil { return errors.Trace(err) } - logger.Infof("metrics collection summary: sent:%d unsent:%d", sent, unsent) + logger.Infof("metrics collection summary: sent:%d unsent:%d (%d sent metrics stored)", sent, unsent, sentStored) return nil } @@ -117,3 +124,25 @@ func DefaultMetricSender() MetricSender { return defaultSender } + +// ToWire converts the state.MetricBatch into a type +// that can be sent over the wire to the collector. 
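+//
+// A sketch of the conversion (illustrative), for a batch holding a single
+// "pings" metric:
+//
+//	wire := ToWire(batch)
+//	// wire.UUID, wire.ModelUUID, wire.UnitName and wire.CharmUrl identify
+//	// the batch; each wire.Metrics entry carries Key, Value and a UTC Time.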
+func ToWire(mb *state.MetricBatch) *wireformat.MetricBatch { + metrics := make([]wireformat.Metric, len(mb.Metrics())) + for i, m := range mb.Metrics() { + metrics[i] = wireformat.Metric{ + Key: m.Key, + Value: m.Value, + Time: m.Time.UTC(), + } + } + return &wireformat.MetricBatch{ + UUID: mb.UUID(), + ModelUUID: mb.ModelUUID(), + UnitName: mb.Unit(), + CharmUrl: mb.CharmURL(), + Created: mb.Created().UTC(), + Metrics: metrics, + Credentials: mb.Credentials(), + } +} === modified file 'src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go' --- src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/metricsender_test.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ "errors" "time" + wireformat "github.com/juju/romulus/wireformat/metrics" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -35,6 +36,30 @@ s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) } +func (s *MetricSenderSuite) TestToWire(c *gc.C) { + now := time.Now().Round(time.Second) + metric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now}) + result := metricsender.ToWire(metric) + m := metric.Metrics()[0] + metrics := []wireformat.Metric{ + { + Key: m.Key, + Value: m.Value, + Time: m.Time.UTC(), + }, + } + expected := &wireformat.MetricBatch{ + UUID: metric.UUID(), + ModelUUID: metric.ModelUUID(), + UnitName: metric.Unit(), + CharmUrl: metric.CharmURL(), + Created: metric.Created().UTC(), + Metrics: metrics, + Credentials: metric.Credentials(), + } + c.Assert(result, gc.DeepEquals, expected) +} + // TestSendMetrics creates 2 unsent metrics and a sent metric // and checks that the 2 unsent metrics get sent and have their // sent field set to true. 
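The hunks that follow switch the default sender from NopSender to HttpSender and replace the local wireformat package with github.com/juju/romulus/wireformat/metrics, renaming EnvUUID to ModelUUID throughout. A condensed sketch of what HttpSender.Send does after this change, per the sender.go hunk below (illustrative; the real code builds its own http.Client rather than using http.Post):

	b, err := json.Marshal(batches)
	if err != nil {
		return nil, errors.Trace(err)
	}
	resp, err := http.Post(metricsHost, "application/json", bytes.NewBuffer(b))
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer resp.Body.Close()
	// The response body decodes into a wireformat.Response carrying
	// per-model batch acknowledgements.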
=== modified file 'src/github.com/juju/juju/apiserver/metricsender/nopsender.go' --- src/github.com/juju/juju/apiserver/metricsender/nopsender.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/nopsender.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,8 @@ package metricsender import ( + wireformat "github.com/juju/romulus/wireformat/metrics" "github.com/juju/utils" - - "github.com/juju/juju/apiserver/metricsender/wireformat" ) // NopSender is a sender that acts like everything worked fine @@ -18,7 +17,7 @@ func (n NopSender) Send(batches []*wireformat.MetricBatch) (*wireformat.Response, error) { var resp = make(wireformat.EnvironmentResponses) for _, batch := range batches { - resp.Ack(batch.EnvUUID, batch.UUID) + resp.Ack(batch.ModelUUID, batch.UUID) } uuid, err := utils.NewUUID() if err != nil { === modified file 'src/github.com/juju/juju/apiserver/metricsender/sender.go' --- src/github.com/juju/juju/apiserver/metricsender/sender.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/sender.go 2016-03-22 15:18:22 +0000 @@ -5,20 +5,15 @@ import ( "bytes" - "crypto/tls" - "crypto/x509" "encoding/json" "net/http" "github.com/juju/errors" - "github.com/juju/utils" - - "github.com/juju/juju/apiserver/metricsender/wireformat" + wireformat "github.com/juju/romulus/wireformat/metrics" ) var ( - metricsCertsPool *x509.CertPool - metricsHost string + metricsHost string = "https://api.jujucharms.com/omnibus/v2/metrics" ) // HttpSender is the default used for sending @@ -33,8 +28,7 @@ return nil, errors.Trace(err) } r := bytes.NewBuffer(b) - t := utils.NewHttpTLSTransport(&tls.Config{RootCAs: metricsCertsPool}) - client := &http.Client{Transport: t} + client := &http.Client{} resp, err := client.Post(metricsHost, "application/json", r) if err != nil { return nil, errors.Trace(err) === modified file 'src/github.com/juju/juju/apiserver/metricsender/sender_test.go' --- src/github.com/juju/juju/apiserver/metricsender/sender_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/sender_test.go 2016-03-22 15:18:22 +0000 @@ -11,12 +11,12 @@ "net/http/httptest" "time" + wireformat "github.com/juju/romulus/wireformat/metrics" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/metricsender" - "github.com/juju/juju/apiserver/metricsender/wireformat" "github.com/juju/juju/cert" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" @@ -50,16 +50,11 @@ s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Service: s.meteredService, SetCharmURL: true}) } -// startServer starts a server with TLS and the specified handler, returning a -// function that should be run at the end of the test to clean up. +// startServer starts a test HTTP server, returning a function that should be +// run at the end of the test to clean up. 
func (s *SenderSuite) startServer(c *gc.C, handler http.Handler) func() { - ts := httptest.NewUnstartedServer(handler) - certPool, cert := createCerts(c, "127.0.0.1") - ts.TLS = &tls.Config{ - Certificates: []tls.Certificate{cert}, - } - ts.StartTLS() - cleanup := metricsender.PatchHostAndCertPool(ts.URL, certPool) + ts := httptest.NewServer(handler) + cleanup := metricsender.PatchHost(ts.URL) return func() { ts.Close() cleanup() @@ -122,11 +117,11 @@ for _, batch := range incoming { c.Logf("received metrics batch: %+v", batch) - resp.Ack(batch.EnvUUID, batch.UUID) + resp.Ack(batch.ModelUUID, batch.UUID) if statusMap != nil { unitName, status, info := statusMap(batch.UnitName) - resp.SetStatus(batch.EnvUUID, unitName, status, info) + resp.SetStatus(batch.ModelUUID, unitName, status, info) } select { === modified file 'src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go' --- src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/testing/mocksender.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,8 @@ package testing import ( + wireformat "github.com/juju/romulus/wireformat/metrics" "github.com/juju/utils" - - "github.com/juju/juju/apiserver/metricsender/wireformat" ) // MockSender implements the metric sender interface. @@ -24,7 +23,7 @@ var envResponses = make(wireformat.EnvironmentResponses) for _, batch := range d { - envResponses.Ack(batch.EnvUUID, batch.UUID) + envResponses.Ack(batch.ModelUUID, batch.UUID) } return &wireformat.Response{ UUID: respUUID.String(), === removed directory 'src/github.com/juju/juju/apiserver/metricsender/wireformat' === removed file 'src/github.com/juju/juju/apiserver/metricsender/wireformat/package_test.go' --- src/github.com/juju/juju/apiserver/metricsender/wireformat/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/wireformat/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package wireformat_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestPackage(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/apiserver/metricsender/wireformat/wireformat.go' --- src/github.com/juju/juju/apiserver/metricsender/wireformat/wireformat.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/wireformat/wireformat.go 1970-01-01 00:00:00 +0000 @@ -1,101 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// Package wireformat defines the format that will be used to send metric -// batches to the collector and receive updates. -package wireformat - -import ( - "time" - - "github.com/juju/juju/state" -) - -// MetricBatch is a batch of metrics that will be sent to -// the metric collector -type MetricBatch struct { - UUID string `json:"uuid"` - EnvUUID string `json:"env-uuid"` - UnitName string `json:"unit-name"` - CharmUrl string `json:"charm-url"` - Created time.Time `json:"created"` - Metrics []Metric `json:"metrics"` - Credentials []byte `json:"credentials"` -} - -// Metric represents a single Metric. -type Metric struct { - Key string `json:"key"` - Value string `json:"value"` - Time time.Time `json:"time"` -} - -// ToWire converts the state.MetricBatch into a type -// that can be sent over the wire to the collector. 
-func ToWire(mb *state.MetricBatch) *MetricBatch { - metrics := make([]Metric, len(mb.Metrics())) - for i, m := range mb.Metrics() { - metrics[i] = Metric{ - Key: m.Key, - Value: m.Value, - Time: m.Time.UTC(), - } - } - return &MetricBatch{ - UUID: mb.UUID(), - EnvUUID: mb.EnvUUID(), - UnitName: mb.Unit(), - CharmUrl: mb.CharmURL(), - Created: mb.Created().UTC(), - Metrics: metrics, - Credentials: mb.Credentials(), - } -} - -// Response represents the response from the metrics collector. -type Response struct { - UUID string `json:"uuid"` - EnvResponses EnvironmentResponses `json:"env-responses"` - NewGracePeriod time.Duration `json:"new-grace-period"` -} - -type EnvironmentResponses map[string]EnvResponse - -// Ack adds the specified the batch UUID to the list of acknowledged batches -// for the specified environment. -func (e EnvironmentResponses) Ack(envUUID, batchUUID string) { - env := e[envUUID] - - env.AcknowledgedBatches = append(env.AcknowledgedBatches, batchUUID) - e[envUUID] = env -} - -func (e EnvironmentResponses) SetStatus(envUUID, unitName, status, info string) { - s := UnitStatus{ - Status: status, - Info: info, - } - - env := e[envUUID] - - if env.UnitStatuses == nil { - env.UnitStatuses = map[string]UnitStatus{ - unitName: s, - } - } else { - env.UnitStatuses[unitName] = s - } - e[envUUID] = env - -} - -// EnvResponse contains the response data relevant to a concrete environment. -type EnvResponse struct { - AcknowledgedBatches []string `json:"acks,omitempty"` - UnitStatuses map[string]UnitStatus `json:"unit-statuses,omitempty"` -} - -type UnitStatus struct { - Status string `json:"status"` - Info string `json:"info"` -} === removed file 'src/github.com/juju/juju/apiserver/metricsender/wireformat/wireformat_test.go' --- src/github.com/juju/juju/apiserver/metricsender/wireformat/wireformat_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/metricsender/wireformat/wireformat_test.go 1970-01-01 00:00:00 +0000 @@ -1,98 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package wireformat_test - -import ( - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/metricsender/wireformat" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type WireFormatSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&WireFormatSuite{}) - -func (s *WireFormatSuite) TestToWire(c *gc.C) { - meteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) - meteredService := s.Factory.MakeService(c, &factory.ServiceParams{Charm: meteredCharm}) - unit := s.Factory.MakeUnit(c, &factory.UnitParams{Service: meteredService, SetCharmURL: true}) - now := time.Now().Round(time.Second) - metric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: unit, Sent: false, Time: &now}) - result := wireformat.ToWire(metric) - m := metric.Metrics()[0] - metrics := []wireformat.Metric{ - { - Key: m.Key, - Value: m.Value, - Time: m.Time.UTC(), - }, - } - expected := &wireformat.MetricBatch{ - UUID: metric.UUID(), - EnvUUID: metric.EnvUUID(), - UnitName: metric.Unit(), - CharmUrl: metric.CharmURL(), - Created: metric.Created().UTC(), - Metrics: metrics, - Credentials: metric.Credentials(), - } - c.Assert(result, gc.DeepEquals, expected) -} - -func (s *WireFormatSuite) TestAck(c *gc.C) { - resp := wireformat.EnvironmentResponses{} - c.Assert(resp, gc.HasLen, 0) - - envUUID := "env-uuid" - envUUID2 := "env-uuid2" - batchUUID := "batch-uuid" - batchUUID2 := "batch-uuid2" - - resp.Ack(envUUID, batchUUID) - resp.Ack(envUUID, batchUUID2) - resp.Ack(envUUID2, batchUUID) - c.Assert(resp, gc.HasLen, 2) - - c.Assert(resp[envUUID].AcknowledgedBatches, jc.SameContents, []string{batchUUID, batchUUID2}) - c.Assert(resp[envUUID2].AcknowledgedBatches, jc.SameContents, []string{batchUUID}) -} - -func (s *WireFormatSuite) TestSetStatus(c *gc.C) { - resp := wireformat.EnvironmentResponses{} - c.Assert(resp, gc.HasLen, 0) - - envUUID := "env-uuid" - envUUID2 := "env-uuid2" - unitName := "some-unit/0" - unitName2 := "some-unit/1" - - resp.SetStatus(envUUID, unitName, "GREEN", "") - c.Assert(resp, gc.HasLen, 1) - c.Assert(resp[envUUID].UnitStatuses[unitName].Status, gc.Equals, "GREEN") - c.Assert(resp[envUUID].UnitStatuses[unitName].Info, gc.Equals, "") - - resp.SetStatus(envUUID, unitName2, "RED", "Unit unresponsive.") - c.Assert(resp, gc.HasLen, 1) - c.Assert(resp[envUUID].UnitStatuses[unitName].Status, gc.Equals, "GREEN") - c.Assert(resp[envUUID].UnitStatuses[unitName].Info, gc.Equals, "") - c.Assert(resp[envUUID].UnitStatuses[unitName2].Status, gc.Equals, "RED") - c.Assert(resp[envUUID].UnitStatuses[unitName2].Info, gc.Equals, "Unit unresponsive.") - - resp.SetStatus(envUUID2, unitName, "UNKNOWN", "") - c.Assert(resp, gc.HasLen, 2) - c.Assert(resp[envUUID2].UnitStatuses[unitName].Status, gc.Equals, "UNKNOWN") - c.Assert(resp[envUUID2].UnitStatuses[unitName].Info, gc.Equals, "") - - resp.SetStatus(envUUID, unitName, "RED", "Invalid data received.") - c.Assert(resp, gc.HasLen, 2) - c.Assert(resp[envUUID].UnitStatuses[unitName].Status, gc.Equals, "RED") - c.Assert(resp[envUUID].UnitStatuses[unitName].Info, gc.Equals, "Invalid data received.") -} === modified file 'src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go' --- src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager.go 2016-03-22 15:18:22 +0000 @@ -24,7 +24,7 @@ ) func init() { - 
common.RegisterStandardFacade("MetricsManager", 0, NewMetricsManagerAPI) + common.RegisterStandardFacade("MetricsManager", 1, NewMetricsManagerAPI) } // MetricsManager defines the methods on the metricsmanager API end point. @@ -49,7 +49,7 @@ resources *common.Resources, authorizer common.Authorizer, ) (*MetricsManagerAPI, error) { - if !(authorizer.AuthMachineAgent() && authorizer.AuthEnvironManager()) { + if !(authorizer.AuthMachineAgent() && authorizer.AuthModelManager()) { return nil, common.ErrPerm } @@ -59,7 +59,7 @@ if tag == nil { return false } - return tag == st.EnvironTag() + return tag == st.ModelTag() }, nil } @@ -85,7 +85,7 @@ return result, err } for i, arg := range args.Entities { - tag, err := names.ParseEnvironTag(arg.Tag) + tag, err := names.ParseModelTag(arg.Tag) if err != nil { result.Results[i].Error = common.ServerError(common.ErrPerm) continue @@ -116,7 +116,7 @@ return result, err } for i, arg := range args.Entities { - tag, err := names.ParseEnvironTag(arg.Tag) + tag, err := names.ParseModelTag(arg.Tag) if err != nil { result.Results[i].Error = common.ServerError(err) continue === modified file 'src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go' --- src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/metricsmanager/metricsmanager_test.go 2016-03-22 15:18:22 +0000 @@ -77,10 +77,10 @@ oldTime := time.Now().Add(-(time.Hour * 25)) newTime := time.Now() metric := state.Metric{"pings", "5", newTime} - oldMetric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, Time: &oldTime, Metrics: []state.Metric{metric}}) - newMetric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, Time: &newTime, Metrics: []state.Metric{metric}}) + oldMetric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, DeleteTime: &oldTime, Metrics: []state.Metric{metric}}) + newMetric := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, DeleteTime: &newTime, Metrics: []state.Metric{metric}}) args := params.Entities{Entities: []params.Entity{ - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.CleanupOldMetrics(args) c.Assert(err, jc.ErrorIsNil) @@ -106,7 +106,7 @@ func (s *metricsManagerSuite) TestCleanupArgsIndependent(c *gc.C) { args := params.Entities{Entities: []params.Entity{ {"invalid"}, - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.CleanupOldMetrics(args) c.Assert(result.Results, gc.HasLen, 2) @@ -124,7 +124,7 @@ s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: true, Time: &now, Metrics: []state.Metric{metric}}) unsent := s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: []state.Metric{metric}}) args := params.Entities{Entities: []params.Entity{ - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.SendMetrics(args) c.Assert(err, jc.ErrorIsNil) @@ -150,7 +150,7 @@ func (s *metricsManagerSuite) TestSendArgsIndependent(c *gc.C) { args := params.Entities{Entities: []params.Entity{ {"invalid"}, - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.SendMetrics(args) c.Assert(result.Results, gc.HasLen, 2) @@ -168,7 +168,7 @@ s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &now, Metrics: []state.Metric{metric}}) 
metricsmanager.PatchSender(&sender) args := params.Entities{Entities: []params.Entity{ - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.SendMetrics(args) c.Assert(err, jc.ErrorIsNil) @@ -186,7 +186,7 @@ s.Factory.MakeMetric(c, &factory.MetricParams{Unit: s.unit, Sent: false, Time: &pastTime, Metrics: []state.Metric{metric}}) metricsmanager.PatchSender(&sender) args := params.Entities{Entities: []params.Entity{ - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.SendMetrics(args) c.Assert(err, jc.ErrorIsNil) @@ -200,7 +200,7 @@ var sender testing.MockSender metricsmanager.PatchSender(&sender) args := params.Entities{Entities: []params.Entity{ - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, }} result, err := s.metricsmanager.SendMetrics(args) c.Assert(err, jc.ErrorIsNil) === added directory 'src/github.com/juju/juju/apiserver/modelmanager' === added file 'src/github.com/juju/juju/apiserver/modelmanager/export_test.go' --- src/github.com/juju/juju/apiserver/modelmanager/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/modelmanager/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelmanager + +func RestrictedProviderFields(mm *ModelManagerAPI, providerType string) ([]string, error) { + return mm.restrictedProviderFields(providerType) +} === added file 'src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go' --- src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/modelmanager/modelmanager.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,378 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package modelmanager defines an API end point for functions +// dealing with models. Creating, listing and sharing models. +package modelmanager + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "github.com/juju/utils" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" + "github.com/juju/juju/version" +) + +var logger = loggo.GetLogger("juju.apiserver.modelmanager") + +func init() { + common.RegisterStandardFacade("ModelManager", 2, NewModelManagerAPI) +} + +// ModelManager defines the methods on the modelmanager API end +// point. +type ModelManager interface { + ConfigSkeleton(args params.ModelSkeletonConfigArgs) (params.ModelConfigResult, error) + CreateModel(args params.ModelCreateArgs) (params.Model, error) + ListModels(user params.Entity) (params.UserModelList, error) +} + +// ModelManagerAPI implements the model manager interface and is +// the concrete implementation of the api end point. +type ModelManagerAPI struct { + state stateInterface + authorizer common.Authorizer + toolsFinder *common.ToolsFinder +} + +var _ ModelManager = (*ModelManagerAPI)(nil) + +// NewModelManagerAPI creates a new api server endpoint for managing +// models. 
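+//
+// A call sketch (illustrative; the skeleton is the usual starting point for
+// CreateModel):
+//
+//	skel, err := api.ConfigSkeleton(params.ModelSkeletonConfigArgs{})
+//	// skel.Config now holds the controller-derived values ("type",
+//	// "ca-cert", "state-port", "api-port") plus any provider-restricted
+//	// attributes, ready to be completed and passed to CreateModel.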
+func NewModelManagerAPI( + st *state.State, + resources *common.Resources, + authorizer common.Authorizer, +) (*ModelManagerAPI, error) { + if !authorizer.AuthClient() { + return nil, common.ErrPerm + } + + urlGetter := common.NewToolsURLGetter(st.ModelUUID(), st) + return &ModelManagerAPI{ + state: getState(st), + authorizer: authorizer, + toolsFinder: common.NewToolsFinder(st, st, urlGetter), + }, nil +} + +// authCheck checks if the user is acting on their own behalf, or if they +// are an administrator acting on behalf of another user. +func (em *ModelManagerAPI) authCheck(user names.UserTag) error { + // Since we know this is a user tag (because AuthClient is true), + // we just do the type assertion to the UserTag. + apiUser, _ := em.authorizer.GetAuthTag().(names.UserTag) + isAdmin, err := em.state.IsControllerAdministrator(apiUser) + if err != nil { + return errors.Trace(err) + } + if isAdmin { + logger.Tracef("%q is a controller admin", apiUser.Canonical()) + return nil + } + + // We can't just compare the UserTags themselves as the provider part + // may be unset, and gets replaced with 'local'. We must compare against + // the Username of the user tag. + if apiUser.Canonical() == user.Canonical() { + return nil + } + return common.ErrPerm +} + +// ConfigSource describes a type that is able to provide config. +// Abstracted primarily for testing. +type ConfigSource interface { + Config() (*config.Config, error) +} + +var configValuesFromController = []string{ + "type", + "ca-cert", + "state-port", + "api-port", +} + +// ConfigSkeleton returns config values to be used as a starting point for the +// API caller to construct a valid model specific config. The provider +// and region params are there for future use, and current behaviour expects +// both of these to be empty. +func (em *ModelManagerAPI) ConfigSkeleton(args params.ModelSkeletonConfigArgs) (params.ModelConfigResult, error) { + var result params.ModelConfigResult + if args.Provider != "" { + return result, errors.NotValidf("provider value %q", args.Provider) + } + if args.Region != "" { + return result, errors.NotValidf("region value %q", args.Region) + } + + controllerEnv, err := em.state.ControllerModel() + if err != nil { + return result, errors.Trace(err) + } + + config, err := em.configSkeleton(controllerEnv) + if err != nil { + return result, errors.Trace(err) + } + + result.Config = config + return result, nil +} + +func (em *ModelManagerAPI) restrictedProviderFields(providerType string) ([]string, error) { + provider, err := environs.Provider(providerType) + if err != nil { + return nil, errors.Trace(err) + } + + var fields []string + fields = append(fields, configValuesFromController...) + fields = append(fields, provider.RestrictedConfigAttributes()...) + return fields, nil +} + +func (em *ModelManagerAPI) configSkeleton(source ConfigSource) (map[string]interface{}, error) { + baseConfig, err := source.Config() + if err != nil { + return nil, errors.Trace(err) + } + baseMap := baseConfig.AllAttrs() + + fields, err := em.restrictedProviderFields(baseConfig.Type()) + if err != nil { + return nil, errors.Trace(err) + } + + var result = make(map[string]interface{}) + for _, field := range fields { + if value, found := baseMap[field]; found { + result[field] = value + } + } + return result, nil +} + +func (em *ModelManagerAPI) checkVersion(cfg map[string]interface{}) error { + // If there is no agent-version specified, use the current version. 
+	// Otherwise we need to check that tools are available for that version.
+	value, found := cfg["agent-version"]
+	if !found {
+		cfg["agent-version"] = version.Current.String()
+		return nil
+	}
+	valuestr, ok := value.(string)
+	if !ok {
+		return errors.Errorf("agent-version must be a string but has type '%T'", value)
+	}
+	num, err := version.Parse(valuestr)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if comp := num.Compare(version.Current); comp > 0 {
+		return errors.Errorf("agent-version cannot be greater than the server: %s", version.Current)
+	} else if comp < 0 {
+		// Look to see if we have tools available for that version.
+		// Obviously if the version is the same, we have the tools available.
+		list, err := em.toolsFinder.FindTools(params.FindToolsParams{
+			Number: num,
+		})
+		if err != nil {
+			return errors.Trace(err)
+		}
+		logger.Tracef("found tools: %#v", list)
+		if len(list.List) == 0 {
+			return errors.Errorf("no tools found for version %s", num)
+		}
+	}
+	return nil
+}
+
+func (em *ModelManagerAPI) validConfig(attrs map[string]interface{}) (*config.Config, error) {
+	cfg, err := config.New(config.UseDefaults, attrs)
+	if err != nil {
+		return nil, errors.Annotate(err, "creating config from values failed")
+	}
+	provider, err := environs.Provider(cfg.Type())
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	cfg, err = provider.PrepareForCreateEnvironment(cfg)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	cfg, err = provider.Validate(cfg, nil)
+	if err != nil {
+		return nil, errors.Annotate(err, "provider validation failed")
+	}
+	return cfg, nil
+}
+
+func (em *ModelManagerAPI) newModelConfig(args params.ModelCreateArgs, source ConfigSource) (*config.Config, error) {
+	// For now, we just smash the two maps together as we store
+	// the account values and the model config together in the
+	// *config.Config instance.
+	joint := make(map[string]interface{})
+	for key, value := range args.Config {
+		joint[key] = value
+	}
+	// Account info overrides any config values.
+	for key, value := range args.Account {
+		joint[key] = value
+	}
+	if _, found := joint["uuid"]; found {
+		return nil, errors.New("uuid is generated, you cannot specify one")
+	}
+	baseConfig, err := source.Config()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	baseMap := baseConfig.AllAttrs()
+	fields, err := em.restrictedProviderFields(baseConfig.Type())
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	// Before comparing any values, we need to push the config through
+	// the provider validation code. One of the reasons for this is that
+	// numbers being serialized through JSON get turned into float64. The
+	// schema code used in config will convert these back into integers.
+	// However, before we can create a valid config, we need to make sure
+	// we copy across fields from the main config that aren't there.
+	for _, field := range fields {
+		if _, found := joint[field]; !found {
+			if baseValue, found := baseMap[field]; found {
+				joint[field] = baseValue
+			}
+		}
+	}
+
+	// Generate the UUID for the new model.
+	uuid, err := utils.NewUUID()
+	if err != nil {
+		return nil, errors.Annotate(err, "failed to generate environment uuid")
+	}
+	joint["uuid"] = uuid.String()
+
+	if err := em.checkVersion(joint); err != nil {
+		return nil, errors.Annotate(err, "failed to create config")
+	}
+
+	// validConfig must only be called once.
+	cfg, err := em.validConfig(joint)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	attrs := cfg.AllAttrs()
+	// Any values that would normally be copied from the controller
+	// config can also be defined, but if they differ from the controller
+	// values, an error is returned.
+	for _, field := range fields {
+		if value, found := attrs[field]; found {
+			if serverValue := baseMap[field]; value != serverValue {
+				return nil, errors.Errorf(
+					"specified %s \"%v\" does not match apiserver \"%v\"",
+					field, value, serverValue)
+			}
+		}
+	}
+
+	return cfg, nil
+}
+
+// CreateModel creates a new model using the account and
+// model config specified in the args.
+func (em *ModelManagerAPI) CreateModel(args params.ModelCreateArgs) (params.Model, error) {
+	result := params.Model{}
+	// Get the controller model first. We need it both for the state
+	// server owner and the ability to get the config.
+	controllerEnv, err := em.state.ControllerModel()
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+
+	ownerTag, err := names.ParseUserTag(args.OwnerTag)
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+
+	// Any user is able to create themselves a model (until real fine-grained
+	// permissions are available), and admins (the creator of the state
+	// server model) are able to create models for other people.
+	err = em.authCheck(ownerTag)
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+
+	newConfig, err := em.newModelConfig(args, controllerEnv)
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+	// NOTE: check the agent-version of the config, and if it is > the current
+	// version, it is not supported, also check existing tools, and if we don't
+	// have tools for that version, also die.
+	model, st, err := em.state.NewModel(newConfig, ownerTag)
+	if err != nil {
+		return result, errors.Annotate(err, "failed to create new model")
+	}
+	defer st.Close()
+
+	result.Name = model.Name()
+	result.UUID = model.UUID()
+	result.OwnerTag = model.Owner().String()
+
+	return result, nil
+}
+
+// ListModels returns the models that the specified user
+// has access to in the current server. Only the controller owner
+// can list models for any user (at this stage). Other users
+// can only ask about their own models.
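+//
+// A sketch of one returned entry (illustrative; placeholder values):
+//
+//	params.UserModel{
+//		Model: params.Model{
+//			Name:     "mymodel",
+//			UUID:     "deadbeef-0bad-400d-8000-4b1d0d06f00d",
+//			OwnerTag: "user-admin@local",
+//		},
+//		LastConnection: &lastConn, // nil when the user has never connected
+//	}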
+func (em *ModelManagerAPI) ListModels(user params.Entity) (params.UserModelList, error) { + result := params.UserModelList{} + + userTag, err := names.ParseUserTag(user.Tag) + if err != nil { + return result, errors.Trace(err) + } + + err = em.authCheck(userTag) + if err != nil { + return result, errors.Trace(err) + } + + models, err := em.state.ModelsForUser(userTag) + if err != nil { + return result, errors.Trace(err) + } + + for _, model := range models { + var lastConn *time.Time + userLastConn, err := model.LastConnection() + if err != nil { + if !state.IsNeverConnectedError(err) { + return result, errors.Trace(err) + } + } else { + lastConn = &userLastConn + } + result.UserModels = append(result.UserModels, params.UserModel{ + Model: params.Model{ + Name: model.Name(), + UUID: model.UUID(), + OwnerTag: model.Owner().String(), + }, + LastConnection: lastConn, + }) + logger.Debugf("list models: %s, %s, %s", model.Name(), model.UUID(), model.Owner()) + } + + return result, nil +} === added file 'src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go' --- src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/modelmanager/modelmanager_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,362 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelmanager_test + +import ( + "github.com/juju/loggo" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/modelmanager" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + jujutesting "github.com/juju/juju/juju/testing" + // Register the providers for the field check test + _ "github.com/juju/juju/provider/azure" + _ "github.com/juju/juju/provider/ec2" + _ "github.com/juju/juju/provider/joyent" + _ "github.com/juju/juju/provider/maas" + _ "github.com/juju/juju/provider/openstack" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/version" +) + +type modelManagerBaseSuite struct { + jujutesting.JujuConnSuite + + modelmanager *modelmanager.ModelManagerAPI + resources *common.Resources + authoriser apiservertesting.FakeAuthorizer +} + +func (s *modelManagerBaseSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + s.authoriser = apiservertesting.FakeAuthorizer{ + Tag: s.AdminUserTag(c), + } + + loggo.GetLogger("juju.apiserver.modelmanager").SetLogLevel(loggo.TRACE) +} + +func (s *modelManagerBaseSuite) setAPIUser(c *gc.C, user names.UserTag) { + s.authoriser.Tag = user + modelmanager, err := modelmanager.NewModelManagerAPI(s.State, s.resources, s.authoriser) + c.Assert(err, jc.ErrorIsNil) + s.modelmanager = modelmanager +} + +type modelManagerSuite struct { + modelManagerBaseSuite +} + +var _ = gc.Suite(&modelManagerSuite{}) + +func (s *modelManagerSuite) TestNewAPIAcceptsClient(c *gc.C) { + anAuthoriser := s.authoriser + anAuthoriser.Tag = names.NewUserTag("external@remote") + endPoint, err := modelmanager.NewModelManagerAPI(s.State, s.resources, anAuthoriser) + c.Assert(err, jc.ErrorIsNil) + c.Assert(endPoint, gc.NotNil) +} + +func (s *modelManagerSuite) TestNewAPIRefusesNonClient(c *gc.C) { + anAuthoriser := s.authoriser 
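+	// A non-client (here a unit agent) must be refused.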
+ anAuthoriser.Tag = names.NewUnitTag("mysql/0") + endPoint, err := modelmanager.NewModelManagerAPI(s.State, s.resources, anAuthoriser) + c.Assert(endPoint, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *modelManagerSuite) createArgs(c *gc.C, owner names.UserTag) params.ModelCreateArgs { + return params.ModelCreateArgs{ + OwnerTag: owner.String(), + Account: make(map[string]interface{}), + Config: map[string]interface{}{ + "name": "test-model", + "authorized-keys": "ssh-key", + // And to make it a valid dummy config + "controller": false, + }, + } +} + +func (s *modelManagerSuite) createArgsForVersion(c *gc.C, owner names.UserTag, ver interface{}) params.ModelCreateArgs { + params := s.createArgs(c, owner) + params.Config["agent-version"] = ver + return params +} + +func (s *modelManagerSuite) TestUserCanCreateModel(c *gc.C) { + owner := names.NewUserTag("external@remote") + s.setAPIUser(c, owner) + model, err := s.modelmanager.CreateModel(s.createArgs(c, owner)) + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.OwnerTag, gc.Equals, owner.String()) + c.Assert(model.Name, gc.Equals, "test-model") +} + +func (s *modelManagerSuite) TestAdminCanCreateModelForSomeoneElse(c *gc.C) { + s.setAPIUser(c, s.AdminUserTag(c)) + owner := names.NewUserTag("external@remote") + model, err := s.modelmanager.CreateModel(s.createArgs(c, owner)) + c.Assert(err, jc.ErrorIsNil) + c.Assert(model.OwnerTag, gc.Equals, owner.String()) + c.Assert(model.Name, gc.Equals, "test-model") + // Make sure that the environment created does actually have the correct + // owner, and that owner is actually allowed to use the environment. + newState, err := s.State.ForModel(names.NewModelTag(model.UUID)) + c.Assert(err, jc.ErrorIsNil) + defer newState.Close() + + newModel, err := newState.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(newModel.Owner(), gc.Equals, owner) + _, err = newState.ModelUser(owner) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *modelManagerSuite) TestNonAdminCannotCreateModelForSomeoneElse(c *gc.C) { + s.setAPIUser(c, names.NewUserTag("non-admin@remote")) + owner := names.NewUserTag("external@remote") + _, err := s.modelmanager.CreateModel(s.createArgs(c, owner)) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *modelManagerSuite) TestRestrictedProviderFields(c *gc.C) { + s.setAPIUser(c, names.NewUserTag("non-admin@remote")) + for i, test := range []struct { + provider string + expected []string + }{ + { + provider: "azure", + expected: []string{ + "type", "ca-cert", "state-port", "api-port", + "subscription-id", "tenant-id", "application-id", "application-password", "location", + "controller-resource-group", "storage-account-type"}, + }, { + provider: "dummy", + expected: []string{ + "type", "ca-cert", "state-port", "api-port"}, + }, { + provider: "joyent", + expected: []string{ + "type", "ca-cert", "state-port", "api-port"}, + }, { + provider: "maas", + expected: []string{ + "type", "ca-cert", "state-port", "api-port", + "maas-server"}, + }, { + provider: "openstack", + expected: []string{ + "type", "ca-cert", "state-port", "api-port", + "region", "auth-url", "auth-mode"}, + }, { + provider: "ec2", + expected: []string{ + "type", "ca-cert", "state-port", "api-port", + "region"}, + }, + } { + c.Logf("%d: %s provider", i, test.provider) + fields, err := modelmanager.RestrictedProviderFields(s.modelmanager, test.provider) + c.Check(err, jc.ErrorIsNil) + c.Check(fields, jc.SameContents, test.expected) + } +} + +func (s *modelManagerSuite) 
TestConfigSkeleton(c *gc.C) { + s.setAPIUser(c, names.NewUserTag("non-admin@remote")) + + _, err := s.modelmanager.ConfigSkeleton( + params.ModelSkeletonConfigArgs{Provider: "ec2"}) + c.Check(err, gc.ErrorMatches, `provider value "ec2" not valid`) + _, err = s.modelmanager.ConfigSkeleton( + params.ModelSkeletonConfigArgs{Region: "the sun"}) + c.Check(err, gc.ErrorMatches, `region value "the sun" not valid`) + + skeleton, err := s.modelmanager.ConfigSkeleton(params.ModelSkeletonConfigArgs{}) + c.Assert(err, jc.ErrorIsNil) + + // The apiPort changes every test run as the dummy provider + // looks for a random open port. + apiPort := s.Environ.Config().APIPort() + + c.Assert(skeleton.Config, jc.DeepEquals, params.ModelConfig{ + "type": "dummy", + "ca-cert": coretesting.CACert, + "state-port": 1234, + "api-port": apiPort, + }) +} + +func (s *modelManagerSuite) TestCreateModelValidatesConfig(c *gc.C) { + admin := s.AdminUserTag(c) + s.setAPIUser(c, admin) + args := s.createArgs(c, admin) + args.Config["controller"] = "maybe" + _, err := s.modelmanager.CreateModel(args) + c.Assert(err, gc.ErrorMatches, "provider validation failed: controller: expected bool, got string\\(\"maybe\"\\)") +} + +func (s *modelManagerSuite) TestCreateModelBadConfig(c *gc.C) { + owner := names.NewUserTag("external@remote") + s.setAPIUser(c, owner) + for i, test := range []struct { + key string + value interface{} + errMatch string + }{ + { + key: "uuid", + value: "anything", + errMatch: `uuid is generated, you cannot specify one`, + }, { + key: "type", + value: "fake", + errMatch: `specified type "fake" does not match apiserver "dummy"`, + }, { + key: "ca-cert", + value: coretesting.OtherCACert, + errMatch: `(?s)specified ca-cert ".*" does not match apiserver ".*"`, + }, { + key: "state-port", + value: 9876, + errMatch: `specified state-port "9876" does not match apiserver "1234"`, + }, { + // The api-port is dynamic, but always in user-space, so > 1024. 
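+			// The fixed value 123 used below is therefore guaranteed
+			// not to match the running server's port.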
+ key: "api-port", + value: 123, + errMatch: `specified api-port "123" does not match apiserver ".*"`, + }, + } { + c.Logf("%d: %s", i, test.key) + args := s.createArgs(c, owner) + args.Config[test.key] = test.value + _, err := s.modelmanager.CreateModel(args) + c.Assert(err, gc.ErrorMatches, test.errMatch) + + } +} + +func (s *modelManagerSuite) TestCreateModelSameAgentVersion(c *gc.C) { + admin := s.AdminUserTag(c) + s.setAPIUser(c, admin) + args := s.createArgsForVersion(c, admin, version.Current.String()) + _, err := s.modelmanager.CreateModel(args) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *modelManagerSuite) TestCreateModelBadAgentVersion(c *gc.C) { + s.PatchValue(&version.Current, coretesting.FakeVersionNumber) + admin := s.AdminUserTag(c) + s.setAPIUser(c, admin) + + bigger := version.Current + bigger.Minor += 1 + + smaller := version.Current + smaller.Minor -= 1 + + for i, test := range []struct { + value interface{} + errMatch string + }{ + { + value: 42, + errMatch: `failed to create config: agent-version must be a string but has type 'int'`, + }, { + value: "not a number", + errMatch: `failed to create config: invalid version \"not a number\"`, + }, { + value: bigger.String(), + errMatch: "failed to create config: agent-version cannot be greater than the server: .*", + }, { + value: smaller.String(), + errMatch: "failed to create config: no tools found for version .*", + }, + } { + c.Logf("test %d", i) + args := s.createArgsForVersion(c, admin, test.value) + _, err := s.modelmanager.CreateModel(args) + c.Check(err, gc.ErrorMatches, test.errMatch) + } +} + +func (s *modelManagerSuite) TestListModelsForSelf(c *gc.C) { + user := names.NewUserTag("external@remote") + s.setAPIUser(c, user) + result, err := s.modelmanager.ListModels(params.Entity{user.String()}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.UserModels, gc.HasLen, 0) +} + +func (s *modelManagerSuite) TestListModelsForSelfLocalUser(c *gc.C) { + // When the user's credentials cache stores the simple name, but the + // api server converts it to a fully qualified name. 
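+	// Asking for "local-user" while authenticated as
+	// "local-user@local" should succeed: both tags refer to the same
+	// local user.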
+ user := names.NewUserTag("local-user") + s.setAPIUser(c, names.NewUserTag("local-user@local")) + result, err := s.modelmanager.ListModels(params.Entity{user.String()}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.UserModels, gc.HasLen, 0) +} + +func (s *modelManagerSuite) checkModelMatches(c *gc.C, env params.Model, expected *state.Model) { + c.Check(env.Name, gc.Equals, expected.Name()) + c.Check(env.UUID, gc.Equals, expected.UUID()) + c.Check(env.OwnerTag, gc.Equals, expected.Owner().String()) +} + +func (s *modelManagerSuite) TestListModelsAdminSelf(c *gc.C) { + user := s.AdminUserTag(c) + s.setAPIUser(c, user) + result, err := s.modelmanager.ListModels(params.Entity{user.String()}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.UserModels, gc.HasLen, 1) + expected, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + s.checkModelMatches(c, result.UserModels[0].Model, expected) +} + +func (s *modelManagerSuite) TestListModelsAdminListsOther(c *gc.C) { + user := s.AdminUserTag(c) + s.setAPIUser(c, user) + other := names.NewUserTag("external@remote") + result, err := s.modelmanager.ListModels(params.Entity{other.String()}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.UserModels, gc.HasLen, 0) +} + +func (s *modelManagerSuite) TestListModelsDenied(c *gc.C) { + user := names.NewUserTag("external@remote") + s.setAPIUser(c, user) + other := names.NewUserTag("other@remote") + _, err := s.modelmanager.ListModels(params.Entity{other.String()}) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +type fakeProvider struct { + environs.EnvironProvider +} + +func (*fakeProvider) Validate(cfg, old *config.Config) (*config.Config, error) { + return cfg, nil +} + +func (*fakeProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) { + return cfg, nil +} + +func init() { + environs.RegisterProvider("fake", &fakeProvider{}) +} === added file 'src/github.com/juju/juju/apiserver/modelmanager/package_test.go' --- src/github.com/juju/juju/apiserver/modelmanager/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/modelmanager/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelmanager_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === added file 'src/github.com/juju/juju/apiserver/modelmanager/state.go' --- src/github.com/juju/juju/apiserver/modelmanager/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/modelmanager/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
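+
+// This file narrows *state.State to just the methods the facade uses;
+// getState is declared as a variable, allowing tests to patch in a fake.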
+
+package modelmanager
+
+import (
+	"github.com/juju/names"
+
+	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/state"
+)
+
+var getState = func(st *state.State) stateInterface {
+	return stateShim{st}
+}
+
+type stateInterface interface {
+	ModelsForUser(names.UserTag) ([]*state.UserModel, error)
+	IsControllerAdministrator(user names.UserTag) (bool, error)
+	NewModel(*config.Config, names.UserTag) (*state.Model, *state.State, error)
+	ControllerModel() (*state.Model, error)
+}
+
+type stateShim struct {
+	*state.State
+}

=== added file 'src/github.com/juju/juju/apiserver/network.go'
--- src/github.com/juju/juju/apiserver/network.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/network.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,56 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package apiserver
+
+import (
+	"net"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+)
+
+var (
+	// The defaults below are best suited to retries associated
+	// with disk I/O timeouts, e.g. database operations.
+	// Use the networkOperationWithRetries() variant to explicitly
+	// use retry values better suited to different scenarios.
+
+	// defaultNetworkOperationRetryDelay is the default time
+	// to wait between operation retries.
+	defaultNetworkOperationRetryDelay = 30 * time.Second
+
+	// defaultNetworkOperationAttempts is the default number
+	// of attempts before giving up.
+	defaultNetworkOperationAttempts = 10
+)
+
+// networkOperationWithDefaultRetries returns a function that calls the
+// supplied function and, if it fails with a temporary network error,
+// retries a number of times before giving up. A default attempt strategy
+// is used.
+func networkOperationWithDefaultRetries(networkOp func() error, description string) func() error {
+	attempt := utils.AttemptStrategy{
+		Delay: defaultNetworkOperationRetryDelay,
+		Min:   defaultNetworkOperationAttempts,
+	}
+	return networkOperationWithRetries(attempt, networkOp, description)
+}
+
+// networkOperationWithRetries returns a function that calls the supplied
+// function and, if it fails with a temporary network error, retries a
+// number of times before giving up.
+func networkOperationWithRetries(strategy utils.AttemptStrategy, networkOp func() error, description string) func() error {
+	return func() error {
+		for a := strategy.Start(); ; {
+			a.Next()
+			err := networkOp()
+			if !a.HasNext() || err == nil {
+				return errors.Trace(err)
+			}
+			if networkErr, ok := errors.Cause(err).(net.Error); !ok || !networkErr.Temporary() {
+				return errors.Trace(err)
+			}
+			logger.Debugf("%q error, will retry: %v", description, err)
+		}
+	}
+}

=== added file 'src/github.com/juju/juju/apiserver/network_test.go'
--- src/github.com/juju/juju/apiserver/network_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/network_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,102 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package apiserver
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/testing"
+)
+
+type networkSuite struct {
+	testing.BaseSuite
+}
+
+var _ = gc.Suite(&networkSuite{})
+
+func (s *networkSuite) TestOpSuccess(c *gc.C) {
+	isCalled := false
+	f := func() error {
+		isCalled = true
+		return nil
+	}
+	err := networkOperationWithDefaultRetries(f, "do it")()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(isCalled, jc.IsTrue)
+}
+
+func (s *networkSuite) TestOpFailureNoRetry(c *gc.C) {
+	s.PatchValue(&defaultNetworkOperationRetryDelay, 1*time.Millisecond)
+	netErr := &netError{false}
+	callCount := 0
+	f := func() error {
+		callCount++
+		return netErr
+	}
+	err := networkOperationWithDefaultRetries(f, "do it")()
+	c.Assert(errors.Cause(err), gc.Equals, netErr)
+	c.Assert(callCount, gc.Equals, 1)
+}
+
+func (s *networkSuite) TestOpFailureRetries(c *gc.C) {
+	s.PatchValue(&defaultNetworkOperationRetryDelay, 1*time.Millisecond)
+	netErr := &netError{true}
+	callCount := 0
+	f := func() error {
+		callCount++
+		return netErr
+	}
+	err := networkOperationWithDefaultRetries(f, "do it")()
+	c.Assert(errors.Cause(err), gc.Equals, netErr)
+	c.Assert(callCount, gc.Equals, 10)
+}
+
+func (s *networkSuite) TestOpNestedFailureRetries(c *gc.C) {
+	s.PatchValue(&defaultNetworkOperationRetryDelay, 1*time.Millisecond)
+	netErr := &netError{true}
+	callCount := 0
+	f := func() error {
+		callCount++
+		return errors.Annotate(errors.Trace(netErr), "create a wrapped error")
+	}
+	err := networkOperationWithDefaultRetries(f, "do it")()
+	c.Assert(errors.Cause(err), gc.Equals, netErr)
+	c.Assert(callCount, gc.Equals, 10)
+}
+
+func (s *networkSuite) TestOpSucceedsAfterRetries(c *gc.C) {
+	s.PatchValue(&defaultNetworkOperationRetryDelay, 1*time.Millisecond)
+	netErr := &netError{true}
+	callCount := 0
+	f := func() error {
+		callCount++
+		if callCount == 5 {
+			return nil
+		}
+		return netErr
+	}
+	err := networkOperationWithDefaultRetries(f, "do it")()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(callCount, gc.Equals, 5)
+}
+
+type netError struct {
+	temporary bool
+}
+
+func (e *netError) Error() string {
+	return "network error"
+}
+
+func (e *netError) Temporary() bool {
+	return e.temporary
+}
+
+func (e *netError) Timeout() bool {
+	return false
+}

=== removed directory 'src/github.com/juju/juju/apiserver/networker'
=== removed file 'src/github.com/juju/juju/apiserver/networker/networker.go'
--- src/github.com/juju/juju/apiserver/networker/networker.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/networker/networker.go	1970-01-01 00:00:00 +0000
@@ -1,188 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package networker
-
-import (
-	"github.com/juju/loggo"
-	"github.com/juju/names"
-
-	"github.com/juju/juju/apiserver/common"
-	"github.com/juju/juju/apiserver/params"
-	"github.com/juju/juju/state"
-	"github.com/juju/juju/state/watcher"
-)
-
-func init() {
-	// TODO: When the client can handle new versions, this should really be
-	// registered as version 1, since it was not present in the API in Juju
-	// 1.18
-	common.RegisterStandardFacade("Networker", 0, NewNetworkerAPI)
-}
-
-var logger = loggo.GetLogger("juju.apiserver.networker")
-
-// NetworkerAPI provides access to the Networker API facade.
-type NetworkerAPI struct { - st *state.State - resources *common.Resources - authorizer common.Authorizer - getAuthFunc common.GetAuthFunc -} - -// NewNetworkerAPI creates a new server-side Networker API facade. -func NewNetworkerAPI( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, -) (*NetworkerAPI, error) { - if !authorizer.AuthMachineAgent() { - return nil, common.ErrPerm - } - getAuthFunc := func() (common.AuthFunc, error) { - authEntityTag := authorizer.GetAuthTag() - - return func(tag names.Tag) bool { - if tag == authEntityTag { - // A machine agent can always access its own machine. - return true - } - if _, ok := tag.(names.MachineTag); !ok { - // Only machine tags are allowed. - return false - } - id := tag.Id() - for parentId := state.ParentId(id); parentId != ""; parentId = state.ParentId(parentId) { - // Until a top-level machine is reached. - - // TODO (thumper): remove the names.Tag conversion when gccgo - // implements concrete-type-to-interface comparison correctly. - if names.Tag(names.NewMachineTag(parentId)) == authEntityTag { - // All containers with the authenticated machine as a - // parent are accessible by it. - return true - } - } - // Not found authorized machine agent among ancestors of the current one. - return false - }, nil - } - - return &NetworkerAPI{ - st: st, - resources: resources, - authorizer: authorizer, - getAuthFunc: getAuthFunc, - }, nil -} - -func (n *NetworkerAPI) oneMachineConfig(id string) ([]params.NetworkConfig, error) { - machine, err := n.st.Machine(id) - if err != nil { - return nil, err - } - ifaces, err := machine.NetworkInterfaces() - if err != nil { - return nil, err - } - configs := make([]params.NetworkConfig, len(ifaces)) - for i, iface := range ifaces { - nw, err := n.st.Network(iface.NetworkName()) - if err != nil { - return nil, err - } - configs[i] = params.NetworkConfig{ - MACAddress: iface.MACAddress(), - CIDR: nw.CIDR(), - NetworkName: iface.NetworkName(), - ProviderId: string(nw.ProviderId()), - VLANTag: nw.VLANTag(), - InterfaceName: iface.RawInterfaceName(), - Disabled: iface.IsDisabled(), - // TODO(dimitern) Add the rest of the fields, once we - // store them in state. - } - } - return configs, nil -} - -// MachineNetworkInfo returns the list of networks with related interfaces for a -// given set of machines. -// DEPRECATED: Use MachineNetworkConfig() instead. -func (n *NetworkerAPI) MachineNetworkInfo(args params.Entities) (params.MachineNetworkConfigResults, error) { - return n.MachineNetworkConfig(args) -} - -// MachineNetworkConfig returns the list of networks with related interfaces -// for a given set of machines. 
-func (n *NetworkerAPI) MachineNetworkConfig(args params.Entities) (params.MachineNetworkConfigResults, error) { - result := params.MachineNetworkConfigResults{ - Results: make([]params.MachineNetworkConfigResult, len(args.Entities)), - } - canAccess, err := n.getAuthFunc() - if err != nil { - return result, err - } - for i, entity := range args.Entities { - tag, err := names.ParseTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - - if !canAccess(tag) { - err = common.ErrPerm - } else { - tag, ok := tag.(names.MachineTag) - if ok { - id := tag.Id() - result.Results[i].Config, err = n.oneMachineConfig(id) - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -func (n *NetworkerAPI) watchOneMachineInterfaces(id string) (string, error) { - machine, err := n.st.Machine(id) - if err != nil { - return "", err - } - watch := machine.WatchInterfaces() - // Consume the initial event. - if _, ok := <-watch.Changes(); ok { - return n.resources.Register(watch), nil - } - return "", watcher.EnsureErr(watch) -} - -// WatchInterfaces returns a NotifyWatcher for observing changes -// to each unit's service configuration settings. -func (n *NetworkerAPI) WatchInterfaces(args params.Entities) (params.NotifyWatchResults, error) { - result := params.NotifyWatchResults{ - Results: make([]params.NotifyWatchResult, len(args.Entities)), - } - canAccess, err := n.getAuthFunc() - if err != nil { - return result, err - } - for i, entity := range args.Entities { - tag, err := names.ParseTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - if !canAccess(tag) { - err = common.ErrPerm - } else { - tag, ok := tag.(names.MachineTag) - if ok { - id := tag.Id() - result.Results[i].NotifyWatcherId, err = n.watchOneMachineInterfaces(id) - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} === removed file 'src/github.com/juju/juju/apiserver/networker/networker_test.go' --- src/github.com/juju/juju/apiserver/networker/networker_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/networker/networker_test.go 1970-01-01 00:00:00 +0000 @@ -1,404 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - "runtime" - "sort" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/networker" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/instance" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" -) - -type networkerSuite struct { - testing.JujuConnSuite - - networks []state.NetworkInfo - - machine *state.Machine - container *state.Machine - nestedContainer *state.Machine - - machineIfaces []state.NetworkInterfaceInfo - containerIfaces []state.NetworkInterfaceInfo - nestedContainerIfaces []state.NetworkInterfaceInfo - - authorizer apiservertesting.FakeAuthorizer - resources *common.Resources - networker *networker.NetworkerAPI -} - -var _ = gc.Suite(&networkerSuite{}) - -// Create several networks. 
-func (s *networkerSuite) setUpNetworks(c *gc.C) { - s.networks = []state.NetworkInfo{{ - Name: "net1", - ProviderId: "net1", - CIDR: "0.1.2.0/24", - VLANTag: 0, - }, { - Name: "vlan42", - ProviderId: "vlan42", - CIDR: "0.2.2.0/24", - VLANTag: 42, - }, { - Name: "vlan69", - ProviderId: "vlan69", - CIDR: "0.3.2.0/24", - VLANTag: 69, - }, { - Name: "vlan123", - ProviderId: "vlan123", - CIDR: "0.4.2.0/24", - VLANTag: 123, - }, { - Name: "net2", - ProviderId: "net2", - CIDR: "0.5.2.0/24", - VLANTag: 0, - }} -} - -// Create a machine to use. -func (s *networkerSuite) setUpMachine(c *gc.C) { - var err error - s.machine, err = s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - hwChars := instance.MustParseHardware("arch=i386", "mem=4G") - s.machineIfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0.69", - NetworkName: "vlan69", - IsVirtual: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f2", - InterfaceName: "eth2", - NetworkName: "net2", - IsVirtual: false, - Disabled: true, - }} - err = s.machine.SetInstanceInfo("i-am", "fake_nonce", &hwChars, s.networks, s.machineIfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -// Create and provision a container and a nested container. -func (s *networkerSuite) setUpContainers(c *gc.C) { - template := state.MachineTemplate{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - } - var err error - s.container, err = s.State.AddMachineInsideMachine(template, s.machine.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - s.containerIfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:e0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }} - hwChars := instance.MustParseHardware("arch=i386", "mem=4G") - err = s.container.SetInstanceInfo("i-container", "fake_nonce", &hwChars, s.networks[:2], - s.containerIfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - s.nestedContainer, err = s.State.AddMachineInsideMachine(template, s.container.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - s.nestedContainerIfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:d0", - InterfaceName: "eth0", - NetworkName: "net1", - }} - err = s.nestedContainer.SetInstanceInfo("i-too", "fake_nonce", &hwChars, s.networks[:1], - s.nestedContainerIfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *networkerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - s.setUpNetworks(c) - s.setUpMachine(c) - s.setUpContainers(c) - - // Create a FakeAuthorizer so we can check permissions, - // set up assuming we logged in as a machine agent. - s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: s.machine.Tag(), - } - - // Create the resource registry separately to track invocations to - // Register. - s.resources = common.NewResources() - - // Create a networker API for the machine. 
- var err error - s.networker, err = networker.NewNetworkerAPI( - s.State, - s.resources, - s.authorizer, - ) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *networkerSuite) TestNetworkerNonMachineAgent(c *gc.C) { - // Fails with not a machine agent - anAuthorizer := s.authorizer - anAuthorizer.Tag = names.NewUnitTag("ubuntu/1") - aNetworker, err := networker.NewNetworkerAPI(s.State, s.resources, anAuthorizer) - c.Assert(err, gc.ErrorMatches, "permission denied") - c.Assert(aNetworker, gc.IsNil) -} - -func (s *networkerSuite) TestMachineNetworkConfigPermissions(c *gc.C) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "service-bar"}, - {Tag: "foo-42"}, - {Tag: "unit-mysql-0"}, - {Tag: "service-mysql"}, - {Tag: "user-foo"}, - {Tag: "machine-1"}, - {Tag: "machine-0-lxc-42"}, - }} - results, err := s.networker.MachineNetworkConfig(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, params.MachineNetworkConfigResults{ - Results: []params.MachineNetworkConfigResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.NotFoundError("machine 0/lxc/42")}, - }, - }) -} - -type orderedNetwork []params.NetworkConfig - -func (o orderedNetwork) Len() int { - return len(o) -} - -func (o orderedNetwork) Less(i, j int) bool { - if o[i].MACAddress < o[j].MACAddress { - return true - } - if o[i].MACAddress > o[j].MACAddress { - return false - } - if o[i].CIDR < o[j].CIDR { - return true - } - if o[i].CIDR > o[j].CIDR { - return false - } - if o[i].NetworkName < o[j].NetworkName { - return true - } - if o[i].NetworkName > o[j].NetworkName { - return false - } - return o[i].VLANTag < o[j].VLANTag -} - -func (o orderedNetwork) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} - -func (s *networkerSuite) TestMachineNetworkConfig(c *gc.C) { - // TODO(bogdanteleaga): Find out what's the problem with this test - // It seems to work on some machines - if runtime.GOOS == "windows" { - c.Skip("bug 1403084: currently does not work on windows") - } - // Expected results of MachineNetworkConfig for a machine and containers - expectedMachineConfig := []params.NetworkConfig{{ - MACAddress: "aa:bb:cc:dd:ee:f0", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth0", - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth1", - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "0.2.2.0/24", - NetworkName: "vlan42", - ProviderId: "vlan42", - VLANTag: 42, - InterfaceName: "eth1", - }, { - MACAddress: "aa:bb:cc:dd:ee:f0", - CIDR: "0.3.2.0/24", - NetworkName: "vlan69", - ProviderId: "vlan69", - VLANTag: 69, - InterfaceName: "eth0", - }, { - MACAddress: "aa:bb:cc:dd:ee:f2", - CIDR: "0.5.2.0/24", - NetworkName: "net2", - ProviderId: "net2", - VLANTag: 0, - InterfaceName: "eth2", - Disabled: true, - }} - expectedContainerConfig := []params.NetworkConfig{{ - MACAddress: "aa:bb:cc:dd:ee:e0", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth0", - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth1", - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - CIDR: "0.2.2.0/24", - NetworkName: "vlan42", - 
ProviderId: "vlan42", - VLANTag: 42, - InterfaceName: "eth1", - }} - expectedNestedContainerConfig := []params.NetworkConfig{{ - MACAddress: "aa:bb:cc:dd:ee:d0", - CIDR: "0.1.2.0/24", - NetworkName: "net1", - ProviderId: "net1", - VLANTag: 0, - InterfaceName: "eth0", - }} - args := params.Entities{Entities: []params.Entity{ - {Tag: "machine-0"}, - {Tag: "machine-0-lxc-0"}, - {Tag: "machine-0-lxc-0-lxc-0"}, - }} - - sort.Sort(orderedNetwork(expectedMachineConfig)) - sort.Sort(orderedNetwork(expectedContainerConfig)) - sort.Sort(orderedNetwork(expectedNestedContainerConfig)) - - expected := [][]params.NetworkConfig{ - expectedMachineConfig, - expectedContainerConfig, - expectedNestedContainerConfig, - } - - assert := func(f func(params.Entities) (params.MachineNetworkConfigResults, error)) { - results, err := f(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results.Results, gc.HasLen, 3) - for i, r := range results.Results { - c.Assert(r.Error, gc.IsNil) - sort.Sort(orderedNetwork(r.Config)) - c.Assert(r.Config, jc.DeepEquals, expected[i]) - } - } - assert(s.networker.MachineNetworkInfo) - assert(s.networker.MachineNetworkConfig) -} - -func (s *networkerSuite) TestWatchInterfacesPermissions(c *gc.C) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "service-bar"}, - {Tag: "foo-42"}, - {Tag: "unit-mysql-0"}, - {Tag: "service-mysql"}, - {Tag: "user-foo"}, - {Tag: "machine-1"}, - {Tag: "machine-0-lxc-42"}, - }} - results, err := s.networker.WatchInterfaces(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, params.NotifyWatchResults{ - Results: []params.NotifyWatchResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.NotFoundError("machine 0/lxc/42")}, - }, - }) -} - -func (s *networkerSuite) TestWatchInterfaces(c *gc.C) { - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "machine-0"}, - {Tag: "machine-0-lxc-0"}, - {Tag: "machine-0-lxc-0-lxc-0"}, - }} - result, err := s.networker.WatchInterfaces(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ - Results: []params.NotifyWatchResult{ - {NotifyWatcherId: "1"}, - {NotifyWatcherId: "2"}, - {NotifyWatcherId: "3"}, - }, - }) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 3) - for _, watcherId := range []string{"1", "2", "3"} { - resource := s.resources.Get(watcherId) - defer statetesting.AssertStop(c, resource) - - // Check that the WatchInterfaces has consumed the initial event ("returned" in - // the Watch call) - wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher)) - wc.AssertNoChange() - } -} === removed file 'src/github.com/juju/juju/apiserver/networker/package_test.go' --- src/github.com/juju/juju/apiserver/networker/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/networker/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package networker_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === added file 'src/github.com/juju/juju/apiserver/package_test.go' --- src/github.com/juju/juju/apiserver/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver_test + +import ( + stdtesting "testing" + + "github.com/juju/testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + if testing.RaceEnabled { + t.Skip("skipping package under -race, see LP 1518806") + } + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/apiserver/params/actions.go' --- src/github.com/juju/juju/apiserver/params/actions.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/params/actions.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "time" // TODO(jcw4) per fwereade 2014-11-21 remove this dependency - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) const ( === modified file 'src/github.com/juju/juju/apiserver/params/apierror.go' --- src/github.com/juju/juju/apiserver/params/apierror.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/params/apierror.go 2016-03-22 15:18:22 +0000 @@ -7,14 +7,35 @@ "fmt" "github.com/juju/errors" - - "github.com/juju/juju/rpc" + "gopkg.in/macaroon.v1" ) -// Error is the type of error returned by any call to the state API +// Error is the type of error returned by any call to the state API. type Error struct { Message string Code string + Info *ErrorInfo `json:",omitempty"` +} + +// ErrorInfo holds additional information provided by an error. +// Note that although these fields are compatible with the +// same fields in httpbakery.ErrorInfo, the Juju API server does +// not implement endpoints directly compatible with that protocol +// because the error response format varies according to +// the endpoint. +type ErrorInfo struct { + // Macaroon may hold a macaroon that, when + // discharged, may allow access to the juju API. + // This field is associated with the ErrDischargeRequired + // error code. + Macaroon *macaroon.Macaroon `json:",omitempty"` + + // MacaroonPath holds the URL path to be associated + // with the macaroon. The macaroon is potentially + // valid for all URLs under the given path. + // If it is empty, the macaroon will be associated with + // the original URL from which the error was returned. + MacaroonPath string `json:",omitempty"` } func (e Error) Error() string { @@ -25,8 +46,6 @@ return e.Code } -var _ rpc.ErrorCoder = (*Error)(nil) - // GoString implements fmt.GoStringer. It means that a *Error shows its // contents correctly when printed with %#v. 
func (e Error) GoString() string { @@ -49,40 +68,32 @@ CodeNotProvisioned = "not provisioned" CodeNoAddressSet = "no address set" CodeTryAgain = "try again" - CodeNotImplemented = rpc.CodeNotImplemented + CodeNotImplemented = "not implemented" // asserted to match rpc.codeNotImplemented in rpc/rpc_test.go CodeAlreadyExists = "already exists" CodeUpgradeInProgress = "upgrade in progress" CodeActionNotAvailable = "action no longer available" CodeOperationBlocked = "operation is blocked" CodeLeadershipClaimDenied = "leadership claim denied" + CodeLeaseClaimDenied = "lease claim denied" CodeNotSupported = "not supported" + CodeBadRequest = "bad request" + CodeMethodNotAllowed = "method not allowed" + CodeForbidden = "forbidden" + CodeDischargeRequired = "macaroon discharge required" ) // ErrCode returns the error code associated with // the given error, or the empty string if there // is none. func ErrCode(err error) string { - err = errors.Cause(err) - if err, _ := err.(rpc.ErrorCoder); err != nil { + type ErrorCoder interface { + ErrorCode() string + } + switch err := errors.Cause(err).(type) { + case ErrorCoder: return err.ErrorCode() - } - return "" -} - -// ClientError maps errors returned from an RPC call into local errors with -// appropriate values. -func ClientError(err error) error { - rerr, ok := err.(*rpc.RequestError) - if !ok { - return err - } - // We use our own error type rather than rpc.ServerError - // because we don't want the code or the "server error" prefix - // within the error message. Also, it's best not to make clients - // know that we're using the rpc package. - return &Error{ - Message: rerr.Message, - Code: rerr.Code, + default: + return "" } } @@ -175,6 +186,18 @@ return ErrCode(err) == CodeLeadershipClaimDenied } +func IsCodeLeaseClaimDenied(err error) bool { + return ErrCode(err) == CodeLeaseClaimDenied +} + func IsCodeNotSupported(err error) bool { return ErrCode(err) == CodeNotSupported } + +func IsBadRequest(err error) bool { + return ErrCode(err) == CodeBadRequest +} + +func IsMethodNotAllowed(err error) bool { + return ErrCode(err) == CodeMethodNotAllowed +} === modified file 'src/github.com/juju/juju/apiserver/params/apierror_test.go' --- src/github.com/juju/juju/apiserver/params/apierror_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/params/apierror_test.go 2016-03-22 15:18:22 +0000 @@ -8,10 +8,13 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/rpc" ) type errorSuite struct{} +var _ rpc.ErrorCoder = (*params.Error)(nil) + var _ = gc.Suite(&errorSuite{}) func (*errorSuite) TestErrCode(c *gc.C) { === modified file 'src/github.com/juju/juju/apiserver/params/backups.go' --- src/github.com/juju/juju/apiserver/params/backups.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/params/backups.go 2016-03-22 15:18:22 +0000 @@ -59,13 +59,13 @@ Size int64 Stored time.Time // May be zero... - Started time.Time - Finished time.Time // May be zero... - Notes string - Environment string - Machine string - Hostname string - Version version.Number + Started time.Time + Finished time.Time // May be zero... 
+	Started  time.Time
+	Finished time.Time // May be zero...
+	Notes    string
+	Model    string
+	Machine  string
+	Hostname string
+	Version  version.Number
 }
 
 // RestoreArgs Holds the backup file or id

=== modified file 'src/github.com/juju/juju/apiserver/params/block.go'
--- src/github.com/juju/juju/apiserver/params/block.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/params/block.go	2016-03-22 15:18:22 +0000
@@ -3,7 +3,7 @@
 
 package params
 
-// Block describes a Juju block that protects environment from
+// Block describes a Juju block that protects a model from
 // corruption.
 type Block struct {
 	// Id is this blocks id.

=== modified file 'src/github.com/juju/juju/apiserver/params/constants.go'
--- src/github.com/juju/juju/apiserver/params/constants.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/apiserver/params/constants.go	2016-03-22 15:18:22 +0000
@@ -30,3 +30,5 @@
 	ResolvedRetryHooks ResolvedMode = "retry-hooks"
 	ResolvedNoHooks    ResolvedMode = "no-hooks"
 )
+
+const MachineNonceHeader = "X-Juju-Nonce"

=== removed file 'src/github.com/juju/juju/apiserver/params/environment.go'
--- src/github.com/juju/juju/apiserver/params/environment.go	2016-03-14 14:26:14 +0000
+++ src/github.com/juju/juju/apiserver/params/environment.go	1970-01-01 00:00:00 +0000
@@ -1,75 +0,0 @@
-// Copyright 2015 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package params
-
-import (
-	"time"
-
-	"github.com/juju/juju/version"
-)
-
-// EnvironmentConfigResults contains the result of client API calls
-// to get environment config values.
-type EnvironmentConfigResults struct {
-	Config map[string]interface{}
-}
-
-// EnvironmentSet contains the arguments for EnvironmentSet client API
-// call.
-type EnvironmentSet struct {
-	Config map[string]interface{}
-}
-
-// EnvironmentUnset contains the arguments for EnvironmentUnset client API
-// call.
-type EnvironmentUnset struct {
-	Keys []string
-}
-
-// ModifyEnvironUsers holds the parameters for making Client ShareEnvironment calls.
-type ModifyEnvironUsers struct {
-	Changes []ModifyEnvironUser
-}
-
-// EnvironAction is an action that can be preformed on an environment.
-type EnvironAction string
-
-// Actions that can be preformed on an environment.
-const (
-	AddEnvUser    EnvironAction = "add"
-	RemoveEnvUser EnvironAction = "remove"
-)
-
-// ModifyEnvironUser stores the parameters used for a Client.ShareEnvironment call.
-type ModifyEnvironUser struct {
-	UserTag string        `json:"user-tag"`
-	Action  EnvironAction `json:"action"`
-}
-
-// SetEnvironAgentVersion contains the arguments for
-// SetEnvironAgentVersion client API call.
-type SetEnvironAgentVersion struct {
-	Version             version.Number `json:"version"`
-	MajorUpgradeAllowed bool           `json:"majorupgradeallowed"`
-}
-
-// EnvUserInfo holds information on a user.
-type EnvUserInfo struct {
-	UserName       string     `json:"user"`
-	DisplayName    string     `json:"displayname"`
-	CreatedBy      string     `json:"createdby"`
-	DateCreated    time.Time  `json:"datecreated"`
-	LastConnection *time.Time `json:"lastconnection"`
-}
-
-// EnvUserInfoResult holds the result of an EnvUserInfo call.
-type EnvUserInfoResult struct {
-	Result *EnvUserInfo `json:"result,omitempty"`
-	Error  *Error       `json:"error,omitempty"`
-}
-
-// EnvUserInfoResults holds the result of a bulk EnvUserInfo API call.
-type EnvUserInfoResults struct {
-	Results []EnvUserInfoResult `json:"results"`
-}

=== added file 'src/github.com/juju/juju/apiserver/params/http.go'
--- src/github.com/juju/juju/apiserver/params/http.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/params/http.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,33 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package params
+
+// DigestAlgorithm is one of the values in the IANA registry. See
+// RFC 3230 and 5843.
+//
+// Note that currently Juju does not conform to the standard.
+// It stores a hexadecimal SHA256 value in the Digest header,
+// but the above RFCs specify SHA-256 and a base64-encoded
+// value for this.
+// TODO fix that. https://bugs.launchpad.net/juju-core/+bug/1503992
+type DigestAlgorithm string
+
+const (
+	// DigestSHA is the HTTP digest algorithm value used in juju's HTTP code.
+	DigestSHA DigestAlgorithm = "SHA"
+
+	// The values used for content-type in juju's direct HTTP code:
+
+	// ContentTypeJSON is the HTTP content-type value used for JSON content.
+	ContentTypeJSON = "application/json"
+
+	// ContentTypeRaw is the HTTP content-type value used for raw, unformatted content.
+	ContentTypeRaw = "application/octet-stream"
+
+	// ContentTypeJS is the HTTP content-type value used for javascript.
+	ContentTypeJS = "application/javascript"
+
+	// ContentTypeXJS is the outdated HTTP content-type value used for javascript.
+	ContentTypeXJS = "application/x-javascript"
+)

=== modified file 'src/github.com/juju/juju/apiserver/params/image_metadata.go'
--- src/github.com/juju/juju/apiserver/params/image_metadata.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/image_metadata.go	2016-03-22 15:18:22 +0000
@@ -21,8 +21,8 @@
 	// simplestreams metadata supports.
 	Stream string `json:"stream,omitempty"`
 
-	// VirtualType stores virtual type.
-	VirtualType string `json:"virtual_type,omitempty"`
+	// VirtType stores the virtualisation type.
+	VirtType string `json:"virt_type,omitempty"`
 
 	// RootStorageType stores storage type.
 	RootStorageType string `json:"root-storage-type,omitempty"`
@@ -40,14 +40,17 @@
 	// Region is the name of cloud region associated with the image.
 	Region string `json:"region"`
 
-	// Series is OS version, for e.g. "quantal".
+	// Version is the OS version, e.g. "12.04".
+	Version string `json:"version"`
+
+	// Series is the OS series, e.g. "trusty".
 	Series string `json:"series"`
 
 	// Arch is the architecture for this cloud image, for e.g. "amd64"
 	Arch string `json:"arch"`
 
-	// VirtualType contains the type of the cloud image, for e.g. "pv", "hvm". "kvm".
-	VirtualType string `json:"virtual_type,omitempty"`
+	// VirtType contains the virtualisation type of the cloud image, e.g. "pv", "hvm", "kvm".
+	VirtType string `json:"virt_type,omitempty"`
 
 	// RootStorageType contains type of root storage, for e.g. "ebs", "instance".
 	RootStorageType string `json:"root_storage_type,omitempty"`
@@ -57,6 +60,11 @@
 
 	// Source describes where this image is coming from: is it public? custom?
 	Source string `json:"source"`
+
+	// Priority is an importance factor for image metadata.
+	// Higher numbers mean higher priority.
+	// This allows metadata to be sorted by importance.
+	Priority int `json:"priority"`
 }
 
 // ListCloudImageMetadataResult holds the results of querying cloud image metadata.
@@ -64,7 +72,18 @@
 	Result []CloudImageMetadata `json:"result"`
 }
 
-// MetadataSaveParams holds cloud image metadata details to save.
+// MetadataSaveParams holds lists of cloud image metadata to save. Each list
+// will be saved atomically.
 type MetadataSaveParams struct {
-	Metadata []CloudImageMetadata `json:"metadata"`
+	Metadata []CloudImageMetadataList `json:"metadata,omitempty"`
+}
+
+// CloudImageMetadataList holds a list of cloud image metadata.
+type CloudImageMetadataList struct {
+	Metadata []CloudImageMetadata `json:"metadata,omitempty"`
+}
+
+// MetadataImageIds holds image ids and can be used to identify related image metadata.
+type MetadataImageIds struct {
+	Ids []string `json:"image_ids"`
 }

=== modified file 'src/github.com/juju/juju/apiserver/params/internal.go'
--- src/github.com/juju/juju/apiserver/params/internal.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/internal.go	2016-03-22 15:18:22 +0000
@@ -75,24 +75,24 @@
 	Results []StringResult
 }
 
-// EnvironmentResult holds the result of an API call returning a name and UUID
-// for an environment.
-type EnvironmentResult struct {
+// ModelResult holds the result of an API call returning a name and UUID
+// for a model.
+type ModelResult struct {
 	Error *Error
 	Name  string
 	UUID  string
 }
 
-// EnvironmentSkeletonConfigArgs wraps the args for environmentmanager.SkeletonConfig.
-type EnvironmentSkeletonConfigArgs struct {
+// ModelSkeletonConfigArgs wraps the args for modelmanager.SkeletonConfig.
+type ModelSkeletonConfigArgs struct {
 	Provider string
 	Region   string
 }
 
-// EnvironmentCreateArgs holds the arguments that are necessary to create
-// and environment.
-type EnvironmentCreateArgs struct {
-	// OwnerTag represents the user that will own the new environment.
+// ModelCreateArgs holds the arguments that are necessary to create
+// a model.
+type ModelCreateArgs struct {
+	// OwnerTag represents the user that will own the new model.
 	// The OwnerTag must be a valid user tag. If the user tag represents
 	// a local user, that user must exist.
 	OwnerTag string
@@ -101,32 +101,31 @@
 	// interact with the provider to create, list and destroy machines.
 	Account map[string]interface{}
 
-	// Config defines the environment config, which includes the name of the
-	// environment. An environment UUID is allocated by the API server during
-	// the creation of the environment.
+	// Config defines the model config, which includes the name of the
+	// model. A model UUID is allocated by the API server during
+	// the creation of the model.
 	Config map[string]interface{}
 }
 
-// Environment holds the result of an API call returning a name and UUID
-// for an environment and the tag of the server in which it is running.
-type Environment struct {
-	Name       string
-	UUID       string
-	OwnerTag   string
-	ServerUUID string
+// Model holds the result of an API call returning a name and UUID
+// for a model and the tag of the server in which it is running.
+type Model struct {
+	Name     string
+	UUID     string
+	OwnerTag string
 }
 
-// UserEnvironment holds information about an environment and the last
-// time the environment was accessed for a particular user.
-type UserEnvironment struct {
-	Environment
+// UserModel holds information about a model and the last
+// time the model was accessed for a particular user.
+type UserModel struct {
+	Model
 	LastConnection *time.Time
 }
 
-// UserEnvironmentList holds information about a list of environments
+// UserModelList holds information about a list of models
 // for a particular user.
-type UserEnvironmentList struct {
-	UserEnvironments []UserEnvironment
+type UserModelList struct {
+	UserModels []UserModel
 }
 
 // ResolvedModeResult holds a resolved mode or an error.
@@ -167,6 +166,21 @@
 	Results []BoolResult
 }
 
+// IntResults holds multiple results with an int in each.
+type IntResults struct {
+	// Results holds a list of results for calls that return an int or error.
+	Results []IntResult
+}
+
+// IntResult holds the result of an API call that returns an
+// int or an error.
+type IntResult struct {
+	// Error holds the error (if any) of this call.
+	Error *Error
+	// Result holds the integer result of the call (if Error is nil).
+	Result int
+}
+
 // Settings holds relation settings names and values.
 type Settings map[string]string
 
@@ -197,12 +211,12 @@
 	Results []ConfigSettingsResult
 }
 
-// EnvironConfig holds an environment configuration.
-type EnvironConfig map[string]interface{}
+// ModelConfig holds a model configuration.
+type ModelConfig map[string]interface{}
 
-// EnvironConfigResult holds environment configuration or an error.
-type EnvironConfigResult struct {
-	Config EnvironConfig
+// ModelConfigResult holds model configuration or an error.
+type ModelConfigResult struct {
+	Config ModelConfig
 }
 
 // RelationUnit holds a relation and a unit tag.
@@ -295,29 +309,6 @@
 	Results []LifeResult
 }
 
-// MachineSetProvisioned holds a machine tag, provider-specific
-// instance id, a nonce, or an error.
-//
-// NOTE: This is deprecated since 1.19.0 and not used by the
-// provisioner, it's just retained for backwards-compatibility and
-// should be removed.
-type MachineSetProvisioned struct {
-	Tag             string
-	InstanceId      instance.Id
-	Nonce           string
-	Characteristics *instance.HardwareCharacteristics
-}
-
-// SetProvisioned holds the parameters for making a SetProvisioned
-// call for a machine.
-//
-// NOTE: This is deprecated since 1.19.0 and not used by the
-// provisioner, it's just retained for backwards-compatibility and
-// should be removed.
-type SetProvisioned struct {
-	Machines []MachineSetProvisioned
-}
-
 // InstanceInfo holds a machine tag, provider-specific instance id, a
 // nonce, a list of networks and interfaces to set up.
 type InstanceInfo struct {
@@ -468,26 +459,45 @@
 	Results []StringsWatchResult
 }
 
-// EntityWatchResult holds a EntityWatcher id, changes and an error
+// EntitiesWatchResult holds an EntitiesWatcher id, changes and an error
 // (if any).
-type EntityWatchResult struct {
-	EntityWatcherId string   `json:"EntityWatcherId"`
-	Changes         []string `json:"Changes"`
-	Error           *Error   `json:"Error"`
-}
-
-// EntityWatchResults holds the results for any API call which ends up
-// returning a list of EntityWatchers.
-type EntityWatchResults struct {
-	Results []EntityWatchResult
-}
-
-// RelationUnitsWatchResult holds a RelationUnitsWatcher id, changes
-// and an error (if any).
+type EntitiesWatchResult struct {
+	// Note legacy serialization tag.
+	EntitiesWatcherId string   `json:"EntityWatcherId"`
+	Changes           []string `json:"Changes"`
+	Error             *Error   `json:"Error"`
+}
+
+// EntitiesWatchResults holds the results for any API call which ends up
+// returning a list of EntitiesWatchers.
+type EntitiesWatchResults struct {
+	Results []EntitiesWatchResult `json:"Results"`
+}
+
+// UnitSettings specifies the version of some unit's settings in some relation.
+type UnitSettings struct {
+	Version int64 `json:"Version"`
+}
+
+// RelationUnitsChange describes the membership and settings of, or changes to,
+// some relation scope.
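+// Units absent from both Changed and Departed are unchanged since the
+// previous event.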
+type RelationUnitsChange struct { + + // Changed holds a set of units that are known to be in scope, and the + // latest known settings version for each. + Changed map[string]UnitSettings `json:"Changed"` + + // Departed holds a set of units that have previously been reported to + // be in scope, but which no longer are. + Departed []string `json:"Departed"` +} + +// RelationUnitsWatchResult holds a RelationUnitsWatcher id, baseline state +// (in the Changes field), and an error (if any). type RelationUnitsWatchResult struct { - RelationUnitsWatcherId string - Changes multiwatcher.RelationUnitsChange - Error *Error + RelationUnitsWatcherId string `json:"RelationUnitsWatcherId"` + Changes RelationUnitsChange `json:"Changes"` + Error *Error `json:"Error"` } // RelationUnitsWatchResults holds the results for any API call which ends up @@ -512,7 +522,18 @@ // CharmsResponse is the server response to charm upload or GET requests. type CharmsResponse struct { - Error string `json:",omitempty"` + Error string `json:",omitempty"` + + // ErrorCode holds the code associated with the error. + // Ideally, Error would hold an Error object and the + // code would be in that, but for backward compatibility, + // we cannot do that. + ErrorCode string `json:",omitempty"` + + // ErrorInfo holds extra information associated with the error. + // Like ErrorCode, this should really be in an Error object. + ErrorInfo *ErrorInfo + CharmURL string `json:",omitempty"` Files []string `json:",omitempty"` } @@ -551,14 +572,16 @@ // ProvisioningInfo holds machine provisioning info. type ProvisioningInfo struct { - Constraints constraints.Value - Series string - Placement string - Networks []string - Jobs []multiwatcher.MachineJob - Volumes []VolumeParams - Tags map[string]string - SubnetsToZones map[string][]string + Constraints constraints.Value + Series string + Placement string + Networks []string + Jobs []multiwatcher.MachineJob + Volumes []VolumeParams + Tags map[string]string + SubnetsToZones map[string][]string + ImageMetadata []CloudImageMetadata + EndpointBindings map[string]string } // ProvisioningInfoResult holds machine provisioning info or an error. @@ -620,3 +643,16 @@ type MeterStatusResults struct { Results []MeterStatusResult } + +// SingularClaim represents a request for exclusive model administration access +// on the part of some controller. +type SingularClaim struct { + ModelTag string `json:"ModelTag"` + ControllerTag string `json:"ControllerTag"` + Duration time.Duration `json:"Duration"` +} + +// SingularClaims holds any number of SingularClaim~s. +type SingularClaims struct { + Claims []SingularClaim `json:"Claims"` +} === added file 'src/github.com/juju/juju/apiserver/params/metrics.go' --- src/github.com/juju/juju/apiserver/params/metrics.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/params/metrics.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package params + +import ( + "time" +) + +// MetricResults contains results from a GetMetrics call, with +// one item per Entity given as an argument to the command. +type MetricResults struct { + Results []EntityMetrics `json:"results"` +} + +// OneError returns the first error +func (m *MetricResults) OneError() error { + for _, r := range m.Results { + if err := r.Error; err != nil { + return err + } + } + return nil +} + +// EntityMetrics contains the results of a GetMetrics call for a single +// entity. 
+
+// EntityMetrics contains the results of a GetMetrics call for a single
+// entity.
+type EntityMetrics struct {
+ Metrics []MetricResult `json:"metrics,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// MetricResult contains a single metric.
+type MetricResult struct {
+ Time time.Time `json:"time"`
+ Key string `json:"key"`
+ Value string `json:"value"`
+}

=== added file 'src/github.com/juju/juju/apiserver/params/model.go'
--- src/github.com/juju/juju/apiserver/params/model.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/params/model.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,74 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package params
+
+import (
+ "time"
+
+ "github.com/juju/juju/version"
+)
+
+// ModelConfigResults contains the result of client API calls
+// to get model config values.
+type ModelConfigResults struct {
+ Config map[string]interface{}
+}
+
+// ModelSet contains the arguments for ModelSet client API
+// call.
+type ModelSet struct {
+ Config map[string]interface{}
+}
+
+// ModelUnset contains the arguments for ModelUnset client API
+// call.
+type ModelUnset struct {
+ Keys []string
+}
+
+// ModifyModelUsers holds the parameters for making Client ShareModel calls.
+type ModifyModelUsers struct {
+ Changes []ModifyModelUser
+}
+
+// ModelAction is an action that can be performed on a model.
+type ModelAction string
+
+// Actions that can be performed on a model.
+const (
+ AddModelUser ModelAction = "add"
+ RemoveModelUser ModelAction = "remove"
+)
+
+// ModifyModelUser stores the parameters used for a Client.ShareModel call.
+type ModifyModelUser struct {
+ UserTag string `json:"user-tag"`
+ Action ModelAction `json:"action"`
+}
+
+// SetModelAgentVersion contains the arguments for
+// SetModelAgentVersion client API call.
+type SetModelAgentVersion struct {
+ Version version.Number
+}
+
+// ModelUserInfo holds information on a user.
+type ModelUserInfo struct {
+ UserName string `json:"user"`
+ DisplayName string `json:"displayname"`
+ CreatedBy string `json:"createdby"`
+ DateCreated time.Time `json:"datecreated"`
+ LastConnection *time.Time `json:"lastconnection"`
+}
+
+// ModelUserInfoResult holds the result of a ModelUserInfo call.
+type ModelUserInfoResult struct {
+ Result *ModelUserInfo `json:"result,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// ModelUserInfoResults holds the result of a bulk ModelUserInfo API call.
+type ModelUserInfoResults struct {
+ Results []ModelUserInfoResult `json:"results"`
+}

=== modified file 'src/github.com/juju/juju/apiserver/params/network.go'
--- src/github.com/juju/juju/apiserver/params/network.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/network.go 2016-03-22 15:18:22 +0000
@@ -19,7 +19,7 @@
 CIDR string `json:"CIDR"`

 // ProviderId is the provider-specific subnet ID (if applicable).
- ProviderId string `json:"ProviderId,omitempty`
+ ProviderId string `json:"ProviderId,omitempty"`

 // VLANTag needs to be between 1 and 4094 for VLANs and 0 for
 // normal networks. It's defined by IEEE 802.1Q standard.
@@ -162,6 +162,12 @@
 ExtraConfig map[string]string `json:"ExtraConfig,omitempty"`
 }

+// NetworkConfigs holds the network configuration for multiple networks.
+type NetworkConfigs struct {
+ Results []NetworkConfig
+ Errors []error
+}
+
 // Port encapsulates a protocol and port number. It is used in API
 // requests/responses. See also network.Port, from/to which this is
 // transformed.
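The model.go types above are plain request/response shapes. A sketch of building a bulk ModifyModelUsers request granting one user access and revoking another's; the tag strings are illustrative only:

package main

import (
	"fmt"

	"github.com/juju/juju/apiserver/params"
)

func main() {
	// One request can carry several changes; the server reports
	// per-change results in the same order.
	args := params.ModifyModelUsers{
		Changes: []params.ModifyModelUser{
			{UserTag: "user-alice@local", Action: params.AddModelUser},
			{UserTag: "user-bob@local", Action: params.RemoveModelUser},
		},
	}
	fmt.Printf("%+v\n", args)
}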
@@ -253,6 +259,7 @@
 Type string `json:"Type"`
 NetworkName string `json:"NetworkName"`
 Scope string `json:"Scope"`
+ SpaceName string `json:"SpaceName,omitempty"`
 }

 // FromNetworkAddress is a convenience helper to create a parameter
@@ -263,6 +270,7 @@
 Type: string(naddr.Type),
 NetworkName: naddr.NetworkName,
 Scope: string(naddr.Scope),
+ SpaceName: string(naddr.SpaceName),
 }
 }

@@ -274,6 +282,7 @@
 Type: network.AddressType(addr.Type),
 NetworkName: addr.NetworkName,
 Scope: network.Scope(addr.Scope),
+ SpaceName: network.SpaceName(addr.SpaceName),
 }
 }

@@ -425,6 +434,19 @@
 Results []RequestedNetworkResult `json:"Results"`
 }

+// UnitNetworkConfigResult holds network configuration for a single unit.
+type UnitNetworkConfigResult struct {
+ Error *Error `json:"Error"`
+
+ // Tagged as "Info" for backwards compatibility.
+ Config []NetworkConfig `json:"Info"`
+}
+
+// UnitNetworkConfigResults holds network configuration for multiple units.
+type UnitNetworkConfigResults struct {
+ Results []UnitNetworkConfigResult `json:"Results"`
+}
+
 // MachineNetworkConfigResult holds network configuration for a single machine.
 type MachineNetworkConfigResult struct {
 Error *Error `json:"Error"`
@@ -551,6 +573,7 @@
 SubnetTags []string `json:"SubnetTags"`
 SpaceTag string `json:"SpaceTag"`
 Public bool `json:"Public"`
+ ProviderId string `json:"ProviderId,omitempty"`
 }

 // ListSpacesResults holds the list of all available spaces.
@@ -564,3 +587,16 @@
 Subnets []Subnet `json:"Subnets"`
 Error *Error `json:"Error,omitempty"`
 }
+
+// DiscoverSpacesResults holds the list of all provider spaces.
+type DiscoverSpacesResults struct {
+ Results []ProviderSpace `json:"Results"`
+}
+
+// ProviderSpace holds the information about a single space and its associated subnets.
+type ProviderSpace struct {
+ Name string `json:"Name"`
+ ProviderId string `json:"ProviderId"`
+ Subnets []Subnet `json:"Subnets"`
+ Error *Error `json:"Error,omitempty"`
+}

=== modified file 'src/github.com/juju/juju/apiserver/params/network_test.go'
--- src/github.com/juju/juju/apiserver/params/network_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/params/network_test.go 2016-03-22 15:18:22 +0000
@@ -36,7 +36,7 @@
 mkPortsResult := func(msg, code string, ports ...P) params.PortsResult {
 pr := params.PortsResult{}
 if msg != "" {
- pr.Error = &params.Error{msg, code}
+ pr.Error = &params.Error{Message: msg, Code: code}
 }
 for _, p := range ports {
 pr.Ports = append(pr.Ports, params.Port{p.prot, p.num})

=== modified file 'src/github.com/juju/juju/apiserver/params/params.go'
--- src/github.com/juju/juju/apiserver/params/params.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/params.go 2016-03-22 15:18:22 +0000
@@ -9,16 +9,20 @@
 "time"

 "github.com/juju/errors"
+ "github.com/juju/loggo"
+ "github.com/juju/replicaset"
 "github.com/juju/utils/proxy"
- "gopkg.in/juju/charm.v5"
+ "github.com/juju/utils/ssh"
+ "gopkg.in/juju/charm.v6-unstable"
 "gopkg.in/macaroon.v1"

 "github.com/juju/juju/constraints"
 "github.com/juju/juju/instance"
+ "github.com/juju/juju/mongo"
+ "github.com/juju/juju/network"
 "github.com/juju/juju/state/multiwatcher"
 "github.com/juju/juju/storage"
 "github.com/juju/juju/tools"
- "github.com/juju/juju/utils/ssh"
 "github.com/juju/juju/version"
 )

@@ -162,8 +166,7 @@
 Addrs []Address `json:"Addrs"`
 }

-// AddMachines holds the parameters for making the
-// AddMachinesWithPlacement call.
+// AddMachines holds the parameters for making the AddMachines call.
type AddMachines struct { MachineParams []AddMachineParams `json:"MachineParams"` } @@ -191,25 +194,28 @@ Services []ServiceDeploy } -// ServiceDeploy holds the parameters for making the ServiceDeploy call. +// ServiceDeploy holds the parameters for making the service Deploy call. type ServiceDeploy struct { - ServiceName string - CharmUrl string - NumUnits int - Config map[string]string - ConfigYAML string // Takes precedence over config if both are present. - Constraints constraints.Value - ToMachineSpec string - Placement []*instance.Placement - Networks []string - Storage map[string]storage.Constraints + ServiceName string + Series string + CharmUrl string + NumUnits int + Config map[string]string + ConfigYAML string // Takes precedence over config if both are present. + Constraints constraints.Value + Placement []*instance.Placement + Networks []string + Storage map[string]storage.Constraints + EndpointBindings map[string]string + Resources map[string]string } -// ServiceUpdate holds the parameters for making the ServiceUpdate call. +// ServiceUpdate holds the parameters for making the service Update call. type ServiceUpdate struct { ServiceName string CharmUrl string ForceCharmUrl bool + ForceSeries bool MinUnits *int SettingsStrings map[string]string SettingsYAML string // Takes precedence over SettingsStrings if both are present. @@ -218,32 +224,33 @@ // ServiceSetCharm sets the charm for a given service. type ServiceSetCharm struct { - ServiceName string - CharmUrl string - Force bool + // ServiceName is the name of the service to set the charm on. + ServiceName string `json:"servicename"` + // CharmUrl is the new url for the charm. + CharmUrl string `json:"charmurl"` + // ForceUnits forces the upgrade on units in an error state. + ForceUnits bool `json:"forceunits"` + // ForceSeries forces the use of the charm even if it doesn't match the + // series of the unit. + ForceSeries bool `json:"forceseries"` + // ResourceIDs is a map of resource names to resource IDs to activate during + // the upgrade. + ResourceIDs map[string]string `json:"resourceids"` } -// ServiceExpose holds the parameters for making the ServiceExpose call. +// ServiceExpose holds the parameters for making the service Expose call. type ServiceExpose struct { ServiceName string } -// ServiceSet holds the parameters for a ServiceSet +// ServiceSet holds the parameters for a service Set // command. Options contains the configuration data. type ServiceSet struct { ServiceName string Options map[string]string } -// ServiceSetYAML holds the parameters for -// a ServiceSetYAML command. Config contains the -// configuration data in YAML format. -type ServiceSetYAML struct { - ServiceName string - Config string -} - -// ServiceUnset holds the parameters for a ServiceUnset +// ServiceUnset holds the parameters for a service Unset // command. Options contains the option attribute names // to unset. type ServiceUnset struct { @@ -251,13 +258,13 @@ Options []string } -// ServiceGet holds parameters for making the ServiceGet or -// ServiceGetCharmURL calls. +// ServiceGet holds parameters for making the Get or +// GetCharmURL calls. type ServiceGet struct { ServiceName string } -// ServiceGetResults holds results of the ServiceGet call. +// ServiceGetResults holds results of the service Get call. type ServiceGetResults struct { Service string Charm string @@ -265,17 +272,17 @@ Constraints constraints.Value } -// ServiceCharmRelations holds parameters for making the ServiceCharmRelations call. 
+// ServiceCharmRelations holds parameters for making the service CharmRelations call. type ServiceCharmRelations struct { ServiceName string } -// ServiceCharmRelationsResults holds the results of the ServiceCharmRelations call. +// ServiceCharmRelationsResults holds the results of the service CharmRelations call. type ServiceCharmRelationsResults struct { CharmRelations []string } -// ServiceUnexpose holds parameters for the ServiceUnexpose call. +// ServiceUnexpose holds parameters for the service Unexpose call. type ServiceUnexpose struct { ServiceName string } @@ -325,17 +332,16 @@ } // AddServiceUnitsResults holds the names of the units added by the -// AddServiceUnits call. +// AddUnits call. type AddServiceUnitsResults struct { Units []string } // AddServiceUnits holds parameters for the AddUnits call. type AddServiceUnits struct { - ServiceName string - NumUnits int - ToMachineSpec string - Placement []*instance.Placement + ServiceName string + NumUnits int + Placement []*instance.Placement } // DestroyServiceUnits holds parameters for the DestroyUnits call. @@ -343,7 +349,7 @@ UnitNames []string } -// ServiceDestroy holds the parameters for making the ServiceDestroy call. +// ServiceDestroy holds the parameters for making the service Destroy call. type ServiceDestroy struct { ServiceName string } @@ -356,11 +362,17 @@ } // LoginRequest holds credentials for identifying an entity to the Login v1 -// facade. +// facade. AuthTag holds the tag of the user to connect as. If it is empty, +// then the provided macaroon slices will be used for authentication (if +// any one is valid, the authentication succeeds). If there are no +// valid macaroons and macaroon authentication is configured, +// the LoginResponse will contain a macaroon that when +// discharged, may allow access. type LoginRequest struct { - AuthTag string `json:"auth-tag"` - Credentials string `json:"credentials"` - Nonce string `json:"nonce"` + AuthTag string `json:"auth-tag"` + Credentials string `json:"credentials"` + Nonce string `json:"nonce"` + Macaroons []macaroon.Slice `json:"macaroons"` } // LoginRequestCompat holds credentials for identifying an entity to the Login v1 @@ -398,13 +410,13 @@ // SetConstraints stores parameters for making the SetConstraints call. type SetConstraints struct { - ServiceName string //optional, if empty, environment constraints are set. + ServiceName string //optional, if empty, model constraints are set. Constraints constraints.Value } // ResolveCharms stores charm references for a ResolveCharms call. type ResolveCharms struct { - References []charm.Reference + References []charm.URL } // ResolveCharmResult holds the result of resolving a charm reference to a URL, or any error that occurred. @@ -445,10 +457,10 @@ type StateServingInfo struct { APIPort int StatePort int - // The state server cert and corresponding private key. + // The controller cert and corresponding private key. Cert string PrivateKey string - // The private key for the CA cert so that a new state server + // The private key for the CA cert so that a new controller // cert can be generated when needed. CAPrivateKey string // this will be passed as the KeyFile argument to MongoDB @@ -470,7 +482,7 @@ Type instance.ContainerType } -// ContainerManagerConfig contains information from the environment config +// ContainerManagerConfig contains information from the model config // that is needed for configuring the container manager. 
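The widened ServiceDeploy struct earlier in this file now carries a target series, endpoint bindings and resources. A sketch of filling it for a single-unit deploy; the charm URL and space name are illustrative, not taken from this diff:

package main

import (
	"fmt"

	"github.com/juju/juju/apiserver/params"
)

func main() {
	args := params.ServiceDeploy{
		ServiceName: "wordpress",
		Series:      "trusty",
		CharmUrl:    "cs:trusty/wordpress-42",
		NumUnits:    1,
		// Bind the charm's "db" endpoint to a specific network space.
		EndpointBindings: map[string]string{"db": "internal"},
	}
	fmt.Printf("%+v\n", args)
}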
 type ContainerManagerConfig struct {
 ManagerConfig map[string]string
@@ -483,7 +495,7 @@
 EnableOSUpgrade bool
 }

-// ContainerConfig contains information from the environment config that is
+// ContainerConfig contains information from the model config that is
 // needed for container cloud-init.
 type ContainerConfig struct {
 ProviderType string
@@ -526,30 +538,6 @@
 APIAddresses []string
 }

-// SetRsyslogCertParams holds parameters for the SetRsyslogCert call.
-type SetRsyslogCertParams struct {
- CACert []byte
- CAKey []byte
-}
-
-// RsyslogConfigResult holds the result of a GetRsyslogConfig call.
-type RsyslogConfigResult struct {
- Error *Error `json:"Error"`
- CACert string `json:"CACert"`
- CAKey string `json:"CAKey"`
- // Port is only used by state servers as the port to listen on.
- // Clients should use HostPorts for the rsyslog addresses to forward
- // logs to.
- Port int `json:"Port"`
-
- HostPorts []HostPort `json:"HostPorts"`
-}
-
-// RsyslogConfigResults is the bulk form of RyslogConfigResult
-type RsyslogConfigResults struct {
- Results []RsyslogConfigResult
-}
-
 // JobsResult holds the jobs for a machine that are returned by a call to Jobs.
 type JobsResult struct {
 Jobs []multiwatcher.MachineJob `json:"Jobs"`
@@ -584,7 +572,7 @@
 // LoginResult holds the result of a Login call.
 type LoginResult struct {
 Servers [][]HostPort `json:"Servers"`
- EnvironTag string `json:"EnvironTag"`
+ ModelTag string `json:"ModelTag"`
 LastConnection *time.Time `json:"LastConnection"`
 Facades []FacadeVersions `json:"Facades"`
 }
@@ -609,69 +597,79 @@
 // LoginResultV1 holds the result of an Admin v1 Login call.
 type LoginResultV1 struct {
+ // DischargeRequired implies that the login request has failed, and none of
+ // the other fields are populated. It contains a macaroon which, when
+ // discharged, will grant access on a subsequent call to Login.
+ // Note: It is OK to use the Macaroon type here as it is explicitly
+ // designed to provide stable serialisation of macaroons. It is good
+ // practice to use only primitives in types that will be serialised;
+ // because of the above, however, the Macaroon type is suitable here.
+ DischargeRequired *macaroon.Macaroon `json:"discharge-required,omitempty"`
+
+ // DischargeRequiredReason holds the reason that the above discharge was
+ // required.
+ DischargeRequiredReason string `json:"discharge-required-error,omitempty"`
+
 // Servers is the list of API server addresses.
- Servers [][]HostPort `json:"servers"`
-
- // EnvironTag is the tag for the environment that is being connected to.
- EnvironTag string `json:"environ-tag"`
-
- // ServerTag is the tag for the environment that holds the API servers.
- // This is the initial environment created when bootstrapping juju.
- ServerTag string `json:"server-tag"`
-
- // ReauthRequest can be used to relay any further authentication handshaking
- // required on the part of the client to complete the Login, if any.
- ReauthRequest *ReauthRequest `json:"reauth-request,omitempty"`
+ Servers [][]HostPort `json:"servers,omitempty"`
+
+ // ModelTag is the tag for the model that is being connected to.
+ ModelTag string `json:"model-tag,omitempty"`
+
+ // ControllerTag is the tag for the model that holds the API servers.
+ // This is the initial model created when bootstrapping juju.
+ ControllerTag string `json:"server-tag,omitempty"`

 // UserInfo describes the authenticated user, if any.
 UserInfo *AuthUserInfo `json:"user-info,omitempty"`

 // Facades describes all the available API facade versions to the
 // authenticated client.
- Facades []FacadeVersions `json:"facades"`
+ Facades []FacadeVersions `json:"facades,omitempty"`

 // ServerVersion is the string representation of the server version
 // if the server supports it.
 ServerVersion string `json:"server-version,omitempty"`
 }

-// StateServersSpec contains arguments for
-// the EnsureAvailability client API call.
-type StateServersSpec struct {
- EnvironTag string
- NumStateServers int `json:"num-state-servers"`
- Constraints constraints.Value `json:"constraints,omitempty"`
- // Series is the series to associate with new state server machines.
- // If this is empty, then the environment's default series is used.
+// ControllersSpec contains arguments for
+// the EnableHA client API call.
+type ControllersSpec struct {
+ ModelTag string
+ NumControllers int `json:"num-controllers"`
+ Constraints constraints.Value `json:"constraints,omitempty"`
+ // Series is the series to associate with new controller machines.
+ // If this is empty, then the model's default series is used.
 Series string `json:"series,omitempty"`
- // Placement defines specific machines to become new state server machines.
+ // Placement defines specific machines to become new controller machines.
 Placement []string `json:"placement,omitempty"`
 }

-// StateServersSpecs contains all the arguments
-// for the EnsureAvailability API call.
-type StateServersSpecs struct {
- Specs []StateServersSpec
+// ControllersSpecs contains all the arguments
+// for the EnableHA API call.
+type ControllersSpecs struct {
+ Specs []ControllersSpec
 }

-// StateServersChangeResult contains the results
-// of a single EnsureAvailability API call or
+// ControllersChangeResult contains the results
+// of a single EnableHA API call or
 // an error.
-type StateServersChangeResult struct {
- Result StateServersChanges
+type ControllersChangeResult struct {
+ Result ControllersChanges
 Error *Error
 }

-// StateServersChangeResults contains the results
-// of the EnsureAvailability API call.
-type StateServersChangeResults struct {
- Results []StateServersChangeResult
+// ControllersChangeResults contains the results
+// of the EnableHA API call.
+type ControllersChangeResults struct {
+ Results []ControllersChangeResult
 }

-// StateServersChanges lists the servers
+// ControllersChanges lists the servers
 // that have been added, removed or maintained in the
-// pool as a result of an ensure-availability operation.
-type StateServersChanges struct {
+// pool as a result of an enable-ha operation.
+type ControllersChanges struct {
 Added []string `json:"added,omitempty"`
 Maintained []string `json:"maintained,omitempty"`
 Removed []string `json:"removed,omitempty"`
@@ -742,3 +740,96 @@
 Result RebootAction `json:"result,omitempty"`
 Error *Error `json:"error,omitempty"`
 }
+
+// LogRecord is used to transmit log messages to the logsink API
+// endpoint. Single character field names are used for serialisation
+// to keep the size down. These messages are going to be sent a lot.
+type LogRecord struct {
+ Time time.Time `json:"t"`
+ Module string `json:"m"`
+ Location string `json:"l"`
+ Level loggo.Level `json:"v"`
+ Message string `json:"x"`
+}
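A sketch of the client-side control flow implied by the DischargeRequired field above: a non-nil macaroon means the login failed and should be retried with discharged macaroons in LoginRequest.Macaroons. The discharge round trip depends on the bakery client in use, so dischargeMacaroon below is a hypothetical stand-in:

package main

import (
	"fmt"

	"gopkg.in/macaroon.v1"

	"github.com/juju/juju/apiserver/params"
)

// dischargeMacaroon stands in for a real bakery discharge exchange.
func dischargeMacaroon(m *macaroon.Macaroon) (macaroon.Slice, error) {
	return nil, fmt.Errorf("discharge not implemented in this sketch")
}

// nextLoginMacaroons inspects a LoginResultV1 and, if a discharge is
// required, returns the macaroons to present on the next Login call.
func nextLoginMacaroons(result params.LoginResultV1) ([]macaroon.Slice, error) {
	if result.DischargeRequired == nil {
		return nil, nil // already logged in; the other fields are populated.
	}
	ms, err := dischargeMacaroon(result.DischargeRequired)
	if err != nil {
		return nil, fmt.Errorf("%s: %v", result.DischargeRequiredReason, err)
	}
	return []macaroon.Slice{ms}, nil
}

func main() {
	_, err := nextLoginMacaroons(params.LoginResultV1{})
	fmt.Println(err) // <nil>: no discharge required.
}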
+
+// GetBundleChangesParams holds parameters for making GetBundleChanges calls.
+type GetBundleChangesParams struct {
+ // BundleDataYAML is the YAML-encoded charm bundle data
+ // (see "github.com/juju/charm.BundleData").
+ BundleDataYAML string `json:"yaml"`
+}
+
+// GetBundleChangesResults holds results of the GetBundleChanges call.
+type GetBundleChangesResults struct {
+ // Changes holds the list of changes required to deploy the bundle.
+ // It is omitted if the provided bundle YAML has verification errors.
+ Changes []*BundleChangesChange `json:"changes,omitempty"`
+ // Errors holds possible bundle verification errors.
+ Errors []string `json:"errors,omitempty"`
+}
+
+// BundleChangesChange holds a single change required to deploy a bundle.
+type BundleChangesChange struct {
+ // Id is the unique identifier for this change.
+ Id string `json:"id"`
+ // Method is the action to be performed to apply this change.
+ Method string `json:"method"`
+ // Args holds a list of arguments to pass to the method.
+ Args []interface{} `json:"args"`
+ // Requires holds a list of dependencies for this change. Each dependency
+ // is represented by the corresponding change id, and must be applied
+ // before this change is applied.
+ Requires []string `json:"requires"`
+}
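Requires above encodes a dependency ordering over the flat change list. A sketch of applying changes in that order with a simple worklist; a real client would likely do something richer, but this shows the intended use of Id and Requires:

package main

import (
	"fmt"

	"github.com/juju/juju/apiserver/params"
)

// applyInOrder repeatedly applies any change whose dependencies have all
// been applied already, failing if no progress can be made (a cycle).
func applyInOrder(changes []*params.BundleChangesChange, apply func(*params.BundleChangesChange)) error {
	done := make(map[string]bool, len(changes))
	for applied := 0; applied < len(changes); {
		progress := false
		for _, ch := range changes {
			if done[ch.Id] {
				continue
			}
			ready := true
			for _, dep := range ch.Requires {
				if !done[dep] {
					ready = false
					break
				}
			}
			if !ready {
				continue
			}
			apply(ch)
			done[ch.Id] = true
			applied++
			progress = true
		}
		if !progress {
			return fmt.Errorf("dependency cycle in bundle changes")
		}
	}
	return nil
}

func main() {
	changes := []*params.BundleChangesChange{
		{Id: "deploy-1", Method: "deploy", Requires: []string{"addCharm-0"}},
		{Id: "addCharm-0", Method: "addCharm"},
	}
	_ = applyInOrder(changes, func(ch *params.BundleChangesChange) {
		fmt.Println("apply", ch.Id) // addCharm-0, then deploy-1.
	})
}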
+
+// UpgradeMongoParams holds the arguments required to
+// enter upgrade mongo mode.
+type UpgradeMongoParams struct {
+ Target mongo.Version
+}
+
+// HAMember holds information that identifies one member
+// of HA.
+type HAMember struct {
+ Tag string
+ PublicAddress network.Address
+ Series string
+}
+
+// MongoUpgradeResults holds the results of an attempt
+// to enter upgrade mongo mode.
+type MongoUpgradeResults struct {
+ RsMembers []replicaset.Member
+ Master HAMember
+ Members []HAMember
+}
+
+// ResumeReplicationParams holds the members of a HA that
+// must be resumed.
+type ResumeReplicationParams struct {
+ Members []replicaset.Member
+}
+
+// ModelInfo holds information about the Juju model.
+type ModelInfo struct {
+ DefaultSeries string `json:"DefaultSeries"`
+ ProviderType string `json:"ProviderType"`
+ Name string `json:"Name"`
+ UUID string `json:"UUID"`
+ // The json name here is as per the older field name and is required
+ // for backward compatibility. The other fields also have explicit
+ // matching serialization directives for the benefit of being explicit.
+ ControllerUUID string `json:"ServerUUID"`
+}
+
+// MeterStatusParam holds meter status information to be set for the specified tag.
+type MeterStatusParam struct {
+ Tag string `json:"tag"`
+ Code string `json:"code"`
+ Info string `json:"info,omitempty"`
+}
+
+// MeterStatusParams holds parameters for making SetMeterStatus calls.
+type MeterStatusParams struct {
+ Statuses []MeterStatusParam `json:"statuses"`
+}

=== modified file 'src/github.com/juju/juju/apiserver/params/params_test.go'
--- src/github.com/juju/juju/apiserver/params/params_test.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/params_test.go 2016-03-22 15:18:22 +0000
@@ -9,7 +9,7 @@
 jc "github.com/juju/testing/checkers"
 gc "gopkg.in/check.v1"

- "gopkg.in/juju/charm.v5"
+ "gopkg.in/juju/charm.v6-unstable"

 "github.com/juju/juju/apiserver/params"
 "github.com/juju/juju/constraints"
@@ -39,7 +39,7 @@
 about: "MachineInfo Delta",
 value: multiwatcher.Delta{
 Entity: &multiwatcher.MachineInfo{
- EnvUUID: "uuid",
+ ModelUUID: "uuid",
 Id: "Benji",
 InstanceId: "Shazam",
 Status: "error",
@@ -47,17 +47,17 @@
 Life: multiwatcher.Life("alive"),
 Series: "trusty",
 SupportedContainers: []instance.ContainerType{instance.LXC},
- Jobs: []multiwatcher.MachineJob{state.JobManageEnviron.ToParams()},
+ Jobs: []multiwatcher.MachineJob{state.JobManageModel.ToParams()},
 Addresses: []network.Address{},
 HardwareCharacteristics: &instance.HardwareCharacteristics{},
 },
 },
- json: `["machine","change",{"EnvUUID": "uuid", "Id":"Benji","InstanceId":"Shazam","HasVote":false,"WantsVote":false,"Status":"error","StatusInfo":"foo","StatusData":null,"Life":"alive","Series":"trusty","SupportedContainers":["lxc"],"SupportedContainersKnown":false,"Jobs":["JobManageEnviron"],"Addresses":[],"HardwareCharacteristics":{}}]`,
+ json: `["machine","change",{"ModelUUID": "uuid", "Id":"Benji","InstanceId":"Shazam","HasVote":false,"WantsVote":false,"Status":"error","StatusInfo":"foo","StatusData":null,"Life":"alive","Series":"trusty","SupportedContainers":["lxc"],"SupportedContainersKnown":false,"Jobs":["JobManageModel"],"Addresses":[],"HardwareCharacteristics":{}}]`,
 }, {
 about: "ServiceInfo Delta",
 value: multiwatcher.Delta{
 Entity: &multiwatcher.ServiceInfo{
- EnvUUID: "uuid",
+ ModelUUID: "uuid",
 Name: "Benji",
 Exposed: true,
 CharmURL: "cs:quantal/name",
@@ -75,16 +75,16 @@
 },
 },
 },
- json: `["service","change",{"EnvUUID": "uuid", "CharmURL": "cs:quantal/name","Name":"Benji","Exposed":true,"Life":"dying","OwnerTag":"test-owner","MinUnits":42,"Constraints":{"arch":"armhf", "mem": 1024},"Config": {"hello":"goodbye","foo":false},"Subordinate":false,"Status":{"Current":"active", "Message":"all good", "Version": "", "Err": null, "Data": null, "Since": null}}]`,
+ json: `["service","change",{"ModelUUID": "uuid", "CharmURL": "cs:quantal/name","Name":"Benji","Exposed":true,"Life":"dying","OwnerTag":"test-owner","MinUnits":42,"Constraints":{"arch":"armhf", "mem": 1024},"Config": {"hello":"goodbye","foo":false},"Subordinate":false,"Status":{"Current":"active", "Message":"all good", "Version": "", "Err": null, "Data": null, "Since": null}}]`,
 }, {
 about: "UnitInfo Delta",
 value: multiwatcher.Delta{
 Entity: &multiwatcher.UnitInfo{
- EnvUUID: "uuid",
- Name: "Benji",
- Service: "Shazam",
- Series: "precise",
- CharmURL: "cs:~user/precise/wordpress-42",
+ ModelUUID: "uuid",
+ Name: "Benji",
+ Service: "Shazam",
+ Series: "precise",
+ CharmURL: "cs:~user/precise/wordpress-42",
 Ports: []network.Port{{
 Protocol: "http",
 Number: 80,
@@ -108,43 +108,43 @@
 },
 },
 },
- json: `["unit", "change", {"EnvUUID": "uuid", "CharmURL": "cs:~user/precise/wordpress-42", "MachineId": "1", "Series": "precise", "Name": "Benji", "PublicAddress": "testing.invalid", "Service": "Shazam", "PrivateAddress": "10.0.0.1", "Ports": [{"Protocol": "http", "Number": 80}], "PortRanges": [{"FromPort": 80, "ToPort": 80,
"Protocol": "http"}], "Status": "error", "StatusInfo": "foo", "StatusData": null, "WorkloadStatus":{"Current":"active", "Message":"all good", "Version": "", "Err": null, "Data": null, "Since": null}, "AgentStatus":{"Current":"idle", "Message":"", "Version": "", "Err": null, "Data": null, "Since": null}, "Subordinate": false}]`, + json: `["unit", "change", {"ModelUUID": "uuid", "CharmURL": "cs:~user/precise/wordpress-42", "MachineId": "1", "Series": "precise", "Name": "Benji", "PublicAddress": "testing.invalid", "Service": "Shazam", "PrivateAddress": "10.0.0.1", "Ports": [{"Protocol": "http", "Number": 80}], "PortRanges": [{"FromPort": 80, "ToPort": 80, "Protocol": "http"}], "Status": "error", "StatusInfo": "foo", "StatusData": null, "WorkloadStatus":{"Current":"active", "Message":"all good", "Version": "", "Err": null, "Data": null, "Since": null}, "AgentStatus":{"Current":"idle", "Message":"", "Version": "", "Err": null, "Data": null, "Since": null}, "Subordinate": false}]`, }, { about: "RelationInfo Delta", value: multiwatcher.Delta{ Entity: &multiwatcher.RelationInfo{ - EnvUUID: "uuid", - Key: "Benji", - Id: 4711, + ModelUUID: "uuid", + Key: "Benji", + Id: 4711, Endpoints: []multiwatcher.Endpoint{ {ServiceName: "logging", Relation: charm.Relation{Name: "logging-directory", Role: "requirer", Interface: "logging", Optional: false, Limit: 1, Scope: "container"}}, {ServiceName: "wordpress", Relation: charm.Relation{Name: "logging-dir", Role: "provider", Interface: "logging", Optional: false, Limit: 0, Scope: "container"}}}, }, }, - json: `["relation","change",{"EnvUUID": "uuid", "Key":"Benji", "Id": 4711, "Endpoints": [{"ServiceName":"logging", "Relation":{"Name":"logging-directory", "Role":"requirer", "Interface":"logging", "Optional":false, "Limit":1, "Scope":"container"}}, {"ServiceName":"wordpress", "Relation":{"Name":"logging-dir", "Role":"provider", "Interface":"logging", "Optional":false, "Limit":0, "Scope":"container"}}]}]`, + json: `["relation","change",{"ModelUUID": "uuid", "Key":"Benji", "Id": 4711, "Endpoints": [{"ServiceName":"logging", "Relation":{"Name":"logging-directory", "Role":"requirer", "Interface":"logging", "Optional":false, "Limit":1, "Scope":"container"}}, {"ServiceName":"wordpress", "Relation":{"Name":"logging-dir", "Role":"provider", "Interface":"logging", "Optional":false, "Limit":0, "Scope":"container"}}]}]`, }, { about: "AnnotationInfo Delta", value: multiwatcher.Delta{ Entity: &multiwatcher.AnnotationInfo{ - EnvUUID: "uuid", - Tag: "machine-0", + ModelUUID: "uuid", + Tag: "machine-0", Annotations: map[string]string{ "foo": "bar", "arble": "2 4", }, }, }, - json: `["annotation","change",{"EnvUUID": "uuid", "Tag":"machine-0","Annotations":{"foo":"bar","arble":"2 4"}}]`, + json: `["annotation","change",{"ModelUUID": "uuid", "Tag":"machine-0","Annotations":{"foo":"bar","arble":"2 4"}}]`, }, { about: "Delta Removed True", value: multiwatcher.Delta{ Removed: true, Entity: &multiwatcher.RelationInfo{ - EnvUUID: "uuid", - Key: "Benji", + ModelUUID: "uuid", + Key: "Benji", }, }, - json: `["relation","remove",{"EnvUUID": "uuid", "Key":"Benji", "Id": 0, "Endpoints": null}]`, + json: `["relation","remove",{"ModelUUID": "uuid", "Key":"Benji", "Id": 0, "Endpoints": null}]`, }} func (s *MarshalSuite) TestDeltaMarshalJSON(c *gc.C) { === added file 'src/github.com/juju/juju/apiserver/params/registration.go' --- src/github.com/juju/juju/apiserver/params/registration.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/params/registration.go 2016-03-22 15:18:22 
+0000 @@ -0,0 +1,56 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package params + +// SecretKeyLoginRequest contains the parameters for completing +// the registration of a user. The request contains the tag of +// the user, and an encrypted and authenticated payload that +// proves that the requester has a secret key recorded on the +// controller. +type SecretKeyLoginRequest struct { + // User is the tag-representation of the user that the + // requester wishes to authenticate as. + User string `json:"user"` + + // Nonce is the nonce used by the client to encrypt + // and authenticate PayloadCiphertext. + Nonce []byte `json:"nonce"` + + // PayloadCiphertext is the encrypted and authenticated + // payload. The payload is encrypted/authenticated using + // NaCl Secretbox. + PayloadCiphertext []byte `json:"ciphertext"` +} + +// SecretKeyLoginRequestPayload is JSON-encoded and then encrypted +// and authenticated with the NaCl Secretbox algorithm. +type SecretKeyLoginRequestPayload struct { + // Password is the new password to set for the user. + Password string `json:"password"` +} + +// SecretKeyLoginResponse contains the result of completing a user +// registration. This contains an encrypted and authenticated payload, +// containing the information necessary to securely log into the +// controller via the standard password authentication method. +type SecretKeyLoginResponse struct { + // Nonce is the nonce used by the server to encrypt and + // authenticate PayloadCiphertext. + Nonce []byte `json:"nonce"` + + // PayloadCiphertext is the encrypted and authenticated + // payload, which is a JSON-encoded SecretKeyLoginResponsePayload. + PayloadCiphertext []byte `json:"ciphertext"` +} + +// SecretKeyLoginResponsePayload is JSON-encoded and then encrypted +// and authenticated with the NaCl Secretbox algorithm. +type SecretKeyLoginResponsePayload struct { + // CACert is the CA certificate, required to establish a secure + // TLS connection to the Juju controller + CACert string `json:"ca-cert"` + + // ControllerUUID is the UUID of the Juju controller. + ControllerUUID string `json:"controller-uuid"` +} === added file 'src/github.com/juju/juju/apiserver/params/retrystrategy.go' --- src/github.com/juju/juju/apiserver/params/retrystrategy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/params/retrystrategy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +// Copyright 2016 Canonical Ltd. +// Copyright 2016 Cloudbase Solutions +// Licensed under the AGPLv3, see LICENCE file for details. + +package params + +import ( + "time" +) + +// RetryStrategy holds the necessary information to configure retries. +type RetryStrategy struct { + ShouldRetry bool + MinRetryTime time.Duration + MaxRetryTime time.Duration + JitterRetryTime bool + RetryTimeFactor int64 +} + +// RetryStrategyResult holds a RetryStrategy or an error. +type RetryStrategyResult struct { + Error *Error + Result *RetryStrategy +} + +// RetryStrategyResults holds the bulk operation result of an API call +// that returns a RetryStrategy or an error. 
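The registration request above is sealed with NaCl Secretbox. A minimal client-side sketch of building SecretKeyLoginRequest from the user tag and the 32-byte secret key handed out at add-user time; key distribution and the HTTP call itself are elided, and golang.org/x/crypto/nacl/secretbox is assumed:

package main

import (
	"crypto/rand"
	"encoding/json"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"

	"github.com/juju/juju/apiserver/params"
)

// sealRequest encrypts and authenticates the JSON payload with a fresh
// random 24-byte nonce under the shared secret key.
func sealRequest(userTag string, key *[32]byte, password string) (params.SecretKeyLoginRequest, error) {
	payload, err := json.Marshal(params.SecretKeyLoginRequestPayload{Password: password})
	if err != nil {
		return params.SecretKeyLoginRequest{}, err
	}
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		return params.SecretKeyLoginRequest{}, err
	}
	return params.SecretKeyLoginRequest{
		User:              userTag,
		Nonce:             nonce[:],
		PayloadCiphertext: secretbox.Seal(nil, payload, &nonce, key),
	}, nil
}

func main() {
	var key [32]byte // in reality: the SecretKey from AddUserResult.
	req, err := sealRequest("user-alice", &key, "new-password")
	fmt.Println(len(req.PayloadCiphertext) > 0, err) // true <nil>
}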
+type RetryStrategyResults struct {
+ Results []RetryStrategyResult
+}

=== modified file 'src/github.com/juju/juju/apiserver/params/status.go'
--- src/github.com/juju/juju/apiserver/params/status.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/status.go 2016-03-22 15:18:22 +0000
@@ -8,7 +8,7 @@
 import (
 "time"

- "gopkg.in/juju/charm.v5"
+ "gopkg.in/juju/charm.v6-unstable"

 "github.com/juju/juju/instance"
 "github.com/juju/juju/network"
@@ -22,9 +22,9 @@

 // TODO(ericsnow) Add FullStatusResult.

-// Status holds information about the status of a juju environment.
+// FullStatus holds information about the status of a juju model.
 type FullStatus struct {
- EnvironmentName string
+ ModelName string
 AvailableVersion string
 Machines map[string]MachineStatus
 Services map[string]ServiceStatus
@@ -36,16 +36,6 @@
 type MachineStatus struct {
 Agent AgentStatus

- // The following fields mirror fields in AgentStatus (introduced
- // in 1.19.x). The old fields below are being kept for
- // compatibility with old clients.
- // They can be removed once API versioning lands.
- AgentState Status
- AgentStateInfo string
- AgentVersion string
- Life string
- Err error
-
 DNSName string
 InstanceId instance.Id
 InstanceState string
@@ -155,7 +145,7 @@
 Err error
 }

-// LegacyStatus holds minimal information on the status of a juju environment.
+// LegacyStatus holds minimal information on the status of a juju model.
 type LegacyStatus struct {
 Machines map[string]LegacyMachineStatus
 }
@@ -181,6 +171,22 @@
 Statuses []AgentStatus
 }

+const (
+ // DefaultMaxLogsPerEntity is the default value for logs for each entity
+ // that should be kept at any given time.
+ DefaultMaxLogsPerEntity = 100
+
+ // DefaultPruneInterval is the default interval that should be waited
+ // between prune calls.
+ DefaultPruneInterval = 5 * time.Minute
+)
+
+// StatusHistoryPruneArgs holds arguments for the status history
+// pruning process.
+type StatusHistoryPruneArgs struct {
+ MaxLogsPerEntity int
+}
+
 // StatusResult holds an entity status, extra information, or an
 // error.
 type StatusResult struct {
@@ -210,11 +216,16 @@
 Results []ServiceStatusResult
 }

+// HistoryKind represents the possible types of
+// status history entries.
 type HistoryKind string

 const (
+ // KindCombined represents all possible kinds.
 KindCombined HistoryKind = "combined"
- KindAgent HistoryKind = "agent"
+ // KindAgent represents a unit agent status history entry.
+ KindAgent HistoryKind = "agent"
+ // KindWorkload represents a charm workload status history entry.
 KindWorkload HistoryKind = "workload"
 )
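One plausible reading of the RetryStrategy fields above, shown as a sketch: delays grow from MinRetryTime by RetryTimeFactor, are capped at MaxRetryTime, and are optionally jittered. This mirrors the apparent intent of the fields, not necessarily the exact algorithm the agent uses:

package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/juju/juju/apiserver/params"
)

// nextDelay computes the delay before the given retry attempt (0-based).
func nextDelay(s params.RetryStrategy, attempt int) time.Duration {
	if !s.ShouldRetry {
		return 0
	}
	d := s.MinRetryTime
	for i := 0; i < attempt; i++ {
		d = time.Duration(int64(d) * s.RetryTimeFactor)
		if d >= s.MaxRetryTime {
			d = s.MaxRetryTime
			break
		}
	}
	if s.JitterRetryTime && d >= 2 {
		// Spread retries over [d/2, d) to avoid thundering herds.
		d = d/2 + time.Duration(rand.Int63n(int64(d/2)))
	}
	return d
}

func main() {
	s := params.RetryStrategy{
		ShouldRetry:     true,
		MinRetryTime:    5 * time.Second,
		MaxRetryTime:    5 * time.Minute,
		RetryTimeFactor: 2,
	}
	for i := 0; i < 4; i++ {
		fmt.Println(nextDelay(s, i)) // 5s, 10s, 20s, 40s
	}
}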
@@ -234,11 +245,12 @@
 const (
 // Status values common to machine and unit agents.

- // The entity requires human intervention in order to operate
- // correctly.
+ // StatusError means the entity requires human intervention
+ // in order to operate correctly.
 StatusError Status = "error"

- // The entity is actively participating in the environment.
+ // StatusStarted is set when:
+ // The entity is actively participating in the model.
 // For unit agents, this is a state we preserve for backwards
 // compatibility with scripts during the life of Juju 1.x.
 // In Juju 2.x, the agent-state will remain "active" and scripts
@@ -249,13 +261,16 @@
 const (
 // Status values specific to machine agents.

- // The machine is not yet participating in the environment.
+ // StatusPending is set when:
+ // The machine is not yet participating in the model.
 StatusPending Status = "pending"

+ // StatusStopped is set when:
 // The machine's agent will perform no further action, other than
 // to set the unit to Dead at a suitable moment.
 StatusStopped Status = "stopped"

+ // StatusDown is set when:
 // The machine ought to be signalling activity, but it cannot be
 // detected.
 StatusDown Status = "down"
@@ -264,66 +279,70 @@

 const (
 // Status values specific to unit agents.

+ // StatusAllocating is set when:
 // The machine on which a unit is to be hosted is still being
 // spun up in the cloud.
 StatusAllocating Status = "allocating"

+ // StatusRebooting is set when:
 // The machine on which this agent is running is being rebooted.
 // The juju-agent should move from rebooting to idle when the reboot is complete.
 StatusRebooting Status = "rebooting"

+ // StatusExecuting is set when:
 // The agent is running a hook or action. The human-readable message should reflect
 // which hook or action is being run.
 StatusExecuting Status = "executing"

+ // StatusIdle is set when:
 // Once the agent is installed and running it will notify the Juju server and its state
 // becomes "idle". It will stay "idle" until some action (e.g. it needs to run a hook) or
 // error (e.g. it loses contact with the Juju server) moves it to a different state.
 StatusIdle Status = "idle"

+ // StatusFailed is set when:
 // The unit agent has failed in some way, e.g. the agent ought to be signalling
 // activity, but it cannot be detected. It might also be that the unit agent
 // detected an unrecoverable condition and managed to tell the Juju server about it.
 StatusFailed Status = "failed"

+ // StatusLost is set when:
 // The juju agent has not communicated with the juju server for an unexpectedly long time;
 // the unit agent ought to be signalling activity, but none has been detected.
 StatusLost Status = "lost"
-
- // ---- Outdated ----
- // The unit agent is downloading the charm and running the install hook.
- StatusInstalling Status = "installing"
-
- // The unit is being destroyed; the agent will soon mark the unit as "dead".
- // In Juju 2.x this will describe the state of the agent rather than a unit.
- StatusStopping Status = "stopping"
 )

 const (
 // Status values specific to services and units, reflecting the
 // state of the software itself.

+ // StatusMaintenance is set when:
 // The unit is not yet providing services, but is actively doing stuff
 // in preparation for providing those services.
 // This is a "spinning" state, not an error state.
 // It reflects activity on the unit itself, not on peers or related units.
 StatusMaintenance Status = "maintenance"

+ // StatusTerminated is set when:
 // This unit used to exist, we have a record of it (perhaps because of storage
 // allocated for it that was flagged to survive it). Nonetheless, it is now gone.
 StatusTerminated Status = "terminated"

+ // StatusUnknown is set when:
 // A unit-agent has finished calling install, config-changed, and start,
 // but the charm has not called status-set yet.
 StatusUnknown Status = "unknown"

+ // StatusWaiting is set when:
 // The unit is unable to progress to an active state because a service to
 // which it is related is not running.
 StatusWaiting Status = "waiting"

+ // StatusBlocked is set when:
 // The unit needs manual intervention to get back to the Running state.
 StatusBlocked Status = "blocked"

+ // StatusActive is set when:
 // The unit believes it is correctly offering all the services it has
 // been asked to offer.
 StatusActive Status = "active"

=== modified file 'src/github.com/juju/juju/apiserver/params/storage.go'
--- src/github.com/juju/juju/apiserver/params/storage.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/storage.go 2016-03-22 15:18:22 +0000
@@ -162,13 +162,13 @@
 Ids []MachineStorageId `json:"ids"`
 }

-// Volume identifies and describes a storage volume in the environment.
+// Volume identifies and describes a storage volume in the model.
 type Volume struct {
 VolumeTag string `json:"volumetag"`
 Info VolumeInfo `json:"info"`
 }

-// Volume describes a storage volume in the environment.
+// VolumeInfo describes a storage volume in the model.
 type VolumeInfo struct {
 VolumeId string `json:"volumeid"`
 HardwareId string `json:"hardwareid,omitempty"`
@@ -177,7 +177,7 @@
 Persistent bool `json:"persistent"`
 }

-// Volumes describes a set of storage volumes in the environment.
+// Volumes describes a set of storage volumes in the model.
 type Volumes struct {
 Volumes []Volume `json:"volumes"`
 }
@@ -283,21 +283,21 @@
 Results []VolumeAttachmentParamsResult `json:"results,omitempty"`
 }

-// Filesystem identifies and describes a storage filesystem in the environment.
+// Filesystem identifies and describes a storage filesystem in the model.
 type Filesystem struct {
 FilesystemTag string `json:"filesystemtag"`
 VolumeTag string `json:"volumetag,omitempty"`
 Info FilesystemInfo `json:"info"`
 }

-// Filesystem describes a storage filesystem in the environment.
+// FilesystemInfo describes a storage filesystem in the model.
 type FilesystemInfo struct {
 FilesystemId string `json:"filesystemid"`
 // Size is the size of the filesystem in MiB.
 Size uint64 `json:"size"`
 }

-// Filesystems describes a set of storage filesystems in the environment.
+// Filesystems describes a set of storage filesystems in the model.
 type Filesystems struct {
 Filesystems []Filesystem `json:"filesystems"`
 }
@@ -414,39 +414,22 @@
 Attachments map[string]StorageAttachmentDetails `json:"attachments,omitempty"`
 }

-// LegacyStorageDetails holds information about storage.
-//
-// NOTE(axw): this is for backwards compatibility only. This struct
-// should not be changed!
-type LegacyStorageDetails struct {
- // StorageTag holds tag for this storage.
- StorageTag string `json:"storagetag"`
-
- // OwnerTag holds tag for the owner of this storage, unit or service.
- OwnerTag string `json:"ownertag"`
-
- // Kind holds what kind of storage this instance is.
- Kind StorageKind `json:"kind"`
-
- // Status indicates storage status, e.g. pending, provisioned, attached.
- Status string `json:"status,omitempty"`
-
- // UnitTag holds tag for unit for attached instances.
- UnitTag string `json:"unittag,omitempty"`
-
- // Location holds location for provisioned attached instances.
- Location string `json:"location,omitempty"`
-
- // Persistent indicates whether the storage is persistent or not.
- Persistent bool `json:"persistent"`
+// StorageFilter holds filter terms for listing storage details.
+type StorageFilter struct {
+ // We don't currently implement any filters. This exists to get the
+ // API structure right, and so we can add filters later as necessary.
+}
+
+// StorageFilters holds a set of storage filters.
+type StorageFilters struct {
+ Filters []StorageFilter `json:"filters,omitempty"`
 }
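StorageFilters above exists purely to give list calls a bulk, filterable shape. A sketch of the corresponding request: one (currently empty) filter per desired result set, answered positionally by the server:

package main

import (
	"fmt"

	"github.com/juju/juju/apiserver/params"
)

func main() {
	// An empty StorageFilter means "list everything"; the server is
	// expected to return one result list per filter, in order.
	args := params.StorageFilters{
		Filters: []params.StorageFilter{{}},
	}
	fmt.Printf("%+v\n", args)
}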
 // StorageDetailsResult holds information about a storage instance
 // or error related to its retrieval.
 type StorageDetailsResult struct {
- Result *StorageDetails `json:"details,omitempty"`
- Legacy LegacyStorageDetails `json:"result"`
- Error *Error `json:"error,omitempty"`
+ Result *StorageDetails `json:"result,omitempty"`
+ Error *Error `json:"error,omitempty"`
 }

 // StorageDetailsResults holds results for storage details or related storage error.
@@ -454,6 +437,17 @@
 Results []StorageDetailsResult `json:"results,omitempty"`
 }

+// StorageDetailsListResult holds a collection of storage details.
+type StorageDetailsListResult struct {
+ Result []StorageDetails `json:"result,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// StorageDetailsListResults holds a collection of collections of storage details.
+type StorageDetailsListResults struct {
+ Results []StorageDetailsListResult `json:"results,omitempty"`
+}
+
 // StorageAttachmentDetails holds detailed information about a storage attachment.
 type StorageAttachmentDetails struct {
 // StorageTag is the tag of the storage instance.
@@ -472,7 +466,6 @@

 // StoragePool holds data for a pool instance.
 type StoragePool struct {
-
 // Name is the pool's name.
 Name string `json:"name"`
@@ -483,9 +476,8 @@
 Attrs map[string]interface{} `json:"attrs"`
 }

-// StoragePoolFilter holds a filter for pool API call.
+// StoragePoolFilter holds a filter for matching storage pools.
 type StoragePoolFilter struct {
-
 // Names are pool's names to filter on.
 Names []string `json:"names,omitempty"`
@@ -493,9 +485,20 @@
 Providers []string `json:"providers,omitempty"`
 }

-// StoragePoolsResult holds a collection of pool instances.
+// StoragePoolFilters holds a collection of storage pool filters.
+type StoragePoolFilters struct {
+ Filters []StoragePoolFilter `json:"filters,omitempty"`
+}
+
+// StoragePoolsResult holds a collection of storage pools.
 type StoragePoolsResult struct {
- Results []StoragePool `json:"results,omitempty"`
+ Result []StoragePool `json:"storagepools,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// StoragePoolsResults holds a collection of storage pools results.
+type StoragePoolsResults struct {
+ Results []StoragePoolsResult `json:"results,omitempty"`
 }

 // VolumeFilter holds a filter for volume list API call.
@@ -509,7 +512,28 @@
 return len(f.Machines) == 0
 }

-// VolumeDetails describes a storage volume in the environment
+// VolumeFilters holds a collection of volume filters.
+type VolumeFilters struct {
+ Filters []VolumeFilter `json:"filters,omitempty"`
+}
+
+// FilesystemFilter holds a filter for filesystem list API call.
+type FilesystemFilter struct {
+ // Machines are machine tags to filter on.
+ Machines []string `json:"machines,omitempty"`
+}
+
+// IsEmpty determines if the filter is empty.
+func (f *FilesystemFilter) IsEmpty() bool {
+ return len(f.Machines) == 0
+}
+
+// FilesystemFilters holds a collection of filesystem filters.
+type FilesystemFilters struct {
+ Filters []FilesystemFilter `json:"filters,omitempty"`
+}
+
+// VolumeDetails describes a storage volume in the model
 // for the purpose of volume CLI commands.
 //
 // This is kept separate from Volume which contains only information
 // specific to the volume model, whereas VolumeDetails is intended
 // to contain complete information about a volume and related
 // information (status, attachments, storage).
 type VolumeDetails struct {
-
 // VolumeTag is the tag for the volume.
 VolumeTag string `json:"volumetag"`
@@ -536,67 +559,11 @@
 Storage *StorageDetails `json:"storage,omitempty"`
 }

-// LegacyVolumeDetails describes a storage volume in the environment
-// for the purpose of volume CLI commands.
-// -// This is kept separate from Volume which contains only information -// specific to the volume model, whereas LegacyVolumeDetails is intended -// to contain complete information about a volume. -// -// NOTE(axw): this is for backwards compatibility only. This struct -// should not be changed! -type LegacyVolumeDetails struct { - - // VolumeTag is tag for this volume instance. - VolumeTag string `json:"volumetag"` - - // StorageInstance returns the tag of the storage instance that this - // volume is assigned to, if any. - StorageTag string `json:"storage,omitempty"` - - // UnitTag is the tag of the unit attached to storage instance - // for this volume. - UnitTag string `json:"unit,omitempty"` - - // VolumeId is a unique provider-supplied ID for the volume. - VolumeId string `json:"volumeid,omitempty"` - - // HardwareId is the volume's hardware ID. - HardwareId string `json:"hardwareid,omitempty"` - - // Size is the size of the volume in MiB. - Size uint64 `json:"size,omitempty"` - - // Persistent reflects whether the volume is destroyed with the - // machine to which it is attached. - Persistent bool `json:"persistent"` - - // Status contains the current status of the volume. - Status EntityStatus `json:"status"` -} - // VolumeDetailsResult contains details about a volume, its attachments or // an error preventing retrieving those details. type VolumeDetailsResult struct { - - // Details describes the volume in detail. - Details *VolumeDetails `json:"details,omitempty"` - - // LegacyVolume describes the volume in detail. - // - // NOTE(axw): VolumeDetails contains redundant and nonsensical - // information. Use Details if it is available, and only use - // this for backwards-compatibility. - LegacyVolume *LegacyVolumeDetails `json:"volume,omitempty"` - - // LegacyAttachments describes the attachments of the volume to - // machines. - // - // NOTE(axw): this should have gone into VolumeDetails, but it's too - // late for that now. We'll continue to populate it, and use it - // if it's defined but Volume.Attachments is not. Please do not - // copy this structure. - LegacyAttachments []VolumeAttachment `json:"attachments,omitempty"` + // Result describes the volume in detail. + Result *VolumeDetails `json:"details,omitempty"` // Error contains volume retrieval error. Error *Error `json:"error,omitempty"` @@ -607,6 +574,71 @@ Results []VolumeDetailsResult `json:"results,omitempty"` } +// VolumeDetailsListResult holds a collection of volume details. +type VolumeDetailsListResult struct { + Result []VolumeDetails `json:"result,omitempty"` + Error *Error `json:"error,omitempty"` +} + +// VolumeDetailsListResults holds a collection of collections of volume details. +type VolumeDetailsListResults struct { + Results []VolumeDetailsListResult `json:"results,omitempty"` +} + +// FilesystemDetails describes a storage filesystem in the model +// for the purpose of filesystem CLI commands. +// +// This is kept separate from Filesystem which contains only information +// specific to the filesystem model, whereas FilesystemDetails is intended +// to contain complete information about a filesystem and related +// information (status, attachments, storage). +type FilesystemDetails struct { + // FilesystemTag is the tag for the filesystem. + FilesystemTag string `json:"filesystemtag"` + + // VolumeTag is the tag for the volume backing the + // filesystem, if any. + VolumeTag string `json:"volumetag,omitempty"` + + // Info contains information about the filesystem. 
+ Info FilesystemInfo `json:"info"`
+
+ // Status contains the status of the filesystem.
+ Status EntityStatus `json:"status"`
+
+ // MachineAttachments contains a mapping from
+ // machine tag to filesystem attachment information.
+ MachineAttachments map[string]FilesystemAttachmentInfo `json:"machineattachments,omitempty"`
+
+ // Storage contains details about the storage instance
+ // that the filesystem is assigned to, if any.
+ Storage *StorageDetails `json:"storage,omitempty"`
+}
+
+// FilesystemDetailsResult contains details about a filesystem, its attachments or
+// an error preventing retrieving those details.
+type FilesystemDetailsResult struct {
+ Result *FilesystemDetails `json:"result,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// FilesystemDetailsResults holds filesystem details.
+type FilesystemDetailsResults struct {
+ Results []FilesystemDetailsResult `json:"results,omitempty"`
+}
+
+// FilesystemDetailsListResult holds a collection of filesystem details.
+type FilesystemDetailsListResult struct {
+ Result []FilesystemDetails `json:"result,omitempty"`
+ Error *Error `json:"error,omitempty"`
+}
+
+// FilesystemDetailsListResults holds a collection of collections of
+// filesystem details.
+type FilesystemDetailsListResults struct {
+ Results []FilesystemDetailsListResult `json:"results,omitempty"`
+}
+
 // StorageConstraints contains constraints for storage instance.
 type StorageConstraints struct {
 // Pool is the name of the storage pool from which to provision the

=== modified file 'src/github.com/juju/juju/apiserver/params/systemmanager.go'
--- src/github.com/juju/juju/apiserver/params/systemmanager.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/params/systemmanager.go 2016-03-22 15:18:22 +0000
@@ -3,36 +3,46 @@

 package params

-// DestroySystemArgs holds the arguments for destroying a system.
-type DestroySystemArgs struct {
- // DestroyEnvironments specifies whether or not the hosted environments
+// DestroyControllerArgs holds the arguments for destroying a controller.
+type DestroyControllerArgs struct {
+ // DestroyModels specifies whether or not the hosted models
 // should be destroyed as well. If this is not specified, and there are
- // other hosted environments, the destruction of the system will fail.
- DestroyEnvironments bool `json:"destroy-environments"`
-
- // IgnoreBlocks specifies whether or not to ignore blocks
- // on hosted environments.
- IgnoreBlocks bool `json:"ignore-blocks"`
+ // other hosted models, the destruction of the controller will fail.
+ DestroyModels bool `json:"destroy-models"`
 }

-// EnvironmentBlockInfo holds information about an environment and its
+// ModelBlockInfo holds information about a model and its
 // current blocks.
-type EnvironmentBlockInfo struct {
+type ModelBlockInfo struct {
 Name string `json:"name"`
- UUID string `json:"env-uuid"`
+ UUID string `json:"model-uuid"`
 OwnerTag string `json:"owner-tag"`
 Blocks []string `json:"blocks"`
 }

-// EnvironmentBlockInfoList holds information about the blocked environments
-// for a system.
-type EnvironmentBlockInfoList struct {
- Environments []EnvironmentBlockInfo `json:"environments,omitempty"`
+// ModelBlockInfoList holds information about the blocked models
+// for a controller.
+type ModelBlockInfoList struct {
+ Models []ModelBlockInfo `json:"models,omitempty"`
 }
 // RemoveBlocksArgs holds the arguments for the RemoveBlocks command. It is a
 // struct to facilitate the easy addition of being able to remove blocks for
-// individual environments at a later date.
+// individual models at a later date.
 type RemoveBlocksArgs struct {
 All bool `json:"all"`
 }
+
+// ModelStatus holds information about the status of a juju model.
+type ModelStatus struct {
+ ModelTag string `json:"model-tag"`
+ Life Life `json:"life"`
+ HostedMachineCount int `json:"hosted-machine-count"`
+ ServiceCount int `json:"service-count"`
+ OwnerTag string `json:"owner-tag"`
+}
+
+// ModelStatusResults holds status information about a group of models.
+type ModelStatusResults struct {
+ Results []ModelStatus `json:"models"`
+}

=== added file 'src/github.com/juju/juju/apiserver/params/undertaker.go'
--- src/github.com/juju/juju/apiserver/params/undertaker.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/params/undertaker.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,25 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package params
+
+import (
+ "time"
+)
+
+// UndertakerModelInfo holds information on a model needed by the undertaker worker.
+type UndertakerModelInfo struct {
+ UUID string
+ Name string
+ GlobalName string
+ IsSystem bool
+ Life Life
+ TimeOfDeath *time.Time
+}
+
+// UndertakerModelInfoResult holds the result of an API call that returns an
+// UndertakerModelInfo or an error.
+type UndertakerModelInfoResult struct {
+ Error *Error
+ Result UndertakerModelInfo
+}

=== modified file 'src/github.com/juju/juju/apiserver/params/usermanager.go'
--- src/github.com/juju/juju/apiserver/params/usermanager.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/apiserver/params/usermanager.go 2016-03-22 15:18:22 +0000
@@ -42,9 +42,16 @@

 // AddUser stores the parameters to add one user.
 type AddUser struct {
- Username string `json:"username"`
- DisplayName string `json:"display-name"`
- Password string `json:"password"`
+ Username string `json:"username"`
+ DisplayName string `json:"display-name"`
+ SharedModelTags []string `json:"shared-model-tags"`
+
+ // Password is optional. If it is empty, then
+ // a secret key will be generated for the user
+ // and returned in AddUserResult. It will not
+ // be possible to log in with a password until
+ // registration with the secret key is completed.
+ Password string `json:"password,omitempty"`
 }

 // AddUserResults holds the results of the bulk AddUser API call.
@@ -52,8 +59,11 @@
 Results []AddUserResult `json:"results"`
 }

-// AddUserResult returns the tag of the newly created user, or an error.
+// AddUserResult holds the tag of the newly created user
+// and the secret key required to complete registration,
+// or an error.
 type AddUserResult struct {
- Tag string `json:"tag,omitempty"`
- Error *Error `json:"error,omitempty"`
+ Tag string `json:"tag,omitempty"`
+ SecretKey []byte `json:"secret-key,omitempty"`
+ Error *Error `json:"error,omitempty"`
 }

=== modified file 'src/github.com/juju/juju/apiserver/pinger.go'
--- src/github.com/juju/juju/apiserver/pinger.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/pinger.go 2016-03-22 15:18:22 +0000
@@ -14,7 +14,7 @@
 )

 func init() {
- common.RegisterStandardFacade("Pinger", 0, NewPinger)
+ common.RegisterStandardFacade("Pinger", 1, NewPinger)
 }

 // NewPinger returns an object that can be pinged by calling its Ping method.
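Given the AddUser/AddUserResult changes above, a caller has to distinguish password-based creation from secret-key registration. A sketch of that branch; the base64 rendering is just for display:

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/juju/juju/apiserver/params"
)

// describe reports how a freshly added user should proceed.
func describe(r params.AddUserResult) string {
	switch {
	case r.Error != nil:
		return fmt.Sprintf("add-user failed: %v", r.Error)
	case len(r.SecretKey) > 0:
		// No password was supplied, so registration must be completed
		// via the secret-key login exchange defined in registration.go.
		return "register with key " + base64.StdEncoding.EncodeToString(r.SecretKey)
	default:
		return "password login enabled for " + r.Tag
	}
}

func main() {
	fmt.Println(describe(params.AddUserResult{Tag: "user-carol"}))
}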
=== modified file 'src/github.com/juju/juju/apiserver/pinger_test.go' --- src/github.com/juju/juju/apiserver/pinger_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/pinger_test.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,7 @@ import ( "time" + "github.com/juju/errors" "github.com/juju/loggo" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -67,7 +68,7 @@ err = st.Close() c.Assert(err, jc.ErrorIsNil) err = st.Ping() - c.Assert(err, gc.Equals, rpc.ErrShutdown) + c.Assert(errors.Cause(err), gc.Equals, rpc.ErrShutdown) // Make sure that ping messages have not been logged. for _, m := range tw.Log() { === modified file 'src/github.com/juju/juju/apiserver/provisioner/container_test.go' --- src/github.com/juju/juju/apiserver/provisioner/container_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/provisioner/container_test.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,6 @@ package provisioner_test import ( - "encoding/hex" "fmt" "strings" @@ -30,6 +29,8 @@ provAPI *provisioner.ProvisionerAPI } +const regexpMACAddress = "([a-f0-9]{2}:){5}[a-f0-9]{2}" + func (s *containerSuite) SetUpTest(c *gc.C) { s.setUpTest(c, false) // Reset any "broken" dummy provider methods. @@ -152,13 +153,11 @@ c.Assert(cfg[j].Address, gc.Matches, rex) expectResults.Results[i].Config[j].Address = cfg[j].Address } - macAddress := cfg[j].MACAddress - c.Assert(macAddress[:8], gc.Equals, provisioner.MACAddressTemplate[:8]) - remainder := strings.Replace(macAddress[8:], ":", "", 3) - c.Assert(remainder, gc.HasLen, 6) - _, err = hex.DecodeString(remainder) - c.Assert(err, jc.ErrorIsNil) - expectResults.Results[i].Config[j].MACAddress = macAddress + if strings.HasPrefix(expCfg.MACAddress, "regex:") { + rex := strings.TrimPrefix(expCfg.MACAddress, "regex:") + c.Assert(cfg[j].MACAddress, gc.Matches, rex) + expectResults.Results[i].Config[j].MACAddress = cfg[j].MACAddress + } } } @@ -177,13 +176,66 @@ return err, tw.Log() } -func (s *prepareSuite) TestErrorWitnNoFeatureFlag(c *gc.C) { +func (s *prepareSuite) TestErrorWithNoFeatureFlag(c *gc.C) { s.SetFeatureFlags() // clear the flags. container := s.newAPI(c, true, true) args := s.makeArgs(container) - s.assertCall(c, args, &params.MachineNetworkConfigResults{}, - `address allocation not supported`, - ) + expectedError := &params.Error{ + Message: `failed to allocate an address for "0/lxc/0": address allocation not supported`, + Code: params.CodeNotSupported, + } + s.assertCall(c, args, &params.MachineNetworkConfigResults{ + Results: []params.MachineNetworkConfigResult{ + {Error: expectedError}, + }, + }, "") +} + +func (s *prepareSuite) TestErrorWithNoFeatureFlagAndBrokenAllocate(c *gc.C) { + s.breakEnvironMethods(c, "AllocateAddress") + s.SetFeatureFlags() + // Use the special "i-alloc-" prefix to force the dummy provider to allow + // AllocateAddress to run without the feature flag.
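The container tests above stop hex-decoding generated MAC addresses and instead match them against regexpMACAddress, via a "regex:" prefix on the expected value. A standalone sketch of what that pattern accepts (the sample address is illustrative only; the actual MACAddressTemplate value lives elsewhere in the provisioner package):

    package main

    import (
        "fmt"
        "regexp"
    )

    const regexpMACAddress = "([a-f0-9]{2}:){5}[a-f0-9]{2}"

    func main() {
        // gc.Matches anchors its pattern to the whole string, so do
        // the same here.
        re := regexp.MustCompile("^" + regexpMACAddress + "$")
        fmt.Println(re.MatchString("00:16:3e:12:ab:cd")) // true: six lowercase hex octets
        fmt.Println(re.MatchString("00:16:3E:12:AB:CD")) // false: uppercase hex is not matched
    }
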
+ container := s.newCustomAPI(c, "i-alloc-me", true, false) + args := s.makeArgs(container) + expectedError := &params.Error{ + Message: `failed to allocate an address for "0/lxc/0": dummy.AllocateAddress is broken`, + } + s.assertCall(c, args, &params.MachineNetworkConfigResults{ + Results: []params.MachineNetworkConfigResult{ + {Error: expectedError}, + }, + }, "") +} + +func (s *prepareSuite) TestErrorWithNoFeatureFlagAllocateSuccess(c *gc.C) { + s.SetFeatureFlags() + s.breakEnvironMethods(c) + // Use the special "i-alloc-" prefix to force the dummy provider to allow + // AllocateAddress to run without the feature flag, which simulates a MAAS + // 1.8+ environment where without the flag we still try calling + // AllocateAddress for the device we created for the container. + container := s.newCustomAPI(c, "i-alloc-me", true, false) + args := s.makeArgs(container) + _, testLog := s.assertCall(c, args, s.makeResults([]params.NetworkConfig{{ + DeviceIndex: 0, + NetworkName: "juju-private", + ProviderId: "dummy-eth0", + InterfaceName: "eth0", + DNSServers: []string{"ns1.dummy", "ns2.dummy"}, + GatewayAddress: "0.10.0.1", + ConfigType: "static", + MACAddress: "regex:" + regexpMACAddress, + Address: "regex:0.10.0.[0-9]{1,3}", // we don't care about the actual value. + }}), "") + + c.Assert(testLog, jc.LogMatches, jc.SimpleMessages{{ + loggo.INFO, + `allocated address ".+" on instance "i-alloc-me" for container "juju-machine-0-lxc-0"`, + }, { + loggo.INFO, + `assigned address ".+" to container "0/lxc/0"`, + }}) } func (s *prepareSuite) TestErrorWithNonProvisionedHost(c *gc.C) { @@ -419,8 +471,9 @@ // are called along with the addresses to verify the logs later. var allocAttemptedAddrs, allocAddrsOK, setAddrs, releasedAddrs []string s.PatchValue(provisioner.AllocateAddrTo, func(ip *state.IPAddress, m *state.Machine, mac string) error { - c.Logf("allocateAddrTo called for address %q, machine %q", ip.String(), m) + c.Logf("allocateAddrTo called for address %q, machine %q, mac %q", ip.String(), m, mac) c.Assert(m.Id(), gc.Equals, container.Id()) + c.Assert(mac, gc.Matches, regexpMACAddress) allocAttemptedAddrs = append(allocAttemptedAddrs, ip.Value()) // Succeed on every other call to give a chance to call @@ -523,6 +576,7 @@ DeviceIndex: 0, InterfaceName: "eth0", VLANTag: 0, + MACAddress: "regex:" + regexpMACAddress, Disabled: false, NoAutoStart: false, ConfigType: "static", @@ -604,6 +658,7 @@ DeviceIndex: 0, InterfaceName: "eth0", VLANTag: 0, + MACAddress: "regex:" + regexpMACAddress, Disabled: false, NoAutoStart: false, ConfigType: "static", @@ -640,6 +695,7 @@ DeviceIndex: 1, InterfaceName: "eth1", VLANTag: 1, + MACAddress: "regex:" + regexpMACAddress, Disabled: false, NoAutoStart: true, ConfigType: "static", @@ -713,9 +769,12 @@ s.SetFeatureFlags() // clear the flags.
s.newAPI(c, true, false) args := s.makeArgs(s.machines[0]) - s.assertCall(c, args, &params.ErrorResults{}, - "address allocation not supported", - ) + expectedError := `cannot mark addresses for removal for "machine-0": not a container` + s.assertCall(c, args, &params.ErrorResults{ + Results: []params.ErrorResult{{ + Error: apiservertesting.ServerError(expectedError), + }}, + }, "") } func (s *releaseSuite) TestErrorWithHostInsteadOfContainer(c *gc.C) { === added file 'src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go' --- src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/provisioner/imagemetadata_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,286 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/apiserver/provisioner" + "github.com/juju/juju/environs/imagemetadata" + imagetesting "github.com/juju/juju/environs/imagemetadata/testing" + sstesting "github.com/juju/juju/environs/simplestreams/testing" + "github.com/juju/juju/state/cloudimagemetadata" +) + +// useTestImageData causes the given content to be served when published metadata is requested. +func useTestImageData(c *gc.C, files map[string]string) { + if files != nil { + sstesting.SetRoundTripperFiles(sstesting.AddSignedFiles(c, files), nil) + } else { + sstesting.SetRoundTripperFiles(nil, nil) + } +} + +type ImageMetadataSuite struct { + provisionerSuite } var _ = gc.Suite(&ImageMetadataSuite{}) +func (s *ImageMetadataSuite) SetUpSuite(c *gc.C) { + s.provisionerSuite.SetUpSuite(c) + + // Make sure that there is nothing in data sources. + // Each individual test will decide if it needs metadata there. + imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "test:/daily") + s.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) + useTestImageData(c, nil) +} + +func (s *ImageMetadataSuite) TearDownSuite(c *gc.C) { + useTestImageData(c, nil) + s.provisionerSuite.TearDownSuite(c) +} + +func (s *ImageMetadataSuite) SetUpTest(c *gc.C) { + s.provisionerSuite.SetUpTest(c) +} + +func (s *ImageMetadataSuite) TestMetadataNone(c *gc.C) { + api, err := provisioner.NewProvisionerAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + + result, err := api.ProvisioningInfo(s.getTestMachinesTags(c)) + c.Assert(err, jc.ErrorIsNil) + + expected := make([][]params.CloudImageMetadata, len(s.machines)) + for i := range result.Results { + expected[i] = nil + } + s.assertImageMetadataResults(c, result, expected...) +} + +func (s *ImageMetadataSuite) TestMetadataNotInStateButInDataSources(c *gc.C) { + // Ensure metadata is in data sources and not in state. + useTestImageData(c, testImagesData) + + criteria := cloudimagemetadata.MetadataFilter{Stream: "daily"} + found, err := s.State.CloudImageMetadataStorage.FindMetadata(criteria) + c.Assert(errors.IsNotFound(err), jc.IsTrue) + c.Assert(found, gc.HasLen, 0) + + api, err := provisioner.NewProvisionerAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + + result, err := api.ProvisioningInfo(s.getTestMachinesTags(c)) + c.Assert(err, jc.ErrorIsNil) + + expected := s.expectedDataSourceImageMetadata() + s.assertImageMetadataResults(c, result, expected...)
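The FindMetadata assertion above checks the error with errors.IsNotFound from github.com/juju/errors rather than comparing message strings. A minimal standalone sketch of that pattern (the find function here is a stand-in, not a juju API):

    package main

    import (
        "fmt"

        "github.com/juju/errors"
    )

    // find stands in for a lookup such as
    // CloudImageMetadataStorage.FindMetadata with no matches.
    func find() error {
        return errors.NotFoundf("matching cloud image metadata")
    }

    func main() {
        err := find()
        fmt.Println(errors.IsNotFound(err)) // true

        // The Is* helpers look at the cause, so tracing or annotating
        // the error higher up the stack does not break the check.
        fmt.Println(errors.IsNotFound(errors.Trace(err)))              // true
        fmt.Println(errors.IsNotFound(errors.Annotate(err, "lookup"))) // true
    }
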
+ + // Also make sure that this image metadata has been written to state for re-use + saved, err := s.State.CloudImageMetadataStorage.FindMetadata(criteria) + c.Assert(err, jc.ErrorIsNil) + c.Assert(saved, gc.DeepEquals, map[string][]cloudimagemetadata.Metadata{ + "default cloud images": s.convertCloudImageMetadata(expected[0]), + }) +} + +func (s *ImageMetadataSuite) TestMetadataFromState(c *gc.C) { + api, err := provisioner.NewProvisionerAPI(s.State, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) + + expected := s.expectedDataSourceImageMetadata() + + // Write metadata to state. + metadata := s.convertCloudImageMetadata(expected[0]) + for _, m := range metadata { + err := s.State.CloudImageMetadataStorage.SaveMetadata( + []cloudimagemetadata.Metadata{m}, + ) + c.Assert(err, jc.ErrorIsNil) + } + + result, err := api.ProvisioningInfo(s.getTestMachinesTags(c)) + c.Assert(err, jc.ErrorIsNil) + + s.assertImageMetadataResults(c, result, expected...) +} + +func (s *ImageMetadataSuite) getTestMachinesTags(c *gc.C) params.Entities { + + testMachines := make([]params.Entity, len(s.machines)) + + for i, m := range s.machines { + testMachines[i] = params.Entity{Tag: m.Tag().String()} + } + return params.Entities{Entities: testMachines} +} + +func (s *ImageMetadataSuite) convertCloudImageMetadata(all []params.CloudImageMetadata) []cloudimagemetadata.Metadata { + expected := make([]cloudimagemetadata.Metadata, len(all)) + for i, one := range all { + expected[i] = cloudimagemetadata.Metadata{ + cloudimagemetadata.MetadataAttributes{ + Region: one.Region, + Version: one.Version, + Series: one.Series, + Arch: one.Arch, + VirtType: one.VirtType, + RootStorageType: one.RootStorageType, + Source: one.Source, + Stream: one.Stream, + }, + one.Priority, + one.ImageId, + } + } + return expected +} + +func (s *ImageMetadataSuite) expectedDataSourceImageMetadata() [][]params.CloudImageMetadata { + expected := make([][]params.CloudImageMetadata, len(s.machines)) + for i := range s.machines { + expected[i] = []params.CloudImageMetadata{ + {ImageId: "ami-1126745463", + Region: "another_dummy_region", + Version: "12.10", + Series: "quantal", + Arch: "amd64", + VirtType: "pv", + RootStorageType: "ebs", + Source: "default cloud images", + Stream: "daily", + Priority: 10, + }, + {ImageId: "ami-26745463", + Region: "dummy_region", + Version: "12.10", + Series: "quantal", + Arch: "amd64", + VirtType: "pv", + RootStorageType: "ebs", + Stream: "daily", + Source: "default cloud images", + Priority: 10}, + } + } + return expected +} + +func (s *ImageMetadataSuite) assertImageMetadataResults(c *gc.C, obtained params.ProvisioningInfoResults, expected ...[]params.CloudImageMetadata) { + c.Assert(obtained.Results, gc.HasLen, len(expected)) + for i, one := range obtained.Results { + // We are only concerned with images here + c.Assert(one.Result.ImageMetadata, gc.DeepEquals, expected[i]) + } +} + +// TODO (anastasiamac 2015-09-04) This metadata is so verbose. +// Need to generate the text by creating a struct and marshalling it.
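A minimal sketch of what that TODO could look like for the index document: build it from small structs with JSON tags mirroring the fields used below, then marshal, instead of maintaining the literal text (the struct names here are illustrative, not an existing juju API):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type cloud struct {
        Region   string `json:"region"`
        Endpoint string `json:"endpoint"`
    }

    type indexEntry struct {
        Updated   string   `json:"updated"`
        Clouds    []cloud  `json:"clouds"`
        CloudName string   `json:"cloudname"`
        DataType  string   `json:"datatype"`
        Format    string   `json:"format"`
        Products  []string `json:"products"`
        Path      string   `json:"path"`
    }

    type indexDoc struct {
        Index   map[string]indexEntry `json:"index"`
        Updated string                `json:"updated"`
        Format  string                `json:"format"`
    }

    func main() {
        doc := indexDoc{
            Index: map[string]indexEntry{
                "com.ubuntu.cloud:daily:aws": {
                    Updated:   "Wed, 01 May 2013 13:31:26 +0000",
                    Clouds:    []cloud{{Region: "dummy_region", Endpoint: "https://anywhere"}},
                    CloudName: "aws",
                    DataType:  "image-ids",
                    Format:    "products:1.0",
                    Products:  []string{"com.ubuntu.cloud.daily:server:12.10:amd64"},
                    Path:      "streams/v1/image_metadata.json",
                },
            },
            Updated: "Wed, 27 May 2015 13:31:26 +0000",
            Format:  "index:1.0",
        }
        out, _ := json.MarshalIndent(doc, "", "    ")
        fmt.Println(string(out))
    }
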
+var testImagesData = map[string]string{ + "/daily/streams/v1/index.json": ` + { + "index": { + "com.ubuntu.cloud:daily:aws": { + "updated": "Wed, 01 May 2013 13:31:26 +0000", + "clouds": [ + { + "region": "dummy_region", + "endpoint": "https://anywhere" + }, + { + "region": "another_dummy_region", + "endpoint": "" + } + ], + "cloudname": "aws", + "datatype": "image-ids", + "format": "products:1.0", + "products": [ + "com.ubuntu.cloud.daily:server:12.10:amd64", + "com.ubuntu.cloud.daily:server:14.04:amd64" + ], + "path": "streams/v1/image_metadata.json" + } + }, + "updated": "Wed, 27 May 2015 13:31:26 +0000", + "format": "index:1.0" + } +`, + "/daily/streams/v1/image_metadata.json": ` +{ + "updated": "Wed, 27 May 2015 13:31:26 +0000", + "content_id": "com.ubuntu.cloud:daily:aws", + "products": { + "com.ubuntu.cloud.daily:server:14.04:amd64": { + "release": "trusty", + "version": "14.04", + "arch": "amd64", + "versions": { + "20140118": { + "items": { + "nzww1pe": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da1", + "id": "ami-36745463" + }, + "nzww1pe2": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da2", + "id": "ami-1136745463" + } + }, + "pubname": "ubuntu-trusty-14.04-amd64-server-20140118", + "label": "release" + } + } + }, + "com.ubuntu.cloud.daily:server:12.10:amd64": { + "release": "quantal", + "version": "12.10", + "arch": "amd64", + "versions": { + "20121218": { + "items": { + "usww1pe": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da1", + "id": "ami-26745463" + }, + "usww1pe2": { + "root_store": "ebs", + "virt": "pv", + "crsn": "da2", + "id": "ami-1126745463" + } + }, + "pubname": "ubuntu-quantal-12.10-amd64-server-20121218", + "label": "release" + } + } + } + }, + "_aliases": { + "crsn": { + "da1": { + "region": "dummy_region", + "endpoint": "https://anywhere" + }, + "da2": { + "region": "another_dummy_region", + "endpoint": "" + } + } + }, + "format": "products:1.0" +} +`, +} === modified file 'src/github.com/juju/juju/apiserver/provisioner/provisioner.go' --- src/github.com/juju/juju/apiserver/provisioner/provisioner.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/provisioner/provisioner.go 2016-03-22 15:18:22 +0000 @@ -6,8 +6,6 @@ import ( "fmt" "math/rand" - "sort" - "strings" "github.com/juju/errors" "github.com/juju/loggo" @@ -15,17 +13,15 @@ "github.com/juju/utils/set" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/storagecommon" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/constraints" "github.com/juju/juju/container" "github.com/juju/juju/environs" - "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" "github.com/juju/juju/network" "github.com/juju/juju/provider" "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/state/watcher" "github.com/juju/juju/storage" "github.com/juju/juju/storage/poolmanager" @@ -35,14 +31,14 @@ var logger = loggo.GetLogger("juju.apiserver.provisioner") func init() { - common.RegisterStandardFacade("Provisioner", 0, NewProvisionerAPI) + common.RegisterStandardFacade("Provisioner", 1, NewProvisionerAPI) // Version 1 has the same set of methods as 0, with the same // signatures, but its ProvisioningInfo returns additional // information. Clients may require version 1 so that they // receive this additional information; otherwise they are // compatible. 
- common.RegisterStandardFacade("Provisioner", 1, NewProvisionerAPI) + common.RegisterStandardFacade("Provisioner", 2, NewProvisionerAPI) } // ProvisionerAPI provides access to the Provisioner API facade. @@ -55,8 +51,8 @@ *common.LifeGetter *common.StateAddresser *common.APIAddresser - *common.EnvironWatcher - *common.EnvironMachinesWatcher + *common.ModelWatcher + *common.ModelMachinesWatcher *common.InstanceIdGetter *common.ToolsFinder *common.ToolsGetter @@ -69,11 +65,11 @@ // NewProvisionerAPI creates a new server-side ProvisionerAPI facade. func NewProvisionerAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*ProvisionerAPI, error) { - if !authorizer.AuthMachineAgent() && !authorizer.AuthEnvironManager() { + if !authorizer.AuthMachineAgent() && !authorizer.AuthModelManager() { return nil, common.ErrPerm } getAuthFunc := func() (common.AuthFunc, error) { - isEnvironManager := authorizer.AuthEnvironManager() + isModelManager := authorizer.AuthModelManager() isMachineAgent := authorizer.AuthMachineAgent() authEntityTag := authorizer.GetAuthTag() @@ -88,7 +84,7 @@ if parentId == "" { // All top-level machines are accessible by the // environment manager. - return isEnvironManager + return isModelManager } // All containers with the authenticated machine as a // parent are accessible by it. @@ -104,29 +100,29 @@ getAuthOwner := func() (common.AuthFunc, error) { return authorizer.AuthOwner, nil } - env, err := st.Environment() + env, err := st.Model() if err != nil { return nil, err } urlGetter := common.NewToolsURLGetter(env.UUID(), st) return &ProvisionerAPI{ - Remover: common.NewRemover(st, false, getAuthFunc), - StatusSetter: common.NewStatusSetter(st, getAuthFunc), - StatusGetter: common.NewStatusGetter(st, getAuthFunc), - DeadEnsurer: common.NewDeadEnsurer(st, getAuthFunc), - PasswordChanger: common.NewPasswordChanger(st, getAuthFunc), - LifeGetter: common.NewLifeGetter(st, getAuthFunc), - StateAddresser: common.NewStateAddresser(st), - APIAddresser: common.NewAPIAddresser(st, resources), - EnvironWatcher: common.NewEnvironWatcher(st, resources, authorizer), - EnvironMachinesWatcher: common.NewEnvironMachinesWatcher(st, resources, authorizer), - InstanceIdGetter: common.NewInstanceIdGetter(st, getAuthFunc), - ToolsFinder: common.NewToolsFinder(st, st, urlGetter), - ToolsGetter: common.NewToolsGetter(st, st, st, urlGetter, getAuthOwner), - st: st, - resources: resources, - authorizer: authorizer, - getAuthFunc: getAuthFunc, + Remover: common.NewRemover(st, false, getAuthFunc), + StatusSetter: common.NewStatusSetter(st, getAuthFunc), + StatusGetter: common.NewStatusGetter(st, getAuthFunc), + DeadEnsurer: common.NewDeadEnsurer(st, getAuthFunc), + PasswordChanger: common.NewPasswordChanger(st, getAuthFunc), + LifeGetter: common.NewLifeGetter(st, getAuthFunc), + StateAddresser: common.NewStateAddresser(st), + APIAddresser: common.NewAPIAddresser(st, resources), + ModelWatcher: common.NewModelWatcher(st, resources, authorizer), + ModelMachinesWatcher: common.NewModelMachinesWatcher(st, resources, authorizer), + InstanceIdGetter: common.NewInstanceIdGetter(st, getAuthFunc), + ToolsFinder: common.NewToolsFinder(st, st, urlGetter), + ToolsGetter: common.NewToolsGetter(st, st, st, urlGetter, getAuthOwner), + st: st, + resources: resources, + authorizer: authorizer, + getAuthFunc: getAuthFunc, }, nil } @@ -233,7 +229,7 @@ // needed for configuring the container manager. 
func (p *ProvisionerAPI) ContainerManagerConfig(args params.ContainerManagerConfigParams) (params.ContainerManagerConfig, error) { var result params.ContainerManagerConfig - config, err := p.st.EnvironConfig() + config, err := p.st.ModelConfig() if err != nil { return result, err } @@ -292,7 +288,7 @@ // needed for container cloud-init. func (p *ProvisionerAPI) ContainerConfig() (params.ContainerConfig, error) { result := params.ContainerConfig{} - config, err := p.st.EnvironConfig() + config, err := p.st.ModelConfig() if err != nil { return result, err } @@ -380,69 +376,6 @@ return result, nil } -// ProvisioningInfo returns the provisioning information for each given machine entity. -func (p *ProvisionerAPI) ProvisioningInfo(args params.Entities) (params.ProvisioningInfoResults, error) { - result := params.ProvisioningInfoResults{ - Results: make([]params.ProvisioningInfoResult, len(args.Entities)), - } - canAccess, err := p.getAuthFunc() - if err != nil { - return result, err - } - for i, entity := range args.Entities { - tag, err := names.ParseMachineTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - machine, err := p.getMachine(canAccess, tag) - if err == nil { - result.Results[i].Result, err = p.getProvisioningInfo(machine) - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -func (p *ProvisionerAPI) getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) { - cons, err := m.Constraints() - if err != nil { - return nil, err - } - volumes, err := p.machineVolumeParams(m) - if err != nil { - return nil, errors.Trace(err) - } - // TODO(dimitern) Drop this once we only use spaces for - // deployments. - networks, err := m.RequestedNetworks() - if err != nil { - return nil, err - } - var jobs []multiwatcher.MachineJob - for _, job := range m.Jobs() { - jobs = append(jobs, job.ToParams()) - } - tags, err := p.machineTags(m, jobs) - if err != nil { - return nil, errors.Trace(err) - } - subnetsToZones, err := p.machineSubnetsAndZones(m) - if err != nil { - return nil, errors.Annotate(err, "cannot match subnets to zones") - } - return ¶ms.ProvisioningInfo{ - Constraints: cons, - Series: m.Series(), - Placement: m.Placement(), - Networks: networks, - Jobs: jobs, - Volumes: volumes, - Tags: tags, - SubnetsToZones: subnetsToZones, - }, nil -} - // DistributionGroup returns, for each given machine entity, // a slice of instance.Ids that belong to the same distribution // group as that machine. This information may be used to @@ -480,7 +413,7 @@ // environManagerInstances returns all environ manager instances. func environManagerInstances(st *state.State) ([]instance.Id, error) { - info, err := st.StateServerInfo() + info, err := st.ControllerInfo() if err != nil { return nil, err } @@ -556,70 +489,6 @@ return result, nil } -// machineVolumeParams retrieves VolumeParams for the volumes that should be -// provisioned with, and attached to, the machine. The client should ignore -// parameters that it does not know how to handle. 
-func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) { - volumeAttachments, err := m.VolumeAttachments() - if err != nil { - return nil, err - } - if len(volumeAttachments) == 0 { - return nil, nil - } - envConfig, err := p.st.EnvironConfig() - if err != nil { - return nil, err - } - poolManager := poolmanager.New(state.NewStateSettings(p.st)) - allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments)) - for _, volumeAttachment := range volumeAttachments { - volumeTag := volumeAttachment.Volume() - volume, err := p.st.Volume(volumeTag) - if err != nil { - return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id()) - } - storageInstance, err := common.MaybeAssignedStorageInstance( - volume.StorageInstance, p.st.StorageInstance, - ) - if err != nil { - return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id()) - } - volumeParams, err := common.VolumeParams(volume, storageInstance, envConfig, poolManager) - if err != nil { - return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id()) - } - provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider)) - if err != nil { - return nil, errors.Annotate(err, "getting storage provider") - } - if provider.Dynamic() { - // Leave dynamic storage to the storage provisioner. - continue - } - volumeAttachmentParams, ok := volumeAttachment.Params() - if !ok { - // Attachment is already provisioned; this is an insane - // state, so we should not proceed with the volume. - return nil, errors.Errorf( - "volume %s already attached to machine %s", - volumeTag.Id(), m.Id(), - ) - } - // Not provisioned yet, so ask the cloud provisioner do it. - volumeParams.Attachment = ¶ms.VolumeAttachmentParams{ - volumeTag.String(), - m.Tag().String(), - "", // we're creating the volume, so it has no volume ID. - "", // we're creating the machine, so it has no instance ID. - volumeParams.Provider, - volumeAttachmentParams.ReadOnly, - } - allVolumeParams = append(allVolumeParams, volumeParams) - } - return allVolumeParams, nil -} - // storageConfig returns the provider type and config attributes for the // specified poolName. If no such pool exists, we check to see if poolName is // actually a provider type, in which case config will be empty. @@ -731,36 +600,6 @@ return result, nil } -// SetProvisioned sets the provider specific instance id, nonce and -// metadata for each given machine. Once set, the instance id cannot -// be changed. -// -// TODO(dimitern) This is not used anymore (as of 1.19.0) and is -// retained only for backwards-compatibility. It should be removed as -// deprecated. SetInstanceInfo is used instead. -func (p *ProvisionerAPI) SetProvisioned(args params.SetProvisioned) (params.ErrorResults, error) { - result := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.Machines)), - } - canAccess, err := p.getAuthFunc() - if err != nil { - return result, err - } - for i, arg := range args.Machines { - tag, err := names.ParseMachineTag(arg.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - machine, err := p.getMachine(canAccess, tag) - if err == nil { - err = machine.SetProvisioned(arg.InstanceId, arg.Nonce, arg.Characteristics) - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - // SetInstanceInfo sets the provider specific machine id, nonce, // metadata and network info for each given machine. 
Once set, the // instance id cannot be changed. @@ -785,11 +624,11 @@ if err != nil { return err } - volumes, err := common.VolumesToState(arg.Volumes) + volumes, err := storagecommon.VolumesToState(arg.Volumes) if err != nil { return err } - volumeAttachments, err := common.VolumeAttachmentInfosToState(arg.VolumeAttachments) + volumeAttachments, err := storagecommon.VolumeAttachmentInfosToState(arg.VolumeAttachments) if err != nil { return err } @@ -815,7 +654,7 @@ // the provisioner should retry provisioning machines with transient errors. func (p *ProvisionerAPI) WatchMachineErrorRetry() (params.NotifyWatchResult, error) { result := params.NotifyWatchResult{} - if !p.authorizer.AuthEnvironManager() { + if !p.authorizer.AuthModelManager() { return result, common.ErrPerm } watch := newWatchMachineErrorRetry() @@ -828,6 +667,10 @@ return result, nil } +func containerHostname(containerTag names.Tag) string { + return fmt.Sprintf("%s-%s", container.DefaultNamespace, containerTag.String()) +} + // ReleaseContainerAddresses finds addresses allocated to a container // and marks them as Dead, to be released and removed. It accepts // container tags as arguments. If address allocation feature flag is @@ -837,10 +680,6 @@ Results: make([]params.ErrorResult, len(args.Entities)), } - if !environs.AddressAllocationEnabled() { - return result, errors.NotSupportedf("address allocation") - } - canAccess, err := p.getAuthFunc() if err != nil { logger.Errorf("failed to get an authorisation function: %v", err) @@ -919,6 +758,9 @@ // generateMACAddress creates a random MAC address within the space defined by // MACAddressTemplate above. +// +// TODO(dimitern): We should make a best effort to ensure the MAC address we +// generate is unique at least within the current environment. func generateMACAddress() string { digits := make([]interface{}, 3) for i := range digits { @@ -930,16 +772,16 @@ // prepareOrGetContainerInterfaceInfo optionally allocates an address and returns information // for configuring networking on a container. It accepts container tags as arguments. func (p *ProvisionerAPI) prepareOrGetContainerInterfaceInfo( - args params.Entities, provisionContainer bool) ( - params.MachineNetworkConfigResults, error) { + args params.Entities, + provisionContainer bool, +) ( + params.MachineNetworkConfigResults, + error, +) { result := params.MachineNetworkConfigResults{ Results: make([]params.MachineNetworkConfigResult, len(args.Entities)), } - if !environs.AddressAllocationEnabled() { - return result, errors.NotSupportedf("address allocation") - } - // Some preparations first. environ, host, canAccess, err := p.prepareContainerAccessEnvironment() if err != nil { @@ -953,9 +795,27 @@ err = errors.NotProvisionedf("cannot allocate addresses: host machine %q", host) return result, err } - subnet, subnetInfo, interfaceInfo, err := p.prepareAllocationNetwork(environ, host, instId) - if err != nil { - return result, errors.Annotate(err, "cannot allocate addresses") + var subnet *state.Subnet + var subnetInfo network.SubnetInfo + var interfaceInfo network.InterfaceInfo + if environs.AddressAllocationEnabled() { + // We don't need a subnet unless we need to allocate a static IP. 
+ subnet, subnetInfo, interfaceInfo, err = p.prepareAllocationNetwork(environ, instId) + if err != nil { + return result, errors.Annotate(err, "cannot allocate addresses") + } + } else { + var allInterfaceInfos []network.InterfaceInfo + allInterfaceInfos, err = environ.NetworkInterfaces(instId) + if err != nil { + return result, errors.Annotatef(err, "cannot instance %q interfaces", instId) + } else if len(allInterfaceInfos) == 0 { + return result, errors.New("no interfaces available") + } + // Currently we only support a single NIC per container, so we only need + // the information from the host instance's first NIC. + logger.Tracef("interfaces for instance %q: %v", instId, allInterfaceInfos) + interfaceInfo = allInterfaceInfos[0] } // Loop over the passed container tags. @@ -993,7 +853,7 @@ var macAddress string var address *state.IPAddress if provisionContainer { - // Allocate and set address. + // Allocate and set an address. macAddress = generateMACAddress() address, err = p.allocateAddress(environ, subnet, host, container, instId, macAddress) if err != nil { @@ -1021,15 +881,17 @@ address = addresses[0] macAddress = address.MACAddress() } + // Store it on the machine, construct and set an interface result. dnsServers := make([]string, len(interfaceInfo.DNSServers)) - for i, dns := range interfaceInfo.DNSServers { - dnsServers[i] = dns.Value + for l, dns := range interfaceInfo.DNSServers { + dnsServers[l] = dns.Value } if macAddress == "" { macAddress = interfaceInfo.MACAddress } + // TODO(dimitern): Support allocating one address per NIC on // the host, effectively creating the same number of NICs in // the container. @@ -1048,30 +910,37 @@ DNSServers: dnsServers, ConfigType: string(network.ConfigStatic), Address: address.Value(), - // container's gateway is the host's primary NIC's IP. - GatewayAddress: interfaceInfo.Address.Value, - ExtraConfig: interfaceInfo.ExtraConfig, + GatewayAddress: interfaceInfo.GatewayAddress.Value, + ExtraConfig: interfaceInfo.ExtraConfig, }}, } } return result, nil } +func (p *ProvisionerAPI) maybeGetNetworkingEnviron() (environs.NetworkingEnviron, error) { + cfg, err := p.st.ModelConfig() + if err != nil { + return nil, errors.Annotate(err, "failed to get model config") + } + environ, err := environs.New(cfg) + if err != nil { + return nil, errors.Annotate(err, "failed to construct a model from config") + } + netEnviron, supported := environs.SupportsNetworking(environ) + if !supported { + // " not supported" will be appended to the message below. + return nil, errors.NotSupportedf("model %q networking", cfg.Name()) + } + return netEnviron, nil +} + // prepareContainerAccessEnvironment retrieves the environment, host machine, and access // for working with containers. func (p *ProvisionerAPI) prepareContainerAccessEnvironment() (environs.NetworkingEnviron, *state.Machine, common.AuthFunc, error) { - cfg, err := p.st.EnvironConfig() - if err != nil { - return nil, nil, nil, errors.Annotate(err, "failed to get environment config") - } - environ, err := environs.New(cfg) - if err != nil { - return nil, nil, nil, errors.Annotate(err, "failed to construct an environment from config") - } - netEnviron, supported := environs.SupportsNetworking(environ) - if !supported { - // " not supported" will be appended to the message below. 
- return nil, nil, nil, errors.NotSupportedf("environment %q networking", cfg.Name()) + netEnviron, err := p.maybeGetNetworkingEnviron() + if err != nil { + return nil, nil, nil, errors.Trace(err) } canAccess, err := p.getAuthFunc() @@ -1097,7 +966,6 @@ // for the allocations. func (p *ProvisionerAPI) prepareAllocationNetwork( environ environs.NetworkingEnviron, - host *state.Machine, instId instance.Id, ) ( *state.Subnet, @@ -1112,7 +980,7 @@ if err != nil { return nil, subnetInfo, interfaceInfo, errors.Trace(err) } else if len(interfaces) == 0 { - return nil, subnetInfo, interfaceInfo, errors.Errorf("no interfaces available") + return nil, subnetInfo, interfaceInfo, errors.New("no interfaces available") } logger.Tracef("interfaces for instance %q: %v", instId, interfaces) @@ -1158,10 +1026,24 @@ // this subnet has no allocatable IPs continue } + if sub.AllocatableIPLow != nil && sub.AllocatableIPLow.To4() == nil { + logger.Tracef("ignoring IPv6 subnet %q - allocating IPv6 addresses not yet supported", sub.ProviderId) + // Until we change the way we pick addresses, IPv6 subnets with + // their *huge* ranges (/64 being the default), there is no point in + // allowing such subnets (it won't even work as PickNewAddress() + // assumes IPv4 allocatable range anyway). + continue + } ok, err := environ.SupportsAddressAllocation(sub.ProviderId) if err == nil && ok { subnetInfo = sub interfaceInfo = subnetIdToInterface[sub.ProviderId] + + // Since with addressable containers the host acts like a gateway + // for the containers, instead of using the same gateway for the + // containers as their host's + interfaceInfo.GatewayAddress.Value = interfaceInfo.Address.Value + success = true break } @@ -1204,17 +1086,50 @@ instId instance.Id, macAddress string, ) (*state.IPAddress, error) { + hostname := containerHostname(container.Tag()) + + if !environs.AddressAllocationEnabled() { + // Even if the address allocation feature flag is not enabled, we might + // be running on MAAS 1.8+ with devices support, which we can use to + // register containers getting IPs via DHCP. However, most of the usual + // allocation code can be bypassed, we just need the parent instance ID + // and a MAC address (no subnet or IP address). + allocatedAddress := network.Address{} + err := environ.AllocateAddress(instId, network.AnySubnet, &allocatedAddress, macAddress, hostname) + if err != nil { + // Not using MAAS 1.8+ or some other error. + return nil, errors.Trace(err) + } + + logger.Infof( + "allocated address %q on instance %q for container %q", + allocatedAddress.String(), instId, hostname, + ) + + // Add the address to state, so we can look it up later by MAC address. + stateAddr, err := p.st.AddIPAddress(allocatedAddress, string(network.AnySubnet)) + if err != nil { + return nil, errors.Annotatef(err, "failed to save address %q", allocatedAddress) + } + + err = p.setAllocatedOrRelease(stateAddr, environ, instId, container, network.AnySubnet, macAddress) + if err != nil { + return nil, errors.Trace(err) + } + + return stateAddr, nil + } subnetId := network.Id(subnet.ProviderId()) - name := names.NewMachineTag(container.Id()).String() for { addr, err := subnet.PickNewAddress() if err != nil { return nil, err } + netAddr := addr.Address() logger.Tracef("picked new address %q on subnet %q", addr.String(), subnetId) // Attempt to allocate with environ. 
- err = environ.AllocateAddress(instId, subnetId, addr.Address(), macAddress, name) + err = environ.AllocateAddress(instId, subnetId, &netAddr, macAddress, hostname) if err != nil { logger.Warningf( "allocating address %q on instance %q and subnet %q failed: %v (retrying)", @@ -1278,7 +1193,7 @@ addr.String(), state.AddressStateUnavailable, err, ) } - err = environ.ReleaseAddress(instId, subnetId, addr.Address(), addr.MACAddress()) + err = environ.ReleaseAddress(instId, subnetId, addr.Address(), addr.MACAddress(), "") if err == nil { logger.Infof("address %q released; trying to allocate new", addr.String()) return @@ -1304,7 +1219,7 @@ func (p *ProvisionerAPI) createOrFetchStateSubnet(subnetInfo network.SubnetInfo) (*state.Subnet, error) { stateSubnetInfo := state.SubnetInfo{ - ProviderId: string(subnetInfo.ProviderId), + ProviderId: subnetInfo.ProviderId, CIDR: subnetInfo.CIDR, VLANTag: subnetInfo.VLANTag, AllocatableIPHigh: subnetInfo.AllocatableIPHigh.String(), @@ -1321,93 +1236,3 @@ } return subnet, nil } - -// machineTags returns machine-specific tags to set on the instance. -func (p *ProvisionerAPI) machineTags(m *state.Machine, jobs []multiwatcher.MachineJob) (map[string]string, error) { - // Names of all units deployed to the machine. - // - // TODO(axw) 2015-06-02 #1461358 - // We need a worker that periodically updates - // instance tags with current deployment info. - units, err := m.Units() - if err != nil { - return nil, errors.Trace(err) - } - unitNames := make([]string, 0, len(units)) - for _, unit := range units { - if !unit.IsPrincipal() { - continue - } - unitNames = append(unitNames, unit.Name()) - } - sort.Strings(unitNames) - - cfg, err := p.st.EnvironConfig() - if err != nil { - return nil, errors.Trace(err) - } - machineTags := instancecfg.InstanceTags(cfg, jobs) - if len(unitNames) > 0 { - machineTags[tags.JujuUnitsDeployed] = strings.Join(unitNames, " ") - } - return machineTags, nil -} - -// machineSubnetsAndZones returns a map of subnet provider-specific id -// to list of availability zone names for that subnet. The result can -// be empty if there are no spaces constraints specified for the -// machine, or there's an error fetching them. -func (p *ProvisionerAPI) machineSubnetsAndZones(m *state.Machine) (map[string][]string, error) { - mcons, err := m.Constraints() - if err != nil { - return nil, errors.Annotate(err, "cannot get machine constraints") - } - includeSpaces := mcons.IncludeSpaces() - if len(includeSpaces) < 1 { - // Nothing to do. - return nil, nil - } - // TODO(dimitern): For the network model MVP we only use the first - // included space and ignore the rest. - spaceName := includeSpaces[0] - if len(includeSpaces) > 1 { - logger.Debugf( - "using space %q from constraints for machine %q (ignoring remaining: %v)", - spaceName, m.Id(), includeSpaces[1:], - ) - } - space, err := p.st.Space(spaceName) - if err != nil { - return nil, errors.Trace(err) - } - subnets, err := space.Subnets() - if err != nil { - return nil, errors.Trace(err) - } - if len(subnets) == 0 { - return nil, errors.Errorf("cannot use space %q as deployment target: no subnets", spaceName) - } - subnetsToZones := make(map[string][]string, len(subnets)) - for _, subnet := range subnets { - warningPrefix := fmt.Sprintf( - "not using subnet %q in space %q for machine %q provisioning: ", - subnet.CIDR(), spaceName, m.Id(), - ) - // TODO(dimitern): state.Subnet.ProviderId needs to be of type - // network.Id. 
- providerId := subnet.ProviderId() - if providerId == "" { - logger.Warningf(warningPrefix + "no ProviderId set") - continue - } - // TODO(dimitern): Once state.Subnet supports multiple zones, - // use all of them below. - zone := subnet.AvailabilityZone() - if zone == "" { - logger.Warningf(warningPrefix + "no availability zone(s) set") - continue - } - subnetsToZones[providerId] = []string{zone} - } - return subnetsToZones, nil -} === modified file 'src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go' --- src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/provisioner/provisioner_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "github.com/juju/errors" "github.com/juju/names" + jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils/proxy" gc "gopkg.in/check.v1" @@ -20,22 +21,21 @@ apiservertesting "github.com/juju/juju/apiserver/testing" "github.com/juju/juju/constraints" "github.com/juju/juju/container" - "github.com/juju/juju/environs/tags" "github.com/juju/juju/feature" "github.com/juju/juju/instance" "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/provider/dummy" "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" statetesting "github.com/juju/juju/state/testing" "github.com/juju/juju/storage/poolmanager" - storagedummy "github.com/juju/juju/storage/provider/dummy" - "github.com/juju/juju/storage/provider/registry" coretesting "github.com/juju/juju/testing" ) -func Test(t *stdtesting.T) { +func TestPackage(t *stdtesting.T) { + if jujutesting.RaceEnabled { + t.Skip("skipping package under -race, see LP 1517632") + } coretesting.MgoTestPackage(t) } @@ -55,7 +55,10 @@ s.setUpTest(c, false) } -func (s *provisionerSuite) setUpTest(c *gc.C, withStateServer bool) { +func (s *provisionerSuite) setUpTest(c *gc.C, withController bool) { + s.JujuConnSuite.ConfigAttrs = map[string]interface{}{ + "image-stream": "daily", + } s.JujuConnSuite.SetUpTest(c) // We're testing with address allocation on by default. There are // separate tests to check the behavior when the flag is not @@ -63,12 +66,12 @@ s.SetFeatureFlags(feature.AddressAllocation) // Reset previous machines (if any) and create 3 machines - // for the tests, plus an optional state server machine. + // for the tests, plus an optional controller machine. s.machines = nil // Note that the specific machine ids allocated are assumed // to be numerically consecutive from zero. 
- if withStateServer { - s.machines = append(s.machines, testing.AddStateServerMachine(c, s.State)) + if withController { + s.machines = append(s.machines, testing.AddControllerMachine(c, s.State)) } for i := 0; i < 5; i++ { machine, err := s.State.AddMachine("quantal", state.JobHostUnits) @@ -96,19 +99,19 @@ s.provisioner = provisionerAPI } -type withoutStateServerSuite struct { +type withoutControllerSuite struct { provisionerSuite - *commontesting.EnvironWatcherTest + *commontesting.ModelWatcherTest } -var _ = gc.Suite(&withoutStateServerSuite{}) +var _ = gc.Suite(&withoutControllerSuite{}) -func (s *withoutStateServerSuite) SetUpTest(c *gc.C) { +func (s *withoutControllerSuite) SetUpTest(c *gc.C) { s.setUpTest(c, false) - s.EnvironWatcherTest = commontesting.NewEnvironWatcherTest(s.provisioner, s.State, s.resources, commontesting.HasSecrets) + s.ModelWatcherTest = commontesting.NewModelWatcherTest(s.provisioner, s.State, s.resources, commontesting.HasSecrets) } -func (s *withoutStateServerSuite) TestProvisionerFailsWithNonMachineAgentNonManagerUser(c *gc.C) { +func (s *withoutControllerSuite) TestProvisionerFailsWithNonMachineAgentNonManagerUser(c *gc.C) { anAuthorizer := s.authorizer anAuthorizer.EnvironManager = true // Works with an environment manager, which is not a machine agent. @@ -124,7 +127,7 @@ c.Assert(err, gc.ErrorMatches, "permission denied") } -func (s *withoutStateServerSuite) TestSetPasswords(c *gc.C) { +func (s *withoutControllerSuite) TestSetPasswords(c *gc.C) { args := params.EntityPasswords{ Changes: []params.EntityPassword{ {Tag: s.machines[0].Tag().String(), Password: "xxx0-1234567890123457890"}, @@ -162,7 +165,7 @@ } } -func (s *withoutStateServerSuite) TestShortSetPasswords(c *gc.C) { +func (s *withoutControllerSuite) TestShortSetPasswords(c *gc.C) { args := params.EntityPasswords{ Changes: []params.EntityPassword{ {Tag: s.machines[1].Tag().String(), Password: "xxx1"}, @@ -175,7 +178,7 @@ "password is only 4 bytes long, and is not a valid Agent password") } -func (s *withoutStateServerSuite) TestLifeAsMachineAgent(c *gc.C) { +func (s *withoutControllerSuite) TestLifeAsMachineAgent(c *gc.C) { // NOTE: This and the next call serve to test the two // different authorization schemes: // 1. 
Machine agents can access their own machine and @@ -238,7 +241,7 @@ }) } -func (s *withoutStateServerSuite) TestLifeAsEnvironManager(c *gc.C) { +func (s *withoutControllerSuite) TestLifeAsEnvironManager(c *gc.C) { err := s.machines[1].EnsureDead() c.Assert(err, jc.ErrorIsNil) err = s.machines[1].Refresh() @@ -287,7 +290,7 @@ }) } -func (s *withoutStateServerSuite) TestRemove(c *gc.C) { +func (s *withoutControllerSuite) TestRemove(c *gc.C) { err := s.machines[1].EnsureDead() c.Assert(err, jc.ErrorIsNil) s.assertLife(c, 0, state.Alive) @@ -322,7 +325,7 @@ s.assertLife(c, 2, state.Alive) } -func (s *withoutStateServerSuite) TestSetStatus(c *gc.C) { +func (s *withoutControllerSuite) TestSetStatus(c *gc.C) { err := s.machines[0].SetStatus(state.StatusStarted, "blah", nil) c.Assert(err, jc.ErrorIsNil) err = s.machines[1].SetStatus(state.StatusStopped, "foo", nil) @@ -359,7 +362,7 @@ s.assertStatus(c, 2, state.StatusStarted, "again", map[string]interface{}{}) } -func (s *withoutStateServerSuite) TestMachinesWithTransientErrors(c *gc.C) { +func (s *withoutControllerSuite) TestMachinesWithTransientErrors(c *gc.C) { err := s.machines[0].SetStatus(state.StatusStarted, "blah", nil) c.Assert(err, jc.ErrorIsNil) err = s.machines[1].SetStatus(state.StatusError, "transient error", @@ -387,7 +390,7 @@ }) } -func (s *withoutStateServerSuite) TestMachinesWithTransientErrorsPermission(c *gc.C) { +func (s *withoutControllerSuite) TestMachinesWithTransientErrorsPermission(c *gc.C) { // Machines where there's permission issues are omitted. anAuthorizer := s.authorizer anAuthorizer.EnvironManager = false @@ -414,7 +417,7 @@ }) } -func (s *withoutStateServerSuite) TestEnsureDead(c *gc.C) { +func (s *withoutControllerSuite) TestEnsureDead(c *gc.C) { err := s.machines[1].EnsureDead() c.Assert(err, jc.ErrorIsNil) s.assertLife(c, 0, state.Alive) @@ -448,13 +451,13 @@ s.assertLife(c, 2, state.Dead) } -func (s *withoutStateServerSuite) assertLife(c *gc.C, index int, expectLife state.Life) { +func (s *withoutControllerSuite) assertLife(c *gc.C, index int, expectLife state.Life) { err := s.machines[index].Refresh() c.Assert(err, jc.ErrorIsNil) c.Assert(s.machines[index].Life(), gc.Equals, expectLife) } -func (s *withoutStateServerSuite) assertStatus(c *gc.C, index int, expectStatus state.Status, expectInfo string, +func (s *withoutControllerSuite) assertStatus(c *gc.C, index int, expectStatus state.Status, expectInfo string, expectData map[string]interface{}) { statusInfo, err := s.machines[index].Status() @@ -464,7 +467,7 @@ c.Assert(statusInfo.Data, gc.DeepEquals, expectData) } -func (s *withoutStateServerSuite) TestWatchContainers(c *gc.C) { +func (s *withoutControllerSuite) TestWatchContainers(c *gc.C) { c.Assert(s.resources.Count(), gc.Equals, 0) args := params.WatchContainers{Params: []params.WatchContainer{ @@ -501,7 +504,7 @@ wc1.AssertNoChange() } -func (s *withoutStateServerSuite) TestWatchAllContainers(c *gc.C) { +func (s *withoutControllerSuite) TestWatchAllContainers(c *gc.C) { c.Assert(s.resources.Count(), gc.Equals, 0) args := params.WatchContainers{Params: []params.WatchContainer{ @@ -538,7 +541,7 @@ wc1.AssertNoChange() } -func (s *withoutStateServerSuite) TestEnvironConfigNonManager(c *gc.C) { +func (s *withoutControllerSuite) TestModelConfigNonManager(c *gc.C) { // Now test it with a non-environment manager and make sure // the secret attributes are masked. 
anAuthorizer := s.authorizer @@ -547,10 +550,10 @@ aProvisioner, err := provisioner.NewProvisionerAPI(s.State, s.resources, anAuthorizer) c.Assert(err, jc.ErrorIsNil) - s.AssertEnvironConfig(c, aProvisioner, commontesting.NoSecrets) + s.AssertModelConfig(c, aProvisioner, commontesting.NoSecrets) } -func (s *withoutStateServerSuite) TestStatus(c *gc.C) { +func (s *withoutControllerSuite) TestStatus(c *gc.C) { err := s.machines[0].SetStatus(state.StatusStarted, "blah", nil) c.Assert(err, jc.ErrorIsNil) err = s.machines[1].SetStatus(state.StatusStopped, "foo", nil) @@ -589,7 +592,7 @@ }) } -func (s *withoutStateServerSuite) TestSeries(c *gc.C) { +func (s *withoutControllerSuite) TestSeries(c *gc.C) { // Add a machine with different series. foobarMachine, err := s.State.AddMachine("foobar", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -616,7 +619,7 @@ }) } -func (s *withoutStateServerSuite) TestDistributionGroup(c *gc.C) { +func (s *withoutControllerSuite) TestDistributionGroup(c *gc.C) { addUnits := func(name string, machines ...*state.Machine) (units []*state.Unit) { svc := s.AddTestingService(c, name, s.AddTestingCharm(c, name)) for _, m := range machines { @@ -650,8 +653,8 @@ setProvisioned("2") setProvisioned("3") - // Add a few state servers, provision two of them. - _, err = s.State.EnsureAvailability(3, constraints.Value{}, "quantal", nil) + // Add a few controllers, provision two of them. + _, err = s.State.EnableHA(3, constraints.Value{}, "quantal", nil) c.Assert(err, jc.ErrorIsNil) setProvisioned("5") setProvisioned("7") @@ -687,7 +690,7 @@ }) } -func (s *withoutStateServerSuite) TestDistributionGroupEnvironManagerAuth(c *gc.C) { +func (s *withoutControllerSuite) TestDistributionGroupEnvironManagerAuth(c *gc.C) { args := params.Entities{Entities: []params.Entity{ {Tag: "machine-0"}, {Tag: "machine-42"}, @@ -712,7 +715,7 @@ }) } -func (s *withoutStateServerSuite) TestDistributionGroupMachineAgentAuth(c *gc.C) { +func (s *withoutControllerSuite) TestDistributionGroupMachineAgentAuth(c *gc.C) { anAuthorizer := s.authorizer anAuthorizer.Tag = names.NewMachineTag("1") anAuthorizer.EnvironManager = false @@ -742,257 +745,7 @@ }) } -func (s *withoutStateServerSuite) TestProvisioningInfo(c *gc.C) { - // Add a couple of spaces. - _, err := s.State.AddSpace("space1", nil, true) - c.Assert(err, jc.ErrorIsNil) - _, err = s.State.AddSpace("space2", nil, false) - c.Assert(err, jc.ErrorIsNil) - // Add 1 subnet into space1, and 2 into space2. - // Only the first subnet of space2 has AllocatableIPLow|High set. - // Each subnet is in a matching zone (e.g "subnet-#" in "zone#"). - testing.AddSubnetsWithTemplate(c, s.State, 3, state.SubnetInfo{ - CIDR: "10.10.{{.}}.0/24", - ProviderId: "subnet-{{.}}", - AllocatableIPLow: "{{if (eq . 1)}}10.10.{{.}}.5{{end}}", - AllocatableIPHigh: "{{if (eq . 1)}}10.10.{{.}}.254{{end}}", - AvailabilityZone: "zone{{.}}", - SpaceName: "{{if (eq . 
0)}}space1{{else}}space2{{end}}", - VLANTag: 42, - }) - - registry.RegisterProvider("static", &storagedummy.StorageProvider{IsDynamic: false}) - defer registry.RegisterProvider("static", nil) - registry.RegisterEnvironStorageProviders("dummy", "static") - - pm := poolmanager.New(state.NewStateSettings(s.State)) - _, err = pm.Create("static-pool", "static", map[string]interface{}{"foo": "bar"}) - c.Assert(err, jc.ErrorIsNil) - - cons := constraints.MustParse("cpu-cores=123 mem=8G spaces=^space1,space2") - template := state.MachineTemplate{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - Constraints: cons, - Placement: "valid", - Volumes: []state.MachineVolumeParams{ - {Volume: state.VolumeParams{Size: 1000, Pool: "static-pool"}}, - {Volume: state.VolumeParams{Size: 2000, Pool: "static-pool"}}, - }, - } - placementMachine, err := s.State.AddOneMachine(template) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{Entities: []params.Entity{ - {Tag: s.machines[0].Tag().String()}, - {Tag: placementMachine.Tag().String()}, - {Tag: "machine-42"}, - {Tag: "unit-foo-0"}, - {Tag: "service-bar"}, - }} - result, err := s.provisioner.ProvisioningInfo(args) - c.Assert(err, jc.ErrorIsNil) - - expected := params.ProvisioningInfoResults{ - Results: []params.ProvisioningInfoResult{ - {Result: ¶ms.ProvisioningInfo{ - Series: "quantal", - Networks: []string{}, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - }}, - {Result: ¶ms.ProvisioningInfo{ - Series: "quantal", - Constraints: template.Constraints, - Placement: template.Placement, - Networks: template.RequestedNetworks, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - SubnetsToZones: map[string][]string{ - "subnet-1": []string{"zone1"}, - "subnet-2": []string{"zone2"}, - }, - Volumes: []params.VolumeParams{{ - VolumeTag: "volume-0", - Size: 1000, - Provider: "static", - Attributes: map[string]interface{}{"foo": "bar"}, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - Attachment: ¶ms.VolumeAttachmentParams{ - MachineTag: placementMachine.Tag().String(), - VolumeTag: "volume-0", - Provider: "static", - }, - }, { - VolumeTag: "volume-1", - Size: 2000, - Provider: "static", - Attributes: map[string]interface{}{"foo": "bar"}, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - Attachment: ¶ms.VolumeAttachmentParams{ - MachineTag: placementMachine.Tag().String(), - VolumeTag: "volume-1", - Provider: "static", - }, - }}, - }}, - {Error: apiservertesting.NotFoundError("machine 42")}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - } - // The order of volumes is not predictable, so we make sure we - // compare the right ones. This only applies to Results[1] since - // it is the only result to contain volumes. - if expected.Results[1].Result.Volumes[0].VolumeTag != result.Results[1].Result.Volumes[0].VolumeTag { - vols := expected.Results[1].Result.Volumes - vols[0], vols[1] = vols[1], vols[0] - } - c.Assert(result, jc.DeepEquals, expected) -} - -func (s *withoutStateServerSuite) TestProvisioningInfoWhenUsingUnsuitableSpaces(c *gc.C) { - // Add an empty space. 
- _, err := s.State.AddSpace("empty", nil, true) - c.Assert(err, jc.ErrorIsNil) - - consEmptySpace := constraints.MustParse("cpu-cores=123 mem=8G spaces=empty") - consMissingSpace := constraints.MustParse("cpu-cores=123 mem=8G spaces=missing") - templates := []state.MachineTemplate{{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - Constraints: consEmptySpace, - Placement: "valid", - }, { - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - Constraints: consMissingSpace, - Placement: "valid", - }} - placementMachines, err := s.State.AddMachines(templates...) - c.Assert(err, jc.ErrorIsNil) - c.Assert(placementMachines, gc.HasLen, 2) - - args := params.Entities{Entities: []params.Entity{ - {Tag: placementMachines[0].Tag().String()}, - {Tag: placementMachines[1].Tag().String()}, - }} - result, err := s.provisioner.ProvisioningInfo(args) - c.Assert(err, jc.ErrorIsNil) - - expectedErrorEmptySpace := `cannot match subnets to zones: ` + - `cannot use space "empty" as deployment target: no subnets` - expectedErrorMissingSpace := `cannot match subnets to zones: ` + - `space "missing"` // " not found" will be appended by NotFoundError helper below. - expected := params.ProvisioningInfoResults{Results: []params.ProvisioningInfoResult{ - {Error: apiservertesting.ServerError(expectedErrorEmptySpace)}, - {Error: apiservertesting.NotFoundError(expectedErrorMissingSpace)}, - }} - c.Assert(result, jc.DeepEquals, expected) -} - -func (s *withoutStateServerSuite) TestStorageProviderFallbackToType(c *gc.C) { - registry.RegisterProvider("dynamic", &storagedummy.StorageProvider{IsDynamic: true}) - defer registry.RegisterProvider("dynamic", nil) - registry.RegisterProvider("static", &storagedummy.StorageProvider{IsDynamic: false}) - defer registry.RegisterProvider("static", nil) - registry.RegisterEnvironStorageProviders("dummy", "dynamic", "static") - - template := state.MachineTemplate{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - Placement: "valid", - RequestedNetworks: []string{"net1", "net2"}, - Volumes: []state.MachineVolumeParams{ - {Volume: state.VolumeParams{Size: 1000, Pool: "dynamic"}}, - {Volume: state.VolumeParams{Size: 1000, Pool: "static"}}, - }, - } - placementMachine, err := s.State.AddOneMachine(template) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{Entities: []params.Entity{ - {Tag: placementMachine.Tag().String()}, - }} - result, err := s.provisioner.ProvisioningInfo(args) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(result, jc.DeepEquals, params.ProvisioningInfoResults{ - Results: []params.ProvisioningInfoResult{ - {Result: ¶ms.ProvisioningInfo{ - Series: "quantal", - Constraints: template.Constraints, - Placement: template.Placement, - Networks: template.RequestedNetworks, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - Volumes: []params.VolumeParams{{ - VolumeTag: "volume-1", - Size: 1000, - Provider: "static", - Attributes: nil, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - Attachment: ¶ms.VolumeAttachmentParams{ - MachineTag: placementMachine.Tag().String(), - VolumeTag: "volume-1", - Provider: "static", - }, - }}, - }}, - }, - }) -} - -func (s *withoutStateServerSuite) TestProvisioningInfoPermissions(c *gc.C) { - // Login as a machine agent for machine 0. 
- anAuthorizer := s.authorizer - anAuthorizer.EnvironManager = false - anAuthorizer.Tag = s.machines[0].Tag() - aProvisioner, err := provisioner.NewProvisionerAPI(s.State, s.resources, anAuthorizer) - c.Assert(err, jc.ErrorIsNil) - c.Assert(aProvisioner, gc.NotNil) - - args := params.Entities{Entities: []params.Entity{ - {Tag: s.machines[0].Tag().String()}, - {Tag: s.machines[0].Tag().String() + "-lxc-0"}, - {Tag: "machine-42"}, - {Tag: s.machines[1].Tag().String()}, - {Tag: "service-bar"}, - }} - - // Only machine 0 and containers therein can be accessed. - results, err := aProvisioner.ProvisioningInfo(args) - c.Assert(results, jc.DeepEquals, params.ProvisioningInfoResults{ - Results: []params.ProvisioningInfoResult{ - {Result: &params.ProvisioningInfo{ - Series: "quantal", - Networks: []string{}, - Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, - Tags: map[string]string{ - tags.JujuEnv: coretesting.EnvironmentTag.Id(), - }, - }}, - {Error: apiservertesting.NotFoundError("machine 0/lxc/0")}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *withoutStateServerSuite) TestConstraints(c *gc.C) { +func (s *withoutControllerSuite) TestConstraints(c *gc.C) { // Add a machine with some constraints. cons := constraints.MustParse("cpu-cores=123", "mem=8G", "networks=net3,^net4") template := state.MachineTemplate{ @@ -1026,7 +779,7 @@ }) } -func (s *withoutStateServerSuite) TestRequestedNetworks(c *gc.C) { +func (s *withoutControllerSuite) TestRequestedNetworks(c *gc.C) { // Add a machine with some requested networks. template := state.MachineTemplate{ Series: "quantal", @@ -1063,61 +816,13 @@ }) } -func (s *withoutStateServerSuite) TestSetProvisioned(c *gc.C) { - // Provision machine 0 first. - hwChars := instance.MustParseHardware("arch=i386", "mem=4G") - err := s.machines[0].SetProvisioned("i-am", "fake_nonce", &hwChars) - c.Assert(err, jc.ErrorIsNil) - - args := params.SetProvisioned{Machines: []params.MachineSetProvisioned{ - {Tag: s.machines[0].Tag().String(), InstanceId: "i-was", Nonce: "fake_nonce", Characteristics: nil}, - {Tag: s.machines[1].Tag().String(), InstanceId: "i-will", Nonce: "fake_nonce", Characteristics: &hwChars}, - {Tag: s.machines[2].Tag().String(), InstanceId: "i-am-too", Nonce: "fake", Characteristics: nil}, - {Tag: "machine-42", InstanceId: "", Nonce: "", Characteristics: nil}, - {Tag: "unit-foo-0", InstanceId: "", Nonce: "", Characteristics: nil}, - {Tag: "service-bar", InstanceId: "", Nonce: "", Characteristics: nil}, - }} - result, err := s.provisioner.SetProvisioned(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {&params.Error{ - Message: `cannot set instance data for machine "0": already set`, - }}, - {nil}, - {nil}, - {apiservertesting.NotFoundError("machine 42")}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify machine 1 and 2 were provisioned.
- c.Assert(s.machines[1].Refresh(), gc.IsNil) - c.Assert(s.machines[2].Refresh(), gc.IsNil) - - instanceId, err := s.machines[1].InstanceId() - c.Assert(err, jc.ErrorIsNil) - c.Check(instanceId, gc.Equals, instance.Id("i-will")) - instanceId, err = s.machines[2].InstanceId() - c.Assert(err, jc.ErrorIsNil) - c.Check(instanceId, gc.Equals, instance.Id("i-am-too")) - c.Check(s.machines[1].CheckProvisioned("fake_nonce"), jc.IsTrue) - c.Check(s.machines[2].CheckProvisioned("fake"), jc.IsTrue) - gotHardware, err := s.machines[1].HardwareCharacteristics() - c.Assert(err, jc.ErrorIsNil) - c.Check(gotHardware, gc.DeepEquals, &hwChars) -} - -func (s *withoutStateServerSuite) TestSetInstanceInfo(c *gc.C) { - registry.RegisterProvider("static", &storagedummy.StorageProvider{IsDynamic: false}) - defer registry.RegisterProvider("static", nil) - registry.RegisterEnvironStorageProviders("dummy", "static") +func (s *withoutControllerSuite) TestSetInstanceInfo(c *gc.C) { + s.registerStorageProviders(c, "static") pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("static-pool", "static", map[string]interface{}{"foo": "bar"}) c.Assert(err, jc.ErrorIsNil) - err = s.State.UpdateEnvironConfig(map[string]interface{}{ + err = s.State.UpdateModelConfig(map[string]interface{}{ "storage-default-block-source": "static-pool", }, nil, nil) c.Assert(err, jc.ErrorIsNil) @@ -1318,7 +1023,7 @@ c.Assert(volumeAttachments, gc.HasLen, 0) } -func (s *withoutStateServerSuite) TestInstanceId(c *gc.C) { +func (s *withoutControllerSuite) TestInstanceId(c *gc.C) { // Provision 2 machines first. err := s.machines[0].SetProvisioned("i-am", "fake_nonce", nil) c.Assert(err, jc.ErrorIsNil) @@ -1348,10 +1053,10 @@ }) } -func (s *withoutStateServerSuite) TestWatchEnvironMachines(c *gc.C) { +func (s *withoutControllerSuite) TestWatchModelMachines(c *gc.C) { c.Assert(s.resources.Count(), gc.Equals, 0) - got, err := s.provisioner.WatchEnvironMachines() + got, err := s.provisioner.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) want := params.StringsWatchResult{ StringsWatcherId: "1", @@ -1370,14 +1075,14 @@ wc := statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher)) wc.AssertNoChange() - // Make sure WatchEnvironMachines fails with a machine agent login. + // Make sure WatchModelMachines fails with a machine agent login. anAuthorizer := s.authorizer anAuthorizer.Tag = names.NewMachineTag("1") anAuthorizer.EnvironManager = false aProvisioner, err := provisioner.NewProvisionerAPI(s.State, s.resources, anAuthorizer) c.Assert(err, jc.ErrorIsNil) - result, err := aProvisioner.WatchEnvironMachines() + result, err := aProvisioner.WatchModelMachines() c.Assert(err, gc.ErrorMatches, "permission denied") c.Assert(result, gc.DeepEquals, params.StringsWatchResult{}) } @@ -1389,7 +1094,7 @@ return results.ManagerConfig } -func (s *withoutStateServerSuite) TestContainerManagerConfig(c *gc.C) { +func (s *withoutControllerSuite) TestContainerManagerConfig(c *gc.C) { cfg := s.getManagerConfig(c, instance.KVM) c.Assert(cfg, jc.DeepEquals, map[string]string{ container.ConfigName: "juju", @@ -1400,7 +1105,7 @@ }) } -func (s *withoutStateServerSuite) TestContainerManagerConfigNoFeatureFlagNoIPForwarding(c *gc.C) { +func (s *withoutControllerSuite) TestContainerManagerConfigNoFeatureFlagNoIPForwarding(c *gc.C) { s.SetFeatureFlags() // clear the flags. 
cfg := s.getManagerConfig(c, instance.KVM) @@ -1410,7 +1115,7 @@ }) } -func (s *withoutStateServerSuite) TestContainerManagerConfigNoIPForwarding(c *gc.C) { +func (s *withoutControllerSuite) TestContainerManagerConfigNoIPForwarding(c *gc.C) { // Break dummy provider's SupportsAddressAllocation method to // ensure ConfigIPForwarding is not set below. s.AssertConfigParameterUpdated(c, "broken", "SupportsAddressAllocation") @@ -1421,12 +1126,12 @@ }) } -func (s *withoutStateServerSuite) TestContainerConfig(c *gc.C) { +func (s *withoutControllerSuite) TestContainerConfig(c *gc.C) { attrs := map[string]interface{}{ "http-proxy": "http://proxy.example.com:9000", "allow-lxc-loop-mounts": true, } - err := s.State.UpdateEnvironConfig(attrs, nil, nil) + err := s.State.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) expectedProxy := proxy.Settings{ Http: "http://proxy.example.com:9000", @@ -1444,7 +1149,7 @@ c.Check(results.AllowLXCLoopMounts, jc.IsTrue) } -func (s *withoutStateServerSuite) TestSetSupportedContainers(c *gc.C) { +func (s *withoutControllerSuite) TestSetSupportedContainers(c *gc.C) { args := params.MachineContainersParams{Params: []params.MachineContainers{{ MachineTag: "machine-0", ContainerTypes: []instance.ContainerType{instance.LXC}, @@ -1470,7 +1175,7 @@ c.Assert(containers, gc.DeepEquals, []instance.ContainerType{instance.LXC, instance.KVM}) } -func (s *withoutStateServerSuite) TestSetSupportedContainersPermissions(c *gc.C) { +func (s *withoutControllerSuite) TestSetSupportedContainersPermissions(c *gc.C) { // Login as a machine agent for machine 0. anAuthorizer := s.authorizer anAuthorizer.EnvironManager = false @@ -1503,7 +1208,7 @@ }) } -func (s *withoutStateServerSuite) TestSupportsNoContainers(c *gc.C) { +func (s *withoutControllerSuite) TestSupportsNoContainers(c *gc.C) { args := params.MachineContainersParams{ Params: []params.MachineContainers{ { @@ -1522,17 +1227,17 @@ c.Assert(containers, gc.DeepEquals, []instance.ContainerType{}) } -var _ = gc.Suite(&withStateServerSuite{}) +var _ = gc.Suite(&withControllerSuite{}) -type withStateServerSuite struct { +type withControllerSuite struct { provisionerSuite } -func (s *withStateServerSuite) SetUpTest(c *gc.C) { +func (s *withControllerSuite) SetUpTest(c *gc.C) { s.provisionerSuite.setUpTest(c, true) } -func (s *withStateServerSuite) TestAPIAddresses(c *gc.C) { +func (s *withControllerSuite) TestAPIAddresses(c *gc.C) { hostPorts := [][]network.HostPort{ network.NewHostPorts(1234, "0.1.2.3"), } @@ -1546,7 +1251,7 @@ }) } -func (s *withStateServerSuite) TestStateAddresses(c *gc.C) { +func (s *withControllerSuite) TestStateAddresses(c *gc.C) { addresses, err := s.State.Addresses() c.Assert(err, jc.ErrorIsNil) @@ -1557,14 +1262,14 @@ }) } -func (s *withStateServerSuite) TestCACert(c *gc.C) { +func (s *withControllerSuite) TestCACert(c *gc.C) { result := s.provisioner.CACert() c.Assert(result, gc.DeepEquals, params.BytesResult{ Result: []byte(s.State.CACert()), }) } -func (s *withoutStateServerSuite) TestWatchMachineErrorRetry(c *gc.C) { +func (s *withoutControllerSuite) TestWatchMachineErrorRetry(c *gc.C) { coretesting.SkipIfI386(c, "lp:1425569") s.PatchValue(&provisioner.ErrorRetryWaitDelay, 2*coretesting.ShortWait) @@ -1598,7 +1303,7 @@ c.Assert(result, gc.DeepEquals, params.NotifyWatchResult{}) } -func (s *withoutStateServerSuite) TestFindTools(c *gc.C) { +func (s *withoutControllerSuite) TestFindTools(c *gc.C) { args := params.FindToolsParams{ MajorVersion: -1, MinorVersion: -1, @@ -1608,8 +1313,8 @@ 
c.Assert(result.Error, gc.IsNil) c.Assert(result.List, gc.Not(gc.HasLen), 0) for _, tools := range result.List { - url := fmt.Sprintf("https://%s/environment/%s/tools/%s", - s.APIState.Addr(), coretesting.EnvironmentTag.Id(), tools.Version) + url := fmt.Sprintf("https://%s/model/%s/tools/%s", + s.APIState.Addr(), coretesting.ModelTag.Id(), tools.Version) c.Assert(tools.URL, gc.Equals, url) } } @@ -1628,7 +1333,7 @@ s.DummyConfig["lxc-default-mtu"] = 9000 s.provisionerSuite.SetUpTest(c) - stateConfig, err := s.State.EnvironConfig() + stateConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) value, ok := stateConfig.LXCDefaultMTU() c.Assert(ok, jc.IsTrue) === added file 'src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go' --- src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/provisioner/provisioninginfo.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,585 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner + +import ( + "fmt" + "sort" + "strings" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/utils/series" + "github.com/juju/utils/set" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/storagecommon" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cloudconfig/instancecfg" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/imagemetadata" + "github.com/juju/juju/environs/simplestreams" + "github.com/juju/juju/environs/tags" + "github.com/juju/juju/state" + "github.com/juju/juju/state/cloudimagemetadata" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/storage" + "github.com/juju/juju/storage/poolmanager" + "github.com/juju/juju/storage/provider/registry" +) + +// ProvisioningInfo returns the provisioning information for each given machine entity. +func (p *ProvisionerAPI) ProvisioningInfo(args params.Entities) (params.ProvisioningInfoResults, error) { + result := params.ProvisioningInfoResults{ + Results: make([]params.ProvisioningInfoResult, len(args.Entities)), + } + canAccess, err := p.getAuthFunc() + if err != nil { + return result, err + } + for i, entity := range args.Entities { + tag, err := names.ParseMachineTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + machine, err := p.getMachine(canAccess, tag) + if err == nil { + result.Results[i].Result, err = p.getProvisioningInfo(machine) + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +func (p *ProvisionerAPI) getProvisioningInfo(m *state.Machine) (*params.ProvisioningInfo, error) { + cons, err := m.Constraints() + if err != nil { + return nil, err + } + + volumes, err := p.machineVolumeParams(m) + if err != nil { + return nil, errors.Trace(err) + } + + // TODO(dimitern) Drop this once we only use spaces for + // deployments. 
+ networks, err := m.RequestedNetworks() + if err != nil { + return nil, err + } + + var jobs []multiwatcher.MachineJob + for _, job := range m.Jobs() { + jobs = append(jobs, job.ToParams()) + } + + tags, err := p.machineTags(m, jobs) + if err != nil { + return nil, errors.Trace(err) + } + + subnetsToZones, err := p.machineSubnetsAndZones(m) + if err != nil { + return nil, errors.Annotate(err, "cannot match subnets to zones") + } + + endpointBindings, err := p.machineEndpointBindings(m) + if err != nil { + return nil, errors.Annotate(err, "cannot determine machine endpoint bindings") + } + imageMetadata, err := p.availableImageMetadata(m) + if err != nil { + return nil, errors.Annotate(err, "cannot get available image metadata") + } + + return &params.ProvisioningInfo{ + Constraints: cons, + Series: m.Series(), + Placement: m.Placement(), + Networks: networks, + Jobs: jobs, + Volumes: volumes, + Tags: tags, + SubnetsToZones: subnetsToZones, + EndpointBindings: endpointBindings, + ImageMetadata: imageMetadata, + }, nil +} + +// machineVolumeParams retrieves VolumeParams for the volumes that should be +// provisioned with, and attached to, the machine. The client should ignore +// parameters that it does not know how to handle. +func (p *ProvisionerAPI) machineVolumeParams(m *state.Machine) ([]params.VolumeParams, error) { + volumeAttachments, err := m.VolumeAttachments() + if err != nil { + return nil, err + } + if len(volumeAttachments) == 0 { + return nil, nil + } + envConfig, err := p.st.ModelConfig() + if err != nil { + return nil, err + } + poolManager := poolmanager.New(state.NewStateSettings(p.st)) + allVolumeParams := make([]params.VolumeParams, 0, len(volumeAttachments)) + for _, volumeAttachment := range volumeAttachments { + volumeTag := volumeAttachment.Volume() + volume, err := p.st.Volume(volumeTag) + if err != nil { + return nil, errors.Annotatef(err, "getting volume %q", volumeTag.Id()) + } + storageInstance, err := storagecommon.MaybeAssignedStorageInstance( + volume.StorageInstance, p.st.StorageInstance, + ) + if err != nil { + return nil, errors.Annotatef(err, "getting volume %q storage instance", volumeTag.Id()) + } + volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager) + if err != nil { + return nil, errors.Annotatef(err, "getting volume %q parameters", volumeTag.Id()) + } + provider, err := registry.StorageProvider(storage.ProviderType(volumeParams.Provider)) + if err != nil { + return nil, errors.Annotate(err, "getting storage provider") + } + if provider.Dynamic() { + // Leave dynamic storage to the storage provisioner. + continue + } + volumeAttachmentParams, ok := volumeAttachment.Params() + if !ok { + // Attachment is already provisioned; this is an insane + // state, so we should not proceed with the volume. + return nil, errors.Errorf( + "volume %s already attached to machine %s", + volumeTag.Id(), m.Id(), + ) + } + // Not provisioned yet, so ask the cloud provisioner to do it. + volumeParams.Attachment = &params.VolumeAttachmentParams{ + volumeTag.String(), + m.Tag().String(), + "", // we're creating the volume, so it has no volume ID. + "", // we're creating the machine, so it has no instance ID. + volumeParams.Provider, + volumeAttachmentParams.ReadOnly, + } + allVolumeParams = append(allVolumeParams, volumeParams) + } + return allVolumeParams, nil +} + +// machineTags returns machine-specific tags to set on the instance.
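+// Principal units assigned to the machine are recorded under the tags.JujuUnitsDeployed key as a sorted, space-separated list of unit names.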
+func (p *ProvisionerAPI) machineTags(m *state.Machine, jobs []multiwatcher.MachineJob) (map[string]string, error) { + // Names of all units deployed to the machine. + // + // TODO(axw) 2015-06-02 #1461358 + // We need a worker that periodically updates + // instance tags with current deployment info. + units, err := m.Units() + if err != nil { + return nil, errors.Trace(err) + } + unitNames := make([]string, 0, len(units)) + for _, unit := range units { + if !unit.IsPrincipal() { + continue + } + unitNames = append(unitNames, unit.Name()) + } + sort.Strings(unitNames) + + cfg, err := p.st.ModelConfig() + if err != nil { + return nil, errors.Trace(err) + } + machineTags := instancecfg.InstanceTags(cfg, jobs) + if len(unitNames) > 0 { + machineTags[tags.JujuUnitsDeployed] = strings.Join(unitNames, " ") + } + return machineTags, nil +} + +// machineSubnetsAndZones returns a map of subnet provider-specific id +// to list of availability zone names for that subnet. The result can +// be empty if there are no spaces constraints specified for the +// machine, or there's an error fetching them. +func (p *ProvisionerAPI) machineSubnetsAndZones(m *state.Machine) (map[string][]string, error) { + mcons, err := m.Constraints() + if err != nil { + return nil, errors.Annotate(err, "cannot get machine constraints") + } + includeSpaces := mcons.IncludeSpaces() + if len(includeSpaces) < 1 { + // Nothing to do. + return nil, nil + } + // TODO(dimitern): For the network model MVP we only use the first + // included space and ignore the rest. + // + // LKK Card: https://canonical.leankit.com/Boards/View/101652562/117352306 + // LP Bug: http://pad.lv/1498232 + spaceName := includeSpaces[0] + if len(includeSpaces) > 1 { + logger.Debugf( + "using space %q from constraints for machine %q (ignoring remaining: %v)", + spaceName, m.Id(), includeSpaces[1:], + ) + } + space, err := p.st.Space(spaceName) + if err != nil { + return nil, errors.Trace(err) + } + subnets, err := space.Subnets() + if err != nil { + return nil, errors.Trace(err) + } + if len(subnets) == 0 { + return nil, errors.Errorf("cannot use space %q as deployment target: no subnets", spaceName) + } + subnetsToZones := make(map[string][]string, len(subnets)) + for _, subnet := range subnets { + warningPrefix := fmt.Sprintf( + "not using subnet %q in space %q for machine %q provisioning: ", + subnet.CIDR(), spaceName, m.Id(), + ) + providerId := subnet.ProviderId() + if providerId == "" { + logger.Warningf(warningPrefix + "no ProviderId set") + continue + } + // TODO(dimitern): Once state.Subnet supports multiple zones, + // use all of them below. + // + // LKK Card: https://canonical.leankit.com/Boards/View/101652562/119979611 + zone := subnet.AvailabilityZone() + if zone == "" { + logger.Warningf(warningPrefix + "no availability zone(s) set") + continue + } + subnetsToZones[string(providerId)] = []string{zone} + } + return subnetsToZones, nil +} + +func (p *ProvisionerAPI) machineEndpointBindings(m *state.Machine) (map[string]string, error) { + units, err := m.Units() + if err != nil { + return nil, errors.Trace(err) + } + + spacesNamesToProviderIds, err := p.allSpaceNamesToProviderIds() + if err != nil { + return nil, errors.Trace(err) + } + + var combinedBindings map[string]string + processedServicesSet := set.NewStrings() + for _, unit := range units { + if !unit.IsPrincipal() { + continue + } + service, err := unit.Service() + if err != nil { + return nil, err + } + if processedServicesSet.Contains(service.Name()) { + // Already processed, skip it. 
+ continue + } + bindings, err := service.EndpointBindings() + if err != nil { + return nil, err + } + processedServicesSet.Add(service.Name()) + + if len(bindings) == 0 { + continue + } + if combinedBindings == nil { + combinedBindings = make(map[string]string) + } + + for endpoint, spaceName := range bindings { + if spaceName == "" { + // Skip unspecified bindings, as they won't affect the instance + // selected for provisioning. + continue + } + + spaceProviderId, nameKnown := spacesNamesToProviderIds[spaceName] + if nameKnown { + combinedBindings[endpoint] = spaceProviderId + } else { + // Technically, this can't happen in practice, as we're + // validating the bindings during service deployment. + return nil, errors.Errorf("unknown space %q with no provider ID specified for endpoint %q", spaceName, endpoint) + } + } + } + return combinedBindings, nil +} + +func (p *ProvisionerAPI) allSpaceNamesToProviderIds() (map[string]string, error) { + allSpaces, err := p.st.AllSpaces() + if err != nil { + return nil, errors.Annotate(err, "getting all spaces") + } + + namesToProviderIds := make(map[string]string, len(allSpaces)) + for _, space := range allSpaces { + name := space.Name() + + // For providers without native support for spaces, use the name instead + // as provider ID. + providerId := string(space.ProviderId()) + if len(providerId) == 0 { + providerId = name + } + + namesToProviderIds[name] = providerId + } + + return namesToProviderIds, nil +} + +// availableImageMetadata returns all image metadata available to this machine +// or an error fetching them. +func (p *ProvisionerAPI) availableImageMetadata(m *state.Machine) ([]params.CloudImageMetadata, error) { + imageConstraint, env, err := p.constructImageConstraint(m) + if err != nil { + return nil, errors.Annotate(err, "could not construct image constraint") + } + + // Look for image metadata in state. + data, err := p.findImageMetadata(imageConstraint, env) + if err != nil { + return nil, err + } + sort.Sort(metadataList(data)) + logger.Debugf("available image metadata for provisioning: %v", data) + return data, nil +} + +// constructImageConstraint returns model-specific criteria used to look for image metadata. +func (p *ProvisionerAPI) constructImageConstraint(m *state.Machine) (*imagemetadata.ImageConstraint, environs.Environ, error) { + // If we can determine current region, + // we want only metadata specific to this region. + cloud, cfg, env, err := p.obtainEnvCloudConfig() + if err != nil { + return nil, nil, errors.Trace(err) + } + + lookup := simplestreams.LookupParams{ + Series: []string{m.Series()}, + Stream: cfg.ImageStream(), + } + + mcons, err := m.Constraints() + if err != nil { + return nil, nil, errors.Annotatef(err, "cannot get machine constraints for machine %v", m.MachineTag().Id()) + } + + if mcons.Arch != nil { + lookup.Arches = []string{*mcons.Arch} + } + if cloud != nil { + lookup.CloudSpec = *cloud + } + + return imagemetadata.NewImageConstraint(lookup), env, nil +} + +// obtainEnvCloudConfig returns environment specific cloud information +// to be used in search for compatible images and their metadata. 
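+// If the environ implements simplestreams.HasRegion, the returned cloud spec is narrowed to the current region; otherwise the spec is nil and the image search is not region-specific.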
+func (p *ProvisionerAPI) obtainEnvCloudConfig() (*simplestreams.CloudSpec, *config.Config, environs.Environ, error) { + cfg, err := p.st.ModelConfig() + if err != nil { + return nil, nil, nil, errors.Annotate(err, "could not get model config") + } + + env, err := environs.New(cfg) + if err != nil { + return nil, nil, nil, errors.Annotate(err, "could not get model") + } + + if inst, ok := env.(simplestreams.HasRegion); ok { + cloud, err := inst.Region() + if err != nil { + // can't really find images if we cannot determine cloud region + // TODO (anastasiamac 2015-12-03) or can we? + return nil, nil, nil, errors.Annotate(err, "getting provider region information (cloud spec)") + } + return &cloud, cfg, env, nil + } + return nil, cfg, env, nil +} + +// findImageMetadata returns all image metadata or an error fetching them. +// It looks for image metadata in state. +// If none are found, we fall back on original image search in simple streams. +func (p *ProvisionerAPI) findImageMetadata(imageConstraint *imagemetadata.ImageConstraint, env environs.Environ) ([]params.CloudImageMetadata, error) { + // Look for image metadata in state. + stateMetadata, err := p.imageMetadataFromState(imageConstraint) + if err != nil && !errors.IsNotFound(err) { + // look into simple stream if for some reason can't get from controller, + // so do not exit on error. + logger.Infof("could not get image metadata from controller: %v", err) + } + logger.Debugf("got from controller %d metadata", len(stateMetadata)) + // No need to look in data sources if found in state. + if len(stateMetadata) != 0 { + return stateMetadata, nil + } + + // If no metadata is found in state, fall back to original simple stream search. + // Currently, an image metadata worker picks up this metadata periodically (daily), + // and stores it in state. So potentially, this collection could be different + // to what is in state. + dsMetadata, err := p.imageMetadataFromDataSources(env, imageConstraint) + if err != nil { + if !errors.IsNotFound(err) { + return nil, errors.Trace(err) + } + } + logger.Debugf("got from data sources %d metadata", len(dsMetadata)) + + return dsMetadata, nil +} + +// imageMetadataFromState returns image metadata stored in state +// that matches given criteria. +func (p *ProvisionerAPI) imageMetadataFromState(constraint *imagemetadata.ImageConstraint) ([]params.CloudImageMetadata, error) { + filter := cloudimagemetadata.MetadataFilter{ + Series: constraint.Series, + Arches: constraint.Arches, + Region: constraint.Region, + Stream: constraint.Stream, + } + stored, err := p.st.CloudImageMetadataStorage.FindMetadata(filter) + if err != nil { + return nil, errors.Trace(err) + } + + toParams := func(m cloudimagemetadata.Metadata) params.CloudImageMetadata { + return params.CloudImageMetadata{ + ImageId: m.ImageId, + Stream: m.Stream, + Region: m.Region, + Version: m.Version, + Series: m.Series, + Arch: m.Arch, + VirtType: m.VirtType, + RootStorageType: m.RootStorageType, + RootStorageSize: m.RootStorageSize, + Source: m.Source, + Priority: m.Priority, + } + } + + var all []params.CloudImageMetadata + for _, ms := range stored { + for _, m := range ms { + all = append(all, toParams(m)) + } + } + return all, nil +} + +// imageMetadataFromDataSources finds image metadata that match specified criteria in existing data sources. 
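+// Any metadata found is also saved back to controller state, so subsequent lookups can be served by imageMetadataFromState without querying the data sources again.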
+func (p *ProvisionerAPI) imageMetadataFromDataSources(env environs.Environ, constraint *imagemetadata.ImageConstraint) ([]params.CloudImageMetadata, error) { + sources, err := environs.ImageMetadataSources(env) + if err != nil { + return nil, err + } + + getStream := func(current string) string { + if current == "" { + if constraint.Stream != "" { + return constraint.Stream + } + return env.Config().ImageStream() + } + return current + } + + toModel := func(m *imagemetadata.ImageMetadata, mStream string, mSeries string, source string, priority int) cloudimagemetadata.Metadata { + + return cloudimagemetadata.Metadata{ + cloudimagemetadata.MetadataAttributes{ + Region: m.RegionName, + Arch: m.Arch, + VirtType: m.VirtType, + RootStorageType: m.Storage, + Source: source, + Series: mSeries, + Stream: mStream, + }, + priority, + m.Id, + } + } + + var metadataState []cloudimagemetadata.Metadata + for _, source := range sources { + logger.Debugf("looking in data source %v", source.Description()) + found, info, err := imagemetadata.Fetch([]simplestreams.DataSource{source}, constraint) + if err != nil { + // Do not stop looking in other data sources if there is an issue here. + logger.Warningf("encountered %v while getting published images metadata from %v", err, source.Description()) + continue + } + for _, m := range found { + mSeries, err := series.VersionSeries(m.Version) + if err != nil { + logger.Warningf("could not determine series for image id %s: %v", m.Id, err) + continue + } + mStream := getStream(m.Stream) + metadataState = append(metadataState, toModel(m, mStream, mSeries, info.Source, source.Priority())) + } + } + if len(metadataState) > 0 { + if err := p.st.CloudImageMetadataStorage.SaveMetadata(metadataState); err != nil { + // No need to react here, just take note + logger.Warningf("failed to save published image metadata: %v", err) + } + } + + // Since we've fallen through to data sources search and have saved all needed images into controller, + // let's try to get them from controller to avoid duplication of conversion logic here. + all, err := p.imageMetadataFromState(constraint) + if err != nil { + return nil, errors.Annotate(err, "could not read metadata from controller after saving it there from data sources") + } + + if len(all) == 0 { + return nil, errors.NotFoundf("image metadata for series %v, arch %v", constraint.Series, constraint.Arches) + } + + return all, nil +} + +// metadataList is a convenience type that enables sorting +// a collection of CloudImageMetadata in order of priority. +type metadataList []params.CloudImageMetadata + +// Implements sort.Interface +func (m metadataList) Len() int { + return len(m) +} + +// Implements sort.Interface and sorts image metadata by priority. +func (m metadataList) Less(i, j int) bool { + return m[i].Priority < m[j].Priority +} + +// Implements sort.Interface +func (m metadataList) Swap(i, j int) { + m[i], m[j] = m[j], m[i] +} === added file 'src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go' --- src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/provisioner/provisioninginfo_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,376 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package provisioner_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/apiserver/provisioner" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/constraints" + "github.com/juju/juju/environs/tags" + "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/storage" + "github.com/juju/juju/storage/poolmanager" + storagedummy "github.com/juju/juju/storage/provider/dummy" + "github.com/juju/juju/storage/provider/registry" + coretesting "github.com/juju/juju/testing" +) + +func (s *withoutControllerSuite) TestProvisioningInfoWithStorage(c *gc.C) { + s.registerStorageProviders(c, "static") + + pm := poolmanager.New(state.NewStateSettings(s.State)) + _, err := pm.Create("static-pool", "static", map[string]interface{}{"foo": "bar"}) + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse("cpu-cores=123 mem=8G") + template := state.MachineTemplate{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + Constraints: cons, + Placement: "valid", + Volumes: []state.MachineVolumeParams{ + {Volume: state.VolumeParams{Size: 1000, Pool: "static-pool"}}, + {Volume: state.VolumeParams{Size: 2000, Pool: "static-pool"}}, + }, + } + placementMachine, err := s.State.AddOneMachine(template) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: s.machines[0].Tag().String()}, + {Tag: placementMachine.Tag().String()}, + }} + result, err := s.provisioner.ProvisioningInfo(args) + c.Assert(err, jc.ErrorIsNil) + + expected := params.ProvisioningInfoResults{ + Results: []params.ProvisioningInfoResult{ + {Result: &params.ProvisioningInfo{ + Series: "quantal", + Networks: []string{}, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + }}, + {Result: &params.ProvisioningInfo{ + Series: "quantal", + Constraints: template.Constraints, + Placement: template.Placement, + Networks: template.RequestedNetworks, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + Volumes: []params.VolumeParams{{ + VolumeTag: "volume-0", + Size: 1000, + Provider: "static", + Attributes: map[string]interface{}{"foo": "bar"}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + Attachment: &params.VolumeAttachmentParams{ + MachineTag: placementMachine.Tag().String(), + VolumeTag: "volume-0", + Provider: "static", + }, + }, { + VolumeTag: "volume-1", + Size: 2000, + Provider: "static", + Attributes: map[string]interface{}{"foo": "bar"}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + Attachment: &params.VolumeAttachmentParams{ + MachineTag: placementMachine.Tag().String(), + VolumeTag: "volume-1", + Provider: "static", + }, + }}, + }}, + }, + } + // The order of volumes is not predictable, so we make sure we + // compare the right ones. This only applies to Results[1] since + // it is the only result to contain volumes.
+ if expected.Results[1].Result.Volumes[0].VolumeTag != result.Results[1].Result.Volumes[0].VolumeTag { + vols := expected.Results[1].Result.Volumes + vols[0], vols[1] = vols[1], vols[0] + } + c.Assert(result, jc.DeepEquals, expected) +} + +func (s *withoutControllerSuite) registerStorageProviders(c *gc.C, names ...string) { + types := make([]storage.ProviderType, len(names)) + for i, name := range names { + types[i] = storage.ProviderType(name) + if name == "dynamic" { + s.registerDynamicStorageProvider(c) + } else if name == "static" { + s.registerStaticStorageProvider(c) + } else { + c.Fatalf("unknown storage provider type: %q, expected static or dynamic", name) + } + } + registry.RegisterEnvironStorageProviders("dummy", types...) +} + +func (s *withoutControllerSuite) registerDynamicStorageProvider(c *gc.C) { + registry.RegisterProvider("dynamic", &storagedummy.StorageProvider{IsDynamic: true}) + s.AddCleanup(func(*gc.C) { + registry.RegisterProvider("dynamic", nil) + }) +} + +func (s *withoutControllerSuite) registerStaticStorageProvider(c *gc.C) { + registry.RegisterProvider("static", &storagedummy.StorageProvider{IsDynamic: false}) + s.AddCleanup(func(*gc.C) { + registry.RegisterProvider("static", nil) + }) +} + +func (s *withoutControllerSuite) TestProvisioningInfoWithSingleNegativeAndPositiveSpaceInConstraints(c *gc.C) { + s.addSpacesAndSubnets(c) + + cons := constraints.MustParse("cpu-cores=123 mem=8G spaces=^space1,space2") + template := state.MachineTemplate{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + Constraints: cons, + Placement: "valid", + } + placementMachine, err := s.State.AddOneMachine(template) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: placementMachine.Tag().String()}, + }} + result, err := s.provisioner.ProvisioningInfo(args) + c.Assert(err, jc.ErrorIsNil) + + expected := params.ProvisioningInfoResults{ + Results: []params.ProvisioningInfoResult{{ + Result: &params.ProvisioningInfo{ + Series: "quantal", + Constraints: template.Constraints, + Placement: template.Placement, + Networks: template.RequestedNetworks, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + SubnetsToZones: map[string][]string{ + "subnet-1": []string{"zone1"}, + "subnet-2": []string{"zone2"}, + }, + }, + }}} + c.Assert(result, jc.DeepEquals, expected) +} + +func (s *withoutControllerSuite) addSpacesAndSubnets(c *gc.C) { + // Add a couple of spaces. + _, err := s.State.AddSpace("space1", "first space id", nil, true) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddSpace("space2", "", nil, false) // no provider ID + c.Assert(err, jc.ErrorIsNil) + // Add 1 subnet into space1, and 2 into space2. + // Only the first subnet of space2 has AllocatableIPLow|High set. + // Each subnet is in a matching zone (e.g. "subnet-#" in "zone#"). + testing.AddSubnetsWithTemplate(c, s.State, 3, state.SubnetInfo{ + CIDR: "10.10.{{.}}.0/24", + ProviderId: "subnet-{{.}}", + AllocatableIPLow: "{{if (eq . 1)}}10.10.{{.}}.5{{end}}", + AllocatableIPHigh: "{{if (eq . 1)}}10.10.{{.}}.254{{end}}", + AvailabilityZone: "zone{{.}}", + SpaceName: "{{if (eq .
0)}}space1{{else}}space2{{end}}", + VLANTag: 42, + }) +} + +func (s *withoutControllerSuite) TestProvisioningInfoWithEndpointBindings(c *gc.C) { + s.addSpacesAndSubnets(c) + + wordpressMachine, err := s.State.AddOneMachine(state.MachineTemplate{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + }) + c.Assert(err, jc.ErrorIsNil) + + // Use juju names for spaces in bindings, simulating ''juju deploy + // --bind...' was called. + bindings := map[string]string{ + "url": "space1", // has both name and provider ID + "db": "space2", // has only name, no provider ID + } + wordpressCharm := s.AddTestingCharm(c, "wordpress") + wordpressService := s.AddTestingServiceWithBindings(c, "wordpress", wordpressCharm, bindings) + wordpressUnit, err := wordpressService.AddUnit() + c.Assert(err, jc.ErrorIsNil) + err = wordpressUnit.AssignToMachine(wordpressMachine) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: wordpressMachine.Tag().String()}, + }} + result, err := s.provisioner.ProvisioningInfo(args) + c.Assert(err, jc.ErrorIsNil) + + expected := params.ProvisioningInfoResults{ + Results: []params.ProvisioningInfoResult{{ + Result: ¶ms.ProvisioningInfo{ + Series: "quantal", + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + tags.JujuUnitsDeployed: wordpressUnit.Name(), + }, + // Ensure space names are translated to provider IDs, where + // possible. + EndpointBindings: map[string]string{ + "db": "space2", // just name, no provider ID + "url": "first space id", // has provider ID + // We expect none of the unspecified bindings in the result. + }, + }, + }}} + c.Assert(result, jc.DeepEquals, expected) +} + +func (s *withoutControllerSuite) TestProvisioningInfoWithUnsuitableSpacesConstraints(c *gc.C) { + // Add an empty space. + _, err := s.State.AddSpace("empty", "", nil, true) + c.Assert(err, jc.ErrorIsNil) + + consEmptySpace := constraints.MustParse("cpu-cores=123 mem=8G spaces=empty") + consMissingSpace := constraints.MustParse("cpu-cores=123 mem=8G spaces=missing") + templates := []state.MachineTemplate{{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + Constraints: consEmptySpace, + Placement: "valid", + }, { + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + Constraints: consMissingSpace, + Placement: "valid", + }} + placementMachines, err := s.State.AddMachines(templates...) + c.Assert(err, jc.ErrorIsNil) + c.Assert(placementMachines, gc.HasLen, 2) + + args := params.Entities{Entities: []params.Entity{ + {Tag: placementMachines[0].Tag().String()}, + {Tag: placementMachines[1].Tag().String()}, + }} + result, err := s.provisioner.ProvisioningInfo(args) + c.Assert(err, jc.ErrorIsNil) + + expectedErrorEmptySpace := `cannot match subnets to zones: ` + + `cannot use space "empty" as deployment target: no subnets` + expectedErrorMissingSpace := `cannot match subnets to zones: ` + + `space "missing"` // " not found" will be appended by NotFoundError helper below. 
+ expected := params.ProvisioningInfoResults{Results: []params.ProvisioningInfoResult{ + {Error: apiservertesting.ServerError(expectedErrorEmptySpace)}, + {Error: apiservertesting.NotFoundError(expectedErrorMissingSpace)}, + }} + c.Assert(result, jc.DeepEquals, expected) +} + +func (s *withoutControllerSuite) TestStorageProviderFallbackToType(c *gc.C) { + s.registerStorageProviders(c, "dynamic", "static") + + template := state.MachineTemplate{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + Placement: "valid", + RequestedNetworks: []string{"net1", "net2"}, + Volumes: []state.MachineVolumeParams{ + {Volume: state.VolumeParams{Size: 1000, Pool: "dynamic"}}, + {Volume: state.VolumeParams{Size: 1000, Pool: "static"}}, + }, + } + placementMachine, err := s.State.AddOneMachine(template) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: placementMachine.Tag().String()}, + }} + result, err := s.provisioner.ProvisioningInfo(args) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(result, jc.DeepEquals, params.ProvisioningInfoResults{ + Results: []params.ProvisioningInfoResult{ + {Result: &params.ProvisioningInfo{ + Series: "quantal", + Constraints: template.Constraints, + Placement: template.Placement, + Networks: template.RequestedNetworks, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + Volumes: []params.VolumeParams{{ + VolumeTag: "volume-1", + Size: 1000, + Provider: "static", + Attributes: nil, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + Attachment: &params.VolumeAttachmentParams{ + MachineTag: placementMachine.Tag().String(), + VolumeTag: "volume-1", + Provider: "static", + }, + }}, + }}, + }, + }) +} + +func (s *withoutControllerSuite) TestProvisioningInfoPermissions(c *gc.C) { + // Login as a machine agent for machine 0. + anAuthorizer := s.authorizer + anAuthorizer.EnvironManager = false + anAuthorizer.Tag = s.machines[0].Tag() + aProvisioner, err := provisioner.NewProvisionerAPI(s.State, s.resources, anAuthorizer) + c.Assert(err, jc.ErrorIsNil) + c.Assert(aProvisioner, gc.NotNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: s.machines[0].Tag().String()}, + {Tag: s.machines[0].Tag().String() + "-lxc-0"}, + {Tag: "machine-42"}, + {Tag: s.machines[1].Tag().String()}, + {Tag: "service-bar"}, + }} + + // Only machine 0 and containers therein can be accessed. + results, err := aProvisioner.ProvisioningInfo(args) + c.Assert(results, jc.DeepEquals, params.ProvisioningInfoResults{ + Results: []params.ProvisioningInfoResult{ + {Result: &params.ProvisioningInfo{ + Series: "quantal", + Networks: []string{}, + Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, + Tags: map[string]string{ + tags.JujuModel: coretesting.ModelTag.Id(), + }, + }}, + {Error: apiservertesting.NotFoundError("machine 0/lxc/0")}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} === added directory 'src/github.com/juju/juju/apiserver/proxyupdater' === added file 'src/github.com/juju/juju/apiserver/proxyupdater/model.go' --- src/github.com/juju/juju/apiserver/proxyupdater/model.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/proxyupdater/model.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,25 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package proxyupdater + +import ( + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/state" +) + +func init() { + common.RegisterStandardFacade("ProxyUpdater", 1, NewProxyUpdaterAPI) +} + +// ProxyUpdaterAPI implements the API used by the proxy updater worker. +type ProxyUpdaterAPI struct { + *common.ModelWatcher +} + +// NewProxyUpdaterAPI creates a new instance of the ProxyUpdater API. +func NewProxyUpdaterAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*ProxyUpdaterAPI, error) { + return &ProxyUpdaterAPI{ + ModelWatcher: common.NewModelWatcher(st, resources, authorizer), + }, nil +} === added file 'src/github.com/juju/juju/apiserver/proxyupdater/model_test.go' --- src/github.com/juju/juju/apiserver/proxyupdater/model_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/proxyupdater/model_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package proxyupdater_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/agent" + "github.com/juju/juju/apiserver/common" + commontesting "github.com/juju/juju/apiserver/common/testing" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/juju/testing" + "github.com/juju/juju/state" +) + +type ProxyUpdaterSuite struct { + testing.JujuConnSuite + *commontesting.ModelWatcherTest + + authorizer apiservertesting.FakeAuthorizer + resources *common.Resources + + machine0 *state.Machine + api *agent.AgentAPIV2 +} + +var _ = gc.Suite(&ProxyUpdaterSuite{}) + +func (s *ProxyUpdaterSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + var err error + s.machine0, err = s.State.AddMachine("quantal", state.JobHostUnits, state.JobManageModel) + c.Assert(err, jc.ErrorIsNil) + + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.machine0.Tag(), + } + s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + s.api, err = agent.NewAgentAPIV2( + s.State, + s.resources, + s.authorizer, + ) + c.Assert(err, jc.ErrorIsNil) + s.ModelWatcherTest = commontesting.NewModelWatcherTest( + s.api, s.State, s.resources, commontesting.NoSecrets) +} === added file 'src/github.com/juju/juju/apiserver/proxyupdater/package_test.go' --- src/github.com/juju/juju/apiserver/proxyupdater/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/proxyupdater/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package proxyupdater_test + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === added file 'src/github.com/juju/juju/apiserver/read_only_calls.go' --- src/github.com/juju/juju/apiserver/read_only_calls.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/read_only_calls.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,71 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "github.com/juju/utils/set" +) + +// readOnlyCalls specifies a white-list of API calls that do not +// modify the database. The format of the calls is "<facade name>.<method name>". +// At this stage, we are explicitly ignoring the facade version.
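+// For example, "Client.FullStatus" white-lists the FullStatus method on the Client facade; isCallReadOnly below checks membership using exactly this facade.method key.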
+var readOnlyCalls = set.NewStrings( + "Action.Actions", + "Action.FindActionTagsByPrefix", + "Action.ListAll", + "Action.ListPending", + "Action.ListRunning", + "Action.ListCompleted", + "Action.ServicesCharmActions", + "Annotations.Get", + "Block.List", + "Charms.CharmInfo", + "Charms.IsMetered", + "Charms.List", + "Client.AgentVersion", + "Client.APIHostPorts", + "Client.CharmInfo", + "Client.ModelGet", + "Client.ModelInfo", + "Client.ModelUserInfo", + "Client.FullStatus", + // FindTools, while being technically read only, isn't a useful + // command for a read only user to run. + // While GetBundleChanges is technically read only, it is a precursor + // to deploying the bundle or changes. But... let's leave it here anyway. + "Client.GetBundleChanges", + "Client.GetModelConstraints", + "Client.PrivateAddress", + "Client.PublicAddress", + // ResolveCharms, while being technically read only, isn't a useful + // command for a read only user to run. + // Status is so old it shouldn't be used. + "Client.UnitStatusHistory", + "Client.WatchAll", + // TODO: add controller work. + "KeyManager.ListKeys", + "Service.GetConstraints", + "Service.CharmRelations", + "Service.Get", + "Spaces.ListSpaces", + "Storage.ListStorageDetails", + "Storage.ListFilesystems", + "Storage.ListPools", + "Storage.ListVolumes", + "Subnets.AllSpaces", + "Subnets.AllZones", + "Subnets.ListSubnets", + "UserManager.UserInfo", +) + +// isCallReadOnly returns whether or not the method on the facade +// is known to not alter the database. +func isCallReadOnly(facade, method string) bool { + key := facade + "." + method + // NOTE: maybe useful in the future to be able to specify entire facades + // as read only, in which case specifying something like "Facade.*" would + // be useful. Not sure we'll ever need this, but something to think about + // perhaps. + return readOnlyCalls.Contains(key) +} === added file 'src/github.com/juju/juju/apiserver/read_only_calls_test.go' --- src/github.com/juju/juju/apiserver/read_only_calls_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/read_only_calls_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,73 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" +) + +type readOnlyCallsSuite struct { +} + +var _ = gc.Suite(&readOnlyCallsSuite{}) + +func (*readOnlyCallsSuite) TestReadOnlyCallsExist(c *gc.C) { + // Iterate through the list of readOnlyCalls and make sure + // that the facades are reachable. 
+ facades := common.Facades.List() + + maxVersion := map[string]int{} + for _, facade := range facades { + version := 0 + for _, ver := range facade.Versions { + if ver > version { + version = ver + } + } + maxVersion[facade.Name] = version + } + + for _, name := range readOnlyCalls.Values() { + parts := strings.Split(name, ".") + facade, method := parts[0], parts[1] + version := maxVersion[facade] + + _, _, err := lookupMethod(facade, version, method) + c.Check(err, jc.ErrorIsNil) + } +} + +func (*readOnlyCallsSuite) TestReadOnlyCall(c *gc.C) { + for _, test := range []struct { + facade string + method string + }{ + {"Action", "Actions"}, + {"Client", "FullStatus"}, + {"Service", "Get"}, + {"Storage", "ListStorageDetails"}, + } { + c.Logf("check %s.%s", test.facade, test.method) + c.Check(isCallReadOnly(test.facade, test.method), jc.IsTrue) + } +} + +func (*readOnlyCallsSuite) TestWritableCalls(c *gc.C) { + for _, test := range []struct { + facade string + method string + }{ + {"Client", "UnknownMethod"}, + {"Service", "Deploy"}, + {"UnknownFacade", "List"}, + } { + c.Logf("check %s.%s", test.facade, test.method) + c.Check(isCallReadOnly(test.facade, test.method), jc.IsFalse) + } +} === modified file 'src/github.com/juju/juju/apiserver/reboot/reboot.go' --- src/github.com/juju/juju/apiserver/reboot/reboot.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/reboot/reboot.go 2016-03-22 15:18:22 +0000 @@ -33,7 +33,7 @@ } func init() { - common.RegisterStandardFacade("Reboot", 1, NewRebootAPI) + common.RegisterStandardFacade("Reboot", 2, NewRebootAPI) } // NewRebootAPI creates a new server-side RebootAPI facade. === added file 'src/github.com/juju/juju/apiserver/registration.go' --- src/github.com/juju/juju/apiserver/registration.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/registration.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,162 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver + +import ( + "crypto/rand" + "encoding/json" + "io" + "io/ioutil" + "net/http" + + "github.com/juju/errors" + "github.com/juju/names" + "golang.org/x/crypto/nacl/secretbox" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +const ( + secretboxNonceLength = 24 + secretboxKeyLength = 32 +) + +// registerUserHandler is an http.Handler for the "/register" endpoint. This is +// used to complete a secure user registration process, and provide controller +// login credentials. +type registerUserHandler struct { + ctxt httpContext +} + +// ServeHTTP implements the http.Handler interface. +func (h *registerUserHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.Method != "POST" { + sendError(w, errors.MethodNotAllowedf("unsupported method: %q", req.Method)) + return + } + st, err := h.ctxt.stateForRequestUnauthenticated(req) + if err != nil { + sendError(w, err) + return + } + response, err := h.processPost(req, st) + if err != nil { + sendError(w, err) + return + } + sendStatusAndJSON(w, http.StatusOK, response) +} + +// The client will POST to the "/register" endpoint with a JSON-encoded +// params.SecretKeyLoginRequest. This contains the tag of the user they +// are registering, a (supposedly) unique nonce, and a ciphertext which +// is the result of concatenating the user and nonce values, and then +// encrypting and authenticating them with the NaCl Secretbox algorithm. 
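+// +// For illustration (mirroring the registration tests below), the posted JSON takes the form {"user": "user-bob", "nonce": "<24 bytes, base64>", "ciphertext": "<secretbox output, base64>"}.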
+// +// If the server can decrypt the ciphertext, then it knows the client +// has the required secret key; thus they are authenticated. The client +// does not have the CA certificate for communicating securely with the +// server, and so must also authenticate the server. The server will +// similarly generate a unique nonce and encrypt the response payload +// using the same secret key as the client. If the client can decrypt +// the payload, it knows the server has the required secret key; thus +// it is also authenticated. +// +// NOTE(axw) it is important that the client and server choose their +// own nonces, because reusing a nonce means that the key-stream can +// be revealed. +func (h *registerUserHandler) processPost(req *http.Request, st *state.State) (*params.SecretKeyLoginResponse, error) { + + data, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + var loginRequest params.SecretKeyLoginRequest + if err := json.Unmarshal(data, &loginRequest); err != nil { + return nil, err + } + + // Basic validation: ensure that the request contains a valid user tag, + // nonce, and ciphertext of the expected length. + userTag, err := names.ParseUserTag(loginRequest.User) + if err != nil { + return nil, err + } + if len(loginRequest.Nonce) != secretboxNonceLength { + return nil, errors.NotValidf("nonce") + } + + // Decrypt the ciphertext with the user's secret key (if it has one). + user, err := st.User(userTag) + if err != nil { + return nil, err + } + if len(user.SecretKey()) != secretboxKeyLength { + return nil, errors.NotFoundf("secret key for user %q", user.Name()) + } + var key [secretboxKeyLength]byte + var nonce [secretboxNonceLength]byte + copy(key[:], user.SecretKey()) + copy(nonce[:], loginRequest.Nonce) + payloadBytes, ok := secretbox.Open(nil, loginRequest.PayloadCiphertext, &nonce, &key) + if !ok { + // Cannot decrypt the ciphertext, which implies that the secret + // key specified by the client is invalid. + return nil, errors.NotValidf("secret key") + } + + // Unmarshal the request payload, which contains the new password to + // set for the user. + var requestPayload params.SecretKeyLoginRequestPayload + if err := json.Unmarshal(payloadBytes, &requestPayload); err != nil { + return nil, errors.Annotate(err, "cannot unmarshal payload") + } + if err := user.SetPassword(requestPayload.Password); err != nil { + return nil, errors.Annotate(err, "setting new password") + } + + // Respond with the CA-cert and password, encrypted again with the + // secret key. + responsePayload, err := h.getSecretKeyLoginResponsePayload(st) + if err != nil { + return nil, errors.Trace(err) + } + payloadBytes, err = json.Marshal(responsePayload) + if err != nil { + return nil, errors.Trace(err) + } + if _, err := rand.Read(nonce[:]); err != nil { + return nil, errors.Trace(err) + } + response := &params.SecretKeyLoginResponse{ + Nonce: nonce[:], + PayloadCiphertext: secretbox.Seal(nil, payloadBytes, &nonce, &key), + } + return response, nil +} + +// getSecretKeyLoginResponsePayload returns the information required by the +// client to login to the controller securely. +func (h *registerUserHandler) getSecretKeyLoginResponsePayload( + st *state.State, +) (*params.SecretKeyLoginResponsePayload, error) { + if !st.IsController() { + return nil, errors.New("state is not for a controller") + } + payload := params.SecretKeyLoginResponsePayload{ + CACert: st.CACert(), + ControllerUUID: st.ModelUUID(), + } + return &payload, nil +} + +// sendError sends a JSON-encoded error response.
+func (h *registerUserHandler) sendError(w io.Writer, req *http.Request, err error) { + if err != nil { + logger.Errorf("returning error from %s %s: %s", req.Method, req.URL.Path, errors.Details(err)) + } + sendJSON(w, &params.ErrorResult{Error: common.ServerError(err)}) +} === added file 'src/github.com/juju/juju/apiserver/registration_test.go' --- src/github.com/juju/juju/apiserver/registration_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/registration_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,215 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package apiserver_test + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/juju/utils" + "golang.org/x/crypto/nacl/secretbox" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +type registrationSuite struct { + authHttpSuite + bob *state.User +} + +var _ = gc.Suite(&registrationSuite{}) + +func (s *registrationSuite) SetUpTest(c *gc.C) { + s.authHttpSuite.SetUpTest(c) + bob, err := s.BackingState.AddUserWithSecretKey("bob", "", "admin") + c.Assert(err, jc.ErrorIsNil) + s.bob = bob +} + +func (s *registrationSuite) assertErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) { + body := assertResponse(c, resp, expCode, params.ContentTypeJSON) + var result params.ErrorResult + s.unmarshal(c, body, &result) + c.Assert(result.Error, gc.NotNil) + c.Assert(result.Error, gc.Matches, expError) +} + +func (s *registrationSuite) assertResponse(c *gc.C, resp *http.Response) params.SecretKeyLoginResponse { + body := assertResponse(c, resp, http.StatusOK, params.ContentTypeJSON) + var response params.SecretKeyLoginResponse + s.unmarshal(c, body, &response) + return response +} + +func (*registrationSuite) unmarshal(c *gc.C, body []byte, out interface{}) { + err := json.Unmarshal(body, out) + c.Assert(err, jc.ErrorIsNil, gc.Commentf("body: %s", body)) +} + +func (s *registrationSuite) registrationURL(c *gc.C) string { + url := s.baseURL(c) + url.Path = "/register" + return url.String() +} + +func (s *registrationSuite) TestRegister(c *gc.C) { + // Ensure we cannot log in with the password yet. + const password = "hunter2" + c.Assert(s.bob.PasswordValid(password), jc.IsFalse) + + validNonce := []byte(strings.Repeat("X", 24)) + secretKey := s.bob.SecretKey() + ciphertext := s.sealBox( + c, validNonce, secretKey, fmt.Sprintf(`{"password": "%s"}`, password), + ) + resp := httptesting.Do(c, httptesting.DoRequestParams{ + Do: utils.GetNonValidatingHTTPClient().Do, + URL: s.registrationURL(c), + Method: "POST", + JSONBody: &params.SecretKeyLoginRequest{ + User: "user-bob", + Nonce: validNonce, + PayloadCiphertext: ciphertext, + }, + }) + c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) + defer resp.Body.Close() + + // It should be possible to log in as bob with the + // password "hunter2" now, and there should be no + // secret key any longer.
+	err := s.bob.Refresh()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(s.bob.PasswordValid(password), jc.IsTrue)
+	c.Assert(s.bob.SecretKey(), gc.IsNil)
+
+	var response params.SecretKeyLoginResponse
+	bodyData, err := ioutil.ReadAll(resp.Body)
+	c.Assert(err, jc.ErrorIsNil)
+	err = json.Unmarshal(bodyData, &response)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(response.Nonce, gc.HasLen, len(validNonce))
+	plaintext := s.openBox(c, response.PayloadCiphertext, response.Nonce, secretKey)
+
+	var responsePayload params.SecretKeyLoginResponsePayload
+	err = json.Unmarshal(plaintext, &responsePayload)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(responsePayload.CACert, gc.Equals, s.BackingState.CACert())
+	model, err := s.BackingState.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(responsePayload.ControllerUUID, gc.Equals, model.ControllerUUID())
+}
+
+func (s *registrationSuite) TestRegisterInvalidMethod(c *gc.C) {
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Do:           utils.GetNonValidatingHTTPClient().Do,
+		URL:          s.registrationURL(c),
+		Method:       "GET",
+		ExpectStatus: http.StatusMethodNotAllowed,
+		ExpectBody: &params.ErrorResult{
+			Error: &params.Error{
+				Message: `unsupported method: "GET"`,
+				Code:    params.CodeMethodNotAllowed,
+			},
+		},
+	})
+}
+
+func (s *registrationSuite) TestRegisterInvalidFormat(c *gc.C) {
+	s.testInvalidRequest(
+		c, "[]", "json: cannot unmarshal array into Go value of type params.SecretKeyLoginRequest", "",
+		http.StatusInternalServerError,
+	)
+}
+
+func (s *registrationSuite) TestRegisterInvalidUserTag(c *gc.C) {
+	s.testInvalidRequest(
+		c, `{"user": "service-bob"}`, `"service-bob" is not a valid user tag`, "",
+		http.StatusInternalServerError,
+	)
+}
+
+func (s *registrationSuite) TestRegisterInvalidNonce(c *gc.C) {
+	s.testInvalidRequest(
+		c, `{"user": "user-bob", "nonce": ""}`, `nonce not valid`, "",
+		http.StatusInternalServerError,
+	)
+}
+
+func (s *registrationSuite) TestRegisterInvalidCiphertext(c *gc.C) {
+	validNonce := []byte(strings.Repeat("X", 24))
+	s.testInvalidRequest(c,
+		fmt.Sprintf(
+			`{"user": "user-bob", "nonce": "%s"}`,
+			base64.StdEncoding.EncodeToString(validNonce),
+		), `secret key not valid`, "",
+		http.StatusInternalServerError,
+	)
+}
+
+func (s *registrationSuite) TestRegisterNoSecretKey(c *gc.C) {
+	err := s.bob.SetPassword("anything")
+	c.Assert(err, jc.ErrorIsNil)
+	validNonce := []byte(strings.Repeat("X", 24))
+	s.testInvalidRequest(c,
+		fmt.Sprintf(
+			`{"user": "user-bob", "nonce": "%s"}`,
+			base64.StdEncoding.EncodeToString(validNonce),
+		), `secret key for user "bob" not found`, params.CodeNotFound,
+		http.StatusNotFound,
+	)
+}
+
+func (s *registrationSuite) TestRegisterInvalidRequestPayload(c *gc.C) {
+	validNonce := []byte(strings.Repeat("X", 24))
+	ciphertext := s.sealBox(c, validNonce, s.bob.SecretKey(), "[]")
+	s.testInvalidRequest(c,
+		fmt.Sprintf(
+			`{"user": "user-bob", "nonce": "%s", "ciphertext": "%s"}`,
+			base64.StdEncoding.EncodeToString(validNonce),
+			base64.StdEncoding.EncodeToString(ciphertext),
+		),
+		`cannot unmarshal payload: json: cannot unmarshal array into Go value of type params.SecretKeyLoginRequestPayload`, "",
+		http.StatusInternalServerError,
+	)
+}
+
+func (s *registrationSuite) testInvalidRequest(c *gc.C, requestBody, errorMessage, errorCode string, statusCode int) {
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Do:           utils.GetNonValidatingHTTPClient().Do,
+		URL:          s.registrationURL(c),
+		Method:       "POST",
+		Body:         strings.NewReader(requestBody),
+		ExpectStatus: statusCode,
+		ExpectBody: &params.ErrorResult{
+			Error: &params.Error{Message: errorMessage, Code: errorCode},
+		},
+	})
+}
+
+func (s *registrationSuite) sealBox(c *gc.C, nonce, key []byte, message string) []byte {
+	var nonceArray [24]byte
+	var keyArray [32]byte
+	c.Assert(copy(nonceArray[:], nonce), gc.Equals, len(nonceArray))
+	c.Assert(copy(keyArray[:], key), gc.Equals, len(keyArray))
+	return secretbox.Seal(nil, []byte(message), &nonceArray, &keyArray)
+}
+
+func (s *registrationSuite) openBox(c *gc.C, ciphertext, nonce, key []byte) []byte {
+	var nonceArray [24]byte
+	var keyArray [32]byte
+	c.Assert(copy(nonceArray[:], nonce), gc.Equals, len(nonceArray))
+	c.Assert(copy(keyArray[:], key), gc.Equals, len(keyArray))
+	message, ok := secretbox.Open(nil, ciphertext, &nonceArray, &keyArray)
+	c.Assert(ok, jc.IsTrue)
+	return message
+}

=== added file 'src/github.com/juju/juju/apiserver/resource.go'
--- src/github.com/juju/juju/apiserver/resource.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/resource.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,73 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// TODO(ericsnow) Remove this file once we add a registration mechanism.
+
+package apiserver
+
+import (
+	"net/http"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+
+	internalserver "github.com/juju/juju/resource/api/private/server"
+	"github.com/juju/juju/resource/api/server"
+	"github.com/juju/juju/resource/resourceadapters"
+	"github.com/juju/juju/state"
+)
+
+type resourcesHandlerDeps struct {
+	httpCtxt httpContext
+}
+
+// ConnectForUser connects to state for an API user.
+func (deps resourcesHandlerDeps) ConnectForUser(req *http.Request) (*state.State, state.Entity, error) {
+	return deps.httpCtxt.stateForRequestAuthenticatedUser(req)
+}
+
+// ConnectForUnitAgent connects to state for a unit agent.
+func (deps resourcesHandlerDeps) ConnectForUnitAgent(req *http.Request) (*state.State, *state.Unit, error) {
+	st, ent, err := deps.httpCtxt.stateForRequestAuthenticatedAgent(req)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+
+	unit, ok := ent.(*state.Unit)
+	if !ok {
+		logger.Errorf("unexpected type: %T", ent)
+		return nil, nil, errors.Errorf("unexpected type: %T", ent)
+	}
+	return st, unit, nil
+}
+
+// TODO(ericsnow) Move these functions to resourceadapters?
+ +func newResourceHandler(httpCtxt httpContext) http.Handler { + deps := resourcesHandlerDeps{httpCtxt} + return server.NewLegacyHTTPHandler( + func(req *http.Request) (server.DataStore, names.Tag, error) { + st, entity, err := deps.ConnectForUser(req) + if err != nil { + return nil, nil, errors.Trace(err) + } + resources, err := st.Resources() + if err != nil { + return nil, nil, errors.Trace(err) + } + ds := resourceadapters.DataStore{ + Resources: resources, + State: st, + } + return ds, entity.Tag(), nil + }, + ) +} + +func newUnitResourceHandler(httpCtxt httpContext) http.Handler { + extractor := resourceadapters.HTTPDownloadRequestExtractor{ + Connector: &resourcesHandlerDeps{httpCtxt}, + } + deps := internalserver.NewLegacyHTTPHandlerDeps(extractor) + return internalserver.NewLegacyHTTPHandler(deps) +} === modified file 'src/github.com/juju/juju/apiserver/restoring_root.go' --- src/github.com/juju/juju/apiserver/restoring_root.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/restoring_root.go 2016-03-22 15:18:22 +0000 @@ -57,7 +57,7 @@ var allowedMethodsAboutToRestore = set.NewStrings( "Client.FullStatus", // for "juju status" - "Client.EnvironmentGet", // for "juju ssh" + "Client.ModelGet", // for "juju ssh" "Client.PrivateAddress", // for "juju ssh" "Client.PublicAddress", // for "juju ssh" "Client.WatchDebugLog", // for "juju debug-log" === modified file 'src/github.com/juju/juju/apiserver/restoring_root_test.go' --- src/github.com/juju/juju/apiserver/restoring_root_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/restoring_root_test.go 2016-03-22 15:18:22 +0000 @@ -21,10 +21,7 @@ func (r *restoreRootSuite) TestFindAllowedMethodWhenPreparing(c *gc.C) { root := apiserver.TestingAboutToRestoreRoot(nil) - // TODO(perrito666): Uncomment when Restore lands and delete - // the following line. 
- //caller, err := root.FindMethod("Backups", 0, "Restore") - caller, err := root.FindMethod("Client", 0, "FullStatus") + caller, err := root.FindMethod("Backups", 1, "Restore") c.Assert(err, jc.ErrorIsNil) c.Assert(caller, gc.NotNil) @@ -33,7 +30,7 @@ func (r *restoreRootSuite) TestNothingAllowedMethodWhenPreparing(c *gc.C) { root := apiserver.TestingRestoreInProgressRoot(nil) - caller, err := root.FindMethod("Client", 0, "ServiceDeploy") + caller, err := root.FindMethod("Service", 3, "Deploy") c.Assert(err, gc.ErrorMatches, "juju restore is in progress - Juju api is off to prevent data loss") c.Assert(caller, gc.IsNil) @@ -42,7 +39,7 @@ func (r *restoreRootSuite) TestFindDisallowedMethodWhenPreparing(c *gc.C) { root := apiserver.TestingAboutToRestoreRoot(nil) - caller, err := root.FindMethod("Client", 0, "ServiceDeploy") + caller, err := root.FindMethod("Service", 3, "Deploy") c.Assert(err, gc.ErrorMatches, "juju restore is in progress - Juju functionality is limited to avoid data loss") c.Assert(caller, gc.IsNil) @@ -51,7 +48,7 @@ func (r *restoreRootSuite) TestFindDisallowedMethodWhenRestoring(c *gc.C) { root := apiserver.TestingRestoreInProgressRoot(nil) - caller, err := root.FindMethod("Client", 0, "ServiceDeploy") + caller, err := root.FindMethod("Service", 3, "Deploy") c.Assert(err, gc.ErrorMatches, "juju restore is in progress - Juju api is off to prevent data loss") c.Assert(caller, gc.IsNil) === modified file 'src/github.com/juju/juju/apiserver/restricted_root.go' --- src/github.com/juju/juju/apiserver/restricted_root.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/restricted_root.go 2016-03-22 15:18:22 +0000 @@ -26,9 +26,9 @@ // of the API server. Any facade added here needs to work across environment // boundaries. var restrictedRootNames = set.NewStrings( - "AllEnvWatcher", - "EnvironmentManager", - "SystemManager", + "AllModelWatcher", + "Controller", + "ModelManager", "UserManager", ) @@ -36,14 +36,15 @@ // of the facades available at the server root when there is no active // environment. func (r *restrictedRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - // The lookup of the name is done first to return a not found error if the - // user is looking for a method that we just don't have. + // We restrict what facades are advertised at login, filtered on the restricted root names. + // Therefore we can't accurately know if a method is not found unless it resides on one + // of the restricted facades. 
+	if !restrictedRootNames.Contains(rootName) {
+		return nil, errors.NotSupportedf("logged in to server, no model, %q", rootName)
+	}
 	caller, err := r.MethodFinder.FindMethod(rootName, version, methodName)
 	if err != nil {
 		return nil, err
 	}
-	if !restrictedRootNames.Contains(rootName) {
-		return nil, errors.NotSupportedf("logged in to server, no environment, %q", rootName)
-	}
 	return caller, nil
 }

=== modified file 'src/github.com/juju/juju/apiserver/restricted_root_test.go'
--- src/github.com/juju/juju/apiserver/restricted_root_test.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/restricted_root_test.go	2016-03-22 15:18:22 +0000
@@ -9,7 +9,6 @@
 	gc "gopkg.in/check.v1"
 
 	"github.com/juju/juju/apiserver"
-	"github.com/juju/juju/feature"
 	"github.com/juju/juju/rpc"
 	"github.com/juju/juju/testing"
 )
@@ -24,7 +23,6 @@
 
 func (r *restrictedRootSuite) SetUpTest(c *gc.C) {
 	r.BaseSuite.SetUpTest(c)
-	r.SetFeatureFlags(feature.JES)
 	r.root = apiserver.TestingRestrictedApiHandler(nil)
 }
 
@@ -35,41 +33,41 @@
 }
 
 func (r *restrictedRootSuite) TestFindAllowedMethod(c *gc.C) {
-	r.assertMethodAllowed(c, "AllEnvWatcher", 1, "Next")
-	r.assertMethodAllowed(c, "AllEnvWatcher", 1, "Stop")
-
-	r.assertMethodAllowed(c, "EnvironmentManager", 1, "CreateEnvironment")
-	r.assertMethodAllowed(c, "EnvironmentManager", 1, "ListEnvironments")
-
-	r.assertMethodAllowed(c, "UserManager", 0, "AddUser")
-	r.assertMethodAllowed(c, "UserManager", 0, "SetPassword")
-	r.assertMethodAllowed(c, "UserManager", 0, "UserInfo")
-
-	r.assertMethodAllowed(c, "SystemManager", 1, "AllEnvironments")
-	r.assertMethodAllowed(c, "SystemManager", 1, "DestroySystem")
-	r.assertMethodAllowed(c, "SystemManager", 1, "EnvironmentConfig")
-	r.assertMethodAllowed(c, "SystemManager", 1, "ListBlockedEnvironments")
+	r.assertMethodAllowed(c, "AllModelWatcher", 2, "Next")
+	r.assertMethodAllowed(c, "AllModelWatcher", 2, "Stop")
+
+	r.assertMethodAllowed(c, "ModelManager", 2, "CreateModel")
+	r.assertMethodAllowed(c, "ModelManager", 2, "ListModels")
+
+	r.assertMethodAllowed(c, "UserManager", 1, "AddUser")
+	r.assertMethodAllowed(c, "UserManager", 1, "SetPassword")
+	r.assertMethodAllowed(c, "UserManager", 1, "UserInfo")
+
+	r.assertMethodAllowed(c, "Controller", 2, "AllModels")
+	r.assertMethodAllowed(c, "Controller", 2, "DestroyController")
+	r.assertMethodAllowed(c, "Controller", 2, "ModelConfig")
+	r.assertMethodAllowed(c, "Controller", 2, "ListBlockedModels")
 }
 
 func (r *restrictedRootSuite) TestFindDisallowedMethod(c *gc.C) {
-	caller, err := r.root.FindMethod("Client", 0, "Status")
+	caller, err := r.root.FindMethod("Client", 1, "Status")
 
-	c.Assert(err, gc.ErrorMatches, `logged in to server, no environment, "Client" not supported`)
+	c.Assert(err, gc.ErrorMatches, `logged in to server, no model, "Client" not supported`)
 	c.Assert(errors.IsNotSupported(err), jc.IsTrue)
 	c.Assert(caller, gc.IsNil)
 }
 
 func (r *restrictedRootSuite) TestNonExistentFacade(c *gc.C) {
-	caller, err := r.root.FindMethod("NonExistent", 0, "Method")
+	caller, err := r.root.FindMethod("SomeFacade", 0, "Method")
 
-	c.Assert(err, gc.ErrorMatches, `unknown object type "NonExistent"`)
+	c.Assert(err, gc.ErrorMatches, `logged in to server, no model, "SomeFacade" not supported`)
 	c.Assert(caller, gc.IsNil)
 }
 
 func (r *restrictedRootSuite) TestFindNonExistentMethod(c *gc.C) {
-	caller, err := r.root.FindMethod("EnvironmentManager", 1, "Bar")
+	caller, err := r.root.FindMethod("ModelManager", 2, "Bar")
 
-	c.Assert(err, gc.ErrorMatches, `no such request - method EnvironmentManager\(1\).Bar is not implemented`)
+	c.Assert(err, gc.ErrorMatches, `no such request - method ModelManager\(2\).Bar is not implemented`)
 	c.Assert(caller, gc.IsNil)
 }

=== modified file 'src/github.com/juju/juju/apiserver/resumer/resumer.go'
--- src/github.com/juju/juju/apiserver/resumer/resumer.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/resumer/resumer.go	2016-03-22 15:18:22 +0000
@@ -13,7 +13,7 @@
 )
 
 func init() {
-	common.RegisterStandardFacade("Resumer", 1, NewResumerAPI)
+	common.RegisterStandardFacade("Resumer", 2, NewResumerAPI)
 }
 
 var logger = loggo.GetLogger("juju.apiserver.resumer")
@@ -26,7 +26,7 @@
 
 // NewResumerAPI creates a new instance of the Resumer API.
 func NewResumerAPI(st *state.State, _ *common.Resources, authorizer common.Authorizer) (*ResumerAPI, error) {
-	if !authorizer.AuthEnvironManager() {
+	if !authorizer.AuthModelManager() {
 		return nil, common.ErrPerm
 	}
 	return &ResumerAPI{

=== added directory 'src/github.com/juju/juju/apiserver/retrystrategy'
=== added file 'src/github.com/juju/juju/apiserver/retrystrategy/pacakge_test.go'
--- src/github.com/juju/juju/apiserver/retrystrategy/pacakge_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/retrystrategy/pacakge_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,15 @@
+// Copyright 2016 Canonical Ltd.
+// Copyright 2016 Cloudbase Solutions
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package retrystrategy_test
+
+import (
+	stdtesting "testing"
+
+	coretesting "github.com/juju/juju/testing"
+)
+
+func TestAll(t *stdtesting.T) {
+	coretesting.MgoTestPackage(t)
+}

=== added file 'src/github.com/juju/juju/apiserver/retrystrategy/retrystrategy.go'
--- src/github.com/juju/juju/apiserver/retrystrategy/retrystrategy.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/retrystrategy/retrystrategy.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,136 @@
+// Copyright 2016 Canonical Ltd.
+// Copyright 2016 Cloudbase Solutions
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package retrystrategy
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/state/watcher"
+)
+
+// Right now these are defined as constants, but they may be made
+// configurable in the future.
+const (
+	MinRetryTime    = 5 * time.Second
+	MaxRetryTime    = 5 * time.Minute
+	JitterRetryTime = true
+	RetryTimeFactor = 2
+)
+
+func init() {
+	common.RegisterStandardFacade("RetryStrategy", 1, NewRetryStrategyAPI)
+}
+
+// RetryStrategy defines the methods exported by the RetryStrategy API facade.
+type RetryStrategy interface {
+	RetryStrategy(params.Entities) (params.RetryStrategyResults, error)
+	WatchRetryStrategy(params.Entities) (params.NotifyWatchResults, error)
+}
+
+// RetryStrategyAPI implements RetryStrategy.
+type RetryStrategyAPI struct {
+	st         *state.State
+	accessUnit common.GetAuthFunc
+	resources  *common.Resources
+}
+
+var _ RetryStrategy = (*RetryStrategyAPI)(nil)
+
+// NewRetryStrategyAPI creates a new API endpoint for getting retry strategies.
+func NewRetryStrategyAPI(
+	st *state.State,
+	resources *common.Resources,
+	authorizer common.Authorizer,
+) (*RetryStrategyAPI, error) {
+	if !authorizer.AuthUnitAgent() {
+		return nil, common.ErrPerm
+	}
+	return &RetryStrategyAPI{
+		st: st,
+		accessUnit: func() (common.AuthFunc, error) {
+			return authorizer.AuthOwner, nil
+		},
+		resources: resources,
+	}, nil
+}
+
+// RetryStrategy returns RetryStrategyResults that can be used by any code
+// that needs to configure the retry timer that currently lives in juju/utils.
+func (h *RetryStrategyAPI) RetryStrategy(args params.Entities) (params.RetryStrategyResults, error) {
+	results := params.RetryStrategyResults{
+		Results: make([]params.RetryStrategyResult, len(args.Entities)),
+	}
+	canAccess, err := h.accessUnit()
+	if err != nil {
+		return params.RetryStrategyResults{}, errors.Trace(err)
+	}
+	config, err := h.st.ModelConfig()
+	if err != nil {
+		return params.RetryStrategyResults{}, errors.Trace(err)
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseTag(entity.Tag)
+		if err != nil {
+			results.Results[i].Error = common.ServerError(err)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			// Right now the only truly configurable value is ShouldRetry,
+			// which is taken from the model config.
+			// The rest are hardcoded.
+			results.Results[i].Result = &params.RetryStrategy{
+				ShouldRetry:     config.AutomaticallyRetryHooks(),
+				MinRetryTime:    MinRetryTime,
+				MaxRetryTime:    MaxRetryTime,
+				JitterRetryTime: JitterRetryTime,
+				RetryTimeFactor: RetryTimeFactor,
+			}
+			err = nil
+		}
+		results.Results[i].Error = common.ServerError(err)
+	}
+	return results, nil
+}
+
+// WatchRetryStrategy watches for changes to the model configuration. Currently
+// we only allow changes to the boolean that determines whether retries should
+// be attempted or not.
+func (h *RetryStrategyAPI) WatchRetryStrategy(args params.Entities) (params.NotifyWatchResults, error) {
+	results := params.NotifyWatchResults{
+		Results: make([]params.NotifyWatchResult, len(args.Entities)),
+	}
+	canAccess, err := h.accessUnit()
+	if err != nil {
+		return params.NotifyWatchResults{}, errors.Trace(err)
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseTag(entity.Tag)
+		if err != nil {
+			results.Results[i].Error = common.ServerError(err)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			watch := h.st.WatchForModelConfigChanges()
+			// Consume the initial event. Technically, API calls to Watch
+			// 'transmit' the initial event in the Watch response. But
+			// NotifyWatchers have no state to transmit.
+			if _, ok := <-watch.Changes(); ok {
+				results.Results[i].NotifyWatcherId = h.resources.Register(watch)
+				err = nil
+			} else {
+				err = watcher.EnsureErr(watch)
+			}
+		}
+		results.Results[i].Error = common.ServerError(err)
+	}
+	return results, nil
+}
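+
+// Example (an illustrative sketch, not part of this change): a unit agent
+// holding some facade caller - here a hypothetical "caller" wrapping the
+// API connection - would consume this facade roughly as follows:
+//
+//	args := params.Entities{Entities: []params.Entity{{Tag: unitTag.String()}}}
+//	results, err := caller.RetryStrategy(args)
+//	// results.Results[0].Result now holds ShouldRetry from the model's
+//	// automatically-retry-hooks setting, plus the hardcoded MinRetryTime
+//	// (5s), MaxRetryTime (5m), RetryTimeFactor (2) and JitterRetryTime.
+//
+//	watches, err := caller.WatchRetryStrategy(args)
+//	// watches.Results[0].NotifyWatcherId identifies a NotifyWatcher whose
+//	// initial event has already been consumed; the first notification it
+//	// delivers therefore corresponds to an actual model config change.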
=== added file 'src/github.com/juju/juju/apiserver/retrystrategy/retrystrategy_test.go'
--- src/github.com/juju/juju/apiserver/retrystrategy/retrystrategy_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/retrystrategy/retrystrategy_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,183 @@
+// Copyright 2016 Canonical Ltd.
+// Copyright 2016 Cloudbase Solutions
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package retrystrategy_test
+
+import (
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/apiserver/retrystrategy"
+	apiservertesting "github.com/juju/juju/apiserver/testing"
+	jujutesting "github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/state"
+	statetesting "github.com/juju/juju/state/testing"
+	jujufactory "github.com/juju/juju/testing/factory"
+)
+
+var _ = gc.Suite(&retryStrategySuite{})
+
+type retryStrategySuite struct {
+	jujutesting.JujuConnSuite
+
+	authorizer apiservertesting.FakeAuthorizer
+	resources  *common.Resources
+
+	factory *jujufactory.Factory
+
+	unit *state.Unit
+
+	strategy retrystrategy.RetryStrategy
+}
+
+var tagsTests = []struct {
+	tag         string
+	expectedErr string
+}{
+	{"user-admin", "permission denied"},
+	{"unit-wut-4", "permission denied"},
+	{"definitelynotatag", `"definitelynotatag" is not a valid tag`},
+	{"machine-5", "permission denied"},
+}
+
+func (s *retryStrategySuite) SetUpTest(c *gc.C) {
+	s.JujuConnSuite.SetUpTest(c)
+
+	s.factory = jujufactory.NewFactory(s.State)
+	s.unit = s.factory.MakeUnit(c, nil)
+
+	// Create a FakeAuthorizer so we can check permissions,
+	// set up assuming unit 0 has logged in.
+	s.authorizer = apiservertesting.FakeAuthorizer{
+		Tag: s.unit.UnitTag(),
+	}
+
+	// Create the resource registry separately to track invocations to
+	// Register.
+	s.resources = common.NewResources()
+	s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() })
+
+	strategy, err := retrystrategy.NewRetryStrategyAPI(s.State, s.resources, s.authorizer)
+	c.Assert(err, jc.ErrorIsNil)
+	s.strategy = strategy
+}
+
+func (s *retryStrategySuite) TestRetryStrategyUnauthenticated(c *gc.C) {
+	svc, err := s.unit.Service()
+	c.Assert(err, jc.ErrorIsNil)
+	otherUnit := s.factory.MakeUnit(c, &jujufactory.UnitParams{Service: svc})
+	args := params.Entities{Entities: []params.Entity{{otherUnit.Tag().String()}}}
+
+	res, err := s.strategy.RetryStrategy(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(res.Results, gc.HasLen, 1)
+	c.Assert(res.Results[0].Error, gc.ErrorMatches, "permission denied")
+	c.Assert(res.Results[0].Result, gc.IsNil)
+}
+
+func (s *retryStrategySuite) TestRetryStrategyBadTag(c *gc.C) {
+	args := params.Entities{Entities: make([]params.Entity, len(tagsTests))}
+	for i, t := range tagsTests {
+		args.Entities[i] = params.Entity{Tag: t.tag}
+	}
+	res, err := s.strategy.RetryStrategy(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(res.Results, gc.HasLen, len(tagsTests))
+	for i, r := range res.Results {
+		c.Logf("result %d", i)
+		c.Assert(r.Error, gc.ErrorMatches, tagsTests[i].expectedErr)
+		c.Assert(res.Results[i].Result, gc.IsNil)
+	}
+}
+
+func (s *retryStrategySuite) TestRetryStrategy(c *gc.C) {
+	expected := &params.RetryStrategy{
+		ShouldRetry:     true,
+		MinRetryTime:    retrystrategy.MinRetryTime,
+		MaxRetryTime:    retrystrategy.MaxRetryTime,
+		JitterRetryTime: retrystrategy.JitterRetryTime,
+		RetryTimeFactor: retrystrategy.RetryTimeFactor,
+	}
+	args := params.Entities{Entities: []params.Entity{{Tag: s.unit.Tag().String()}}}
+	r, err := s.strategy.RetryStrategy(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(r.Results, gc.HasLen, 1)
+	c.Assert(r.Results[0].Error, gc.IsNil)
+	c.Assert(r.Results[0].Result, jc.DeepEquals, expected)
+
+	s.setRetryStrategy(c, false)
+	expected.ShouldRetry = false
+
+	r, err = s.strategy.RetryStrategy(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(r.Results, gc.HasLen, 1)
+	
c.Assert(r.Results[0].Error, gc.IsNil) + c.Assert(r.Results[0].Result, jc.DeepEquals, expected) +} + +func (s *retryStrategySuite) setRetryStrategy(c *gc.C, automaticallyRetryHooks bool) { + err := s.State.UpdateModelConfig(map[string]interface{}{"automatically-retry-hooks": automaticallyRetryHooks}, nil, nil) + c.Assert(err, jc.ErrorIsNil) + envConfig, err := s.State.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(envConfig.AutomaticallyRetryHooks(), gc.Equals, automaticallyRetryHooks) +} + +func (s *retryStrategySuite) TestWatchRetryStrategyUnauthenticated(c *gc.C) { + svc, err := s.unit.Service() + c.Assert(err, jc.ErrorIsNil) + otherUnit := s.factory.MakeUnit(c, &jujufactory.UnitParams{Service: svc}) + args := params.Entities{Entities: []params.Entity{{otherUnit.Tag().String()}}} + + res, err := s.strategy.WatchRetryStrategy(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res.Results, gc.HasLen, 1) + c.Assert(res.Results[0].Error, gc.ErrorMatches, "permission denied") + c.Assert(res.Results[0].NotifyWatcherId, gc.Equals, "") +} + +func (s *retryStrategySuite) TestWatchRetryStrategyBadTag(c *gc.C) { + args := params.Entities{Entities: make([]params.Entity, len(tagsTests))} + for i, t := range tagsTests { + args.Entities[i] = params.Entity{Tag: t.tag} + } + res, err := s.strategy.WatchRetryStrategy(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res.Results, gc.HasLen, len(tagsTests)) + for i, r := range res.Results { + c.Logf("result %d", i) + c.Assert(r.Error, gc.ErrorMatches, tagsTests[i].expectedErr) + c.Assert(res.Results[i].NotifyWatcherId, gc.Equals, "") + } +} + +func (s *retryStrategySuite) TestWatchRetryStrategy(c *gc.C) { + c.Assert(s.resources.Count(), gc.Equals, 0) + + args := params.Entities{Entities: []params.Entity{ + {Tag: s.unit.UnitTag().String()}, + {Tag: "unit-foo-42"}, + }} + r, err := s.strategy.WatchRetryStrategy(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r, gc.DeepEquals, params.NotifyWatchResults{ + Results: []params.NotifyWatchResult{ + {NotifyWatcherId: "1"}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) + + c.Assert(s.resources.Count(), gc.Equals, 1) + resource := s.resources.Get("1") + defer statetesting.AssertStop(c, resource) + + wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher)) + wc.AssertNoChange() + + s.setRetryStrategy(c, false) + c.Assert(err, jc.ErrorIsNil) + wc.AssertOneChange() +} === modified file 'src/github.com/juju/juju/apiserver/root.go' --- src/github.com/juju/juju/apiserver/root.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/root.go 2016-03-22 15:18:22 +0000 @@ -42,26 +42,28 @@ // after it has logged in. It contains an rpc.MethodFinder which it // uses to dispatch Api calls appropriately. type apiHandler struct { - state *state.State - rpcConn *rpc.Conn - resources *common.Resources - entity state.Entity - // An empty envUUID means that the user has logged in through the - // root of the API server rather than the /environment/:env-uuid/api + state *state.State + rpcConn *rpc.Conn + resources *common.Resources + entity state.Entity + mongoUnavailable *uint32 + // An empty modelUUID means that the user has logged in through the + // root of the API server rather than the /model/:model-uuid/api // path, logins processed with v2 or later will only offer the - // user manager and environment manager api endpoints from here. - envUUID string + // user manager and model manager api endpoints from here. 
+ modelUUID string } var _ = (*apiHandler)(nil) // newApiHandler returns a new apiHandler. -func newApiHandler(srv *Server, st *state.State, rpcConn *rpc.Conn, reqNotifier *requestNotifier, envUUID string) (*apiHandler, error) { +func newApiHandler(srv *Server, st *state.State, rpcConn *rpc.Conn, reqNotifier *requestNotifier, modelUUID string) (*apiHandler, error) { r := &apiHandler{ - state: st, - resources: common.NewResources(), - rpcConn: rpcConn, - envUUID: envUUID, + state: st, + resources: common.NewResources(), + rpcConn: rpcConn, + modelUUID: modelUUID, + mongoUnavailable: &srv.mongoUnavailable, } if err := r.resources.RegisterNamed("machineID", common.StringResource(srv.tag.Id())); err != nil { return nil, errors.Trace(err) @@ -153,7 +155,7 @@ // For more information about how FindMethod should work, see rpc/server.go and // rpc/rpcreflect/value.go func (r *apiRoot) FindMethod(rootName string, version int, methodName string) (rpcreflect.MethodCaller, error) { - goType, objMethod, err := r.lookupMethod(rootName, version, methodName) + goType, objMethod, err := lookupMethod(rootName, version, methodName) if err != nil { return nil, err } @@ -209,7 +211,7 @@ }, nil } -func (r *apiRoot) lookupMethod(rootName string, version int, methodName string) (reflect.Type, rpcreflect.ObjMethod, error) { +func lookupMethod(rootName string, version int, methodName string) (reflect.Type, rpcreflect.ObjMethod, error) { noMethod := rpcreflect.ObjMethod{} goType, err := common.Facades.GetType(rootName, version) if err != nil { @@ -262,10 +264,9 @@ if api, ok := r.adminApis[version]; ok { return rpcreflect.ValueOf(reflect.ValueOf(api)).FindMethod(rootName, 0, methodName) } - return nil, &rpcreflect.CallNotImplementedError{ - RootMethod: rootName, - Method: methodName, - Version: version, + return nil, &rpc.RequestError{ + Code: params.CodeNotSupported, + Message: "this version of Juju does not support login from old clients", } } @@ -287,10 +288,10 @@ return r.entity.Tag() == tag } -// AuthEnvironManager returns whether the authenticated user is a +// AuthModelManager returns whether the authenticated user is a // machine with running the ManageEnviron job. 
-func (r *apiHandler) AuthEnvironManager() bool { - return isMachineWithJob(r.entity, state.JobManageEnviron) +func (r *apiHandler) AuthModelManager() bool { + return isMachineWithJob(r.entity, state.JobManageModel) } // AuthClient returns whether the authenticated entity is a client === modified file 'src/github.com/juju/juju/apiserver/root_test.go' --- src/github.com/juju/juju/apiserver/root_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/root_test.go 2016-03-22 15:18:22 +0000 @@ -28,7 +28,7 @@ var allowedDiscardedMethods = []string{ "AuthClient", - "AuthEnvironManager", + "AuthModelManager", "AuthMachineAgent", "AuthOwner", "AuthUnitAgent", @@ -84,22 +84,11 @@ func (s *errRootSuite) TestErrorRoot(c *gc.C) { origErr := fmt.Errorf("my custom error") errRoot := apiserver.NewErrRoot(origErr) - st, err := errRoot.Admin("") + st, err := errRoot.FindMethod("", 0, "") c.Check(st, gc.IsNil) c.Check(err, gc.Equals, origErr) } -func (s *errRootSuite) TestErrorRootViaRPC(c *gc.C) { - origErr := fmt.Errorf("my custom error") - errRoot := apiserver.NewErrRoot(origErr) - val := rpcreflect.ValueOf(reflect.ValueOf(errRoot)) - caller, err := val.FindMethod("Admin", 0, "Login") - c.Assert(err, jc.ErrorIsNil) - resp, err := caller.Call("", reflect.Value{}) - c.Check(err, gc.Equals, origErr) - c.Check(resp.IsValid(), jc.IsFalse) -} - type testingType struct{} func (testingType) Exposed() error { @@ -382,7 +371,7 @@ } clientVersions := asMap["Client"] c.Assert(len(clientVersions), jc.GreaterThan, 0) - c.Check(clientVersions[0], gc.Equals, 0) + c.Check(clientVersions[0], gc.Equals, 1) } type stubStateEntity struct{ tag names.Tag } === removed directory 'src/github.com/juju/juju/apiserver/rsyslog' === removed file 'src/github.com/juju/juju/apiserver/rsyslog/config.go' --- src/github.com/juju/juju/apiserver/rsyslog/config.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/rsyslog/config.go 1970-01-01 00:00:00 +0000 @@ -1,29 +0,0 @@ -package rsyslog - -import ( - apirsyslog "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/network" -) - -// newRsyslogConfig creates a new instance of the RsyslogConfig. -func newRsyslogConfig(envCfg *config.Config, api *RsyslogAPI) (*apirsyslog.RsyslogConfig, error) { - stateAddrsResult, err := api.StateAddresser.StateAddresses() - if err != nil { - return nil, err - } - port := envCfg.SyslogPort() - - apiHostPorts, err := network.ParseHostPorts(stateAddrsResult.Result...) - if err != nil { - return nil, err - } - apiAddresses := network.HostsWithoutPort(apiHostPorts) - - return &apirsyslog.RsyslogConfig{ - CACert: envCfg.RsyslogCACert(), - CAKey: envCfg.RsyslogCAKey(), - Port: port, - HostPorts: network.AddressesWithPort(apiAddresses, port), - }, nil -} === removed file 'src/github.com/juju/juju/apiserver/rsyslog/package_test.go' --- src/github.com/juju/juju/apiserver/rsyslog/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/rsyslog/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package rsyslog_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/apiserver/rsyslog/rsyslog.go' --- src/github.com/juju/juju/apiserver/rsyslog/rsyslog.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/rsyslog/rsyslog.go 1970-01-01 00:00:00 +0000 @@ -1,116 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog - -import ( - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cert" - "github.com/juju/juju/state" - "github.com/juju/juju/state/watcher" -) - -func init() { - common.RegisterStandardFacade("Rsyslog", 0, NewRsyslogAPI) -} - -// RsyslogAPI implements the API used by the rsyslog worker. -type RsyslogAPI struct { - *common.EnvironWatcher - - st *state.State - resources *common.Resources - authorizer common.Authorizer - StateAddresser *common.StateAddresser - canModify bool -} - -// NewRsyslogAPI creates a new instance of the Rsyslog API. -func NewRsyslogAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*RsyslogAPI, error) { - if !authorizer.AuthMachineAgent() && !authorizer.AuthUnitAgent() { - return nil, common.ErrPerm - } - return &RsyslogAPI{ - EnvironWatcher: common.NewEnvironWatcher(st, resources, authorizer), - st: st, - authorizer: authorizer, - resources: resources, - canModify: authorizer.AuthEnvironManager(), - StateAddresser: common.NewStateAddresser(st), - }, nil -} - -// SetRsyslogCert sets the rsyslog CACert. -func (api *RsyslogAPI) SetRsyslogCert(args params.SetRsyslogCertParams) (params.ErrorResult, error) { - var result params.ErrorResult - if !api.canModify { - result.Error = common.ServerError(common.ErrBadCreds) - return result, nil - } - if _, err := cert.ParseCert(string(args.CACert)); err != nil { - result.Error = common.ServerError(err) - return result, nil - } - - attrs := map[string]interface{}{ - "rsyslog-ca-cert": string(args.CACert), - "rsyslog-ca-key": string(args.CAKey), - } - if err := api.st.UpdateEnvironConfig(attrs, nil, nil); err != nil { - result.Error = common.ServerError(err) - } - return result, nil -} - -// GetRsyslogConfig returns a RsyslogConfigResult. -func (api *RsyslogAPI) GetRsyslogConfig(args params.Entities) (params.RsyslogConfigResults, error) { - result := params.RsyslogConfigResults{ - Results: make([]params.RsyslogConfigResult, len(args.Entities)), - } - cfg, err := api.st.EnvironConfig() - if err != nil { - return result, err - } - for i := range args.Entities { - rsyslogCfg, err := newRsyslogConfig(cfg, api) - if err == nil { - result.Results[i] = params.RsyslogConfigResult{ - CACert: rsyslogCfg.CACert, - CAKey: rsyslogCfg.CAKey, - Port: rsyslogCfg.Port, - HostPorts: params.FromNetworkHostPorts(rsyslogCfg.HostPorts), - } - } else { - result.Results[i].Error = common.ServerError(err) - } - } - return result, nil -} - -// WatchForRsyslogChanges starts a watcher to track if there are changes -// that require we update the rsyslog.d configurations for a machine and/or unit. 
-func (api *RsyslogAPI) WatchForRsyslogChanges(args params.Entities) (params.NotifyWatchResults, error) { - result := params.NotifyWatchResults{ - Results: make([]params.NotifyWatchResult, len(args.Entities)), - } - for i := range args.Entities { - err := common.ErrPerm - if api.authorizer.AuthMachineAgent() || api.authorizer.AuthUnitAgent() { - watch := api.st.WatchAPIHostPorts() - // Consume the initial event. Technically, API - // calls to Watch 'transmit' the initial event - // in the Watch response. But NotifyWatchers - // have no state to transmit. - if _, ok := <-watch.Changes(); ok { - result.Results[i].NotifyWatcherId = api.resources.Register(watch) - err = nil - } else { - err = watcher.EnsureErr(watch) - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil - -} === removed file 'src/github.com/juju/juju/apiserver/rsyslog/rsyslog_test.go' --- src/github.com/juju/juju/apiserver/rsyslog/rsyslog_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/rsyslog/rsyslog_test.go 1970-01-01 00:00:00 +0000 @@ -1,119 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog_test - -import ( - "encoding/pem" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - apirsyslog "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/apiserver/common" - commontesting "github.com/juju/juju/apiserver/common/testing" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/apiserver/rsyslog" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" -) - -type rsyslogSuite struct { - testing.JujuConnSuite - *commontesting.EnvironWatcherTest - authorizer apiservertesting.FakeAuthorizer - resources *common.Resources - rsyslog *rsyslog.RsyslogAPI -} - -var _ = gc.Suite(&rsyslogSuite{}) - -func (s *rsyslogSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: names.NewMachineTag("1"), - EnvironManager: false, - } - s.resources = common.NewResources() - s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) - api, err := rsyslog.NewRsyslogAPI(s.State, s.resources, s.authorizer) - c.Assert(err, jc.ErrorIsNil) - s.EnvironWatcherTest = commontesting.NewEnvironWatcherTest( - api, s.State, s.resources, commontesting.NoSecrets) -} - -func verifyRsyslogCACert(c *gc.C, st *apirsyslog.State, expectedCA, expectedKey string) { - cfg, err := st.GetRsyslogConfig("foo") - c.Assert(err, jc.ErrorIsNil) - c.Assert(cfg.CACert, gc.DeepEquals, expectedCA) - c.Assert(cfg.CAKey, gc.DeepEquals, expectedKey) -} - -func (s *rsyslogSuite) TestSetRsyslogCert(c *gc.C) { - st, m := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - err := m.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - err = st.Rsyslog().SetRsyslogCert(coretesting.CACert, coretesting.CAKey) - c.Assert(err, jc.ErrorIsNil) - verifyRsyslogCACert(c, st.Rsyslog(), coretesting.CACert, coretesting.CAKey) -} - -func (s *rsyslogSuite) TestSetRsyslogCertNil(c *gc.C) { - st, m := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - err := m.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - err = st.Rsyslog().SetRsyslogCert("", "") - c.Assert(err, gc.ErrorMatches, "no certificates found") - verifyRsyslogCACert(c, st.Rsyslog(), "", "") -} 
- -func (s *rsyslogSuite) TestSetRsyslogCertInvalid(c *gc.C) { - st, m := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - err := m.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - err = st.Rsyslog().SetRsyslogCert(string(pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: []byte("not a valid certificate"), - })), "") - c.Assert(err, gc.ErrorMatches, ".*structure error.*") - verifyRsyslogCACert(c, st.Rsyslog(), "", "") -} - -func (s *rsyslogSuite) TestSetRsyslogCertPerms(c *gc.C) { - // create a machine-0 so we have an addresss to log to - m, err := s.State.AddMachine("trusty", state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) - err = m.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - unitState, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - err = unitState.Rsyslog().SetRsyslogCert(coretesting.CACert, coretesting.CAKey) - c.Assert(err, gc.ErrorMatches, "invalid entity name or password") - c.Assert(err, jc.Satisfies, params.IsCodeUnauthorized) - // Verify no change was effected. - verifyRsyslogCACert(c, unitState.Rsyslog(), "", "") -} - -func (s *rsyslogSuite) TestUpgraderAPIAllowsUnitAgent(c *gc.C) { - anAuthorizer := s.authorizer - anAuthorizer.Tag = names.NewUnitTag("seven/9") - anUpgrader, err := rsyslog.NewRsyslogAPI(s.State, s.resources, anAuthorizer) - c.Check(err, jc.ErrorIsNil) - c.Check(anUpgrader, gc.NotNil) -} - -func (s *rsyslogSuite) TestUpgraderAPIRefusesNonUnitNonMachineAgent(c *gc.C) { - anAuthorizer := s.authorizer - anAuthorizer.Tag = names.NewServiceTag("hadoop") - anUpgrader, err := rsyslog.NewRsyslogAPI(s.State, s.resources, anAuthorizer) - c.Check(err, gc.NotNil) - c.Check(anUpgrader, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} === modified file 'src/github.com/juju/juju/apiserver/server_test.go' --- src/github.com/juju/juju/apiserver/server_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/server_test.go 2016-03-22 15:18:22 +0000 @@ -9,21 +9,28 @@ "fmt" "io" "net" - stdtesting "testing" + "net/http" "time" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" "golang.org/x/net/websocket" gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" + "gopkg.in/macaroon-bakery.v1/httpbakery" "github.com/juju/juju/api" "github.com/juju/juju/apiserver" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cert" + "github.com/juju/juju/environs/config" jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/rpc" "github.com/juju/juju/state" @@ -32,10 +39,6 @@ "github.com/juju/juju/testing/factory" ) -func TestAll(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - var fastDialOpts = api.DialOpts{} type serverSuite struct { @@ -47,14 +50,7 @@ func (s *serverSuite) TestStop(c *gc.C) { // Start our own instance of the server so we have // a handle on it to stop it. 
- listener, err := net.Listen("tcp", ":0") - c.Assert(err, jc.ErrorIsNil) - srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{ - Cert: []byte(coretesting.ServerCert), - Key: []byte(coretesting.ServerKey), - Tag: names.NewMachineTag("0"), - }) - c.Assert(err, jc.ErrorIsNil) + srv := newServer(c, s.State) defer srv.Stop() machine, password := s.Factory.MakeMachineReturningPassword( @@ -65,12 +61,12 @@ // Note we can't use openAs because we're not connecting to apiInfo := &api.Info{ - Tag: machine.Tag(), - Password: password, - Nonce: "fake_nonce", - Addrs: []string{address}, - CACert: coretesting.CACert, - EnvironTag: s.State.EnvironTag(), + Tag: machine.Tag(), + Password: password, + Nonce: "fake_nonce", + Addrs: []string{address}, + CACert: coretesting.CACert, + ModelTag: s.State.ModelTag(), } st, err := api.Open(apiInfo, fastDialOpts) c.Assert(err, jc.ErrorIsNil) @@ -101,14 +97,7 @@ // Start our own instance of the server listening on // both IPv4 and IPv6 localhost addresses and an ephemeral port. - listener, err := net.Listen("tcp", ":0") - c.Assert(err, jc.ErrorIsNil) - srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{ - Cert: []byte(coretesting.ServerCert), - Key: []byte(coretesting.ServerKey), - Tag: names.NewMachineTag("0"), - }) - c.Assert(err, jc.ErrorIsNil) + srv := newServer(c, s.State) defer srv.Stop() port := srv.Addr().Port @@ -119,12 +108,12 @@ // Now connect twice - using IPv4 and IPv6 endpoints. apiInfo := &api.Info{ - Tag: machine.Tag(), - Password: password, - Nonce: "fake_nonce", - Addrs: []string{net.JoinHostPort("127.0.0.1", portString)}, - CACert: coretesting.CACert, - EnvironTag: s.State.EnvironTag(), + Tag: machine.Tag(), + Password: password, + Nonce: "fake_nonce", + Addrs: []string{net.JoinHostPort("127.0.0.1", portString)}, + CACert: coretesting.CACert, + ModelTag: s.State.ModelTag(), } ipv4State, err := api.Open(apiInfo, fastDialOpts) c.Assert(err, jc.ErrorIsNil) @@ -154,7 +143,7 @@ assertNotProvisioned := func(err error) { c.Assert(err, gc.NotNil) c.Assert(err, jc.Satisfies, params.IsCodeNotProvisioned) - c.Assert(err, gc.ErrorMatches, `machine \d+ not provisioned`) + c.Assert(err, gc.ErrorMatches, `machine \d+ not provisioned \(not provisioned\)`) } machine, password := s.Factory.MakeMachineReturningPassword( @@ -197,6 +186,27 @@ c.Assert(st, gc.IsNil) } +func (s *serverSuite) TestNewServerDoesNotAccessState(c *gc.C) { + mongoInfo := s.MongoInfo(c) + + proxy := testing.NewTCPProxy(c, mongoInfo.Addrs[0]) + mongoInfo.Addrs = []string{proxy.Addr()} + + st, err := state.Open(s.State.ModelTag(), mongoInfo, mongo.DefaultDialOpts(), nil) + c.Assert(err, gc.IsNil) + defer st.Close() + + // Now close the proxy so that any attempts to use the + // controller will fail. + proxy.Close() + + // Creating the server should succeed because it doesn't + // access the state (note that newServer does not log in, + // which *would* access the state). + srv := newServer(c, st) + srv.Stop() +} + func (s *serverSuite) TestMachineLoginStartsPinger(c *gc.C) { // This is the same steps as OpenAPIAsNewMachine but we need to assert // the agent is not alive before we actually open the API. 
@@ -273,14 +283,7 @@ func (s *serverSuite) TestMinTLSVersion(c *gc.C) { loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) - listener, err := net.Listen("tcp", ":0") - c.Assert(err, jc.ErrorIsNil) - srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{ - Cert: []byte(coretesting.ServerCert), - Key: []byte(coretesting.ServerKey), - Tag: names.NewMachineTag("0"), - }) - c.Assert(err, jc.ErrorIsNil) + srv := newServer(c, s.State) defer srv.Stop() // We have to use 'localhost' because that is what the TLS cert says. @@ -293,17 +296,10 @@ } func (s *serverSuite) TestNonCompatiblePathsAre404(c *gc.C) { - // we expose the API at '/' for compatibility, and at '/ENVUUID/api' + // we expose the API at '/' for compatibility, and at '/ModelUUID/api' // for the correct location, but other Paths should fail. loggo.GetLogger("juju.apiserver").SetLogLevel(loggo.TRACE) - listener, err := net.Listen("tcp", ":0") - c.Assert(err, jc.ErrorIsNil) - srv, err := apiserver.NewServer(s.State, listener, apiserver.ServerConfig{ - Cert: []byte(coretesting.ServerCert), - Key: []byte(coretesting.ServerKey), - Tag: names.NewMachineTag("0"), - }) - c.Assert(err, jc.ErrorIsNil) + srv := newServer(c, s.State) defer srv.Stop() // We have to use 'localhost' because that is what the TLS cert says. @@ -312,8 +308,8 @@ conn, err := dialWebsocket(c, addr, "/", 0) c.Assert(err, jc.ErrorIsNil) conn.Close() - // '/environment/ENVIRONUUID/api' should be fine - conn, err = dialWebsocket(c, addr, "/environment/dead-beef-123456/api", 0) + // '/model/MODELUUID/api' should be fine + conn, err = dialWebsocket(c, addr, "/model/dead-beef-123456/api", 0) c.Assert(err, jc.ErrorIsNil) conn.Close() @@ -326,6 +322,111 @@ c.Assert(conn, gc.IsNil) } +func (s *serverSuite) TestNoBakeryWhenNoIdentityURL(c *gc.C) { + srv := newServer(c, s.State) + defer srv.Stop() + // By default, when there is no identity location, no + // bakery service or macaroon is created. + _, err := apiserver.ServerMacaroon(srv) + c.Assert(err, gc.ErrorMatches, "macaroon authentication is not configured") + _, err = apiserver.ServerBakeryService(srv) + c.Assert(err, gc.ErrorMatches, "macaroon authentication is not configured") +} + +type macaroonServerSuite struct { + jujutesting.JujuConnSuite + discharger *bakerytest.Discharger +} + +var _ = gc.Suite(&macaroonServerSuite{}) + +func (s *macaroonServerSuite) SetUpTest(c *gc.C) { + s.discharger = bakerytest.NewDischarger(nil, noCheck) + s.ConfigAttrs = map[string]interface{}{ + config.IdentityURL: s.discharger.Location(), + } + s.JujuConnSuite.SetUpTest(c) +} + +func (s *macaroonServerSuite) TearDownTest(c *gc.C) { + s.discharger.Close() + s.JujuConnSuite.TearDownTest(c) +} + +func (s *macaroonServerSuite) TestServerBakery(c *gc.C) { + srv := newServer(c, s.State) + defer srv.Stop() + m, err := apiserver.ServerMacaroon(srv) + c.Assert(err, gc.IsNil) + bsvc, err := apiserver.ServerBakeryService(srv) + c.Assert(err, gc.IsNil) + + // Check that we can add a third party caveat addressed to the + // discharger, which indirectly ensures that the discharger's public + // key has been added to the bakery service's locator. + m = m.Clone() + err = bsvc.AddCaveat(m, checkers.Caveat{ + Location: s.discharger.Location(), + Condition: "true", + }) + c.Assert(err, jc.ErrorIsNil) + + // Check that we can discharge the macaroon and check it with + // the service. 
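+	// (DischargeAll walks the macaroon's third-party caveats, obtains a
+	// discharge macaroon for each from the discharger named in the caveat's
+	// location, and binds the discharges to the root macaroon; Check then
+	// verifies the whole slice against the bakery service.)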
+ client := httpbakery.NewClient() + ms, err := client.DischargeAll(m) + c.Assert(err, jc.ErrorIsNil) + + err = bsvc.Check(ms, checkers.New()) + c.Assert(err, gc.IsNil) +} + +type macaroonServerWrongPublicKeySuite struct { + jujutesting.JujuConnSuite + discharger *bakerytest.Discharger +} + +var _ = gc.Suite(&macaroonServerWrongPublicKeySuite{}) + +func (s *macaroonServerWrongPublicKeySuite) SetUpTest(c *gc.C) { + s.discharger = bakerytest.NewDischarger(nil, noCheck) + wrongKey, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + s.ConfigAttrs = map[string]interface{}{ + config.IdentityURL: s.discharger.Location(), + config.IdentityPublicKey: wrongKey.Public.String(), + } + s.JujuConnSuite.SetUpTest(c) +} + +func (s *macaroonServerWrongPublicKeySuite) TearDownTest(c *gc.C) { + s.discharger.Close() + s.JujuConnSuite.TearDownTest(c) +} + +func (s *macaroonServerWrongPublicKeySuite) TestDischargeFailsWithWrongPublicKey(c *gc.C) { + srv := newServer(c, s.State) + defer srv.Stop() + m, err := apiserver.ServerMacaroon(srv) + c.Assert(err, gc.IsNil) + m = m.Clone() + bsvc, err := apiserver.ServerBakeryService(srv) + c.Assert(err, gc.IsNil) + err = bsvc.AddCaveat(m, checkers.Caveat{ + Location: s.discharger.Location(), + Condition: "true", + }) + c.Assert(err, gc.IsNil) + client := httpbakery.NewClient() + + _, err = client.DischargeAll(m) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": third party refused discharge: cannot discharge: discharger cannot decode caveat id: public key mismatch`) +} + +func noCheck(req *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return nil, nil +} + type fakeResource struct { stopped bool } @@ -334,3 +435,37 @@ r.stopped = true return nil } + +func (s *serverSuite) TestApiHandlerTeardownInitialEnviron(c *gc.C) { + s.checkApiHandlerTeardown(c, s.State, s.State) +} + +func (s *serverSuite) TestApiHandlerTeardownOtherEnviron(c *gc.C) { + otherState := s.Factory.MakeModel(c, nil) + defer otherState.Close() + s.checkApiHandlerTeardown(c, s.State, otherState) +} + +func (s *serverSuite) checkApiHandlerTeardown(c *gc.C, srvSt, st *state.State) { + handler, resources := apiserver.TestingApiHandler(c, srvSt, st) + resource := new(fakeResource) + resources.Register(resource) + + c.Assert(resource.stopped, jc.IsFalse) + handler.Kill() + c.Assert(resource.stopped, jc.IsTrue) +} + +// newServer returns a new running API server. +func newServer(c *gc.C, st *state.State) *apiserver.Server { + listener, err := net.Listen("tcp", ":0") + c.Assert(err, jc.ErrorIsNil) + srv, err := apiserver.NewServer(st, listener, apiserver.ServerConfig{ + Cert: []byte(coretesting.ServerCert), + Key: []byte(coretesting.ServerKey), + Tag: names.NewMachineTag("0"), + LogDir: c.MkDir(), + }) + c.Assert(err, jc.ErrorIsNil) + return srv +} === modified file 'src/github.com/juju/juju/apiserver/service/charmstore.go' --- src/github.com/juju/juju/apiserver/service/charmstore.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/service/charmstore.go 2016-03-22 15:18:22 +0000 @@ -11,10 +11,10 @@ "github.com/juju/errors" "github.com/juju/utils" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - "gopkg.in/juju/charmstore.v4/csclient" - "gopkg.in/macaroon-bakery.v0/httpbakery" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + "gopkg.in/macaroon-bakery.v1/httpbakery" "gopkg.in/macaroon.v1" "github.com/juju/juju/apiserver/params" @@ -26,7 +26,11 @@ // to use interfaces. 
// NewCharmStore instantiates a new charm store repository. // It is defined at top level for testing purposes. -var NewCharmStore = charmrepo.NewCharmStore +var NewCharmStore = newCharmStore + +func newCharmStore(p charmrepo.NewCharmStoreParams) charmrepo.Interface { + return charmrepo.NewCharmStore(p) +} // AddCharmWithAuthorization adds the given charm URL (which must include revision) to // the environment, if it does not exist yet. Local charms are not @@ -57,7 +61,7 @@ } // Get the charm and its information from the store. - envConfig, err := st.EnvironConfig() + envConfig, err := st.ModelConfig() if err != nil { return err } @@ -120,7 +124,7 @@ // StoreCharmArchive stores a charm archive in environment storage. func StoreCharmArchive(st *state.State, curl *charm.URL, ch charm.Charm, r io.Reader, size int64, sha256 string) error { - storage := newStateStorage(st.EnvironUUID(), st.MongoSession()) + storage := newStateStorage(st.ModelUUID(), st.MongoSession()) storagePath, err := charmArchiveStoragePath(curl) if err != nil { return errors.Annotate(err, "cannot generate charm archive name") @@ -167,7 +171,7 @@ func ResolveCharms(st *state.State, args params.ResolveCharms) (params.ResolveCharmResults, error) { var results params.ResolveCharmResults - envConfig, err := st.EnvironConfig() + envConfig, err := st.ModelConfig() if err != nil { return params.ResolveCharmResults{}, err } @@ -188,15 +192,18 @@ return results, nil } -func resolveCharm(ref *charm.Reference, repo charmrepo.Interface) (*charm.URL, error) { +func resolveCharm(ref *charm.URL, repo charmrepo.Interface) (*charm.URL, error) { if ref.Schema != "cs" { return nil, fmt.Errorf("only charm store charm references are supported, with cs: schema") } // Resolve the charm location with the repository. - curl, err := repo.Resolve(ref) + resolved, _, err := repo.Resolve(ref) if err != nil { return nil, err } - return curl.WithRevision(ref.Revision), nil + if resolved.Series == "" { + return nil, errors.Errorf("no series found in charm URL %q", resolved) + } + return resolved.WithRevision(ref.Revision), nil } === added file 'src/github.com/juju/juju/apiserver/service/get.go' --- src/github.com/juju/juju/apiserver/service/get.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/service/get.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package service + +import ( + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/constraints" +) + +// Get returns the configuration for a service. 
+func (api *API) Get(args params.ServiceGet) (params.ServiceGetResults, error) { + service, err := api.state.Service(args.ServiceName) + if err != nil { + return params.ServiceGetResults{}, err + } + settings, err := service.ConfigSettings() + if err != nil { + return params.ServiceGetResults{}, err + } + charm, _, err := service.Charm() + if err != nil { + return params.ServiceGetResults{}, err + } + configInfo := describe(settings, charm.Config()) + var constraints constraints.Value + if service.IsPrincipal() { + constraints, err = service.Constraints() + if err != nil { + return params.ServiceGetResults{}, err + } + } + return params.ServiceGetResults{ + Service: args.ServiceName, + Charm: charm.Meta().Name, + Config: configInfo, + Constraints: constraints, + }, nil +} + +func describe(settings charm.Settings, config *charm.Config) map[string]interface{} { + results := make(map[string]interface{}) + for name, option := range config.Options { + info := map[string]interface{}{ + "description": option.Description, + "type": option.Type, + } + if value := settings[name]; value != nil { + info["value"] = value + } else { + if option.Default != nil { + info["value"] = option.Default + } + info["default"] = true + } + results[name] = info + } + return results +} === added file 'src/github.com/juju/juju/apiserver/service/get_test.go' --- src/github.com/juju/juju/apiserver/service/get_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/service/get_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,209 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package service_test + +import ( + "fmt" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + apiservice "github.com/juju/juju/api/service" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/apiserver/service" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/constraints" + jujutesting "github.com/juju/juju/juju/testing" +) + +type getSuite struct { + jujutesting.JujuConnSuite + + serviceApi *service.API + authorizer apiservertesting.FakeAuthorizer +} + +var _ = gc.Suite(&getSuite{}) + +func (s *getSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.AdminUserTag(c), + } + var err error + s.serviceApi, err = service.NewAPI(s.State, nil, s.authorizer) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *getSuite) TestClientServiceGetSmoketest(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + results, err := s.serviceApi.Get(params.ServiceGet{"wordpress"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, params.ServiceGetResults{ + Service: "wordpress", + Charm: "wordpress", + Config: map[string]interface{}{ + "blog-title": map[string]interface{}{ + "type": "string", + "value": "My Title", + "description": "A descriptive title used for the blog.", + "default": true, + }, + }, + }) +} + +func (s *getSuite) TestServiceGetUnknownService(c *gc.C) { + _, err := s.serviceApi.Get(params.ServiceGet{"unknown"}) + c.Assert(err, gc.ErrorMatches, `service "unknown" not found`) +} + +var getTests = []struct { + about string + charm string + constraints string + config charm.Settings + expect params.ServiceGetResults +}{{ + about: "deployed service", + charm: "dummy", + constraints: "mem=2G cpu-power=400", + config: charm.Settings{ + // Different from default. 
+ "title": "Look To Windward", + // Same as default. + "username": "admin001", + // Use default (but there's no charm default). + "skill-level": nil, + // Outlook is left unset. + }, + expect: params.ServiceGetResults{ + Config: map[string]interface{}{ + "title": map[string]interface{}{ + "description": "A descriptive title used for the service.", + "type": "string", + "value": "Look To Windward", + }, + "outlook": map[string]interface{}{ + "description": "No default outlook.", + "type": "string", + "default": true, + }, + "username": map[string]interface{}{ + "description": "The name of the initial account (given admin permissions).", + "type": "string", + "value": "admin001", + }, + "skill-level": map[string]interface{}{ + "description": "A number indicating skill.", + "type": "int", + "default": true, + }, + }, + }, +}, { + about: "deployed service #2", + charm: "dummy", + config: charm.Settings{ + // Set title to default. + "title": nil, + // Value when there's a default. + "username": "foobie", + // Numeric value. + "skill-level": 0, + // String value. + "outlook": "phlegmatic", + }, + expect: params.ServiceGetResults{ + Config: map[string]interface{}{ + "title": map[string]interface{}{ + "description": "A descriptive title used for the service.", + "type": "string", + "value": "My Title", + "default": true, + }, + "outlook": map[string]interface{}{ + "description": "No default outlook.", + "type": "string", + "value": "phlegmatic", + }, + "username": map[string]interface{}{ + "description": "The name of the initial account (given admin permissions).", + "type": "string", + "value": "foobie", + }, + "skill-level": map[string]interface{}{ + "description": "A number indicating skill.", + "type": "int", + // TODO(jam): 2013-08-28 bug #1217742 + // we have to use float64() here, because the + // API does not preserve int types. This used + // to be int64() but we end up with a type + // mismatch when comparing the content + "value": float64(0), + }, + }, + }, +}, { + about: "subordinate service", + charm: "logging", + expect: params.ServiceGetResults{ + Config: map[string]interface{}{}, + }, +}} + +func (s *getSuite) TestServiceGet(c *gc.C) { + for i, t := range getTests { + c.Logf("test %d. %s", i, t.about) + ch := s.AddTestingCharm(c, t.charm) + svc := s.AddTestingService(c, fmt.Sprintf("test%d", i), ch) + + var constraintsv constraints.Value + if t.constraints != "" { + constraintsv = constraints.MustParse(t.constraints) + err := svc.SetConstraints(constraintsv) + c.Assert(err, jc.ErrorIsNil) + } + if t.config != nil { + err := svc.UpdateConfigSettings(t.config) + c.Assert(err, jc.ErrorIsNil) + } + expect := t.expect + expect.Constraints = constraintsv + expect.Service = svc.Name() + expect.Charm = ch.Meta().Name + client := apiservice.NewClient(s.APIState) + got, err := client.Get(svc.Name()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(*got, gc.DeepEquals, expect) + } +} + +func (s *getSuite) TestGetMaxResolutionInt(c *gc.C) { + // See the bug http://pad.lv/1217742 + // Get ends up pushing a map[string]interface{} which contains + // an int64 through a JSON Marshal & Unmarshal which ends up changing + // the int64 into a float64. We will fix it if we find it is actually a + // problem.
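The comment above describes a genuine property of encoding/json rather than anything specific to Juju: JSON has no integer type, so numbers decoded into an interface{} come back as float64, and float64's 53-bit mantissa cannot represent every int64. A standalone demonstration of the round-trip loss that the constants below pin down:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// 2^54 + 1 is representable as an int64 but not as a float64.
	const nonFloatInt = (int64(1) << 54) + 1

	b, err := json.Marshal(map[string]interface{}{"skill-level": nonFloatInt})
	if err != nil {
		panic(err)
	}
	var out map[string]interface{}
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	// With an interface{} target, encoding/json decodes all numbers
	// as float64, so the +1 is rounded away in transit.
	got := out["skill-level"].(float64)
	fmt.Println(int64(got) == nonFloatInt) // false
}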
+ const nonFloatInt = (int64(1) << 54) + 1 + const asFloat = float64(nonFloatInt) + c.Assert(int64(asFloat), gc.Not(gc.Equals), nonFloatInt) + c.Assert(int64(asFloat)+1, gc.Equals, nonFloatInt) + + ch := s.AddTestingCharm(c, "dummy") + svc := s.AddTestingService(c, "test-service", ch) + + err := svc.UpdateConfigSettings(map[string]interface{}{"skill-level": nonFloatInt}) + c.Assert(err, jc.ErrorIsNil) + client := apiservice.NewClient(s.APIState) + got, err := client.Get(svc.Name()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(got.Config["skill-level"], jc.DeepEquals, map[string]interface{}{ + "description": "A number indicating skill.", + "type": "int", + "value": asFloat, + }) +} === modified file 'src/github.com/juju/juju/apiserver/service/service.go' --- src/github.com/juju/juju/apiserver/service/service.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/service/service.go 2016-03-22 15:18:22 +0000 @@ -10,10 +10,12 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/instance" jjj "github.com/juju/juju/juju" "github.com/juju/juju/state" statestorage "github.com/juju/juju/state/storage" @@ -26,7 +28,7 @@ ) func init() { - common.RegisterStandardFacade("Service", 1, NewAPI) + common.RegisterStandardFacade("Service", 3, NewAPI) } // Service defines the methods on the service API end point. @@ -81,14 +83,9 @@ return result, nil } -// ServicesDeploy fetches the charms from the charm store and deploys them. -func (api *API) ServicesDeploy(args params.ServicesDeploy) (params.ErrorResults, error) { - return api.ServicesDeployWithPlacement(args) -} - -// ServicesDeployWithPlacement fetches the charms from the charm store and deploys them +// Deploy fetches the charms from the charm store and deploys them // using the specified placement directives. -func (api *API) ServicesDeployWithPlacement(args params.ServicesDeploy) (params.ErrorResults, error) { +func (api *API) Deploy(args params.ServicesDeploy) (params.ErrorResults, error) { result := params.ErrorResults{ Results: make([]params.ErrorResult, len(args.Services)), } @@ -97,7 +94,7 @@ } owner := api.authorizer.GetAuthTag().String() for i, arg := range args.Services { - err := DeployService(api.state, owner, arg) + err := deployService(api.state, owner, arg) result.Results[i].Error = common.ServerError(err) } return result, nil @@ -106,7 +103,7 @@ // DeployService fetches the charm from the charm store and deploys it. // The logic has been factored out into a common function which is called by // both the legacy API on the client facade, as well as the new service facade. -func DeployService(st *state.State, owner string, args params.ServiceDeploy) error { +func deployService(st *state.State, owner string, args params.ServiceDeploy) error { curl, err := charm.ParseURL(args.CharmUrl) if err != nil { return errors.Trace(err) @@ -116,10 +113,13 @@ } // Do a quick but not complete validation check before going any further. 
- if len(args.Placement) == 0 && args.ToMachineSpec != "" && names.IsValidMachine(args.ToMachineSpec) { - _, err = st.Machine(args.ToMachineSpec) + for _, p := range args.Placement { + if p.Scope != instance.MachineScope { + continue + } + _, err = st.Machine(p.Directive) if err != nil { - return errors.Annotatef(err, `cannot deploy "%v" to machine %v`, args.ServiceName, args.ToMachineSpec) + return errors.Annotatef(err, `cannot deploy "%v" to machine %v`, args.ServiceName, p.Directive) } } @@ -159,18 +159,20 @@ _, err = jjj.DeployService(st, jjj.DeployServiceParams{ ServiceName: args.ServiceName, + Series: args.Series, // TODO(dfc) ServiceOwner should be a tag - ServiceOwner: owner, - Charm: ch, - NumUnits: args.NumUnits, - ConfigSettings: settings, - Constraints: args.Constraints, - ToMachineSpec: args.ToMachineSpec, - Placement: args.Placement, - Networks: requestedNetworks, - Storage: args.Storage, + ServiceOwner: owner, + Charm: ch, + NumUnits: args.NumUnits, + ConfigSettings: settings, + Constraints: args.Constraints, + Placement: args.Placement, + Networks: requestedNetworks, + Storage: args.Storage, + EndpointBindings: args.EndpointBindings, + Resources: args.Resources, }) - return err + return errors.Trace(err) } // ServiceSetSettingsStrings updates the settings for the given service, @@ -178,12 +180,12 @@ func ServiceSetSettingsStrings(service *state.Service, settings map[string]string) error { ch, _, err := service.Charm() if err != nil { - return err + return errors.Trace(err) } // Parse config in a compatible way (see function comment). changes, err := parseSettingsCompatible(ch, settings) if err != nil { - return err + return errors.Trace(err) } return service.UpdateConfigSettings(changes) } @@ -232,3 +234,360 @@ } return changes, nil } + +// Update updates the service attributes, including charm URL, +// minimum number of units, settings and constraints. +// All parameters in params.ServiceUpdate except the service name are optional. +func (api *API) Update(args params.ServiceUpdate) error { + if !args.ForceCharmUrl { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + } + svc, err := api.state.Service(args.ServiceName) + if err != nil { + return errors.Trace(err) + } + // Set the charm for the given service. + if args.CharmUrl != "" { + if err = api.serviceSetCharm(svc, args.CharmUrl, args.ForceSeries, args.ForceCharmUrl, nil); err != nil { + return errors.Trace(err) + } + } + // Set the minimum number of units for the given service. + if args.MinUnits != nil { + if err = svc.SetMinUnits(*args.MinUnits); err != nil { + return errors.Trace(err) + } + } + // Set up service's settings. + if args.SettingsYAML != "" { + if err = serviceSetSettingsYAML(svc, args.SettingsYAML); err != nil { + return errors.Annotate(err, "setting configuration from YAML") + } + } else if len(args.SettingsStrings) > 0 { + if err = ServiceSetSettingsStrings(svc, args.SettingsStrings); err != nil { + return errors.Trace(err) + } + } + // Update service's constraints. + if args.Constraints != nil { + return svc.SetConstraints(*args.Constraints) + } + return nil +} + +// SetCharm sets the charm for a given service. 
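The reworked pre-check at the top of this hunk walks the placement directives and validates only the machine-scoped ones against existing machines, replacing the old single ToMachineSpec field. A rough standalone sketch of that filtering; the Placement shape mirrors what the code above uses, and machineScope merely stands in for instance.MachineScope:

package main

import "fmt"

// Placement pairs a scope with a directive, as in the hunk above.
type Placement struct {
	Scope     string
	Directive string
}

// machineScope is a stand-in for instance.MachineScope.
const machineScope = "#"

// checkMachinePlacements fails fast when a machine-scoped directive
// names an unknown machine; other scopes are resolved later.
func checkMachinePlacements(existing map[string]bool, placements []Placement) error {
	for _, p := range placements {
		if p.Scope != machineScope {
			continue
		}
		if !existing[p.Directive] {
			return fmt.Errorf("machine %s not found", p.Directive)
		}
	}
	return nil
}

func main() {
	machines := map[string]bool{"0": true}
	fmt.Println(checkMachinePlacements(machines, []Placement{{machineScope, "42"}}))
}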
+func (api *API) SetCharm(args params.ServiceSetCharm) error { + // When the change is forced (e.g. units are in an error state), don't block it. + if !args.ForceUnits { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + } + service, err := api.state.Service(args.ServiceName) + if err != nil { + return errors.Trace(err) + } + return api.serviceSetCharm(service, args.CharmUrl, args.ForceSeries, args.ForceUnits, args.ResourceIDs) +} + +// serviceSetCharm sets the charm for the given service. +func (api *API) serviceSetCharm(service *state.Service, url string, forceSeries, forceUnits bool, resourceIDs map[string]string) error { + curl, err := charm.ParseURL(url) + if err != nil { + return errors.Trace(err) + } + sch, err := api.state.Charm(curl) + if err != nil { + return errors.Trace(err) + } + cfg := state.SetCharmConfig{ + Charm: sch, + ForceSeries: forceSeries, + ForceUnits: forceUnits, + ResourceIDs: resourceIDs, + } + return service.SetCharm(cfg) +} + +// settingsFromGetYaml parses YAML produced by juju get and generates +// charm.Settings from it that can then be sent to the service. +func settingsFromGetYaml(yamlContents map[string]interface{}) (charm.Settings, error) { + onlySettings := charm.Settings{} + settingsMap, ok := yamlContents["settings"].(map[interface{}]interface{}) + if !ok { + return nil, errors.New("unknown format for settings") + } + + for setting := range settingsMap { + s, ok := settingsMap[setting].(map[interface{}]interface{}) + if !ok { + return nil, errors.Errorf("unknown format for settings section %v", setting) + } + // Some keys might not have a value; we don't care about those. + v, ok := s["value"] + if !ok { + continue + } + stringSetting, ok := setting.(string) + if !ok { + return nil, errors.Errorf("unexpected setting key, expected string got %T", setting) + } + onlySettings[stringSetting] = v + } + return onlySettings, nil +} + +// serviceSetSettingsYAML updates the settings for the given service, +// taking the configuration from a YAML string. +func serviceSetSettingsYAML(service *state.Service, settings string) error { + b := []byte(settings) + var all map[string]interface{} + if err := goyaml.Unmarshal(b, &all); err != nil { + return errors.Annotate(err, "parsing settings data") + } + // If the YAML is not keyed by the service name, assume it is the output of juju get and extract the nested values. + if _, ok := all[service.Name()]; !ok { + changes, err := settingsFromGetYaml(all) + if err != nil { + return errors.Annotate(err, "processing YAML generated by get") + } + return errors.Annotate(service.UpdateConfigSettings(changes), "updating settings with service YAML") + } + + ch, _, err := service.Charm() + if err != nil { + return errors.Annotate(err, "obtaining charm for this service") + } + + changes, err := ch.Config().ParseSettingsYAML(b, service.Name()) + if err != nil { + return errors.Annotate(err, "creating config from YAML") + } + return errors.Annotate(service.UpdateConfigSettings(changes), "updating settings") +} + +// GetCharmURL returns the charm URL the given service is +// running at present. +func (api *API) GetCharmURL(args params.ServiceGet) (params.StringResult, error) { + service, err := api.state.Service(args.ServiceName) + if err != nil { + return params.StringResult{}, err + } + charmURL, _ := service.CharmURL() + return params.StringResult{Result: charmURL.String()}, nil +} + +// Set implements the server side of Service.Set. +// It does not unset values that are set to an empty string. +// Unset should be used for that.
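The comment above draws the line between Set and Unset: to Set, an empty string is a real value, while Unset writes nil, which removes the override so the charm default shows through again. A toy model of that contract over plain maps, assuming only the nil-means-reset convention visible in the code (this is not the state package API):

package main

import "fmt"

// apply mimics UpdateConfigSettings on a plain map: nil deletes the
// override, anything else (including "") is stored as the new value.
func apply(current, changes map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(current))
	for k, v := range current {
		out[k] = v
	}
	for k, v := range changes {
		if v == nil {
			delete(out, k)
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	cfg := map[string]interface{}{"title": "foobar", "username": "user name"}
	cfg = apply(cfg, map[string]interface{}{"username": nil}) // Unset
	cfg = apply(cfg, map[string]interface{}{"title": ""})     // Set to empty string
	fmt.Println(cfg) // map[title:]
}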
+func (api *API) Set(p params.ServiceSet) error { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + svc, err := api.state.Service(p.ServiceName) + if err != nil { + return err + } + ch, _, err := svc.Charm() + if err != nil { + return err + } + // Validate the settings. + changes, err := ch.Config().ParseSettingsStrings(p.Options) + if err != nil { + return err + } + + return svc.UpdateConfigSettings(changes) +} + +// Unset implements the server side of Service.Unset. +func (api *API) Unset(p params.ServiceUnset) error { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + svc, err := api.state.Service(p.ServiceName) + if err != nil { + return err + } + settings := make(charm.Settings) + for _, option := range p.Options { + settings[option] = nil + } + return svc.UpdateConfigSettings(settings) +} + +// CharmRelations implements the server side of Service.CharmRelations. +func (api *API) CharmRelations(p params.ServiceCharmRelations) (params.ServiceCharmRelationsResults, error) { + var results params.ServiceCharmRelationsResults + service, err := api.state.Service(p.ServiceName) + if err != nil { + return results, err + } + endpoints, err := service.Endpoints() + if err != nil { + return results, err + } + results.CharmRelations = make([]string, len(endpoints)) + for i, endpoint := range endpoints { + results.CharmRelations[i] = endpoint.Relation.Name + } + return results, nil +} + +// Expose changes the juju-managed firewall to expose any ports that +// were also explicitly marked by units as open. +func (api *API) Expose(args params.ServiceExpose) error { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + svc, err := api.state.Service(args.ServiceName) + if err != nil { + return err + } + return svc.SetExposed() +} + +// Unexpose changes the juju-managed firewall to unexpose any ports that +// were also explicitly marked by units as open. +func (api *API) Unexpose(args params.ServiceUnexpose) error { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + svc, err := api.state.Service(args.ServiceName) + if err != nil { + return err + } + return svc.ClearExposed() +} + +// addServiceUnits adds a given number of units to a service. +func addServiceUnits(st *state.State, args params.AddServiceUnits) ([]*state.Unit, error) { + service, err := st.Service(args.ServiceName) + if err != nil { + return nil, err + } + if args.NumUnits < 1 { + return nil, errors.New("must add at least one unit") + } + return jjj.AddUnits(st, service, args.NumUnits, args.Placement) +} + +// AddUnits adds a given number of units to a service. +func (api *API) AddUnits(args params.AddServiceUnits) (params.AddServiceUnitsResults, error) { + if err := api.check.ChangeAllowed(); err != nil { + return params.AddServiceUnitsResults{}, errors.Trace(err) + } + units, err := addServiceUnits(api.state, args) + if err != nil { + return params.AddServiceUnitsResults{}, err + } + unitNames := make([]string, len(units)) + for i, unit := range units { + unitNames[i] = unit.String() + } + return params.AddServiceUnitsResults{Units: unitNames}, nil +} + +// DestroyUnits removes a given set of service units.
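DestroyUnits, whose body follows, deliberately keeps iterating after a per-unit failure and only aggregates the messages at the end, so one bad unit name does not abort the rest of the batch. The shape of that pattern in miniature, with fmt and strings standing in for the Juju error helpers:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// destroyAll applies destroy to every name, collecting failures
// instead of returning at the first one.
func destroyAll(names []string, destroy func(string) error) error {
	var errs []string
	for _, name := range names {
		if err := destroy(name); err != nil {
			errs = append(errs, err.Error())
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return fmt.Errorf("some units were not destroyed: %s", strings.Join(errs, "; "))
}

func main() {
	err := destroyAll([]string{"web/0", "web/9"}, func(name string) error {
		if name == "web/9" {
			return errors.New(`unit "web/9" does not exist`)
		}
		return nil
	})
	fmt.Println(err)
}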
+func (api *API) DestroyUnits(args params.DestroyServiceUnits) error { + if err := api.check.RemoveAllowed(); err != nil { + return errors.Trace(err) + } + var errs []string + for _, name := range args.UnitNames { + unit, err := api.state.Unit(name) + switch { + case errors.IsNotFound(err): + err = errors.Errorf("unit %q does not exist", name) + case err != nil: + case unit.Life() != state.Alive: + continue + case unit.IsPrincipal(): + err = unit.Destroy() + default: + err = errors.Errorf("unit %q is a subordinate", name) + } + if err != nil { + errs = append(errs, err.Error()) + } + } + return common.DestroyErr("units", args.UnitNames, errs) +} + +// Destroy destroys a given service. +func (api *API) Destroy(args params.ServiceDestroy) error { + if err := api.check.RemoveAllowed(); err != nil { + return errors.Trace(err) + } + svc, err := api.state.Service(args.ServiceName) + if err != nil { + return err + } + return svc.Destroy() +} + +// GetConstraints returns the constraints for a given service. +func (api *API) GetConstraints(args params.GetServiceConstraints) (params.GetConstraintsResults, error) { + svc, err := api.state.Service(args.ServiceName) + if err != nil { + return params.GetConstraintsResults{}, err + } + cons, err := svc.Constraints() + return params.GetConstraintsResults{cons}, err +} + +// SetConstraints sets the constraints for a given service. +func (api *API) SetConstraints(args params.SetConstraints) error { + if err := api.check.ChangeAllowed(); err != nil { + return errors.Trace(err) + } + svc, err := api.state.Service(args.ServiceName) + if err != nil { + return err + } + return svc.SetConstraints(args.Constraints) +} + +// AddRelation adds a relation between the specified endpoints and returns the relation info. +func (api *API) AddRelation(args params.AddRelation) (params.AddRelationResults, error) { + if err := api.check.ChangeAllowed(); err != nil { + return params.AddRelationResults{}, errors.Trace(err) + } + inEps, err := api.state.InferEndpoints(args.Endpoints...) + if err != nil { + return params.AddRelationResults{}, err + } + rel, err := api.state.AddRelation(inEps...) + if err != nil { + return params.AddRelationResults{}, err + } + outEps := make(map[string]charm.Relation) + for _, inEp := range inEps { + outEp, err := rel.Endpoint(inEp.ServiceName) + if err != nil { + return params.AddRelationResults{}, err + } + outEps[inEp.ServiceName] = outEp.Relation + } + return params.AddRelationResults{Endpoints: outEps}, nil +} + +// DestroyRelation removes the relation between the specified endpoints. +func (api *API) DestroyRelation(args params.DestroyRelation) error { + if err := api.check.RemoveAllowed(); err != nil { + return errors.Trace(err) + } + eps, err := api.state.InferEndpoints(args.Endpoints...) + if err != nil { + return err + } + rel, err := api.state.EndpointsRelation(eps...) 
+ if err != nil { + return err + } + return rel.Destroy() +} === modified file 'src/github.com/juju/juju/apiserver/service/service_test.go' --- src/github.com/juju/juju/apiserver/service/service_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/service/service_test.go 2016-03-22 15:18:22 +0000 @@ -9,11 +9,13 @@ "sync" "github.com/juju/errors" + "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charmstore.v4/csclient" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" "gopkg.in/macaroon.v1" "gopkg.in/mgo.v2" @@ -130,7 +132,7 @@ }}, params.ErrorResults{[]params.ErrorResult{ {Error: nil}, - {Error: ¶ms.Error{`service "not-a-service" not found`, "not found"}}, + {Error: ¶ms.Error{Message: `service "not-a-service" not found`, Code: "not found"}}, }}, }, } @@ -185,13 +187,13 @@ pm := poolmanager.New(state.NewStateSettings(st)) _, err := pm.Create("loop-pool", provider.LoopProviderType, map[string]interface{}{}) c.Assert(err, jc.ErrorIsNil) - err = st.UpdateEnvironConfig(map[string]interface{}{ + err = st.UpdateModelConfig(map[string]interface{}{ "storage-default-block-source": "loop-pool", }, nil, nil) c.Assert(err, jc.ErrorIsNil) } -func (s *serviceSuite) TestClientServiceDeployWithStorage(c *gc.C) { +func (s *serviceSuite) TestServiceDeployWithStorage(c *gc.C) { setupStoragePool(c, s.State) curl, ch := s.UploadCharm(c, "utopic/storage-block-10", "storage-block") storageConstraints := map[string]storage.Constraints{ @@ -210,7 +212,7 @@ Constraints: cons, Storage: storageConstraints, } - results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -234,7 +236,7 @@ }) } -func (s *serviceSuite) TestClientServiceDeployWithInvalidStoragePool(c *gc.C) { +func (s *serviceSuite) TestServiceDeployWithInvalidStoragePool(c *gc.C) { setupStoragePool(c, s.State) curl, _ := s.UploadCharm(c, "utopic/storage-block-0", "storage-block") storageConstraints := map[string]storage.Constraints{ @@ -253,7 +255,7 @@ Constraints: cons, Storage: storageConstraints, } - results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -261,7 +263,7 @@ c.Assert(results.Results[0].Error, gc.ErrorMatches, `.* pool "foo" not found`) } -func (s *serviceSuite) TestClientServiceDeployWithUnsupportedStoragePool(c *gc.C) { +func (s *serviceSuite) TestServiceDeployWithUnsupportedStoragePool(c *gc.C) { registry.RegisterProvider("hostloop", &mockStorageProvider{kind: storage.StorageKindBlock}) pm := poolmanager.New(state.NewStateSettings(s.State)) _, err := pm.Create("host-loop-pool", provider.HostLoopProviderType, map[string]interface{}{}) @@ -284,16 +286,16 @@ Constraints: cons, Storage: storageConstraints, } - results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Results, gc.HasLen, 1) c.Assert(results.Results[0].Error, gc.ErrorMatches, - `.*pool "host-loop-pool" uses storage provider "hostloop" which is not supported for environments of type "dummy"`) + `.*pool "host-loop-pool" uses storage provider 
"hostloop" which is not supported for models of type "dummy"`) } -func (s *serviceSuite) TestClientServiceDeployDefaultFilesystemStorage(c *gc.C) { +func (s *serviceSuite) TestServiceDeployDefaultFilesystemStorage(c *gc.C) { setupStoragePool(c, s.State) curl, ch := s.UploadCharm(c, "trusty/storage-filesystem-1", "storage-filesystem") var cons constraints.Value @@ -303,7 +305,7 @@ NumUnits: 1, Constraints: cons, } - results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -322,7 +324,7 @@ }) } -func (s *serviceSuite) TestClientServiceDeployWithPlacement(c *gc.C) { +func (s *serviceSuite) TestServiceDeploy(c *gc.C) { curl, ch := s.UploadCharm(c, "precise/dummy-42", "dummy") err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) c.Assert(err, jc.ErrorIsNil) @@ -335,9 +337,8 @@ Placement: []*instance.Placement{ {"deadbeef-0bad-400d-8000-4b1d0d06f00d", "valid"}, }, - ToMachineSpec: "will be ignored", } - results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) @@ -350,7 +351,7 @@ c.Assert(units, gc.HasLen, 1) } -func (s *serviceSuite) TestClientServiceDeployWithInvalidPlacement(c *gc.C) { +func (s *serviceSuite) TestServiceDeployWithInvalidPlacement(c *gc.C) { curl, _ := s.UploadCharm(c, "precise/dummy-42", "dummy") err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) c.Assert(err, jc.ErrorIsNil) @@ -364,14 +365,62 @@ {"deadbeef-0bad-400d-8000-4b1d0d06f00d", "invalid"}, }, } - results, err := s.serviceApi.ServicesDeploy(params.ServicesDeploy{ + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ Services: []params.ServiceDeploy{args}}, ) c.Assert(err, jc.ErrorIsNil) c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.NotNil) c.Assert(results.Results[0].Error.Error(), gc.Matches, ".* invalid placement is invalid") } +func (s *serviceSuite) testClientServicesDeployWithBindings(c *gc.C, endpointBindings, expected map[string]string) { + curl, _ := s.UploadCharm(c, "utopic/riak-42", "riak") + + var cons constraints.Value + args := params.ServiceDeploy{ + ServiceName: "service", + CharmUrl: curl.String(), + NumUnits: 1, + Constraints: cons, + EndpointBindings: endpointBindings, + } + + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{args}}, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + + service, err := s.State.Service(args.ServiceName) + c.Assert(err, jc.ErrorIsNil) + + retrievedBindings, err := service.EndpointBindings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(retrievedBindings, jc.DeepEquals, expected) +} + +func (s *serviceSuite) TestClientServicesDeployWithBindings(c *gc.C) { + s.State.AddSpace("a-space", "", nil, true) + expected := map[string]string{ + "endpoint": "a-space", + "ring": "", + "admin": "", + } + endpointBindings := map[string]string{"endpoint": "a-space"} + s.testClientServicesDeployWithBindings(c, endpointBindings, expected) +} + +func (s *serviceSuite) TestClientServicesDeployWithDefaultBindings(c *gc.C) { + expected := map[string]string{ + "endpoint": "", + "ring": "", + "admin": "", + } + s.testClientServicesDeployWithBindings(c, nil, 
expected) +} + // TODO(wallyworld) - the following charm tests have been moved from the apiserver/client // package in order to use the fake charm store testing infrastructure. They are legacy tests // written to use the api client instead of the apiserver logic. They need to be rewritten and @@ -387,7 +436,7 @@ client := s.APIState.Client() // First test the sanity checks. err := client.AddCharm(&charm.URL{Name: "nonsense"}) - c.Assert(err, gc.ErrorMatches, `charm URL has invalid schema: ":nonsense-0"`) + c.Assert(err, gc.ErrorMatches, `cannot parse charm or bundle URL: ":nonsense-0"`) err = client.AddCharm(charm.MustParseURL("local:precise/dummy")) c.Assert(err, gc.ErrorMatches, "only charm store charm URLs are supported, with cs: schema") err = client.AddCharm(charm.MustParseURL("cs:precise/wordpress")) @@ -413,7 +462,7 @@ c.Assert(err, jc.ErrorIsNil) // Verify it's in state and it got uploaded. - storage := statestorage.NewStorage(s.State.EnvironUUID(), s.State.MongoSession()) + storage := statestorage.NewStorage(s.State.ModelUUID(), s.State.MongoSession()) sch, err = s.State.Charm(curl) c.Assert(err, jc.ErrorIsNil) s.assertUploaded(c, storage, sch.StoragePath(), sch.BundleSha256()) @@ -426,17 +475,17 @@ // Change permissions on the new charm such that only bob // can read from it. s.DischargeUser = "restricted" - err := s.Srv.NewClient().Put("/"+curl.Path()+"/meta/perm/read", []string{"bob"}) + err := s.Client.Put("/"+curl.Path()+"/meta/perm/read", []string{"bob"}) c.Assert(err, jc.ErrorIsNil) // Try to add a charm to the environment without authorization. s.DischargeUser = "" err = s.APIState.Client().AddCharm(curl) - c.Assert(err, gc.ErrorMatches, `cannot retrieve charm "cs:~restricted/precise/wordpress-3": cannot get archive: cannot get discharge from ".*": third party refused discharge: cannot discharge: discharge denied`) + c.Assert(err, gc.ErrorMatches, `cannot retrieve charm "cs:~restricted/precise/wordpress-3": cannot get archive: cannot get discharge from "https://.*": third party refused discharge: cannot discharge: discharge denied \(unauthorized access\)`) tryAs := func(user string) error { client := csclient.New(csclient.Params{ - URL: s.Srv.URL(), + URL: s.Srv.URL, }) s.DischargeUser = user var m *macaroon.Macaroon @@ -507,7 +556,7 @@ } } - storage := statestorage.NewStorage(s.State.EnvironUUID(), s.State.MongoSession()) + storage := statestorage.NewStorage(s.State.ModelUUID(), s.State.MongoSession()) s.assertUploaded(c, storage, sch.StoragePath(), sch.BundleSha256()) } @@ -543,6 +592,1969 @@ c.Assert(sch.IsUploaded(), jc.IsTrue) } +func (s *serviceSuite) TestServiceGetCharmURL(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + result, err := s.serviceApi.GetCharmURL(params.ServiceGet{"wordpress"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Error, gc.IsNil) + c.Assert(result.Result, gc.Equals, "local:quantal/wordpress-3") +} + +func (s *serviceSuite) TestServiceSetCharm(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharm(c, "precise/wordpress-3", "wordpress") + err = 
service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: curl.String(), + }) + c.Assert(err, jc.ErrorIsNil) + + // Ensure that the charm is not marked as forced. + service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + charm, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(charm.URL().String(), gc.Equals, curl.String()) + c.Assert(force, jc.IsFalse) +} + +func (s *serviceSuite) setupServiceSetCharm(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharm(c, "precise/wordpress-3", "wordpress") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *serviceSuite) assertServiceSetCharm(c *gc.C, forceUnits bool) { + err := s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: "cs:~who/precise/wordpress-3", + ForceUnits: forceUnits, + }) + c.Assert(err, jc.ErrorIsNil) + // Ensure that the charm is not marked as forced. + service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + charm, _, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(charm.URL().String(), gc.Equals, "cs:~who/precise/wordpress-3") +} + +func (s *serviceSuite) assertServiceSetCharmBlocked(c *gc.C, msg string) { + err := s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: "cs:~who/precise/wordpress-3", + }) + s.AssertBlocked(c, err, msg) +} + +func (s *serviceSuite) TestBlockDestroyServiceSetCharm(c *gc.C) { + s.setupServiceSetCharm(c) + s.BlockDestroyModel(c, "TestBlockDestroyServiceSetCharm") + s.assertServiceSetCharm(c, false) +} + +func (s *serviceSuite) TestBlockRemoveServiceSetCharm(c *gc.C) { + s.setupServiceSetCharm(c) + s.BlockRemoveObject(c, "TestBlockRemoveServiceSetCharm") + s.assertServiceSetCharm(c, false) +} + +func (s *serviceSuite) TestBlockChangesServiceSetCharm(c *gc.C) { + s.setupServiceSetCharm(c) + s.BlockAllChanges(c, "TestBlockChangesServiceSetCharm") + s.assertServiceSetCharmBlocked(c, "TestBlockChangesServiceSetCharm") +} + +func (s *serviceSuite) TestServiceSetCharmForceUnits(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharm(c, "precise/wordpress-3", "wordpress") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: 
curl.String(), + ForceUnits: true, + }) + c.Assert(err, jc.ErrorIsNil) + + // Ensure that the charm is marked as forced. + service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + charm, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(charm.URL().String(), gc.Equals, curl.String()) + c.Assert(force, jc.IsTrue) +} + +func (s *serviceSuite) TestBlockServiceSetCharmForce(c *gc.C) { + s.setupServiceSetCharm(c) + + // block all changes + s.BlockAllChanges(c, "TestBlockServiceSetCharmForce") + s.BlockRemoveObject(c, "TestBlockServiceSetCharmForce") + s.BlockDestroyModel(c, "TestBlockServiceSetCharmForce") + + s.assertServiceSetCharm(c, true) +} + +func (s *serviceSuite) TestServiceSetCharmInvalidService(c *gc.C) { + err := s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "badservice", + CharmUrl: "cs:precise/wordpress-3", + ForceSeries: true, + ForceUnits: true, + }) + c.Assert(err, gc.ErrorMatches, `service "badservice" not found`) +} + +func (s *serviceSuite) TestServiceAddCharmErrors(c *gc.C) { + for url, expect := range map[string]string{ + "wordpress": "charm URL must include revision", + "cs:wordpress": "charm URL must include revision", + "cs:precise/wordpress": "charm URL must include revision", + "cs:precise/wordpress-999999": `cannot retrieve "cs:precise/wordpress-999999": charm not found`, + } { + c.Logf("test %s", url) + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{ + URL: url, + }) + c.Check(err, gc.ErrorMatches, expect) + } +} + +func (s *serviceSuite) TestServiceSetCharmLegacy(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharm(c, "trusty/dummy-1", "dummy") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + // Even with forceSeries = true, we can't change a charm where + // the series is specified in the URL.
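TestServiceAddCharmErrors above drives a single assertion through a map from input URL to expected error, a compact table-test idiom (map iteration order is unspecified, which is harmless when the cases are independent). A framework-free equivalent using only the standard testing package; validate here is a made-up stand-in, not the real revision check:

package demo

import (
	"strings"
	"testing"
)

// validate is an illustrative stand-in for the URL revision check.
func validate(url string) string {
	if !strings.Contains(url, "-") {
		return "charm URL must include revision"
	}
	return ""
}

func TestValidateErrors(t *testing.T) {
	for url, want := range map[string]string{
		"cs:precise/wordpress":   "charm URL must include revision",
		"cs:precise/wordpress-3": "",
	} {
		if got := validate(url); got != want {
			t.Errorf("%q: got %q, want %q", url, got, want)
		}
	}
}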
+ err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: curl.String(), + ForceSeries: true, + }) + c.Assert(err, gc.ErrorMatches, "cannot change a service's series") +} + +func (s *serviceSuite) TestServiceSetCharmUnsupportedSeries(c *gc.C) { + curl, _ := s.UploadCharmMultiSeries(c, "~who/multi-series", "multi-series") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + Series: "precise", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharmMultiSeries(c, "~who/multi-series", "multi-series2") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: curl.String(), + }) + c.Assert(err, gc.ErrorMatches, "cannot upgrade charm, only these series are supported: trusty, wily") +} + +func (s *serviceSuite) TestServiceSetCharmUnsupportedSeriesForce(c *gc.C) { + curl, _ := s.UploadCharmMultiSeries(c, "~who/multi-series", "multi-series") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + Series: "precise", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharmMultiSeries(c, "~who/multi-series2", "multi-series2") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: curl.String(), + ForceSeries: true, + }) + c.Assert(err, jc.ErrorIsNil) + svc, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + ch, _, err := svc.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ch.URL().String(), gc.Equals, "cs:~who/multi-series2-0") +} + +func (s *serviceSuite) TestServiceSetCharmWrongOS(c *gc.C) { + curl, _ := s.UploadCharmMultiSeries(c, "~who/multi-series", "multi-series") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + Series: "precise", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + curl, _ = s.UploadCharmMultiSeries(c, "~who/multi-series-windows", "multi-series-windows") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: curl.String(), + ForceSeries: true, + }) + c.Assert(err, gc.ErrorMatches, `cannot upgrade charm, OS "Ubuntu" not supported by charm`) +} + +type testModeCharmRepo struct { + *charmrepo.CharmStore + testMode bool +} + +// WithTestMode returns a 
repository Interface where test mode is enabled. +func (s *testModeCharmRepo) WithTestMode() charmrepo.Interface { + s.testMode = true + return s.CharmStore.WithTestMode() +} + +func (s *serviceSuite) TestSpecializeStoreOnDeployServiceSetCharmAndAddCharm(c *gc.C) { + repo := &testModeCharmRepo{} + s.PatchValue(&service.NewCharmStore, func(p charmrepo.NewCharmStoreParams) charmrepo.Interface { + p.URL = s.Srv.URL + repo.CharmStore = charmrepo.NewCharmStore(p) + return repo + }) + attrs := map[string]interface{}{"test-mode": true} + err := s.State.UpdateModelConfig(attrs, nil, nil) + c.Assert(err, jc.ErrorIsNil) + + // Check that the store's test mode is enabled when calling service Deploy. + curl, _ := s.UploadCharm(c, "trusty/dummy-1", "dummy") + err = service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + c.Assert(repo.testMode, jc.IsTrue) + + // Check that the store's test mode is enabled when calling SetCharm. + curl, _ = s.UploadCharm(c, "trusty/wordpress-2", "wordpress") + err = s.serviceApi.SetCharm(params.ServiceSetCharm{ + ServiceName: "service", + CharmUrl: curl.String(), + }) + c.Assert(repo.testMode, jc.IsTrue) + + // Check that the store's test mode is enabled when calling AddCharm. + curl, _ = s.UploadCharm(c, "utopic/riak-42", "riak") + err = s.APIState.Client().AddCharm(curl) + c.Assert(err, jc.ErrorIsNil) + c.Assert(repo.testMode, jc.IsTrue) +} + +func (s *serviceSuite) setupServiceDeploy(c *gc.C, args string) (*charm.URL, charm.Charm, constraints.Value) { + curl, ch := s.UploadCharm(c, "precise/dummy-42", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + cons := constraints.MustParse(args) + return curl, ch, cons +} + +func (s *serviceSuite) assertServiceDeployPrincipal(c *gc.C, curl *charm.URL, ch charm.Charm, mem4g constraints.Value) { + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + Constraints: mem4g, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + apiservertesting.AssertPrincipalServiceDeployed(c, s.State, "service", curl, false, ch, mem4g) +} + +func (s *serviceSuite) assertServiceDeployPrincipalBlocked(c *gc.C, msg string, curl *charm.URL, mem4g constraints.Value) { + _, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + Constraints: mem4g, + }}}) + s.AssertBlocked(c, err, msg) +} + +func (s *serviceSuite) TestBlockDestroyServiceDeployPrincipal(c *gc.C) { + curl, bundle, cons := s.setupServiceDeploy(c, "mem=4G") + s.BlockDestroyModel(c, "TestBlockDestroyServiceDeployPrincipal") + s.assertServiceDeployPrincipal(c, curl, bundle, cons) +} + +func (s *serviceSuite) TestBlockRemoveServiceDeployPrincipal(c *gc.C) { + curl, bundle, cons := s.setupServiceDeploy(c, "mem=4G") + s.BlockRemoveObject(c, "TestBlockRemoveServiceDeployPrincipal") + s.assertServiceDeployPrincipal(c, curl, bundle, cons) +} + +func (s 
*serviceSuite) TestBlockChangesServiceDeployPrincipal(c *gc.C) { + curl, _, cons := s.setupServiceDeploy(c, "mem=4G") + s.BlockAllChanges(c, "TestBlockChangesServiceDeployPrincipal") + s.assertServiceDeployPrincipalBlocked(c, "TestBlockChangesServiceDeployPrincipal", curl, cons) +} + +func (s *serviceSuite) TestServiceDeploySubordinate(c *gc.C) { + curl, ch := s.UploadCharm(c, "utopic/logging-47", "logging") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service-name", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + + service, err := s.State.Service("service-name") + c.Assert(err, jc.ErrorIsNil) + charm, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(force, jc.IsFalse) + c.Assert(charm.URL(), gc.DeepEquals, curl) + c.Assert(charm.Meta(), gc.DeepEquals, ch.Meta()) + c.Assert(charm.Config(), gc.DeepEquals, ch.Config()) + + units, err := service.AllUnits() + c.Assert(err, jc.ErrorIsNil) + c.Assert(units, gc.HasLen, 0) +} + +func (s *serviceSuite) TestServiceDeployConfig(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service-name", + NumUnits: 1, + ConfigYAML: "service-name:\n username: fred", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + + service, err := s.State.Service("service-name") + c.Assert(err, jc.ErrorIsNil) + settings, err := service.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{"username": "fred"}) +} + +func (s *serviceSuite) TestServiceDeployConfigError(c *gc.C) { + // TODO(fwereade): test Config/ConfigYAML handling directly on srvClient. + // Can't be done cleanly until it's extracted similarly to Machiner. 
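The ConfigYAML strings in the deploy tests above ("service-name:\n username: fred") follow the convention enforced by ParseSettingsYAML earlier in this diff: the document is keyed by service name and only the matching stanza is applied. A standalone illustration of that keyed lookup with the same yaml.v2 package:

package main

import (
	"fmt"

	goyaml "gopkg.in/yaml.v2"
)

func main() {
	doc := "service-name:\n  username: fred\n"
	var all map[string]map[string]interface{}
	if err := goyaml.Unmarshal([]byte(doc), &all); err != nil {
		panic(err)
	}
	// Only the stanza matching the deployed service's name is used.
	fmt.Println(all["service-name"]["username"]) // fred
}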
+ curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service-name", + NumUnits: 1, + ConfigYAML: "service-name:\n skill-level: fred", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.ErrorMatches, `option "skill-level" expected int, got "fred"`) + _, err = s.State.Service("service-name") + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *serviceSuite) TestServiceDeployToMachine(c *gc.C) { + curl, ch := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + machine, err := s.State.AddMachine("precise", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service-name", + NumUnits: 1, + ConfigYAML: "service-name:\n username: fred", + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + + service, err := s.State.Service("service-name") + c.Assert(err, jc.ErrorIsNil) + charm, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(force, jc.IsFalse) + c.Assert(charm.URL(), gc.DeepEquals, curl) + c.Assert(charm.Meta(), gc.DeepEquals, ch.Meta()) + c.Assert(charm.Config(), gc.DeepEquals, ch.Config()) + + errs, err := s.APIState.UnitAssigner().AssignUnits([]names.UnitTag{names.NewUnitTag("service-name/0")}) + c.Assert(errs, gc.DeepEquals, []error{nil}) + c.Assert(err, jc.ErrorIsNil) + + units, err := service.AllUnits() + c.Assert(err, jc.ErrorIsNil) + c.Assert(units, gc.HasLen, 1) + + mid, err := units[0].AssignedMachineId() + c.Assert(err, jc.ErrorIsNil) + c.Assert(mid, gc.Equals, machine.Id()) +} + +func (s *serviceSuite) TestServiceDeployToMachineNotFound(c *gc.C) { + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: "cs:precise/service-name-1", + ServiceName: "service-name", + NumUnits: 1, + Placement: []*instance.Placement{instance.MustParsePlacement("42")}, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.ErrorMatches, `cannot deploy "service-name" to machine 42: machine 42 not found`) + + _, err = s.State.Service("service-name") + c.Assert(err, gc.ErrorMatches, `service "service-name" not found`) +} + +func (s *serviceSuite) TestServiceDeployServiceOwner(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-0", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 3, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) + + service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + c.Assert(service.GetOwnerTag(), gc.Equals, s.authorizer.GetAuthTag().String()) +} + +func (s *serviceSuite) 
deployServiceForUpdateTests(c *gc.C) { + curl, _ := s.UploadCharm(c, "precise/dummy-1", "dummy") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + results, err := s.serviceApi.Deploy(params.ServicesDeploy{ + Services: []params.ServiceDeploy{{ + CharmUrl: curl.String(), + ServiceName: "service", + NumUnits: 1, + }}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.IsNil) +} + +func (s *serviceSuite) checkClientServiceUpdateSetCharm(c *gc.C, forceCharmUrl bool) { + s.deployServiceForUpdateTests(c) + curl, _ := s.UploadCharm(c, "precise/wordpress-3", "wordpress") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + // Update the charm for the service. + args := params.ServiceUpdate{ + ServiceName: "service", + CharmUrl: curl.String(), + ForceCharmUrl: forceCharmUrl, + } + err = s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the charm has been updated and the force flag correctly set. + service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + ch, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ch.URL().String(), gc.Equals, curl.String()) + c.Assert(force, gc.Equals, forceCharmUrl) +} + +func (s *serviceSuite) TestServiceUpdateSetCharm(c *gc.C) { + s.checkClientServiceUpdateSetCharm(c, false) +} + +func (s *serviceSuite) TestBlockDestroyServiceUpdate(c *gc.C) { + s.BlockDestroyModel(c, "TestBlockDestroyServiceUpdate") + s.checkClientServiceUpdateSetCharm(c, false) +} + +func (s *serviceSuite) TestBlockRemoveServiceUpdate(c *gc.C) { + s.BlockRemoveObject(c, "TestBlockRemoveServiceUpdate") + s.checkClientServiceUpdateSetCharm(c, false) +} + +func (s *serviceSuite) setupServiceUpdate(c *gc.C) string { + s.deployServiceForUpdateTests(c) + curl, _ := s.UploadCharm(c, "precise/wordpress-3", "wordpress") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + return curl.String() +} + +func (s *serviceSuite) TestBlockChangeServiceUpdate(c *gc.C) { + curl := s.setupServiceUpdate(c) + s.BlockAllChanges(c, "TestBlockChangeServiceUpdate") + // Update the charm for the service. + args := params.ServiceUpdate{ + ServiceName: "service", + CharmUrl: curl, + ForceCharmUrl: false, + } + err := s.serviceApi.Update(args) + s.AssertBlocked(c, err, "TestBlockChangeServiceUpdate") +} + +func (s *serviceSuite) TestServiceUpdateForceSetCharm(c *gc.C) { + s.checkClientServiceUpdateSetCharm(c, true) +} + +func (s *serviceSuite) TestBlockServiceUpdateForced(c *gc.C) { + curl := s.setupServiceUpdate(c) + + // block all changes. Force should ignore block :) + s.BlockAllChanges(c, "TestBlockServiceUpdateForced") + s.BlockDestroyModel(c, "TestBlockServiceUpdateForced") + s.BlockRemoveObject(c, "TestBlockServiceUpdateForced") + + // Update the charm for the service. + args := params.ServiceUpdate{ + ServiceName: "service", + CharmUrl: curl, + ForceCharmUrl: true, + } + err := s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the charm has been updated and the force flag correctly set.
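The block tests above all exercise one gating rule from Update and SetCharm earlier in this diff: the ChangeAllowed check runs only when the operation is not forced, so destroy and remove blocks never stop a charm update, and a change block yields to ForceCharmUrl. The rule distilled into a few lines:

package main

import (
	"errors"
	"fmt"
)

// changeAllowed mirrors the gating in Update/SetCharm: a forced
// operation skips the model's "block all changes" switch entirely.
func changeAllowed(blocked, forced bool) error {
	if forced {
		return nil
	}
	if blocked {
		return errors.New("all changes blocked")
	}
	return nil
}

func main() {
	fmt.Println(changeAllowed(true, false)) // all changes blocked
	fmt.Println(changeAllowed(true, true))  // <nil>
}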
+ service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + ch, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ch.URL().String(), gc.Equals, curl) + c.Assert(force, jc.IsTrue) +} + +func (s *serviceSuite) TestServiceUpdateSetCharmNotFound(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + args := params.ServiceUpdate{ + ServiceName: "wordpress", + CharmUrl: "cs:precise/wordpress-999999", + } + err := s.serviceApi.Update(args) + c.Check(err, gc.ErrorMatches, `charm "cs:precise/wordpress-999999" not found`) +} + +func (s *serviceSuite) TestServiceUpdateSetMinUnits(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Set minimum units for the service. + minUnits := 2 + args := params.ServiceUpdate{ + ServiceName: "dummy", + MinUnits: &minUnits, + } + err := s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the minimum number of units has been set. + c.Assert(service.Refresh(), gc.IsNil) + c.Assert(service.MinUnits(), gc.Equals, minUnits) +} + +func (s *serviceSuite) TestServiceUpdateSetMinUnitsError(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Set a negative minimum number of units for the service. + minUnits := -1 + args := params.ServiceUpdate{ + ServiceName: "dummy", + MinUnits: &minUnits, + } + err := s.serviceApi.Update(args) + c.Assert(err, gc.ErrorMatches, + `cannot set minimum units for service "dummy": cannot set a negative minimum number of units`) + + // Ensure the minimum number of units has not been set. + c.Assert(service.Refresh(), gc.IsNil) + c.Assert(service.MinUnits(), gc.Equals, 0) +} + +func (s *serviceSuite) TestServiceUpdateSetSettingsStrings(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Update settings for the service. + args := params.ServiceUpdate{ + ServiceName: "dummy", + SettingsStrings: map[string]string{"title": "s-title", "username": "s-user"}, + } + err := s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the settings have been correctly updated. + expected := charm.Settings{"title": "s-title", "username": "s-user"} + obtained, err := service.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, expected) +} + +func (s *serviceSuite) TestServiceUpdateSetSettingsYAML(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Update settings for the service. + args := params.ServiceUpdate{ + ServiceName: "dummy", + SettingsYAML: "dummy:\n title: y-title\n username: y-user", + } + err := s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the settings have been correctly updated. + expected := charm.Settings{"title": "y-title", "username": "y-user"} + obtained, err := service.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, expected) +} + +func (s *serviceSuite) TestClientServiceUpdateSetSettingsGetYAML(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Update settings for the service. + args := params.ServiceUpdate{ + ServiceName: "dummy", + SettingsYAML: "charm: dummy\nservice: dummy\nsettings:\n title:\n value: y-title\n type: string\n username:\n value: y-user\n ignore:\n blah: true", + } + err := s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the settings have been correctly updated. 
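The SettingsYAML in the test above is the "juju get" output format that settingsFromGetYaml in service.go handles: values live under settings/<key>/value, and sections without a value (like "ignore") are skipped. A standalone sketch of that extraction, relying on yaml.v2 decoding nested mappings as map[interface{}]interface{} exactly as the real code does:

package main

import (
	"fmt"

	goyaml "gopkg.in/yaml.v2"
)

const doc = `
settings:
  title:
    value: y-title
    type: string
  ignore:
    blah: true
`

func main() {
	var all map[string]interface{}
	if err := goyaml.Unmarshal([]byte(doc), &all); err != nil {
		panic(err)
	}
	settings := all["settings"].(map[interface{}]interface{})
	for key, raw := range settings {
		section, ok := raw.(map[interface{}]interface{})
		if !ok {
			continue
		}
		if value, ok := section["value"]; ok {
			fmt.Printf("%v: %v\n", key, value) // only "title" is emitted
		}
	}
}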
+ expected := charm.Settings{"title": "y-title", "username": "y-user"} + obtained, err := service.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, expected) +} + +func (s *serviceSuite) TestServiceUpdateSetConstraints(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Update constraints for the service. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + args := params.ServiceUpdate{ + ServiceName: "dummy", + Constraints: &cons, + } + err = s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the constraints have been correctly updated. + obtained, err := service.Constraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, cons) +} + +func (s *serviceSuite) TestServiceUpdateAllParams(c *gc.C) { + s.deployServiceForUpdateTests(c) + curl, _ := s.UploadCharm(c, "precise/wordpress-3", "wordpress") + err := service.AddCharmWithAuthorization(s.State, params.AddCharmWithAuthorization{URL: curl.String()}) + c.Assert(err, jc.ErrorIsNil) + + // Update all the service attributes. + minUnits := 3 + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + args := params.ServiceUpdate{ + ServiceName: "service", + CharmUrl: curl.String(), + ForceCharmUrl: true, + MinUnits: &minUnits, + SettingsStrings: map[string]string{"blog-title": "string-title"}, + SettingsYAML: "service:\n blog-title: yaml-title\n", + Constraints: &cons, + } + err = s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the service has been correctly updated. + service, err := s.State.Service("service") + c.Assert(err, jc.ErrorIsNil) + + // Check the charm. + ch, force, err := service.Charm() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ch.URL().String(), gc.Equals, curl.String()) + c.Assert(force, jc.IsTrue) + + // Check the minimum number of units. + c.Assert(service.MinUnits(), gc.Equals, minUnits) + + // Check the settings: also ensure the YAML settings take precedence + // over strings ones. + expectedSettings := charm.Settings{"blog-title": "yaml-title"} + obtainedSettings, err := service.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtainedSettings, gc.DeepEquals, expectedSettings) + + // Check the constraints. + obtainedConstraints, err := service.Constraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtainedConstraints, gc.DeepEquals, cons) +} + +func (s *serviceSuite) TestServiceUpdateNoParams(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + + // Calling Update with no parameters set is a no-op. 
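TestServiceUpdateAllParams above checks the documented precedence in Update: when both SettingsYAML and SettingsStrings are supplied, the YAML wins outright and the string map is ignored rather than merged. Distilled, with placeholder return values:

package main

import "fmt"

// applySettings mirrors Update's ordering: YAML is applied when
// present; the string map is consulted only otherwise.
func applySettings(yaml string, strs map[string]string) string {
	switch {
	case yaml != "":
		return "applied YAML settings"
	case len(strs) > 0:
		return "applied string settings"
	default:
		return "no settings change"
	}
}

func main() {
	fmt.Println(applySettings(
		"service:\n  blog-title: yaml-title\n",
		map[string]string{"blog-title": "string-title"},
	)) // applied YAML settings
}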
+ args := params.ServiceUpdate{ServiceName: "wordpress"} + err := s.serviceApi.Update(args) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *serviceSuite) TestServiceUpdateNoService(c *gc.C) { + err := s.serviceApi.Update(params.ServiceUpdate{}) + c.Assert(err, gc.ErrorMatches, `"" is not a valid service name`) +} + +func (s *serviceSuite) TestServiceUpdateInvalidService(c *gc.C) { + args := params.ServiceUpdate{ServiceName: "no-such-service"} + err := s.serviceApi.Update(args) + c.Assert(err, gc.ErrorMatches, `service "no-such-service" not found`) +} + +var ( + validSetTestValue = "a value with spaces\nand newline\nand UTF-8 characters: \U0001F604 / \U0001F44D" +) + +func (s *serviceSuite) TestServiceSet(c *gc.C) { + dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + err := s.serviceApi.Set(params.ServiceSet{ServiceName: "dummy", Options: map[string]string{ + "title": "foobar", + "username": validSetTestValue, + }}) + c.Assert(err, jc.ErrorIsNil) + settings, err := dummy.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{ + "title": "foobar", + "username": validSetTestValue, + }) + + err = s.serviceApi.Set(params.ServiceSet{ServiceName: "dummy", Options: map[string]string{ + "title": "barfoo", + "username": "", + }}) + c.Assert(err, jc.ErrorIsNil) + settings, err = dummy.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{ + "title": "barfoo", + "username": "", + }) +} + +func (s *serviceSuite) assertServiceSetBlocked(c *gc.C, dummy *state.Service, msg string) { + err := s.serviceApi.Set(params.ServiceSet{ + ServiceName: "dummy", + Options: map[string]string{ + "title": "foobar", + "username": validSetTestValue}}) + s.AssertBlocked(c, err, msg) +} + +func (s *serviceSuite) assertServiceSet(c *gc.C, dummy *state.Service) { + err := s.serviceApi.Set(params.ServiceSet{ + ServiceName: "dummy", + Options: map[string]string{ + "title": "foobar", + "username": validSetTestValue}}) + c.Assert(err, jc.ErrorIsNil) + settings, err := dummy.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{ + "title": "foobar", + "username": validSetTestValue, + }) +} + +func (s *serviceSuite) TestBlockDestroyServiceSet(c *gc.C) { + dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + s.BlockDestroyModel(c, "TestBlockDestroyServiceSet") + s.assertServiceSet(c, dummy) +} + +func (s *serviceSuite) TestBlockRemoveServiceSet(c *gc.C) { + dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + s.BlockRemoveObject(c, "TestBlockRemoveServiceSet") + s.assertServiceSet(c, dummy) +} + +func (s *serviceSuite) TestBlockChangesServiceSet(c *gc.C) { + dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + s.BlockAllChanges(c, "TestBlockChangesServiceSet") + s.assertServiceSetBlocked(c, dummy, "TestBlockChangesServiceSet") +} + +func (s *serviceSuite) TestServerUnset(c *gc.C) { + dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + err := s.serviceApi.Set(params.ServiceSet{ServiceName: "dummy", Options: map[string]string{ + "title": "foobar", + "username": "user name", + }}) + c.Assert(err, jc.ErrorIsNil) + settings, err := dummy.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{ + "title": "foobar", + "username": "user name", + }) + + err = s.serviceApi.Unset(params.ServiceUnset{ServiceName: "dummy", Options: []string{"username"}}) + 
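+ // Unsetting "username" should revert it to the charm default, removing
+ // the key from the service's settings.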
c.Assert(err, jc.ErrorIsNil)
+ settings, err = dummy.ConfigSettings()
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(settings, gc.DeepEquals, charm.Settings{
+ "title": "foobar",
+ })
+}
+
+func (s *serviceSuite) setupServerUnsetBlocked(c *gc.C) *state.Service {
+ dummy := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy"))
+
+ err := s.serviceApi.Set(params.ServiceSet{
+ ServiceName: "dummy",
+ Options: map[string]string{
+ "title": "foobar",
+ "username": "user name",
+ }})
+ c.Assert(err, jc.ErrorIsNil)
+ settings, err := dummy.ConfigSettings()
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(settings, gc.DeepEquals, charm.Settings{
+ "title": "foobar",
+ "username": "user name",
+ })
+ return dummy
+}
+
+func (s *serviceSuite) assertServerUnset(c *gc.C, dummy *state.Service) {
+ err := s.serviceApi.Unset(params.ServiceUnset{
+ ServiceName: "dummy",
+ Options: []string{"username"},
+ })
+ c.Assert(err, jc.ErrorIsNil)
+ settings, err := dummy.ConfigSettings()
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(settings, gc.DeepEquals, charm.Settings{
+ "title": "foobar",
+ })
+}
+
+func (s *serviceSuite) assertServerUnsetBlocked(c *gc.C, dummy *state.Service, msg string) {
+ err := s.serviceApi.Unset(params.ServiceUnset{
+ ServiceName: "dummy",
+ Options: []string{"username"},
+ })
+ s.AssertBlocked(c, err, msg)
+}
+
+func (s *serviceSuite) TestBlockDestroyServerUnset(c *gc.C) {
+ dummy := s.setupServerUnsetBlocked(c)
+ s.BlockDestroyModel(c, "TestBlockDestroyServerUnset")
+ s.assertServerUnset(c, dummy)
+}
+
+func (s *serviceSuite) TestBlockRemoveServerUnset(c *gc.C) {
+ dummy := s.setupServerUnsetBlocked(c)
+ s.BlockRemoveObject(c, "TestBlockRemoveServerUnset")
+ s.assertServerUnset(c, dummy)
+}
+
+func (s *serviceSuite) TestBlockChangesServerUnset(c *gc.C) {
+ dummy := s.setupServerUnsetBlocked(c)
+ s.BlockAllChanges(c, "TestBlockChangesServerUnset")
+ s.assertServerUnsetBlocked(c, dummy, "TestBlockChangesServerUnset")
+}
+
+var clientAddServiceUnitsTests = []struct {
+ about string
+ service string // if not set, defaults to 'dummy'
+ expected []string
+ to string
+ err string
+}{
+ {
+ about: "returns unit names",
+ expected: []string{"dummy/0", "dummy/1", "dummy/2"},
+ },
+ {
+ about: "fails trying to add zero units",
+ err: "must add at least one unit",
+ },
+ {
+ // Note: the test cases share state; we add one unit here, and the
+ // three units from the first case still exist.
+ about: "force the unit onto bootstrap machine",
+ expected: []string{"dummy/3"},
+ to: "0",
+ },
+ {
+ about: "unknown service name",
+ service: "unknown-service",
+ err: `service "unknown-service" not found`,
+ },
+}
+
+func (s *serviceSuite) TestClientAddServiceUnits(c *gc.C) {
+ s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy"))
+ for i, t := range clientAddServiceUnitsTests {
+ c.Logf("test %d.
%s", i, t.about) + serviceName := t.service + if serviceName == "" { + serviceName = "dummy" + } + args := params.AddServiceUnits{ + ServiceName: serviceName, + NumUnits: len(t.expected), + } + if t.to != "" { + args.Placement = []*instance.Placement{instance.MustParsePlacement(t.to)} + } + result, err := s.serviceApi.AddUnits(args) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + continue + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Units, gc.DeepEquals, t.expected) + } + // Test that we actually assigned the unit to machine 0 + forcedUnit, err := s.BackingState.Unit("dummy/3") + c.Assert(err, jc.ErrorIsNil) + assignedMachine, err := forcedUnit.AssignedMachineId() + c.Assert(err, jc.ErrorIsNil) + c.Assert(assignedMachine, gc.Equals, "0") +} + +func (s *serviceSuite) TestAddServiceUnitsToNewContainer(c *gc.C) { + svc := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + machine, err := s.State.AddMachine("quantal", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + + _, err = s.serviceApi.AddUnits(params.AddServiceUnits{ + ServiceName: "dummy", + NumUnits: 1, + Placement: []*instance.Placement{instance.MustParsePlacement("lxc:" + machine.Id())}, + }) + c.Assert(err, jc.ErrorIsNil) + + units, err := svc.AllUnits() + c.Assert(err, jc.ErrorIsNil) + mid, err := units[0].AssignedMachineId() + c.Assert(err, jc.ErrorIsNil) + c.Assert(mid, gc.Equals, machine.Id()+"/lxc/0") +} + +var addServiceUnitTests = []struct { + about string + service string // if not set, defaults to 'dummy' + expected []string + machineIds []string + placement []*instance.Placement + err string +}{ + { + about: "valid placement directives", + expected: []string{"dummy/0"}, + placement: []*instance.Placement{{"deadbeef-0bad-400d-8000-4b1d0d06f00d", "valid"}}, + machineIds: []string{"1"}, + }, { + about: "direct machine assignment placement directive", + expected: []string{"dummy/1", "dummy/2"}, + placement: []*instance.Placement{{"#", "1"}, {"lxc", "1"}}, + machineIds: []string{"1", "1/lxc/0"}, + }, { + about: "invalid placement directive", + err: ".* invalid placement is invalid", + expected: []string{"dummy/3"}, + placement: []*instance.Placement{{"deadbeef-0bad-400d-8000-4b1d0d06f00d", "invalid"}}, + }, +} + +func (s *serviceSuite) TestAddServiceUnits(c *gc.C) { + s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + // Add a machine for the units to be placed on. + _, err := s.State.AddMachine("quantal", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + for i, t := range addServiceUnitTests { + c.Logf("test %d. 
%s", i, t.about) + serviceName := t.service + if serviceName == "" { + serviceName = "dummy" + } + result, err := s.serviceApi.AddUnits(params.AddServiceUnits{ + ServiceName: serviceName, + NumUnits: len(t.expected), + Placement: t.placement, + }) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + continue + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Units, gc.DeepEquals, t.expected) + for i, unitName := range result.Units { + u, err := s.BackingState.Unit(unitName) + c.Assert(err, jc.ErrorIsNil) + assignedMachine, err := u.AssignedMachineId() + c.Assert(err, jc.ErrorIsNil) + c.Assert(assignedMachine, gc.Equals, t.machineIds[i]) + } + } +} + +func (s *serviceSuite) assertAddServiceUnits(c *gc.C) { + result, err := s.serviceApi.AddUnits(params.AddServiceUnits{ + ServiceName: "dummy", + NumUnits: 3, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Units, gc.DeepEquals, []string{"dummy/0", "dummy/1", "dummy/2"}) + + // Test that we actually assigned the unit to machine 0 + forcedUnit, err := s.BackingState.Unit("dummy/0") + c.Assert(err, jc.ErrorIsNil) + assignedMachine, err := forcedUnit.AssignedMachineId() + c.Assert(err, jc.ErrorIsNil) + c.Assert(assignedMachine, gc.Equals, "0") +} + +func (s *serviceSuite) TestServiceCharmRelations(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + eps, err := s.State.InferEndpoints("logging", "wordpress") + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + + _, err = s.serviceApi.CharmRelations(params.ServiceCharmRelations{"blah"}) + c.Assert(err, gc.ErrorMatches, `service "blah" not found`) + + result, err := s.serviceApi.CharmRelations(params.ServiceCharmRelations{"wordpress"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.CharmRelations, gc.DeepEquals, []string{ + "cache", "db", "juju-info", "logging-dir", "monitoring-port", "url", + }) +} + +func (s *serviceSuite) assertAddServiceUnitsBlocked(c *gc.C, msg string) { + _, err := s.serviceApi.AddUnits(params.AddServiceUnits{ + ServiceName: "dummy", + NumUnits: 3, + }) + s.AssertBlocked(c, err, msg) +} + +func (s *serviceSuite) TestBlockDestroyAddServiceUnits(c *gc.C) { + s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + s.BlockDestroyModel(c, "TestBlockDestroyAddServiceUnits") + s.assertAddServiceUnits(c) +} + +func (s *serviceSuite) TestBlockRemoveAddServiceUnits(c *gc.C) { + s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + s.BlockRemoveObject(c, "TestBlockRemoveAddServiceUnits") + s.assertAddServiceUnits(c) +} + +func (s *serviceSuite) TestBlockChangeAddServiceUnits(c *gc.C) { + s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + s.BlockAllChanges(c, "TestBlockChangeAddServiceUnits") + s.assertAddServiceUnitsBlocked(c, "TestBlockChangeAddServiceUnits") +} + +func (s *serviceSuite) TestAddUnitToMachineNotFound(c *gc.C) { + s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + _, err := s.serviceApi.AddUnits(params.AddServiceUnits{ + ServiceName: "dummy", + NumUnits: 3, + Placement: []*instance.Placement{instance.MustParsePlacement("42")}, + }) + c.Assert(err, gc.ErrorMatches, `adding new machine to host unit "dummy/0": machine 42 not found`) +} + +func (s *serviceSuite) TestServiceExpose(c *gc.C) { + charm := s.AddTestingCharm(c, "dummy") + serviceNames := []string{"dummy-service", "exposed-service"} + svcs := make([]*state.Service, len(serviceNames)) + var err error + 
for i, name := range serviceNames { + svcs[i] = s.AddTestingService(c, name, charm) + c.Assert(svcs[i].IsExposed(), jc.IsFalse) + } + err = svcs[1].SetExposed() + c.Assert(err, jc.ErrorIsNil) + c.Assert(svcs[1].IsExposed(), jc.IsTrue) + for i, t := range serviceExposeTests { + c.Logf("test %d. %s", i, t.about) + err = s.serviceApi.Expose(params.ServiceExpose{t.service}) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + } else { + c.Assert(err, jc.ErrorIsNil) + service, err := s.State.Service(t.service) + c.Assert(err, jc.ErrorIsNil) + c.Assert(service.IsExposed(), gc.Equals, t.exposed) + } + } +} + +func (s *serviceSuite) setupServiceExpose(c *gc.C) { + charm := s.AddTestingCharm(c, "dummy") + serviceNames := []string{"dummy-service", "exposed-service"} + svcs := make([]*state.Service, len(serviceNames)) + var err error + for i, name := range serviceNames { + svcs[i] = s.AddTestingService(c, name, charm) + c.Assert(svcs[i].IsExposed(), jc.IsFalse) + } + err = svcs[1].SetExposed() + c.Assert(err, jc.ErrorIsNil) + c.Assert(svcs[1].IsExposed(), jc.IsTrue) +} + +var serviceExposeTests = []struct { + about string + service string + err string + exposed bool +}{ + { + about: "unknown service name", + service: "unknown-service", + err: `service "unknown-service" not found`, + }, + { + about: "expose a service", + service: "dummy-service", + exposed: true, + }, + { + about: "expose an already exposed service", + service: "exposed-service", + exposed: true, + }, +} + +func (s *serviceSuite) assertServiceExpose(c *gc.C) { + for i, t := range serviceExposeTests { + c.Logf("test %d. %s", i, t.about) + err := s.serviceApi.Expose(params.ServiceExpose{t.service}) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + } else { + c.Assert(err, jc.ErrorIsNil) + service, err := s.State.Service(t.service) + c.Assert(err, jc.ErrorIsNil) + c.Assert(service.IsExposed(), gc.Equals, t.exposed) + } + } +} + +func (s *serviceSuite) assertServiceExposeBlocked(c *gc.C, msg string) { + for i, t := range serviceExposeTests { + c.Logf("test %d. %s", i, t.about) + err := s.serviceApi.Expose(params.ServiceExpose{t.service}) + s.AssertBlocked(c, err, msg) + } +} + +func (s *serviceSuite) TestBlockDestroyServiceExpose(c *gc.C) { + s.setupServiceExpose(c) + s.BlockDestroyModel(c, "TestBlockDestroyServiceExpose") + s.assertServiceExpose(c) +} + +func (s *serviceSuite) TestBlockRemoveServiceExpose(c *gc.C) { + s.setupServiceExpose(c) + s.BlockRemoveObject(c, "TestBlockRemoveServiceExpose") + s.assertServiceExpose(c) +} + +func (s *serviceSuite) TestBlockChangesServiceExpose(c *gc.C) { + s.setupServiceExpose(c) + s.BlockAllChanges(c, "TestBlockChangesServiceExpose") + s.assertServiceExposeBlocked(c, "TestBlockChangesServiceExpose") +} + +var serviceUnexposeTests = []struct { + about string + service string + err string + initial bool + expected bool +}{ + { + about: "unknown service name", + service: "unknown-service", + err: `service "unknown-service" not found`, + }, + { + about: "unexpose a service", + service: "dummy-service", + initial: true, + expected: false, + }, + { + about: "unexpose an already unexposed service", + service: "dummy-service", + initial: false, + expected: false, + }, +} + +func (s *serviceSuite) TestServiceUnexpose(c *gc.C) { + charm := s.AddTestingCharm(c, "dummy") + for i, t := range serviceUnexposeTests { + c.Logf("test %d. 
%s", i, t.about) + svc := s.AddTestingService(c, "dummy-service", charm) + if t.initial { + svc.SetExposed() + } + c.Assert(svc.IsExposed(), gc.Equals, t.initial) + err := s.serviceApi.Unexpose(params.ServiceUnexpose{t.service}) + if t.err == "" { + c.Assert(err, jc.ErrorIsNil) + svc.Refresh() + c.Assert(svc.IsExposed(), gc.Equals, t.expected) + } else { + c.Assert(err, gc.ErrorMatches, t.err) + } + err = svc.Destroy() + c.Assert(err, jc.ErrorIsNil) + } +} + +func (s *serviceSuite) setupServiceUnexpose(c *gc.C) *state.Service { + charm := s.AddTestingCharm(c, "dummy") + svc := s.AddTestingService(c, "dummy-service", charm) + svc.SetExposed() + c.Assert(svc.IsExposed(), gc.Equals, true) + return svc +} + +func (s *serviceSuite) assertServiceUnexpose(c *gc.C, svc *state.Service) { + err := s.serviceApi.Unexpose(params.ServiceUnexpose{"dummy-service"}) + c.Assert(err, jc.ErrorIsNil) + svc.Refresh() + c.Assert(svc.IsExposed(), gc.Equals, false) + err = svc.Destroy() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *serviceSuite) assertServiceUnexposeBlocked(c *gc.C, svc *state.Service, msg string) { + err := s.serviceApi.Unexpose(params.ServiceUnexpose{"dummy-service"}) + s.AssertBlocked(c, err, msg) + err = svc.Destroy() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *serviceSuite) TestBlockDestroyServiceUnexpose(c *gc.C) { + svc := s.setupServiceUnexpose(c) + s.BlockDestroyModel(c, "TestBlockDestroyServiceUnexpose") + s.assertServiceUnexpose(c, svc) +} + +func (s *serviceSuite) TestBlockRemoveServiceUnexpose(c *gc.C) { + svc := s.setupServiceUnexpose(c) + s.BlockRemoveObject(c, "TestBlockRemoveServiceUnexpose") + s.assertServiceUnexpose(c, svc) +} + +func (s *serviceSuite) TestBlockChangesServiceUnexpose(c *gc.C) { + svc := s.setupServiceUnexpose(c) + s.BlockAllChanges(c, "TestBlockChangesServiceUnexpose") + s.assertServiceUnexposeBlocked(c, svc, "TestBlockChangesServiceUnexpose") +} + +var serviceDestroyTests = []struct { + about string + service string + err string +}{ + { + about: "unknown service name", + service: "unknown-service", + err: `service "unknown-service" not found`, + }, + { + about: "destroy a service", + service: "dummy-service", + }, + { + about: "destroy an already destroyed service", + service: "dummy-service", + err: `service "dummy-service" not found`, + }, +} + +func (s *serviceSuite) TestServiceDestroy(c *gc.C) { + s.AddTestingService(c, "dummy-service", s.AddTestingCharm(c, "dummy")) + for i, t := range serviceDestroyTests { + c.Logf("test %d. %s", i, t.about) + err := s.serviceApi.Destroy(params.ServiceDestroy{t.service}) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + } else { + c.Assert(err, jc.ErrorIsNil) + } + } + + // Now do Destroy on a service with units. Destroy will + // cause the service to be not-Alive, but will not remove its + // document. 
+ s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + serviceName := "wordpress" + service, err := s.State.Service(serviceName) + c.Assert(err, jc.ErrorIsNil) + err = s.serviceApi.Destroy(params.ServiceDestroy{serviceName}) + c.Assert(err, jc.ErrorIsNil) + err = service.Refresh() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func assertLife(c *gc.C, entity state.Living, life state.Life) { + err := entity.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(entity.Life(), gc.Equals, life) +} + +func (s *serviceSuite) TestBlockServiceDestroy(c *gc.C) { + s.AddTestingService(c, "dummy-service", s.AddTestingCharm(c, "dummy")) + + // block remove-objects + s.BlockRemoveObject(c, "TestBlockServiceDestroy") + err := s.serviceApi.Destroy(params.ServiceDestroy{"dummy-service"}) + s.AssertBlocked(c, err, "TestBlockServiceDestroy") + // Tests may have invalid service names. + service, err := s.State.Service("dummy-service") + if err == nil { + // For valid service names, check that service is alive :-) + assertLife(c, service, state.Alive) + } +} + +func (s *serviceSuite) TestDestroyPrincipalUnits(c *gc.C) { + wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + units := make([]*state.Unit, 5) + for i := range units { + unit, err := wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + err = unit.SetAgentStatus(state.StatusIdle, "", nil) + c.Assert(err, jc.ErrorIsNil) + units[i] = unit + } + s.assertDestroyPrincipalUnits(c, units) +} + +func (s *serviceSuite) TestDestroySubordinateUnits(c *gc.C) { + wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + wordpress0, err := wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + eps, err := s.State.InferEndpoints("logging", "wordpress") + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + ru, err := rel.Unit(wordpress0) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + logging0, err := s.State.Unit("logging/0") + c.Assert(err, jc.ErrorIsNil) + + // Try to destroy the subordinate alone; check it fails. + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"logging/0"}, + }) + c.Assert(err, gc.ErrorMatches, `no units were destroyed: unit "logging/0" is a subordinate`) + assertLife(c, logging0, state.Alive) + + s.assertDestroySubordinateUnits(c, wordpress0, logging0) +} + +func (s *serviceSuite) assertDestroyPrincipalUnits(c *gc.C, units []*state.Unit) { + // Destroy 2 of them; check they become Dying. + err := s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "wordpress/1"}, + }) + c.Assert(err, jc.ErrorIsNil) + assertLife(c, units[0], state.Dying) + assertLife(c, units[1], state.Dying) + + // Try to destroy an Alive one and a Dying one; check + // it destroys the Alive one and ignores the Dying one. + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/2", "wordpress/0"}, + }) + c.Assert(err, jc.ErrorIsNil) + assertLife(c, units[2], state.Dying) + + // Try to destroy an Alive one along with a nonexistent one; check that + // the valid instruction is followed but the invalid one is warned about. 
+ err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"boojum/123", "wordpress/3"}, + }) + c.Assert(err, gc.ErrorMatches, `some units were not destroyed: unit "boojum/123" does not exist`) + assertLife(c, units[3], state.Dying) + + // Make one Dead, and destroy an Alive one alongside it; check no errors. + wp0, err := s.State.Unit("wordpress/0") + c.Assert(err, jc.ErrorIsNil) + err = wp0.EnsureDead() + c.Assert(err, jc.ErrorIsNil) + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "wordpress/4"}, + }) + c.Assert(err, jc.ErrorIsNil) + assertLife(c, units[0], state.Dead) + assertLife(c, units[4], state.Dying) +} + +func (s *serviceSuite) setupDestroyPrincipalUnits(c *gc.C) []*state.Unit { + units := make([]*state.Unit, 5) + wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + for i := range units { + unit, err := wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + err = unit.SetAgentStatus(state.StatusIdle, "", nil) + c.Assert(err, jc.ErrorIsNil) + units[i] = unit + } + return units +} + +func (s *serviceSuite) assertBlockedErrorAndLiveliness( + c *gc.C, + err error, + msg string, + living1 state.Living, + living2 state.Living, + living3 state.Living, + living4 state.Living, +) { + s.AssertBlocked(c, err, msg) + assertLife(c, living1, state.Alive) + assertLife(c, living2, state.Alive) + assertLife(c, living3, state.Alive) + assertLife(c, living4, state.Alive) +} + +func (s *serviceSuite) TestBlockChangesDestroyPrincipalUnits(c *gc.C) { + units := s.setupDestroyPrincipalUnits(c) + s.BlockAllChanges(c, "TestBlockChangesDestroyPrincipalUnits") + err := s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "wordpress/1"}, + }) + s.assertBlockedErrorAndLiveliness(c, err, "TestBlockChangesDestroyPrincipalUnits", units[0], units[1], units[2], units[3]) +} + +func (s *serviceSuite) TestBlockRemoveDestroyPrincipalUnits(c *gc.C) { + units := s.setupDestroyPrincipalUnits(c) + s.BlockRemoveObject(c, "TestBlockRemoveDestroyPrincipalUnits") + err := s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "wordpress/1"}, + }) + s.assertBlockedErrorAndLiveliness(c, err, "TestBlockRemoveDestroyPrincipalUnits", units[0], units[1], units[2], units[3]) +} + +func (s *serviceSuite) TestBlockDestroyDestroyPrincipalUnits(c *gc.C) { + units := s.setupDestroyPrincipalUnits(c) + s.BlockDestroyModel(c, "TestBlockDestroyDestroyPrincipalUnits") + err := s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "wordpress/1"}, + }) + c.Assert(err, jc.ErrorIsNil) + assertLife(c, units[0], state.Dying) + assertLife(c, units[1], state.Dying) +} + +func (s *serviceSuite) assertDestroySubordinateUnits(c *gc.C, wordpress0, logging0 *state.Unit) { + // Try to destroy the principal and the subordinate together; check it warns + // about the subordinate, but destroys the one it can. (The principal unit + // agent will be responsible for destroying the subordinate.) 
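+ // (So wordpress/0 should go Dying here while logging/0 stays Alive.)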
+ err := s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "logging/0"}, + }) + c.Assert(err, gc.ErrorMatches, `some units were not destroyed: unit "logging/0" is a subordinate`) + assertLife(c, wordpress0, state.Dying) + assertLife(c, logging0, state.Alive) +} + +func (s *serviceSuite) TestBlockRemoveDestroySubordinateUnits(c *gc.C) { + wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + wordpress0, err := wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + eps, err := s.State.InferEndpoints("logging", "wordpress") + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + ru, err := rel.Unit(wordpress0) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + logging0, err := s.State.Unit("logging/0") + c.Assert(err, jc.ErrorIsNil) + + s.BlockRemoveObject(c, "TestBlockRemoveDestroySubordinateUnits") + // Try to destroy the subordinate alone; check it fails. + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"logging/0"}, + }) + s.AssertBlocked(c, err, "TestBlockRemoveDestroySubordinateUnits") + assertLife(c, rel, state.Alive) + assertLife(c, wordpress0, state.Alive) + assertLife(c, logging0, state.Alive) + + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "logging/0"}, + }) + s.AssertBlocked(c, err, "TestBlockRemoveDestroySubordinateUnits") + assertLife(c, wordpress0, state.Alive) + assertLife(c, logging0, state.Alive) + assertLife(c, rel, state.Alive) +} + +func (s *serviceSuite) TestBlockChangesDestroySubordinateUnits(c *gc.C) { + wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + wordpress0, err := wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + eps, err := s.State.InferEndpoints("logging", "wordpress") + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + ru, err := rel.Unit(wordpress0) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + logging0, err := s.State.Unit("logging/0") + c.Assert(err, jc.ErrorIsNil) + + s.BlockAllChanges(c, "TestBlockChangesDestroySubordinateUnits") + // Try to destroy the subordinate alone; check it fails. + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"logging/0"}, + }) + s.AssertBlocked(c, err, "TestBlockChangesDestroySubordinateUnits") + assertLife(c, rel, state.Alive) + assertLife(c, wordpress0, state.Alive) + assertLife(c, logging0, state.Alive) + + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"wordpress/0", "logging/0"}, + }) + s.AssertBlocked(c, err, "TestBlockChangesDestroySubordinateUnits") + assertLife(c, wordpress0, state.Alive) + assertLife(c, logging0, state.Alive) + assertLife(c, rel, state.Alive) +} + +func (s *serviceSuite) TestBlockDestroyDestroySubordinateUnits(c *gc.C) { + wordpress := s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + wordpress0, err := wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + eps, err := s.State.InferEndpoints("logging", "wordpress") + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) 
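+ // Entering the relation scope below is what brings the subordinate
+ // unit logging/0 into existence.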
+ c.Assert(err, jc.ErrorIsNil) + ru, err := rel.Unit(wordpress0) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + logging0, err := s.State.Unit("logging/0") + c.Assert(err, jc.ErrorIsNil) + + s.BlockDestroyModel(c, "TestBlockDestroyDestroySubordinateUnits") + // Try to destroy the subordinate alone; check it fails. + err = s.serviceApi.DestroyUnits(params.DestroyServiceUnits{ + UnitNames: []string{"logging/0"}, + }) + c.Assert(err, gc.ErrorMatches, `no units were destroyed: unit "logging/0" is a subordinate`) + assertLife(c, logging0, state.Alive) + + s.assertDestroySubordinateUnits(c, wordpress0, logging0) +} + +func (s *serviceSuite) TestClientSetServiceConstraints(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Update constraints for the service. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + err = s.serviceApi.SetConstraints(params.SetConstraints{ServiceName: "dummy", Constraints: cons}) + c.Assert(err, jc.ErrorIsNil) + + // Ensure the constraints have been correctly updated. + obtained, err := service.Constraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, cons) +} + +func (s *serviceSuite) setupSetServiceConstraints(c *gc.C) (*state.Service, constraints.Value) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + // Update constraints for the service. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + return service, cons +} + +func (s *serviceSuite) assertSetServiceConstraints(c *gc.C, service *state.Service, cons constraints.Value) { + err := s.serviceApi.SetConstraints(params.SetConstraints{ServiceName: "dummy", Constraints: cons}) + c.Assert(err, jc.ErrorIsNil) + // Ensure the constraints have been correctly updated. + obtained, err := service.Constraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtained, gc.DeepEquals, cons) +} + +func (s *serviceSuite) assertSetServiceConstraintsBlocked(c *gc.C, msg string, service *state.Service, cons constraints.Value) { + err := s.serviceApi.SetConstraints(params.SetConstraints{ServiceName: "dummy", Constraints: cons}) + s.AssertBlocked(c, err, msg) +} + +func (s *serviceSuite) TestBlockDestroySetServiceConstraints(c *gc.C) { + svc, cons := s.setupSetServiceConstraints(c) + s.BlockDestroyModel(c, "TestBlockDestroySetServiceConstraints") + s.assertSetServiceConstraints(c, svc, cons) +} + +func (s *serviceSuite) TestBlockRemoveSetServiceConstraints(c *gc.C) { + svc, cons := s.setupSetServiceConstraints(c) + s.BlockRemoveObject(c, "TestBlockRemoveSetServiceConstraints") + s.assertSetServiceConstraints(c, svc, cons) +} + +func (s *serviceSuite) TestBlockChangesSetServiceConstraints(c *gc.C) { + svc, cons := s.setupSetServiceConstraints(c) + s.BlockAllChanges(c, "TestBlockChangesSetServiceConstraints") + s.assertSetServiceConstraintsBlocked(c, "TestBlockChangesSetServiceConstraints", svc, cons) +} + +func (s *serviceSuite) TestClientGetServiceConstraints(c *gc.C) { + service := s.AddTestingService(c, "dummy", s.AddTestingCharm(c, "dummy")) + + // Set constraints for the service. + cons, err := constraints.Parse("mem=4096", "cpu-cores=2") + c.Assert(err, jc.ErrorIsNil) + err = service.SetConstraints(cons) + c.Assert(err, jc.ErrorIsNil) + + // Check we can get the constraints. 
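+ // (GetConstraints should hand back exactly the value stored above via
+ // SetConstraints.)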
+ result, err := s.serviceApi.GetConstraints(params.GetServiceConstraints{"dummy"})
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result.Constraints, gc.DeepEquals, cons)
+}
+
+func (s *serviceSuite) checkEndpoints(c *gc.C, endpoints map[string]charm.Relation) {
+ c.Assert(endpoints["wordpress"], gc.DeepEquals, charm.Relation{
+ Name: "db",
+ Role: charm.RelationRole("requirer"),
+ Interface: "mysql",
+ Optional: false,
+ Limit: 1,
+ Scope: charm.RelationScope("global"),
+ })
+ c.Assert(endpoints["mysql"], gc.DeepEquals, charm.Relation{
+ Name: "server",
+ Role: charm.RelationRole("provider"),
+ Interface: "mysql",
+ Optional: false,
+ Limit: 0,
+ Scope: charm.RelationScope("global"),
+ })
+}
+
+func (s *serviceSuite) setupRelationScenario(c *gc.C) {
+ s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
+ s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging"))
+ eps, err := s.State.InferEndpoints("logging", "wordpress")
+ c.Assert(err, jc.ErrorIsNil)
+ _, err = s.State.AddRelation(eps...)
+ c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *serviceSuite) assertAddRelation(c *gc.C, endpoints []string) {
+ s.setupRelationScenario(c)
+ res, err := s.serviceApi.AddRelation(params.AddRelation{Endpoints: endpoints})
+ c.Assert(err, jc.ErrorIsNil)
+ s.checkEndpoints(c, res.Endpoints)
+ // Show that the relation was added.
+ wpSvc, err := s.State.Service("wordpress")
+ c.Assert(err, jc.ErrorIsNil)
+ rels, err := wpSvc.Relations()
+ c.Assert(err, jc.ErrorIsNil)
+ // There are 2 relations - the logging-wordpress one set up in the
+ // scenario and the one created in this test.
+ c.Assert(len(rels), gc.Equals, 2)
+ mySvc, err := s.State.Service("mysql")
+ c.Assert(err, jc.ErrorIsNil)
+ rels, err = mySvc.Relations()
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(len(rels), gc.Equals, 1)
+}
+
+func (s *serviceSuite) TestSuccessfullyAddRelation(c *gc.C) {
+ endpoints := []string{"wordpress", "mysql"}
+ s.assertAddRelation(c, endpoints)
+}
+
+func (s *serviceSuite) TestBlockDestroyAddRelation(c *gc.C) {
+ s.BlockDestroyModel(c, "TestBlockDestroyAddRelation")
+ s.assertAddRelation(c, []string{"wordpress", "mysql"})
+}
+func (s *serviceSuite) TestBlockRemoveAddRelation(c *gc.C) {
+ s.BlockRemoveObject(c, "TestBlockRemoveAddRelation")
+ s.assertAddRelation(c, []string{"wordpress", "mysql"})
+}
+
+func (s *serviceSuite) TestBlockChangesAddRelation(c *gc.C) {
+ s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress"))
+ s.BlockAllChanges(c, "TestBlockChangesAddRelation")
+ _, err := s.serviceApi.AddRelation(params.AddRelation{Endpoints: []string{"wordpress", "mysql"}})
+ s.AssertBlocked(c, err, "TestBlockChangesAddRelation")
+}
+
+func (s *serviceSuite) TestSuccessfullyAddRelationSwapped(c *gc.C) {
+ // Show that the order of the services listed in the AddRelation call
+ // does not matter. This is a repeat of the previous test with the service
+ // names swapped.
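+ // (InferEndpoints should resolve the pair to the same wordpress:db
+ // mysql:server relation regardless of argument order.)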
+ endpoints := []string{"mysql", "wordpress"} + s.assertAddRelation(c, endpoints) +} + +func (s *serviceSuite) TestCallWithOnlyOneEndpoint(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + endpoints := []string{"wordpress"} + _, err := s.serviceApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, "no relations found") +} + +func (s *serviceSuite) TestCallWithOneEndpointTooMany(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + endpoints := []string{"wordpress", "mysql", "logging"} + _, err := s.serviceApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, "cannot relate 3 endpoints") +} + +func (s *serviceSuite) TestAddAlreadyAddedRelation(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + // Add a relation between wordpress and mysql. + endpoints := []string{"wordpress", "mysql"} + eps, err := s.State.InferEndpoints(endpoints...) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + // And try to add it again. + _, err = s.serviceApi.AddRelation(params.AddRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, `cannot add relation "wordpress:db mysql:server": relation already exists`) +} + +func (s *serviceSuite) setupDestroyRelationScenario(c *gc.C, endpoints []string) *state.Relation { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + // Add a relation between the endpoints. + eps, err := s.State.InferEndpoints(endpoints...) + c.Assert(err, jc.ErrorIsNil) + relation, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + return relation +} + +func (s *serviceSuite) assertDestroyRelation(c *gc.C, endpoints []string) { + s.assertDestroyRelationSuccess( + c, + s.setupDestroyRelationScenario(c, endpoints), + endpoints) +} + +func (s *serviceSuite) assertDestroyRelationSuccess(c *gc.C, relation *state.Relation, endpoints []string) { + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + c.Assert(err, jc.ErrorIsNil) + // Show that the relation was removed. + c.Assert(relation.Refresh(), jc.Satisfies, errors.IsNotFound) +} + +func (s *serviceSuite) TestSuccessfulDestroyRelation(c *gc.C) { + endpoints := []string{"wordpress", "mysql"} + s.assertDestroyRelation(c, endpoints) +} + +func (s *serviceSuite) TestSuccessfullyDestroyRelationSwapped(c *gc.C) { + // Show that the order of the services listed in the DestroyRelation call + // does not matter. This is a repeat of the previous test with the service + // names swapped. 
+ endpoints := []string{"mysql", "wordpress"} + s.assertDestroyRelation(c, endpoints) +} + +func (s *serviceSuite) TestNoRelation(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + endpoints := []string{"wordpress", "mysql"} + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, `relation "wordpress:db mysql:server" not found`) +} + +func (s *serviceSuite) TestAttemptDestroyingNonExistentRelation(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + s.AddTestingService(c, "riak", s.AddTestingCharm(c, "riak")) + endpoints := []string{"riak", "wordpress"} + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, "no relations found") +} + +func (s *serviceSuite) TestAttemptDestroyingWithOnlyOneEndpoint(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + endpoints := []string{"wordpress"} + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, "no relations found") +} + +func (s *serviceSuite) TestAttemptDestroyingPeerRelation(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + s.AddTestingService(c, "riak", s.AddTestingCharm(c, "riak")) + + endpoints := []string{"riak:ring"} + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, `cannot destroy relation "riak:ring": is a peer relation`) +} + +func (s *serviceSuite) TestAttemptDestroyingAlreadyDestroyedRelation(c *gc.C) { + s.AddTestingService(c, "wordpress", s.AddTestingCharm(c, "wordpress")) + + // Add a relation between wordpress and mysql. + eps, err := s.State.InferEndpoints("wordpress", "mysql") + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + + endpoints := []string{"wordpress", "mysql"} + err = s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + // Show that the relation was removed. + c.Assert(rel.Refresh(), jc.Satisfies, errors.IsNotFound) + + // And try to destroy it again. 
+ err = s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + c.Assert(err, gc.ErrorMatches, `relation "wordpress:db mysql:server" not found`) +} + +func (s *serviceSuite) TestBlockRemoveDestroyRelation(c *gc.C) { + endpoints := []string{"wordpress", "mysql"} + relation := s.setupDestroyRelationScenario(c, endpoints) + // block remove-objects + s.BlockRemoveObject(c, "TestBlockRemoveDestroyRelation") + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + s.AssertBlocked(c, err, "TestBlockRemoveDestroyRelation") + assertLife(c, relation, state.Alive) +} + +func (s *serviceSuite) TestBlockChangeDestroyRelation(c *gc.C) { + endpoints := []string{"wordpress", "mysql"} + relation := s.setupDestroyRelationScenario(c, endpoints) + s.BlockAllChanges(c, "TestBlockChangeDestroyRelation") + err := s.serviceApi.DestroyRelation(params.DestroyRelation{Endpoints: endpoints}) + s.AssertBlocked(c, err, "TestBlockChangeDestroyRelation") + assertLife(c, relation, state.Alive) +} + +func (s *serviceSuite) TestBlockDestroyDestroyRelation(c *gc.C) { + s.BlockDestroyModel(c, "TestBlockDestroyDestroyRelation") + endpoints := []string{"wordpress", "mysql"} + s.assertDestroyRelation(c, endpoints) +} + type mockStorageProvider struct { storage.Provider kind storage.StorageKind === added directory 'src/github.com/juju/juju/apiserver/singular' === added file 'src/github.com/juju/juju/apiserver/singular/fixture_test.go' --- src/github.com/juju/juju/apiserver/singular/fixture_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/singular/fixture_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package singular_test + +import ( + "time" + + "github.com/juju/names" + "github.com/juju/testing" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" +) + +// mockAuth represents a machine which may or may not be an environ manager. +type mockAuth struct { + common.Authorizer + nonManager bool +} + +// AuthModelManager is part of the common.Authorizer interface. +func (mock mockAuth) AuthModelManager() bool { + return !mock.nonManager +} + +// GetAuthTag is part of the common.Authorizer interface. +func (mockAuth) GetAuthTag() names.Tag { + return names.NewMachineTag("123") +} + +// mockBackend implements singular.Backend and lease.Claimer. +type mockBackend struct { + stub testing.Stub +} + +// ModelTag is part of the singular.Backend interface. +func (mock *mockBackend) ModelTag() names.ModelTag { + return coretesting.ModelTag +} + +// SingularClaimer is part of the singular.Backend interface. +func (mock *mockBackend) SingularClaimer() lease.Claimer { + return mock +} + +// Claim is part of the lease.Claimer interface. +func (mock *mockBackend) Claim(lease, holder string, duration time.Duration) error { + mock.stub.AddCall("Claim", lease, holder, duration) + return mock.stub.NextErr() +} + +// WaitUntilExpired is part of the lease.Claimer interface. 
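+// Like Claim, it records the call on the stub so tests can assert on the
+// arguments used and the order of calls.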
+func (mock *mockBackend) WaitUntilExpired(lease string) error { + mock.stub.AddCall("WaitUntilExpired", lease) + return mock.stub.NextErr() +} === added file 'src/github.com/juju/juju/apiserver/singular/package_test.go' --- src/github.com/juju/juju/apiserver/singular/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/singular/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package singular_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/apiserver/singular/singular.go' --- src/github.com/juju/juju/apiserver/singular/singular.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/singular/singular.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package singular + +import ( + "time" + + "github.com/juju/names" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/lease" + "github.com/juju/juju/state" +) + +func init() { + common.RegisterStandardFacade( + "Singular", 1, + func(st *state.State, _ *common.Resources, auth common.Authorizer) (*Facade, error) { + return NewFacade(st, auth) + }, + ) +} + +// Backend supplies capabilities required by a Facade. +type Backend interface { + + // ModelTag tells the Facade what models it should consider requests for. + ModelTag() names.ModelTag + + // SingularClaimer allows the Facade to make claims. + SingularClaimer() lease.Claimer +} + +// NewFacade returns a singular-controller API facade, backed by the supplied +// state, so long as the authorizer represents a controller machine. +func NewFacade(backend Backend, auth common.Authorizer) (*Facade, error) { + if !auth.AuthModelManager() { + return nil, common.ErrPerm + } + return &Facade{ + auth: auth, + model: backend.ModelTag(), + claimer: backend.SingularClaimer(), + }, nil +} + +// Facade allows controller machines to request exclusive rights to administer +// some specific model for a limited time. +type Facade struct { + auth common.Authorizer + model names.ModelTag + claimer lease.Claimer +} + +// Wait waits for the singular-controller lease to expire for all supplied +// entities. (In practice, any requests that do not refer to the connection's +// model will be rejected.) +func (facade *Facade) Wait(args params.Entities) (result params.ErrorResults) { + result.Results = make([]params.ErrorResult, len(args.Entities)) + for i, entity := range args.Entities { + var err error + switch { + case entity.Tag != facade.model.String(): + err = common.ErrPerm + default: + err = facade.claimer.WaitUntilExpired(facade.model.Id()) + } + result.Results[i].Error = common.ServerError(err) + } + return result +} + +// Claim makes the supplied singular-controller lease requests. (In practice, +// any requests not for the connection's model, or not on behalf of the +// connected EnvironManager machine, will be rejected.) 
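+// Each claim is validated in order: model tag, then controller tag, then
+// duration bounds; the first check that fails yields ErrPerm for that entry.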
+func (facade *Facade) Claim(args params.SingularClaims) (result params.ErrorResults) { + result.Results = make([]params.ErrorResult, len(args.Claims)) + for i, claim := range args.Claims { + var err error + switch { + case claim.ModelTag != facade.model.String(): + err = common.ErrPerm + case claim.ControllerTag != facade.auth.GetAuthTag().String(): + err = common.ErrPerm + case !allowedDuration(claim.Duration): + err = common.ErrPerm + default: + err = facade.claimer.Claim(facade.model.Id(), claim.ControllerTag, claim.Duration) + } + result.Results[i].Error = common.ServerError(err) + } + return result +} + +// allowedDuration returns true if the supplied duration is at least one second, +// and no more than one minute. (We expect to refine the lease-length times, but +// these seem like reasonable bounds.) +func allowedDuration(duration time.Duration) bool { + if duration < time.Second { + return false + } + return duration <= time.Minute +} === added file 'src/github.com/juju/juju/apiserver/singular/singular_test.go' --- src/github.com/juju/juju/apiserver/singular/singular_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/singular/singular_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,176 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package singular_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/apiserver/singular" + "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" +) + +type SingularSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&SingularSuite{}) + +func (s *SingularSuite) TestRequiresEnvironManager(c *gc.C) { + auth := mockAuth{nonManager: true} + facade, err := singular.NewFacade(nil, auth) + c.Check(facade, gc.IsNil) + c.Check(err, gc.Equals, common.ErrPerm) +} + +func (s *SingularSuite) TestAcceptsEnvironManager(c *gc.C) { + backend := &mockBackend{} + facade, err := singular.NewFacade(backend, mockAuth{}) + c.Check(facade, gc.NotNil) + c.Check(err, jc.ErrorIsNil) + + backend.stub.CheckCallNames(c) +} + +func (s *SingularSuite) TestInvalidClaims(c *gc.C) { + breakers := []func(claim *params.SingularClaim){ + func(claim *params.SingularClaim) { claim.ModelTag = "" }, + func(claim *params.SingularClaim) { claim.ModelTag = "machine-123" }, + func(claim *params.SingularClaim) { claim.ModelTag = "environ-blargle" }, + func(claim *params.SingularClaim) { claim.ControllerTag = "" }, + func(claim *params.SingularClaim) { claim.ControllerTag = "machine-456" }, + func(claim *params.SingularClaim) { claim.ControllerTag = coretesting.ModelTag.String() }, + func(claim *params.SingularClaim) { claim.Duration = time.Second - time.Millisecond }, + func(claim *params.SingularClaim) { claim.Duration = time.Minute + time.Millisecond }, + } + count := len(breakers) + + var claims params.SingularClaims + claims.Claims = make([]params.SingularClaim, count) + for i, breaker := range breakers { + claim := params.SingularClaim{ + ModelTag: coretesting.ModelTag.String(), + ControllerTag: "machine-123", + Duration: time.Minute, + } + breaker(&claim) + claims.Claims[i] = claim + } + + backend := &mockBackend{} + facade, err := singular.NewFacade(backend, mockAuth{}) + c.Assert(err, jc.ErrorIsNil) + result := facade.Claim(claims) + c.Assert(result.Results, gc.HasLen, count) + + 
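+ // Every malformed claim must come back permission-denied, and the
+ // backend stub must never have seen a Claim call.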
for i, result := range result.Results { + c.Logf("checking claim %d", i) + checkDenied(c, result) + } + backend.stub.CheckCallNames(c) +} + +func (s *SingularSuite) TestValidClaims(c *gc.C) { + durations := []time.Duration{ + time.Second, + 10 * time.Second, + 30 * time.Second, + time.Minute, + } + errors := []error{ + nil, + errors.New("pow!"), + lease.ErrClaimDenied, + nil, + } + count := len(durations) + if len(errors) != count { + c.Fatalf("please fix your test data") + } + + var claims params.SingularClaims + claims.Claims = make([]params.SingularClaim, count) + expectCalls := []testing.StubCall{} + for i, duration := range durations { + claims.Claims[i] = params.SingularClaim{ + ModelTag: coretesting.ModelTag.String(), + ControllerTag: "machine-123", + Duration: duration, + } + expectCalls = append(expectCalls, testing.StubCall{ + FuncName: "Claim", + Args: []interface{}{ + coretesting.ModelTag.Id(), + "machine-123", + durations[i], + }, + }) + } + + backend := &mockBackend{} + backend.stub.SetErrors(errors...) + facade, err := singular.NewFacade(backend, mockAuth{}) + c.Assert(err, jc.ErrorIsNil) + result := facade.Claim(claims) + c.Assert(result.Results, gc.HasLen, count) + + for i, err := range result.Results { + switch errors[i] { + case nil: + c.Check(err.Error, gc.IsNil) + case lease.ErrClaimDenied: + c.Check(err.Error, jc.Satisfies, params.IsCodeLeaseClaimDenied) + default: + c.Check(err.Error.Error(), gc.Equals, errors[i].Error()) + } + } + backend.stub.CheckCalls(c, expectCalls) +} + +func (s *SingularSuite) TestWait(c *gc.C) { + waits := params.Entities{ + Entities: []params.Entity{{ + "machine-123", // rejected + }, { + "grarble floop", // rejected + }, { + coretesting.ModelTag.String(), // stub-error + }, { + coretesting.ModelTag.String(), // success + }}, + } + count := len(waits.Entities) + + backend := &mockBackend{} + backend.stub.SetErrors(errors.New("zap!"), nil) + facade, err := singular.NewFacade(backend, mockAuth{}) + c.Assert(err, jc.ErrorIsNil) + result := facade.Wait(waits) + c.Assert(result.Results, gc.HasLen, count) + + checkDenied(c, result.Results[0]) + checkDenied(c, result.Results[1]) + c.Check(result.Results[2].Error, gc.ErrorMatches, "zap!") + c.Check(result.Results[3].Error, gc.IsNil) + + backend.stub.CheckCalls(c, []testing.StubCall{{ + FuncName: "WaitUntilExpired", + Args: []interface{}{coretesting.ModelTag.Id()}, + }, { + FuncName: "WaitUntilExpired", + Args: []interface{}{coretesting.ModelTag.Id()}, + }}) +} + +func checkDenied(c *gc.C, result params.ErrorResult) { + c.Check(result.Error, gc.ErrorMatches, "permission denied") + c.Check(result.Error, jc.Satisfies, params.IsCodeUnauthorized) +} === removed file 'src/github.com/juju/juju/apiserver/spaces/shims.go' --- src/github.com/juju/juju/apiserver/spaces/shims.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/spaces/shims.go 1970-01-01 00:00:00 +0000 @@ -1,106 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package spaces - -import ( - "github.com/juju/errors" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" -) - -// NOTE: All of the following code is only tested with a feature test. - -// subnetShim forwards and adapts state.Subnets methods to -// common.BackingSubnet. 
-type subnetShim struct { - common.BackingSubnet - subnet *state.Subnet -} - -func (s *subnetShim) CIDR() string { - return s.subnet.CIDR() -} - -func (s *subnetShim) VLANTag() int { - return s.subnet.VLANTag() -} - -func (s *subnetShim) ProviderId() string { - return s.subnet.ProviderId() -} - -func (s *subnetShim) AvailabilityZones() []string { - // TODO(dimitern): Add multiple zones to state.Subnet. - return []string{s.subnet.AvailabilityZone()} -} - -func (s *subnetShim) Life() params.Life { - return params.Life(s.subnet.Life().String()) -} - -func (s *subnetShim) Status() string { - // TODO(dimitern): This should happen in a cleaner way. - if s.Life() != params.Alive { - return "terminating" - } - return "in-use" -} - -func (s *subnetShim) SpaceName() string { - return s.subnet.SpaceName() -} - -// spaceShim forwards and adapts state.Space methods to BackingSpace. -type spaceShim struct { - common.BackingSpace - space *state.Space -} - -func (s *spaceShim) Name() string { - return s.space.Name() -} - -func (s *spaceShim) Subnets() ([]common.BackingSubnet, error) { - results, err := s.space.Subnets() - if err != nil { - return nil, errors.Trace(err) - } - subnets := make([]common.BackingSubnet, len(results)) - for i, result := range results { - subnets[i] = &subnetShim{subnet: result} - } - return subnets, nil -} - -// stateShim forwards and adapts state.State methods to Backing -// method. -type stateShim struct { - Backing - st *state.State -} - -func (s *stateShim) EnvironConfig() (*config.Config, error) { - return s.st.EnvironConfig() -} - -func (s *stateShim) AddSpace(name string, subnetIds []string, public bool) error { - _, err := s.st.AddSpace(name, subnetIds, public) - return err -} - -func (s *stateShim) AllSpaces() ([]common.BackingSpace, error) { - // TODO(dimitern): Make this ListSpaces() instead. - results, err := s.st.AllSpaces() - if err != nil { - return nil, errors.Trace(err) - } - spaces := make([]common.BackingSpace, len(results)) - for i, result := range results { - spaces[i] = &spaceShim{space: result} - } - return spaces, nil -} === modified file 'src/github.com/juju/juju/apiserver/spaces/spaces.go' --- src/github.com/juju/juju/apiserver/spaces/spaces.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/spaces/spaces.go 2016-03-22 15:18:22 +0000 @@ -6,19 +6,17 @@ import ( "github.com/juju/errors" "github.com/juju/loggo" - "github.com/juju/names" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" "github.com/juju/juju/state" ) var logger = loggo.GetLogger("juju.apiserver.spaces") func init() { - common.RegisterStandardFacade("Spaces", 1, NewAPI) + common.RegisterStandardFacade("Spaces", 2, NewAPI) } // API defines the methods the Spaces API facade implements. @@ -27,22 +25,9 @@ ListSpaces() (params.ListSpacesResults, error) } -// Backing defines the state methods this facede needs, so they can be -// mocked for testing. -type Backing interface { - // EnvironConfig returns the configuration of the environment. - EnvironConfig() (*config.Config, error) - - // AddSpace creates a space. - AddSpace(name string, subnetIds []string, public bool) error - - // AllSpaces returns all known Juju network spaces. - AllSpaces() ([]common.BackingSpace, error) -} - // spacesAPI implements the API interface. 
type spacesAPI struct { - backing Backing + backing networkingcommon.NetworkBacking resources *common.Resources authorizer common.Authorizer } @@ -50,12 +35,12 @@ // NewAPI creates a new Space API server-side facade with a // state.State backing. func NewAPI(st *state.State, res *common.Resources, auth common.Authorizer) (API, error) { - return newAPIWithBacking(&stateShim{st: st}, res, auth) + return newAPIWithBacking(networkingcommon.NewStateShim(st), res, auth) } // newAPIWithBacking creates a new server-side Spaces API facade with // the given Backing. -func newAPIWithBacking(backing Backing, resources *common.Resources, authorizer common.Authorizer) (API, error) { +func newAPIWithBacking(backing networkingcommon.NetworkBacking, resources *common.Resources, authorizer common.Authorizer) (API, error) { // Only clients can access the Spaces facade. if !authorizer.AuthClient() { return nil, common.ErrPerm @@ -70,74 +55,12 @@ // CreateSpaces creates a new Juju network space, associating the // specified subnets with it (optional; can be empty). func (api *spacesAPI) CreateSpaces(args params.CreateSpacesParams) (results params.ErrorResults, err error) { - err = api.supportsSpaces() - if err != nil { - return results, common.ServerError(errors.Trace(err)) - } - - results.Results = make([]params.ErrorResult, len(args.Spaces)) - - for i, space := range args.Spaces { - err := api.createOneSpace(space) - if err == nil { - continue - } - results.Results[i].Error = common.ServerError(errors.Trace(err)) - } - - return results, nil -} - -func (api *spacesAPI) createOneSpace(args params.CreateSpaceParams) error { - // Validate the args, assemble information for api.backing.AddSpaces - var subnets []string - - spaceTag, err := names.ParseSpaceTag(args.SpaceTag) - if err != nil { - return errors.Trace(err) - } - - for _, tag := range args.SubnetTags { - subnetTag, err := names.ParseSubnetTag(tag) - if err != nil { - return errors.Trace(err) - } - subnets = append(subnets, subnetTag.Id()) - } - - // Add the validated space - err = api.backing.AddSpace(spaceTag.Id(), subnets, args.Public) - if err != nil { - return errors.Trace(err) - } - return nil -} - -func backingSubnetToParamsSubnet(subnet common.BackingSubnet) params.Subnet { - cidr := subnet.CIDR() - vlantag := subnet.VLANTag() - providerid := subnet.ProviderId() - zones := subnet.AvailabilityZones() - status := subnet.Status() - var spaceTag names.SpaceTag - if subnet.SpaceName() != "" { - spaceTag = names.NewSpaceTag(subnet.SpaceName()) - } - - return params.Subnet{ - CIDR: cidr, - VLANTag: vlantag, - ProviderId: providerid, - Zones: zones, - Status: status, - SpaceTag: spaceTag.String(), - Life: subnet.Life(), - } + return networkingcommon.CreateSpaces(api.backing, args) } // ListSpaces lists all the available spaces and their associated subnets. func (api *spacesAPI) ListSpaces() (results params.ListSpacesResults, err error) { - err = api.supportsSpaces() + err = networkingcommon.SupportsSpaces(api.backing) if err != nil { return results, common.ServerError(errors.Trace(err)) } @@ -162,31 +85,9 @@ result.Subnets = make([]params.Subnet, len(subnets)) for i, subnet := range subnets { - result.Subnets[i] = backingSubnetToParamsSubnet(subnet) + result.Subnets[i] = networkingcommon.BackingSubnetToParamsSubnet(subnet) } results.Results[i] = result } return results, nil } - -// supportsSpaces checks if the environment implements NetworkingEnviron -// and also if it supports spaces. 
-func (api *spacesAPI) supportsSpaces() error { - config, err := api.backing.EnvironConfig() - if err != nil { - return errors.Annotate(err, "getting environment config") - } - env, err := environs.New(config) - if err != nil { - return errors.Annotate(err, "validating environment config") - } - netEnv, ok := environs.SupportsNetworking(env) - if !ok { - return errors.NotSupportedf("networking") - } - ok, err = netEnv.SupportsSpaces() - if err != nil { - logger.Warningf("environment does not support spaces: %v", err) - } - return err -} === modified file 'src/github.com/juju/juju/apiserver/spaces/spaces_test.go' --- src/github.com/juju/juju/apiserver/spaces/spaces_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/spaces/spaces_test.go 2016-03-22 15:18:22 +0000 @@ -15,6 +15,7 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/apiserver/spaces" apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/network" coretesting "github.com/juju/juju/testing" ) @@ -119,11 +120,13 @@ } baseCalls := []apiservertesting.StubMethodCall{ - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), apiservertesting.ZonedNetworkingEnvironCall("SupportsSpaces"), } - addSpaceCalls := append(baseCalls, apiservertesting.BackingCall("AddSpace", p.Name, p.Subnets, p.Public)) + + // AddSpace from the api always uses an empty ProviderId. + addSpaceCalls := append(baseCalls, apiservertesting.BackingCall("AddSpace", p.Name, network.Id(""), p.Subnets, p.Public)) if p.Error == "" || p.MakesCall { apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, addSpaceCalls...) @@ -159,7 +162,7 @@ func (s *SpacesSuite) TestAddSpacesAPIError(c *gc.C) { apiservertesting.SharedStub.SetErrors( - nil, // Backing.EnvironConfig() + nil, // Backing.ModelConfig() nil, // Provider.Open() nil, // ZonedNetworkingEnviron.SupportsSpaces() errors.AlreadyExistsf("space-foo"), // Backing.AddSpace() @@ -260,12 +263,12 @@ boom := errors.New("backing boom") apiservertesting.BackingInstance.SetErrors(boom) _, err := s.facade.ListSpaces() - c.Assert(err, gc.ErrorMatches, "getting environment config: backing boom") + c.Assert(err, gc.ErrorMatches, "getting model config: backing boom") } func (s *SpacesSuite) TestListSpacesSubnetsError(c *gc.C) { apiservertesting.SharedStub.SetErrors( - nil, // Backing.EnvironConfig() + nil, // Backing.ModelConfig() nil, // Provider.Open() nil, // ZonedNetworkingEnviron.SupportsSpaces() nil, // Backing.AllSpaces() @@ -285,7 +288,7 @@ func (s *SpacesSuite) TestListSpacesSubnetsSingleSubnetError(c *gc.C) { boom := errors.New("boom") apiservertesting.SharedStub.SetErrors( - nil, // Backing.EnvironConfig() + nil, // Backing.ModelConfig() nil, // Provider.Open() nil, // ZonedNetworkingEnviron.SupportsSpaces() nil, // Backing.AllSpaces() @@ -304,30 +307,30 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *SpacesSuite) TestCreateSpacesEnvironConfigError(c *gc.C) { +func (s *SpacesSuite) TestCreateSpacesModelConfigError(c *gc.C) { apiservertesting.SharedStub.SetErrors( - errors.New("boom"), // Backing.EnvironConfig() + errors.New("boom"), // Backing.ModelConfig() ) spaces := params.CreateSpacesParams{} _, err := s.facade.CreateSpaces(spaces) - c.Assert(err, gc.ErrorMatches, "getting environment config: boom") + c.Assert(err, gc.ErrorMatches, "getting model config: boom") } func (s *SpacesSuite) TestCreateSpacesProviderOpenError(c *gc.C) { 
 	apiservertesting.SharedStub.SetErrors(
-		nil,                // Backing.EnvironConfig()
+		nil,                // Backing.ModelConfig()
 		errors.New("boom"), // Provider.Open()
 	)
 
 	spaces := params.CreateSpacesParams{}
 	_, err := s.facade.CreateSpaces(spaces)
-	c.Assert(err, gc.ErrorMatches, "validating environment config: boom")
+	c.Assert(err, gc.ErrorMatches, "validating model config: boom")
 }
 
 func (s *SpacesSuite) TestCreateSpacesNotSupportedError(c *gc.C) {
 	apiservertesting.SharedStub.SetErrors(
-		nil, // Backing.EnvironConfig()
+		nil, // Backing.ModelConfig()
 		nil, // Provider.Open()
 		errors.NotSupportedf("spaces"), // ZonedNetworkingEnviron.SupportsSpaces()
 	)
@@ -339,7 +342,7 @@
 
 func (s *SpacesSuite) TestListSpacesNotSupportedError(c *gc.C) {
 	apiservertesting.SharedStub.SetErrors(
-		nil, // Backing.EnvironConfig()
+		nil, // Backing.ModelConfig()
 		nil, // Provider.Open
 		errors.NotSupportedf("spaces"), // ZonedNetworkingEnviron.SupportsSpaces()
 	)
=== added directory 'src/github.com/juju/juju/apiserver/statushistory'
=== added file 'src/github.com/juju/juju/apiserver/statushistory/pruner.go'
--- src/github.com/juju/juju/apiserver/statushistory/pruner.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/statushistory/pruner.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,41 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package statushistory
+
+import (
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/state"
+
+	"github.com/juju/loggo"
+)
+
+func init() {
+	common.RegisterStandardFacade("StatusHistory", 2, NewAPI)
+}
+
+var logger = loggo.GetLogger("juju.apiserver.statushistory")
+
+// API is the concrete implementation of the Pruner endpoint.
+type API struct {
+	st         *state.State
+	authorizer common.Authorizer
+}
+
+// NewAPI returns an API instance.
+func NewAPI(st *state.State, _ *common.Resources, auth common.Authorizer) (*API, error) {
+	return &API{
+		st:         st,
+		authorizer: auth,
+	}, nil
+}
+
+// Prune removes status history entries, keeping only
+// the N newest records per entity.
+func (api *API) Prune(p params.StatusHistoryPruneArgs) error {
+	if !api.authorizer.AuthModelManager() {
+		return common.ErrPerm
+	}
+	return state.PruneStatusHistory(api.st, p.MaxLogsPerEntity)
+}
=== added file 'src/github.com/juju/juju/apiserver/storage/base_test.go'
--- src/github.com/juju/juju/apiserver/storage/base_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/storage/base_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,284 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
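+//
+// This file provides the fixtures shared by the storage facade tests:
+// a mockState assembled from per-method closures (see constructState)
+// and a map-backed mockPoolManager, so each test can tailor state
+// behaviour and assert the exact set of state calls made via assertCalls.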
+ +package storage_test + +import ( + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/apiserver/storage" + "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/state" + jujustorage "github.com/juju/juju/storage" + coretesting "github.com/juju/juju/testing" +) + +type baseStorageSuite struct { + coretesting.BaseSuite + + resources *common.Resources + authorizer testing.FakeAuthorizer + + api *storage.API + state *mockState + + storageTag names.StorageTag + storageInstance *mockStorageInstance + unitTag names.UnitTag + machineTag names.MachineTag + + volumeTag names.VolumeTag + volume *mockVolume + volumeAttachment *mockVolumeAttachment + filesystemTag names.FilesystemTag + filesystem *mockFilesystem + filesystemAttachment *mockFilesystemAttachment + calls []string + + poolManager *mockPoolManager + pools map[string]*jujustorage.Config + + blocks map[state.BlockType]state.Block +} + +func (s *baseStorageSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.resources = common.NewResources() + s.authorizer = testing.FakeAuthorizer{names.NewUserTag("testuser"), true} + s.calls = []string{} + s.state = s.constructState() + + s.pools = make(map[string]*jujustorage.Config) + s.poolManager = s.constructPoolManager() + + var err error + s.api, err = storage.CreateAPI(s.state, s.poolManager, s.resources, s.authorizer) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *baseStorageSuite) assertCalls(c *gc.C, expectedCalls []string) { + c.Assert(s.calls, jc.SameContents, expectedCalls) +} + +const ( + allStorageInstancesCall = "allStorageInstances" + storageInstanceAttachmentsCall = "storageInstanceAttachments" + unitAssignedMachineCall = "UnitAssignedMachine" + storageInstanceCall = "StorageInstance" + storageInstanceFilesystemCall = "StorageInstanceFilesystem" + storageInstanceFilesystemAttachmentCall = "storageInstanceFilesystemAttachment" + storageInstanceVolumeCall = "storageInstanceVolume" + volumeCall = "volumeCall" + machineVolumeAttachmentsCall = "machineVolumeAttachments" + volumeAttachmentsCall = "volumeAttachments" + allVolumesCall = "allVolumes" + filesystemCall = "filesystemCall" + machineFilesystemAttachmentsCall = "machineFilesystemAttachments" + filesystemAttachmentsCall = "filesystemAttachments" + allFilesystemsCall = "allFilesystems" + addStorageForUnitCall = "addStorageForUnit" + getBlockForTypeCall = "getBlockForType" + volumeAttachmentCall = "volumeAttachment" +) + +func (s *baseStorageSuite) constructState() *mockState { + s.unitTag = names.NewUnitTag("mysql/0") + s.storageTag = names.NewStorageTag("data/0") + + s.storageInstance = &mockStorageInstance{ + kind: state.StorageKindFilesystem, + owner: s.unitTag, + storageTag: s.storageTag, + } + + storageInstanceAttachment := &mockStorageAttachment{storage: s.storageInstance} + + s.machineTag = names.NewMachineTag("66") + s.filesystemTag = names.NewFilesystemTag("104") + s.volumeTag = names.NewVolumeTag("22") + s.filesystem = &mockFilesystem{ + tag: s.filesystemTag, + storage: &s.storageTag, + } + s.filesystemAttachment = &mockFilesystemAttachment{ + filesystem: s.filesystemTag, + machine: s.machineTag, + } + s.volume = &mockVolume{tag: s.volumeTag, storage: &s.storageTag} + s.volumeAttachment = &mockVolumeAttachment{ + VolumeTag: s.volumeTag, + MachineTag: s.machineTag, + } + + s.blocks = make(map[state.BlockType]state.Block) + return &mockState{ 
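+		// Each closure below records its call name in s.calls, so tests
+		// can assert exactly which state methods were invoked (see
+		// assertCalls). Where a closure takes a tag argument, it returns
+		// the fixture built above when the tag matches, and a not-found
+		// error (or an empty result) otherwise.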
+ allStorageInstances: func() ([]state.StorageInstance, error) { + s.calls = append(s.calls, allStorageInstancesCall) + return []state.StorageInstance{s.storageInstance}, nil + }, + storageInstance: func(sTag names.StorageTag) (state.StorageInstance, error) { + s.calls = append(s.calls, storageInstanceCall) + if sTag == s.storageTag { + return s.storageInstance, nil + } + return nil, errors.NotFoundf("%s", names.ReadableString(sTag)) + }, + storageInstanceAttachments: func(tag names.StorageTag) ([]state.StorageAttachment, error) { + s.calls = append(s.calls, storageInstanceAttachmentsCall) + if tag == s.storageTag { + return []state.StorageAttachment{storageInstanceAttachment}, nil + } + return nil, errors.NotFoundf("%s", names.ReadableString(tag)) + }, + storageInstanceFilesystem: func(sTag names.StorageTag) (state.Filesystem, error) { + s.calls = append(s.calls, storageInstanceFilesystemCall) + if sTag == s.storageTag { + return s.filesystem, nil + } + return nil, errors.NotFoundf("%s", names.ReadableString(sTag)) + }, + storageInstanceFilesystemAttachment: func(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) { + s.calls = append(s.calls, storageInstanceFilesystemAttachmentCall) + if m == s.machineTag && f == s.filesystemTag { + return s.filesystemAttachment, nil + } + return nil, errors.NotFoundf("filesystem attachment %s:%s", m, f) + }, + storageInstanceVolume: func(t names.StorageTag) (state.Volume, error) { + s.calls = append(s.calls, storageInstanceVolumeCall) + if t == s.storageTag { + return s.volume, nil + } + return nil, errors.NotFoundf("%s", names.ReadableString(t)) + }, + volumeAttachment: func(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) { + s.calls = append(s.calls, volumeAttachmentCall) + return s.volumeAttachment, nil + }, + unitAssignedMachine: func(u names.UnitTag) (names.MachineTag, error) { + s.calls = append(s.calls, unitAssignedMachineCall) + if u == s.unitTag { + return s.machineTag, nil + } + return names.MachineTag{}, errors.NotFoundf("%s", names.ReadableString(u)) + }, + volume: func(tag names.VolumeTag) (state.Volume, error) { + s.calls = append(s.calls, volumeCall) + if tag == s.volumeTag { + return s.volume, nil + } + return nil, errors.NotFoundf("%s", names.ReadableString(tag)) + }, + machineVolumeAttachments: func(machine names.MachineTag) ([]state.VolumeAttachment, error) { + s.calls = append(s.calls, machineVolumeAttachmentsCall) + if machine == s.machineTag { + return []state.VolumeAttachment{s.volumeAttachment}, nil + } + return nil, nil + }, + volumeAttachments: func(volume names.VolumeTag) ([]state.VolumeAttachment, error) { + s.calls = append(s.calls, volumeAttachmentsCall) + if volume == s.volumeTag { + return []state.VolumeAttachment{s.volumeAttachment}, nil + } + return nil, nil + }, + allVolumes: func() ([]state.Volume, error) { + s.calls = append(s.calls, allVolumesCall) + return []state.Volume{s.volume}, nil + }, + filesystem: func(tag names.FilesystemTag) (state.Filesystem, error) { + s.calls = append(s.calls, filesystemCall) + if tag == s.filesystemTag { + return s.filesystem, nil + } + return nil, errors.NotFoundf("%s", names.ReadableString(tag)) + }, + machineFilesystemAttachments: func(machine names.MachineTag) ([]state.FilesystemAttachment, error) { + s.calls = append(s.calls, machineFilesystemAttachmentsCall) + if machine == s.machineTag { + return []state.FilesystemAttachment{s.filesystemAttachment}, nil + } + return nil, nil + }, + filesystemAttachments: func(filesystem 
names.FilesystemTag) ([]state.FilesystemAttachment, error) { + s.calls = append(s.calls, filesystemAttachmentsCall) + if filesystem == s.filesystemTag { + return []state.FilesystemAttachment{s.filesystemAttachment}, nil + } + return nil, nil + }, + allFilesystems: func() ([]state.Filesystem, error) { + s.calls = append(s.calls, allFilesystemsCall) + return []state.Filesystem{s.filesystem}, nil + }, + modelName: "storagetest", + addStorageForUnit: func(u names.UnitTag, name string, cons state.StorageConstraints) error { + s.calls = append(s.calls, addStorageForUnitCall) + return nil + }, + getBlockForType: func(t state.BlockType) (state.Block, bool, error) { + s.calls = append(s.calls, getBlockForTypeCall) + val, found := s.blocks[t] + return val, found, nil + }, + } +} + +func (s *baseStorageSuite) addBlock(c *gc.C, t state.BlockType, msg string) { + s.blocks[t] = mockBlock{ + t: t, + msg: msg, + } +} + +func (s *baseStorageSuite) blockAllChanges(c *gc.C, msg string) { + s.addBlock(c, state.ChangeBlock, msg) +} + +func (s *baseStorageSuite) blockDestroyEnvironment(c *gc.C, msg string) { + s.addBlock(c, state.DestroyBlock, msg) +} + +func (s *baseStorageSuite) blockRemoveObject(c *gc.C, msg string) { + s.addBlock(c, state.RemoveBlock, msg) +} + +func (s *baseStorageSuite) assertBlocked(c *gc.C, err error, msg string) { + c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) + c.Assert(err, gc.ErrorMatches, msg) +} + +func (s *baseStorageSuite) constructPoolManager() *mockPoolManager { + return &mockPoolManager{ + getPool: func(name string) (*jujustorage.Config, error) { + if one, ok := s.pools[name]; ok { + return one, nil + } + return nil, errors.NotFoundf("mock pool manager: get pool %v", name) + }, + createPool: func(name string, providerType jujustorage.ProviderType, attrs map[string]interface{}) (*jujustorage.Config, error) { + pool, err := jujustorage.NewConfig(name, providerType, attrs) + s.pools[name] = pool + return pool, err + }, + deletePool: func(name string) error { + delete(s.pools, name) + return nil + }, + listPools: func() ([]*jujustorage.Config, error) { + result := make([]*jujustorage.Config, len(s.pools)) + i := 0 + for _, v := range s.pools { + result[i] = v + i++ + } + return result, nil + }, + } +} === modified file 'src/github.com/juju/juju/apiserver/storage/export_test.go' --- src/github.com/juju/juju/apiserver/storage/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storage/export_test.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,9 @@ package storage var ( - IsValidPoolListFilter = (*API).isValidPoolListFilter - ValidateNames = (*API).isValidNameCriteria - ValidateProviders = (*API).isValidProviderCriteria + ValidatePoolListFilter = (*API).validatePoolListFilter + ValidateNameCriteria = (*API).validateNameCriteria + ValidateProviderCriteria = (*API).validateProviderCriteria CreateAPI = createAPI ) === added file 'src/github.com/juju/juju/apiserver/storage/filesystemlist_test.go' --- src/github.com/juju/juju/apiserver/storage/filesystemlist_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/storage/filesystemlist_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,150 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
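+//
+// This suite exercises API.ListFilesystems. An empty
+// params.FilesystemFilter matches all filesystems, while a
+// machine-scoped filter matches only the filesystems attached to that
+// machine; expectedFilesystemDetails below describes the result shape.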
+ +package storage_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" +) + +type filesystemSuite struct { + baseStorageSuite +} + +var _ = gc.Suite(&filesystemSuite{}) + +func (s *filesystemSuite) expectedFilesystemDetails() params.FilesystemDetails { + return params.FilesystemDetails{ + FilesystemTag: s.filesystemTag.String(), + Status: params.EntityStatus{ + Status: "attached", + }, + MachineAttachments: map[string]params.FilesystemAttachmentInfo{ + s.machineTag.String(): params.FilesystemAttachmentInfo{}, + }, + Storage: ¶ms.StorageDetails{ + StorageTag: "storage-data-0", + OwnerTag: "unit-mysql-0", + Kind: params.StorageKindFilesystem, + Status: params.EntityStatus{ + Status: "attached", + }, + Attachments: map[string]params.StorageAttachmentDetails{ + "unit-mysql-0": params.StorageAttachmentDetails{ + StorageTag: "storage-data-0", + UnitTag: "unit-mysql-0", + MachineTag: "machine-66", + }, + }, + }, + } +} + +func (s *filesystemSuite) TestListFilesystemsEmptyFilter(c *gc.C) { + found, err := s.api.ListFilesystems(params.FilesystemFilters{ + []params.FilesystemFilter{{}}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], gc.DeepEquals, s.expectedFilesystemDetails()) +} + +func (s *filesystemSuite) TestListFilesystemsError(c *gc.C) { + msg := "inventing error" + s.state.allFilesystems = func() ([]state.Filesystem, error) { + return nil, errors.New(msg) + } + results, err := s.api.ListFilesystems(params.FilesystemFilters{ + []params.FilesystemFilter{{}}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.ErrorMatches, msg) +} + +func (s *filesystemSuite) TestListFilesystemsNoFilesystems(c *gc.C) { + s.state.allFilesystems = func() ([]state.Filesystem, error) { + return nil, nil + } + results, err := s.api.ListFilesystems(params.FilesystemFilters{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 0) +} + +func (s *filesystemSuite) TestListFilesystemsFilter(c *gc.C) { + filters := []params.FilesystemFilter{{ + Machines: []string{s.machineTag.String()}, + }} + found, err := s.api.ListFilesystems(params.FilesystemFilters{filters}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, s.expectedFilesystemDetails()) +} + +func (s *filesystemSuite) TestListFilesystemsFilterNonMatching(c *gc.C) { + filters := []params.FilesystemFilter{{ + Machines: []string{"machine-42"}, + }} + found, err := s.api.ListFilesystems(params.FilesystemFilters{filters}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Error, gc.IsNil) + c.Assert(found.Results[0].Result, gc.HasLen, 0) +} + +func (s *filesystemSuite) TestListFilesystemsFilesystemInfo(c *gc.C) { + s.filesystem.info = &state.FilesystemInfo{ + Size: 123, + } + expected := s.expectedFilesystemDetails() + expected.Info.Size = 123 + found, err := s.api.ListFilesystems(params.FilesystemFilters{ + []params.FilesystemFilter{{}}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) +} + +func (s *filesystemSuite) 
TestListFilesystemsAttachmentInfo(c *gc.C) { + s.filesystemAttachment.info = &state.FilesystemAttachmentInfo{ + MountPoint: "/tmp", + ReadOnly: true, + } + expected := s.expectedFilesystemDetails() + expected.MachineAttachments[s.machineTag.String()] = params.FilesystemAttachmentInfo{ + MountPoint: "/tmp", + ReadOnly: true, + } + expectedStorageAttachmentDetails := expected.Storage.Attachments["unit-mysql-0"] + expectedStorageAttachmentDetails.Location = "/tmp" + expected.Storage.Attachments["unit-mysql-0"] = expectedStorageAttachmentDetails + found, err := s.api.ListFilesystems(params.FilesystemFilters{ + []params.FilesystemFilter{{}}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) +} + +func (s *filesystemSuite) TestListFilesystemsVolumeBacked(c *gc.C) { + s.filesystem.volume = &s.volumeTag + expected := s.expectedFilesystemDetails() + expected.VolumeTag = s.volumeTag.String() + found, err := s.api.ListFilesystems(params.FilesystemFilters{ + []params.FilesystemFilter{{}}, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) +} === added file 'src/github.com/juju/juju/apiserver/storage/mock_test.go' --- src/github.com/juju/juju/apiserver/storage/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/storage/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,349 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storage_test + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/state" + jujustorage "github.com/juju/juju/storage" +) + +type mockPoolManager struct { + getPool func(name string) (*jujustorage.Config, error) + createPool func(name string, providerType jujustorage.ProviderType, attrs map[string]interface{}) (*jujustorage.Config, error) + deletePool func(name string) error + listPools func() ([]*jujustorage.Config, error) +} + +func (m *mockPoolManager) Get(name string) (*jujustorage.Config, error) { + return m.getPool(name) +} + +func (m *mockPoolManager) Create(name string, providerType jujustorage.ProviderType, attrs map[string]interface{}) (*jujustorage.Config, error) { + return m.createPool(name, providerType, attrs) +} + +func (m *mockPoolManager) Delete(name string) error { + return m.deletePool(name) +} + +func (m *mockPoolManager) List() ([]*jujustorage.Config, error) { + return m.listPools() +} + +type mockState struct { + storageInstance func(names.StorageTag) (state.StorageInstance, error) + allStorageInstances func() ([]state.StorageInstance, error) + storageInstanceAttachments func(names.StorageTag) ([]state.StorageAttachment, error) + unitAssignedMachine func(u names.UnitTag) (names.MachineTag, error) + storageInstanceVolume func(names.StorageTag) (state.Volume, error) + volumeAttachment func(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) + storageInstanceFilesystem func(names.StorageTag) (state.Filesystem, error) + storageInstanceFilesystemAttachment func(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) + watchStorageAttachment func(names.StorageTag, names.UnitTag) state.NotifyWatcher + watchFilesystemAttachment func(names.MachineTag, names.FilesystemTag) state.NotifyWatcher + 
watchVolumeAttachment func(names.MachineTag, names.VolumeTag) state.NotifyWatcher + watchBlockDevices func(names.MachineTag) state.NotifyWatcher + modelName string + volume func(tag names.VolumeTag) (state.Volume, error) + machineVolumeAttachments func(machine names.MachineTag) ([]state.VolumeAttachment, error) + volumeAttachments func(volume names.VolumeTag) ([]state.VolumeAttachment, error) + allVolumes func() ([]state.Volume, error) + filesystem func(tag names.FilesystemTag) (state.Filesystem, error) + machineFilesystemAttachments func(machine names.MachineTag) ([]state.FilesystemAttachment, error) + filesystemAttachments func(filesystem names.FilesystemTag) ([]state.FilesystemAttachment, error) + allFilesystems func() ([]state.Filesystem, error) + addStorageForUnit func(u names.UnitTag, name string, cons state.StorageConstraints) error + getBlockForType func(t state.BlockType) (state.Block, bool, error) + blockDevices func(names.MachineTag) ([]state.BlockDeviceInfo, error) +} + +func (st *mockState) StorageInstance(s names.StorageTag) (state.StorageInstance, error) { + return st.storageInstance(s) +} + +func (st *mockState) AllStorageInstances() ([]state.StorageInstance, error) { + return st.allStorageInstances() +} + +func (st *mockState) StorageAttachments(tag names.StorageTag) ([]state.StorageAttachment, error) { + return st.storageInstanceAttachments(tag) +} + +func (st *mockState) UnitAssignedMachine(unit names.UnitTag) (names.MachineTag, error) { + return st.unitAssignedMachine(unit) +} + +func (st *mockState) FilesystemAttachment(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) { + return st.storageInstanceFilesystemAttachment(m, f) +} + +func (st *mockState) StorageInstanceFilesystem(s names.StorageTag) (state.Filesystem, error) { + return st.storageInstanceFilesystem(s) +} + +func (st *mockState) StorageInstanceVolume(s names.StorageTag) (state.Volume, error) { + return st.storageInstanceVolume(s) +} + +func (st *mockState) VolumeAttachment(m names.MachineTag, v names.VolumeTag) (state.VolumeAttachment, error) { + return st.volumeAttachment(m, v) +} + +func (st *mockState) WatchStorageAttachment(s names.StorageTag, u names.UnitTag) state.NotifyWatcher { + return st.watchStorageAttachment(s, u) +} + +func (st *mockState) WatchFilesystemAttachment(mtag names.MachineTag, f names.FilesystemTag) state.NotifyWatcher { + return st.watchFilesystemAttachment(mtag, f) +} + +func (st *mockState) WatchVolumeAttachment(mtag names.MachineTag, v names.VolumeTag) state.NotifyWatcher { + return st.watchVolumeAttachment(mtag, v) +} + +func (st *mockState) WatchBlockDevices(mtag names.MachineTag) state.NotifyWatcher { + return st.watchBlockDevices(mtag) +} + +func (st *mockState) ModelName() (string, error) { + return st.modelName, nil +} + +func (st *mockState) AllVolumes() ([]state.Volume, error) { + return st.allVolumes() +} + +func (st *mockState) VolumeAttachments(volume names.VolumeTag) ([]state.VolumeAttachment, error) { + return st.volumeAttachments(volume) +} + +func (st *mockState) MachineVolumeAttachments(machine names.MachineTag) ([]state.VolumeAttachment, error) { + return st.machineVolumeAttachments(machine) +} + +func (st *mockState) Volume(tag names.VolumeTag) (state.Volume, error) { + return st.volume(tag) +} + +func (st *mockState) AllFilesystems() ([]state.Filesystem, error) { + return st.allFilesystems() +} + +func (st *mockState) FilesystemAttachments(filesystem names.FilesystemTag) ([]state.FilesystemAttachment, error) { + return 
st.filesystemAttachments(filesystem) +} + +func (st *mockState) MachineFilesystemAttachments(machine names.MachineTag) ([]state.FilesystemAttachment, error) { + return st.machineFilesystemAttachments(machine) +} + +func (st *mockState) Filesystem(tag names.FilesystemTag) (state.Filesystem, error) { + return st.filesystem(tag) +} + +func (st *mockState) AddStorageForUnit(u names.UnitTag, name string, cons state.StorageConstraints) error { + return st.addStorageForUnit(u, name, cons) +} + +func (st *mockState) GetBlockForType(t state.BlockType) (state.Block, bool, error) { + return st.getBlockForType(t) +} + +func (st *mockState) BlockDevices(m names.MachineTag) ([]state.BlockDeviceInfo, error) { + if st.blockDevices != nil { + return st.blockDevices(m) + } + return []state.BlockDeviceInfo{}, nil +} + +type mockNotifyWatcher struct { + state.NotifyWatcher + changes chan struct{} +} + +func (m *mockNotifyWatcher) Changes() <-chan struct{} { + return m.changes +} + +type mockVolume struct { + state.Volume + tag names.VolumeTag + storage *names.StorageTag + info *state.VolumeInfo +} + +func (m *mockVolume) StorageInstance() (names.StorageTag, error) { + if m.storage != nil { + return *m.storage, nil + } + return names.StorageTag{}, errors.NewNotAssigned(nil, "error from mock") +} + +func (m *mockVolume) VolumeTag() names.VolumeTag { + return m.tag +} + +func (m *mockVolume) Params() (state.VolumeParams, bool) { + return state.VolumeParams{ + Pool: "loop", + Size: 1024, + }, true +} + +func (m *mockVolume) Info() (state.VolumeInfo, error) { + if m.info != nil { + return *m.info, nil + } + return state.VolumeInfo{}, errors.NotProvisionedf("%v", m.tag) +} + +func (m *mockVolume) Status() (state.StatusInfo, error) { + return state.StatusInfo{Status: state.StatusAttached}, nil +} + +type mockFilesystem struct { + state.Filesystem + tag names.FilesystemTag + storage *names.StorageTag + volume *names.VolumeTag + info *state.FilesystemInfo +} + +func (m *mockFilesystem) Storage() (names.StorageTag, error) { + if m.storage != nil { + return *m.storage, nil + } + return names.StorageTag{}, errors.NewNotAssigned(nil, "error from mock") +} + +func (m *mockFilesystem) FilesystemTag() names.FilesystemTag { + return m.tag +} + +func (m *mockFilesystem) Volume() (names.VolumeTag, error) { + if m.volume != nil { + return *m.volume, nil + } + return names.VolumeTag{}, state.ErrNoBackingVolume +} + +func (m *mockFilesystem) Info() (state.FilesystemInfo, error) { + if m.info != nil { + return *m.info, nil + } + return state.FilesystemInfo{}, errors.NotProvisionedf("filesystem") +} + +func (m *mockFilesystem) Status() (state.StatusInfo, error) { + return state.StatusInfo{Status: state.StatusAttached}, nil +} + +type mockFilesystemAttachment struct { + state.FilesystemAttachment + filesystem names.FilesystemTag + machine names.MachineTag + info *state.FilesystemAttachmentInfo +} + +func (m *mockFilesystemAttachment) Filesystem() names.FilesystemTag { + return m.filesystem +} + +func (m *mockFilesystemAttachment) Machine() names.MachineTag { + return m.machine +} + +func (m *mockFilesystemAttachment) Info() (state.FilesystemAttachmentInfo, error) { + if m.info != nil { + return *m.info, nil + } + return state.FilesystemAttachmentInfo{}, errors.NotProvisionedf("filesystem attachment") +} + +type mockStorageInstance struct { + state.StorageInstance + kind state.StorageKind + owner names.Tag + storageTag names.Tag +} + +func (m *mockStorageInstance) Kind() state.StorageKind { + return m.kind +} + +func (m 
*mockStorageInstance) Owner() names.Tag { + return m.owner +} + +func (m *mockStorageInstance) Tag() names.Tag { + return m.storageTag +} + +func (m *mockStorageInstance) StorageTag() names.StorageTag { + return m.storageTag.(names.StorageTag) +} + +func (m *mockStorageInstance) CharmURL() *charm.URL { + panic("not implemented for test") +} + +type mockStorageAttachment struct { + state.StorageAttachment + storage *mockStorageInstance +} + +func (m *mockStorageAttachment) StorageInstance() names.StorageTag { + return m.storage.Tag().(names.StorageTag) +} + +func (m *mockStorageAttachment) Unit() names.UnitTag { + return m.storage.Owner().(names.UnitTag) +} + +type mockVolumeAttachment struct { + VolumeTag names.VolumeTag + MachineTag names.MachineTag + info *state.VolumeAttachmentInfo +} + +func (va *mockVolumeAttachment) Volume() names.VolumeTag { + return va.VolumeTag +} + +func (va *mockVolumeAttachment) Machine() names.MachineTag { + return va.MachineTag +} + +func (va *mockVolumeAttachment) Life() state.Life { + panic("not implemented for test") +} + +func (va *mockVolumeAttachment) Info() (state.VolumeAttachmentInfo, error) { + if va.info != nil { + return *va.info, nil + } + return state.VolumeAttachmentInfo{}, errors.NotProvisionedf("volume attachment") +} + +func (va *mockVolumeAttachment) Params() (state.VolumeAttachmentParams, bool) { + panic("not implemented for test") +} + +type mockBlock struct { + state.Block + t state.BlockType + msg string +} + +func (b mockBlock) Type() state.BlockType { + return b.t +} + +func (b mockBlock) Message() string { + return b.msg +} === modified file 'src/github.com/juju/juju/apiserver/storage/package_test.go' --- src/github.com/juju/juju/apiserver/storage/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storage/package_test.go 2016-03-22 15:18:22 +0000 @@ -6,511 +6,9 @@ import ( stdtesting "testing" - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/apiserver/storage" - "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/state" - jujustorage "github.com/juju/juju/storage" - coretesting "github.com/juju/juju/testing" ) func TestAll(t *stdtesting.T) { gc.TestingT(t) } - -type baseStorageSuite struct { - coretesting.BaseSuite - - resources *common.Resources - authorizer testing.FakeAuthorizer - - api *storage.API - state *mockState - - storageTag names.StorageTag - storageInstance *mockStorageInstance - unitTag names.UnitTag - machineTag names.MachineTag - - volumeTag names.VolumeTag - volume *mockVolume - volumeAttachment *mockVolumeAttachment - calls []string - - poolManager *mockPoolManager - pools map[string]*jujustorage.Config - - blocks map[state.BlockType]state.Block -} - -func (s *baseStorageSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - s.resources = common.NewResources() - s.authorizer = testing.FakeAuthorizer{names.NewUserTag("testuser"), true} - s.calls = []string{} - s.state = s.constructState(c) - - s.pools = make(map[string]*jujustorage.Config) - s.poolManager = s.constructPoolManager(c) - - var err error - s.api, err = storage.CreateAPI(s.state, s.poolManager, s.resources, s.authorizer) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *baseStorageSuite) assertCalls(c *gc.C, expectedCalls []string) { - c.Assert(s.calls, jc.SameContents, expectedCalls) -} - -const ( - 
allStorageInstancesCall = "allStorageInstances" - storageInstanceAttachmentsCall = "storageInstanceAttachments" - unitAssignedMachineCall = "UnitAssignedMachine" - storageInstanceCall = "StorageInstance" - storageInstanceFilesystemCall = "StorageInstanceFilesystem" - storageInstanceFilesystemAttachmentCall = "storageInstanceFilesystemAttachment" - storageInstanceVolumeCall = "storageInstanceVolume" - volumeCall = "volumeCall" - machineVolumeAttachmentsCall = "machineVolumeAttachments" - volumeAttachmentsCall = "volumeAttachments" - allVolumesCall = "allVolumes" - addStorageForUnitCall = "addStorageForUnit" - getBlockForTypeCall = "getBlockForType" - volumeAttachmentCall = "volumeAttachment" -) - -func (s *baseStorageSuite) constructState(c *gc.C) *mockState { - s.unitTag = names.NewUnitTag("mysql/0") - s.storageTag = names.NewStorageTag("data/0") - - s.storageInstance = &mockStorageInstance{ - kind: state.StorageKindFilesystem, - owner: s.unitTag, - storageTag: s.storageTag, - } - - storageInstanceAttachment := &mockStorageAttachment{storage: s.storageInstance} - - s.machineTag = names.NewMachineTag("66") - filesystemTag := names.NewFilesystemTag("104") - s.volumeTag = names.NewVolumeTag("22") - filesystem := &mockFilesystem{tag: filesystemTag} - filesystemAttachment := &mockFilesystemAttachment{} - s.volume = &mockVolume{tag: s.volumeTag, storage: s.storageTag} - s.volumeAttachment = &mockVolumeAttachment{ - VolumeTag: s.volumeTag, - MachineTag: s.machineTag, - } - - s.blocks = make(map[state.BlockType]state.Block) - return &mockState{ - allStorageInstances: func() ([]state.StorageInstance, error) { - s.calls = append(s.calls, allStorageInstancesCall) - return []state.StorageInstance{s.storageInstance}, nil - }, - storageInstance: func(sTag names.StorageTag) (state.StorageInstance, error) { - s.calls = append(s.calls, storageInstanceCall) - c.Assert(sTag, gc.DeepEquals, s.storageTag) - return s.storageInstance, nil - }, - storageInstanceAttachments: func(tag names.StorageTag) ([]state.StorageAttachment, error) { - s.calls = append(s.calls, storageInstanceAttachmentsCall) - c.Assert(tag, gc.DeepEquals, s.storageTag) - return []state.StorageAttachment{storageInstanceAttachment}, nil - }, - storageInstanceFilesystem: func(sTag names.StorageTag) (state.Filesystem, error) { - s.calls = append(s.calls, storageInstanceFilesystemCall) - c.Assert(sTag, gc.DeepEquals, s.storageTag) - return filesystem, nil - }, - storageInstanceFilesystemAttachment: func(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) { - s.calls = append(s.calls, storageInstanceFilesystemAttachmentCall) - c.Assert(m, gc.DeepEquals, s.machineTag) - c.Assert(f, gc.DeepEquals, filesystemTag) - return filesystemAttachment, nil - }, - storageInstanceVolume: func(t names.StorageTag) (state.Volume, error) { - s.calls = append(s.calls, storageInstanceVolumeCall) - c.Assert(t, gc.DeepEquals, s.storageTag) - return s.volume, nil - }, - volumeAttachment: func(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) { - s.calls = append(s.calls, volumeAttachmentCall) - return s.volumeAttachment, nil - }, - unitAssignedMachine: func(u names.UnitTag) (names.MachineTag, error) { - s.calls = append(s.calls, unitAssignedMachineCall) - c.Assert(u, gc.DeepEquals, s.unitTag) - return s.machineTag, nil - }, - volume: func(tag names.VolumeTag) (state.Volume, error) { - s.calls = append(s.calls, volumeCall) - c.Assert(tag, gc.DeepEquals, s.volumeTag) - return s.volume, nil - }, - machineVolumeAttachments: 
func(machine names.MachineTag) ([]state.VolumeAttachment, error) { - s.calls = append(s.calls, machineVolumeAttachmentsCall) - c.Assert(machine, gc.DeepEquals, s.machineTag) - return []state.VolumeAttachment{s.volumeAttachment}, nil - }, - volumeAttachments: func(volume names.VolumeTag) ([]state.VolumeAttachment, error) { - s.calls = append(s.calls, volumeAttachmentsCall) - c.Assert(volume, gc.DeepEquals, s.volumeTag) - return []state.VolumeAttachment{s.volumeAttachment}, nil - }, - allVolumes: func() ([]state.Volume, error) { - s.calls = append(s.calls, allVolumesCall) - return []state.Volume{s.volume}, nil - }, - envName: "storagetest", - addStorageForUnit: func(u names.UnitTag, name string, cons state.StorageConstraints) error { - s.calls = append(s.calls, addStorageForUnitCall) - return nil - }, - getBlockForType: func(t state.BlockType) (state.Block, bool, error) { - s.calls = append(s.calls, getBlockForTypeCall) - val, found := s.blocks[t] - return val, found, nil - }, - } -} - -func (s *baseStorageSuite) addBlock(c *gc.C, t state.BlockType, msg string) { - s.blocks[t] = mockBlock{ - t: t, - msg: msg, - } -} - -func (s *baseStorageSuite) blockAllChanges(c *gc.C, msg string) { - s.addBlock(c, state.ChangeBlock, msg) -} - -func (s *baseStorageSuite) blockDestroyEnvironment(c *gc.C, msg string) { - s.addBlock(c, state.DestroyBlock, msg) -} - -func (s *baseStorageSuite) blockRemoveObject(c *gc.C, msg string) { - s.addBlock(c, state.RemoveBlock, msg) -} - -func (s *baseStorageSuite) assertBlocked(c *gc.C, err error, msg string) { - c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) - c.Assert(err, gc.ErrorMatches, msg) -} - -func (s *baseStorageSuite) constructPoolManager(c *gc.C) *mockPoolManager { - return &mockPoolManager{ - getPool: func(name string) (*jujustorage.Config, error) { - if one, ok := s.pools[name]; ok { - return one, nil - } - return nil, errors.NotFoundf("mock pool manager: get pool %v", name) - }, - createPool: func(name string, providerType jujustorage.ProviderType, attrs map[string]interface{}) (*jujustorage.Config, error) { - pool, err := jujustorage.NewConfig(name, providerType, attrs) - s.pools[name] = pool - return pool, err - }, - deletePool: func(name string) error { - delete(s.pools, name) - return nil - }, - listPools: func() ([]*jujustorage.Config, error) { - result := make([]*jujustorage.Config, len(s.pools)) - i := 0 - for _, v := range s.pools { - result[i] = v - i++ - } - return result, nil - }, - } -} - -type mockPoolManager struct { - getPool func(name string) (*jujustorage.Config, error) - createPool func(name string, providerType jujustorage.ProviderType, attrs map[string]interface{}) (*jujustorage.Config, error) - deletePool func(name string) error - listPools func() ([]*jujustorage.Config, error) -} - -func (m *mockPoolManager) Get(name string) (*jujustorage.Config, error) { - return m.getPool(name) -} - -func (m *mockPoolManager) Create(name string, providerType jujustorage.ProviderType, attrs map[string]interface{}) (*jujustorage.Config, error) { - return m.createPool(name, providerType, attrs) -} - -func (m *mockPoolManager) Delete(name string) error { - return m.deletePool(name) -} - -func (m *mockPoolManager) List() ([]*jujustorage.Config, error) { - return m.listPools() -} - -type mockState struct { - storageInstance func(names.StorageTag) (state.StorageInstance, error) - allStorageInstances func() ([]state.StorageInstance, error) - storageInstanceAttachments func(names.StorageTag) ([]state.StorageAttachment, error) - unitAssignedMachine 
func(u names.UnitTag) (names.MachineTag, error) - storageInstanceVolume func(names.StorageTag) (state.Volume, error) - volumeAttachment func(names.MachineTag, names.VolumeTag) (state.VolumeAttachment, error) - storageInstanceFilesystem func(names.StorageTag) (state.Filesystem, error) - storageInstanceFilesystemAttachment func(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) - watchStorageAttachment func(names.StorageTag, names.UnitTag) state.NotifyWatcher - watchFilesystemAttachment func(names.MachineTag, names.FilesystemTag) state.NotifyWatcher - watchVolumeAttachment func(names.MachineTag, names.VolumeTag) state.NotifyWatcher - watchBlockDevices func(names.MachineTag) state.NotifyWatcher - envName string - volume func(tag names.VolumeTag) (state.Volume, error) - machineVolumeAttachments func(machine names.MachineTag) ([]state.VolumeAttachment, error) - volumeAttachments func(volume names.VolumeTag) ([]state.VolumeAttachment, error) - allVolumes func() ([]state.Volume, error) - addStorageForUnit func(u names.UnitTag, name string, cons state.StorageConstraints) error - getBlockForType func(t state.BlockType) (state.Block, bool, error) - blockDevices func(names.MachineTag) ([]state.BlockDeviceInfo, error) -} - -func (st *mockState) StorageInstance(s names.StorageTag) (state.StorageInstance, error) { - return st.storageInstance(s) -} - -func (st *mockState) AllStorageInstances() ([]state.StorageInstance, error) { - return st.allStorageInstances() -} - -func (st *mockState) StorageAttachments(tag names.StorageTag) ([]state.StorageAttachment, error) { - return st.storageInstanceAttachments(tag) -} - -func (st *mockState) UnitAssignedMachine(unit names.UnitTag) (names.MachineTag, error) { - return st.unitAssignedMachine(unit) -} - -func (st *mockState) FilesystemAttachment(m names.MachineTag, f names.FilesystemTag) (state.FilesystemAttachment, error) { - return st.storageInstanceFilesystemAttachment(m, f) -} - -func (st *mockState) StorageInstanceFilesystem(s names.StorageTag) (state.Filesystem, error) { - return st.storageInstanceFilesystem(s) -} - -func (st *mockState) StorageInstanceVolume(s names.StorageTag) (state.Volume, error) { - return st.storageInstanceVolume(s) -} - -func (st *mockState) VolumeAttachment(m names.MachineTag, v names.VolumeTag) (state.VolumeAttachment, error) { - return st.volumeAttachment(m, v) -} - -func (st *mockState) WatchStorageAttachment(s names.StorageTag, u names.UnitTag) state.NotifyWatcher { - return st.watchStorageAttachment(s, u) -} - -func (st *mockState) WatchFilesystemAttachment(mtag names.MachineTag, f names.FilesystemTag) state.NotifyWatcher { - return st.watchFilesystemAttachment(mtag, f) -} - -func (st *mockState) WatchVolumeAttachment(mtag names.MachineTag, v names.VolumeTag) state.NotifyWatcher { - return st.watchVolumeAttachment(mtag, v) -} - -func (st *mockState) WatchBlockDevices(mtag names.MachineTag) state.NotifyWatcher { - return st.watchBlockDevices(mtag) -} - -func (st *mockState) EnvName() (string, error) { - return st.envName, nil -} - -func (st *mockState) AllVolumes() ([]state.Volume, error) { - return st.allVolumes() -} - -func (st *mockState) VolumeAttachments(volume names.VolumeTag) ([]state.VolumeAttachment, error) { - return st.volumeAttachments(volume) -} - -func (st *mockState) MachineVolumeAttachments(machine names.MachineTag) ([]state.VolumeAttachment, error) { - return st.machineVolumeAttachments(machine) -} - -func (st *mockState) Volume(tag names.VolumeTag) (state.Volume, error) { - return 
st.volume(tag) -} - -func (st *mockState) AddStorageForUnit(u names.UnitTag, name string, cons state.StorageConstraints) error { - return st.addStorageForUnit(u, name, cons) -} - -func (st *mockState) GetBlockForType(t state.BlockType) (state.Block, bool, error) { - return st.getBlockForType(t) -} - -func (st *mockState) BlockDevices(m names.MachineTag) ([]state.BlockDeviceInfo, error) { - if st.blockDevices != nil { - return st.blockDevices(m) - } - return []state.BlockDeviceInfo{}, nil -} - -type mockNotifyWatcher struct { - state.NotifyWatcher - changes chan struct{} -} - -func (m *mockNotifyWatcher) Changes() <-chan struct{} { - return m.changes -} - -type mockVolume struct { - state.Volume - tag names.VolumeTag - storage names.StorageTag - hasNoStorage bool - info *state.VolumeInfo -} - -func (m *mockVolume) StorageInstance() (names.StorageTag, error) { - if m.hasNoStorage { - return names.StorageTag{}, errors.NewNotAssigned(nil, "error from mock") - } - return m.storage, nil -} - -func (m *mockVolume) VolumeTag() names.VolumeTag { - return m.tag -} - -func (m *mockVolume) Params() (state.VolumeParams, bool) { - return state.VolumeParams{ - Pool: "loop", - Size: 1024, - }, true -} - -func (m *mockVolume) Info() (state.VolumeInfo, error) { - if m.info != nil { - return *m.info, nil - } - return state.VolumeInfo{}, errors.NotProvisionedf("%v", m.tag) -} - -func (m *mockVolume) Status() (state.StatusInfo, error) { - return state.StatusInfo{Status: state.StatusAttached}, nil -} - -type mockFilesystem struct { - state.Filesystem - tag names.FilesystemTag -} - -func (m *mockFilesystem) FilesystemTag() names.FilesystemTag { - return m.tag -} - -type mockFilesystemAttachment struct { - state.FilesystemAttachment - tag names.FilesystemTag -} - -func (m *mockFilesystemAttachment) Filesystem() names.FilesystemTag { - return m.tag -} - -func (m *mockFilesystemAttachment) Info() (state.FilesystemAttachmentInfo, error) { - return state.FilesystemAttachmentInfo{}, nil -} - -type mockStorageInstance struct { - state.StorageInstance - kind state.StorageKind - owner names.Tag - storageTag names.Tag -} - -func (m *mockStorageInstance) Kind() state.StorageKind { - return m.kind -} - -func (m *mockStorageInstance) Owner() names.Tag { - return m.owner -} - -func (m *mockStorageInstance) Tag() names.Tag { - return m.storageTag -} - -func (m *mockStorageInstance) StorageTag() names.StorageTag { - return m.storageTag.(names.StorageTag) -} - -func (m *mockStorageInstance) CharmURL() *charm.URL { - panic("not implemented for test") -} - -type mockStorageAttachment struct { - state.StorageAttachment - storage *mockStorageInstance -} - -func (m *mockStorageAttachment) StorageInstance() names.StorageTag { - return m.storage.Tag().(names.StorageTag) -} - -func (m *mockStorageAttachment) Unit() names.UnitTag { - return m.storage.Owner().(names.UnitTag) -} - -type mockVolumeAttachment struct { - VolumeTag names.VolumeTag - MachineTag names.MachineTag - info *state.VolumeAttachmentInfo -} - -func (va *mockVolumeAttachment) Volume() names.VolumeTag { - return va.VolumeTag -} - -func (va *mockVolumeAttachment) Machine() names.MachineTag { - return va.MachineTag -} - -func (va *mockVolumeAttachment) Life() state.Life { - panic("not implemented for test") -} - -func (va *mockVolumeAttachment) Info() (state.VolumeAttachmentInfo, error) { - if va.info != nil { - return *va.info, nil - } - return state.VolumeAttachmentInfo{}, errors.NotProvisionedf("volume attachment") -} - -func (va *mockVolumeAttachment) Params() 
(state.VolumeAttachmentParams, bool) { - panic("not implemented for test") -} - -type mockBlock struct { - state.Block - t state.BlockType - msg string -} - -func (b mockBlock) Type() state.BlockType { - return b.t -} - -func (b mockBlock) Message() string { - return b.msg -} === modified file 'src/github.com/juju/juju/apiserver/storage/poollist_test.go' --- src/github.com/juju/juju/apiserver/storage/poollist_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/storage/poollist_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,6 @@ import ( "fmt" - "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils/set" gc "gopkg.in/check.v1" @@ -40,19 +39,21 @@ func (s *poolSuite) TestList(c *gc.C) { s.createPools(c, 1) - pools, err := s.api.ListPools(params.StoragePoolFilter{}) + results, err := s.api.ListPools(params.StoragePoolFilters{[]params.StoragePoolFilter{{}}}) c.Assert(err, jc.ErrorIsNil) - c.Assert(pools.Results, gc.HasLen, 1) - one := pools.Results[0] - c.Assert(one.Name, gc.Equals, fmt.Sprintf("%v%v", tstName, 0)) - c.Assert(one.Provider, gc.Equals, string(provider.LoopProviderType)) + c.Assert(results.Results, gc.HasLen, 1) + one := results.Results[0] + c.Assert(one.Error, gc.IsNil) + c.Assert(one.Result, gc.HasLen, 1) + c.Assert(one.Result[0].Name, gc.Equals, fmt.Sprintf("%v%v", tstName, 0)) + c.Assert(one.Result[0].Provider, gc.Equals, string(provider.LoopProviderType)) } func (s *poolSuite) TestListManyResults(c *gc.C) { s.createPools(c, 2) - pools, err := s.api.ListPools(params.StoragePoolFilter{}) + results, err := s.api.ListPools(params.StoragePoolFilters{[]params.StoragePoolFilter{{}}}) c.Assert(err, jc.ErrorIsNil) - assertPoolNames(c, pools.Results, + assertPoolNames(c, results.Results[0].Result, "testpool0", "testpool1", "dummy", "loop", "tmpfs", "rootfs") @@ -62,11 +63,15 @@ s.createPools(c, 2) tstName := fmt.Sprintf("%v%v", tstName, 1) - pools, err := s.api.ListPools(params.StoragePoolFilter{ - Names: []string{tstName}}) + results, err := s.api.ListPools(params.StoragePoolFilters{ + []params.StoragePoolFilter{{ + Names: []string{tstName}, + }}, + }) c.Assert(err, jc.ErrorIsNil) - c.Assert(pools.Results, gc.HasLen, 1) - c.Assert(pools.Results[0].Name, gc.DeepEquals, tstName) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Result, gc.HasLen, 1) + c.Assert(results.Results[0].Result[0].Name, gc.DeepEquals, tstName) } func (s *poolSuite) TestListByType(c *gc.C) { @@ -79,10 +84,13 @@ storage.NewConfig(poolName, provider.TmpfsProviderType, nil) c.Assert(err, jc.ErrorIsNil) - pools, err := s.api.ListPools(params.StoragePoolFilter{ - Providers: []string{tstType}}) + results, err := s.api.ListPools(params.StoragePoolFilters{ + []params.StoragePoolFilter{{ + Providers: []string{tstType}, + }}, + }) c.Assert(err, jc.ErrorIsNil) - assertPoolNames(c, pools.Results, "rayofsunshine", "tmpfs") + assertPoolNames(c, results.Results[0].Result, "rayofsunshine", "tmpfs") } func (s *poolSuite) TestListByNameAndTypeAnd(c *gc.C) { @@ -94,13 +102,17 @@ s.baseStorageSuite.pools[poolName], err = storage.NewConfig(poolName, provider.TmpfsProviderType, nil) c.Assert(err, jc.ErrorIsNil) - pools, err := s.api.ListPools(params.StoragePoolFilter{ - Providers: []string{tstType}, - Names: []string{poolName}}) + results, err := s.api.ListPools(params.StoragePoolFilters{ + []params.StoragePoolFilter{{ + Providers: []string{tstType}, + Names: []string{poolName}, + }}, + }) c.Assert(err, jc.ErrorIsNil) - c.Assert(pools.Results, gc.HasLen, 1) - 
c.Assert(pools.Results[0].Provider, gc.DeepEquals, tstType) - c.Assert(pools.Results[0].Name, gc.DeepEquals, poolName) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Result, gc.HasLen, 1) + c.Assert(results.Results[0].Result[0].Provider, gc.DeepEquals, tstType) + c.Assert(results.Results[0].Result[0].Name, gc.DeepEquals, poolName) } func (s *poolSuite) TestListByNamesOr(c *gc.C) { @@ -111,13 +123,16 @@ s.baseStorageSuite.pools[poolName], err = storage.NewConfig(poolName, provider.TmpfsProviderType, nil) c.Assert(err, jc.ErrorIsNil) - pools, err := s.api.ListPools(params.StoragePoolFilter{ - Names: []string{ - fmt.Sprintf("%v%v", tstName, 1), - fmt.Sprintf("%v%v", tstName, 0), - }}) + results, err := s.api.ListPools(params.StoragePoolFilters{ + []params.StoragePoolFilter{{ + Names: []string{ + fmt.Sprintf("%v%v", tstName, 1), + fmt.Sprintf("%v%v", tstName, 0), + }, + }}, + }) c.Assert(err, jc.ErrorIsNil) - assertPoolNames(c, pools.Results, "testpool0", "testpool1") + assertPoolNames(c, results.Results[0].Result, "testpool0", "testpool1") } func assertPoolNames(c *gc.C, results []params.StoragePool, expected ...string) { @@ -137,21 +152,25 @@ s.baseStorageSuite.pools[poolName], err = storage.NewConfig(poolName, provider.TmpfsProviderType, nil) c.Assert(err, jc.ErrorIsNil) - pools, err := s.api.ListPools(params.StoragePoolFilter{ - Providers: []string{tstType, string(provider.LoopProviderType)}}) + results, err := s.api.ListPools(params.StoragePoolFilters{ + []params.StoragePoolFilter{{ + Providers: []string{tstType, string(provider.LoopProviderType)}, + }}, + }) c.Assert(err, jc.ErrorIsNil) - assertPoolNames(c, pools.Results, "testpool0", "testpool1", "rayofsunshine", "loop", "tmpfs") + assertPoolNames(c, results.Results[0].Result, "testpool0", "testpool1", "rayofsunshine", "loop", "tmpfs") } func (s *poolSuite) TestListNoPools(c *gc.C) { - pools, err := s.api.ListPools(params.StoragePoolFilter{}) + results, err := s.api.ListPools(params.StoragePoolFilters{[]params.StoragePoolFilter{{}}}) c.Assert(err, jc.ErrorIsNil) - assertPoolNames(c, pools.Results, "dummy", "rootfs", "loop", "tmpfs") + c.Assert(results.Results, gc.HasLen, 1) + assertPoolNames(c, results.Results[0].Result, "dummy", "rootfs", "loop", "tmpfs") } func (s *poolSuite) TestListFilterEmpty(c *gc.C) { - valid, err := apiserverstorage.IsValidPoolListFilter(s.api, params.StoragePoolFilter{}) - s.assertNoError(c, valid, err) + err := apiserverstorage.ValidatePoolListFilter(s.api, params.StoragePoolFilter{}) + c.Assert(err, jc.ErrorIsNil) } const ( @@ -163,90 +182,80 @@ func (s *poolSuite) TestListFilterValidProviders(c *gc.C) { s.registerProviders(c) - valid, err := apiserverstorage.ValidateProviders( + err := apiserverstorage.ValidateProviderCriteria( s.api, []string{validProvider}) - s.assertNoError(c, valid, err) + c.Assert(err, jc.ErrorIsNil) } func (s *poolSuite) TestListFilterUnregisteredProvider(c *gc.C) { - s.state.envName = "noprovidersregistered" - valid, err := apiserverstorage.ValidateProviders( + s.state.modelName = "noprovidersregistered" + err := apiserverstorage.ValidateProviderCriteria( s.api, []string{validProvider}) - s.assertError(c, valid, err, ".*not supported.*") + c.Assert(err, gc.ErrorMatches, ".*not supported.*") } func (s *poolSuite) TestListFilterUnknownProvider(c *gc.C) { s.registerProviders(c) - valid, err := apiserverstorage.ValidateProviders( + err := apiserverstorage.ValidateProviderCriteria( s.api, []string{invalidProvider}) - s.assertError(c, valid, err, ".*not 
supported.*") + c.Assert(err, gc.ErrorMatches, ".*not supported.*") } func (s *poolSuite) TestListFilterValidNames(c *gc.C) { - valid, err := apiserverstorage.ValidateNames( + err := apiserverstorage.ValidateNameCriteria( s.api, []string{validName}) - s.assertNoError(c, valid, err) + c.Assert(err, jc.ErrorIsNil) } func (s *poolSuite) TestListFilterInvalidNames(c *gc.C) { - valid, err := apiserverstorage.ValidateNames( + err := apiserverstorage.ValidateNameCriteria( s.api, []string{invalidName}) - s.assertError(c, valid, err, ".*not valid.*") + c.Assert(err, gc.ErrorMatches, ".*not valid.*") } func (s *poolSuite) TestListFilterValidProvidersAndNames(c *gc.C) { s.registerProviders(c) - valid, err := apiserverstorage.IsValidPoolListFilter( + err := apiserverstorage.ValidatePoolListFilter( s.api, params.StoragePoolFilter{ Providers: []string{validProvider}, Names: []string{validName}}) - s.assertNoError(c, valid, err) + c.Assert(err, jc.ErrorIsNil) } func (s *poolSuite) TestListFilterValidProvidersAndInvalidNames(c *gc.C) { s.registerProviders(c) - valid, err := apiserverstorage.IsValidPoolListFilter( + err := apiserverstorage.ValidatePoolListFilter( s.api, params.StoragePoolFilter{ Providers: []string{validProvider}, Names: []string{invalidName}}) - s.assertError(c, valid, err, ".*not valid.*") + c.Assert(err, gc.ErrorMatches, ".*not valid.*") } func (s *poolSuite) TestListFilterInvalidProvidersAndValidNames(c *gc.C) { - valid, err := apiserverstorage.IsValidPoolListFilter( + err := apiserverstorage.ValidatePoolListFilter( s.api, params.StoragePoolFilter{ Providers: []string{invalidProvider}, Names: []string{validName}}) - s.assertError(c, valid, err, ".*not supported.*") + c.Assert(err, gc.ErrorMatches, ".*not supported.*") } func (s *poolSuite) TestListFilterInvalidProvidersAndNames(c *gc.C) { - valid, err := apiserverstorage.IsValidPoolListFilter( + err := apiserverstorage.ValidatePoolListFilter( s.api, params.StoragePoolFilter{ Providers: []string{invalidProvider}, Names: []string{invalidName}}) - s.assertError(c, valid, err, ".*not supported.*") + c.Assert(err, gc.ErrorMatches, ".*not supported.*") } func (s *poolSuite) registerProviders(c *gc.C) { - registry.RegisterEnvironStorageProviders(s.state.envName, "dummy") -} - -func (s *poolSuite) assertNoError(c *gc.C, result bool, err error) { - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.IsTrue) -} - -func (s *poolSuite) assertError(c *gc.C, result bool, err error, msg string) { - c.Assert(errors.Cause(err), gc.ErrorMatches, msg) - c.Assert(result, jc.IsFalse) + registry.RegisterEnvironStorageProviders(s.state.modelName, "dummy") } === modified file 'src/github.com/juju/juju/apiserver/storage/state.go' --- src/github.com/juju/juju/apiserver/storage/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storage/state.go 2016-03-22 15:18:22 +0000 @@ -50,8 +50,8 @@ // BlockDevices is required for storage functionality. BlockDevices(names.MachineTag) ([]state.BlockDeviceInfo, error) - // EnvName is required for pool functionality. - EnvName() (string, error) + // ModelName is required for pool functionality. + ModelName() (string, error) // AllVolumes is required for volume functionality. AllVolumes() ([]state.Volume, error) @@ -65,6 +65,18 @@ // Volume is required for volume functionality. Volume(tag names.VolumeTag) (state.Volume, error) + // AllFilesystems is required for filesystem functionality. + AllFilesystems() ([]state.Filesystem, error) + + // FilesystemAttachments is required for filesystem functionality. 
+ FilesystemAttachments(filesystem names.FilesystemTag) ([]state.FilesystemAttachment, error) + + // MachineFilesystemAttachments is required for filesystem functionality. + MachineFilesystemAttachments(machine names.MachineTag) ([]state.FilesystemAttachment, error) + + // Filesystem is required for filesystem functionality. + Filesystem(tag names.FilesystemTag) (state.Filesystem, error) + // AddStorageForUnit is required for storage add functionality. AddStorageForUnit(tag names.UnitTag, name string, cons state.StorageConstraints) error @@ -95,10 +107,10 @@ return names.NewMachineTag(mid), nil } -// EnvName returns the name of Juju environment, +// ModelName returns the name of the Juju model, // or an error if environment configuration is not retrievable. -func (s stateShim) EnvName() (string, error) { - cfg, err := s.State.EnvironConfig() +func (s stateShim) ModelName() (string, error) { + cfg, err := s.State.ModelConfig() if err != nil { return "", errors.Trace(err) } === modified file 'src/github.com/juju/juju/apiserver/storage/storage.go' --- src/github.com/juju/juju/apiserver/storage/storage.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storage/storage.go 2016-03-22 15:18:22 +0000 @@ -1,17 +1,17 @@ // Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. +// Package storage provides an API server facade for managing +// storage entities. package storage import ( - "time" - "github.com/juju/errors" - "github.com/juju/loggo" "github.com/juju/names" "github.com/juju/utils/set" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/storagecommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" "github.com/juju/juju/storage" @@ -19,10 +19,8 @@ "github.com/juju/juju/storage/provider/registry" ) -var logger = loggo.GetLogger("juju.apiserver.storage") - func init() { - common.RegisterStandardFacade("Storage", 1, NewAPI) + common.RegisterStandardFacade("Storage", 2, NewAPI) } // API implements the storage interface and is the concrete @@ -64,10 +62,10 @@ return poolmanager.New(state.NewStateSettings(st)) } -// Show retrieves and returns detailed information about desired storage -// identified by supplied tags. If specified storage cannot be retrieved, -// individual error is returned instead of storage information. +// StorageDetails retrieves and returns detailed information about desired +// storage identified by supplied tags. If specified storage cannot be +// retrieved, an individual error is returned instead of storage information. +func (api *API) StorageDetails(entities params.Entities) (params.StorageDetailsResults, error) { results := make([]params.StorageDetailsResult, len(entities.Entities)) for i, entity := range entities.Entities { storageTag, err := names.ParseStorageTag(entity.Tag) @@ -80,62 +78,70 @@ results[i].Error = common.ServerError(err) continue } - results[i] = api.createStorageDetailsResult(storageInstance) + details, err := createStorageDetails(api.storage, storageInstance) + if err != nil { + results[i].Error = common.ServerError(err) + continue + } + results[i].Result = details } return params.StorageDetailsResults{Results: results}, nil } -// List returns all currently known storage. Unlike Show(), -if errors encountered while retrieving a particular -storage, this error is treated as part of the returned storage detail.
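// The storageAccess interface above is satisfied by a thin stateShim over
// *state.State, which keeps the facade testable against a fake state. A
// minimal sketch of the pattern (abbreviated; the final cfg.Name() return is
// assumed from the shim's surrounding context, which this hunk does not show):
//
//	type storageAccess interface {
//		ModelName() (string, error)
//	}
//
//	type stateShim struct {
//		*state.State
//	}
//
//	func (s stateShim) ModelName() (string, error) {
//		cfg, err := s.State.ModelConfig()
//		if err != nil {
//			return "", errors.Trace(err)
//		}
//		return cfg.Name(), nil
//	}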
-func (api *API) List() (params.StorageDetailsResults, error) { +// ListStorageDetails returns storage matching a filter. +func (api *API) ListStorageDetails(filters params.StorageFilters) (params.StorageDetailsListResults, error) { + results := params.StorageDetailsListResults{ + Results: make([]params.StorageDetailsListResult, len(filters.Filters)), + } + for i, filter := range filters.Filters { + list, err := api.listStorageDetails(filter) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + results.Results[i].Result = list + } + return results, nil +} + +func (api *API) listStorageDetails(filter params.StorageFilter) ([]params.StorageDetails, error) { + if filter != (params.StorageFilter{}) { + // StorageFilter has no fields at the time of writing, but + // check that no fields are set in case we forget to update + // this code. + return nil, errors.NotSupportedf("storage filters") + } stateInstances, err := api.storage.AllStorageInstances() if err != nil { - return params.StorageDetailsResults{}, common.ServerError(err) + return nil, common.ServerError(err) } - results := make([]params.StorageDetailsResult, len(stateInstances)) + results := make([]params.StorageDetails, len(stateInstances)) for i, stateInstance := range stateInstances { - results[i] = api.createStorageDetailsResult(stateInstance) - } - return params.StorageDetailsResults{Results: results}, nil -} - -func (api *API) createStorageDetailsResult(si state.StorageInstance) params.StorageDetailsResult { - details, err := createStorageDetails(api.storage, si) - if err != nil { - return params.StorageDetailsResult{Error: common.ServerError(err)} - } - - legacy := params.LegacyStorageDetails{ - details.StorageTag, - details.OwnerTag, - details.Kind, - string(details.Status.Status), - "", // unit tag set below - "", // location set below - details.Persistent, - } - if len(details.Attachments) == 1 { - for unitTag, attachmentDetails := range details.Attachments { - legacy.UnitTag = unitTag - legacy.Location = attachmentDetails.Location + details, err := createStorageDetails(api.storage, stateInstance) + if err != nil { + return nil, errors.Annotatef( + err, "getting details for %s", + names.ReadableString(stateInstance.Tag()), + ) } + results[i] = *details } - - return params.StorageDetailsResult{Result: details, Legacy: legacy} + return results, nil } func createStorageDetails(st storageAccess, si state.StorageInstance) (*params.StorageDetails, error) { // Get information from underlying volume or filesystem. var persistent bool - var entityStatus params.EntityStatus + var statusEntity state.StatusGetter if si.Kind() != state.StorageKindBlock { // TODO(axw) when we support persistent filesystems, // e.g. CephFS, we'll need to do set "persistent" // here too. - nowUTC := time.Now().UTC() - entityStatus.Status = params.StatusUnknown - entityStatus.Since = &nowUTC + filesystem, err := st.StorageInstanceFilesystem(si.StorageTag()) + if err != nil { + return nil, errors.Trace(err) + } + statusEntity = filesystem } else { volume, err := st.StorageInstanceVolume(si.StorageTag()) if err != nil { @@ -144,11 +150,11 @@ if info, err := volume.Info(); err == nil { persistent = info.Persistent } - status, err := volume.Status() - if err != nil { - return nil, errors.Trace(err) - } - entityStatus = common.EntityStatusFromState(status) + statusEntity = volume + } + status, err := statusEntity.Status() + if err != nil { + return nil, errors.Trace(err) } // Get unit storage attachments. 
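// ListStorageDetails above establishes the bulk-arguments shape used by the
// rest of this facade: one result slot per input filter, with a failure
// recorded against its own slot rather than failing the whole call. A generic
// sketch of the idiom (ThingFilters, ThingListResults and listThings are
// placeholder names, not real API types):
//
//	func (api *API) ListThings(filters params.ThingFilters) (params.ThingListResults, error) {
//		results := params.ThingListResults{
//			Results: make([]params.ThingListResult, len(filters.Filters)),
//		}
//		for i, filter := range filters.Filters {
//			list, err := api.listThings(filter)
//			if err != nil {
//				// Only this filter's slot carries the error.
//				results.Results[i].Error = common.ServerError(err)
//				continue
//			}
//			results.Results[i].Result = list
//		}
//		return results, nil
//	}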
@@ -174,24 +180,11 @@ } } - // Hack to set filesystem status. - // - // TODO(axw) we can undo this in 1.26, - // where we have proper filesystem status. - if entityStatus.Status == params.StatusUnknown { - entityStatus.Status = params.StatusPending - for _, details := range storageAttachmentDetails { - if details.Location != "" { - entityStatus.Status = params.StatusAttached - } - } - } - return &params.StorageDetails{ StorageTag: si.Tag().String(), OwnerTag: si.Owner().String(), Kind: params.StorageKind(si.Kind()), - Status: entityStatus, + Status: common.EntityStatusFromState(status), Persistent: persistent, Attachments: storageAttachmentDetails, }, nil @@ -204,7 +197,7 @@ } else if err != nil { return names.MachineTag{}, "", errors.Trace(err) } - info, err := common.StorageAttachmentInfo(st, a, machineTag) + info, err := storagecommon.StorageAttachmentInfo(st, a, machineTag) if errors.IsNotProvisioned(err) { return machineTag, "", nil } else if err != nil { @@ -222,27 +215,40 @@ // This method lists the union of pools and environment provider types. // If no filter is provided, all pools are returned. func (a *API) ListPools( - filter params.StoragePoolFilter, -) (params.StoragePoolsResult, error) { - - if ok, err := a.isValidPoolListFilter(filter); !ok { - return params.StoragePoolsResult{}, err - } - + filters params.StoragePoolFilters, +) (params.StoragePoolsResults, error) { + results := params.StoragePoolsResults{ + Results: make([]params.StoragePoolsResult, len(filters.Filters)), + } + for i, filter := range filters.Filters { + pools, err := a.listPools(filter) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + results.Results[i].Result = pools + } + return results, nil +} + +func (a *API) listPools(filter params.StoragePoolFilter) ([]params.StoragePool, error) { + if err := a.validatePoolListFilter(filter); err != nil { + return nil, err + } pools, err := a.poolManager.List() if err != nil { - return params.StoragePoolsResult{}, err + return nil, err } providers, err := a.allProviders() if err != nil { - return params.StoragePoolsResult{}, err + return nil, err } matches := buildFilter(filter) results := append( filterPools(pools, matches), filterProviders(providers, matches)..., ) - return params.StoragePoolsResult{results}, nil + return results, nil } func buildFilter(filter params.StoragePoolFilter) func(n, p string) bool { @@ -302,7 +308,7 @@ } func (a *API) allProviders() ([]storage.ProviderType, error) { - envName, err := a.storage.EnvName() + envName, err := a.storage.ModelName() if err != nil { return nil, errors.Annotate(err, "getting env name") } @@ -312,42 +318,36 @@ return nil, nil } -func (a *API) isValidPoolListFilter( - filter params.StoragePoolFilter, -) (bool, error) { - if len(filter.Providers) != 0 { - if valid, err := a.isValidProviderCriteria(filter.Providers); !valid { - return false, errors.Trace(err) - } - } - if len(filter.Names) != 0 { - if valid, err := a.isValidNameCriteria(filter.Names); !valid { - return false, errors.Trace(err) - } - } - return true, nil +func (a *API) validatePoolListFilter(filter params.StoragePoolFilter) error { + if err := a.validateProviderCriteria(filter.Providers); err != nil { + return errors.Trace(err) + } + if err := a.validateNameCriteria(filter.Names); err != nil { + return errors.Trace(err) + } + return nil } -func (a *API) isValidNameCriteria(names []string) (bool, error) { +func (a *API) validateNameCriteria(names []string) error { for _, n := range names { if !storage.IsValidPoolName(n) {
- return false, errors.NotValidf("pool name %q", n) + return errors.NotValidf("pool name %q", n) } } - return true, nil + return nil } -func (a *API) isValidProviderCriteria(providers []string) (bool, error) { - envName, err := a.storage.EnvName() +func (a *API) validateProviderCriteria(providers []string) error { + envName, err := a.storage.ModelName() if err != nil { - return false, errors.Annotate(err, "getting env name") + return errors.Annotate(err, "getting model name") } for _, p := range providers { if !registry.IsProviderSupported(envName, storage.ProviderType(p)) { - return false, errors.NotSupportedf("%q for environment %q", p, envName) + return errors.NotSupportedf("%q", p) } } - return true, nil + return nil } // CreatePool creates a new pool with specified parameters. @@ -359,13 +359,29 @@ return err } -func (a *API) ListVolumes(filter params.VolumeFilter) (params.VolumeDetailsResults, error) { - volumes, volumeAttachments, err := filterVolumes(a.storage, filter) - if err != nil { - return params.VolumeDetailsResults{}, common.ServerError(err) - } - results := createVolumeDetailsResults(a.storage, volumes, volumeAttachments) - return params.VolumeDetailsResults{Results: results}, nil +// ListVolumes lists volumes with the given filters. Each filter produces +// an independent list of volumes, or an error if the filter is invalid +// or the volumes could not be listed. +func (a *API) ListVolumes(filters params.VolumeFilters) (params.VolumeDetailsListResults, error) { + results := params.VolumeDetailsListResults{ + Results: make([]params.VolumeDetailsListResult, len(filters.Filters)), + } + for i, filter := range filters.Filters { + volumes, volumeAttachments, err := filterVolumes(a.storage, filter) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + details, err := createVolumeDetailsList( + a.storage, volumes, volumeAttachments, + ) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + results.Results[i].Result = details + } + return results, nil } func filterVolumes( @@ -419,60 +435,27 @@ return volumes, volumeAttachments, nil } -func createVolumeDetailsResults( +func createVolumeDetailsList( st storageAccess, volumes []state.Volume, attachments map[names.VolumeTag][]state.VolumeAttachment, -) []params.VolumeDetailsResult { +) ([]params.VolumeDetails, error) { if len(volumes) == 0 { - return nil + return nil, nil } - - results := make([]params.VolumeDetailsResult, len(volumes)) + results := make([]params.VolumeDetails, len(volumes)) for i, v := range volumes { details, err := createVolumeDetails(st, v, attachments[v.VolumeTag()]) if err != nil { - results[i].Error = common.ServerError(err) - continue - } - result := params.VolumeDetailsResult{ - Details: details, - } - - // We need to populate the legacy fields for old clients. 
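// The validators above report failures with the typed constructors from
// github.com/juju/errors, so callers can branch on the kind of failure rather
// than on a separate boolean. A small sketch, assuming only that package
// ("somepool" and "invalid/name" are made-up inputs):
//
//	err := errors.NotSupportedf("%q", "somepool")
//	if errors.IsNotSupported(err) {
//		// reported against the offending filter by listPools
//	}
//	err = errors.NotValidf("pool name %q", "invalid/name")
//	if errors.IsNotValid(err) {
//		// likewise surfaced as that filter's per-slot error
//	}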
- if len(details.MachineAttachments) > 0 { - result.LegacyAttachments = make([]params.VolumeAttachment, 0, len(details.MachineAttachments)) - for machineTag, attachmentInfo := range details.MachineAttachments { - result.LegacyAttachments = append(result.LegacyAttachments, params.VolumeAttachment{ VolumeTag: details.VolumeTag, MachineTag: machineTag, Info: attachmentInfo, }) - } - } - result.LegacyVolume = &params.LegacyVolumeDetails{ VolumeTag: details.VolumeTag, VolumeId: details.Info.VolumeId, HardwareId: details.Info.HardwareId, Size: details.Info.Size, Persistent: details.Info.Persistent, Status: details.Status, } - if details.Storage != nil { - result.LegacyVolume.StorageTag = details.Storage.StorageTag - kind, err := names.TagKind(details.Storage.OwnerTag) - if err != nil { - results[i].Error = common.ServerError(err) - continue - } - if kind == names.UnitTagKind { - result.LegacyVolume.UnitTag = details.Storage.OwnerTag - } - } - results[i] = result + return nil, errors.Annotatef( + err, "getting details for %s", + names.ReadableString(v.VolumeTag()), + ) + } + results[i] = *details } - return results + return results, nil } func createVolumeDetails( @@ -484,7 +467,7 @@ } if info, err := v.Info(); err == nil { - details.Info = common.VolumeInfoFromState(info) + details.Info = storagecommon.VolumeInfoFromState(info) } if len(attachments) > 0 { @@ -493,7 +476,7 @@ stateInfo, err := attachment.Info() var info params.VolumeAttachmentInfo if err == nil { - info = common.VolumeAttachmentInfoFromState(stateInfo) + info = storagecommon.VolumeAttachmentInfoFromState(stateInfo) } details.MachineAttachments[attachment.Machine().String()] = info } @@ -520,6 +503,154 @@ return details, nil } +// ListFilesystems returns a list of filesystems in the environment matching +// the provided filter. Each result describes a filesystem in detail, including +// the filesystem's attachments. +func (a *API) ListFilesystems(filters params.FilesystemFilters) (params.FilesystemDetailsListResults, error) { + results := params.FilesystemDetailsListResults{ + Results: make([]params.FilesystemDetailsListResult, len(filters.Filters)), + } + for i, filter := range filters.Filters { + filesystems, filesystemAttachments, err := filterFilesystems(a.storage, filter) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + details, err := createFilesystemDetailsList( + a.storage, filesystems, filesystemAttachments, + ) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + results.Results[i].Result = details + } + return results, nil +} + +func filterFilesystems( + st storageAccess, + f params.FilesystemFilter, +) ([]state.Filesystem, map[names.FilesystemTag][]state.FilesystemAttachment, error) { + if f.IsEmpty() { + // No filter was specified: get all filesystems, and all attachments.
+ filesystems, err := st.AllFilesystems() + if err != nil { + return nil, nil, errors.Trace(err) + } + filesystemAttachments := make(map[names.FilesystemTag][]state.FilesystemAttachment) + for _, f := range filesystems { + attachments, err := st.FilesystemAttachments(f.FilesystemTag()) + if err != nil { + return nil, nil, errors.Trace(err) + } + filesystemAttachments[f.FilesystemTag()] = attachments + } + return filesystems, filesystemAttachments, nil + } + filesystemsByTag := make(map[names.FilesystemTag]state.Filesystem) + filesystemAttachments := make(map[names.FilesystemTag][]state.FilesystemAttachment) + for _, machine := range f.Machines { + machineTag, err := names.ParseMachineTag(machine) + if err != nil { + return nil, nil, errors.Trace(err) + } + attachments, err := st.MachineFilesystemAttachments(machineTag) + if err != nil { + return nil, nil, errors.Trace(err) + } + for _, attachment := range attachments { + filesystemTag := attachment.Filesystem() + filesystemsByTag[filesystemTag] = nil + filesystemAttachments[filesystemTag] = append(filesystemAttachments[filesystemTag], attachment) + } + } + for filesystemTag := range filesystemsByTag { + filesystem, err := st.Filesystem(filesystemTag) + if err != nil { + return nil, nil, errors.Trace(err) + } + filesystemsByTag[filesystemTag] = filesystem + } + filesystems := make([]state.Filesystem, 0, len(filesystemsByTag)) + for _, filesystem := range filesystemsByTag { + filesystems = append(filesystems, filesystem) + } + return filesystems, filesystemAttachments, nil +} + +func createFilesystemDetailsList( + st storageAccess, + filesystems []state.Filesystem, + attachments map[names.FilesystemTag][]state.FilesystemAttachment, +) ([]params.FilesystemDetails, error) { + + if len(filesystems) == 0 { + return nil, nil + } + results := make([]params.FilesystemDetails, len(filesystems)) + for i, f := range filesystems { + details, err := createFilesystemDetails(st, f, attachments[f.FilesystemTag()]) + if err != nil { + return nil, errors.Annotatef( + err, "getting details for %s", + names.ReadableString(f.FilesystemTag()), + ) + } + results[i] = *details + } + return results, nil +} + +func createFilesystemDetails( + st storageAccess, f state.Filesystem, attachments []state.FilesystemAttachment, +) (*params.FilesystemDetails, error) { + + details := &params.FilesystemDetails{ + FilesystemTag: f.FilesystemTag().String(), + } + + if volumeTag, err := f.Volume(); err == nil { + details.VolumeTag = volumeTag.String() + } + + if info, err := f.Info(); err == nil { + details.Info = storagecommon.FilesystemInfoFromState(info) + } + + if len(attachments) > 0 { + details.MachineAttachments = make(map[string]params.FilesystemAttachmentInfo, len(attachments)) + for _, attachment := range attachments { + stateInfo, err := attachment.Info() + var info params.FilesystemAttachmentInfo + if err == nil { + info = storagecommon.FilesystemAttachmentInfoFromState(stateInfo) + } + details.MachineAttachments[attachment.Machine().String()] = info + } + } + + status, err := f.Status() + if err != nil { + return nil, errors.Trace(err) + } + details.Status = common.EntityStatusFromState(status) + + if storageTag, err := f.Storage(); err == nil { + storageInstance, err := st.StorageInstance(storageTag) + if err != nil { + return nil, errors.Trace(err) + } + storageDetails, err := createStorageDetails(st, storageInstance) + if err != nil { + return nil, errors.Trace(err) + } + details.Storage = storageDetails + } + + return details, nil +} + // AddToUnit validates and
creates additional storage instances for units. // This method handles bulk add operations and // a failure on one individual storage instance does not block remaining === modified file 'src/github.com/juju/juju/apiserver/storage/storage_test.go' --- src/github.com/juju/juju/apiserver/storage/storage_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storage/storage_test.go 2016-03-22 15:18:22 +0000 @@ -27,18 +27,25 @@ return []state.StorageInstance{}, nil } - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) - c.Assert(found.Results, gc.HasLen, 0) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Error, gc.IsNil) + c.Assert(found.Results[0].Result, gc.HasLen, 0) s.assertCalls(c, []string{allStorageInstancesCall}) } func (s *storageSuite) TestStorageListFilesystem(c *gc.C) { - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ allStorageInstancesCall, + storageInstanceFilesystemCall, storageInstanceAttachmentsCall, unitAssignedMachineCall, storageInstanceCall, @@ -48,16 +55,17 @@ s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - wantedDetails := s.createTestStorageDetailsResult() - - c.Assert(found.Results[0].Result.Status.Since, gc.NotNil) - found.Results[0].Result.Status.Since = nil - s.assertInstanceInfoError(c, found.Results[0], wantedDetails, "") + c.Assert(found.Results[0].Error, gc.IsNil) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + wantedDetails := s.createTestStorageDetails() + c.Assert(found.Results[0].Result[0], jc.DeepEquals, wantedDetails) } func (s *storageSuite) TestStorageListVolume(c *gc.C) { s.storageInstance.kind = state.StorageKindBlock - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ @@ -71,12 +79,12 @@ s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - wantedDetails := s.createTestStorageDetailsResult() - wantedDetails.Result.Kind = params.StorageKindBlock - wantedDetails.Result.Status.Status = params.StatusAttached - wantedDetails.Legacy.Kind = params.StorageKindBlock - wantedDetails.Legacy.Status = "attached" - s.assertInstanceInfoError(c, found.Results[0], wantedDetails, "") + c.Assert(found.Results[0].Error, gc.IsNil) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + wantedDetails := s.createTestStorageDetails() + wantedDetails.Kind = params.StorageKindBlock + wantedDetails.Status.Status = params.StatusAttached + c.Assert(found.Results[0].Result[0], jc.DeepEquals, wantedDetails) } func (s *storageSuite) TestStorageListError(c *gc.C) { @@ -86,14 +94,15 @@ return []state.StorageInstance{}, errors.Errorf(msg) } - found, err := s.api.List() - c.Assert(errors.Cause(err), gc.ErrorMatches, msg) + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Error, gc.ErrorMatches, msg) - expectedCalls := []string{ - allStorageInstancesCall, - } + expectedCalls := []string{allStorageInstancesCall} s.assertCalls(c, expectedCalls) - c.Assert(found.Results, gc.HasLen, 0) } func (s *storageSuite) TestStorageListInstanceError(c *gc.C) { @@ -104,20 +113,23 @@ return nil, errors.Errorf(msg) } 
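// With the facade methods now taking a slice of filters, these tests wrap a
// single (often empty) filter per call and assert on the one outer result it
// produces. The recurring shape, for reference (field names as used by the
// params structs in this file):
//
//	found, err := s.api.ListStorageDetails(
//		params.StorageFilters{Filters: []params.StorageFilter{{}}},
//	)
//	c.Assert(err, jc.ErrorIsNil)
//	c.Assert(found.Results, gc.HasLen, 1) // one slot per filter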
- found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ allStorageInstancesCall, + storageInstanceFilesystemCall, storageInstanceAttachmentsCall, unitAssignedMachineCall, storageInstanceCall, } s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - wanted := s.createTestStorageDetailsResultWithError("", - fmt.Sprintf("getting storage attachment info: getting storage instance: %v", msg)) - s.assertInstanceInfoError(c, found.Results[0], wanted, msg) + c.Assert(found.Results[0].Error, gc.ErrorMatches, + fmt.Sprintf("getting details for storage data/0: getting storage instance: %v", msg), + ) } func (s *storageSuite) TestStorageListAttachmentError(c *gc.C) { @@ -127,18 +139,20 @@ return []state.StorageAttachment{}, errors.Errorf("list test error") } - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ allStorageInstancesCall, + storageInstanceFilesystemCall, storageInstanceAttachmentsCall, } s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - expectedErr := "list test error" - wanted := s.createTestStorageDetailsResultWithError("", expectedErr) - s.assertInstanceInfoError(c, found.Results[0], wanted, expectedErr) + c.Assert(found.Results[0].Error, gc.ErrorMatches, + "getting details for storage data/0: list test error") } func (s *storageSuite) TestStorageListMachineError(c *gc.C) { @@ -149,19 +163,22 @@ return names.MachineTag{}, errors.Errorf(msg) } - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ allStorageInstancesCall, + storageInstanceFilesystemCall, storageInstanceAttachmentsCall, unitAssignedMachineCall, } s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - wanted := s.createTestStorageDetailsResultWithError("", - fmt.Sprintf("getting unit for storage attachment: %v", msg)) - s.assertInstanceInfoError(c, found.Results[0], wanted, msg) + c.Assert(found.Results[0].Error, gc.ErrorMatches, + fmt.Sprintf("getting details for storage data/0: %v", msg), + ) } func (s *storageSuite) TestStorageListFilesystemError(c *gc.C) { @@ -172,21 +189,20 @@ return nil, errors.Errorf(msg) } - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ allStorageInstancesCall, - storageInstanceAttachmentsCall, - unitAssignedMachineCall, - storageInstanceCall, storageInstanceFilesystemCall, } s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - wanted := s.createTestStorageDetailsResultWithError("", - fmt.Sprintf("getting storage attachment info: getting filesystem: %v", msg)) - s.assertInstanceInfoError(c, found.Results[0], wanted, msg) + c.Assert(found.Results[0].Error, gc.ErrorMatches, + fmt.Sprintf("getting details for storage data/0: %v", msg), + ) } func (s *storageSuite) TestStorageListFilesystemAttachmentError(c *gc.C) { @@ -197,54 +213,40 @@ return s.machineTag, errors.Errorf(msg) } - found, err := s.api.List() + found, err := s.api.ListStorageDetails( + params.StorageFilters{[]params.StorageFilter{{}}}, + ) c.Assert(err, jc.ErrorIsNil) expectedCalls := []string{ allStorageInstancesCall, + 
storageInstanceFilesystemCall, storageInstanceAttachmentsCall, unitAssignedMachineCall, } s.assertCalls(c, expectedCalls) c.Assert(found.Results, gc.HasLen, 1) - wanted := s.createTestStorageDetailsResultWithError("", - fmt.Sprintf("getting unit for storage attachment: %v", msg)) - s.assertInstanceInfoError(c, found.Results[0], wanted, msg) -} - -func (s *storageSuite) createTestStorageDetailsResultWithError(code, msg string) params.StorageDetailsResult { - wanted := s.createTestStorageDetailsResult() - wanted.Error = &params.Error{Code: code, - Message: fmt.Sprintf("getting attachments for storage data/0: %v", msg)} - return wanted -} - -func (s *storageSuite) createTestStorageDetailsResult() params.StorageDetailsResult { - return params.StorageDetailsResult{ - &params.StorageDetails{ - StorageTag: s.storageTag.String(), - OwnerTag: s.unitTag.String(), - Kind: params.StorageKindFilesystem, - Status: params.EntityStatus{ - Status: "pending", - }, - Attachments: map[string]params.StorageAttachmentDetails{ - s.unitTag.String(): params.StorageAttachmentDetails{ - s.storageTag.String(), - s.unitTag.String(), - s.machineTag.String(), - "", // location - }, - }, - }, - params.LegacyStorageDetails{ - StorageTag: s.storageTag.String(), - OwnerTag: s.unitTag.String(), - UnitTag: s.unitTag.String(), - Kind: params.StorageKindFilesystem, - Status: "pending", - }, - nil, + c.Assert(found.Results[0].Error, gc.ErrorMatches, + fmt.Sprintf("getting details for storage data/0: %v", msg), + ) +} + +func (s *storageSuite) createTestStorageDetails() params.StorageDetails { + return params.StorageDetails{ + StorageTag: s.storageTag.String(), + OwnerTag: s.unitTag.String(), + Kind: params.StorageKindFilesystem, + Status: params.EntityStatus{ + Status: "attached", + }, + Attachments: map[string]params.StorageAttachmentDetails{ + s.unitTag.String(): params.StorageAttachmentDetails{ + s.storageTag.String(), + s.unitTag.String(), + s.machineTag.String(), + "", // location + }, + }, } } @@ -252,7 +254,6 @@ if expected != "" { c.Assert(errors.Cause(obtained.Error), gc.ErrorMatches, fmt.Sprintf(".*%v.*", expected)) c.Assert(obtained.Result, gc.IsNil) - c.Assert(obtained.Legacy, jc.DeepEquals, params.LegacyStorageDetails{}) } else { c.Assert(obtained.Error, gc.IsNil) c.Assert(obtained, jc.DeepEquals, wanted) @@ -260,23 +261,27 @@ } func (s *storageSuite) TestShowStorageEmpty(c *gc.C) { - found, err := s.api.Show(params.Entities{}) + found, err := s.api.StorageDetails(params.Entities{}) c.Assert(err, jc.ErrorIsNil) - // Nothing should have matched the filter :D c.Assert(found.Results, gc.HasLen, 0) } -func (s *storageSuite) TestShowStorageNoFilter(c *gc.C) { - found, err := s.api.Show(params.Entities{Entities: []params.Entity{}}) +func (s *storageSuite) TestShowStorageInvalidTag(c *gc.C) { + // Only storage tags are permitted + found, err := s.api.StorageDetails(params.Entities{ + Entities: []params.Entity{{Tag: "machine-1"}}, + }) c.Assert(err, jc.ErrorIsNil) - // Nothing should have matched the filter :D - c.Assert(found.Results, gc.HasLen, 0) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Error, gc.ErrorMatches, `"machine-1" is not a valid storage tag`) } func (s *storageSuite) TestShowStorage(c *gc.C) { entity := params.Entity{Tag: s.storageTag.String()} - found, err := s.api.Show(params.Entities{Entities: []params.Entity{entity}}) + found, err := s.api.StorageDetails( + params.Entities{Entities: []params.Entity{entity}}, + ) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) @@ -288,7
+293,7 @@ OwnerTag: s.unitTag.String(), Kind: params.StorageKindFilesystem, Status: params.EntityStatus{ - Status: "pending", + Status: "attached", }, Attachments: map[string]params.StorageAttachmentDetails{ s.unitTag.String(): params.StorageAttachmentDetails{ @@ -299,8 +304,6 @@ }, }, } - c.Assert(one.Result.Status.Since, gc.NotNil) - one.Result.Status.Since = nil c.Assert(one.Result, jc.DeepEquals, &expected) } @@ -308,7 +311,7 @@ storageTag := "foo" entity := params.Entity{Tag: storageTag} - found, err := s.api.Show(params.Entities{Entities: []params.Entity{entity}}) + found, err := s.api.StorageDetails(params.Entities{Entities: []params.Entity{entity}}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) s.assertInstanceInfoError(c, found.Results[0], params.StorageDetailsResult{}, `"foo" is not a valid tag`) === modified file 'src/github.com/juju/juju/apiserver/storage/volumelist_test.go' --- src/github.com/juju/juju/apiserver/storage/volumelist_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storage/volumelist_test.go 2016-03-22 15:18:22 +0000 @@ -21,60 +21,46 @@ var _ = gc.Suite(&volumeSuite{}) -func (s *volumeSuite) expectedVolumeDetailsResult() params.VolumeDetailsResult { - return params.VolumeDetailsResult{ - Details: &params.VolumeDetails{ - VolumeTag: s.volumeTag.String(), - Status: params.EntityStatus{ - Status: "attached", - }, - MachineAttachments: map[string]params.VolumeAttachmentInfo{ - s.machineTag.String(): params.VolumeAttachmentInfo{}, - }, - Storage: &params.StorageDetails{ - StorageTag: "storage-data-0", - OwnerTag: "unit-mysql-0", - Kind: params.StorageKindFilesystem, - Status: params.EntityStatus{ - Status: "pending", - }, - Attachments: map[string]params.StorageAttachmentDetails{ - "unit-mysql-0": params.StorageAttachmentDetails{ - StorageTag: "storage-data-0", - UnitTag: "unit-mysql-0", - MachineTag: "machine-66", - }, - }, - }, - }, - LegacyVolume: &params.LegacyVolumeDetails{ - VolumeTag: s.volumeTag.String(), +func (s *volumeSuite) expectedVolumeDetails() params.VolumeDetails { + return params.VolumeDetails{ + VolumeTag: s.volumeTag.String(), + Status: params.EntityStatus{ + Status: "attached", + }, + MachineAttachments: map[string]params.VolumeAttachmentInfo{ + s.machineTag.String(): params.VolumeAttachmentInfo{}, + }, + Storage: &params.StorageDetails{ StorageTag: "storage-data-0", - UnitTag: "unit-mysql-0", + OwnerTag: "unit-mysql-0", + Kind: params.StorageKindFilesystem, Status: params.EntityStatus{ Status: "attached", }, + Attachments: map[string]params.StorageAttachmentDetails{ + "unit-mysql-0": params.StorageAttachmentDetails{ + StorageTag: "storage-data-0", + UnitTag: "unit-mysql-0", + MachineTag: "machine-66", + }, + }, }, - LegacyAttachments: []params.VolumeAttachment{{ - VolumeTag: s.volumeTag.String(), - MachineTag: s.machineTag.String(), - }}, } } -// TODO(axw) drop this in 1.26. This exists only because we don't have -// Filesystem.Status, and so we use time.Now() to get Status.Since.
-func (s *volumeSuite) assertAndClearStorageStatus(c *gc.C, details *params.VolumeDetails) { - c.Assert(details.Storage.Status.Since, gc.NotNil) - details.Storage.Status.Since = nil +func (s *volumeSuite) TestListVolumesNoFilters(c *gc.C) { + found, err := s.api.ListVolumes(params.VolumeFilters{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(found.Results, gc.HasLen, 0) } func (s *volumeSuite) TestListVolumesEmptyFilter(c *gc.C) { - found, err := s.api.ListVolumes(params.VolumeFilter{}) + found, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) - s.assertAndClearStorageStatus(c, found.Results[0].Details) - c.Assert(found.Results[0], gc.DeepEquals, s.expectedVolumeDetailsResult()) + c.Assert(found.Results[0].Error, gc.IsNil) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], gc.DeepEquals, s.expectedVolumeDetails()) } func (s *volumeSuite) TestListVolumesError(c *gc.C) { @@ -82,37 +68,44 @@ s.state.allVolumes = func() ([]state.Volume, error) { return nil, errors.New(msg) } - _, err := s.api.ListVolumes(params.VolumeFilter{}) - c.Assert(err, gc.ErrorMatches, msg) + results, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Error, gc.ErrorMatches, msg) } func (s *volumeSuite) TestListVolumesNoVolumes(c *gc.C) { s.state.allVolumes = func() ([]state.Volume, error) { return nil, nil } - results, err := s.api.ListVolumes(params.VolumeFilter{}) + results, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) c.Assert(err, jc.ErrorIsNil) - c.Assert(results.Results, gc.HasLen, 0) + c.Assert(results.Results, gc.HasLen, 1) + c.Assert(results.Results[0].Result, gc.HasLen, 0) + c.Assert(results.Results[0].Error, gc.IsNil) } func (s *volumeSuite) TestListVolumesFilter(c *gc.C) { - filter := params.VolumeFilter{ + filters := []params.VolumeFilter{{ Machines: []string{s.machineTag.String()}, - } - found, err := s.api.ListVolumes(filter) + }} + found, err := s.api.ListVolumes(params.VolumeFilters{filters}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) - s.assertAndClearStorageStatus(c, found.Results[0].Details) - c.Assert(found.Results[0], jc.DeepEquals, s.expectedVolumeDetailsResult()) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Error, gc.IsNil) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, s.expectedVolumeDetails()) } func (s *volumeSuite) TestListVolumesFilterNonMatching(c *gc.C) { - filter := params.VolumeFilter{ + filters := []params.VolumeFilter{{ Machines: []string{"machine-42"}, - } - found, err := s.api.ListVolumes(filter) + }} + found, err := s.api.ListVolumes(params.VolumeFilters{filters}) c.Assert(err, jc.ErrorIsNil) - c.Assert(found.Results, gc.HasLen, 0) + c.Assert(found.Results, gc.HasLen, 1) + c.Assert(found.Results[0].Result, gc.HasLen, 0) + c.Assert(found.Results[0].Error, gc.IsNil) } func (s *volumeSuite) TestListVolumesVolumeInfo(c *gc.C) { @@ -121,18 +114,15 @@ HardwareId: "abc", Persistent: true, } - expected := s.expectedVolumeDetailsResult() - expected.Details.Info.Size = 123 - expected.Details.Info.HardwareId = "abc" - expected.Details.Info.Persistent = true - expected.LegacyVolume.Size = 123 - expected.LegacyVolume.HardwareId = "abc" - expected.LegacyVolume.Persistent = true - found, err := s.api.ListVolumes(params.VolumeFilter{}) + expected := 
s.expectedVolumeDetails() + expected.Info.Size = 123 + expected.Info.HardwareId = "abc" + expected.Info.Persistent = true + found, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) - s.assertAndClearStorageStatus(c, found.Results[0].Details) - c.Assert(found.Results[0], jc.DeepEquals, expected) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) } func (s *volumeSuite) TestListVolumesAttachmentInfo(c *gc.C) { @@ -140,20 +130,16 @@ DeviceName: "xvdf1", ReadOnly: true, } - expected := s.expectedVolumeDetailsResult() - expected.Details.MachineAttachments[s.machineTag.String()] = params.VolumeAttachmentInfo{ - DeviceName: "xvdf1", - ReadOnly: true, - } - expected.LegacyAttachments[0].Info = params.VolumeAttachmentInfo{ - DeviceName: "xvdf1", - ReadOnly: true, - } - found, err := s.api.ListVolumes(params.VolumeFilter{}) + expected := s.expectedVolumeDetails() + expected.MachineAttachments[s.machineTag.String()] = params.VolumeAttachmentInfo{ + DeviceName: "xvdf1", + ReadOnly: true, + } + found, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) - s.assertAndClearStorageStatus(c, found.Results[0].Details) - c.Assert(found.Results[0], jc.DeepEquals, expected) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) } func (s *volumeSuite) TestListVolumesStorageLocationNoBlockDevice(c *gc.C) { @@ -162,19 +148,17 @@ s.volumeAttachment.info = &state.VolumeAttachmentInfo{ ReadOnly: true, } - expected := s.expectedVolumeDetailsResult() - expected.Details.Storage.Kind = params.StorageKindBlock - expected.Details.Storage.Status.Status = params.StatusAttached - expected.Details.MachineAttachments[s.machineTag.String()] = params.VolumeAttachmentInfo{ - ReadOnly: true, - } - expected.LegacyAttachments[0].Info = params.VolumeAttachmentInfo{ - ReadOnly: true, - } - found, err := s.api.ListVolumes(params.VolumeFilter{}) + expected := s.expectedVolumeDetails() + expected.Storage.Kind = params.StorageKindBlock + expected.Storage.Status.Status = params.StatusAttached + expected.MachineAttachments[s.machineTag.String()] = params.VolumeAttachmentInfo{ + ReadOnly: true, + } + found, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) - c.Assert(found.Results[0], jc.DeepEquals, expected) + c.Assert(found.Results[0].Result, gc.HasLen, 1) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) } func (s *volumeSuite) TestListVolumesStorageLocationBlockDevicePath(c *gc.C) { @@ -190,22 +174,18 @@ BusAddress: "bus-addr", ReadOnly: true, } - expected := s.expectedVolumeDetailsResult() - expected.Details.Storage.Kind = params.StorageKindBlock - expected.Details.Storage.Status.Status = params.StatusAttached - storageAttachmentDetails := expected.Details.Storage.Attachments["unit-mysql-0"] + expected := s.expectedVolumeDetails() + expected.Storage.Kind = params.StorageKindBlock + expected.Storage.Status.Status = params.StatusAttached + storageAttachmentDetails := expected.Storage.Attachments["unit-mysql-0"] storageAttachmentDetails.Location = filepath.FromSlash("/dev/sdd") - expected.Details.Storage.Attachments["unit-mysql-0"] = storageAttachmentDetails - expected.Details.MachineAttachments[s.machineTag.String()] = 
params.VolumeAttachmentInfo{ - BusAddress: "bus-addr", - ReadOnly: true, - } - expected.LegacyAttachments[0].Info = params.VolumeAttachmentInfo{ - BusAddress: "bus-addr", - ReadOnly: true, - } - found, err := s.api.ListVolumes(params.VolumeFilter{}) + expected.Storage.Attachments["unit-mysql-0"] = storageAttachmentDetails + expected.MachineAttachments[s.machineTag.String()] = params.VolumeAttachmentInfo{ + BusAddress: "bus-addr", + ReadOnly: true, + } + found, err := s.api.ListVolumes(params.VolumeFilters{[]params.VolumeFilter{{}}}) c.Assert(err, jc.ErrorIsNil) c.Assert(found.Results, gc.HasLen, 1) - c.Assert(found.Results[0], jc.DeepEquals, expected) + c.Assert(found.Results[0].Result[0], jc.DeepEquals, expected) } === modified file 'src/github.com/juju/juju/apiserver/storageprovisioner/state.go' --- src/github.com/juju/juju/apiserver/storageprovisioner/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storageprovisioner/state.go 2016-03-22 15:18:22 +0000 @@ -13,18 +13,18 @@ type provisionerState interface { state.EntityFinder - state.EnvironAccessor + state.ModelAccessor MachineInstanceId(names.MachineTag) (instance.Id, error) BlockDevices(names.MachineTag) ([]state.BlockDeviceInfo, error) WatchBlockDevices(names.MachineTag) state.NotifyWatcher WatchMachine(names.MachineTag) (state.NotifyWatcher, error) - WatchEnvironFilesystems() state.StringsWatcher + WatchModelFilesystems() state.StringsWatcher WatchEnvironFilesystemAttachments() state.StringsWatcher WatchMachineFilesystems(names.MachineTag) state.StringsWatcher WatchMachineFilesystemAttachments(names.MachineTag) state.StringsWatcher - WatchEnvironVolumes() state.StringsWatcher + WatchModelVolumes() state.StringsWatcher WatchEnvironVolumeAttachments() state.StringsWatcher WatchMachineVolumes(names.MachineTag) state.StringsWatcher WatchMachineVolumeAttachments(names.MachineTag) state.StringsWatcher === modified file 'src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner.go' --- src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "github.com/juju/names" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/storagecommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" @@ -19,14 +20,14 @@ var logger = loggo.GetLogger("juju.apiserver.storageprovisioner") func init() { - common.RegisterStandardFacade("StorageProvisioner", 1, NewStorageProvisionerAPI) + common.RegisterStandardFacade("StorageProvisioner", 2, NewStorageProvisionerAPI) } // StorageProvisionerAPI provides access to the Provisioner API facade. type StorageProvisionerAPI struct { *common.LifeGetter *common.DeadEnsurer - *common.EnvironWatcher + *common.ModelWatcher *common.InstanceIdGetter *common.StatusSetter @@ -63,7 +64,7 @@ } parentId := state.ParentId(tag.Id()) if parentId == "" { - return allowEnvironManager && authorizer.AuthEnvironManager() + return allowEnvironManager && authorizer.AuthModelManager() } // All containers with the authenticated // machine as a parent are accessible by it. @@ -72,11 +73,11 @@ getScopeAuthFunc := func() (common.AuthFunc, error) { return func(tag names.Tag) bool { switch tag := tag.(type) { - case names.EnvironTag: + case names.ModelTag: // Environment managers can access all volumes // and filesystems scoped to the environment. 
- isEnvironManager := authorizer.AuthEnvironManager() - return isEnvironManager && tag == st.EnvironTag() + isModelManager := authorizer.AuthModelManager() + return isModelManager && tag == st.ModelTag() case names.MachineTag: return canAccessStorageMachine(tag, false) default: @@ -91,13 +92,13 @@ if ok { return canAccessStorageMachine(machineTag, false) } - return authorizer.AuthEnvironManager() + return authorizer.AuthModelManager() case names.FilesystemTag: machineTag, ok := names.FilesystemMachine(tag) if ok { return canAccessStorageMachine(machineTag, false) } - return authorizer.AuthEnvironManager() + return authorizer.AuthModelManager() case names.MachineTag: return allowMachines && canAccessStorageMachine(tag, true) default: @@ -124,11 +125,11 @@ if !canAccessStorageMachine(machineTag, true) { return false } - // Environment managers can access environment-scoped + // Environment managers can access model-scoped // volumes and volumes scoped to their own machines. // Other machine agents can access volumes regardless // of their scope. - if !authorizer.AuthEnvironManager() { + if !authorizer.AuthModelManager() { return true } var machineScope names.MachineTag @@ -163,7 +164,7 @@ return &StorageProvisionerAPI{ LifeGetter: common.NewLifeGetter(stateInterface, getLifeAuthFunc), DeadEnsurer: common.NewDeadEnsurer(stateInterface, getStorageEntityAuthFunc), - EnvironWatcher: common.NewEnvironWatcher(stateInterface, resources, authorizer), + ModelWatcher: common.NewModelWatcher(stateInterface, resources, authorizer), InstanceIdGetter: common.NewInstanceIdGetter(st, getMachineAuthFunc), StatusSetter: common.NewStatusSetter(st, getStorageEntityAuthFunc), @@ -257,13 +258,13 @@ // WatchVolumes watches for changes to volumes scoped to the // entity with the tag passed to NewState. func (s *StorageProvisionerAPI) WatchVolumes(args params.Entities) (params.StringsWatchResults, error) { - return s.watchStorageEntities(args, s.st.WatchEnvironVolumes, s.st.WatchMachineVolumes) + return s.watchStorageEntities(args, s.st.WatchModelVolumes, s.st.WatchMachineVolumes) } // WatchFilesystems watches for changes to filesystems scoped // to the entity with the tag passed to NewState. 
func (s *StorageProvisionerAPI) WatchFilesystems(args params.Entities) (params.StringsWatchResults, error) { - return s.watchStorageEntities(args, s.st.WatchEnvironFilesystems, s.st.WatchMachineFilesystems) + return s.watchStorageEntities(args, s.st.WatchModelFilesystems, s.st.WatchMachineFilesystems) } func (s *StorageProvisionerAPI) watchStorageEntities( @@ -315,7 +316,7 @@ args, s.st.WatchEnvironVolumeAttachments, s.st.WatchMachineVolumeAttachments, - common.ParseVolumeAttachmentIds, + storagecommon.ParseVolumeAttachmentIds, ) } @@ -326,7 +327,7 @@ args, s.st.WatchEnvironFilesystemAttachments, s.st.WatchMachineFilesystemAttachments, - common.ParseFilesystemAttachmentIds, + storagecommon.ParseFilesystemAttachmentIds, ) } @@ -398,7 +399,7 @@ } else if err != nil { return params.Volume{}, err } - return common.VolumeFromState(volume) + return storagecommon.VolumeFromState(volume) } for i, arg := range args.Entities { var result params.VolumeResult @@ -433,7 +434,7 @@ } else if err != nil { return params.Filesystem{}, err } - return common.FilesystemFromState(filesystem) + return storagecommon.FilesystemFromState(filesystem) } for i, arg := range args.Entities { var result params.FilesystemResult @@ -462,7 +463,7 @@ if err != nil { return params.VolumeAttachment{}, err } - return common.VolumeAttachmentFromState(volumeAttachment) + return storagecommon.VolumeAttachmentFromState(volumeAttachment) } for i, arg := range args.Ids { var result params.VolumeAttachmentResult @@ -492,7 +493,7 @@ if err != nil { return storage.BlockDevice{}, err } - return common.BlockDeviceFromState(stateBlockDevice), nil + return storagecommon.BlockDeviceFromState(stateBlockDevice), nil } for i, arg := range args.Ids { var result params.BlockDeviceResult @@ -521,7 +522,7 @@ if err != nil { return params.FilesystemAttachment{}, err } - return common.FilesystemAttachmentFromState(filesystemAttachment) + return storagecommon.FilesystemAttachmentFromState(filesystemAttachment) } for i, arg := range args.Ids { var result params.FilesystemAttachmentResult @@ -543,7 +544,7 @@ if err != nil { return params.VolumeParamsResults{}, err } - envConfig, err := s.st.EnvironConfig() + envConfig, err := s.st.ModelConfig() if err != nil { return params.VolumeParamsResults{}, err } @@ -566,14 +567,14 @@ if err != nil { return params.VolumeParams{}, err } - storageInstance, err := common.MaybeAssignedStorageInstance( + storageInstance, err := storagecommon.MaybeAssignedStorageInstance( volume.StorageInstance, s.st.StorageInstance, ) if err != nil { return params.VolumeParams{}, err } - volumeParams, err := common.VolumeParams(volume, storageInstance, envConfig, poolManager) + volumeParams, err := storagecommon.VolumeParams(volume, storageInstance, envConfig, poolManager) if err != nil { return params.VolumeParams{}, err } @@ -629,7 +630,7 @@ if err != nil { return params.FilesystemParamsResults{}, err } - envConfig, err := s.st.EnvironConfig() + envConfig, err := s.st.ModelConfig() if err != nil { return params.FilesystemParamsResults{}, err } @@ -648,14 +649,14 @@ } else if err != nil { return params.FilesystemParams{}, err } - storageInstance, err := common.MaybeAssignedStorageInstance( + storageInstance, err := storagecommon.MaybeAssignedStorageInstance( filesystem.Storage, s.st.StorageInstance, ) if err != nil { return params.FilesystemParams{}, err } - filesystemParams, err := common.FilesystemParams( + filesystemParams, err := storagecommon.FilesystemParams( filesystem, storageInstance, envConfig, poolManager, ) if err != nil { 
@@ -717,7 +718,7 @@ volumeId = volumeInfo.VolumeId pool = volumeInfo.Pool } - providerType, _, err := common.StoragePoolConfig(pool, poolManager) + providerType, _, err := storagecommon.StoragePoolConfig(pool, poolManager) if err != nil { return params.VolumeAttachmentParams{}, errors.Trace(err) } @@ -796,7 +797,7 @@ filesystemId = filesystemInfo.FilesystemId pool = filesystemInfo.Pool } - providerType, _, err := common.StoragePoolConfig(pool, poolManager) + providerType, _, err := storagecommon.StoragePoolConfig(pool, poolManager) if err != nil { return params.FilesystemAttachmentParams{}, errors.Trace(err) } @@ -887,7 +888,7 @@ if err != nil { return state.BlockDeviceInfo{}, err } - blockDevice, ok := common.MatchingBlockDevice( + blockDevice, ok := storagecommon.MatchingBlockDevice( blockDevices, volumeInfo, volumeAttachmentInfo, @@ -935,7 +936,7 @@ Results: make([]params.ErrorResult, len(args.Volumes)), } one := func(arg params.Volume) error { - volumeTag, volumeInfo, err := common.VolumeToState(arg) + volumeTag, volumeInfo, err := storagecommon.VolumeToState(arg) if err != nil { return errors.Trace(err) } else if !canAccessVolume(volumeTag) { @@ -964,7 +965,7 @@ Results: make([]params.ErrorResult, len(args.Filesystems)), } one := func(arg params.Filesystem) error { - filesystemTag, filesystemInfo, err := common.FilesystemToState(arg) + filesystemTag, filesystemInfo, err := storagecommon.FilesystemToState(arg) if err != nil { return errors.Trace(err) } else if !canAccessFilesystem(filesystemTag) { @@ -996,7 +997,7 @@ Results: make([]params.ErrorResult, len(args.VolumeAttachments)), } one := func(arg params.VolumeAttachment) error { - machineTag, volumeTag, volumeAttachmentInfo, err := common.VolumeAttachmentToState(arg) + machineTag, volumeTag, volumeAttachmentInfo, err := storagecommon.VolumeAttachmentToState(arg) if err != nil { return errors.Trace(err) } @@ -1029,7 +1030,7 @@ Results: make([]params.ErrorResult, len(args.FilesystemAttachments)), } one := func(arg params.FilesystemAttachment) error { - machineTag, filesystemTag, filesystemAttachmentInfo, err := common.FilesystemAttachmentToState(arg) + machineTag, filesystemTag, filesystemAttachmentInfo, err := storagecommon.FilesystemAttachmentToState(arg) if err != nil { return errors.Trace(err) } === modified file 'src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go' --- src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/storageprovisioner/storageprovisioner_test.go 2016-03-22 15:18:22 +0000 @@ -191,8 +191,8 @@ Persistent: true, }, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -212,7 +212,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(results, gc.DeepEquals, params.VolumeResults{ Results: []params.VolumeResult{ - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: common.ServerError(errors.NotProvisionedf(`volume "1"`))}, {Result: params.Volume{ VolumeTag: "volume-2", @@ -222,7 +222,7 @@ Size: 4096, }, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) }
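// The literal changes in this test file swap positional params.Error
// composites for keyed fields, so a reordering of the struct's fields can no
// longer silently exchange Message and Code; the unkeyed form is also the
// kind of thing go vet's composites check flags for types from other
// packages. Before and after, side by side:
//
//	e := params.Error{"permission denied", "unauthorized access"}               // unkeyed: depends on field order
//	e = params.Error{Message: "permission denied", Code: "unauthorized access"} // keyed: robust to struct changes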
@@ -248,7 +248,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(results, jc.DeepEquals, params.FilesystemResults{ Results: []params.FilesystemResult{ - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: common.ServerError(errors.NotProvisionedf(`filesystem "1"`))}, {Result: params.Filesystem{ FilesystemTag: "filesystem-2", @@ -257,7 +257,7 @@ Size: 4096, }, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -299,7 +299,7 @@ Code: params.CodeNotProvisioned, Message: `volume attachment "2" on "0" not provisioned`, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -345,7 +345,7 @@ Code: params.CodeNotProvisioned, Message: `filesystem attachment "2" on "0" not provisioned`, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -368,7 +368,7 @@ Size: 1024, Provider: "machinescoped", Tags: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), + tags.JujuModel: testing.ModelTag.Id(), }, Attachment: &params.VolumeAttachmentParams{ MachineTag: "machine-0", @@ -382,7 +382,7 @@ Size: 2048, Provider: "environscoped", Tags: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), + tags.JujuModel: testing.ModelTag.Id(), }, Attachment: &params.VolumeAttachmentParams{ MachineTag: "machine-0", @@ -396,7 +396,7 @@ Size: 4096, Provider: "environscoped", Tags: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), + tags.JujuModel: testing.ModelTag.Id(), }, Attachment: &params.VolumeAttachmentParams{ MachineTag: "machine-0", @@ -406,7 +406,7 @@ ReadOnly: true, }, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -430,7 +430,7 @@ Size: 1024, Provider: "machinescoped", Tags: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), + tags.JujuModel: testing.ModelTag.Id(), }, }}, {Result: params.FilesystemParams{ @@ -438,10 +438,10 @@ Size: 2048, Provider: "environscoped", Tags: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), + tags.JujuModel: testing.ModelTag.Id(), }, }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -513,7 +513,7 @@ VolumeTag: "volume-4", Provider: "environscoped", }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -576,7 +576,7 @@ FilesystemTag: "filesystem-3", Provider: "environscoped", }}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -622,9 +622,9 @@ c.Assert(results, jc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {}, - {Error: &params.Error{`cannot set info for volume attachment 1:0: volume "1" not provisioned`, "not provisioned"}}, - {Error: &params.Error{`cannot set info for volume attachment 4:2: machine 2 not provisioned`, "not provisioned"}}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: `cannot set info for volume attachment
1:0: volume "1" not provisioned`, Code: "not provisioned"}}, + {Error: ¶ms.Error{Message: `cannot set info for volume attachment 4:2: machine 2 not provisioned`, Code: "not provisioned"}}, + {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -670,9 +670,9 @@ c.Assert(results, jc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ {}, - {Error: ¶ms.Error{`cannot set info for filesystem attachment 1:0: filesystem "1" not provisioned`, "not provisioned"}}, - {Error: ¶ms.Error{`cannot set info for filesystem attachment 3:2: machine 2 not provisioned`, "not provisioned"}}, - {Error: ¶ms.Error{"permission denied", "unauthorized access"}}, + {Error: ¶ms.Error{Message: `cannot set info for filesystem attachment 1:0: filesystem "1" not provisioned`, Code: "not provisioned"}}, + {Error: ¶ms.Error{Message: `cannot set info for filesystem attachment 3:2: machine 2 not provisioned`, Code: "not provisioned"}}, + {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -684,7 +684,7 @@ args := params.Entities{Entities: []params.Entity{ {"machine-0"}, - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, {"environ-adb650da-b77b-4ee8-9cbb-d57a9a592847"}, {"machine-1"}, {"machine-42"}}, @@ -724,7 +724,7 @@ args := params.Entities{Entities: []params.Entity{ {"machine-0"}, - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, {"environ-adb650da-b77b-4ee8-9cbb-d57a9a592847"}, {"machine-1"}, {"machine-42"}}, @@ -785,7 +785,7 @@ args := params.Entities{Entities: []params.Entity{ {"machine-0"}, - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, {"environ-adb650da-b77b-4ee8-9cbb-d57a9a592847"}, {"machine-1"}, {"machine-42"}}, @@ -830,7 +830,7 @@ args := params.Entities{Entities: []params.Entity{ {"machine-0"}, - {s.State.EnvironTag().String()}, + {s.State.ModelTag().String()}, {"environ-adb650da-b77b-4ee8-9cbb-d57a9a592847"}, {"machine-1"}, {"machine-42"}}, @@ -1009,7 +1009,7 @@ {Life: params.Alive}, {Life: params.Alive}, {Life: params.Alive}, - {Error: ¶ms.Error{"permission denied", "unauthorized access"}}, + {Error: ¶ms.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -1029,8 +1029,8 @@ }) } -func (s *provisionerSuite) TestWatchForEnvironConfigChanges(c *gc.C) { - result, err := s.api.WatchForEnvironConfigChanges() +func (s *provisionerSuite) TestWatchForModelConfigChanges(c *gc.C) { + result, err := s.api.WatchForModelConfigChanges() c.Assert(err, jc.ErrorIsNil) c.Assert(result.NotifyWatcherId, gc.Equals, "1") @@ -1045,18 +1045,18 @@ wc.AssertNoChange() // Updating config should trigger the watcher. 
- err = s.State.UpdateEnvironConfig(map[string]interface{}{"what": "ever"}, nil, nil) + err = s.State.UpdateModelConfig(map[string]interface{}{"what": "ever"}, nil, nil) c.Assert(err, jc.ErrorIsNil) wc.AssertOneChange() } -func (s *provisionerSuite) TestEnvironConfig(c *gc.C) { - stateEnvironConfig, err := s.State.EnvironConfig() +func (s *provisionerSuite) TestModelConfig(c *gc.C) { + stateModelConfig, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) - result, err := s.api.EnvironConfig() + result, err := s.api.ModelConfig() c.Assert(err, jc.ErrorIsNil) - c.Assert(result.Config, jc.DeepEquals, params.EnvironConfig(stateEnvironConfig.AllAttrs())) + c.Assert(result.Config, jc.DeepEquals, params.ModelConfig(stateModelConfig.AllAttrs())) } func (s *provisionerSuite) TestRemoveVolumesEnvironManager(c *gc.C) { @@ -1077,12 +1077,12 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: nil}, {Error: &params.Error{Message: "removing volume 2: volume is not dead"}}, {Error: nil}, {Error: &params.Error{Message: `"volume-invalid" is not a valid volume tag`}}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -1105,12 +1105,12 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result, gc.DeepEquals, params.ErrorResults{ Results: []params.ErrorResult{ - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: nil}, {Error: &params.Error{Message: "removing filesystem 2: filesystem is not dead"}}, {Error: nil}, {Error: &params.Error{Message: `"filesystem-invalid" is not a valid filesystem tag`}}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -1136,9 +1136,9 @@ Results: []params.ErrorResult{ {Error: nil}, {Error: nil}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: &params.Error{Message: `"volume-invalid" is not a valid volume tag`}}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -1164,9 +1164,9 @@ Results: []params.ErrorResult{ {Error: nil}, {Error: nil}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, {Error: &params.Error{Message: `"filesystem-invalid" is not a valid filesystem tag`}}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, }, }) } @@ -1198,8 +1198,8 @@ Results: []params.ErrorResult{ {Error: &params.Error{Message: "removing attachment of volume 0/0 from machine 0: volume attachment is not dying"}}, {Error: nil}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, - {Error: &params.Error{`removing attachment of volume 42 from machine 0: volume "42" on machine "0" not found`, "not found"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, + {Error: &params.Error{Message: `removing attachment of volume 42 from machine 0: volume "42" on machine "0" not found`, Code: "not found"}}, }, }) } @@ -1231,8 +1231,8 @@ Results: []params.ErrorResult{ {Error: &params.Error{Message: "removing attachment of filesystem 0/0 from machine 0: filesystem attachment is not dying"}}, {Error: nil}, - {Error: &params.Error{"permission denied", "unauthorized access"}}, - {Error: &params.Error{`removing attachment of filesystem 42 from machine 0: filesystem "42" on machine "0" not found`, "not found"}}, + {Error: &params.Error{Message: "permission denied", Code: "unauthorized access"}}, + {Error: &params.Error{Message: `removing attachment of filesystem 42 from machine 0: filesystem "42" on machine "0" not found`, Code: "not found"}}, }, }) } === removed file 'src/github.com/juju/juju/apiserver/subnets/shims.go' --- src/github.com/juju/juju/apiserver/subnets/shims.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/subnets/shims.go 1970-01-01 00:00:00 +0000 @@ -1,145 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package subnets - -import ( - "github.com/juju/errors" - "github.com/juju/juju/environs/config" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - providercommon "github.com/juju/juju/provider/common" - "github.com/juju/juju/state" -) - -// NOTE: All of the following code is only tested with a feature test. - -// subnetShim forwards and adapts state.Subnets methods to -// common.BackingSubnet. -type subnetShim struct { - common.BackingSubnet - subnet *state.Subnet -} - -func (s *subnetShim) CIDR() string { - return s.subnet.CIDR() -} - -func (s *subnetShim) VLANTag() int { - return s.subnet.VLANTag() -} - -func (s *subnetShim) ProviderId() string { - return s.subnet.ProviderId() -} - -func (s *subnetShim) AvailabilityZones() []string { - // TODO(dimitern): Add multiple zones to state.Subnet. - return []string{s.subnet.AvailabilityZone()} -} - -func (s *subnetShim) Life() params.Life { - return params.Life(s.subnet.Life().String()) -} - -func (s *subnetShim) Status() string { - // TODO(dimitern): This should happen in a cleaner way. - if s.Life() != params.Alive { - return "terminating" - } - return "in-use" -} - -func (s *subnetShim) SpaceName() string { - return s.subnet.SpaceName() -} - -// spaceShim forwards and adapts state.Space methods to BackingSpace. -type spaceShim struct { - common.BackingSpace - space *state.Space -} - -func (s *spaceShim) Name() string { - return s.space.Name() -} - -func (s *spaceShim) Subnets() ([]common.BackingSubnet, error) { - results, err := s.space.Subnets() - if err != nil { - return nil, errors.Trace(err) - } - subnets := make([]common.BackingSubnet, len(results)) - for i, result := range results { - subnets[i] = &subnetShim{subnet: result} - } - return subnets, nil -} - -// stateShim forwards and adapts state.State methods to Backing -// method. -type stateShim struct { - common.NetworkBacking - st *state.State -} - -func (s *stateShim) EnvironConfig() (*config.Config, error) { - return s.st.EnvironConfig() -} - -func (s *stateShim) AllSpaces() ([]common.BackingSpace, error) { - results, err := s.st.AllSpaces() - if err != nil { - return nil, errors.Trace(err) - } - spaces := make([]common.BackingSpace, len(results)) - for i, result := range results { - spaces[i] = &spaceShim{space: result} - } - return spaces, nil -} - -func (s *stateShim) AddSubnet(info common.BackingSubnetInfo) (common.BackingSubnet, error) { - // TODO(dimitern): Add multiple AZs per subnet in state.
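A recurring mechanical change in the storageprovisioner test diff above is the move from unkeyed to keyed params.Error composite literals (for example, &params.Error{"permission denied", "unauthorized access"} becomes &params.Error{Message: "permission denied", Code: "unauthorized access"}). A minimal sketch of why the keyed form is the safer Go idiom; Error below is a hypothetical stand-in for params.Error, which carries more fields than shown:

    package main

    import "fmt"

    // Error stands in for params.Error (the real type lives in
    // github.com/juju/juju/apiserver/params).
    type Error struct {
    	Message string
    	Code    string
    }

    func main() {
    	// Unkeyed literal: silently wrong if the struct's fields are ever
    	// reordered, and go vet flags unkeyed literals of struct types
    	// imported from other packages, so tests written this way fail vet.
    	unkeyed := &Error{"permission denied", "unauthorized access"}

    	// Keyed literal: robust against field reordering and additions,
    	// which is why the tests above convert every occurrence.
    	keyed := &Error{Message: "permission denied", Code: "unauthorized access"}

    	fmt.Println(*unkeyed == *keyed) // true
    }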
- var firstZone string - if len(info.AvailabilityZones) > 0 { - firstZone = info.AvailabilityZones[0] - } - _, err := s.st.AddSubnet(state.SubnetInfo{ - CIDR: info.CIDR, - VLANTag: info.VLANTag, - ProviderId: info.ProviderId, - AvailabilityZone: firstZone, - SpaceName: info.SpaceName, - }) - return nil, err // Drop the first result, as it's unused. -} - -func (s *stateShim) AllSubnets() ([]common.BackingSubnet, error) { - results, err := s.st.AllSubnets() - if err != nil { - return nil, errors.Trace(err) - } - subnets := make([]common.BackingSubnet, len(results)) - for i, result := range results { - subnets[i] = &subnetShim{subnet: result} - } - return subnets, nil -} - -type availZoneShim struct{} - -func (availZoneShim) Name() string { return "not-set" } -func (availZoneShim) Available() bool { return true } - -func (s *stateShim) AvailabilityZones() ([]providercommon.AvailabilityZone, error) { - // TODO(dimitern): Fix this to get them from state when available! - logger.Debugf("not getting availability zones from state yet") - return nil, nil -} - -func (s *stateShim) SetAvailabilityZones(zones []providercommon.AvailabilityZone) error { - logger.Debugf("not setting availability zones in state yet: %+v", zones) - return nil -} === modified file 'src/github.com/juju/juju/apiserver/subnets/subnets.go' --- src/github.com/juju/juju/apiserver/subnets/subnets.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/subnets/subnets.go 2016-03-22 15:18:22 +0000 @@ -4,32 +4,24 @@ package subnets import ( - "fmt" - "net" - "strings" - "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "github.com/juju/utils/set" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/environs" - "github.com/juju/juju/instance" - "github.com/juju/juju/network" - providercommon "github.com/juju/juju/provider/common" "github.com/juju/juju/state" ) var logger = loggo.GetLogger("juju.apiserver.subnets") func init() { - common.RegisterStandardFacade("Subnets", 1, NewAPI) + common.RegisterStandardFacade("Subnets", 2, NewAPI) } -// API defines the methods the Subnets API facade implements. -type API interface { +// SubnetsAPI defines the methods the Subnets API facade implements. +type SubnetsAPI interface { // AllZones returns all availability zones known to Juju. If a // zone is unusable, unavailable, or deprecated the Available // field will be false. @@ -46,22 +38,22 @@ ListSubnets(args params.SubnetsFilters) (params.ListSubnetsResults, error) } -// subnetsAPI implements the API interface. +// subnetsAPI implements the SubnetsAPI interface. type subnetsAPI struct { - backing common.NetworkBacking + backing networkingcommon.NetworkBacking resources *common.Resources authorizer common.Authorizer } // NewAPI creates a new Subnets API server-side facade with a // state.State backing. 
-func NewAPI(st *state.State, res *common.Resources, auth common.Authorizer) (API, error) { - return newAPIWithBacking(&stateShim{st: st}, res, auth) +func NewAPI(st *state.State, res *common.Resources, auth common.Authorizer) (SubnetsAPI, error) { + return newAPIWithBacking(networkingcommon.NewStateShim(st), res, auth) } // newAPIWithBacking creates a new server-side Subnets API facade with // a common.NetworkBacking -func newAPIWithBacking(backing common.NetworkBacking, resources *common.Resources, authorizer common.Authorizer) (API, error) { +func newAPIWithBacking(backing networkingcommon.NetworkBacking, resources *common.Resources, authorizer common.Authorizer) (SubnetsAPI, error) { // Only clients can access the Subnets facade. if !authorizer.AuthClient() { return nil, common.ErrPerm @@ -75,42 +67,7 @@ // AllZones is defined on the API interface. func (api *subnetsAPI) AllZones() (params.ZoneResults, error) { - var results params.ZoneResults - - zonesAsString := func(zones []providercommon.AvailabilityZone) string { - results := make([]string, len(zones)) - for i, zone := range zones { - results[i] = zone.Name() - } - return `"` + strings.Join(results, `", "`) + `"` - } - - // Try fetching cached zones first. - zones, err := api.backing.AvailabilityZones() - if err != nil { - return results, errors.Trace(err) - } - - if len(zones) == 0 { - // This is likely the first time we're called. - // Fetch all zones from the provider and update. - zones, err = api.updateZones() - if err != nil { - return results, errors.Annotate(err, "cannot update known zones") - } - logger.Debugf( - "updated the list of known zones from the environment: %s", zonesAsString(zones), - ) - } else { - logger.Debugf("using cached list of known zones: %s", zonesAsString(zones)) - } - - results.Results = make([]params.ZoneResult, len(zones)) - for i, zone := range zones { - results.Results[i].Name = zone.Name() - results.Results[i].Available = zone.Available() - } - return results, nil + return networkingcommon.AllZones(api.backing) } // AllSpaces is defined on the API interface. @@ -132,474 +89,13 @@ return results, nil } -// zonedEnviron returns a providercommon.ZonedEnviron instance from -// the current environment config. If the environment does not support -// zones, an error satisfying errors.IsNotSupported() will be -// returned. -func (api *subnetsAPI) zonedEnviron() (providercommon.ZonedEnviron, error) { - envConfig, err := api.backing.EnvironConfig() - if err != nil { - return nil, errors.Annotate(err, "getting environment config") - } - - env, err := environs.New(envConfig) - if err != nil { - return nil, errors.Annotate(err, "opening environment") - } - if zonedEnv, ok := env.(providercommon.ZonedEnviron); ok { - return zonedEnv, nil - } - return nil, errors.NotSupportedf("availability zones") -} - -// networkingEnviron returns a environs.NetworkingEnviron instance -// from the current environment config, if supported. If the -// environment does not support environs.Networking, an error -// satisfying errors.IsNotSupported() will be returned. 
-func (api *subnetsAPI) networkingEnviron() (environs.NetworkingEnviron, error) { - envConfig, err := api.backing.EnvironConfig() - if err != nil { - return nil, errors.Annotate(err, "getting environment config") - } - - env, err := environs.New(envConfig) - if err != nil { - return nil, errors.Annotate(err, "opening environment") - } - if netEnv, ok := environs.SupportsNetworking(env); ok { - return netEnv, nil - } - return nil, errors.NotSupportedf("environment networking features") // " not supported" -} - -// updateZones attempts to retrieve all availability zones from the -// environment provider (if supported) and then updates the persisted -// list of zones in state, returning them as well on success. -func (api *subnetsAPI) updateZones() ([]providercommon.AvailabilityZone, error) { - zoned, err := api.zonedEnviron() - if err != nil { - return nil, errors.Trace(err) - } - zones, err := zoned.AvailabilityZones() - if err != nil { - return nil, errors.Trace(err) - } - - if err := api.backing.SetAvailabilityZones(zones); err != nil { - return nil, errors.Trace(err) - } - return zones, nil -} - -// addSubnetsCache holds cached lists of spaces, zones, and subnets, -// used for fast lookups while adding subnets. -type addSubnetsCache struct { - api *subnetsAPI - allSpaces set.Strings // all defined backing spaces - allZones set.Strings // all known provider zones - availableZones set.Strings // all the available zones - allSubnets []network.SubnetInfo // all (valid) provider subnets - // providerIdsByCIDR maps possibly duplicated CIDRs to one or more ids. - providerIdsByCIDR map[string]set.Strings - // subnetsByProviderId maps unique subnet ProviderIds to pointers - // to entries in allSubnets. - subnetsByProviderId map[string]*network.SubnetInfo -} - -func newAddSubnetsCache(api *subnetsAPI) *addSubnetsCache { - // Empty cache initially. - return &addSubnetsCache{ - api: api, - allSpaces: nil, - allZones: nil, - availableZones: nil, - allSubnets: nil, - providerIdsByCIDR: nil, - subnetsByProviderId: nil, - } -} - -// validateSpace parses the given spaceTag and verifies it exists by -// looking it up in the cache (or populates the cache if empty). -func (cache *addSubnetsCache) validateSpace(spaceTag string) (*names.SpaceTag, error) { - if spaceTag == "" { - return nil, errors.Errorf("SpaceTag is required") - } - tag, err := names.ParseSpaceTag(spaceTag) - if err != nil { - return nil, errors.Annotate(err, "given SpaceTag is invalid") - } - - // Otherwise we need the cache to validate. - if cache.allSpaces == nil { - // Not yet cached. - logger.Debugf("caching known spaces") - - allSpaces, err := cache.api.backing.AllSpaces() - if err != nil { - return nil, errors.Annotate(err, "cannot validate given SpaceTag") - } - cache.allSpaces = set.NewStrings() - for _, space := range allSpaces { - if cache.allSpaces.Contains(space.Name()) { - logger.Warningf("ignoring duplicated space %q", space.Name()) - continue - } - cache.allSpaces.Add(space.Name()) - } - } - if cache.allSpaces.IsEmpty() { - return nil, errors.Errorf("no spaces defined") - } - logger.Tracef("using cached spaces: %v", cache.allSpaces.SortedValues()) - - if !cache.allSpaces.Contains(tag.Id()) { - return nil, errors.NotFoundf("space %q", tag.Id()) // " not found" - } - return &tag, nil -} - -// cacheZones populates the allZones and availableZones cache, if it's -// empty. -func (cache *addSubnetsCache) cacheZones() error { - if cache.allZones != nil { - // Already cached. 
- logger.Tracef("using cached zones: %v", cache.allZones.SortedValues()) - return nil - } - - allZones, err := cache.api.AllZones() - if err != nil { - return errors.Annotate(err, "given Zones cannot be validated") - } - cache.allZones = set.NewStrings() - cache.availableZones = set.NewStrings() - for _, zone := range allZones.Results { - // AllZones() does not use the Error result field, so no - // need to check it here. - if cache.allZones.Contains(zone.Name) { - logger.Warningf("ignoring duplicated zone %q", zone.Name) - continue - } - - if zone.Available { - cache.availableZones.Add(zone.Name) - } - cache.allZones.Add(zone.Name) - } - logger.Debugf( - "%d known and %d available zones cached: %v", - cache.allZones.Size(), cache.availableZones.Size(), cache.allZones.SortedValues(), - ) - if cache.allZones.IsEmpty() { - cache.allZones = nil - // Cached an empty list. - return errors.Errorf("no zones defined") - } - return nil -} - -// validateZones ensures givenZones are valid. When providerZones are -// also set, givenZones must be a subset of them or match exactly. -// With non-empty providerZones and empty givenZones, it returns the -// providerZones (i.e. trusts the provider to know better). When no -// providerZones and only givenZones are set, only then the cache is -// used to validate givenZones. -func (cache *addSubnetsCache) validateZones(providerZones, givenZones []string) ([]string, error) { - givenSet := set.NewStrings(givenZones...) - providerSet := set.NewStrings(providerZones...) - - // First check if we can validate without using the cache. - switch { - case providerSet.IsEmpty() && givenSet.IsEmpty(): - return nil, errors.Errorf("Zones cannot be discovered from the provider and must be set") - case !providerSet.IsEmpty() && givenSet.IsEmpty(): - // Use provider zones when none given. - return providerSet.SortedValues(), nil - case !providerSet.IsEmpty() && !givenSet.IsEmpty(): - // Ensure givenZones either match providerZones or are a - // subset of them. - extraGiven := givenSet.Difference(providerSet) - if !extraGiven.IsEmpty() { - extra := `"` + strings.Join(extraGiven.SortedValues(), `", "`) + `"` - msg := fmt.Sprintf("Zones contain zones not allowed by the provider: %s", extra) - return nil, errors.Errorf(msg) - } - } - - // Otherwise we need the cache to validate. - if err := cache.cacheZones(); err != nil { - return nil, errors.Trace(err) - } - - diffAvailable := givenSet.Difference(cache.availableZones) - diffAll := givenSet.Difference(cache.allZones) - - if !diffAll.IsEmpty() { - extra := `"` + strings.Join(diffAll.SortedValues(), `", "`) + `"` - return nil, errors.Errorf("Zones contain unknown zones: %s", extra) - } - if !diffAvailable.IsEmpty() { - extra := `"` + strings.Join(diffAvailable.SortedValues(), `", "`) + `"` - return nil, errors.Errorf("Zones contain unavailable zones: %s", extra) - } - // All good - given zones are a subset and none are - // unavailable. - return givenSet.SortedValues(), nil -} - -// cacheSubnets tries to get and cache once all known provider -// subnets. It handles the case when subnets have duplicated CIDRs but -// distinct ProviderIds. It also handles weird edge cases, like no -// CIDR and/or ProviderId set for a subnet. -func (cache *addSubnetsCache) cacheSubnets() error { - if cache.allSubnets != nil { - // Already cached. 
- logger.Tracef("using %d cached subnets", len(cache.allSubnets)) - return nil - } - - netEnv, err := cache.api.networkingEnviron() - if err != nil { - return errors.Trace(err) - } - subnetInfo, err := netEnv.Subnets(instance.UnknownId, nil) - if err != nil { - return errors.Annotate(err, "cannot get provider subnets") - } - logger.Debugf("got %d subnets to cache from the provider", len(subnetInfo)) - - if len(subnetInfo) > 0 { - // Trying to avoid reallocations. - cache.allSubnets = make([]network.SubnetInfo, 0, len(subnetInfo)) - } - cache.providerIdsByCIDR = make(map[string]set.Strings) - cache.subnetsByProviderId = make(map[string]*network.SubnetInfo) - - for i, _ := range subnetInfo { - subnet := subnetInfo[i] - cidr := subnet.CIDR - providerId := string(subnet.ProviderId) - logger.Debugf( - "caching subnet with CIDR %q, ProviderId %q, Zones: %q", - cidr, providerId, subnet.AvailabilityZones, - ) - - if providerId == "" && cidr == "" { - logger.Warningf("found subnet with empty CIDR and ProviderId") - // But we still save it for lookups, which will probably fail anyway. - } else if providerId == "" { - logger.Warningf("found subnet with CIDR %q and empty ProviderId", cidr) - // But we still save it for lookups. - } else { - _, ok := cache.subnetsByProviderId[providerId] - if ok { - logger.Warningf( - "found subnet with CIDR %q and duplicated ProviderId %q", - cidr, providerId, - ) - // We just overwrite what's there for the same id. - // It's a weird case and it shouldn't happen with - // properly written providers, but anyway.. - } - } - cache.subnetsByProviderId[providerId] = &subnet - - if ids, ok := cache.providerIdsByCIDR[cidr]; !ok { - cache.providerIdsByCIDR[cidr] = set.NewStrings(providerId) - } else { - ids.Add(providerId) - logger.Debugf( - "duplicated subnet CIDR %q; collected ProviderIds so far: %v", - cidr, ids.SortedValues(), - ) - cache.providerIdsByCIDR[cidr] = ids - } - - cache.allSubnets = append(cache.allSubnets, subnet) - } - logger.Debugf("%d provider subnets cached", len(cache.allSubnets)) - if len(cache.allSubnets) == 0 { - // Cached an empty list. - return errors.Errorf("no subnets defined") - } - return nil -} - -// validateSubnet ensures either subnetTag or providerId is valid (not -// both), then uses the cache to validate and lookup the provider -// SubnetInfo for the subnet, if found. -func (cache *addSubnetsCache) validateSubnet(subnetTag, providerId string) (*network.SubnetInfo, error) { - haveTag := subnetTag != "" - haveProviderId := providerId != "" - - if !haveTag && !haveProviderId { - return nil, errors.Errorf("either SubnetTag or SubnetProviderId is required") - } else if haveTag && haveProviderId { - return nil, errors.Errorf("SubnetTag and SubnetProviderId cannot be both set") - } - var tag names.SubnetTag - if haveTag { - var err error - tag, err = names.ParseSubnetTag(subnetTag) - if err != nil { - return nil, errors.Annotate(err, "given SubnetTag is invalid") - } - } - - // Otherwise we need the cache to validate. - if err := cache.cacheSubnets(); err != nil { - return nil, errors.Trace(err) - } - - if haveTag { - providerIds, ok := cache.providerIdsByCIDR[tag.Id()] - if !ok || providerIds.IsEmpty() { - return nil, errors.NotFoundf("subnet with CIDR %q", tag.Id()) - } - if providerIds.Size() > 1 { - ids := `"` + strings.Join(providerIds.SortedValues(), `", "`) + `"` - return nil, errors.Errorf( - "multiple subnets with CIDR %q: retry using ProviderId from: %s", - tag.Id(), ids, - ) - } - // A single CIDR matched. 
- providerId = providerIds.Values()[0] - } - - info, ok := cache.subnetsByProviderId[providerId] - if !ok || info == nil { - return nil, errors.NotFoundf( - "subnet with CIDR %q and ProviderId %q", - tag.Id(), providerId, - ) - } - // Do last-call validation. - if !names.IsValidSubnet(info.CIDR) { - _, ipnet, err := net.ParseCIDR(info.CIDR) - if err != nil && info.CIDR != "" { - // The underlying error is not important here, just that - // the CIDR is invalid. - return nil, errors.Errorf( - "subnet with CIDR %q and ProviderId %q: invalid CIDR", - info.CIDR, providerId, - ) - } - if info.CIDR == "" { - return nil, errors.Errorf( - "subnet with ProviderId %q: empty CIDR", providerId, - ) - } - return nil, errors.Errorf( - "subnet with ProviderId %q: incorrect CIDR format %q, expected %q", - providerId, info.CIDR, ipnet.String(), - ) - } - return info, nil -} - -// addOneSubnet validates the given arguments, using cache for lookups -// (initialized on first use), then adds it to the backing store, if -// successful. -func (api *subnetsAPI) addOneSubnet(args params.AddSubnetParams, cache *addSubnetsCache) error { - subnetInfo, err := cache.validateSubnet(args.SubnetTag, args.SubnetProviderId) - if err != nil { - return errors.Trace(err) - } - spaceTag, err := cache.validateSpace(args.SpaceTag) - if err != nil { - return errors.Trace(err) - } - zones, err := cache.validateZones(subnetInfo.AvailabilityZones, args.Zones) - if err != nil { - return errors.Trace(err) - } - - // Try adding the subnet. - backingInfo := common.BackingSubnetInfo{ - ProviderId: string(subnetInfo.ProviderId), - CIDR: subnetInfo.CIDR, - VLANTag: subnetInfo.VLANTag, - AvailabilityZones: zones, - SpaceName: spaceTag.Id(), - } - if subnetInfo.AllocatableIPLow != nil { - backingInfo.AllocatableIPLow = subnetInfo.AllocatableIPLow.String() - } - if subnetInfo.AllocatableIPHigh != nil { - backingInfo.AllocatableIPHigh = subnetInfo.AllocatableIPHigh.String() - } - if _, err := api.backing.AddSubnet(backingInfo); err != nil { - return errors.Trace(err) - } - return nil -} - // AddSubnets is defined on the API interface. func (api *subnetsAPI) AddSubnets(args params.AddSubnetsParams) (params.ErrorResults, error) { - results := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.Subnets)), - } - - if len(args.Subnets) == 0 { - return results, nil - } - - cache := newAddSubnetsCache(api) - for i, arg := range args.Subnets { - err := api.addOneSubnet(arg, cache) - if err != nil { - results.Results[i].Error = common.ServerError(err) - } - } - return results, nil + return networkingcommon.AddSubnets(api.backing, args) } // ListSubnets lists all the available subnets or only those matching // all given optional filters. func (api *subnetsAPI) ListSubnets(args params.SubnetsFilters) (results params.ListSubnetsResults, err error) { - subnets, err := api.backing.AllSubnets() - if err != nil { - return results, errors.Trace(err) - } - - var spaceFilter string - if args.SpaceTag != "" { - tag, err := names.ParseSpaceTag(args.SpaceTag) - if err != nil { - return results, errors.Trace(err) - } - spaceFilter = tag.Id() - } - zoneFilter := args.Zone - - for _, subnet := range subnets { - if spaceFilter != "" && subnet.SpaceName() != spaceFilter { - logger.Tracef( - "filtering subnet %q from space %q not matching filter %q", - subnet.CIDR(), subnet.SpaceName(), spaceFilter, - ) - continue - } - zoneSet := set.NewStrings(subnet.AvailabilityZones()...) 
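The subnets.go diff above hollows out the facade: AllZones, AddSubnets, and ListSubnets no longer carry their own caching and validation logic, but forward to shared helpers in apiserver/common/networkingcommon so other networking facades can reuse them. A condensed sketch of that delegation pattern, using illustrative stand-ins rather than the real juju interfaces and signatures:

    package main

    import "fmt"

    // NetworkBacking is a narrow stand-in for networkingcommon.NetworkBacking:
    // the facade depends only on this interface, never on *state.State directly.
    type NetworkBacking interface {
    	AvailabilityZones() ([]string, error)
    }

    // AllZones plays the role of networkingcommon.AllZones: the behaviour
    // lives once in a shared package and is reused by every facade.
    func AllZones(b NetworkBacking) ([]string, error) {
    	return b.AvailabilityZones()
    }

    // subnetsAPI mirrors the thinned-out facade: it keeps wiring and
    // authorization, and forwards behaviour to the shared helper.
    type subnetsAPI struct {
    	backing NetworkBacking
    }

    func (api *subnetsAPI) AllZones() ([]string, error) {
    	return AllZones(api.backing)
    }

    // stubBacking lets the sketch run without a real state backend.
    type stubBacking struct{}

    func (stubBacking) AvailabilityZones() ([]string, error) {
    	return []string{"zone1", "zone2"}, nil
    }

    func main() {
    	api := &subnetsAPI{backing: stubBacking{}}
    	zones, err := api.AllZones()
    	fmt.Println(zones, err) // [zone1 zone2] <nil>
    }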
- if zoneFilter != "" && !zoneSet.IsEmpty() && !zoneSet.Contains(zoneFilter) { - logger.Tracef( - "filtering subnet %q with zones %v not matching filter %q", - subnet.CIDR(), subnet.AvailabilityZones(), zoneFilter, - ) - continue - } - result := params.Subnet{ - CIDR: subnet.CIDR(), - ProviderId: subnet.ProviderId(), - VLANTag: subnet.VLANTag(), - Life: subnet.Life(), - SpaceTag: names.NewSpaceTag(subnet.SpaceName()).String(), - Zones: subnet.AvailabilityZones(), - Status: subnet.Status(), - } - results.Results = append(results.Results, result) - } - return results, nil + return networkingcommon.ListSubnets(api.backing, args) } === modified file 'src/github.com/juju/juju/apiserver/subnets/subnets_test.go' --- src/github.com/juju/juju/apiserver/subnets/subnets_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/subnets/subnets_test.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/apiserver/subnets" apiservertesting "github.com/juju/juju/apiserver/testing" @@ -26,7 +27,7 @@ resources *common.Resources authorizer apiservertesting.FakeAuthorizer - facade subnets.API + facade subnets.SubnetsAPI } var _ = gc.Suite(&SubnetsSuite{}) @@ -76,7 +77,7 @@ } // AssertAllSpacesResult makes it easier to verify AllSpaces results. -func (s *SubnetsSuite) AssertAllSpacesResult(c *gc.C, got params.SpaceResults, expected []common.BackingSpace) { +func (s *SubnetsSuite) AssertAllSpacesResult(c *gc.C, got params.SpaceResults, expected []networkingcommon.BackingSpace) { seen := set.Strings{} results := []params.SpaceResult{} for _, space := range expected { @@ -146,7 +147,7 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, apiservertesting.BackingCall("AvailabilityZones"), - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), apiservertesting.ZonedEnvironCall("AvailabilityZones"), apiservertesting.BackingCall("SetAvailabilityZones", apiservertesting.ProviderInstance.Zones), @@ -157,7 +158,7 @@ apiservertesting.BackingInstance.SetUp(c, apiservertesting.StubZonedEnvironName, apiservertesting.WithoutZones, apiservertesting.WithSpaces, apiservertesting.WithSubnets) apiservertesting.SharedStub.SetErrors( nil, // Backing.AvailabilityZones - nil, // Backing.EnvironConfig + nil, // Backing.ModelConfig nil, // Provider.Open nil, // ZonedEnviron.AvailabilityZones errors.NotSupportedf("setting"), // Backing.SetAvailabilityZones @@ -173,7 +174,7 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, apiservertesting.BackingCall("AvailabilityZones"), - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), apiservertesting.ZonedEnvironCall("AvailabilityZones"), apiservertesting.BackingCall("SetAvailabilityZones", apiservertesting.ProviderInstance.Zones), @@ -184,7 +185,7 @@ apiservertesting.BackingInstance.SetUp(c, apiservertesting.StubZonedEnvironName, apiservertesting.WithoutZones, apiservertesting.WithSpaces, apiservertesting.WithSubnets) apiservertesting.SharedStub.SetErrors( nil, // Backing.AvailabilityZones - nil, // Backing.EnvironConfig + nil, // Backing.ModelConfig nil, // Provider.Open errors.NotValidf("foo"), // 
ZonedEnviron.AvailabilityZones ) @@ -199,22 +200,22 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, apiservertesting.BackingCall("AvailabilityZones"), - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), apiservertesting.ZonedEnvironCall("AvailabilityZones"), ) } -func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndEnvironConfigFails(c *gc.C) { +func (s *SubnetsSuite) TestAllZonesWithNoBackingZonesAndModelConfigFails(c *gc.C) { apiservertesting.BackingInstance.SetUp(c, apiservertesting.StubZonedEnvironName, apiservertesting.WithoutZones, apiservertesting.WithSpaces, apiservertesting.WithSubnets) apiservertesting.SharedStub.SetErrors( nil, // Backing.AvailabilityZones - errors.NotFoundf("config"), // Backing.EnvironConfig + errors.NotFoundf("config"), // Backing.ModelConfig ) results, err := s.facade.AllZones() c.Assert(err, gc.ErrorMatches, - `cannot update known zones: getting environment config: config not found`, + `cannot update known zones: getting model config: config not found`, ) // Verify the cause is not obscured. c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -222,7 +223,7 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, apiservertesting.BackingCall("AvailabilityZones"), - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), ) } @@ -230,13 +231,13 @@ apiservertesting.BackingInstance.SetUp(c, apiservertesting.StubZonedEnvironName, apiservertesting.WithoutZones, apiservertesting.WithSpaces, apiservertesting.WithSubnets) apiservertesting.SharedStub.SetErrors( nil, // Backing.AvailabilityZones - nil, // Backing.EnvironConfig + nil, // Backing.ModelConfig errors.NotValidf("config"), // Provider.Open ) results, err := s.facade.AllZones() c.Assert(err, gc.ErrorMatches, - `cannot update known zones: opening environment: config not valid`, + `cannot update known zones: opening model: config not valid`, ) // Verify the cause is not obscured. 
c.Assert(err, jc.Satisfies, errors.IsNotValid) @@ -244,7 +245,7 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, apiservertesting.BackingCall("AvailabilityZones"), - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), ) } @@ -263,7 +264,7 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, apiservertesting.BackingCall("AvailabilityZones"), - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), ) } @@ -316,13 +317,13 @@ SubnetTag: "any", SubnetProviderId: "any", }, { - // lookup by id needed, no cached subnets; EnvironConfig(): error + // lookup by id needed, no cached subnets; ModelConfig(): error SubnetProviderId: "any", }, { - // same as above, need to cache subnets; EnvironConfig(): ok; Open(): error + // same as above, need to cache subnets; ModelConfig(): ok; Open(): error SubnetProviderId: "ignored", }, { - // as above, caching again; EnvironConfig(), Open(): ok; Subnets(): error + // as above, caching again; ModelConfig(), Open(): ok; Subnets(): error SubnetProviderId: "unimportant", }, { // exactly as above, except all 3 calls ok; cached lookup: id not found @@ -448,19 +449,19 @@ }}} apiservertesting.SharedStub.SetErrors( // caching subnets (1st attempt): fails - errors.NotFoundf("config"), // BackingInstance.EnvironConfig (1st call) + errors.NotFoundf("config"), // BackingInstance.ModelConfig (1st call) // caching subnets (2nd attepmt): fails - nil, // BackingInstance.EnvironConfig (2nd call) + nil, // BackingInstance.ModelConfig (2nd call) errors.NotFoundf("provider"), // ProviderInstance.Open (1st call) // caching subnets (3rd attempt): fails - nil, // BackingInstance.EnvironConfig (3rd call) + nil, // BackingInstance.ModelConfig (3rd call) nil, // ProviderInstance.Open (2nd call) errors.NotFoundf("subnets"), // NetworkingEnvironInstance.Subnets (1st call) // caching subnets (4th attempt): succeeds - nil, // BackingInstance.EnvironConfig (4th call) + nil, // BackingInstance.ModelConfig (4th call) nil, // ProviderInstance.Open (3rd call) nil, // NetworkingEnvironInstance.Subnets (2nd call) @@ -484,8 +485,8 @@ {"either SubnetTag or SubnetProviderId is required", nil}, {"either SubnetTag or SubnetProviderId is required", nil}, {"SubnetTag and SubnetProviderId cannot be both set", nil}, - {"getting environment config: config not found", params.IsCodeNotFound}, - {"opening environment: provider not found", params.IsCodeNotFound}, + {"getting model config: config not found", params.IsCodeNotFound}, + {"opening model: provider not found", params.IsCodeNotFound}, {"cannot get provider subnets: subnets not found", params.IsCodeNotFound}, {`subnet with CIDR "" and ProviderId "missing" not found`, params.IsCodeNotFound}, {`subnet with CIDR "" and ProviderId "void" not found`, params.IsCodeNotFound}, @@ -521,7 +522,7 @@ {"", nil}, {"", nil}, } - expectedBackingInfos := []common.BackingSubnetInfo{{ + expectedBackingInfos := []networkingcommon.BackingSubnetInfo{{ ProviderId: "sn-ipv6", CIDR: "2001:db8::/32", VLANTag: 0, @@ -572,19 +573,19 @@ apiservertesting.CheckMethodCalls(c, apiservertesting.SharedStub, // caching subnets (1st attempt): fails - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), // caching subnets (2nd attepmt): fails - 
apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), // caching subnets (3rd attempt): fails - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), apiservertesting.NetworkingEnvironCall("Subnets", instance.UnknownId, []network.Id(nil)), // caching subnets (4th attempt): succeeds - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), apiservertesting.NetworkingEnvironCall("Subnets", instance.UnknownId, []network.Id(nil)), @@ -623,7 +624,7 @@ // These calls always happen. expectedCalls := []apiservertesting.StubMethodCall{ - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), } @@ -681,7 +682,7 @@ // updateZones tries to constructs a ZonedEnviron with these calls. zoneCalls := append([]apiservertesting.StubMethodCall{}, - apiservertesting.BackingCall("EnvironConfig"), + apiservertesting.BackingCall("ModelConfig"), apiservertesting.ProviderCall("Open", apiservertesting.BackingInstance.EnvConfig), ) // Receiver can differ according to envName, but @@ -774,7 +775,7 @@ s.CheckAddSubnetsFails( c, apiservertesting.StubEnvironName, apiservertesting.WithoutZones, apiservertesting.WithoutSpaces, apiservertesting.WithoutSubnets, - "environment networking features not supported", + "model networking features not supported", params.IsCodeNotSupported, ) } === removed directory 'src/github.com/juju/juju/apiserver/systemmanager' === removed file 'src/github.com/juju/juju/apiserver/systemmanager/destroy_test.go' --- src/github.com/juju/juju/apiserver/systemmanager/destroy_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/systemmanager/destroy_test.go 1970-01-01 00:00:00 +0000 @@ -1,182 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package systemmanager_test - -import ( - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - commontesting "github.com/juju/juju/apiserver/common/testing" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/apiserver/systemmanager" - apiservertesting "github.com/juju/juju/apiserver/testing" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -// NOTE: the testing of the general environment destruction code -// is found in apiserver/common/environdestroy_test.go. -// -// The tests here are around the validation and behaviour of -// the flags passed in to the system manager destroy system call. 
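The removed destroy_test.go below exercises the two DestroySystemArgs flags, DestroyEnvironments and IgnoreBlocks. A condensed, runnable sketch of the guard logic those flags control, distilled from the removed systemmanager.go further down (the function and its counts are simplified stand-ins, not the real implementation):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // destroyArgs mirrors params.DestroySystemArgs from the removed code.
    type destroyArgs struct {
    	DestroyEnvironments bool // also destroy hosted environments
    	IgnoreBlocks        bool // remove any blocks before destroying
    }

    // checkDestroy condenses the guards the removed tests exercise: a system
    // with hosted environments refuses to die unless DestroyEnvironments is
    // set, and blocks stop the operation unless IgnoreBlocks is set.
    func checkDestroy(hostedEnvs, blocks int, args destroyArgs) error {
    	if hostedEnvs > 0 && !args.DestroyEnvironments {
    		return errors.New("state server environment cannot be destroyed before all other environments are destroyed")
    	}
    	if blocks > 0 && !args.IgnoreBlocks {
    		return errors.New("found blocks in system environments")
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkDestroy(1, 0, destroyArgs{}))                          // refuses: hosted envs remain
    	fmt.Println(checkDestroy(1, 4, destroyArgs{DestroyEnvironments: true})) // refuses: blocks present
    	fmt.Println(checkDestroy(1, 4, destroyArgs{DestroyEnvironments: true, IgnoreBlocks: true})) // <nil>
    }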
- -type destroySystemSuite struct { - jujutesting.JujuConnSuite - commontesting.BlockHelper - - systemManager *systemmanager.SystemManagerAPI - - otherState *state.State - otherEnvOwner names.UserTag - otherEnvUUID string -} - -var _ = gc.Suite(&destroySystemSuite{}) - -func (s *destroySystemSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - s.BlockHelper = commontesting.NewBlockHelper(s.APIState) - s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() }) - - resources := common.NewResources() - s.AddCleanup(func(_ *gc.C) { resources.StopAll() }) - - authoriser := apiservertesting.FakeAuthorizer{ - Tag: s.AdminUserTag(c), - } - systemManager, err := systemmanager.NewSystemManagerAPI(s.State, resources, authoriser) - c.Assert(err, jc.ErrorIsNil) - s.systemManager = systemManager - - s.otherEnvOwner = names.NewUserTag("jess@dummy") - s.otherState = factory.NewFactory(s.State).MakeEnvironment(c, &factory.EnvParams{ - Name: "dummytoo", - Owner: s.otherEnvOwner, - Prepare: true, - ConfigAttrs: testing.Attrs{ - "state-server": false, - }, - }) - s.AddCleanup(func(c *gc.C) { s.otherState.Close() }) - s.otherEnvUUID = s.otherState.EnvironUUID() -} - -func (s *destroySystemSuite) TestDestroySystemKillsHostedEnvsWithBlocks(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyEnvironment") - s.BlockRemoveObject(c, "TestBlockRemoveObject") - s.otherState.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - s.otherState.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - err := s.systemManager.DestroySystem(params.DestroySystemArgs{ - DestroyEnvironments: true, - IgnoreBlocks: true, - }) - c.Assert(err, jc.ErrorIsNil) - - _, err = s.otherState.Environment() - c.Assert(errors.IsNotFound(err), jc.IsTrue) - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Dying) -} - -func (s *destroySystemSuite) TestDestroySystemReturnsBlockedEnvironmentsErr(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyEnvironment") - s.BlockRemoveObject(c, "TestBlockRemoveObject") - s.otherState.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - s.otherState.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - err := s.systemManager.DestroySystem(params.DestroySystemArgs{ - DestroyEnvironments: true, - }) - c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) - - numBlocks, err := s.State.AllBlocksForSystem() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(numBlocks), gc.Equals, 4) - - _, err = s.otherState.Environment() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *destroySystemSuite) TestDestroySystemKillsHostedEnvs(c *gc.C) { - err := s.systemManager.DestroySystem(params.DestroySystemArgs{ - DestroyEnvironments: true, - }) - c.Assert(err, jc.ErrorIsNil) - - _, err = s.otherState.Environment() - c.Assert(errors.IsNotFound(err), jc.IsTrue) - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Dying) -} - -func (s *destroySystemSuite) TestDestroySystemLeavesBlocksIfNotKillAll(c *gc.C) { - s.BlockDestroyEnvironment(c, "TestBlockDestroyEnvironment") - s.BlockRemoveObject(c, "TestBlockRemoveObject") - s.otherState.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - s.otherState.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - err := s.systemManager.DestroySystem(params.DestroySystemArgs{ - IgnoreBlocks: true, - }) - c.Assert(err, gc.ErrorMatches, "state server environment cannot be destroyed before all other environments are destroyed") - - 
numBlocks, err := s.State.AllBlocksForSystem() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(numBlocks), gc.Equals, 4) -} - -func (s *destroySystemSuite) TestDestroySystemNoHostedEnvs(c *gc.C) { - err := common.DestroyEnvironment(s.State, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - err = s.systemManager.DestroySystem(params.DestroySystemArgs{}) - c.Assert(err, jc.ErrorIsNil) - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Dying) -} - -func (s *destroySystemSuite) TestDestroySystemNoHostedEnvsWithBlock(c *gc.C) { - err := common.DestroyEnvironment(s.State, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - s.BlockDestroyEnvironment(c, "TestBlockDestroyEnvironment") - s.BlockRemoveObject(c, "TestBlockRemoveObject") - - err = s.systemManager.DestroySystem(params.DestroySystemArgs{ - IgnoreBlocks: true, - }) - c.Assert(err, jc.ErrorIsNil) - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Life(), gc.Equals, state.Dying) -} - -func (s *destroySystemSuite) TestDestroySystemNoHostedEnvsWithBlockFail(c *gc.C) { - err := common.DestroyEnvironment(s.State, s.otherState.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - s.BlockDestroyEnvironment(c, "TestBlockDestroyEnvironment") - s.BlockRemoveObject(c, "TestBlockRemoveObject") - - err = s.systemManager.DestroySystem(params.DestroySystemArgs{}) - c.Assert(params.IsCodeOperationBlocked(err), jc.IsTrue) - - numBlocks, err := s.State.AllBlocksForSystem() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(numBlocks), gc.Equals, 2) -} === removed file 'src/github.com/juju/juju/apiserver/systemmanager/package_test.go' --- src/github.com/juju/juju/apiserver/systemmanager/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/systemmanager/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package systemmanager_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func TestAll(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/apiserver/systemmanager/systemmanager.go' --- src/github.com/juju/juju/apiserver/systemmanager/systemmanager.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/systemmanager/systemmanager.go 1970-01-01 00:00:00 +0000 @@ -1,333 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The systemmanager package defines an API end point for functions dealing -// with systems as a whole. Primarily the destruction of systems. -package systemmanager - -import ( - "sort" - - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - "github.com/juju/utils/set" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/feature" - "github.com/juju/juju/state" -) - -var logger = loggo.GetLogger("juju.apiserver.systemmanager") - -func init() { - common.RegisterStandardFacadeForFeature("SystemManager", 1, NewSystemManagerAPI, feature.JES) -} - -// SystemManager defines the methods on the systemmanager API end point. 
-type SystemManager interface { - AllEnvironments() (params.UserEnvironmentList, error) - DestroySystem(args params.DestroySystemArgs) error - EnvironmentConfig() (params.EnvironmentConfigResults, error) - ListBlockedEnvironments() (params.EnvironmentBlockInfoList, error) - RemoveBlocks(args params.RemoveBlocksArgs) error - WatchAllEnvs() (params.AllWatcherId, error) -} - -// SystemManagerAPI implements the environment manager interface and is -// the concrete implementation of the api end point. -type SystemManagerAPI struct { - state *state.State - authorizer common.Authorizer - apiUser names.UserTag - resources *common.Resources -} - -var _ SystemManager = (*SystemManagerAPI)(nil) - -// NewSystemManagerAPI creates a new api server endpoint for managing -// environments. -func NewSystemManagerAPI( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, -) (*SystemManagerAPI, error) { - if !authorizer.AuthClient() { - return nil, errors.Trace(common.ErrPerm) - } - - // Since we know this is a user tag (because AuthClient is true), - // we just do the type assertion to the UserTag. - apiUser, _ := authorizer.GetAuthTag().(names.UserTag) - isAdmin, err := st.IsSystemAdministrator(apiUser) - if err != nil { - return nil, errors.Trace(err) - } - // The entire end point is only accessible to system administrators. - if !isAdmin { - return nil, errors.Trace(common.ErrPerm) - } - - return &SystemManagerAPI{ - state: st, - authorizer: authorizer, - apiUser: apiUser, - resources: resources, - }, nil -} - -// AllEnvironments allows system administrators to get the list of all the -// environments in the system. -func (s *SystemManagerAPI) AllEnvironments() (params.UserEnvironmentList, error) { - result := params.UserEnvironmentList{} - - // Get all the environments that the authenticated user can see, and - // supplement that with the other environments that exist that the user - // cannot see. The reason we do this is to get the LastConnection time for - // the environments that the user is able to see, so we have consistent - // output when listing with or without --all when an admin user. - environments, err := s.state.EnvironmentsForUser(s.apiUser) - if err != nil { - return result, errors.Trace(err) - } - visibleEnvironments := set.NewStrings() - for _, env := range environments { - lastConn, err := env.LastConnection() - if err != nil && !state.IsNeverConnectedError(err) { - return result, errors.Trace(err) - } - visibleEnvironments.Add(env.UUID()) - result.UserEnvironments = append(result.UserEnvironments, params.UserEnvironment{ - Environment: params.Environment{ - Name: env.Name(), - UUID: env.UUID(), - OwnerTag: env.Owner().String(), - }, - LastConnection: &lastConn, - }) - } - - allEnvs, err := s.state.AllEnvironments() - if err != nil { - return result, errors.Trace(err) - } - - for _, env := range allEnvs { - if !visibleEnvironments.Contains(env.UUID()) { - result.UserEnvironments = append(result.UserEnvironments, params.UserEnvironment{ - Environment: params.Environment{ - Name: env.Name(), - UUID: env.UUID(), - OwnerTag: env.Owner().String(), - }, - // No LastConnection as this user hasn't. - }) - } - } - - // Sort the resulting sequence by environment name, then owner. - sort.Sort(orderedUserEnvironments(result.UserEnvironments)) - - return result, nil -} - -// ListBlockedEnvironments returns a list of all environments on the system -// which have a block in place. The resulting slice is sorted by environment -// name, then owner. 
Callers must be system administrators to retrieve the -// list. -func (s *SystemManagerAPI) ListBlockedEnvironments() (params.EnvironmentBlockInfoList, error) { - results := params.EnvironmentBlockInfoList{} - - blocks, err := s.state.AllBlocksForSystem() - if err != nil { - return results, errors.Trace(err) - } - - envBlocks := make(map[string][]string) - for _, block := range blocks { - uuid := block.EnvUUID() - types, ok := envBlocks[uuid] - if !ok { - types = []string{block.Type().String()} - } else { - types = append(types, block.Type().String()) - } - envBlocks[uuid] = types - } - - for uuid, blocks := range envBlocks { - envInfo, err := s.state.GetEnvironment(names.NewEnvironTag(uuid)) - if err != nil { - logger.Debugf("Unable to get name for environment: %s", uuid) - continue - } - results.Environments = append(results.Environments, params.EnvironmentBlockInfo{ - UUID: envInfo.UUID(), - Name: envInfo.Name(), - OwnerTag: envInfo.Owner().String(), - Blocks: blocks, - }) - } - - // Sort the resulting sequence by environment name, then owner. - sort.Sort(orderedBlockInfo(results.Environments)) - - return results, nil -} - -// DestroySystem will attempt to destroy the system. If the args specify the -// removal of blocks or the destruction of the environments, this method will -// attempt to do so. -func (s *SystemManagerAPI) DestroySystem(args params.DestroySystemArgs) error { - // Get list of all environments in the system. - allEnvs, err := s.state.AllEnvironments() - if err != nil { - return errors.Trace(err) - } - - // If there are hosted environments and DestroyEnvironments was not - // specified, don't bother trying to destroy the system, as it will fail. - if len(allEnvs) > 1 && !args.DestroyEnvironments { - return errors.Errorf("state server environment cannot be destroyed before all other environments are destroyed") - } - - // If there are blocks, and we aren't being told to ignore them, let the - // user know. - blocks, err := s.state.AllBlocksForSystem() - if err != nil { - logger.Debugf("Unable to get blocks for system: %s", err) - if !args.IgnoreBlocks { - return errors.Trace(err) - } - } - if len(blocks) > 0 { - if !args.IgnoreBlocks { - return common.ErrOperationBlocked("found blocks in system environments") - } - - err := s.state.RemoveAllBlocksForSystem() - if err != nil { - return errors.Trace(err) - } - } - - systemEnv, err := s.state.StateServerEnvironment() - if err != nil { - return errors.Trace(err) - } - systemTag := systemEnv.EnvironTag() - - if args.DestroyEnvironments { - for _, env := range allEnvs { - environTag := env.EnvironTag() - if environTag != systemTag { - if err := common.DestroyEnvironment(s.state, environTag); err != nil { - logger.Errorf("unable to destroy environment %q: %s", env.UUID(), err) - } - } - } - } - - return errors.Trace(common.DestroyEnvironment(s.state, systemTag)) -} - -// EnvironmentConfig returns the environment config for the system -// environment. For information on the current environment, use -// client.EnvironmentGet -func (s *SystemManagerAPI) EnvironmentConfig() (params.EnvironmentConfigResults, error) { - result := params.EnvironmentConfigResults{} - - stateServerEnv, err := s.state.StateServerEnvironment() - if err != nil { - return result, errors.Trace(err) - } - - config, err := stateServerEnv.Config() - if err != nil { - return result, errors.Trace(err) - } - - result.Config = config.AllAttrs() - return result, nil -} - -// RemoveBlocks removes all the blocks in the system. 
-func (s *SystemManagerAPI) RemoveBlocks(args params.RemoveBlocksArgs) error { - if !args.All { - return errors.New("not supported") - } - return errors.Trace(s.state.RemoveAllBlocksForSystem()) -} - -// WatchAllEnvs starts watching events for all environments in the -// system. The returned AllWatcherId should be used with Next on the -// AllEnvWatcher endpoint to receive deltas. -func (c *SystemManagerAPI) WatchAllEnvs() (params.AllWatcherId, error) { - w := c.state.WatchAllEnvs() - return params.AllWatcherId{ - AllWatcherId: c.resources.Register(w), - }, nil -} - -type orderedBlockInfo []params.EnvironmentBlockInfo - -func (o orderedBlockInfo) Len() int { - return len(o) -} - -func (o orderedBlockInfo) Less(i, j int) bool { - if o[i].Name < o[j].Name { - return true - } - if o[i].Name > o[j].Name { - return false - } - - if o[i].OwnerTag < o[j].OwnerTag { - return true - } - if o[i].OwnerTag > o[j].OwnerTag { - return false - } - - // Unreachable based on the rules of there not being duplicate - // environments of the same name for the same owner, but return false - // instead of panicing. - return false -} - -func (o orderedBlockInfo) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} - -type orderedUserEnvironments []params.UserEnvironment - -func (o orderedUserEnvironments) Len() int { - return len(o) -} - -func (o orderedUserEnvironments) Less(i, j int) bool { - if o[i].Name < o[j].Name { - return true - } - if o[i].Name > o[j].Name { - return false - } - - if o[i].OwnerTag < o[j].OwnerTag { - return true - } - if o[i].OwnerTag > o[j].OwnerTag { - return false - } - - // Unreachable based on the rules of there not being duplicate - // environments of the same name for the same owner, but return false - // instead of panicing. - return false -} - -func (o orderedUserEnvironments) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} === removed file 'src/github.com/juju/juju/apiserver/systemmanager/systemmanager_test.go' --- src/github.com/juju/juju/apiserver/systemmanager/systemmanager_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/systemmanager/systemmanager_test.go 1970-01-01 00:00:00 +0000 @@ -1,218 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
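The removed systemmanager.go above orders its results with hand-rolled sort.Interface implementations (orderedBlockInfo, orderedUserEnvironments) that compare by name, then owner, and deliberately return false on a full tie rather than panicking. A behaviourally equivalent condensed sketch of that multi-key ordering, with a stand-in element type:

    package main

    import (
    	"fmt"
    	"sort"
    )

    // blockInfo stands in for params.EnvironmentBlockInfo in the removed code.
    type blockInfo struct {
    	Name     string
    	OwnerTag string
    }

    // orderedBlockInfo sorts by Name, then OwnerTag; equal elements compare
    // as not-less, so a full tie never panics.
    type orderedBlockInfo []blockInfo

    func (o orderedBlockInfo) Len() int      { return len(o) }
    func (o orderedBlockInfo) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
    func (o orderedBlockInfo) Less(i, j int) bool {
    	if o[i].Name != o[j].Name {
    		return o[i].Name < o[j].Name
    	}
    	return o[i].OwnerTag < o[j].OwnerTag
    }

    func main() {
    	envs := orderedBlockInfo{
    		{"test", "bob"},
    		{"dummyenv", "admin"},
    		{"test", "admin"},
    	}
    	sort.Sort(envs)
    	fmt.Println(envs) // [{dummyenv admin} {test admin} {test bob}]
    }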
- -package systemmanager_test - -import ( - "time" - - "github.com/juju/loggo" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver" - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/apiserver/systemmanager" - apiservertesting "github.com/juju/juju/apiserver/testing" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type systemManagerSuite struct { - jujutesting.JujuConnSuite - - systemManager *systemmanager.SystemManagerAPI - resources *common.Resources - authorizer apiservertesting.FakeAuthorizer -} - -var _ = gc.Suite(&systemManagerSuite{}) - -func (s *systemManagerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.resources = common.NewResources() - s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) - - s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: s.AdminUserTag(c), - } - - systemManager, err := systemmanager.NewSystemManagerAPI(s.State, s.resources, s.authorizer) - c.Assert(err, jc.ErrorIsNil) - s.systemManager = systemManager - - loggo.GetLogger("juju.apiserver.systemmanager").SetLogLevel(loggo.TRACE) -} - -func (s *systemManagerSuite) TestNewAPIRefusesNonClient(c *gc.C) { - anAuthoriser := apiservertesting.FakeAuthorizer{ - Tag: names.NewUnitTag("mysql/0"), - } - endPoint, err := systemmanager.NewSystemManagerAPI(s.State, s.resources, anAuthoriser) - c.Assert(endPoint, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *systemManagerSuite) TestNewAPIRefusesNonAdmins(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{NoEnvUser: true}) - anAuthoriser := apiservertesting.FakeAuthorizer{ - Tag: user.Tag(), - } - endPoint, err := systemmanager.NewSystemManagerAPI(s.State, s.resources, anAuthoriser) - c.Assert(endPoint, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *systemManagerSuite) checkEnvironmentMatches(c *gc.C, env params.Environment, expected *state.Environment) { - c.Check(env.Name, gc.Equals, expected.Name()) - c.Check(env.UUID, gc.Equals, expected.UUID()) - c.Check(env.OwnerTag, gc.Equals, expected.Owner().String()) -} - -func (s *systemManagerSuite) TestAllEnvironments(c *gc.C) { - admin := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar"}) - - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "owned", Owner: admin.UserTag()}).Close() - remoteUserTag := names.NewUserTag("user@remote") - st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "user", Owner: remoteUserTag}) - defer st.Close() - st.AddEnvironmentUser(admin.UserTag(), remoteUserTag, "Foo Bar") - - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "no-access", Owner: remoteUserTag}).Close() - - response, err := s.systemManager.AllEnvironments() - c.Assert(err, jc.ErrorIsNil) - // The results are sorted. 
- expected := []string{"dummyenv", "no-access", "owned", "user"} - var obtained []string - for _, env := range response.UserEnvironments { - obtained = append(obtained, env.Name) - stateEnv, err := s.State.GetEnvironment(names.NewEnvironTag(env.UUID)) - c.Assert(err, jc.ErrorIsNil) - s.checkEnvironmentMatches(c, env.Environment, stateEnv) - } - c.Assert(obtained, jc.DeepEquals, expected) -} - -func (s *systemManagerSuite) TestListBlockedEnvironments(c *gc.C) { - st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "test"}) - defer st.Close() - - s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - st.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - list, err := s.systemManager.ListBlockedEnvironments() - c.Assert(err, jc.ErrorIsNil) - - c.Assert(list.Environments, jc.DeepEquals, []params.EnvironmentBlockInfo{ - params.EnvironmentBlockInfo{ - Name: "dummyenv", - UUID: s.State.EnvironUUID(), - OwnerTag: s.AdminUserTag(c).String(), - Blocks: []string{ - "BlockDestroy", - "BlockChange", - }, - }, - params.EnvironmentBlockInfo{ - Name: "test", - UUID: st.EnvironUUID(), - OwnerTag: s.AdminUserTag(c).String(), - Blocks: []string{ - "BlockDestroy", - "BlockChange", - }, - }, - }) - -} - -func (s *systemManagerSuite) TestListBlockedEnvironmentsNoBlocks(c *gc.C) { - list, err := s.systemManager.ListBlockedEnvironments() - c.Assert(err, jc.ErrorIsNil) - c.Assert(list.Environments, gc.HasLen, 0) -} - -func (s *systemManagerSuite) TestEnvironmentConfig(c *gc.C) { - env, err := s.systemManager.EnvironmentConfig() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Config["name"], gc.Equals, "dummyenv") -} - -func (s *systemManagerSuite) TestEnvironmentConfigFromNonStateServer(c *gc.C) { - st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "test"}) - defer st.Close() - - authorizer := &apiservertesting.FakeAuthorizer{Tag: s.AdminUserTag(c)} - systemManager, err := systemmanager.NewSystemManagerAPI(st, common.NewResources(), authorizer) - c.Assert(err, jc.ErrorIsNil) - env, err := systemManager.EnvironmentConfig() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Config["name"], gc.Equals, "dummyenv") -} - -func (s *systemManagerSuite) TestRemoveBlocks(c *gc.C) { - st := s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "test"}) - defer st.Close() - - s.State.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - s.State.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - st.SwitchBlockOn(state.DestroyBlock, "TestBlockDestroyEnvironment") - st.SwitchBlockOn(state.ChangeBlock, "TestChangeBlock") - - err := s.systemManager.RemoveBlocks(params.RemoveBlocksArgs{All: true}) - c.Assert(err, jc.ErrorIsNil) - - blocks, err := s.State.AllBlocksForSystem() - c.Assert(err, jc.ErrorIsNil) - c.Assert(blocks, gc.HasLen, 0) -} - -func (s *systemManagerSuite) TestRemoveBlocksNotAll(c *gc.C) { - err := s.systemManager.RemoveBlocks(params.RemoveBlocksArgs{}) - c.Assert(err, gc.ErrorMatches, "not supported") -} - -func (s *systemManagerSuite) TestWatchAllEnvs(c *gc.C) { - watcherId, err := s.systemManager.WatchAllEnvs() - c.Assert(err, jc.ErrorIsNil) - - watcherAPI_, err := apiserver.NewAllWatcher(s.State, s.resources, s.authorizer, watcherId.AllWatcherId) - c.Assert(err, jc.ErrorIsNil) - watcherAPI := watcherAPI_.(*apiserver.SrvAllWatcher) - defer func() { - err := watcherAPI.Stop() - c.Assert(err, jc.ErrorIsNil) - }() - - resultC 
:= make(chan params.AllWatcherNextResults) - go func() { - result, err := watcherAPI.Next() - c.Assert(err, jc.ErrorIsNil) - resultC <- result - }() - - select { - case result := <-resultC: - // Expect to see the initial environment be reported. - deltas := result.Deltas - c.Assert(deltas, gc.HasLen, 1) - envInfo := deltas[0].Entity.(*multiwatcher.EnvironmentInfo) - c.Assert(envInfo.EnvUUID, gc.Equals, s.State.EnvironUUID()) - case <-time.After(testing.LongWait): - c.Fatal("timed out") - } -} === modified file 'src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go' --- src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/testing/fakeauthorizer.go 2016-03-22 15:18:22 +0000 @@ -17,7 +17,7 @@ return fa.Tag == tag } -func (fa FakeAuthorizer) AuthEnvironManager() bool { +func (fa FakeAuthorizer) AuthModelManager() bool { return fa.EnvironManager } === modified file 'src/github.com/juju/juju/apiserver/testing/fakecharmstore.go' --- src/github.com/juju/juju/apiserver/testing/fakecharmstore.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/testing/fakecharmstore.go 2016-03-22 15:18:22 +0000 @@ -6,15 +6,17 @@ import ( "fmt" "net/http" + "net/http/httptest" - gitjujutesting "github.com/juju/testing" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - "gopkg.in/juju/charmstore.v4" - "gopkg.in/juju/charmstore.v4/charmstoretesting" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakerytest" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + "gopkg.in/juju/charmstore.v5-unstable" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" "gopkg.in/mgo.v2" "github.com/juju/juju/apiserver/service" @@ -22,7 +24,7 @@ ) type CharmStoreSuite struct { - gitjujutesting.CleanupSuite + testing.CleanupSuite Session *mgo.Session // DischargeUser holds the identity of the user @@ -31,7 +33,9 @@ DischargeUser string discharger *bakerytest.Discharger - Srv *charmstoretesting.Server + handler charmstore.HTTPCloseHandler + Srv *httptest.Server + Client *csclient.Client } func (s *CharmStoreSuite) SetUpTest(c *gc.C) { @@ -45,32 +49,41 @@ checkers.DeclaredCaveat("username", s.DischargeUser), }, nil }) - s.Srv = charmstoretesting.OpenServer(c, s.Session, charmstore.ServerParams{ + db := s.Session.DB("juju-testing") + params := charmstore.ServerParams{ + AuthUsername: "test-user", + AuthPassword: "test-password", IdentityLocation: s.discharger.Location(), PublicKeyLocator: s.discharger, + } + handler, err := charmstore.NewServer(db, nil, "", params, charmstore.V4) + c.Assert(err, jc.ErrorIsNil) + s.handler = handler + s.Srv = httptest.NewServer(handler) + s.Client = csclient.New(csclient.Params{ + URL: s.Srv.URL, + User: params.AuthUsername, + Password: params.AuthPassword, }) + s.PatchValue(&charmrepo.CacheDir, c.MkDir()) s.PatchValue(&service.NewCharmStore, func(p charmrepo.NewCharmStoreParams) charmrepo.Interface { - p.URL = s.Srv.URL() + p.URL = s.Srv.URL return charmrepo.NewCharmStore(p) }) } func (s *CharmStoreSuite) TearDownTest(c *gc.C) { s.discharger.Close() + s.handler.Close() s.Srv.Close() s.CleanupSuite.TearDownTest(c) } func (s *CharmStoreSuite) UploadCharm(c *gc.C, url, name string) (*charm.URL, charm.Charm) { - id := charm.MustParseReference(url) - promulgated := false - if 
id.User == "" { - id.User = "who" - promulgated = true - } - ch := testcharms.Repo.CharmArchive(c.MkDir(), name) - id = s.Srv.UploadCharm(c, ch, id, promulgated) - curl := (*charm.URL)(id) - return curl, ch + return testcharms.UploadCharm(c, s.Client, url, name) +} + +func (s *CharmStoreSuite) UploadCharmMultiSeries(c *gc.C, url, name string) (*charm.URL, charm.Charm) { + return testcharms.UploadCharmMultiSeries(c, s.Client, url, name) } === modified file 'src/github.com/juju/juju/apiserver/testing/service.go' --- src/github.com/juju/juju/apiserver/testing/service.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/testing/service.go 2016-03-22 15:18:22 +0000 @@ -6,10 +6,11 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/constraints" "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" ) func AssertPrincipalServiceDeployed(c *gc.C, st *state.State, serviceName string, curl *charm.URL, forced bool, bundle charm.Charm, cons constraints.Value) *state.Service { @@ -35,16 +36,24 @@ serviceCons, err := service.Constraints() c.Assert(err, jc.ErrorIsNil) c.Assert(serviceCons, gc.DeepEquals, cons) - units, err := service.AllUnits() - c.Assert(err, jc.ErrorIsNil) - for _, unit := range units { - mid, err := unit.AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - machine, err := st.Machine(mid) - c.Assert(err, jc.ErrorIsNil) - machineCons, err := machine.Constraints() - c.Assert(err, jc.ErrorIsNil) - c.Assert(machineCons, gc.DeepEquals, cons) + + for a := coretesting.LongAttempt.Start(); a.Next(); { + units, err := service.AllUnits() + c.Assert(err, jc.ErrorIsNil) + for _, unit := range units { + mid, err := unit.AssignedMachineId() + if !a.HasNext() { + c.Assert(err, jc.ErrorIsNil) + } else if err != nil { + continue + } + machine, err := st.Machine(mid) + c.Assert(err, jc.ErrorIsNil) + machineCons, err := machine.Constraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(machineCons, gc.DeepEquals, cons) + } + break } return service } === modified file 'src/github.com/juju/juju/apiserver/testing/stub_network.go' --- src/github.com/juju/juju/apiserver/testing/stub_network.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/testing/stub_network.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,7 @@ "strconv" "strings" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/networkingcommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" @@ -19,6 +17,7 @@ "github.com/juju/testing" "github.com/juju/utils" "github.com/juju/utils/set" + gc "gopkg.in/check.v1" ) type StubNetwork struct { @@ -45,6 +44,13 @@ ) func (s StubNetwork) SetUpSuite(c *gc.C) { + providers := environs.RegisteredProviders() + for _, name := range providers { + if name == StubProviderType { + return + } + } + ProviderInstance.Zones = []providercommon.AvailabilityZone{ &FakeZone{"zone1", true}, &FakeZone{"zone2", false}, @@ -108,7 +114,7 @@ type errReturner func() error -// FakeSpace implements common.BackingSpace for testing. +// FakeSpace implements networkingcommon.BackingSpace for testing. 
type FakeSpace struct { SpaceName string SubnetIds []string @@ -116,21 +122,21 @@ NextErr errReturner } -var _ common.BackingSpace = (*FakeSpace)(nil) +var _ networkingcommon.BackingSpace = (*FakeSpace)(nil) func (f *FakeSpace) Name() string { return f.SpaceName } -func (f *FakeSpace) Subnets() (bs []common.BackingSubnet, err error) { - outputSubnets := []common.BackingSubnet{} +func (f *FakeSpace) Subnets() (bs []networkingcommon.BackingSubnet, err error) { + outputSubnets := []networkingcommon.BackingSubnet{} if err = f.NextErr(); err != nil { return outputSubnets, err } for _, subnetId := range f.SubnetIds { - providerId := "provider-" + subnetId + providerId := network.Id("provider-" + subnetId) // Pick the third element of the IP address and use this to // decide how we construct the Subnet. It provides variation of @@ -148,7 +154,7 @@ status = "" } - backing := common.BackingSubnetInfo{ + backing := networkingcommon.BackingSubnetInfo{ CIDR: subnetId, SpaceName: f.SpaceName, ProviderId: providerId, @@ -279,12 +285,12 @@ return fmt.Sprintf("&FakeZone{%q, %v}", f.ZoneName, f.ZoneAvailable) } -// FakeSubnet implements common.BackingSubnet for testing. +// FakeSubnet implements networkingcommon.BackingSubnet for testing. type FakeSubnet struct { - info common.BackingSubnetInfo + info networkingcommon.BackingSubnetInfo } -var _ common.BackingSubnet = (*FakeSubnet)(nil) +var _ networkingcommon.BackingSubnet = (*FakeSubnet)(nil) // GoString implements fmt.GoStringer. func (f *FakeSubnet) GoString() string { @@ -303,7 +309,7 @@ return f.info.AvailabilityZones } -func (f *FakeSubnet) ProviderId() string { +func (f *FakeSubnet) ProviderId() network.Id { return f.info.ProviderId } @@ -324,7 +330,7 @@ *stub = testing.Stub{} } -// StubBacking implements common.NetworkBacking and records calls to its +// StubBacking implements networkingcommon.NetworkBacking and records calls to its // methods. type StubBacking struct { *testing.Stub @@ -332,11 +338,11 @@ EnvConfig *config.Config Zones []providercommon.AvailabilityZone - Spaces []common.BackingSpace - Subnets []common.BackingSubnet + Spaces []networkingcommon.BackingSpace + Subnets []networkingcommon.BackingSubnet } -var _ common.NetworkBacking = (*StubBacking)(nil) +var _ networkingcommon.NetworkBacking = (*StubBacking)(nil) type SetUpFlag bool @@ -361,17 +367,17 @@ "type": StubProviderType, "name": envName, } - sb.EnvConfig = coretesting.CustomEnvironConfig(c, extraAttrs) + sb.EnvConfig = coretesting.CustomModelConfig(c, extraAttrs) sb.Zones = []providercommon.AvailabilityZone{} if withZones { sb.Zones = make([]providercommon.AvailabilityZone, len(ProviderInstance.Zones)) copy(sb.Zones, ProviderInstance.Zones) } - sb.Spaces = []common.BackingSpace{} + sb.Spaces = []networkingcommon.BackingSpace{} if withSpaces { // Note that full subnet data is generated from the SubnetIds in // FakeSpace.Subnets(). - sb.Spaces = []common.BackingSpace{ + sb.Spaces = []networkingcommon.BackingSpace{ &FakeSpace{ SpaceName: "default", SubnetIds: []string{"192.168.0.0/24", "192.168.3.0/24"}, @@ -390,32 +396,32 @@ NextErr: sb.NextErr}, // duplicates are ignored when caching spaces. 
} } - sb.Subnets = []common.BackingSubnet{} + sb.Subnets = []networkingcommon.BackingSubnet{} if withSubnets { - info0 := common.BackingSubnetInfo{ + info0 := networkingcommon.BackingSubnetInfo{ CIDR: ProviderInstance.Subnets[0].CIDR, - ProviderId: string(ProviderInstance.Subnets[0].ProviderId), + ProviderId: ProviderInstance.Subnets[0].ProviderId, AllocatableIPLow: ProviderInstance.Subnets[0].AllocatableIPLow.String(), AllocatableIPHigh: ProviderInstance.Subnets[0].AllocatableIPHigh.String(), AvailabilityZones: ProviderInstance.Subnets[0].AvailabilityZones, SpaceName: "private", } - info1 := common.BackingSubnetInfo{ + info1 := networkingcommon.BackingSubnetInfo{ CIDR: ProviderInstance.Subnets[1].CIDR, - ProviderId: string(ProviderInstance.Subnets[1].ProviderId), + ProviderId: ProviderInstance.Subnets[1].ProviderId, AvailabilityZones: ProviderInstance.Subnets[1].AvailabilityZones, SpaceName: "dmz", } - sb.Subnets = []common.BackingSubnet{ + sb.Subnets = []networkingcommon.BackingSubnet{ &FakeSubnet{info0}, &FakeSubnet{info1}, } } } -func (sb *StubBacking) EnvironConfig() (*config.Config, error) { - sb.MethodCall(sb, "EnvironConfig") +func (sb *StubBacking) ModelConfig() (*config.Config, error) { + sb.MethodCall(sb, "ModelConfig") if err := sb.NextErr(); err != nil { return nil, err } @@ -435,7 +441,7 @@ return sb.NextErr() } -func (sb *StubBacking) AllSpaces() ([]common.BackingSpace, error) { +func (sb *StubBacking) AllSpaces() ([]networkingcommon.BackingSpace, error) { sb.MethodCall(sb, "AllSpaces") if err := sb.NextErr(); err != nil { return nil, err @@ -443,7 +449,7 @@ // Filter duplicates. seen := set.Strings{} - output := []common.BackingSpace{} + output := []networkingcommon.BackingSpace{} for _, space := range sb.Spaces { if seen.Contains(space.Name()) { continue @@ -454,7 +460,7 @@ return output, nil } -func (sb *StubBacking) AllSubnets() ([]common.BackingSubnet, error) { +func (sb *StubBacking) AllSubnets() ([]networkingcommon.BackingSubnet, error) { sb.MethodCall(sb, "AllSubnets") if err := sb.NextErr(); err != nil { return nil, err @@ -462,7 +468,7 @@ // Filter duplicates. seen := set.Strings{} - output := []common.BackingSubnet{} + output := []networkingcommon.BackingSubnet{} for _, subnet := range sb.Subnets { if seen.Contains(subnet.CIDR()) { continue @@ -473,7 +479,7 @@ return output, nil } -func (sb *StubBacking) AddSubnet(subnetInfo common.BackingSubnetInfo) (common.BackingSubnet, error) { +func (sb *StubBacking) AddSubnet(subnetInfo networkingcommon.BackingSubnetInfo) (networkingcommon.BackingSubnet, error) { sb.MethodCall(sb, "AddSubnet", subnetInfo) if err := sb.NextErr(); err != nil { return nil, err @@ -483,8 +489,8 @@ return fs, nil } -func (sb *StubBacking) AddSpace(name string, subnets []string, public bool) error { - sb.MethodCall(sb, "AddSpace", name, subnets, public) +func (sb *StubBacking) AddSpace(name string, providerId network.Id, subnets []string, public bool) error { + sb.MethodCall(sb, "AddSpace", name, providerId, subnets, public) if err := sb.NextErr(); err != nil { return err } @@ -526,7 +532,7 @@ case StubZonedNetworkingEnvironName: return ZonedNetworkingEnvironInstance, nil } - panic("unexpected environment name: " + cfg.Name()) + panic("unexpected model name: " + cfg.Name()) } // GoString implements fmt.GoStringer. 
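StubBacking above records every invocation through the embedded github.com/juju/testing Stub: MethodCall logs the call and its arguments, NextErr pops the next queued error, and the test later asserts on the recorded calls. A minimal sketch of the same pattern in isolation (fakeBacking is an illustrative name):

    type fakeBacking struct {
        *testing.Stub
    }

    func (f *fakeBacking) AllSpaces() ([]string, error) {
        f.MethodCall(f, "AllSpaces")
        if err := f.NextErr(); err != nil {
            return nil, err
        }
        return []string{"default"}, nil
    }

    // In a test:
    //   stub := &testing.Stub{}
    //   fake := &fakeBacking{Stub: stub}
    //   stub.SetErrors(errors.New("boom")) // make the first call fail
    //   _, err := fake.AllSpaces()
    //   stub.CheckCallNames(c, "AllSpaces")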
@@ -589,6 +595,14 @@ return ProviderInstance.Subnets, nil } +func (se *StubNetworkingEnviron) SupportsSpaces() (bool, error) { + se.MethodCall(se, "SupportsSpaces") + if err := se.NextErr(); err != nil { + return false, err + } + return true, nil +} + // GoString implements fmt.GoStringer. func (se *StubNetworkingEnviron) GoString() string { return "&StubNetworkingEnviron{}" === modified file 'src/github.com/juju/juju/apiserver/tools.go' --- src/github.com/juju/juju/apiserver/tools.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/tools.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,6 @@ import ( "bytes" "crypto/sha256" - "encoding/json" "fmt" "io" "io/ioutil" @@ -18,7 +17,6 @@ "github.com/juju/utils" "github.com/juju/juju/apiserver/common" - apihttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs" envtools "github.com/juju/juju/environs/tools" @@ -28,96 +26,57 @@ "github.com/juju/juju/version" ) -// toolsHandler is the base type for uploading and downloading -// tools over HTTPS via the API server. -type toolsHandler struct { - httpHandler -} - // toolsHandler handles tool upload through HTTPS in the API server. type toolsUploadHandler struct { - toolsHandler + ctxt httpContext } // toolsHandler handles tool download through HTTPS in the API server. type toolsDownloadHandler struct { - toolsHandler + ctxt httpContext } func (h *toolsDownloadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - stateWrapper, err := h.validateEnvironUUID(r) + st, err := h.ctxt.stateForRequestUnauthenticated(r) if err != nil { - h.sendExistingError(w, http.StatusNotFound, err) + sendError(w, err) return } switch r.Method { case "GET": - tarball, err := h.processGet(r, stateWrapper.state) + tarball, err := h.processGet(r, st) if err != nil { logger.Errorf("GET(%s) failed: %v", r.URL, err) - h.sendExistingError(w, http.StatusBadRequest, err) + sendError(w, errors.NewBadRequest(err, "")) return } h.sendTools(w, http.StatusOK, tarball) default: - h.sendError(w, http.StatusMethodNotAllowed, fmt.Sprintf("unsupported method: %q", r.Method)) + sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)) } } func (h *toolsUploadHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Validate before authenticate because the authentication is dependent // on the state connection that is determined during the validation. - stateWrapper, err := h.validateEnvironUUID(r) + st, _, err := h.ctxt.stateForRequestAuthenticatedUser(r) if err != nil { - h.sendExistingError(w, http.StatusNotFound, err) - return - } - - if err := stateWrapper.authenticateUser(r); err != nil { - h.authError(w, h) + sendError(w, err) return } switch r.Method { case "POST": // Add tools to storage. - agentTools, err := h.processPost(r, stateWrapper.state) + agentTools, err := h.processPost(r, st) if err != nil { - h.sendExistingError(w, http.StatusBadRequest, err) + sendError(w, err) return } - h.sendJSON(w, http.StatusOK, ¶ms.ToolsResult{Tools: agentTools}) + sendStatusAndJSON(w, http.StatusOK, ¶ms.ToolsResult{Tools: agentTools}) default: - h.sendError(w, http.StatusMethodNotAllowed, fmt.Sprintf("unsupported method: %q", r.Method)) - } -} - -// sendJSON sends a JSON-encoded response to the client. 
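The per-handler sendJSON, sendError, and sendExistingError methods removed just below are superseded by shared sendError and sendStatusAndJSON helpers defined elsewhere in the apiserver package. A minimal sketch of what such a JSON-response helper looks like (an illustration, not the actual helper):

    func sendStatusAndJSON(w http.ResponseWriter, statusCode int, response interface{}) {
        body, err := json.Marshal(response)
        if err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
            return
        }
        w.Header().Set("Content-Type", "application/json")
        w.Header().Set("Content-Length", fmt.Sprint(len(body)))
        w.WriteHeader(statusCode)
        w.Write(body)
    }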
-func (h *toolsHandler) sendJSON(w http.ResponseWriter, statusCode int, response *params.ToolsResult) error { - w.Header().Set("Content-Type", apihttp.CTypeJSON) - w.WriteHeader(statusCode) - body, err := json.Marshal(response) - if err != nil { - return err - } - w.Write(body) - return nil -} - -// sendError sends a JSON-encoded error response using desired -// error message. -func (h *toolsHandler) sendError(w http.ResponseWriter, statusCode int, message string) { - h.sendExistingError(w, statusCode, errors.New(message)) -} - -// sendExistingError sends a JSON-encoded error response -// for errors encountered during processing. -func (h *toolsHandler) sendExistingError(w http.ResponseWriter, statusCode int, existing error) { - logger.Debugf("sending error: %v %v", statusCode, existing) - err := common.ServerError(existing) - if err := h.sendJSON(w, statusCode, ¶ms.ToolsResult{Error: err}); err != nil { - logger.Errorf("failed to send error: %v", err) + sendError(w, errors.MethodNotAllowedf("unsupported method: %q", r.Method)) } } @@ -158,7 +117,7 @@ // in simplestreams and GETting it, caching the result in toolstorage before returning // to the caller. func (h *toolsDownloadHandler) fetchAndCacheTools(v version.Binary, stor toolstorage.Storage, st *state.State) (io.ReadCloser, error) { - envcfg, err := st.EnvironConfig() + envcfg, err := st.ModelConfig() if err != nil { return nil, err } @@ -214,7 +173,7 @@ w.Header().Set("Content-Length", fmt.Sprint(len(tarball))) w.WriteHeader(statusCode) if _, err := w.Write(tarball); err != nil { - h.sendExistingError(w, http.StatusBadRequest, errors.Annotatef(err, "failed to write tools")) + sendError(w, errors.NewBadRequest(errors.Annotatef(err, "failed to write tools"), "")) return } } @@ -225,23 +184,23 @@ binaryVersionParam := query.Get("binaryVersion") if binaryVersionParam == "" { - return nil, errors.New("expected binaryVersion argument") + return nil, errors.BadRequestf("expected binaryVersion argument") } toolsVersion, err := version.ParseBinary(binaryVersionParam) if err != nil { - return nil, errors.Annotatef(err, "invalid tools version %q", binaryVersionParam) + return nil, errors.NewBadRequest(err, fmt.Sprintf("invalid tools version %q", binaryVersionParam)) } // Make sure the content type is x-tar-gz. contentType := r.Header.Get("Content-Type") if contentType != "application/x-tar-gz" { - return nil, errors.Errorf("expected Content-Type: application/x-tar-gz, got: %v", contentType) + return nil, errors.BadRequestf("expected Content-Type: application/x-tar-gz, got: %v", contentType) } // Get the server root, so we know how to form the URL in the Tools returned. serverRoot, err := h.getServerRoot(r, query, st) if err != nil { - return nil, errors.Annotate(err, "cannot to determine server root") + return nil, errors.NewBadRequest(err, "cannot to determine server root") } // We'll clone the tools for each additional series specified. @@ -264,15 +223,15 @@ } func (h *toolsUploadHandler) getServerRoot(r *http.Request, query url.Values, st *state.State) (string, error) { - uuid := query.Get(":envuuid") + uuid := query.Get(":modeluuid") if uuid == "" { - env, err := st.Environment() + env, err := st.Model() if err != nil { return "", err } uuid = env.UUID() } - return fmt.Sprintf("https://%s/environment/%s", r.Host, uuid), nil + return fmt.Sprintf("https://%s/model/%s", r.Host, uuid), nil } // handleUpload uploads the tools data from the reader to env storage as the specified version. 
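Throughout this file, plain errors are swapped for typed ones from github.com/juju/errors (BadRequestf, NewBadRequest, MethodNotAllowedf), which lets a single sendError helper derive the HTTP status from the error type instead of each call site passing a status code. A minimal sketch of such a mapping (illustrative, not the actual helper):

    func statusFor(err error) int {
        switch {
        case errors.IsBadRequest(err):
            return http.StatusBadRequest
        case errors.IsMethodNotAllowed(err):
            return http.StatusMethodNotAllowed
        case errors.IsNotFound(err):
            return http.StatusNotFound
        default:
            return http.StatusInternalServerError
        }
    }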
@@ -294,7 +253,7 @@ return nil, err } if len(data) == 0 { - return nil, errors.New("no tools uploaded") + return nil, errors.BadRequestf("no tools uploaded") } // TODO(wallyworld): check integrity of tools tarball. === modified file 'src/github.com/juju/juju/apiserver/tools_test.go' --- src/github.com/juju/juju/apiserver/tools_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/tools_test.go 2016-03-22 15:18:22 +0000 @@ -16,15 +16,15 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" commontesting "github.com/juju/juju/apiserver/common/testing" - apihttp "github.com/juju/juju/apiserver/http" "github.com/juju/juju/apiserver/params" envtesting "github.com/juju/juju/environs/testing" envtools "github.com/juju/juju/environs/tools" toolstesting "github.com/juju/juju/environs/tools/testing" - "github.com/juju/juju/state" "github.com/juju/juju/state/toolstorage" "github.com/juju/juju/testing" @@ -32,20 +32,71 @@ "github.com/juju/juju/version" ) +// charmsCommonSuite wraps authHttpSuite and adds +// some helper methods suitable for working with the +// tools endpoint. +type toolsCommonSuite struct { + authHttpSuite +} + +func (s *toolsCommonSuite) toolsURL(c *gc.C, query string) *url.URL { + uri := s.baseURL(c) + uri.Path = fmt.Sprintf("/model/%s/tools", s.modelUUID) + uri.RawQuery = query + return uri +} + +func (s *toolsCommonSuite) toolsURI(c *gc.C, query string) string { + if query != "" && query[0] == '?' { + query = query[1:] + } + return s.toolsURL(c, query).String() +} + +func (s *toolsCommonSuite) downloadRequest(c *gc.C, version version.Binary, uuid string) *http.Response { + url := s.toolsURL(c, "") + if uuid == "" { + url.Path = fmt.Sprintf("/tools/%s", version) + } else { + url.Path = fmt.Sprintf("/model/%s/tools/%s", uuid, version) + } + return s.sendRequest(c, httpRequestParams{method: "GET", url: url.String()}) +} + +func (s *toolsCommonSuite) assertUploadResponse(c *gc.C, resp *http.Response, agentTools *coretools.Tools) { + toolsResponse := s.assertResponse(c, resp, http.StatusOK) + c.Check(toolsResponse.Error, gc.IsNil) + c.Check(toolsResponse.Tools, gc.DeepEquals, agentTools) +} + +func (s *toolsCommonSuite) assertGetFileResponse(c *gc.C, resp *http.Response, expBody, expContentType string) { + body := assertResponse(c, resp, http.StatusOK, expContentType) + c.Check(string(body), gc.Equals, expBody) +} + +func (s *toolsCommonSuite) assertErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) { + toolsResponse := s.assertResponse(c, resp, expCode) + c.Assert(toolsResponse.Error, gc.NotNil) + c.Assert(toolsResponse.Error.Message, gc.Matches, expError) +} + +func (s *toolsCommonSuite) assertResponse(c *gc.C, resp *http.Response, expStatus int) params.ToolsResult { + body := assertResponse(c, resp, expStatus, params.ContentTypeJSON) + var toolsResponse params.ToolsResult + err := json.Unmarshal(body, &toolsResponse) + c.Assert(err, jc.ErrorIsNil, gc.Commentf("body: %s", body)) + return toolsResponse +} + type toolsSuite struct { - userAuthHttpSuite + toolsCommonSuite commontesting.BlockHelper } var _ = gc.Suite(&toolsSuite{}) -func (s *toolsSuite) SetUpSuite(c *gc.C) { - s.userAuthHttpSuite.SetUpSuite(c) - s.archiveContentType = "application/x-tar-gz" -} - func (s *toolsSuite) SetUpTest(c *gc.C) { - s.userAuthHttpSuite.SetUpTest(c) + s.toolsCommonSuite.SetUpTest(c) s.BlockHelper = 
commontesting.NewBlockHelper(s.APIState) s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() }) } @@ -53,19 +104,20 @@ func (s *toolsSuite) TestToolsUploadedSecurely(c *gc.C) { info := s.APIInfo(c) uri := "http://" + info.Addrs[0] + "/tools" - _, err := s.sendRequest(c, "", "", "PUT", uri, "", nil) - c.Assert(err, gc.ErrorMatches, `.*malformed HTTP response.*`) + s.sendRequest(c, httpRequestParams{ + method: "PUT", + url: uri, + expectError: `.*malformed HTTP response.*`, + }) } func (s *toolsSuite) TestRequiresAuth(c *gc.C) { - resp, err := s.sendRequest(c, "", "", "GET", s.toolsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusUnauthorized, "unauthorized") + resp := s.sendRequest(c, httpRequestParams{method: "GET", url: s.toolsURI(c, "")}) + s.assertErrorResponse(c, resp, http.StatusUnauthorized, "no credentials provided") } func (s *toolsSuite) TestRequiresPOST(c *gc.C) { - resp, err := s.authRequest(c, "PUT", s.toolsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "PUT", url: s.toolsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusMethodNotAllowed, `unsupported method: "PUT"`) } @@ -80,19 +132,16 @@ err = machine.SetPassword(password) c.Assert(err, jc.ErrorIsNil) - resp, err := s.sendRequest(c, machine.Tag().String(), password, "POST", s.toolsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusUnauthorized, "unauthorized") + resp := s.sendRequest(c, httpRequestParams{tag: machine.Tag().String(), password: password, method: "POST", url: s.toolsURI(c, "")}) + s.assertErrorResponse(c, resp, http.StatusUnauthorized, "machine 0 not provisioned") // Now try a user login. - resp, err = s.authRequest(c, "POST", s.toolsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp = s.authRequest(c, httpRequestParams{method: "POST", url: s.toolsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected binaryVersion argument") } func (s *toolsSuite) TestUploadRequiresVersion(c *gc.C) { - resp, err := s.authRequest(c, "POST", s.toolsURI(c, ""), "", nil) - c.Assert(err, jc.ErrorIsNil) + resp := s.authRequest(c, httpRequestParams{method: "POST", url: s.toolsURI(c, "")}) s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected binaryVersion argument") } @@ -101,8 +150,7 @@ tempFile, err := ioutil.TempFile(c.MkDir(), "tools") c.Assert(err, jc.ErrorIsNil) - resp, err := s.uploadRequest(c, s.toolsURI(c, "?binaryVersion=1.18.0-quantal-amd64"), true, tempFile.Name()) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.toolsURI(c, "?binaryVersion=1.18.0-quantal-amd64"), "application/x-tar-gz", tempFile.Name()) s.assertErrorResponse(c, resp, http.StatusBadRequest, "no tools uploaded") } @@ -112,8 +160,7 @@ c.Assert(err, jc.ErrorIsNil) // Now try with the default Content-Type. - resp, err := s.uploadRequest(c, s.toolsURI(c, "?binaryVersion=1.18.0-quantal-amd64"), false, tempFile.Name()) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.toolsURI(c, "?binaryVersion=1.18.0-quantal-amd64"), "application/octet-stream", tempFile.Name()) s.assertErrorResponse( c, resp, http.StatusBadRequest, "expected Content-Type: application/x-tar-gz, got: application/octet-stream") } @@ -131,12 +178,11 @@ // Make some fake tools. expectedTools, vers, toolPath := s.setupToolsForUpload(c) // Now try uploading them. 
- resp, err := s.uploadRequest( - c, s.toolsURI(c, "?binaryVersion="+vers.String()), true, toolPath) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest( + c, s.toolsURI(c, "?binaryVersion="+vers.String()), "application/x-tar-gz", toolPath) // Check the response. - expectedTools[0].URL = fmt.Sprintf("%s/environment/%s/tools/%s", s.baseURL(c), s.State.EnvironUUID(), vers) + expectedTools[0].URL = fmt.Sprintf("%s/model/%s/tools/%s", s.baseURL(c), s.State.ModelUUID(), vers) s.assertUploadResponse(c, resp, expectedTools[0]) // Check the contents. @@ -154,11 +200,10 @@ // Block all changes. s.BlockAllChanges(c, "TestUpload") // Now try uploading them. - resp, err := s.uploadRequest( - c, s.toolsURI(c, "?binaryVersion="+vers.String()), true, toolPath) - c.Assert(err, jc.ErrorIsNil) - problem := s.assertErrorResponse(c, resp, http.StatusBadRequest, "TestUpload") - s.AssertBlocked(c, problem, "TestUpload") + resp := s.uploadRequest( + c, s.toolsURI(c, "?binaryVersion="+vers.String()), "application/x-tar-gz", toolPath) + toolsResponse := s.assertResponse(c, resp, http.StatusBadRequest) + s.AssertBlocked(c, toolsResponse.Error, "TestUpload") // Check the contents. storage, err := s.State.ToolsStorage() @@ -174,45 +219,41 @@ expectedTools, vers, toolPath := s.setupToolsForUpload(c) url := s.toolsURL(c, "binaryVersion="+vers.String()) url.Path = "/tools" - resp, err := s.uploadRequest(c, url.String(), true, toolPath) - c.Assert(err, jc.ErrorIsNil) - // Check the response. - expectedTools[0].URL = fmt.Sprintf("%s/environment/%s/tools/%s", s.baseURL(c), s.State.EnvironUUID(), vers) - s.assertUploadResponse(c, resp, expectedTools[0]) -} - -func (s *toolsSuite) TestUploadAllowsEnvUUIDPath(c *gc.C) { - // Check that we can upload tools to https://host:port/ENVUUID/tools - expectedTools, vers, toolPath := s.setupToolsForUpload(c) - url := s.toolsURL(c, "binaryVersion="+vers.String()) - url.Path = fmt.Sprintf("/environment/%s/tools", s.State.EnvironUUID()) - resp, err := s.uploadRequest(c, url.String(), true, toolPath) - c.Assert(err, jc.ErrorIsNil) - // Check the response. - expectedTools[0].URL = fmt.Sprintf("%s/environment/%s/tools/%s", s.baseURL(c), s.State.EnvironUUID(), vers) - s.assertUploadResponse(c, resp, expectedTools[0]) -} - -func (s *toolsSuite) TestUploadAllowsOtherEnvUUIDPath(c *gc.C) { - envState := s.setupOtherEnvironment(c) - // Check that we can upload tools to https://host:port/ENVUUID/tools - expectedTools, vers, toolPath := s.setupToolsForUpload(c) - url := s.toolsURL(c, "binaryVersion="+vers.String()) - url.Path = fmt.Sprintf("/environment/%s/tools", envState.EnvironUUID()) - resp, err := s.uploadRequest(c, url.String(), true, toolPath) - c.Assert(err, jc.ErrorIsNil) - // Check the response. - expectedTools[0].URL = fmt.Sprintf("%s/environment/%s/tools/%s", s.baseURL(c), envState.EnvironUUID(), vers) - s.assertUploadResponse(c, resp, expectedTools[0]) -} - -func (s *toolsSuite) TestUploadRejectsWrongEnvUUIDPath(c *gc.C) { - // Check that we cannot access the tools at https://host:port/BADENVUUID/tools + resp := s.uploadRequest(c, url.String(), "application/x-tar-gz", toolPath) + // Check the response. 
+ expectedTools[0].URL = fmt.Sprintf("%s/model/%s/tools/%s", s.baseURL(c), s.State.ModelUUID(), vers) + s.assertUploadResponse(c, resp, expectedTools[0]) +} + +func (s *toolsSuite) TestUploadAllowsModelUUIDPath(c *gc.C) { + // Check that we can upload tools to https://host:port/ModelUUID/tools + expectedTools, vers, toolPath := s.setupToolsForUpload(c) + url := s.toolsURL(c, "binaryVersion="+vers.String()) + url.Path = fmt.Sprintf("/model/%s/tools", s.State.ModelUUID()) + resp := s.uploadRequest(c, url.String(), "application/x-tar-gz", toolPath) + // Check the response. + expectedTools[0].URL = fmt.Sprintf("%s/model/%s/tools/%s", s.baseURL(c), s.State.ModelUUID(), vers) + s.assertUploadResponse(c, resp, expectedTools[0]) +} + +func (s *toolsSuite) TestUploadAllowsOtherModelUUIDPath(c *gc.C) { + envState := s.setupOtherModel(c) + // Check that we can upload tools to https://host:port/ModelUUID/tools + expectedTools, vers, toolPath := s.setupToolsForUpload(c) + url := s.toolsURL(c, "binaryVersion="+vers.String()) + url.Path = fmt.Sprintf("/model/%s/tools", envState.ModelUUID()) + resp := s.uploadRequest(c, url.String(), "application/x-tar-gz", toolPath) + // Check the response. + expectedTools[0].URL = fmt.Sprintf("%s/model/%s/tools/%s", s.baseURL(c), envState.ModelUUID(), vers) + s.assertUploadResponse(c, resp, expectedTools[0]) +} + +func (s *toolsSuite) TestUploadRejectsWrongModelUUIDPath(c *gc.C) { + // Check that we cannot access the tools at https://host:port/BADModelUUID/tools url := s.toolsURL(c, "") - url.Path = "/environment/dead-beef-123456/tools" - resp, err := s.authRequest(c, "POST", url.String(), "", nil) - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown environment: "dead-beef-123456"`) + url.Path = "/model/dead-beef-123456/tools" + resp := s.authRequest(c, httpRequestParams{method: "POST", url: url.String()}) + s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown model: "dead-beef-123456"`) } func (s *toolsSuite) TestUploadSeriesExpanded(c *gc.C) { @@ -221,13 +262,12 @@ // Now try uploading them. The tools will be cloned for // each additional series specified. params := "?binaryVersion=" + vers.String() + "&series=quantal,precise" - resp, err := s.uploadRequest(c, s.toolsURI(c, params), true, toolPath) - c.Assert(err, jc.ErrorIsNil) + resp := s.uploadRequest(c, s.toolsURI(c, params), "application/x-tar-gz", toolPath) c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) // Check the response. info := s.APIInfo(c) - expectedTools[0].URL = fmt.Sprintf("%s/environment/%s/tools/%s", s.baseURL(c), info.EnvironTag.Id(), vers) + expectedTools[0].URL = fmt.Sprintf("%s/model/%s/tools/%s", s.baseURL(c), info.ModelTag.Id(), vers) s.assertUploadResponse(c, resp, expectedTools[0]) // Check the contents. 
@@ -253,30 +293,42 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } -func (s *toolsSuite) TestDownloadEnvUUIDPath(c *gc.C) { +func (s *toolsSuite) TestDownloadModelUUIDPath(c *gc.C) { tools := s.storeFakeTools(c, s.State, "abc", toolstorage.Metadata{ - Version: version.Current, - Size: 3, - SHA256: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + Version: version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + }, + Size: 3, + SHA256: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", }) - s.testDownload(c, tools, s.State.EnvironUUID()) + s.testDownload(c, tools, s.State.ModelUUID()) } -func (s *toolsSuite) TestDownloadOtherEnvUUIDPath(c *gc.C) { - envState := s.setupOtherEnvironment(c) +func (s *toolsSuite) TestDownloadOtherModelUUIDPath(c *gc.C) { + envState := s.setupOtherModel(c) tools := s.storeFakeTools(c, envState, "abc", toolstorage.Metadata{ - Version: version.Current, - Size: 3, - SHA256: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + Version: version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + }, + Size: 3, + SHA256: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", }) - s.testDownload(c, tools, envState.EnvironUUID()) + s.testDownload(c, tools, envState.ModelUUID()) } func (s *toolsSuite) TestDownloadTopLevelPath(c *gc.C) { tools := s.storeFakeTools(c, s.State, "abc", toolstorage.Metadata{ - Version: version.Current, - Size: 3, - SHA256: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", + Version: version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + }, + Size: 3, + SHA256: "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", }) s.testDownload(c, tools, "") } @@ -299,31 +351,39 @@ func (s *toolsSuite) TestDownloadFetchesAndVerifiesSize(c *gc.C) { // Upload fake tools, then upload over the top so the SHA256 hash does not match. - s.PatchValue(&version.Current.Number, testing.FakeVersionNumber) + s.PatchValue(&version.Current, testing.FakeVersionNumber) stor := s.DefaultToolsStorage envtesting.RemoveTools(c, stor, "released") - tools := envtesting.AssertUploadFakeToolsVersions(c, stor, "released", "released", version.Current)[0] + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + tools := envtesting.AssertUploadFakeToolsVersions(c, stor, "released", "released", current)[0] err := stor.Put(envtools.StorageName(tools.Version, "released"), strings.NewReader("!"), 1) c.Assert(err, jc.ErrorIsNil) - resp, err := s.downloadRequest(c, tools.Version, "") - c.Assert(err, jc.ErrorIsNil) + resp := s.downloadRequest(c, tools.Version, "") s.assertErrorResponse(c, resp, http.StatusBadRequest, "error fetching tools: size mismatch for .*") s.assertToolsNotStored(c, tools.Version) } func (s *toolsSuite) TestDownloadFetchesAndVerifiesHash(c *gc.C) { // Upload fake tools, then upload over the top so the SHA256 hash does not match. 
- s.PatchValue(&version.Current.Number, testing.FakeVersionNumber) + s.PatchValue(&version.Current, testing.FakeVersionNumber) stor := s.DefaultToolsStorage envtesting.RemoveTools(c, stor, "released") - tools := envtesting.AssertUploadFakeToolsVersions(c, stor, "released", "released", version.Current)[0] + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + tools := envtesting.AssertUploadFakeToolsVersions(c, stor, "released", "released", current)[0] sameSize := strings.Repeat("!", int(tools.Size)) err := stor.Put(envtools.StorageName(tools.Version, "released"), strings.NewReader(sameSize), tools.Size) c.Assert(err, jc.ErrorIsNil) - resp, err := s.downloadRequest(c, tools.Version, "") - c.Assert(err, jc.ErrorIsNil) + resp := s.downloadRequest(c, tools.Version, "") s.assertErrorResponse(c, resp, http.StatusBadRequest, "error fetching tools: hash mismatch for .*") s.assertToolsNotStored(c, tools.Version) } @@ -371,8 +431,7 @@ } func (s *toolsSuite) testDownload(c *gc.C, tools *coretools.Tools, uuid string) []byte { - resp, err := s.downloadRequest(c, tools.Version, uuid) - c.Assert(err, jc.ErrorIsNil) + resp := s.downloadRequest(c, tools.Version, uuid) defer resp.Body.Close() data, err := ioutil.ReadAll(resp.Body) c.Assert(err, jc.ErrorIsNil) @@ -384,58 +443,58 @@ return data } -func (s *toolsSuite) TestDownloadRejectsWrongEnvUUIDPath(c *gc.C) { - resp, err := s.downloadRequest(c, version.Current, "dead-beef-123456") - c.Assert(err, jc.ErrorIsNil) - s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown environment: "dead-beef-123456"`) -} - -func (s *toolsSuite) toolsURL(c *gc.C, query string) *url.URL { - uri := s.baseURL(c) - uri.Path = fmt.Sprintf("/environment/%s/tools", s.envUUID) - uri.RawQuery = query - return uri -} - -func (s *toolsSuite) toolsURI(c *gc.C, query string) string { - if query != "" && query[0] == '?' 
{ - query = query[1:] - } - return s.toolsURL(c, query).String() -} - -func (s *toolsSuite) downloadRequest(c *gc.C, version version.Binary, uuid string) (*http.Response, error) { - url := s.toolsURL(c, "") - if uuid == "" { - url.Path = fmt.Sprintf("/tools/%s", version) - } else { - url.Path = fmt.Sprintf("/environment/%s/tools/%s", uuid, version) - } - return s.sendRequest(c, "", "", "GET", url.String(), "", nil) -} - -func (s *toolsSuite) assertUploadResponse(c *gc.C, resp *http.Response, agentTools *coretools.Tools) { - body := assertResponse(c, resp, http.StatusOK, apihttp.CTypeJSON) - toolsResult := jsonToolsResponse(c, body) - c.Check(toolsResult.Error, gc.IsNil) - c.Check(toolsResult.Tools, gc.DeepEquals, agentTools) -} - -func (s *toolsSuite) assertGetFileResponse(c *gc.C, resp *http.Response, expBody, expContentType string) { - body := assertResponse(c, resp, http.StatusOK, expContentType) - c.Check(string(body), gc.Equals, expBody) -} - -func (s *toolsSuite) assertErrorResponse(c *gc.C, resp *http.Response, expCode int, expError string) error { - body := assertResponse(c, resp, expCode, apihttp.CTypeJSON) - err := jsonToolsResponse(c, body).Error - c.Assert(err, gc.NotNil) - c.Check(err, gc.ErrorMatches, expError) - return err -} - -func jsonToolsResponse(c *gc.C, body []byte) (jsonResponse params.ToolsResult) { - err := json.Unmarshal(body, &jsonResponse) - c.Assert(err, jc.ErrorIsNil) - return +func (s *toolsSuite) TestDownloadRejectsWrongModelUUIDPath(c *gc.C) { + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + resp := s.downloadRequest(c, current, "dead-beef-123456") + s.assertErrorResponse(c, resp, http.StatusNotFound, `unknown model: "dead-beef-123456"`) +} + +type toolsWithMacaroonsSuite struct { + toolsCommonSuite +} + +var _ = gc.Suite(&toolsWithMacaroonsSuite{}) + +func (s *toolsWithMacaroonsSuite) SetUpTest(c *gc.C) { + s.macaroonAuthEnabled = true + s.toolsCommonSuite.SetUpTest(c) +} + +func (s *toolsWithMacaroonsSuite) TestWithNoBasicAuthReturnsDischargeRequiredError(c *gc.C) { + resp := s.sendRequest(c, httpRequestParams{ + method: "POST", + url: s.toolsURI(c, ""), + }) + + charmResponse := s.assertResponse(c, resp, http.StatusUnauthorized) + c.Assert(charmResponse.Error, gc.NotNil) + c.Assert(charmResponse.Error.Message, gc.Equals, "verification failed: no macaroons") + c.Assert(charmResponse.Error.Code, gc.Equals, params.CodeDischargeRequired) + c.Assert(charmResponse.Error.Info, gc.NotNil) + c.Assert(charmResponse.Error.Info.Macaroon, gc.NotNil) +} + +func (s *toolsWithMacaroonsSuite) TestCanPostWithDischargedMacaroon(c *gc.C) { + checkCount := 0 + s.DischargerLogin = func() string { + checkCount++ + return s.userTag.Id() + } + resp := s.sendRequest(c, httpRequestParams{ + do: s.doer(), + method: "POST", + url: s.toolsURI(c, ""), + }) + s.assertErrorResponse(c, resp, http.StatusBadRequest, "expected binaryVersion argument") + c.Assert(checkCount, gc.Equals, 1) +} + +// doer returns a Do function that can make a bakery request +// appropriate for a charms endpoint. 
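The macaroon tests above exercise the discharge flow: an unauthenticated POST yields a CodeDischargeRequired error carrying a macaroon, and a bakery-aware client discharges it and retries. The suite's bakeryDo helper is defined elsewhere in the package; a minimal sketch of an equivalent Do function built with gopkg.in/macaroon-bakery.v1/httpbakery:

    client := httpbakery.NewClient()
    do := func(req *http.Request) (*http.Response, error) {
        // Client.Do retries the request after acquiring any required
        // discharge macaroons. Requests carrying a body need
        // Client.DoWithBody instead, so the body can be replayed.
        return client.Do(req)
    }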
+func (s *toolsWithMacaroonsSuite) doer() func(*http.Request) (*http.Response, error) { + return bakeryDo(nil, bakeryGetError) } === added directory 'src/github.com/juju/juju/apiserver/undertaker' === added file 'src/github.com/juju/juju/apiserver/undertaker/export_test.go' --- src/github.com/juju/juju/apiserver/undertaker/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/undertaker/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker + +var NewUndertaker = newUndertakerAPI === added file 'src/github.com/juju/juju/apiserver/undertaker/mock_test.go' --- src/github.com/juju/juju/apiserver/undertaker/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/undertaker/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,161 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/undertaker" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" +) + +// mockState implements State interface and allows inspection of called +// methods. +type mockState struct { + env *mockModel + removed bool + isSystem bool + machines []undertaker.Machine + services []undertaker.Service +} + +var _ undertaker.State = (*mockState)(nil) + +func newMockState(envOwner names.UserTag, envName string, isSystem bool) *mockState { + machine := &mockMachine{ + watcher: &mockWatcher{ + changes: make(chan struct{}, 1), + }, + } + service := &mockService{ + watcher: &mockWatcher{ + changes: make(chan struct{}, 1), + }, + } + + env := mockModel{ + owner: envOwner, + name: envName, + life: state.Alive, + } + + m := &mockState{ + env: &env, + isSystem: isSystem, + machines: []undertaker.Machine{machine}, + services: []undertaker.Service{service}, + } + return m +} + +func (m *mockState) EnsureModelRemoved() error { + if !m.removed { + return errors.New("found documents for model") + } + return nil +} + +func (m *mockState) RemoveAllModelDocs() error { + if m.env.life != state.Dead { + return errors.New("transaction aborted") + } + m.removed = true + return nil +} + +func (m *mockState) ProcessDyingModel() error { + if m.env.life != state.Dying { + return errors.New("model is not dying") + } + m.env.life = state.Dead + return nil +} + +func (m *mockState) AllMachines() ([]undertaker.Machine, error) { + return m.machines, nil +} + +func (m *mockState) AllServices() ([]undertaker.Service, error) { + return m.services, nil +} + +func (m *mockState) IsController() bool { + return m.isSystem +} + +func (m *mockState) Model() (undertaker.Model, error) { + return m.env, nil +} + +func (m *mockState) ModelConfig() (*config.Config, error) { + return &config.Config{}, nil +} + +// mockModel implements Model interface and allows inspection of called +// methods. 
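newMockState above seeds each mock watcher with a buffered channel of capacity one, so a test can queue a single change event without blocking, mimicking the initial event a real state.NotifyWatcher delivers on Changes(). The idiom in isolation:

    w := &mockWatcher{changes: make(chan struct{}, 1)}
    w.changes <- struct{}{} // queue the initial event; does not block

    select {
    case <-w.Changes():
        // event observed
    default:
        // no event pending
    }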
+type mockModel struct { + tod time.Time + owner names.UserTag + life state.Life + name string + uuid string +} + +var _ undertaker.Model = (*mockModel)(nil) + +func (m *mockModel) TimeOfDeath() time.Time { + return m.tod +} + +func (m *mockModel) Owner() names.UserTag { + return m.owner +} + +func (m *mockModel) Life() state.Life { + return m.life +} + +func (m *mockModel) Name() string { + return m.name +} + +func (m *mockModel) UUID() string { + return m.uuid +} + +func (m *mockModel) Destroy() error { + m.life = state.Dying + return nil +} + +type mockMachine struct { + watcher state.NotifyWatcher + err error +} + +func (m *mockMachine) Watch() state.NotifyWatcher { + return m.watcher +} + +type mockService struct { + watcher state.NotifyWatcher + err error +} + +func (s *mockService) Watch() state.NotifyWatcher { + return s.watcher +} + +type mockWatcher struct { + state.NotifyWatcher + changes chan struct{} +} + +func (w *mockWatcher) Changes() <-chan struct{} { + return w.changes +} === added file 'src/github.com/juju/juju/apiserver/undertaker/package_test.go' --- src/github.com/juju/juju/apiserver/undertaker/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/undertaker/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/apiserver/undertaker/state.go' --- src/github.com/juju/juju/apiserver/undertaker/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/undertaker/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" +) + +// State defines the needed methods of state.State +// for the work of the undertaker API. +type State interface { + + // Model returns the model entity. + Model() (Model, error) + + // IsController returns true if this state instance has the bootstrap + // model UUID. + IsController() bool + + // ProcessDyingModel checks if there are any machines or services left in + // state. If there are none, the model's life is changed from dying to dead. + ProcessDyingModel() (err error) + + // RemoveAllModelDocs removes all documents from multi-environment + // collections. + RemoveAllModelDocs() error + + // AllMachines returns all machines in the model ordered by id. + AllMachines() ([]Machine, error) + + // AllServices returns all deployed services in the model. + AllServices() ([]Service, error) + + // ModelConfig retrieves the model configuration. + ModelConfig() (*config.Config, error) +} + +type stateShim struct { + *state.State +} + +func (s *stateShim) AllMachines() ([]Machine, error) { + stateMachines, err := s.State.AllMachines() + if err != nil { + return nil, errors.Trace(err) + } + + machines := make([]Machine, len(stateMachines)) + for i := range stateMachines { + machines[i] = stateMachines[i] + } + + return machines, nil +} + +// Machine defines the needed methods of state.Machine for +// the work of the undertaker API. +type Machine interface { + // Watch returns a watcher for observing changes to a machine. 
+ Watch() state.NotifyWatcher +} + +func (s *stateShim) AllServices() ([]Service, error) { + stateServices, err := s.State.AllServices() + if err != nil { + return nil, errors.Trace(err) + } + + services := make([]Service, len(stateServices)) + for i := range stateServices { + services[i] = stateServices[i] + } + + return services, nil +} + +// Service defines the needed methods of state.Service for +// the work of the undertaker API. +type Service interface { + // Watch returns a watcher for observing changes to a service. + Watch() state.NotifyWatcher +} + +func (s *stateShim) Model() (Model, error) { + return s.State.Model() +} + +// Model defines the needed methods of state.Model for +// the work of the undertaker API. +type Model interface { + + // TimeOfDeath returns when the model Life was set to Dead. + TimeOfDeath() time.Time + + // Owner returns tag representing the owner of the model. + // The owner is the user that created the model. + Owner() names.UserTag + + // Life returns whether the model is Alive, Dying or Dead. + Life() state.Life + + // Name returns the human friendly name of the model. + Name() string + + // UUID returns the universally unique identifier of the model. + UUID() string + + // Destroy sets the model's lifecycle to Dying, preventing + // addition of services or machines to state. + Destroy() error +} === added file 'src/github.com/juju/juju/apiserver/undertaker/undertaker.go' --- src/github.com/juju/juju/apiserver/undertaker/undertaker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/undertaker/undertaker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,135 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" +) + +func init() { + common.RegisterStandardFacade("Undertaker", 1, NewUndertakerAPI) +} + +// UndertakerAPI implements the API used by the machine undertaker worker. +type UndertakerAPI struct { + st State + resources *common.Resources +} + +// NewUndertakerAPI creates a new instance of the undertaker API. +func NewUndertakerAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*UndertakerAPI, error) { + return newUndertakerAPI(&stateShim{st}, resources, authorizer) +} + +func newUndertakerAPI(st State, resources *common.Resources, authorizer common.Authorizer) (*UndertakerAPI, error) { + if !authorizer.AuthMachineAgent() || !authorizer.AuthModelManager() { + return nil, common.ErrPerm + } + return &UndertakerAPI{ + st: st, + resources: resources, + }, nil +} + +// ModelInfo returns information on the model needed by the undertaker worker. +func (u *UndertakerAPI) ModelInfo() (params.UndertakerModelInfoResult, error) { + result := params.UndertakerModelInfoResult{} + env, err := u.st.Model() + + if err != nil { + return result, errors.Trace(err) + } + tod := env.TimeOfDeath() + + result.Result = params.UndertakerModelInfo{ + UUID: env.UUID(), + GlobalName: env.Owner().String() + "/" + env.Name(), + Name: env.Name(), + IsSystem: u.st.IsController(), + Life: params.Life(env.Life().String()), + TimeOfDeath: &tod, + } + if tod.IsZero() { + result.Result.TimeOfDeath = nil + } + + return result, nil +} + +// ProcessDyingModel checks if a dying environment has any machines or services. +// If there are none, the environment's life is changed from dying to dead. 
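The stateShim methods above copy []*state.Machine and []*state.Service into []Machine and []Service element by element because Go slices are not covariant: *state.Machine satisfies the local Machine interface, but a []*state.Machine cannot be assigned or converted to []Machine directly. The copy in isolation:

    // var machines []Machine = stateMachines // does not compile
    machines := make([]Machine, len(stateMachines))
    for i, m := range stateMachines {
        machines[i] = m // each element converts to the interface type
    }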
+// ProcessDyingModel checks if a dying model has any machines or services.
+// If there are none, the model's life is changed from Dying to Dead.
+func (u *UndertakerAPI) ProcessDyingModel() error {
+	return u.st.ProcessDyingModel()
+}
+
+// RemoveModel removes any records of this model from Juju.
+func (u *UndertakerAPI) RemoveModel() error {
+	err := u.st.RemoveAllModelDocs()
+	if err != nil {
+		// TODO(waigani) Return a human friendly error for now. The proper fix
+		// is to run a buildTxn within state.RemoveAllModelDocs, so we
+		// can return better errors than "transaction aborted".
+		return errors.New("an error occurred, unable to remove model")
+	}
+	return nil
+}
+
+func (u *UndertakerAPI) environResourceWatcher() params.NotifyWatchResult {
+	var nothing params.NotifyWatchResult
+	machines, err := u.st.AllMachines()
+	if err != nil {
+		nothing.Error = common.ServerError(err)
+		return nothing
+	}
+	services, err := u.st.AllServices()
+	if err != nil {
+		nothing.Error = common.ServerError(err)
+		return nothing
+	}
+	var watchers []state.NotifyWatcher
+	for _, machine := range machines {
+		watchers = append(watchers, machine.Watch())
+	}
+	for _, service := range services {
+		watchers = append(watchers, service.Watch())
+	}
+
+	watch := common.NewMultiNotifyWatcher(watchers...)
+
+	if _, ok := <-watch.Changes(); ok {
+		return params.NotifyWatchResult{
+			NotifyWatcherId: u.resources.Register(watch),
+		}
+	}
+	nothing.Error = common.ServerError(watcher.EnsureErr(watch))
+	return nothing
+}
+
+// WatchModelResources creates watchers for changes to the lifecycle of a
+// model's machines and services.
+func (u *UndertakerAPI) WatchModelResources() params.NotifyWatchResults {
+	return params.NotifyWatchResults{
+		Results: []params.NotifyWatchResult{
+			u.environResourceWatcher(),
+		},
+	}
+}
+
+// ModelConfig returns the model's configuration.
+func (u *UndertakerAPI) ModelConfig() (params.ModelConfigResult, error) {
+	result := params.ModelConfigResult{}
+
+	config, err := u.st.ModelConfig()
+	if err != nil {
+		return result, err
+	}
+	allAttrs := config.AllAttrs()
+	result.Config = allAttrs
+	return result, nil
+}

=== added file 'src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go'
--- src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/undertaker/undertaker_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,157 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package undertaker_test + +import ( + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/apiserver/undertaker" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" +) + +type undertakerSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&undertakerSuite{}) + +func (s *undertakerSuite) setupStateAndAPI(c *gc.C, isSystem bool, envName string) (*mockState, *undertaker.UndertakerAPI) { + machineNo := "1" + if isSystem { + machineNo = "0" + } + + authorizer := apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag(machineNo), + EnvironManager: true, + } + + st := newMockState(names.NewUserTag("admin"), envName, isSystem) + api, err := undertaker.NewUndertaker(st, nil, authorizer) + c.Assert(err, jc.ErrorIsNil) + return st, api +} + +func (s *undertakerSuite) TestNoPerms(c *gc.C) { + for _, authorizer := range []apiservertesting.FakeAuthorizer{ + apiservertesting.FakeAuthorizer{ + Tag: names.NewMachineTag("0"), + }, + apiservertesting.FakeAuthorizer{ + Tag: names.NewUserTag("bob"), + EnvironManager: true, + }, + } { + st := newMockState(names.NewUserTag("admin"), "dummymodel", true) + _, err := undertaker.NewUndertaker( + st, + nil, + authorizer, + ) + c.Assert(err, gc.ErrorMatches, "permission denied") + } +} + +func (s *undertakerSuite) TestEnvironInfo(c *gc.C) { + otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") + st, api := s.setupStateAndAPI(c, true, "dummymodel") + for _, test := range []struct { + st *mockState + api *undertaker.UndertakerAPI + isSystem bool + envName string + }{ + {otherSt, hostedAPI, false, "hostedenv"}, + {st, api, true, "dummymodel"}, + } { + env, err := test.st.Model() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Destroy(), jc.ErrorIsNil) + + result, err := test.api.ModelInfo() + c.Assert(err, jc.ErrorIsNil) + + info := result.Result + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Error, gc.IsNil) + + c.Assert(info.UUID, gc.Equals, env.UUID()) + c.Assert(info.GlobalName, gc.Equals, "user-admin/"+test.envName) + c.Assert(info.Name, gc.Equals, test.envName) + c.Assert(info.IsSystem, gc.Equals, test.isSystem) + c.Assert(info.Life, gc.Equals, params.Dying) + c.Assert(info.TimeOfDeath, gc.IsNil) + } +} + +func (s *undertakerSuite) TestProcessDyingEnviron(c *gc.C) { + otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") + env, err := otherSt.Model() + c.Assert(err, jc.ErrorIsNil) + + err = hostedAPI.ProcessDyingModel() + c.Assert(err, gc.ErrorMatches, "model is not dying") + c.Assert(env.Life(), gc.Equals, state.Alive) + + err = env.Destroy() + c.Assert(err, jc.ErrorIsNil) + + c.Assert(env.Life(), gc.Equals, state.Dying) + + err = hostedAPI.ProcessDyingModel() + c.Assert(err, gc.IsNil) + c.Assert(env.Life(), gc.Equals, state.Dead) +} + +func (s *undertakerSuite) TestRemoveAliveEnviron(c *gc.C) { + otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") + _, err := otherSt.Model() + c.Assert(err, jc.ErrorIsNil) + + err = hostedAPI.RemoveModel() + c.Assert(err, gc.ErrorMatches, "an error occurred, unable to remove model") +} + +func (s *undertakerSuite) TestRemoveDyingEnviron(c *gc.C) { + otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") + env, err := otherSt.Model() + c.Assert(err, jc.ErrorIsNil) + + // Set env to dying + err = env.Destroy() + c.Assert(err, jc.ErrorIsNil) + + err = hostedAPI.RemoveModel() + c.Assert(err, 
gc.ErrorMatches, "an error occurred, unable to remove model") +} + +func (s *undertakerSuite) TestDeadRemoveEnviron(c *gc.C) { + otherSt, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") + env, err := otherSt.Model() + c.Assert(err, jc.ErrorIsNil) + + // Set env to dead + err = env.Destroy() + c.Assert(err, jc.ErrorIsNil) + err = hostedAPI.ProcessDyingModel() + c.Assert(err, gc.IsNil) + + err = hostedAPI.RemoveModel() + c.Assert(err, jc.ErrorIsNil) + + c.Assert(otherSt.removed, jc.IsTrue) +} + +func (s *undertakerSuite) TestModelConfig(c *gc.C) { + _, hostedAPI := s.setupStateAndAPI(c, false, "hostedenv") + + cfg, err := hostedAPI.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + c.Assert(cfg, gc.NotNil) +} === added directory 'src/github.com/juju/juju/apiserver/unitassigner' === added file 'src/github.com/juju/juju/apiserver/unitassigner/package_test.go' --- src/github.com/juju/juju/apiserver/unitassigner/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/unitassigner/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package unitassigner + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/apiserver/unitassigner/unitassigner.go' --- src/github.com/juju/juju/apiserver/unitassigner/unitassigner.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/unitassigner/unitassigner.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +package unitassigner + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" +) + +func init() { + common.RegisterStandardFacade("UnitAssigner", 1, New) +} + +// assignerState defines the state methods this facade needs, so they can be mocked +// for testing. +type assignerState interface { + WatchForUnitAssignment() state.StringsWatcher + AssignStagedUnits(ids []string) ([]state.UnitAssignmentResult, error) +} + +type statusSetter interface { + SetStatus(args params.SetStatus) (params.ErrorResults, error) +} + +// API implements the functionality for assigning units to machines. +type API struct { + st assignerState + res *common.Resources + statusSetter statusSetter +} + +// New returns a new unitAssigner api instance. +func New(st *state.State, res *common.Resources, _ common.Authorizer) (*API, error) { + setter := common.NewStatusSetter(&common.UnitAgentFinder{st}, common.AuthAlways()) + return &API{ + st: st, + res: res, + statusSetter: setter, + }, nil +} + +// AssignUnits assigns the units with the given ids to the correct machine. The +// error results are returned in the same order as the given entities. +func (a *API) AssignUnits(args params.Entities) (params.ErrorResults, error) { + result := params.ErrorResults{} + + // state uses ids, but the API uses Tags, so we have to convert back and + // forth (whee!). The list of ids is (crucially) in the same order as the + // list of tags. This is the same order as the list of errors we return. 
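+	// For example, the tag "unit-foo-0" parses to the unit id "foo/0";
+	// the error for that unit is reported at the same index in the
+	// results below.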
+	ids := make([]string, len(args.Entities))
+	for i, e := range args.Entities {
+		tag, err := names.ParseUnitTag(e.Tag)
+		if err != nil {
+			return result, err
+		}
+		ids[i] = tag.Id()
+	}
+
+	res, err := a.st.AssignStagedUnits(ids)
+	if err != nil {
+		return result, common.ServerError(err)
+	}
+
+	// The results come back from state in an undetermined order and do not
+	// include results for units that were not found, so we have to make up for
+	// that here.
+	resultMap := make(map[string]error, len(ids))
+	for _, r := range res {
+		resultMap[r.Unit] = r.Error
+	}
+
+	result.Results = make([]params.ErrorResult, len(args.Entities))
+	for i, id := range ids {
+		if err, ok := resultMap[id]; ok {
+			result.Results[i].Error = common.ServerError(err)
+		} else {
+			result.Results[i].Error =
+				common.ServerError(errors.NotFoundf("unit %q", args.Entities[i].Tag))
+		}
+	}
+
+	return result, nil
+}
+
+// WatchUnitAssignments returns a strings watcher that is notified when new unit
+// assignments are added to the db.
+func (a *API) WatchUnitAssignments() (params.StringsWatchResult, error) {
+	watch := a.st.WatchForUnitAssignment()
+	if changes, ok := <-watch.Changes(); ok {
+		return params.StringsWatchResult{
+			StringsWatcherId: a.res.Register(watch),
+			Changes:          changes,
+		}, nil
+	}
+	return params.StringsWatchResult{}, watcher.EnsureErr(watch)
+}
+
+// SetAgentStatus sets the status of the agents of the units passed in args.
+// It fails if any of the args does not name a unit.
+func (a *API) SetAgentStatus(args params.SetStatus) (params.ErrorResults, error) {
+	return a.statusSetter.SetStatus(args)
+}
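+
+// A hedged usage sketch, illustrative only: a caller assigns staged units in
+// bulk and then checks the per-unit errors, which are ordered like the
+// request entities:
+//
+//	res, err := api.AssignUnits(params.Entities{
+//		Entities: []params.Entity{{Tag: "unit-foo-0"}},
+//	})
+//	if err != nil {
+//		// the call as a whole failed
+//	}
+//	for _, r := range res.Results {
+//		if r.Error != nil {
+//			// this unit was not assigned (e.g. not found)
+//		}
+//	}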
=== added file 'src/github.com/juju/juju/apiserver/unitassigner/unitassigner_test.go'
--- src/github.com/juju/juju/apiserver/unitassigner/unitassigner_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/apiserver/unitassigner/unitassigner_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,101 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package unitassigner
+
+import (
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/state"
+)
+
+var _ = gc.Suite(testsuite{})
+
+type testsuite struct{}
+
+func (testsuite) TestAssignUnits(c *gc.C) {
+	f := &fakeState{}
+	f.results = []state.UnitAssignmentResult{{Unit: "foo/0"}}
+	api := API{st: f, res: common.NewResources()}
+	args := params.Entities{Entities: []params.Entity{{Tag: "unit-foo-0"}, {Tag: "unit-bar-1"}}}
+	res, err := api.AssignUnits(args)
+	c.Assert(f.ids, gc.DeepEquals, []string{"foo/0", "bar/1"})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(res.Results, gc.HasLen, 2)
+	c.Assert(res.Results[0].Error, gc.IsNil)
+	c.Assert(res.Results[1].Error, gc.ErrorMatches, `unit "unit-bar-1" not found`)
+}
+
+func (testsuite) TestWatchUnitAssignment(c *gc.C) {
+	f := &fakeState{}
+	api := API{st: f, res: common.NewResources()}
+	f.ids = []string{"boo", "far"}
+	res, err := api.WatchUnitAssignments()
+	c.Assert(f.watchCalled, jc.IsTrue)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(res.Changes, gc.DeepEquals, f.ids)
+}
+
+func (testsuite) TestSetStatus(c *gc.C) {
+	f := &fakeStatusSetter{
+		res: params.ErrorResults{
+			Results: []params.ErrorResult{
+				{Error: &params.Error{Message: "boo"}}}}}
+	api := API{statusSetter: f}
+	args := params.SetStatus{
+		Entities: []params.EntityStatusArgs{{Tag: "foo/0"}},
+	}
+	res, err := api.SetAgentStatus(args)
+	c.Assert(args, jc.DeepEquals, f.args)
+	c.Assert(res, jc.DeepEquals, f.res)
+	c.Assert(err, gc.Equals, f.err)
+}
+
+type fakeState struct {
+	watchCalled bool
+	ids         []string
+	results     []state.UnitAssignmentResult
+	err         error
+}
+
+func (f *fakeState) WatchForUnitAssignment() state.StringsWatcher {
+	f.watchCalled = true
+	return fakeWatcher{f.ids}
+}
+
+func (f *fakeState) AssignStagedUnits(ids []string) ([]state.UnitAssignmentResult, error) {
+	f.ids = ids
+	return f.results, f.err
+}
+
+type fakeWatcher struct {
+	changes []string
+}
+
+func (f fakeWatcher) Changes() <-chan []string {
+	changes := make(chan []string, 1)
+	changes <- f.changes
+	return changes
+}
+
+func (fakeWatcher) Kill() {}
+
+func (fakeWatcher) Wait() error { return nil }
+
+func (fakeWatcher) Stop() error { return nil }
+
+func (fakeWatcher) Err() error { return nil }
+
+type fakeStatusSetter struct {
+	args params.SetStatus
+	res  params.ErrorResults
+	err  error
+}
+
+func (f *fakeStatusSetter) SetStatus(args params.SetStatus) (params.ErrorResults, error) {
+	f.args = args
+	return f.res, f.err
+}

=== modified file 'src/github.com/juju/juju/apiserver/uniter/export_test.go'
--- src/github.com/juju/juju/apiserver/uniter/export_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/apiserver/uniter/export_test.go	2016-03-22 15:18:22 +0000
@@ -3,10 +3,15 @@
 
 package uniter
 
-import "github.com/juju/juju/apiserver/common"
+import (
+	"github.com/juju/juju/apiserver/common"
+	"github.com/juju/juju/apiserver/meterstatus"
+)
 
 var (
 	GetZone = &getZone
+
+	_ meterstatus.MeterStatus = (*UniterAPIV3)(nil)
 )
 
 type StorageStateInterface storageStateInterface

=== modified file 'src/github.com/juju/juju/apiserver/uniter/package_test.go'
--- src/github.com/juju/juju/apiserver/uniter/package_test.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/apiserver/uniter/package_test.go	2016-03-22 15:18:22 +0000
@@ -6,9 +6,14 @@
 import (
 	stdtesting "testing"
 
+	"github.com/juju/testing"
+
 	coretesting "github.com/juju/juju/testing"
 )
-func TestAll(t *stdtesting.T) { +func TestPackage(t *stdtesting.T) { + if testing.RaceEnabled { + t.Skip("skipping package under -race, see LP 1518809") + } coretesting.MgoTestPackage(t) } === modified file 'src/github.com/juju/juju/apiserver/uniter/status.go' --- src/github.com/juju/juju/apiserver/uniter/status.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/uniter/status.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,6 @@ package uniter import ( - "github.com/juju/errors" - "github.com/juju/names" - "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" @@ -24,22 +21,6 @@ getCanModify common.GetAuthFunc } -type unitAgentFinder struct { - state.EntityFinder -} - -func (ua *unitAgentFinder) FindEntity(tag names.Tag) (state.Entity, error) { - _, ok := tag.(names.UnitTag) - if !ok { - return nil, errors.Errorf("unsupported tag %T", tag) - } - entity, err := ua.EntityFinder.FindEntity(tag) - if err != nil { - return nil, errors.Trace(err) - } - return entity.(*state.Unit).Agent(), nil -} - // NewStatusAPI creates a new server-side Status setter API facade. func NewStatusAPI(st *state.State, getCanModify common.GetAuthFunc) *StatusAPI { // TODO(fwereade): so *all* of these have exactly the same auth @@ -48,7 +29,7 @@ unitGetter := common.NewStatusGetter(st, getCanModify) serviceSetter := common.NewServiceStatusSetter(st, getCanModify) serviceGetter := common.NewServiceStatusGetter(st, getCanModify) - agentSetter := common.NewStatusSetter(&unitAgentFinder{st}, getCanModify) + agentSetter := common.NewStatusSetter(&common.UnitAgentFinder{st}, getCanModify) return &StatusAPI{ agentSetter: agentSetter, unitSetter: unitSetter, === modified file 'src/github.com/juju/juju/apiserver/uniter/storage.go' --- src/github.com/juju/juju/apiserver/uniter/storage.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/uniter/storage.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ "github.com/juju/names" "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/common/storagecommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" "github.com/juju/juju/state/watcher" @@ -171,7 +172,7 @@ if err != nil { return params.StorageAttachment{}, err } - info, err := common.StorageAttachmentInfo(s.st, stateStorageAttachment, machineTag) + info, err := storagecommon.StorageAttachmentInfo(s.st, stateStorageAttachment, machineTag) if err != nil { return params.StorageAttachment{}, err } @@ -266,7 +267,7 @@ if err != nil { return nothing, err } - watch, err := common.WatchStorageAttachment(s.st, storageTag, machineTag, unitTag) + watch, err := storagecommon.WatchStorageAttachment(s.st, storageTag, machineTag, unitTag) if err != nil { return nothing, errors.Trace(err) } === added file 'src/github.com/juju/juju/apiserver/uniter/uniter.go' --- src/github.com/juju/juju/apiserver/uniter/uniter.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1825 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package uniter implements the API interface used by the uniter worker. 
+ +package uniter + +import ( + "fmt" + "net/url" + "path" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/common" + leadershipapiserver "github.com/juju/juju/apiserver/leadership" + "github.com/juju/juju/apiserver/meterstatus" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/network" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/state/watcher" +) + +var logger = loggo.GetLogger("juju.apiserver.uniter") + +func init() { + common.RegisterStandardFacade("Uniter", 3, NewUniterAPIV3) +} + +// UniterAPIV3 implements the API version 3, used by the uniter worker. +type UniterAPIV3 struct { + *common.LifeGetter + *StatusAPI + *common.DeadEnsurer + *common.AgentEntityWatcher + *common.APIAddresser + *common.ModelWatcher + *common.RebootRequester + *leadershipapiserver.LeadershipSettingsAccessor + meterstatus.MeterStatus + + st *state.State + auth common.Authorizer + resources *common.Resources + accessUnit common.GetAuthFunc + accessService common.GetAuthFunc + unit *state.Unit + accessMachine common.GetAuthFunc + StorageAPI +} + +// NewUniterAPIV3 creates a new instance of the Uniter API, version 3. +func NewUniterAPIV3(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*UniterAPIV3, error) { + if !authorizer.AuthUnitAgent() { + return nil, common.ErrPerm + } + var unit *state.Unit + var err error + switch tag := authorizer.GetAuthTag().(type) { + case names.UnitTag: + unit, err = st.Unit(tag.Id()) + if err != nil { + return nil, errors.Trace(err) + } + default: + return nil, errors.Errorf("expected names.UnitTag, got %T", tag) + } + accessUnit := func() (common.AuthFunc, error) { + return authorizer.AuthOwner, nil + } + accessService := func() (common.AuthFunc, error) { + switch tag := authorizer.GetAuthTag().(type) { + case names.UnitTag: + entity, err := st.Unit(tag.Id()) + if err != nil { + return nil, errors.Trace(err) + } + serviceName := entity.ServiceName() + serviceTag := names.NewServiceTag(serviceName) + return func(tag names.Tag) bool { + return tag == serviceTag + }, nil + default: + return nil, errors.Errorf("expected names.UnitTag, got %T", tag) + } + } + accessMachine := func() (common.AuthFunc, error) { + switch tag := authorizer.GetAuthTag().(type) { + case names.UnitTag: + entity, err := st.Unit(tag.Id()) + if err != nil { + return nil, errors.Trace(err) + } + machineId, err := entity.AssignedMachineId() + if err != nil { + return nil, errors.Trace(err) + } + machineTag := names.NewMachineTag(machineId) + return func(tag names.Tag) bool { + return tag == machineTag + }, nil + default: + return nil, errors.Errorf("expected names.UnitTag, got %T", tag) + } + } + storageAPI, err := newStorageAPI(getStorageState(st), resources, accessUnit) + if err != nil { + return nil, err + } + msAPI, err := meterstatus.NewMeterStatusAPI(st, resources, authorizer) + if err != nil { + return nil, errors.Annotate(err, "could not create meter status API handler") + } + accessUnitOrService := common.AuthEither(accessUnit, accessService) + return &UniterAPIV3{ + LifeGetter: common.NewLifeGetter(st, accessUnitOrService), + DeadEnsurer: common.NewDeadEnsurer(st, accessUnit), + AgentEntityWatcher: common.NewAgentEntityWatcher(st, resources, accessUnitOrService), + APIAddresser: common.NewAPIAddresser(st, resources), + ModelWatcher: common.NewModelWatcher(st, 
resources, authorizer), + RebootRequester: common.NewRebootRequester(st, accessMachine), + LeadershipSettingsAccessor: leadershipSettingsAccessorFactory(st, resources, authorizer), + MeterStatus: msAPI, + // TODO(fwereade): so *every* unit should be allowed to get/set its + // own status *and* its service's? This is not a pleasing arrangement. + StatusAPI: NewStatusAPI(st, accessUnitOrService), + + st: st, + auth: authorizer, + resources: resources, + accessUnit: accessUnit, + accessService: accessService, + accessMachine: accessMachine, + unit: unit, + StorageAPI: *storageAPI, + }, nil +} + +// AllMachinePorts returns all opened port ranges for each given +// machine (on all networks). +func (u *UniterAPIV3) AllMachinePorts(args params.Entities) (params.MachinePortsResults, error) { + result := params.MachinePortsResults{ + Results: make([]params.MachinePortsResult, len(args.Entities)), + } + canAccess, err := u.accessMachine() + if err != nil { + return params.MachinePortsResults{}, err + } + for i, entity := range args.Entities { + result.Results[i] = u.getOneMachinePorts(canAccess, entity.Tag) + } + return result, nil +} + +// ServiceOwner returns the owner user for each given service tag. +func (u *UniterAPIV3) ServiceOwner(args params.Entities) (params.StringResults, error) { + result := params.StringResults{ + Results: make([]params.StringResult, len(args.Entities)), + } + canAccess, err := u.accessService() + if err != nil { + return params.StringResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseServiceTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + if !canAccess(tag) { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + service, err := u.getService(tag) + if err != nil { + result.Results[i].Error = common.ServerError(err) + continue + } + result.Results[i].Result = service.GetOwnerTag() + } + return result, nil +} + +// AssignedMachine returns the machine tag for each given unit tag, or +// an error satisfying params.IsCodeNotAssigned when a unit has no +// assigned machine. 
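+// For an assigned unit the result is the machine tag in string form,
+// e.g. "machine-0".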
+func (u *UniterAPIV3) AssignedMachine(args params.Entities) (params.StringResults, error) { + result := params.StringResults{ + Results: make([]params.StringResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.StringResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + if !canAccess(tag) { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + unit, err := u.getUnit(tag) + if err != nil { + result.Results[i].Error = common.ServerError(err) + continue + } + machineId, err := unit.AssignedMachineId() + if err != nil { + result.Results[i].Error = common.ServerError(err) + } else { + result.Results[i].Result = names.NewMachineTag(machineId).String() + } + } + return result, nil +} + +func (u *UniterAPIV3) getMachine(tag names.MachineTag) (*state.Machine, error) { + return u.st.Machine(tag.Id()) +} + +func (u *UniterAPIV3) getOneMachinePorts(canAccess common.AuthFunc, machineTag string) params.MachinePortsResult { + tag, err := names.ParseMachineTag(machineTag) + if err != nil { + return params.MachinePortsResult{Error: common.ServerError(common.ErrPerm)} + } + if !canAccess(tag) { + return params.MachinePortsResult{Error: common.ServerError(common.ErrPerm)} + } + machine, err := u.getMachine(tag) + if err != nil { + return params.MachinePortsResult{Error: common.ServerError(err)} + } + allPorts, err := machine.AllPorts() + if err != nil { + return params.MachinePortsResult{Error: common.ServerError(err)} + } + var resultPorts []params.MachinePortRange + for _, ports := range allPorts { + // AllPortRanges gives a map, but apis require a stable order + // for results, so sort the port ranges. + portRangesToUnits := ports.AllPortRanges() + portRanges := make([]network.PortRange, 0, len(portRangesToUnits)) + for portRange := range portRangesToUnits { + portRanges = append(portRanges, portRange) + } + network.SortPortRanges(portRanges) + for _, portRange := range portRanges { + unitName := portRangesToUnits[portRange] + resultPorts = append(resultPorts, params.MachinePortRange{ + UnitTag: names.NewUnitTag(unitName).String(), + PortRange: params.FromNetworkPortRange(portRange), + }) + } + } + return params.MachinePortsResult{ + Ports: resultPorts, + } +} + +// PublicAddress returns the public address for each given unit, if set. +func (u *UniterAPIV3) PublicAddress(args params.Entities) (params.StringResults, error) { + result := params.StringResults{ + Results: make([]params.StringResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.StringResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + var address network.Address + address, err = unit.PublicAddress() + if err == nil { + result.Results[i].Result = address.Value + } else if network.IsNoAddress(err) { + err = common.NoAddressSetError(tag, "public") + } + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// PrivateAddress returns the private address for each given unit, if set. 
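+// A unit whose private address is not yet set yields a "no address set"
+// error in its result, mirroring PublicAddress above.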
+func (u *UniterAPIV3) PrivateAddress(args params.Entities) (params.StringResults, error) { + result := params.StringResults{ + Results: make([]params.StringResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.StringResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + var address network.Address + address, err = unit.PrivateAddress() + if err == nil { + result.Results[i].Result = address.Value + } else if network.IsNoAddress(err) { + err = common.NoAddressSetError(tag, "private") + } + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// TODO(ericsnow) Factor out the common code amongst the many methods here. + +var getZone = func(st *state.State, tag names.Tag) (string, error) { + unit, err := st.Unit(tag.Id()) + if err != nil { + return "", errors.Trace(err) + } + zone, err := unit.AvailabilityZone() + return zone, errors.Trace(err) +} + +// AvailabilityZone returns the availability zone for each given unit, if applicable. +func (u *UniterAPIV3) AvailabilityZone(args params.Entities) (params.StringResults, error) { + var results params.StringResults + + canAccess, err := u.accessUnit() + if err != nil { + return results, errors.Trace(err) + } + + // Prep the results. + results = params.StringResults{ + Results: make([]params.StringResult, len(args.Entities)), + } + + // Collect the zones. No zone will be collected for any entity where + // the tag is invalid or not authorized. Instead the corresponding + // result will be updated with the error. + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + results.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var zone string + zone, err = getZone(u.st, tag) + if err == nil { + results.Results[i].Result = zone + } + } + results.Results[i].Error = common.ServerError(err) + } + + return results, nil +} + +// Resolved returns the current resolved setting for each given unit. +func (u *UniterAPIV3) Resolved(args params.Entities) (params.ResolvedModeResults, error) { + result := params.ResolvedModeResults{ + Results: make([]params.ResolvedModeResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ResolvedModeResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + result.Results[i].Mode = params.ResolvedMode(unit.Resolved()) + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// ClearResolved removes any resolved setting from each given unit. 
+func (u *UniterAPIV3) ClearResolved(args params.Entities) (params.ErrorResults, error) {
+	result := params.ErrorResults{
+		Results: make([]params.ErrorResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.ErrorResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unit *state.Unit
+			unit, err = u.getUnit(tag)
+			if err == nil {
+				err = unit.ClearResolved()
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// GetPrincipal returns the result of calling PrincipalName() and
+// converting it to a tag, on each given unit.
+func (u *UniterAPIV3) GetPrincipal(args params.Entities) (params.StringBoolResults, error) {
+	result := params.StringBoolResults{
+		Results: make([]params.StringBoolResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.StringBoolResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unit *state.Unit
+			unit, err = u.getUnit(tag)
+			if err == nil {
+				principal, ok := unit.PrincipalName()
+				if principal != "" {
+					result.Results[i].Result = names.NewUnitTag(principal).String()
+				}
+				result.Results[i].Ok = ok
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// Destroy advances all given Alive units' lifecycles as far as
+// possible. See state/Unit.Destroy().
+func (u *UniterAPIV3) Destroy(args params.Entities) (params.ErrorResults, error) {
+	result := params.ErrorResults{
+		Results: make([]params.ErrorResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.ErrorResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unit *state.Unit
+			unit, err = u.getUnit(tag)
+			if err == nil {
+				err = unit.Destroy()
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// DestroyAllSubordinates destroys all subordinates of each given unit.
+func (u *UniterAPIV3) DestroyAllSubordinates(args params.Entities) (params.ErrorResults, error) {
+	result := params.ErrorResults{
+		Results: make([]params.ErrorResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.ErrorResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unit *state.Unit
+			unit, err = u.getUnit(tag)
+			if err == nil {
+				err = u.destroySubordinates(unit)
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
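+
+// The bulk methods above all share one shape: one result per entity, with
+// permission problems surfaced as per-entity errors rather than as a call
+// failure. A minimal sketch (given a *UniterAPIV3 u):
+//
+//	results, err := u.Destroy(params.Entities{
+//		Entities: []params.Entity{{Tag: "unit-mysql-0"}},
+//	})
+//	if err == nil && results.Results[0].Error != nil {
+//		// not permitted, or the unit could not be destroyed
+//	}
+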
+// HasSubordinates returns whether each given unit has any subordinates.
+func (u *UniterAPIV3) HasSubordinates(args params.Entities) (params.BoolResults, error) {
+	result := params.BoolResults{
+		Results: make([]params.BoolResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.BoolResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unit *state.Unit
+			unit, err = u.getUnit(tag)
+			if err == nil {
+				subordinates := unit.SubordinateNames()
+				result.Results[i].Result = len(subordinates) > 0
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// CharmModifiedVersion returns the most recent CharmModifiedVersion for all
+// given units or services.
+func (u *UniterAPIV3) CharmModifiedVersion(args params.Entities) (params.IntResults, error) {
+	results := params.IntResults{
+		Results: make([]params.IntResult, len(args.Entities)),
+	}
+
+	accessUnitOrService := common.AuthEither(u.accessUnit, u.accessService)
+	canAccess, err := accessUnitOrService()
+	if err != nil {
+		return results, err
+	}
+	for i, entity := range args.Entities {
+		ver, err := u.charmModifiedVersion(entity.Tag, canAccess)
+		if err != nil {
+			results.Results[i].Error = common.ServerError(err)
+			continue
+		}
+		results.Results[i].Result = ver
+	}
+	return results, nil
+}
+
+func (u *UniterAPIV3) charmModifiedVersion(tagStr string, canAccess func(names.Tag) bool) (int, error) {
+	tag, err := names.ParseTag(tagStr)
+	if err != nil {
+		return -1, common.ErrPerm
+	}
+	if !canAccess(tag) {
+		return -1, common.ErrPerm
+	}
+	unitOrService, err := u.st.FindEntity(tag)
+	if err != nil {
+		return -1, err
+	}
+	var service *state.Service
+	switch entity := unitOrService.(type) {
+	case *state.Service:
+		service = entity
+	case *state.Unit:
+		service, err = entity.Service()
+		if err != nil {
+			return -1, err
+		}
+	default:
+		return -1, errors.BadRequestf("type %T does not have a CharmModifiedVersion", entity)
+	}
+	return service.CharmModifiedVersion(), nil
+}
+
+// CharmURL returns the charm URL for all given units or services.
+func (u *UniterAPIV3) CharmURL(args params.Entities) (params.StringBoolResults, error) {
+	result := params.StringBoolResults{
+		Results: make([]params.StringBoolResult, len(args.Entities)),
+	}
+	accessUnitOrService := common.AuthEither(u.accessUnit, u.accessService)
+	canAccess, err := accessUnitOrService()
+	if err != nil {
+		return params.StringBoolResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unitOrService state.Entity
+			unitOrService, err = u.st.FindEntity(tag)
+			if err == nil {
+				charmURLer := unitOrService.(interface {
+					CharmURL() (*charm.URL, bool)
+				})
+				curl, ok := charmURLer.CharmURL()
+				if curl != nil {
+					result.Results[i].Result = curl.String()
+					result.Results[i].Ok = ok
+				}
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// SetCharmURL sets the charm URL for each given unit. An error will
+// be returned if a unit is dead, or the charm URL is not known.
+func (u *UniterAPIV3) SetCharmURL(args params.EntitiesCharmURL) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ErrorResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + var curl *charm.URL + curl, err = charm.ParseURL(entity.CharmURL) + if err == nil { + err = unit.SetCharmURL(curl) + } + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// OpenPorts sets the policy of the port range with protocol to be +// opened, for all given units. +func (u *UniterAPIV3) OpenPorts(args params.EntitiesPortRanges) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ErrorResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + err = unit.OpenPorts(entity.Protocol, entity.FromPort, entity.ToPort) + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// ClosePorts sets the policy of the port range with protocol to be +// closed, for all given units. +func (u *UniterAPIV3) ClosePorts(args params.EntitiesPortRanges) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ErrorResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canAccess(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + err = unit.ClosePorts(entity.Protocol, entity.FromPort, entity.ToPort) + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// WatchConfigSettings returns a NotifyWatcher for observing changes +// to each unit's service configuration settings. See also +// state/watcher.go:Unit.WatchConfigSettings(). +func (u *UniterAPIV3) WatchConfigSettings(args params.Entities) (params.NotifyWatchResults, error) { + result := params.NotifyWatchResults{ + Results: make([]params.NotifyWatchResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.NotifyWatchResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + watcherId := "" + if canAccess(tag) { + watcherId, err = u.watchOneUnitConfigSettings(tag) + } + result.Results[i].NotifyWatcherId = watcherId + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// WatchActionNotifications returns a StringsWatcher for observing +// incoming action calls to a unit. 
See also state/watcher.go
+// Unit.WatchActionNotifications(). This method is called from
+// api/uniter/uniter.go WatchActionNotifications().
+func (u *UniterAPIV3) WatchActionNotifications(args params.Entities) (params.StringsWatchResults, error) {
+	nothing := params.StringsWatchResults{}
+
+	result := params.StringsWatchResults{
+		Results: make([]params.StringsWatchResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return nothing, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			return nothing, err
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			result.Results[i], err = u.watchOneUnitActionNotifications(tag)
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// ConfigSettings returns the complete set of service charm config
+// settings available to each given unit.
+func (u *UniterAPIV3) ConfigSettings(args params.Entities) (params.ConfigSettingsResults, error) {
+	result := params.ConfigSettingsResults{
+		Results: make([]params.ConfigSettingsResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.ConfigSettingsResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseUnitTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			var unit *state.Unit
+			unit, err = u.getUnit(tag)
+			if err == nil {
+				var settings charm.Settings
+				settings, err = unit.ConfigSettings()
+				if err == nil {
+					result.Results[i].Settings = params.ConfigSettings(settings)
+				}
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// WatchServiceRelations returns a StringsWatcher, for each given
+// service, that notifies of changes to the lifecycles of relations
+// involving that service.
+func (u *UniterAPIV3) WatchServiceRelations(args params.Entities) (params.StringsWatchResults, error) {
+	result := params.StringsWatchResults{
+		Results: make([]params.StringsWatchResult, len(args.Entities)),
+	}
+	canAccess, err := u.accessService()
+	if err != nil {
+		return params.StringsWatchResults{}, err
+	}
+	for i, entity := range args.Entities {
+		tag, err := names.ParseServiceTag(entity.Tag)
+		if err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		err = common.ErrPerm
+		if canAccess(tag) {
+			result.Results[i], err = u.watchOneServiceRelations(tag)
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// CharmArchiveSha256 returns the SHA256 digest of the charm archive
+// (bundle) data for each charm URL in the given parameters.
+func (u *UniterAPIV3) CharmArchiveSha256(args params.CharmURLs) (params.StringResults, error) {
+	result := params.StringResults{
+		Results: make([]params.StringResult, len(args.URLs)),
+	}
+	for i, arg := range args.URLs {
+		curl, err := charm.ParseURL(arg.URL)
+		if err != nil {
+			err = common.ErrPerm
+		} else {
+			var sch *state.Charm
+			sch, err = u.st.Charm(curl)
+			if errors.IsNotFound(err) {
+				err = common.ErrPerm
+			}
+			if err == nil {
+				result.Results[i].Result = sch.BundleSha256()
+			}
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
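+
+// A downloaded archive can be verified against this digest with the standard
+// library. A minimal sketch, where archiveBytes and digest stand in for the
+// downloaded data and the result of this call:
+//
+//	sum := sha256.Sum256(archiveBytes)
+//	if hex.EncodeToString(sum[:]) != digest {
+//		// the archive is corrupt or stale
+//	}
+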
+// CharmArchiveURLs returns the URLs for the charm archive
+// (bundle) data for each charm URL in the given parameters.
+func (u *UniterAPIV3) CharmArchiveURLs(args params.CharmURLs) (params.StringsResults, error) {
+	apiHostPorts, err := u.st.APIHostPorts()
+	if err != nil {
+		return params.StringsResults{}, err
+	}
+	modelUUID := u.st.ModelUUID()
+	result := params.StringsResults{
+		Results: make([]params.StringsResult, len(args.URLs)),
+	}
+	for i, curl := range args.URLs {
+		if _, err := charm.ParseURL(curl.URL); err != nil {
+			result.Results[i].Error = common.ServerError(common.ErrPerm)
+			continue
+		}
+		urlPath := "/"
+		if modelUUID != "" {
+			urlPath = path.Join(urlPath, "model", modelUUID)
+		}
+		urlPath = path.Join(urlPath, "charms")
+		archiveURLs := make([]string, len(apiHostPorts))
+		for j, server := range apiHostPorts {
+			archiveURL := &url.URL{
+				Scheme: "https",
+				Host:   network.SelectInternalHostPort(server, false),
+				Path:   urlPath,
+			}
+			q := archiveURL.Query()
+			q.Set("url", curl.URL)
+			q.Set("file", "*")
+			archiveURL.RawQuery = q.Encode()
+			archiveURLs[j] = archiveURL.String()
+		}
+		result.Results[i].Result = archiveURLs
+	}
+	return result, nil
+}
+
+// Relation returns information about all given relation/unit pairs,
+// including their id, key and the local endpoint.
+func (u *UniterAPIV3) Relation(args params.RelationUnits) (params.RelationResults, error) {
+	result := params.RelationResults{
+		Results: make([]params.RelationResult, len(args.RelationUnits)),
+	}
+	canAccess, err := u.accessUnit()
+	if err != nil {
+		return params.RelationResults{}, err
+	}
+	for i, rel := range args.RelationUnits {
+		relParams, err := u.getOneRelation(canAccess, rel.Relation, rel.Unit)
+		if err == nil {
+			result.Results[i] = relParams
+		}
+		result.Results[i].Error = common.ServerError(err)
+	}
+	return result, nil
+}
+
+// Actions returns the Actions by Tags passed and ensures that the Unit asking
+// for them is the same Unit that has the Actions.
+func (u *UniterAPIV3) Actions(args params.Entities) (params.ActionsQueryResults, error) {
+	nothing := params.ActionsQueryResults{}
+
+	actionFn, err := u.authAndActionFromTagFn()
+	if err != nil {
+		return nothing, err
+	}
+
+	results := params.ActionsQueryResults{
+		Results: make([]params.ActionsQueryResult, len(args.Entities)),
+	}
+
+	for i, arg := range args.Entities {
+		action, err := actionFn(arg.Tag)
+		if err != nil {
+			results.Results[i].Error = common.ServerError(err)
+			continue
+		}
+		if action.Status() != state.ActionPending {
+			results.Results[i].Error = common.ServerError(common.ErrActionNotAvailable)
+			continue
+		}
+		results.Results[i].Action.Action = &params.Action{
+			Name:       action.Name(),
+			Parameters: action.Parameters(),
+		}
+	}
+
+	return results, nil
+}
+
+// BeginActions marks the actions represented by the passed-in Tags as running.
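+// Only actions still in the pending state can be begun; the Actions call
+// above likewise refuses to return an action that is no longer pending.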
+func (u *UniterAPIV3) BeginActions(args params.Entities) (params.ErrorResults, error) { + nothing := params.ErrorResults{} + + actionFn, err := u.authAndActionFromTagFn() + if err != nil { + return nothing, err + } + + results := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Entities))} + + for i, arg := range args.Entities { + action, err := actionFn(arg.Tag) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + + _, err = action.Begin() + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + } + + return results, nil +} + +// FinishActions saves the result of a completed Action +func (u *UniterAPIV3) FinishActions(args params.ActionExecutionResults) (params.ErrorResults, error) { + nothing := params.ErrorResults{} + + actionFn, err := u.authAndActionFromTagFn() + if err != nil { + return nothing, err + } + + results := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Results))} + + for i, arg := range args.Results { + action, err := actionFn(arg.ActionTag) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + actionResults, err := paramsActionExecutionResultsToStateActionResults(arg) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + + _, err = action.Finish(actionResults) + if err != nil { + results.Results[i].Error = common.ServerError(err) + continue + } + } + + return results, nil +} + +// paramsActionExecutionResultsToStateActionResults does exactly what +// the name implies. +func paramsActionExecutionResultsToStateActionResults(arg params.ActionExecutionResult) (state.ActionResults, error) { + var status state.ActionStatus + switch arg.Status { + case params.ActionCancelled: + status = state.ActionCancelled + case params.ActionCompleted: + status = state.ActionCompleted + case params.ActionFailed: + status = state.ActionFailed + case params.ActionPending: + status = state.ActionPending + default: + return state.ActionResults{}, errors.Errorf("unrecognized action status '%s'", arg.Status) + } + return state.ActionResults{ + Status: status, + Results: arg.Results, + Message: arg.Message, + }, nil +} + +// RelationById returns information about all given relations, +// specified by their ids, including their key and the local +// endpoint. +func (u *UniterAPIV3) RelationById(args params.RelationIds) (params.RelationResults, error) { + result := params.RelationResults{ + Results: make([]params.RelationResult, len(args.RelationIds)), + } + for i, relId := range args.RelationIds { + relParams, err := u.getOneRelationById(relId) + if err == nil { + result.Results[i] = relParams + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// JoinedRelations returns the tags of all relations for which each supplied unit +// has entered scope. It should be called RelationsInScope, but it's not convenient +// to make that change until we have versioned APIs. 
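+// Each result holds, as strings, the relation tags for one supplied unit.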
+func (u *UniterAPIV3) JoinedRelations(args params.Entities) (params.StringsResults, error) { + result := params.StringsResults{ + Results: make([]params.StringsResult, len(args.Entities)), + } + if len(args.Entities) == 0 { + return result, nil + } + canRead, err := u.accessUnit() + if err != nil { + return params.StringsResults{}, err + } + for i, entity := range args.Entities { + tag, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + if canRead(tag) { + var unit *state.Unit + unit, err = u.getUnit(tag) + if err == nil { + result.Results[i].Result, err = relationsInScopeTags(unit) + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// CurrentModel returns the name and UUID for the current juju model. +func (u *UniterAPIV3) CurrentModel() (params.ModelResult, error) { + result := params.ModelResult{} + env, err := u.st.Model() + if err == nil { + result.Name = env.Name() + result.UUID = env.UUID() + } + return result, err +} + +// ProviderType returns the provider type used by the current juju +// model. +// +// TODO(dimitern): Refactor the uniter to call this instead of calling +// ModelConfig() just to get the provider type. Once we have machine +// addresses, this might be completely unnecessary though. +func (u *UniterAPIV3) ProviderType() (params.StringResult, error) { + result := params.StringResult{} + cfg, err := u.st.ModelConfig() + if err == nil { + result.Result = cfg.Type() + } + return result, err +} + +// EnterScope ensures each unit has entered its scope in the relation, +// for all of the given relation/unit pairs. See also +// state.RelationUnit.EnterScope(). +func (u *UniterAPIV3) EnterScope(args params.RelationUnits) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.RelationUnits)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ErrorResults{}, err + } + for i, arg := range args.RelationUnits { + tag, err := names.ParseUnitTag(arg.Unit) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + relUnit, err := u.getRelationUnit(canAccess, arg.Relation, tag) + if err == nil { + // Construct the settings, passing the unit's + // private address (we already know it). + privateAddress, _ := relUnit.PrivateAddress() + settings := map[string]interface{}{ + "private-address": privateAddress.Value, + } + err = relUnit.EnterScope(settings) + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// LeaveScope signals each unit has left its scope in the relation, +// for all of the given relation/unit pairs. See also +// state.RelationUnit.LeaveScope(). 
+func (u *UniterAPIV3) LeaveScope(args params.RelationUnits) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.RelationUnits)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ErrorResults{}, err + } + for i, arg := range args.RelationUnits { + unit, err := names.ParseUnitTag(arg.Unit) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) + if err == nil { + err = relUnit.LeaveScope() + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// ReadSettings returns the local settings of each given set of +// relation/unit. +func (u *UniterAPIV3) ReadSettings(args params.RelationUnits) (params.SettingsResults, error) { + result := params.SettingsResults{ + Results: make([]params.SettingsResult, len(args.RelationUnits)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.SettingsResults{}, err + } + for i, arg := range args.RelationUnits { + unit, err := names.ParseUnitTag(arg.Unit) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) + if err == nil { + var settings *state.Settings + settings, err = relUnit.Settings() + if err == nil { + result.Results[i].Settings, err = convertRelationSettings(settings.Map()) + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// ReadRemoteSettings returns the remote settings of each given set of +// relation/local unit/remote unit. +func (u *UniterAPIV3) ReadRemoteSettings(args params.RelationUnitPairs) (params.SettingsResults, error) { + result := params.SettingsResults{ + Results: make([]params.SettingsResult, len(args.RelationUnitPairs)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.SettingsResults{}, err + } + for i, arg := range args.RelationUnitPairs { + unit, err := names.ParseUnitTag(arg.LocalUnit) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) + if err == nil { + // TODO(dfc) rework this logic + remoteUnit := "" + remoteUnit, err = u.checkRemoteUnit(relUnit, arg.RemoteUnit) + if err == nil { + var settings map[string]interface{} + settings, err = relUnit.ReadSettings(remoteUnit) + if err == nil { + result.Results[i].Settings, err = convertRelationSettings(settings) + } + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// UpdateSettings persists all changes made to the local settings of +// all given pairs of relation and unit. Keys with empty values are +// considered a signal to delete these values. 
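+// For example, the settings {"password": "xyzzy", "stale-key": ""} set
+// "password" and delete "stale-key".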
+func (u *UniterAPIV3) UpdateSettings(args params.RelationUnitsSettings) (params.ErrorResults, error) { + result := params.ErrorResults{ + Results: make([]params.ErrorResult, len(args.RelationUnits)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.ErrorResults{}, err + } + for i, arg := range args.RelationUnits { + unit, err := names.ParseUnitTag(arg.Unit) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) + if err == nil { + var settings *state.Settings + settings, err = relUnit.Settings() + if err == nil { + for k, v := range arg.Settings { + if v == "" { + settings.Delete(k) + } else { + settings.Set(k, v) + } + } + _, err = settings.Write() + } + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// WatchRelationUnits returns a RelationUnitsWatcher for observing +// changes to every unit in the supplied relation that is visible to +// the supplied unit. See also state/watcher.go:RelationUnit.Watch(). +func (u *UniterAPIV3) WatchRelationUnits(args params.RelationUnits) (params.RelationUnitsWatchResults, error) { + result := params.RelationUnitsWatchResults{ + Results: make([]params.RelationUnitsWatchResult, len(args.RelationUnits)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.RelationUnitsWatchResults{}, err + } + for i, arg := range args.RelationUnits { + unit, err := names.ParseUnitTag(arg.Unit) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) + if err == nil { + result.Results[i], err = u.watchOneRelationUnit(relUnit) + } + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +// WatchUnitAddresses returns a NotifyWatcher for observing changes +// to each unit's addresses. 
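+// The watcher actually observes the unit's assigned machine, since a
+// unit's addresses are those of its machine.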
+func (u *UniterAPIV3) WatchUnitAddresses(args params.Entities) (params.NotifyWatchResults, error) { + result := params.NotifyWatchResults{ + Results: make([]params.NotifyWatchResult, len(args.Entities)), + } + canAccess, err := u.accessUnit() + if err != nil { + return params.NotifyWatchResults{}, err + } + for i, entity := range args.Entities { + unit, err := names.ParseUnitTag(entity.Tag) + if err != nil { + result.Results[i].Error = common.ServerError(common.ErrPerm) + continue + } + err = common.ErrPerm + watcherId := "" + if canAccess(unit) { + watcherId, err = u.watchOneUnitAddresses(unit) + } + result.Results[i].NotifyWatcherId = watcherId + result.Results[i].Error = common.ServerError(err) + } + return result, nil +} + +func (u *UniterAPIV3) getUnit(tag names.UnitTag) (*state.Unit, error) { + return u.st.Unit(tag.Id()) +} + +func (u *UniterAPIV3) getService(tag names.ServiceTag) (*state.Service, error) { + return u.st.Service(tag.Id()) +} + +func (u *UniterAPIV3) getRelationUnit(canAccess common.AuthFunc, relTag string, unitTag names.UnitTag) (*state.RelationUnit, error) { + rel, unit, err := u.getRelationAndUnit(canAccess, relTag, unitTag) + if err != nil { + return nil, err + } + return rel.Unit(unit) +} + +func (u *UniterAPIV3) getOneRelationById(relId int) (params.RelationResult, error) { + nothing := params.RelationResult{} + rel, err := u.st.Relation(relId) + if errors.IsNotFound(err) { + return nothing, common.ErrPerm + } else if err != nil { + return nothing, err + } + tag := u.auth.GetAuthTag() + switch tag.(type) { + case names.UnitTag: + // do nothing + default: + panic("authenticated entity is not a unit") + } + unit, err := u.st.FindEntity(tag) + if err != nil { + return nothing, err + } + // Use the currently authenticated unit to get the endpoint. + result, err := u.prepareRelationResult(rel, unit.(*state.Unit)) + if err != nil { + // An error from prepareRelationResult means the authenticated + // unit's service is not part of the requested + // relation. That's why it's appropriate to return ErrPerm + // here. + return nothing, common.ErrPerm + } + return result, nil +} + +func (u *UniterAPIV3) getRelationAndUnit(canAccess common.AuthFunc, relTag string, unitTag names.UnitTag) (*state.Relation, *state.Unit, error) { + tag, err := names.ParseRelationTag(relTag) + if err != nil { + return nil, nil, common.ErrPerm + } + rel, err := u.st.KeyRelation(tag.Id()) + if errors.IsNotFound(err) { + return nil, nil, common.ErrPerm + } else if err != nil { + return nil, nil, err + } + if !canAccess(unitTag) { + return nil, nil, common.ErrPerm + } + unit, err := u.getUnit(unitTag) + return rel, unit, err +} + +func (u *UniterAPIV3) prepareRelationResult(rel *state.Relation, unit *state.Unit) (params.RelationResult, error) { + nothing := params.RelationResult{} + ep, err := rel.Endpoint(unit.ServiceName()) + if err != nil { + // An error here means the unit's service is not part of the + // relation. 
+ return nothing, err + } + return params.RelationResult{ + Id: rel.Id(), + Key: rel.String(), + Life: params.Life(rel.Life().String()), + Endpoint: multiwatcher.Endpoint{ + ServiceName: ep.ServiceName, + Relation: ep.Relation, + }, + }, nil +} + +func (u *UniterAPIV3) getOneRelation(canAccess common.AuthFunc, relTag, unitTag string) (params.RelationResult, error) { + nothing := params.RelationResult{} + tag, err := names.ParseUnitTag(unitTag) + if err != nil { + return nothing, common.ErrPerm + } + rel, unit, err := u.getRelationAndUnit(canAccess, relTag, tag) + if err != nil { + return nothing, err + } + return u.prepareRelationResult(rel, unit) +} + +func (u *UniterAPIV3) destroySubordinates(principal *state.Unit) error { + subordinates := principal.SubordinateNames() + for _, subName := range subordinates { + unit, err := u.getUnit(names.NewUnitTag(subName)) + if err != nil { + return err + } + if err = unit.Destroy(); err != nil { + return err + } + } + return nil +} + +func (u *UniterAPIV3) watchOneServiceRelations(tag names.ServiceTag) (params.StringsWatchResult, error) { + nothing := params.StringsWatchResult{} + service, err := u.getService(tag) + if err != nil { + return nothing, err + } + watch := service.WatchRelations() + // Consume the initial event and forward it to the result. + if changes, ok := <-watch.Changes(); ok { + return params.StringsWatchResult{ + StringsWatcherId: u.resources.Register(watch), + Changes: changes, + }, nil + } + return nothing, watcher.EnsureErr(watch) +} + +func (u *UniterAPIV3) watchOneUnitConfigSettings(tag names.UnitTag) (string, error) { + unit, err := u.getUnit(tag) + if err != nil { + return "", err + } + watch, err := unit.WatchConfigSettings() + if err != nil { + return "", err + } + // Consume the initial event. Technically, API + // calls to Watch 'transmit' the initial event + // in the Watch response. But NotifyWatchers + // have no state to transmit. + if _, ok := <-watch.Changes(); ok { + return u.resources.Register(watch), nil + } + return "", watcher.EnsureErr(watch) +} + +func (u *UniterAPIV3) watchOneUnitActionNotifications(tag names.UnitTag) (params.StringsWatchResult, error) { + nothing := params.StringsWatchResult{} + unit, err := u.getUnit(tag) + if err != nil { + return nothing, err + } + watch := unit.WatchActionNotifications() + + if changes, ok := <-watch.Changes(); ok { + return params.StringsWatchResult{ + StringsWatcherId: u.resources.Register(watch), + Changes: changes, + }, nil + } + return nothing, watcher.EnsureErr(watch) +} + +func (u *UniterAPIV3) watchOneUnitAddresses(tag names.UnitTag) (string, error) { + unit, err := u.getUnit(tag) + if err != nil { + return "", err + } + machineId, err := unit.AssignedMachineId() + if err != nil { + return "", err + } + machine, err := u.st.Machine(machineId) + if err != nil { + return "", err + } + watch := machine.WatchAddresses() + // Consume the initial event. Technically, API + // calls to Watch 'transmit' the initial event + // in the Watch response. But NotifyWatchers + // have no state to transmit. + if _, ok := <-watch.Changes(); ok { + return u.resources.Register(watch), nil + } + return "", watcher.EnsureErr(watch) +} + +func (u *UniterAPIV3) watchOneRelationUnit(relUnit *state.RelationUnit) (params.RelationUnitsWatchResult, error) { + watch := relUnit.Watch() + // Consume the initial event and forward it to the result. 
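+ // As with the other watchers above, the initial event is 'transmitted'
+ // as part of the Watch response rather than through the watcher itself.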
+ if changes, ok := <-watch.Changes(); ok { + return params.RelationUnitsWatchResult{ + RelationUnitsWatcherId: u.resources.Register(watch), + Changes: changes, + }, nil + } + return params.RelationUnitsWatchResult{}, watcher.EnsureErr(watch) +} + +func (u *UniterAPIV3) checkRemoteUnit(relUnit *state.RelationUnit, remoteUnitTag string) (string, error) { + // Make sure the unit is indeed remote. + if remoteUnitTag == u.auth.GetAuthTag().String() { + return "", common.ErrPerm + } + // Check remoteUnit is indeed related. Note that we don't want to actually get + // the *Unit, because it might have been removed; but its relation settings will + // persist until the relation itself has been removed (and must remain accessible + // because the local unit's view of reality may be time-shifted). + tag, err := names.ParseUnitTag(remoteUnitTag) + if err != nil { + return "", common.ErrPerm + } + remoteUnitName := tag.Id() + remoteServiceName, err := names.UnitService(remoteUnitName) + if err != nil { + return "", common.ErrPerm + } + rel := relUnit.Relation() + _, err = rel.RelatedEndpoints(remoteServiceName) + if err != nil { + return "", common.ErrPerm + } + return remoteUnitName, nil +} + +// authAndActionFromTagFn first authenticates the request, and then returns +// a function with which to authenticate and retrieve each action in the +// request. +func (u *UniterAPIV3) authAndActionFromTagFn() (func(string) (*state.Action, error), error) { + canAccess, err := u.accessUnit() + if err != nil { + return nil, err + } + unit, ok := u.auth.GetAuthTag().(names.UnitTag) + if !ok { + return nil, fmt.Errorf("calling entity is not a unit") + } + + return func(tag string) (*state.Action, error) { + actionTag, err := names.ParseActionTag(tag) + if err != nil { + return nil, err + } + action, err := u.st.ActionByTag(actionTag) + if err != nil { + return nil, err + } + receiverTag, err := names.ActionReceiverTag(action.Receiver()) + if err != nil { + return nil, err + } + if unit != receiverTag { + return nil, common.ErrPerm + } + + if !canAccess(receiverTag) { + return nil, common.ErrPerm + } + return action, nil + }, nil +} + +func convertRelationSettings(settings map[string]interface{}) (params.Settings, error) { + result := make(params.Settings) + for k, v := range settings { + // All relation settings should be strings. 
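+ // A non-string value is reported as an error rather than silently
+ // coerced, since it indicates a bug in whatever wrote the settings.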
+ sval, ok := v.(string)
+ if !ok {
+ return nil, fmt.Errorf("unexpected relation setting %q: expected string, got %T", k, v)
+ }
+ result[k] = sval
+ }
+ return result, nil
+}
+
+func relationsInScopeTags(unit *state.Unit) ([]string, error) {
+ relations, err := unit.RelationsInScope()
+ if err != nil {
+ return nil, err
+ }
+ tags := make([]string, len(relations))
+ for i, relation := range relations {
+ tags[i] = relation.Tag().String()
+ }
+ return tags, nil
+}
+
+func leadershipSettingsAccessorFactory(
+ st *state.State,
+ resources *common.Resources,
+ auth common.Authorizer,
+) *leadershipapiserver.LeadershipSettingsAccessor {
+ registerWatcher := func(serviceId string) (string, error) {
+ service, err := st.Service(serviceId)
+ if err != nil {
+ return "", err
+ }
+ w := service.WatchLeaderSettings()
+ if _, ok := <-w.Changes(); ok {
+ return resources.Register(w), nil
+ }
+ return "", watcher.EnsureErr(w)
+ }
+ getSettings := func(serviceId string) (map[string]string, error) {
+ service, err := st.Service(serviceId)
+ if err != nil {
+ return nil, err
+ }
+ return service.LeaderSettings()
+ }
+ writeSettings := func(token leadership.Token, serviceId string, settings map[string]string) error {
+ service, err := st.Service(serviceId)
+ if err != nil {
+ return err
+ }
+ return service.UpdateLeaderSettings(token, settings)
+ }
+ return leadershipapiserver.NewLeadershipSettingsAccessor(
+ auth,
+ registerWatcher,
+ getSettings,
+ st.LeadershipChecker().LeadershipCheck,
+ writeSettings,
+ )
+}
+
+// AddMetricBatches adds the metrics for the specified units.
+func (u *UniterAPIV3) AddMetricBatches(args params.MetricBatchParams) (params.ErrorResults, error) {
+ result := params.ErrorResults{
+ Results: make([]params.ErrorResult, len(args.Batches)),
+ }
+ canAccess, err := u.accessUnit()
+ if err != nil {
+ logger.Warningf("failed to check unit access: %v", err)
+ return params.ErrorResults{}, common.ErrPerm
+ }
+ for i, batch := range args.Batches {
+ tag, err := names.ParseUnitTag(batch.Tag)
+ if err != nil {
+ result.Results[i].Error = common.ServerError(err)
+ continue
+ }
+ if !canAccess(tag) {
+ result.Results[i].Error = common.ServerError(common.ErrPerm)
+ continue
+ }
+ metrics := make([]state.Metric, len(batch.Batch.Metrics))
+ for j, metric := range batch.Batch.Metrics {
+ metrics[j] = state.Metric{
+ Key: metric.Key,
+ Value: metric.Value,
+ Time: metric.Time,
+ }
+ }
+ _, err = u.st.AddMetrics(state.BatchParam{
+ UUID: batch.Batch.UUID,
+ Created: batch.Batch.Created,
+ CharmURL: batch.Batch.CharmURL,
+ Metrics: metrics,
+ Unit: tag,
+ })
+ result.Results[i].Error = common.ServerError(err)
+ }
+ return result, nil
+}
+
+// NetworkConfig returns the network configuration for each given
+// relation/unit pair: the provider addresses of the machine hosting
+// the unit that are in the space bound to the relation's endpoint, or
+// the machine's preferred private address when the endpoint is not
+// bound to a space.
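+//
+// A sketch of a request (tags are illustrative only):
+//
+//	args := params.RelationUnits{
+//		RelationUnits: []params.RelationUnit{{
+//			Relation: "relation-wordpress.db#mysql.server",
+//			Unit:     "unit-wordpress-0",
+//		}},
+//	}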
+func (u *UniterAPIV3) NetworkConfig(args params.RelationUnits) (params.UnitNetworkConfigResults, error) {
+ result := params.UnitNetworkConfigResults{
+ Results: make([]params.UnitNetworkConfigResult, len(args.RelationUnits)),
+ }
+
+ canAccess, err := u.accessUnit()
+ if err != nil {
+ return params.UnitNetworkConfigResults{}, err
+ }
+
+ for i, rel := range args.RelationUnits {
+ netConfig, err := u.getOneNetworkConfig(canAccess, rel.Relation, rel.Unit)
+ if err == nil {
+ result.Results[i].Error = nil
+ result.Results[i].Config = netConfig
+ } else {
+ result.Results[i].Error = common.ServerError(err)
+ }
+ }
+ return result, nil
+}
+
+func (u *UniterAPIV3) getOneNetworkConfig(canAccess common.AuthFunc, tagRel, tagUnit string) ([]params.NetworkConfig, error) {
+ unitTag, err := names.ParseUnitTag(tagUnit)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if !canAccess(unitTag) {
+ return nil, common.ErrPerm
+ }
+
+ relTag, err := names.ParseRelationTag(tagRel)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ unit, err := u.getUnit(unitTag)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ service, err := unit.Service()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ bindings, err := service.EndpointBindings()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ rel, err := u.st.KeyRelation(relTag.Id())
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ endpoint, err := rel.Endpoint(service.Name())
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ machineID, err := unit.AssignedMachineId()
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ machine, err := u.st.Machine(machineID)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ var results []params.NetworkConfig
+
+ boundSpace, isBound := bindings[endpoint.Name]
+ if !isBound || boundSpace == "" {
+ name := endpoint.Name
+ logger.Debugf("endpoint %q not explicitly bound to a space, using preferred private address for machine %q", name, machineID)
+
+ privateAddress, err := machine.PrivateAddress()
+ if err != nil && !network.IsNoAddress(err) {
+ return nil, errors.Annotatef(err, "getting machine %q preferred private address", machineID)
+ }
+
+ results = append(results, params.NetworkConfig{
+ Address: privateAddress.Value,
+ })
+ return results, nil
+ }
+ logger.Debugf("endpoint %q is explicitly bound to space %q", endpoint.Name, boundSpace)
+
+ // TODO(dimitern): Use NetworkInterfaces() instead later, this is just for
+ // the PoC to enable minimal network-get implementation returning just the
+ // primary address.
+ //
+ // LKK Card: https://canonical.leankit.com/Boards/View/101652562/119258804
+ addresses := machine.ProviderAddresses()
+ logger.Infof(
+ "getting network config for machine %q with addresses %+v, hosting unit %q of service %q, with bindings %+v",
+ machineID, addresses, unit.Name(), service.Name(), bindings,
+ )
+
+ for _, addr := range addresses {
+ space := string(addr.SpaceName)
+ if space != boundSpace {
+ logger.Debugf("skipping address %q: want bound to space %q, got space %q", addr.Value, boundSpace, space)
+ continue
+ }
+ logger.Debugf("endpoint %q bound to space %q has address %q", endpoint.Name, boundSpace, addr.Value)
+
+ // TODO(dimitern): Fill in the rest later (see linked LKK card above).
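+ // For now only Address is populated; the remaining NetworkConfig
+ // fields stay at their zero values until the TODO above is done.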
+ results = append(results, params.NetworkConfig{ + Address: addr.Value, + }) + } + + return results, nil +} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_base.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_base.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_base.go 1970-01-01 00:00:00 +0000 @@ -1,1590 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The uniter package implements the API interface used by the uniter -// worker. This file contains code common to all API versions. -package uniter - -import ( - "fmt" - "net/url" - "path" - - "github.com/juju/errors" - "github.com/juju/names" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/common" - leadershipapiserver "github.com/juju/juju/apiserver/leadership" - "github.com/juju/juju/apiserver/meterstatus" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/leadership" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/state/watcher" -) - -// Note that the charm payloads component has its own unit-based facade -// that the payload-related hook context commands use. See -// workload/api/internal and component/all/workloads.go. - -// uniterBaseAPI implements common methods used by all API versions, -// and it's intended for embedding. -type uniterBaseAPI struct { - *common.LifeGetter - *StatusAPI - *common.DeadEnsurer - *common.AgentEntityWatcher - *common.APIAddresser - *common.EnvironWatcher - *common.RebootRequester - *leadershipapiserver.LeadershipSettingsAccessor - - st *state.State - auth common.Authorizer - resources *common.Resources - accessUnit common.GetAuthFunc - accessService common.GetAuthFunc - unit *state.Unit -} - -// newUniterBaseAPI creates a new instance of the uniter base API. 
-func newUniterBaseAPI(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*uniterBaseAPI, error) { - if !authorizer.AuthUnitAgent() { - return nil, common.ErrPerm - } - var unit *state.Unit - var err error - switch tag := authorizer.GetAuthTag().(type) { - case names.UnitTag: - unit, err = st.Unit(tag.Id()) - if err != nil { - return nil, errors.Trace(err) - } - default: - return nil, errors.Errorf("expected names.UnitTag, got %T", tag) - } - accessUnit := func() (common.AuthFunc, error) { - return authorizer.AuthOwner, nil - } - accessService := func() (common.AuthFunc, error) { - switch tag := authorizer.GetAuthTag().(type) { - case names.UnitTag: - entity, err := st.Unit(tag.Id()) - if err != nil { - return nil, errors.Trace(err) - } - serviceName := entity.ServiceName() - serviceTag := names.NewServiceTag(serviceName) - return func(tag names.Tag) bool { - return tag == serviceTag - }, nil - default: - return nil, errors.Errorf("expected names.UnitTag, got %T", tag) - } - } - accessMachine := func() (common.AuthFunc, error) { - machineId, err := unit.AssignedMachineId() - if err != nil { - return nil, errors.Trace(err) - } - machine, err := st.Machine(machineId) - if err != nil { - return nil, errors.Trace(err) - } - return func(tag names.Tag) bool { - return tag == machine.Tag() - }, nil - } - - accessUnitOrService := common.AuthEither(accessUnit, accessService) - return &uniterBaseAPI{ - LifeGetter: common.NewLifeGetter(st, accessUnitOrService), - DeadEnsurer: common.NewDeadEnsurer(st, accessUnit), - AgentEntityWatcher: common.NewAgentEntityWatcher(st, resources, accessUnitOrService), - APIAddresser: common.NewAPIAddresser(st, resources), - EnvironWatcher: common.NewEnvironWatcher(st, resources, authorizer), - RebootRequester: common.NewRebootRequester(st, accessMachine), - LeadershipSettingsAccessor: leadershipSettingsAccessorFactory(st, resources, authorizer), - - // TODO(fwereade): so *every* unit should be allowed to get/set its - // own status *and* its service's? This is not a pleasing arrangement. - StatusAPI: NewStatusAPI(st, accessUnitOrService), - - st: st, - auth: authorizer, - resources: resources, - accessUnit: accessUnit, - accessService: accessService, - unit: unit, - }, nil -} - -// PublicAddress returns the public address for each given unit, if set. -func (u *uniterBaseAPI) PublicAddress(args params.Entities) (params.StringResults, error) { - result := params.StringResults{ - Results: make([]params.StringResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.StringResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - if canAccess(tag) { - var unit *state.Unit - unit, err = u.getUnit(tag) - if err == nil { - var address network.Address - address, err = unit.PublicAddress() - if err == nil { - result.Results[i].Result = address.Value - } else if network.IsNoAddress(err) { - err = common.NoAddressSetError(tag, "public") - } - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// PrivateAddress returns the private address for each given unit, if set. 
-func (u *uniterBaseAPI) PrivateAddress(args params.Entities) (params.StringResults, error) { - result := params.StringResults{ - Results: make([]params.StringResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.StringResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - if canAccess(tag) { - var unit *state.Unit - unit, err = u.getUnit(tag) - if err == nil { - var address network.Address - address, err = unit.PrivateAddress() - if err == nil { - result.Results[i].Result = address.Value - } else if network.IsNoAddress(err) { - err = common.NoAddressSetError(tag, "private") - } - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// TODO(ericsnow) Factor out the common code amongst the many methods here. - -var getZone = func(st *state.State, tag names.Tag) (string, error) { - unit, err := st.Unit(tag.Id()) - if err != nil { - return "", errors.Trace(err) - } - zone, err := unit.AvailabilityZone() - return zone, errors.Trace(err) -} - -// AvailabilityZone returns the availability zone for each given unit, if applicable. -func (u *uniterBaseAPI) AvailabilityZone(args params.Entities) (params.StringResults, error) { - var results params.StringResults - - canAccess, err := u.accessUnit() - if err != nil { - return results, errors.Trace(err) - } - - // Prep the results. - results = params.StringResults{ - Results: make([]params.StringResult, len(args.Entities)), - } - - // Collect the zones. No zone will be collected for any entity where - // the tag is invalid or not authorized. Instead the corresponding - // result will be updated with the error. - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - results.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - if canAccess(tag) { - var zone string - zone, err = getZone(u.st, tag) - if err == nil { - results.Results[i].Result = zone - } - } - results.Results[i].Error = common.ServerError(err) - } - - return results, nil -} - -// Resolved returns the current resolved setting for each given unit. -func (u *uniterBaseAPI) Resolved(args params.Entities) (params.ResolvedModeResults, error) { - result := params.ResolvedModeResults{ - Results: make([]params.ResolvedModeResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.ResolvedModeResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - if canAccess(tag) { - var unit *state.Unit - unit, err = u.getUnit(tag) - if err == nil { - result.Results[i].Mode = params.ResolvedMode(unit.Resolved()) - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// ClearResolved removes any resolved setting from each given unit. 
-func (u *uniterBaseAPI) ClearResolved(args params.Entities) (params.ErrorResults, error) {
- result := params.ErrorResults{
- Results: make([]params.ErrorResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ErrorResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- err = unit.ClearResolved()
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// GetPrincipal returns the result of calling PrincipalName() and
-// converting it to a tag, on each given unit.
-func (u *uniterBaseAPI) GetPrincipal(args params.Entities) (params.StringBoolResults, error) {
- result := params.StringBoolResults{
- Results: make([]params.StringBoolResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.StringBoolResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- principal, ok := unit.PrincipalName()
- if principal != "" {
- result.Results[i].Result = names.NewUnitTag(principal).String()
- }
- result.Results[i].Ok = ok
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// Destroy advances all given Alive units' lifecycles as far as
-// possible. See state/Unit.Destroy().
-func (u *uniterBaseAPI) Destroy(args params.Entities) (params.ErrorResults, error) {
- result := params.ErrorResults{
- Results: make([]params.ErrorResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ErrorResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- err = unit.Destroy()
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// DestroyAllSubordinates destroys all subordinates of each given unit.
-func (u *uniterBaseAPI) DestroyAllSubordinates(args params.Entities) (params.ErrorResults, error) {
- result := params.ErrorResults{
- Results: make([]params.ErrorResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ErrorResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- err = u.destroySubordinates(unit)
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// HasSubordinates returns whether each given unit has any subordinates.
-func (u *uniterBaseAPI) HasSubordinates(args params.Entities) (params.BoolResults, error) {
- result := params.BoolResults{
- Results: make([]params.BoolResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.BoolResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- subordinates := unit.SubordinateNames()
- result.Results[i].Result = len(subordinates) > 0
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// CharmURL returns the charm URL for all given units or services.
-func (u *uniterBaseAPI) CharmURL(args params.Entities) (params.StringBoolResults, error) {
- result := params.StringBoolResults{
- Results: make([]params.StringBoolResult, len(args.Entities)),
- }
- accessUnitOrService := common.AuthEither(u.accessUnit, u.accessService)
- canAccess, err := accessUnitOrService()
- if err != nil {
- return params.StringBoolResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unitOrService state.Entity
- unitOrService, err = u.st.FindEntity(tag)
- if err == nil {
- charmURLer := unitOrService.(interface {
- CharmURL() (*charm.URL, bool)
- })
- curl, ok := charmURLer.CharmURL()
- if curl != nil {
- result.Results[i].Result = curl.String()
- result.Results[i].Ok = ok
- }
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// SetCharmURL sets the charm URL for each given unit. An error will
-// be returned if a unit is dead, or the charm URL is not known.
-func (u *uniterBaseAPI) SetCharmURL(args params.EntitiesCharmURL) (params.ErrorResults, error) {
- result := params.ErrorResults{
- Results: make([]params.ErrorResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ErrorResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- var curl *charm.URL
- curl, err = charm.ParseURL(entity.CharmURL)
- if err == nil {
- err = unit.SetCharmURL(curl)
- }
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// OpenPorts sets the policy of the port range with protocol to be
-// opened, for all given units.
-func (u *uniterBaseAPI) OpenPorts(args params.EntitiesPortRanges) (params.ErrorResults, error) {
- result := params.ErrorResults{
- Results: make([]params.ErrorResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ErrorResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- err = unit.OpenPorts(entity.Protocol, entity.FromPort, entity.ToPort)
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// ClosePorts sets the policy of the port range with protocol to be
-// closed, for all given units.
-func (u *uniterBaseAPI) ClosePorts(args params.EntitiesPortRanges) (params.ErrorResults, error) {
- result := params.ErrorResults{
- Results: make([]params.ErrorResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ErrorResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- err = unit.ClosePorts(entity.Protocol, entity.FromPort, entity.ToPort)
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// OpenPort sets the policy of the port with protocol and number to be
-// opened, for all given units.
-//
-// TODO(dimitern): This is deprecated and is kept for
-// backwards-compatibility. Use OpenPorts instead.
-func (u *uniterBaseAPI) OpenPort(args params.EntitiesPorts) (params.ErrorResults, error) {
- rangesArgs := params.EntitiesPortRanges{
- Entities: make([]params.EntityPortRange, len(args.Entities)),
- }
- for i, entity := range args.Entities {
- rangesArgs.Entities[i] = params.EntityPortRange{
- Tag: entity.Tag,
- Protocol: entity.Protocol,
- FromPort: entity.Port,
- ToPort: entity.Port,
- }
- }
- return u.OpenPorts(rangesArgs)
-}
-
-// ClosePort sets the policy of the port with protocol and number to
-// be closed, for all given units.
-//
-// TODO(dimitern): This is deprecated and is kept for
-// backwards-compatibility. Use ClosePorts instead.
-func (u *uniterBaseAPI) ClosePort(args params.EntitiesPorts) (params.ErrorResults, error) {
- rangesArgs := params.EntitiesPortRanges{
- Entities: make([]params.EntityPortRange, len(args.Entities)),
- }
- for i, entity := range args.Entities {
- rangesArgs.Entities[i] = params.EntityPortRange{
- Tag: entity.Tag,
- Protocol: entity.Protocol,
- FromPort: entity.Port,
- ToPort: entity.Port,
- }
- }
- return u.ClosePorts(rangesArgs)
-}
-
-// WatchConfigSettings returns a NotifyWatcher for observing changes
-// to each unit's service configuration settings. See also
-// state/watcher.go:Unit.WatchConfigSettings().
-func (u *uniterBaseAPI) WatchConfigSettings(args params.Entities) (params.NotifyWatchResults, error) { - result := params.NotifyWatchResults{ - Results: make([]params.NotifyWatchResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.NotifyWatchResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - watcherId := "" - if canAccess(tag) { - watcherId, err = u.watchOneUnitConfigSettings(tag) - } - result.Results[i].NotifyWatcherId = watcherId - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// WatchMeterStatus returns a NotifyWatcher for observing changes -// to each unit's meter status. -func (u *uniterBaseAPI) WatchMeterStatus(args params.Entities) (params.NotifyWatchResults, error) { - result := params.NotifyWatchResults{ - Results: make([]params.NotifyWatchResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.NotifyWatchResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - watcherId := "" - if canAccess(tag) { - watcherId, err = u.watchOneUnitMeterStatus(tag) - } - result.Results[i].NotifyWatcherId = watcherId - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// WatchActionNotifications returns a StringsWatcher for observing -// incoming action calls to a unit. See also state/watcher.go -// Unit.WatchActionNotifications(). This method is called from -// api/uniter/uniter.go WatchActionNotifications(). -func (u *uniterBaseAPI) WatchActionNotifications(args params.Entities) (params.StringsWatchResults, error) { - nothing := params.StringsWatchResults{} - - result := params.StringsWatchResults{ - Results: make([]params.StringsWatchResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return nothing, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - return nothing, err - } - err = common.ErrPerm - if canAccess(tag) { - result.Results[i], err = u.watchOneUnitActionNotifications(tag) - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// ConfigSettings returns the complete set of service charm config -// settings available to each given unit. 
-func (u *uniterBaseAPI) ConfigSettings(args params.Entities) (params.ConfigSettingsResults, error) {
- result := params.ConfigSettingsResults{
- Results: make([]params.ConfigSettingsResult, len(args.Entities)),
- }
- canAccess, err := u.accessUnit()
- if err != nil {
- return params.ConfigSettingsResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseUnitTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- var unit *state.Unit
- unit, err = u.getUnit(tag)
- if err == nil {
- var settings charm.Settings
- settings, err = unit.ConfigSettings()
- if err == nil {
- result.Results[i].Settings = params.ConfigSettings(settings)
- }
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// WatchServiceRelations returns a StringsWatcher, for each given
-// service, that notifies of changes to the lifecycles of relations
-// involving that service.
-func (u *uniterBaseAPI) WatchServiceRelations(args params.Entities) (params.StringsWatchResults, error) {
- result := params.StringsWatchResults{
- Results: make([]params.StringsWatchResult, len(args.Entities)),
- }
- canAccess, err := u.accessService()
- if err != nil {
- return params.StringsWatchResults{}, err
- }
- for i, entity := range args.Entities {
- tag, err := names.ParseServiceTag(entity.Tag)
- if err != nil {
- result.Results[i].Error = common.ServerError(common.ErrPerm)
- continue
- }
- err = common.ErrPerm
- if canAccess(tag) {
- result.Results[i], err = u.watchOneServiceRelations(tag)
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// CharmArchiveSha256 returns the SHA256 digest of the charm archive
-// (bundle) data for each charm url in the given parameters.
-func (u *uniterBaseAPI) CharmArchiveSha256(args params.CharmURLs) (params.StringResults, error) {
- result := params.StringResults{
- Results: make([]params.StringResult, len(args.URLs)),
- }
- for i, arg := range args.URLs {
- curl, err := charm.ParseURL(arg.URL)
- if err != nil {
- err = common.ErrPerm
- } else {
- var sch *state.Charm
- sch, err = u.st.Charm(curl)
- if errors.IsNotFound(err) {
- err = common.ErrPerm
- }
- if err == nil {
- result.Results[i].Result = sch.BundleSha256()
- }
- }
- result.Results[i].Error = common.ServerError(err)
- }
- return result, nil
-}
-
-// CharmArchiveURLs returns the URLs for the charm archive
-// (bundle) data for each charm url in the given parameters.
-func (u *uniterBaseAPI) CharmArchiveURLs(args params.CharmURLs) (params.StringsResults, error) { - apiHostPorts, err := u.st.APIHostPorts() - if err != nil { - return params.StringsResults{}, err - } - envUUID := u.st.EnvironUUID() - result := params.StringsResults{ - Results: make([]params.StringsResult, len(args.URLs)), - } - for i, curl := range args.URLs { - if _, err := charm.ParseURL(curl.URL); err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - urlPath := "/" - if envUUID != "" { - urlPath = path.Join(urlPath, "environment", envUUID) - } - urlPath = path.Join(urlPath, "charms") - archiveURLs := make([]string, len(apiHostPorts)) - for j, server := range apiHostPorts { - archiveURL := &url.URL{ - Scheme: "https", - Host: network.SelectInternalHostPort(server, false), - Path: urlPath, - } - q := archiveURL.Query() - q.Set("url", curl.URL) - q.Set("file", "*") - archiveURL.RawQuery = q.Encode() - archiveURLs[j] = archiveURL.String() - } - result.Results[i].Result = archiveURLs - } - return result, nil -} - -// Relation returns information about all given relation/unit pairs, -// including their id, key and the local endpoint. -func (u *uniterBaseAPI) Relation(args params.RelationUnits) (params.RelationResults, error) { - result := params.RelationResults{ - Results: make([]params.RelationResult, len(args.RelationUnits)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.RelationResults{}, err - } - for i, rel := range args.RelationUnits { - relParams, err := u.getOneRelation(canAccess, rel.Relation, rel.Unit) - if err == nil { - result.Results[i] = relParams - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// Actions returns the Actions by Tags passed and ensures that the Unit asking -// for them is the same Unit that has the Actions. -func (u *uniterBaseAPI) Actions(args params.Entities) (params.ActionsQueryResults, error) { - nothing := params.ActionsQueryResults{} - - actionFn, err := u.authAndActionFromTagFn() - if err != nil { - return nothing, err - } - - results := params.ActionsQueryResults{ - Results: make([]params.ActionsQueryResult, len(args.Entities)), - } - - for i, arg := range args.Entities { - action, err := actionFn(arg.Tag) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - if action.Status() != state.ActionPending { - results.Results[i].Error = common.ServerError(common.ErrActionNotAvailable) - continue - } - results.Results[i].Action.Action = ¶ms.Action{ - Name: action.Name(), - Parameters: action.Parameters(), - } - } - - return results, nil -} - -// BeginActions marks the actions represented by the passed in Tags as running. 
-func (u *uniterBaseAPI) BeginActions(args params.Entities) (params.ErrorResults, error) { - nothing := params.ErrorResults{} - - actionFn, err := u.authAndActionFromTagFn() - if err != nil { - return nothing, err - } - - results := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Entities))} - - for i, arg := range args.Entities { - action, err := actionFn(arg.Tag) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - - _, err = action.Begin() - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - } - - return results, nil -} - -// FinishActions saves the result of a completed Action -func (u *uniterBaseAPI) FinishActions(args params.ActionExecutionResults) (params.ErrorResults, error) { - nothing := params.ErrorResults{} - - actionFn, err := u.authAndActionFromTagFn() - if err != nil { - return nothing, err - } - - results := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Results))} - - for i, arg := range args.Results { - action, err := actionFn(arg.ActionTag) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - actionResults, err := paramsActionExecutionResultsToStateActionResults(arg) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - - _, err = action.Finish(actionResults) - if err != nil { - results.Results[i].Error = common.ServerError(err) - continue - } - } - - return results, nil -} - -// paramsActionExecutionResultsToStateActionResults does exactly what -// the name implies. -func paramsActionExecutionResultsToStateActionResults(arg params.ActionExecutionResult) (state.ActionResults, error) { - var status state.ActionStatus - switch arg.Status { - case params.ActionCancelled: - status = state.ActionCancelled - case params.ActionCompleted: - status = state.ActionCompleted - case params.ActionFailed: - status = state.ActionFailed - case params.ActionPending: - status = state.ActionPending - default: - return state.ActionResults{}, errors.Errorf("unrecognized action status '%s'", arg.Status) - } - return state.ActionResults{ - Status: status, - Results: arg.Results, - Message: arg.Message, - }, nil -} - -// RelationById returns information about all given relations, -// specified by their ids, including their key and the local -// endpoint. -func (u *uniterBaseAPI) RelationById(args params.RelationIds) (params.RelationResults, error) { - result := params.RelationResults{ - Results: make([]params.RelationResult, len(args.RelationIds)), - } - for i, relId := range args.RelationIds { - relParams, err := u.getOneRelationById(relId) - if err == nil { - result.Results[i] = relParams - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// JoinedRelations returns the tags of all relations for which each supplied unit -// has entered scope. It should be called RelationsInScope, but it's not convenient -// to make that change until we have versioned APIs. 
-func (u *uniterBaseAPI) JoinedRelations(args params.Entities) (params.StringsResults, error) { - result := params.StringsResults{ - Results: make([]params.StringsResult, len(args.Entities)), - } - if len(args.Entities) == 0 { - return result, nil - } - canRead, err := u.accessUnit() - if err != nil { - return params.StringsResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - if canRead(tag) { - var unit *state.Unit - unit, err = u.getUnit(tag) - if err == nil { - result.Results[i].Result, err = relationsInScopeTags(unit) - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// CurrentEnvironUUID returns the UUID for the current juju environment. -func (u *uniterBaseAPI) CurrentEnvironUUID() (params.StringResult, error) { - result := params.StringResult{} - env, err := u.st.Environment() - if err == nil { - result.Result = env.UUID() - } - return result, err -} - -// CurrentEnvironment returns the name and UUID for the current juju environment. -func (u *uniterBaseAPI) CurrentEnvironment() (params.EnvironmentResult, error) { - result := params.EnvironmentResult{} - env, err := u.st.Environment() - if err == nil { - result.Name = env.Name() - result.UUID = env.UUID() - } - return result, err -} - -// ProviderType returns the provider type used by the current juju -// environment. -// -// TODO(dimitern): Refactor the uniter to call this instead of calling -// EnvironConfig() just to get the provider type. Once we have machine -// addresses, this might be completely unnecessary though. -func (u *uniterBaseAPI) ProviderType() (params.StringResult, error) { - result := params.StringResult{} - cfg, err := u.st.EnvironConfig() - if err == nil { - result.Result = cfg.Type() - } - return result, err -} - -// EnterScope ensures each unit has entered its scope in the relation, -// for all of the given relation/unit pairs. See also -// state.RelationUnit.EnterScope(). -func (u *uniterBaseAPI) EnterScope(args params.RelationUnits) (params.ErrorResults, error) { - result := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.RelationUnits)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.ErrorResults{}, err - } - for i, arg := range args.RelationUnits { - tag, err := names.ParseUnitTag(arg.Unit) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - relUnit, err := u.getRelationUnit(canAccess, arg.Relation, tag) - if err == nil { - // Construct the settings, passing the unit's - // private address (we already know it). - privateAddress, _ := relUnit.PrivateAddress() - settings := map[string]interface{}{ - "private-address": privateAddress.Value, - } - err = relUnit.EnterScope(settings) - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// LeaveScope signals each unit has left its scope in the relation, -// for all of the given relation/unit pairs. See also -// state.RelationUnit.LeaveScope(). 
-func (u *uniterBaseAPI) LeaveScope(args params.RelationUnits) (params.ErrorResults, error) { - result := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.RelationUnits)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.ErrorResults{}, err - } - for i, arg := range args.RelationUnits { - unit, err := names.ParseUnitTag(arg.Unit) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) - if err == nil { - err = relUnit.LeaveScope() - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// ReadSettings returns the local settings of each given set of -// relation/unit. -func (u *uniterBaseAPI) ReadSettings(args params.RelationUnits) (params.SettingsResults, error) { - result := params.SettingsResults{ - Results: make([]params.SettingsResult, len(args.RelationUnits)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.SettingsResults{}, err - } - for i, arg := range args.RelationUnits { - unit, err := names.ParseUnitTag(arg.Unit) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) - if err == nil { - var settings *state.Settings - settings, err = relUnit.Settings() - if err == nil { - result.Results[i].Settings, err = convertRelationSettings(settings.Map()) - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// ReadRemoteSettings returns the remote settings of each given set of -// relation/local unit/remote unit. -func (u *uniterBaseAPI) ReadRemoteSettings(args params.RelationUnitPairs) (params.SettingsResults, error) { - result := params.SettingsResults{ - Results: make([]params.SettingsResult, len(args.RelationUnitPairs)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.SettingsResults{}, err - } - for i, arg := range args.RelationUnitPairs { - unit, err := names.ParseUnitTag(arg.LocalUnit) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) - if err == nil { - // TODO(dfc) rework this logic - remoteUnit := "" - remoteUnit, err = u.checkRemoteUnit(relUnit, arg.RemoteUnit) - if err == nil { - var settings map[string]interface{} - settings, err = relUnit.ReadSettings(remoteUnit) - if err == nil { - result.Results[i].Settings, err = convertRelationSettings(settings) - } - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// UpdateSettings persists all changes made to the local settings of -// all given pairs of relation and unit. Keys with empty values are -// considered a signal to delete these values. 
-func (u *uniterBaseAPI) UpdateSettings(args params.RelationUnitsSettings) (params.ErrorResults, error) { - result := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.RelationUnits)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.ErrorResults{}, err - } - for i, arg := range args.RelationUnits { - unit, err := names.ParseUnitTag(arg.Unit) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) - if err == nil { - var settings *state.Settings - settings, err = relUnit.Settings() - if err == nil { - for k, v := range arg.Settings { - if v == "" { - settings.Delete(k) - } else { - settings.Set(k, v) - } - } - _, err = settings.Write() - } - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// WatchRelationUnits returns a RelationUnitsWatcher for observing -// changes to every unit in the supplied relation that is visible to -// the supplied unit. See also state/watcher.go:RelationUnit.Watch(). -func (u *uniterBaseAPI) WatchRelationUnits(args params.RelationUnits) (params.RelationUnitsWatchResults, error) { - result := params.RelationUnitsWatchResults{ - Results: make([]params.RelationUnitsWatchResult, len(args.RelationUnits)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.RelationUnitsWatchResults{}, err - } - for i, arg := range args.RelationUnits { - unit, err := names.ParseUnitTag(arg.Unit) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - relUnit, err := u.getRelationUnit(canAccess, arg.Relation, unit) - if err == nil { - result.Results[i], err = u.watchOneRelationUnit(relUnit) - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// WatchAddresses returns a NotifyWatcher for observing changes -// to each unit's addresses. -func (u *uniterBaseAPI) WatchUnitAddresses(args params.Entities) (params.NotifyWatchResults, error) { - result := params.NotifyWatchResults{ - Results: make([]params.NotifyWatchResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.NotifyWatchResults{}, err - } - for i, entity := range args.Entities { - unit, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - watcherId := "" - if canAccess(unit) { - watcherId, err = u.watchOneUnitAddresses(unit) - } - result.Results[i].NotifyWatcherId = watcherId - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// GetMeterStatus returns meter status information for each unit. 
-func (u *uniterBaseAPI) GetMeterStatus(args params.Entities) (params.MeterStatusResults, error) { - result := params.MeterStatusResults{ - Results: make([]params.MeterStatusResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.MeterStatusResults{}, common.ErrPerm - } - for i, entity := range args.Entities { - unitTag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - err = common.ErrPerm - var status state.MeterStatus - if canAccess(unitTag) { - var unit *state.Unit - unit, err = u.getUnit(unitTag) - if err == nil { - status, err = meterstatus.MeterStatusWrapper(unit.GetMeterStatus) - } - result.Results[i].Code = status.Code.String() - result.Results[i].Info = status.Info - } - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -func (u *uniterBaseAPI) getUnit(tag names.UnitTag) (*state.Unit, error) { - return u.st.Unit(tag.Id()) -} - -func (u *uniterBaseAPI) getService(tag names.ServiceTag) (*state.Service, error) { - return u.st.Service(tag.Id()) -} - -func (u *uniterBaseAPI) getRelationUnit(canAccess common.AuthFunc, relTag string, unitTag names.UnitTag) (*state.RelationUnit, error) { - rel, unit, err := u.getRelationAndUnit(canAccess, relTag, unitTag) - if err != nil { - return nil, err - } - return rel.Unit(unit) -} - -func (u *uniterBaseAPI) getOneRelationById(relId int) (params.RelationResult, error) { - nothing := params.RelationResult{} - rel, err := u.st.Relation(relId) - if errors.IsNotFound(err) { - return nothing, common.ErrPerm - } else if err != nil { - return nothing, err - } - tag := u.auth.GetAuthTag() - switch tag.(type) { - case names.UnitTag: - // do nothing - default: - panic("authenticated entity is not a unit") - } - unit, err := u.st.FindEntity(tag) - if err != nil { - return nothing, err - } - // Use the currently authenticated unit to get the endpoint. - result, err := u.prepareRelationResult(rel, unit.(*state.Unit)) - if err != nil { - // An error from prepareRelationResult means the authenticated - // unit's service is not part of the requested - // relation. That's why it's appropriate to return ErrPerm - // here. - return nothing, common.ErrPerm - } - return result, nil -} - -func (u *uniterBaseAPI) getRelationAndUnit(canAccess common.AuthFunc, relTag string, unitTag names.UnitTag) (*state.Relation, *state.Unit, error) { - tag, err := names.ParseRelationTag(relTag) - if err != nil { - return nil, nil, common.ErrPerm - } - rel, err := u.st.KeyRelation(tag.Id()) - if errors.IsNotFound(err) { - return nil, nil, common.ErrPerm - } else if err != nil { - return nil, nil, err - } - if !canAccess(unitTag) { - return nil, nil, common.ErrPerm - } - unit, err := u.getUnit(unitTag) - return rel, unit, err -} - -func (u *uniterBaseAPI) prepareRelationResult(rel *state.Relation, unit *state.Unit) (params.RelationResult, error) { - nothing := params.RelationResult{} - ep, err := rel.Endpoint(unit.ServiceName()) - if err != nil { - // An error here means the unit's service is not part of the - // relation. 
- return nothing, err - } - return params.RelationResult{ - Id: rel.Id(), - Key: rel.String(), - Life: params.Life(rel.Life().String()), - Endpoint: multiwatcher.Endpoint{ - ServiceName: ep.ServiceName, - Relation: ep.Relation, - }, - }, nil -} - -func (u *uniterBaseAPI) getOneRelation(canAccess common.AuthFunc, relTag, unitTag string) (params.RelationResult, error) { - nothing := params.RelationResult{} - tag, err := names.ParseUnitTag(unitTag) - if err != nil { - return nothing, common.ErrPerm - } - rel, unit, err := u.getRelationAndUnit(canAccess, relTag, tag) - if err != nil { - return nothing, err - } - return u.prepareRelationResult(rel, unit) -} - -func (u *uniterBaseAPI) destroySubordinates(principal *state.Unit) error { - subordinates := principal.SubordinateNames() - for _, subName := range subordinates { - unit, err := u.getUnit(names.NewUnitTag(subName)) - if err != nil { - return err - } - if err = unit.Destroy(); err != nil { - return err - } - } - return nil -} - -func (u *uniterBaseAPI) watchOneServiceRelations(tag names.ServiceTag) (params.StringsWatchResult, error) { - nothing := params.StringsWatchResult{} - service, err := u.getService(tag) - if err != nil { - return nothing, err - } - watch := service.WatchRelations() - // Consume the initial event and forward it to the result. - if changes, ok := <-watch.Changes(); ok { - return params.StringsWatchResult{ - StringsWatcherId: u.resources.Register(watch), - Changes: changes, - }, nil - } - return nothing, watcher.EnsureErr(watch) -} - -func (u *uniterBaseAPI) watchOneUnitConfigSettings(tag names.UnitTag) (string, error) { - unit, err := u.getUnit(tag) - if err != nil { - return "", err - } - watch, err := unit.WatchConfigSettings() - if err != nil { - return "", err - } - // Consume the initial event. Technically, API - // calls to Watch 'transmit' the initial event - // in the Watch response. But NotifyWatchers - // have no state to transmit. - if _, ok := <-watch.Changes(); ok { - return u.resources.Register(watch), nil - } - return "", watcher.EnsureErr(watch) -} - -func (u *uniterBaseAPI) watchOneUnitActionNotifications(tag names.UnitTag) (params.StringsWatchResult, error) { - nothing := params.StringsWatchResult{} - unit, err := u.getUnit(tag) - if err != nil { - return nothing, err - } - watch := unit.WatchActionNotifications() - - if changes, ok := <-watch.Changes(); ok { - return params.StringsWatchResult{ - StringsWatcherId: u.resources.Register(watch), - Changes: changes, - }, nil - } - return nothing, watcher.EnsureErr(watch) -} - -func (u *uniterBaseAPI) watchOneUnitAddresses(tag names.UnitTag) (string, error) { - unit, err := u.getUnit(tag) - if err != nil { - return "", err - } - machineId, err := unit.AssignedMachineId() - if err != nil { - return "", err - } - machine, err := u.st.Machine(machineId) - if err != nil { - return "", err - } - watch := machine.WatchAddresses() - // Consume the initial event. Technically, API - // calls to Watch 'transmit' the initial event - // in the Watch response. But NotifyWatchers - // have no state to transmit. - if _, ok := <-watch.Changes(); ok { - return u.resources.Register(watch), nil - } - return "", watcher.EnsureErr(watch) -} - -func (u *uniterBaseAPI) watchOneRelationUnit(relUnit *state.RelationUnit) (params.RelationUnitsWatchResult, error) { - watch := relUnit.Watch() - // Consume the initial event and forward it to the result. 
- if changes, ok := <-watch.Changes(); ok { - return params.RelationUnitsWatchResult{ - RelationUnitsWatcherId: u.resources.Register(watch), - Changes: changes, - }, nil - } - return params.RelationUnitsWatchResult{}, watcher.EnsureErr(watch) -} - -func (u *uniterBaseAPI) watchOneUnitMeterStatus(tag names.UnitTag) (string, error) { - unit, err := u.getUnit(tag) - if err != nil { - return "", err - } - watch := unit.WatchMeterStatus() - if _, ok := <-watch.Changes(); ok { - return u.resources.Register(watch), nil - } - return "", watcher.EnsureErr(watch) -} - -func (u *uniterBaseAPI) checkRemoteUnit(relUnit *state.RelationUnit, remoteUnitTag string) (string, error) { - // Make sure the unit is indeed remote. - if remoteUnitTag == u.auth.GetAuthTag().String() { - return "", common.ErrPerm - } - // Check remoteUnit is indeed related. Note that we don't want to actually get - // the *Unit, because it might have been removed; but its relation settings will - // persist until the relation itself has been removed (and must remain accessible - // because the local unit's view of reality may be time-shifted). - tag, err := names.ParseUnitTag(remoteUnitTag) - if err != nil { - return "", common.ErrPerm - } - remoteUnitName := tag.Id() - remoteServiceName, err := names.UnitService(remoteUnitName) - if err != nil { - return "", common.ErrPerm - } - rel := relUnit.Relation() - _, err = rel.RelatedEndpoints(remoteServiceName) - if err != nil { - return "", common.ErrPerm - } - return remoteUnitName, nil -} - -// authAndActionFromTagFn first authenticates the request, and then returns -// a function with which to authenticate and retrieve each action in the -// request. -func (u *uniterBaseAPI) authAndActionFromTagFn() (func(string) (*state.Action, error), error) { - canAccess, err := u.accessUnit() - if err != nil { - return nil, err - } - unit, ok := u.auth.GetAuthTag().(names.UnitTag) - if !ok { - return nil, fmt.Errorf("calling entity is not a unit") - } - - return func(tag string) (*state.Action, error) { - actionTag, err := names.ParseActionTag(tag) - if err != nil { - return nil, err - } - action, err := u.st.ActionByTag(actionTag) - if err != nil { - return nil, err - } - receiverTag, err := names.ActionReceiverTag(action.Receiver()) - if err != nil { - return nil, err - } - if unit != receiverTag { - return nil, common.ErrPerm - } - - if !canAccess(receiverTag) { - return nil, common.ErrPerm - } - return action, nil - }, nil -} - -func convertRelationSettings(settings map[string]interface{}) (params.Settings, error) { - result := make(params.Settings) - for k, v := range settings { - // All relation settings should be strings. 
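authAndActionFromTagFn above is the bulk-call idiom used throughout this facade: authenticate the caller once, then return a closure that authorizes and fetches each item in the request. Distilled into a self-contained sketch under stated assumptions — the store type and error values are hypothetical stand-ins for state.ActionByTag and common.ErrPerm:

package authsketch

import (
	"errors"
	"fmt"
)

var errPerm = errors.New("permission denied")

type action struct {
	tag      string
	receiver string // unit name the action was queued for
}

// store stands in for the state backend, keyed by action tag.
type store map[string]action

// actionAccessor validates the caller once, then returns a per-item
// accessor: each lookup re-checks that the action belongs to the caller,
// reporting a mismatch as a permission error.
func actionAccessor(st store, callerUnit string) (func(tag string) (action, error), error) {
	if callerUnit == "" {
		return nil, fmt.Errorf("calling entity is not a unit")
	}
	return func(tag string) (action, error) {
		a, ok := st[tag]
		if !ok {
			return action{}, fmt.Errorf("action %q not found", tag)
		}
		if a.receiver != callerUnit {
			return action{}, errPerm
		}
		return a, nil
	}, nil
}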
- sval, ok := v.(string) - if !ok { - return nil, fmt.Errorf("unexpected relation setting %q: expected string, got %T", k, v) - } - result[k] = sval - } - return result, nil -} - -func relationsInScopeTags(unit *state.Unit) ([]string, error) { - relations, err := unit.RelationsInScope() - if err != nil { - return nil, err - } - tags := make([]string, len(relations)) - for i, relation := range relations { - tags[i] = relation.Tag().String() - } - return tags, nil -} - -func leadershipSettingsAccessorFactory( - st *state.State, - resources *common.Resources, - auth common.Authorizer, -) *leadershipapiserver.LeadershipSettingsAccessor { - registerWatcher := func(serviceId string) (string, error) { - service, err := st.Service(serviceId) - if err != nil { - return "", err - } - w := service.WatchLeaderSettings() - if _, ok := <-w.Changes(); ok { - return resources.Register(w), nil - } - return "", watcher.EnsureErr(w) - } - getSettings := func(serviceId string) (map[string]string, error) { - service, err := st.Service(serviceId) - if err != nil { - return nil, err - } - return service.LeaderSettings() - } - writeSettings := func(token leadership.Token, serviceId string, settings map[string]string) error { - service, err := st.Service(serviceId) - if err != nil { - return err - } - return service.UpdateLeaderSettings(token, settings) - } - return leadershipapiserver.NewLeadershipSettingsAccessor( - auth, - registerWatcher, - getSettings, - st.LeadershipChecker().LeadershipCheck, - writeSettings, - ) -} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_base_test.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_base_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_base_test.go 1970-01-01 00:00:00 +0000 @@ -1,2324 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter_test - -import ( - "fmt" - "time" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/apiserver/uniter" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" - statetesting "github.com/juju/juju/state/testing" - coretesting "github.com/juju/juju/testing" - jujuFactory "github.com/juju/juju/testing/factory" -) - -// uniterBaseSuite implements common testing suite for all API -// versions. It's not intended to be used directly or registered as a -// suite, but embedded. -type uniterBaseSuite struct { - testing.JujuConnSuite - - authorizer apiservertesting.FakeAuthorizer - resources *common.Resources - - machine0 *state.Machine - machine1 *state.Machine - wordpress *state.Service - wpCharm *state.Charm - mysql *state.Service - wordpressUnit *state.Unit - mysqlUnit *state.Unit - - meteredService *state.Service - meteredCharm *state.Charm - meteredUnit *state.Unit -} - -func (s *uniterBaseSuite) setUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - factory := jujuFactory.NewFactory(s.State) - // Create two machines, two services and add a unit to each service. 
- s.machine0 = factory.MakeMachine(c, &jujuFactory.MachineParams{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits, state.JobManageEnviron}, - }) - s.machine1 = factory.MakeMachine(c, &jujuFactory.MachineParams{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - }) - s.wpCharm = factory.MakeCharm(c, &jujuFactory.CharmParams{ - Name: "wordpress", - URL: "cs:quantal/wordpress-3", - }) - s.wordpress = factory.MakeService(c, &jujuFactory.ServiceParams{ - Name: "wordpress", - Charm: s.wpCharm, - Creator: s.AdminUserTag(c), - }) - mysqlCharm := factory.MakeCharm(c, &jujuFactory.CharmParams{ - Name: "mysql", - }) - s.mysql = factory.MakeService(c, &jujuFactory.ServiceParams{ - Name: "mysql", - Charm: mysqlCharm, - Creator: s.AdminUserTag(c), - }) - s.wordpressUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ - Service: s.wordpress, - Machine: s.machine0, - }) - s.mysqlUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ - Service: s.mysql, - Machine: s.machine1, - }) - - s.meteredCharm = s.Factory.MakeCharm(c, &jujuFactory.CharmParams{ - Name: "metered", - URL: "cs:quantal/metered", - }) - s.meteredService = s.Factory.MakeService(c, &jujuFactory.ServiceParams{ - Charm: s.meteredCharm, - }) - s.meteredUnit = s.Factory.MakeUnit(c, &jujuFactory.UnitParams{ - Service: s.meteredService, - SetCharmURL: true, - }) - - // Create a FakeAuthorizer so we can check permissions, - // set up assuming unit 0 has logged in. - s.authorizer = apiservertesting.FakeAuthorizer{ - Tag: s.wordpressUnit.Tag(), - } - - // Create the resource registry separately to track invocations to - // Register. - s.resources = common.NewResources() - s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) -} - -func (s *uniterBaseSuite) testUniterFailsWithNonUnitAgentUser( - c *gc.C, - factory func(_ *state.State, _ *common.Resources, _ common.Authorizer) error, -) { - anAuthorizer := s.authorizer - anAuthorizer.Tag = names.NewMachineTag("9") - err := factory(s.State, s.resources, anAuthorizer) - c.Assert(err, gc.NotNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *uniterBaseSuite) testSetStatus( - c *gc.C, - facade interface { - SetStatus(args params.SetStatus) (params.ErrorResults, error) - }, -) { - err := s.wordpressUnit.SetAgentStatus(state.StatusExecuting, "blah", nil) - c.Assert(err, jc.ErrorIsNil) - err = s.mysqlUnit.SetAgentStatus(state.StatusExecuting, "foo", nil) - c.Assert(err, jc.ErrorIsNil) - - args := params.SetStatus{ - Entities: []params.EntityStatusArgs{ - {Tag: "unit-mysql-0", Status: params.StatusError, Info: "not really"}, - {Tag: "unit-wordpress-0", Status: params.StatusRebooting, Info: "foobar"}, - {Tag: "unit-foo-42", Status: params.StatusActive, Info: "blah"}, - }} - result, err := facade.SetStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify mysqlUnit - no change. - statusInfo, err := s.mysqlUnit.AgentStatus() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusExecuting) - c.Assert(statusInfo.Message, gc.Equals, "foo") - // ...wordpressUnit is fine though. 
- statusInfo, err = s.wordpressUnit.AgentStatus() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusRebooting) - c.Assert(statusInfo.Message, gc.Equals, "foobar") -} - -func (s *uniterBaseSuite) testSetAgentStatus( - c *gc.C, - facade interface { - SetAgentStatus(args params.SetStatus) (params.ErrorResults, error) - }, -) { - err := s.wordpressUnit.SetAgentStatus(state.StatusExecuting, "blah", nil) - c.Assert(err, jc.ErrorIsNil) - err = s.mysqlUnit.SetAgentStatus(state.StatusExecuting, "foo", nil) - c.Assert(err, jc.ErrorIsNil) - - args := params.SetStatus{ - Entities: []params.EntityStatusArgs{ - {Tag: "unit-mysql-0", Status: params.StatusError, Info: "not really"}, - {Tag: "unit-wordpress-0", Status: params.StatusExecuting, Info: "foobar"}, - {Tag: "unit-foo-42", Status: params.StatusRebooting, Info: "blah"}, - }} - result, err := facade.SetAgentStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify mysqlUnit - no change. - statusInfo, err := s.mysqlUnit.AgentStatus() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusExecuting) - c.Assert(statusInfo.Message, gc.Equals, "foo") - // ...wordpressUnit is fine though. - statusInfo, err = s.wordpressUnit.AgentStatus() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusExecuting) - c.Assert(statusInfo.Message, gc.Equals, "foobar") -} - -func (s *uniterBaseSuite) testSetUnitStatus( - c *gc.C, - facade interface { - SetUnitStatus(args params.SetStatus) (params.ErrorResults, error) - }, -) { - err := s.wordpressUnit.SetStatus(state.StatusActive, "blah", nil) - c.Assert(err, jc.ErrorIsNil) - err = s.mysqlUnit.SetStatus(state.StatusTerminated, "foo", nil) - c.Assert(err, jc.ErrorIsNil) - - args := params.SetStatus{ - Entities: []params.EntityStatusArgs{ - {Tag: "unit-mysql-0", Status: params.StatusError, Info: "not really"}, - {Tag: "unit-wordpress-0", Status: params.StatusTerminated, Info: "foobar"}, - {Tag: "unit-foo-42", Status: params.StatusActive, Info: "blah"}, - }} - result, err := facade.SetUnitStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify mysqlUnit - no change. - statusInfo, err := s.mysqlUnit.Status() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusTerminated) - c.Assert(statusInfo.Message, gc.Equals, "foo") - // ...wordpressUnit is fine though. - statusInfo, err = s.wordpressUnit.Status() - c.Assert(err, jc.ErrorIsNil) - c.Assert(statusInfo.Status, gc.Equals, state.StatusTerminated) - c.Assert(statusInfo.Message, gc.Equals, "foobar") -} - -func (s *uniterBaseSuite) testLife( - c *gc.C, - facade interface { - Life(args params.Entities) (params.LifeResults, error) - }, -) { - // Add a relation wordpress-mysql. - rel := s.addRelation(c, "wordpress", "mysql") - relUnit, err := rel.Unit(s.wordpressUnit) - c.Assert(err, jc.ErrorIsNil) - err = relUnit.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - c.Assert(rel.Life(), gc.Equals, state.Alive) - - // Make the wordpressUnit dead. 
- err = s.wordpressUnit.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - err = s.wordpressUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Dead) - - // Add another unit, so the service will stay dying when we - // destroy it later. - extraUnit, err := s.wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - c.Assert(extraUnit, gc.NotNil) - - // Make the wordpress service dying. - err = s.wordpress.Destroy() - c.Assert(err, jc.ErrorIsNil) - err = s.wordpress.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.wordpress.Life(), gc.Equals, state.Dying) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "service-mysql"}, - {Tag: "service-wordpress"}, - {Tag: "machine-0"}, - {Tag: "machine-1"}, - {Tag: "machine-42"}, - {Tag: "service-foo"}, - // TODO(dfc) these aren't valid tags any more - // but I hope to restore this test when params.Entity takes - // tags, not strings, which is coming soon. - // {Tag: "just-foo"}, - {Tag: rel.Tag().String()}, - {Tag: "relation-svc1.rel1#svc2.rel2"}, - // {Tag: "relation-blah"}, - }} - result, err := facade.Life(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.LifeResults{ - Results: []params.LifeResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Life: "dead"}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Life: "dying"}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - // TODO(dfc) see above - // {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - // {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testEnsureDead( - c *gc.C, - facade interface { - EnsureDead(args params.Entities) (params.ErrorResults, error) - }, -) { - c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Alive) - c.Assert(s.mysqlUnit.Life(), gc.Equals, state.Alive) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.EnsureDead(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - err = s.wordpressUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Dead) - err = s.mysqlUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.mysqlUnit.Life(), gc.Equals, state.Alive) - - // Try it again on a Dead unit; should work. - args = params.Entities{ - Entities: []params.Entity{{Tag: "unit-wordpress-0"}}, - } - result, err = facade.EnsureDead(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{{nil}}, - }) - - // Verify Life is unchanged. 
- err = s.wordpressUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Dead) -} - -func (s *uniterBaseSuite) testWatch( - c *gc.C, - facade interface { - Watch(args params.Entities) (params.NotifyWatchResults, error) - }, -) { - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "service-mysql"}, - {Tag: "service-wordpress"}, - {Tag: "service-foo"}, - // TODO(dfc) these aren't valid tags any more - // but I hope to restore this test when params.Entity takes - // tags, not strings, which is coming soon. - // {Tag: "just-foo"}, - }} - result, err := facade.Watch(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ - Results: []params.NotifyWatchResult{ - {Error: apiservertesting.ErrUnauthorized}, - {NotifyWatcherId: "1"}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {NotifyWatcherId: "2"}, - {Error: apiservertesting.ErrUnauthorized}, - // see above - // {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 2) - resource1 := s.resources.Get("1") - defer statetesting.AssertStop(c, resource1) - resource2 := s.resources.Get("2") - defer statetesting.AssertStop(c, resource2) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch call) - wc := statetesting.NewNotifyWatcherC(c, s.State, resource1.(state.NotifyWatcher)) - wc.AssertNoChange() - wc = statetesting.NewNotifyWatcherC(c, s.State, resource2.(state.NotifyWatcher)) - wc.AssertNoChange() -} - -func (s *uniterBaseSuite) testPublicAddress( - c *gc.C, - facade interface { - PublicAddress(args params.Entities) (params.StringResults, error) - }, -) { - // Try first without setting an address. - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - expectErr := &params.Error{ - Code: params.CodeNoAddressSet, - Message: `"unit-wordpress-0" has no public address set`, - } - result, err := facade.PublicAddress(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Error: expectErr}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Now set it and try again.
- err = s.machine0.SetProviderAddresses( - network.NewScopedAddress("1.2.3.4", network.ScopePublic), - ) - c.Assert(err, jc.ErrorIsNil) - address, err := s.wordpressUnit.PublicAddress() - c.Assert(address.Value, gc.Equals, "1.2.3.4") - c.Assert(err, jc.ErrorIsNil) - - result, err = facade.PublicAddress(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: "1.2.3.4"}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testPrivateAddress( - c *gc.C, - facade interface { - PrivateAddress(args params.Entities) (params.StringResults, error) - }, -) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - expectErr := &params.Error{ - Code: params.CodeNoAddressSet, - Message: `"unit-wordpress-0" has no private address set`, - } - result, err := facade.PrivateAddress(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Error: expectErr}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Now set it and try again. - err = s.machine0.SetProviderAddresses( - network.NewScopedAddress("1.2.3.4", network.ScopeCloudLocal), - ) - c.Assert(err, jc.ErrorIsNil) - address, err := s.wordpressUnit.PrivateAddress() - c.Assert(address.Value, gc.Equals, "1.2.3.4") - c.Assert(err, jc.ErrorIsNil) - - result, err = facade.PrivateAddress(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: "1.2.3.4"}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testAvailabilityZone( - c *gc.C, - facade interface { - AvailabilityZone(args params.Entities) (params.StringResults, error) - }, -) { - s.PatchValue(uniter.GetZone, func(st *state.State, tag names.Tag) (string, error) { - return "a_zone", nil - }) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-wordpress-0"}, - }} - result, err := facade.AvailabilityZone(args) - c.Assert(err, jc.ErrorIsNil) - - c.Check(result, gc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Result: "a_zone"}, - }, - }) -} - -func (s *uniterBaseSuite) testResolved( - c *gc.C, - facade interface { - Resolved(args params.Entities) (params.ResolvedModeResults, error) - }, -) { - err := s.wordpressUnit.SetResolved(state.ResolvedRetryHooks) - c.Assert(err, jc.ErrorIsNil) - mode := s.wordpressUnit.Resolved() - c.Assert(mode, gc.Equals, state.ResolvedRetryHooks) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.Resolved(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ResolvedModeResults{ - Results: []params.ResolvedModeResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Mode: params.ResolvedMode(mode)}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testClearResolved( - c *gc.C, - facade interface { - ClearResolved(args params.Entities) (params.ErrorResults, error) - }, -) { - err := s.wordpressUnit.SetResolved(state.ResolvedRetryHooks) - c.Assert(err, jc.ErrorIsNil) - mode := s.wordpressUnit.Resolved() - c.Assert(mode, gc.Equals,
state.ResolvedRetryHooks) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.ClearResolved(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify wordpressUnit's resolved mode has changed. - err = s.wordpressUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - mode = s.wordpressUnit.Resolved() - c.Assert(mode, gc.Equals, state.ResolvedNone) -} - -type getPrincipal interface { - GetPrincipal(args params.Entities) (params.StringBoolResults, error) -} - -func (s *uniterBaseSuite) testGetPrincipal( - c *gc.C, - facade getPrincipal, - factory func(_ *state.State, _ *common.Resources, _ common.Authorizer) (getPrincipal, error), -) { - // Add a subordinate to wordpressUnit. - _, _, subordinate := s.addRelatedService(c, "wordpress", "logging", s.wordpressUnit) - - principal, ok := subordinate.PrincipalName() - c.Assert(principal, gc.Equals, s.wordpressUnit.Name()) - c.Assert(ok, jc.IsTrue) - - // First try it as wordpressUnit's agent. - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: subordinate.Tag().String()}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.GetPrincipal(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringBoolResults{ - Results: []params.StringBoolResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: "", Ok: false, Error: nil}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Now try as subordinate's agent. - subAuthorizer := s.authorizer - subAuthorizer.Tag = subordinate.Tag() - subUniter, err := factory(s.State, s.resources, subAuthorizer) - c.Assert(err, jc.ErrorIsNil) - - result, err = subUniter.GetPrincipal(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringBoolResults{ - Results: []params.StringBoolResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Result: "unit-wordpress-0", Ok: true, Error: nil}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testHasSubordinates( - c *gc.C, - facade interface { - HasSubordinates(args params.Entities) (params.BoolResults, error) - }, -) { - // Try first without any subordinates for wordpressUnit. - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-logging-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.HasSubordinates(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.BoolResults{ - Results: []params.BoolResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: false}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Add two subordinates to wordpressUnit and try again. 
- s.addRelatedService(c, "wordpress", "logging", s.wordpressUnit) - s.addRelatedService(c, "wordpress", "monitoring", s.wordpressUnit) - - result, err = facade.HasSubordinates(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.BoolResults{ - Results: []params.BoolResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: true}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testDestroy( - c *gc.C, - facade interface { - Destroy(args params.Entities) (params.ErrorResults, error) - }, -) { - c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Alive) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.Destroy(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify wordpressUnit is destroyed and removed. - err = s.wordpressUnit.Refresh() - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *uniterBaseSuite) testDestroyAllSubordinates( - c *gc.C, - facade interface { - DestroyAllSubordinates(args params.Entities) (params.ErrorResults, error) - }, -) { - // Add two subordinates to wordpressUnit. - _, _, loggingSub := s.addRelatedService(c, "wordpress", "logging", s.wordpressUnit) - _, _, monitoringSub := s.addRelatedService(c, "wordpress", "monitoring", s.wordpressUnit) - c.Assert(loggingSub.Life(), gc.Equals, state.Alive) - c.Assert(monitoringSub.Life(), gc.Equals, state.Alive) - - err := s.wordpressUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - subordinates := s.wordpressUnit.SubordinateNames() - c.Assert(subordinates, gc.DeepEquals, []string{"logging/0", "monitoring/0"}) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.DestroyAllSubordinates(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify wordpressUnit's subordinates were destroyed. - err = loggingSub.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(loggingSub.Life(), gc.Equals, state.Dying) - err = monitoringSub.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(monitoringSub.Life(), gc.Equals, state.Dying) -} - -func (s *uniterBaseSuite) testCharmURL( - c *gc.C, - facade interface { - CharmURL(args params.Entities) (params.StringBoolResults, error) - }, -) { - // Set wordpressUnit's charm URL first. - err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) - c.Assert(err, jc.ErrorIsNil) - curl, ok := s.wordpressUnit.CharmURL() - c.Assert(curl, gc.DeepEquals, s.wpCharm.URL()) - c.Assert(ok, jc.IsTrue) - - // Make sure wordpress service's charm is what we expect. - curl, force := s.wordpress.CharmURL() - c.Assert(curl, gc.DeepEquals, s.wpCharm.URL()) - c.Assert(force, jc.IsFalse) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "service-mysql"}, - {Tag: "service-wordpress"}, - {Tag: "service-foo"}, - // TODO(dfc) these aren't valid tags any more - // but I hope to restore this test when params.Entity takes - // tags, not strings, which is coming soon. 
- // {Tag: "just-foo"}, - }} - result, err := facade.CharmURL(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringBoolResults{ - Results: []params.StringBoolResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: s.wpCharm.String(), Ok: ok}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Result: s.wpCharm.String(), Ok: force}, - {Error: apiservertesting.ErrUnauthorized}, - // see above - // {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testSetCharmURL( - c *gc.C, - facade interface { - SetCharmURL(args params.EntitiesCharmURL) (params.ErrorResults, error) - }, -) { - _, ok := s.wordpressUnit.CharmURL() - c.Assert(ok, jc.IsFalse) - - args := params.EntitiesCharmURL{Entities: []params.EntityCharmURL{ - {Tag: "unit-mysql-0", CharmURL: "cs:quantal/service-42"}, - {Tag: "unit-wordpress-0", CharmURL: s.wpCharm.String()}, - {Tag: "unit-foo-42", CharmURL: "cs:quantal/foo-321"}, - }} - result, err := facade.SetCharmURL(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the charm URL was set. - err = s.wordpressUnit.Refresh() - c.Assert(err, jc.ErrorIsNil) - charmUrl, needsUpgrade := s.wordpressUnit.CharmURL() - c.Assert(charmUrl, gc.NotNil) - c.Assert(charmUrl.String(), gc.Equals, s.wpCharm.String()) - c.Assert(needsUpgrade, jc.IsTrue) -} - -func (s *uniterBaseSuite) testOpenPorts( - c *gc.C, - facade interface { - OpenPorts(args params.EntitiesPortRanges) (params.ErrorResults, error) - }, -) { - openedPorts, err := s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.HasLen, 0) - - args := params.EntitiesPortRanges{Entities: []params.EntityPortRange{ - {Tag: "unit-mysql-0", Protocol: "tcp", FromPort: 1234, ToPort: 1400}, - {Tag: "unit-wordpress-0", Protocol: "udp", FromPort: 4321, ToPort: 5000}, - {Tag: "unit-foo-42", Protocol: "tcp", FromPort: 42, ToPort: 42}, - }} - result, err := facade.OpenPorts(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the wordpressUnit's port is opened. - openedPorts, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.DeepEquals, []network.PortRange{ - {Protocol: "udp", FromPort: 4321, ToPort: 5000}, - }) -} - -func (s *uniterBaseSuite) testClosePorts( - c *gc.C, - facade interface { - ClosePorts(args params.EntitiesPortRanges) (params.ErrorResults, error) - }, -) { - // Open port udp:4321 in advance on wordpressUnit. 
- err := s.wordpressUnit.OpenPorts("udp", 4321, 5000) - c.Assert(err, jc.ErrorIsNil) - openedPorts, err := s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.DeepEquals, []network.PortRange{ - {Protocol: "udp", FromPort: 4321, ToPort: 5000}, - }) - - args := params.EntitiesPortRanges{Entities: []params.EntityPortRange{ - {Tag: "unit-mysql-0", Protocol: "tcp", FromPort: 1234, ToPort: 1400}, - {Tag: "unit-wordpress-0", Protocol: "udp", FromPort: 4321, ToPort: 5000}, - {Tag: "unit-foo-42", Protocol: "tcp", FromPort: 42, ToPort: 42}, - }} - result, err := facade.ClosePorts(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the wordpressUnit's port is closed. - openedPorts, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.HasLen, 0) -} - -func (s *uniterBaseSuite) testOpenPort( - c *gc.C, - facade interface { - OpenPort(args params.EntitiesPorts) (params.ErrorResults, error) - }, -) { - openedPorts, err := s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.HasLen, 0) - - args := params.EntitiesPorts{Entities: []params.EntityPort{ - {Tag: "unit-mysql-0", Protocol: "tcp", Port: 1234}, - {Tag: "unit-wordpress-0", Protocol: "udp", Port: 4321}, - {Tag: "unit-foo-42", Protocol: "tcp", Port: 42}, - }} - result, err := facade.OpenPort(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the wordpressUnit's port is opened. - openedPorts, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.DeepEquals, []network.PortRange{ - {Protocol: "udp", FromPort: 4321, ToPort: 4321}, - }) -} - -func (s *uniterBaseSuite) testClosePort( - c *gc.C, - facade interface { - ClosePort(args params.EntitiesPorts) (params.ErrorResults, error) - }, -) { - // Open port udp:4321 in advance on wordpressUnit. - err := s.wordpressUnit.OpenPort("udp", 4321) - c.Assert(err, jc.ErrorIsNil) - openedPorts, err := s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.DeepEquals, []network.PortRange{ - {Protocol: "udp", FromPort: 4321, ToPort: 4321}, - }) - - args := params.EntitiesPorts{Entities: []params.EntityPort{ - {Tag: "unit-mysql-0", Protocol: "tcp", Port: 1234}, - {Tag: "unit-wordpress-0", Protocol: "udp", Port: 4321}, - {Tag: "unit-foo-42", Protocol: "tcp", Port: 42}, - }} - result, err := facade.ClosePort(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the wordpressUnit's port is closed. 
- openedPorts, err = s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(openedPorts, gc.HasLen, 0) -} - -func (s *uniterBaseSuite) testWatchConfigSettings( - c *gc.C, - facade interface { - WatchConfigSettings(args params.Entities) (params.NotifyWatchResults, error) - }, -) { - err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.WatchConfigSettings(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ - Results: []params.NotifyWatchResult{ - {Error: apiservertesting.ErrUnauthorized}, - {NotifyWatcherId: "1"}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch call) - wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher)) - wc.AssertNoChange() -} - -type watchActions interface { - WatchActionNotifications(args params.Entities) (params.StringsWatchResults, error) -} - -func (s *uniterBaseSuite) testWatchActionNotifications(c *gc.C, facade watchActions) { - err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.WatchActionNotifications(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringsWatchResults{ - Results: []params.StringsWatchResult{ - {Error: apiservertesting.ErrUnauthorized}, - {StringsWatcherId: "1"}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch call) - wc := statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher)) - wc.AssertNoChange() - - addedAction, err := s.wordpressUnit.AddAction("fakeaction", nil) - - wc.AssertChange(addedAction.Id()) - wc.AssertNoChange() -} - -func (s *uniterBaseSuite) testWatchPreexistingActions(c *gc.C, facade watchActions) { - err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(s.resources.Count(), gc.Equals, 0) - - action1, err := s.wordpressUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - action2, err := s.wordpressUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-wordpress-0"}, - }} - - s.State.StartSync() - results, err := facade.WatchActionNotifications(args) - c.Assert(err, jc.ErrorIsNil) - - checkUnorderedActionIdsEqual(c, []string{action1.Id(), action2.Id()}, results) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch 
call) - wc := statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher)) - wc.AssertNoChange() - - addedAction, err := s.wordpressUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - wc.AssertChange(addedAction.Id()) - wc.AssertNoChange() -} - -func (s *uniterBaseSuite) testWatchActionNotificationsMalformedTag(c *gc.C, facade watchActions) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "ewenit-mysql-0"}, - }} - _, err := facade.WatchActionNotifications(args) - c.Assert(err, gc.NotNil) - c.Assert(err.Error(), gc.Equals, `"ewenit-mysql-0" is not a valid tag`) -} - -func (s *uniterBaseSuite) testWatchActionNotificationsMalformedUnitName(c *gc.C, facade watchActions) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-01"}, - }} - _, err := facade.WatchActionNotifications(args) - c.Assert(err, gc.NotNil) - c.Assert(err.Error(), gc.Equals, `"unit-mysql-01" is not a valid unit tag`) -} - -func (s *uniterBaseSuite) testWatchActionNotificationsNotUnit(c *gc.C, facade watchActions) { - action, err := s.mysqlUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - args := params.Entities{Entities: []params.Entity{ - {Tag: action.Tag().String()}, - }} - _, err = facade.WatchActionNotifications(args) - c.Assert(err, gc.NotNil) - c.Assert(err.Error(), gc.Equals, `"action-`+action.Id()+`" is not a valid unit tag`) -} - -func (s *uniterBaseSuite) testWatchActionNotificationsPermissionDenied(c *gc.C, facade watchActions) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-nonexistentgarbage-0"}, - }} - results, err := facade.WatchActionNotifications(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.NotNil) - c.Assert(len(results.Results), gc.Equals, 1) - result := results.Results[0] - c.Assert(result.Error, gc.NotNil) - c.Assert(result.Error.Message, gc.Equals, "permission denied") -} - -func (s *uniterBaseSuite) testConfigSettings( - c *gc.C, - facade interface { - ConfigSettings(args params.Entities) (params.ConfigSettingsResults, error) - }, -) { - err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) - c.Assert(err, jc.ErrorIsNil) - settings, err := s.wordpressUnit.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{"blog-title": "My Title"}) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.ConfigSettings(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ConfigSettingsResults{ - Results: []params.ConfigSettingsResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Settings: params.ConfigSettings{"blog-title": "My Title"}}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testWatchServiceRelations( - c *gc.C, - facade interface { - WatchServiceRelations(args params.Entities) (params.StringsWatchResults, error) - }, -) { - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "service-mysql"}, - {Tag: "service-wordpress"}, - {Tag: "service-foo"}, - }} - result, err := facade.WatchServiceRelations(args) - s.assertOneStringsWatcher(c, result, err) -} - -func (s *uniterBaseSuite) testCharmArchiveSha256( - c *gc.C, - facade interface { - CharmArchiveSha256(args params.CharmURLs) (params.StringResults, error) - }, -) { - dummyCharm := s.AddTestingCharm(c, "dummy") - - args := params.CharmURLs{URLs: []params.CharmURL{ - {URL: 
"something-invalid"}, - {URL: s.wpCharm.String()}, - {URL: dummyCharm.String()}, - }} - result, err := facade.CharmArchiveSha256(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: s.wpCharm.BundleSha256()}, - {Result: dummyCharm.BundleSha256()}, - }, - }) -} - -func (s *uniterBaseSuite) testCharmArchiveURLs( - c *gc.C, - facade interface { - CharmArchiveURLs(args params.CharmURLs) (params.StringsResults, error) - }, -) { - dummyCharm := s.AddTestingCharm(c, "dummy") - - hostPorts := [][]network.HostPort{ - network.AddressesWithPort([]network.Address{ - network.NewScopedAddress("1.2.3.4", network.ScopePublic), - network.NewScopedAddress("0.1.2.3", network.ScopeCloudLocal), - }, 1234), - network.AddressesWithPort([]network.Address{ - network.NewScopedAddress("1.2.3.5", network.ScopePublic), - }, 1234), - } - err := s.State.SetAPIHostPorts(hostPorts) - c.Assert(err, jc.ErrorIsNil) - - args := params.CharmURLs{URLs: []params.CharmURL{ - {URL: "something-invalid"}, - {URL: s.wpCharm.String()}, - {URL: dummyCharm.String()}, - }} - result, err := facade.CharmArchiveURLs(args) - c.Assert(err, jc.ErrorIsNil) - - wordpressURLs := []string{ - fmt.Sprintf("https://0.1.2.3:1234/environment/%s/charms?file=%%2A&url=cs%%3Aquantal%%2Fwordpress-3", coretesting.EnvironmentTag.Id()), - fmt.Sprintf("https://1.2.3.5:1234/environment/%s/charms?file=%%2A&url=cs%%3Aquantal%%2Fwordpress-3", coretesting.EnvironmentTag.Id()), - } - dummyURLs := []string{ - fmt.Sprintf("https://0.1.2.3:1234/environment/%s/charms?file=%%2A&url=local%%3Aquantal%%2Fdummy-1", coretesting.EnvironmentTag.Id()), - fmt.Sprintf("https://1.2.3.5:1234/environment/%s/charms?file=%%2A&url=local%%3Aquantal%%2Fdummy-1", coretesting.EnvironmentTag.Id()), - } - - c.Assert(result, jc.DeepEquals, params.StringsResults{ - Results: []params.StringsResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: wordpressURLs}, - {Result: dummyURLs}, - }, - }) -} - -func (s *uniterBaseSuite) testCurrentEnvironUUID( - c *gc.C, - facade interface { - CurrentEnvironUUID() (params.StringResult, error) - }, -) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - result, err := facade.CurrentEnvironUUID() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResult{Result: env.UUID()}) -} - -func (s *uniterBaseSuite) testCurrentEnvironment( - c *gc.C, - facade interface { - CurrentEnvironment() (params.EnvironmentResult, error) - }, -) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - result, err := facade.CurrentEnvironment() - c.Assert(err, jc.ErrorIsNil) - expected := params.EnvironmentResult{ - Name: env.Name(), - UUID: env.UUID(), - } - c.Assert(result, gc.DeepEquals, expected) -} - -type actions interface { - Actions(args params.Entities) (params.ActionsQueryResults, error) -} - -func (s *uniterBaseSuite) testActions(c *gc.C, facade actions) { - var actionTests = []struct { - description string - action params.ActionResult - }{{ - description: "A simple action.", - action: params.ActionResult{ - Action: ¶ms.Action{ - Name: "fakeaction", - Parameters: map[string]interface{}{ - "outfile": "foo.txt", - }}, - }, - }, { - description: "An action with nested parameters.", - action: params.ActionResult{ - Action: ¶ms.Action{ - Name: "fakeaction", - Parameters: map[string]interface{}{ - "outfile": "foo.bz2", - "compression": map[string]interface{}{ - "kind": "bzip", - 
"quality": 5, - }, - }}, - }, - }} - - for i, actionTest := range actionTests { - c.Logf("test %d: %s", i, actionTest.description) - - a, err := s.wordpressUnit.AddAction( - actionTest.action.Action.Name, - actionTest.action.Action.Parameters) - c.Assert(err, jc.ErrorIsNil) - c.Assert(names.IsValidAction(a.Id()), gc.Equals, true) - actionTag := names.NewActionTag(a.Id()) - c.Assert(a.ActionTag(), gc.Equals, actionTag) - - args := params.Entities{ - Entities: []params.Entity{{ - Tag: actionTag.String(), - }}, - } - results, err := facade.Actions(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(results.Results, gc.HasLen, 1) - - actionsQueryResult := results.Results[0] - - c.Assert(actionsQueryResult.Error, gc.IsNil) - c.Assert(actionsQueryResult.Action, jc.DeepEquals, actionTest.action) - } -} - -func (s *uniterBaseSuite) testActionsNotPresent(c *gc.C, facade actions) { - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - args := params.Entities{ - Entities: []params.Entity{{ - Tag: names.NewActionTag(uuid.String()).String(), - }}, - } - results, err := facade.Actions(args) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(results.Results, gc.HasLen, 1) - actionsQueryResult := results.Results[0] - c.Assert(actionsQueryResult.Error, gc.NotNil) - c.Assert(actionsQueryResult.Error, gc.ErrorMatches, `action "[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" not found`) -} - -func (s *uniterBaseSuite) testActionsWrongUnit( - c *gc.C, - factory func(_ *state.State, _ *common.Resources, _ common.Authorizer) (actions, error), -) { - // Action doesn't match unit. - mysqlUnitAuthorizer := apiservertesting.FakeAuthorizer{ - Tag: s.mysqlUnit.Tag(), - } - mysqlUnitFacade, err := factory(s.State, s.resources, mysqlUnitAuthorizer) - c.Assert(err, jc.ErrorIsNil) - - action, err := s.wordpressUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - args := params.Entities{ - Entities: []params.Entity{{ - Tag: action.Tag().String(), - }}, - } - actions, err := mysqlUnitFacade.Actions(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(actions.Results), gc.Equals, 1) - c.Assert(actions.Results[0].Error, jc.Satisfies, params.IsCodeUnauthorized) -} - -func (s *uniterBaseSuite) testActionsPermissionDenied(c *gc.C, facade actions) { - action, err := s.mysqlUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - args := params.Entities{ - Entities: []params.Entity{{ - Tag: action.Tag().String(), - }}, - } - actions, err := facade.Actions(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(actions.Results), gc.Equals, 1) - c.Assert(actions.Results[0].Error, jc.Satisfies, params.IsCodeUnauthorized) -} - -type finishActions interface { - FinishActions(args params.ActionExecutionResults) (params.ErrorResults, error) -} - -func (s *uniterBaseSuite) testFinishActionsSuccess(c *gc.C, facade finishActions) { - testName := "fakeaction" - testOutput := map[string]interface{}{"output": "completed fakeaction successfully"} - - results, err := s.wordpressUnit.CompletedActions() - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, ([]*state.Action)(nil)) - - action, err := s.wordpressUnit.AddAction(testName, nil) - c.Assert(err, jc.ErrorIsNil) - - actionResults := params.ActionExecutionResults{ - Results: []params.ActionExecutionResult{{ - ActionTag: action.ActionTag().String(), - Status: params.ActionCompleted, - Results: testOutput, - }}, - } - res, err := facade.FinishActions(actionResults) - c.Assert(err, jc.ErrorIsNil) - c.Assert(res, gc.DeepEquals, params.ErrorResults{Results: 
[]params.ErrorResult{{Error: nil}}}) - - results, err = s.wordpressUnit.CompletedActions() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(results), gc.Equals, 1) - c.Assert(results[0].Status(), gc.Equals, state.ActionCompleted) - res2, errstr := results[0].Results() - c.Assert(errstr, gc.Equals, "") - c.Assert(res2, gc.DeepEquals, testOutput) - c.Assert(results[0].Name(), gc.Equals, testName) -} - -func (s *uniterBaseSuite) testFinishActionsFailure(c *gc.C, facade finishActions) { - testName := "fakeaction" - testError := "fakeaction was a dismal failure" - - results, err := s.wordpressUnit.CompletedActions() - c.Assert(err, jc.ErrorIsNil) - c.Assert(results, gc.DeepEquals, ([]*state.Action)(nil)) - - action, err := s.wordpressUnit.AddAction(testName, nil) - c.Assert(err, jc.ErrorIsNil) - - actionResults := params.ActionExecutionResults{ - Results: []params.ActionExecutionResult{{ - ActionTag: action.ActionTag().String(), - Status: params.ActionFailed, - Results: nil, - Message: testError, - }}, - } - res, err := facade.FinishActions(actionResults) - c.Assert(err, jc.ErrorIsNil) - c.Assert(res, gc.DeepEquals, params.ErrorResults{Results: []params.ErrorResult{{Error: nil}}}) - - results, err = s.wordpressUnit.CompletedActions() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(results), gc.Equals, 1) - c.Assert(results[0].Status(), gc.Equals, state.ActionFailed) - res2, errstr := results[0].Results() - c.Assert(errstr, gc.Equals, testError) - c.Assert(res2, gc.DeepEquals, map[string]interface{}{}) - c.Assert(results[0].Name(), gc.Equals, testName) -} - -func (s *uniterBaseSuite) testFinishActionsAuthAccess(c *gc.C, facade finishActions) { - good, err := s.wordpressUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - - bad, err := s.mysqlUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - - var tests = []struct { - actionTag names.ActionTag - err error - }{ - {actionTag: good.ActionTag(), err: nil}, - {actionTag: bad.ActionTag(), err: common.ErrPerm}, - } - - // Queue up actions from tests - actionResults := params.ActionExecutionResults{Results: make([]params.ActionExecutionResult, len(tests))} - for i, test := range tests { - actionResults.Results[i] = params.ActionExecutionResult{ - ActionTag: test.actionTag.String(), - Status: params.ActionCompleted, - Results: map[string]interface{}{}, - } - } - - // Invoke FinishActions - res, err := facade.FinishActions(actionResults) - c.Assert(err, jc.ErrorIsNil) - - // Verify permissions errors for actions queued on different unit - for i, result := range res.Results { - expected := tests[i].err - if expected != nil { - c.Assert(result.Error, gc.NotNil) - c.Assert(result.Error.Error(), gc.Equals, expected.Error()) - } else { - c.Assert(result.Error, gc.IsNil) - } - } -} - -type beginActions interface { - BeginActions(args params.Entities) (params.ErrorResults, error) -} - -func (s *uniterBaseSuite) testBeginActions(c *gc.C, facade beginActions) { - ten_seconds_ago := time.Now().Add(-10 * time.Second) - good, err := s.wordpressUnit.AddAction("fakeaction", nil) - c.Assert(err, jc.ErrorIsNil) - - running, err := s.wordpressUnit.RunningActions() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(running), gc.Equals, 0, gc.Commentf("expected no running actions, got %d", len(running))) - - args := params.Entities{Entities: []params.Entity{{Tag: good.ActionTag().String()}}} - res, err := facade.BeginActions(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(res.Results), gc.Equals, 1) - c.Assert(res.Results[0].Error, gc.IsNil) - - 
running, err = s.wordpressUnit.RunningActions() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(running), gc.Equals, 1, gc.Commentf("expected one running action, got %d", len(running))) - c.Assert(running[0].ActionTag(), gc.Equals, good.ActionTag()) - enqueued, started := running[0].Enqueued(), running[0].Started() - c.Assert(ten_seconds_ago.Before(enqueued), jc.IsTrue, gc.Commentf("enqueued time should be after 10 seconds ago")) - c.Assert(ten_seconds_ago.Before(started), jc.IsTrue, gc.Commentf("started time should be after 10 seconds ago")) - c.Assert(started.After(enqueued) || started.Equal(enqueued), jc.IsTrue, gc.Commentf("started should be after or equal to enqueued time")) -} - -func (s *uniterBaseSuite) testRelation( - c *gc.C, - facade interface { - Relation(args params.RelationUnits) (params.RelationResults, error) - }, -) { - rel := s.addRelation(c, "wordpress", "mysql") - wpEp, err := rel.Endpoint("wordpress") - c.Assert(err, jc.ErrorIsNil) - - args := params.RelationUnits{RelationUnits: []params.RelationUnit{ - {Relation: "relation-42", Unit: "unit-foo-0"}, - {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, - {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, - {Relation: rel.Tag().String(), Unit: "unit-foo-0"}, - {Relation: "relation-blah", Unit: "unit-wordpress-0"}, - {Relation: "service-foo", Unit: "user-foo"}, - {Relation: "foo", Unit: "bar"}, - {Relation: "unit-wordpress-0", Unit: rel.Tag().String()}, - }} - result, err := facade.Relation(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.RelationResults{ - Results: []params.RelationResult{ - {Error: apiservertesting.ErrUnauthorized}, - { - Id: rel.Id(), - Key: rel.String(), - Life: params.Life(rel.Life().String()), - Endpoint: multiwatcher.Endpoint{ - ServiceName: wpEp.ServiceName, - Relation: wpEp.Relation, - }, - }, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testRelationById( - c *gc.C, - facade interface { - RelationById(args params.RelationIds) (params.RelationResults, error) - }, -) { - rel := s.addRelation(c, "wordpress", "mysql") - c.Assert(rel.Id(), gc.Equals, 0) - wpEp, err := rel.Endpoint("wordpress") - c.Assert(err, jc.ErrorIsNil) - - // Add another relation to mysql service, so we can see we can't - // get it. 
- otherRel, _, _ := s.addRelatedService(c, "mysql", "logging", s.mysqlUnit) - - args := params.RelationIds{ - RelationIds: []int{-1, rel.Id(), otherRel.Id(), 42, 234}, - } - result, err := facade.RelationById(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.RelationResults{ - Results: []params.RelationResult{ - {Error: apiservertesting.ErrUnauthorized}, - { - Id: rel.Id(), - Key: rel.String(), - Life: params.Life(rel.Life().String()), - Endpoint: multiwatcher.Endpoint{ - ServiceName: wpEp.ServiceName, - Relation: wpEp.Relation, - }, - }, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testProviderType( - c *gc.C, - facade interface { - ProviderType() (params.StringResult, error) - }, -) { - cfg, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - - result, err := facade.ProviderType() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResult{Result: cfg.Type()}) -} - -func (s *uniterBaseSuite) testEnterScope( - c *gc.C, - facade interface { - EnterScope(args params.RelationUnits) (params.ErrorResults, error) - }, -) { - // Set wordpressUnit's private address first. - err := s.machine0.SetProviderAddresses( - network.NewScopedAddress("1.2.3.4", network.ScopeCloudLocal), - ) - c.Assert(err, jc.ErrorIsNil) - - rel := s.addRelation(c, "wordpress", "mysql") - relUnit, err := rel.Unit(s.wordpressUnit) - c.Assert(err, jc.ErrorIsNil) - s.assertInScope(c, relUnit, false) - - args := params.RelationUnits{RelationUnits: []params.RelationUnit{ - {Relation: "relation-42", Unit: "unit-foo-0"}, - {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, - {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, - {Relation: "relation-42", Unit: "unit-wordpress-0"}, - {Relation: "relation-foo", Unit: "unit-wordpress-0"}, - {Relation: "service-wordpress", Unit: "unit-foo-0"}, - {Relation: "foo", Unit: "bar"}, - {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, - {Relation: rel.Tag().String(), Unit: "service-wordpress"}, - {Relation: rel.Tag().String(), Unit: "service-mysql"}, - {Relation: rel.Tag().String(), Unit: "user-foo"}, - }} - result, err := facade.EnterScope(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {nil}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the scope changes and settings. 
- s.assertInScope(c, relUnit, true) - readSettings, err := relUnit.ReadSettings(s.wordpressUnit.Name()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(readSettings, gc.DeepEquals, map[string]interface{}{ - "private-address": "1.2.3.4", - }) -} - -func (s *uniterBaseSuite) testLeaveScope( - c *gc.C, - facade interface { - LeaveScope(args params.RelationUnits) (params.ErrorResults, error) - }, -) { - rel := s.addRelation(c, "wordpress", "mysql") - relUnit, err := rel.Unit(s.wordpressUnit) - c.Assert(err, jc.ErrorIsNil) - settings := map[string]interface{}{ - "some": "settings", - } - err = relUnit.EnterScope(settings) - c.Assert(err, jc.ErrorIsNil) - s.assertInScope(c, relUnit, true) - - args := params.RelationUnits{RelationUnits: []params.RelationUnit{ - {Relation: "relation-42", Unit: "unit-foo-0"}, - {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, - {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, - {Relation: "relation-42", Unit: "unit-wordpress-0"}, - {Relation: "relation-foo", Unit: "unit-wordpress-0"}, - {Relation: "service-wordpress", Unit: "unit-foo-0"}, - {Relation: "foo", Unit: "bar"}, - {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, - {Relation: rel.Tag().String(), Unit: "service-wordpress"}, - {Relation: rel.Tag().String(), Unit: "service-mysql"}, - {Relation: rel.Tag().String(), Unit: "user-foo"}, - }} - result, err := facade.LeaveScope(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {apiservertesting.ErrUnauthorized}, - {nil}, - {nil}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - {apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the scope changes. 
- s.assertInScope(c, relUnit, false) - readSettings, err := relUnit.ReadSettings(s.wordpressUnit.Name()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(readSettings, gc.DeepEquals, settings) -} - -func (s *uniterBaseSuite) testJoinedRelations( - c *gc.C, - facade interface { - JoinedRelations(args params.Entities) (params.StringsResults, error) - }, -) { - rel := s.addRelation(c, "wordpress", "mysql") - relUnit, err := rel.Unit(s.wordpressUnit) - c.Assert(err, jc.ErrorIsNil) - err = relUnit.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{ - Entities: []params.Entity{ - {s.wordpressUnit.Tag().String()}, - {s.mysqlUnit.Tag().String()}, - {"unit-unknown-1"}, - {"service-wordpress"}, - {"machine-0"}, - {rel.Tag().String()}, - }, - } - expect := params.StringsResults{ - Results: []params.StringsResult{ - {Result: []string{rel.Tag().String()}}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - } - check := func() { - result, err := facade.JoinedRelations(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, expect) - } - check() - err = relUnit.PrepareLeaveScope() - c.Assert(err, jc.ErrorIsNil) - check() -} - -type readSettings interface { - ReadSettings(args params.RelationUnits) (params.SettingsResults, error) -} - -func (s *uniterBaseSuite) testReadSettings(c *gc.C, facade readSettings) { - rel := s.addRelation(c, "wordpress", "mysql") - relUnit, err := rel.Unit(s.wordpressUnit) - c.Assert(err, jc.ErrorIsNil) - settings := map[string]interface{}{ - "some": "settings", - } - err = relUnit.EnterScope(settings) - c.Assert(err, jc.ErrorIsNil) - s.assertInScope(c, relUnit, true) - - args := params.RelationUnits{RelationUnits: []params.RelationUnit{ - {Relation: "relation-42", Unit: "unit-foo-0"}, - {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, - {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, - {Relation: "relation-42", Unit: "unit-wordpress-0"}, - {Relation: "relation-foo", Unit: ""}, - {Relation: "service-wordpress", Unit: "unit-foo-0"}, - {Relation: "foo", Unit: "bar"}, - {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, - {Relation: rel.Tag().String(), Unit: "service-wordpress"}, - {Relation: rel.Tag().String(), Unit: "service-mysql"}, - {Relation: rel.Tag().String(), Unit: "user-foo"}, - }} - result, err := facade.ReadSettings(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.SettingsResults{ - Results: []params.SettingsResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Settings: params.Settings{ - "some": "settings", - }}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterBaseSuite) testReadSettingsWithNonStringValuesFails(c *gc.C, facade readSettings) { - rel := s.addRelation(c, "wordpress", "mysql") - relUnit, err := rel.Unit(s.wordpressUnit) - c.Assert(err, jc.ErrorIsNil) - settings := map[string]interface{}{ - "other": "things", - "invalid-bool": false, - } - err = relUnit.EnterScope(settings) - c.Assert(err, jc.ErrorIsNil) - 
s.assertInScope(c, relUnit, true)
-
-	args := params.RelationUnits{RelationUnits: []params.RelationUnit{
-		{Relation: rel.Tag().String(), Unit: "unit-wordpress-0"},
-	}}
-	expectErr := `unexpected relation setting "invalid-bool": expected string, got bool`
-	result, err := facade.ReadSettings(args)
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(result, gc.DeepEquals, params.SettingsResults{
-		Results: []params.SettingsResult{
-			{Error: &params.Error{Message: expectErr}},
-		},
-	})
-}
-
-type readRemoteSettings interface {
-	ReadRemoteSettings(args params.RelationUnitPairs) (params.SettingsResults, error)
-}
-
-func (s *uniterBaseSuite) testReadRemoteSettings(c *gc.C, facade readRemoteSettings) {
-	rel := s.addRelation(c, "wordpress", "mysql")
-	relUnit, err := rel.Unit(s.wordpressUnit)
-	c.Assert(err, jc.ErrorIsNil)
-	settings := map[string]interface{}{
-		"some": "settings",
-	}
-	err = relUnit.EnterScope(settings)
-	c.Assert(err, jc.ErrorIsNil)
-	s.assertInScope(c, relUnit, true)
-
-	// First test most of the invalid args tests and try to read the
-	// (unset) remote unit settings.
-	args := params.RelationUnitPairs{RelationUnitPairs: []params.RelationUnitPair{
-		{Relation: "relation-42", LocalUnit: "unit-foo-0", RemoteUnit: "foo"},
-		{Relation: rel.Tag().String(), LocalUnit: "unit-wordpress-0", RemoteUnit: "unit-wordpress-0"},
-		{Relation: rel.Tag().String(), LocalUnit: "unit-wordpress-0", RemoteUnit: "unit-mysql-0"},
-		{Relation: "relation-42", LocalUnit: "unit-wordpress-0", RemoteUnit: ""},
-		{Relation: "relation-foo", LocalUnit: "", RemoteUnit: ""},
-		{Relation: "service-wordpress", LocalUnit: "unit-foo-0", RemoteUnit: "user-foo"},
-		{Relation: "foo", LocalUnit: "bar", RemoteUnit: "baz"},
-		{Relation: rel.Tag().String(), LocalUnit: "unit-mysql-0", RemoteUnit: "unit-wordpress-0"},
-		{Relation: rel.Tag().String(), LocalUnit: "service-wordpress", RemoteUnit: "service-mysql"},
-		{Relation: rel.Tag().String(), LocalUnit: "service-mysql", RemoteUnit: "foo"},
-		{Relation: rel.Tag().String(), LocalUnit: "user-foo", RemoteUnit: "unit-wordpress-0"},
-	}}
-	result, err := facade.ReadRemoteSettings(args)
-
-	// We don't set the remote unit settings on purpose to test the error.
-	expectErr := `cannot read settings for unit "mysql/0" in relation "wordpress:db mysql:server": settings`
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(result, jc.DeepEquals, params.SettingsResults{
-		Results: []params.SettingsResult{
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.NotFoundError(expectErr)},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-		},
-	})
-
-	// Now leave the mysqlUnit and re-enter with new settings.
-	relUnit, err = rel.Unit(s.mysqlUnit)
-	c.Assert(err, jc.ErrorIsNil)
-	settings = map[string]interface{}{
-		"other": "things",
-	}
-	err = relUnit.LeaveScope()
-	c.Assert(err, jc.ErrorIsNil)
-	s.assertInScope(c, relUnit, false)
-	err = relUnit.EnterScope(settings)
-	c.Assert(err, jc.ErrorIsNil)
-	s.assertInScope(c, relUnit, true)
-
-	// Test the remote unit settings can be read.
-	args = params.RelationUnitPairs{RelationUnitPairs: []params.RelationUnitPair{{
-		Relation:   rel.Tag().String(),
-		LocalUnit:  "unit-wordpress-0",
-		RemoteUnit: "unit-mysql-0",
-	}}}
-	expect := params.SettingsResults{
-		Results: []params.SettingsResult{
-			{Settings: params.Settings{
-				"other": "things",
-			}},
-		},
-	}
-	result, err = facade.ReadRemoteSettings(args)
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(result, gc.DeepEquals, expect)
-
-	// Now destroy the remote unit, and check its settings can still be read.
-	err = s.mysqlUnit.Destroy()
-	c.Assert(err, jc.ErrorIsNil)
-	err = s.mysqlUnit.EnsureDead()
-	c.Assert(err, jc.ErrorIsNil)
-	err = s.mysqlUnit.Remove()
-	c.Assert(err, jc.ErrorIsNil)
-	result, err = facade.ReadRemoteSettings(args)
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(result, gc.DeepEquals, expect)
-}
-
-func (s *uniterBaseSuite) testReadRemoteSettingsWithNonStringValuesFails(c *gc.C, facade readRemoteSettings) {
-	rel := s.addRelation(c, "wordpress", "mysql")
-	relUnit, err := rel.Unit(s.mysqlUnit)
-	c.Assert(err, jc.ErrorIsNil)
-	settings := map[string]interface{}{
-		"other":        "things",
-		"invalid-bool": false,
-	}
-	err = relUnit.EnterScope(settings)
-	c.Assert(err, jc.ErrorIsNil)
-	s.assertInScope(c, relUnit, true)
-
-	args := params.RelationUnitPairs{RelationUnitPairs: []params.RelationUnitPair{{
-		Relation:   rel.Tag().String(),
-		LocalUnit:  "unit-wordpress-0",
-		RemoteUnit: "unit-mysql-0",
-	}}}
-	expectErr := `unexpected relation setting "invalid-bool": expected string, got bool`
-	result, err := facade.ReadRemoteSettings(args)
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(result, gc.DeepEquals, params.SettingsResults{
-		Results: []params.SettingsResult{
-			{Error: &params.Error{Message: expectErr}},
-		},
-	})
-}
-
-func (s *uniterBaseSuite) testUpdateSettings(
-	c *gc.C,
-	facade interface {
-		UpdateSettings(args params.RelationUnitsSettings) (params.ErrorResults, error)
-	},
-) {
-	rel := s.addRelation(c, "wordpress", "mysql")
-	relUnit, err := rel.Unit(s.wordpressUnit)
-	c.Assert(err, jc.ErrorIsNil)
-	settings := map[string]interface{}{
-		"some":  "settings",
-		"other": "stuff",
-	}
-	err = relUnit.EnterScope(settings)
-	c.Assert(err, jc.ErrorIsNil)
-	s.assertInScope(c, relUnit, true)
-
-	newSettings := params.Settings{
-		"some":  "different",
-		"other": "",
-	}
-
-	args := params.RelationUnitsSettings{RelationUnits: []params.RelationUnitSettings{
-		{Relation: "relation-42", Unit: "unit-foo-0", Settings: nil},
-		{Relation: rel.Tag().String(), Unit: "unit-wordpress-0", Settings: newSettings},
-		{Relation: "relation-42", Unit: "unit-wordpress-0", Settings: nil},
-		{Relation: "relation-foo", Unit: "unit-wordpress-0", Settings: nil},
-		{Relation: "service-wordpress", Unit: "unit-foo-0", Settings: nil},
-		{Relation: "foo", Unit: "bar", Settings: nil},
-		{Relation: rel.Tag().String(), Unit: "unit-mysql-0", Settings: nil},
-		{Relation: rel.Tag().String(), Unit: "service-wordpress", Settings: nil},
-		{Relation: rel.Tag().String(), Unit: "service-mysql", Settings: nil},
-		{Relation: rel.Tag().String(), Unit: "user-foo", Settings: nil},
-	}}
-	result, err := facade.UpdateSettings(args)
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(result, gc.DeepEquals, params.ErrorResults{
-		Results: []params.ErrorResult{
-			{apiservertesting.ErrUnauthorized},
-			{nil},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-			{apiservertesting.ErrUnauthorized},
-		},
-	})
-
-	// Verify the settings were saved.
-	s.assertInScope(c, relUnit, true)
-	readSettings, err := relUnit.ReadSettings(s.wordpressUnit.Name())
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(readSettings, gc.DeepEquals, map[string]interface{}{
-		"some": "different",
-	})
-}
-
-func (s *uniterBaseSuite) testWatchRelationUnits(
-	c *gc.C,
-	facade interface {
-		WatchRelationUnits(args params.RelationUnits) (params.RelationUnitsWatchResults, error)
-	},
-) {
-	// Add a relation between wordpress and mysql and enter scope with
-	// mysqlUnit.
-	rel := s.addRelation(c, "wordpress", "mysql")
-	myRelUnit, err := rel.Unit(s.mysqlUnit)
-	c.Assert(err, jc.ErrorIsNil)
-	err = myRelUnit.EnterScope(nil)
-	c.Assert(err, jc.ErrorIsNil)
-	s.assertInScope(c, myRelUnit, true)
-
-	c.Assert(s.resources.Count(), gc.Equals, 0)
-
-	args := params.RelationUnits{RelationUnits: []params.RelationUnit{
-		{Relation: "relation-42", Unit: "unit-foo-0"},
-		{Relation: rel.Tag().String(), Unit: "unit-wordpress-0"},
-		{Relation: rel.Tag().String(), Unit: "unit-mysql-0"},
-		{Relation: "relation-42", Unit: "unit-wordpress-0"},
-		{Relation: "relation-foo", Unit: ""},
-		{Relation: "service-wordpress", Unit: "unit-foo-0"},
-		{Relation: "foo", Unit: "bar"},
-		{Relation: rel.Tag().String(), Unit: "unit-mysql-0"},
-		{Relation: rel.Tag().String(), Unit: "service-wordpress"},
-		{Relation: rel.Tag().String(), Unit: "service-mysql"},
-		{Relation: rel.Tag().String(), Unit: "user-foo"},
-	}}
-	result, err := facade.WatchRelationUnits(args)
-	c.Assert(err, jc.ErrorIsNil)
-	// UnitSettings versions are volatile, so we don't check them.
-	// We just make sure the keys of the Changed field are as
-	// expected.
-	c.Assert(result.Results, gc.HasLen, len(args.RelationUnits))
-	mysqlChanges := result.Results[1].Changes
-	c.Assert(mysqlChanges, gc.NotNil)
-	changed, ok := mysqlChanges.Changed["mysql/0"]
-	c.Assert(ok, jc.IsTrue)
-	expectChanges := multiwatcher.RelationUnitsChange{
-		Changed: map[string]multiwatcher.UnitSettings{"mysql/0": changed},
-	}
-	c.Assert(result, gc.DeepEquals, params.RelationUnitsWatchResults{
-		Results: []params.RelationUnitsWatchResult{
-			{Error: apiservertesting.ErrUnauthorized},
-			{
-				RelationUnitsWatcherId: "1",
-				Changes:                expectChanges,
-			},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-			{Error: apiservertesting.ErrUnauthorized},
-		},
-	})
-
-	// Verify the resource was registered and stop when done
-	c.Assert(s.resources.Count(), gc.Equals, 1)
-	resource := s.resources.Get("1")
-	defer statetesting.AssertStop(c, resource)
-
-	// Check that the Watch has consumed the initial event ("returned" in
-	// the Watch call)
-	wc := statetesting.NewRelationUnitsWatcherC(c, s.State, resource.(state.RelationUnitsWatcher))
-	wc.AssertNoChange()
-
-	// Leave scope with mysqlUnit and check it's detected.
- err = myRelUnit.LeaveScope() - c.Assert(err, jc.ErrorIsNil) - s.assertInScope(c, myRelUnit, false) - - wc.AssertChange(nil, []string{"mysql/0"}) -} - -func (s *uniterBaseSuite) testAPIAddresses( - c *gc.C, - facade interface { - APIAddresses() (params.StringsResult, error) - }, -) { - hostPorts := [][]network.HostPort{ - network.NewHostPorts(1234, "0.1.2.3"), - } - err := s.State.SetAPIHostPorts(hostPorts) - c.Assert(err, jc.ErrorIsNil) - - result, err := facade.APIAddresses() - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringsResult{ - Result: []string{"0.1.2.3:1234"}, - }) -} - -func (s *uniterBaseSuite) testWatchUnitAddresses( - c *gc.C, - facade interface { - WatchUnitAddresses(args params.Entities) (params.NotifyWatchResults, error) - }, -) { - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "machine-0"}, - {Tag: "service-wordpress"}, - }} - result, err := facade.WatchUnitAddresses(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ - Results: []params.NotifyWatchResult{ - {Error: apiservertesting.ErrUnauthorized}, - {NotifyWatcherId: "1"}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch call) - wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher)) - wc.AssertNoChange() -} - -type getMeterStatus interface { - GetMeterStatus(args params.Entities) (params.MeterStatusResults, error) -} - -func (s *uniterBaseSuite) testGetMeterStatus(c *gc.C, facade getMeterStatus) { - args := params.Entities{Entities: []params.Entity{{Tag: s.wordpressUnit.Tag().String()}}} - result, err := facade.GetMeterStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.Results, gc.HasLen, 1) - c.Assert(result.Results[0].Error, gc.IsNil) - c.Assert(result.Results[0].Code, gc.Equals, "AMBER") - c.Assert(result.Results[0].Info, gc.Equals, "not set") - - newCode := "GREEN" - newInfo := "All is ok." 
- - err = s.wordpressUnit.SetMeterStatus(newCode, newInfo) - c.Assert(err, jc.ErrorIsNil) - - result, err = facade.GetMeterStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.Results, gc.HasLen, 1) - c.Assert(result.Results[0].Error, gc.IsNil) - c.Assert(result.Results[0].Code, gc.DeepEquals, newCode) - c.Assert(result.Results[0].Info, gc.DeepEquals, newInfo) -} - -func (s *uniterBaseSuite) testGetMeterStatusUnauthenticated(c *gc.C, facade getMeterStatus) { - args := params.Entities{Entities: []params.Entity{{s.mysqlUnit.Tag().String()}}} - result, err := facade.GetMeterStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.Results, gc.HasLen, 1) - c.Assert(result.Results[0].Error, gc.ErrorMatches, "permission denied") - c.Assert(result.Results[0].Code, gc.Equals, "") - c.Assert(result.Results[0].Info, gc.Equals, "") -} - -func (s *uniterBaseSuite) testGetMeterStatusBadTag(c *gc.C, facade getMeterStatus) { - tags := []string{ - "user-admin", - "unit-nosuchunit", - "thisisnotatag", - "machine-0", - "environment-blah", - } - args := params.Entities{Entities: make([]params.Entity, len(tags))} - for i, tag := range tags { - args.Entities[i] = params.Entity{Tag: tag} - } - result, err := facade.GetMeterStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.Results, gc.HasLen, len(tags)) - for i, result := range result.Results { - c.Logf("checking result %d", i) - c.Assert(result.Code, gc.Equals, "") - c.Assert(result.Info, gc.Equals, "") - c.Assert(result.Error, gc.ErrorMatches, "permission denied") - } -} - -func (s *uniterBaseSuite) testWatchMeterStatus( - c *gc.C, - facade interface { - WatchMeterStatus(args params.Entities) (params.NotifyWatchResults, error) - }, -) { - c.Assert(s.resources.Count(), gc.Equals, 0) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - }} - result, err := facade.WatchMeterStatus(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ - Results: []params.NotifyWatchResult{ - {Error: apiservertesting.ErrUnauthorized}, - {NotifyWatcherId: "1"}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch call) - wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher)) - wc.AssertNoChange() - - err = s.wordpressUnit.SetMeterStatus("GREEN", "No additional information.") - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() -} - -func (s *uniterBaseSuite) assertOneStringsWatcher(c *gc.C, result params.StringsWatchResults, err error) { - c.Assert(err, jc.ErrorIsNil) - c.Assert(result.Results, gc.HasLen, 3) - c.Assert(result.Results[0].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) - c.Assert(result.Results[1].StringsWatcherId, gc.Equals, "1") - c.Assert(result.Results[1].Changes, gc.NotNil) - c.Assert(result.Results[1].Error, gc.IsNil) - c.Assert(result.Results[2].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) - - // Verify the resource was registered and stop when done - c.Assert(s.resources.Count(), gc.Equals, 1) - resource := s.resources.Get("1") - defer statetesting.AssertStop(c, resource) - - // Check that the Watch has consumed the initial event ("returned" in - // the Watch call) - wc := 
statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher)) - wc.AssertNoChange() -} - -func (s *uniterBaseSuite) assertInScope(c *gc.C, relUnit *state.RelationUnit, inScope bool) { - ok, err := relUnit.InScope() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ok, gc.Equals, inScope) -} - -func (s *uniterBaseSuite) addRelation(c *gc.C, first, second string) *state.Relation { - eps, err := s.State.InferEndpoints(first, second) - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - return rel -} - -func (s *uniterBaseSuite) addRelatedService(c *gc.C, firstSvc, relatedSvc string, unit *state.Unit) (*state.Relation, *state.Service, *state.Unit) { - relatedService := s.AddTestingService(c, relatedSvc, s.AddTestingCharm(c, relatedSvc)) - rel := s.addRelation(c, firstSvc, relatedSvc) - relUnit, err := rel.Unit(unit) - c.Assert(err, jc.ErrorIsNil) - err = relUnit.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - relatedUnit, err := s.State.Unit(relatedSvc + "/0") - c.Assert(err, jc.ErrorIsNil) - return rel, relatedService, relatedUnit -} - -func checkUnorderedActionIdsEqual(c *gc.C, ids []string, results params.StringsWatchResults) { - c.Assert(results, gc.NotNil) - content := results.Results - c.Assert(len(content), gc.Equals, 1) - result := content[0] - c.Assert(result.StringsWatcherId, gc.Equals, "1") - obtainedIds := map[string]int{} - expectedIds := map[string]int{} - for _, id := range ids { - expectedIds[id]++ - } - // The count of each ID that has been seen. - for _, change := range result.Changes { - obtainedIds[change]++ - } - c.Check(obtainedIds, jc.DeepEquals, expectedIds) -} === added file 'src/github.com/juju/juju/apiserver/uniter/uniter_test.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2554 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package uniter_test + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/common" + commontesting "github.com/juju/juju/apiserver/common/testing" + "github.com/juju/juju/apiserver/params" + apiservertesting "github.com/juju/juju/apiserver/testing" + "github.com/juju/juju/apiserver/uniter" + "github.com/juju/juju/juju/testing" + "github.com/juju/juju/network" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + statetesting "github.com/juju/juju/state/testing" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" + jujuFactory "github.com/juju/juju/testing/factory" +) + +// uniterSuite implements common testing suite for all API +// versions. It's not intended to be used directly or registered as a +// suite, but embedded. 
+type uniterSuite struct { + testing.JujuConnSuite + + authorizer apiservertesting.FakeAuthorizer + resources *common.Resources + uniter *uniter.UniterAPIV3 + + machine0 *state.Machine + machine1 *state.Machine + wordpress *state.Service + wpCharm *state.Charm + mysql *state.Service + wordpressUnit *state.Unit + mysqlUnit *state.Unit + + meteredService *state.Service + meteredCharm *state.Charm + meteredUnit *state.Unit +} + +var _ = gc.Suite(&uniterSuite{}) + +func (s *uniterSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + factory := jujuFactory.NewFactory(s.State) + // Create two machines, two services and add a unit to each service. + s.machine0 = factory.MakeMachine(c, &jujuFactory.MachineParams{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits, state.JobManageModel}, + }) + s.machine1 = factory.MakeMachine(c, &jujuFactory.MachineParams{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + }) + s.wpCharm = factory.MakeCharm(c, &jujuFactory.CharmParams{ + Name: "wordpress", + URL: "cs:quantal/wordpress-3", + }) + s.wordpress = factory.MakeService(c, &jujuFactory.ServiceParams{ + Name: "wordpress", + Charm: s.wpCharm, + Creator: s.AdminUserTag(c), + }) + mysqlCharm := factory.MakeCharm(c, &jujuFactory.CharmParams{ + Name: "mysql", + }) + s.mysql = factory.MakeService(c, &jujuFactory.ServiceParams{ + Name: "mysql", + Charm: mysqlCharm, + Creator: s.AdminUserTag(c), + }) + s.wordpressUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ + Service: s.wordpress, + Machine: s.machine0, + }) + s.mysqlUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ + Service: s.mysql, + Machine: s.machine1, + }) + + s.meteredCharm = s.Factory.MakeCharm(c, &jujuFactory.CharmParams{ + Name: "metered", + URL: "cs:quantal/metered", + }) + s.meteredService = s.Factory.MakeService(c, &jujuFactory.ServiceParams{ + Charm: s.meteredCharm, + }) + s.meteredUnit = s.Factory.MakeUnit(c, &jujuFactory.UnitParams{ + Service: s.meteredService, + SetCharmURL: true, + }) + + // Create a FakeAuthorizer so we can check permissions, + // set up assuming unit 0 has logged in. + s.authorizer = apiservertesting.FakeAuthorizer{ + Tag: s.wordpressUnit.Tag(), + } + + // Create the resource registry separately to track invocations to + // Register. 
+ s.resources = common.NewResources() + s.AddCleanup(func(_ *gc.C) { s.resources.StopAll() }) + + uniterAPIV3, err := uniter.NewUniterAPIV3( + s.State, + s.resources, + s.authorizer, + ) + c.Assert(err, jc.ErrorIsNil) + s.uniter = uniterAPIV3 +} + +func (s *uniterSuite) TestUniterFailsWithNonUnitAgentUser(c *gc.C) { + anAuthorizer := s.authorizer + anAuthorizer.Tag = names.NewMachineTag("9") + _, err := uniter.NewUniterAPIV3(s.State, s.resources, anAuthorizer) + c.Assert(err, gc.NotNil) + c.Assert(err, gc.ErrorMatches, "permission denied") +} + +func (s *uniterSuite) TestSetStatus(c *gc.C) { + err := s.wordpressUnit.SetAgentStatus(state.StatusExecuting, "blah", nil) + c.Assert(err, jc.ErrorIsNil) + err = s.mysqlUnit.SetAgentStatus(state.StatusExecuting, "foo", nil) + c.Assert(err, jc.ErrorIsNil) + + args := params.SetStatus{ + Entities: []params.EntityStatusArgs{ + {Tag: "unit-mysql-0", Status: params.StatusError, Info: "not really"}, + {Tag: "unit-wordpress-0", Status: params.StatusRebooting, Info: "foobar"}, + {Tag: "unit-foo-42", Status: params.StatusActive, Info: "blah"}, + }} + result, err := s.uniter.SetStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify mysqlUnit - no change. + statusInfo, err := s.mysqlUnit.AgentStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(statusInfo.Status, gc.Equals, state.StatusExecuting) + c.Assert(statusInfo.Message, gc.Equals, "foo") + // ...wordpressUnit is fine though. + statusInfo, err = s.wordpressUnit.AgentStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(statusInfo.Status, gc.Equals, state.StatusRebooting) + c.Assert(statusInfo.Message, gc.Equals, "foobar") +} + +func (s *uniterSuite) TestSetAgentStatus(c *gc.C) { + err := s.wordpressUnit.SetAgentStatus(state.StatusExecuting, "blah", nil) + c.Assert(err, jc.ErrorIsNil) + err = s.mysqlUnit.SetAgentStatus(state.StatusExecuting, "foo", nil) + c.Assert(err, jc.ErrorIsNil) + + args := params.SetStatus{ + Entities: []params.EntityStatusArgs{ + {Tag: "unit-mysql-0", Status: params.StatusError, Info: "not really"}, + {Tag: "unit-wordpress-0", Status: params.StatusExecuting, Info: "foobar"}, + {Tag: "unit-foo-42", Status: params.StatusRebooting, Info: "blah"}, + }} + result, err := s.uniter.SetAgentStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify mysqlUnit - no change. + statusInfo, err := s.mysqlUnit.AgentStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(statusInfo.Status, gc.Equals, state.StatusExecuting) + c.Assert(statusInfo.Message, gc.Equals, "foo") + // ...wordpressUnit is fine though. 
+ statusInfo, err = s.wordpressUnit.AgentStatus() + c.Assert(err, jc.ErrorIsNil) + c.Assert(statusInfo.Status, gc.Equals, state.StatusExecuting) + c.Assert(statusInfo.Message, gc.Equals, "foobar") +} + +func (s *uniterSuite) TestSetUnitStatus(c *gc.C) { + err := s.wordpressUnit.SetStatus(state.StatusActive, "blah", nil) + c.Assert(err, jc.ErrorIsNil) + err = s.mysqlUnit.SetStatus(state.StatusTerminated, "foo", nil) + c.Assert(err, jc.ErrorIsNil) + + args := params.SetStatus{ + Entities: []params.EntityStatusArgs{ + {Tag: "unit-mysql-0", Status: params.StatusError, Info: "not really"}, + {Tag: "unit-wordpress-0", Status: params.StatusTerminated, Info: "foobar"}, + {Tag: "unit-foo-42", Status: params.StatusActive, Info: "blah"}, + }} + result, err := s.uniter.SetUnitStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify mysqlUnit - no change. + statusInfo, err := s.mysqlUnit.Status() + c.Assert(err, jc.ErrorIsNil) + c.Assert(statusInfo.Status, gc.Equals, state.StatusTerminated) + c.Assert(statusInfo.Message, gc.Equals, "foo") + // ...wordpressUnit is fine though. + statusInfo, err = s.wordpressUnit.Status() + c.Assert(err, jc.ErrorIsNil) + c.Assert(statusInfo.Status, gc.Equals, state.StatusTerminated) + c.Assert(statusInfo.Message, gc.Equals, "foobar") +} + +func (s *uniterSuite) TestLife(c *gc.C) { + // Add a relation wordpress-mysql. + rel := s.addRelation(c, "wordpress", "mysql") + relUnit, err := rel.Unit(s.wordpressUnit) + c.Assert(err, jc.ErrorIsNil) + err = relUnit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(rel.Life(), gc.Equals, state.Alive) + + // Make the wordpressUnit dead. + err = s.wordpressUnit.EnsureDead() + c.Assert(err, jc.ErrorIsNil) + err = s.wordpressUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Dead) + + // Add another unit, so the service will stay dying when we + // destroy it later. + extraUnit, err := s.wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + c.Assert(extraUnit, gc.NotNil) + + // Make the wordpress service dying. + err = s.wordpress.Destroy() + c.Assert(err, jc.ErrorIsNil) + err = s.wordpress.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.wordpress.Life(), gc.Equals, state.Dying) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + {Tag: "service-mysql"}, + {Tag: "service-wordpress"}, + {Tag: "machine-0"}, + {Tag: "machine-1"}, + {Tag: "machine-42"}, + {Tag: "service-foo"}, + // TODO(dfc) these aren't valid tags any more + // but I hope to restore this test when params.Entity takes + // tags, not strings, which is coming soon. 
+ // {Tag: "just-foo"}, + {Tag: rel.Tag().String()}, + {Tag: "relation-svc1.rel1#svc2.rel2"}, + // {Tag: "relation-blah"}, + }} + result, err := s.uniter.Life(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.LifeResults{ + Results: []params.LifeResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Life: "dead"}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Life: "dying"}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + // TODO(dfc) see above + // {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + // {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestEnsureDead(c *gc.C) { + c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Alive) + c.Assert(s.mysqlUnit.Life(), gc.Equals, state.Alive) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.EnsureDead(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + err = s.wordpressUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Dead) + err = s.mysqlUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.mysqlUnit.Life(), gc.Equals, state.Alive) + + // Try it again on a Dead unit; should work. + args = params.Entities{ + Entities: []params.Entity{{Tag: "unit-wordpress-0"}}, + } + result, err = s.uniter.EnsureDead(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{{nil}}, + }) + + // Verify Life is unchanged. + err = s.wordpressUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Dead) +} + +func (s *uniterSuite) TestWatch(c *gc.C) { + c.Assert(s.resources.Count(), gc.Equals, 0) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + {Tag: "service-mysql"}, + {Tag: "service-wordpress"}, + {Tag: "service-foo"}, + // TODO(dfc) these aren't valid tags any more + // but I hope to restore this test when params.Entity takes + // tags, not strings, which is coming soon. 
+		// {Tag: "just-foo"},
+	}}
+	result, err := s.uniter.Watch(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{
+		Results: []params.NotifyWatchResult{
+			{Error: apiservertesting.ErrUnauthorized},
+			{NotifyWatcherId: "1"},
+			{Error: apiservertesting.ErrUnauthorized},
+			{Error: apiservertesting.ErrUnauthorized},
+			{NotifyWatcherId: "2"},
+			{Error: apiservertesting.ErrUnauthorized},
+			// see above
+			// {Error: apiservertesting.ErrUnauthorized},
+		},
+	})
+
+	// Verify the resource was registered and stop when done
+	c.Assert(s.resources.Count(), gc.Equals, 2)
+	resource1 := s.resources.Get("1")
+	defer statetesting.AssertStop(c, resource1)
+	resource2 := s.resources.Get("2")
+	defer statetesting.AssertStop(c, resource2)
+
+	// Check that the Watch has consumed the initial event ("returned" in
+	// the Watch call)
+	wc := statetesting.NewNotifyWatcherC(c, s.State, resource1.(state.NotifyWatcher))
+	wc.AssertNoChange()
+	wc = statetesting.NewNotifyWatcherC(c, s.State, resource2.(state.NotifyWatcher))
+	wc.AssertNoChange()
+}
+
+func (s *uniterSuite) TestPublicAddress(c *gc.C) {
+	// Try first without setting an address.
+	args := params.Entities{Entities: []params.Entity{
+		{Tag: "unit-mysql-0"},
+		{Tag: "unit-wordpress-0"},
+		{Tag: "unit-foo-42"},
+	}}
+	expectErr := &params.Error{
+		Code:    params.CodeNoAddressSet,
+		Message: `"unit-wordpress-0" has no public address set`,
+	}
+	result, err := s.uniter.PublicAddress(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result, gc.DeepEquals, params.StringResults{
+		Results: []params.StringResult{
+			{Error: apiservertesting.ErrUnauthorized},
+			{Error: expectErr},
+			{Error: apiservertesting.ErrUnauthorized},
+		},
+	})
+
+	// Now set it and try again.
+	err = s.machine0.SetProviderAddresses(
+		network.NewScopedAddress("1.2.3.4", network.ScopePublic),
+	)
+	c.Assert(err, jc.ErrorIsNil)
+	address, err := s.wordpressUnit.PublicAddress()
+	c.Assert(address.Value, gc.Equals, "1.2.3.4")
+	c.Assert(err, jc.ErrorIsNil)
+
+	result, err = s.uniter.PublicAddress(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result, gc.DeepEquals, params.StringResults{
+		Results: []params.StringResult{
+			{Error: apiservertesting.ErrUnauthorized},
+			{Result: "1.2.3.4"},
+			{Error: apiservertesting.ErrUnauthorized},
+		},
+	})
+}
+
+func (s *uniterSuite) TestPrivateAddress(c *gc.C) {
+	args := params.Entities{Entities: []params.Entity{
+		{Tag: "unit-mysql-0"},
+		{Tag: "unit-wordpress-0"},
+		{Tag: "unit-foo-42"},
+	}}
+	expectErr := &params.Error{
+		Code:    params.CodeNoAddressSet,
+		Message: `"unit-wordpress-0" has no private address set`,
+	}
+	result, err := s.uniter.PrivateAddress(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result, gc.DeepEquals, params.StringResults{
+		Results: []params.StringResult{
+			{Error: apiservertesting.ErrUnauthorized},
+			{Error: expectErr},
+			{Error: apiservertesting.ErrUnauthorized},
+		},
+	})
+
+	// Now set it and try again.
+ err = s.machine0.SetProviderAddresses( + network.NewScopedAddress("1.2.3.4", network.ScopeCloudLocal), + ) + c.Assert(err, jc.ErrorIsNil) + address, err := s.wordpressUnit.PrivateAddress() + c.Assert(address.Value, gc.Equals, "1.2.3.4") + c.Assert(err, jc.ErrorIsNil) + + result, err = s.uniter.PrivateAddress(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringResults{ + Results: []params.StringResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: "1.2.3.4"}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestAvailabilityZone(c *gc.C) { + s.PatchValue(uniter.GetZone, func(st *state.State, tag names.Tag) (string, error) { + return "a_zone", nil + }) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-wordpress-0"}, + }} + result, err := s.uniter.AvailabilityZone(args) + c.Assert(err, jc.ErrorIsNil) + + c.Check(result, gc.DeepEquals, params.StringResults{ + Results: []params.StringResult{ + {Result: "a_zone"}, + }, + }) +} + +func (s *uniterSuite) TestResolved(c *gc.C) { + err := s.wordpressUnit.SetResolved(state.ResolvedRetryHooks) + c.Assert(err, jc.ErrorIsNil) + mode := s.wordpressUnit.Resolved() + c.Assert(mode, gc.Equals, state.ResolvedRetryHooks) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.Resolved(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ResolvedModeResults{ + Results: []params.ResolvedModeResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Mode: params.ResolvedMode(mode)}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestClearResolved(c *gc.C) { + err := s.wordpressUnit.SetResolved(state.ResolvedRetryHooks) + c.Assert(err, jc.ErrorIsNil) + mode := s.wordpressUnit.Resolved() + c.Assert(mode, gc.Equals, state.ResolvedRetryHooks) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.ClearResolved(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify wordpressUnit's resolved mode has changed. + err = s.wordpressUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + mode = s.wordpressUnit.Resolved() + c.Assert(mode, gc.Equals, state.ResolvedNone) +} + +func (s *uniterSuite) TestGetPrincipal(c *gc.C) { + // Add a subordinate to wordpressUnit. + _, _, subordinate := s.addRelatedService(c, "wordpress", "logging", s.wordpressUnit) + + principal, ok := subordinate.PrincipalName() + c.Assert(principal, gc.Equals, s.wordpressUnit.Name()) + c.Assert(ok, jc.IsTrue) + + // First try it as wordpressUnit's agent. + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: subordinate.Tag().String()}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.GetPrincipal(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringBoolResults{ + Results: []params.StringBoolResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: "", Ok: false, Error: nil}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) + + // Now try as subordinate's agent. 
+ subAuthorizer := s.authorizer + subAuthorizer.Tag = subordinate.Tag() + subUniter, err := uniter.NewUniterAPIV3(s.State, s.resources, subAuthorizer) + c.Assert(err, jc.ErrorIsNil) + + result, err = subUniter.GetPrincipal(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringBoolResults{ + Results: []params.StringBoolResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Result: "unit-wordpress-0", Ok: true, Error: nil}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestHasSubordinates(c *gc.C) { + // Try first without any subordinates for wordpressUnit. + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-logging-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.HasSubordinates(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.BoolResults{ + Results: []params.BoolResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: false}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) + + // Add two subordinates to wordpressUnit and try again. + s.addRelatedService(c, "wordpress", "logging", s.wordpressUnit) + s.addRelatedService(c, "wordpress", "monitoring", s.wordpressUnit) + + result, err = s.uniter.HasSubordinates(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.BoolResults{ + Results: []params.BoolResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: true}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestDestroy(c *gc.C) { + c.Assert(s.wordpressUnit.Life(), gc.Equals, state.Alive) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.Destroy(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify wordpressUnit is destroyed and removed. + err = s.wordpressUnit.Refresh() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *uniterSuite) TestDestroyAllSubordinates(c *gc.C) { + // Add two subordinates to wordpressUnit. + _, _, loggingSub := s.addRelatedService(c, "wordpress", "logging", s.wordpressUnit) + _, _, monitoringSub := s.addRelatedService(c, "wordpress", "monitoring", s.wordpressUnit) + c.Assert(loggingSub.Life(), gc.Equals, state.Alive) + c.Assert(monitoringSub.Life(), gc.Equals, state.Alive) + + err := s.wordpressUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + subordinates := s.wordpressUnit.SubordinateNames() + c.Assert(subordinates, gc.DeepEquals, []string{"logging/0", "monitoring/0"}) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.DestroyAllSubordinates(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify wordpressUnit's subordinates were destroyed. 
+ err = loggingSub.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(loggingSub.Life(), gc.Equals, state.Dying) + err = monitoringSub.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(monitoringSub.Life(), gc.Equals, state.Dying) +} + +func (s *uniterSuite) TestCharmURL(c *gc.C) { + // Set wordpressUnit's charm URL first. + err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) + c.Assert(err, jc.ErrorIsNil) + curl, ok := s.wordpressUnit.CharmURL() + c.Assert(curl, gc.DeepEquals, s.wpCharm.URL()) + c.Assert(ok, jc.IsTrue) + + // Make sure wordpress service's charm is what we expect. + curl, force := s.wordpress.CharmURL() + c.Assert(curl, gc.DeepEquals, s.wpCharm.URL()) + c.Assert(force, jc.IsFalse) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + {Tag: "service-mysql"}, + {Tag: "service-wordpress"}, + {Tag: "service-foo"}, + // TODO(dfc) these aren't valid tags any more + // but I hope to restore this test when params.Entity takes + // tags, not strings, which is coming soon. + // {Tag: "just-foo"}, + }} + result, err := s.uniter.CharmURL(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringBoolResults{ + Results: []params.StringBoolResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: s.wpCharm.String(), Ok: ok}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Result: s.wpCharm.String(), Ok: force}, + {Error: apiservertesting.ErrUnauthorized}, + // see above + // {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestSetCharmURL(c *gc.C) { + _, ok := s.wordpressUnit.CharmURL() + c.Assert(ok, jc.IsFalse) + + args := params.EntitiesCharmURL{Entities: []params.EntityCharmURL{ + {Tag: "unit-mysql-0", CharmURL: "cs:quantal/service-42"}, + {Tag: "unit-wordpress-0", CharmURL: s.wpCharm.String()}, + {Tag: "unit-foo-42", CharmURL: "cs:quantal/foo-321"}, + }} + result, err := s.uniter.SetCharmURL(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the charm URL was set. 
+ err = s.wordpressUnit.Refresh() + c.Assert(err, jc.ErrorIsNil) + charmUrl, needsUpgrade := s.wordpressUnit.CharmURL() + c.Assert(charmUrl, gc.NotNil) + c.Assert(charmUrl.String(), gc.Equals, s.wpCharm.String()) + c.Assert(needsUpgrade, jc.IsTrue) +} + +func (s *uniterSuite) TestCharmModifiedVersion(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: "service-mysql"}, + {Tag: "service-wordpress"}, + {Tag: "unit-wordpress-0"}, + {Tag: "service-foo"}, + }} + result, err := s.uniter.CharmModifiedVersion(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.IntResults{ + Results: []params.IntResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: s.wordpress.CharmModifiedVersion()}, + {Result: s.wordpress.CharmModifiedVersion()}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestOpenPorts(c *gc.C) { + openedPorts, err := s.wordpressUnit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(openedPorts, gc.HasLen, 0) + + args := params.EntitiesPortRanges{Entities: []params.EntityPortRange{ + {Tag: "unit-mysql-0", Protocol: "tcp", FromPort: 1234, ToPort: 1400}, + {Tag: "unit-wordpress-0", Protocol: "udp", FromPort: 4321, ToPort: 5000}, + {Tag: "unit-foo-42", Protocol: "tcp", FromPort: 42, ToPort: 42}, + }} + result, err := s.uniter.OpenPorts(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the wordpressUnit's port is opened. + openedPorts, err = s.wordpressUnit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(openedPorts, gc.DeepEquals, []network.PortRange{ + {Protocol: "udp", FromPort: 4321, ToPort: 5000}, + }) +} + +func (s *uniterSuite) TestClosePorts(c *gc.C) { + // Open port udp:4321 in advance on wordpressUnit. + err := s.wordpressUnit.OpenPorts("udp", 4321, 5000) + c.Assert(err, jc.ErrorIsNil) + openedPorts, err := s.wordpressUnit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(openedPorts, gc.DeepEquals, []network.PortRange{ + {Protocol: "udp", FromPort: 4321, ToPort: 5000}, + }) + + args := params.EntitiesPortRanges{Entities: []params.EntityPortRange{ + {Tag: "unit-mysql-0", Protocol: "tcp", FromPort: 1234, ToPort: 1400}, + {Tag: "unit-wordpress-0", Protocol: "udp", FromPort: 4321, ToPort: 5000}, + {Tag: "unit-foo-42", Protocol: "tcp", FromPort: 42, ToPort: 42}, + }} + result, err := s.uniter.ClosePorts(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the wordpressUnit's port is closed. 
+	openedPorts, err = s.wordpressUnit.OpenedPorts()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(openedPorts, gc.HasLen, 0)
+}
+
+func (s *uniterSuite) TestWatchConfigSettings(c *gc.C) {
+	err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL())
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Assert(s.resources.Count(), gc.Equals, 0)
+
+	args := params.Entities{Entities: []params.Entity{
+		{Tag: "unit-mysql-0"},
+		{Tag: "unit-wordpress-0"},
+		{Tag: "unit-foo-42"},
+	}}
+	result, err := s.uniter.WatchConfigSettings(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{
+		Results: []params.NotifyWatchResult{
+			{Error: apiservertesting.ErrUnauthorized},
+			{NotifyWatcherId: "1"},
+			{Error: apiservertesting.ErrUnauthorized},
+		},
+	})
+
+	// Verify the resource was registered and stop when done
+	c.Assert(s.resources.Count(), gc.Equals, 1)
+	resource := s.resources.Get("1")
+	defer statetesting.AssertStop(c, resource)
+
+	// Check that the Watch has consumed the initial event ("returned" in
+	// the Watch call)
+	wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher))
+	wc.AssertNoChange()
+}
+
+func (s *uniterSuite) TestWatchActionNotifications(c *gc.C) {
+	err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL())
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Assert(s.resources.Count(), gc.Equals, 0)
+
+	args := params.Entities{Entities: []params.Entity{
+		{Tag: "unit-mysql-0"},
+		{Tag: "unit-wordpress-0"},
+		{Tag: "unit-foo-42"},
+	}}
+	result, err := s.uniter.WatchActionNotifications(args)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(result, gc.DeepEquals, params.StringsWatchResults{
+		Results: []params.StringsWatchResult{
+			{Error: apiservertesting.ErrUnauthorized},
+			{StringsWatcherId: "1"},
+			{Error: apiservertesting.ErrUnauthorized},
+		},
+	})
+
+	// Verify the resource was registered and stop when done
+	c.Assert(s.resources.Count(), gc.Equals, 1)
+	resource := s.resources.Get("1")
+	defer statetesting.AssertStop(c, resource)
+
+	// Check that the Watch has consumed the initial event ("returned" in
+	// the Watch call)
+	wc := statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher))
+	wc.AssertNoChange()
+
+	addedAction, err := s.wordpressUnit.AddAction("fakeaction", nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	wc.AssertChange(addedAction.Id())
+	wc.AssertNoChange()
+}
+
+func (s *uniterSuite) TestWatchPreexistingActions(c *gc.C) {
+	err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL())
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Assert(s.resources.Count(), gc.Equals, 0)
+
+	action1, err := s.wordpressUnit.AddAction("fakeaction", nil)
+	c.Assert(err, jc.ErrorIsNil)
+	action2, err := s.wordpressUnit.AddAction("fakeaction", nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	args := params.Entities{Entities: []params.Entity{
+		{Tag: "unit-wordpress-0"},
+	}}
+
+	s.State.StartSync()
+	results, err := s.uniter.WatchActionNotifications(args)
+	c.Assert(err, jc.ErrorIsNil)
+
+	checkUnorderedActionIdsEqual(c, []string{action1.Id(), action2.Id()}, results)
+
+	// Verify the resource was registered and stop when done
+	c.Assert(s.resources.Count(), gc.Equals, 1)
+	resource := s.resources.Get("1")
+	defer statetesting.AssertStop(c, resource)
+
+	// Check that the Watch has consumed the initial event ("returned" in
+	// the Watch call)
+	wc := statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher))
+	wc.AssertNoChange()
+
+	addedAction, err := s.wordpressUnit.AddAction("fakeaction", nil)
+	c.Assert(err, jc.ErrorIsNil)
+	wc.AssertChange(addedAction.Id())
+	wc.AssertNoChange()
+}
+
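+// The watcher tests above all follow the same shape: call the facade method
+// to register a watcher, drain the initial event that the facade call itself
+// consumed, mutate state, then assert that exactly the expected change (and
+// nothing else) arrives. A minimal sketch of that shape, assuming a strings
+// watcher registered as resource "1" (all names as used in the tests above):
+//
+//	resource := s.resources.Get("1")
+//	defer statetesting.AssertStop(c, resource)
+//	wc := statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher))
+//	wc.AssertNoChange() // initial event already consumed by the facade call
+//	addedAction, err := s.wordpressUnit.AddAction("fakeaction", nil)
+//	c.Assert(err, jc.ErrorIsNil)
+//	wc.AssertChange(addedAction.Id())
+//	wc.AssertNoChange()
+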
+func (s *uniterSuite) TestWatchActionNotificationsMalformedTag(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: "ewenit-mysql-0"}, + }} + _, err := s.uniter.WatchActionNotifications(args) + c.Assert(err, gc.NotNil) + c.Assert(err.Error(), gc.Equals, `"ewenit-mysql-0" is not a valid tag`) +} + +func (s *uniterSuite) TestWatchActionNotificationsMalformedUnitName(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-01"}, + }} + _, err := s.uniter.WatchActionNotifications(args) + c.Assert(err, gc.NotNil) + c.Assert(err.Error(), gc.Equals, `"unit-mysql-01" is not a valid unit tag`) +} + +func (s *uniterSuite) TestWatchActionNotificationsNotUnit(c *gc.C) { + action, err := s.mysqlUnit.AddAction("fakeaction", nil) + c.Assert(err, jc.ErrorIsNil) + args := params.Entities{Entities: []params.Entity{ + {Tag: action.Tag().String()}, + }} + _, err = s.uniter.WatchActionNotifications(args) + c.Assert(err, gc.NotNil) + c.Assert(err.Error(), gc.Equals, `"action-`+action.Id()+`" is not a valid unit tag`) +} + +func (s *uniterSuite) TestWatchActionNotificationsPermissionDenied(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-nonexistentgarbage-0"}, + }} + results, err := s.uniter.WatchActionNotifications(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.NotNil) + c.Assert(len(results.Results), gc.Equals, 1) + result := results.Results[0] + c.Assert(result.Error, gc.NotNil) + c.Assert(result.Error.Message, gc.Equals, "permission denied") +} + +func (s *uniterSuite) TestConfigSettings(c *gc.C) { + err := s.wordpressUnit.SetCharmURL(s.wpCharm.URL()) + c.Assert(err, jc.ErrorIsNil) + settings, err := s.wordpressUnit.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{"blog-title": "My Title"}) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + }} + result, err := s.uniter.ConfigSettings(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ConfigSettingsResults{ + Results: []params.ConfigSettingsResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Settings: params.ConfigSettings{"blog-title": "My Title"}}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestWatchServiceRelations(c *gc.C) { + c.Assert(s.resources.Count(), gc.Equals, 0) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "service-mysql"}, + {Tag: "service-wordpress"}, + {Tag: "service-foo"}, + }} + result, err := s.uniter.WatchServiceRelations(args) + s.assertOneStringsWatcher(c, result, err) +} + +func (s *uniterSuite) TestCharmArchiveSha256(c *gc.C) { + dummyCharm := s.AddTestingCharm(c, "dummy") + + args := params.CharmURLs{URLs: []params.CharmURL{ + {URL: "something-invalid"}, + {URL: s.wpCharm.String()}, + {URL: dummyCharm.String()}, + }} + result, err := s.uniter.CharmArchiveSha256(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringResults{ + Results: []params.StringResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: s.wpCharm.BundleSha256()}, + {Result: dummyCharm.BundleSha256()}, + }, + }) +} + +func (s *uniterSuite) TestCharmArchiveURLs(c *gc.C) { + dummyCharm := s.AddTestingCharm(c, "dummy") + + hostPorts := [][]network.HostPort{ + network.AddressesWithPort([]network.Address{ + network.NewScopedAddress("1.2.3.4", network.ScopePublic), + network.NewScopedAddress("0.1.2.3", network.ScopeCloudLocal), + 
}, 1234),
+ network.AddressesWithPort([]network.Address{
+ network.NewScopedAddress("1.2.3.5", network.ScopePublic),
+ }, 1234),
+ }
+ err := s.State.SetAPIHostPorts(hostPorts)
+ c.Assert(err, jc.ErrorIsNil)
+
+ args := params.CharmURLs{URLs: []params.CharmURL{
+ {URL: "something-invalid!"},
+ {URL: s.wpCharm.String()},
+ {URL: dummyCharm.String()},
+ }}
+ result, err := s.uniter.CharmArchiveURLs(args)
+ c.Assert(err, jc.ErrorIsNil)
+
+ wordpressURLs := []string{
+ fmt.Sprintf("https://0.1.2.3:1234/model/%s/charms?file=%%2A&url=cs%%3Aquantal%%2Fwordpress-3", coretesting.ModelTag.Id()),
+ fmt.Sprintf("https://1.2.3.5:1234/model/%s/charms?file=%%2A&url=cs%%3Aquantal%%2Fwordpress-3", coretesting.ModelTag.Id()),
+ }
+ dummyURLs := []string{
+ fmt.Sprintf("https://0.1.2.3:1234/model/%s/charms?file=%%2A&url=local%%3Aquantal%%2Fdummy-1", coretesting.ModelTag.Id()),
+ fmt.Sprintf("https://1.2.3.5:1234/model/%s/charms?file=%%2A&url=local%%3Aquantal%%2Fdummy-1", coretesting.ModelTag.Id()),
+ }
+
+ c.Assert(result, jc.DeepEquals, params.StringsResults{
+ Results: []params.StringsResult{
+ {Error: apiservertesting.ErrUnauthorized},
+ {Result: wordpressURLs},
+ {Result: dummyURLs},
+ },
+ })
+}
+
+func (s *uniterSuite) TestCurrentModel(c *gc.C) {
+ env, err := s.State.Model()
+ c.Assert(err, jc.ErrorIsNil)
+
+ result, err := s.uniter.CurrentModel()
+ c.Assert(err, jc.ErrorIsNil)
+ expected := params.ModelResult{
+ Name: env.Name(),
+ UUID: env.UUID(),
+ }
+ c.Assert(result, gc.DeepEquals, expected)
+}
+
+func (s *uniterSuite) TestActions(c *gc.C) {
+ var actionTests = []struct {
+ description string
+ action params.ActionResult
+ }{{
+ description: "A simple action.",
+ action: params.ActionResult{
+ Action: &params.Action{
+ Name: "fakeaction",
+ Parameters: map[string]interface{}{
+ "outfile": "foo.txt",
+ }},
+ },
+ }, {
+ description: "An action with nested parameters.",
+ action: params.ActionResult{
+ Action: &params.Action{
+ Name: "fakeaction",
+ Parameters: map[string]interface{}{
+ "outfile": "foo.bz2",
+ "compression": map[string]interface{}{
+ "kind": "bzip",
+ "quality": 5,
+ },
+ }},
+ },
+ }}
+
+ for i, actionTest := range actionTests {
+ c.Logf("test %d: %s", i, actionTest.description)
+
+ a, err := s.wordpressUnit.AddAction(
+ actionTest.action.Action.Name,
+ actionTest.action.Action.Parameters)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(names.IsValidAction(a.Id()), gc.Equals, true)
+ actionTag := names.NewActionTag(a.Id())
+ c.Assert(a.ActionTag(), gc.Equals, actionTag)
+
+ args := params.Entities{
+ Entities: []params.Entity{{
+ Tag: actionTag.String(),
+ }},
+ }
+ results, err := s.uniter.Actions(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(results.Results, gc.HasLen, 1)
+
+ actionsQueryResult := results.Results[0]
+
+ c.Assert(actionsQueryResult.Error, gc.IsNil)
+ c.Assert(actionsQueryResult.Action, jc.DeepEquals, actionTest.action)
+ }
+}
+
+func (s *uniterSuite) TestActionsNotPresent(c *gc.C) {
+ uuid, err := utils.NewUUID()
+ c.Assert(err, jc.ErrorIsNil)
+ args := params.Entities{
+ Entities: []params.Entity{{
+ Tag: names.NewActionTag(uuid.String()).String(),
+ }},
+ }
+ results, err := s.uniter.Actions(args)
+ c.Assert(err, jc.ErrorIsNil)
+
+ c.Assert(results.Results, gc.HasLen, 1)
+ actionsQueryResult := results.Results[0]
+ c.Assert(actionsQueryResult.Error, gc.NotNil)
+ c.Assert(actionsQueryResult.Error, gc.ErrorMatches, `action "[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}" not found`)
+}
+
+func (s *uniterSuite) TestActionsWrongUnit(c *gc.C) {
+ // Action doesn't match unit.
+ mysqlUnitAuthorizer := apiservertesting.FakeAuthorizer{ + Tag: s.mysqlUnit.Tag(), + } + mysqlUnitFacade, err := uniter.NewUniterAPIV3(s.State, s.resources, mysqlUnitAuthorizer) + c.Assert(err, jc.ErrorIsNil) + + action, err := s.wordpressUnit.AddAction("fakeaction", nil) + c.Assert(err, jc.ErrorIsNil) + args := params.Entities{ + Entities: []params.Entity{{ + Tag: action.Tag().String(), + }}, + } + actions, err := mysqlUnitFacade.Actions(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(actions.Results), gc.Equals, 1) + c.Assert(actions.Results[0].Error, jc.Satisfies, params.IsCodeUnauthorized) +} + +func (s *uniterSuite) TestActionsPermissionDenied(c *gc.C) { + action, err := s.mysqlUnit.AddAction("fakeaction", nil) + c.Assert(err, jc.ErrorIsNil) + args := params.Entities{ + Entities: []params.Entity{{ + Tag: action.Tag().String(), + }}, + } + actions, err := s.uniter.Actions(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(actions.Results), gc.Equals, 1) + c.Assert(actions.Results[0].Error, jc.Satisfies, params.IsCodeUnauthorized) +} + +func (s *uniterSuite) TestFinishActionsSuccess(c *gc.C) { + testName := "fakeaction" + testOutput := map[string]interface{}{"output": "completed fakeaction successfully"} + + results, err := s.wordpressUnit.CompletedActions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, ([]*state.Action)(nil)) + + action, err := s.wordpressUnit.AddAction(testName, nil) + c.Assert(err, jc.ErrorIsNil) + + actionResults := params.ActionExecutionResults{ + Results: []params.ActionExecutionResult{{ + ActionTag: action.ActionTag().String(), + Status: params.ActionCompleted, + Results: testOutput, + }}, + } + res, err := s.uniter.FinishActions(actionResults) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res, gc.DeepEquals, params.ErrorResults{Results: []params.ErrorResult{{Error: nil}}}) + + results, err = s.wordpressUnit.CompletedActions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(results), gc.Equals, 1) + c.Assert(results[0].Status(), gc.Equals, state.ActionCompleted) + res2, errstr := results[0].Results() + c.Assert(errstr, gc.Equals, "") + c.Assert(res2, gc.DeepEquals, testOutput) + c.Assert(results[0].Name(), gc.Equals, testName) +} + +func (s *uniterSuite) TestFinishActionsFailure(c *gc.C) { + testName := "fakeaction" + testError := "fakeaction was a dismal failure" + + results, err := s.wordpressUnit.CompletedActions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(results, gc.DeepEquals, ([]*state.Action)(nil)) + + action, err := s.wordpressUnit.AddAction(testName, nil) + c.Assert(err, jc.ErrorIsNil) + + actionResults := params.ActionExecutionResults{ + Results: []params.ActionExecutionResult{{ + ActionTag: action.ActionTag().String(), + Status: params.ActionFailed, + Results: nil, + Message: testError, + }}, + } + res, err := s.uniter.FinishActions(actionResults) + c.Assert(err, jc.ErrorIsNil) + c.Assert(res, gc.DeepEquals, params.ErrorResults{Results: []params.ErrorResult{{Error: nil}}}) + + results, err = s.wordpressUnit.CompletedActions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(results), gc.Equals, 1) + c.Assert(results[0].Status(), gc.Equals, state.ActionFailed) + res2, errstr := results[0].Results() + c.Assert(errstr, gc.Equals, testError) + c.Assert(res2, gc.DeepEquals, map[string]interface{}{}) + c.Assert(results[0].Name(), gc.Equals, testName) +} + +func (s *uniterSuite) TestFinishActionsAuthAccess(c *gc.C) { + good, err := s.wordpressUnit.AddAction("fakeaction", nil) + c.Assert(err, jc.ErrorIsNil) + + bad, err := 
s.mysqlUnit.AddAction("fakeaction", nil) + c.Assert(err, jc.ErrorIsNil) + + var tests = []struct { + actionTag names.ActionTag + err error + }{ + {actionTag: good.ActionTag(), err: nil}, + {actionTag: bad.ActionTag(), err: common.ErrPerm}, + } + + // Queue up actions from tests + actionResults := params.ActionExecutionResults{Results: make([]params.ActionExecutionResult, len(tests))} + for i, test := range tests { + actionResults.Results[i] = params.ActionExecutionResult{ + ActionTag: test.actionTag.String(), + Status: params.ActionCompleted, + Results: map[string]interface{}{}, + } + } + + // Invoke FinishActions + res, err := s.uniter.FinishActions(actionResults) + c.Assert(err, jc.ErrorIsNil) + + // Verify permissions errors for actions queued on different unit + for i, result := range res.Results { + expected := tests[i].err + if expected != nil { + c.Assert(result.Error, gc.NotNil) + c.Assert(result.Error.Error(), gc.Equals, expected.Error()) + } else { + c.Assert(result.Error, gc.IsNil) + } + } +} + +func (s *uniterSuite) TestBeginActions(c *gc.C) { + ten_seconds_ago := time.Now().Add(-10 * time.Second) + good, err := s.wordpressUnit.AddAction("fakeaction", nil) + c.Assert(err, jc.ErrorIsNil) + + running, err := s.wordpressUnit.RunningActions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(running), gc.Equals, 0, gc.Commentf("expected no running actions, got %d", len(running))) + + args := params.Entities{Entities: []params.Entity{{Tag: good.ActionTag().String()}}} + res, err := s.uniter.BeginActions(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(res.Results), gc.Equals, 1) + c.Assert(res.Results[0].Error, gc.IsNil) + + running, err = s.wordpressUnit.RunningActions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(running), gc.Equals, 1, gc.Commentf("expected one running action, got %d", len(running))) + c.Assert(running[0].ActionTag(), gc.Equals, good.ActionTag()) + enqueued, started := running[0].Enqueued(), running[0].Started() + c.Assert(ten_seconds_ago.Before(enqueued), jc.IsTrue, gc.Commentf("enqueued time should be after 10 seconds ago")) + c.Assert(ten_seconds_ago.Before(started), jc.IsTrue, gc.Commentf("started time should be after 10 seconds ago")) + c.Assert(started.After(enqueued) || started.Equal(enqueued), jc.IsTrue, gc.Commentf("started should be after or equal to enqueued time")) +} + +func (s *uniterSuite) TestRelation(c *gc.C) { + rel := s.addRelation(c, "wordpress", "mysql") + wpEp, err := rel.Endpoint("wordpress") + c.Assert(err, jc.ErrorIsNil) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-42", Unit: "unit-foo-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: rel.Tag().String(), Unit: "unit-foo-0"}, + {Relation: "relation-blah", Unit: "unit-wordpress-0"}, + {Relation: "service-foo", Unit: "user-foo"}, + {Relation: "foo", Unit: "bar"}, + {Relation: "unit-wordpress-0", Unit: rel.Tag().String()}, + }} + result, err := s.uniter.Relation(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.RelationResults{ + Results: []params.RelationResult{ + {Error: apiservertesting.ErrUnauthorized}, + { + Id: rel.Id(), + Key: rel.String(), + Life: params.Life(rel.Life().String()), + Endpoint: multiwatcher.Endpoint{ + ServiceName: wpEp.ServiceName, + Relation: wpEp.Relation, + }, + }, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: 
apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestRelationById(c *gc.C) { + rel := s.addRelation(c, "wordpress", "mysql") + c.Assert(rel.Id(), gc.Equals, 0) + wpEp, err := rel.Endpoint("wordpress") + c.Assert(err, jc.ErrorIsNil) + + // Add another relation to mysql service, so we can see we can't + // get it. + otherRel, _, _ := s.addRelatedService(c, "mysql", "logging", s.mysqlUnit) + + args := params.RelationIds{ + RelationIds: []int{-1, rel.Id(), otherRel.Id(), 42, 234}, + } + result, err := s.uniter.RelationById(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.RelationResults{ + Results: []params.RelationResult{ + {Error: apiservertesting.ErrUnauthorized}, + { + Id: rel.Id(), + Key: rel.String(), + Life: params.Life(rel.Life().String()), + Endpoint: multiwatcher.Endpoint{ + ServiceName: wpEp.ServiceName, + Relation: wpEp.Relation, + }, + }, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestProviderType(c *gc.C) { + cfg, err := s.State.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + + result, err := s.uniter.ProviderType() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringResult{Result: cfg.Type()}) +} + +func (s *uniterSuite) TestEnterScope(c *gc.C) { + // Set wordpressUnit's private address first. + err := s.machine0.SetProviderAddresses( + network.NewScopedAddress("1.2.3.4", network.ScopeCloudLocal), + ) + c.Assert(err, jc.ErrorIsNil) + + rel := s.addRelation(c, "wordpress", "mysql") + relUnit, err := rel.Unit(s.wordpressUnit) + c.Assert(err, jc.ErrorIsNil) + s.assertInScope(c, relUnit, false) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-42", Unit: "unit-foo-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: "relation-42", Unit: "unit-wordpress-0"}, + {Relation: "relation-foo", Unit: "unit-wordpress-0"}, + {Relation: "service-wordpress", Unit: "unit-foo-0"}, + {Relation: "foo", Unit: "bar"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: rel.Tag().String(), Unit: "service-wordpress"}, + {Relation: rel.Tag().String(), Unit: "service-mysql"}, + {Relation: rel.Tag().String(), Unit: "user-foo"}, + }} + result, err := s.uniter.EnterScope(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {nil}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the scope changes and settings. 
+ s.assertInScope(c, relUnit, true) + readSettings, err := relUnit.ReadSettings(s.wordpressUnit.Name()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(readSettings, gc.DeepEquals, map[string]interface{}{ + "private-address": "1.2.3.4", + }) +} + +func (s *uniterSuite) TestLeaveScope(c *gc.C) { + rel := s.addRelation(c, "wordpress", "mysql") + relUnit, err := rel.Unit(s.wordpressUnit) + c.Assert(err, jc.ErrorIsNil) + settings := map[string]interface{}{ + "some": "settings", + } + err = relUnit.EnterScope(settings) + c.Assert(err, jc.ErrorIsNil) + s.assertInScope(c, relUnit, true) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-42", Unit: "unit-foo-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: "relation-42", Unit: "unit-wordpress-0"}, + {Relation: "relation-foo", Unit: "unit-wordpress-0"}, + {Relation: "service-wordpress", Unit: "unit-foo-0"}, + {Relation: "foo", Unit: "bar"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: rel.Tag().String(), Unit: "service-wordpress"}, + {Relation: rel.Tag().String(), Unit: "service-mysql"}, + {Relation: rel.Tag().String(), Unit: "user-foo"}, + }} + result, err := s.uniter.LeaveScope(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {apiservertesting.ErrUnauthorized}, + {nil}, + {nil}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + {apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the scope changes. 
+ s.assertInScope(c, relUnit, false) + readSettings, err := relUnit.ReadSettings(s.wordpressUnit.Name()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(readSettings, gc.DeepEquals, settings) +} + +func (s *uniterSuite) TestJoinedRelations(c *gc.C) { + rel := s.addRelation(c, "wordpress", "mysql") + relUnit, err := rel.Unit(s.wordpressUnit) + c.Assert(err, jc.ErrorIsNil) + err = relUnit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{ + Entities: []params.Entity{ + {s.wordpressUnit.Tag().String()}, + {s.mysqlUnit.Tag().String()}, + {"unit-unknown-1"}, + {"service-wordpress"}, + {"machine-0"}, + {rel.Tag().String()}, + }, + } + expect := params.StringsResults{ + Results: []params.StringsResult{ + {Result: []string{rel.Tag().String()}}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + } + check := func() { + result, err := s.uniter.JoinedRelations(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, expect) + } + check() + err = relUnit.PrepareLeaveScope() + c.Assert(err, jc.ErrorIsNil) + check() +} + +func (s *uniterSuite) TestReadSettings(c *gc.C) { + rel := s.addRelation(c, "wordpress", "mysql") + relUnit, err := rel.Unit(s.wordpressUnit) + c.Assert(err, jc.ErrorIsNil) + settings := map[string]interface{}{ + "some": "settings", + } + err = relUnit.EnterScope(settings) + c.Assert(err, jc.ErrorIsNil) + s.assertInScope(c, relUnit, true) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-42", Unit: "unit-foo-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: "relation-42", Unit: "unit-wordpress-0"}, + {Relation: "relation-foo", Unit: ""}, + {Relation: "service-wordpress", Unit: "unit-foo-0"}, + {Relation: "foo", Unit: "bar"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: rel.Tag().String(), Unit: "service-wordpress"}, + {Relation: rel.Tag().String(), Unit: "service-mysql"}, + {Relation: rel.Tag().String(), Unit: "user-foo"}, + }} + result, err := s.uniter.ReadSettings(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.SettingsResults{ + Results: []params.SettingsResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Settings: params.Settings{ + "some": "settings", + }}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestReadSettingsWithNonStringValuesFails(c *gc.C) { + rel := s.addRelation(c, "wordpress", "mysql") + relUnit, err := rel.Unit(s.wordpressUnit) + c.Assert(err, jc.ErrorIsNil) + settings := map[string]interface{}{ + "other": "things", + "invalid-bool": false, + } + err = relUnit.EnterScope(settings) + c.Assert(err, jc.ErrorIsNil) + s.assertInScope(c, relUnit, true) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + }} + expectErr := `unexpected relation setting "invalid-bool": expected string, got bool` + result, err := 
s.uniter.ReadSettings(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, gc.DeepEquals, params.SettingsResults{
+ Results: []params.SettingsResult{
+ {Error: &params.Error{Message: expectErr}},
+ },
+ })
+}
+
+func (s *uniterSuite) TestReadRemoteSettings(c *gc.C) {
+ rel := s.addRelation(c, "wordpress", "mysql")
+ relUnit, err := rel.Unit(s.wordpressUnit)
+ c.Assert(err, jc.ErrorIsNil)
+ settings := map[string]interface{}{
+ "some": "settings",
+ }
+ err = relUnit.EnterScope(settings)
+ c.Assert(err, jc.ErrorIsNil)
+ s.assertInScope(c, relUnit, true)
+
+ // First test most of the invalid args tests and try to read the
+ // (unset) remote unit settings.
+ args := params.RelationUnitPairs{RelationUnitPairs: []params.RelationUnitPair{
+ {Relation: "relation-42", LocalUnit: "unit-foo-0", RemoteUnit: "foo"},
+ {Relation: rel.Tag().String(), LocalUnit: "unit-wordpress-0", RemoteUnit: "unit-wordpress-0"},
+ {Relation: rel.Tag().String(), LocalUnit: "unit-wordpress-0", RemoteUnit: "unit-mysql-0"},
+ {Relation: "relation-42", LocalUnit: "unit-wordpress-0", RemoteUnit: ""},
+ {Relation: "relation-foo", LocalUnit: "", RemoteUnit: ""},
+ {Relation: "service-wordpress", LocalUnit: "unit-foo-0", RemoteUnit: "user-foo"},
+ {Relation: "foo", LocalUnit: "bar", RemoteUnit: "baz"},
+ {Relation: rel.Tag().String(), LocalUnit: "unit-mysql-0", RemoteUnit: "unit-wordpress-0"},
+ {Relation: rel.Tag().String(), LocalUnit: "service-wordpress", RemoteUnit: "service-mysql"},
+ {Relation: rel.Tag().String(), LocalUnit: "service-mysql", RemoteUnit: "foo"},
+ {Relation: rel.Tag().String(), LocalUnit: "user-foo", RemoteUnit: "unit-wordpress-0"},
+ }}
+ result, err := s.uniter.ReadRemoteSettings(args)
+
+ // We don't set the remote unit settings on purpose to test the error.
+ expectErr := `cannot read settings for unit "mysql/0" in relation "wordpress:db mysql:server": settings`
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, jc.DeepEquals, params.SettingsResults{
+ Results: []params.SettingsResult{
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.NotFoundError(expectErr)},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ {Error: apiservertesting.ErrUnauthorized},
+ },
+ })
+
+ // Now leave the mysqlUnit and re-enter with new settings.
+ relUnit, err = rel.Unit(s.mysqlUnit)
+ c.Assert(err, jc.ErrorIsNil)
+ settings = map[string]interface{}{
+ "other": "things",
+ }
+ err = relUnit.LeaveScope()
+ c.Assert(err, jc.ErrorIsNil)
+ s.assertInScope(c, relUnit, false)
+ err = relUnit.EnterScope(settings)
+ c.Assert(err, jc.ErrorIsNil)
+ s.assertInScope(c, relUnit, true)
+
+ // Test the remote unit settings can be read.
+ args = params.RelationUnitPairs{RelationUnitPairs: []params.RelationUnitPair{{
+ Relation: rel.Tag().String(),
+ LocalUnit: "unit-wordpress-0",
+ RemoteUnit: "unit-mysql-0",
+ }}}
+ expect := params.SettingsResults{
+ Results: []params.SettingsResult{
+ {Settings: params.Settings{
+ "other": "things",
+ }},
+ },
+ }
+ result, err = s.uniter.ReadRemoteSettings(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, gc.DeepEquals, expect)
+
+ // Now destroy the remote unit, and check its settings can still be read.
+ err = s.mysqlUnit.Destroy()
+ c.Assert(err, jc.ErrorIsNil)
+ err = s.mysqlUnit.EnsureDead()
+ c.Assert(err, jc.ErrorIsNil)
+ err = s.mysqlUnit.Remove()
+ c.Assert(err, jc.ErrorIsNil)
+ result, err = s.uniter.ReadRemoteSettings(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, gc.DeepEquals, expect)
+}
+
+func (s *uniterSuite) TestReadRemoteSettingsWithNonStringValuesFails(c *gc.C) {
+ rel := s.addRelation(c, "wordpress", "mysql")
+ relUnit, err := rel.Unit(s.mysqlUnit)
+ c.Assert(err, jc.ErrorIsNil)
+ settings := map[string]interface{}{
+ "other": "things",
+ "invalid-bool": false,
+ }
+ err = relUnit.EnterScope(settings)
+ c.Assert(err, jc.ErrorIsNil)
+ s.assertInScope(c, relUnit, true)
+
+ args := params.RelationUnitPairs{RelationUnitPairs: []params.RelationUnitPair{{
+ Relation: rel.Tag().String(),
+ LocalUnit: "unit-wordpress-0",
+ RemoteUnit: "unit-mysql-0",
+ }}}
+ expectErr := `unexpected relation setting "invalid-bool": expected string, got bool`
+ result, err := s.uniter.ReadRemoteSettings(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, gc.DeepEquals, params.SettingsResults{
+ Results: []params.SettingsResult{
+ {Error: &params.Error{Message: expectErr}},
+ },
+ })
+}
+
+func (s *uniterSuite) TestUpdateSettings(c *gc.C) {
+ rel := s.addRelation(c, "wordpress", "mysql")
+ relUnit, err := rel.Unit(s.wordpressUnit)
+ c.Assert(err, jc.ErrorIsNil)
+ settings := map[string]interface{}{
+ "some": "settings",
+ "other": "stuff",
+ }
+ err = relUnit.EnterScope(settings)
+ c.Assert(err, jc.ErrorIsNil)
+ s.assertInScope(c, relUnit, true)
+
+ newSettings := params.Settings{
+ "some": "different",
+ "other": "",
+ }
+
+ args := params.RelationUnitsSettings{RelationUnits: []params.RelationUnitSettings{
+ {Relation: "relation-42", Unit: "unit-foo-0", Settings: nil},
+ {Relation: rel.Tag().String(), Unit: "unit-wordpress-0", Settings: newSettings},
+ {Relation: "relation-42", Unit: "unit-wordpress-0", Settings: nil},
+ {Relation: "relation-foo", Unit: "unit-wordpress-0", Settings: nil},
+ {Relation: "service-wordpress", Unit: "unit-foo-0", Settings: nil},
+ {Relation: "foo", Unit: "bar", Settings: nil},
+ {Relation: rel.Tag().String(), Unit: "unit-mysql-0", Settings: nil},
+ {Relation: rel.Tag().String(), Unit: "service-wordpress", Settings: nil},
+ {Relation: rel.Tag().String(), Unit: "service-mysql", Settings: nil},
+ {Relation: rel.Tag().String(), Unit: "user-foo", Settings: nil},
+ }}
+ result, err := s.uniter.UpdateSettings(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, gc.DeepEquals, params.ErrorResults{
+ Results: []params.ErrorResult{
+ {apiservertesting.ErrUnauthorized},
+ {nil},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ {apiservertesting.ErrUnauthorized},
+ },
+ })
+
+ // Verify the settings were saved.
+ s.assertInScope(c, relUnit, true)
+ readSettings, err := relUnit.ReadSettings(s.wordpressUnit.Name())
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(readSettings, gc.DeepEquals, map[string]interface{}{
+ "some": "different",
+ })
+}
+
+func (s *uniterSuite) TestWatchRelationUnits(c *gc.C) {
+ // Add a relation between wordpress and mysql and enter scope with
+ // mysqlUnit.
+ rel := s.addRelation(c, "wordpress", "mysql") + myRelUnit, err := rel.Unit(s.mysqlUnit) + c.Assert(err, jc.ErrorIsNil) + err = myRelUnit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + s.assertInScope(c, myRelUnit, true) + + c.Assert(s.resources.Count(), gc.Equals, 0) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-42", Unit: "unit-foo-0"}, + {Relation: rel.Tag().String(), Unit: "unit-wordpress-0"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: "relation-42", Unit: "unit-wordpress-0"}, + {Relation: "relation-foo", Unit: ""}, + {Relation: "service-wordpress", Unit: "unit-foo-0"}, + {Relation: "foo", Unit: "bar"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: rel.Tag().String(), Unit: "service-wordpress"}, + {Relation: rel.Tag().String(), Unit: "service-mysql"}, + {Relation: rel.Tag().String(), Unit: "user-foo"}, + }} + result, err := s.uniter.WatchRelationUnits(args) + c.Assert(err, jc.ErrorIsNil) + // UnitSettings versions are volatile, so we don't check them. + // We just make sure the keys of the Changed field are as + // expected. + c.Assert(result.Results, gc.HasLen, len(args.RelationUnits)) + mysqlChanges := result.Results[1].Changes + c.Assert(mysqlChanges, gc.NotNil) + changed, ok := mysqlChanges.Changed["mysql/0"] + c.Assert(ok, jc.IsTrue) + expectChanges := params.RelationUnitsChange{ + Changed: map[string]params.UnitSettings{ + "mysql/0": params.UnitSettings{changed.Version}, + }, + } + c.Assert(result, gc.DeepEquals, params.RelationUnitsWatchResults{ + Results: []params.RelationUnitsWatchResult{ + {Error: apiservertesting.ErrUnauthorized}, + { + RelationUnitsWatcherId: "1", + Changes: expectChanges, + }, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the resource was registered and stop when done + c.Assert(s.resources.Count(), gc.Equals, 1) + resource := s.resources.Get("1") + defer statetesting.AssertStop(c, resource) + + // Check that the Watch has consumed the initial event ("returned" in + // the Watch call) + wc := statetesting.NewRelationUnitsWatcherC(c, s.State, resource.(state.RelationUnitsWatcher)) + wc.AssertNoChange() + + // Leave scope with mysqlUnit and check it's detected. 
+ err = myRelUnit.LeaveScope() + c.Assert(err, jc.ErrorIsNil) + s.assertInScope(c, myRelUnit, false) + + wc.AssertChange(nil, []string{"mysql/0"}) +} + +func (s *uniterSuite) TestAPIAddresses(c *gc.C) { + hostPorts := [][]network.HostPort{ + network.NewHostPorts(1234, "0.1.2.3"), + } + err := s.State.SetAPIHostPorts(hostPorts) + c.Assert(err, jc.ErrorIsNil) + + result, err := s.uniter.APIAddresses() + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.StringsResult{ + Result: []string{"0.1.2.3:1234"}, + }) +} + +func (s *uniterSuite) TestWatchUnitAddresses(c *gc.C) { + c.Assert(s.resources.Count(), gc.Equals, 0) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + {Tag: "machine-0"}, + {Tag: "service-wordpress"}, + }} + result, err := s.uniter.WatchUnitAddresses(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.NotifyWatchResults{ + Results: []params.NotifyWatchResult{ + {Error: apiservertesting.ErrUnauthorized}, + {NotifyWatcherId: "1"}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) + + // Verify the resource was registered and stop when done + c.Assert(s.resources.Count(), gc.Equals, 1) + resource := s.resources.Get("1") + defer statetesting.AssertStop(c, resource) + + // Check that the Watch has consumed the initial event ("returned" in + // the Watch call) + wc := statetesting.NewNotifyWatcherC(c, s.State, resource.(state.NotifyWatcher)) + wc.AssertNoChange() +} + +func (s *uniterSuite) TestGetMeterStatusUnauthenticated(c *gc.C) { + args := params.Entities{Entities: []params.Entity{{s.mysqlUnit.Tag().String()}}} + result, err := s.uniter.GetMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.ErrorMatches, "permission denied") + c.Assert(result.Results[0].Code, gc.Equals, "") + c.Assert(result.Results[0].Info, gc.Equals, "") +} + +func (s *uniterSuite) TestGetMeterStatusBadTag(c *gc.C) { + tags := []string{ + "user-admin", + "unit-nosuchunit", + "thisisnotatag", + "machine-0", + "model-blah", + } + args := params.Entities{Entities: make([]params.Entity, len(tags))} + for i, tag := range tags { + args.Entities[i] = params.Entity{Tag: tag} + } + result, err := s.uniter.GetMeterStatus(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, len(tags)) + for i, result := range result.Results { + c.Logf("checking result %d", i) + c.Assert(result.Code, gc.Equals, "") + c.Assert(result.Info, gc.Equals, "") + c.Assert(result.Error, gc.ErrorMatches, "permission denied") + } +} + +func (s *uniterSuite) assertOneStringsWatcher(c *gc.C, result params.StringsWatchResults, err error) { + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 3) + c.Assert(result.Results[0].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) + c.Assert(result.Results[1].StringsWatcherId, gc.Equals, "1") + c.Assert(result.Results[1].Changes, gc.NotNil) + c.Assert(result.Results[1].Error, gc.IsNil) + c.Assert(result.Results[2].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) + + // Verify the resource was registered and stop when done + c.Assert(s.resources.Count(), gc.Equals, 1) + resource := s.resources.Get("1") + defer statetesting.AssertStop(c, resource) + + // Check that the Watch has consumed the initial event ("returned" in + // the Watch call) + wc := 
statetesting.NewStringsWatcherC(c, s.State, resource.(state.StringsWatcher)) + wc.AssertNoChange() +} + +func (s *uniterSuite) assertInScope(c *gc.C, relUnit *state.RelationUnit, inScope bool) { + ok, err := relUnit.InScope() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ok, gc.Equals, inScope) +} + +func (s *uniterSuite) addRelation(c *gc.C, first, second string) *state.Relation { + eps, err := s.State.InferEndpoints(first, second) + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + return rel +} + +func (s *uniterSuite) addRelatedService(c *gc.C, firstSvc, relatedSvc string, unit *state.Unit) (*state.Relation, *state.Service, *state.Unit) { + relatedService := s.AddTestingService(c, relatedSvc, s.AddTestingCharm(c, relatedSvc)) + rel := s.addRelation(c, firstSvc, relatedSvc) + relUnit, err := rel.Unit(unit) + c.Assert(err, jc.ErrorIsNil) + err = relUnit.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + relatedUnit, err := s.State.Unit(relatedSvc + "/0") + c.Assert(err, jc.ErrorIsNil) + return rel, relatedService, relatedUnit +} + +func (s *uniterSuite) TestRequestReboot(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: s.machine0.Tag().String()}, + {Tag: s.machine1.Tag().String()}, + {Tag: "bogus"}, + {Tag: "nasty-tag"}, + }} + errResult, err := s.uniter.RequestReboot(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(errResult, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{ + {Error: nil}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }}) + + rFlag, err := s.machine0.GetRebootFlag() + c.Assert(err, jc.ErrorIsNil) + c.Assert(rFlag, jc.IsTrue) + + rFlag, err = s.machine1.GetRebootFlag() + c.Assert(err, jc.ErrorIsNil) + c.Assert(rFlag, jc.IsFalse) +} + +func checkUnorderedActionIdsEqual(c *gc.C, ids []string, results params.StringsWatchResults) { + c.Assert(results, gc.NotNil) + content := results.Results + c.Assert(len(content), gc.Equals, 1) + result := content[0] + c.Assert(result.StringsWatcherId, gc.Equals, "1") + obtainedIds := map[string]int{} + expectedIds := map[string]int{} + for _, id := range ids { + expectedIds[id]++ + } + // The count of each ID that has been seen. + for _, change := range result.Changes { + obtainedIds[change]++ + } + c.Check(obtainedIds, jc.DeepEquals, expectedIds) +} + +func (s *uniterSuite) TestStorageAttachments(c *gc.C) { + // We need to set up a unit that has storage metadata defined. 
+ ch := s.AddTestingCharm(c, "storage-block")
+ sCons := map[string]state.StorageConstraints{
+ "data": {Pool: "", Size: 1024, Count: 1},
+ }
+ service := s.AddTestingServiceWithStorage(c, "storage-block", ch, sCons)
+ unit, err := service.AddUnit()
+ c.Assert(err, jc.ErrorIsNil)
+ err = s.State.AssignUnit(unit, state.AssignCleanEmpty)
+ c.Assert(err, jc.ErrorIsNil)
+ assignedMachineId, err := unit.AssignedMachineId()
+ c.Assert(err, jc.ErrorIsNil)
+ machine, err := s.State.Machine(assignedMachineId)
+ c.Assert(err, jc.ErrorIsNil)
+
+ volumeAttachments, err := machine.VolumeAttachments()
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(volumeAttachments, gc.HasLen, 1)
+
+ err = machine.SetProvisioned("inst-id", "fake_nonce", nil)
+ c.Assert(err, jc.ErrorIsNil)
+
+ err = s.State.SetVolumeInfo(
+ volumeAttachments[0].Volume(),
+ state.VolumeInfo{VolumeId: "vol-123", Size: 456},
+ )
+ c.Assert(err, jc.ErrorIsNil)
+
+ err = s.State.SetVolumeAttachmentInfo(
+ machine.MachineTag(),
+ volumeAttachments[0].Volume(),
+ state.VolumeAttachmentInfo{DeviceName: "xvdf1"},
+ )
+ c.Assert(err, jc.ErrorIsNil)
+
+ password, err := utils.RandomPassword()
+ c.Assert(err, jc.ErrorIsNil)
+ err = unit.SetPassword(password)
+ c.Assert(err, jc.ErrorIsNil)
+ st := s.OpenAPIAs(c, unit.Tag(), password)
+ uniter, err := st.Uniter()
+ c.Assert(err, jc.ErrorIsNil)
+
+ attachments, err := uniter.UnitStorageAttachments(unit.UnitTag())
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(attachments, gc.DeepEquals, []params.StorageAttachmentId{{
+ StorageTag: "storage-data-0",
+ UnitTag: unit.Tag().String(),
+ }})
+}
+
+func (s *uniterSuite) TestUnitStatus(c *gc.C) {
+ err := s.wordpressUnit.SetStatus(state.StatusMaintenance, "blah", nil)
+ c.Assert(err, jc.ErrorIsNil)
+ err = s.mysqlUnit.SetStatus(state.StatusTerminated, "foo", nil)
+ c.Assert(err, jc.ErrorIsNil)
+
+ args := params.Entities{
+ Entities: []params.Entity{
+ {Tag: "unit-mysql-0"},
+ {Tag: "unit-wordpress-0"},
+ {Tag: "unit-foo-42"},
+ {Tag: "machine-1"},
+ {Tag: "invalid"},
+ }}
+ result, err := s.uniter.UnitStatus(args)
+ c.Assert(err, jc.ErrorIsNil)
+ // Zero out the updated timestamps so we can easily check the results.
+ for i, statusResult := range result.Results { + r := statusResult + if r.Status != "" { + c.Assert(r.Since, gc.NotNil) + } + r.Since = nil + result.Results[i] = r + } + c.Assert(result, gc.DeepEquals, params.StatusResults{ + Results: []params.StatusResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Status: params.StatusMaintenance, Info: "blah", Data: map[string]interface{}{}}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ServerError(`"invalid" is not a valid tag`)}, + }, + }) +} + +func (s *uniterSuite) TestServiceOwner(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "service-wordpress"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + {Tag: "machine-0"}, + {Tag: "service-foo"}, + }} + result, err := s.uniter.ServiceOwner(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, params.StringResults{ + Results: []params.StringResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: s.AdminUserTag(c).String()}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestAssignedMachine(c *gc.C) { + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "unit-wordpress-0"}, + {Tag: "unit-foo-42"}, + {Tag: "service-mysql"}, + {Tag: "service-wordpress"}, + {Tag: "machine-0"}, + {Tag: "machine-1"}, + {Tag: "machine-42"}, + {Tag: "service-foo"}, + {Tag: "relation-svc1.rel1#svc2.rel2"}, + }} + result, err := s.uniter.AssignedMachine(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, params.StringResults{ + Results: []params.StringResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Result: "machine-0"}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +func (s *uniterSuite) TestAllMachinePorts(c *gc.C) { + // Verify no ports are opened yet on the machine or unit. + machinePorts, err := s.machine0.AllPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(machinePorts, gc.HasLen, 0) + unitPorts, err := s.wordpressUnit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(unitPorts, gc.HasLen, 0) + + // Add another mysql unit on machine 0. + mysqlUnit1, err := s.mysql.AddUnit() + c.Assert(err, jc.ErrorIsNil) + err = mysqlUnit1.AssignToMachine(s.machine0) + c.Assert(err, jc.ErrorIsNil) + + // Open some ports on both units. 
+ err = s.wordpressUnit.OpenPorts("tcp", 100, 200) + c.Assert(err, jc.ErrorIsNil) + err = s.wordpressUnit.OpenPorts("udp", 10, 20) + c.Assert(err, jc.ErrorIsNil) + err = mysqlUnit1.OpenPorts("tcp", 201, 250) + c.Assert(err, jc.ErrorIsNil) + err = mysqlUnit1.OpenPorts("udp", 1, 8) + c.Assert(err, jc.ErrorIsNil) + + args := params.Entities{Entities: []params.Entity{ + {Tag: "unit-mysql-0"}, + {Tag: "machine-0"}, + {Tag: "machine-1"}, + {Tag: "unit-foo-42"}, + {Tag: "machine-42"}, + {Tag: "service-wordpress"}, + }} + expectPorts := []params.MachinePortRange{ + {UnitTag: "unit-wordpress-0", PortRange: params.PortRange{100, 200, "tcp"}}, + {UnitTag: "unit-mysql-1", PortRange: params.PortRange{201, 250, "tcp"}}, + {UnitTag: "unit-mysql-1", PortRange: params.PortRange{1, 8, "udp"}}, + {UnitTag: "unit-wordpress-0", PortRange: params.PortRange{10, 20, "udp"}}, + } + result, err := s.uniter.AllMachinePorts(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.DeepEquals, params.MachinePortsResults{ + Results: []params.MachinePortsResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Ports: expectPorts}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ErrUnauthorized}, + }, + }) +} + +type unitMetricBatchesSuite struct { + uniterSuite + *commontesting.ModelWatcherTest + uniter *uniter.UniterAPIV3 +} + +var _ = gc.Suite(&unitMetricBatchesSuite{}) + +func (s *unitMetricBatchesSuite) SetUpTest(c *gc.C) { + s.uniterSuite.SetUpTest(c) + + meteredAuthorizer := apiservertesting.FakeAuthorizer{ + Tag: s.meteredUnit.Tag(), + } + var err error + s.uniter, err = uniter.NewUniterAPIV3( + s.State, + s.resources, + meteredAuthorizer, + ) + c.Assert(err, jc.ErrorIsNil) + + s.ModelWatcherTest = commontesting.NewModelWatcherTest( + s.uniter, + s.State, + s.resources, + commontesting.NoSecrets, + ) +} + +func (s *unitMetricBatchesSuite) TestAddMetricsBatch(c *gc.C) { + metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} + uuid := utils.MustNewUUID().String() + + result, err := s.uniter.AddMetricBatches(params.MetricBatchParams{ + Batches: []params.MetricBatchParam{{ + Tag: s.meteredUnit.Tag().String(), + Batch: params.MetricBatch{ + UUID: uuid, + CharmURL: s.meteredCharm.URL().String(), + Created: time.Now(), + Metrics: metrics, + }}}}, + ) + + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: []params.ErrorResult{{nil}}, + }) + c.Assert(err, jc.ErrorIsNil) + + batch, err := s.State.MetricBatch(uuid) + c.Assert(err, jc.ErrorIsNil) + c.Assert(batch.UUID(), gc.Equals, uuid) + c.Assert(batch.CharmURL(), gc.Equals, s.meteredCharm.URL().String()) + c.Assert(batch.Unit(), gc.Equals, s.meteredUnit.Name()) + storedMetrics := batch.Metrics() + c.Assert(storedMetrics, gc.HasLen, 1) + c.Assert(storedMetrics[0].Key, gc.Equals, metrics[0].Key) + c.Assert(storedMetrics[0].Value, gc.Equals, metrics[0].Value) +} + +func (s *unitMetricBatchesSuite) TestAddMetricsBatchNoCharmURL(c *gc.C) { + metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} + uuid := utils.MustNewUUID().String() + + result, err := s.uniter.AddMetricBatches(params.MetricBatchParams{ + Batches: []params.MetricBatchParam{{ + Tag: s.meteredUnit.Tag().String(), + Batch: params.MetricBatch{ + UUID: uuid, + CharmURL: s.meteredCharm.URL().String(), + Created: time.Now(), + Metrics: metrics, + }}}}) + + c.Assert(result, gc.DeepEquals, params.ErrorResults{ + Results: 
[]params.ErrorResult{{nil}}, + }) + c.Assert(err, jc.ErrorIsNil) + + batch, err := s.State.MetricBatch(uuid) + c.Assert(err, jc.ErrorIsNil) + c.Assert(batch.UUID(), gc.Equals, uuid) + c.Assert(batch.CharmURL(), gc.Equals, s.meteredCharm.URL().String()) + c.Assert(batch.Unit(), gc.Equals, s.meteredUnit.Name()) + storedMetrics := batch.Metrics() + c.Assert(storedMetrics, gc.HasLen, 1) + c.Assert(storedMetrics[0].Key, gc.Equals, metrics[0].Key) + c.Assert(storedMetrics[0].Value, gc.Equals, metrics[0].Value) +} + +func (s *unitMetricBatchesSuite) TestAddMetricsBatchDiffTag(c *gc.C) { + unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: s.meteredService, SetCharmURL: true}) + + metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} + uuid := utils.MustNewUUID().String() + + tests := []struct { + about string + tag string + expect string + }{{ + about: "different unit", + tag: unit2.Tag().String(), + expect: "permission denied", + }, { + about: "user tag", + tag: names.NewLocalUserTag("admin").String(), + expect: `"user-admin@local" is not a valid unit tag`, + }, { + about: "machine tag", + tag: names.NewMachineTag("0").String(), + expect: `"machine-0" is not a valid unit tag`, + }} + + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + result, err := s.uniter.AddMetricBatches(params.MetricBatchParams{ + Batches: []params.MetricBatchParam{{ + Tag: test.tag, + Batch: params.MetricBatch{ + UUID: uuid, + CharmURL: "", + Created: time.Now(), + Metrics: metrics, + }}}}) + + if test.expect == "" { + c.Assert(result.OneError(), jc.ErrorIsNil) + } else { + c.Assert(result.OneError(), gc.ErrorMatches, test.expect) + } + c.Assert(err, jc.ErrorIsNil) + + _, err = s.State.MetricBatch(uuid) + c.Assert(err, jc.Satisfies, errors.IsNotFound) + } +} + +type uniterNetworkConfigSuite struct { + base uniterSuite // not embedded so it doesn't run all tests. +} + +var _ = gc.Suite(&uniterNetworkConfigSuite{}) + +func (s *uniterNetworkConfigSuite) SetUpTest(c *gc.C) { + s.base.JujuConnSuite.SetUpTest(c) + + var err error + s.base.machine0, err = s.base.State.AddMachine("quantal", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + + _, err = s.base.State.AddSpace("internal", "", nil, false) + c.Assert(err, jc.ErrorIsNil) + _, err = s.base.State.AddSpace("public", "", nil, false) + c.Assert(err, jc.ErrorIsNil) + + providerAddresses := []network.Address{ + network.NewAddressOnSpace("public", "8.8.8.8"), + network.NewAddressOnSpace("", "8.8.4.4"), + network.NewAddressOnSpace("internal", "10.0.0.1"), + network.NewAddressOnSpace("internal", "10.0.0.2"), + network.NewAddressOnSpace("", "fc00::1"), + } + + err = s.base.machine0.SetProviderAddresses(providerAddresses...) 
+ c.Assert(err, jc.ErrorIsNil) + + err = s.base.machine0.SetInstanceInfo("i-am", "fake_nonce", nil, nil, nil, nil, nil) + c.Assert(err, jc.ErrorIsNil) + + factory := jujuFactory.NewFactory(s.base.State) + s.base.wpCharm = factory.MakeCharm(c, &jujuFactory.CharmParams{ + Name: "wordpress", + URL: "cs:quantal/wordpress-3", + }) + s.base.wordpress, err = s.base.State.AddService(state.AddServiceArgs{ + Name: "wordpress", + Charm: s.base.wpCharm, + Owner: s.base.AdminUserTag(c).String(), + EndpointBindings: map[string]string{ + "db": "internal", + }, + }) + c.Assert(err, jc.ErrorIsNil) + s.base.wordpressUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ + Service: s.base.wordpress, + Machine: s.base.machine0, + }) + + s.base.machine1 = factory.MakeMachine(c, &jujuFactory.MachineParams{ + Series: "quantal", + Jobs: []state.MachineJob{state.JobHostUnits}, + }) + + err = s.base.machine1.SetProviderAddresses(providerAddresses...) + c.Assert(err, jc.ErrorIsNil) + + mysqlCharm := factory.MakeCharm(c, &jujuFactory.CharmParams{ + Name: "mysql", + }) + s.base.mysql = factory.MakeService(c, &jujuFactory.ServiceParams{ + Name: "mysql", + Charm: mysqlCharm, + Creator: s.base.AdminUserTag(c), + }) + s.base.wordpressUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ + Service: s.base.wordpress, + Machine: s.base.machine0, + }) + s.base.mysqlUnit = factory.MakeUnit(c, &jujuFactory.UnitParams{ + Service: s.base.mysql, + Machine: s.base.machine1, + }) + + // Create the resource registry separately to track invocations to + // Register. + s.base.resources = common.NewResources() + s.base.AddCleanup(func(_ *gc.C) { s.base.resources.StopAll() }) + + s.setupUniterAPIForUnit(c, s.base.wordpressUnit) +} + +func (s *uniterNetworkConfigSuite) TearDownTest(c *gc.C) { + s.base.JujuConnSuite.TearDownTest(c) +} + +func (s *uniterNetworkConfigSuite) setupUniterAPIForUnit(c *gc.C, givenUnit *state.Unit) { + // Create a FakeAuthorizer so we can check permissions, set up assuming the + // given unit agent has logged in. + s.base.authorizer = apiservertesting.FakeAuthorizer{ + Tag: givenUnit.Tag(), + } + + var err error + s.base.uniter, err = uniter.NewUniterAPIV3( + s.base.State, + s.base.resources, + s.base.authorizer, + ) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *uniterNetworkConfigSuite) TestNetworkConfigPermissions(c *gc.C) { + rel := s.addRelationAndAssertInScope(c) + + args := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-42", Unit: "unit-foo-0"}, + {Relation: rel.Tag().String(), Unit: "invalid"}, + {Relation: rel.Tag().String(), Unit: "unit-mysql-0"}, + {Relation: "relation-42", Unit: s.base.wordpressUnit.Tag().String()}, + }} + + result, err := s.base.uniter.NetworkConfig(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, params.UnitNetworkConfigResults{ + Results: []params.UnitNetworkConfigResult{ + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ServerError(`"invalid" is not a valid tag`)}, + {Error: apiservertesting.ErrUnauthorized}, + {Error: apiservertesting.ServerError(`"relation-42" is not a valid relation tag`)}, + }, + }) +} + +func (s *uniterNetworkConfigSuite) addRelationAndAssertInScope(c *gc.C) *state.Relation { + // Add a relation between wordpress and mysql and enter scope with + // mysqlUnit. 
+ rel := s.base.addRelation(c, "wordpress", "mysql")
+ wpRelUnit, err := rel.Unit(s.base.wordpressUnit)
+ c.Assert(err, jc.ErrorIsNil)
+ err = wpRelUnit.EnterScope(nil)
+ c.Assert(err, jc.ErrorIsNil)
+ s.base.assertInScope(c, wpRelUnit, true)
+ return rel
+}
+
+func (s *uniterNetworkConfigSuite) TestNetworkConfigForExplicitlyBoundEndpoint(c *gc.C) {
+ rel := s.addRelationAndAssertInScope(c)
+
+ args := params.RelationUnits{RelationUnits: []params.RelationUnit{
+ {Relation: rel.Tag().String(), Unit: s.base.wordpressUnit.Tag().String()},
+ }}
+
+ // For the relation "wordpress:db mysql:server" we expect to see only
+ // addresses bound to the "internal" space, to which the "db" endpoint
+ // itself is bound.
+ expectedConfig := []params.NetworkConfig{{
+ Address: "10.0.0.1",
+ }, {
+ Address: "10.0.0.2",
+ }}
+
+ result, err := s.base.uniter.NetworkConfig(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, jc.DeepEquals, params.UnitNetworkConfigResults{
+ Results: []params.UnitNetworkConfigResult{
+ {Config: expectedConfig},
+ },
+ })
+}
+
+func (s *uniterNetworkConfigSuite) TestNetworkConfigForImplicitlyBoundEndpoint(c *gc.C) {
+ // Since wordpressUnit has an explicit binding for "db", switch the API to
+ // mysqlUnit and check "mysql:server" uses the machine preferred private
+ // address.
+ s.setupUniterAPIForUnit(c, s.base.mysqlUnit)
+ rel := s.base.addRelation(c, "mysql", "wordpress")
+ mysqlRelUnit, err := rel.Unit(s.base.mysqlUnit)
+ c.Assert(err, jc.ErrorIsNil)
+ err = mysqlRelUnit.EnterScope(nil)
+ c.Assert(err, jc.ErrorIsNil)
+ s.base.assertInScope(c, mysqlRelUnit, true)
+
+ args := params.RelationUnits{RelationUnits: []params.RelationUnit{
+ {Relation: rel.Tag().String(), Unit: s.base.mysqlUnit.Tag().String()},
+ }}
+
+ privateAddress, err := s.base.machine1.PrivateAddress()
+ c.Assert(err, jc.ErrorIsNil)
+
+ expectedConfig := []params.NetworkConfig{{Address: privateAddress.Value}}
+
+ result, err := s.base.uniter.NetworkConfig(args)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(result, jc.DeepEquals, params.UnitNetworkConfigResults{
+ Results: []params.UnitNetworkConfigResult{
+ {Config: expectedConfig},
+ },
+ })
+}
=== removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_v0.go'
--- src/github.com/juju/juju/apiserver/uniter/uniter_v0.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/apiserver/uniter/uniter_v0.go 1970-01-01 00:00:00 +0000
@@ -1,57 +0,0 @@
-// Copyright 2013 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-// The uniter package implements the API interface used by the uniter
-// worker. This file contains the API facade version 0.
-package uniter
-
-import (
- "github.com/juju/names"
-
- "github.com/juju/juju/apiserver/common"
- "github.com/juju/juju/apiserver/params"
- "github.com/juju/juju/state"
-)
-
-func init() {
- common.RegisterStandardFacade("Uniter", 0, NewUniterAPIV0)
-}
-
-// UniterAPIV0 implements the API facade version 0, used by the uniter
-// worker.
-type UniterAPIV0 struct {
- uniterBaseAPI
-}
-
-// NewUniterAPIV0 creates a new instance of the Uniter API, version 0.
-func NewUniterAPIV0(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*UniterAPIV0, error) {
- baseAPI, err := newUniterBaseAPI(st, resources, authorizer)
- if err != nil {
- return nil, err
- }
- return &UniterAPIV0{
- uniterBaseAPI: *baseAPI,
- }, nil
-}
-
-// GetOwnerTag returns the user tag of the owner of the first given
-// service tag in args.
-// -// NOTE: This is obsolete and is replaced by ServiceOwner in APIV1, -// which should be used instead. This method is not propely handling -// multiple tags and does not check for permissions. See also -// http://pad.lv/1270795. -func (u *UniterAPIV0) GetOwnerTag(args params.Entities) (params.StringResult, error) { - var nothing params.StringResult - tag, err := names.ParseServiceTag(args.Entities[0].Tag) - if err != nil { - return nothing, common.ErrPerm - } - service, err := u.getService(tag) - if err != nil { - return nothing, err - } - return params.StringResult{ - Result: service.GetOwnerTag(), - }, nil -} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_v0_test.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_v0_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_v0_test.go 1970-01-01 00:00:00 +0000 @@ -1,355 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - commontesting "github.com/juju/juju/apiserver/common/testing" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/apiserver/uniter" - "github.com/juju/juju/state" -) - -type uniterV0Suite struct { - uniterBaseSuite - *commontesting.EnvironWatcherTest - - uniter *uniter.UniterAPIV0 - meteredUniter *uniter.UniterAPIV0 -} - -var _ = gc.Suite(&uniterV0Suite{}) - -func (s *uniterV0Suite) SetUpTest(c *gc.C) { - s.uniterBaseSuite.setUpTest(c) - - uniterAPIV0, err := uniter.NewUniterAPIV0( - s.State, - s.resources, - s.authorizer, - ) - c.Assert(err, jc.ErrorIsNil) - s.uniter = uniterAPIV0 - - meteredAuthorizer := apiservertesting.FakeAuthorizer{ - Tag: s.meteredUnit.Tag(), - } - s.meteredUniter, err = uniter.NewUniterAPIV0( - s.State, - s.resources, - meteredAuthorizer, - ) - c.Assert(err, jc.ErrorIsNil) - - s.EnvironWatcherTest = commontesting.NewEnvironWatcherTest( - s.uniter, - s.State, - s.resources, - commontesting.NoSecrets, - ) -} - -func (s *uniterV0Suite) TestUniterFailsWithNonUnitAgentUser(c *gc.C) { - factory := func(st *state.State, res *common.Resources, auth common.Authorizer) error { - _, err := uniter.NewUniterAPIV0(st, res, auth) - return err - } - s.testUniterFailsWithNonUnitAgentUser(c, factory) -} - -func (s *uniterV0Suite) TestSetStatus(c *gc.C) { - s.testSetStatus(c, s.uniter) -} - -func (s *uniterV0Suite) TestLife(c *gc.C) { - s.testLife(c, s.uniter) -} - -func (s *uniterV0Suite) TestEnsureDead(c *gc.C) { - s.testEnsureDead(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatch(c *gc.C) { - s.testWatch(c, s.uniter) -} - -func (s *uniterV0Suite) TestPublicAddress(c *gc.C) { - s.testPublicAddress(c, s.uniter) -} - -func (s *uniterV0Suite) TestPrivateAddress(c *gc.C) { - s.testPrivateAddress(c, s.uniter) -} - -func (s *uniterV0Suite) TestResolved(c *gc.C) { - s.testResolved(c, s.uniter) -} - -func (s *uniterV0Suite) TestClearResolved(c *gc.C) { - s.testClearResolved(c, s.uniter) -} - -func (s *uniterV0Suite) TestGetPrincipal(c *gc.C) { - factory := func( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, - ) (getPrincipal, error) { - return uniter.NewUniterAPIV0(st, resources, authorizer) - } - s.testGetPrincipal(c, s.uniter, factory) -} - -func (s *uniterV0Suite) TestHasSubordinates(c *gc.C) { - s.testHasSubordinates(c, s.uniter) -} - -func (s 
*uniterV0Suite) TestDestroy(c *gc.C) { - s.testDestroy(c, s.uniter) -} - -func (s *uniterV0Suite) TestDestroyAllSubordinates(c *gc.C) { - s.testDestroyAllSubordinates(c, s.uniter) -} - -func (s *uniterV0Suite) TestCharmURL(c *gc.C) { - s.testCharmURL(c, s.uniter) -} - -func (s *uniterV0Suite) TestSetCharmURL(c *gc.C) { - s.testSetCharmURL(c, s.uniter) -} - -func (s *uniterV0Suite) TestOpenPorts(c *gc.C) { - s.testOpenPorts(c, s.uniter) -} - -func (s *uniterV0Suite) TestClosePorts(c *gc.C) { - s.testClosePorts(c, s.uniter) -} - -func (s *uniterV0Suite) TestOpenPort(c *gc.C) { - s.testOpenPort(c, s.uniter) -} - -func (s *uniterV0Suite) TestClosePort(c *gc.C) { - s.testClosePort(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchConfigSettings(c *gc.C) { - s.testWatchConfigSettings(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchActionNotifications(c *gc.C) { - s.testWatchActionNotifications(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchPreexistingActions(c *gc.C) { - s.testWatchPreexistingActions(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchActionNotificationsMalformedTag(c *gc.C) { - s.testWatchActionNotificationsMalformedTag(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchActionNotificationsMalformedUnitName(c *gc.C) { - s.testWatchActionNotificationsMalformedUnitName(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchActionNotificationsNotUnit(c *gc.C) { - s.testWatchActionNotificationsNotUnit(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchActionNotificationsPermissionDenied(c *gc.C) { - s.testWatchActionNotificationsPermissionDenied(c, s.uniter) -} - -func (s *uniterV0Suite) TestConfigSettings(c *gc.C) { - s.testConfigSettings(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchServiceRelations(c *gc.C) { - s.testWatchServiceRelations(c, s.uniter) -} - -func (s *uniterV0Suite) TestCharmArchiveSha256(c *gc.C) { - s.testCharmArchiveSha256(c, s.uniter) -} - -func (s *uniterV0Suite) TestCharmArchiveURLs(c *gc.C) { - s.testCharmArchiveURLs(c, s.uniter) -} - -func (s *uniterV0Suite) TestCurrentEnvironUUID(c *gc.C) { - s.testCurrentEnvironUUID(c, s.uniter) -} - -func (s *uniterV0Suite) TestCurrentEnvironment(c *gc.C) { - s.testCurrentEnvironment(c, s.uniter) -} - -func (s *uniterV0Suite) TestActions(c *gc.C) { - s.testActions(c, s.uniter) -} - -func (s *uniterV0Suite) TestActionsNotPresent(c *gc.C) { - s.testActionsNotPresent(c, s.uniter) -} - -func (s *uniterV0Suite) TestActionsWrongUnit(c *gc.C) { - factory := func( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, - ) (actions, error) { - return uniter.NewUniterAPIV0(st, resources, authorizer) - } - s.testActionsWrongUnit(c, factory) -} - -func (s *uniterV0Suite) TestActionsPermissionDenied(c *gc.C) { - s.testActionsPermissionDenied(c, s.uniter) -} - -func (s *uniterV0Suite) TestFinishActionsSuccess(c *gc.C) { - s.testFinishActionsSuccess(c, s.uniter) -} - -func (s *uniterV0Suite) TestFinishActionsFailure(c *gc.C) { - s.testFinishActionsFailure(c, s.uniter) -} - -func (s *uniterV0Suite) TestFinishActionsAuthAccess(c *gc.C) { - s.testFinishActionsAuthAccess(c, s.uniter) -} - -func (s *uniterV0Suite) TestBeginActions(c *gc.C) { - s.testBeginActions(c, s.uniter) -} - -func (s *uniterV0Suite) TestRelation(c *gc.C) { - s.testRelation(c, s.uniter) -} - -func (s *uniterV0Suite) TestRelationById(c *gc.C) { - s.testRelationById(c, s.uniter) -} - -func (s *uniterV0Suite) TestProviderType(c *gc.C) { - s.testProviderType(c, s.uniter) -} - -func (s *uniterV0Suite) TestEnterScope(c *gc.C) { - 
s.testEnterScope(c, s.uniter) -} - -func (s *uniterV0Suite) TestLeaveScope(c *gc.C) { - s.testLeaveScope(c, s.uniter) -} - -func (s *uniterV0Suite) TestJoinedRelations(c *gc.C) { - s.testJoinedRelations(c, s.uniter) -} - -func (s *uniterV0Suite) TestReadSettings(c *gc.C) { - s.testReadSettings(c, s.uniter) -} - -func (s *uniterV0Suite) TestReadSettingsWithNonStringValuesFails(c *gc.C) { - s.testReadSettingsWithNonStringValuesFails(c, s.uniter) -} - -func (s *uniterV0Suite) TestReadRemoteSettings(c *gc.C) { - s.testReadRemoteSettings(c, s.uniter) -} - -func (s *uniterV0Suite) TestReadRemoteSettingsWithNonStringValuesFails(c *gc.C) { - s.testReadRemoteSettingsWithNonStringValuesFails(c, s.uniter) -} - -func (s *uniterV0Suite) TestUpdateSettings(c *gc.C) { - s.testUpdateSettings(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchRelationUnits(c *gc.C) { - s.testWatchRelationUnits(c, s.uniter) -} - -func (s *uniterV0Suite) TestAPIAddresses(c *gc.C) { - s.testAPIAddresses(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchUnitAddresses(c *gc.C) { - s.testWatchUnitAddresses(c, s.uniter) -} - -func (s *uniterV0Suite) TestGetMeterStatus(c *gc.C) { - s.testGetMeterStatus(c, s.uniter) -} - -func (s *uniterV0Suite) TestGetMeterStatusUnauthenticated(c *gc.C) { - s.testGetMeterStatusUnauthenticated(c, s.uniter) -} - -func (s *uniterV0Suite) TestGetMeterStatusBadTag(c *gc.C) { - s.testGetMeterStatusBadTag(c, s.uniter) -} - -func (s *uniterV0Suite) TestWatchMeterStatus(c *gc.C) { - s.testWatchMeterStatus(c, s.uniter) -} - -func (s *uniterV0Suite) TestGetOwnerTag(c *gc.C) { - tag := s.mysql.Tag().String() - args := params.Entities{Entities: []params.Entity{ - {Tag: tag}, - }} - result, err := s.uniter.GetOwnerTag(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.StringResult{ - Result: s.AdminUserTag(c).String(), - }) -} - -func (s *uniterV0Suite) TestServiceOwnerV0NotImplemented(c *gc.C) { - apiservertesting.AssertNotImplemented(c, s.uniter, "ServiceOwner") -} - -func (s *uniterV0Suite) TestAssignedMachineV0NotImplemented(c *gc.C) { - apiservertesting.AssertNotImplemented(c, s.uniter, "AssignedMachine") -} - -func (s *uniterV0Suite) TestAllMachinePortsV0NotImplemented(c *gc.C) { - apiservertesting.AssertNotImplemented(c, s.uniter, "AllMachinePorts") -} - -func (s *uniterV0Suite) TestRequestReboot(c *gc.C) { - args := params.Entities{Entities: []params.Entity{ - {Tag: s.machine0.Tag().String()}, - {Tag: s.machine1.Tag().String()}, - {Tag: "bogus"}, - {Tag: "nasty-tag"}, - }} - errResult, err := s.uniter.RequestReboot(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(errResult, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {Error: nil}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }}) - - rFlag, err := s.machine0.GetRebootFlag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rFlag, jc.IsTrue) - - rFlag, err = s.machine1.GetRebootFlag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rFlag, jc.IsFalse) -} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_v1.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_v1.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_v1.go 1970-01-01 00:00:00 +0000 @@ -1,183 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The uniter package implements the API interface used by the uniter -// worker. 
This file contains the API facade version 1. -package uniter - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" - "github.com/juju/juju/state" -) - -func init() { - common.RegisterStandardFacade("Uniter", 1, NewUniterAPIV1) -} - -// UniterAPI implements the API version 1, used by the uniter worker. -type UniterAPIV1 struct { - uniterBaseAPI - - accessMachine common.GetAuthFunc -} - -// NewUniterAPIV1 creates a new instance of the Uniter API, version 1. -func NewUniterAPIV1(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*UniterAPIV1, error) { - baseAPI, err := newUniterBaseAPI(st, resources, authorizer) - if err != nil { - return nil, err - } - accessMachine := func() (common.AuthFunc, error) { - switch tag := authorizer.GetAuthTag().(type) { - case names.UnitTag: - entity, err := st.Unit(tag.Id()) - if err != nil { - return nil, errors.Trace(err) - } - machineId, err := entity.AssignedMachineId() - if err != nil { - return nil, errors.Trace(err) - } - machineTag := names.NewMachineTag(machineId) - return func(tag names.Tag) bool { - return tag == machineTag - }, nil - default: - return nil, errors.Errorf("expected names.UnitTag, got %T", tag) - } - } - return &UniterAPIV1{ - uniterBaseAPI: *baseAPI, - - accessMachine: accessMachine, - }, nil -} - -// AllMachinePorts returns all opened port ranges for each given -// machine (on all networks). -func (u *UniterAPIV1) AllMachinePorts(args params.Entities) (params.MachinePortsResults, error) { - result := params.MachinePortsResults{ - Results: make([]params.MachinePortsResult, len(args.Entities)), - } - canAccess, err := u.accessMachine() - if err != nil { - return params.MachinePortsResults{}, err - } - for i, entity := range args.Entities { - result.Results[i] = u.getOneMachinePorts(canAccess, entity.Tag) - } - return result, nil -} - -// ServiceOwner returns the owner user for each given service tag. -func (u *UniterAPIV1) ServiceOwner(args params.Entities) (params.StringResults, error) { - result := params.StringResults{ - Results: make([]params.StringResult, len(args.Entities)), - } - canAccess, err := u.accessService() - if err != nil { - return params.StringResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseServiceTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - if !canAccess(tag) { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - service, err := u.getService(tag) - if err != nil { - result.Results[i].Error = common.ServerError(err) - continue - } - result.Results[i].Result = service.GetOwnerTag() - } - return result, nil -} - -// AssignedMachine returns the machine tag for each given unit tag, or -// an error satisfying params.IsCodeNotAssigned when a unit has no -// assigned machine. 
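The AssignedMachine implementation follows below. As a companion, here is a hedged caller-side sketch of how the params.IsCodeNotAssigned contract described in the comment above is meant to be consumed; the assignedMachineCaller interface and machineForUnit helper are hypothetical, while the params and names calls mirror the ones used in the surrounding code.

package example

import (
	"fmt"

	"github.com/juju/names"

	"github.com/juju/juju/apiserver/params"
)

// assignedMachineCaller is a hypothetical stand-in for whatever client
// wraps the uniter facade's AssignedMachine call.
type assignedMachineCaller interface {
	AssignedMachine(params.Entities) (params.StringResults, error)
}

// machineForUnit resolves a unit's machine tag. "Not assigned" is reported
// as (zero tag, false, nil) rather than as a failure, per the contract in
// the comment above.
func machineForUnit(api assignedMachineCaller, unitTag string) (names.MachineTag, bool, error) {
	results, err := api.AssignedMachine(params.Entities{
		Entities: []params.Entity{{Tag: unitTag}},
	})
	if err != nil {
		return names.MachineTag{}, false, err // the call itself failed
	}
	if len(results.Results) != 1 {
		return names.MachineTag{}, false, fmt.Errorf("expected 1 result, got %d", len(results.Results))
	}
	res := results.Results[0]
	if res.Error != nil {
		if params.IsCodeNotAssigned(res.Error) {
			return names.MachineTag{}, false, nil // unit not placed on a machine yet
		}
		return names.MachineTag{}, false, res.Error
	}
	tag, err := names.ParseMachineTag(res.Result)
	if err != nil {
		return names.MachineTag{}, false, err
	}
	return tag, true, nil
}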
-func (u *UniterAPIV1) AssignedMachine(args params.Entities) (params.StringResults, error) { - result := params.StringResults{ - Results: make([]params.StringResult, len(args.Entities)), - } - canAccess, err := u.accessUnit() - if err != nil { - return params.StringResults{}, err - } - for i, entity := range args.Entities { - tag, err := names.ParseUnitTag(entity.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - if !canAccess(tag) { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - unit, err := u.getUnit(tag) - if err != nil { - result.Results[i].Error = common.ServerError(err) - continue - } - machineId, err := unit.AssignedMachineId() - if err != nil { - result.Results[i].Error = common.ServerError(err) - } else { - result.Results[i].Result = names.NewMachineTag(machineId).String() - } - } - return result, nil -} - -func (u *UniterAPIV1) getMachine(tag names.MachineTag) (*state.Machine, error) { - return u.st.Machine(tag.Id()) -} - -func (u *UniterAPIV1) getOneMachinePorts(canAccess common.AuthFunc, machineTag string) params.MachinePortsResult { - tag, err := names.ParseMachineTag(machineTag) - if err != nil { - return params.MachinePortsResult{Error: common.ServerError(common.ErrPerm)} - } - if !canAccess(tag) { - return params.MachinePortsResult{Error: common.ServerError(common.ErrPerm)} - } - machine, err := u.getMachine(tag) - if err != nil { - return params.MachinePortsResult{Error: common.ServerError(err)} - } - allPorts, err := machine.AllPorts() - if err != nil { - return params.MachinePortsResult{Error: common.ServerError(err)} - } - var resultPorts []params.MachinePortRange - for _, ports := range allPorts { - // AllPortRanges gives a map, but apis require a stable order - // for results, so sort the port ranges. - portRangesToUnits := ports.AllPortRanges() - portRanges := make([]network.PortRange, 0, len(portRangesToUnits)) - for portRange := range portRangesToUnits { - portRanges = append(portRanges, portRange) - } - network.SortPortRanges(portRanges) - for _, portRange := range portRanges { - unitName := portRangesToUnits[portRange] - resultPorts = append(resultPorts, params.MachinePortRange{ - UnitTag: names.NewUnitTag(unitName).String(), - PortRange: params.FromNetworkPortRange(portRange), - }) - } - } - return params.MachinePortsResult{ - Ports: resultPorts, - } -} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_v1_test.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_v1_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_v1_test.go 1970-01-01 00:00:00 +0000 @@ -1,446 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
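An aside on the sorting step in getOneMachinePorts above: Go deliberately randomizes map iteration order, so an API that builds its reply from a map must sort the keys itself, or callers (and tests using deep-equality checks) see a different order on every run. Below is a minimal, self-contained sketch of the same pattern; portRange and its ordering are illustrative stand-ins, not the real network.PortRange rules.

package main

import (
	"fmt"
	"sort"
)

// portRange is a hypothetical stand-in for network.PortRange.
type portRange struct {
	from, to int
	proto    string
}

// byRange orders port ranges by protocol, then by starting port.
type byRange []portRange

func (s byRange) Len() int      { return len(s) }
func (s byRange) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byRange) Less(i, j int) bool {
	if s[i].proto != s[j].proto {
		return s[i].proto < s[j].proto
	}
	return s[i].from < s[j].from
}

func main() {
	// A map from opened ranges to the unit that opened them, in the spirit
	// of AllPortRanges; iterating it directly yields a different order each run.
	opened := map[portRange]string{
		{100, 200, "tcp"}: "wordpress/0",
		{1, 8, "udp"}:     "mysql/1",
	}
	ranges := make([]portRange, 0, len(opened))
	for pr := range opened {
		ranges = append(ranges, pr)
	}
	sort.Sort(byRange(ranges))
	for _, pr := range ranges {
		fmt.Printf("%s %d-%d opened by %s\n", pr.proto, pr.from, pr.to, opened[pr])
	}
}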
- -package uniter_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/common" - commontesting "github.com/juju/juju/apiserver/common/testing" - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/apiserver/uniter" - "github.com/juju/juju/state" -) - -type uniterV1Suite struct { - uniterBaseSuite - *commontesting.EnvironWatcherTest - - uniter *uniter.UniterAPIV1 - meteredUniter *uniter.UniterAPIV1 -} - -var _ = gc.Suite(&uniterV1Suite{}) - -func (s *uniterV1Suite) SetUpTest(c *gc.C) { - s.uniterBaseSuite.setUpTest(c) - - uniterAPIV1, err := uniter.NewUniterAPIV1( - s.State, - s.resources, - s.authorizer, - ) - c.Assert(err, jc.ErrorIsNil) - s.uniter = uniterAPIV1 - - meteredAuthorizer := apiservertesting.FakeAuthorizer{ - Tag: s.meteredUnit.Tag(), - } - s.meteredUniter, err = uniter.NewUniterAPIV1( - s.State, - s.resources, - meteredAuthorizer, - ) - c.Assert(err, jc.ErrorIsNil) - - s.EnvironWatcherTest = commontesting.NewEnvironWatcherTest( - s.uniter, - s.State, - s.resources, - commontesting.NoSecrets, - ) -} - -func (s *uniterV1Suite) TestUniterFailsWithNonUnitAgentUser(c *gc.C) { - factory := func(st *state.State, res *common.Resources, auth common.Authorizer) error { - _, err := uniter.NewUniterAPIV1(st, res, auth) - return err - } - s.testUniterFailsWithNonUnitAgentUser(c, factory) -} - -func (s *uniterV1Suite) TestSetStatus(c *gc.C) { - s.testSetStatus(c, s.uniter) -} - -func (s *uniterV1Suite) TestLife(c *gc.C) { - s.testLife(c, s.uniter) -} - -func (s *uniterV1Suite) TestEnsureDead(c *gc.C) { - s.testEnsureDead(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatch(c *gc.C) { - s.testWatch(c, s.uniter) -} - -func (s *uniterV1Suite) TestPublicAddress(c *gc.C) { - s.testPublicAddress(c, s.uniter) -} - -func (s *uniterV1Suite) TestAvailabilityZone(c *gc.C) { - s.testAvailabilityZone(c, s.uniter) -} - -func (s *uniterV1Suite) TestPrivateAddress(c *gc.C) { - s.testPrivateAddress(c, s.uniter) -} - -func (s *uniterV1Suite) TestResolved(c *gc.C) { - s.testResolved(c, s.uniter) -} - -func (s *uniterV1Suite) TestClearResolved(c *gc.C) { - s.testClearResolved(c, s.uniter) -} - -func (s *uniterV1Suite) TestGetPrincipal(c *gc.C) { - factory := func( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, - ) (getPrincipal, error) { - return uniter.NewUniterAPIV1(st, resources, authorizer) - } - s.testGetPrincipal(c, s.uniter, factory) -} - -func (s *uniterV1Suite) TestHasSubordinates(c *gc.C) { - s.testHasSubordinates(c, s.uniter) -} - -func (s *uniterV1Suite) TestDestroy(c *gc.C) { - s.testDestroy(c, s.uniter) -} - -func (s *uniterV1Suite) TestDestroyAllSubordinates(c *gc.C) { - s.testDestroyAllSubordinates(c, s.uniter) -} - -func (s *uniterV1Suite) TestCharmURL(c *gc.C) { - s.testCharmURL(c, s.uniter) -} - -func (s *uniterV1Suite) TestSetCharmURL(c *gc.C) { - s.testSetCharmURL(c, s.uniter) -} - -func (s *uniterV1Suite) TestOpenPorts(c *gc.C) { - s.testOpenPorts(c, s.uniter) -} - -func (s *uniterV1Suite) TestClosePorts(c *gc.C) { - s.testClosePorts(c, s.uniter) -} - -func (s *uniterV1Suite) TestOpenPort(c *gc.C) { - s.testOpenPort(c, s.uniter) -} - -func (s *uniterV1Suite) TestClosePort(c *gc.C) { - s.testClosePort(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchConfigSettings(c *gc.C) { - s.testWatchConfigSettings(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchActionNotifications(c *gc.C) { - 
s.testWatchActionNotifications(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchPreexistingActions(c *gc.C) { - s.testWatchPreexistingActions(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchActionNotificationsMalformedTag(c *gc.C) { - s.testWatchActionNotificationsMalformedTag(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchActionNotificationsMalformedUnitName(c *gc.C) { - s.testWatchActionNotificationsMalformedUnitName(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchActionNotificationsNotUnit(c *gc.C) { - s.testWatchActionNotificationsNotUnit(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchActionNotificationsPermissionDenied(c *gc.C) { - s.testWatchActionNotificationsPermissionDenied(c, s.uniter) -} - -func (s *uniterV1Suite) TestConfigSettings(c *gc.C) { - s.testConfigSettings(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchServiceRelations(c *gc.C) { - s.testWatchServiceRelations(c, s.uniter) -} - -func (s *uniterV1Suite) TestCharmArchiveSha256(c *gc.C) { - s.testCharmArchiveSha256(c, s.uniter) -} - -func (s *uniterV1Suite) TestCharmArchiveURLs(c *gc.C) { - s.testCharmArchiveURLs(c, s.uniter) -} - -func (s *uniterV1Suite) TestCurrentEnvironUUID(c *gc.C) { - s.testCurrentEnvironUUID(c, s.uniter) -} - -func (s *uniterV1Suite) TestCurrentEnvironment(c *gc.C) { - s.testCurrentEnvironment(c, s.uniter) -} - -func (s *uniterV1Suite) TestActions(c *gc.C) { - s.testActions(c, s.uniter) -} - -func (s *uniterV1Suite) TestActionsNotPresent(c *gc.C) { - s.testActionsNotPresent(c, s.uniter) -} - -func (s *uniterV1Suite) TestActionsWrongUnit(c *gc.C) { - factory := func( - st *state.State, - resources *common.Resources, - authorizer common.Authorizer, - ) (actions, error) { - return uniter.NewUniterAPIV1(st, resources, authorizer) - } - s.testActionsWrongUnit(c, factory) -} - -func (s *uniterV1Suite) TestActionsPermissionDenied(c *gc.C) { - s.testActionsPermissionDenied(c, s.uniter) -} - -func (s *uniterV1Suite) TestFinishActionsSuccess(c *gc.C) { - s.testFinishActionsSuccess(c, s.uniter) -} - -func (s *uniterV1Suite) TestFinishActionsFailure(c *gc.C) { - s.testFinishActionsFailure(c, s.uniter) -} - -func (s *uniterV1Suite) TestFinishActionsAuthAccess(c *gc.C) { - s.testFinishActionsAuthAccess(c, s.uniter) -} - -func (s *uniterV1Suite) TestBeginActions(c *gc.C) { - s.testBeginActions(c, s.uniter) -} - -func (s *uniterV1Suite) TestRelation(c *gc.C) { - s.testRelation(c, s.uniter) -} - -func (s *uniterV1Suite) TestRelationById(c *gc.C) { - s.testRelationById(c, s.uniter) -} - -func (s *uniterV1Suite) TestProviderType(c *gc.C) { - s.testProviderType(c, s.uniter) -} - -func (s *uniterV1Suite) TestEnterScope(c *gc.C) { - s.testEnterScope(c, s.uniter) -} - -func (s *uniterV1Suite) TestLeaveScope(c *gc.C) { - s.testLeaveScope(c, s.uniter) -} - -func (s *uniterV1Suite) TestJoinedRelations(c *gc.C) { - s.testJoinedRelations(c, s.uniter) -} - -func (s *uniterV1Suite) TestReadSettings(c *gc.C) { - s.testReadSettings(c, s.uniter) -} - -func (s *uniterV1Suite) TestReadSettingsWithNonStringValuesFails(c *gc.C) { - s.testReadSettingsWithNonStringValuesFails(c, s.uniter) -} - -func (s *uniterV1Suite) TestReadRemoteSettings(c *gc.C) { - s.testReadRemoteSettings(c, s.uniter) -} - -func (s *uniterV1Suite) TestReadRemoteSettingsWithNonStringValuesFails(c *gc.C) { - s.testReadRemoteSettingsWithNonStringValuesFails(c, s.uniter) -} - -func (s *uniterV1Suite) TestUpdateSettings(c *gc.C) { - s.testUpdateSettings(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchRelationUnits(c *gc.C) { - 
s.testWatchRelationUnits(c, s.uniter) -} - -func (s *uniterV1Suite) TestAPIAddresses(c *gc.C) { - s.testAPIAddresses(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchUnitAddresses(c *gc.C) { - s.testWatchUnitAddresses(c, s.uniter) -} - -func (s *uniterV1Suite) TestGetMeterStatus(c *gc.C) { - s.testGetMeterStatus(c, s.uniter) -} - -func (s *uniterV1Suite) TestGetMeterStatusUnauthenticated(c *gc.C) { - s.testGetMeterStatusUnauthenticated(c, s.uniter) -} - -func (s *uniterV1Suite) TestGetMeterStatusBadTag(c *gc.C) { - s.testGetMeterStatusBadTag(c, s.uniter) -} - -func (s *uniterV1Suite) TestWatchMeterStatus(c *gc.C) { - s.testWatchMeterStatus(c, s.uniter) -} - -func (s *uniterV1Suite) TestGetOwnerTagV1NotImplemented(c *gc.C) { - apiservertesting.AssertNotImplemented(c, s.uniter, "GetOwnerTag") -} - -func (s *uniterV1Suite) TestServiceOwner(c *gc.C) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "service-wordpress"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "machine-0"}, - {Tag: "service-foo"}, - }} - result, err := s.uniter.ServiceOwner(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: s.AdminUserTag(c).String()}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterV1Suite) TestAssignedMachine(c *gc.C) { - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "service-mysql"}, - {Tag: "service-wordpress"}, - {Tag: "machine-0"}, - {Tag: "machine-1"}, - {Tag: "machine-42"}, - {Tag: "service-foo"}, - {Tag: "relation-svc1.rel1#svc2.rel2"}, - }} - result, err := s.uniter.AssignedMachine(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, params.StringResults{ - Results: []params.StringResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Result: "machine-0"}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterV1Suite) TestAllMachinePorts(c *gc.C) { - // Verify no ports are opened yet on the machine or unit. - machinePorts, err := s.machine0.AllPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(machinePorts, gc.HasLen, 0) - unitPorts, err := s.wordpressUnit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(unitPorts, gc.HasLen, 0) - - // Add another mysql unit on machine 0. - mysqlUnit1, err := s.mysql.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = mysqlUnit1.AssignToMachine(s.machine0) - c.Assert(err, jc.ErrorIsNil) - - // Open some ports on both units. 
- err = s.wordpressUnit.OpenPorts("tcp", 100, 200) - c.Assert(err, jc.ErrorIsNil) - err = s.wordpressUnit.OpenPorts("udp", 10, 20) - c.Assert(err, jc.ErrorIsNil) - err = mysqlUnit1.OpenPorts("tcp", 201, 250) - c.Assert(err, jc.ErrorIsNil) - err = mysqlUnit1.OpenPorts("udp", 1, 8) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "machine-0"}, - {Tag: "machine-1"}, - {Tag: "unit-foo-42"}, - {Tag: "machine-42"}, - {Tag: "service-wordpress"}, - }} - expectPorts := []params.MachinePortRange{ - {UnitTag: "unit-wordpress-0", PortRange: params.PortRange{100, 200, "tcp"}}, - {UnitTag: "unit-mysql-1", PortRange: params.PortRange{201, 250, "tcp"}}, - {UnitTag: "unit-mysql-1", PortRange: params.PortRange{1, 8, "udp"}}, - {UnitTag: "unit-wordpress-0", PortRange: params.PortRange{10, 20, "udp"}}, - } - result, err := s.uniter.AllMachinePorts(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, gc.DeepEquals, params.MachinePortsResults{ - Results: []params.MachinePortsResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Ports: expectPorts}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }, - }) -} - -func (s *uniterV1Suite) TestRequestReboot(c *gc.C) { - args := params.Entities{Entities: []params.Entity{ - {Tag: s.machine0.Tag().String()}, - {Tag: s.machine1.Tag().String()}, - {Tag: "bogus"}, - {Tag: "nasty-tag"}, - }} - errResult, err := s.uniter.RequestReboot(args) - c.Assert(err, jc.ErrorIsNil) - c.Assert(errResult, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{ - {Error: nil}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - }}) - - rFlag, err := s.machine0.GetRebootFlag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rFlag, jc.IsTrue) - - rFlag, err = s.machine1.GetRebootFlag() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rFlag, jc.IsFalse) -} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_v2.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_v2.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_v2.go 1970-01-01 00:00:00 +0000 @@ -1,78 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The uniter package implements the API interface used by the uniter -// worker. This file contains the API facade version 2. - -package uniter - -import ( - "github.com/juju/loggo" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/common" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state" -) - -var logger = loggo.GetLogger("juju.apiserver.uniter") - -func init() { - common.RegisterStandardFacade("Uniter", 2, NewUniterAPIV2) -} - -// UniterAPI implements the API version 2, used by the uniter worker. -type UniterAPIV2 struct { - UniterAPIV1 - StorageAPI -} - -// AddMetricBatches adds the metrics for the specified unit. 
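The AddMetricBatches implementation follows below. First, a sketch of the request payload it consumes, assembled from the params types exercised by the tests later in this diff; the helper name is hypothetical, and the unit tag and charm URL arguments are placeholder values.

package example

import (
	"time"

	"github.com/juju/utils"

	"github.com/juju/juju/apiserver/params"
)

// oneBatch assembles a single-batch AddMetricBatches request. Each batch is
// identified by a client-generated UUID; the tests below retrieve the stored
// batch from state by that UUID.
func oneBatch(unitTag, charmURL string) params.MetricBatchParams {
	return params.MetricBatchParams{
		Batches: []params.MetricBatchParam{{
			Tag: unitTag, // e.g. "unit-metered-0" (placeholder)
			Batch: params.MetricBatch{
				UUID:     utils.MustNewUUID().String(),
				CharmURL: charmURL,
				Created:  time.Now(),
				Metrics: []params.Metric{{
					Key:   "pings",
					Value: "5",
					Time:  time.Now().UTC(),
				}},
			},
		}},
	}
}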
-func (u *UniterAPIV2) AddMetricBatches(args params.MetricBatchParams) (params.ErrorResults, error) { - result := params.ErrorResults{ - Results: make([]params.ErrorResult, len(args.Batches)), - } - canAccess, err := u.accessUnit() - if err != nil { - logger.Warningf("failed to check unit access: %v", err) - return params.ErrorResults{}, common.ErrPerm - } - for i, batch := range args.Batches { - tag, err := names.ParseUnitTag(batch.Tag) - if err != nil { - result.Results[i].Error = common.ServerError(err) - continue - } - if !canAccess(tag) { - result.Results[i].Error = common.ServerError(common.ErrPerm) - continue - } - metrics := make([]state.Metric, len(batch.Batch.Metrics)) - for j, metric := range batch.Batch.Metrics { - metrics[j] = state.Metric{ - Key: metric.Key, - Value: metric.Value, - Time: metric.Time, - } - } - _, err = u.unit.AddMetrics(batch.Batch.UUID, batch.Batch.Created, batch.Batch.CharmURL, metrics) - result.Results[i].Error = common.ServerError(err) - } - return result, nil -} - -// NewUniterAPIV2 creates a new instance of the Uniter API, version 2. -func NewUniterAPIV2(st *state.State, resources *common.Resources, authorizer common.Authorizer) (*UniterAPIV2, error) { - baseAPI, err := NewUniterAPIV1(st, resources, authorizer) - if err != nil { - return nil, err - } - storageAPI, err := newStorageAPI(getStorageState(st), resources, baseAPI.accessUnit) - if err != nil { - return nil, err - } - return &UniterAPIV2{ - UniterAPIV1: *baseAPI, - StorageAPI: *storageAPI, - }, nil -} === removed file 'src/github.com/juju/juju/apiserver/uniter/uniter_v2_test.go' --- src/github.com/juju/juju/apiserver/uniter/uniter_v2_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/uniter/uniter_v2_test.go 1970-01-01 00:00:00 +0000 @@ -1,276 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter_test - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - apiservertesting "github.com/juju/juju/apiserver/testing" - "github.com/juju/juju/apiserver/uniter" - "github.com/juju/juju/state" - "github.com/juju/juju/testing/factory" -) - -//TODO run all common V0 and V1 tests. -type uniterV2Suite struct { - uniterBaseSuite - uniter *uniter.UniterAPIV2 -} - -var _ = gc.Suite(&uniterV2Suite{}) - -func (s *uniterV2Suite) SetUpTest(c *gc.C) { - s.uniterBaseSuite.setUpTest(c) - - uniterAPIV2, err := uniter.NewUniterAPIV2( - s.State, - s.resources, - s.authorizer, - ) - c.Assert(err, jc.ErrorIsNil) - s.uniter = uniterAPIV2 -} - -func (s *uniterV2Suite) TestStorageAttachments(c *gc.C) { - // We need to set up a unit that has storage metadata defined. 
- ch := s.AddTestingCharm(c, "storage-block") - sCons := map[string]state.StorageConstraints{ - "data": {Pool: "", Size: 1024, Count: 1}, - } - service := s.AddTestingServiceWithStorage(c, "storage-block", ch, sCons) - unit, err := service.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = s.State.AssignUnit(unit, state.AssignCleanEmpty) - c.Assert(err, jc.ErrorIsNil) - assignedMachineId, err := unit.AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - machine, err := s.State.Machine(assignedMachineId) - c.Assert(err, jc.ErrorIsNil) - - volumeAttachments, err := machine.VolumeAttachments() - c.Assert(err, jc.ErrorIsNil) - c.Assert(volumeAttachments, gc.HasLen, 1) - - err = machine.SetProvisioned("inst-id", "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - - err = s.State.SetVolumeInfo( - volumeAttachments[0].Volume(), - state.VolumeInfo{VolumeId: "vol-123", Size: 456}, - ) - c.Assert(err, jc.ErrorIsNil) - - err = s.State.SetVolumeAttachmentInfo( - machine.MachineTag(), - volumeAttachments[0].Volume(), - state.VolumeAttachmentInfo{DeviceName: "xvdf1"}, - ) - c.Assert(err, jc.ErrorIsNil) - - password, err := utils.RandomPassword() - err = unit.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - st := s.OpenAPIAs(c, unit.Tag(), password) - uniter, err := st.Uniter() - c.Assert(err, jc.ErrorIsNil) - - attachments, err := uniter.UnitStorageAttachments(unit.UnitTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(attachments, gc.DeepEquals, []params.StorageAttachmentId{{ - StorageTag: "storage-data-0", - UnitTag: unit.Tag().String(), - }}) -} - -// TestSetStatus tests that backwards compatibility for -// set status has been properly implemented. -func (s *uniterV2Suite) TestSetStatus(c *gc.C) { - s.testSetStatus(c, s.uniter) -} - -// TestSetAgentStatus tests the agent part of set status -// as implemented for this version. -func (s *uniterV2Suite) TestSetAgentStatus(c *gc.C) { - s.testSetAgentStatus(c, s.uniter) -} - -// TestSetUnitStatus tests the unit part of set status -// as implemented for this version. -func (s *uniterV2Suite) TestSetUnitStatus(c *gc.C) { - s.testSetUnitStatus(c, s.uniter) -} - -func (s *uniterV2Suite) TestUnitStatus(c *gc.C) { - err := s.wordpressUnit.SetStatus(state.StatusMaintenance, "blah", nil) - c.Assert(err, jc.ErrorIsNil) - err = s.mysqlUnit.SetStatus(state.StatusTerminated, "foo", nil) - c.Assert(err, jc.ErrorIsNil) - - args := params.Entities{ - Entities: []params.Entity{ - {Tag: "unit-mysql-0"}, - {Tag: "unit-wordpress-0"}, - {Tag: "unit-foo-42"}, - {Tag: "machine-1"}, - {Tag: "invalid"}, - }} - result, err := s.uniter.UnitStatus(args) - c.Assert(err, jc.ErrorIsNil) - // Zero out the updated timestamps so we can easily check the results. 
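The loop that follows below applies a common testing pattern: assert that the volatile fields (here the Since timestamps) are present, then zero them so one deep-equality assertion can cover everything deterministic. A reduced standalone sketch, with a hypothetical result type in place of params.StatusResult:

package example

import "time"

// statusResult is a reduced, hypothetical version of params.StatusResult:
// one deterministic field plus one timestamp that varies per run.
type statusResult struct {
	Status string
	Since  *time.Time
}

// normalize clears the volatile timestamps in place, after which the whole
// slice can be compared with a single deep-equality assertion.
func normalize(results []statusResult) {
	for i := range results {
		results[i].Since = nil
	}
}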
- for i, statusResult := range result.Results { - r := statusResult - if r.Status != "" { - c.Assert(r.Since, gc.NotNil) - } - r.Since = nil - result.Results[i] = r - } - c.Assert(result, gc.DeepEquals, params.StatusResults{ - Results: []params.StatusResult{ - {Error: apiservertesting.ErrUnauthorized}, - {Status: params.StatusMaintenance, Info: "blah", Data: map[string]interface{}{}}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ErrUnauthorized}, - {Error: apiservertesting.ServerError(`"invalid" is not a valid tag`)}, - }, - }) -} - -type unitMetricBatchesSuite struct { - uniterBaseSuite - uniter *uniter.UniterAPIV2 -} - -var _ = gc.Suite(&unitMetricBatchesSuite{}) - -func (s *unitMetricBatchesSuite) SetUpTest(c *gc.C) { - s.uniterBaseSuite.setUpTest(c) - - meteredAuthorizer := apiservertesting.FakeAuthorizer{ - Tag: s.meteredUnit.Tag(), - } - var err error - s.uniter, err = uniter.NewUniterAPIV2( - s.State, - s.resources, - meteredAuthorizer, - ) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *unitMetricBatchesSuite) TestAddMetricsBatch(c *gc.C) { - metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} - uuid := utils.MustNewUUID().String() - - result, err := s.uniter.AddMetricBatches(params.MetricBatchParams{ - Batches: []params.MetricBatchParam{{ - Tag: s.meteredUnit.Tag().String(), - Batch: params.MetricBatch{ - UUID: uuid, - CharmURL: s.meteredCharm.URL().String(), - Created: time.Now(), - Metrics: metrics, - }}}}, - ) - - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{{nil}}, - }) - c.Assert(err, jc.ErrorIsNil) - - batch, err := s.State.MetricBatch(uuid) - c.Assert(err, jc.ErrorIsNil) - c.Assert(batch.UUID(), gc.Equals, uuid) - c.Assert(batch.CharmURL(), gc.Equals, s.meteredCharm.URL().String()) - c.Assert(batch.Unit(), gc.Equals, s.meteredUnit.Name()) - storedMetrics := batch.Metrics() - c.Assert(storedMetrics, gc.HasLen, 1) - c.Assert(storedMetrics[0].Key, gc.Equals, metrics[0].Key) - c.Assert(storedMetrics[0].Value, gc.Equals, metrics[0].Value) -} - -func (s *unitMetricBatchesSuite) TestAddMetricsBatchNoCharmURL(c *gc.C) { - metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} - uuid := utils.MustNewUUID().String() - - result, err := s.uniter.AddMetricBatches(params.MetricBatchParams{ - Batches: []params.MetricBatchParam{{ - Tag: s.meteredUnit.Tag().String(), - Batch: params.MetricBatch{ - UUID: uuid, - CharmURL: s.meteredCharm.URL().String(), - Created: time.Now(), - Metrics: metrics, - }}}}) - - c.Assert(result, gc.DeepEquals, params.ErrorResults{ - Results: []params.ErrorResult{{nil}}, - }) - c.Assert(err, jc.ErrorIsNil) - - batch, err := s.State.MetricBatch(uuid) - c.Assert(err, jc.ErrorIsNil) - c.Assert(batch.UUID(), gc.Equals, uuid) - c.Assert(batch.CharmURL(), gc.Equals, s.meteredCharm.URL().String()) - c.Assert(batch.Unit(), gc.Equals, s.meteredUnit.Name()) - storedMetrics := batch.Metrics() - c.Assert(storedMetrics, gc.HasLen, 1) - c.Assert(storedMetrics[0].Key, gc.Equals, metrics[0].Key) - c.Assert(storedMetrics[0].Value, gc.Equals, metrics[0].Value) -} - -func (s *unitMetricBatchesSuite) TestAddMetricsBatchDiffTag(c *gc.C) { - unit2 := s.Factory.MakeUnit(c, &factory.UnitParams{Service: s.meteredService, SetCharmURL: true}) - - metrics := []params.Metric{{Key: "pings", Value: "5", Time: time.Now().UTC()}} - uuid := utils.MustNewUUID().String() - - tests := []struct { - about string - tag string - expect string - }{{ - about: "different unit", - tag: 
unit2.Tag().String(), - expect: "permission denied", - }, { - about: "user tag", - tag: names.NewLocalUserTag("admin").String(), - expect: `"user-admin@local" is not a valid unit tag`, - }, { - about: "machine tag", - tag: names.NewMachineTag("0").String(), - expect: `"machine-0" is not a valid unit tag`, - }} - - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - result, err := s.uniter.AddMetricBatches(params.MetricBatchParams{ - Batches: []params.MetricBatchParam{{ - Tag: test.tag, - Batch: params.MetricBatch{ - UUID: uuid, - CharmURL: "", - Created: time.Now(), - Metrics: metrics, - }}}}) - - if test.expect == "" { - c.Assert(result.OneError(), jc.ErrorIsNil) - } else { - c.Assert(result.OneError(), gc.ErrorMatches, test.expect) - } - c.Assert(err, jc.ErrorIsNil) - - _, err = s.State.MetricBatch(uuid) - c.Assert(err, jc.Satisfies, errors.IsNotFound) - } -} === modified file 'src/github.com/juju/juju/apiserver/upgrader/unitupgrader_test.go' --- src/github.com/juju/juju/apiserver/upgrader/unitupgrader_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/apiserver/upgrader/unitupgrader_test.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,8 @@ "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" @@ -37,6 +39,12 @@ var _ = gc.Suite(&unitUpgraderSuite{}) +var current = version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), +} + func (s *unitUpgraderSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) s.resources = common.NewResources() @@ -153,7 +161,7 @@ // The machine must have its existing tools set before we query for the // next tools. 
This is so that we can grab Arch and Series without // having to pass it in again - err := s.rawMachine.SetAgentVersion(version.Current) + err := s.rawMachine.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) args := params.Entities{Entities: []params.Entity{agent}} @@ -163,7 +171,7 @@ c.Check(results.Results, gc.HasLen, 1) c.Assert(results.Results[0].Error, gc.IsNil) agentTools := results.Results[0].Tools - c.Check(agentTools.Version.Number, gc.DeepEquals, version.Current.Number) + c.Check(agentTools.Version.Number, gc.DeepEquals, version.Current) c.Assert(agentTools.URL, gc.NotNil) } assertTools() @@ -185,7 +193,11 @@ AgentTools: []params.EntityVersion{{ Tag: s.rawUnit.Tag().String(), Tools: ¶ms.Version{ - Version: version.Current, + Version: version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + }, }, }}, } @@ -196,7 +208,11 @@ } func (s *unitUpgraderSuite) TestSetTools(c *gc.C) { - cur := version.Current + cur := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } _, err := s.rawUnit.AgentTools() c.Assert(err, jc.Satisfies, errors.IsNotFound) args := params.EntitiesVersion{ @@ -250,7 +266,12 @@ } func (s *unitUpgraderSuite) TestDesiredVersionNoticesMixedAgents(c *gc.C) { - err := s.rawMachine.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err := s.rawMachine.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) args := params.Entities{Entities: []params.Entity{ {Tag: s.rawUnit.Tag().String()}, @@ -262,7 +283,7 @@ c.Assert(results.Results[0].Error, gc.IsNil) agentVersion := results.Results[0].Version c.Assert(agentVersion, gc.NotNil) - c.Check(*agentVersion, gc.DeepEquals, version.Current.Number) + c.Check(*agentVersion, gc.DeepEquals, version.Current) c.Assert(results.Results[1].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) c.Assert(results.Results[1].Version, gc.IsNil) @@ -270,7 +291,12 @@ } func (s *unitUpgraderSuite) TestDesiredVersionForAgent(c *gc.C) { - err := s.rawMachine.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err := s.rawMachine.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) args := params.Entities{Entities: []params.Entity{{Tag: s.rawUnit.Tag().String()}}} results, err := s.upgrader.DesiredVersion(args) @@ -279,5 +305,5 @@ c.Assert(results.Results[0].Error, gc.IsNil) agentVersion := results.Results[0].Version c.Assert(agentVersion, gc.NotNil) - c.Check(*agentVersion, gc.DeepEquals, version.Current.Number) + c.Check(*agentVersion, gc.DeepEquals, version.Current) } === modified file 'src/github.com/juju/juju/apiserver/upgrader/upgrader.go' --- src/github.com/juju/juju/apiserver/upgrader/upgrader.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/upgrader/upgrader.go 2016-03-22 15:18:22 +0000 @@ -19,7 +19,7 @@ var logger = loggo.GetLogger("juju.apiserver.upgrader") func init() { - common.RegisterStandardFacade("Upgrader", 0, upgraderFacade) + common.RegisterStandardFacade("Upgrader", 1, upgraderFacade) } // upgraderFacade is a bit unique vs the other API Facades, as it has two @@ -76,7 +76,7 @@ getCanReadWrite := func() (common.AuthFunc, error) { return authorizer.AuthOwner, nil } - env, err := st.Environment() + env, err := st.Model() if err != nil { return nil, err } @@ -103,7 +103,7 @@ } err = common.ErrPerm if 
u.authorizer.AuthOwner(tag) { - watch := u.st.WatchForEnvironConfigChanges() + watch := u.st.WatchForModelConfigChanges() // Consume the initial event. Technically, API // calls to Watch 'transmit' the initial event // in the Watch response. But NotifyWatchers @@ -122,13 +122,13 @@ func (u *UpgraderAPI) getGlobalAgentVersion() (version.Number, *config.Config, error) { // Get the Agent Version requested in the Environment Config - cfg, err := u.st.EnvironConfig() + cfg, err := u.st.ModelConfig() if err != nil { return version.Number{}, nil, err } agentVersion, ok := cfg.AgentVersion() if !ok { - return version.Number{}, nil, errors.New("agent version not set in environment config") + return version.Number{}, nil, errors.New("agent version not set in model config") } return agentVersion, cfg, nil } @@ -160,7 +160,7 @@ return params.VersionResults{}, common.ServerError(err) } // Is the desired version greater than the current API server version? - isNewerVersion := agentVersion.Compare(version.Current.Number) > 0 + isNewerVersion := agentVersion.Compare(version.Current) > 0 for i, entity := range args.Entities { tag, err := names.ParseTag(entity.Tag) if err != nil { @@ -170,7 +170,7 @@ err = common.ErrPerm if u.authorizer.AuthOwner(tag) { // Only return the globally desired agent version if the - // asking entity is a machine agent with JobManageEnviron or + // asking entity is a machine agent with JobManageModel or // if this API server is running the globally desired agent // version. Otherwise report this API server's current // agent version. @@ -182,8 +182,8 @@ if !isNewerVersion || u.entityIsManager(tag) { results[i].Version = &agentVersion } else { - logger.Debugf("desired version is %s, but current version is %s and agent is not a manager node", agentVersion, version.Current.Number) - results[i].Version = &version.Current.Number + logger.Debugf("desired version is %s, but current version is %s and agent is not a manager node", agentVersion, version.Current) + results[i].Version = &version.Current } err = nil } === modified file 'src/github.com/juju/juju/apiserver/upgrader/upgrader_test.go' --- src/github.com/juju/juju/apiserver/upgrader/upgrader_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/upgrader/upgrader_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,8 @@ "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/apiserver/common" @@ -44,9 +46,9 @@ // Create a machine to work with var err error // The first machine created is the only one allowed to - // JobManageEnviron + // JobManageModel s.apiMachine, err = s.State.AddMachine("quantal", state.JobHostUnits, - state.JobManageEnviron) + state.JobManageModel) c.Assert(err, jc.ErrorIsNil) s.rawMachine, err = s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -146,13 +148,17 @@ } func (s *upgraderSuite) TestToolsForAgent(c *gc.C) { - cur := version.Current + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } agent := params.Entity{Tag: s.rawMachine.Tag().String()} // The machine must have its existing tools set before we query for the // next tools. 
This is so that we can grab Arch and Series without // having to pass it in again - err := s.rawMachine.SetAgentVersion(version.Current) + err := s.rawMachine.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) args := params.Entities{Entities: []params.Entity{agent}} @@ -162,10 +168,10 @@ c.Check(results.Results, gc.HasLen, 1) c.Assert(results.Results[0].Error, gc.IsNil) agentTools := results.Results[0].Tools - url := fmt.Sprintf("https://%s/environment/%s/tools/%s", - s.APIState.Addr(), coretesting.EnvironmentTag.Id(), version.Current) + url := fmt.Sprintf("https://%s/model/%s/tools/%s", + s.APIState.Addr(), coretesting.ModelTag.Id(), current) c.Check(agentTools.URL, gc.Equals, url) - c.Check(agentTools.Version, gc.DeepEquals, cur) + c.Check(agentTools.Version, gc.DeepEquals, current) } assertTools() } @@ -186,7 +192,11 @@ AgentTools: []params.EntityVersion{{ Tag: s.rawMachine.Tag().String(), Tools: ¶ms.Version{ - Version: version.Current, + Version: version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + }, }, }}, } @@ -197,14 +207,18 @@ } func (s *upgraderSuite) TestSetTools(c *gc.C) { - cur := version.Current + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } _, err := s.rawMachine.AgentTools() c.Assert(err, jc.Satisfies, errors.IsNotFound) args := params.EntitiesVersion{ AgentTools: []params.EntityVersion{{ Tag: s.rawMachine.Tag().String(), Tools: ¶ms.Version{ - Version: cur, + Version: current, }}, }, } @@ -218,12 +232,7 @@ c.Assert(err, jc.ErrorIsNil) realTools, err := s.rawMachine.AgentTools() c.Assert(err, jc.ErrorIsNil) - c.Check(realTools.Version.Arch, gc.Equals, cur.Arch) - c.Check(realTools.Version.Series, gc.Equals, cur.Series) - c.Check(realTools.Version.Major, gc.Equals, cur.Major) - c.Check(realTools.Version.Minor, gc.Equals, cur.Minor) - c.Check(realTools.Version.Patch, gc.Equals, cur.Patch) - c.Check(realTools.Version.Build, gc.Equals, cur.Build) + c.Check(realTools.Version, gc.Equals, current) c.Check(realTools.URL, gc.Equals, "") } @@ -261,7 +270,7 @@ c.Assert(results.Results[0].Error, gc.IsNil) agentVersion := results.Results[0].Version c.Assert(agentVersion, gc.NotNil) - c.Check(*agentVersion, gc.DeepEquals, version.Current.Number) + c.Check(*agentVersion, gc.DeepEquals, version.Current) c.Assert(results.Results[1].Error, gc.DeepEquals, apiservertesting.ErrUnauthorized) c.Assert(results.Results[1].Version, gc.IsNil) @@ -276,19 +285,24 @@ c.Assert(results.Results[0].Error, gc.IsNil) agentVersion := results.Results[0].Version c.Assert(agentVersion, gc.NotNil) - c.Check(*agentVersion, gc.DeepEquals, version.Current.Number) + c.Check(*agentVersion, gc.DeepEquals, version.Current) } func (s *upgraderSuite) bumpDesiredAgentVersion(c *gc.C) version.Number { - // In order to call SetEnvironAgentVersion we have to first SetTools on + // In order to call SetModelAgentVersion we have to first SetTools on // all the existing machines - s.apiMachine.SetAgentVersion(version.Current) - s.rawMachine.SetAgentVersion(version.Current) - newer := version.Current + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + s.apiMachine.SetAgentVersion(current) + s.rawMachine.SetAgentVersion(current) + newer := current newer.Patch++ - err := s.State.SetEnvironAgentVersion(newer.Number) + err := s.State.SetModelAgentVersion(newer.Number) c.Assert(err, jc.ErrorIsNil) - cfg, err := s.State.EnvironConfig() + cfg, err := 
s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) vers, ok := cfg.AgentVersion() c.Assert(ok, jc.IsTrue) @@ -316,7 +330,7 @@ func (s *upgraderSuite) TestDesiredVersionRestrictedForNonAPIAgents(c *gc.C) { newVersion := s.bumpDesiredAgentVersion(c) - c.Assert(newVersion, gc.Not(gc.Equals), version.Current.Number) + c.Assert(newVersion, gc.Not(gc.Equals), version.Current) args := params.Entities{Entities: []params.Entity{{Tag: s.rawMachine.Tag().String()}}} results, err := s.upgrader.DesiredVersion(args) c.Assert(err, jc.ErrorIsNil) @@ -324,5 +338,5 @@ c.Assert(results.Results[0].Error, gc.IsNil) agentVersion := results.Results[0].Version c.Assert(agentVersion, gc.NotNil) - c.Check(*agentVersion, gc.DeepEquals, version.Current.Number) + c.Check(*agentVersion, gc.DeepEquals, version.Current) } === modified file 'src/github.com/juju/juju/apiserver/upgrading_root.go' --- src/github.com/juju/juju/apiserver/upgrading_root.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/upgrading_root.go 2016-03-22 15:18:22 +0000 @@ -27,10 +27,13 @@ // allowedMethodsDuringUpgrades stores api calls // that are not blocked during the upgrade process // as well as their respective facade names. +// When needed, at some future point, this solution +// will need to be adjusted to cater for different +// facade versions as well. var allowedMethodsDuringUpgrades = map[string]set.Strings{ "Client": set.NewStrings( "FullStatus", // for "juju status" - "EnvironmentGet", // for "juju ssh" + "ModelGet", // for "juju ssh" "PrivateAddress", // for "juju ssh" "PublicAddress", // for "juju ssh" "FindTools", // for "juju upgrade-juju", before we can reset upgrade to re-run === modified file 'src/github.com/juju/juju/apiserver/upgrading_root_test.go' --- src/github.com/juju/juju/apiserver/upgrading_root_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/apiserver/upgrading_root_test.go 2016-03-22 15:18:22 +0000 @@ -23,8 +23,8 @@ for facadeName, methods := range apiserver.AllowedMethodsDuringUpgrades { for _, method := range methods.Values() { // for now all of the api calls of interest, - // reside on version 0 of their respective facade. - caller, err := root.FindMethod(facadeName, 0, method) + // reside on version 1 of their respective facade. + caller, err := root.FindMethod(facadeName, 1, method) c.Check(err, jc.ErrorIsNil) c.Check(caller, gc.NotNil) } @@ -34,7 +34,7 @@ func (r *upgradingRootSuite) TestFindDisallowedMethod(c *gc.C) { root := apiserver.TestingUpgradingRoot(nil) - caller, err := root.FindMethod("Client", 0, "ServiceDeploy") + caller, err := root.FindMethod("Client", 1, "ModelSet") c.Assert(err, gc.ErrorMatches, "upgrade in progress - Juju functionality is limited") c.Assert(caller, gc.IsNil) === modified file 'src/github.com/juju/juju/apiserver/usermanager/usermanager.go' --- src/github.com/juju/juju/apiserver/usermanager/usermanager.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/usermanager/usermanager.go 2016-03-22 15:18:22 +0000 @@ -18,16 +18,7 @@ var logger = loggo.GetLogger("juju.apiserver.usermanager") func init() { - common.RegisterStandardFacade("UserManager", 0, NewUserManagerAPI) -} - -// UserManager defines the methods on the usermanager API end point. 
-type UserManager interface { - AddUser(args params.AddUsers) (params.AddUserResults, error) - DisableUser(args params.Entities) (params.ErrorResults, error) - EnableUser(args params.Entities) (params.ErrorResults, error) - SetPassword(args params.EntityPasswords) (params.ErrorResults, error) - UserInfo(args params.UserInfoRequest) (params.UserInfoResults, error) + common.RegisterStandardFacade("UserManager", 1, NewUserManagerAPI) } // UserManagerAPI implements the user manager interface and is the concrete @@ -38,8 +29,6 @@ check *common.BlockChecker } -var _ UserManager = (*UserManagerAPI)(nil) - func NewUserManagerAPI( st *state.State, resources *common.Resources, @@ -60,7 +49,7 @@ // TODO(thumper): PERMISSIONS Change this permission check when we have // real permissions. For now, only the owner of the initial environment is // able to add users. - initialEnv, err := api.state.StateServerEnvironment() + initialEnv, err := api.state.ControllerModel() if err != nil { return errors.Trace(err) } @@ -70,7 +59,8 @@ return nil } -// AddUser adds a user. +// AddUser adds a user with a username, and either a password or +// a randomly generated secret key which will be returned. func (api *UserManagerAPI) AddUser(args params.AddUsers) (params.AddUserResults, error) { result := params.AddUserResults{ Results: make([]params.AddUserResult, len(args.Users)), @@ -87,23 +77,72 @@ return result, errors.Wrap(err, common.ErrPerm) } // TODO(thumper): PERMISSIONS Change this permission check when we have - // real permissions. For now, only the owner of the initial environment is + // real permissions. For now, only the owner of the initial model is // able to add users. if err := api.permissionCheck(loggedInUser); err != nil { return result, errors.Trace(err) } for i, arg := range args.Users { - user, err := api.state.AddUser(arg.Username, arg.DisplayName, arg.Password, loggedInUser.Id()) + var user *state.User + var err error + if arg.Password != "" { + user, err = api.state.AddUser(arg.Username, arg.DisplayName, arg.Password, loggedInUser.Id()) + } else { + user, err = api.state.AddUserWithSecretKey(arg.Username, arg.DisplayName, loggedInUser.Id()) + } if err != nil { err = errors.Annotate(err, "failed to create user") result.Results[i].Error = common.ServerError(err) + continue } else { - result.Results[i].Tag = user.Tag().String() + result.Results[i] = params.AddUserResult{ + Tag: user.Tag().String(), + SecretKey: user.SecretKey(), + } + } + userTag := user.Tag().(names.UserTag) + for _, modelTagStr := range arg.SharedModelTags { + modelTag, err := names.ParseModelTag(modelTagStr) + if err != nil { + err = errors.Annotatef(err, "user %q created but model %q not shared", arg.Username, modelTagStr) + result.Results[i].Error = common.ServerError(err) + break + } + err = ShareModelAction(api.state, modelTag, loggedInUser, userTag, params.AddModelUser) + if err != nil { + err = errors.Annotatef(err, "user %q created but model %q not shared", arg.Username, modelTagStr) + result.Results[i].Error = common.ServerError(err) + break + } } } return result, nil } +type stateAccessor interface { + ForModel(tag names.ModelTag) (*state.State, error) +} + +// ShareModelAction performs the requested share action (add/remove) for the specified +// sharedWith user on the specified model. 
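The ShareModelAction helper is defined below. First, a brief caller sketch, assuming a controller *state.State satisfies the stateAccessor interface via its ForModel method; the wrapper function itself is hypothetical, while the helper signature and action constants come from the code added in this hunk.

package example

import (
	"github.com/juju/names"

	"github.com/juju/juju/apiserver/params"
	"github.com/juju/juju/apiserver/usermanager"
	"github.com/juju/juju/state"
)

// shareAndUnshare exercises both actions the helper supports; st must be
// the controller's state, since the target model is looked up via ForModel.
func shareAndUnshare(st *state.State, model names.ModelTag, admin, guest names.UserTag) error {
	if err := usermanager.ShareModelAction(st, model, admin, guest, params.AddModelUser); err != nil {
		return err
	}
	return usermanager.ShareModelAction(st, model, admin, guest, params.RemoveModelUser)
}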
+func ShareModelAction(stateAccess stateAccessor, modelTag names.ModelTag, createdBy, sharedWith names.UserTag, action params.ModelAction) error { + st, err := stateAccess.ForModel(modelTag) + if err != nil { + return errors.Annotate(err, "could not look up model") + } + defer st.Close() + switch action { + case params.AddModelUser: + _, err = st.AddModelUser(state.ModelUserSpec{User: sharedWith, CreatedBy: createdBy}) + return errors.Annotate(err, "could not share model") + case params.RemoveModelUser: + err := st.RemoveModelUser(sharedWith) + return errors.Annotate(err, "could not unshare model") + default: + return errors.Errorf("unknown action %q", action) + } +} + func (api *UserManagerAPI) getUser(tag string) (*state.User, error) { userTag, err := names.ParseUserTag(tag) if err != nil { @@ -225,10 +264,9 @@ return errors.Trace(common.ErrPerm) } if arg.Password == "" { - return errors.New("can not use an empty password") + return errors.New("cannot use an empty password") } - err = user.SetPassword(arg.Password) - if err != nil { + if err := user.SetPassword(arg.Password); err != nil { return errors.Annotate(err, "failed to set password") } return nil === modified file 'src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go' --- src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/usermanager/usermanager_test.go 2016-03-22 15:18:22 +0000 @@ -56,12 +56,43 @@ c.Assert(err, gc.ErrorMatches, "permission denied") } +func (s *userManagerSuite) assertAddUser(c *gc.C, sharedModelTags []string) { + sharedModelState := s.Factory.MakeModel(c, nil) + defer sharedModelState.Close() + + args := params.AddUsers{ + Users: []params.AddUser{{ + Username: "foobar", + DisplayName: "Foo Bar", + Password: "password", + SharedModelTags: sharedModelTags, + }}} + + result, err := s.usermanager.AddUser(args) + // Check that the call is successful + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + foobarTag := names.NewLocalUserTag("foobar") + c.Assert(result.Results[0], gc.DeepEquals, params.AddUserResult{ + Tag: foobarTag.String()}) + // Check that the call results in a new user being created + user, err := s.State.User(foobarTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(user, gc.NotNil) + c.Assert(user.Name(), gc.Equals, "foobar") + c.Assert(user.DisplayName(), gc.Equals, "Foo Bar") +} + func (s *userManagerSuite) TestAddUser(c *gc.C) { + s.assertAddUser(c, nil) +} + +func (s *userManagerSuite) TestAddUserWithSecretKey(c *gc.C) { args := params.AddUsers{ Users: []params.AddUser{{ Username: "foobar", DisplayName: "Foo Bar", - Password: "password", + Password: "", // assign secret key }}} result, err := s.usermanager.AddUser(args) @@ -69,14 +100,43 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result.Results, gc.HasLen, 1) foobarTag := names.NewLocalUserTag("foobar") - c.Assert(result.Results[0], gc.DeepEquals, params.AddUserResult{ - Tag: foobarTag.String()}) + // Check that the call results in a new user being created user, err := s.State.User(foobarTag) c.Assert(err, jc.ErrorIsNil) c.Assert(user, gc.NotNil) c.Assert(user.Name(), gc.Equals, "foobar") c.Assert(user.DisplayName(), gc.Equals, "Foo Bar") + c.Assert(user.SecretKey(), gc.NotNil) + c.Assert(user.PasswordValid(""), jc.IsFalse) + + // Check that the secret key returned by the API matches what + // is in state. 
+ c.Assert(result.Results[0], gc.DeepEquals, params.AddUserResult{ + Tag: foobarTag.String(), + SecretKey: user.SecretKey(), + }) +} + +func (s *userManagerSuite) TestAddUserWithSharedModel(c *gc.C) { + sharedModelState := s.Factory.MakeModel(c, nil) + defer sharedModelState.Close() + + s.assertAddUser(c, []string{sharedModelState.ModelTag().String()}) + + // Check that the model has been shared. + sharedModel, err := sharedModelState.Model() + c.Assert(err, jc.ErrorIsNil) + users, err := sharedModel.Users() + c.Assert(err, jc.ErrorIsNil) + var modelUserTags = make([]names.UserTag, len(users)) + for i, u := range users { + modelUserTags[i] = u.UserTag() + } + c.Assert(modelUserTags, jc.SameContents, []names.UserTag{ + names.NewLocalUserTag("foobar"), + names.NewLocalUserTag("admin"), + }) } func (s *userManagerSuite) TestBlockAddUser(c *gc.C) { @@ -361,7 +421,7 @@ admin, err := s.State.User(s.AdminUserTag(c)) c.Assert(err, jc.ErrorIsNil) userFoo := s.Factory.MakeUser(c, &factory.UserParams{Name: "foobar", DisplayName: "Foo Bar"}) - userBar := s.Factory.MakeUser(c, &factory.UserParams{Name: "barfoo", DisplayName: "Bar Foo", Disabled: true}) + userAardvark := s.Factory.MakeUser(c, &factory.UserParams{Name: "aardvark", DisplayName: "Aard Vark", Disabled: true}) args := params.UserInfoRequest{IncludeDisabled: true} results, err := s.usermanager.UserInfo(args) @@ -370,28 +430,26 @@ for _, r := range []struct { user *state.User info *params.UserInfo - }{ - { - user: userBar, - info: ¶ms.UserInfo{ - Username: "barfoo", - DisplayName: "Bar Foo", - Disabled: true, - }, - }, { - user: admin, - info: ¶ms.UserInfo{ - Username: s.adminName, - DisplayName: admin.DisplayName(), - }, - }, { - user: userFoo, - info: ¶ms.UserInfo{ - Username: "foobar", - DisplayName: "Foo Bar", - }, - }, - } { + }{{ + user: userAardvark, + info: ¶ms.UserInfo{ + Username: "aardvark", + DisplayName: "Aard Vark", + Disabled: true, + }, + }, { + user: admin, + info: ¶ms.UserInfo{ + Username: s.adminName, + DisplayName: admin.DisplayName(), + }, + }, { + user: userFoo, + info: ¶ms.UserInfo{ + Username: "foobar", + DisplayName: "Foo Bar", + }, + }} { r.info.CreatedBy = s.adminName r.info.DateCreated = r.user.DateCreated() r.info.LastConnection = lastLoginPointer(c, r.user) @@ -401,7 +459,7 @@ results, err = s.usermanager.UserInfo(params.UserInfoRequest{}) c.Assert(err, jc.ErrorIsNil) - // Same results as before, but without the deactivated barfoo user + // Same results as before, but without the deactivated user expected.Results = expected.Results[1:] c.Assert(results, jc.DeepEquals, expected) } === modified file 'src/github.com/juju/juju/apiserver/utils.go' --- src/github.com/juju/juju/apiserver/utils.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/apiserver/utils.go 2016-03-22 15:18:22 +0000 @@ -37,54 +37,49 @@ type validateArgs struct { statePool *state.StatePool - envUUID string + modelUUID string // strict validation does not allow empty UUID values strict bool - // stateServerEnvOnly only validates the state server environment - stateServerEnvOnly bool + // controllerModelOnly only validates the controller model + controllerModelOnly bool } -// validateEnvironUUID is the common validator for the various -// apiserver components that need to check for a valid environment -// UUID. An empty envUUID means that the connection has come in at -// the root of the URL space and refers to the state server -// environment. 
The returned *state.State is a connection for the
-// specified environment UUID if the UUID refers to an environment
-// contained in the database.
-func validateEnvironUUID(args validateArgs) (*state.State, error) {
+// validateModelUUID is the common validator for the various
+// apiserver components that need to check for a valid model
+// UUID. An empty modelUUID means that the connection has come in at
+// the root of the URL space and refers to the controller
+// model.
+//
+// It returns the validated model UUID.
+func validateModelUUID(args validateArgs) (string, error) {
 ssState := args.statePool.SystemState()
-
- if args.envUUID == "" {
- // We allow the environUUID to be empty for 2 cases
+ if args.modelUUID == "" {
+ // We allow the modelUUID to be empty for 2 cases
 // 1) Compatibility with older clients
- // 2) TODO: server a limited API at the root (empty envUUID)
- // with just the user manager and environment manager
+ // 2) TODO: serve a limited API at the root (empty modelUUID)
+ // with just the user manager and model manager
 // if the connection comes over a sufficiently up to date
 // login command.
 if args.strict {
- return nil, errors.Trace(common.UnknownEnvironmentError(args.envUUID))
+ return "", errors.Trace(common.UnknownModelError(args.modelUUID))
 }
- logger.Debugf("validate env uuid: empty envUUID")
- return ssState, nil
- }
- if args.envUUID == ssState.EnvironUUID() {
- logger.Debugf("validate env uuid: state server environment - %s", args.envUUID)
- return ssState, nil
- }
- if args.stateServerEnvOnly {
- return nil, errors.Unauthorizedf("requested environment %q is not the state server environment", args.envUUID)
- }
- if !names.IsValidEnvironment(args.envUUID) {
- return nil, errors.Trace(common.UnknownEnvironmentError(args.envUUID))
- }
- envTag := names.NewEnvironTag(args.envUUID)
- if _, err := ssState.GetEnvironment(envTag); err != nil {
- return nil, errors.Wrap(err, common.UnknownEnvironmentError(args.envUUID))
- }
- logger.Debugf("validate env uuid: %s", args.envUUID)
- st, err := args.statePool.Get(args.envUUID)
- if err != nil {
- return nil, errors.Trace(err)
- }
- return st, nil
+ logger.Debugf("validate model uuid: empty modelUUID")
+ return ssState.ModelUUID(), nil
+ }
+ if args.modelUUID == ssState.ModelUUID() {
+ logger.Debugf("validate model uuid: controller model - %s", args.modelUUID)
+ return args.modelUUID, nil
+ }
+ if args.controllerModelOnly {
+ return "", errors.Unauthorizedf("requested model %q is not the controller model", args.modelUUID)
+ }
+ if !names.IsValidModel(args.modelUUID) {
+ return "", errors.Trace(common.UnknownModelError(args.modelUUID))
+ }
+ modelTag := names.NewModelTag(args.modelUUID)
+ if _, err := ssState.GetModel(modelTag); err != nil {
+ return "", errors.Wrap(err, common.UnknownModelError(args.modelUUID))
+ }
+ logger.Debugf("validate model uuid: %s", args.modelUUID)
+ return args.modelUUID, nil
 }

=== modified file 'src/github.com/juju/juju/apiserver/utils_test.go'
--- src/github.com/juju/juju/apiserver/utils_test.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/utils_test.go 2016-03-22 15:18:22 +0000
@@ -25,76 +25,75 @@
 }

 func (s *utilsSuite) TestValidateEmpty(c *gc.C) {
- st, err := validateEnvironUUID(
+ uuid, err := validateModelUUID(
 validateArgs{
 statePool: s.pool,
 })
 c.Assert(err, jc.ErrorIsNil)
- c.Assert(st.EnvironUUID(), gc.Equals, s.State.EnvironUUID())
+ c.Assert(uuid, gc.Equals, s.State.ModelUUID())
 }

 func (s *utilsSuite) TestValidateEmptyStrict(c *gc.C) {
- _, err := validateEnvironUUID(
- validateArgs{
- statePool: s.pool,
- strict: true,
- })
- c.Assert(err, gc.ErrorMatches, `unknown environment: ""`)
-}
-
-func (s *utilsSuite) TestValidateStateServer(c *gc.C) {
- st, err := validateEnvironUUID(
- validateArgs{
- statePool: s.pool,
- envUUID: s.State.EnvironUUID(),
- })
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(st.EnvironUUID(), gc.Equals, s.State.EnvironUUID())
-}
-
-func (s *utilsSuite) TestValidateStateServerStrict(c *gc.C) {
- st, err := validateEnvironUUID(
- validateArgs{
- statePool: s.pool,
- envUUID: s.State.EnvironUUID(),
- strict: true,
- })
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(st.EnvironUUID(), gc.Equals, s.State.EnvironUUID())
-}
-
-func (s *utilsSuite) TestValidateBadEnvUUID(c *gc.C) {
- _, err := validateEnvironUUID(
- validateArgs{
- statePool: s.pool,
- envUUID: "bad",
- })
- c.Assert(err, gc.ErrorMatches, `unknown environment: "bad"`)
-}
-
-func (s *utilsSuite) TestValidateOtherEnvironment(c *gc.C) {
- envState := s.Factory.MakeEnvironment(c, nil)
- defer envState.Close()
-
- st, err := validateEnvironUUID(
- validateArgs{
- statePool: s.pool,
- envUUID: envState.EnvironUUID(),
- })
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(st.EnvironUUID(), gc.Equals, envState.EnvironUUID())
- st.Close()
-}
-
-func (s *utilsSuite) TestValidateOtherEnvironmentStateServerOnly(c *gc.C) {
- envState := s.Factory.MakeEnvironment(c, nil)
- defer envState.Close()
-
- _, err := validateEnvironUUID(
- validateArgs{
- statePool: s.pool,
- envUUID: envState.EnvironUUID(),
- stateServerEnvOnly: true,
- })
- c.Assert(err, gc.ErrorMatches, `requested environment ".*" is not the state server environment`)
+ _, err := validateModelUUID(
+ validateArgs{
+ statePool: s.pool,
+ strict: true,
+ })
+ c.Assert(err, gc.ErrorMatches, `unknown model: ""`)
+}
+
+func (s *utilsSuite) TestValidateController(c *gc.C) {
+ uuid, err := validateModelUUID(
+ validateArgs{
+ statePool: s.pool,
+ modelUUID: s.State.ModelUUID(),
+ })
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(uuid, gc.Equals, s.State.ModelUUID())
+}
+
+func (s *utilsSuite) TestValidateControllerStrict(c *gc.C) {
+ uuid, err := validateModelUUID(
+ validateArgs{
+ statePool: s.pool,
+ modelUUID: s.State.ModelUUID(),
+ strict: true,
+ })
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(uuid, gc.Equals, s.State.ModelUUID())
+}
+
+func (s *utilsSuite) TestValidateBadModelUUID(c *gc.C) {
+ _, err := validateModelUUID(
+ validateArgs{
+ statePool: s.pool,
+ modelUUID: "bad",
+ })
+ c.Assert(err, gc.ErrorMatches, `unknown model: "bad"`)
+}
+
+func (s *utilsSuite) TestValidateOtherModel(c *gc.C) {
+ envState := s.Factory.MakeModel(c, nil)
+ defer envState.Close()
+
+ uuid, err := validateModelUUID(
+ validateArgs{
+ statePool: s.pool,
+ modelUUID: envState.ModelUUID(),
+ })
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(uuid, gc.Equals, envState.ModelUUID())
+}
+
+func (s *utilsSuite) TestValidateOtherModelControllerOnly(c *gc.C) {
+ envState := s.Factory.MakeModel(c, nil)
+ defer envState.Close()
+
+ _, err := validateModelUUID(
+ validateArgs{
+ statePool: s.pool,
+ modelUUID: envState.ModelUUID(),
+ controllerModelOnly: true,
+ })
+ c.Assert(err, gc.ErrorMatches, `requested model ".*" is not the controller model`)
 }

=== modified file 'src/github.com/juju/juju/apiserver/watcher.go'
--- src/github.com/juju/juju/apiserver/watcher.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/apiserver/watcher.go 2016-03-22 15:18:22 +0000
@@ -9,51 +9,52 @@
 "github.com/juju/errors"

 "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/apiserver/common/storagecommon"
"github.com/juju/juju/apiserver/common/storagecommon" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" ) func init() { common.RegisterFacade( - "AllWatcher", 0, NewAllWatcher, + "AllWatcher", 1, NewAllWatcher, reflect.TypeOf((*SrvAllWatcher)(nil)), ) - // Note: AllEnvWatcher uses the same infrastructure as AllWatcher + // Note: AllModelWatcher uses the same infrastructure as AllWatcher // but they are get under separate names as it possible the may // diverge in the future (especially in terms of authorisation // checks). common.RegisterFacade( - "AllEnvWatcher", 1, NewAllWatcher, + "AllModelWatcher", 2, NewAllWatcher, reflect.TypeOf((*SrvAllWatcher)(nil)), ) common.RegisterFacade( - "NotifyWatcher", 0, newNotifyWatcher, + "NotifyWatcher", 1, newNotifyWatcher, reflect.TypeOf((*srvNotifyWatcher)(nil)), ) common.RegisterFacade( - "StringsWatcher", 0, newStringsWatcher, + "StringsWatcher", 1, newStringsWatcher, reflect.TypeOf((*srvStringsWatcher)(nil)), ) common.RegisterFacade( - "RelationUnitsWatcher", 0, newRelationUnitsWatcher, + "RelationUnitsWatcher", 1, newRelationUnitsWatcher, reflect.TypeOf((*srvRelationUnitsWatcher)(nil)), ) common.RegisterFacade( - "VolumeAttachmentsWatcher", 1, newVolumeAttachmentsWatcher, - reflect.TypeOf((*srvMachineStorageIdsWatcher)(nil)), - ) - common.RegisterFacade( - "FilesystemAttachmentsWatcher", 1, newFilesystemAttachmentsWatcher, - reflect.TypeOf((*srvMachineStorageIdsWatcher)(nil)), - ) - common.RegisterFacade( - "EntityWatcher", 1, newEntityWatcher, - reflect.TypeOf((*srvEntityWatcher)(nil)), + "VolumeAttachmentsWatcher", 2, newVolumeAttachmentsWatcher, + reflect.TypeOf((*srvMachineStorageIdsWatcher)(nil)), + ) + common.RegisterFacade( + "FilesystemAttachmentsWatcher", 2, newFilesystemAttachmentsWatcher, + reflect.TypeOf((*srvMachineStorageIdsWatcher)(nil)), + ) + common.RegisterFacade( + "EntityWatcher", 2, newEntitiesWatcher, + reflect.TypeOf((*srvEntitiesWatcher)(nil)), ) } -// NewAllEnvWatcher returns a new API server endpoint for interacting -// with a watcher created by the WatchAll and WatchAllEnvs API calls. +// NewAllModelWatcher returns a new API server endpoint for interacting +// with a watcher created by the WatchAll and WatchAllModels API calls. func NewAllWatcher(st *state.State, resources *common.Resources, auth common.Authorizer, id string) (interface{}, error) { if !auth.AuthClient() { return nil, common.ErrPerm @@ -73,7 +74,7 @@ // SrvAllWatcher defines the API methods on a state.Multiwatcher. // which watches any changes to the state. Each client has its own // current set of watchers, stored in resources. It is used by both -// the AllWatcher and AllEnvWatcher facades. +// the AllWatcher and AllModelWatcher facades. 
 type SrvAllWatcher struct {
 watcher *state.Multiwatcher
 id string
@@ -249,7 +250,7 @@
 id string,
 ) (interface{}, error) {
 return newMachineStorageIdsWatcher(
- st, resources, auth, id, common.ParseVolumeAttachmentIds,
+ st, resources, auth, id, storagecommon.ParseVolumeAttachmentIds,
 )
 }

@@ -260,7 +261,7 @@
 id string,
 ) (interface{}, error) {
 return newMachineStorageIdsWatcher(
- st, resources, auth, id, common.ParseFilesystemAttachmentIds,
+ st, resources, auth, id, storagecommon.ParseFilesystemAttachmentIds,
 )
 }

@@ -306,10 +307,10 @@
 return w.resources.Stop(w.id)
 }

-// EntityWatcher defines an interface based on the StringsWatcher
+// EntitiesWatcher defines an interface based on the StringsWatcher
 // but also providing a method for the mapping of the received
 // strings to the tags of the according entities.
-type EntityWatcher interface {
+type EntitiesWatcher interface {
 state.StringsWatcher

 // MapChanges maps the received strings to their according tag strings.
@@ -318,27 +319,27 @@
 MapChanges(in []string) ([]string, error)
 }

-// srvEntityWatcher defines the API for methods on a state.StringsWatcher.
+// srvEntitiesWatcher defines the API for methods on a state.StringsWatcher.
 // Each client has its own current set of watchers, stored in resources.
-// srvEntityWatcher notifies about changes for all entities of a given kind,
+// srvEntitiesWatcher notifies about changes for all entities of a given kind,
 // sending the changes as a list of strings, which could be transformed
 // from state entity ids to their corresponding entity tags.
-type srvEntityWatcher struct {
+type srvEntitiesWatcher struct {
 st *state.State
 resources *common.Resources
 id string
- watcher EntityWatcher
+ watcher EntitiesWatcher
 }

-func newEntityWatcher(st *state.State, resources *common.Resources, auth common.Authorizer, id string) (interface{}, error) {
+func newEntitiesWatcher(st *state.State, resources *common.Resources, auth common.Authorizer, id string) (interface{}, error) {
 if !isAgent(auth) {
 return nil, common.ErrPerm
 }
- watcher, ok := resources.Get(id).(EntityWatcher)
+ watcher, ok := resources.Get(id).(EntitiesWatcher)
 if !ok {
 return nil, common.ErrUnknownWatcher
 }
- return &srvEntityWatcher{
+ return &srvEntitiesWatcher{
 st: st,
 resources: resources,
 id: id,
@@ -348,14 +349,14 @@
 // Next returns when a change has occurred to an entity of the
 // collection being watched since the most recent call to Next
-// or the Watch call that created the srvEntityWatcher.
-func (w *srvEntityWatcher) Next() (params.EntityWatchResult, error) {
+// or the Watch call that created the srvEntitiesWatcher.
+func (w *srvEntitiesWatcher) Next() (params.EntitiesWatchResult, error) {
 if changes, ok := <-w.watcher.Changes(); ok {
 mapped, err := w.watcher.MapChanges(changes)
 if err != nil {
- return params.EntityWatchResult{}, errors.Annotate(err, "cannot map changes")
+ return params.EntitiesWatchResult{}, errors.Annotate(err, "cannot map changes")
 }
- return params.EntityWatchResult{
+ return params.EntitiesWatchResult{
 Changes: mapped,
 }, nil
 }
@@ -363,10 +364,10 @@
 if err == nil {
 err = common.ErrStoppedWatcher
 }
- return params.EntityWatchResult{}, err
+ return params.EntitiesWatchResult{}, err
 }

 // Stop stops the watcher.
-func (w *srvEntityWatcher) Stop() error { +func (w *srvEntitiesWatcher) Stop() error { return w.resources.Stop(w.id) } === modified file 'src/github.com/juju/juju/apiserver/watcher_test.go' --- src/github.com/juju/juju/apiserver/watcher_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/apiserver/watcher_test.go 2016-03-22 15:18:22 +0000 @@ -45,7 +45,7 @@ s.authorizer.Tag = names.NewMachineTag("123") ch <- []string{"0:1", "1:2"} - facade := s.getFacade(c, "VolumeAttachmentsWatcher", 1, id).(machineStorageIdsWatcher) + facade := s.getFacade(c, "VolumeAttachmentsWatcher", 2, id).(machineStorageIdsWatcher) result, err := facade.Next() c.Assert(err, jc.ErrorIsNil) @@ -63,7 +63,7 @@ s.authorizer.Tag = names.NewMachineTag("123") ch <- []string{"0:1", "1:2"} - facade := s.getFacade(c, "FilesystemAttachmentsWatcher", 1, id).(machineStorageIdsWatcher) + facade := s.getFacade(c, "FilesystemAttachmentsWatcher", 2, id).(machineStorageIdsWatcher) result, err := facade.Next() c.Assert(err, jc.ErrorIsNil) === removed directory 'src/github.com/juju/juju/bzr' === removed file 'src/github.com/juju/juju/bzr/bzr.go' --- src/github.com/juju/juju/bzr/bzr.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/bzr/bzr.go 1970-01-01 00:00:00 +0000 @@ -1,162 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// Package bzr offers an interface to manage branches of the Bazaar VCS. -package bzr - -import ( - "bytes" - "fmt" - "os" - "os/exec" - "path" - "strings" -) - -// Branch represents a Bazaar branch. -type Branch struct { - location string - env []string -} - -// New returns a new Branch for the Bazaar branch at location. -func New(location string) *Branch { - b := &Branch{location, cenv()} - if _, err := os.Stat(location); err == nil { - stdout, _, err := b.bzr("root") - if err == nil { - // Need to trim \r as well as \n for Windows compatibility - b.location = strings.TrimRight(string(stdout), "\r\n") - } - } - return b -} - -// cenv returns a copy of the current process environment with LC_ALL=C. -func cenv() []string { - env := os.Environ() - for i, pair := range env { - if strings.HasPrefix(pair, "LC_ALL=") { - env[i] = "LC_ALL=C" - return env - } - } - return append(env, "LC_ALL=C") -} - -// Location returns the location of branch b. -func (b *Branch) Location() string { - return b.location -} - -// Join returns b's location with parts appended as path components. -// In other words, if b's location is "lp:foo", and parts is {"bar, baz"}, -// Join returns "lp:foo/bar/baz". -func (b *Branch) Join(parts ...string) string { - return path.Join(append([]string{b.location}, parts...)...) -} - -func (b *Branch) bzr(subcommand string, args ...string) (stdout, stderr []byte, err error) { - cmd := exec.Command("bzr", append([]string{subcommand}, args...)...) - if _, err := os.Stat(b.location); err == nil { - cmd.Dir = b.location - } - errbuf := &bytes.Buffer{} - cmd.Stderr = errbuf - cmd.Env = b.env - stdout, err = cmd.Output() - // Some commands fail with exit status 0 (e.g. bzr root). :-( - if err != nil || bytes.Contains(errbuf.Bytes(), []byte("ERROR")) { - var errmsg string - if err != nil { - errmsg = err.Error() - } - return nil, nil, fmt.Errorf(`error running "bzr %s": %s%s%s`, subcommand, stdout, errbuf.Bytes(), errmsg) - } - return stdout, errbuf.Bytes(), err -} - -// Init intializes a new branch at b's location. 
-func (b *Branch) Init() error { - _, _, err := b.bzr("init", b.location) - return err -} - -// Add adds to b the path resultant from calling b.Join(parts...). -func (b *Branch) Add(parts ...string) error { - _, _, err := b.bzr("add", b.Join(parts...)) - return err -} - -// Commit commits pending changes into b. -func (b *Branch) Commit(message string) error { - _, _, err := b.bzr("commit", "-q", "-m", message) - return err -} - -// RevisionId returns the Bazaar revision id for the tip of b. -func (b *Branch) RevisionId() (string, error) { - stdout, stderr, err := b.bzr("revision-info", "-d", b.location) - if err != nil { - return "", err - } - pair := bytes.Fields(stdout) - if len(pair) != 2 { - return "", fmt.Errorf(`invalid output from "bzr revision-info": %s%s`, stdout, stderr) - } - id := string(pair[1]) - if id == "null:" { - return "", fmt.Errorf("branch has no content") - } - return id, nil -} - -// PushLocation returns the default push location for b. -func (b *Branch) PushLocation() (string, error) { - stdout, _, err := b.bzr("info", b.location) - if err != nil { - return "", err - } - if i := bytes.Index(stdout, []byte("push branch:")); i >= 0 { - return string(stdout[i+13 : i+bytes.IndexAny(stdout[i:], "\r\n")]), nil - } - return "", fmt.Errorf("no push branch location defined") -} - -// PushAttr holds options for the Branch.Push method. -type PushAttr struct { - Location string // Location to push to. Use the default push location if empty. - Remember bool // Whether to remember the location being pushed to as the default. -} - -// Push pushes any new revisions in b to attr.Location if that's -// provided, or to the default push location otherwise. -// See PushAttr for other options. -func (b *Branch) Push(attr *PushAttr) error { - var args []string - if attr != nil { - if attr.Remember { - args = append(args, "--remember") - } - if attr.Location != "" { - args = append(args, attr.Location) - } - } - _, _, err := b.bzr("push", args...) - return err -} - -// CheckClean returns an error if 'bzr status' is not clean. -func (b *Branch) CheckClean() error { - stdout, _, err := b.bzr("status", b.location) - if err != nil { - return err - } - if bytes.Count(stdout, []byte{'\n'}) == 1 && bytes.Contains(stdout, []byte(`See "bzr shelve --list" for details.`)) { - return nil // Shelves are fine. - } - if len(stdout) > 0 { - return fmt.Errorf("branch is not clean (bzr status)") - } - return nil -} === removed file 'src/github.com/juju/juju/bzr/bzr_test.go' --- src/github.com/juju/juju/bzr/bzr_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/bzr/bzr_test.go 1970-01-01 00:00:00 +0000 @@ -1,162 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package bzr_test - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - stdtesting "testing" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/bzr" - "github.com/juju/juju/testing" -) - -func Test(t *stdtesting.T) { - gc.TestingT(t) -} - -var _ = gc.Suite(&BzrSuite{}) - -type BzrSuite struct { - testing.BaseSuite - b *bzr.Branch -} - -const bzr_config = `[DEFAULT] -email = testing -` - -func (s *BzrSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - bzrdir := c.MkDir() - s.PatchEnvironment("BZR_HOME", bzrdir) - err := os.MkdirAll(filepath.Join(bzrdir, bzrHome), 0755) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile( - filepath.Join(bzrdir, bzrHome, "bazaar.conf"), - []byte(bzr_config), 0644) - c.Assert(err, jc.ErrorIsNil) - s.b = bzr.New(c.MkDir()) - c.Assert(s.b.Init(), gc.IsNil) -} - -func (s *BzrSuite) TestNewFindsRoot(c *gc.C) { - err := os.Mkdir(s.b.Join("dir"), 0755) - c.Assert(err, jc.ErrorIsNil) - b := bzr.New(s.b.Join("dir")) - // When bzr has to search for the root, it will expand any symlinks it - // found along the way. - path, err := filepath.EvalSymlinks(s.b.Location()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(b.Location(), jc.SamePath, path) -} - -func (s *BzrSuite) TestJoin(c *gc.C) { - path := bzr.New("lp:foo").Join("baz", "bar") - c.Assert(path, gc.Equals, "lp:foo/baz/bar") -} - -func (s *BzrSuite) TestErrorHandling(c *gc.C) { - err := bzr.New("/non/existent/path").Init() - c.Assert(err, gc.ErrorMatches, `(?s)error running "bzr init":.*does not exist.*`) -} - -func (s *BzrSuite) TestInit(c *gc.C) { - _, err := os.Stat(s.b.Join(".bzr")) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *BzrSuite) TestRevisionIdOnEmpty(c *gc.C) { - revid, err := s.b.RevisionId() - c.Assert(err, gc.ErrorMatches, "branch has no content") - c.Assert(revid, gc.Equals, "") -} - -func (s *BzrSuite) TestCommit(c *gc.C) { - f, err := os.Create(s.b.Join("myfile")) - c.Assert(err, jc.ErrorIsNil) - f.Close() - err = s.b.Add("myfile") - c.Assert(err, jc.ErrorIsNil) - err = s.b.Commit("my log message") - c.Assert(err, jc.ErrorIsNil) - - revid, err := s.b.RevisionId() - c.Assert(err, jc.ErrorIsNil) - - cmd := exec.Command("bzr", "log", "--long", "--show-ids", "-v", s.b.Location()) - output, err := cmd.CombinedOutput() - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(output), gc.Matches, "(?s).*revision-id: "+revid+"\n.*message:\n.*my log message\n.*added:\n.*myfile .*") -} - -func (s *BzrSuite) TestPush(c *gc.C) { - b1 := bzr.New(c.MkDir()) - b2 := bzr.New(c.MkDir()) - b3 := bzr.New(c.MkDir()) - c.Assert(b1.Init(), gc.IsNil) - c.Assert(b2.Init(), gc.IsNil) - c.Assert(b3.Init(), gc.IsNil) - - // Create and add b1/file to the branch. - f, err := os.Create(b1.Join("file")) - c.Assert(err, jc.ErrorIsNil) - f.Close() - err = b1.Add("file") - c.Assert(err, jc.ErrorIsNil) - err = b1.Commit("added file") - c.Assert(err, jc.ErrorIsNil) - - // Push file to b2. - err = b1.Push(&bzr.PushAttr{Location: b2.Location()}) - c.Assert(err, jc.ErrorIsNil) - - // Push location should be set to b2. - location, err := b1.PushLocation() - c.Assert(err, jc.ErrorIsNil) - c.Assert(location, jc.SamePath, b2.Location()) - - // Now push it to b3. - err = b1.Push(&bzr.PushAttr{Location: b3.Location()}) - c.Assert(err, jc.ErrorIsNil) - - // Push location is still set to b2. - location, err = b1.PushLocation() - c.Assert(err, jc.ErrorIsNil) - c.Assert(location, jc.SamePath, b2.Location()) - - // Push it again, this time with the remember flag set. 
- err = b1.Push(&bzr.PushAttr{Location: b3.Location(), Remember: true}) - c.Assert(err, jc.ErrorIsNil) - - // Now the push location has shifted to b3. - location, err = b1.PushLocation() - c.Assert(err, jc.ErrorIsNil) - c.Assert(location, jc.SamePath, b3.Location()) - - // Both b2 and b3 should have the file. - _, err = os.Stat(b2.Join("file")) - c.Assert(err, jc.ErrorIsNil) - _, err = os.Stat(b3.Join("file")) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *BzrSuite) TestCheckClean(c *gc.C) { - err := s.b.CheckClean() - c.Assert(err, jc.ErrorIsNil) - - // Create and add b1/file to the branch. - f, err := os.Create(s.b.Join("file")) - c.Assert(err, jc.ErrorIsNil) - f.Close() - - err = s.b.CheckClean() - c.Assert(err, gc.ErrorMatches, `branch is not clean \(bzr status\)`) -} === removed file 'src/github.com/juju/juju/bzr/bzr_unix_test.go' --- src/github.com/juju/juju/bzr/bzr_unix_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/bzr/bzr_unix_test.go 1970-01-01 00:00:00 +0000 @@ -1,9 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -// +build !windows - -package bzr_test - -const bzrHome = ".bazaar" === removed file 'src/github.com/juju/juju/bzr/bzr_windows_test.go' --- src/github.com/juju/juju/bzr/bzr_windows_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/bzr/bzr_windows_test.go 1970-01-01 00:00:00 +0000 @@ -1,9 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -// +build windows - -package bzr_test - -const bzrHome = "Bazaar/2.0" === modified file 'src/github.com/juju/juju/cert/cert.go' --- src/github.com/juju/juju/cert/cert.go 2015-04-14 14:11:54 +0000 +++ src/github.com/juju/juju/cert/cert.go 2016-03-22 15:18:22 +0000 @@ -91,7 +91,7 @@ template := &x509.Certificate{ SerialNumber: new(big.Int), Subject: pkix.Name{ - CommonName: fmt.Sprintf("juju-generated CA for environment %q", envName), + CommonName: fmt.Sprintf("juju-generated CA for model %q", envName), Organization: []string{"juju"}, }, NotBefore: now.UTC().AddDate(0, 0, -7), @@ -104,7 +104,7 @@ } certDER, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key) if err != nil { - return "", "", fmt.Errorf("canot create certificate: %v", err) + return "", "", fmt.Errorf("cannot create certificate: %v", err) } certPEMData := pem.EncodeToMemory(&pem.Block{ Type: "CERTIFICATE", === modified file 'src/github.com/juju/juju/cert/cert_test.go' --- src/github.com/juju/juju/cert/cert_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cert/cert_test.go 2016-03-22 15:18:22 +0000 @@ -75,7 +75,7 @@ c.Assert(err, jc.ErrorIsNil) c.Check(caKey, gc.FitsTypeOf, (*rsa.PrivateKey)(nil)) - c.Check(caCert.Subject.CommonName, gc.Equals, `juju-generated CA for environment "foo"`) + c.Check(caCert.Subject.CommonName, gc.Equals, `juju-generated CA for model "foo"`) checkNotBefore(c, caCert, now) checkNotAfter(c, caCert, expiry) c.Check(caCert.BasicConstraintsValid, jc.IsTrue) === added directory 'src/github.com/juju/juju/charmstore' === added file 'src/github.com/juju/juju/charmstore/client.go' --- src/github.com/juju/juju/charmstore/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/charmstore/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package charmstore + +import ( + "io" + + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" +) + +// Client exposes the functionality of the charm store, as provided +// by github.com/juju/charmrepo/csclient.Client. +// +// Note that the following csclient.Client methods are used as well, +// but only in tests: +// - Put(path string, val interface{}) error +// - UploadCharm(id *charm.URL, ch charm.Charm) (*charm.URL, error) +// - UploadCharmWithRevision(id *charm.URL, ch charm.Charm, promulgatedRevision int) error +// - UploadBundleWithRevision() +type Client interface { + // TODO(ericsnow) Replace use of Get with use of more specific API methods? + + // Get makes a GET request to the given path in the charm store. The + // path must have a leading slash, but must not include the host + // name or version prefix. The result is parsed as JSON into the + // given result value, which should be a pointer to the expected + // data, but may be nil if no result is desired. + Get(path string, result interface{}) error + + // TODO(ericsnow) Just embed resource/charmstore.BaseClient? + + // ListResources composes, for each of the identified charms, the + // list of details for each of the charm's resources. Those details + // are those associated with the specific charm revision. They + // include the resource's metadata and revision. + ListResources(charmURLs []*charm.URL) ([][]charmresource.Resource, error) + + // GetResource returns a reader for the resource's data. That data + // is streamed from the charm store. The charm's revision, if any, + // is ignored. If the identified resource is not in the charm store + // then errors.NotFound is returned. + GetResource(cURL *charm.URL, resourceName string, revision int) (io.ReadCloser, error) +} === added directory 'src/github.com/juju/juju/cloud' === added file 'src/github.com/juju/juju/cloud/clouds.go' --- src/github.com/juju/juju/cloud/clouds.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/cloud/clouds.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,397 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package cloud provides functionality to parse information +// describing clouds, including regions, supported auth types etc. + +package cloud + +import ( + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + + "github.com/juju/errors" + "github.com/juju/utils" + "gopkg.in/yaml.v2" + + "github.com/juju/juju/juju/osenv" +) + +//go:generate go run ../generate/filetoconst.go fallbackPublicCloudInfo fallback-public-cloud.yaml fallback_public_cloud.go 2015 + +// AuthType is the type of authentication used by the cloud. +type AuthType string + +const ( + // AccessKeyAuthType is an authentication type using a key and secret. + AccessKeyAuthType AuthType = "access-key" + + // UserPassAuthType is an authentication type using a username and password. + UserPassAuthType AuthType = "userpass" + + // OAuth1AuthType is an authentication type using oauth1. + OAuth1AuthType AuthType = "oauth1" + + // OAuth2AuthType is an authentication type using oauth2. + OAuth2AuthType AuthType = "oauth2" + + // JSONFileAuthType is an authentication type that takes a path to + // a JSON file. + JSONFileAuthType AuthType = "jsonfile" + + // EmptyAuthType is the authentication type used for providers + // that require no credentials, e.g. "lxd", and "manual". + EmptyAuthType AuthType = "empty" +) + +// Cloud is a cloud definition. 
+type Cloud struct { + // Type is the type of cloud, eg aws, openstack etc. + Type string + + // AuthTypes are the authentication modes supported by the cloud. + AuthTypes []AuthType + + // Endpoint is the default endpoint for the cloud regions, may be + // overridden by a region. + Endpoint string + + // StorageEndpoint is the default storage endpoint for the cloud + // regions, may be overridden by a region. + StorageEndpoint string + + // Regions are the regions available in the cloud. + // + // Regions is a slice, and not a map, because order is important. + // The first region in the slice is the default region for the + // cloud. + Regions []Region +} + +// Region is a cloud region. +type Region struct { + // Name is the name of the region. + Name string + + // Endpoint is the region's primary endpoint URL. + Endpoint string + + // StorageEndpoint is the region's storage endpoint URL. + // If the cloud/region does not have a storage-specific + // endpoint URL, this will be empty. + StorageEndpoint string +} + +// cloudSet contains cloud definitions, used for marshalling and +// unmarshalling. +type cloudSet struct { + // Clouds is a map of cloud definitions, keyed on cloud name. + Clouds map[string]*cloud `yaml:"clouds"` +} + +// cloud is equivalent to Cloud, for marshalling and unmarshalling. +type cloud struct { + Type string `yaml:"type"` + AuthTypes []AuthType `yaml:"auth-types,omitempty,flow"` + Endpoint string `yaml:"endpoint,omitempty"` + StorageEndpoint string `yaml:"storage-endpoint,omitempty"` + Regions regions `yaml:"regions,omitempty"` +} + +// regions is a collection of regions, either as a map and/or +// as a yaml.MapSlice. +// +// When marshalling, we populate the Slice field only. This is +// necessary for us to control the order of map items. +// +// When unmarshalling, we populate both Map and Slice. Map is +// populated to simplify conversion to Region objects. Slice +// is populated so we can identify the first map item, which +// becomes the default region for the cloud. +type regions struct { + Map map[string]*region + Slice yaml.MapSlice +} + +// region is equivalent to Region, for marshalling and unmarshalling. +type region struct { + Endpoint string `yaml:"endpoint,omitempty"` + StorageEndpoint string `yaml:"storage-endpoint,omitempty"` +} + +// BuiltInProviderNames work out of the box. +var BuiltInProviderNames = []string{"lxd", "manual", "maas"} + +// CloudByName returns the cloud with the specified name. +// If there exists no cloud with the specified name, an +// error satisfying errors.IsNotFound will be returned. +// +// TODO(axw) write unit tests for this. +func CloudByName(name string) (*Cloud, error) { + // Personal clouds take precedence. + personalClouds, err := PersonalCloudMetadata() + if err != nil { + return nil, errors.Trace(err) + } + if cloud, ok := personalClouds[name]; ok { + return &cloud, nil + } + clouds, _, err := PublicCloudMetadata(JujuPublicCloudsPath()) + if err != nil { + return nil, errors.Trace(err) + } + if cloud, ok := clouds[name]; ok { + return &cloud, nil + } + return nil, errors.NotFoundf("cloud %s", name) +} + +// JujuPublicCloudsPath is the location where public cloud information is +// expected to be found. Requires JUJU_HOME to be set. +func JujuPublicCloudsPath() string { + return osenv.JujuXDGDataHomePath("public-clouds.yaml") +} + +// PublicCloudMetadata looks in searchPath for cloud metadata files and if none +// are found, returns the fallback public cloud metadata. 
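A brief usage sketch of the parsing API defined in this file (PublicCloudMetadata follows directly below; ParseCloudMetadata appears later in the file). This sketch is not part of the commit; the "aws-me" cloud name and its endpoint are illustrative values modelled on the fixtures in clouds_test.go further down:

package main

import (
	"fmt"
	"log"

	"github.com/juju/juju/cloud"
)

func main() {
	// Parse an in-memory clouds.yaml snippet; the first region listed
	// becomes the default region for the cloud (see the regions type above).
	metadata := []byte(`
clouds:
  aws-me:
    type: aws
    auth-types: [ userpass ]
    regions:
      us-east-1:
        endpoint: https://ec2.us-east-1.amazonaws.com
`)
	clouds, err := cloud.ParseCloudMetadata(metadata)
	if err != nil {
		log.Fatal(err)
	}
	c := clouds["aws-me"]
	fmt.Println(c.Type, c.AuthTypes, c.Regions[0].Name)
	// Prints: aws [userpass] us-east-1
}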
+func PublicCloudMetadata(searchPath ...string) (result map[string]Cloud, fallbackUsed bool, err error) { + for _, file := range searchPath { + data, err := ioutil.ReadFile(file) + if err != nil && os.IsNotExist(err) { + continue + } + if err != nil { + return nil, false, errors.Trace(err) + } + clouds, err := ParseCloudMetadata(data) + if err != nil { + return nil, false, errors.Trace(err) + } + return clouds, false, err + } + clouds, err := ParseCloudMetadata([]byte(fallbackPublicCloudInfo)) + return clouds, true, err +} + +// ParseCloudMetadata parses the given yaml bytes into Clouds metadata. +func ParseCloudMetadata(data []byte) (map[string]Cloud, error) { + var metadata cloudSet + if err := yaml.Unmarshal(data, &metadata); err != nil { + return nil, errors.Annotate(err, "cannot unmarshal yaml cloud metadata") + } + + // Translate to the exported type. For each cloud, we store + // the first region for the cloud as its default region. + clouds := make(map[string]Cloud) + for name, cloud := range metadata.Clouds { + var regions []Region + if len(cloud.Regions.Map) > 0 { + for _, item := range cloud.Regions.Slice { + name := fmt.Sprint(item.Key) + r := cloud.Regions.Map[name] + if r == nil { + // r will be nil if none of the fields in + // the YAML are set. + regions = append(regions, Region{Name: name}) + } else { + regions = append(regions, Region{ + name, r.Endpoint, r.StorageEndpoint, + }) + } + } + } + meta := Cloud{ + Type: cloud.Type, + AuthTypes: cloud.AuthTypes, + Endpoint: cloud.Endpoint, + StorageEndpoint: cloud.StorageEndpoint, + Regions: regions, + } + meta.denormaliseMetadata() + clouds[name] = meta + } + return clouds, nil +} + +// WritePublicCloudMetadata marshals to YAML and writes the cloud metadata +// to the public cloud file. +func WritePublicCloudMetadata(cloudsMap map[string]Cloud) error { + data, err := marshalCloudMetadata(cloudsMap) + if err != nil { + return errors.Trace(err) + } + return utils.AtomicWriteFile(JujuPublicCloudsPath(), data, 0600) +} + +// IsSameCloudMetadata returns true if both meta and meta2 contain the +// same cloud metadata. +func IsSameCloudMetadata(meta1, meta2 map[string]Cloud) (bool, error) { + // The easiest approach is to simply marshall to YAML and compare. + yaml1, err := marshalCloudMetadata(meta1) + if err != nil { + return false, err + } + yaml2, err := marshalCloudMetadata(meta2) + if err != nil { + return false, err + } + return string(yaml1) == string(yaml2), nil +} + +// marshalCloudMetadata marshals the given clouds to YAML. +func marshalCloudMetadata(cloudsMap map[string]Cloud) ([]byte, error) { + clouds := cloudSet{make(map[string]*cloud)} + for name, metadata := range cloudsMap { + var regions regions + for _, r := range metadata.Regions { + regions.Slice = append(regions.Slice, yaml.MapItem{ + r.Name, region{r.Endpoint, r.StorageEndpoint}, + }) + } + clouds.Clouds[name] = &cloud{ + Type: metadata.Type, + AuthTypes: metadata.AuthTypes, + Endpoint: metadata.Endpoint, + StorageEndpoint: metadata.StorageEndpoint, + Regions: regions, + } + } + data, err := yaml.Marshal(clouds) + if err != nil { + return nil, errors.Annotate(err, "cannot marshal cloud metadata") + } + return data, nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (r regions) MarshalYAML() (interface{}, error) { + return r.Slice, nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
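Before the UnmarshalYAML implementation that follows, a self-contained sketch of the ordering property the Map/Slice pairing relies on: yaml.v2's MapSlice preserves document order, which is how the first-listed region can be identified. This is not part of the commit, and the keys here are arbitrary examples:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

func main() {
	// A plain Go map loses document order; yaml.MapSlice keeps it.
	var ordered yaml.MapSlice
	if err := yaml.Unmarshal([]byte("b: 2\na: 1\n"), &ordered); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ordered[0].Key, ordered[1].Key) // b a
}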
+func (r *regions) UnmarshalYAML(f func(interface{}) error) error {
+ if err := f(&r.Map); err != nil {
+ return err
+ }
+ return f(&r.Slice)
+}
+
+// To keep the metadata concise, attributes on the metadata struct which
+// have the same value for each item may be moved up to a higher level in
+// the tree. denormaliseMetadata descends the tree and fills in any missing
+// attributes with values from a higher level.
+func (cloud Cloud) denormaliseMetadata() {
+ for name, region := range cloud.Regions {
+ r := region
+ inherit(&r, &cloud)
+ cloud.Regions[name] = r
+ }
+}
+
+type structTags map[reflect.Type]map[string]int
+
+var tagsForType structTags = make(structTags)
+
+// RegisterStructTags ensures the yaml tags for the given structs are able to be used
+// when parsing cloud metadata.
+func RegisterStructTags(vals ...interface{}) {
+ tags := mkTags(vals...)
+ for k, v := range tags {
+ tagsForType[k] = v
+ }
+}
+
+func init() {
+ RegisterStructTags(Cloud{}, Region{})
+}
+
+func mkTags(vals ...interface{}) map[reflect.Type]map[string]int {
+ typeMap := make(map[reflect.Type]map[string]int)
+ for _, v := range vals {
+ t := reflect.TypeOf(v)
+ typeMap[t] = yamlTags(t)
+ }
+ return typeMap
+}
+
+// yamlTags returns a map from yaml tag to the field index for the string fields in the given type.
+func yamlTags(t reflect.Type) map[string]int {
+ if t.Kind() != reflect.Struct {
+ panic(errors.Errorf("cannot get yaml tags on type %s", t))
+ }
+ tags := make(map[string]int)
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type != reflect.TypeOf("") {
+ continue
+ }
+ if tag := f.Tag.Get("yaml"); tag != "" {
+ if i := strings.Index(tag, ","); i >= 0 {
+ tag = tag[0:i]
+ }
+ if tag == "-" {
+ continue
+ }
+ if tag != "" {
+ f.Name = tag
+ }
+ }
+ tags[f.Name] = i
+ }
+ return tags
+}
+
+// inherit sets any blank fields in dst to their equivalent values in fields in src that have matching yaml tags.
+// The dst parameter must be a pointer to a struct.
+func inherit(dst, src interface{}) {
+ for tag := range tags(dst) {
+ setFieldByTag(dst, tag, fieldByTag(src, tag), false)
+ }
+}
+
+// tags returns the field offsets for the yaml tags defined by the given value, which must be
+// a struct or a pointer to a struct.
+func tags(x interface{}) map[string]int {
+ t := reflect.TypeOf(x)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Struct {
+ panic(errors.Errorf("expected struct, not %s", t))
+ }
+
+ if tagm := tagsForType[t]; tagm != nil {
+ return tagm
+ }
+ panic(errors.Errorf("%s not found in type table", t))
+}
+
+// fieldByTag returns the value for the field in x with the given yaml tag, or "" if there is no such field.
+func fieldByTag(x interface{}, tag string) string {
+ tagm := tags(x)
+ v := reflect.ValueOf(x)
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if i, ok := tagm[tag]; ok {
+ return v.Field(i).Interface().(string)
+ }
+ return ""
+}
+
+// setFieldByTag sets the value for the field in x with the given yaml tag to val.
+// The override parameter specifies whether the value will be set even if the original value is non-empty.
+func setFieldByTag(x interface{}, tag, val string, override bool) { + i, ok := tags(x)[tag] + if !ok { + return + } + v := reflect.ValueOf(x).Elem() + f := v.Field(i) + if override || f.Interface().(string) == "" { + f.Set(reflect.ValueOf(val)) + } +} === added file 'src/github.com/juju/juju/cloud/clouds_test.go' --- src/github.com/juju/juju/cloud/clouds_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/cloud/clouds_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,173 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloud_test + +import ( + "io/ioutil" + "path/filepath" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/juju/osenv" + "github.com/juju/juju/testing" +) + +type cloudSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&cloudSuite{}) + +var publicCloudNames = []string{ + "aws", "aws-china", "aws-gov", "google", "azure", "azure-china", "rackspace", "joyent", "cloudsigma", +} + +func parsePublicClouds(c *gc.C) map[string]cloud.Cloud { + clouds, err := cloud.ParseCloudMetadata([]byte(cloud.FallbackPublicCloudInfo)) + c.Assert(err, jc.ErrorIsNil) + c.Assert(clouds, gc.HasLen, len(publicCloudNames)) + return clouds +} + +func (s *cloudSuite) TestParseClouds(c *gc.C) { + clouds := parsePublicClouds(c) + var cloudNames []string + for name, _ := range clouds { + cloudNames = append(cloudNames, name) + } + c.Assert(cloudNames, jc.SameContents, publicCloudNames) +} + +func (s *cloudSuite) TestParseCloudsEndpointDenormalisation(c *gc.C) { + clouds := parsePublicClouds(c) + rackspace := clouds["rackspace"] + c.Assert(rackspace.Type, gc.Equals, "rackspace") + c.Assert(rackspace.Endpoint, gc.Equals, "https://identity.api.rackspacecloud.com/v2.0") + var regionNames []string + for _, region := range rackspace.Regions { + regionNames = append(regionNames, region.Name) + if region.Name == "LON" { + c.Assert(region.Endpoint, gc.Equals, "https://lon.identity.api.rackspacecloud.com/v2.0") + } else { + c.Assert(region.Endpoint, gc.Equals, "https://identity.api.rackspacecloud.com/v2.0") + } + } + c.Assert(regionNames, jc.SameContents, []string{"DFW", "ORD", "IAD", "LON", "SYD", "HKG"}) +} + +func (s *cloudSuite) TestParseCloudsAuthTypes(c *gc.C) { + clouds := parsePublicClouds(c) + rackspace := clouds["rackspace"] + c.Assert(rackspace.AuthTypes, jc.SameContents, []cloud.AuthType{"access-key", "userpass"}) +} + +func (s *cloudSuite) TestPublicCloudsMetadataFallback(c *gc.C) { + clouds, fallbackUsed, err := cloud.PublicCloudMetadata("badfile.yaml") + c.Assert(err, jc.ErrorIsNil) + c.Assert(fallbackUsed, jc.IsTrue) + var cloudNames []string + for name, _ := range clouds { + cloudNames = append(cloudNames, name) + } + c.Assert(cloudNames, jc.SameContents, publicCloudNames) +} + +func (s *cloudSuite) TestPublicCloudsMetadata(c *gc.C) { + metadata := ` +clouds: + aws-me: + type: aws + auth-types: [ userpass ] +`[1:] + dir := c.MkDir() + cloudyamlfile := filepath.Join(dir, "public-clouds.yaml") + err := ioutil.WriteFile(cloudyamlfile, []byte(metadata), 0644) + c.Assert(err, jc.ErrorIsNil) + clouds, fallbackUsed, err := cloud.PublicCloudMetadata(cloudyamlfile) + c.Assert(err, jc.ErrorIsNil) + c.Assert(fallbackUsed, jc.IsFalse) + c.Assert(clouds, jc.DeepEquals, map[string]cloud.Cloud{ + "aws-me": cloud.Cloud{ + Type: "aws", + AuthTypes: []cloud.AuthType{"userpass"}, + }, + }) +} + +func (s *cloudSuite) TestGeneratedPublicCloudInfo(c *gc.C) { + cloudData, err := 
ioutil.ReadFile("fallback-public-cloud.yaml") + c.Assert(err, jc.ErrorIsNil) + clouds, err := cloud.ParseCloudMetadata(cloudData) + c.Assert(err, jc.ErrorIsNil) + + generatedClouds := parsePublicClouds(c) + c.Assert(clouds, jc.DeepEquals, generatedClouds) +} + +func (s *cloudSuite) TestWritePublicCloudsMetadata(c *gc.C) { + origHome := osenv.SetJujuXDGDataHome(c.MkDir()) + s.AddCleanup(func(*gc.C) { osenv.SetJujuXDGDataHome(origHome) }) + + clouds := map[string]cloud.Cloud{ + "aws-me": cloud.Cloud{ + Type: "aws", + AuthTypes: []cloud.AuthType{"userpass"}, + }, + } + err := cloud.WritePublicCloudMetadata(clouds) + c.Assert(err, jc.ErrorIsNil) + publicClouds, fallbackUsed, err := cloud.PublicCloudMetadata(cloud.JujuPublicCloudsPath()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(fallbackUsed, jc.IsFalse) + c.Assert(publicClouds, jc.DeepEquals, clouds) +} + +func (s *cloudSuite) assertCompareClouds(c *gc.C, meta2 string, expected bool) { + meta1 := ` +clouds: + aws-me: + type: aws + auth-types: [ userpass ] +`[1:] + if meta2 == "" { + meta2 = meta1 + } + c1, err := cloud.ParseCloudMetadata([]byte(meta1)) + c.Assert(err, jc.ErrorIsNil) + c2, err := cloud.ParseCloudMetadata([]byte(meta2)) + c.Assert(err, jc.ErrorIsNil) + result, err := cloud.IsSameCloudMetadata(c1, c2) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, gc.Equals, expected) +} + +func (s *cloudSuite) TestIsSameCloudsMetadataSameData(c *gc.C) { + s.assertCompareClouds(c, "", true) +} + +func (s *cloudSuite) TestIsSameCloudsMetadataExistingCloudChanged(c *gc.C) { + metadata := ` +clouds: + aws-me: + type: aws + auth-types: [ userpass ] + endpoint: http://endpoint +`[1:] + s.assertCompareClouds(c, metadata, false) +} + +func (s *cloudSuite) TestIsSameCloudsMetadataNewCloudAdded(c *gc.C) { + metadata := ` +clouds: + aws-me: + type: aws + auth-types: [ userpass ] + gce-me: + type: gce + auth-types: [ userpass ] +`[1:] + s.assertCompareClouds(c, metadata, false) +} === added file 'src/github.com/juju/juju/cloud/credentials.go' --- src/github.com/juju/juju/cloud/credentials.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/cloud/credentials.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,309 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloud + +import ( + "fmt" + "strings" + + "github.com/juju/errors" + "github.com/juju/schema" + "gopkg.in/juju/environschema.v1" + "gopkg.in/yaml.v2" +) + +// CloudCredential contains attributes used to define credentials for a cloud. +type CloudCredential struct { + // DefaultCredential is the named credential to use by default. + DefaultCredential string `yaml:"default-credential,omitempty"` + + // DefaultRegion is the cloud region to use by default. + DefaultRegion string `yaml:"default-region,omitempty"` + + // AuthCredentials is the credentials for a cloud, keyed on name. + AuthCredentials map[string]Credential `yaml:",omitempty,inline"` +} + +// Credential instances represent cloud credentials. +type Credential struct { + authType AuthType + attributes map[string]string + + // Label is optionally set to describe the credentials + // to a user. + Label string +} + +// AuthType returns the authentication type. +func (c Credential) AuthType() AuthType { + return c.authType +} + +func copyStringMap(in map[string]string) map[string]string { + out := make(map[string]string) + for k, v := range in { + out[k] = v + } + return out +} + +// Attributes returns the credential attributes. 
+func (c Credential) Attributes() map[string]string {
+ return copyStringMap(c.attributes)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (c Credential) MarshalYAML() (interface{}, error) {
+ return struct {
+ AuthType AuthType `yaml:"auth-type"`
+ Attributes map[string]string `yaml:",omitempty,inline"`
+ }{c.authType, c.attributes}, nil
+}
+
+// NewCredential returns a new, immutable Credential with the supplied
+// auth-type and attributes.
+func NewCredential(authType AuthType, attributes map[string]string) Credential {
+ return Credential{authType: authType, attributes: copyStringMap(attributes)}
+}
+
+// NewEmptyCredential returns a new Credential with the EmptyAuthType
+// auth-type.
+func NewEmptyCredential() Credential {
+ return Credential{authType: EmptyAuthType, attributes: nil}
+}
+
+// NewEmptyCloudCredential returns a new CloudCredential with an empty
+// default credential.
+func NewEmptyCloudCredential() *CloudCredential {
+ return &CloudCredential{AuthCredentials: map[string]Credential{"default": NewEmptyCredential()}}
+}
+
+// CredentialSchema describes the schema of a credential. Credential schemas
+// are specific to cloud providers.
+type CredentialSchema map[string]CredentialAttr
+
+// FinalizeCredential finalizes a credential by matching it with one of the
+// provided credential schemas, and reading any file attributes into their
+// corresponding non-file attributes. This will also validate the credential.
+//
+// If there is no schema with the matching auth-type, an error satisfying
+// errors.IsNotSupported will be returned.
+func FinalizeCredential(
+ credential Credential,
+ schemas map[AuthType]CredentialSchema,
+ readFile func(string) ([]byte, error),
+) (*Credential, error) {
+ schema, ok := schemas[credential.authType]
+ if !ok {
+ return nil, errors.NotSupportedf("auth-type %q", credential.authType)
+ }
+ attrs, err := schema.Finalize(credential.attributes, readFile)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return &Credential{authType: credential.authType, attributes: attrs}, nil
+}
+
+// Finalize finalizes the given credential attributes against the credential
+// schema. If the attributes are invalid, Finalize will return an error.
+//
+// An updated attribute map will be returned, having any file attributes
+// deleted, and replaced by their non-file counterparts with the values set
+// to the contents of the files.
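A sketch of how the Finalize step below can be driven through FinalizeCredential, assuming only the exported API in this file. This is not part of the commit; the schema, the attribute names, and the stubbed file reader are hypothetical (a real caller would pass ioutil.ReadFile):

package main

import (
	"fmt"
	"log"

	"github.com/juju/juju/cloud"
)

func main() {
	// Hypothetical schema: "key" may be given directly, or via the
	// file named by the "key-file" attribute.
	schema := cloud.CredentialSchema{
		"key": {Description: "key credential", Hidden: true, FileAttr: "key-file"},
	}
	cred := cloud.NewCredential(cloud.UserPassAuthType, map[string]string{
		"key-file": "/path/to/key", // illustrative path
	})
	// Stub reader for the sketch; it stands in for ioutil.ReadFile.
	readFile := func(path string) ([]byte, error) {
		return []byte("contents of " + path), nil
	}
	finalized, err := cloud.FinalizeCredential(cred,
		map[cloud.AuthType]cloud.CredentialSchema{cloud.UserPassAuthType: schema},
		readFile,
	)
	if err != nil {
		log.Fatal(err)
	}
	// "key-file" has been read and replaced by a "key" attribute.
	fmt.Println(finalized.Attributes()["key"])
}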
+func (s CredentialSchema) Finalize( + attrs map[string]string, + readFile func(string) ([]byte, error), +) (map[string]string, error) { + checker, err := s.schemaChecker() + if err != nil { + return nil, errors.Trace(err) + } + m := make(map[string]interface{}) + for k, v := range attrs { + m[k] = v + } + result, err := checker.Coerce(m, nil) + if err != nil { + return nil, errors.Trace(err) + } + + resultMap := result.(map[string]interface{}) + newAttrs := make(map[string]string) + for name, field := range s { + if field.FileAttr == "" { + newAttrs[name] = resultMap[name].(string) + continue + } + if fieldVal, ok := resultMap[name]; ok { + if _, ok := resultMap[field.FileAttr]; ok { + return nil, errors.NotValidf( + "specifying both %q and %q", + name, field.FileAttr, + ) + } + newAttrs[name] = fieldVal.(string) + continue + } + fieldVal, ok := resultMap[field.FileAttr] + if !ok { + return nil, errors.NewNotValid(nil, fmt.Sprintf( + "either %q or %q must be specified", + name, field.FileAttr, + )) + } + data, err := readFile(fieldVal.(string)) + if err != nil { + return nil, errors.Annotatef(err, "reading file for %q", name) + } + if len(data) == 0 { + return nil, errors.NotValidf("empty file for %q", name) + } + newAttrs[name] = string(data) + } + return newAttrs, nil +} + +func (s CredentialSchema) schemaChecker() (schema.Checker, error) { + fields := make(environschema.Fields) + for name, field := range s { + fields[name] = environschema.Attr{ + Description: field.Description, + Type: environschema.Tstring, + Group: environschema.AccountGroup, + Mandatory: field.FileAttr == "", + Secret: field.Hidden, + } + } + // TODO(axw) add support to environschema for attributes whose values + // can be read in from a file. + for _, field := range s { + if field.FileAttr == "" { + continue + } + if _, ok := fields[field.FileAttr]; ok { + return nil, errors.Errorf("duplicate field %q", field.FileAttr) + } + fields[field.FileAttr] = environschema.Attr{ + Description: field.Description + " (file)", + Type: environschema.Tstring, + Group: environschema.AccountGroup, + Mandatory: false, + Secret: false, + } + } + schemaFields, schemaDefaults, err := fields.ValidationSchema() + if err != nil { + return nil, errors.Trace(err) + } + return schema.FieldMap(schemaFields, schemaDefaults), nil +} + +// CredentialAttr describes the properties of a credential attribute. +type CredentialAttr struct { + // Description is a human-readable description of the credential + // attribute. + Description string + + // Hidden controls whether or not the attribute value will be hidden + // when being entered interactively. Regardless of this, all credential + // attributes are provided only to the Juju controllers. + Hidden bool + + // FileAttr is the name of an attribute that may be specified instead + // of this one, which points to a file that will be read in and its + // value used for this attribute. 
+ FileAttr string +} + +type cloudCredentialChecker struct{} + +func (c cloudCredentialChecker) Coerce(v interface{}, path []string) (interface{}, error) { + out := CloudCredential{ + AuthCredentials: make(map[string]Credential), + } + v, err := schema.StringMap(cloudCredentialValueChecker{}).Coerce(v, path) + if err != nil { + return nil, err + } + mapv := v.(map[string]interface{}) + for k, v := range mapv { + switch k { + case "default-region": + out.DefaultRegion = v.(string) + case "default-credential": + out.DefaultCredential = v.(string) + default: + out.AuthCredentials[k] = v.(Credential) + } + } + return out, nil +} + +type cloudCredentialValueChecker struct{} + +func (c cloudCredentialValueChecker) Coerce(v interface{}, path []string) (interface{}, error) { + field := path[len(path)-1] + switch field { + case "default-region", "default-credential": + return schema.String().Coerce(v, path) + } + v, err := schema.StringMap(schema.String()).Coerce(v, path) + if err != nil { + return nil, err + } + mapv := v.(map[string]interface{}) + + authType, _ := mapv["auth-type"].(string) + if authType == "" { + return nil, errors.Errorf("%v: missing auth-type", strings.Join(path, "")) + } + + attrs := make(map[string]string) + delete(mapv, "auth-type") + for k, v := range mapv { + attrs[k] = v.(string) + } + return Credential{authType: AuthType(authType), attributes: attrs}, nil +} + +// ParseCredentials parses the given yaml bytes into Credentials, but does +// not validate the credential attributes. +func ParseCredentials(data []byte) (map[string]CloudCredential, error) { + var credentialsYAML struct { + Credentials map[string]interface{} `yaml:"credentials"` + } + err := yaml.Unmarshal(data, &credentialsYAML) + if err != nil { + return nil, errors.Annotate(err, "cannot unmarshal yaml credentials") + } + credentials := make(map[string]CloudCredential) + for cloud, v := range credentialsYAML.Credentials { + v, err := cloudCredentialChecker{}.Coerce( + v, []string{"credentials." + cloud}, + ) + if err != nil { + return nil, errors.Trace(err) + } + credentials[cloud] = v.(CloudCredential) + } + return credentials, nil +} + +// RemoveSecrets returns a copy of the given credential with secret fields removed. +func RemoveSecrets( + credential Credential, + schemas map[AuthType]CredentialSchema, +) (*Credential, error) { + schema, ok := schemas[credential.authType] + if !ok { + return nil, errors.NotSupportedf("auth-type %q", credential.authType) + } + redactedAttrs := credential.Attributes() + for attrName, attr := range schema { + if attr.Hidden { + delete(redactedAttrs, attrName) + } + } + return &Credential{authType: credential.authType, attributes: redactedAttrs}, nil +} === added file 'src/github.com/juju/juju/cloud/credentials_test.go' --- src/github.com/juju/juju/cloud/credentials_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/cloud/credentials_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,477 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package cloud_test
+
+import (
+ "github.com/juju/errors"
+ jc "github.com/juju/testing/checkers"
+ gc "gopkg.in/check.v1"
+
+ "github.com/juju/juju/cloud"
+ "github.com/juju/juju/testing"
+)
+
+type credentialsSuite struct {
+ testing.FakeJujuXDGDataHomeSuite
+}
+
+var _ = gc.Suite(&credentialsSuite{})
+
+func (s *credentialsSuite) TestMarshalAccessKey(c *gc.C) {
+ creds := map[string]cloud.CloudCredential{
+ "aws": {
+ DefaultCredential: "default-cred",
+ DefaultRegion: "us-west-2",
+ AuthCredentials: map[string]cloud.Credential{
+ "peter": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{
+ "access-key": "key",
+ "secret-key": "secret",
+ }),
+ // TODO(wallyworld) - add another credential once goyaml.v2 supports inline MapSlice.
+ //"paul": &cloud.AccessKeyCredentials{
+ // Key: "paulkey",
+ // Secret: "paulsecret",
+ //},
+ },
+ },
+ }
+ out, err := cloud.MarshalCredentials(creds)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(string(out), gc.Equals, `
+credentials:
+  aws:
+    default-credential: default-cred
+    default-region: us-west-2
+    peter:
+      auth-type: access-key
+      access-key: key
+      secret-key: secret
+`[1:])
+}
+
+func (s *credentialsSuite) TestMarshalOpenstackAccessKey(c *gc.C) {
+ creds := map[string]cloud.CloudCredential{
+ "openstack": {
+ DefaultCredential: "default-cred",
+ DefaultRegion: "region-a",
+ AuthCredentials: map[string]cloud.Credential{
+ "peter": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{
+ "access-key": "key",
+ "secret-key": "secret",
+ "tenant-name": "tenant",
+ }),
+ },
+ },
+ }
+ out, err := cloud.MarshalCredentials(creds)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(string(out), gc.Equals, `
+credentials:
+  openstack:
+    default-credential: default-cred
+    default-region: region-a
+    peter:
+      auth-type: access-key
+      access-key: key
+      secret-key: secret
+      tenant-name: tenant
+`[1:])
+}
+
+func (s *credentialsSuite) TestMarshalOpenstackUserPass(c *gc.C) {
+ creds := map[string]cloud.CloudCredential{
+ "openstack": {
+ DefaultCredential: "default-cred",
+ DefaultRegion: "region-a",
+ AuthCredentials: map[string]cloud.Credential{
+ "peter": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{
+ "username": "user",
+ "password": "secret",
+ "tenant-name": "tenant",
+ }),
+ },
+ },
+ }
+ out, err := cloud.MarshalCredentials(creds)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(string(out), gc.Equals, `
+credentials:
+  openstack:
+    default-credential: default-cred
+    default-region: region-a
+    peter:
+      auth-type: userpass
+      password: secret
+      tenant-name: tenant
+      username: user
+`[1:])
+}
+
+func (s *credentialsSuite) TestMarshalAzureCredentials(c *gc.C) {
+ creds := map[string]cloud.CloudCredential{
+ "azure": {
+ DefaultCredential: "default-cred",
+ DefaultRegion: "Central US",
+ AuthCredentials: map[string]cloud.Credential{
+ "peter": cloud.NewCredential(cloud.UserPassAuthType, map[string]string{
+ "application-id": "app-id",
+ "application-password": "app-secret",
+ "subscription-id": "subscription-id",
+ "tenant-id": "tenant-id",
+ }),
+ },
+ },
+ }
+ out, err := cloud.MarshalCredentials(creds)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(string(out), gc.Equals, `
+credentials:
+  azure:
+    default-credential: default-cred
+    default-region: Central US
+    peter:
+      auth-type: userpass
+      application-id: app-id
+      application-password: app-secret
+      subscription-id: subscription-id
+      tenant-id: tenant-id
+`[1:])
+}
+
+func (s *credentialsSuite) TestMarshalOAuth1(c *gc.C) {
+ creds := map[string]cloud.CloudCredential{
+ "maas": {
+
DefaultCredential: "default-cred", + DefaultRegion: "region-default", + AuthCredentials: map[string]cloud.Credential{ + "peter": cloud.NewCredential(cloud.OAuth1AuthType, map[string]string{ + "consumer-key": "consumer-key", + "consumer-secret": "consumer-secret", + "access-token": "access-token", + "token-secret": "token-secret", + }), + }, + }, + } + out, err := cloud.MarshalCredentials(creds) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), gc.Equals, ` +credentials: + maas: + default-credential: default-cred + default-region: region-default + peter: + auth-type: oauth1 + access-token: access-token + consumer-key: consumer-key + consumer-secret: consumer-secret + token-secret: token-secret +`[1:]) +} + +func (s *credentialsSuite) TestMarshalOAuth2(c *gc.C) { + creds := map[string]cloud.CloudCredential{ + "google": { + DefaultCredential: "default-cred", + DefaultRegion: "West US", + AuthCredentials: map[string]cloud.Credential{ + "peter": cloud.NewCredential(cloud.OAuth2AuthType, map[string]string{ + "client-id": "client-id", + "client-email": "client-email", + "private-key": "secret", + }), + }, + }, + } + out, err := cloud.MarshalCredentials(creds) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), gc.Equals, ` +credentials: + google: + default-credential: default-cred + default-region: West US + peter: + auth-type: oauth2 + client-email: client-email + client-id: client-id + private-key: secret +`[1:]) +} + +func (s *credentialsSuite) TestParseCredentials(c *gc.C) { + s.testParseCredentials(c, []byte(` +credentials: + aws: + default-credential: peter + default-region: us-east-2 + peter: + auth-type: access-key + access-key: key + secret-key: secret + aws-china: + default-credential: zhu8jie + zhu8jie: + auth-type: access-key + access-key: key + secret-key: secret + sun5kong: + auth-type: access-key + access-key: quay + secret-key: sekrit + aws-gov: + default-region: us-gov-west-1 + supersekrit: + auth-type: access-key + access-key: super + secret-key: sekrit +`[1:]), map[string]cloud.CloudCredential{ + "aws": cloud.CloudCredential{ + DefaultCredential: "peter", + DefaultRegion: "us-east-2", + AuthCredentials: map[string]cloud.Credential{ + "peter": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "access-key": "key", + "secret-key": "secret", + }), + }, + }, + "aws-china": cloud.CloudCredential{ + DefaultCredential: "zhu8jie", + AuthCredentials: map[string]cloud.Credential{ + "zhu8jie": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "access-key": "key", + "secret-key": "secret", + }), + "sun5kong": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "access-key": "quay", + "secret-key": "sekrit", + }), + }, + }, + "aws-gov": cloud.CloudCredential{ + DefaultRegion: "us-gov-west-1", + AuthCredentials: map[string]cloud.Credential{ + "supersekrit": cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{ + "access-key": "super", + "secret-key": "sekrit", + }), + }, + }, + }) +} + +func (s *credentialsSuite) TestParseCredentialsUnknownAuthType(c *gc.C) { + // Unknown auth-type is not validated by ParseCredentials. + // Validation is deferred to FinalizeCredential. 
+ s.testParseCredentials(c, []byte(` +credentials: + cloud-name: + credential-name: + auth-type: woop +`[1:]), map[string]cloud.CloudCredential{ + "cloud-name": cloud.CloudCredential{ + AuthCredentials: map[string]cloud.Credential{ + "credential-name": cloud.NewCredential("woop", nil), + }, + }, + }) +} + +func (s *credentialsSuite) testParseCredentials(c *gc.C, input []byte, expect map[string]cloud.CloudCredential) { + output, err := cloud.ParseCredentials(input) + c.Assert(err, jc.ErrorIsNil) + c.Assert(output, jc.DeepEquals, expect) +} + +func (s *credentialsSuite) TestParseCredentialsMissingAuthType(c *gc.C) { + s.testParseCredentialsError(c, []byte(` +credentials: + cloud-name: + credential-name: + doesnt: really-matter +`[1:]), "credentials.cloud-name.credential-name: missing auth-type") +} + +func (s *credentialsSuite) TestParseCredentialsNonStringValue(c *gc.C) { + s.testParseCredentialsError(c, []byte(` +credentials: + cloud-name: + credential-name: + non-string-value: 123 +`[1:]), `credentials\.cloud-name\.credential-name\.non-string-value: expected string, got int\(123\)`) +} + +func (s *credentialsSuite) testParseCredentialsError(c *gc.C, input []byte, expect string) { + _, err := cloud.ParseCredentials(input) + c.Assert(err, gc.ErrorMatches, expect) +} + +func (s *credentialsSuite) TestFinalizeCredential(c *gc.C) { + cred := cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{ + "key": "value", + }, + ) + schema := cloud.CredentialSchema{ + "key": { + Description: "key credential", + Hidden: true, + }, + } + _, err := cloud.FinalizeCredential(cred, map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: schema, + }, readFileNotSupported) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *credentialsSuite) TestFinalizeCredentialFileAttr(c *gc.C) { + cred := cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{ + "key-file": "path", + "quay": "value", + }, + ) + schema := cloud.CredentialSchema{ + "key": { + Description: "key credential", + Hidden: true, + FileAttr: "key-file", + }, + "quay": { + FileAttr: "quay-file", + }, + } + readFile := func(s string) ([]byte, error) { + c.Assert(s, gc.Equals, "path") + return []byte("file-value"), nil + } + newCred, err := cloud.FinalizeCredential(cred, map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: schema, + }, readFile) + c.Assert(err, jc.ErrorIsNil) + c.Assert(newCred.Attributes(), jc.DeepEquals, map[string]string{ + "key": "file-value", + "quay": "value", + }) +} + +func (s *credentialsSuite) TestFinalizeCredentialFileEmpty(c *gc.C) { + cred := cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{ + "key-file": "path", + }, + ) + schema := cloud.CredentialSchema{ + "key": { + Description: "key credential", + Hidden: true, + FileAttr: "key-file", + }, + } + readFile := func(string) ([]byte, error) { + return nil, nil + } + _, err := cloud.FinalizeCredential(cred, map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: schema, + }, readFile) + c.Assert(err, gc.ErrorMatches, `empty file for "key" not valid`) +} + +func (s *credentialsSuite) TestFinalizeCredentialFileAttrNeither(c *gc.C) { + cred := cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{}, + ) + schema := cloud.CredentialSchema{ + "key": { + Description: "key credential", + Hidden: true, + FileAttr: "key-file", + }, + } + _, err := cloud.FinalizeCredential(cred, map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: schema, + }, readFileNotSupported) + 
c.Assert(err, gc.ErrorMatches, `either "key" or "key-file" must be specified`)
+}
+
+func (s *credentialsSuite) TestFinalizeCredentialFileAttrBoth(c *gc.C) {
+	cred := cloud.NewCredential(
+		cloud.UserPassAuthType,
+		map[string]string{
+			"key":      "value",
+			"key-file": "path",
+		},
+	)
+	schema := cloud.CredentialSchema{
+		"key": {
+			Description: "key credential",
+			Hidden:      true,
+			FileAttr:    "key-file",
+		},
+	}
+	_, err := cloud.FinalizeCredential(cred, map[cloud.AuthType]cloud.CredentialSchema{
+		cloud.UserPassAuthType: schema,
+	}, readFileNotSupported)
+	c.Assert(err, gc.ErrorMatches, `specifying both "key" and "key-file" not valid`)
+}
+
+func (s *credentialsSuite) TestFinalizeCredentialInvalid(c *gc.C) {
+	cred := cloud.NewCredential(
+		cloud.UserPassAuthType,
+		map[string]string{},
+	)
+	schema := cloud.CredentialSchema{
+		"key": {
+			Description: "key credential",
+			Hidden:      true,
+		},
+	}
+	_, err := cloud.FinalizeCredential(cred, map[cloud.AuthType]cloud.CredentialSchema{
+		cloud.UserPassAuthType: schema,
+	}, readFileNotSupported)
+	c.Assert(err, gc.ErrorMatches, "key: expected string, got nothing")
+}
+
+func (s *credentialsSuite) TestFinalizeCredentialNotSupported(c *gc.C) {
+	cred := cloud.NewCredential(
+		cloud.OAuth2AuthType,
+		map[string]string{},
+	)
+	_, err := cloud.FinalizeCredential(
+		cred, map[cloud.AuthType]cloud.CredentialSchema{}, readFileNotSupported,
+	)
+	c.Assert(err, jc.Satisfies, errors.IsNotSupported)
+	c.Assert(err, gc.ErrorMatches, `auth-type "oauth2" not supported`)
+}
+
+func readFileNotSupported(f string) ([]byte, error) {
+	return nil, errors.NotSupportedf("reading file %q", f)
+}
+
+func (s *credentialsSuite) TestRemoveSecrets(c *gc.C) {
+	cred := cloud.NewCredential(
+		cloud.UserPassAuthType,
+		map[string]string{
+			"username": "user",
+			"password": "secret",
+		},
+	)
+	schema := cloud.CredentialSchema{
+		"username": {},
+		"password": {
+			Hidden: true,
+		},
+	}
+	sanitisedCred, err := cloud.RemoveSecrets(cred, map[cloud.AuthType]cloud.CredentialSchema{
+		cloud.UserPassAuthType: schema,
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(sanitisedCred.Attributes(), jc.DeepEquals, map[string]string{
+		"username": "user",
+	})
+}
=== added file 'src/github.com/juju/juju/cloud/export_test.go'
--- src/github.com/juju/juju/cloud/export_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/cloud/export_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,20 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cloud
+
+import (
+	"gopkg.in/yaml.v2"
+)
+
+var (
+	FallbackPublicCloudInfo = fallbackPublicCloudInfo
+)
+
+func MarshalCredentials(credentialsMap map[string]CloudCredential) ([]byte, error) {
+	var credentialsYAML struct {
+		Credentials map[string]CloudCredential `yaml:"credentials"`
+	}
+	credentialsYAML.Credentials = credentialsMap
+	return yaml.Marshal(credentialsYAML)
+}
=== added file 'src/github.com/juju/juju/cloud/fallback-public-cloud.yaml'
--- src/github.com/juju/juju/cloud/fallback-public-cloud.yaml	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/cloud/fallback-public-cloud.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,165 @@
+# DO NOT EDIT, will be overwritten, use "juju update-clouds" to refresh.
+clouds: + aws: + type: ec2 + auth-types: [ access-key ] + regions: + us-east-1: + endpoint: https://us-east-1.aws.amazon.com/v1.2/ + us-west-1: + endpoint: https://us-west-1.aws.amazon.com/v1.2/ + us-west-2: + endpoint: https://us-west-2.aws.amazon.com/v1.2/ + eu-west-1: + endpoint: https://eu-west-1.aws.amazon.com/v1.2/ + eu-central-1: + endpoint: https://eu-central-1.aws.amazon.com/v1.2/ + ap-southeast-1: + endpoint: https://ap-southeast-1.aws.amazon.com/v1.2/ + ap-southeast-2: + endpoint: https://ap-southeast-2.aws.amazon.com/v1.2/ + ap-northeast-1: + endpoint: https://ap-northeast-1.aws.amazon.com/v1.2/ + ap-northeast-2: + endpoint: https://ap-northeast-2.aws.amazon.com/v1.2/ + sa-east-1: + endpoint: https://sa-east-1.aws.amazon.com/v1.2/ + aws-china: + type: ec2 + auth-types: [ access-key ] + regions: + cn-north-1: + endpoint: https://ec2.cn-north-1.amazonaws.com.cn/ + aws-gov: + type: ec2 + auth-types: [ access-key ] + regions: + us-gov-west-1: + endpoint: https://ec2.us-gov-west-1.amazonaws-govcloud.com + google: + type: gce + auth-types: [ oauth2 ] + regions: + us-east1: + endpoint: https://www.googleapis.com + us-central1: + endpoint: https://www.googleapis.com + europe-west1: + endpoint: https://www.googleapis.com + asia-east1: + endpoint: https://www.googleapis.com + azure: + type: azure + auth-types: [ userpass ] + regions: + centralus: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + eastus: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + eastus2: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + northcentralus: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + southcentralus: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + westus: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + northeurope: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + westeurope: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + eastasia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + southeastasia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + japaneast: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + japanwest: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + brazilsouth: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + australiaeast: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + australiasoutheast: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + centralindia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + southindia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + westindia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + azure-china: + type: azure + auth-types: [ userpass ] + regions: + chinaeast: + endpoint: https://management.chinacloudapi.cn + storage-endpoint: https://core.chinacloudapi.cn + chinanorth: + endpoint: https://management.chinacloudapi.cn + storage-endpoint: https://core.chinacloudapi.cn + rackspace: + type: rackspace + auth-types: [ access-key, userpass ] + endpoint: 
https://identity.api.rackspacecloud.com/v2.0
+    regions:
+      DFW:
+        endpoint: https://identity.api.rackspacecloud.com/v2.0
+      ORD:
+        endpoint: https://identity.api.rackspacecloud.com/v2.0
+      IAD:
+        endpoint: https://identity.api.rackspacecloud.com/v2.0
+      LON:
+        endpoint: https://lon.identity.api.rackspacecloud.com/v2.0
+      SYD:
+        endpoint: https://identity.api.rackspacecloud.com/v2.0
+      HKG:
+        endpoint: https://identity.api.rackspacecloud.com/v2.0
+  joyent:
+    type: joyent
+    auth-types: [ userpass ]
+    regions:
+      eu-ams-1:
+        endpoint: https://eu-ams-1.api.joyentcloud.com
+      us-sw-1:
+        endpoint: https://us-sw-1.api.joyentcloud.com
+      us-east-1:
+        endpoint: https://us-east-1.api.joyentcloud.com
+      us-east-2:
+        endpoint: https://us-east-2.api.joyentcloud.com
+      us-east-3:
+        endpoint: https://us-east-3.api.joyentcloud.com
+      us-west-1:
+        endpoint: https://us-west-1.api.joyentcloud.com
+  cloudsigma:
+    type: cloudsigma
+    auth-types: [ userpass ]
+    regions:
+      hnl:
+        endpoint: https://hnl.cloudsigma.com/api/2.0/
+      mia:
+        endpoint: https://mia.cloudsigma.com/api/2.0/
+      sjc:
+        endpoint: https://sjc.cloudsigma.com/api/2.0/
+      wdc:
+        endpoint: https://wdc.cloudsigma.com/api/2.0/
+      zrh:
+        endpoint: https://zrh.cloudsigma.com/api/2.0/
=== added file 'src/github.com/juju/juju/cloud/fallback_public_cloud.go'
--- src/github.com/juju/juju/cloud/fallback_public_cloud.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/cloud/fallback_public_cloud.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,174 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cloud
+
+// Generated code - do not edit.
+
+const fallbackPublicCloudInfo = `
+# DO NOT EDIT, will be overwritten, use "juju update-clouds" to refresh.
+clouds:
+  aws:
+    type: ec2
+    auth-types: [ access-key ]
+    regions:
+      us-east-1:
+        endpoint: https://us-east-1.aws.amazon.com/v1.2/
+      us-west-1:
+        endpoint: https://us-west-1.aws.amazon.com/v1.2/
+      us-west-2:
+        endpoint: https://us-west-2.aws.amazon.com/v1.2/
+      eu-west-1:
+        endpoint: https://eu-west-1.aws.amazon.com/v1.2/
+      eu-central-1:
+        endpoint: https://eu-central-1.aws.amazon.com/v1.2/
+      ap-southeast-1:
+        endpoint: https://ap-southeast-1.aws.amazon.com/v1.2/
+      ap-southeast-2:
+        endpoint: https://ap-southeast-2.aws.amazon.com/v1.2/
+      ap-northeast-1:
+        endpoint: https://ap-northeast-1.aws.amazon.com/v1.2/
+      ap-northeast-2:
+        endpoint: https://ap-northeast-2.aws.amazon.com/v1.2/
+      sa-east-1:
+        endpoint: https://sa-east-1.aws.amazon.com/v1.2/
+  aws-china:
+    type: ec2
+    auth-types: [ access-key ]
+    regions:
+      cn-north-1:
+        endpoint: https://ec2.cn-north-1.amazonaws.com.cn/
+  aws-gov:
+    type: ec2
+    auth-types: [ access-key ]
+    regions:
+      us-gov-west-1:
+        endpoint: https://ec2.us-gov-west-1.amazonaws-govcloud.com
+  google:
+    type: gce
+    auth-types: [ oauth2 ]
+    regions:
+      us-east1:
+        endpoint: https://www.googleapis.com
+      us-central1:
+        endpoint: https://www.googleapis.com
+      europe-west1:
+        endpoint: https://www.googleapis.com
+      asia-east1:
+        endpoint: https://www.googleapis.com
+  azure:
+    type: azure
+    auth-types: [ userpass ]
+    regions:
+      centralus:
+        endpoint: https://management.azure.com
+        storage-endpoint: https://core.windows.net
+      eastus:
+        endpoint: https://management.azure.com
+        storage-endpoint: https://core.windows.net
+      eastus2:
+        endpoint: https://management.azure.com
+        storage-endpoint: https://core.windows.net
+      northcentralus:
+        endpoint: https://management.azure.com
+        storage-endpoint: https://core.windows.net
+      southcentralus:
+        endpoint:
https://management.azure.com + storage-endpoint: https://core.windows.net + westus: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + northeurope: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + westeurope: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + eastasia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + southeastasia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + japaneast: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + japanwest: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + brazilsouth: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + australiaeast: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + australiasoutheast: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + centralindia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + southindia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + westindia: + endpoint: https://management.azure.com + storage-endpoint: https://core.windows.net + azure-china: + type: azure + auth-types: [ userpass ] + regions: + chinaeast: + endpoint: https://management.chinacloudapi.cn + storage-endpoint: https://core.chinacloudapi.cn + chinanorth: + endpoint: https://management.chinacloudapi.cn + storage-endpoint: https://core.chinacloudapi.cn + rackspace: + type: rackspace + auth-types: [ access-key, userpass ] + endpoint: https://identity.api.rackspacecloud.com/v2.0 + regions: + DFW: + endpoint: https://identity.api.rackspacecloud.com/v2.0 + ORD: + endpoint: https://identity.api.rackspacecloud.com/v2.0 + IAD: + endpoint: https://identity.api.rackspacecloud.com/v2.0 + LON: + endpoint: https://lon.identity.api.rackspacecloud.com/v2.0 + SYD: + endpoint: https://identity.api.rackspacecloud.com/v2.0 + HKG: + endpoint: https://identity.api.rackspacecloud.com/v2.0 + joyent: + type: joyent + auth-types: [ userpass ] + regions: + eu-ams-1: + endpoint: https://eu-ams-1.api.joyentcloud.com + us-sw-1: + endpoint: https://us-sw-1.api.joyentcloud.com + us-east-1: + endpoint: https://us-east-1.api.joyentcloud.com + us-east-2: + endpoint: https://us-east-2.api.joyentcloud.com + us-east-3: + endpoint: https://us-east-3.api.joyentcloud.com + us-west-1: + endpoint: https://us-west-1.api.joyentcloud.com + cloudsigma: + type: cloudsigma + auth-types: [ userpass ] + regions: + hnl: + endpoint: https://hnl.cloudsigma.com/api/2.0/ + mia: + endpoint: https://mia.cloudsigma.com/api/2.0/ + sjc: + endpoint: https://sjc.cloudsigma.com/api/2.0/ + wdc: + endpoint: https://wdc.cloudsigma.com/api/2.0/ + zrh: + endpoint: https://zrh.cloudsigma.com/api/2.0/ +` === added file 'src/github.com/juju/juju/cloud/package_test.go' --- src/github.com/juju/juju/cloud/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/cloud/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package cloud_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *stdtesting.T) {
+	gc.TestingT(t)
+}
=== added file 'src/github.com/juju/juju/cloud/personalclouds.go'
--- src/github.com/juju/juju/cloud/personalclouds.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/cloud/personalclouds.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,57 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package cloud provides functionality to parse information
+// describing clouds, including regions, supported auth types etc.
+
+package cloud
+
+import (
+	"io/ioutil"
+	"os"
+
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/juju/osenv"
+)
+
+// JujuPersonalCloudsPath returns the location where personal cloud
+// information is expected to be found, under the Juju XDG data home.
+func JujuPersonalCloudsPath() string {
+	return osenv.JujuXDGDataHomePath("clouds.yaml")
+}
+
+// PersonalCloudMetadata loads any personal cloud metadata defined
+// in the Juju data directory. If no cloud metadata is found,
+// that is not an error; nil is returned.
+func PersonalCloudMetadata() (map[string]Cloud, error) {
+	clouds, err := ParseCloudMetadataFile(JujuPersonalCloudsPath())
+	if err != nil && os.IsNotExist(err) {
+		return nil, nil
+	}
+	return clouds, err
+}
+
+// ParseCloudMetadataFile loads any cloud metadata defined
+// in the specified file.
+func ParseCloudMetadataFile(file string) (map[string]Cloud, error) {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	clouds, err := ParseCloudMetadata(data)
+	if err != nil {
+		return nil, err
+	}
+	return clouds, nil
+}
+
+// WritePersonalCloudMetadata marshals to YAML and writes the cloud metadata
+// to the personal cloud file.
+func WritePersonalCloudMetadata(cloudsMap map[string]Cloud) error {
+	data, err := marshalCloudMetadata(cloudsMap)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return ioutil.WriteFile(JujuPersonalCloudsPath(), data, os.FileMode(0600))
+}
=== added file 'src/github.com/juju/juju/cloud/personalclouds_test.go'
--- src/github.com/juju/juju/cloud/personalclouds_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/cloud/personalclouds_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,140 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
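[editor's note] A short sketch of the round trip the helpers above provide, using only the functions defined in personalclouds.go; it assumes a writable Juju data home, and the cloud names and endpoints are illustrative.

	package main

	import (
		"fmt"
		"log"

		"github.com/juju/juju/cloud"
	)

	func main() {
		clouds := map[string]cloud.Cloud{
			"homestack": {
				Type:      "openstack",
				AuthTypes: []cloud.AuthType{"userpass"},
				Endpoint:  "http://homestack",
			},
		}
		// Writes clouds.yaml under the Juju data home (mode 0600).
		if err := cloud.WritePersonalCloudMetadata(clouds); err != nil {
			log.Fatal(err)
		}
		// Read it back; returns nil, nil if the file does not exist.
		read, err := cloud.PersonalCloudMetadata()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(read["homestack"].Type) // openstack
	}
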
+ +package cloud_test + +import ( + "io/ioutil" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/juju/osenv" + "github.com/juju/juju/testing" +) + +type personalCloudSuite struct { + testing.FakeJujuXDGDataHomeSuite +} + +var _ = gc.Suite(&personalCloudSuite{}) + +func (s *personalCloudSuite) TestWritePersonalClouds(c *gc.C) { + clouds := map[string]cloud.Cloud{ + "homestack": cloud.Cloud{ + Type: "openstack", + AuthTypes: []cloud.AuthType{"userpass", "access-key"}, + Endpoint: "http://homestack", + Regions: []cloud.Region{ + cloud.Region{Name: "london", Endpoint: "http://london/1.0"}, + }, + }, + "azurestack": cloud.Cloud{ + Type: "azure", + AuthTypes: []cloud.AuthType{"userpass"}, + Regions: []cloud.Region{{ + Name: "prod", + Endpoint: "http://prod.azurestack.local", + }, { + Name: "dev", + Endpoint: "http://dev.azurestack.local", + }, { + Name: "test", + Endpoint: "http://test.azurestack.local", + }}, + }, + } + err := cloud.WritePersonalCloudMetadata(clouds) + c.Assert(err, jc.ErrorIsNil) + data, err := ioutil.ReadFile(osenv.JujuXDGDataHomePath("clouds.yaml")) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, ` +clouds: + azurestack: + type: azure + auth-types: [userpass] + regions: + prod: + endpoint: http://prod.azurestack.local + dev: + endpoint: http://dev.azurestack.local + test: + endpoint: http://test.azurestack.local + homestack: + type: openstack + auth-types: [userpass, access-key] + endpoint: http://homestack + regions: + london: + endpoint: http://london/1.0 +`[1:]) +} + +func (s *personalCloudSuite) TestReadPersonalCloudsNone(c *gc.C) { + clouds, err := cloud.PersonalCloudMetadata() + c.Assert(err, jc.ErrorIsNil) + c.Assert(clouds, gc.IsNil) +} + +func (s *personalCloudSuite) TestReadPersonalClouds(c *gc.C) { + s.setupReadClouds(c, osenv.JujuXDGDataHomePath("clouds.yaml")) + clouds, err := cloud.PersonalCloudMetadata() + c.Assert(err, jc.ErrorIsNil) + s.assertPersonalClouds(c, clouds) +} + +func (s *personalCloudSuite) TestReadUserSpecifiedClouds(c *gc.C) { + file := osenv.JujuXDGDataHomePath("somemoreclouds.yaml") + s.setupReadClouds(c, file) + clouds, err := cloud.ParseCloudMetadataFile(file) + c.Assert(err, jc.ErrorIsNil) + s.assertPersonalClouds(c, clouds) +} + +func (s *personalCloudSuite) assertPersonalClouds(c *gc.C, clouds map[string]cloud.Cloud) { + c.Assert(clouds, jc.DeepEquals, map[string]cloud.Cloud{ + "homestack": cloud.Cloud{ + Type: "openstack", + AuthTypes: []cloud.AuthType{"userpass", "access-key"}, + Endpoint: "http://homestack", + Regions: []cloud.Region{ + cloud.Region{Name: "london", Endpoint: "http://london/1.0"}, + }, + }, + "azurestack": cloud.Cloud{ + Type: "azure", + AuthTypes: []cloud.AuthType{"userpass"}, + StorageEndpoint: "http://storage.azurestack.local", + Regions: []cloud.Region{ + cloud.Region{ + Name: "local", + Endpoint: "http://azurestack.local", + StorageEndpoint: "http://storage.azurestack.local", + }, + }, + }, + }) +} + +func (s *personalCloudSuite) setupReadClouds(c *gc.C, destPath string) { + data := ` +clouds: + homestack: + type: openstack + auth-types: [userpass, access-key] + endpoint: http://homestack + regions: + london: + endpoint: http://london/1.0 + azurestack: + type: azure + auth-types: [userpass] + storage-endpoint: http://storage.azurestack.local + regions: + local: + endpoint: http://azurestack.local +`[1:] + err := ioutil.WriteFile(destPath, []byte(data), 0600) + c.Assert(err, jc.ErrorIsNil) +} === modified file 
'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit.go 2016-03-22 15:18:22 +0000 @@ -7,10 +7,10 @@ import ( "strings" - "github.com/juju/juju/utils/ssh" "github.com/juju/utils/packaging/commands" "github.com/juju/utils/packaging/config" "github.com/juju/utils/shell" + "github.com/juju/utils/ssh" ) // cloudConfig represents a set of cloud-init configuration options. @@ -103,20 +103,43 @@ delete(cfg.attrs, name) } -// SetUser is defined on the UserConfig interface. -func (cfg *cloudConfig) SetUser(user string) { - cfg.SetAttr("user", user) -} - -// UnsetUser is defined on the UserConfig interface. -func (cfg *cloudConfig) UnsetUser() { - cfg.UnsetAttr("user") -} - -// User is defined on the UserConfig interface. -func (cfg *cloudConfig) User() string { - user, _ := cfg.attrs["user"].(string) - return user +func annotateKeys(rawKeys string) []string { + cfgKeys := []string{} + keys := ssh.SplitAuthorisedKeys(rawKeys) + for _, key := range keys { + // ensure our keys have "Juju:" prepended to differentiate + // Juju-managed keys and externally added ones + jujuKey := ssh.EnsureJujuComment(key) + cfgKeys = append(cfgKeys, jujuKey) + } + return cfgKeys +} + +// AddUser is defined on the UsersConfig interface. +func (cfg *cloudConfig) AddUser(user *User) { + users, _ := cfg.attrs["users"].([]map[string]interface{}) + newUser := map[string]interface{}{ + "name": user.Name, + "lock_passwd": true, + } + if user.Groups != nil { + newUser["groups"] = user.Groups + } + if user.Shell != "" { + newUser["shell"] = user.Shell + } + if user.SSHAuthorizedKeys != "" { + newUser["ssh-authorized-keys"] = annotateKeys(user.SSHAuthorizedKeys) + } + if user.Sudo != nil { + newUser["sudo"] = user.Sudo + } + cfg.SetAttr("users", append(users, newUser)) +} + +// UnsetUsers is defined on the UsersConfig interface. +func (cfg *cloudConfig) UnsetUsers() { + cfg.UnsetAttr("users") } // SetSystemUpdate is defined on the SystemUpdateConfig interface. @@ -292,29 +315,14 @@ return stdout, stderr } -// AddSSHKey is defined on the SSHKeyConfi interface. -func (cfg *cloudConfig) AddSSHKey(keyType SSHKeyType, key string) { - keys, _ := cfg.attrs["ssh_keys"].(map[SSHKeyType]string) - if keys == nil { - keys = make(map[SSHKeyType]string) - cfg.SetAttr("ssh_keys", keys) - } - - keys[keyType] = key -} - -// AddSSHAuthorizedKeys is defined on the SSHKeysConfig interface. -func (cfg *cloudConfig) AddSSHAuthorizedKeys(rawKeys string) { - cfgKeys, _ := cfg.attrs["ssh_authorized_keys"].([]string) - keys := ssh.SplitAuthorisedKeys(rawKeys) - for _, key := range keys { - // ensure our keys have "Juju:" prepended in order to differenciate - // Juju-managed keys and externally added ones - jujuKey := ssh.EnsureJujuComment(key) - - cfgKeys = append(cfgKeys, jujuKey) - } - cfg.SetAttr("ssh_authorized_keys", cfgKeys) +// SetSSHAuthorizedKeys is defined on the SSHAuthorizedKeysConfig interface. +func (cfg *cloudConfig) SetSSHAuthorizedKeys(rawKeys string) { + keys := annotateKeys(rawKeys) + if len(keys) != 0 { + cfg.SetAttr("ssh_authorized_keys", keys) + } else { + cfg.UnsetAttr("ssh_authorized_keys") + } } // SetDisableRoot is defined on the RootUserConfig interface. 
=== modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_centos.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "github.com/juju/utils/packaging" "github.com/juju/utils/packaging/config" "github.com/juju/utils/proxy" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" ) // centOSCloudConfig is the cloudconfig type specific to CentOS machines. === modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_test.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_test.go 2016-03-22 15:18:22 +0000 @@ -10,11 +10,11 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils/packaging" + sshtesting "github.com/juju/utils/ssh/testing" gc "gopkg.in/check.v1" "github.com/juju/juju/cloudconfig/cloudinit" coretesting "github.com/juju/juju/testing" - sshtesting "github.com/juju/juju/utils/ssh/testing" ) // TODO integration tests, but how? @@ -34,12 +34,6 @@ expect map[string]interface{} setOption func(cfg cloudinit.CloudConfig) }{{ - "User", - map[string]interface{}{"user": "me"}, - func(cfg cloudinit.CloudConfig) { - cfg.SetUser("me") - }, -}, { "PackageUpgrade", map[string]interface{}{"package_upgrade": true}, func(cfg cloudinit.CloudConfig) { @@ -88,60 +82,99 @@ cfg.SetDisableRoot(false) }, }, { - "SSHAuthorizedKeys", + "SetSSHAuthorizedKeys with two keys", map[string]interface{}{"ssh_authorized_keys": []string{ fmt.Sprintf("%s Juju:user@host", sshtesting.ValidKeyOne.Key), fmt.Sprintf("%s Juju:another@host", sshtesting.ValidKeyTwo.Key), }}, func(cfg cloudinit.CloudConfig) { - cfg.AddSSHAuthorizedKeys(sshtesting.ValidKeyOne.Key + " Juju:user@host") - cfg.AddSSHAuthorizedKeys(sshtesting.ValidKeyTwo.Key + " another@host") + cfg.SetSSHAuthorizedKeys( + sshtesting.ValidKeyOne.Key + " Juju:user@host\n" + + sshtesting.ValidKeyTwo.Key + " another@host") }, }, { - "SSHAuthorizedKeys", + "SetSSHAuthorizedKeys with comments in keys", map[string]interface{}{"ssh_authorized_keys": []string{ fmt.Sprintf("%s Juju:sshkey", sshtesting.ValidKeyOne.Key), fmt.Sprintf("%s Juju:user@host", sshtesting.ValidKeyTwo.Key), fmt.Sprintf("%s Juju:another@host", sshtesting.ValidKeyThree.Key), }}, func(cfg cloudinit.CloudConfig) { - cfg.AddSSHAuthorizedKeys("#command\n" + sshtesting.ValidKeyOne.Key) - cfg.AddSSHAuthorizedKeys( - sshtesting.ValidKeyTwo.Key + " user@host\n# comment\n\n" + + cfg.SetSSHAuthorizedKeys( + "#command\n" + sshtesting.ValidKeyOne.Key + "\n" + + sshtesting.ValidKeyTwo.Key + " user@host\n" + + "# comment\n\n" + sshtesting.ValidKeyThree.Key + " another@host") - cfg.AddSSHAuthorizedKeys("") - }, -}, { - "SSHKeys RSAPrivate", - map[string]interface{}{"ssh_keys": map[string]interface{}{ - "rsa_private": "key1data", - }}, - func(cfg cloudinit.CloudConfig) { - cfg.AddSSHKey(cloudinit.RSAPrivate, "key1data") - }, -}, { - "SSHKeys RSAPublic", - map[string]interface{}{"ssh_keys": map[string]interface{}{ - "rsa_public": "key2data", - }}, - func(cfg cloudinit.CloudConfig) { - cfg.AddSSHKey(cloudinit.RSAPublic, "key2data") - }, -}, { - "SSHKeys DSAPublic", - map[string]interface{}{"ssh_keys": map[string]interface{}{ - "dsa_public": "key1data", - }}, - func(cfg cloudinit.CloudConfig) { - cfg.AddSSHKey(cloudinit.DSAPublic, "key1data") - }, -}, { - "SSHKeys DSAPrivate", - map[string]interface{}{"ssh_keys": 
map[string]interface{}{ - "dsa_private": "key2data", - }}, - func(cfg cloudinit.CloudConfig) { - cfg.AddSSHKey(cloudinit.DSAPrivate, "key2data") + }, +}, { + "SetSSHAuthorizedKeys unsets keys", + map[string]interface{}{}, + func(cfg cloudinit.CloudConfig) { + cfg.SetSSHAuthorizedKeys(sshtesting.ValidKeyOne.Key) + cfg.SetSSHAuthorizedKeys("") + }, +}, { + "AddUser with keys", + map[string]interface{}{"users": []interface{}{map[string]interface{}{ + "name": "auser", + "lock_passwd": true, + "ssh-authorized-keys": []string{ + fmt.Sprintf("%s Juju:user@host", sshtesting.ValidKeyOne.Key), + fmt.Sprintf("%s Juju:another@host", sshtesting.ValidKeyTwo.Key), + }, + }}}, + func(cfg cloudinit.CloudConfig) { + keys := (sshtesting.ValidKeyOne.Key + " Juju:user@host\n" + + sshtesting.ValidKeyTwo.Key + " another@host") + cfg.AddUser(&cloudinit.User{ + Name: "auser", + SSHAuthorizedKeys: keys, + }) + }, +}, { + "AddUser with groups", + map[string]interface{}{"users": []interface{}{map[string]interface{}{ + "name": "auser", + "lock_passwd": true, + "groups": []string{"agroup", "bgroup"}, + }}}, + func(cfg cloudinit.CloudConfig) { + cfg.AddUser(&cloudinit.User{ + Name: "auser", + Groups: []string{"agroup", "bgroup"}, + }) + }, +}, { + "AddUser with everything", + map[string]interface{}{"users": []interface{}{map[string]interface{}{ + "name": "auser", + "lock_passwd": true, + "groups": []string{"agroup", "bgroup"}, + "shell": "/bin/sh", + "ssh-authorized-keys": []string{ + sshtesting.ValidKeyOne.Key + " Juju:sshkey", + }, + "sudo": []string{"ALL=(ALL) ALL"}, + }}}, + func(cfg cloudinit.CloudConfig) { + cfg.AddUser(&cloudinit.User{ + Name: "auser", + Groups: []string{"agroup", "bgroup"}, + Shell: "/bin/sh", + SSHAuthorizedKeys: sshtesting.ValidKeyOne.Key + "\n", + Sudo: []string{"ALL=(ALL) ALL"}, + }) + }, +}, { + "AddUser with only name", + map[string]interface{}{"users": []interface{}{map[string]interface{}{ + "name": "auser", + "lock_passwd": true, + }}}, + func(cfg cloudinit.CloudConfig) { + cfg.AddUser(&cloudinit.User{ + Name: "auser", + }) }, }, { "Output", === modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_ubuntu.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/utils/packaging" "github.com/juju/utils/packaging/config" "github.com/juju/utils/proxy" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" ) // ubuntuCloudConfig is the cloudconfig type specific to Ubuntu machines === modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_win.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_win.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/cloudinit_win.go 2016-03-22 15:18:22 +0000 @@ -69,7 +69,7 @@ // RenderScript is defined on the RenderConfig interface. func (cfg *windowsCloudConfig) RenderScript() (string, error) { // NOTE: This shouldn't really be called on windows as it's used only for - // initialization via ssh or on local providers. + // initialization via ssh. 
script, err := cfg.renderWindows() if err != nil { return "", err === added directory 'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinittest' === added file 'src/github.com/juju/juju/cloudconfig/cloudinit/cloudinittest/fake.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/cloudinittest/fake.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/cloudinittest/fake.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,28 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cloudinittest + +import ( + "github.com/juju/testing" + + "github.com/juju/juju/cloudconfig/cloudinit" +) + +type CloudConfig struct { + cloudinit.CloudConfig + testing.Stub + + YAML []byte + Script string +} + +func (c *CloudConfig) RenderYAML() ([]byte, error) { + c.MethodCall(c, "RenderYAML") + return c.YAML, c.NextErr() +} + +func (c *CloudConfig) RenderScript() (string, error) { + c.MethodCall(c, "RenderScript") + return c.Script, c.NextErr() +} === modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/helpers_test.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/helpers_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/helpers_test.go 2016-03-22 15:18:22 +0000 @@ -1,9 +1,8 @@ package cloudinit import ( + "github.com/juju/utils/proxy" gc "gopkg.in/check.v1" - - "github.com/juju/utils/proxy" ) type HelperSuite struct{} === modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/interface.go' --- src/github.com/juju/juju/cloudconfig/cloudinit/interface.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/cloudinit/interface.go 2016-03-22 15:18:22 +0000 @@ -9,13 +9,13 @@ import ( "github.com/juju/errors" + "github.com/juju/utils/os" "github.com/juju/utils/packaging" "github.com/juju/utils/packaging/commands" "github.com/juju/utils/packaging/config" "github.com/juju/utils/proxy" + "github.com/juju/utils/series" "github.com/juju/utils/shell" - - "github.com/juju/juju/version" ) // CloudConfig is the interface of all cloud-init cloudconfig options. @@ -34,7 +34,7 @@ // CloudConfig also contains all the smaller interfaces for config // management: - UserConfig + UsersConfig SystemUpdateConfig SystemUpgradeConfig PackageProxyConfig @@ -48,28 +48,13 @@ LocaleConfig DeviceMountConfig OutputConfig - SSHKeysConfig + SSHAuthorizedKeysConfig RootUserConfig WrittenFilesConfig RenderConfig AdvancedPackagingConfig } -// UserConfig is the interface for managing all user-related settings. -type UserConfig interface { - // SetUser sets the username to be written in the config. - // NOTE: the user must exist beforehand, as no steps are taken to create it. - // NOTE: if not set, cloud-init defaults to using "ubuntu" - SetUser(string) - - // UnsetUser unsets the "user" cloudinit config attribute set with SetUser. - // If the attribute has not been previously set, no error occurs. - UnsetUser() - - // User returns the value set with SetUser or an empty string. - User() string -} - // SystemUpdateConfig is the interface for managing all system update options. type SystemUpdateConfig interface { // SetSystemUpdate sets whether the system should refresh the local package @@ -278,17 +263,11 @@ Output(OutputKind) (string, string) } -// SSHKeysConfig is the interface for all ssh key-related settings. -type SSHKeysConfig interface { - // AddSSHKey adds a pre-generated ssh key to the server keyring. 
- // Valid SSHKeyType options are: rsa_{public,private}, dsa_{public,private} - // Added keys will be written to /etc/ssh. - // As a result, new random keys are prevented from being generated. - AddSSHKey(SSHKeyType, string) - - // AddSSHAuthorizedKeys adds a set of keys in ssh authorized_keys format - // that will be added to ~/.ssh/authorized_keys for the configured user (see SetUser). - AddSSHAuthorizedKeys(string) +// SSHAuthorizedKeysConfig is the interface for adding ssh authorized keys. +type SSHAuthorizedKeysConfig interface { + // SetSSHAuthorizedKeys puts a set of authorized keys for the default + // user in the ~/.ssh/authorized_keys file. + SetSSHAuthorizedKeys(string) } // RootUserConfig is the interface for all root user-related settings. @@ -331,8 +310,7 @@ RenderYAML() ([]byte, error) // Renders a script that will execute the cloud config - // It is eiher used over ssh for bootstrapping and manual or locally by - // the local provider + // It is used over ssh for bootstrapping with the manual provider. RenderScript() (string, error) // ShellRenderer renturns the shell renderer of this particular instance. @@ -377,46 +355,73 @@ AddCloudArchiveCloudTools() } +type User struct { + // Login name for the user. + Name string + + // Additional groups to add the user to. + Groups []string + + // Path to shell to use by default. + Shell string + + // SSH keys to add to the authorized keys file. + SSHAuthorizedKeys string + + // Sudo directives to add. + Sudo []string +} + +// UsersConfig is the interface for managing user additions +type UsersConfig interface { + // AddUser sets a new user to be created with the given configuration. + AddUser(*User) + + // UnsetUsers unsets any users set in the config, meaning the default + // user specified in the image cloud config will be used. + UnsetUsers() +} + // New returns a new Config with no options set. 
-func New(series string) (CloudConfig, error) {
-	os, err := version.GetOSFromSeries(series)
+func New(ser string) (CloudConfig, error) {
+	seriesos, err := series.GetOSFromSeries(ser)
 	if err != nil {
 		return nil, err
 	}
-	switch os {
-	case version.Windows:
+	switch seriesos {
+	case os.Windows:
 		renderer, _ := shell.NewRenderer("powershell")
 		return &windowsCloudConfig{
 			&cloudConfig{
-				series:   series,
+				series:   ser,
 				renderer: renderer,
 				attrs:    make(map[string]interface{}),
 			},
 		}, nil
-	case version.Ubuntu:
+	case os.Ubuntu:
 		renderer, _ := shell.NewRenderer("bash")
 		return &ubuntuCloudConfig{
 			&cloudConfig{
-				series:    series,
+				series:    ser,
 				paccmder:  commands.NewAptPackageCommander(),
-				pacconfer: config.NewAptPackagingConfigurer(series),
+				pacconfer: config.NewAptPackagingConfigurer(ser),
 				renderer:  renderer,
 				attrs:     make(map[string]interface{}),
 			},
 		}, nil
-	case version.CentOS:
+	case os.CentOS:
 		renderer, _ := shell.NewRenderer("bash")
 		return &centOSCloudConfig{
 			&cloudConfig{
-				series:    series,
+				series:    ser,
 				paccmder:  commands.NewYumPackageCommander(),
-				pacconfer: config.NewYumPackagingConfigurer(series),
+				pacconfer: config.NewYumPackagingConfigurer(ser),
 				renderer:  renderer,
 				attrs:     make(map[string]interface{}),
 			},
 		}, nil
 	default:
-		return nil, errors.NotFoundf("cloudconfig for series %q", series)
+		return nil, errors.NotFoundf("cloudconfig for series %q", ser)
 	}
 }
=== modified file 'src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go'
--- src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go	2016-03-14 14:26:14 +0000
+++ src/github.com/juju/juju/cloudconfig/cloudinit/renderscript_test.go	2016-03-22 15:18:22 +0000
@@ -41,7 +41,7 @@
 	environs.RegisterProvider("sshinit_test", &testProvider{})
 }
 
-func testConfig(c *gc.C, stateServer bool, vers version.Binary) *config.Config {
+func testConfig(c *gc.C, controller bool, vers version.Binary) *config.Config {
 	testConfig, err := config.New(config.UseDefaults, coretesting.FakeConfig())
 	c.Assert(err, jc.ErrorIsNil)
 	testConfig, err = testConfig.Apply(map[string]interface{}{
@@ -53,14 +53,17 @@
 	return testConfig
 }
 
-func (s *configureSuite) getCloudConfig(c *gc.C, stateServer bool, vers version.Binary) cloudinit.CloudConfig {
+func (s *configureSuite) getCloudConfig(c *gc.C, controller bool, vers version.Binary) cloudinit.CloudConfig {
 	var icfg *instancecfg.InstanceConfig
 	var err error
-	if stateServer {
-		icfg, err = instancecfg.NewBootstrapInstanceConfig(constraints.Value{}, vers.Series, "")
+	if controller {
+		icfg, err = instancecfg.NewBootstrapInstanceConfig(
+			constraints.Value{}, constraints.Value{},
+			vers.Series, "",
+		)
 		c.Assert(err, jc.ErrorIsNil)
 		icfg.InstanceId = "instance-id"
-		icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobManageEnviron, multiwatcher.JobHostUnits}
+		icfg.Jobs = []multiwatcher.MachineJob{multiwatcher.JobManageModel, multiwatcher.JobHostUnits}
 	} else {
 		icfg, err = instancecfg.NewInstanceConfig("0", "ya", imagemetadata.ReleasedStream, vers.Series, "", true, nil, nil, nil)
 		c.Assert(err, jc.ErrorIsNil)
@@ -70,7 +73,7 @@
 		Version: vers,
 		URL:     "http://testing.invalid/tools.tar.gz",
 	}
-	environConfig := testConfig(c, stateServer, vers)
+	environConfig := testConfig(c, controller, vers)
 	err = instancecfg.FinishInstanceConfig(icfg, environConfig)
 	c.Assert(err, jc.ErrorIsNil)
 	cloudcfg, err := cloudinit.New(icfg.Series)
=== modified file 'src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go'
--- src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go	2015-09-22 15:27:01
+0000 +++ src/github.com/juju/juju/cloudconfig/containerinit/container_userdata.go 2016-03-22 15:18:22 +0000 @@ -38,7 +38,7 @@ networkConfig *container.NetworkConfig, directory string, ) (string, error) { - userData, err := cloudInitUserData(instanceConfig, networkConfig) + userData, err := CloudInitUserData(instanceConfig, networkConfig) if err != nil { logger.Errorf("failed to create user data: %v", err) return "", err @@ -125,7 +125,7 @@ return cloudConfig, nil } -func cloudInitUserData( +func CloudInitUserData( instanceConfig *instancecfg.InstanceConfig, networkConfig *container.NetworkConfig, ) ([]byte, error) { @@ -153,7 +153,7 @@ return data, nil } -// templateUserData returns a minimal user data necessary for the template. +// TemplateUserData returns a minimal user data necessary for the template. // This should have the authorized keys, base packages, the cloud archive if // necessary, initial apt proxy config, and it should do the apt-get // update/upgrade initially. @@ -179,6 +179,7 @@ return nil, errors.Trace(err) } } + cloudconfig.SetUbuntuUser(config, authorizedKeys) config.AddScripts( "set -xe", // ensure we run all the scripts or abort. ) @@ -190,7 +191,6 @@ enablePackageUpdates = true } - config.AddSSHAuthorizedKeys(authorizedKeys) if enablePackageUpdates && config.RequiresCloudArchiveCloudTools() { config.AddCloudArchiveCloudTools() } @@ -217,7 +217,7 @@ // /etc/network/interfaces file which is left on the template LXC // container on shutdown. This is needed to allow cloned containers to // start in case no network config is provided during cloud-init, e.g. -// when AUFS is used or with the local provider (see bug #1431888). +// when AUFS is used. const defaultEtcNetworkInterfaces = ` # loopback interface auto lo === modified file 'src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go' --- src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/containerinit/container_userdata_test.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/containerinit" === modified file 'src/github.com/juju/juju/cloudconfig/containerinit/export_test.go' --- src/github.com/juju/juju/cloudconfig/containerinit/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/containerinit/export_test.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,6 @@ package containerinit var ( - CloudInitUserData = cloudInitUserData NetworkInterfacesFile = &networkInterfacesFile NewCloudInitConfigWithNetworks = newCloudInitConfigWithNetworks ShutdownInitCommands = shutdownInitCommands === modified file 'src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go' --- src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg.go 2016-03-22 15:18:22 +0000 @@ -15,6 +15,7 @@ "github.com/juju/names" "github.com/juju/utils" "github.com/juju/utils/proxy" + "github.com/juju/utils/series" "github.com/juju/utils/shell" "github.com/juju/juju/agent" @@ -50,22 +51,22 @@ // StateServingInfo holds the information for serving the state. 
// This must only be set if the Bootstrap field is true - // (state servers started subsequently will acquire their serving info + // (controllers started subsequently will acquire their serving info // from another server) StateServingInfo *params.StateServingInfo // MongoInfo holds the means for the new instance to communicate with the - // juju state database. Unless the new instance is running a state server - // (StateServer is set), there must be at least one state server address supplied. + // juju state database. Unless the new instance is running a controller + // (Controller is set), there must be at least one controller address supplied. // The entity name must match that of the instance being started, - // or be empty when starting a state server. + // or be empty when starting a controller. MongoInfo *mongo.MongoInfo // APIInfo holds the means for the new instance to communicate with the - // juju state API. Unless the new instance is running a state server (StateServer is - // set), there must be at least one state server address supplied. + // juju state API. Unless the new instance is running a controller (Controller is + // set), there must be at least one controller address supplied. // The entity name must match that of the instance being started, - // or be empty when starting a state server. + // or be empty when starting a controller. APIInfo *api.Info // InstanceId is the instance ID of the instance being initialised. @@ -91,6 +92,10 @@ // LogDir holds the directory that juju logs will be written to. LogDir string + // MetricsSpoolDir represents the spool directory path, where all + // metrics are stored. + MetricsSpoolDir string + // Jobs holds what machine jobs to run. Jobs []multiwatcher.MachineJob @@ -128,14 +133,17 @@ AgentEnvironment map[string]string // WARNING: this is only set if the instance being configured is - // a state server node. + // a controller node. // // Config holds the initial environment configuration. Config *config.Config - // Constraints holds the initial environment constraints. + // Constraints holds the machine constraints. Constraints constraints.Value + // EnvironConstraints holds the initial environment constraints. + EnvironConstraints constraints.Value + // DisableSSLHostnameVerification can be set to true to tell cloud-init // that it shouldn't verify SSL certificates DisableSSLHostnameVerification bool @@ -212,7 +220,7 @@ toolsVersion version.Number, ) (agent.ConfigSetter, error) { // TODO for HAState: the stateHostAddrs and apiHostAddrs here assume that - // if the instance is a stateServer then to use localhost. This may be + // if the instance is a controller then to use localhost. This may be // sufficient, but needs thought in the new world order. 
 	var password string
 	if cfg.MongoInfo == nil {
@@ -221,8 +229,11 @@
 		password = cfg.MongoInfo.Password
 	}
 	configParams := agent.AgentConfigParams{
-		DataDir:           cfg.DataDir,
-		LogDir:            cfg.LogDir,
+		Paths: agent.Paths{
+			DataDir:         cfg.DataDir,
+			LogDir:          cfg.LogDir,
+			MetricsSpoolDir: cfg.MetricsSpoolDir,
+		},
 		Jobs:              cfg.Jobs,
 		Tag:               tag,
 		UpgradedToVersion: toolsVersion,
@@ -233,7 +244,7 @@
 		CACert:            cfg.MongoInfo.CACert,
 		Values:            cfg.AgentEnvironment,
 		PreferIPv6:        cfg.PreferIPv6,
-		Environment:       cfg.APIInfo.EnvironTag,
+		Model:             cfg.APIInfo.ModelTag,
 	}
 	if !cfg.Bootstrap {
 		return agent.NewAgentConfig(configParams)
@@ -297,6 +308,9 @@
 	if cfg.LogDir == "" {
 		return errors.New("missing log directory")
 	}
+	if cfg.MetricsSpoolDir == "" {
+		return errors.New("missing metrics spool directory")
+	}
 	if len(cfg.Jobs) == 0 {
 		return errors.New("missing machine jobs")
 	}
@@ -318,8 +332,8 @@
 	if cfg.APIInfo == nil {
 		return errors.New("missing API info")
 	}
-	if cfg.APIInfo.EnvironTag.Id() == "" {
-		return errors.New("missing environment tag")
+	if cfg.APIInfo.ModelTag.Id() == "" {
+		return errors.New("missing model tag")
 	}
 	if len(cfg.APIInfo.CACert) == 0 {
 		return errors.New("missing API CA certificate")
@@ -329,22 +343,22 @@
 	}
 	if cfg.Bootstrap {
 		if cfg.Config == nil {
-			return errors.New("missing environment configuration")
+			return errors.New("missing model configuration")
 		}
 		if cfg.MongoInfo.Tag != nil {
-			return errors.New("entity tag must be nil when starting a state server")
+			return errors.New("entity tag must be nil when starting a controller")
 		}
 		if cfg.APIInfo.Tag != nil {
-			return errors.New("entity tag must be nil when starting a state server")
+			return errors.New("entity tag must be nil when starting a controller")
 		}
 		if cfg.StateServingInfo == nil {
 			return errors.New("missing state serving info")
 		}
 		if len(cfg.StateServingInfo.Cert) == 0 {
-			return errors.New("missing state server certificate")
+			return errors.New("missing controller certificate")
 		}
 		if len(cfg.StateServingInfo.PrivateKey) == 0 {
-			return errors.New("missing state server private key")
+			return errors.New("missing controller private key")
 		}
 		if len(cfg.StateServingInfo.CAPrivateKey) == 0 {
 			return errors.New("missing ca cert private key")
@@ -383,11 +397,15 @@
 
 // logDir returns a filesystem path to the location where applications
 // may create a folder containing logs
-var logDir = paths.MustSucceed(paths.LogDir(version.Current.Series))
+var logDir = paths.MustSucceed(paths.LogDir(series.HostSeries()))
+
+// DefaultBridgePrefix is the prefix for all network bridge device
+// names used for LXC and KVM containers.
+const DefaultBridgePrefix = "br-"
 
 // DefaultBridgeName is the network bridge device name used for LXC and KVM
 // containers
-const DefaultBridgeName = "juju-br0"
+const DefaultBridgeName = DefaultBridgePrefix + "eth0"
 
 // NewInstanceConfig sets up a basic machine configuration, for a
 // non-bootstrap node. You'll still need to supply more information,
@@ -412,11 +430,16 @@
 	if err != nil {
 		return nil, err
 	}
+	metricsSpoolDir, err := paths.MetricsSpoolDir(series)
+	if err != nil {
+		return nil, err
+	}
 	cloudInitOutputLog := path.Join(logDir, "cloud-init-output.log")
 	icfg := &InstanceConfig{
 		// Fixed entries.
DataDir: dataDir, LogDir: path.Join(logDir, "juju"), + MetricsSpoolDir: metricsSpoolDir, Jobs: []multiwatcher.MachineJob{multiwatcher.JobHostUnits}, CloudInitOutputLog: cloudInitOutputLog, MachineAgentServiceName: "jujud-" + names.NewMachineTag(machineID).String(), @@ -441,7 +464,7 @@ // NewBootstrapInstanceConfig sets up a basic machine configuration for a // bootstrap node. You'll still need to supply more information, but this // takes care of the fixed entries and the ones that are always needed. -func NewBootstrapInstanceConfig(cons constraints.Value, series, publicImageSigningKey string) (*InstanceConfig, error) { +func NewBootstrapInstanceConfig(cons, environCons constraints.Value, series, publicImageSigningKey string) (*InstanceConfig, error) { // For a bootstrap instance, FinishInstanceConfig will provide the // state.Info and the api.Info. The machine id must *always* be "0". icfg, err := NewInstanceConfig("0", agent.BootstrapNonce, "", series, publicImageSigningKey, true, nil, nil, nil) @@ -450,10 +473,11 @@ } icfg.Bootstrap = true icfg.Jobs = []multiwatcher.MachineJob{ - multiwatcher.JobManageEnviron, + multiwatcher.JobManageModel, multiwatcher.JobHostUnits, } icfg.Constraints = cons + icfg.EnvironConstraints = environCons return icfg, nil } @@ -473,7 +497,7 @@ enableOSUpgrade bool, ) error { if authorizedKeys == "" { - return fmt.Errorf("environment configuration has no authorized-keys") + return fmt.Errorf("model configuration has no authorized-keys") } icfg.AuthorizedKeys = authorizedKeys if icfg.AgentEnvironment == nil { @@ -521,13 +545,13 @@ if isStateInstanceConfig(icfg) { // Add NUMACTL preference. Needed to work for both bootstrap and high availability - // Only makes sense for state server + // Only makes sense for controller logger.Debugf("Setting numa ctl preference to %v", cfg.NumaCtlPreference()) // Unfortunately, AgentEnvironment can only take strings as values icfg.AgentEnvironment[agent.NumaCtlPreference] = fmt.Sprintf("%v", cfg.NumaCtlPreference()) } // The following settings are only appropriate at bootstrap time. At the - // moment, the only state server is the bootstrap node, but this + // moment, the only controller is the bootstrap node, but this // will probably change. if !icfg.Bootstrap { return nil @@ -537,35 +561,35 @@ } caCert, hasCACert := cfg.CACert() if !hasCACert { - return errors.New("environment configuration has no ca-cert") + return errors.New("model configuration has no ca-cert") } password := cfg.AdminSecret() if password == "" { - return errors.New("environment configuration has no admin-secret") + return errors.New("model configuration has no admin-secret") } passwordHash := utils.UserPasswordHash(password, utils.CompatSalt) - envUUID, uuidSet := cfg.UUID() + modelUUID, uuidSet := cfg.UUID() if !uuidSet { - return errors.New("config missing environment uuid") + return errors.New("config missing model uuid") } icfg.APIInfo = &api.Info{ - Password: passwordHash, - CACert: caCert, - EnvironTag: names.NewEnvironTag(envUUID), + Password: passwordHash, + CACert: caCert, + ModelTag: names.NewModelTag(modelUUID), } icfg.MongoInfo = &mongo.MongoInfo{Password: passwordHash, Info: mongo.Info{CACert: caCert}} - // These really are directly relevant to running a state server. - // Initially, generate a state server certificate with no host IP - // addresses in the SAN field. Once the state server is up and the + // These really are directly relevant to running a controller. 
+ // Initially, generate a controller certificate with no host IP + // addresses in the SAN field. Once the controller is up and the // NIC addresses become known, the certificate can be regenerated. - cert, key, err := cfg.GenerateStateServerCertAndKey(nil) + cert, key, err := cfg.GenerateControllerCertAndKey(nil) if err != nil { - return errors.Annotate(err, "cannot generate state server certificate") + return errors.Annotate(err, "cannot generate controller certificate") } caPrivateKey, hasCAPrivateKey := cfg.CAPrivateKey() if !hasCAPrivateKey { - return errors.New("environment configuration has no ca-private-key") + return errors.New("model configuration has no ca-private-key") } srvInfo := params.StateServingInfo{ StatePort: cfg.StatePort(), @@ -586,9 +610,9 @@ // machine instance, if the provider supports them. func InstanceTags(cfg *config.Config, jobs []multiwatcher.MachineJob) map[string]string { uuid, _ := cfg.UUID() - instanceTags := tags.ResourceTags(names.NewEnvironTag(uuid), cfg) + instanceTags := tags.ResourceTags(names.NewModelTag(uuid), cfg) if multiwatcher.AnyJobNeedsState(jobs...) { - instanceTags[tags.JujuStateServer] = "true" + instanceTags[tags.JujuController] = "true" } return instanceTags } @@ -608,17 +632,17 @@ return nil, err } if _, ok := cfg.AgentVersion(); !ok { - return nil, fmt.Errorf("environment configuration has no agent-version") + return nil, fmt.Errorf("model configuration has no agent-version") } return cfg, nil } // isStateInstanceConfig determines if given machine configuration -// is for State Server by iterating over machine's jobs. -// If JobManageEnviron is present, this is a state server. +// is for controller by iterating over machine's jobs. +// If JobManageModel is present, this is a controller. func isStateInstanceConfig(icfg *InstanceConfig) bool { for _, aJob := range icfg.Jobs { - if aJob == multiwatcher.JobManageEnviron { + if aJob == multiwatcher.JobManageModel { return true } } === modified file 'src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go' --- src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/instancecfg/instancecfg_test.go 2016-03-22 15:18:22 +0000 @@ -19,16 +19,16 @@ var _ = gc.Suite(&instancecfgSuite{}) -func (*instancecfgSuite) TestInstanceTagsStateServer(c *gc.C) { - cfg := testing.CustomEnvironConfig(c, testing.Attrs{}) - stateServerJobs := []multiwatcher.MachineJob{multiwatcher.JobManageEnviron} - nonStateServerJobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits} - testInstanceTags(c, cfg, stateServerJobs, map[string]string{ - "juju-env-uuid": testing.EnvironmentTag.Id(), - "juju-is-state": "true", +func (*instancecfgSuite) TestInstanceTagsController(c *gc.C) { + cfg := testing.CustomModelConfig(c, testing.Attrs{}) + controllerJobs := []multiwatcher.MachineJob{multiwatcher.JobManageModel} + nonControllerJobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits} + testInstanceTags(c, cfg, controllerJobs, map[string]string{ + "juju-model-uuid": testing.ModelTag.Id(), + "juju-is-controller": "true", }) - testInstanceTags(c, cfg, nonStateServerJobs, map[string]string{ - "juju-env-uuid": testing.EnvironmentTag.Id(), + testInstanceTags(c, cfg, nonControllerJobs, map[string]string{ + "juju-model-uuid": testing.ModelTag.Id(), }) } @@ -40,18 +40,18 @@ testInstanceTags(c, cfgWithoutUUID, []multiwatcher.MachineJob(nil), - map[string]string{"juju-env-uuid": ""}, + map[string]string{"juju-model-uuid": ""}, ) } func 
(*instancecfgSuite) TestInstanceTagsUserSpecified(c *gc.C) { - cfg := testing.CustomEnvironConfig(c, testing.Attrs{ + cfg := testing.CustomModelConfig(c, testing.Attrs{ "resource-tags": "a=b c=", }) testInstanceTags(c, cfg, nil, map[string]string{ - "juju-env-uuid": testing.EnvironmentTag.Id(), - "a": "b", - "c": "", + "juju-model-uuid": testing.ModelTag.Id(), + "a": "b", + "c": "", }) } === modified file 'src/github.com/juju/juju/cloudconfig/powershell_helpers.go' --- src/github.com/juju/juju/cloudconfig/powershell_helpers.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/cloudconfig/powershell_helpers.go 2016-03-22 15:18:22 +0000 @@ -26,12 +26,11 @@ $ErrorActionPreference = "Stop" -function ExecRetry($command, $maxRetryCount = 10, $retryInterval=2) +function ExecRetry($command, $retryInterval = 15) { $currErrorActionPreference = $ErrorActionPreference $ErrorActionPreference = "Continue" - $retryCount = 0 while ($true) { try @@ -41,17 +40,8 @@ } catch [System.Exception] { - $retryCount++ - if ($retryCount -ge $maxRetryCount) - { - $ErrorActionPreference = $currErrorActionPreference - throw - } - else - { - Write-Error $_.Exception - Start-Sleep $retryInterval - } + Write-Error $_.Exception + Start-Sleep $retryInterval } } @@ -844,7 +834,7 @@ New-ItemProperty $path -Name "jujud" -Value 0 -PropertyType "DWord" $secpasswd = ConvertTo-SecureString $juju_passwd -AsPlainText -Force -$jujuCreds = New-Object System.Management.Automation.PSCredential (".\jujud", $secpasswd) +$jujuCreds = New-Object System.Management.Automation.PSCredential ($juju_user, $secpasswd) ` === modified file 'src/github.com/juju/juju/cloudconfig/providerinit/providerinit.go' --- src/github.com/juju/juju/cloudconfig/providerinit/providerinit.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cloudconfig/providerinit/providerinit.go 2016-03-22 15:18:22 +0000 @@ -9,12 +9,12 @@ import ( "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/utils/series" "github.com/juju/juju/cloudconfig" "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/cloudconfig/providerinit/renderers" - "github.com/juju/juju/version" ) var logger = loggo.GetLogger("juju.cloudconfig.providerinit") @@ -59,17 +59,11 @@ if err != nil { return nil, errors.Trace(err) } - operatingSystem, err := version.GetOSFromSeries(icfg.Series) - if err != nil { - return nil, errors.Trace(err) - } - // This might get replaced by a renderer.RenderUserdata which will either - // render it as YAML or Bash since some CentOS images might ship without cloudnit - udata, err := cloudcfg.RenderYAML() - if err != nil { - return nil, errors.Trace(err) - } - udata, err = renderer.EncodeUserdata(udata, operatingSystem) + operatingSystem, err := series.GetOSFromSeries(icfg.Series) + if err != nil { + return nil, errors.Trace(err) + } + udata, err := renderer.Render(cloudcfg, operatingSystem) if err != nil { return nil, errors.Trace(err) } === modified file 'src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go' --- src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cloudconfig/providerinit/providerinit_test.go 2016-03-22 15:18:22 +0000 @@ -14,7 +14,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/agent" "github.com/juju/juju/api" @@ -37,12 +37,12 @@ ) // dummySampleConfig returns the dummy 
sample config without -// the state server configured. +// the controller configured. // This function also exists in environs/config_test // Maybe place it in dummy and export it? func dummySampleConfig() testing.Attrs { return dummy.SampleConfig().Merge(testing.Attrs{ - "state-server": false, + "controller": false, }) } @@ -140,7 +140,7 @@ "authorized-keys": "we-are-the-keys", "admin-secret": "lisboan-pork", "agent-version": "1.2.3", - "state-server": false, + "controller": false, }) cfg, err := config.New(config.NoDefaults, attrs) c.Assert(err, jc.ErrorIsNil) @@ -155,7 +155,7 @@ password := utils.UserPasswordHash("lisboan-pork", utils.CompatSalt) c.Check(icfg.APIInfo, gc.DeepEquals, &api.Info{ Password: password, CACert: testing.CACert, - EnvironTag: testing.EnvironmentTag, + ModelTag: testing.ModelTag, }) c.Check(icfg.MongoInfo, gc.DeepEquals, &mongo.MongoInfo{ Password: password, Info: mongo.Info{CACert: testing.CACert}, @@ -181,29 +181,33 @@ } func (s *CloudInitSuite) TestUserData(c *gc.C) { - s.testUserData(c, false) -} - -func (s *CloudInitSuite) TestStateServerUserData(c *gc.C) { - s.testUserData(c, true) -} - -func (*CloudInitSuite) testUserData(c *gc.C, bootstrap bool) { - testJujuHome := c.MkDir() - defer osenv.SetJujuHome(osenv.SetJujuHome(testJujuHome)) + s.testUserData(c, "quantal", false) +} + +func (s *CloudInitSuite) TestControllerUserData(c *gc.C) { + s.testUserData(c, "quantal", true) +} + +func (s *CloudInitSuite) TestControllerUserDataPrecise(c *gc.C) { + s.testUserData(c, "precise", true) +} + +func (*CloudInitSuite) testUserData(c *gc.C, series string, bootstrap bool) { + testJujuXDGDataHome := c.MkDir() + defer osenv.SetJujuXDGDataHome(osenv.SetJujuXDGDataHome(testJujuXDGDataHome)) // Use actual series paths instead of local defaults - logDir := must(paths.LogDir("quantal")) - dataDir := must(paths.DataDir("quantal")) + logDir := must(paths.LogDir(series)) + metricsSpoolDir := must(paths.MetricsSpoolDir(series)) + dataDir := must(paths.DataDir(series)) tools := &tools.Tools{ - URL: "http://foo.com/tools/released/juju1.2.3-quantal-amd64.tgz", - Version: version.MustParseBinary("1.2.3-quantal-amd64"), + URL: "http://tools.testing/tools/released/juju.tgz", + Version: version.Binary{version.MustParse("1.2.3"), "quantal", "amd64"}, } envConfig, err := config.New(config.NoDefaults, dummySampleConfig()) c.Assert(err, jc.ErrorIsNil) - series := "quantal" allJobs := []multiwatcher.MachineJob{ - multiwatcher.JobManageEnviron, + multiwatcher.JobManageModel, multiwatcher.JobHostUnits, multiwatcher.JobManageNetworking, } @@ -221,14 +225,15 @@ Tag: names.NewMachineTag("10"), }, APIInfo: &api.Info{ - Addrs: []string{"127.0.0.1:1234"}, - Password: "pw2", - CACert: "CA CERT\n" + testing.CACert, - Tag: names.NewMachineTag("10"), - EnvironTag: testing.EnvironmentTag, + Addrs: []string{"127.0.0.1:1234"}, + Password: "pw2", + CACert: "CA CERT\n" + testing.CACert, + Tag: names.NewMachineTag("10"), + ModelTag: testing.ModelTag, }, DataDir: dataDir, LogDir: path.Join(logDir, "juju"), + MetricsSpoolDir: metricsSpoolDir, Jobs: allJobs, CloudInitOutputLog: path.Join(logDir, "cloud-init-output.log"), Config: envConfig, @@ -277,7 +282,7 @@ // for MAAS. MAAS needs to configure and then bounce the // network interfaces, which would sever the SSH connection // in the synchronous bootstrap phase. 
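The assertion that follows pins down the exact cloud-init document the non-precise path now emits: a "users" entry rather than the old top-level "ssh_authorized_keys" list. For reference, a minimal sketch of how that stanza serializes, using the yaml.v2 package these tests were migrated to; the field values are copied from the expected map below, the groups list is elided, and the program itself is invented for illustration:

    package main

    import (
        "fmt"

        goyaml "gopkg.in/yaml.v2"
    )

    func main() {
        // One user entry shaped like the expected map in the test below.
        user := map[interface{}]interface{}{
            "name":                "ubuntu",
            "lock_passwd":         true,
            "shell":               "/bin/bash",
            "sudo":                []interface{}{"ALL=(ALL) NOPASSWD:ALL"},
            "ssh-authorized-keys": []interface{}{"wheredidileavemykeys"},
        }
        out, err := goyaml.Marshal(map[interface{}]interface{}{
            "users": []interface{}{user},
        })
        if err != nil {
            panic(err)
        }
        // Prints a cloud-init "users:" block, ready to embed in user-data.
        fmt.Printf("%s", out)
    }

On precise the same keys instead surface as a top-level ssh_authorized_keys list, which is exactly the fallback branch asserted here.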
- c.Check(config, jc.DeepEquals, map[interface{}]interface{}{ + expected := map[interface{}]interface{}{ "output": map[interface{}]interface{}{ "all": "| tee -a /var/log/cloud-init-output.log", }, @@ -289,8 +294,29 @@ "install -D -m 644 /dev/null '/var/lib/juju/nonce.txt'", "printf '%s\\n' '5432' > '/var/lib/juju/nonce.txt'", }, - "ssh_authorized_keys": []interface{}{"wheredidileavemykeys"}, - }) + } + // Series with old cloudinit versions don't support adding + // users so need the old way to set SSH authorized keys. + if series == "precise" { + expected["ssh_authorized_keys"] = []interface{}{ + "wheredidileavemykeys", + } + } else { + expected["users"] = []interface{}{ + map[interface{}]interface{}{ + "name": "ubuntu", + "lock_passwd": true, + "groups": []interface{}{"adm", "audio", + "cdrom", "dialout", "dip", + "floppy", "netdev", "plugdev", + "sudo", "video"}, + "shell": "/bin/bash", + "sudo": []interface{}{"ALL=(ALL) NOPASSWD:ALL"}, + "ssh-authorized-keys": []interface{}{"wheredidileavemykeys"}, + }, + } + } + c.Check(config, jc.DeepEquals, expected) } else { // Just check that the cloudinit config looks good, // and that there are more runcmds than the additional @@ -302,6 +328,7 @@ func (s *CloudInitSuite) TestWindowsUserdataEncoding(c *gc.C) { series := "win8" + metricsSpoolDir := must(paths.MetricsSpoolDir("win8")) tools := &tools.Tools{ URL: "http://foo.com/tools/released/juju1.2.3-win8-amd64.tgz", Version: version.MustParseBinary("1.2.3-win8-amd64"), @@ -330,15 +357,16 @@ }, }, APIInfo: &api.Info{ - Addrs: []string{"state-addr.testing.invalid:54321"}, - Password: "bletch", - CACert: "CA CERT\n" + testing.CACert, - Tag: names.NewMachineTag("10"), - EnvironTag: testing.EnvironmentTag, + Addrs: []string{"state-addr.testing.invalid:54321"}, + Password: "bletch", + CACert: "CA CERT\n" + testing.CACert, + Tag: names.NewMachineTag("10"), + ModelTag: testing.ModelTag, }, MachineAgentServiceName: "jujud-machine-10", DataDir: dataDir, LogDir: path.Join(logDir, "juju"), + MetricsSpoolDir: metricsSpoolDir, CloudInitOutputLog: path.Join(logDir, "cloud-init-output.log"), } === modified file 'src/github.com/juju/juju/cloudconfig/providerinit/renderers/common.go' --- src/github.com/juju/juju/cloudconfig/providerinit/renderers/common.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cloudconfig/providerinit/renderers/common.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "fmt" "github.com/juju/juju/cloudconfig" + "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/utils" ) @@ -32,3 +33,33 @@ string(udata) + ``) } + +// Decorator is a function that can be used as part of a rendering pipeline. +type Decorator func([]byte) []byte + +// RenderYAML renders the given cloud-config as YAML, and then passes the +// YAML through the given decorators. +func RenderYAML(cfg cloudinit.RenderConfig, ds ...Decorator) ([]byte, error) { + out, err := cfg.RenderYAML() + if err != nil { + return nil, err + } + return applyDecorators(out, ds), nil +} + +// RenderScript renders the given cloud-config as a script, and then passes the +// script through the given decorators. 
+func RenderScript(cfg cloudinit.RenderConfig, ds ...Decorator) ([]byte, error) { + out, err := cfg.RenderScript() + if err != nil { + return nil, err + } + return applyDecorators([]byte(out), ds), nil +} + +func applyDecorators(out []byte, ds []Decorator) []byte { + for _, d := range ds { + out = d(out) + } + return out +} === modified file 'src/github.com/juju/juju/cloudconfig/providerinit/renderers/common_test.go' --- src/github.com/juju/juju/cloudconfig/providerinit/renderers/common_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cloudconfig/providerinit/renderers/common_test.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/cloudconfig" + "github.com/juju/juju/cloudconfig/cloudinit/cloudinittest" "github.com/juju/juju/cloudconfig/providerinit/renderers" "github.com/juju/juju/testing" ) @@ -43,3 +44,31 @@ out := renderers.AddPowershellTags(in) c.Assert(out, jc.DeepEquals, expected) } + +func (s *RenderersSuite) TestRenderYAML(c *gc.C) { + cloudcfg := &cloudinittest.CloudConfig{YAML: []byte("yaml")} + d1 := func(in []byte) []byte { + return []byte("1." + string(in)) + } + d2 := func(in []byte) []byte { + return []byte("2." + string(in)) + } + out, err := renderers.RenderYAML(cloudcfg, d2, d1) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), jc.DeepEquals, "1.2.yaml") + cloudcfg.CheckCallNames(c, "RenderYAML") +} + +func (s *RenderersSuite) TestRenderScript(c *gc.C) { + cloudcfg := &cloudinittest.CloudConfig{Script: "script"} + d1 := func(in []byte) []byte { + return []byte("1." + string(in)) + } + d2 := func(in []byte) []byte { + return []byte("2." + string(in)) + } + out, err := renderers.RenderScript(cloudcfg, d2, d1) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), jc.DeepEquals, "1.2.script") + cloudcfg.CheckCallNames(c, "RenderScript") +} === modified file 'src/github.com/juju/juju/cloudconfig/providerinit/renderers/interface.go' --- src/github.com/juju/juju/cloudconfig/providerinit/renderers/interface.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cloudconfig/providerinit/renderers/interface.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,9 @@ package renderers import ( - "github.com/juju/juju/version" + "github.com/juju/utils/os" + + "github.com/juju/juju/cloudconfig/cloudinit" ) // ProviderRenderer defines a method to encode userdata depending on @@ -18,8 +20,5 @@ // the userdata differently(bash vs yaml) since some providers might // not ship cloudinit on every OS type ProviderRenderer interface { - - // EncodeUserdata takes a []byte and encodes it in the right format. - // The implementations are based on the different providers and OSTypes. - EncodeUserdata([]byte, version.OSType) ([]byte, error) + Render(cloudinit.CloudConfig, os.OSType) ([]byte, error) } === modified file 'src/github.com/juju/juju/cloudconfig/sshinit/configure.go' --- src/github.com/juju/juju/cloudconfig/sshinit/configure.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cloudconfig/sshinit/configure.go 2016-03-22 15:18:22 +0000 @@ -5,13 +5,16 @@ package sshinit import ( + "encoding/base64" + "fmt" "io" "strings" "github.com/juju/loggo" + "github.com/juju/utils" + "github.com/juju/utils/ssh" "github.com/juju/juju/cloudconfig/cloudinit" - "github.com/juju/juju/utils/ssh" ) var logger = loggo.GetLogger("juju.cloudinit.sshinit") @@ -54,7 +57,32 @@ // to have been returned by cloudinit ConfigureScript. 
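Taken together, the Decorator helpers and the slimmed-down ProviderRenderer interface reduce a provider's userdata handling to a single switch: pick a render mode per OS, then chain any post-processing. A sketch of a conforming implementation; the provider package, renderer type, and inline base64 decorator are invented for this example, while AddPowershellTags is the helper exercised in the tests above:

    package exampleprovider

    import (
        "encoding/base64"

        "github.com/juju/errors"
        "github.com/juju/utils/os"

        "github.com/juju/juju/cloudconfig/cloudinit"
        "github.com/juju/juju/cloudconfig/providerinit/renderers"
    )

    // toBase64 matches renderers.Decorator: []byte in, []byte out.
    func toBase64(in []byte) []byte {
        out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
        base64.StdEncoding.Encode(out, in)
        return out
    }

    type exampleRenderer struct{}

    // Render satisfies renderers.ProviderRenderer: cloud-init YAML for the
    // Linux targets, a tagged PowerShell script for Windows, base64 on both
    // paths for transports that require it.
    func (exampleRenderer) Render(cfg cloudinit.CloudConfig, osType os.OSType) ([]byte, error) {
        switch osType {
        case os.Ubuntu, os.CentOS:
            return renderers.RenderYAML(cfg, toBase64)
        case os.Windows:
            return renderers.RenderScript(cfg, renderers.AddPowershellTags, toBase64)
        default:
            return nil, errors.Errorf("cannot encode userdata for OS: %v", osType)
        }
    }

The sshinit change that follows is the delivery half of the same story: once a script is rendered, RunConfigureScript has to move it onto the machine without tripping over the SSH client's command-line handling.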
func RunConfigureScript(script string, params ConfigureParams) error { logger.Tracef("Running script on %s: %s", params.Host, script) - cmd := ssh.Command(params.Host, []string{"sudo", "/bin/bash"}, nil) + + encoded := base64.StdEncoding.EncodeToString([]byte(` +set -e +tmpfile=$(mktemp) +trap "rm -f $tmpfile" EXIT +cat > $tmpfile +/bin/bash $tmpfile +`)) + + // bash will read a byte at a time when consuming commands + // from stdin. We avoid sending the entire script -- which + // will be very large when uploading tools -- directly to + // bash for this reason. Instead, run cat which will write + // the script to disk, and then execute it from there. + cmd := ssh.Command(params.Host, []string{ + "sudo", "/bin/bash", "-c", + // The outer bash interprets the $(...), and executes + // the decoded script in the nested bash. This avoids + // linebreaks in the commandline, which the go.crypto- + // based client has trouble with. + fmt.Sprintf( + `/bin/bash -c "$(echo %s | base64 -d)"`, + utils.ShQuote(encoded), + ), + }, nil) + cmd.Stdin = strings.NewReader(script) cmd.Stderr = params.ProgressWriter return cmd.Run() === modified file 'src/github.com/juju/juju/cloudconfig/userdatacfg.go' --- src/github.com/juju/juju/cloudconfig/userdatacfg.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cloudconfig/userdatacfg.go 2016-03-22 15:18:22 +0000 @@ -10,11 +10,12 @@ "github.com/juju/errors" "github.com/juju/names" "github.com/juju/utils" + "github.com/juju/utils/os" + "github.com/juju/utils/series" "github.com/juju/juju/agent" "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/instancecfg" - "github.com/juju/juju/version" ) const ( @@ -42,13 +43,13 @@ ConfigureJuju() error } -// UserdataConfig is supposed to take in an instanceConfig as well as a +// NewUserdataConfig is supposed to take in an instanceConfig as well as a // cloudinit.cloudConfig and add attributes in the cloudinit structure based on // the values inside instanceConfig and on the series func NewUserdataConfig(icfg *instancecfg.InstanceConfig, conf cloudinit.CloudConfig) (UserdataConfig, error) { // TODO(ericsnow) bug #1426217 // Protect icfg and conf better. - operatingSystem, err := version.GetOSFromSeries(icfg.Series) + operatingSystem, err := series.GetOSFromSeries(icfg.Series) if err != nil { return nil, err } @@ -61,11 +62,11 @@ } switch operatingSystem { - case version.Ubuntu: - return &unixConfigure{base}, nil - case version.CentOS: - return &unixConfigure{base}, nil - case version.Windows: + case os.Ubuntu: + return &unixConfigure{base}, nil + case os.CentOS: + return &unixConfigure{base}, nil + case os.Windows: return &windowsConfigure{base}, nil default: return nil, errors.NotSupportedf("OS %s", icfg.Series) @@ -76,7 +77,7 @@ tag names.Tag icfg *instancecfg.InstanceConfig conf cloudinit.CloudConfig - os version.OSType + os os.OSType } // addAgentInfo adds agent-required information to the agent's directory @@ -121,23 +122,52 @@ svcName := c.icfg.MachineAgentServiceName // TODO (gsamfira): This is temporary until we find a cleaner way to fix // cloudinit.LogProgressCmd to not add >&9 on Windows. - targetOS, err := version.GetOSFromSeries(c.icfg.Series) + targetOS, err := series.GetOSFromSeries(c.icfg.Series) if err != nil { return err } - if targetOS != version.Windows { + if targetOS != os.Windows { c.conf.AddRunCmd(cloudinit.LogProgressCmd("Starting Juju machine agent (%s)", svcName)) } c.conf.AddScripts(cmds...) 
return nil } +// SetUbuntuUser creates an "ubuntu" user for unix systems so the juju client +// can access the machine using ssh with the configuration we expect. +// On precise, the default cloudinit version is too old to support the users +// option, so instead rely on the default user being created and adding keys. +// It may make sense in the future to add a "juju" user instead across +// all distributions. +func SetUbuntuUser(conf cloudinit.CloudConfig, authorizedKeys string) { + targetSeries := conf.GetSeries() + if targetSeries == "precise" { + conf.SetSSHAuthorizedKeys(authorizedKeys) + } else { + var groups []string + targetOS, _ := series.GetOSFromSeries(targetSeries) + switch targetOS { + case os.Ubuntu: + groups = UbuntuGroups + case os.CentOS: + groups = CentOSGroups + } + conf.AddUser(&cloudinit.User{ + Name: "ubuntu", + Groups: groups, + Shell: "/bin/bash", + Sudo: []string{"ALL=(ALL) NOPASSWD:ALL"}, + SSHAuthorizedKeys: authorizedKeys, + }) + } +} + // TODO(ericsnow) toolsSymlinkCommand should just be replaced with a // call to shell.Renderer.Symlink. func (c *baseConfigure) toolsSymlinkCommand(toolsDir string) string { switch c.os { - case version.Windows: + case os.Windows: return fmt.Sprintf( `cmd.exe /C mklink /D %s %v`, c.conf.ShellRenderer().FromSlash(toolsDir), === modified file 'src/github.com/juju/juju/cloudconfig/userdatacfg_test.go' --- src/github.com/juju/juju/cloudconfig/userdatacfg_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/cloudconfig/userdatacfg_test.go 2016-03-22 15:18:22 +0000 @@ -17,7 +17,7 @@ pacconf "github.com/juju/utils/packaging/config" "github.com/juju/utils/set" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/agent" "github.com/juju/juju/api" @@ -44,10 +44,11 @@ var _ = gc.Suite(&cloudinitSuite{}) var ( - envConstraints = constraints.MustParse("mem=2G") + envConstraints = constraints.MustParse("mem=2G") + bootstrapConstraints = constraints.MustParse("mem=4G") allMachineJobs = []multiwatcher.MachineJob{ - multiwatcher.JobManageEnviron, + multiwatcher.JobManageModel, multiwatcher.JobHostUnits, multiwatcher.JobManageNetworking, } @@ -68,6 +69,10 @@ return path.Join(logDir, "cloud-init-output.log") } +func metricsSpoolDir(series string) string { + return must(paths.MetricsSpoolDir(series)) +} + // TODO: add this to the utils package func must(s string, err error) string { if err != nil { @@ -103,6 +108,7 @@ cfg.MachineNonce = "FAKE_NONCE" cfg.InstanceId = "i-machine" cfg.Jobs = normalMachineJobs + cfg.MetricsSpoolDir = metricsSpoolDir(series) // MongoInfo and APIInfo (sans Tag) must be initialized before // calling setMachineID(). cfg.MongoInfo = &mongo.MongoInfo{ @@ -113,16 +119,17 @@ }, } cfg.APIInfo = &api.Info{ - Addrs: []string{"state-addr.testing.invalid:54321"}, - Password: "bletch", - CACert: "CA CERT\n" + testing.CACert, - EnvironTag: testing.EnvironmentTag, + Addrs: []string{"state-addr.testing.invalid:54321"}, + Password: "bletch", + CACert: "CA CERT\n" + testing.CACert, + ModelTag: testing.ModelTag, } cfg.setMachineID(defaultMachineID) cfg.setSeries(series) if bootstrap { - return cfg.setStateServer() + return cfg.setController() } + return cfg } @@ -152,9 +159,9 @@ return cfg } -// maybeSetEnvironConfig sets the Config field to the given envConfig, if not // nil.
-func (cfg *testInstanceConfig) maybeSetEnvironConfig(envConfig *config.Config) *testInstanceConfig { +func (cfg *testInstanceConfig) maybeSetModelConfig(envConfig *config.Config) *testInstanceConfig { if envConfig != nil { cfg.Config = envConfig } @@ -180,11 +187,12 @@ return cfg } -// setStateServer updates the config to be suitable for bootstrapping -// a state server instance. -func (cfg *testInstanceConfig) setStateServer() *testInstanceConfig { +// setController updates the config to be suitable for bootstrapping +// a controller instance. +func (cfg *testInstanceConfig) setController() *testInstanceConfig { cfg.setMachineID("0") - cfg.Constraints = envConstraints + cfg.Constraints = bootstrapConstraints + cfg.EnvironConstraints = envConstraints cfg.Bootstrap = true cfg.StateServingInfo = stateServingInfo cfg.Jobs = allMachineJobs @@ -221,7 +229,7 @@ inexactMatch bool } -func minimalEnvironConfig(c *gc.C) *config.Config { +func minimalModelConfig(c *gc.C) *config.Config { cfg, err := config.New(config.NoDefaults, testing.FakeConfig()) c.Assert(err, jc.ErrorIsNil) c.Assert(cfg, gc.NotNil) @@ -271,7 +279,7 @@ setEnvConfig: true, }, - // precise state server + // precise controller { cfg: makeBootstrapConfig("precise"), setEnvConfig: true, @@ -301,7 +309,7 @@ cat > '/var/lib/juju/agents/machine-0/agent\.conf' << 'EOF'\\n.*\\nEOF chmod 0600 '/var/lib/juju/agents/machine-0/agent\.conf' echo 'Bootstrapping Juju machine agent'.* -/var/lib/juju/tools/1\.2\.3-precise-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --env-config '[^']*' --instance-id 'i-bootstrap' --constraints 'mem=2048M' --debug +/var/lib/juju/tools/1\.2\.3-precise-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --model-config '[^']*' --instance-id 'i-bootstrap' --bootstrap-constraints 'mem=4096M' --constraints 'mem=2048M' --debug ln -s 1\.2\.3-precise-amd64 '/var/lib/juju/tools/machine-0' echo 'Starting Juju machine agent \(jujud-machine-0\)'.* cat > /etc/init/jujud-machine-0\.conf << 'EOF'\\ndescription "juju agent for machine-0"\\nauthor "Juju Team "\\nstart on runlevel \[2345\]\\nstop on runlevel \[!2345\]\\nrespawn\\nnormal exit 0\\n\\nlimit nofile 20000 20000\\n\\nscript\\n\\n\\n # Ensure log files are properly protected\\n touch /var/log/juju/machine-0\.log\\n chown syslog:syslog /var/log/juju/machine-0\.log\\n chmod 0600 /var/log/juju/machine-0\.log\\n\\n exec '/var/lib/juju/tools/machine-0/jujud' machine --data-dir '/var/lib/juju' --machine-id 0 --debug >> /var/log/juju/machine-0\.log 2>&1\\nend script\\nEOF\\n @@ -310,7 +318,7 @@ `, }, - // raring state server - we just test the raring-specific parts of the output. + // raring controller - we just test the raring-specific parts of the output. 
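The doubled flags in the precise script above come straight from the two constraints values the suite now carries: bootstrapConstraints ("mem=4G") surfaces as --bootstrap-constraints 'mem=4096M' and envConstraints ("mem=2G") as --constraints 'mem=2048M', since constraints.Value normalizes sizes to megabytes. A sketch of the rendering rule, using only packages these tests already depend on; the helper name and package are invented, and "args" stands in for the generated command line:

    package example

    import (
        "github.com/juju/utils"

        "github.com/juju/juju/constraints"
    )

    // constraintArgs mirrors how the userdata generator appends the two
    // constraint flags: an empty value renders as "" and is skipped.
    func constraintArgs(boot, model constraints.Value) string {
        args := ""
        if s := boot.String(); s != "" {
            args += " --bootstrap-constraints " + utils.ShQuote(s)
        }
        if s := model.String(); s != "" {
            args += " --constraints " + utils.ShQuote(s)
        }
        return args
    }

With MustParse("mem=4G") and MustParse("mem=2G") this yields both flags; with an empty constraints.Value for the model, only --bootstrap-constraints remains, which is what the "empty environ constraints" case below asserts. The raring case that follows checks the same pair of flags.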
{ cfg: makeBootstrapConfig("raring"), setEnvConfig: true, @@ -321,13 +329,13 @@ sha256sum \$bin/tools\.tar\.gz > \$bin/juju1\.2\.3-raring-amd64\.sha256 grep '1234' \$bin/juju1\.2\.3-raring-amd64.sha256 \|\| \(echo "Tools checksum mismatch"; exit 1\) printf %s '{"version":"1\.2\.3-raring-amd64","url":"http://foo\.com/tools/released/juju1\.2\.3-raring-amd64\.tgz","sha256":"1234","size":10}' > \$bin/downloaded-tools\.txt -/var/lib/juju/tools/1\.2\.3-raring-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --env-config '[^']*' --instance-id 'i-bootstrap' --constraints 'mem=2048M' --debug +/var/lib/juju/tools/1\.2\.3-raring-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --model-config '[^']*' --instance-id 'i-bootstrap' --bootstrap-constraints 'mem=4096M' --constraints 'mem=2048M' --debug ln -s 1\.2\.3-raring-amd64 '/var/lib/juju/tools/machine-0' rm \$bin/tools\.tar\.gz && rm \$bin/juju1\.2\.3-raring-amd64\.sha256 `, }, - // quantal non state server. + // quantal non controller. { cfg: makeNormalConfig("quantal"), expectScripts: ` @@ -361,7 +369,7 @@ `, }, - // non state server with systemd (vivid) + // non controller with systemd (vivid) { cfg: makeNormalConfig("vivid"), inexactMatch: true, @@ -376,7 +384,7 @@ `, }, - // CentOS non state server with systemd + // CentOS non controller with systemd { cfg: makeNormalConfig("centos7"), inexactMatch: true, @@ -422,7 +430,19 @@ setEnvConfig: true, inexactMatch: true, expectScripts: ` -/var/lib/juju/tools/1\.2\.3-precise-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --env-config '[^']*' --instance-id 'i-bootstrap' --debug +/var/lib/juju/tools/1\.2\.3-precise-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --model-config '[^']*' --instance-id 'i-bootstrap' --constraints 'mem=2048M' --debug +`, }, + + // empty environ constraints.
+ { + cfg: makeBootstrapConfig("precise").mutate(func(cfg *testInstanceConfig) { + cfg.EnvironConstraints = constraints.Value{} + }), + setEnvConfig: true, + inexactMatch: true, + expectScripts: ` +/var/lib/juju/tools/1\.2\.3-precise-amd64/jujud bootstrap-state --data-dir '/var/lib/juju' --model-config '[^']*' --instance-id 'i-bootstrap' --bootstrap-constraints 'mem=4096M' --debug `, }, @@ -491,10 +511,10 @@ return cfg } -// check that any --env-config $base64 is valid and matches t.cfg.Config +// check that any --model-config $base64 is valid and matches t.cfg.Config func checkEnvConfig(c *gc.C, cfg *config.Config, x map[interface{}]interface{}, scripts []string) { c.Assert(scripts, gc.Not(gc.HasLen), 0) - re := regexp.MustCompile(`--env-config '([^']+)'`) + re := regexp.MustCompile(`--model-config '([^']+)'`) found := false for _, s := range scripts { m := re.FindStringSubmatch(s) @@ -520,9 +540,9 @@ c.Logf("test %d", i) var envConfig *config.Config if test.setEnvConfig { - envConfig = minimalEnvironConfig(c) + envConfig = minimalModelConfig(c) } - testConfig := test.cfg.maybeSetEnvironConfig(envConfig).render() + testConfig := test.cfg.maybeSetModelConfig(envConfig).render() ci, err := cloudinit.New(testConfig.Series) c.Assert(err, jc.ErrorIsNil) udata, err := cloudconfig.NewUserdataConfig(&testConfig, ci) @@ -575,7 +595,7 @@ func (*cloudinitSuite) TestCloudInitConfigure(c *gc.C) { for i, test := range cloudinitTests { - testConfig := test.cfg.maybeSetEnvironConfig(minimalEnvironConfig(c)).render() + testConfig := test.cfg.maybeSetModelConfig(minimalModelConfig(c)).render() c.Logf("test %d (Configure)", i) cloudcfg, err := cloudinit.New(testConfig.Series) c.Assert(err, jc.ErrorIsNil) @@ -588,8 +608,8 @@ func (*cloudinitSuite) TestCloudInitConfigureBootstrapLogging(c *gc.C) { loggo.GetLogger("").SetLogLevel(loggo.INFO) - envConfig := minimalEnvironConfig(c) - instConfig := makeBootstrapConfig("quantal").maybeSetEnvironConfig(envConfig) + envConfig := minimalModelConfig(c) + instConfig := makeBootstrapConfig("quantal").maybeSetModelConfig(envConfig) rendered := instConfig.render() cloudcfg, err := cloudinit.New(rendered.Series) c.Assert(err, jc.ErrorIsNil) @@ -610,8 +630,9 @@ c.Logf("scripts[%d]: %q", i, script) } } - expected := "jujud bootstrap-state --data-dir '.*' --env-config '.*'" + - " --instance-id '.*' --constraints 'mem=2048M' --show-log" + expected := "jujud bootstrap-state --data-dir '.*' --model-config '.*'" + + " --instance-id '.*' --bootstrap-constraints 'mem=4096M'" + + " --constraints 'mem=2048M' --show-log" assertScriptMatch(c, scripts, expected, false) } @@ -621,8 +642,8 @@ c.Assert(err, jc.ErrorIsNil) script := "test script" cloudcfg.AddRunCmd(script) - envConfig := minimalEnvironConfig(c) - testConfig := cloudinitTests[0].cfg.maybeSetEnvironConfig(envConfig).render() + envConfig := minimalModelConfig(c) + testConfig := cloudinitTests[0].cfg.maybeSetModelConfig(envConfig).render() udata, err := cloudconfig.NewUserdataConfig(&testConfig, cloudcfg) c.Assert(err, jc.ErrorIsNil) err = udata.Configure() @@ -779,7 +800,7 @@ {"invalid machine id", func(cfg *instancecfg.InstanceConfig) { cfg.MachineId = "-1" }}, - {"missing environment configuration", func(cfg *instancecfg.InstanceConfig) { + {"missing model configuration", func(cfg *instancecfg.InstanceConfig) { cfg.Config = nil }}, {"missing state info", func(cfg *instancecfg.InstanceConfig) { @@ -788,7 +809,7 @@ {"missing API info", func(cfg *instancecfg.InstanceConfig) { cfg.APIInfo = nil }}, - {"missing environment tag", 
func(cfg *instancecfg.InstanceConfig) { + {"missing model tag", func(cfg *instancecfg.InstanceConfig) { cfg.APIInfo = &api.Info{ Addrs: []string{"foo:35"}, Tag: names.NewMachineTag("99"), @@ -804,10 +825,10 @@ }, } cfg.APIInfo = &api.Info{ - Addrs: []string{"foo:35"}, - Tag: names.NewMachineTag("99"), - CACert: testing.CACert, - EnvironTag: testing.EnvironmentTag, + Addrs: []string{"foo:35"}, + Tag: names.NewMachineTag("99"), + CACert: testing.CACert, + ModelTag: testing.ModelTag, } }}, {"missing API hosts", func(cfg *instancecfg.InstanceConfig) { @@ -820,9 +841,9 @@ Tag: names.NewMachineTag("99"), } cfg.APIInfo = &api.Info{ - Tag: names.NewMachineTag("99"), - CACert: testing.CACert, - EnvironTag: testing.EnvironmentTag, + Tag: names.NewMachineTag("99"), + CACert: testing.CACert, + ModelTag: testing.ModelTag, } }}, {"missing CA certificate", func(cfg *instancecfg.InstanceConfig) { @@ -837,12 +858,12 @@ }, } }}, - {"missing state server certificate", func(cfg *instancecfg.InstanceConfig) { + {"missing controller certificate", func(cfg *instancecfg.InstanceConfig) { info := *cfg.StateServingInfo info.Cert = "" cfg.StateServingInfo = &info }}, - {"missing state server private key", func(cfg *instancecfg.InstanceConfig) { + {"missing controller private key", func(cfg *instancecfg.InstanceConfig) { info := *cfg.StateServingInfo info.PrivateKey = "" cfg.StateServingInfo = &info @@ -901,12 +922,12 @@ info.Tag = nil cfg.APIInfo = &info }}, - {"entity tag must be nil when starting a state server", func(cfg *instancecfg.InstanceConfig) { + {"entity tag must be nil when starting a controller", func(cfg *instancecfg.InstanceConfig) { info := *cfg.MongoInfo info.Tag = names.NewMachineTag("0") cfg.MongoInfo = &info }}, - {"entity tag must be nil when starting a state server", func(cfg *instancecfg.InstanceConfig) { + {"entity tag must be nil when starting a controller", func(cfg *instancecfg.InstanceConfig) { info := *cfg.APIInfo info.Tag = names.NewMachineTag("0") cfg.APIInfo = &info @@ -950,13 +971,14 @@ Password: "password", }, APIInfo: &api.Info{ - Addrs: []string{"host:9999"}, - CACert: testing.CACert, - EnvironTag: testing.EnvironmentTag, + Addrs: []string{"host:9999"}, + CACert: testing.CACert, + ModelTag: testing.ModelTag, }, - Config: minimalEnvironConfig(c), + Config: minimalModelConfig(c), DataDir: jujuDataDir("quantal"), LogDir: jujuLogDir("quantal"), + MetricsSpoolDir: metricsSpoolDir("quantal"), Jobs: normalMachineJobs, CloudInitOutputLog: cloudInitOutputLog("quantal"), InstanceId: "i-bootstrap", @@ -1004,7 +1026,7 @@ } func (s *cloudinitSuite) TestAptProxyNotWrittenIfNotSet(c *gc.C) { - environConfig := minimalEnvironConfig(c) + environConfig := minimalModelConfig(c) instanceCfg := s.createInstanceConfig(c, environConfig) cloudcfg, err := cloudinit.New("quantal") c.Assert(err, jc.ErrorIsNil) @@ -1018,7 +1040,7 @@ } func (s *cloudinitSuite) TestAptProxyWritten(c *gc.C) { - environConfig := minimalEnvironConfig(c) + environConfig := minimalModelConfig(c) environConfig, err := environConfig.Apply(map[string]interface{}{ "apt-http-proxy": "http://user@10.0.0.1", }) @@ -1037,7 +1059,7 @@ } func (s *cloudinitSuite) TestProxyWritten(c *gc.C) { - environConfig := minimalEnvironConfig(c) + environConfig := minimalModelConfig(c) environConfig, err := environConfig.Apply(map[string]interface{}{ "http-proxy": "http://user@10.0.0.1", "no-proxy": "localhost,10.0.3.1", @@ -1075,7 +1097,7 @@ } func (s *cloudinitSuite) TestAptMirror(c *gc.C) { - environConfig := minimalEnvironConfig(c) + environConfig := 
minimalModelConfig(c) environConfig, err := environConfig.Apply(map[string]interface{}{ "apt-mirror": "http://my.archive.ubuntu.com/ubuntu", }) @@ -1084,7 +1106,7 @@ } func (s *cloudinitSuite) TestAptMirrorNotSet(c *gc.C) { - environConfig := minimalEnvironConfig(c) + environConfig := minimalModelConfig(c) s.testAptMirror(c, environConfig, "") } @@ -1170,7 +1192,8 @@ command := cloudconfig.ToolsDownloadCommand("download", []string{"a", "b", "c"}) expected := ` -for n in $(seq 5); do +n=1 +while true; do printf "Attempt $n to download tools from %s...\n" 'a' download 'a' && echo "Tools downloaded successfully." && break @@ -1181,10 +1204,70 @@ printf "Attempt $n to download tools from %s...\n" 'c' download 'c' && echo "Tools downloaded successfully." && break - if [ $n -lt 5 ]; then - echo "Download failed..... wait 15s" - fi + echo "Download failed, retrying in 15s" sleep 15 + n=$((n+1)) done` c.Assert(command, gc.Equals, expected) } + +func expectedUbuntuUser(groups, keys []string) map[string]interface{} { + user := map[string]interface{}{ + "name": "ubuntu", + "lock_passwd": true, + "shell": "/bin/bash", + "sudo": []interface{}{"ALL=(ALL) NOPASSWD:ALL"}, + } + if groups != nil { + user["groups"] = groups + } + if keys != nil { + user["ssh-authorized-keys"] = keys + } + return map[string]interface{}{ + "users": []map[string]interface{}{user}, + } +} + +func (*cloudinitSuite) TestSetUbuntuUserPrecise(c *gc.C) { + ci, err := cloudinit.New("precise") + c.Assert(err, jc.ErrorIsNil) + cloudconfig.SetUbuntuUser(ci, "akey") + data, err := ci.RenderYAML() + c.Assert(err, jc.ErrorIsNil) + expected := map[string]interface{}{"ssh_authorized_keys": []string{ + "akey", + }} + c.Assert(string(data), jc.YAMLEquals, expected) +} + +func (*cloudinitSuite) TestSetUbuntuUserPreciseNoKeys(c *gc.C) { + ci, err := cloudinit.New("precise") + c.Assert(err, jc.ErrorIsNil) + cloudconfig.SetUbuntuUser(ci, "") + data, err := ci.RenderYAML() + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), jc.YAMLEquals, map[string]interface{}{}) +} + +func (*cloudinitSuite) TestSetUbuntuUserQuantal(c *gc.C) { + ci, err := cloudinit.New("quantal") + c.Assert(err, jc.ErrorIsNil) + cloudconfig.SetUbuntuUser(ci, "akey") + data, err := ci.RenderYAML() + c.Assert(err, jc.ErrorIsNil) + keys := []string{"akey"} + expected := expectedUbuntuUser(cloudconfig.UbuntuGroups, keys) + c.Assert(string(data), jc.YAMLEquals, expected) +} + +func (*cloudinitSuite) TestSetUbuntuUserCentOS(c *gc.C) { + ci, err := cloudinit.New("centos7") + c.Assert(err, jc.ErrorIsNil) + cloudconfig.SetUbuntuUser(ci, "akey\n#also\nbkey") + data, err := ci.RenderYAML() + c.Assert(err, jc.ErrorIsNil) + keys := []string{"akey", "bkey"} + expected := expectedUbuntuUser(cloudconfig.CentOSGroups, keys) + c.Assert(string(data), jc.YAMLEquals, expected) +} === modified file 'src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go' --- src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/cloudconfig/userdatacfg_unix.go 2016-03-22 15:18:22 +0000 @@ -19,9 +19,9 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "github.com/juju/utils" + "github.com/juju/utils/os" "github.com/juju/utils/proxy" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/agent" "github.com/juju/juju/cloudconfig/cloudinit" @@ -31,17 +31,12 @@ "github.com/juju/juju/service" "github.com/juju/juju/service/systemd" "github.com/juju/juju/service/upstart" - "github.com/juju/juju/version" ) const ( 
// curlCommand is the base curl command used to download tools. curlCommand = "curl -sSfw 'tools from %{url_effective} downloaded: HTTP %{http_code}; time %{time_total}s; size %{size_download} bytes; speed %{speed_download} bytes/s '" - // toolsDownloadAttempts is the number of attempts to make for - // each tools URL when downloading tools. - toolsDownloadAttempts = 5 - // toolsDownloadWaitTime is the number of seconds to wait between // each iterations of download attempts. toolsDownloadWaitTime = 15 @@ -49,18 +44,29 @@ // toolsDownloadTemplate is a bash template that generates a // bash command to cycle through a list of URLs to download tools. toolsDownloadTemplate = `{{$curl := .ToolsDownloadCommand}} -for n in $(seq {{.ToolsDownloadAttempts}}); do +n=1 +while true; do {{range .URLs}} printf "Attempt $n to download tools from %s...\n" {{shquote .}} {{$curl}} {{shquote .}} && echo "Tools downloaded successfully." && break {{end}} - if [ $n -lt {{.ToolsDownloadAttempts}} ]; then - echo "Download failed..... wait {{.ToolsDownloadWaitTime}}s" - fi + echo "Download failed, retrying in {{.ToolsDownloadWaitTime}}s" sleep {{.ToolsDownloadWaitTime}} + n=$((n+1)) done` ) +var ( + // UbuntuGroups is the set of unix groups to add the "ubuntu" user to + // when initializing an Ubuntu system. + UbuntuGroups = []string{"adm", "audio", "cdrom", "dialout", "dip", + "floppy", "netdev", "plugdev", "sudo", "video"} + + // CentOSGroups is the set of unix groups to add the "ubuntu" user to + // when initializing a CentOS system. + CentOSGroups = []string{"adm", "systemd-journal", "wheel"} +) + type unixConfigure struct { baseConfigure } @@ -93,8 +99,7 @@ "set -xe", // ensure we run all the scripts or abort. ) switch w.os { - case version.Ubuntu: - w.conf.AddSSHAuthorizedKeys(w.icfg.AuthorizedKeys) + case os.Ubuntu: if w.icfg.Tools != nil { initSystem, err := service.VersionInitSystem(w.icfg.Series) if err != nil { @@ -102,15 +107,8 @@ } w.addCleanShutdownJob(initSystem) } - // On unix systems that are not ubuntu we create an ubuntu user so that we - // are able to ssh in the machine and have all the functionality dependant - // on having an ubuntu user there. - // Hopefully in the future we are going to move all the distirbutions to - // having a "juju" user - case version.CentOS: + case os.CentOS: w.conf.AddScripts( - fmt.Sprintf(initUbuntuScript, utils.ShQuote(w.icfg.AuthorizedKeys)), - // Mask and stop firewalld, if enabled, so it cannot start. See // http://pad.lv/1492066. firewalld might be missing, in which case // is-enabled and is-active prints an error, which is why the output @@ -122,6 +120,7 @@ ) w.addCleanShutdownJob(service.InitSystemSystemd) } + SetUbuntuUser(w.conf, w.icfg.AuthorizedKeys) w.conf.SetOutput(cloudinit.OutAll, "| tee -a "+w.icfg.CloudInitOutputLog, "") // Create a file in a well-defined location containing the machine's // nonce. The presence and contents of this file will be verified @@ -148,10 +147,9 @@ } func (w *unixConfigure) setDataDirPermissions() string { - os, _ := version.GetOSFromSeries(w.icfg.Series) var user string - switch os { - case version.CentOS: + switch w.os { + case os.CentOS: user = "root" default: user = "syslog" @@ -194,7 +192,7 @@ w.conf.AddScripts( // We look to see if the proxy line is there already as // the manual provider may have had it already. The ubuntu - // user may not exist (local provider only). + // user may not exist. `([ ! 
-e /home/ubuntu/.profile ] || grep -q '.juju-proxy' /home/ubuntu/.profile) || ` + `printf '\n# Added by juju\n[ -f "$HOME/.juju-proxy" ] && . "$HOME/.juju-proxy"\n' >> /home/ubuntu/.profile`) if (w.icfg.ProxySettings != proxy.Settings{}) { @@ -207,7 +205,7 @@ } if w.icfg.PublicImageSigningKey != "" { - keyFile := filepath.Join(agent.DefaultConfDir, simplestreams.SimplestreamsPublicKeyFile) + keyFile := filepath.Join(agent.DefaultPaths.ConfDir, simplestreams.SimplestreamsPublicKeyFile) w.conf.AddRunTextFile(keyFile, w.icfg.PublicImageSigningKey, 0644) } @@ -248,13 +246,13 @@ urls = append(urls, w.icfg.Tools.URL) } else { for _, addr := range w.icfg.ApiHostAddrs() { - // TODO(axw) encode env UUID in URL when EnvironTag + // TODO(axw) encode env UUID in URL when ModelTag // is guaranteed to be available in APIInfo. url := fmt.Sprintf("https://%s/tools/%s", addr, w.icfg.Tools.Version) urls = append(urls, url) } - // Don't go through the proxy when downloading tools from the state servers + // Don't go through the proxy when downloading tools from the controllers curlCommand += ` --noproxy "*"` // Our API server certificates are unusable by curl (invalid subject name), @@ -321,9 +319,13 @@ metadataDir = " --image-metadata " + shquote(metadataDir) } - cons := w.icfg.Constraints.String() - if cons != "" { - cons = " --constraints " + shquote(cons) + bootstrapCons := w.icfg.Constraints.String() + if bootstrapCons != "" { + bootstrapCons = " --bootstrap-constraints " + shquote(bootstrapCons) + } + environCons := w.icfg.EnvironConstraints.String() + if environCons != "" { + environCons = " --constraints " + shquote(environCons) } var hardware string if w.icfg.HardwareCharacteristics != nil { @@ -342,10 +344,11 @@ // The bootstrapping is always run with debug on. w.icfg.JujuTools() + "/jujud bootstrap-state" + " --data-dir " + shquote(w.icfg.DataDir) + - " --env-config " + shquote(base64yaml(w.icfg.Config)) + + " --model-config " + shquote(base64yaml(w.icfg.Config)) + " --instance-id " + shquote(string(w.icfg.InstanceId)) + hardware + - cons + + bootstrapCons + + environCons + metadataDir + loggingOption, ) @@ -366,7 +369,6 @@ var buf bytes.Buffer err := parsedTemplate.Execute(&buf, map[string]interface{}{ "ToolsDownloadCommand": curlCommand, - "ToolsDownloadAttempts": toolsDownloadAttempts, "ToolsDownloadWaitTime": toolsDownloadWaitTime, "URLs": urls, }) @@ -384,17 +386,3 @@ } return base64.StdEncoding.EncodeToString(data) } - -const initUbuntuScript = ` -set -e -(id ubuntu &> /dev/null) || useradd -m ubuntu -s /bin/bash -umask 0077 -temp=$(mktemp) -echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > $temp -install -m 0440 $temp /etc/sudoers.d/90-juju-ubuntu -rm $temp -su ubuntu -c 'install -D -m 0600 /dev/null ~/.ssh/authorized_keys' -export authorized_keys=%s -if [ ! 
-z "$authorized_keys" ]; then - su ubuntu -c 'printf "%%s\n" "$authorized_keys" >> ~/.ssh/authorized_keys' -fi` === modified file 'src/github.com/juju/juju/cloudconfig/windows_userdata_test.go' --- src/github.com/juju/juju/cloudconfig/windows_userdata_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/cloudconfig/windows_userdata_test.go 2016-03-22 15:18:22 +0000 @@ -19,12 +19,11 @@ $ErrorActionPreference = "Stop" -function ExecRetry($command, $maxRetryCount = 10, $retryInterval=2) +function ExecRetry($command, $retryInterval = 15) { $currErrorActionPreference = $ErrorActionPreference $ErrorActionPreference = "Continue" - $retryCount = 0 while ($true) { try @@ -34,17 +33,8 @@ } catch [System.Exception] { - $retryCount++ - if ($retryCount -ge $maxRetryCount) - { - $ErrorActionPreference = $currErrorActionPreference - throw - } - else - { - Write-Error $_.Exception - Start-Sleep $retryInterval - } + Write-Error $_.Exception + Start-Sleep $retryInterval } } @@ -837,7 +827,7 @@ New-ItemProperty $path -Name "jujud" -Value 0 -PropertyType "DWord" $secpasswd = ConvertTo-SecureString $juju_passwd -AsPlainText -Force -$jujuCreds = New-Object System.Management.Automation.PSCredential (".\jujud", $secpasswd) +$jujuCreds = New-Object System.Management.Automation.PSCredential ($juju_user, $secpasswd) mkdir -Force "C:\Juju" @@ -886,6 +876,7 @@ tag: machine-10 datadir: C:/Juju/lib/juju logdir: C:/Juju/log/juju +metricsspooldir: C:/Juju/lib/juju/metricspool nonce: FAKE_NONCE jobs: - JobHostUnits @@ -905,13 +896,14 @@ -----END CERTIFICATE----- stateaddresses: - state-addr.testing.invalid:12345 -environment: environment-deadbeef-0bad-400d-8000-4b1d0d06f00d +model: model-deadbeef-0bad-400d-8000-4b1d0d06f00d apiaddresses: - state-addr.testing.invalid:54321 oldpassword: arble values: AGENT_SERVICE_NAME: jujud-machine-10 PROVIDER_TYPE: dummy +mongoversion: "0.0" "@ cmd.exe /C mklink /D C:\Juju\lib\juju\tools\machine-10 1.2.3-win8-amd64 === removed directory 'src/github.com/juju/juju/cmd/envcmd' === removed file 'src/github.com/juju/juju/cmd/envcmd/environmentcommand.go' --- src/github.com/juju/juju/cmd/envcmd/environmentcommand.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/environmentcommand.go 1970-01-01 00:00:00 +0000 @@ -1,368 +0,0 @@ -// Copyright 2013-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package envcmd - -import ( - "io" - "os" - "strconv" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/loggo" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/configstore" - "github.com/juju/juju/juju" - "github.com/juju/juju/juju/osenv" - "github.com/juju/juju/version" -) - -var logger = loggo.GetLogger("juju.cmd.envcmd") - -// ErrNoEnvironmentSpecified is returned by commands that operate on -// an environment if there is no current environment, no environment -// has been explicitly specified, and there is no default environment. -var ErrNoEnvironmentSpecified = errors.New("no environment specified") - -// GetDefaultEnvironment returns the name of the Juju default environment. -// There is simple ordering for the default environment. Firstly check the -// JUJU_ENV environment variable. If that is set, it gets used. If it isn't -// set, look in the $JUJU_HOME/current-environment file. If neither are -// available, read environments.yaml and use the default environment therein. 
-// If no default is specified in the environments file, an empty string is returned. -// Not having a default environment specified is not an error. -func GetDefaultEnvironment() (string, error) { - if defaultEnv := os.Getenv(osenv.JujuEnvEnvKey); defaultEnv != "" { - return defaultEnv, nil - } - if currentEnv, err := ReadCurrentEnvironment(); err != nil { - return "", errors.Trace(err) - } else if currentEnv != "" { - return currentEnv, nil - } - if currentSystem, err := ReadCurrentSystem(); err != nil { - return "", errors.Trace(err) - } else if currentSystem != "" { - return "", errors.Errorf("not operating on an environment, using system %q", currentSystem) - } - envs, err := environs.ReadEnvirons("") - if environs.IsNoEnv(err) { - // That's fine, not an error here. - return "", nil - } else if err != nil { - return "", errors.Trace(err) - } - return envs.Default, nil -} - -// EnvironCommand extends cmd.Command with a SetEnvName method. -type EnvironCommand interface { - cmd.Command - - // SetEnvName is called prior to the wrapped command's Init method - // with the active environment name. The environment name is guaranteed - // to be non-empty at entry of Init. - SetEnvName(envName string) -} - -// EnvCommandBase is a convenience type for embedding in commands -// that wish to implement EnvironCommand. -type EnvCommandBase struct { - cmd.CommandBase - // EnvName will very soon be package visible only as we want to be able - // to specify an environment in multiple ways, and not always referencing - // a file on disk based on the EnvName or the environemnts.yaml file. - envName string - - // compatVersion defines the minimum CLI version - // that this command should be compatible with. - compatVerson *int - - envGetterClient EnvironmentGetter - envGetterErr error -} - -func (c *EnvCommandBase) SetEnvName(envName string) { - c.envName = envName -} - -func (c *EnvCommandBase) NewAPIClient() (*api.Client, error) { - root, err := c.NewAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return root.Client(), nil -} - -// NewEnvironmentGetter returns a new object which implements the -// EnvironmentGetter interface. -func (c *EnvCommandBase) NewEnvironmentGetter() (EnvironmentGetter, error) { - if c.envGetterErr != nil { - return nil, c.envGetterErr - } - - if c.envGetterClient != nil { - return c.envGetterClient, nil - } - - return c.NewAPIClient() -} - -func (c *EnvCommandBase) NewAPIRoot() (api.Connection, error) { - // This is work in progress as we remove the EnvName from downstream code. - // We want to be able to specify the environment in a number of ways, one of - // which is the connection name on the client machine. - if c.envName == "" { - return nil, errors.Trace(ErrNoEnvironmentSpecified) - } - return juju.NewAPIFromName(c.envName) -} - -// Config returns the configuration for the environment; obtaining bootstrap -// information from the API if necessary. If callers already have an active -// client API connection, it will be used. Otherwise, a new API connection will -// be used if necessary. 
-func (c *EnvCommandBase) Config(store configstore.Storage, client EnvironmentGetter) (*config.Config, error) { - if c.envName == "" { - return nil, errors.Trace(ErrNoEnvironmentSpecified) - } - cfg, _, err := environs.ConfigForName(c.envName, store) - if err == nil { - return cfg, nil - } else if !environs.IsEmptyConfig(err) { - return nil, errors.Trace(err) - } - - if client == nil { - client, err = c.NewEnvironmentGetter() - if err != nil { - return nil, errors.Trace(err) - } - defer client.Close() - } - - bootstrapCfg, err := client.EnvironmentGet() - if err != nil { - return nil, errors.Trace(err) - } - return config.New(config.NoDefaults, bootstrapCfg) -} - -// ConnectionCredentials returns the credentials used to connect to the API for -// the specified environment. -func (c *EnvCommandBase) ConnectionCredentials() (configstore.APICredentials, error) { - // TODO: the user may soon be specified through the command line - // or through an environment setting, so return these when they are ready. - var emptyCreds configstore.APICredentials - if c.envName == "" { - return emptyCreds, errors.Trace(ErrNoEnvironmentSpecified) - } - info, err := ConnectionInfoForName(c.envName) - if err != nil { - return emptyCreds, errors.Trace(err) - } - return info.APICredentials(), nil -} - -// ConnectionEndpoint returns the end point information used to -// connect to the API for the specified environment. -func (c *EnvCommandBase) ConnectionEndpoint(refresh bool) (configstore.APIEndpoint, error) { - // TODO: the endpoint information may soon be specified through the command line - // or through an environment setting, so return these when they are ready. - // NOTE: refresh when specified through command line should error. - var emptyEndpoint configstore.APIEndpoint - if c.envName == "" { - return emptyEndpoint, errors.Trace(ErrNoEnvironmentSpecified) - } - info, err := ConnectionInfoForName(c.envName) - if err != nil { - return emptyEndpoint, errors.Trace(err) - } - endpoint := info.APIEndpoint() - if !refresh && len(endpoint.Addresses) > 0 { - logger.Debugf("found cached addresses, not connecting to API server") - return endpoint, nil - } - - // We need to connect to refresh our endpoint settings - // The side effect of connecting is that we update the store with new API information - refresher, err := endpointRefresher(c) - if err != nil { - return emptyEndpoint, err - } - refresher.Close() - - info, err = ConnectionInfoForName(c.envName) - if err != nil { - return emptyEndpoint, err - } - return info.APIEndpoint(), nil -} - -// ConnectionWriter defines the methods needed to write information about -// a given connection. This is a subset of the methods in the interface -// defined in configstore.EnvironInfo. -type ConnectionWriter interface { - Write() error - SetAPICredentials(configstore.APICredentials) - SetAPIEndpoint(configstore.APIEndpoint) - SetBootstrapConfig(map[string]interface{}) - Location() string -} - -var endpointRefresher = func(c *EnvCommandBase) (io.Closer, error) { - return c.NewAPIRoot() -} - -var getConfigStore = func() (configstore.Storage, error) { - store, err := configstore.Default() - if err != nil { - return nil, errors.Trace(err) - } - return store, nil -} - -// ConnectionInfoForName reads the environment information for the named -// environment (envName) and returns it. 
-func ConnectionInfoForName(envName string) (configstore.EnvironInfo, error) { - store, err := getConfigStore() - if err != nil { - return nil, errors.Trace(err) - } - info, err := store.ReadInfo(envName) - if err != nil { - return nil, errors.Trace(err) - } - return info, nil -} - -// ConnectionWriter returns an instance that is able to be used -// to record information about the connection. When the connection -// is determined through either command line parameters or environment -// variables, an error is returned. -func (c *EnvCommandBase) ConnectionWriter() (ConnectionWriter, error) { - // TODO: when accessing with just command line params or environment - // variables, this should error. - if c.envName == "" { - return nil, errors.Trace(ErrNoEnvironmentSpecified) - } - return ConnectionInfoForName(c.envName) -} - -// CompatVersion returns the minimum CLI version -// that this command should be compatible with. -func (c *EnvCommandBase) CompatVersion() int { - if c.compatVerson != nil { - return *c.compatVerson - } - compatVerson := 1 - val := os.Getenv(osenv.JujuCLIVersion) - if val != "" { - vers, err := strconv.Atoi(val) - if err != nil { - logger.Warningf("invalid %s value: %v", osenv.JujuCLIVersion, val) - } else { - compatVerson = vers - } - } - c.compatVerson = &compatVerson - return *c.compatVerson -} - -// ConnectionName returns the name of the connection if there is one. -// It is possible that the name of the connection is empty if the -// connection information is supplied through command line arguments -// or environment variables. -func (c *EnvCommandBase) ConnectionName() string { - return c.envName -} - -// Wrap wraps the specified EnvironCommand, returning a Command -// that proxies to each of the EnvironCommand methods. -func Wrap(c EnvironCommand) cmd.Command { - return &environCommandWrapper{EnvironCommand: c} -} - -type environCommandWrapper struct { - EnvironCommand - envName string -} - -func (w *environCommandWrapper) SetFlags(f *gnuflag.FlagSet) { - f.StringVar(&w.envName, "e", "", "juju environment to operate in") - f.StringVar(&w.envName, "environment", "", "") - w.EnvironCommand.SetFlags(f) -} - -func (w *environCommandWrapper) Init(args []string) error { - if w.envName == "" { - // Look for the default. - defaultEnv, err := GetDefaultEnvironment() - if err != nil { - return err - } - w.envName = defaultEnv - } - w.SetEnvName(w.envName) - return w.EnvironCommand.Init(args) -} - -type bootstrapContext struct { - *cmd.Context - verifyCredentials bool -} - -// ShouldVerifyCredentials implements BootstrapContext.ShouldVerifyCredentials -func (ctx *bootstrapContext) ShouldVerifyCredentials() bool { - return ctx.verifyCredentials -} - -// BootstrapContext returns a new BootstrapContext constructed from a command Context. -func BootstrapContext(cmdContext *cmd.Context) environs.BootstrapContext { - return &bootstrapContext{ - Context: cmdContext, - verifyCredentials: true, - } -} - -// BootstrapContextNoVerify returns a new BootstrapContext constructed from a command Context -// where the validation of credentials is false. -func BootstrapContextNoVerify(cmdContext *cmd.Context) environs.BootstrapContext { - return &bootstrapContext{ - Context: cmdContext, - verifyCredentials: false, - } -} - -type EnvironmentGetter interface { - EnvironmentGet() (map[string]interface{}, error) - Close() error -} - -// GetEnvironmentVersion retrieves the environment's agent-version -// value from an API client. 
-func GetEnvironmentVersion(client EnvironmentGetter) (version.Number, error) { - noVersion := version.Number{} - attrs, err := client.EnvironmentGet() - if err != nil { - return noVersion, errors.Annotate(err, "unable to retrieve environment config") - } - vi, found := attrs["agent-version"] - if !found { - return noVersion, errors.New("version not found in environment config") - } - vs, ok := vi.(string) - if !ok { - return noVersion, errors.New("invalid environment version type in config") - } - v, err := version.Parse(vs) - if err != nil { - return noVersion, errors.Annotate(err, "unable to parse environment version") - } - return v, nil -} === removed file 'src/github.com/juju/juju/cmd/envcmd/environmentcommand_test.go' --- src/github.com/juju/juju/cmd/envcmd/environmentcommand_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/environmentcommand_test.go 1970-01-01 00:00:00 +0000 @@ -1,426 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package envcmd_test - -import ( - "io" - "os" - - "github.com/juju/cmd" - "github.com/juju/cmd/cmdtesting" - "github.com/juju/errors" - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/cmd/envcmd" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/configstore" - "github.com/juju/juju/juju/osenv" - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type EnvironmentCommandSuite struct { - testing.FakeJujuHomeSuite -} - -func (s *EnvironmentCommandSuite) SetUpTest(c *gc.C) { - s.FakeJujuHomeSuite.SetUpTest(c) - s.PatchEnvironment("JUJU_CLI_VERSION", "") -} - -var _ = gc.Suite(&EnvironmentCommandSuite{}) - -func (s *EnvironmentCommandSuite) TestGetDefaultEnvironment(c *gc.C) { - env, err := envcmd.GetDefaultEnvironment() - c.Assert(env, gc.Equals, "erewhemos") - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironmentCommandSuite) TestGetDefaultEnvironmentNothingSet(c *gc.C) { - envPath := gitjujutesting.HomePath(".juju", "environments.yaml") - err := os.Remove(envPath) - c.Assert(err, jc.ErrorIsNil) - env, err := envcmd.GetDefaultEnvironment() - c.Assert(env, gc.Equals, "") - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironmentCommandSuite) TestGetDefaultEnvironmentCurrentEnvironmentSet(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - env, err := envcmd.GetDefaultEnvironment() - c.Assert(env, gc.Equals, "fubar") - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironmentCommandSuite) TestGetDefaultEnvironmentJujuEnvSet(c *gc.C) { - os.Setenv(osenv.JujuEnvEnvKey, "magic") - env, err := envcmd.GetDefaultEnvironment() - c.Assert(env, gc.Equals, "magic") - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironmentCommandSuite) TestGetDefaultEnvironmentBothSet(c *gc.C) { - os.Setenv(osenv.JujuEnvEnvKey, "magic") - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - env, err := envcmd.GetDefaultEnvironment() - c.Assert(env, gc.Equals, "magic") - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitExplicit(c *gc.C) { - // Take environment name from command line arg. - testEnsureEnvName(c, "explicit", "-e", "explicit") -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitMultipleConfigs(c *gc.C) { - // Take environment name from the default. 
- testing.WriteEnvironments(c, testing.MultipleEnvConfig) - testEnsureEnvName(c, testing.SampleEnvName) -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitSingleConfig(c *gc.C) { - // Take environment name from the one and only environment, - // even if it is not explicitly marked as default. - testing.WriteEnvironments(c, testing.SingleEnvConfigNoDefault) - testEnsureEnvName(c, testing.SampleEnvName) -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitEnvFile(c *gc.C) { - // If there is a current-environment file, use that. - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - testEnsureEnvName(c, "fubar") -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitSystemFile(c *gc.C) { - // If there is a current-system file, error raised. - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - _, err = initTestCommand(c) - c.Assert(err, gc.ErrorMatches, `not operating on an environment, using system "fubar"`) -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitNoEnvFile(c *gc.C) { - envPath := gitjujutesting.HomePath(".juju", "environments.yaml") - err := os.Remove(envPath) - c.Assert(err, jc.ErrorIsNil) - testEnsureEnvName(c, "") -} - -func (s *EnvironmentCommandSuite) TestEnvironCommandInitMultipleConfigNoDefault(c *gc.C) { - // If there are multiple environments but no default, the connection name is empty. - testing.WriteEnvironments(c, testing.MultipleEnvConfigNoDefault) - testEnsureEnvName(c, "") -} - -func (s *EnvironmentCommandSuite) TestBootstrapContext(c *gc.C) { - ctx := envcmd.BootstrapContext(&cmd.Context{}) - c.Assert(ctx.ShouldVerifyCredentials(), jc.IsTrue) -} - -func (s *EnvironmentCommandSuite) TestBootstrapContextNoVerify(c *gc.C) { - ctx := envcmd.BootstrapContextNoVerify(&cmd.Context{}) - c.Assert(ctx.ShouldVerifyCredentials(), jc.IsFalse) -} - -func (s *EnvironmentCommandSuite) TestCompatVersion(c *gc.C) { - s.PatchEnvironment(osenv.JujuCLIVersion, "2") - cmd, err := initTestCommand(c) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmd.CompatVersion(), gc.Equals, 2) -} - -func (s *EnvironmentCommandSuite) TestCompatVersionDefault(c *gc.C) { - cmd, err := initTestCommand(c) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmd.CompatVersion(), gc.Equals, 1) -} - -func (s *EnvironmentCommandSuite) TestCompatVersionInvalid(c *gc.C) { - s.PatchEnvironment(osenv.JujuCLIVersion, "invalid") - cmd, err := initTestCommand(c) - c.Assert(err, jc.ErrorIsNil) - c.Assert(cmd.CompatVersion(), gc.Equals, 1) -} - -type testCommand struct { - envcmd.EnvCommandBase -} - -func (c *testCommand) Info() *cmd.Info { - panic("should not be called") -} - -func (c *testCommand) Run(ctx *cmd.Context) error { - panic("should not be called") -} - -func initTestCommand(c *gc.C, args ...string) (*testCommand, error) { - cmd := new(testCommand) - wrapped := envcmd.Wrap(cmd) - return cmd, cmdtesting.InitCommand(wrapped, args) -} - -func testEnsureEnvName(c *gc.C, expect string, args ...string) { - cmd, err := initTestCommand(c, args...) 
- c.Assert(err, jc.ErrorIsNil) - c.Assert(cmd.ConnectionName(), gc.Equals, expect) -} - -type ConnectionEndpointSuite struct { - testing.FakeJujuHomeSuite - store configstore.Storage - endpoint configstore.APIEndpoint -} - -var _ = gc.Suite(&ConnectionEndpointSuite{}) - -func (s *ConnectionEndpointSuite) SetUpTest(c *gc.C) { - s.FakeJujuHomeSuite.SetUpTest(c) - s.store = configstore.NewMem() - s.PatchValue(envcmd.GetConfigStore, func() (configstore.Storage, error) { - return s.store, nil - }) - newInfo := s.store.CreateInfo("env-name") - newInfo.SetAPICredentials(configstore.APICredentials{ - User: "foo", - Password: "foopass", - }) - s.endpoint = configstore.APIEndpoint{ - Addresses: []string{"0.1.2.3"}, - Hostnames: []string{"foo.invalid"}, - CACert: "certificated", - EnvironUUID: "fake-uuid", - } - newInfo.SetAPIEndpoint(s.endpoint) - err := newInfo.Write() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *ConnectionEndpointSuite) TestAPIEndpointInStoreCached(c *gc.C) { - cmd, err := initTestCommand(c, "-e", "env-name") - c.Assert(err, jc.ErrorIsNil) - endpoint, err := cmd.ConnectionEndpoint(false) - c.Assert(err, jc.ErrorIsNil) - c.Assert(endpoint, gc.DeepEquals, s.endpoint) -} - -func (s *ConnectionEndpointSuite) TestAPIEndpointForEnvSuchName(c *gc.C) { - cmd, err := initTestCommand(c, "-e", "no-such-env") - c.Assert(err, jc.ErrorIsNil) - _, err = cmd.ConnectionEndpoint(false) - c.Assert(err, jc.Satisfies, errors.IsNotFound) - c.Assert(err, gc.ErrorMatches, `environment "no-such-env" not found`) -} - -func (s *ConnectionEndpointSuite) TestAPIEndpointRefresh(c *gc.C) { - newEndpoint := configstore.APIEndpoint{ - Addresses: []string{"0.1.2.3"}, - Hostnames: []string{"foo.example.com"}, - CACert: "certificated", - EnvironUUID: "fake-uuid", - } - s.PatchValue(envcmd.EndpointRefresher, func(_ *envcmd.EnvCommandBase) (io.Closer, error) { - info, err := s.store.ReadInfo("env-name") - info.SetAPIEndpoint(newEndpoint) - err = info.Write() - c.Assert(err, jc.ErrorIsNil) - return new(closer), nil - }) - - cmd, err := initTestCommand(c, "-e", "env-name") - c.Assert(err, jc.ErrorIsNil) - endpoint, err := cmd.ConnectionEndpoint(true) - c.Assert(err, jc.ErrorIsNil) - c.Assert(endpoint, gc.DeepEquals, newEndpoint) -} - -type closer struct{} - -func (*closer) Close() error { - return nil -} - -type EnvironmentVersionSuite struct { - fake *fakeEnvGetter -} - -var _ = gc.Suite(&EnvironmentVersionSuite{}) - -type fakeEnvGetter struct { - agentVersion interface{} - err error - results map[string]interface{} - closeCalled bool - getCalled bool -} - -func (g *fakeEnvGetter) EnvironmentGet() (map[string]interface{}, error) { - g.getCalled = true - if g.err != nil { - return nil, g.err - } else if g.results != nil { - return g.results, nil - } else if g.agentVersion == nil { - return map[string]interface{}{}, nil - } else { - return map[string]interface{}{ - "agent-version": g.agentVersion, - }, nil - } -} - -func (g *fakeEnvGetter) Close() error { - g.closeCalled = true - return nil -} - -func (s *EnvironmentVersionSuite) SetUpTest(*gc.C) { - s.fake = new(fakeEnvGetter) -} - -func (s *EnvironmentVersionSuite) TestApiCallFails(c *gc.C) { - s.fake.err = errors.New("boom") - _, err := envcmd.GetEnvironmentVersion(s.fake) - c.Assert(err, gc.ErrorMatches, "unable to retrieve environment config: boom") -} - -func (s *EnvironmentVersionSuite) TestNoVersion(c *gc.C) { - _, err := envcmd.GetEnvironmentVersion(s.fake) - c.Assert(err, gc.ErrorMatches, "version not found in environment config") -} - -func (s
*EnvironmentVersionSuite) TestInvalidVersionType(c *gc.C) { - s.fake.agentVersion = 99 - _, err := envcmd.GetEnvironmentVersion(s.fake) - c.Assert(err, gc.ErrorMatches, "invalid environment version type in config") -} - -func (s *EnvironmentVersionSuite) TestInvalidVersion(c *gc.C) { - s.fake.agentVersion = "a.b.c" - _, err := envcmd.GetEnvironmentVersion(s.fake) - c.Assert(err, gc.ErrorMatches, "unable to parse environment version: .+") -} - -func (s *EnvironmentVersionSuite) TestSuccess(c *gc.C) { - vs := "1.22.1" - s.fake.agentVersion = vs - v, err := envcmd.GetEnvironmentVersion(s.fake) - c.Assert(err, jc.ErrorIsNil) - c.Assert(v.Compare(version.MustParse(vs)), gc.Equals, 0) -} - -type EnvConfigSuite struct { - testing.FakeJujuHomeSuite - client *fakeEnvGetter - store configstore.Storage - envName string -} - -var _ = gc.Suite(&EnvConfigSuite{}) - -func createBootstrapInfo(c *gc.C, name string) map[string]interface{} { - bootstrapCfg, err := config.New(config.UseDefaults, map[string]interface{}{ - "type": "dummy", - "name": name, - "state-server": "true", - "state-id": "1", - }) - c.Assert(err, jc.ErrorIsNil) - return bootstrapCfg.AllAttrs() -} - -func (s *EnvConfigSuite) SetUpTest(c *gc.C) { - s.FakeJujuHomeSuite.SetUpTest(c) - s.envName = "test-env" - s.client = &fakeEnvGetter{results: createBootstrapInfo(c, s.envName)} - - var err error - s.store, err = configstore.Default() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvConfigSuite) writeStore(c *gc.C, bootstrapInfo bool) { - info := s.store.CreateInfo(s.envName) - info.SetAPIEndpoint(configstore.APIEndpoint{ - Addresses: []string{"localhost"}, - CACert: testing.CACert, - EnvironUUID: s.envName + "-UUID", - ServerUUID: s.envName + "-UUID", - }) - - if bootstrapInfo { - info.SetBootstrapConfig(createBootstrapInfo(c, s.envName)) - } - err := info.Write() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvConfigSuite) TestConfigWithBootstrapInfo(c *gc.C) { - cmd := envcmd.NewEnvCommandBase(s.envName, s.client, nil) - s.writeStore(c, true) - - cfg, err := cmd.Config(s.store, s.client) - c.Assert(err, jc.ErrorIsNil) - c.Check(cfg.Name(), gc.Equals, s.envName) - c.Check(s.client.getCalled, jc.IsFalse) - c.Check(s.client.closeCalled, jc.IsFalse) -} - -func (s *EnvConfigSuite) TestConfigWithNoBootstrapWithClient(c *gc.C) { - cmd := envcmd.NewEnvCommandBase(s.envName, s.client, nil) - s.writeStore(c, false) - - cfg, err := cmd.Config(s.store, s.client) - c.Assert(err, jc.ErrorIsNil) - c.Check(cfg.Name(), gc.Equals, s.envName) - c.Check(s.client.getCalled, jc.IsTrue) - c.Check(s.client.closeCalled, jc.IsFalse) -} - -func (s *EnvConfigSuite) TestConfigWithNoBootstrapNoClient(c *gc.C) { - cmd := envcmd.NewEnvCommandBase(s.envName, s.client, nil) - s.writeStore(c, false) - - cfg, err := cmd.Config(s.store, nil) - c.Assert(err, jc.ErrorIsNil) - c.Check(cfg.Name(), gc.Equals, s.envName) - c.Check(s.client.getCalled, jc.IsTrue) - c.Check(s.client.closeCalled, jc.IsTrue) -} - -func (s *EnvConfigSuite) TestConfigWithNoBootstrapWithClientErr(c *gc.C) { - cmd := envcmd.NewEnvCommandBase(s.envName, s.client, errors.New("problem opening connection")) - s.writeStore(c, false) - - _, err := cmd.Config(s.store, nil) - c.Assert(err, gc.ErrorMatches, "problem opening connection") - c.Check(s.client.getCalled, jc.IsFalse) - c.Check(s.client.closeCalled, jc.IsFalse) -} - -func (s *EnvConfigSuite) TestConfigWithNoBootstrapWithEnvGetError(c *gc.C) { - cmd := envcmd.NewEnvCommandBase(s.envName, s.client, nil) - s.writeStore(c, false) - s.client.err = 
errors.New("problem getting environment attributes") - - _, err := cmd.Config(s.store, nil) - c.Assert(err, gc.ErrorMatches, "problem getting environment attributes") - c.Check(s.client.getCalled, jc.IsTrue) - c.Check(s.client.closeCalled, jc.IsTrue) -} - -func (s *EnvConfigSuite) TestConfigEnvDoesntExist(c *gc.C) { - cmd := envcmd.NewEnvCommandBase("dummy", s.client, nil) - s.writeStore(c, false) - - _, err := cmd.Config(s.store, nil) - c.Assert(err, jc.Satisfies, errors.IsNotFound) - c.Check(s.client.getCalled, jc.IsFalse) - c.Check(s.client.closeCalled, jc.IsFalse) -} === removed file 'src/github.com/juju/juju/cmd/envcmd/export_test.go' --- src/github.com/juju/juju/cmd/envcmd/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,21 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package envcmd - -var ( - GetCurrentEnvironmentFilePath = getCurrentEnvironmentFilePath - GetCurrentSystemFilePath = getCurrentSystemFilePath - GetConfigStore = &getConfigStore - EndpointRefresher = &endpointRefresher -) - -// NewEnvCommandBase returns a new EnvCommandBase with the environment name, client, -// and error as specified for testing purposes. -func NewEnvCommandBase(name string, client EnvironmentGetter, err error) *EnvCommandBase { - return &EnvCommandBase{ - envName: name, - envGetterClient: client, - envGetterErr: err, - } -} === removed file 'src/github.com/juju/juju/cmd/envcmd/files.go' --- src/github.com/juju/juju/cmd/envcmd/files.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/files.go 1970-01-01 00:00:00 +0000 @@ -1,220 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package envcmd - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/juju/cmd" - "github.com/juju/errors" - "github.com/juju/utils/fslock" - - "github.com/juju/juju/juju/osenv" -) - -const ( - CurrentEnvironmentFilename = "current-environment" - CurrentSystemFilename = "current-system" - - lockName = "current.lock" - - systemSuffix = " (system)" -) - -var ( - // 5 seconds should be way more than enough to write or read any files - // even on heavily loaded systems. - lockTimeout = 5 * time.Second -) - -// ServerFile describes the information that is needed for a user -// to connect to an api server. -type ServerFile struct { - Addresses []string `yaml:"addresses"` - CACert string `yaml:"ca-cert,omitempty"` - Username string `yaml:"username"` - Password string `yaml:"password"` -} - -// NOTE: synchronisation across functions in this file. -// -// Each of the read and write functions use a fslock to synchronise calls -// across both the current executable and across different executables. - -func getCurrentEnvironmentFilePath() string { - return filepath.Join(osenv.JujuHome(), CurrentEnvironmentFilename) -} - -func getCurrentSystemFilePath() string { - return filepath.Join(osenv.JujuHome(), CurrentSystemFilename) -} - -// Read the file $JUJU_HOME/current-environment and return the value stored -// there. If the file doesn't exist an empty string is returned and no error. 
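The readers that follow treat a missing current-environment or current-system file as "nothing selected" rather than as an error. A small standard-library-only sketch of that convention (the real functions also hold an fslock for the duration of the read):

package currentfile

import (
	"io/ioutil"
	"os"
	"strings"
)

// readCurrent returns the trimmed contents of a marker file, or the
// empty string when the file does not exist.
func readCurrent(path string) (string, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil // no marker file means nothing is selected
		}
		return "", err
	}
	return strings.TrimSpace(string(data)), nil
}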
-func ReadCurrentEnvironment() (string, error) { - lock, err := acquireEnvironmentLock("read current-environment") - if err != nil { - return "", errors.Trace(err) - } - defer lock.Unlock() - - current, err := ioutil.ReadFile(getCurrentEnvironmentFilePath()) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", errors.Trace(err) - } - return strings.TrimSpace(string(current)), nil -} - -// Read the file $JUJU_HOME/current-system and return the value stored there. -// If the file doesn't exist an empty string is returned and no error. -func ReadCurrentSystem() (string, error) { - lock, err := acquireEnvironmentLock("read current-system") - if err != nil { - return "", errors.Trace(err) - } - defer lock.Unlock() - - current, err := ioutil.ReadFile(getCurrentSystemFilePath()) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", errors.Trace(err) - } - return strings.TrimSpace(string(current)), nil -} - -// Write the envName to the file $JUJU_HOME/current-environment file. -func WriteCurrentEnvironment(envName string) error { - lock, err := acquireEnvironmentLock("write current-environment") - if err != nil { - return errors.Trace(err) - } - defer lock.Unlock() - - path := getCurrentEnvironmentFilePath() - err = ioutil.WriteFile(path, []byte(envName+"\n"), 0644) - if err != nil { - return errors.Errorf("unable to write to the environment file: %q, %s", path, err) - } - // If there is a current system file, remove it. - if err := os.Remove(getCurrentSystemFilePath()); err != nil && !os.IsNotExist(err) { - logger.Debugf("removing the current environment file due to %s", err) - // Best attempt to remove the file we just wrote. - os.Remove(path) - return err - } - return nil -} - -// Write the systemName to the file $JUJU_HOME/current-system file. -func WriteCurrentSystem(systemName string) error { - lock, err := acquireEnvironmentLock("write current-system") - if err != nil { - return errors.Trace(err) - } - defer lock.Unlock() - - path := getCurrentSystemFilePath() - err = ioutil.WriteFile(path, []byte(systemName+"\n"), 0644) - if err != nil { - return errors.Errorf("unable to write to the system file: %q, %s", path, err) - } - // If there is a current environment file, remove it. - if err := os.Remove(getCurrentEnvironmentFilePath()); err != nil && !os.IsNotExist(err) { - logger.Debugf("removing the current system file due to %s", err) - // Best attempt to remove the file we just wrote. - os.Remove(path) - return err - } - return nil -} - -func acquireEnvironmentLock(operation string) (*fslock.Lock, error) { - // NOTE: any reading or writing from the directory should be done with a - // fslock to make sure we have a consistent read or write. Also worth - // noting, we should use a very short timeout. - lock, err := fslock.NewLock(osenv.JujuHome(), lockName) - if err != nil { - return nil, errors.Trace(err) - } - err = lock.LockWithTimeout(lockTimeout, operation) - if err != nil { - return nil, errors.Trace(err) - } - return lock, nil -} - -// CurrentConnectionName looks at both the current environment file -// and the current system file to determine which is active. -// The name of the current environment or system is returned along with -// a boolean to express whether the name refers to a system or environment. 
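WriteCurrentEnvironment and WriteCurrentSystem above keep the two marker files mutually exclusive: writing one removes the other, and a failed removal triggers a best-effort rollback of the file just written. A stripped-down sketch of that pattern, with path, counterpart and name as hypothetical parameters:

package markerfiles

import (
	"fmt"
	"io/ioutil"
	"os"
)

// writeExclusive writes one marker file and removes its counterpart so
// that at most one of the pair exists afterwards.
func writeExclusive(path, counterpart, name string) error {
	if err := ioutil.WriteFile(path, []byte(name+"\n"), 0644); err != nil {
		return fmt.Errorf("unable to write to %q: %v", path, err)
	}
	if err := os.Remove(counterpart); err != nil && !os.IsNotExist(err) {
		os.Remove(path) // best attempt to undo the write we just made
		return err
	}
	return nil
}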
-func CurrentConnectionName() (name string, is_system bool, err error) { - currentEnv, err := ReadCurrentEnvironment() - if err != nil { - return "", false, errors.Trace(err) - } else if currentEnv != "" { - return currentEnv, false, nil - } - - currentSystem, err := ReadCurrentSystem() - if err != nil { - return "", false, errors.Trace(err) - } else if currentSystem != "" { - return currentSystem, true, nil - } - - return "", false, nil -} - -func currentName() (string, error) { - name, isSystem, err := CurrentConnectionName() - if err != nil { - return "", errors.Trace(err) - } - if isSystem { - name = name + systemSuffix - } - if name != "" { - name += " " - } - return name, nil -} - -// SetCurrentEnvironment writes out the current environment file and writes a -// standard message to the command context. -func SetCurrentEnvironment(context *cmd.Context, environmentName string) error { - current, err := currentName() - if err != nil { - return errors.Trace(err) - } - err = WriteCurrentEnvironment(environmentName) - if err != nil { - return errors.Trace(err) - } - context.Infof("%s-> %s", current, environmentName) - return nil -} - -// SetCurrentSystem writes out the current system file and writes a standard -// message to the command context. -func SetCurrentSystem(context *cmd.Context, systemName string) error { - current, err := currentName() - if err != nil { - return errors.Trace(err) - } - err = WriteCurrentSystem(systemName) - if err != nil { - return errors.Trace(err) - } - context.Infof("%s-> %s%s", current, systemName, systemSuffix) - return nil -} === removed file 'src/github.com/juju/juju/cmd/envcmd/files_test.go' --- src/github.com/juju/juju/cmd/envcmd/files_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/files_test.go 1970-01-01 00:00:00 +0000 @@ -1,180 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package envcmd_test - -import ( - "io/ioutil" - "os" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/cmd/envcmd" - "github.com/juju/juju/testing" -) - -type filesSuite struct { - testing.FakeJujuHomeSuite -} - -var _ = gc.Suite(&filesSuite{}) - -func (s *filesSuite) assertCurrentEnvironment(c *gc.C, environmentName string) { - current, err := envcmd.ReadCurrentEnvironment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(current, gc.Equals, environmentName) -} - -func (s *filesSuite) assertCurrentSystem(c *gc.C, systemName string) { - current, err := envcmd.ReadCurrentSystem() - c.Assert(err, jc.ErrorIsNil) - c.Assert(current, gc.Equals, systemName) -} - -func (s *filesSuite) TestReadCurrentEnvironmentUnset(c *gc.C) { - s.assertCurrentEnvironment(c, "") -} - -func (s *filesSuite) TestReadCurrentSystemUnset(c *gc.C) { - s.assertCurrentSystem(c, "") -} - -func (s *filesSuite) TestReadCurrentEnvironmentSet(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentEnvironment(c, "fubar") -} - -func (s *filesSuite) TestReadCurrentSystemSet(c *gc.C) { - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentSystem(c, "fubar") -} - -func (s *filesSuite) TestWriteEnvironmentAddsNewline(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - current, err := ioutil.ReadFile(envcmd.GetCurrentEnvironmentFilePath()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(current), gc.Equals, "fubar\n") -} - -func (s *filesSuite) TestWriteSystemAddsNewline(c *gc.C) { - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - current, err := ioutil.ReadFile(envcmd.GetCurrentSystemFilePath()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(current), gc.Equals, "fubar\n") -} - -func (s *filesSuite) TestWriteEnvironmentRemovesSystemFile(c *gc.C) { - err := envcmd.WriteCurrentSystem("baz") - c.Assert(err, jc.ErrorIsNil) - err = envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - c.Assert(envcmd.GetCurrentSystemFilePath(), jc.DoesNotExist) -} - -func (s *filesSuite) TestWriteSystemRemovesEnvironmentFile(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - err = envcmd.WriteCurrentSystem("baz") - c.Assert(err, jc.ErrorIsNil) - c.Assert(envcmd.GetCurrentEnvironmentFilePath(), jc.DoesNotExist) -} - -func (*filesSuite) TestErrorWritingCurrentEnvironment(c *gc.C) { - // Can't write a file over a directory. - os.MkdirAll(envcmd.GetCurrentEnvironmentFilePath(), 0777) - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, gc.ErrorMatches, "unable to write to the environment file: .*") -} - -func (*filesSuite) TestErrorWritingCurrentSystem(c *gc.C) { - // Can't write a file over a directory. 
- os.MkdirAll(envcmd.GetCurrentSystemFilePath(), 0777) - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, gc.ErrorMatches, "unable to write to the system file: .*") -} - -func (*filesSuite) TestCurrentCommenctionNameMissing(c *gc.C) { - name, isSystem, err := envcmd.CurrentConnectionName() - c.Assert(err, jc.ErrorIsNil) - c.Assert(isSystem, jc.IsFalse) - c.Assert(name, gc.Equals, "") -} - -func (*filesSuite) TestCurrentCommenctionNameEnvironment(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - name, isSystem, err := envcmd.CurrentConnectionName() - c.Assert(err, jc.ErrorIsNil) - c.Assert(isSystem, jc.IsFalse) - c.Assert(name, gc.Equals, "fubar") -} - -func (*filesSuite) TestCurrentCommenctionNameSystem(c *gc.C) { - err := envcmd.WriteCurrentSystem("baz") - c.Assert(err, jc.ErrorIsNil) - name, isSystem, err := envcmd.CurrentConnectionName() - c.Assert(err, jc.ErrorIsNil) - c.Assert(isSystem, jc.IsTrue) - c.Assert(name, gc.Equals, "baz") -} - -func (s *filesSuite) TestSetCurrentEnvironment(c *gc.C) { - ctx := testing.Context(c) - err := envcmd.SetCurrentEnvironment(ctx, "new-env") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentEnvironment(c, "new-env") - c.Assert(testing.Stderr(ctx), gc.Equals, "-> new-env\n") -} - -func (s *filesSuite) TestSetCurrentEnvironmentExistingEnv(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - ctx := testing.Context(c) - err = envcmd.SetCurrentEnvironment(ctx, "new-env") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentEnvironment(c, "new-env") - c.Assert(testing.Stderr(ctx), gc.Equals, "fubar -> new-env\n") -} - -func (s *filesSuite) TestSetCurrentEnvironmentExistingSystem(c *gc.C) { - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - ctx := testing.Context(c) - err = envcmd.SetCurrentEnvironment(ctx, "new-env") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentEnvironment(c, "new-env") - c.Assert(testing.Stderr(ctx), gc.Equals, "fubar (system) -> new-env\n") -} - -func (s *filesSuite) TestSetCurrentSystem(c *gc.C) { - ctx := testing.Context(c) - err := envcmd.SetCurrentSystem(ctx, "new-sys") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentSystem(c, "new-sys") - c.Assert(testing.Stderr(ctx), gc.Equals, "-> new-sys (system)\n") -} - -func (s *filesSuite) TestSetCurrentSystemExistingEnv(c *gc.C) { - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - ctx := testing.Context(c) - err = envcmd.SetCurrentSystem(ctx, "new-sys") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentSystem(c, "new-sys") - c.Assert(testing.Stderr(ctx), gc.Equals, "fubar -> new-sys (system)\n") -} - -func (s *filesSuite) TestSetCurrentSystemExistingSystem(c *gc.C) { - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - ctx := testing.Context(c) - err = envcmd.SetCurrentSystem(ctx, "new-sys") - c.Assert(err, jc.ErrorIsNil) - s.assertCurrentSystem(c, "new-sys") - c.Assert(testing.Stderr(ctx), gc.Equals, "fubar (system) -> new-sys (system)\n") -} === removed file 'src/github.com/juju/juju/cmd/envcmd/package_test.go' --- src/github.com/juju/juju/cmd/envcmd/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package envcmd_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func Test(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/github.com/juju/juju/cmd/envcmd/systemcommand.go' --- src/github.com/juju/juju/cmd/envcmd/systemcommand.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/systemcommand.go 1970-01-01 00:00:00 +0000 @@ -1,180 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package envcmd - -import ( - "github.com/juju/cmd" - "github.com/juju/errors" - "launchpad.net/gnuflag" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/environmentmanager" - "github.com/juju/juju/api/systemmanager" - "github.com/juju/juju/api/usermanager" - "github.com/juju/juju/environs/configstore" - "github.com/juju/juju/juju" -) - -// ErrNoSystemSpecified is returned by commands that operate on -// a system if there is no current system, no system has been -// explicitly specified, and there is no default system. -var ErrNoSystemSpecified = errors.New("no system specified") - -// SystemCommand is intended to be a base for all commands -// that need to operate on systems as opposed to environments. -type SystemCommand interface { - cmd.Command - - // SetSystemName is called prior to the wrapped command's Init method with - // the active system name. The system name is guaranteed to be non-empty - // at entry of Init. - SetSystemName(systemName string) - - // SystemName returns the name of the system or environment used to - // determine that API end point. - SystemName() string -} - -// SysCommandBase is a convenience type for embedding in commands -// that wish to implement SystemCommand. -type SysCommandBase struct { - cmd.CommandBase - systemName string -} - -// SetSystemName records the current environment name in the SysCommandBase -func (c *SysCommandBase) SetSystemName(systemName string) { - c.systemName = systemName -} - -// SystemName returns the name of the system or environment used to determine -// that API end point. -func (c *SysCommandBase) SystemName() string { - return c.systemName -} - -// NewEnvironmentManagerAPIClient returns an API client for the -// EnvironmentManager on the current system using the current credentials. -func (c *SysCommandBase) NewEnvironmentManagerAPIClient() (*environmentmanager.Client, error) { - root, err := c.newAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return environmentmanager.NewClient(root), nil -} - -// NewSystemManagerAPIClient returns an API client for the SystemManager on -// the current system using the current credentials. -func (c *SysCommandBase) NewSystemManagerAPIClient() (*systemmanager.Client, error) { - root, err := c.newAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return systemmanager.NewClient(root), nil -} - -// NewUserManagerAPIClient returns an API client for the UserManager on the -// current system using the current credentials. -func (c *SysCommandBase) NewUserManagerAPIClient() (*usermanager.Client, error) { - root, err := c.newAPIRoot() - if err != nil { - return nil, errors.Trace(err) - } - return usermanager.NewClient(root), nil -} - -// newAPIRoot returns a restricted API for the current system using the current -// credentials. Only the UserManager and EnvironmentManager may be accessed -// through this API connection. 
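Each New*APIClient method above repeats the same shape: resolve the API root for the current system, then hand it to a typed client constructor. A generic sketch of that shape, where Conn, Client and open are stand-ins for api.Connection, the facade clients and juju.NewAPIFromName:

package syscmd

import "errors"

// Conn stands in for api.Connection.
type Conn interface {
	Close() error
}

// Client stands in for a typed facade client such as
// environmentmanager.Client or usermanager.Client.
type Client struct {
	root Conn
}

// newTypedClient resolves the root once and wraps it, the way each
// New*APIClient method calls newAPIRoot and then a NewClient function.
func newTypedClient(systemName string, open func(string) (Conn, error)) (*Client, error) {
	if systemName == "" {
		return nil, errors.New("no system specified")
	}
	root, err := open(systemName)
	if err != nil {
		return nil, err
	}
	return &Client{root: root}, nil
}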
-func (c *SysCommandBase) newAPIRoot() (api.Connection, error) { - if c.systemName == "" { - return nil, errors.Trace(ErrNoSystemSpecified) - } - return juju.NewAPIFromName(c.systemName) -} - -// ConnectionCredentials returns the credentials used to connect to the API for -// the specified system. -func (c *SysCommandBase) ConnectionCredentials() (configstore.APICredentials, error) { - // TODO: the user may soon be specified through the command line - // or through an environment setting, so return these when they are ready. - var emptyCreds configstore.APICredentials - info, err := c.ConnectionInfo() - if err != nil { - return emptyCreds, errors.Trace(err) - } - return info.APICredentials(), nil -} - -// ConnectionEndpoint returns the endpoint details used to connect to the API for -// the specified system. -func (c *SysCommandBase) ConnectionEndpoint() (configstore.APIEndpoint, error) { - // TODO: the user may soon be specified through the command line - // or through an environment setting, so return these when they are ready. - var empty configstore.APIEndpoint - info, err := c.ConnectionInfo() - if err != nil { - return empty, errors.Trace(err) - } - return info.APIEndpoint(), nil -} - -// ConnectionInfo returns the environ info from the cached config store. -func (c *SysCommandBase) ConnectionInfo() (configstore.EnvironInfo, error) { - // TODO: the user may soon be specified through the command line - // or through an environment setting, so return these when they are ready. - if c.systemName == "" { - return nil, errors.Trace(ErrNoSystemSpecified) - } - info, err := ConnectionInfoForName(c.systemName) - if err != nil { - return nil, errors.Trace(err) - } - return info, nil -} - -// Wrap wraps the specified SystemCommand, returning a Command -// that proxies to each of the SystemCommand methods. -func WrapSystem(c SystemCommand) cmd.Command { - return &sysCommandWrapper{SystemCommand: c} -} - -type sysCommandWrapper struct { - SystemCommand - systemName string -} - -// SetFlags implements Command.SetFlags, then calls the wrapped command's SetFlags. -func (w *sysCommandWrapper) SetFlags(f *gnuflag.FlagSet) { - f.StringVar(&w.systemName, "s", "", "juju system to operate in") - f.StringVar(&w.systemName, "system", "", "") - w.SystemCommand.SetFlags(f) -} - -func (w *sysCommandWrapper) getDefaultSystemName() (string, error) { - if currentSystem, err := ReadCurrentSystem(); err != nil { - return "", errors.Trace(err) - } else if currentSystem != "" { - return currentSystem, nil - } - if currentEnv, err := ReadCurrentEnvironment(); err != nil { - return "", errors.Trace(err) - } else if currentEnv != "" { - return currentEnv, nil - } - return "", errors.Trace(ErrNoSystemSpecified) -} - -// Init implements Command.Init, then calls the wrapped command's Init. -func (w *sysCommandWrapper) Init(args []string) error { - if w.systemName == "" { - name, err := w.getDefaultSystemName() - if err != nil { - return errors.Trace(err) - } - w.systemName = name - } - w.SetSystemName(w.systemName) - return w.SystemCommand.Init(args) -} === removed file 'src/github.com/juju/juju/cmd/envcmd/systemcommand_test.go' --- src/github.com/juju/juju/cmd/envcmd/systemcommand_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/envcmd/systemcommand_test.go 1970-01-01 00:00:00 +0000 @@ -1,85 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package envcmd_test - -import ( - "os" - - "github.com/juju/cmd" - "github.com/juju/cmd/cmdtesting" - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/cmd/envcmd" - "github.com/juju/juju/testing" -) - -type SystemCommandSuite struct { - testing.FakeJujuHomeSuite -} - -var _ = gc.Suite(&SystemCommandSuite{}) - -func (s *SystemCommandSuite) TestSystemCommandInitMultipleConfigs(c *gc.C) { - // The environments.yaml file is ignored for system commands. - testing.WriteEnvironments(c, testing.MultipleEnvConfig) - _, err := initTestSystemCommand(c) - c.Assert(err, gc.ErrorMatches, "no system specified") -} - -func (s *SystemCommandSuite) TestSystemCommandInitNoEnvFile(c *gc.C) { - // Since we ignore the environments.yaml file, we don't care if it isn't - // there. - envPath := gitjujutesting.HomePath(".juju", "environments.yaml") - err := os.Remove(envPath) - _, err = initTestSystemCommand(c) - c.Assert(err, gc.ErrorMatches, "no system specified") -} - -func (s *SystemCommandSuite) TestSystemCommandInitSystemFile(c *gc.C) { - // If there is a current-system file, use that. - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - testEnsureSystemName(c, "fubar") -} -func (s *SystemCommandSuite) TestSystemCommandInitEnvFile(c *gc.C) { - // If there is a current-environment file, use that. - err := envcmd.WriteCurrentEnvironment("fubar") - c.Assert(err, jc.ErrorIsNil) - testEnsureSystemName(c, "fubar") -} - -func (s *SystemCommandSuite) TestSystemCommandInitExplicit(c *gc.C) { - // Take system name from command line arg, and it trumps the current- - // system file. - err := envcmd.WriteCurrentSystem("fubar") - c.Assert(err, jc.ErrorIsNil) - testEnsureSystemName(c, "explicit", "-s", "explicit") - testEnsureSystemName(c, "explicit", "--system", "explicit") -} - -type testSystemCommand struct { - envcmd.SysCommandBase -} - -func (c *testSystemCommand) Info() *cmd.Info { - panic("should not be called") -} - -func (c *testSystemCommand) Run(ctx *cmd.Context) error { - panic("should not be called") -} - -func initTestSystemCommand(c *gc.C, args ...string) (*testSystemCommand, error) { - cmd := new(testSystemCommand) - wrapped := envcmd.WrapSystem(cmd) - return cmd, cmdtesting.InitCommand(wrapped, args) -} - -func testEnsureSystemName(c *gc.C, expect string, args ...string) { - cmd, err := initTestSystemCommand(c, args...) 
- c.Assert(err, jc.ErrorIsNil) - c.Assert(cmd.SystemName(), gc.Equals, expect) -} === modified file 'src/github.com/juju/juju/cmd/juju/action/action.go' --- src/github.com/juju/juju/cmd/juju/action/action.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/action.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,11 @@ "github.com/juju/cmd" "github.com/juju/errors" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/action" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/envcmd" + "github.com/juju/juju/cmd/modelcmd" ) var actionDoc = ` @@ -32,10 +32,10 @@ UsagePrefix: "juju", Purpose: actionPurpose, }) - actionCmd.Register(envcmd.Wrap(&DefinedCommand{})) - actionCmd.Register(envcmd.Wrap(&DoCommand{})) - actionCmd.Register(envcmd.Wrap(&FetchCommand{})) - actionCmd.Register(envcmd.Wrap(&StatusCommand{})) + actionCmd.Register(newDefinedCommand()) + actionCmd.Register(newDoCommand()) + actionCmd.Register(newFetchCommand()) + actionCmd.Register(newStatusCommand()) return actionCmd } @@ -82,7 +82,7 @@ // ActionCommandBase is the base type for action sub-commands. type ActionCommandBase struct { - envcmd.EnvCommandBase + modelcmd.ModelCommandBase } // NewActionAPIClient returns a client for the action api endpoint. === modified file 'src/github.com/juju/juju/cmd/juju/action/common.go' --- src/github.com/juju/juju/cmd/juju/action/common.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/juju/action/common.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" "github.com/juju/juju/apiserver/params" ) === modified file 'src/github.com/juju/juju/cmd/juju/action/defined.go' --- src/github.com/juju/juju/cmd/juju/action/defined.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/defined.go 2016-03-22 15:18:22 +0000 @@ -10,10 +10,15 @@ "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/modelcmd" ) -// DefinedCommand lists actions defined by the charm of a given service. -type DefinedCommand struct { +func newDefinedCommand() cmd.Command { + return modelcmd.Wrap(&definedCommand{}) +} + +// definedCommand lists actions defined by the charm of a given service. +type definedCommand struct { ActionCommandBase serviceTag names.ServiceTag fullSchema bool @@ -28,12 +33,12 @@ ` // Set up the output. -func (c *DefinedCommand) SetFlags(f *gnuflag.FlagSet) { +func (c *definedCommand) SetFlags(f *gnuflag.FlagSet) { c.out.AddFlags(f, "smart", cmd.DefaultFormatters) f.BoolVar(&c.fullSchema, "schema", false, "display the full action schema") } -func (c *DefinedCommand) Info() *cmd.Info { +func (c *definedCommand) Info() *cmd.Info { return &cmd.Info{ Name: "defined", Args: "", @@ -43,7 +48,7 @@ } // Init validates the service name and any other options. -func (c *DefinedCommand) Init(args []string) error { +func (c *definedCommand) Init(args []string) error { switch len(args) { case 0: return errors.New("no service name specified") @@ -61,7 +66,7 @@ // Run grabs the Actions spec from the api. It then sets up a sensible // output format for the map. 
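The hunks in this file replace exported command structs such as DefinedCommand with unexported ones reachable only through constructors that apply modelcmd.Wrap. A toy sketch of that visibility pattern, with Command and wrap as hypothetical stand-ins for cmd.Command and modelcmd.Wrap:

package wrapping

// Command stands in for cmd.Command.
type Command interface {
	Run() error
}

// definedCommand is unexported, so callers outside the package cannot
// construct it directly.
type definedCommand struct{}

func (c *definedCommand) Run() error { return nil }

// wrap stands in for modelcmd.Wrap, which decorates a command with
// model-selection behaviour before it is registered.
func wrap(c Command) Command { return c }

// NewDefinedCommand is the single public entry point, mirroring the
// newDefinedCommand constructors introduced in the diff.
func NewDefinedCommand() Command { return wrap(&definedCommand{}) }

Keeping the struct unexported forces every caller through the wrapper, so no command can be registered without its model-selection flags.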
-func (c *DefinedCommand) Run(ctx *cmd.Context) error { +func (c *definedCommand) Run(ctx *cmd.Context) error { api, err := c.NewActionAPIClient() if err != nil { return err === modified file 'src/github.com/juju/juju/cmd/juju/action/defined_test.go' --- src/github.com/juju/juju/cmd/juju/action/defined_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/defined_test.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,12 @@ "errors" "strings" + "github.com/juju/cmd" "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/yaml.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/yaml.v2" "github.com/juju/juju/cmd/juju/action" "github.com/juju/juju/state" @@ -21,18 +22,20 @@ type DefinedSuite struct { BaseActionSuite - svc *state.Service - subcommand *action.DefinedCommand + svc *state.Service + wrappedCommand cmd.Command + command *action.DefinedCommand } var _ = gc.Suite(&DefinedSuite{}) func (s *DefinedSuite) SetUpTest(c *gc.C) { s.BaseActionSuite.SetUpTest(c) + s.wrappedCommand, s.command = action.NewDefinedCommand(s.store) } func (s *DefinedSuite) TestHelp(c *gc.C) { - s.checkHelp(c, s.subcommand) + s.checkHelp(c, s.wrappedCommand) } func (s *DefinedSuite) TestInit(c *gc.C) { @@ -66,15 +69,18 @@ }} for i, t := range tests { - c.Logf("test %d should %s: juju actions defined %s", i, - t.should, strings.Join(t.args, " ")) - s.subcommand = &action.DefinedCommand{} - err := testing.InitCommand(s.subcommand, t.args) - if t.expectedErr == "" { - c.Check(s.subcommand.ServiceTag(), gc.Equals, t.expectedSvc) - c.Check(s.subcommand.FullSchema(), gc.Equals, t.expectedOutputSchema) - } else { - c.Check(err, gc.ErrorMatches, t.expectedErr) + for _, modelFlag := range s.modelFlags { + c.Logf("test %d should %s: juju actions defined %s", i, + t.should, strings.Join(t.args, " ")) + s.wrappedCommand, s.command = action.NewDefinedCommand(s.store) + args := append([]string{modelFlag, "dummymodel"}, t.args...) + err := testing.InitCommand(s.wrappedCommand, args) + if t.expectedErr == "" { + c.Check(s.command.ServiceTag(), gc.Equals, t.expectedSvc) + c.Check(s.command.FullSchema(), gc.Equals, t.expectedOutputSchema) + } else { + c.Check(err, gc.ErrorMatches, t.expectedErr) + } } } } @@ -112,33 +118,37 @@ }} for i, t := range tests { - func() { - c.Logf("test %d should %s", i, t.should) - - fakeClient := &fakeAPIClient{charmActions: t.withCharmActions} - if t.withAPIErr != "" { - fakeClient.apiErr = errors.New(t.withAPIErr) - } - restore := s.patchAPIClient(fakeClient) - defer restore() - - s.subcommand = &action.DefinedCommand{} - ctx, err := testing.RunCommand(c, s.subcommand, t.withArgs...) - - if t.expectedErr != "" || t.withAPIErr != "" { - c.Check(err, gc.ErrorMatches, t.expectedErr) - } else { - c.Assert(err, gc.IsNil) - result := ctx.Stdout.(*bytes.Buffer).Bytes() - if t.expectFullSchema { - checkFullSchema(c, t.withCharmActions, result) - } else if t.expectNoResults { - c.Check(string(result), gc.Matches, t.expectMessage+"(?sm).*") + for _, modelFlag := range s.modelFlags { + func() { + c.Logf("test %d should %s", i, t.should) + + fakeClient := &fakeAPIClient{charmActions: t.withCharmActions} + if t.withAPIErr != "" { + fakeClient.apiErr = errors.New(t.withAPIErr) + } + restore := s.patchAPIClient(fakeClient) + defer restore() + + args := append([]string{modelFlag, "dummymodel"}, t.withArgs...) 
+ s.wrappedCommand, s.command = action.NewDefinedCommand(s.store) + ctx, err := testing.RunCommand(c, s.wrappedCommand, args...) + + if t.expectedErr != "" || t.withAPIErr != "" { + c.Check(err, gc.ErrorMatches, t.expectedErr) } else { - checkSimpleSchema(c, t.withCharmActions, result) + c.Assert(err, gc.IsNil) + result := ctx.Stdout.(*bytes.Buffer).Bytes() + if t.expectFullSchema { + checkFullSchema(c, t.withCharmActions, result) + } else if t.expectNoResults { + c.Check(string(result), gc.Matches, t.expectMessage+"(?sm).*") + } else { + checkSimpleSchema(c, t.withCharmActions, result) + } } - } - }() + + }() + } } } === modified file 'src/github.com/juju/juju/cmd/juju/action/do.go' --- src/github.com/juju/juju/cmd/juju/action/do.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/juju/action/do.go 2016-03-22 15:18:22 +0000 @@ -11,18 +11,23 @@ "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/names" - yaml "gopkg.in/yaml.v1" + yaml "gopkg.in/yaml.v2" "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cmd/juju/common" + "github.com/juju/juju/cmd/modelcmd" ) var keyRule = regexp.MustCompile("^[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$") -// DoCommand enqueues an Action for running on the given unit with given +func newDoCommand() cmd.Command { + return modelcmd.Wrap(&doCommand{}) +} + +// doCommand enqueues an Action for running on the given unit with given // params -type DoCommand struct { +type doCommand struct { ActionCommandBase unitTag names.UnitTag actionName string @@ -100,17 +105,17 @@ The value for the "time" param will be the string literal "1000". ` -// actionNameRule describes the format an action name must match to be valid. -var actionNameRule = regexp.MustCompile("^[a-z](?:[a-z-]*[a-z])?$") +// ActionNameRule describes the format an action name must match to be valid. +var ActionNameRule = regexp.MustCompile("^[a-z](?:[a-z-]*[a-z])?$") // SetFlags offers an option for YAML output. -func (c *DoCommand) SetFlags(f *gnuflag.FlagSet) { +func (c *doCommand) SetFlags(f *gnuflag.FlagSet) { c.out.AddFlags(f, "smart", cmd.DefaultFormatters) f.Var(&c.paramsYAML, "params", "path to yaml-formatted params file") f.BoolVar(&c.parseStrings, "string-args", false, "use raw string values of CLI args") } -func (c *DoCommand) Info() *cmd.Info { +func (c *doCommand) Info() *cmd.Info { return &cmd.Info{ Name: "do", Args: " [key.key.key...=value]", @@ -120,7 +125,7 @@ } // Init gets the unit tag, and checks for other correct args. 
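do.go validates action names against the ActionNameRule regexp shown above. A self-contained version of that check, with the pattern string taken verbatim from the diff:

package actionname

import (
	"fmt"
	"regexp"
)

// actionNameRule: lowercase letters and hyphens, starting and ending
// with a letter.
var actionNameRule = regexp.MustCompile("^[a-z](?:[a-z-]*[a-z])?$")

func validateActionName(name string) error {
	if !actionNameRule.MatchString(name) {
		return fmt.Errorf("invalid action name %q", name)
	}
	return nil
}

Under this rule "snapshot" and "do-backup" pass, while "Snapshot", "-snap" and "snap-" are rejected, because a name must start and end with a lowercase letter.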
-func (c *DoCommand) Init(args []string) error { +func (c *doCommand) Init(args []string) error { switch len(args) { case 0: return errors.New("no unit specified") @@ -132,12 +137,12 @@ if !names.IsValidUnit(unitName) { return errors.Errorf("invalid unit name %q", unitName) } - actionName := args[1] - if valid := actionNameRule.MatchString(actionName); !valid { - return fmt.Errorf("invalid action name %q", actionName) + ActionName := args[1] + if valid := ActionNameRule.MatchString(ActionName); !valid { + return fmt.Errorf("invalid action name %q", ActionName) } c.unitTag = names.NewUnitTag(unitName) - c.actionName = actionName + c.actionName = ActionName if len(args) == 2 { return nil } @@ -162,7 +167,7 @@ } } -func (c *DoCommand) Run(ctx *cmd.Context) error { +func (c *doCommand) Run(ctx *cmd.Context) error { api, err := c.NewActionAPIClient() if err != nil { return err === modified file 'src/github.com/juju/juju/cmd/juju/action/do_test.go' --- src/github.com/juju/juju/cmd/juju/action/do_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/do_test.go 2016-03-22 15:18:22 +0000 @@ -13,7 +13,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" @@ -39,8 +39,7 @@ type DoSuite struct { BaseActionSuite - subcommand *action.DoCommand - dir string + dir string } var _ = gc.Suite(&DoSuite{}) @@ -57,7 +56,8 @@ } func (s *DoSuite) TestHelp(c *gc.C) { - s.checkHelp(c, s.subcommand) + cmd, _ := action.NewDoCommand(s.store) + s.checkHelp(c, cmd) } func (s *DoSuite) TestInit(c *gc.C) { @@ -166,18 +166,21 @@ }} for i, t := range tests { - s.subcommand = &action.DoCommand{} - c.Logf("test %d: should %s:\n$ juju actions do %s\n", i, - t.should, strings.Join(t.args, " ")) - err := testing.InitCommand(s.subcommand, t.args) - if t.expectError == "" { - c.Check(s.subcommand.UnitTag(), gc.Equals, t.expectUnit) - c.Check(s.subcommand.ActionName(), gc.Equals, t.expectAction) - c.Check(s.subcommand.ParamsYAMLPath(), gc.Equals, t.expectParamsYamlPath) - c.Check(s.subcommand.KeyValueDoArgs(), jc.DeepEquals, t.expectKVArgs) - c.Check(s.subcommand.ParseStrings(), gc.Equals, t.expectParseStrings) - } else { - c.Check(err, gc.ErrorMatches, t.expectError) + for _, modelFlag := range s.modelFlags { + wrappedCommand, command := action.NewDoCommand(s.store) + c.Logf("test %d: should %s:\n$ juju actions do %s\n", i, + t.should, strings.Join(t.args, " ")) + args := append([]string{modelFlag, "dummymodel"}, t.args...) 
+ err := testing.InitCommand(wrappedCommand, args) + if t.expectError == "" { + c.Check(command.UnitTag(), gc.Equals, t.expectUnit) + c.Check(command.ActionName(), gc.Equals, t.expectAction) + c.Check(command.ParamsYAML().Path, gc.Equals, t.expectParamsYamlPath) + c.Check(command.Args(), jc.DeepEquals, t.expectKVArgs) + c.Check(command.ParseStrings(), gc.Equals, t.expectParseStrings) + } else { + c.Check(err, gc.ErrorMatches, t.expectError) + } } } } @@ -232,17 +235,17 @@ withArgs: []string{validUnitId, "some-action", "--params", s.dir + "/" + "invalidParams.yml", }, - expectedErr: "YAML error: line 3: mapping values are not allowed in this context", + expectedErr: "yaml: line 3: mapping values are not allowed in this context", }, { should: "fail with invalid UTF in file", withArgs: []string{validUnitId, "some-action", "--params", s.dir + "/" + "invalidUTF.yml", }, - expectedErr: "YAML error: invalid leading UTF-8 octet", + expectedErr: "yaml: invalid leading UTF-8 octet", }, { should: "fail with invalid YAML passed as arg and no --string-args", withArgs: []string{validUnitId, "some-action", "foo.bar=\""}, - expectedErr: "YAML error: found unexpected end of stream", + expectedErr: "yaml: found unexpected end of stream", }, { should: "enqueue a basic action with no params", withArgs: []string{validUnitId, "some-action"}, @@ -354,47 +357,50 @@ }} for i, t := range tests { - func() { - c.Logf("test %d: should %s:\n$ juju actions do %s\n", i, - t.should, strings.Join(t.withArgs, " ")) - fakeClient := &fakeAPIClient{ - actionResults: t.withActionResults, - } - if t.withAPIErr != "" { - fakeClient.apiErr = errors.New(t.withAPIErr) - } - restore := s.patchAPIClient(fakeClient) - defer restore() - - s.subcommand = &action.DoCommand{} - ctx, err := testing.RunCommand(c, s.subcommand, t.withArgs...) - - if t.expectedErr != "" || t.withAPIErr != "" { - c.Check(err, gc.ErrorMatches, t.expectedErr) - } else { - c.Assert(err, gc.IsNil) - // Before comparing, double-check to avoid - // panics in malformed tests. - c.Assert(len(t.withActionResults), gc.Equals, 1) - // Make sure the test's expected Action was - // non-nil and correct. - c.Assert(t.withActionResults[0].Action, gc.NotNil) - expectedTag, err := names.ParseActionTag(t.withActionResults[0].Action.Tag) - c.Assert(err, gc.IsNil) - // Make sure the CLI responded with the expected tag - keyToCheck := "Action queued with id" - expectedMap := map[string]string{keyToCheck: expectedTag.Id()} - outputResult := ctx.Stdout.(*bytes.Buffer).Bytes() - resultMap := make(map[string]string) - err = yaml.Unmarshal(outputResult, &resultMap) - c.Assert(err, gc.IsNil) - c.Check(resultMap, jc.DeepEquals, expectedMap) - // Make sure the Action sent to the API to be - // enqueued was indeed the expected map - enqueued := fakeClient.EnqueuedActions() - c.Assert(enqueued.Actions, gc.HasLen, 1) - c.Check(enqueued.Actions[0], jc.DeepEquals, t.expectedActionEnqueued) - } - }() + for _, modelFlag := range s.modelFlags { + func() { + c.Logf("test %d: should %s:\n$ juju actions do %s\n", i, + t.should, strings.Join(t.withArgs, " ")) + fakeClient := &fakeAPIClient{ + actionResults: t.withActionResults, + } + if t.withAPIErr != "" { + fakeClient.apiErr = errors.New(t.withAPIErr) + } + restore := s.patchAPIClient(fakeClient) + defer restore() + + wrappedCommand, _ := action.NewDoCommand(s.store) + args := append([]string{modelFlag, "dummymodel"}, t.withArgs...) + ctx, err := testing.RunCommand(c, wrappedCommand, args...) 
+ + if t.expectedErr != "" || t.withAPIErr != "" { + c.Check(err, gc.ErrorMatches, t.expectedErr) + } else { + c.Assert(err, gc.IsNil) + // Before comparing, double-check to avoid + // panics in malformed tests. + c.Assert(len(t.withActionResults), gc.Equals, 1) + // Make sure the test's expected Action was + // non-nil and correct. + c.Assert(t.withActionResults[0].Action, gc.NotNil) + expectedTag, err := names.ParseActionTag(t.withActionResults[0].Action.Tag) + c.Assert(err, gc.IsNil) + // Make sure the CLI responded with the expected tag + keyToCheck := "Action queued with id" + expectedMap := map[string]string{keyToCheck: expectedTag.Id()} + outputResult := ctx.Stdout.(*bytes.Buffer).Bytes() + resultMap := make(map[string]string) + err = yaml.Unmarshal(outputResult, &resultMap) + c.Assert(err, gc.IsNil) + c.Check(resultMap, jc.DeepEquals, expectedMap) + // Make sure the Action sent to the API to be + // enqueued was indeed the expected map + enqueued := fakeClient.EnqueuedActions() + c.Assert(enqueued.Actions, gc.HasLen, 1) + c.Check(enqueued.Actions[0], jc.DeepEquals, t.expectedActionEnqueued) + } + }() + } } } === modified file 'src/github.com/juju/juju/cmd/juju/action/export_test.go' --- src/github.com/juju/juju/cmd/juju/action/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/juju/action/export_test.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,12 @@ package action import ( + "github.com/juju/cmd" "github.com/juju/names" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" ) var ( @@ -14,12 +17,16 @@ AddValueToMap = addValueToMap ) -func (c *DefinedCommand) ServiceTag() names.ServiceTag { - return c.serviceTag -} - -func (c *DefinedCommand) FullSchema() bool { - return c.fullSchema +type FetchCommand struct { + *fetchCommand +} + +type StatusCommand struct { + *statusCommand +} + +type DoCommand struct { + *doCommand } func (c *DoCommand) UnitTag() names.UnitTag { @@ -30,18 +37,54 @@ return c.actionName } -func (c *DoCommand) ParamsYAMLPath() string { - return c.paramsYAML.Path -} - -func (c *DoCommand) KeyValueDoArgs() [][]string { - return c.args -} - func (c *DoCommand) ParseStrings() bool { return c.parseStrings } +func (c *DoCommand) ParamsYAML() cmd.FileVar { + return c.paramsYAML +} + +func (c *DoCommand) Args() [][]string { + return c.args +} + +type DefinedCommand struct { + *definedCommand +} + +func (c *DefinedCommand) ServiceTag() names.ServiceTag { + return c.serviceTag +} + +func (c *DefinedCommand) FullSchema() bool { + return c.fullSchema +} + +func NewFetchCommand(store jujuclient.ClientStore) (cmd.Command, *FetchCommand) { + c := &fetchCommand{} + c.SetClientStore(store) + return modelcmd.Wrap(c), &FetchCommand{c} +} + +func NewStatusCommand(store jujuclient.ClientStore) (cmd.Command, *StatusCommand) { + c := &statusCommand{} + c.SetClientStore(store) + return modelcmd.Wrap(c), &StatusCommand{c} +} + +func NewDefinedCommand(store jujuclient.ClientStore) (cmd.Command, *DefinedCommand) { + c := &definedCommand{} + c.SetClientStore(store) + return modelcmd.Wrap(c, modelcmd.ModelSkipDefault), &DefinedCommand{c} +} + +func NewDoCommand(store jujuclient.ClientStore) (cmd.Command, *DoCommand) { + c := &doCommand{} + c.SetClientStore(store) + return modelcmd.Wrap(c, modelcmd.ModelSkipDefault), &DoCommand{c} +} + func ActionResultsToMap(results []params.ActionResult) map[string]interface{} { return resultsToMap(results) } === modified file 'src/github.com/juju/juju/cmd/juju/action/fetch.go' --- 
src/github.com/juju/juju/cmd/juju/action/fetch.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/fetch.go 2016-03-22 15:18:22 +0000 @@ -12,10 +12,15 @@ "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/modelcmd" ) -// FetchCommand fetches the results of an action by ID. -type FetchCommand struct { +func newFetchCommand() cmd.Command { + return modelcmd.Wrap(&fetchCommand{}) +} + +// fetchCommand fetches the results of an action by ID. +type fetchCommand struct { ActionCommandBase out cmd.Output requestedId string @@ -35,12 +40,12 @@ ` // Set up the output. -func (c *FetchCommand) SetFlags(f *gnuflag.FlagSet) { +func (c *fetchCommand) SetFlags(f *gnuflag.FlagSet) { c.out.AddFlags(f, "smart", cmd.DefaultFormatters) f.StringVar(&c.wait, "wait", "-1s", "wait for results") } -func (c *FetchCommand) Info() *cmd.Info { +func (c *fetchCommand) Info() *cmd.Info { return &cmd.Info{ Name: "fetch", Args: "", @@ -50,7 +55,7 @@ } // Init validates the action ID and any other options. -func (c *FetchCommand) Init(args []string) error { +func (c *fetchCommand) Init(args []string) error { switch len(args) { case 0: return errors.New("no action ID specified") @@ -63,7 +68,7 @@ } // Run issues the API call to get Actions by ID. -func (c *FetchCommand) Run(ctx *cmd.Context) error { +func (c *fetchCommand) Run(ctx *cmd.Context) error { // Check whether units were left off our time string. r := regexp.MustCompile("[a-zA-Z]") matches := r.FindStringSubmatch(c.wait[len(c.wait)-1:]) === modified file 'src/github.com/juju/juju/cmd/juju/action/fetch_test.go' --- src/github.com/juju/juju/cmd/juju/action/fetch_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/fetch_test.go 2016-03-22 15:18:22 +0000 @@ -19,7 +19,6 @@ type FetchSuite struct { BaseActionSuite - subcommand *action.FetchCommand } var _ = gc.Suite(&FetchSuite{}) @@ -29,7 +28,8 @@ } func (s *FetchSuite) TestHelp(c *gc.C) { - s.checkHelp(c, s.subcommand) + cmd, _ := action.NewFetchCommand(s.store) + s.checkHelp(c, cmd) } func (s *FetchSuite) TestInit(c *gc.C) { @@ -48,11 +48,15 @@ }} for i, t := range tests { - c.Logf("test %d: it should %s: juju actions fetch %s", i, - t.should, strings.Join(t.args, " ")) - err := testing.InitCommand(&action.FetchCommand{}, t.args) - if t.expectError != "" { - c.Check(err, gc.ErrorMatches, t.expectError) + for _, modelFlag := range s.modelFlags { + c.Logf("test %d: it should %s: juju actions fetch %s", i, + t.should, strings.Join(t.args, " ")) + cmd, _ := action.NewFetchCommand(s.store) + args := append([]string{modelFlag, "dummymodel"}, t.args...) 
+ err := testing.InitCommand(cmd, args) + if t.expectError != "" { + c.Check(err, gc.ErrorMatches, t.expectError) + } } } } @@ -270,32 +274,35 @@ }} for i, t := range tests { - c.Logf("test %d: should %s", i, t.should) - testRunHelper( - c, s, - makeFakeClient( - t.withAPIDelay, - t.withAPITimeout, - t.withTags, - t.withAPIResponse, - t.withAPIError), - t.expectedErr, - t.expectedOutput, - t.withClientWait, - t.withClientQueryID, - ) + for _, modelFlag := range s.modelFlags { + c.Logf("test %d (model flag %v): should %s", i, modelFlag, t.should) + testRunHelper( + c, s, + makeFakeClient( + t.withAPIDelay, + t.withAPITimeout, + t.withTags, + t.withAPIResponse, + t.withAPIError), + t.expectedErr, + t.expectedOutput, + t.withClientWait, + t.withClientQueryID, + modelFlag, + ) + } } } -func testRunHelper(c *gc.C, s *FetchSuite, client *fakeAPIClient, expectedErr, expectedOutput, wait, query string) { +func testRunHelper(c *gc.C, s *FetchSuite, client *fakeAPIClient, expectedErr, expectedOutput, wait, query, modelFlag string) { unpatch := s.BaseActionSuite.patchAPIClient(client) defer unpatch() - args := []string{query} + args := append([]string{modelFlag, "dummymodel"}, query) if wait != "" { - args = append(args, "--wait") - args = append(args, wait) + args = append(args, "--wait", wait) } - ctx, err := testing.RunCommand(c, &action.FetchCommand{}, args...) + cmd, _ := action.NewFetchCommand(s.store) + ctx, err := testing.RunCommand(c, cmd, args...) if expectedErr != "" { c.Check(err, gc.ErrorMatches, expectedErr) } else { === modified file 'src/github.com/juju/juju/cmd/juju/action/package_test.go' --- src/github.com/juju/juju/cmd/juju/action/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/package_test.go 2016-03-22 15:18:22 +0000 @@ -14,11 +14,13 @@ jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/envcmd" "github.com/juju/juju/cmd/juju/action" + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" coretesting "github.com/juju/juju/testing" ) @@ -38,14 +40,25 @@ } type BaseActionSuite struct { - jujutesting.IsolationSuite + coretesting.FakeJujuXDGDataHomeSuite command cmd.Command + + modelFlags []string + store *jujuclienttesting.MemStore } -var _ = gc.Suite(&FetchSuite{}) - func (s *BaseActionSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) s.command = action.NewSuperCommand() + + s.modelFlags = []string{"-m", "--model"} + + err := modelcmd.WriteCurrentController("ctrl") + c.Assert(err, jc.ErrorIsNil) + s.store = jujuclienttesting.NewMemStore() + s.store.Accounts["ctrl"] = &jujuclient.ControllerAccounts{ + CurrentAccount: "admin@local", + } } func (s *BaseActionSuite) patchAPIClient(client *fakeAPIClient) func() { @@ -56,7 +69,7 @@ ) } -func (s *BaseActionSuite) checkHelp(c *gc.C, subcmd envcmd.EnvironCommand) { +func (s *BaseActionSuite) checkHelp(c *gc.C, subcmd cmd.Command) { ctx, err := coretesting.RunCommand(c, s.command, subcmd.Info().Name, "--help") c.Assert(err, gc.IsNil) === modified file 'src/github.com/juju/juju/cmd/juju/action/status.go' --- src/github.com/juju/juju/cmd/juju/action/status.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/status.go 2016-03-22 15:18:22 +0000 @@ -10,10 +10,15 @@ "launchpad.net/gnuflag" 
"github.com/juju/juju/apiserver/params" + "github.com/juju/juju/cmd/modelcmd" ) -// StatusCommand shows the status of an Action by ID. -type StatusCommand struct { +func newStatusCommand() cmd.Command { + return modelcmd.Wrap(&statusCommand{}) +} + +// statusCommand shows the status of an Action by ID. +type statusCommand struct { ActionCommandBase out cmd.Output requestedId string @@ -24,11 +29,11 @@ ` // Set up the output. -func (c *StatusCommand) SetFlags(f *gnuflag.FlagSet) { +func (c *statusCommand) SetFlags(f *gnuflag.FlagSet) { c.out.AddFlags(f, "smart", cmd.DefaultFormatters) } -func (c *StatusCommand) Info() *cmd.Info { +func (c *statusCommand) Info() *cmd.Info { return &cmd.Info{ Name: "status", Args: "[|]", @@ -37,7 +42,7 @@ } } -func (c *StatusCommand) Init(args []string) error { +func (c *statusCommand) Init(args []string) error { switch len(args) { case 0: c.requestedId = "" @@ -50,7 +55,7 @@ } } -func (c *StatusCommand) Run(ctx *cmd.Context) error { +func (c *statusCommand) Run(ctx *cmd.Context) error { api, err := c.NewActionAPIClient() if err != nil { return err === modified file 'src/github.com/juju/juju/cmd/juju/action/status_test.go' --- src/github.com/juju/juju/cmd/juju/action/status_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/cmd/juju/action/status_test.go 2016-03-22 15:18:22 +0000 @@ -18,14 +18,14 @@ type StatusSuite struct { BaseActionSuite - subcommand *action.StatusCommand + subcommand cmd.Command } var _ = gc.Suite(&StatusSuite{}) func (s *StatusSuite) SetUpTest(c *gc.C) { s.BaseActionSuite.SetUpTest(c) - s.subcommand = &action.StatusCommand{} + s.subcommand, _ = action.NewStatusCommand(s.store) } func (s *StatusSuite) TestHelp(c *gc.C) { @@ -71,29 +71,32 @@ } func (s *StatusSuite) runTestCase(c *gc.C, tc statusTestCase) { - fakeClient := makeFakeClient( - 0*time.Second, // No API delay - 5*time.Second, // 5 second test timeout - tc.tags, - tc.results, - "", // No API error - ) - - restore := s.patchAPIClient(fakeClient) - defer restore() - - s.subcommand = &action.StatusCommand{} - ctx, err := testing.RunCommand(c, s.subcommand, tc.args...) - if tc.expectError == "" { - c.Check(err, jc.ErrorIsNil) - } else { - c.Check(err, gc.ErrorMatches, tc.expectError) - } - if len(tc.results) > 0 { - buf, err := cmd.DefaultFormatters["yaml"](action.ActionResultsToMap(tc.results)) - c.Check(err, jc.ErrorIsNil) - c.Check(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, string(buf)+"\n") - c.Check(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") + for _, modelFlag := range s.modelFlags { + fakeClient := makeFakeClient( + 0*time.Second, // No API delay + 5*time.Second, // 5 second test timeout + tc.tags, + tc.results, + "", // No API error + ) + + restore := s.patchAPIClient(fakeClient) + defer restore() + + s.subcommand, _ = action.NewStatusCommand(s.store) + args := append([]string{modelFlag, "dummymodel"}, tc.args...) + ctx, err := testing.RunCommand(c, s.subcommand, args...) 
+ if tc.expectError == "" { + c.Assert(err, jc.ErrorIsNil) + } else { + c.Assert(err, gc.ErrorMatches, tc.expectError) + } + if len(tc.results) > 0 { + buf, err := cmd.DefaultFormatters["yaml"](action.ActionResultsToMap(tc.results)) + c.Check(err, jc.ErrorIsNil) + c.Check(ctx.Stdout.(*bytes.Buffer).String(), gc.Equals, string(buf)+"\n") + c.Check(ctx.Stderr.(*bytes.Buffer).String(), gc.Equals, "") + } } } === modified file 'src/github.com/juju/juju/cmd/juju/backups/backups.go' --- src/github.com/juju/juju/cmd/juju/backups/backups.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/juju/backups/backups.go 2016-03-22 15:18:22 +0000 @@ -10,59 +10,41 @@ "github.com/juju/cmd" "github.com/juju/errors" - "github.com/juju/utils/featureflag" + "launchpad.net/gnuflag" "github.com/juju/juju/api/backups" apiserverbackups "github.com/juju/juju/apiserver/backups" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/cmd/envcmd" - "github.com/juju/juju/feature" + "github.com/juju/juju/cmd/modelcmd" statebackups "github.com/juju/juju/state/backups" ) var backupsDoc = ` -"juju backups" is used to manage backups of the state of a juju environment. -` - -var jesBackupsDoc = ` -"juju backups" is used to manage backups of the state of a juju system. -Backups are only supported on juju systems, not hosted environments. For -more information on juju systems, see: - - juju help juju-systems +"juju backups" is used to manage backups of the state of a juju controller. +Backups are only supported on juju controllers, not hosted models. For +more information on juju controllers, see: + + juju help juju-controllers ` const backupsPurpose = "create, manage, and restore backups of juju's state" -// Command is the top-level command wrapping all backups functionality. -type Command struct { - cmd.SuperCommand -} - -// NewCommand returns a new backups super-command. -func NewCommand() cmd.Command { - if featureflag.Enabled(feature.JES) { - backupsDoc = jesBackupsDoc - } - - backupsCmd := Command{ - SuperCommand: *cmd.NewSuperCommand( - cmd.SuperCommandParams{ - Name: "backups", - Doc: backupsDoc, - UsagePrefix: "juju", - Purpose: backupsPurpose, - }, - ), - } - backupsCmd.Register(envcmd.Wrap(&CreateCommand{})) - backupsCmd.Register(envcmd.Wrap(&InfoCommand{})) - backupsCmd.Register(envcmd.Wrap(&ListCommand{})) - backupsCmd.Register(envcmd.Wrap(&DownloadCommand{})) - backupsCmd.Register(envcmd.Wrap(&UploadCommand{})) - backupsCmd.Register(envcmd.Wrap(&RemoveCommand{})) - backupsCmd.Register(envcmd.Wrap(&RestoreCommand{})) - return &backupsCmd +// NewSuperCommand returns a new backups super-command. +func NewSuperCommand() cmd.Command { + backupsCmd := cmd.NewSuperCommand(cmd.SuperCommandParams{ + Name: "backups", + Doc: backupsDoc, + UsagePrefix: "juju", + Purpose: backupsPurpose, + }) + backupsCmd.Register(newCreateCommand()) + backupsCmd.Register(newInfoCommand()) + backupsCmd.Register(newListCommand()) + backupsCmd.Register(newDownloadCommand()) + backupsCmd.Register(newUploadCommand()) + backupsCmd.Register(newRemoveCommand()) + backupsCmd.Register(newRestoreCommand()) + return backupsCmd } // APIClient represents the backups API client functionality used by @@ -78,18 +60,20 @@ // Download pulls the backup archive file. Download(id string) (io.ReadCloser, error) // Upload pushes a backup archive to storage. - Upload(ar io.Reader, meta params.BackupsMetadataResult) (string, error) + Upload(ar io.ReadSeeker, meta params.BackupsMetadataResult) (string, error) // Remove removes the stored backup. 
Remove(id string) error - // Restore will restore a backup with the given id into the state server. + // Restore will restore a backup with the given id into the controller. Restore(string, backups.ClientConnection) error - // Restore will restore a backup file into the state server. - RestoreReader(io.Reader, *params.BackupsMetadataResult, backups.ClientConnection) error + // RestoreReader will restore a backup file into the controller. + RestoreReader(io.ReadSeeker, *params.BackupsMetadataResult, backups.ClientConnection) error } // CommandBase is the base type for backups sub-commands. type CommandBase struct { - envcmd.EnvCommandBase + // TODO(wallyworld) - remove Log when backup command is flattened. + Log *cmd.Log + modelcmd.ModelCommandBase } // NewAPIClient returns a client for the backups api endpoint. @@ -97,12 +81,19 @@ return newAPIClient(c) } +// SetFlags implements Command.SetFlags. +func (c *CommandBase) SetFlags(f *gnuflag.FlagSet) { + if c.Log != nil { + c.Log.AddFlags(f) + } +} + var newAPIClient = func(c *CommandBase) (APIClient, error) { root, err := c.NewAPIRoot() if err != nil { return nil, errors.Trace(err) } - return backups.NewClient(root), nil + return backups.NewClient(root) } // dumpMetadata writes the formatted backup metadata to stdout. @@ -117,13 +108,18 @@ fmt.Fprintf(ctx.Stdout, "finished: %v\n", result.Finished) fmt.Fprintf(ctx.Stdout, "notes: %q\n", result.Notes) - fmt.Fprintf(ctx.Stdout, "environment ID: %q\n", result.Environment) + fmt.Fprintf(ctx.Stdout, "model ID: %q\n", result.Model) fmt.Fprintf(ctx.Stdout, "machine ID: %q\n", result.Machine) fmt.Fprintf(ctx.Stdout, "created on host: %q\n", result.Hostname) fmt.Fprintf(ctx.Stdout, "juju version: %v\n", result.Version) } -func getArchive(filename string) (rc io.ReadCloser, metaResult *params.BackupsMetadataResult, err error) { +type readSeekCloser interface { + io.ReadSeeker + io.Closer +} + +func getArchive(filename string) (rc readSeekCloser, metaResult *params.BackupsMetadataResult, err error) { defer func() { if err != nil && rc != nil { rc.Close() === modified file 'src/github.com/juju/juju/cmd/juju/backups/backups_test.go' --- src/github.com/juju/juju/cmd/juju/backups/backups_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/cmd/juju/backups/backups_test.go 2016-03-22 15:18:22 +0000 @@ -4,8 +4,6 @@ package backups_test import ( - "strings" - jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -29,32 +27,10 @@ var _ = gc.Suite(&backupsSuite{}) -func (s *backupsSuite) checkHelpCommands(c *gc.C) { - ctx, err := testing.RunCommand(c, s.command, "--help") - c.Assert(err, jc.ErrorIsNil) - - // Check that we have registered all the sub commands by - // inspecting the help output. 
- var namesFound []string - commandHelp := strings.SplitAfter(testing.Stdout(ctx), "commands:")[1] - commandHelp = strings.TrimSpace(commandHelp) - for _, line := range strings.Split(commandHelp, "\n") { - name := strings.TrimSpace(strings.Split(line, " - ")[0]) - namesFound = append(namesFound, name) - } - c.Check(namesFound, gc.DeepEquals, expectedSubCommmandNames) -} - func (s *backupsSuite) TestHelp(c *gc.C) { + // Check the help output ctx, err := testing.RunCommand(c, s.command, "--help") c.Assert(err, jc.ErrorIsNil) - - expected := "(?s)usage: juju backups \\[options\\] .+" - c.Check(testing.Stdout(ctx), gc.Matches, expected) - expected = "(?sm).*^purpose: " + s.command.Purpose + "$.*" - c.Check(testing.Stdout(ctx), gc.Matches, expected) - expected = "(?sm).*^" + s.command.Doc + "$.*" - c.Check(testing.Stdout(ctx), gc.Matches, expected) - - s.checkHelpCommands(c) + namesFound := testing.ExtractCommandsFromHelpOutput(ctx) + c.Assert(namesFound, gc.DeepEquals, expectedSubCommmandNames) } === modified file 'src/github.com/juju/juju/cmd/juju/backups/create.go' --- src/github.com/juju/juju/cmd/juju/backups/create.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/cmd/juju/backups/create.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,7 @@ "github.com/juju/errors" "launchpad.net/gnuflag" + "github.com/juju/juju/cmd/modelcmd" "github.com/juju/juju/state/backups" ) @@ -32,21 +33,23 @@ that case, the backup archive will be stored in the current working directory with a name matching juju-backup--", + "client-key": "", + "server-cert": "", + }) + + clientCfg, err := ecfg.ClientConfig() + c.Assert(err, jc.ErrorIsNil) + + c.Check(clientCfg, jc.DeepEquals, lxdclient.Config{ + Namespace: cfg.Name(), + Remote: lxdclient.Remote{ + Name: "juju-remote", + Host: "10.0.0.1", + Cert: &lxdclient.Cert{ + Name: fmt.Sprintf("juju cert for env %q", s.config.Name()), + CertPEM: []byte(""), + KeyPEM: []byte(""), + }, + ServerPEMCert: "", + }, + }) +} + +func (s *configSuite) TestUpdateForClientConfigLocal(c *gc.C) { + cfg := lxd.NewBaseConfig(c) + ecfg := lxd.NewConfig(cfg) + + clientCfg, err := ecfg.ClientConfig() + c.Assert(err, jc.ErrorIsNil) + updated, err := ecfg.UpdateForClientConfig(clientCfg) + c.Assert(err, jc.ErrorIsNil) + + values, extras := updated.Values(c) + c.Assert(extras, gc.HasLen, 0) + + c.Check(values, jc.DeepEquals, lxd.ConfigValues{ + Namespace: cfg.Name(), + RemoteURL: "", + ClientCert: "", + ClientKey: "", + ServerCert: "", + }) +} + +func (s *configSuite) TestUpdateForClientConfigNonLocal(c *gc.C) { + cfg := lxd.NewBaseConfig(c) + ecfg := lxd.NewConfig(cfg) + ecfg = ecfg.Apply(c, map[string]interface{}{ + "remote-url": "10.0.0.1", + "client-cert": "", + "client-key": "", + "server-cert": "", + }) + + before, extras := ecfg.Values(c) + c.Assert(extras, gc.HasLen, 0) + + clientCfg, err := ecfg.ClientConfig() + c.Assert(err, jc.ErrorIsNil) + updated, err := ecfg.UpdateForClientConfig(clientCfg) + c.Assert(err, jc.ErrorIsNil) + + after, extras := updated.Values(c) + c.Assert(extras, gc.HasLen, 0) + + c.Check(before, jc.DeepEquals, lxd.ConfigValues{ + Namespace: cfg.Name(), + RemoteURL: "10.0.0.1", + ClientCert: "", + ClientKey: "", + ServerCert: "", + }) + c.Check(after, jc.DeepEquals, lxd.ConfigValues{ + Namespace: cfg.Name(), + RemoteURL: "10.0.0.1", + ClientCert: "", + ClientKey: "", + ServerCert: "", + }) +} + +func (s *configSuite) TestUpdateForClientConfigGeneratedCert(c *gc.C) { + cfg := lxd.NewBaseConfig(c) + ecfg := lxd.NewConfig(cfg) + ecfg = ecfg.Apply(c, 
map[string]interface{}{
+		"remote-url":  "10.0.0.1",
+		"client-cert": "",
+		"client-key":  "",
+		"server-cert": "",
+	})
+
+	before, extras := ecfg.Values(c)
+	c.Assert(extras, gc.HasLen, 0)
+
+	clientCfg, err := ecfg.ClientConfig()
+	c.Assert(err, jc.ErrorIsNil)
+	updated, err := ecfg.UpdateForClientConfig(clientCfg)
+	c.Assert(err, jc.ErrorIsNil)
+
+	after, extras := updated.Values(c)
+	c.Assert(extras, gc.HasLen, 0)
+
+	c.Check(before, jc.DeepEquals, lxd.ConfigValues{
+		Namespace:  cfg.Name(),
+		RemoteURL:  "10.0.0.1",
+		ClientCert: "",
+		ClientKey:  "",
+		ServerCert: "",
+	})
+	after.CheckCert(c)
+	after.ClientCert = ""
+	after.ClientKey = ""
+	after.ServerCert = ""
+	c.Check(after, jc.DeepEquals, lxd.ConfigValues{
+		Namespace:  cfg.Name(),
+		RemoteURL:  "10.0.0.1",
+		ClientCert: "",
+		ClientKey:  "",
+		ServerCert: "",
+	})
+}
+
+// TODO(ericsnow) Each test only deals with a single field, so having
+// multiple values in insert and remove (in configTestSpec) is a little
+// misleading and unnecessary.
+
+// configTestSpec defines a subtest to run in a table driven test.
+type configTestSpec struct {
+	// info describes the subtest.
+	info string
+	// insert holds attrs that should be merged into the config.
+	insert testing.Attrs
+	// remove has the names of attrs that should be removed.
+	remove []string
+	// expect defines the expected attributes in a success case.
+	expect testing.Attrs
+	// err is the error message to expect in a failure case.
+	err string
+}
+
+func (ts configTestSpec) checkSuccess(c *gc.C, value interface{}, err error) {
+	if !c.Check(err, jc.ErrorIsNil) {
+		return
+	}
+
+	var cfg *config.Config
+	switch typed := value.(type) {
+	case *config.Config:
+		cfg = typed
+	case environs.Environ:
+		cfg = typed.Config()
+	}
+
+	attrs := cfg.AllAttrs()
+	for field, value := range ts.expect {
+		c.Check(attrs[field], gc.Equals, value)
+	}
+}
+
+func (ts configTestSpec) checkFailure(c *gc.C, err error, msg string) {
+	c.Check(err, gc.ErrorMatches, msg+": "+ts.err)
+}
+
+func (ts configTestSpec) checkAttrs(c *gc.C, attrs map[string]interface{}, cfg *config.Config) {
+	for field, expected := range cfg.UnknownAttrs() {
+		value := attrs[field]
+		c.Check(value, gc.Equals, expected)
+	}
+}
+
+func (ts configTestSpec) attrs() testing.Attrs {
+	attrs := lxd.ConfigAttrs
+	return attrs.Merge(ts.insert).Delete(ts.remove...)
+}
+
+func (ts configTestSpec) newConfig(c *gc.C) *config.Config {
+	attrs := ts.attrs()
+	cfg, err := testing.ModelConfig(c).Apply(attrs)
+	c.Assert(err, jc.ErrorIsNil)
+	return cfg
+}
+
+func (ts configTestSpec) fixCfg(c *gc.C, cfg *config.Config) *config.Config {
+	fixes := make(map[string]interface{})
+
+	// Set changed values.
+ fixes = updateAttrs(fixes, ts.insert) + + newCfg, err := cfg.Apply(fixes) + c.Assert(err, jc.ErrorIsNil) + return newCfg +} + +func updateAttrs(attrs, updates testing.Attrs) testing.Attrs { + updated := make(testing.Attrs, len(attrs)) + for k, v := range attrs { + updated[k] = v + } + for k, v := range updates { + updated[k] = v + } + return updated +} + +var newConfigTests = []configTestSpec{{ + info: "namespace is optional", + remove: []string{"namespace"}, + expect: testing.Attrs{"namespace": "testenv"}, +}, { + info: "namespace can be empty", + insert: testing.Attrs{"namespace": ""}, + expect: testing.Attrs{"namespace": "testenv"}, +}, { + info: "remote-url is optional", + remove: []string{"remote-url"}, + expect: testing.Attrs{"remote-url": ""}, +}, { + info: "remote-url can be empty", + insert: testing.Attrs{"remote-url": ""}, + expect: testing.Attrs{"remote-url": ""}, +}, { + info: "client-cert is optional", + remove: []string{"client-cert"}, + expect: testing.Attrs{"client-cert": ""}, +}, { + info: "client-cert can be empty", + insert: testing.Attrs{"client-cert": ""}, + expect: testing.Attrs{"client-cert": ""}, +}, { + info: "client-key is optional", + remove: []string{"client-key"}, + expect: testing.Attrs{"client-key": ""}, +}, { + info: "client-key can be empty", + insert: testing.Attrs{"client-key": ""}, + expect: testing.Attrs{"client-key": ""}, +}, { + info: "server-cert is optional", + remove: []string{"server-cert"}, + expect: testing.Attrs{"server-cert": ""}, +}, { + info: "unknown field is not touched", + insert: testing.Attrs{"unknown-field": 12345}, + expect: testing.Attrs{"unknown-field": 12345}, +}} + +func (s *configSuite) TestNewModelConfig(c *gc.C) { + // TODO(ericsnow) Move to a functional suite. + if !s.IsRunningLocally(c) { + c.Skip("LXD not running locally") + } + + for i, test := range newConfigTests { + c.Logf("test %d: %s", i, test.info) + + testConfig := test.newConfig(c) + environ, err := environs.New(testConfig) + + // Check the result + if test.err != "" { + test.checkFailure(c, err, "invalid config") + } else { + test.checkSuccess(c, environ, err) + } + } +} + +// TODO(wwitzel3) refactor to provider_test file +func (s *configSuite) TestValidateNewConfig(c *gc.C) { + for i, test := range newConfigTests { + c.Logf("test %d: %s", i, test.info) + + testConfig := test.newConfig(c) + validatedConfig, err := lxd.Provider.Validate(testConfig, nil) + + // Check the result + if test.err != "" { + test.checkFailure(c, err, "invalid config") + } else { + c.Check(validatedConfig, gc.NotNil) + test.checkSuccess(c, validatedConfig, err) + } + } +} + +// TODO(wwitzel3) refactor to the provider_test file +func (s *configSuite) TestValidateOldConfig(c *gc.C) { + for i, test := range newConfigTests { + c.Logf("test %d: %s", i, test.info) + + oldcfg := test.newConfig(c) + var err error + oldcfg, err = lxd.Provider.Validate(oldcfg, nil) + c.Assert(err, jc.ErrorIsNil) + newcfg := test.fixCfg(c, s.config) + expected := updateAttrs(lxd.ConfigAttrs, test.insert) + + // Validate the new config (relative to the old one) using the + // provider. + validatedConfig, err := lxd.Provider.Validate(newcfg, oldcfg) + + // Check the result. + if test.err != "" { + test.checkFailure(c, err, "invalid base config") + } else { + if !c.Check(err, jc.ErrorIsNil) { + continue + } + // We verify that Validate filled in the defaults + // appropriately. 
+ c.Check(validatedConfig, gc.NotNil) + test.checkAttrs(c, expected, validatedConfig) + } + } +} + +// TODO(ericsnow) Add tests for client-cert and client-key. + +var changeConfigTests = []configTestSpec{{ + info: "no change, no error", + expect: lxd.ConfigAttrs, +}, { + info: "cannot change namespace", + insert: testing.Attrs{"namespace": "spam"}, + err: "namespace: cannot change from testenv to spam", + //}, { + // TODO(ericsnow) This triggers cert generation... + // info: "cannot change remote-url", + // insert: testing.Attrs{"remote-url": "eggs"}, + // err: "remote-url: cannot change from to eggs", +}, { + info: "can insert unknown field", + insert: testing.Attrs{"unknown": "ignoti"}, + expect: testing.Attrs{"unknown": "ignoti"}, +}} + +// TODO(wwitzel3) refactor this to the provider_test file. +func (s *configSuite) TestValidateChange(c *gc.C) { + for i, test := range changeConfigTests { + c.Logf("test %d: %s", i, test.info) + + testConfig := test.newConfig(c) + validatedConfig, err := lxd.Provider.Validate(testConfig, s.config) + + // Check the result. + if test.err != "" { + test.checkFailure(c, err, "invalid config change") + } else { + test.checkSuccess(c, validatedConfig, err) + } + } +} + +func (s *configSuite) TestSetConfig(c *gc.C) { + // TODO(ericsnow) Move to a functional suite. + if !s.IsRunningLocally(c) { + c.Skip("LXD not running locally") + } + + for i, test := range changeConfigTests { + c.Logf("test %d: %s", i, test.info) + + environ, err := environs.New(s.config) + c.Assert(err, jc.ErrorIsNil) + + testConfig := test.newConfig(c) + err = environ.SetConfig(testConfig) + + // Check the result. + if test.err != "" { + test.checkFailure(c, err, "invalid config change") + expected, err := lxd.Provider.Validate(s.config, nil) + c.Assert(err, jc.ErrorIsNil) + test.checkAttrs(c, environ.Config().AllAttrs(), expected) + } else { + test.checkSuccess(c, environ.Config(), err) + } + } +} === added file 'src/github.com/juju/juju/provider/lxd/credentials.go' --- src/github.com/juju/juju/provider/lxd/credentials.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/credentials.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/juju/cloud" +) + +type environProviderCredentials struct{} + +// CredentialSchemas is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{cloud.EmptyAuthType: {}} +} + +// DetectCredentials is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { + return cloud.NewEmptyCloudCredential(), nil +} === added file 'src/github.com/juju/juju/provider/lxd/credentials_test.go' --- src/github.com/juju/juju/provider/lxd/credentials_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/credentials_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
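+//
+// (A reader's note, not upstream text: these tests pin down the
+// provider's empty credential story. At this stage LXD takes no cloud
+// credentials, so the schema map in credentials.go above carries only
+// cloud.EmptyAuthType, and detection returns an empty
+// cloud.CloudCredential; the two tests below check exactly that.)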
+ +package lxd_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" + "github.com/juju/juju/testing" +) + +type credentialsSuite struct { + testing.BaseSuite + provider environs.EnvironProvider +} + +var _ = gc.Suite(&credentialsSuite{}) + +func (s *credentialsSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + + var err error + s.provider, err = environs.Provider("lxd") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { + envtesting.AssertProviderAuthTypes(c, s.provider, "empty") +} + +func (s *credentialsSuite) TestDetectCredentials(c *gc.C) { + credentials, err := s.provider.DetectCredentials() + c.Assert(err, jc.ErrorIsNil) + c.Assert(credentials, jc.DeepEquals, cloud.NewEmptyCloudCredential()) +} === added file 'src/github.com/juju/juju/provider/lxd/environ.go' --- src/github.com/juju/juju/provider/lxd/environ.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,178 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "sync" + + "github.com/juju/errors" + + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/provider/common" +) + +type baseProvider interface { + // BootstrapEnv bootstraps a Juju environment. + BootstrapEnv(environs.BootstrapContext, environs.BootstrapParams) (*environs.BootstrapResult, error) + + // DestroyEnv destroys the provided Juju environment. + DestroyEnv() error +} + +type environ struct { + common.SupportsUnitPlacementPolicy + + name string + uuid string + raw *rawProvider + base baseProvider + + lock sync.Mutex + ecfg *environConfig +} + +type newRawProviderFunc func(*environConfig) (*rawProvider, error) + +func newEnviron(cfg *config.Config, newRawProvider newRawProviderFunc) (*environ, error) { + ecfg, err := newValidConfig(cfg, configDefaults) + if err != nil { + return nil, errors.Annotate(err, "invalid config") + } + + // Connect and authenticate. + raw, err := newRawProvider(ecfg) + if err != nil { + return nil, errors.Trace(err) + } + + env, err := newEnvironRaw(ecfg, raw) + if err != nil { + return nil, errors.Trace(err) + } + + //TODO(wwitzel3) make sure we are also cleaning up profiles during destroy + if err := env.initProfile(); err != nil { + return nil, errors.Trace(err) + } + + return env, nil +} + +func newEnvironRaw(ecfg *environConfig, raw *rawProvider) (*environ, error) { + uuid, ok := ecfg.UUID() + if !ok { + return nil, errors.New("UUID not set") + } + + env := &environ{ + name: ecfg.Name(), + uuid: uuid, + ecfg: ecfg, + raw: raw, + } + env.base = common.DefaultProvider{Env: env} + return env, nil +} + +var defaultProfileConfig = map[string]string{ + "boot.autostart": "true", + "security.nesting": "true", +} + +func (env *environ) initProfile() error { + hasProfile, err := env.raw.HasProfile(env.profileName()) + if err != nil { + return errors.Trace(err) + } + + if hasProfile { + return nil + } + + return env.raw.CreateProfile(env.profileName(), defaultProfileConfig) +} + +func (env *environ) profileName() string { + return "juju-" + env.ecfg.Name() +} + +// Name returns the name of the environment. +func (env *environ) Name() string { + return env.name +} + +// Provider returns the environment provider that created this env. 
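+// (providerInstance is the package-level provider value defined
+// elsewhere in this package; environs.Provider("lxd"), as used in the
+// test suites above, resolves to that same value.)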
+func (*environ) Provider() environs.EnvironProvider {
+	return providerInstance
+}
+
+// SetConfig updates the env's configuration.
+func (env *environ) SetConfig(cfg *config.Config) error {
+	env.lock.Lock()
+	defer env.lock.Unlock()
+
+	if env.ecfg == nil {
+		return errors.New("cannot set config on uninitialized env")
+	}
+
+	if err := env.ecfg.update(cfg); err != nil {
+		return errors.Annotate(err, "invalid config change")
+	}
+	return nil
+}
+
+// getSnapshot returns a copy of the environment. This is useful for
+// ensuring the env you are using does not get changed by other code
+// while you are using it.
+func (env *environ) getSnapshot() *environ {
+	e := *env
+	return &e
+}
+
+// Config returns the configuration data with which the env was created.
+func (env *environ) Config() *config.Config {
+	return env.getSnapshot().ecfg.Config
+}
+
+// Bootstrap creates a new instance, choosing the series and arch out of
+// available tools. The series and arch are returned along with a func
+// that must be called to finalize the bootstrap process by transferring
+// the tools and installing the initial juju controller.
+func (env *environ) Bootstrap(ctx environs.BootstrapContext, params environs.BootstrapParams) (*environs.BootstrapResult, error) {
+	// TODO(ericsnow) Ensure currently not the root user
+	// if remote is local host?
+
+	// Using the Bootstrap func from provider/common should be fine.
+	// Local provider does its own thing because it has to deal directly
+	// with localhost rather than using SSH.
+	return env.base.BootstrapEnv(ctx, params)
+}
+
+// Destroy shuts down all known machines and destroys the rest of the
+// known environment.
+func (env *environ) Destroy() error {
+	ports, err := env.Ports()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	if len(ports) > 0 {
+		if err := env.ClosePorts(ports); err != nil {
+			return errors.Trace(err)
+		}
+	}
+
+	if err := env.base.DestroyEnv(); err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+func (env *environ) verifyCredentials() error {
+	// TODO(ericsnow) Do something here?
+	return nil
+}

=== added file 'src/github.com/juju/juju/provider/lxd/environ_broker.go'
--- src/github.com/juju/juju/provider/lxd/environ_broker.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/lxd/environ_broker.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,216 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxd
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils/arch"
+
+	"github.com/juju/juju/agent"
+	"github.com/juju/juju/cloudconfig/instancecfg"
+	"github.com/juju/juju/cloudconfig/providerinit"
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/instance"
+	"github.com/juju/juju/provider/common"
+	"github.com/juju/juju/state/multiwatcher"
+	"github.com/juju/juju/tools/lxdclient"
+)
+
+func isController(icfg *instancecfg.InstanceConfig) bool {
+	return multiwatcher.AnyJobNeedsState(icfg.Jobs...)
+}
+
+// MaintainInstance is specified in the InstanceBroker interface.
+func (*environ) MaintainInstance(args environs.StartInstanceParams) error {
+	return nil
+}
+
+// StartInstance implements environs.InstanceBroker.
+func (env *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
+	// Please note that in order to fulfil the demands made of Instances and
+	// AllInstances, it is imperative that some environment feature be used to
+	// keep track of which instances were actually started by juju.
+	env = env.getSnapshot()
+
+	// Start a new instance.
+
+	if args.InstanceConfig.HasNetworks() {
+		return nil, errors.New("starting instances with networks is not supported yet")
+	}
+
+	series := args.Tools.OneSeries()
+	logger.Debugf("StartInstance: %q, %s", args.InstanceConfig.MachineId, series)
+
+	if err := env.finishInstanceConfig(args); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// TODO(ericsnow) Handle constraints?
+
+	raw, err := env.newRawInstance(args)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	logger.Infof("started instance %q", raw.Name)
+	inst := newInstance(raw, env)
+
+	// Build the result.
+	hwc := env.getHardwareCharacteristics(args, inst)
+	result := environs.StartInstanceResult{
+		Instance: inst,
+		Hardware: hwc,
+	}
+	return &result, nil
+}
+
+func (env *environ) finishInstanceConfig(args environs.StartInstanceParams) error {
+	args.InstanceConfig.Tools = args.Tools[0]
+	logger.Debugf("tools: %#v", args.InstanceConfig.Tools)
+
+	if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, env.ecfg.Config); err != nil {
+		return errors.Trace(err)
+	}
+
+	// TODO: evaluate the impact of setting the constraints on the
+	// instanceConfig for all machines rather than just controller nodes.
+	// This limitation is why the constraints are assigned directly here.
+	args.InstanceConfig.Constraints = args.Constraints
+
+	args.InstanceConfig.AgentEnvironment[agent.Namespace] = env.ecfg.namespace()
+
+	return nil
+}
+
+// newRawInstance is where the new physical instance is actually
+// provisioned, relative to the provided args and spec. Info for that
+// low-level instance is returned.
+func (env *environ) newRawInstance(args environs.StartInstanceParams) (*lxdclient.Instance, error) {
+	machineID := common.MachineFullName(env, args.InstanceConfig.MachineId)
+
+	series := args.Tools.OneSeries()
+	image := "ubuntu-" + series
+
+	err := env.raw.EnsureImageExists(series)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	metadata, err := getMetadata(args)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	//tags := []string{
+	//	env.globalFirewallName(),
+	//	machineID,
+	//}
+	// TODO(ericsnow) Use the env ID for the network name (instead of default)?
+	// TODO(ericsnow) Make the network name configurable?
+	// TODO(ericsnow) Support multiple networks?
+	// TODO(ericsnow) Use a different net interface name? Configurable?
+	instSpec := lxdclient.InstanceSpec{
+		Name:  machineID,
+		Image: image,
+		//Type:              spec.InstanceType.Name,
+		//Disks:             getDisks(spec, args.Constraints),
+		//NetworkInterfaces: []string{"ExternalNAT"},
+		Metadata: metadata,
+		Profiles: []string{
+			//TODO(wwitzel3) allow the user to specify lxc profiles to apply. This allows the
+			// user to set up any custom devices or config settings for their environment.
+			// Also we must ensure that a device with the parent: lxcbr0 exists in at least
+			// one of the profiles.
+			"default",
+			env.profileName(),
+		},
+		//Tags: tags,
+		// Network is omitted (left empty).
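+		// As a hedged illustration only (names taken from the broker
+		// tests below, series assumed): machine "0" of a model whose
+		// UUID is 2d02eeac-9dbb-11e4-89d3-123b93f75cba, running an
+		// example series "trusty", would produce roughly
+		//   Name:     "juju-2d02eeac-9dbb-11e4-89d3-123b93f75cba-machine-0"
+		//   Image:    "ubuntu-trusty"
+		//   Profiles: []string{"default", "juju-<env name>"}
+		// The exact Name comes from common.MachineFullName above.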
+	}
+
+	logger.Infof("starting instance %q (image %q)...", instSpec.Name, instSpec.Image)
+	inst, err := env.raw.AddInstance(instSpec)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return inst, nil
+}
+
+// getMetadata builds the raw "user-defined" metadata for the new
+// instance (relative to the provided args) and returns it.
+func getMetadata(args environs.StartInstanceParams) (map[string]string, error) {
+	renderer := lxdRenderer{}
+	uncompressed, err := providerinit.ComposeUserData(args.InstanceConfig, nil, renderer)
+	if err != nil {
+		return nil, errors.Annotate(err, "cannot make user data")
+	}
+	logger.Debugf("LXD user data; %d bytes", len(uncompressed))
+
+	// TODO(ericsnow) Looks like LXD does not handle gzipped userdata
+	// correctly. It likely has to do with the HTTP transport, much
+	// as we have to b64encode the userdata for GCE. Until that is
+	// resolved we simply pass the plain text.
+	//compressed := utils.Gzip(compressed)
+	userdata := string(uncompressed)
+
+	metadata := map[string]string{
+		metadataKeyIsState: metadataValueFalse,
+		// We store a gz snapshot of information that is used by
+		// cloud-init and unpacked into the /var/lib/cloud/instances folder
+		// for the instance.
+		metadataKeyCloudInit: userdata,
+	}
+	if isController(args.InstanceConfig) {
+		metadata[metadataKeyIsState] = metadataValueTrue
+	}
+
+	return metadata, nil
+}
+
+// getHardwareCharacteristics compiles hardware-related details about
+// the given instance, relative to the provided spec, and returns them.
+func (env *environ) getHardwareCharacteristics(args environs.StartInstanceParams, inst *environInstance) *instance.HardwareCharacteristics {
+	raw := inst.raw.Hardware
+
+	archStr := raw.Architecture
+	if archStr == "unknown" || !arch.IsSupportedArch(archStr) {
+		// TODO(ericsnow) This special-case should be improved.
+		archStr = arch.HostArch()
+	}
+
+	hwc, err := instance.ParseHardware(
+		"arch="+archStr,
+		fmt.Sprintf("cpu-cores=%d", raw.NumCores),
+		fmt.Sprintf("mem=%dM", raw.MemoryMB),
+		//"root-disk=",
+		//"tags=",
+	)
+	if err != nil {
+		logger.Errorf("unexpected problem parsing hardware info: %v", err)
+		// Keep moving...
+	}
+	return &hwc
+}
+
+// AllInstances implements environs.InstanceBroker.
+func (env *environ) AllInstances() ([]instance.Instance, error) {
+	instances, err := getInstances(env)
+	return instances, errors.Trace(err)
+}
+
+// StopInstances implements environs.InstanceBroker.
+func (env *environ) StopInstances(instances ...instance.Id) error {
+	env = env.getSnapshot()
+
+	var ids []string
+	for _, id := range instances {
+		ids = append(ids, string(id))
+	}
+
+	prefix := common.MachineFullName(env, "")
+	err := env.raw.RemoveInstances(prefix, ids...)
+	return errors.Trace(err)
+}

=== added file 'src/github.com/juju/juju/provider/lxd/environ_broker_test.go'
--- src/github.com/juju/juju/provider/lxd/environ_broker_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/lxd/environ_broker_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,47 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
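+//
+// (A reader's note: these tests drive the broker through the stub
+// rawProvider from the BaseSuite. StartInstance gets its instance from
+// s.Client.Inst, and StopInstances is verified via the recorded
+// RemoveInstances stub call, whose first argument is the
+// "juju-<model UUID>-machine-" name prefix.)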
+ +// +build go1.3 + +package lxd_test + +import ( + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/lxd" +) + +type environBrokerSuite struct { + lxd.BaseSuite +} + +var _ = gc.Suite(&environBrokerSuite{}) + +func (s *environBrokerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) +} + +func (s *environBrokerSuite) TestStartInstance(c *gc.C) { + s.Client.Inst = s.RawInstance + + result, err := s.Env.StartInstance(s.StartInstArgs) + + c.Assert(err, jc.ErrorIsNil) + c.Check(result.Instance, gc.DeepEquals, s.Instance) + c.Check(result.Hardware, gc.DeepEquals, s.HWC) +} + +func (s *environBrokerSuite) TestStopInstances(c *gc.C) { + err := s.Env.StopInstances(s.Instance.Id()) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "RemoveInstances", + Args: []interface{}{ + "juju-2d02eeac-9dbb-11e4-89d3-123b93f75cba-machine-", + []string{"spam"}, + }, + }}) +} === added file 'src/github.com/juju/juju/provider/lxd/environ_instance.go' --- src/github.com/juju/juju/provider/lxd/environ_instance.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ_instance.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,125 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/environs" + "github.com/juju/juju/instance" + "github.com/juju/juju/provider/common" + "github.com/juju/juju/tools/lxdclient" +) + +// instStatus is the list of statuses to accept when filtering +// for "alive" instances. +var instStatuses = lxdclient.AliveStatuses + +// Instances returns the available instances in the environment that +// match the provided instance IDs. For IDs that did not match any +// instances, the result at the corresponding index will be nil. In that +// case the error will be environs.ErrPartialInstances (or +// ErrNoInstances if none of the IDs match an instance). +func (env *environ) Instances(ids []instance.Id) ([]instance.Instance, error) { + if len(ids) == 0 { + return nil, environs.ErrNoInstances + } + + instances, err := getInstances(env) + if err != nil { + // We don't return the error since we need to pack one instance + // for each ID into the result. If there is a problem then we + // will return either ErrPartialInstances or ErrNoInstances. + // TODO(ericsnow) Skip returning here only for certain errors? + logger.Errorf("failed to get instances from LXD: %v", err) + err = errors.Trace(err) + } + + // Build the result, matching the provided instance IDs. + numFound := 0 // This will never be greater than len(ids). + results := make([]instance.Instance, len(ids)) + for i, id := range ids { + inst := findInst(id, instances) + if inst != nil { + numFound++ + } + results[i] = inst + } + + if numFound == 0 { + if err == nil { + err = environs.ErrNoInstances + } + } else if numFound != len(ids) { + err = environs.ErrPartialInstances + } + return results, err +} + +var getInstances = func(env *environ) ([]instance.Instance, error) { + return env.instances() +} + +// instances returns a list of all "alive" instances in the environment. +// This means only instances where the IDs match +// "juju--machine-*". This is important because otherwise juju +// will see they are not tracked in state, assume they're stale/rogue, +// and shut them down. 
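+// For example, Instances above asking for IDs {"spam", "eggs"} when only
+// "spam" is running yields {inst, nil} plus environs.ErrPartialInstances
+// (see TestInstancesPartialMatch below); pairing per-ID result slots with
+// a partial-failure error is the contract the tests exercise.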
+func (env *environ) instances() ([]instance.Instance, error) { + env = env.getSnapshot() + + prefix := common.MachineFullName(env, "") + instances, err := env.raw.Instances(prefix, instStatuses...) + err = errors.Trace(err) + + // Turn lxdclient.Instance values into *environInstance values, + // whether or not we got an error. + var results []instance.Instance + for _, base := range instances { + // If we don't make a copy then the same pointer is used for the + // base of all resulting instances. + copied := base + inst := newInstance(&copied, env) + results = append(results, inst) + } + + return results, err +} + +// ControllerInstances returns the IDs of the instances corresponding +// to juju controllers. +func (env *environ) ControllerInstances() ([]instance.Id, error) { + env = env.getSnapshot() + + prefix := common.MachineFullName(env, "") + instances, err := env.raw.Instances(prefix, instStatuses...) + if err != nil { + return nil, errors.Trace(err) + } + + var results []instance.Id + for _, inst := range instances { + metadata := inst.Metadata() + isState, ok := metadata[metadataKeyIsState] + if ok && isState == metadataValueTrue { + results = append(results, instance.Id(inst.Name)) + } + } + if len(results) == 0 { + return nil, environs.ErrNotBootstrapped + } + return results, nil +} + +type instPlacement struct{} + +func (env *environ) parsePlacement(placement string) (*instPlacement, error) { + if placement == "" { + return &instPlacement{}, nil + } + + return nil, errors.Errorf("unknown placement directive: %v", placement) +} === added file 'src/github.com/juju/juju/provider/lxd/environ_instance_test.go' --- src/github.com/juju/juju/provider/lxd/environ_instance_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ_instance_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +// +build go1.3 + +package lxd_test + +import ( + "github.com/juju/errors" + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs" + "github.com/juju/juju/instance" + "github.com/juju/juju/provider/lxd" + "github.com/juju/juju/tools/lxdclient" +) + +type environInstSuite struct { + lxd.BaseSuite +} + +var _ = gc.Suite(&environInstSuite{}) + +func (s *environInstSuite) TestInstancesOkay(c *gc.C) { + ids := []instance.Id{"spam", "eggs", "ham"} + var raw []lxdclient.Instance + var expected []instance.Instance + for _, id := range ids { + raw = append(raw, *s.NewRawInstance(c, string(id))) + expected = append(expected, s.NewInstance(c, string(id))) + } + s.Client.Insts = raw + + insts, err := s.Env.Instances(ids) + c.Assert(err, jc.ErrorIsNil) + + c.Check(insts, jc.DeepEquals, expected) +} + +func (s *environInstSuite) TestInstancesAPI(c *gc.C) { + ids := []instance.Id{"spam", "eggs", "ham"} + s.Env.Instances(ids) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "Instances", + Args: []interface{}{ + s.Prefix + "machine-", + lxdclient.AliveStatuses, + }, + }}) +} + +func (s *environInstSuite) TestInstancesEmptyArg(c *gc.C) { + insts, err := s.Env.Instances(nil) + + c.Check(insts, gc.HasLen, 0) + c.Check(errors.Cause(err), gc.Equals, environs.ErrNoInstances) +} + +func (s *environInstSuite) TestInstancesInstancesFailed(c *gc.C) { + failure := errors.New("") + s.Stub.SetErrors(failure) + + ids := []instance.Id{"spam"} + insts, err := s.Env.Instances(ids) + + c.Check(insts, jc.DeepEquals, []instance.Instance{nil}) + c.Check(errors.Cause(err), gc.Equals, failure) +} + +func (s *environInstSuite) TestInstancesPartialMatch(c *gc.C) { + raw := s.NewRawInstance(c, "spam") + expected := s.NewInstance(c, "spam") + s.Client.Insts = []lxdclient.Instance{*raw} + + ids := []instance.Id{"spam", "eggs"} + insts, err := s.Env.Instances(ids) + + c.Check(insts, jc.DeepEquals, []instance.Instance{expected, nil}) + c.Check(errors.Cause(err), gc.Equals, environs.ErrPartialInstances) +} + +func (s *environInstSuite) TestInstancesNoMatch(c *gc.C) { + raw := s.NewRawInstance(c, "spam") + s.Client.Insts = []lxdclient.Instance{*raw} + + ids := []instance.Id{"eggs"} + insts, err := s.Env.Instances(ids) + + c.Check(insts, jc.DeepEquals, []instance.Instance{nil}) + c.Check(errors.Cause(err), gc.Equals, environs.ErrNoInstances) +} + +func (s *environInstSuite) TestControllerInstancesOkay(c *gc.C) { + s.Client.Insts = []lxdclient.Instance{*s.RawInstance} + + ids, err := s.Env.ControllerInstances() + c.Assert(err, jc.ErrorIsNil) + + c.Check(ids, jc.DeepEquals, []instance.Id{"spam"}) +} + +func (s *environInstSuite) TestControllerInstancesNotBootstrapped(c *gc.C) { + _, err := s.Env.ControllerInstances() + + c.Check(err, gc.Equals, environs.ErrNotBootstrapped) +} + +func (s *environInstSuite) TestControllerInstancesMixed(c *gc.C) { + other := lxdclient.NewInstance(lxdclient.InstanceSummary{}, nil) + s.Client.Insts = []lxdclient.Instance{*s.RawInstance, *other} + + ids, err := s.Env.ControllerInstances() + c.Assert(err, jc.ErrorIsNil) + + c.Check(ids, jc.DeepEquals, []instance.Id{"spam"}) +} === added file 'src/github.com/juju/juju/provider/lxd/environ_network.go' --- src/github.com/juju/juju/provider/lxd/environ_network.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ_network.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,54 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxd
+
+import (
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/network"
+	"github.com/juju/juju/provider/common"
+)
+
+// globalFirewallName returns the name to use for the global firewall.
+func (env *environ) globalFirewallName() string {
+	return common.EnvFullName(env)
+}
+
+// OpenPorts opens the given port ranges for the whole environment.
+// Must only be used if the environment was set up with the
+// FwGlobal firewall mode.
+func (env *environ) OpenPorts(ports []network.PortRange) error {
+	err := env.raw.OpenPorts(env.globalFirewallName(), ports...)
+	if errors.IsNotImplemented(err) {
+		// TODO(ericsnow) for now...
+		return nil
+	}
+	return errors.Trace(err)
+}
+
+// ClosePorts closes the given port ranges for the whole environment.
+// Must only be used if the environment was set up with the
+// FwGlobal firewall mode.
+func (env *environ) ClosePorts(ports []network.PortRange) error {
+	err := env.raw.ClosePorts(env.globalFirewallName(), ports...)
+	if errors.IsNotImplemented(err) {
+		// TODO(ericsnow) for now...
+		return nil
+	}
+	return errors.Trace(err)
+}
+
+// Ports returns the port ranges opened for the whole environment.
+// Must only be used if the environment was set up with the
+// FwGlobal firewall mode.
+func (env *environ) Ports() ([]network.PortRange, error) {
+	ports, err := env.raw.Ports(env.globalFirewallName())
+	if errors.IsNotImplemented(err) {
+		// TODO(ericsnow) for now...
+		return nil, nil
+	}
+	return ports, errors.Trace(err)
+}

=== added file 'src/github.com/juju/juju/provider/lxd/environ_network_test.go'
--- src/github.com/juju/juju/provider/lxd/environ_network_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/lxd/environ_network_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,89 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +// +build go1.3 + +package lxd_test + +import ( + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/provider/lxd" +) + +type environNetSuite struct { + lxd.BaseSuite +} + +var _ = gc.Suite(&environNetSuite{}) + +func (s *environNetSuite) TestGlobalFirewallName(c *gc.C) { + uuid, _ := s.Config.UUID() + fwname := lxd.GlobalFirewallName(s.Env) + + c.Check(fwname, gc.Equals, "juju-"+uuid) +} + +func (s *environNetSuite) TestOpenPortsOkay(c *gc.C) { + err := s.Env.OpenPorts(s.Ports) + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environNetSuite) TestOpenPortsAPI(c *gc.C) { + fwname := lxd.GlobalFirewallName(s.Env) + err := s.Env.OpenPorts(s.Ports) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "OpenPorts", + Args: []interface{}{ + fwname, + s.Ports, + }, + }}) +} + +func (s *environNetSuite) TestClosePortsOkay(c *gc.C) { + err := s.Env.ClosePorts(s.Ports) + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environNetSuite) TestClosePortsAPI(c *gc.C) { + fwname := lxd.GlobalFirewallName(s.Env) + err := s.Env.ClosePorts(s.Ports) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "ClosePorts", + Args: []interface{}{ + fwname, + s.Ports, + }, + }}) +} + +func (s *environNetSuite) TestPortsOkay(c *gc.C) { + s.Firewaller.PortRanges = s.Ports + + ports, err := s.Env.Ports() + c.Assert(err, jc.ErrorIsNil) + + c.Check(ports, jc.DeepEquals, s.Ports) +} + +func (s *environNetSuite) TestPortsAPI(c *gc.C) { + fwname := lxd.GlobalFirewallName(s.Env) + _, err := s.Env.Ports() + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "Ports", + Args: []interface{}{ + fwname, + }, + }}) +} === added file 'src/github.com/juju/juju/provider/lxd/environ_policy.go' --- src/github.com/juju/juju/provider/lxd/environ_policy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ_policy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,113 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/errors" + "github.com/juju/utils/arch" + + "github.com/juju/juju/constraints" + "github.com/juju/juju/network" +) + +var supportedContainerTypes = []string{ + "lxd", +} + +type policyProvider interface { + // SupportedArchitectures returns the list of image architectures + // supported by this environment. + SupportedArchitectures() ([]string, error) +} + +type lxdPolicyProvider struct{} + +// SupportedArchitectures returns the image architectures which can +// be hosted by this environment. +func (pp *lxdPolicyProvider) SupportedArchitectures() ([]string, error) { + // TODO(ericsnow) Use common.SupportedArchitectures? + localArch := arch.HostArch() + return []string{localArch}, nil +} + +// PrecheckInstance verifies that the provided series and constraints +// are valid for use in creating an instance in this environment. +func (env *environ) PrecheckInstance(series string, cons constraints.Value, placement string) error { + if _, err := env.parsePlacement(placement); err != nil { + return errors.Trace(err) + } + + if cons.HasInstanceType() { + return errors.Errorf("LXD does not support instance types (got %q)", *cons.InstanceType) + } + + return nil +} + +// SupportedArchitectures returns the image architectures which can +// be hosted by this environment. 
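+// With the lxdPolicyProvider above, that list is effectively just the
+// host's architecture (arch.HostArch), since instances run on the local
+// LXD daemon; a remote-backed policyProvider could report differently.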
+func (env *environ) SupportedArchitectures() ([]string, error) {
+	// TODO(ericsnow) The supported arch depends on the targeted
+	// remote. Thus we may need to support the remote as a constraint.
+	arches, err := env.raw.SupportedArchitectures()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return arches, nil
+}
+
+var unsupportedConstraints = []string{
+	constraints.CpuCores,
+	constraints.CpuPower,
+	//TODO(ericsnow) Add constraints.Mem as unsupported?
+	constraints.InstanceType,
+	constraints.Tags,
+}
+
+// ConstraintsValidator returns a Validator value which is used to
+// validate and merge constraints.
+func (env *environ) ConstraintsValidator() (constraints.Validator, error) {
+	validator := constraints.NewValidator()
+
+	// Register conflicts.
+
+	// We don't have any conflicts to register.
+
+	// Register unsupported constraints.
+
+	validator.RegisterUnsupported(unsupportedConstraints)
+
+	// Register the constraints vocab.
+
+	// TODO(ericsnow) This depends on the targeted remote host.
+	supportedArches, err := env.SupportedArchitectures()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	validator.RegisterVocabulary(constraints.Arch, supportedArches)
+
+	// TODO(ericsnow) Get this working...
+	//validator.RegisterVocabulary(constraints.Container, supportedContainerTypes)
+
+	return validator, nil
+}
+
+// environ provides SupportsUnitPlacement (a method of the
+// state.EnvironCapability interface) by embedding
+// common.SupportsUnitPlacementPolicy.
+
+// SupportNetworks returns whether the environment supports
+// specifying networks for services and machines.
+func (env *environ) SupportNetworks() bool {
+	return false
+}
+
+// SupportAddressAllocation takes a network.Id and returns a bool
+// and an error. The bool indicates whether that network supports
+// static IP address allocation.
+func (env *environ) SupportAddressAllocation(netID network.Id) (bool, error) {
+	return false, nil
+}

=== added file 'src/github.com/juju/juju/provider/lxd/environ_policy_test.go'
--- src/github.com/juju/juju/provider/lxd/environ_policy_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/lxd/environ_policy_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,207 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +// +build go1.3 + +package lxd_test + +import ( + "sort" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/constraints" + "github.com/juju/juju/provider/lxd" + "github.com/juju/juju/testing" +) + +type environPolSuite struct { + lxd.BaseSuite +} + +var _ = gc.Suite(&environPolSuite{}) + +func (s *environPolSuite) TestPrecheckInstanceOkay(c *gc.C) { + cons := constraints.Value{} + placement := "" + err := s.Env.PrecheckInstance(testing.FakeDefaultSeries, cons, placement) + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environPolSuite) TestPrecheckInstanceAPI(c *gc.C) { + cons := constraints.Value{} + placement := "" + err := s.Env.PrecheckInstance(testing.FakeDefaultSeries, cons, placement) + c.Assert(err, jc.ErrorIsNil) + + s.CheckNoAPI(c) +} + +func (s *environPolSuite) TestPrecheckInstanceHasInstanceType(c *gc.C) { + cons := constraints.MustParse("instance-type=some-instance-type") + placement := "" + err := s.Env.PrecheckInstance(testing.FakeDefaultSeries, cons, placement) + + c.Check(err, gc.ErrorMatches, `LXD does not support instance types.*`) +} + +func (s *environPolSuite) TestPrecheckInstanceDiskSize(c *gc.C) { + cons := constraints.MustParse("root-disk=1G") + placement := "" + err := s.Env.PrecheckInstance(testing.FakeDefaultSeries, cons, placement) + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environPolSuite) TestPrecheckInstanceUnsupportedArch(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + cons := constraints.MustParse("arch=i386") + placement := "" + err := s.Env.PrecheckInstance(testing.FakeDefaultSeries, cons, placement) + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environPolSuite) TestPrecheckInstanceAvailZone(c *gc.C) { + cons := constraints.Value{} + placement := "zone=a-zone" + err := s.Env.PrecheckInstance(testing.FakeDefaultSeries, cons, placement) + + c.Check(err, gc.ErrorMatches, `unknown placement directive: .*`) +} + +func (s *environPolSuite) TestSupportedArchitecturesOkay(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + archList, err := s.Env.SupportedArchitectures() + c.Assert(err, jc.ErrorIsNil) + + c.Check(archList, jc.SameContents, []string{arch.AMD64}) +} + +func (s *environPolSuite) TestConstraintsValidatorOkay(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse("arch=amd64") + unsupported, err := validator.Validate(cons) + c.Assert(err, jc.ErrorIsNil) + + c.Check(unsupported, gc.HasLen, 0) +} + +func (s *environPolSuite) TestConstraintsValidatorEmpty(c *gc.C) { + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + unsupported, err := validator.Validate(constraints.Value{}) + c.Assert(err, jc.ErrorIsNil) + + c.Check(unsupported, gc.HasLen, 0) +} + +func (s *environPolSuite) TestConstraintsValidatorUnsupported(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse(strings.Join([]string{ + "arch=amd64", + "tags=foo", + "mem=3", + "instance-type=some-type", + "cpu-cores=2", + "cpu-power=250", + }, " ")) + unsupported, err := validator.Validate(cons) + c.Assert(err, jc.ErrorIsNil) + + expected := []string{ + "tags", + "instance-type", + "cpu-cores", + "cpu-power", + } + sort.Strings(expected) + sort.Strings(unsupported) + c.Check(unsupported, jc.DeepEquals, expected) +} + +func (s *environPolSuite) 
TestConstraintsValidatorVocabArchKnown(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse("arch=amd64") + _, err = validator.Validate(cons) + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environPolSuite) TestConstraintsValidatorVocabArchUnknown(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse("arch=ppc64el") + _, err = validator.Validate(cons) + + c.Check(err, gc.ErrorMatches, "invalid constraint value: arch=ppc64el\nvalid values are:.*") +} + +func (s *environPolSuite) TestConstraintsValidatorVocabContainerUnknown(c *gc.C) { + c.Skip("this will fail until we add a container vocabulary") + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse("container=lxc") + _, err = validator.Validate(cons) + + c.Check(err, gc.ErrorMatches, "invalid constraint value: container=lxc\nvalid values are:.*") +} + +func (s *environPolSuite) TestConstraintsValidatorConflicts(c *gc.C) { + s.Policy.Arches = []string{arch.AMD64} + + validator, err := s.Env.ConstraintsValidator() + c.Assert(err, jc.ErrorIsNil) + + cons := constraints.MustParse("instance-type=n1-standard-1") + consFallback := constraints.MustParse("cpu-cores=2 cpu-power=1000 mem=10000 tags=bar") + merged, err := validator.Merge(consFallback, cons) + c.Assert(err, jc.ErrorIsNil) + + // tags is not supported, but we're not validating here... + expected := constraints.MustParse("instance-type=n1-standard-1 tags=bar cpu-cores=2 cpu-power=1000 mem=10000") + c.Check(merged, jc.DeepEquals, expected) +} + +func (s *environPolSuite) TestSupportNetworks(c *gc.C) { + isSupported := s.Env.SupportNetworks() + + c.Check(isSupported, jc.IsFalse) +} + +func (s *environPolSuite) TestSupportAddressAllocation(c *gc.C) { + isSupported, err := s.Env.SupportAddressAllocation("some-network") + c.Assert(err, jc.ErrorIsNil) + + c.Check(isSupported, jc.IsFalse) +} + +func (s *environPolSuite) TestSupportAddressAllocationEmpty(c *gc.C) { + isSupported, err := s.Env.SupportAddressAllocation("") + c.Assert(err, jc.ErrorIsNil) + + c.Check(isSupported, jc.IsFalse) +} === added file 'src/github.com/juju/juju/provider/lxd/environ_raw.go' --- src/github.com/juju/juju/provider/lxd/environ_raw.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ_raw.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
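+
+// rawProvider (defined below) composes narrow interfaces (lxdInstances,
+// lxdProfiles, lxdImages) instead of holding the concrete LXD client, so
+// tests can swap in fakes piecemeal. A sketch of that substitution,
+// mirroring what BaseSuite.SetUpTest does in testing_test.go:
+//
+//	env.raw = &rawProvider{
+//		lxdInstances:   stubClient,
+//		lxdImages:      stubClient,
+//		Firewaller:     stubFirewaller,
+//		policyProvider: stubPolicy,
+//	}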
+ +// +build go1.3 + +package lxd + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/network" + "github.com/juju/juju/provider/common" + "github.com/juju/juju/tools/lxdclient" +) + +type rawProvider struct { + lxdInstances + lxdProfiles + lxdImages + common.Firewaller + policyProvider +} + +type lxdInstances interface { + Instances(string, ...string) ([]lxdclient.Instance, error) + AddInstance(lxdclient.InstanceSpec) (*lxdclient.Instance, error) + RemoveInstances(string, ...string) error + Addresses(string) ([]network.Address, error) +} + +type lxdProfiles interface { + CreateProfile(string, map[string]string) error + HasProfile(string) (bool, error) +} + +type lxdImages interface { + EnsureImageExists(series string) error +} + +func newRawProvider(ecfg *environConfig) (*rawProvider, error) { + client, err := newClient(ecfg) + if err != nil { + return nil, errors.Trace(err) + } + + firewaller, err := newFirewaller(ecfg) + if err != nil { + return nil, errors.Trace(err) + } + + policy := &lxdPolicyProvider{} + + raw := &rawProvider{ + lxdInstances: client, + lxdProfiles: client, + lxdImages: client, + Firewaller: firewaller, + policyProvider: policy, + } + return raw, nil +} + +func newClient(ecfg *environConfig) (*lxdclient.Client, error) { + clientCfg, err := ecfg.clientConfig() + if err != nil { + return nil, errors.Trace(err) + } + + client, err := lxdclient.Connect(clientCfg) + if err != nil { + return nil, errors.Trace(err) + } + + return client, nil +} + +func newFirewaller(ecfg *environConfig) (common.Firewaller, error) { + return common.NewFirewaller(), nil +} === added file 'src/github.com/juju/juju/provider/lxd/environ_test.go' --- src/github.com/juju/juju/provider/lxd/environ_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/environ_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,122 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd_test + +import ( + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloudconfig/instancecfg" + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" + "github.com/juju/juju/provider/lxd" +) + +type environSuite struct { + lxd.BaseSuite +} + +var _ = gc.Suite(&environSuite{}) + +func (s *environSuite) TestName(c *gc.C) { + name := s.Env.Name() + + c.Check(name, gc.Equals, "lxd") +} + +func (s *environSuite) TestProvider(c *gc.C) { + provider := s.Env.Provider() + + c.Check(provider, gc.Equals, lxd.Provider) +} + +func (s *environSuite) TestSetConfigOkay(c *gc.C) { + err := s.Env.SetConfig(s.Config) + c.Assert(err, jc.ErrorIsNil) + + c.Check(lxd.ExposeEnvConfig(s.Env), jc.DeepEquals, s.EnvConfig) + // Ensure the client did not change. 
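+	// (SetConfig should update the config in place while reusing the
+	// existing raw client, so the same stub client must come back.)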
+ c.Check(lxd.ExposeEnvClient(s.Env), gc.Equals, s.Client) +} + +func (s *environSuite) TestSetConfigNoAPI(c *gc.C) { + err := s.Env.SetConfig(s.Config) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCallNames(c, "asNonLocal") +} + +func (s *environSuite) TestSetConfigMissing(c *gc.C) { + lxd.UnsetEnvConfig(s.Env) + + err := s.Env.SetConfig(s.Config) + + c.Check(err, gc.ErrorMatches, "cannot set config on uninitialized env") +} + +func (s *environSuite) TestConfig(c *gc.C) { + cfg := s.Env.Config() + + c.Check(cfg, jc.DeepEquals, s.Config) +} + +func (s *environSuite) TestBootstrapOkay(c *gc.C) { + s.Common.BootstrapResult = &environs.BootstrapResult{ + Arch: "amd64", + Series: "trusty", + Finalize: func(environs.BootstrapContext, *instancecfg.InstanceConfig) error { + return nil + }, + } + + ctx := envtesting.BootstrapContext(c) + params := environs.BootstrapParams{} + result, err := s.Env.Bootstrap(ctx, params) + c.Assert(err, jc.ErrorIsNil) + + c.Check(result.Arch, gc.Equals, "amd64") + c.Check(result.Series, gc.Equals, "trusty") + // We don't check bsFinalizer because functions cannot be compared. + c.Check(result.Finalize, gc.NotNil) +} + +func (s *environSuite) TestBootstrapAPI(c *gc.C) { + ctx := envtesting.BootstrapContext(c) + params := environs.BootstrapParams{} + _, err := s.Env.Bootstrap(ctx, params) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "Bootstrap", + Args: []interface{}{ + ctx, + params, + }, + }}) +} + +func (s *environSuite) TestDestroy(c *gc.C) { + err := s.Env.Destroy() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *environSuite) TestDestroyAPI(c *gc.C) { + err := s.Env.Destroy() + c.Assert(err, jc.ErrorIsNil) + + fwname := s.Prefix[:len(s.Prefix)-1] + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "Ports", + Args: []interface{}{ + fwname, + }, + }, { + FuncName: "Destroy", + Args: nil, + }}) +} === added file 'src/github.com/juju/juju/provider/lxd/export_test.go' --- src/github.com/juju/juju/provider/lxd/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,37 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/juju/environs" + "github.com/juju/juju/tools/lxdclient" +) + +var ( + Provider environs.EnvironProvider = providerInstance + GlobalFirewallName = (*environ).globalFirewallName + NewInstance = newInstance +) + +func ExposeInstRaw(inst *environInstance) *lxdclient.Instance { + return inst.raw +} + +func ExposeInstEnv(inst *environInstance) *environ { + return inst.env +} + +func UnsetEnvConfig(env *environ) { + env.ecfg = nil +} + +func ExposeEnvConfig(env *environ) *environConfig { + return env.ecfg +} + +func ExposeEnvClient(env *environ) lxdInstances { + return env.raw.lxdInstances +} === added file 'src/github.com/juju/juju/provider/lxd/init.go' --- src/github.com/juju/juju/provider/lxd/init.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/init.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,21 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
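+
+// Registering the provider is an import side effect: importing this package
+// (typically as a blank import) is all a caller needs before looking the
+// provider up. A sketch of the usual pattern:
+//
+//	import _ "github.com/juju/juju/provider/lxd"
+//
+//	provider, err := environs.Provider("lxd") // resolves to providerInstance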
+ +// +build go1.3 + +package lxd + +import ( + "github.com/juju/juju/environs" + "github.com/juju/juju/storage/provider/registry" +) + +const ( + providerType = "lxd" +) + +func init() { + environs.RegisterProvider(providerType, providerInstance) + + registry.RegisterEnvironStorageProviders(providerType) +} === added file 'src/github.com/juju/juju/provider/lxd/instance.go' --- src/github.com/juju/juju/provider/lxd/instance.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/instance.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,96 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/instance" + "github.com/juju/juju/network" + "github.com/juju/juju/provider/common" + "github.com/juju/juju/tools/lxdclient" +) + +type environInstance struct { + raw *lxdclient.Instance + env *environ +} + +var _ instance.Instance = (*environInstance)(nil) + +func newInstance(raw *lxdclient.Instance, env *environ) *environInstance { + return &environInstance{ + raw: raw, + env: env, + } +} + +// Id implements instance.Instance. +func (inst *environInstance) Id() instance.Id { + return instance.Id(inst.raw.Name) +} + +// Status implements instance.Instance. +func (inst *environInstance) Status() string { + return inst.raw.Status() +} + +// Addresses implements instance.Instance. +func (inst *environInstance) Addresses() ([]network.Address, error) { + return inst.env.raw.Addresses(inst.raw.Name) +} + +func findInst(id instance.Id, instances []instance.Instance) instance.Instance { + for _, inst := range instances { + if id == inst.Id() { + return inst + } + } + return nil +} + +// firewall stuff + +// OpenPorts opens the given ports on the instance, which +// should have been started with the given machine id. +func (inst *environInstance) OpenPorts(machineID string, ports []network.PortRange) error { + // TODO(ericsnow) Make sure machineId matches inst.Id()? + name := common.MachineFullName(inst.env, machineID) + env := inst.env.getSnapshot() + err := env.raw.OpenPorts(name, ports...) + if errors.IsNotImplemented(err) { + // TODO(ericsnow) for now... + return nil + } + return errors.Trace(err) +} + +// ClosePorts closes the given ports on the instance, which +// should have been started with the given machine id. +func (inst *environInstance) ClosePorts(machineID string, ports []network.PortRange) error { + name := common.MachineFullName(inst.env, machineID) + env := inst.env.getSnapshot() + err := env.raw.ClosePorts(name, ports...) + if errors.IsNotImplemented(err) { + // TODO(ericsnow) for now... + return nil + } + return errors.Trace(err) +} + +// Ports returns the set of ports open on the instance, which +// should have been started with the given machine id. +// The ports are returned as sorted by SortPorts. +func (inst *environInstance) Ports(machineID string) ([]network.PortRange, error) { + name := common.MachineFullName(inst.env, machineID) + env := inst.env.getSnapshot() + ports, err := env.raw.Ports(name) + if errors.IsNotImplemented(err) { + // TODO(ericsnow) for now... + return nil, nil + } + return ports, errors.Trace(err) +} === added file 'src/github.com/juju/juju/provider/lxd/instance_test.go' --- src/github.com/juju/juju/provider/lxd/instance_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/instance_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,98 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd_test + +import ( + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/instance" + "github.com/juju/juju/provider/lxd" + "github.com/juju/juju/tools/lxdclient" +) + +type instanceSuite struct { + lxd.BaseSuite +} + +var _ = gc.Suite(&instanceSuite{}) + +func (s *instanceSuite) TestNewInstance(c *gc.C) { + inst := lxd.NewInstance(s.RawInstance, s.Env) + + c.Check(lxd.ExposeInstRaw(inst), gc.Equals, s.RawInstance) + c.Check(lxd.ExposeInstEnv(inst), gc.Equals, s.Env) + s.CheckNoAPI(c) +} + +func (s *instanceSuite) TestID(c *gc.C) { + id := s.Instance.Id() + + c.Check(id, gc.Equals, instance.Id("spam")) + s.CheckNoAPI(c) +} + +func (s *instanceSuite) TestStatus(c *gc.C) { + status := s.Instance.Status() + + c.Check(status, gc.Equals, lxdclient.StatusRunning) + s.CheckNoAPI(c) +} + +func (s *instanceSuite) TestAddresses(c *gc.C) { + addresses, err := s.Instance.Addresses() + c.Assert(err, jc.ErrorIsNil) + + c.Check(addresses, jc.DeepEquals, s.Addresses) +} + +func (s *instanceSuite) TestOpenPortsAPI(c *gc.C) { + err := s.Instance.OpenPorts("spam", s.Ports) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "OpenPorts", + Args: []interface{}{ + s.InstName, + s.Ports, + }, + }}) +} + +func (s *instanceSuite) TestClosePortsAPI(c *gc.C) { + err := s.Instance.ClosePorts("spam", s.Ports) + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "ClosePorts", + Args: []interface{}{ + s.InstName, + s.Ports, + }, + }}) +} + +func (s *instanceSuite) TestPortsOkay(c *gc.C) { + s.Firewaller.PortRanges = s.Ports + + ports, err := s.Instance.Ports("spam") + c.Assert(err, jc.ErrorIsNil) + + c.Check(ports, jc.DeepEquals, s.Ports) +} + +func (s *instanceSuite) TestPortsAPI(c *gc.C) { + _, err := s.Instance.Ports("spam") + c.Assert(err, jc.ErrorIsNil) + + s.Stub.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "Ports", + Args: []interface{}{ + s.InstName, + }, + }}) +} === added file 'src/github.com/juju/juju/provider/lxd/lxd.go' --- src/github.com/juju/juju/provider/lxd/lxd.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/lxd.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/loggo" + + "github.com/juju/juju/environs/tags" + "github.com/juju/juju/tools/lxdclient" +) + +// The metadata keys used when creating new instances. +const ( + metadataKeyIsState = tags.JujuModel + metadataKeyCloudInit = lxdclient.UserdataKey +) + +// Common metadata values used when creating new instances. +const ( + metadataValueTrue = "true" + metadataValueFalse = "false" +) + +var ( + logger = loggo.GetLogger("juju.provider.lxd") +) === added file 'src/github.com/juju/juju/provider/lxd/package_test.go' --- src/github.com/juju/juju/provider/lxd/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
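+
+// gocheck suites do not run under "go test" on their own; the TestPackage
+// hook below hands control to gocheck, which runs every suite registered in
+// this package via declarations like:
+//
+//	var _ = gc.Suite(&environSuite{})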
+ +// +build go1.3 + +package lxd_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/provider/lxd/provider.go' --- src/github.com/juju/juju/provider/lxd/provider.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/provider.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,110 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" +) + +type environProvider struct { + environProviderCredentials +} + +var providerInstance environProvider + +// Open implements environs.EnvironProvider. +func (environProvider) Open(cfg *config.Config) (environs.Environ, error) { + // TODO(ericsnow) verify prerequisites (see provider/local/prereq.go)? + // TODO(ericsnow) do something similar to correctLocalhostURLs() + // (in provider/local/environprovider.go)? + + env, err := newEnviron(cfg, newRawProvider) + return env, errors.Trace(err) +} + +// PrepareForBootstrap implements environs.EnvironProvider. +func (p environProvider) PrepareForBootstrap(ctx environs.BootstrapContext, args environs.PrepareForBootstrapParams) (environs.Environ, error) { + cfg := args.Config + cfg, err := p.PrepareForCreateEnvironment(cfg) + if err != nil { + return nil, errors.Trace(err) + } + + // TODO(ericsnow) Do some of what happens in local provider's + // PrepareForBootstrap()? Only if "remote" is local host? + + env, err := newEnviron(cfg, newRawProvider) + if err != nil { + return nil, errors.Trace(err) + } + + if ctx.ShouldVerifyCredentials() { + if err := env.verifyCredentials(); err != nil { + return nil, errors.Trace(err) + } + } + return env, nil +} + +// PrepareForCreateEnvironment is specified in the EnvironProvider interface. +func (environProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) { + return cfg, nil +} + +// RestrictedConfigAttributes is specified in the EnvironProvider interface. +func (environProvider) RestrictedConfigAttributes() []string { + return []string{ + "remote-url", + "client-cert", + "client-key", + "server-cert", + } +} + +// Validate implements environs.EnvironProvider. +func (environProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { + if old == nil { + ecfg, err := newValidConfig(cfg, configDefaults) + if err != nil { + return nil, errors.Annotate(err, "invalid config") + } + return ecfg.Config, nil + } + + // The defaults should be set already, so we pass nil. + ecfg, err := newValidConfig(old, nil) + if err != nil { + return nil, errors.Annotate(err, "invalid base config") + } + + if err := ecfg.update(cfg); err != nil { + return nil, errors.Annotate(err, "invalid config change") + } + + return ecfg.Config, nil +} + +// SecretAttrs implements environs.EnvironProvider. +func (environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { + // The defaults should be set already, so we pass nil. + ecfg, err := newValidConfig(cfg, nil) + if err != nil { + return nil, errors.Trace(err) + } + return ecfg.secret(), nil +} + +// DetectRegions implements environs.CloudRegionDetector. +func (environProvider) DetectRegions() ([]cloud.Region, error) { + // For now we just return a hard-coded "localhost" region, + // i.e. the local LXD daemon. 
We may later want to detect + // locally-configured remotes. + return []cloud.Region{{Name: "localhost"}}, nil +} === added file 'src/github.com/juju/juju/provider/lxd/provider_test.go' --- src/github.com/juju/juju/provider/lxd/provider_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/provider_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxd_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" + "github.com/juju/juju/provider/lxd" +) + +var ( + _ = gc.Suite(&providerSuite{}) + _ = gc.Suite(&ProviderFunctionalSuite{}) +) + +type providerSuite struct { + lxd.BaseSuite + + provider environs.EnvironProvider +} + +func (s *providerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + + provider, err := environs.Provider("lxd") + c.Assert(err, jc.ErrorIsNil) + s.provider = provider +} + +func (s *providerSuite) TestDetectRegions(c *gc.C) { + c.Assert(s.provider, gc.Implements, new(environs.CloudRegionDetector)) + regions, err := s.provider.(environs.CloudRegionDetector).DetectRegions() + c.Assert(err, jc.ErrorIsNil) + c.Assert(regions, jc.DeepEquals, []cloud.Region{{Name: "localhost"}}) +} + +func (s *providerSuite) TestRegistered(c *gc.C) { + c.Check(s.provider, gc.Equals, lxd.Provider) +} + +func (s *providerSuite) TestValidate(c *gc.C) { + validCfg, err := s.provider.Validate(s.Config, nil) + c.Assert(err, jc.ErrorIsNil) + validAttrs := validCfg.AllAttrs() + + c.Check(s.Config.AllAttrs(), gc.DeepEquals, validAttrs) +} + +func (s *providerSuite) TestSecretAttrs(c *gc.C) { + obtainedAttrs, err := s.provider.SecretAttrs(s.Config) + c.Assert(err, jc.ErrorIsNil) + + c.Check(obtainedAttrs, gc.DeepEquals, map[string]string{"client-key": ""}) +} + +type ProviderFunctionalSuite struct { + lxd.BaseSuite + + provider environs.EnvironProvider +} + +func (s *ProviderFunctionalSuite) SetUpTest(c *gc.C) { + if !s.IsRunningLocally(c) { + c.Skip("LXD not running locally") + } + + s.BaseSuite.SetUpTest(c) + + provider, err := environs.Provider("lxd") + c.Assert(err, jc.ErrorIsNil) + + s.provider = provider +} + +func (s *ProviderFunctionalSuite) TestOpen(c *gc.C) { + env, err := s.provider.Open(s.Config) + c.Assert(err, jc.ErrorIsNil) + envConfig := env.Config() + + c.Check(envConfig.Name(), gc.Equals, "testenv") +} + +func (s *ProviderFunctionalSuite) TestPrepareForBootstrap(c *gc.C) { + env, err := s.provider.PrepareForBootstrap(envtesting.BootstrapContext(c), environs.PrepareForBootstrapParams{ + Config: s.Config, + }) + c.Assert(err, jc.ErrorIsNil) + + c.Check(env, gc.NotNil) +} === added file 'src/github.com/juju/juju/provider/lxd/testing_test.go' --- src/github.com/juju/juju/provider/lxd/testing_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/lxd/testing_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,562 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
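+
+// The fakes in this file all record into one shared gitjujutesting.Stub, so
+// a test can assert the full cross-component call sequence in one place. A
+// sketch of the pattern (values illustrative), using stubClient from below:
+//
+//	stub := &gitjujutesting.Stub{}
+//	client := &stubClient{stub: stub}
+//	_ = client.EnsureImageExists("trusty")
+//	stub.CheckCallNames(c, "EnsureImageExists")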
+ +// +build go1.3 + +package lxd + +import ( + "crypto/tls" + "encoding/pem" + "os" + + "github.com/juju/errors" + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloudconfig/instancecfg" + "github.com/juju/juju/cloudconfig/providerinit" + "github.com/juju/juju/constraints" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/instance" + "github.com/juju/juju/network" + "github.com/juju/juju/testing" + "github.com/juju/juju/tools" + "github.com/juju/juju/tools/lxdclient" + "github.com/juju/juju/version" +) + +// These values are stub LXD client credentials for use in tests. +const ( + PublicKey = `-----BEGIN CERTIFICATE----- +... +... +... +... +... +... +... +... +... +... +... +... +... +... +-----END CERTIFICATE----- +` + PrivateKey = `-----BEGIN PRIVATE KEY----- +... +... +... +... +... +... +... +... +... +... +... +... +... +... +-----END PRIVATE KEY----- +` +) + +// These are stub config values for use in tests. +var ( + ConfigAttrs = testing.FakeConfig().Merge(testing.Attrs{ + "type": "lxd", + "namespace": "", + "remote-url": "", + "client-cert": "", + "client-key": "", + "server-cert": "", + "uuid": "2d02eeac-9dbb-11e4-89d3-123b93f75cba", + }) +) + +// We test these here since they are not exported. +var ( + _ environs.Environ = (*environ)(nil) + _ instance.Instance = (*environInstance)(nil) +) + +type BaseSuiteUnpatched struct { + gitjujutesting.IsolationSuite + + osPathOrig string + + Config *config.Config + EnvConfig *environConfig + Env *environ + Prefix string + + Addresses []network.Address + Instance *environInstance + RawInstance *lxdclient.Instance + InstName string + Hardware *lxdclient.InstanceHardware + HWC *instance.HardwareCharacteristics + Metadata map[string]string + StartInstArgs environs.StartInstanceParams + //InstanceType instances.InstanceType + + Ports []network.PortRange +} + +func (s *BaseSuiteUnpatched) SetUpSuite(c *gc.C) { + s.osPathOrig = os.Getenv("PATH") + if s.osPathOrig == "" { + // TODO(ericsnow) This shouldn't happen. However, an undiagnosed + // bug in testing.IsolationSuite is causing $PATH to remain unset + // sometimes. Once that is cleared up this special-case can go + // away. 
+ s.osPathOrig = + "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin" + } + s.IsolationSuite.SetUpTest(c) +} + +func (s *BaseSuiteUnpatched) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.initEnv(c) + s.initInst(c) + s.initNet(c) +} + +func (s *BaseSuiteUnpatched) initEnv(c *gc.C) { + s.Env = &environ{ + name: "lxd", + } + cfg := s.NewConfig(c, nil) + s.setConfig(c, cfg) +} + +func (s *BaseSuiteUnpatched) initInst(c *gc.C) { + tools := []*tools.Tools{{ + Version: version.Binary{Arch: arch.AMD64, Series: "trusty"}, + URL: "https://example.org", + }} + + cons := constraints.Value{ + // nothing + } + + instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(cons, cons, "trusty", "") + c.Assert(err, jc.ErrorIsNil) + + instanceConfig.Tools = tools[0] + instanceConfig.AuthorizedKeys = s.Config.AuthorizedKeys() + + userData, err := providerinit.ComposeUserData(instanceConfig, nil, lxdRenderer{}) + c.Assert(err, jc.ErrorIsNil) + + s.Hardware = &lxdclient.InstanceHardware{ + Architecture: arch.AMD64, + NumCores: 1, + MemoryMB: 3750, + } + var archName string = arch.AMD64 + var numCores uint64 = 1 + var memoryMB uint64 = 3750 + s.HWC = &instance.HardwareCharacteristics{ + Arch: &archName, + CpuCores: &numCores, + Mem: &memoryMB, + } + + s.Metadata = map[string]string{ // userdata + metadataKeyIsState: metadataValueTrue, // bootstrap + metadataKeyCloudInit: string(userData), + } + s.Addresses = []network.Address{{ + Value: "10.0.0.1", + Type: network.IPv4Address, + Scope: network.ScopeCloudLocal, + }} + s.Instance = s.NewInstance(c, "spam") + s.RawInstance = s.Instance.raw + s.InstName = s.Prefix + "machine-spam" + + s.StartInstArgs = environs.StartInstanceParams{ + InstanceConfig: instanceConfig, + Tools: tools, + Constraints: cons, + } +} + +func (s *BaseSuiteUnpatched) initNet(c *gc.C) { + s.Ports = []network.PortRange{{ + FromPort: 80, + ToPort: 80, + Protocol: "tcp", + }} +} + +func (s *BaseSuiteUnpatched) setConfig(c *gc.C, cfg *config.Config) { + s.Config = cfg + ecfg, err := newValidConfig(cfg, configDefaults) + c.Assert(err, jc.ErrorIsNil) + s.EnvConfig = ecfg + uuid, _ := cfg.UUID() + s.Env.uuid = uuid + s.Env.ecfg = s.EnvConfig + s.Prefix = "juju-" + uuid + "-" +} + +func (s *BaseSuiteUnpatched) NewConfig(c *gc.C, updates testing.Attrs) *config.Config { + if updates == nil { + updates = make(testing.Attrs) + } + var err error + cfg := testing.ModelConfig(c) + cfg, err = cfg.Apply(ConfigAttrs) + c.Assert(err, jc.ErrorIsNil) + if raw := updates[cfgNamespace]; raw == nil || raw.(string) == "" { + updates[cfgNamespace] = cfg.Name() + } + cfg, err = cfg.Apply(updates) + c.Assert(err, jc.ErrorIsNil) + return cfg +} + +func (s *BaseSuiteUnpatched) UpdateConfig(c *gc.C, attrs map[string]interface{}) { + cfg, err := s.Config.Apply(attrs) + c.Assert(err, jc.ErrorIsNil) + s.setConfig(c, cfg) +} + +func (s *BaseSuiteUnpatched) NewRawInstance(c *gc.C, name string) *lxdclient.Instance { + summary := lxdclient.InstanceSummary{ + Name: name, + Status: lxdclient.StatusRunning, + Hardware: *s.Hardware, + Metadata: s.Metadata, + } + instanceSpec := lxdclient.InstanceSpec{ + Name: name, + Profiles: []string{}, + Ephemeral: false, + Metadata: s.Metadata, + } + return lxdclient.NewInstance(summary, &instanceSpec) +} + +func (s *BaseSuiteUnpatched) NewInstance(c *gc.C, name string) *environInstance { + raw := s.NewRawInstance(c, name) + return newInstance(raw, s.Env) +} + +func (s *BaseSuiteUnpatched) IsRunningLocally(c *gc.C) bool { + restore := 
gitjujutesting.PatchEnvPathPrepend(s.osPathOrig) + defer restore() + + running, err := lxdclient.IsRunningLocally() + c.Assert(err, jc.ErrorIsNil) + return running +} + +type BaseSuite struct { + BaseSuiteUnpatched + + Stub *gitjujutesting.Stub + Client *stubClient + Firewaller *stubFirewaller + Common *stubCommon + Policy *stubPolicy +} + +func (s *BaseSuite) SetUpTest(c *gc.C) { + // Do this *before* s.initEnv() gets called. + s.PatchValue(&asNonLocal, s.asNonLocal) + + s.BaseSuiteUnpatched.SetUpTest(c) + + s.Stub = &gitjujutesting.Stub{} + s.Client = &stubClient{stub: s.Stub} + s.Firewaller = &stubFirewaller{stub: s.Stub} + s.Common = &stubCommon{stub: s.Stub} + s.Policy = &stubPolicy{stub: s.Stub} + + // Patch out all expensive external deps. + s.Env.raw = &rawProvider{ + lxdInstances: s.Client, + lxdImages: s.Client, + Firewaller: s.Firewaller, + policyProvider: s.Policy, + } + s.Env.base = s.Common +} + +func (s *BaseSuite) CheckNoAPI(c *gc.C) { + s.Stub.CheckCalls(c, nil) +} + +func (s *BaseSuite) asNonLocal(clientCfg lxdclient.Config) (lxdclient.Config, error) { + if s.Stub == nil { + return clientCfg, nil + } + s.Stub.AddCall("asNonLocal", clientCfg) + if err := s.Stub.NextErr(); err != nil { + return clientCfg, errors.Trace(err) + } + + return clientCfg, nil +} + +func NewBaseConfig(c *gc.C) *config.Config { + var err error + cfg := testing.ModelConfig(c) + + cfg, err = cfg.Apply(ConfigAttrs) + c.Assert(err, jc.ErrorIsNil) + + cfg, err = cfg.Apply(map[string]interface{}{ + // Default the namespace to the env name. + cfgNamespace: cfg.Name(), + }) + c.Assert(err, jc.ErrorIsNil) + + return cfg +} + +func NewCustomBaseConfig(c *gc.C, updates map[string]interface{}) *config.Config { + if updates == nil { + updates = make(testing.Attrs) + } + + cfg := NewBaseConfig(c) + + cfg, err := cfg.Apply(updates) + c.Assert(err, jc.ErrorIsNil) + + return cfg +} + +type ConfigValues struct { + Namespace string + RemoteURL string + ClientCert string + ClientKey string + ServerCert string +} + +func (cv ConfigValues) CheckCert(c *gc.C) { + certPEM := []byte(cv.ClientCert) + keyPEM := []byte(cv.ClientKey) + + _, err := tls.X509KeyPair(certPEM, keyPEM) + c.Check(err, jc.ErrorIsNil) + + block, remainder := pem.Decode(certPEM) + c.Check(block.Type, gc.Equals, "CERTIFICATE") + c.Check(remainder, gc.HasLen, 0) + + block, remainder = pem.Decode(keyPEM) + c.Check(block.Type, gc.Equals, "RSA PRIVATE KEY") + c.Check(remainder, gc.HasLen, 0) + + if cv.ServerCert != "" { + block, remainder = pem.Decode([]byte(cv.ServerCert)) + c.Check(block.Type, gc.Equals, "CERTIFICATE") + c.Check(remainder, gc.HasLen, 1) + } +} + +type Config struct { + *environConfig +} + +func NewConfig(cfg *config.Config) *Config { + ecfg := newConfig(cfg) + return &Config{ecfg} +} + +func NewValidConfig(cfg *config.Config) (*Config, error) { + ecfg, err := newValidConfig(cfg, nil) + return &Config{ecfg}, err +} + +func NewValidDefaultConfig(cfg *config.Config) (*Config, error) { + ecfg, err := newValidConfig(cfg, configDefaults) + return &Config{ecfg}, err +} + +func (ecfg *Config) Values(c *gc.C) (ConfigValues, map[string]interface{}) { + c.Assert(ecfg.attrs, jc.DeepEquals, ecfg.UnknownAttrs()) + + var values ConfigValues + extras := make(map[string]interface{}) + for k, v := range ecfg.attrs { + switch k { + case cfgNamespace: + values.Namespace = v.(string) + case cfgRemoteURL: + values.RemoteURL = v.(string) + case cfgClientCert: + values.ClientCert = v.(string) + case cfgClientKey: + values.ClientKey = v.(string) + case 
cfgServerPEMCert: + values.ServerCert = v.(string) + default: + extras[k] = v + } + } + return values, extras +} + +func (ecfg *Config) Apply(c *gc.C, updates map[string]interface{}) *Config { + cfg, err := ecfg.Config.Apply(updates) + c.Assert(err, jc.ErrorIsNil) + return NewConfig(cfg) +} + +func (ecfg *Config) Validate() error { + return ecfg.validate() +} + +func (ecfg *Config) ClientConfig() (lxdclient.Config, error) { + return ecfg.clientConfig() +} + +func (ecfg *Config) UpdateForClientConfig(clientCfg lxdclient.Config) (*Config, error) { + updated, err := ecfg.updateForClientConfig(clientCfg) + return &Config{updated}, err +} + +type stubCommon struct { + stub *gitjujutesting.Stub + + BootstrapResult *environs.BootstrapResult +} + +func (sc *stubCommon) BootstrapEnv(ctx environs.BootstrapContext, params environs.BootstrapParams) (*environs.BootstrapResult, error) { + sc.stub.AddCall("Bootstrap", ctx, params) + if err := sc.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return sc.BootstrapResult, nil +} + +func (sc *stubCommon) DestroyEnv() error { + sc.stub.AddCall("Destroy") + if err := sc.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubPolicy struct { + stub *gitjujutesting.Stub + + Arches []string +} + +func (s *stubPolicy) SupportedArchitectures() ([]string, error) { + s.stub.AddCall("SupportedArchitectures") + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Arches, nil +} + +type stubClient struct { + stub *gitjujutesting.Stub + + Insts []lxdclient.Instance + Inst *lxdclient.Instance +} + +func (conn *stubClient) Instances(prefix string, statuses ...string) ([]lxdclient.Instance, error) { + conn.stub.AddCall("Instances", prefix, statuses) + if err := conn.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return conn.Insts, nil +} + +func (conn *stubClient) AddInstance(spec lxdclient.InstanceSpec) (*lxdclient.Instance, error) { + conn.stub.AddCall("AddInstance", spec) + if err := conn.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return conn.Inst, nil +} + +func (conn *stubClient) RemoveInstances(prefix string, ids ...string) error { + conn.stub.AddCall("RemoveInstances", prefix, ids) + if err := conn.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (conn *stubClient) EnsureImageExists(series string) error { + conn.stub.AddCall("EnsureImageExists", series) + if err := conn.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (conn *stubClient) Addresses(name string) ([]network.Address, error) { + conn.stub.AddCall("Addresses", name) + if err := conn.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return []network.Address{network.Address{ + Value: "10.0.0.1", + Type: network.IPv4Address, + Scope: network.ScopeCloudLocal, + }}, nil +} + +// TODO(ericsnow) Move stubFirewaller to environs/testing or provider/common/testing. 
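+// stubFirewaller fakes the firewaller dependency: each method records its
+// call and arguments on the shared stub, then returns the canned PortRanges
+// or the next queued error, so tests can assert firewall traffic without a
+// real backend.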
+
+type stubFirewaller struct {
+	stub *gitjujutesting.Stub
+
+	PortRanges []network.PortRange
+}
+
+func (fw *stubFirewaller) Ports(fwname string) ([]network.PortRange, error) {
+	fw.stub.AddCall("Ports", fwname)
+	if err := fw.stub.NextErr(); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return fw.PortRanges, nil
+}
+
+func (fw *stubFirewaller) OpenPorts(fwname string, ports ...network.PortRange) error {
+	fw.stub.AddCall("OpenPorts", fwname, ports)
+	if err := fw.stub.NextErr(); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+func (fw *stubFirewaller) ClosePorts(fwname string, ports ...network.PortRange) error {
+	fw.stub.AddCall("ClosePorts", fwname, ports)
+	if err := fw.stub.NextErr(); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
=== added file 'src/github.com/juju/juju/provider/lxd/userdata.go'
--- src/github.com/juju/juju/provider/lxd/userdata.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/lxd/userdata.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,26 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxd
+
+import (
+	"github.com/juju/errors"
+	jujuos "github.com/juju/utils/os"
+
+	"github.com/juju/juju/cloudconfig/cloudinit"
+	"github.com/juju/juju/cloudconfig/providerinit/renderers"
+)
+
+type lxdRenderer struct{}
+
+// Render implements renderers.ProviderRenderer.
+func (lxdRenderer) Render(cfg cloudinit.CloudConfig, os jujuos.OSType) ([]byte, error) {
+	switch os {
+	case jujuos.Ubuntu, jujuos.CentOS:
+		return renderers.RenderYAML(cfg)
+	default:
+		return nil, errors.Errorf("cannot encode userdata for OS %q", os)
+	}
+}
=== modified file 'src/github.com/juju/juju/provider/maas/config.go'
--- src/github.com/juju/juju/provider/maas/config.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/provider/maas/config.go 2016-03-22 15:18:22 +0000
@@ -10,56 +10,81 @@
 	"strings"
 
 	"github.com/juju/schema"
+	"gopkg.in/juju/environschema.v1"
 
 	"github.com/juju/juju/environs/config"
 )
 
-var configFields = schema.Fields{
-	"maas-server": schema.String(),
-	// maas-oauth is a colon-separated triplet of:
-	// consumer-key:resource-token:resource-secret
-	"maas-oauth": schema.String(),
-	// maas-agent-name is an optional UUID to group the instances
-	// acquired from MAAS, to support multiple environments per MAAS user.
-	"maas-agent-name": schema.String(),
+var configSchema = environschema.Fields{
+	"maas-server": {
+		Description: "maas-server specifies the location of the MAAS server. It must specify the base path.",
+		Type:        environschema.Tstring,
+		Example:     "http://192.168.1.1/MAAS/",
+	},
+	"maas-oauth": {
+		Description: "maas-oauth holds the OAuth credentials from MAAS.",
+		Type:        environschema.Tstring,
+	},
+	"maas-agent-name": {
+		Description: "maas-agent-name is an optional UUID to group the instances acquired from MAAS, to support multiple models per MAAS user.",
+		Type:        environschema.Tstring,
+	},
 }
 
+var configFields = func() schema.Fields {
+	fs, _, err := configSchema.ValidationSchema()
+	if err != nil {
+		panic(err)
+	}
+	return fs
+}()
+
 var configDefaults = schema.Defaults{
 	// For backward-compatibility, maas-agent-name is the empty string
 	// by default. However, new environments should all use a UUID.
"maas-agent-name": "", } -type maasEnvironConfig struct { +type maasModelConfig struct { *config.Config attrs map[string]interface{} } -func (cfg *maasEnvironConfig) maasServer() string { +func (cfg *maasModelConfig) maasServer() string { return cfg.attrs["maas-server"].(string) } -func (cfg *maasEnvironConfig) maasOAuth() string { +func (cfg *maasModelConfig) maasOAuth() string { return cfg.attrs["maas-oauth"].(string) } -func (cfg *maasEnvironConfig) maasAgentName() string { +func (cfg *maasModelConfig) maasAgentName() string { if uuid, ok := cfg.attrs["maas-agent-name"].(string); ok { return uuid } return "" } -func (prov maasEnvironProvider) newConfig(cfg *config.Config) (*maasEnvironConfig, error) { +func (prov maasEnvironProvider) newConfig(cfg *config.Config) (*maasModelConfig, error) { validCfg, err := prov.Validate(cfg, nil) if err != nil { return nil, err } - result := new(maasEnvironConfig) + result := new(maasModelConfig) result.Config = validCfg result.attrs = validCfg.UnknownAttrs() return result, nil } +// Schema returns the configuration schema for an environment. +func (maasEnvironProvider) Schema() environschema.Fields { + fields, err := config.Schema(configSchema) + if err != nil { + panic(err) + } + return fields +} + var errMalformedMaasOAuth = errors.New("malformed maas-oauth (3 items separated by colons)") func (prov maasEnvironProvider) Validate(cfg, oldCfg *config.Config) (*config.Config, error) { @@ -102,7 +127,7 @@ return nil, fmt.Errorf("cannot change maas-agent-name") } } - envCfg := new(maasEnvironConfig) + envCfg := new(maasModelConfig) envCfg.Config = cfg envCfg.attrs = validated server := envCfg.maasServer() === modified file 'src/github.com/juju/juju/provider/maas/config_test.go' --- src/github.com/juju/juju/provider/maas/config_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/maas/config_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" "github.com/juju/juju/testing" ) @@ -28,12 +29,16 @@ } // newConfig creates a MAAS environment config from attributes. -func newConfig(values map[string]interface{}) (*maasEnvironConfig, error) { +func newConfig(values map[string]interface{}) (*maasModelConfig, error) { attrs := testing.FakeConfig().Merge(testing.Attrs{ "name": "testenv", "type": "maas", }).Merge(values) - env, err := environs.NewFromAttrs(attrs) + cfg, err := config.New(config.NoDefaults, attrs) + if err != nil { + return nil, err + } + env, err := environs.New(cfg) if err != nil { return nil, err } @@ -132,3 +137,14 @@ _, err = maasEnvironProvider{}.Validate(newCfg, oldCfg.Config) c.Assert(err, gc.ErrorMatches, "cannot change maas-agent-name") } + +func (*configSuite) TestSchema(c *gc.C) { + fields := providerInstance.Schema() + // Check that all the fields defined in environs/config + // are in the returned schema. + globalFields, err := config.Schema(nil) + c.Assert(err, gc.IsNil) + for name, field := range globalFields { + c.Check(fields[name], jc.DeepEquals, field) + } +} === added file 'src/github.com/juju/juju/provider/maas/constraints.go' --- src/github.com/juju/juju/provider/maas/constraints.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/maas/constraints.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,238 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package maas
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils/set"
+
+	"github.com/juju/juju/constraints"
+)
+
+var unsupportedConstraints = []string{
+	constraints.CpuPower,
+	constraints.InstanceType,
+}
+
+// ConstraintsValidator is defined on the environs.Environ interface.
+func (environ *maasEnviron) ConstraintsValidator() (constraints.Validator, error) {
+	validator := constraints.NewValidator()
+	validator.RegisterUnsupported(unsupportedConstraints)
+	supportedArches, err := environ.SupportedArchitectures()
+	if err != nil {
+		return nil, err
+	}
+	validator.RegisterVocabulary(constraints.Arch, supportedArches)
+	return validator, nil
+}
+
+// convertConstraints converts the given constraints into a url.Values object
+// suitable to pass to MAAS when acquiring a node. CpuPower is ignored because
+// it cannot be translated into something meaningful for MAAS right now.
+func convertConstraints(cons constraints.Value) url.Values {
+	params := url.Values{}
+	if cons.Arch != nil {
+		// Note: Juju and MAAS use the same architecture names.
+		// MAAS also accepts a subarchitecture (e.g. "highbank"
+		// for ARM), which defaults to "generic" if unspecified.
+		params.Add("arch", *cons.Arch)
+	}
+	if cons.CpuCores != nil {
+		params.Add("cpu_count", fmt.Sprintf("%d", *cons.CpuCores))
+	}
+	if cons.Mem != nil {
+		params.Add("mem", fmt.Sprintf("%d", *cons.Mem))
+	}
+	convertTagsToParams(params, cons.Tags)
+	if cons.CpuPower != nil {
+		logger.Warningf("ignoring unsupported constraint 'cpu-power'")
+	}
+	return params
+}
+
+// convertTagsToParams converts a list of positive/negative tags from
+// constraints into two comma-delimited lists of values, which can then be
+// passed to MAAS using the "tags" and "not_tags" arguments to acquire. If
+// either list of tags is empty, the respective argument is not added to params.
+func convertTagsToParams(params url.Values, tags *[]string) {
+	if tags == nil || len(*tags) == 0 {
+		return
+	}
+	positives, negatives := parseDelimitedValues(*tags)
+	if len(positives) > 0 {
+		params.Add("tags", strings.Join(positives, ","))
+	}
+	if len(negatives) > 0 {
+		params.Add("not_tags", strings.Join(negatives, ","))
+	}
+}
+
+// convertSpacesFromConstraints extracts spaces from constraints and converts
+// them to two lists of positive and negative spaces.
+func convertSpacesFromConstraints(spaces *[]string) ([]string, []string) {
+	if spaces == nil || len(*spaces) == 0 {
+		return nil, nil
+	}
+	return parseDelimitedValues(*spaces)
+}
+
+// parseDelimitedValues parses a slice of raw values coming from constraints
+// (Tags or Spaces). The result is split into two slices - positives and
+// negatives (prefixed with "^"). Empty values are ignored.
+func parseDelimitedValues(rawValues []string) (positives, negatives []string) {
+	for _, value := range rawValues {
+		if value == "" || value == "^" {
+			// Neither of these cases should happen in practice, as constraints
+			// are validated before setting them and empty names for spaces or
+			// tags are not allowed.
+			continue
+		}
+		if strings.HasPrefix(value, "^") {
+			negatives = append(negatives, strings.TrimPrefix(value, "^"))
+		} else {
+			positives = append(positives, value)
+		}
+	}
+	return positives, negatives
+}
+
+// interfaceBinding defines a requirement that a node interface must satisfy in
+// order for that node to get selected and started, based on deploy-time
+// bindings of a service.
+//
+// TODO(dimitern): Once the services have bindings defined in state, a version
+// of this should go to the network package (needs to be non-MAAS-specific
+// first). Also, we need to transform Juju space names from constraints into
+// MAAS space provider IDs.
+type interfaceBinding struct {
+	Name            string
+	SpaceProviderId string
+
+	// add more as needed.
+}
+
+// numericLabelLimit is a sentinel value used in addInterfaces to limit the
+// number of disambiguation inner loop iterations in case named labels clash
+// with numeric labels for spaces coming from constraints. It's defined here to
+// facilitate testing this behavior.
+var numericLabelLimit uint = 0xffff
+
+// addInterfaces converts a slice of interface bindings, positiveSpaces and
+// negativeSpaces coming from constraints to the format MAAS expects for the
+// "interfaces" and "not_networks" arguments when acquiring a node. Returns an
+// error satisfying errors.IsNotValid() if the bindings contain duplicates,
+// empty Name/SpaceProviderId, or if negative spaces clash with specified
+// bindings. Duplicates between specified bindings and positiveSpaces are
+// silently skipped.
+func addInterfaces(
+	params url.Values,
+	bindings []interfaceBinding,
+	positiveSpaces, negativeSpaces []string,
+) error {
+	var (
+		index            uint
+		combinedBindings []string
+	)
+	namesSet := set.NewStrings()
+	spacesSet := set.NewStrings()
+	for _, binding := range bindings {
+		switch {
+		case binding.Name == "":
+			return errors.NewNotValid(nil, "interface bindings cannot have empty names")
+		case binding.SpaceProviderId == "":
+			return errors.NewNotValid(nil, fmt.Sprintf(
+				"invalid interface binding %q: space provider ID is required",
+				binding.Name,
+			))
+		case namesSet.Contains(binding.Name):
+			return errors.NewNotValid(nil, fmt.Sprintf(
+				"duplicated interface binding %q",
+				binding.Name,
+			))
+		}
+		namesSet.Add(binding.Name)
+		spacesSet.Add(binding.SpaceProviderId)
+
+		item := fmt.Sprintf("%s:space=%s", binding.Name, binding.SpaceProviderId)
+		combinedBindings = append(combinedBindings, item)
+	}
+
+	for _, space := range positiveSpaces {
+		if spacesSet.Contains(space) {
+			// Skip duplicates in positiveSpaces.
+			continue
+		}
+		spacesSet.Add(space)
+		// Make sure we pick a label that doesn't clash with possible bindings.
+		var label string
+		for {
+			label = fmt.Sprintf("%v", index)
+			if !namesSet.Contains(label) {
+				break
+			}
+			if index > numericLabelLimit { // ...just to make sure we won't loop forever.
+				return errors.Errorf("too many conflicting numeric labels, giving up.")
+			}
+			index++
+		}
+		namesSet.Add(label)
+		item := fmt.Sprintf("%s:space=%s", label, space)
+		combinedBindings = append(combinedBindings, item)
+		index++
+	}
+
+	var negatives []string
+	for _, space := range negativeSpaces {
+		if spacesSet.Contains(space) {
+			return errors.NewNotValid(nil, fmt.Sprintf(
+				"negative space %q from constraints clashes with interface bindings",
+				space,
+			))
+		}
+		negatives = append(negatives, fmt.Sprintf("space:%s", space))
+	}
+
+	if len(combinedBindings) > 0 {
+		params.Add("interfaces", strings.Join(combinedBindings, ";"))
+	}
+	if len(negatives) > 0 {
+		params.Add("not_networks", strings.Join(negatives, ","))
+	}
+	return nil
+}
+
+// addStorage converts volume information into a url.Values object suitable to
+// pass to MAAS when acquiring a node.
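+// For example (illustrative values), the volumes
+//	[]volumeInfo{{"root", 8, nil}, {"data", 20, []string{"ssd"}}}
+// are encoded as
+//	storage=root:8,data:20(ssd)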
+func addStorage(params url.Values, volumes []volumeInfo) { + if len(volumes) == 0 { + return + } + // Requests for specific values are passed to the acquire URL + // as a storage URL parameter of the form: + // [volume-name:]sizeinGB[tag,...] + // See http://maas.ubuntu.com/docs/api.html#nodes + + // eg storage=root:0(ssd),data:20(magnetic,5400rpm),45 + makeVolumeParams := func(v volumeInfo) string { + var params string + if v.name != "" { + params = v.name + ":" + } + params += fmt.Sprintf("%d", v.sizeInGB) + if len(v.tags) > 0 { + params += fmt.Sprintf("(%s)", strings.Join(v.tags, ",")) + } + return params + } + var volParms []string + for _, v := range volumes { + params := makeVolumeParams(v) + volParms = append(volParms, params) + } + params.Add("storage", strings.Join(volParms, ",")) +} === added file 'src/github.com/juju/juju/provider/maas/constraints_test.go' --- src/github.com/juju/juju/provider/maas/constraints_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/maas/constraints_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,482 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package maas + +import ( + "fmt" + "net/url" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/constraints" +) + +func (*environSuite) TestConvertConstraints(c *gc.C) { + for i, test := range []struct { + cons constraints.Value + expected url.Values + }{{ + cons: constraints.Value{Arch: stringp("arm")}, + expected: url.Values{"arch": {"arm"}}, + }, { + cons: constraints.Value{CpuCores: uint64p(4)}, + expected: url.Values{"cpu_count": {"4"}}, + }, { + cons: constraints.Value{Mem: uint64p(1024)}, + expected: url.Values{"mem": {"1024"}}, + }, { // Spaces are converted to bindings and not_networks, but only in acquireNode + cons: constraints.Value{Spaces: stringslicep("foo", "bar", "^baz", "^oof")}, + expected: url.Values{}, + }, { + cons: constraints.Value{Tags: stringslicep("tag1", "tag2", "^tag3", "^tag4")}, + expected: url.Values{ + "tags": {"tag1,tag2"}, + "not_tags": {"tag3,tag4"}, + }, + }, { // CpuPower is ignored. + cons: constraints.Value{CpuPower: uint64p(1024)}, + expected: url.Values{}, + }, { // RootDisk is ignored. 
+ cons: constraints.Value{RootDisk: uint64p(8192)}, + expected: url.Values{}, + }, { + cons: constraints.Value{Tags: stringslicep("foo", "bar")}, + expected: url.Values{"tags": {"foo,bar"}}, + }, { + cons: constraints.Value{ + Arch: stringp("arm"), + CpuCores: uint64p(4), + Mem: uint64p(1024), + CpuPower: uint64p(1024), + RootDisk: uint64p(8192), + Spaces: stringslicep("foo", "^bar"), + Tags: stringslicep("^tag1", "tag2"), + }, + expected: url.Values{ + "arch": {"arm"}, + "cpu_count": {"4"}, + "mem": {"1024"}, + "tags": {"tag2"}, + "not_tags": {"tag1"}, + }, + }} { + c.Logf("test #%d: cons=%s", i, test.cons.String()) + c.Check(convertConstraints(test.cons), jc.DeepEquals, test.expected) + } +} + +var nilStringSlice []string + +func (*environSuite) TestConvertTagsToParams(c *gc.C) { + for i, test := range []struct { + tags *[]string + expected url.Values + }{{ + tags: nil, + expected: url.Values{}, + }, { + tags: &nilStringSlice, + expected: url.Values{}, + }, { + tags: &[]string{}, + expected: url.Values{}, + }, { + tags: stringslicep(""), + expected: url.Values{}, + }, { + tags: stringslicep("foo"), + expected: url.Values{ + "tags": {"foo"}, + }, + }, { + tags: stringslicep("^bar"), + expected: url.Values{ + "not_tags": {"bar"}, + }, + }, { + tags: stringslicep("foo", "^bar", "baz", "^oof"), + expected: url.Values{ + "tags": {"foo,baz"}, + "not_tags": {"bar,oof"}, + }, + }, { + tags: stringslicep("", "^bar", "^", "^oof"), + expected: url.Values{ + "not_tags": {"bar,oof"}, + }, + }, { + tags: stringslicep("foo", "^", " b a z ", "^^ ^"), + expected: url.Values{ + "tags": {"foo, b a z "}, + "not_tags": {"^ ^"}, + }, + }, { + tags: stringslicep("", "^bar", " ", " ^ o of "), + expected: url.Values{ + "tags": {" , ^ o of "}, + "not_tags": {"bar"}, + }, + }, { + tags: stringslicep("foo", "foo", "^bar", "^bar"), + expected: url.Values{ + "tags": {"foo,foo"}, + "not_tags": {"bar,bar"}, + }, + }} { + c.Logf("test #%d: tags=%v", i, test.tags) + var vals = url.Values{} + convertTagsToParams(vals, test.tags) + c.Check(vals, jc.DeepEquals, test.expected) + } +} + +func uint64p(val uint64) *uint64 { + return &val +} + +func stringp(val string) *string { + return &val +} + +func stringslicep(values ...string) *[]string { + return &values +} + +func (suite *environSuite) TestSelectNodeValidZone(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0", "zone": "bar"}`) + + snArgs := selectNodeArgs{ + AvailabilityZones: []string{"foo", "bar"}, + Constraints: constraints.Value{}, + } + + node, err := env.selectNode(snArgs) + c.Assert(err, jc.ErrorIsNil) + c.Assert(node, gc.NotNil) +} + +func (suite *environSuite) TestSelectNodeInvalidZone(c *gc.C) { + env := suite.makeEnviron() + + snArgs := selectNodeArgs{ + AvailabilityZones: []string{"foo", "bar"}, + Constraints: constraints.Value{}, + } + + _, err := env.selectNode(snArgs) + c.Assert(err, gc.NotNil) + c.Assert(fmt.Sprintf("%s", err), gc.Equals, "cannot run instances: gomaasapi: got error back from server: 409 Conflict ()") +} + +func (suite *environSuite) TestAcquireNode(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) + + _, err := env.acquireNode("", "", constraints.Value{}, nil, nil) + + c.Check(err, jc.ErrorIsNil) + operations := suite.testMAASObject.TestServer.NodeOperations() + actions, found := operations["node0"] + c.Assert(found, jc.IsTrue) + c.Check(actions, gc.DeepEquals, []string{"acquire"}) + + 
// no "name" parameter should have been passed through + values := suite.testMAASObject.TestServer.NodeOperationRequestValues()["node0"][0] + _, found = values["name"] + c.Assert(found, jc.IsFalse) +} + +func (suite *environSuite) TestAcquireNodeByName(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) + + _, err := env.acquireNode("host0", "", constraints.Value{}, nil, nil) + + c.Check(err, jc.ErrorIsNil) + operations := suite.testMAASObject.TestServer.NodeOperations() + actions, found := operations["node0"] + c.Assert(found, jc.IsTrue) + c.Check(actions, gc.DeepEquals, []string{"acquire"}) + + // no "name" parameter should have been passed through + values := suite.testMAASObject.TestServer.NodeOperationRequestValues()["node0"][0] + nodeName := values.Get("name") + c.Assert(nodeName, gc.Equals, "host0") +} + +func (suite *environSuite) TestAcquireNodeTakesConstraintsIntoAccount(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode( + `{"system_id": "node0", "hostname": "host0", "architecture": "arm/generic", "memory": 2048}`, + ) + constraints := constraints.Value{Arch: stringp("arm"), Mem: uint64p(1024)} + + _, err := env.acquireNode("", "", constraints, nil, nil) + + c.Check(err, jc.ErrorIsNil) + requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() + nodeRequestValues, found := requestValues["node0"] + c.Assert(found, jc.IsTrue) + c.Assert(nodeRequestValues[0].Get("arch"), gc.Equals, "arm") + c.Assert(nodeRequestValues[0].Get("mem"), gc.Equals, "1024") +} + +func (suite *environSuite) TestParseDelimitedValues(c *gc.C) { + for i, test := range []struct { + about string + input []string + positives []string + negatives []string + }{{ + about: "nil input", + input: nil, + positives: []string{}, + negatives: []string{}, + }, { + about: "empty input", + input: []string{}, + positives: []string{}, + negatives: []string{}, + }, { + about: "values list with embedded whitespace", + input: []string{" val1 ", " val2", " ^ not Val 3 ", " ", " ", "^", "", "^ notVal4 "}, + positives: []string{" val1 ", " val2", " ^ not Val 3 ", " ", " "}, + negatives: []string{" notVal4 "}, + }, { + about: "only positives", + input: []string{"val1", "val2", "val3"}, + positives: []string{"val1", "val2", "val3"}, + negatives: []string{}, + }, { + about: "only negatives", + input: []string{"^val1", "^val2", "^val3"}, + positives: []string{}, + negatives: []string{"val1", "val2", "val3"}, + }, { + about: "multi-caret negatives", + input: []string{"^foo^", "^v^a^l2", " ^^ ^", "^v^al3", "^^", "^"}, + positives: []string{" ^^ ^"}, + negatives: []string{"foo^", "v^a^l2", "v^al3", "^"}, + }, { + about: "both positives and negatives", + input: []string{"^val1", "val2", "^val3", "val4"}, + positives: []string{"val2", "val4"}, + negatives: []string{"val1", "val3"}, + }, { + about: "single positive value", + input: []string{"val1"}, + positives: []string{"val1"}, + negatives: []string{}, + }, { + about: "single negative value", + input: []string{"^val1"}, + positives: []string{}, + negatives: []string{"val1"}, + }} { + c.Logf("test %d: %s", i, test.about) + positives, negatives := parseDelimitedValues(test.input) + c.Check(positives, jc.DeepEquals, test.positives) + c.Check(negatives, jc.DeepEquals, test.negatives) + } +} + +func (suite *environSuite) TestAcquireNodePassedAgentName(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": 
"host0"}`) + + _, err := env.acquireNode("", "", constraints.Value{}, nil, nil) + + c.Check(err, jc.ErrorIsNil) + requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() + nodeRequestValues, found := requestValues["node0"] + c.Assert(found, jc.IsTrue) + c.Assert(nodeRequestValues[0].Get("agent_name"), gc.Equals, exampleAgentName) +} + +func (suite *environSuite) TestAcquireNodePassesPositiveAndNegativeTags(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) + + _, err := env.acquireNode( + "", "", + constraints.Value{Tags: stringslicep("tag1", "^tag2", "tag3", "^tag4")}, + nil, nil, + ) + + c.Check(err, jc.ErrorIsNil) + requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() + nodeValues, found := requestValues["node0"] + c.Assert(found, jc.IsTrue) + c.Assert(nodeValues[0].Get("tags"), gc.Equals, "tag1,tag3") + c.Assert(nodeValues[0].Get("not_tags"), gc.Equals, "tag2,tag4") +} + +func (suite *environSuite) TestAcquireNodePassesPositiveAndNegativeSpaces(c *gc.C) { + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) + + _, err := env.acquireNode( + "", "", + constraints.Value{Spaces: stringslicep("space1", "^space2", "space3", "^space4")}, + nil, nil, + ) + c.Check(err, jc.ErrorIsNil) + requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() + nodeValues, found := requestValues["node0"] + c.Assert(found, jc.IsTrue) + c.Check(nodeValues[0].Get("interfaces"), gc.Equals, "0:space=space1;1:space=space3") + c.Check(nodeValues[0].Get("not_networks"), gc.Equals, "space:space2,space:space4") +} + +func (suite *environSuite) TestAcquireNodeDisambiguatesNamedLabelsFromIndexedUpToALimit(c *gc.C) { + var shortLimit uint = 0 + suite.PatchValue(&numericLabelLimit, shortLimit) + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) + + _, err := env.acquireNode( + "", "", + constraints.Value{Spaces: stringslicep("space1", "^space2", "space3", "^space4")}, + []interfaceBinding{{"0", "first-clash"}, {"1", "final-clash"}}, + nil, + ) + c.Assert(err, gc.ErrorMatches, `too many conflicting numeric labels, giving up.`) +} + +func (suite *environSuite) TestAcquireNodeStorage(c *gc.C) { + for i, test := range []struct { + volumes []volumeInfo + expected string + }{{ + volumes: nil, + expected: "", + }, { + volumes: []volumeInfo{{"volume-1", 1234, nil}}, + expected: "volume-1:1234", + }, { + volumes: []volumeInfo{{"", 1234, []string{"tag1", "tag2"}}}, + expected: "1234(tag1,tag2)", + }, { + volumes: []volumeInfo{{"volume-1", 1234, []string{"tag1", "tag2"}}}, + expected: "volume-1:1234(tag1,tag2)", + }, { + volumes: []volumeInfo{ + {"volume-1", 1234, []string{"tag1", "tag2"}}, + {"volume-2", 4567, []string{"tag1", "tag3"}}, + }, + expected: "volume-1:1234(tag1,tag2),volume-2:4567(tag1,tag3)", + }} { + c.Logf("test #%d: volumes=%v", i, test.volumes) + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) + _, err := env.acquireNode("", "", constraints.Value{}, nil, test.volumes) + c.Check(err, jc.ErrorIsNil) + requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() + nodeRequestValues, found := requestValues["node0"] + c.Check(found, jc.IsTrue) + c.Check(nodeRequestValues[0].Get("storage"), gc.Equals, test.expected) + suite.testMAASObject.TestServer.Clear() + } +} + +func (suite *environSuite) TestAcquireNodeInterfaces(c *gc.C) { 
+ // Add some constraints, including spaces to verify specified bindings + // always override any spaces constraints. + cons := constraints.Value{ + Spaces: stringslicep("foo", "^bar"), + } + + for i, test := range []struct { + interfaces []interfaceBinding + expectedPositives string + expectedNegatives string + expectedError string + }{{ // without specified bindings, spaces constraints are used instead. + interfaces: nil, + expectedPositives: "0:space=foo", + expectedNegatives: "space:bar", + expectedError: "", + }, { + interfaces: []interfaceBinding{{"name-1", "space-1"}}, + expectedPositives: "name-1:space=space-1;0:space=foo", + expectedNegatives: "space:bar", + }, { + interfaces: []interfaceBinding{ + {"name-1", "space-1"}, + {"name-2", "space-2"}, + {"name-3", "space-3"}, + }, + expectedPositives: "name-1:space=space-1;name-2:space=space-2;name-3:space=space-3;0:space=foo", + expectedNegatives: "space:bar", + }, { + interfaces: []interfaceBinding{{"", "anything"}}, + expectedError: "interface bindings cannot have empty names", + }, { + interfaces: []interfaceBinding{{"shared-db", "bar"}}, + expectedError: `negative space "bar" from constraints clashes with interface bindings`, + }, { + interfaces: []interfaceBinding{ + {"shared-db", "dup-space"}, + {"db", "dup-space"}, + }, + expectedPositives: "shared-db:space=dup-space;db:space=dup-space;0:space=foo", + expectedNegatives: "space:bar", + }, { + interfaces: []interfaceBinding{{"", ""}}, + expectedError: "interface bindings cannot have empty names", + }, { + interfaces: []interfaceBinding{ + {"valid", "ok"}, + {"", "valid-but-ignored-space"}, + {"valid-name-empty-space", ""}, + {"", ""}, + }, + expectedError: "interface bindings cannot have empty names", + }, { + interfaces: []interfaceBinding{{"foo", ""}}, + expectedError: `invalid interface binding "foo": space provider ID is required`, + }, { + interfaces: []interfaceBinding{ + {"bar", ""}, + {"valid", "ok"}, + {"", "valid-but-ignored-space"}, + {"", ""}, + }, + expectedError: `invalid interface binding "bar": space provider ID is required`, + }, { + interfaces: []interfaceBinding{ + {"dup-name", "space-1"}, + {"dup-name", "space-2"}, + }, + expectedError: `duplicated interface binding "dup-name"`, + }, { + interfaces: []interfaceBinding{ + {"valid-1", "space-0"}, + {"dup-name", "space-1"}, + {"dup-name", "space-2"}, + {"valid-2", "space-3"}, + }, + expectedError: `duplicated interface binding "dup-name"`, + }} { + c.Logf("test #%d: interfaces=%v", i, test.interfaces) + env := suite.makeEnviron() + suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) + _, err := env.acquireNode("", "", cons, test.interfaces, nil) + if test.expectedError != "" { + c.Check(err, gc.ErrorMatches, test.expectedError) + c.Check(err, jc.Satisfies, errors.IsNotValid) + continue + } + c.Check(err, jc.ErrorIsNil) + requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() + nodeRequestValues, found := requestValues["node0"] + c.Check(found, jc.IsTrue) + c.Check(nodeRequestValues[0].Get("interfaces"), gc.Equals, test.expectedPositives) + c.Check(nodeRequestValues[0].Get("not_networks"), gc.Equals, test.expectedNegatives) + suite.testMAASObject.TestServer.Clear() + } +} === added file 'src/github.com/juju/juju/provider/maas/credentials.go' --- src/github.com/juju/juju/provider/maas/credentials.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/maas/credentials.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package maas + +import ( + "github.com/juju/errors" + "github.com/juju/juju/cloud" +) + +type environProviderCredentials struct{} + +// CredentialSchemas is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{ + cloud.OAuth1AuthType: { + "maas-oauth": { + Description: "OAuth/API-key credentials for MAAS", + Hidden: true, + }, + }, + } +} + +// DetectCredentials is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { + // TODO(axw) find out where the MAAS CLI stores credentials. + return nil, errors.NotFoundf("credentials") +} === added file 'src/github.com/juju/juju/provider/maas/credentials_test.go' --- src/github.com/juju/juju/provider/maas/credentials_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/maas/credentials_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package maas_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" +) + +type credentialsSuite struct { + testing.IsolationSuite + provider environs.EnvironProvider +} + +var _ = gc.Suite(&credentialsSuite{}) + +func (s *credentialsSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + var err error + s.provider, err = environs.Provider("maas") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { + envtesting.AssertProviderAuthTypes(c, s.provider, "oauth1") +} + +func (s *credentialsSuite) TestOAuth1CredentialsValid(c *gc.C) { + envtesting.AssertProviderCredentialsValid(c, s.provider, "oauth1", map[string]string{ + "maas-oauth": "123:456:789", + }) +} + +func (s *credentialsSuite) TestOAuth1HiddenAttributes(c *gc.C) { + envtesting.AssertProviderCredentialsAttributesHidden(c, s.provider, "oauth1", "maas-oauth") +} + +func (s *credentialsSuite) TestDetectCredentialsNotFound(c *gc.C) { + _, err := s.provider.DetectCredentials() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} === modified file 'src/github.com/juju/juju/provider/maas/environ.go' --- src/github.com/juju/juju/provider/maas/environ.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/maas/environ.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,6 @@ package maas import ( - "encoding/xml" "fmt" "net" "net/http" @@ -15,10 +14,12 @@ "time" "github.com/juju/errors" + "github.com/juju/gomaasapi" + "github.com/juju/names" "github.com/juju/utils" + "github.com/juju/utils/os" + "github.com/juju/utils/series" "github.com/juju/utils/set" - "gopkg.in/mgo.v2/bson" - "launchpad.net/gomaasapi" "github.com/juju/juju/agent" "github.com/juju/juju/cloudconfig/cloudinit" @@ -33,13 +34,13 @@ "github.com/juju/juju/provider/common" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/tools" - "github.com/juju/juju/version" - "github.com/juju/names" ) const ( // We're using v1.0 of the MAAS API. apiVersion = "1.0" + // The string from the api indicating the dynamic range of a subnet. 
+ dynamicRange = "dynamic-range" ) // A request may fail to due "eventual consistency" semantics, which @@ -56,6 +57,8 @@ ReleaseNodes = releaseNodes ReserveIPAddress = reserveIPAddress ReserveIPAddressOnDevice = reserveIPAddressOnDevice + NewDeviceParams = newDeviceParams + UpdateDeviceHostname = updateDeviceHostname ReleaseIPAddress = releaseIPAddress DeploymentStatusCall = deploymentStatusCall ) @@ -73,13 +76,58 @@ return err } -func reserveIPAddressOnDevice(devices gomaasapi.MAASObject, deviceId string, addr network.Address) error { - device := devices.GetSubObject(deviceId) +func reserveIPAddressOnDevice(devices gomaasapi.MAASObject, deviceID, macAddress string, addr network.Address) (network.Address, error) { + device := devices.GetSubObject(deviceID) params := url.Values{} - params.Add("requested_address", addr.Value) - _, err := device.CallPost("claim_sticky_ip_address", params) - return err - + if addr.Value != "" { + params.Add("requested_address", addr.Value) + } + if macAddress != "" { + params.Add("mac_address", macAddress) + } + resp, err := device.CallPost("claim_sticky_ip_address", params) + if err != nil { + return network.Address{}, errors.Annotatef( + err, "failed to reserve sticky IP address for device %q", + deviceID, + ) + } + respMap, err := resp.GetMap() + if err != nil { + return network.Address{}, errors.Annotate(err, "failed to parse response") + } + addresses, err := respMap["ip_addresses"].GetArray() + if err != nil { + return network.Address{}, errors.Annotatef(err, "failed to parse IP addresses") + } + if len(addresses) == 0 { + return network.Address{}, errors.Errorf( + "expected to find a sticky IP address for device %q: MAAS API response contains no IP addresses", + deviceID, + ) + } + var firstAddress network.Address + for _, address := range addresses { + value, err := address.GetString() + if err != nil { + return network.Address{}, errors.Annotatef(err, + "failed to parse reserved IP address for device %q", + deviceID, + ) + } + if ip := net.ParseIP(value); ip == nil { + return network.Address{}, errors.Annotatef(err, + "failed to parse reserved IP address %q for device %q", + value, deviceID, + ) + } + if firstAddress.Value == "" { + // We only need the first address, but we're logging all we got. + firstAddress = network.NewAddress(value) + } + logger.Debugf("reserved address %q for device %q and MAC %q", value, deviceID, macAddress) + } + return firstAddress, nil } func releaseIPAddress(ipaddresses gomaasapi.MAASObject, addr network.Address) error { @@ -103,12 +151,17 @@ // ecfgMutex protects the *Unlocked fields below. ecfgMutex sync.Mutex - ecfgUnlocked *maasEnvironConfig + ecfgUnlocked *maasModelConfig maasClientUnlocked *gomaasapi.MAASObject storageUnlocked storage.Storage availabilityZonesMutex sync.Mutex availabilityZones []common.AvailabilityZone + + // The following are initialized from the discovered MAAS API capabilities. + supportsDevices bool + supportsStaticIPs bool + supportsNetworkDeploymentUbuntu bool } var _ environs.Environ = (*maasEnviron)(nil) @@ -121,11 +174,32 @@ } env.name = cfg.Name() env.storageUnlocked = NewStorage(env) + + // Since we need to switch behavior based on the available API capabilities, + // get them as soon as possible and cache them. 
+ capabilities, err := env.getCapabilities() + if err != nil { + logger.Warningf("cannot get MAAS API capabilities: %v", err) + } + logger.Tracef("MAAS API capabilities: %v", capabilities.SortedValues()) + env.supportsDevices = capabilities.Contains(capDevices) + env.supportsStaticIPs = capabilities.Contains(capStaticIPAddresses) + env.supportsNetworkDeploymentUbuntu = capabilities.Contains(capNetworkDeploymentUbuntu) return env, nil } +var noDevicesWarning = ` +Using MAAS version older than 1.8.2: devices API support not detected! + +Juju cannot guarantee resources allocated to containers, like DHCP +leases or static IP addresses will be properly cleaned up when the +container, its host, or the model is destroyed. + +Juju recommends upgrading MAAS to version 1.8.2 or later. +`[1:] + // Bootstrap is specified in the Environ interface. -func (env *maasEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) { +func (env *maasEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) { if !environs.AddressAllocationEnabled() { // When address allocation is not enabled, we should use the // default bridge for both LXC and KVM containers. The bridge @@ -136,6 +210,11 @@ instancecfg.DefaultBridgeName, ) args.ContainerBridgeName = instancecfg.DefaultBridgeName + + if !env.supportsDevices { + // Inform the user container resources might leak. + ctx.Infof("WARNING: %s", noDevicesWarning) + } } else { logger.Debugf( "address allocation feature enabled; using static IPs for containers: %q", @@ -145,7 +224,7 @@ result, series, finalizer, err := common.BootstrapInstance(ctx, env, args) if err != nil { - return "", "", nil, err + return nil, err } // We want to destroy the started instance if it doesn't transition to Deployed. @@ -158,19 +237,25 @@ }() // Wait for bootstrap instance to change to deployed state. if err := env.waitForNodeDeployment(result.Instance.Id()); err != nil { - return "", "", nil, errors.Annotate(err, "bootstrap instance started but did not change to Deployed state") - } - return *result.Hardware.Arch, series, finalizer, nil + return nil, errors.Annotate(err, "bootstrap instance started but did not change to Deployed state") + } + + bsResult := &environs.BootstrapResult{ + Arch: *result.Hardware.Arch, + Series: series, + Finalize: finalizer, + } + return bsResult, nil } -// StateServerInstances is specified in the Environ interface. -func (env *maasEnviron) StateServerInstances() ([]instance.Id, error) { +// ControllerInstances is specified in the Environ interface. +func (env *maasEnviron) ControllerInstances() ([]instance.Id, error) { return common.ProviderStateInstances(env, env.Storage()) } -// ecfg returns the environment's maasEnvironConfig, and protects it with a +// ecfg returns the environment's maasModelConfig, and protects it with a // mutex. -func (env *maasEnviron) ecfg() *maasEnvironConfig { +func (env *maasEnviron) ecfg() *maasModelConfig { env.ecfgMutex.Lock() defer env.ecfgMutex.Unlock() return env.ecfgUnlocked @@ -241,20 +326,25 @@ // SupportsSpaces is specified on environs.Networking. func (env *maasEnviron) SupportsSpaces() (bool, error) { - return false, errors.NotSupportedf("spaces") + return env.supportsNetworkDeploymentUbuntu, nil +} + +// SupportsSpaceDiscovery is specified on environs.Networking. 
+func (env *maasEnviron) SupportsSpaceDiscovery() (bool, error) { + return env.supportsNetworkDeploymentUbuntu, nil } // SupportsAddressAllocation is specified on environs.Networking. func (env *maasEnviron) SupportsAddressAllocation(_ network.Id) (bool, error) { if !environs.AddressAllocationEnabled() { - return false, errors.NotSupportedf("address allocation") + if !env.supportsDevices { + return false, errors.NotSupportedf("address allocation") + } + // We can use devices for DHCP-allocated container IPs. + return true, nil } - caps, err := env.getCapabilities() - if err != nil { - return false, errors.Annotatef(err, "getCapabilities failed") - } - return caps.Contains(capStaticIPAddresses), nil + return env.supportsStaticIPs, nil } // allBootImages queries MAAS for all of the boot-images across @@ -310,55 +400,6 @@ return nodegroups, nil } -func (env *maasEnviron) getNodegroupInterfaces(nodegroups []string) map[string][]net.IP { - nodegroupsObject := env.getMAASClient().GetSubObject("nodegroups") - - nodegroupsInterfacesMap := make(map[string][]net.IP) - for _, uuid := range nodegroups { - interfacesObject := nodegroupsObject.GetSubObject(uuid).GetSubObject("interfaces") - interfacesResult, err := interfacesObject.CallGet("list", nil) - if err != nil { - logger.Debugf("cannot list interfaces for nodegroup %v: %v", uuid, err) - continue - } - interfaces, err := interfacesResult.GetArray() - if err != nil { - logger.Debugf("cannot get interfaces for nodegroup %v: %v", uuid, err) - continue - } - for _, interfaceResult := range interfaces { - nic, err := interfaceResult.GetMap() - if err != nil { - logger.Debugf("cannot get interface %v for nodegroup %v: %v", nic, uuid, err) - continue - } - ip, err := nic["ip"].GetString() - if err != nil { - logger.Debugf("cannot get interface IP %v for nodegroup %v: %v", nic, uuid, err) - continue - } - static_low, err := nic["static_ip_range_low"].GetString() - if err != nil { - logger.Debugf("cannot get static IP range lower bound for interface %v on nodegroup %v: %v", nic, uuid, err) - continue - } - static_high, err := nic["static_ip_range_high"].GetString() - if err != nil { - logger.Infof("cannot get static IP range higher bound for interface %v on nodegroup %v: %v", nic, uuid, err) - continue - } - static_low_ip := net.ParseIP(static_low) - static_high_ip := net.ParseIP(static_high) - if static_low_ip == nil || static_high_ip == nil { - logger.Debugf("invalid IP in static range for interface %v on nodegroup %v: %q %q", nic, uuid, static_low_ip, static_high_ip) - continue - } - nodegroupsInterfacesMap[ip] = []net.IP{static_low_ip, static_high_ip} - } - } - return nodegroupsInterfacesMap -} - type bootImage struct { architecture string release string @@ -531,19 +572,12 @@ } const ( - capNetworksManagement = "networks-management" - capStaticIPAddresses = "static-ipaddresses" - capDevices = "devices-management" + capNetworksManagement = "networks-management" + capStaticIPAddresses = "static-ipaddresses" + capDevices = "devices-management" + capNetworkDeploymentUbuntu = "network-deployment-ubuntu" ) -func (env *maasEnviron) supportsDevices() (bool, error) { - caps, err := env.getCapabilities() - if err != nil { - return false, errors.Trace(err) - } - return caps.Contains(capDevices), nil -} - // getCapabilities asks the MAAS server for its capabilities, if // supported by the server. 
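The cached capability switches used throughout this file reduce to set membership over the strings MAAS reports. A minimal sketch (the capability values are illustrative; set is the github.com/juju/utils/set package this file already imports):

	package main

	import (
		"fmt"

		"github.com/juju/utils/set"
	)

	func main() {
		// Pretend MAAS reported these capabilities (see the cap* constants above).
		caps := set.NewStrings("networks-management", "static-ipaddresses", "devices-management")
		fmt.Println(caps.Contains("devices-management"))        // true: devices API usable
		fmt.Println(caps.Contains("network-deployment-ubuntu")) // false: pre-1.9 MAAS, no spaces API
	}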
func (env *maasEnviron) getCapabilities() (set.Strings, error) { @@ -596,113 +630,21 @@ return env.maasClientUnlocked } -// convertConstraints converts the given constraints into an url.Values -// object suitable to pass to MAAS when acquiring a node. -// CpuPower is ignored because it cannot translated into something -// meaningful for MAAS right now. -func convertConstraints(cons constraints.Value) url.Values { - params := url.Values{} - if cons.Arch != nil { - // Note: Juju and MAAS use the same architecture names. - // MAAS also accepts a subarchitecture (e.g. "highbank" - // for ARM), which defaults to "generic" if unspecified. - params.Add("arch", *cons.Arch) - } - if cons.CpuCores != nil { - params.Add("cpu_count", fmt.Sprintf("%d", *cons.CpuCores)) - } - if cons.Mem != nil { - params.Add("mem", fmt.Sprintf("%d", *cons.Mem)) - } - if cons.Tags != nil && len(*cons.Tags) > 0 { - tags, notTags := parseTags(*cons.Tags) - if len(tags) > 0 { - params.Add("tags", strings.Join(tags, ",")) - } - if len(notTags) > 0 { - params.Add("not_tags", strings.Join(notTags, ",")) - } - } - if cons.CpuPower != nil { - logger.Warningf("ignoring unsupported constraint 'cpu-power'") - } - return params -} - -// parseTags parses a tags constraints, splitting it into a positive -// and negative tags to pass to MAAS. Positive tags have no prefix, -// negative tags have a "^" prefix. All spaces inside the rawTags are -// stripped before parsing. -func parseTags(rawTags []string) (tags, notTags []string) { - for _, tag := range rawTags { - tag = strings.Replace(tag, " ", "", -1) - if len(tag) == 0 { - continue - } - if strings.HasPrefix(tag, "^") { - notTags = append(notTags, strings.TrimPrefix(tag, "^")) - } else { - tags = append(tags, tag) - } - } - return tags, notTags -} - -// addNetworks converts networks include/exclude information into -// url.Values object suitable to pass to MAAS when acquiring a node. -func addNetworks(params url.Values, includeNetworks, excludeNetworks []string) { - // Network Inclusion/Exclusion setup - if len(includeNetworks) > 0 { - for _, name := range includeNetworks { - params.Add("networks", name) - } - } - if len(excludeNetworks) > 0 { - for _, name := range excludeNetworks { - params.Add("not_networks", name) - } - } -} - -// addVolumes converts volume information into -// url.Values object suitable to pass to MAAS when acquiring a node. -func addVolumes(params url.Values, volumes []volumeInfo) { - if len(volumes) == 0 { - return - } - // Requests for specific values are passed to the acquire URL - // as a storage URL parameter of the form: - // [volume-name:]sizeinGB[tag,...] - // See http://maas.ubuntu.com/docs/api.html#nodes - - // eg storage=root:0(ssd),data:20(magnetic,5400rpm),45 - makeVolumeParams := func(v volumeInfo) string { - var params string - if v.name != "" { - params = v.name + ":" - } - params += fmt.Sprintf("%d", v.sizeInGB) - if len(v.tags) > 0 { - params += fmt.Sprintf("(%s)", strings.Join(v.tags, ",")) - } - return params - } - var volParms []string - for _, v := range volumes { - params := makeVolumeParams(v) - volParms = append(volParms, params) - } - params.Add("storage", strings.Join(volParms, ",")) -} - // acquireNode allocates a node from the MAAS. 
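Concretely, the parameters assembled by acquireNode below are form-encoded into a single "acquire" POST. A sketch with representative values drawn from the tests earlier in this diff (the agent name and zone values are invented):

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		params := url.Values{}
		params.Add("arch", "arm") // constraints
		params.Add("cpu_count", "4")
		params.Add("mem", "1024")
		params.Add("tags", "tag2")
		params.Add("not_tags", "tag1")
		params.Add("interfaces", "name-1:space=space-1;0:space=foo") // bindings + positive spaces
		params.Add("not_networks", "space:bar")                      // negative spaces
		params.Add("storage", "volume-1:1234(tag1,tag2)")            // volumes
		params.Add("agent_name", "example-agent-uuid")               // invented
		params.Add("zone", "zone1")                                  // invented
		fmt.Println(params.Encode())
	}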
func (environ *maasEnviron) acquireNode( - nodeName, zoneName string, cons constraints.Value, includeNetworks, excludeNetworks []string, volumes []volumeInfo, + nodeName, zoneName string, + cons constraints.Value, + interfaces []interfaceBinding, + volumes []volumeInfo, ) (gomaasapi.MAASObject, error) { acquireParams := convertConstraints(cons) - addNetworks(acquireParams, includeNetworks, excludeNetworks) - addVolumes(acquireParams, volumes) + positiveSpaces, negativeSpaces := convertSpacesFromConstraints(cons.Spaces) + err := addInterfaces(acquireParams, interfaces, positiveSpaces, negativeSpaces) + if err != nil { + return gomaasapi.MAASObject{}, err + } + addStorage(acquireParams, volumes) acquireParams.Add("agent_name", environ.ecfg().maasAgentName()) if zoneName != "" { acquireParams.Add("zone", zoneName) @@ -728,9 +670,9 @@ } var result gomaasapi.JSONObject - var err error for a := shortAttempt.Start(); a.Next(); { client := environ.getMAASClient().GetSubObject("nodes/") + logger.Tracef("calling acquire with params: %+v", acquireParams) result, err = client.CallPost("acquire", acquireParams) if err == nil { break @@ -748,7 +690,7 @@ } // startNode installs and boots a node. -func (environ *maasEnviron) startNode(node gomaasapi.MAASObject, series string, userdata []byte) error { +func (environ *maasEnviron) startNode(node gomaasapi.MAASObject, series string, userdata []byte) (*gomaasapi.MAASObject, error) { params := url.Values{ "distro_series": {series}, "user_data": {string(userdata)}, @@ -756,89 +698,24 @@ // Initialize err to a non-nil value as a sentinel for the following // loop. err := fmt.Errorf("(no error)") + var result gomaasapi.JSONObject for a := shortAttempt.Start(); a.Next() && err != nil; { - _, err = node.CallPost("start", params) - } - return err -} - -var unsupportedConstraints = []string{ - constraints.CpuPower, - constraints.InstanceType, -} - -// ConstraintsValidator is defined on the Environs interface. -func (environ *maasEnviron) ConstraintsValidator() (constraints.Validator, error) { - validator := constraints.NewValidator() - validator.RegisterUnsupported(unsupportedConstraints) - supportedArches, err := environ.SupportedArchitectures() - if err != nil { - return nil, err - } - validator.RegisterVocabulary(constraints.Arch, supportedArches) - return validator, nil -} - -// setupNetworks prepares a []network.InterfaceInfo for the given -// instance. Any networks in networksToDisable will be configured as -// disabled on the machine. Any disabled network interfaces (as -// discovered from the lshw output for the node) will stay disabled. -// The interface name discovered as primary is also returned. -func (environ *maasEnviron) setupNetworks(inst instance.Instance, networksToDisable set.Strings) ([]network.InterfaceInfo, string, error) { - // Get the instance network interfaces first. 
- interfaces, primaryIface, err := environ.getInstanceNetworkInterfaces(inst) - if err != nil { - return nil, "", errors.Annotatef(err, "getInstanceNetworkInterfaces failed") - } - logger.Debugf("node %q has network interfaces %v", inst.Id(), interfaces) - networks, err := environ.getInstanceNetworks(inst) - if err != nil { - return nil, "", errors.Annotatef(err, "getInstanceNetworks failed") - } - logger.Debugf("node %q has networks %v", inst.Id(), networks) - var tempInterfaceInfo []network.InterfaceInfo - for _, netw := range networks { - disabled := networksToDisable.Contains(netw.Name) - netCIDR := &net.IPNet{ - IP: net.ParseIP(netw.IP), - Mask: net.IPMask(net.ParseIP(netw.Mask)), + result, err = node.CallPost("start", params) + if err == nil { + break } - macs, err := environ.getNetworkMACs(netw.Name) + } + + if err == nil { + var startedNode gomaasapi.MAASObject + startedNode, err = result.GetMAASObject() if err != nil { - return nil, "", errors.Annotatef(err, "getNetworkMACs failed") - } - logger.Debugf("network %q has MACs: %v", netw.Name, macs) - for _, mac := range macs { - if ifinfo, ok := interfaces[mac]; ok { - tempInterfaceInfo = append(tempInterfaceInfo, network.InterfaceInfo{ - MACAddress: mac, - InterfaceName: ifinfo.InterfaceName, - DeviceIndex: ifinfo.DeviceIndex, - CIDR: netCIDR.String(), - VLANTag: netw.VLANTag, - ProviderId: network.Id(netw.Name), - NetworkName: netw.Name, - Disabled: disabled || ifinfo.Disabled, - }) - } - } - } - // Verify we filled-in everything for all networks/interfaces - // and drop incomplete records. - var interfaceInfo []network.InterfaceInfo - for _, info := range tempInterfaceInfo { - if info.ProviderId == "" || info.NetworkName == "" || info.CIDR == "" { - logger.Infof("ignoring interface %q: missing subnet info", info.InterfaceName) - continue - } - if info.MACAddress == "" || info.InterfaceName == "" { - logger.Infof("ignoring subnet %q: missing interface info", info.ProviderId) - continue - } - interfaceInfo = append(interfaceInfo, info) - } - logger.Debugf("node %q network information: %#v", inst.Id(), interfaceInfo) - return interfaceInfo, primaryIface, nil + logger.Errorf("cannot process API response after successfully starting node: %v", err) + return nil, err + } + return &startedNode, nil + } + return nil, err } // DistributeInstances implements the state.InstanceDistributor policy. @@ -900,35 +777,34 @@ availabilityZones = []string{""} } - // Networking. - // - // TODO(dimitern): Once we can get from spaces constraints to MAAS - // networks (or even directly to spaces), include them in the - // instance selection. - requestedNetworks := args.InstanceConfig.Networks - includeNetworks := append(args.Constraints.IncludeNetworks(), requestedNetworks...) - excludeNetworks := args.Constraints.ExcludeNetworks() - // Storage. 
volumes, err := buildMAASVolumeParameters(args.Volumes, args.Constraints) if err != nil { return nil, errors.Annotate(err, "invalid volume parameters") } + var interfaceBindings []interfaceBinding + if len(args.EndpointBindings) != 0 { + for endpoint, spaceProviderID := range args.EndpointBindings { + interfaceBindings = append(interfaceBindings, interfaceBinding{ + Name: endpoint, + SpaceProviderId: string(spaceProviderID), + }) + } + } snArgs := selectNodeArgs{ Constraints: args.Constraints, AvailabilityZones: availabilityZones, NodeName: nodeName, - IncludeNetworks: includeNetworks, - ExcludeNetworks: excludeNetworks, + Interfaces: interfaceBindings, Volumes: volumes, } - node, err := environ.selectNode(snArgs) + selectedNode, err := environ.selectNode(snArgs) if err != nil { return nil, errors.Errorf("cannot run instances: %v", err) } - inst := &maasInstance{maasObject: node, environ: environ} + inst := &maasInstance{selectedNode} defer func() { if err != nil { if err := environ.StopInstances(inst.Id()); err != nil { @@ -950,12 +826,6 @@ } args.InstanceConfig.Tools = selectedTools[0] - var networkInfo []network.InterfaceInfo - networkInfo, primaryIface, err := environ.setupNetworks(inst, set.NewStrings(excludeNetworks...)) - if err != nil { - return nil, err - } - hostname, err := inst.hostname() if err != nil { return nil, err @@ -974,7 +844,7 @@ } series := args.InstanceConfig.Tools.Version.Series - cloudcfg, err := environ.newCloudinitConfig(hostname, primaryIface, series) + cloudcfg, err := environ.newCloudinitConfig(hostname, series) if err != nil { return nil, err } @@ -985,8 +855,28 @@ } logger.Debugf("maas user data; %d bytes", len(userdata)) - if err := environ.startNode(*inst.maasObject, series, userdata); err != nil { + var startedNode *gomaasapi.MAASObject + var interfaces []network.InterfaceInfo + if startedNode, err = environ.startNode(*inst.maasObject, series, userdata); err != nil { return nil, err + } else { + // Once the instance has started the response should contain the + // assigned IP addresses, even when NICs are set to "auto" instead of + // "static". So instead of selectedNode, which only contains the + // acquire-time details (no IP addresses for NICs set to "auto" vs + // "static"), we use the up-to-date statedNode response to get the + // interfaces. + + if environ.supportsNetworkDeploymentUbuntu { + // Use the new 1.9 API when available. + interfaces, err = maasObjectNetworkInterfaces(startedNode) + } else { + // Use the legacy approach. + interfaces, err = environ.setupNetworks(inst) + } + if err != nil { + return nil, errors.Trace(err) + } } logger.Debugf("started instance %q", inst.Id()) @@ -1015,7 +905,7 @@ return &environs.StartInstanceResult{ Instance: inst, Hardware: hc, - NetworkInfo: networkInfo, + NetworkInfo: interfaces, Volumes: resultVolumes, VolumeAttachments: resultAttachments, }, nil @@ -1088,8 +978,7 @@ AvailabilityZones []string NodeName string Constraints constraints.Value - IncludeNetworks []string - ExcludeNetworks []string + Interfaces []interfaceBinding Volumes []volumeInfo } @@ -1102,8 +991,7 @@ args.NodeName, zoneName, args.Constraints, - args.IncludeNetworks, - args.ExcludeNetworks, + args.Interfaces, args.Volumes, ) @@ -1142,7 +1030,7 @@ // going forward: python 3 only return fmt.Sprintf(` -trap 'rm -f %[2]q' EXIT +trap 'rm -f %[1]q' EXIT if [ -x /usr/bin/python2 ]; then juju_networking_preferred_python_binary=/usr/bin/python2 @@ -1153,15 +1041,24 @@ fi if [ ! 
-z "${juju_networking_preferred_python_binary:-}" ]; then - juju_ipv4_interface_to_bridge=$(ip -4 route list exact default | head -n1 | cut -d' ' -f5) - if [ -f %[2]q ]; then - $juju_networking_preferred_python_binary %[2]q --bridge-name=%[3]q --interface-to-bridge=%[1]q --one-time-backup --activate %[4]q + if [ -f %[1]q ]; then +# We are sharing this code between master, maas-spaces2 and 1.25. +# For the moment we want master and 1.25 to not bridge all interfaces. +# This setting allows us to easily switch the behaviour when merging +# the code between those various branches. + juju_bridge_all_interfaces=0 + if [ $juju_bridge_all_interfaces -eq 1 ]; then + $juju_networking_preferred_python_binary %[1]q --bridge-prefix=%[2]q --one-time-backup --activate %[4]q + else + juju_ipv4_interface_to_bridge=$(ip -4 route list exact default | head -n1 | cut -d' ' -f5) + $juju_networking_preferred_python_binary %[1]q --bridge-name=%[3]q --interface-to-bridge="${juju_ipv4_interface_to_bridge:-unknown}" --one-time-backup --activate %[4]q + fi fi else echo "error: no Python installation found; cannot run Juju's bridge script" fi`, - "$juju_ipv4_interface_to_bridge", bridgeScriptPath, + instancecfg.DefaultBridgePrefix, instancecfg.DefaultBridgeName, "/etc/network/interfaces") } @@ -1170,10 +1067,10 @@ return setupJujuNetworking() } -// newCloudinitConfig creates a cloudinit.Config structure -// suitable as a base for initialising a MAAS node. -func (environ *maasEnviron) newCloudinitConfig(hostname, primaryIface, series string) (cloudinit.CloudConfig, error) { - cloudcfg, err := cloudinit.New(series) +// newCloudinitConfig creates a cloudinit.Config structure suitable as a base +// for initialising a MAAS node. +func (environ *maasEnviron) newCloudinitConfig(hostname, forSeries string) (cloudinit.CloudConfig, error) { + cloudcfg, err := cloudinit.New(forSeries) if err != nil { return nil, err } @@ -1184,14 +1081,14 @@ return nil, errors.Trace(err) } - operatingSystem, err := version.GetOSFromSeries(series) + operatingSystem, err := series.GetOSFromSeries(forSeries) if err != nil { return nil, errors.Trace(err) } switch operatingSystem { - case version.Windows: + case os.Windows: cloudcfg.AddScripts(runCmd) - case version.Ubuntu: + case os.Ubuntu: cloudcfg.SetSystemUpdate(true) cloudcfg.AddScripts("set -xe", runCmd) // Only create the default bridge if we're not using static @@ -1306,10 +1203,7 @@ if err != nil { return nil, err } - instances[index] = &maasInstance{ - maasObject: &node, - environ: environ, - } + instances[index] = &maasInstance{&node} } return instances, nil } @@ -1349,16 +1243,62 @@ return result, nil } -// newDevice creates a new MAAS device for a MAC address, returning the Id of -// the new device. -func (environ *maasEnviron) newDevice(macAddress string, instId instance.Id, hostname string) (string, error) { - client := environ.getMAASClient() - devices := client.GetSubObject("devices") - params := url.Values{} +// transformDeviceHostname transforms deviceHostname to include hostnameSuffix +// after the first "." in deviceHostname. Returns errors if deviceHostname does +// not contain any "." or hostnameSuffix is empty. 
+func transformDeviceHostname(deviceID, deviceHostname, hostnameSuffix string) (string, error) { + if hostnameSuffix == "" { + return "", errors.New("hostname suffix cannot be empty") + } + parts := strings.SplitN(deviceHostname, ".", 2) + if len(parts) != 2 { + return "", errors.Errorf("unexpected device %q hostname %q", deviceID, deviceHostname) + } + return fmt.Sprintf("%s-%s.%s", parts[0], hostnameSuffix, parts[1]), nil +} + +// updateDeviceHostname updates the hostname of a MAAS device to be unique and +// to contain the given hostnameSuffix. +func updateDeviceHostname(client *gomaasapi.MAASObject, deviceID, deviceHostname, hostnameSuffix string) (string, error) { + + newHostname, err := transformDeviceHostname(deviceID, deviceHostname, hostnameSuffix) + if err != nil { + return "", errors.Trace(err) + } + + deviceObj := client.GetSubObject("devices").GetSubObject(deviceID) + params := make(url.Values) + params.Add("hostname", newHostname) + if _, err := deviceObj.Update(params); err != nil { + return "", errors.Annotatef(err, "updating device %q hostname to %q", deviceID, newHostname) + } + return newHostname, nil +} + +// newDeviceParams prepares the params to call "devices new" API. Declared +// separately so it can be mocked out in the test to work around the gomaasapi's +// testservice limitation. +func newDeviceParams(macAddress string, instId instance.Id, _ string) url.Values { + params := make(url.Values) params.Add("mac_addresses", macAddress) - params.Add("hostname", hostname) + // We create the device without a hostname, to allow MAAS to create a unique + // hostname first. params.Add("parent", extractSystemId(instId)) - logger.Tracef("creating a new MAAS device for MAC %q, hostname %q, parent %q", macAddress, hostname, string(instId)) + + return params +} + +// newDevice creates a new MAAS device with parent instance instId, using the +// given macAddress and hostnameSuffix, returning the ID of the new device. +func (environ *maasEnviron) newDevice(macAddress string, instId instance.Id, hostnameSuffix string) (string, error) { + client := environ.getMAASClient() + devices := client.GetSubObject("devices") + // Work around the limitation of gomaasapi's testservice expecting all 3 + // arguments (parent, mac_addresses, and hostname) to be filled in. + params := NewDeviceParams(macAddress, instId, hostnameSuffix) + logger.Tracef( + "creating a new MAAS device for MAC %q, parent %q", macAddress, instId, + ) result, err := devices.CallPost("new", params) if err != nil { return "", errors.Trace(err) @@ -1369,38 +1309,62 @@ return "", errors.Trace(err) } - device, err := resultMap["system_id"].GetString() + deviceID, err := resultMap["system_id"].GetString() if err != nil { return "", errors.Trace(err) } - return device, nil + deviceHostname, err := resultMap["hostname"].GetString() + if err != nil { + return deviceID, errors.Trace(err) + } + + logger.Tracef("created device %q with MAC %q and hostname %q", deviceID, macAddress, deviceHostname) + + newHostname, err := UpdateDeviceHostname(client, deviceID, deviceHostname, hostnameSuffix) + if err != nil { + return deviceID, errors.Trace(err) + } + logger.Tracef("updated device %q hostname to %q", deviceID, newHostname) + + return deviceID, nil } -// fetchFullDevice fetches an existing device Id associated with a MAC address, or -// returns an error if there is no device. +// fetchFullDevice fetches an existing device ID associated with the given +// macAddress, or returns an error if there is no device. 
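The "list" call below filters devices by MAC address, and the code insists on exactly one match. A sketch of the response shape being consumed (the values are invented; the keys are the ones the surrounding code reads):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		// Hypothetical reply to GET .../devices/?op=list&mac_address=52:54:00:aa:bb:cc
		const response = `[{"system_id": "device-1", "hostname": "node-2b3c4d.maas", "ip_addresses": ["10.20.30.40"]}]`
		var devices []map[string]interface{}
		if err := json.Unmarshal([]byte(response), &devices); err != nil {
			panic(err)
		}
		fmt.Println(len(devices) == 1, devices[0]["system_id"]) // true device-1
	}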
func (environ *maasEnviron) fetchFullDevice(macAddress string) (map[string]gomaasapi.JSONObject, error) { + if macAddress == "" { + return nil, errors.Errorf("given MAC address is empty") + } + client := environ.getMAASClient() devices := client.GetSubObject("devices") params := url.Values{} params.Add("mac_address", macAddress) + result, err := devices.CallGet("list", params) if err != nil { return nil, errors.Trace(err) } + resultArray, err := result.GetArray() if err != nil { return nil, errors.Trace(err) } + if len(resultArray) == 0 { - return nil, errors.NotFoundf("no device for MAC %q", macAddress) + return nil, errors.NotFoundf("no device for MAC address %q", macAddress) } + if len(resultArray) != 1 { return nil, errors.Errorf("unexpected response, expected 1 device got %d", len(resultArray)) } + resultMap, err := resultArray[0].GetMap() if err != nil { return nil, errors.Trace(err) } + + logger.Tracef("device found as %+v", resultMap) return resultMap, nil } @@ -1410,11 +1374,11 @@ return "", errors.Trace(err) } - deviceId, err := deviceMap["system_id"].GetString() + deviceID, err := deviceMap["system_id"].GetString() if err != nil { return "", errors.Trace(err) } - return deviceId, nil + return deviceID, nil } // createOrFetchDevice returns a device Id associated with a MAC address. If @@ -1427,37 +1391,70 @@ if !errors.IsNotFound(err) { return "", errors.Trace(err) } - device, err = environ.newDevice(macAddress, instId, hostname) - if err != nil { - return "", errors.Trace(err) - } - return device, nil + return environ.newDevice(macAddress, instId, hostname) } // AllocateAddress requests an address to be allocated for the // given instance on the given network. -func (environ *maasEnviron) AllocateAddress(instId instance.Id, subnetId network.Id, addr network.Address, macAddress, hostname string) (err error) { +func (environ *maasEnviron) AllocateAddress(instId instance.Id, subnetId network.Id, addr *network.Address, macAddress, hostname string) (err error) { + logger.Tracef( + "AllocateAddress for instId %q, subnet %q, addr %q, MAC %q, hostname %q", + instId, subnetId, addr, macAddress, hostname, + ) + if addr == nil { + return errors.NewNotValid(nil, "invalid address: cannot be nil") + } + if !environs.AddressAllocationEnabled() { - return errors.NotSupportedf("address allocation") + if !environ.supportsDevices { + logger.Warningf( + "resources used by container %q with MAC address %q can leak: devices API not supported", + hostname, macAddress, + ) + return errors.NotSupportedf("address allocation") + } + logger.Tracef("creating device for container %q with MAC %q", hostname, macAddress) + deviceID, err := environ.createOrFetchDevice(macAddress, instId, hostname) + if err != nil { + return errors.Annotatef( + err, + "failed creating MAAS device for container %q with MAC address %q", + hostname, macAddress, + ) + } + logger.Infof( + "created device %q for container %q with MAC address %q on parent node %q", + deviceID, hostname, macAddress, instId, + ) + devices := environ.getMAASClient().GetSubObject("devices") + newAddr, err := ReserveIPAddressOnDevice(devices, deviceID, macAddress, network.Address{}) + if err != nil { + return errors.Trace(err) + } + logger.Infof( + "reserved sticky IP address %q for device %q with MAC address %q representing container %q", + newAddr, deviceID, macAddress, hostname, + ) + *addr = newAddr + return nil } defer errors.DeferredAnnotatef(&err, "failed to allocate address %q for instance %q", addr, instId) client := environ.getMAASClient() var 
maasErr gomaasapi.ServerError - supportsDevices, err := environ.supportsDevices() - if err != nil { - return err - } - if supportsDevices { - device, err := environ.createOrFetchDevice(macAddress, instId, hostname) + if environ.supportsDevices { + deviceID, err := environ.createOrFetchDevice(macAddress, instId, hostname) if err != nil { return err } devices := client.GetSubObject("devices") - err = ReserveIPAddressOnDevice(devices, device, addr) + newAddr, err := ReserveIPAddressOnDevice(devices, deviceID, macAddress, *addr) if err == nil { - logger.Infof("allocated address %q for instance %q on device %q", addr, instId, device) + logger.Infof( + "allocated address %q for instance %q on device %q (asked for address %q)", + addr, instId, deviceID, newAddr, + ) return nil } @@ -1471,7 +1468,7 @@ var subnets []network.SubnetInfo subnets, err = environ.Subnets(instId, []network.Id{subnetId}) - logger.Tracef("Subnets(%q, %q, %q) returned: %v (%v)", instId, subnetId, addr, subnets, err) + logger.Tracef("Subnets(%q, %q, %q) returned: %v (%v)", instId, subnetId, *addr, subnets, err) if err != nil { return errors.Trace(err) } @@ -1483,9 +1480,9 @@ cidr := foundSub.CIDR ipaddresses := client.GetSubObject("ipaddresses") - err = ReserveIPAddress(ipaddresses, cidr, addr) + err = ReserveIPAddress(ipaddresses, cidr, *addr) if err == nil { - logger.Infof("allocated address %q for instance %q on subnet %q", addr, instId, cidr) + logger.Infof("allocated address %q for instance %q on subnet %q", *addr, instId, cidr) return nil } @@ -1515,22 +1512,46 @@ // ReleaseAddress releases a specific address previously allocated with // AllocateAddress. -func (environ *maasEnviron) ReleaseAddress(instId instance.Id, _ network.Id, addr network.Address, macAddress string) (err error) { +func (environ *maasEnviron) ReleaseAddress(instId instance.Id, _ network.Id, addr network.Address, macAddress, hostname string) (err error) { if !environs.AddressAllocationEnabled() { - return errors.NotSupportedf("address allocation") + if !environ.supportsDevices { + logger.Warningf( + "resources used by container %q with MAC address %q can leak: devices API not supported", + hostname, macAddress, + ) + return errors.NotSupportedf("address allocation") + } + logger.Tracef("getting device ID for container %q with MAC %q", macAddress, hostname) + deviceID, err := environ.fetchDevice(macAddress) + if err != nil { + return errors.Annotatef( + err, + "getting MAAS device for container %q with MAC address %q", + hostname, macAddress, + ) + } + logger.Tracef("deleting device %q for container %q", deviceID, hostname) + apiDevice := environ.getMAASClient().GetSubObject("devices").GetSubObject(deviceID) + if err := apiDevice.Delete(); err != nil { + return errors.Annotatef( + err, + "deleting MAAS device %q for container %q with MAC address %q", + deviceID, instId, macAddress, + ) + } + logger.Debugf("deleted device %q for container %q with MAC address %q", deviceID, instId, macAddress) + return nil } defer errors.DeferredAnnotatef(&err, "failed to release IP address %q from instance %q", addr, instId) - supportsDevices, err := environ.supportsDevices() - if err != nil { - return err - } - - logger.Infof("releasing address: %q, MAC address: %q, supports devices: %v", addr, macAddress, supportsDevices) + logger.Infof( + "releasing address: %q, MAC address: %q, hostname: %q, supports devices: %v", + addr, macAddress, hostname, environ.supportsDevices, + ) // Addresses originally allocated without a device will have macAddress // set to "". 
We shouldn't look for a device for these addresses.
-	if supportsDevices && macAddress != "" {
+	if environ.supportsDevices && macAddress != "" {
 		device, err := environ.fetchFullDevice(macAddress)
 		if err == nil {
 			addresses, err := device["ip_addresses"].GetArray()
@@ -1578,113 +1599,356 @@
 	return err
 }
 
-// NetworkInterfaces implements Environ.NetworkInterfaces.
-func (environ *maasEnviron) NetworkInterfaces(instId instance.Id) ([]network.InterfaceInfo, error) {
+// subnetsFromNode fetches all the subnets for a specific node.
+func (environ *maasEnviron) subnetsFromNode(nodeId string) ([]gomaasapi.JSONObject, error) {
+	client := environ.getMAASClient().GetSubObject("nodes").GetSubObject(nodeId)
+	json, err := client.CallGet("", nil)
+	if err != nil {
+		if maasErr, ok := err.(gomaasapi.ServerError); ok && maasErr.StatusCode == http.StatusNotFound {
+			return nil, errors.NotFoundf("instance %q", nodeId)
+		}
+		return nil, errors.Trace(err)
+	}
+	nodeMap, err := json.GetMap()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	interfacesArray, err := nodeMap["interface_set"].GetArray()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var subnets []gomaasapi.JSONObject
+	for _, iface := range interfacesArray {
+		ifaceMap, err := iface.GetMap()
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		linksArray, err := ifaceMap["links"].GetArray()
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		for _, link := range linksArray {
+			linkMap, err := link.GetMap()
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+			subnet, ok := linkMap["subnet"]
+			if !ok {
+				return nil, errors.New("subnet not found")
+			}
+			subnets = append(subnets, subnet)
+		}
+	}
+	return subnets, nil
+}
+
+// Deduce the allocatable portion of the subnet by subtracting the dynamic
+// range from the full subnet range.
+func (environ *maasEnviron) allocatableRangeForSubnet(cidr string, subnetId string) (net.IP, net.IP, error) {
+	// Initialize the low and high bounds of the allocatable range to the
+	// whole CIDR. Reduce the scope of this when we find the dynamic range.
+	ip, ipnet, err := net.ParseCIDR(cidr)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+	// Skip IPv6 subnets until we can handle them correctly.
+	if ip.To4() == nil && ip.To16() != nil {
+		logger.Debugf("ignoring static IP range for IPv6 subnet %q", cidr)
+		return nil, nil, nil
+	}
+
+	// TODO(mfoord): needs updating to work with IPv6 as well.
+	lowBound, err := network.IPv4ToDecimal(ip)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+	// Don't include the zero address in the allocatable bounds.
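+	// For example: with "10.0.0.0/24" the bounds below start as
+	// lowBound=10.0.0.1 and highBound=10.0.0.255 (lowBound + 2^8 - 2);
+	// a dynamic range of 10.0.0.100-10.0.0.200 then narrows the
+	// allocatable range to 10.0.0.1-10.0.0.99, keeping the larger of
+	// the two remaining portions. (Illustrative addresses only.)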
+ lowBound = lowBound + 1 + ones, bits := ipnet.Mask.Size() + zeros := bits - ones + numIPs := uint32(1) << uint32(zeros) + highBound := lowBound + numIPs - 2 + + client := environ.getMAASClient().GetSubObject("subnets").GetSubObject(subnetId) + + json, err := client.CallGet("reserved_ip_ranges", nil) + if err != nil { + return nil, nil, errors.Trace(err) + } + jsonRanges, err := json.GetArray() + if err != nil { + return nil, nil, errors.Trace(err) + } + + for _, jsonRange := range jsonRanges { + rangeMap, err := jsonRange.GetMap() + if err != nil { + return nil, nil, errors.Trace(err) + } + purposeArray, err := rangeMap["purpose"].GetArray() + if err != nil { + return nil, nil, errors.Trace(err) + } + found := false + for _, jsonPurpose := range purposeArray { + purpose, err := jsonPurpose.GetString() + if err != nil { + return nil, nil, errors.Trace(err) + } + if purpose == dynamicRange { + found = true + break + } + } + if !found { + // This is not the range we're looking for + continue + } + + start, err := rangeMap["start"].GetString() + if err != nil { + return nil, nil, errors.Trace(err) + } + end, err := rangeMap["end"].GetString() + if err != nil { + return nil, nil, errors.Trace(err) + } + dynamicLow, err := network.IPv4ToDecimal(net.ParseIP(start)) + if err != nil { + return nil, nil, errors.Trace(err) + } + dynamicHigh, err := network.IPv4ToDecimal(net.ParseIP(end)) + if err != nil { + return nil, nil, errors.Trace(err) + } + + // We pick the larger of the two portions of the subnet around + // the dynamic range. Either ending one below the start of the + // dynamic range or starting one after the end. + above := highBound - dynamicHigh + below := dynamicLow - lowBound + if above > below { + lowBound = dynamicHigh + 1 + } else { + highBound = dynamicLow - 1 + } + break + } + return network.DecimalToIPv4(lowBound), network.DecimalToIPv4(highBound), nil +} + +// subnetsWithSpaces uses the MAAS 1.9+ API to fetch subnet information +// including space name. +func (environ *maasEnviron) subnetsWithSpaces(instId instance.Id, subnetIds []network.Id) ([]network.SubnetInfo, error) { + var nodeId string + if instId != instance.UnknownId { + inst, err := environ.getInstance(instId) + if err != nil { + return nil, errors.Trace(err) + } + nodeId, err = environ.nodeIdFromInstance(inst) + if err != nil { + return nil, errors.Trace(err) + } + } + subnets, err := environ.filteredSubnets(nodeId, subnetIds) + if err != nil { + return nil, errors.Trace(err) + } + if instId != instance.UnknownId { + logger.Debugf("instance %q has subnets %v", instId, subnets) + } else { + logger.Debugf("found subnets %v", subnets) + } + + return subnets, nil +} + +// subnetFromJson populates a network.SubnetInfo from a gomaasapi.JSONObject +// representing a single subnet. This can come from either the subnets api +// endpoint or the node endpoint. 
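For reference, the single-subnet JSON shape the function below consumes, with invented values (only "id", "cidr", "space" and the optional "vid" are read; numbers arrive as float64, matching the GetFloat64 calls):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		const subnetJSON = `{"id": 3, "cidr": "10.20.0.0/16", "space": "default", "vid": 42}`
		var fields map[string]interface{}
		if err := json.Unmarshal([]byte(subnetJSON), &fields); err != nil {
			panic(err)
		}
		fmt.Println(int(fields["id"].(float64)), fields["cidr"], fields["space"], int(fields["vid"].(float64)))
		// 3 10.20.0.0/16 default 42
	}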
+func (environ *maasEnviron) subnetFromJson(subnet gomaasapi.JSONObject) (network.SubnetInfo, error) { + var subnetInfo network.SubnetInfo + fields, err := subnet.GetMap() + if err != nil { + return subnetInfo, errors.Trace(err) + } + subnetIdFloat, err := fields["id"].GetFloat64() + if err != nil { + return subnetInfo, errors.Annotatef(err, "cannot get subnet Id") + } + subnetId := strconv.Itoa(int(subnetIdFloat)) + cidr, err := fields["cidr"].GetString() + if err != nil { + return subnetInfo, errors.Errorf("cannot get cidr: %v", err) + } + spaceName, err := fields["space"].GetString() + if err != nil { + return subnetInfo, errors.Errorf("cannot get space name: %v", err) + } + vid := 0 + vidField, ok := fields["vid"] + if ok && !vidField.IsNil() { + // vid is optional, so assume it's 0 when missing or nil. + vidFloat, err := vidField.GetFloat64() + if err != nil { + return subnetInfo, errors.Errorf("cannot get vlan tag: %v", err) + } + vid = int(vidFloat) + } + allocatableLow, allocatableHigh, err := environ.allocatableRangeForSubnet(cidr, subnetId) + if err != nil { + return subnetInfo, errors.Trace(err) + } + + subnetInfo = network.SubnetInfo{ + ProviderId: network.Id(subnetId), + VLANTag: vid, + CIDR: cidr, + SpaceProviderId: network.Id(spaceName), + AllocatableIPLow: allocatableLow, + AllocatableIPHigh: allocatableHigh, + } + return subnetInfo, nil +} + +// filteredSubnets fetches subnets, filtering optionally by nodeId and/or a +// slice of subnetIds. If subnetIds is empty then all subnets for that node are +// fetched. If nodeId is empty, all subnets are returned (filtering by subnetIds +// first, if set). +func (environ *maasEnviron) filteredSubnets(nodeId string, subnetIds []network.Id) ([]network.SubnetInfo, error) { + var jsonNets []gomaasapi.JSONObject + var err error + if nodeId != "" { + jsonNets, err = environ.subnetsFromNode(nodeId) + if err != nil { + return nil, errors.Trace(err) + } + } else { + jsonNets, err = environ.fetchAllSubnets() + if err != nil { + return nil, errors.Trace(err) + } + } + subnetIdSet := make(map[string]bool) + for _, netId := range subnetIds { + subnetIdSet[string(netId)] = false + } + + subnets := []network.SubnetInfo{} + for _, jsonNet := range jsonNets { + fields, err := jsonNet.GetMap() + if err != nil { + return nil, err + } + subnetIdFloat, err := fields["id"].GetFloat64() + if err != nil { + return nil, errors.Errorf("cannot get subnet Id: %v", err) + } + subnetId := strconv.Itoa(int(subnetIdFloat)) + + // If we're filtering by subnet id check if this subnet is one + // we're looking for. + if len(subnetIds) != 0 { + _, ok := subnetIdSet[subnetId] + if !ok { + // This id is not what we're looking for. 
+ continue + } + subnetIdSet[subnetId] = true + } + subnetInfo, err := environ.subnetFromJson(jsonNet) + if err != nil { + return nil, errors.Trace(err) + } + subnets = append(subnets, subnetInfo) + logger.Tracef("found subnet with info %#v", subnetInfo) + } + return subnets, checkNotFound(subnetIdSet) +} + +func (environ *maasEnviron) getInstance(instId instance.Id) (instance.Instance, error) { instances, err := environ.acquiredInstances([]instance.Id{instId}) if err != nil { - return nil, errors.Annotatef(err, "could not find instance %q", instId) + if maasErr, ok := err.(gomaasapi.ServerError); ok && maasErr.StatusCode == http.StatusNotFound { + return nil, errors.NotFoundf("instance %q", instId) + } + return nil, errors.Annotatef(err, "getting instance %q", instId) } if len(instances) == 0 { return nil, errors.NotFoundf("instance %q", instId) } inst := instances[0] - interfaces, _, err := environ.getInstanceNetworkInterfaces(inst) - if err != nil { - return nil, errors.Annotatef(err, "failed to get instance %q network interfaces", instId) - } - - networks, err := environ.getInstanceNetworks(inst) - if err != nil { - return nil, errors.Annotatef(err, "failed to get instance %q subnets", instId) - } - - macToNetworkMap := make(map[string]networkDetails) - for _, network := range networks { - macs, err := environ.listConnectedMacs(network) + return inst, nil +} + +// fetchAllSubnets calls the MAAS subnets API to get all subnets and returns the +// JSON response or an error. If capNetworkDeploymentUbuntu is not available, an +// error satisfying errors.IsNotSupported will be returned. +func (environ *maasEnviron) fetchAllSubnets() ([]gomaasapi.JSONObject, error) { + if !environ.supportsNetworkDeploymentUbuntu { + return nil, errors.NotSupportedf("Spaces") + } + client := environ.getMAASClient().GetSubObject("subnets") + + json, err := client.CallGet("", nil) + if err != nil { + return nil, errors.Trace(err) + } + return json.GetArray() +} + +// Spaces returns all the spaces, that have subnets, known to the provider. +// Space name is not filled in as the provider doesn't know the juju name for +// the space. 
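The grouping Spaces performs below is a simple bucketing of subnets by their space provider ID, one SpaceInfo per space. A stripped-down sketch, using local stand-in types instead of the network package:

	package main

	import "fmt"

	type SubnetInfo struct{ CIDR, Space string }

	type SpaceInfo struct {
		ProviderId string
		Subnets    []SubnetInfo
	}

	func groupBySpace(subnets []SubnetInfo) map[string]*SpaceInfo {
		spaces := make(map[string]*SpaceInfo)
		for _, sub := range subnets {
			sp, ok := spaces[sub.Space]
			if !ok {
				sp = &SpaceInfo{ProviderId: sub.Space}
				spaces[sub.Space] = sp
			}
			sp.Subnets = append(sp.Subnets, sub)
		}
		return spaces
	}

	func main() {
		got := groupBySpace([]SubnetInfo{
			{"10.20.0.0/16", "default"},
			{"10.30.0.0/16", "default"},
			{"192.168.1.0/24", "dmz"},
		})
		fmt.Println(len(got), len(got["default"].Subnets)) // 2 2
	}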
+func (environ *maasEnviron) Spaces() ([]network.SpaceInfo, error) {
+	jsonNets, err := environ.fetchAllSubnets()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	spaceMap := make(map[network.Id]*network.SpaceInfo)
+	names := set.Strings{}
+	for _, jsonNet := range jsonNets {
+		subnetInfo, err := environ.subnetFromJson(jsonNet)
 		if err != nil {
 			return nil, errors.Trace(err)
 		}
-		for _, mac := range macs {
-			macToNetworkMap[mac] = network
-		}
-	}
-
-	result := []network.InterfaceInfo{}
-	for serial, iface := range interfaces {
-		deviceIndex := iface.DeviceIndex
-		interfaceName := iface.InterfaceName
-		disabled := iface.Disabled
-
-		ifaceInfo := network.InterfaceInfo{
-			DeviceIndex:   deviceIndex,
-			InterfaceName: interfaceName,
-			Disabled:      disabled,
-			NoAutoStart:   disabled,
-			MACAddress:    serial,
-			ConfigType:    network.ConfigDHCP,
-		}
-		details, ok := macToNetworkMap[serial]
-		if ok {
-			ifaceInfo.VLANTag = details.VLANTag
-			ifaceInfo.ProviderSubnetId = network.Id(details.Name)
-			mask := net.IPMask(net.ParseIP(details.Mask))
-			cidr := net.IPNet{net.ParseIP(details.IP), mask}
-			ifaceInfo.CIDR = cidr.String()
-			ifaceInfo.Address = network.NewAddress(cidr.IP.String())
-		} else {
-			logger.Debugf("no subnet information for MAC address %q, instance %q", serial, instId)
-		}
-		result = append(result, ifaceInfo)
-	}
-	return result, nil
-}
-
-// listConnectedMacs calls the MAAS list_connected_macs API to fetch all the
-// the MAC addresses attached to a specific network.
-func (environ *maasEnviron) listConnectedMacs(network networkDetails) ([]string, error) {
-	client := environ.getMAASClient().GetSubObject("networks").GetSubObject(network.Name)
-	json, err := client.CallGet("list_connected_macs", nil)
-	if err != nil {
-		return nil, err
-	}
-
-	macs, err := json.GetArray()
-	if err != nil {
-		return nil, err
-	}
-	result := []string{}
-	for _, macObj := range macs {
-		macMap, err := macObj.GetMap()
-		if err != nil {
-			return nil, err
-		}
-		mac, err := macMap["mac_address"].GetString()
-		if err != nil {
-			return nil, err
-		}
-
-		result = append(result, mac)
-	}
-	return result, nil
+		space, ok := spaceMap[subnetInfo.SpaceProviderId]
+		if !ok {
+			space = &network.SpaceInfo{
+				ProviderId: subnetInfo.SpaceProviderId,
+			}
+			spaceMap[space.ProviderId] = space
+			names.Add(string(space.ProviderId))
+		}
+		space.Subnets = append(space.Subnets, subnetInfo)
+	}
+	spaces := make([]network.SpaceInfo, len(names))
+	for i, name := range names.SortedValues() {
+		spaces[i] = *spaceMap[network.Id(name)]
+	}
+	return spaces, nil
 }
 
 // Subnets returns basic information about the specified subnets known
 // by the provider for the specified instance. subnetIds must not be
 // empty. Implements NetworkingEnviron.Subnets.
 func (environ *maasEnviron) Subnets(instId instance.Id, subnetIds []network.Id) ([]network.SubnetInfo, error) {
-	// At some point in the future an empty netIds may mean "fetch all subnets"
-	// but until that functionality is needed it's an error.
+	if environ.supportsNetworkDeploymentUbuntu {
+		return environ.subnetsWithSpaces(instId, subnetIds)
+	}
+	// When the MAAS API lacks spaces support, we require both the instance ID
+	// and the list of subnet IDs; this is due to the limitations of the old API.
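+	// For example (an illustrative call, not part of this change): on the
+	// legacy API callers must pass both arguments, e.g.
+	//
+	//	subnets, err := env.Subnets(instId, []network.Id{"LAN", "WLAN"})
+	//
+	// while with spaces support instId may be instance.UnknownId and
+	// subnetIds may be empty, meaning "fetch all known subnets".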
+ if instId == instance.UnknownId { + return nil, errors.Errorf("instance ID is required") + } + inst, err := environ.getInstance(instId) + if err != nil { + return nil, errors.Trace(err) + } if len(subnetIds) == 0 { - return nil, errors.Errorf("subnetIds must not be empty") - } - instances, err := environ.acquiredInstances([]instance.Id{instId}) - if err != nil { - return nil, errors.Annotatef(err, "could not find instance %q", instId) - } - if len(instances) == 0 { - return nil, errors.NotFoundf("instance %v", instId) - } - inst := instances[0] + return nil, errors.Errorf("subnet IDs must not be empty") + } // The MAAS API get networks call returns named subnets, not physical networks, // so we save the data from this call into a variable called subnets. // http://maas.ubuntu.com/docs/api.html#networks @@ -1700,26 +1964,24 @@ } nodegroupInterfaces := environ.getNodegroupInterfaces(nodegroups) - subnetIdSet := make(map[network.Id]bool) + subnetIdSet := make(map[string]bool) for _, netId := range subnetIds { - subnetIdSet[netId] = false + subnetIdSet[string(netId)] = false } - processedIds := make(map[network.Id]bool) var networkInfo []network.SubnetInfo for _, subnet := range subnets { - _, ok := subnetIdSet[network.Id(subnet.Name)] + found, ok := subnetIdSet[subnet.Name] if !ok { // This id is not what we're looking for. continue } - if _, ok := processedIds[network.Id(subnet.Name)]; ok { + if found { // Don't add the same subnet twice. continue } // mark that we've found this subnet - processedIds[network.Id(subnet.Name)] = true - subnetIdSet[network.Id(subnet.Name)] = true + subnetIdSet[subnet.Name] = true netCIDR := &net.IPNet{ IP: net.ParseIP(subnet.IP), Mask: net.IPMask(net.ParseIP(subnet.Mask)), @@ -1752,18 +2014,20 @@ networkInfo = append(networkInfo, subnetInfo) } logger.Debugf("available subnets for instance %v: %#v", inst.Id(), networkInfo) + return networkInfo, checkNotFound(subnetIdSet) +} - notFound := []network.Id{} +func checkNotFound(subnetIdSet map[string]bool) error { + notFound := []string{} for subnetId, found := range subnetIdSet { if !found { - notFound = append(notFound, subnetId) + notFound = append(notFound, string(subnetId)) } } if len(notFound) != 0 { - return nil, errors.Errorf("failed to find the following subnets: %v", notFound) + return errors.Errorf("failed to find the following subnets: %v", strings.Join(notFound, ", ")) } - - return networkInfo, nil + return nil } // AllInstances returns all the instance.Instance in this provider. @@ -1779,13 +2043,11 @@ } func (environ *maasEnviron) Destroy() error { - if environ.ecfg().maasAgentName() == "" { - logger.Warningf("No MAAS agent name specified.\n\n" + - "The environment is either not running or from a very early Juju version.\n" + - "It is not safe to release all MAAS instances without an agent name.\n" + - "If the environment is still running, please manually decomission the MAAS machines.") - return errors.New("unsafe destruction") + if !environ.supportsDevices { + // Warn the user that container resources can leak. + logger.Warningf(noDevicesWarning) } + if err := common.Destroy(environ); err != nil { return errors.Trace(err) } @@ -1812,202 +2074,12 @@ return &providerInstance } -// networkDetails holds information about a MAAS network. -type networkDetails struct { - Name string - IP string - Mask string - VLANTag int - Description string -} - -// getInstanceNetworks returns a list of all MAAS networks for a given node. 
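+// nodeIdFromInstance returns the MAAS "system_id" field of the given
+// instance's underlying MAAS object.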
-func (environ *maasEnviron) getInstanceNetworks(inst instance.Instance) ([]networkDetails, error) { +func (environ *maasEnviron) nodeIdFromInstance(inst instance.Instance) (string, error) { maasInst := inst.(*maasInstance) maasObj := maasInst.maasObject - client := environ.getMAASClient().GetSubObject("networks") nodeId, err := maasObj.GetField("system_id") if err != nil { - return nil, err - } - params := url.Values{"node": {nodeId}} - json, err := client.CallGet("", params) - if err != nil { - return nil, err - } - jsonNets, err := json.GetArray() - if err != nil { - return nil, err - } - - networks := make([]networkDetails, len(jsonNets)) - for i, jsonNet := range jsonNets { - fields, err := jsonNet.GetMap() - if err != nil { - return nil, err - } - name, err := fields["name"].GetString() - if err != nil { - return nil, fmt.Errorf("cannot get name: %v", err) - } - ip, err := fields["ip"].GetString() - if err != nil { - return nil, fmt.Errorf("cannot get ip: %v", err) - } - netmask, err := fields["netmask"].GetString() - if err != nil { - return nil, fmt.Errorf("cannot get netmask: %v", err) - } - vlanTag := 0 - vlanTagField, ok := fields["vlan_tag"] - if ok && !vlanTagField.IsNil() { - // vlan_tag is optional, so assume it's 0 when missing or nil. - vlanTagFloat, err := vlanTagField.GetFloat64() - if err != nil { - return nil, fmt.Errorf("cannot get vlan_tag: %v", err) - } - vlanTag = int(vlanTagFloat) - } - description, err := fields["description"].GetString() - if err != nil { - return nil, fmt.Errorf("cannot get description: %v", err) - } - - networks[i] = networkDetails{ - Name: name, - IP: ip, - Mask: netmask, - VLANTag: vlanTag, - Description: description, - } - } - return networks, nil -} - -// getNetworkMACs returns all MAC addresses connected to the given -// network. -func (environ *maasEnviron) getNetworkMACs(networkName string) ([]string, error) { - client := environ.getMAASClient().GetSubObject("networks").GetSubObject(networkName) - json, err := client.CallGet("list_connected_macs", nil) - if err != nil { - return nil, err - } - jsonMACs, err := json.GetArray() - if err != nil { - return nil, err - } - - macs := make([]string, len(jsonMACs)) - for i, jsonMAC := range jsonMACs { - fields, err := jsonMAC.GetMap() - if err != nil { - return nil, err - } - macAddress, err := fields["mac_address"].GetString() - if err != nil { - return nil, fmt.Errorf("cannot get mac_address: %v", err) - } - macs[i] = macAddress - } - return macs, nil -} - -// getInstanceNetworkInterfaces returns a map of interface MAC address -// to ifaceInfo for each network interface of the given instance, as -// discovered during the commissioning phase. In addition, it also -// returns the interface name discovered as primary. -func (environ *maasEnviron) getInstanceNetworkInterfaces(inst instance.Instance) (map[string]ifaceInfo, string, error) { - maasInst := inst.(*maasInstance) - maasObj := maasInst.maasObject - result, err := maasObj.CallGet("details", nil) - if err != nil { - return nil, "", errors.Trace(err) - } - // Get the node's lldp / lshw details discovered at commissioning. 
- data, err := result.GetBytes() - if err != nil { - return nil, "", errors.Trace(err) - } - var parsed map[string]interface{} - if err := bson.Unmarshal(data, &parsed); err != nil { - return nil, "", errors.Trace(err) - } - lshwData, ok := parsed["lshw"] - if !ok { - return nil, "", errors.Errorf("no hardware information available for node %q", inst.Id()) - } - lshwXML, ok := lshwData.([]byte) - if !ok { - return nil, "", errors.Errorf("invalid hardware information for node %q", inst.Id()) - } - // Now we have the lshw XML data, parse it to extract and return NICs. - return extractInterfaces(inst, lshwXML) -} - -type ifaceInfo struct { - DeviceIndex int - InterfaceName string - Disabled bool -} - -// extractInterfaces parses the XML output of lswh and extracts all -// network interfaces, returing a map MAC address to ifaceInfo, as -// well as the interface name discovered as primary. -func extractInterfaces(inst instance.Instance, lshwXML []byte) (map[string]ifaceInfo, string, error) { - type Node struct { - Id string `xml:"id,attr"` - Disabled bool `xml:"disabled,attr,omitempty"` - Description string `xml:"description"` - Serial string `xml:"serial"` - LogicalName string `xml:"logicalname"` - Children []Node `xml:"node"` - } - type List struct { - Nodes []Node `xml:"node"` - } - var lshw List - if err := xml.Unmarshal(lshwXML, &lshw); err != nil { - return nil, "", errors.Annotatef(err, "cannot parse lshw XML details for node %q", inst.Id()) - } - primaryIface := "" - interfaces := make(map[string]ifaceInfo) - var processNodes func(nodes []Node) error - var baseIndex int - processNodes = func(nodes []Node) error { - for _, node := range nodes { - if strings.HasPrefix(node.Id, "network") { - index := baseIndex - if strings.HasPrefix(node.Id, "network:") { - // There is an index suffix, parse it. - var err error - index, err = strconv.Atoi(strings.TrimPrefix(node.Id, "network:")) - if err != nil { - return errors.Annotatef(err, "lshw output for node %q has invalid ID suffix for %q", inst.Id(), node.Id) - } - } else { - baseIndex++ - } - - if primaryIface == "" && !node.Disabled { - primaryIface = node.LogicalName - logger.Debugf("node %q primary network interface is %q", inst.Id(), primaryIface) - } - interfaces[node.Serial] = ifaceInfo{ - DeviceIndex: index, - InterfaceName: node.LogicalName, - Disabled: node.Disabled, - } - if node.Disabled { - logger.Debugf("node %q skipping disabled network interface %q", inst.Id(), node.LogicalName) - } - - } - if err := processNodes(node.Children); err != nil { - return err - } - } - return nil - } - err := processNodes(lshw.Nodes) - return interfaces, primaryIface, err + return "", err + } + return nodeId, err } === modified file 'src/github.com/juju/juju/provider/maas/environ_test.go' --- src/github.com/juju/juju/provider/maas/environ_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/maas/environ_test.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,9 @@ import ( stdtesting "testing" + "github.com/juju/gomaasapi" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gomaasapi" "github.com/juju/juju/environs/config" envtesting "github.com/juju/juju/environs/testing" @@ -117,10 +117,10 @@ // bug #1256179 is that when using an older version of Juju (<1.16.2) // we didn't include maas-agent-name in the database, so it was 'nil' // in the OldConfig. 
However, when setting an environment, we would set - // it to "" (because maasEnvironConfig.Validate ensures it is a 'valid' + // it to "" (because maasModelConfig.Validate ensures it is a 'valid' // string). We can't create that from NewEnviron or newConfig because - // both of them Validate the contents. 'cmd/juju/environment - // SetEnvironmentCommand' instead uses conn.State.EnvironConfig() which + // both of them Validate the contents. 'cmd/juju/model + // SetEnvironmentCommand' instead uses conn.State.ModelConfig() which // just reads the content of the database into a map, so we just create // the map ourselves. @@ -147,16 +147,6 @@ c.Check(err, gc.ErrorMatches, ".*cannot change maas-agent-name.*") } -func (*environSuite) TestDestroyWithEmptyAgentName(c *gc.C) { - // Related bug #1256179, comment as above. - baseCfg := getSimpleTestConfig(c, coretesting.Attrs{"maas-agent-name": ""}) - env, err := maas.NewEnviron(baseCfg) - c.Assert(err, jc.ErrorIsNil) - - err = env.Destroy() - c.Assert(err, gc.ErrorMatches, "unsafe destruction") -} - func (*environSuite) TestSetConfigAllowsChangingNilAgentNameToEmptyString(c *gc.C) { oldCfg := getSimpleTestConfig(c, nil) newCfgTwo := getSimpleTestConfig(c, coretesting.Attrs{"maas-agent-name": ""}) @@ -215,7 +205,7 @@ cfg := getSimpleTestConfig(c, nil) env, err := maas.NewEnviron(cfg) c.Assert(err, jc.ErrorIsNil) - cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "eth0", "quantal") + cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal") c.Assert(err, jc.ErrorIsNil) c.Assert(cloudcfg.SystemUpdate(), jc.IsTrue) c.Assert(cloudcfg.RunCmds(), jc.DeepEquals, expectedCloudinitConfig) @@ -226,7 +216,7 @@ env, err := maas.NewEnviron(cfg) c.Assert(err, jc.ErrorIsNil) testCase := func(expectedConfig []string) { - cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "eth0", "quantal") + cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal") c.Assert(err, jc.ErrorIsNil) c.Assert(cloudcfg.SystemUpdate(), jc.IsTrue) c.Assert(cloudcfg.RunCmds(), jc.DeepEquals, expectedConfig) @@ -249,7 +239,7 @@ cfg := getSimpleTestConfig(c, attrs) env, err := maas.NewEnviron(cfg) c.Assert(err, jc.ErrorIsNil) - cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "eth0", "quantal") + cloudcfg, err := maas.NewCloudinitConfig(env, "testing.invalid", "quantal") c.Assert(err, jc.ErrorIsNil) c.Assert(cloudcfg.SystemUpdate(), jc.IsTrue) c.Assert(cloudcfg.RunCmds(), jc.DeepEquals, expectedCloudinitConfig) === modified file 'src/github.com/juju/juju/provider/maas/environ_whitebox_test.go' --- src/github.com/juju/juju/provider/maas/environ_whitebox_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/maas/environ_whitebox_test.go 2016-03-22 15:18:22 +0000 @@ -12,29 +12,29 @@ "net" "net/http" "net/url" + "regexp" "strings" - "text/template" "github.com/juju/errors" + "github.com/juju/gomaasapi" "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" - "github.com/juju/utils/set" + "github.com/juju/utils/arch" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" - "launchpad.net/gomaasapi" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" "github.com/juju/juju/environs/bootstrap" "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/simplestreams" + sstesting "github.com/juju/juju/environs/simplestreams/testing" envstorage 
"github.com/juju/juju/environs/storage" envtesting "github.com/juju/juju/environs/testing" envtools "github.com/juju/juju/environs/tools" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" + "github.com/juju/juju/juju" "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" @@ -69,6 +69,7 @@ } func (suite *environSuite) setupFakeTools(c *gc.C) { + suite.PatchValue(&juju.JujuPublicKey, sstesting.SignedMetadataPublicKey) storageDir := c.MkDir() suite.PatchValue(&envtools.DefaultBaseURL, "file://"+storageDir+"/tools") suite.UploadFakeToolsToDirectory(c, storageDir, "released", "released") @@ -157,43 +158,6 @@ return utils.Gunzip(data) } -const lshwXMLTemplate = ` - - - - - Computer - VirtualBox () - 64 - - Motherboard - - Host bridge{{$list := .}}{{range $mac, $ifi := $list}} - - Ethernet interface - 82540EM Gigabit Ethernet Controller - {{$ifi.InterfaceName}} - {{$mac}} - {{end}} - - - - -` - -func (suite *environSuite) generateHWTemplate(netMacs map[string]ifaceInfo) (string, error) { - tmpl, err := template.New("test").Parse(lshwXMLTemplate) - if err != nil { - return "", err - } - var buf bytes.Buffer - err = tmpl.Execute(&buf, netMacs) - if err != nil { - return "", err - } - return string(buf.Bytes()), nil -} - func (suite *environSuite) TestStartInstanceStartsInstance(c *gc.C) { suite.setupFakeTools(c) env := suite.makeEnviron() @@ -214,8 +178,8 @@ c.Check(actions, gc.DeepEquals, []string{"acquire", "start"}) // Test the instance id is correctly recorded for the bootstrap node. - // Check that StateServerInstances returns the id of the bootstrap machine. - instanceIds, err := env.StateServerInstances() + // Check that ControllerInstances returns the id of the bootstrap machine. + instanceIds, err := env.ControllerInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(instanceIds, gc.HasLen, 1) insts, err := env.AllInstances() @@ -268,248 +232,6 @@ c.Check(err, jc.Satisfies, errors.IsNotFound) } -func uint64p(val uint64) *uint64 { - return &val -} - -func stringp(val string) *string { - return &val -} - -func (suite *environSuite) TestSelectNodeValidZone(c *gc.C) { - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0", "zone": "bar"}`) - - snArgs := selectNodeArgs{ - AvailabilityZones: []string{"foo", "bar"}, - Constraints: constraints.Value{}, - IncludeNetworks: nil, - ExcludeNetworks: nil, - } - - node, err := env.selectNode(snArgs) - c.Assert(err, jc.ErrorIsNil) - c.Assert(node, gc.NotNil) -} - -func (suite *environSuite) TestSelectNodeInvalidZone(c *gc.C) { - env := suite.makeEnviron() - - snArgs := selectNodeArgs{ - AvailabilityZones: []string{"foo", "bar"}, - Constraints: constraints.Value{}, - IncludeNetworks: nil, - ExcludeNetworks: nil, - } - - _, err := env.selectNode(snArgs) - c.Assert(fmt.Sprintf("%s", err), gc.Equals, "cannot run instances: gomaasapi: got error back from server: 409 Conflict ()") -} - -func (suite *environSuite) TestAcquireNode(c *gc.C) { - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) - - _, err := env.acquireNode("", "", constraints.Value{}, nil, nil, nil) - - c.Check(err, jc.ErrorIsNil) - operations := suite.testMAASObject.TestServer.NodeOperations() - actions, found := operations["node0"] - c.Assert(found, jc.IsTrue) - c.Check(actions, gc.DeepEquals, []string{"acquire"}) - - // no "name" parameter should have been passed through - values := 
suite.testMAASObject.TestServer.NodeOperationRequestValues()["node0"][0] - _, found = values["name"] - c.Assert(found, jc.IsFalse) -} - -func (suite *environSuite) TestAcquireNodeByName(c *gc.C) { - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) - - _, err := env.acquireNode("host0", "", constraints.Value{}, nil, nil, nil) - - c.Check(err, jc.ErrorIsNil) - operations := suite.testMAASObject.TestServer.NodeOperations() - actions, found := operations["node0"] - c.Assert(found, jc.IsTrue) - c.Check(actions, gc.DeepEquals, []string{"acquire"}) - - // no "name" parameter should have been passed through - values := suite.testMAASObject.TestServer.NodeOperationRequestValues()["node0"][0] - nodeName := values.Get("name") - c.Assert(nodeName, gc.Equals, "host0") -} - -func (suite *environSuite) TestAcquireNodeTakesConstraintsIntoAccount(c *gc.C) { - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode( - `{"system_id": "node0", "hostname": "host0", "architecture": "arm/generic", "memory": 2048}`, - ) - constraints := constraints.Value{Arch: stringp("arm"), Mem: uint64p(1024)} - - _, err := env.acquireNode("", "", constraints, nil, nil, nil) - - c.Check(err, jc.ErrorIsNil) - requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() - nodeRequestValues, found := requestValues["node0"] - c.Assert(found, jc.IsTrue) - c.Assert(nodeRequestValues[0].Get("arch"), gc.Equals, "arm") - c.Assert(nodeRequestValues[0].Get("mem"), gc.Equals, "1024") -} - -func (suite *environSuite) TestParseTags(c *gc.C) { - tests := []struct { - about string - input []string - tags, notTags []string - }{{ - about: "nil input", - input: nil, - tags: []string{}, - notTags: []string{}, - }, { - about: "empty input", - input: []string{}, - tags: []string{}, - notTags: []string{}, - }, { - about: "tag list with embedded spaces", - input: []string{" tag1 ", " tag2", " ^ not Tag 3 ", " ", " ", "", "", " ^notTag4 "}, - tags: []string{"tag1", "tag2"}, - notTags: []string{"notTag3", "notTag4"}, - }, { - about: "only positive tags", - input: []string{"tag1", "tag2", "tag3"}, - tags: []string{"tag1", "tag2", "tag3"}, - notTags: []string{}, - }, { - about: "only negative tags", - input: []string{"^tag1", "^tag2", "^tag3"}, - tags: []string{}, - notTags: []string{"tag1", "tag2", "tag3"}, - }, { - about: "both positive and negative tags", - input: []string{"^tag1", "tag2", "^tag3", "tag4"}, - tags: []string{"tag2", "tag4"}, - notTags: []string{"tag1", "tag3"}, - }, { - about: "single positive tag", - input: []string{"tag1"}, - tags: []string{"tag1"}, - notTags: []string{}, - }, { - about: "single negative tag", - input: []string{"^tag1"}, - tags: []string{}, - notTags: []string{"tag1"}, - }} - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - tags, notTags := parseTags(test.input) - c.Check(tags, jc.DeepEquals, test.tags) - c.Check(notTags, jc.DeepEquals, test.notTags) - } -} - -func (suite *environSuite) TestAcquireNodePassedAgentName(c *gc.C) { - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) - - _, err := env.acquireNode("", "", constraints.Value{}, nil, nil, nil) - - c.Check(err, jc.ErrorIsNil) - requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() - nodeRequestValues, found := requestValues["node0"] - c.Assert(found, jc.IsTrue) - c.Assert(nodeRequestValues[0].Get("agent_name"), gc.Equals, exampleAgentName) -} - -func (suite 
*environSuite) TestAcquireNodePassesPositiveAndNegativeTags(c *gc.C) { - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) - - _, err := env.acquireNode( - "", "", - constraints.Value{Tags: &[]string{"tag1", "^tag2", "tag3", "^tag4"}}, - nil, nil, nil, - ) - - c.Check(err, jc.ErrorIsNil) - requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() - nodeValues, found := requestValues["node0"] - c.Assert(found, jc.IsTrue) - c.Assert(nodeValues[0].Get("tags"), gc.Equals, "tag1,tag3") - c.Assert(nodeValues[0].Get("not_tags"), gc.Equals, "tag2,tag4") -} - -func (suite *environSuite) TestAcquireNodeStorage(c *gc.C) { - for i, test := range []struct { - volumes []volumeInfo - expected string - }{ - { - nil, - "", - }, - { - []volumeInfo{{"volume-1", 1234, nil}}, - "volume-1:1234", - }, - { - []volumeInfo{{"", 1234, []string{"tag1", "tag2"}}}, - "1234(tag1,tag2)", - }, - { - []volumeInfo{{"volume-1", 1234, []string{"tag1", "tag2"}}}, - "volume-1:1234(tag1,tag2)", - }, - { - []volumeInfo{ - {"volume-1", 1234, []string{"tag1", "tag2"}}, - {"volume-2", 4567, []string{"tag1", "tag3"}}, - }, - "volume-1:1234(tag1,tag2),volume-2:4567(tag1,tag3)", - }, - } { - c.Logf("test %d", i) - env := suite.makeEnviron() - suite.testMAASObject.TestServer.NewNode(`{"system_id": "node0", "hostname": "host0"}`) - _, err := env.acquireNode("", "", constraints.Value{}, nil, nil, test.volumes) - c.Check(err, jc.ErrorIsNil) - requestValues := suite.testMAASObject.TestServer.NodeOperationRequestValues() - nodeRequestValues, found := requestValues["node0"] - c.Check(found, jc.IsTrue) - c.Check(nodeRequestValues[0].Get("storage"), gc.Equals, test.expected) - suite.testMAASObject.TestServer.Clear() - } -} - -var testValues = []struct { - constraints constraints.Value - expectedResult url.Values -}{ - {constraints.Value{Arch: stringp("arm")}, url.Values{"arch": {"arm"}}}, - {constraints.Value{CpuCores: uint64p(4)}, url.Values{"cpu_count": {"4"}}}, - {constraints.Value{Mem: uint64p(1024)}, url.Values{"mem": {"1024"}}}, - {constraints.Value{Tags: &[]string{"tag1", "tag2", "^tag3", "^tag4"}}, url.Values{"tags": {"tag1,tag2"}, "not_tags": {"tag3,tag4"}}}, - - // CpuPower is ignored. - {constraints.Value{CpuPower: uint64p(1024)}, url.Values{}}, - - // RootDisk is ignored. 
- {constraints.Value{RootDisk: uint64p(8192)}, url.Values{}}, - {constraints.Value{Tags: &[]string{"foo", "bar"}}, url.Values{"tags": {"foo,bar"}}}, - {constraints.Value{Arch: stringp("arm"), CpuCores: uint64p(4), Mem: uint64p(1024), CpuPower: uint64p(1024), RootDisk: uint64p(8192), Tags: &[]string{"foo", "bar"}}, url.Values{"arch": {"arm"}, "cpu_count": {"4"}, "mem": {"1024"}, "tags": {"foo,bar"}}}, -} - -func (*environSuite) TestConvertConstraints(c *gc.C) { - for _, test := range testValues { - c.Check(convertConstraints(test.constraints), gc.DeepEquals, test.expectedResult) - } -} - var testNetworkValues = []struct { includeNetworks []string excludeNetworks []string @@ -540,30 +262,41 @@ }, } -func (*environSuite) TestConvertNetworks(c *gc.C) { - for _, test := range testNetworkValues { - var vals = url.Values{} - addNetworks(vals, test.includeNetworks, test.excludeNetworks) - c.Check(vals, gc.DeepEquals, test.expectedResult) - } -} - func (suite *environSuite) getInstance(systemId string) *maasInstance { input := fmt.Sprintf(`{"system_id": %q}`, systemId) node := suite.testMAASObject.TestServer.NewNode(input) - return &maasInstance{maasObject: &node, environ: suite.makeEnviron()} + return &maasInstance{&node} } -func (suite *environSuite) getNetwork(name string, id int, vlanTag int) *gomaasapi.MAASObject { +func (suite *environSuite) newNetwork(name string, id int, vlanTag int, defaultGateway string) *gomaasapi.MAASObject { var vlan string if vlanTag == 0 { vlan = "null" } else { vlan = fmt.Sprintf("%d", vlanTag) } - var input string - input = fmt.Sprintf(`{"name": %q, "ip":"192.168.%d.1", "netmask": "255.255.255.0",`+ - `"vlan_tag": %s, "description": "%s_%d_%d" }`, name, id, vlan, name, id, vlanTag) + + if defaultGateway != "null" { + // since we use %s below only "null" (if passed) should remain unquoted. + defaultGateway = fmt.Sprintf("%q", defaultGateway) + } + + // TODO(dimitern): Use JSON tags on structs, JSON encoder, or at least + // text/template below and in similar cases. + input := fmt.Sprintf(`{ + "name": %q, + "ip":"192.168.%d.2", + "netmask": "255.255.255.0", + "vlan_tag": %s, + "description": "%s_%d_%d", + "default_gateway": %s + }`, + name, + id, + vlan, + name, id, vlanTag, + defaultGateway, + ) network := suite.testMAASObject.TestServer.NewNetwork(input) return &network } @@ -643,9 +376,9 @@ c.Assert(errors.Cause(err), gc.Equals, environs.ErrNoInstances) } -func (suite *environSuite) TestStateServerInstances(c *gc.C) { +func (suite *environSuite) TestControllerInstances(c *gc.C) { env := suite.makeEnviron() - _, err := env.StateServerInstances() + _, err := env.ControllerInstances() c.Assert(err, gc.Equals, environs.ErrNotBootstrapped) tests := [][]instance.Id{{}, {"inst-0"}, {"inst-0", "inst-1"}} @@ -654,15 +387,15 @@ StateInstances: expected, }) c.Assert(err, jc.ErrorIsNil) - stateServerInstances, err := env.StateServerInstances() + controllerInstances, err := env.ControllerInstances() c.Assert(err, jc.ErrorIsNil) - c.Assert(stateServerInstances, jc.SameContents, expected) + c.Assert(controllerInstances, jc.SameContents, expected) } } -func (suite *environSuite) TestStateServerInstancesFailsIfNoStateInstances(c *gc.C) { +func (suite *environSuite) TestControllerInstancesFailsIfNoStateInstances(c *gc.C) { env := suite.makeEnviron() - _, err := env.StateServerInstances() + _, err := env.ControllerInstances() c.Check(err, gc.Equals, environs.ErrNotBootstrapped) } @@ -737,13 +470,13 @@ env := suite.makeEnviron() // Disable auto-uploading by setting the agent version. 
cfg, err := env.Config().Apply(map[string]interface{}{ - "agent-version": version.Current.Number.String(), + "agent-version": version.Current.String(), }) c.Assert(err, jc.ErrorIsNil) err = env.SetConfig(cfg) c.Assert(err, jc.ErrorIsNil) err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) - c.Check(err, gc.ErrorMatches, "Juju cannot bootstrap because no tools are available for your environment(.|\n)*") + c.Check(err, gc.ErrorMatches, "Juju cannot bootstrap because no tools are available for your model(.|\n)*") } func (suite *environSuite) TestBootstrapFailsIfNoNodes(c *gc.C) { @@ -755,15 +488,6 @@ c.Check(err, gc.ErrorMatches, ".*409.*") } -func assertSourceContents(c *gc.C, source simplestreams.DataSource, filename string, content []byte) { - rc, _, err := source.Fetch(filename) - c.Assert(err, jc.ErrorIsNil) - defer rc.Close() - retrieved, err := ioutil.ReadAll(rc) - c.Assert(err, jc.ErrorIsNil) - c.Assert(retrieved, gc.DeepEquals, content) -} - func (suite *environSuite) TestGetToolsMetadataSources(c *gc.C) { env := suite.makeEnviron() // Add a dummy file to storage so we can use that to check the @@ -821,8 +545,10 @@ c.Assert(err, gc.ErrorMatches, "invalid constraint value: arch=ppc64el\nvalid values are: \\[amd64 armhf\\]") } -func (suite *environSuite) TestGetNetworkMACs(c *gc.C) { +func (suite *environSuite) TestSupportsNetworking(c *gc.C) { env := suite.makeEnviron() + _, supported := environs.SupportsNetworking(env) + c.Assert(supported, jc.IsTrue) suite.testMAASObject.TestServer.NewNode(`{"system_id": "node_1"}`) suite.testMAASObject.TestServer.NewNode(`{"system_id": "node_2"}`) @@ -850,256 +576,6 @@ c.Assert(err, jc.ErrorIsNil) } -func (suite *environSuite) TestGetInstanceNetworks(c *gc.C) { - suite.getNetwork("test_network", 123, 321) - testInstance := suite.getInstance("instance_for_network") - suite.testMAASObject.TestServer.ConnectNodeToNetwork("instance_for_network", "test_network") - networks, err := suite.makeEnviron().getInstanceNetworks(testInstance) - c.Assert(err, jc.ErrorIsNil) - c.Check(networks, gc.DeepEquals, []networkDetails{ - {Name: "test_network", IP: "192.168.123.1", Mask: "255.255.255.0", VLANTag: 321, - Description: "test_network_123_321"}, - }) -} - -// A typical lshw XML dump with lots of things left out. -const lshwXMLTestExtractInterfaces = ` - - - - - Notebook - MyMachine - 1.0 - 64 - - Motherboard - - CPU - - - wlan0 - aa:bb:cc:dd:ee:ff - - - eth0 - aa:bb:cc:dd:ee:f1 - - - - - - vnet1 - aa:bb:cc:dd:ee:f2 - - - -` - -// An lshw XML dump with implicit network interface indexes. 
-const lshwXMLTestExtractInterfacesImplicitIndexes = ` - - - - - Notebook - MyMachine - 1.0 - 64 - - Motherboard - - CPU - - - wlan0 - aa:bb:cc:dd:ee:ff - - - eth0 - aa:bb:cc:dd:ee:f1 - - - - - - vnet1 - aa:bb:cc:dd:ee:f2 - - - -` - -func (suite *environSuite) TestExtractInterfaces(c *gc.C) { - rawData := []string{ - lshwXMLTestExtractInterfaces, - lshwXMLTestExtractInterfacesImplicitIndexes, - } - for _, data := range rawData { - inst := suite.getInstance("testInstance") - interfaces, primaryIface, err := extractInterfaces(inst, []byte(data)) - c.Assert(err, jc.ErrorIsNil) - c.Check(primaryIface, gc.Equals, "eth0") - c.Check(interfaces, jc.DeepEquals, map[string]ifaceInfo{ - "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, - "aa:bb:cc:dd:ee:f1": {1, "eth0", false}, - "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, - }) - } -} - -func (suite *environSuite) TestGetInstanceNetworkInterfaces(c *gc.C) { - inst := suite.getInstance("testInstance") - templateInterfaces := map[string]ifaceInfo{ - "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, - "aa:bb:cc:dd:ee:f1": {1, "eth0", true}, - "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, - } - lshwXML, err := suite.generateHWTemplate(templateInterfaces) - c.Assert(err, jc.ErrorIsNil) - - suite.testMAASObject.TestServer.AddNodeDetails("testInstance", lshwXML) - interfaces, primaryIface, err := inst.environ.getInstanceNetworkInterfaces(inst) - c.Assert(err, jc.ErrorIsNil) - // Both wlan0 and eth0 are disabled in lshw output. - c.Check(primaryIface, gc.Equals, "vnet1") - c.Check(interfaces, jc.DeepEquals, templateInterfaces) -} - -func (suite *environSuite) TestSetupNetworks(c *gc.C) { - testInstance := suite.getInstance("node1") - templateInterfaces := map[string]ifaceInfo{ - "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, - "aa:bb:cc:dd:ee:f1": {1, "eth0", true}, - "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, - } - lshwXML, err := suite.generateHWTemplate(templateInterfaces) - c.Assert(err, jc.ErrorIsNil) - - suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) - suite.getNetwork("LAN", 2, 42) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f1") - suite.getNetwork("Virt", 3, 0) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f2") - suite.getNetwork("WLAN", 1, 0) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "WLAN", "aa:bb:cc:dd:ee:ff") - networkInfo, primaryIface, err := suite.makeEnviron().setupNetworks( - testInstance, - set.NewStrings("WLAN"), // Disable WLAN only. - ) - c.Assert(err, jc.ErrorIsNil) - - // Note: order of networks is based on lshwXML - c.Check(primaryIface, gc.Equals, "vnet1") - // Unfortunately, because network.InterfaceInfo is unhashable - // (contains a map) we can't use jc.SameContents here. 
- c.Check(networkInfo, gc.HasLen, 3) - for _, info := range networkInfo { - switch info.DeviceIndex { - case 0: - c.Check(info, jc.DeepEquals, network.InterfaceInfo{ - MACAddress: "aa:bb:cc:dd:ee:ff", - CIDR: "192.168.1.1/24", - NetworkName: "WLAN", - ProviderId: "WLAN", - VLANTag: 0, - DeviceIndex: 0, - InterfaceName: "wlan0", - Disabled: true, // from networksToDisable("WLAN") - }) - case 1: - c.Check(info, jc.DeepEquals, network.InterfaceInfo{ - DeviceIndex: 1, - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "192.168.2.1/24", - NetworkName: "LAN", - ProviderId: "LAN", - VLANTag: 42, - InterfaceName: "eth0", - Disabled: true, // from networksToDisable("WLAN") - }) - case 2: - c.Check(info, jc.DeepEquals, network.InterfaceInfo{ - MACAddress: "aa:bb:cc:dd:ee:f2", - CIDR: "192.168.3.1/24", - NetworkName: "Virt", - ProviderId: "Virt", - VLANTag: 0, - DeviceIndex: 2, - InterfaceName: "vnet1", - Disabled: false, - }) - } - } -} - -// The same test, but now "Virt" network does not have matched MAC address -func (suite *environSuite) TestSetupNetworksPartialMatch(c *gc.C) { - testInstance := suite.getInstance("node1") - templateInterfaces := map[string]ifaceInfo{ - "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, - "aa:bb:cc:dd:ee:f1": {1, "eth0", false}, - "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, - } - lshwXML, err := suite.generateHWTemplate(templateInterfaces) - c.Assert(err, jc.ErrorIsNil) - - suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) - suite.getNetwork("LAN", 2, 42) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f1") - suite.getNetwork("Virt", 3, 0) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f3") - networkInfo, primaryIface, err := suite.makeEnviron().setupNetworks( - testInstance, - set.NewStrings(), // All enabled. - ) - c.Assert(err, jc.ErrorIsNil) - - // Note: order of networks is based on lshwXML - c.Check(primaryIface, gc.Equals, "eth0") - c.Check(networkInfo, jc.DeepEquals, []network.InterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "192.168.2.1/24", - NetworkName: "LAN", - ProviderId: "LAN", - VLANTag: 42, - DeviceIndex: 1, - InterfaceName: "eth0", - Disabled: false, - }}) -} - -// The same test, but now no networks have matched MAC -func (suite *environSuite) TestSetupNetworksNoMatch(c *gc.C) { - testInstance := suite.getInstance("node1") - templateInterfaces := map[string]ifaceInfo{ - "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, - "aa:bb:cc:dd:ee:f1": {1, "eth0", false}, - "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, - } - lshwXML, err := suite.generateHWTemplate(templateInterfaces) - c.Assert(err, jc.ErrorIsNil) - - suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) - suite.getNetwork("Virt", 3, 0) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f3") - networkInfo, primaryIface, err := suite.makeEnviron().setupNetworks( - testInstance, - set.NewStrings(), // All enabled. 
- ) - c.Assert(err, jc.ErrorIsNil) - - // Note: order of networks is based on lshwXML - c.Check(primaryIface, gc.Equals, "eth0") - c.Check(networkInfo, gc.HasLen, 0) -} - -func (suite *environSuite) TestSupportsNetworking(c *gc.C) { - env := suite.makeEnviron() - _, supported := environs.SupportsNetworking(env) - c.Assert(supported, jc.IsTrue) -} - func (suite *environSuite) TestSupportsAddressAllocation(c *gc.C) { env := suite.makeEnviron() supported, err := env.SupportsAddressAllocation("") @@ -1107,8 +583,39 @@ c.Assert(supported, jc.IsTrue) } +func (suite *environSuite) TestSupportsSpacesDefaultFalse(c *gc.C) { + env := suite.makeEnviron() + supported, err := env.SupportsSpaces() + c.Assert(err, jc.ErrorIsNil) + c.Assert(supported, jc.IsFalse) +} + +func (suite *environSuite) TestSupportsSpaceDiscoveryDefaultFalse(c *gc.C) { + env := suite.makeEnviron() + supported, err := env.SupportsSpaceDiscovery() + c.Assert(err, jc.ErrorIsNil) + c.Assert(supported, jc.IsFalse) +} + +func (suite *environSuite) TestSupportsSpaces(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`) + env := suite.makeEnviron() + supported, err := env.SupportsSpaces() + c.Assert(err, jc.ErrorIsNil) + c.Assert(supported, jc.IsTrue) +} + +func (suite *environSuite) TestSupportsSpaceDiscovery(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`) + env := suite.makeEnviron() + supported, err := env.SupportsSpaceDiscovery() + c.Assert(err, jc.ErrorIsNil) + c.Assert(supported, jc.IsTrue) +} + func (suite *environSuite) createSubnets(c *gc.C, duplicates bool) instance.Instance { testInstance := suite.getInstance("node1") + testServer := suite.testMAASObject.TestServer templateInterfaces := map[string]ifaceInfo{ "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, "aa:bb:cc:dd:ee:f1": {1, "eth0", false}, @@ -1121,24 +628,24 @@ lshwXML, err := suite.generateHWTemplate(templateInterfaces) c.Assert(err, jc.ErrorIsNil) - suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) + testServer.AddNodeDetails("node1", lshwXML) // resulting CIDR 192.168.2.1/24 - suite.getNetwork("LAN", 2, 42) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f1") + suite.newNetwork("LAN", 2, 42, "192.168.2.1") // primary + gateway + testServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f1") // resulting CIDR 192.168.3.1/24 - suite.getNetwork("Virt", 3, 0) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f2") + suite.newNetwork("Virt", 3, 0, "") + testServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f2") // resulting CIDR 192.168.1.1/24 - suite.getNetwork("WLAN", 1, 0) - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "WLAN", "aa:bb:cc:dd:ee:ff") + suite.newNetwork("WLAN", 1, 0, "") + testServer.ConnectNodeToNetworkWithMACAddress("node1", "WLAN", "aa:bb:cc:dd:ee:ff") if duplicates { - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f3") - suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f4") + testServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f3") + testServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f4") } // needed for getNodeGroups to work - suite.testMAASObject.TestServer.AddBootImage("uuid-0", 
`{"architecture": "amd64", "release": "precise"}`) - suite.testMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "precise"}`) + testServer.AddBootImage("uuid-0", `{"architecture": "amd64", "release": "precise"}`) + testServer.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "precise"}`) jsonText1 := `{ "ip_range_high": "192.168.2.255", @@ -1188,108 +695,314 @@ "static_ip_range_high": "172.16.8.255", "interface": "eth3" }` - suite.testMAASObject.TestServer.NewNodegroupInterface("uuid-0", jsonText1) - suite.testMAASObject.TestServer.NewNodegroupInterface("uuid-0", jsonText2) - suite.testMAASObject.TestServer.NewNodegroupInterface("uuid-1", jsonText3) - suite.testMAASObject.TestServer.NewNodegroupInterface("uuid-1", jsonText4) + testServer.NewNodegroupInterface("uuid-0", jsonText1) + testServer.NewNodegroupInterface("uuid-0", jsonText2) + testServer.NewNodegroupInterface("uuid-1", jsonText3) + testServer.NewNodegroupInterface("uuid-1", jsonText4) return testInstance } -func (suite *environSuite) TestNetworkInterfaces(c *gc.C) { +func (suite *environSuite) TestSubnetsWithInstanceIdAndSubnetIdsWhenSpacesNotSupported(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": []}`) testInstance := suite.createSubnets(c, false) - - netInfo, err := suite.makeEnviron().NetworkInterfaces(testInstance.Id()) + subnetsInfo, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"LAN", "Virt", "WLAN"}) c.Assert(err, jc.ErrorIsNil) - expectedInfo := []network.InterfaceInfo{{ - DeviceIndex: 0, - MACAddress: "aa:bb:cc:dd:ee:ff", - CIDR: "192.168.1.1/24", - ProviderSubnetId: "WLAN", - VLANTag: 0, - InterfaceName: "wlan0", - Disabled: true, - NoAutoStart: true, - ConfigType: network.ConfigDHCP, - ExtraConfig: nil, - GatewayAddress: network.Address{}, - Address: network.NewScopedAddress("192.168.1.1", network.ScopeCloudLocal), - }, { - DeviceIndex: 1, - MACAddress: "aa:bb:cc:dd:ee:f1", - CIDR: "192.168.2.1/24", - ProviderSubnetId: "LAN", - VLANTag: 42, - InterfaceName: "eth0", - Disabled: false, - NoAutoStart: false, - ConfigType: network.ConfigDHCP, - ExtraConfig: nil, - GatewayAddress: network.Address{}, - Address: network.NewScopedAddress("192.168.2.1", network.ScopeCloudLocal), - }, { - DeviceIndex: 2, - MACAddress: "aa:bb:cc:dd:ee:f2", - CIDR: "192.168.3.1/24", - ProviderSubnetId: "Virt", - VLANTag: 0, - InterfaceName: "vnet1", - Disabled: false, - NoAutoStart: false, - ConfigType: network.ConfigDHCP, - ExtraConfig: nil, - GatewayAddress: network.Address{}, - Address: network.NewScopedAddress("192.168.3.1", network.ScopeCloudLocal), + expectedInfo := []network.SubnetInfo{{ + CIDR: "192.168.2.2/24", + ProviderId: "LAN", + VLANTag: 42, + AllocatableIPLow: net.ParseIP("192.168.2.0"), + AllocatableIPHigh: net.ParseIP("192.168.2.127"), + }, { + CIDR: "192.168.3.2/24", + ProviderId: "Virt", + AllocatableIPLow: nil, + AllocatableIPHigh: nil, + VLANTag: 0, + }, { + CIDR: "192.168.1.2/24", + ProviderId: "WLAN", + VLANTag: 0, + AllocatableIPLow: net.ParseIP("192.168.1.129"), + AllocatableIPHigh: net.ParseIP("192.168.1.255"), }} - network.SortInterfaceInfo(netInfo) - c.Assert(netInfo, jc.DeepEquals, expectedInfo) -} - -func (suite *environSuite) TestSubnets(c *gc.C) { - testInstance := suite.createSubnets(c, false) - - netInfo, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"LAN", "Virt", "WLAN"}) - c.Assert(err, jc.ErrorIsNil) - - expectedInfo := []network.SubnetInfo{ - {CIDR: "192.168.2.1/24", ProviderId: "LAN", 
VLANTag: 42,
+		AllocatableIPLow:  net.ParseIP("192.168.2.0"),
+		AllocatableIPHigh: net.ParseIP("192.168.2.127"),
+	}, {
+		CIDR:              "192.168.3.2/24",
+		ProviderId:        "Virt",
+		AllocatableIPLow:  nil,
+		AllocatableIPHigh: nil,
+		VLANTag:           0,
+	}, {
+		CIDR:              "192.168.1.2/24",
+		ProviderId:        "WLAN",
+		VLANTag:           0,
+		AllocatableIPLow:  net.ParseIP("192.168.1.129"),
+		AllocatableIPHigh: net.ParseIP("192.168.1.255"),
+	}}
-	network.SortInterfaceInfo(netInfo)
-	c.Assert(netInfo, jc.DeepEquals, expectedInfo)
-}
-
-func (suite *environSuite) TestSubnets(c *gc.C) {
-	testInstance := suite.createSubnets(c, false)
-
-	netInfo, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"LAN", "Virt", "WLAN"})
-	c.Assert(err, jc.ErrorIsNil)
-
-	expectedInfo := []network.SubnetInfo{
-		{CIDR: "192.168.2.1/24", ProviderId: "LAN", VLANTag: 42, AllocatableIPLow: net.ParseIP("192.168.2.0"), AllocatableIPHigh: net.ParseIP("192.168.2.127")},
-		{CIDR: "192.168.3.1/24", ProviderId: "Virt", VLANTag: 0},
-		{CIDR: "192.168.1.1/24", ProviderId: "WLAN", VLANTag: 0, AllocatableIPLow: net.ParseIP("192.168.1.129"), AllocatableIPHigh: net.ParseIP("192.168.1.255")}}
-	c.Assert(netInfo, jc.DeepEquals, expectedInfo)
-}
-
-func (suite *environSuite) TestSubnetsNoNetIds(c *gc.C) {
-	testInstance := suite.createSubnets(c, false)
-	_, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{})
-	c.Assert(err, gc.ErrorMatches, "subnetIds must not be empty")
-}
-
-func (suite *environSuite) TestSubnetsMissingNetwork(c *gc.C) {
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+}
+
+func (suite *environSuite) TestSubnetsWithInstanceIdNoSubnetIdsWhenSpacesNotSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": []}`)
+	testInstance := suite.createSubnets(c, false)
+	env := suite.makeEnviron()
+	_, err := env.Subnets(testInstance.Id(), []network.Id{})
+	c.Assert(err, gc.ErrorMatches, "subnet IDs must not be empty")
+
+	_, err = env.Subnets(testInstance.Id(), nil)
+	c.Assert(err, gc.ErrorMatches, "subnet IDs must not be empty")
+}
+
+func (suite *environSuite) TestSubnetsNoInstanceIdWithSubnetIdsWhenSpacesNotSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": []}`)
+	suite.createSubnets(c, false)
+	_, err := suite.makeEnviron().Subnets(instance.UnknownId, []network.Id{"LAN", "Virt", "WLAN"})
+	c.Assert(err, gc.ErrorMatches, "instance ID is required")
+}
+
+func (suite *environSuite) TestSubnetsNoInstanceIdNoSubnetIdsWhenSpacesNotSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": []}`)
+	suite.createSubnets(c, false)
+	env := suite.makeEnviron()
+	_, err := env.Subnets(instance.UnknownId, nil)
+	c.Assert(err, gc.ErrorMatches, "instance ID is required")
+}
+
+func (suite *environSuite) TestSubnetsInvalidInstanceIdAnySubnetIdsWhenSpacesNotSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": []}`)
+	suite.createSubnets(c, false)
+	env := suite.makeEnviron()
+	_, err := env.Subnets("invalid", []network.Id{"anything"})
+	c.Assert(err, gc.ErrorMatches, `instance "invalid" not found`)
+	c.Assert(err, jc.Satisfies, errors.IsNotFound)
+
+	_, err = env.Subnets("invalid", nil)
+	c.Assert(err, gc.ErrorMatches, `instance "invalid" not found`)
+	c.Assert(err, jc.Satisfies, errors.IsNotFound)
+}
+
+func (suite *environSuite) TestSubnetsWithInstanceIdAndSubnetIdsWhenSpacesAreSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`)
+	var subnetIDs []network.Id
+	var uintIDs []uint
+	for _, i := range []uint{1, 2, 3} {
+		id := suite.addSubnet(c, i, i, "node1")
+		subnetIDs = append(subnetIDs, network.Id(fmt.Sprintf("%v", id)))
+		uintIDs = append(uintIDs, id)
+		suite.addSubnet(c, i+5, i, "node2")
+		suite.addSubnet(c, i+10, i, "") // not linked to a node
+	}
+	testInstance := suite.getInstance("node1")
+	env := suite.makeEnviron()
+
+	subnetsInfo, err := env.Subnets(testInstance.Id(), subnetIDs)
+	c.Assert(err, jc.ErrorIsNil)
+	expectedInfo := []network.SubnetInfo{
+		createSubnetInfo(uintIDs[0], 1, 1),
+		createSubnetInfo(uintIDs[1], 2, 2),
+		createSubnetInfo(uintIDs[2], 3, 3),
+	}
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+
+	subnetsInfo, err = env.Subnets(testInstance.Id(), subnetIDs[1:])
+	c.Assert(err, jc.ErrorIsNil)
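+	// Requesting only a subset of the IDs returns just those subnets.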
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo[1:])
+}
+
+func (suite *environSuite) TestSubnetsWithInstanceIdNoSubnetIdsWhenSpacesAreSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`)
+	id1 := suite.addSubnet(c, 1, 1, "node1")
+	id2 := suite.addSubnet(c, 2, 2, "node1")
+	suite.addSubnet(c, 3, 2, "")      // not linked to a node
+	suite.addSubnet(c, 4, 2, "node2") // linked to another node
+	testInstance := suite.getInstance("node1")
+	env := suite.makeEnviron()
+
+	subnetsInfo, err := env.Subnets(testInstance.Id(), []network.Id{})
+	c.Assert(err, jc.ErrorIsNil)
+	expectedInfo := []network.SubnetInfo{
+		createSubnetInfo(id1, 1, 1),
+		createSubnetInfo(id2, 2, 2),
+	}
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+
+	subnetsInfo, err = env.Subnets(testInstance.Id(), nil)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+}
+
+func (suite *environSuite) TestSubnetsInvalidInstanceIdAnySubnetIdsWhenSpacesAreSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`)
+	suite.addSubnet(c, 1, 1, "node1")
+	suite.addSubnet(c, 2, 2, "node2")
+
+	_, err := suite.makeEnviron().Subnets("invalid", []network.Id{"anything"})
+	c.Assert(err, gc.ErrorMatches, `instance "invalid" not found`)
+	c.Assert(err, jc.Satisfies, errors.IsNotFound)
+}
+
+func (suite *environSuite) TestSubnetsNoInstanceIdWithSubnetIdsWhenSpacesAreSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`)
+	id1 := suite.addSubnet(c, 1, 1, "node1")
+	id2 := suite.addSubnet(c, 2, 2, "node2")
+	subnetIDs := []network.Id{
+		network.Id(fmt.Sprintf("%v", id1)),
+		network.Id(fmt.Sprintf("%v", id2)),
+	}
+
+	subnetsInfo, err := suite.makeEnviron().Subnets(instance.UnknownId, subnetIDs)
+	c.Assert(err, jc.ErrorIsNil)
+	expectedInfo := []network.SubnetInfo{
+		createSubnetInfo(id1, 1, 1),
+		createSubnetInfo(id2, 2, 2),
+	}
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+}
+
+func (suite *environSuite) TestSubnetsNoInstanceIdNoSubnetIdsWhenSpacesAreSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`)
+	id1 := suite.addSubnet(c, 1, 1, "node1")
+	id2 := suite.addSubnet(c, 2, 2, "node2")
+	env := suite.makeEnviron()
+
+	subnetsInfo, err := suite.makeEnviron().Subnets(instance.UnknownId, []network.Id{})
+	c.Assert(err, jc.ErrorIsNil)
+	expectedInfo := []network.SubnetInfo{
+		createSubnetInfo(id1, 1, 1),
+		createSubnetInfo(id2, 2, 2),
+	}
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+
+	subnetsInfo, err = env.Subnets(instance.UnknownId, nil)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo)
+}
+
+func (suite *environSuite) TestSubnetsMissingSubnetWhenSpacesNotSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": []}`)
 	testInstance := suite.createSubnets(c, false)
 	_, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"WLAN", "Missing"})
-	c.Assert(err, gc.ErrorMatches, "failed to find the following subnets: \\[Missing\\]")
+	c.Assert(err, gc.ErrorMatches, "failed to find the following subnets: Missing")
+}
+
+func (suite *environSuite) TestSubnetsMissingSubnetWhenSpacesAreSupported(c *gc.C) {
+	suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`)
+	testInstance := suite.getInstance("node1")
+	suite.addSubnet(c, 1, 1,
"node1") + _, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"1", "2"}) + c.Assert(err, gc.ErrorMatches, "failed to find the following subnets: 2") } func (suite *environSuite) TestSubnetsNoDuplicates(c *gc.C) { testInstance := suite.createSubnets(c, true) - netInfo, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"LAN", "Virt", "WLAN"}) - c.Assert(err, jc.ErrorIsNil) - - expectedInfo := []network.SubnetInfo{ - {CIDR: "192.168.2.1/24", ProviderId: "LAN", VLANTag: 42, AllocatableIPLow: net.ParseIP("192.168.2.0"), AllocatableIPHigh: net.ParseIP("192.168.2.127")}, - {CIDR: "192.168.3.1/24", ProviderId: "Virt", VLANTag: 0}, - {CIDR: "192.168.1.1/24", ProviderId: "WLAN", VLANTag: 0, AllocatableIPLow: net.ParseIP("192.168.1.129"), AllocatableIPHigh: net.ParseIP("192.168.1.255")}} - c.Assert(netInfo, jc.DeepEquals, expectedInfo) + subnetsInfo, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"LAN", "Virt", "WLAN"}) + c.Assert(err, jc.ErrorIsNil) + + expectedInfo := []network.SubnetInfo{{ + CIDR: "192.168.2.2/24", + ProviderId: "LAN", + VLANTag: 42, + AllocatableIPLow: net.ParseIP("192.168.2.0"), + AllocatableIPHigh: net.ParseIP("192.168.2.127"), + }, { + CIDR: "192.168.3.2/24", + ProviderId: "Virt", + AllocatableIPLow: nil, + AllocatableIPHigh: nil, + VLANTag: 0, + }, { + CIDR: "192.168.1.2/24", + ProviderId: "WLAN", + VLANTag: 0, + AllocatableIPLow: net.ParseIP("192.168.1.129"), + AllocatableIPHigh: net.ParseIP("192.168.1.255"), + }} + c.Assert(subnetsInfo, jc.DeepEquals, expectedInfo) +} + +func (suite *environSuite) TestSpaces(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`) + for _, i := range []uint{1, 2, 3} { + suite.addSubnet(c, i, i, "node1") + suite.addSubnet(c, i+5, i, "node1") + } + + spaces, err := suite.makeEnviron().Spaces() + c.Assert(err, jc.ErrorIsNil) + expectedSpaces := []network.SpaceInfo{{ + ProviderId: "Space 1", + Subnets: []network.SubnetInfo{ + createSubnetInfo(1, 1, 1), + createSubnetInfo(2, 1, 6), + }, + }, { + ProviderId: "Space 2", + Subnets: []network.SubnetInfo{ + createSubnetInfo(3, 2, 2), + createSubnetInfo(4, 2, 7), + }, + }, { + ProviderId: "Space 3", + Subnets: []network.SubnetInfo{ + createSubnetInfo(5, 3, 3), + createSubnetInfo(6, 3, 8), + }, + }} + c.Assert(spaces, jc.DeepEquals, expectedSpaces) +} + +func (suite *environSuite) TestSpacesNeedsSupportsSpaces(c *gc.C) { + _, err := suite.makeEnviron().Spaces() + c.Assert(err, jc.Satisfies, errors.IsNotSupported) +} + +func (suite *environSuite) assertSpaces(c *gc.C, numberOfSubnets int, filters []network.Id) { + server := suite.testMAASObject.TestServer + server.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`) + testInstance := suite.createSubnets(c, false) + systemID := "node1" + for i := 1; i <= numberOfSubnets; i++ { + // Put most, but not all, of the subnets on node1. 
+ if i == 2 { + systemID = "node2" + } else { + systemID = "node1" + } + suite.addSubnet(c, uint(i), uint(i), systemID) + } + + subnets, err := suite.makeEnviron().Subnets(testInstance.Id(), filters) + c.Assert(err, jc.ErrorIsNil) + expectedSubnets := []network.SubnetInfo{ + createSubnetInfo(1, 1, 1), + createSubnetInfo(3, 3, 3), + } + c.Assert(subnets, jc.DeepEquals, expectedSubnets) + +} + +func (suite *environSuite) TestSubnetsWithSpacesAllSubnets(c *gc.C) { + suite.assertSpaces(c, 3, []network.Id{}) +} + +func (suite *environSuite) TestSubnetsWithSpacesFilteredIds(c *gc.C) { + suite.assertSpaces(c, 4, []network.Id{"1", "3"}) +} + +func (suite *environSuite) TestSubnetsWithSpacesMissingSubnet(c *gc.C) { + server := suite.testMAASObject.TestServer + server.SetVersionJSON(`{"capabilities": ["network-deployment-ubuntu"]}`) + testInstance := suite.createSubnets(c, false) + for _, i := range []uint{1, 2} { + suite.addSubnet(c, i, i, "node1") + } + + _, err := suite.makeEnviron().Subnets(testInstance.Id(), []network.Id{"1", "3", "6"}) + errorRe := regexp.MustCompile("failed to find the following subnets: (\\d), (\\d)$") + errorText := err.Error() + c.Assert(errorRe.MatchString(errorText), jc.IsTrue) + matches := errorRe.FindStringSubmatch(errorText) + c.Assert(matches, gc.HasLen, 3) + c.Assert(matches[1:], jc.SameContents, []string{"3", "6"}) } func (suite *environSuite) TestAllocateAddress(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) + testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() // note that the default test server always succeeds if we provide a // valid instance id and net id - err := env.AllocateAddress(testInstance.Id(), "LAN", network.Address{Value: "192.168.2.1"}, "foo", "bar") + err := env.AllocateAddress(testInstance.Id(), "LAN", &network.Address{Value: "192.168.2.1"}, "foo", "bar") c.Assert(err, jc.ErrorIsNil) } @@ -1298,9 +1011,37 @@ testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() + // Work around the lack of support for devices PUT and POST without hostname + // set in gomaasapi's testservices + newParams := func(macAddress string, instId instance.Id, hostnameSuffix string) url.Values { + c.Check(macAddress, gc.Equals, "aa:bb:cc:dd:ee:f0") // passed to AllocateAddress() below + c.Check(instId, gc.Equals, testInstance.Id()) + c.Check(hostnameSuffix, gc.Equals, "juju-machine-0-kvm-5") // passed to AllocateAddress() below + params := make(url.Values) + params.Add("mac_addresses", macAddress) + params.Add("hostname", "auto-generated.maas") + params.Add("parent", extractSystemId(instId)) + return params + } + suite.PatchValue(&NewDeviceParams, newParams) + updateHostname := func(client *gomaasapi.MAASObject, deviceID, deviceHostname, hostnameSuffix string) (string, error) { + c.Check(client, gc.NotNil) + c.Check(deviceID, gc.Matches, `node-[0-9a-f-]+`) + c.Check(deviceHostname, gc.Equals, "auto-generated.maas") // "generated" above in NewDeviceParams() + c.Check(hostnameSuffix, gc.Equals, "juju-machine-0-kvm-5") // passed to AllocateAddress() below + return "auto-generated-juju-lxc.maas", nil + } + suite.PatchValue(&UpdateDeviceHostname, updateHostname) + // note that the default test server always succeeds if we provide a // valid instance id and net id - err := env.AllocateAddress(testInstance.Id(), "LAN", network.Address{Value: "192.168.2.1"}, "foo", "bar") + err := env.AllocateAddress( + testInstance.Id(), + "LAN", + &network.Address{Value: 
"192.168.2.1"}, + "aa:bb:cc:dd:ee:f0", + "juju-machine-0-kvm-5", + ) c.Assert(err, jc.ErrorIsNil) devicesArray := suite.getDeviceArray(c) @@ -1311,7 +1052,7 @@ hostname, err := device["hostname"].GetString() c.Assert(err, jc.ErrorIsNil) - c.Assert(hostname, gc.Equals, "bar") + c.Assert(hostname, gc.Equals, "auto-generated.maas") parent, err := device["parent"].GetString() c.Assert(err, jc.ErrorIsNil) @@ -1334,7 +1075,106 @@ c.Assert(err, jc.ErrorIsNil) mac, err := macMap["mac_address"].GetString() c.Assert(err, jc.ErrorIsNil) - c.Assert(mac, gc.Equals, "foo") + c.Assert(mac, gc.Equals, "aa:bb:cc:dd:ee:f0") +} + +func (suite *environSuite) TestTransformDeviceHostname(c *gc.C) { + for i, test := range []struct { + deviceHostname string + hostnameSuffix string + + expectedOutput string + expectedError string + }{{ + deviceHostname: "shiny-town.maas", + hostnameSuffix: "juju-machine-1-lxc-2", + expectedOutput: "shiny-town-juju-machine-1-lxc-2.maas", + }, { + deviceHostname: "foo.subdomain.example.com", + hostnameSuffix: "suffix", + expectedOutput: "foo-suffix.subdomain.example.com", + }, { + deviceHostname: "bad-food.example.com", + hostnameSuffix: "suffix.example.org", + expectedOutput: "bad-food-suffix.example.org.example.com", + }, { + deviceHostname: "strangers-and.freaks", + hostnameSuffix: "just-this", + expectedOutput: "strangers-and-just-this.freaks", + }, { + deviceHostname: "no-dot-hostname", + hostnameSuffix: "anything", + expectedError: `unexpected device "dev-id" hostname "no-dot-hostname"`, + }, { + deviceHostname: "anything", + hostnameSuffix: "", + expectedError: "hostname suffix cannot be empty", + }} { + c.Logf( + "test #%d: %q + %q -> %q (err: %s)", + i, test.deviceHostname, test.hostnameSuffix, + test.expectedOutput, test.expectedError, + ) + output, err := transformDeviceHostname("dev-id", test.deviceHostname, test.hostnameSuffix) + if test.expectedError != "" { + c.Check(err, gc.ErrorMatches, test.expectedError) + c.Check(output, gc.Equals, "") + continue + } + c.Check(err, jc.ErrorIsNil) + c.Check(output, gc.Equals, test.expectedOutput) + } +} + +func (suite *environSuite) patchDeviceCreation() { + // Work around the lack of support for devices PUT and POST without hostname + // set in gomaasapi's testservices + newParams := func(macAddress string, instId instance.Id, _ string) url.Values { + params := make(url.Values) + params.Add("mac_addresses", macAddress) + params.Add("hostname", "auto-generated.maas") + params.Add("parent", extractSystemId(instId)) + return params + } + suite.PatchValue(&NewDeviceParams, newParams) + updateHostname := func(_ *gomaasapi.MAASObject, _, _, _ string) (string, error) { + return "auto-generated-juju-lxc.maas", nil + } + suite.PatchValue(&UpdateDeviceHostname, updateHostname) +} + +func (suite *environSuite) TestAllocateAddressDevicesFailures(c *gc.C) { + suite.SetFeatureFlags() + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["devices-management"]}`) + testInstance := suite.createSubnets(c, false) + env := suite.makeEnviron() + suite.patchDeviceCreation() + + responses := []string{ + "claim_sticky_ip_address failed", + "GetMap of the response failed", + "no ip_addresses in response", + "unexpected ip_addresses in response", + "IP in ip_addresses not a string", + } + reserveIP := func(_ gomaasapi.MAASObject, deviceID, macAddress string, addr network.Address) (network.Address, error) { + c.Check(deviceID, gc.Matches, "node-[a-f0-9]+") + c.Check(macAddress, gc.Matches, "aa:bb:cc:dd:ee:f0") + c.Check(addr, jc.DeepEquals, 
network.Address{}) + nextError := responses[0] + return network.Address{}, errors.New(nextError) + } + suite.PatchValue(&ReserveIPAddressOnDevice, reserveIP) + + for len(responses) > 0 { + addr := &network.Address{} + err := env.AllocateAddress( + testInstance.Id(), network.AnySubnet, addr, + "aa:bb:cc:dd:ee:f0", "juju-lxc", + ) + c.Check(err, gc.ErrorMatches, responses[0]) + responses = responses[1:] + } } func (suite *environSuite) getDeviceArray(c *gc.C) []gomaasapi.JSONObject { @@ -1358,14 +1198,20 @@ suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses", "devices-management"]}`) testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() + suite.patchDeviceCreation() + addr := network.NewAddress("192.168.2.1") - err := env.AllocateAddress(testInstance.Id(), "LAN", addr, "foo", "bar") + err := env.AllocateAddress(testInstance.Id(), "LAN", &addr, "foo", "juju-lxc") c.Assert(err, jc.ErrorIsNil) devicesArray := suite.getDeviceArray(c) c.Assert(devicesArray, gc.HasLen, 1) - err = env.ReleaseAddress(testInstance.Id(), "LAN", addr, "foo") + // Since we're mocking out updateDeviceHostname, no need to check if the + // hostname was updated (only manually tested for now until we change + // gomaasapi). + + err = env.ReleaseAddress(testInstance.Id(), "LAN", addr, "foo", "juju-lxc") c.Assert(err, jc.ErrorIsNil) devicesArray = suite.getDeviceArray(c) @@ -1373,22 +1219,25 @@ } func (suite *environSuite) TestAllocateAddressInvalidInstance(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) env := suite.makeEnviron() addr := network.Address{Value: "192.168.2.1"} instId := instance.Id("foo") - err := env.AllocateAddress(instId, "bar", addr, "foo", "bar") + err := env.AllocateAddress(instId, "bar", &addr, "foo", "juju-lxc") expected := fmt.Sprintf("failed to allocate address %q for instance %q.*", addr, instId) c.Assert(err, gc.ErrorMatches, expected) } func (suite *environSuite) TestAllocateAddressMissingSubnet(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() - err := env.AllocateAddress(testInstance.Id(), "bar", network.Address{Value: "192.168.2.1"}, "foo", "bar") - c.Assert(errors.Cause(err), gc.ErrorMatches, "failed to find the following subnets: \\[bar\\]") + err := env.AllocateAddress(testInstance.Id(), "bar", &network.Address{Value: "192.168.2.1"}, "foo", "bar") + c.Assert(errors.Cause(err), gc.ErrorMatches, "failed to find the following subnets: bar") } func (suite *environSuite) TestAllocateAddressIPAddressUnavailable(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() @@ -1398,7 +1247,7 @@ suite.PatchValue(&ReserveIPAddress, reserveIPAddress) ipAddress := network.Address{Value: "192.168.2.1"} - err := env.AllocateAddress(testInstance.Id(), "LAN", ipAddress, "foo", "bar") + err := env.AllocateAddress(testInstance.Id(), "LAN", &ipAddress, "foo", "bar") c.Assert(errors.Cause(err), gc.Equals, environs.ErrIPAddressUnavailable) expected := fmt.Sprintf("failed to allocate address %q for instance %q.*", ipAddress, testInstance.Id()) c.Assert(err, gc.ErrorMatches, expected) @@ -1413,25 +1262,28 @@ } func (suite *environSuite) TestReleaseAddress(c *gc.C) { + 
suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() - err := env.AllocateAddress(testInstance.Id(), "LAN", network.Address{Value: "192.168.2.1"}, "foo", "bar") + err := env.AllocateAddress(testInstance.Id(), "LAN", &network.Address{Value: "192.168.2.1"}, "foo", "bar") c.Assert(err, jc.ErrorIsNil) ipAddress := network.Address{Value: "192.168.2.1"} macAddress := "foobar" - err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress) + hostname := "myhostname" + err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress, hostname) c.Assert(err, jc.ErrorIsNil) // by releasing again we can test that the first release worked, *and* // the error handling of ReleaseError - err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress) + err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress, hostname) expected := fmt.Sprintf("(?s).*failed to release IP address %q from instance %q.*", ipAddress, testInstance.Id()) c.Assert(err, gc.ErrorMatches, expected) } func (suite *environSuite) TestReleaseAddressRetry(c *gc.C) { + suite.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) // Patch short attempt params. suite.PatchValue(&shortAttempt, utils.AttemptStrategy{ Min: 5, @@ -1450,13 +1302,14 @@ testInstance := suite.createSubnets(c, false) env := suite.makeEnviron() - err := env.AllocateAddress(testInstance.Id(), "LAN", network.Address{Value: "192.168.2.1"}, "foo", "bar") + err := env.AllocateAddress(testInstance.Id(), "LAN", &network.Address{Value: "192.168.2.1"}, "foo", "bar") c.Assert(err, jc.ErrorIsNil) // ReleaseAddress must fail with 5 retries. ipAddress := network.Address{Value: "192.168.2.1"} macAddress := "foobar" - err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress) + hostname := "myhostname" + err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress, hostname) expected := fmt.Sprintf("(?s).*failed to release IP address %q from instance %q: ouch", ipAddress, testInstance.Id()) c.Assert(err, gc.ErrorMatches, expected) c.Assert(retries, gc.Equals, 5) @@ -1464,7 +1317,7 @@ // Now let it succeed after 3 retries. retries = 0 enoughRetries = 3 - err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress) + err = env.ReleaseAddress(testInstance.Id(), "bar", ipAddress, macAddress, hostname) c.Assert(err, jc.ErrorIsNil) c.Assert(retries, gc.Equals, 3) } === modified file 'src/github.com/juju/juju/provider/maas/environprovider.go' --- src/github.com/juju/juju/provider/maas/environprovider.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/maas/environprovider.go 2016-03-22 15:18:22 +0000 @@ -4,13 +4,15 @@ package maas import ( + "fmt" "net/http" "github.com/juju/errors" + "github.com/juju/gomaasapi" "github.com/juju/loggo" "github.com/juju/utils" - "launchpad.net/gomaasapi" + "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" ) @@ -18,14 +20,16 @@ // Logger for the MAAS provider. 
var logger = loggo.GetLogger("juju.provider.maas") -type maasEnvironProvider struct{} +type maasEnvironProvider struct { + environProviderCredentials +} var _ environs.EnvironProvider = (*maasEnvironProvider)(nil) var providerInstance maasEnvironProvider func (maasEnvironProvider) Open(cfg *config.Config) (environs.Environ, error) { - logger.Debugf("opening environment %q.", cfg.Name()) + logger.Debugf("opening model %q.", cfg.Name()) env, err := NewEnviron(cfg) if err != nil { return nil, err @@ -56,8 +60,38 @@ return cfg.Apply(attrs) } -func (p maasEnvironProvider) PrepareForBootstrap(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) { - cfg, err := p.PrepareForCreateEnvironment(cfg) +func (p maasEnvironProvider) PrepareForBootstrap(ctx environs.BootstrapContext, args environs.PrepareForBootstrapParams) (environs.Environ, error) { + // For MAAS, either: + // 1. the endpoint from the cloud definition defines the MAAS server URL + // (if a full cloud definition had been set up) + // 2. the region defines the MAAS server ip/host + // (if the bootstrap shortcut is used) + server := args.CloudEndpoint + if server == "" && args.CloudRegion != "" { + server = fmt.Sprintf("http://%s/MAAS", args.CloudRegion) + } + if server == "" { + return nil, errors.New("MAAS server not specified") + } + attrs := map[string]interface{}{ + "maas-server": server, + } + // Add the credentials. + switch authType := args.Credentials.AuthType(); authType { + case cloud.OAuth1AuthType: + credentialAttrs := args.Credentials.Attributes() + for k, v := range credentialAttrs { + attrs[k] = v + } + default: + return nil, errors.NotSupportedf("%q auth-type", authType) + } + cfg, err := args.Config.Apply(attrs) + if err != nil { + return nil, errors.Trace(err) + } + + cfg, err = p.PrepareForCreateEnvironment(cfg) if err != nil { return nil, err } @@ -85,49 +119,6 @@ return nil } -// Boilerplate config YAML. Don't mess with the indentation or add newlines! -var boilerplateYAML = ` -# https://juju.ubuntu.com/docs/config-maas.html -maas: - type: maas - - # maas-server specifies the location of the MAAS server. It must - # specify the base path. - # - maas-server: 'http://192.168.1.1/MAAS/' - - # maas-oauth holds the OAuth credentials from MAAS. - # - maas-oauth: '' - - # maas-server bootstrap ssh connection options - # - - # bootstrap-timeout time to wait contacting a state server, in seconds. - bootstrap-timeout: 1800 - - # Whether or not to refresh the list of available updates for an - # OS. The default option of true is recommended for use in - # production systems, but disabling this can speed up local - # deployments for development or testing. - # - # enable-os-refresh-update: true - - # Whether or not to perform OS upgrades when machines are - # provisioned. The default option of true is recommended for use - # in production systems, but disabling this can speed up local - # deployments for development or testing. - # - # enable-os-upgrade: true - - -`[1:] - -// BoilerplateConfig is specified in the EnvironProvider interface. -func (maasEnvironProvider) BoilerplateConfig() string { - return boilerplateYAML -} - // SecretAttrs is specified in the EnvironProvider interface. func (prov maasEnvironProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { secretAttrs := make(map[string]string) @@ -138,3 +129,8 @@ secretAttrs["maas-oauth"] = maasCfg.maasOAuth() return secretAttrs, nil } + +// DetectRegions is specified in the environs.CloudRegionDetector interface. 
+func (p maasEnvironProvider) DetectRegions() ([]cloud.Region, error) {
+	return nil, errors.NotFoundf("regions")
+}

=== modified file 'src/github.com/juju/juju/provider/maas/environprovider_test.go'
--- src/github.com/juju/juju/provider/maas/environprovider_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/provider/maas/environprovider_test.go	2016-03-22 15:18:22 +0000
@@ -10,6 +10,8 @@
 	"github.com/juju/utils"
 	gc "gopkg.in/check.v1"
 
+	"github.com/juju/juju/cloud"
+	"github.com/juju/juju/environs"
 	"github.com/juju/juju/environs/config"
 	envtesting "github.com/juju/juju/environs/testing"
 	"github.com/juju/juju/testing"
@@ -38,28 +40,90 @@
 	c.Check(secretAttrs, gc.DeepEquals, expectedAttrs)
 }
 
+func (suite *EnvironProviderSuite) TestCredentialsSetup(c *gc.C) {
+	attrs := testing.FakeConfig().Merge(testing.Attrs{
+		"type": "maas",
+	})
+	config, err := config.New(config.NoDefaults, attrs)
+	c.Assert(err, jc.ErrorIsNil)
+
+	ctx := envtesting.BootstrapContext(c)
+	environ, err := providerInstance.PrepareForBootstrap(ctx, environs.PrepareForBootstrapParams{
+		Config:        config,
+		CloudEndpoint: "http://maas.testing.invalid/maas/",
+		Credentials: cloud.NewCredential(
+			cloud.OAuth1AuthType,
+			map[string]string{
+				"maas-oauth": "aa:bb:cc",
+			},
+		),
+	})
+	c.Assert(err, jc.ErrorIsNil)
+
+	cfg := environ.Config()
+	attrs = cfg.UnknownAttrs()
+	server, ok := attrs["maas-server"]
+	c.Assert(ok, jc.IsTrue)
+	c.Assert(server, gc.Equals, "http://maas.testing.invalid/maas/")
+	oauth, ok := attrs["maas-oauth"]
+	c.Assert(ok, jc.IsTrue)
+	c.Assert(oauth, gc.Equals, "aa:bb:cc")
+}
+
 func (suite *EnvironProviderSuite) TestUnknownAttrsContainAgentName(c *gc.C) {
 	attrs := testing.FakeConfig().Merge(testing.Attrs{
-		"type":        "maas",
-		"maas-oauth":  "aa:bb:cc",
-		"maas-server": "http://maas.testing.invalid/maas/",
+		"type": "maas",
 	})
 	config, err := config.New(config.NoDefaults, attrs)
 	c.Assert(err, jc.ErrorIsNil)
 
 	ctx := envtesting.BootstrapContext(c)
-	environ, err := providerInstance.PrepareForBootstrap(ctx, config)
+	environ, err := providerInstance.PrepareForBootstrap(ctx, environs.PrepareForBootstrapParams{
+		Config:        config,
+		CloudEndpoint: "http://maas.testing.invalid/maas/",
+		Credentials: cloud.NewCredential(
+			cloud.OAuth1AuthType,
+			map[string]string{
+				"maas-oauth": "aa:bb:cc",
+			},
+		),
+	})
 	c.Assert(err, jc.ErrorIsNil)
 
 	preparedConfig := environ.Config()
 	unknownAttrs := preparedConfig.UnknownAttrs()
+	c.Assert(unknownAttrs["maas-server"], gc.Equals, "http://maas.testing.invalid/maas/")
 	uuid, ok := unknownAttrs["maas-agent-name"]
 	c.Assert(ok, jc.IsTrue)
 	c.Assert(uuid, jc.Satisfies, utils.IsValidUUIDString)
 }
 
+func (suite *EnvironProviderSuite) TestMAASServerFromRegion(c *gc.C) {
+	attrs := testing.FakeConfig().Merge(testing.Attrs{
+		"type": "maas",
+	})
+	config, err := config.New(config.NoDefaults, attrs)
+	c.Assert(err, jc.ErrorIsNil)
+
+	ctx := envtesting.BootstrapContext(c)
+	environ, err := providerInstance.PrepareForBootstrap(ctx, environs.PrepareForBootstrapParams{
+		Config:      config,
+		CloudRegion: "maas.testing",
+		Credentials: cloud.NewCredential(
+			cloud.OAuth1AuthType,
+			map[string]string{
+				"maas-oauth": "aa:bb:cc",
+			},
+		),
+	})
+	c.Assert(err, jc.ErrorIsNil)
+
+	preparedConfig := environ.Config()
+	unknownAttrs := preparedConfig.UnknownAttrs()
+	c.Assert(unknownAttrs["maas-server"], gc.Equals, "http://maas.testing/MAAS")
+}
+
 func (suite *EnvironProviderSuite) TestPrepareSetsAgentName(c *gc.C) {
 	attrs := testing.FakeConfig().Merge(testing.Attrs{
 		"type": "maas",
@@ -94,15 +158,22
@@ func (suite *EnvironProviderSuite) TestAgentNameShouldNotBeSetByHand(c *gc.C) { attrs := testing.FakeConfig().Merge(testing.Attrs{ "type": "maas", - "maas-oauth": "aa:bb:cc", - "maas-server": "http://maas.testing.invalid/maas/", "maas-agent-name": "foobar", }) config, err := config.New(config.NoDefaults, attrs) c.Assert(err, jc.ErrorIsNil) ctx := envtesting.BootstrapContext(c) - _, err = providerInstance.PrepareForBootstrap(ctx, config) + _, err = providerInstance.PrepareForBootstrap(ctx, environs.PrepareForBootstrapParams{ + Config: config, + CloudEndpoint: "http://maas.testing.invalid/maas/", + Credentials: cloud.NewCredential( + cloud.OAuth1AuthType, + map[string]string{ + "maas-oauth": "aa:bb:cc", + }, + ), + }) c.Assert(err, gc.Equals, errAgentNameAlreadySet) } === modified file 'src/github.com/juju/juju/provider/maas/export_test.go' --- src/github.com/juju/juju/provider/maas/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/maas/export_test.go 2016-03-22 15:18:22 +0000 @@ -4,15 +4,10 @@ package maas import ( - "strings" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "launchpad.net/gomaasapi" + "github.com/juju/gomaasapi" "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/environs" - "github.com/juju/juju/environs/storage" ) var ( @@ -29,68 +24,8 @@ return env.(*maasEnviron).getMAASClient() } -func NewCloudinitConfig(env environs.Environ, hostname, iface, series string) (cloudinit.CloudConfig, error) { - return env.(*maasEnviron).newCloudinitConfig(hostname, iface, series) +func NewCloudinitConfig(env environs.Environ, hostname, series string) (cloudinit.CloudConfig, error) { + return env.(*maasEnviron).newCloudinitConfig(hostname, series) } var RenderEtcNetworkInterfacesScript = renderEtcNetworkInterfacesScript - -var indexData = ` -{ - "index": { - "com.ubuntu.cloud:released:maas": { - "updated": "Fri, 14 Feb 2014 13:39:35 +0000", - "cloudname": "maas", - "datatype": "image-ids", - "format": "products:1.0", - "products": [ - "com.ubuntu.cloud:server:12.04:amd64" - ], - "path": "streams/v1/com.ubuntu.cloud:released:maas.json" - } - }, - "updated": "Fri, 14 Feb 2014 13:39:35 +0000", - "format": "index:1.0" -} -` - -var imagesData = ` -{ - "content_id": "com.ubuntu.cloud:released:maas", - "format": "products:1.0", - "updated": "Fri, 14 Feb 2014 13:39:35 +0000", - "datatype": "image-ids", - "products": { - "com.ubuntu.cloud:server:12.04:amd64": { - "release": "precise", - "version": "12.04", - "arch": "amd64", - "versions": { - "20140214": { - "items": { - "11223344-0a0a-ff99-11bb-0a1b2c3d4e5f": { - "region": "some-region", - "id": "11223344-0a0a-ff99-11bb-0a1b2c3d4e5f", - "virt": "kvm" - } - }, - "pubname": "ubuntu-precise-12.04-amd64-server-20140214", - "label": "release" - } - } - } - } -} -` - -func UseTestImageMetadata(c *gc.C, stor storage.Storage) { - files := map[string]string{ - "images/streams/v1/index.json": indexData, - "images/streams/v1/com.ubuntu.cloud:released:maas.json": imagesData, - } - for f, d := range files { - rdr := strings.NewReader(d) - err := stor.Put(f, rdr, int64(len(d))) - c.Assert(err, jc.ErrorIsNil) - } -} === modified file 'src/github.com/juju/juju/provider/maas/instance.go' --- src/github.com/juju/juju/provider/maas/instance.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/maas/instance.go 2016-03-22 15:18:22 +0000 @@ -6,19 +6,15 @@ import ( "fmt" "strings" - "sync" "github.com/juju/errors" - "launchpad.net/gomaasapi" + "github.com/juju/gomaasapi" 
"github.com/juju/juju/instance" "github.com/juju/juju/network" ) type maasInstance struct { - environ *maasEnviron - - mu sync.Mutex maasObject *gomaasapi.MAASObject } @@ -39,7 +35,7 @@ } func (mi *maasInstance) Id() instance.Id { - return maasObjectId(mi.getMaasObject()) + return maasObjectId(mi.maasObject) } func maasObjectId(maasObject *gomaasapi.MAASObject) instance.Id { @@ -56,26 +52,25 @@ return "" } -// Refresh refreshes the instance with the most up-to-date information -// from the MAAS server. -func (mi *maasInstance) Refresh() error { - mi.mu.Lock() - defer mi.mu.Unlock() - insts, err := mi.environ.Instances([]instance.Id{maasObjectId(mi.maasObject)}) - if err != nil { - return err - } - mi.maasObject = insts[0].(*maasInstance).maasObject - return nil -} - -func (mi *maasInstance) getMaasObject() *gomaasapi.MAASObject { - mi.mu.Lock() - defer mi.mu.Unlock() - return mi.maasObject -} - func (mi *maasInstance) Addresses() ([]network.Address, error) { + interfaceAddresses, err := mi.interfaceAddresses() + if errors.IsNotSupported(err) { + logger.Warningf( + "using legacy approach to get instance addresses as %q API capability is not supported: %v", + capNetworkDeploymentUbuntu, err, + ) + return mi.legacyAddresses() + } else if err != nil { + return nil, errors.Annotate(err, "getting node interfaces") + } + + logger.Debugf("instance %q has interface addresses: %+v", mi.Id(), interfaceAddresses) + return interfaceAddresses, nil +} + +// legacyAddresses is used to extract all IP addresses of the node when not +// using MAAS 1.9+ API. +func (mi *maasInstance) legacyAddresses() ([]network.Address, error) { name, err := mi.hostname() if err != nil { return nil, err @@ -88,12 +83,23 @@ addrs[0].Scope = network.ScopePublic addrs[1].Scope = network.ScopeCloudLocal - // Append any remaining IP addresses after the preferred ones. - ips, err := mi.ipAddresses() - if err != nil { - return nil, err + // Append any remaining IP addresses after the preferred ones. We have to do + // this the hard way, since maasObject doesn't have this built-in yet + addressArray := mi.maasObject.GetMap()["ip_addresses"] + if !addressArray.IsNil() { + // Older MAAS versions do not return ip_addresses. + objs, err := addressArray.GetArray() + if err != nil { + return nil, err + } + for _, obj := range objs { + s, err := obj.GetString() + if err != nil { + return nil, err + } + addrs = append(addrs, network.NewAddress(s)) + } } - addrs = append(addrs, network.NewAddresses(ips...)...) // Although we would prefer a DNS name there's no point // returning unresolvable names because activities like 'juju @@ -101,32 +107,45 @@ return resolveHostnames(addrs), nil } -func (mi *maasInstance) ipAddresses() ([]string, error) { - // we have to do this the hard way, since maasObject doesn't have this built-in yet - addressArray := mi.getMaasObject().GetMap()["ip_addresses"] - if addressArray.IsNil() { - // Older MAAS versions do not return ip_addresses. - return nil, nil - } - objs, err := addressArray.GetArray() - if err != nil { - return nil, err - } - ips := make([]string, len(objs)) - for i, obj := range objs { - s, err := obj.GetString() - if err != nil { - return nil, err +var refreshMAASObject = func(maasObject *gomaasapi.MAASObject) (gomaasapi.MAASObject, error) { + // Defined like this to allow patching in tests to overcome limitations of + // gomaasapi's test server. 
+ return maasObject.Get() +} + +// interfaceAddresses fetches a fresh copy of the node details from MAAS and +// extracts all addresses from the node's interfaces. Returns an error +// satisfying errors.IsNotSupported() if MAAS API does not report interfaces +// information. +func (mi *maasInstance) interfaceAddresses() ([]network.Address, error) { + // Fetch a fresh copy of the instance JSON first. + obj, err := refreshMAASObject(mi.maasObject) + if err != nil { + return nil, errors.Annotate(err, "getting instance details") + } + + // Get all the interface details and extract the addresses. + interfaces, err := maasObjectNetworkInterfaces(&obj) + if err != nil { + return nil, errors.Trace(err) + } + + var addresses []network.Address + for _, iface := range interfaces { + if iface.Address.Value != "" { + logger.Debugf("found address %q on interface %q", iface.Address, iface.InterfaceName) + addresses = append(addresses, iface.Address) + } else { + logger.Infof("no address found on interface %q", iface.InterfaceName) } - ips[i] = s } - return ips, nil + return addresses, nil } func (mi *maasInstance) architecture() (arch, subarch string, err error) { // MAAS may return an architecture of the form, for example, // "amd64/generic"; we only care about the major part. - arch, err = mi.getMaasObject().GetField("architecture") + arch, err = mi.maasObject.GetField("architecture") if err != nil { return "", "", err } @@ -139,12 +158,12 @@ } func (mi *maasInstance) zone() string { - zone, _ := mi.getMaasObject().GetField("zone") + zone, _ := mi.maasObject.GetField("zone") return zone } func (mi *maasInstance) cpuCount() (uint64, error) { - count, err := mi.getMaasObject().GetMap()["cpu_count"].GetFloat64() + count, err := mi.maasObject.GetMap()["cpu_count"].GetFloat64() if err != nil { return 0, err } @@ -152,7 +171,7 @@ } func (mi *maasInstance) memory() (uint64, error) { - mem, err := mi.getMaasObject().GetMap()["memory"].GetFloat64() + mem, err := mi.maasObject.GetMap()["memory"].GetFloat64() if err != nil { return 0, err } @@ -160,7 +179,7 @@ } func (mi *maasInstance) tagNames() ([]string, error) { - obj := mi.getMaasObject().GetMap()["tag_names"] + obj := mi.maasObject.GetMap()["tag_names"] if obj.IsNil() { return nil, errors.NotFoundf("tag_names") } @@ -211,7 +230,7 @@ func (mi *maasInstance) hostname() (string, error) { // A MAAS instance has its DNS name immediately. - return mi.getMaasObject().GetField("hostname") + return mi.maasObject.GetField("hostname") } // MAAS does not do firewalling so these port methods do nothing. 
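The Addresses() rewrite above replaces the old Refresh()/getMaasObject() locking with a capability fallback: ask for the 1.9+ interface data first, and only drop back to the legacy ip_addresses path when MAAS reports the call as unsupported. A minimal standalone sketch of that pattern, using hypothetical newAPIAddresses/legacyAPIAddresses stand-ins rather than the provider's real calls:

    package main

    import (
        "fmt"

        "github.com/juju/errors"
    )

    // newAPIAddresses stands in for interfaceAddresses(): on a MAAS without
    // the "interface_set" node field it fails with a NotSupported error.
    func newAPIAddresses() ([]string, error) {
        return nil, errors.NotSupportedf("interface_set")
    }

    // legacyAPIAddresses stands in for legacyAddresses(), the pre-1.9 path.
    func legacyAPIAddresses() ([]string, error) {
        return []string{"10.0.0.2"}, nil
    }

    // addresses mirrors the control flow of maasInstance.Addresses(): prefer
    // the new API, fall back only on NotSupported, propagate anything else.
    func addresses() ([]string, error) {
        addrs, err := newAPIAddresses()
        if errors.IsNotSupported(err) {
            return legacyAPIAddresses()
        } else if err != nil {
            return nil, errors.Annotate(err, "getting node interfaces")
        }
        return addrs, nil
    }

    func main() {
        fmt.Println(addresses()) // [10.0.0.2] <nil>
    }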
=== modified file 'src/github.com/juju/juju/provider/maas/instance_test.go'
--- src/github.com/juju/juju/provider/maas/instance_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/provider/maas/instance_test.go	2016-03-22 15:18:22 +0000
@@ -6,6 +6,7 @@
 import (
 	"fmt"
 
+	"github.com/juju/gomaasapi"
 	jc "github.com/juju/testing/checkers"
 	gc "gopkg.in/check.v1"
 
@@ -22,7 +23,7 @@
 	jsonValue := `{"system_id": "system_id", "test": "test"}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
 	resourceURI, _ := obj.GetField("resource_uri")
-	instance := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	instance := maasInstance{&obj}
 	c.Check(string(instance.Id()), gc.Equals, resourceURI)
 }
 
@@ -30,7 +31,7 @@
 func (s *instanceTest) TestString(c *gc.C) {
 	jsonValue := `{"hostname": "thethingintheplace", "system_id": "system_id", "test": "test"}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	instance := &maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	instance := &maasInstance{&obj}
 	hostname, err := instance.hostname()
 	c.Assert(err, jc.ErrorIsNil)
 	expected := hostname + ":" + string(instance.Id())
@@ -41,35 +42,24 @@
 	// For good measure, test what happens if we don't have a hostname.
 	jsonValue := `{"system_id": "system_id", "test": "test"}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	instance := &maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	instance := &maasInstance{&obj}
 	_, err := instance.hostname()
 	c.Assert(err, gc.NotNil)
 	expected := fmt.Sprintf("", err) + ":" + string(instance.Id())
 	c.Assert(fmt.Sprint(instance), gc.Equals, expected)
 }
 
-func (s *instanceTest) TestRefreshInstance(c *gc.C) {
-	jsonValue := `{"system_id": "system_id", "test": "test"}`
-	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	s.testMAASObject.TestServer.ChangeNode("system_id", "test2", "test2")
-	instance := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
-
-	err := instance.Refresh()
-
-	c.Check(err, jc.ErrorIsNil)
-	testField, err := (*instance.maasObject).GetField("test2")
-	c.Check(err, jc.ErrorIsNil)
-	c.Check(testField, gc.Equals, "test2")
-}
-
-func (s *instanceTest) TestAddresses(c *gc.C) {
+func (s *instanceTest) TestAddressesLegacy(c *gc.C) {
+	// We simulate an older MAAS (1.8-) which returns ip_addresses, but no
+	// interface_set for a node. We also verify we don't get the space of an
+	// address.
 	jsonValue := `{
 		"hostname": "testing.invalid",
 		"system_id": "system_id",
 		"ip_addresses": [ "1.2.3.4", "fe80::d806:dbff:fe23:1199" ]
 	}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 
 	expected := []network.Address{
 		network.NewScopedAddress("testing.invalid", network.ScopePublic),
@@ -84,6 +74,54 @@
 	c.Check(addr, gc.DeepEquals, expected)
 }
 
+func (s *instanceTest) TestAddressesViaInterfaces(c *gc.C) {
+	// We simulate a newer MAAS (1.9+) which returns both ip_addresses and
+	// interface_set for a node. To verify we use interfaces we deliberately put
+	// different items in ip_addresses
+	jsonValue := `{
+		"hostname": "-testing.invalid",
+		"system_id": "system_id",
+		"interface_set" : [
+			{ "name": "eth0", "links": [
+				{ "subnet": { "space": "bar" }, "ip_address": "8.7.6.5" },
+				{ "subnet": { "space": "bar" }, "ip_address": "8.7.6.6" }
+			] },
+			{ "name": "eth1", "links": [
+				{ "subnet": { "space": "storage" }, "ip_address": "10.0.1.1" }
+			] },
+			{ "name": "eth3", "links": [
+				{ "subnet": { "space": "db" }, "ip_address": "fc00::123" }
+			] },
+			{ "name": "eth4" },
+			{ "name": "eth5", "links": [
+				{ "mode": "link-up" }
+			] }
+		],
+		"ip_addresses": [ "anything", "foo", "0.1.2.3" ]
+	}`
+	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
+	inst := maasInstance{&obj}
+	// Since gomaasapi treats "interface_set" specially and the only way to
+	// change it is via SetNodeNetworkLink(), which in turn does not allow you
+	// to specify ip_address, we need to patch the call which gets a fresh copy
+	// of the node details from the test server to avoid mangling the
+	// interface_set we used above.
+	s.PatchValue(&refreshMAASObject, func(mo *gomaasapi.MAASObject) (gomaasapi.MAASObject, error) {
+		return *mo, nil
+	})
+
+	expected := []network.Address{
+		network.NewAddressOnSpace("bar", "8.7.6.5"),
+		network.NewAddressOnSpace("bar", "8.7.6.6"),
+		network.NewAddressOnSpace("storage", "10.0.1.1"),
+		network.NewAddressOnSpace("db", "fc00::123"),
+	}
+
+	addr, err := inst.Addresses()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Check(addr, jc.DeepEquals, expected)
+}
+
 func (s *instanceTest) TestAddressesMissing(c *gc.C) {
 	// Older MAAS versions do not have ip_addresses returned, for these
 	// just the DNS name should be returned without error.
 	jsonValue := `{
 		"hostname": "testing.invalid",
 		"system_id": "system_id"
 	}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 
 	addr, err := inst.Addresses()
 	c.Assert(err, jc.ErrorIsNil)
@@ -109,7 +147,7 @@
 	"ip_addresses": "incompatible"
 	}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 
 	_, err := inst.Addresses()
 	c.Assert(err, gc.NotNil)
@@ -122,7 +160,7 @@
 	"ip_addresses": [42]
 	}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 
 	_, err := inst.Addresses()
 	c.Assert(err, gc.NotNil)
@@ -136,7 +174,7 @@
 	"memory": 16384
 	}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 	hc, err := inst.hardwareCharacteristics()
 	c.Assert(err, jc.ErrorIsNil)
 	c.Assert(hc, gc.NotNil)
@@ -152,7 +190,7 @@
 	"tag_names": ["a", "b"]
 	}`
 	obj := s.testMAASObject.TestServer.NewNode(jsonValue)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 	hc, err := inst.hardwareCharacteristics()
 	c.Assert(err, jc.ErrorIsNil)
 	c.Assert(hc, gc.NotNil)
@@ -172,7 +210,7 @@
 
 func (s *instanceTest) testHardwareCharacteristicsMissing(c *gc.C, json, expect string) {
 	obj := s.testMAASObject.TestServer.NewNode(json)
-	inst := maasInstance{maasObject: &obj, environ: s.makeEnviron()}
+	inst := maasInstance{&obj}
 	_, err := inst.hardwareCharacteristics()
 	c.Assert(err, gc.ErrorMatches, expect)
 }

=== added file 'src/github.com/juju/juju/provider/maas/interfaces.go'
--- src/github.com/juju/juju/provider/maas/interfaces.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/maas/interfaces.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,628 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package maas
+
+import (
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/gomaasapi"
+	"gopkg.in/mgo.v2/bson"
+
+	"github.com/juju/juju/instance"
+	"github.com/juju/juju/network"
+)
+
+////////////////////////////////////////////////////////////////////////////////
+// New (1.9 and later) environs.NetworkInterfaces() implementation details follow.
+
+// TODO(dimitern): The types below should be part of gomaasapi.
+// LKK Card: https://canonical.leankit.com/Boards/View/101652562/119310616
+
+type maasLinkMode string
+
+const (
+	modeUnknown maasLinkMode = ""
+	modeAuto    maasLinkMode = "auto"
+	modeStatic  maasLinkMode = "static"
+	modeDHCP    maasLinkMode = "dhcp"
+	modeLinkUp  maasLinkMode = "link_up"
+)
+
+type maasInterfaceLink struct {
+	ID        int          `json:"id"`
+	Subnet    *maasSubnet  `json:"subnet,omitempty"`
+	IPAddress string       `json:"ip_address,omitempty"`
+	Mode      maasLinkMode `json:"mode"`
+}
+
+type maasInterfaceType string
+
+const (
+	typeUnknown  maasInterfaceType = ""
+	typePhysical maasInterfaceType = "physical"
+	typeVLAN     maasInterfaceType = "vlan"
+	typeBond     maasInterfaceType = "bond"
+)
+
+type maasInterface struct {
+	ID      int               `json:"id"`
+	Name    string            `json:"name"`
+	Type    maasInterfaceType `json:"type"`
+	Enabled bool              `json:"enabled"`
+
+	MACAddress  string   `json:"mac_address"`
+	VLAN        maasVLAN `json:"vlan"`
+	EffectveMTU int      `json:"effective_mtu"`
+
+	Links []maasInterfaceLink `json:"links"`
+
+	Parents  []string `json:"parents"`
+	Children []string `json:"children"`
+
+	ResourceURI string `json:"resource_uri"`
+}
+
+type maasVLAN struct {
+	ID          int    `json:"id"`
+	Name        string `json:"name"`
+	VID         int    `json:"vid"`
+	MTU         int    `json:"mtu"`
+	Fabric      string `json:"fabric"`
+	ResourceURI string `json:"resource_uri"`
+}
+
+type maasSubnet struct {
+	ID          int      `json:"id"`
+	Name        string   `json:"name"`
+	Space       string   `json:"space"`
+	VLAN        maasVLAN `json:"vlan"`
+	GatewayIP   string   `json:"gateway_ip"`
+	DNSServers  []string `json:"dns_servers"`
+	CIDR        string   `json:"cidr"`
+	ResourceURI string   `json:"resource_uri"`
+}
+
+func parseInterfaces(jsonBytes []byte) ([]maasInterface, error) {
+	var interfaces []maasInterface
+	if err := json.Unmarshal(jsonBytes, &interfaces); err != nil {
+		return nil, errors.Annotate(err, "parsing interfaces")
+	}
+	return interfaces, nil
+}
+
+// maasObjectNetworkInterfaces implements environs.NetworkInterfaces() using the
+// new (1.9+) MAAS API, parsing the node details JSON embedded into the given
+// maasObject to extract all the relevant InterfaceInfo fields. It returns an
+// error satisfying errors.IsNotSupported() if it cannot find the required
+// "interface_set" node details field.
+func maasObjectNetworkInterfaces(maasObject *gomaasapi.MAASObject) ([]network.InterfaceInfo, error) {
+
+	interfaceSet, ok := maasObject.GetMap()["interface_set"]
+	if !ok || interfaceSet.IsNil() {
+		// This means we're using an older MAAS API.
+		return nil, errors.NotSupportedf("interface_set")
+	}
+
+	// TODO(dimitern): Change gomaasapi JSONObject to give access to the raw
+	// JSON bytes directly, rather than having to call MarshalJSON just so
+	// the result can be unmarshaled from it.
+	//
+	// LKK Card: https://canonical.leankit.com/Boards/View/101652562/119311323
+
+	rawBytes, err := interfaceSet.MarshalJSON()
+	if err != nil {
+		return nil, errors.Annotate(err, "cannot get interface_set JSON bytes")
+	}
+
+	interfaces, err := parseInterfaces(rawBytes)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	infos := make([]network.InterfaceInfo, 0, len(interfaces))
+	for i, iface := range interfaces {
+		nicInfo := network.InterfaceInfo{
+			DeviceIndex:   i,
+			MACAddress:    iface.MACAddress,
+			ProviderId:    network.Id(fmt.Sprintf("%v", iface.ID)),
+			VLANTag:       iface.VLAN.VID,
+			InterfaceName: iface.Name,
+			Disabled:      !iface.Enabled,
+			NoAutoStart:   !iface.Enabled,
+			// This is not needed anymore, but the provisioner still validates it's set.
+			NetworkName: network.DefaultPrivate,
+		}
+
+		for _, link := range iface.Links {
+			switch link.Mode {
+			case modeUnknown:
+				nicInfo.ConfigType = network.ConfigUnknown
+			case modeDHCP:
+				nicInfo.ConfigType = network.ConfigDHCP
+			case modeStatic, modeLinkUp:
+				nicInfo.ConfigType = network.ConfigStatic
+			default:
+				nicInfo.ConfigType = network.ConfigManual
+			}
+
+			if link.IPAddress == "" {
+				logger.Warningf("interface %q has no address", iface.Name)
+			} else {
+				// We set it here initially without a space, just so we don't
+				// lose it when we have no linked subnet below.
+				nicInfo.Address = network.NewAddress(link.IPAddress)
+			}
+
+			if link.Subnet == nil {
+				logger.Warningf("interface %q link %d missing subnet", iface.Name, link.ID)
+				infos = append(infos, nicInfo)
+				continue
+			}
+
+			sub := link.Subnet
+			nicInfo.CIDR = sub.CIDR
+			nicInfo.ProviderSubnetId = network.Id(fmt.Sprintf("%v", sub.ID))
+
+			// Now we know the subnet and space, we can update the address to
+			// store the space with it.
+			nicInfo.Address = network.NewAddressOnSpace(sub.Space, link.IPAddress)
+
+			gwAddr := network.NewAddressOnSpace(sub.Space, sub.GatewayIP)
+			nicInfo.GatewayAddress = gwAddr
+
+			nicInfo.DNSServers = network.NewAddressesOnSpace(sub.Space, sub.DNSServers...)
+			nicInfo.MTU = sub.VLAN.MTU
+
+			// Each link we represent as a separate InterfaceInfo, but with the
+			// same name and device index, just different address, subnet, etc.
+			infos = append(infos, nicInfo)
+		}
+	}
+	return infos, nil
+}
+
+// NetworkInterfaces implements Environ.NetworkInterfaces.
+func (environ *maasEnviron) NetworkInterfaces(instId instance.Id) ([]network.InterfaceInfo, error) {
+	if !environ.supportsNetworkDeploymentUbuntu {
+		// No need to check if the instance JSON has "interface_set" in this
+		// case, as it won't.
+		return environ.legacyNetworkInterfaces(instId)
+	}
+
+	inst, err := environ.getInstance(instId)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	mi := inst.(*maasInstance)
+	return maasObjectNetworkInterfaces(mi.maasObject)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Legacy (pre 1.9) environs.NetworkInterfaces() implementation details follow.
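A note before the legacy helpers below: they rebuild a subnet CIDR from the separate "ip" and "netmask" fields MAAS returns per network, via net.IPNet. net.IPNet.String() does not zero the host bits, which is why the tests earlier in this diff expect CIDRs such as "192.168.2.2/24" rather than "192.168.2.0/24". A minimal sketch with invented example values:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // Pair a node's IP with a dotted-quad netmask, as the legacy
        // setupNetworks/legacyNetworkInterfaces code does.
        cidr := net.IPNet{
            IP:   net.ParseIP("192.168.2.1"),
            Mask: net.IPMask(net.ParseIP("255.255.255.0")),
        }
        fmt.Println(cidr.String()) // 192.168.2.1/24 (host part preserved)
    }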
+ +func (env *maasEnviron) getNodegroupInterfaces(nodegroups []string) map[string][]net.IP { + nodegroupsObject := env.getMAASClient().GetSubObject("nodegroups") + + nodegroupsInterfacesMap := make(map[string][]net.IP) + for _, uuid := range nodegroups { + interfacesObject := nodegroupsObject.GetSubObject(uuid).GetSubObject("interfaces") + interfacesResult, err := interfacesObject.CallGet("list", nil) + if err != nil { + logger.Debugf("cannot list interfaces for nodegroup %v: %v", uuid, err) + continue + } + interfaces, err := interfacesResult.GetArray() + if err != nil { + logger.Debugf("cannot get interfaces for nodegroup %v: %v", uuid, err) + continue + } + for _, interfaceResult := range interfaces { + nic, err := interfaceResult.GetMap() + if err != nil { + logger.Debugf("cannot get interface %v for nodegroup %v: %v", nic, uuid, err) + continue + } + ip, err := nic["ip"].GetString() + if err != nil { + logger.Debugf("cannot get interface IP %v for nodegroup %v: %v", nic, uuid, err) + continue + } + static_low, err := nic["static_ip_range_low"].GetString() + if err != nil { + logger.Debugf("cannot get static IP range lower bound for interface %v on nodegroup %v: %v", nic, uuid, err) + continue + } + static_high, err := nic["static_ip_range_high"].GetString() + if err != nil { + logger.Infof("cannot get static IP range higher bound for interface %v on nodegroup %v: %v", nic, uuid, err) + continue + } + static_low_ip := net.ParseIP(static_low) + static_high_ip := net.ParseIP(static_high) + if static_low_ip == nil || static_high_ip == nil { + logger.Debugf("invalid IP in static range for interface %v on nodegroup %v: %q %q", nic, uuid, static_low_ip, static_high_ip) + continue + } + nodegroupsInterfacesMap[ip] = []net.IP{static_low_ip, static_high_ip} + } + } + return nodegroupsInterfacesMap +} + +// networkDetails holds information about a MAAS network. +type networkDetails struct { + Name string + IP string + Mask string + VLANTag int + Description string + DefaultGateway string +} + +// getInstanceNetworks returns a list of all MAAS networks for a given node. +func (environ *maasEnviron) getInstanceNetworks(inst instance.Instance) ([]networkDetails, error) { + nodeId, err := environ.nodeIdFromInstance(inst) + if err != nil { + return nil, err + } + client := environ.getMAASClient().GetSubObject("networks") + params := url.Values{"node": {nodeId}} + json, err := client.CallGet("", params) + if err != nil { + return nil, err + } + jsonNets, err := json.GetArray() + if err != nil { + return nil, err + } + + networks := make([]networkDetails, len(jsonNets)) + for i, jsonNet := range jsonNets { + fields, err := jsonNet.GetMap() + if err != nil { + return nil, errors.Annotatef(err, "parsing network details") + } + name, err := fields["name"].GetString() + if err != nil { + return nil, errors.Annotatef(err, "cannot get name") + } + ip, err := fields["ip"].GetString() + if err != nil { + return nil, errors.Annotatef(err, "cannot get ip") + } + + defaultGateway := "" + defaultGatewayField, ok := fields["default_gateway"] + if ok && !defaultGatewayField.IsNil() { + // default_gateway is optional, so ignore it when unset or + // null. 
+			defaultGateway, err = defaultGatewayField.GetString()
+			if err != nil {
+				return nil, errors.Annotatef(err, "cannot get default_gateway")
+			}
+		}
+
+		netmask, err := fields["netmask"].GetString()
+		if err != nil {
+			return nil, errors.Annotatef(err, "cannot get netmask")
+		}
+		vlanTag := 0
+		vlanTagField, ok := fields["vlan_tag"]
+		if ok && !vlanTagField.IsNil() {
+			// vlan_tag is optional, so assume it's 0 when missing or nil.
+			vlanTagFloat, err := vlanTagField.GetFloat64()
+			if err != nil {
+				return nil, errors.Annotatef(err, "cannot get vlan_tag")
+			}
+			vlanTag = int(vlanTagFloat)
+		}
+		description, err := fields["description"].GetString()
+		if err != nil {
+			return nil, fmt.Errorf("cannot get description: %v", err)
+		}
+
+		networks[i] = networkDetails{
+			Name:           name,
+			IP:             ip,
+			Mask:           netmask,
+			DefaultGateway: defaultGateway,
+			VLANTag:        vlanTag,
+			Description:    description,
+		}
+	}
+	return networks, nil
+}
+
+// getNetworkMACs returns all MAC addresses connected to the given
+// network.
+func (environ *maasEnviron) getNetworkMACs(networkName string) ([]string, error) {
+	client := environ.getMAASClient().GetSubObject("networks").GetSubObject(networkName)
+	json, err := client.CallGet("list_connected_macs", nil)
+	if err != nil {
+		return nil, err
+	}
+	jsonMACs, err := json.GetArray()
+	if err != nil {
+		return nil, err
+	}
+
+	macs := make([]string, len(jsonMACs))
+	for i, jsonMAC := range jsonMACs {
+		fields, err := jsonMAC.GetMap()
+		if err != nil {
+			return nil, err
+		}
+		macAddress, err := fields["mac_address"].GetString()
+		if err != nil {
+			return nil, fmt.Errorf("cannot get mac_address: %v", err)
+		}
+		macs[i] = macAddress
+	}
+	return macs, nil
+}
+
+// getInstanceNetworkInterfaces returns a map of interface MAC address
+// to ifaceInfo for each network interface of the given instance, as
+// discovered during the commissioning phase.
+func (environ *maasEnviron) getInstanceNetworkInterfaces(inst instance.Instance) (map[string]ifaceInfo, error) {
+	maasInst := inst.(*maasInstance)
+	maasObj := maasInst.maasObject
+	result, err := maasObj.CallGet("details", nil)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	// Get the node's lldp / lshw details discovered at commissioning.
+	data, err := result.GetBytes()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var parsed map[string]interface{}
+	if err := bson.Unmarshal(data, &parsed); err != nil {
+		return nil, errors.Trace(err)
+	}
+	lshwData, ok := parsed["lshw"]
+	if !ok {
+		return nil, errors.Errorf("no hardware information available for node %q", inst.Id())
+	}
+	lshwXML, ok := lshwData.([]byte)
+	if !ok {
+		return nil, errors.Errorf("invalid hardware information for node %q", inst.Id())
+	}
+	// Now we have the lshw XML data, parse it to extract and return NICs.
+	return extractInterfaces(inst, lshwXML)
+}
+
+type ifaceInfo struct {
+	DeviceIndex   int
+	InterfaceName string
+	Disabled      bool
+}
+
+// extractInterfaces parses the XML output of lshw and extracts all
+// network interfaces, returning a map of MAC address to ifaceInfo.
+func extractInterfaces(inst instance.Instance, lshwXML []byte) (map[string]ifaceInfo, error) { + type Node struct { + Id string `xml:"id,attr"` + Disabled bool `xml:"disabled,attr,omitempty"` + Description string `xml:"description"` + Serial string `xml:"serial"` + LogicalName string `xml:"logicalname"` + Children []Node `xml:"node"` + } + type List struct { + Nodes []Node `xml:"node"` + } + var lshw List + if err := xml.Unmarshal(lshwXML, &lshw); err != nil { + return nil, errors.Annotatef(err, "cannot parse lshw XML details for node %q", inst.Id()) + } + interfaces := make(map[string]ifaceInfo) + var processNodes func(nodes []Node) error + var baseIndex int + processNodes = func(nodes []Node) error { + for _, node := range nodes { + if strings.HasPrefix(node.Id, "network") { + index := baseIndex + if strings.HasPrefix(node.Id, "network:") { + // There is an index suffix, parse it. + var err error + index, err = strconv.Atoi(strings.TrimPrefix(node.Id, "network:")) + if err != nil { + return errors.Annotatef(err, "lshw output for node %q has invalid ID suffix for %q", inst.Id(), node.Id) + } + } else { + baseIndex++ + } + + if node.Disabled { + logger.Debugf("node %q skipping disabled network interface %q", inst.Id(), node.LogicalName) + } + interfaces[node.Serial] = ifaceInfo{ + DeviceIndex: index, + InterfaceName: node.LogicalName, + Disabled: node.Disabled, + } + } + if err := processNodes(node.Children); err != nil { + return err + } + } + return nil + } + err := processNodes(lshw.Nodes) + return interfaces, err +} + +// setupNetworks prepares a []network.InterfaceInfo for the given instance. Any +// disabled network interfaces (as discovered from the lshw output for the node) +// will stay disabled. +func (environ *maasEnviron) setupNetworks(inst instance.Instance) ([]network.InterfaceInfo, error) { + // Get the instance network interfaces first. + interfaces, err := environ.getInstanceNetworkInterfaces(inst) + if err != nil { + return nil, errors.Annotatef(err, "getInstanceNetworkInterfaces failed") + } + logger.Debugf("node %q has network interfaces %v", inst.Id(), interfaces) + networks, err := environ.getInstanceNetworks(inst) + if err != nil { + return nil, errors.Annotatef(err, "getInstanceNetworks failed") + } + logger.Debugf("node %q has networks %v", inst.Id(), networks) + var tempInterfaceInfo []network.InterfaceInfo + for _, netw := range networks { + netCIDR := &net.IPNet{ + IP: net.ParseIP(netw.IP), + Mask: net.IPMask(net.ParseIP(netw.Mask)), + } + macs, err := environ.getNetworkMACs(netw.Name) + if err != nil { + return nil, errors.Annotatef(err, "getNetworkMACs failed") + } + logger.Debugf("network %q has MACs: %v", netw.Name, macs) + var defaultGateway network.Address + if netw.DefaultGateway != "" { + defaultGateway = network.NewAddress(netw.DefaultGateway) + } + for _, mac := range macs { + if ifinfo, ok := interfaces[mac]; ok { + tempInterfaceInfo = append(tempInterfaceInfo, network.InterfaceInfo{ + MACAddress: mac, + InterfaceName: ifinfo.InterfaceName, + DeviceIndex: ifinfo.DeviceIndex, + CIDR: netCIDR.String(), + VLANTag: netw.VLANTag, + ProviderId: network.Id(netw.Name), + NetworkName: netw.Name, + Disabled: ifinfo.Disabled, + GatewayAddress: defaultGateway, + }) + } + } + } + // Verify we filled-in everything for all networks/interfaces + // and drop incomplete records. 
+	var interfaceInfo []network.InterfaceInfo
+	for _, info := range tempInterfaceInfo {
+		if info.ProviderId == "" || info.NetworkName == "" || info.CIDR == "" {
+			logger.Infof("ignoring interface %q: missing subnet info", info.InterfaceName)
+			continue
+		}
+		if info.MACAddress == "" || info.InterfaceName == "" {
+			logger.Infof("ignoring subnet %q: missing interface info", info.ProviderId)
+			continue
+		}
+		interfaceInfo = append(interfaceInfo, info)
+	}
+	logger.Debugf("node %q network information: %#v", inst.Id(), interfaceInfo)
+	return interfaceInfo, nil
+}
+
+// listConnectedMacs calls the MAAS list_connected_macs API to fetch all
+// the MAC addresses attached to a specific network.
+func (environ *maasEnviron) listConnectedMacs(network networkDetails) ([]string, error) {
+	client := environ.getMAASClient().GetSubObject("networks").GetSubObject(network.Name)
+	json, err := client.CallGet("list_connected_macs", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	macs, err := json.GetArray()
+	if err != nil {
+		return nil, err
+	}
+	result := []string{}
+	for _, macObj := range macs {
+		macMap, err := macObj.GetMap()
+		if err != nil {
+			return nil, err
+		}
+		mac, err := macMap["mac_address"].GetString()
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, mac)
+	}
+	return result, nil
+}
+
+// legacyNetworkInterfaces implements Environ.NetworkInterfaces() on MAAS 1.8 and earlier.
+func (environ *maasEnviron) legacyNetworkInterfaces(instId instance.Id) ([]network.InterfaceInfo, error) {
+	instances, err := environ.acquiredInstances([]instance.Id{instId})
+	if err != nil {
+		return nil, errors.Annotatef(err, "could not find instance %q", instId)
+	}
+	if len(instances) == 0 {
+		return nil, errors.NotFoundf("instance %q", instId)
+	}
+	inst := instances[0]
+	interfaces, err := environ.getInstanceNetworkInterfaces(inst)
+	if err != nil {
+		return nil, errors.Annotatef(err, "failed to get instance %q network interfaces", instId)
+	}
+
+	networks, err := environ.getInstanceNetworks(inst)
+	if err != nil {
+		return nil, errors.Annotatef(err, "failed to get instance %q subnets", instId)
+	}
+
+	macToNetworksMap := make(map[string][]networkDetails)
+	for _, network := range networks {
+		macs, err := environ.listConnectedMacs(network)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		for _, mac := range macs {
+			if networks, found := macToNetworksMap[mac]; found {
+				macToNetworksMap[mac] = append(networks, network)
+			} else {
+				macToNetworksMap[mac] = append([]networkDetails(nil), network)
+			}
+		}
+	}
+
+	result := []network.InterfaceInfo{}
+	for serial, iface := range interfaces {
+		deviceIndex := iface.DeviceIndex
+		interfaceName := iface.InterfaceName
+		disabled := iface.Disabled
+
+		ifaceInfo := network.InterfaceInfo{
+			DeviceIndex:   deviceIndex,
+			InterfaceName: interfaceName,
+			Disabled:      disabled,
+			NoAutoStart:   disabled,
+			MACAddress:    serial,
+			ConfigType:    network.ConfigDHCP,
+		}
+		allDetails, ok := macToNetworksMap[serial]
+		if !ok {
+			logger.Debugf("no subnet information for MAC address %q, instance %q", serial, instId)
+			continue
+		}
+		for _, details := range allDetails {
+			ifaceInfo.VLANTag = details.VLANTag
+			ifaceInfo.ProviderSubnetId = network.Id(details.Name)
+			mask := net.IPMask(net.ParseIP(details.Mask))
+			cidr := net.IPNet{
+				IP:   net.ParseIP(details.IP),
+				Mask: mask,
+			}
+			ifaceInfo.CIDR = cidr.String()
+			ifaceInfo.Address = network.NewAddress(cidr.IP.String())
+			if details.DefaultGateway != "" {
+				ifaceInfo.GatewayAddress =
network.NewAddress(details.DefaultGateway) + } + result = append(result, ifaceInfo) + } + } + return result, nil +} === added file 'src/github.com/juju/juju/provider/maas/interfaces_test.go' --- src/github.com/juju/juju/provider/maas/interfaces_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/maas/interfaces_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,899 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package maas + +import ( + "bytes" + "fmt" + "text/template" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/network" +) + +//////////////////////////////////////////////////////////////////////////////// +// New (1.9 and later) environs.NetworkInterfaces() implementation tests follow. + +type interfacesSuite struct { + providerSuite +} + +var _ = gc.Suite(&interfacesSuite{}) + +const exampleInterfaceSetJSON = ` +[ + { + "name": "eth0", + "links": [ + { + "subnet": { + "dns_servers": ["10.20.19.2", "10.20.19.3"], + "name": "pxe", + "space": "default", + "vlan": { + "name": "untagged", + "vid": 0, + "mtu": 1500, + "fabric": "managed", + "id": 5001, + "resource_uri": "/MAAS/api/1.0/vlans/5001/" + }, + "gateway_ip": "10.20.19.2", + "cidr": "10.20.19.0/24", + "id": 3, + "resource_uri": "/MAAS/api/1.0/subnets/3/" + }, + "ip_address": "10.20.19.103", + "id": 436, + "mode": "static" + }, + { + "subnet": { + "dns_servers": ["10.20.19.2", "10.20.19.3"], + "name": "pxe", + "space": "default", + "vlan": { + "name": "untagged", + "vid": 0, + "mtu": 1500, + "fabric": "managed", + "id": 5001, + "resource_uri": "/MAAS/api/1.0/vlans/5001/" + }, + "gateway_ip": "10.20.19.2", + "cidr": "10.20.19.0/24", + "id": 3, + "resource_uri": "/MAAS/api/1.0/subnets/3/" + }, + "ip_address": "10.20.19.104", + "id": 437, + "mode": "static" + } + ], + "tags": [], + "vlan": { + "name": "untagged", + "vid": 0, + "mtu": 1500, + "fabric": "managed", + "id": 5001, + "resource_uri": "/MAAS/api/1.0/vlans/5001/" + }, + "enabled": true, + "id": 91, + "discovered": [ + { + "subnet": { + "dns_servers": [], + "name": "pxe", + "space": "default", + "vlan": { + "name": "untagged", + "vid": 0, + "mtu": 1500, + "fabric": "managed", + "id": 5001, + "resource_uri": "/MAAS/api/1.0/vlans/5001/" + }, + "gateway_ip": "10.20.19.2", + "cidr": "10.20.19.0/24", + "id": 3, + "resource_uri": "/MAAS/api/1.0/subnets/3/" + }, + "ip_address": "10.20.19.20" + } + ], + "mac_address": "52:54:00:70:9b:fe", + "parents": [], + "effective_mtu": 1500, + "params": {}, + "type": "physical", + "children": [ + "eth0.100", + "eth0.250", + "eth0.50" + ], + "resource_uri": "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/91/" + }, + { + "name": "eth0.50", + "links": [ + { + "subnet": { + "dns_servers": [], + "name": "admin", + "space": "admin", + "vlan": { + "name": "admin", + "vid": 50, + "mtu": 1500, + "fabric": "managed", + "id": 5004, + "resource_uri": "/MAAS/api/1.0/vlans/5004/" + }, + "gateway_ip": "10.50.19.2", + "cidr": "10.50.19.0/24", + "id": 5, + "resource_uri": "/MAAS/api/1.0/subnets/5/" + }, + "ip_address": "10.50.19.103", + "id": 517, + "mode": "static" + } + ], + "tags": [], + "vlan": { + "name": "admin", + "vid": 50, + "mtu": 1500, + "fabric": "managed", + "id": 5004, + "resource_uri": "/MAAS/api/1.0/vlans/5004/" + }, + "enabled": true, + "id": 150, + "discovered": null, + "mac_address": "52:54:00:70:9b:fe", + "parents": [ + "eth0" + ], + "effective_mtu": 1500, + "params": {}, + "type": "vlan", + "children": 
[], + "resource_uri": "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/150/" + }, + { + "name": "eth0.100", + "links": [ + { + "subnet": { + "dns_servers": [], + "name": "public", + "space": "public", + "vlan": { + "name": "public", + "vid": 100, + "mtu": 1500, + "fabric": "managed", + "id": 5005, + "resource_uri": "/MAAS/api/1.0/vlans/5005/" + }, + "gateway_ip": "10.100.19.2", + "cidr": "10.100.19.0/24", + "id": 6, + "resource_uri": "/MAAS/api/1.0/subnets/6/" + }, + "ip_address": "10.100.19.103", + "id": 519, + "mode": "static" + } + ], + "tags": [], + "vlan": { + "name": "public", + "vid": 100, + "mtu": 1500, + "fabric": "managed", + "id": 5005, + "resource_uri": "/MAAS/api/1.0/vlans/5005/" + }, + "enabled": true, + "id": 151, + "discovered": null, + "mac_address": "52:54:00:70:9b:fe", + "parents": [ + "eth0" + ], + "effective_mtu": 1500, + "params": {}, + "type": "vlan", + "children": [], + "resource_uri": "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/151/" + }, + { + "name": "eth0.250", + "links": [ + { + "subnet": { + "dns_servers": [], + "name": "storage", + "space": "storage", + "vlan": { + "name": "storage", + "vid": 250, + "mtu": 1500, + "fabric": "managed", + "id": 5008, + "resource_uri": "/MAAS/api/1.0/vlans/5008/" + }, + "gateway_ip": "10.250.19.2", + "cidr": "10.250.19.0/24", + "id": 8, + "resource_uri": "/MAAS/api/1.0/subnets/8/" + }, + "ip_address": "10.250.19.103", + "id": 523, + "mode": "static" + } + ], + "tags": [], + "vlan": { + "name": "storage", + "vid": 250, + "mtu": 1500, + "fabric": "managed", + "id": 5008, + "resource_uri": "/MAAS/api/1.0/vlans/5008/" + }, + "enabled": true, + "id": 152, + "discovered": null, + "mac_address": "52:54:00:70:9b:fe", + "parents": [ + "eth0" + ], + "effective_mtu": 1500, + "params": {}, + "type": "vlan", + "children": [], + "resource_uri": "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/152/" + } +]` + +func (s *interfacesSuite) TestParseInterfacesNoJSON(c *gc.C) { + result, err := parseInterfaces(nil) + c.Check(err, gc.ErrorMatches, "parsing interfaces: unexpected end of JSON input") + c.Check(result, gc.IsNil) +} + +func (s *interfacesSuite) TestParseInterfacesBadJSON(c *gc.C) { + result, err := parseInterfaces([]byte("$bad")) + c.Check(err, gc.ErrorMatches, `parsing interfaces: invalid character '\$' .*`) + c.Check(result, gc.IsNil) +} + +func (s *interfacesSuite) TestParseInterfacesExampleJSON(c *gc.C) { + + vlan0 := maasVLAN{ + ID: 5001, + Name: "untagged", + VID: 0, + MTU: 1500, + Fabric: "managed", + ResourceURI: "/MAAS/api/1.0/vlans/5001/", + } + + vlan50 := maasVLAN{ + ID: 5004, + Name: "admin", + VID: 50, + MTU: 1500, + Fabric: "managed", + ResourceURI: "/MAAS/api/1.0/vlans/5004/", + } + + vlan100 := maasVLAN{ + ID: 5005, + Name: "public", + VID: 100, + MTU: 1500, + Fabric: "managed", + ResourceURI: "/MAAS/api/1.0/vlans/5005/", + } + + vlan250 := maasVLAN{ + ID: 5008, + Name: "storage", + VID: 250, + MTU: 1500, + Fabric: "managed", + ResourceURI: "/MAAS/api/1.0/vlans/5008/", + } + + subnetPXE := maasSubnet{ + ID: 3, + Name: "pxe", + Space: "default", + VLAN: vlan0, + GatewayIP: "10.20.19.2", + DNSServers: []string{"10.20.19.2", "10.20.19.3"}, + CIDR: "10.20.19.0/24", + ResourceURI: "/MAAS/api/1.0/subnets/3/", + } + + expected := []maasInterface{{ + ID: 91, + Name: "eth0", + Type: "physical", + Enabled: true, + MACAddress: "52:54:00:70:9b:fe", + VLAN: vlan0, + EffectveMTU: 1500, + Links: []maasInterfaceLink{{ + ID: 436, + Subnet: &subnetPXE, + 
IPAddress: "10.20.19.103", + Mode: "static", + }, { + ID: 437, + Subnet: &subnetPXE, + IPAddress: "10.20.19.104", + Mode: "static", + }}, + Parents: []string{}, + Children: []string{"eth0.100", "eth0.250", "eth0.50"}, + ResourceURI: "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/91/", + }, { + ID: 150, + Name: "eth0.50", + Type: "vlan", + Enabled: true, + MACAddress: "52:54:00:70:9b:fe", + VLAN: vlan50, + EffectveMTU: 1500, + Links: []maasInterfaceLink{{ + ID: 517, + Subnet: &maasSubnet{ + ID: 5, + Name: "admin", + Space: "admin", + VLAN: vlan50, + GatewayIP: "10.50.19.2", + DNSServers: []string{}, + CIDR: "10.50.19.0/24", + ResourceURI: "/MAAS/api/1.0/subnets/5/", + }, + IPAddress: "10.50.19.103", + Mode: "static", + }}, + Parents: []string{"eth0"}, + Children: []string{}, + ResourceURI: "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/150/", + }, { + ID: 151, + Name: "eth0.100", + Type: "vlan", + Enabled: true, + MACAddress: "52:54:00:70:9b:fe", + VLAN: vlan100, + EffectveMTU: 1500, + Links: []maasInterfaceLink{{ + ID: 519, + Subnet: &maasSubnet{ + ID: 6, + Name: "public", + Space: "public", + VLAN: vlan100, + GatewayIP: "10.100.19.2", + DNSServers: []string{}, + CIDR: "10.100.19.0/24", + ResourceURI: "/MAAS/api/1.0/subnets/6/", + }, + IPAddress: "10.100.19.103", + Mode: "static", + }}, + Parents: []string{"eth0"}, + Children: []string{}, + ResourceURI: "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/151/", + }, { + ID: 152, + Name: "eth0.250", + Type: "vlan", + Enabled: true, + MACAddress: "52:54:00:70:9b:fe", + VLAN: vlan250, + EffectveMTU: 1500, + Links: []maasInterfaceLink{{ + ID: 523, + Subnet: &maasSubnet{ + ID: 8, + Name: "storage", + Space: "storage", + VLAN: vlan250, + GatewayIP: "10.250.19.2", + DNSServers: []string{}, + CIDR: "10.250.19.0/24", + ResourceURI: "/MAAS/api/1.0/subnets/8/", + }, + IPAddress: "10.250.19.103", + Mode: "static", + }}, + Parents: []string{"eth0"}, + Children: []string{}, + ResourceURI: "/MAAS/api/1.0/nodes/node-18489434-9eb0-11e5-bdef-00163e40c3b6/interfaces/152/", + }} + + result, err := parseInterfaces([]byte(exampleInterfaceSetJSON)) + c.Check(err, jc.ErrorIsNil) + c.Check(result, jc.DeepEquals, expected) +} + +func (s *interfacesSuite) TestMAASObjectNetworkInterfaces(c *gc.C) { + nodeJSON := fmt.Sprintf(`{ + "system_id": "foo", + "interface_set": %s + }`, exampleInterfaceSetJSON) + obj := s.testMAASObject.TestServer.NewNode(nodeJSON) + + expected := []network.InterfaceInfo{{ + DeviceIndex: 0, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.20.19.0/24", + NetworkName: "juju-private", + ProviderId: "91", + ProviderSubnetId: "3", + AvailabilityZones: nil, + VLANTag: 0, + InterfaceName: "eth0", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("default", "10.20.19.103"), + DNSServers: network.NewAddressesOnSpace("default", "10.20.19.2", "10.20.19.3"), + DNSSearch: "", + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("default", "10.20.19.2"), + ExtraConfig: nil, + }, { + DeviceIndex: 0, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.20.19.0/24", + NetworkName: "juju-private", + ProviderId: "91", + ProviderSubnetId: "3", + AvailabilityZones: nil, + VLANTag: 0, + InterfaceName: "eth0", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("default", "10.20.19.104"), + DNSServers: network.NewAddressesOnSpace("default", "10.20.19.2", "10.20.19.3"), + DNSSearch: "", + MTU: 
1500, + GatewayAddress: network.NewAddressOnSpace("default", "10.20.19.2"), + ExtraConfig: nil, + }, { + DeviceIndex: 1, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.50.19.0/24", + NetworkName: "juju-private", + ProviderId: "150", + ProviderSubnetId: "5", + AvailabilityZones: nil, + VLANTag: 50, + InterfaceName: "eth0.50", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("admin", "10.50.19.103"), + DNSServers: nil, + DNSSearch: "", + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("admin", "10.50.19.2"), + ExtraConfig: nil, + }, { + DeviceIndex: 2, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.100.19.0/24", + NetworkName: "juju-private", + ProviderId: "151", + ProviderSubnetId: "6", + AvailabilityZones: nil, + VLANTag: 100, + InterfaceName: "eth0.100", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("public", "10.100.19.103"), + DNSServers: nil, + DNSSearch: "", + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("public", "10.100.19.2"), + ExtraConfig: nil, + }, { + DeviceIndex: 3, + MACAddress: "52:54:00:70:9b:fe", + CIDR: "10.250.19.0/24", + NetworkName: "juju-private", + ProviderId: "152", + ProviderSubnetId: "8", + AvailabilityZones: nil, + VLANTag: 250, + InterfaceName: "eth0.250", + Disabled: false, + NoAutoStart: false, + ConfigType: "static", + Address: network.NewAddressOnSpace("storage", "10.250.19.103"), + DNSServers: nil, + DNSSearch: "", + MTU: 1500, + GatewayAddress: network.NewAddressOnSpace("storage", "10.250.19.2"), + ExtraConfig: nil, + }} + + infos, err := maasObjectNetworkInterfaces(&obj) + c.Assert(err, jc.ErrorIsNil) + c.Check(infos, jc.DeepEquals, expected) +} + +//////////////////////////////////////////////////////////////////////////////// +// Legacy (pre 1.9) environs.NetworkInterfaces() implementation tests follow. 
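The legacy tests below feed the provider a canned lshw XML dump and expect it to be reduced to a map from MAC address to ifaceInfo, i.e. the {DeviceIndex, InterfaceName, Disabled} triple asserted throughout. The real extractInterfaces implementation lives elsewhere in the provider and is not part of this diff; the following standalone sketch (lshwNode, nicInfo, and collectNICs are invented names for illustration, not taken from the source) shows one way such a dump maps onto that shape:

    package main

    import (
        "encoding/xml"
        "fmt"
        "strconv"
        "strings"
    )

    // lshwNode is a minimal recursive mapping of lshw's XML tree; only
    // the parts the fixtures below exercise are kept.
    type lshwNode struct {
        ID          string     `xml:"id,attr"`
        Disabled    bool       `xml:"disabled,attr"`
        LogicalName string     `xml:"logicalname"`
        Serial      string     `xml:"serial"`
        Children    []lshwNode `xml:"node"`
    }

    // nicInfo mirrors the ifaceInfo triple asserted by the tests.
    type nicInfo struct {
        DeviceIndex   int
        InterfaceName string
        Disabled      bool
    }

    // collectNICs walks the tree depth-first and keeps every node whose
    // id is "network" or "network:N". An explicit ":N" suffix pins the
    // device index; without one, indexes are assigned in document order.
    func collectNICs(n lshwNode, next *int, out map[string]nicInfo) {
        if n.ID == "network" || strings.HasPrefix(n.ID, "network:") {
            index := *next
            if i := strings.IndexByte(n.ID, ':'); i >= 0 {
                if parsed, err := strconv.Atoi(n.ID[i+1:]); err == nil {
                    index = parsed
                }
            }
            out[n.Serial] = nicInfo{index, n.LogicalName, n.Disabled}
            *next++
        }
        for _, child := range n.Children {
            collectNICs(child, next, out)
        }
    }

    func main() {
        doc := `<list>
          <node id="network:0" disabled="true" class="network">
           <logicalname>wlan0</logicalname>
           <serial>aa:bb:cc:dd:ee:ff</serial>
          </node>
        </list>`
        var root struct {
            Nodes []lshwNode `xml:"node"`
        }
        if err := xml.Unmarshal([]byte(doc), &root); err != nil {
            panic(err)
        }
        nics := make(map[string]nicInfo)
        next := 0
        for _, n := range root.Nodes {
            collectNICs(n, &next, nics)
        }
        fmt.Println(nics["aa:bb:cc:dd:ee:ff"]) // {0 wlan0 true}
    }

Explicit ":N" suffixes are what let TestGetInstanceNetworkInterfaces keep stable indexes even though the template ranges over a Go map; ids without a suffix fall back to document order, which is the case the "implicit indexes" fixture covers.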
+
+const lshwXMLTemplate = `
+<?xml version="1.0" standalone="yes" ?>
+<!-- generated by lshw -->
+<list>
+<node id="machine" claimed="true" class="system" handle="DMI:0001">
+ <description>Computer</description>
+ <product>VirtualBox ()</product>
+ <width units="bits">64</width>
+  <node id="core" claimed="true" class="bus" handle="DMI:0008">
+   <description>Motherboard</description>
+    <node id="pci" claimed="true" class="bridge" handle="PCIBUS:0000:00">
+     <description>Host bridge</description>{{$list := .}}{{range $mac, $ifi := $list}}
+      <node id="network:{{$ifi.DeviceIndex}}"{{if $ifi.Disabled}} disabled="true"{{end}} claimed="true" class="network" handle="PCI:0000:00:03.0">
+       <description>Ethernet interface</description>
+       <product>82540EM Gigabit Ethernet Controller</product>
+       <logicalname>{{$ifi.InterfaceName}}</logicalname>
+       <serial>{{$mac}}</serial>
+      </node>{{end}}
+    </node>
+  </node>
+</node>
+</list>
+`
+
+func (suite *environSuite) generateHWTemplate(netMacs map[string]ifaceInfo) (string, error) {
+	tmpl, err := template.New("test").Parse(lshwXMLTemplate)
+	if err != nil {
+		return "", err
+	}
+	var buf bytes.Buffer
+	err = tmpl.Execute(&buf, netMacs)
+	if err != nil {
+		return "", err
+	}
+	return string(buf.Bytes()), nil
+}
+
+func (suite *environSuite) TestGetNetworkMACs(c *gc.C) {
+	env := suite.makeEnviron()
+
+	suite.testMAASObject.TestServer.NewNode(`{"system_id": "node_1"}`)
+	suite.testMAASObject.TestServer.NewNode(`{"system_id": "node_2"}`)
+	suite.testMAASObject.TestServer.NewNetwork(
+		`{"name": "net_1","ip":"0.1.2.0","netmask":"255.255.255.0"}`,
+	)
+	suite.testMAASObject.TestServer.NewNetwork(
+		`{"name": "net_2","ip":"0.2.2.0","netmask":"255.255.255.0"}`,
+	)
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_2", "net_2", "aa:bb:cc:dd:ee:22")
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_1", "net_1", "aa:bb:cc:dd:ee:11")
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_2", "net_1", "aa:bb:cc:dd:ee:21")
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_1", "net_2", "aa:bb:cc:dd:ee:12")
+
+	networks, err := env.getNetworkMACs("net_1")
+	c.Assert(err, jc.ErrorIsNil)
+	c.Check(networks, jc.SameContents, []string{"aa:bb:cc:dd:ee:11", "aa:bb:cc:dd:ee:21"})
+
+	networks, err = env.getNetworkMACs("net_2")
+	c.Assert(err, jc.ErrorIsNil)
+	c.Check(networks, jc.SameContents, []string{"aa:bb:cc:dd:ee:12", "aa:bb:cc:dd:ee:22"})
+
+	networks, err = env.getNetworkMACs("net_3")
+	c.Check(networks, gc.HasLen, 0)
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (suite *environSuite) TestGetInstanceNetworks(c *gc.C) {
+	suite.newNetwork("test_network", 123, 321, "null")
+	testInstance := suite.getInstance("instance_for_network")
+	suite.testMAASObject.TestServer.ConnectNodeToNetwork("instance_for_network", "test_network")
+	networks, err := suite.makeEnviron().getInstanceNetworks(testInstance)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Check(networks, gc.DeepEquals, []networkDetails{{
+		Name:           "test_network",
+		IP:             "192.168.123.2",
+		Mask:           "255.255.255.0",
+		VLANTag:        321,
+		Description:    "test_network_123_321",
+		DefaultGateway: "", // "null" and "" are treated as N/A.
+	},
+	})
+}
+
+// A typical lshw XML dump with lots of things left out.
+const lshwXMLTestExtractInterfaces = `
+<?xml version="1.0" standalone="yes" ?>
+<!-- generated by lshw -->
+<list>
+<node id="machine" claimed="true" class="system" handle="DMI:0001">
+ <description>Notebook</description>
+ <product>MyMachine</product>
+ <version>1.0</version>
+ <width units="bits">64</width>
+  <node id="core" claimed="true" class="bus" handle="DMI:0002">
+   <description>Motherboard</description>
+    <node id="cpu" claimed="true" class="processor" handle="DMI:0004">
+     <description>CPU</description>
+      <node id="pci" claimed="true" class="bridge" handle="PCIBUS:0000:00">
+       <node id="network:0" claimed="true" disabled="true" class="network" handle="PCI:0000:00:19.0">
+        <logicalname>wlan0</logicalname>
+        <serial>aa:bb:cc:dd:ee:ff</serial>
+       </node>
+       <node id="network:1" claimed="true" class="network" handle="PCI:0000:00:1a.0">
+        <logicalname>eth0</logicalname>
+        <serial>aa:bb:cc:dd:ee:f1</serial>
+       </node>
+      </node>
+    </node>
+  </node>
+  <node id="network:2" claimed="true" class="network" handle="PCI:0000:00:1b.0">
+   <logicalname>vnet1</logicalname>
+   <serial>aa:bb:cc:dd:ee:f2</serial>
+  </node>
+</node>
+</list>
+`
+
+// An lshw XML dump with implicit network interface indexes.
+const lshwXMLTestExtractInterfacesImplicitIndexes = `
+<?xml version="1.0" standalone="yes" ?>
+<!-- generated by lshw -->
+<list>
+<node id="machine" claimed="true" class="system" handle="DMI:0001">
+ <description>Notebook</description>
+ <product>MyMachine</product>
+ <version>1.0</version>
+ <width units="bits">64</width>
+  <node id="core" claimed="true" class="bus" handle="DMI:0002">
+   <description>Motherboard</description>
+    <node id="cpu" claimed="true" class="processor" handle="DMI:0004">
+     <description>CPU</description>
+      <node id="pci" claimed="true" class="bridge" handle="PCIBUS:0000:00">
+       <node id="network" claimed="true" disabled="true" class="network" handle="PCI:0000:00:19.0">
+        <logicalname>wlan0</logicalname>
+        <serial>aa:bb:cc:dd:ee:ff</serial>
+       </node>
+       <node id="network" claimed="true" class="network" handle="PCI:0000:00:1a.0">
+        <logicalname>eth0</logicalname>
+        <serial>aa:bb:cc:dd:ee:f1</serial>
+       </node>
+      </node>
+    </node>
+  </node>
+  <node id="network" claimed="true" class="network" handle="PCI:0000:00:1b.0">
+   <logicalname>vnet1</logicalname>
+   <serial>aa:bb:cc:dd:ee:f2</serial>
+  </node>
+</node>
+</list>
+`
+
+func (suite *environSuite) TestExtractInterfaces(c *gc.C) {
+	rawData := []string{
+		lshwXMLTestExtractInterfaces,
+		lshwXMLTestExtractInterfacesImplicitIndexes,
+	}
+	for _, data := range rawData {
+		inst := suite.getInstance("testInstance")
+		interfaces, err := extractInterfaces(inst, []byte(data))
+		c.Assert(err, jc.ErrorIsNil)
+		c.Check(interfaces, jc.DeepEquals, map[string]ifaceInfo{
+			"aa:bb:cc:dd:ee:ff": {0, "wlan0", true},
+			"aa:bb:cc:dd:ee:f1": {1, "eth0", false},
+			"aa:bb:cc:dd:ee:f2": {2, "vnet1", false},
+		})
+	}
+}
+
+func (suite *environSuite) TestGetInstanceNetworkInterfaces(c *gc.C) {
+	inst := suite.getInstance("testInstance")
+	templateInterfaces := map[string]ifaceInfo{
+		"aa:bb:cc:dd:ee:ff": {0, "wlan0", true},
+		"aa:bb:cc:dd:ee:f1": {1, "eth0", true},
+		"aa:bb:cc:dd:ee:f2": {2, "vnet1", false},
+	}
+	lshwXML, err := suite.generateHWTemplate(templateInterfaces)
+	c.Assert(err, jc.ErrorIsNil)
+
+	suite.testMAASObject.TestServer.AddNodeDetails("testInstance", lshwXML)
+	env := suite.makeEnviron()
+	interfaces, err := env.getInstanceNetworkInterfaces(inst)
+	c.Assert(err, jc.ErrorIsNil)
+	// Both wlan0 and eth0 are disabled in lshw output.
+	c.Check(interfaces, jc.DeepEquals, templateInterfaces)
+}
+
+func (suite *environSuite) TestSetupNetworks(c *gc.C) {
+	testInstance := suite.getInstance("node1")
+	templateInterfaces := map[string]ifaceInfo{
+		"aa:bb:cc:dd:ee:ff": {0, "wlan0", true},
+		"aa:bb:cc:dd:ee:f1": {1, "eth0", true},
+		"aa:bb:cc:dd:ee:f2": {2, "vnet1", false},
+	}
+	lshwXML, err := suite.generateHWTemplate(templateInterfaces)
+	c.Assert(err, jc.ErrorIsNil)
+
+	suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML)
+	suite.newNetwork("LAN", 2, 42, "null")
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f1")
+	suite.newNetwork("Virt", 3, 0, "0.1.2.3") // primary + gateway
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f2")
+	suite.newNetwork("WLAN", 1, 0, "") // "" same as "null" for gateway
+	suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "WLAN", "aa:bb:cc:dd:ee:ff")
+	networkInfo, err := suite.makeEnviron().setupNetworks(testInstance)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Note: order of networks is based on lshwXML
+	// Unfortunately, because network.InterfaceInfo is unhashable
+	// (contains a map) we can't use jc.SameContents here.
+ c.Check(networkInfo, gc.HasLen, 3) + for _, info := range networkInfo { + switch info.DeviceIndex { + case 0: + c.Check(info, jc.DeepEquals, network.InterfaceInfo{ + MACAddress: "aa:bb:cc:dd:ee:ff", + CIDR: "192.168.1.2/24", + NetworkName: "WLAN", + ProviderId: "WLAN", + VLANTag: 0, + DeviceIndex: 0, + InterfaceName: "wlan0", + Disabled: true, // from networksToDisable("WLAN") + }) + case 1: + c.Check(info, jc.DeepEquals, network.InterfaceInfo{ + DeviceIndex: 1, + MACAddress: "aa:bb:cc:dd:ee:f1", + CIDR: "192.168.2.2/24", + NetworkName: "LAN", + ProviderId: "LAN", + VLANTag: 42, + InterfaceName: "eth0", + Disabled: true, // from networksToDisable("WLAN") + }) + case 2: + c.Check(info, jc.DeepEquals, network.InterfaceInfo{ + MACAddress: "aa:bb:cc:dd:ee:f2", + CIDR: "192.168.3.2/24", + NetworkName: "Virt", + ProviderId: "Virt", + VLANTag: 0, + DeviceIndex: 2, + InterfaceName: "vnet1", + Disabled: false, + GatewayAddress: network.NewAddress("0.1.2.3"), // from newNetwork("Virt", 3, 0, "0.1.2.3") + }) + } + } +} + +// The same test, but now "Virt" network does not have matched MAC address +func (suite *environSuite) TestSetupNetworksPartialMatch(c *gc.C) { + testInstance := suite.getInstance("node1") + templateInterfaces := map[string]ifaceInfo{ + "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, + "aa:bb:cc:dd:ee:f1": {1, "eth0", false}, + "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, + } + lshwXML, err := suite.generateHWTemplate(templateInterfaces) + c.Assert(err, jc.ErrorIsNil) + + suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) + suite.newNetwork("LAN", 2, 42, "192.168.2.1") + suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "LAN", "aa:bb:cc:dd:ee:f1") + suite.newNetwork("Virt", 3, 0, "") + suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f3") + networkInfo, err := suite.makeEnviron().setupNetworks(testInstance) + c.Assert(err, jc.ErrorIsNil) + + // Note: order of networks is based on lshwXML + c.Check(networkInfo, jc.DeepEquals, []network.InterfaceInfo{{ + MACAddress: "aa:bb:cc:dd:ee:f1", + CIDR: "192.168.2.2/24", + NetworkName: "LAN", + ProviderId: "LAN", + VLANTag: 42, + DeviceIndex: 1, + InterfaceName: "eth0", + Disabled: false, + GatewayAddress: network.NewAddress("192.168.2.1"), + }}) +} + +// The same test, but now no networks have matched MAC +func (suite *environSuite) TestSetupNetworksNoMatch(c *gc.C) { + testInstance := suite.getInstance("node1") + templateInterfaces := map[string]ifaceInfo{ + "aa:bb:cc:dd:ee:ff": {0, "wlan0", true}, + "aa:bb:cc:dd:ee:f1": {1, "eth0", false}, + "aa:bb:cc:dd:ee:f2": {2, "vnet1", false}, + } + lshwXML, err := suite.generateHWTemplate(templateInterfaces) + c.Assert(err, jc.ErrorIsNil) + + suite.testMAASObject.TestServer.AddNodeDetails("node1", lshwXML) + suite.newNetwork("Virt", 3, 0, "") + suite.testMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node1", "Virt", "aa:bb:cc:dd:ee:f3") + networkInfo, err := suite.makeEnviron().setupNetworks(testInstance) + c.Assert(err, jc.ErrorIsNil) + + // Note: order of networks is based on lshwXML + c.Check(networkInfo, gc.HasLen, 0) +} + +func (suite *environSuite) TestNetworkInterfacesLegacy(c *gc.C) { + testInstance := suite.createSubnets(c, false) + + netInfo, err := suite.makeEnviron().NetworkInterfaces(testInstance.Id()) + c.Assert(err, jc.ErrorIsNil) + + expectedInfo := []network.InterfaceInfo{{ + DeviceIndex: 0, + MACAddress: "aa:bb:cc:dd:ee:ff", + CIDR: "192.168.1.2/24", + ProviderSubnetId: "WLAN", + VLANTag: 
0, + InterfaceName: "wlan0", + Disabled: true, + NoAutoStart: true, + ConfigType: network.ConfigDHCP, + ExtraConfig: nil, + GatewayAddress: network.Address{}, + Address: network.NewScopedAddress("192.168.1.2", network.ScopeCloudLocal), + }, { + DeviceIndex: 1, + MACAddress: "aa:bb:cc:dd:ee:f1", + CIDR: "192.168.2.2/24", + ProviderSubnetId: "LAN", + VLANTag: 42, + InterfaceName: "eth0", + Disabled: false, + NoAutoStart: false, + ConfigType: network.ConfigDHCP, + ExtraConfig: nil, + GatewayAddress: network.NewScopedAddress("192.168.2.1", network.ScopeCloudLocal), + Address: network.NewScopedAddress("192.168.2.2", network.ScopeCloudLocal), + }, { + DeviceIndex: 2, + MACAddress: "aa:bb:cc:dd:ee:f2", + CIDR: "192.168.3.2/24", + ProviderSubnetId: "Virt", + VLANTag: 0, + InterfaceName: "vnet1", + Disabled: false, + NoAutoStart: false, + ConfigType: network.ConfigDHCP, + ExtraConfig: nil, + GatewayAddress: network.Address{}, + Address: network.NewScopedAddress("192.168.3.2", network.ScopeCloudLocal), + }} + network.SortInterfaceInfo(netInfo) + c.Assert(netInfo, jc.DeepEquals, expectedInfo) +} === modified file 'src/github.com/juju/juju/provider/maas/maas_test.go' --- src/github.com/juju/juju/provider/maas/maas_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/maas/maas_test.go 2016-03-22 15:18:22 +0000 @@ -4,25 +4,31 @@ package maas import ( + "bytes" + "encoding/json" + "fmt" + "net" + "strconv" "time" + "github.com/juju/gomaasapi" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gomaasapi" - "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" envtesting "github.com/juju/juju/environs/testing" + envtools "github.com/juju/juju/environs/tools" "github.com/juju/juju/feature" + "github.com/juju/juju/instance" "github.com/juju/juju/network" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/version" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" ) -// Ensure maasEnviron supports environs.NetworkingEnviron. 
-var _ environs.NetworkingEnviron = (*maasEnviron)(nil) - type providerSuite struct { - coretesting.FakeJujuHomeSuite + coretesting.FakeJujuXDGDataHomeSuite envtesting.ToolsFixture testMAASObject *gomaasapi.TestMAASObject } @@ -30,7 +36,7 @@ var _ = gc.Suite(&providerSuite{}) func (s *providerSuite) SetUpSuite(c *gc.C) { - s.FakeJujuHomeSuite.SetUpSuite(c) + s.FakeJujuXDGDataHomeSuite.SetUpSuite(c) restoreTimeouts := envtesting.PatchAttemptStrategies(&shortAttempt) TestMAASObject := gomaasapi.NewTestMAAS("1.0") s.testMAASObject = TestMAASObject @@ -48,21 +54,24 @@ } func (s *providerSuite) SetUpTest(c *gc.C) { - s.PatchValue(&version.Current.Number, coretesting.FakeVersionNumber) - s.FakeJujuHomeSuite.SetUpTest(c) + s.PatchValue(&version.Current, coretesting.FakeVersionNumber) + s.PatchValue(&arch.HostArch, func() string { return arch.AMD64 }) + s.PatchValue(&series.HostSeries, func() string { return coretesting.FakeDefaultSeries }) + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) s.ToolsFixture.SetUpTest(c) s.SetFeatureFlags(feature.AddressAllocation) + s.testMAASObject.TestServer.SetVersionJSON(`{"capabilities": ["networks-management","static-ipaddresses"]}`) } func (s *providerSuite) TearDownTest(c *gc.C) { s.testMAASObject.TestServer.Clear() s.ToolsFixture.TearDownTest(c) - s.FakeJujuHomeSuite.TearDownTest(c) + s.FakeJujuXDGDataHomeSuite.TearDownTest(c) } func (s *providerSuite) TearDownSuite(c *gc.C) { s.testMAASObject.Close() - s.FakeJujuHomeSuite.TearDownSuite(c) + s.FakeJujuXDGDataHomeSuite.TearDownSuite(c) } const exampleAgentName = "dfb69555-0bc4-4d1f-85f2-4ee390974984" @@ -76,9 +85,12 @@ // makeEnviron creates a functional maasEnviron for a test. func (suite *providerSuite) makeEnviron() *maasEnviron { - testAttrs := maasEnvAttrs + testAttrs := coretesting.Attrs{} + for k, v := range maasEnvAttrs { + testAttrs[k] = v + } testAttrs["maas-server"] = suite.testMAASObject.TestServer.URL - attrs := coretesting.FakeConfig().Merge(maasEnvAttrs) + attrs := coretesting.FakeConfig().Merge(testAttrs) cfg, err := config.New(config.NoDefaults, attrs) if err != nil { panic(err) @@ -89,3 +101,86 @@ } return env } + +func (suite *providerSuite) setupFakeTools(c *gc.C) { + storageDir := c.MkDir() + suite.PatchValue(&envtools.DefaultBaseURL, "file://"+storageDir+"/tools") + suite.UploadFakeToolsToDirectory(c, storageDir, "released", "released") +} + +func (suite *providerSuite) addNode(jsonText string) instance.Id { + node := suite.testMAASObject.TestServer.NewNode(jsonText) + resourceURI, _ := node.GetField("resource_uri") + return instance.Id(resourceURI) +} + +func (suite *providerSuite) getInstance(systemId string) *maasInstance { + input := fmt.Sprintf(`{"system_id": %q}`, systemId) + node := suite.testMAASObject.TestServer.NewNode(input) + return &maasInstance{&node} +} + +func (suite *providerSuite) getNetwork(name string, id int, vlanTag int) *gomaasapi.MAASObject { + var vlan string + if vlanTag == 0 { + vlan = "null" + } else { + vlan = fmt.Sprintf("%d", vlanTag) + } + var input string + input = fmt.Sprintf(`{"name": %q, "ip":"192.168.%d.1", "netmask": "255.255.255.0",`+ + `"vlan_tag": %s, "description": "%s_%d_%d" }`, name, id, vlan, name, id, vlanTag) + network := suite.testMAASObject.TestServer.NewNetwork(input) + return &network +} + +func createSubnetInfo(subnetID, spaceID, ipRange uint) network.SubnetInfo { + return network.SubnetInfo{ + CIDR: fmt.Sprintf("192.168.%d.0/24", ipRange), + ProviderId: network.Id(strconv.Itoa(int(subnetID))), + AllocatableIPLow: 
net.ParseIP(fmt.Sprintf("192.168.%d.139", ipRange)).To4(), + AllocatableIPHigh: net.ParseIP(fmt.Sprintf("192.168.%d.255", ipRange)).To4(), + SpaceProviderId: network.Id(fmt.Sprintf("Space %d", spaceID)), + } +} + +func createSubnet(ipRange, spaceAndNICID uint) gomaasapi.CreateSubnet { + var s gomaasapi.CreateSubnet + s.DNSServers = []string{"192.168.1.2"} + s.Name = fmt.Sprintf("maas-eth%d", spaceAndNICID) + s.Space = fmt.Sprintf("Space %d", spaceAndNICID) + s.GatewayIP = fmt.Sprintf("192.168.%v.1", ipRange) + s.CIDR = fmt.Sprintf("192.168.%v.0/24", ipRange) + return s +} + +func (suite *providerSuite) addSubnet(c *gc.C, ipRange, spaceAndNICID uint, systemID string) uint { + out := bytes.Buffer{} + err := json.NewEncoder(&out).Encode(createSubnet(ipRange, spaceAndNICID)) + c.Assert(err, jc.ErrorIsNil) + subnet := suite.testMAASObject.TestServer.NewSubnet(&out) + c.Assert(err, jc.ErrorIsNil) + + other := gomaasapi.AddressRange{} + other.Start = fmt.Sprintf("192.168.%d.139", ipRange) + other.End = fmt.Sprintf("192.168.%d.149", ipRange) + other.Purpose = []string{"not-the-dynamic-range"} + suite.testMAASObject.TestServer.AddFixedAddressRange(subnet.ID, other) + + ar := gomaasapi.AddressRange{} + ar.Start = fmt.Sprintf("192.168.%d.10", ipRange) + ar.End = fmt.Sprintf("192.168.%d.138", ipRange) + ar.Purpose = []string{"something", "dynamic-range"} + suite.testMAASObject.TestServer.AddFixedAddressRange(subnet.ID, ar) + if systemID != "" { + var nni gomaasapi.NodeNetworkInterface + nni.Name = subnet.Name + nni.Links = append(nni.Links, gomaasapi.NetworkLink{ + ID: uint(1), + Mode: "auto", + Subnet: subnet, + }) + suite.testMAASObject.TestServer.SetNodeNetworkLink(systemID, nni) + } + return subnet.ID +} === modified file 'src/github.com/juju/juju/provider/maas/storage.go' --- src/github.com/juju/juju/provider/maas/storage.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/provider/maas/storage.go 2016-03-22 15:18:22 +0000 @@ -15,8 +15,8 @@ "sync" "github.com/juju/errors" + "github.com/juju/gomaasapi" "github.com/juju/utils" - "launchpad.net/gomaasapi" "github.com/juju/juju/environs/storage" ) === modified file 'src/github.com/juju/juju/provider/maas/storage_test.go' --- src/github.com/juju/juju/provider/maas/storage_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/provider/maas/storage_test.go 2016-03-22 15:18:22 +0000 @@ -13,9 +13,9 @@ "sync" "github.com/juju/errors" + "github.com/juju/gomaasapi" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "launchpad.net/gomaasapi" "github.com/juju/juju/environs/storage" ) === modified file 'src/github.com/juju/juju/provider/maas/userdata.go' --- src/github.com/juju/juju/provider/maas/userdata.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/maas/userdata.go 2016-03-22 15:18:22 +0000 @@ -7,20 +7,21 @@ import ( "github.com/juju/errors" "github.com/juju/utils" + jujuos "github.com/juju/utils/os" + "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/providerinit/renderers" - "github.com/juju/juju/version" ) type MAASRenderer struct{} -func (MAASRenderer) EncodeUserdata(udata []byte, vers version.OSType) ([]byte, error) { - switch vers { - case version.Ubuntu, version.CentOS: - return renderers.ToBase64(utils.Gzip(udata)), nil - case version.Windows: - return renderers.ToBase64(renderers.WinEmbedInScript(udata)), nil +func (MAASRenderer) Render(cfg cloudinit.CloudConfig, os jujuos.OSType) ([]byte, error) { + switch os { + case jujuos.Ubuntu, jujuos.CentOS: + return 
renderers.RenderYAML(cfg, utils.Gzip, renderers.ToBase64) + case jujuos.Windows: + return renderers.RenderYAML(cfg, renderers.WinEmbedInScript, renderers.ToBase64) default: - return nil, errors.Errorf("Cannot encode userdata for OS: %s", vers) + return nil, errors.Errorf("Cannot encode userdata for OS: %s", os.String()) } } === modified file 'src/github.com/juju/juju/provider/maas/userdata_test.go' --- src/github.com/juju/juju/provider/maas/userdata_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/maas/userdata_test.go 2016-03-22 15:18:22 +0000 @@ -9,12 +9,13 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/os" gc "gopkg.in/check.v1" + "github.com/juju/juju/cloudconfig/cloudinit/cloudinittest" "github.com/juju/juju/cloudconfig/providerinit/renderers" "github.com/juju/juju/provider/maas" "github.com/juju/juju/testing" - "github.com/juju/juju/version" ) type RenderersSuite struct { @@ -25,31 +26,33 @@ func (s *RenderersSuite) TestMAASUnix(c *gc.C) { renderer := maas.MAASRenderer{} - data := []byte("test") - result, err := renderer.EncodeUserdata(data, version.Ubuntu) + cloudcfg := &cloudinittest.CloudConfig{YAML: []byte("yaml")} + + result, err := renderer.Render(cloudcfg, os.Ubuntu) c.Assert(err, jc.ErrorIsNil) - expected := base64.StdEncoding.EncodeToString(utils.Gzip(data)) + expected := base64.StdEncoding.EncodeToString(utils.Gzip(cloudcfg.YAML)) c.Assert(string(result), jc.DeepEquals, expected) - data = []byte("test") - result, err = renderer.EncodeUserdata(data, version.CentOS) + result, err = renderer.Render(cloudcfg, os.CentOS) c.Assert(err, jc.ErrorIsNil) - expected = base64.StdEncoding.EncodeToString(utils.Gzip(data)) + expected = base64.StdEncoding.EncodeToString(utils.Gzip(cloudcfg.YAML)) c.Assert(string(result), jc.DeepEquals, expected) } func (s *RenderersSuite) TestMAASWindows(c *gc.C) { renderer := maas.MAASRenderer{} - data := []byte("test") - result, err := renderer.EncodeUserdata(data, version.Windows) + cloudcfg := &cloudinittest.CloudConfig{YAML: []byte("yaml")} + + result, err := renderer.Render(cloudcfg, os.Windows) c.Assert(err, jc.ErrorIsNil) - expected := base64.StdEncoding.EncodeToString(renderers.WinEmbedInScript(data)) + expected := base64.StdEncoding.EncodeToString(renderers.WinEmbedInScript(cloudcfg.YAML)) c.Assert(string(result), jc.DeepEquals, expected) } func (s *RenderersSuite) TestMAASUnknownOS(c *gc.C) { renderer := maas.MAASRenderer{} - result, err := renderer.EncodeUserdata(nil, version.Arch) + cloudcfg := &cloudinittest.CloudConfig{} + result, err := renderer.Render(cloudcfg, os.Arch) c.Assert(result, gc.IsNil) c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Arch") } === modified file 'src/github.com/juju/juju/provider/maas/util.go' --- src/github.com/juju/juju/provider/maas/util.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/maas/util.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" "github.com/juju/utils" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/environs/config" === modified file 'src/github.com/juju/juju/provider/maas/util_test.go' --- src/github.com/juju/juju/provider/maas/util_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/maas/util_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" 
"github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/instance" === modified file 'src/github.com/juju/juju/provider/maas/volumes.go' --- src/github.com/juju/juju/provider/maas/volumes.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/maas/volumes.go 2016-03-22 15:18:22 +0000 @@ -168,13 +168,13 @@ var volumes []storage.Volume var attachments []storage.VolumeAttachment - deviceInfo, ok := mi.getMaasObject().GetMap()["physicalblockdevice_set"] + deviceInfo, ok := mi.maasObject.GetMap()["physicalblockdevice_set"] // Older MAAS servers don't support storage. if !ok || deviceInfo.IsNil() { return volumes, attachments, nil } - labelsMap, ok := mi.getMaasObject().GetMap()["constraint_map"] + labelsMap, ok := mi.maasObject.GetMap()["constraint_map"] if !ok || labelsMap.IsNil() { return nil, nil, errors.NotFoundf("constraint map field") } === modified file 'src/github.com/juju/juju/provider/maas/volumes_test.go' --- src/github.com/juju/juju/provider/maas/volumes_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/maas/volumes_test.go 2016-03-22 15:18:22 +0000 @@ -74,7 +74,7 @@ func (s *volumeSuite) TestInstanceVolumes(c *gc.C) { obj := s.testMAASObject.TestServer.NewNode(validVolumeJson) - instance := maasInstance{maasObject: &obj, environ: s.makeEnviron()} + instance := maasInstance{&obj} mTag := names.NewMachineTag("1") volumes, attachments, err := instance.volumes(mTag, []names.VolumeTag{ names.NewVolumeTag("1"), @@ -128,7 +128,7 @@ func (s *volumeSuite) TestInstanceVolumesOldMass(c *gc.C) { obj := s.testMAASObject.TestServer.NewNode(`{"system_id": "node0"}`) - instance := maasInstance{maasObject: &obj, environ: s.makeEnviron()} + instance := maasInstance{&obj} volumes, attachments, err := instance.volumes(names.NewMachineTag("1"), []names.VolumeTag{ names.NewVolumeTag("1"), names.NewVolumeTag("2"), === modified file 'src/github.com/juju/juju/provider/manual/config.go' --- src/github.com/juju/juju/provider/manual/config.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/manual/config.go 2016-03-22 15:18:22 +0000 @@ -4,31 +4,23 @@ package manual import ( - "fmt" - "net" - "strconv" - "github.com/juju/schema" "github.com/juju/juju/environs/config" ) -const defaultStoragePort = 8040 - var ( configFields = schema.Fields{ - "bootstrap-host": schema.String(), - "bootstrap-user": schema.String(), - "storage-listen-ip": schema.String(), - "storage-port": schema.ForceInt(), - "storage-auth-key": schema.String(), - "use-sshstorage": schema.Bool(), + "bootstrap-host": schema.String(), + "bootstrap-user": schema.String(), + // NOTE(axw) use-sshstorage, despite its name, is now used + // just for determining whether the code is running inside + // or outside the Juju environment. 
+ "use-sshstorage": schema.Bool(), } configDefaults = schema.Defaults{ - "bootstrap-user": "", - "storage-listen-ip": "", - "storage-port": defaultStoragePort, - "use-sshstorage": true, + "bootstrap-user": "", + "use-sshstorage": true, } ) @@ -37,7 +29,7 @@ attrs map[string]interface{} } -func newEnvironConfig(config *config.Config, attrs map[string]interface{}) *environConfig { +func newModelConfig(config *config.Config, attrs map[string]interface{}) *environConfig { return &environConfig{Config: config, attrs: attrs} } @@ -56,34 +48,3 @@ func (c *environConfig) bootstrapUser() string { return c.attrs["bootstrap-user"].(string) } - -func (c *environConfig) storageListenIPAddress() string { - return c.attrs["storage-listen-ip"].(string) -} - -func (c *environConfig) storagePort() int { - switch val := c.attrs["storage-port"].(type) { - case float64: - return int(val) - case int: - return val - default: - panic(fmt.Sprintf("Unexpected %T in storage-port: %#v. Expected float64 or int.", val, val)) - } -} - -func (c *environConfig) storageAuthKey() string { - return c.attrs["storage-auth-key"].(string) -} - -// storageAddr returns an address for connecting to the -// bootstrap machine's localstorage. -func (c *environConfig) storageAddr() string { - return net.JoinHostPort(c.bootstrapHost(), strconv.Itoa(c.storagePort())) -} - -// storageListenAddr returns an address for the bootstrap -// machine to listen on for its localstorage. -func (c *environConfig) storageListenAddr() string { - return net.JoinHostPort(c.storageListenIPAddress(), strconv.Itoa(c.storagePort())) -} === modified file 'src/github.com/juju/juju/provider/manual/config_test.go' --- src/github.com/juju/juju/provider/manual/config_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/provider/manual/config_test.go 2016-03-22 15:18:22 +0000 @@ -15,20 +15,17 @@ ) type configSuite struct { - coretesting.FakeJujuHomeSuite + coretesting.FakeJujuXDGDataHomeSuite } var _ = gc.Suite(&configSuite{}) func MinimalConfigValues() map[string]interface{} { return map[string]interface{}{ - "name": "test", - "type": "manual", - "bootstrap-host": "hostname", - "bootstrap-user": "", - "storage-auth-key": "whatever", - "storage-port": 8040, - "storage-listen-ip": "", + "name": "test", + "type": "manual", + "bootstrap-host": "hostname", + "bootstrap-user": "", // Not strictly necessary, but simplifies testing by disabling // ssh storage by default. 
"use-sshstorage": false, @@ -46,7 +43,7 @@ return testConfig } -func getEnvironConfig(c *gc.C, attrs map[string]interface{}) *environConfig { +func getModelConfig(c *gc.C, attrs map[string]interface{}) *environConfig { testConfig, err := config.New(config.UseDefaults, attrs) c.Assert(err, jc.ErrorIsNil) envConfig, err := manualProvider{}.validate(testConfig, nil) @@ -61,15 +58,8 @@ _, err = manualProvider{}.Validate(testConfig, nil) c.Assert(err, gc.ErrorMatches, "bootstrap-host must be specified") - testConfig, err = testConfig.Apply(map[string]interface{}{"storage-auth-key": nil}) - c.Assert(err, jc.ErrorIsNil) - _, err = manualProvider{}.Validate(testConfig, nil) - c.Assert(err, gc.ErrorMatches, "storage-auth-key: expected string, got nothing") - values := MinimalConfigValues() delete(values, "bootstrap-user") - delete(values, "storage-listen-ip") - delete(values, "storage-port") testConfig, err = config.New(config.UseDefaults, values) c.Assert(err, jc.ErrorIsNil) @@ -78,8 +68,6 @@ unknownAttrs := valid.UnknownAttrs() c.Assert(unknownAttrs["bootstrap-host"], gc.Equals, "hostname") c.Assert(unknownAttrs["bootstrap-user"], gc.Equals, "") - c.Assert(unknownAttrs["storage-listen-ip"], gc.Equals, "") - c.Assert(unknownAttrs["storage-port"], gc.Equals, int(8040)) } func (s *configSuite) TestConfigMutability(c *gc.C) { @@ -93,10 +81,8 @@ // machine agent's config/upstart config. oldConfig := testConfig for k, v := range map[string]interface{}{ - "bootstrap-host": "new-hostname", - "bootstrap-user": "new-username", - "storage-listen-ip": "10.0.0.123", - "storage-port": 1234, + "bootstrap-host": "new-hostname", + "bootstrap-user": "new-username", } { testConfig = MinimalConfig(c) testConfig, err = testConfig.Apply(map[string]interface{}{k: v}) @@ -110,28 +96,16 @@ func (s *configSuite) TestBootstrapHostUser(c *gc.C) { values := MinimalConfigValues() - testConfig := getEnvironConfig(c, values) + testConfig := getModelConfig(c, values) c.Assert(testConfig.bootstrapHost(), gc.Equals, "hostname") c.Assert(testConfig.bootstrapUser(), gc.Equals, "") values["bootstrap-host"] = "127.0.0.1" values["bootstrap-user"] = "ubuntu" - testConfig = getEnvironConfig(c, values) + testConfig = getModelConfig(c, values) c.Assert(testConfig.bootstrapHost(), gc.Equals, "127.0.0.1") c.Assert(testConfig.bootstrapUser(), gc.Equals, "ubuntu") } -func (s *configSuite) TestStorageParams(c *gc.C) { - values := MinimalConfigValues() - testConfig := getEnvironConfig(c, values) - c.Assert(testConfig.storageAddr(), gc.Equals, "hostname:8040") - c.Assert(testConfig.storageListenAddr(), gc.Equals, ":8040") - values["storage-listen-ip"] = "10.0.0.123" - values["storage-port"] = 1234 - testConfig = getEnvironConfig(c, values) - c.Assert(testConfig.storageAddr(), gc.Equals, "hostname:1234") - c.Assert(testConfig.storageListenAddr(), gc.Equals, "10.0.0.123:1234") -} - func (s *configSuite) TestStorageCompat(c *gc.C) { // Older environment configurations will not have the // use-sshstorage attribute. We treat them as if they @@ -140,26 +114,7 @@ delete(values, "use-sshstorage") cfg, err := config.New(config.UseDefaults, values) c.Assert(err, jc.ErrorIsNil) - envConfig := newEnvironConfig(cfg, values) + envConfig := newModelConfig(cfg, values) c.Assert(err, jc.ErrorIsNil) c.Assert(envConfig.useSSHStorage(), jc.IsFalse) } - -func (s *configSuite) TestConfigWithFloatStoragePort(c *gc.C) { - // When the config values get serialized through JSON, the integers - // get coerced to float64 values. The parsing needs to handle this. 
- values := MinimalConfigValues() - values["storage-port"] = float64(8040) - - cfg, err := config.New(config.UseDefaults, values) - c.Assert(err, jc.ErrorIsNil) - valid, err := ProviderInstance.Validate(cfg, nil) - c.Assert(err, jc.ErrorIsNil) - unknownAttrs := valid.UnknownAttrs() - c.Assert(unknownAttrs["storage-port"], gc.Equals, int(8040)) - - env, err := ProviderInstance.Open(cfg) - c.Assert(err, jc.ErrorIsNil) - // really, we're asserting that this doesn't panic :) - c.Assert(env.(*manualEnviron).cfg.storagePort(), gc.Equals, int(8040)) -} === added file 'src/github.com/juju/juju/provider/manual/credentials.go' --- src/github.com/juju/juju/provider/manual/credentials.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/manual/credentials.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package manual + +import ( + "github.com/juju/juju/cloud" +) + +type environProviderCredentials struct{} + +// CredentialSchemas is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{cloud.EmptyAuthType: {}} +} + +// DetectCredentials is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { + return cloud.NewEmptyCloudCredential(), nil +} === added file 'src/github.com/juju/juju/provider/manual/credentials_test.go' --- src/github.com/juju/juju/provider/manual/credentials_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/manual/credentials_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package manual_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" + "github.com/juju/juju/testing" +) + +type credentialsSuite struct { + testing.BaseSuite + provider environs.EnvironProvider +} + +var _ = gc.Suite(&credentialsSuite{}) + +func (s *credentialsSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + + var err error + s.provider, err = environs.Provider("manual") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { + envtesting.AssertProviderAuthTypes(c, s.provider, "empty") +} + +func (s *credentialsSuite) TestDetectCredentials(c *gc.C) { + credentials, err := s.provider.DetectCredentials() + c.Assert(err, jc.ErrorIsNil) + c.Assert(credentials, jc.DeepEquals, cloud.NewEmptyCloudCredential()) +} === modified file 'src/github.com/juju/juju/provider/manual/environ.go' --- src/github.com/juju/juju/provider/manual/environ.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/manual/environ.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,6 @@ import ( "bytes" "fmt" - "net" "path" "strings" "sync" @@ -14,23 +13,19 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/ssh" "github.com/juju/juju/agent" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" - "github.com/juju/juju/environs/httpstorage" "github.com/juju/juju/environs/manual" - "github.com/juju/juju/environs/sshstorage" - "github.com/juju/juju/environs/storage" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" - "github.com/juju/juju/utils/ssh" - "github.com/juju/juju/worker/localstorage" "github.com/juju/juju/worker/terminationworker" ) @@ -38,15 +33,6 @@ // BootstrapInstanceId is the instance ID used // for the manual provider's bootstrap instance. BootstrapInstanceId instance.Id = "manual:" - - // storageSubdir is the subdirectory of - // dataDir in which storage will be located. - storageSubdir = "storage" - - // storageTmpSubdir is the subdirectory of - // dataDir in which temporary storage will - // be located. - storageTmpSubdir = "storage-tmp" ) var ( @@ -60,7 +46,6 @@ cfg *environConfig cfgmutex sync.Mutex - storage storage.Storage ubuntuUserInited bool ubuntuUserInitMutex sync.Mutex } @@ -101,32 +86,28 @@ return arch.AllSupportedArches, nil } -func (e *manualEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) { +func (e *manualEnviron) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) { // Set "use-sshstorage" to false, so agents know not to use sshstorage. cfg, err := e.Config().Apply(map[string]interface{}{"use-sshstorage": false}) if err != nil { - return "", "", nil, err + return nil, err } if err := e.SetConfig(cfg); err != nil { - return "", "", nil, err - } - agentEnv, err := localstorage.StoreConfig(e) - if err != nil { - return "", "", nil, err + return nil, err } envConfig := e.envConfig() // TODO(axw) consider how we can use placement to override bootstrap-host. 
host := envConfig.bootstrapHost() provisioned, err := manualCheckProvisioned(host) if err != nil { - return "", "", nil, errors.Annotate(err, "failed to check provisioned status") + return nil, errors.Annotate(err, "failed to check provisioned status") } if provisioned { - return "", "", nil, manual.ErrProvisioned + return nil, manual.ErrProvisioned } hc, series, err := manualDetectSeriesAndHardwareCharacteristics(host) if err != nil { - return "", "", nil, err + return nil, err } finalize := func(ctx environs.BootstrapContext, icfg *instancecfg.InstanceConfig) error { icfg.InstanceId = BootstrapInstanceId @@ -134,16 +115,19 @@ if err := instancecfg.FinishInstanceConfig(icfg, e.Config()); err != nil { return err } - for k, v := range agentEnv { - icfg.AgentEnvironment[k] = v - } return common.ConfigureMachine(ctx, ssh.DefaultClient, host, icfg) } - return *hc.Arch, series, finalize, nil + + result := &environs.BootstrapResult{ + Arch: *hc.Arch, + Series: series, + Finalize: finalize, + } + return result, nil } -// StateServerInstances is specified in the Environ interface. -func (e *manualEnviron) StateServerInstances() ([]instance.Id, error) { +// ControllerInstances is specified in the Environ interface. +func (e *manualEnviron) ControllerInstances() ([]instance.Id, error) { // If we're running from the bootstrap host, then // useSSHStorage will be false; in that case, we // do not need or want to verify the bootstrap host. @@ -160,7 +144,7 @@ // if the agents directory exists. Note that we cannot test the // root data directory, as that is created in the process of // initialising sshstorage. - agentsDir := path.Join(agent.DefaultDataDir, "agents") + agentsDir := path.Join(agent.DefaultPaths.DataDir, "agents") const noAgentDir = "no-agent-dir" stdin := fmt.Sprintf( "test -d %s || echo %s", @@ -193,37 +177,7 @@ if err != nil { return err } - envConfig := newEnvironConfig(cfg, cfg.UnknownAttrs()) - // Set storage. If "use-sshstorage" is true then use the SSH storage. - // Otherwise, use HTTP storage. - // - // We don't change storage once it's been set. Storage parameters - // are fixed at bootstrap time, and it is not possible to change - // them. 
- if e.storage == nil { - var stor storage.Storage - if envConfig.useSSHStorage() { - storageDir := e.StorageDir() - storageTmpdir := path.Join(agent.DefaultDataDir, storageTmpSubdir) - stor, err = newSSHStorage("ubuntu@"+e.cfg.bootstrapHost(), storageDir, storageTmpdir) - if err != nil { - return fmt.Errorf("initialising SSH storage failed: %v", err) - } - } else { - caCertPEM, ok := envConfig.CACert() - if !ok { - // should not be possible to validate base config - return fmt.Errorf("ca-cert not set") - } - authkey := envConfig.storageAuthKey() - stor, err = httpstorage.ClientTLS(envConfig.storageAddr(), caCertPEM, authkey) - if err != nil { - return fmt.Errorf("initialising HTTPS storage failed: %v", err) - } - } - e.storage = stor - } - e.cfg = envConfig + e.cfg = newModelConfig(cfg, cfg.UnknownAttrs()) return nil } @@ -249,21 +203,6 @@ return instances, err } -var newSSHStorage = func(sshHost, storageDir, storageTmpdir string) (storage.Storage, error) { - logger.Debugf("using ssh storage at host %q dir %q", sshHost, storageDir) - return sshstorage.NewSSHStorage(sshstorage.NewSSHStorageParams{ - Host: sshHost, - StorageDir: storageDir, - TmpDir: storageTmpdir, - }) -} - -func (e *manualEnviron) Storage() storage.Storage { - e.cfgmutex.Lock() - defer e.cfgmutex.Unlock() - return e.storage -} - var runSSHCommand = func(host string, command []string, stdin string) (stdout string, err error) { cmd := ssh.Command(host, command, nil) cmd.Stdin = strings.NewReader(stdin) @@ -293,13 +232,13 @@ script = fmt.Sprintf( script, utils.ShQuote(path.Join( - agent.DefaultDataDir, + agent.DefaultPaths.DataDir, agent.UninstallAgentFile, )), terminationworker.TerminationSignal, - mongo.ServiceName(""), - utils.ShQuote(agent.DefaultDataDir), - utils.ShQuote(agent.DefaultLogDir), + mongo.ServiceName, + utils.ShQuote(agent.DefaultPaths.DataDir), + utils.ShQuote(agent.DefaultPaths.LogDir), ) _, err := runSSHCommand( "ubuntu@"+e.envConfig().bootstrapHost(), @@ -340,50 +279,3 @@ func (*manualEnviron) Provider() environs.EnvironProvider { return manualProvider{} } - -func (e *manualEnviron) StorageAddr() string { - return e.envConfig().storageListenAddr() -} - -func (e *manualEnviron) StorageDir() string { - return path.Join(agent.DefaultDataDir, storageSubdir) -} - -func (e *manualEnviron) SharedStorageAddr() string { - return "" -} - -func (e *manualEnviron) SharedStorageDir() string { - return "" -} - -func (e *manualEnviron) StorageCACert() string { - if cert, ok := e.envConfig().CACert(); ok { - return cert - } - return "" -} - -func (e *manualEnviron) StorageCAKey() string { - if key, ok := e.envConfig().CAPrivateKey(); ok { - return key - } - return "" -} - -func (e *manualEnviron) StorageHostnames() []string { - cfg := e.envConfig() - hostnames := []string{cfg.bootstrapHost()} - if ip := net.ParseIP(cfg.storageListenIPAddress()); ip != nil { - if !ip.IsUnspecified() { - hostnames = append(hostnames, ip.String()) - } - } - return hostnames -} - -func (e *manualEnviron) StorageAuthKey() string { - return e.envConfig().storageAuthKey() -} - -var _ localstorage.LocalTLSStorageConfig = (*manualEnviron)(nil) === modified file 'src/github.com/juju/juju/provider/manual/environ_test.go' --- src/github.com/juju/juju/provider/manual/environ_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/manual/environ_test.go 2016-03-22 15:18:22 +0000 @@ -7,30 +7,25 @@ "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" gc 
"gopkg.in/check.v1" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" - "github.com/juju/juju/environs/storage" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" coretesting "github.com/juju/juju/testing" ) type environSuite struct { - coretesting.FakeJujuHomeSuite + coretesting.FakeJujuXDGDataHomeSuite env *manualEnviron } -type dummyStorage struct { - storage.Storage -} - var _ = gc.Suite(&environSuite{}) func (s *environSuite) SetUpTest(c *gc.C) { - s.FakeJujuHomeSuite.SetUpTest(c) + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) env, err := manualProvider{}.Open(MinimalConfig(c)) c.Assert(err, jc.ErrorIsNil) s.env = env.(*manualEnviron) @@ -123,14 +118,6 @@ } } -func (s *environSuite) TestLocalStorageConfig(c *gc.C) { - c.Assert(s.env.StorageDir(), gc.Equals, "/var/lib/juju/storage") - c.Assert(s.env.cfg.storageListenAddr(), gc.Equals, ":8040") - c.Assert(s.env.StorageAddr(), gc.Equals, s.env.cfg.storageListenAddr()) - c.Assert(s.env.SharedStorageAddr(), gc.Equals, "") - c.Assert(s.env.SharedStorageDir(), gc.Equals, "") -} - func (s *environSuite) TestSupportedArchitectures(c *gc.C) { arches, err := s.env.SupportedArchitectures() c.Assert(err, jc.ErrorIsNil) @@ -152,14 +139,14 @@ } type bootstrapSuite struct { - coretesting.FakeJujuHomeSuite + coretesting.FakeJujuXDGDataHomeSuite env *manualEnviron } var _ = gc.Suite(&bootstrapSuite{}) func (s *bootstrapSuite) SetUpTest(c *gc.C) { - s.FakeJujuHomeSuite.SetUpTest(c) + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) // ensure use-sshstorage=true to mimic what happens // in the real client: the environment is Prepared, @@ -188,7 +175,7 @@ cfg := s.env.Config() c.Assert(cfg.UnknownAttrs()["use-sshstorage"], jc.IsTrue) - _, _, _, err := s.env.Bootstrap(envtesting.BootstrapContext(c), environs.BootstrapParams{}) + _, err := s.env.Bootstrap(envtesting.BootstrapContext(c), environs.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) // Bootstrap must set use-sshstorage to false within the environment. @@ -196,18 +183,17 @@ c.Assert(cfg.UnknownAttrs()["use-sshstorage"], jc.IsFalse) } -type stateServerInstancesSuite struct { - coretesting.FakeJujuHomeSuite +type controllerInstancesSuite struct { + coretesting.FakeJujuXDGDataHomeSuite env *manualEnviron } -var _ = gc.Suite(&stateServerInstancesSuite{}) - -func (s *stateServerInstancesSuite) SetUpTest(c *gc.C) { - s.FakeJujuHomeSuite.SetUpTest(c) - +var _ = gc.Suite(&controllerInstancesSuite{}) + +func (s *controllerInstancesSuite) SetUpTest(c *gc.C) { + s.FakeJujuXDGDataHomeSuite.SetUpTest(c) // ensure use-sshstorage=true, or bootstrap-host - // verification won't happen in StateServerInstances. + // verification won't happen in ControllerInstances. 
cfg := MinimalConfig(c) cfg, err := cfg.Apply(map[string]interface{}{ "use-sshstorage": true, @@ -219,7 +205,7 @@ s.env = env.(*manualEnviron) } -func (s *stateServerInstancesSuite) TestStateServerInstances(c *gc.C) { +func (s *controllerInstancesSuite) TestControllerInstances(c *gc.C) { var outputResult string var errResult error runSSHCommandTesting := func(host string, command []string, stdin string) (string, error) { @@ -236,7 +222,7 @@ output: "", }, { output: "no-agent-dir", - expectedErr: "environment is not bootstrapped", + expectedErr: "model is not bootstrapped", }, { output: "woo", expectedErr: `unexpected output: "woo"`, @@ -249,7 +235,7 @@ c.Logf("test %d", i) outputResult = test.output errResult = test.err - instances, err := s.env.StateServerInstances() + instances, err := s.env.ControllerInstances() if test.expectedErr == "" { c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.DeepEquals, []instance.Id{BootstrapInstanceId}) @@ -260,28 +246,28 @@ } } -func (s *stateServerInstancesSuite) TestStateServerInstancesStderr(c *gc.C) { - // Stderr should not affect the behaviour of StateServerInstances. +func (s *controllerInstancesSuite) TestControllerInstancesStderr(c *gc.C) { + // Stderr should not affect the behaviour of ControllerInstances. testing.PatchExecutable(c, s, "ssh", "#!/bin/sh\nhead -n1 > /dev/null; echo abc >&2; exit 0") - _, err := s.env.StateServerInstances() + _, err := s.env.ControllerInstances() c.Assert(err, jc.ErrorIsNil) } -func (s *stateServerInstancesSuite) TestStateServerInstancesError(c *gc.C) { +func (s *controllerInstancesSuite) TestControllerInstancesError(c *gc.C) { // If the ssh execution fails, its stderr will be captured in the error message. testing.PatchExecutable(c, s, "ssh", "#!/bin/sh\nhead -n1 > /dev/null; echo abc >&2; exit 1") - _, err := s.env.StateServerInstances() + _, err := s.env.ControllerInstances() c.Assert(err, gc.ErrorMatches, "abc: .*") } -func (s *stateServerInstancesSuite) TestStateServerInstancesInternal(c *gc.C) { +func (s *controllerInstancesSuite) TestControllerInstancesInternal(c *gc.C) { // If use-sshstorage=false, then we're on the bootstrap host; // verification is elided. env, err := manualProvider{}.Open(MinimalConfig(c)) c.Assert(err, jc.ErrorIsNil) testing.PatchExecutable(c, s, "ssh", "#!/bin/sh\nhead -n1 > /dev/null; echo abc >&2; exit 1") - instances, err := env.StateServerInstances() + instances, err := env.ControllerInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.DeepEquals, []instance.Id{BootstrapInstanceId}) } === modified file 'src/github.com/juju/juju/provider/manual/export_test.go' --- src/github.com/juju/juju/provider/manual/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/provider/manual/export_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,6 @@ var ( ProviderInstance = manualProvider{} - NewSSHStorage = &newSSHStorage InitUbuntuUser = &initUbuntuUser ) === modified file 'src/github.com/juju/juju/provider/manual/provider.go' --- src/github.com/juju/juju/provider/manual/provider.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/manual/provider.go 2016-03-22 15:18:22 +0000 @@ -7,14 +7,16 @@ "fmt" "github.com/juju/errors" - "github.com/juju/utils" + "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/manual" ) -type manualProvider struct{} +type manualProvider struct { + environProviderCredentials +} // Verify that we conform to the interface. 
var _ environs.EnvironProvider = (*manualProvider)(nil)
@@ -38,24 +40,42 @@
 	return []string{"bootstrap-host", "bootstrap-user"}
 }
+// DetectRegions is specified in the environs.CloudRegionDetector interface.
+func (p manualProvider) DetectRegions() ([]cloud.Region, error) {
+	return nil, errors.NotFoundf("regions")
+}
+
 // PrepareForCreateEnvironment is specified in the EnvironProvider interface.
 func (p manualProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) {
 	// Not even sure if this will ever make sense.
 	return nil, errors.NotImplementedf("PrepareForCreateEnvironment")
 }
 
-func (p manualProvider) PrepareForBootstrap(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) {
-	if _, ok := cfg.UnknownAttrs()["storage-auth-key"]; !ok {
-		uuid, err := utils.NewUUID()
-		if err != nil {
-			return nil, err
-		}
-		cfg, err = cfg.Apply(map[string]interface{}{
-			"storage-auth-key": uuid.String(),
-		})
-		if err != nil {
-			return nil, err
-		}
+func (p manualProvider) PrepareForBootstrap(ctx environs.BootstrapContext, args environs.PrepareForBootstrapParams) (environs.Environ, error) {
+
+	var bootstrapHost string
+	switch {
+	case args.CloudEndpoint != "":
+		// If an endpoint is specified, then we expect that the user
+		// has specified in their clouds.yaml a region with the
+		// bootstrap host as the endpoint.
+		bootstrapHost = args.CloudEndpoint
+	case args.CloudRegion != "":
+		// If only a region is specified, then we expect that the user
+		// has run "juju bootstrap manual/<host>", and treat the region
+		// name as the name of the bootstrap machine.
+		bootstrapHost = args.CloudRegion
+	default:
+		return nil, errors.Errorf(
+			"missing address of host to bootstrap: " +
+				`please specify "juju bootstrap manual/<host>"`,
+		)
+	}
+	cfg, err := args.Config.Apply(map[string]interface{}{
+		"bootstrap-host": bootstrapHost,
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
 	}
 	if use, ok := cfg.UnknownAttrs()["use-sshstorage"].(bool); ok && !use {
 		return nil, fmt.Errorf("use-sshstorage must not be specified")
@@ -68,7 +88,7 @@
 	if err != nil {
 		return nil, err
 	}
-	envConfig = newEnvironConfig(cfg, envConfig.attrs)
+	envConfig = newModelConfig(cfg, envConfig.attrs)
 	if err := ensureBootstrapUbuntuUser(ctx, envConfig); err != nil {
 		return nil, err
 	}
@@ -83,7 +103,7 @@
 	// validate adds missing manual-specific config attributes
 	// with their defaults in the result; we don't want that in
 	// Open.
- envConfig := newEnvironConfig(cfg, cfg.UnknownAttrs()) + envConfig := newModelConfig(cfg, cfg.UnknownAttrs()) return p.open(envConfig) } @@ -112,7 +132,7 @@ if err != nil { return nil, err } - envConfig := newEnvironConfig(cfg, validated) + envConfig := newModelConfig(cfg, validated) if envConfig.bootstrapHost() == "" { return nil, errNoBootstrapHost } @@ -125,16 +145,11 @@ for _, key := range [...]string{ "bootstrap-user", "bootstrap-host", - "storage-listen-ip", } { if err = checkImmutableString(envConfig, oldEnvConfig, key); err != nil { return nil, err } } - oldPort, newPort := oldEnvConfig.storagePort(), envConfig.storagePort() - if oldPort != newPort { - return nil, fmt.Errorf("cannot change storage-port from %q to %q", oldPort, newPort) - } oldUseSSHStorage, newUseSSHStorage := oldEnvConfig.useSSHStorage(), envConfig.useSSHStorage() if oldUseSSHStorage != newUseSSHStorage && newUseSSHStorage == true { return nil, fmt.Errorf("cannot change use-sshstorage from %v to %v", oldUseSSHStorage, newUseSSHStorage) @@ -166,52 +181,7 @@ return cfg.Apply(envConfig.attrs) } -func (_ manualProvider) BoilerplateConfig() string { - return ` -manual: - type: manual - # bootstrap-host holds the host name of the machine where the - # bootstrap machine agent will be started. - bootstrap-host: somehost.example.com - - # bootstrap-user specifies the user to authenticate as when - # connecting to the bootstrap machine. It defaults to - # the current user. - # bootstrap-user: joebloggs - - # storage-listen-ip specifies the IP address that the - # bootstrap machine's Juju storage server will listen - # on. By default, storage will be served on all - # network interfaces. - # storage-listen-ip: - - # storage-port specifes the TCP port that the - # bootstrap machine's Juju storage server will listen - # on. It defaults to ` + fmt.Sprint(defaultStoragePort) + ` - # storage-port: ` + fmt.Sprint(defaultStoragePort) + ` - - # Whether or not to refresh the list of available updates for an - # OS. The default option of true is recommended for use in - # production systems. - # - # enable-os-refresh-update: true - - # Whether or not to perform OS upgrades when machines are - # provisioned. The default option of false is set so that Juju - # does not subsume any other way the system might be - # maintained. 
-	#
-	# enable-os-upgrade: false
-
-`[1:]
-}
-
 func (p manualProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) {
-	envConfig, err := p.validate(cfg, nil)
-	if err != nil {
-		return nil, err
-	}
 	attrs := make(map[string]string)
-	attrs["storage-auth-key"] = envConfig.storageAuthKey()
 	return attrs, nil
 }

=== modified file 'src/github.com/juju/juju/provider/manual/provider_test.go'
--- src/github.com/juju/juju/provider/manual/provider_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/provider/manual/provider_test.go	2016-03-22 15:18:22 +0000
@@ -4,45 +4,65 @@
 package manual_test
 
 import (
-	"fmt"
 	"io"
 
+	"github.com/juju/testing"
 	jc "github.com/juju/testing/checkers"
-	"github.com/juju/utils"
 	gc "gopkg.in/check.v1"
 
 	"github.com/juju/juju/environs"
 	"github.com/juju/juju/environs/config"
-	"github.com/juju/juju/environs/storage"
 	envtesting "github.com/juju/juju/environs/testing"
 	"github.com/juju/juju/provider/manual"
 	coretesting "github.com/juju/juju/testing"
 )
 
 type providerSuite struct {
-	coretesting.FakeJujuHomeSuite
+	coretesting.FakeJujuXDGDataHomeSuite
+	testing.Stub
 }
 
 var _ = gc.Suite(&providerSuite{})
 
 func (s *providerSuite) SetUpTest(c *gc.C) {
-	s.FakeJujuHomeSuite.SetUpTest(c)
+	s.FakeJujuXDGDataHomeSuite.SetUpTest(c)
+	s.Stub.ResetCalls()
 	s.PatchValue(manual.InitUbuntuUser, func(host, user, keys string, stdin io.Reader, stdout io.Writer) error {
-		return nil
+		s.AddCall("InitUbuntuUser", host, user, keys, stdin, stdout)
+		return s.NextErr()
 	})
 }
 
-func (s *providerSuite) TestPrepareForBootstrap(c *gc.C) {
+func (s *providerSuite) TestPrepareForBootstrapCloudEndpointAndRegion(c *gc.C) {
+	ctx, err := s.testPrepareForBootstrap(c, "endpoint", "region")
+	c.Assert(err, jc.ErrorIsNil)
+	s.CheckCall(c, 0, "InitUbuntuUser", "endpoint", "", "public auth key\n", ctx.GetStdin(), ctx.GetStdout())
+}
+
+func (s *providerSuite) TestPrepareForBootstrapCloudRegionOnly(c *gc.C) {
+	ctx, err := s.testPrepareForBootstrap(c, "", "region")
+	c.Assert(err, jc.ErrorIsNil)
+	s.CheckCall(c, 0, "InitUbuntuUser", "region", "", "public auth key\n", ctx.GetStdin(), ctx.GetStdout())
+}
+
+func (s *providerSuite) TestPrepareForBootstrapNoCloudEndpointOrRegion(c *gc.C) {
+	_, err := s.testPrepareForBootstrap(c, "", "")
+	c.Assert(err, gc.ErrorMatches,
+		`missing address of host to bootstrap: please specify "juju bootstrap manual/<host>"`)
+}
+
+func (s *providerSuite) testPrepareForBootstrap(c *gc.C, endpoint, region string) (environs.BootstrapContext, error) {
 	minimal := manual.MinimalConfigValues()
 	minimal["use-sshstorage"] = true
-	delete(minimal, "storage-auth-key")
 	testConfig, err := config.New(config.UseDefaults, minimal)
 	c.Assert(err, jc.ErrorIsNil)
-	env, err := manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), testConfig)
-	c.Assert(err, jc.ErrorIsNil)
-	cfg := env.Config()
-	key, _ := cfg.UnknownAttrs()["storage-auth-key"].(string)
-	c.Assert(key, jc.Satisfies, utils.IsValidUUIDString)
+	ctx := envtesting.BootstrapContext(c)
+	_, err = manual.ProviderInstance.PrepareForBootstrap(ctx, environs.PrepareForBootstrapParams{
+		Config:        testConfig,
+		CloudEndpoint: endpoint,
+		CloudRegion:   region,
+	})
+	return ctx, err
 }
 
 func (s *providerSuite) TestPrepareUseSSHStorage(c *gc.C) {
@@ -50,17 +70,20 @@
 	minimal["use-sshstorage"] = false
 	testConfig, err := config.New(config.UseDefaults, minimal)
 	c.Assert(err, jc.ErrorIsNil)
-	_, err = manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), testConfig)
+	_, err =
manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), environs.PrepareForBootstrapParams{ + Config: testConfig, + CloudEndpoint: "hostname", + }) c.Assert(err, gc.ErrorMatches, "use-sshstorage must not be specified") - s.PatchValue(manual.NewSSHStorage, func(sshHost, storageDir, storageTmpdir string) (storage.Storage, error) { - return nil, fmt.Errorf("newSSHStorage failed") - }) minimal["use-sshstorage"] = true testConfig, err = config.New(config.UseDefaults, minimal) c.Assert(err, jc.ErrorIsNil) - _, err = manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), testConfig) - c.Assert(err, gc.ErrorMatches, "initialising SSH storage failed: newSSHStorage failed") + _, err = manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), environs.PrepareForBootstrapParams{ + Config: testConfig, + CloudEndpoint: "hostname", + }) + c.Assert(err, jc.ErrorIsNil) } func (s *providerSuite) TestPrepareSetsUseSSHStorage(c *gc.C) { @@ -69,7 +92,10 @@ testConfig, err := config.New(config.UseDefaults, attrs) c.Assert(err, jc.ErrorIsNil) - env, err := manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), testConfig) + env, err := manual.ProviderInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), environs.PrepareForBootstrapParams{ + Config: testConfig, + CloudEndpoint: "hostname", + }) c.Assert(err, jc.ErrorIsNil) cfg := env.Config() value := cfg.AllAttrs()["use-sshstorage"] === modified file 'src/github.com/juju/juju/provider/manual/suite_test.go' --- src/github.com/juju/juju/provider/manual/suite_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/manual/suite_test.go 2016-03-22 15:18:22 +0000 @@ -8,9 +8,6 @@ "testing" gc "gopkg.in/check.v1" - - "github.com/juju/juju/environs/storage" - "github.com/juju/juju/provider/manual" ) func Test(t *testing.T) { @@ -19,9 +16,5 @@ if runtime.GOOS == "windows" { t.Skip("Manual provider is not yet supported on windows") } - // Prevent any use of ssh for storage. 
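
The rewritten provider tests above lean on the call-recording pattern from github.com/juju/testing: the suite embeds testing.Stub, the patched InitUbuntuUser records its arguments with AddCall, and CheckCall later verifies them. A minimal sketch of just that pattern; suite and fakeInit are illustrative names, not part of the diff:

    package stubsketch // hypothetical package, for illustration only

    import (
        "github.com/juju/testing"
        gc "gopkg.in/check.v1"
    )

    type suite struct {
        testing.Stub
    }

    // fakeInit stands in for a patched dependency: it records the call
    // and returns the next queued error (nil unless SetErrors was used).
    func (s *suite) fakeInit(host, user string) error {
        s.AddCall("InitUbuntuUser", host, user)
        return s.NextErr()
    }

    func (s *suite) TestRecorded(c *gc.C) {
        c.Assert(s.fakeInit("endpoint", ""), gc.IsNil)
        s.CheckCall(c, 0, "InitUbuntuUser", "endpoint", "") // verify call 0
    }
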
- *manual.NewSSHStorage = func(sshHost, storageDir, storageTmpdir string) (storage.Storage, error) { - return nil, nil - } gc.TestingT(t) } === modified file 'src/github.com/juju/juju/provider/openstack/cinder.go' --- src/github.com/juju/juju/provider/openstack/cinder.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/openstack/cinder.go 2016-03-22 15:18:22 +0000 @@ -55,12 +55,12 @@ } uuid, ok := environConfig.UUID() if !ok { - return nil, errors.NotFoundf("environment UUID") + return nil, errors.NotFoundf("model UUID") } source := &cinderVolumeSource{ storageAdapter: storageAdapter, envName: environConfig.Name(), - envUUID: uuid, + modelUUID: uuid, } return source, nil } @@ -99,7 +99,7 @@ type cinderVolumeSource struct { storageAdapter openstackStorage envName string // non unique, informational only - envUUID string + modelUUID string } var _ storage.VolumeSource = (*cinderVolumeSource)(nil) @@ -161,8 +161,8 @@ } volumeIds := make([]string, 0, len(cinderVolumes)) for _, volume := range cinderVolumes { - envUUID, ok := volume.Metadata[tags.JujuEnv] - if !ok || envUUID != s.envUUID { + modelUUID, ok := volume.Metadata[tags.JujuModel] + if !ok || modelUUID != s.modelUUID { continue } volumeIds = append(volumeIds, cinderToJujuVolumeInfo(&volume).VolumeId) === modified file 'src/github.com/juju/juju/provider/openstack/cinder_test.go' --- src/github.com/juju/juju/provider/openstack/cinder_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/openstack/cinder_test.go 2016-03-22 15:18:22 +0000 @@ -216,12 +216,12 @@ }, { ID: "volume-2", Metadata: map[string]string{ - tags.JujuEnv: "something-else", + tags.JujuModel: "something-else", }, }, { ID: "volume-3", Metadata: map[string]string{ - tags.JujuEnv: testing.EnvironmentTag.Id(), + tags.JujuModel: testing.ModelTag.Id(), }, }}, nil }, === modified file 'src/github.com/juju/juju/provider/openstack/config.go' --- src/github.com/juju/juju/provider/openstack/config.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/openstack/config.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ "fmt" "net/url" + "github.com/juju/errors" "github.com/juju/schema" "gopkg.in/goose.v1/identity" "gopkg.in/juju/environschema.v1" @@ -66,10 +67,6 @@ Type: environschema.Tstring, EnvVars: identity.CredEnvRegion, }, - "control-bucket": { - Description: "The name to use for the control bucket (do not set unless you know what you are doing!).", - Type: environschema.Tstring, - }, "use-floating-ip": { Description: "Whether a floating IP address is required to give the nodes a public IP address. 
Some installations assign public IP addresses by default without requiring a floating IP address.", Type: environschema.Tbool, @@ -92,21 +89,6 @@ return fs }() -var configDefaults = schema.Defaults{ - "username": "", - "password": "", - "tenant-name": "", - "auth-url": "", - "auth-mode": string(AuthUserPass), - "access-key": "", - "secret-key": "", - "region": "", - "control-bucket": "", - "use-floating-ip": false, - "use-default-secgroup": false, - "network": "", -} - type environConfig struct { *config.Config attrs map[string]interface{} @@ -144,10 +126,6 @@ return c.attrs["secret-key"].(string) } -func (c *environConfig) controlBucket() string { - return c.attrs["control-bucket"].(string) -} - func (c *environConfig) useFloatingIP() bool { return c.attrs["use-floating-ip"].(bool) } @@ -160,14 +138,6 @@ return c.attrs["network"].(string) } -func (p environProvider) newConfig(cfg *config.Config) (*environConfig, error) { - valid, err := p.Validate(cfg, nil) - if err != nil { - return nil, err - } - return &environConfig{valid, valid.UnknownAttrs()}, nil -} - type AuthMode string const ( @@ -177,7 +147,7 @@ ) // Schema returns the configuration schema for an environment. -func (environProvider) Schema() environschema.Fields { +func (EnvironProvider) Schema() environschema.Fields { fields, err := config.Schema(configSchema) if err != nil { panic(err) @@ -185,19 +155,19 @@ return fields } -func (p environProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { +func (p EnvironProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { // Check for valid changes for the base config values. if err := config.Validate(cfg, old); err != nil { return nil, err } - validated, err := cfg.ValidateUnknownAttrs(configFields, configDefaults) + validated, err := cfg.ValidateUnknownAttrs(configFields, p.Configurator.GetConfigDefaults()) if err != nil { return nil, err } // Add Openstack specific defaults. - providerDefaults := make(map[string]interface{}) + providerDefaults := map[string]interface{}{} // Storage. 
if _, ok := cfg.StorageDefaultBlockSource(); !ok { @@ -211,61 +181,38 @@ ecfg := &environConfig{cfg, validated} - if ecfg.authURL() != "" { - parts, err := url.Parse(ecfg.authURL()) - if err != nil || parts.Host == "" || parts.Scheme == "" { - return nil, fmt.Errorf("invalid auth-url value %q", ecfg.authURL()) - } - } - cred := identity.CredentialsFromEnv() - format := "required environment variable not set for credentials attribute: %s" switch ecfg.authMode() { case AuthUserPass, AuthLegacy: if ecfg.username() == "" { - if cred.User == "" { - return nil, fmt.Errorf(format, "User") - } - ecfg.attrs["username"] = cred.User + return nil, errors.NotValidf("missing username") } if ecfg.password() == "" { - if cred.Secrets == "" { - return nil, fmt.Errorf(format, "Secrets") - } - ecfg.attrs["password"] = cred.Secrets + return nil, errors.NotValidf("missing password") } case AuthKeyPair: if ecfg.accessKey() == "" { - if cred.User == "" { - return nil, fmt.Errorf(format, "User") - } - ecfg.attrs["access-key"] = cred.User + return nil, errors.NotValidf("missing access-key") } if ecfg.secretKey() == "" { - if cred.Secrets == "" { - return nil, fmt.Errorf(format, "Secrets") - } - ecfg.attrs["secret-key"] = cred.Secrets + return nil, errors.NotValidf("missing secret-key") } default: return nil, fmt.Errorf("unexpected authentication mode %q", ecfg.authMode()) } + if ecfg.authURL() == "" { - if cred.URL == "" { - return nil, fmt.Errorf(format, "URL") - } - ecfg.attrs["auth-url"] = cred.URL + return nil, errors.NotValidf("missing auth-url") } if ecfg.tenantName() == "" { - if cred.TenantName == "" { - return nil, fmt.Errorf(format, "TenantName") - } - ecfg.attrs["tenant-name"] = cred.TenantName + return nil, errors.NotValidf("missing tenant-name") } if ecfg.region() == "" { - if cred.Region == "" { - return nil, fmt.Errorf(format, "Region") - } - ecfg.attrs["region"] = cred.Region + return nil, errors.NotValidf("missing region") + } + + parts, err := url.Parse(ecfg.authURL()) + if err != nil || parts.Host == "" || parts.Scheme == "" { + return nil, fmt.Errorf("invalid auth-url value %q", ecfg.authURL()) } if old != nil { @@ -273,9 +220,6 @@ if region, _ := attrs["region"].(string); ecfg.region() != region { return nil, fmt.Errorf("cannot change region from %q to %q", region, ecfg.region()) } - if controlBucket, _ := attrs["control-bucket"].(string); ecfg.controlBucket() != controlBucket { - return nil, fmt.Errorf("cannot change control-bucket from %q to %q", controlBucket, ecfg.controlBucket()) - } } // Check for deprecated fields and log a warning. 
// We also print to stderr to ensure the user sees the message.
@@ -295,7 +239,7 @@
 	msg := fmt.Sprintf(
 		"Config attribute %q (%v) is deprecated and ignored.\n"+
 			"The correct instance flavor is determined using constraints, globally specified\n"+
-			"when an environment is bootstrapped, or individually when a charm is deployed.\n"+
+			"when a model is bootstrapped, or individually when a charm is deployed.\n"+
 			"See 'juju help bootstrap' or 'juju help deploy'.",
 		"default-instance-type", defaultInstanceType)
 	logger.Warningf(msg)

=== modified file 'src/github.com/juju/juju/provider/openstack/config_test.go'
--- src/github.com/juju/juju/provider/openstack/config_test.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/provider/openstack/config_test.go	2016-03-22 15:18:22 +0000
@@ -9,6 +9,7 @@
 	jc "github.com/juju/testing/checkers"
 	gc "gopkg.in/check.v1"
 
+	"github.com/juju/juju/cloud"
 	"github.com/juju/juju/environs"
 	"github.com/juju/juju/environs/config"
 	envtesting "github.com/juju/juju/environs/testing"
@@ -46,12 +47,11 @@
 // parse matches the given error.
 type configTest struct {
 	summary                 string
-	config                  map[string]interface{}
+	config                  testing.Attrs
 	change                  map[string]interface{}
 	expect                  map[string]interface{}
 	envVars                 map[string]string
 	region                  string
-	controlBucket           string
 	useFloatingIP           bool
 	useDefaultSecurityGroup bool
 	network                 string
@@ -69,7 +69,13 @@
 	blockStorageSource string
 }
 
-type attrs map[string]interface{}
+var requiredConfig = testing.Attrs{
+	"region":      "configtest",
+	"auth-url":    "http://auth",
+	"username":    "user",
+	"password":    "pass",
+	"tenant-name": "tenant",
+}
 
 func restoreEnvVars(envVars map[string]string) {
 	for k, v := range envVars {
@@ -79,8 +85,7 @@
 
 func (t configTest) check(c *gc.C) {
 	attrs := testing.FakeConfig().Merge(testing.Attrs{
-		"type":           "openstack",
-		"control-bucket": "x",
+		"type": "openstack",
 	}).Merge(t.config)
 
 	cfg, err := config.New(config.NoDefaults, attrs)
@@ -102,7 +107,7 @@
 	// Testing a change in configuration.
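
The hunks below rebuild the whole configTests table on a shared requiredConfig base, deriving each case with Merge and Delete. A minimal sketch of that pattern, assuming only the coretesting.Attrs helpers used in this file:

    package attrsketch // hypothetical package, for illustration only

    import "github.com/juju/juju/testing"

    var base = testing.Attrs{
        "region":   "configtest",
        "auth-url": "http://auth",
    }

    // Merge returns a copy with the extra keys; Delete returns a copy
    // without the named keys. Neither mutates base, so every table entry
    // starts from the same required attributes.
    var (
        withNetwork = base.Merge(testing.Attrs{"network": "a-network-label"})
        noRegion    = base.Delete("region") // exercises "missing region not valid"
    )
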
var old, changed, valid *config.Config - osenv := e.(*environ) + osenv := e.(*Environ) old = osenv.ecfg().Config changed, err = old.Apply(t.change) c.Assert(err, jc.ErrorIsNil) @@ -119,9 +124,8 @@ } c.Assert(err, jc.ErrorIsNil) - ecfg := e.(*environ).ecfg() + ecfg := e.(*Environ).ecfg() c.Assert(ecfg.Name(), gc.Equals, "testenv") - c.Assert(ecfg.controlBucket(), gc.Equals, "x") if t.region != "" { c.Assert(ecfg.region(), gc.Equals, t.region) } @@ -182,7 +186,7 @@ s.savedVars[v] = os.Getenv(v) os.Setenv(v, val) } - s.PatchValue(&authenticateClient, func(*environ) error { return nil }) + s.PatchValue(&authenticateClient, func(*Environ) error { return nil }) } func (s *ConfigSuite) TearDownTest(c *gc.C) { @@ -195,271 +199,225 @@ var configTests = []configTest{ { summary: "setting region", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "region": "testreg", - }, + }), region: "testreg", }, { summary: "setting region (2)", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "region": "configtest", - }, + }), region: "configtest", }, { summary: "changing region", - config: attrs{ - "region": "configtest", - }, - change: attrs{ - "region": "somereg", - }, - err: `cannot change region from "configtest" to "somereg"`, + config: requiredConfig, + change: testing.Attrs{ + "region": "otherregion", + }, + err: `cannot change region from "configtest" to "otherregion"`, }, { summary: "invalid region", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "region": 666, - }, + }), err: `.*expected string, got int\(666\)`, }, { - summary: "missing region in environment", - envVars: map[string]string{ - "OS_REGION_NAME": "", - "NOVA_REGION": "", - }, - err: "required environment variable not set for credentials attribute: Region", + summary: "missing region in model", + config: requiredConfig.Delete("region"), + err: "missing region not valid", }, { summary: "invalid username", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "username": 666, - }, + }), err: `.*expected string, got int\(666\)`, }, { - summary: "missing username in environment", - err: "required environment variable not set for credentials attribute: User", - envVars: map[string]string{ - "OS_USERNAME": "", - "NOVA_USERNAME": "", - }, + summary: "missing username in model", + config: requiredConfig.Delete("username"), + err: "missing username not valid", }, { summary: "invalid password", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "password": 666, - }, + }), err: `.*expected string, got int\(666\)`, }, { - summary: "missing password in environment", - err: "required environment variable not set for credentials attribute: Secrets", - envVars: map[string]string{ - "OS_PASSWORD": "", - "NOVA_PASSWORD": "", - }, + summary: "missing password in model", + config: requiredConfig.Delete("password"), + err: "missing password not valid", }, { summary: "invalid tenant-name", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "tenant-name": 666, - }, + }), err: `.*expected string, got int\(666\)`, }, { - summary: "missing tenant in environment", - err: "required environment variable not set for credentials attribute: TenantName", - envVars: map[string]string{ - "OS_TENANT_NAME": "", - "NOVA_PROJECT_ID": "", - }, + summary: "missing tenant in model", + config: requiredConfig.Delete("tenant-name"), + err: "missing tenant-name not valid", }, { summary: "invalid auth-url type", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "auth-url": 666, - }, + }), err: 
`.*expected string, got int\(666\)`, }, { - summary: "missing auth-url in environment", - err: "required environment variable not set for credentials attribute: URL", - envVars: map[string]string{ - "OS_AUTH_URL": "", - }, + summary: "missing auth-url in model", + config: requiredConfig.Delete("auth-url"), + err: "missing auth-url not valid", }, { summary: "invalid authorization mode", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "auth-mode": "invalid-mode", - }, + }), err: `auth-mode: expected one of \[keypair legacy userpass\], got "invalid-mode"`, }, { summary: "keypair authorization mode", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "auth-mode": "keypair", "access-key": "MyAccessKey", "secret-key": "MySecretKey", - }, + }), authMode: "keypair", accessKey: "MyAccessKey", secretKey: "MySecretKey", }, { summary: "keypair authorization mode without access key", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "auth-mode": "keypair", "secret-key": "MySecretKey", - }, - envVars: map[string]string{ - "OS_USERNAME": "", - }, - err: "required environment variable not set for credentials attribute: User", + }), + err: "missing access-key not valid", }, { summary: "keypair authorization mode without secret key", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "auth-mode": "keypair", "access-key": "MyAccessKey", - }, - envVars: map[string]string{ - "OS_PASSWORD": "", - }, - err: "required environment variable not set for credentials attribute: Secrets", + }), + err: "missing secret-key not valid", }, { summary: "invalid auth-url format", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "auth-url": "invalid", - }, + }), err: `invalid auth-url value "invalid"`, }, { - summary: "invalid control-bucket", - config: attrs{ - "control-bucket": 666, - }, - err: `.*expected string, got int\(666\)`, - }, { - summary: "changing control-bucket", - change: attrs{ - "control-bucket": "new-x", - }, - err: `cannot change control-bucket from "x" to "new-x"`, - }, { summary: "valid auth args", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "username": "jujuer", "password": "open sesame", "tenant-name": "juju tenant", "auth-mode": "legacy", "auth-url": "http://some/url", - }, + }), username: "jujuer", password: "open sesame", tenantName: "juju tenant", authURL: "http://some/url", authMode: AuthLegacy, }, { - summary: "valid auth args in environment", - envVars: map[string]string{ - "OS_USERNAME": "jujuer", - "OS_PASSWORD": "open sesame", - "OS_AUTH_URL": "http://some/url", - "OS_TENANT_NAME": "juju tenant", - "OS_REGION_NAME": "region", - }, - username: "jujuer", - password: "open sesame", - tenantName: "juju tenant", - authURL: "http://some/url", - region: "region", - }, { - summary: "default auth mode based on environment", - authMode: AuthUserPass, - }, { summary: "default use floating ip", + config: requiredConfig, // Do not use floating IP's by default. useFloatingIP: false, }, { summary: "use floating ip", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "use-floating-ip": true, - }, + }), useFloatingIP: true, }, { summary: "default use default security group", + config: requiredConfig, // Do not use default security group by default. 
useDefaultSecurityGroup: false, }, { summary: "use default security group", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "use-default-secgroup": true, - }, + }), useDefaultSecurityGroup: true, }, { summary: "admin-secret given", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "admin-secret": "Futumpsh", - }, + }), }, { summary: "default firewall-mode", - config: attrs{}, + config: requiredConfig, firewallMode: config.FwInstance, }, { summary: "instance firewall-mode", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "firewall-mode": "instance", - }, + }), firewallMode: config.FwInstance, }, { summary: "global firewall-mode", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "firewall-mode": "global", - }, + }), firewallMode: config.FwGlobal, }, { summary: "none firewall-mode", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "firewall-mode": "none", - }, + }), firewallMode: config.FwNone, }, { - config: attrs{ - "future": "hammerstein", - }, - expect: attrs{ - "future": "hammerstein", - }, - }, { - change: attrs{ - "future": "hammerstein", - }, - expect: attrs{ - "future": "hammerstein", - }, - }, { - change: attrs{ + config: requiredConfig.Merge(testing.Attrs{ + "future": "hammerstein", + }), + expect: testing.Attrs{ + "future": "hammerstein", + }, + }, { + config: requiredConfig, + change: testing.Attrs{ + "future": "hammerstein", + }, + expect: testing.Attrs{ + "future": "hammerstein", + }, + }, { + config: requiredConfig, + change: testing.Attrs{ "ssl-hostname-verification": false, }, sslHostnameVerification: false, sslHostnameSet: true, }, { - change: attrs{ + config: requiredConfig, + change: testing.Attrs{ "ssl-hostname-verification": true, }, sslHostnameVerification: true, sslHostnameSet: true, }, { summary: "default network", + config: requiredConfig, network: "", }, { summary: "network", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "network": "a-network-label", - }, + }), network: "a-network-label", }, { summary: "no default block storage specified", - config: attrs{}, + config: requiredConfig, blockStorageSource: "cinder", }, { summary: "block storage specified", - config: attrs{ + config: requiredConfig.Merge(testing.Attrs{ "storage-default-block-source": "my-cinder", - }, + }), blockStorageSource: "my-cinder", }, } func (s *ConfigSuite) TestConfig(c *gc.C) { - s.setupEnvCredentials() for i, t := range configTests { c.Logf("test %d: %s (%v)", i, t.summary, t.config) t.check(c) @@ -467,12 +425,15 @@ } func (s *ConfigSuite) TestDeprecatedAttributesRemoved(c *gc.C) { - s.setupEnvCredentials() attrs := testing.FakeConfig().Merge(testing.Attrs{ "type": "openstack", - "control-bucket": "x", "default-image-id": "id-1234", "default-instance-type": "big", + "username": "u", + "password": "p", + "tenant-name": "t", + "region": "r", + "auth-url": "http://auth", }) cfg, err := config.New(config.NoDefaults, attrs) @@ -488,64 +449,31 @@ } } -func (s *ConfigSuite) TestPrepareInsertsUniqueControlBucket(c *gc.C) { - s.setupEnvCredentials() - attrs := testing.FakeConfig().Merge(testing.Attrs{ - "type": "openstack", - }) - cfg, err := config.New(config.NoDefaults, attrs) - c.Assert(err, jc.ErrorIsNil) - - ctx := envtesting.BootstrapContext(c) - env0, err := providerInstance.PrepareForBootstrap(ctx, cfg) - c.Assert(err, jc.ErrorIsNil) - bucket0 := env0.(*environ).ecfg().controlBucket() - c.Assert(bucket0, gc.Matches, "[a-f0-9]{32}") - - env1, err := providerInstance.PrepareForBootstrap(ctx, cfg) - 
c.Assert(err, jc.ErrorIsNil) - bucket1 := env1.(*environ).ecfg().controlBucket() - c.Assert(bucket1, gc.Matches, "[a-f0-9]{32}") - - c.Assert(bucket1, gc.Not(gc.Equals), bucket0) -} - -func (s *ConfigSuite) TestPrepareDoesNotTouchExistingControlBucket(c *gc.C) { - s.setupEnvCredentials() - attrs := testing.FakeConfig().Merge(testing.Attrs{ - "type": "openstack", - "control-bucket": "burblefoo", - }) - cfg, err := config.New(config.NoDefaults, attrs) - c.Assert(err, jc.ErrorIsNil) - - env, err := providerInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), cfg) - c.Assert(err, jc.ErrorIsNil) - bucket := env.(*environ).ecfg().controlBucket() - c.Assert(bucket, gc.Equals, "burblefoo") -} - func (s *ConfigSuite) TestPrepareSetsDefaultBlockSource(c *gc.C) { - s.setupEnvCredentials() attrs := testing.FakeConfig().Merge(testing.Attrs{ "type": "openstack", }) cfg, err := config.New(config.NoDefaults, attrs) c.Assert(err, jc.ErrorIsNil) - env, err := providerInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), cfg) + env, err := providerInstance.PrepareForBootstrap(envtesting.BootstrapContext(c), s.prepareForBootstrapParams(cfg)) c.Assert(err, jc.ErrorIsNil) - source, ok := env.(*environ).ecfg().StorageDefaultBlockSource() + source, ok := env.(*Environ).ecfg().StorageDefaultBlockSource() c.Assert(ok, jc.IsTrue) c.Assert(source, gc.Equals, "cinder") } -func (s *ConfigSuite) setupEnvCredentials() { - os.Setenv("OS_USERNAME", "user") - os.Setenv("OS_PASSWORD", "secret") - os.Setenv("OS_AUTH_URL", "http://auth") - os.Setenv("OS_TENANT_NAME", "sometenant") - os.Setenv("OS_REGION_NAME", "region") +func (s *ConfigSuite) prepareForBootstrapParams(cfg *config.Config) environs.PrepareForBootstrapParams { + return environs.PrepareForBootstrapParams{ + Config: cfg, + Credentials: cloud.NewCredential(cloud.UserPassAuthType, map[string]string{ + "username": "user", + "password": "secret", + "tenant-name": "sometenant", + }), + CloudRegion: "region", + CloudEndpoint: "http://auth", + } } func (*ConfigSuite) TestSchema(c *gc.C) { === added file 'src/github.com/juju/juju/provider/openstack/credentials.go' --- src/github.com/juju/juju/provider/openstack/credentials.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/openstack/credentials.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package openstack + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/juju/errors" + "github.com/juju/utils" + "gopkg.in/goose.v1/identity" + "gopkg.in/ini.v1" + + "github.com/juju/juju/cloud" +) + +type OpenstackCredentials struct{} + +// CredentialSchemas is part of the environs.ProviderCredentials interface. +func (OpenstackCredentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: { + "username": { + Description: "The username to authenticate with.", + }, + "password": { + Description: "The password for the specified username.", + Hidden: true, + }, + "tenant-name": { + Description: "The OpenStack tenant name.", + }, + }, + cloud.AccessKeyAuthType: { + "access-key": { + Description: "The access key to authenticate with.", + }, + "secret-key": { + Description: "The secret key to authenticate with.", + Hidden: true, + }, + "tenant-name": { + Description: "The OpenStack tenant name.", + }, + }, + } +} + +// DetectCredentials is part of the environs.ProviderCredentials interface. 
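
The CredentialSchemas map above is what drives both interactive credential entry and validation. A minimal sketch of the same shape, assuming the map-based cloud.CredentialSchema type used in this file; Hidden marks attributes, such as passwords, that should not be echoed back to the user:

    package schemasketch // hypothetical package, for illustration only

    import "github.com/juju/juju/cloud"

    // minimalSchemas declares one auth type with a visible and a hidden
    // attribute, mirroring the userpass entry above.
    var minimalSchemas = map[cloud.AuthType]cloud.CredentialSchema{
        cloud.UserPassAuthType: {
            "username": {
                Description: "The username to authenticate with.",
            },
            "password": {
                Description: "The password for the specified username.",
                Hidden:      true,
            },
        },
    }
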
+func (c OpenstackCredentials) DetectCredentials() (*cloud.CloudCredential, error) {
+	result := cloud.CloudCredential{
+		AuthCredentials: make(map[string]cloud.Credential),
+	}
+
+	// Try just using environment variables.
+	creds, user, region, err := c.detectCredential()
+	if err == nil {
+		result.DefaultRegion = region
+		result.AuthCredentials[user] = *creds
+	}
+
+	// Now look for a .novarc file in the home dir.
+	novarc := filepath.Join(utils.Home(), ".novarc")
+	novaInfo, err := ini.LooseLoad(novarc)
+	if err != nil {
+		return nil, errors.Annotate(err, "loading novarc file")
+	}
+	keyValues := novaInfo.Section(ini.DEFAULT_SECTION).KeysHash()
+	if len(keyValues) > 0 {
+		for k, v := range keyValues {
+			os.Setenv(k, v)
+		}
+		creds, user, region, err := c.detectCredential()
+		if err == nil {
+			result.DefaultRegion = region
+			result.AuthCredentials[user] = *creds
+		}
+	}
+	if len(result.AuthCredentials) == 0 {
+		return nil, errors.NotFoundf("openstack credentials")
+	}
+	return &result, nil
+}
+
+func (c OpenstackCredentials) detectCredential() (*cloud.Credential, string, string, error) {
+	creds := identity.CredentialsFromEnv()
+	if creds.TenantName == "" {
+		return nil, "", "", errors.NewNotFound(nil, "OS_TENANT_NAME environment variable not set")
+	}
+	if creds.User == "" {
+		return nil, "", "", errors.NewNotFound(nil, "neither OS_USERNAME nor OS_ACCESS_KEY environment variable is set")
+	}
+	if creds.Secrets == "" {
+		return nil, "", "", errors.NewNotFound(nil, "neither OS_PASSWORD nor OS_SECRET_KEY environment variable is set")
+	}
+
+	user, err := utils.LocalUsername()
+	if err != nil {
+		return nil, "", "", errors.Trace(err)
+	}
+
+	// If OS_USERNAME or NOVA_USERNAME is set, assume userpass.
+	var credential cloud.Credential
+	if os.Getenv("OS_USERNAME") != "" || os.Getenv("NOVA_USERNAME") != "" {
+		user = creds.User
+		credential = cloud.NewCredential(
+			cloud.UserPassAuthType,
+			map[string]string{
+				"username":    creds.User,
+				"password":    creds.Secrets,
+				"tenant-name": creds.TenantName,
+			},
+		)
+	} else {
+		credential = cloud.NewCredential(
+			cloud.AccessKeyAuthType,
+			map[string]string{
+				"access-key":  creds.User,
+				"secret-key":  creds.Secrets,
+				"tenant-name": creds.TenantName,
+			},
+		)
+	}
+	region := creds.Region
+	credential.Label = fmt.Sprintf("openstack region %q project %q user %q", region, creds.TenantName, user)
+	return &credential, user, creds.Region, nil
+}

=== added file 'src/github.com/juju/juju/provider/openstack/credentials_test.go'
--- src/github.com/juju/juju/provider/openstack/credentials_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/openstack/credentials_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,145 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
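
The userpass-versus-access-key decision in detectCredential above turns solely on whether OS_USERNAME (or NOVA_USERNAME) is present. The two credential shapes it can produce are sketched below; fromEnvValues is an illustrative helper, with user, secret, and tenant standing in for the OS_* values:

    package credsketch // hypothetical package, for illustration only

    import "github.com/juju/juju/cloud"

    // fromEnvValues mirrors the two branches of detectCredential: the
    // same user/secret pair becomes either a userpass or an access-key
    // credential, depending on which variables were set.
    func fromEnvValues(user, secret, tenant string, userpass bool) cloud.Credential {
        if userpass {
            return cloud.NewCredential(cloud.UserPassAuthType, map[string]string{
                "username":    user,
                "password":    secret,
                "tenant-name": tenant,
            })
        }
        return cloud.NewCredential(cloud.AccessKeyAuthType, map[string]string{
            "access-key":  user,
            "secret-key":  secret,
            "tenant-name": tenant,
        })
    }
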
+ +package openstack_test + +import ( + "io/ioutil" + "path/filepath" + "runtime" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" +) + +type credentialsSuite struct { + testing.IsolationSuite + provider environs.EnvironProvider +} + +var _ = gc.Suite(&credentialsSuite{}) + +func (s *credentialsSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + var err error + s.provider, err = environs.Provider("openstack") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { + envtesting.AssertProviderAuthTypes(c, s.provider, "access-key", "userpass") +} + +func (s *credentialsSuite) TestAccessKeyCredentialsValid(c *gc.C) { + envtesting.AssertProviderCredentialsValid(c, s.provider, "access-key", map[string]string{ + "access-key": "key", + "secret-key": "secret", + "tenant-name": "gary", + }) +} + +func (s *credentialsSuite) TestAccessKeyHiddenAttributes(c *gc.C) { + envtesting.AssertProviderCredentialsAttributesHidden(c, s.provider, "access-key", "secret-key") +} + +func (s *credentialsSuite) TestUserPassCredentialsValid(c *gc.C) { + envtesting.AssertProviderCredentialsValid(c, s.provider, "userpass", map[string]string{ + "username": "bob", + "password": "dobbs", + "tenant-name": "gary", + }) +} + +func (s *credentialsSuite) TestUserPassHiddenAttributes(c *gc.C) { + envtesting.AssertProviderCredentialsAttributesHidden(c, s.provider, "userpass", "password") +} + +func (s *credentialsSuite) TestDetectCredentialsNotFound(c *gc.C) { + // No environment variables set, so no credentials should be found. 
+	_, err := s.provider.DetectCredentials()
+	c.Assert(err, jc.Satisfies, errors.IsNotFound)
+}
+
+func (s *credentialsSuite) TestDetectCredentialsAccessKeyEnvironmentVariables(c *gc.C) {
+	s.PatchEnvironment("USER", "fred")
+	s.PatchEnvironment("OS_TENANT_NAME", "gary")
+	s.PatchEnvironment("OS_ACCESS_KEY", "key-id")
+	s.PatchEnvironment("OS_SECRET_KEY", "secret-access-key")
+	s.PatchEnvironment("OS_REGION_NAME", "east")
+
+	credentials, err := s.provider.DetectCredentials()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(credentials.DefaultRegion, gc.Equals, "east")
+	expected := cloud.NewCredential(
+		cloud.AccessKeyAuthType, map[string]string{
+			"access-key":  "key-id",
+			"secret-key":  "secret-access-key",
+			"tenant-name": "gary",
+		},
+	)
+	expected.Label = `openstack region "east" project "gary" user "fred"`
+	c.Assert(credentials.AuthCredentials["fred"], jc.DeepEquals, expected)
+}
+
+func (s *credentialsSuite) TestDetectCredentialsUserPassEnvironmentVariables(c *gc.C) {
+	s.PatchEnvironment("USER", "fred")
+	s.PatchEnvironment("OS_TENANT_NAME", "gary")
+	s.PatchEnvironment("OS_USERNAME", "bob")
+	s.PatchEnvironment("OS_PASSWORD", "dobbs")
+	s.PatchEnvironment("OS_REGION_NAME", "west")
+
+	credentials, err := s.provider.DetectCredentials()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(credentials.DefaultRegion, gc.Equals, "west")
+	expected := cloud.NewCredential(
+		cloud.UserPassAuthType, map[string]string{
+			"username":    "bob",
+			"password":    "dobbs",
+			"tenant-name": "gary",
+		},
+	)
+	expected.Label = `openstack region "west" project "gary" user "bob"`
+	c.Assert(credentials.AuthCredentials["bob"], jc.DeepEquals, expected)
+}
+
+func (s *credentialsSuite) TestDetectCredentialsNovarc(c *gc.C) {
+	if runtime.GOOS != "linux" {
+		c.Skip("not running linux")
+	}
+	home := utils.Home()
+	dir := c.MkDir()
+	utils.SetHome(dir)
+	s.AddCleanup(func(*gc.C) {
+		utils.SetHome(home)
+	})
+
+	content := `
+# Some secrets
+OS_TENANT_NAME=gary
+OS_USERNAME=bob
+OS_PASSWORD=dobbs
+`[1:]
+	novarc := filepath.Join(dir, ".novarc")
+	err := ioutil.WriteFile(novarc, []byte(content), 0600)
+	c.Assert(err, jc.ErrorIsNil)
+
+	credentials, err := s.provider.DetectCredentials()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(credentials.DefaultRegion, gc.Equals, "")
+	expected := cloud.NewCredential(
+		cloud.UserPassAuthType, map[string]string{
+			"username":    "bob",
+			"password":    "dobbs",
+			"tenant-name": "gary",
+		},
+	)
+	expected.Label = `openstack region "" project "gary" user "bob"`
+	c.Assert(credentials.AuthCredentials["bob"], jc.DeepEquals, expected)
+}

=== modified file 'src/github.com/juju/juju/provider/openstack/export_test.go'
--- src/github.com/juju/juju/provider/openstack/export_test.go	2016-03-14 14:26:14 +0000
+++ src/github.com/juju/juju/provider/openstack/export_test.go	2016-03-22 15:18:22 +0000
@@ -17,25 +17,17 @@
 	"github.com/juju/juju/constraints"
 	"github.com/juju/juju/environs"
 	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/environs/imagemetadata"
 	"github.com/juju/juju/environs/instances"
-	"github.com/juju/juju/environs/jujutest"
 	"github.com/juju/juju/environs/simplestreams"
 	envstorage "github.com/juju/juju/environs/storage"
+	envtesting "github.com/juju/juju/environs/testing"
 	"github.com/juju/juju/instance"
 	"github.com/juju/juju/network"
 	"github.com/juju/juju/storage"
 	"github.com/juju/juju/testing"
 )
 
-// This provides the content for code accessing test:///... URLs. This allows
-// us to set the responses for things like the Metadata server, by pointing
-// metadata requests at test:///... rather than http://169.254.169.254
-var testRoundTripper = &jujutest.ProxyRoundTripper{}
-
-func init() {
-	testRoundTripper.RegisterForScheme("test")
-}
-
 var (
 	ShortAttempt   = &shortAttempt
 	StorageAttempt = &storageAttempt
@@ -44,7 +36,7 @@
 
 // MetadataStorage returns a Storage instance which is used to store simplestreams metadata for tests.
 func MetadataStorage(e environs.Environ) envstorage.Storage {
-	ecfg := e.(*environ).ecfg()
+	ecfg := e.(*Environ).ecfg()
 	container := "juju-dist-test"
 	metadataStorage := &openstackstorage{
 		containerName: container,
@@ -89,8 +81,8 @@
 
 func NewCinderVolumeSource(s OpenstackStorage) storage.VolumeSource {
 	const envName = "testenv"
-	envUUID := testing.EnvironmentTag.Id()
-	return &cinderVolumeSource{openstackStorage(s), envName, envUUID}
+	modelUUID := testing.ModelTag.Id()
+	return &cinderVolumeSource{openstackStorage(s), envName, modelUUID}
}
 
 var indexData = `
@@ -269,17 +261,18 @@
 	stor.Put(simplestreams.UnsignedIndex("v1", 1), bytes.NewReader(data), int64(len(data)))
 	stor.Put(
 		productMetadatafile, strings.NewReader(imagesData), int64(len(imagesData)))
+
+	envtesting.SignTestTools(stor)
 }
 
 func RemoveTestImageData(stor envstorage.Storage) {
-	stor.Remove(simplestreams.UnsignedIndex("v1", 1))
-	stor.Remove(productMetadatafile)
+	stor.RemoveAll()
 }
 
 // DiscardSecurityGroup cleans up a security group; it is not an error to
 // delete something that doesn't exist.
 func DiscardSecurityGroup(e environs.Environ, name string) error {
-	env := e.(*environ)
+	env := e.(*Environ)
 	novaClient := env.nova()
 	group, err := novaClient.SecurityGroupByName(name)
 	if err != nil {
@@ -295,43 +288,41 @@
 	return nil
 }
 
-func FindInstanceSpec(e environs.Environ, series, arch, cons string) (spec *instances.InstanceSpec, err error) {
-	env := e.(*environ)
-	spec, err = findInstanceSpec(env, &instances.InstanceConstraint{
+func FindInstanceSpec(
+	e environs.Environ,
+	series, arch, cons string,
+	imageMetadata []*imagemetadata.ImageMetadata,
+) (spec *instances.InstanceSpec, err error) {
+	env := e.(*Environ)
+	return findInstanceSpec(env, &instances.InstanceConstraint{
 		Series:      series,
 		Arches:      []string{arch},
 		Region:      env.ecfg().region(),
 		Constraints: constraints.MustParse(cons),
-	})
-	return
-}
-
-func ControlBucketName(e environs.Environ) string {
-	env := e.(*environ)
-	return env.ecfg().controlBucket()
+	}, imageMetadata)
 }
 
 func GetSwiftURL(e environs.Environ) (string, error) {
-	return e.(*environ).client.MakeServiceURL("object-store", nil)
+	return e.(*Environ).client.MakeServiceURL("object-store", nil)
 }
 
 func SetUseFloatingIP(e environs.Environ, val bool) {
-	env := e.(*environ)
+	env := e.(*Environ)
 	env.ecfg().attrs["use-floating-ip"] = val
 }
 
 func SetUpGlobalGroup(e environs.Environ, name string, apiPort int) (nova.SecurityGroup, error) {
-	return e.(*environ).setUpGlobalGroup(name, apiPort)
+	return e.(*Environ).firewaller.(*defaultFirewaller).setUpGlobalGroup(name, apiPort)
 }
 
 func EnsureGroup(e environs.Environ, name string, rules []nova.RuleInfo) (nova.SecurityGroup, error) {
-	return e.(*environ).ensureGroup(name, rules)
+	return e.(*Environ).firewaller.(*defaultFirewaller).ensureGroup(name, rules)
 }
 
 // ImageMetadataStorage returns a Storage object pointing to where the goose
 // infrastructure sets up its keystone entry for image metadata.
 func ImageMetadataStorage(e environs.Environ) envstorage.Storage {
-	env := e.(*environ)
+	env := e.(*Environ)
 	return &openstackstorage{
 		containerName: "imagemetadata",
 		swift:         swift.New(env.client),
@@ -341,7 +332,7 @@
 // CreateCustomStorage creates a swift container and returns the Storage object
 // so you can put data into it.
 func CreateCustomStorage(e environs.Environ, containerName string) envstorage.Storage {
-	env := e.(*environ)
+	env := e.(*Environ)
 	swiftClient := swift.New(env.client)
 	if err := swiftClient.CreateContainer(containerName, swift.PublicRead); err != nil {
 		panic(err)
@@ -358,12 +349,12 @@
 }
 
 func GetNovaClient(e environs.Environ) *nova.Client {
-	return e.(*environ).nova()
+	return e.(*Environ).nova()
 }
 
 // ResolveNetwork exposes the environ helper function resolveNetwork for testing.
 func ResolveNetwork(e environs.Environ, networkName string) (string, error) {
-	return e.(*environ).resolveNetwork(networkName)
+	return e.(*Environ).resolveNetwork(networkName)
 }
 
 var PortsToRuleInfo = portsToRuleInfo

=== added file 'src/github.com/juju/juju/provider/openstack/firewaller.go'
--- src/github.com/juju/juju/provider/openstack/firewaller.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/openstack/firewaller.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,436 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package openstack
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/retry"
+	"github.com/juju/utils/clock"
+	gooseerrors "gopkg.in/goose.v1/errors"
+	"gopkg.in/goose.v1/nova"
+
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/instance"
+	"github.com/juju/juju/network"
+)
+
+// FirewallerFactory is a factory for obtaining Firewaller objects.
+type FirewallerFactory interface {
+	GetFirewaller(env environs.Environ) Firewaller
+}
+
+// Firewaller allows custom openstack provider behaviour.
+// This is used in other providers that embed the openstack provider.
+type Firewaller interface {
+	// OpenPorts opens the given port ranges for the whole environment.
+	OpenPorts(ports []network.PortRange) error
+
+	// ClosePorts closes the given port ranges for the whole environment.
+	ClosePorts(ports []network.PortRange) error
+
+	// Ports returns the port ranges opened for the whole environment.
+	Ports() ([]network.PortRange, error)
+
+	// DeleteGlobalGroups deletes all global security groups.
+	DeleteGlobalGroups() error
+
+	// GetSecurityGroups returns the security groups that belong to the
+	// given instances.
+	GetSecurityGroups(ids ...instance.Id) ([]string, error)
+
+	// SetUpGroups sets up initial security groups, if any.
+	SetUpGroups(machineId string, apiPort int) ([]nova.SecurityGroup, error)
+
+	// InitialNetworks returns the set of networks that should be added
+	// by default to all new instances.
+	InitialNetworks() []nova.ServerNetworks
+
+	// OpenInstancePorts opens the given port ranges for the specified instance.
+	OpenInstancePorts(inst instance.Instance, machineId string, ports []network.PortRange) error
+
+	// CloseInstancePorts closes the given port ranges for the specified instance.
+	CloseInstancePorts(inst instance.Instance, machineId string, ports []network.PortRange) error
+
+	// InstancePorts returns the port ranges opened for the specified instance.
+ InstancePorts(inst instance.Instance, machineId string) ([]network.PortRange, error) +} + +type firewallerFactory struct { +} + +// GetFirewaller implements FirewallerFactory +func (f *firewallerFactory) GetFirewaller(env environs.Environ) Firewaller { + return &defaultFirewaller{environ: env.(*Environ)} +} + +type defaultFirewaller struct { + environ *Environ +} + +// InitialNetworks implements Firewaller interface. +func (c *defaultFirewaller) InitialNetworks() []nova.ServerNetworks { + return []nova.ServerNetworks{} +} + +// SetUpGroups creates the security groups for the new machine, and +// returns them. +// +// Instances are tagged with a group so they can be distinguished from +// other instances that might be running on the same OpenStack account. +// In addition, a specific machine security group is created for each +// machine, so that its firewall rules can be configured per machine. +// +// Note: ideally we'd have a better way to determine group membership so that 2 +// people that happen to share an openstack account and name their environment +// "openstack" don't end up destroying each other's machines. +func (c *defaultFirewaller) SetUpGroups(machineId string, apiPort int) ([]nova.SecurityGroup, error) { + jujuGroup, err := c.setUpGlobalGroup(c.jujuGroupName(), apiPort) + if err != nil { + return nil, err + } + var machineGroup nova.SecurityGroup + switch c.environ.Config().FirewallMode() { + case config.FwInstance: + machineGroup, err = c.ensureGroup(c.machineGroupName(machineId), nil) + case config.FwGlobal: + machineGroup, err = c.ensureGroup(c.globalGroupName(), nil) + } + if err != nil { + return nil, err + } + groups := []nova.SecurityGroup{jujuGroup, machineGroup} + if c.environ.ecfg().useDefaultSecurityGroup() { + defaultGroup, err := c.environ.nova().SecurityGroupByName("default") + if err != nil { + return nil, fmt.Errorf("loading default security group: %v", err) + } + groups = append(groups, *defaultGroup) + } + return groups, nil +} + +func (c *defaultFirewaller) setUpGlobalGroup(groupName string, apiPort int) (nova.SecurityGroup, error) { + return c.ensureGroup(groupName, + []nova.RuleInfo{ + { + IPProtocol: "tcp", + FromPort: 22, + ToPort: 22, + Cidr: "0.0.0.0/0", + }, + { + IPProtocol: "tcp", + FromPort: apiPort, + ToPort: apiPort, + Cidr: "0.0.0.0/0", + }, + { + IPProtocol: "tcp", + FromPort: 1, + ToPort: 65535, + }, + { + IPProtocol: "udp", + FromPort: 1, + ToPort: 65535, + }, + { + IPProtocol: "icmp", + FromPort: -1, + ToPort: -1, + }, + }) +} + +// zeroGroup holds the zero security group. +var zeroGroup nova.SecurityGroup + +// ensureGroup returns the security group with name and perms. +// If a group with name does not exist, one will be created. +// If it exists, its permissions are set to perms. +func (c *defaultFirewaller) ensureGroup(name string, rules []nova.RuleInfo) (nova.SecurityGroup, error) { + novaClient := c.environ.nova() + // First attempt to look up an existing group by name. + group, err := novaClient.SecurityGroupByName(name) + if err == nil { + // Group exists, so assume it is correctly set up and return it. + // TODO(jam): 2013-09-18 http://pad.lv/121795 + // We really should verify the group is set up correctly, + // because deleting and re-creating environments can get us bad + // groups (especially if they were set up under Python) + return *group, nil + } + // Doesn't exist, so try and create it. 
+ group, err = novaClient.CreateSecurityGroup(name, "juju group") + if err != nil { + if !gooseerrors.IsDuplicateValue(err) { + return zeroGroup, err + } else { + // We just tried to create a duplicate group, so load the existing group. + group, err = novaClient.SecurityGroupByName(name) + if err != nil { + return zeroGroup, err + } + return *group, nil + } + } + // The new group is created so now add the rules. + group.Rules = make([]nova.SecurityGroupRule, len(rules)) + for i, rule := range rules { + rule.ParentGroupId = group.Id + if rule.Cidr == "" { + // http://pad.lv/1226996 Rules that don't have a CIDR + // are meant to apply only to this group. If you don't + // supply CIDR or GroupId then openstack assumes you + // mean CIDR=0.0.0.0/0 + rule.GroupId = &group.Id + } + groupRule, err := novaClient.CreateSecurityGroupRule(rule) + if err != nil && !gooseerrors.IsDuplicateValue(err) { + return zeroGroup, err + } + group.Rules[i] = *groupRule + } + return *group, nil +} + +// GetSecurityGroups implements Firewaller interface. +func (c *defaultFirewaller) GetSecurityGroups(ids ...instance.Id) ([]string, error) { + var securityGroupNames []string + if c.environ.Config().FirewallMode() == config.FwInstance { + instances, err := c.environ.Instances(ids) + if err != nil { + return nil, err + } + securityGroupNames = make([]string, 0, len(ids)) + for _, inst := range instances { + if inst == nil { + continue + } + openstackName := inst.(*openstackInstance).getServerDetail().Name + lastDashPos := strings.LastIndex(openstackName, "-") + if lastDashPos == -1 { + return nil, fmt.Errorf("cannot identify machine ID in openstack server name %q", openstackName) + } + securityGroupName := c.machineGroupName(openstackName[lastDashPos+1:]) + securityGroupNames = append(securityGroupNames, securityGroupName) + } + } + return securityGroupNames, nil +} + +// DeleteGlobalGroups implements Firewaller interface. +func (c *defaultFirewaller) DeleteGlobalGroups() error { + novaClient := c.environ.nova() + securityGroups, err := novaClient.ListSecurityGroups() + if err != nil { + return errors.Annotate(err, "cannot list security groups") + } + re, err := regexp.Compile(fmt.Sprintf("^%s(-\\d+)?$", c.jujuGroupName())) + if err != nil { + return errors.Trace(err) + } + globalGroupName := c.globalGroupName() + for _, group := range securityGroups { + if re.MatchString(group.Name) || group.Name == globalGroupName { + deleteSecurityGroup(novaClient, group.Name, group.Id) + } + } + return nil +} + +// deleteSecurityGroup attempts to delete the security group. Should it fail, +// the deletion is retried due to timing issues in openstack. A security group +// cannot be deleted while it is in use. Theoretically we terminate all the +// instances before we attempt to delete the associated security groups, but +// in practice nova hasn't always finished with the instance before it +// returns, so there is a race condition where we think the instance is +// terminated and hence attempt to delete the security groups but nova still +// has it around internally. To attempt to catch this timing issue, deletion +// of the groups is tried multiple times. 
+func deleteSecurityGroup(novaclient *nova.Client, name, id string) { + logger.Debugf("deleting security group %q", name) + err := retry.Call(retry.CallArgs{ + Func: func() error { + return novaclient.DeleteSecurityGroup(id) + }, + NotifyFunc: func(err error, attempt int) { + if attempt%4 == 0 { + message := fmt.Sprintf("waiting to delete security group %q", name) + if attempt != 4 { + message = "still " + message + } + logger.Debugf(message) + } + }, + Attempts: 30, + Delay: time.Second, + Clock: clock.WallClock, + }) + if err != nil { + logger.Warningf("cannot delete security group %q. Used by another model?", name) + } +} + +// OpenPorts implements Firewaller interface. +func (c *defaultFirewaller) OpenPorts(ports []network.PortRange) error { + if c.environ.Config().FirewallMode() != config.FwGlobal { + return fmt.Errorf("invalid firewall mode %q for opening ports on model", + c.environ.Config().FirewallMode()) + } + if err := c.openPortsInGroup(c.globalGroupName(), ports); err != nil { + return err + } + logger.Infof("opened ports in global group: %v", ports) + return nil +} + +// ClosePorts implements Firewaller interface. +func (c *defaultFirewaller) ClosePorts(ports []network.PortRange) error { + if c.environ.Config().FirewallMode() != config.FwGlobal { + return fmt.Errorf("invalid firewall mode %q for closing ports on model", + c.environ.Config().FirewallMode()) + } + if err := c.closePortsInGroup(c.globalGroupName(), ports); err != nil { + return err + } + logger.Infof("closed ports in global group: %v", ports) + return nil +} + +// Ports implements Firewaller interface. +func (c *defaultFirewaller) Ports() ([]network.PortRange, error) { + if c.environ.Config().FirewallMode() != config.FwGlobal { + return nil, fmt.Errorf("invalid firewall mode %q for retrieving ports from model", + c.environ.Config().FirewallMode()) + } + return c.portsInGroup(c.globalGroupName()) +} + +// OpenInstancePorts implements Firewaller interface. +func (c *defaultFirewaller) OpenInstancePorts(inst instance.Instance, machineId string, ports []network.PortRange) error { + if c.environ.Config().FirewallMode() != config.FwInstance { + return fmt.Errorf("invalid firewall mode %q for opening ports on instance", + c.environ.Config().FirewallMode()) + } + name := c.machineGroupName(machineId) + if err := c.openPortsInGroup(name, ports); err != nil { + return err + } + logger.Infof("opened ports in security group %s: %v", name, ports) + return nil +} + +// CloseInstancePorts implements Firewaller interface. +func (c *defaultFirewaller) CloseInstancePorts(inst instance.Instance, machineId string, ports []network.PortRange) error { + if c.environ.Config().FirewallMode() != config.FwInstance { + return fmt.Errorf("invalid firewall mode %q for closing ports on instance", + c.environ.Config().FirewallMode()) + } + name := c.machineGroupName(machineId) + if err := c.closePortsInGroup(name, ports); err != nil { + return err + } + logger.Infof("closed ports in security group %s: %v", name, ports) + return nil +} + +// InstancePorts implements Firewaller interface. 
+func (c *defaultFirewaller) InstancePorts(inst instance.Instance, machineId string) ([]network.PortRange, error) { + if c.environ.Config().FirewallMode() != config.FwInstance { + return nil, fmt.Errorf("invalid firewall mode %q for retrieving ports from instance", + c.environ.Config().FirewallMode()) + } + name := c.machineGroupName(machineId) + portRanges, err := c.portsInGroup(name) + if err != nil { + return nil, err + } + return portRanges, nil +} + +func (c *defaultFirewaller) openPortsInGroup(name string, portRanges []network.PortRange) error { + novaclient := c.environ.nova() + group, err := novaclient.SecurityGroupByName(name) + if err != nil { + return err + } + rules := portsToRuleInfo(group.Id, portRanges) + for _, rule := range rules { + _, err := novaclient.CreateSecurityGroupRule(rule) + if err != nil { + // TODO: if err is not rule already exists, raise? + logger.Debugf("error creating security group rule: %v", err.Error()) + } + } + return nil +} + +// ruleMatchesPortRange checks if supplied nova security group rule matches the port range +func ruleMatchesPortRange(rule nova.SecurityGroupRule, portRange network.PortRange) bool { + if rule.IPProtocol == nil || rule.FromPort == nil || rule.ToPort == nil { + return false + } + return *rule.IPProtocol == portRange.Protocol && + *rule.FromPort == portRange.FromPort && + *rule.ToPort == portRange.ToPort +} + +func (c *defaultFirewaller) closePortsInGroup(name string, portRanges []network.PortRange) error { + if len(portRanges) == 0 { + return nil + } + novaclient := c.environ.nova() + group, err := novaclient.SecurityGroupByName(name) + if err != nil { + return err + } + // TODO: Hey look ma, it's quadratic + for _, portRange := range portRanges { + for _, p := range (*group).Rules { + if !ruleMatchesPortRange(p, portRange) { + continue + } + err := novaclient.DeleteSecurityGroupRule(p.Id) + if err != nil { + return err + } + break + } + } + return nil +} + +func (c *defaultFirewaller) portsInGroup(name string) (portRanges []network.PortRange, err error) { + group, err := c.environ.nova().SecurityGroupByName(name) + if err != nil { + return nil, err + } + for _, p := range (*group).Rules { + portRanges = append(portRanges, network.PortRange{ + Protocol: *p.IPProtocol, + FromPort: *p.FromPort, + ToPort: *p.ToPort, + }) + } + network.SortPortRanges(portRanges) + return portRanges, nil +} +func (c *defaultFirewaller) globalGroupName() string { + return fmt.Sprintf("%s-global", c.jujuGroupName()) +} + +func (c *defaultFirewaller) machineGroupName(machineId string) string { + return fmt.Sprintf("%s-%s", c.jujuGroupName(), machineId) +} + +func (c *defaultFirewaller) jujuGroupName() string { + cfg := c.environ.Config() + eUUID, _ := cfg.UUID() + return fmt.Sprintf("juju-%s", eUUID) +} === modified file 'src/github.com/juju/juju/provider/openstack/image.go' --- src/github.com/juju/juju/provider/openstack/image.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/provider/openstack/image.go 2016-03-22 15:18:22 +0000 @@ -4,15 +4,17 @@ package openstack import ( - "github.com/juju/juju/environs" "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/instances" - "github.com/juju/juju/environs/simplestreams" ) // findInstanceSpec returns an image and instance type satisfying the constraint. // The instance type comes from querying the flavors supported by the deployment. 
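
The hunk that follows changes findInstanceSpec to accept pre-fetched image metadata from its caller instead of fetching simplestreams data itself. A sketch of the caller side, assuming the conversion helper used below; the image id and arch are made-up values:

    package imagesketch // hypothetical package, for illustration only

    import (
        "github.com/juju/juju/environs/imagemetadata"
        "github.com/juju/juju/environs/instances"
    )

    // toImages builds the slice callers now pass in and converts it the
    // same way findInstanceSpec does before matching instance types.
    func toImages() []instances.Image {
        md := []*imagemetadata.ImageMetadata{{
            Id:   "image-1234", // hypothetical image id
            Arch: "amd64",
        }}
        return instances.ImageMetadataToImages(md)
    }
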
-func findInstanceSpec(e *environ, ic *instances.InstanceConstraint) (*instances.InstanceSpec, error) { +func findInstanceSpec( + e *Environ, + ic *instances.InstanceConstraint, + imageMetadata []*imagemetadata.ImageMetadata, +) (*instances.InstanceSpec, error) { // first construct all available instance types from the supported flavors. nova := e.nova() flavors, err := nova.ListFlavorsDetail() @@ -33,22 +35,7 @@ allInstanceTypes = append(allInstanceTypes, instanceType) } - imageConstraint := imagemetadata.NewImageConstraint(simplestreams.LookupParams{ - CloudSpec: simplestreams.CloudSpec{ic.Region, e.ecfg().authURL()}, - Series: []string{ic.Series}, - Arches: ic.Arches, - Stream: e.Config().ImageStream(), - }) - sources, err := environs.ImageMetadataSources(e) - if err != nil { - return nil, err - } - // TODO (wallyworld): use an env parameter (default true) to mandate use of only signed image metadata. - matchingImages, _, err := imagemetadata.Fetch(sources, imageConstraint, false) - if err != nil { - return nil, err - } - images := instances.ImageMetadataToImages(matchingImages) + images := instances.ImageMetadataToImages(imageMetadata) spec, err := instances.FindInstanceSpec(images, ic, allInstanceTypes) if err != nil { return nil, err === modified file 'src/github.com/juju/juju/provider/openstack/init.go' --- src/github.com/juju/juju/provider/openstack/init.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/openstack/init.go 2016-03-22 15:18:22 +0000 @@ -14,9 +14,8 @@ ) func init() { - environs.RegisterProvider(providerType, environProvider{}) + environs.RegisterProvider(providerType, providerInstance) - logger.Infof("openstack init") environs.RegisterImageDataSourceFunc("keystone catalog", getKeystoneImageSource) tools.RegisterToolsDataSourceFunc("keystone catalog", getKeystoneToolsSource) === modified file 'src/github.com/juju/juju/provider/openstack/live_test.go' --- src/github.com/juju/juju/provider/openstack/live_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/openstack/live_test.go 2016-03-22 15:18:22 +0000 @@ -47,15 +47,14 @@ // secret-key: $OS_PASSWORD // attrs := coretesting.FakeConfig().Merge(coretesting.Attrs{ - "name": "sample-" + randomName(), - "type": "openstack", - "auth-mode": "userpass", - "control-bucket": "juju-test-" + randomName(), - "username": cred.User, - "password": cred.Secrets, - "region": cred.Region, - "auth-url": cred.URL, - "tenant-name": cred.TenantName, + "name": "sample-" + randomName(), + "type": "openstack", + "auth-mode": "userpass", + "username": cred.User, + "password": cred.Secrets, + "region": cred.Region, + "auth-url": cred.URL, + "tenant-name": cred.TenantName, }) return attrs } @@ -186,7 +185,7 @@ // all ports to other machines inside the same group // TODO(jam): 2013-09-18 http://pad.lv/1227142 // We shouldn't be exposing the API port on all the machines - // that *aren't* hosting the state server. + // that *aren't* hosting the controller. 
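
The local test suites in the hunks below stop reading OS_* environment variables and instead seed region, endpoint, and credential explicitly before preparing the environment. A sketch of such a helper, mirroring the prepareForBootstrapParams shape used by these suites; the credential values are placeholders:

    package preparesketch // hypothetical package, for illustration only

    import (
        "github.com/juju/juju/cloud"
        "github.com/juju/juju/environs"
        "github.com/juju/juju/environs/config"
    )

    // paramsForTest bundles everything PrepareForBootstrap now needs: the
    // model config plus the cloud credential, region, and endpoint that
    // used to come from environment variables.
    func paramsForTest(cfg *config.Config) environs.PrepareForBootstrapParams {
        return environs.PrepareForBootstrapParams{
            Config: cfg,
            Credentials: cloud.NewCredential(cloud.UserPassAuthType, map[string]string{
                "username":    "user",
                "password":    "secret",
                "tenant-name": "sometenant",
            }),
            CloudRegion:   "region",
            CloudEndpoint: "http://auth",
        }
    }
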
stringRules := make([]string, 0, len(group.Rules)) for _, rule := range group.Rules { ruleStr := fmt.Sprintf("%s %d %d %q %q", @@ -214,7 +213,6 @@ func (s *LiveTests) assertStartInstanceDefaultSecurityGroup(c *gc.C, useDefault bool) { attrs := s.TestConfig.Merge(coretesting.Attrs{ "name": "sample-" + randomName(), - "control-bucket": "juju-test-" + randomName(), "use-default-secgroup": useDefault, }) cfg, err := config.New(config.NoDefaults, attrs) === modified file 'src/github.com/juju/juju/provider/openstack/local_test.go' --- src/github.com/juju/juju/provider/openstack/local_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/openstack/local_test.go 2016-03-22 15:18:22 +0000 @@ -18,6 +18,9 @@ jujuerrors "github.com/juju/errors" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" + "github.com/juju/utils/ssh" gc "gopkg.in/check.v1" "gopkg.in/goose.v1/client" "gopkg.in/goose.v1/identity" @@ -27,6 +30,7 @@ "gopkg.in/goose.v1/testservices/novaservice" "gopkg.in/goose.v1/testservices/openstackservice" + "github.com/juju/juju/cloud" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/constraints" "github.com/juju/juju/environs" @@ -38,18 +42,19 @@ imagetesting "github.com/juju/juju/environs/imagemetadata/testing" "github.com/juju/juju/environs/jujutest" "github.com/juju/juju/environs/simplestreams" + sstesting "github.com/juju/juju/environs/simplestreams/testing" "github.com/juju/juju/environs/storage" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/environs/tools" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" + "github.com/juju/juju/juju" "github.com/juju/juju/juju/testing" + "github.com/juju/juju/jujuclient/jujuclienttesting" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" "github.com/juju/juju/provider/openstack" "github.com/juju/juju/storage/provider/registry" coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/utils/ssh" "github.com/juju/juju/version" ) @@ -168,8 +173,17 @@ func (s *localLiveSuite) SetUpSuite(c *gc.C) { s.BaseSuite.SetUpSuite(c) + c.Logf("Running live tests using openstack service test double") s.srv.start(c, s.cred, newFullOpenstackService) + + // Set credentials to use when bootstrapping. Must be done after + // starting server to get the auth URL. + args := prepareForBootstrapParams(nil, s.cred) + s.CloudRegion = args.CloudRegion + s.CloudEndpoint = args.CloudEndpoint + s.Credential = args.Credentials + s.LiveTests.SetUpSuite(c) openstack.UseTestImageData(openstack.ImageMetadataStorage(s.Env), s.cred) restoreFinishBootstrap := envtesting.DisableFinishBootstrap() @@ -220,6 +234,14 @@ func (s *localServerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.srv.start(c, s.cred, newFullOpenstackService) + + // Set credentials to use when bootstrapping. Must be done after + // starting server to get the auth URL. 
+	args := prepareForBootstrapParams(nil, s.cred)
+	s.CloudRegion = args.CloudRegion
+	s.CloudEndpoint = args.CloudEndpoint
+	s.Credential = args.Credentials
+
 	cl := client.NewClient(s.cred, identity.AuthUserPass, nil)
 	err := cl.Authenticate()
 	c.Assert(err, jc.ErrorIsNil)
@@ -230,7 +252,7 @@
 		"image-metadata-url": containerURL + "/juju-dist-test",
 		"auth-url":           s.cred.URL,
 	})
-	s.PatchValue(&version.Current.Number, coretesting.FakeVersionNumber)
+	s.PatchValue(&version.Current, coretesting.FakeVersionNumber)
 	s.Tests.SetUpTest(c)
 	// For testing, we create a storage instance to which tools and image metadata are uploaded.
 	s.env = s.Prepare(c)
@@ -255,6 +277,20 @@
 	s.BaseSuite.TearDownTest(c)
 }
 
+func (s *localServerSuite) TestBootstrap(c *gc.C) {
+	// These tests use Prepare, so destroy first.
+	err := environs.Destroy(s.env.Config().Name(), s.env, s.ConfigStore, s.ControllerStore)
+	c.Assert(err, jc.ErrorIsNil)
+	s.Tests.TestBootstrap(c)
+}
+
+func (s *localServerSuite) TestStartStop(c *gc.C) {
+	// These tests use Prepare, so destroy first.
+	err := environs.Destroy(s.env.Config().Name(), s.env, s.ConfigStore, s.ControllerStore)
+	c.Assert(err, jc.ErrorIsNil)
+	s.Tests.TestStartStop(c)
+}
+
 // If the bootstrap node is configured to require a public IP address,
 // bootstrapping fails if an address cannot be allocated.
 func (s *localServerSuite) TestBootstrapFailsWhenPublicIPError(c *gc.C) {
@@ -268,6 +304,9 @@
 	)
 	defer cleanup()
 
+	err := environs.Destroy(s.env.Config().Name(), s.env, s.ConfigStore, s.ControllerStore)
+	c.Assert(err, jc.ErrorIsNil)
+
 	// Create a config that matches s.TestConfig but with use-floating-ip set to true
 	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
 		"use-floating-ip": true,
@@ -282,7 +321,13 @@
 func (s *localServerSuite) TestAddressesWithPublicIP(c *gc.C) {
 	// Floating IP address is 10.0.0.1
 	bootstrapFinished := false
-	s.PatchValue(&common.FinishBootstrap, func(ctx environs.BootstrapContext, client ssh.Client, inst instance.Instance, instanceConfig *instancecfg.InstanceConfig) error {
+	s.PatchValue(&common.FinishBootstrap, func(
+		ctx environs.BootstrapContext,
+		client ssh.Client,
+		env environs.Environ,
+		inst instance.Instance,
+		instanceConfig *instancecfg.InstanceConfig,
+	) error {
 		addr, err := inst.Addresses()
 		c.Assert(err, jc.ErrorIsNil)
 		c.Assert(addr, jc.SameContents, []network.Address{
@@ -310,7 +355,13 @@
 func (s *localServerSuite) TestAddressesWithoutPublicIP(c *gc.C) {
 	bootstrapFinished := false
-	s.PatchValue(&common.FinishBootstrap, func(ctx environs.BootstrapContext, client ssh.Client, inst instance.Instance, instanceConfig *instancecfg.InstanceConfig) error {
+	s.PatchValue(&common.FinishBootstrap, func(
+		ctx environs.BootstrapContext,
+		client ssh.Client,
+		env environs.Environ,
+		inst instance.Instance,
+		instanceConfig *instancecfg.InstanceConfig,
+	) error {
 		addr, err := inst.Addresses()
 		c.Assert(err, jc.ErrorIsNil)
 		c.Assert(addr, jc.SameContents, []network.Address{
@@ -353,11 +404,18 @@
 	)
 	defer cleanup()
 
+	err := environs.Destroy(s.env.Config().Name(), s.env, s.ConfigStore, s.ControllerStore)
+	c.Assert(err, jc.ErrorIsNil)
+
 	cfg, err := config.New(config.NoDefaults, s.TestConfig.Merge(coretesting.Attrs{
 		"use-floating-ip": false,
 	}))
 	c.Assert(err, jc.ErrorIsNil)
-	env, err := environs.Prepare(cfg, envtesting.BootstrapContext(c), s.ConfigStore)
+	env, err := environs.Prepare(
+		envtesting.BootstrapContext(c), s.ConfigStore,
+		s.ControllerStore,
+		cfg.Name(), prepareForBootstrapParams(cfg, s.cred),
+	)
 	c.Assert(err,
jc.ErrorIsNil) err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) @@ -368,16 +426,21 @@ func (s *localServerSuite) TestStartInstanceHardwareCharacteristics(c *gc.C) { // Ensure amd64 tools are available, to ensure an amd64 image. - amd64Version := version.Current - amd64Version.Arch = arch.AMD64 - for _, series := range version.SupportedSeries() { + amd64Version := version.Binary{ + Number: version.Current, + Arch: arch.AMD64, + } + for _, series := range series.SupportedSeries() { amd64Version.Series = series envtesting.AssertUploadFakeToolsVersions( c, s.toolsMetadataStorage, s.env.Config().AgentStream(), s.env.Config().AgentStream(), amd64Version) } + err := environs.Destroy(s.env.Config().Name(), s.env, s.ConfigStore, s.ControllerStore) + c.Assert(err, jc.ErrorIsNil) + env := s.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err = bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) _, hc := testing.AssertStartInstanceWithConstraints(c, env, "100", constraints.MustParse("mem=1024")) c.Check(*hc.Arch, gc.Equals, "amd64") @@ -469,12 +532,12 @@ // Openstack now has three security groups for the server, the default // group, one group for the entire environment, and another for the // new instance. - name := env.Config().Name() - assertSecurityGroups(c, env, []string{"default", fmt.Sprintf("juju-%v", name), fmt.Sprintf("juju-%v-%v", name, instanceName)}) + eUUID, _ := env.Config().UUID() + assertSecurityGroups(c, env, []string{"default", fmt.Sprintf("juju-%v", eUUID), fmt.Sprintf("juju-%v-%v", eUUID, instanceName)}) err = env.StopInstances(inst.Id()) c.Assert(err, jc.ErrorIsNil) // The security group for this instance is now removed. 
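// Group names are now keyed on the model UUID rather than the model name; for
// a placeholder UUID and machine "100", the full set these tests assert would
// be:
//
//	default
//	juju-deadbeef-0bad-400d-8000-4b1d0d06f00d
//	juju-deadbeef-0bad-400d-8000-4b1d0d06f00d-100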
- assertSecurityGroups(c, env, []string{"default", fmt.Sprintf("juju-%v", name)}) + assertSecurityGroups(c, env, []string{"default", fmt.Sprintf("juju-%v", eUUID)}) } // Due to bug #1300755 it can happen that the security group intended for @@ -499,8 +562,8 @@ c.Assert(err, jc.ErrorIsNil) instanceName := "100" inst, _ := testing.AssertStartInstance(c, env, instanceName) - name := env.Config().Name() - allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", name), fmt.Sprintf("juju-%v-%v", name, instanceName)} + eUUID, _ := env.Config().UUID() + allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", eUUID), fmt.Sprintf("juju-%v-%v", eUUID, instanceName)} assertSecurityGroups(c, env, allSecurityGroups) err = env.StopInstances(inst.Id()) c.Assert(err, jc.ErrorIsNil) @@ -515,8 +578,8 @@ c.Assert(err, jc.ErrorIsNil) instanceName := "100" testing.AssertStartInstance(c, env, instanceName) - name := env.Config().Name() - allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", name), fmt.Sprintf("juju-%v-%v", name, instanceName)} + eUUID, _ := env.Config().UUID() + allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", eUUID), fmt.Sprintf("juju-%v-%v", eUUID, instanceName)} assertSecurityGroups(c, env, allSecurityGroups) err = env.Destroy() c.Check(err, jc.ErrorIsNil) @@ -531,8 +594,8 @@ c.Assert(err, jc.ErrorIsNil) instanceName := "100" testing.AssertStartInstance(c, env, instanceName) - name := env.Config().Name() - allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", name), fmt.Sprintf("juju-%v-global", name)} + eUUID, _ := env.Config().UUID() + allSecurityGroups := []string{"default", fmt.Sprintf("juju-%v", eUUID), fmt.Sprintf("juju-%v-global", eUUID)} assertSecurityGroups(c, env, allSecurityGroups) err = env.Destroy() c.Check(err, jc.ErrorIsNil) @@ -587,11 +650,10 @@ } func (s *localServerSuite) TestInstanceStatus(c *gc.C) { - env := s.Prepare(c) // goose's test service always returns ACTIVE state. - inst, _ := testing.AssertStartInstance(c, env, "100") + inst, _ := testing.AssertStartInstance(c, s.env, "100") c.Assert(inst.Status(), gc.Equals, nova.StatusActive) - err := env.StopInstances(inst.Id()) + err := s.env.StopInstances(inst.Id()) c.Assert(err, jc.ErrorIsNil) } @@ -680,7 +742,6 @@ func (s *localServerSuite) TestInstancesBuildSpawning(c *gc.C) { coretesting.SkipIfPPC64EL(c, "lp:1425242") - env := s.Prepare(c) // HP servers are available once they are BUILD(spawning). 
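// RegisterControlPoint hooks an operation on the nova test double: the
// callback runs when the named action fires and can mutate its arguments or
// return an error to simulate a failure. A sketch of the error-injection
// form, mirroring its use elsewhere in this file:
//
//	cleanup := s.srv.Nova.RegisterControlPoint(
//		"addServer",
//		func(sc hook.ServiceControl, args ...interface{}) error {
//			return fmt.Errorf("induced failure")
//		},
//	)
//	defer cleanup()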
cleanup := s.srv.Nova.RegisterControlPoint( "addServer", @@ -691,13 +752,13 @@ }, ) defer cleanup() - stateInst, _ := testing.AssertStartInstance(c, env, "100") + stateInst, _ := testing.AssertStartInstance(c, s.env, "100") defer func() { - err := env.StopInstances(stateInst.Id()) + err := s.env.StopInstances(stateInst.Id()) c.Assert(err, jc.ErrorIsNil) }() - instances, err := env.Instances([]instance.Id{stateInst.Id()}) + instances, err := s.env.Instances([]instance.Id{stateInst.Id()}) c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.HasLen, 1) @@ -707,7 +768,6 @@ func (s *localServerSuite) TestInstancesShutoffSuspended(c *gc.C) { coretesting.SkipIfPPC64EL(c, "lp:1425242") - env := s.Prepare(c) cleanup := s.srv.Nova.RegisterControlPoint( "addServer", func(sc hook.ServiceControl, args ...interface{}) error { @@ -724,14 +784,14 @@ }, ) defer cleanup() - stateInst1, _ := testing.AssertStartInstance(c, env, "100") - stateInst2, _ := testing.AssertStartInstance(c, env, "101") + stateInst1, _ := testing.AssertStartInstance(c, s.env, "100") + stateInst2, _ := testing.AssertStartInstance(c, s.env, "101") defer func() { - err := env.StopInstances(stateInst1.Id(), stateInst2.Id()) + err := s.env.StopInstances(stateInst1.Id(), stateInst2.Id()) c.Assert(err, jc.ErrorIsNil) }() - instances, err := env.Instances([]instance.Id{stateInst1.Id(), stateInst2.Id()}) + instances, err := s.env.Instances([]instance.Id{stateInst1.Id(), stateInst2.Id()}) c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.HasLen, 2) @@ -742,7 +802,6 @@ func (s *localServerSuite) TestInstancesErrorResponse(c *gc.C) { coretesting.SkipIfPPC64EL(c, "lp:1425242") - env := s.Prepare(c) cleanup := s.srv.Nova.RegisterControlPoint( "server", func(sc hook.ServiceControl, args ...interface{}) error { @@ -751,7 +810,7 @@ ) defer cleanup() - instances, err := env.Instances([]instance.Id{"1"}) + instances, err := s.env.Instances([]instance.Id{"1"}) c.Check(instances, gc.IsNil) c.Assert(err, gc.ErrorMatches, "(?s).*strange error not instance.*") } @@ -759,7 +818,6 @@ func (s *localServerSuite) TestInstancesMultiErrorResponse(c *gc.C) { coretesting.SkipIfPPC64EL(c, "lp:1425242") - env := s.Prepare(c) cleanup := s.srv.Nova.RegisterControlPoint( "matchServers", func(sc hook.ServiceControl, args ...interface{}) error { @@ -768,7 +826,7 @@ ) defer cleanup() - instances, err := env.Instances([]instance.Id{"1", "2"}) + instances, err := s.env.Instances([]instance.Id{"1", "2"}) c.Check(instances, gc.IsNil) c.Assert(err, gc.ErrorMatches, "(?s).*strange error no instances.*") } @@ -776,22 +834,15 @@ // TODO (wallyworld) - this test was copied from the ec2 provider. // It should be moved to environs.jujutests.Tests. func (s *localServerSuite) TestBootstrapInstanceUserDataAndState(c *gc.C) { - env := s.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), s.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) - // Check that StateServerInstances returns the ID of the bootstrap machine. - ids, err := env.StateServerInstances() + // Check that ControllerInstances returns the ID of the bootstrap machine. + ids, err := s.env.ControllerInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(ids, gc.HasLen, 1) - // Storage should be empty; it is not used anymore. 
- stor := env.(environs.EnvironStorage).Storage() - entries, err := stor.List("") - c.Assert(err, jc.ErrorIsNil) - c.Assert(entries, gc.HasLen, 0) - - insts, err := env.AllInstances() + insts, err := s.env.AllInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(insts, gc.HasLen, 1) c.Check(insts[0].Id(), gc.Equals, ids[0]) @@ -893,7 +944,7 @@ env := s.Open(c) // An error occurs if no suitable image is found. - _, err := openstack.FindInstanceSpec(env, "saucy", "amd64", "mem=1G") + _, err := openstack.FindInstanceSpec(env, "saucy", "amd64", "mem=1G", nil) c.Assert(err, gc.ErrorMatches, `no "saucy" images in some-region with arches \[amd64\]`) } @@ -931,17 +982,30 @@ } func (s *localServerSuite) TestFindImageInstanceConstraint(c *gc.C) { - imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "") env := s.Open(c) - spec, err := openstack.FindInstanceSpec(env, coretesting.FakeDefaultSeries, "amd64", "instance-type=m1.tiny") + imageMetadata := []*imagemetadata.ImageMetadata{{ + Id: "image-id", + Arch: "amd64", + }} + + spec, err := openstack.FindInstanceSpec( + env, coretesting.FakeDefaultSeries, "amd64", "instance-type=m1.tiny", + imageMetadata, + ) c.Assert(err, jc.ErrorIsNil) c.Assert(spec.InstanceType.Name, gc.Equals, "m1.tiny") } func (s *localServerSuite) TestFindImageInvalidInstanceConstraint(c *gc.C) { - imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "") env := s.Open(c) - _, err := openstack.FindInstanceSpec(env, coretesting.FakeDefaultSeries, "amd64", "instance-type=m1.large") + imageMetadata := []*imagemetadata.ImageMetadata{{ + Id: "image-id", + Arch: "amd64", + }} + _, err := openstack.FindInstanceSpec( + env, coretesting.FakeDefaultSeries, "amd64", "instance-type=m1.large", + imageMetadata, + ) c.Assert(err, gc.ErrorMatches, `no instance types in some-region matching constraints "instance-type=m1.large"`) } @@ -962,31 +1026,27 @@ } func (t *localServerSuite) TestPrecheckInstanceAvailZone(c *gc.C) { - env := t.Prepare(c) placement := "zone=test-available" - err := env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) + err := t.env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) c.Assert(err, jc.ErrorIsNil) } func (t *localServerSuite) TestPrecheckInstanceAvailZoneUnavailable(c *gc.C) { - env := t.Prepare(c) placement := "zone=test-unavailable" - err := env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) + err := t.env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) c.Assert(err, jc.ErrorIsNil) } func (t *localServerSuite) TestPrecheckInstanceAvailZoneUnknown(c *gc.C) { - env := t.Prepare(c) placement := "zone=test-unknown" - err := env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) + err := t.env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) c.Assert(err, gc.ErrorMatches, `invalid availability zone "test-unknown"`) } func (t *localServerSuite) TestPrecheckInstanceAvailZonesUnsupported(c *gc.C) { t.srv.Nova.SetAvailabilityZones() // no availability zone support - env := t.Prepare(c) placement := "zone=test-unknown" - err := env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) + err := t.env.PrecheckInstance(coretesting.FakeDefaultSeries, constraints.Value{}, placement) c.Assert(err, jc.Satisfies, jujuerrors.IsNotImplemented) } @@ -1004,7 +1064,7 @@ func (s *localServerSuite) TestImageMetadataSourceOrder(c *gc.C) { src := func(env environs.Environ) 
(simplestreams.DataSource, error) { - return simplestreams.NewURLDataSource("my datasource", "bar", false), nil + return simplestreams.NewURLDataSource("my datasource", "bar", false, simplestreams.CUSTOM_CLOUD_DATA, false), nil } environs.RegisterUserImageDataSourceFunc("my func", src) env := s.Open(c) @@ -1018,55 +1078,9 @@ "image-metadata-url", "my datasource", "keystone catalog", "default cloud images", "default ubuntu cloud images"}) } -func (s *localServerSuite) TestRemoveAll(c *gc.C) { - env := s.Prepare(c) - stor := env.(environs.EnvironStorage).Storage() - for _, a := range []byte("abcdefghijklmnopqrstuvwxyz") { - content := []byte{a} - name := string(content) - err := stor.Put(name, bytes.NewBuffer(content), - int64(len(content))) - c.Assert(err, jc.ErrorIsNil) - } - reader, err := storage.Get(stor, "a") - c.Assert(err, jc.ErrorIsNil) - allContent, err := ioutil.ReadAll(reader) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(allContent), gc.Equals, "a") - err = stor.RemoveAll() - c.Assert(err, jc.ErrorIsNil) - _, err = storage.Get(stor, "a") - c.Assert(err, gc.NotNil) -} - -func (s *localServerSuite) TestDeleteMoreThan100(c *gc.C) { - env := s.Prepare(c) - stor := env.(environs.EnvironStorage).Storage() - // 6*26 = 156 items - for _, a := range []byte("abcdef") { - for _, b := range []byte("abcdefghijklmnopqrstuvwxyz") { - content := []byte{a, b} - name := string(content) - err := stor.Put(name, bytes.NewBuffer(content), - int64(len(content))) - c.Assert(err, jc.ErrorIsNil) - } - } - reader, err := storage.Get(stor, "ab") - c.Assert(err, jc.ErrorIsNil) - allContent, err := ioutil.ReadAll(reader) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(allContent), gc.Equals, "ab") - err = stor.RemoveAll() - c.Assert(err, jc.ErrorIsNil) - _, err = storage.Get(stor, "ab") - c.Assert(err, gc.NotNil) -} - // TestEnsureGroup checks that when creating a duplicate security group, the existing group is // returned and the existing rules have been left as is. 
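// A sketch of the property under test: a second EnsureGroup call with the
// same name returns the original group, rules included (names here are
// illustrative):
//
//	g1, _ := openstack.EnsureGroup(env, "test group", rules)
//	g2, _ := openstack.EnsureGroup(env, "test group", otherRules)
//	// g2.Id == g1.Id, and g2 still carries the rules from the first call.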
func (s *localServerSuite) TestEnsureGroup(c *gc.C) { - env := s.Prepare(c) rule := []nova.RuleInfo{ { IPProtocol: "tcp", @@ -1082,7 +1096,7 @@ c.Check(*group.Rules[0].ToPort, gc.Equals, 22) } - group, err := openstack.EnsureGroup(env, "test group", rule) + group, err := openstack.EnsureGroup(s.env, "test group", rule) c.Assert(err, jc.ErrorIsNil) c.Assert(group.Name, gc.Equals, "test group") assertRule(group) @@ -1095,7 +1109,7 @@ ToPort: 65535, }, } - group, err = openstack.EnsureGroup(env, "test group", anotherRule) + group, err = openstack.EnsureGroup(s.env, "test group", anotherRule) c.Assert(err, jc.ErrorIsNil) c.Check(group.Id, gc.Equals, id) c.Assert(group.Name, gc.Equals, "test group") @@ -1144,7 +1158,7 @@ func (s *localHTTPSServerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) - s.PatchValue(&version.Current.Number, coretesting.FakeVersionNumber) + s.PatchValue(&version.Current, coretesting.FakeVersionNumber) s.srv.UseTLS = true cred := &identity.Credentials{ User: "fred", @@ -1159,7 +1173,11 @@ c.Assert(attrs["auth-url"].(string)[:8], gc.Equals, "https://") cfg, err := config.New(config.NoDefaults, attrs) c.Assert(err, jc.ErrorIsNil) - s.env, err = environs.Prepare(cfg, envtesting.BootstrapContext(c), configstore.NewMem()) + s.env, err = environs.Prepare( + envtesting.BootstrapContext(c), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + cfg.Name(), prepareForBootstrapParams(cfg, s.cred), + ) c.Assert(err, jc.ErrorIsNil) s.attrs = s.env.Config().AllAttrs() } @@ -1185,19 +1203,12 @@ newattrs[k] = v } newattrs["ssl-hostname-verification"] = true - env, err := environs.NewFromAttrs(newattrs) - c.Assert(err, jc.ErrorIsNil) - err = env.(environs.EnvironStorage).Storage().Put("test-name", strings.NewReader("content"), 7) - c.Assert(err, gc.ErrorMatches, "(.|\n)*x509: certificate signed by unknown authority") - // However, it works just fine if you use the one with the credentials set - err = s.env.(environs.EnvironStorage).Storage().Put("test-name", strings.NewReader("content"), 7) - c.Assert(err, jc.ErrorIsNil) - _, err = env.(environs.EnvironStorage).Storage().Get("test-name") - c.Assert(err, gc.ErrorMatches, "(.|\n)*x509: certificate signed by unknown authority") - reader, err := s.env.(environs.EnvironStorage).Storage().Get("test-name") - c.Assert(err, jc.ErrorIsNil) - contents, err := ioutil.ReadAll(reader) - c.Assert(string(contents), gc.Equals, "content") + cfg, err := config.New(config.NoDefaults, newattrs) + c.Assert(err, jc.ErrorIsNil) + env, err := environs.New(cfg) + c.Assert(err, jc.ErrorIsNil) + _, err = env.AllInstances() + c.Assert(err, gc.ErrorMatches, "(.|\n)*x509: certificate signed by unknown authority") } func (s *localHTTPSServerSuite) TestCanBootstrap(c *gc.C) { @@ -1341,12 +1352,11 @@ } func (s *localServerSuite) TestAllInstancesIgnoresOtherMachines(c *gc.C) { - env := s.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), s.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) // Check that we see 1 instance in the environment - insts, err := env.AllInstances() + insts, err := s.env.AllInstances() c.Assert(err, jc.ErrorIsNil) c.Check(insts, gc.HasLen, 1) @@ -1354,12 +1364,12 @@ // but not matching name, and ensure it isn't seen by AllInstances // See bug #1257481, for how similar names were causing them to get // listed (and thus destroyed) at the wrong time - existingEnvName := s.TestConfig["name"] - newMachineName := 
fmt.Sprintf("juju-%s-2-machine-0", existingEnvName) + existingModelName := s.TestConfig["name"] + newMachineName := fmt.Sprintf("juju-%s-2-machine-0", existingModelName) // We grab the Nova client directly from the env, just to save time // looking all the stuff up - novaClient := openstack.GetNovaClient(env) + novaClient := openstack.GetNovaClient(s.env) entity, err := novaClient.RunServer(nova.RunServerOpts{ Name: newMachineName, FlavorId: "1", // test service has 1,2,3 for flavor ids @@ -1373,33 +1383,30 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(servers, gc.HasLen, 2) - insts, err = env.AllInstances() + insts, err = s.env.AllInstances() c.Assert(err, jc.ErrorIsNil) c.Check(insts, gc.HasLen, 1) } func (s *localServerSuite) TestResolveNetworkUUID(c *gc.C) { - env := s.Prepare(c) var sampleUUID = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6" - networkId, err := openstack.ResolveNetwork(env, sampleUUID) + networkId, err := openstack.ResolveNetwork(s.env, sampleUUID) c.Assert(err, jc.ErrorIsNil) c.Assert(networkId, gc.Equals, sampleUUID) } func (s *localServerSuite) TestResolveNetworkLabel(c *gc.C) { - env := s.Prepare(c) // For now this test has to cheat and use knowledge of goose internals var networkLabel = "net" var expectNetworkId = "1" - networkId, err := openstack.ResolveNetwork(env, networkLabel) + networkId, err := openstack.ResolveNetwork(s.env, networkLabel) c.Assert(err, jc.ErrorIsNil) c.Assert(networkId, gc.Equals, expectNetworkId) } func (s *localServerSuite) TestResolveNetworkNotPresent(c *gc.C) { - env := s.Prepare(c) var notPresentNetwork = "no-network-with-this-label" - networkId, err := openstack.ResolveNetwork(env, notPresentNetwork) + networkId, err := openstack.ResolveNetwork(s.env, notPresentNetwork) c.Check(networkId, gc.Equals, "") c.Assert(err, gc.ErrorMatches, `No networks exist with label "no-network-with-this-label"`) } @@ -1423,12 +1430,11 @@ } func (t *localServerSuite) testStartInstanceAvailZone(c *gc.C, zone string) (instance.Instance, error) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) params := environs.StartInstanceParams{Placement: "zone=" + zone} - result, err := testing.StartInstanceWithParams(env, "1", params, nil) + result, err := testing.StartInstanceWithParams(t.env, "1", params, nil) if err != nil { return nil, err } @@ -1441,7 +1447,7 @@ t.PatchValue(openstack.NovaListAvailabilityZones, func(c *nova.Client) ([]nova.AvailabilityZone, error) { return append([]nova.AvailabilityZone{}, resultZones...), resultErr }) - env := t.Prepare(c).(common.ZonedEnviron) + env := t.env.(common.ZonedEnviron) resultErr = fmt.Errorf("failed to get availability zones") zones, err := env.AvailabilityZones() @@ -1472,7 +1478,7 @@ t.PatchValue(openstack.NovaListAvailabilityZones, func(c *nova.Client) ([]nova.AvailabilityZone, error) { return append([]nova.AvailabilityZone{}, resultZones...), nil }) - env := t.Prepare(c).(common.ZonedEnviron) + env := t.env.(common.ZonedEnviron) resultZones = make([]nova.AvailabilityZone, 2) resultZones[0].Name = "az1" resultZones[1].Name = "az2" @@ -1501,15 +1507,14 @@ } func (t *localServerSuite) TestStartInstanceDistributionParams(c *gc.C) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, 
bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) var mock mockAvailabilityZoneAllocations t.PatchValue(openstack.AvailabilityZoneAllocations, mock.AvailabilityZoneAllocations) // no distribution group specified - testing.AssertStartInstance(c, env, "1") + testing.AssertStartInstance(c, t.env, "1") c.Assert(mock.group, gc.HasLen, 0) // distribution group specified: ensure it's passed through to AvailabilityZone. @@ -1519,21 +1524,20 @@ return expectedInstances, nil }, } - _, err = testing.StartInstanceWithParams(env, "1", params, nil) + _, err = testing.StartInstanceWithParams(t.env, "1", params, nil) c.Assert(err, jc.ErrorIsNil) c.Assert(mock.group, gc.DeepEquals, expectedInstances) } func (t *localServerSuite) TestStartInstanceDistributionErrors(c *gc.C) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) mock := mockAvailabilityZoneAllocations{ err: fmt.Errorf("AvailabilityZoneAllocations failed"), } t.PatchValue(openstack.AvailabilityZoneAllocations, mock.AvailabilityZoneAllocations) - _, _, _, err = testing.StartInstance(env, "1") + _, _, _, err = testing.StartInstance(t.env, "1") c.Assert(jujuerrors.Cause(err), gc.Equals, mock.err) mock.err = nil @@ -1543,18 +1547,17 @@ return nil, dgErr }, } - _, err = testing.StartInstanceWithParams(env, "1", params, nil) + _, err = testing.StartInstanceWithParams(t.env, "1", params, nil) c.Assert(jujuerrors.Cause(err), gc.Equals, dgErr) } func (t *localServerSuite) TestStartInstanceDistribution(c *gc.C) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) // test-available is the only available AZ, so AvailabilityZoneAllocations // is guaranteed to return that. 
- inst, _ := testing.AssertStartInstance(c, env, "1") + inst, _ := testing.AssertStartInstance(c, t.env, "1") c.Assert(openstack.InstanceServerDetail(inst).AvailabilityZone, gc.Equals, "test-available") } @@ -1585,8 +1588,7 @@ }, ) - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) cleanup := t.srv.Nova.RegisterControlPoint( @@ -1600,7 +1602,7 @@ }, ) defer cleanup() - inst, _ := testing.AssertStartInstance(c, env, "1") + inst, _ := testing.AssertStartInstance(c, t.env, "1") c.Assert(openstack.InstanceServerDetail(inst).AvailabilityZone, gc.Equals, "az3") } @@ -1624,8 +1626,7 @@ }, ) - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) cleanup := t.srv.Nova.RegisterControlPoint( @@ -1639,13 +1640,12 @@ }, ) defer cleanup() - _, _, _, err = testing.StartInstance(env, "1") + _, _, _, err = testing.StartInstance(t.env, "1") c.Assert(err, gc.ErrorMatches, "(?s).*Some unknown error.*") } func (t *localServerSuite) TestStartInstanceDistributionAZNotImplemented(c *gc.C) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) mock := mockAvailabilityZoneAllocations{ @@ -1654,16 +1654,15 @@ t.PatchValue(openstack.AvailabilityZoneAllocations, mock.AvailabilityZoneAllocations) // Instance will be created without an availability zone specified. 
- inst, _ := testing.AssertStartInstance(c, env, "1") + inst, _ := testing.AssertStartInstance(c, t.env, "1") c.Assert(openstack.InstanceServerDetail(inst).AvailabilityZone, gc.Equals, "") } func (t *localServerSuite) TestInstanceTags(c *gc.C) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) - instances, err := env.AllInstances() + instances, err := t.env.AllInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.HasLen, 1) @@ -1671,40 +1670,39 @@ openstack.InstanceServerDetail(instances[0]).Metadata, jc.DeepEquals, map[string]string{ - "juju-env-uuid": coretesting.EnvironmentTag.Id(), - "juju-is-state": "true", + "juju-model-uuid": coretesting.ModelTag.Id(), + "juju-is-controller": "true", }, ) } func (t *localServerSuite) TestTagInstance(c *gc.C) { - env := t.Prepare(c) - err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), env, bootstrap.BootstrapParams{}) + err := bootstrap.Bootstrap(envtesting.BootstrapContext(c), t.env, bootstrap.BootstrapParams{}) c.Assert(err, jc.ErrorIsNil) assertMetadata := func(extraKey, extraValue string) { // Refresh instance - instances, err := env.AllInstances() + instances, err := t.env.AllInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.HasLen, 1) c.Assert( openstack.InstanceServerDetail(instances[0]).Metadata, jc.DeepEquals, map[string]string{ - "juju-env-uuid": coretesting.EnvironmentTag.Id(), - "juju-is-state": "true", - extraKey: extraValue, + "juju-model-uuid": coretesting.ModelTag.Id(), + "juju-is-controller": "true", + extraKey: extraValue, }, ) } - instances, err := env.AllInstances() + instances, err := t.env.AllInstances() c.Assert(err, jc.ErrorIsNil) c.Assert(instances, gc.HasLen, 1) extraKey := "extra-k" extraValue := "extra-v" - err = env.(environs.InstanceTagger).TagInstance( + err = t.env.(environs.InstanceTagger).TagInstance( instances[0].Id(), map[string]string{extraKey: extraValue}, ) c.Assert(err, jc.ErrorIsNil) @@ -1712,13 +1710,29 @@ // Ensure that a second call updates existing tags. extraValue = "extra-v2" - err = env.(environs.InstanceTagger).TagInstance( + err = t.env.(environs.InstanceTagger).TagInstance( instances[0].Id(), map[string]string{extraKey: extraValue}, ) c.Assert(err, jc.ErrorIsNil) assertMetadata(extraKey, extraValue) } +func prepareForBootstrapParams(cfg *config.Config, cred *identity.Credentials) environs.PrepareForBootstrapParams { + return environs.PrepareForBootstrapParams{ + Config: cfg, + Credentials: cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{ + "username": cred.User, + "password": cred.Secrets, + "tenant-name": cred.TenantName, + }, + ), + CloudEndpoint: cred.URL, + CloudRegion: cred.Region, + } +} + // noSwiftSuite contains tests that run against an OpenStack service double // that lacks Swift. 
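// The prepareForBootstrapParams helper above feeds environs.Prepare in the
// suites below; a sketch of that wiring (the stores are in-memory test
// doubles):
//
//	env, err := environs.Prepare(
//		envtesting.BootstrapContext(c), configstore.NewMem(),
//		jujuclienttesting.NewMemStore(),
//		cfg.Name(), prepareForBootstrapParams(cfg, s.cred),
//	)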
type noSwiftSuite struct { @@ -1732,6 +1746,9 @@ s.BaseSuite.SetUpSuite(c) restoreFinishBootstrap := envtesting.DisableFinishBootstrap() s.AddSuiteCleanup(func(*gc.C) { restoreFinishBootstrap() }) + + s.PatchValue(&imagemetadata.SimplestreamsImagesPublicKey, sstesting.SignedMetadataPublicKey) + s.PatchValue(&juju.JujuPublicKey, sstesting.SignedMetadataPublicKey) } func (s *noSwiftSuite) SetUpTest(c *gc.C) { @@ -1748,16 +1765,10 @@ "name": "sample-no-swift", "type": "openstack", "auth-mode": "userpass", - "control-bucket": "juju-test-no-swift", - "username": s.cred.User, - "password": s.cred.Secrets, - "region": s.cred.Region, - "auth-url": s.cred.URL, - "tenant-name": s.cred.TenantName, "agent-version": coretesting.FakeVersionNumber.String(), "authorized-keys": "fakekey", }) - s.PatchValue(&version.Current.Number, coretesting.FakeVersionNumber) + s.PatchValue(&version.Current, coretesting.FakeVersionNumber) // Serve fake tools and image metadata using "filestorage", // rather than Swift as the rest of the tests do. storageDir := c.MkDir() @@ -1778,7 +1789,11 @@ cfg, err := config.New(config.NoDefaults, attrs) c.Assert(err, jc.ErrorIsNil) configStore := configstore.NewMem() - env, err := environs.Prepare(cfg, envtesting.BootstrapContext(c), configStore) + env, err := environs.Prepare( + envtesting.BootstrapContext(c), configStore, + jujuclienttesting.NewMemStore(), + cfg.Name(), prepareForBootstrapParams(cfg, s.cred), + ) c.Assert(err, jc.ErrorIsNil) s.env = env } === modified file 'src/github.com/juju/juju/provider/openstack/provider.go' --- src/github.com/juju/juju/provider/openstack/provider.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/openstack/provider.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,6 @@ "fmt" "io/ioutil" "net/http" - "regexp" "strings" "sync" "time" @@ -18,12 +17,13 @@ "github.com/juju/loggo" "github.com/juju/names" "github.com/juju/utils" + "github.com/juju/utils/arch" "gopkg.in/goose.v1/client" gooseerrors "gopkg.in/goose.v1/errors" "gopkg.in/goose.v1/identity" "gopkg.in/goose.v1/nova" - "gopkg.in/goose.v1/swift" + "github.com/juju/juju/cloud" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/cloudconfig/providerinit" "github.com/juju/juju/constraints" @@ -32,10 +32,8 @@ "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/instances" "github.com/juju/juju/environs/simplestreams" - "github.com/juju/juju/environs/storage" "github.com/juju/juju/environs/tags" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/network" "github.com/juju/juju/provider/common" "github.com/juju/juju/state" @@ -44,11 +42,19 @@ var logger = loggo.GetLogger("juju.provider.openstack") -type environProvider struct{} - -var _ environs.EnvironProvider = (*environProvider)(nil) - -var providerInstance environProvider +type EnvironProvider struct { + environs.ProviderCredentials + Configurator ProviderConfigurator + FirewallerFactory FirewallerFactory +} + +var _ environs.EnvironProvider = (*EnvironProvider)(nil) + +var providerInstance *EnvironProvider = &EnvironProvider{ + OpenstackCredentials{}, + &defaultConfigurator{}, + &firewallerFactory{}, +} var makeServiceURL = client.AuthenticatingClient.MakeServiceURL @@ -62,186 +68,12 @@ Delay: 200 * time.Millisecond, } -func (p environProvider) BoilerplateConfig() string { - return ` -# https://juju.ubuntu.com/docs/config-openstack.html -openstack: - type: openstack - - # use-floating-ip specifies whether a floating IP address is - # required to give the 
nodes a public IP address. Some - # installations assign public IP addresses by default without - # requiring a floating IP address. - # - # use-floating-ip: false - - # use-default-secgroup specifies whether new machine instances - # should have the "default" Openstack security group assigned. - # - # use-default-secgroup: false - - # network specifies the network label or uuid to bring machines up - # on, in the case where multiple networks exist. It may be omitted - # otherwise. - # - # network: - - # agent-metadata-url specifies the location of the Juju tools and - # metadata. It defaults to the global public tools metadata - # location https://streams.canonical.com/tools. - # - # agent-metadata-url: https://your-agent-metadata-url - - # image-metadata-url specifies the location of Ubuntu cloud image - # metadata. It defaults to the global public image metadata - # location https://cloud-images.ubuntu.com/releases. - # - # image-metadata-url: https://your-image-metadata-url - - # image-stream chooses a simplestreams stream from which to select - # OS images, for example daily or released images (or any other stream - # available on simplestreams). - # - # image-stream: "released" - - # agent-stream chooses a simplestreams stream from which to select tools, - # for example released or proposed tools (or any other stream available - # on simplestreams). - # - # agent-stream: "released" - - # auth-url defaults to the value of the environment variable - # OS_AUTH_URL, but can be specified here. - # - # auth-url: https://yourkeystoneurl:443/v2.0/ - - # tenant-name holds the openstack tenant name. It defaults to the - # environment variable OS_TENANT_NAME. - # - # tenant-name: - - # region holds the openstack region. It defaults to the - # environment variable OS_REGION_NAME. - # - # region: - - # The auth-mode, username and password attributes are used for - # userpass authentication (the default). - # - # auth-mode holds the authentication mode. For user-password - # authentication, auth-mode should be "userpass" and username and - # password should be set appropriately; they default to the - # environment variables OS_USERNAME and OS_PASSWORD respectively. - # - # auth-mode: userpass - # username: - # password: - - # For key-pair authentication, auth-mode should be "keypair" and - # access-key and secret-key should be set appropriately; they - # default to the environment variables OS_ACCESS_KEY and - # OS_SECRET_KEY respectively. - # - # auth-mode: keypair - # access-key: - # secret-key: - - # Whether or not to refresh the list of available updates for an - # OS. The default option of true is recommended for use in - # production systems, but disabling this can speed up local - # deployments for development or testing. - # - # enable-os-refresh-update: true - - # Whether or not to perform OS upgrades when machines are - # provisioned. The default option of true is recommended for use - # in production systems, but disabling this can speed up local - # deployments for development or testing. - # - # enable-os-upgrade: true - -# https://juju.ubuntu.com/docs/config-hpcloud.html -hpcloud: - type: openstack - - # use-floating-ip specifies whether a floating IP address is - # required to give the nodes a public IP address. Some - # installations assign public IP addresses by default without - # requiring a floating IP address. - # - # use-floating-ip: true - - # use-default-secgroup specifies whether new machine instances - # should have the "default" Openstack security group assigned. 
-	#
-	# use-default-secgroup: false
-
-	# tenant-name holds the openstack tenant name. In HPCloud, this is
-	# synonymous with the project-name It defaults to the environment
-	# variable OS_TENANT_NAME.
-	#
-	# tenant-name:
-
-	# image-stream chooses a simplestreams stream from which to select
-	# OS images, for example daily or released images (or any other stream
-	# available on simplestreams).
-	#
-	# image-stream: "released"
-
-	# agent-stream chooses a simplestreams stream from which to select tools,
-	# for example released or proposed tools (or any other stream available
-	# on simplestreams).
-	#
-	# agent-stream: "released"
-
-	# auth-url holds the keystone url for authentication. It defaults
-	# to the value of the environment variable OS_AUTH_URL.
-	#
-	# auth-url: https://region-a.geo-1.identity.hpcloudsvc.com:35357/v2.0/
-
-	# region holds the HP Cloud region (e.g. region-a.geo-1). It
-	# defaults to the environment variable OS_REGION_NAME.
-	#
-	# region:
-
-	# auth-mode holds the authentication mode. For user-password
-	# authentication, auth-mode should be "userpass" and username and
-	# password should be set appropriately; they default to the
-	# environment variables OS_USERNAME and OS_PASSWORD respectively.
-	#
-	# auth-mode: userpass
-	# username:
-	# password:
-
-	# For key-pair authentication, auth-mode should be "keypair" and
-	# access-key and secret-key should be set appropriately; they
-	# default to the environment variables OS_ACCESS_KEY and
-	# OS_SECRET_KEY respectively.
-	#
-	# auth-mode: keypair
-	# access-key:
-	# secret-key:
-
-	# Whether or not to refresh the list of available updates for an
-	# OS. The default option of true is recommended for use in
-	# production systems, but disabling this can speed up local
-	# deployments for development or testing.
-	#
-	# enable-os-refresh-update: true
-
-	# Whether or not to perform OS upgrades when machines are
-	# provisioned. The default option of true is recommended for use
-	# in production systems, but disabling this can speed up local
-	# deployments for development or testing.
-	#
-	# enable-os-upgrade: true
-
-`[1:]
-}
-
-func (p environProvider) Open(cfg *config.Config) (environs.Environ, error) {
-	logger.Infof("opening environment %q", cfg.Name())
-	e := new(environ)
+func (p EnvironProvider) Open(cfg *config.Config) (environs.Environ, error) {
+	logger.Infof("opening model %q", cfg.Name())
+	e := new(Environ)
+
+	e.firewaller = p.FirewallerFactory.GetFirewaller(e)
+	e.configurator = p.Configurator
 	err := e.SetConfig(cfg)
 	if err != nil {
 		return nil, err
@@ -251,25 +83,64 @@
 }
 
 // RestrictedConfigAttributes is specified in the EnvironProvider interface.
-func (p environProvider) RestrictedConfigAttributes() []string {
+func (p EnvironProvider) RestrictedConfigAttributes() []string {
 	return []string{"region", "auth-url", "auth-mode"}
 }
 
+// DetectRegions implements environs.CloudRegionDetector.
+func (EnvironProvider) DetectRegions() ([]cloud.Region, error) {
+	// If OS_REGION_NAME and OS_AUTH_URL are both set,
+	// return a region using them.
+	creds := identity.CredentialsFromEnv()
+	if creds.Region == "" {
+		return nil, errors.NewNotFound(nil, "OS_REGION_NAME environment variable not set")
+	}
+	if creds.URL == "" {
+		return nil, errors.NewNotFound(nil, "OS_AUTH_URL environment variable not set")
+	}
+	return []cloud.Region{{
+		Name:     creds.Region,
+		Endpoint: creds.URL,
+	}}, nil
+}
+
 // PrepareForCreateEnvironment is specified in the EnvironProvider interface.
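// PrepareForBootstrap (further below) folds a cloud credential into the model
// config; a sketch of constructing the userpass form it consumes (all values
// are placeholders):
//
//	cred := cloud.NewCredential(cloud.UserPassAuthType, map[string]string{
//		"username":    "bob",
//		"password":    "secret",
//		"tenant-name": "bob-tenant",
//	})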
-func (p environProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) { - attrs := cfg.UnknownAttrs() - if _, ok := attrs["control-bucket"]; !ok { - uuid, err := utils.NewUUID() - if err != nil { - return nil, errors.Trace(err) - } - attrs["control-bucket"] = fmt.Sprintf("%x", uuid.Raw()) - } - return cfg.Apply(attrs) +func (p EnvironProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) { + return cfg, nil } -func (p environProvider) PrepareForBootstrap(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) { - cfg, err := p.PrepareForCreateEnvironment(cfg) +func (p EnvironProvider) PrepareForBootstrap( + ctx environs.BootstrapContext, + args environs.PrepareForBootstrapParams, +) (environs.Environ, error) { + + // Add credentials to the configuration. + attrs := map[string]interface{}{ + "region": args.CloudRegion, + "auth-url": args.CloudEndpoint, + } + credentialAttrs := args.Credentials.Attributes() + switch authType := args.Credentials.AuthType(); authType { + case cloud.UserPassAuthType: + // TODO(axw) we need a way of saying to use legacy auth. + attrs["username"] = credentialAttrs["username"] + attrs["password"] = credentialAttrs["password"] + attrs["tenant-name"] = credentialAttrs["tenant-name"] + attrs["auth-mode"] = AuthUserPass + case cloud.AccessKeyAuthType: + attrs["access-key"] = credentialAttrs["access-key"] + attrs["secret-key"] = credentialAttrs["secret-key"] + attrs["tenant-name"] = credentialAttrs["tenant-name"] + attrs["auth-mode"] = AuthKeyPair + default: + return nil, errors.NotSupportedf("%q auth-type", authType) + } + cfg, err := args.Config.Apply(attrs) + if err != nil { + return nil, errors.Trace(err) + } + + cfg, err = p.PrepareForCreateEnvironment(cfg) if err != nil { return nil, err } @@ -278,7 +149,7 @@ return nil, err } // Verify credentials. - if err := authenticateClient(e.(*environ)); err != nil { + if err := authenticateClient(e.(*Environ)); err != nil { return nil, err } return e, nil @@ -286,7 +157,7 @@ // MetadataLookupParams returns parameters which are used to query image metadata to // find matching image information. -func (p environProvider) MetadataLookupParams(region string) (*simplestreams.MetadataLookupParams, error) { +func (p EnvironProvider) MetadataLookupParams(region string) (*simplestreams.MetadataLookupParams, error) { if region == "" { return nil, fmt.Errorf("region must be specified") } @@ -296,9 +167,9 @@ }, nil } -func (p environProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { +func (p EnvironProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { m := make(map[string]string) - ecfg, err := providerInstance.newConfig(cfg) + ecfg, err := p.newConfig(cfg) if err != nil { return nil, err } @@ -308,6 +179,14 @@ return m, nil } +func (p EnvironProvider) newConfig(cfg *config.Config) (*environConfig, error) { + valid, err := p.Validate(cfg, nil) + if err != nil { + return nil, err + } + return &environConfig{valid, valid.UnknownAttrs()}, nil +} + func retryGet(uri string) (data []byte, err error) { for a := shortAttempt.Start(); a.Next(); { var resp *http.Response @@ -333,7 +212,7 @@ return } -type environ struct { +type Environ struct { common.SupportsUnitPlacementPolicy name string @@ -344,11 +223,10 @@ // for which images can be instantiated. 
supportedArchitectures []string - ecfgMutex sync.Mutex - ecfgUnlocked *environConfig - client client.AuthenticatingClient - novaUnlocked *nova.Client - storageUnlocked storage.Storage + ecfgMutex sync.Mutex + ecfgUnlocked *environConfig + client client.AuthenticatingClient + novaUnlocked *nova.Client // keystoneImageDataSource caches the result of getKeystoneImageSource. keystoneImageDataSourceMutex sync.Mutex @@ -360,16 +238,18 @@ availabilityZonesMutex sync.Mutex availabilityZones []common.AvailabilityZone + firewaller Firewaller + configurator ProviderConfigurator } -var _ environs.Environ = (*environ)(nil) -var _ simplestreams.HasRegion = (*environ)(nil) -var _ state.Prechecker = (*environ)(nil) -var _ state.InstanceDistributor = (*environ)(nil) -var _ environs.InstanceTagger = (*environ)(nil) +var _ environs.Environ = (*Environ)(nil) +var _ simplestreams.HasRegion = (*Environ)(nil) +var _ state.Prechecker = (*Environ)(nil) +var _ state.InstanceDistributor = (*Environ)(nil) +var _ environs.InstanceTagger = (*Environ)(nil) type openstackInstance struct { - e *environ + e *Environ instType *instances.InstanceType arch *string @@ -498,55 +378,26 @@ return machineAddresses } -// TODO: following 30 lines nearly verbatim from environs/ec2 - func (inst *openstackInstance) OpenPorts(machineId string, ports []network.PortRange) error { - if inst.e.Config().FirewallMode() != config.FwInstance { - return fmt.Errorf("invalid firewall mode %q for opening ports on instance", - inst.e.Config().FirewallMode()) - } - name := inst.e.machineGroupName(machineId) - if err := inst.e.openPortsInGroup(name, ports); err != nil { - return err - } - logger.Infof("opened ports in security group %s: %v", name, ports) - return nil + return inst.e.firewaller.OpenInstancePorts(inst, machineId, ports) } func (inst *openstackInstance) ClosePorts(machineId string, ports []network.PortRange) error { - if inst.e.Config().FirewallMode() != config.FwInstance { - return fmt.Errorf("invalid firewall mode %q for closing ports on instance", - inst.e.Config().FirewallMode()) - } - name := inst.e.machineGroupName(machineId) - if err := inst.e.closePortsInGroup(name, ports); err != nil { - return err - } - logger.Infof("closed ports in security group %s: %v", name, ports) - return nil + return inst.e.firewaller.CloseInstancePorts(inst, machineId, ports) } func (inst *openstackInstance) Ports(machineId string) ([]network.PortRange, error) { - if inst.e.Config().FirewallMode() != config.FwInstance { - return nil, fmt.Errorf("invalid firewall mode %q for retrieving ports from instance", - inst.e.Config().FirewallMode()) - } - name := inst.e.machineGroupName(machineId) - portRanges, err := inst.e.portsInGroup(name) - if err != nil { - return nil, err - } - return portRanges, nil + return inst.e.firewaller.InstancePorts(inst, machineId) } -func (e *environ) ecfg() *environConfig { +func (e *Environ) ecfg() *environConfig { e.ecfgMutex.Lock() ecfg := e.ecfgUnlocked e.ecfgMutex.Unlock() return ecfg } -func (e *environ) nova() *nova.Client { +func (e *Environ) nova() *nova.Client { e.ecfgMutex.Lock() nova := e.novaUnlocked e.ecfgMutex.Unlock() @@ -554,7 +405,7 @@ } // SupportedArchitectures is specified on the EnvironCapability interface. -func (e *environ) SupportedArchitectures() ([]string, error) { +func (e *Environ) SupportedArchitectures() ([]string, error) { e.archMutex.Lock() defer e.archMutex.Unlock() if e.supportedArchitectures != nil { @@ -579,7 +430,7 @@ } // ConstraintsValidator is defined on the Environs interface. 
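// A sketch of what registering conflicts below means for callers; the
// conflicting attribute set is truncated in this hunk, so the names here are
// only illustrative:
//
//	validator := constraints.NewValidator()
//	validator.RegisterConflicts(
//		[]string{constraints.InstanceType}, []string{constraints.Mem})
//	// "instance-type=m1.small mem=4G" would then fail validation.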
-func (e *environ) ConstraintsValidator() (constraints.Validator, error) { +func (e *Environ) ConstraintsValidator() (constraints.Validator, error) { validator := constraints.NewValidator() validator.RegisterConflicts( []string{constraints.InstanceType}, @@ -618,7 +469,7 @@ } // AvailabilityZones returns a slice of availability zones. -func (e *environ) AvailabilityZones() ([]common.AvailabilityZone, error) { +func (e *Environ) AvailabilityZones() ([]common.AvailabilityZone, error) { e.availabilityZonesMutex.Lock() defer e.availabilityZonesMutex.Unlock() if e.availabilityZones == nil { @@ -639,7 +490,7 @@ // InstanceAvailabilityZoneNames returns the availability zone names for each // of the specified instances. -func (e *environ) InstanceAvailabilityZoneNames(ids []instance.Id) ([]string, error) { +func (e *Environ) InstanceAvailabilityZoneNames(ids []instance.Id) ([]string, error) { instances, err := e.Instances(ids) if err != nil && err != environs.ErrPartialInstances { return nil, err @@ -658,7 +509,7 @@ availabilityZone nova.AvailabilityZone } -func (e *environ) parsePlacement(placement string) (*openstackPlacement, error) { +func (e *Environ) parsePlacement(placement string) (*openstackPlacement, error) { pos := strings.IndexRune(placement, '=') if pos == -1 { return nil, fmt.Errorf("unknown placement directive: %v", placement) @@ -683,7 +534,7 @@ } // PrecheckInstance is defined on the state.Prechecker interface. -func (e *environ) PrecheckInstance(series string, cons constraints.Value, placement string) error { +func (e *Environ) PrecheckInstance(series string, cons constraints.Value, placement string) error { if placement != "" { if _, err := e.parsePlacement(placement); err != nil { return err @@ -706,25 +557,18 @@ return fmt.Errorf("invalid Openstack flavour %q specified", *cons.InstanceType) } -func (e *environ) Storage() storage.Storage { - e.ecfgMutex.Lock() - stor := e.storageUnlocked - e.ecfgMutex.Unlock() - return stor -} - -func (e *environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) { +func (e *Environ) Bootstrap(ctx environs.BootstrapContext, args environs.BootstrapParams) (*environs.BootstrapResult, error) { // The client's authentication may have been reset when finding tools if the agent-version // attribute was updated so we need to re-authenticate. This will be a no-op if already authenticated. // An authenticated client is needed for the URL() call below. if err := authenticateClient(e); err != nil { - return "", "", nil, err + return nil, err } return common.Bootstrap(ctx, e, args) } -func (e *environ) StateServerInstances() ([]instance.Id, error) { - // Find all instances tagged with tags.JujuStateServer. +func (e *Environ) ControllerInstances() ([]instance.Id, error) { + // Find all instances tagged with tags.JujuController. 
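// In nova terms this scans server metadata; per the tests in this diff, a
// bootstrap machine carries metadata of roughly this shape (UUID elided):
//
//	map[string]string{
//		"juju-model-uuid":    "<model-uuid>",
//		"juju-is-controller": "true",
//	}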
instances, err := e.AllInstances() if err != nil { return nil, errors.Trace(err) @@ -732,7 +576,7 @@ ids := make([]instance.Id, 0, 1) for _, instance := range instances { detail := instance.(*openstackInstance).getServerDetail() - if detail.Metadata[tags.JujuStateServer] == "true" { + if detail.Metadata[tags.JujuController] == "true" { ids = append(ids, instance.Id()) } } @@ -742,7 +586,7 @@ return ids, nil } -func (e *environ) Config() *config.Config { +func (e *Environ) Config() *config.Config { return e.ecfg().Config } @@ -777,7 +621,7 @@ return client } -var authenticateClient = func(e *environ) error { +var authenticateClient = func(e *Environ) error { err := e.client.Authenticate() if err != nil { // Log the error in case there are any useful hints, @@ -788,12 +632,12 @@ Please ensure the credentials are correct. A common mistake is to specify the wrong tenant. Use the OpenStack "project" name -for tenant-name in your environment configuration.`) +for tenant-name in your model configuration.`) } return nil } -func (e *environ) SetConfig(cfg *config.Config) error { +func (e *Environ) SetConfig(cfg *config.Config) error { ecfg, err := providerInstance.newConfig(cfg) if err != nil { return err @@ -807,25 +651,15 @@ e.client = authClient(ecfg) e.novaUnlocked = nova.New(e.client) - - // To support upgrading from old environments, we continue to interface - // with Swift object storage. We do not use it except for upgrades, so - // new environments will work with OpenStack deployments that lack Swift. - e.storageUnlocked = &openstackstorage{ - containerName: ecfg.controlBucket(), - // this is possibly just a hack - if the ACL is swift.Private, - // the machine won't be able to get the tools (401 error) - containerACL: swift.PublicRead, - swift: swift.New(e.client)} return nil } // getKeystoneImageSource is an imagemetadata.ImageDataSourceFunc that // returns a DataSource using the "product-streams" keystone URL. func getKeystoneImageSource(env environs.Environ) (simplestreams.DataSource, error) { - e, ok := env.(*environ) + e, ok := env.(*Environ) if !ok { - return nil, errors.NotSupportedf("non-openstack environment") + return nil, errors.NotSupportedf("non-openstack model") } return e.getKeystoneDataSource(&e.keystoneImageDataSourceMutex, &e.keystoneImageDataSource, "product-streams") } @@ -833,14 +667,14 @@ // getKeystoneToolsSource is a tools.ToolsDataSourceFunc that // returns a DataSource using the "juju-tools" keystone URL. 
func getKeystoneToolsSource(env environs.Environ) (simplestreams.DataSource, error) { - e, ok := env.(*environ) + e, ok := env.(*Environ) if !ok { - return nil, errors.NotSupportedf("non-openstack environment") + return nil, errors.NotSupportedf("non-openstack model") } return e.getKeystoneDataSource(&e.keystoneToolsDataSourceMutex, &e.keystoneToolsDataSource, "juju-tools") } -func (e *environ) getKeystoneDataSource(mu *sync.Mutex, datasource *simplestreams.DataSource, keystoneName string) (simplestreams.DataSource, error) { +func (e *Environ) getKeystoneDataSource(mu *sync.Mutex, datasource *simplestreams.DataSource, keystoneName string) (simplestreams.DataSource, error) { mu.Lock() defer mu.Unlock() if *datasource != nil { @@ -860,18 +694,13 @@ if !e.Config().SSLHostnameVerification() { verify = utils.NoVerifySSLHostnames } - *datasource = simplestreams.NewURLDataSource("keystone catalog", url, verify) + *datasource = simplestreams.NewURLDataSource("keystone catalog", url, verify, simplestreams.SPECIFIC_CLOUD_DATA, false) return *datasource, nil } -// TODO(gz): Move this somewhere more reusable -const uuidPattern = "^([a-fA-F0-9]{8})-([a-fA-f0-9]{4})-([1-5][a-fA-f0-9]{3})-([a-fA-f0-9]{4})-([a-fA-f0-9]{12})$" - -var uuidRegexp = regexp.MustCompile(uuidPattern) - // resolveNetwork takes either a network id or label and returns a network id -func (e *environ) resolveNetwork(networkName string) (string, error) { - if uuidRegexp.MatchString(networkName) { +func (e *Environ) resolveNetwork(networkName string) (string, error) { + if utils.IsValidUUIDString(networkName) { // Network id supplied, assume valid as boot will fail if not return networkName, nil } @@ -897,7 +726,7 @@ // allocatePublicIP tries to find an available floating IP address, or // allocates a new one, returning it, or an error -func (e *environ) allocatePublicIP() (*nova.FloatingIP, error) { +func (e *Environ) allocatePublicIP() (*nova.FloatingIP, error) { fips, err := e.nova().ListFloatingIPs() if err != nil { return nil, err @@ -928,7 +757,7 @@ // assignPublicIP tries to assign the given floating IP address to the // specified server, or returns an error. -func (e *environ) assignPublicIP(fip *nova.FloatingIP, serverId string) (err error) { +func (e *Environ) assignPublicIP(fip *nova.FloatingIP, serverId string) (err error) { if fip == nil { return fmt.Errorf("cannot assign a nil public IP to %q", serverId) } @@ -948,19 +777,19 @@ } // DistributeInstances implements the state.InstanceDistributor policy. -func (e *environ) DistributeInstances(candidates, distributionGroup []instance.Id) ([]instance.Id, error) { +func (e *Environ) DistributeInstances(candidates, distributionGroup []instance.Id) ([]instance.Id, error) { return common.DistributeInstances(e, candidates, distributionGroup) } var availabilityZoneAllocations = common.AvailabilityZoneAllocations // MaintainInstance is specified in the InstanceBroker interface. -func (*environ) MaintainInstance(args environs.StartInstanceParams) error { +func (*Environ) MaintainInstance(args environs.StartInstanceParams) error { return nil } // StartInstance is specified in the InstanceBroker interface. 
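// The placement directive accepted here takes the form "zone=<name>", as
// exercised by the tests above; a sketch (the zone name is a placeholder):
//
//	params := environs.StartInstanceParams{Placement: "zone=test-available"}
//	result, err := env.StartInstance(params)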
-func (e *environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) { +func (e *Environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) { var availabilityZones []string if args.Placement != "" { placement, err := e.parsePlacement(args.Placement) @@ -1013,7 +842,7 @@ Series: series, Arches: arches, Constraints: args.Constraints, - }) + }, args.ImageMetadata) if err != nil { return nil, err } @@ -1027,13 +856,17 @@ if err := instancecfg.FinishInstanceConfig(args.InstanceConfig, e.Config()); err != nil { return nil, err } - userData, err := providerinit.ComposeUserData(args.InstanceConfig, nil, OpenstackRenderer{}) + cloudcfg, err := e.configurator.GetCloudConfig(args) + if err != nil { + return nil, errors.Trace(err) + } + userData, err := providerinit.ComposeUserData(args.InstanceConfig, cloudcfg, OpenstackRenderer{}) if err != nil { return nil, fmt.Errorf("cannot make user data: %v", err) } logger.Debugf("openstack user data; %d bytes", len(userData)) - var networks = []nova.ServerNetworks{} + var networks = e.firewaller.InitialNetworks() usingNetwork := e.ecfg().network() if usingNetwork != "" { networkId, err := e.resolveNetwork(usingNetwork) @@ -1056,18 +889,22 @@ } cfg := e.Config() - groups, err := e.setUpGroups(args.InstanceConfig.MachineId, cfg.APIPort()) + var groupNames = make([]nova.SecurityGroupName, 0) + groups, err := e.firewaller.SetUpGroups(args.InstanceConfig.MachineId, cfg.APIPort()) if err != nil { return nil, fmt.Errorf("cannot set up groups: %v", err) } - var groupNames = make([]nova.SecurityGroupName, len(groups)) - for i, g := range groups { - groupNames[i] = nova.SecurityGroupName{g.Name} - } + for _, g := range groups { + groupNames = append(groupNames, nova.SecurityGroupName{g.Name}) + } + eUUID, ok := e.Config().UUID() + if !ok { + return nil, errors.NotFoundf("UUID in environ config") + } machineName := resourceName( names.NewMachineTag(args.InstanceConfig.MachineId), - e.Config().Name(), + eUUID, ) var server *nova.Entity @@ -1082,6 +919,7 @@ AvailabilityZone: availZone, Metadata: args.InstanceConfig.Tags, } + e.configurator.ModifyRunServerOptions(&opts) for a := shortAttempt.Start(); a.Next(); { server, err = e.nova().RunServer(opts) if err == nil || !gooseerrors.IsNotFound(err) { @@ -1130,27 +968,14 @@ return ok && strings.Contains(gooseErr.Cause().Error(), "No valid host was found") } -func (e *environ) StopInstances(ids ...instance.Id) error { +func (e *Environ) StopInstances(ids ...instance.Id) error { // If in instance firewall mode, gather the security group names. - var securityGroupNames []string - if e.Config().FirewallMode() == config.FwInstance { - instances, err := e.Instances(ids) - if err == environs.ErrNoInstances { - return nil - } - securityGroupNames = make([]string, 0, len(ids)) - for _, inst := range instances { - if inst == nil { - continue - } - openstackName := inst.(*openstackInstance).getServerDetail().Name - lastDashPos := strings.LastIndex(openstackName, "-") - if lastDashPos == -1 { - return fmt.Errorf("cannot identify machine ID in openstack server name %q", openstackName) - } - securityGroupName := e.machineGroupName(openstackName[lastDashPos+1:]) - securityGroupNames = append(securityGroupNames, securityGroupName) - } + securityGroupNames, err := e.firewaller.GetSecurityGroups(ids...) 
+ if err == environs.ErrNoInstances { + return nil + } + if err != nil { + return err } logger.Debugf("terminating instances %v", ids) if err := e.terminateInstances(ids); err != nil { @@ -1162,7 +987,7 @@ return nil } -func (e *environ) isAliveServer(server nova.ServerDetail) bool { +func (e *Environ) isAliveServer(server nova.ServerDetail) bool { switch server.Status { // HPCloud uses "BUILD(spawning)" as an intermediate BUILD state // once networking is available. @@ -1172,7 +997,7 @@ return false } -func (e *environ) listServers(ids []instance.Id) ([]nova.ServerDetail, error) { +func (e *Environ) listServers(ids []instance.Id) ([]nova.ServerDetail, error) { wantedServers := make([]nova.ServerDetail, 0, len(ids)) if len(ids) == 1 { // Common case, single instance, may return NotFound @@ -1208,7 +1033,7 @@ // updateFloatingIPAddresses updates the instances with any floating IP address // that have been assigned to those instances. -func (e *environ) updateFloatingIPAddresses(instances map[string]instance.Instance) error { +func (e *Environ) updateFloatingIPAddresses(instances map[string]instance.Instance) error { fips, err := e.nova().ListFloatingIPs() if err != nil { return err @@ -1225,7 +1050,7 @@ return nil } -func (e *environ) Instances(ids []instance.Id) ([]instance.Instance, error) { +func (e *Environ) Instances(ids []instance.Id) ([]instance.Instance, error) { if len(ids) == 0 { return nil, nil } @@ -1280,13 +1105,22 @@ return insts, err } -func (e *environ) AllInstances() (insts []instance.Instance, err error) { +func (e *Environ) AllInstances() (insts []instance.Instance, err error) { servers, err := e.nova().ListServersDetail(e.machinesFilter()) if err != nil { return nil, err } instsById := make(map[string]instance.Instance) + cfg := e.Config() + eUUID, ok := cfg.UUID() + if !ok { + return nil, errors.NotFoundf("model UUID") + } for _, server := range servers { + modelUUID, ok := server.Metadata[tags.JujuModel] + if !ok || modelUUID != eUUID { + continue + } if e.isAliveServer(server) { var s = server // TODO(wallyworld): lookup the flavor details to fill in the instance type data @@ -1306,39 +1140,12 @@ return insts, err } -func (e *environ) Destroy() error { +func (e *Environ) Destroy() error { err := common.Destroy(e) if err != nil { return errors.Trace(err) } - novaClient := e.nova() - securityGroups, err := novaClient.ListSecurityGroups() - if err != nil { - return errors.Annotate(err, "cannot list security groups") - } - re, err := regexp.Compile(fmt.Sprintf("^%s(-\\d+)?$", e.jujuGroupName())) - if err != nil { - return errors.Trace(err) - } - globalGroupName := e.globalGroupName() - for _, group := range securityGroups { - if re.MatchString(group.Name) || group.Name == globalGroupName { - deleteSecurityGroup(novaClient, group.Name, group.Id) - } - } - return nil -} - -func (e *environ) globalGroupName() string { - return fmt.Sprintf("%s-global", e.jujuGroupName()) -} - -func (e *environ) machineGroupName(machineId string) string { - return fmt.Sprintf("%s-%s", e.jujuGroupName(), machineId) -} - -func (e *environ) jujuGroupName() string { - return fmt.Sprintf("juju-%s", e.name) + return e.firewaller.DeleteGlobalGroups() } func resourceName(tag names.Tag, envName string) string { @@ -1346,9 +1153,10 @@ } // machinesFilter returns a nova.Filter matching all machines in the environment. 
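Both AllInstances above and machinesFilter just below scope queries to a single model: the server-name filter narrows the nova listing, while the metadata check on tags.JujuModel discards stray servers from other models sharing the same OpenStack account. A cut-down sketch of the metadata check, assuming a plain map where the provider has nova's ServerDetail (the tag key is illustrative, not the real constant):

    package main

    import "fmt"

    // jujuModelTag stands in for the real tags.JujuModel key.
    const jujuModelTag = "juju-model-uuid"

    type server struct {
        Name     string
        Metadata map[string]string
    }

    // filterByModel keeps only servers tagged with the wanted model UUID;
    // untagged servers are skipped rather than guessed at.
    func filterByModel(servers []server, modelUUID string) []server {
        var out []server
        for _, s := range servers {
            if uuid, ok := s.Metadata[jujuModelTag]; ok && uuid == modelUUID {
                out = append(out, s)
            }
        }
        return out
    }

    func main() {
        all := []server{
            {Name: "juju-aaaa-machine-0", Metadata: map[string]string{jujuModelTag: "aaaa"}},
            {Name: "juju-bbbb-machine-0", Metadata: map[string]string{jujuModelTag: "bbbb"}},
            {Name: "stray-server"}, // no metadata at all
        }
        for _, s := range filterByModel(all, "aaaa") {
            fmt.Println(s.Name) // only juju-aaaa-machine-0
        }
    }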
-func (e *environ) machinesFilter() *nova.Filter { +func (e *Environ) machinesFilter() *nova.Filter { filter := nova.NewFilter() - filter.Set(nova.FilterServer, fmt.Sprintf("juju-%s-machine-\\d*", e.Config().Name())) + eUUID, _ := e.Config().UUID() + filter.Set(nova.FilterServer, fmt.Sprintf("juju-%s-machine-\\d*", eUUID)) return filter } @@ -1367,238 +1175,26 @@ return rules } -func (e *environ) openPortsInGroup(name string, portRanges []network.PortRange) error { - novaclient := e.nova() - group, err := novaclient.SecurityGroupByName(name) - if err != nil { - return err - } - rules := portsToRuleInfo(group.Id, portRanges) - for _, rule := range rules { - _, err := novaclient.CreateSecurityGroupRule(rule) - if err != nil { - // TODO: if err is not rule already exists, raise? - logger.Debugf("error creating security group rule: %v", err.Error()) - } - } - return nil -} - -// ruleMatchesPortRange checks if supplied nova security group rule matches the port range -func ruleMatchesPortRange(rule nova.SecurityGroupRule, portRange network.PortRange) bool { - if rule.IPProtocol == nil || rule.FromPort == nil || rule.ToPort == nil { - return false - } - return *rule.IPProtocol == portRange.Protocol && - *rule.FromPort == portRange.FromPort && - *rule.ToPort == portRange.ToPort -} - -func (e *environ) closePortsInGroup(name string, portRanges []network.PortRange) error { - if len(portRanges) == 0 { - return nil - } - novaclient := e.nova() - group, err := novaclient.SecurityGroupByName(name) - if err != nil { - return err - } - // TODO: Hey look ma, it's quadratic - for _, portRange := range portRanges { - for _, p := range (*group).Rules { - if !ruleMatchesPortRange(p, portRange) { - continue - } - err := novaclient.DeleteSecurityGroupRule(p.Id) - if err != nil { - return err - } - break - } - } - return nil -} - -func (e *environ) portsInGroup(name string) (portRanges []network.PortRange, err error) { - group, err := e.nova().SecurityGroupByName(name) - if err != nil { - return nil, err - } - for _, p := range (*group).Rules { - portRanges = append(portRanges, network.PortRange{ - Protocol: *p.IPProtocol, - FromPort: *p.FromPort, - ToPort: *p.ToPort, - }) - } - network.SortPortRanges(portRanges) - return portRanges, nil -} - -// TODO: following 30 lines nearly verbatim from environs/ec2 - -func (e *environ) OpenPorts(ports []network.PortRange) error { - if e.Config().FirewallMode() != config.FwGlobal { - return fmt.Errorf("invalid firewall mode %q for opening ports on environment", - e.Config().FirewallMode()) - } - if err := e.openPortsInGroup(e.globalGroupName(), ports); err != nil { - return err - } - logger.Infof("opened ports in global group: %v", ports) - return nil -} - -func (e *environ) ClosePorts(ports []network.PortRange) error { - if e.Config().FirewallMode() != config.FwGlobal { - return fmt.Errorf("invalid firewall mode %q for closing ports on environment", - e.Config().FirewallMode()) - } - if err := e.closePortsInGroup(e.globalGroupName(), ports); err != nil { - return err - } - logger.Infof("closed ports in global group: %v", ports) - return nil -} - -func (e *environ) Ports() ([]network.PortRange, error) { - if e.Config().FirewallMode() != config.FwGlobal { - return nil, fmt.Errorf("invalid firewall mode %q for retrieving ports from environment", - e.Config().FirewallMode()) - } - return e.portsInGroup(e.globalGroupName()) -} - -func (e *environ) Provider() environs.EnvironProvider { - return &providerInstance -} - -func (e *environ) setUpGlobalGroup(groupName string, apiPort 
int) (nova.SecurityGroup, error) { - return e.ensureGroup(groupName, - []nova.RuleInfo{ - { - IPProtocol: "tcp", - FromPort: 22, - ToPort: 22, - Cidr: "0.0.0.0/0", - }, - { - IPProtocol: "tcp", - FromPort: apiPort, - ToPort: apiPort, - Cidr: "0.0.0.0/0", - }, - { - IPProtocol: "tcp", - FromPort: 1, - ToPort: 65535, - }, - { - IPProtocol: "udp", - FromPort: 1, - ToPort: 65535, - }, - { - IPProtocol: "icmp", - FromPort: -1, - ToPort: -1, - }, - }) -} - -// setUpGroups creates the security groups for the new machine, and -// returns them. -// -// Instances are tagged with a group so they can be distinguished from -// other instances that might be running on the same OpenStack account. -// In addition, a specific machine security group is created for each -// machine, so that its firewall rules can be configured per machine. -// -// Note: ideally we'd have a better way to determine group membership so that 2 -// people that happen to share an openstack account and name their environment -// "openstack" don't end up destroying each other's machines. -func (e *environ) setUpGroups(machineId string, apiPort int) ([]nova.SecurityGroup, error) { - jujuGroup, err := e.setUpGlobalGroup(e.jujuGroupName(), apiPort) - if err != nil { - return nil, err - } - var machineGroup nova.SecurityGroup - switch e.Config().FirewallMode() { - case config.FwInstance: - machineGroup, err = e.ensureGroup(e.machineGroupName(machineId), nil) - case config.FwGlobal: - machineGroup, err = e.ensureGroup(e.globalGroupName(), nil) - } - if err != nil { - return nil, err - } - groups := []nova.SecurityGroup{jujuGroup, machineGroup} - if e.ecfg().useDefaultSecurityGroup() { - defaultGroup, err := e.nova().SecurityGroupByName("default") - if err != nil { - return nil, fmt.Errorf("loading default security group: %v", err) - } - groups = append(groups, *defaultGroup) - } - return groups, nil -} - -// zeroGroup holds the zero security group. -var zeroGroup nova.SecurityGroup - -// ensureGroup returns the security group with name and perms. -// If a group with name does not exist, one will be created. -// If it exists, its permissions are set to perms. -func (e *environ) ensureGroup(name string, rules []nova.RuleInfo) (nova.SecurityGroup, error) { - novaClient := e.nova() - // First attempt to look up an existing group by name. - group, err := novaClient.SecurityGroupByName(name) - if err == nil { - // Group exists, so assume it is correctly set up and return it. - // TODO(jam): 2013-09-18 http://pad.lv/121795 - // We really should verify the group is set up correctly, - // because deleting and re-creating environments can get us bad - // groups (especially if they were set up under Python) - return *group, nil - } - // Doesn't exist, so try and create it. - group, err = novaClient.CreateSecurityGroup(name, "juju group") - if err != nil { - if !gooseerrors.IsDuplicateValue(err) { - return zeroGroup, err - } else { - // We just tried to create a duplicate group, so load the existing group. - group, err = novaClient.SecurityGroupByName(name) - if err != nil { - return zeroGroup, err - } - return *group, nil - } - } - // The new group is created so now add the rules. - group.Rules = make([]nova.SecurityGroupRule, len(rules)) - for i, rule := range rules { - rule.ParentGroupId = group.Id - if rule.Cidr == "" { - // http://pad.lv/1226996 Rules that don't have a CIDR - // are meant to apply only to this group. 
If you don't - // supply CIDR or GroupId then openstack assumes you - // mean CIDR=0.0.0.0/0 - rule.GroupId = &group.Id - } - groupRule, err := novaClient.CreateSecurityGroupRule(rule) - if err != nil && !gooseerrors.IsDuplicateValue(err) { - return zeroGroup, err - } - group.Rules[i] = *groupRule - } - return *group, nil +func (e *Environ) OpenPorts(ports []network.PortRange) error { + return e.firewaller.OpenPorts(ports) +} + +func (e *Environ) ClosePorts(ports []network.PortRange) error { + return e.firewaller.ClosePorts(ports) +} + +func (e *Environ) Ports() ([]network.PortRange, error) { + return e.firewaller.Ports() +} + +func (e *Environ) Provider() environs.EnvironProvider { + return providerInstance } // deleteSecurityGroups deletes the given security groups. If a security // group is also used by another environment (see bug #1300755), an attempt // to delete this group fails. A warning is logged in this case. -func (e *environ) deleteSecurityGroups(securityGroupNames []string) error { +func (e *Environ) deleteSecurityGroups(securityGroupNames []string) error { novaclient := e.nova() allSecurityGroups, err := novaclient.ListSecurityGroups() if err != nil { @@ -1615,40 +1211,7 @@ return nil } -// deleteSecurityGroup attempts to delete the security group. Should it fail, -// the deletion is retried due to timing issues in openstack. A security group -// cannot be deleted while it is in use. Theoretically we terminate all the -// instances before we attempt to delete the associated security groups, but -// in practice nova hasn't always finished with the instance before it -// returns, so there is a race condition where we think the instance is -// terminated and hence attempt to delete the security groups but nova still -// has it around internally. To attempt to catch this timing issue, deletion -// of the groups is tried multiple times. -func deleteSecurityGroup(novaclient *nova.Client, name, id string) { - attempts := utils.AttemptStrategy{ - Total: 30 * time.Second, - Delay: time.Second, - } - logger.Debugf("deleting security group %q", name) - i := 0 - for attempt := attempts.Start(); attempt.Next(); { - err := novaclient.DeleteSecurityGroup(id) - if err == nil { - return - } - i++ - if i%4 == 0 { - message := fmt.Sprintf("waiting to delete security group %q", name) - if i != 4 { - message = "still " + message - } - logger.Debugf(message) - } - } - logger.Warningf("cannot delete security group %q. Used by another environment?", name) -} - -func (e *environ) terminateInstances(ids []instance.Id) error { +func (e *Environ) terminateInstances(ids []instance.Id) error { if len(ids) == 0 { return nil } @@ -1668,7 +1231,7 @@ } // MetadataLookupParams returns parameters which are used to query simplestreams metadata. -func (e *environ) MetadataLookupParams(region string) (*simplestreams.MetadataLookupParams, error) { +func (e *Environ) MetadataLookupParams(region string) (*simplestreams.MetadataLookupParams, error) { if region == "" { region = e.ecfg().region() } @@ -1685,11 +1248,11 @@ } // Region is specified in the HasRegion interface. 
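All of the inline security-group plumbing deleted above (openPortsInGroup, closePortsInGroup, portsInGroup and friends) now lives behind the environ's firewaller field, so OpenPorts, ClosePorts and Ports shrink to one-line delegations. A minimal sketch of the shape of that seam, with a trimmed-down interface standing in for the real openstack.Firewaller (which also covers group setup and teardown):

    package main

    import "fmt"

    // firewaller is a cut-down stand-in for openstack.Firewaller.
    type firewaller interface {
        OpenPorts(ports []string) error
        ClosePorts(ports []string) error
    }

    // novaGroupFirewaller models the default behaviour: port rules are
    // stored as nova security-group rules.
    type novaGroupFirewaller struct{}

    func (novaGroupFirewaller) OpenPorts(ports []string) error {
        fmt.Println("adding security group rules:", ports)
        return nil
    }

    func (novaGroupFirewaller) ClosePorts(ports []string) error {
        fmt.Println("removing security group rules:", ports)
        return nil
    }

    // environ no longer knows how rules are stored; it only delegates.
    type environ struct{ firewaller firewaller }

    func (e *environ) OpenPorts(ports []string) error  { return e.firewaller.OpenPorts(ports) }
    func (e *environ) ClosePorts(ports []string) error { return e.firewaller.ClosePorts(ports) }

    func main() {
        e := &environ{firewaller: novaGroupFirewaller{}}
        _ = e.OpenPorts([]string{"tcp/17070"})
    }

The rackspace firewaller added later in this diff plugs into the same seam, replacing security groups with per-instance iptables rules applied over SSH.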
-func (e *environ) Region() (simplestreams.CloudSpec, error) {
+func (e *Environ) Region() (simplestreams.CloudSpec, error) {
 	return e.cloudSpec(e.ecfg().region())
 }

-func (e *environ) cloudSpec(region string) (simplestreams.CloudSpec, error) {
+func (e *Environ) cloudSpec(region string) (simplestreams.CloudSpec, error) {
 	return simplestreams.CloudSpec{
 		Region:   region,
 		Endpoint: e.ecfg().authURL(),
@@ -1697,7 +1260,7 @@
 }

 // TagInstance implements environs.InstanceTagger.
-func (e *environ) TagInstance(id instance.Id, tags map[string]string) error {
+func (e *Environ) TagInstance(id instance.Id, tags map[string]string) error {
 	if err := e.nova().SetServerMetadata(string(id), tags); err != nil {
 		return errors.Annotate(err, "setting server metadata")
 	}

=== added file 'src/github.com/juju/juju/provider/openstack/provider_configurator.go'
--- src/github.com/juju/juju/provider/openstack/provider_configurator.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/openstack/provider_configurator.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,55 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package openstack
+
+import (
+	"github.com/juju/schema"
+	"gopkg.in/goose.v1/nova"
+
+	"github.com/juju/juju/cloudconfig/cloudinit"
+	"github.com/juju/juju/environs"
+)
+
+// ProviderConfigurator allows customization of the openstack provider's
+// behaviour; it is implemented by other providers that embed the openstack
+// provider.
+type ProviderConfigurator interface {
+	// GetConfigDefaults returns this provider's configuration default values, if any.
+	GetConfigDefaults() schema.Defaults
+
+	// ModifyRunServerOptions adjusts the default RunServerOptions before a new server is created.
+	ModifyRunServerOptions(options *nova.RunServerOpts)
+
+	// GetCloudConfig returns the default cloud config, which may differ from
+	// provider to provider.
+	GetCloudConfig(args environs.StartInstanceParams) (cloudinit.CloudConfig, error)
+}
+
+type defaultConfigurator struct {
+}
+
+// ModifyRunServerOptions implements ProviderConfigurator interface.
+func (c *defaultConfigurator) ModifyRunServerOptions(options *nova.RunServerOpts) {
+}
+
+// GetCloudConfig implements ProviderConfigurator interface.
+func (c *defaultConfigurator) GetCloudConfig(args environs.StartInstanceParams) (cloudinit.CloudConfig, error) {
+	return nil, nil
+}
+
+// GetConfigDefaults implements ProviderConfigurator interface.
+func (c *defaultConfigurator) GetConfigDefaults() schema.Defaults {
+	return schema.Defaults{
+		"username":             "",
+		"password":             "",
+		"tenant-name":          "",
+		"auth-url":             "",
+		"auth-mode":            string(AuthUserPass),
+		"access-key":           "",
+		"secret-key":           "",
+		"region":               "",
+		"use-floating-ip":      false,
+		"use-default-secgroup": false,
+		"network":              "",
+	}
+}

=== modified file 'src/github.com/juju/juju/provider/openstack/provider_test.go'
--- src/github.com/juju/juju/provider/openstack/provider_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/provider/openstack/provider_test.go	2016-03-22 15:18:22 +0000
@@ -4,18 +4,21 @@
 package openstack_test

 import (
+	gitjujutesting "github.com/juju/testing"
 	jc "github.com/juju/testing/checkers"
 	gc "gopkg.in/check.v1"
 	"gopkg.in/goose.v1/nova"

-	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/cloud"
+	"github.com/juju/juju/environs"
 	"github.com/juju/juju/network"
 	"github.com/juju/juju/provider/openstack"
-	"github.com/juju/juju/testing"
 )
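The ProviderConfigurator interface added above is the second extension seam next to Firewaller: an embedding provider can adjust configuration defaults, supply its own cloud-init config, and tweak nova boot options without forking the openstack code. A sketch of how the ModifyRunServerOptions hook would be consumed (the types are cut-down stand-ins for nova.RunServerOpts and the real interface):

    package main

    import "fmt"

    // runServerOpts stands in for nova.RunServerOpts.
    type runServerOpts struct{ ConfigDrive bool }

    // configurator is a one-method stand-in for ProviderConfigurator.
    type configurator interface {
        ModifyRunServerOptions(opts *runServerOpts)
    }

    type defaultConfigurator struct{}

    // The default hook leaves the options untouched.
    func (defaultConfigurator) ModifyRunServerOptions(*runServerOpts) {}

    // configDriveConfigurator mirrors what the rackspace provider does:
    // force the config drive on so user data reaches the instance.
    type configDriveConfigurator struct{}

    func (configDriveConfigurator) ModifyRunServerOptions(opts *runServerOpts) {
        opts.ConfigDrive = true
    }

    func startServer(c configurator) runServerOpts {
        opts := runServerOpts{}         // provider-computed defaults go here
        c.ModifyRunServerOptions(&opts) // hook point just before nova RunServer
        return opts
    }

    func main() {
        fmt.Println(startServer(defaultConfigurator{}))     // {false}
        fmt.Println(startServer(configDriveConfigurator{})) // {true}
    }

 // localTests contains tests which do not require a live service or test double to run.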
-type localTests struct{} +type localTests struct { + gitjujutesting.IsolationSuite +} var _ = gc.Suite(&localTests{}) @@ -349,16 +352,30 @@ } } -func (t *localTests) TestPrepareSetsControlBucket(c *gc.C) { - attrs := testing.FakeConfig().Merge(testing.Attrs{ - "type": "openstack", +func (s *localTests) TestDetectRegionsNoRegionName(c *gc.C) { + _, err := s.detectRegions(c) + c.Assert(err, gc.ErrorMatches, "OS_REGION_NAME environment variable not set") +} + +func (s *localTests) TestDetectRegionsNoAuthURL(c *gc.C) { + s.PatchEnvironment("OS_REGION_NAME", "oceania") + _, err := s.detectRegions(c) + c.Assert(err, gc.ErrorMatches, "OS_AUTH_URL environment variable not set") +} + +func (s *localTests) TestDetectRegions(c *gc.C) { + s.PatchEnvironment("OS_REGION_NAME", "oceania") + s.PatchEnvironment("OS_AUTH_URL", "http://keystone.internal") + regions, err := s.detectRegions(c) + c.Assert(err, jc.ErrorIsNil) + c.Assert(regions, jc.DeepEquals, []cloud.Region{ + {Name: "oceania", Endpoint: "http://keystone.internal"}, }) - cfg, err := config.New(config.NoDefaults, attrs) - c.Assert(err, jc.ErrorIsNil) - - cfg, err = openstack.ProviderInstance.PrepareForCreateEnvironment(cfg) - c.Assert(err, jc.ErrorIsNil) - - bucket := cfg.UnknownAttrs()["control-bucket"] - c.Assert(bucket, gc.Matches, "[a-f0-9]{32}") +} + +func (s *localTests) detectRegions(c *gc.C) ([]cloud.Region, error) { + provider, err := environs.Provider("openstack") + c.Assert(err, jc.ErrorIsNil) + c.Assert(provider, gc.Implements, new(environs.CloudRegionDetector)) + return provider.(environs.CloudRegionDetector).DetectRegions() } === added file 'src/github.com/juju/juju/provider/openstack/upgrade.go' --- src/github.com/juju/juju/provider/openstack/upgrade.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/openstack/upgrade.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package openstack + +import ( + "fmt" + "strings" + + "github.com/juju/errors" + "github.com/juju/juju/version" + "gopkg.in/goose.v1/nova" +) + +// RunUpgradeStepsFor implements provider.Upgradable +func (e *Environ) RunUpgradeStepsFor(ver version.Number) error { + switch ver { + case version.Number{Major: 1, Minor: 26}: + if err := addUUIDToSecurityGroupNames(e); err != nil { + return errors.Annotate(err, "upgrading security group names in upgrade step for version 1.26") + } + if err := addUUIDToMachineNames(e); err != nil { + return errors.Annotate(err, "upgrading security machine names in upgrade step for version 1.26") + } + } + return nil +} + +func replaceNameWithID(oldName, envName, eUUID string) (string, bool, error) { + prefix := "juju-" + envName + if !strings.HasPrefix(oldName, prefix) { + return "", false, nil + } + // This might be an env with a name that shares prefix with our current one. 
+ if len(prefix) < len(oldName) && !strings.HasPrefix(oldName, prefix+"-") { + return "", false, nil + } + newPrefix := "juju-" + eUUID + return strings.Replace(oldName, prefix, newPrefix, -1), true, nil +} + +func addUUIDToSecurityGroupNames(e *Environ) error { + nova := e.nova() + groups, err := nova.ListSecurityGroups() + if err != nil { + return errors.Annotate(err, "upgrading instance names") + } + cfg := e.Config() + eName := cfg.Name() + eUUID, ok := cfg.UUID() + if !ok { + return errors.NotFoundf("model uuid for model %q", eName) + } + for _, group := range groups { + newName, ok, err := replaceNameWithID(group.Name, eName, eUUID) + if err != nil { + return errors.Annotate(err, "generating the new security group name") + } + if !ok { + continue + } + // Name should have uuid instead of name + _, err = nova.UpdateSecurityGroup(group.Id, newName, group.Description) + if err != nil { + return errors.Annotatef(err, "upgrading security group name from %q to %q", group.Name, newName) + } + + } + return nil +} + +// oldMachinesFilter returns a nova.Filter matching all machines in the environment +// that use the old name schema (juju-EnvironmentName-number). +func oldMachinesFilter(e *Environ) *nova.Filter { + filter := nova.NewFilter() + filter.Set(nova.FilterServer, fmt.Sprintf("juju-%s-machine-\\d*", e.Config().Name())) + return filter +} + +func addUUIDToMachineNames(e *Environ) error { + nova := e.nova() + servers, err := nova.ListServers(oldMachinesFilter(e)) + if err != nil { + return errors.Annotate(err, "upgrading server names") + } + cfg := e.Config() + eName := cfg.Name() + eUUID, ok := cfg.UUID() + if !ok { + return errors.NotFoundf("model uuid for model %q", eName) + } + for _, server := range servers { + newName, ok, err := replaceNameWithID(server.Name, eName, eUUID) + if err != nil { + return errors.Annotate(err, "generating the new server name") + } + if !ok { + continue + } + // Name should have uuid instead of name + _, err = nova.UpdateServerName(server.Id, newName) + if err != nil { + return errors.Annotatef(err, "upgrading machine name from %q to %q", server.Name, newName) + } + + } + return nil +} === modified file 'src/github.com/juju/juju/provider/openstack/userdata.go' --- src/github.com/juju/juju/provider/openstack/userdata.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/openstack/userdata.go 2016-03-22 15:18:22 +0000 @@ -7,20 +7,21 @@ import ( "github.com/juju/errors" "github.com/juju/utils" + jujuos "github.com/juju/utils/os" + "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/providerinit/renderers" - "github.com/juju/juju/version" ) type OpenstackRenderer struct{} -func (OpenstackRenderer) EncodeUserdata(udata []byte, vers version.OSType) ([]byte, error) { - switch vers { - case version.Ubuntu, version.CentOS: - return utils.Gzip(udata), nil - case version.Windows: - return renderers.WinEmbedInScript(udata), nil +func (OpenstackRenderer) Render(cfg cloudinit.CloudConfig, os jujuos.OSType) ([]byte, error) { + switch os { + case jujuos.Ubuntu, jujuos.CentOS: + return renderers.RenderYAML(cfg, utils.Gzip) + case jujuos.Windows: + return renderers.RenderYAML(cfg, renderers.WinEmbedInScript) default: - return nil, errors.Errorf("Cannot encode userdata for OS: %s", vers) + return nil, errors.Errorf("Cannot encode userdata for OS: %s", os.String()) } } === modified file 'src/github.com/juju/juju/provider/openstack/userdata_test.go' --- src/github.com/juju/juju/provider/openstack/userdata_test.go 2015-10-23 18:28:45 
+0000 +++ src/github.com/juju/juju/provider/openstack/userdata_test.go 2016-03-22 15:18:22 +0000 @@ -7,12 +7,13 @@ import ( jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/os" gc "gopkg.in/check.v1" + "github.com/juju/juju/cloudconfig/cloudinit/cloudinittest" "github.com/juju/juju/cloudconfig/providerinit/renderers" "github.com/juju/juju/provider/openstack" "github.com/juju/juju/testing" - "github.com/juju/juju/version" ) type UserdataSuite struct { @@ -23,28 +24,30 @@ func (s *UserdataSuite) TestOpenstackUnix(c *gc.C) { renderer := openstack.OpenstackRenderer{} - data := []byte("test") - result, err := renderer.EncodeUserdata(data, version.Ubuntu) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, utils.Gzip(data)) - - data = []byte("test") - result, err = renderer.EncodeUserdata(data, version.CentOS) - c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, utils.Gzip(data)) + cloudcfg := &cloudinittest.CloudConfig{YAML: []byte("yaml")} + + result, err := renderer.Render(cloudcfg, os.Ubuntu) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, utils.Gzip(cloudcfg.YAML)) + + result, err = renderer.Render(cloudcfg, os.CentOS) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result, jc.DeepEquals, utils.Gzip(cloudcfg.YAML)) } func (s *UserdataSuite) TestOpenstackWindows(c *gc.C) { renderer := openstack.OpenstackRenderer{} - data := []byte("test") - result, err := renderer.EncodeUserdata(data, version.Windows) + cloudcfg := &cloudinittest.CloudConfig{YAML: []byte("yaml")} + + result, err := renderer.Render(cloudcfg, os.Windows) c.Assert(err, jc.ErrorIsNil) - c.Assert(result, jc.DeepEquals, renderers.WinEmbedInScript(data)) + c.Assert(result, jc.DeepEquals, renderers.WinEmbedInScript(cloudcfg.YAML)) } func (s *UserdataSuite) TestOpenstackUnknownOS(c *gc.C) { renderer := openstack.OpenstackRenderer{} - result, err := renderer.EncodeUserdata(nil, version.Arch) + cloudcfg := &cloudinittest.CloudConfig{} + result, err := renderer.Render(cloudcfg, os.Arch) c.Assert(result, gc.IsNil) c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Arch") } === modified file 'src/github.com/juju/juju/provider/provider.go' --- src/github.com/juju/juju/provider/provider.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/provider.go 2016-03-22 15:18:22 +0000 @@ -9,18 +9,6 @@ const ( Joyent = "joyent" - Local = "local" MAAS = "maas" EC2 = "ec2" ) - -// IsManual returns true iff the specified provider -// type refers to the manual provider. -func IsManual(provider string) bool { - switch provider { - case "null", "manual": - return true - default: - return false - } -} === added directory 'src/github.com/juju/juju/provider/rackspace' === added file 'src/github.com/juju/juju/provider/rackspace/environ.go' --- src/github.com/juju/juju/provider/rackspace/environ.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/rackspace/environ.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,85 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package rackspace
+
+import (
+	"io/ioutil"
+	"os"
+	"time"
+
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/cloudconfig/instancecfg"
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/provider/common"
+	"github.com/juju/juju/state/multiwatcher"
+	jujuos "github.com/juju/utils/os"
+	"github.com/juju/utils/series"
+	"github.com/juju/utils/ssh"
+)
+
+type environ struct {
+	environs.Environ
+}
+
+var bootstrap = common.Bootstrap
+
+// Bootstrap implements environs.Environ.
+func (e environ) Bootstrap(ctx environs.BootstrapContext, params environs.BootstrapParams) (*environs.BootstrapResult, error) {
+	// We can't delegate to the openstack provider as usual here, because
+	// common.Bootstrap must be passed the correct (outer) environ.
+	return bootstrap(ctx, e, params)
+}
+
+func isController(mcfg *instancecfg.InstanceConfig) bool {
+	return multiwatcher.AnyJobNeedsState(mcfg.Jobs...)
+}
+
+var waitSSH = common.WaitSSH
+
+// StartInstance implements environs.Environ.
+func (e environ) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) {
+	osString, err := series.GetOSFromSeries(args.Tools.OneSeries())
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	fwmode := e.Config().FirewallMode()
+	if osString == jujuos.Windows && fwmode != config.FwNone {
+		return nil, errors.Errorf("rackspace provider doesn't support firewalls for windows instances")
+
+	}
+	r, err := e.Environ.StartInstance(args)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	if fwmode != config.FwNone {
+		interrupted := make(chan os.Signal, 1)
+		timeout := config.SSHTimeoutOpts{
+			Timeout:        time.Minute * 5,
+			RetryDelay:     time.Second * 5,
+			AddressesDelay: time.Second * 20,
+		}
+		addr, err := waitSSH(ioutil.Discard, interrupted, ssh.DefaultClient, common.GetCheckNonceCommand(args.InstanceConfig), &common.RefreshableInstance{r.Instance, e}, timeout)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		client := newInstanceConfigurator(addr)
+		apiPort := 0
+		if isController(args.InstanceConfig) {
+			apiPort = args.InstanceConfig.StateServingInfo.APIPort
+		}
+		err = client.DropAllPorts([]int{apiPort, 22}, addr)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+	return r, nil
+}
+
+var newInstanceConfigurator = common.NewSshInstanceConfigurator
+
+// Provider implements environs.Environ.
+func (e environ) Provider() environs.EnvironProvider {
+	return providerInstance
+}

=== added file 'src/github.com/juju/juju/provider/rackspace/environ_test.go'
--- src/github.com/juju/juju/provider/rackspace/environ_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/rackspace/environ_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,293 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package rackspace_test + +import ( + "io" + "os" + + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cloudconfig/instancecfg" + "github.com/juju/juju/constraints" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/instance" + "github.com/juju/juju/network" + "github.com/juju/juju/provider/common" + "github.com/juju/juju/provider/rackspace" + "github.com/juju/juju/testing" + "github.com/juju/juju/tools" + "github.com/juju/juju/version" + "github.com/juju/utils/ssh" +) + +type environSuite struct { + testing.BaseSuite + environ environs.Environ + innerEnviron *fakeEnviron +} + +var _ = gc.Suite(&environSuite{}) + +func (s *environSuite) SetUpTest(c *gc.C) { + s.innerEnviron = new(fakeEnviron) + s.environ = rackspace.NewEnviron(s.innerEnviron) +} + +func (s *environSuite) TestBootstrap(c *gc.C) { + s.PatchValue(rackspace.Bootstrap, func(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams) (*environs.BootstrapResult, error) { + return s.innerEnviron.Bootstrap(ctx, args) + }) + s.environ.Bootstrap(nil, environs.BootstrapParams{}) + c.Check(s.innerEnviron.Pop().name, gc.Equals, "Bootstrap") +} + +func (s *environSuite) TestStartInstance(c *gc.C) { + configurator := &fakeConfigurator{} + s.PatchValue(rackspace.WaitSSH, func(stdErr io.Writer, interrupted <-chan os.Signal, client ssh.Client, checkHostScript string, inst common.Addresser, timeout config.SSHTimeoutOpts) (addr string, err error) { + addresses, err := inst.Addresses() + if err != nil { + return "", err + } + return addresses[0].Value, nil + }) + s.PatchValue(rackspace.NewInstanceConfigurator, func(host string) common.InstanceConfigurator { + return configurator + }) + config, err := config.New(config.UseDefaults, map[string]interface{}{ + "name": "some-name", + "type": "some-type", + "authorized-keys": "key", + }) + c.Assert(err, gc.IsNil) + err = s.environ.SetConfig(config) + c.Assert(err, gc.IsNil) + _, err = s.environ.StartInstance(environs.StartInstanceParams{ + InstanceConfig: &instancecfg.InstanceConfig{ + Config: config, + }, + Tools: tools.List{&tools.Tools{ + Version: version.Binary{Series: "trusty"}, + }}, + }) + c.Check(err, gc.IsNil) + c.Check(s.innerEnviron.Pop().name, gc.Equals, "StartInstance") + dropParams := configurator.Pop() + c.Check(dropParams.name, gc.Equals, "DropAllPorts") + c.Check(dropParams.params[1], gc.Equals, "1.1.1.1") +} + +type methodCall struct { + name string + params []interface{} +} + +type fakeEnviron struct { + config *config.Config + methodCalls []methodCall +} + +func (p *fakeEnviron) Push(name string, params ...interface{}) { + p.methodCalls = append(p.methodCalls, methodCall{name, params}) +} + +func (p *fakeEnviron) Pop() methodCall { + m := p.methodCalls[0] + p.methodCalls = p.methodCalls[1:] + return m +} + +func (p *fakeEnviron) Open(cfg *config.Config) (environs.Environ, error) { + p.Push("Open", cfg) + return nil, nil +} + +func (e *fakeEnviron) Bootstrap(ctx environs.BootstrapContext, params environs.BootstrapParams) (*environs.BootstrapResult, error) { + e.Push("Bootstrap", ctx, params) + return nil, nil +} + +func (e *fakeEnviron) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) { + e.Push("StartInstance", args) + return &environs.StartInstanceResult{ + Instance: &fakeInstance{}, + }, nil +} + +func (e *fakeEnviron) StopInstances(ids ...instance.Id) error { + e.Push("StopInstances", ids) + return nil +} + +func (e *fakeEnviron) AllInstances() ([]instance.Instance, 
error) { + e.Push("AllInstances") + return nil, nil +} + +func (e *fakeEnviron) MaintainInstance(args environs.StartInstanceParams) error { + e.Push("MaintainInstance", args) + return nil +} + +func (e *fakeEnviron) Config() *config.Config { + return e.config +} + +func (e *fakeEnviron) SupportedArchitectures() ([]string, error) { + e.Push("SupportedArchitectures") + return nil, nil +} + +func (e *fakeEnviron) SupportsUnitPlacement() error { + e.Push("SupportsUnitPlacement") + return nil +} + +func (e *fakeEnviron) ConstraintsValidator() (constraints.Validator, error) { + e.Push("ConstraintsValidator") + return nil, nil +} + +func (e *fakeEnviron) SetConfig(cfg *config.Config) error { + e.config = cfg + return nil +} + +func (e *fakeEnviron) Instances(ids []instance.Id) ([]instance.Instance, error) { + e.Push("Instances", ids) + return []instance.Instance{&fakeInstance{}}, nil +} + +func (e *fakeEnviron) ControllerInstances() ([]instance.Id, error) { + e.Push("ControllerInstances") + return nil, nil +} + +func (e *fakeEnviron) Destroy() error { + e.Push("Destroy") + return nil +} + +func (e *fakeEnviron) OpenPorts(ports []network.PortRange) error { + e.Push("OpenPorts", ports) + return nil +} + +func (e *fakeEnviron) ClosePorts(ports []network.PortRange) error { + e.Push("ClosePorts", ports) + return nil +} + +func (e *fakeEnviron) Ports() ([]network.PortRange, error) { + e.Push("Ports") + return nil, nil +} + +func (e *fakeEnviron) Provider() environs.EnvironProvider { + e.Push("Provider") + return nil +} + +func (e *fakeEnviron) PrecheckInstance(series string, cons constraints.Value, placement string) error { + e.Push("PrecheckInstance", series, cons, placement) + return nil +} + +type fakeConfigurator struct { + methodCalls []methodCall +} + +func (p *fakeConfigurator) Push(name string, params ...interface{}) { + p.methodCalls = append(p.methodCalls, methodCall{name, params}) +} + +func (p *fakeConfigurator) Pop() methodCall { + m := p.methodCalls[0] + p.methodCalls = p.methodCalls[1:] + return m +} + +func (e *fakeConfigurator) DropAllPorts(exceptPorts []int, addr string) error { + e.Push("DropAllPorts", exceptPorts, addr) + return nil +} + +func (e *fakeConfigurator) ConfigureExternalIpAddress(apiPort int) error { + e.Push("ConfigureExternalIpAddress", apiPort) + return nil +} + +func (e *fakeConfigurator) ChangePorts(ipAddress string, insert bool, ports []network.PortRange) error { + e.Push("ChangePorts", ipAddress, insert, ports) + return nil +} + +func (e *fakeConfigurator) FindOpenPorts() ([]network.PortRange, error) { + e.Push("FindOpenPorts") + return nil, nil +} + +func (e *fakeConfigurator) AddIpAddress(nic string, addr string) error { + e.Push("AddIpAddress", nic, addr) + return nil +} + +func (e *fakeConfigurator) ReleaseIpAddress(addr string) error { + e.Push("AddIpAddress", addr) + return nil +} + +type fakeInstance struct { + methodCalls []methodCall +} + +func (p *fakeInstance) Push(name string, params ...interface{}) { + p.methodCalls = append(p.methodCalls, methodCall{name, params}) +} + +func (p *fakeInstance) Pop() methodCall { + m := p.methodCalls[0] + p.methodCalls = p.methodCalls[1:] + return m +} + +func (e *fakeInstance) Id() instance.Id { + e.Push("Id") + return instance.Id("") +} + +func (e *fakeInstance) Status() string { + e.Push("Status") + return "" +} + +func (e *fakeInstance) Refresh() error { + e.Push("Refresh") + return nil +} + +func (e *fakeInstance) Addresses() ([]network.Address, error) { + e.Push("Addresses") + return 
[]network.Address{network.Address{
+		Value: "1.1.1.1",
+		Type:  network.IPv4Address,
+		Scope: network.ScopePublic,
+	}}, nil
+}
+
+func (e *fakeInstance) OpenPorts(machineId string, ports []network.PortRange) error {
+	e.Push("OpenPorts", machineId, ports)
+	return nil
+}
+
+func (e *fakeInstance) ClosePorts(machineId string, ports []network.PortRange) error {
+	e.Push("ClosePorts", machineId, ports)
+	return nil
+}
+
+func (e *fakeInstance) Ports(machineId string) ([]network.PortRange, error) {
+	e.Push("Ports", machineId)
+	return nil, nil
+}

=== added file 'src/github.com/juju/juju/provider/rackspace/export_test.go'
--- src/github.com/juju/juju/provider/rackspace/export_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/rackspace/export_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,22 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package rackspace
+
+import (
+	"github.com/juju/juju/environs"
+)
+
+func NewProvider(innerProvider environs.EnvironProvider) environs.EnvironProvider {
+	return environProvider{innerProvider}
+}
+
+func NewEnviron(innerEnviron environs.Environ) environs.Environ {
+	return environ{innerEnviron}
+}
+
+var Bootstrap = &bootstrap
+
+var WaitSSH = &waitSSH
+
+var NewInstanceConfigurator = &newInstanceConfigurator

=== added file 'src/github.com/juju/juju/provider/rackspace/firewaller.go'
--- src/github.com/juju/juju/provider/rackspace/firewaller.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/provider/rackspace/firewaller.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,120 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package rackspace
+
+import (
+	"github.com/juju/errors"
+	"gopkg.in/goose.v1/nova"
+
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/instance"
+	"github.com/juju/juju/network"
+	"github.com/juju/juju/provider/common"
+	"github.com/juju/juju/provider/openstack"
+)
+
+type firewallerFactory struct {
+}
+
+var _ openstack.FirewallerFactory = (*firewallerFactory)(nil)
+
+// GetFirewaller implements FirewallerFactory.
+func (f *firewallerFactory) GetFirewaller(env environs.Environ) openstack.Firewaller {
+	return &rackspaceFirewaller{}
+}
+
+type rackspaceFirewaller struct{}
+
+var _ openstack.Firewaller = (*rackspaceFirewaller)(nil)
+
+// InitialNetworks implements Firewaller interface.
+func (c *rackspaceFirewaller) InitialNetworks() []nova.ServerNetworks {
+	// These are the default Rackspace networks, see:
+	// http://docs.rackspace.com/servers/api/v2/cs-devguide/content/provision_server_with_networks.html
+	return []nova.ServerNetworks{
+		{NetworkId: "00000000-0000-0000-0000-000000000000"}, // Rackspace PublicNet
+		{NetworkId: "11111111-1111-1111-1111-111111111111"}, // Rackspace ServiceNet
+	}
+}
+
+// OpenPorts is not supported.
+func (c *rackspaceFirewaller) OpenPorts(ports []network.PortRange) error {
+	return errors.NotSupportedf("OpenPorts")
+}
+
+// ClosePorts is not supported.
+func (c *rackspaceFirewaller) ClosePorts(ports []network.PortRange) error {
+	return errors.NotSupportedf("ClosePorts")
+}
+
+// Ports returns the port ranges opened for the whole environment.
+// Must only be used if the environment was set up with the
+// FwGlobal firewall mode.
+func (c *rackspaceFirewaller) Ports() ([]network.PortRange, error) {
+	return nil, errors.NotSupportedf("Ports")
+}
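Note the convention in the port methods above: operations Rackspace cannot support return a typed NotSupported error from github.com/juju/errors instead of failing silently, so callers can branch on capability rather than on message text. A small sketch of producing and recognizing such errors:

    package main

    import (
        "fmt"

        "github.com/juju/errors"
    )

    // openPorts mimics the firewaller's unsupported global-mode operation.
    func openPorts() error {
        return errors.NotSupportedf("OpenPorts")
    }

    func main() {
        err := openPorts()
        fmt.Println(err)                        // "OpenPorts not supported"
        fmt.Println(errors.IsNotSupported(err)) // true: test the type, not the text
    }

The remaining group-management hooks below are deliberate no-ops, since Rackspace port handling happens per instance over SSH rather than through nova security groups.

+// DeleteGlobalGroups implements OpenstackFirewaller interface.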
+func (c *rackspaceFirewaller) DeleteGlobalGroups() error { + return nil +} + +// GetSecurityGroups implements OpenstackFirewaller interface. +func (c *rackspaceFirewaller) GetSecurityGroups(ids ...instance.Id) ([]string, error) { + return nil, nil +} + +// SetUpGroups implements OpenstackFirewaller interface. +func (c *rackspaceFirewaller) SetUpGroups(machineId string, apiPort int) ([]nova.SecurityGroup, error) { + return nil, nil +} + +// OpenInstancePorts implements Firewaller interface. +func (c *rackspaceFirewaller) OpenInstancePorts(inst instance.Instance, machineId string, ports []network.PortRange) error { + return c.changePorts(inst, true, ports) +} + +// CloseInstancePorts implements Firewaller interface. +func (c *rackspaceFirewaller) CloseInstancePorts(inst instance.Instance, machineId string, ports []network.PortRange) error { + return c.changePorts(inst, false, ports) +} + +// InstancePorts implements Firewaller interface. +func (c *rackspaceFirewaller) InstancePorts(inst instance.Instance, machineId string) ([]network.PortRange, error) { + _, configurator, err := c.getInstanceConfigurator(inst) + if err != nil { + return nil, errors.Trace(err) + } + return configurator.FindOpenPorts() +} + +func (c *rackspaceFirewaller) changePorts(inst instance.Instance, insert bool, ports []network.PortRange) error { + addresses, sshClient, err := c.getInstanceConfigurator(inst) + if err != nil { + return errors.Trace(err) + } + + for _, addr := range addresses { + if addr.Scope == network.ScopePublic { + err = sshClient.ChangePorts(addr.Value, insert, ports) + if err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +func (c *rackspaceFirewaller) getInstanceConfigurator(inst instance.Instance) ([]network.Address, common.InstanceConfigurator, error) { + addresses, err := inst.Addresses() + if err != nil { + return nil, nil, errors.Trace(err) + } + if len(addresses) == 0 { + return addresses, nil, errors.New("No addresses found") + } + + client := common.NewSshInstanceConfigurator(addresses[0].Value) + return addresses, client, err +} === added file 'src/github.com/juju/juju/provider/rackspace/init.go' --- src/github.com/juju/juju/provider/rackspace/init.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/rackspace/init.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,28 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package rackspace + +import ( + "github.com/juju/juju/environs" + "github.com/juju/juju/provider/openstack" + "github.com/juju/juju/storage/provider/registry" +) + +const ( + providerType = "rackspace" +) + +func init() { + osProvider := openstack.EnvironProvider{ + openstack.OpenstackCredentials{}, + &rackspaceConfigurator{}, + &firewallerFactory{}, + } + providerInstance = &environProvider{ + osProvider, + } + environs.RegisterProvider(providerType, providerInstance) + + registry.RegisterEnvironStorageProviders(providerType, openstack.CinderProviderType) +} === added file 'src/github.com/juju/juju/provider/rackspace/package_test.go' --- src/github.com/juju/juju/provider/rackspace/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/rackspace/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package rackspace_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/provider/rackspace/provider.go' --- src/github.com/juju/juju/provider/rackspace/provider.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/rackspace/provider.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package rackspace + +import ( + "github.com/juju/loggo" + + "github.com/juju/juju/environs" +) + +var logger = loggo.GetLogger("juju.provider.rackspace") + +type environProvider struct { + environs.EnvironProvider +} + +var providerInstance *environProvider === added file 'src/github.com/juju/juju/provider/rackspace/provider_configurator.go' --- src/github.com/juju/juju/provider/rackspace/provider_configurator.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/rackspace/provider_configurator.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package rackspace + +import ( + "github.com/juju/errors" + "github.com/juju/schema" + "gopkg.in/goose.v1/nova" + + "github.com/juju/juju/cloudconfig/cloudinit" + "github.com/juju/juju/environs" + "github.com/juju/juju/provider/openstack" +) + +type rackspaceConfigurator struct { +} + +// ModifyRunServerOptions implements ProviderConfigurator interface. +func (c *rackspaceConfigurator) ModifyRunServerOptions(options *nova.RunServerOpts) { + // More on how ConfigDrive option is used on rackspace: + // http://docs.rackspace.com/servers/api/v2/cs-devguide/content/config_drive_ext.html + options.ConfigDrive = true +} + +// GetCloudConfig implements ProviderConfigurator interface. +func (c *rackspaceConfigurator) GetCloudConfig(args environs.StartInstanceParams) (cloudinit.CloudConfig, error) { + cloudcfg, err := cloudinit.New(args.Tools.OneSeries()) + if err != nil { + return nil, errors.Trace(err) + } + // Additional package required for sshInstanceConfigurator, to save + // iptables state between restarts. + cloudcfg.AddPackage("iptables-persistent") + return cloudcfg, nil +} + +// GetConfigDefaults implements ProviderConfigurator interface. +func (c *rackspaceConfigurator) GetConfigDefaults() schema.Defaults { + return schema.Defaults{ + "username": "", + "password": "", + "tenant-name": "", + "auth-url": "https://identity.api.rackspacecloud.com/v2.0", + "auth-mode": string(openstack.AuthUserPass), + "access-key": "", + "secret-key": "", + "region": "", + "use-floating-ip": false, + "use-default-secgroup": false, + "network": "", + } +} === added file 'src/github.com/juju/juju/provider/rackspace/provider_test.go' --- src/github.com/juju/juju/provider/rackspace/provider_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/rackspace/provider_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package rackspace_test + +import ( + gc "gopkg.in/check.v1" + + "github.com/juju/errors" + "github.com/juju/juju/cloud" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/provider/rackspace" +) + +type providerSuite struct { + provider environs.EnvironProvider + innerProvider *fakeProvider +} + +var _ = gc.Suite(&providerSuite{}) + +func (s *providerSuite) SetUpTest(c *gc.C) { + s.innerProvider = new(fakeProvider) + s.provider = rackspace.NewProvider(s.innerProvider) +} + +func (s *providerSuite) TestValidate(c *gc.C) { + cfg, err := config.New(config.UseDefaults, map[string]interface{}{ + "name": "some-name", + "type": "some-type", + "authorized-keys": "key", + }) + c.Check(err, gc.IsNil) + _, err = s.provider.Validate(cfg, nil) + c.Check(err, gc.IsNil) + c.Check(s.innerProvider.Pop().name, gc.Equals, "Validate") +} + +type fakeProvider struct { + methodCalls []methodCall +} + +func (p *fakeProvider) Push(name string, params ...interface{}) { + p.methodCalls = append(p.methodCalls, methodCall{name, params}) +} + +func (p *fakeProvider) Pop() methodCall { + m := p.methodCalls[0] + p.methodCalls = p.methodCalls[1:] + return m +} + +func (p *fakeProvider) Open(cfg *config.Config) (environs.Environ, error) { + p.Push("Open", cfg) + return nil, nil +} + +func (p *fakeProvider) RestrictedConfigAttributes() []string { + p.Push("RestrictedConfigAttributes") + return nil +} + +func (p *fakeProvider) PrepareForCreateEnvironment(cfg *config.Config) (*config.Config, error) { + p.Push("PrepareForCreateEnvironment", cfg) + return nil, nil +} + +func (p *fakeProvider) PrepareForBootstrap(ctx environs.BootstrapContext, args environs.PrepareForBootstrapParams) (environs.Environ, error) { + p.Push("PrepareForBootstrap", ctx, args) + return nil, nil +} + +func (p *fakeProvider) Validate(cfg, old *config.Config) (valid *config.Config, err error) { + p.Push("Validate", cfg, old) + return cfg, nil +} + +func (p *fakeProvider) SecretAttrs(cfg *config.Config) (map[string]string, error) { + p.Push("SecretAttrs", cfg) + return nil, nil +} + +func (p *fakeProvider) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + p.Push("CredentialSchemas") + return nil +} + +func (p *fakeProvider) DetectCredentials() (*cloud.CloudCredential, error) { + p.Push("DetectCredentials") + return nil, errors.NotFoundf("credentials") +} === modified file 'src/github.com/juju/juju/provider/vsphere/client.go' --- src/github.com/juju/juju/provider/vsphere/client.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/client.go 2016-03-22 15:18:22 +0000 @@ -25,6 +25,7 @@ "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/provider/common" ) const ( @@ -96,13 +97,17 @@ if err != nil { return nil, errors.Trace(err) } + // We assign public ip address for all instances. + // We can't assign public ip only when OpenPort is called, as assigning + // an ip address via reconfiguring the VM makes it inaccessible to the + // controller. 
if ecfg.externalNetwork() != "" { ip, err := vm.WaitForIP(context.TODO()) if err != nil { return nil, errors.Trace(err) } - client := newSshClient(ip) - err = client.configureExternalIpAddress(spec.apiPort) + client := common.NewSshInstanceConfigurator(ip) + err = client.ConfigureExternalIpAddress(spec.apiPort) if err != nil { return nil, errors.Trace(err) } === modified file 'src/github.com/juju/juju/provider/vsphere/config.go' --- src/github.com/juju/juju/provider/vsphere/config.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/config.go 2016-03-22 15:18:22 +0000 @@ -24,28 +24,6 @@ cfgExternalNetwork = "external-network" ) -// boilerplateConfig will be shown in help output, so please keep it up to -// date when you change environment configuration below. -var boilerplateConfig = ` -vmware: - type: vsphere - - # IP address or DNS name of vsphere API host. - host: - - # Vsphere API user credentials. - user: - password: - - # Name of vsphere datacenter. - datacenter: - - # Name of the network, that all created vms will use ot obtain public ip address. - # This network should have ip pool configured or DHCP server connected to it. - # This parameter is optional. - extenal-network: -`[1:] - // configFields is the spec for each vmware config value's type. var configFields = schema.Fields{ cfgHost: schema.String(), === modified file 'src/github.com/juju/juju/provider/vsphere/config_test.go' --- src/github.com/juju/juju/provider/vsphere/config_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/config_test.go 2016-03-22 15:18:22 +0000 @@ -26,7 +26,7 @@ func (s *ConfigSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) - cfg, err := testing.EnvironConfig(c).Apply(vsphere.ConfigAttrs) + cfg, err := testing.ModelConfig(c).Apply(vsphere.ConfigAttrs) c.Assert(err, jc.ErrorIsNil) s.config = cfg } @@ -80,7 +80,7 @@ func (ts configTestSpec) newConfig(c *gc.C) *config.Config { attrs := ts.attrs() - cfg, err := testing.EnvironConfig(c).Apply(attrs) + cfg, err := testing.ModelConfig(c).Apply(attrs) c.Assert(err, jc.ErrorIsNil) return cfg } @@ -123,7 +123,7 @@ expect: testing.Attrs{"unknown-field": "12345"}, }} -func (*ConfigSuite) TestNewEnvironConfig(c *gc.C) { +func (*ConfigSuite) TestNewModelConfig(c *gc.C) { for i, test := range newConfigTests { c.Logf("test %d: %s", i, test.info) === added file 'src/github.com/juju/juju/provider/vsphere/credentials.go' --- src/github.com/juju/juju/provider/vsphere/credentials.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/vsphere/credentials.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build !gccgo + +package vsphere + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/cloud" +) + +type environProviderCredentials struct{} + +// CredentialSchemas is part of the environs.ProviderCredentials interface. +func (environProviderCredentials) CredentialSchemas() map[cloud.AuthType]cloud.CredentialSchema { + return map[cloud.AuthType]cloud.CredentialSchema{ + cloud.UserPassAuthType: { + "user": { + Description: "The username to authenticate with.", + }, + "password": { + Description: "The password to authenticate with.", + Hidden: true, + }, + }, + } +} + +// DetectCredentials is part of the environs.ProviderCredentials interface. 
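The CredentialSchemas result above advertises a single userpass schema whose password attribute is marked Hidden, telling interactive tooling to mask the value; DetectCredentials, which continues below, simply reports NotFound because there are no local vSphere credential files to scan. A self-contained sketch of what the Hidden flag buys (credAttr and userpassSchema are simplified stand-ins for cloud.CredentialAttr and cloud.CredentialSchema):

    package main

    import "fmt"

    // credAttr mirrors the shape of cloud.CredentialAttr: a description plus
    // whether the value should be masked when echoed back to a user.
    type credAttr struct {
        description string
        hidden      bool
    }

    // userpassSchema matches the attributes declared in the diff above.
    var userpassSchema = map[string]credAttr{
        "user":     {description: "The username to authenticate with."},
        "password": {description: "The password to authenticate with.", hidden: true},
    }

    // render prints credential attributes, masking any hidden ones.
    func render(attrs map[string]string) {
        for name, value := range attrs {
            if userpassSchema[name].hidden {
                value = "********"
            }
            fmt.Printf("%s: %s\n", name, value)
        }
    }

    func main() {
        render(map[string]string{"user": "bob", "password": "dobbs"})
    }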
+func (environProviderCredentials) DetectCredentials() (*cloud.CloudCredential, error) { + return nil, errors.NotFoundf("credentials") +} === added file 'src/github.com/juju/juju/provider/vsphere/credentials_test.go' --- src/github.com/juju/juju/provider/vsphere/credentials_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/provider/vsphere/credentials_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package vsphere_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs" + envtesting "github.com/juju/juju/environs/testing" +) + +type credentialsSuite struct { + testing.IsolationSuite + provider environs.EnvironProvider +} + +var _ = gc.Suite(&credentialsSuite{}) + +func (s *credentialsSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + var err error + s.provider, err = environs.Provider("vsphere") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *credentialsSuite) TestCredentialSchemas(c *gc.C) { + envtesting.AssertProviderAuthTypes(c, s.provider, "userpass") +} + +func (s *credentialsSuite) TestUserPassCredentialsValid(c *gc.C) { + envtesting.AssertProviderCredentialsValid(c, s.provider, "userpass", map[string]string{ + "user": "bob", + "password": "dobbs", + }) +} + +func (s *credentialsSuite) TestUserPassHiddenAttributes(c *gc.C) { + envtesting.AssertProviderCredentialsAttributesHidden(c, s.provider, "userpass", "password") +} + +func (s *credentialsSuite) TestDetectCredentialsNotFound(c *gc.C) { + _, err := s.provider.DetectCredentials() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} === modified file 'src/github.com/juju/juju/provider/vsphere/environ.go' --- src/github.com/juju/juju/provider/vsphere/environ.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ.go 2016-03-22 15:18:22 +0000 @@ -80,8 +80,9 @@ // getSnapshot returns a copy of the environment. This is useful for // ensuring the env you are using does not get changed by other code // while you are using it. -func (env environ) getSnapshot() *environ { - return &env +func (env *environ) getSnapshot() *environ { + e := *env + return &e } // Config returns the configuration data with which the env was created. @@ -95,8 +96,8 @@ // Bootstrap creates a new instance, chosing the series and arch out of // available tools. The series and arch are returned along with a func // that must be called to finalize the bootstrap process by transferring -// the tools and installing the initial juju state server. -func (env *environ) Bootstrap(ctx environs.BootstrapContext, params environs.BootstrapParams) (arch, series string, _ environs.BootstrapFinalizer, _ error) { +// the tools and installing the initial juju controller. +func (env *environ) Bootstrap(ctx environs.BootstrapContext, params environs.BootstrapParams) (*environs.BootstrapResult, error) { return Bootstrap(ctx, env, params) } === modified file 'src/github.com/juju/juju/provider/vsphere/environ_broker.go' --- src/github.com/juju/juju/provider/vsphere/environ_broker.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ_broker.go 2016-03-22 15:18:22 +0000 @@ -25,7 +25,7 @@ DefaultMemMb = uint64(2000) ) -func isStateServer(mcfg *instancecfg.InstanceConfig) bool { +func isController(mcfg *instancecfg.InstanceConfig) bool { return multiwatcher.AnyJobNeedsState(mcfg.Jobs...) 
} @@ -135,7 +135,7 @@ continue } apiPort := 0 - if isStateServer(args.InstanceConfig) { + if isController(args.InstanceConfig) { apiPort = args.InstanceConfig.StateServingInfo.APIPort } spec := &instanceSpec{ @@ -145,7 +145,7 @@ img: img, userData: userData, sshKey: args.InstanceConfig.AuthorizedKeys, - isState: isStateServer(args.InstanceConfig), + isState: isController(args.InstanceConfig), apiPort: apiPort, } inst, err = env.client.CreateInstance(env.ecfg, spec) === modified file 'src/github.com/juju/juju/provider/vsphere/environ_broker_test.go' --- src/github.com/juju/juju/provider/vsphere/environ_broker_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ_broker_test.go 2016-03-22 15:18:22 +0000 @@ -6,13 +6,13 @@ package vsphere_test import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "github.com/juju/errors" "github.com/juju/govmomi/vim25/methods" "github.com/juju/govmomi/vim25/soap" "github.com/juju/govmomi/vim25/types" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + gc "gopkg.in/check.v1" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/constraints" @@ -20,7 +20,6 @@ "github.com/juju/juju/environs/config" imagetesting "github.com/juju/juju/environs/imagemetadata/testing" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/provider/common" "github.com/juju/juju/provider/vsphere" "github.com/juju/juju/tools" @@ -57,7 +56,7 @@ cons := constraints.Value{} - instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(cons, "trusty", "") + instanceConfig, err := instancecfg.NewBootstrapInstanceConfig(cons, cons, "trusty", "") c.Assert(err, jc.ErrorIsNil) instanceConfig.Tools = tools[0] === modified file 'src/github.com/juju/juju/provider/vsphere/environ_instance.go' --- src/github.com/juju/juju/provider/vsphere/environ_instance.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ_instance.go 2016-03-22 15:18:22 +0000 @@ -79,9 +79,9 @@ return results, err } -// StateServerInstances returns the IDs of the instances corresponding -// to juju state servers. -func (env *environ) StateServerInstances() ([]instance.Id, error) { +// ControllerInstances returns the IDs of the instances corresponding +// to juju controllers. +func (env *environ) ControllerInstances() ([]instance.Id, error) { env = env.getSnapshot() prefix := common.MachineFullName(env, "") === modified file 'src/github.com/juju/juju/provider/vsphere/environ_network.go' --- src/github.com/juju/juju/provider/vsphere/environ_network.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ_network.go 2016-03-22 15:18:22 +0000 @@ -13,12 +13,12 @@ ) // AllocateAddress implements environs.Environ. -func (env *environ) AllocateAddress(instID instance.Id, subnetID network.Id, addr network.Address, _, _ string) error { - return env.changeAddress(instID, subnetID, addr, true) +func (env *environ) AllocateAddress(instID instance.Id, subnetID network.Id, addr *network.Address, _, _ string) error { + return env.changeAddress(instID, subnetID, *addr, true) } // ReleaseAddress implements environs.Environ. 
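Note the changed signatures just above: AllocateAddress now takes the address by pointer, and ReleaseAddress gains an extra, ignored string parameter. A hedged sketch of a call site under the new form, with invented IDs and address:

    addr := network.Address{Value: "10.0.0.42"} // hypothetical address
    err := env.AllocateAddress(instance.Id("vm-0"), network.Id("VM Network"), &addr, "", "")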
-func (env *environ) ReleaseAddress(instID instance.Id, netID network.Id, addr network.Address, _ string) error { +func (env *environ) ReleaseAddress(instID instance.Id, netID network.Id, addr network.Address, _, _ string) error { return env.changeAddress(instID, netID, addr, false) } @@ -28,7 +28,7 @@ return errors.Trace(err) } inst := instances[0].(*environInstance) - _, client, err := inst.getSshClient() + _, client, err := inst.getInstanceConfigurator() if err != nil { return errors.Trace(err) } @@ -37,9 +37,9 @@ interfaceName = "eth1" } if add { - err = client.addIpAddress(interfaceName, addr.Value) + err = client.AddIpAddress(interfaceName, addr.Value) } else { - err = client.releaseIpAddress(interfaceName, addr.Value) + err = client.ReleaseIpAddress(addr.Value) } return errors.Trace(err) === modified file 'src/github.com/juju/juju/provider/vsphere/environ_policy_test.go' --- src/github.com/juju/juju/provider/vsphere/environ_policy_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ_policy_test.go 2016-03-22 15:18:22 +0000 @@ -7,10 +7,10 @@ import ( jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" gc "gopkg.in/check.v1" "github.com/juju/juju/constraints" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/provider/vsphere" ) === modified file 'src/github.com/juju/juju/provider/vsphere/environ_test.go' --- src/github.com/juju/juju/provider/vsphere/environ_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/vsphere/environ_test.go 2016-03-22 15:18:22 +0000 @@ -29,12 +29,12 @@ func (s *environSuite) TestBootstrap(c *gc.C) { s.PatchValue(&vsphere.Bootstrap, func(ctx environs.BootstrapContext, env environs.Environ, args environs.BootstrapParams, - ) (string, string, environs.BootstrapFinalizer, error) { - return "", "", nil, errors.New("Bootstrap called") + ) (*environs.BootstrapResult, error) { + return nil, errors.New("Bootstrap called") }) os.Setenv(osenv.JujuFeatureFlagEnvKey, feature.VSphereProvider) - _, _, _, err := s.Env.Bootstrap(nil, environs.BootstrapParams{}) + _, err := s.Env.Bootstrap(nil, environs.BootstrapParams{}) c.Assert(err, gc.ErrorMatches, "Bootstrap called") } === modified file 'src/github.com/juju/juju/provider/vsphere/image_metadata.go' --- src/github.com/juju/juju/provider/vsphere/image_metadata.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/vsphere/image_metadata.go 2016-03-22 15:18:22 +0000 @@ -62,7 +62,6 @@ func imageMetadataFetch(sources []simplestreams.DataSource, cons *imagemetadata.ImageConstraint) ([]*OvaFileMetadata, error) { params := simplestreams.GetMetadataParams{ StreamsVersion: imagemetadata.StreamsVersionV1, - OnlySigned: false, LookupConstraint: cons, ValueParams: simplestreams.ValueParams{ DataType: "image-downloads", === modified file 'src/github.com/juju/juju/provider/vsphere/instance.go' --- src/github.com/juju/juju/provider/vsphere/instance.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/instance.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,7 @@ "github.com/juju/juju/instance" "github.com/juju/juju/network" + "github.com/juju/juju/provider/common" ) type environInstance struct { @@ -38,13 +39,6 @@ return "" } -// Refresh implements instance.Instance. -func (inst *environInstance) Refresh() error { - env := inst.env.getSnapshot() - err := env.client.Refresh(inst.base) - return errors.Trace(err) -} - // Addresses implements instance.Instance. 
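The provider-private sshClient gives way throughout this diff to the shared common.InstanceConfigurator; a rough sketch of the renamed surface as used above, with an invented host:

    client := common.NewSshInstanceConfigurator("10.0.0.42")
    err := client.AddIpAddress("eth1", "10.0.0.43") // was addIpAddress
    err = client.ReleaseIpAddress("10.0.0.43")      // was releaseIpAddress; the NIC argument is gone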
func (inst *environInstance) Addresses() ([]network.Address, error) { if inst.base.Guest == nil || inst.base.Guest.IpAddress == "" { @@ -89,25 +83,25 @@ // Ports returns the set of ports open on the instance, which // should have been started with the given machine id. func (inst *environInstance) Ports(machineID string) ([]network.PortRange, error) { - _, sshClient, err := inst.getSshClient() + _, client, err := inst.getInstanceConfigurator() if err != nil { return nil, errors.Trace(err) } - return sshClient.findOpenPorts() + return client.FindOpenPorts() } func (inst *environInstance) changePorts(insert bool, ports []network.PortRange) error { if inst.env.ecfg.externalNetwork() == "" { return errors.New("Can't close/open ports without external network") } - addresses, sshClient, err := inst.getSshClient() + addresses, client, err := inst.getInstanceConfigurator() if err != nil { return errors.Trace(err) } for _, addr := range addresses { if addr.Scope == network.ScopePublic { - err = sshClient.changePorts(addr.Value, insert, ports) + err = client.ChangePorts(addr.Value, insert, ports) if err != nil { return errors.Trace(err) } @@ -116,7 +110,7 @@ return nil } -func (inst *environInstance) getSshClient() ([]network.Address, *sshClient, error) { +func (inst *environInstance) getInstanceConfigurator() ([]network.Address, common.InstanceConfigurator, error) { addresses, err := inst.Addresses() if err != nil { return nil, nil, errors.Trace(err) @@ -130,6 +124,6 @@ } } - client := newSshClient(localAddr) + client := common.NewSshInstanceConfigurator(localAddr) return addresses, client, err } === modified file 'src/github.com/juju/juju/provider/vsphere/ova_import_manager.go' --- src/github.com/juju/juju/provider/vsphere/ova_import_manager.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/provider/vsphere/ova_import_manager.go 2016-03-22 15:18:22 +0000 @@ -186,7 +186,6 @@ if err != nil { return "", errors.Trace(err) } - file, err := os.Open(ovfFilePath) defer file.Close() if err != nil { === modified file 'src/github.com/juju/juju/provider/vsphere/provider.go' --- src/github.com/juju/juju/provider/vsphere/provider.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/provider.go 2016-03-22 15:18:22 +0000 @@ -9,11 +9,14 @@ "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/juju/cloud" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" ) -type environProvider struct{} +type environProvider struct { + environProviderCredentials +} var providerInstance = environProvider{} var _ environs.EnvironProvider = providerInstance @@ -27,7 +30,24 @@ } // PrepareForBootstrap implements environs.EnvironProvider. 
-func (p environProvider) PrepareForBootstrap(ctx environs.BootstrapContext, cfg *config.Config) (environs.Environ, error) { +func (p environProvider) PrepareForBootstrap(ctx environs.BootstrapContext, args environs.PrepareForBootstrapParams) (environs.Environ, error) { + + cfg := args.Config + switch authType := args.Credentials.AuthType(); authType { + case cloud.UserPassAuthType: + credentialAttrs := args.Credentials.Attributes() + var err error + cfg, err = cfg.Apply(map[string]interface{}{ + cfgUser: credentialAttrs["user"], + cfgPassword: credentialAttrs["password"], + }) + if err != nil { + return nil, errors.Trace(err) + } + default: + return nil, errors.NotSupportedf("%q auth-type", authType) + } + cfg, err := p.PrepareForCreateEnvironment(cfg) if err != nil { return nil, errors.Trace(err) @@ -87,10 +107,3 @@ } return ecfg.secret(), nil } - -// BoilerplateConfig implements environs.EnvironProvider. -func (environProvider) BoilerplateConfig() string { - // boilerplateConfig is kept in config.go, in the hope that people editing - // config will keep it up to date. - return boilerplateConfig -} === modified file 'src/github.com/juju/juju/provider/vsphere/provider_test.go' --- src/github.com/juju/juju/provider/vsphere/provider_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/provider_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/cloud" "github.com/juju/juju/environs" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/provider/vsphere" @@ -43,7 +44,13 @@ } func (s *providerSuite) TestPrepareForBootstrap(c *gc.C) { - env, err := s.provider.PrepareForBootstrap(envtesting.BootstrapContext(c), s.Config) + env, err := s.provider.PrepareForBootstrap(envtesting.BootstrapContext(c), environs.PrepareForBootstrapParams{ + Config: s.Config, + Credentials: cloud.NewCredential( + cloud.UserPassAuthType, + map[string]string{"user": "u", "password": "p"}, + ), + }) c.Check(err, jc.ErrorIsNil) c.Check(env, gc.NotNil) } === removed file 'src/github.com/juju/juju/provider/vsphere/ssh_client.go' --- src/github.com/juju/juju/provider/vsphere/ssh_client.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/vsphere/ssh_client.go 1970-01-01 00:00:00 +0000 @@ -1,191 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
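Condensing the provider test above, the new entry point is driven like this (prov, ctx and cfg are assumed to exist):

    env, err := prov.PrepareForBootstrap(ctx, environs.PrepareForBootstrapParams{
        Config: cfg,
        Credentials: cloud.NewCredential(
            cloud.UserPassAuthType,
            map[string]string{"user": "u", "password": "p"},
        ),
    })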
- -// +build !gccgo - -package vsphere - -import ( - "fmt" - "strconv" - "strings" - - "github.com/juju/errors" - - "github.com/juju/juju/network" - "github.com/juju/juju/utils/ssh" -) - -type sshClient struct { - client ssh.Client - host string - options *ssh.Options -} - -func newSshClient(host string) *sshClient { - options := ssh.Options{} - options.SetIdentities("/var/lib/juju/system-identity") - return &sshClient{ - client: ssh.DefaultClient, - host: "ubuntu@" + host, - options: &options, - } -} - -func (c *sshClient) configureExternalIpAddress(apiPort int) error { - cmd := `printf 'auto eth1\niface eth1 inet dhcp' | sudo tee -a /etc/network/interfaces.d/eth1.cfg -sudo ifup eth1 -sudo iptables -i eth1 -I INPUT -j DROP` - - if apiPort > 0 { - cmd += fmt.Sprintf("\nsudo iptables -I INPUT -p tcp --dport %d -j ACCEPT", apiPort) - } - - command := c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - output, err := command.CombinedOutput() - if err != nil { - return errors.Errorf("failed to allocate external IP address: %s", output) - } - logger.Tracef("configure external ip address output: %s", output) - return nil -} - -func (c *sshClient) changePorts(ipAddress string, insert bool, ports []network.PortRange) error { - cmd := "" - insertArg := "-I" - if !insert { - insertArg = "-D" - } - for _, port := range ports { - if port.ToPort-port.FromPort > 0 { - cmd += fmt.Sprintf("sudo iptables -d %s %s INPUT -p %s --match multiport --dports %d:%d -j ACCEPT\n", ipAddress, insertArg, port.Protocol, port.FromPort, port.ToPort) - } else { - - cmd += fmt.Sprintf("sudo iptables -d %s %s INPUT -p %s --dport %d -j ACCEPT\n", ipAddress, insertArg, port.Protocol, port.FromPort) - } - } - cmd += "sudo /etc/init.d/iptables-persistent save\n" - command := c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - output, err := command.CombinedOutput() - if err != nil { - return errors.Errorf("failed to configure ports on external network: %s", output) - } - logger.Tracef("change ports output: %s", output) - return nil -} - -func (c *sshClient) findOpenPorts() ([]network.PortRange, error) { - cmd := "sudo iptables -L INPUT -n" - command := c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - output, err := command.CombinedOutput() - if err != nil { - return nil, errors.Errorf("failed to list open ports: %s", output) - } - logger.Tracef("find open ports output: %s", output) - - //the output have the following format, we will skipp all other rules - //Chain INPUT (policy ACCEPT) - //target prot opt source destination - //ACCEPT tcp -- 0.0.0.0/0 192.168.0.1 multiport dports 3456:3458 - //ACCEPT tcp -- 0.0.0.0/0 192.168.0.2 tcp dpt:12345 - - res := make([]network.PortRange, 0) - var addSinglePortRange = func(items []string) { - ports := strings.Split(items[6], ":") - if len(ports) != 2 { - return - } - to, err := strconv.ParseInt(ports[1], 10, 32) - if err != nil { - return - } - - res = append(res, network.PortRange{ - Protocol: items[1], - FromPort: int(to), - ToPort: int(to), - }) - } - var addMultiplePortRange = func(items []string) { - ports := strings.Split(items[7], ":") - if len(ports) != 2 { - return - } - from, err := strconv.ParseInt(ports[0], 10, 32) - if err != nil { - return - } - to, err := strconv.ParseInt(ports[1], 10, 32) - if err != nil { - return - } - - res = append(res, network.PortRange{ - Protocol: items[1], - FromPort: int(from), - ToPort: int(to), 
- }) - } - - for i, line := range strings.Split(string(output), "\n") { - if i == 1 || i == 0 { - continue - } - items := strings.Split(line, " ") - if len(items) == 7 && items[0] == "ACCEPT" && items[3] == "0.0.0.0/0" { - addSinglePortRange(items) - } - if len(items) == 8 && items[0] == "ACCEPT" && items[3] == "0.0.0.0/0" && items[5] != "multiport" && items[6] != "dports" { - addMultiplePortRange(items) - } - } - return res, nil -} - -func (c *sshClient) addIpAddress(nic string, addr string) error { - cmd := fmt.Sprintf("ls /etc/network/interfaces.d | grep %s: | sed 's/%s://' | sed 's/.cfg//' | tail -1", nic, nic) - command := c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - lastIndStr, err := command.CombinedOutput() - if err != nil { - return errors.Errorf("failed to obtain last device index: %s", lastIndStr) - } - lastInd := 0 - if ind, err := strconv.ParseInt(string(lastIndStr), 10, 64); err != nil { - lastInd = int(ind) + 1 - } - nic = fmt.Sprintf("%s:%d", nic, lastInd) - cmd = fmt.Sprintf("printf 'auto %s\\niface %s inet static\\naddress %s' | sudo tee -a /etc/network/interfaces.d/%s.cfg\nsudo ifup %s", nic, nic, addr, nic, nic) - - command = c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - output, err := command.CombinedOutput() - if err != nil { - return errors.Errorf("failed to add IP address: %s", output) - } - logger.Tracef("add ip address output: %s", output) - return nil -} - -func (c *sshClient) releaseIpAddress(_ string, addr string) error { - cmd := fmt.Sprintf("ip addr show | grep %s | awk '{print $7}'", addr) - command := c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - nic, err := command.CombinedOutput() - if err != nil { - return errors.Errorf("faild to get nic by ip address: %s", nic) - } - - cmd = fmt.Sprintf("sudo rm %s.cfg \nsudo ifdown %s", nic, nic) - command = c.client.Command(c.host, []string{"/bin/bash"}, c.options) - command.Stdin = strings.NewReader(cmd) - output, err := command.CombinedOutput() - if err != nil { - return errors.Errorf("failed to release IP address: %s", output) - } - logger.Tracef("release ip address output: %s", output) - return nil -} === modified file 'src/github.com/juju/juju/provider/vsphere/testing_test.go' --- src/github.com/juju/juju/provider/vsphere/testing_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/provider/vsphere/testing_test.go 2016-03-22 15:18:22 +0000 @@ -59,11 +59,11 @@ s.initEnv(c) s.setUpHttpProxy(c) s.FakeMetadataServer() - osenv.SetJujuHome(c.MkDir()) + osenv.SetJujuXDGDataHome(c.MkDir()) } func (s *BaseSuite) initEnv(c *gc.C) { - cfg, err := testing.EnvironConfig(c).Apply(ConfigAttrs) + cfg, err := testing.ModelConfig(c).Apply(ConfigAttrs) c.Assert(err, jc.ErrorIsNil) env, err := environs.New(cfg) c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/provider/vsphere/userdata.go' --- src/github.com/juju/juju/provider/vsphere/userdata.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/vsphere/userdata.go 2016-03-22 15:18:22 +0000 @@ -7,17 +7,18 @@ import ( "github.com/juju/errors" + "github.com/juju/juju/cloudconfig/cloudinit" "github.com/juju/juju/cloudconfig/providerinit/renderers" - "github.com/juju/juju/version" + jujuos "github.com/juju/utils/os" ) type VsphereRenderer struct{} -func (VsphereRenderer) EncodeUserdata(udata []byte, vers version.OSType) ([]byte, error) { - switch vers { - case 
version.Ubuntu, version.CentOS: - return renderers.ToBase64(udata), nil +func (VsphereRenderer) Render(cfg cloudinit.CloudConfig, os jujuos.OSType) ([]byte, error) { + switch os { + case jujuos.Ubuntu, jujuos.CentOS: + return renderers.RenderYAML(cfg, renderers.ToBase64) default: - return nil, errors.Errorf("Cannot encode userdata for OS: %s", vers) + return nil, errors.Errorf("Cannot encode userdata for OS: %s", os.String()) } } === modified file 'src/github.com/juju/juju/provider/vsphere/userdata_test.go' --- src/github.com/juju/juju/provider/vsphere/userdata_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/provider/vsphere/userdata_test.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,12 @@ "encoding/base64" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/os" gc "gopkg.in/check.v1" + "github.com/juju/juju/cloudconfig/cloudinit/cloudinittest" "github.com/juju/juju/provider/vsphere" "github.com/juju/juju/testing" - "github.com/juju/juju/version" ) type UserdataSuite struct { @@ -23,22 +24,23 @@ func (s *UserdataSuite) TestVsphereUnix(c *gc.C) { renderer := vsphere.VsphereRenderer{} - data := []byte("test") - result, err := renderer.EncodeUserdata(data, version.Ubuntu) + cloudcfg := &cloudinittest.CloudConfig{YAML: []byte("yaml")} + + result, err := renderer.Render(cloudcfg, os.Ubuntu) c.Assert(err, jc.ErrorIsNil) - expected := base64.StdEncoding.EncodeToString(data) + expected := base64.StdEncoding.EncodeToString(cloudcfg.YAML) c.Assert(string(result), jc.DeepEquals, expected) - data = []byte("test") - result, err = renderer.EncodeUserdata(data, version.CentOS) + result, err = renderer.Render(cloudcfg, os.CentOS) c.Assert(err, jc.ErrorIsNil) - expected = base64.StdEncoding.EncodeToString(data) + expected = base64.StdEncoding.EncodeToString(cloudcfg.YAML) c.Assert(string(result), jc.DeepEquals, expected) } func (s *UserdataSuite) TestVsphereUnknownOS(c *gc.C) { renderer := vsphere.VsphereRenderer{} - result, err := renderer.EncodeUserdata(nil, version.Windows) + cloudcfg := &cloudinittest.CloudConfig{} + result, err := renderer.Render(cloudcfg, os.Windows) c.Assert(result, gc.IsNil) c.Assert(err, gc.ErrorMatches, "Cannot encode userdata for OS: Windows") } === added directory 'src/github.com/juju/juju/resource' === added directory 'src/github.com/juju/juju/resource/api' === added directory 'src/github.com/juju/juju/resource/api/client' === added file 'src/github.com/juju/juju/resource/api/client/base_test.go' --- src/github.com/juju/juju/resource/api/client/base_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/client/base_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,153 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
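The renderer now consumes a cloudinit.CloudConfig rather than raw bytes; a short usage sketch matching the tests above (cloudcfg is assumed):

    renderer := vsphere.VsphereRenderer{}
    encoded, err := renderer.Render(cloudcfg, os.Ubuntu)
    // encoded is the base64 of the rendered YAML; os.CentOS takes the same
    // path, while os.Windows fails with "Cannot encode userdata for OS: Windows".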
+ +package client_test + +import ( + "fmt" + "io" + "net/http" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + basetesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/resourcetesting" +) + +type BaseSuite struct { + testing.IsolationSuite + + stub *testing.Stub + facade *stubFacade + response *api.UploadResult +} + +func (s *BaseSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.facade = newStubFacade(c, s.stub) + s.response = &api.UploadResult{} +} + +func (s *BaseSuite) Do(req *http.Request, body io.ReadSeeker, resp interface{}) error { + s.stub.AddCall("Do", req, body, resp) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + result, ok := resp.(*api.UploadResult) + if !ok { + msg := fmt.Sprintf("bad response type %T, expected api.UploadResult", resp) + return errors.NewNotValid(nil, msg) + } + + *result = *s.response + return nil +} + +func newResourceResult(c *gc.C, serviceID string, names ...string) ([]resource.Resource, api.ResourcesResult) { + var resources []resource.Resource + var apiResult api.ResourcesResult + for _, name := range names { + data := name + "...spamspamspam" + res, apiRes := newResource(c, name, "a-user", data) + resources = append(resources, res) + apiResult.Resources = append(apiResult.Resources, apiRes) + } + return resources, apiResult +} + +func newResource(c *gc.C, name, username, data string) (resource.Resource, api.Resource) { + opened := resourcetesting.NewResource(c, nil, name, "a-service", data) + res := opened.Resource + res.Revision = 1 + res.Username = username + if username == "" { + // Note that resourcetesting.NewResource() returns a resource + // with a username and timestamp set. So if the username was + // "un-set" then we have to also unset the timestamp.
+ res.Timestamp = time.Time{} + } + + apiRes := api.Resource{ + CharmResource: api.CharmResource{ + Name: name, + Type: "file", + Path: res.Path, + Origin: "upload", + Revision: 1, + Fingerprint: res.Fingerprint.Bytes(), + Size: res.Size, + }, + ID: res.ID, + ServiceID: res.ServiceID, + Username: username, + Timestamp: res.Timestamp, + } + + return res, apiRes +} + +type stubFacade struct { + basetesting.StubFacadeCaller + + apiResults map[string]api.ResourcesResult + pendingIDs []string +} + +func newStubFacade(c *gc.C, stub *testing.Stub) *stubFacade { + s := &stubFacade{ + StubFacadeCaller: basetesting.StubFacadeCaller{ + Stub: stub, + }, + apiResults: make(map[string]api.ResourcesResult), + } + + s.FacadeCallFn = func(_ string, args, response interface{}) error { + switch typedResponse := response.(type) { + case *api.ResourcesResults: + typedArgs, ok := args.(*api.ListResourcesArgs) + c.Assert(ok, jc.IsTrue) + + for _, e := range typedArgs.Entities { + tag, err := names.ParseTag(e.Tag) + c.Assert(err, jc.ErrorIsNil) + service := tag.Id() + + apiResult, ok := s.apiResults[service] + if !ok { + apiResult.Error = &params.Error{ + Message: fmt.Sprintf("service %q not found", service), + Code: params.CodeNotFound, + } + } + typedResponse.Results = append(typedResponse.Results, apiResult) + } + case *api.AddPendingResourcesResult: + typedResponse.PendingIDs = s.pendingIDs + default: + c.Errorf("bad type %T", response) + } + return nil + } + + return s +} + +func (s *stubFacade) Close() error { + s.Stub.AddCall("Close") + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} === added file 'src/github.com/juju/juju/resource/api/client/client.go' --- src/github.com/juju/juju/resource/api/client/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/client/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,179 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client + +import ( + "io" + "net/http" + "strings" + + "github.com/juju/errors" + "github.com/juju/loggo" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" +) + +var logger = loggo.GetLogger("juju.resource.api.client") + +// TODO(ericsnow) Move FacadeCaller to a component-central package. + +// FacadeCaller has the api/base.FacadeCaller methods needed for the component. +type FacadeCaller interface { + FacadeCall(request string, params, response interface{}) error +} + +// Doer has the raw HTTP request method needed for the component. +type Doer interface { + Do(req *http.Request, body io.ReadSeeker, resp interface{}) error +} + +// Client is the public client for the resources API facade. +type Client struct { + FacadeCaller + io.Closer + doer Doer +} + +// NewClient returns a new Client for the given raw API caller. +func NewClient(caller FacadeCaller, doer Doer, closer io.Closer) *Client { + return &Client{ + FacadeCaller: caller, + Closer: closer, + doer: doer, + } +} + +// ListResources calls the ListResources API server method with +// the given service names.
+func (c Client) ListResources(services []string) ([]resource.ServiceResources, error) { + args, err := api.NewListResourcesArgs(services) + if err != nil { + return nil, errors.Trace(err) + } + + var apiResults api.ResourcesResults + if err := c.FacadeCall("ListResources", &args, &apiResults); err != nil { + return nil, errors.Trace(err) + } + + if len(apiResults.Results) != len(services) { + // We don't bother returning the results we *did* get since + // something bad happened on the server. + return nil, errors.Errorf("got invalid data from server (expected %d results, got %d)", len(services), len(apiResults.Results)) + } + + var errs []error + results := make([]resource.ServiceResources, len(services)) + for i := range services { + apiResult := apiResults.Results[i] + + result, err := api.APIResult2ServiceResources(apiResult) + if err != nil { + errs = append(errs, errors.Trace(err)) + } + results[i] = result + } + if err := resolveErrors(errs); err != nil { + return nil, errors.Trace(err) + } + + return results, nil +} + +// Upload sends the provided resource blob up to Juju. +func (c Client) Upload(service, name string, reader io.ReadSeeker) error { + uReq, err := api.NewUploadRequest(service, name, reader) + if err != nil { + return errors.Trace(err) + } + req, err := uReq.HTTPRequest() + if err != nil { + return errors.Trace(err) + } + + var response api.UploadResult // ignored + if err := c.doer.Do(req, reader, &response); err != nil { + return errors.Trace(err) + } + + return nil +} + +// AddPendingResources sends the provided resource info up to Juju +// without making it available yet. +func (c Client) AddPendingResources(serviceID string, resources []charmresource.Resource) (pendingIDs []string, err error) { + args, err := api.NewAddPendingResourcesArgs(serviceID, resources) + if err != nil { + return nil, errors.Trace(err) + } + + var result api.AddPendingResourcesResult + if err := c.FacadeCall("AddPendingResources", &args, &result); err != nil { + return nil, errors.Trace(err) + } + if result.Error != nil { + err := common.RestoreError(result.Error) + return nil, errors.Trace(err) + } + + if len(result.PendingIDs) != len(resources) { + return nil, errors.Errorf("bad data from server: expected %d IDs, got %d", len(resources), len(result.PendingIDs)) + } + for i, id := range result.PendingIDs { + if id == "" { + return nil, errors.Errorf("bad data from server: got an empty ID for resource %q", resources[i].Name) + } + // TODO(ericsnow) Do other validation? + } + + return result.PendingIDs, nil +} + +// AddPendingResource sends the provided resource blob up to Juju +// without making it available yet. For example, AddPendingResource() +// is used before the service is deployed. 
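Taken together, a hedged sketch of typical use of this client (the facade caller, doer and closer wiring is assumed to come from the surrounding API layer):

    cl := client.NewClient(facadeCaller, doer, closer)
    results, err := cl.ListResources([]string{"a-service"})
    // On success len(results) == len(services); a mismatched count is
    // reported as an error rather than returned as partial data.
    err = cl.Upload("a-service", "spam", blob) // blob is any io.ReadSeeker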
+func (c Client) AddPendingResource(serviceID string, res charmresource.Resource, reader io.ReadSeeker) (pendingID string, err error) { + ids, err := c.AddPendingResources(serviceID, []charmresource.Resource{res}) + if err != nil { + return "", errors.Trace(err) + } + pendingID = ids[0] + + if reader != nil { + uReq, err := api.NewUploadRequest(serviceID, res.Name, reader) + if err != nil { + return "", errors.Trace(err) + } + uReq.PendingID = pendingID + req, err := uReq.HTTPRequest() + if err != nil { + return "", errors.Trace(err) + } + + var response api.UploadResult // ignored + if err := c.doer.Do(req, reader, &response); err != nil { + return "", errors.Trace(err) + } + } + + return pendingID, nil +} + +func resolveErrors(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + msgs := make([]string, len(errs)) + for i, err := range errs { + msgs[i] = err.Error() + } + return errors.New(strings.Join(msgs, "\n")) + } +} === added file 'src/github.com/juju/juju/resource/api/client/client_listresources_test.go' --- src/github.com/juju/juju/resource/api/client/client_listresources_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/client/client_listresources_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,217 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/api/client" +) + +var _ = gc.Suite(&ListResourcesSuite{}) + +type ListResourcesSuite struct { + BaseSuite +} + +func (s *ListResourcesSuite) TestOkay(c *gc.C) { + expected, apiResult := newResourceResult(c, "a-service", "spam") + s.facade.apiResults["a-service"] = apiResult + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service"} + results, err := cl.ListResources(services) + c.Assert(err, jc.ErrorIsNil) + + c.Check(results, jc.DeepEquals, []resource.ServiceResources{ + {Resources: expected}, + }) + c.Check(s.stub.Calls(), gc.HasLen, 1) + s.stub.CheckCall(c, 0, "FacadeCall", + "ListResources", + &api.ListResourcesArgs{[]params.Entity{{ + Tag: "service-a-service", + }}}, + &api.ResourcesResults{ + Results: []api.ResourcesResult{ + apiResult, + }, + }, + ) +} + +func (s *ListResourcesSuite) TestBulk(c *gc.C) { + expected1, apiResult1 := newResourceResult(c, "a-service", "spam") + s.facade.apiResults["a-service"] = apiResult1 + expected2, apiResult2 := newResourceResult(c, "other-service", "eggs", "ham") + s.facade.apiResults["other-service"] = apiResult2 + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service", "other-service"} + results, err := cl.ListResources(services) + c.Assert(err, jc.ErrorIsNil) + + c.Check(results, jc.DeepEquals, []resource.ServiceResources{ + {Resources: expected1}, + {Resources: expected2}, + }) + c.Check(s.stub.Calls(), gc.HasLen, 1) + s.stub.CheckCall(c, 0, "FacadeCall", + "ListResources", + &api.ListResourcesArgs{[]params.Entity{ + { + Tag: "service-a-service", + }, { + Tag: "service-other-service", + }, + }}, + &api.ResourcesResults{ + Results: []api.ResourcesResult{ + apiResult1, + apiResult2, + }, + }, + ) +} + +func (s *ListResourcesSuite) TestNoServices(c *gc.C) { + cl := client.NewClient(s.facade, s, s.facade) + + var services []string + results, err 
:= cl.ListResources(services) + c.Assert(err, jc.ErrorIsNil) + + c.Check(results, gc.HasLen, 0) + s.stub.CheckCallNames(c, "FacadeCall") +} + +func (s *ListResourcesSuite) TestBadServices(c *gc.C) { + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"???"} + _, err := cl.ListResources(services) + + c.Check(err, gc.ErrorMatches, `.*invalid service.*`) + s.stub.CheckNoCalls(c) +} + +func (s *ListResourcesSuite) TestServiceNotFound(c *gc.C) { + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service"} + _, err := cl.ListResources(services) + + c.Check(err, jc.Satisfies, errors.IsNotFound) + s.stub.CheckCallNames(c, "FacadeCall") +} + +func (s *ListResourcesSuite) TestServiceEmpty(c *gc.C) { + s.facade.apiResults["a-service"] = api.ResourcesResult{} + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service"} + results, err := cl.ListResources(services) + c.Assert(err, jc.ErrorIsNil) + + c.Check(results, jc.DeepEquals, []resource.ServiceResources{ + {}, + }) + s.stub.CheckCallNames(c, "FacadeCall") +} + +func (s *ListResourcesSuite) TestServerError(c *gc.C) { + failure := errors.New("") + s.facade.FacadeCallFn = func(_ string, _, _ interface{}) error { + return failure + } + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service"} + _, err := cl.ListResources(services) + + c.Check(err, gc.ErrorMatches, ``) + s.stub.CheckCallNames(c, "FacadeCall") +} + +func (s *ListResourcesSuite) TestTooFew(c *gc.C) { + s.facade.FacadeCallFn = func(_ string, _, response interface{}) error { + typedResponse, ok := response.(*api.ResourcesResults) + c.Assert(ok, jc.IsTrue) + + typedResponse.Results = []api.ResourcesResult{{ + Resources: nil, + }} + + return nil + } + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service", "other-service"} + results, err := cl.ListResources(services) + + c.Check(results, gc.HasLen, 0) + c.Check(err, gc.ErrorMatches, `.*got invalid data from server \(expected 2 results, got 1\).*`) + s.stub.CheckCallNames(c, "FacadeCall") +} + +func (s *ListResourcesSuite) TestTooMany(c *gc.C) { + s.facade.FacadeCallFn = func(_ string, _, response interface{}) error { + typedResponse, ok := response.(*api.ResourcesResults) + c.Assert(ok, jc.IsTrue) + + typedResponse.Results = []api.ResourcesResult{{ + Resources: nil, + }, { + Resources: nil, + }, { + Resources: nil, + }} + + return nil + } + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service", "other-service"} + results, err := cl.ListResources(services) + + c.Check(results, gc.HasLen, 0) + c.Check(err, gc.ErrorMatches, `.*got invalid data from server \(expected 2 results, got 3\).*`) + s.stub.CheckCallNames(c, "FacadeCall") +} + +func (s *ListResourcesSuite) TestConversionFailed(c *gc.C) { + s.facade.FacadeCallFn = func(_ string, _, response interface{}) error { + typedResponse, ok := response.(*api.ResourcesResults) + c.Assert(ok, jc.IsTrue) + + var res api.Resource + res.Name = "spam" + typedResponse.Results = []api.ResourcesResult{{ + Resources: []api.Resource{ + res, + }, + }} + + return nil + } + + cl := client.NewClient(s.facade, s, s.facade) + + services := []string{"a-service"} + _, err := cl.ListResources(services) + + c.Check(err, gc.ErrorMatches, `.*got bad data.*`) + s.stub.CheckCallNames(c, "FacadeCall") +} === added file 'src/github.com/juju/juju/resource/api/client/client_upload_test.go' --- src/github.com/juju/juju/resource/api/client/client_upload_test.go 
1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/client/client_upload_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,216 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client_test + +import ( + "fmt" + "io" + "net/http" + "strings" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource/api/client" +) + +var _ = gc.Suite(&UploadSuite{}) + +type UploadSuite struct { + BaseSuite +} + +func (s *UploadSuite) TestOkay(c *gc.C) { + data := "" + _, s.response.Resource = newResource(c, "spam", "a-user", data) + fp, err := charmresource.GenerateFingerprint(strings.NewReader(data)) + c.Assert(err, jc.ErrorIsNil) + req, err := http.NewRequest("PUT", "/services/a-service/resources/spam", nil) + c.Assert(err, jc.ErrorIsNil) + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Content-SHA384", fp.String()) + req.Header.Set("Content-Length", fmt.Sprint(len(data))) + req.ContentLength = int64(len(data)) + reader := &stubFile{stub: s.stub} + reader.returnRead = strings.NewReader(data) + cl := client.NewClient(s.facade, s, s.facade) + + err = cl.Upload("a-service", "spam", reader) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "Read", "Read", "Seek", "Do") + s.stub.CheckCall(c, 3, "Do", req, reader, s.response) +} + +func (s *UploadSuite) TestBadService(c *gc.C) { + cl := client.NewClient(s.facade, s, s.facade) + + err := cl.Upload("???", "spam", nil) + + c.Check(err, gc.ErrorMatches, `.*invalid service.*`) + s.stub.CheckNoCalls(c) +} + +func (s *UploadSuite) TestBadRequest(c *gc.C) { + reader := &stubFile{stub: s.stub} + cl := client.NewClient(s.facade, s, s.facade) + failure := errors.New("") + s.stub.SetErrors(failure) + + err := cl.Upload("a-service", "spam", reader) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "Read") +} + +func (s *UploadSuite) TestRequestFailed(c *gc.C) { + reader := &stubFile{stub: s.stub} + reader.returnRead = strings.NewReader("") + cl := client.NewClient(s.facade, s, s.facade) + failure := errors.New("") + s.stub.SetErrors(nil, nil, nil, failure) + + err := cl.Upload("a-service", "spam", reader) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "Read", "Read", "Seek", "Do") +} + +func (s *UploadSuite) TestPendingOkay(c *gc.C) { + res, apiResult := newResourceResult(c, "a-service", "spam") + uuid, err := utils.NewUUID() + c.Assert(err, jc.ErrorIsNil) + expected := uuid.String() + s.response.Resource = apiResult.Resources[0] + data := "" + fp, err := charmresource.GenerateFingerprint(strings.NewReader(data)) + c.Assert(err, jc.ErrorIsNil) + req, err := http.NewRequest("PUT", "/services/a-service/resources/spam", nil) + c.Assert(err, jc.ErrorIsNil) + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Content-SHA384", fp.String()) + req.Header.Set("Content-Length", fmt.Sprint(len(data))) + req.ContentLength = int64(len(data)) + req.URL.RawQuery = "pendingid=" + expected + reader := &stubFile{stub: s.stub} + reader.returnRead = strings.NewReader(data) + s.facade.pendingIDs = []string{expected} + cl := client.NewClient(s.facade, s, s.facade) + + uploadID, err := cl.AddPendingResource("a-service", res[0].Resource, reader) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "FacadeCall", + 
"Read", + "Read", + "Seek", + "Do", + ) + s.stub.CheckCall(c, 4, "Do", req, reader, s.response) + c.Check(uploadID, gc.Equals, expected) +} + +func (s *UploadSuite) TestPendingNoFile(c *gc.C) { + res, apiResult := newResourceResult(c, "a-service", "spam") + uuid, err := utils.NewUUID() + c.Assert(err, jc.ErrorIsNil) + expected := uuid.String() + s.response.Resource = apiResult.Resources[0] + s.facade.pendingIDs = []string{expected} + cl := client.NewClient(s.facade, s, s.facade) + + uploadID, err := cl.AddPendingResource("a-service", res[0].Resource, nil) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "FacadeCall", + ) + c.Check(uploadID, gc.Equals, expected) +} + +func (s *UploadSuite) TestPendingBadService(c *gc.C) { + res, _ := newResourceResult(c, "a-service", "spam") + s.facade.FacadeCallFn = nil + cl := client.NewClient(s.facade, s, s.facade) + + _, err := cl.AddPendingResource("???", res[0].Resource, nil) + + c.Check(err, gc.ErrorMatches, `.*invalid service.*`) + s.stub.CheckNoCalls(c) +} + +func (s *UploadSuite) TestPendingBadRequest(c *gc.C) { + res, _ := newResource(c, "spam", "", "") + chRes := res.Resource + reader := &stubFile{stub: s.stub} + s.facade.pendingIDs = []string{"some-unique-id"} + cl := client.NewClient(s.facade, s, s.facade) + failure := errors.New("") + s.stub.SetErrors(nil, failure) + + _, err := cl.AddPendingResource("a-service", chRes, reader) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "FacadeCall", "Read") +} + +func (s *UploadSuite) TestPendingRequestFailed(c *gc.C) { + res, _ := newResourceResult(c, "a-service", "spam") + reader := &stubFile{stub: s.stub} + reader.returnRead = strings.NewReader("") + s.facade.pendingIDs = []string{"some-unique-id"} + cl := client.NewClient(s.facade, s, s.facade) + failure := errors.New("") + s.stub.SetErrors(nil, nil, nil, nil, failure) + + _, err := cl.AddPendingResource("a-service", res[0].Resource, reader) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, + "FacadeCall", + "Read", + "Read", + "Seek", + "Do", + ) +} + +type stubFile struct { + stub *testing.Stub + + returnRead io.Reader + returnSeek int64 +} + +func (s *stubFile) Read(buf []byte) (int, error) { + s.stub.AddCall("Read", buf) + if err := s.stub.NextErr(); err != nil { + return 0, errors.Trace(err) + } + + return s.returnRead.Read(buf) +} + +func (s *stubFile) Seek(offset int64, whence int) (int64, error) { + s.stub.AddCall("Seek", offset, whence) + if err := s.stub.NextErr(); err != nil { + return 0, errors.Trace(err) + } + + return s.returnSeek, nil +} + +func (s *stubFile) Close() error { + s.stub.AddCall("Close") + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} === added file 'src/github.com/juju/juju/resource/api/client/package_test.go' --- src/github.com/juju/juju/resource/api/client/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/client/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package client_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/api/data.go' --- src/github.com/juju/juju/resource/api/data.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/data.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,189 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package api + +// TODO(ericsnow) Eliminate the dependence on apiserver if possible. + +import ( + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/apiserver/params" +) + +// ListResourcesArgs are the arguments for the ListResources endpoint. +type ListResourcesArgs params.Entities + +// NewListResourcesArgs returns the arguments for the ListResources endpoint. +func NewListResourcesArgs(services []string) (ListResourcesArgs, error) { + var args ListResourcesArgs + var errs []error + for _, service := range services { + if !names.IsValidService(service) { + err := errors.Errorf("invalid service %q", service) + errs = append(errs, err) + continue + } + args.Entities = append(args.Entities, params.Entity{ + Tag: names.NewServiceTag(service).String(), + }) + } + if err := resolveErrors(errs); err != nil { + return args, errors.Trace(err) + } + return args, nil +} + +// AddPendingResourcesArgs holds the arguments to the AddPendingResources +// API endpoint. +type AddPendingResourcesArgs struct { + params.Entity + + // Resources is the list of resources to add as pending. + Resources []CharmResource +} + +// NewAddPendingResourcesArgs returns the arguments for the +// AddPendingResources API endpoint. +func NewAddPendingResourcesArgs(serviceID string, resources []charmresource.Resource) (AddPendingResourcesArgs, error) { + var args AddPendingResourcesArgs + + if !names.IsValidService(serviceID) { + return args, errors.Errorf("invalid service %q", serviceID) + } + tag := names.NewServiceTag(serviceID).String() + + var apiResources []CharmResource + for _, res := range resources { + if err := res.Validate(); err != nil { + return args, errors.Trace(err) + } + apiRes := CharmResource2API(res) + apiResources = append(apiResources, apiRes) + } + args.Tag = tag + args.Resources = apiResources + return args, nil +} + +// AddPendingResourcesResult holds the result of the AddPendingResources +// API endpoint. +type AddPendingResourcesResult struct { + params.ErrorResult + + // PendingIDs holds the "pending ID" for each of the requested + // resources. + PendingIDs []string +} + +// ResourcesResults holds the resources that result +// from a bulk API call. +type ResourcesResults struct { + // Results is the list of resource results. + Results []ResourcesResult +} + +// ResourcesResult holds the resources that result from an API call +// for a single service. +type ResourcesResult struct { + params.ErrorResult + + // Resources is the list of resources for the service. + Resources []Resource + + // CharmStoreResources is the list of resources associated with the charm in + // the charmstore. + CharmStoreResources []CharmResource + + // UnitResources contains a list of the resources for each unit in the + // service. + UnitResources []UnitResources +} + +// A UnitResources contains a list of the resources for the unit defined by Entity. +type UnitResources struct { + params.Entity + + // Resources is a list of resources for the unit. + Resources []Resource +} + +// UploadResult is the response from an upload request. +type UploadResult struct { + params.ErrorResult + + // Resource describes the resource that was stored in the model. + Resource Resource +} + +// Resource contains info about a Resource. +type Resource struct { + CharmResource + + // ID uniquely identifies a resource-service pair within the model.
+ // Note that the model ignores pending resources (those with a + // pending ID) except for in a few clearly pending-related places. + ID string + + // PendingID identifies that this resource is pending and + // distinguishes it from other pending resources with the same model + // ID (and from the active resource). + PendingID string + + // ServiceID identifies the service for the resource. + ServiceID string + + // Username is the ID of the user that added the revision + // to the model (whether implicitly or explicitly). + Username string `json:"username"` + + // Timestamp indicates when the resource was added to the model. + Timestamp time.Time `json:"timestamp"` +} + +// CharmResource contains the definition for a resource. +type CharmResource struct { + // Name identifies the resource. + Name string `json:"name"` + + // Type is the name of the resource type. + Type string `json:"type"` + + // Path is where the resource will be stored. + Path string `json:"path"` + + // Description contains user-facing info about the resource. + Description string `json:"description,omitempty"` + + // Origin is where the resource will come from. + Origin string `json:"origin"` + + // Revision is the revision, if applicable. + Revision int `json:"revision"` + + // Fingerprint is the SHA-384 checksum for the resource blob. + Fingerprint []byte `json:"fingerprint"` + + // Size is the size of the resource, in bytes. + Size int64 +} + +func resolveErrors(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + msgs := make([]string, len(errs)) + for i, err := range errs { + msgs[i] = err.Error() + } + return errors.New(strings.Join(msgs, "\n")) + } +} === added file 'src/github.com/juju/juju/resource/api/download.go' --- src/github.com/juju/juju/resource/api/download.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/download.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package api + +import ( + "fmt" + "net/http" + + "github.com/juju/errors" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +// NewHTTPDownloadRequest creates a new HTTP download request +// for the given resource. +// +// Intended for use on the client side. +func NewHTTPDownloadRequest(resourceName string) (*http.Request, error) { + return http.NewRequest("GET", "/resources/"+resourceName, nil) +} + +// ExtractDownloadRequest pulls the download request info out of the +// given HTTP request. +// +// Intended for use on the server side. +func ExtractDownloadRequest(req *http.Request) string { + return req.URL.Query().Get(":resource") +} + +// UpdateDownloadResponse sets the appropriate headers in the response +// to an HTTP download request. +// +// Intended for use on the server side. +func UpdateDownloadResponse(resp http.ResponseWriter, resource resource.Resource) { + resp.Header().Set("Content-Type", ContentTypeRaw) + resp.Header().Set("Content-Length", fmt.Sprint(resource.Size)) + resp.Header().Set("Content-Sha384", resource.Fingerprint.String()) +} + +// ExtractDownloadResponse pulls the download size and checksum +// from the HTTP response. +func ExtractDownloadResponse(resp *http.Response) (int64, charmresource.Fingerprint, error) { + var fp charmresource.Fingerprint + + // TODO(ericsnow) Finish! + // See UpdateDownloadResponse for the data to extract. 
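+// A sketch, not part of this file, of how a server-side handler might pair
+// the download helpers above (w, req, res and blob are assumed to exist):
+//
+//     name := api.ExtractDownloadRequest(req) // the ":resource" query value
+//     api.UpdateDownloadResponse(w, res)      // sets Content-Type/Length/Sha384
+//     _, err := io.Copy(w, blob)              // stream the resource content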
+ return 0, fp, errors.New("not finished") +} === added file 'src/github.com/juju/juju/resource/api/helpers.go' --- src/github.com/juju/juju/resource/api/helpers.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/helpers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,185 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package api + +// TODO(ericsnow) Eliminate the dependence on apiserver if possible. + +import ( + "github.com/juju/errors" + "github.com/juju/names" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/resource" +) + +// Resource2API converts a resource.Resource into +// a Resource struct. +func Resource2API(res resource.Resource) Resource { + return Resource{ + CharmResource: CharmResource2API(res.Resource), + ID: res.ID, + PendingID: res.PendingID, + ServiceID: res.ServiceID, + Username: res.Username, + Timestamp: res.Timestamp, + } +} + +// APIResult2ServiceResources converts a ResourcesResult into a resource.ServiceResources. +func APIResult2ServiceResources(apiResult ResourcesResult) (resource.ServiceResources, error) { + var result resource.ServiceResources + + if apiResult.Error != nil { + // TODO(ericsnow) Return the resources too? + err := common.RestoreError(apiResult.Error) + return resource.ServiceResources{}, errors.Trace(err) + } + + for _, apiRes := range apiResult.Resources { + res, err := API2Resource(apiRes) + if err != nil { + // This could happen if the server is misbehaving + // or non-conforming. + // TODO(ericsnow) Aggregate errors? + return resource.ServiceResources{}, errors.Annotate(err, "got bad data from server") + } + result.Resources = append(result.Resources, res) + } + + for _, unitRes := range apiResult.UnitResources { + tag, err := names.ParseUnitTag(unitRes.Tag) + if err != nil { + return resource.ServiceResources{}, errors.Annotate(err, "got bad data from server") + } + unitResources := resource.UnitResources{Tag: tag} + for _, apiRes := range unitRes.Resources { + res, err := API2Resource(apiRes) + if err != nil { + return resource.ServiceResources{}, errors.Annotate(err, "got bad data from server") + } + unitResources.Resources = append(unitResources.Resources, res) + } + result.UnitResources = append(result.UnitResources, unitResources) + } + + for _, chRes := range apiResult.CharmStoreResources { + res, err := API2CharmResource(chRes) + if err != nil { + return resource.ServiceResources{}, errors.Annotate(err, "got bad data from server") + } + result.CharmStoreResources = append(result.CharmStoreResources, res) + } + + return result, nil +} + +func ServiceResources2APIResult(svcRes resource.ServiceResources, units []names.UnitTag) ResourcesResult { + var result ResourcesResult + for _, res := range svcRes.Resources { + result.Resources = append(result.Resources, Resource2API(res)) + } + unitResources := make(map[names.UnitTag]resource.UnitResources, len(svcRes.UnitResources)) + for _, unitRes := range svcRes.UnitResources { + unitResources[unitRes.Tag] = unitRes + } + + result.UnitResources = make([]UnitResources, len(units)) + for i, tag := range units { + apiRes := UnitResources{ + Entity: params.Entity{Tag: tag.String()}, + } + for _, res := range unitResources[tag].Resources { + apiRes.Resources = append(apiRes.Resources, Resource2API(res)) + } + result.UnitResources[i] = apiRes + } + + result.CharmStoreResources = make([]CharmResource, 
len(svcRes.CharmStoreResources)) + for i, chRes := range svcRes.CharmStoreResources { + result.CharmStoreResources[i] = CharmResource2API(chRes) + } + return result +} + +// API2Resource converts an API Resource struct into +// a resource.Resource. +func API2Resource(apiRes Resource) (resource.Resource, error) { + var res resource.Resource + + charmRes, err := API2CharmResource(apiRes.CharmResource) + if err != nil { + return res, errors.Trace(err) + } + + res = resource.Resource{ + Resource: charmRes, + ID: apiRes.ID, + PendingID: apiRes.PendingID, + ServiceID: apiRes.ServiceID, + Username: apiRes.Username, + Timestamp: apiRes.Timestamp, + } + + if err := res.Validate(); err != nil { + return res, errors.Trace(err) + } + + return res, nil +} + +// CharmResource2API converts a charm resource into +// a CharmResource struct. +func CharmResource2API(res charmresource.Resource) CharmResource { + return CharmResource{ + Name: res.Name, + Type: res.Type.String(), + Path: res.Path, + Description: res.Description, + Origin: res.Origin.String(), + Revision: res.Revision, + Fingerprint: res.Fingerprint.Bytes(), + Size: res.Size, + } +} + +// API2CharmResource converts an API CharmResource struct into +// a charm resource. +func API2CharmResource(apiInfo CharmResource) (charmresource.Resource, error) { + var res charmresource.Resource + + rtype, err := charmresource.ParseType(apiInfo.Type) + if err != nil { + return res, errors.Trace(err) + } + + origin, err := charmresource.ParseOrigin(apiInfo.Origin) + if err != nil { + return res, errors.Trace(err) + } + + fp, err := resource.DeserializeFingerprint(apiInfo.Fingerprint) + if err != nil { + return res, errors.Trace(err) + } + + res = charmresource.Resource{ + Meta: charmresource.Meta{ + Name: apiInfo.Name, + Type: rtype, + Path: apiInfo.Path, + Description: apiInfo.Description, + }, + Origin: origin, + Revision: apiInfo.Revision, + Fingerprint: fp, + Size: apiInfo.Size, + } + + if err := res.Validate(); err != nil { + return res, errors.Trace(err) + } + return res, nil +} === added file 'src/github.com/juju/juju/resource/api/helpers_test.go' --- src/github.com/juju/juju/resource/api/helpers_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/helpers_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,579 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package api_test + +import ( + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/resourcetesting" +) + +const fingerprint = "123456789012345678901234567890123456789012345678" + +func newFingerprint(c *gc.C, data string) charmresource.Fingerprint { + fp, err := charmresource.GenerateFingerprint(strings.NewReader(data)) + c.Assert(err, jc.ErrorIsNil) + return fp +} + +type HelpersSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&HelpersSuite{}) + +func (HelpersSuite) TestResource2API(c *gc.C) { + fp, err := charmresource.NewFingerprint([]byte(fingerprint)) + c.Assert(err, jc.ErrorIsNil) + now := time.Now() + res := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "spam", + Type: charmresource.TypeFile, + Path: "spam.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + Revision: 1, + Fingerprint: fp, + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + err = res.Validate() + c.Assert(err, jc.ErrorIsNil) + apiRes := api.Resource2API(res) + + c.Check(apiRes, jc.DeepEquals, api.Resource{ + CharmResource: api.CharmResource{ + Name: "spam", + Type: "file", + Path: "spam.tgz", + Description: "you need it", + Origin: "upload", + Revision: 1, + Fingerprint: []byte(fingerprint), + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + }) +} + +func (HelpersSuite) TestAPIResult2ServiceResourcesOkay(c *gc.C) { + fp, err := charmresource.NewFingerprint([]byte(fingerprint)) + c.Assert(err, jc.ErrorIsNil) + now := time.Now() + expected := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "spam", + Type: charmresource.TypeFile, + Path: "spam.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + Revision: 1, + Fingerprint: fp, + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + err = expected.Validate() + c.Assert(err, jc.ErrorIsNil) + + unitExpected := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "unitspam", + Type: charmresource.TypeFile, + Path: "unitspam.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + Revision: 1, + Fingerprint: fp, + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + err = unitExpected.Validate() + c.Assert(err, jc.ErrorIsNil) + + apiRes := api.Resource{ + CharmResource: api.CharmResource{ + Name: "spam", + Type: "file", + Path: "spam.tgz", + Description: "you need it", + Origin: "upload", + Revision: 1, + Fingerprint: []byte(fingerprint), + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + + unitRes := api.Resource{ + CharmResource: api.CharmResource{ + Name: "unitspam", + Type: "file", + Path: "unitspam.tgz", + Description: "you need it", + Origin: "upload", + Revision: 1, + Fingerprint: 
[]byte(fingerprint), + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + + fp2, err := charmresource.GenerateFingerprint(strings.NewReader("boo!")) + c.Assert(err, jc.ErrorIsNil) + + chRes := api.CharmResource{ + Name: "unitspam2", + Type: "file", + Path: "unitspam.tgz2", + Description: "you need it2", + Origin: "upload", + Revision: 2, + Fingerprint: fp2.Bytes(), + Size: 11, + } + + chExpected := charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "unitspam2", + Type: charmresource.TypeFile, + Path: "unitspam.tgz2", + Description: "you need it2", + }, + Origin: charmresource.OriginUpload, + Revision: 2, + Fingerprint: fp2, + Size: 11, + } + + resources, err := api.APIResult2ServiceResources(api.ResourcesResult{ + Resources: []api.Resource{ + apiRes, + }, + CharmStoreResources: []api.CharmResource{ + chRes, + }, + UnitResources: []api.UnitResources{ + { + Entity: params.Entity{ + Tag: "unit-foo-0", + }, + Resources: []api.Resource{ + unitRes, + }, + }, + }, + }) + c.Assert(err, jc.ErrorIsNil) + + serviceResource := resource.ServiceResources{ + Resources: []resource.Resource{ + expected, + }, + CharmStoreResources: []charmresource.Resource{ + chExpected, + }, + UnitResources: []resource.UnitResources{ + { + Tag: names.NewUnitTag("foo/0"), + Resources: []resource.Resource{ + unitExpected, + }, + }, + }, + } + + c.Check(resources, jc.DeepEquals, serviceResource) +} + +func (HelpersSuite) TestAPIResult2ServiceResourcesBadUnitTag(c *gc.C) { + fp, err := charmresource.NewFingerprint([]byte(fingerprint)) + c.Assert(err, jc.ErrorIsNil) + now := time.Now() + expected := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "spam", + Type: charmresource.TypeFile, + Path: "spam.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + Revision: 1, + Fingerprint: fp, + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + err = expected.Validate() + c.Assert(err, jc.ErrorIsNil) + + unitExpected := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "unitspam", + Type: charmresource.TypeFile, + Path: "unitspam.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + Revision: 1, + Fingerprint: fp, + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + err = unitExpected.Validate() + c.Assert(err, jc.ErrorIsNil) + + apiRes := api.Resource{ + CharmResource: api.CharmResource{ + Name: "spam", + Type: "file", + Path: "spam.tgz", + Description: "you need it", + Origin: "upload", + Revision: 1, + Fingerprint: []byte(fingerprint), + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + + unitRes := api.Resource{ + CharmResource: api.CharmResource{ + Name: "unitspam", + Type: "file", + Path: "unitspam.tgz", + Description: "you need it", + Origin: "upload", + Revision: 1, + Fingerprint: []byte(fingerprint), + Size: 10, + }, + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: now, + } + + _, err = api.APIResult2ServiceResources(api.ResourcesResult{ + Resources: []api.Resource{ + apiRes, + }, + UnitResources: []api.UnitResources{ + { + Entity: params.Entity{ + Tag: "THIS 
IS NOT A GOOD UNIT TAG",
+ },
+ Resources: []api.Resource{
+ unitRes,
+ },
+ },
+ },
+ })
+ c.Assert(err, gc.ErrorMatches, ".*got bad data from server.*")
+}
+
+func (HelpersSuite) TestAPIResult2ServiceResourcesFailure(c *gc.C) {
+ apiRes := api.Resource{
+ CharmResource: api.CharmResource{
+ Name: "spam",
+ Type: "file",
+ Path: "spam.tgz",
+ Origin: "upload",
+ Revision: 1,
+ Fingerprint: []byte(fingerprint),
+ Size: 10,
+ },
+ ID: "a-service/spam",
+ ServiceID: "a-service",
+ }
+ failure := errors.New("<failure>")
+
+ _, err := api.APIResult2ServiceResources(api.ResourcesResult{
+ ErrorResult: params.ErrorResult{
+ Error: &params.Error{
+ Message: failure.Error(),
+ },
+ },
+ Resources: []api.Resource{
+ apiRes,
+ },
+ })
+
+ c.Check(err, gc.ErrorMatches, "<failure>")
+ c.Check(errors.Cause(err), gc.Not(gc.Equals), failure)
+}
+
+func (HelpersSuite) TestAPIResult2ServiceResourcesNotFound(c *gc.C) {
+ apiRes := api.Resource{
+ CharmResource: api.CharmResource{
+ Name: "spam",
+ Type: "file",
+ Path: "spam.tgz",
+ Origin: "upload",
+ Revision: 1,
+ Fingerprint: []byte(fingerprint),
+ Size: 10,
+ },
+ ID: "a-service/spam",
+ ServiceID: "a-service",
+ }
+
+ _, err := api.APIResult2ServiceResources(api.ResourcesResult{
+ ErrorResult: params.ErrorResult{
+ Error: &params.Error{
+ Message: `service "a-service" not found`,
+ Code: params.CodeNotFound,
+ },
+ },
+ Resources: []api.Resource{
+ apiRes,
+ },
+ })
+
+ c.Check(err, jc.Satisfies, errors.IsNotFound)
+}
+
+func (HelpersSuite) TestAPI2Resource(c *gc.C) {
+ now := time.Now()
+ res, err := api.API2Resource(api.Resource{
+ CharmResource: api.CharmResource{
+ Name: "spam",
+ Type: "file",
+ Path: "spam.tgz",
+ Description: "you need it",
+ Origin: "upload",
+ Revision: 1,
+ Fingerprint: []byte(fingerprint),
+ Size: 10,
+ },
+ ID: "a-service/spam",
+ PendingID: "some-unique-ID",
+ ServiceID: "a-service",
+ Username: "a-user",
+ Timestamp: now,
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ fp, err := charmresource.NewFingerprint([]byte(fingerprint))
+ c.Assert(err, jc.ErrorIsNil)
+ expected := resource.Resource{
+ Resource: charmresource.Resource{
+ Meta: charmresource.Meta{
+ Name: "spam",
+ Type: charmresource.TypeFile,
+ Path: "spam.tgz",
+ Description: "you need it",
+ },
+ Origin: charmresource.OriginUpload,
+ Revision: 1,
+ Fingerprint: fp,
+ Size: 10,
+ },
+ ID: "a-service/spam",
+ PendingID: "some-unique-ID",
+ ServiceID: "a-service",
+ Username: "a-user",
+ Timestamp: now,
+ }
+ err = expected.Validate()
+ c.Assert(err, jc.ErrorIsNil)
+
+ c.Check(res, jc.DeepEquals, expected)
+}
+
+func (HelpersSuite) TestCharmResource2API(c *gc.C) {
+ fp, err := charmresource.NewFingerprint([]byte(fingerprint))
+ c.Assert(err, jc.ErrorIsNil)
+ res := charmresource.Resource{
+ Meta: charmresource.Meta{
+ Name: "spam",
+ Type: charmresource.TypeFile,
+ Path: "spam.tgz",
+ Description: "you need it",
+ },
+ Origin: charmresource.OriginUpload,
+ Revision: 1,
+ Fingerprint: fp,
+ Size: 10,
+ }
+ err = res.Validate()
+ c.Assert(err, jc.ErrorIsNil)
+ apiInfo := api.CharmResource2API(res)
+
+ c.Check(apiInfo, jc.DeepEquals, api.CharmResource{
+ Name: "spam",
+ Type: "file",
+ Path: "spam.tgz",
+ Description: "you need it",
+ Origin: "upload",
+ Revision: 1,
+ Fingerprint: []byte(fingerprint),
+ Size: 10,
+ })
+}
+
+func (HelpersSuite) TestAPI2CharmResource(c *gc.C) {
+ res, err := api.API2CharmResource(api.CharmResource{
+ Name: "spam",
+ Type: "file",
+ Path: "spam.tgz",
+ Description: "you need it",
+ Origin: "upload",
+ Revision: 1,
+ Fingerprint: []byte(fingerprint),
+ Size: 10,
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ fp, err := charmresource.NewFingerprint([]byte(fingerprint))
+ c.Assert(err, jc.ErrorIsNil)
+ expected := charmresource.Resource{
+ Meta: charmresource.Meta{
+ Name: "spam",
+ Type: charmresource.TypeFile,
+ Path: "spam.tgz",
+ Description: "you need it",
+ },
+ Origin: charmresource.OriginUpload,
+ Revision: 1,
+ Fingerprint: fp,
+ Size: 10,
+ }
+ err = expected.Validate()
+ c.Assert(err, jc.ErrorIsNil)
+
+ c.Check(res, jc.DeepEquals, expected)
+}
+
+func (HelpersSuite) TestServiceResource2API(c *gc.C) {
+ res1 := resourcetesting.NewResource(c, nil, "res1", "a-service", "data").Resource
+ res2 := resourcetesting.NewResource(c, nil, "res2", "a-service", "data2").Resource
+
+ tag0 := names.NewUnitTag("a-service/0")
+ tag1 := names.NewUnitTag("a-service/1")
+
+ chres1 := res1.Resource
+ chres2 := res2.Resource
+ chres1.Revision++
+ chres2.Revision++
+
+ svcRes := resource.ServiceResources{
+ Resources: []resource.Resource{
+ res1,
+ res2,
+ },
+ UnitResources: []resource.UnitResources{
+ {
+ Tag: tag0,
+ Resources: []resource.Resource{
+ res1,
+ res2,
+ },
+ },
+ // note: nothing for tag1
+ },
+ CharmStoreResources: []charmresource.Resource{
+ chres1,
+ chres2,
+ },
+ }
+
+ result := api.ServiceResources2APIResult(svcRes, []names.UnitTag{tag0, tag1})
+
+ apiRes1 := api.Resource2API(res1)
+ apiRes2 := api.Resource2API(res2)
+
+ apiChRes1 := api.CharmResource2API(chres1)
+ apiChRes2 := api.CharmResource2API(chres2)
+
+ c.Check(result, jc.DeepEquals, api.ResourcesResult{
+ Resources: []api.Resource{
+ apiRes1,
+ apiRes2,
+ },
+ UnitResources: []api.UnitResources{
+ {
+ Entity: params.Entity{
+ Tag: "unit-a-service-0",
+ },
+ Resources: []api.Resource{
+ apiRes1,
+ apiRes2,
+ },
+ },
+ {
+ // we should have a listing for every unit, even if they
+ // have no resources.
+ Entity: params.Entity{
+ Tag: "unit-a-service-1",
+ },
+ },
+ },
+ CharmStoreResources: []api.CharmResource{
+ apiChRes1,
+ apiChRes2,
+ },
+ })
+
+} === added file 'src/github.com/juju/juju/resource/api/http.go'
--- src/github.com/juju/juju/resource/api/http.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/http.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,78 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package api
+
+// TODO(ericsnow) Eliminate the apiserver dependencies, if possible.
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/juju/errors"
+ "github.com/juju/loggo"
+
+ "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/apiserver/params"
+)
+
+var logger = loggo.GetLogger("juju.resource.api")
+
+const (
+ // HTTPEndpointPath is the URL path, with substitutions, for
+ // a resource request.
+ HTTPEndpointPath = "/services/%s/resources/%s"
+)
+
+const (
+ // ContentTypeRaw is the HTTP content-type value used for raw, unformatted content.
+ ContentTypeRaw = "application/octet-stream"
+
+ // ContentTypeJSON is the HTTP content-type value used for JSON content.
+ ContentTypeJSON = "application/json"
+)
+
+// NewEndpointPath returns the API URL path for the identified resource.
+func NewEndpointPath(service, name string) string {
+ return fmt.Sprintf(HTTPEndpointPath, service, name)
+}
+
+// ExtractEndpointDetails pulls the endpoint wildcard values from
+// the provided URL.
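+// A sketch with hypothetical values, assuming the router has bound the
+// ":service" and ":resource" wildcards into the query string:
+//
+//     path := NewEndpointPath("mysql", "website") // "/services/mysql/resources/website"
+//     u, _ := url.Parse(path + "?:service=mysql&:resource=website")
+//     service, name := ExtractEndpointDetails(u) // "mysql", "website"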
+func ExtractEndpointDetails(url *url.URL) (service, name string) {
+ service = url.Query().Get(":service")
+ name = url.Query().Get(":resource")
+ return service, name
+}
+
+// TODO(ericsnow) These are copied from apiserver/httpcontext.go...
+
+// SendHTTPError sends a JSON-encoded error response
+// for errors encountered during processing.
+func SendHTTPError(w http.ResponseWriter, err error) {
+ err1, statusCode := common.ServerErrorAndStatus(err)
+ logger.Debugf("sending error: %d %v", statusCode, err1)
+ SendHTTPStatusAndJSON(w, statusCode, &params.ErrorResult{
+ Error: err1,
+ })
+}
+
+// SendHTTPStatusAndJSON sends an HTTP status code and
+// a JSON-encoded response to a client.
+func SendHTTPStatusAndJSON(w http.ResponseWriter, statusCode int, response interface{}) {
+ body, err := json.Marshal(response)
+ if err != nil {
+ http.Error(w, errors.Annotatef(err, "cannot marshal JSON result %#v", response).Error(), 504)
+ return
+ }
+
+ if statusCode == http.StatusUnauthorized {
+ w.Header().Set("WWW-Authenticate", `Basic realm="juju"`)
+ }
+ w.Header().Set("Content-Type", params.ContentTypeJSON)
+ w.Header().Set("Content-Length", fmt.Sprint(len(body)))
+ w.WriteHeader(statusCode)
+ w.Write(body)
+} === added file 'src/github.com/juju/juju/resource/api/package_test.go'
--- src/github.com/juju/juju/resource/api/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package api_test
+
+import (
+ "testing"
+
+ gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ gc.TestingT(t)
+} === added directory 'src/github.com/juju/juju/resource/api/private' === added directory 'src/github.com/juju/juju/resource/api/private/client' === added file 'src/github.com/juju/juju/resource/api/private/client/client.go'
--- src/github.com/juju/juju/resource/api/private/client/client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/private/client/client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,133 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package client
+
+import (
+ "io"
+ "net/http"
+ "path"
+
+ "github.com/juju/errors"
+
+ "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/resource"
+ "github.com/juju/juju/resource/api"
+ "github.com/juju/juju/resource/api/private"
+)
+
+// FacadeCaller exposes the raw API caller functionality needed here.
+type FacadeCaller interface {
+ // FacadeCall makes an API request.
+ FacadeCall(request string, params, response interface{}) error
+}
+
+// HTTPClient exposes the raw API HTTP caller functionality needed here.
+type HTTPClient interface {
+ // Do sends the HTTP request/body and unpacks the response into
+ // the provided "resp". If that is a **http.Response then it is
+ // unpacked as-is. Otherwise it is unmarshaled from JSON.
+ Do(req *http.Request, body io.ReadSeeker, resp interface{}) error
+}
+
+// UnitHTTPClient exposes the raw API HTTP caller functionality needed here.
+type UnitHTTPClient interface {
+ HTTPClient
+
+ // Unit returns the name of the unit for this client.
+ Unit() string
+}
+
+// NewUnitFacadeClient creates a new API client for the resources
+// portion of the uniter facade.
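+// A usage sketch (caller and httpClient are hypothetical implementations
+// of FacadeCaller and UnitHTTPClient supplied by the uniter plumbing):
+//
+//     cl := NewUnitFacadeClient(caller, httpClient)
+//     info, rc, err := cl.GetResource("website")
+//     if err == nil {
+//         defer rc.Close()
+//         // info holds the metadata; rc streams the resource blob.
+//     }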
+func NewUnitFacadeClient(facadeCaller FacadeCaller, httpClient UnitHTTPClient) *UnitFacadeClient {
+ return &UnitFacadeClient{
+ FacadeCaller: facadeCaller,
+ HTTPClient: httpClient,
+ }
+}
+
+// UnitFacadeClient is an API client for the resources portion
+// of the uniter facade.
+type UnitFacadeClient struct {
+ FacadeCaller
+ HTTPClient
+}
+
+// GetResource opens the resource (metadata/blob), if it exists, via
+// the HTTP API and returns it. If it does not exist or hasn't been
+// uploaded yet then errors.NotFound is returned.
+func (c *UnitFacadeClient) GetResource(resourceName string) (resource.Resource, io.ReadCloser, error) {
+ var response *http.Response
+ req, err := api.NewHTTPDownloadRequest(resourceName)
+ if err != nil {
+ return resource.Resource{}, nil, errors.Annotate(err, "failed to build API request")
+ }
+ if err := c.Do(req, nil, &response); err != nil {
+ return resource.Resource{}, nil, errors.Annotate(err, "HTTP request failed")
+ }
+
+ // HACK(katco): Combine this into one request?
+ resourceInfo, err := c.getResourceInfo(resourceName)
+ if err != nil {
+ return resource.Resource{}, nil, errors.Trace(err)
+ }
+
+ // TODO(katco): Check headers against resource info
+ // TODO(katco): Check in on all the response headers
+ return resourceInfo, response.Body, nil
+}
+
+func (c *UnitFacadeClient) getResourceInfo(resourceName string) (resource.Resource, error) {
+ var response private.ResourcesResult
+
+ args := private.ListResourcesArgs{
+ ResourceNames: []string{resourceName},
+ }
+ if err := c.FacadeCall("GetResourceInfo", &args, &response); err != nil {
+ return resource.Resource{}, errors.Annotate(err, "could not get resource info")
+ }
+ if response.Error != nil {
+ err := common.RestoreError(response.Error)
+ return resource.Resource{}, errors.Annotate(err, "request failed on server")
+ }
+
+ if len(response.Resources) != 1 {
+ return resource.Resource{}, errors.New("got bad response from API server")
+ }
+ if response.Resources[0].Error != nil {
+ err := common.RestoreError(response.Resources[0].Error)
+ return resource.Resource{}, errors.Annotate(err, "request failed for resource")
+ }
+ res, err := api.API2Resource(response.Resources[0].Resource)
+ if err != nil {
+ return resource.Resource{}, errors.Annotate(err, "got bad data from API server")
+ }
+ return res, nil
+}
+
+type unitHTTPClient struct {
+ HTTPClient
+ unitName string
+}
+
+// NewUnitHTTPClient wraps an HTTP client (a la httprequest.Client)
+// with unit information. This allows rewriting of the URL to match
+// the relevant unit.
+func NewUnitHTTPClient(client HTTPClient, unitName string) UnitHTTPClient {
+ return &unitHTTPClient{
+ HTTPClient: client,
+ unitName: unitName,
+ }
+}
+
+// Unit returns the name of the unit.
+func (uhc unitHTTPClient) Unit() string {
+ return uhc.unitName
+}
+
+// Do implements httprequest.Doer.
+func (uhc *unitHTTPClient) Do(req *http.Request, body io.ReadSeeker, response interface{}) error {
+ req.URL.Path = path.Join("/units", uhc.unitName, req.URL.Path)
+ return uhc.HTTPClient.Do(req, body, response)
+} === added file 'src/github.com/juju/juju/resource/api/private/client/client_test.go'
--- src/github.com/juju/juju/resource/api/private/client/client_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/private/client/client_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,124 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
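+// A sketch of the URL rewriting done by NewUnitHTTPClient in client.go
+// (hypothetical unit name and path; raw and resp as in the stub-based
+// tests below):
+//
+//     doer := client.NewUnitHTTPClient(raw, "mysql/0")
+//     req, _ := http.NewRequest("GET", "/resources/website", nil)
+//     _ = doer.Do(req, nil, &resp)
+//     // req.URL.Path is now "/units/mysql/0/resources/website".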
+ +package client_test + +import ( + "io" + "net/http" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/api/private" + "github.com/juju/juju/resource/api/private/client" + "github.com/juju/juju/resource/resourcetesting" +) + +var _ = gc.Suite(&UnitFacadeClientSuite{}) + +type UnitFacadeClientSuite struct { + testing.IsolationSuite + + stub *testing.Stub + api *stubAPI +} + +func (s *UnitFacadeClientSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.api = &stubAPI{Stub: s.stub} +} + +func (s *UnitFacadeClientSuite) TestNewUnitFacadeClient(c *gc.C) { + caller := &stubAPI{Stub: s.stub} + doer := &stubAPI{Stub: s.stub} + + cl := client.NewUnitFacadeClient(caller, doer) + + s.stub.CheckNoCalls(c) + c.Check(cl.FacadeCaller, gc.Equals, caller) + c.Check(cl.HTTPClient, gc.Equals, doer) +} + +func (s *UnitFacadeClientSuite) TestGetResource(c *gc.C) { + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + s.api.setResource(opened.Resource, opened) + cl := client.NewUnitFacadeClient(s.api, s.api) + + info, content, err := cl.GetResource("spam") + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "Do", "FacadeCall") + c.Check(info, jc.DeepEquals, opened.Resource) + c.Check(content, jc.DeepEquals, opened) +} + +func (s *UnitFacadeClientSuite) TestUnitDoer(c *gc.C) { + req, err := http.NewRequest("GET", "/resources/eggs", nil) + c.Assert(err, jc.ErrorIsNil) + body := filetesting.NewStubFile(s.stub, nil) + var resp *http.Response + doer := client.NewUnitHTTPClient(s.api, "spam/1") + + err = doer.Do(req, body, &resp) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "Do") + //s.stub.CheckCall(c, 0, "Do", expected, body, resp) + c.Check(req.URL.Path, gc.Equals, "/units/spam/1/resources/eggs") +} + +type stubAPI struct { + *testing.Stub + + ReturnFacadeCall private.ResourcesResult + ReturnUnit string + ReturnDo *http.Response +} + +func (s *stubAPI) setResource(info resource.Resource, reader io.ReadCloser) { + s.ReturnFacadeCall = private.ResourcesResult{ + Resources: []private.ResourceResult{{ + Resource: api.Resource2API(info), + }}, + } + s.ReturnDo = &http.Response{ + Body: reader, + } +} + +func (s *stubAPI) FacadeCall(request string, params, response interface{}) error { + s.AddCall("FacadeCall", params, response) + if err := s.NextErr(); err != nil { + return errors.Trace(err) + } + + resp := response.(*private.ResourcesResult) + *resp = s.ReturnFacadeCall + return nil +} + +func (s *stubAPI) Unit() string { + s.AddCall("Unit") + s.NextErr() // Pop one off. + + return s.ReturnUnit +} + +func (s *stubAPI) Do(req *http.Request, body io.ReadSeeker, response interface{}) error { + s.AddCall("Do", req, body, response) + if err := s.NextErr(); err != nil { + return errors.Trace(err) + } + + resp := response.(**http.Response) + *resp = s.ReturnDo + return nil +} === added file 'src/github.com/juju/juju/resource/api/private/client/package_test.go' --- src/github.com/juju/juju/resource/api/private/client/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/private/client/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package client_test
+
+import (
+ "testing"
+
+ gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ gc.TestingT(t)
+} === added file 'src/github.com/juju/juju/resource/api/private/data.go'
--- src/github.com/juju/juju/resource/api/private/data.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/private/data.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,38 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package private
+
+// TODO(ericsnow) Eliminate the apiserver dependencies, if possible.
+
+import (
+ "github.com/juju/juju/apiserver/params"
+ "github.com/juju/juju/resource/api"
+)
+
+// ListResourcesArgs holds the arguments for an API request to list
+// resources for a service. The service is implicit to the uniter-
+// specific HTTP connection.
+type ListResourcesArgs struct {
+ // ResourceNames holds the names of the service's resources for
+ // which information should be provided.
+ ResourceNames []string
+}
+
+// ResourcesResult holds the resource info for a list of requested
+// resources.
+type ResourcesResult struct {
+ params.ErrorResult
+
+ // Resources is the list of results for the requested resources,
+ // in the same order as requested.
+ Resources []ResourceResult
+}
+
+// ResourceResult is the result for a single requested resource.
+type ResourceResult struct {
+ params.ErrorResult
+
+ // Resource is the info for the requested resource.
+ Resource api.Resource
+} === added directory 'src/github.com/juju/juju/resource/api/private/server' === added file 'src/github.com/juju/juju/resource/api/private/server/handler.go'
--- src/github.com/juju/juju/resource/api/private/server/handler.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/private/server/handler.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,134 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package server
+
+// TODO(ericsnow) Eliminate the apiserver dependencies, if possible.
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/juju/errors"
+
+ "github.com/juju/juju/resource"
+ "github.com/juju/juju/resource/api"
+)
+
+// TODO(ericsnow) Define the HTTPHandlerConstraints here? Perhaps
+// even the HTTPHandlerSpec?
+
+// LegacyHTTPHandler is the HTTP handler for the resources
+// endpoint. We use it rather than having a separate handler for each HTTP
+// method since registered API handlers must handle *all* HTTP methods
+// currently.
+type LegacyHTTPHandler struct {
+ LegacyHTTPHandlerDeps
+}
+
+// NewLegacyHTTPHandler creates a new http.Handler for the resources endpoint.
+func NewLegacyHTTPHandler(deps LegacyHTTPHandlerDeps) *LegacyHTTPHandler {
+ return &LegacyHTTPHandler{
+ LegacyHTTPHandlerDeps: deps,
+ }
+}
+
+// ServeHTTP implements http.Handler.
+func (h *LegacyHTTPHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
+ opener, err := h.NewResourceOpener(req)
+ if err != nil {
+ h.SendHTTPError(resp, err)
+ return
+ }
+
+ // We do this *after* authorization, etc. (in h.Extract...) in order
+ // to prioritize errors that may originate there.
+ switch req.Method {
+ case "GET":
+ logger.Infof("handling resource download request")
+
+ opened, err := h.HandleDownload(opener, req)
+ if err != nil {
+ logger.Errorf("cannot fetch resource reader: %v", err)
+ h.SendHTTPError(resp, err)
+ return
+ }
+ defer opened.Close()
+
+ h.UpdateDownloadResponse(resp, opened.Resource)
+
+ resp.WriteHeader(http.StatusOK)
+ if err := h.Copy(resp, opened); err != nil {
+ // We cannot use api.SendHTTPError here, so we log the error
+ // and move on.
+ logger.Errorf("unable to complete stream for resource: %v", err)
+ return
+ }
+
+ logger.Infof("resource download request successful")
+ default:
+ h.SendHTTPError(resp, errors.MethodNotAllowedf("unsupported method: %q", req.Method))
+ }
+}
+
+// LegacyHTTPHandlerDeps exposes the external dependencies
+// of LegacyHTTPHandler.
+type LegacyHTTPHandlerDeps interface {
+ baseLegacyHTTPHandlerDeps
+ ExtraDeps
+}
+
+// ExtraDeps exposes the non-superficial dependencies of LegacyHTTPHandler.
+type ExtraDeps interface {
+ // NewResourceOpener returns a new opener for the request.
+ NewResourceOpener(*http.Request) (resource.Opener, error)
+}
+
+type baseLegacyHTTPHandlerDeps interface {
+ // UpdateDownloadResponse updates the HTTP response with the info
+ // from the resource.
+ UpdateDownloadResponse(http.ResponseWriter, resource.Resource)
+
+ // SendHTTPError wraps the error in an API error and writes it to the response.
+ SendHTTPError(http.ResponseWriter, error)
+
+ // HandleDownload provides the download functionality.
+ HandleDownload(resource.Opener, *http.Request) (resource.Opened, error)
+
+ // Copy implements the functionality of io.Copy().
+ Copy(io.Writer, io.Reader) error
+}
+
+// NewLegacyHTTPHandlerDeps returns an implementation of LegacyHTTPHandlerDeps.
+func NewLegacyHTTPHandlerDeps(extraDeps ExtraDeps) LegacyHTTPHandlerDeps {
+ return &legacyHTTPHandlerDeps{
+ ExtraDeps: extraDeps,
+ }
+}
+
+// legacyHTTPHandlerDeps is a partial implementation of LegacyHTTPHandlerDeps.
+type legacyHTTPHandlerDeps struct {
+ ExtraDeps
+}
+
+// SendHTTPError implements LegacyHTTPHandlerDeps.
+func (deps legacyHTTPHandlerDeps) SendHTTPError(resp http.ResponseWriter, err error) {
+ api.SendHTTPError(resp, err)
+}
+
+// UpdateDownloadResponse implements LegacyHTTPHandlerDeps.
+func (deps legacyHTTPHandlerDeps) UpdateDownloadResponse(resp http.ResponseWriter, info resource.Resource) {
+ api.UpdateDownloadResponse(resp, info)
+}
+
+// HandleDownload implements LegacyHTTPHandlerDeps.
+func (deps legacyHTTPHandlerDeps) HandleDownload(opener resource.Opener, req *http.Request) (resource.Opened, error) {
+ name := api.ExtractDownloadRequest(req)
+ return opener.OpenResource(name)
+}
+
+// Copy implements LegacyHTTPHandlerDeps.
+func (deps legacyHTTPHandlerDeps) Copy(w io.Writer, r io.Reader) error {
+ _, err := io.Copy(w, r)
+ return err
+} === added file 'src/github.com/juju/juju/resource/api/private/server/handler_test.go'
--- src/github.com/juju/juju/resource/api/private/server/handler_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/private/server/handler_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,302 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
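+// A wiring sketch for the handler under test (extraDeps is a hypothetical
+// ExtraDeps implementation providing NewResourceOpener):
+//
+//     deps := server.NewLegacyHTTPHandlerDeps(extraDeps)
+//     h := server.NewLegacyHTTPHandler(deps)
+//     // GET requests stream the resource; any other method is answered
+//     // with a method-not-allowed API error.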
+ +package server_test + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/api/private/server" + "github.com/juju/juju/resource/resourcetesting" +) + +var _ = gc.Suite(&LegacyHTTPHandlerSuite{}) + +type LegacyHTTPHandlerSuite struct { + testing.IsolationSuite + + stub *testing.Stub + opener *stubResourceOpener + deps *stubLegacyHTTPHandlerDeps + resp *stubResponseWriter +} + +func (s *LegacyHTTPHandlerSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.opener = &stubResourceOpener{Stub: s.stub} + s.deps = &stubLegacyHTTPHandlerDeps{Stub: s.stub} + s.resp = newStubResponseWriter(s.stub) +} + +func (s *LegacyHTTPHandlerSuite) TestIntegration(c *gc.C) { + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + s.opener.ReturnOpenResource = opened + s.deps.ReturnNewResourceOpener = s.opener + deps := server.NewLegacyHTTPHandlerDeps(s.deps) + h := server.NewLegacyHTTPHandler(deps) + req, err := api.NewHTTPDownloadRequest("spam") + c.Assert(err, jc.ErrorIsNil) + req.URL, err = url.ParseRequestURI("https://api:17018/units/eggs/1/resources/spam?:resource=spam") + c.Assert(err, jc.ErrorIsNil) + resp := &fakeResponseWriter{ + stubResponseWriter: s.resp, + } + + c.Logf("%#v", opened.ReadCloser) + h.ServeHTTP(resp, req) + + resp.checkWritten(c, "some data", http.Header{ + "Content-Type": []string{api.ContentTypeRaw}, + "Content-Length": []string{"9"}, // len("some data") + "Content-Sha384": []string{opened.Fingerprint.String()}, + }) +} + +func (s *LegacyHTTPHandlerSuite) TestNewLegacyHTTPHandler(c *gc.C) { + h := server.NewLegacyHTTPHandler(s.deps) + + s.stub.CheckNoCalls(c) + c.Check(h, gc.NotNil) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPDownloadOkay(c *gc.C) { + s.deps.ReturnNewResourceOpener = s.opener + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + s.deps.ReturnHandleDownload = opened + h := &server.LegacyHTTPHandler{ + LegacyHTTPHandlerDeps: s.deps, + } + req, err := http.NewRequest("GET", "...", nil) + c.Assert(err, jc.ErrorIsNil) + + h.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "NewResourceOpener", + "HandleDownload", + "UpdateDownloadResponse", + "WriteHeader", + "Copy", + "Close", + ) + s.stub.CheckCall(c, 0, "NewResourceOpener", req) + s.stub.CheckCall(c, 1, "HandleDownload", s.opener, req) + s.stub.CheckCall(c, 2, "UpdateDownloadResponse", s.resp, opened.Resource) + s.stub.CheckCall(c, 3, "WriteHeader", http.StatusOK) + s.stub.CheckCall(c, 4, "Copy", s.resp, opened) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPDownloadHandlerFailed(c *gc.C) { + h := &server.LegacyHTTPHandler{ + LegacyHTTPHandlerDeps: s.deps, + } + failure := errors.New("") + s.stub.SetErrors(nil, failure) + req, err := http.NewRequest("GET", "...", nil) + c.Assert(err, jc.ErrorIsNil) + + h.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "NewResourceOpener", + "HandleDownload", + "SendHTTPError", + ) + s.stub.CheckCall(c, 2, "SendHTTPError", s.resp, failure) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPDownloadCopyFailed(c *gc.C) { + s.deps.ReturnHandleDownload = resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + h := &server.LegacyHTTPHandler{ + LegacyHTTPHandlerDeps: s.deps, + 
} + failure := errors.New("") + s.stub.SetErrors(nil, nil, failure) + req, err := http.NewRequest("GET", "...", nil) + c.Assert(err, jc.ErrorIsNil) + + h.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "NewResourceOpener", + "HandleDownload", + "UpdateDownloadResponse", + "WriteHeader", + "Copy", + "Close", + ) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPConnectFailed(c *gc.C) { + h := &server.LegacyHTTPHandler{ + LegacyHTTPHandlerDeps: s.deps, + } + failure := errors.New("") + s.stub.SetErrors(failure) + req, err := http.NewRequest("GET", "...", nil) + c.Assert(err, jc.ErrorIsNil) + + h.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "NewResourceOpener", + "SendHTTPError", + ) + s.stub.CheckCall(c, 1, "SendHTTPError", s.resp, failure) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPUnsupportedMethod(c *gc.C) { + h := &server.LegacyHTTPHandler{ + LegacyHTTPHandlerDeps: s.deps, + } + req, err := http.NewRequest("HEAD", "...", nil) + c.Assert(err, jc.ErrorIsNil) + + h.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "NewResourceOpener", + "SendHTTPError", + ) +} + +type stubLegacyHTTPHandlerDeps struct { + *testing.Stub + + ReturnNewResourceOpener resource.Opener + ReturnHandleDownload resource.Opened +} + +func (s *stubLegacyHTTPHandlerDeps) NewResourceOpener(req *http.Request) (resource.Opener, error) { + s.AddCall("NewResourceOpener", req) + if err := s.NextErr(); err != nil { + return nil, err + } + + return s.ReturnNewResourceOpener, nil +} + +func (s *stubLegacyHTTPHandlerDeps) SendHTTPError(resp http.ResponseWriter, err error) { + s.AddCall("SendHTTPError", resp, err) + s.NextErr() // Pop one off. +} + +func (s *stubLegacyHTTPHandlerDeps) UpdateDownloadResponse(resp http.ResponseWriter, info resource.Resource) { + s.AddCall("UpdateDownloadResponse", resp, info) + s.NextErr() // Pop one off. +} + +func (s *stubLegacyHTTPHandlerDeps) HandleDownload(opener resource.Opener, req *http.Request) (resource.Opened, error) { + s.AddCall("HandleDownload", opener, req) + if err := s.NextErr(); err != nil { + return resource.Opened{}, err + } + + return s.ReturnHandleDownload, nil +} + +type stubResourceOpener struct { + *testing.Stub + + ReturnOpenResource resource.Opened +} + +func (s *stubResourceOpener) OpenResource(name string) (resource.Opened, error) { + s.AddCall("OpenResource", name) + if err := s.NextErr(); err != nil { + return resource.Opened{}, err + } + + return s.ReturnOpenResource, nil +} + +func (s *stubLegacyHTTPHandlerDeps) Copy(w io.Writer, r io.Reader) error { + s.AddCall("Copy", w, r) + if err := s.NextErr(); err != nil { + return err + } + + return nil +} + +type stubResponseWriter struct { + *testing.Stub + io.Writer + buf *bytes.Buffer + + ReturnHeader http.Header +} + +func newStubResponseWriter(stub *testing.Stub) *stubResponseWriter { + writer, buf := filetesting.NewStubWriter(stub) + return &stubResponseWriter{ + Stub: stub, + Writer: writer, + buf: buf, + + ReturnHeader: make(http.Header), + } +} + +func (s *stubResponseWriter) Header() http.Header { + s.AddCall("Header") + s.NextErr() // Pop one off. + + return s.ReturnHeader +} + +func (s *stubResponseWriter) WriteHeader(code int) { + s.AddCall("WriteHeader", code) + s.NextErr() // Pop one off. 
+} + +type fakeResponseWriter struct { + *stubResponseWriter + + writeCalled bool + writtenHeader http.Header +} + +func (f *fakeResponseWriter) checkWritten(c *gc.C, body string, header http.Header) { + if !c.Check(f.writeCalled, jc.IsTrue) { + return + } + c.Check(f.buf.String(), gc.Equals, body) + c.Check(f.writtenHeader, jc.DeepEquals, header) + c.Check(f.writtenHeader.Get("Content-Length"), gc.Equals, fmt.Sprint(len(body))) +} + +func (f *fakeResponseWriter) WriteHeader(code int) { + f.stubResponseWriter.WriteHeader(code) + + // See http.Header.clone() in the stdlib (net/http/header.go). + header := make(http.Header) + for k, vv := range f.ReturnHeader { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + header[k] = vv2 + } + f.writtenHeader = header +} + +func (f *fakeResponseWriter) Write(data []byte) (int, error) { + f.writeCalled = true + if f.writtenHeader == nil { + f.WriteHeader(http.StatusOK) + } + return f.stubResponseWriter.Write(data) +} === added file 'src/github.com/juju/juju/resource/api/private/server/package_test.go' --- src/github.com/juju/juju/resource/api/private/server/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/private/server/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package server_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/api/private/server/server.go' --- src/github.com/juju/juju/resource/api/private/server/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/private/server/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// This package holds the hook context API server implementation. +package server + +import ( + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("juju.resource.api.private.server") === added file 'src/github.com/juju/juju/resource/api/private/server/stub_test.go' --- src/github.com/juju/juju/resource/api/private/server/stub_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/private/server/stub_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package server_test
+
+import (
+ "io"
+
+ "github.com/juju/errors"
+ "github.com/juju/testing"
+
+ "github.com/juju/juju/resource"
+)
+
+type stubUnitDataStore struct {
+ *testing.Stub
+
+ ReturnOpenResource resource.Opened
+ ReturnGetResource resource.Resource
+ ReturnListResources resource.ServiceResources
+}
+
+func (s *stubUnitDataStore) OpenResource(name string) (resource.Resource, io.ReadCloser, error) {
+ s.AddCall("OpenResource", name)
+ if err := s.NextErr(); err != nil {
+ return resource.Resource{}, nil, errors.Trace(err)
+ }
+
+ return s.ReturnOpenResource.Resource, s.ReturnOpenResource.ReadCloser, nil
+}
+
+func (s *stubUnitDataStore) GetResource(name string) (resource.Resource, error) {
+ s.AddCall("GetResource", name)
+ if err := s.NextErr(); err != nil {
+ return resource.Resource{}, errors.Trace(err)
+ }
+
+ return s.ReturnGetResource, nil
+}
+
+func (s *stubUnitDataStore) ListResources() (resource.ServiceResources, error) {
+ s.AddCall("ListResources")
+ if err := s.NextErr(); err != nil {
+ return resource.ServiceResources{}, errors.Trace(err)
+ }
+
+ return s.ReturnListResources, nil
+} === added file 'src/github.com/juju/juju/resource/api/private/server/unitfacade.go'
--- src/github.com/juju/juju/resource/api/private/server/unitfacade.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/private/server/unitfacade.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,71 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package server
+
+import (
+ "github.com/juju/errors"
+
+ "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/resource"
+ "github.com/juju/juju/resource/api"
+ "github.com/juju/juju/resource/api/private"
+)
+
+// FacadeVersion is the version of the current API facade.
+// (We start at 1 to distinguish from the default value.)
+const FacadeVersion = 1
+
+// UnitDataStore exposes the data storage functionality needed here.
+// All functionality is tied to the unit's service.
+type UnitDataStore interface {
+ // ListResources lists all the resources for the service.
+ ListResources() (resource.ServiceResources, error)
+}
+
+// NewUnitFacade returns the resources portion of the uniter's API facade.
+func NewUnitFacade(dataStore UnitDataStore) *UnitFacade {
+ return &UnitFacade{
+ DataStore: dataStore,
+ }
+}
+
+// UnitFacade is the resources portion of the uniter's API facade.
+type UnitFacade struct {
+ // DataStore is the data store used by the facade.
+ DataStore UnitDataStore
+}
+
+// GetResourceInfo returns the resource info for each of the given
+// resource names (for the implicit service). If any one is missing then
+// the corresponding result is set with errors.NotFound.
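+// A call sketch (hypothetical names; store implements UnitDataStore):
+//
+//     uf := NewUnitFacade(store)
+//     r, _ := uf.GetResourceInfo(private.ListResourcesArgs{
+//         ResourceNames: []string{"website", "missing"},
+//     })
+//     // r.Resources[0].Resource describes "website";
+//     // r.Resources[1].Error is a not-found API error.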
+func (uf UnitFacade) GetResourceInfo(args private.ListResourcesArgs) (private.ResourcesResult, error) { + var r private.ResourcesResult + r.Resources = make([]private.ResourceResult, len(args.ResourceNames)) + + resources, err := uf.DataStore.ListResources() + if err != nil { + r.Error = common.ServerError(err) + return r, nil + } + + for i, name := range args.ResourceNames { + res, ok := lookUpResource(name, resources.Resources) + if !ok { + r.Resources[i].Error = common.ServerError(errors.NotFoundf("resource %q", name)) + continue + } + + r.Resources[i].Resource = api.Resource2API(res) + } + return r, nil +} + +func lookUpResource(name string, resources []resource.Resource) (resource.Resource, bool) { + for _, res := range resources { + if name == res.Name { + return res, true + } + } + return resource.Resource{}, false +} === added file 'src/github.com/juju/juju/resource/api/private/server/unitfacade_test.go' --- src/github.com/juju/juju/resource/api/private/server/unitfacade_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/private/server/unitfacade_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,110 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package server_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/api/private" + "github.com/juju/juju/resource/api/private/server" + "github.com/juju/juju/resource/resourcetesting" +) + +var _ = gc.Suite(&UnitFacadeSuite{}) + +type UnitFacadeSuite struct { + testing.IsolationSuite + + stub *testing.Stub +} + +func (s *UnitFacadeSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} +} + +func (s *UnitFacadeSuite) TestNewUnitFacade(c *gc.C) { + expected := &stubUnitDataStore{Stub: s.stub} + + uf := server.NewUnitFacade(expected) + + s.stub.CheckNoCalls(c) + c.Check(uf.DataStore, gc.Equals, expected) +} + +func (s *UnitFacadeSuite) TestGetResourceInfoOkay(c *gc.C) { + opened1 := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + res1 := opened1.Resource + opened2 := resourcetesting.NewResource(c, s.stub, "eggs", "a-service", "other data") + res2 := opened2.Resource + store := &stubUnitDataStore{Stub: s.stub} + store.ReturnListResources = resource.ServiceResources{ + Resources: []resource.Resource{res1, res2}, + } + uf := server.UnitFacade{DataStore: store} + + results, err := uf.GetResourceInfo(private.ListResourcesArgs{ + ResourceNames: []string{"spam", "eggs"}, + }) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "ListResources") + c.Check(results, jc.DeepEquals, private.ResourcesResult{ + Resources: []private.ResourceResult{{ + Resource: api.Resource2API(res1), + }, { + Resource: api.Resource2API(res2), + }}, + }) +} + +func (s *UnitFacadeSuite) TestGetResourceInfoEmpty(c *gc.C) { + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + store := &stubUnitDataStore{Stub: s.stub} + store.ReturnListResources = resource.ServiceResources{ + Resources: []resource.Resource{opened.Resource}, + } + uf := server.UnitFacade{DataStore: store} + + results, err := uf.GetResourceInfo(private.ListResourcesArgs{ + ResourceNames: []string{}, + }) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, 
"ListResources") + c.Check(results, jc.DeepEquals, private.ResourcesResult{ + Resources: []private.ResourceResult{}, + }) +} + +func (s *UnitFacadeSuite) TestGetResourceInfoNotFound(c *gc.C) { + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + store := &stubUnitDataStore{Stub: s.stub} + store.ReturnListResources = resource.ServiceResources{ + Resources: []resource.Resource{opened.Resource}, + } + uf := server.UnitFacade{DataStore: store} + + results, err := uf.GetResourceInfo(private.ListResourcesArgs{ + ResourceNames: []string{"eggs"}, + }) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "ListResources") + c.Check(results, jc.DeepEquals, private.ResourcesResult{ + Resources: []private.ResourceResult{{ + ErrorResult: params.ErrorResult{ + Error: common.ServerError(errors.NotFoundf(`resource "eggs"`)), + }, + }}, + }) +} === added directory 'src/github.com/juju/juju/resource/api/server' === added file 'src/github.com/juju/juju/resource/api/server/base_test.go' --- src/github.com/juju/juju/resource/api/server/base_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/server/base_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,135 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package server_test + +import ( + "io" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/resourcetesting" +) + +type BaseSuite struct { + testing.IsolationSuite + + stub *testing.Stub + data *stubDataStore +} + +func (s *BaseSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.data = &stubDataStore{stub: s.stub} +} + +func newResource(c *gc.C, name, username, data string) (resource.Resource, api.Resource) { + opened := resourcetesting.NewResource(c, nil, name, "a-service", data) + res := opened.Resource + res.Username = username + if username == "" { + res.Timestamp = time.Time{} + } + + apiRes := api.Resource{ + CharmResource: api.CharmResource{ + Name: name, + Type: "file", + Path: res.Path, + Origin: "upload", + Revision: 0, + Fingerprint: res.Fingerprint.Bytes(), + Size: res.Size, + }, + ID: res.ID, + ServiceID: res.ServiceID, + Username: username, + Timestamp: res.Timestamp, + } + + return res, apiRes +} + +type stubDataStore struct { + stub *testing.Stub + + ReturnListResources resource.ServiceResources + ReturnAddPendingResource string + ReturnGetResource resource.Resource + ReturnGetPendingResource resource.Resource + ReturnSetResource resource.Resource + ReturnUpdatePendingResource resource.Resource + ReturnUnits []names.UnitTag +} + +func (s *stubDataStore) ListResources(service string) (resource.ServiceResources, error) { + s.stub.AddCall("ListResources", service) + if err := s.stub.NextErr(); err != nil { + return resource.ServiceResources{}, errors.Trace(err) + } + + return s.ReturnListResources, nil +} + +func (s *stubDataStore) AddPendingResource(service, userID string, chRes charmresource.Resource, r io.Reader) (string, error) { + s.stub.AddCall("AddPendingResource", service, userID, chRes, r) + if err := s.stub.NextErr(); err != nil { + return "", errors.Trace(err) + } + + return s.ReturnAddPendingResource, nil +} + +func (s *stubDataStore) GetResource(service, name string) (resource.Resource, error) { + 
s.stub.AddCall("GetResource", service, name) + if err := s.stub.NextErr(); err != nil { + return resource.Resource{}, errors.Trace(err) + } + + return s.ReturnGetResource, nil +} + +func (s *stubDataStore) GetPendingResource(service, name, pendingID string) (resource.Resource, error) { + s.stub.AddCall("GetPendingResource", service, name, pendingID) + if err := s.stub.NextErr(); err != nil { + return resource.Resource{}, errors.Trace(err) + } + + return s.ReturnGetPendingResource, nil +} + +func (s *stubDataStore) SetResource(serviceID, userID string, res charmresource.Resource, r io.Reader) (resource.Resource, error) { + s.stub.AddCall("SetResource", serviceID, userID, res, r) + if err := s.stub.NextErr(); err != nil { + return resource.Resource{}, errors.Trace(err) + } + + return s.ReturnSetResource, nil +} + +func (s *stubDataStore) UpdatePendingResource(serviceID, pendingID, userID string, res charmresource.Resource, r io.Reader) (resource.Resource, error) { + s.stub.AddCall("UpdatePendingResource", serviceID, pendingID, userID, res, r) + if err := s.stub.NextErr(); err != nil { + return resource.Resource{}, errors.Trace(err) + } + + return s.ReturnUpdatePendingResource, nil +} + +func (s *stubDataStore) Units(serviceID string) ([]names.UnitTag, error) { + s.stub.AddCall("Units", serviceID) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnUnits, nil +} === added file 'src/github.com/juju/juju/resource/api/server/handler.go' --- src/github.com/juju/juju/resource/api/server/handler.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/server/handler.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package server + +import ( + "net/http" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/resource/api" +) + +// TODO(ericsnow) Define the HTTPHandlerConstraints here? Perhaps +// even the HTTPHandlerSpec? + +// LegacyHTTPHandler is the HTTP handler for the resources endpoint. We +// use it rather having a separate handler for each HTTP method since +// registered API handlers must handle *all* HTTP methods currently. +type LegacyHTTPHandler struct { + // Connect opens a connection to state resources. + Connect func(*http.Request) (DataStore, names.Tag, error) + + // HandleUpload provides the upload functionality. + HandleUpload func(username string, st DataStore, req *http.Request) (*api.UploadResult, error) +} + +// TODO(ericsnow) Can username be extracted from the request? + +// NewLegacyHTTPHandler creates a new http.Handler for the resources endpoint. +func NewLegacyHTTPHandler(connect func(*http.Request) (DataStore, names.Tag, error)) *LegacyHTTPHandler { + return &LegacyHTTPHandler{ + Connect: connect, + HandleUpload: func(username string, st DataStore, req *http.Request) (*api.UploadResult, error) { + uh := UploadHandler{ + Username: username, + Store: st, + } + return uh.HandleRequest(req) + }, + } +} + +// ServeHTTP implements http.Handler. +func (h *LegacyHTTPHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) { + st, tag, err := h.Connect(req) + if err != nil { + api.SendHTTPError(resp, err) + return + } + + var username string + switch tag := tag.(type) { + case *names.UserTag: + username = tag.Name() + default: + // TODO(ericsnow) Fail? + username = tag.Id() + } + + // We do this *after* authorization, etc. 
(in h.Connect) in order + // to prioritize errors that may originate there. + switch req.Method { + case "PUT": + logger.Infof("handling resource upload request") + response, err := h.HandleUpload(username, st, req) + if err != nil { + api.SendHTTPError(resp, err) + return + } + api.SendHTTPStatusAndJSON(resp, http.StatusOK, &response) + logger.Infof("resource upload request successful") + default: + api.SendHTTPError(resp, errors.MethodNotAllowedf("unsupported method: %q", req.Method)) + } +} === added file 'src/github.com/juju/juju/resource/api/server/handler_test.go' --- src/github.com/juju/juju/resource/api/server/handler_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/server/handler_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,239 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package server_test + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/api/server" +) + +type LegacyHTTPHandlerSuite struct { + BaseSuite + + username string + req *http.Request + header http.Header + resp *stubHTTPResponseWriter + result *api.UploadResult +} + +var _ = gc.Suite(&LegacyHTTPHandlerSuite{}) + +func (s *LegacyHTTPHandlerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + + method := "..." + urlStr := "..." + body := strings.NewReader("...") + req, err := http.NewRequest(method, urlStr, body) + c.Assert(err, jc.ErrorIsNil) + + s.req = req + s.header = make(http.Header) + s.resp = &stubHTTPResponseWriter{ + stub: s.stub, + returnHeader: s.header, + } + s.result = &api.UploadResult{} +} + +func (s *LegacyHTTPHandlerSuite) connect(req *http.Request) (server.DataStore, names.Tag, error) { + s.stub.AddCall("Connect", req) + if err := s.stub.NextErr(); err != nil { + return nil, nil, errors.Trace(err) + } + + tag := names.NewUserTag(s.username) + return s.data, tag, nil +} + +func (s *LegacyHTTPHandlerSuite) handleUpload(username string, st server.DataStore, req *http.Request) (*api.UploadResult, error) { + s.stub.AddCall("HandleUpload", username, st, req) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.result, nil +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPConnectFailure(c *gc.C) { + s.username = "youknowwho" + handler := server.LegacyHTTPHandler{ + Connect: s.connect, + HandleUpload: s.handleUpload, + } + copied := *s.req + req := &copied + failure, expected := apiFailure(c, "", "") + s.stub.SetErrors(failure) + + handler.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "Connect", + "Header", + "Header", + "WriteHeader", + "Write", + ) + s.stub.CheckCall(c, 0, "Connect", req) + s.stub.CheckCall(c, 3, "WriteHeader", http.StatusInternalServerError) + s.stub.CheckCall(c, 4, "Write", expected) + c.Check(req, jc.DeepEquals, s.req) // did not change + c.Check(s.header, jc.DeepEquals, http.Header{ + "Content-Type": []string{"application/json"}, + "Content-Length": []string{strconv.Itoa(len(expected))}, + }) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPUnsupportedMethod(c *gc.C) { + s.username = "youknowwho" + handler := server.LegacyHTTPHandler{ + Connect: s.connect, + HandleUpload: s.handleUpload, + } + s.req.Method = "POST" + copied := *s.req + req := &copied + _, expected := 
apiFailure(c, `unsupported method: "POST"`, params.CodeMethodNotAllowed) + + handler.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "Connect", + "Header", + "Header", + "WriteHeader", + "Write", + ) + s.stub.CheckCall(c, 0, "Connect", req) + s.stub.CheckCall(c, 3, "WriteHeader", http.StatusMethodNotAllowed) + s.stub.CheckCall(c, 4, "Write", expected) + c.Check(req, jc.DeepEquals, s.req) // did not change + c.Check(s.header, jc.DeepEquals, http.Header{ + "Content-Type": []string{"application/json"}, + "Content-Length": []string{strconv.Itoa(len(expected))}, + }) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPPutSuccess(c *gc.C) { + s.result.Resource.Name = "spam" + expected, err := json.Marshal(s.result) + c.Assert(err, jc.ErrorIsNil) + s.username = "youknowwho" + handler := server.LegacyHTTPHandler{ + Connect: s.connect, + HandleUpload: s.handleUpload, + } + s.req.Method = "PUT" + copied := *s.req + req := &copied + + handler.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "Connect", + "HandleUpload", + "Header", + "Header", + "WriteHeader", + "Write", + ) + s.stub.CheckCall(c, 0, "Connect", req) + s.stub.CheckCall(c, 1, "HandleUpload", "youknowwho", s.data, req) + s.stub.CheckCall(c, 4, "WriteHeader", http.StatusOK) + s.stub.CheckCall(c, 5, "Write", string(expected)) + c.Check(req, jc.DeepEquals, s.req) // did not change + c.Check(s.header, jc.DeepEquals, http.Header{ + "Content-Type": []string{"application/json"}, + "Content-Length": []string{fmt.Sprint(len(expected))}, + }) +} + +func (s *LegacyHTTPHandlerSuite) TestServeHTTPPutHandleUploadFailure(c *gc.C) { + s.username = "youknowwho" + handler := server.LegacyHTTPHandler{ + Connect: s.connect, + HandleUpload: s.handleUpload, + } + s.req.Method = "PUT" + copied := *s.req + req := &copied + failure, expected := apiFailure(c, "", "") + s.stub.SetErrors(nil, failure) + + handler.ServeHTTP(s.resp, req) + + s.stub.CheckCallNames(c, + "Connect", + "HandleUpload", + "Header", + "Header", + "WriteHeader", + "Write", + ) + s.stub.CheckCall(c, 0, "Connect", req) + s.stub.CheckCall(c, 1, "HandleUpload", "youknowwho", s.data, req) + s.stub.CheckCall(c, 4, "WriteHeader", http.StatusInternalServerError) + s.stub.CheckCall(c, 5, "Write", expected) + c.Check(req, jc.DeepEquals, s.req) // did not change + c.Check(s.header, jc.DeepEquals, http.Header{ + "Content-Type": []string{"application/json"}, + "Content-Length": []string{strconv.Itoa(len(expected))}, + }) +} + +func apiFailure(c *gc.C, msg, code string) (error, string) { + failure := errors.New(msg) + + data, err := json.Marshal(params.ErrorResult{ + Error: ¶ms.Error{ + Message: msg, + Code: code, + }, + }) + c.Assert(err, jc.ErrorIsNil) + + return failure, string(data) +} + +type stubHTTPResponseWriter struct { + stub *testing.Stub + + returnHeader http.Header +} + +func (s *stubHTTPResponseWriter) Header() http.Header { + s.stub.AddCall("Header") + s.stub.NextErr() // Pop one off. + + return s.returnHeader +} + +func (s *stubHTTPResponseWriter) Write(data []byte) (int, error) { + s.stub.AddCall("Write", string(data)) + if err := s.stub.NextErr(); err != nil { + return 0, errors.Trace(err) + } + + return len(data), nil +} + +func (s *stubHTTPResponseWriter) WriteHeader(code int) { + s.stub.AddCall("WriteHeader", code) + s.stub.NextErr() // Pop one off. 
+} === added file 'src/github.com/juju/juju/resource/api/server/package_test.go'
--- src/github.com/juju/juju/resource/api/server/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/server/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package server_test
+
+import (
+ "testing"
+
+ gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+ gc.TestingT(t)
+} === added file 'src/github.com/juju/juju/resource/api/server/server.go'
--- src/github.com/juju/juju/resource/api/server/server.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/server/server.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,157 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package server
+
+import (
+ "io"
+
+ "github.com/juju/errors"
+ "github.com/juju/loggo"
+ "github.com/juju/names"
+ charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+ "github.com/juju/juju/apiserver/common"
+ "github.com/juju/juju/apiserver/params"
+ "github.com/juju/juju/resource"
+ "github.com/juju/juju/resource/api"
+)
+
+var logger = loggo.GetLogger("juju.resource.api.server")
+
+const (
+ // Version is the version number of the current Facade.
+ Version = 1
+)
+
+// DataStore is the functionality of Juju's state needed for the resources API.
+type DataStore interface {
+ resourceInfoStore
+ UploadDataStore
+}
+
+// Facade is the public API facade for resources.
+type Facade struct {
+ // store is the data source for the facade.
+ store resourceInfoStore
+}
+
+// NewFacade returns a new resources facade for the given Juju state.
+func NewFacade(store DataStore) *Facade {
+ return &Facade{
+ store: store,
+ }
+}
+
+// resourceInfoStore is the portion of Juju's "state" needed
+// for the resources facade.
+type resourceInfoStore interface {
+ // ListResources returns the resources for the given service.
+ ListResources(service string) (resource.ServiceResources, error)
+
+ // AddPendingResource adds the resource to the data store in a
+ // "pending" state. It will stay pending (and unavailable) until
+ // it is resolved. The returned ID is used to identify the pending
+ // resources when resolving it.
+ AddPendingResource(serviceID, userID string, chRes charmresource.Resource, r io.Reader) (string, error)
+
+ // Units returns the tags for all units in the given service.
+ Units(serviceID string) (units []names.UnitTag, err error)
+}
+
+// ListResources returns the list of resources for the given service.
+func (f Facade) ListResources(args api.ListResourcesArgs) (api.ResourcesResults, error) {
+ var r api.ResourcesResults
+ r.Results = make([]api.ResourcesResult, len(args.Entities))
+
+ for i, e := range args.Entities {
+ logger.Tracef("Listing resources for %q", e.Tag)
+ tag, apierr := parseServiceTag(e.Tag)
+ if apierr != nil {
+ r.Results[i] = api.ResourcesResult{
+ ErrorResult: params.ErrorResult{
+ Error: apierr,
+ },
+ }
+ continue
+ }
+
+ svcRes, err := f.store.ListResources(tag.Id())
+ if err != nil {
+ r.Results[i] = errorResult(err)
+ continue
+ }
+
+ units, err := f.store.Units(tag.Id())
+ if err != nil {
+ r.Results[i] = errorResult(err)
+ continue
+ }
+
+ r.Results[i] = api.ServiceResources2APIResult(svcRes, units)
+ }
+ return r, nil
+}
+
+// AddPendingResources adds the provided resources (info) to the Juju
+// model in a pending state, meaning they are not available until
+// resolved.
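+// A call sketch (hypothetical tag and resource info):
+//
+//     res, _ := f.AddPendingResources(api.AddPendingResourcesArgs{
+//         Entity:    params.Entity{Tag: "service-mysql"},
+//         Resources: []api.CharmResource{chRes},
+//     })
+//     // res.PendingIDs[i] identifies the pending resource for
+//     // args.Resources[i]; res.Error reports the first failure.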
+func (f Facade) AddPendingResources(args api.AddPendingResourcesArgs) (api.AddPendingResourcesResult, error) {
+ var result api.AddPendingResourcesResult
+
+ tag, apiErr := parseServiceTag(args.Tag)
+ if apiErr != nil {
+ result.Error = apiErr
+ return result, nil
+ }
+ serviceID := tag.Id()
+
+ var ids []string
+ for _, apiRes := range args.Resources {
+ pendingID, err := f.addPendingResource(serviceID, apiRes)
+ if err != nil {
+ result.Error = common.ServerError(err)
+ // We don't bother aggregating errors since a partial
+ // completion is disruptive and a retry of this endpoint
+ // is not expensive.
+ return result, nil
+ }
+ ids = append(ids, pendingID)
+ }
+ result.PendingIDs = ids
+ return result, nil
+}
+
+func (f Facade) addPendingResource(serviceID string, apiRes api.CharmResource) (pendingID string, err error) {
+ chRes, err := api.API2CharmResource(apiRes)
+ if err != nil {
+ return "", errors.Annotatef(err, "bad resource info for %q", chRes.Name)
+ }
+
+ userID := ""
+ var reader io.Reader
+ pendingID, err = f.store.AddPendingResource(serviceID, userID, chRes, reader)
+ if err != nil {
+ return "", errors.Annotatef(err, "while adding pending resource info for %q", chRes.Name)
+ }
+ return pendingID, nil
+}
+
+func parseServiceTag(tagStr string) (names.ServiceTag, *params.Error) { // note the concrete error type
+ serviceTag, err := names.ParseServiceTag(tagStr)
+ if err != nil {
+ return serviceTag, &params.Error{
+ Message: err.Error(),
+ Code: params.CodeBadRequest,
+ }
+ }
+ return serviceTag, nil
+}
+
+func errorResult(err error) api.ResourcesResult {
+ return api.ResourcesResult{
+ ErrorResult: params.ErrorResult{
+ Error: common.ServerError(err),
+ },
+ }
+}
=== added file 'src/github.com/juju/juju/resource/api/server/server_addpending_test.go'
--- src/github.com/juju/juju/resource/api/server/server_addpending_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/server/server_addpending_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,69 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
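+
+// For orientation: the facade is addressed by entity tag rather than by
+// bare service name, and a nil error from AddPendingResources does not
+// imply success. A minimal sketch of a direct call, assuming some
+// hypothetical fakeDataStore that satisfies server.DataStore and a
+// chRes of type api.CharmResource:
+//
+// facade := server.NewFacade(fakeDataStore)
+// result, err := facade.AddPendingResources(api.AddPendingResourcesArgs{
+// Entity: params.Entity{Tag: "service-mysql"},
+// Resources: []api.CharmResource{chRes},
+// })
+// // err stays nil even on failure; check result.Error instead.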
+
+package server_test
+
+import (
+ "github.com/juju/errors"
+ jc "github.com/juju/testing/checkers"
+ gc "gopkg.in/check.v1"
+
+ "github.com/juju/juju/apiserver/params"
+ "github.com/juju/juju/resource/api"
+ "github.com/juju/juju/resource/api/server"
+)
+
+var _ = gc.Suite(&AddPendingResourcesSuite{})
+
+type AddPendingResourcesSuite struct {
+ BaseSuite
+}
+
+func (s *AddPendingResourcesSuite) TestOkay(c *gc.C) {
+ res1, apiRes1 := newResource(c, "spam", "a-user", "spamspamspam")
+ id1 := "some-unique-ID"
+ s.data.ReturnAddPendingResource = id1
+ facade := server.NewFacade(s.data)
+
+ result, err := facade.AddPendingResources(api.AddPendingResourcesArgs{
+ Entity: params.Entity{
+ Tag: "service-a-service",
+ },
+ Resources: []api.CharmResource{
+ apiRes1.CharmResource,
+ },
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ s.stub.CheckCallNames(c, "AddPendingResource")
+ s.stub.CheckCall(c, 0, "AddPendingResource", "a-service", "", res1.Resource, nil)
+ c.Check(result, jc.DeepEquals, api.AddPendingResourcesResult{
+ PendingIDs: []string{
+ id1,
+ },
+ })
+}
+
+func (s *AddPendingResourcesSuite) TestError(c *gc.C) {
+ _, apiRes1 := newResource(c, "spam", "a-user", "spamspamspam")
+ failure := errors.New("")
+ s.stub.SetErrors(failure)
+ facade := server.NewFacade(s.data)
+
+ result, err := facade.AddPendingResources(api.AddPendingResourcesArgs{
+ Entity: params.Entity{
+ Tag: "service-a-service",
+ },
+ Resources: []api.CharmResource{
+ apiRes1.CharmResource,
+ },
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ s.stub.CheckCallNames(c, "AddPendingResource")
+ c.Check(result, jc.DeepEquals, api.AddPendingResourcesResult{
+ ErrorResult: params.ErrorResult{Error: &params.Error{
+ Message: `while adding pending resource info for "spam": `,
+ }},
+ })
+}
=== added file 'src/github.com/juju/juju/resource/api/server/server_listresources_test.go'
--- src/github.com/juju/juju/resource/api/server/server_listresources_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/server/server_listresources_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,147 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
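+
+// ListResources follows the same convention as AddPendingResources: the
+// returned error stays nil and failures are reported per result. A
+// minimal sketch, again assuming a hypothetical fakeDataStore:
+//
+// facade := server.NewFacade(fakeDataStore)
+// results, err := facade.ListResources(api.ListResourcesArgs{
+// Entities: []params.Entity{{Tag: "service-mysql"}},
+// })
+// // err is nil; inspect results.Results[i].Error per entity.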
+
+package server_test
+
+import (
+ "github.com/juju/errors"
+ "github.com/juju/names"
+ jc "github.com/juju/testing/checkers"
+ gc "gopkg.in/check.v1"
+ charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+ "github.com/juju/juju/apiserver/params"
+ "github.com/juju/juju/resource"
+ "github.com/juju/juju/resource/api"
+ "github.com/juju/juju/resource/api/server"
+)
+
+var _ = gc.Suite(&ListResourcesSuite{})
+
+type ListResourcesSuite struct {
+ BaseSuite
+}
+
+func (s *ListResourcesSuite) TestOkay(c *gc.C) {
+ res1, apiRes1 := newResource(c, "spam", "a-user", "spamspamspam")
+ res2, apiRes2 := newResource(c, "eggs", "a-user", "...")
+
+ tag0 := names.NewUnitTag("a-service/0")
+ tag1 := names.NewUnitTag("a-service/1")
+
+ chres1 := res1.Resource
+ chres2 := res2.Resource
+ chres1.Revision++
+ chres2.Revision++
+
+ apiChRes1 := apiRes1.CharmResource
+ apiChRes2 := apiRes2.CharmResource
+ apiChRes1.Revision++
+ apiChRes2.Revision++
+
+ s.data.ReturnListResources = resource.ServiceResources{
+ Resources: []resource.Resource{
+ res1,
+ res2,
+ },
+ UnitResources: []resource.UnitResources{
+ {
+ Tag: tag0,
+ Resources: []resource.Resource{
+ res1,
+ res2,
+ },
+ },
+ // note: nothing for tag1
+ },
+ CharmStoreResources: []charmresource.Resource{
+ chres1,
+ chres2,
+ },
+ }
+
+ s.data.ReturnUnits = []names.UnitTag{
+ tag0,
+ tag1,
+ }
+
+ facade := server.NewFacade(s.data)
+
+ results, err := facade.ListResources(api.ListResourcesArgs{
+ Entities: []params.Entity{{
+ Tag: "service-a-service",
+ }},
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ c.Check(results, jc.DeepEquals, api.ResourcesResults{
+ Results: []api.ResourcesResult{{
+ Resources: []api.Resource{
+ apiRes1,
+ apiRes2,
+ },
+ UnitResources: []api.UnitResources{
+ {
+ Entity: params.Entity{
+ Tag: "unit-a-service-0",
+ },
+ Resources: []api.Resource{
+ apiRes1,
+ apiRes2,
+ },
+ },
+ {
+ // we should have a listing for every unit, even if they
+ // have no resources.
+ Entity: params.Entity{
+ Tag: "unit-a-service-1",
+ },
+ },
+ },
+ CharmStoreResources: []api.CharmResource{
+ apiChRes1,
+ apiChRes2,
+ },
+ }},
+ })
+ s.stub.CheckCallNames(c, "ListResources", "Units")
+ s.stub.CheckCall(c, 0, "ListResources", "a-service")
+}
+
+func (s *ListResourcesSuite) TestEmpty(c *gc.C) {
+ facade := server.NewFacade(s.data)
+
+ results, err := facade.ListResources(api.ListResourcesArgs{
+ Entities: []params.Entity{{
+ Tag: "service-a-service",
+ }},
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ c.Check(results, jc.DeepEquals, api.ResourcesResults{
+ Results: []api.ResourcesResult{{}},
+ })
+ s.stub.CheckCallNames(c, "ListResources", "Units")
+}
+
+func (s *ListResourcesSuite) TestError(c *gc.C) {
+ failure := errors.New("")
+ s.stub.SetErrors(failure)
+ facade := server.NewFacade(s.data)
+
+ results, err := facade.ListResources(api.ListResourcesArgs{
+ Entities: []params.Entity{{
+ Tag: "service-a-service",
+ }},
+ })
+ c.Assert(err, jc.ErrorIsNil)
+
+ c.Check(results, jc.DeepEquals, api.ResourcesResults{
+ Results: []api.ResourcesResult{{
+ ErrorResult: params.ErrorResult{Error: &params.Error{
+ Message: "",
+ }},
+ }},
+ })
+ s.stub.CheckCallNames(c, "ListResources")
+}
=== added file 'src/github.com/juju/juju/resource/api/server/upload.go'
--- src/github.com/juju/juju/resource/api/server/upload.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/api/server/upload.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,133 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
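+
+// A rough sketch of how the pieces in this file fit together, assuming
+// an authenticated username and a concrete UploadDataStore:
+//
+// uh := UploadHandler{Username: username, Store: store}
+// result, err := uh.HandleRequest(req) // req is the incoming PUT request
+//
+// HandleRequest extracts the metadata from the request headers and URL
+// via ReadResource, then streams req.Body into the data store.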
+
+package server
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/juju/errors"
+ charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+ "github.com/juju/juju/resource"
+ "github.com/juju/juju/resource/api"
+)
+
+// UploadDataStore describes the portion of Juju's "state"
+// needed for handling upload requests.
+type UploadDataStore interface {
+ // GetResource returns the identified resource.
+ GetResource(serviceID, name string) (resource.Resource, error)
+
+ // GetPendingResource returns the identified resource.
+ GetPendingResource(serviceID, name, pendingID string) (resource.Resource, error)
+
+ // SetResource adds the resource to blob storage and updates the metadata.
+ SetResource(serviceID, userID string, res charmresource.Resource, r io.Reader) (resource.Resource, error)
+
+ // UpdatePendingResource adds the resource to blob storage and updates the metadata.
+ UpdatePendingResource(serviceID, pendingID, userID string, res charmresource.Resource, r io.Reader) (resource.Resource, error)
+}
+
+// TODO(ericsnow) Replace UploadedResource with resource.Opened.
+
+// UploadedResource holds both the information about an uploaded
+// resource and the reader containing its data.
+type UploadedResource struct {
+ // Service is the name of the service associated with the resource.
+ Service string
+
+ // PendingID is the resource-specific sub-ID for a pending resource.
+ PendingID string
+
+ // Resource is the information about the resource.
+ Resource charmresource.Resource
+
+ // Data holds the resource blob.
+ Data io.ReadCloser
+}
+
+// UploadHandler provides the functionality to handle upload requests.
+type UploadHandler struct {
+ // Username is the ID of the user making the upload request.
+ Username string
+
+ // Store is the data store into which the resource will be stored.
+ Store UploadDataStore
+}
+
+// HandleRequest handles a resource upload request.
+func (uh UploadHandler) HandleRequest(req *http.Request) (*api.UploadResult, error) {
+ defer req.Body.Close()
+
+ uploaded, err := uh.ReadResource(req)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ var stored resource.Resource
+ if uploaded.PendingID != "" {
+ stored, err = uh.Store.UpdatePendingResource(uploaded.Service, uploaded.PendingID, uh.Username, uploaded.Resource, uploaded.Data)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ } else {
+ stored, err = uh.Store.SetResource(uploaded.Service, uh.Username, uploaded.Resource, uploaded.Data)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+
+ result := &api.UploadResult{
+ Resource: api.Resource2API(stored),
+ }
+ return result, nil
+}
+
+// ReadResource extracts the relevant info from the request.
+func (uh UploadHandler) ReadResource(req *http.Request) (*UploadedResource, error) { + uReq, err := api.ExtractUploadRequest(req) + if err != nil { + return nil, errors.Trace(err) + } + var res resource.Resource + if uReq.PendingID != "" { + res, err = uh.Store.GetPendingResource(uReq.Service, uReq.Name, uReq.PendingID) + if err != nil { + return nil, errors.Trace(err) + } + } else { + res, err = uh.Store.GetResource(uReq.Service, uReq.Name) + if err != nil { + return nil, errors.Trace(err) + } + } + + chRes, err := uh.updateResource(res.Resource, uReq.Fingerprint, uReq.Size) + if err != nil { + return nil, errors.Trace(err) + } + + uploaded := &UploadedResource{ + Service: uReq.Service, + PendingID: uReq.PendingID, + Resource: chRes, + Data: req.Body, + } + return uploaded, nil +} + +// updateResource returns a copy of the provided resource, updated with +// the given information. +func (uh UploadHandler) updateResource(res charmresource.Resource, fp charmresource.Fingerprint, size int64) (charmresource.Resource, error) { + res.Origin = charmresource.OriginUpload + res.Revision = 0 + res.Fingerprint = fp + res.Size = size + + if err := res.Validate(); err != nil { + return res, errors.Trace(err) + } + return res, nil +} === added file 'src/github.com/juju/juju/resource/api/server/upload_test.go' --- src/github.com/juju/juju/resource/api/server/upload_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/server/upload_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,242 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package server_test + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource/api" + "github.com/juju/juju/resource/api/server" +) + +type UploadSuite struct { + BaseSuite + + req *http.Request + header http.Header + resp *stubHTTPResponseWriter +} + +var _ = gc.Suite(&UploadSuite{}) + +func (s *UploadSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + + method := "..." + urlStr := "..." 
+ body := strings.NewReader("...") + req, err := http.NewRequest(method, urlStr, body) + c.Assert(err, jc.ErrorIsNil) + + s.req = req + s.header = make(http.Header) + s.resp = &stubHTTPResponseWriter{ + stub: s.stub, + returnHeader: s.header, + } +} + +func (s *UploadSuite) TestHandleRequestOkay(c *gc.C) { + content := "" + res, _ := newResource(c, "spam", "a-user", content) + stored, _ := newResource(c, "spam", "", "") + s.data.ReturnGetResource = stored + s.data.ReturnSetResource = res + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, body := newUploadRequest(c, "spam", "a-service", content) + + result, err := uh.HandleRequest(req) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "GetResource", "SetResource") + s.stub.CheckCall(c, 0, "GetResource", "a-service", "spam") + s.stub.CheckCall(c, 1, "SetResource", "a-service", "a-user", res.Resource, ioutil.NopCloser(body)) + c.Check(result, jc.DeepEquals, &api.UploadResult{ + Resource: api.Resource2API(res), + }) +} + +func (s *UploadSuite) TestHandleRequestPending(c *gc.C) { + content := "" + res, _ := newResource(c, "spam", "a-user", content) + res.PendingID = "some-unique-id" + stored, _ := newResource(c, "spam", "", "") + stored.PendingID = "some-unique-id" + s.data.ReturnGetPendingResource = stored + s.data.ReturnUpdatePendingResource = res + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, body := newUploadRequest(c, "spam", "a-service", content) + req.URL.RawQuery += "&pendingid=some-unique-id" + + result, err := uh.HandleRequest(req) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "GetPendingResource", "UpdatePendingResource") + s.stub.CheckCall(c, 0, "GetPendingResource", "a-service", "spam", "some-unique-id") + s.stub.CheckCall(c, 1, "UpdatePendingResource", "a-service", "some-unique-id", "a-user", res.Resource, ioutil.NopCloser(body)) + c.Check(result, jc.DeepEquals, &api.UploadResult{ + Resource: api.Resource2API(res), + }) +} + +func (s *UploadSuite) TestHandleRequestSetResourceFailure(c *gc.C) { + content := "" + stored, _ := newResource(c, "spam", "", "") + s.data.ReturnGetResource = stored + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, _ := newUploadRequest(c, "spam", "a-service", content) + failure := errors.New("") + s.stub.SetErrors(nil, failure) + + _, err := uh.HandleRequest(req) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "GetResource", "SetResource") +} + +func (s *UploadSuite) TestReadResourceOkay(c *gc.C) { + content := "" + expected, _ := newResource(c, "spam", "a-user", content) + stored, _ := newResource(c, "spam", "", "") + s.data.ReturnGetResource = stored + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, body := newUploadRequest(c, "spam", "a-service", content) + + uploaded, err := uh.ReadResource(req) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "GetResource") + s.stub.CheckCall(c, 0, "GetResource", "a-service", "spam") + c.Check(uploaded, jc.DeepEquals, &server.UploadedResource{ + Service: "a-service", + Resource: expected.Resource, + Data: ioutil.NopCloser(body), + }) +} + +func (s *UploadSuite) TestReadResourcePending(c *gc.C) { + content := "" + expected, _ := newResource(c, "spam", "a-user", content) + stored, _ := newResource(c, "spam", "", "") + s.data.ReturnGetPendingResource = stored + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, body := newUploadRequest(c, "spam", "a-service", 
content) + req.URL.RawQuery += "&pendingid=some-unique-id" + + uploaded, err := uh.ReadResource(req) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "GetPendingResource") + s.stub.CheckCall(c, 0, "GetPendingResource", "a-service", "spam", "some-unique-id") + c.Check(uploaded, jc.DeepEquals, &server.UploadedResource{ + Service: "a-service", + PendingID: "some-unique-id", + Resource: expected.Resource, + Data: ioutil.NopCloser(body), + }) +} + +func (s *UploadSuite) TestReadResourceBadContentType(c *gc.C) { + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, _ := newUploadRequest(c, "spam", "a-service", "") + req.Header.Set("Content-Type", "text/plain") + + _, err := uh.ReadResource(req) + + c.Check(err, gc.ErrorMatches, "unsupported content type .*") + s.stub.CheckNoCalls(c) +} + +func (s *UploadSuite) TestReadResourceGetResourceFailure(c *gc.C) { + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, _ := newUploadRequest(c, "spam", "a-service", "") + failure := errors.New("") + s.stub.SetErrors(failure) + + _, err := uh.ReadResource(req) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "GetResource") +} + +func (s *UploadSuite) TestReadResourceBadFingerprint(c *gc.C) { + stored, _ := newResource(c, "spam", "", "") + s.data.ReturnGetResource = stored + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, _ := newUploadRequest(c, "spam", "a-service", "") + req.Header.Set("Content-SHA384", "bogus") + + _, err := uh.ReadResource(req) + + c.Check(err, gc.ErrorMatches, "invalid fingerprint.*") + s.stub.CheckNoCalls(c) +} + +func (s *UploadSuite) TestReadResourceBadSize(c *gc.C) { + stored, _ := newResource(c, "spam", "", "") + s.data.ReturnGetResource = stored + uh := server.UploadHandler{ + Username: "a-user", + Store: s.data, + } + req, _ := newUploadRequest(c, "spam", "a-service", "") + req.Header.Set("Content-Length", "should-be-an-int") + + _, err := uh.ReadResource(req) + + c.Check(err, gc.ErrorMatches, "invalid size.*") + s.stub.CheckNoCalls(c) +} + +func newUploadRequest(c *gc.C, name, service, content string) (*http.Request, io.Reader) { + fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) + c.Assert(err, jc.ErrorIsNil) + + method := "PUT" + urlStr := "https://api:17017/services/%s/resources/%s" + urlStr += "?:service=%s&:resource=%s" // ...added by the mux. + urlStr = fmt.Sprintf(urlStr, service, name, service, name) + body := strings.NewReader(content) + req, err := http.NewRequest(method, urlStr, body) + c.Assert(err, jc.ErrorIsNil) + + req.Header.Set("Content-Type", "application/octet-stream") + req.Header.Set("Content-Length", fmt.Sprint(len(content))) + req.Header.Set("Content-SHA384", fp.String()) + + return req, body +} === added file 'src/github.com/juju/juju/resource/api/upload.go' --- src/github.com/juju/juju/resource/api/upload.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/api/upload.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,116 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package api + +import ( + "fmt" + "io" + "net/http" + "strconv" + + "github.com/juju/errors" + "github.com/juju/names" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +// UploadRequest defines a single upload request. +type UploadRequest struct { + // Service is the service ID. + Service string + + // Name is the resource name. 
+ Name string + + // Size is the size of the uploaded data, in bytes. + Size int64 + + // Fingerprint is the fingerprint of the uploaded data. + Fingerprint charmresource.Fingerprint + + // PendingID is the pending ID to associate with this upload, if any. + PendingID string +} + +// NewUploadRequest generates a new upload request for the given resource. +func NewUploadRequest(service, name string, r io.ReadSeeker) (UploadRequest, error) { + if !names.IsValidService(service) { + return UploadRequest{}, errors.Errorf("invalid service %q", service) + } + + content, err := resource.GenerateContent(r) + if err != nil { + return UploadRequest{}, errors.Trace(err) + } + + ur := UploadRequest{ + Service: service, + Name: name, + Size: content.Size, + Fingerprint: content.Fingerprint, + } + return ur, nil +} + +// ExtractUploadRequest pulls the required info from the HTTP request. +func ExtractUploadRequest(req *http.Request) (UploadRequest, error) { + var ur UploadRequest + + if req.Header.Get("Content-Length") == "" { + req.Header.Set("Content-Length", fmt.Sprint(req.ContentLength)) + } + + ctype := req.Header.Get("Content-Type") + if ctype != ContentTypeRaw { + return ur, errors.Errorf("unsupported content type %q", ctype) + } + + service, name := ExtractEndpointDetails(req.URL) + fingerprint := req.Header.Get("Content-Sha384") // This parallels "Content-MD5". + sizeRaw := req.Header.Get("Content-Length") + pendingID := req.URL.Query().Get("pendingid") + + fp, err := charmresource.ParseFingerprint(fingerprint) + if err != nil { + return ur, errors.Annotate(err, "invalid fingerprint") + } + + size, err := strconv.ParseInt(sizeRaw, 10, 64) + if err != nil { + return ur, errors.Annotate(err, "invalid size") + } + + ur = UploadRequest{ + Service: service, + Name: name, + Size: size, + Fingerprint: fp, + PendingID: pendingID, + } + return ur, nil +} + +// HTTPRequest generates a new HTTP request. +func (ur UploadRequest) HTTPRequest() (*http.Request, error) { + // TODO(ericsnow) What about the rest of the URL? + urlStr := NewEndpointPath(ur.Service, ur.Name) + req, err := http.NewRequest("PUT", urlStr, nil) + if err != nil { + return nil, errors.Trace(err) + } + + req.Header.Set("Content-Type", ContentTypeRaw) + req.Header.Set("Content-Sha384", ur.Fingerprint.String()) + req.Header.Set("Content-Length", fmt.Sprint(ur.Size)) + req.ContentLength = ur.Size + + if ur.PendingID != "" { + query := req.URL.Query() + query.Set("pendingid", ur.PendingID) + req.URL.RawQuery = query.Encode() + } + + return req, nil +} === added directory 'src/github.com/juju/juju/resource/charmstore' === added file 'src/github.com/juju/juju/resource/charmstore/cache.go' --- src/github.com/juju/juju/resource/charmstore/cache.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/charmstore/cache.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore + +import ( + "io" + + "github.com/juju/errors" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +// EntityCache exposes the functionality needed to cache data from +// the charm store. The operations apply to a single service (or unit). +type EntityCache interface { + // GetResource returns the resource data for the identified resource. + GetResource(name string) (resource.Resource, error) + + // SetResource stores the resource in the local cache. 
+ SetResource(res charmresource.Resource, reader io.Reader) (resource.Resource, error) + + // OpenResource returns metadata about the resource, and a reader + // for the resource. + OpenResource(name string) (resource.Resource, io.ReadCloser, error) +} + +// cacheForOperations is a wrapper around EntityCache. It supports +// the operations type. +type cacheForOperations struct { + EntityCache +} + +// get retrieves the resource info and data from the cache. If only +// the info is found then the returned reader will be nil. If no cache +// is in use then errors.NotFound is returned. +func (cfo cacheForOperations) get(name string) (resource.Resource, io.ReadCloser, error) { + if cfo.EntityCache == nil { + return resource.Resource{}, nil, errors.NotFoundf("resource %q", name) + } + + res, reader, err := cfo.OpenResource(name) + if errors.IsNotFound(err) { + reader = nil + res, err = cfo.GetResource(name) + } + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + return res, reader, nil +} + +// set stores the resource info and data in the cache, +// if there is one. If no cache is in use then this is a no-op. Note +// that the returned reader may or may not be the same one that was +// passed in. +func (cfo cacheForOperations) set(chRes charmresource.Resource, reader io.ReadCloser) (resource.Resource, io.ReadCloser, error) { + if cfo.EntityCache == nil { + res := resource.Resource{ + Resource: chRes, + } + return res, reader, nil // a no-op + } + defer reader.Close() + + res, err := cfo.SetResource(chRes, reader) + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + _, reader, err = cfo.OpenResource(res.Name) + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + return res, reader, nil +} === added file 'src/github.com/juju/juju/resource/charmstore/client.go' --- src/github.com/juju/juju/resource/charmstore/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/charmstore/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore + +import ( + "io" + + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" +) + +// Client exposes the functionality of a charm store client as needed +// for charm store operations for Juju resources. +type Client interface { + BaseClient + io.Closer +} + +// BaseClient exposes the functionality of charmrepo/csclient.Client, +// as used for charm store operations for Juju resources. +type BaseClient interface { + // ListResources composes, for each of the identified charms, the + // list of details for each of the charm's resources. Those details + // are those associated with the specific charm revision. They + // include the resource's metadata and revision. + ListResources(charmURLs []*charm.URL) ([][]charmresource.Resource, error) + + // GetResource returns a reader for the resource's data. That data + // is streamed from the charm store. The charm's revision, if any, + // is ignored. If the identified resource is not in the charm store + // then errors.NotFound is returned. 
+ GetResource(cURL *charm.URL, resourceName string, resourceRevision int) (io.ReadCloser, error) +} === added file 'src/github.com/juju/juju/resource/charmstore/operations.go' --- src/github.com/juju/juju/resource/charmstore/operations.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/charmstore/operations.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore + +import ( + "io" + + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +// GetResourceArgs holds the arguments to GetResource(). +type GetResourceArgs struct { + // Client is the charm store client to use. + Client Client + + // EntityCache is the charm store cache to use. It is optional. + Cache EntityCache + + // CharmURL and Name together identify the resource to get. + CharmURL *charm.URL + Name string +} + +func (args GetResourceArgs) validate() error { + if args.Client == nil { + return errors.Errorf("missing charm store client") + } + // FYI, args.Cache may be nil. + if args.CharmURL == nil { + return errors.Errorf("missing charm URL") + } + if args.Name == "" { + return errors.Errorf("missing resource name") + } + return nil +} + +// GetResource returns a reader for the resource's data. That data is +// streamed from the charm store. +// +// If a cache is set up then the resource is read from there. If the +// resource is not in the cache at all then errors.NotFound is returned. +// If only the resource's details are in the cache (but not the actual +// file) then the file is read from the charm store. In that case the +// cache is updated to contain the file too. +func GetResource(args GetResourceArgs) (resource.Resource, io.ReadCloser, error) { + if err := args.validate(); err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + cache := cacheForOperations{ + EntityCache: args.Cache, + } + + res, reader, err := cache.get(args.Name) + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + if reader != nil { + // Both the info *and* the data were found in the cache. + return res, reader, nil + } + + // Otherwise, just the info was found in the cache. So we read the + // data from the charm store through a new client and set the data + // for the resource in the cache. + + if res.Origin != charmresource.OriginStore { + return resource.Resource{}, nil, errors.NotFoundf("resource %q", res.Name) + } + + reader, err = args.Client.GetResource(args.CharmURL, res.Name, res.Revision) + if errors.IsNotFound(err) { + msg := "while getting resource from the charm store" + return resource.Resource{}, nil, errors.Annotate(err, msg) + } + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + res, reader, err = cache.set(res.Resource, reader) + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + return res, reader, nil +} === added directory 'src/github.com/juju/juju/resource/cmd' === added file 'src/github.com/juju/juju/resource/cmd/deploy.go' --- src/github.com/juju/juju/resource/cmd/deploy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/deploy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,169 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
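+
+// A rough sketch of the deploy-time flow implemented in this file,
+// assuming a client that satisfies DeployClient and resource metadata
+// (metaFromCharm below is hypothetical) taken from the charm being
+// deployed:
+//
+// ids, err := DeployResources("mysql",
+// map[string]string{"config": "./config.yml"}, // resource name -> local file
+// metaFromCharm, client)
+//
+// Resources without a local file are registered as store resources in a
+// single AddPendingResources call; the rest are uploaded from disk one
+// at a time via AddPendingResource.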
+
+package cmd
+
+import (
+ "io"
+ "os"
+ "strings"
+
+ "github.com/juju/errors"
+ charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+)
+
+// DeployClient exposes the functionality of the resources API needed
+// for deploy.
+type DeployClient interface {
+ // AddPendingResources adds pending metadata for store-based resources.
+ AddPendingResources(serviceID string, resources []charmresource.Resource) (ids []string, err error)
+ // AddPendingResource uploads data and metadata for a pending resource for the given service.
+ AddPendingResource(serviceID string, resource charmresource.Resource, r io.ReadSeeker) (id string, err error)
+}
+
+// DeployResources uploads the bytes for the given files to the server and
+// creates pending resource metadata for all resources mentioned in the
+// metadata. It returns a map of resource name to pending resource IDs.
+func DeployResources(serviceID string, files map[string]string, resources map[string]charmresource.Meta, client DeployClient) (ids map[string]string, err error) {
+ d := deployUploader{
+ serviceID: serviceID,
+ client: client,
+ resources: resources,
+ osOpen: func(s string) (ReadSeekCloser, error) { return os.Open(s) },
+ osStat: func(s string) error { _, err := os.Stat(s); return err },
+ }
+
+ ids, err = d.upload(files)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ return ids, nil
+}
+
+type deployUploader struct {
+ serviceID string
+ resources map[string]charmresource.Meta
+ client DeployClient
+ osOpen func(path string) (ReadSeekCloser, error)
+ osStat func(path string) error
+}
+
+func (d deployUploader) upload(files map[string]string) (map[string]string, error) {
+ if err := d.validateResources(); err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if err := d.checkExpectedResources(files); err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if err := d.checkFiles(files); err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ storeResources := d.storeResources(files)
+ pending := map[string]string{}
+ if len(storeResources) > 0 {
+ ids, err := d.client.AddPendingResources(d.serviceID, storeResources)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ // guaranteed 1:1 correlation between ids and resources.
+ for i, res := range storeResources { + pending[res.Name] = ids[i] + } + } + + for name, filename := range files { + id, err := d.uploadFile(name, filename) + if err != nil { + return nil, errors.Trace(err) + } + pending[name] = id + } + + return pending, nil +} + +func (d deployUploader) checkFiles(files map[string]string) error { + for name, path := range files { + err := d.osStat(path) + if os.IsNotExist(err) { + return errors.Annotatef(err, "file for resource %q", name) + } + if err != nil { + return errors.Annotatef(err, "can't read file for resource %q", name) + } + } + return nil +} + +func (d deployUploader) validateResources() error { + var errs []error + for _, meta := range d.resources { + if err := meta.Validate(); err != nil { + errs = append(errs, err) + } + } + if len(errs) == 1 { + return errors.Trace(errs[0]) + } + if len(errs) > 1 { + msgs := make([]string, len(errs)) + for i, err := range errs { + msgs[i] = err.Error() + } + return errors.NewNotValid(nil, strings.Join(msgs, ", ")) + } + return nil +} + +func (d deployUploader) storeResources(uploads map[string]string) []charmresource.Resource { + var resources []charmresource.Resource + for name, meta := range d.resources { + if _, ok := uploads[name]; !ok { + resources = append(resources, charmresource.Resource{ + Meta: meta, + Origin: charmresource.OriginStore, + // Revision, Fingerprint, and Size will be added server-side, + // when we download the bytes from the store. + }) + } + } + return resources +} + +func (d deployUploader) uploadFile(resourcename, filename string) (id string, err error) { + f, err := d.osOpen(filename) + if err != nil { + return "", errors.Trace(err) + } + defer f.Close() + res := charmresource.Resource{ + Meta: d.resources[resourcename], + Origin: charmresource.OriginUpload, + } + + id, err = d.client.AddPendingResource(d.serviceID, res, f) + if err != nil { + return "", errors.Trace(err) + } + return id, err +} + +func (d deployUploader) checkExpectedResources(provided map[string]string) error { + var unknown []string + + for name := range provided { + if _, ok := d.resources[name]; !ok { + unknown = append(unknown, name) + } + } + if len(unknown) == 1 { + return errors.Errorf("unrecognized resource %q", unknown[0]) + } + if len(unknown) > 1 { + return errors.Errorf("unrecognized resources: %s", strings.Join(unknown, ", ")) + } + return nil +} === added file 'src/github.com/juju/juju/resource/cmd/deploy_test.go' --- src/github.com/juju/juju/resource/cmd/deploy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/deploy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,174 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
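+
+// The osOpen and osStat fields on deployUploader are the seams these
+// tests rely on: they stand in for os.Open and os.Stat so that no real
+// filesystem access is needed. A sketch of the substitution:
+//
+// du := deployUploader{
+// osOpen: deps.Open, // fake returning an in-memory ReadSeekCloser
+// osStat: deps.Stat, // fake whose errors are scripted via testing.Stub
+// }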
+ +package cmd + +import ( + "bytes" + "io" + "os" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" +) + +type DeploySuite struct { + testing.IsolationSuite + + stub *testing.Stub +} + +var _ = gc.Suite(&DeploySuite{}) + +func (s *DeploySuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} +} + +func (s DeploySuite) TestUploadOK(c *gc.C) { + deps := uploadDeps{s.stub, rsc{&bytes.Buffer{}}} + du := deployUploader{ + serviceID: "mysql", + client: deps, + resources: map[string]charmresource.Meta{ + "upload": { + Name: "upload", + Type: charmresource.TypeFile, + Path: "upload", + }, + "store": { + Name: "store", + Type: charmresource.TypeFile, + Path: "store", + }, + }, + osOpen: deps.Open, + osStat: deps.Stat, + } + + files := map[string]string{ + "upload": "foobar.txt", + } + ids, err := du.upload(files) + c.Assert(err, jc.ErrorIsNil) + c.Check(ids, gc.DeepEquals, map[string]string{ + "upload": "id-upload", + "store": "id-store", + }) + + expectedStore := []charmresource.Resource{ + { + Meta: du.resources["store"], + Origin: charmresource.OriginStore, + }, + } + s.stub.CheckCall(c, 1, "AddPendingResources", "mysql", expectedStore) + s.stub.CheckCall(c, 2, "Open", "foobar.txt") + + expectedUpload := charmresource.Resource{ + Meta: du.resources["upload"], + Origin: charmresource.OriginUpload, + } + s.stub.CheckCall(c, 3, "AddPendingResource", "mysql", expectedUpload, deps.ReadSeekCloser) +} + +func (s DeploySuite) TestUploadUnexpectedResource(c *gc.C) { + deps := uploadDeps{s.stub, rsc{&bytes.Buffer{}}} + du := deployUploader{ + serviceID: "mysql", + client: deps, + resources: map[string]charmresource.Meta{ + "res1": { + Name: "res1", + Type: charmresource.TypeFile, + Path: "path", + }, + }, + osOpen: deps.Open, + osStat: deps.Stat, + } + + files := map[string]string{"some bad resource": "foobar.txt"} + _, err := du.upload(files) + c.Check(err, gc.ErrorMatches, `unrecognized resource "some bad resource"`) + + s.stub.CheckNoCalls(c) +} + +func (s DeploySuite) TestMissingResource(c *gc.C) { + deps := uploadDeps{s.stub, rsc{&bytes.Buffer{}}} + du := deployUploader{ + serviceID: "mysql", + client: deps, + resources: map[string]charmresource.Meta{ + "res1": { + Name: "res1", + Type: charmresource.TypeFile, + Path: "path", + }, + }, + osOpen: deps.Open, + osStat: deps.Stat, + } + + // set the error that will be returned by os.Stat + s.stub.SetErrors(os.ErrNotExist) + + files := map[string]string{"res1": "foobar.txt"} + _, err := du.upload(files) + c.Check(err, gc.ErrorMatches, `file for resource "res1".*`) + c.Check(errors.Cause(err), jc.Satisfies, os.IsNotExist) +} + +type uploadDeps struct { + stub *testing.Stub + ReadSeekCloser ReadSeekCloser +} + +func (s uploadDeps) AddPendingResources(serviceID string, resources []charmresource.Resource) (ids []string, err error) { + s.stub.AddCall("AddPendingResources", serviceID, resources) + if err := s.stub.NextErr(); err != nil { + return nil, err + } + ids = make([]string, len(resources)) + for i, res := range resources { + ids[i] = "id-" + res.Name + } + return ids, nil +} + +func (s uploadDeps) AddPendingResource(serviceID string, resource charmresource.Resource, r io.ReadSeeker) (id string, err error) { + s.stub.AddCall("AddPendingResource", serviceID, resource, r) + if err := s.stub.NextErr(); err != nil { + return "", err + } + return "id-" + resource.Name, nil +} + +func (s 
uploadDeps) Open(name string) (ReadSeekCloser, error) {
+ s.stub.AddCall("Open", name)
+ if err := s.stub.NextErr(); err != nil {
+ return nil, err
+ }
+ return s.ReadSeekCloser, nil
+}
+
+func (s uploadDeps) Stat(name string) error {
+ s.stub.AddCall("Stat", name)
+ return s.stub.NextErr()
+}
+
+type rsc struct {
+ *bytes.Buffer
+}
+
+func (rsc) Close() error {
+ return nil
+}
+func (rsc) Seek(offset int64, whence int) (int64, error) {
+ return 0, nil
+}
=== added file 'src/github.com/juju/juju/resource/cmd/file.go'
--- src/github.com/juju/juju/resource/cmd/file.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/cmd/file.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,36 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cmd
+
+import (
+ "strings"
+
+ "github.com/juju/errors"
+)
+
+// resourceFile associates a resource name to a filename.
+type resourceFile struct {
+ service string
+ name string
+ filename string
+}
+
+// parseResourceFileArg converts the provided string into a name and
+// filename. The string must be in the "<name>=<path>" format.
+func parseResourceFileArg(raw string) (name string, filename string, _ error) {
+ vals := strings.SplitN(raw, "=", 2)
+ if len(vals) < 2 {
+ msg := "expected name=path format"
+ return "", "", errors.NewNotValid(nil, msg)
+ }
+
+ name, filename = vals[0], vals[1]
+ if name == "" {
+ return "", "", errors.NewNotValid(nil, "missing resource name")
+ }
+ if filename == "" {
+ return "", "", errors.NewNotValid(nil, "missing filename")
+ }
+ return name, filename, nil
+}
=== added file 'src/github.com/juju/juju/resource/cmd/formatted.go'
--- src/github.com/juju/juju/resource/cmd/formatted.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/cmd/formatted.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,73 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cmd
+
+import "time"
+
+// FormattedCharmResource holds the formatted representation of a resource's info.
+type FormattedCharmResource struct {
+ // These fields are exported for the sake of serialization.
+ Name string `json:"name" yaml:"name"`
+ Type string `json:"type" yaml:"type"`
+ Path string `json:"path" yaml:"path"`
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ Revision int `json:"revision,omitempty" yaml:"revision,omitempty"`
+ Fingerprint string `json:"fingerprint" yaml:"fingerprint"`
+ Size int64 `json:"size" yaml:"size"`
+ Origin string `json:"origin" yaml:"origin"`
+}
+
+// FormattedServiceInfo holds the formatted representation of the information
+// about a service's resources.
+type FormattedServiceInfo struct {
+ Resources []FormattedSvcResource `json:"resources,omitempty" yaml:"resources,omitempty"`
+ Updates []FormattedCharmResource `json:"updates,omitempty" yaml:"updates,omitempty"`
+}
+
+// FormattedSvcResource holds the formatted representation of a resource's info.
+type FormattedSvcResource struct {
+ // These fields are exported for the sake of serialization.
+ ID string `json:"resourceid,omitempty" yaml:"resourceid,omitempty"`
+ ServiceID string `json:"serviceid,omitempty" yaml:"serviceid,omitempty"`
+ Name string `json:"name" yaml:"name"`
+ Type string `json:"type" yaml:"type"`
+ Path string `json:"path" yaml:"path"`
+ Description string `json:"description,omitempty" yaml:"description,omitempty"`
+ Revision int `json:"revision,omitempty" yaml:"revision,omitempty"`
+ Fingerprint string `json:"fingerprint" yaml:"fingerprint"`
+ Size int64 `json:"size" yaml:"size"`
+ Origin string `json:"origin" yaml:"origin"`
+ Used bool `json:"used" yaml:"used"`
+ Timestamp time.Time `json:"timestamp,omitempty" yaml:"timestamp,omitempty"`
+ Username string `json:"username,omitempty" yaml:"username,omitempty"`
+
+ // These fields are not exported so they won't be serialized, since they are
+ // specific to the tabular output.
+ combinedRevision string
+ usedYesNo string
+ combinedOrigin string
+}
+
+// FormattedUnitResource holds the formatted representation of a resource's info.
+type FormattedUnitResource FormattedSvcResource
+
+// FormattedDetailResource is the data for a single line of tabular output for
+// juju resources --details.
+type FormattedDetailResource struct {
+ UnitID string `json:"unitID" yaml:"unitID"`
+ Unit FormattedSvcResource `json:"unit" yaml:"unit"`
+ Expected FormattedSvcResource `json:"expected" yaml:"expected"`
+ unitNumber int
+}
+
+// FormattedServiceDetails is the data for the tabular output for juju resources
+// --details.
+type FormattedServiceDetails struct {
+ Resources []FormattedDetailResource `json:"resources,omitempty" yaml:"resources,omitempty"`
+ Updates []FormattedCharmResource `json:"updates,omitempty" yaml:"updates,omitempty"`
+}
+
+// FormattedUnitDetails is the data for the tabular output for juju resources
+// --details.
+type FormattedUnitDetails []FormattedDetailResource
=== added file 'src/github.com/juju/juju/resource/cmd/formatter.go'
--- src/github.com/juju/juju/resource/cmd/formatter.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/cmd/formatter.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,209 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cmd
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+ "github.com/juju/errors"
+ "github.com/juju/juju/resource"
+ "github.com/juju/names"
+)
+
+type charmResourcesFormatter struct {
+ resources []charmresource.Resource
+}
+
+func newCharmResourcesFormatter(resources []charmresource.Resource) *charmResourcesFormatter {
+ // Note that unlike the "juju status" code, we don't worry
+ // about "compatVersion".
+ crf := charmResourcesFormatter{
+ resources: resources,
+ }
+ return &crf
+}
+
+func (crf *charmResourcesFormatter) format() []FormattedCharmResource {
+ if crf.resources == nil {
+ return nil
+ }
+
+ var formatted []FormattedCharmResource
+ for _, res := range crf.resources {
+ formatted = append(formatted, FormatCharmResource(res))
+ }
+ return formatted
+}
+
+// FormatCharmResource converts the resource info into a FormattedCharmResource.
+func FormatCharmResource(res charmresource.Resource) FormattedCharmResource {
+ return FormattedCharmResource{
+ Name: res.Name,
+ Type: res.Type.String(),
+ Path: res.Path,
+ Description: res.Description,
+ Revision: res.Revision,
+ Origin: res.Origin.String(),
+ Fingerprint: res.Fingerprint.String(), // ...the hex string.
+ Size: res.Size,
+ }
+}
+
+// FormatSvcResource converts the resource info into a FormattedSvcResource.
+func FormatSvcResource(res resource.Resource) FormattedSvcResource {
+ used := !res.IsPlaceholder()
+ return FormattedSvcResource{
+ ID: res.ID,
+ ServiceID: res.ServiceID,
+ Name: res.Name,
+ Type: res.Type.String(),
+ Path: res.Path,
+ Description: res.Description,
+ Revision: res.Revision,
+ Origin: res.Origin.String(),
+ Fingerprint: res.Fingerprint.String(),
+ Size: res.Size,
+ Used: used,
+ Timestamp: res.Timestamp,
+ Username: res.Username,
+ combinedRevision: combinedRevision(res),
+ combinedOrigin: combinedOrigin(used, res),
+ usedYesNo: usedYesNo(used),
+ }
+}
+
+func formatServiceResources(sr resource.ServiceResources) (FormattedServiceInfo, error) {
+ var formatted FormattedServiceInfo
+ updates, err := sr.Updates()
+ if err != nil {
+ return formatted, errors.Trace(err)
+ }
+ formatted = FormattedServiceInfo{
+ Resources: make([]FormattedSvcResource, len(sr.Resources)),
+ Updates: make([]FormattedCharmResource, len(updates)),
+ }
+
+ for i, r := range sr.Resources {
+ formatted.Resources[i] = FormatSvcResource(r)
+ }
+ for i, u := range updates {
+ formatted.Updates[i] = FormatCharmResource(u)
+ }
+ return formatted, nil
+}
+
+// FormatServiceDetails converts a ServiceResources value into a formatted value
+// for display on the command line.
+func FormatServiceDetails(sr resource.ServiceResources) (FormattedServiceDetails, error) {
+ var formatted FormattedServiceDetails
+ details, err := detailedResources("", sr)
+ if err != nil {
+ return formatted, errors.Trace(err)
+ }
+ updates, err := sr.Updates()
+ if err != nil {
+ return formatted, errors.Trace(err)
+ }
+ formatted = FormattedServiceDetails{
+ Resources: details,
+ Updates: make([]FormattedCharmResource, len(updates)),
+ }
+ for i, u := range updates {
+ formatted.Updates[i] = FormatCharmResource(u)
+ }
+ return formatted, nil
+}
+
+// FormatDetailResource converts the arguments into a FormattedDetailResource.
+func FormatDetailResource(tag names.UnitTag, svc, unit resource.Resource) (FormattedDetailResource, error) {
+ // note that the unit resource can be a zero value here, to indicate that
+ // the unit has not downloaded that resource yet.
+
+ unitNum, err := unitNum(tag)
+ if err != nil {
+ return FormattedDetailResource{}, errors.Trace(err)
+ }
+ return FormattedDetailResource{
+ UnitID: tag.Id(),
+ unitNumber: unitNum,
+ Unit: FormatSvcResource(unit),
+ Expected: FormatSvcResource(svc),
+ }, nil
+}
+
+func combinedRevision(r resource.Resource) string {
+ switch r.Origin {
+ case charmresource.OriginStore:
+ return fmt.Sprintf("%d", r.Revision)
+ case charmresource.OriginUpload:
+ if !r.Timestamp.IsZero() {
+ return r.Timestamp.Format("2006-01-02T15:04")
+ }
+ }
+ return "-"
+}
+
+func combinedOrigin(used bool, r resource.Resource) string {
+ if r.Origin == charmresource.OriginUpload && used && r.Username != "" {
+ return r.Username
+ }
+ if r.Origin == charmresource.OriginStore {
+ return "charmstore"
+ }
+ return r.Origin.String()
+}
+
+func usedYesNo(used bool) string {
+ if used {
+ return "yes"
+ }
+ return "no"
+}
+
+func unitNum(unit names.UnitTag) (int, error) {
+ vals := strings.SplitN(unit.Id(), "/", 2)
+ if len(vals) != 2 {
+ return 0, errors.Errorf("%q is not a valid unit ID", unit.Id())
+ }
+ num, err := strconv.Atoi(vals[1])
+ if err != nil {
+ return 0, errors.Annotatef(err, "%q is not a valid unit ID", unit.Id())
+ }
+ return num, nil
+}
+
+// detailedResources shows the version of each resource on each unit, with the
+// corresponding version of the resource that exists in the controller. If unit
+// is non-empty, only units matching that unitID will be returned.
+func detailedResources(unit string, sr resource.ServiceResources) ([]FormattedDetailResource, error) {
+ var formatted []FormattedDetailResource
+ for _, ur := range sr.UnitResources {
+ if unit == "" || unit == ur.Tag.Id() {
+ units := resourceMap(ur.Resources)
+ for _, svc := range sr.Resources {
+ f, err := FormatDetailResource(ur.Tag, svc, units[svc.Name])
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ formatted = append(formatted, f)
+ }
+ if unit != "" {
+ break
+ }
+ }
+ }
+ return formatted, nil
+}
+
+func resourceMap(resources []resource.Resource) map[string]resource.Resource {
+ m := make(map[string]resource.Resource, len(resources))
+ for _, res := range resources {
+ m[res.Name] = res
+ }
+ return m
+}
=== added file 'src/github.com/juju/juju/resource/cmd/formatter_test.go'
--- src/github.com/juju/juju/resource/cmd/formatter_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/cmd/formatter_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,232 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
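+
+// The combined* helpers collapse several resource fields into single
+// tabular cells. Illustrative mappings (values are examples only):
+//
+// store origin, revision 5 -> combinedRevision "5", combinedOrigin "charmstore"
+// upload by "bill", in use -> combinedRevision from the timestamp, combinedOrigin "bill"
+// placeholder (no upload yet) -> usedYesNo "no"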
+ +package cmd + +import ( + "strings" + "time" + + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +var _ = gc.Suite(&CharmFormatterSuite{}) + +type CharmFormatterSuite struct { + testing.IsolationSuite +} + +func (s *CharmFormatterSuite) TestFormatCharmResource(c *gc.C) { + res := charmRes(c, "spam", ".tgz", "X", "spamspamspam") + res.Revision = 5 + + formatted := FormatCharmResource(res) + + c.Check(formatted, jc.DeepEquals, FormattedCharmResource{ + Name: "spam", + Type: "file", + Path: "spam.tgz", + Description: "X", + Revision: 5, + Fingerprint: res.Fingerprint.String(), + Size: int64(len("spamspamspam")), + Origin: "store", + }) +} + +var _ = gc.Suite(&SvcFormatterSuite{}) + +type SvcFormatterSuite struct { + testing.IsolationSuite +} + +func (s *SvcFormatterSuite) TestFormatSvcResource(c *gc.C) { + fp, err := charmresource.GenerateFingerprint(strings.NewReader("something")) + c.Assert(err, jc.ErrorIsNil) + r := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 5, + Origin: charmresource.OriginStore, + Fingerprint: fp, + Size: 10, + }, + Username: "Bill User", + Timestamp: time.Now().Add(-1 * time.Hour * 24 * 365), + ID: "a-service/website", + ServiceID: "a-service", + } + + f := FormatSvcResource(r) + c.Assert(f, gc.Equals, FormattedSvcResource{ + ID: "a-service/website", + ServiceID: "a-service", + Name: r.Name, + Type: "file", + Path: r.Path, + Used: true, + Revision: r.Revision, + Origin: "store", + Fingerprint: fp.String(), + Size: 10, + Description: r.Description, + Timestamp: r.Timestamp, + Username: r.Username, + combinedRevision: "5", + usedYesNo: "yes", + combinedOrigin: "charmstore", + }) + +} + +func (s *SvcFormatterSuite) TestNotUsed(c *gc.C) { + r := resource.Resource{ + Timestamp: time.Time{}, + } + f := FormatSvcResource(r) + c.Assert(f.Used, jc.IsFalse) +} + +func (s *SvcFormatterSuite) TestUsed(c *gc.C) { + r := resource.Resource{ + Timestamp: time.Now(), + } + f := FormatSvcResource(r) + c.Assert(f.Used, jc.IsTrue) +} + +func (s *SvcFormatterSuite) TestOriginUploadDeployed(c *gc.C) { + // represents what we get when we first deploy a service + r := resource.Resource{ + Resource: charmresource.Resource{ + Origin: charmresource.OriginUpload, + }, + Username: "bill", + Timestamp: time.Now(), + } + f := FormatSvcResource(r) + c.Assert(f.combinedOrigin, gc.Equals, "bill") +} + +func (s *SvcFormatterSuite) TestInitialOriginUpload(c *gc.C) { + r := resource.Resource{ + Resource: charmresource.Resource{ + Origin: charmresource.OriginUpload, + }, + } + f := FormatSvcResource(r) + c.Assert(f.combinedOrigin, gc.Equals, "upload") +} + +var _ = gc.Suite(&DetailFormatterSuite{}) + +type DetailFormatterSuite struct { + testing.IsolationSuite +} + +func (s *DetailFormatterSuite) TestFormatDetail(c *gc.C) { + fp, err := charmresource.GenerateFingerprint(strings.NewReader("something")) + c.Assert(err, jc.ErrorIsNil) + + svc := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 5, + Origin: charmresource.OriginStore, + Fingerprint: fp, + Size: 10, + }, + Username: "Bill User", + Timestamp: time.Now().Add(-1 * 
time.Hour * 24 * 365), + ID: "a-service/website", + ServiceID: "a-service", + } + + fp2, err := charmresource.GenerateFingerprint(strings.NewReader("other")) + c.Assert(err, jc.ErrorIsNil) + + unit := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 7, + Origin: charmresource.OriginStore, + Fingerprint: fp2, + Size: 15, + }, + Username: "Bill User", + Timestamp: time.Now(), + ID: "a-service/website", + ServiceID: "a-service", + } + tag := names.NewUnitTag("a-service/55") + + d, err := FormatDetailResource(tag, svc, unit) + c.Assert(err, jc.ErrorIsNil) + c.Assert(d, gc.Equals, + FormattedDetailResource{ + unitNumber: 55, + UnitID: "a-service/55", + Expected: FormatSvcResource(svc), + Unit: FormatSvcResource(unit), + }, + ) +} + +func (s *DetailFormatterSuite) TestFormatDetailEmpty(c *gc.C) { + fp, err := charmresource.GenerateFingerprint(strings.NewReader("something")) + c.Assert(err, jc.ErrorIsNil) + + svc := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 5, + Origin: charmresource.OriginStore, + Fingerprint: fp, + Size: 10, + }, + Username: "Bill User", + Timestamp: time.Now().Add(-1 * time.Hour * 24 * 365), + ID: "a-service/website", + ServiceID: "a-service", + } + + unit := resource.Resource{} + tag := names.NewUnitTag("a-service/55") + + d, err := FormatDetailResource(tag, svc, unit) + c.Assert(err, jc.ErrorIsNil) + c.Assert(d, gc.Equals, + FormattedDetailResource{ + unitNumber: 55, + UnitID: "a-service/55", + Expected: FormatSvcResource(svc), + Unit: FormatSvcResource(unit), + }, + ) +} === added file 'src/github.com/juju/juju/resource/cmd/list_charm_resources.go' --- src/github.com/juju/juju/resource/cmd/list_charm_resources.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/list_charm_resources.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,155 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + "launchpad.net/gnuflag" + + "github.com/juju/juju/cmd/modelcmd" +) + +// CharmCommandBase exposes the functionality of charmcmd.CommandBase +// needed here. +type CharmCommandBase interface { + // Connect connects to the charm store and returns a client. + Connect() (CharmResourceLister, error) +} + +// CharmResourceLister has the charm store API methods needed by ListCharmResourcesCommand. +type CharmResourceLister interface { + // ListResources lists the resources for each of the identified charms. + ListResources(charmURLs []*charm.URL) ([][]charmresource.Resource, error) + + // Close closes the client. + Close() error +} + +// ListCharmResourcesCommand implements the "juju charm list-resources" command. +type ListCharmResourcesCommand struct { + modelcmd.ModelCommandBase + CharmCommandBase + out cmd.Output + charm string +} + +// NewListCharmResourcesCommand returns a new command that lists resources defined +// by a charm. 
+func NewListCharmResourcesCommand(base CharmCommandBase) *ListCharmResourcesCommand {
+ cmd := &ListCharmResourcesCommand{
+ CharmCommandBase: base,
+ }
+ return cmd
+}
+
+var listCharmResourcesDoc = `
+This command will report the resources for a charm in the charm store.
+
+<charm> can be a charm URL, or an unambiguously condensed form of it,
+just like the deploy command. So the following forms will be accepted:
+
+For cs:trusty/mysql
+ mysql
+ trusty/mysql
+
+For cs:~user/trusty/mysql
+ cs:~user/mysql
+
+Where the series is not supplied, the series from your local host is used.
+Thus the above examples imply that the local series is trusty.
+`
+
+// Info implements cmd.Command.
+func (c *ListCharmResourcesCommand) Info() *cmd.Info {
+ return &cmd.Info{
+ Name: "list-resources",
+ Args: "<charm>",
+ Purpose: "display the resources for a charm in the charm store",
+ Doc: listCharmResourcesDoc,
+ Aliases: []string{"resources"},
+ }
+}
+
+// SetFlags implements cmd.Command.
+func (c *ListCharmResourcesCommand) SetFlags(f *gnuflag.FlagSet) {
+ defaultFormat := "tabular"
+ c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{
+ "tabular": FormatCharmTabular,
+ "yaml": cmd.FormatYaml,
+ "json": cmd.FormatJson,
+ })
+}
+
+// Init implements cmd.Command.
+func (c *ListCharmResourcesCommand) Init(args []string) error {
+ if len(args) == 0 {
+ return errors.New("missing charm")
+ }
+ c.charm = args[0]
+
+ if err := cmd.CheckEmpty(args[1:]); err != nil {
+ return errors.Trace(err)
+ }
+
+ return nil
+}
+
+// Run implements cmd.Command.
+func (c *ListCharmResourcesCommand) Run(ctx *cmd.Context) error {
+ // TODO(ericsnow) Adjust this to the charm store.
+
+ apiclient, err := c.Connect()
+ if err != nil {
+ // TODO(ericsnow) Return a more user-friendly error?
+ return errors.Trace(err)
+ }
+ defer apiclient.Close()
+
+ charmURLs, err := resolveCharms([]string{c.charm})
+ if err != nil {
+ return errors.Trace(err)
+ }
+
+ resources, err := apiclient.ListResources(charmURLs)
+ if err != nil {
+ return errors.Trace(err)
+ }
+ if len(resources) != 1 {
+ return errors.New("got bad data from charm store")
+ }
+
+ // Note that we do not worry about c.CompatVersion
+ // for show-charm-resources...
+ formatter := newCharmResourcesFormatter(resources[0])
+ formatted := formatter.format()
+ return c.out.Write(ctx, formatted)
+}
+
+func resolveCharms(charms []string) ([]*charm.URL, error) {
+ var charmURLs []*charm.URL
+ for _, raw := range charms {
+ charmURL, err := resolveCharm(raw)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ charmURLs = append(charmURLs, charmURL)
+ }
+ return charmURLs, nil
+}
+
+func resolveCharm(raw string) (*charm.URL, error) {
+ charmURL, err := charm.ParseURL(raw)
+ if err != nil {
+ return charmURL, errors.Trace(err)
+ }
+
+ if charmURL.Series == "bundle" {
+ return charmURL, errors.Errorf("charm bundles are not supported")
+ }
+
+ return charmURL, nil
+}
=== added file 'src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go'
--- src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/cmd/list_charm_resources_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,193 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
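+
+// Typical command-line usage of the command under test; the tabular
+// output shape is what these tests assert:
+//
+// $ juju charm list-resources cs:a-charm
+// RESOURCE REVISION
+// website 2
+// music 1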
+ +package cmd + +import ( + "strings" + + jujucmd "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" +) + +var _ = gc.Suite(&ListCharmSuite{}) + +type ListCharmSuite struct { + testing.IsolationSuite + + stub *testing.Stub + client *stubCharmStore +} + +func (s *ListCharmSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.client = &stubCharmStore{stub: s.stub} +} + +func (s *ListCharmSuite) newAPIClient(c *ListCharmResourcesCommand) (CharmResourceLister, error) { + s.stub.AddCall("newAPIClient", c) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.client, nil +} + +func (s *ListCharmSuite) TestInfo(c *gc.C) { + var command ListCharmResourcesCommand + info := command.Info() + + c.Check(info, jc.DeepEquals, &jujucmd.Info{ + Name: "list-resources", + Args: "<charm>", + Purpose: "display the resources for a charm in the charm store", + Doc: ` +This command will report the resources for a charm in the charm store. + +<charm> can be a charm URL, or an unambiguously condensed form of it, +just like the deploy command. So the following forms will be accepted: + +For cs:trusty/mysql + mysql + trusty/mysql + +For cs:~user/trusty/mysql + cs:~user/mysql + +Where the series is not supplied, the series from your local host is used. +Thus the above examples imply that the local series is trusty. +`, + Aliases: []string{"resources"}, + }) +} + +func (s *ListCharmSuite) TestOkay(c *gc.C) { + resources := newCharmResources(c, + "website:.tgz of your website", + "music:mp3 of your backing vocals", + ) + resources[0].Revision = 2 + s.client.ReturnListResources = [][]charmresource.Resource{resources} + + command := NewListCharmResourcesCommand(s.client) + code, stdout, stderr := runCmd(c, command, "cs:a-charm") + c.Check(code, gc.Equals, 0) + + c.Check(stdout, gc.Equals, ` +RESOURCE REVISION +website 2 +music 1 + +`[1:]) + c.Check(stderr, gc.Equals, "") + s.stub.CheckCallNames(c, + "Connect", + "ListResources", + "Close", + ) + s.stub.CheckCall(c, 1, "ListResources", []*charm.URL{{ + Schema: "cs", + User: "", + Name: "a-charm", + Revision: -1, + Series: "", + Channel: "", + }}) +} + +func (s *ListCharmSuite) TestNoResources(c *gc.C) { + s.client.ReturnListResources = [][]charmresource.Resource{{}} + + command := NewListCharmResourcesCommand(s.client) + code, stdout, stderr := runCmd(c, command, "cs:a-charm") + c.Check(code, gc.Equals, 0) + + c.Check(stdout, gc.Equals, ` +RESOURCE REVISION + +`[1:]) + c.Check(stderr, gc.Equals, "") + s.stub.CheckCallNames(c, "Connect", "ListResources", "Close") +} + +func (s *ListCharmSuite) TestOutputFormats(c *gc.C) { + fp1, err := charmresource.GenerateFingerprint(strings.NewReader("abc")) + c.Assert(err, jc.ErrorIsNil) + fp2, err := charmresource.GenerateFingerprint(strings.NewReader("xyz")) + c.Assert(err, jc.ErrorIsNil) + resources := []charmresource.Resource{ + charmRes(c, "website", ".tgz", ".tgz of your website", string(fp1.Bytes())), + charmRes(c, "music", ".mp3", "mp3 of your backing vocals", string(fp2.Bytes())), + } + s.client.ReturnListResources = [][]charmresource.Resource{resources} + + formats := map[string]string{ + "tabular": ` +RESOURCE REVISION +website 1 +music 1 + +`[1:], + "yaml": ` +- name: website + type: file + path: website.tgz + description: .tgz of your website + revision: 1 + fingerprint:
73100f01cf258766906c34a30f9a486f07259c627ea0696d97c4582560447f59a6df4a7cf960708271a30324b1481ef4 + size: 48 + origin: store +- name: music + type: file + path: music.mp3 + description: mp3 of your backing vocals + revision: 1 + fingerprint: b0ea2a0f90267a8bd32848c65d7a61569a136f4e421b56127b6374b10a576d29e09294e620b4dcdee40f602115104bd5 + size: 48 + origin: store +`[1:], + "json": strings.Replace(""+ + "["+ + " {"+ + ` "name":"website",`+ + ` "type":"file",`+ + ` "path":"website.tgz",`+ + ` "description":".tgz of your website",`+ + ` "revision":1,`+ + ` "fingerprint":"73100f01cf258766906c34a30f9a486f07259c627ea0696d97c4582560447f59a6df4a7cf960708271a30324b1481ef4",`+ + ` "size":48,`+ + ` "origin":"store"`+ + " },{"+ + ` "name":"music",`+ + ` "type":"file",`+ + ` "path":"music.mp3",`+ + ` "description":"mp3 of your backing vocals",`+ + ` "revision":1,`+ + ` "fingerprint":"b0ea2a0f90267a8bd32848c65d7a61569a136f4e421b56127b6374b10a576d29e09294e620b4dcdee40f602115104bd5",`+ + ` "size":48,`+ + ` "origin":"store"`+ + " }"+ + "]\n", + " ", "", -1), + } + for format, expected := range formats { + c.Logf("checking format %q", format) + command := NewListCharmResourcesCommand(s.client) + args := []string{ + "--format", format, + "cs:a-charm", + } + code, stdout, stderr := runCmd(c, command, args...) + c.Check(code, gc.Equals, 0) + + c.Check(stdout, gc.Equals, expected) + c.Check(stderr, gc.Equals, "") + } +} === added file 'src/github.com/juju/juju/resource/cmd/output_tabular.go' --- src/github.com/juju/juju/resource/cmd/output_tabular.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/output_tabular.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,198 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "bytes" + "fmt" + "sort" + "text/tabwriter" + + "github.com/juju/errors" +) + +// FormatCharmTabular returns a tabular summary of charm resources. +func FormatCharmTabular(value interface{}) ([]byte, error) { + resources, valueConverted := value.([]FormattedCharmResource) + if !valueConverted { + return nil, errors.Errorf("expected value of type %T, got %T", resources, value) + } + + // TODO(ericsnow) sort the rows first? + + var out bytes.Buffer + + // To format things into columns. + tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + + // Write the header. + // We do not print a section label. + fmt.Fprintln(tw, "RESOURCE\tREVISION") + + // Print each info to its own row. + for _, res := range resources { + // the column headers must be kept in sync with these. + fmt.Fprintf(tw, "%s\t%d\n", + res.Name, + res.Revision, + ) + } + tw.Flush() + + return out.Bytes(), nil +} + +// FormatSvcTabular returns a tabular summary of resources. +func FormatSvcTabular(value interface{}) ([]byte, error) { + switch resources := value.(type) { + case FormattedServiceInfo: + return formatServiceTabular(resources), nil + case []FormattedUnitResource: + return formatUnitTabular(resources), nil + case FormattedServiceDetails: + return formatServiceDetailTabular(resources), nil + case FormattedUnitDetails: + return formatUnitDetailTabular(resources), nil + default: + return nil, errors.Errorf("unexpected type for data: %T", resources) + } +} + +func formatServiceTabular(info FormattedServiceInfo) []byte { + // TODO(ericsnow) sort the rows first? 
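+	// For orientation: every formatter in this file relies on text/tabwriter,
+	// which buffers tab-separated cells and pads them into aligned columns
+	// when Flush is called. A minimal sketch of that mechanism (illustrative
+	// values only, not part of this command):
+	//
+	//	var buf bytes.Buffer
+	//	tw := tabwriter.NewWriter(&buf, 0, 1, 1, ' ', 0)
+	//	fmt.Fprintln(tw, "RESOURCE\tREVISION")
+	//	fmt.Fprintln(tw, "website\t2")
+	//	tw.Flush() // nothing reaches buf until this point
+	//	// buf.String() now holds two space-padded, aligned columns.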
+ + var out bytes.Buffer + + fmt.Fprintln(&out, "[Service]") + tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + fmt.Fprintln(tw, "RESOURCE\tSUPPLIED BY\tREVISION") + + // Print each info to its own row. + for _, r := range info.Resources { + // the column headers must be kept in sync with these. + fmt.Fprintf(tw, "%v\t%v\t%v\n", + r.Name, + r.combinedOrigin, + r.combinedRevision, + ) + } + + // Don't forget to flush! The tabwriter won't actually write to the output + // until you flush; skipping the flush here would leave its output incorrectly + // ordered relative to the fmt.Fprintlns below. + tw.Flush() + + writeUpdates(info.Updates, &out, tw) + + return out.Bytes() +} + +func writeUpdates(updates []FormattedCharmResource, out *bytes.Buffer, tw *tabwriter.Writer) { + if len(updates) > 0 { + fmt.Fprintln(out, "") + fmt.Fprintln(out, "[Updates Available]") + fmt.Fprintln(tw, "RESOURCE\tREVISION") + for _, r := range updates { + fmt.Fprintf(tw, "%v\t%v\n", + r.Name, + r.Revision, + ) + } + } + + tw.Flush() +} + +func formatUnitTabular(resources []FormattedUnitResource) []byte { + // TODO(ericsnow) sort the rows first? + + var out bytes.Buffer + + fmt.Fprintln(&out, "[Unit]") + + // To format things into columns. + tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + + // Write the header. + fmt.Fprintln(tw, "RESOURCE\tREVISION") + + // Print each info to its own row. + for _, r := range resources { + // the column headers must be kept in sync with these. + fmt.Fprintf(tw, "%v\t%v\n", + r.Name, + r.combinedRevision, + ) + } + tw.Flush() + + return out.Bytes() +} + +func formatServiceDetailTabular(resources FormattedServiceDetails) []byte { + // note that the unit resource can be a zero value here, to indicate that + // the unit has not downloaded that resource yet. + + var out bytes.Buffer + fmt.Fprintln(&out, "[Units]") + + sort.Sort(byUnitID(resources.Resources)) + // To format things into columns. + tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + + // Write the header. + fmt.Fprintln(tw, "UNIT\tRESOURCE\tREVISION\tEXPECTED") + + for _, r := range resources.Resources { + fmt.Fprintf(tw, "%v\t%v\t%v\t%v\n", + r.unitNumber, + r.Expected.Name, + r.Unit.combinedRevision, + r.Expected.combinedRevision, + ) + } + tw.Flush() + + writeUpdates(resources.Updates, &out, tw) + + return out.Bytes() +} + +func formatUnitDetailTabular(resources FormattedUnitDetails) []byte { + // note that the unit resource can be a zero value here, to indicate that + // the unit has not downloaded that resource yet. + + var out bytes.Buffer + fmt.Fprintln(&out, "[Unit]") + + sort.Sort(byUnitID(resources)) + // To format things into columns. + tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + + // Write the header.
+ fmt.Fprintln(tw, "RESOURCE\tREVISION\tEXPECTED") + + for _, r := range resources { + fmt.Fprintf(tw, "%v\t%v\t%v\n", + r.Expected.Name, + r.Unit.combinedRevision, + r.Expected.combinedRevision, + ) + } + tw.Flush() + return out.Bytes() +} + +type byUnitID []FormattedDetailResource + +func (b byUnitID) Len() int { return len(b) } +func (b byUnitID) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +func (b byUnitID) Less(i, j int) bool { + if b[i].unitNumber != b[j].unitNumber { + return b[i].unitNumber < b[j].unitNumber + } + return b[i].Expected.Name < b[j].Expected.Name +} === added file 'src/github.com/juju/juju/resource/cmd/output_tabular_test.go' --- src/github.com/juju/juju/resource/cmd/output_tabular_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/output_tabular_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,362 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +var _ = gc.Suite(&CharmTabularSuite{}) + +type CharmTabularSuite struct { + testing.IsolationSuite +} + +func (s *CharmTabularSuite) TestFormatCharmTabularOkay(c *gc.C) { + res := charmRes(c, "spam", ".tgz", "...", "") + formatted := []FormattedCharmResource{FormatCharmResource(res)} + + data, err := FormatCharmTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + c.Check(string(data), gc.Equals, ` +RESOURCE REVISION +spam 1 +`[1:]) +} + +func (s *CharmTabularSuite) TestFormatCharmTabularMinimal(c *gc.C) { + res := charmRes(c, "spam", "", "", "") + formatted := []FormattedCharmResource{FormatCharmResource(res)} + + data, err := FormatCharmTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + c.Check(string(data), gc.Equals, ` +RESOURCE REVISION +spam 1 +`[1:]) +} + +func (s *CharmTabularSuite) TestFormatCharmTabularUpload(c *gc.C) { + res := charmRes(c, "spam", "", "", "") + res.Origin = charmresource.OriginUpload + formatted := []FormattedCharmResource{FormatCharmResource(res)} + + data, err := FormatCharmTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + c.Check(string(data), gc.Equals, ` +RESOURCE REVISION +spam 1 +`[1:]) +} + +func (s *CharmTabularSuite) TestFormatCharmTabularMulti(c *gc.C) { + formatted := []FormattedCharmResource{ + FormatCharmResource(charmRes(c, "spam", ".tgz", "spamspamspamspam", "")), + FormatCharmResource(charmRes(c, "eggs", "", "...", "")), + FormatCharmResource(charmRes(c, "somethingbig", ".zip", "", "")), + FormatCharmResource(charmRes(c, "song", ".mp3", "your favorite", "")), + FormatCharmResource(charmRes(c, "avatar", ".png", "your picture", "")), + } + formatted[1].Revision = 2 + + data, err := FormatCharmTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + c.Check(string(data), gc.Equals, ` +RESOURCE REVISION +spam 1 +eggs 2 +somethingbig 1 +song 1 +avatar 1 +`[1:]) +} + +func (s *CharmTabularSuite) TestFormatCharmTabularBadValue(c *gc.C) { + bogus := "should have been something else" + _, err := FormatCharmTabular(bogus) + + c.Check(err, gc.ErrorMatches, `expected value of type .*`) +} + +var _ = gc.Suite(&SvcTabularSuite{}) + +type SvcTabularSuite struct { + testing.IsolationSuite +} + +func (s *SvcTabularSuite) TestFormatServiceOkay(c *gc.C) { + res := resource.Resource{ + + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "openjdk", + Description: "the java 
runtime", + }, + Origin: charmresource.OriginStore, + Revision: 7, + }, + Timestamp: time.Now(), + } + + formatted := FormattedServiceInfo{ + Resources: []FormattedSvcResource{FormatSvcResource(res)}, + } + + data, err := FormatSvcTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + c.Check(string(data), gc.Equals, ` +[Service] +RESOURCE SUPPLIED BY REVISION +openjdk charmstore 7 +`[1:]) +} + +func (s *SvcTabularSuite) TestFormatUnitOkay(c *gc.C) { + res := resource.Resource{ + + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "openjdk", + Description: "the java runtime", + }, + Origin: charmresource.OriginStore, + Revision: 7, + }, + Timestamp: time.Now(), + } + + formatted := []FormattedUnitResource{ + FormattedUnitResource(FormatSvcResource(res)), + } + + data, err := FormatSvcTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + c.Check(string(data), gc.Equals, ` +[Unit] +RESOURCE REVISION +openjdk 7 +`[1:]) +} + +func (s *SvcTabularSuite) TestFormatSvcTabularMulti(c *gc.C) { + res := []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "openjdk", + Description: "the java runtime", + }, + Origin: charmresource.OriginStore, + Revision: 7, + }, + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + Type: charmresource.TypeFile, + }, + Origin: charmresource.OriginUpload, + }, + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "openjdk2", + Description: "another java runtime", + }, + Origin: charmresource.OriginStore, + Revision: 8, + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website2", + Description: "your website data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + } + + charmResources := []charmresource.Resource{ + { + // This resource has a higher revision than the corresponding one + // above. + Meta: charmresource.Meta{ + Name: "openjdk", + Description: "the java runtime", + }, + Revision: 10, + Origin: charmresource.OriginStore, + }, + { + // This resource is the same revision as the corresponding one + // above. + Meta: charmresource.Meta{ + Name: "openjdk2", + Description: "your website data", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 8, + Origin: charmresource.OriginStore, + }, + { + // This resource has been overridden by an uploaded resource above, + // so we won't show it as an available update. + Meta: charmresource.Meta{ + Name: "website2", + Description: "your website data", + }, + Revision: 99, + Origin: charmresource.OriginStore, + }, + { + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + Type: charmresource.TypeFile, + }, + }, + } + + formatted, err := formatServiceResources(resource.ServiceResources{ + Resources: res, + CharmStoreResources: charmResources, + }) + c.Assert(err, jc.ErrorIsNil) + + data, err := FormatSvcTabular(formatted) + c.Assert(err, jc.ErrorIsNil) + + // Notes: sorted by name, then by revision, newest first. 
+ c.Check(string(data), gc.Equals, ` +[Service] +RESOURCE SUPPLIED BY REVISION +openjdk charmstore 7 +website upload - +openjdk2 charmstore 8 +website2 Bill User 2012-12-12T12:12 + +[Updates Available] +RESOURCE REVISION +openjdk 10 +`[1:]) +} + +func (s *SvcTabularSuite) TestFormatSvcTabularBadValue(c *gc.C) { + bogus := "should have been something else" + _, err := FormatSvcTabular(bogus) + c.Check(err, gc.ErrorMatches, `unexpected type for data: string`) +} + +var _ = gc.Suite(&DetailsTabularSuite{}) + +type DetailsTabularSuite struct { + testing.IsolationSuite +} + +func (s *DetailsTabularSuite) TestFormatServiceDetailsOkay(c *gc.C) { + res := charmRes(c, "spam", ".tgz", "...", "") + updates := []FormattedCharmResource{FormatCharmResource(res)} + + data := FormattedServiceDetails{ + Resources: []FormattedDetailResource{ + { + UnitID: "svc/10", + unitNumber: 10, + Unit: fakeFmtSvcRes("data", "1"), + Expected: fakeFmtSvcRes("data", "1"), + }, + { + UnitID: "svc/5", + unitNumber: 5, + Unit: fakeFmtSvcRes("config", "2"), + Expected: fakeFmtSvcRes("config", "3"), + }, + }, + Updates: updates, + } + + output, err := FormatSvcTabular(data) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(string(output), gc.Equals, ` +[Units] +UNIT RESOURCE REVISION EXPECTED +5 config combRev2 combRev3 +10 data combRev1 combRev1 + +[Updates Available] +RESOURCE REVISION +spam 1 +`[1:]) +} + +func (s *DetailsTabularSuite) TestFormatUnitDetailsOkay(c *gc.C) { + data := FormattedUnitDetails{ + { + UnitID: "svc/10", + unitNumber: 10, + Unit: fakeFmtSvcRes("data", "1"), + Expected: fakeFmtSvcRes("data", "1"), + }, + { + UnitID: "svc/10", + unitNumber: 10, + Unit: fakeFmtSvcRes("config", "2"), + Expected: fakeFmtSvcRes("config", "3"), + }, + } + + output, err := FormatSvcTabular(data) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(string(output), gc.Equals, ` +[Unit] +RESOURCE REVISION EXPECTED +config combRev2 combRev3 +data combRev1 combRev1 +`[1:]) +} + +func fakeFmtSvcRes(name, suffix string) FormattedSvcResource { + return FormattedSvcResource{ + ID: "ID" + suffix, + ServiceID: "svc", + Name: name, + Type: "Type" + suffix, + Path: "Path" + suffix, + Description: "Desc" + suffix, + Revision: 1, + Fingerprint: "Fingerprint" + suffix, + Size: 1, + Origin: "Origin" + suffix, + Used: true, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + Username: "Username" + suffix, + combinedRevision: "combRev" + suffix, + usedYesNo: "usedYesNo" + suffix, + combinedOrigin: "combOrig" + suffix, + } +} === added file 'src/github.com/juju/juju/resource/cmd/package_test.go' --- src/github.com/juju/juju/resource/cmd/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/cmd/show_service.go' --- src/github.com/juju/juju/resource/cmd/show_service.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/show_service.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,172 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package cmd + +import ( + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/names" + "launchpad.net/gnuflag" + + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/resource" +) + +// ShowServiceClient has the API client methods needed by ShowServiceCommand. +type ShowServiceClient interface { + // ListResources returns info about resources for services in the model. + ListResources(services []string) ([]resource.ServiceResources, error) + // Close closes the connection. + Close() error +} + +// ShowServiceDeps is a type that contains external functions that ShowService +// depends on to function. +type ShowServiceDeps struct { + // NewClient returns the value that wraps the API for showing service + // resources from the server. + NewClient func(*ShowServiceCommand) (ShowServiceClient, error) +} + +// ShowServiceCommand implements the list-resources command. +type ShowServiceCommand struct { + modelcmd.ModelCommandBase + + details bool + deps ShowServiceDeps + out cmd.Output + target string +} + +// NewShowServiceCommand returns a new command that shows the resources for +// a service or unit. +func NewShowServiceCommand(deps ShowServiceDeps) *ShowServiceCommand { + return &ShowServiceCommand{deps: deps} +} + +// Info implements cmd.Command.Info. +func (c *ShowServiceCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "list-resources", + Aliases: []string{"resources"}, + Args: "service-or-unit", + Purpose: "show the resources for a service or unit", + Doc: ` +This command shows the resources required by and those in use by an existing +service or unit in your model. When run for a service, it will also show any +updates available for resources from the charmstore. +`, + } +} + +// SetFlags implements cmd.Command.SetFlags. +func (c *ShowServiceCommand) SetFlags(f *gnuflag.FlagSet) { + const defaultFlag = "tabular" + c.out.AddFlags(f, defaultFlag, map[string]cmd.Formatter{ + defaultFlag: FormatSvcTabular, + "yaml": cmd.FormatYaml, + "json": cmd.FormatJson, + }) + + f.BoolVar(&c.details, "details", false, "show detailed information about resources used by each unit.") +} + +// Init implements cmd.Command.Init. It will return an error satisfying +// errors.BadRequest if you give it an incorrect number of arguments. +func (c *ShowServiceCommand) Init(args []string) error { + if len(args) == 0 { + return errors.NewBadRequest(nil, "missing service name") + } + c.target = args[0] + if err := cmd.CheckEmpty(args[1:]); err != nil { + return errors.NewBadRequest(err, "") + } + return nil +} + +// Run implements cmd.Command.Run.
+func (c *ShowServiceCommand) Run(ctx *cmd.Context) error { + apiclient, err := c.deps.NewClient(c) + if err != nil { + return errors.Annotatef(err, "can't connect to %s", c.ConnectionName()) + } + defer apiclient.Close() + + var unit string + var service string + if names.IsValidService(c.target) { + service = c.target + } else { + service, err = names.UnitService(c.target) + if err != nil { + return errors.Errorf("%q is neither a service nor a unit", c.target) + } + unit = c.target + } + + vals, err := apiclient.ListResources([]string{service}) + if err != nil { + return errors.Trace(err) + } + if len(vals) != 1 { + return errors.Errorf("bad data returned from server") + } + v := vals[0] + if unit == "" { + return c.formatServiceResources(ctx, v) + } + return c.formatUnitResources(ctx, unit, service, v) +} + +func (c *ShowServiceCommand) formatServiceResources(ctx *cmd.Context, sr resource.ServiceResources) error { + if c.details { + formatted, err := FormatServiceDetails(sr) + if err != nil { + return errors.Trace(err) + } + + return c.out.Write(ctx, formatted) + } + + formatted, err := formatServiceResources(sr) + if err != nil { + return errors.Trace(err) + } + return c.out.Write(ctx, formatted) +} + +func (c *ShowServiceCommand) formatUnitResources(ctx *cmd.Context, unit, service string, sr resource.ServiceResources) error { + if c.details { + formatted, err := detailedResources(unit, sr) + if err != nil { + return errors.Trace(err) + } + return c.out.Write(ctx, FormattedUnitDetails(formatted)) + } + + resources, err := unitResources(unit, service, sr) + if err != nil { + return errors.Trace(err) + } + res := make([]FormattedUnitResource, len(resources)) + + for i, r := range resources { + res[i] = FormattedUnitResource(FormatSvcResource(r)) + } + + return c.out.Write(ctx, res) + +} + +func unitResources(unit, service string, v resource.ServiceResources) ([]resource.Resource, error) { + for _, res := range v.UnitResources { + if res.Tag.Id() == unit { + return res.Resources, nil + } + } + // TODO(natefinch): we need to differentiate between a unit with no + // resources and a unit that doesn't exist. This requires a serverside + // change. + return nil, nil +} === added file 'src/github.com/juju/juju/resource/cmd/show_service_test.go' --- src/github.com/juju/juju/resource/cmd/show_service_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/show_service_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,571 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package cmd + +import ( + "time" + + jujucmd "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +var _ = gc.Suite(&ShowServiceSuite{}) + +type ShowServiceSuite struct { + testing.IsolationSuite + + stubDeps *stubShowServiceDeps +} + +func (s *ShowServiceSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + stub := &testing.Stub{} + s.stubDeps = &stubShowServiceDeps{ + stub: stub, + client: &stubServiceClient{stub: stub}, + } +} + +func (*ShowServiceSuite) TestInitEmpty(c *gc.C) { + s := ShowServiceCommand{} + + err := s.Init([]string{}) + c.Assert(err, jc.Satisfies, errors.IsBadRequest) +} + +func (*ShowServiceSuite) TestInitGood(c *gc.C) { + s := ShowServiceCommand{} + err := s.Init([]string{"foo"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.target, gc.Equals, "foo") +} + +func (*ShowServiceSuite) TestInitTooManyArgs(c *gc.C) { + s := ShowServiceCommand{} + + err := s.Init([]string{"foo", "bar"}) + c.Assert(err, jc.Satisfies, errors.IsBadRequest) +} + +func (s *ShowServiceSuite) TestInfo(c *gc.C) { + var command ShowServiceCommand + info := command.Info() + + c.Check(info, jc.DeepEquals, &jujucmd.Info{ + Name: "list-resources", + Aliases: []string{"resources"}, + Args: "service-or-unit", + Purpose: "show the resources for a service or unit", + Doc: ` +This command shows the resources required by and those in use by an existing +service or unit in your model. When run for a service, it will also show any +updates available for resources from the charmstore. +`, + }) +} + +func (s *ShowServiceSuite) TestRun(c *gc.C) { + data := []resource.ServiceResources{ + { + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "openjdk", + Description: "the java runtime", + }, + Origin: charmresource.OriginStore, + Revision: 7, + }, + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + }, + Origin: charmresource.OriginUpload, + }, + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "rsc1234", + Description: "a big description", + }, + Origin: charmresource.OriginStore, + Revision: 15, + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website2", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + }, + CharmStoreResources: []charmresource.Resource{ + { + // This resource has a higher revision than the corresponding one + // above. + Meta: charmresource.Meta{ + Name: "openjdk", + Description: "the java runtime", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 10, + Origin: charmresource.OriginStore, + }, + { + // This resource is the same revision as the corresponding one + // above. 
+ Meta: charmresource.Meta{ + Name: "rsc1234", + Description: "a big description", + Type: charmresource.TypeFile, + Path: "foobar", + }, + Revision: 15, + Origin: charmresource.OriginStore, + }, + { + Meta: charmresource.Meta{ + Name: "website", + Description: "your website data", + }, + Origin: charmresource.OriginUpload, + }, + { + Meta: charmresource.Meta{ + Name: "website2", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + }, + }, + } + s.stubDeps.client.ReturnResources = data + + cmd := &ShowServiceCommand{ + deps: ShowServiceDeps{ + NewClient: s.stubDeps.NewClient, + }, + } + + code, stdout, stderr := runCmd(c, cmd, "svc") + c.Check(code, gc.Equals, 0) + c.Check(stderr, gc.Equals, "") + + c.Check(stdout, gc.Equals, ` +[Service] +RESOURCE SUPPLIED BY REVISION +openjdk charmstore 7 +website upload - +rsc1234 charmstore 15 +website2 Bill User 2012-12-12T12:12 + +[Updates Available] +RESOURCE REVISION +openjdk 10 + +`[1:]) + + s.stubDeps.stub.CheckCall(c, 1, "ListResources", []string{"svc"}) +} + +func (s *ShowServiceSuite) TestRunUnit(c *gc.C) { + data := []resource.ServiceResources{{ + UnitResources: []resource.UnitResources{{ + Tag: names.NewUnitTag("svc/0"), + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "rsc1234", + Description: "a big description", + }, + Origin: charmresource.OriginStore, + Revision: 15, + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "website2", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + }, + }}, + }} + s.stubDeps.client.ReturnResources = data + + cmd := &ShowServiceCommand{ + deps: ShowServiceDeps{ + NewClient: s.stubDeps.NewClient, + }, + } + + code, stdout, stderr := runCmd(c, cmd, "svc/0") + c.Assert(code, gc.Equals, 0) + c.Assert(stderr, gc.Equals, "") + + c.Check(stdout, gc.Equals, ` +[Unit] +RESOURCE REVISION +rsc1234 15 +website2 2012-12-12T12:12 + +`[1:]) + + s.stubDeps.stub.CheckCall(c, 1, "ListResources", []string{"svc"}) +} + +func (s *ShowServiceSuite) TestRunDetails(c *gc.C) { + data := []resource.ServiceResources{{ + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 15, + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "beta", + Description: "more data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + }, + CharmStoreResources: []charmresource.Resource{ + { + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 15, + }, + { + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + { + Meta: charmresource.Meta{ + Name: "beta", + Description: "more data", + }, + Origin: 
charmresource.OriginUpload, + }, + }, + UnitResources: []resource.UnitResources{ + { + Tag: names.NewUnitTag("svc/10"), + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 10, // note the revision is different for this unit + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + // note the different time + Timestamp: time.Date(2011, 11, 11, 11, 11, 11, 0, time.UTC), + }, + // note we're missing the beta resource for this unit + }, + }, + { + Tag: names.NewUnitTag("svc/5"), + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 10, // note the revision is different for this unit + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + // note the different time + Timestamp: time.Date(2011, 11, 11, 11, 11, 11, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "beta", + Description: "more data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + }, + }, + }, + }} + s.stubDeps.client.ReturnResources = data + + cmd := &ShowServiceCommand{ + deps: ShowServiceDeps{ + NewClient: s.stubDeps.NewClient, + }, + } + + code, stdout, stderr := runCmd(c, cmd, "svc", "--details") + c.Check(code, gc.Equals, 0) + c.Check(stderr, gc.Equals, "") + + c.Check(stdout, gc.Equals, ` +[Units] +UNIT RESOURCE REVISION EXPECTED +5 alpha 10 15 +5 beta 2012-12-12T12:12 2012-12-12T12:12 +5 charlie 2011-11-11T11:11 2012-12-12T12:12 +10 alpha 10 15 +10 beta - 2012-12-12T12:12 +10 charlie 2011-11-11T11:11 2012-12-12T12:12 + +[Updates Available] +RESOURCE REVISION +openjdk 10 + +`[1:]) + + s.stubDeps.stub.CheckCall(c, 1, "ListResources", []string{"svc"}) +} + +func (s *ShowServiceSuite) TestRunUnitDetails(c *gc.C) { + data := []resource.ServiceResources{{ + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 15, + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "beta", + Description: "more data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + }, + UnitResources: []resource.UnitResources{ + { + Tag: names.NewUnitTag("svc/10"), + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 10, // note the revision is different for this unit + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + // note the different time + Timestamp: time.Date(2011, 11, 11, 11, 11, 11, 0, time.UTC), + }, + // note we're missing the beta resource for this unit + }, + }, + { + Tag: names.NewUnitTag("svc/5"), + Resources: []resource.Resource{ + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "alpha", + Description: "a big comment", + }, + Origin: charmresource.OriginStore, + Revision: 10, // note the revision is different for this unit + }, + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "charlie", + Description: "awesome data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + // note the different time + Timestamp: time.Date(2011, 11, 11, 11, 11, 11, 0, time.UTC), + }, + { + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "beta", + Description: "more data", + }, + Origin: charmresource.OriginUpload, + }, + Username: "Bill User", + Timestamp: time.Date(2012, 12, 12, 12, 12, 12, 0, time.UTC), + }, + }, + }, + }, + }} + s.stubDeps.client.ReturnResources = data + + cmd := &ShowServiceCommand{ + deps: ShowServiceDeps{ + NewClient: s.stubDeps.NewClient, + }, + } + + code, stdout, stderr := runCmd(c, cmd, "svc/10", "--details") + c.Assert(code, gc.Equals, 0) + c.Assert(stderr, gc.Equals, "") + + c.Check(stdout, gc.Equals, ` +[Unit] +RESOURCE REVISION EXPECTED +alpha 10 15 +beta - 2012-12-12T12:12 +charlie 2011-11-11T11:11 2012-12-12T12:12 + +`[1:]) + + s.stubDeps.stub.CheckCall(c, 1, "ListResources", []string{"svc"}) +} + +type stubShowServiceDeps struct { + stub *testing.Stub + client *stubServiceClient +} + +func (s *stubShowServiceDeps) NewClient(c *ShowServiceCommand) (ShowServiceClient, error) { + s.stub.AddCall("NewClient", c) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.client, nil +} + +type stubServiceClient struct { + stub *testing.Stub + ReturnResources []resource.ServiceResources +} + +func (s *stubServiceClient) ListResources(services []string) ([]resource.ServiceResources, error) { + s.stub.AddCall("ListResources", services) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + return s.ReturnResources, nil +} + +func (s *stubServiceClient) Close() error { + s.stub.AddCall("Close") + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} === added file 'src/github.com/juju/juju/resource/cmd/stub_test.go' --- src/github.com/juju/juju/resource/cmd/stub_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/stub_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package cmd + +import ( + "io" + + "github.com/juju/errors" + "github.com/juju/testing" + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" +) + +type stubCharmStore struct { + stub *testing.Stub + + ReturnListResources [][]charmresource.Resource +} + +func (s *stubCharmStore) Connect() (CharmResourceLister, error) { + s.stub.AddCall("Connect") + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s, nil +} + +func (s *stubCharmStore) ListResources(charmURLs []*charm.URL) ([][]charmresource.Resource, error) { + s.stub.AddCall("ListResources", charmURLs) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnListResources, nil +} + +func (s *stubCharmStore) Close() error { + s.stub.AddCall("Close") + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubAPIClient struct { + stub *testing.Stub +} + +func (s *stubAPIClient) Upload(service, name string, resource io.ReadSeeker) error { + s.stub.AddCall("Upload", service, name, resource) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubAPIClient) Close() error { + s.stub.AddCall("Close") + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubFile struct { + // No one actually tries to read from this during tests. + io.ReadSeeker + stub *testing.Stub +} + +func (s *stubFile) Close() error { + s.stub.AddCall("FileClose") + return errors.Trace(s.stub.NextErr()) +} === added file 'src/github.com/juju/juju/resource/cmd/upload.go' --- src/github.com/juju/juju/resource/cmd/upload.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/upload.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,133 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "io" + + "github.com/juju/cmd" + "github.com/juju/errors" + + "github.com/juju/juju/cmd/modelcmd" +) + +// UploadClient has the API client methods needed by UploadCommand. +type UploadClient interface { + // Upload sends the resource to Juju. + Upload(service, name string, resource io.ReadSeeker) error + + // Close closes the client. + Close() error +} + +// ReadSeekCloser combines 2 interfaces. +type ReadSeekCloser interface { + io.ReadCloser + io.Seeker +} + +// UploadDeps is a type that contains external functions that Upload depends on +// to function. +type UploadDeps struct { + // NewClient returns the value that wraps the API for uploading to the server. + NewClient func(*UploadCommand) (UploadClient, error) + + // OpenResource handles creating a reader from the resource path. + OpenResource func(path string) (ReadSeekCloser, error) +} + +// UploadCommand implements the upload command. +type UploadCommand struct { + deps UploadDeps + modelcmd.ModelCommandBase + service string + resourceFile resourceFile +} + +// NewUploadCommand returns a new command that uploads a file as +// a resource for a service. +func NewUploadCommand(deps UploadDeps) *UploadCommand { + return &UploadCommand{deps: deps} +} + +// Info implements cmd.Command.Info +func (c *UploadCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "push-resource", + Args: "service name=file", + Purpose: "upload a file as a resource for a service", + Doc: ` +This command uploads a file from your local disk to the juju controller to be +used as a resource for a service. +`, + } +} + +// Init implements cmd.Command.Init. It will return an error satisfying +// errors.BadRequest if you give it an incorrect number of arguments. +func (c *UploadCommand) Init(args []string) error { + switch len(args) { + case 0: + return errors.BadRequestf("missing service name") + case 1: + return errors.BadRequestf("no resource specified") + } + + service := args[0] + if service == "" { // TODO(ericsnow) names.IsValidService + return errors.NewNotValid(nil, "missing service name") + } + c.service = service + + if err := c.addResourceFile(args[1]); err != nil { + return errors.Trace(err) + } + if err := cmd.CheckEmpty(args[2:]); err != nil { + return errors.NewBadRequest(err, "") + } + + return nil +} + +// addResourceFile parses the given arg into a name and a resource file, +// and saves it in c.resourceFile. +func (c *UploadCommand) addResourceFile(arg string) error { + name, filename, err := parseResourceFileArg(arg) + if err != nil { + return errors.Annotatef(err, "bad resource arg %q", arg) + } + c.resourceFile = resourceFile{ + service: c.service, + name: name, + filename: filename, + } + + return nil +} + +// Run implements cmd.Command.Run. +func (c *UploadCommand) Run(*cmd.Context) error { + apiclient, err := c.deps.NewClient(c) + if err != nil { + return errors.Annotatef(err, "can't connect to %s", c.ConnectionName()) + } + defer apiclient.Close() + + if err := c.upload(c.resourceFile, apiclient); err != nil { + return errors.Annotatef(err, "failed to upload resource %q", c.resourceFile.name) + } + return nil +} + +// upload opens the given file and calls the apiclient to upload it to the given +// service with the given name. +func (c *UploadCommand) upload(rf resourceFile, client UploadClient) error { + f, err := c.deps.OpenResource(rf.filename) + if err != nil { + return errors.Trace(err) + } + defer f.Close() + err = client.Upload(rf.service, rf.name, f) + return errors.Trace(err) +} === added file 'src/github.com/juju/juju/resource/cmd/upload_test.go' --- src/github.com/juju/juju/resource/cmd/upload_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/upload_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,154 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package cmd + +import ( + jujucmd "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +var _ = gc.Suite(&UploadSuite{}) + +type UploadSuite struct { + testing.IsolationSuite + + stub *testing.Stub + stubDeps *stubUploadDeps +} + +func (s *UploadSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.stubDeps = &stubUploadDeps{ + stub: s.stub, + client: &stubAPIClient{stub: s.stub}, + } +} + +func (*UploadSuite) TestInitEmpty(c *gc.C) { + var u UploadCommand + + err := u.Init([]string{}) + c.Assert(err, jc.Satisfies, errors.IsBadRequest) +} + +func (*UploadSuite) TestInitOneArg(c *gc.C) { + var u UploadCommand + err := u.Init([]string{"foo"}) + c.Assert(err, jc.Satisfies, errors.IsBadRequest) +} + +func (*UploadSuite) TestInitJustName(c *gc.C) { + var u UploadCommand + + err := u.Init([]string{"foo", "bar"}) + c.Assert(err, jc.Satisfies, errors.IsNotValid) +} + +func (*UploadSuite) TestInitNoName(c *gc.C) { + var u UploadCommand + + err := u.Init([]string{"foo", "=foobar"}) + c.Assert(errors.Cause(err), jc.Satisfies, errors.IsNotValid) +} + +func (*UploadSuite) TestInitNoPath(c *gc.C) { + var u UploadCommand + + err := u.Init([]string{"foo", "foobar="}) + c.Assert(errors.Cause(err), jc.Satisfies, errors.IsNotValid) +} + +func (*UploadSuite) TestInitGood(c *gc.C) { + var u UploadCommand + + err := u.Init([]string{"foo", "bar=baz"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(u.resourceFile, gc.DeepEquals, resourceFile{ + service: "foo", + name: "bar", + filename: "baz", + }) + c.Assert(u.service, gc.Equals, "foo") +} + +func (*UploadSuite) TestInitTwoResources(c *gc.C) { + var u UploadCommand + + err := u.Init([]string{"foo", "bar=baz", "fizz=buzz"}) + c.Assert(err, jc.Satisfies, errors.IsBadRequest) +} + +func (s *UploadSuite) TestInfo(c *gc.C) { + var command UploadCommand + info := command.Info() + + c.Check(info, jc.DeepEquals, &jujucmd.Info{ + Name: "push-resource", + Args: "service name=file", + Purpose: "upload a file as a resource for a service", + Doc: ` +This command uploads a file from your local disk to the juju controller to be +used as a resource for a service. 
+`, + }) +} + +func (s *UploadSuite) TestRun(c *gc.C) { + file := &stubFile{stub: s.stub} + s.stubDeps.file = file + u := UploadCommand{ + deps: UploadDeps{ + NewClient: s.stubDeps.NewClient, + OpenResource: s.stubDeps.OpenResource, + }, + resourceFile: resourceFile{ + service: "svc", + name: "foo", + filename: "bar", + }, + service: "svc", + } + + err := u.Run(nil) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "NewClient", + "OpenResource", + "Upload", + "FileClose", + "Close", + ) + s.stub.CheckCall(c, 1, "OpenResource", "bar") + s.stub.CheckCall(c, 2, "Upload", "svc", "foo", file) +} + +type stubUploadDeps struct { + stub *testing.Stub + file ReadSeekCloser + client UploadClient +} + +func (s *stubUploadDeps) NewClient(c *UploadCommand) (UploadClient, error) { + s.stub.AddCall("NewClient", c) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.client, nil +} + +func (s *stubUploadDeps) OpenResource(path string) (ReadSeekCloser, error) { + s.stub.AddCall("OpenResource", path) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.file, nil +} === added file 'src/github.com/juju/juju/resource/cmd/util_test.go' --- src/github.com/juju/juju/resource/cmd/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/cmd/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,65 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "bytes" + "strings" + + jujucmd "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + coretesting "github.com/juju/juju/testing" +) + +func charmRes(c *gc.C, name, suffix, description, content string) charmresource.Resource { + if content == "" { + content = name + } + + fp, err := charmresource.GenerateFingerprint(strings.NewReader(content)) + c.Assert(err, jc.ErrorIsNil) + + res := charmresource.Resource{ + Meta: charmresource.Meta{ + Name: name, + Type: charmresource.TypeFile, + Path: name + suffix, + Description: description, + }, + Origin: charmresource.OriginStore, + Revision: 1, + Fingerprint: fp, + Size: int64(len(content)), + } + err = res.Validate() + c.Assert(err, jc.ErrorIsNil) + return res +} + +func newCharmResources(c *gc.C, names ...string) []charmresource.Resource { + var resources []charmresource.Resource + for _, name := range names { + var description string + parts := strings.SplitN(name, ":", 2) + if len(parts) == 2 { + name = parts[0] + description = parts[1] + } + + res := charmRes(c, name, ".tgz", description, "") + resources = append(resources, res) + } + return resources +} + +func runCmd(c *gc.C, command jujucmd.Command, args ...string) (code int, stdout string, stderr string) { + ctx := coretesting.Context(c) + code = jujucmd.Main(command, ctx, args) + stdout = string(ctx.Stdout.(*bytes.Buffer).Bytes()) + stderr = string(ctx.Stderr.(*bytes.Buffer).Bytes()) + return code, stdout, stderr +} === added file 'src/github.com/juju/juju/resource/component.go' --- src/github.com/juju/juju/resource/component.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/component.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// The resource package provides the functionality of the "resources" +// feature in Juju. 
The various pieces are connected to the Juju +// machinery in component/all/resource.go. +package resource + +// ComponentName is the name of the Juju component for resource management. +const ComponentName = "resources" === added file 'src/github.com/juju/juju/resource/content.go' --- src/github.com/juju/juju/resource/content.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/content.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource + +// TODO(ericsnow) Move this file to the charm repo? + +import ( + "io" + "os" + + "github.com/juju/errors" + "github.com/juju/utils" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" +) + +// Content holds a reader for the content of a resource along +// with details about that content. +type Content struct { + // Data holds the resource content, ready to be read (once). + Data io.Reader + + // Size is the byte count of the data. + Size int64 + + // Fingerprint holds the checksum of the data. + Fingerprint charmresource.Fingerprint +} + +// GenerateContent returns a new Content for the given data stream. +func GenerateContent(reader io.ReadSeeker) (Content, error) { + var sizer utils.SizeTracker + sizingReader := io.TeeReader(reader, &sizer) + fp, err := charmresource.GenerateFingerprint(sizingReader) + if err != nil { + return Content{}, errors.Trace(err) + } + if _, err := reader.Seek(0, os.SEEK_SET); err != nil { + return Content{}, errors.Trace(err) + } + size := sizer.Size() + + content := Content{ + Data: reader, + Size: size, + Fingerprint: fp, + } + return content, nil +} === added directory 'src/github.com/juju/juju/resource/context' === added directory 'src/github.com/juju/juju/resource/context/cmd' === added file 'src/github.com/juju/juju/resource/context/cmd/cmd.go' --- src/github.com/juju/juju/resource/context/cmd/cmd.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/cmd/cmd.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("juju.resource.context.cmd") + +// HookContext exposes the functionality needed by the "resource-*" +// hook commands. +type HookContext interface { + // Download downloads the named resource and returns + // the path to which it was downloaded. + Download(name string) (filePath string, _ error) +} === added file 'src/github.com/juju/juju/resource/context/cmd/get.go' --- src/github.com/juju/juju/resource/context/cmd/get.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/cmd/get.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,100 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "fmt" + + "github.com/juju/cmd" + "github.com/juju/errors" +) + +// GetCmdName is the name of the resource-get command. +const GetCmdName = "resource-get" + +// NewGetCmd creates a new GetCmd for the given hook context. +func NewGetCmd(c HookContext) (*GetCmd, error) { + return &GetCmd{ + hookContext: c, + }, nil +} + +// GetCmd provides the functionality of the resource-get command. +type GetCmd struct { + cmd.CommandBase + + hookContext HookContext + resourceName string +} + +// TODO(ericsnow) Also provide an indicator of whether or not +// the resource has changed (in addition to the file path)?
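+
+// For illustration, a typical invocation from a charm hook might look like
+// the following ("software" is a hypothetical resource name; the path shape
+// matches the unit's resource cache described in the doc text below):
+//
+//	software_path=$(resource-get software)
+//	# e.g. /var/lib/juju/agents/unit-foo-1/resources/software/software.tgz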
+ +// Info implements cmd.Command. +func (c GetCmd) Info() *cmd.Info { + return &cmd.Info{ + Name: GetCmdName, + Args: "<resource name>", + Purpose: "get the path to the locally cached resource file", + Doc: ` +"resource-get" is used while a hook is running to get the local path +to the file for the identified resource. This file is an fs-local copy, +unique to the unit for which the hook is running. It is downloaded from +the controller, if necessary. + +If "resource-get" for a resource has not been run before (for the unit) +then the resource is downloaded from the controller at the revision +associated with the unit's service. That file is stored in the unit's +local cache. If "resource-get" *has* been run before then each +subsequent run syncs the resource with the controller. This ensures +that the revision of the unit-local copy of the resource matches the +revision of the resource associated with the unit's service. + +Either way, the path provided by "resource-get" references the +up-to-date file for the resource. Note that the resource may get +updated on the controller for the service at any time, meaning the +cached copy *may* be out of date at any time after you call +"resource-get". Consequently, the command should be run at every +point where it is critical that the resource be up to date. + +The "upgrade-charm" hook is useful for keeping your charm's resources +on a unit up to date. Run "resource-get" there for each of your +charm's resources to do so. The hook fires whenever the file for +one of the service's resources changes on the controller (in addition +to when the charm itself changes). That means it happens in response +to "juju upgrade-charm" as well as to "juju push-resource". + +Note that the "upgrade-charm" hook does not run when the unit is +started up. So be sure to run "resource-get" for your resources in the +"install" hook (or "config-changed", etc.). + +Note that "resource-get" only provides an FS path to the resource file. +It does not provide any information about the resource (e.g. revision). +`, + } +} + +// Init implements cmd.Command. +func (c *GetCmd) Init(args []string) error { + if len(args) < 1 { + return errors.Errorf("missing required resource name") + } else if err := cmd.CheckEmpty(args[1:]); err != nil { + return errors.Trace(err) + } + c.resourceName = args[0] + return nil +} + +// Run implements cmd.Command. +func (c GetCmd) Run(ctx *cmd.Context) error { + filePath, err := c.hookContext.Download(c.resourceName) + if err != nil { + return errors.Annotate(err, "could not download resource") + } + + if _, err := fmt.Fprint(ctx.Stdout, filePath); err != nil { + return errors.Annotate(err, "could not write resource path to stdout") + } + return nil +} === added file 'src/github.com/juju/juju/resource/context/cmd/get_test.go' --- src/github.com/juju/juju/resource/context/cmd/get_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/cmd/get_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package cmd + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" +) + +var _ = gc.Suite(&GetCmdSuite{}) + +type GetCmdSuite struct { + testing.IsolationSuite + + stub *testing.Stub + hctx *stubHookContext +} + +func (s *GetCmdSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.hctx = &stubHookContext{stub: s.stub} +} + +func (s *GetCmdSuite) TestInitNilArgs(c *gc.C) { + getCmd := GetCmd{} + + err := getCmd.Init(nil) + + c.Check(err, gc.NotNil) +} + +func (s *GetCmdSuite) TestInitTooFewArgs(c *gc.C) { + getCmd := GetCmd{} + + err := getCmd.Init([]string{}) + + c.Check(err, gc.NotNil) +} + +func (s *GetCmdSuite) TestInitTooManyArgs(c *gc.C) { + getCmd := GetCmd{} + + err := getCmd.Init([]string{"spam", "eggs"}) + + c.Check(err, gc.NotNil) +} + +func (s *GetCmdSuite) TestInit(c *gc.C) { + getCmd := GetCmd{} + + err := getCmd.Init([]string{"spam"}) + c.Assert(err, jc.ErrorIsNil) + + c.Check(getCmd.resourceName, gc.Equals, "spam") +} + +func (s *GetCmdSuite) TestRunOkay(c *gc.C) { + getCmd := GetCmd{ + hookContext: s.hctx, + resourceName: "spam", + } + const expected = "/var/lib/juju/agents/unit-foo-1/resources/spam/a-file.tgz" + s.hctx.ReturnDownload = expected + ctx := coretesting.Context(c) + + err := getCmd.Run(ctx) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "Download") + s.stub.CheckCall(c, 0, "Download", "spam") + c.Check(coretesting.Stdout(ctx), gc.Equals, expected) + c.Check(coretesting.Stderr(ctx), gc.Equals, "") +} + +func (s *GetCmdSuite) TestRunDownloadFailure(c *gc.C) { + getCmd := GetCmd{ + hookContext: s.hctx, + resourceName: "spam", + } + failure := errors.New("") + s.stub.SetErrors(failure) + ctx := coretesting.Context(c) + + err := getCmd.Run(ctx) + + s.stub.CheckCallNames(c, "Download") + c.Check(errors.Cause(err), gc.Equals, failure) + c.Check(coretesting.Stdout(ctx), gc.Equals, "") + c.Check(coretesting.Stderr(ctx), gc.Equals, "") +} === added file 'src/github.com/juju/juju/resource/context/cmd/package_test.go' --- src/github.com/juju/juju/resource/context/cmd/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/cmd/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/context/cmd/stub_test.go' --- src/github.com/juju/juju/resource/context/cmd/stub_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/cmd/stub_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package cmd
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/testing"
+)
+
+type stubHookContext struct {
+	stub *testing.Stub
+
+	ReturnDownload string
+}
+
+func (s *stubHookContext) Download(name string) (string, error) {
+	s.stub.AddCall("Download", name)
+	if err := s.stub.NextErr(); err != nil {
+		return "", errors.Trace(err)
+	}
+
+	return s.ReturnDownload, nil
+}
=== added file 'src/github.com/juju/juju/resource/context/context.go'
--- src/github.com/juju/juju/resource/context/context.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/context/context.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,158 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/utils"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/resource"
+	"github.com/juju/juju/resource/context/internal"
+)
+
+var logger = loggo.GetLogger("juju.resource.context")
+
+// HookContextFacade is the name of the API facade for resources in the uniter.
+const HookContextFacade = resource.ComponentName + "-hook-context"
+
+// APIClient exposes the uniter API functionality needed for resources.
+type APIClient interface {
+	// GetResource returns the resource info and content for the given
+	// name (and unit-implied service).
+	GetResource(resourceName string) (resource.Resource, io.ReadCloser, error)
+}
+
+// Context is the resources portion of a uniter hook context.
+type Context struct {
+	apiClient APIClient
+
+	// dataDir is the path to the directory where all resources are
+	// stored for a unit. It will look something like this:
+	//
+	//   /var/lib/juju/agents/unit-spam-1/resources
+	dataDir string
+}
+
+// NewContextAPI returns a new Context for the given API client and data dir.
+func NewContextAPI(apiClient APIClient, dataDir string) *Context {
+	return &Context{
+		apiClient: apiClient,
+		dataDir:   dataDir,
+	}
+}
+
+// Flush implements jujuc.Context.
+func (c *Context) Flush() error {
+	return nil
+}
+
+// Download downloads the named resource and returns the path
+// to which it was downloaded. If the resource does not exist or has
+// not been uploaded yet then errors.NotFound is returned.
+//
+// Note that the downloaded file is checked for correctness.
+func (c *Context) Download(name string) (string, error) {
+	deps := &contextDeps{
+		APIClient: c.apiClient,
+		name:      name,
+		dataDir:   c.dataDir,
+	}
+	path, err := internal.ContextDownload(deps)
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	return path, nil
+}
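
As a sketch of the intended call pattern (a hypothetical helper; apiClient stands in for a real uniter API client, and the resource name "spam" is illustrative):

    package uniterutil // hypothetical package, for illustration only

    import (
    	"github.com/juju/errors"

    	"github.com/juju/juju/resource/context"
    )

    // FetchResource shows the intended use of the hook context: build it
    // once for the unit, then ask for resources by name as hooks run.
    func FetchResource(apiClient context.APIClient, dataDir string) (string, error) {
    	ctx := context.NewContextAPI(apiClient, dataDir)

    	// Download fetches the resource (or re-syncs a stale cached copy)
    	// and returns the local path of the cached file.
    	path, err := ctx.Download("spam")
    	if err != nil {
    		return "", errors.Trace(err)
    	}
    	return path, nil
    }

+
+// contextDeps implements all the external dependencies
+// of ContextDownload().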
+type contextDeps struct {
+	APIClient
+	name    string
+	dataDir string
+}
+
+func (deps *contextDeps) NewContextDirectorySpec() internal.ContextDirectorySpec {
+	return internal.NewContextDirectorySpec(deps.dataDir, deps.name, deps)
+}
+
+func (deps *contextDeps) OpenResource() (internal.ContextOpenedResource, error) {
+	return internal.OpenResource(deps.name, deps)
+}
+
+func (deps *contextDeps) Download(target internal.DownloadTarget, remote internal.ContextOpenedResource) error {
+	return internal.DownloadIndirect(target, remote, deps)
+}
+
+func (deps *contextDeps) DownloadDirect(target internal.DownloadTarget, remote internal.ContentSource) error {
+	return internal.Download(target, remote)
+}
+
+func (deps *contextDeps) ReplaceDirectory(tgt, src string) error {
+	return internal.ReplaceDirectory(tgt, src, deps)
+}
+
+func (deps *contextDeps) NewTempDirSpec() (internal.DownloadTempTarget, error) {
+	spec, err := internal.NewTempDirectorySpec(deps.name, deps)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	dir := &internal.ContextDownloadDirectory{
+		spec,
+	}
+	return dir, nil
+}
+
+func (deps *contextDeps) WriteContent(target io.Writer, content internal.Content) error {
+	return internal.WriteContent(target, content, deps)
+}
+
+func (deps contextDeps) CloseAndLog(closer io.Closer, label string) {
+	internal.CloseAndLog(closer, label, logger)
+}
+
+func (deps contextDeps) MkdirAll(dirname string) error {
+	return os.MkdirAll(dirname, 0755)
+}
+
+func (deps contextDeps) CreateWriter(filename string) (io.WriteCloser, error) {
+	// TODO(ericsnow) chmod 0644?
+	return os.Create(filename)
+}
+
+func (deps contextDeps) NewTempDir() (string, error) {
+	return ioutil.TempDir("", "juju-resource-")
+}
+
+func (deps contextDeps) RemoveDir(dirname string) error {
+	return os.RemoveAll(dirname)
+}
+
+func (deps contextDeps) Move(target, source string) error {
+	// Note that we follow the io.Copy() argument order here
+	// (os.Rename does not).
+	return os.Rename(source, target)
+}
+
+func (deps contextDeps) Copy(target io.Writer, source io.Reader) error {
+	_, err := io.Copy(target, source)
+	return err
+}
+
+func (deps contextDeps) Join(path ...string) string {
+	return filepath.Join(path...)
+}
+
+func (deps contextDeps) NewChecker(content internal.Content) internal.ContentChecker {
+	var sizer utils.SizeTracker
+	checksumWriter := charmresource.NewFingerprintHash()
+	return internal.NewContentChecker(content, &sizer, checksumWriter)
+}
=== added directory 'src/github.com/juju/juju/resource/context/internal'
=== added file 'src/github.com/juju/juju/resource/context/internal/base_test.go'
--- src/github.com/juju/juju/resource/context/internal/base_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/context/internal/base_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,26 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package internal_test
+
+import (
+	"io"
+	"time"
+
+	"github.com/juju/testing"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/resource"
+	"github.com/juju/juju/resource/resourcetesting"
+)
+
+func newResource(c *gc.C, stub *testing.Stub, name, content string) (resource.Resource, io.ReadCloser) {
+	opened := resourcetesting.NewResource(c, stub, name, "a-service", content)
+	res := opened.Resource
+	if content != "" {
+		return res, opened.ReadCloser
+	}
+	res.Username = ""
+	res.Timestamp = time.Time{}
+	return res, nil
+}
=== added file 'src/github.com/juju/juju/resource/context/internal/content.go'
--- src/github.com/juju/juju/resource/context/internal/content.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/context/internal/content.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,158 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package internal
+
+// TODO(ericsnow) Move this file elsewhere?
+// (e.g. top-level resource pkg, charm/resource)
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/juju/errors"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/resource"
+)
+
+// Content holds a reader for the content of a resource along
+// with details about that content.
+type Content struct {
+	// Data holds the resource content, ready to be read (once).
+	Data io.Reader
+
+	// Size is the byte count of the data.
+	Size int64
+
+	// Fingerprint holds the checksum of the data.
+	Fingerprint charmresource.Fingerprint
+}
+
+// Verify ensures that the actual resource content details match
+// the expected ones.
+func (c Content) Verify(size int64, fp charmresource.Fingerprint) error {
+	if size != c.Size {
+		return errors.Errorf("resource size does not match expected (%d != %d)", size, c.Size)
+	}
+	if !bytes.Equal(fp.Bytes(), c.Fingerprint.Bytes()) {
+		return errors.Errorf("resource fingerprint does not match expected (%q != %q)", fp, c.Fingerprint)
+	}
+	return nil
+}
+
+// ContentSource represents the functionality of OpenedResource,
+// relative to Content.
+type ContentSource interface {
+	// Content returns the content for the opened resource.
+	Content() Content
+
+	// Info returns the info for the opened resource.
+	Info() resource.Resource
+}
+
+// TODO(ericsnow) Need a lockfile around create/write?
+
+// WriteContent writes the resource file to the target provided
+// by the deps.
+func WriteContent(target io.Writer, content Content, deps WriteContentDeps) error {
+	checker := deps.NewChecker(content)
+	source := checker.WrapReader(content.Data)
+
+	if err := deps.Copy(target, source); err != nil {
+		return errors.Annotate(err, "could not write resource to file")
+	}
+
+	if err := checker.Verify(); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+// WriteContentDeps exposes the external functionality needed by WriteContent.
+type WriteContentDeps interface {
+	// NewChecker provides a content checker for the given content.
+	NewChecker(Content) ContentChecker
+
+	// Copy copies the data from the reader into the writer.
+	Copy(io.Writer, io.Reader) error
+}
+
+// ContentChecker exposes functionality for verifying the data read from a reader.
+type ContentChecker interface {
+	// WrapReader wraps the provided reader in another reader
+	// that tracks the read data.
+	WrapReader(io.Reader) io.Reader
+
+	// Verify fails if the tracked data does not match
+	// the expected data.
+ Verify() error +} + +// Checker provides the functionality for verifying that read data +// is correct. +type Checker struct { + // Content holds the expected content values. + Content Content + + // SizeTracker tracks the number of bytes read. + SizeTracker SizeTracker + + // ChecksumWriter tracks the checksum of the read bytes. + ChecksumWriter ChecksumWriter +} + +// NewContentChecker returns a Checker for the provided data. +func NewContentChecker(content Content, sizeTracker SizeTracker, checksumWriter ChecksumWriter) *Checker { + return &Checker{ + Content: content, + SizeTracker: sizeTracker, + ChecksumWriter: checksumWriter, + } +} + +// WrapReader implements ContentChecker. +func (c Checker) WrapReader(reader io.Reader) io.Reader { + hashingReader := io.TeeReader(reader, c.ChecksumWriter) + return io.TeeReader(hashingReader, c.SizeTracker) +} + +// Verify implements ContentChecker. +func (c Checker) Verify() error { + size := c.SizeTracker.Size() + fp := c.ChecksumWriter.Fingerprint() + if err := c.Content.Verify(size, fp); err != nil { + return errors.Trace(err) + } + return nil +} + +// NopChecker is a ContentChecker that accepts all data. +type NopChecker struct{} + +// WrapReader implements ContentChecker. +func (NopChecker) WrapReader(reader io.Reader) io.Reader { + return reader +} + +// Verify implements ContentChecker. +func (NopChecker) Verify() error { + return nil +} + +// SizeTracker tracks the number of bytes written. +type SizeTracker interface { + io.Writer + + // Size returns the number of bytes written. + Size() int64 +} + +// ChecksumWriter tracks the checksum of all written bytes. +type ChecksumWriter interface { + io.Writer + + // Fingerprint is the fingerprint for the tracked checksum. + Fingerprint() charmresource.Fingerprint +} === added file 'src/github.com/juju/juju/resource/context/internal/content_test.go' --- src/github.com/juju/juju/resource/context/internal/content_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/content_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,291 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package internal_test + +import ( + "io" + "io/ioutil" + "strings" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource/context/internal" +) + +var _ = gc.Suite(&ContentSuite{}) + +type ContentSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *ContentSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *ContentSuite) TestVerifyOkay(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + + err := content.Verify(info.Size, info.Fingerprint) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckNoCalls(c) +} + +func (s *ContentSuite) TestVerifyBadSize(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + + err := content.Verify(info.Size+1, info.Fingerprint) + + c.Check(err, gc.ErrorMatches, `resource size does not match expected \(10 != 9\)`) + s.stub.CheckNoCalls(c) +} + +func (s *ContentSuite) TestVerifyBadFingerprint(c *gc.C) { + fp, err := charmresource.GenerateFingerprint(strings.NewReader("other data")) + c.Assert(err, jc.ErrorIsNil) + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + + err = content.Verify(info.Size, fp) + + c.Check(err, gc.ErrorMatches, `resource fingerprint does not match expected .*`) + s.stub.CheckNoCalls(c) +} + +func (s *ContentSuite) TestWriteContent(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + target, _ := filetesting.NewStubWriter(s.stub.Stub) + stub := &stubContent{ + internalStub: s.stub, + Reader: reader, + } + stub.ReturnNewChecker = stub + stub.ReturnWrapReader = stub + deps := stub + + err := internal.WriteContent(target, content, deps) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "NewChecker", + "WrapReader", + "Copy", + "Verify", + ) +} + +var _ = gc.Suite(&CheckerSuite{}) + +type CheckerSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *CheckerSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *CheckerSuite) TestNewContentChecker(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + sizeWriter, sizeBuf := filetesting.NewStubWriter(s.stub.Stub) + sizeTracker := &stubChecker{ + Writer: sizeWriter, + stub: s.stub.Stub, + } + sumWriter, sumBuf := filetesting.NewStubWriter(s.stub.Stub) + checksumWriter := &stubChecker{ + Writer: sumWriter, + stub: s.stub.Stub, + } + + checker := internal.NewContentChecker(content, sizeTracker, checksumWriter) + + s.stub.CheckNoCalls(c) + c.Check(checker, jc.DeepEquals, &internal.Checker{ + Content: content, + SizeTracker: sizeTracker, + ChecksumWriter: checksumWriter, + }) + c.Check(sizeBuf.String(), gc.Equals, "") + c.Check(sumBuf.String(), gc.Equals, "") +} + +func (s *CheckerSuite) TestWrapReader(c *gc.C) { + info, 
reader := newResource(c, s.stub.Stub, "spam", "some data") + sizeWriter, sizeBuf := filetesting.NewStubWriter(s.stub.Stub) + sumWriter, sumBuf := filetesting.NewStubWriter(s.stub.Stub) + checker := internal.Checker{ + Content: internal.Content{ + Size: info.Size, + Fingerprint: info.Fingerprint, + }, + SizeTracker: &stubChecker{ + Writer: sizeWriter, + stub: s.stub.Stub, + }, + ChecksumWriter: &stubChecker{ + Writer: sumWriter, + stub: s.stub.Stub, + }, + } + + wrapped := checker.WrapReader(reader) + + s.stub.CheckNoCalls(c) + data, err := ioutil.ReadAll(wrapped) + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCallNames(c, + "Read", + "Write", + "Write", + "Read", + ) + c.Check(string(data), gc.Equals, "some data") + c.Check(sizeBuf.String(), gc.Equals, "some data") + c.Check(sumBuf.String(), gc.Equals, "some data") +} + +func (s *CheckerSuite) TestVerifyOkay(c *gc.C) { + info, _ := newResource(c, s.stub.Stub, "spam", "some data") + stub := &stubChecker{ + stub: s.stub.Stub, + ReturnSize: info.Size, + ReturnFingerprint: info.Fingerprint, + } + checker := internal.Checker{ + Content: internal.Content{ + Size: info.Size, + Fingerprint: info.Fingerprint, + }, + SizeTracker: stub, + ChecksumWriter: stub, + } + + err := checker.Verify() + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "Size", "Fingerprint") +} + +func (s *CheckerSuite) TestVerifyFailed(c *gc.C) { + info, _ := newResource(c, s.stub.Stub, "spam", "some data") + stub := &stubChecker{ + stub: s.stub.Stub, + ReturnSize: info.Size + 1, + ReturnFingerprint: info.Fingerprint, + } + checker := internal.Checker{ + Content: internal.Content{ + Size: info.Size, + Fingerprint: info.Fingerprint, + }, + SizeTracker: stub, + ChecksumWriter: stub, + } + + err := checker.Verify() + + s.stub.CheckCallNames(c, "Size", "Fingerprint") + c.Check(err, gc.ErrorMatches, `resource size does not match expected \(10 != 9\)`) +} + +func (s *CheckerSuite) TestNopWrapReader(c *gc.C) { + _, reader := newResource(c, s.stub.Stub, "spam", "some data") + checker := internal.NopChecker{} + + wrapped := checker.WrapReader(reader) + + s.stub.CheckNoCalls(c) + c.Check(wrapped, gc.Equals, reader) +} + +func (s *CheckerSuite) TestNopVerify(c *gc.C) { + checker := internal.NopChecker{} + + err := checker.Verify() + + c.Check(err, jc.ErrorIsNil) +} + +type stubContent struct { + *internalStub + io.Reader + + ReturnWrapReader io.Reader + ReturnCreateTarget io.WriteCloser +} + +func (s *stubContent) WrapReader(reader io.Reader) io.Reader { + s.Stub.AddCall("WrapReader", reader) + s.Stub.NextErr() // Pop one off. + + return s.ReturnWrapReader +} + +func (s *stubContent) Verify() error { + s.Stub.AddCall("Verify") + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubChecker struct { + io.Writer + stub *testing.Stub + + ReturnSize int64 + ReturnFingerprint charmresource.Fingerprint +} + +func (s *stubChecker) Size() int64 { + s.stub.AddCall("Size") + s.stub.NextErr() // Pop one off. + + return s.ReturnSize +} + +func (s *stubChecker) Fingerprint() charmresource.Fingerprint { + s.stub.AddCall("Fingerprint") + s.stub.NextErr() // Pop one off. + + return s.ReturnFingerprint +} === added file 'src/github.com/juju/juju/resource/context/internal/context.go' --- src/github.com/juju/juju/resource/context/internal/context.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/context.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package internal
+
+import (
+	"io"
+
+	"github.com/juju/errors"
+)
+
+// ContextDownload downloads the named resource and returns the path
+// to which it was downloaded. If the resource does not exist or has
+// not been uploaded yet then errors.NotFound is returned.
+//
+// Note that the downloaded file is checked for correctness.
+func ContextDownload(deps ContextDownloadDeps) (path string, err error) {
+	// TODO(katco): Potential race-condition: two commands running at
+	// once. Solve via collision using os.Mkdir() with a uniform
+	// temp dir name (e.g. "/..download")?
+
+	resDirSpec := deps.NewContextDirectorySpec()
+
+	remote, err := deps.OpenResource()
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	defer deps.CloseAndLog(remote, "remote resource")
+	path = resDirSpec.Resolve(remote.Info().Path)
+
+	isUpToDate, err := resDirSpec.IsUpToDate(remote.Content())
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	if isUpToDate {
+		// We're up to date already!
+		return path, nil
+	}
+
+	if err := deps.Download(resDirSpec, remote); err != nil {
+		return "", errors.Trace(err)
+	}
+
+	return path, nil
+}
+
+// ContextDownloadDeps provides the externally defined functions
+// on which ContextDownload depends. The functionality all relates
+// to a single resource.
+type ContextDownloadDeps interface {
+	// NewContextDirectorySpec returns the dir spec for the resource
+	// in the hook context.
+	NewContextDirectorySpec() ContextDirectorySpec
+
+	// OpenResource reads the resource info and opens the resource
+	// content for reading.
+	OpenResource() (ContextOpenedResource, error)
+
+	// CloseAndLog closes the closer and logs any error.
+	CloseAndLog(io.Closer, string)
+
+	// Download writes the remote to the target directory.
+	Download(DownloadTarget, ContextOpenedResource) error
+}
+
+// ContextDirectorySpec exposes the functionality of a resource dir spec
+// in a hook context.
+type ContextDirectorySpec interface {
+	Resolver
+
+	// Initialize prepares the target directory and returns it.
+	Initialize() (DownloadDirectory, error)
+
+	// IsUpToDate indicates whether or not the resource dir is in sync
+	// with the content.
+	IsUpToDate(Content) (bool, error)
+}
+
+// NewContextDirectorySpec returns a new directory spec for the context.
+func NewContextDirectorySpec(dataDir, name string, deps DirectorySpecDeps) ContextDirectorySpec {
+	return &contextDirectorySpec{
+		DirectorySpec: NewDirectorySpec(dataDir, name, deps),
+	}
+}
+
+type contextDirectorySpec struct {
+	*DirectorySpec
+}
+
+// Initialize implements ContextDirectorySpec.
+func (spec contextDirectorySpec) Initialize() (DownloadDirectory, error) {
+	return spec.DirectorySpec.Initialize()
+}
+
+// ContextDownloadDirectory is an adapter for TempDirectorySpec.
+type ContextDownloadDirectory struct {
+	*TempDirectorySpec
+}
+
+// Initialize implements DownloadTarget.
+func (dir ContextDownloadDirectory) Initialize() (DownloadDirectory, error) {
+	return dir.TempDirectorySpec.Initialize()
+}
+
+// ContextOpenedResource exposes the functionality of an "opened"
+// resource.
+type ContextOpenedResource interface {
+	ContentSource
+	io.Closer
+}
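
For reference, the check the content checker ultimately performs boils down to Content.Verify: recompute the fingerprint and byte count of the data actually read and compare them against the expected values. A minimal sketch (the literal data is illustrative, and because of Go's internal-package rule such code would have to live under resource/context):

    package main

    import (
    	"fmt"
    	"strings"

    	charmresource "gopkg.in/juju/charm.v6-unstable/resource"

    	"github.com/juju/juju/resource/context/internal"
    )

    func main() {
    	data := "some data"

    	// Compute the expected fingerprint, as the uploader would have.
    	fp, err := charmresource.GenerateFingerprint(strings.NewReader(data))
    	if err != nil {
    		panic(err)
    	}

    	content := internal.Content{
    		Data:        strings.NewReader(data),
    		Size:        int64(len(data)),
    		Fingerprint: fp,
    	}

    	// Verify succeeds only if both the size and the fingerprint match.
    	fmt.Println(content.Verify(int64(len(data)), fp)) // <nil>
    }

+
+// NewContextContentChecker returns a content checker for the hook context.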
+func NewContextContentChecker(content Content, deps NewContextContentCheckerDeps) ContentChecker { + if content.Fingerprint.IsZero() { + return &NopChecker{} + } + + sizer := deps.NewSizeTracker() + checksumWriter := deps.NewChecksumWriter() + //checker.checksumWriter = charmresource.NewFingerprintHash() + return NewContentChecker(content, sizer, checksumWriter) +} + +// NewContextContentCheckerDeps exposes the functionality needed +// by NewContextContentChecker(). +type NewContextContentCheckerDeps interface { + // NewSizeTracker returns a new size tracker. + NewSizeTracker() SizeTracker + + // NewChecksumWriter returns a new checksum writer. + NewChecksumWriter() ChecksumWriter +} === added file 'src/github.com/juju/juju/resource/context/internal/context_test.go' --- src/github.com/juju/juju/resource/context/internal/context_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/context_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,147 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/context/internal" +) + +var _ = gc.Suite(&ContextSuite{}) + +type ContextSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *ContextSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *ContextSuite) TestContextDownloadOutOfDate(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + stub := &stubContext{ + internalStub: s.stub, + StubCloser: &filetesting.StubCloser{Stub: s.stub.Stub}, + } + stub.ReturnNewContextDirectorySpec = stub + stub.ReturnOpenResource = stub + stub.ReturnResolve = "/var/lib/juju/agents/unit-spam-1/resources/spam/eggs.tgz" + stub.ReturnInfo = info + stub.ReturnContent = content + deps := stub + + path, err := internal.ContextDownload(deps) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "NewContextDirectorySpec", + "OpenResource", + "Info", + "Resolve", + "Content", + "IsUpToDate", + "Download", + "CloseAndLog", + ) + c.Check(path, gc.Equals, "/var/lib/juju/agents/unit-spam-1/resources/spam/eggs.tgz") +} + +func (s *ContextSuite) TestContextDownloadUpToDate(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + stub := &stubContext{ + internalStub: s.stub, + StubCloser: &filetesting.StubCloser{Stub: s.stub.Stub}, + } + stub.ReturnNewContextDirectorySpec = stub + stub.ReturnOpenResource = stub + stub.ReturnResolve = "/var/lib/juju/agents/unit-spam-1/resources/spam/eggs.tgz" + stub.ReturnInfo = info + stub.ReturnContent = content + stub.ReturnIsUpToDate = true + deps := stub + + path, err := internal.ContextDownload(deps) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "NewContextDirectorySpec", + "OpenResource", + "Info", + "Resolve", + "Content", + "IsUpToDate", + "CloseAndLog", + ) + c.Check(path, gc.Equals, "/var/lib/juju/agents/unit-spam-1/resources/spam/eggs.tgz") +} + +type stubContext struct { + *internalStub + *filetesting.StubCloser + + ReturnResolve string + ReturnInfo 
resource.Resource
+	ReturnContent    internal.Content
+	ReturnInitialize internal.DownloadDirectory
+	ReturnIsUpToDate bool
+}
+
+func (s *stubContext) Resolve(path ...string) string {
+	s.AddCall("Resolve", path)
+	s.NextErr() // Pop one off.
+
+	return s.ReturnResolve
+}
+
+func (s *stubContext) Info() resource.Resource {
+	s.AddCall("Info")
+	s.NextErr() // Pop one off.
+
+	return s.ReturnInfo
+}
+
+func (s *stubContext) Content() internal.Content {
+	s.AddCall("Content")
+	s.NextErr() // Pop one off.
+
+	return s.ReturnContent
+}
+
+func (s *stubContext) Initialize() (internal.DownloadDirectory, error) {
+	s.AddCall("Initialize")
+	if err := s.NextErr(); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return s.ReturnInitialize, nil
+}
+
+func (s *stubContext) IsUpToDate(content internal.Content) (bool, error) {
+	s.AddCall("IsUpToDate", content)
+	if err := s.NextErr(); err != nil {
+		return false, errors.Trace(err)
+	}
+
+	return s.ReturnIsUpToDate, nil
+}
=== added file 'src/github.com/juju/juju/resource/context/internal/download.go'
--- src/github.com/juju/juju/resource/context/internal/download.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/context/internal/download.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,102 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package internal
+
+// TODO(ericsnow) Move this file elsewhere?
+// (e.g. top-level resource pkg, charm/resource)
+
+import (
+	"io"
+
+	"github.com/juju/errors"
+)
+
+// Download downloads the resource from the provided source to the target.
+func Download(target DownloadTarget, remote ContentSource) error {
+	resDir, err := target.Initialize()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	if err := resDir.Write(remote); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+// DownloadIndirect downloads the resource from the source into a temp
+// directory. Then the target is replaced by the temp directory.
+func DownloadIndirect(target DownloadTarget, remote ContentSource, deps DownloadIndirectDeps) error {
+	tempDirSpec, err := deps.NewTempDirSpec()
+	defer deps.CloseAndLog(tempDirSpec, "resource temp dir")
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	if err := deps.DownloadDirect(tempDirSpec, remote); err != nil {
+		return errors.Trace(err)
+	}
+
+	resDir, err := target.Initialize()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	oldDir := tempDirSpec.Resolve()
+	newDir := resDir.Resolve()
+	if err := deps.ReplaceDirectory(newDir, oldDir); err != nil {
+		return errors.Annotate(err, "could not replace existing resource directory")
+	}
+
+	return nil
+}
+
+// DownloadIndirectDeps exposes the external functionality needed
+// by DownloadIndirect.
+type DownloadIndirectDeps interface {
+	// NewTempDirSpec returns a directory spec for the resource under a temporary datadir.
+	NewTempDirSpec() (DownloadTempTarget, error)
+
+	// CloseAndLog closes the closer and logs any error.
+	CloseAndLog(io.Closer, string)
+
+	// DownloadDirect downloads the source into the target.
+	DownloadDirect(DownloadTarget, ContentSource) error
+
+	// ReplaceDirectory moves the source directory path to the target
+	// path. If the target path already exists then it is replaced atomically.
+	ReplaceDirectory(tgt, src string) error
+}
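
DownloadIndirect is what keeps a partially written resource from ever being visible at the final path: content lands in a temp directory first and only a rename makes it visible. With the deps indirection stripped away, the shape of the flow is roughly as follows (a sketch with illustrative paths, not the package's API):

    package main

    import (
    	"io"
    	"io/ioutil"
    	"os"
    	"path/filepath"
    	"strings"
    )

    // downloadIndirect mirrors the flow above without the deps layer:
    // write under a temp dir, then swap the whole directory into place.
    func downloadIndirect(targetDir, filename string, source io.Reader) error {
    	tempDir, err := ioutil.TempDir("", "juju-resource-")
    	if err != nil {
    		return err
    	}
    	defer os.RemoveAll(tempDir) // best-effort cleanup, like CloseAndLog

    	f, err := os.Create(filepath.Join(tempDir, filename))
    	if err != nil {
    		return err
    	}
    	if _, err := io.Copy(f, source); err != nil {
    		f.Close()
    		return err
    	}
    	if err := f.Close(); err != nil {
    		return err
    	}

    	// ReplaceDirectory, in miniature: remove the target, ensure its
    	// parent exists, then rename the temp dir into place.
    	if err := os.RemoveAll(targetDir); err != nil {
    		return err
    	}
    	if err := os.MkdirAll(filepath.Dir(targetDir), 0755); err != nil {
    		return err
    	}
    	return os.Rename(tempDir, targetDir)
    }

    func main() {
    	err := downloadIndirect("/tmp/demo-resources/spam", "spam.tgz",
    		strings.NewReader("some data"))
    	if err != nil {
    		panic(err)
    	}
    }

+
+// DownloadTarget exposes the functionality of a directory spec
+// needed by Download().
+type DownloadTarget interface {
+	// Initialize prepares the target directory and returns it.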
+ Initialize() (DownloadDirectory, error) +} + +// DownloadDirectory exposes the functionality of a resource directory +// needed by Download(). +type DownloadDirectory interface { + Resolver + + // Write writes all the relevant files for the provided source + // to the directory. + Write(ContentSource) error +} + +// DownloadTempTarget represents a temporary download directory. +type DownloadTempTarget interface { + DownloadTarget + Resolver + io.Closer +} + +// Resolver exposes the functionality of DirectorySpec needed +// by DownloadIndirect. +type Resolver interface { + // Resolve returns the fully resolved path for the provided path items. + Resolve(...string) string +} === added file 'src/github.com/juju/juju/resource/context/internal/download_test.go' --- src/github.com/juju/juju/resource/context/internal/download_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/download_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/resource/context/internal" +) + +var _ = gc.Suite(&DownloadSuite{}) + +type DownloadSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *DownloadSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *DownloadSuite) TestDownload(c *gc.C) { + stub := &stubDownload{ + internalStub: s.stub, + } + target := stub + remote := stub + + err := internal.Download(target, remote) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "Initialize", "Write") + s.stub.CheckCall(c, 1, "Write", remote) +} + +func (s *DownloadSuite) TestDownloadIndirectOkay(c *gc.C) { + stub := &stubDownload{ + internalStub: s.stub, + } + stub.ReturnNewTempDirSpec = stub + stub.ReturnResolve = []string{ + "/tmp/xyz/eggs", + "/var/lib/juju/agents/unit-spam-1/resources/eggs", + } + target := stub + remote := stub + deps := stub + + err := internal.DownloadIndirect(target, remote, deps) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "NewTempDirSpec", + "DownloadDirect", + "Initialize", + "Resolve", + "Resolve", + "ReplaceDirectory", + "CloseAndLog", + ) + s.stub.CheckCall(c, 1, "DownloadDirect", stub, remote) +} + +func (s *DownloadSuite) TestDownloadIndirectTempDirFailure(c *gc.C) { + stub := &stubDownload{ + internalStub: s.stub, + } + stub.ReturnNewTempDirSpec = stub + failure := errors.New("") + stub.SetErrors(failure) + target := stub + remote := stub + deps := stub + + err := internal.DownloadIndirect(target, remote, deps) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, + "NewTempDirSpec", + "CloseAndLog", + ) +} + +type stubDownload struct { + *internalStub + internal.ContentSource + + ReturnResolve []string +} + +func (s *stubDownload) Close() error { + s.Stub.AddCall("Close") + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubDownload) Initialize() (internal.DownloadDirectory, error) { + s.Stub.AddCall("Initialize") + if err := s.Stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s, nil +} + +func (s *stubDownload) Write(source internal.ContentSource) error { + s.Stub.AddCall("Write", source) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return 
nil +} + +func (s *stubDownload) Resolve(path ...string) string { + s.Stub.AddCall("Resolve", path) + s.Stub.NextErr() // Pop one off. + + resolved := s.ReturnResolve[0] + s.ReturnResolve = s.ReturnResolve[1:] + return resolved +} === added file 'src/github.com/juju/juju/resource/context/internal/package_test.go' --- src/github.com/juju/juju/resource/context/internal/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/context/internal/resource.go' --- src/github.com/juju/juju/resource/context/internal/resource.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/resource.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,56 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal + +// TODO(ericsnow) Move this file elsewhere? +// (e.g. top-level resource pkg, charm/resource) + +import ( + "io" + + "github.com/juju/errors" + + "github.com/juju/juju/resource" +) + +// OpenedResourceClient exposes the API functionality needed by OpenResource. +type OpenedResourceClient interface { + // GetResource returns the resource info and content for the given + // name (and unit-implied service). + GetResource(resourceName string) (resource.Resource, io.ReadCloser, error) +} + +// OpenedResource wraps the resource info and reader returned +// from the API. +type OpenedResource struct { + resource.Resource + io.ReadCloser +} + +// OpenResource opens the identified resource using the provided client. +func OpenResource(name string, client OpenedResourceClient) (*OpenedResource, error) { + info, reader, err := client.GetResource(name) + if err != nil { + return nil, errors.Trace(err) + } + or := &OpenedResource{ + Resource: info, + ReadCloser: reader, + } + return or, nil +} + +// Content returns the "content" for the opened resource. +func (or OpenedResource) Content() Content { + return Content{ + Data: or.ReadCloser, + Size: or.Size, + Fingerprint: or.Fingerprint, + } +} + +// Info returns the info for the opened resource. +func (or OpenedResource) Info() resource.Resource { + return or.Resource +} === added file 'src/github.com/juju/juju/resource/context/internal/resource_test.go' --- src/github.com/juju/juju/resource/context/internal/resource_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/resource_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,71 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package internal_test
+
+import (
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/resource/context/internal"
+)
+
+var _ = gc.Suite(&OpenedResourceSuite{})
+
+type OpenedResourceSuite struct {
+	testing.IsolationSuite
+
+	stub *internalStub
+}
+
+func (s *OpenedResourceSuite) SetUpTest(c *gc.C) {
+	s.IsolationSuite.SetUpTest(c)
+
+	s.stub = newInternalStub()
+}
+
+func (s *OpenedResourceSuite) TestOpenResource(c *gc.C) {
+	info, reader := newResource(c, s.stub.Stub, "spam", "some data")
+	s.stub.ReturnGetResourceInfo = info
+	s.stub.ReturnGetResourceData = reader
+
+	opened, err := internal.OpenResource("spam", s.stub)
+	c.Assert(err, jc.ErrorIsNil)
+
+	s.stub.CheckCallNames(c, "GetResource")
+	c.Check(opened, jc.DeepEquals, &internal.OpenedResource{
+		Resource:   info,
+		ReadCloser: reader,
+	})
+}
+
+func (s *OpenedResourceSuite) TestContent(c *gc.C) {
+	info, reader := newResource(c, s.stub.Stub, "spam", "some data")
+	opened := internal.OpenedResource{
+		Resource:   info,
+		ReadCloser: reader,
+	}
+
+	content := opened.Content()
+
+	s.stub.CheckNoCalls(c)
+	c.Check(content, jc.DeepEquals, internal.Content{
+		Data:        reader,
+		Size:        info.Size,
+		Fingerprint: info.Fingerprint,
+	})
+}
+
+func (s *OpenedResourceSuite) TestInfo(c *gc.C) {
+	expected, reader := newResource(c, s.stub.Stub, "spam", "some data")
+	opened := internal.OpenedResource{
+		Resource:   expected,
+		ReadCloser: reader,
+	}
+
+	info := opened.Info()
+
+	s.stub.CheckNoCalls(c)
+	c.Check(info, jc.DeepEquals, expected)
+}
=== added file 'src/github.com/juju/juju/resource/context/internal/resourcedir.go'
--- src/github.com/juju/juju/resource/context/internal/resourcedir.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/context/internal/resourcedir.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,182 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package internal
+
+// TODO(ericsnow) Move this file elsewhere?
+// (e.g. top-level resource pkg, charm/resource)
+
+import (
+	"io"
+
+	"github.com/juju/errors"
+)
+
+// DirectorySpec identifies information for a resource directory.
+type DirectorySpec struct {
+	// Name is the resource name.
+	Name string
+
+	// Dirname is the path to the resource directory.
+	Dirname string
+
+	// Deps holds the external dependencies of DirectorySpec.
+	Deps DirectorySpecDeps
+}
+
+// NewDirectorySpec returns a new directory spec for the given info.
+func NewDirectorySpec(dataDir, name string, deps DirectorySpecDeps) *DirectorySpec {
+	dirname := deps.Join(dataDir, name)
+
+	spec := &DirectorySpec{
+		Name:    name,
+		Dirname: dirname,
+
+		Deps: deps,
+	}
+	return spec
+}
+
+// Resolve returns the fully resolved file path, relative to the directory.
+func (spec DirectorySpec) Resolve(path ...string) string {
+	return spec.Deps.Join(append([]string{spec.Dirname}, path...)...)
+}
+
+// TODO(ericsnow) Make IsUpToDate a stand-alone function?
+
+// IsUpToDate determines whether or not the content matches the resource directory.
+func (spec DirectorySpec) IsUpToDate(content Content) (bool, error) {
+	// TODO(katco): Check to see if we have latest version
+	return false, nil
+}
+
+// Initialize prepares the directory described by the spec and returns it.
+func (spec DirectorySpec) Initialize() (*Directory, error) {
+	if err := spec.Deps.MkdirAll(spec.Dirname); err != nil {
+		return nil, errors.Annotate(err, "could not create resource dir")
+	}
+
+	return NewDirectory(&spec, spec.Deps), nil
+}
+
+// DirectorySpecDeps exposes the external dependencies of DirectorySpec.
+type DirectorySpecDeps interface {
+	DirectoryDeps
+
+	// Join exposes the functionality of filepath.Join().
+	Join(...string) string
+
+	// MkdirAll exposes the functionality of os.MkdirAll().
+	MkdirAll(string) error
+}
+
+// TempDirectorySpec represents a resource directory placed under a temporary data dir.
+type TempDirectorySpec struct {
+	*DirectorySpec
+
+	// CleanUp cleans up the temp directory in which the resource
+	// directory is placed.
+	CleanUp func() error
+}
+
+// NewTempDirectorySpec creates a new temp directory spec
+// for the given resource.
+func NewTempDirectorySpec(name string, deps TempDirDeps) (*TempDirectorySpec, error) {
+	tempDir, err := deps.NewTempDir()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	spec := &TempDirectorySpec{
+		DirectorySpec: NewDirectorySpec(tempDir, name, deps),
+		CleanUp: func() error {
+			return deps.RemoveDir(tempDir)
+		},
+	}
+	return spec, nil
+}
+
+// TempDirDeps exposes the external functionality needed by
+// NewTempDirectorySpec().
+type TempDirDeps interface {
+	DirectorySpecDeps
+
+	// NewTempDir returns the path to a new temporary directory.
+	NewTempDir() (string, error)
+
+	// RemoveDir deletes the specified directory.
+	RemoveDir(string) error
+}
+
+// Close implements io.Closer.
+func (spec TempDirectorySpec) Close() error {
+	if err := spec.CleanUp(); err != nil {
+		return errors.Annotate(err, "could not clean up temp dir")
+	}
+	return nil
+}
+
+// Directory represents a resource directory.
+type Directory struct {
+	*DirectorySpec
+
+	// Deps holds the external dependencies of the directory.
+	Deps DirectoryDeps
+}
+
+// NewDirectory returns a new directory for the provided spec.
+func NewDirectory(spec *DirectorySpec, deps DirectoryDeps) *Directory {
+	dir := &Directory{
+		DirectorySpec: spec,
+		Deps:          deps,
+	}
+	return dir
+}
+
+// Write writes all relevant files from the given source
+// to the directory.
+func (dir *Directory) Write(opened ContentSource) error {
+	// TODO(ericsnow) Also write the info file...
+
+	relPath := opened.Info().Path
+	if err := dir.WriteContent(relPath, opened.Content()); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+// WriteContent writes the resource file to the given path
+// within the directory.
+func (dir *Directory) WriteContent(relPath string, content Content) error {
+	if len(relPath) == 0 {
+		// TODO(ericsnow) Use rd.readInfo().Path, like openResource() does?
+		return errors.NotImplementedf("")
+	}
+	filename := dir.Resolve(relPath)
+
+	target, err := dir.Deps.CreateWriter(filename)
+	if err != nil {
+		return errors.Annotate(err, "could not create new file for resource")
+	}
+	defer dir.Deps.CloseAndLog(target, filename)
+
+	if err := dir.Deps.WriteContent(target, content); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+// DirectoryDeps exposes the external functionality needed by Directory.
+type DirectoryDeps interface {
+	// CreateWriter creates a new writer to which the resource file
+	// will be written.
+	CreateWriter(string) (io.WriteCloser, error)
+
+	// CloseAndLog closes the closer and logs any error.
+	CloseAndLog(io.Closer, string)
+
+	// WriteContent writes the content to the directory.
+ WriteContent(io.Writer, Content) error +} === added file 'src/github.com/juju/juju/resource/context/internal/resourcedir_test.go' --- src/github.com/juju/juju/resource/context/internal/resourcedir_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/resourcedir_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,252 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal_test + +import ( + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/context/internal" + "github.com/juju/juju/resource/resourcetesting" +) + +var _ = gc.Suite(&DirectorySpecSuite{}) + +type DirectorySpecSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *DirectorySpecSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *DirectorySpecSuite) TestNewDirectorySpec(c *gc.C) { + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + + s.stub.CheckCallNames(c, "Join") + c.Check(spec, jc.DeepEquals, &internal.DirectorySpec{ + Name: "eggs", + Dirname: dataDir + "/eggs", + Deps: deps, + }) +} + +func (s *DirectorySpecSuite) TestResolveFile(c *gc.C) { + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + s.stub.ResetCalls() + + resolved := spec.Resolve("ham/ham.tgz") + + s.stub.CheckCallNames(c, "Join") + c.Check(resolved, gc.Equals, dataDir+"/eggs/ham/ham.tgz") +} + +func (s *DirectorySpecSuite) TestResolveEmpty(c *gc.C) { + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + s.stub.ResetCalls() + + resolved := spec.Resolve() + + s.stub.CheckCallNames(c, "Join") + c.Check(resolved, gc.Equals, dataDir+"/eggs") +} + +func (s *DirectorySpecSuite) TestIsUpToDate(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + s.stub.ResetCalls() + + isUpToDate, err := spec.IsUpToDate(content) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckNoCalls(c) + c.Check(isUpToDate, jc.IsFalse) // For now, always... 
+} + +func (s *DirectorySpecSuite) TestInitialize(c *gc.C) { + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + s.stub.ResetCalls() + + dir, err := spec.Initialize() + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "MkdirAll") + s.stub.CheckCall(c, 0, "MkdirAll", spec.Dirname) + c.Check(dir, jc.DeepEquals, &internal.Directory{ + DirectorySpec: spec, + Deps: deps, + }) +} + +var _ = gc.Suite(&TempDirectorySpecSuite{}) + +type TempDirectorySpecSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *TempDirectorySpecSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *TempDirectorySpecSuite) TestNewTempDirectorySpec(c *gc.C) { + s.stub.ReturnNewTempDir = "/tmp/juju-resource-xyz" + deps := s.stub + + spec, err := internal.NewTempDirectorySpec("eggs", deps) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "NewTempDir", "Join") + c.Check(spec.CleanUp, gc.NotNil) // We can't compare functions. + spec.CleanUp = nil + c.Check(spec, jc.DeepEquals, &internal.TempDirectorySpec{ + DirectorySpec: &internal.DirectorySpec{ + Name: "eggs", + Dirname: "/tmp/juju-resource-xyz/eggs", + Deps: deps, + }, + }) +} + +func (s *TempDirectorySpecSuite) TestClose(c *gc.C) { + s.stub.ReturnNewTempDir = "/tmp/juju-resource-xyz" + deps := s.stub + spec, err := internal.NewTempDirectorySpec("eggs", deps) + c.Assert(err, jc.ErrorIsNil) + s.stub.ResetCalls() + + err = spec.Close() + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "RemoveDir") +} + +var _ = gc.Suite(&DirectorySuite{}) + +type DirectorySuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *DirectorySuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *DirectorySuite) TestNewDirectory(c *gc.C) { + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + s.stub.ResetCalls() + + dir := internal.NewDirectory(spec, deps) + + s.stub.CheckNoCalls(c) + c.Check(dir, jc.DeepEquals, &internal.Directory{ + DirectorySpec: spec, + Deps: deps, + }) +} + +func (s *DirectorySuite) TestWrite(c *gc.C) { + res := resourcetesting.NewResource(c, s.stub.Stub, "spam", "a-service", "some data") + stub := &stubDirectory{ + internalStub: s.stub, + } + stub.ReturnInfo = res.Resource + opened := stub + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + s.stub.ResetCalls() + dir := internal.NewDirectory(spec, deps) + + err := dir.Write(opened) + c.Assert(err, jc.ErrorIsNil) + + stub.CheckCallNames(c, + "Info", + "Content", + "Join", + "CreateWriter", + "WriteContent", + "CloseAndLog", + ) +} + +func (s *DirectorySuite) TestWriteContent(c *gc.C) { + info, reader := newResource(c, s.stub.Stub, "spam", "some data") + content := internal.Content{ + Data: reader, + Size: info.Size, + Fingerprint: info.Fingerprint, + } + relPath := info.Path + stub := &stubDirectory{ + internalStub: s.stub, + } + dataDir := "/var/lib/juju/agents/unit-spam-1/resources" + deps := s.stub + spec := internal.NewDirectorySpec(dataDir, "eggs", deps) + dir := internal.NewDirectory(spec, deps) + s.stub.ResetCalls() + + err := dir.WriteContent(relPath, content) + c.Assert(err, jc.ErrorIsNil) + + stub.CheckCallNames(c, + "Join", + "CreateWriter", + "WriteContent", + "CloseAndLog", + ) +} + +type 
stubDirectory struct { + *internalStub + + ReturnInfo resource.Resource + ReturnContent internal.Content +} + +func (s *stubDirectory) Info() resource.Resource { + s.AddCall("Info") + s.NextErr() // Pop one off. + + return s.ReturnInfo +} + +func (s *stubDirectory) Content() internal.Content { + s.AddCall("Content") + s.NextErr() // Pop one off. + + return s.ReturnContent +} === added file 'src/github.com/juju/juju/resource/context/internal/stub_test.go' --- src/github.com/juju/juju/resource/context/internal/stub_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/stub_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,178 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal_test + +import ( + "io" + "path" + + "github.com/juju/errors" + "github.com/juju/testing" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/context/internal" +) + +type internalStub struct { + *testing.Stub + + ReturnGetResourceInfo resource.Resource + ReturnGetResourceData io.ReadCloser + ReturnNewContextDirectorySpec internal.ContextDirectorySpec + ReturnOpenResource internal.ContextOpenedResource + ReturnNewTempDirSpec internal.DownloadTempTarget + ReturnNewChecker internal.ContentChecker + ReturnCreateWriter io.WriteCloser + ReturnNewTempDir string +} + +func newInternalStub() *internalStub { + stub := &testing.Stub{} + return &internalStub{ + Stub: stub, + } +} + +func (s *internalStub) GetResource(name string) (resource.Resource, io.ReadCloser, error) { + s.Stub.AddCall("GetResource", name) + if err := s.Stub.NextErr(); err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + return s.ReturnGetResourceInfo, s.ReturnGetResourceData, nil +} + +func (s *internalStub) NewContextDirectorySpec() internal.ContextDirectorySpec { + s.Stub.AddCall("NewContextDirectorySpec") + s.Stub.NextErr() // Pop one off. + + return s.ReturnNewContextDirectorySpec +} + +func (s *internalStub) OpenResource() (internal.ContextOpenedResource, error) { + s.Stub.AddCall("OpenResource") + if err := s.Stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnOpenResource, nil +} + +func (s *internalStub) Download(target internal.DownloadTarget, remote internal.ContextOpenedResource) error { + s.Stub.AddCall("Download", target, remote) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) DownloadDirect(target internal.DownloadTarget, remote internal.ContentSource) error { + s.Stub.AddCall("DownloadDirect", target, remote) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) ReplaceDirectory(tgt, src string) error { + s.Stub.AddCall("ReplaceDirectory", tgt, src) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) NewTempDirSpec() (internal.DownloadTempTarget, error) { + s.Stub.AddCall("NewTempDirSpec") + if err := s.Stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnNewTempDirSpec, nil +} + +func (s *internalStub) NewChecker(content internal.Content) internal.ContentChecker { + s.Stub.AddCall("NewChecker", content) + s.Stub.NextErr() // Pop one off. 
+ + return s.ReturnNewChecker +} + +func (s *internalStub) WriteContent(target io.Writer, content internal.Content) error { + s.Stub.AddCall("WriteContent", target, content) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) CloseAndLog(closer io.Closer, label string) { + s.Stub.AddCall("CloseAndLog", closer, label) + s.Stub.NextErr() // Pop one off. +} + +func (s *internalStub) MkdirAll(dirname string) error { + s.Stub.AddCall("MkdirAll", dirname) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) CreateWriter(filename string) (io.WriteCloser, error) { + s.Stub.AddCall("CreateWriter", filename) + if err := s.Stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnCreateWriter, nil +} + +func (s *internalStub) NewTempDir() (string, error) { + s.Stub.AddCall("NewTempDir") + if err := s.Stub.NextErr(); err != nil { + return "", errors.Trace(err) + } + + return s.ReturnNewTempDir, nil +} + +func (s *internalStub) RemoveDir(dirname string) error { + s.Stub.AddCall("RemoveDir", dirname) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) Move(target, source string) error { + s.Stub.AddCall("Move", target, source) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) Copy(target io.Writer, source io.Reader) error { + s.Stub.AddCall("Copy", target, source) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *internalStub) Join(pth ...string) string { + s.Stub.AddCall("Join", pth) + s.Stub.NextErr() // Pop one off. + + return path.Join(pth...) +} === added file 'src/github.com/juju/juju/resource/context/internal/util.go' --- src/github.com/juju/juju/resource/context/internal/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,56 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal + +import ( + "io" + "os" + "path/filepath" + + "github.com/juju/errors" +) + +// Logger exposes the logger functionality needed by CloseAndLog. +type Logger interface { + // Errorf formats the provided log message and writes it to the log. + Errorf(string, ...interface{}) +} + +// CloseAndLog calls the closer's Close() and logs any error returned therefrom. +func CloseAndLog(closer io.Closer, label string, logger Logger) { + if closer == nil { + return + } + if err := closer.Close(); err != nil { + logger.Errorf("while closing %s: %v", label, err) + } +} + +// ReplaceDirectory replaces the target directory with the source. This +// involves removing the target if it exists and then moving the source +// into place. +func ReplaceDirectory(targetDir, sourceDir string, deps ReplaceDirectoryDeps) error { + // TODO(ericsnow) Move it out of the way and remove it after the rename. + if err := deps.RemoveDir(targetDir); err != nil { + return errors.Trace(err) + } + + if err := os.MkdirAll(filepath.Dir(targetDir), 0755); err != nil { + return errors.Trace(err) + } + + if err := deps.Move(targetDir, sourceDir); err != nil { + return errors.Trace(err) + } + return nil +} + +// ReplaceDirectoryDeps exposes the functionality needed by ReplaceDirectory. +type ReplaceDirectoryDeps interface { + // RemoveDir deletes the directory at the given path. 
+ RemoveDir(dirname string) error + + // Move moves the directory at the source path to the target path. + Move(target, source string) error +} === added file 'src/github.com/juju/juju/resource/context/internal/util_test.go' --- src/github.com/juju/juju/resource/context/internal/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/internal/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,73 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package internal_test + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/resource/context/internal" +) + +var _ = gc.Suite(&UtilSuite{}) + +type UtilSuite struct { + testing.IsolationSuite + + stub *internalStub +} + +func (s *UtilSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = newInternalStub() +} + +func (s *UtilSuite) TestCloseAndLogNoError(c *gc.C) { + closer := &filetesting.StubCloser{Stub: s.stub.Stub} + logger := &stubLogger{Stub: s.stub.Stub} + + internal.CloseAndLog(closer, "a thing", logger) + + s.stub.CheckCallNames(c, "Close") +} + +func (s *UtilSuite) TestCloseAndLog(c *gc.C) { + closer := &filetesting.StubCloser{Stub: s.stub.Stub} + logger := &stubLogger{Stub: s.stub.Stub} + failure := errors.New("") + s.stub.SetErrors(failure) + + internal.CloseAndLog(closer, "a thing", logger) + + s.stub.CheckCallNames(c, "Close", "Errorf") + c.Check(logger.logged, gc.Equals, "while closing a thing: ") +} + +func (s *UtilSuite) TestReplaceDirectory(c *gc.C) { + deps := s.stub + + err := internal.ReplaceDirectory("target_dir", "source_dir", deps) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "RemoveDir", "Move") +} + +type stubLogger struct { + *testing.Stub + + logged string +} + +func (s *stubLogger) Errorf(msg string, args ...interface{}) { + s.AddCall("Errorf", msg, args) + s.NextErr() // Pop one off. + + s.logged = fmt.Sprintf(msg, args...) +} === added file 'src/github.com/juju/juju/resource/context/package_test.go' --- src/github.com/juju/juju/resource/context/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/context/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/opened.go' --- src/github.com/juju/juju/resource/opened.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/opened.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,32 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource + +// TODO(ericsnow) Move this file to the charm repo? + +import ( + "io" +) + +// Opened provides both the resource info and content. +type Opened struct { + Resource + io.ReadCloser +} + +// Content returns the "content" for the opened resource. +func (o Opened) Content() Content { + return Content{ + Data: o.ReadCloser, + Size: o.Size, + Fingerprint: o.Fingerprint, + } +} + +// Opener exposes the functionality for opening a resource. +type Opener interface { + // OpenResource returns an opened resource with a reader that will + // stream the resource content. 
+	OpenResource(name string) (Opened, error)
+}

=== added file 'src/github.com/juju/juju/resource/package_test.go'
--- src/github.com/juju/juju/resource/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resource_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/juju/resource/resource.go'
--- src/github.com/juju/juju/resource/resource.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resource.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,198 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package resource provides the functionality of the "resources"
+// feature in Juju.
+package resource
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"gopkg.in/juju/charm.v6-unstable/resource"
+)
+
+// Resource defines a single resource within a Juju model.
+//
+// Each service will have exactly the same resources associated
+// with it as are defined in the charm's metadata, no more, no less.
+// When associated with the service, the resource may have additional
+// information attached to it.
+//
+// A resource may be a "placeholder", meaning it is only partially
+// populated before an upload (whether local or from the charm store).
+// In that case the following fields are not set:
+//
+//   Timestamp
+//   Username
+//
+// For "upload" placeholders, the following additional fields are
+// not set:
+//
+//   Fingerprint
+//   Size
+//
+// A resource may also be added to the model as "pending", meaning it
+// is queued up to be used as a resource for the service. Until it is
+// "activated", a pending resource is virtually invisible. There may
+// be more than one pending resource for a given resource ID.
+type Resource struct {
+	resource.Resource
+
+	// ID uniquely identifies a resource-service pair within the model.
+	// Note that the model ignores pending resources (those with a
+	// pending ID) except for in a few clearly pending-related places.
+	// ID may be empty if the ID (assigned by the model) is not known.
+	ID string
+
+	// PendingID identifies that this resource is pending and
+	// distinguishes it from other pending resources with the same model
+	// ID (and from the active resource). The active resource for the
+	// service will not have PendingID set.
+	PendingID string
+
+	// TODO(ericsnow) Use names.ServiceTag for ServiceID?
+
+	// ServiceID identifies the service for the resource.
+	ServiceID string
+
+	// TODO(ericsnow) Use names.UserTag for Username?
+
+	// Username is the ID of the user that added the revision
+	// to the model (whether implicitly or explicitly).
+	Username string
+
+	// Timestamp indicates when the resource was added to the model.
+	Timestamp time.Time
+}
+
+// Validate ensures that the spec is valid.
+func (res Resource) Validate() error {
+	// TODO(ericsnow) Ensure that the "placeholder" fields are not set
+	// if IsLocalPlaceholder() returns true (and that they *are* set
+	// otherwise)? Also ensure an "upload" origin in the "placeholder"
+	// case?
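+	//
+	// For reference, a fully-populated (active, non-placeholder)
+	// resource looks roughly like this (values illustrative):
+	//
+	//	res := Resource{
+	//		Resource:  chRes, // charm-level info: meta, origin, revision
+	//		ID:        "a-service/spam",
+	//		ServiceID: "a-service",
+	//		Username:  "a-user",
+	//		Timestamp: time.Now(),
+	//	}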
+
+	if err := res.Resource.Validate(); err != nil {
+		return errors.Annotate(err, "bad info")
+	}
+
+	if res.ServiceID == "" {
+		return errors.NewNotValid(nil, "missing service ID")
+	}
+
+	// TODO(ericsnow) Require that Username be set if timestamp is?
+
+	if res.Timestamp.IsZero() && res.Username != "" {
+		return errors.NewNotValid(nil, "missing timestamp")
+	}
+
+	return nil
+}
+
+// IsPlaceholder indicates whether or not the resource is a
+// "placeholder" (partially populated pending an upload).
+func (res Resource) IsPlaceholder() bool {
+	return res.Timestamp.IsZero()
+}
+
+// TimestampGranular returns the timestamp at a resolution of 1 second.
+func (res Resource) TimestampGranular() time.Time {
+	return time.Unix(res.Timestamp.Unix(), 0)
+}
+
+// RevisionString returns the human-readable revision for the resource.
+func (res Resource) RevisionString() string {
+	switch res.Origin {
+	case resource.OriginUpload:
+		if res.IsPlaceholder() {
+			return "-"
+		}
+		return res.TimestampGranular().UTC().String()
+	case resource.OriginStore:
+		return fmt.Sprintf("%d", res.Revision)
+	default:
+		// Note: this should probably never happen.
+		return "-"
+	}
+}
+
+// ServiceResources contains the list of resources for the service and all its
+// units.
+type ServiceResources struct {
+	// Resources are the current version of the resource for the service that
+	// resource-get will retrieve.
+	Resources []Resource
+
+	// CharmStoreResources provides the resource info from the charm
+	// store for each of the service's resources. The information from
+	// the charm store is current as of the last time the charm store
+	// was polled. Each entry here corresponds to the same indexed entry
+	// in the Resources field.
+	CharmStoreResources []resource.Resource
+
+	// UnitResources reports the currently-in-use version of resources for each
+	// unit.
+	UnitResources []UnitResources
+}
+
+// Updates returns the list of charm store resources corresponding to
+// the service's resources that are out of date. Note that there must be
+// a charm store resource for each of the service resources and
+// vice-versa. If they are out of sync then an error is returned.
+func (sr ServiceResources) Updates() ([]resource.Resource, error) {
+	storeResources, err := sr.alignStoreResources()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var updates []resource.Resource
+	for i, res := range sr.Resources {
+		if res.Origin != resource.OriginStore {
+			continue
+		}
+		csRes := storeResources[i]
+		// If the revision is the same then all the other info must be.
+		if res.Revision == csRes.Revision {
+			continue
+		}
+		updates = append(updates, csRes)
+	}
+	return updates, nil
+}
+
+func (sr ServiceResources) alignStoreResources() ([]resource.Resource, error) {
+	if len(sr.CharmStoreResources) > len(sr.Resources) {
+		return nil, errors.Errorf("have more charm store resources than service resources")
+	}
+	if len(sr.CharmStoreResources) < len(sr.Resources) {
+		return nil, errors.Errorf("have fewer charm store resources than service resources")
+	}
+
+	var store []resource.Resource
+	for _, res := range sr.Resources {
+		found := false
+		for _, chRes := range sr.CharmStoreResources {
+			if chRes.Name == res.Name {
+				store = append(store, chRes)
+				found = true
+				break
+			}
+		}
+		if !found {
+			return nil, errors.Errorf("charm store resource %q not found", res.Name)
+		}
+	}
+	return store, nil
+}
+
+// UnitResources contains the list of resources used by a unit.
+type UnitResources struct {
+	// Tag is the tag of the unit.
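+	// (A unit tag serializes as, e.g., "unit-a-service-0" for unit
+	// "a-service/0".)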
+ Tag names.UnitTag + + // Resources are the resource versions currently in use by this unit. + Resources []Resource +} === added file 'src/github.com/juju/juju/resource/resource_test.go' --- src/github.com/juju/juju/resource/resource_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/resource_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,302 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/resourcetesting" +) + +type ResourceSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&ResourceSuite{}) + +func (ResourceSuite) TestValidateUploadUsed(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ID: "a-service/spam", + ServiceID: "a-service", + Username: "a-user", + Timestamp: time.Now(), + } + + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (ResourceSuite) TestValidateUploadNotUsed(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ID: "a-service/spam", + ServiceID: "a-service", + } + + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (ResourceSuite) TestValidateUploadPending(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ID: "a-service/spam", + PendingID: "some-unique-ID", + ServiceID: "a-service", + Username: "a-user", + Timestamp: time.Now(), + } + + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (ResourceSuite) TestValidateZeroValue(c *gc.C) { + var res resource.Resource + + err := res.Validate() + + c.Check(errors.Cause(err), jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*bad info.*`) +} + +func (ResourceSuite) TestValidateBadInfo(c *gc.C) { + var charmRes charmresource.Resource + c.Assert(charmRes.Validate(), gc.NotNil) + + res := resource.Resource{ + Resource: charmRes, + } + + err := res.Validate() + + c.Check(errors.Cause(err), jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*bad info.*`) +} + +func (ResourceSuite) TestValidateMissingID(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ServiceID: "a-service", + Username: "a-user", + Timestamp: time.Now(), + } + + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (ResourceSuite) TestValidateMissingServiceID(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ID: "a-service/spam", + Username: "a-user", + Timestamp: time.Now(), + } + + err := res.Validate() + + c.Check(errors.Cause(err), jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*missing service ID.*`) +} + +func (ResourceSuite) TestValidateMissingUsername(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ID: "a-service/spam", + ServiceID: "a-service", + Username: "", + Timestamp: time.Now(), + } + + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (ResourceSuite) TestValidateMissingTimestamp(c *gc.C) { + res := resource.Resource{ + Resource: newFullCharmResource(c, "spam"), + ID: "a-service/spam", + ServiceID: "a-service", + Username: "a-user", + Timestamp: time.Time{}, + } + + err := res.Validate() + + c.Check(errors.Cause(err), jc.Satisfies, 
errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*missing timestamp.*`) +} + +func (ResourceSuite) TestRevisionStringNone(c *gc.C) { + res := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "foo", + Type: charmresource.TypeFile, + Path: "foo.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + }, + ServiceID: "svc", + } + + err := res.Validate() + c.Check(err, jc.ErrorIsNil) + + c.Check(res.RevisionString(), gc.Equals, "-") +} + +func (ResourceSuite) TestRevisionStringTime(c *gc.C) { + res := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "foo", + Type: charmresource.TypeFile, + Path: "foo.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + }, + ServiceID: "svc", + Username: "a-user", + Timestamp: time.Date(2012, 7, 8, 15, 59, 5, 5, time.UTC), + } + + err := res.Validate() + c.Check(err, jc.ErrorIsNil) + + c.Check(res.RevisionString(), gc.Equals, "2012-07-08 15:59:05 +0000 UTC") +} + +func (ResourceSuite) TestRevisionStringNumber(c *gc.C) { + res := resource.Resource{ + Resource: charmresource.Resource{ + Meta: charmresource.Meta{ + Name: "foo", + Type: charmresource.TypeFile, + Path: "foo.tgz", + Description: "you need it", + }, + Origin: charmresource.OriginStore, + Revision: 7, + }, + ServiceID: "svc", + Username: "a-user", + Timestamp: time.Date(2012, 7, 8, 15, 59, 5, 5, time.UTC), + } + + err := res.Validate() + c.Check(err, jc.ErrorIsNil) + + c.Check(res.RevisionString(), gc.Equals, "7") +} + +func (s *ResourceSuite) TestUpdatesUploaded(c *gc.C) { + csRes := newStoreResource(c, "spam", "a-service", 2) + res := csRes // a copy + res.Origin = charmresource.OriginUpload + sr := resource.ServiceResources{ + Resources: []resource.Resource{ + res, + }, + CharmStoreResources: []charmresource.Resource{ + csRes.Resource, + }, + } + + updates, err := sr.Updates() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updates, gc.HasLen, 0) +} + +func (s *ResourceSuite) TestUpdatesDifferent(c *gc.C) { + spam := newStoreResource(c, "spam", "a-service", 2) + eggs := newStoreResource(c, "eggs", "a-service", 3) + expected := eggs.Resource + expected.Revision += 1 + sr := resource.ServiceResources{ + Resources: []resource.Resource{ + spam, + eggs, + }, + CharmStoreResources: []charmresource.Resource{ + spam.Resource, + expected, + }, + } + + updates, err := sr.Updates() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updates, jc.DeepEquals, []charmresource.Resource{expected}) +} + +func (s *ResourceSuite) TestUpdatesBadOrdering(c *gc.C) { + spam := newStoreResource(c, "spam", "a-service", 2) + eggs := newStoreResource(c, "eggs", "a-service", 3) + expected := eggs.Resource + expected.Revision += 1 + sr := resource.ServiceResources{ + Resources: []resource.Resource{ + spam, + eggs, + }, + CharmStoreResources: []charmresource.Resource{ + expected, + spam.Resource, + }, + } + + updates, err := sr.Updates() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updates, jc.DeepEquals, []charmresource.Resource{expected}) +} + +func (s *ResourceSuite) TestUpdatesNone(c *gc.C) { + spam := newStoreResource(c, "spam", "a-service", 2) + eggs := newStoreResource(c, "eggs", "a-service", 3) + sr := resource.ServiceResources{ + Resources: []resource.Resource{ + spam, + eggs, + }, + CharmStoreResources: []charmresource.Resource{ + spam.Resource, + eggs.Resource, + }, + } + + updates, err := sr.Updates() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updates, gc.HasLen, 0) +} + +func newStoreResource(c 
*gc.C, name, serviceID string, revision int) resource.Resource { + content := name + opened := resourcetesting.NewResource(c, nil, name, serviceID, content) + res := opened.Resource + res.Origin = charmresource.OriginStore + res.Revision = revision + err := res.Validate() + c.Assert(err, jc.ErrorIsNil) + return res +} === added directory 'src/github.com/juju/juju/resource/resourceadapters' === added file 'src/github.com/juju/juju/resource/resourceadapters/apiclient.go' --- src/github.com/juju/juju/resource/resourceadapters/apiclient.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/resourceadapters/apiclient.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resourceadapters + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/api/client" + "github.com/juju/juju/resource/api/server" +) + +// NewAPIClient is mostly a copy of the newClient code in +// component/all/resources.go. It lives here because it simplifies this code +// immensely. +func NewAPIClient(newAPICaller func() (api.Connection, error)) (*client.Client, error) { + apiCaller, err := newAPICaller() + if err != nil { + return nil, errors.Trace(err) + } + + return newAPIClient(apiCaller) +} + +func newAPIClient(apiCaller api.Connection) (*client.Client, error) { + caller := base.NewFacadeCallerForVersion(apiCaller, resource.ComponentName, server.Version) + + httpClient, err := apiCaller.HTTPClient() + if err != nil { + return nil, errors.Trace(err) + } + // The apiCaller takes care of prepending /environment/. + apiClient := client.NewClient(caller, httpClient, apiCaller) + return apiClient, nil +} === added file 'src/github.com/juju/juju/resource/resourceadapters/apiserver.go' --- src/github.com/juju/juju/resource/resourceadapters/apiserver.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/resourceadapters/apiserver.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,47 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resourceadapters + +import ( + "net/http" + + "github.com/juju/errors" + + "github.com/juju/juju/resource" + corestate "github.com/juju/juju/state" +) + +// StateConnector exposes ways to connect to Juju's state. +type StateConnector interface { + // ConnectForUnitAgent connects to state for a unit agent. + ConnectForUnitAgent(*http.Request) (*corestate.State, *corestate.Unit, error) +} + +// HTTPDownloadRequestExtractor provides the functionality needed to +// handle a resource download HTTP request. +type HTTPDownloadRequestExtractor struct { + // Connector provides connections to Juju's state. + Connector StateConnector +} + +// NewResourceOpener returns a new resource.Opener for the given +// HTTP request. 
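+//
+// A rough wiring sketch (the connector and request values are
+// illustrative, not defined in this file):
+//
+//	extractor := HTTPDownloadRequestExtractor{Connector: connector}
+//	opener, err := extractor.NewResourceOpener(req)
+//	if err != nil {
+//		return errors.Trace(err)
+//	}
+//	opened, err := opener.OpenResource("website-data")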
+func (ex HTTPDownloadRequestExtractor) NewResourceOpener(req *http.Request) (resource.Opener, error) {
+	st, unit, err := ex.Connector.ConnectForUnitAgent(req)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	resources, err := st.Resources()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	opener := &resourceOpener{
+		st:     resources,
+		userID: unit.Tag(),
+		unit:   unit,
+	}
+	return opener, nil
+}

=== added file 'src/github.com/juju/juju/resource/resourceadapters/charmstore.go'
--- src/github.com/juju/juju/resource/resourceadapters/charmstore.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resourceadapters/charmstore.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,119 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resourceadapters
+
+import (
+	"io"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"github.com/juju/retry"
+	"github.com/juju/utils/clock"
+	"gopkg.in/juju/charm.v6-unstable"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/resource"
+	"github.com/juju/juju/resource/charmstore"
+	corestate "github.com/juju/juju/state"
+)
+
+// charmstoreEntityCache adapts between resource state and charmstore.EntityCache.
+type charmstoreEntityCache struct {
+	st        corestate.Resources
+	userID    names.Tag
+	unit      resource.Unit
+	serviceID string
+}
+
+// GetResource implements charmstore.EntityCache.
+func (cache *charmstoreEntityCache) GetResource(name string) (resource.Resource, error) {
+	return cache.st.GetResource(cache.serviceID, name)
+}
+
+// SetResource implements charmstore.EntityCache.
+func (cache *charmstoreEntityCache) SetResource(chRes charmresource.Resource, reader io.Reader) (resource.Resource, error) {
+	return cache.st.SetResource(cache.serviceID, cache.userID.Id(), chRes, reader)
+}
+
+// OpenResource implements charmstore.EntityCache.
+func (cache *charmstoreEntityCache) OpenResource(name string) (resource.Resource, io.ReadCloser, error) {
+	if cache.unit == nil {
+		return resource.Resource{}, nil, errors.NotImplementedf("")
+	}
+	return cache.st.OpenResourceForUniter(cache.unit, name)
+}
+
+type charmstoreOpener struct {
+	// TODO(ericsnow) What do we need?
+}
+
+func newCharmstoreOpener(cURL *charm.URL) *charmstoreOpener {
+	// TODO(ericsnow) Extract the charm store URL from the charm URL.
+	return &charmstoreOpener{}
+}
+
+// NewClient opens a new charm store client.
+func (cs *charmstoreOpener) NewClient() (charmstore.Client, error) {
+	// TODO(ericsnow) Return an actual charm store client.
+	client := newFakeCharmStoreClient(nil)
+	return newCSRetryClient(client), nil
+}
+
+type csRetryClient struct {
+	charmstore.Client
+	retryArgs retry.CallArgs
+}
+
+func newCSRetryClient(client charmstore.Client) *csRetryClient {
+	retryArgs := retry.CallArgs{
+		// The only error that stops the retry loop should be "not found".
+		IsFatalError: errors.IsNotFound,
+		// We want to retry until the charm store either gives us the
+		// resource (and we cache it) or the resource isn't found in the
+		// charm store.
+		Attempts: -1, // retry forever...
+		// A one-minute delay gives enough time for potential connection
+		// issues to sort themselves out without making the caller wait
+		// for an exceptional amount of time.
+		Delay: 1 * time.Minute,
+		Clock: clock.WallClock,
+	}
+	return &csRetryClient{
+		Client:    client,
+		retryArgs: retryArgs,
+	}
+}
+
+// GetResource returns a reader for the resource's data.
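+//
+// With the retry.CallArgs configured above, the call behaves roughly
+// like this sketch of the github.com/juju/retry loop (illustrative,
+// not literal code from that package):
+//
+//	for attempt := 1; ; attempt++ {
+//		reader, err := wrapped.GetResource(cURL, resourceName, revision)
+//		if err == nil {
+//			return reader, nil
+//		}
+//		if errors.IsNotFound(err) {
+//			return nil, err // fatal: stop retrying
+//		}
+//		notify(err, attempt) // logs and records lastErr
+//		clock.Sleep(time.Minute)
+//	}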
+func (client csRetryClient) GetResource(cURL *charm.URL, resourceName string, revision int) (io.ReadCloser, error) {
+	args := client.retryArgs // a copy
+
+	var reader io.ReadCloser
+	args.Func = func() error {
+		csReader, err := client.Client.GetResource(cURL, resourceName, revision)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		reader = csReader
+		return nil
+	}
+
+	var lastErr error
+	args.NotifyFunc = func(err error, i int) {
+		// Remember the error we're hiding and then retry!
+		logger.Debugf("(attempt %d) retrying resource download from charm store due to error: %v", i, err)
+		lastErr = err
+	}
+
+	err := retry.Call(args)
+	if retry.IsAttemptsExceeded(err) {
+		return nil, errors.Annotate(lastErr, "failed after retrying")
+	}
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return reader, nil
+}

=== added file 'src/github.com/juju/juju/resource/resourceadapters/deploy.go'
--- src/github.com/juju/juju/resource/resourceadapters/deploy.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resourceadapters/deploy.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,28 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resourceadapters
+
+import (
+	"github.com/juju/errors"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/api"
+	"github.com/juju/juju/resource/cmd"
+)
+
+// DeployResources uploads the bytes for the given files to the server and
+// creates pending resource metadata for all resources mentioned in the
+// metadata. It returns a map of resource name to pending resource IDs.
+func DeployResources(serviceID string, files map[string]string, resources map[string]charmresource.Meta, conn api.Connection) (ids map[string]string, err error) {
+	client, err := newAPIClient(conn)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	ids, err = cmd.DeployResources(serviceID, files, resources, client)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return ids, nil
+}

=== added file 'src/github.com/juju/juju/resource/resourceadapters/fakes.go'
--- src/github.com/juju/juju/resource/resourceadapters/fakes.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resourceadapters/fakes.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,68 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resourceadapters
+
+import (
+	"io"
+
+	"github.com/juju/errors"
+	"gopkg.in/juju/charm.v6-unstable"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/cmd/juju/charmcmd"
+	"github.com/juju/juju/resource/charmstore"
+	"github.com/juju/juju/resource/cmd"
+)
+
+// TODO(ericsnow) Get rid of fakeCharmStoreClient once csclient.Client grows the methods.
+
+type baseCharmStoreClient interface {
+	io.Closer
+}
+
+func newFakeCharmStoreClient(base baseCharmStoreClient) charmstore.Client {
+	return &fakeCharmStoreClient{base}
+}
+
+type fakeCharmStoreClient struct {
+	baseCharmStoreClient
+}
+
+// ListResources implements resource/charmstore.Client as a noop.
+func (fakeCharmStoreClient) ListResources(charmURLs []*charm.URL) ([][]charmresource.Resource, error) {
+	res := make([][]charmresource.Resource, len(charmURLs))
+	return res, nil
+}
+
+// GetResource implements resource/charmstore.Client as a noop.
+func (fakeCharmStoreClient) GetResource(cURL *charm.URL, resourceName string, revision int) (io.ReadCloser, error) { + return nil, errors.NotFoundf("resource %q", resourceName) +} + +// Close implements io.Closer. +func (client fakeCharmStoreClient) Close() error { + if client.baseCharmStoreClient == nil { + return nil + } + return client.baseCharmStoreClient.Close() +} + +// TODO(ericsnow) Get rid of fakeCharmCmdBase once csclient.Client grows the methods. + +type fakeCharmCmdBase struct { + *charmcmd.CommandBase +} + +func NewFakeCharmCmdBase(base *charmcmd.CommandBase) cmd.CharmCommandBase { + return &fakeCharmCmdBase{base} +} + +// Connect implements cmd.CommandBase. +func (c *fakeCharmCmdBase) Connect() (cmd.CharmResourceLister, error) { + client, err := c.CommandBase.Connect() + if err != nil { + return nil, errors.Trace(err) + } + return newFakeCharmStoreClient(client), nil +} === added file 'src/github.com/juju/juju/resource/resourceadapters/opener.go' --- src/github.com/juju/juju/resource/resourceadapters/opener.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/resourceadapters/opener.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resourceadapters + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/charmstore" + corestate "github.com/juju/juju/state" +) + +// resourceOpener is an implementation of server.ResourceOpener. +type resourceOpener struct { + st corestate.Resources + userID names.Tag + unit resource.Unit +} + +// OpenResource implements server.ResourceOpener. +func (ro *resourceOpener) OpenResource(name string) (resource.Opened, error) { + if ro.unit == nil { + return resource.Opened{}, errors.Errorf("missing unit") + } + cURL, _ := ro.unit.CharmURL() + + csOpener := newCharmstoreOpener(cURL) + client, err := csOpener.NewClient() + if err != nil { + return resource.Opened{}, errors.Trace(err) + } + defer client.Close() + + cache := &charmstoreEntityCache{ + st: ro.st, + userID: ro.userID, + unit: ro.unit, + serviceID: ro.unit.ServiceName(), + } + + res, reader, err := charmstore.GetResource(charmstore.GetResourceArgs{ + Client: client, + Cache: cache, + CharmURL: cURL, + Name: name, + }) + if err != nil { + return resource.Opened{}, errors.Trace(err) + } + + opened := resource.Opened{ + Resource: res, + ReadCloser: reader, + } + return opened, nil +} === added file 'src/github.com/juju/juju/resource/resourceadapters/resourceadapters.go' --- src/github.com/juju/juju/resource/resourceadapters/resourceadapters.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/resourceadapters/resourceadapters.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resourceadapters + +import ( + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("juju.resource.resourceadapters") === added file 'src/github.com/juju/juju/resource/resourceadapters/state.go' --- src/github.com/juju/juju/resource/resourceadapters/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/resourceadapters/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package resourceadapters
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"gopkg.in/juju/charm.v6-unstable"
+
+	"github.com/juju/juju/state"
+)
+
+type service struct {
+	*state.Service
+}
+
+func (s *service) ID() names.ServiceTag {
+	return names.NewServiceTag(s.Name())
+}
+
+// CharmURL implements resource/workers.Service.
+func (s *service) CharmURL() *charm.URL {
+	cURL, _ := s.Service.CharmURL()
+	return cURL
+}
+
+// DataStore implements functionality wrapping state for resources.
+type DataStore struct {
+	state.Resources
+	State *state.State
+}
+
+// Units returns the tags for all units in the service.
+func (d DataStore) Units(serviceID string) (tags []names.UnitTag, err error) {
+	svc, err := d.State.Service(serviceID)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	units, err := svc.AllUnits()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	for _, u := range units {
+		tags = append(tags, u.UnitTag())
+	}
+	return tags, nil
+}

=== added file 'src/github.com/juju/juju/resource/resourceadapters/workers.go'
--- src/github.com/juju/juju/resource/resourceadapters/workers.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resourceadapters/workers.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resourceadapters
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/resource/workers"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/worker"
+)
+
+// WorkerFactory is an implementation of cmd/jujud/agent.WorkerFactory
+// for resources.
+type WorkerFactory struct{}
+
+// NewWorkerFactory returns a new worker factory for resources.
+func NewWorkerFactory() *WorkerFactory {
+	return &WorkerFactory{}
+}
+
+// NewModelWorker implements cmd/jujud/agent.WorkerFactory.
+func (wf WorkerFactory) NewModelWorker(st *state.State) func() (worker.Worker, error) {
+	wfs := &workerFactoryState{st: st}
+	csOpener := charmstoreOpener{}
+	poller := workers.NewCharmStorePoller(wfs, func() (workers.CharmStoreClient, error) {
+		return csOpener.NewClient()
+	})
+	newWorker := func() (worker.Worker, error) {
+		return poller.NewWorker(), nil
+	}
+	return newWorker
+}
+
+type workerFactoryState struct {
+	st *state.State
+}
+
+// ListAllServices returns all the services in the model.
+func (wfs *workerFactoryState) ListAllServices() ([]workers.Service, error) {
+	var services []workers.Service
+	actual, err := wfs.st.AllServices()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	for _, svc := range actual {
+		services = append(services, &service{svc})
+	}
+	return services, nil
+}
+
+// SetCharmStoreResources sets the "polled from the charm store"
+// resources for the service to the provided values.
+func (wfs *workerFactoryState) SetCharmStoreResources(serviceID string, info []charmresource.Resource, lastPolled time.Time) error {
+	resources, err := wfs.st.Resources()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return resources.SetCharmStoreResources(serviceID, info, lastPolled)
+}

=== added directory 'src/github.com/juju/juju/resource/resourcetesting'
=== added file 'src/github.com/juju/juju/resource/resourcetesting/resource.go'
--- src/github.com/juju/juju/resource/resourcetesting/resource.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resourcetesting/resource.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,93 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package resourcetesting provides helpers for testing with resources.
+package resourcetesting
+
+import (
+	"io"
+	"strings"
+	"time"
+
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/filetesting"
+	gc "gopkg.in/check.v1"
+	charmresource "gopkg.in/juju/charm.v6-unstable/resource"
+
+	"github.com/juju/juju/resource"
+)
+
+// NewResource produces full resource info for the given name and
+// content. The origin is set to "upload". A reader is also returned
+// which contains the content.
+func NewResource(c *gc.C, stub *testing.Stub, name, serviceID, content string) resource.Opened {
+	username := "a-user"
+	return resource.Opened{
+		Resource:   newResource(c, name, serviceID, username, content),
+		ReadCloser: newStubReadCloser(stub, content),
+	}
+}

+// NewCharmResource produces basic resource info for the given name
+// and content. The origin is set to "upload".
+func NewCharmResource(c *gc.C, name, content string) charmresource.Resource {
+	fp, err := charmresource.GenerateFingerprint(strings.NewReader(content))
+	c.Assert(err, jc.ErrorIsNil)
+	res := charmresource.Resource{
+		Meta: charmresource.Meta{
+			Name: name,
+			Type: charmresource.TypeFile,
+			Path: name + ".tgz",
+		},
+		Origin:      charmresource.OriginUpload,
+		Revision:    0,
+		Fingerprint: fp,
+		Size:        int64(len(content)),
+	}
+	err = res.Validate()
+	c.Assert(err, jc.ErrorIsNil)
+
+	return res
+}
+
+// NewPlaceholderResource returns resource info for a resource that
+// has not been uploaded or pulled from the charm store yet. The origin
+// is set to "upload".
+func NewPlaceholderResource(c *gc.C, name, serviceID string) resource.Resource {
+	res := newResource(c, name, serviceID, "", "")
+	res.Fingerprint = charmresource.Fingerprint{}
+	return res
+}
+
+func newResource(c *gc.C, name, serviceID, username, content string) resource.Resource {
+	var timestamp time.Time
+	if username != "" {
+		timestamp = time.Now().UTC()
+	}
+	res := resource.Resource{
+		Resource:  NewCharmResource(c, name, content),
+		ID:        serviceID + "/" + name,
+		PendingID: "",
+		ServiceID: serviceID,
+		Username:  username,
+		Timestamp: timestamp,
+	}
+	err := res.Validate()
+	c.Assert(err, jc.ErrorIsNil)
+	return res
+}
+
+type stubReadCloser struct {
+	io.Reader
+	io.Closer
+}
+
+func newStubReadCloser(stub *testing.Stub, content string) io.ReadCloser {
+	return &stubReadCloser{
+		Reader: filetesting.NewStubReader(stub, content),
+		Closer: &filetesting.StubCloser{
+			Stub: stub,
+		},
+	}
+}

=== added file 'src/github.com/juju/juju/resource/resourcetesting/state.go'
--- src/github.com/juju/juju/resource/resourcetesting/state.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/resourcetesting/state.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,43 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resourcetesting
+
+import (
+	"github.com/juju/testing"
+	"gopkg.in/juju/charm.v6-unstable"
+)
+
+// StubUnit is a testing implementation of resource.Unit.
+type StubUnit struct {
+	*testing.Stub
+
+	ReturnName        string
+	ReturnServiceName string
+	ReturnCharmURL    *charm.URL
+}
+
+// Name implements resource.Unit.
+func (s *StubUnit) Name() string {
+	s.AddCall("Name")
+	s.NextErr() // Pop one off.
+
+	return s.ReturnName
+}
+
+// ServiceName implements resource.Unit.
+func (s *StubUnit) ServiceName() string {
+	s.AddCall("ServiceName")
+	s.NextErr() // Pop one off.
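+	// (Methods that cannot fail still pop an error so that a single
+	// testing.Stub SetErrors sequence stays aligned across all
+	// recorded calls.)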
+ + return s.ReturnServiceName +} + +// CharmURL implements resource.Unit. +func (s *StubUnit) CharmURL() (*charm.URL, bool) { + s.AddCall("CharmURL") + s.NextErr() // Pop one off. + + forceCharm := false + return s.ReturnCharmURL, forceCharm +} === added file 'src/github.com/juju/juju/resource/serialization.go' --- src/github.com/juju/juju/resource/serialization.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/serialization.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource + +import ( + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable/resource" +) + +// DeserializeFingerprint converts the serialized fingerprint back into +// a Fingerprint. "zero" values are treated appropriately. +func DeserializeFingerprint(fpSum []byte) (resource.Fingerprint, error) { + var fp resource.Fingerprint + if len(fpSum) != 0 { + var err error + fp, err = resource.NewFingerprint(fpSum) + if err != nil { + return fp, errors.Trace(err) + } + } + return fp, nil +} === added file 'src/github.com/juju/juju/resource/serialization_test.go' --- src/github.com/juju/juju/resource/serialization_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/serialization_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource_test + +import ( + "strings" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +type SerializationSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&SerializationSuite{}) + +func (s *SerializationSuite) TestDeserializeFingerprintOkay(c *gc.C) { + content := "some data\n..." + expected, err := charmresource.GenerateFingerprint(strings.NewReader(content)) + c.Assert(err, jc.ErrorIsNil) + + fp, err := resource.DeserializeFingerprint(expected.Bytes()) + c.Assert(err, jc.ErrorIsNil) + + c.Check(fp, jc.DeepEquals, expected) +} + +func (s *SerializationSuite) TestDeserializeFingerprintInvalid(c *gc.C) { + _, err := resource.DeserializeFingerprint([]byte("")) + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *SerializationSuite) TestDeserializeFingerprintZeroValue(c *gc.C) { + fp, err := resource.DeserializeFingerprint(nil) + c.Assert(err, jc.ErrorIsNil) + + c.Check(fp, jc.DeepEquals, charmresource.Fingerprint{}) +} === added directory 'src/github.com/juju/juju/resource/state' === added file 'src/github.com/juju/juju/resource/state/package_test.go' --- src/github.com/juju/juju/resource/state/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/state/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/resource/state/resource.go' --- src/github.com/juju/juju/resource/state/resource.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/state/resource.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,380 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package state + +// TODO(ericsnow) Figure out a way to drop the txn dependency here? + +import ( + "fmt" + "io" + "path" + "time" + + "github.com/juju/errors" + "github.com/juju/utils" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/resource" +) + +type resourcePersistence interface { + // ListResources returns the resource data for the given service ID. + // None of the resources will be pending. + ListResources(serviceID string) (resource.ServiceResources, error) + + // ListPendingResources returns the resource data for the given + // service ID. + ListPendingResources(serviceID string) ([]resource.Resource, error) + + // GetResource returns the extended, model-related info for the + // non-pending resource. + GetResource(id string) (res resource.Resource, storagePath string, _ error) + + // StageResource adds the resource in a separate staging area + // if the resource isn't already staged. If the resource already + // exists then it is treated as unavailable as long as the new one + // is staged. + StageResource(res resource.Resource, storagePath string) (StagedResource, error) + + // SetResource stores the info for the resource. + SetResource(args resource.Resource) error + + // SetCharmStoreResource stores the resource info that was retrieved + // from the charm store. + SetCharmStoreResource(id, serviceID string, res charmresource.Resource, lastPolled time.Time) error + + // SetUnitResource stores the resource info for a unit. + SetUnitResource(unitID string, args resource.Resource) error + + // NewResolvePendingResourceOps generates mongo transaction operations + // to set the identified resource as active. + NewResolvePendingResourceOps(resID, pendingID string) ([]txn.Op, error) +} + +// StagedResource represents resource info that has been added to the +// "staging" area of the persistence layer. +// +// A separate staging area is necessary because we are dealing with +// the DB and storage at the same time for the same resource in some +// operations (e.g. SetResource). Resources are staged in the DB, +// added to storage, and then finalized in the DB. +type StagedResource interface { + // Unstage ensures that the resource is removed + // from the staging area. If it isn't in the staging area + // then this is a noop. + Unstage() error + + // Activate makes the staged resource the active resource. + Activate() error +} + +type resourceStorage interface { + // PutAndCheckHash stores the content of the reader into the storage. + PutAndCheckHash(path string, r io.Reader, length int64, hash string) error + + // Remove removes the identified data from the storage. + Remove(path string) error + + // Get returns a reader for the resource at path. The size of the + // data is also returned. + Get(path string) (io.ReadCloser, int64, error) +} + +type resourceState struct { + persist resourcePersistence + storage resourceStorage + + newPendingID func() (string, error) + currentTimestamp func() time.Time +} + +// ListResources returns the resource data for the given service ID. +func (st resourceState) ListResources(serviceID string) (resource.ServiceResources, error) { + resources, err := st.persist.ListResources(serviceID) + if err != nil { + return resource.ServiceResources{}, errors.Trace(err) + } + + return resources, nil +} + +// GetResource returns the resource data for the identified resource. 
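+//
+// Resource IDs take the form "<service>/<name>", so a lookup sketch
+// (values illustrative) is:
+//
+//	res, err := st.GetResource("a-service", "spam") // ID "a-service/spam"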
+func (st resourceState) GetResource(serviceID, name string) (resource.Resource, error) { + id := newResourceID(serviceID, name) + res, _, err := st.persist.GetResource(id) + if err != nil { + return res, errors.Trace(err) + } + return res, nil +} + +// GetPendingResource returns the resource data for the identified resource. +func (st resourceState) GetPendingResource(serviceID, name, pendingID string) (resource.Resource, error) { + var res resource.Resource + + resources, err := st.persist.ListPendingResources(serviceID) + if err != nil { + return res, errors.Trace(err) + } + + for _, res := range resources { + if res.Name == name && res.PendingID == pendingID { + return res, nil + } + } + return res, errors.NotFoundf("pending resource %q (%s)", name, pendingID) +} + +// TODO(ericsnow) Separate setting the metadata from storing the blob? + +// SetResource stores the resource in the Juju model. +func (st resourceState) SetResource(serviceID, userID string, chRes charmresource.Resource, r io.Reader) (resource.Resource, error) { + logger.Tracef("adding resource %q for service %q", chRes.Name, serviceID) + pendingID := "" + res, err := st.setResource(pendingID, serviceID, userID, chRes, r) + if err != nil { + return res, errors.Trace(err) + } + return res, nil +} + +// AddPendingResource stores the resource in the Juju model. +func (st resourceState) AddPendingResource(serviceID, userID string, chRes charmresource.Resource, r io.Reader) (pendingID string, err error) { + pendingID, err = st.newPendingID() + if err != nil { + return "", errors.Annotate(err, "could not generate resource ID") + } + logger.Tracef("adding pending resource %q for service %q (ID: %s)", chRes.Name, serviceID, pendingID) + + if _, err := st.setResource(pendingID, serviceID, userID, chRes, r); err != nil { + return "", errors.Trace(err) + } + + return pendingID, nil +} + +// UpdatePendingResource stores the resource in the Juju model. +func (st resourceState) UpdatePendingResource(serviceID, pendingID, userID string, chRes charmresource.Resource, r io.Reader) (resource.Resource, error) { + logger.Tracef("updating pending resource %q (%s) for service %q", chRes.Name, pendingID, serviceID) + res, err := st.setResource(pendingID, serviceID, userID, chRes, r) + if err != nil { + return res, errors.Trace(err) + } + return res, nil +} + +// TODO(ericsnow) Add ResolvePendingResource(). + +func (st resourceState) setResource(pendingID, serviceID, userID string, chRes charmresource.Resource, r io.Reader) (resource.Resource, error) { + id := newResourceID(serviceID, chRes.Name) + + res := resource.Resource{ + Resource: chRes, + ID: id, + PendingID: pendingID, + ServiceID: serviceID, + } + if r != nil { + // TODO(ericsnow) Validate the user ID (or use a tag). + res.Username = userID + res.Timestamp = st.currentTimestamp() + } + + if err := res.Validate(); err != nil { + return res, errors.Annotate(err, "bad resource metadata") + } + + if r == nil { + if err := st.persist.SetResource(res); err != nil { + return res, errors.Trace(err) + } + } else { + if err := st.storeResource(res, r); err != nil { + return res, errors.Trace(err) + } + } + + return res, nil +} + +func (st resourceState) storeResource(res resource.Resource, r io.Reader) error { + // We use a staging approach for adding the resource metadata + // to the model. This is necessary because the resource data + // is stored separately and adding to both should be an atomic + // operation. 
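+	//
+	// Concretely, the sequence below is: StageResource (DB) ->
+	// PutAndCheckHash (blob storage) -> Activate (DB), with Unstage
+	// and/or Remove rolling back whichever earlier steps succeeded
+	// if a later one fails.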
+ + storagePath := storagePath(res.Name, res.ServiceID, res.PendingID) + staged, err := st.persist.StageResource(res, storagePath) + if err != nil { + return errors.Trace(err) + } + + hash := res.Fingerprint.String() + if err := st.storage.PutAndCheckHash(storagePath, r, res.Size, hash); err != nil { + if err := staged.Unstage(); err != nil { + logger.Errorf("could not unstage resource %q (service %q): %v", res.Name, res.ServiceID, err) + } + return errors.Trace(err) + } + + if err := staged.Activate(); err != nil { + if err := st.storage.Remove(storagePath); err != nil { + logger.Errorf("could not remove resource %q (service %q) from storage: %v", res.Name, res.ServiceID, err) + } + if err := staged.Unstage(); err != nil { + logger.Errorf("could not unstage resource %q (service %q): %v", res.Name, res.ServiceID, err) + } + return errors.Trace(err) + } + + return nil +} + +// OpenResource returns metadata about the resource, and a reader for +// the resource. +func (st resourceState) OpenResource(serviceID, name string) (resource.Resource, io.ReadCloser, error) { + id := newResourceID(serviceID, name) + resourceInfo, storagePath, err := st.persist.GetResource(id) + if err != nil { + return resource.Resource{}, nil, errors.Annotate(err, "while getting resource info") + } + if resourceInfo.IsPlaceholder() { + logger.Tracef("placeholder resource %q treated as not found", name) + return resource.Resource{}, nil, errors.NotFoundf("resource %q", name) + } + + resourceReader, resSize, err := st.storage.Get(storagePath) + if err != nil { + return resource.Resource{}, nil, errors.Annotate(err, "while retrieving resource data") + } + if resSize != resourceInfo.Size { + msg := "storage returned a size (%d) which doesn't match resource metadata (%d)" + return resource.Resource{}, nil, errors.Errorf(msg, resSize, resourceInfo.Size) + } + + return resourceInfo, resourceReader, nil +} + +// OpenResourceForUniter returns metadata about the resource and +// a reader for the resource. The resource is associated with +// the unit once the reader is completely exhausted. +func (st resourceState) OpenResourceForUniter(unit resource.Unit, name string) (resource.Resource, io.ReadCloser, error) { + serviceID := unit.ServiceName() + + resourceInfo, resourceReader, err := st.OpenResource(serviceID, name) + if err != nil { + return resource.Resource{}, nil, errors.Trace(err) + } + + resourceReader = unitSetter{ + ReadCloser: resourceReader, + persist: st.persist, + unit: unit, + resource: resourceInfo, + } + + return resourceInfo, resourceReader, nil +} + +// SetCharmStoreResources sets the "polled" resources for the +// service to the provided values. +func (st resourceState) SetCharmStoreResources(serviceID string, info []charmresource.Resource, lastPolled time.Time) error { + for _, chRes := range info { + id := newResourceID(serviceID, chRes.Name) + if err := st.persist.SetCharmStoreResource(id, serviceID, chRes, lastPolled); err != nil { + return errors.Trace(err) + } + // TODO(ericsnow) Worry about extras? missing? + } + + return nil +} + +// TODO(ericsnow) Rename NewResolvePendingResourcesOps to reflect that +// it has more meat to it? + +// NewResolvePendingResourcesOps generates mongo transaction operations +// to set the identified resources as active. +// +// Leaking mongo details (transaction ops) is a necessary evil since we +// do not have any machinery to facilitate transactions between +// different components. 
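+//
+// A usage sketch (the IDs are illustrative):
+//
+//	ops, err := st.NewResolvePendingResourcesOps("a-service",
+//		map[string]string{"spam": "some-unique-id"})
+//	// The caller then runs ops in its own mongo transaction.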
+func (st resourceState) NewResolvePendingResourcesOps(serviceID string, pendingIDs map[string]string) ([]txn.Op, error) { + if len(pendingIDs) == 0 { + return nil, nil + } + + // TODO(ericsnow) The resources need to be pulled in from the charm + // store before we get to this point. + + var allOps []txn.Op + for name, pendingID := range pendingIDs { + ops, err := st.newResolvePendingResourceOps(serviceID, name, pendingID) + if err != nil { + return nil, errors.Trace(err) + } + allOps = append(allOps, ops...) + } + return allOps, nil +} + +func (st resourceState) newResolvePendingResourceOps(serviceID, name, pendingID string) ([]txn.Op, error) { + resID := newResourceID(serviceID, name) + return st.persist.NewResolvePendingResourceOps(resID, pendingID) +} + +// TODO(ericsnow) Incorporate the service and resource name into the ID +// instead of just using a UUID? + +// newPendingID generates a new unique identifier for a resource. +func newPendingID() (string, error) { + uuid, err := utils.NewUUID() + if err != nil { + return "", errors.Annotate(err, "could not create new resource ID") + } + return uuid.String(), nil +} + +// newResourceID produces a new ID to use for the resource in the model. +func newResourceID(serviceID, name string) string { + return fmt.Sprintf("%s/%s", serviceID, name) +} + +// storagePath returns the path used as the location where the resource +// is stored in state storage. This requires that the returned string +// be unique and that it be organized in a structured way. In this case +// we start with a top-level (the service), then under that service use +// the "resources" section. The provided ID is located under there. +func storagePath(name, serviceID, pendingID string) string { + // TODO(ericsnow) Use services//resources/? + id := name + if pendingID != "" { + // TODO(ericsnow) How to resolve this later? + id += "-" + pendingID + } + return path.Join("service-"+serviceID, "resources", id) +} + +// unitSetter records the resource as in use by a unit when the wrapped +// reader has been fully read. +type unitSetter struct { + io.ReadCloser + persist resourcePersistence + unit resource.Unit + resource resource.Resource +} + +// Read implements io.Reader. +func (u unitSetter) Read(p []byte) (n int, err error) { + n, err = u.ReadCloser.Read(p) + if err == io.EOF { + // record that the unit is now using this version of the resource + if err := u.persist.SetUnitResource(u.unit.Name(), u.resource); err != nil { + msg := "Failed to record that unit %q is using resource %q revision %v" + logger.Errorf(msg, u.unit.Name(), u.resource.Name, u.resource.RevisionString()) + } + } + return n, err +} === added file 'src/github.com/juju/juju/resource/state/resource_test.go' --- src/github.com/juju/juju/resource/state/resource_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/state/resource_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,703 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package state + +import ( + "bytes" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/resource" + "github.com/juju/juju/resource/resourcetesting" +) + +var _ = gc.Suite(&ResourceSuite{}) + +type ResourceSuite struct { + testing.IsolationSuite + + stub *testing.Stub + raw *stubRawState + persist *stubPersistence + storage *stubStorage + timestamp time.Time + pendingID string +} + +func (s *ResourceSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.raw = &stubRawState{stub: s.stub} + s.persist = &stubPersistence{stub: s.stub} + s.persist.ReturnStageResource = &stubStagedResource{stub: s.stub} + s.storage = &stubStorage{stub: s.stub} + s.raw.ReturnPersistence = s.persist + s.raw.ReturnStorage = s.storage + s.timestamp = time.Now().UTC() + s.pendingID = "" +} + +func (s *ResourceSuite) now() time.Time { + s.stub.AddCall("currentTimestamp") + s.stub.NextErr() // Pop one off. + + return s.timestamp +} + +func (s *ResourceSuite) newPendingID() (string, error) { + s.stub.AddCall("newPendingID") + if err := s.stub.NextErr(); err != nil { + return "", errors.Trace(err) + } + + return s.pendingID, nil +} + +func (s *ResourceSuite) TestListResourcesOkay(c *gc.C) { + expected := newUploadResources(c, "spam", "eggs") + s.persist.ReturnListResources = resource.ServiceResources{Resources: expected} + st := NewState(s.raw) + s.stub.ResetCalls() + + resources, err := st.ListResources("a-service") + c.Assert(err, jc.ErrorIsNil) + + c.Check(resources.Resources, jc.DeepEquals, expected) + s.stub.CheckCallNames(c, "ListResources") + s.stub.CheckCall(c, 0, "ListResources", "a-service") +} + +func (s *ResourceSuite) TestListResourcesEmpty(c *gc.C) { + st := NewState(s.raw) + s.stub.ResetCalls() + + resources, err := st.ListResources("a-service") + c.Assert(err, jc.ErrorIsNil) + + c.Check(resources.Resources, gc.HasLen, 0) + s.stub.CheckCallNames(c, "ListResources") +} + +func (s *ResourceSuite) TestListResourcesError(c *gc.C) { + expected := newUploadResources(c, "spam", "eggs") + s.persist.ReturnListResources = resource.ServiceResources{Resources: expected} + st := NewState(s.raw) + s.stub.ResetCalls() + failure := errors.New("") + s.stub.SetErrors(failure) + + _, err := st.ListResources("a-service") + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "ListResources") +} + +func (s *ResourceSuite) TestGetPendingResource(c *gc.C) { + resources := newUploadResources(c, "spam", "eggs") + resources[0].PendingID = "some-unique-id" + resources[1].PendingID = "other-unique-id" + s.persist.ReturnListPendingResources = resources + st := NewState(s.raw) + s.stub.ResetCalls() + + res, err := st.GetPendingResource("a-service", "eggs", "other-unique-id") + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "ListPendingResources") + s.stub.CheckCall(c, 0, "ListPendingResources", "a-service") + c.Check(res, jc.DeepEquals, resources[1]) +} + +func (s *ResourceSuite) TestSetResourceOkay(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = s.timestamp + chRes := expected.Resource + hash := chRes.Fingerprint.String() + path := "service-a-service/resources/spam" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + + res, err := 
st.SetResource("a-service", "a-user", chRes, file) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Activate", + ) + s.stub.CheckCall(c, 1, "StageResource", expected, path) + s.stub.CheckCall(c, 2, "PutAndCheckHash", path, file, res.Size, hash) + c.Check(res, jc.DeepEquals, resource.Resource{ + Resource: chRes, + ID: "a-service/" + res.Name, + ServiceID: "a-service", + Username: "a-user", + Timestamp: s.timestamp, + }) +} + +func (s *ResourceSuite) TestSetResourceInfoOnly(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = time.Time{} + expected.Username = "" + chRes := expected.Resource + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + + res, err := st.SetResource("a-service", "a-user", chRes, nil) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "SetResource", + ) + s.stub.CheckCall(c, 0, "SetResource", expected) + c.Check(res, jc.DeepEquals, resource.Resource{ + Resource: chRes, + ID: "a-service/" + res.Name, + ServiceID: "a-service", + }) +} + +func (s *ResourceSuite) TestSetResourceBadResource(c *gc.C) { + res := newUploadResource(c, "spam", "spamspamspam") + res.Revision = -1 + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + + _, err := st.SetResource("a-service", "a-user", res.Resource, file) + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `bad resource metadata.*`) + s.stub.CheckCallNames(c, "currentTimestamp") +} + +func (s *ResourceSuite) TestSetResourceStagingFailure(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = s.timestamp + path := "service-a-service/resources/spam" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + failure := errors.New("") + ignoredErr := errors.New("") + s.stub.SetErrors(nil, failure, nil, nil, ignoredErr) + + _, err := st.SetResource("a-service", "a-user", expected.Resource, file) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, "currentTimestamp", "StageResource") + s.stub.CheckCall(c, 1, "StageResource", expected, path) +} + +func (s *ResourceSuite) TestSetResourcePutFailureBasic(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = s.timestamp + hash := expected.Fingerprint.String() + path := "service-a-service/resources/spam" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + failure := errors.New("") + ignoredErr := errors.New("") + s.stub.SetErrors(nil, nil, failure, nil, ignoredErr) + + _, err := st.SetResource("a-service", "a-user", expected.Resource, file) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Unstage", + ) + s.stub.CheckCall(c, 1, "StageResource", expected, path) + s.stub.CheckCall(c, 2, "PutAndCheckHash", path, file, expected.Size, hash) +} + +func (s *ResourceSuite) TestSetResourcePutFailureExtra(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = s.timestamp + hash := expected.Fingerprint.String() + path := "service-a-service/resources/spam" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + failure := errors.New("") + extraErr := errors.New("") + 
ignoredErr := errors.New("") + s.stub.SetErrors(nil, nil, failure, extraErr, ignoredErr) + + _, err := st.SetResource("a-service", "a-user", expected.Resource, file) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Unstage", + ) + s.stub.CheckCall(c, 1, "StageResource", expected, path) + s.stub.CheckCall(c, 2, "PutAndCheckHash", path, file, expected.Size, hash) +} + +func (s *ResourceSuite) TestSetResourceSetFailureBasic(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = s.timestamp + hash := expected.Fingerprint.String() + path := "service-a-service/resources/spam" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + failure := errors.New("") + ignoredErr := errors.New("") + s.stub.SetErrors(nil, nil, nil, failure, nil, nil, ignoredErr) + + _, err := st.SetResource("a-service", "a-user", expected.Resource, file) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Activate", + "Remove", + "Unstage", + ) + s.stub.CheckCall(c, 1, "StageResource", expected, path) + s.stub.CheckCall(c, 2, "PutAndCheckHash", path, file, expected.Size, hash) + s.stub.CheckCall(c, 4, "Remove", path) +} + +func (s *ResourceSuite) TestSetResourceSetFailureExtra(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.Timestamp = s.timestamp + hash := expected.Fingerprint.String() + path := "service-a-service/resources/spam" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + failure := errors.New("") + extraErr1 := errors.New("") + extraErr2 := errors.New("") + ignoredErr := errors.New("") + s.stub.SetErrors(nil, nil, nil, failure, extraErr1, extraErr2, ignoredErr) + + _, err := st.SetResource("a-service", "a-user", expected.Resource, file) + + c.Check(errors.Cause(err), gc.Equals, failure) + s.stub.CheckCallNames(c, + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Activate", + "Remove", + "Unstage", + ) + s.stub.CheckCall(c, 1, "StageResource", expected, path) + s.stub.CheckCall(c, 2, "PutAndCheckHash", path, file, expected.Size, hash) + s.stub.CheckCall(c, 4, "Remove", path) +} + +func (s *ResourceSuite) TestUpdatePendingResourceOkay(c *gc.C) { + expected := newUploadResource(c, "spam", "spamspamspam") + expected.PendingID = "some-unique-id" + expected.Timestamp = s.timestamp + chRes := expected.Resource + hash := chRes.Fingerprint.String() + path := "service-a-service/resources/spam-some-unique-id" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + s.stub.ResetCalls() + + res, err := st.UpdatePendingResource("a-service", "some-unique-id", "a-user", chRes, file) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Activate", + ) + s.stub.CheckCall(c, 1, "StageResource", expected, path) + s.stub.CheckCall(c, 2, "PutAndCheckHash", path, file, res.Size, hash) + c.Check(res, jc.DeepEquals, resource.Resource{ + Resource: chRes, + ID: "a-service/" + res.Name, + ServiceID: "a-service", + PendingID: "some-unique-id", + Username: "a-user", + Timestamp: s.timestamp, + }) +} + +func (s *ResourceSuite) TestAddPendingResourceOkay(c *gc.C) { + s.pendingID = "some-unique-ID-001" + expected := newUploadResource(c, "spam", 
"spamspamspam") + expected.PendingID = s.pendingID + expected.Timestamp = s.timestamp + chRes := expected.Resource + hash := chRes.Fingerprint.String() + path := "service-a-service/resources/spam-some-unique-ID-001" + file := &stubReader{stub: s.stub} + st := NewState(s.raw) + st.currentTimestamp = s.now + st.newPendingID = s.newPendingID + s.stub.ResetCalls() + + pendingID, err := st.AddPendingResource("a-service", "a-user", chRes, file) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "newPendingID", + "currentTimestamp", + "StageResource", + "PutAndCheckHash", + "Activate", + ) + s.stub.CheckCall(c, 2, "StageResource", expected, path) + s.stub.CheckCall(c, 3, "PutAndCheckHash", path, file, expected.Size, hash) + c.Check(pendingID, gc.Equals, s.pendingID) +} + +func (s *ResourceSuite) TestOpenResourceOkay(c *gc.C) { + data := "some data" + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", data) + s.persist.ReturnGetResource = opened.Resource + s.persist.ReturnGetResourcePath = "service-a-service/resources/spam" + s.storage.ReturnGet = opened.Content() + st := NewState(s.raw) + s.stub.ResetCalls() + + info, reader, err := st.OpenResource("a-service", "spam") + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "GetResource", "Get") + s.stub.CheckCall(c, 1, "Get", "service-a-service/resources/spam") + c.Check(info, jc.DeepEquals, opened.Resource) + c.Check(reader, gc.Equals, opened.ReadCloser) +} + +func (s *ResourceSuite) TestOpenResourceNotFound(c *gc.C) { + st := NewState(s.raw) + s.stub.ResetCalls() + + _, _, err := st.OpenResource("a-service", "spam") + + s.stub.CheckCallNames(c, "GetResource") + c.Check(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *ResourceSuite) TestOpenResourcePlaceholder(c *gc.C) { + res := resourcetesting.NewPlaceholderResource(c, "spam", "a-service") + s.persist.ReturnGetResource = res + s.persist.ReturnGetResourcePath = "service-a-service/resources/spam" + st := NewState(s.raw) + s.stub.ResetCalls() + + _, _, err := st.OpenResource("a-service", "spam") + + s.stub.CheckCallNames(c, "GetResource") + c.Check(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *ResourceSuite) TestOpenResourceSizeMismatch(c *gc.C) { + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + s.persist.ReturnGetResource = opened.Resource + s.persist.ReturnGetResourcePath = "service-a-service/resources/spam" + content := opened.Content() + content.Size += 1 + s.storage.ReturnGet = content + st := NewState(s.raw) + s.stub.ResetCalls() + + _, _, err := st.OpenResource("a-service", "spam") + + s.stub.CheckCallNames(c, "GetResource", "Get") + c.Check(err, gc.ErrorMatches, `storage returned a size \(10\) which doesn't match resource metadata \(9\)`) +} + +func (s *ResourceSuite) TestOpenResourceForUniterOkay(c *gc.C) { + data := "some data" + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", data) + s.persist.ReturnGetResource = opened.Resource + s.persist.ReturnGetResourcePath = "service-a-service/resources/spam" + s.storage.ReturnGet = opened.Content() + unit := newUnit(s.stub, "a-service/0") + st := NewState(s.raw) + s.stub.ResetCalls() + + info, reader, err := st.OpenResourceForUniter(unit, "spam") + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "ServiceName", "GetResource", "Get") + s.stub.CheckCall(c, 2, "Get", "service-a-service/resources/spam") + c.Check(info, jc.DeepEquals, opened.Resource) + + b, err := ioutil.ReadAll(reader) + // note ioutil.ReadAll converts EOF to nil + 
c.Check(err, jc.ErrorIsNil) + c.Check(b, gc.DeepEquals, []byte(data)) +} + +func (s *ResourceSuite) TestOpenResourceForUniterNotFound(c *gc.C) { + unit := newUnit(s.stub, "a-service/0") + st := NewState(s.raw) + s.stub.ResetCalls() + + _, _, err := st.OpenResourceForUniter(unit, "spam") + + s.stub.CheckCallNames(c, "ServiceName", "GetResource") + c.Check(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *ResourceSuite) TestOpenResourceForUniterPlaceholder(c *gc.C) { + res := resourcetesting.NewPlaceholderResource(c, "spam", "a-service") + s.persist.ReturnGetResource = res + s.persist.ReturnGetResourcePath = "service-a-service/resources/spam" + unit := newUnit(s.stub, "a-service/0") + st := NewState(s.raw) + s.stub.ResetCalls() + + _, _, err := st.OpenResourceForUniter(unit, "spam") + + s.stub.CheckCallNames(c, "ServiceName", "GetResource") + c.Check(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *ResourceSuite) TestOpenResourceForUniterSizeMismatch(c *gc.C) { + opened := resourcetesting.NewResource(c, s.stub, "spam", "a-service", "some data") + s.persist.ReturnGetResource = opened.Resource + s.persist.ReturnGetResourcePath = "service-a-service/resources/spam" + content := opened.Content() + content.Size += 1 + s.storage.ReturnGet = content + unit := newUnit(s.stub, "a-service/0") + st := NewState(s.raw) + s.stub.ResetCalls() + + _, _, err := st.OpenResourceForUniter(unit, "spam") + + s.stub.CheckCallNames(c, "ServiceName", "GetResource", "Get") + c.Check(err, gc.ErrorMatches, `storage returned a size \(10\) which doesn't match resource metadata \(9\)`) +} + +func (s *ResourceSuite) TestSetCharmStoreResources(c *gc.C) { + lastPolled := time.Now().UTC() + resources := newStoreResources(c, "spam", "eggs") + var info []charmresource.Resource + for _, res := range resources { + chRes := res.Resource + info = append(info, chRes) + } + st := NewState(s.raw) + s.stub.ResetCalls() + + err := st.SetCharmStoreResources("a-service", info, lastPolled) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "SetCharmStoreResource", + "SetCharmStoreResource", + ) + s.stub.CheckCall(c, 0, "SetCharmStoreResource", "a-service/spam", "a-service", info[0], lastPolled) + s.stub.CheckCall(c, 1, "SetCharmStoreResource", "a-service/eggs", "a-service", info[1], lastPolled) +} + +func (s *ResourceSuite) TestNewResourcePendingResourcesOps(c *gc.C) { + doc1 := map[string]string{"a": "1"} + doc2 := map[string]string{"b": "2"} + expected := []txn.Op{{ + C: "resources", + Id: "resource#a-service/spam#pending-some-unique-ID-001", + Assert: txn.DocExists, + Remove: true, + }, { + C: "resources", + Id: "resource#a-service/spam", + Assert: txn.DocMissing, + Insert: &doc1, + }, { + C: "resources", + Id: "resource#a-service/spam#pending-some-unique-ID-001", + Assert: txn.DocExists, + Remove: true, + }, { + C: "resources", + Id: "resource#a-service/spam", + Assert: txn.DocMissing, + Insert: &doc2, + }} + s.persist.ReturnNewResolvePendingResourceOps = [][]txn.Op{ + expected[:2], + expected[2:], + } + serviceID := "a-service" + st := NewState(s.raw) + s.stub.ResetCalls() + pendingIDs := map[string]string{ + "spam": "some-unique-id", + "eggs": "other-unique-id", + } + + ops, err := st.NewResolvePendingResourcesOps(serviceID, pendingIDs) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "NewResolvePendingResourceOps", + "NewResolvePendingResourceOps", + ) + c.Check(s.persist.CallsForNewResolvePendingResourceOps, jc.DeepEquals, map[string]string{ + "a-service/spam": "some-unique-id", + "a-service/eggs": 
"other-unique-id", + }) + c.Check(ops, jc.DeepEquals, expected) +} + +func (s *ResourceSuite) TestUnitSetterEOF(c *gc.C) { + r := unitSetter{ + ReadCloser: ioutil.NopCloser(&bytes.Buffer{}), + persist: &stubPersistence{stub: s.stub}, + unit: newUnit(s.stub, "some-service/0"), + resource: newUploadResource(c, "res", "res"), + } + // have to try to read non-zero data, or bytes.buffer will happily return + // nil. + p := make([]byte, 5) + n, err := r.Read(p) + c.Assert(n, gc.Equals, 0) + c.Assert(err, gc.Equals, io.EOF) + + s.stub.CheckCallNames(c, "Name", "SetUnitResource") + s.stub.CheckCall(c, 1, "SetUnitResource", "some-service/0", r.resource) +} + +func (s *ResourceSuite) TestUnitSetterNoEOF(c *gc.C) { + r := unitSetter{ + ReadCloser: ioutil.NopCloser(bytes.NewBufferString("foobar")), + persist: &stubPersistence{stub: s.stub}, + unit: newUnit(s.stub, "some-service/0"), + resource: newUploadResource(c, "res", "res"), + } + // read less than the full buffer + p := make([]byte, 3) + n, err := r.Read(p) + c.Assert(n, gc.Equals, 3) + c.Assert(err, gc.Equals, nil) + + // Assert that we don't call SetUnitResource if we read but don't reach the + // end of the buffer. + s.stub.CheckNoCalls(c) +} + +func (s *ResourceSuite) TestUnitSetterSetUnitErr(c *gc.C) { + r := unitSetter{ + ReadCloser: ioutil.NopCloser(&bytes.Buffer{}), + persist: &stubPersistence{stub: s.stub}, + unit: newUnit(s.stub, "some-service/0"), + resource: newUploadResource(c, "res", "res"), + } + + s.stub.SetErrors(errors.Errorf("oops!")) + // have to try to read non-zero data, or bytes.buffer will happily return + // nil. + p := make([]byte, 5) + n, err := r.Read(p) + c.Assert(n, gc.Equals, 0) + + // ensure that we return the EOF from bytes.buffer and not the error from SetUnitResource. + c.Assert(err, gc.Equals, io.EOF) + + s.stub.CheckCallNames(c, "Name", "SetUnitResource") + s.stub.CheckCall(c, 1, "SetUnitResource", "some-service/0", r.resource) +} + +func (s *ResourceSuite) TestUnitSetterErr(c *gc.C) { + r := unitSetter{ + ReadCloser: ioutil.NopCloser(&stubReader{stub: s.stub}), + persist: &stubPersistence{stub: s.stub}, + unit: newUnit(s.stub, "some-service/0"), + resource: newUploadResource(c, "res", "res"), + } + expected := errors.Errorf("some-err") + s.stub.SetErrors(expected) + // have to try to read non-zero data, or bytes.buffer will happily return + // nil. 
+ p := make([]byte, 5) + n, err := r.Read(p) + c.Assert(n, gc.Equals, 0) + c.Assert(err, gc.Equals, expected) + + s.stub.CheckCall(c, 0, "Read", p) +} + +func newUploadResources(c *gc.C, names ...string) []resource.Resource { + var resources []resource.Resource + for _, name := range names { + res := newUploadResource(c, name, name) + resources = append(resources, res) + } + return resources +} + +func newUploadResource(c *gc.C, name, data string) resource.Resource { + opened := resourcetesting.NewResource(c, nil, name, "a-service", data) + return opened.Resource +} + +func newStoreResources(c *gc.C, names ...string) []resource.Resource { + var resources []resource.Resource + for _, name := range names { + res := newStoreResource(c, name, name) + resources = append(resources, res) + } + return resources +} + +func newStoreResource(c *gc.C, name, data string) resource.Resource { + opened := resourcetesting.NewResource(c, nil, name, "a-service", data) + res := opened.Resource + res.Origin = charmresource.OriginStore + res.Revision = 1 + res.Username = "" + res.Timestamp = time.Time{} + return res +} + +func newCharmResource(c *gc.C, name, data string, rev int) charmresource.Resource { + opened := resourcetesting.NewResource(c, nil, name, "a-service", data) + chRes := opened.Resource.Resource + chRes.Origin = charmresource.OriginStore + chRes.Revision = rev + return chRes +} + +func newUnit(stub *testing.Stub, name string) *resourcetesting.StubUnit { + return &resourcetesting.StubUnit{ + Stub: stub, + ReturnName: name, + ReturnServiceName: strings.Split(name, "/")[0], + } +} === added file 'src/github.com/juju/juju/resource/state/state.go' --- src/github.com/juju/juju/resource/state/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/state/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "time" + + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("juju.resource.state") + +// Persistence is the state persistence functionality needed for resources. +type Persistence interface { + resourcePersistence +} + +// Storage is the state storage functionality needed for resources. +type Storage interface { + resourceStorage +} + +// RawState defines the functionality needed from state.State for resources. +type RawState interface { + // Persistence exposes the state data persistence needed for resources. + Persistence() Persistence + + // Storage exposes the state blob storage needed for resources. + Storage() Storage +} + +// State exposes the state functionality needed for resources. +type State struct { + *resourceState +} + +// NewState returns a new State for the given raw Juju state. +func NewState(raw RawState) *State { + logger.Tracef("wrapping state for resources") + + persist := raw.Persistence() + + storage := raw.Storage() + + st := &State{ + resourceState: &resourceState{ + persist: persist, + storage: storage, + newPendingID: newPendingID, + currentTimestamp: func() time.Time { + return time.Now().UTC() + }, + }, + } + return st +} === added file 'src/github.com/juju/juju/resource/state/state_test.go' --- src/github.com/juju/juju/resource/state/state_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/state/state_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package state + +import ( + "github.com/juju/testing" + gc "gopkg.in/check.v1" +) + +var _ = gc.Suite(&StateSuite{}) + +type StateSuite struct { + testing.IsolationSuite + + stub *testing.Stub + raw *stubRawState + persist *stubPersistence + storage *stubStorage +} + +func (s *StateSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.raw = &stubRawState{stub: s.stub} + s.persist = &stubPersistence{stub: s.stub} + s.storage = &stubStorage{stub: s.stub} + s.raw.ReturnPersistence = s.persist + s.raw.ReturnStorage = s.storage +} + +func (s *StateSuite) TestNewStateOkay(c *gc.C) { + st := NewState(s.raw) + + c.Check(st, gc.NotNil) + s.stub.CheckCallNames(c, "Persistence", "Storage") +} === added file 'src/github.com/juju/juju/resource/state/stub_test.go' --- src/github.com/juju/juju/resource/state/stub_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/state/stub_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,206 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "io" + "io/ioutil" + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/resource" +) + +type stubRawState struct { + stub *testing.Stub + + ReturnPersistence Persistence + ReturnStorage Storage +} + +func (s *stubRawState) Persistence() Persistence { + s.stub.AddCall("Persistence") + s.stub.NextErr() + + return s.ReturnPersistence +} + +func (s *stubRawState) Storage() Storage { + s.stub.AddCall("Storage") + s.stub.NextErr() + + return s.ReturnStorage +} + +type stubPersistence struct { + stub *testing.Stub + + ReturnListResources resource.ServiceResources + ReturnListPendingResources []resource.Resource + ReturnGetResource resource.Resource + ReturnGetResourcePath string + ReturnStageResource *stubStagedResource + ReturnNewResolvePendingResourceOps [][]txn.Op + + CallsForNewResolvePendingResourceOps map[string]string +} + +func (s *stubPersistence) ListResources(serviceID string) (resource.ServiceResources, error) { + s.stub.AddCall("ListResources", serviceID) + if err := s.stub.NextErr(); err != nil { + return resource.ServiceResources{}, errors.Trace(err) + } + + return s.ReturnListResources, nil +} + +func (s *stubPersistence) ListPendingResources(serviceID string) ([]resource.Resource, error) { + s.stub.AddCall("ListPendingResources", serviceID) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnListPendingResources, nil +} + +func (s *stubPersistence) GetResource(serviceID string) (resource.Resource, string, error) { + s.stub.AddCall("GetResource", serviceID) + if err := s.stub.NextErr(); err != nil { + return resource.Resource{}, "", errors.Trace(err) + } + + return s.ReturnGetResource, s.ReturnGetResourcePath, nil +} + +func (s *stubPersistence) StageResource(res resource.Resource, storagePath string) (StagedResource, error) { + s.stub.AddCall("StageResource", res, storagePath) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnStageResource, nil +} + +func (s *stubPersistence) SetResource(res resource.Resource) error { + s.stub.AddCall("SetResource", res) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubPersistence) SetCharmStoreResource(id, serviceID string, chRes charmresource.Resource, lastPolled time.Time) error { + 
s.stub.AddCall("SetCharmStoreResource", id, serviceID, chRes, lastPolled) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubPersistence) SetUnitResource(unitID string, res resource.Resource) error { + s.stub.AddCall("SetUnitResource", unitID, res) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubPersistence) NewResolvePendingResourceOps(resID, pendingID string) ([]txn.Op, error) { + s.stub.AddCall("NewResolvePendingResourceOps", resID, pendingID) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + if s.CallsForNewResolvePendingResourceOps == nil { + s.CallsForNewResolvePendingResourceOps = make(map[string]string) + } + s.CallsForNewResolvePendingResourceOps[resID] = pendingID + + if len(s.ReturnNewResolvePendingResourceOps) == 0 { + return nil, nil + } + ops := s.ReturnNewResolvePendingResourceOps[0] + s.ReturnNewResolvePendingResourceOps = s.ReturnNewResolvePendingResourceOps[1:] + return ops, nil +} + +type stubStagedResource struct { + stub *testing.Stub +} + +func (s *stubStagedResource) Unstage() error { + s.stub.AddCall("Unstage") + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubStagedResource) Activate() error { + s.stub.AddCall("Activate") + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubStorage struct { + stub *testing.Stub + + ReturnGet resource.Content +} + +func (s *stubStorage) PutAndCheckHash(path string, r io.Reader, length int64, hash string) error { + s.stub.AddCall("PutAndCheckHash", path, r, length, hash) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubStorage) Get(path string) (io.ReadCloser, int64, error) { + s.stub.AddCall("Get", path) + if err := s.stub.NextErr(); err != nil { + return nil, 0, errors.Trace(err) + } + + if readCloser, ok := s.ReturnGet.Data.(io.ReadCloser); ok { + return readCloser, s.ReturnGet.Size, nil + } + return ioutil.NopCloser(s.ReturnGet.Data), s.ReturnGet.Size, nil +} + +func (s *stubStorage) Remove(path string) error { + s.stub.AddCall("Remove", path) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubReader struct { + stub *testing.Stub + + ReturnRead int +} + +func (s *stubReader) Read(buf []byte) (int, error) { + s.stub.AddCall("Read", buf) + if err := s.stub.NextErr(); err != nil { + return 0, err + } + + return s.ReturnRead, nil +} === added file 'src/github.com/juju/juju/resource/unit.go' --- src/github.com/juju/juju/resource/unit.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/unit.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource + +import ( + "gopkg.in/juju/charm.v6-unstable" +) + +// TODO(ericsnow) Move Unit to an internal package? + +// Unit represents a Juju unit. +type Unit interface { + // Name is the name of the Unit. + Name() string + + // ServiceName is the name of the service to which the unit belongs. + ServiceName() string + + // CharmURL returns the unit's charm URL. 
+ CharmURL() (*charm.URL, bool) +} === added file 'src/github.com/juju/juju/resource/util_test.go' --- src/github.com/juju/juju/resource/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource_test + +import ( + "strings" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource" +) + +func newFingerprint(c *gc.C, data string) charmresource.Fingerprint { + reader := strings.NewReader(data) + fp, err := charmresource.GenerateFingerprint(reader) + c.Assert(err, jc.ErrorIsNil) + return fp +} + +func newFullCharmResource(c *gc.C, name string) charmresource.Resource { + return charmresource.Resource{ + Meta: charmresource.Meta{ + Name: name, + Type: charmresource.TypeFile, + Path: name + ".tgz", + Description: "you need it", + }, + Origin: charmresource.OriginUpload, + Revision: 1, + Fingerprint: newFingerprint(c, name), + } +} + +func newFullResource(c *gc.C, name string) resource.Resource { + return resource.Resource{ + Resource: newFullCharmResource(c, name), + ID: "a-service/" + name, + ServiceID: "a-service", + Username: "a-user", + Timestamp: time.Now(), + } +} === added directory 'src/github.com/juju/juju/resource/workers' === added file 'src/github.com/juju/juju/resource/workers/charmstorepoller.go' --- src/github.com/juju/juju/resource/workers/charmstorepoller.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/workers/charmstorepoller.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,167 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package workers + +import ( + "io" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/worker" +) + +const charmStorePollPeriod = 24 * time.Hour + +// Service exposes the functionality of the Juju entity needed here. +type Service interface { + // ID identifies the service in the model. + ID() names.ServiceTag + + // CharmURL identifies the service's charm. + CharmURL() *charm.URL +} + +// DataStore exposes the functionality of Juju state needed here. +type DataStore interface { + // ListAllServices returns all the services in the model. + ListAllServices() ([]Service, error) + + // SetCharmStoreResources sets the "polled from the charm store" + // resources for the service to the provided values. + SetCharmStoreResources(serviceID string, info []charmresource.Resource, lastPolled time.Time) error +} + +// CharmStoreClient exposes the functionality of the charm store +// needed here. +type CharmStoreClient interface { + io.Closer + + // ListResources returns the resources info for each identified charm. + ListResources([]*charm.URL) ([][]charmresource.Resource, error) +} + +// CharmStorePoller provides the functionality to poll the charm store +// for changes in resources in the Juju model. +type CharmStorePoller struct { + CharmStorePollerDeps + + // Period is the time between poll attempts. + Period time.Duration +} + +// NewCharmStorePoller returns a charm store poller that uses the +// provided data store. 
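+//
+// A minimal usage sketch (st and newClient here stand in for whatever
+// DataStore and client constructor the caller already has):
+//
+//	poller := NewCharmStorePoller(st, newClient)
+//	w := poller.NewWorker()
+//	// ...and when the poller is no longer needed:
+//	w.Kill()
+//	err := w.Wait()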
+func NewCharmStorePoller(st DataStore, newClient func() (CharmStoreClient, error)) *CharmStorePoller {
+	deps := &csPollerDeps{
+		DataStore: st,
+		newClient: newClient,
+	}
+	return &CharmStorePoller{
+		CharmStorePollerDeps: deps,
+		Period:               charmStorePollPeriod,
+	}
+}
+
+// NewWorker returns a new periodic worker for the poller.
+func (csp CharmStorePoller) NewWorker() worker.Worker {
+	// TODO(ericsnow) Wrap Do() in a retry? Log the error instead of
+	// returning it?
+	return csp.NewPeriodicWorker(csp.Do, csp.Period)
+}
+
+func shouldStop(stop <-chan struct{}) bool {
+	select {
+	case <-stop:
+		return true
+	default:
+		return false
+	}
+}
+
+// Do performs a single polling iteration.
+func (csp CharmStorePoller) Do(stop <-chan struct{}) error {
+	services, err := csp.ListAllServices()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if shouldStop(stop) {
+		return nil
+	}
+
+	var cURLs []*charm.URL
+	for _, service := range services {
+		cURL := service.CharmURL()
+		if cURL == nil {
+			continue
+		}
+		cURLs = append(cURLs, cURL)
+	}
+	if shouldStop(stop) {
+		return nil
+	}
+
+	chResources, err := csp.ListCharmStoreResources(cURLs)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	lastPolled := time.Now().UTC()
+	// Note: since we used "services" to compose the list of charm URLs
+	// passed to the charm store client, there is a one-to-one
+	// correspondence between "services" and "chResources".
+	for i, service := range services {
+		if shouldStop(stop) {
+			return nil
+		}
+
+		serviceID := service.ID().Id()
+		if err := csp.SetCharmStoreResources(serviceID, chResources[i], lastPolled); err != nil {
+			return errors.Trace(err)
+		}
+	}
+
+	return nil
+}
+
+// CharmStorePollerDeps exposes the external dependencies of a charm
+// store poller.
+type CharmStorePollerDeps interface {
+	DataStore
+
+	// NewPeriodicWorker returns a new periodic worker.
+	NewPeriodicWorker(func(stop <-chan struct{}) error, time.Duration) worker.Worker
+
+	// ListCharmStoreResources returns the resources from the charm
+	// store for each of the identified charms.
+	ListCharmStoreResources([]*charm.URL) ([][]charmresource.Resource, error)
+}
+
+type csPollerDeps struct {
+	DataStore
+	newClient func() (CharmStoreClient, error)
+}
+
+// NewPeriodicWorker implements CharmStorePollerDeps.
+func (csPollerDeps) NewPeriodicWorker(call func(stop <-chan struct{}) error, period time.Duration) worker.Worker {
+	return worker.NewPeriodicWorker(call, period, worker.NewTimer)
+}
+
+// ListCharmStoreResources implements CharmStorePollerDeps.
+func (deps csPollerDeps) ListCharmStoreResources(cURLs []*charm.URL) ([][]charmresource.Resource, error) {
+	client, err := deps.newClient()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	defer client.Close()
+
+	chResources, err := client.ListResources(cURLs)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return chResources, nil
+}

=== added file 'src/github.com/juju/juju/resource/workers/charmstorepoller_test.go'
--- src/github.com/juju/juju/resource/workers/charmstorepoller_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/resource/workers/charmstorepoller_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,259 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package workers_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + charmresource "gopkg.in/juju/charm.v6-unstable/resource" + + "github.com/juju/juju/resource/resourcetesting" + "github.com/juju/juju/resource/workers" + "github.com/juju/juju/worker" + workertest "github.com/juju/juju/worker/testing" +) + +type CharmStorePollerSuite struct { + testing.IsolationSuite + + stub *testing.Stub + deps *stubCharmStorePollerDeps + client *stubCharmStoreClient +} + +var _ = gc.Suite(&CharmStorePollerSuite{}) + +func (s *CharmStorePollerSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.deps = &stubCharmStorePollerDeps{Stub: s.stub} + s.client = &stubCharmStoreClient{Stub: s.stub} + s.deps.ReturnNewClient = s.client +} + +func (s *CharmStorePollerSuite) TestIntegration(c *gc.C) { + s.deps.ReturnListAllServices = []workers.Service{ + newStubService(c, s.stub, "svc-a"), + newStubService(c, s.stub, "svc-b"), + } + s.client.ReturnListResources = [][]charmresource.Resource{{ + resourcetesting.NewCharmResource(c, "spam", "blahblahblah"), + }, { + resourcetesting.NewCharmResource(c, "eggs", "..."), + resourcetesting.NewCharmResource(c, "ham", "lahdeedah"), + }} + done := make(chan struct{}) + poller := workers.NewCharmStorePoller(s.deps, s.deps.NewClient) + poller.CharmStorePollerDeps = &doTracker{ + CharmStorePollerDeps: poller.CharmStorePollerDeps, + done: done, + } + + worker := poller.NewWorker() + go func() { + <-done + worker.Kill() + }() + err := worker.Wait() + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "ListAllServices", + "CharmURL", + "CharmURL", + "NewClient", + "ListResources", + "Close", + "ID", + "SetCharmStoreResources", + "ID", + "SetCharmStoreResources", + ) +} + +func (s *CharmStorePollerSuite) TestNewCharmStorePoller(c *gc.C) { + poller := workers.NewCharmStorePoller(s.deps, s.deps.NewClient) + + s.stub.CheckNoCalls(c) + c.Check(poller.Period, gc.Equals, 24*time.Hour) +} + +func (s *CharmStorePollerSuite) TestNewWorker(c *gc.C) { + expected := &workertest.StubWorker{Stub: s.stub} + s.deps.ReturnNewPeriodicWorker = expected + period := 11 * time.Second + poller := workers.CharmStorePoller{ + CharmStorePollerDeps: s.deps, + Period: period, + } + + worker := poller.NewWorker() + + s.stub.CheckCallNames(c, "NewPeriodicWorker") + c.Check(worker, gc.Equals, expected) +} + +func (s *CharmStorePollerSuite) TestDo(c *gc.C) { + s.deps.ReturnListAllServices = []workers.Service{ + newStubService(c, s.stub, "svc-a"), + newStubService(c, s.stub, "svc-b"), + } + s.deps.ReturnListCharmStoreResources = [][]charmresource.Resource{{ + resourcetesting.NewCharmResource(c, "spam", "blahblahblah"), + }, { + resourcetesting.NewCharmResource(c, "eggs", "..."), + resourcetesting.NewCharmResource(c, "ham", "lahdeedah"), + }} + poller := workers.CharmStorePoller{ + CharmStorePollerDeps: s.deps, + } + stop := make(chan struct{}) + defer close(stop) + + err := poller.Do(stop) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, + "ListAllServices", + "CharmURL", + "CharmURL", + "ListCharmStoreResources", + "ID", + "SetCharmStoreResources", + "ID", + "SetCharmStoreResources", + ) +} + +type stubCharmStorePollerDeps struct { + *testing.Stub + + ReturnNewClient workers.CharmStoreClient + ReturnListAllServices []workers.Service + ReturnNewPeriodicWorker worker.Worker + 
ReturnListCharmStoreResources [][]charmresource.Resource +} + +func (s *stubCharmStorePollerDeps) NewClient() (workers.CharmStoreClient, error) { + s.AddCall("NewClient") + if err := s.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnNewClient, nil +} + +func (s *stubCharmStorePollerDeps) ListAllServices() ([]workers.Service, error) { + s.AddCall("ListAllServices") + if err := s.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnListAllServices, nil +} + +func (s *stubCharmStorePollerDeps) SetCharmStoreResources(serviceID string, info []charmresource.Resource, lastPolled time.Time) error { + s.AddCall("SetCharmStoreResources", serviceID, info, lastPolled) + if err := s.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubCharmStorePollerDeps) NewPeriodicWorker(call func(stop <-chan struct{}) error, period time.Duration) worker.Worker { + s.AddCall("NewPeriodicWorker", call, period) + s.NextErr() // Pop one off. + + return s.ReturnNewPeriodicWorker +} + +func (s *stubCharmStorePollerDeps) ListCharmStoreResources(cURLs []*charm.URL) ([][]charmresource.Resource, error) { + s.AddCall("ListCharmStoreResources", cURLs) + if err := s.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnListCharmStoreResources, nil +} + +type stubCharmStoreClient struct { + *testing.Stub + + ReturnListResources [][]charmresource.Resource +} + +func (s *stubCharmStoreClient) ListResources(cURLs []*charm.URL) ([][]charmresource.Resource, error) { + s.AddCall("ListResources", cURLs) + if err := s.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.ReturnListResources, nil +} + +func (s *stubCharmStoreClient) Close() error { + s.AddCall("Close") + if err := s.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type stubService struct { + *testing.Stub + + ReturnID names.ServiceTag + ReturnCharmURL *charm.URL +} + +func newStubService(c *gc.C, stub *testing.Stub, name string) *stubService { + cURL := &charm.URL{ + Schema: "cs", + Name: name, + Revision: 1, + } + return &stubService{ + Stub: stub, + ReturnID: names.NewServiceTag(name), + ReturnCharmURL: cURL, + } +} + +func (s *stubService) ID() names.ServiceTag { + s.AddCall("ID") + s.NextErr() // Pop one off. + + return s.ReturnID +} + +func (s *stubService) CharmURL() *charm.URL { + s.AddCall("CharmURL") + s.NextErr() // Pop one off. + + return s.ReturnCharmURL +} + +type doTracker struct { + workers.CharmStorePollerDeps + + done chan struct{} +} + +func (dt doTracker) NewPeriodicWorker(call func(stop <-chan struct{}) error, period time.Duration) worker.Worker { + wrapper := func(stop <-chan struct{}) error { + err := call(stop) + close(dt.done) + return err + } + return dt.CharmStorePollerDeps.NewPeriodicWorker(wrapper, period) +} === added file 'src/github.com/juju/juju/resource/workers/package_test.go' --- src/github.com/juju/juju/resource/workers/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/resource/workers/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package workers_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+	gc.TestingT(t)
+}

=== modified file 'src/github.com/juju/juju/rpc/client.go'
--- src/github.com/juju/juju/rpc/client.go	2014-08-20 15:00:12 +0000
+++ src/github.com/juju/juju/rpc/client.go	2016-03-22 15:18:22 +0000
@@ -4,12 +4,17 @@
 package rpc
 
 import (
-	"errors"
 	"strings"
+
+	"github.com/juju/errors"
 )
 
 var ErrShutdown = errors.New("connection is shut down")
 
+func IsShutdownErr(err error) bool {
+	return errors.Cause(err) == ErrShutdown
+}
+
 // Call represents an active RPC.
 type Call struct {
 	Request
@@ -26,11 +31,10 @@
 }
 
 func (e *RequestError) Error() string {
-	m := "request error: " + e.Message
 	if e.Code != "" {
-		m += " (" + e.Code + ")"
+		return e.Message + " (" + e.Code + ")"
 	}
-	return m
+	return e.Message
 }
 
 func (e *RequestError) ErrorCode() string {
@@ -103,7 +107,7 @@
 	case hdr.Error != "":
 		// Report rpcreflect.NoSuchMethodError with CodeNotImplemented.
 		if strings.HasPrefix(hdr.Error, "no such request ") && hdr.ErrorCode == "" {
-			hdr.ErrorCode = CodeNotImplemented
+			hdr.ErrorCode = codeNotImplemented
 		}
 		// We've got an error response. Give this to the request;
 		// any subsequent requests will get the ReadResponseBody
@@ -124,7 +128,7 @@
 		}
 		call.done()
 	}
-	return err
+	return errors.Annotate(err, "error handling response")
 }
 
 func (call *Call) done() {
@@ -138,39 +142,19 @@
 	}
 }
 
-// Call invokes the named action on the object of the given type with
-// the given id. The returned values will be stored in response, which
-// should be a pointer. If the action fails remotely, the returned
-// error will be of type RequestError. The params value may be nil if
-// no parameters are provided; the response value may be nil to indicate
-// that any result should be discarded.
+// Call invokes the named action on the object of the given type with the given
+// id. The returned values will be stored in response, which should be a pointer.
+// If the action fails remotely, the error will have a cause of type RequestError.
+// The params value may be nil if no parameters are provided; the response value
+// may be nil to indicate that any result should be discarded.
 func (conn *Conn) Call(req Request, params, response interface{}) error {
-	call := <-conn.Go(req, params, response, make(chan *Call, 1)).Done
-	return call.Error
-}
-
-// Go invokes the request asynchronously. It returns the Call structure representing
-// the invocation. The done channel will signal when the call is complete by returning
-// the same Call object. If done is nil, Go will allocate a new channel.
-// If non-nil, done must be buffered or Go will deliberately panic.
-func (conn *Conn) Go(req Request, args, response interface{}, done chan *Call) *Call {
-	if done == nil {
-		done = make(chan *Call, 1)
-	} else {
-		// If caller passes done != nil, it must arrange that
-		// done has enough buffer for the number of simultaneous
-		// RPCs that will be using that channel. If the channel
-		// is totally unbuffered, it's best not to run at all.
- if cap(done) == 0 { - panic("github.com/juju/juju/rpc: done channel is unbuffered") - } - } call := &Call{ Request: req, - Params: args, + Params: params, Response: response, - Done: done, + Done: make(chan *Call, 1), } conn.send(call) - return call + result := <-call.Done + return errors.Trace(result.Error) } === added file 'src/github.com/juju/juju/rpc/export_test.go' --- src/github.com/juju/juju/rpc/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/rpc/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package rpc + +const CodeNotImplemented = codeNotImplemented === modified file 'src/github.com/juju/juju/rpc/jsoncodec/codec.go' --- src/github.com/juju/juju/rpc/jsoncodec/codec.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/rpc/jsoncodec/codec.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ "sync" "sync/atomic" + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/juju/rpc" @@ -120,7 +121,7 @@ if c.isClosing() || err == io.EOF { return io.EOF } - return fmt.Errorf("error receiving message: %v", err) + return errors.Annotate(err, "error receiving message") } hdr.RequestId = c.msg.RequestId hdr.Request = rpc.Request{ === modified file 'src/github.com/juju/juju/rpc/jsoncodec/codec_test.go' --- src/github.com/juju/juju/rpc/jsoncodec/codec_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/rpc/jsoncodec/codec_test.go 2016-03-22 15:18:22 +0000 @@ -9,16 +9,16 @@ stdtesting "testing" "github.com/juju/loggo" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/rpc" "github.com/juju/juju/rpc/jsoncodec" - "github.com/juju/juju/testing" ) type suite struct { - testing.BaseSuite + testing.LoggingSuite } var _ = gc.Suite(&suite{}) === modified file 'src/github.com/juju/juju/rpc/rpc_test.go' --- src/github.com/juju/juju/rpc/rpc_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/rpc/rpc_test.go 2016-03-22 15:18:22 +0000 @@ -13,10 +13,12 @@ stdtesting "testing" "time" + "github.com/juju/errors" "github.com/juju/loggo" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/rpc" "github.com/juju/juju/rpc/jsoncodec" "github.com/juju/juju/rpc/rpcreflect" @@ -470,7 +472,7 @@ err := p.client.Call(p.request(), stringVal{"arg"}, &r) switch { case p.retErr && p.testErr: - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: p.errorMessage(), }) c.Assert(r, gc.Equals, stringVal{}) @@ -610,7 +612,7 @@ // CodeNotImplemented. var r stringVal err := client.Call(rpc.Request{"InterfaceMethods", 0, "a99", "Call0r0"}, stringVal{"arg"}, &r) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "no such request - method InterfaceMethods.Call0r0 is not implemented", Code: rpc.CodeNotImplemented, }) @@ -637,7 +639,7 @@ // Call1r1 is exposed in version 1, but not in version 0. var r stringVal err := client.Call(rpc.Request{"MultiVersion", 0, "a99", "Call1r1"}, stringVal{"arg"}, &r) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "no such request - method MultiVersion.Call1r1 is not implemented", Code: rpc.CodeNotImplemented, }) @@ -664,7 +666,7 @@ // Call0r1 is exposed in version 0, but not in version 1. 
var r stringVal err := client.Call(rpc.Request{"MultiVersion", 1, "a99", "Call0r1"}, nil, &r) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "no such request - method MultiVersion(1).Call0r1 is not implemented", Code: rpc.CodeNotImplemented, }) @@ -692,7 +694,7 @@ // in InterfaceMethods. var r stringVal err := client.Call(rpc.Request{"MultiVersion", 2, "a99", "Call0r1e"}, nil, &r) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: `no such request - method MultiVersion(2).Call0r1e is not implemented`, Code: rpc.CodeNotImplemented, }) @@ -705,7 +707,7 @@ var r stringVal // Unknown version 5 err := client.Call(rpc.Request{"MultiVersion", 5, "a99", "Call0r1"}, nil, &r) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: `unknown version (5) of interface "MultiVersion"`, Code: rpc.CodeNotImplemented, }) @@ -769,8 +771,8 @@ client, srvDone, _, _ := newRPCClientServer(c, root, nil, false) defer closeClient(c, client, srvDone) err := client.Call(rpc.Request{"ErrorMethods", 0, "", "Call"}, nil, nil) - c.Assert(err, gc.ErrorMatches, `request error: message \(code\)`) - c.Assert(err.(rpc.ErrorCoder).ErrorCode(), gc.Equals, "code") + c.Assert(err, gc.ErrorMatches, `message \(code\)`) + c.Assert(errors.Cause(err).(rpc.ErrorCoder).ErrorCode(), gc.Equals, "code") } func (*rpcSuite) TestTransformErrors(c *gc.C) { @@ -791,13 +793,13 @@ defer closeClient(c, client, srvDone) // First, we don't transform methods we can't find. err := client.Call(rpc.Request{"foo", 0, "", "bar"}, nil, nil) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: `unknown object type "foo"`, Code: rpc.CodeNotImplemented, }) err = client.Call(rpc.Request{"ErrorMethods", 0, "", "NoMethod"}, nil, nil) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "no such request - method ErrorMethods.NoMethod is not implemented", Code: rpc.CodeNotImplemented, }) @@ -805,7 +807,7 @@ // We do transform any errors that happen from calling the RootMethod // and beyond. err = client.Call(rpc.Request{"ErrorMethods", 0, "", "Call"}, nil, nil) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "transformed: message", Code: "transformed: code", }) @@ -816,7 +818,7 @@ root.errorInst = nil err = client.Call(rpc.Request{"ErrorMethods", 0, "", "Call"}, nil, nil) - c.Assert(err, gc.DeepEquals, &rpc.RequestError{ + c.Assert(errors.Cause(err), gc.DeepEquals, &rpc.RequestError{ Message: "transformed: no error methods", }) @@ -839,7 +841,7 @@ go func() { var r stringVal err := client.Call(rpc.Request{"DelayedMethods", 0, "1", "Delay"}, nil, &r) - c.Check(err, gc.Equals, rpc.ErrShutdown) + c.Check(errors.Cause(err), gc.Equals, rpc.ErrShutdown) done <- struct{}{} }() chanRead(c, ready, "DelayedMethods.Delay ready") @@ -951,7 +953,7 @@ if expectedErrCode != "" { msg += " (" + expectedErrCode + ")" } - c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta("request error: "+msg)) + c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(msg)) // Test that there was a notification for the client request. 
c.Assert(clientNotifier.clientRequests, gc.HasLen, 1) @@ -1025,10 +1027,10 @@ X: map[string]int{"hello": 65}, } err := client.Call(rpc.Request{"SimpleMethods", 0, "a0", "SliceArg"}, arg0, &ret) - c.Assert(err, gc.ErrorMatches, `request error: json: cannot unmarshal object into Go value of type \[\]string`) + c.Assert(err, gc.ErrorMatches, `json: cannot unmarshal object into Go value of type \[\]string`) err = client.Call(rpc.Request{"SimpleMethods", 0, "a0", "SliceArg"}, arg0, &ret) - c.Assert(err, gc.ErrorMatches, `request error: json: cannot unmarshal object into Go value of type \[\]string`) + c.Assert(err, gc.ErrorMatches, `json: cannot unmarshal object into Go value of type \[\]string`) arg1 := struct { X []string @@ -1045,7 +1047,7 @@ err := client.Close() c.Assert(err, jc.ErrorIsNil) err = client.Call(rpc.Request{"Foo", 0, "", "Bar"}, nil, nil) - c.Assert(err, gc.Equals, rpc.ErrShutdown) + c.Assert(errors.Cause(err), gc.Equals, rpc.ErrShutdown) err = chanReadError(c, srvDone, "server done") c.Assert(err, jc.ErrorIsNil) } @@ -1102,7 +1104,7 @@ defer closeClient(c, client, srvDone) var r int64val err := client.Call(rpc.Request{"CallbackMethods", 0, "", "Factorial"}, int64val{12}, &r) - c.Assert(err, gc.ErrorMatches, "request error: request error: no service") + c.Assert(err, gc.ErrorMatches, "no service") } func (*rpcSuite) TestChangeAPI(c *gc.C) { @@ -1111,11 +1113,11 @@ defer closeClient(c, client, srvDone) var s stringVal err := client.Call(rpc.Request{"NewlyAvailable", 0, "", "NewMethod"}, nil, &s) - c.Assert(err, gc.ErrorMatches, `request error: unknown object type "NewlyAvailable" \(not implemented\)`) + c.Assert(err, gc.ErrorMatches, `unknown object type "NewlyAvailable" \(not implemented\)`) err = client.Call(rpc.Request{"ChangeAPIMethods", 0, "", "ChangeAPI"}, nil, nil) c.Assert(err, jc.ErrorIsNil) err = client.Call(rpc.Request{"ChangeAPIMethods", 0, "", "ChangeAPI"}, nil, nil) - c.Assert(err, gc.ErrorMatches, `request error: unknown object type "ChangeAPIMethods" \(not implemented\)`) + c.Assert(err, gc.ErrorMatches, `unknown object type "ChangeAPIMethods" \(not implemented\)`) err = client.Call(rpc.Request{"NewlyAvailable", 0, "", "NewMethod"}, nil, &s) c.Assert(err, jc.ErrorIsNil) c.Assert(s, gc.Equals, stringVal{"new method result"}) @@ -1130,7 +1132,7 @@ c.Assert(err, jc.ErrorIsNil) err = client.Call(rpc.Request{"ChangeAPIMethods", 0, "", "RemoveAPI"}, nil, nil) - c.Assert(err, gc.ErrorMatches, "request error: no service") + c.Assert(err, gc.ErrorMatches, "no service") } func (*rpcSuite) TestChangeAPIWhileServingRequest(c *gc.C) { @@ -1161,12 +1163,16 @@ done <- fmt.Errorf("an error") select { case r := <-result: - c.Assert(r, gc.ErrorMatches, "request error: transformed: an error") + c.Assert(r, gc.ErrorMatches, "transformed: an error") case <-time.After(3 * time.Second): c.Fatalf("timeout on channel read") } } +func (*rpcSuite) TestCodeNotImplementedMatchesApiserverParams(c *gc.C) { + c.Assert(rpc.CodeNotImplemented, gc.Equals, params.CodeNotImplemented) +} + func chanReadError(c *gc.C, ch <-chan error, what string) error { select { case e := <-ch: === modified file 'src/github.com/juju/juju/rpc/server.go' --- src/github.com/juju/juju/rpc/server.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/rpc/server.go 2016-03-22 15:18:22 +0000 @@ -4,18 +4,18 @@ package rpc import ( - "fmt" "io" "reflect" "sync" "time" + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/juju/rpc/rpcreflect" ) -const CodeNotImplemented = "not implemented" +const 
codeNotImplemented = "not implemented" var logger = loggo.GetLogger("juju.rpc") @@ -96,7 +96,7 @@ // codec holds the underlying RPC connection. codec Codec - // notifier is informed about RPC requests. It may be nil. + // notifier is informed about RPC requests. notifier RequestNotifier // srvPending represents the current server requests. @@ -191,6 +191,9 @@ // any requests are sent or received. If notifier is non-nil, the // appropriate method will be called for every RPC request. func NewConn(codec Codec, notifier RequestNotifier) *Conn { + if notifier == nil { + notifier = new(dummyNotifier) + } return &Conn{ codec: codec, clientPending: make(map[uint64]*Call), @@ -198,6 +201,14 @@ } } +// dummyNotifier is used when no notifier is provided to NewConn. +type dummyNotifier struct{} + +func (*dummyNotifier) ServerRequest(*Header, interface{}) {} +func (*dummyNotifier) ServerReply(Request, *Header, interface{}, time.Duration) {} +func (*dummyNotifier) ClientRequest(*Header, interface{}) {} +func (*dummyNotifier) ClientReply(Request, *Header, interface{}) {} + // Start starts the RPC connection running. It must be called at least // once for any RPC connection (client or server side) It has no effect // if it has already been called. By default, a connection serves no @@ -402,20 +413,23 @@ // loop implements the looping part of Conn.input. func (conn *Conn) loop() error { - var hdr Header for { - hdr = Header{} + var hdr Header err := conn.codec.ReadHeader(&hdr) - if err != nil { - return err - } - if hdr.IsRequest() { - err = conn.handleRequest(&hdr) - } else { - err = conn.handleResponse(&hdr) - } - if err != nil { - return err + switch { + case err == io.EOF: + // handle sentinel error specially + return err + case err != nil: + return errors.Annotate(err, "codec.ReadHeader error") + case hdr.IsRequest(): + if err := conn.handleRequest(&hdr); err != nil { + return errors.Annotatef(err, "codec.handleRequest %#v error", hdr) + } + default: + if err := conn.handleResponse(&hdr); err != nil { + return errors.Annotatef(err, "codec.handleResponse %#v error", hdr) + } } } } @@ -431,9 +445,7 @@ startTime := time.Now() req, err := conn.bindRequest(hdr) if err != nil { - if conn.notifier != nil { - conn.notifier.ServerRequest(hdr, nil) - } + conn.notifier.ServerRequest(hdr, nil) if err := conn.readBody(nil, true); err != nil { return err } @@ -449,9 +461,7 @@ argp = v.Interface() } if err := conn.readBody(argp, true); err != nil { - if conn.notifier != nil { - conn.notifier.ServerRequest(hdr, nil) - } + conn.notifier.ServerRequest(hdr, nil) // If we get EOF, we know the connection is a // goner, so don't try to respond. if err == io.EOF || err == io.ErrUnexpectedEOF { @@ -467,12 +477,10 @@ // up the problem and abort. 
return conn.writeErrorResponse(hdr, req.transformErrors(err), startTime) } - if conn.notifier != nil { - if req.ParamsType() != nil { - conn.notifier.ServerRequest(hdr, arg.Interface()) - } else { - conn.notifier.ServerRequest(hdr, struct{}{}) - } + if req.ParamsType() != nil { + conn.notifier.ServerRequest(hdr, arg.Interface()) + } else { + conn.notifier.ServerRequest(hdr, struct{}{}) } conn.mutex.Lock() closing := conn.closing @@ -500,9 +508,7 @@ hdr.ErrorCode = "" } hdr.Error = err.Error() - if conn.notifier != nil { - conn.notifier.ServerReply(reqHdr.Request, hdr, struct{}{}, time.Since(startTime)) - } + conn.notifier.ServerReply(reqHdr.Request, hdr, struct{}{}, time.Since(startTime)) return conn.codec.WriteMessage(hdr, struct{}{}) } @@ -524,15 +530,14 @@ conn.mutex.Unlock() if methodFinder == nil { - return boundRequest{}, fmt.Errorf("no service") + return boundRequest{}, errors.New("no service") } caller, err := methodFinder.FindMethod( hdr.Request.Type, hdr.Request.Version, hdr.Request.Action) if err != nil { if _, ok := err.(*rpcreflect.CallNotImplementedError); ok { err = &serverError{ - Message: err.Error(), - Code: CodeNotImplemented, + error: err, } } else { err = transformErrors(err) @@ -562,9 +567,7 @@ } else { rvi = struct{}{} } - if conn.notifier != nil { - conn.notifier.ServerReply(req.hdr.Request, hdr, rvi, time.Since(startTime)) - } + conn.notifier.ServerReply(req.hdr.Request, hdr, rvi, time.Since(startTime)) conn.sending.Lock() err = conn.codec.WriteMessage(hdr, rvi) conn.sending.Unlock() @@ -574,12 +577,11 @@ } } -type serverError RequestError - -func (e *serverError) Error() string { - return e.Message +type serverError struct { + error } func (e *serverError) ErrorCode() string { - return e.Code + // serverError only knows one error code. + return codeNotImplemented } === modified file 'src/github.com/juju/juju/scripts/jujuman.py' --- src/github.com/juju/juju/scripts/jujuman.py 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/scripts/jujuman.py 2016-03-22 15:18:22 +0000 @@ -58,13 +58,14 @@ ENVIRONMENT = ( - ('JUJU_ENV', textwrap.dedent("""\ + ('JUJU_MODEL', textwrap.dedent("""\ Provides a way for the shell environment to specify the current Juju - environment to use. If the environment is specified explicitly using - -e ENV, this takes precedence. + model to use. If the model is specified explicitly using + -m MODEL, this takes precedence. """)), - ('JUJU_HOME', textwrap.dedent("""\ - Overrides the default Juju configuration directory of ~/.juju. + ('JUJU_DATA', textwrap.dedent("""\ + Overrides the default Juju configuration directory of $XDG_DATA_HOME/juju or ~/.local/share/juju + if $XDG_DATA_HOME is not defined. """)), ('AWS_ACCESS_KEY_ID', textwrap.dedent("""\ The access-key for your AWS account. @@ -144,9 +145,9 @@ man_foot = """\ .SH "FILES" .TP -.I "~/.juju/environments.yaml" +.I "~/.config/juju/environments.yaml" This is the Juju config file, which you can use to specify multiple -environments in which to deploy. +models in which to deploy. 
A config file can be created using
 .B juju init

=== modified file 'src/github.com/juju/juju/scripts/release-public-tools/release-public-tools.sh'
--- src/github.com/juju/juju/scripts/release-public-tools/release-public-tools.sh	2014-08-20 15:00:12 +0000
+++ src/github.com/juju/juju/scripts/release-public-tools/release-public-tools.sh	2016-03-22 15:18:22 +0000
@@ -23,14 +23,17 @@
     which lftp || has_deps=0
     which swift || has_deps=0
     which s3cmd || has_deps=0
-    test -f ~/.juju/canonistacktoolsrc || has_deps=0
-    test -f ~/.juju/hptoolsrc || has_deps=0
-    test -f ~/.juju/awstoolsrc || has_deps=0
-    test -f ~/.juju/azuretoolsrc || has_deps=0
+
+    test -f $JUJU_DATA/canonistacktoolsrc || has_deps=0
+    test -f $JUJU_DATA/hptoolsrc || has_deps=0
+    test -f $JUJU_DATA/awstoolsrc || has_deps=0
+    test -f $JUJU_DATA/azuretoolsrc || has_deps=0
     if [[ $has_deps == 0 ]]; then
         echo "Install lftp, python-swiftclient, and s3cmd"
-        echo "Your ~/.juju dir must contain rc files to publish:"
+        echo "Your \$JUJU_DATA dir must contain rc files to publish:"
         echo "  canonistacktoolsrc, hptoolsrc, awstoolsrc, azuretoolsrc"
+        echo "(if \$JUJU_DATA is not set, we will try \$XDG_DATA_HOME/juju"
+        echo " or ~/.local/share/juju if \$XDG_DATA_HOME is not set)"
         exit 2
     fi
 }
@@ -52,7 +55,7 @@
 retrieve_released_tools() {
     # Retrieve previously released tools to ensure the metadata continues
     # to work for historic releases.
-    source ~/.juju/awstoolsrc
+    source ~/.local/share/juju/awstoolsrc
     s3cmd sync s3://juju-dist/tools/releases/ $DEST_TOOLS
 }
@@ -163,7 +166,7 @@
 publish_to_canonistack() {
     echo "Phase 6.1: Publish to canonistack."
     cd $DESTINATION
-    source ~/.juju/canonistacktoolsrc
+    source ~/.local/share/juju/canonistacktoolsrc
     ${GOPATH}/bin/juju --show-log \
         sync-tools -e public-tools-canonistack --dev --source=${DEST_DIST}
     # This needed to allow old deployments upgrade.
@@ -175,7 +178,7 @@
 publish_to_hp() {
     echo "Phase 6.2: Publish to HP Cloud."
     cd $DESTINATION
-    source ~/.juju/hptoolsrc
+    source ~/.local/share/juju/hptoolsrc
     ${GOPATH}/bin/juju --show-log \
         sync-tools -e public-tools-hp --dev --source=${DEST_DIST}
     # Support old tools location so that deployments can upgrade to new tools.
@@ -187,7 +190,7 @@
 publish_to_aws() {
     echo "Phase 6.3: Publish to AWS."
     cd $DESTINATION
-    source ~/.juju/awstoolsrc
+    source ~/.local/share/juju/awstoolsrc
     s3cmd sync ${DEST_DIST}/tools s3://juju-dist/
 }
@@ -197,7 +200,7 @@
     # each public file MUST match the destination path :(.
     echo "Phase 6.4: Publish to Azure."
     cd $DESTINATION
-    source ~/.juju/azuretoolsrc
+    source ~/.local/share/juju/azuretoolsrc
     cd ${DEST_DIST}
     public_files=$(find tools -name *.tgz -o -name *.json)
     for public_file in $public_files; do
@@ -211,6 +214,14 @@
     done
 }
 
+# Set $JUJU_DATA, if not set, to the proper path.
+if [ -z "$JUJU_DATA" ]; then
+    if [ -z "$XDG_DATA_HOME" ]; then
+        JUJU_DATA=~/.local/share/juju
+    else
+        JUJU_DATA=$XDG_DATA_HOME/juju
+    fi
+fi
 
 # These are the archives that are search for matching releases.
 UBUNTU_ARCH="http://archive.ubuntu.com/ubuntu/pool/universe/j/juju-core/"

=== modified file 'src/github.com/juju/juju/scripts/win-installer/README.txt'
--- src/github.com/juju/juju/scripts/win-installer/README.txt	2014-08-20 15:00:12 +0000
+++ src/github.com/juju/juju/scripts/win-installer/README.txt	2016-03-22 15:18:22 +0000
@@ -1,16 +1,16 @@
 Introduction to Juju
 
-This tutorial will show you how to get started with Juju, including installing, configuring and bootstrapping a new Juju environment.
Before you start you will need: +This tutorial will show you how to get started with Juju, including installing, configuring and bootstrapping a new Juju model. Before you start you will need: * An Ubuntu, Windows or OSX machine to install the client on. -* An environment which can provide a new server with an Ubuntu cloud operating system image on-demand. This includes services such as Microsoft Azure, Amazon EC2, HP Cloud, or an OpenStack installation. +* A cloud which can provide a new server with an Ubuntu cloud operating system image on-demand. This includes services such as Microsoft Azure, Amazon EC2, HP Cloud, or an OpenStack installation. * An SSH key-pair. Juju expects to find ssh keys called id_rsa and id_rsa.pub in a .ssh folder in your home directory. Configuring -Now the Juju software is installed, it needs to be configured to use your particular cloud provider. This is done by generating and editing a file, "environments.yaml", which will live in your %LOCALAPPDATA%\Juju directory. You can generate the environments file manually, but Juju also includes a boilerplate configuration option that will flesh out most of the file for you and minimise the amount of work (and potential errors). +Now the Juju software is installed, it needs to be configured to use your particular cloud provider. This is done by generating and editing a file, "environments.yaml", which will live in your %LOCALAPPDATA%\Juju directory. You can generate the environments.yaml file manually, but Juju also includes a boilerplate configuration option that will flesh out most of the file for you and minimise the amount of work (and potential errors). To generate an initial config file, you simply need to run: @@ -25,11 +25,11 @@ Once you have installed and configured Juju, it is probably a good idea to take it for a bit of a test drive and check that everything is working as expected. Because Juju makes it really easy to deploy services, this is actually quick and straightforward. -The first thing to do is set up a bootstrap environment. This is an instance in the cloud that Juju will use to deploy and control other services with. It will be created according to the configuration you have provided, and your SSH key will automatically be uploaded so that Juju can communicate securely with the bootstrap instance. +The first thing to do is set up a bootstrap model. This is an instance in the cloud that Juju will use to deploy and control other services with. It will be created according to the configuration you have provided, and your SSH key will automatically be uploaded so that Juju can communicate securely with the bootstrap instance. > juju bootstrap -Note: If you have multiple environments configured, you can choose which one to address with a particular command by adding the -e switch followed by the environment name, E.g. "-e hpcloud". +Note: If you have multiple models configured, you can choose which one to address with a particular command by adding the -m switch followed by the model name, e.g. "-m hpcloud". You may have to wait a few moments for this command to return, as it needs to perform various tasks and contact your cloud provider. @@ -126,8 +126,8 @@ To remove all current deployments and clear up everything in your cloud, you can run the command: -> juju destroy-environment +> juju destroy-model This will remove everything, including the bootstrap node.
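With several models configured, the same cleanup can be pointed at one model by naming it. Assuming the 2.0-beta CLI accepts the model name as an argument here (and reusing the purely illustrative name from the note above):

> juju destroy-model hpcloud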
-To learn more about charms, including configuring options and managing running systems, you should continue to read the charm documentation here: https://juju.ubuntu.com/docs/charms.html \ No newline at end of file +To learn more about charms, including configuring options and managing running systems, you should continue to read the charm documentation here: https://juju.ubuntu.com/docs/charms.html === modified file 'src/github.com/juju/juju/scripts/win-installer/setup.iss' --- src/github.com/juju/juju/scripts/win-installer/setup.iss 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/scripts/win-installer/setup.iss 2016-03-22 15:18:22 +0000 @@ -2,7 +2,7 @@ ; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! #define MyAppName "Juju" -#define MyAppVersion "1.25.4" +#define MyAppVersion "2.0-beta2" #define MyAppPublisher "Canonical, Ltd" #define MyAppURL "http://juju.ubuntu.com/" #define MyAppExeName "juju.exe" === modified file 'src/github.com/juju/juju/service/discovery.go' --- src/github.com/juju/juju/service/discovery.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/discovery.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,8 @@ "github.com/juju/errors" "github.com/juju/utils/featureflag" + "github.com/juju/utils/os" + "github.com/juju/utils/series" "github.com/juju/utils/shell" "github.com/juju/juju/feature" @@ -13,14 +15,8 @@ "github.com/juju/juju/service/systemd" "github.com/juju/juju/service/upstart" "github.com/juju/juju/service/windows" - "github.com/juju/juju/version" ) -// This exists to allow patching during tests. -var getVersion = func() version.Binary { - return version.Current -} - // DiscoverService returns an interface to a service appropriate // for the current system func DiscoverService(name string, conf common.Conf) (Service, error) { @@ -29,8 +25,7 @@ return nil, errors.Trace(err) } - jujuVersion := getVersion() - service, err := newService(name, conf, initName, jujuVersion.Series) + service, err := newService(name, conf, initName, series.HostSeries()) if err != nil { return nil, errors.Trace(err) } @@ -41,8 +36,7 @@ initName, err := discoverLocalInitSystem() if errors.IsNotFound(err) { // Fall back to checking the juju version. - jujuVersion := getVersion() - versionInitName, err2 := VersionInitSystem(jujuVersion.Series) + versionInitName, err2 := VersionInitSystem(series.HostSeries()) if err2 != nil { // The key error is the one from discoverLocalInitSystem so // that is what we return. 
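The discovery change above gives DiscoverService a two-step shape: probe the running host for its init system, and only when that probe reports not-found fall back to inferring one from the host series. A compressed, self-contained sketch of that control flow (probe and bySeries are hypothetical stand-ins for discoverLocalInitSystem and VersionInitSystem; this is not code from the patch):

    package main

    import (
        "fmt"

        "github.com/juju/errors"
    )

    // chooseInitSystem probes the local host first and falls back to a
    // series-based lookup only when probing reports not-found. Any other
    // probe failure is returned as-is; if the fallback also fails, the
    // probe error is kept, since it is the more informative of the two.
    func chooseInitSystem(
        probe func() (string, error),
        bySeries func(string) (string, error),
        hostSeries string,
    ) (string, error) {
        name, err := probe()
        if err == nil {
            return name, nil
        }
        if !errors.IsNotFound(err) {
            return "", errors.Trace(err)
        }
        name, err2 := bySeries(hostSeries)
        if err2 != nil {
            return "", errors.Trace(err)
        }
        return name, nil
    }

    func main() {
        probe := func() (string, error) { return "", errors.NotFoundf("init system") }
        bySeries := func(series string) (string, error) { return "systemd", nil }
        name, err := chooseInitSystem(probe, bySeries, "vivid")
        fmt.Println(name, err)
    }

This mirrors why the hunk below returns the error from discoverLocalInitSystem rather than err2: the local probe failure is the one worth reporting.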
@@ -66,18 +60,18 @@ return initName, nil } -func versionInitSystem(series string) (string, error) { - os, err := version.GetOSFromSeries(series) +func versionInitSystem(ser string) (string, error) { + seriesos, err := series.GetOSFromSeries(ser) if err != nil { - notFound := errors.NotFoundf("init system for series %q", series) + notFound := errors.NotFoundf("init system for series %q", ser) return "", errors.Wrap(err, notFound) } - switch os { - case version.Windows: + switch seriesos { + case os.Windows: return InitSystemWindows, nil - case version.Ubuntu: - switch series { + case os.Ubuntu: + switch ser { case "precise", "quantal", "raring", "saucy", "trusty", "utopic": return InitSystemUpstart, nil default: @@ -87,10 +81,10 @@ } return InitSystemSystemd, nil } - case version.CentOS: + case os.CentOS: return InitSystemSystemd, nil } - return "", errors.NotFoundf("unknown os %q (from series %q), init system", os, series) + return "", errors.NotFoundf("unknown os %q (from series %q), init system", seriesos, ser) } type discoveryCheck struct { === modified file 'src/github.com/juju/juju/service/discovery_test.go' --- src/github.com/juju/juju/service/discovery_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/discovery_test.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,8 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils/exec" "github.com/juju/utils/featureflag" + jujuos "github.com/juju/utils/os" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/feature" @@ -37,14 +39,13 @@ const unknownExecutable = "/sbin/unknown/init/system" type discoveryTest struct { - os version.OSType + os jujuos.OSType series string expected string } func (dt discoveryTest) version() version.Binary { return version.Binary{ - OS: dt.os, Series: dt.series, } } @@ -58,7 +59,7 @@ } func (dt discoveryTest) disableVersionDiscovery(s *discoverySuite) { - dt.os = version.Unknown + dt.os = jujuos.Unknown dt.setVersion(s) } @@ -68,7 +69,7 @@ func (dt discoveryTest) setVersion(s *discoverySuite) version.Binary { vers := dt.version() - s.PatchVersion(vers) + s.PatchSeries(vers.Series) return vers } @@ -113,31 +114,31 @@ } var discoveryTests = []discoveryTest{{ - os: version.Windows, + os: jujuos.Windows, series: "win2012", expected: service.InitSystemWindows, }, { - os: version.Ubuntu, + os: jujuos.Ubuntu, series: "oneiric", expected: "", }, { - os: version.Ubuntu, + os: jujuos.Ubuntu, series: "precise", expected: service.InitSystemUpstart, }, { - os: version.Ubuntu, + os: jujuos.Ubuntu, series: "utopic", expected: service.InitSystemUpstart, }, { - os: version.Ubuntu, + os: jujuos.Ubuntu, series: "vivid", expected: maybeSystemd, }, { - os: version.CentOS, + os: jujuos.CentOS, series: "centos7", expected: service.InitSystemSystemd, }, { - os: version.Unknown, + os: jujuos.Unknown, expected: "", }} @@ -179,13 +180,13 @@ case "windows": localInitSystem = service.InitSystemWindows case "linux": - localInitSystem, err = service.VersionInitSystem(version.Current.Series) + localInitSystem, err = service.VersionInitSystem(series.HostSeries()) } c.Assert(err, gc.IsNil) test := discoveryTest{ - os: version.Current.OS, - series: version.Current.Series, + os: jujuos.HostOS(), + series: series.HostSeries(), expected: localInitSystem, } test.disableVersionDiscovery(s) @@ -220,7 +221,7 @@ func (s *discoverySuite) TestVersionInitSystemLegacyUpstart(c *gc.C) { s.setLegacyUpstart(c) test := discoveryTest{ - os: version.Ubuntu, + os: jujuos.Ubuntu, series: "vivid", expected: 
service.InitSystemUpstart, } @@ -234,7 +235,7 @@ func (s *discoverySuite) TestVersionInitSystemNoLegacyUpstart(c *gc.C) { s.unsetLegacyUpstart(c) test := discoveryTest{ - os: version.Ubuntu, + os: jujuos.Ubuntu, series: "vivid", expected: service.InitSystemSystemd, } === modified file 'src/github.com/juju/juju/service/service.go' --- src/github.com/juju/juju/service/service.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/service.go 2016-03-22 15:18:22 +0000 @@ -10,13 +10,13 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/series" "github.com/juju/juju/juju/paths" "github.com/juju/juju/service/common" "github.com/juju/juju/service/systemd" "github.com/juju/juju/service/upstart" "github.com/juju/juju/service/windows" - "github.com/juju/juju/version" ) var ( @@ -138,7 +138,7 @@ // ListServices lists all installed services on the running system func ListServices() ([]string, error) { - initName, err := VersionInitSystem(version.Current.Series) + initName, err := VersionInitSystem(series.HostSeries()) if err != nil { return nil, errors.Trace(err) } === modified file 'src/github.com/juju/juju/service/systemd/conf.go' --- src/github.com/juju/juju/service/systemd/conf.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/systemd/conf.go 2016-03-22 15:18:22 +0000 @@ -12,10 +12,10 @@ "github.com/coreos/go-systemd/unit" "github.com/juju/errors" + "github.com/juju/utils/os" "github.com/juju/utils/shell" "github.com/juju/juju/service/common" - "github.com/juju/juju/version" ) var limitMap = map[string]string{ @@ -43,15 +43,12 @@ } func syslogUserGroup() (string, string) { - var user, group string - switch version.Current.OS { - case version.CentOS: - user, group = "root", "adm" + switch os.HostOS() { + case os.CentOS: + return "root", "adm" default: - user, group = "syslog", "syslog" + return "syslog", "syslog" } - - return user, group } // normalize adjusts the conf to more standardized content and === modified file 'src/github.com/juju/juju/service/systemd/export_test.go' --- src/github.com/juju/juju/service/systemd/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/service/systemd/export_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,8 @@ ) var ( - Serialize = serialize + Serialize = serialize + SyslogUserGroup = syslogUserGroup ) type patcher interface { === modified file 'src/github.com/juju/juju/service/systemd/service_test.go' --- src/github.com/juju/juju/service/systemd/service_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/systemd/service_test.go 2016-03-22 15:18:22 +0000 @@ -237,19 +237,20 @@ s.conf.Logfile = "/var/log/juju/machine-0.log" service := s.newService(c) + user, group := systemd.SyslogUserGroup() dirname := fmt.Sprintf("%s/init/%s", s.dataDir, s.name) script := ` #!/usr/bin/env bash # Set up logging. touch '/var/log/juju/machine-0.log' -chown syslog:syslog '/var/log/juju/machine-0.log' +chown `[1:] + user + `:` + group + ` '/var/log/juju/machine-0.log' chmod 0600 '/var/log/juju/machine-0.log' exec >> '/var/log/juju/machine-0.log' exec 2>&1 # Run the script. -`[1:] + jujud + " machine-0" +` + jujud + " machine-0" c.Check(service, jc.DeepEquals, &systemd.Service{ Service: common.Service{ Name: s.name, @@ -786,6 +787,7 @@ commands, err := service.InstallCommands() c.Assert(err, jc.ErrorIsNil) + user, group := systemd.SyslogUserGroup() test := systemdtesting.WriteConfTest{ Service: name, DataDir: s.dataDir, @@ -797,13 +799,13 @@ Script: ` # Set up logging. 
touch '/var/log/juju/machine-0.log' -chown syslog:syslog '/var/log/juju/machine-0.log' +chown `[1:] + user + `:` + group + ` '/var/log/juju/machine-0.log' chmod 0600 '/var/log/juju/machine-0.log' exec >> '/var/log/juju/machine-0.log' exec 2>&1 # Run the script. -`[1:] + jujud + " machine-0", +` + jujud + " machine-0", } test.CheckCommands(c, commands) } === modified file 'src/github.com/juju/juju/service/testing_test.go' --- src/github.com/juju/juju/service/testing_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/testing_test.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,7 @@ "github.com/juju/errors" "github.com/juju/testing" "github.com/juju/utils" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/service/common" @@ -95,9 +96,8 @@ }) } -func (s *BaseSuite) PatchVersion(vers version.Binary) { - s.Patched.Version = vers - s.PatchValue(&getVersion, s.Patched.GetVersion) +func (s *BaseSuite) PatchSeries(ser string) { + s.PatchValue(&series.HostSeries, func() string { return ser }) } func NewDiscoveryCheck(name string, running bool, failure error) discoveryCheck { === modified file 'src/github.com/juju/juju/service/upstart/upstart.go' --- src/github.com/juju/juju/service/upstart/upstart.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/upstart/upstart.go 2016-03-22 15:18:22 +0000 @@ -19,7 +19,6 @@ "github.com/juju/utils/shell" "github.com/juju/juju/service/common" - "github.com/juju/juju/utils" ) var ( @@ -49,14 +48,33 @@ return true, nil } - msg := fmt.Sprintf("exec %q failed", initctlPath) - if utils.IsCmdNotFoundErr(err) { + if isCmdNotFoundErr(err) { return false, nil } // Note: initctl will fail if upstart is installed but not running. // The error message will be: // Name "com.ubuntu.Upstart" does not exist - return false, errors.Annotatef(err, msg) + return false, errors.Annotatef(err, "exec %q failed", initctlPath) +} + +// isCmdNotFoundErr returns true if the provided error indicates that the +// command passed to exec.LookPath or exec.Command was not found. 
+func isCmdNotFoundErr(err error) bool { + err = errors.Cause(err) + if os.IsNotExist(err) { + // Executable could not be found, go 1.3 and later + return true + } + if err == exec.ErrNotFound { + return true + } + if execErr, ok := err.(*exec.Error); ok { + // Executable could not be found, go 1.2 + if os.IsNotExist(execErr.Err) || execErr.Err == exec.ErrNotFound { + return true + } + } + return false } // ListServices returns the name of all installed services on the === modified file 'src/github.com/juju/juju/service/windows/service.go' --- src/github.com/juju/juju/service/windows/service.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/windows/service.go 2016-03-22 15:18:22 +0000 @@ -221,7 +221,7 @@ return errors.Errorf("Service %s already installed", s.Service.Name) } - logger.Infof("Installing Service %v", s.Name) + logger.Infof("Installing Service %v", s.Name()) err = s.manager.Create(s.Name(), s.Conf()) if err != nil { return errors.Trace(err) === modified file 'src/github.com/juju/juju/service/windows/service_test.go' --- src/github.com/juju/juju/service/windows/service_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/service/windows/service_test.go 2016-03-22 15:18:22 +0000 @@ -1,15 +1,14 @@ package windows_test import ( - coretesting "github.com/juju/juju/testing" + "github.com/juju/errors" "github.com/juju/testing" - - "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "github.com/juju/juju/service/common" "github.com/juju/juju/service/windows" + coretesting "github.com/juju/juju/testing" ) type serviceSuite struct { === modified file 'src/github.com/juju/juju/service/windows/service_windows.go' --- src/github.com/juju/juju/service/windows/service_windows.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/service/windows/service_windows.go 2016-03-22 15:18:22 +0000 @@ -49,7 +49,7 @@ // https://msdn.microsoft.com/en-us/library/windows/desktop/ms685937(v=vs.85).aspx type serviceFailureActionsFlag struct { - failureActionsOnNonCrashFailures bool + failureActionsOnNonCrashFailures int32 } // This is done so we can mock this function out @@ -425,7 +425,7 @@ return errors.Trace(err) } flag := serviceFailureActionsFlag{ - failureActionsOnNonCrashFailures: true, + failureActionsOnNonCrashFailures: 1, } err = WinChangeServiceConfig2(handle, SERVICE_CONFIG_FAILURE_ACTIONS_FLAG, (*byte)(unsafe.Pointer(&flag))) if err != nil { === modified file 'src/github.com/juju/juju/service/windows/stubwinsvc_test.go' --- src/github.com/juju/juju/service/windows/stubwinsvc_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/service/windows/stubwinsvc_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,6 @@ "github.com/gabriel-samfira/sys/windows" "github.com/gabriel-samfira/sys/windows/svc" "github.com/gabriel-samfira/sys/windows/svc/mgr" - "github.com/juju/testing" ) === modified file 'src/github.com/juju/juju/state/action.go' --- src/github.com/juju/juju/state/action.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/action.go 2016-03-22 15:18:22 +0000 @@ -49,8 +49,8 @@ // composite _id. DocId string `bson:"_id"` - // EnvUUID is the environment identifier. - EnvUUID string `bson:"env-uuid"` + // ModelUUID is the model identifier. + ModelUUID string `bson:"model-uuid"` // Receiver is the Name of the Unit or any other ActionReceiver for // which this notification is queued. @@ -65,8 +65,8 @@ // DocId is the key for this document; it is a UUID. DocId string `bson:"_id"` - // EnvUUID is the environment identifier. 
- EnvUUID string `bson:"env-uuid"` + // ModelUUID is the model identifier. + ModelUUID string `bson:"model-uuid"` // Receiver is the Name of the Unit or any other ActionReceiver for // which this Action is queued. @@ -262,20 +262,20 @@ return actionDoc{}, actionNotificationDoc{}, err } actionLogger.Debugf("newActionDoc name: '%s', receiver: '%s', actionId: '%s'", actionName, receiverTag, actionId) - envuuid := st.EnvironUUID() + modelUUID := st.ModelUUID() return actionDoc{ DocId: st.docID(actionId.String()), - EnvUUID: envuuid, + ModelUUID: modelUUID, Receiver: receiverTag.Id(), Name: actionName, Parameters: parameters, Enqueued: nowToTheSecond(), Status: ActionPending, }, actionNotificationDoc{ - DocId: st.docID(prefix + actionId.String()), - EnvUUID: envuuid, - Receiver: receiverTag.Id(), - ActionID: actionId.String(), + DocId: st.docID(prefix + actionId.String()), + ModelUUID: modelUUID, + Receiver: receiverTag.Id(), + ActionID: actionId.String(), }, nil } === modified file 'src/github.com/juju/juju/state/action_test.go' --- src/github.com/juju/juju/state/action_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/action_test.go 2016-03-22 15:18:22 +0000 @@ -609,13 +609,15 @@ {changes: "a0,a1,a2", adds: "a1,a4,a5", removes: "a1,a3", expected: "a0,a2,a4,a5"}, } + prefix := state.DocID(s.State, "") + for ix, test := range tests { - updates := mapify(test.adds, test.removes) - changes := sliceify(test.changes) - expected := sliceify(test.expected) + updates := mapify(prefix, test.adds, test.removes) + changes := sliceify("", test.changes) + expected := sliceify("", test.expected) c.Log(fmt.Sprintf("test number %d %#v", ix, test)) - err := state.WatcherMergeIds(s.State, &changes, updates) + err := state.WatcherMergeIds(s.State, &changes, updates, state.ActionNotificationIdToActionId) c.Assert(err, jc.ErrorIsNil) c.Assert(changes, jc.SameContents, expected) } @@ -624,28 +626,19 @@ func (s *ActionSuite) TestMergeIdsErrors(c *gc.C) { var tests = []struct { - ok bool name string key interface{} }{ - {ok: false, name: "bool", key: true}, - {ok: false, name: "int", key: 0}, - {ok: false, name: "chan string", key: make(chan string)}, - - {ok: true, name: "string", key: ""}, + {name: "bool", key: true}, + {name: "int", key: 0}, + {name: "chan string", key: make(chan string)}, } for _, test := range tests { changes, updates := []string{}, map[interface{}]bool{} - updates[test.key] = true - err := state.WatcherMergeIds(s.State, &changes, updates) - - if test.ok { - c.Assert(err, jc.ErrorIsNil) - } else { - c.Assert(err, gc.ErrorMatches, "id is not of type string, got "+test.name) - } + err := state.WatcherMergeIds(s.State, &changes, updates, state.ActionNotificationIdToActionId) + c.Assert(err, gc.ErrorMatches, "id is not of type string, got "+test.name) } } @@ -820,12 +813,12 @@ // easier. It combines two comma delimited strings representing // additions and removals and turns it into the map[interface{}]bool // format needed -func mapify(adds, removes string) map[interface{}]bool { +func mapify(prefix, adds, removes string) map[interface{}]bool { m := map[interface{}]bool{} - for _, v := range sliceify(adds) { + for _, v := range sliceify(prefix, adds) { m[v] = true } - for _, v := range sliceify(removes) { + for _, v := range sliceify(prefix, removes) { m[v] = false } return m @@ -833,7 +826,7 @@ // sliceify turns a comma separated list of strings into a slice // trimming white space and excluding empty strings. 
-func sliceify(csvlist string) []string { +func sliceify(prefix, csvlist string) []string { slice := []string{} if csvlist == "" { return slice @@ -841,7 +834,7 @@ for _, entry := range strings.Split(csvlist, ",") { clean := strings.TrimSpace(entry) if clean != "" { - slice = append(slice, clean) + slice = append(slice, prefix+clean) } } return slice === modified file 'src/github.com/juju/juju/state/addmachine.go' --- src/github.com/juju/juju/state/addmachine.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/addmachine.go 2016-03-22 15:18:22 +0000 @@ -32,13 +32,13 @@ // Jobs holds the jobs to run on the machine's instance. // A machine must have at least one job to do. - // JobManageEnviron can only be part of the jobs + // JobManageModel can only be part of the jobs // when the first (bootstrap) machine is added. Jobs []MachineJob // NoVote holds whether a machine running - // a state server should abstain from peer voting. - // It is ignored if Jobs does not contain JobManageEnviron. + // a controller should abstain from peer voting. + // It is ignored if Jobs does not contain JobManageModel. NoVote bool // Addresses holds the addresses to be associated with the @@ -158,11 +158,11 @@ func (st *State) AddMachines(templates ...MachineTemplate) (_ []*Machine, err error) { defer errors.DeferredAnnotatef(&err, "cannot add a new machine") var ms []*Machine - env, err := st.Environment() + env, err := st.Model() if err != nil { return nil, errors.Trace(err) } else if env.Life() != Alive { - return nil, errors.New("environment is no longer alive") + return nil, errors.New("model is no longer alive") } var ops []txn.Op var mdocs []*machineDoc @@ -182,30 +182,30 @@ ms = append(ms, newMachine(st, mdoc)) ops = append(ops, addOps...) } - ssOps, err := st.maintainStateServersOps(mdocs, nil) + ssOps, err := st.maintainControllersOps(mdocs, nil) if err != nil { return nil, errors.Trace(err) } ops = append(ops, ssOps...) ops = append(ops, env.assertAliveOp()) if err := st.runTransaction(ops); err != nil { - return nil, onAbort(err, errors.New("environment is no longer alive")) + return nil, onAbort(err, errors.New("model is no longer alive")) } return ms, nil } func (st *State) addMachine(mdoc *machineDoc, ops []txn.Op) (*Machine, error) { - env, err := st.Environment() + env, err := st.Model() if err != nil { return nil, err } else if env.Life() != Alive { - return nil, errors.New("environment is no longer alive") + return nil, errors.New("model is no longer alive") } ops = append([]txn.Op{env.assertAliveOp()}, ops...) if err := st.runTransaction(ops); err != nil { enverr := env.Refresh() if (enverr == nil && env.Life() != Alive) || errors.IsNotFound(enverr) { - return nil, errors.New("environment is no longer alive") + return nil, errors.New("model is no longer alive") } else if enverr != nil { err = enverr } @@ -232,7 +232,7 @@ // valid and combines it with values from the state // to produce a resulting template that more accurately // represents the data that will be inserted into the state. -func (st *State) effectiveMachineTemplate(p MachineTemplate, allowStateServer bool) (tmpl MachineTemplate, err error) { +func (st *State) effectiveMachineTemplate(p MachineTemplate, allowController bool) (tmpl MachineTemplate, err error) { // First check for obvious errors. 
if p.Series == "" { return tmpl, errors.New("no series specified") @@ -260,9 +260,9 @@ } jset[j] = true } - if jset[JobManageEnviron] { - if !allowStateServer { - return tmpl, errStateServerNotAllowed + if jset[JobManageModel] { + if !allowController { + return tmpl, errControllerNotAllowed } } return p, nil @@ -272,7 +272,7 @@ // based on the given template. It also returns the machine document // that will be inserted. func (st *State) addMachineOps(template MachineTemplate) (*machineDoc, []txn.Op, error) { - template, err := st.effectiveMachineTemplate(template, st.IsStateServer()) + template, err := st.effectiveMachineTemplate(template, st.IsController()) if err != nil { return nil, nil, err } @@ -290,7 +290,7 @@ if err != nil { return nil, nil, errors.Trace(err) } - prereqOps = append(prereqOps, assertEnvAliveOp(st.EnvironUUID())) + prereqOps = append(prereqOps, assertModelAliveOp(st.ModelUUID())) prereqOps = append(prereqOps, st.insertNewContainerRefOp(mdoc.Id)) if template.InstanceId != "" { prereqOps = append(prereqOps, txn.Op{ @@ -301,7 +301,7 @@ DocID: mdoc.DocID, MachineId: mdoc.Id, InstanceId: template.InstanceId, - EnvUUID: mdoc.EnvUUID, + ModelUUID: mdoc.ModelUUID, Arch: template.HardwareCharacteristics.Arch, Mem: template.HardwareCharacteristics.Mem, RootDisk: template.HardwareCharacteristics.RootDisk, @@ -459,7 +459,7 @@ return &machineDoc{ DocID: st.docID(id), Id: id, - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Series: template.Series, Jobs: template.Jobs, Clean: !template.Dirty, @@ -478,29 +478,14 @@ // document into the database, based on the given template. Only the // constraints and networks are used from the template. func (st *State) insertNewMachineOps(mdoc *machineDoc, template MachineTemplate) (prereqOps []txn.Op, machineOp txn.Op, err error) { - machineOp = txn.Op{ - C: machinesC, - Id: mdoc.DocID, - Assert: txn.DocMissing, - Insert: mdoc, - } statusDoc := statusDoc{ - Status: StatusPending, - EnvUUID: st.EnvironUUID(), - Updated: time.Now().UnixNano(), - } - globalKey := machineGlobalKey(mdoc.Id) - prereqOps = []txn.Op{ - createConstraintsOp(st, globalKey, template.Constraints), - createStatusOp(st, globalKey, statusDoc), - // TODO(dimitern) 2014-04-04 bug #1302498 - // Once we can add networks independently of machine - // provisioning, we should check the given networks are valid - // and known before setting them. - createRequestedNetworksOp(st, globalKey, template.RequestedNetworks), - createMachineBlockDevicesOp(mdoc.Id), - } + Status: StatusPending, + ModelUUID: st.ModelUUID(), + Updated: time.Now().UnixNano(), + } + + prereqOps, machineOp = st.baseNewMachineOps( + mdoc, statusDoc, template.Constraints, template.RequestedNetworks) storageOps, volumeAttachments, filesystemAttachments, err := st.machineStorageOps( mdoc, &machineStorageParams{ @@ -525,10 +510,30 @@ // history entry. This is risky, and may lead to extra entries, but that's // an intrinsic problem with mixing txn and non-txn ops -- we can't sync // them cleanly.
- probablyUpdateStatusHistory(st, globalKey, statusDoc) + probablyUpdateStatusHistory(st, machineGlobalKey(mdoc.Id), statusDoc) return prereqOps, machineOp, nil } +func (st *State) baseNewMachineOps(mdoc *machineDoc, statusDoc statusDoc, cons constraints.Value, networks []string) (prereqOps []txn.Op, machineOp txn.Op) { + machineOp = txn.Op{ + C: machinesC, + Id: mdoc.DocID, + Assert: txn.DocMissing, + Insert: mdoc, + } + + globalKey := machineGlobalKey(mdoc.Id) + prereqOps = []txn.Op{ + createConstraintsOp(st, globalKey, cons), + createStatusOp(st, globalKey, statusDoc), + // TODO(dimitern): Drop requested networks across the board in a + // follow-up. + createRequestedNetworksOp(st, globalKey, networks), + createMachineBlockDevicesOp(mdoc.Id), + } + return prereqOps, machineOp +} + type machineStorageParams struct { volumes []MachineVolumeParams volumeAttachments map[names.VolumeTag]VolumeAttachmentParams @@ -662,17 +667,17 @@ return false } -var errStateServerNotAllowed = errors.New("state server jobs specified but not allowed") +var errControllerNotAllowed = errors.New("controller jobs specified but not allowed") -// maintainStateServersOps returns a set of operations that will maintain -// the state server information when the given machine documents +// maintainControllersOps returns a set of operations that will maintain +// the controller information when the given machine documents // are added to the machines collection. If currentInfo is nil, // there can be only one machine document and it must have // id 0 (this is a special case to allow adding the bootstrap machine) -func (st *State) maintainStateServersOps(mdocs []*machineDoc, currentInfo *StateServerInfo) ([]txn.Op, error) { +func (st *State) maintainControllersOps(mdocs []*machineDoc, currentInfo *ControllerInfo) ([]txn.Op, error) { var newIds, newVotingIds []string for _, doc := range mdocs { - if !hasJob(doc.Jobs, JobManageEnviron) { + if !hasJob(doc.Jobs, JobManageModel) { continue } newIds = append(newIds, doc.Id) @@ -686,20 +691,20 @@ if currentInfo == nil { // Allow bootstrap machine only. if len(mdocs) != 1 || mdocs[0].Id != "0" { - return nil, errStateServerNotAllowed + return nil, errControllerNotAllowed } var err error - currentInfo, err = st.StateServerInfo() + currentInfo, err = st.ControllerInfo() if err != nil { - return nil, errors.Annotate(err, "cannot get state server info") + return nil, errors.Annotate(err, "cannot get controller info") } if len(currentInfo.MachineIds) > 0 || len(currentInfo.VotingMachineIds) > 0 { - return nil, errors.New("state servers already exist") + return nil, errors.New("controllers already exist") } } ops := []txn.Op{{ - C: stateServersC, - Id: environGlobalKey, + C: controllersC, + Id: modelGlobalKey, Assert: bson.D{{ "$and", []bson.D{ {{"machineids", bson.D{{"$size", len(currentInfo.MachineIds)}}}}, @@ -714,40 +719,40 @@ return ops, nil } -// EnsureAvailability adds state server machines as necessary to make -// the number of live state servers equal to numStateServers. The given +// EnableHA adds controller machines as necessary to make +// the number of live controllers equal to numControllers. The given // constraints and series will be attached to any new machines. // If placement is not empty, any new machines which may be required are started // according to the specified placement directives until the placement list is // exhausted; thereafter any new machines are started according to the constraints and series. 
-func (st *State) EnsureAvailability( - numStateServers int, cons constraints.Value, series string, placement []string, -) (StateServersChanges, error) { +func (st *State) EnableHA( + numControllers int, cons constraints.Value, series string, placement []string, +) (ControllersChanges, error) { - if numStateServers < 0 || (numStateServers != 0 && numStateServers%2 != 1) { - return StateServersChanges{}, errors.New("number of state servers must be odd and non-negative") - } - if numStateServers > replicaset.MaxPeers { - return StateServersChanges{}, errors.Errorf("state server count is too large (allowed %d)", replicaset.MaxPeers) - } - var change StateServersChanges + if numControllers < 0 || (numControllers != 0 && numControllers%2 != 1) { + return ControllersChanges{}, errors.New("number of controllers must be odd and non-negative") + } + if numControllers > replicaset.MaxPeers { + return ControllersChanges{}, errors.Errorf("controller count is too large (allowed %d)", replicaset.MaxPeers) + } + var change ControllersChanges buildTxn := func(attempt int) ([]txn.Op, error) { - currentInfo, err := st.StateServerInfo() + currentInfo, err := st.ControllerInfo() if err != nil { return nil, err } - desiredStateServerCount := numStateServers - if desiredStateServerCount == 0 { - desiredStateServerCount = len(currentInfo.VotingMachineIds) - if desiredStateServerCount <= 1 { - desiredStateServerCount = 3 + desiredControllerCount := numControllers + if desiredControllerCount == 0 { + desiredControllerCount = len(currentInfo.VotingMachineIds) + if desiredControllerCount <= 1 { + desiredControllerCount = 3 } } - if len(currentInfo.VotingMachineIds) > desiredStateServerCount { - return nil, errors.New("cannot reduce state server count") + if len(currentInfo.VotingMachineIds) > desiredControllerCount { + return nil, errors.New("cannot reduce controller count") } - intent, err := st.ensureAvailabilityIntentions(currentInfo, placement) + intent, err := st.enableHAIntentions(currentInfo, placement) if err != nil { return nil, err } @@ -757,37 +762,37 @@ voteCount++ } } - if voteCount == desiredStateServerCount && len(intent.remove) == 0 { + if voteCount == desiredControllerCount && len(intent.remove) == 0 { return nil, jujutxn.ErrNoOperations } // Promote as many machines as we can to fulfil the shortfall. - if n := desiredStateServerCount - voteCount; n < len(intent.promote) { + if n := desiredControllerCount - voteCount; n < len(intent.promote) { intent.promote = intent.promote[:n] } voteCount += len(intent.promote) - if n := desiredStateServerCount - voteCount; n < len(intent.convert) { + if n := desiredControllerCount - voteCount; n < len(intent.convert) { intent.convert = intent.convert[:n] } voteCount += len(intent.convert) - intent.newCount = desiredStateServerCount - voteCount + intent.newCount = desiredControllerCount - voteCount logger.Infof("%d new machines; promoting %v; converting %v", intent.newCount, intent.promote, intent.convert) var ops []txn.Op - ops, change, err = st.ensureAvailabilityIntentionOps(intent, currentInfo, cons, series) + ops, change, err = st.enableHAIntentionOps(intent, currentInfo, cons, series) return ops, err } if err := st.run(buildTxn); err != nil { - err = errors.Annotate(err, "failed to create new state server machines") - return StateServersChanges{}, err + err = errors.Annotate(err, "failed to create new controller machines") + return ControllersChanges{}, err } return change, nil } -// Change in state servers after the ensure availability txn has committed. 
-type StateServersChanges struct { +// Change in controllers after the ensure availability txn has committed. +type ControllersChanges struct { Added []string Removed []string Maintained []string @@ -796,25 +801,25 @@ Converted []string } -// ensureAvailabilityIntentionOps returns operations to fulfil the desired intent. -func (st *State) ensureAvailabilityIntentionOps( - intent *ensureAvailabilityIntent, - currentInfo *StateServerInfo, +// enableHAIntentionOps returns operations to fulfil the desired intent. +func (st *State) enableHAIntentionOps( + intent *enableHAIntent, + currentInfo *ControllerInfo, cons constraints.Value, series string, -) ([]txn.Op, StateServersChanges, error) { +) ([]txn.Op, ControllersChanges, error) { var ops []txn.Op - var change StateServersChanges + var change ControllersChanges for _, m := range intent.promote { - ops = append(ops, promoteStateServerOps(m)...) + ops = append(ops, promoteControllerOps(m)...) change.Promoted = append(change.Promoted, m.doc.Id) } for _, m := range intent.demote { - ops = append(ops, demoteStateServerOps(m)...) + ops = append(ops, demoteControllerOps(m)...) change.Demoted = append(change.Demoted, m.doc.Id) } for _, m := range intent.convert { - ops = append(ops, convertStateServerOps(m)...) + ops = append(ops, convertControllerOps(m)...) change.Converted = append(change.Converted, m.doc.Id) } // Use any placement directives that have been provided @@ -836,14 +841,14 @@ Series: series, Jobs: []MachineJob{ JobHostUnits, - JobManageEnviron, + JobManageModel, }, Constraints: cons, Placement: getPlacement(), } mdoc, addOps, err := st.addMachineOps(template) if err != nil { - return nil, StateServersChanges{}, err + return nil, ControllersChanges{}, err } mdocs[i] = mdoc ops = append(ops, addOps...) @@ -851,7 +856,7 @@ } for _, m := range intent.remove { - ops = append(ops, removeStateServerOps(m)...) + ops = append(ops, removeControllerOps(m)...) change.Removed = append(change.Removed, m.doc.Id) } @@ -859,45 +864,45 @@ for _, m := range intent.maintain { tag, err := names.ParseTag(m.Tag().String()) if err != nil { - return nil, StateServersChanges{}, errors.Annotate(err, "could not parse machine tag") + return nil, ControllersChanges{}, errors.Annotate(err, "could not parse machine tag") } if tag.Kind() != names.MachineTagKind { - return nil, StateServersChanges{}, errors.Errorf("expected machine tag kind, got %s", tag.Kind()) + return nil, ControllersChanges{}, errors.Errorf("expected machine tag kind, got %s", tag.Kind()) } change.Maintained = append(change.Maintained, tag.Id()) } - ssOps, err := st.maintainStateServersOps(mdocs, currentInfo) + ssOps, err := st.maintainControllersOps(mdocs, currentInfo) if err != nil { - return nil, StateServersChanges{}, errors.Annotate(err, "cannot prepare machine add operations") + return nil, ControllersChanges{}, errors.Annotate(err, "cannot prepare machine add operations") } ops = append(ops, ssOps...) return ops, change, nil } -// stateServerAvailable returns true if the specified state server machine is +// controllerAvailable returns true if the specified controller machine is // available. -var stateServerAvailable = func(m *Machine) (bool, error) { +var controllerAvailable = func(m *Machine) (bool, error) { // TODO(axw) #1271504 2014-01-22 - // Check the state server's associated mongo health; + // Check the controller's associated mongo health; // requires coordination with worker/peergrouper. 
return m.AgentPresence() } -type ensureAvailabilityIntent struct { +type enableHAIntent struct { newCount int placement []string promote, maintain, demote, remove, convert []*Machine } -// ensureAvailabilityIntentions returns what we would like +// enableHAIntentions returns what we would like // to do to maintain the availability of the existing servers // mentioned in the given info, including: // demoting unavailable, voting machines; // removing unavailable, non-voting, non-vote-holding machines; // gathering available, non-voting machines that may be promoted; -func (st *State) ensureAvailabilityIntentions(info *StateServerInfo, placement []string) (*ensureAvailabilityIntent, error) { - var intent ensureAvailabilityIntent +func (st *State) enableHAIntentions(info *ControllerInfo, placement []string) (*enableHAIntent, error) { + var intent enableHAIntent for _, s := range placement { // TODO(natefinch): unscoped placements shouldn't ever get here (though // they do currently). We should fix up the CLI to always add a scope @@ -921,7 +926,7 @@ return nil, errors.Annotatef(err, "can't find machine for placement directive %q", s) } if m.IsManager() { - return nil, errors.Errorf("machine for placement directive %q is already a state server", s) + return nil, errors.Errorf("machine for placement directive %q is already a controller", s) } intent.convert = append(intent.convert, m) intent.placement = append(intent.placement, s) @@ -935,7 +940,7 @@ if err != nil { return nil, err } - available, err := stateServerAvailable(m) + available, err := controllerAvailable(m) if err != nil { return nil, err } @@ -952,7 +957,7 @@ // The machine wants to vote, so we simply set novote and allow it // to run its course to have its vote removed by the worker that // maintains the replicaset. We will replace it with an existing - // non-voting state server if there is one, starting a new one if + // non-voting controller if there is one, starting a new one if // not. intent.demote = append(intent.demote, m) } else if m.HasVote() { @@ -960,7 +965,7 @@ intent.maintain = append(intent.maintain, m) } else { // The machine neither wants to nor has a vote, so remove its - // JobManageEnviron job immediately. + // JobManageModel job immediately. 
intent.remove = append(intent.remove, m) } } @@ -969,18 +974,18 @@ return &intent, nil } -func convertStateServerOps(m *Machine) []txn.Op { +func convertControllerOps(m *Machine) []txn.Op { return []txn.Op{{ C: machinesC, Id: m.doc.DocID, Update: bson.D{ - {"$addToSet", bson.D{{"jobs", JobManageEnviron}}}, + {"$addToSet", bson.D{{"jobs", JobManageModel}}}, {"$set", bson.D{{"novote", false}}}, }, - Assert: bson.D{{"jobs", bson.D{{"$nin", []MachineJob{JobManageEnviron}}}}}, + Assert: bson.D{{"jobs", bson.D{{"$nin", []MachineJob{JobManageModel}}}}}, }, { - C: stateServersC, - Id: environGlobalKey, + C: controllersC, + Id: modelGlobalKey, Update: bson.D{ {"$addToSet", bson.D{{"votingmachineids", m.doc.Id}}}, {"$addToSet", bson.D{{"machineids", m.doc.Id}}}, @@ -988,44 +993,44 @@ }} } -func promoteStateServerOps(m *Machine) []txn.Op { +func promoteControllerOps(m *Machine) []txn.Op { return []txn.Op{{ C: machinesC, Id: m.doc.DocID, Assert: bson.D{{"novote", true}}, Update: bson.D{{"$set", bson.D{{"novote", false}}}}, }, { - C: stateServersC, - Id: environGlobalKey, + C: controllersC, + Id: modelGlobalKey, Update: bson.D{{"$addToSet", bson.D{{"votingmachineids", m.doc.Id}}}}, }} } -func demoteStateServerOps(m *Machine) []txn.Op { +func demoteControllerOps(m *Machine) []txn.Op { return []txn.Op{{ C: machinesC, Id: m.doc.DocID, Assert: bson.D{{"novote", false}}, Update: bson.D{{"$set", bson.D{{"novote", true}}}}, }, { - C: stateServersC, - Id: environGlobalKey, + C: controllersC, + Id: modelGlobalKey, Update: bson.D{{"$pull", bson.D{{"votingmachineids", m.doc.Id}}}}, }} } -func removeStateServerOps(m *Machine) []txn.Op { +func removeControllerOps(m *Machine) []txn.Op { return []txn.Op{{ C: machinesC, Id: m.doc.DocID, Assert: bson.D{{"novote", true}, {"hasvote", false}}, Update: bson.D{ - {"$pull", bson.D{{"jobs", JobManageEnviron}}}, + {"$pull", bson.D{{"jobs", JobManageModel}}}, {"$set", bson.D{{"novote", false}}}, }, }, { - C: stateServersC, - Id: environGlobalKey, + C: controllersC, + Id: modelGlobalKey, Update: bson.D{{"$pull", bson.D{{"machineids", m.doc.Id}}}}, }} } === modified file 'src/github.com/juju/juju/state/address.go' --- src/github.com/juju/juju/state/address.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/address.go 2016-03-22 15:18:22 +0000 @@ -15,23 +15,23 @@ "github.com/juju/juju/network" ) -// stateServerAddresses returns the list of internal addresses of the state +// controllerAddresses returns the list of internal addresses of the state // server machines. -func (st *State) stateServerAddresses() ([]string, error) { +func (st *State) controllerAddresses() ([]string, error) { ssState := st - env, err := st.StateServerEnvironment() + model, err := st.ControllerModel() if err != nil { return nil, errors.Trace(err) } - if st.EnvironTag() != env.EnvironTag() { - // We are not using the state server environment, so get one. - logger.Debugf("getting a state server state connection, current env: %s", st.EnvironTag()) - ssState, err = st.ForEnviron(env.EnvironTag()) + if st.ModelTag() != model.ModelTag() { + // We are not using the controller model, so get one. 
+ logger.Debugf("getting a controller state connection, current env: %s", st.ModelTag()) + ssState, err = st.ForModel(model.ModelTag()) if err != nil { return nil, errors.Trace(err) } defer ssState.Close() - logger.Debugf("ssState env: %s", ssState.EnvironTag()) + logger.Debugf("ssState env: %s", ssState.ModelTag()) } type addressMachine struct { @@ -41,23 +41,23 @@ // TODO(rog) 2013/10/14 index machines on jobs. machines, closer := ssState.getCollection(machinesC) defer closer() - err = machines.Find(bson.D{{"jobs", JobManageEnviron}}).All(&allAddresses) + err = machines.Find(bson.D{{"jobs", JobManageModel}}).All(&allAddresses) if err != nil { return nil, err } if len(allAddresses) == 0 { - return nil, errors.New("no state server machines found") + return nil, errors.New("no controller machines found") } apiAddrs := make([]string, 0, len(allAddresses)) for _, addrs := range allAddresses { naddrs := networkAddresses(addrs.Addresses) - addr, ok := network.SelectInternalAddress(naddrs, false) + addr, ok := network.SelectControllerAddress(naddrs, false) if ok { apiAddrs = append(apiAddrs, addr.Value) } } if len(apiAddrs) == 0 { - return nil, errors.New("no state server machines with addresses found") + return nil, errors.New("no controller machines with addresses found") } return apiAddrs, nil } @@ -73,11 +73,11 @@ // Addresses returns the list of cloud-internal addresses that // can be used to connect to the state. func (st *State) Addresses() ([]string, error) { - addrs, err := st.stateServerAddresses() + addrs, err := st.controllerAddresses() if err != nil { return nil, errors.Trace(err) } - config, err := st.EnvironConfig() + config, err := st.ModelConfig() if err != nil { return nil, errors.Trace(err) } @@ -89,11 +89,11 @@ // This method will be deprecated when API addresses are // stored independently in their own document. func (st *State) APIAddressesFromMachines() ([]string, error) { - addrs, err := st.stateServerAddresses() + addrs, err := st.controllerAddresses() if err != nil { return nil, errors.Trace(err) } - config, err := st.EnvironConfig() + config, err := st.ModelConfig() if err != nil { return nil, errors.Trace(err) } @@ -109,8 +109,21 @@ // SetAPIHostPorts sets the addresses of the API server instances. // Each server is represented by one element in the top level slice. func (st *State) SetAPIHostPorts(netHostsPorts [][]network.HostPort) error { + // Filter any addresses not on the default space, if possible. + // All API servers need to be accessible there. 
+ var hpsToSet [][]network.HostPort + for _, hps := range netHostsPorts { + defaultSpaceHP, ok := network.SelectHostPortBySpace(hps, network.DefaultSpace) + if !ok { + logger.Warningf("cannot determine API addresses in space %q to use as API endpoints; using all addresses", network.DefaultSpace) + hpsToSet = netHostsPorts + break + } + hpsToSet = append(hpsToSet, []network.HostPort{defaultSpaceHP}) + } + doc := apiHostPortsDoc{ - APIHostPorts: fromNetworkHostsPorts(netHostsPorts), + APIHostPorts: fromNetworkHostsPorts(hpsToSet), } buildTxn := func(attempt int) ([]txn.Op, error) { existing, err := st.APIHostPorts() @@ -118,7 +131,7 @@ return nil, err } op := txn.Op{ - C: stateServersC, + C: controllersC, Id: apiHostPortsKey, Assert: bson.D{{ "apihostports", fromNetworkHostsPorts(existing), @@ -134,16 +147,16 @@ if err := st.run(buildTxn); err != nil { return errors.Annotate(err, "cannot set API addresses") } - logger.Debugf("setting API hostPorts: %v", netHostsPorts) + logger.Debugf("setting API hostPorts: %v", hpsToSet) return nil } // APIHostPorts returns the API addresses as set by SetAPIHostPorts. func (st *State) APIHostPorts() ([][]network.HostPort, error) { var doc apiHostPortsDoc - stateServers, closer := st.getCollection(stateServersC) + controllers, closer := st.getCollection(controllersC) defer closer() - err := stateServers.Find(bson.D{{"_id", apiHostPortsKey}}).One(&doc) + err := controllers.Find(bson.D{{"_id", apiHostPortsKey}}).One(&doc) if err != nil { return nil, err } @@ -158,11 +171,11 @@ // DeployerConnectionInfo returns the address information necessary for the deployer. // The function does the expensive operations (getting stuff from mongo) just once. func (st *State) DeployerConnectionInfo() (*DeployerConnectionValues, error) { - addrs, err := st.stateServerAddresses() + addrs, err := st.controllerAddresses() if err != nil { return nil, errors.Trace(err) } - config, err := st.EnvironConfig() + config, err := st.ModelConfig() if err != nil { return nil, errors.Trace(err) } @@ -184,6 +197,7 @@ NetworkName string `bson:"networkname,omitempty"` Scope string `bson:"networkscope,omitempty"` Origin string `bson:"origin,omitempty"` + SpaceName string `bson:"spacename,omitempty"` } // Origin specifies where an address comes from, whether it was reported by a @@ -208,6 +222,7 @@ NetworkName: netAddr.NetworkName, Scope: string(netAddr.Scope), Origin: string(origin), + SpaceName: string(netAddr.SpaceName), } } @@ -219,6 +234,7 @@ Type: network.AddressType(addr.AddressType), NetworkName: addr.NetworkName, Scope: network.Scope(addr.Scope), + SpaceName: network.SpaceName(addr.SpaceName), } } @@ -254,6 +270,7 @@ NetworkName string `bson:"networkname,omitempty"` Scope string `bson:"networkscope,omitempty"` Port int `bson:"port"` + SpaceName string `bson:"spacename,omitempty"` } // fromNetworkHostPort is a convenience helper to create a state type @@ -265,6 +282,7 @@ NetworkName: netHostPort.NetworkName, Scope: string(netHostPort.Scope), Port: netHostPort.Port, + SpaceName: string(netHostPort.SpaceName), } } @@ -277,6 +295,7 @@ Type: network.AddressType(hp.AddressType), NetworkName: hp.NetworkName, Scope: network.Scope(hp.Scope), + SpaceName: network.SpaceName(hp.SpaceName), }, Port: hp.Port, } === modified file 'src/github.com/juju/juju/state/address_test.go' --- src/github.com/juju/juju/state/address_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/address_test.go 2016-03-22 15:18:22 +0000 @@ -38,17 +38,17 @@ state.AssertHostPortConversion(c, netHostPort) } -type 
StateServerAddressesSuite struct { +type ControllerAddressesSuite struct { testing.JujuConnSuite } -var _ = gc.Suite(&StateServerAddressesSuite{}) +var _ = gc.Suite(&ControllerAddressesSuite{}) -func (s *StateServerAddressesSuite) SetUpTest(c *gc.C) { +func (s *ControllerAddressesSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) // Make sure there is a machine with manage state in existence. machine := s.Factory.MakeMachine(c, &factory.MachineParams{ - Jobs: []state.MachineJob{state.JobManageEnviron, state.JobHostUnits}, + Jobs: []state.MachineJob{state.JobManageModel, state.JobHostUnits}, Addresses: []network.Address{ {Value: "192.168.2.144", Type: network.IPv4Address}, {Value: "10.0.1.2", Type: network.IPv4Address}, @@ -57,14 +57,14 @@ c.Logf("machine addresses: %#v", machine.Addresses()) } -func (s *StateServerAddressesSuite) TestStateServerEnv(c *gc.C) { +func (s *ControllerAddressesSuite) TestControllerEnv(c *gc.C) { addresses, err := s.State.Addresses() c.Assert(err, jc.ErrorIsNil) c.Assert(addresses, jc.SameContents, []string{"10.0.1.2:1234"}) } -func (s *StateServerAddressesSuite) TestOtherEnv(c *gc.C) { - st := s.Factory.MakeEnvironment(c, nil) +func (s *ControllerAddressesSuite) TestOtherEnv(c *gc.C) { + st := s.Factory.MakeModel(c, nil) defer st.Close() addresses, err := st.Addresses() c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/state/allcollections.go' --- src/github.com/juju/juju/state/allcollections.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/allcollections.go 2016-03-22 15:18:22 +0000 @@ -20,16 +20,16 @@ // // * infrastructure: we really don't have any business touching these once // we've created them. They should have the rawAccess attribute set, so that -// multiEnvRunner will consider them forbidden. +// multiModelRunner will consider them forbidden. // -// * global: these hold information external to environments. They may include -// environment metadata, or references; but they're generally not relevant -// from the perspective of a given environment. +// * global: these hold information external to models. They may include +// model metadata, or references; but they're generally not relevant +// from the perspective of a given model. // // * local (in opposition to global; and for want of a better term): these -// hold information relevant *within* specific environments (machines, +// hold information relevant *within* specific models (machines, // services, relations, settings, bookkeeping, etc) and should generally be -// read via an envStateCollection, and written via a multiEnvRunner. This is +// read via a modelStateCollection, and written via a multiModelRunner. This is // the most common form of collection, and the above access should usually // be automatic via Database.Collection and Database.Runner. // @@ -71,15 +71,15 @@ // Global collections // ================== - // This collection holds the details of the state servers hosting, well, + // This collection holds the details of the controllers hosting, well, // everything in state. - stateServersC: {global: true}, + controllersC: {global: true}, // This collection is used to track progress when restoring a - // state server from backup. + // controller from backup. restoreInfoC: {global: true}, - // This collection is used by the state servers to coordinate binary + // This collection is used by the controllers to coordinate binary // upgrades and schema migrations.
upgradeInfoC: {global: true}, @@ -87,12 +87,28 @@ // the simplestreams data source pointing to binaries required by juju. toolsmetadataC: {global: true}, - // This collection holds environment information; in particular its + // This collection holds model information; in particular its // Life and its UUID. - environmentsC: {global: true}, + modelsC: {global: true}, + + // This collection holds the parameters for model migrations. + modelMigrationsC: { + global: true, + indexes: []mgo.Index{{ + Key: []string{"model-uuid"}, + }}, + }, + + // This collection tracks the progress of model migrations. + modelMigrationStatusC: {global: true}, + + // This collection records the model migrations which + // are currently in progress. It is used to ensure that only + // one model migration document exists per model. + modelMigrationsActiveC: {global: true}, // This collection holds user information that's not specific to any - // one environment. + // one model. usersC: { global: true, indexes: []mgo.Index{{ @@ -110,8 +126,8 @@ // This collection is used as a unique key constraint. The _id field is // a concatenation of multiple fields that form a compound index, // allowing us to ensure users cannot have the same name for two - // different environments at a time. - userenvnameC: {global: true}, + // different models at a time. + usermodelnameC: {global: true}, // This collection holds workload metrics reported by certain charms // for passing onward to other tools. @@ -120,13 +136,13 @@ // This collection holds persistent state for the metrics manager. metricsManagerC: {global: true}, - // This collection holds lease data, which is per-environment, but is - // not itself multi-environment-aware; happily it will imminently be + // This collection holds lease data, which is per-model, but is + // not itself multi-model-aware; happily it will imminently be // deprecated in favour of the non-global leasesC below. // TODO(fwereade): drop leaseC entirely so can't use wrong const. leaseC: {global: true}, - // This collection was deprecated before multi-environment support + // This collection was deprecated before multi-model support // was implemented. actionresultsC: {global: true}, @@ -138,11 +154,11 @@ // This collection is basically a standard SQL intersection table; it // references the global records of the users allowed access to a // given collection. - envUsersC: {}, + modelUsersC: {}, - // This collection holds the last time the environment user connected - // to the environment. - envUserLastConnectionC: { + // This collection holds the last time the model user connected - // to the model. + modelUserLastConnectionC: { rawAccess: true, }, @@ -164,9 +180,9 @@ // for use by other clients in future. leasesC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "type"}, + Key: []string{"model-uuid", "type"}, }, { - Key: []string{"env-uuid", "namespace"}, + Key: []string{"model-uuid", "namespace"}, }}, }, @@ -177,23 +193,28 @@ servicesC: {}, unitsC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "service"}, - }, { - Key: []string{"env-uuid", "principal"}, - }, { - Key: []string{"env-uuid", "machineid"}, + Key: []string{"model-uuid", "service"}, + }, { + Key: []string{"model-uuid", "principal"}, + }, { + Key: []string{"model-uuid", "machineid"}, }}, }, minUnitsC: {}, + // This collection holds documents that indicate units which are queued + // to be assigned to machines. It is used exclusively by the + // AssignUnitWorker.
+ assignUnitC: {}, + // meterStatusC is the collection used to store meter status information. meterStatusC: {}, settingsrefsC: {}, relationsC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "endpoints.relationname"}, + Key: []string{"model-uuid", "endpoints.relationname"}, }, { - Key: []string{"env-uuid", "endpoints.servicename"}, + Key: []string{"model-uuid", "endpoints.servicename"}, }}, }, relationScopesC: {}, @@ -211,30 +232,30 @@ // These collections hold information associated with storage. blockDevicesC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "machineid"}, + Key: []string{"model-uuid", "machineid"}, }}, }, filesystemsC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "storageid"}, + Key: []string{"model-uuid", "storageid"}, }}, }, filesystemAttachmentsC: {}, storageInstancesC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "owner"}, + Key: []string{"model-uuid", "owner"}, }}, }, storageAttachmentsC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "storageid"}, + Key: []string{"model-uuid", "storageid"}, }, { - Key: []string{"env-uuid", "unitid"}, + Key: []string{"model-uuid", "unitid"}, }}, }, volumesC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "storageid"}, + Key: []string{"model-uuid", "storageid"}, }}, }, volumeAttachmentsC: {}, @@ -246,42 +267,55 @@ indexes: []mgo.Index{{ Key: []string{"uuid"}, }, { - Key: []string{"env-uuid", "state"}, + Key: []string{"model-uuid", "state"}, }, { - Key: []string{"env-uuid", "subnetid"}, + Key: []string{"model-uuid", "subnetid"}, }}, }, networkInterfacesC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "interfacename", "machineid"}, - Unique: true, - }, { - Key: []string{"env-uuid", "macaddress", "networkname"}, - Unique: true, - }, { - Key: []string{"env-uuid", "machineid"}, - }, { - Key: []string{"env-uuid", "networkname"}, + Key: []string{"model-uuid", "interfacename", "machineid"}, + Unique: true, + }, { + Key: []string{"model-uuid", "macaddress", "networkname"}, + Unique: true, + }, { + Key: []string{"model-uuid", "machineid"}, + }, { + Key: []string{"model-uuid", "networkname"}, }}, }, networksC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "providerid"}, + Key: []string{"model-uuid", "providerid"}, Unique: true, }}, }, openedPortsC: {}, requestedNetworksC: {}, + spacesC: { + indexes: []mgo.Index{{ + // NOTE: Like the DocID field, ProviderId also has the model + // UUID as prefix to ensure uniqueness per model. However since + // not all providers support spaces, it can be empty, hence both + // unique and sparse. + Key: []string{"providerid"}, + Unique: true, + Sparse: true, + }}, + }, subnetsC: { indexes: []mgo.Index{{ - // TODO(dimitern): make unique per-environment, not globally. - Key: []string{"providerid"}, - // Not always present; but, if present, must be unique; hence - // both unique and sparse. + // NOTE: Like the DocID field, ProviderId also has the model + // UUID as prefix to ensure uniqueness per model. However since + // not all providers support subnets, it can be empty, hence both + // unique and sparse. + Key: []string{"providerid"}, Unique: true, Sparse: true, }}, }, + endpointBindingsC: {}, // ----- @@ -297,12 +331,16 @@ // See payload/persistence/mongo.go. "payloads": {}, + // This collection holds information associated with charm resources. + // See resource/persistence/mongo.go. 
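The NOTE comments on spacesC and subnetsC above describe a "unique when present" constraint: a plain unique index would reject a second document that lacks a providerid, while a sparse unique index simply skips documents that omit the field, so providers without space or subnet support can leave it unset. A hedged sketch of declaring such an index directly with mgo (the function and collection handle are illustrative; only the index shape comes from the schema above):

    import "gopkg.in/mgo.v2"

    func ensureProviderIDIndex(subnets *mgo.Collection) error {
            // Sparse + Unique: providerid values must be distinct when
            // set, but documents without one are not indexed at all.
            return subnets.EnsureIndex(mgo.Index{
                    Key:    []string{"providerid"},
                    Unique: true,
                    Sparse: true,
            })
    }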
+ "resources": {}, + // ----- // The remaining non-global collections share the property of being // relevant to multiple other kinds of entities, and are thus generally // indexed by globalKey(). This is unhelpfully named in this context -- - // it's meant to imply "global within an environment", because it was + // it's meant to imply "global within an model", because it was // named before multi-env support. // This collection holds user annotations for various entities. They @@ -311,7 +349,7 @@ // This collection in particular holds an astounding number of // different sorts of data: service config settings by charm version, - // unit relation settings, environment config, etc etc etc. + // unit relation settings, model config, etc etc etc. settingsC: {}, constraintsC: {}, @@ -319,10 +357,9 @@ statusesC: {}, statusesHistoryC: { indexes: []mgo.Index{{ - Key: []string{"env-uuid", "globalkey"}, + Key: []string{"model-uuid", "globalkey"}, }}, }, - spacesC: {}, // This collection holds information about cloud image metadata. cloudimagemetadataC: {}, @@ -341,60 +378,66 @@ // it in allCollections, above; and please keep this list sorted for easy // inspection. const ( - actionNotificationsC = "actionnotifications" - actionresultsC = "actionresults" - actionsC = "actions" - annotationsC = "annotations" - blockDevicesC = "blockdevices" - blocksC = "blocks" - charmsC = "charms" - cleanupsC = "cleanups" - cloudimagemetadataC = "cloudimagemetadata" - constraintsC = "constraints" - containerRefsC = "containerRefs" - envUsersC = "envusers" - environmentsC = "environments" - filesystemAttachmentsC = "filesystemAttachments" - filesystemsC = "filesystems" - instanceDataC = "instanceData" - ipaddressesC = "ipaddresses" - leaseC = "lease" - leasesC = "leases" - machinesC = "machines" - meterStatusC = "meterStatus" - metricsC = "metrics" - metricsManagerC = "metricsmanager" - minUnitsC = "minunits" - networkInterfacesC = "networkinterfaces" - networksC = "networks" - openedPortsC = "openedPorts" - rebootC = "reboot" - relationScopesC = "relationscopes" - relationsC = "relations" - requestedNetworksC = "requestednetworks" - restoreInfoC = "restoreInfo" - sequenceC = "sequence" - servicesC = "services" - settingsC = "settings" - settingsrefsC = "settingsrefs" - stateServersC = "stateServers" - statusesC = "statuses" - statusesHistoryC = "statuseshistory" - storageAttachmentsC = "storageattachments" - storageConstraintsC = "storageconstraints" - storageInstancesC = "storageinstances" - subnetsC = "subnets" - spacesC = "spaces" - toolsmetadataC = "toolsmetadata" - txnLogC = "txns.log" - txnsC = "txns" - unitsC = "units" - upgradeInfoC = "upgradeInfo" - userenvnameC = "userenvname" - usersC = "users" - userLastLoginC = "userLastLogin" - envUserLastConnectionC = "envUserLastConnection" - volumeAttachmentsC = "volumeattachments" - volumesC = "volumes" + actionNotificationsC = "actionnotifications" + actionresultsC = "actionresults" + actionsC = "actions" + annotationsC = "annotations" + assignUnitC = "assignUnits" + blockDevicesC = "blockdevices" + blocksC = "blocks" + charmsC = "charms" + cleanupsC = "cleanups" + cloudimagemetadataC = "cloudimagemetadata" + constraintsC = "constraints" + containerRefsC = "containerRefs" + controllersC = "controllers" + filesystemAttachmentsC = "filesystemAttachments" + filesystemsC = "filesystems" + instanceDataC = "instanceData" + ipaddressesC = "ipaddresses" + leaseC = "lease" + leasesC = "leases" + machinesC = "machines" + meterStatusC = "meterStatus" + metricsC = "metrics" 
+ metricsManagerC = "metricsmanager" + minUnitsC = "minunits" + modelMigrationStatusC = "modelmigrations.status" + modelMigrationsActiveC = "modelmigrations.active" + modelMigrationsC = "modelmigrations" + modelUserLastConnectionC = "modelUserLastConnection" + modelUsersC = "modelusers" + modelsC = "models" + networkInterfacesC = "networkinterfaces" + networksC = "networks" + openedPortsC = "openedPorts" + rebootC = "reboot" + relationScopesC = "relationscopes" + relationsC = "relations" + requestedNetworksC = "requestednetworks" + restoreInfoC = "restoreInfo" + sequenceC = "sequence" + servicesC = "services" + endpointBindingsC = "endpointbindings" + settingsC = "settings" + settingsrefsC = "settingsrefs" + spacesC = "spaces" + statusesC = "statuses" + statusesHistoryC = "statuseshistory" + storageAttachmentsC = "storageattachments" + storageConstraintsC = "storageconstraints" + storageInstancesC = "storageinstances" + subnetsC = "subnets" + toolsmetadataC = "toolsmetadata" + txnLogC = "txns.log" + txnsC = "txns" + unitsC = "units" + upgradeInfoC = "upgradeInfo" + userLastLoginC = "userLastLogin" + usermodelnameC = "usermodelname" + usersC = "users" + volumeAttachmentsC = "volumeattachments" + volumesC = "volumes" // "payloads" (see payload/persistence/mongo.go) + // "resources" (see resource/persistence/mongo.go) ) === modified file 'src/github.com/juju/juju/state/allwatcher.go' --- src/github.com/juju/juju/state/allwatcher.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/allwatcher.go 2016-03-22 15:18:22 +0000 @@ -9,23 +9,24 @@ "time" "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/mgo.v2" + "github.com/juju/juju/network" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/state/watcher" - "github.com/juju/names" - "gopkg.in/mgo.v2" ) // allWatcherStateBacking implements Backing by fetching entities for -// a single environment from the State. +// a single model from the State. type allWatcherStateBacking struct { st *State collectionByName map[string]allWatcherStateCollection } -// allEnvWatcherStateBacking implements Backing by fetching entities -// for all environments from the State. -type allEnvWatcherStateBacking struct { +// allModelWatcherStateBacking implements Backing by fetching entities +// for all models from the State. 
+type allModelWatcherStateBacking struct { st *State stPool *StatePool collectionByName map[string]allWatcherStateCollection @@ -57,8 +58,8 @@ for _, collName := range collNames { collection := allWatcherStateCollection{name: collName} switch collName { - case environmentsC: - collection.docType = reflect.TypeOf(backingEnvironment{}) + case modelsC: + collection.docType = reflect.TypeOf(backingModel{}) case machinesC: collection.docType = reflect.TypeOf(backingMachine{}) case unitsC: @@ -104,11 +105,11 @@ return collectionByName } -type backingEnvironment environmentDoc +type backingModel modelDoc -func (e *backingEnvironment) updated(st *State, store *multiwatcherStore, id string) error { - store.Update(&multiwatcher.EnvironmentInfo{ - EnvUUID: e.UUID, +func (e *backingModel) updated(st *State, store *multiwatcherStore, id string) error { + store.Update(&multiwatcher.ModelInfo{ + ModelUUID: e.UUID, Name: e.Name, Life: multiwatcher.Life(e.Life.String()), Owner: e.Owner, @@ -117,16 +118,16 @@ return nil } -func (e *backingEnvironment) removed(store *multiwatcherStore, envUUID, _ string, _ *State) error { +func (e *backingModel) removed(store *multiwatcherStore, modelUUID, _ string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "environment", - EnvUUID: envUUID, - Id: envUUID, + Kind: "model", + ModelUUID: modelUUID, + Id: modelUUID, }) return nil } -func (e *backingEnvironment) mongoId() string { +func (e *backingModel) mongoId() string { return e.UUID } @@ -134,7 +135,7 @@ func (m *backingMachine) updated(st *State, store *multiwatcherStore, id string) error { info := &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: m.Id, Life: multiwatcher.Life(m.Life.String()), Series: m.Series, @@ -181,11 +182,11 @@ return nil } -func (m *backingMachine) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { +func (m *backingMachine) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "machine", - EnvUUID: envUUID, - Id: id, + Kind: "machine", + ModelUUID: modelUUID, + Id: id, }) return nil } @@ -253,7 +254,7 @@ func (u *backingUnit) updated(st *State, store *multiwatcherStore, id string) error { info := &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: u.Name, Service: u.Service, Series: u.Series, @@ -357,11 +358,11 @@ return publicAddress.Value, privateAddress.Value, nil } -func (u *backingUnit) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { +func (u *backingUnit) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "unit", - EnvUUID: envUUID, - Id: id, + Kind: "unit", + ModelUUID: modelUUID, + Id: id, }) return nil } @@ -376,12 +377,12 @@ if svc.CharmURL == nil { return errors.Errorf("charm url is nil") } - env, err := st.Environment() + env, err := st.Model() if err != nil { return errors.Trace(err) } info := &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: svc.Name, Exposed: svc.Exposed, CharmURL: svc.CharmURL.String(), @@ -450,28 +451,28 @@ } } if needConfig { - var err error - info.Config, _, err = readSettingsDoc(st, serviceSettingsKey(svc.Name, svc.CharmURL)) + doc, err := readSettingsDoc(st, serviceSettingsKey(svc.Name, svc.CharmURL)) if err != nil { return errors.Trace(err) } + info.Config = doc.Settings } store.Update(info) return nil } -func (svc *backingService) removed(store 
*multiwatcherStore, envUUID, id string, _ *State) error { +func (svc *backingService) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "service", - EnvUUID: envUUID, - Id: id, + Kind: "service", + ModelUUID: modelUUID, + Id: id, }) return nil } // SCHEMACHANGE // TODO(mattyw) remove when schema upgrades are possible -func (svc *backingService) fixOwnerTag(env *Environment) string { +func (svc *backingService) fixOwnerTag(env *Model) string { if svc.OwnerTag != "" { return svc.OwnerTag } @@ -488,18 +489,18 @@ return a.DocId } -func (a *backingAction) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { +func (a *backingAction) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "action", - EnvUUID: envUUID, - Id: id, + Kind: "action", + ModelUUID: modelUUID, + Id: id, }) return nil } func (a *backingAction) updated(st *State, store *multiwatcherStore, id string) error { info := &multiwatcher.ActionInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: id, Receiver: a.Receiver, Name: a.Name, @@ -526,7 +527,7 @@ } } info := &multiwatcher.RelationInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Key: r.Key, Id: r.Id, Endpoints: eps, @@ -535,11 +536,11 @@ return nil } -func (r *backingRelation) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { +func (r *backingRelation) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "relation", - EnvUUID: envUUID, - Id: id, + Kind: "relation", + ModelUUID: modelUUID, + Id: id, }) return nil } @@ -552,7 +553,7 @@ func (a *backingAnnotation) updated(st *State, store *multiwatcherStore, id string) error { info := &multiwatcher.AnnotationInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Tag: a.Tag, Annotations: a.Annotations, } @@ -560,15 +561,15 @@ return nil } -func (a *backingAnnotation) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { +func (a *backingAnnotation) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { tag, ok := tagForGlobalKey(id) if !ok { return errors.Errorf("could not parse global key: %q", id) } store.Remove(multiwatcher.EntityId{ - Kind: "annotation", - EnvUUID: envUUID, - Id: tag, + Kind: "annotation", + ModelUUID: modelUUID, + Id: tag, }) return nil } @@ -581,21 +582,21 @@ func (a *backingBlock) updated(st *State, store *multiwatcherStore, id string) error { info := &multiwatcher.BlockInfo{ - EnvUUID: st.EnvironUUID(), - Id: id, - Tag: a.Tag, - Type: a.Type.ToParams(), - Message: a.Message, + ModelUUID: st.ModelUUID(), + Id: id, + Tag: a.Tag, + Type: a.Type.ToParams(), + Message: a.Message, } store.Update(info) return nil } -func (a *backingBlock) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { +func (a *backingBlock) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { store.Remove(multiwatcher.EntityId{ - Kind: "block", - EnvUUID: envUUID, - Id: id, + Kind: "block", + ModelUUID: modelUUID, + Id: id, }) return nil } @@ -607,7 +608,7 @@ type backingStatus statusDoc func (s *backingStatus) updated(st *State, store *multiwatcherStore, id string) error { - parentID, ok := backingEntityIdForGlobalKey(st.EnvironUUID(), id) + parentID, ok := backingEntityIdForGlobalKey(st.ModelUUID(), id) if !ok { return nil } @@ -695,7 +696,7 @@ if err != nil { return errors.Trace(err) } - 
serviceId, ok := backingEntityIdForGlobalKey(st.EnvironUUID(), service.globalKey()) + serviceId, ok := backingEntityIdForGlobalKey(st.ModelUUID(), service.globalKey()) if !ok { return nil } @@ -729,7 +730,7 @@ type backingConstraints constraintsDoc func (c *backingConstraints) updated(st *State, store *multiwatcherStore, id string) error { - parentID, ok := backingEntityIdForGlobalKey(st.EnvironUUID(), id) + parentID, ok := backingEntityIdForGlobalKey(st.ModelUUID(), id) if !ok { return nil } @@ -760,10 +761,10 @@ panic("cannot find mongo id from constraints document") } -type backingSettings map[string]interface{} +type backingSettings settingsDoc func (s *backingSettings) updated(st *State, store *multiwatcherStore, id string) error { - parentID, url, ok := backingEntityIdForSettingsKey(st.EnvironUUID(), id) + parentID, url, ok := backingEntityIdForSettingsKey(st.ModelUUID(), id) if !ok { return nil } @@ -782,8 +783,7 @@ break } newInfo := *info - cleanSettingsMap(*s) - newInfo.Config = *s + newInfo.Config = s.Settings info0 = &newInfo default: return nil @@ -792,8 +792,8 @@ return nil } -func (s *backingSettings) removed(store *multiwatcherStore, envUUID, id string, _ *State) error { - parentID, url, ok := backingEntityIdForSettingsKey(envUUID, id) +func (s *backingSettings) removed(store *multiwatcherStore, modelUUID, id string, _ *State) error { + parentID, url, ok := backingEntityIdForSettingsKey(modelUUID, id) if !ok { // Service is already gone along with its settings. return nil @@ -804,8 +804,7 @@ return nil } newInfo := *info - cleanSettingsMap(*s) - newInfo.Config = *s + newInfo.Config = s.Settings parent = &newInfo store.Update(parent) } @@ -819,9 +818,9 @@ // backingEntityIdForSettingsKey returns the entity id for the given // settings key. Any extra information in the key is returned in // extra. -func backingEntityIdForSettingsKey(envUUID, key string) (eid multiwatcher.EntityId, extra string, ok bool) { +func backingEntityIdForSettingsKey(modelUUID, key string) (eid multiwatcher.EntityId, extra string, ok bool) { if !strings.HasPrefix(key, "s#") { - eid, ok = backingEntityIdForGlobalKey(envUUID, key) + eid, ok = backingEntityIdForGlobalKey(modelUUID, key) return } key = key[2:] @@ -830,8 +829,8 @@ return multiwatcher.EntityId{}, "", false } eid = (&multiwatcher.ServiceInfo{ - EnvUUID: envUUID, - Name: key[0:i], + ModelUUID: modelUUID, + Name: key[0:i], }).EntityId() extra = key[i+1:] ok = true @@ -841,7 +840,7 @@ type backingOpenedPorts map[string]interface{} func (p *backingOpenedPorts) updated(st *State, store *multiwatcherStore, id string) error { - parentID, ok := backingEntityIdForOpenedPortsKey(st.EnvironUUID(), id) + parentID, ok := backingEntityIdForOpenedPortsKey(st.ModelUUID(), id) if !ok { return nil } @@ -866,11 +865,11 @@ return nil } -func (p *backingOpenedPorts) removed(store *multiwatcherStore, envUUID, id string, st *State) error { +func (p *backingOpenedPorts) removed(store *multiwatcherStore, modelUUID, id string, st *State) error { if st == nil { return nil } - parentID, ok := backingEntityIdForOpenedPortsKey(st.EnvironUUID(), id) + parentID, ok := backingEntityIdForOpenedPortsKey(st.ModelUUID(), id) if !ok { return nil } @@ -906,7 +905,7 @@ // updateUnitPorts updates the Ports and PortRanges info of the given unit. 
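backingEntityIdForSettingsKey above leans on the "s#<service>#<charm-url>" form of service settings keys: everything between "s#" and the final "#" is the service name, and the trailing charm URL is returned as extra so callers can check whether the settings belong to the service's current charm. A stand-alone parse of the same shape, for illustration only (the helper name is hypothetical; the backing code returns a multiwatcher.EntityId instead):

    import "strings"

    // parseServiceSettingsKey splits "s#wordpress#local:quantal/wordpress-3"
    // into ("wordpress", "local:quantal/wordpress-3", true).
    func parseServiceSettingsKey(key string) (service, charmURL string, ok bool) {
            if !strings.HasPrefix(key, "s#") {
                    return "", "", false
            }
            rest := key[2:]
            i := strings.LastIndex(rest, "#")
            if i < 0 {
                    return "", "", false
            }
            return rest[:i], rest[i+1:], true
    }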
func updateUnitPorts(st *State, store *multiwatcherStore, u *Unit) error { - eid, ok := backingEntityIdForGlobalKey(st.EnvironUUID(), u.globalKey()) + eid, ok := backingEntityIdForGlobalKey(st.ModelUUID(), u.globalKey()) if !ok { // This should never happen. return errors.New("cannot retrieve entity id for unit") @@ -933,18 +932,18 @@ // backingEntityIdForOpenedPortsKey returns the entity id for the given // openedPorts key. Any extra information in the key is discarded. -func backingEntityIdForOpenedPortsKey(envUUID, key string) (multiwatcher.EntityId, bool) { +func backingEntityIdForOpenedPortsKey(modelUUID, key string) (multiwatcher.EntityId, bool) { parts, err := extractPortsIdParts(key) if err != nil { logger.Debugf("cannot parse ports key %q: %v", key, err) return multiwatcher.EntityId{}, false } - return backingEntityIdForGlobalKey(envUUID, machineGlobalKey(parts[1])) + return backingEntityIdForGlobalKey(modelUUID, machineGlobalKey(parts[1])) } // backingEntityIdForGlobalKey returns the entity id for the given global key. // It returns false if the key is not recognized. -func backingEntityIdForGlobalKey(envUUID, key string) (multiwatcher.EntityId, bool) { +func backingEntityIdForGlobalKey(modelUUID, key string) (multiwatcher.EntityId, bool) { if len(key) < 3 || key[1] != '#' { return multiwatcher.EntityId{}, false } @@ -952,19 +951,19 @@ switch key[0] { case 'm': return (&multiwatcher.MachineInfo{ - EnvUUID: envUUID, - Id: id, + ModelUUID: modelUUID, + Id: id, }).EntityId(), true case 'u': id = strings.TrimSuffix(id, "#charm") return (&multiwatcher.UnitInfo{ - EnvUUID: envUUID, - Name: id, + ModelUUID: modelUUID, + Name: id, }).EntityId(), true case 's': return (&multiwatcher.ServiceInfo{ - EnvUUID: envUUID, - Name: id, + ModelUUID: modelUUID, + Name: id, }).EntityId(), true default: return multiwatcher.EntityId{}, false @@ -985,7 +984,7 @@ // // In some cases st may be nil. If the implementation requires st // then it should do nothing. - removed(store *multiwatcherStore, envUUID, id string, st *State) error + removed(store *multiwatcherStore, modelUUID, id string, st *State) error // mongoId returns the mongo _id field of the document. // It is currently never called for subsidiary documents. @@ -1056,7 +1055,7 @@ // in, such as settings changes to entities we don't care about. err := col.FindId(id).One(doc) if err == mgo.ErrNotFound { - err := doc.removed(all, b.st.EnvironUUID(), id, b.st) + err := doc.removed(all, b.st.ModelUUID(), id, b.st) return errors.Trace(err) } if err != nil { @@ -1071,9 +1070,9 @@ return nil } -func newAllEnvWatcherStateBacking(st *State) Backing { +func NewAllModelWatcherStateBacking(st *State) Backing { collections := makeAllWatcherCollectionInfo( - environmentsC, + modelsC, machinesC, unitsC, servicesC, @@ -1084,7 +1083,7 @@ settingsC, openedPortsC, ) - return &allEnvWatcherStateBacking{ + return &allModelWatcherStateBacking{ st: st, stPool: NewStatePool(st), collectionByName: collections, @@ -1092,27 +1091,27 @@ } // Watch watches all the collections. -func (b *allEnvWatcherStateBacking) Watch(in chan<- watcher.Change) { +func (b *allModelWatcherStateBacking) Watch(in chan<- watcher.Change) { for _, c := range b.collectionByName { b.st.watcher.WatchCollection(c.name, in) } } // Unwatch unwatches all the collections. 
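Both backings follow the same seed-then-stream contract: GetAll populates the store once, Watch and Unwatch attach and detach the underlying txn watcher on a change channel, and each incoming watcher.Change is applied through Changed. A minimal sketch of a consumer loop under that contract (the real consumer is the multiwatcher machinery elsewhere in the tree; this is an illustrative outline, not its code):

    func runBacking(b Backing, store *multiwatcherStore, stop <-chan struct{}) error {
            // Seed the store before streaming deltas so consumers start
            // from a complete snapshot of the watched collections.
            if err := b.GetAll(store); err != nil {
                    return err
            }
            in := make(chan watcher.Change)
            b.Watch(in)
            defer b.Unwatch(in)
            for {
                    select {
                    case <-stop:
                            return nil
                    case change := <-in:
                            if err := b.Changed(store, change); err != nil {
                                    return err
                            }
                    }
            }
    }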
-func (b *allEnvWatcherStateBacking) Unwatch(in chan<- watcher.Change) { +func (b *allModelWatcherStateBacking) Unwatch(in chan<- watcher.Change) { for _, c := range b.collectionByName { b.st.watcher.UnwatchCollection(c.name, in) } } // GetAll fetches all items that we want to watch from the state. -func (b *allEnvWatcherStateBacking) GetAll(all *multiwatcherStore) error { - envs, err := b.st.AllEnvironments() +func (b *allModelWatcherStateBacking) GetAll(all *multiwatcherStore) error { + envs, err := b.st.AllModels() if err != nil { - return errors.Annotate(err, "error loading environments") + return errors.Annotate(err, "error loading models") } for _, env := range envs { - st, err := b.st.ForEnviron(env.EnvironTag()) + st, err := b.st.ForModel(env.ModelTag()) if err != nil { return errors.Trace(err) } @@ -1120,7 +1119,7 @@ err = loadAllWatcherEntities(st, b.collectionByName, all) if err != nil { - return errors.Annotatef(err, "error loading entities for environment %v", env.UUID()) + return errors.Annotatef(err, "error loading entities for model %v", env.UUID()) } } return nil @@ -1128,26 +1127,26 @@ // Changed updates the allWatcher's idea of the current state // in response to the given change. -func (b *allEnvWatcherStateBacking) Changed(all *multiwatcherStore, change watcher.Change) error { +func (b *allModelWatcherStateBacking) Changed(all *multiwatcherStore, change watcher.Change) error { c, ok := b.collectionByName[change.C] if !ok { return errors.Errorf("unknown collection %q in fetch request", change.C) } - envUUID, id, err := b.idForChange(change) + modelUUID, id, err := b.idForChange(change) if err != nil { return errors.Trace(err) } doc := reflect.New(c.docType).Interface().(backingEntityDoc) - st, err := b.getState(change.C, envUUID) + st, err := b.getState(change.C, modelUUID) if err != nil { - _, envErr := b.st.GetEnvironment(names.NewEnvironTag(envUUID)) + _, envErr := b.st.GetModel(names.NewModelTag(modelUUID)) if errors.IsNotFound(envErr) { - // The entity's environment is gone so remove the entity + // The entity's model is gone so remove the entity // from the store. 
- doc.removed(all, envUUID, id, nil) + doc.removed(all, modelUUID, id, nil) return nil } return errors.Trace(err) @@ -1159,7 +1158,7 @@ // TODO - see TODOs in allWatcherStateBacking.Changed() err = col.FindId(id).One(doc) if err == mgo.ErrNotFound { - err := doc.removed(all, envUUID, id, st) + err := doc.removed(all, modelUUID, id, st) return errors.Trace(err) } if err != nil { @@ -1168,25 +1167,25 @@ return doc.updated(st, all, id) } -func (b *allEnvWatcherStateBacking) idForChange(change watcher.Change) (string, string, error) { - if change.C == environmentsC { - envUUID := change.Id.(string) - return envUUID, envUUID, nil +func (b *allModelWatcherStateBacking) idForChange(change watcher.Change) (string, string, error) { + if change.C == modelsC { + modelUUID := change.Id.(string) + return modelUUID, modelUUID, nil } - envUUID, id, ok := splitDocID(change.Id.(string)) + modelUUID, id, ok := splitDocID(change.Id.(string)) if !ok { return "", "", errors.Errorf("unknown id format: %v", change.Id.(string)) } - return envUUID, id, nil + return modelUUID, id, nil } -func (b *allEnvWatcherStateBacking) getState(collName, envUUID string) (*State, error) { - if collName == environmentsC { +func (b *allModelWatcherStateBacking) getState(collName, modelUUID string) (*State, error) { + if collName == modelsC { return b.st, nil } - st, err := b.stPool.Get(envUUID) + st, err := b.stPool.Get(modelUUID) if err != nil { return nil, errors.Trace(err) } @@ -1194,7 +1193,7 @@ } // Release implements the Backing interface. -func (b *allEnvWatcherStateBacking) Release() error { +func (b *allModelWatcherStateBacking) Release() error { err := b.stPool.Close() return errors.Trace(err) } === modified file 'src/github.com/juju/juju/state/allwatcher_internal_test.go' --- src/github.com/juju/juju/state/allwatcher_internal_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/allwatcher_internal_test.go 2016-03-22 15:18:22 +0000 @@ -14,7 +14,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/constraints" "github.com/juju/juju/instance" @@ -50,11 +50,11 @@ func (s *allWatcherBaseSuite) newState(c *gc.C) *State { s.envCount++ - cfg := testing.CustomEnvironConfig(c, testing.Attrs{ + cfg := testing.CustomModelConfig(c, testing.Attrs{ "name": fmt.Sprintf("testenv%d", s.envCount), "uuid": utils.MustNewUUID().String(), }) - _, st, err := s.state.NewEnvironment(cfg, s.owner) + _, st, err := s.state.NewModel(cfg, s.owner) c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(*gc.C) { st.Close() }) return st @@ -64,7 +64,7 @@ // we can check that they all get pulled in by // all(Env)WatcherStateBacking.GetAll. 
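The getState/Release pairing above is what keeps the all-model backing cheap: per-model *State instances are fetched on demand from a StatePool keyed by model UUID, shared across subsequent changes, and torn down once when Release closes the pool. In outline, using only the pool API visible in this diff (controllerState is a stand-in name for the controller's *State):

    pool := NewStatePool(controllerState)
    defer pool.Close()

    st, err := pool.Get(modelUUID)
    if err != nil {
            return errors.Trace(err)
    }
    // st is cached and shared; the pool, not the caller, owns its
    // lifetime, so there is no per-change Close here.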
func (s *allWatcherBaseSuite) setUpScenario(c *gc.C, st *State, units int) (entities entityInfoSlice) { - envUUID := st.EnvironUUID() + modelUUID := st.ModelUUID() add := func(e multiwatcher.EntityInfo) { entities = append(entities, e) } @@ -81,7 +81,7 @@ err = m.SetProviderAddresses(network.NewAddress("example.com")) c.Assert(err, jc.ErrorIsNil) add(&multiwatcher.MachineInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Id: "0", InstanceId: "i-machine-0", Status: multiwatcher.Status("pending"), @@ -104,7 +104,7 @@ c.Assert(err, jc.ErrorIsNil) setServiceConfigAttr(c, wordpress, "blog-title", "boring") add(&multiwatcher.ServiceInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Name: "wordpress", Exposed: true, CharmURL: serviceCharmURL(wordpress).String(), @@ -124,14 +124,14 @@ err = st.SetAnnotations(wordpress, pairs) c.Assert(err, jc.ErrorIsNil) add(&multiwatcher.AnnotationInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Tag: "service-wordpress", Annotations: pairs, }) logging := AddTestingService(c, st, "logging", AddTestingCharm(c, st, "logging"), s.owner) add(&multiwatcher.ServiceInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Name: "logging", CharmURL: serviceCharmURL(logging).String(), OwnerTag: s.owner.String(), @@ -150,9 +150,9 @@ rel, err := st.AddRelation(eps...) c.Assert(err, jc.ErrorIsNil) add(&multiwatcher.RelationInfo{ - EnvUUID: envUUID, - Key: "logging:logging-directory wordpress:logging-dir", - Id: rel.Id(), + ModelUUID: modelUUID, + Key: "logging:logging-directory wordpress:logging-dir", + Id: rel.Id(), Endpoints: []multiwatcher.Endpoint{ {ServiceName: "logging", Relation: charm.Relation{Name: "logging-directory", Role: "requirer", Interface: "logging", Optional: false, Limit: 1, Scope: "container"}}, {ServiceName: "wordpress", Relation: charm.Relation{Name: "logging-dir", Role: "provider", Interface: "logging", Optional: false, Limit: 0, Scope: "container"}}}, @@ -168,7 +168,7 @@ c.Assert(m.Tag().String(), gc.Equals, fmt.Sprintf("machine-%d", i+1)) add(&multiwatcher.UnitInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Name: fmt.Sprintf("wordpress/%d", i), Service: wordpress.Name(), Series: m.Series(), @@ -192,7 +192,7 @@ err = st.SetAnnotations(wu, pairs) c.Assert(err, jc.ErrorIsNil) add(&multiwatcher.AnnotationInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Tag: fmt.Sprintf("unit-wordpress-%d", i), Annotations: pairs, }) @@ -204,7 +204,7 @@ hc, err := m.HardwareCharacteristics() c.Assert(err, jc.ErrorIsNil) add(&multiwatcher.MachineInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Id: fmt.Sprint(i + 1), InstanceId: "i-" + m.Tag().String(), Status: multiwatcher.Status("error"), @@ -240,7 +240,7 @@ c.Assert(ok, jc.IsTrue) c.Assert(deployer, gc.Equals, names.NewUnitTag(fmt.Sprintf("wordpress/%d", i))) add(&multiwatcher.UnitInfo{ - EnvUUID: envUUID, + ModelUUID: modelUUID, Name: fmt.Sprintf("logging/%d", i), Service: "logging", Series: "quantal", @@ -280,12 +280,12 @@ } func (s *allWatcherStateSuite) TestGetAllMultiEnv(c *gc.C) { - // Set up 2 environments and ensure that GetAll returns the - // entities for the first environment with no errors. + // Set up 2 models and ensure that GetAll returns the + // entities for the first model with no errors. expectEntities := s.setUpScenario(c, s.state, 2) // Use more units in the second env to ensure the number of - // entities will mismatch if environment filtering isn't in place. + // entities will mismatch if model filtering isn't in place. 
s.setUpScenario(c, s.newState(c), 4) s.checkGetAll(c, expectEntities) @@ -448,22 +448,22 @@ return changeTestCase{ about: "no change if block is not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.BlockInfo{ - EnvUUID: st.EnvironUUID(), - Id: blockId, - Type: blockType, - Message: blockMsg, - Tag: st.EnvironTag().String(), + ModelUUID: st.ModelUUID(), + Id: blockId, + Type: blockType, + Message: blockMsg, + Tag: st.ModelTag().String(), }}, change: watcher.Change{ C: blocksC, Id: st.localID(blockId), }, expectContents: []multiwatcher.EntityInfo{&multiwatcher.BlockInfo{ - EnvUUID: st.EnvironUUID(), - Id: blockId, - Type: blockType, - Message: blockMsg, - Tag: st.EnvironTag().String(), + ModelUUID: st.ModelUUID(), + Id: blockId, + Type: blockType, + Message: blockMsg, + Tag: st.ModelTag().String(), }}, } }, @@ -483,11 +483,11 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.BlockInfo{ - EnvUUID: st.EnvironUUID(), - Id: st.localID(blockId), - Type: b.Type().ToParams(), - Message: b.Message(), - Tag: st.EnvironTag().String(), + ModelUUID: st.ModelUUID(), + Id: st.localID(blockId), + Type: b.Type().ToParams(), + Message: b.Message(), + Tag: st.ModelTag().String(), }}} }, func(c *gc.C, st *State) changeTestCase { @@ -513,7 +513,7 @@ func (s *allWatcherStateSuite) TestClosingPorts(c *gc.C) { defer s.Reset(c) - // Init the test environment. + // Init the test model. wordpress := AddTestingService(c, s.state, "wordpress", AddTestingCharm(c, s.state, "wordpress"), s.owner) u, err := wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -531,8 +531,8 @@ b := newAllWatcherStateBacking(s.state) all := newStore() all.Update(&multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), - Id: "0", + ModelUUID: s.state.ModelUUID(), + Id: "0", }) // Check opened ports. err = b.Changed(all, watcher.Change{ @@ -544,7 +544,7 @@ substNilSinceTimeForEntities(c, entities) assertEntitiesEqual(c, entities, []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -566,8 +566,8 @@ }, }, &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), - Id: "0", + ModelUUID: s.state.ModelUUID(), + Id: "0", }, }) // Close the ports. @@ -581,7 +581,7 @@ entities = all.All() assertEntitiesEqual(c, entities, []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -603,15 +603,15 @@ }, }, &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), - Id: "0", + ModelUUID: s.state.ModelUUID(), + Id: "0", }, }) } func (s *allWatcherStateSuite) TestSettings(c *gc.C) { defer s.Reset(c) - // Init the test environment. + // Init the test model. 
svc := AddTestingService(c, s.state, "dummy-service", AddTestingCharm(c, s.state, "dummy"), s.owner) b := newAllWatcherStateBacking(s.state) all := newStore() @@ -619,9 +619,9 @@ setServiceConfigAttr(c, svc, "username", "foo") setServiceConfigAttr(c, svc, "outlook", "foo@bar") all.Update(&multiwatcher.ServiceInfo{ - EnvUUID: s.state.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", + ModelUUID: s.state.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", }) err := b.Changed(all, watcher.Change{ C: "settings", @@ -632,10 +632,10 @@ substNilSinceTimeForEntities(c, entities) assertEntitiesEqual(c, entities, []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: s.state.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", - Config: charm.Settings{"outlook": "foo@bar", "username": "foo"}, + ModelUUID: s.state.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", + Config: charm.Settings{"outlook": "foo@bar", "username": "foo"}, }, }) // 2nd scenario part: destroy the service and signal change. @@ -649,9 +649,9 @@ entities = all.All() assertEntitiesEqual(c, entities, []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: s.state.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", + ModelUUID: s.state.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", }, }) } @@ -660,7 +660,7 @@ // with the state-based backing. Most of the logic is tested elsewhere - // this just tests end-to-end. func (s *allWatcherStateSuite) TestStateWatcher(c *gc.C) { - m0, err := s.state.AddMachine("trusty", JobManageEnviron) + m0, err := s.state.AddMachine("trusty", JobManageModel) c.Assert(err, jc.ErrorIsNil) c.Assert(m0.Id(), gc.Equals, "0") @@ -675,20 +675,20 @@ deltas := tw.All(2) checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Id: "0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, Life: multiwatcher.Life("alive"), Series: "trusty", - Jobs: []multiwatcher.MachineJob{JobManageEnviron.ToParams()}, + Jobs: []multiwatcher.MachineJob{JobManageModel.ToParams()}, Addresses: []network.Address{}, HasVote: false, WantsVote: true, }, }, { Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Id: "1", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -709,7 +709,7 @@ zeroOutTimestampsForDeltas(c, deltas) checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Id: "1", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -729,7 +729,7 @@ zeroOutTimestampsForDeltas(c, deltas) checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Id: "1", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -772,14 +772,14 @@ checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Id: "0", InstanceId: "i-0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, Life: multiwatcher.Life("alive"), Series: "trusty", - Jobs: 
[]multiwatcher.MachineJob{JobManageEnviron.ToParams()}, + Jobs: []multiwatcher.MachineJob{JobManageModel.ToParams()}, Addresses: []network.Address{}, HardwareCharacteristics: hc, HasVote: false, @@ -788,12 +788,12 @@ }, { Removed: true, Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), - Id: "1", + ModelUUID: s.state.ModelUUID(), + Id: "1", }, }, { Entity: &multiwatcher.MachineInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Id: "2", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -806,12 +806,12 @@ }, }, { Entity: &multiwatcher.ServiceInfo{ - EnvUUID: s.state.EnvironUUID(), - Name: "wordpress", - CharmURL: "local:quantal/quantal-wordpress-3", - OwnerTag: s.owner.String(), - Life: "alive", - Config: make(map[string]interface{}), + ModelUUID: s.state.ModelUUID(), + Name: "wordpress", + CharmURL: "local:quantal/quantal-wordpress-3", + OwnerTag: s.owner.String(), + Life: "alive", + Config: make(map[string]interface{}), Status: multiwatcher.StatusInfo{ Current: "unknown", Message: "Waiting for agent initialization to finish", @@ -820,7 +820,7 @@ }, }, { Entity: &multiwatcher.UnitInfo{ - EnvUUID: s.state.EnvironUUID(), + ModelUUID: s.state.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -841,7 +841,7 @@ }}) } -func (s *allWatcherStateSuite) TestStateWatcherTwoEnvironments(c *gc.C) { +func (s *allWatcherStateSuite) TestStateWatcherTwoModels(c *gc.C) { loggo.GetLogger("juju.state.watcher").SetLogLevel(loggo.TRACE) for i, test := range []struct { about string @@ -955,7 +955,7 @@ c.Logf("Test %d: %s", i, test.about) func() { checkIsolationForEnv := func(st *State, w, otherW *testWatcher) { - c.Logf("Making changes to environment %s", st.EnvironUUID()) + c.Logf("Making changes to model %s", st.ModelUUID()) if test.setUpState != nil { test.setUpState(st) @@ -985,25 +985,25 @@ } } -var _ = gc.Suite(&allEnvWatcherStateSuite{}) +var _ = gc.Suite(&allModelWatcherStateSuite{}) -type allEnvWatcherStateSuite struct { +type allModelWatcherStateSuite struct { allWatcherBaseSuite state1 *State } -func (s *allEnvWatcherStateSuite) SetUpTest(c *gc.C) { +func (s *allModelWatcherStateSuite) SetUpTest(c *gc.C) { s.allWatcherBaseSuite.SetUpTest(c) s.state1 = s.newState(c) } -func (s *allEnvWatcherStateSuite) Reset(c *gc.C) { +func (s *allModelWatcherStateSuite) Reset(c *gc.C) { s.TearDownTest(c) s.SetUpTest(c) } // performChangeTestCases runs a passed number of test cases for changes. -func (s *allEnvWatcherStateSuite) performChangeTestCases(c *gc.C, changeTestFuncs []changeTestFunc) { +func (s *allModelWatcherStateSuite) performChangeTestCases(c *gc.C, changeTestFuncs []changeTestFunc) { for i, changeTestFunc := range changeTestFuncs { func() { // in aid of per-loop defers defer s.Reset(c) @@ -1011,7 +1011,7 @@ test0 := changeTestFunc(c, s.state) c.Logf("test %d. %s", i, test0.about) - b := newAllEnvWatcherStateBacking(s.state) + b := NewAllModelWatcherStateBacking(s.state) defer b.Release() all := newStore() @@ -1040,7 +1040,7 @@ // build a list of the entities for the second env. 
newEntities := make([]multiwatcher.EntityInfo, 0) for _, entity := range entities { - if entity.EntityId().EnvUUID == s.state1.EnvironUUID() { + if entity.EntityId().ModelUUID == s.state1.ModelUUID() { newEntities = append(newEntities, entity) } } @@ -1057,98 +1057,98 @@ } } -func (s *allEnvWatcherStateSuite) TestChangeAnnotations(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeAnnotations(c *gc.C) { testChangeAnnotations(c, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeMachines(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeMachines(c *gc.C) { testChangeMachines(c, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeRelations(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeRelations(c *gc.C) { testChangeRelations(c, s.owner, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeServices(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeServices(c *gc.C) { testChangeServices(c, s.owner, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeServicesConstraints(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeServicesConstraints(c *gc.C) { testChangeServicesConstraints(c, s.owner, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeUnits(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeUnits(c *gc.C) { testChangeUnits(c, s.owner, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeUnitsNonNilPorts(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeUnitsNonNilPorts(c *gc.C) { testChangeUnitsNonNilPorts(c, s.owner, s.performChangeTestCases) } -func (s *allEnvWatcherStateSuite) TestChangeEnvironments(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeModels(c *gc.C) { changeTestFuncs := []changeTestFunc{ func(c *gc.C, st *State) changeTestCase { return changeTestCase{ - about: "no environment in state -> do nothing", + about: "no model in state -> do nothing", change: watcher.Change{ - C: "environments", + C: "models", Id: "non-existing-uuid", }} }, func(c *gc.C, st *State) changeTestCase { return changeTestCase{ - about: "environment is removed if it's not in backing", - initialContents: []multiwatcher.EntityInfo{&multiwatcher.EnvironmentInfo{ - EnvUUID: "some-uuid", + about: "model is removed if it's not in backing", + initialContents: []multiwatcher.EntityInfo{&multiwatcher.ModelInfo{ + ModelUUID: "some-uuid", }}, change: watcher.Change{ - C: "environments", + C: "models", Id: "some-uuid", }} }, func(c *gc.C, st *State) changeTestCase { - env, err := st.Environment() + env, err := st.Model() c.Assert(err, jc.ErrorIsNil) return changeTestCase{ - about: "environment is added if it's in backing but not in Store", + about: "model is added if it's in backing but not in Store", change: watcher.Change{ - C: "environments", - Id: st.EnvironUUID(), + C: "models", + Id: st.ModelUUID(), }, expectContents: []multiwatcher.EntityInfo{ - &multiwatcher.EnvironmentInfo{ - EnvUUID: env.UUID(), + &multiwatcher.ModelInfo{ + ModelUUID: env.UUID(), Name: env.Name(), Life: multiwatcher.Life("alive"), Owner: env.Owner().Id(), - ServerUUID: env.ServerUUID(), + ServerUUID: env.ControllerUUID(), }}} }, func(c *gc.C, st *State) changeTestCase { - env, err := st.Environment() + env, err := st.Model() c.Assert(err, jc.ErrorIsNil) return changeTestCase{ - about: "environment is updated if it's in backing and in Store", + about: "model is updated if it's in backing and in Store", initialContents: []multiwatcher.EntityInfo{ - 
&multiwatcher.EnvironmentInfo{ - EnvUUID: env.UUID(), + &multiwatcher.ModelInfo{ + ModelUUID: env.UUID(), Name: "", Life: multiwatcher.Life("alive"), Owner: env.Owner().Id(), - ServerUUID: env.ServerUUID(), + ServerUUID: env.ControllerUUID(), }, }, change: watcher.Change{ - C: "environments", + C: "models", Id: env.UUID(), }, expectContents: []multiwatcher.EntityInfo{ - &multiwatcher.EnvironmentInfo{ - EnvUUID: env.UUID(), + &multiwatcher.ModelInfo{ + ModelUUID: env.UUID(), Name: env.Name(), Life: multiwatcher.Life("alive"), Owner: env.Owner().Id(), - ServerUUID: env.ServerUUID(), + ServerUUID: env.ControllerUUID(), }}} }, func(c *gc.C, st *State) changeTestCase { @@ -1159,7 +1159,7 @@ return changeTestCase{ about: "status is changed if the service exists in the store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Constraints: constraints.MustParse("mem=99M cpu-cores=2 cpu-power=4"), }}, @@ -1169,7 +1169,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Constraints: constraints.MustParse("mem=4G arch=amd64"), }}} @@ -1178,25 +1178,25 @@ s.performChangeTestCases(c, changeTestFuncs) } -func (s *allEnvWatcherStateSuite) TestChangeForDeadEnv(c *gc.C) { +func (s *allModelWatcherStateSuite) TestChangeForDeadEnv(c *gc.C) { // Ensure an entity is removed when a change is seen but - // the environment the entity belonged to has already died. + // the model the entity belonged to has already died. - b := newAllEnvWatcherStateBacking(s.state) + b := NewAllModelWatcherStateBacking(s.state) defer b.Release() all := newStore() - // Insert a machine for an environment that doesn't actually + // Insert a machine for an model that doesn't actually // exist (mimics env removal). all.Update(&multiwatcher.MachineInfo{ - EnvUUID: "uuid", - Id: "0", + ModelUUID: "uuid", + Id: "0", }) c.Assert(all.All(), gc.HasLen, 1) err := b.Changed(all, watcher.Change{ C: "machines", - Id: ensureEnvUUID("uuid", "0"), + Id: ensureModelUUID("uuid", "0"), }) c.Assert(err, jc.ErrorIsNil) @@ -1204,36 +1204,36 @@ c.Assert(all.All(), gc.HasLen, 0) } -func (s *allEnvWatcherStateSuite) TestGetAll(c *gc.C) { - // Set up 2 environments and ensure that GetAll returns the +func (s *allModelWatcherStateSuite) TestGetAll(c *gc.C) { + // Set up 2 models and ensure that GetAll returns the // entities for both of them. entities0 := s.setUpScenario(c, s.state, 2) entities1 := s.setUpScenario(c, s.state1, 4) expectedEntities := append(entities0, entities1...) - // allEnvWatcherStateBacking also watches environments so add those in. - env, err := s.state.Environment() + // allModelWatcherStateBacking also watches models so add those in. 
+ env, err := s.state.Model() c.Assert(err, jc.ErrorIsNil) - env1, err := s.state1.Environment() + env1, err := s.state1.Model() c.Assert(err, jc.ErrorIsNil) expectedEntities = append(expectedEntities, - &multiwatcher.EnvironmentInfo{ - EnvUUID: env.UUID(), + &multiwatcher.ModelInfo{ + ModelUUID: env.UUID(), Name: env.Name(), Life: multiwatcher.Life("alive"), Owner: env.Owner().Id(), - ServerUUID: env.ServerUUID(), + ServerUUID: env.ControllerUUID(), }, - &multiwatcher.EnvironmentInfo{ - EnvUUID: env1.UUID(), + &multiwatcher.ModelInfo{ + ModelUUID: env1.UUID(), Name: env1.Name(), Life: multiwatcher.Life("alive"), Owner: env1.Owner().Id(), - ServerUUID: env1.ServerUUID(), + ServerUUID: env1.ControllerUUID(), }, ) - b := newAllEnvWatcherStateBacking(s.state) + b := NewAllModelWatcherStateBacking(s.state) all := newStore() err = b.GetAll(all) c.Assert(err, jc.ErrorIsNil) @@ -1245,19 +1245,19 @@ } // TestStateWatcher tests the integration of the state watcher with -// allEnvWatcherStateBacking. Most of the logic is comprehensively +// allModelWatcherStateBacking. Most of the logic is comprehensively // tested elsewhere - this just tests end-to-end. -func (s *allEnvWatcherStateSuite) TestStateWatcher(c *gc.C) { +func (s *allModelWatcherStateSuite) TestStateWatcher(c *gc.C) { st0 := s.state - env0, err := st0.Environment() + env0, err := st0.Model() c.Assert(err, jc.ErrorIsNil) st1 := s.state1 - env1, err := st1.Environment() + env1, err := st1.Model() c.Assert(err, jc.ErrorIsNil) - // Create some initial machines across 2 environments - m00, err := st0.AddMachine("trusty", JobManageEnviron) + // Create some initial machines across 2 models + m00, err := st0.AddMachine("trusty", JobManageModel) c.Assert(err, jc.ErrorIsNil) c.Assert(m00.Id(), gc.Equals, "0") @@ -1265,44 +1265,44 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(m10.Id(), gc.Equals, "0") - tw := newTestAllEnvWatcher(st0, c) + tw := newTestAllModelWatcher(st0, c) defer tw.Stop() - // Expect to see events for the already created environments and + // Expect to see events for the already created models and // machines first. 
deltas := tw.All(4) checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ - Entity: &multiwatcher.EnvironmentInfo{ - EnvUUID: env0.UUID(), + Entity: &multiwatcher.ModelInfo{ + ModelUUID: env0.UUID(), Name: env0.Name(), Life: "alive", Owner: env0.Owner().Id(), - ServerUUID: env0.ServerUUID(), + ServerUUID: env0.ControllerUUID(), }, }, { - Entity: &multiwatcher.EnvironmentInfo{ - EnvUUID: env1.UUID(), + Entity: &multiwatcher.ModelInfo{ + ModelUUID: env1.UUID(), Name: env1.Name(), Life: "alive", Owner: env1.Owner().Id(), - ServerUUID: env1.ServerUUID(), + ServerUUID: env1.ControllerUUID(), }, }, { Entity: &multiwatcher.MachineInfo{ - EnvUUID: st0.EnvironUUID(), + ModelUUID: st0.ModelUUID(), Id: "0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, Life: multiwatcher.Life("alive"), Series: "trusty", - Jobs: []multiwatcher.MachineJob{JobManageEnviron.ToParams()}, + Jobs: []multiwatcher.MachineJob{JobManageModel.ToParams()}, Addresses: []network.Address{}, HasVote: false, WantsVote: true, }, }, { Entity: &multiwatcher.MachineInfo{ - EnvUUID: st1.EnvironUUID(), + ModelUUID: st1.ModelUUID(), Id: "0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -1323,7 +1323,7 @@ zeroOutTimestampsForDeltas(c, deltas) checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: st1.EnvironUUID(), + ModelUUID: st1.ModelUUID(), Id: "0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -1343,7 +1343,7 @@ zeroOutTimestampsForDeltas(c, deltas) checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: st1.EnvironUUID(), + ModelUUID: st1.ModelUUID(), Id: "0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -1357,7 +1357,7 @@ }}) // Make further changes to the state, including the addition of a - // new environment. + // new model. 
err = m00.SetProvisioned("i-0", "bootstrap_nonce", nil) c.Assert(err, jc.ErrorIsNil) @@ -1375,7 +1375,7 @@ c.Assert(err, jc.ErrorIsNil) st2 := s.newState(c) - env2, err := st2.Environment() + env2, err := st2.Model() c.Assert(err, jc.ErrorIsNil) m20, err := st2.AddMachine("trusty", JobHostUnits) @@ -1388,14 +1388,14 @@ checkDeltasEqual(c, deltas, []multiwatcher.Delta{{ Entity: &multiwatcher.MachineInfo{ - EnvUUID: st0.EnvironUUID(), + ModelUUID: st0.ModelUUID(), Id: "0", InstanceId: "i-0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, Life: multiwatcher.Life("alive"), Series: "trusty", - Jobs: []multiwatcher.MachineJob{JobManageEnviron.ToParams()}, + Jobs: []multiwatcher.MachineJob{JobManageModel.ToParams()}, Addresses: []network.Address{}, HardwareCharacteristics: &instance.HardwareCharacteristics{}, HasVote: false, @@ -1404,12 +1404,12 @@ }, { Removed: true, Entity: &multiwatcher.MachineInfo{ - EnvUUID: st1.EnvironUUID(), - Id: "0", + ModelUUID: st1.ModelUUID(), + Id: "0", }, }, { Entity: &multiwatcher.MachineInfo{ - EnvUUID: st1.EnvironUUID(), + ModelUUID: st1.ModelUUID(), Id: "1", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -1422,12 +1422,12 @@ }, }, { Entity: &multiwatcher.ServiceInfo{ - EnvUUID: st1.EnvironUUID(), - Name: "wordpress", - CharmURL: "local:quantal/quantal-wordpress-3", - OwnerTag: s.owner.String(), - Life: "alive", - Config: make(map[string]interface{}), + ModelUUID: st1.ModelUUID(), + Name: "wordpress", + CharmURL: "local:quantal/quantal-wordpress-3", + OwnerTag: s.owner.String(), + Life: "alive", + Config: make(map[string]interface{}), Status: multiwatcher.StatusInfo{ Current: "unknown", Message: "Waiting for agent initialization to finish", @@ -1436,7 +1436,7 @@ }, }, { Entity: &multiwatcher.UnitInfo{ - EnvUUID: st1.EnvironUUID(), + ModelUUID: st1.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -1455,16 +1455,16 @@ }, }, }, { - Entity: &multiwatcher.EnvironmentInfo{ - EnvUUID: env2.UUID(), + Entity: &multiwatcher.ModelInfo{ + ModelUUID: env2.UUID(), Name: env2.Name(), Life: "alive", Owner: env2.Owner().Id(), - ServerUUID: env2.ServerUUID(), + ServerUUID: env2.ControllerUUID(), }, }, { Entity: &multiwatcher.MachineInfo{ - EnvUUID: st2.EnvironUUID(), + ModelUUID: st2.ModelUUID(), Id: "0", Status: multiwatcher.Status("pending"), StatusData: map[string]interface{}{}, @@ -1493,7 +1493,7 @@ } // The testChange* funcs are extracted so the test cases can be used -// to test both the allWatcher and allEnvWatcher. +// to test both the allWatcher and allModelWatcher. 
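Each extracted helper below is table-driven: a changeTestFunc builds a changeTestCase giving the store's initial contents, one simulated watcher.Change, and the expected contents afterwards, and performChangeTestCases replays it against either backing. A skeletal case in the same style as those below (the machine ID is arbitrary; expectContents is left empty to assert removal):

    func(c *gc.C, st *State) changeTestCase {
            return changeTestCase{
                    about: "entity is removed if it's not in backing",
                    initialContents: []multiwatcher.EntityInfo{&multiwatcher.MachineInfo{
                            ModelUUID: st.ModelUUID(),
                            Id:        "9",
                    }},
                    change: watcher.Change{
                            C:  "machines",
                            Id: st.docID("9"),
                    },
                    // expectContents omitted: the store should end up empty.
            }
    }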
func testChangeAnnotations(c *gc.C, runChangeTests func(*gc.C, []changeTestFunc)) { changeTestFuncs := []changeTestFunc{ @@ -1509,8 +1509,8 @@ return changeTestCase{ about: "annotation is removed if it's not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.AnnotationInfo{ - EnvUUID: st.EnvironUUID(), - Tag: "machine-0", + ModelUUID: st.ModelUUID(), + Tag: "machine-0", }}, change: watcher.Change{ C: "annotations", @@ -1531,7 +1531,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.AnnotationInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Tag: "machine-0", Annotations: map[string]string{"foo": "bar", "arble": "baz"}, }}} @@ -1549,8 +1549,8 @@ return changeTestCase{ about: "annotation is updated if it's in backing and in multiwatcher.Store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.AnnotationInfo{ - EnvUUID: st.EnvironUUID(), - Tag: "machine-0", + ModelUUID: st.ModelUUID(), + Tag: "machine-0", Annotations: map[string]string{ "arble": "baz", "foo": "bar", @@ -1563,8 +1563,8 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.AnnotationInfo{ - EnvUUID: st.EnvironUUID(), - Tag: "machine-0", + ModelUUID: st.ModelUUID(), + Tag: "machine-0", Annotations: map[string]string{ "arble": "khroomph", "new": "attr", @@ -1596,8 +1596,8 @@ return changeTestCase{ about: "machine is removed if it's not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), - Id: "1", + ModelUUID: st.ModelUUID(), + Id: "1", }}, change: watcher.Change{ C: "machines", @@ -1618,7 +1618,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", Status: multiwatcher.Status("error"), StatusInfo: "failure", @@ -1643,7 +1643,7 @@ about: "machine is updated if it's in backing and in Store", initialContents: []multiwatcher.EntityInfo{ &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", Status: multiwatcher.Status("error"), StatusInfo: "another failure", @@ -1656,7 +1656,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", InstanceId: "i-0", Status: multiwatcher.Status("error"), @@ -1675,7 +1675,7 @@ return changeTestCase{ about: "no change if status is not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", Status: multiwatcher.Status("error"), StatusInfo: "failure", @@ -1687,7 +1687,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", Status: multiwatcher.Status("error"), StatusInfo: "failure", @@ -1703,7 +1703,7 @@ return changeTestCase{ about: "status is changed if the machine exists in the store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", Status: multiwatcher.Status("error"), StatusInfo: "failure", @@ -1715,7 +1715,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", Status: multiwatcher.Status("started"), StatusData: make(map[string]interface{}), @@ -1739,8 +1739,8 @@ return changeTestCase{ about: "relation is removed if it's not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.RelationInfo{ - 
EnvUUID: st.EnvironUUID(), - Key: "logging:logging-directory wordpress:logging-dir", + ModelUUID: st.ModelUUID(), + Key: "logging:logging-directory wordpress:logging-dir", }}, change: watcher.Change{ C: "relations", @@ -1763,8 +1763,8 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.RelationInfo{ - EnvUUID: st.EnvironUUID(), - Key: "logging:logging-directory wordpress:logging-dir", + ModelUUID: st.ModelUUID(), + Key: "logging:logging-directory wordpress:logging-dir", Endpoints: []multiwatcher.Endpoint{ {ServiceName: "logging", Relation: charm.Relation{Name: "logging-directory", Role: "requirer", Interface: "logging", Optional: false, Limit: 1, Scope: "container"}}, {ServiceName: "wordpress", Relation: charm.Relation{Name: "logging-dir", Role: "provider", Interface: "logging", Optional: false, Limit: 0, Scope: "container"}}}, @@ -1791,8 +1791,8 @@ about: "service is removed if it's not in backing", initialContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress", + ModelUUID: st.ModelUUID(), + Name: "wordpress", }, }, change: watcher.Change{ @@ -1815,14 +1815,14 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress", - Exposed: true, - CharmURL: "local:quantal/quantal-wordpress-3", - OwnerTag: owner.String(), - Life: multiwatcher.Life("alive"), - MinUnits: 42, - Config: charm.Settings{}, + ModelUUID: st.ModelUUID(), + Name: "wordpress", + Exposed: true, + CharmURL: "local:quantal/quantal-wordpress-3", + OwnerTag: owner.String(), + Life: multiwatcher.Life("alive"), + MinUnits: 42, + Config: charm.Settings{}, Status: multiwatcher.StatusInfo{ Current: "unknown", Message: "Waiting for agent initialization to finish", @@ -1837,7 +1837,7 @@ return changeTestCase{ about: "service is updated if it's in backing and in multiwatcher.Store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Exposed: true, CharmURL: "local:quantal/quantal-wordpress-3", @@ -1851,7 +1851,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", CharmURL: "local:quantal/quantal-wordpress-3", OwnerTag: owner.String(), @@ -1867,8 +1867,8 @@ return changeTestCase{ about: "service re-reads config when charm URL changes", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress", + ModelUUID: st.ModelUUID(), + Name: "wordpress", // Note: CharmURL has a different revision number from // the wordpress revision in the testing repo. CharmURL: "local:quantal/quantal-wordpress-2", @@ -1880,12 +1880,12 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress", - CharmURL: "local:quantal/quantal-wordpress-3", - OwnerTag: owner.String(), - Life: multiwatcher.Life("alive"), - Config: charm.Settings{"blog-title": "boring"}, + ModelUUID: st.ModelUUID(), + Name: "wordpress", + CharmURL: "local:quantal/quantal-wordpress-3", + OwnerTag: owner.String(), + Life: multiwatcher.Life("alive"), + Config: charm.Settings{"blog-title": "boring"}, }}} }, // Settings. 
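(Editorial aside: the hunks above and the settings hunks that follow all exercise one table-driven pattern. As a reading aid, here is a minimal, self-contained Go sketch of that shape; change and testCase are hypothetical stand-ins for watcher.Change and changeTestCase, not the real types.)

package main

import "fmt"

// change and testCase only capture the shape of the change tests above.
type change struct{ coll, id string }

type testCase struct {
	about           string
	initialContents []string
	change          change
	expectContents  []string
}

func main() {
	cases := []testCase{{
		about:           "entity is removed if it's not in backing",
		initialContents: []string{"machine-0"},
		change:          change{coll: "machines", id: "model-uuid:0"},
		expectContents:  nil, // the stale entity is dropped from the store
	}}
	for i, t := range cases {
		fmt.Printf("test %d: %s (%d -> %d entities)\n",
			i, t.about, len(t.initialContents), len(t.expectContents))
	}
}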
@@ -1901,18 +1901,18 @@ return changeTestCase{ about: "no change if service is not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", }}, change: watcher.Change{ C: "settings", Id: st.docID("s#dummy-service#local:quantal/quantal-dummy-1"), }, expectContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", }}} }, func(c *gc.C, st *State) changeTestCase { @@ -1923,9 +1923,9 @@ return changeTestCase{ about: "service config is changed if service exists in the store with the same URL", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", }}, change: watcher.Change{ C: "settings", @@ -1933,10 +1933,10 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", - Config: charm.Settings{"username": "foo", "outlook": "foo@bar"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", + Config: charm.Settings{"username": "foo", "outlook": "foo@bar"}, }}} }, func(c *gc.C, st *State) changeTestCase { @@ -1948,10 +1948,10 @@ return changeTestCase{ about: "service config is changed after removing of a setting", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", - Config: charm.Settings{"username": "foo", "outlook": "foo@bar"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", + Config: charm.Settings{"username": "foo", "outlook": "foo@bar"}, }}, change: watcher.Change{ C: "settings", @@ -1959,10 +1959,10 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", - Config: charm.Settings{"outlook": "foo@bar"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", + Config: charm.Settings{"outlook": "foo@bar"}, }}} }, func(c *gc.C, st *State) changeTestCase { @@ -1976,10 +1976,10 @@ return changeTestCase{ about: "service config is unescaped when reading from the backing store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", - Config: charm.Settings{"key.dotted": "bar"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-1", + Config: charm.Settings{"key.dotted": "bar"}, }}, change: watcher.Change{ C: "settings", @@ -1987,10 +1987,10 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-1", - Config: charm.Settings{"key.dotted": "foo"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: 
"local:quantal/quantal-dummy-1", + Config: charm.Settings{"key.dotted": "foo"}, }}} }, func(c *gc.C, st *State) changeTestCase { @@ -2000,10 +2000,10 @@ return changeTestCase{ about: "service config is unchanged if service exists in the store with a different URL", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-2", // Note different revno. - Config: charm.Settings{"username": "bar"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-2", // Note different revno. + Config: charm.Settings{"username": "bar"}, }}, change: watcher.Change{ C: "settings", @@ -2011,10 +2011,10 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "dummy-service", - CharmURL: "local:quantal/quantal-dummy-2", - Config: charm.Settings{"username": "bar"}, + ModelUUID: st.ModelUUID(), + Name: "dummy-service", + CharmURL: "local:quantal/quantal-dummy-2", + Config: charm.Settings{"username": "bar"}, }}} }, func(c *gc.C, st *State) changeTestCase { @@ -2051,7 +2051,7 @@ return changeTestCase{ about: "no change if service is not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Constraints: constraints.MustParse("mem=99M"), }}, @@ -2060,7 +2060,7 @@ Id: st.docID("s#wordpress"), }, expectContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Constraints: constraints.MustParse("mem=99M"), }}} @@ -2073,7 +2073,7 @@ return changeTestCase{ about: "status is changed if the service exists in the store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Constraints: constraints.MustParse("mem=99M cpu-cores=2 cpu-power=4"), }}, @@ -2083,7 +2083,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress", Constraints: constraints.MustParse("mem=4G arch=amd64"), }}} @@ -2108,8 +2108,8 @@ about: "unit is removed if it's not in backing", initialContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress/1", + ModelUUID: st.ModelUUID(), + Name: "wordpress/1", }, }, change: watcher.Change{ @@ -2142,7 +2142,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2189,7 +2189,7 @@ return changeTestCase{ about: "unit is updated if it's in backing and in multiwatcher.Store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Status: multiwatcher.Status("error"), StatusInfo: "another failure", @@ -2215,7 +2215,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2252,12 +2252,12 @@ about: "unit info is updated if a port is opened on the machine it is placed in", initialContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", StatusData: 
map[string]interface{}{}, }, &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", StatusData: map[string]interface{}{}, }, @@ -2268,14 +2268,14 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Ports: []network.Port{{"tcp", 4242}}, PortRanges: []network.PortRange{{4242, 4242, "tcp"}}, StatusData: map[string]interface{}{}, }, &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: "0", StatusData: map[string]interface{}{}, }, @@ -2296,8 +2296,8 @@ about: "unit is created if a port is opened on the machine it is placed in", initialContents: []multiwatcher.EntityInfo{ &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), - Id: "0", + ModelUUID: st.ModelUUID(), + Id: "0", }, }, change: watcher.Change{ @@ -2306,7 +2306,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2326,8 +2326,8 @@ PortRanges: []network.PortRange{{21, 22, "tcp"}}, }, &multiwatcher.MachineInfo{ - EnvUUID: st.EnvironUUID(), - Id: "0", + ModelUUID: st.ModelUUID(), + Id: "0", }, }} }, @@ -2356,7 +2356,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2392,7 +2392,7 @@ return changeTestCase{ about: "no change if status is not in backing", initialContents: []multiwatcher.EntityInfo{&multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("error"), @@ -2417,7 +2417,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("error"), @@ -2445,7 +2445,7 @@ return changeTestCase{ about: "status is changed if the unit exists in the store", initialContents: []multiwatcher.EntityInfo{&multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("started"), @@ -2470,7 +2470,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("started"), @@ -2500,7 +2500,7 @@ return changeTestCase{ about: "unit status is changed if the agent comes off error state", initialContents: []multiwatcher.EntityInfo{&multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("error"), @@ -2524,7 +2524,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("started"), @@ -2556,10 +2556,10 @@ return changeTestCase{ about: "status is changed with additional status data", initialContents: []multiwatcher.EntityInfo{&multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress/0", - Service: "wordpress", - Status: multiwatcher.Status("started"), + ModelUUID: st.ModelUUID(), + Name: "wordpress/0", + Service: "wordpress", + Status: multiwatcher.Status("started"), AgentStatus: 
multiwatcher.StatusInfo{ Current: "idle", Message: "", @@ -2577,7 +2577,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("error"), @@ -2614,10 +2614,10 @@ about: "service status is changed if the unit status changes", initialContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress/0", - Service: "wordpress", - Status: multiwatcher.Status("error"), + ModelUUID: st.ModelUUID(), + Name: "wordpress/0", + Service: "wordpress", + Status: multiwatcher.Status("error"), AgentStatus: multiwatcher.StatusInfo{ Current: "idle", Message: "", @@ -2632,8 +2632,8 @@ }, }, &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress", + ModelUUID: st.ModelUUID(), + Name: "wordpress", Status: multiwatcher.StatusInfo{ Current: "error", Message: "failure", @@ -2648,7 +2648,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Status: multiwatcher.Status("started"), @@ -2665,8 +2665,8 @@ }, }, &multiwatcher.ServiceInfo{ - EnvUUID: st.EnvironUUID(), - Name: "wordpress", + ModelUUID: st.ModelUUID(), + Name: "wordpress", Status: multiwatcher.StatusInfo{ Current: "active", Message: "", @@ -2733,7 +2733,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2765,7 +2765,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2799,7 +2799,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2833,7 +2833,7 @@ }, expectContents: []multiwatcher.EntityInfo{ &multiwatcher.UnitInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: "wordpress/0", Service: "wordpress", Series: "quantal", @@ -2861,8 +2861,8 @@ return newTestWatcher(newAllWatcherStateBacking(st), st, c) } -func newTestAllEnvWatcher(st *State, c *gc.C) *testWatcher { - return newTestWatcher(newAllEnvWatcherStateBacking(st), st, c) +func newTestAllModelWatcher(st *State, c *gc.C) *testWatcher { + return newTestWatcher(NewAllModelWatcherStateBacking(st), st, c) } type testWatcher struct { @@ -2977,8 +2977,8 @@ if id0.Kind != id1.Kind { return id0.Kind < id1.Kind } - if id0.EnvUUID != id1.EnvUUID { - return id0.EnvUUID < id1.EnvUUID + if id0.ModelUUID != id1.ModelUUID { + return id0.ModelUUID < id1.ModelUUID } return id0.Id < id1.Id } @@ -3004,7 +3004,7 @@ func makeActionInfo(a *Action, st *State) multiwatcher.ActionInfo { results, message := a.Results() return multiwatcher.ActionInfo{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Id: a.Id(), Receiver: a.Receiver(), Name: a.Name(), === modified file 'src/github.com/juju/juju/state/annotations.go' --- src/github.com/juju/juju/state/annotations.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/annotations.go 2016-03-22 15:18:22 +0000 @@ -20,7 +20,7 @@ // Annotations/Annotation below. // Note also the correspondence with AnnotationInfo in apiserver/params. 
type annotatorDoc struct { - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` GlobalKey string `bson:"globalkey"` Tag string `bson:"tag"` Annotations map[string]string `bson:"annotations"` @@ -41,10 +41,10 @@ return fmt.Errorf("invalid key %q", key) } if value == "" { - toRemove["annotations."+key] = true + toRemove[key] = true } else { toInsert[key] = value - toUpdate["annotations."+key] = value + toUpdate[key] = value } } // Set up and call the necessary transactions - if the document does not @@ -111,19 +111,18 @@ }} switch tag := tag.(type) { - case names.EnvironTag: - env, err := st.GetEnvironment(tag) + case names.ModelTag: + env, err := st.GetModel(tag) if err != nil { return nil, errors.Annotatef(err, "inserting annotations") } - if env.UUID() == env.doc.ServerUUID { - // This is a state server environment, and - // cannot be removed. Ergo, we can skip the - // existence check below. + if env.UUID() == env.ControllerUUID() { + // This is the controller model, and cannot be removed. + // Ergo, we can skip the existence check below. return ops, nil } } - // If the entity is not the state server environment, add a DocExists check on the + // If the entity is not the controller model, add a DocExists check on the // entity document, in order to avoid possible races between entity // removal and annotation creation. coll, id, err := st.tagToCollectionAndId(tag) @@ -143,7 +142,7 @@ C: annotationsC, Id: st.docID(entity.globalKey()), Assert: txn.DocExists, - Update: setUnsetUpdate(toUpdate, toRemove), + Update: setUnsetUpdateAnnotations(toUpdate, toRemove), }} } @@ -156,3 +155,21 @@ Remove: true, } } + +// setUnsetUpdateAnnotations returns a bson.D for use +// in an annotationsC txn.Op's Update field, containing $set and +// $unset operators if the corresponding operands +// are non-empty. 
+func setUnsetUpdateAnnotations(set, unset bson.M) bson.D { + var update bson.D + replace := inSubdocReplacer("annotations") + if len(set) > 0 { + set = bson.M(copyMap(map[string]interface{}(set), replace)) + update = append(update, bson.DocElem{"$set", set}) + } + if len(unset) > 0 { + unset = bson.M(copyMap(map[string]interface{}(unset), replace)) + update = append(update, bson.DocElem{"$unset", unset}) + } + return update +} === modified file 'src/github.com/juju/juju/state/annotations_test.go' --- src/github.com/juju/juju/state/annotations_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/annotations_test.go 2016-03-22 15:18:22 +0000 @@ -161,25 +161,25 @@ c.Assert(err, jc.ErrorIsNil) err = st.Close() c.Assert(err, jc.ErrorIsNil) - err = state.RemoveEnvironment(s.State, st.EnvironUUID()) + err = state.RemoveModel(s.State, st.ModelUUID()) c.Assert(err, jc.ErrorIsNil) expected = "fail" annts[key] = expected err = s.State.SetAnnotations(env, annts) - c.Assert(errors.Cause(err), gc.ErrorMatches, ".*environment not found.*") + c.Assert(errors.Cause(err), gc.ErrorMatches, ".*model not found.*") c.Assert(err, gc.ErrorMatches, ".*cannot update annotations.*") } -func (s *AnnotationsEnvSuite) createTestEnv(c *gc.C) (*state.Environment, *state.State) { +func (s *AnnotationsEnvSuite) createTestEnv(c *gc.C) (*state.Model, *state.State) { uuid, err := utils.NewUUID() c.Assert(err, jc.ErrorIsNil) - cfg := testing.CustomEnvironConfig(c, testing.Attrs{ + cfg := testing.CustomModelConfig(c, testing.Attrs{ "name": "testing", "uuid": uuid.String(), }) owner := names.NewUserTag("test@remote") - env, st, err := s.State.NewEnvironment(cfg, owner) + env, st, err := s.State.NewModel(cfg, owner) c.Assert(err, jc.ErrorIsNil) return env, st } === modified file 'src/github.com/juju/juju/state/assign_test.go' --- src/github.com/juju/juju/state/assign_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/assign_test.go 2016-03-22 15:18:22 +0000 @@ -227,10 +227,10 @@ err := s.wordpress.SetConstraints(scons) c.Assert(err, jc.ErrorIsNil) econs := constraints.MustParse("mem=4G cpu-cores=2") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) - // Machine will take environment constraints on creation. + // Machine will take model constraints on creation. machine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -283,6 +283,16 @@ testWhenDying(c, machine, expect, expect, assignTest) } +func (s *AssignSuite) TestAssignMachineDifferentSeries(c *gc.C) { + machine, err := s.State.AddMachine("trusty", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + unit, err := s.wordpress.AddUnit() + c.Assert(err, jc.ErrorIsNil) + err = unit.AssignToMachine(machine) + c.Assert(err, gc.ErrorMatches, + `cannot assign unit "wordpress/0" to machine 0: series does not match`) +} + func (s *AssignSuite) TestPrincipals(c *gc.C) { machine, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -390,7 +400,7 @@ func (s *AssignSuite) TestAssignUnitToNewMachineDefaultContainerConstraint(c *gc.C) { // Set up env constraints. 
econs := constraints.MustParse("container=lxc") - err := s.State.SetEnvironConstraints(econs) + err := s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) s.assertAssignUnitToNewMachineContainerConstraint(c) } @@ -414,7 +424,7 @@ err := s.wordpress.SetConstraints(scons) c.Assert(err, jc.ErrorIsNil) econs := constraints.MustParse("mem=4G cpu-cores=2") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) // Unit will take combined service/environ constraints on creation. @@ -426,7 +436,7 @@ err = s.wordpress.SetConstraints(scons) c.Assert(err, jc.ErrorIsNil) econs = constraints.MustParse("cpu-cores=4") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) // The new machine takes the original combined unit constraints. @@ -506,12 +516,12 @@ } func (s *AssignSuite) TestAssignUnitToNewMachineBecomesDirty(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) // Set up constraints to specify we want to install into a container. econs := constraints.MustParse("container=lxc") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) // Create some units and a clean machine. @@ -539,12 +549,12 @@ } func (s *AssignSuite) TestAssignUnitToNewMachineBecomesHost(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) // Set up constraints to specify we want to install into a container. econs := constraints.MustParse("container=lxc") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) // Create a unit and a clean machine. @@ -584,7 +594,7 @@ } func (s *AssignSuite) TestAssignUnitLocalPolicy(c *gc.C) { - m, err := s.State.AddMachine("quantal", state.JobManageEnviron, state.JobHostUnits) // bootstrap machine + m, err := s.State.AddMachine("quantal", state.JobManageModel, state.JobHostUnits) // bootstrap machine c.Assert(err, jc.ErrorIsNil) unit, err := s.wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -650,13 +660,13 @@ c.Assert(err, jc.ErrorIsNil) // Set up env constraints. econs := constraints.MustParse("container=lxc") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) s.assertAssignUnitNewPolicyWithContainerConstraint(c) } func (s *AssignSuite) TestAssignUnitWithSubordinate(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) unit, err := s.wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -727,7 +737,7 @@ // setupMachines creates a combination of machines with which to test. 
func (s *assignCleanSuite) setupMachines(c *gc.C) (hostMachine *state.Machine, container *state.Machine, cleanEmptyMachine *state.Machine) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) // Add some units to another service and allocate them to machines @@ -814,7 +824,7 @@ // Add a state management machine which can host units and check it is not chosen. // Note that this must be the first machine added, as AddMachine can only // be used to add state-manager machines for the bootstrap machine. - m, err = s.State.AddMachine("quantal", state.JobManageEnviron, state.JobHostUnits) + m, err = s.State.AddMachine("quantal", state.JobManageModel, state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) m, err = s.assignUnit(unit) c.Assert(m, gc.IsNil) @@ -830,7 +840,7 @@ c.Assert(err, gc.ErrorMatches, eligibleMachinesInUse) // Add two environ manager machines and check they are not chosen. - changes, err := s.State.EnsureAvailability(3, constraints.Value{}, "quantal", nil) + changes, err := s.State.EnableHA(3, constraints.Value{}, "quantal", nil) c.Assert(err, jc.ErrorIsNil) c.Assert(changes.Added, gc.HasLen, 3) @@ -938,7 +948,7 @@ for i, t := range assignUsingConstraintsTests { c.Logf("test %d", i) cons := constraints.MustParse(t.unitConstraints) - err := s.State.SetEnvironConstraints(cons) + err := s.State.SetModelConstraints(cons) c.Assert(err, jc.ErrorIsNil) unit, err := s.wordpress.AddUnit() @@ -967,7 +977,7 @@ } func (s *assignCleanSuite) TestAssignUnitWithRemovedService(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) unit, err := s.wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -983,7 +993,7 @@ } func (s *assignCleanSuite) TestAssignUnitToMachineWithRemovedUnit(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) unit, err := s.wordpress.AddUnit() c.Assert(err, jc.ErrorIsNil) @@ -1123,7 +1133,7 @@ } func (s *assignCleanSuite) TestAssignUnitPolicy(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) // Check unassigned placements with no clean and/or empty machines. @@ -1203,7 +1213,7 @@ } func (s *assignCleanSuite) TestAssignUnitPolicyWithContainers(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) // Create a machine and add a new container. @@ -1220,7 +1230,7 @@ // Set up constraints to specify we want to install into a container. econs := constraints.MustParse("container=lxc") - err = s.State.SetEnvironConstraints(econs) + err = s.State.SetModelConstraints(econs) c.Assert(err, jc.ErrorIsNil) // Check the first placement goes into the newly created, clean container above.
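(Editorial aside: several of the tests above drive placement purely through model constraints. A small, self-contained sketch of the parsing helper they rely on, constraints.MustParse from github.com/juju/juju/constraints, follows; the rendered output is indicative only.)

package main

import (
	"fmt"

	"github.com/juju/juju/constraints"
)

func main() {
	// MustParse panics on malformed input, which suits test literals.
	econs := constraints.MustParse("container=lxc mem=4G")
	// String() renders the canonical form; memory is normalised to
	// megabytes, so expect something like "container=lxc mem=4096M"
	// (exact rendering is indicative only).
	fmt.Println(econs.String())
}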
@@ -1270,7 +1280,7 @@ } func (s *assignCleanSuite) TestAssignUnitPolicyConcurrently(c *gc.C) { - _, err := s.State.AddMachine("quantal", state.JobManageEnviron) // bootstrap machine + _, err := s.State.AddMachine("quantal", state.JobManageModel) // bootstrap machine c.Assert(err, jc.ErrorIsNil) us := make([]*state.Unit, 50) for i := range us { === modified file 'src/github.com/juju/juju/state/backups/backups.go' --- src/github.com/juju/juju/state/backups/backups.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/backups.go 2016-03-22 15:18:22 +0000 @@ -21,8 +21,8 @@ Note that state (and juju as a whole) currently does not have a persistence layer abstraction to facilitate separating different persistence needs and implementations. As a consequence, state's -data, whether about how an environment should look or about existing -resources within an environment, is dumped essentially straight into +data, whether about how a model should look or about existing +resources within a model, is dumped essentially straight into State's mongo connection. The code in the state package does not make any distinction between the two (nor does the package clearly distinguish between state-related abstractions and state-related @@ -174,7 +174,7 @@ return meta.ID(), nil } -// Get retrieves the associated metadata and archive file from environment storage. +// Get retrieves the associated metadata and archive file from model storage. func (b *backups) Get(id string) (*Metadata, io.ReadCloser, error) { rawmeta, archiveFile, err := b.storage.Get(id) if err != nil { === modified file 'src/github.com/juju/juju/state/backups/backups_linux.go' --- src/github.com/juju/juju/state/backups/backups_linux.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/backups_linux.go 2016-03-22 15:18:22 +0000 @@ -43,15 +43,18 @@ return errors.Errorf("agent config has no state serving info") } + // TODO(perrito666) determine mongo version from dump. err := mongo.EnsureServiceInstalled(agentConfig.DataDir(), - agentConfig.Value(agent.Namespace), si.StatePort, oplogSize, - numaCtlPolicy) + numaCtlPolicy, + mongo.Mongo24, + true, + ) return errors.Annotate(err, "cannot ensure that mongo service start/stop scripts are in place") } -// Restore handles either returning or creating a state server to a backed up status: +// Restore handles either returning an existing controller to, or creating a new one in, a backed up status: // * extracts the content of the given backup file and: // * runs mongorestore with the backed up mongo dump // * updates and writes configuration files @@ -76,7 +79,7 @@ version := meta.Origin.Version backupMachine := names.NewMachineTag(meta.Origin.Machine) - if err := mongo.StopService(agent.Namespace); err != nil { + if err := mongo.StopService(); err != nil { return nil, errors.Annotate(err, "cannot stop mongo to replace files") } @@ -165,13 +168,13 @@ return nil, errors.Annotate(err, "cannot update mongo entries") } - // From here we work with the restored state server + // From here we work with the restored controller mgoInfo, ok := agentConfig.MongoInfo() if !ok { return nil, errors.Errorf("cannot retrieve info to connect to mongo") } - st, err := newStateConnection(agentConfig.Environment(), mgoInfo) + st, err := newStateConnection(agentConfig.Model(), mgoInfo) if err != nil { return nil, errors.Trace(err) } @@ -187,7 +190,7 @@ return nil, errors.Annotate(err, "cannot update api server machine addresses") } - // update all agents known to the new state server.
+ // update all agents known to the new controller. // TODO(perrito666): We should never stop the process because of this. // updateAllMachines will not return errors for individual // agent update failures === modified file 'src/github.com/juju/juju/state/backups/backups_test.go' --- src/github.com/juju/juju/state/backups/backups_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/backups/backups_test.go 2016-03-22 15:18:22 +0000 @@ -94,7 +94,7 @@ targets := set.NewStrings("juju", "admin") dbInfo := backups.DBInfo{"a", "b", "c", targets} meta := backupstesting.NewMetadataStarted() - backupstesting.SetOrigin(meta, "", "", "") + backupstesting.SetOrigin(meta, "", "", "") meta.Notes = "some notes" err := s.api.Create(meta, &paths, &dbInfo) @@ -116,7 +116,7 @@ c.Check(meta.Size(), gc.Equals, int64(10)) c.Check(meta.Checksum(), gc.Equals, "") c.Check(meta.Stored().Unix(), gc.Equals, stored.Unix()) - c.Check(meta.Origin.Environment, gc.Equals, "") + c.Check(meta.Origin.Model, gc.Equals, "") c.Check(meta.Origin.Machine, gc.Equals, "") c.Check(meta.Origin.Hostname, gc.Equals, "") c.Check(meta.Notes, gc.Equals, "some notes") === modified file 'src/github.com/juju/juju/state/backups/db.go' --- src/github.com/juju/juju/state/backups/db.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/db.go 2016-03-22 15:18:22 +0000 @@ -16,7 +16,6 @@ "github.com/juju/juju/juju/paths" "github.com/juju/juju/mongo" "github.com/juju/juju/state/imagestorage" - "github.com/juju/juju/utils" "github.com/juju/juju/version" ) @@ -28,7 +27,7 @@ // low-level details publicly. Thus the backups implementation remains // oblivious to the underlying DB implementation. -var runCommand = utils.RunCommand +var runCommandFn = runCommand // DBInfo wraps all the DB-specific information backups needs to dump // the database. This includes a simplification of the information in @@ -97,7 +96,8 @@ } var getMongodumpPath = func() (string, error) { - mongod, err := mongo.Path() + // TODO(perrito666) Add automagic determination of version here + mongod, err := mongo.Path(mongo.Mongo24) if err != nil { return "", errors.Annotate(err, "failed to get mongod path") } @@ -152,7 +152,7 @@ func (md *mongoDumper) dump(dumpDir string) error { options := md.options(dumpDir) - if err := runCommand(md.binPath, options...); err != nil { + if err := runCommandFn(md.binPath, options...); err != nil { return errors.Annotate(err, "error dumping databases") } return nil @@ -219,15 +219,16 @@ // mongoRestoreArgsForVersion returns a string slice containing the args to be used // to call mongo restore since these can change depending on the backup method. -// Version 0: a dump made with --db, stopping the state server. -// Version 1: a dump made with --oplog with a running state server. +// Version 0: a dump made with --db, stopping the controller. +// Version 1: a dump made with --oplog with a running controller.
// TODO (perrito666) change versions to use metadata version func mongoRestoreArgsForVersion(ver version.Number, dumpPath string) ([]string, error) { - dbDir := filepath.Join(agent.DefaultDataDir, "db") + dbDir := filepath.Join(agent.DefaultPaths.DataDir, "db") switch { case ver.Major == 1 && ver.Minor < 22: return []string{"--drop", "--journal", "--dbpath", dbDir, dumpPath}, nil - case ver.Major == 1 && ver.Minor >= 22: + case ver.Major == 1 && ver.Minor >= 22, + ver.Major == 2: return []string{"--drop", "--journal", "--oplogReplay", "--dbpath", dbDir, dumpPath}, nil default: return nil, errors.Errorf("this backup file is incompatible with the current version of juju") @@ -241,7 +242,7 @@ // and starting before dumping the new mongo db, it is mainly to ease testing // of placeNewMongo. func placeNewMongoService(newMongoDumpPath string, ver version.Number) error { - err := mongo.StopService("") + err := mongo.StopService() if err != nil { return errors.Annotate(err, "failed to stop mongo") } @@ -249,7 +250,7 @@ if err := placeNewMongo(newMongoDumpPath, ver); err != nil { return errors.Annotate(err, "cannot place new mongo") } - err = mongo.StartService("") + err = mongo.StartService() return errors.Annotate(err, "failed to start mongo") } @@ -266,7 +267,7 @@ return errors.Errorf("cannot restore this backup version") } - err = runCommand(mongoRestore, mgoRestoreArgs...) + err = runCommandFn(mongoRestore, mgoRestoreArgs...) if err != nil { return errors.Annotate(err, "failed to restore database dump") === modified file 'src/github.com/juju/juju/state/backups/db_restore_test.go' --- src/github.com/juju/juju/state/backups/db_restore_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/db_restore_test.go 2016-03-22 15:18:22 +0000 @@ -21,8 +21,8 @@ testing.BaseSuite } -func (s *mongoRestoreSuite) TestMongoRestoreArgsForVersion(c *gc.C) { - dir := filepath.Join(agent.DefaultDataDir, "db") +func (s *mongoRestoreSuite) TestMongoRestoreArgsForVersion121(c *gc.C) { + dir := filepath.Join(agent.DefaultPaths.DataDir, "db") versionNumber := version.Number{} versionNumber.Major = 1 versionNumber.Minor = 21 @@ -36,24 +36,49 @@ dir, "/some/fake/path", }) +} +func (s *mongoRestoreSuite) TestMongoRestoreArgsForVersion122(c *gc.C) { + dir := filepath.Join(agent.DefaultPaths.DataDir, "db") + versionNumber := version.Number{} versionNumber.Major = 1 versionNumber.Minor = 22 - args, err = backups.MongoRestoreArgsForVersion(versionNumber, "/some/fake/path") - c.Assert(err, jc.ErrorIsNil) - c.Assert(args, gc.HasLen, 6) - c.Assert(args[0:6], jc.DeepEquals, []string{ - "--drop", - "--journal", - "--oplogReplay", - "--dbpath", - dir, - "/some/fake/path", - }) - + args, err := backups.MongoRestoreArgsForVersion(versionNumber, "/some/fake/path") + c.Assert(err, jc.ErrorIsNil) + c.Assert(args, gc.HasLen, 6) + c.Assert(args[0:6], jc.DeepEquals, []string{ + "--drop", + "--journal", + "--oplogReplay", + "--dbpath", + dir, + "/some/fake/path", + }) +} + +func (s *mongoRestoreSuite) TestMongoRestoreArgsForVersion2(c *gc.C) { + dir := filepath.Join(agent.DefaultPaths.DataDir, "db") + versionNumber := version.Number{} + versionNumber.Major = 2 + versionNumber.Minor = 0 + args, err := backups.MongoRestoreArgsForVersion(versionNumber, "/some/fake/path") + c.Assert(err, jc.ErrorIsNil) + c.Assert(args, gc.HasLen, 6) + c.Assert(args[0:6], jc.DeepEquals, []string{ + "--drop", + "--journal", + "--oplogReplay", + "--dbpath", + dir, + "/some/fake/path", + }) +} + +func (s *mongoRestoreSuite)
TestMongoRestoreArgsForOldVersion(c *gc.C) { + versionNumber := version.Number{} versionNumber.Major = 0 versionNumber.Minor = 0 - _, err = backups.MongoRestoreArgsForVersion(versionNumber, "/some/fake/path") + _, err := backups.MongoRestoreArgsForVersion(versionNumber, "/some/fake/path") c.Assert(err, gc.ErrorMatches, "this backup file is incompatible with the current version of juju") } === added file 'src/github.com/juju/juju/state/backups/exec.go' --- src/github.com/juju/juju/state/backups/exec.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/state/backups/exec.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package backups + +import ( + "os/exec" + "strings" + + "github.com/juju/errors" +) + +// runCommand execs the provided command. It exists +// here so it can be overridden in export_test.go +func runCommand(cmd string, args ...string) error { + command := exec.Command(cmd, args...) + out, err := command.CombinedOutput() + if err == nil { + return nil + } + if _, ok := err.(*exec.ExitError); ok && len(out) > 0 { + return errors.Errorf( + "error executing %q: %s", + cmd, + strings.Replace(string(out), "\n", "; ", -1), + ) + } + return errors.Annotatef(err, "error executing %q", cmd) +} === modified file 'src/github.com/juju/juju/state/backups/export_test.go' --- src/github.com/juju/juju/state/backups/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/backups/export_test.go 2016-03-22 15:18:22 +0000 @@ -26,7 +26,7 @@ FinishMeta = &finishMeta StoreArchiveRef = &storeArchive GetMongodumpPath = &getMongodumpPath - RunCommand = &runCommand + RunCommand = &runCommandFn ReplaceableFolders = &replaceableFolders ) @@ -34,9 +34,9 @@ var _ filestorage.RawFileStorage = (*backupBlobStorage)(nil) func getBackupDBWrapper(st *state.State) *storageDBWrapper { - envUUID := st.EnvironTag().Id() + modelUUID := st.ModelTag().Id() db := st.MongoSession().DB(storageDBName) - return newStorageDBWrapper(db, storageMetaName, envUUID) + return newStorageDBWrapper(db, storageMetaName, modelUUID) } // NewBackupID creates a new backup ID based on the metadata. === modified file 'src/github.com/juju/juju/state/backups/files.go' --- src/github.com/juju/juju/state/backups/files.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/files.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,6 @@ import ( "fmt" "os" - "path" "path/filepath" "sort" @@ -17,25 +16,21 @@ // Pull these from authoritative sources (see // github.com/juju/juju/juju/paths, etc.): const ( - dataDir = "/var/lib/juju" - loggingConfDir = "/etc/rsyslog.d" - logsDir = "/var/log/juju" - sshDir = "/home/ubuntu/.ssh" - - agentsDir = "agents" - agentsConfs = "machine-*" - loggingConfs = "*juju.conf" - toolsDir = "tools" - - sshIdentFile = "system-identity" - nonceFile = "nonce.txt" - allMachinesLog = "all-machines.log" - machineLog = "machine-%s.log" - authKeysFile = "authorized_keys" - - dbStartupConf = "juju-db.conf" - dbPEM = "server.pem" - dbSecret = "shared-secret" + dataDir = "/var/lib/juju" + logsDir = "/var/log/juju" + sshDir = "/home/ubuntu/.ssh" + + agentsDir = "agents" + agentsConfs = "machine-*" + toolsDir = "tools" + + sshIdentFile = "system-identity" + nonceFile = "nonce.txt" + machineLog = "machine-%s.log" + authKeysFile = "authorized_keys" + + dbPEM = "server.pem" + dbSecret = "shared-secret" ) // Paths holds the paths that backups needs. 
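(Editorial aside: the new exec.go above folds a failed command's multi-line output into a single-line error. The following self-contained sketch mirrors that helper to show the behaviour; the sh invocation is illustrative only, not a call site from the patch.)

package main

import (
	"fmt"
	"os/exec"
	"strings"

	"github.com/juju/errors"
)

// run mirrors the runCommand helper added in exec.go above.
func run(cmd string, args ...string) error {
	command := exec.Command(cmd, args...)
	out, err := command.CombinedOutput()
	if err == nil {
		return nil
	}
	if _, ok := err.(*exec.ExitError); ok && len(out) > 0 {
		// Newlines in the captured output are joined with "; " so the
		// failure reads as one line.
		return errors.Errorf(
			"error executing %q: %s",
			cmd,
			strings.Replace(string(out), "\n", "; ", -1),
		)
	}
	return errors.Annotatef(err, "error executing %q", cmd)
}

func main() {
	// Illustrative only: produce multi-line output and a non-zero exit.
	err := run("sh", "-c", "echo line1; echo line2; exit 1")
	fmt.Println(err) // error executing "sh": line1; line2;
}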
@@ -55,12 +50,6 @@ return nil, errors.Annotate(err, "failed to fetch agent config files") } - glob = filepath.Join(rootDir, loggingConfDir, loggingConfs) - jujuLogConfs, err := filepath.Glob(glob) - if err != nil { - return nil, errors.Annotate(err, "failed to fetch juju log conf files") - } - backupFiles := []string{ filepath.Join(rootDir, paths.DataDir, toolsDir), @@ -70,19 +59,7 @@ filepath.Join(rootDir, paths.DataDir, dbSecret), } backupFiles = append(backupFiles, agentConfs...) - backupFiles = append(backupFiles, jujuLogConfs...) - // Handle logs (might not exist). - // TODO(ericsnow) We should consider dropping these entirely. - allmachines := filepath.Join(rootDir, paths.LogsDir, allMachinesLog) - if _, err := os.Stat(allmachines); err != nil { - if !os.IsNotExist(err) { - return nil, errors.Trace(err) - } - logger.Errorf("skipping missing file %q", allmachines) - } else { - backupFiles = append(backupFiles, allmachines) - } // TODO(ericsnow) It might not be machine 0... machinelog := filepath.Join(rootDir, paths.LogsDir, fmt.Sprintf(machineLog, oldmachine)) if _, err := os.Stat(machinelog); err != nil { @@ -131,7 +108,6 @@ filepath.Join(dataDir, "db"), dataDir, logsDir, - path.Join(logsDir, "all-machines.log"), } { dirStat, err := os.Stat(replaceable) if err != nil { === modified file 'src/github.com/juju/juju/state/backups/files_test.go' --- src/github.com/juju/juju/state/backups/files_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/backups/files_test.go 2016-03-22 15:18:22 +0000 @@ -59,12 +59,8 @@ touch(dirname, "machine-"+machineID+".conf") dirname = mkdir(paths.LogsDir) - touch(dirname, "all-machines.log") touch(dirname, "machine-"+machineID+".log") - dirname = mkdir("/etc/rsyslog.d") - touch(dirname, "spam-juju.conf") - dirname = mkdir("/home/ubuntu/.ssh") touch(dirname, "authorized_keys") } @@ -109,7 +105,6 @@ c.Assert(err, jc.ErrorIsNil) expected := []string{ - filepath.Join(s.root, "/etc/rsyslog.d/spam-juju.conf"), filepath.Join(s.root, "/home/ubuntu/.ssh/authorized_keys"), filepath.Join(s.root, "/var/lib/juju/agents/machine-0.conf"), filepath.Join(s.root, "/var/lib/juju/nonce.txt"), @@ -117,7 +112,6 @@ filepath.Join(s.root, "/var/lib/juju/shared-secret"), filepath.Join(s.root, "/var/lib/juju/system-identity"), filepath.Join(s.root, "/var/lib/juju/tools"), - filepath.Join(s.root, "/var/log/juju/all-machines.log"), filepath.Join(s.root, "/var/log/juju/machine-0.log"), } c.Check(files, jc.SameContents, expected) @@ -192,7 +186,6 @@ c.Assert(err, jc.ErrorIsNil) expected := []string{ - filepath.Join(s.root, "/etc/rsyslog.d/spam-juju.conf"), filepath.Join(s.root, "/home/ubuntu/.ssh/authorized_keys"), filepath.Join(s.root, "/var/lib/juju/agents/machine-10.conf"), filepath.Join(s.root, "/var/lib/juju/nonce.txt"), @@ -200,7 +193,6 @@ filepath.Join(s.root, "/var/lib/juju/shared-secret"), filepath.Join(s.root, "/var/lib/juju/system-identity"), filepath.Join(s.root, "/var/lib/juju/tools"), - filepath.Join(s.root, "/var/log/juju/all-machines.log"), filepath.Join(s.root, "/var/log/juju/machine-10.log"), } c.Check(files, jc.SameContents, expected) @@ -217,7 +209,6 @@ missing := []string{ "/var/lib/juju/nonce.txt", "/home/ubuntu/.ssh/authorized_keys", - "/var/log/juju/all-machines.log", "/var/log/juju/machine-0.log", } for _, filename := range missing { @@ -229,7 +220,6 @@ c.Assert(err, jc.ErrorIsNil) expected := []string{ - filepath.Join(s.root, "/etc/rsyslog.d/spam-juju.conf"), filepath.Join(s.root, "/var/lib/juju/agents/machine-0.conf"), filepath.Join(s.root, 
"/var/lib/juju/server.pem"), filepath.Join(s.root, "/var/lib/juju/shared-secret"), === modified file 'src/github.com/juju/juju/state/backups/metadata.go' --- src/github.com/juju/juju/state/backups/metadata.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/backups/metadata.go 2016-03-22 15:18:22 +0000 @@ -28,10 +28,10 @@ // separately from Metadata due to its use as an argument when // requesting the creation of a new backup. type Origin struct { - Environment string - Machine string - Hostname string - Version version.Number + Model string + Machine string + Hostname string + Version version.Number } // UnknownString is a marker value for string fields with unknown values. @@ -43,10 +43,10 @@ // UnknownOrigin returns a new backups origin with unknown values. func UnknownOrigin() Origin { return Origin{ - Environment: UnknownString, - Machine: UnknownString, - Hostname: UnknownString, - Version: UnknownVersion, + Model: UnknownString, + Machine: UnknownString, + Hostname: UnknownString, + Version: UnknownVersion, } } @@ -71,16 +71,16 @@ FileMetadata: filestorage.NewMetadata(), Started: time.Now().UTC(), Origin: Origin{ - Version: version.Current.Number, + Version: version.Current, }, } } // NewMetadataState composes a new backup metadata with its origin -// values set. The environment UUID comes from state. The hostname is +// values set. The model UUID comes from state. The hostname is // retrieved from the OS. func NewMetadataState(db DB, machine string) (*Metadata, error) { - // hostname could be derived from the environment... + // hostname could be derived from the model... hostname, err := os.Hostname() if err != nil { // If os.Hostname() is not working, something is woefully wrong. @@ -89,7 +89,7 @@ } meta := NewMetadata() - meta.Origin.Environment = db.EnvironTag().Id() + meta.Origin.Model = db.ModelTag().Id() meta.Origin.Machine = machine meta.Origin.Hostname = hostname return meta, nil @@ -149,7 +149,7 @@ Started: m.Started, Notes: m.Notes, - Environment: m.Origin.Environment, + Environment: m.Origin.Model, Machine: m.Origin.Machine, Hostname: m.Origin.Hostname, Version: m.Origin.Version, @@ -196,10 +196,10 @@ } meta.Notes = flat.Notes meta.Origin = Origin{ - Environment: flat.Environment, - Machine: flat.Machine, - Hostname: flat.Hostname, - Version: flat.Version, + Model: flat.Environment, + Machine: flat.Machine, + Hostname: flat.Hostname, + Version: flat.Version, } return meta, nil === modified file 'src/github.com/juju/juju/state/backups/metadata_test.go' --- src/github.com/juju/juju/state/backups/metadata_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/backups/metadata_test.go 2016-03-22 15:18:22 +0000 @@ -26,10 +26,10 @@ func (s *metadataSuite) TestAsJSONBuffer(c *gc.C) { meta := backups.NewMetadata() meta.Origin = backups.Origin{ - Environment: "asdf-zxcv-qwe", - Machine: "0", - Hostname: "myhost", - Version: version.MustParse("1.21-alpha3"), + Model: "asdf-zxcv-qwe", + Machine: "0", + Hostname: "myhost", + Version: version.MustParse("1.21-alpha3"), } meta.Started = time.Date(2014, time.Month(9), 9, 11, 59, 34, 0, time.UTC) @@ -85,7 +85,7 @@ c.Check(meta.Started.Unix(), gc.Equals, int64(1410263974)) c.Check(meta.Finished.Unix(), gc.Equals, int64(1410264034)) c.Check(meta.Notes, gc.Equals, "") - c.Check(meta.Origin.Environment, gc.Equals, "asdf-zxcv-qwe") + c.Check(meta.Origin.Model, gc.Equals, "asdf-zxcv-qwe") c.Check(meta.Origin.Machine, gc.Equals, "0") c.Check(meta.Origin.Hostname, gc.Equals, "myhost") 
c.Check(meta.Origin.Version.String(), gc.Equals, "1.21-alpha3") @@ -112,7 +112,7 @@ c.Check(meta.Started.Unix(), gc.Equals, int64(time.Time{}.Unix())) c.Check(meta.Finished.Unix(), gc.Equals, finished) c.Check(meta.Notes, gc.Equals, "") - c.Check(meta.Origin.Environment, gc.Equals, backups.UnknownString) + c.Check(meta.Origin.Model, gc.Equals, backups.UnknownString) c.Check(meta.Origin.Machine, gc.Equals, backups.UnknownString) c.Check(meta.Origin.Hostname, gc.Equals, backups.UnknownString) c.Check(meta.Origin.Version.String(), gc.Equals, backups.UnknownVersion.String()) === modified file 'src/github.com/juju/juju/state/backups/restore.go' --- src/github.com/juju/juju/state/backups/restore.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/restore.go 2016-03-22 15:18:22 +0000 @@ -17,6 +17,7 @@ "github.com/juju/errors" "github.com/juju/names" "github.com/juju/utils" + "github.com/juju/utils/ssh" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" @@ -26,7 +27,6 @@ "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/state" - "github.com/juju/juju/utils/ssh" "github.com/juju/juju/worker/peergrouper" ) @@ -34,7 +34,7 @@ // uses of this const, not only here but all around juju const restoreUserHome = "/home/ubuntu/" -// resetReplicaSet re-initiates replica-set using the new state server +// resetReplicaSet re-initiates replica-set using the new controller // values; this is required after a mongo restore. // It returns an error in case of failure. func resetReplicaSet(dialInfo *mgo.DialInfo, memberHostPort string) error { @@ -80,14 +80,14 @@ apiInfo, ok := conf.APIInfo() if ok { dialInfo.Password = apiInfo.Password - logger.Infof("using API password to access state server.") + logger.Infof("using API password to access controller.") } else { // There seems to be no way to reach this inconsistency other than making a // backup on a machine where these fields are corrupted, and even so I find // no reasonable way to reach this state. Yet since APIInfo has it as a // possibility I prefer to handle it; we cannot recover from this since // it would mean that the agent.conf is corrupted. - return nil, errors.New("cannot obtain password to access the state server") + return nil, errors.New("cannot obtain password to access the controller") } } return dialInfo, nil @@ -133,9 +133,9 @@ var mongoDefaultDialOpts = mongo.DefaultDialOpts var environsNewStatePolicy = environs.NewStatePolicy -// newStateConnection tries to connect to the newly restored state server. -func newStateConnection(environTag names.EnvironTag, info *mongo.MongoInfo) (*state.State, error) { - // We need to retry here to allow mongo to come up on the restored state server. +// newStateConnection tries to connect to the newly restored controller. +func newStateConnection(modelTag names.ModelTag, info *mongo.MongoInfo) (*state.State, error) { + // We need to retry here to allow mongo to come up on the restored controller. // The connection might succeed due to the mongo dial retries but there may still // be a problem issuing database commands. var ( @@ -148,7 +148,7 @@ ) attempt := utils.AttemptStrategy{Delay: newStateConnDelay, Min: newStateConnMinAttempts} for a := attempt.Start(); a.Next(); { - st, err = state.Open(environTag, info, mongoDefaultDialOpts(), environsNewStatePolicy()) + st, err = state.Open(modelTag, info, mongoDefaultDialOpts(), environsNewStatePolicy()) if err == nil { return st, nil } @@ -161,15 +161,15 @@ // in each of them. The address does not include the port.
// It is too late to go back, and errors in a couple of agents have // a better chance of being fixed by the user; if we were to fail -// we risk an inconsistent state server because of one unresponsive +// we risk an inconsistent controller because of one unresponsive // agent; we should nevertheless return the error info to the user. func updateAllMachines(privateAddress string, machines []*state.Machine) error { var machineUpdating sync.WaitGroup for key := range machines { // key is used to have machine be scope bound to the loop iteration. machine := machines[key] - // A newly resumed state server requires no updating, and more - // than one state server is not yet supported by this code. + // A newly resumed controller requires no updating, and more - // than one controller is not yet supported by this code. if machine.IsManager() || machine.Life() == state.Dead { continue } === modified file 'src/github.com/juju/juju/state/backups/restore_test.go' --- src/github.com/juju/juju/state/backups/restore_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/backups/restore_test.go 2016-03-22 15:18:22 +0000 @@ -18,6 +18,7 @@ "github.com/juju/replicaset" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/ssh" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" @@ -28,7 +29,6 @@ "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/utils/ssh" "github.com/juju/juju/version" ) @@ -108,7 +108,8 @@ dialInfo.Addrs = []string{mgoAddr} err = resetReplicaSet(dialInfo, mgoAddr) - session := server.MustDial() + session, err := server.Dial() + c.Assert(err, jc.ErrorIsNil) defer session.Close() cfg, err = replicaset.CurrentConfig(session) c.Assert(err, jc.ErrorIsNil) @@ -135,12 +136,12 @@ func (r *RestoreSuite) TestSetAgentAddressScript(c *gc.C) { testServerAddresses := []string{ - "FirstNewStateServerAddress:30303", - "SecondNewStateServerAddress:30304", - "ThirdNewStateServerAddress:30305", - "FourthNewStateServerAddress:30306", - "FiftNewStateServerAddress:30307", - "SixtNewStateServerAddress:30308", + "FirstNewControllerAddress:30303", + "SecondNewControllerAddress:30304", + "ThirdNewControllerAddress:30305", + "FourthNewControllerAddress:30306", + "FifthNewControllerAddress:30307", + "SixthNewControllerAddress:30308", } for _, address := range testServerAddresses { template := setAgentAddressScript(address) @@ -165,6 +166,7 @@ ` func (r *RestoreSuite) TestNewDialInfo(c *gc.C) { + cases := []struct { machineTag string apiPassword string @@ -188,6 +190,7 @@ "", }, } + dataDir := path.Join(r.cwd, "dataDir") err := os.Mkdir(dataDir, os.FileMode(0755)) c.Assert(err, jc.ErrorIsNil) @@ -195,17 +198,18 @@ logDir := path.Join(r.cwd, "logDir") err = os.Mkdir(logDir, os.FileMode(0755)) c.Assert(err, jc.ErrorIsNil) - for _, testCase := range cases { machineTag, err := names.ParseTag(testCase.machineTag) c.Assert(err, jc.ErrorIsNil) configParams := agent.AgentConfigParams{ - DataDir: dataDir, - LogDir: logDir, - UpgradedToVersion: version.Current.Number, + Paths: agent.Paths{ + DataDir: dataDir, + LogDir: logDir, + }, + UpgradedToVersion: version.Current, Tag: machineTag, - Environment: coretesting.EnvironmentTag, + Model: coretesting.ModelTag, Password: "placeholder", Nonce: "dummyNonce", StateAddresses: []string{"fakeStateAddress:1234"}, @@ -255,7 +259,8 @@ err = updateMongoEntries("1234", "0", "0", dialInfo) c.Assert(err, gc.ErrorMatches, "cannot
update machine 0 instance information: not found") - session := server.MustDial() + session, err := server.Dial() + c.Assert(err, jc.ErrorIsNil) defer session.Close() err = session.DB("juju").C("machines").Insert(bson.M{"machineid": "0", "instanceid": "0"}) @@ -286,7 +291,7 @@ r.PatchValue(&mongoDefaultDialOpts, statetesting.NewDialOpts) r.PatchValue(&environsNewStatePolicy, func() state.Policy { return nil }) - st, err = newStateConnection(st.EnvironTag(), statetesting.NewMongoInfo()) + st, err = newStateConnection(st.ModelTag(), statetesting.NewMongoInfo()) c.Assert(err, jc.ErrorIsNil) c.Assert(st.Close(), jc.ErrorIsNil) } === modified file 'src/github.com/juju/juju/state/backups/storage.go' --- src/github.com/juju/juju/state/backups/storage.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/backups/storage.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,11 @@ "path" "time" - "github.com/juju/blobstore" "github.com/juju/errors" "github.com/juju/names" jujutxn "github.com/juju/txn" "github.com/juju/utils/filestorage" + "gopkg.in/juju/blobstore.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -47,10 +47,10 @@ // origin - Environment string `bson:"environment"` - Machine string `bson:"machine"` - Hostname string `bson:"hostname"` - Version version.Number `bson:"version"` + Model string `bson:"model"` + Machine string `bson:"machine"` + Hostname string `bson:"hostname"` + Version version.Number `bson:"version"` } func (doc *storageMetaDoc) isFileInfoComplete() bool { @@ -74,8 +74,8 @@ return errors.New("missing Started") } // We don't check doc.Finished because it doesn't have to be set. - if doc.Environment == "" { - return errors.New("missing Environment") + if doc.Model == "" { + return errors.New("missing Model") } if doc.Machine == "" { return errors.New("missing Machine") @@ -121,7 +121,7 @@ meta.Started = metadocUnixToTime(doc.Started) meta.Notes = doc.Notes - meta.Origin.Environment = doc.Environment + meta.Origin.Model = doc.Model meta.Origin.Machine = doc.Machine meta.Origin.Hostname = doc.Hostname meta.Origin.Version = doc.Version @@ -171,7 +171,7 @@ } doc.Notes = meta.Notes - doc.Environment = meta.Origin.Environment + doc.Model = meta.Origin.Model doc.Machine = meta.Origin.Machine doc.Hostname = meta.Origin.Hostname doc.Version = meta.Origin.Version @@ -191,11 +191,11 @@ db *mgo.Database metaColl *mgo.Collection txnRunner jujutxn.Runner - envUUID string + modelUUID string } // newStorageDBWrapper returns a DB operator for the , with its own session. -func newStorageDBWrapper(db *mgo.Database, metaColl, envUUID string) *storageDBWrapper { +func newStorageDBWrapper(db *mgo.Database, metaColl, modelUUID string) *storageDBWrapper { session := db.Session.Copy() db = db.With(session) @@ -206,7 +206,7 @@ db: db, metaColl: coll, txnRunner: txnRunner, - envUUID: envUUID, + modelUUID: modelUUID, } return &dbWrap } @@ -269,7 +269,7 @@ // blobStorage returns a ManagedStorage matching the env storage and the blobDB. func (b *storageDBWrapper) blobStorage(blobDB string) blobstore.ManagedStorage { - dataStore := blobstore.NewGridFS(blobDB, b.envUUID, b.session) + dataStore := blobstore.NewGridFS(blobDB, b.modelUUID, b.session) return blobstore.NewManagedStorage(b.db, dataStore) } @@ -285,7 +285,7 @@ db: db, metaColl: coll, txnRunner: txnRunner, - envUUID: b.envUUID, + modelUUID: b.modelUUID, } return &dbWrap } @@ -315,15 +315,15 @@ } // newStorageID returns a new ID for a state backup. 
The format is the -// UTC timestamp from the metadata followed by the environment ID: -// "YYYYMMDD-hhmmss.". This makes the ID a little more human- +// UTC timestamp from the metadata followed by the model ID: +// "YYYYMMDD-hhmmss.". This makes the ID a little more human- // consumable (in contrast to a plain UUID string). Ideally we would -// use some form of environment name rather than the UUID, but for now +// use some form of model name rather than the UUID, but for now // the raw model ID is sufficient. var newStorageID = func(doc *storageMetaDoc) string { started := metadocUnixToTime(doc.Started) timestamp := started.Format(backupIDTimestamp) - return timestamp + "." + doc.Environment + return timestamp + "." + doc.Model } // addStorageMetadata stores metadata for a backup where it can be @@ -376,8 +376,8 @@ type backupsMetadataStorage struct { filestorage.MetadataDocStorage - db *mgo.Database - envUUID string + db *mgo.Database + modelUUID string } func newMetadataStorage(dbWrap *storageDBWrapper) *backupsMetadataStorage { @@ -387,7 +387,7 @@ stor := backupsMetadataStorage{ MetadataDocStorage: filestorage.MetadataDocStorage{&docStor}, db: dbWrap.db, - envUUID: dbWrap.envUUID, + modelUUID: dbWrap.modelUUID, } return &stor } @@ -454,7 +454,7 @@ // SetStored records in the metadata the fact that the file was stored. func (s *backupsMetadataStorage) SetStored(id string) error { - dbWrap := newStorageDBWrapper(s.db, storageMetaName, s.envUUID) + dbWrap := newStorageDBWrapper(s.db, storageMetaName, s.modelUUID) defer dbWrap.Close() err := setStorageStoredTime(dbWrap, id, time.Now()) @@ -469,7 +469,7 @@ type backupBlobStorage struct { dbWrap *storageDBWrapper - envUUID string + modelUUID string storeImpl blobstore.ManagedStorage root string } @@ -480,7 +480,7 @@ managed := dbWrap.blobStorage(dbWrap.db.Name) stor := backupBlobStorage{ dbWrap: dbWrap, - envUUID: dbWrap.envUUID, + modelUUID: dbWrap.modelUUID, storeImpl: managed, root: root, } @@ -489,24 +489,24 @@ func (s *backupBlobStorage) path(id string) string { // Use of path.Join instead of filepath.Join is intentional - this - // is an environment storage path not a filesystem path. + // is a model storage path, not a filesystem path. return path.Join(s.root, id) } // File returns the identified file from storage. func (s *backupBlobStorage) File(id string) (io.ReadCloser, error) { - file, _, err := s.storeImpl.GetForEnvironment(s.envUUID, s.path(id)) + file, _, err := s.storeImpl.GetForBucket(s.modelUUID, s.path(id)) return file, errors.Trace(err) } // AddFile adds the file to storage. func (s *backupBlobStorage) AddFile(id string, file io.Reader, size int64) error { - return s.storeImpl.PutForEnvironment(s.envUUID, s.path(id), file, size) + return s.storeImpl.PutForBucket(s.modelUUID, s.path(id), file, size) } // RemoveFile removes the identified file from storage. func (s *backupBlobStorage) RemoveFile(id string) error { - return s.storeImpl.RemoveForEnvironment(s.envUUID, s.path(id)) + return s.storeImpl.RemoveForBucket(s.modelUUID, s.path(id)) } // Close closes the storage. @@ -529,16 +529,16 @@ // MongoSession returns the underlying mongodb session. MongoSession() *mgo.Session - // EnvironTag is the concrete environ tag for this database. - EnvironTag() names.EnvironTag + // ModelTag is the concrete model tag for this database. + ModelTag() names.ModelTag } // NewStorage returns a new FileStorage to use for storing backup // archives (and metadata).
func NewStorage(st DB) filestorage.FileStorage { - envUUID := st.EnvironTag().Id() + modelUUID := st.ModelTag().Id() db := st.MongoSession().DB(storageDBName) - dbWrap := newStorageDBWrapper(db, storageMetaName, envUUID) + dbWrap := newStorageDBWrapper(db, storageMetaName, modelUUID) defer dbWrap.Close() files := newFileStorage(dbWrap, backupStorageRoot) === modified file 'src/github.com/juju/juju/state/backups/storage_test.go' --- src/github.com/juju/juju/state/backups/storage_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/backups/storage_test.go 2016-03-22 15:18:22 +0000 @@ -52,7 +52,7 @@ func (s *storageSuite) metadata(c *gc.C) *backups.Metadata { meta := backups.NewMetadata() - meta.Origin.Environment = s.State.EnvironUUID() + meta.Origin.Model = s.State.ModelUUID() meta.Origin.Machine = "0" meta.Origin.Hostname = "localhost" err := meta.MarkComplete(int64(42), "some hash") @@ -70,7 +70,7 @@ c.Check(meta.Checksum(), gc.Equals, expected.Checksum()) c.Check(meta.ChecksumFormat(), gc.Equals, expected.ChecksumFormat()) c.Check(meta.Size(), gc.Equals, expected.Size()) - c.Check(meta.Origin.Environment, gc.Equals, expected.Origin.Environment) + c.Check(meta.Origin.Model, gc.Equals, expected.Origin.Model) c.Check(meta.Origin.Machine, gc.Equals, expected.Origin.Machine) c.Check(meta.Origin.Hostname, gc.Equals, expected.Origin.Hostname) c.Check(meta.Origin.Version, gc.Equals, expected.Origin.Version) @@ -83,7 +83,7 @@ func (s *storageSuite) TestNewStorageID(c *gc.C) { meta := s.metadata(c) - meta.Origin.Environment = "spam" + meta.Origin.Model = "spam" meta.Started = time.Date(2014, time.Month(9), 12, 13, 19, 27, 0, time.UTC) id := backups.NewBackupID(meta) === modified file 'src/github.com/juju/juju/state/backups/testing/metadata.go' --- src/github.com/juju/juju/state/backups/testing/metadata.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/backups/testing/metadata.go 2016-03-22 15:18:22 +0000 @@ -30,7 +30,7 @@ func NewMetadataStarted() *backups.Metadata { meta := backups.NewMetadata() meta.Started = meta.Started.Truncate(time.Second) - meta.Origin.Environment = envID + meta.Origin.Model = envID meta.Origin.Machine = "0" meta.Origin.Hostname = "main-host" return meta @@ -53,8 +53,8 @@ } // SetOrigin updates the metadata's origin with the provided information.
-func SetOrigin(meta *backups.Metadata, envUUID, machine, hostname string) { - meta.Origin.Environment = envUUID +func SetOrigin(meta *backups.Metadata, modelUUID, machine, hostname string) { + meta.Origin.Model = modelUUID meta.Origin.Machine = machine meta.Origin.Hostname = hostname } === modified file 'src/github.com/juju/juju/state/bench_test.go' --- src/github.com/juju/juju/state/bench_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/bench_test.go 2016-03-22 15:18:22 +0000 @@ -88,7 +88,15 @@ c.ResetTimer() for i := 0; i < c.N; i++ { for n := 0; n < batches; n++ { - _, err := unit.AddMetrics(utils.MustNewUUID().String(), now, "", metrics) + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: serviceCharmURL.String(), + Metrics: metrics, + Unit: unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) } } @@ -114,9 +122,17 @@ c.ResetTimer() for i := 0; i < c.N; i++ { for i := 0; i < numberOfMetrics; i++ { - m, err := unit.AddMetrics(utils.MustNewUUID().String(), oldTime, "", []state.Metric{{}}) + m, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: oldTime, + CharmURL: serviceCharmURL.String(), + Metrics: []state.Metric{{}}, + Unit: unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - err = m.SetSent() + err = m.SetSent(time.Now()) c.Assert(err, jc.ErrorIsNil) } err := s.State.CleanupOldMetrics() === modified file 'src/github.com/juju/juju/state/block.go' --- src/github.com/juju/juju/state/block.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/block.go 2016-03-22 15:18:22 +0000 @@ -21,8 +21,8 @@ // Id returns this block's id. Id() string - // EnvUUID returns the environment UUID associated with this block. - EnvUUID() string + // ModelUUID returns the model UUID associated with this block. + ModelUUID() string // Tag returns tag for the entity that is being blocked Tag() (names.Tag, error) @@ -38,15 +38,15 @@ type BlockType int8 const ( - // DestroyBlock type identifies block that prevents environment destruction. + // DestroyBlock type identifies block that prevents model destruction. DestroyBlock BlockType = iota // RemoveBlock type identifies block that prevents // removal of machines, services, units or relations. RemoveBlock - // ChangeBlock type identifies block that prevents environment changes such - // as additions, modifications, removals of environment entities. + // ChangeBlock type identifies block that prevents model changes such + // as additions, modifications, removals of model entities. ChangeBlock ) @@ -92,13 +92,13 @@ doc blockDoc } -// blockDoc records information about an environment block. +// blockDoc records information about a model block. type blockDoc struct { - DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` - Tag string `bson:"tag"` - Type BlockType `bson:"type"` - Message string `bson:"message,omitempty"` + DocID string `bson:"_id"` + ModelUUID string `bson:"model-uuid"` + Tag string `bson:"tag"` + Type BlockType `bson:"type"` + Message string `bson:"message,omitempty"` } // Id is part of the state.Block interface. @@ -106,9 +106,9 @@ return b.doc.DocID } -// EnvUUID is part of the state.Block interface. -func (b *block) EnvUUID() string { - return b.doc.EnvUUID +// ModelUUID is part of the state.Block interface. +func (b *block) ModelUUID() string { + return b.doc.ModelUUID } // Message is part of the state.Block interface.
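A minimal usage sketch for the block API in this file, as seen from outside the state package (illustrative only; it assumes a *state.State named st and a standard fmt import, and is not part of the diff):

	// Prevent model changes while maintenance is under way.
	if err := st.SwitchBlockOn(state.ChangeBlock, "model is under maintenance"); err != nil {
		return err
	}
	// Inspect the active block, if any; GetBlockForType reports
	// (nil, false, nil) when no block of that type exists.
	block, found, err := st.GetBlockForType(state.ChangeBlock)
	if err != nil {
		return err
	}
	if found {
		fmt.Println("change block active:", block.Message())
	}
	// Lift the block again.
	if err := st.SwitchBlockOff(state.ChangeBlock); err != nil {
		return err
	}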
@@ -131,18 +131,18 @@ } // SwitchBlockOn enables block of specified type for the -// current environment. +// current model. func (st *State) SwitchBlockOn(t BlockType, msg string) error { - return setEnvironmentBlock(st, t, msg) + return setModelBlock(st, t, msg) } // SwitchBlockOff disables block of specified type for the -// current environment. +// current model. func (st *State) SwitchBlockOff(t BlockType) error { - return removeEnvironmentBlock(st, t) + return RemoveModelBlock(st, t) } -// GetBlockForType returns the Block of the specified type for the current environment +// GetBlockForType returns the Block of the specified type for the current model // where // not found -> nil, false, nil // found -> block, true, nil @@ -164,7 +164,7 @@ } } -// AllBlocks returns all blocks in the environment. +// AllBlocks returns all blocks in the model. func (st *State) AllBlocks() ([]Block, error) { blocksCollection, closer := st.getCollection(blocksC) defer closer() @@ -181,9 +181,9 @@ return blocks, nil } -// AllBlocksForSystem returns all blocks in any environments on -// the system. -func (st *State) AllBlocksForSystem() ([]Block, error) { +// AllBlocksForController returns all blocks in any models on +// the controller. +func (st *State) AllBlocksForController() ([]Block, error) { blocksCollection, closer := st.getRawCollection(blocksC) defer closer() @@ -200,11 +200,11 @@ return blocks, nil } -// RemoveAllBlocksForSystem removes all the blocks for the system. +// RemoveAllBlocksForController removes all the blocks for the controller. // It does not prevent new blocks from being added during / after // removal. -func (st *State) RemoveAllBlocksForSystem() error { - blocks, err := st.AllBlocksForSystem() +func (st *State) RemoveAllBlocksForController() error { + blocks, err := st.AllBlocksForController() if err != nil { return errors.Trace(err) } @@ -219,30 +219,30 @@ } // Use runRawTransaction as we might be removing docs across - // multiple environments. + // multiple models. return st.runRawTransaction(ops) } -// setEnvironmentBlock updates the blocks collection with the +// setModelBlock updates the blocks collection with the // specified block. -// Only one instance of each block type can exist in environment. -func setEnvironmentBlock(st *State, t BlockType, msg string) error { +// Only one instance of each block type can exist in model. +func setModelBlock(st *State, t BlockType, msg string) error { buildTxn := func(attempt int) ([]txn.Op, error) { _, exists, err := st.GetBlockForType(t) if err != nil { return nil, errors.Trace(err) } - // Cannot create blocks of the same type more than once per environment. + // Cannot create blocks of the same type more than once per model. // Cannot update current blocks. if exists { return nil, errors.Errorf("block %v is already ON", t.String()) } - return createEnvironmentBlockOps(st, t, msg) + return createModelBlockOps(st, t, msg) } return st.run(buildTxn) } -// newBlockId returns a sequential block id for this environment. +// newBlockId returns a sequential block id for this model. 
func newBlockId(st *State) (string, error) { seq, err := st.sequence("block") if err != nil { @@ -251,17 +251,17 @@ return fmt.Sprint(seq), nil } -func createEnvironmentBlockOps(st *State, t BlockType, msg string) ([]txn.Op, error) { +func createModelBlockOps(st *State, t BlockType, msg string) ([]txn.Op, error) { id, err := newBlockId(st) if err != nil { return nil, errors.Annotatef(err, "getting new block id") } newDoc := blockDoc{ - DocID: st.docID(id), - EnvUUID: st.EnvironUUID(), - Tag: st.EnvironTag().String(), - Type: t, - Message: msg, + DocID: st.docID(id), + ModelUUID: st.ModelUUID(), + Tag: st.ModelTag().String(), + Type: t, + Message: msg, } insertOp := txn.Op{ C: blocksC, @@ -272,14 +272,14 @@ return []txn.Op{insertOp}, nil } -func removeEnvironmentBlock(st *State, t BlockType) error { +func RemoveModelBlock(st *State, t BlockType) error { buildTxn := func(attempt int) ([]txn.Op, error) { - return removeEnvironmentBlockOps(st, t) + return RemoveModelBlockOps(st, t) } return st.run(buildTxn) } -func removeEnvironmentBlockOps(st *State, t BlockType) ([]txn.Op, error) { +func RemoveModelBlockOps(st *State, t BlockType) ([]txn.Op, error) { tBlock, exists, err := st.GetBlockForType(t) if err != nil { return nil, errors.Annotatef(err, "removing block %v", t.String()) === modified file 'src/github.com/juju/juju/state/block_test.go' --- src/github.com/juju/juju/state/block_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/block_test.go 2016-03-22 15:18:22 +0000 @@ -47,7 +47,7 @@ c.Assert(dBlock.Type(), gc.DeepEquals, t) tag, err := dBlock.Tag() c.Assert(err, jc.ErrorIsNil) - c.Assert(tag, gc.DeepEquals, st.EnvironTag()) + c.Assert(tag, gc.DeepEquals, st.ModelTag()) c.Assert(dBlock.Message(), gc.DeepEquals, msg) } @@ -86,7 +86,7 @@ c.Assert(errors.Cause(err), gc.ErrorMatches, expectedErr) } -func (s *blockSuite) TestNewEnvironmentNotBlocked(c *gc.C) { +func (s *blockSuite) TestNewModelNotBlocked(c *gc.C) { assertNoEnvBlock(c, s.State) s.assertNoTypedBlock(c, state.DestroyBlock) s.assertNoTypedBlock(c, state.RemoveBlock) @@ -131,7 +131,7 @@ s.assertNoTypedBlock(c, t) } -func (s *blockSuite) TestAllBlocksForSystem(c *gc.C) { +func (s *blockSuite) TestAllBlocksForController(c *gc.C) { _, st2 := s.createTestEnv(c) defer st2.Close() @@ -140,12 +140,12 @@ err = s.State.SwitchBlockOn(state.ChangeBlock, "block test") c.Assert(err, jc.ErrorIsNil) - blocks, err := s.State.AllBlocksForSystem() + blocks, err := s.State.AllBlocksForController() c.Assert(err, jc.ErrorIsNil) c.Assert(len(blocks), gc.Equals, 2) } -func (s *blockSuite) TestRemoveAllBlocksForSystem(c *gc.C) { +func (s *blockSuite) TestRemoveAllBlocksForController(c *gc.C) { _, st2 := s.createTestEnv(c) defer st2.Close() @@ -154,28 +154,28 @@ err = s.State.SwitchBlockOn(state.ChangeBlock, "block test") c.Assert(err, jc.ErrorIsNil) - err = s.State.RemoveAllBlocksForSystem() + err = s.State.RemoveAllBlocksForController() c.Assert(err, jc.ErrorIsNil) - blocks, err := s.State.AllBlocksForSystem() + blocks, err := s.State.AllBlocksForController() c.Assert(err, jc.ErrorIsNil) c.Assert(len(blocks), gc.Equals, 0) } -func (s *blockSuite) TestRemoveAllBlocksForSystemNoBlocks(c *gc.C) { +func (s *blockSuite) TestRemoveAllBlocksForControllerNoBlocks(c *gc.C) { _, st2 := s.createTestEnv(c) defer st2.Close() - err := st2.RemoveAllBlocksForSystem() + err := st2.RemoveAllBlocksForController() c.Assert(err, jc.ErrorIsNil) - blocks, err := st2.AllBlocksForSystem() + blocks, err := st2.AllBlocksForController() c.Assert(err, 
jc.ErrorIsNil) c.Assert(len(blocks), gc.Equals, 0) } -func (s *blockSuite) TestEnvUUID(c *gc.C) { - st := s.Factory.MakeEnvironment(c, nil) +func (s *blockSuite) TestModelUUID(c *gc.C) { + st := s.Factory.MakeModel(c, nil) defer st.Close() err := st.SwitchBlockOn(state.ChangeBlock, "blocktest") c.Assert(err, jc.ErrorIsNil) @@ -183,18 +183,18 @@ blocks, err := st.AllBlocks() c.Assert(err, jc.ErrorIsNil) c.Assert(len(blocks), gc.Equals, 1) - c.Assert(blocks[0].EnvUUID(), gc.Equals, st.EnvironUUID()) + c.Assert(blocks[0].ModelUUID(), gc.Equals, st.ModelUUID()) } -func (s *blockSuite) createTestEnv(c *gc.C) (*state.Environment, *state.State) { +func (s *blockSuite) createTestEnv(c *gc.C) (*state.Model, *state.State) { uuid, err := utils.NewUUID() c.Assert(err, jc.ErrorIsNil) - cfg := testing.CustomEnvironConfig(c, testing.Attrs{ + cfg := testing.CustomModelConfig(c, testing.Attrs{ "name": "testing", "uuid": uuid.String(), }) owner := names.NewUserTag("test@remote") - env, st, err := s.State.NewEnvironment(cfg, owner) + env, st, err := s.State.NewModel(cfg, owner) c.Assert(err, jc.ErrorIsNil) return env, st } === modified file 'src/github.com/juju/juju/state/blockdevices.go' --- src/github.com/juju/juju/state/blockdevices.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/blockdevices.go 2016-03-22 15:18:22 +0000 @@ -14,7 +14,7 @@ "gopkg.in/mgo.v2/txn" ) -// BlockDevice represents the state of a block device in the environment. +// BlockDevice represents the state of a block device in the model. type BlockDevice interface { // Machine returns the ID of the machine the block device is attached to. Machine() string @@ -26,7 +26,7 @@ // blockDevicesDoc records information about a machine's block devices. type blockDevicesDoc struct { DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Machine string `bson:"machineid"` BlockDevices []BlockDeviceInfo `bson:"blockdevices"` } === modified file 'src/github.com/juju/juju/state/charm.go' --- src/github.com/juju/juju/state/charm.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/charm.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -18,9 +18,9 @@ // charmDoc represents the internal state of a charm in MongoDB. type charmDoc struct { - DocID string `bson:"_id"` - URL *charm.URL `bson:"url"` // DANGEROUS see below - EnvUUID string `bson:"env-uuid"` + DocID string `bson:"_id"` + URL *charm.URL `bson:"url"` // DANGEROUS see below + ModelUUID string `bson:"model-uuid"` // TODO(fwereade) 2015-06-18 lp:1467964 // DANGEROUS: our schema can change any time the charm package changes, @@ -47,14 +47,15 @@ } // insertCharmOps returns the txn operations necessary to insert the supplied -// charm data. -func insertCharmOps( - st *State, ch charm.Charm, curl *charm.URL, storagePath, bundleSha256 string, -) ([]txn.Op, error) { +// charm data. If curl is nil, an error will be returned. 
+func insertCharmOps(st *State, ch charm.Charm, curl *charm.URL, storagePath, bundleSha256 string) ([]txn.Op, error) { + if curl == nil { + return nil, errors.New("*charm.URL was nil") + } return insertAnyCharmOps(&charmDoc{ DocID: curl.String(), URL: curl, - EnvUUID: st.EnvironTag().Id(), + ModelUUID: st.ModelTag().Id(), Meta: ch.Meta(), Config: safeConfig(ch), Metrics: ch.Metrics(), @@ -66,23 +67,30 @@ // insertPlaceholderCharmOps returns the txn operations necessary to insert a // charm document referencing a store charm that is not yet directly accessible -// within the environment. +// within the model. If curl is nil, an error will be returned. func insertPlaceholderCharmOps(st *State, curl *charm.URL) ([]txn.Op, error) { + if curl == nil { + return nil, errors.New("*charm.URL was nil") + } return insertAnyCharmOps(&charmDoc{ DocID: curl.String(), URL: curl, - EnvUUID: st.EnvironTag().Id(), + ModelUUID: st.ModelTag().Id(), Placeholder: true, }) } // insertPendingCharmOps returns the txn operations necessary to insert a charm -// document referencing a charm that has yet to be uploaded to the environment. +// document referencing a charm that has yet to be uploaded to the model. +// If curl is nil, an error will be returned. func insertPendingCharmOps(st *State, curl *charm.URL) ([]txn.Op, error) { + if curl == nil { + return nil, errors.New("*charm.URL was nil") + } return insertAnyCharmOps(&charmDoc{ DocID: curl.String(), URL: curl, - EnvUUID: st.EnvironTag().Id(), + ModelUUID: st.ModelTag().Id(), PendingUpload: true, }) } @@ -186,7 +194,7 @@ return escapedConfig } -// Charm represents the state of a charm in the environment. +// Charm represents the state of a charm in the model. type Charm struct { st *State doc charmDoc @@ -270,7 +278,7 @@ // the provider storage. // // DEPRECATED: this is only to be used for migrating -// charm archives to environment storage. +// charm archives to model storage. func (c *Charm) BundleURL() *url.URL { return c.doc.BundleURL } @@ -281,7 +289,7 @@ } // IsUploaded returns whether the charm has been uploaded to the -// environment storage. +// model storage. 
func (c *Charm) IsUploaded() bool { return !c.doc.PendingUpload } === modified file 'src/github.com/juju/juju/state/charm_test.go' --- src/github.com/juju/juju/state/charm_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/charm_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/state" "github.com/juju/juju/testcharms" === modified file 'src/github.com/juju/juju/state/cleanup.go' --- src/github.com/juju/juju/state/cleanup.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/cleanup.go 2016-03-22 15:18:22 +0000 @@ -22,31 +22,33 @@ cleanupUnitsForDyingService cleanupKind = "units" cleanupDyingUnit cleanupKind = "dyingUnit" cleanupRemovedUnit cleanupKind = "removedUnit" - cleanupServicesForDyingEnvironment cleanupKind = "services" + cleanupServicesForDyingModel cleanupKind = "services" cleanupDyingMachine cleanupKind = "dyingMachine" cleanupForceDestroyedMachine cleanupKind = "machine" cleanupAttachmentsForDyingStorage cleanupKind = "storageAttachments" cleanupAttachmentsForDyingVolume cleanupKind = "volumeAttachments" cleanupAttachmentsForDyingFilesystem cleanupKind = "filesystemAttachments" + cleanupModelsForDyingController cleanupKind = "models" + cleanupMachinesForDyingModel cleanupKind = "modelMachines" ) // cleanupDoc represents a potentially large set of documents that should be // removed. type cleanupDoc struct { - DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` - Kind cleanupKind - Prefix string + DocID string `bson:"_id"` + ModelUUID string `bson:"model-uuid"` + Kind cleanupKind + Prefix string } // newCleanupOp returns a txn.Op that creates a cleanup document with a unique // id and the supplied kind and prefix. func (st *State) newCleanupOp(kind cleanupKind, prefix string) txn.Op { doc := &cleanupDoc{ - DocID: st.docID(fmt.Sprint(bson.NewObjectId())), - EnvUUID: st.EnvironUUID(), - Kind: kind, - Prefix: prefix, + DocID: st.docID(fmt.Sprint(bson.NewObjectId())), + ModelUUID: st.ModelUUID(), + Kind: kind, + Prefix: prefix, } return txn.Op{ C: cleanupsC, @@ -87,8 +89,8 @@ err = st.cleanupDyingUnit(doc.Prefix) case cleanupRemovedUnit: err = st.cleanupRemovedUnit(doc.Prefix) - case cleanupServicesForDyingEnvironment: - err = st.cleanupServicesForDyingEnvironment() + case cleanupServicesForDyingModel: + err = st.cleanupServicesForDyingModel() case cleanupDyingMachine: err = st.cleanupDyingMachine(doc.Prefix) case cleanupForceDestroyedMachine: @@ -99,6 +101,10 @@ err = st.cleanupAttachmentsForDyingVolume(doc.Prefix) case cleanupAttachmentsForDyingFilesystem: err = st.cleanupAttachmentsForDyingFilesystem(doc.Prefix) + case cleanupModelsForDyingController: + err = st.cleanupModelsForDyingController() + case cleanupMachinesForDyingModel: + err = st.cleanupMachinesForDyingModel() default: err = fmt.Errorf("unknown cleanup kind %q", doc.Kind) } @@ -137,11 +143,62 @@ return nil } -// cleanupServicesForDyingEnvironment sets all services to Dying, if they are -// not already Dying or Dead. It's expected to be used when an environment is +// cleanupModelsForDyingController sets all models to dying, if +// they are not already Dying or Dead. It's expected to be used when a +// controller is destroyed. 
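+// Models that are already Dying or Dead are skipped, so re-running the
+// cleanup after an interrupted pass is safe.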
+func (st *State) cleanupModelsForDyingController() (err error) { + models, err := st.AllModels() + if err != nil { + return errors.Trace(err) + } + for _, env := range models { + + if env.Life() == Alive { + if err := env.Destroy(); err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// cleanupMachinesForDyingModel sets all non-manager, non-manual +// machines to Dying, if they are not already Dying or Dead. It's expected to +// be used when a model is destroyed. +func (st *State) cleanupMachinesForDyingModel() (err error) { + // This won't miss machines, because a Dying model cannot have + // machines added to it. But we do have to remove the machines themselves + // via individual transactions, because they could be in any state at all. + machines, err := st.AllMachines() + if err != nil { + return errors.Trace(err) + } + for _, m := range machines { + if m.IsManager() { + continue + } + if _, isContainer := m.ParentId(); isContainer { + continue + } + manual, err := m.IsManual() + if err != nil { + return err + } else if manual { + continue + } + err = m.ForceDestroy() + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// cleanupServicesForDyingModel sets all services to Dying, if they are +// not already Dying or Dead. It's expected to be used when a model is // destroyed. -func (st *State) cleanupServicesForDyingEnvironment() (err error) { - // This won't miss services, because a Dying environment cannot have +func (st *State) cleanupServicesForDyingModel() (err error) { + // This won't miss services, because a Dying model cannot have // services added to it. But we do have to remove the services themselves // via individual transactions, because they could be in any state at all. services, closer := st.getCollection(servicesC) === modified file 'src/github.com/juju/juju/state/cleanup_test.go' --- src/github.com/juju/juju/state/cleanup_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/cleanup_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/instance" "github.com/juju/juju/state" @@ -71,7 +71,71 @@ s.assertCleanupCount(c, 1) } -func (s *CleanupSuite) TestCleanupEnvironmentServices(c *gc.C) { +func (s *CleanupSuite) TestCleanupControllerModels(c *gc.C) { + s.assertDoesNotNeedCleanup(c) + + // Create a model. + otherSt := s.Factory.MakeModel(c, nil) + defer otherSt.Close() + otherEnv, err := otherSt.Model() + c.Assert(err, jc.ErrorIsNil) + + s.assertDoesNotNeedCleanup(c) + + // Destroy the controller and check the model is unaffected, but a + // cleanup for the model and services has been scheduled. + controllerEnv, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + err = controllerEnv.DestroyIncludingHosted() + c.Assert(err, jc.ErrorIsNil) + + // Two cleanups should be scheduled. One to destroy the hosted + // models, the other to destroy the controller model's + // services. + s.assertCleanupCount(c, 1) + err = otherEnv.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(otherEnv.Life(), gc.Equals, state.Dying) + + s.assertDoesNotNeedCleanup(c) +} + +func (s *CleanupSuite) TestCleanupModelMachines(c *gc.C) { + // Create a state and hosted machine.
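+	// (JobManageModel marks the controller's own machine; the assertions
+	// at the end of this test expect it to stay Alive while the hosted
+	// machine becomes Dead.)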
+ stateMachine, err := s.State.AddMachine("quantal", state.JobManageModel) + c.Assert(err, jc.ErrorIsNil) + machine, err := s.State.AddMachine("quantal", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + + // Create a relation with a unit in scope and assigned to the hosted machine. + pr := NewPeerRelation(c, s.State, s.Owner) + err = pr.u0.AssignToMachine(machine) + c.Assert(err, jc.ErrorIsNil) + err = pr.ru0.EnterScope(nil) + c.Assert(err, jc.ErrorIsNil) + s.assertDoesNotNeedCleanup(c) + + // Destroy model, check cleanup queued. + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + err = env.Destroy() + c.Assert(err, jc.ErrorIsNil) + s.assertNeedsCleanup(c) + + // Clean up, and check that the unit has been removed... + s.assertCleanupCount(c, 3) + assertRemoved(c, pr.u0) + + // ...and the unit has departed relation scope... + assertNotJoined(c, pr.ru0) + + // ...but that the machine remains, and is Dead, ready for removal by the + // provisioner. + assertLife(c, machine, state.Dead) + assertLife(c, stateMachine, state.Alive) +} + +func (s *CleanupSuite) TestCleanupModelServices(c *gc.C) { s.assertDoesNotNeedCleanup(c) // Create a service with some units. @@ -84,9 +148,9 @@ } s.assertDoesNotNeedCleanup(c) - // Destroy the environment and check the service and units are + // Destroy the model and check the service and units are // unaffected, but a cleanup for the service has been scheduled. - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) err = env.Destroy() c.Assert(err, jc.ErrorIsNil) @@ -148,11 +212,11 @@ } func (s *CleanupSuite) TestForceDestroyMachineErrors(c *gc.C) { - manager, err := s.State.AddMachine("quantal", state.JobManageEnviron) + manager, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) s.assertDoesNotNeedCleanup(c) err = manager.ForceDestroy() - expect := fmt.Sprintf("machine %s is required by the environment", manager.Id()) + expect := "machine is required by the model" c.Assert(err, gc.ErrorMatches, expect) s.assertDoesNotNeedCleanup(c) assertLife(c, manager, state.Alive) === modified file 'src/github.com/juju/juju/state/cloudimagemetadata/functions_test.go' --- src/github.com/juju/juju/state/cloudimagemetadata/functions_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/cloudimagemetadata/functions_test.go 2016-03-22 15:18:22 +0000 @@ -48,10 +48,10 @@ bson.D{{"arch", bson.D{{"$in", []string{"arch-value"}}}}}) } -func (s *funcMetadataSuite) TestSearchCriteriaWithVirtualType(c *gc.C) { +func (s *funcMetadataSuite) TestSearchCriteriaWithVirtType(c *gc.C) { s.assertSearchCriteriaBuilt(c, - cloudimagemetadata.MetadataFilter{VirtualType: "vtype-value"}, - bson.D{{"virtual_type", "vtype-value"}}) + cloudimagemetadata.MetadataFilter{VirtType: "vtype-value"}, + bson.D{{"virt_type", "vtype-value"}}) } func (s *funcMetadataSuite) TestSearchCriteriaWithStorageType(c *gc.C) { @@ -69,7 +69,7 @@ Stream: "stream-value", Region: "region-value", Arches: []string{"arch-value", "arch-value-two"}, - VirtualType: "vtype-value", + VirtType: "vtype-value", }, // This is in the order in which it is built.
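// (bson.D, unlike bson.M, preserves element order, so the expected
// document below depends on the exact order in which the search
// criteria builder appends its clauses.)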
bson.D{ @@ -77,7 +77,7 @@ {"region", "region-value"}, {"series", bson.D{{"$in", []string{"series-value", "series-value-two"}}}}, {"arch", bson.D{{"$in", []string{"arch-value", "arch-value-two"}}}}, - {"virtual_type", "vtype-value"}, + {"virt_type", "vtype-value"}, {"root_storage_type", "rootstorage-value"}, }) } === modified file 'src/github.com/juju/juju/state/cloudimagemetadata/image.go' --- src/github.com/juju/juju/state/cloudimagemetadata/image.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/cloudimagemetadata/image.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,7 @@ "github.com/juju/errors" "github.com/juju/loggo" jujutxn "github.com/juju/txn" + "github.com/juju/utils/series" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -18,7 +19,7 @@ var logger = loggo.GetLogger("juju.state.cloudimagemetadata") type storage struct { - envuuid string + modelUUID string collection string store DataStore } @@ -27,60 +28,130 @@ // NewStorage constructs a new Storage that stores image metadata // in the provided data store. -func NewStorage(envuuid, collectionName string, store DataStore) Storage { - return &storage{envuuid, collectionName, store} +func NewStorage(modelUUID, collectionName string, store DataStore) Storage { + return &storage{modelUUID, collectionName, store} } var emptyMetadata = Metadata{} // SaveMetadata implements Storage.SaveMetadata and behaves as save-or-update. -func (s *storage) SaveMetadata(metadata Metadata) error { - newDoc := s.mongoDoc(metadata) - - buildTxn := func(attempt int) ([]txn.Op, error) { - op := txn.Op{ - C: s.collection, - Id: newDoc.Id, - } - - // Check if this image metadata is already known. - existing, err := s.getMetadata(newDoc.Id) +func (s *storage) SaveMetadata(metadata []Metadata) error { + if len(metadata) == 0 { + return nil + } + + newDocs := make([]imagesMetadataDoc, len(metadata)) + for i, m := range metadata { + newDoc := s.mongoDoc(m) + if err := validateMetadata(&newDoc); err != nil { + return err + } + newDocs[i] = newDoc + } + + buildTxn := func(attempt int) ([]txn.Op, error) { + var ops []txn.Op + for _, newDoc := range newDocs { + newDocCopy := newDoc + op := txn.Op{ + C: s.collection, + Id: newDocCopy.Id, + } + + // Check if this image metadata is already known. + existing, err := s.getMetadata(newDocCopy.Id) + if errors.IsNotFound(err) { + op.Assert = txn.DocMissing + op.Insert = &newDocCopy + ops = append(ops, op) + logger.Debugf("inserting cloud image metadata for %v", newDocCopy.Id) + } else if err != nil { + return nil, errors.Trace(err) + } else if existing.ImageId != newDocCopy.ImageId { + // need to update imageId + op.Assert = txn.DocExists + op.Update = bson.D{{"$set", bson.D{{"image_id", newDocCopy.ImageId}}}} + ops = append(ops, op) + logger.Debugf("updating cloud image id for metadata %v", newDocCopy.Id) + } + } + if len(ops) == 0 { + return nil, jujutxn.ErrNoOperations + } + return ops, nil + } + + err := s.store.RunTransaction(buildTxn) + if err != nil { + return errors.Annotate(err, "cannot save cloud image metadata") + } + return nil +} + +// DeleteMetadata implements Storage.DeleteMetadata. 
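+// Deleting is keyed by image id, not by metadata document id: every
+// stored document whose image_id matches is removed, and deleting an
+// unknown image id is a no-op. A hypothetical call (stor and the id
+// are made up for illustration):
+//
+//	if err := stor.DeleteMetadata("ami-12345"); err != nil {
+//		return errors.Trace(err)
+//	}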
+func (s *storage) DeleteMetadata(imageId string) error { + deleteOperation := func(docId string) txn.Op { + logger.Debugf("deleting metadata (ID=%v) for image (ID=%v)", docId, imageId) + return txn.Op{ + C: s.collection, + Id: docId, + Assert: txn.DocExists, + Remove: true, + } + } + + noOp := func() ([]txn.Op, error) { + logger.Debugf("no metadata for image ID %v to delete", imageId) + return nil, jujutxn.ErrNoOperations + } + + buildTxn := func(attempt int) ([]txn.Op, error) { + // find all metadata docs with given image id + imageMetadata, err := s.metadataForImageId(imageId) if err != nil { - return nil, errors.Trace(err) - } - if existing.MetadataAttributes == metadata.MetadataAttributes { - // may need to updated imageId - if existing.ImageId != metadata.ImageId { - op.Assert = txn.DocExists - op.Update = bson.D{{"$set", bson.D{{"image_id", metadata.ImageId}}}} - logger.Debugf("updating cloud image id for metadata %v", newDoc.Id) - } else { - return nil, jujutxn.ErrNoOperations + if err == mgo.ErrNotFound { + return noOp() } - } else { - op.Assert = txn.DocMissing - op.Insert = &newDoc - logger.Debugf("inserting cloud image metadata for %v", newDoc.Id) - } - return []txn.Op{op}, nil + return nil, err + } + if len(imageMetadata) == 0 { + return noOp() + } + + allTxn := make([]txn.Op, len(imageMetadata)) + for i, doc := range imageMetadata { + allTxn[i] = deleteOperation(doc.Id) + } + return allTxn, nil } err := s.store.RunTransaction(buildTxn) if err != nil { - return errors.Annotatef(err, "cannot save metadata for cloud image %v", newDoc.ImageId) + return errors.Annotatef(err, "cannot delete metadata for cloud image %v", imageId) } return nil } +func (s *storage) metadataForImageId(imageId string) ([]imagesMetadataDoc, error) { + coll, closer := s.store.GetCollection(s.collection) + defer closer() + + var docs []imagesMetadataDoc + query := bson.D{{"image_id", imageId}} + if err := coll.Find(query).All(&docs); err != nil { + return nil, err + } + return docs, nil +} + func (s *storage) getMetadata(id string) (Metadata, error) { coll, closer := s.store.GetCollection(s.collection) defer closer() var old imagesMetadataDoc - err := coll.Find(bson.D{{"_id", id}}).One(&old) - if err != nil { + if err := coll.Find(bson.D{{"_id", id}}).One(&old); err != nil { if err == mgo.ErrNotFound { - return emptyMetadata, nil + return Metadata{}, errors.NotFoundf("image metadata with ID %q", id) } return emptyMetadata, errors.Trace(err) } @@ -90,8 +161,8 @@ // imagesMetadataDoc results in immutable records. Updates are effectively // a delete and an insert. type imagesMetadataDoc struct { - // EnvUUID is the environment identifier. - EnvUUID string `bson:"env-uuid"` + // ModelUUID is the model identifier. + ModelUUID string `bson:"model-uuid"` // Id contains unique key for cloud image metadata. // This is an amalgamation of all deterministic metadata attributes to ensure @@ -108,14 +179,17 @@ // Region is the name of cloud region associated with the image. Region string `bson:"region"` - // Series is Os version, for e.g. "quantal". + // Version is the OS version, e.g. "12.04". + Version string `bson:"version"` + + // Series is the OS series, e.g. "trusty". Series string `bson:"series"` // Arch is the architecture for this cloud image, e.g. "amd64" Arch string `bson:"arch"` - // VirtualType contains the type of the cloud image, for e.g. "pv", "hvm". "kvm". - VirtualType string `bson:"virtual_type,omitempty"` + // VirtType contains the virtualisation type of the cloud image, e.g. "pv", "hvm", "kvm".
+ VirtType string `bson:"virt_type,omitempty"` // RootStorageType contains the type of root storage, e.g. "ebs", "instance". RootStorageType string `bson:"root_storage_type,omitempty"` @@ -127,31 +201,27 @@ DateCreated int64 `bson:"date_created"` // Source describes where this image is coming from: is it public? custom? - Source SourceType `bson:"source"` + Source string `bson:"source"` + + // Priority is an importance factor for image metadata. + // Higher number means higher priority. + // This allows metadata to be sorted by importance. + Priority int `bson:"priority"` } -// SourceType values define source type. -type SourceType string - -const ( - // Public type identifies image as public. - Public SourceType = "public" - - // Custom type identifies image as custom. - Custom SourceType = "custom" -) - func (m imagesMetadataDoc) metadata() Metadata { r := Metadata{ MetadataAttributes{ Source: m.Source, Stream: m.Stream, Region: m.Region, + Version: m.Version, Series: m.Series, Arch: m.Arch, RootStorageType: m.RootStorageType, - VirtualType: m.VirtualType, + VirtType: m.VirtType, }, + m.Priority, m.ImageId, } if m.RootStorageSize != 0 { @@ -162,17 +232,19 @@ func (s *storage) mongoDoc(m Metadata) imagesMetadataDoc { r := imagesMetadataDoc{ - EnvUUID: s.envuuid, + ModelUUID: s.modelUUID, Id: buildKey(m), Stream: m.Stream, Region: m.Region, + Version: m.Version, Series: m.Series, Arch: m.Arch, - VirtualType: m.VirtualType, + VirtType: m.VirtType, RootStorageType: m.RootStorageType, ImageId: m.ImageId, DateCreated: time.Now().UnixNano(), Source: m.Source, + Priority: m.Priority, } if m.RootStorageSize != nil { r.RootStorageSize = *m.RootStorageSize @@ -186,17 +258,33 @@ m.Region, m.Series, m.Arch, - m.VirtualType, + m.VirtType, m.RootStorageType, m.Source) } +func validateMetadata(m *imagesMetadataDoc) error { + // series must be supplied. + if m.Series == "" { + return errors.NotValidf("missing series: metadata for image %v", m.ImageId) + } + + v, err := series.SeriesVersion(m.Series) + if err != nil { + return err + } + + m.Version = v + return nil +} + // FindMetadata implements Storage.FindMetadata. // Results are sorted by date created and grouped by source. -func (s *storage) FindMetadata(criteria MetadataFilter) (map[SourceType][]Metadata, error) { +func (s *storage) FindMetadata(criteria MetadataFilter) (map[string][]Metadata, error) { coll, closer := s.store.GetCollection(s.collection) defer closer() + logger.Debugf("searching for image metadata %#v", criteria) searchCriteria := buildSearchClauses(criteria) var docs []imagesMetadataDoc if err := coll.Find(searchCriteria).Sort("date_created").All(&docs); err != nil { @@ -206,7 +294,7 @@ return nil, errors.NotFoundf("matching cloud image metadata") } - metadata := make(map[SourceType][]Metadata) + metadata := make(map[string][]Metadata) for _, doc := range docs { one := doc.metadata() metadata[one.Source] = append(metadata[one.Source], one) @@ -233,8 +321,8 @@ all = append(all, bson.DocElem{"arch", bson.D{{"$in", criteria.Arches}}}) } - if criteria.VirtualType != "" { - all = append(all, bson.DocElem{"virtual_type", criteria.VirtualType}) + if criteria.VirtType != "" { + all = append(all, bson.DocElem{"virt_type", criteria.VirtType}) } if criteria.RootStorageType != "" { @@ -265,9 +353,34 @@ // simplestreams metadata supports. Stream string `json:"stream,omitempty"` - // VirtualType stores virtual type. + // VirtType stores the virtualisation type.
+ VirtType string `json:"virt_type,omitempty"` // RootStorageType stores storage type. RootStorageType string `json:"root-storage-type,omitempty"` } + +// SupportedArchitectures implements Storage.SupportedArchitectures. +func (s *storage) SupportedArchitectures(criteria MetadataFilter) ([]string, error) { + coll, closer := s.store.GetCollection(s.collection) + defer closer() + + var arches []string + if err := coll.Find(buildSearchClauses(criteria)).Distinct("arch", &arches); err != nil { + return nil, errors.Trace(err) + } + return arches, nil +} + +// MetadataArchitectureQuerier isolates querying supported architectures. +type MetadataArchitectureQuerier struct { + Storage Storage +} + +// SupportedArchitectures implements state policy SupportedArchitecturesQuerier.SupportedArchitectures. +func (q *MetadataArchitectureQuerier) SupportedArchitectures(stream, region string) ([]string, error) { + return q.Storage.SupportedArchitectures(MetadataFilter{ + Stream: stream, + Region: region, + }) +} === modified file 'src/github.com/juju/juju/state/cloudimagemetadata/image_test.go' --- src/github.com/juju/juju/state/cloudimagemetadata/image_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/cloudimagemetadata/image_test.go 2016-03-22 15:18:22 +0000 @@ -4,6 +4,8 @@ package cloudimagemetadata_test import ( + "regexp" + "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" @@ -26,7 +28,7 @@ var _ = gc.Suite(&cloudImageMetadataSuite{}) const ( - envName = "test-env" + envName = "test-model" collectionName = "test-collection" ) @@ -40,37 +42,44 @@ } func (s *cloudImageMetadataSuite) TestSaveMetadata(c *gc.C) { - attrs := cloudimagemetadata.MetadataAttributes{ + attrs1 := cloudimagemetadata.MetadataAttributes{ Stream: "stream", Region: "region-test", - Series: "series", + Version: "14.04", + Series: "trusty", Arch: "arch", - VirtualType: "virtType-test", - RootStorageType: "rootStorageType-test"} - - added := cloudimagemetadata.Metadata{attrs, "1"} - s.assertRecordMetadata(c, added) - s.assertMetadataRecorded(c, attrs, added) - + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test", + } + attrs2 := cloudimagemetadata.MetadataAttributes{ + Stream: "chalk", + Region: "nether", + Version: "12.04", + Series: "precise", + Arch: "amd64", + } + added := []cloudimagemetadata.Metadata{ + {attrs1, 0, "1"}, + {attrs2, 0, "2"}, + } + s.assertRecordMetadata(c, added[0]) + s.assertRecordMetadata(c, added[1]) + s.assertMetadataRecorded(c, cloudimagemetadata.MetadataAttributes{}, added...) } func (s *cloudImageMetadataSuite) TestFindMetadataNotFound(c *gc.C) { - // No metadata is stored yet. - // So when looking for all and none is found, err. - found, err := s.storage.FindMetadata(cloudimagemetadata.MetadataFilter{}) - c.Assert(err, jc.Satisfies, errors.IsNotFound) - c.Assert(err, gc.ErrorMatches, "matching cloud image metadata not found") - c.Assert(found, gc.HasLen, 0) + s.assertNoMetadata(c) // insert something... attrs := cloudimagemetadata.MetadataAttributes{ Stream: "stream", Region: "region", - Series: "series", + Version: "14.04", + Series: "trusty", Arch: "arch", - VirtualType: "virtualType", + VirtType: "virtType", RootStorageType: "rootStorageType"} - m := cloudimagemetadata.Metadata{attrs, "1"} + m := cloudimagemetadata.Metadata{attrs, 0, "1"} s.assertRecordMetadata(c, m) // ...but look for something else. 
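A minimal sketch of how FindMetadata is typically called (illustrative only; storage is the cloudimagemetadata.Storage under test, the filter values are made up, and errors and fmt are assumed to be imported):

	found, err := storage.FindMetadata(cloudimagemetadata.MetadataFilter{
		Stream: "stream",
		Region: "region",
		Series: []string{"trusty"},
	})
	if errors.IsNotFound(err) {
		// Nothing stored matches the filter.
	}
	// On success the results come back grouped by source ("public",
	// "custom", ...) and ordered by date created within each group.
	for source, matches := range found {
		fmt.Println(source, len(matches))
	}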
@@ -87,7 +96,7 @@ filter := cloudimagemetadata.MetadataFilter{ Stream: attrs.Stream, Region: attrs.Region, - VirtualType: attrs.VirtualType, + VirtType: attrs.VirtType, RootStorageType: attrs.RootStorageType} if attrs.Series != "" { filter.Series = []string{attrs.Series} @@ -102,12 +111,13 @@ attrs := cloudimagemetadata.MetadataAttributes{ Stream: "stream", Region: "region", - Series: "series", + Version: "14.04", + Series: "trusty", Arch: "arch", - VirtualType: "virtualType", + VirtType: "virtType", RootStorageType: "rootStorageType"} - m := cloudimagemetadata.Metadata{attrs, "1"} + m := cloudimagemetadata.Metadata{attrs, 0, "1"} _, err := s.storage.FindMetadata(buildAttributesFilter(attrs)) c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -117,7 +127,7 @@ s.assertMetadataRecorded(c, attrs, expected...) attrs.Stream = "another_stream" - m = cloudimagemetadata.Metadata{attrs, "2"} + m = cloudimagemetadata.Metadata{attrs, 0, "2"} s.assertRecordMetadata(c, m) expected = append(expected, m) @@ -127,12 +137,13 @@ func (s *cloudImageMetadataSuite) TestSaveMetadataUpdateSameAttrsAndImages(c *gc.C) { attrs := cloudimagemetadata.MetadataAttributes{ - Stream: "stream", - Series: "series", - Arch: "arch", + Stream: "stream", + Version: "14.04", + Series: "trusty", + Arch: "arch", } - metadata0 := cloudimagemetadata.Metadata{attrs, "1"} - metadata1 := cloudimagemetadata.Metadata{attrs, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1"} s.assertRecordMetadata(c, metadata0) s.assertRecordMetadata(c, metadata1) @@ -141,12 +152,13 @@ func (s *cloudImageMetadataSuite) TestSaveMetadataUpdateSameAttrsDiffImages(c *gc.C) { attrs := cloudimagemetadata.MetadataAttributes{ - Stream: "stream", - Series: "series", - Arch: "arch", + Stream: "stream", + Version: "14.04", + Series: "trusty", + Arch: "arch", } - metadata0 := cloudimagemetadata.Metadata{attrs, "1"} - metadata1 := cloudimagemetadata.Metadata{attrs, "12"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "12"} s.assertRecordMetadata(c, metadata0) s.assertMetadataRecorded(c, attrs, metadata0) @@ -157,12 +169,13 @@ func (s *cloudImageMetadataSuite) TestSaveDiffMetadataConcurrentlyAndOrderByDateCreated(c *gc.C) { attrs := cloudimagemetadata.MetadataAttributes{ - Stream: "stream", - Series: "series", - Arch: "arch", + Stream: "stream", + Version: "14.04", + Series: "trusty", + Arch: "arch", } - metadata0 := cloudimagemetadata.Metadata{attrs, "0"} - metadata1 := cloudimagemetadata.Metadata{attrs, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1"} metadata1.Stream = "scream" s.assertConcurrentSave(c, @@ -176,12 +189,13 @@ func (s *cloudImageMetadataSuite) TestSaveSameMetadataDiffImageConcurrently(c *gc.C) { attrs := cloudimagemetadata.MetadataAttributes{ - Stream: "stream", - Series: "series", - Arch: "arch", + Stream: "stream", + Version: "14.04", + Series: "trusty", + Arch: "arch", } - metadata0 := cloudimagemetadata.Metadata{attrs, "0"} - metadata1 := cloudimagemetadata.Metadata{attrs, "1"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "1"} s.assertConcurrentSave(c, metadata0, // add this one @@ -192,11 +206,12 @@ func (s *cloudImageMetadataSuite) TestSaveSameMetadataSameImageConcurrently(c *gc.C) { attrs := cloudimagemetadata.MetadataAttributes{ - Stream: "stream", - Series: "series", - Arch: 
"arch", + Stream: "stream", + Version: "14.04", + Series: "trusty", + Arch: "arch", } - metadata0 := cloudimagemetadata.Metadata{attrs, "0"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} s.assertConcurrentSave(c, metadata0, // add this one @@ -207,15 +222,16 @@ func (s *cloudImageMetadataSuite) TestSaveSameMetadataSameImageDiffSourceConcurrently(c *gc.C) { attrs := cloudimagemetadata.MetadataAttributes{ - Stream: "stream", - Series: "series", - Arch: "arch", - Source: cloudimagemetadata.Public, + Stream: "stream", + Version: "14.04", + Series: "trusty", + Arch: "arch", + Source: "public", } - metadata0 := cloudimagemetadata.Metadata{attrs, "0"} + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "0"} - attrs.Source = cloudimagemetadata.Custom - metadata1 := cloudimagemetadata.Metadata{attrs, "0"} + attrs.Source = "custom" + metadata1 := cloudimagemetadata.Metadata{attrs, 0, "0"} s.assertConcurrentSave(c, metadata0, @@ -225,6 +241,37 @@ ) } +func (s *cloudImageMetadataSuite) TestSaveMetadataNoVersionPassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Series: "trusty", + Arch: "arch", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + s.assertRecordMetadata(c, metadata0) +} + +func (s *cloudImageMetadataSuite) TestSaveMetadataNoSeriesPassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Arch: "arch", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) + c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`missing series: metadata for image 1 not valid`)) +} + +func (s *cloudImageMetadataSuite) TestSaveMetadataUnsupportedSeriesPassed(c *gc.C) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Series: "blah", + Arch: "arch", + } + metadata0 := cloudimagemetadata.Metadata{attrs, 0, "1"} + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{metadata0}) + c.Assert(err, gc.ErrorMatches, regexp.QuoteMeta(`unknown version for series: "blah"`)) +} + func (s *cloudImageMetadataSuite) assertConcurrentSave(c *gc.C, metadata0, metadata1 cloudimagemetadata.Metadata, expected ...cloudimagemetadata.Metadata) { addMetadata := func() { s.assertRecordMetadata(c, metadata0) @@ -235,7 +282,7 @@ } func (s *cloudImageMetadataSuite) assertRecordMetadata(c *gc.C, m cloudimagemetadata.Metadata) { - err := s.storage.SaveMetadata(m) + err := s.storage.SaveMetadata([]cloudimagemetadata.Metadata{m}) c.Assert(err, jc.ErrorIsNil) } @@ -243,13 +290,184 @@ metadata, err := s.storage.FindMetadata(buildAttributesFilter(criteria)) c.Assert(err, jc.ErrorIsNil) - groups := make(map[cloudimagemetadata.SourceType][]cloudimagemetadata.Metadata) + groups := make(map[string][]cloudimagemetadata.Metadata) for _, one := range expected { groups[one.Source] = append(groups[one.Source], one) } c.Assert(metadata, jc.DeepEquals, groups) } +func (s *cloudImageMetadataSuite) TestSupportedArchitectures(c *gc.C) { + stream := "stream" + region := "region-test" + + arch1 := "arch" + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: stream, + Region: region, + Version: "14.04", + Series: "trusty", + Arch: arch1, + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test"} + + added := cloudimagemetadata.Metadata{attrs, 0, "1"} + s.assertRecordMetadata(c, added) + s.assertMetadataRecorded(c, attrs, added) + + addedNonUnique := cloudimagemetadata.Metadata{attrs, 0, "21"} + s.assertRecordMetadata(c, addedNonUnique) + 
s.assertMetadataRecorded(c, attrs, addedNonUnique) + + arch2 := "anotherArch" + attrs.Arch = arch2 + added2 := cloudimagemetadata.Metadata{attrs, 0, "21"} + s.assertRecordMetadata(c, added2) + s.assertMetadataRecorded(c, attrs, added2) + + expected := []string{arch1, arch2} + uniqueArches, err := s.storage.SupportedArchitectures( + cloudimagemetadata.MetadataFilter{Stream: stream, Region: region}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(uniqueArches, gc.DeepEquals, expected) +} + +func (s *cloudImageMetadataSuite) TestSupportedArchitecturesUnmatchedStreams(c *gc.C) { + stream := "stream" + region := "region-test" + + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "new-stream", + Region: region, + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test"} + + added := cloudimagemetadata.Metadata{attrs, 0, "1"} + s.assertRecordMetadata(c, added) + s.assertMetadataRecorded(c, attrs, added) + + uniqueArches, err := s.storage.SupportedArchitectures( + cloudimagemetadata.MetadataFilter{Stream: stream, Region: region}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(uniqueArches, gc.DeepEquals, []string{}) +} + +func (s *cloudImageMetadataSuite) TestSupportedArchitecturesUnmatchedRegions(c *gc.C) { + stream := "stream" + region := "region-test" + + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: stream, + Region: "new-region", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test"} + + added := cloudimagemetadata.Metadata{attrs, 0, "1"} + s.assertRecordMetadata(c, added) + s.assertMetadataRecorded(c, attrs, added) + + uniqueArches, err := s.storage.SupportedArchitectures( + cloudimagemetadata.MetadataFilter{Stream: stream, Region: region}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(uniqueArches, gc.DeepEquals, []string{}) +} + +func (s *cloudImageMetadataSuite) TestSupportedArchitecturesUnmatchedStreamsAndRegions(c *gc.C) { + stream := "stream" + region := "region-test" + + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "new-stream", + Region: "new-region", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test"} + + added := cloudimagemetadata.Metadata{attrs, 0, "1"} + s.assertRecordMetadata(c, added) + s.assertMetadataRecorded(c, attrs, added) + + uniqueArches, err := s.storage.SupportedArchitectures( + cloudimagemetadata.MetadataFilter{Stream: stream, Region: region}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(uniqueArches, gc.DeepEquals, []string{}) +} + +func (s *cloudImageMetadataSuite) TestDeleteMetadata(c *gc.C) { + imageId := "ok-to-delete" + s.addTestImageMetadata(c, imageId) + s.assertDeleteMetadata(c, imageId) + s.assertNoMetadata(c) + + // calling delete on it again should be a no-op + s.assertDeleteMetadata(c, imageId) + // make sure log has "nothing to delete" message + c.Assert(c.GetTestLog(), jc.Contains, "no metadata for image ID ok-to-delete to delete") +} + +func (s *cloudImageMetadataSuite) TestDeleteDiffMetadataConcurrently(c *gc.C) { + imageId := "ok-to-delete" + s.addTestImageMetadata(c, imageId) + + diffImageId := "ok-to-delete-too" + s.addTestImageMetadata(c, diffImageId) + + s.assertConcurrentDelete(c, imageId, diffImageId) +} + +func (s *cloudImageMetadataSuite) TestDeleteSameMetadataConcurrently(c *gc.C) { + imageId := "ok-to-delete" + s.addTestImageMetadata(c, imageId) + + s.assertConcurrentDelete(c, imageId, imageId) +} + +func (s 
*cloudImageMetadataSuite) assertConcurrentDelete(c *gc.C, imageId0, imageId1 string) { + deleteMetadata := func() { + s.assertDeleteMetadata(c, imageId0) + } + defer txntesting.SetBeforeHooks(c, s.access.runner, deleteMetadata).Check() + s.assertDeleteMetadata(c, imageId1) + s.assertNoMetadata(c) +} + +func (s *cloudImageMetadataSuite) addTestImageMetadata(c *gc.C, imageId string) { + attrs := cloudimagemetadata.MetadataAttributes{ + Stream: "stream", + Region: "region-test", + Version: "14.04", + Series: "trusty", + Arch: "arch", + VirtType: "virtType-test", + RootStorageType: "rootStorageType-test"} + + added := cloudimagemetadata.Metadata{attrs, 0, imageId} + s.assertRecordMetadata(c, added) + s.assertMetadataRecorded(c, attrs, added) +} + +func (s *cloudImageMetadataSuite) assertDeleteMetadata(c *gc.C, imageId string) { + err := s.storage.DeleteMetadata(imageId) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *cloudImageMetadataSuite) assertNoMetadata(c *gc.C) { + // No metadata should be in store. + // So when looking for all and none is found, err. + found, err := s.storage.FindMetadata(cloudimagemetadata.MetadataFilter{}) + c.Assert(err, jc.Satisfies, errors.IsNotFound) + c.Assert(err, gc.ErrorMatches, "matching cloud image metadata not found") + c.Assert(found, gc.HasLen, 0) +} + type TestMongo struct { database *mgo.Database runner txn.Runner === modified file 'src/github.com/juju/juju/state/cloudimagemetadata/interface.go' --- src/github.com/juju/juju/state/cloudimagemetadata/interface.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/cloudimagemetadata/interface.go 2016-03-22 15:18:22 +0000 @@ -18,14 +18,17 @@ // Region is the name of cloud region associated with the image. Region string - // Series is Os version, for e.g. "quantal". + // Version is the OS version, e.g. "12.04". + Version string + + // Series is the OS series, e.g. "trusty". Series string // Arch is the architecture for this cloud image, e.g. "amd64" Arch string - // VirtualType contains the type of the cloud image, for e.g. "pv", "hvm". "kvm". - VirtualType string + // VirtType contains the virtualisation type of the cloud image, e.g. "pv", "hvm", "kvm". + VirtType string // RootStorageType contains the type of root storage, e.g. "ebs", "instance". RootStorageType string @@ -34,13 +37,18 @@ RootStorageSize *uint64 // Source describes where this image is coming from: is it public? custom? - Source SourceType + Source string } // Metadata describes cloud image metadata. type Metadata struct { MetadataAttributes + // Priority is an importance factor for image metadata. + // Higher number means higher priority. + // This allows metadata to be sorted by importance. + Priority int + // ImageId contains image identifier. ImageId string } @@ -48,14 +56,21 @@ // Storage provides methods for storing and retrieving cloud image metadata. type Storage interface { // SaveMetadata adds cloud images metadata into state if it's new or - // updates metadata if it already exists, - SaveMetadata(Metadata) error + // updates metadata if it already exists. + SaveMetadata([]Metadata) error + + // DeleteMetadata deletes cloud image metadata from state. + DeleteMetadata(imageId string) error // FindMetadata returns all Metadata that match specified // criteria or a "not found" error if none match. // Empty criteria will return all cloud image metadata. // Returned result is grouped by source type and ordered by date created.
=== modified file 'src/github.com/juju/juju/state/cloudimagemetadata/interface.go'
--- src/github.com/juju/juju/state/cloudimagemetadata/interface.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/state/cloudimagemetadata/interface.go	2016-03-22 15:18:22 +0000
@@ -18,14 +18,17 @@
 	// Region is the name of cloud region associated with the image.
 	Region string
 
-	// Series is Os version, for e.g. "quantal".
+	// Version is the OS version, e.g. "12.04".
+	Version string
+
+	// Series is the OS series, e.g. "trusty".
 	Series string
 
 	// Arch is the architecture for this cloud image, e.g. "amd64".
 	Arch string
 
-	// VirtualType contains the type of the cloud image, for e.g. "pv", "hvm". "kvm".
-	VirtualType string
+	// VirtType contains the virtualisation type of the cloud image, e.g. "pv", "hvm", "kvm".
+	VirtType string
 
 	// RootStorageType contains the type of root storage, e.g. "ebs", "instance".
 	RootStorageType string
@@ -34,13 +37,18 @@
 	RootStorageSize *uint64
 
 	// Source describes where this image is coming from: is it public? custom?
-	Source SourceType
+	Source string
 }
 
 // Metadata describes cloud image metadata.
 type Metadata struct {
 	MetadataAttributes
+	// Priority is an importance factor for image metadata.
+	// Higher number means higher priority.
+	// This allows metadata to be sorted by importance.
+	Priority int
+
 	// ImageId contains the image identifier.
 	ImageId string
 }
@@ -48,14 +56,21 @@
 // Storage provides methods for storing and retrieving cloud image metadata.
 type Storage interface {
 	// SaveMetadata adds cloud image metadata into state if it's new or
-	// updates metadata if it already exists,
-	SaveMetadata(Metadata) error
+	// updates metadata if it already exists.
+	SaveMetadata([]Metadata) error
+
+	// DeleteMetadata deletes cloud image metadata from state.
+	DeleteMetadata(imageId string) error
 
 	// FindMetadata returns all Metadata that match specified
 	// criteria or a "not found" error if none match.
 	// Empty criteria will return all cloud image metadata.
 	// Returned result is grouped by source type and ordered by date created.
-	FindMetadata(criteria MetadataFilter) (map[SourceType][]Metadata, error)
+	FindMetadata(criteria MetadataFilter) (map[string][]Metadata, error)
+
+	// SupportedArchitectures returns the collection of unique architectures
+	// that the stored metadata contains.
+	SupportedArchitectures(criteria MetadataFilter) ([]string, error)
 }
 
 // DataStore exposes data store operations for use by the cloud image metadata package.
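As a usage sketch of the Storage interface above — SaveMetadata now takes a batch and FindMetadata groups its results by source — a caller might look like this (the storage value and attribute values are illustrative):

package main

import (
	"fmt"

	"github.com/juju/juju/state/cloudimagemetadata"
)

func recordAndQuery(storage cloudimagemetadata.Storage) error {
	attrs := cloudimagemetadata.MetadataAttributes{
		Stream: "released",
		Region: "region",
		Series: "trusty",
		Arch:   "amd64",
		Source: "custom",
	}
	// SaveMetadata accepts a slice, so related records are written in one call.
	err := storage.SaveMetadata([]cloudimagemetadata.Metadata{
		{MetadataAttributes: attrs, Priority: 50, ImageId: "img-id"},
	})
	if err != nil {
		return err
	}
	// Results come back grouped by source, e.g. "custom" or "public".
	bySource, err := storage.FindMetadata(cloudimagemetadata.MetadataFilter{
		Stream: "released",
		Region: "region",
	})
	if err != nil {
		return err
	}
	for source, images := range bySource {
		fmt.Printf("%s: %d image(s)\n", source, len(images))
	}
	return nil
}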
"{{"_id", "foo"}}" the relevant environment UUID prefix will +// (e.g. "{{"_id", "foo"}}" the relevant model UUID prefix will // be added on to the id. Note that more complex selectors using the // "_id" field (e.g. using the $in operator) will not be modified. In -// these cases it is up to the caller to add environment UUID +// these cases it is up to the caller to add model UUID // prefixes when necessary. -func (c *envStateCollection) Find(query interface{}) *mgo.Query { +func (c *modelStateCollection) Find(query interface{}) *mgo.Query { return c.WriteCollection.Find(c.mungeQuery(query)) } // FindId looks up a single document by _id. If the id is a string the -// relevant environment UUID prefix will be added to it. Otherwise, the +// relevant model UUID prefix will be added to it. Otherwise, the // query will be handled as per Find(). -func (c *envStateCollection) FindId(id interface{}) *mgo.Query { +func (c *modelStateCollection) FindId(id interface{}) *mgo.Query { if sid, ok := id.(string); ok { - return c.WriteCollection.FindId(ensureEnvUUID(c.envUUID, sid)) + return c.WriteCollection.FindId(ensureModelUUID(c.modelUUID, sid)) } return c.Find(bson.D{{"_id", id}}) } // Insert adds one or more documents to a collection. If the document -// id is a string the environment UUID prefix will be automatically -// added to it. The env-uuid field will also be automatically added if -// it is missing. An error will be returned if an env-uuid field is +// id is a string the model UUID prefix will be automatically +// added to it. The model-uuid field will also be automatically added if +// it is missing. An error will be returned if an model-uuid field is // provided but is the wrong value. -func (c *envStateCollection) Insert(docs ...interface{}) error { +func (c *modelStateCollection) Insert(docs ...interface{}) error { var mungedDocs []interface{} for _, doc := range docs { - mungedDoc, err := c.mungeInsert(doc) + mungedDoc, err := mungeDocForMultiEnv(doc, c.modelUUID, modelUUIDRequired) if err != nil { return errors.Trace(err) } @@ -100,152 +100,56 @@ // Update finds a single document matching the provided query document and // modifies it according to the update document. // -// An "env-uuid" condition will always be added to the query to ensure -// that only data for the environment being filtered on is returned. +// An "model-uuid" condition will always be added to the query to ensure +// that only data for the model being filtered on is returned. // // If a simple "_id" field selector is included in the query -// (e.g. "{{"_id", "foo"}}" the relevant environment UUID prefix will +// (e.g. "{{"_id", "foo"}}" the relevant model UUID prefix will // be added on to the id. Note that more complex selectors using the // "_id" field (e.g. using the $in operator) will not be modified. In -// these cases it is up to the caller to add environment UUID +// these cases it is up to the caller to add model UUID // prefixes when necessary. -func (c *envStateCollection) Update(query interface{}, update interface{}) error { +func (c *modelStateCollection) Update(query interface{}, update interface{}) error { return c.WriteCollection.Update(c.mungeQuery(query), update) } // UpdateId finds a single document by _id and modifies it according to the -// update document. The id must be a string or bson.ObjectId. The environment +// update document. The id must be a string or bson.ObjectId. The model // UUID will be automatically prefixed on to the id if it's a string and the // prefix isn't there already. 
-func (c *envStateCollection) UpdateId(id interface{}, update interface{}) error { +func (c *modelStateCollection) UpdateId(id interface{}, update interface{}) error { if sid, ok := id.(string); ok { - return c.WriteCollection.UpdateId(ensureEnvUUID(c.envUUID, sid), update) + return c.WriteCollection.UpdateId(ensureModelUUID(c.modelUUID, sid), update) } return c.WriteCollection.UpdateId(bson.D{{"_id", id}}, update) } // Remove deletes a single document using the query provided. The // query will be handled as per Find(). -func (c *envStateCollection) Remove(query interface{}) error { +func (c *modelStateCollection) Remove(query interface{}) error { return c.WriteCollection.Remove(c.mungeQuery(query)) } // RemoveId deletes a single document by id. If the id is a string the -// relevant environment UUID prefix will be added on to it. Otherwise, the +// relevant model UUID prefix will be added on to it. Otherwise, the // query will be handled as per Find(). -func (c *envStateCollection) RemoveId(id interface{}) error { +func (c *modelStateCollection) RemoveId(id interface{}) error { if sid, ok := id.(string); ok { - return c.WriteCollection.RemoveId(ensureEnvUUID(c.envUUID, sid)) + return c.WriteCollection.RemoveId(ensureModelUUID(c.modelUUID, sid)) } return c.Remove(bson.D{{"_id", id}}) } // RemoveAll deletes all documents that match a query. The query will // be handled as per Find(). -func (c *envStateCollection) RemoveAll(query interface{}) (*mgo.ChangeInfo, error) { +func (c *modelStateCollection) RemoveAll(query interface{}) (*mgo.ChangeInfo, error) { return c.WriteCollection.RemoveAll(c.mungeQuery(query)) } -func (c *envStateCollection) mungeInsert(inDoc interface{}) (bson.D, error) { - outDoc, err := toBsonD(inDoc) - if err != nil { - return nil, errors.Trace(err) - } - uuidSeen := false - for i, item := range outDoc { - switch item.Name { - case "_id": - docId, ok := item.Value.(string) - if ok { // tolerate non-string ids - outDoc[i].Value = ensureEnvUUID(c.envUUID, docId) - } - case "env-uuid": - docEnvUUID, ok := outDoc[i].Value.(string) - if !ok { - return nil, errors.Errorf("env-uuid is not a string: %v", outDoc[i].Value) - } - if docEnvUUID == "" { - outDoc[i].Value = c.envUUID - } else if docEnvUUID != c.envUUID { - return nil, errors.Errorf("insert env-uuid is not correct: %q != %q", docEnvUUID, c.envUUID) - } - uuidSeen = true - } - } - if !uuidSeen { - outDoc = append(outDoc, bson.DocElem{"env-uuid", c.envUUID}) - } - return outDoc, nil -} - -func (c *envStateCollection) mungeQuery(inq interface{}) bson.D { - outq := bson.D{{"env-uuid", c.envUUID}} - var add = func(name string, value interface{}) { - switch name { - case "_id": - if id, ok := value.(string); ok { - value = ensureEnvUUID(c.envUUID, id) - } else if subquery, ok := value.(bson.D); ok { - value = c.mungeIDSubQuery(subquery) - } - case "env-uuid": - panic("env-uuid is added automatically and should not be provided") - } - outq = append(outq, bson.DocElem{name, value}) - } - - updateQuery(inq, add) - return outq -} - -func (c *envStateCollection) mungeIDSubQuery(inq interface{}) bson.D { - var outq bson.D - var add = func(name string, value interface{}) { - switch name { - case "$in": - ids, ok := value.([]string) - if !ok { - panic("$in requires []string") - } - var fullIDs []string - for _, id := range ids { - fullID := ensureEnvUUID(c.envUUID, id) - fullIDs = append(fullIDs, fullID) - } - value = fullIDs - } - outq = append(outq, bson.DocElem{name, value}) - } - - updateQuery(inq, add) - return outq -} - -func 
updateQuery(inq interface{}, add func(name string, value interface{})) {
-	switch inq := inq.(type) {
-	case bson.D:
-		for _, elem := range inq {
-			add(elem.Name, elem.Value)
-		}
-	case bson.M:
-		for name, value := range inq {
-			add(name, value)
-		}
-	case nil:
-	default:
-		panic("query must be bson.D, bson.M, or nil")
-	}
-}
-
-func toBsonD(doc interface{}) (bson.D, error) {
-	bytes, err := bson.Marshal(doc)
-	if err != nil {
-		return nil, errors.Annotate(err, "bson marshaling failed")
-	}
-	var out bson.D
-	err = bson.Unmarshal(bytes, &out)
-	if err != nil {
-		return nil, errors.Annotate(err, "bson unmarshaling failed")
-	}
-	return out, nil
+func (c *modelStateCollection) mungeQuery(inq interface{}) bson.D {
+	outq, err := mungeDocForMultiEnv(inq, c.modelUUID, modelUUIDRequired|noModelUUIDInInput)
+	if err != nil {
+		panic(err)
+	}
+	return outq
 }

=== modified file 'src/github.com/juju/juju/state/collection_test.go'
--- src/github.com/juju/juju/state/collection_test.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/state/collection_test.go	2016-03-22 15:18:22 +0000
@@ -28,7 +28,7 @@
 }
 
 func (s *collectionSuite) TestGenericStateCollection(c *gc.C) {
-	// The users collection does not require filtering by env UUID.
+	// The users collection does not require filtering by model UUID.
 	coll, closer := state.GetCollection(s.State, state.UsersC)
 	defer closer()
@@ -136,17 +136,17 @@
 	}
 }
 
-func (s *collectionSuite) TestEnvStateCollection(c *gc.C) {
-	// The machines collection requires filtering by env UUID. Set up
-	// 2 environments with machines in each.
+func (s *collectionSuite) TestModelStateCollection(c *gc.C) {
+	// The machines collection requires filtering by model UUID. Set up
+	// 2 models with machines in each.
 	m0 := s.Factory.MakeMachine(c, nil)
 	s.Factory.MakeMachine(c, nil)
-	st1 := s.Factory.MakeEnvironment(c, nil)
+	st1 := s.Factory.MakeModel(c, nil)
 	defer st1.Close()
 	f1 := factory.NewFactory(st1)
 	otherM0 := f1.MakeMachine(c, &factory.MachineParams{Series: "trusty"})
-	// Ensure that the first machine in each env have overlapping ids
+	// Ensure that the first machines in each model have overlapping ids
 	// (otherwise tests may not fail when they should)
 	c.Assert(m0.Id(), gc.Equals, otherM0.Id())
 
@@ -154,7 +154,7 @@
 		var doc bson.M
 		coll, closer := state.GetRawCollection(st, state.NetworkInterfacesC)
 		defer closer()
-		err := coll.Find(bson.D{{"env-uuid", st.EnvironUUID()}}).One(&doc)
+		err := coll.Find(bson.D{{"model-uuid", st.ModelUUID()}}).One(&doc)
 		c.Assert(err, jc.ErrorIsNil)
 		return doc["_id"].(bson.ObjectId)
 	}
@@ -172,7 +172,7 @@
 	// Grab the document id of the just added network interface for use in tests.
 	ifaceId := getIfaceId(s.State)
 
-	// Add a network interface to the other environment to test collections that rely on the env-uuid field.
+	// Add a network interface to the other model to test collections that rely on the model-uuid field.
 	_, err = st1.AddNetwork(state.NetworkInfo{"net2", "net2", "0.1.2.4/24", 0})
 	c.Assert(err, jc.ErrorIsNil)
 	_, err = otherM0.AddNetworkInterface(state.NetworkInterfaceInfo{
@@ -182,7 +182,7 @@
 	})
 	c.Assert(err, jc.ErrorIsNil)
 
-	// Grab the document id of the network interface just added to the other environment for use in tests.
+	// Grab the document id of the network interface just added to the other model for use in tests.
otherIfaceId := getIfaceId(st1) machines0, closer := state.GetCollection(s.State, state.MachinesC) @@ -200,28 +200,28 @@ for i, t := range []collectionTestCase{ { - label: "Count filters by env", + label: "Count filters by model", test: func() (int, error) { return machines0.Count() }, expectedCount: 2, }, { - label: "Find filters by env", + label: "Find filters by model", test: func() (int, error) { return machines0.Find(bson.D{{"series", m0.Series()}}).Count() }, expectedCount: 2, }, { - label: "Find adds env UUID when _id is provided", + label: "Find adds model UUID when _id is provided", test: func() (int, error) { return machines0.Find(bson.D{{"_id", m0.Id()}}).Count() }, expectedCount: 1, }, { - label: "Find tolerates env UUID prefix already being present", + label: "Find tolerates model UUID prefix already being present", test: func() (int, error) { return machines0.Find(bson.D{ {"_id", state.DocID(s.State, m0.Id())}, @@ -230,7 +230,7 @@ expectedCount: 1, }, { - label: "Find with no selector still filters by env", + label: "Find with no selector still filters by model", test: func() (int, error) { return machines0.Find(nil).Count() }, @@ -243,7 +243,7 @@ {"_id", bson.D{{"$regex", ":" + m0.Id() + "$"}}}, }).Count() }, - expectedCount: 1, // not 2 because env-uuid filter is still added + expectedCount: 1, // not 2 because model-uuid filter is still added }, { label: "Find works with collections with ObjectId ids", @@ -260,30 +260,29 @@ expectedCount: 1, }, { - label: "Find panics if env-uuid is included", - test: func() (int, error) { - machines0.Find(bson.D{{"env-uuid", "whatever"}}) - return 0, nil - }, - expectedPanic: "env-uuid is added automatically and should not be provided", - }, - { - label: "Find panics if query type is unsupported", - test: func() (int, error) { - machines0.Find(map[string]string{"foo": "bar"}) - return 0, nil - }, - expectedPanic: "query must be bson.D, bson.M, or nil", - }, - { - label: "FindId adds env UUID prefix", + label: "Find works with maps", + test: func() (int, error) { + return machines0.Find(map[string]string{"_id": m0.Id()}).Count() + }, + expectedCount: 1, + }, + { + label: "Find panics if model-uuid is included", + test: func() (int, error) { + machines0.Find(bson.D{{"model-uuid", "whatever"}}) + return 0, nil + }, + expectedPanic: "model-uuid is added automatically and should not be provided", + }, + { + label: "FindId adds model UUID prefix", test: func() (int, error) { return machines0.FindId(m0.Id()).Count() }, expectedCount: 1, }, { - label: "FindId tolerates env UUID prefix already being there", + label: "FindId tolerates model UUID prefix already being there", test: func() (int, error) { return machines0.FindId(state.DocID(s.State, m0.Id())).Count() }, @@ -297,16 +296,16 @@ expectedCount: 1, }, { - label: "FindId adds env-uuid field", + label: "FindId adds model-uuid field", test: func() (int, error) { return networkInterfaces.FindId(otherIfaceId).Count() }, // expect to find no networks, as we are searching with the id of - // the network in the other environment. + // the network in the other model. 
expectedCount: 0, }, { - label: "Insert adds env-uuid", + label: "Insert adds model-uuid", test: func() (int, error) { err := machines0.Writeable().Insert(bson.D{ {"_id", state.DocID(s.State, "99")}, @@ -318,12 +317,12 @@ expectedCount: 3, }, { - label: "Insert populates env-uuid if blank", + label: "Insert populates model-uuid if blank", test: func() (int, error) { err := machines0.Writeable().Insert(bson.D{ {"_id", state.DocID(s.State, "99")}, {"machineid", 99}, - {"env-uuid", ""}, + {"model-uuid", ""}, }) c.Assert(err, jc.ErrorIsNil) return machines0.Count() @@ -343,12 +342,12 @@ expectedCount: 1, }, { - label: "Insert tolerates prefixed _id and correct env-uuid if provided", + label: "Insert tolerates prefixed _id and correct model-uuid if provided", test: func() (int, error) { err := machines0.Writeable().Insert(bson.D{ {"_id", state.DocID(s.State, "99")}, {"machineid", 99}, - {"env-uuid", s.State.EnvironUUID()}, + {"model-uuid", s.State.ModelUUID()}, }) c.Assert(err, jc.ErrorIsNil) return machines0.Count() @@ -356,32 +355,32 @@ expectedCount: 3, }, { - label: "Insert fails if env-uuid doesn't match", + label: "Insert fails if model-uuid doesn't match", test: func() (int, error) { err := machines0.Writeable().Insert(bson.D{ {"_id", "99"}, {"machineid", 99}, - {"env-uuid", "something-else"}, + {"model-uuid", "something-else"}, }) return 0, err }, - expectedError: "insert env-uuid is not correct: .+", + expectedError: "bad \"model-uuid\" value: .+", }, { - label: "Remove adds env UUID prefix to _id", + label: "Remove adds model UUID prefix to _id", test: func() (int, error) { err := machines0.Writeable().Remove(bson.D{{"_id", "0"}}) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 2, // Expect machine-1 in first env and machine-0 in second env + expectedCount: 2, // Expect machine-1 in first model and machine-0 in second model }, { - label: "Remove filters by env", + label: "Remove filters by model", test: func() (int, error) { // Attempt to remove the trusty machine in the second - // env with the collection that's filtering for the - // first env - nothing should get removed. + // model with the collection that's filtering for the + // first model - nothing should get removed. err := machines0.Writeable().Remove(bson.D{{"series", "trusty"}}) c.Assert(err, gc.ErrorMatches, "not found") return s.machines.Count() @@ -389,34 +388,34 @@ expectedCount: 3, // Expect all machines to still be there. 
}, { - label: "Remove filters by env 2", + label: "Remove filters by model 2", test: func() (int, error) { err := machines0.Writeable().Remove(bson.D{{"machineid", "0"}}) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 2, // Expect machine 1 in first env and machine-0 in second env + expectedCount: 2, // Expect machine 1 in first model and machine-0 in second model }, { - label: "RemoveId adds env UUID prefix", + label: "RemoveId adds model UUID prefix", test: func() (int, error) { err := machines0.Writeable().RemoveId(m0.Id()) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 2, // Expect machine-1 in first env and machine-0 in second env + expectedCount: 2, // Expect machine-1 in first model and machine-0 in second model }, { - label: "RemoveId tolerates env UUID prefix already being there", + label: "RemoveId tolerates model UUID prefix already being there", test: func() (int, error) { err := machines0.Writeable().RemoveId(state.DocID(s.State, m0.Id())) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 2, // Expect machine-1 in first env and machine-0 in second env + expectedCount: 2, // Expect machine-1 in first model and machine-0 in second model }, { - label: "RemoveId filters by env-uuid field", + label: "RemoveId filters by model-uuid field", test: func() (int, error) { err := networkInterfaces.Writeable().RemoveId(otherIfaceId) c.Assert(err, gc.ErrorMatches, "not found") @@ -425,25 +424,25 @@ expectedCount: 1, // ensure doc was not removed }, { - label: "RemoveAll filters by env", + label: "RemoveAll filters by model", test: func() (int, error) { _, err := machines0.Writeable().RemoveAll(bson.D{{"series", m0.Series()}}) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 1, // Expect machine-1 in second env + expectedCount: 1, // Expect machine-1 in second model }, { - label: "RemoveAll adds env UUID when _id is provided", + label: "RemoveAll adds model UUID when _id is provided", test: func() (int, error) { _, err := machines0.Writeable().RemoveAll(bson.D{{"_id", m0.Id()}}) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 2, // Expect machine-1 in first env and machine-0 in second env + expectedCount: 2, // Expect machine-1 in first model and machine-0 in second model }, { - label: "RemoveAll tolerates env UUID prefix already being present", + label: "RemoveAll tolerates model UUID prefix already being present", test: func() (int, error) { _, err := machines0.Writeable().RemoveAll(bson.D{ {"_id", state.DocID(s.State, m0.Id())}, @@ -451,32 +450,24 @@ c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 2, // Expect machine-1 in first env and machine-0 in second env + expectedCount: 2, // Expect machine-1 in first model and machine-0 in second model }, { - label: "RemoveAll with no selector still filters by env", + label: "RemoveAll with no selector still filters by model", test: func() (int, error) { _, err := machines0.Writeable().RemoveAll(nil) c.Assert(err, jc.ErrorIsNil) return s.machines.Count() }, - expectedCount: 1, // Expect machine-0 in second env - }, - { - label: "RemoveAll panics if env-uuid is included", - test: func() (int, error) { - machines0.Writeable().RemoveAll(bson.D{{"env-uuid", "whatever"}}) - return 0, nil - }, - expectedPanic: "env-uuid is added automatically and should not be provided", - }, - { - label: "RemoveAll panics if query type is unsupported", - test: func() (int, error) { - 
machines0.Writeable().RemoveAll(map[string]string{"foo": "bar"}) - return 0, nil - }, - expectedPanic: "query must be bson.D, bson.M, or nil", + expectedCount: 1, // Expect machine-0 in second model + }, + { + label: "RemoveAll panics if model-uuid is included", + test: func() (int, error) { + machines0.Writeable().RemoveAll(bson.D{{"model-uuid", "whatever"}}) + return 0, nil + }, + expectedPanic: "model-uuid is added automatically and should not be provided", }, { label: "Update", @@ -515,7 +506,7 @@ c.Check(func() { t.test() }, gc.PanicMatches, t.expectedPanic) } - // Check that other environment is untouched after each test + // Check that other model is untouched after each test count, err := machines1.Count() c.Assert(err, jc.ErrorIsNil) c.Check(count, gc.Equals, 1) === modified file 'src/github.com/juju/juju/state/compat_test.go' --- src/github.com/juju/juju/state/compat_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/compat_test.go 2016-03-22 15:18:22 +0000 @@ -17,7 +17,7 @@ // schema changes. type compatSuite struct { internalStateSuite - env *Environment + env *Model } var _ = gc.Suite(&compatSuite{}) @@ -25,16 +25,16 @@ func (s *compatSuite) SetUpTest(c *gc.C) { s.internalStateSuite.SetUpTest(c) - env, err := s.state.Environment() + env, err := s.state.Model() c.Assert(err, jc.ErrorIsNil) s.env = env } -func (s *compatSuite) TestEnvironAssertAlive(c *gc.C) { - // 1.17+ has a "Life" field in environment documents. +func (s *compatSuite) TestModelAssertAlive(c *gc.C) { + // 1.17+ has a "Life" field in model documents. // We remove it here, to test 1.16 compatibility. ops := []txn.Op{{ - C: environmentsC, + C: modelsC, Id: s.env.doc.UUID, Update: bson.D{{"$unset", bson.D{{"life", nil}}}}, }} @@ -42,7 +42,7 @@ c.Assert(err, jc.ErrorIsNil) // Now check the assertAliveOp and Destroy work as if - // the environment is Alive. + // the model is Alive. err = s.state.runTransaction([]txn.Op{s.env.assertAliveOp()}) c.Assert(err, jc.ErrorIsNil) err = s.env.Destroy() @@ -52,7 +52,7 @@ func (s *compatSuite) TestGetServiceWithoutNetworksIsOK(c *gc.C) { charm := addCharm(c, s.state, "quantal", testcharms.Repo.CharmDir("mysql")) owner := s.env.Owner() - service, err := s.state.AddService("mysql", owner.String(), charm, nil, nil, nil) + service, err := s.state.AddService(AddServiceArgs{Name: "mysql", Owner: owner.String(), Charm: charm}) c.Assert(err, jc.ErrorIsNil) // In 1.17.7+ all services have associated document in the // requested networks collection. 
We remove it here to test === modified file 'src/github.com/juju/juju/state/configvalidator_test.go' --- src/github.com/juju/juju/state/configvalidator_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/configvalidator_test.go 2016-03-22 15:18:22 +0000 @@ -27,7 +27,7 @@ validateValid *config.Config } -// To test UpdateEnvironConfig updates state, Validate returns a config +// To test UpdateModelConfig updates state, Validate returns a config // different to both input configs func mockValidCfg() (valid *config.Config, err error) { cfg, err := config.New(config.UseDefaults, coretesting.FakeConfig()) @@ -58,33 +58,33 @@ } } -func (s *ConfigValidatorSuite) updateEnvironConfig(c *gc.C) error { +func (s *ConfigValidatorSuite) updateModelConfig(c *gc.C) error { updateAttrs := map[string]interface{}{ "authorized-keys": "different-keys", "arbitrary-key": "shazam!", } - return s.State.UpdateEnvironConfig(updateAttrs, nil, nil) + return s.State.UpdateModelConfig(updateAttrs, nil, nil) } func (s *ConfigValidatorSuite) TestConfigValidate(c *gc.C) { - err := s.updateEnvironConfig(c) + err := s.updateModelConfig(c) c.Assert(err, jc.ErrorIsNil) } -func (s *ConfigValidatorSuite) TestUpdateEnvironConfigFailsOnConfigValidateError(c *gc.C) { +func (s *ConfigValidatorSuite) TestUpdateModelConfigFailsOnConfigValidateError(c *gc.C) { var configValidatorErr error s.policy.GetConfigValidator = func(string) (state.ConfigValidator, error) { configValidatorErr = errors.NotFoundf("") return &s.configValidator, configValidatorErr } - err := s.updateEnvironConfig(c) + err := s.updateModelConfig(c) c.Assert(err, gc.ErrorMatches, " not found") } -func (s *ConfigValidatorSuite) TestUpdateEnvironConfigUpdatesState(c *gc.C) { - s.updateEnvironConfig(c) - stateCfg, err := s.State.EnvironConfig() +func (s *ConfigValidatorSuite) TestUpdateModelConfigUpdatesState(c *gc.C) { + s.updateModelConfig(c) + stateCfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) newValidCfg, err := mockValidCfg() c.Assert(err, jc.ErrorIsNil) @@ -97,10 +97,10 @@ return nil, configValidatorErr } - err := s.updateEnvironConfig(c) + err := s.updateModelConfig(c) c.Assert(err, gc.ErrorMatches, "policy returned nil configValidator without an error") configValidatorErr = errors.NotImplementedf("Validator") - err = s.updateEnvironConfig(c) + err = s.updateModelConfig(c) c.Assert(err, jc.ErrorIsNil) } @@ -111,6 +111,6 @@ } state.SetPolicy(s.State, nil) - err := s.updateEnvironConfig(c) + err := s.updateModelConfig(c) c.Assert(err, jc.ErrorIsNil) } === modified file 'src/github.com/juju/juju/state/conn_test.go' --- src/github.com/juju/juju/state/conn_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/conn_test.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,9 @@ package state_test import ( - stdtesting "testing" - "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" @@ -15,11 +15,6 @@ "github.com/juju/juju/testing" ) -// TestPackage integrates the tests into gotest. -func TestPackage(t *stdtesting.T) { - testing.MgoTestPackage(t) -} - // ConnSuite provides the infrastructure for all other // test suites (StateSuite, CharmSuite, MachineSuite, etc). 
type ConnSuite struct {
@@ -31,9 +26,9 @@
 	relations    *mgo.Collection
 	services     *mgo.Collection
 	units        *mgo.Collection
-	stateServers *mgo.Collection
+	controllers  *mgo.Collection
 	policy       statetesting.MockPolicy
-	envTag       names.EnvironTag
+	modelTag     names.ModelTag
 }
 
 func (cs *ConnSuite) SetUpTest(c *gc.C) {
@@ -44,7 +39,7 @@
 
 	cs.StateSuite.SetUpTest(c)
 
-	cs.envTag = cs.State.EnvironTag()
+	cs.modelTag = cs.State.ModelTag()
 
 	jujuDB := cs.MgoSuite.Session.DB("juju")
 	cs.annotations = jujuDB.C("annotations")
@@ -54,7 +49,7 @@
 	cs.relations = jujuDB.C("relations")
 	cs.services = jujuDB.C("services")
 	cs.units = jujuDB.C("units")
-	cs.stateServers = jujuDB.C("stateServers")
+	cs.controllers = jujuDB.C("controllers")
 
 	c.Log("SetUpTest done")
 }
@@ -75,6 +70,10 @@
 	return state.AddTestingServiceWithStorage(c, s.State, name, ch, s.Owner, storage)
 }
 
+func (s *ConnSuite) AddTestingServiceWithBindings(c *gc.C, name string, ch *state.Charm, bindings map[string]string) *state.Service {
+	return state.AddTestingServiceWithBindings(c, s.State, name, ch, s.Owner, bindings)
+}
+
 func (s *ConnSuite) AddSeriesCharm(c *gc.C, name, series string) *state.Charm {
 	return state.AddCustomCharm(c, s.State, name, "", "", series, -1)
 }
@@ -103,3 +102,18 @@
 func (s *ConnSuite) AddMetricsCharm(c *gc.C, name, metricsYaml string, revsion int) *state.Charm {
 	return state.AddCustomCharm(c, s.State, name, "metrics.yaml", metricsYaml, "quantal", revsion)
 }
+
+// NewStateForModelNamed returns a new model with the given modelName, which
+// has a unique UUID, and does not need to be closed when the test completes.
+func (s *ConnSuite) NewStateForModelNamed(c *gc.C, modelName string) *state.State {
+	cfg := testing.CustomModelConfig(c, testing.Attrs{
+		"name": modelName,
+		"uuid": utils.MustNewUUID().String(),
+	})
+	otherOwner := names.NewLocalUserTag("test-admin")
+	_, otherState, err := s.State.NewModel(cfg, otherOwner)
+
+	c.Assert(err, jc.ErrorIsNil)
+	s.AddCleanup(func(*gc.C) { otherState.Close() })
+	return otherState
+}

=== modified file 'src/github.com/juju/juju/state/constraints.go'
--- src/github.com/juju/juju/state/constraints.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/state/constraints.go	2016-03-22 15:18:22 +0000
@@ -17,7 +17,7 @@
 
 // constraintsDoc is the mongodb representation of a constraints.Value.
type constraintsDoc struct { - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Arch *string CpuCores *uint64 CpuPower *uint64 @@ -49,7 +49,7 @@ func newConstraintsDoc(st *State, cons constraints.Value) constraintsDoc { return constraintsDoc{ - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Arch: cons.Arch, CpuCores: cons.CpuCores, CpuPower: cons.CpuPower, === modified file 'src/github.com/juju/juju/state/constraintsvalidation_test.go' --- src/github.com/juju/juju/state/constraintsvalidation_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/constraintsvalidation_test.go 2016-03-22 15:18:22 +0000 @@ -20,7 +20,7 @@ func (s *constraintsValidationSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) - s.policy.GetConstraintsValidator = func(*config.Config) (constraints.Validator, error) { + s.policy.GetConstraintsValidator = func(*config.Config, state.SupportedArchitecturesQuerier) (constraints.Validator, error) { validator := constraints.NewValidator() validator.RegisterConflicts( []string{constraints.InstanceType}, @@ -44,7 +44,7 @@ consToSet string consFallback string - effectiveEnvironCons string // environment constraints after setting consFallback + effectiveModelCons string // model constraints after setting consFallback effectiveServiceCons string // service constraints after setting consToSet effectiveUnitCons string // unit constraints after setting consToSet on the service effectiveMachineCons string // machine constraints after setting consToSet @@ -53,7 +53,7 @@ consToSet: "", consFallback: "", - effectiveEnvironCons: "", + effectiveModelCons: "", effectiveServiceCons: "", effectiveUnitCons: "", effectiveMachineCons: "", @@ -62,7 +62,7 @@ consToSet: "instance-type=foo-42 cpu-power=9001 spaces=bar networks=net1,^net2", consFallback: "", - effectiveEnvironCons: "", // environment constraints are stored as empty. + effectiveModelCons: "", // model constraints are stored as empty. // i.e. there are no fallbacks and all the following cases are the same. effectiveServiceCons: "instance-type=foo-42 cpu-power=9001 spaces=bar networks=net1,^net2", effectiveUnitCons: "instance-type=foo-42 cpu-power=9001 spaces=bar networks=net1,^net2", @@ -72,7 +72,7 @@ consToSet: "", consFallback: "arch=amd64 cpu-cores=42 mem=2G tags=foo networks=net1,^net2", - effectiveEnvironCons: "arch=amd64 cpu-cores=42 mem=2G tags=foo networks=net1,^net2", + effectiveModelCons: "arch=amd64 cpu-cores=42 mem=2G tags=foo networks=net1,^net2", effectiveServiceCons: "", // set as given. 
effectiveUnitCons: "arch=amd64 cpu-cores=42 mem=2G tags=foo networks=net1,^net2", // set as given, then merged with fallbacks; since consToSet is @@ -85,7 +85,7 @@ consToSet: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", consFallback: "", - effectiveEnvironCons: "", + effectiveModelCons: "", effectiveServiceCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", effectiveUnitCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", effectiveMachineCons: "cpu-cores= cpu-power= instance-type= root-disk= tags= spaces= networks=", // container= is dropped @@ -94,7 +94,7 @@ consToSet: "", consFallback: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", - effectiveEnvironCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", + effectiveModelCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", effectiveServiceCons: "", effectiveUnitCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", effectiveMachineCons: "cpu-cores= cpu-power= instance-type= root-disk= tags= spaces= networks=", // container= is dropped @@ -102,7 +102,7 @@ about: "(explicitly) empty constraints and fallbacks are OK and stored as given", consToSet: "arch= mem= cpu-cores= container=", consFallback: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", - effectiveEnvironCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", + effectiveModelCons: "cpu-cores= cpu-power= root-disk= instance-type= container= tags= spaces= networks=", effectiveServiceCons: "arch= mem= cpu-cores= container=", effectiveUnitCons: "arch= container= cpu-cores= cpu-power= mem= root-disk= tags= spaces= networks=", effectiveMachineCons: "arch= cpu-cores= cpu-power= mem= root-disk= tags= spaces= networks=", // container= is dropped @@ -111,7 +111,7 @@ consToSet: "cpu-cores= arch= spaces= networks= cpu-power=", consFallback: "cpu-cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", - effectiveEnvironCons: "cpu-cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", + effectiveModelCons: "cpu-cores=42 arch=amd64 tags=foo spaces=default,^dmz mem=4G", effectiveServiceCons: "cpu-cores= arch= spaces= networks= cpu-power=", effectiveUnitCons: "arch= cpu-cores= cpu-power= mem=4G tags=foo spaces= networks=", effectiveMachineCons: "arch= cpu-cores= cpu-power= mem=4G tags=foo spaces= networks=", @@ -124,7 +124,7 @@ consToSet: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", consFallback: "cpu-cores= arch= tags=", - effectiveEnvironCons: "cpu-cores= arch= tags=", + effectiveModelCons: "cpu-cores= arch= tags=", effectiveServiceCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", effectiveUnitCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", effectiveMachineCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", @@ -133,7 +133,7 @@ consToSet: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", consFallback: "cpu-cores=12 root-disk=10G arch=i386 tags=bar networks=net1,^net2", - effectiveEnvironCons: "cpu-cores=12 root-disk=10G arch=i386 tags=bar networks=net1,^net2", + effectiveModelCons: "cpu-cores=12 root-disk=10G arch=i386 tags=bar networks=net1,^net2", effectiveServiceCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar 
networks=^dmz,db", effectiveUnitCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", effectiveMachineCons: "cpu-cores=42 root-disk=20G arch=amd64 tags=foo,bar networks=^dmz,db", @@ -142,7 +142,7 @@ consToSet: "mem=8G arch=amd64 cpu-cores=4 tags=bar", consFallback: "instance-type=small cpu-power=1000", // instance-type conflicts mem, arch - effectiveEnvironCons: "instance-type=small cpu-power=1000", + effectiveModelCons: "instance-type=small cpu-power=1000", effectiveServiceCons: "mem=8G arch=amd64 cpu-cores=4 tags=bar", // both of the following contain the explicitly set constraints after // resolving any conflicts with fallbacks (by dropping them). @@ -156,7 +156,7 @@ // a variation of the above case showing there's no difference // between deployment (service, unit) and provisioning (machine) // constraints when it comes to effective values. - effectiveEnvironCons: "networks=net1,^net3 tags=foo cpu-power=42", + effectiveModelCons: "networks=net1,^net3 tags=foo cpu-power=42", effectiveServiceCons: "cpu-power= tags= spaces=bar networks=", effectiveUnitCons: "cpu-power= tags= spaces=bar networks=", effectiveMachineCons: "cpu-power= tags= spaces=bar networks=", @@ -168,9 +168,9 @@ // service deployment constraints are transformed into machine // provisioning constraints, and the container type only makes // sense currently as a deployment constraint, so it's cleared - // when merging service/environment deployment constraints into + // when merging service/model deployment constraints into // effective machine provisioning constraints. - effectiveEnvironCons: "container=lxc mem=8G", + effectiveModelCons: "container=lxc mem=8G", effectiveServiceCons: "container=kvm arch=amd64", effectiveUnitCons: "container=kvm mem=8G arch=amd64", effectiveMachineCons: "mem=8G arch=amd64", @@ -182,11 +182,11 @@ "test %d: %s\nconsToSet: %q\nconsFallback: %q\n", i, t.about, t.consToSet, t.consFallback, ) - // Set fallbacks as environment constraints and verify them. - err := s.State.SetEnvironConstraints(constraints.MustParse(t.consFallback)) + // Set fallbacks as model constraints and verify them. + err := s.State.SetModelConstraints(constraints.MustParse(t.consFallback)) c.Check(err, jc.ErrorIsNil) - econs, err := s.State.EnvironConstraints() - c.Check(econs, jc.DeepEquals, constraints.MustParse(t.effectiveEnvironCons)) + econs, err := s.State.ModelConstraints() + c.Check(econs, jc.DeepEquals, constraints.MustParse(t.effectiveModelCons)) // Set the machine provisioning constraints. m, err := s.addOneMachine(c, constraints.MustParse(t.consToSet)) c.Check(err, jc.ErrorIsNil) @@ -211,11 +211,11 @@ "test %d: %s\nconsToSet: %q\nconsFallback: %q\n", i, t.about, t.consToSet, t.consFallback, ) - // Set fallbacks as environment constraints and verify them. - err := s.State.SetEnvironConstraints(constraints.MustParse(t.consFallback)) + // Set fallbacks as model constraints and verify them. + err := s.State.SetModelConstraints(constraints.MustParse(t.consFallback)) c.Check(err, jc.ErrorIsNil) - econs, err := s.State.EnvironConstraints() - c.Check(econs, jc.DeepEquals, constraints.MustParse(t.effectiveEnvironCons)) + econs, err := s.State.ModelConstraints() + c.Check(econs, jc.DeepEquals, constraints.MustParse(t.effectiveModelCons)) // Set the service deployment constraints. 
 		err = service.SetConstraints(constraints.MustParse(t.consToSet))
 		c.Check(err, jc.ErrorIsNil)

=== modified file 'src/github.com/juju/juju/state/container.go'
--- src/github.com/juju/juju/state/container.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/state/container.go	2016-03-22 15:18:22 +0000
@@ -15,10 +15,10 @@
 // machineContainers holds the machine ids of all the containers belonging to a parent machine.
 // All machines have an associated container ref doc, regardless of whether they host any containers.
 type machineContainers struct {
-	DocID    string   `bson:"_id"`
-	Id       string   `bson:"machineid"`
-	EnvUUID  string   `bson:"env-uuid"`
-	Children []string `bson:",omitempty"`
+	DocID     string   `bson:"_id"`
+	Id        string   `bson:"machineid"`
+	ModelUUID string   `bson:"model-uuid"`
+	Children  []string `bson:",omitempty"`
 }
 
 func (st *State) addChildToContainerRefOp(parentId string, childId string) txn.Op {

=== modified file 'src/github.com/juju/juju/state/database.go'
--- src/github.com/juju/juju/state/database.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/state/database.go	2016-03-22 15:18:22 +0000
@@ -34,9 +34,9 @@
 	// might or might not have its own session, depending on the Database; the
 	// closer must always be called regardless.
 	//
-	// If the schema specifies environment-filtering for the named collection,
+	// If the schema specifies model-filtering for the named collection,
 	// the returned collection will automatically filter queries; for details,
-	// see envStateCollection.
+	// see modelStateCollection.
 	GetCollection(name string) (mongo.Collection, SessionCloser)
 
 	// TransactionRunner() returns a runner responsible for making changes to
@@ -47,7 +47,7 @@
 	// It will reject transactions that reference raw-access (or unknown)
 	// collections; it will automatically rewrite operations that reference
 	// non-global collections; and it will ensure that non-global documents can
-	// only be inserted while the corresponding environment is still Alive.
+	// only be inserted while the corresponding model is still Alive.
 	TransactionRunner() (jujutxn.Runner, SessionCloser)
 
 	// Schema returns the schema used to load the database. The returned schema
@@ -65,9 +65,9 @@
 	// indexes listed here will be EnsureIndex~ed before state is opened.
 	indexes []mgo.Index
 
-	// global collections will not have environment filtering applied. Non-
+	// global collections will not have model filtering applied. Non-
 	// global collections will have both transactions and reads filtered by
-	// relevant environment uuid.
+	// relevant model uuid.
 	global bool
 
 	// rawAccess collections can be safely accessed as a mongo.WriteCollection.
@@ -95,10 +95,10 @@
 // Load causes all recorded collections to be created and indexed as specified;
 // the returned Database will filter queries and transactions according to the
-// suppplied environment UUID.
-func (schema collectionSchema) Load(db *mgo.Database, environUUID string) (Database, error) {
-	if !names.IsValidEnvironment(environUUID) {
-		return nil, errors.New("invalid environment UUID")
+// supplied model UUID.
+func (schema collectionSchema) Load(db *mgo.Database, modelUUID string) (Database, error) {
+	if !names.IsValidModel(modelUUID) {
+		return nil, errors.New("invalid model UUID")
 	}
 	for name, info := range schema {
 		rawCollection := db.C(name)
@@ -115,9 +115,9 @@
 		}
 	}
 	return &database{
-		raw:         db,
-		schema:      schema,
-		environUUID: environUUID,
+		raw:       db,
+		schema:    schema,
+		modelUUID: modelUUID,
 	}, nil
 }
@@ -141,9 +141,9 @@
 	// schema specifies how the various collections must be handled.
 	schema collectionSchema
 
-	// environUUID is used to automatically filter queries and operations on
+	// modelUUID is used to automatically filter queries and operations on
 	// certain collections (as defined in .schema).
-	environUUID string
+	modelUUID string
 
 	// runner exists for testing purposes; if non-nil, the result of
 	// TransactionRunner will always ultimately use this value to run
@@ -159,11 +159,11 @@
 func (db *database) CopySession() (Database, SessionCloser) {
 	session := db.raw.Session.Copy()
 	return &database{
-		raw:         db.raw.With(session),
-		schema:      db.schema,
-		environUUID: db.environUUID,
-		runner:      db.runner,
-		ownSession:  true,
+		raw:        db.raw.With(session),
+		schema:     db.schema,
+		modelUUID:  db.modelUUID,
+		runner:     db.runner,
+		ownSession: true,
 	}, session.Close
 }
@@ -182,11 +182,11 @@
 		collection, closer = mongo.CollectionFromName(db.raw, name)
 	}
 
-	// Apply environment filtering.
+	// Apply model filtering.
 	if !info.global {
-		collection = &envStateCollection{
+		collection = &modelStateCollection{
 			WriteCollection: collection.Writeable(),
-			envUUID:         db.environUUID,
+			modelUUID:       db.modelUUID,
 		}
 	}
@@ -213,9 +213,9 @@
 		params := jujutxn.RunnerParams{Database: raw}
 		runner = jujutxn.NewRunner(params)
 	}
-	return &multiEnvRunner{
+	return &multiModelRunner{
 		rawRunner: runner,
-		envUUID:   db.environUUID,
+		modelUUID: db.modelUUID,
 		schema:    db.schema,
 	}, closer
 }

=== modified file 'src/github.com/juju/juju/state/distribution.go'
--- src/github.com/juju/juju/state/distribution.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/state/distribution.go	2016-03-22 15:18:22 +0000
@@ -22,7 +22,7 @@
 	if u.st.policy == nil {
 		return candidates, nil
 	}
-	cfg, err := u.st.EnvironConfig()
+	cfg, err := u.st.ModelConfig()
 	if err != nil {
 		return nil, err
 	}

=== modified file 'src/github.com/juju/juju/state/endpoint.go'
--- src/github.com/juju/juju/state/endpoint.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/state/endpoint.go	2016-03-22 15:18:22 +0000
@@ -6,7 +6,7 @@
 import (
 	"fmt"
 
-	"gopkg.in/juju/charm.v5"
+	"gopkg.in/juju/charm.v6-unstable"
 )
 
 // counterpartRole returns the RelationRole that this RelationRole

=== added file 'src/github.com/juju/juju/state/endpoint_bindings.go'
--- src/github.com/juju/juju/state/endpoint_bindings.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/endpoint_bindings.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,318 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package state
+
+import (
+	"github.com/juju/errors"
+	jujutxn "github.com/juju/txn"
+	"github.com/juju/utils/set"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/mgo.v2/txn"
+)
+
+// endpointBindingsDoc represents how a service's endpoints are bound to spaces.
+// The DocID field contains the service's global key, so there is always one
+// endpointBindingsDoc per service.
+type endpointBindingsDoc struct {
+	// DocID is always the same as a service's global key.
+	DocID   string `bson:"_id"`
+	EnvUUID string `bson:"env-uuid"`
+
+	// Bindings maps a service endpoint name to the space name it is bound to.
+	Bindings bindingsMap `bson:"bindings"`
+
+	// TxnRevno is used to assert the collection has not changed since this
+	// document was fetched.
+	TxnRevno int64 `bson:"txn-revno"`
+}
+
+// bindingsMap is the underlying type stored in mongo for bindings.
+type bindingsMap map[string]string
+
+// SetBSON ensures any special characters ($ or .) are unescaped in keys after
+// unmarshalling the raw BSON coming from the stored document.
+func (bp *bindingsMap) SetBSON(raw bson.Raw) error {
+	rawMap := make(map[string]string)
+	if err := raw.Unmarshal(rawMap); err != nil {
+		return err
+	}
+	for key, value := range rawMap {
+		newKey := unescapeReplacer.Replace(key)
+		if newKey != key {
+			delete(rawMap, key)
+		}
+		rawMap[newKey] = value
+	}
+	*bp = bindingsMap(rawMap)
+	return nil
+}
+
+// GetBSON ensures any special characters ($ or .) are escaped in keys before
+// marshalling the map into BSON and storing in mongo.
+func (b bindingsMap) GetBSON() (interface{}, error) {
+	if b == nil || len(b) == 0 {
+		// We need to return a non-nil map otherwise bson.Unmarshal
+		// call will fail when reading the doc back.
+		return make(map[string]string), nil
+	}
+	rawMap := make(map[string]string, len(b))
+	for key, value := range b {
+		newKey := escapeReplacer.Replace(key)
+		rawMap[newKey] = value
+	}
+	return rawMap, nil
+}
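GetBSON and SetBSON above exist because MongoDB disallows "$" and "." in field names, so endpoint names containing them must be escaped on write and unescaped on read. A self-contained round trip with an illustrative replacer pair (the real escapeReplacer/unescapeReplacer are defined elsewhere in the state package, and may use different replacement sequences):

package main

import (
	"fmt"
	"strings"
)

// Illustrative stand-ins for the package-level replacers.
var (
	escape   = strings.NewReplacer("$", "%24", ".", "%2E")
	unescape = strings.NewReplacer("%24", "$", "%2E", ".")
)

func main() {
	bindings := map[string]string{"odd.name": "db-space"}

	// As in GetBSON: escape keys before storing.
	stored := make(map[string]string, len(bindings))
	for key, value := range bindings {
		stored[escape.Replace(key)] = value
	}
	fmt.Println(stored) // map[odd%2Ename:db-space] - now safe as a mongo key

	// As in SetBSON: unescape keys after loading.
	restored := make(map[string]string, len(stored))
	for key, value := range stored {
		restored[unescape.Replace(key)] = value
	}
	fmt.Println(restored) // map[odd.name:db-space]
}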
+
+// mergeBindings returns the effective bindings, by combining the default
+// bindings based on the given charm metadata, overriding them first with
+// matching oldMap values, and then with newMap values (for the same keys).
+// newMap and oldMap are both optional and will be ignored when empty. Returns
+// a map containing only those bindings that need updating, and a sorted slice
+// of keys to remove (if any) - those are present in oldMap but missing in both
+// newMap and defaults.
+func mergeBindings(newMap, oldMap map[string]string, meta *charm.Meta) (map[string]string, []string, error) {
+
+	defaultsMap, err := defaultEndpointBindingsForCharm(meta)
+	if err != nil {
+		return nil, nil, errors.Trace(err)
+	}
+
+	// defaultsMap contains all endpoints that must be bound for the given charm
+	// metadata, but we need to figure out which value to use for each key.
+	updated := make(map[string]string)
+	for key, defaultValue := range defaultsMap {
+		effectiveValue := defaultValue
+
+		oldValue, hasOld := oldMap[key]
+		if hasOld && oldValue != effectiveValue {
+			effectiveValue = oldValue
+		}
+
+		newValue, hasNew := newMap[key]
+		if hasNew && newValue != effectiveValue {
+			effectiveValue = newValue
+		}
+
+		updated[key] = effectiveValue
+	}
+
+	// Any extra bindings in newMap are most likely extraneous, but add them
+	// anyway and let the validation handle them.
+	for key, newValue := range newMap {
+		if _, defaultExists := defaultsMap[key]; !defaultExists {
+			updated[key] = newValue
+		}
+	}
+
+	// All defaults were processed, so anything else in oldMap not about to be
+	// updated and not having a default for the given metadata needs to be
+	// removed.
+	removedKeys := set.NewStrings()
+	for key := range oldMap {
+		if _, updating := updated[key]; !updating {
+			removedKeys.Add(key)
+		}
+		if _, defaultExists := defaultsMap[key]; !defaultExists {
+			removedKeys.Add(key)
+		}
+	}
+	removed := removedKeys.SortedValues()
+	return updated, removed, nil
+}
+
+// createEndpointBindingsOp returns the op needed to create new endpoint
+// bindings using the optional givenMap and the specified charm metadata for
+// determining defaults and to validate the effective bindings.
+func createEndpointBindingsOp(st *State, key string, givenMap map[string]string, meta *charm.Meta) (txn.Op, error) {
+
+	// No existing map to merge, just use the defaults.
+	initialMap, _, err := mergeBindings(givenMap, nil, meta)
+	if err != nil {
+		return txn.Op{}, errors.Trace(err)
+	}
+
+	// Validate the bindings before inserting.
+	if err := validateEndpointBindingsForCharm(st, initialMap, meta); err != nil {
+		return txn.Op{}, errors.Trace(err)
+	}
+
+	return txn.Op{
+		C:      endpointBindingsC,
+		Id:     key,
+		Assert: txn.DocMissing,
+		Insert: endpointBindingsDoc{
+			Bindings: initialMap,
+		},
+	}, nil
+}
+
+// updateEndpointBindingsOp returns an op that merges the existing bindings with
+// givenMap, using newMeta to validate the merged bindings, and asserting the
+// existing ones haven't changed since we fetched them.
+func updateEndpointBindingsOp(st *State, key string, givenMap map[string]string, newMeta *charm.Meta) (txn.Op, error) {
+	// Fetch existing bindings.
+	existingMap, txnRevno, err := readEndpointBindings(st, key)
+	if err != nil && !errors.IsNotFound(err) {
+		return txn.Op{}, errors.Trace(err)
+	}
+
+	// Merge existing with given as needed.
+	updatedMap, removedKeys, err := mergeBindings(givenMap, existingMap, newMeta)
+	if err != nil {
+		return txn.Op{}, errors.Trace(err)
+	}
+
+	// Validate the bindings before updating.
+	if err := validateEndpointBindingsForCharm(st, updatedMap, newMeta); err != nil {
+		return txn.Op{}, errors.Trace(err)
+	}
+
+	// Prepare the update operations.
+	sanitize := inSubdocEscapeReplacer("bindings")
+	changes := make(bson.M, len(updatedMap))
+	for endpoint, space := range updatedMap {
+		changes[sanitize(endpoint)] = space
+	}
+	deletes := make(bson.M, len(removedKeys))
+	for _, endpoint := range removedKeys {
+		deletes[sanitize(endpoint)] = 1
+	}
+
+	var update bson.D
+	if len(changes) != 0 {
+		update = append(update, bson.DocElem{Name: "$set", Value: changes})
+	}
+	if len(deletes) != 0 {
+		update = append(update, bson.DocElem{Name: "$unset", Value: deletes})
+	}
+	if len(update) == 0 {
+		return txn.Op{}, jujutxn.ErrNoOperations
+	}
+	updateOp := txn.Op{
+		C:      endpointBindingsC,
+		Id:     key,
+		Update: update,
+	}
+	if existingMap != nil {
+		// Only assert existing haven't changed when they actually exist.
+		updateOp.Assert = bson.D{{"txn-revno", txnRevno}}
+	}
+	return updateOp, nil
+}
+
+// removeEndpointBindingsOp returns an op removing the bindings for the given
+// key, without asserting they exist in the first place.
+func removeEndpointBindingsOp(key string) txn.Op {
+	return txn.Op{
+		C:      endpointBindingsC,
+		Id:     key,
+		Remove: true,
+	}
+}
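For a concrete picture of the merge semantics these ops build on, consider the sketch below (same-package context; the charm and endpoint names are illustrative). Old values win over defaults, and keys with neither a default nor a new value are dropped:

// Sketch only, as if written inside this package.
func exampleMergeBindings() {
	meta := &charm.Meta{
		Provides: map[string]charm.Relation{
			"website": {Name: "website", Role: charm.RoleProvider, Interface: "http"},
		},
		Requires: map[string]charm.Relation{
			"db": {Name: "db", Role: charm.RoleRequirer, Interface: "mysql"},
		},
	}
	oldMap := map[string]string{
		"website": "client", // kept: the existing value overrides the default
		"legacy":  "x",      // dropped: no default and no new value for it
	}
	updated, removed, err := mergeBindings(nil, oldMap, meta)
	// err == nil
	// updated == map[string]string{"website": "client", "db": ""}
	// removed == []string{"legacy"}
	_, _, _ = updated, removed, err
}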
+
+// readEndpointBindings returns the stored bindings and TxnRevno for the given
+// service global key, or an error satisfying errors.IsNotFound() otherwise.
+func readEndpointBindings(st *State, key string) (map[string]string, int64, error) {
+	endpointBindings, closer := st.getCollection(endpointBindingsC)
+	defer closer()
+
+	var doc endpointBindingsDoc
+	err := endpointBindings.FindId(key).One(&doc)
+	if err == mgo.ErrNotFound {
+		return nil, 0, errors.NotFoundf("endpoint bindings for %q", key)
+	}
+	if err != nil {
+		return nil, 0, errors.Annotatef(err, "cannot get endpoint bindings for %q", key)
+	}
+
+	return doc.Bindings, doc.TxnRevno, nil
+}
+
+// validateEndpointBindingsForCharm verifies that all endpoint names in bindings
+// are valid for the given charm metadata, and each endpoint is bound to a known
+// space - otherwise an error satisfying errors.IsNotValid() will be returned.
+func validateEndpointBindingsForCharm(st *State, bindings map[string]string, charmMeta *charm.Meta) error {
+	if st == nil {
+		return errors.NotValidf("nil state")
+	}
+	if bindings == nil {
+		return errors.NotValidf("nil bindings")
+	}
+	if charmMeta == nil {
+		return errors.NotValidf("nil charm metadata")
+	}
+	spaces, err := st.AllSpaces()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	spacesNamesSet := set.NewStrings()
+	for _, space := range spaces {
+		spacesNamesSet.Add(space.Name())
+	}
+
+	allRelations, err := CombinedCharmRelations(charmMeta)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	endpointsNamesSet := set.NewStrings()
+	for name := range allRelations {
+		endpointsNamesSet.Add(name)
+	}
+
+	// Ensure there are no unknown endpoints and/or spaces specified.
+	//
+	// TODO(dimitern): This assumes spaces cannot be deleted when they are used
+	// in bindings. In follow-up, this will be enforced by using refcounts on
+	// spaces.
+	for endpoint, space := range bindings {
+		if !endpointsNamesSet.Contains(endpoint) {
+			return errors.NotValidf("unknown endpoint %q", endpoint)
+		}
+		if space != "" && !spacesNamesSet.Contains(space) {
+			return errors.NotValidf("unknown space %q", space)
+		}
+	}
+	return nil
+}
+
+// defaultEndpointBindingsForCharm populates a bindings map containing each
+// endpoint of the given charm metadata bound to an empty space.
+func defaultEndpointBindingsForCharm(charmMeta *charm.Meta) (map[string]string, error) {
+	allRelations, err := CombinedCharmRelations(charmMeta)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	bindings := make(map[string]string, len(allRelations))
+	for name := range allRelations {
+		bindings[name] = ""
+	}
+	return bindings, nil
+}
+
+// CombinedCharmRelations returns the relations defined in the given charm
+// metadata (from Provides, Requires, and Peers) in a single map. This works
+// because charm relation names must be unique regardless of their kind.
+//
+// TODO(dimitern): 2015-11-27 bug http://pad.lv/1520623
+// This should be moved directly into the charm repo, as it's
+// generally useful.
+func CombinedCharmRelations(charmMeta *charm.Meta) (map[string]charm.Relation, error) { + if charmMeta == nil { + return nil, errors.Errorf("nil charm metadata") + } + combined := make(map[string]charm.Relation) + for name, relation := range charmMeta.Provides { + combined[name] = relation + } + for name, relation := range charmMeta.Requires { + combined[name] = relation + } + for name, relation := range charmMeta.Peers { + combined[name] = relation + } + return combined, nil +} === added file 'src/github.com/juju/juju/state/endpoint_bindings_test.go' --- src/github.com/juju/juju/state/endpoint_bindings_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/state/endpoint_bindings_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,216 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/state" +) + +type BindingsSuite struct { + ConnSuite + + oldMeta *charm.Meta + oldDefaults map[string]string + newMeta *charm.Meta + newDefaults map[string]string +} + +var _ = gc.Suite(&BindingsSuite{}) + +func (s *BindingsSuite) SetUpTest(c *gc.C) { + s.ConnSuite.SetUpTest(c) + + const dummyCharmWithOneOfEachRelationType = ` +name: dummy +summary: "That's a dummy charm with one relation of each type." +description: "This is a longer description." +provides: + foo1: + interface: phony +requires: + bar1: + interface: fake +peers: + self: + interface: dummy +` + oldCharm := s.AddMetaCharm(c, "dummy", dummyCharmWithOneOfEachRelationType, 1) + s.oldMeta = oldCharm.Meta() + s.oldDefaults = map[string]string{ + "foo1": "", + "bar1": "", + "self": "", + } + + const dummyCharmWithTwoOfEachRelationType = ` +name: dummy +summary: "That's a dummy charm with 2 relations for each type." +description: "This is a longer description." +provides: + foo1: + interface: phony + foo2: + interface: secret +requires: + bar2: real + bar3: + interface: cool +peers: + self: + interface: dummy + me: peer +` + newCharm := s.AddMetaCharm(c, "dummy", dummyCharmWithTwoOfEachRelationType, 2) + s.newMeta = newCharm.Meta() + s.newDefaults = map[string]string{ + "foo1": "", + "foo2": "", + "bar2": "", + "bar3": "", + "self": "", + "me": "", + } + + // Add some spaces to use in bindings, but notably NOT the default space, as + // it should be always allowed. + _, err := s.State.AddSpace("client", "", nil, true) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddSpace("apps", "", nil, false) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *BindingsSuite) TestMergeBindings(c *gc.C) { + // The test cases below are not exhaustive, but just check basic + // functionality. Most of the logic is tested by calling service.SetCharm() + // in various ways. 
+ for i, test := range []struct { + about string + newMap, oldMap map[string]string + meta *charm.Meta + updated map[string]string + removed []string + }{{ + about: "defaults used when both newMap and oldMap are nil", + newMap: nil, + oldMap: nil, + meta: s.oldMeta, + updated: s.copyMap(s.oldDefaults), + removed: nil, + }, { + about: "oldMap overrides defaults, newMap is nil", + newMap: nil, + oldMap: map[string]string{ + "foo1": "client", + "self": "db", + }, + meta: s.oldMeta, + updated: map[string]string{ + "foo1": "client", + "bar1": "", + "self": "db", + }, + removed: nil, + }, { + about: "oldMap overrides defaults, newMap overrides oldMap", + newMap: map[string]string{ + "foo1": "", + "self": "db", + "bar1": "client", + }, + oldMap: map[string]string{ + "foo1": "client", + "bar1": "db", + }, + meta: s.oldMeta, + updated: map[string]string{ + "foo1": "", + "bar1": "client", + "self": "db", + }, + removed: nil, + }, { + about: "newMap overrides defaults, oldMap is nil", + newMap: map[string]string{ + "self": "db", + }, + oldMap: nil, + meta: s.oldMeta, + updated: map[string]string{ + "foo1": "", + "bar1": "", + "self": "db", + }, + removed: nil, + }, { + about: "obsolete entries in oldMap missing in defaults are removed", + newMap: nil, + oldMap: map[string]string{ + "any-old-thing": "boo", + "self": "db", + }, + meta: s.oldMeta, + updated: map[string]string{ + "foo1": "", + "bar1": "", + "self": "db", + }, + removed: []string{"any-old-thing"}, + }, { + about: "new endpoints use defaults unless specified in newMap, existing ones are kept", + newMap: map[string]string{ + "foo2": "db", + "me": "client", + "bar3": "db", + }, + oldMap: s.copyMap(s.oldDefaults), + meta: s.newMeta, + updated: map[string]string{ + "foo1": "", + "foo2": "db", + "bar2": "", + "bar3": "db", + "self": "", + "me": "client", + }, + removed: []string{"bar1"}, + }} { + c.Logf("test #%d: %s", i, test.about) + + updated, removed, err := state.MergeBindings(test.newMap, test.oldMap, test.meta) + c.Check(err, jc.ErrorIsNil) + c.Check(updated, jc.DeepEquals, test.updated) + c.Check(removed, jc.DeepEquals, test.removed) + } +} + +func (s *BindingsSuite) TestCombinedCharmRelations(c *gc.C) { + _, err := state.CombinedCharmRelations(nil) + c.Check(err, gc.ErrorMatches, "nil charm metadata") + + meta := s.newMeta + allRelations, err := state.CombinedCharmRelations(meta) + c.Check(err, jc.ErrorIsNil) + combinedLength := len(meta.Provides) + len(meta.Requires) + len(meta.Peers) + c.Check(allRelations, gc.HasLen, combinedLength) + c.Check(allRelations, jc.DeepEquals, map[string]charm.Relation{ + "foo1": meta.Provides["foo1"], + "foo2": meta.Provides["foo2"], + "bar2": meta.Requires["bar2"], + "bar3": meta.Requires["bar3"], + "self": meta.Peers["self"], + "me": meta.Peers["me"], + }) +} + +func (s *BindingsSuite) copyMap(input map[string]string) map[string]string { + output := make(map[string]string, len(input)) + for key, value := range input { + output[key] = value + } + return output +} === modified file 'src/github.com/juju/juju/state/endpoint_test.go' --- src/github.com/juju/juju/state/endpoint_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/endpoint_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/state" ) === removed file 'src/github.com/juju/juju/state/environ.go' --- src/github.com/juju/juju/state/environ.go 2016-03-14 14:26:14 +0000 +++ 
src/github.com/juju/juju/state/environ.go 1970-01-01 00:00:00 +0000 @@ -1,560 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package state - -import ( - "strings" - - "github.com/juju/errors" - "github.com/juju/names" - jujutxn "github.com/juju/txn" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/txn" - - "github.com/juju/juju/environs/config" - "github.com/juju/juju/version" -) - -// environGlobalKey is the key for the environment, its -// settings and constraints. -const environGlobalKey = "e" - -// Environment represents the state of an environment. -type Environment struct { - st *State - doc environmentDoc -} - -// environmentDoc represents the internal state of the environment in MongoDB. -type environmentDoc struct { - UUID string `bson:"_id"` - Name string - Life Life - Owner string `bson:"owner"` - ServerUUID string `bson:"server-uuid"` - - // LatestAvailableTools is a string representing the newest version - // found while checking streams for new versions. - LatestAvailableTools string `bson:"available-tools,omitempty"` -} - -// StateServerEnvironment returns the environment that was bootstrapped. -// This is the only environment that can have state server machines. -// The owner of this environment is also considered "special", in that -// they are the only user that is able to create other users (until we -// have more fine grained permissions), and they cannot be disabled. -func (st *State) StateServerEnvironment() (*Environment, error) { - ssinfo, err := st.StateServerInfo() - if err != nil { - return nil, errors.Annotate(err, "could not get state server info") - } - - environments, closer := st.getCollection(environmentsC) - defer closer() - - env := &Environment{st: st} - uuid := ssinfo.EnvironmentTag.Id() - if err := env.refresh(environments.FindId(uuid)); err != nil { - return nil, errors.Trace(err) - } - return env, nil -} - -// Environment returns the environment entity. -func (st *State) Environment() (*Environment, error) { - environments, closer := st.getCollection(environmentsC) - defer closer() - - env := &Environment{st: st} - uuid := st.environTag.Id() - if err := env.refresh(environments.FindId(uuid)); err != nil { - return nil, errors.Trace(err) - } - return env, nil -} - -// GetEnvironment looks for the environment identified by the uuid passed in. -func (st *State) GetEnvironment(tag names.EnvironTag) (*Environment, error) { - environments, closer := st.getCollection(environmentsC) - defer closer() - - env := &Environment{st: st} - if err := env.refresh(environments.FindId(tag.Id())); err != nil { - return nil, errors.Trace(err) - } - return env, nil -} - -// AllEnvironments returns all the environments in the system. -func (st *State) AllEnvironments() ([]*Environment, error) { - environments, closer := st.getCollection(environmentsC) - defer closer() - - var envDocs []environmentDoc - err := environments.Find(nil).All(&envDocs) - if err != nil { - return nil, err - } - - result := make([]*Environment, len(envDocs)) - for i, doc := range envDocs { - result[i] = &Environment{st: st, doc: doc} - } - - return result, nil -} - -// NewEnvironment creates a new environment with its own UUID and -// prepares it for use. Environment and State instances for the new -// environment are returned. -// -// The state server environment's UUID is attached to the new -// environment's document. 
Having the server UUIDs stored with each -// environment document means that we have a way to represent external -// environments, perhaps for future use around cross environment -// relations. -func (st *State) NewEnvironment(cfg *config.Config, owner names.UserTag) (_ *Environment, _ *State, err error) { - if owner.IsLocal() { - if _, err := st.User(owner); err != nil { - return nil, nil, errors.Annotate(err, "cannot create environment") - } - } - - ssEnv, err := st.StateServerEnvironment() - if err != nil { - return nil, nil, errors.Annotate(err, "could not load state server environment") - } - - uuid, ok := cfg.UUID() - if !ok { - return nil, nil, errors.Errorf("environment uuid was not supplied") - } - newState, err := st.ForEnviron(names.NewEnvironTag(uuid)) - if err != nil { - return nil, nil, errors.Annotate(err, "could not create state for new environment") - } - defer func() { - if err != nil { - newState.Close() - } - }() - - ops, err := newState.envSetupOps(cfg, uuid, ssEnv.UUID(), owner) - if err != nil { - return nil, nil, errors.Annotate(err, "failed to create new environment") - } - err = newState.runTransaction(ops) - if err == txn.ErrAborted { - - // We have a unique key restriction on the "owner" and "name" fields, - // which will cause the insert to fail if there is another record with - // the same "owner" and "name" in the collection. If the txn is - // aborted, check if it is due to the unique key restriction. - environments, closer := st.getCollection(environmentsC) - defer closer() - envCount, countErr := environments.Find(bson.D{ - {"owner", owner.Canonical()}, - {"name", cfg.Name()}}, - ).Count() - if countErr != nil { - err = errors.Trace(countErr) - } else if envCount > 0 { - err = errors.AlreadyExistsf("environment %q for %s", cfg.Name(), owner.Canonical()) - } else { - err = errors.New("environment already exists") - } - } - if err != nil { - return nil, nil, errors.Trace(err) - } - - newEnv, err := newState.Environment() - if err != nil { - return nil, nil, errors.Trace(err) - } - - return newEnv, newState, nil -} - -// Tag returns a name identifying the environment. -// The returned name will be different from other Tag values returned -// by any other entities from the same state. -func (e *Environment) Tag() names.Tag { - return e.EnvironTag() -} - -// EnvironTag is the concrete environ tag for this environment. -func (e *Environment) EnvironTag() names.EnvironTag { - return names.NewEnvironTag(e.doc.UUID) -} - -// ServerTag is the environ tag for the server that the environment is running -// within. -func (e *Environment) ServerTag() names.EnvironTag { - return names.NewEnvironTag(e.doc.ServerUUID) -} - -// UUID returns the universally unique identifier of the environment. -func (e *Environment) UUID() string { - return e.doc.UUID -} - -// ServerUUID returns the universally unique identifier of the server in which -// the environment is running. -func (e *Environment) ServerUUID() string { - return e.doc.ServerUUID -} - -// Name returns the human friendly name of the environment. -func (e *Environment) Name() string { - return e.doc.Name -} - -// Life returns whether the environment is Alive, Dying or Dead. -func (e *Environment) Life() Life { - return e.doc.Life -} - -// Owner returns tag representing the owner of the environment. -// The owner is the user that created the environment. -func (e *Environment) Owner() names.UserTag { - return names.NewUserTag(e.doc.Owner) -} - -// Config returns the config for the environment. 
-func (e *Environment) Config() (*config.Config, error) { - if e.st.environTag.Id() == e.UUID() { - return e.st.EnvironConfig() - } - envState := e.st - if envState.environTag != e.EnvironTag() { - // The active environment isn't the same as the environment - // we are querying. - var err error - envState, err = e.st.ForEnviron(e.EnvironTag()) - if err != nil { - return nil, errors.Trace(err) - } - defer envState.Close() - } - return envState.EnvironConfig() -} - -// UpdateLatestToolsVersion looks up for the latest available version of -// juju tools and updates environementDoc with it. -func (e *Environment) UpdateLatestToolsVersion(ver version.Number) error { - v := ver.String() - // TODO(perrito666): I need to assert here that there isn't a newer - // version in place. - ops := []txn.Op{{ - C: environmentsC, - Id: e.doc.UUID, - Update: bson.D{{"$set", bson.D{{"available-tools", v}}}}, - }} - err := e.st.runTransaction(ops) - if err != nil { - return errors.Trace(err) - } - return e.Refresh() -} - -// LatestToolsVersion returns the newest version found in the last -// check in the streams. -// Bear in mind that the check was performed filtering only -// new patches for the current major.minor. (major.minor.patch) -func (e *Environment) LatestToolsVersion() version.Number { - ver := e.doc.LatestAvailableTools - if ver == "" { - return version.Zero - } - v, err := version.Parse(ver) - if err != nil { - // This is being stored from a valid version but - // in case this data would beacame corrupt It is not - // worth to fail because of it. - return version.Zero - } - return v -} - -// globalKey returns the global database key for the environment. -func (e *Environment) globalKey() string { - return environGlobalKey -} - -func (e *Environment) Refresh() error { - environments, closer := e.st.getCollection(environmentsC) - defer closer() - return e.refresh(environments.FindId(e.UUID())) -} - -func (e *Environment) refresh(query *mgo.Query) error { - err := query.One(&e.doc) - if err == mgo.ErrNotFound { - return errors.NotFoundf("environment") - } - return err -} - -// Users returns a slice of all users for this environment. -func (e *Environment) Users() ([]*EnvironmentUser, error) { - if e.st.EnvironUUID() != e.UUID() { - return nil, errors.New("cannot lookup environment users outside the current environment") - } - coll, closer := e.st.getCollection(envUsersC) - defer closer() - - var userDocs []envUserDoc - err := coll.Find(nil).All(&userDocs) - if err != nil { - return nil, errors.Trace(err) - } - - var envUsers []*EnvironmentUser - for _, doc := range userDocs { - envUsers = append(envUsers, &EnvironmentUser{ - st: e.st, - doc: doc, - }) - } - - return envUsers, nil -} - -// Destroy sets the environment's lifecycle to Dying, preventing -// addition of services or machines to state. -func (e *Environment) Destroy() (err error) { - defer errors.DeferredAnnotatef(&err, "failed to destroy environment") - - buildTxn := func(attempt int) ([]txn.Op, error) { - - // On the first attempt, we assume memory state is recent - // enough to try using... - if attempt != 0 { - // ...but on subsequent attempts, we read fresh environ - // state from the DB. 
Note that we do *not* refresh `e` - // itself, as detailed in doc/hacking-state.txt - if e, err = e.st.Environment(); err != nil { - return nil, errors.Trace(err) - } - } - - ops, err := e.destroyOps() - if err == errEnvironNotAlive { - return nil, jujutxn.ErrNoOperations - } else if err != nil { - return nil, errors.Trace(err) - } - - return ops, nil - } - return e.st.run(buildTxn) -} - -// errEnvironNotAlive is a signal emitted from destroyOps to indicate -// that environment destruction is already underway. -var errEnvironNotAlive = errors.New("environment is no longer alive") - -// destroyOps returns the txn operations necessary to begin environ -// destruction, or an error indicating why it can't. -func (e *Environment) destroyOps() ([]txn.Op, error) { - if e.Life() != Alive { - return nil, errEnvironNotAlive - } - - err := e.ensureDestroyable() - if err != nil { - return nil, errors.Trace(err) - } - - uuid := e.UUID() - ops := []txn.Op{{ - C: environmentsC, - Id: uuid, - Assert: isEnvAliveDoc, - Update: bson.D{{"$set", bson.D{{"life", Dying}}}}, - }} - - if uuid == e.doc.ServerUUID { - if count, err := hostedEnvironCount(e.st); err != nil { - return nil, errors.Trace(err) - } else if count != 0 { - return nil, errors.Errorf("hosting %d other environments", count) - } - ops = append(ops, assertNoHostedEnvironsOp()) - } else { - // When we're destroying a hosted environment, no further - // checks are necessary -- we just need to make sure we - // update the refcount. - ops = append(ops, decHostedEnvironCountOp()) - } - - // Because txn operations execute in order, and may encounter - // arbitrarily long delays, we need to make sure every op - // causes a state change that's still consistent; so we make - // sure the cleanup op is the last thing that will execute. - cleanupOp := e.st.newCleanupOp(cleanupServicesForDyingEnvironment, uuid) - ops = append(ops, cleanupOp) - return ops, nil -} - -// checkManualMachines checks if any of the machines in the slice were -// manually provisioned, and are non-manager machines. These machines -// must (currently) be manually destroyed via destroy-machine before -// destroy-environment can successfully complete. -func checkManualMachines(machines []*Machine) error { - var ids []string - for _, m := range machines { - if m.IsManager() { - continue - } - manual, err := m.IsManual() - if err != nil { - return errors.Trace(err) - } - if manual { - ids = append(ids, m.Id()) - } - } - if len(ids) > 0 { - return errors.Errorf("manually provisioned machines must first be destroyed with `juju destroy-machine %s`", strings.Join(ids, " ")) - } - return nil -} - -// ensureDestroyable an error if any manual machines or persistent volumes are -// found. -func (e *Environment) ensureDestroyable() error { - - // TODO(waigani) bug #1475212: Environment destroy can miss manual - // machines. We need to be able to assert the absence of these as - // part of the destroy txn, but in order to do this manual machines - // need to add refcounts to their environments. - - // Check for manual machines. We bail out if there are any, - // to stop the user from prematurely hobbling the environment. - machines, err := e.st.AllMachines() - if err != nil { - return errors.Trace(err) - } - - if err := checkManualMachines(machines); err != nil { - return errors.Trace(err) - } - - return nil -} - -// createEnvironmentOp returns the operation needed to create -// an environment document with the given name and UUID. 
-func createEnvironmentOp(st *State, owner names.UserTag, name, uuid, server string) txn.Op { - doc := &environmentDoc{ - UUID: uuid, - Name: name, - Life: Alive, - Owner: owner.Canonical(), - ServerUUID: server, - } - return txn.Op{ - C: environmentsC, - Id: uuid, - Assert: txn.DocMissing, - Insert: doc, - } -} - -const hostedEnvCountKey = "hostedEnvironCount" - -type hostedEnvCountDoc struct { - - // RefCount is the number of environments in the Juju system. We do not count - // the system environment. - RefCount int `bson:"refcount"` -} - -func assertNoHostedEnvironsOp() txn.Op { - return txn.Op{ - C: stateServersC, - Id: hostedEnvCountKey, - Assert: bson.D{{"refcount", 0}}, - } -} - -func incHostedEnvironCountOp() txn.Op { - return hostedEnvironCountOp(1) -} - -func decHostedEnvironCountOp() txn.Op { - return hostedEnvironCountOp(-1) -} - -func hostedEnvironCountOp(amount int) txn.Op { - return txn.Op{ - C: stateServersC, - Id: hostedEnvCountKey, - Update: bson.M{ - "$inc": bson.M{"refcount": amount}, - }, - } -} - -func hostedEnvironCount(st *State) (int, error) { - var doc hostedEnvCountDoc - stateServers, closer := st.getCollection(stateServersC) - defer closer() - - if err := stateServers.Find(bson.D{{"_id", hostedEnvCountKey}}).One(&doc); err != nil { - return 0, errors.Trace(err) - } - return doc.RefCount, nil -} - -// createUniqueOwnerEnvNameOp returns the operation needed to create -// an userenvnameC document with the given owner and environment name. -func createUniqueOwnerEnvNameOp(owner names.UserTag, envName string) txn.Op { - return txn.Op{ - C: userenvnameC, - Id: userEnvNameIndex(owner.Canonical(), envName), - Assert: txn.DocMissing, - Insert: bson.M{}, - } -} - -// assertAliveOp returns a txn.Op that asserts the environment is alive. -func (e *Environment) assertAliveOp() txn.Op { - return assertEnvAliveOp(e.UUID()) -} - -// assertEnvAliveOp returns a txn.Op that asserts the given -// environment UUID refers to an Alive environment. -func assertEnvAliveOp(envUUID string) txn.Op { - return txn.Op{ - C: environmentsC, - Id: envUUID, - Assert: isEnvAliveDoc, - } -} - -// isEnvAlive is an Environment-specific version of isAliveDoc. -// -// Environment documents from versions of Juju prior to 1.17 -// do not have the life field; if it does not exist, it should -// be considered to have the value Alive. -// -// TODO(mjs) - this should be removed with existing uses replaced with -// isAliveDoc. A DB migration should convert nil to Alive. -var isEnvAliveDoc = bson.D{ - {"life", bson.D{{"$in", []interface{}{Alive, nil}}}}, -} - -func checkEnvLife(st *State) error { - env, err := st.Environment() - if (err == nil && env.Life() != Alive) || errors.IsNotFound(err) { - return errors.New("environment is no longer alive") - } else if err != nil { - return errors.Annotate(err, "unable to read environment") - } - return nil -} === removed file 'src/github.com/juju/juju/state/environ_test.go' --- src/github.com/juju/juju/state/environ_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/environ_test.go 1970-01-01 00:00:00 +0000 @@ -1,396 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
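The hosted-environment refcount machinery removed above composed into destroyOps as a pair of guarded transaction operations: destroying the system environment asserted that no hosted environments remained, while destroying a hosted environment decremented the shared counter. A minimal restatement of that composition, reusing the collection and key names from the removed file (a sketch for clarity, not code from this tree):

    // destroyGuardOps returns the refcount-related ops for destroying the
    // environment with the given UUID. serverUUID identifies the system
    // environment; stateServersC and hostedEnvCountKey are as defined above.
    func destroyGuardOps(uuid, serverUUID string) []txn.Op {
        if uuid == serverUUID {
            // The transaction aborts unless the hosted count is still zero.
            return []txn.Op{{
                C:      stateServersC,
                Id:     hostedEnvCountKey,
                Assert: bson.D{{"refcount", 0}},
            }}
        }
        // Hosted environments just decrement the shared refcount.
        return []txn.Op{{
            C:      stateServersC,
            Id:     hostedEnvCountKey,
            Update: bson.M{"$inc": bson.M{"refcount": -1}},
        }}
    }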
- -package state_test - -import ( - "fmt" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type EnvironSuite struct { - ConnSuite -} - -var _ = gc.Suite(&EnvironSuite{}) - -func (s *EnvironSuite) TestEnvironment(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - expectedTag := names.NewEnvironTag(env.UUID()) - c.Assert(env.Tag(), gc.Equals, expectedTag) - c.Assert(env.ServerTag(), gc.Equals, expectedTag) - c.Assert(env.Name(), gc.Equals, "testenv") - c.Assert(env.Owner(), gc.Equals, s.Owner) - c.Assert(env.Life(), gc.Equals, state.Alive) -} - -func (s *EnvironSuite) TestNewEnvironmentNonExistentLocalUser(c *gc.C) { - cfg, _ := s.createTestEnvConfig(c) - owner := names.NewUserTag("non-existent@local") - - _, _, err := s.State.NewEnvironment(cfg, owner) - c.Assert(err, gc.ErrorMatches, `cannot create environment: user "non-existent" not found`) -} - -func (s *EnvironSuite) TestNewEnvironmentSameUserSameNameFails(c *gc.C) { - cfg, _ := s.createTestEnvConfig(c) - owner := s.Factory.MakeUser(c, nil).UserTag() - - // Create the first environment. - _, st1, err := s.State.NewEnvironment(cfg, owner) - c.Assert(err, jc.ErrorIsNil) - defer st1.Close() - - // Attempt to create another environment with a different UUID but the - // same owner and name as the first. - newUUID, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - cfg2 := testing.CustomEnvironConfig(c, testing.Attrs{ - "name": cfg.Name(), - "uuid": newUUID.String(), - }) - _, _, err = s.State.NewEnvironment(cfg2, owner) - errMsg := fmt.Sprintf("environment %q for %s already exists", cfg2.Name(), owner.Canonical()) - c.Assert(err, gc.ErrorMatches, errMsg) - c.Assert(errors.IsAlreadyExists(err), jc.IsTrue) - - // Remove the first environment. - env1, err := st1.Environment() - c.Assert(err, jc.ErrorIsNil) - err = env1.Destroy() - c.Assert(err, jc.ErrorIsNil) - err = st1.RemoveAllEnvironDocs() - c.Assert(err, jc.ErrorIsNil) - - // We should now be able to create the other environment. - env2, st2, err := s.State.NewEnvironment(cfg2, owner) - c.Assert(err, jc.ErrorIsNil) - defer st2.Close() - c.Assert(env2, gc.NotNil) - c.Assert(st2, gc.NotNil) -} - -func (s *EnvironSuite) TestNewEnvironment(c *gc.C) { - cfg, uuid := s.createTestEnvConfig(c) - owner := names.NewUserTag("test@remote") - - env, st, err := s.State.NewEnvironment(cfg, owner) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - - envTag := names.NewEnvironTag(uuid) - assertEnvMatches := func(env *state.Environment) { - c.Assert(env.UUID(), gc.Equals, envTag.Id()) - c.Assert(env.Tag(), gc.Equals, envTag) - c.Assert(env.ServerTag(), gc.Equals, s.envTag) - c.Assert(env.Owner(), gc.Equals, owner) - c.Assert(env.Name(), gc.Equals, "testing") - c.Assert(env.Life(), gc.Equals, state.Alive) - } - assertEnvMatches(env) - - // Since the environ tag for the State connection is different, - // asking for this environment through FindEntity returns a not found error. 
- env, err = s.State.GetEnvironment(envTag) - c.Assert(err, jc.ErrorIsNil) - assertEnvMatches(env) - - env, err = st.Environment() - c.Assert(err, jc.ErrorIsNil) - assertEnvMatches(env) - - _, err = s.State.FindEntity(envTag) - c.Assert(err, jc.Satisfies, errors.IsNotFound) - - entity, err := st.FindEntity(envTag) - c.Assert(err, jc.ErrorIsNil) - c.Assert(entity.Tag(), gc.Equals, envTag) - - // Ensure the environment is functional by adding a machine - _, err = st.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironSuite) TestStateServerEnvironment(c *gc.C) { - env, err := s.State.StateServerEnvironment() - c.Assert(err, jc.ErrorIsNil) - - expectedTag := names.NewEnvironTag(env.UUID()) - c.Assert(env.Tag(), gc.Equals, expectedTag) - c.Assert(env.ServerTag(), gc.Equals, expectedTag) - c.Assert(env.Name(), gc.Equals, "testenv") - c.Assert(env.Owner(), gc.Equals, s.Owner) - c.Assert(env.Life(), gc.Equals, state.Alive) -} - -func (s *EnvironSuite) TestStateServerEnvironmentAccessibleFromOtherEnvironments(c *gc.C) { - cfg, _ := s.createTestEnvConfig(c) - _, st, err := s.State.NewEnvironment(cfg, names.NewUserTag("test@remote")) - defer st.Close() - - env, err := st.StateServerEnvironment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Tag(), gc.Equals, s.envTag) - c.Assert(env.Name(), gc.Equals, "testenv") - c.Assert(env.Owner(), gc.Equals, s.Owner) - c.Assert(env.Life(), gc.Equals, state.Alive) -} - -func (s *EnvironSuite) TestConfigForStateServerEnv(c *gc.C) { - otherState := s.Factory.MakeEnvironment(c, &factory.EnvParams{Name: "other"}) - defer otherState.Close() - - env, err := otherState.GetEnvironment(s.envTag) - c.Assert(err, jc.ErrorIsNil) - - conf, err := env.Config() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conf.Name(), gc.Equals, "testenv") - uuid, ok := conf.UUID() - c.Assert(ok, jc.IsTrue) - c.Assert(uuid, gc.Equals, s.envTag.Id()) -} - -func (s *EnvironSuite) TestConfigForOtherEnv(c *gc.C) { - otherState := s.Factory.MakeEnvironment(c, &factory.EnvParams{Name: "other"}) - defer otherState.Close() - otherEnv, err := otherState.Environment() - c.Assert(err, jc.ErrorIsNil) - - // By getting the environment through a different state connection, - // the underlying state pointer in the *state.Environment struct has - // a different environment tag. - env, err := s.State.GetEnvironment(otherEnv.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - conf, err := env.Config() - c.Assert(err, jc.ErrorIsNil) - c.Assert(conf.Name(), gc.Equals, "other") - uuid, ok := conf.UUID() - c.Assert(ok, jc.IsTrue) - c.Assert(uuid, gc.Equals, otherEnv.UUID()) -} - -// createTestEnvConfig returns a new environment config and its UUID for testing. 
-func (s *EnvironSuite) createTestEnvConfig(c *gc.C) (*config.Config, string) { - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - return testing.CustomEnvironConfig(c, testing.Attrs{ - "name": "testing", - "uuid": uuid.String(), - }), uuid.String() -} - -func (s *EnvironSuite) TestEnvironmentConfigSameEnvAsState(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - cfg, err := env.Config() - c.Assert(err, jc.ErrorIsNil) - uuid, exists := cfg.UUID() - c.Assert(exists, jc.IsTrue) - c.Assert(uuid, gc.Equals, s.State.EnvironUUID()) -} - -func (s *EnvironSuite) TestEnvironmentConfigDifferentEnvThanState(c *gc.C) { - otherState := s.Factory.MakeEnvironment(c, nil) - defer otherState.Close() - env, err := otherState.Environment() - c.Assert(err, jc.ErrorIsNil) - cfg, err := env.Config() - c.Assert(err, jc.ErrorIsNil) - uuid, exists := cfg.UUID() - c.Assert(exists, jc.IsTrue) - c.Assert(uuid, gc.Equals, env.UUID()) - c.Assert(uuid, gc.Not(gc.Equals), s.State.EnvironUUID()) -} - -func (s *EnvironSuite) TestDestroyStateServerEnvironment(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - err = env.Destroy() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironSuite) TestDestroyOtherEnvironment(c *gc.C) { - st2 := s.Factory.MakeEnvironment(c, nil) - defer st2.Close() - env, err := st2.Environment() - c.Assert(err, jc.ErrorIsNil) - err = env.Destroy() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *EnvironSuite) TestDestroyStateServerEnvironmentFails(c *gc.C) { - st2 := s.Factory.MakeEnvironment(c, nil) - defer st2.Close() - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Destroy(), gc.ErrorMatches, "failed to destroy environment: hosting 1 other environments") -} - -func (s *EnvironSuite) TestDestroyStateServerEnvironmentRace(c *gc.C) { - // Simulate an environment being added just before the remove txn is - // called. - defer state.SetBeforeHooks(c, s.State, func() { - blocker := s.Factory.MakeEnvironment(c, nil) - err := blocker.Close() - c.Check(err, jc.ErrorIsNil) - }).Check() - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Destroy(), gc.ErrorMatches, "failed to destroy environment: hosting 1 other environments") -} - -func (s *EnvironSuite) TestDestroyStateServerAlreadyDyingRaceNoOp(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - // Simulate an environment being destroyed by another client just before - // the remove txn is called. 
- defer state.SetBeforeHooks(c, s.State, func() { - c.Assert(env.Destroy(), jc.ErrorIsNil) - }).Check() - - c.Assert(env.Destroy(), jc.ErrorIsNil) -} - -func (s *EnvironSuite) TestDestroyStateServerAlreadyDyingNoOp(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - c.Assert(env.Destroy(), jc.ErrorIsNil) - c.Assert(env.Destroy(), jc.ErrorIsNil) -} - -func (s *EnvironSuite) TestListEnvironmentUsers(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - expected := addEnvUsers(c, s.State) - obtained, err := env.Users() - c.Assert(err, gc.IsNil) - - assertObtainedUsersMatchExpectedUsers(c, obtained, expected) -} - -func (s *EnvironSuite) TestMisMatchedEnvs(c *gc.C) { - // create another environment - otherEnvState := s.Factory.MakeEnvironment(c, nil) - defer otherEnvState.Close() - otherEnv, err := otherEnvState.Environment() - c.Assert(err, jc.ErrorIsNil) - - // get that environment from State - env, err := s.State.GetEnvironment(otherEnv.EnvironTag()) - c.Assert(err, jc.ErrorIsNil) - - // check that the Users method errors - users, err := env.Users() - c.Assert(users, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "cannot lookup environment users outside the current environment") -} - -func (s *EnvironSuite) TestListUsersTwoEnvironments(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - otherEnvState := s.Factory.MakeEnvironment(c, nil) - defer otherEnvState.Close() - otherEnv, err := otherEnvState.Environment() - c.Assert(err, jc.ErrorIsNil) - - // Add users to both environments - expectedUsers := addEnvUsers(c, s.State) - expectedUsersOtherEnv := addEnvUsers(c, otherEnvState) - - // test that only the expected users are listed for each environment - obtainedUsers, err := env.Users() - c.Assert(err, jc.ErrorIsNil) - assertObtainedUsersMatchExpectedUsers(c, obtainedUsers, expectedUsers) - - obtainedUsersOtherEnv, err := otherEnv.Users() - c.Assert(err, jc.ErrorIsNil) - assertObtainedUsersMatchExpectedUsers(c, obtainedUsersOtherEnv, expectedUsersOtherEnv) -} - -func addEnvUsers(c *gc.C, st *state.State) (expected []*state.EnvironmentUser) { - // get the environment owner - testAdmin := names.NewUserTag("test-admin") - owner, err := st.EnvironmentUser(testAdmin) - c.Assert(err, jc.ErrorIsNil) - - f := factory.NewFactory(st) - return []*state.EnvironmentUser{ - // we expect the owner to be an existing environment user - owner, - // add new users to the environment - f.MakeEnvUser(c, nil), - f.MakeEnvUser(c, nil), - f.MakeEnvUser(c, nil), - } -} - -func assertObtainedUsersMatchExpectedUsers(c *gc.C, obtainedUsers, expectedUsers []*state.EnvironmentUser) { - c.Assert(len(obtainedUsers), gc.Equals, len(expectedUsers)) - for i, obtained := range obtainedUsers { - c.Assert(obtained.EnvironmentTag().Id(), gc.Equals, expectedUsers[i].EnvironmentTag().Id()) - c.Assert(obtained.UserName(), gc.Equals, expectedUsers[i].UserName()) - c.Assert(obtained.DisplayName(), gc.Equals, expectedUsers[i].DisplayName()) - c.Assert(obtained.CreatedBy(), gc.Equals, expectedUsers[i].CreatedBy()) - } -} - -func (s *EnvironSuite) TestAllEnvironments(c *gc.C) { - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "test", Owner: names.NewUserTag("bob@remote")}).Close() - s.Factory.MakeEnvironment(c, &factory.EnvParams{ - Name: "test", Owner: names.NewUserTag("mary@remote")}).Close() - envs, err := s.State.AllEnvironments() - c.Assert(err, jc.ErrorIsNil) - c.Assert(envs, gc.HasLen, 3) - var obtained []string - for _, env := range envs { 
- obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner().Canonical(), env.Name())) - } - expected := []string{ - "test-admin@local/testenv", - "bob@remote/test", - "mary@remote/test", - } - c.Assert(obtained, jc.SameContents, expected) -} - -func (s *EnvironSuite) TestHostedEnvironCount(c *gc.C) { - c.Assert(state.HostedEnvironCount(c, s.State), gc.Equals, 0) - - st1 := s.Factory.MakeEnvironment(c, nil) - defer st1.Close() - c.Assert(state.HostedEnvironCount(c, s.State), gc.Equals, 1) - - st2 := s.Factory.MakeEnvironment(c, nil) - defer st2.Close() - c.Assert(state.HostedEnvironCount(c, s.State), gc.Equals, 2) - - env1, err := st1.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env1.Destroy(), jc.ErrorIsNil) - c.Assert(state.HostedEnvironCount(c, s.State), gc.Equals, 1) - - env2, err := st2.Environment() - c.Assert(err, jc.ErrorIsNil) - c.Assert(env2.Destroy(), jc.ErrorIsNil) - c.Assert(state.HostedEnvironCount(c, s.State), gc.Equals, 0) -} === removed file 'src/github.com/juju/juju/state/envuser.go' --- src/github.com/juju/juju/state/envuser.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/envuser.go 1970-01-01 00:00:00 +0000 @@ -1,307 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package state - -import ( - "fmt" - "strings" - "time" - - "github.com/juju/errors" - "github.com/juju/names" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - "gopkg.in/mgo.v2/txn" -) - -// EnvironmentUser represents a user access to an environment whereas the user -// could represent a remote user or a user across multiple environments the -// environment user always represents a single user for a single environment. -// There should be no more than one EnvironmentUser per environment. -type EnvironmentUser struct { - st *State - doc envUserDoc -} - -type envUserDoc struct { - ID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` - UserName string `bson:"user"` - DisplayName string `bson:"displayname"` - CreatedBy string `bson:"createdby"` - DateCreated time.Time `bson:"datecreated"` -} - -// envUserLastConnectionDoc is updated by the apiserver whenever the user -// connects over the API. This update is not done using mgo.txn so the values -// could well change underneath a normal transaction and as such, it should -// NEVER appear in any transaction asserts. It is really informational only as -// far as everyone except the api server is concerned. -type envUserLastConnectionDoc struct { - ID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` - UserName string `bson:"user"` - LastConnection time.Time `bson:"last-connection"` -} - -// ID returns the ID of the environment user. -func (e *EnvironmentUser) ID() string { - return e.doc.ID -} - -// EnvironmentTag returns the environment tag of the environment user. -func (e *EnvironmentUser) EnvironmentTag() names.EnvironTag { - return names.NewEnvironTag(e.doc.EnvUUID) -} - -// UserTag returns the tag for the environment user. -func (e *EnvironmentUser) UserTag() names.UserTag { - return names.NewUserTag(e.doc.UserName) -} - -// UserName returns the user name of the environment user. -func (e *EnvironmentUser) UserName() string { - return e.doc.UserName -} - -// DisplayName returns the display name of the environment user. -func (e *EnvironmentUser) DisplayName() string { - return e.doc.DisplayName -} - -// CreatedBy returns the user who created the environment user. 
-func (e *EnvironmentUser) CreatedBy() string { - return e.doc.CreatedBy -} - -// DateCreated returns the date the environment user was created in UTC. -func (e *EnvironmentUser) DateCreated() time.Time { - return e.doc.DateCreated.UTC() -} - -// LastConnection returns when this EnvironmentUser last connected through the API -// in UTC. The resulting time will be nil if the user has never logged in. -func (e *EnvironmentUser) LastConnection() (time.Time, error) { - lastConnections, closer := e.st.getRawCollection(envUserLastConnectionC) - defer closer() - - username := strings.ToLower(e.UserName()) - var lastConn envUserLastConnectionDoc - err := lastConnections.FindId(e.st.docID(username)).Select(bson.D{{"last-connection", 1}}).One(&lastConn) - if err != nil { - if err == mgo.ErrNotFound { - err = errors.Wrap(err, NeverConnectedError(e.UserName())) - } - return time.Time{}, errors.Trace(err) - } - - return lastConn.LastConnection.UTC(), nil -} - -// NeverConnectedError is used to indicate that a user has never connected to -// an environment. -type NeverConnectedError string - -// Error returns the error string for a user who has never connected to an -// environment. -func (e NeverConnectedError) Error() string { - return `never connected: "` + string(e) + `"` -} - -// IsNeverConnectedError returns true if err is of type NeverConnectedError. -func IsNeverConnectedError(err error) bool { - _, ok := errors.Cause(err).(NeverConnectedError) - return ok -} - -// UpdateLastConnection updates the last connection time of the environment user. -func (e *EnvironmentUser) UpdateLastConnection() error { - lastConnections, closer := e.st.getCollection(envUserLastConnectionC) - defer closer() - - lastConnectionsW := lastConnections.Writeable() - - // Update the safe mode of the underlying session to not require - // write majority, nor sync to disk. - session := lastConnectionsW.Underlying().Database.Session - session.SetSafe(&mgo.Safe{}) - - lastConn := envUserLastConnectionDoc{ - ID: e.st.docID(strings.ToLower(e.UserName())), - EnvUUID: e.EnvironmentTag().Id(), - UserName: e.UserName(), - LastConnection: nowToTheSecond(), - } - _, err := lastConnectionsW.UpsertId(lastConn.ID, lastConn) - return errors.Trace(err) -} - -// EnvironmentUser returns the environment user. -func (st *State) EnvironmentUser(user names.UserTag) (*EnvironmentUser, error) { - envUser := &EnvironmentUser{st: st} - envUsers, closer := st.getCollection(envUsersC) - defer closer() - - username := strings.ToLower(user.Canonical()) - err := envUsers.FindId(username).One(&envUser.doc) - if err == mgo.ErrNotFound { - return nil, errors.NotFoundf("environment user %q", user.Canonical()) - } - // DateCreated is inserted as UTC, but read out as local time. So we - // convert it back to UTC here. - envUser.doc.DateCreated = envUser.doc.DateCreated.UTC() - return envUser, nil -} - -// AddEnvironmentUser adds a new user to the database. -func (st *State) AddEnvironmentUser(user, createdBy names.UserTag, displayName string) (*EnvironmentUser, error) { - // Ensure local user exists in state before adding them as an environment user. - if user.IsLocal() { - localUser, err := st.User(user) - if err != nil { - return nil, errors.Annotate(err, fmt.Sprintf("user %q does not exist locally", user.Name())) - } - if displayName == "" { - displayName = localUser.DisplayName() - } - } - - // Ensure local createdBy user exists. 
- if createdBy.IsLocal() { - if _, err := st.User(createdBy); err != nil { - return nil, errors.Annotate(err, fmt.Sprintf("createdBy user %q does not exist locally", createdBy.Name())) - } - } - - envuuid := st.EnvironUUID() - op, doc := createEnvUserOpAndDoc(envuuid, user, createdBy, displayName) - err := st.runTransaction([]txn.Op{op}) - if err == txn.ErrAborted { - err = errors.AlreadyExistsf("environment user %q", user.Canonical()) - } - if err != nil { - return nil, errors.Trace(err) - } - return &EnvironmentUser{st: st, doc: *doc}, nil -} - -// envUserID returns the document id of the environment user -func envUserID(user names.UserTag) string { - username := user.Canonical() - return strings.ToLower(username) -} - -func createEnvUserOpAndDoc(envuuid string, user, createdBy names.UserTag, displayName string) (txn.Op, *envUserDoc) { - creatorname := createdBy.Canonical() - doc := &envUserDoc{ - ID: envUserID(user), - EnvUUID: envuuid, - UserName: user.Canonical(), - DisplayName: displayName, - CreatedBy: creatorname, - DateCreated: nowToTheSecond(), - } - op := txn.Op{ - C: envUsersC, - Id: envUserID(user), - Assert: txn.DocMissing, - Insert: doc, - } - return op, doc -} - -// RemoveEnvironmentUser removes a user from the database. -func (st *State) RemoveEnvironmentUser(user names.UserTag) error { - ops := []txn.Op{{ - C: envUsersC, - Id: envUserID(user), - Assert: txn.DocExists, - Remove: true, - }} - err := st.runTransaction(ops) - if err == txn.ErrAborted { - err = errors.NewNotFound(err, fmt.Sprintf("env user %q does not exist", user.Canonical())) - } - if err != nil { - return errors.Trace(err) - } - return nil -} - -// UserEnvironment contains information about an environment that a -// user has access to. -type UserEnvironment struct { - *Environment - User names.UserTag -} - -// LastConnection returns the last time the user has connected to the -// environment. -func (e *UserEnvironment) LastConnection() (time.Time, error) { - lastConnections, lastConnCloser := e.st.getRawCollection(envUserLastConnectionC) - defer lastConnCloser() - - lastConnDoc := envUserLastConnectionDoc{} - id := ensureEnvUUID(e.EnvironTag().Id(), strings.ToLower(e.User.Canonical())) - err := lastConnections.FindId(id).Select(bson.D{{"last-connection", 1}}).One(&lastConnDoc) - if (err != nil && err != mgo.ErrNotFound) || lastConnDoc.LastConnection.IsZero() { - return time.Time{}, errors.Trace(NeverConnectedError(e.User.Canonical())) - } - - return lastConnDoc.LastConnection, nil -} - -// EnvironmentsForUser returns a list of enviroments that the user -// is able to access. -func (st *State) EnvironmentsForUser(user names.UserTag) ([]*UserEnvironment, error) { - // Since there are no groups at this stage, the simplest way to get all - // the environments that a particular user can see is to look through the - // environment user collection. A raw collection is required to support - // queries across multiple environments. - envUsers, userCloser := st.getRawCollection(envUsersC) - defer userCloser() - - // TODO: consider adding an index to the envUsers collection on the username. 
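The TODO above only records the idea; nothing here creates such an index. If one were added, a hypothetical mgo index on the username field might look like the following, assuming envUsersRaw is the underlying *mgo.Collection behind the raw envUsers collection used below:

    // Hypothetical: index the raw collection on "user" so the
    // cross-environment lookup below can avoid a full collection scan.
    index := mgo.Index{
        Key:        []string{"user"},
        Background: true, // build the index without blocking other operations
    }
    if err := envUsersRaw.EnsureIndex(index); err != nil {
        // Propagate the error, matching the surrounding function's signature.
        return nil, errors.Trace(err)
    }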
- var userSlice []envUserDoc - err := envUsers.Find(bson.D{{"user", user.Canonical()}}).Select(bson.D{{"env-uuid", 1}, {"_id", 1}}).All(&userSlice) - if err != nil { - return nil, err - } - - var result []*UserEnvironment - for _, doc := range userSlice { - envTag := names.NewEnvironTag(doc.EnvUUID) - env, err := st.GetEnvironment(envTag) - if err != nil { - return nil, errors.Trace(err) - } - - result = append(result, &UserEnvironment{Environment: env, User: user}) - } - - return result, nil -} - -// IsSystemAdministrator returns true if the user specified has access to the -// state server environment (the system environment). -func (st *State) IsSystemAdministrator(user names.UserTag) (bool, error) { - ssinfo, err := st.StateServerInfo() - if err != nil { - return false, errors.Annotate(err, "could not get state server info") - } - - serverUUID := ssinfo.EnvironmentTag.Id() - - envUsers, userCloser := st.getRawCollection(envUsersC) - defer userCloser() - - count, err := envUsers.Find(bson.D{ - {"env-uuid", serverUUID}, - {"user", user.Canonical()}, - }).Count() - if err != nil { - return false, errors.Trace(err) - } - return count == 1, nil -} === removed file 'src/github.com/juju/juju/state/envuser_internal_test.go' --- src/github.com/juju/juju/state/envuser_internal_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/envuser_internal_test.go 1970-01-01 00:00:00 +0000 @@ -1,35 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package state - -import ( - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" -) - -type internalEnvUserSuite struct { - internalStateSuite -} - -var _ = gc.Suite(&internalEnvUserSuite{}) - -func (s *internalEnvUserSuite) TestCreateEnvUserOpAndDoc(c *gc.C) { - tag := names.NewUserTag("UserName") - op, doc := createEnvUserOpAndDoc("ignored", tag, names.NewUserTag("ignored"), "ignored") - - c.Assert(op.Id, gc.Equals, "username@local") - c.Assert(doc.ID, gc.Equals, "username@local") - c.Assert(doc.UserName, gc.Equals, "UserName@local") -} - -func (s *internalEnvUserSuite) TestCaseUserNameVsId(c *gc.C) { - env, err := s.state.Environment() - c.Assert(err, jc.ErrorIsNil) - - user, err := s.state.AddEnvironmentUser(names.NewUserTag("Bob@RandomProvider"), env.Owner(), "") - c.Assert(err, gc.IsNil) - c.Assert(user.UserName(), gc.Equals, "Bob@RandomProvider") - c.Assert(user.doc.ID, gc.Equals, s.state.docID("bob@randomprovider")) -} === removed file 'src/github.com/juju/juju/state/envuser_test.go' --- src/github.com/juju/juju/state/envuser_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/envuser_test.go 1970-01-01 00:00:00 +0000 @@ -1,330 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package state_test - -import ( - "fmt" - "sort" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/state" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" -) - -type EnvUserSuite struct { - ConnSuite -} - -var _ = gc.Suite(&EnvUserSuite{}) - -func (s *EnvUserSuite) TestAddEnvironmentUser(c *gc.C) { - now := state.NowToTheSecond() - user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", NoEnvUser: true}) - createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) - envUser, err := s.State.AddEnvironmentUser(user.UserTag(), createdBy.UserTag(), "") - c.Assert(err, jc.ErrorIsNil) - - c.Assert(envUser.ID(), gc.Equals, fmt.Sprintf("%s:validusername@local", s.envTag.Id())) - c.Assert(envUser.EnvironmentTag(), gc.Equals, s.envTag) - c.Assert(envUser.UserName(), gc.Equals, "validusername@local") - c.Assert(envUser.DisplayName(), gc.Equals, user.DisplayName()) - c.Assert(envUser.CreatedBy(), gc.Equals, "createdby@local") - c.Assert(envUser.DateCreated().Equal(now) || envUser.DateCreated().After(now), jc.IsTrue) - when, err := envUser.LastConnection() - c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) - c.Assert(when.IsZero(), jc.IsTrue) - - envUser, err = s.State.EnvironmentUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(envUser.ID(), gc.Equals, fmt.Sprintf("%s:validusername@local", s.envTag.Id())) - c.Assert(envUser.EnvironmentTag(), gc.Equals, s.envTag) - c.Assert(envUser.UserName(), gc.Equals, "validusername@local") - c.Assert(envUser.DisplayName(), gc.Equals, user.DisplayName()) - c.Assert(envUser.CreatedBy(), gc.Equals, "createdby@local") - c.Assert(envUser.DateCreated().Equal(now) || envUser.DateCreated().After(now), jc.IsTrue) - when, err = envUser.LastConnection() - c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) - c.Assert(when.IsZero(), jc.IsTrue) -} - -func (s *EnvUserSuite) TestCaseSensitiveEnvUserErrors(c *gc.C) { - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - s.Factory.MakeEnvUser(c, &factory.EnvUserParams{User: "Bob@ubuntuone"}) - - _, err = s.State.AddEnvironmentUser(names.NewUserTag("boB@ubuntuone"), env.Owner(), "") - c.Assert(err, gc.ErrorMatches, `environment user "boB@ubuntuone" already exists`) - c.Assert(errors.IsAlreadyExists(err), jc.IsTrue) -} - -func (s *EnvUserSuite) TestCaseInsensitiveLookupInMultiEnvirons(c *gc.C) { - assertIsolated := func(st1, st2 *state.State, usernames ...string) { - f := factory.NewFactory(st1) - expectedUser := f.MakeEnvUser(c, &factory.EnvUserParams{User: usernames[0]}) - - // assert case insensitive lookup for each username - for _, username := range usernames { - userTag := names.NewUserTag(username) - obtainedUser, err := st1.EnvironmentUser(userTag) - c.Assert(err, jc.ErrorIsNil) - c.Assert(obtainedUser, gc.DeepEquals, expectedUser) - - _, err = st2.EnvironmentUser(userTag) - c.Assert(errors.IsNotFound(err), jc.IsTrue) - } - } - - otherSt := s.Factory.MakeEnvironment(c, nil) - defer otherSt.Close() - assertIsolated(s.State, otherSt, - "Bob@UbuntuOne", - "bob@ubuntuone", - "BOB@UBUNTUONE", - ) - assertIsolated(otherSt, s.State, - "Sam@UbuntuOne", - "sam@ubuntuone", - "SAM@UBUNTUONE", - ) -} - -func (s *EnvUserSuite) TestAddEnvironmentDisplayName(c *gc.C) { - envUserDefault := s.Factory.MakeEnvUser(c, nil) - c.Assert(envUserDefault.DisplayName(), gc.Matches, "display name-[0-9]*") - - envUser := 
s.Factory.MakeEnvUser(c, &factory.EnvUserParams{DisplayName: "Override user display name"}) - c.Assert(envUser.DisplayName(), gc.Equals, "Override user display name") -} - -func (s *EnvUserSuite) TestAddEnvironmentNoUserFails(c *gc.C) { - createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) - _, err := s.State.AddEnvironmentUser(names.NewLocalUserTag("validusername"), createdBy.UserTag(), "") - c.Assert(err, gc.ErrorMatches, `user "validusername" does not exist locally: user "validusername" not found`) -} - -func (s *EnvUserSuite) TestAddEnvironmentNoCreatedByUserFails(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername"}) - _, err := s.State.AddEnvironmentUser(user.UserTag(), names.NewLocalUserTag("createdby"), "") - c.Assert(err, gc.ErrorMatches, `createdBy user "createdby" does not exist locally: user "createdby" not found`) -} - -func (s *EnvUserSuite) TestRemoveEnvironmentUser(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validUsername"}) - _, err := s.State.EnvironmentUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - - err = s.State.RemoveEnvironmentUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - - _, err = s.State.EnvironmentUser(user.UserTag()) - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *EnvUserSuite) TestRemoveEnvironmentUserFails(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{NoEnvUser: true}) - err := s.State.RemoveEnvironmentUser(user.UserTag()) - c.Assert(err, jc.Satisfies, errors.IsNotFound) -} - -func (s *EnvUserSuite) TestUpdateLastConnection(c *gc.C) { - now := state.NowToTheSecond() - createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) - user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", Creator: createdBy.Tag()}) - envUser, err := s.State.EnvironmentUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - err = envUser.UpdateLastConnection() - c.Assert(err, jc.ErrorIsNil) - when, err := envUser.LastConnection() - c.Assert(err, jc.ErrorIsNil) - // It is possible that the update is done over a second boundary, so we need - // to check for after now as well as equal. - c.Assert(when.After(now) || when.Equal(now), jc.IsTrue) -} - -func (s *EnvUserSuite) TestUpdateLastConnectionTwoEnvUsers(c *gc.C) { - now := state.NowToTheSecond() - - // Create a user and add them to the inital environment. - createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) - user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", Creator: createdBy.Tag()}) - envUser, err := s.State.EnvironmentUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - - // Create a second environment and add the same user to this. - st2 := s.Factory.MakeEnvironment(c, nil) - defer st2.Close() - envUser2, err := st2.AddEnvironmentUser(user.UserTag(), createdBy.UserTag(), "ignored") - c.Assert(err, jc.ErrorIsNil) - - // Now we have two environment users with the same username. Ensure we get - // separate last connections. - - // Connect envUser and get last connection. - err = envUser.UpdateLastConnection() - c.Assert(err, jc.ErrorIsNil) - when, err := envUser.LastConnection() - c.Assert(err, jc.ErrorIsNil) - c.Assert(when.After(now) || when.Equal(now), jc.IsTrue) - - // Try to get last connection for envUser2. As they have never connected, - // we expect to get an error. 
- _, err = envUser2.LastConnection() - c.Assert(err, gc.ErrorMatches, `never connected: "validusername@local"`) - - // Connect envUser2 and get last connection. - err = envUser2.UpdateLastConnection() - c.Assert(err, jc.ErrorIsNil) - when, err = envUser2.LastConnection() - c.Assert(err, jc.ErrorIsNil) - c.Assert(when.After(now) || when.Equal(now), jc.IsTrue) -} - -func (s *EnvUserSuite) TestEnvironmentsForUserNone(c *gc.C) { - tag := names.NewUserTag("non-existent@remote") - environments, err := s.State.EnvironmentsForUser(tag) - c.Assert(err, jc.ErrorIsNil) - c.Assert(environments, gc.HasLen, 0) -} - -func (s *EnvUserSuite) TestEnvironmentsForUserNewLocalUser(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{NoEnvUser: true}) - environments, err := s.State.EnvironmentsForUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(environments, gc.HasLen, 0) -} - -func (s *EnvUserSuite) TestEnvironmentsForUser(c *gc.C) { - user := s.Factory.MakeUser(c, nil) - environments, err := s.State.EnvironmentsForUser(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(environments, gc.HasLen, 1) - c.Assert(environments[0].UUID(), gc.Equals, s.State.EnvironUUID()) - when, err := environments[0].LastConnection() - c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) - c.Assert(when.IsZero(), jc.IsTrue) -} - -func (s *EnvUserSuite) newEnvWithOwner(c *gc.C, name string, owner names.UserTag) *state.Environment { - // Don't use the factory to call MakeEnvironment because it may at some - // time in the future be modified to do additional things. Instead call - // the state method directly to create an environment to make sure that - // the owner is able to access the environment. - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - cfg := testing.CustomEnvironConfig(c, testing.Attrs{ - "name": name, - "uuid": uuid.String(), - }) - env, st, err := s.State.NewEnvironment(cfg, owner) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - return env -} - -func (s *EnvUserSuite) TestEnvironmentsForUserEnvOwner(c *gc.C) { - owner := names.NewUserTag("external@remote") - env := s.newEnvWithOwner(c, "test-env", owner) - - environments, err := s.State.EnvironmentsForUser(owner) - c.Assert(err, jc.ErrorIsNil) - c.Assert(environments, gc.HasLen, 1) - s.checkSameEnvironment(c, environments[0].Environment, env) -} - -func (s *EnvUserSuite) checkSameEnvironment(c *gc.C, env1, env2 *state.Environment) { - c.Check(env1.Name(), gc.Equals, env2.Name()) - c.Check(env1.UUID(), gc.Equals, env2.UUID()) -} - -func (s *EnvUserSuite) newEnvWithUser(c *gc.C, name string, user names.UserTag) *state.Environment { - envState := s.Factory.MakeEnvironment(c, &factory.EnvParams{Name: name}) - defer envState.Close() - newEnv, err := envState.Environment() - c.Assert(err, jc.ErrorIsNil) - - _, err = envState.AddEnvironmentUser(user, newEnv.Owner(), "") - c.Assert(err, jc.ErrorIsNil) - return newEnv -} - -func (s *EnvUserSuite) TestEnvironmentsForUserOfNewEnv(c *gc.C) { - userTag := names.NewUserTag("external@remote") - env := s.newEnvWithUser(c, "test-env", userTag) - - environments, err := s.State.EnvironmentsForUser(userTag) - c.Assert(err, jc.ErrorIsNil) - c.Assert(environments, gc.HasLen, 1) - s.checkSameEnvironment(c, environments[0].Environment, env) -} - -func (s *EnvUserSuite) TestEnvironmentsForUserMultiple(c *gc.C) { - userTag := names.NewUserTag("external@remote") - expected := []*state.Environment{ - s.newEnvWithUser(c, "user1", userTag), - s.newEnvWithUser(c, "user2", userTag), - 
s.newEnvWithUser(c, "user3", userTag), - s.newEnvWithOwner(c, "owner1", userTag), - s.newEnvWithOwner(c, "owner2", userTag), - } - sort.Sort(UUIDOrder(expected)) - - environments, err := s.State.EnvironmentsForUser(userTag) - c.Assert(err, jc.ErrorIsNil) - c.Assert(environments, gc.HasLen, len(expected)) - sort.Sort(userUUIDOrder(environments)) - for i := range expected { - s.checkSameEnvironment(c, environments[i].Environment, expected[i]) - } -} - -func (s *EnvUserSuite) TestIsSystemAdministrator(c *gc.C) { - isAdmin, err := s.State.IsSystemAdministrator(s.Owner) - c.Assert(err, jc.ErrorIsNil) - c.Assert(isAdmin, jc.IsTrue) - - user := s.Factory.MakeUser(c, &factory.UserParams{NoEnvUser: true}) - isAdmin, err = s.State.IsSystemAdministrator(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(isAdmin, jc.IsFalse) - - s.Factory.MakeEnvUser(c, &factory.EnvUserParams{User: user.UserTag().Canonical()}) - isAdmin, err = s.State.IsSystemAdministrator(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(isAdmin, jc.IsTrue) -} - -func (s *EnvUserSuite) TestIsSystemAdministratorFromOtherState(c *gc.C) { - user := s.Factory.MakeUser(c, &factory.UserParams{NoEnvUser: true}) - - otherState := s.Factory.MakeEnvironment(c, &factory.EnvParams{Owner: user.UserTag()}) - defer otherState.Close() - - isAdmin, err := otherState.IsSystemAdministrator(user.UserTag()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(isAdmin, jc.IsFalse) - - isAdmin, err = otherState.IsSystemAdministrator(s.Owner) - c.Assert(err, jc.ErrorIsNil) - c.Assert(isAdmin, jc.IsTrue) -} - -// UUIDOrder is used to sort the environments into a stable order -type UUIDOrder []*state.Environment - -func (a UUIDOrder) Len() int { return len(a) } -func (a UUIDOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a UUIDOrder) Less(i, j int) bool { return a[i].UUID() < a[j].UUID() } - -// userUUIDOrder is used to sort the UserEnvironments into a stable order -type userUUIDOrder []*state.UserEnvironment - -func (a userUUIDOrder) Len() int { return len(a) } -func (a userUUIDOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a userUUIDOrder) Less(i, j int) bool { return a[i].UUID() < a[j].UUID() } === modified file 'src/github.com/juju/juju/state/errors.go' --- src/github.com/juju/juju/state/errors.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/errors.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "fmt" "github.com/juju/errors" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/mgo.v2/txn" ) === modified file 'src/github.com/juju/juju/state/export_test.go' --- src/github.com/juju/juju/state/export_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/export_test.go 2016-03-22 15:18:22 +0000 @@ -6,17 +6,17 @@ import ( "fmt" "io/ioutil" - "math/rand" "path/filepath" "time" + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" jc "github.com/juju/testing/checkers" jujutxn "github.com/juju/txn" txntesting "github.com/juju/txn/testing" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -31,6 +31,7 @@ MachinesC = machinesC NetworkInterfacesC = networkInterfacesC ServicesC = servicesC + EndpointBindingsC = endpointBindingsC SettingsC = settingsC UnitsC = unitsC UsersC = usersC @@ -43,7 +44,7 @@ ToolstorageNewStorage = &toolstorageNewStorage ImageStorageNewStorage = &imageStorageNewStorage MachineIdLessThan = machineIdLessThan - StateServerAvailable = &stateServerAvailable 
+ ControllerAvailable = &controllerAvailable GetOrCreatePorts = getOrCreatePorts GetPorts = getPorts PortsGlobalKey = portsGlobalKey @@ -52,6 +53,9 @@ PickAddress = &pickAddress AddVolumeOps = (*State).addVolumeOps CombineMeterStatus = combineMeterStatus + ServiceGlobalKey = serviceGlobalKey + MergeBindings = mergeBindings + UpgradeInProgressError = errUpgradeInProgress ) type ( @@ -115,21 +119,51 @@ return addCharm(c, st, "quantal", testcharms.Repo.CharmDir(name)) } +func AddTestingCharmForSeries(c *gc.C, st *State, series, name string) *Charm { + return addCharm(c, st, series, testcharms.Repo.CharmDir(name)) +} + +func AddTestingCharmMultiSeries(c *gc.C, st *State, name string) *Charm { + ch := testcharms.Repo.CharmDir(name) + ident := fmt.Sprintf("%s-%d", ch.Meta().Name, ch.Revision()) + curl := charm.MustParseURL("cs:" + ident) + sch, err := st.AddCharm(ch, curl, "dummy-path", ident+"-sha256") + c.Assert(err, jc.ErrorIsNil) + return sch +} + func AddTestingService(c *gc.C, st *State, name string, ch *Charm, owner names.UserTag) *Service { - return addTestingService(c, st, name, ch, owner, nil, nil) -} - + return addTestingService(c, st, "", name, ch, owner, nil, nil) +} + +func AddTestingServiceForSeries(c *gc.C, st *State, series, name string, ch *Charm, owner names.UserTag) *Service { + return addTestingService(c, st, series, name, ch, owner, nil, nil) +} + +// TODO(dimitern): Drop this along with the remnants of requested networks in a +// follow-up. func AddTestingServiceWithNetworks(c *gc.C, st *State, name string, ch *Charm, owner names.UserTag, networks []string) *Service { - return addTestingService(c, st, name, ch, owner, networks, nil) + return addTestingService(c, st, "", name, ch, owner, nil, nil) } func AddTestingServiceWithStorage(c *gc.C, st *State, name string, ch *Charm, owner names.UserTag, storage map[string]StorageConstraints) *Service { - return addTestingService(c, st, name, ch, owner, nil, storage) -} - -func addTestingService(c *gc.C, st *State, name string, ch *Charm, owner names.UserTag, networks []string, storage map[string]StorageConstraints) *Service { + return addTestingService(c, st, "", name, ch, owner, nil, storage) +} + +func AddTestingServiceWithBindings(c *gc.C, st *State, name string, ch *Charm, owner names.UserTag, bindings map[string]string) *Service { + return addTestingService(c, st, "", name, ch, owner, bindings, nil) +} + +func addTestingService(c *gc.C, st *State, series, name string, ch *Charm, owner names.UserTag, bindings map[string]string, storage map[string]StorageConstraints) *Service { c.Assert(ch, gc.NotNil) - service, err := st.AddService(name, owner.String(), ch, networks, storage, nil) + service, err := st.AddService(AddServiceArgs{ + Name: name, + Series: series, + Owner: owner.String(), + Charm: ch, + EndpointBindings: bindings, + Storage: storage, + }) c.Assert(err, jc.ErrorIsNil) return service } @@ -247,8 +281,8 @@ return st.checkUserExists(name) } -func WatcherMergeIds(st *State, changeset *[]string, updates map[interface{}]bool) error { - return mergeIds(st, changeset, updates) +func WatcherMergeIds(st *State, changeset *[]string, updates map[interface{}]bool, idconv func(string) string) error { + return mergeIds(st, changeset, updates, idconv) } func WatcherEnsureSuffixFn(marker string) func(string) string { @@ -280,8 +314,8 @@ return out, nil } -func UserEnvNameIndex(username, envName string) string { - return userEnvNameIndex(username, envName) +func UserModelNameIndex(username, modelName string) string { + return 
userModelNameIndex(username, modelName) } func DocID(st *State, id string) string { @@ -296,8 +330,8 @@ return st.strictLocalID(id) } -func GetUnitEnvUUID(unit *Unit) string { - return unit.doc.EnvUUID +func GetUnitModelUUID(unit *Unit) string { + return unit.doc.ModelUUID } func GetCollection(st *State, name string) (mongo.Collection, func()) { @@ -326,12 +360,12 @@ return st.sequence(name) } -// This is a naive environment destruction function, used to test environment -// watching after the client calls DestroyEnvironment and the environ doc is removed. +// This is a naive model destruction function, used to test model +// watching after the client calls DestroyModel and the model doc is removed. // It is also used to test annotations. -func RemoveEnvironment(st *State, uuid string) error { +func RemoveModel(st *State, uuid string) error { ops := []txn.Op{{ - C: environmentsC, + C: modelsC, Id: uuid, Assert: txn.DocExists, Remove: true, @@ -339,18 +373,17 @@ return st.runTransaction(ops) } -func SetEnvLifeDying(st *State, envUUID string) error { +func SetModelLifeDead(st *State, modelUUID string) error { ops := []txn.Op{{ - C: environmentsC, - Id: envUUID, - Update: bson.D{{"$set", bson.D{{"life", Dying}}}}, - Assert: isEnvAliveDoc, + C: modelsC, + Id: modelUUID, + Update: bson.D{{"$set", bson.D{{"life", Dead}}}}, }} return st.runTransaction(ops) } -func HostedEnvironCount(c *gc.C, st *State) int { - count, err := hostedEnvironCount(st) +func HostedModelCount(c *gc.C, st *State) int { + count, err := hostedModelCount(st) c.Assert(err, jc.ErrorIsNil) return count } @@ -403,44 +436,59 @@ c.Assert(netHostsPorts, gc.DeepEquals, newNetHostsPorts) } -// WriteLogWithOplog writes out a log record to the a (probably fake) -// oplog collection and the logs collection. -func WriteLogWithOplog( - oplog *mgo.Collection, - envUUID string, +// MakeLogDoc creates a database document for a single log message. 
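+// Unlike the WriteLogWithOplog helper it replaces, MakeLogDoc only builds the
+// document: callers insert it into the logs (and, where needed, a fake oplog)
+// collection themselves.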
+func MakeLogDoc( + modelUUID string, entity names.Tag, t time.Time, module string, location string, level loggo.Level, msg string, -) error { - doc := &logDoc{ - Id: bson.NewObjectId(), - Time: t, - EnvUUID: envUUID, - Entity: entity.String(), - Module: module, - Location: location, - Level: level, - Message: msg, - } - err := oplog.Insert(bson.D{ - {"ts", bson.MongoTimestamp(time.Now().Unix() << 32)}, // an approximation which will do - {"h", rand.Int63()}, // again, a suitable fake - {"op", "i"}, // this will always be an insert - {"ns", "logs.logs"}, - {"o", doc}, - }) - if err != nil { - return err - } - - session := oplog.Database.Session - logs := session.DB("logs").C("logs") - return logs.Insert(doc) +) *logDoc { + return &logDoc{ + Id: bson.NewObjectId(), + Time: t, + ModelUUID: modelUUID, + Entity: entity.String(), + Module: module, + Location: location, + Level: level, + Message: msg, + } } func SpaceDoc(s *Space) spaceDoc { return s.doc } + +func ForceDestroyMachineOps(m *Machine) ([]txn.Op, error) { + return m.forceDestroyOps() +} + +func IsManagerMachineError(err error) bool { + return errors.Cause(err) == managerMachineError +} + +var ActionNotificationIdToActionId = actionNotificationIdToActionId + +func UpdateModelUserLastConnection(e *ModelUser, when time.Time) error { + return e.updateLastConnection(when) +} + +func RemoveEndpointBindingsForService(c *gc.C, service *Service) { + globalKey := service.globalKey() + removeOp := removeEndpointBindingsOp(globalKey) + + txnError := service.st.runTransaction([]txn.Op{removeOp}) + err := onAbort(txnError, nil) // ignore ErrAborted as it asserts DocExists + c.Assert(err, jc.ErrorIsNil) +} + +func AssertEndpointBindingsNotFoundForService(c *gc.C, service *Service) { + globalKey := service.globalKey() + storedBindings, _, err := readEndpointBindings(service.st, globalKey) + c.Assert(storedBindings, gc.IsNil) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("endpoint bindings for %q not found", globalKey)) + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} === modified file 'src/github.com/juju/juju/state/filesystem.go' --- src/github.com/juju/juju/state/filesystem.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/filesystem.go 2016-03-22 15:18:22 +0000 @@ -7,11 +7,12 @@ "fmt" "path" "strings" + "time" "github.com/juju/errors" "github.com/juju/names" jujutxn "github.com/juju/txn" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -24,12 +25,14 @@ // without a backing volume. var ErrNoBackingVolume = errors.New("filesystem has no backing volume") -// Filesystem describes a filesystem in the environment. Filesystems may be +// Filesystem describes a filesystem in the model. Filesystems may be // backed by a volume, and managed by Juju; otherwise they are first-class // entities managed by a filesystem provider. type Filesystem interface { - Entity + GlobalEntity LifeBinder + StatusGetter + StatusSetter // FilesystemTag returns the tag for the filesystem. FilesystemTag() names.FilesystemTag @@ -72,7 +75,7 @@ // NotProvisioned error if the attachment has not yet been made. // // Note that the presence of FilesystemAttachmentInfo does not necessarily - // imply that the filesystem is mounted; environment storage providers may + // imply that the filesystem is mounted; model storage providers may // need to prepare a filesystem for attachment to a machine before it can // be mounted. 
Info() (FilesystemAttachmentInfo, error) @@ -84,6 +87,7 @@ } type filesystem struct { + st *State doc filesystemDoc } @@ -91,17 +95,17 @@ doc filesystemAttachmentDoc } -// filesystemDoc records information about a filesystem in the environment. +// filesystemDoc records information about a filesystem in the model. type filesystemDoc struct { DocID string `bson:"_id"` FilesystemId string `bson:"filesystemid"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Life Life `bson:"life"` StorageId string `bson:"storageid,omitempty"` VolumeId string `bson:"volumeid,omitempty"` // TODO(axw) 2015-06-22 #1467379 // upgrade step to set "attachmentcount" and "binding" - // for 1.24 environments. + // for 1.24 models. AttachmentCount int `bson:"attachmentcount"` Binding string `bson:"binding,omitempty"` Info *FilesystemInfo `bson:"info,omitempty"` @@ -112,7 +116,7 @@ type filesystemAttachmentDoc struct { // DocID is the machine global key followed by the filesystem name. DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Filesystem string `bson:"filesystemid"` Machine string `bson:"machineid"` Life Life `bson:"life"` @@ -174,9 +178,9 @@ return errors.Annotate(err, "parsing binding") } switch tag.(type) { - case names.EnvironTag: - // TODO(axw) support binding to environment - return errors.NotSupportedf("binding to environment") + case names.ModelTag: + // TODO(axw) support binding to model + return errors.NotSupportedf("binding to model") case names.MachineTag: case names.StorageTag: default: @@ -186,7 +190,12 @@ return nil } -// Tag is required to implement Entity. +// globalKey is required to implement GlobalEntity. +func (f *filesystem) globalKey() string { + return filesystemGlobalKey(f.doc.FilesystemId) +} + +// Tag is required to implement GlobalEntity. func (f *filesystem) Tag() names.Tag { return f.FilesystemTag() } @@ -214,9 +223,9 @@ // Storage: If the filesystem is bound to a storage instance, // then the filesystem will be destroyed when the // storage instance is removed from state. -// Environment: If the filesystem is bound to the environment, then +// Model: If the filesystem is bound to the model, then // the filesystem must be destroyed prior to the -// environment being destroyed. +// model being destroyed. func (f *filesystem) LifeBinding() names.Tag { if f.doc.Binding == "" { return nil @@ -259,6 +268,16 @@ return *f.doc.Params, true } +// Status is required to implement StatusGetter. +func (f *filesystem) Status() (StatusInfo, error) { + return f.st.FilesystemStatus(f.FilesystemTag()) +} + +// SetStatus is required to implement StatusSetter. +func (f *filesystem) SetStatus(status Status, info string, data map[string]interface{}) error { + return f.st.SetFilesystemStatus(f.FilesystemTag(), status, info, data) +} + // Filesystem is required to implement FilesystemAttachment.
func (f *filesystemAttachment) Filesystem() names.FilesystemTag { return names.NewFilesystemTag(f.doc.Filesystem) @@ -338,7 +357,7 @@ } filesystems := make([]*filesystem, len(fDocs)) for i, doc := range fDocs { - f := &filesystem{doc} + f := &filesystem{st, doc} if err := f.validate(); err != nil { return nil, errors.Annotate(err, "filesystem validation failed") } @@ -351,7 +370,7 @@ coll, cleanup := st.getCollection(filesystemsC) defer cleanup() - var f filesystem + f := filesystem{st: st} err := coll.Find(query).One(&f.doc) if err == mgo.ErrNotFound { return nil, errors.NotFoundf(description) @@ -625,12 +644,15 @@ } func removeFilesystemOps(st *State, filesystem Filesystem) ([]txn.Op, error) { - ops := []txn.Op{{ - C: filesystemsC, - Id: filesystem.Tag().Id(), - Assert: txn.DocExists, - Remove: true, - }} + ops := []txn.Op{ + { + C: filesystemsC, + Id: filesystem.Tag().Id(), + Assert: txn.DocExists, + Remove: true, + }, + removeStatusOp(st, filesystem.globalKey()), + } // If the filesystem is backed by a volume, the volume should // be destroyed once the filesystem is removed if it is bound // to the filesystem. @@ -730,21 +752,27 @@ ops = append(ops, volumeOps...) } - filesystemOp := txn.Op{ - C: filesystemsC, - Id: filesystemId, - Assert: txn.DocMissing, - Insert: &filesystemDoc{ - FilesystemId: filesystemId, - VolumeId: volumeId, - StorageId: params.storage.Id(), - Binding: params.binding.String(), - Params: ¶ms, - // Every filesystem is created with one attachment. - AttachmentCount: 1, + filesystemOps := []txn.Op{ + createStatusOp(st, filesystemGlobalKey(filesystemId), statusDoc{ + Status: StatusPending, + Updated: time.Now().UnixNano(), + }), + { + C: filesystemsC, + Id: filesystemId, + Assert: txn.DocMissing, + Insert: &filesystemDoc{ + FilesystemId: filesystemId, + VolumeId: volumeId, + StorageId: params.storage.Id(), + Binding: params.binding.String(), + Params: ¶ms, + // Every filesystem is created with one attachment. + AttachmentCount: 1, + }, }, } - ops = append(ops, filesystemOp) + ops = append(ops, filesystemOps...) return ops, filesystemTag, volumeTag, nil } @@ -752,7 +780,7 @@ if params.Pool != "" { return params, nil } - envConfig, err := st.EnvironConfig() + envConfig, err := st.ModelConfig() if err != nil { return FilesystemParams{}, errors.Trace(err) } @@ -1099,3 +1127,44 @@ } return result } + +func filesystemGlobalKey(name string) string { + return "f#" + name +} + +// FilesystemStatus returns the status of the specified filesystem. +func (st *State) FilesystemStatus(tag names.FilesystemTag) (StatusInfo, error) { + return getStatus(st, filesystemGlobalKey(tag.Id()), "filesystem") +} + +// SetFilesystemStatus sets the status of the specified filesystem. +func (st *State) SetFilesystemStatus(tag names.FilesystemTag, status Status, info string, data map[string]interface{}) error { + switch status { + case StatusAttaching, StatusAttached, StatusDetaching, StatusDetached, StatusDestroying: + case StatusError: + if info == "" { + return errors.Errorf("cannot set status %q without info", status) + } + case StatusPending: + // If a filesystem is not yet provisioned, we allow its status + // to be set back to pending (when a retry is to occur). 
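+ // Any other transition back to pending is rejected below: once the
+ // filesystem has provisioning info, pending would be a regression.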
+ v, err := st.Filesystem(tag) + if err != nil { + return errors.Trace(err) + } + _, err = v.Info() + if errors.IsNotProvisioned(err) { + break + } + return errors.Errorf("cannot set status %q", status) + default: + return errors.Errorf("cannot set invalid status %q", status) + } + return setStatus(st, setStatusParams{ + badge: "filesystem", + globalKey: filesystemGlobalKey(tag.Id()), + status: status, + message: info, + rawData: data, + }) +} === modified file 'src/github.com/juju/juju/state/filesystem_test.go' --- src/github.com/juju/juju/state/filesystem_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/filesystem_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/state" "github.com/juju/juju/state/testing" @@ -25,41 +25,76 @@ storage := map[string]state.StorageConstraints{ "data": makeStorageCons("invalid-pool", 1024, 1), } - _, err := s.State.AddService("storage-filesystem", s.Owner.String(), ch, nil, storage, nil) + _, err := s.State.AddService(state.AddServiceArgs{Name: "storage-filesystem", Owner: s.Owner.String(), Charm: ch, Storage: storage}) c.Assert(err, gc.ErrorMatches, `.* pool "invalid-pool" not found`) } func (s *FilesystemStateSuite) TestAddServiceNoPoolNoDefault(c *gc.C) { // no pool specified, no default configured: use rootfs. - s.testAddServiceDefaultPool(c, "rootfs") + s.testAddServiceDefaultPool(c, "rootfs", 0) +} + +func (s *FilesystemStateSuite) TestAddServiceNoPoolNoDefaultWithUnits(c *gc.C) { + // no pool specified, no default configured: use rootfs, add a unit during + // service deploy. + s.testAddServiceDefaultPool(c, "rootfs", 1) } func (s *FilesystemStateSuite) TestAddServiceNoPoolDefaultBlock(c *gc.C) { // no pool specified, default block configured: use default // block with managed fs on top. 
- err := s.State.UpdateEnvironConfig(map[string]interface{}{ + err := s.State.UpdateModelConfig(map[string]interface{}{ "storage-default-block-source": "machinescoped", }, nil, nil) c.Assert(err, jc.ErrorIsNil) - s.testAddServiceDefaultPool(c, "machinescoped") + s.testAddServiceDefaultPool(c, "machinescoped", 0) } -func (s *FilesystemStateSuite) testAddServiceDefaultPool(c *gc.C, expectedPool string) { +func (s *FilesystemStateSuite) testAddServiceDefaultPool(c *gc.C, expectedPool string, numUnits int) { ch := s.AddTestingCharm(c, "storage-filesystem") storage := map[string]state.StorageConstraints{ "data": makeStorageCons("", 1024, 1), } - svc, err := s.State.AddService("storage-filesystem", s.Owner.String(), ch, nil, storage, nil) + + args := state.AddServiceArgs{ + Name: "storage-filesystem", + Owner: s.Owner.String(), + Charm: ch, + Storage: storage, + NumUnits: numUnits, + } + svc, err := s.State.AddService(args) c.Assert(err, jc.ErrorIsNil) cons, err := svc.StorageConstraints() c.Assert(err, jc.ErrorIsNil) - c.Assert(cons, jc.DeepEquals, map[string]state.StorageConstraints{ + expected := map[string]state.StorageConstraints{ "data": state.StorageConstraints{ Pool: expectedPool, Size: 1024, Count: 1, }, - }) + } + c.Assert(cons, jc.DeepEquals, expected) + + svc, err = s.State.Service(args.Name) + c.Assert(err, jc.ErrorIsNil) + + units, err := svc.AllUnits() + c.Assert(err, jc.ErrorIsNil) + c.Assert(units, gc.HasLen, numUnits) + + for _, unit := range units { + scons, err := unit.StorageConstraints() + c.Assert(err, jc.ErrorIsNil) + c.Assert(scons, gc.DeepEquals, expected) + + storageAttachments, err := s.State.UnitStorageAttachments(unit.UnitTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(storageAttachments, gc.HasLen, 1) + storageInstance, err := s.State.StorageInstance(storageAttachments[0].StorageInstance()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(storageInstance.Kind(), gc.Equals, state.StorageKindFilesystem) + } } func (s *FilesystemStateSuite) TestAddFilesystemWithoutBackingVolume(c *gc.C) { @@ -256,9 +291,7 @@ s.assertFilesystemInfo(c, filesystemTag, filesystemInfo) s.assertFilesystemAttachmentUnprovisioned(c, machineTag, filesystemTag) - // Explicitly set both MountPoint and ReadOnly to work around - // bug #1517611 - filesystemAttachmentInfo := state.FilesystemAttachmentInfo{MountPoint: "/srv", ReadOnly: false} + filesystemAttachmentInfo := state.FilesystemAttachmentInfo{MountPoint: "/srv"} err = s.State.SetFilesystemAttachmentInfo(machineTag, filesystemTag, filesystemAttachmentInfo) c.Assert(err, jc.ErrorIsNil) s.assertFilesystemAttachmentInfo(c, machineTag, filesystemTag, filesystemAttachmentInfo) @@ -276,7 +309,7 @@ c.Assert(volumeTag, gc.Equals, names.NewVolumeTag("0")) } -func (s *FilesystemStateSuite) TestWatchEnvironFilesystems(c *gc.C) { +func (s *FilesystemStateSuite) TestWatchModelFilesystems(c *gc.C) { service := s.setupMixedScopeStorageService(c, "filesystem") addUnit := func() { u, err := service.AddUnit() @@ -286,7 +319,7 @@ } addUnit() - w := s.State.WatchEnvironFilesystems() + w := s.State.WatchModelFilesystems() defer testing.AssertStop(c, w) wc := testing.NewStringsWatcherC(c, s.State, w) wc.AssertChangeInSingleEvent("0") // initial === modified file 'src/github.com/juju/juju/state/images.go' --- src/github.com/juju/juju/state/images.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/images.go 2016-03-22 15:18:22 +0000 @@ -14,5 +14,5 @@ // ImageStorage returns a new imagestorage.Storage // that stores image metadata. 
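// The returned storage is scoped to this state's model UUID, so image
// metadata and blobs written for one model are not visible to another.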
func (st *State) ImageStorage() imagestorage.Storage { - return imageStorageNewStorage(st.session, st.EnvironUUID()) + return imageStorageNewStorage(st.session, st.ModelUUID()) } === modified file 'src/github.com/juju/juju/state/images_test.go' --- src/github.com/juju/juju/state/images_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/images_test.go 2016-03-22 15:18:22 +0000 @@ -39,16 +39,16 @@ } func (s *ImageSuite) TestStorageParams(c *gc.C) { - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, gc.IsNil) var called bool s.PatchValue(state.ImageStorageNewStorage, func( session *mgo.Session, - envUUID string, + modelUUID string, ) imagestorage.Storage { called = true - c.Assert(envUUID, gc.Equals, env.UUID()) + c.Assert(modelUUID, gc.Equals, env.UUID()) c.Assert(session, gc.NotNil) return nil }) === modified file 'src/github.com/juju/juju/state/imagestorage/export_test.go' --- src/github.com/juju/juju/state/imagestorage/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/imagestorage/export_test.go 2016-03-22 15:18:22 +0000 @@ -4,8 +4,8 @@ package imagestorage import ( - "github.com/juju/blobstore" "github.com/juju/errors" + "gopkg.in/juju/blobstore.v2" "gopkg.in/mgo.v2" ) @@ -32,7 +32,7 @@ blobstore.ManagedStorage } -func (removeFailsManagedStorage) RemoveForEnvironment(uuid, path string) error { +func (removeFailsManagedStorage) RemoveForBucket(uuid, path string) error { return errors.Errorf("cannot remove %s:%s", uuid, path) } === modified file 'src/github.com/juju/juju/state/imagestorage/image.go' --- src/github.com/juju/juju/state/imagestorage/image.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/imagestorage/image.go 2016-03-22 15:18:22 +0000 @@ -8,10 +8,10 @@ "io" "time" - "github.com/juju/blobstore" "github.com/juju/errors" "github.com/juju/loggo" jujutxn "github.com/juju/txn" + "gopkg.in/juju/blobstore.v2" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -30,7 +30,7 @@ ) type imageStorage struct { - envUUID string + modelUUID string metadataCollection *mgo.Collection blobDb *mgo.Database } @@ -42,12 +42,12 @@ // database in the "imagemetadata" collection. 
func NewStorage( session *mgo.Session, - envUUID string, + modelUUID string, ) Storage { blobDb := session.DB(ImagesDB) metadataCollection := blobDb.C(imagemetadataC) return &imageStorage{ - envUUID, + modelUUID, metadataCollection, blobDb, } @@ -82,14 +82,14 @@ defer session.Close() managedStorage := s.getManagedStorage(session) path := imagePath(metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256) - if err := managedStorage.PutForEnvironment(s.envUUID, path, r, metadata.Size); err != nil { + if err := managedStorage.PutForBucket(s.modelUUID, path, r, metadata.Size); err != nil { return errors.Annotate(err, "cannot store image") } defer func() { if resultErr == nil { return } - err := managedStorage.RemoveForEnvironment(s.envUUID, path) + err := managedStorage.RemoveForBucket(s.modelUUID, path) if err != nil { logger.Errorf("failed to remove image blob: %v", err) } @@ -97,7 +97,7 @@ newDoc := imageMetadataDoc{ Id: docId(metadata), - EnvUUID: s.envUUID, + ModelUUID: s.modelUUID, Kind: metadata.Kind, Series: metadata.Series, Arch: metadata.Arch, @@ -125,7 +125,7 @@ op.Assert = txn.DocMissing op.Insert = &newDoc } else { - oldDoc, err := s.imageMetadataDoc(metadata.EnvUUID, metadata.Kind, metadata.Series, metadata.Arch) + oldDoc, err := s.imageMetadataDoc(metadata.ModelUUID, metadata.Kind, metadata.Series, metadata.Arch) if err != nil { return nil, err } @@ -151,7 +151,7 @@ if oldPath != "" && oldPath != path { // Attempt to remove the old path. Failure is non-fatal. - err := managedStorage.RemoveForEnvironment(s.envUUID, oldPath) + err := managedStorage.RemoveForBucket(s.modelUUID, oldPath) if err != nil { logger.Errorf("failed to remove old image blob: %v", err) } else { @@ -163,14 +163,14 @@ // ListImages is defined on the Storage interface. func (s *imageStorage) ListImages(filter ImageFilter) ([]*Metadata, error) { - metadataDocs, err := s.listImageMetadataDocs(s.envUUID, filter.Kind, filter.Series, filter.Arch) + metadataDocs, err := s.listImageMetadataDocs(s.modelUUID, filter.Kind, filter.Series, filter.Arch) if err != nil { return nil, errors.Annotate(err, "cannot list image metadata") } result := make([]*Metadata, len(metadataDocs)) for i, metadataDoc := range metadataDocs { result[i] = &Metadata{ - EnvUUID: s.envUUID, + ModelUUID: s.modelUUID, Kind: metadataDoc.Kind, Series: metadataDoc.Series, Arch: metadataDoc.Arch, @@ -189,7 +189,7 @@ defer session.Close() managedStorage := s.getManagedStorage(session) path := imagePath(metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256) - if err := managedStorage.RemoveForEnvironment(s.envUUID, path); err != nil { + if err := managedStorage.RemoveForBucket(s.modelUUID, path); err != nil { return errors.Annotate(err, "cannot remove image blob") } // Remove the metadata. @@ -224,7 +224,7 @@ // Image is defined on the Storage interface. 
func (s *imageStorage) Image(kind, series, arch string) (*Metadata, io.ReadCloser, error) { - metadataDoc, err := s.imageMetadataDoc(s.envUUID, kind, series, arch) + metadataDoc, err := s.imageMetadataDoc(s.modelUUID, kind, series, arch) if err != nil { return nil, nil, err } @@ -235,7 +235,7 @@ return nil, nil, err } metadata := &Metadata{ - EnvUUID: s.envUUID, + ModelUUID: s.modelUUID, Kind: metadataDoc.Kind, Series: metadataDoc.Series, Arch: metadataDoc.Arch, @@ -253,7 +253,7 @@ type imageMetadataDoc struct { Id string `bson:"_id"` - EnvUUID string `bson:"envuuid"` + ModelUUID string `bson:"modelUUID"` Kind string `bson:"kind"` Series string `bson:"series"` Arch string `bson:"arch"` @@ -264,9 +264,9 @@ SourceURL string `bson:"sourceurl"` } -func (s *imageStorage) imageMetadataDoc(envUUID, kind, series, arch string) (imageMetadataDoc, error) { +func (s *imageStorage) imageMetadataDoc(modelUUID, kind, series, arch string) (imageMetadataDoc, error) { var doc imageMetadataDoc - id := fmt.Sprintf("%s-%s-%s-%s", envUUID, kind, series, arch) + id := fmt.Sprintf("%s-%s-%s-%s", modelUUID, kind, series, arch) coll, closer := mongo.CollectionFromName(s.metadataCollection.Database, imagemetadataC) defer closer() err := coll.FindId(id).One(&doc) @@ -278,11 +278,11 @@ return doc, nil } -func (s *imageStorage) listImageMetadataDocs(envUUID, kind, series, arch string) ([]imageMetadataDoc, error) { +func (s *imageStorage) listImageMetadataDocs(modelUUID, kind, series, arch string) ([]imageMetadataDoc, error) { coll, closer := mongo.CollectionFromName(s.metadataCollection.Database, imagemetadataC) defer closer() imageDocs := []imageMetadataDoc{} - filter := bson.D{{"envuuid", envUUID}} + filter := bson.D{{"modelUUID", modelUUID}} if kind != "" { filter = append(filter, bson.DocElem{"kind", kind}) } @@ -297,7 +297,7 @@ } func (s *imageStorage) imageBlob(managedStorage blobstore.ManagedStorage, path string) (io.ReadCloser, error) { - r, _, err := managedStorage.GetForEnvironment(s.envUUID, path) + r, _, err := managedStorage.GetForBucket(s.modelUUID, path) return r, err } @@ -308,5 +308,5 @@ // docId returns an id for the mongo image metadata document. 
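// The id embeds the model UUID, so identical kind/series/arch images
// belonging to different models never collide.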
func docId(metadata *Metadata) string { - return fmt.Sprintf("%s-%s-%s-%s", metadata.EnvUUID, metadata.Kind, metadata.Series, metadata.Arch) + return fmt.Sprintf("%s-%s-%s-%s", metadata.ModelUUID, metadata.Kind, metadata.Series, metadata.Arch) } === modified file 'src/github.com/juju/juju/state/imagestorage/image_test.go' --- src/github.com/juju/juju/state/imagestorage/image_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/imagestorage/image_test.go 2016-03-22 15:18:22 +0000 @@ -91,7 +91,7 @@ func (s *ImageSuite) testAddImage(c *gc.C, content string) { var r io.Reader = bytes.NewReader([]byte(content)) addedMetadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", @@ -121,17 +121,17 @@ s.addMetadataDoc(c, "lxc", "trusty", "amd64", 3, "hash(abc)", "path", "http://path") _, _, err = s.storage.Image("lxc", "trusty", "amd64") c.Assert(err, jc.Satisfies, errors.IsNotFound) - c.Assert(err, gc.ErrorMatches, `resource at path "environs/my-uuid/path" not found`) + c.Assert(err, gc.ErrorMatches, `resource at path "buckets/my-uuid/path" not found`) managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - err = managedStorage.PutForEnvironment("my-uuid", "path", strings.NewReader("blah"), 4) + err = managedStorage.PutForBucket("my-uuid", "path", strings.NewReader("blah"), 4) c.Assert(err, gc.IsNil) metadata, r, err := s.storage.Image("lxc", "trusty", "amd64") c.Assert(err, gc.IsNil) defer r.Close() checkMetadata(c, metadata, &imagestorage.Metadata{ - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", @@ -150,11 +150,11 @@ // call AddImage and ensure the original blob is removed. s.addMetadataDoc(c, "lxc", "trusty", "amd64", 3, "hash(abc)", "path", "http://path") managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - err := managedStorage.PutForEnvironment("my-uuid", "path", strings.NewReader("blah"), 4) + err := managedStorage.PutForBucket("my-uuid", "path", strings.NewReader("blah"), 4) c.Assert(err, gc.IsNil) addedMetadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", @@ -166,7 +166,7 @@ c.Assert(err, gc.IsNil) // old blob should be gone - _, _, err = managedStorage.GetForEnvironment("my-uuid", "path") + _, _, err = managedStorage.GetForBucket("my-uuid", "path") c.Assert(err, jc.Satisfies, errors.IsNotFound) s.assertImage(c, addedMetadata, "xyzzzz") @@ -179,13 +179,13 @@ // fails. 
s.addMetadataDoc(c, "lxc", "trusty", "amd64", 3, "hash(abc)", "path", "http://path") managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - err := managedStorage.PutForEnvironment("my-uuid", "path", strings.NewReader("blah"), 4) + err := managedStorage.PutForBucket("my-uuid", "path", strings.NewReader("blah"), 4) c.Assert(err, gc.IsNil) storage := imagestorage.NewStorage(s.session, "my-uuid") s.PatchValue(imagestorage.GetManagedStorage, imagestorage.RemoveFailsManagedStorage) addedMetadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", @@ -197,7 +197,7 @@ c.Assert(err, gc.IsNil) // old blob should still be there - r, _, err := managedStorage.GetForEnvironment("my-uuid", "path") + r, _, err := managedStorage.GetForBucket("my-uuid", "path") c.Assert(err, gc.IsNil) r.Close() @@ -216,12 +216,12 @@ storage := imagestorage.NewStorage(s.session, "my-uuid") s.txnRunner = errorTransactionRunner{s.txnRunner} addedMetadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", - Kind: "lxc", - Series: "trusty", - Arch: "amd64", - Size: 6, - SHA256: "hash", + ModelUUID: "my-uuid", + Kind: "lxc", + Series: "trusty", + Arch: "amd64", + Size: 6, + SHA256: "hash", } err := storage.AddImage(strings.NewReader("xyzzzz"), addedMetadata) c.Assert(err, gc.ErrorMatches, "cannot store image metadata: Run fails") @@ -229,7 +229,7 @@ path := fmt.Sprintf( "images/%s-%s-%s:%s", addedMetadata.Kind, addedMetadata.Series, addedMetadata.Arch, addedMetadata.SHA256) managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - _, _, err = managedStorage.GetForEnvironment("my-uuid", path) + _, _, err = managedStorage.GetForBucket("my-uuid", path) c.Assert(err, jc.Satisfies, errors.IsNotFound) } @@ -238,12 +238,12 @@ s.PatchValue(imagestorage.GetManagedStorage, imagestorage.RemoveFailsManagedStorage) s.txnRunner = errorTransactionRunner{s.txnRunner} addedMetadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", - Kind: "lxc", - Series: "trusty", - Arch: "amd64", - Size: 6, - SHA256: "hash", + ModelUUID: "my-uuid", + Kind: "lxc", + Series: "trusty", + Arch: "amd64", + Size: 6, + SHA256: "hash", } err := storage.AddImage(strings.NewReader("xyzzzz"), addedMetadata) c.Assert(err, gc.ErrorMatches, "cannot store image metadata: Run fails") @@ -252,14 +252,14 @@ path := fmt.Sprintf( "images/%s-%s-%s:%s", addedMetadata.Kind, addedMetadata.Series, addedMetadata.Arch, addedMetadata.SHA256) managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - r, _, err := managedStorage.GetForEnvironment("my-uuid", path) + r, _, err := managedStorage.GetForBucket("my-uuid", path) c.Assert(err, gc.IsNil) r.Close() } func (s *ImageSuite) TestAddImageSame(c *gc.C) { metadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "0", SourceURL: "http://path", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "0", SourceURL: "http://path", } for i := 0; i < 2; i++ { err := s.storage.AddImage(strings.NewReader("0"), metadata) @@ -288,17 +288,17 @@ func (s *ImageSuite) TestAddImageConcurrent(c *gc.C) { metadata0 := &imagestorage.Metadata{ - EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "0", SourceURL: "http://path", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "0", SourceURL: "http://path", } metadata1 := &imagestorage.Metadata{ - EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", 
Arch: "amd64", Size: 1, SHA256: "1", SourceURL: "http://path", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "1", SourceURL: "http://path", } addMetadata := func() { err := s.storage.AddImage(strings.NewReader("0"), metadata0) c.Assert(err, gc.IsNil) managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - r, _, err := managedStorage.GetForEnvironment("my-uuid", "images/lxc-trusty-amd64:0") + r, _, err := managedStorage.GetForBucket("my-uuid", "images/lxc-trusty-amd64:0") c.Assert(err, gc.IsNil) r.Close() } @@ -309,7 +309,7 @@ // Blob added in before-hook should be removed. managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - _, _, err = managedStorage.GetForEnvironment("my-uuid", "images/lxc-trusty-amd64:0") + _, _, err = managedStorage.GetForBucket("my-uuid", "images/lxc-trusty-amd64:0") c.Assert(err, jc.Satisfies, errors.IsNotFound) s.assertImage(c, metadata1, "1") @@ -317,10 +317,10 @@ func (s *ImageSuite) TestAddImageExcessiveContention(c *gc.C) { metadata := []*imagestorage.Metadata{ - {EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "0", SourceURL: "http://path"}, - {EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "1", SourceURL: "http://path"}, - {EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "2", SourceURL: "http://path"}, - {EnvUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "3", SourceURL: "http://path"}, + {ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "0", SourceURL: "http://path"}, + {ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "1", SourceURL: "http://path"}, + {ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "2", SourceURL: "http://path"}, + {ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", Size: 1, SHA256: "3", SourceURL: "http://path"}, } i := 1 @@ -338,7 +338,7 @@ for _, metadata := range metadata[:3] { path := fmt.Sprintf("images/%s-%s-%s:%s", metadata.Kind, metadata.Series, metadata.Arch, metadata.SHA256) managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - _, _, err = managedStorage.GetForEnvironment("my-uuid", path) + _, _, err = managedStorage.GetForBucket("my-uuid", path) c.Assert(err, jc.Satisfies, errors.IsNotFound) } @@ -348,7 +348,7 @@ func (s *ImageSuite) TestDeleteImage(c *gc.C) { s.addMetadataDoc(c, "lxc", "trusty", "amd64", 3, "hash(abc)", "images/lxc-trusty-amd64:sha256", "http://lxc-trusty-amd64") managedStorage := imagestorage.ManagedStorage(s.storage, s.session) - err := managedStorage.PutForEnvironment("my-uuid", "images/lxc-trusty-amd64:sha256", strings.NewReader("blah"), 4) + err := managedStorage.PutForBucket("my-uuid", "images/lxc-trusty-amd64:sha256", strings.NewReader("blah"), 4) c.Assert(err, gc.IsNil) _, rc, err := s.storage.Image("lxc", "trusty", "amd64") @@ -357,16 +357,16 @@ rc.Close() metadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", - Kind: "lxc", - Series: "trusty", - Arch: "amd64", - SHA256: "sha256", + ModelUUID: "my-uuid", + Kind: "lxc", + Series: "trusty", + Arch: "amd64", + SHA256: "sha256", } err = s.storage.DeleteImage(metadata) c.Assert(err, gc.IsNil) - _, _, err = managedStorage.GetForEnvironment("my-uuid", "images/lxc-trusty-amd64:sha256") + _, _, err = managedStorage.GetForBucket("my-uuid", "images/lxc-trusty-amd64:sha256") c.Assert(err, jc.Satisfies, 
errors.IsNotFound) _, _, err = s.storage.Image("lxc", "trusty", "amd64") @@ -375,11 +375,11 @@ func (s *ImageSuite) TestDeleteNotExistentImage(c *gc.C) { metadata := &imagestorage.Metadata{ - EnvUUID: "my-uuid", - Kind: "lxc", - Series: "trusty", - Arch: "amd64", - SHA256: "sha256", + ModelUUID: "my-uuid", + Kind: "lxc", + Series: "trusty", + Arch: "amd64", + SHA256: "sha256", } err := s.storage.DeleteImage(metadata) c.Assert(err, jc.Satisfies, errors.IsNotFound) @@ -388,7 +388,7 @@ func (s *ImageSuite) addMetadataDoc(c *gc.C, kind, series, arch string, size int64, checksum, path, sourceURL string) { doc := struct { Id string `bson:"_id"` - EnvUUID string `bson:"envuuid"` + ModelUUID string `bson:"modelUUID"` Kind string `bson:"kind"` Series string `bson:"series"` Arch string `bson:"arch"` @@ -399,7 +399,7 @@ SourceURL string `bson:"sourceurl"` }{ Id: fmt.Sprintf("my-uuid-%s-%s-%s", kind, series, arch), - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: kind, Series: series, Arch: arch, @@ -427,7 +427,7 @@ func (s *ImageSuite) createListImageMetadata(c *gc.C) []*imagestorage.Metadata { s.addMetadataDoc(c, "lxc", "trusty", "amd64", 3, "hash(abc)", "images/lxc-trusty-amd64:sha256", "http://lxc-trusty-amd64") metadataLxc := &imagestorage.Metadata{ - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: "lxc", Series: "trusty", Arch: "amd64", @@ -437,7 +437,7 @@ } s.addMetadataDoc(c, "kvm", "precise", "amd64", 4, "hash(abcd)", "images/kvm-precise-amd64:sha256", "http://kvm-precise-amd64") metadataKvm := &imagestorage.Metadata{ - EnvUUID: "my-uuid", + ModelUUID: "my-uuid", Kind: "kvm", Series: "precise", Arch: "amd64", === modified file 'src/github.com/juju/juju/state/imagestorage/interface.go' --- src/github.com/juju/juju/state/imagestorage/interface.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/imagestorage/interface.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ // Metadata describes an image blob. type Metadata struct { - EnvUUID string + ModelUUID string Series string Arch string Kind string === modified file 'src/github.com/juju/juju/state/initialize_test.go' --- src/github.com/juju/juju/state/initialize_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/initialize_test.go 2016-03-22 15:18:22 +0000 @@ -39,9 +39,9 @@ s.MgoSuite.SetUpTest(c) } -func (s *InitializeSuite) openState(c *gc.C, environTag names.EnvironTag) { +func (s *InitializeSuite) openState(c *gc.C, modelTag names.ModelTag) { st, err := state.Open( - environTag, + modelTag, statetesting.NewMongoInfo(), statetesting.NewDialOpts(), state.Policy(nil), @@ -60,43 +60,43 @@ } func (s *InitializeSuite) TestInitialize(c *gc.C) { - cfg := testing.EnvironConfig(c) + cfg := testing.ModelConfig(c) uuid, _ := cfg.UUID() initial := cfg.AllAttrs() owner := names.NewLocalUserTag("initialize-admin") st, err := state.Initialize(owner, statetesting.NewMongoInfo(), cfg, statetesting.NewDialOpts(), nil) c.Assert(err, jc.ErrorIsNil) c.Assert(st, gc.NotNil) - envTag := st.EnvironTag() - c.Assert(envTag.Id(), gc.Equals, uuid) + modelTag := st.ModelTag() + c.Assert(modelTag.Id(), gc.Equals, uuid) err = st.Close() c.Assert(err, jc.ErrorIsNil) - s.openState(c, envTag) + s.openState(c, modelTag) - cfg, err = s.State.EnvironConfig() + cfg, err = s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.AllAttrs(), gc.DeepEquals, initial) - // Check that the environment has been created. - env, err := s.State.Environment() + // Check that the model has been created. 
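+ // Its tag must match the one Initialize reported, and its owner must be
+ // the admin user passed in; both are asserted below.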
+ env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) - c.Assert(env.Tag(), gc.Equals, envTag) + c.Assert(env.Tag(), gc.Equals, modelTag) // Check that the owner has been created. c.Assert(env.Owner(), gc.Equals, owner) // Check that the owner can be retrieved by the tag. entity, err := s.State.FindEntity(env.Owner()) c.Assert(err, jc.ErrorIsNil) c.Assert(entity.Tag(), gc.Equals, owner) - // Check that the owner has an EnvUser created for the bootstrapped environment. - envUser, err := s.State.EnvironmentUser(env.Owner()) + // Check that the owner has a ModelUser created for the bootstrapped model. + modelUser, err := s.State.ModelUser(env.Owner()) c.Assert(err, jc.ErrorIsNil) - c.Assert(envUser.UserTag(), gc.Equals, owner) - c.Assert(envUser.EnvironmentTag(), gc.Equals, env.Tag()) + c.Assert(modelUser.UserTag(), gc.Equals, owner) + c.Assert(modelUser.ModelTag(), gc.Equals, env.Tag()) - // Check that the environment can be found through the tag. - entity, err = s.State.FindEntity(envTag) + // Check that the model can be found through the tag. + entity, err = s.State.FindEntity(modelTag) c.Assert(err, jc.ErrorIsNil) - cons, err := s.State.EnvironConstraints() + cons, err := s.State.ModelConstraints() c.Assert(err, jc.ErrorIsNil) c.Assert(&cons, jc.Satisfies, constraints.IsEmpty) @@ -104,13 +104,13 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(addrs, gc.HasLen, 0) - info, err := s.State.StateServerInfo() + info, err := s.State.ControllerInfo() c.Assert(err, jc.ErrorIsNil) - c.Assert(info, jc.DeepEquals, &state.StateServerInfo{EnvironmentTag: envTag}) + c.Assert(info, jc.DeepEquals, &state.ControllerInfo{ModelTag: modelTag}) } func (s *InitializeSuite) TestDoubleInitializeConfig(c *gc.C) { - cfg := testing.EnvironConfig(c) + cfg := testing.ModelConfig(c) owner := names.NewLocalUserTag("initialize-admin") mgoInfo := statetesting.NewMongoInfo() @@ -128,9 +128,9 @@ } } -func (s *InitializeSuite) TestEnvironConfigWithAdminSecret(c *gc.C) { +func (s *InitializeSuite) TestModelConfigWithAdminSecret(c *gc.C) { // admin-secret blocks Initialize. - good := testing.EnvironConfig(c) + good := testing.ModelConfig(c) badUpdateAttrs := map[string]interface{}{"admin-secret": "foo"} bad, err := good.Apply(badUpdateAttrs) owner := names.NewLocalUserTag("initialize-admin") @@ -138,23 +138,23 @@ _, err = state.Initialize(owner, statetesting.NewMongoInfo(), bad, statetesting.NewDialOpts(), state.Policy(nil)) c.Assert(err, gc.ErrorMatches, "admin-secret should never be written to the state") - // admin-secret blocks UpdateEnvironConfig. + // admin-secret blocks UpdateModelConfig. st := statetesting.Initialize(c, owner, good, nil) st.Close() - s.openState(c, st.EnvironTag()) - err = s.State.UpdateEnvironConfig(badUpdateAttrs, nil, nil) + s.openState(c, st.ModelTag()) + err = s.State.UpdateModelConfig(badUpdateAttrs, nil, nil) c.Assert(err, gc.ErrorMatches, "admin-secret should never be written to the state") - // EnvironConfig remains inviolate. - cfg, err := s.State.EnvironConfig() + // ModelConfig remains inviolate. + cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.AllAttrs(), gc.DeepEquals, good.AllAttrs()) } -func (s *InitializeSuite) TestEnvironConfigWithoutAgentVersion(c *gc.C) { +func (s *InitializeSuite) TestModelConfigWithoutAgentVersion(c *gc.C) { // admin-secret blocks Initialize.
- good := testing.EnvironConfig(c) + good := testing.ModelConfig(c) attrs := good.AllAttrs() delete(attrs, "agent-version") bad, err := config.New(config.NoDefaults, attrs) @@ -168,12 +168,12 @@ // yay side effects st.Close() - s.openState(c, st.EnvironTag()) - err = s.State.UpdateEnvironConfig(map[string]interface{}{}, []string{"agent-version"}, nil) + s.openState(c, st.ModelTag()) + err = s.State.UpdateModelConfig(map[string]interface{}{}, []string{"agent-version"}, nil) c.Assert(err, gc.ErrorMatches, "agent-version must always be set in state") - // EnvironConfig remains inviolate. - cfg, err := s.State.EnvironConfig() + // ModelConfig remains inviolate. + cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.AllAttrs(), gc.DeepEquals, good.AllAttrs()) } === modified file 'src/github.com/juju/juju/state/interface.go' --- src/github.com/juju/juju/state/interface.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/interface.go 2016-03-22 15:18:22 +0000 @@ -94,11 +94,11 @@ NotifyWatcherFactory } -// EnvironAccessor defines the methods needed to watch for environment -// config changes, and read the environment config. -type EnvironAccessor interface { - WatchForEnvironConfigChanges() NotifyWatcher - EnvironConfig() (*config.Config, error) +// ModelAccessor defines the methods needed to watch for model +// config changes, and read the model config. +type ModelAccessor interface { + WatchForModelConfigChanges() NotifyWatcher + ModelConfig() (*config.Config, error) } // UnitsWatcher defines the methods needed to retrieve an entity (a @@ -108,10 +108,10 @@ WatchUnits() StringsWatcher } -// EnvironMachinesWatcher defines a single method - -// WatchEnvironMachines. -type EnvironMachinesWatcher interface { - WatchEnvironMachines() StringsWatcher +// ModelMachinesWatcher defines a single method - +// WatchModelMachines. +type ModelMachinesWatcher interface { + WatchModelMachines() StringsWatcher } // InstanceIdGetter defines a single method - InstanceId. 
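The interface_test.go file that follows pins these contracts with compile-time assertions. The idiom, in a self-contained sketch (modelAccessor and fakeState are stand-ins, not juju's real types):

package main

// modelAccessor is a trimmed stand-in for the ModelAccessor interface above.
type modelAccessor interface {
	ModelConfig() (string, error)
}

// fakeState is a stand-in for *State.
type fakeState struct{}

func (*fakeState) ModelConfig() (string, error) { return "cfg", nil }

// A nil pointer conversion fails the build if fakeState ever stops
// satisfying modelAccessor; it costs nothing at runtime.
var _ modelAccessor = (*fakeState)(nil)

func main() {}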
=== modified file 'src/github.com/juju/juju/state/interface_test.go' --- src/github.com/juju/juju/state/interface_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/interface_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ _ Entity = (*Unit)(nil) _ Entity = (*UnitAgent)(nil) _ Entity = (*Service)(nil) - _ Entity = (*Environment)(nil) + _ Entity = (*Model)(nil) _ Entity = (*User)(nil) _ Entity = (*Action)(nil) _ Entity = (*IPAddress)(nil) @@ -37,17 +37,17 @@ _ NotifyWatcherFactory = (*Machine)(nil) _ NotifyWatcherFactory = (*Unit)(nil) _ NotifyWatcherFactory = (*Service)(nil) - _ NotifyWatcherFactory = (*Environment)(nil) + _ NotifyWatcherFactory = (*Model)(nil) _ AgentEntity = (*Machine)(nil) _ AgentEntity = (*Unit)(nil) - _ EnvironAccessor = (*State)(nil) + _ ModelAccessor = (*State)(nil) _ UnitsWatcher = (*Machine)(nil) _ UnitsWatcher = (*Service)(nil) - _ EnvironMachinesWatcher = (*State)(nil) + _ ModelMachinesWatcher = (*State)(nil) _ InstanceIdGetter = (*Machine)(nil) @@ -63,5 +63,5 @@ _ GlobalEntity = (*Unit)(nil) _ GlobalEntity = (*Service)(nil) _ GlobalEntity = (*Charm)(nil) - _ GlobalEntity = (*Environment)(nil) + _ GlobalEntity = (*Model)(nil) ) === modified file 'src/github.com/juju/juju/state/internal_test.go' --- src/github.com/juju/juju/state/internal_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/internal_test.go 2016-03-22 15:18:22 +0000 @@ -51,7 +51,7 @@ dialopts := mongo.DialOpts{ Timeout: testing.LongWait, } - st, err := Initialize(s.owner, info, testing.EnvironConfig(c), dialopts, nil) + st, err := Initialize(s.owner, info, testing.ModelConfig(c), dialopts, nil) c.Assert(err, jc.ErrorIsNil) s.state = st s.AddCleanup(func(*gc.C) { s.state.Close() }) === modified file 'src/github.com/juju/juju/state/ipaddresses.go' --- src/github.com/juju/juju/state/ipaddresses.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/ipaddresses.go 2016-03-22 15:18:22 +0000 @@ -36,20 +36,21 @@ addressID := st.docID(addr.Value) ipDoc := ipaddressDoc{ - DocID: addressID, - EnvUUID: st.EnvironUUID(), - UUID: uuid.String(), - Life: Alive, - State: AddressStateUnknown, - SubnetId: subnetid, - Value: addr.Value, - Type: string(addr.Type), - Scope: string(addr.Scope), + DocID: addressID, + ModelUUID: st.ModelUUID(), + UUID: uuid.String(), + Life: Alive, + State: AddressStateUnknown, + SubnetId: subnetid, + Value: addr.Value, + Type: string(addr.Type), + Scope: string(addr.Scope), + SpaceName: string(addr.SpaceName), } ipaddress = &IPAddress{doc: ipDoc, st: st} ops := []txn.Op{ - assertEnvAliveOp(st.EnvironUUID()), + assertModelAliveOp(st.ModelUUID()), { C: ipaddressesC, Id: addressID, @@ -61,7 +62,7 @@ err = st.runTransaction(ops) switch err { case txn.ErrAborted: - if err := checkEnvLife(st); err != nil { + if err := checkModeLife(st); err != nil { return nil, errors.Trace(err) } if _, err = st.IPAddress(addr.Value); err == nil { @@ -164,7 +165,7 @@ type ipaddressDoc struct { DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` UUID string `bson:"uuid"` Life Life `bson:"life"` SubnetId string `bson:"subnetid,omitempty"` @@ -176,6 +177,7 @@ Type string `bson:"type"` Scope string `bson:"networkscope,omitempty"` State AddressState `bson:"state"` + SpaceName string `bson:"spacename,omitempty"` } // Life returns whether the IP address is Alive, Dying or Dead. 
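The ipaddressDoc above keys its documents with st.docID(addr.Value), state's convention of prefixing local ids with the model UUID (the ipaddresses test later asserts exactly this form). A sketch of the convention; docID and localID here are hypothetical stand-ins for state's docID and strictLocalID helpers:

package main

import (
	"fmt"
	"strings"
)

// docID namespaces a local id by model UUID so multiple models can share
// one collection without clashing.
func docID(modelUUID, local string) string {
	return modelUUID + ":" + local
}

// localID reverses docID, rejecting ids that belong to other models.
func localID(modelUUID, id string) (string, error) {
	prefix := modelUUID + ":"
	if !strings.HasPrefix(id, prefix) {
		return "", fmt.Errorf("unexpected id format: %q", id)
	}
	return strings.TrimPrefix(id, prefix), nil
}

func main() {
	id := docID("some-model-uuid", "10.0.0.1")
	fmt.Println(id) // some-model-uuid:10.0.0.1
	local, _ := localID("some-model-uuid", id)
	fmt.Println(local) // 10.0.0.1
}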
@@ -397,7 +399,7 @@ buildTxn := func(attempt int) ([]txn.Op, error) { if attempt > 0 { - if err := checkEnvLife(i.st); err != nil { + if err := checkModeLife(i.st); err != nil { return nil, errors.Trace(err) } if err := i.Refresh(); errors.IsNotFound(err) { @@ -412,7 +414,7 @@ } return []txn.Op{ - assertEnvAliveOp(i.st.EnvironUUID()), + assertModelAliveOp(i.st.ModelUUID()), { C: ipaddressesC, Id: i.doc.DocID, === modified file 'src/github.com/juju/juju/state/ipaddresses_test.go' --- src/github.com/juju/juju/state/ipaddresses_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/ipaddresses_test.go 2016-03-22 15:18:22 +0000 @@ -39,7 +39,7 @@ c.Assert(ipAddr.State(), gc.Equals, ipState) c.Assert(ipAddr.Address(), jc.DeepEquals, addr) c.Assert(ipAddr.String(), gc.Equals, addr.String()) - c.Assert(ipAddr.Id(), gc.Equals, s.State.EnvironUUID()+":"+addr.Value) + c.Assert(ipAddr.Id(), gc.Equals, s.State.ModelUUID()+":"+addr.Value) c.Assert(ipAddr.InstanceId(), gc.Equals, instance.UnknownId) c.Assert(ipAddr.MACAddress(), gc.Equals, "") } === removed directory 'src/github.com/juju/juju/state/leadership' === modified file 'src/github.com/juju/juju/state/leadership.go' --- src/github.com/juju/juju/state/leadership.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership.go 2016-03-22 15:18:22 +0000 @@ -2,48 +2,36 @@ import ( "fmt" + "time" "github.com/juju/errors" + "github.com/juju/names" jujutxn "github.com/juju/txn" - "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" + corelease "github.com/juju/juju/core/lease" + "github.com/juju/juju/worker/lease" ) -const settingsKey = "s#%s#leader" - -func addLeadershipSettingsOp(serviceId string) txn.Op { - return txn.Op{ - C: settingsC, - Id: leadershipSettingsDocId(serviceId), - Insert: bson.D{}, - Assert: txn.DocMissing, - } -} - func removeLeadershipSettingsOp(serviceId string) txn.Op { - return txn.Op{ - C: settingsC, - Id: leadershipSettingsDocId(serviceId), - Remove: true, - } + return removeSettingsOp(leadershipSettingsKey(serviceId)) } -func leadershipSettingsDocId(serviceId string) string { - return fmt.Sprintf(settingsKey, serviceId) +func leadershipSettingsKey(serviceId string) string { + return fmt.Sprintf("s#%s#leader", serviceId) } // LeadershipClaimer returns a leadership.Claimer for units and services in the -// state's environment. +// state's model. func (st *State) LeadershipClaimer() leadership.Claimer { - return st.leadershipManager + return leadershipClaimer{st.leadershipManager} } // LeadershipChecker returns a leadership.Checker for units and services in the -// state's environment. +// state's model. func (st *State) LeadershipChecker() leadership.Checker { - return st.leadershipManager + return leadershipChecker{st.leadershipManager} } // HackLeadership stops the state's internal leadership manager to prevent it @@ -63,6 +51,7 @@ // the shared component should successfully goose them all into shutting down, // in parallel, of their own accord.) st.leadershipManager.Kill() + st.singularManager.Kill() } // buildTxnWithLeadership returns a transaction source that combines the supplied source @@ -82,3 +71,82 @@ return append(prereqs, ops...), nil } } + +// leadershipSecretary implements lease.Secretary; it checks that leases are +// service names, and holders are unit names. +type leadershipSecretary struct{} + +// CheckLease is part of the lease.Secretary interface. 
+func (leadershipSecretary) CheckLease(name string) error { + if !names.IsValidService(name) { + return errors.NewNotValid(nil, "not a service name") + } + return nil +} + +// CheckHolder is part of the lease.Secretary interface. +func (leadershipSecretary) CheckHolder(name string) error { + if !names.IsValidUnit(name) { + return errors.NewNotValid(nil, "not a unit name") + } + return nil +} + +// CheckDuration is part of the lease.Secretary interface. +func (leadershipSecretary) CheckDuration(duration time.Duration) error { + if duration <= 0 { + return errors.NewNotValid(nil, "non-positive") + } + return nil +} + +// leadershipChecker implements leadership.Checker by wrapping a lease.Manager. +type leadershipChecker struct { + manager *lease.Manager +} + +// LeadershipCheck is part of the leadership.Checker interface. +func (m leadershipChecker) LeadershipCheck(serviceName, unitName string) leadership.Token { + token := m.manager.Token(serviceName, unitName) + return leadershipToken{ + serviceName: serviceName, + unitName: unitName, + token: token, + } +} + +// leadershipToken implements leadership.Token by wrapping a corelease.Token. +type leadershipToken struct { + serviceName string + unitName string + token corelease.Token +} + +// Check is part of the leadership.Token interface. +func (t leadershipToken) Check(out interface{}) error { + err := t.token.Check(out) + if errors.Cause(err) == corelease.ErrNotHeld { + return errors.Errorf("%q is not leader of %q", t.unitName, t.serviceName) + } + return errors.Trace(err) +} + +// leadershipClaimer implements leadership.Claimer by wrapping a lease.Manager. +type leadershipClaimer struct { + manager *lease.Manager +} + +// ClaimLeadership is part of the leadership.Claimer interface. +func (m leadershipClaimer) ClaimLeadership(serviceName, unitName string, duration time.Duration) error { + err := m.manager.Claim(serviceName, unitName, duration) + if errors.Cause(err) == corelease.ErrClaimDenied { + return leadership.ErrClaimDenied + } + return errors.Trace(err) +} + +// BlockUntilLeadershipReleased is part of the leadership.Claimer interface. +func (m leadershipClaimer) BlockUntilLeadershipReleased(serviceName string) error { + err := m.manager.WaitUntilExpired(serviceName) + return errors.Trace(err) +} === removed file 'src/github.com/juju/juju/state/leadership/block.go' --- src/github.com/juju/juju/state/leadership/block.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/block.go 1970-01-01 00:00:00 +0000 @@ -1,68 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "github.com/juju/errors" - "github.com/juju/names" -) - -// block is used to deliver leaderlessness-notification requests to a manager's -// loop goroutine on behalf of BlockUntilLeadershipReleased. -type block struct { - serviceName string - unblock chan struct{} - abort <-chan struct{} -} - -// validate returns an error if any fields are invalid or missing. -func (b block) validate() error { - if !names.IsValidService(b.serviceName) { - return errors.Errorf("invalid service name %q", b.serviceName) - } - if b.unblock == nil { - return errors.New("missing unblock channel") - } - if b.abort == nil { - return errors.New("missing abort channel") - } - return nil -} - -// invoke sends the block request on the supplied channel, and waits for the -// unblock channel to be closed.
-func (b block) invoke(ch chan<- block) error { - if err := b.validate(); err != nil { - return errors.Annotatef(err, "cannot wait for leaderlessness") - } - for { - select { - case <-b.abort: - return errStopped - case ch <- b: - ch = nil - case <-b.unblock: - return nil - } - } -} - -// blocks is used to keep track of leaderlessness-notification channels for -// each service name. -type blocks map[string][]chan struct{} - -// add records the block's unblock channel under the block's service name. -func (b blocks) add(block block) { - b[block.serviceName] = append(b[block.serviceName], block.unblock) -} - -// unblock closes all channels added under the supplied name and removes -// them from blocks. -func (b blocks) unblock(serviceName string) { - unblocks := b[serviceName] - delete(b, serviceName) - for _, unblock := range unblocks { - close(unblock) - } -} === removed file 'src/github.com/juju/juju/state/leadership/check.go' --- src/github.com/juju/juju/state/leadership/check.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/check.go 1970-01-01 00:00:00 +0000 @@ -1,71 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "github.com/juju/errors" - "github.com/juju/names" - "gopkg.in/mgo.v2/txn" -) - -// check is used to deliver leadership-check requests to a manager's loop -// goroutine on behalf of LeadershipCheck. -type check struct { - serviceName string - unitName string - response chan txn.Op - abort <-chan struct{} -} - -// validate returns an error if any fields are invalid or missing. -func (c check) validate() error { - if !names.IsValidService(c.serviceName) { - return errors.Errorf("invalid service name %q", c.serviceName) - } - if !names.IsValidUnit(c.unitName) { - return errors.Errorf("invalid unit name %q", c.unitName) - } - if c.response == nil { - return errors.New("missing response channel") - } - if c.abort == nil { - return errors.New("missing abort channel") - } - return nil -} - -// invoke sends the check on the supplied channel, waits for a response, and -// returns either a txn.Op that can be used to assert continued leadership in -// the future, or an error. -func (c check) invoke(ch chan<- check) (txn.Op, error) { - if err := c.validate(); err != nil { - return txn.Op{}, errors.Annotatef(err, "cannot check leadership") - } - for { - select { - case <-c.abort: - return txn.Op{}, errStopped - case ch <- c: - ch = nil - case op, ok := <-c.response: - if !ok { - return txn.Op{}, errors.Errorf("%q is not leader of %q", c.unitName, c.serviceName) - } - return op, nil - } - } -} - -// succeed sends the supplied operation back to the originating invoke. -func (c check) succeed(op txn.Op) { - select { - case <-c.abort: - case c.response <- op: - } -} - -// fail causes the originating invoke to return an error indicating non-leadership. -func (c check) fail() { - close(c.response) -} === removed file 'src/github.com/juju/juju/state/leadership/claim.go' --- src/github.com/juju/juju/state/leadership/claim.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/claim.go 1970-01-01 00:00:00 +0000 @@ -1,71 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package leadership - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/leadership" -) - -// claim is used to deliver leadership-claim requests to a manager's loop -// goroutine on behalf of ClaimLeadership. -type claim struct { - serviceName string - unitName string - duration time.Duration - response chan bool - abort <-chan struct{} -} - -// validate returns an error if any fields are invalid or missing. -func (c claim) validate() error { - if !names.IsValidService(c.serviceName) { - return errors.Errorf("invalid service name %q", c.serviceName) - } - if !names.IsValidUnit(c.unitName) { - return errors.Errorf("invalid unit name %q", c.unitName) - } - if c.duration <= 0 { - return errors.Errorf("invalid duration %v", c.duration) - } - if c.response == nil { - return errors.New("missing response channel") - } - if c.abort == nil { - return errors.New("missing abort channel") - } - return nil -} - -// invoke sends the claim on the supplied channel and waits for a response. -func (c claim) invoke(ch chan<- claim) error { - if err := c.validate(); err != nil { - return errors.Annotatef(err, "cannot claim leadership") - } - for { - select { - case <-c.abort: - return errStopped - case ch <- c: - ch = nil - case success := <-c.response: - if !success { - return leadership.ErrClaimDenied - } - return nil - } - } -} - -// respond causes the supplied success value to be sent back to invoke. -func (c claim) respond(success bool) { - select { - case <-c.abort: - case c.response <- success: - } -} === removed file 'src/github.com/juju/juju/state/leadership/config.go' --- src/github.com/juju/juju/state/leadership/config.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/leadership/config.go 1970-01-01 00:00:00 +0000 @@ -1,43 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/utils/clock" - - "github.com/juju/juju/state/lease" -) - -// ManagerConfig contains the resources and information required to create a -// Manager. -type ManagerConfig struct { - - // Client reads and writes lease data. - Client lease.Client - - // Clock supplies time services. - Clock clock.Clock - - // MaxSleep is the longest time the Manager should sleep before - // refreshing its client's leases and checking for expiries. - MaxSleep time.Duration -} - -// Validate returns an error if the configuration contains invalid information -// or missing resources. -func (config ManagerConfig) Validate() error { - if config.Client == nil { - return errors.NotValidf("nil Client") - } - if config.Clock == nil { - return errors.NotValidf("nil Clock") - } - if config.MaxSleep <= 0 { - return errors.NotValidf("non-positive MaxSleep") - } - return nil -} === removed file 'src/github.com/juju/juju/state/leadership/fixture_test.go' --- src/github.com/juju/juju/state/leadership/fixture_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/leadership/fixture_test.go 1970-01-01 00:00:00 +0000 @@ -1,107 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package leadership_test - -import ( - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/state/leadership" - "github.com/juju/juju/state/lease" - "github.com/juju/juju/testing" -) - -const ( - defaultMaxSleep = time.Hour - almostOneSecond = time.Second - time.Nanosecond -) - -var ( - defaultClockStart time.Time -) - -func init() { - // We pick a time with a comfortable h:m:s component but: - // (1) past the int32 unix epoch limit; - // (2) at a 5ns offset to make sure we're not discarding precision; - // (3) in a weird time zone. - value := "2073-03-03T01:00:00.000000005-08:40" - var err error - defaultClockStart, err = time.Parse(time.RFC3339Nano, value) - if err != nil { - panic(err) - } -} - -// offset returns the result of defaultClockStart.Add(d); it exists to make -// expiry tests easier to write. -func offset(d time.Duration) time.Time { - return defaultClockStart.Add(d) -} - -// almostSeconds returns a duration smaller than the supplied number of -// seconds by one nanosecond. -func almostSeconds(seconds int) time.Duration { - if seconds < 1 { - panic("unexpected") - } - return (time.Second * time.Duration(seconds)) - time.Nanosecond -} - -// Fixture allows us to test a leadership.ManagerWorker with a usefully-mocked -// clock.Clock and lease.Client. -type Fixture struct { - - // leases contains the leases the lease.Client should report when the - // test starts up. - leases map[string]lease.Info - - // expectCalls contains the calls that should be made to the lease.Client - // in the course of a test. By specifying a callback you can cause the - // reported leases to change. - expectCalls []call - - // expectDirty should be set for tests that purposefully abuse the manager - // to the extent that it returns an error on Wait(); tests that don't set - // this flag will check that the manager's shutdown error is nil. - expectDirty bool -} - -// RunTest sets up a Manager and a Clock and passes them into the supplied -// test function. The manager will be cleaned up afterwards. -func (fix *Fixture) RunTest(c *gc.C, test func(leadership.ManagerWorker, *testing.Clock)) { - clock := testing.NewClock(defaultClockStart) - client := NewClient(fix.leases, fix.expectCalls) - manager, err := leadership.NewManager(leadership.ManagerConfig{ - Clock: clock, - Client: client, - MaxSleep: defaultMaxSleep, - }) - c.Assert(err, jc.ErrorIsNil) - defer func() { - // Dirty tests will probably have stopped the manager anyway, but no - // sense leaving them around if things aren't exactly as we expect. - manager.Kill() - err := manager.Wait() - if !fix.expectDirty { - c.Check(err, jc.ErrorIsNil) - } - }() - defer client.Wait(c) - waitAlarms(c, clock, 1) - test(manager, clock) -} - -func waitAlarms(c *gc.C, clock *testing.Clock, count int) { - timeout := time.After(testing.LongWait) - for i := 0; i < count; i++ { - select { - case <-clock.Alarms(): - case <-timeout: - c.Fatalf("timed out waiting for %dth alarm set", i) - } - } -} === removed file 'src/github.com/juju/juju/state/leadership/interface.go' --- src/github.com/juju/juju/state/leadership/interface.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/interface.go 1970-01-01 00:00:00 +0000 @@ -1,25 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "github.com/juju/errors" - - "github.com/juju/juju/leadership" -) - -// ManagerWorker implements leadership functions, and worker.Worker. 
We don't -// import worker because it pulls in a lot of dependencies and causes import -// cycles when you try to use leadership in state. We should break this cycle -// elsewhere if we can. -type ManagerWorker interface { - leadership.Checker - leadership.Claimer - Kill() - Wait() error -} - -// errStopped is returned to clients when an operation cannot complete because -// the manager has started (and possibly finished) shutdown. -var errStopped = errors.New("leadership manager stopped") === removed file 'src/github.com/juju/juju/state/leadership/manager.go' --- src/github.com/juju/juju/state/leadership/manager.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/leadership/manager.go 1970-01-01 00:00:00 +0000 @@ -1,254 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "sort" - "time" - - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/utils/clock" - "launchpad.net/tomb" - - "github.com/juju/juju/leadership" - "github.com/juju/juju/state/lease" -) - -var logger = loggo.GetLogger("juju.state.leadership") - -// NewManager returns a Manager implementation, backed by a lease.Client, -// which (in addition to its exposed Manager capabilities) will expire all -// known leases as they run out. The caller takes responsibility for killing, -// and handling errors from, the returned Worker. -func NewManager(config ManagerConfig) (ManagerWorker, error) { - if err := config.Validate(); err != nil { - return nil, errors.Trace(err) - } - manager := &manager{ - config: config, - claims: make(chan claim), - checks: make(chan check), - blocks: make(chan block), - } - go func() { - defer manager.tomb.Done() - // note: we don't directly tomb.Kill, because we may need to - // unwrap tomb.ErrDying in order to function correctly. - manager.kill(manager.loop()) - }() - return manager, nil -} - -// manager implements ManagerWorker. -type manager struct { - tomb tomb.Tomb - - // config collects all external configuration and dependencies. - config ManagerConfig - - // claims is used to deliver leadership claim requests to the loop. - claims chan claim - - // checks is used to deliver leadership check requests to the loop. - checks chan check - - // blocks is used to deliver leaderlessness block requests to the loop. - blocks chan block -} - -// Kill is part of the worker.Worker interface. -func (manager *manager) Kill() { - manager.kill(nil) -} - -// kill unwraps tomb.ErrDying before killing the tomb, thus allowing the worker -// to use errors.Trace liberally and still stop cleanly. -func (manager *manager) kill(err error) { - if errors.Cause(err) == tomb.ErrDying { - err = tomb.ErrDying - } else if err != nil { - logger.Errorf("stopping leadership manager with error: %v", err) - } - manager.tomb.Kill(err) -} - -// Wait is part of the worker.Worker interface. -func (manager *manager) Wait() error { - return manager.tomb.Wait() -} - -// loop runs until the manager is stopped. -func (manager *manager) loop() error { - blocks := make(blocks) - for { - if err := manager.choose(blocks); err != nil { - return errors.Trace(err) - } - - leases := manager.config.Client.Leases() - for serviceName := range blocks { - if _, found := leases[serviceName]; !found { - blocks.unblock(serviceName) - } - } - } -} - -// choose breaks the select out of loop to make the blocking logic clearer. 
-func (manager *manager) choose(blocks blocks) error { - select { - case <-manager.tomb.Dying(): - return tomb.ErrDying - case <-manager.nextTick(): - return manager.tick() - case claim := <-manager.claims: - return manager.handleClaim(claim) - case check := <-manager.checks: - return manager.handleCheck(check) - case block := <-manager.blocks: - blocks.add(block) - return nil - } -} - -// ClaimLeadership is part of the leadership.Claimer interface. -func (manager *manager) ClaimLeadership(serviceName, unitName string, duration time.Duration) error { - return claim{ - serviceName: serviceName, - unitName: unitName, - duration: duration, - response: make(chan bool), - abort: manager.tomb.Dying(), - }.invoke(manager.claims) -} - -// handleClaim processes and responds to the supplied claim. It will only return -// unrecoverable errors; mere failure to claim just indicates a bad request, and -// is communicated back to the claim's originator. -func (manager *manager) handleClaim(claim claim) error { - client := manager.config.Client - request := lease.Request{claim.unitName, claim.duration} - err := lease.ErrInvalid - for err == lease.ErrInvalid { - select { - case <-manager.tomb.Dying(): - return tomb.ErrDying - default: - info, found := client.Leases()[claim.serviceName] - switch { - case !found: - err = client.ClaimLease(claim.serviceName, request) - case info.Holder == claim.unitName: - err = client.ExtendLease(claim.serviceName, request) - default: - claim.respond(false) - return nil - } - } - } - if err != nil { - return errors.Trace(err) - } - claim.respond(true) - return nil -} - -// LeadershipCheck is part of the leadership.Checker interface. -// -// The token returned will accept a `*[]txn.Op` passed to Check, and will -// populate it with transaction operations that will fail if the unit is -// not leader of the service. -func (manager *manager) LeadershipCheck(serviceName, unitName string) leadership.Token { - return token{ - serviceName: serviceName, - unitName: unitName, - checks: manager.checks, - abort: manager.tomb.Dying(), - } -} - -// handleCheck processes and responds to the supplied check. It will only return -// unrecoverable errors; mere untruth of the assertion just indicates a bad -// request, and is communicated back to the check's originator. -func (manager *manager) handleCheck(check check) error { - client := manager.config.Client - info, found := client.Leases()[check.serviceName] - if !found || info.Holder != check.unitName { - if err := client.Refresh(); err != nil { - return errors.Trace(err) - } - info, found = client.Leases()[check.serviceName] - } - if found && info.Holder == check.unitName { - check.succeed(info.AssertOp) - } else { - check.fail() - } - return nil -} - -// BlockUntilLeadershipReleased is part of the leadership.Claimer interface. -func (manager *manager) BlockUntilLeadershipReleased(serviceName string) error { - return block{ - serviceName: serviceName, - unblock: make(chan struct{}), - abort: manager.tomb.Dying(), - }.invoke(manager.blocks) -} - -// nextTick returns a channel that will send a value at some point when -// we expect to have to do some work; either because at least one lease -// may be ready to expire, or because enough time has passed that -// it's worth checking for stalled collaborators.
-func (manager *manager) nextTick() <-chan time.Time { - now := manager.config.Clock.Now() - nextTick := now.Add(manager.config.MaxSleep) - for _, info := range manager.config.Client.Leases() { - if info.Expiry.After(nextTick) { - continue - } - nextTick = info.Expiry - } - logger.Debugf("waking to check leases at %s", nextTick) - return clock.Alarm(manager.config.Clock, nextTick) -} - -// tick snapshots recent leases and expires any that it can. There -// might be none that need attention; or those that do might already -// have been extended or expired by someone else; so ErrInvalid is -// expected, and ignored, comfortable that the client will have been -// updated in the background; and that we'll see fresh info when we -// subsequently check nextTick(). -// -// It will return only unrecoverable errors. -func (manager *manager) tick() error { - logger.Tracef("refreshing leases...") - client := manager.config.Client - if err := client.Refresh(); err != nil { - return errors.Trace(err) - } - leases := client.Leases() - - // Sort lease names so we expire in a predictable order for the tests. - names := make([]string, 0, len(leases)) - for name := range leases { - names = append(names, name) - } - sort.Strings(names) - - logger.Tracef("expiring leases...") - now := manager.config.Clock.Now() - for _, name := range names { - if leases[name].Expiry.After(now) { - continue - } - switch err := client.ExpireLease(name); err { - case nil, lease.ErrInvalid: - default: - return errors.Trace(err) - } - } - return nil -} === removed file 'src/github.com/juju/juju/state/leadership/manager_block_test.go' --- src/github.com/juju/juju/state/leadership/manager_block_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/leadership/manager_block_test.go 1970-01-01 00:00:00 +0000 @@ -1,237 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership_test - -import ( - "time" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/state/leadership" - "github.com/juju/juju/state/lease" - coretesting "github.com/juju/juju/testing" -) - -type BlockUntilLeadershipReleasedSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&BlockUntilLeadershipReleasedSuite{}) - -func (s *BlockUntilLeadershipReleasedSuite) TestLeadershipNotHeld(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - blockTest := newBlockTest(manager, "redis") - err := blockTest.assertUnblocked(c) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *BlockUntilLeadershipReleasedSuite) TestLeadershipExpires(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - blockTest := newBlockTest(manager, "redis") - blockTest.assertBlocked(c) - - // Trigger expiry.
- clock.Advance(time.Second) - err := blockTest.assertUnblocked(c) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *BlockUntilLeadershipReleasedSuite) TestLeadershipChanged(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/99", - Expiry: offset(time.Minute), - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - blockTest := newBlockTest(manager, "redis") - blockTest.assertBlocked(c) - - // Trigger abortive expiry. - clock.Advance(time.Second) - blockTest.assertBlocked(c) - }) -} - -func (s *BlockUntilLeadershipReleasedSuite) TestLeadershipExpiredEarly(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "Refresh", - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - blockTest := newBlockTest(manager, "redis") - blockTest.assertBlocked(c) - - // Induce a refresh by making an unexpected check; it turns out the - // lease had already been expired by someone else. - manager.LeadershipCheck("redis", "redis/99").Check(nil) - err := blockTest.assertUnblocked(c) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *BlockUntilLeadershipReleasedSuite) TestMultiple(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - "store": lease.Info{ - Holder: "store/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - leases["store"] = lease.Info{ - Holder: "store/9", - Expiry: offset(time.Minute), - } - }, - }, { - method: "ExpireLease", - args: []interface{}{"store"}, - err: lease.ErrInvalid, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - redisTest1 := newBlockTest(manager, "redis") - redisTest1.assertBlocked(c) - redisTest2 := newBlockTest(manager, "redis") - redisTest2.assertBlocked(c) - storeTest1 := newBlockTest(manager, "store") - storeTest1.assertBlocked(c) - storeTest2 := newBlockTest(manager, "store") - storeTest2.assertBlocked(c) - - // Induce attempted expiry; redis was expired already, store was - // refreshed and not expired. 
- clock.Advance(time.Second) - err := redisTest2.assertUnblocked(c) - c.Check(err, jc.ErrorIsNil) - err = redisTest1.assertUnblocked(c) - c.Check(err, jc.ErrorIsNil) - storeTest2.assertBlocked(c) - storeTest1.assertBlocked(c) - }) -} - -func (s *BlockUntilLeadershipReleasedSuite) TestKillManager(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - blockTest := newBlockTest(manager, "redis") - blockTest.assertBlocked(c) - - manager.Kill() - err := blockTest.assertUnblocked(c) - c.Check(err, gc.ErrorMatches, "leadership manager stopped") - }) -} - -// blockTest wraps a goroutine running BlockUntilLeadershipReleased, and -// fails if it's used more than a second after creation (which should be -// *plenty* of time). -type blockTest struct { - manager leadership.ManagerWorker - serviceName string - done chan error - abort <-chan time.Time -} - -// newBlockTest starts a test goroutine blocking until the manager confirms -// leaderlessness of the named service. -func newBlockTest(manager leadership.ManagerWorker, serviceName string) *blockTest { - bt := &blockTest{ - manager: manager, - serviceName: serviceName, - done: make(chan error), - abort: time.After(time.Second), - } - go func() { - select { - case <-bt.abort: - case bt.done <- bt.manager.BlockUntilLeadershipReleased(bt.serviceName): - } - }() - return bt -} - -func (bt *blockTest) assertBlocked(c *gc.C) { - select { - case err := <-bt.done: - c.Fatalf("unblocked unexpectedly with %v", err) - default: - } -} - -func (bt *blockTest) assertUnblocked(c *gc.C) error { - select { - case err := <-bt.done: - return err - case <-bt.abort: - c.Fatalf("timed out before unblocking") - } - panic("unreachable") -} === removed file 'src/github.com/juju/juju/state/leadership/manager_check_test.go' --- src/github.com/juju/juju/state/leadership/manager_check_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/manager_check_test.go 1970-01-01 00:00:00 +0000 @@ -1,138 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package leadership_test - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/mgo.v2/txn" - - coreleadership "github.com/juju/juju/leadership" - "github.com/juju/juju/state/leadership" - "github.com/juju/juju/state/lease" - coretesting "github.com/juju/juju/testing" -) - -type LeadershipCheckSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&LeadershipCheckSuite{}) - -func (s *LeadershipCheckSuite) TestSuccess(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - AssertOp: txn.Op{C: "fake", Id: "fake"}, - }, - }, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(assertOps(c, token), jc.DeepEquals, []txn.Op{{ - C: "fake", Id: "fake", - }}) - }) -} - -func (s *LeadershipCheckSuite) TestMissingRefresh_Success(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "Refresh", - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - AssertOp: txn.Op{C: "fake", Id: "fake"}, - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(assertOps(c, token), jc.DeepEquals, []txn.Op{{ - C: "fake", Id: "fake", - }}) - }) -} - -func (s *LeadershipCheckSuite) TestOtherHolderRefresh_Success(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "Refresh", - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - AssertOp: txn.Op{C: "fake", Id: "fake"}, - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(assertOps(c, token), jc.DeepEquals, []txn.Op{{ - C: "fake", Id: "fake", - }}) - }) -} - -func (s *LeadershipCheckSuite) TestRefresh_Failure_Missing(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "Refresh", - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(token.Check(nil), gc.ErrorMatches, `"redis/0" is not leader of "redis"`) - }) -} - -func (s *LeadershipCheckSuite) TestRefresh_Failure_OtherHolder(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "Refresh", - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/1", - Expiry: offset(time.Second), - AssertOp: txn.Op{C: "fake", Id: "fake"}, - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(token.Check(nil), gc.ErrorMatches, `"redis/0" is not leader of "redis"`) - }) -} - -func (s *LeadershipCheckSuite) TestRefresh_Error(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "Refresh", - err: errors.New("crunch squish"), - }}, - expectDirty: true, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(token.Check(nil), gc.ErrorMatches, "leadership manager stopped") - err := manager.Wait() - c.Check(err, gc.ErrorMatches, "crunch squish") - }) -} - -func assertOps(c *gc.C, token 
coreleadership.Token) (out []txn.Op) { - err := token.Check(&out) - c.Check(err, jc.ErrorIsNil) - return out -} === removed file 'src/github.com/juju/juju/state/leadership/manager_claim_test.go' --- src/github.com/juju/juju/state/leadership/manager_claim_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/manager_claim_test.go 1970-01-01 00:00:00 +0000 @@ -1,206 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership_test - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - coreleadership "github.com/juju/juju/leadership" - "github.com/juju/juju/state/leadership" - "github.com/juju/juju/state/lease" - coretesting "github.com/juju/juju/testing" -) - -type ClaimLeadershipSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&ClaimLeadershipSuite{}) - -func (s *ClaimLeadershipSuite) TestClaimLease_Success(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *ClaimLeadershipSuite) TestClaimLease_Success_SameHolder(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - } - }, - }, { - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *ClaimLeadershipSuite) TestClaimLease_Failure_OtherHolder(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/1", - Expiry: offset(time.Second), - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, gc.Equals, coreleadership.ErrClaimDenied) - }) -} - -func (s *ClaimLeadershipSuite) TestClaimLease_Failure_Error(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - err: errors.New("lol borken"), - }}, - expectDirty: true, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, gc.ErrorMatches, "leadership manager stopped") - err = manager.Wait() - c.Check(err, gc.ErrorMatches, "lol borken") - }) -} - -func (s *ClaimLeadershipSuite) TestExtendLease_Success(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - }}, - } - 
fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *ClaimLeadershipSuite) TestExtendLease_Success_Expired(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }, { - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *ClaimLeadershipSuite) TestExtendLease_Failure_OtherHolder(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/1", - Expiry: offset(time.Second), - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, gc.Equals, coreleadership.ErrClaimDenied) - }) -} - -func (s *ClaimLeadershipSuite) TestExtendLease_Failure_Error(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - err: errors.New("boom splat"), - }}, - expectDirty: true, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, gc.ErrorMatches, "leadership manager stopped") - err = manager.Wait() - c.Check(err, gc.ErrorMatches, "boom splat") - }) -} - -func (s *ClaimLeadershipSuite) TestOtherHolder_Failure(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/1", - Expiry: offset(time.Second), - }, - }, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Check(err, gc.Equals, coreleadership.ErrClaimDenied) - }) -} === removed file 'src/github.com/juju/juju/state/leadership/manager_expire_test.go' --- src/github.com/juju/juju/state/leadership/manager_expire_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/leadership/manager_expire_test.go 1970-01-01 00:00:00 +0000 @@ -1,332 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package leadership_test - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/state/leadership" - "github.com/juju/juju/state/lease" - coretesting "github.com/juju/juju/testing" -) - -type ExpireLeadershipSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&ExpireLeadershipSuite{}) - -func (s *ExpireLeadershipSuite) TestStartup_ExpiryInPast(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{Expiry: offset(-time.Second)}, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(_ leadership.ManagerWorker, _ *coretesting.Clock) {}) -} - -func (s *ExpireLeadershipSuite) TestStartup_ExpiryInFuture(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{Expiry: offset(time.Second)}, - }, - } - fix.RunTest(c, func(_ leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(almostSeconds(1)) - }) -} - -func (s *ExpireLeadershipSuite) TestStartup_ExpiryInFuture_TimePasses(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{Expiry: offset(time.Second)}, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(_ leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(time.Second) - }) -} - -func (s *ExpireLeadershipSuite) TestStartup_NoExpiry_NotLongEnough(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(_ leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(almostSeconds(3600)) - }) -} - -func (s *ExpireLeadershipSuite) TestStartup_NoExpiry_LongEnough(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "goose": lease.Info{Expiry: offset(3 * time.Hour)}, - }, - expectCalls: []call{{ - method: "Refresh", - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Expiry: offset(time.Minute), - } - }, - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(_ leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(time.Hour) - }) -} - -func (s *ExpireLeadershipSuite) TestExpire_ErrInvalid_Expired(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{Expiry: offset(time.Second)}, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(_ leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(time.Second) - }) -} - -func (s *ExpireLeadershipSuite) TestExpire_ErrInvalid_Updated(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{Expiry: offset(time.Second)}, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{Expiry: offset(time.Minute)} - }, - }}, - } - fix.RunTest(c, func(_ leadership.ManagerWorker, clock 
*coretesting.Clock) { - clock.Advance(time.Second) - }) -} - -func (s *ExpireLeadershipSuite) TestExpire_OtherError(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{Expiry: offset(time.Second)}, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - err: errors.New("snarfblat hobalob"), - }}, - expectDirty: true, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(time.Second) - err := manager.Wait() - c.Check(err, gc.ErrorMatches, "snarfblat hobalob") - }) -} - -func (s *ExpireLeadershipSuite) TestClaim_ExpiryInFuture(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(63 * time.Second), - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - // Ask for a minute, actually get 63s. Don't expire early. - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Assert(err, jc.ErrorIsNil) - clock.Advance(almostSeconds(63)) - }) -} - -func (s *ExpireLeadershipSuite) TestClaim_ExpiryInFuture_TimePasses(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: "ClaimLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(63 * time.Second), - } - }, - }, { - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - // Ask for a minute, actually get 63s. Expire on time. - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Assert(err, jc.ErrorIsNil) - clock.Advance(63 * time.Second) - }) -} - -func (s *ExpireLeadershipSuite) TestExtend_ExpiryInFuture(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(63 * time.Second), - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - // Ask for a minute, actually get 63s. Don't expire early. 
- err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Assert(err, jc.ErrorIsNil) - clock.Advance(almostSeconds(63)) - }) -} - -func (s *ExpireLeadershipSuite) TestExtend_ExpiryInFuture_TimePasses(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - }, - expectCalls: []call{{ - method: "ExtendLease", - args: []interface{}{"redis", lease.Request{"redis/0", time.Minute}}, - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(63 * time.Second), - } - }, - }, { - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - // Ask for a minute, actually get 63s. Expire on time. - err := manager.ClaimLeadership("redis", "redis/0", time.Minute) - c.Assert(err, jc.ErrorIsNil) - clock.Advance(63 * time.Second) - }) -} - -func (s *ExpireLeadershipSuite) TestExpire_Multiple(c *gc.C) { - fix := &Fixture{ - leases: map[string]lease.Info{ - "redis": lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - }, - "store": lease.Info{ - Holder: "store/3", - Expiry: offset(5 * time.Second), - }, - "tokumx": lease.Info{ - Holder: "tokumx/5", - Expiry: offset(10 * time.Second), // will not expire. - }, - "ultron": lease.Info{ - Holder: "ultron/7", - Expiry: offset(5 * time.Second), - }, - "vvvvvv": lease.Info{ - Holder: "vvvvvv/2", - Expiry: offset(time.Second), // would expire, but errors first. - }, - }, - expectCalls: []call{{ - method: "Refresh", - }, { - method: "ExpireLease", - args: []interface{}{"redis"}, - callback: func(leases map[string]lease.Info) { - delete(leases, "redis") - }, - }, { - method: "ExpireLease", - args: []interface{}{"store"}, - err: lease.ErrInvalid, - callback: func(leases map[string]lease.Info) { - delete(leases, "store") - }, - }, { - method: "ExpireLease", - args: []interface{}{"ultron"}, - err: errors.New("what is this?"), - }}, - expectDirty: true, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, clock *coretesting.Clock) { - clock.Advance(5 * time.Second) - err := manager.Wait() - c.Check(err, gc.ErrorMatches, "what is this\\?") - }) -} === removed file 'src/github.com/juju/juju/state/leadership/manager_validation_test.go' --- src/github.com/juju/juju/state/leadership/manager_validation_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/leadership/manager_validation_test.go 1970-01-01 00:00:00 +0000 @@ -1,133 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package leadership_test - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/mgo.v2/txn" - - "github.com/juju/juju/state/leadership" - "github.com/juju/juju/state/lease" - coretesting "github.com/juju/juju/testing" -) - -type ValidationSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&ValidationSuite{}) - -func (s *ValidationSuite) TestMissingClient(c *gc.C) { - manager, err := leadership.NewManager(leadership.ManagerConfig{ - Clock: coretesting.NewClock(time.Now()), - MaxSleep: time.Minute, - }) - c.Check(err, gc.ErrorMatches, "nil Client not valid") - c.Check(err, jc.Satisfies, errors.IsNotValid) - c.Check(manager, gc.IsNil) -} - -func (s *ValidationSuite) TestMissingClock(c *gc.C) { - manager, err := leadership.NewManager(leadership.ManagerConfig{ - Client: NewClient(nil, nil), - MaxSleep: time.Minute, - }) - c.Check(err, gc.ErrorMatches, "nil Clock not valid") - c.Check(err, jc.Satisfies, errors.IsNotValid) - c.Check(manager, gc.IsNil) -} - -func (s *ValidationSuite) TestMissingMaxSleep(c *gc.C) { - manager, err := leadership.NewManager(leadership.ManagerConfig{ - Client: NewClient(nil, nil), - Clock: coretesting.NewClock(time.Now()), - }) - c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") - c.Check(err, jc.Satisfies, errors.IsNotValid) - c.Check(manager, gc.IsNil) -} - -func (s *ValidationSuite) TestNegativeMaxSleep(c *gc.C) { - manager, err := leadership.NewManager(leadership.ManagerConfig{ - Client: NewClient(nil, nil), - Clock: coretesting.NewClock(time.Now()), - MaxSleep: -time.Nanosecond, - }) - c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") - c.Check(err, jc.Satisfies, errors.IsNotValid) - c.Check(manager, gc.IsNil) -} - -func (s *ValidationSuite) TestClaimLeadership_ServiceName(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("foo/0", "bar/0", time.Minute) - c.Check(err, gc.ErrorMatches, `cannot claim leadership: invalid service name "foo/0"`) - }) -} - -func (s *ValidationSuite) TestClaimLeadership_UnitName(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("foo", "bar", time.Minute) - c.Check(err, gc.ErrorMatches, `cannot claim leadership: invalid unit name "bar"`) - }) -} - -func (s *ValidationSuite) TestClaimLeadership_Duration(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.ClaimLeadership("foo", "bar/0", 0) - c.Check(err, gc.ErrorMatches, `cannot claim leadership: invalid duration 0`) - }) -} - -func (s *ValidationSuite) TestLeadershipCheck_ServiceName(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("foo/0", "bar/0") - c.Check(token.Check(nil), gc.ErrorMatches, `cannot check leadership: invalid service name "foo/0"`) - }) -} - -func (s *ValidationSuite) TestLeadershipCheck_UnitName(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - token := manager.LeadershipCheck("foo", "bar") - c.Check(token.Check(nil), gc.ErrorMatches, `cannot check leadership: invalid unit name "bar"`) - }) -} - -func (s *ValidationSuite) TestLeadershipCheck_OutPtr(c *gc.C) { - fix := &Fixture{ - expectCalls: []call{{ - method: 
"Refresh", - callback: func(leases map[string]lease.Info) { - leases["redis"] = lease.Info{ - Holder: "redis/0", - Expiry: offset(time.Second), - AssertOp: txn.Op{C: "fake", Id: "fake"}, - } - }, - }}, - } - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - bad := "bad" - token := manager.LeadershipCheck("redis", "redis/0") - c.Check(token.Check(&bad), gc.ErrorMatches, `expected pointer to \[\]txn.Op`) - }) -} - -func (s *ValidationSuite) TestBlockUntilLeadershipReleased_ServiceName(c *gc.C) { - fix := &Fixture{} - fix.RunTest(c, func(manager leadership.ManagerWorker, _ *coretesting.Clock) { - err := manager.BlockUntilLeadershipReleased("foo/0") - c.Check(err, gc.ErrorMatches, `cannot wait for leaderlessness: invalid service name "foo/0"`) - }) -} === removed file 'src/github.com/juju/juju/state/leadership/package_test.go' --- src/github.com/juju/juju/state/leadership/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/github.com/juju/juju/state/leadership/token.go' --- src/github.com/juju/juju/state/leadership/token.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/token.go 1970-01-01 00:00:00 +0000 @@ -1,42 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "github.com/juju/errors" - "gopkg.in/mgo.v2/txn" -) - -// token implements leadership.Token. -type token struct { - serviceName string - unitName string - checks chan<- check - abort <-chan struct{} -} - -// Check is part of the leadership.Token interface. -func (t token) Check(out interface{}) error { - - // Check validity and get the assert op in case it's needed. - op, err := check{ - serviceName: t.serviceName, - unitName: t.unitName, - response: make(chan txn.Op), - abort: t.abort, - }.invoke(t.checks) - if err != nil { - return errors.Trace(err) - } - - // Report transaction ops if the client wants them. - if out != nil { - outPtr, ok := out.(*[]txn.Op) - if !ok { - return errors.New("expected pointer to []txn.Op") - } - *outPtr = []txn.Op{op} - } - return nil -} === removed file 'src/github.com/juju/juju/state/leadership/util_test.go' --- src/github.com/juju/juju/state/leadership/util_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/leadership/util_test.go 1970-01-01 00:00:00 +0000 @@ -1,131 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership_test - -import ( - "fmt" - "time" - - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/state/lease" -) - -// Client implements lease.Client for testing purposes. -type Client struct { - leases map[string]lease.Info - expect []call - failed string - done chan struct{} -} - -// NewClient initializes and returns a new client configured to report -// the supplied leases and expect the supplied calls. 
-func NewClient(leases map[string]lease.Info, expect []call) *Client { - if leases == nil { - leases = make(map[string]lease.Info) - } - done := make(chan struct{}) - if len(expect) == 0 { - close(done) - } - return &Client{ - leases: leases, - expect: expect, - done: done, - } -} - -// Wait will return when all expected calls have been made, or fail the test -// if they don't happen within a second. (You control the clock; your tests -// should pass in *way* less than a second of wall-clock time.) -func (client *Client) Wait(c *gc.C) { - select { - case <-client.done: - if client.failed != "" { - c.Fatalf(client.failed) - } - case <-time.After(time.Second): - c.Fatalf("Client test took way too long") - } -} - -// Leases is part of the lease.Client interface. -func (client *Client) Leases() map[string]lease.Info { - result := make(map[string]lease.Info) - for k, v := range client.leases { - result[k] = v - } - return result -} - -// call implements the bulk of the lease.Client interface. -func (client *Client) call(method string, args []interface{}) error { - select { - case <-client.done: - return errors.Errorf("Client method called after test complete: %s %v", method, args) - default: - defer func() { - if len(client.expect) == 0 || client.failed != "" { - close(client.done) - } - }() - } - - expect := client.expect[0] - client.expect = client.expect[1:] - if expect.callback != nil { - expect.callback(client.leases) - } - - if method == expect.method { - if ok, _ := jc.DeepEqual(args, expect.args); ok { - return expect.err - } - } - client.failed = fmt.Sprintf("unexpected Client call:\n actual: %s %v\n expect: %s %v", - method, args, expect.method, expect.args, - ) - return errors.New(client.failed) -} - -// ClaimLease is part of the lease.Client interface. -func (client *Client) ClaimLease(name string, request lease.Request) error { - return client.call("ClaimLease", []interface{}{name, request}) -} - -// ExtendLease is part of the lease.Client interface. -func (client *Client) ExtendLease(name string, request lease.Request) error { - return client.call("ExtendLease", []interface{}{name, request}) -} - -// ExpireLease is part of the lease.Client interface. -func (client *Client) ExpireLease(name string) error { - return client.call("ExpireLease", []interface{}{name}) -} - -// Refresh is part of the lease.Client interface. -func (client *Client) Refresh() error { - return client.call("Refresh", nil) -} - -// call defines an expected method call on a Client; it encodes: -type call struct { - - // method is the name of the method. - method string - - // args is the expected arguments. - args []interface{} - - // err is the error to return. - err error - - // callback, if non-nil, will be passed the internal leases dict; for - // modification, if desired. Otherwise you can use it to, e.g., assert - // clock time. - callback func(leases map[string]lease.Info) -} === modified file 'src/github.com/juju/juju/state/lease/client.go' --- src/github.com/juju/juju/state/lease/client.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/lease/client.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,7 @@ "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" + "github.com/juju/juju/core/lease" "github.com/juju/juju/mongo" ) @@ -26,8 +27,8 @@ // to use. // Clients do not need to be cleaned up themselves, but they will not function // past the lifetime of their configured Mongo.
-func NewClient(config ClientConfig) (Client, error) { - if err := config.Validate(); err != nil { +func NewClient(config ClientConfig) (lease.Client, error) { + if err := config.validate(); err != nil { return nil, errors.Trace(err) } loggerName := fmt.Sprintf("state.lease.%s.%s", config.Namespace, config.Id) @@ -45,7 +46,7 @@ return client, nil } -// client implements the Client interface. +// client implements the lease.Client interface. type client struct { // config holds resources and configuration necessary to store leases. @@ -61,36 +62,36 @@ skews map[string]Skew } -// Leases is part of the Client interface. -func (client *client) Leases() map[string]Info { - leases := make(map[string]Info) +// Leases is part of the lease.Client interface. +func (client *client) Leases() map[string]lease.Info { + leases := make(map[string]lease.Info) for name, entry := range client.entries { skew := client.skews[entry.writer] - leases[name] = Info{ + leases[name] = lease.Info{ Holder: entry.holder, Expiry: skew.Latest(entry.expiry), - AssertOp: client.assertOp(name, entry.holder), + Trapdoor: client.assertOpTrapdoor(name, entry.holder), } } return leases } -// ClaimLease is part of the Client interface. -func (client *client) ClaimLease(name string, request Request) error { +// ClaimLease is part of the lease.Client interface. +func (client *client) ClaimLease(name string, request lease.Request) error { return client.request(name, request, client.claimLeaseOps, "claiming") } -// ExtendLease is part of the Client interface. -func (client *client) ExtendLease(name string, request Request) error { +// ExtendLease is part of the lease.Client interface. +func (client *client) ExtendLease(name string, request lease.Request) error { return client.request(name, request, client.extendLeaseOps, "extending") } // opsFunc is used to make the signature of the request method somewhat readable. -type opsFunc func(name string, request Request) ([]txn.Op, entry, error) +type opsFunc func(name string, request lease.Request) ([]txn.Op, entry, error) // request implements ClaimLease and ExtendLease. -func (client *client) request(name string, request Request, getOps opsFunc, verb string) error { - if err := validateString(name); err != nil { +func (client *client) request(name string, request lease.Request, getOps opsFunc, verb string) error { + if err := lease.ValidateString(name); err != nil { return errors.Annotatef(err, "invalid name") } if err := request.Validate(); err != nil { @@ -123,12 +124,11 @@ return ops, nil }) - // Unwrap ErrInvalid if necessary. - if errors.Cause(err) == ErrInvalid { - return ErrInvalid - } if err != nil { - return errors.Trace(err) + if errors.Cause(err) == lease.ErrInvalid { + return lease.ErrInvalid + } + return errors.Annotate(err, "cannot satisfy request") } // Update the cache for this lease only. @@ -138,7 +138,7 @@ // ExpireLease is part of the Client interface. func (client *client) ExpireLease(name string) error { - if err := validateString(name); err != nil { + if err := lease.ValidateString(name); err != nil { return errors.Annotatef(err, "invalid name") } @@ -161,11 +161,10 @@ return ops, nil }) - // Unwrap ErrInvalid if necessary. 
- if errors.Cause(err) == ErrInvalid { - return ErrInvalid - } if err != nil { + if errors.Cause(err) == lease.ErrInvalid { + return lease.ErrInvalid + } return errors.Trace(err) } @@ -216,27 +215,28 @@ client.logger.Tracef("checking clock %q (attempt %d)", clockDocId, attempt) var clockDoc clockDoc err := collection.FindId(clockDocId).One(&clockDoc) - if err == nil { + switch err { + case nil: client.logger.Tracef("clock already exists") if err := clockDoc.validate(); err != nil { return nil, errors.Annotatef(err, "corrupt clock document") } return nil, jujutxn.ErrNoOperations - } - if err != mgo.ErrNotFound { - return nil, errors.Trace(err) - } - client.logger.Tracef("creating clock") - newClockDoc, err := newClockDoc(client.config.Namespace) - if err != nil { - return nil, errors.Trace(err) - } - return []txn.Op{{ - C: client.config.Collection, - Id: clockDocId, - Assert: txn.DocMissing, - Insert: newClockDoc, - }}, nil + case mgo.ErrNotFound: + client.logger.Tracef("creating clock") + newClockDoc, err := newClockDoc(client.config.Namespace) + if err != nil { + return nil, errors.Trace(err) + } + return []txn.Op{{ + C: client.config.Collection, + Id: clockDocId, + Assert: txn.DocMissing, + Insert: newClockDoc, + }}, nil + default: + return nil, errors.Trace(err) + } }) return errors.Trace(err) } @@ -305,12 +305,12 @@ // claimLeaseOps returns the []txn.Op necessary to claim the supplied lease // until duration in the future, and a cache entry corresponding to the values // that will be written if the transaction succeeds. If the claim would conflict -// with cached state, it returns ErrInvalid. -func (client *client) claimLeaseOps(name string, request Request) ([]txn.Op, entry, error) { +// with cached state, it returns lease.ErrInvalid. +func (client *client) claimLeaseOps(name string, request lease.Request) ([]txn.Op, entry, error) { // We can't claim a lease that's already held. if _, found := client.entries[name]; found { - return nil, entry{}, ErrInvalid + return nil, entry{}, lease.ErrInvalid } // According to the local clock, we want the lease to extend until @@ -346,16 +346,16 @@ // that will be written if the transaction succeeds. If the supplied lease // already extends far enough that no operations are required, it will return // errNoExtension. If the extension would conflict with cached state, it will -// return ErrInvalid. -func (client *client) extendLeaseOps(name string, request Request) ([]txn.Op, entry, error) { +// return lease.ErrInvalid. +func (client *client) extendLeaseOps(name string, request lease.Request) ([]txn.Op, entry, error) { // Reject extensions when there's no lease, or the holder doesn't match. lastEntry, found := client.entries[name] if !found { - return nil, entry{}, ErrInvalid + return nil, entry{}, lease.ErrInvalid } if lastEntry.holder != request.Holder { - return nil, entry{}, ErrInvalid + return nil, entry{}, lease.ErrInvalid } // According to the local clock, we want the lease to extend until @@ -413,13 +413,14 @@ } // expireLeaseOps returns the []txn.Op necessary to vacate the lease. If the -// expiration would conflict with cached state, it will return ErrInvalid. +// expiration would conflict with cached state, it will return an error with +// a Cause of ErrInvalid. func (client *client) expireLeaseOps(name string) ([]txn.Op, error) { // We can't expire a lease that doesn't exist. 
lastEntry, found := client.entries[name] if !found { - return nil, ErrInvalid + return nil, lease.ErrInvalid } // We also can't expire a lease whose expiry time may be in the future. @@ -427,8 +428,7 @@ latestExpiry := skew.Latest(lastEntry.expiry) now := client.config.Clock.Now() if !now.After(latestExpiry) { - client.logger.Tracef("lease %q expires in the future", name) - return nil, ErrInvalid + return nil, errors.Annotatef(lease.ErrInvalid, "lease %q expires in the future", name) } // The database change is simple, and depends on the lease doc being @@ -473,16 +473,24 @@ } } -// assertOp returns a txn.Op which will succeed only if holder holds the -// named lease. -func (client *client) assertOp(name, holder string) txn.Op { - return txn.Op{ +// assertOpTrapdoor returns a lease.Trapdoor that will replace a supplied +// *[]txn.Op with one that asserts that the holder still holds the named lease. +func (client *client) assertOpTrapdoor(name, holder string) lease.Trapdoor { + op := txn.Op{ C: client.config.Collection, Id: client.leaseDocId(name), Assert: bson.M{ fieldLeaseHolder: holder, }, } + return func(out interface{}) error { + outPtr, ok := out.(*[]txn.Op) + if !ok { + return errors.NotValidf("expected *[]txn.Op; %T", out) + } + *outPtr = []txn.Op{op} + return nil + } } // clockDocId returns the id of the clock document in the client's namespace. === modified file 'src/github.com/juju/juju/state/lease/client_assert_test.go' --- src/github.com/juju/juju/state/lease/client_assert_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/lease/client_assert_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/txn" - "github.com/juju/juju/state/lease" + "github.com/juju/juju/core/lease" ) // ClientAssertSuite tests that AssertOp does what it should. @@ -33,8 +33,10 @@ func (s *ClientAssertSuite) TestPassesWhenLeaseHeld(c *gc.C) { info := s.fix.Client.Leases()["name"] - ops := []txn.Op{info.AssertOp} - err := s.fix.Runner.RunTransaction(ops) + var ops []txn.Op + err := info.Trapdoor(&ops) + c.Check(err, jc.ErrorIsNil) + err = s.fix.Runner.RunTransaction(ops) c.Check(err, jc.ErrorIsNil) } @@ -45,7 +47,9 @@ err := fix2.Client.ExtendLease("name", lease.Request{"holder", time.Hour}) c.Assert(err, jc.ErrorIsNil) - ops := []txn.Op{info.AssertOp} + var ops []txn.Op + err = info.Trapdoor(&ops) + c.Check(err, jc.ErrorIsNil) err = s.fix.Runner.RunTransaction(ops) c.Check(err, gc.IsNil) } @@ -57,7 +61,9 @@ err := s.fix.Client.Refresh() c.Assert(err, jc.ErrorIsNil) - ops := []txn.Op{info.AssertOp} + var ops []txn.Op + err = info.Trapdoor(&ops) + c.Check(err, jc.ErrorIsNil) err = s.fix.Runner.RunTransaction(ops) c.Check(err, gc.IsNil) } @@ -69,7 +75,9 @@ err := s.fix.Client.ExpireLease("name") c.Assert(err, jc.ErrorIsNil) - ops := []txn.Op{info.AssertOp} + var ops []txn.Op + err = info.Trapdoor(&ops) + c.Check(err, jc.ErrorIsNil) err = s.fix.Runner.RunTransaction(ops) c.Check(err, gc.Equals, txn.ErrAborted) } === modified file 'src/github.com/juju/juju/state/lease/client_operation_test.go' --- src/github.com/juju/juju/state/lease/client_operation_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/lease/client_operation_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/state/lease" + "github.com/juju/juju/core/lease" ) // ClientOperationSuite verifies behaviour when claiming, extending, and expiring leases. 
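The Trapdoor change above is the heart of this file's refactoring: lease.Info no longer exposes a ready-made txn.Op; instead it carries a function that fills in a caller-supplied *[]txn.Op, which keeps mgo/txn out of the shared lease package's public interface. A minimal sketch of the consuming side, assuming a jujutxn.Runner is in scope as runner:

    var ops []txn.Op
    if err := info.Trapdoor(&ops); err != nil {
        return errors.Trace(err) // the out parameter was not a *[]txn.Op
    }
    // ops now asserts that the holder still holds the lease; append any
    // operations that must be gated on that fact and run them together.
    if err := runner.RunTransaction(ops); err == txn.ErrAborted {
        // the lease changed hands after Leases() was consulted
    }

The removed interface.go (below) noted that AssertOp was the only MongoDB-specific part of Info; hiding it behind an interface{}-based trapdoor is what allows the Client interface to move into core/lease.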
=== modified file 'src/github.com/juju/juju/state/lease/client_persistence_test.go' --- src/github.com/juju/juju/state/lease/client_persistence_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/lease/client_persistence_test.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,7 @@ gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" + corelease "github.com/juju/juju/core/lease" "github.com/juju/juju/state/lease" ) @@ -76,7 +77,7 @@ func (s *ClientPersistenceSuite) TestClaimLease(c *gc.C) { fix1 := s.EasyFixture(c) leaseDuration := time.Minute - err := fix1.Client.ClaimLease("name", lease.Request{"holder", leaseDuration}) + err := fix1.Client.ClaimLease("name", corelease.Request{"holder", leaseDuration}) c.Assert(err, jc.ErrorIsNil) // Same client id, same clock, new instance: sees exact same lease. @@ -88,10 +89,10 @@ func (s *ClientPersistenceSuite) TestExtendLease(c *gc.C) { fix1 := s.EasyFixture(c) - err := fix1.Client.ClaimLease("name", lease.Request{"holder", time.Second}) + err := fix1.Client.ClaimLease("name", corelease.Request{"holder", time.Second}) c.Assert(err, jc.ErrorIsNil) leaseDuration := time.Minute - err = fix1.Client.ExtendLease("name", lease.Request{"holder", leaseDuration}) + err = fix1.Client.ExtendLease("name", corelease.Request{"holder", leaseDuration}) c.Assert(err, jc.ErrorIsNil) // Same client id, same clock, new instance: sees exact same lease. @@ -104,7 +105,7 @@ func (s *ClientPersistenceSuite) TestExpireLease(c *gc.C) { fix1 := s.EasyFixture(c) leaseDuration := time.Minute - err := fix1.Client.ClaimLease("name", lease.Request{"holder", leaseDuration}) + err := fix1.Client.ClaimLease("name", corelease.Request{"holder", leaseDuration}) c.Assert(err, jc.ErrorIsNil) fix1.Clock.Advance(leaseDuration + time.Nanosecond) err = fix1.Client.ExpireLease("name") @@ -118,7 +119,7 @@ func (s *ClientPersistenceSuite) TestNamespaceIsolation(c *gc.C) { fix1 := s.EasyFixture(c) leaseDuration := time.Minute - err := fix1.Client.ClaimLease("name", lease.Request{"holder", leaseDuration}) + err := fix1.Client.ClaimLease("name", corelease.Request{"holder", leaseDuration}) c.Assert(err, jc.ErrorIsNil) // Same client id, same clock, different namespace: sees no lease. @@ -131,7 +132,7 @@ func (s *ClientPersistenceSuite) TestTimezoneChanges(c *gc.C) { fix1 := s.EasyFixture(c) leaseDuration := time.Minute - err := fix1.Client.ClaimLease("name", lease.Request{"holder", leaseDuration}) + err := fix1.Client.ClaimLease("name", corelease.Request{"holder", leaseDuration}) c.Assert(err, jc.ErrorIsNil) // Same client can come up in a different timezone and still work correctly. 
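Since Client, Info, Request, and ErrInvalid now live in core/lease while the MongoDB-backed implementation stays in state/lease, test files that need both packages import the shared one under the corelease alias, as the hunks above show:

    import (
        corelease "github.com/juju/juju/core/lease"
        "github.com/juju/juju/state/lease"
    )

    // state/lease builds the client; core/lease describes the requests.
    client, err := lease.NewClient(config)
    if err != nil {
        return errors.Trace(err)
    }
    err = client.ClaimLease("name", corelease.Request{"holder", time.Minute})

Files that no longer touch the implementation directly (client_assert_test.go and client_operation_test.go above) simply swap their import from state/lease to core/lease.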
@@ -146,7 +147,7 @@ func (s *ClientPersistenceSuite) TestTimezoneIsolation(c *gc.C) { fix1 := s.EasyFixture(c) leaseDuration := time.Minute - err := fix1.Client.ClaimLease("name", lease.Request{"holder", leaseDuration}) + err := fix1.Client.ClaimLease("name", corelease.Request{"holder", leaseDuration}) c.Assert(err, jc.ErrorIsNil) // Different client *and* different timezone; but clock agrees perfectly, === modified file 'src/github.com/juju/juju/state/lease/client_race_test.go' --- src/github.com/juju/juju/state/lease/client_race_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/lease/client_race_test.go 2016-03-22 15:18:22 +0000 @@ -11,8 +11,8 @@ txntesting "github.com/juju/txn/testing" "github.com/juju/utils/clock" gc "gopkg.in/check.v1" - _ "gopkg.in/mgo.v2/bson" + corelease "github.com/juju/juju/core/lease" "github.com/juju/juju/state/lease" ) @@ -42,7 +42,7 @@ defer txntesting.SetBeforeHooks(c, sutRunner, func() { client, err := lease.NewClient(config("blocker")) c.Check(err, jc.ErrorIsNil) - err = client.ClaimLease("somewhere", lease.Request{"someone", time.Minute}) + err = client.ClaimLease("somewhere", corelease.Request{"someone", time.Minute}) c.Check(err, jc.ErrorIsNil) })() @@ -63,13 +63,13 @@ // Set up a hook to grab the lease "name" just before the next txn runs. defer txntesting.SetBeforeHooks(c, sut.Runner, func() { - err := blocker.Client.ClaimLease("name", lease.Request{"ha-haa", time.Minute}) + err := blocker.Client.ClaimLease("name", corelease.Request{"ha-haa", time.Minute}) c.Check(err, jc.ErrorIsNil) })() // Try to grab the lease "name", and fail. - err := sut.Client.ClaimLease("name", lease.Request{"trying", time.Second}) - c.Check(err, gc.Equals, lease.ErrInvalid) + err := sut.Client.ClaimLease("name", corelease.Request{"trying", time.Second}) + c.Check(err, gc.Equals, corelease.ErrInvalid) // The client that failed has refreshed state (as it had to, in order // to discover the reason for the invalidity). @@ -85,7 +85,7 @@ // it again before the SUT goes and looks to figure out what it should do. interfere := jujutxn.TestHook{ Before: func() { - err := blocker.Client.ClaimLease("name", lease.Request{"ha-haa", time.Second}) + err := blocker.Client.ClaimLease("name", corelease.Request{"ha-haa", time.Second}) c.Check(err, jc.ErrorIsNil) }, After: func() { @@ -100,8 +100,8 @@ )() // Try to claim, and watch the poor thing collapse in exhaustion. - err := sut.Client.ClaimLease("name", lease.Request{"trying", time.Minute}) - c.Check(err, gc.ErrorMatches, "state changing too quickly; try again soon") + err := sut.Client.ClaimLease("name", corelease.Request{"trying", time.Minute}) + c.Check(err, gc.ErrorMatches, "cannot satisfy request: state changing too quickly; try again soon") } // ClientTrickyRaceSuite tests what happens when two clients interfere with @@ -117,7 +117,7 @@ func (s *ClientTrickyRaceSuite) SetUpTest(c *gc.C) { s.FixtureSuite.SetUpTest(c) s.sut = s.EasyFixture(c) - err := s.sut.Client.ClaimLease("name", lease.Request{"holder", time.Minute}) + err := s.sut.Client.ClaimLease("name", corelease.Request{"holder", time.Minute}) c.Assert(err, jc.ErrorIsNil) s.blocker = s.NewFixture(c, FixtureParams{Id: "blocker"}) } @@ -130,7 +130,7 @@ // Set up hooks to extend the lease by a little, before the SUT's extend // gets a chance; and then to verify state after it's applied its retry. 
defer txntesting.SetRetryHooks(c, s.sut.Runner, func() { - err := s.blocker.Client.ExtendLease("name", lease.Request{"holder", shorterRequest}) + err := s.blocker.Client.ExtendLease("name", corelease.Request{"holder", shorterRequest}) c.Check(err, jc.ErrorIsNil) }, func() { err := s.blocker.Client.Refresh() @@ -139,7 +139,7 @@ })() // Extend the lease. - err := s.sut.Client.ExtendLease("name", lease.Request{"holder", longerRequest}) + err := s.sut.Client.ExtendLease("name", corelease.Request{"holder", longerRequest}) c.Check(err, jc.ErrorIsNil) } @@ -150,12 +150,12 @@ // Set up hooks to extend the lease by a lot, before the SUT's extend can. defer txntesting.SetBeforeHooks(c, s.sut.Runner, func() { - err := s.blocker.Client.ExtendLease("name", lease.Request{"holder", longerRequest}) + err := s.blocker.Client.ExtendLease("name", corelease.Request{"holder", longerRequest}) c.Check(err, jc.ErrorIsNil) })() // Extend the lease by a little. - err := s.sut.Client.ExtendLease("name", lease.Request{"holder", shorterRequest}) + err := s.sut.Client.ExtendLease("name", corelease.Request{"holder", shorterRequest}) c.Check(err, jc.ErrorIsNil) // The SUT was refreshed, and knows that the lease is really valid for longer. @@ -172,8 +172,8 @@ })() // Try to extend; check it aborts. - err := s.sut.Client.ExtendLease("name", lease.Request{"holder", 2 * time.Minute}) - c.Check(err, gc.Equals, lease.ErrInvalid) + err := s.sut.Client.ExtendLease("name", corelease.Request{"holder", 2 * time.Minute}) + c.Check(err, gc.Equals, corelease.ErrInvalid) // The SUT has been refreshed, and you can see why the operation was invalid. c.Check("name", s.sut.Holder(), "") @@ -187,13 +187,13 @@ s.blocker.Clock.Advance(90 * time.Second) err := s.blocker.Client.ExpireLease("name") c.Check(err, jc.ErrorIsNil) - err = s.blocker.Client.ClaimLease("name", lease.Request{"different-holder", time.Minute}) + err = s.blocker.Client.ClaimLease("name", corelease.Request{"different-holder", time.Minute}) c.Check(err, jc.ErrorIsNil) })() // Try to extend; check it aborts. - err := s.sut.Client.ExtendLease("name", lease.Request{"holder", 2 * time.Minute}) - c.Check(err, gc.Equals, lease.ErrInvalid) + err := s.sut.Client.ExtendLease("name", corelease.Request{"holder", 2 * time.Minute}) + c.Check(err, gc.Equals, corelease.ErrInvalid) // The SUT has been refreshed, and you can see why the operation was invalid. c.Check("name", s.sut.Holder(), "different-holder") @@ -207,7 +207,7 @@ s.blocker.Clock.Advance(90 * time.Second) err := s.blocker.Client.ExpireLease("name") c.Check(err, jc.ErrorIsNil) - err = s.blocker.Client.ClaimLease("name", lease.Request{"holder", time.Minute}) + err = s.blocker.Client.ClaimLease("name", corelease.Request{"holder", time.Minute}) c.Check(err, jc.ErrorIsNil) }, func() { err := s.blocker.Client.Refresh() @@ -216,7 +216,7 @@ })() // Try to extend; check it worked. - err := s.sut.Client.ExtendLease("name", lease.Request{"holder", 5 * time.Minute}) + err := s.sut.Client.ExtendLease("name", corelease.Request{"holder", 5 * time.Minute}) c.Check(err, jc.ErrorIsNil) } @@ -231,7 +231,7 @@ c.Check(err, jc.ErrorIsNil) }, After: func() { - err := s.blocker.Client.ClaimLease("name", lease.Request{"holder", time.Second}) + err := s.blocker.Client.ClaimLease("name", corelease.Request{"holder", time.Second}) c.Check(err, jc.ErrorIsNil) }, } @@ -241,8 +241,8 @@ )() // Try to extend, and watch the poor thing collapse in exhaustion. 
- err := s.sut.Client.ExtendLease("name", lease.Request{"holder", time.Minute}) - c.Check(err, gc.ErrorMatches, "state changing too quickly; try again soon") + err := s.sut.Client.ExtendLease("name", corelease.Request{"holder", time.Minute}) + c.Check(err, gc.ErrorMatches, "cannot satisfy request: state changing too quickly; try again soon") } func (s *ClientTrickyRaceSuite) TestExpireLease_BlockedBy_ExtendLease(c *gc.C) { @@ -250,14 +250,14 @@ // Set up a hook to extend the lease before the expire gets a chance. defer txntesting.SetBeforeHooks(c, s.sut.Runner, func() { s.blocker.Clock.Advance(90 * time.Second) - err := s.blocker.Client.ExtendLease("name", lease.Request{"holder", 30 * time.Second}) + err := s.blocker.Client.ExtendLease("name", corelease.Request{"holder", 30 * time.Second}) c.Check(err, jc.ErrorIsNil) })() // Try to expire; check it aborts. s.sut.Clock.Advance(90 * time.Second) err := s.sut.Client.ExpireLease("name") - c.Check(err, gc.Equals, lease.ErrInvalid) + c.Check(err, gc.Equals, corelease.ErrInvalid) // The SUT has been refreshed, and you can see why the operation was invalid. c.Check("name", s.sut.Expiry(), s.sut.Zero.Add(2*time.Minute)) @@ -275,7 +275,7 @@ // Try to expire; check it aborts. s.sut.Clock.Advance(90 * time.Second) err := s.sut.Client.ExpireLease("name") - c.Check(err, gc.Equals, lease.ErrInvalid) + c.Check(err, gc.Equals, corelease.ErrInvalid) // The SUT has been refreshed, and you can see why the operation was invalid. c.Check("name", s.sut.Holder(), "") @@ -288,14 +288,14 @@ s.blocker.Clock.Advance(90 * time.Second) err := s.blocker.Client.ExpireLease("name") c.Check(err, jc.ErrorIsNil) - err = s.blocker.Client.ClaimLease("name", lease.Request{"holder", time.Minute}) + err = s.blocker.Client.ClaimLease("name", corelease.Request{"holder", time.Minute}) c.Check(err, jc.ErrorIsNil) })() // Try to expire; check it aborts. s.sut.Clock.Advance(90 * time.Second) err := s.sut.Client.ExpireLease("name") - c.Check(err, gc.Equals, lease.ErrInvalid) + c.Check(err, gc.Equals, corelease.ErrInvalid) // The SUT has been refreshed, and you can see why the operation was invalid. c.Check("name", s.sut.Expiry(), s.sut.Zero.Add(150*time.Second)) === modified file 'src/github.com/juju/juju/state/lease/client_remote_test.go' --- src/github.com/juju/juju/state/lease/client_remote_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/lease/client_remote_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "github.com/juju/juju/state/lease" + "github.com/juju/juju/core/lease" ) // ClientRemoteSuite checks that clients do not break one another's promises. 
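The race tests above also pin down the revised error contract: lease.ErrInvalid still comes back as a bare, ==-comparable value, while other transaction failures are now annotated as "cannot satisfy request: ...". A sketch of the calling pattern this implies, using only names already present in the diff:

    err := client.ClaimLease("name", corelease.Request{Holder: "holder", Duration: time.Minute})
    switch {
    case err == nil:
        // claimed: held for at least the requested duration
    case errors.Cause(err) == corelease.ErrInvalid:
        // short-range signal only: consult client.Leases() for fresh
        // state, then retry or translate into a more abstract error
    default:
        return errors.Trace(err)
    }

Comparing errors.Cause rather than err itself is the defensive choice; it also matches the internal handling, where the annotated "expires in the future" error keeps ErrInvalid as its Cause.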
=== modified file 'src/github.com/juju/juju/state/lease/client_validation_test.go' --- src/github.com/juju/juju/state/lease/client_validation_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/lease/client_validation_test.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ gc "gopkg.in/check.v1" + corelease "github.com/juju/juju/core/lease" "github.com/juju/juju/state/lease" ) @@ -55,37 +56,37 @@ func (s *ClientValidationSuite) TestClaimLeaseName(c *gc.C) { fix := s.EasyFixture(c) - err := fix.Client.ClaimLease("$name", lease.Request{"holder", time.Minute}) + err := fix.Client.ClaimLease("$name", corelease.Request{"holder", time.Minute}) c.Check(err, gc.ErrorMatches, "invalid name: string contains forbidden characters") } func (s *ClientValidationSuite) TestClaimLeaseHolder(c *gc.C) { fix := s.EasyFixture(c) - err := fix.Client.ClaimLease("name", lease.Request{"$holder", time.Minute}) + err := fix.Client.ClaimLease("name", corelease.Request{"$holder", time.Minute}) c.Check(err, gc.ErrorMatches, "invalid request: invalid holder: string contains forbidden characters") } func (s *ClientValidationSuite) TestClaimLeaseDuration(c *gc.C) { fix := s.EasyFixture(c) - err := fix.Client.ClaimLease("name", lease.Request{"holder", 0}) + err := fix.Client.ClaimLease("name", corelease.Request{"holder", 0}) c.Check(err, gc.ErrorMatches, "invalid request: invalid duration") } func (s *ClientValidationSuite) TestExtendLeaseName(c *gc.C) { fix := s.EasyFixture(c) - err := fix.Client.ExtendLease("$name", lease.Request{"holder", time.Minute}) + err := fix.Client.ExtendLease("$name", corelease.Request{"holder", time.Minute}) c.Check(err, gc.ErrorMatches, "invalid name: string contains forbidden characters") } func (s *ClientValidationSuite) TestExtendLeaseHolder(c *gc.C) { fix := s.EasyFixture(c) - err := fix.Client.ExtendLease("name", lease.Request{"$holder", time.Minute}) + err := fix.Client.ExtendLease("name", corelease.Request{"$holder", time.Minute}) c.Check(err, gc.ErrorMatches, "invalid request: invalid holder: string contains forbidden characters") } func (s *ClientValidationSuite) TestExtendLeaseDuration(c *gc.C) { fix := s.EasyFixture(c) - err := fix.Client.ExtendLease("name", lease.Request{"holder", 0}) + err := fix.Client.ExtendLease("name", corelease.Request{"holder", 0}) c.Check(err, gc.ErrorMatches, "invalid request: invalid duration") } === modified file 'src/github.com/juju/juju/state/lease/config.go' --- src/github.com/juju/juju/state/lease/config.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/lease/config.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ jujutxn "github.com/juju/txn" "github.com/juju/utils/clock" + "github.com/juju/juju/core/lease" "github.com/juju/juju/mongo" ) @@ -45,15 +46,15 @@ Clock clock.Clock } -// Validate returns an error if the supplied config is not valid. -func (config ClientConfig) Validate() error { - if err := validateString(config.Id); err != nil { +// validate returns an error if the supplied config is not valid. 
+func (config ClientConfig) validate() error { + if err := lease.ValidateString(config.Id); err != nil { return errors.Annotatef(err, "invalid id") } - if err := validateString(config.Namespace); err != nil { + if err := lease.ValidateString(config.Namespace); err != nil { return errors.Annotatef(err, "invalid namespace") } - if err := validateString(config.Collection); err != nil { + if err := lease.ValidateString(config.Collection); err != nil { return errors.Annotatef(err, "invalid collection") } if config.Mongo == nil { === modified file 'src/github.com/juju/juju/state/lease/fixture_test.go' --- src/github.com/juju/juju/state/lease/fixture_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/lease/fixture_test.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,7 @@ gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" + corelease "github.com/juju/juju/core/lease" "github.com/juju/juju/state/lease" ) @@ -46,7 +47,7 @@ // Fixture collects together a running client and a bunch of useful data. type Fixture struct { - Client lease.Client + Client corelease.Client Config lease.ClientConfig Runner jujutxn.Runner Clock *Clock @@ -135,9 +136,9 @@ type checkFunc func(params []interface{}, names []string) (bool, string) -type checkInfoFunc func(info lease.Info, param interface{}) (bool, string) +type checkInfoFunc func(info corelease.Info, param interface{}) (bool, string) -func checkHolder(info lease.Info, holder interface{}) (bool, string) { +func checkHolder(info corelease.Info, holder interface{}) (bool, string) { actual := info.Holder expect := holder.(string) if actual == expect { @@ -146,7 +147,7 @@ return false, fmt.Sprintf("lease held by %q; expected %q", actual, expect) } -func checkExpiry(info lease.Info, expiry interface{}) (bool, string) { +func checkExpiry(info corelease.Info, expiry interface{}) (bool, string) { actual := info.Expiry expect := expiry.(time.Time) if actual.Equal(expect) { === removed file 'src/github.com/juju/juju/state/lease/interface.go' --- src/github.com/juju/juju/state/lease/interface.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/lease/interface.go 1970-01-01 00:00:00 +0000 @@ -1,86 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package lease - -import ( - "time" - - "github.com/juju/errors" - "gopkg.in/mgo.v2/txn" -) - -// Client manipulates leases backed by MongoDB. Client implementations are not -// expected to be goroutine-safe. -type Client interface { - - // ClaimLease records the supplied holder's claim to the supplied lease. If - // it succeeds, the claim is guaranteed until at least the supplied duration - // after the call to ClaimLease was initiated. If it returns ErrInvalid, - // check Leases() for updated state. - ClaimLease(lease string, request Request) error - - // ExtendLease records the supplied holder's continued claim to the supplied - // lease, if necessary. If it succeeds, the claim is guaranteed until at - // least the supplied duration after the call to ExtendLease was initiated. - // If it returns ErrInvalid, check Leases() for updated state. - ExtendLease(lease string, request Request) error - - // ExpireLease records the vacation of the supplied lease. It will fail if - // we cannot verify that the lease's writer considers the expiry time to - // have passed. If it returns ErrInvalid, check Leases() for updated state. - ExpireLease(lease string) error - - // Leases returns a recent snapshot of lease state. 
Expiry times are - // expressed according to the Clock the client was configured with. - Leases() map[string]Info - - // Refresh reads all lease state from the database. - Refresh() error -} - -// Info holds information about a lease. It's MongoDB-specific, because it -// includes information that can be used with the mgo/txn package to gate -// transaction operations on lease state. -type Info struct { - - // Holder is the name of the current leaseholder. - Holder string - - // Expiry is the latest time at which it's possible the lease might still - // be valid. Attempting to expire the lease before this time will fail. - Expiry time.Time - - // AssertOp, if included in a mgo/txn transaction, will gate the transaction - // on the lease remaining held by Holder. If we didn't need this, we could - // easily implement Clients backed by other substrates. - AssertOp txn.Op -} - -// Request describes a lease request. -type Request struct { - - // Holder identifies the lease holder. - Holder string - - // Duration specifies the time for which the lease is required. - Duration time.Duration -} - -// Validate returns an error if any fields are invalid or inconsistent. -func (request Request) Validate() error { - if err := validateString(request.Holder); err != nil { - return errors.Annotatef(err, "invalid holder") - } - if request.Duration <= 0 { - return errors.Errorf("invalid duration") - } - return nil -} - -// ErrInvalid indicates that a client operation failed because latest state -// indicates that it's a logical impossibility. It's a short-range signal to -// calling code only; that code should never pass it on, but should inspect -// the Client's updated Leases() and either attempt a new operation or return -// a new error at a suitable level of abstraction. -var ErrInvalid = errors.New("invalid lease operation") === modified file 'src/github.com/juju/juju/state/lease/schema.go' --- src/github.com/juju/juju/state/lease/schema.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/lease/schema.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,8 @@ "time" "github.com/juju/errors" + + "github.com/juju/juju/core/lease" ) // These constants define the field names and type values used by documents in @@ -45,27 +47,6 @@ return time.Unix(0, v) } -// For simplicity's sake, we impose the same restrictions on all strings used -// with the lease package: they may not be empty, and none of the following -// characters are allowed. -// * '.' and '$' mean things to mongodb; we don't want to risk seeing them -// in key names. -// * '#' means something to the lease package and we don't want to risk -// confusing ourselves. -// * whitespace just seems like a bad idea. -const badCharacters = ".$# \t\r\n" - -// validateString returns an error if the string is not valid. -func validateString(s string) error { - if s == "" { - return errors.New("string is empty") - } - if strings.ContainsAny(s, badCharacters) { - return errors.New("string contains forbidden characters") - } - return nil -} - // leaseDocId returns the _id for the document holding details of the supplied // namespace and lease. func leaseDocId(namespace, lease string) string { @@ -84,11 +65,6 @@ Namespace string `bson:"namespace"` Name string `bson:"name"` - // EnvUUID exists because state.multiEnvRunner can't handle structs - // without `bson:"env-uuid"` fields. It's not necessary for the logic - // in this package, though. - EnvUUID string `bson:"env-uuid"` - // Holder, Expiry, and Writer map directly to entry. 
Holder string `bson:"holder"` Expiry int64 `bson:"expiry"` @@ -100,18 +76,18 @@ if doc.Type != typeLease { return errors.Errorf("invalid type %q", doc.Type) } - // state.multiEnvRunner prepends environ ids in our documents, and - // state.envStateCollection does not strip them out. + // state.multiModelRunner prepends environ ids in our documents, and + // state.modelStateCollection does not strip them out. if !strings.HasSuffix(doc.Id, leaseDocId(doc.Namespace, doc.Name)) { return errors.Errorf("inconsistent _id") } - if err := validateString(doc.Holder); err != nil { + if err := lease.ValidateString(doc.Holder); err != nil { return errors.Annotatef(err, "invalid holder") } if doc.Expiry == 0 { return errors.Errorf("invalid expiry") } - if err := validateString(doc.Writer); err != nil { + if err := lease.ValidateString(doc.Writer); err != nil { return errors.Annotatef(err, "invalid writer") } return nil @@ -163,12 +139,7 @@ Type string `bson:"type"` Namespace string `bson:"namespace"` - // EnvUUID exists because state.multiEnvRunner can't handle structs - // without `bson:"env-uuid"` fields. It's not necessary for the logic - // in this package, though. - EnvUUID string `bson:"env-uuid"` - - // Writers holds a the latest acknowledged time for every known client. + // Writers holds the latest acknowledged time for every known client. Writers map[string]int64 `bson:"writers"` } @@ -177,8 +148,8 @@ if doc.Type != typeClock { return errors.Errorf("invalid type %q", doc.Type) } - // state.multiEnvRunner prepends environ ids in our documents, and - // state.envStateCollection does not strip them out. + // state.multiModelRunner prepends environ ids in our documents, and + // state.modelStateCollection does not strip them out. if !strings.HasSuffix(doc.Id, clockDocId(doc.Namespace)) { return errors.Errorf("inconsistent _id") } === modified file 'src/github.com/juju/juju/state/life.go' --- src/github.com/juju/juju/state/life.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/life.go 2016-03-22 15:18:22 +0000 @@ -34,6 +34,7 @@ } var isAliveDoc = bson.D{{"life", Alive}} +var isDyingDoc = bson.D{{"life", Dying}} var isDeadDoc = bson.D{{"life", Dead}} var notDeadDoc = bson.D{{"life", bson.D{{"$ne", Dead}}}} === modified file 'src/github.com/juju/juju/state/logs.go' --- src/github.com/juju/juju/state/logs.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/logs.go 2016-03-22 15:18:22 +0000 @@ -30,7 +30,7 @@ // LoggingState describes the methods on State required for logging to // the database. type LoggingState interface { - EnvironUUID() string + ModelUUID() string MongoSession() *mgo.Session } @@ -53,20 +53,20 @@ // space. These documents will be inserted 1000's of times and each // document includes the field names. type logDoc struct { - Id bson.ObjectId `bson:"_id"` - Time time.Time `bson:"t"` - EnvUUID string `bson:"e"` - Entity string `bson:"n"` // e.g. "machine-0" - Module string `bson:"m"` // e.g. "juju.worker.firewaller" - Location string `bson:"l"` // "filename:lineno" - Level loggo.Level `bson:"v"` - Message string `bson:"x"` + Id bson.ObjectId `bson:"_id"` + Time time.Time `bson:"t"` + ModelUUID string `bson:"e"` + Entity string `bson:"n"` // e.g. "machine-0" + Module string `bson:"m"` // e.g. 
"juju.worker.firewaller" + Location string `bson:"l"` // "filename:lineno" + Level loggo.Level `bson:"v"` + Message string `bson:"x"` } type DbLogger struct { - logsColl *mgo.Collection - envUUID string - entity string + logsColl *mgo.Collection + modelUUID string + entity string } // NewDbLogger returns a DbLogger instance which is used to write logs @@ -74,23 +74,23 @@ func NewDbLogger(st LoggingState, entity names.Tag) *DbLogger { _, logsColl := initLogsSession(st) return &DbLogger{ - logsColl: logsColl, - envUUID: st.EnvironUUID(), - entity: entity.String(), + logsColl: logsColl, + modelUUID: st.ModelUUID(), + entity: entity.String(), } } // Log writes a log message to the database. func (logger *DbLogger) Log(t time.Time, module string, location string, level loggo.Level, msg string) error { return logger.logsColl.Insert(&logDoc{ - Id: bson.NewObjectId(), - Time: t, - EnvUUID: logger.envUUID, - Entity: logger.entity, - Module: module, - Location: location, - Level: level, - Message: msg, + Id: bson.NewObjectId(), + Time: t, + ModelUUID: logger.modelUUID, + Entity: logger.entity, + Module: module, + Location: location, + Level: level, + Message: msg, }) } @@ -140,6 +140,7 @@ StartTime time.Time MinLevel loggo.Level InitialLines int + NoTail bool IncludeEntity []string ExcludeEntity []string IncludeModule []string @@ -161,7 +162,7 @@ // logs collection and tailing the oplog. // // The value was calculated by looking at the per-minute peak log -// output of large broken environments with logging at DEBUG. +// output of large broken models with logging at DEBUG. var maxRecentLogIds = int(oplogOverlap.Minutes() * 150000) // NewLogTailer returns a LogTailer which filters according to the @@ -169,7 +170,7 @@ func NewLogTailer(st LoggingState, params *LogTailerParams) LogTailer { session := st.MongoSession().Copy() t := &logTailer{ - envUUID: st.EnvironUUID(), + modelUUID: st.ModelUUID(), session: session, logsColl: session.DB(logsDB).C(logsC).With(session), params: params, @@ -188,7 +189,7 @@ type logTailer struct { tomb tomb.Tomb - envUUID string + modelUUID string session *mgo.Session logsColl *mgo.Collection params *LogTailerParams @@ -224,6 +225,10 @@ return errors.Trace(err) } + if t.params.NoTail { + return nil + } + err = t.tailOplog() return errors.Trace(err) } @@ -244,7 +249,7 @@ } } - iter := query.Sort("t", "_id").Iter() + iter := query.Sort("t").Iter() doc := new(logDoc) for iter.Next(doc) { select { @@ -315,7 +320,7 @@ func (t *logTailer) paramsToSelector(params *LogTailerParams, prefix string) bson.D { sel := bson.D{ - {"e", t.envUUID}, + {"e", t.modelUUID}, {"t", bson.M{"$gte": params.StartTime}}, } if params.MinLevel > loggo.UNSPECIFIED { @@ -431,24 +436,24 @@ session, logsColl := initLogsSession(st) defer session.Close() - envUUIDs, err := getEnvsInLogs(logsColl) + modelUUIDs, err := getEnvsInLogs(logsColl) if err != nil { return errors.Annotate(err, "failed to get log counts") } pruneCounts := make(map[string]int) - // Remove old log entries (per environment UUID to take advantage + // Remove old log entries (per model UUID to take advantage // of indexes on the logs collection). 
- for _, envUUID := range envUUIDs { + for _, modelUUID := range modelUUIDs { removeInfo, err := logsColl.RemoveAll(bson.M{ - "e": envUUID, + "e": modelUUID, "t": bson.M{"$lt": minLogTime}, }) if err != nil { return errors.Annotate(err, "failed to prune logs by time") } - pruneCounts[envUUID] = removeInfo.Removed + pruneCounts[modelUUID] = removeInfo.Removed } // Do further pruning if the logs collection is over the maximum size. @@ -461,7 +466,7 @@ break } - envUUID, count, err := findEnvWithMostLogs(logsColl, envUUIDs) + modelUUID, count, err := findEnvWithMostLogs(logsColl, modelUUIDs) if err != nil { return errors.Annotate(err, "log count query failed") } @@ -469,14 +474,14 @@ break // Pruning is not worthwhile } - // Remove the oldest 1% of log records for the environment. + // Remove the oldest 1% of log records for the model. toRemove := int(float64(count) * 0.01) // Find the threshold timestamp to start removing from. // NOTE: this assumes that there are no more logs being added // for the time range being pruned (which should be true for // any realistic minimum log collection size). - tsQuery := logsColl.Find(bson.M{"e": envUUID}).Sort("t") + tsQuery := logsColl.Find(bson.M{"e": modelUUID}).Sort("t") tsQuery = tsQuery.Skip(toRemove) tsQuery = tsQuery.Select(bson.M{"t": 1}) var doc bson.M @@ -488,18 +493,18 @@ // Remove old records. removeInfo, err := logsColl.RemoveAll(bson.M{ - "e": envUUID, + "e": modelUUID, "t": bson.M{"$lt": thresholdTs}, }) if err != nil { return errors.Annotate(err, "log pruning failed") } - pruneCounts[envUUID] += removeInfo.Removed + pruneCounts[modelUUID] += removeInfo.Removed } - for envUUID, count := range pruneCounts { + for modelUUID, count := range pruneCounts { if count > 0 { - logger.Debugf("pruned %d logs for environment %s", count, envUUID) + logger.Debugf("pruned %d logs for model %s", count, modelUUID) } } return nil @@ -534,40 +539,40 @@ return result["size"].(int), nil } -// getEnvsInLogs returns the unique envrionment UUIDs that exist in +// getEnvsInLogs returns the unique model UUIDs that exist in // the logs collection. This uses one of the indexes on the // collection and should be fast. func getEnvsInLogs(coll *mgo.Collection) ([]string, error) { - var envUUIDs []string - err := coll.Find(nil).Distinct("e", &envUUIDs) + var modelUUIDs []string + err := coll.Find(nil).Distinct("e", &modelUUIDs) if err != nil { return nil, errors.Trace(err) } - return envUUIDs, nil + return modelUUIDs, nil } -// findEnvWithMostLogs returns the envUUID and log count for the -// environment with the most logs in the logs collection. -func findEnvWithMostLogs(logsColl *mgo.Collection, envUUIDs []string) (string, int, error) { - var maxEnvUUID string +// findEnvWithMostLogs returns the modelUUID and log count for the +// model with the most logs in the logs collection. +func findEnvWithMostLogs(logsColl *mgo.Collection, modelUUIDs []string) (string, int, error) { + var maxModelUUID string var maxCount int - for _, envUUID := range envUUIDs { - count, err := getLogCountForEnv(logsColl, envUUID) + for _, modelUUID := range modelUUIDs { + count, err := getLogCountForEnv(logsColl, modelUUID) if err != nil { return "", -1, errors.Trace(err) } if count > maxCount { - maxEnvUUID = envUUID + maxModelUUID = modelUUID maxCount = count } } - return maxEnvUUID, maxCount, nil + return maxModelUUID, maxCount, nil } // getLogCountForEnv returns the number of log records stored for a -// given environment.
-func getLogCountForEnv(coll *mgo.Collection, envUUID string) (int, error) { - count, err := coll.Find(bson.M{"e": envUUID}).Count() +// given model. +func getLogCountForEnv(coll *mgo.Collection, modelUUID string) (int, error) { + count, err := coll.Find(bson.M{"e": modelUUID}).Count() if err != nil { return -1, errors.Annotate(err, "failed to get log count") } === modified file 'src/github.com/juju/juju/state/logs_test.go' --- src/github.com/juju/juju/state/logs_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/logs_test.go 2016-03-22 15:18:22 +0000 @@ -4,6 +4,7 @@ package state_test import ( + "math/rand" "strconv" "strings" "time" @@ -43,8 +44,8 @@ } c.Assert(keys, jc.SameContents, []string{ "_id", // default index - "e-t", // env-uuid and timestamp - "e-n", // env-uuid and entity + "e-t", // model-uuid and timestamp + "e-n", // model-uuid and entity }) } @@ -62,7 +63,7 @@ c.Assert(docs, gc.HasLen, 2) c.Assert(docs[0]["t"], gc.Equals, t0) - c.Assert(docs[0]["e"], gc.Equals, s.State.EnvironUUID()) + c.Assert(docs[0]["e"], gc.Equals, s.State.ModelUUID()) c.Assert(docs[0]["n"], gc.Equals, "machine-22") c.Assert(docs[0]["m"], gc.Equals, "some.where") c.Assert(docs[0]["l"], gc.Equals, "foo.go:99") @@ -70,7 +71,7 @@ c.Assert(docs[0]["x"], gc.Equals, "all is well") c.Assert(docs[1]["t"], gc.Equals, t1) - c.Assert(docs[1]["e"], gc.Equals, s.State.EnvironUUID()) + c.Assert(docs[1]["e"], gc.Equals, s.State.ModelUUID()) c.Assert(docs[1]["n"], gc.Equals, "machine-22") c.Assert(docs[1]["m"], gc.Equals, "else.where") c.Assert(docs[1]["l"], gc.Equals, "bar.go:42") @@ -109,7 +110,7 @@ } func (s *LogsSuite) TestPruneLogsBySize(c *gc.C) { - // Set up 3 environments and generate different amounts of logs + // Set up 3 models and generate different amounts of logs // for them. now := time.Now().Truncate(time.Millisecond) @@ -117,12 +118,12 @@ startingLogsS0 := 10 s.generateLogs(c, s0, now, startingLogsS0) - s1 := s.Factory.MakeEnvironment(c, nil) + s1 := s.Factory.MakeModel(c, nil) defer s1.Close() startingLogsS1 := 10000 s.generateLogs(c, s1, now, startingLogsS1) - s2 := s.Factory.MakeEnvironment(c, nil) + s2 := s.Factory.MakeModel(c, nil) defer s2.Close() startingLogsS2 := 12000 s.generateLogs(c, s2, now, startingLogsS2) @@ -145,7 +146,7 @@ // Ensure that the latest log records are still there. assertLatestTs := func(st *state.State) { var doc bson.M - err := s.logsColl.Find(bson.M{"e": st.EnvironUUID()}).Sort("-t").One(&doc) + err := s.logsColl.Find(bson.M{"e": st.ModelUUID()}).Sort("-t").One(&doc) c.Assert(err, jc.ErrorIsNil) c.Assert(doc["t"].(time.Time), gc.Equals, now) } @@ -165,7 +166,7 @@ } func (s *LogsSuite) countLogs(c *gc.C, st *state.State) int { - count, err := s.logsColl.Find(bson.M{"e": st.EnvironUUID()}).Count() + count, err := s.logsColl.Find(bson.M{"e": st.ModelUUID()}).Count() c.Assert(err, jc.ErrorIsNil) return count } @@ -250,12 +251,12 @@ good := logTemplate{Message: "good"} writeLogs := func() { s.writeLogs(c, 1, logTemplate{ - EnvUUID: "someuuid0", - Message: "bad", + ModelUUID: "someuuid0", + Message: "bad", }) s.writeLogs(c, 1, logTemplate{ - EnvUUID: "someuuid1", - Message: "bad", + ModelUUID: "someuuid1", + Message: "bad", }) s.writeLogs(c, 1, good) } @@ -314,6 +315,41 @@ s.assertTailer(c, tailer, 2, expected) } +func (s *LogTailerSuite) TestNoTail(c *gc.C) { + expected := logTemplate{Message: "want"} + s.writeLogs(c, 2, expected) + + // Write a log entry that's only in the oplog. 
+ doc := s.logTemplateToDoc(logTemplate{Message: "dont want"}, time.Now()) + err := s.writeLogToOplog(doc) + c.Assert(err, jc.ErrorIsNil) + + tailer := state.NewLogTailer(s.State, &state.LogTailerParams{ + NoTail: true, + }) + // Not strictly necessary, just in case NoTail doesn't work in the test. + defer tailer.Stop() + + // Logs only in the oplog shouldn't be reported and the tailer + // should stop itself once the log collection has been read. + s.assertTailer(c, tailer, 2, expected) + select { + case _, ok := <-tailer.Logs(): + if ok { + c.Fatal("shouldn't be any further logs") + } + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for logs channel to close") + } + + select { + case <-tailer.Dying(): + // Success. + case <-time.After(coretesting.LongWait): + c.Fatal("tailer didn't stop itself") + } +} + func (s *LogTailerSuite) TestIncludeEntity(c *gc.C) { machine0 := logTemplate{Entity: names.NewMachineTag("0")} foo0 := logTemplate{Entity: names.NewUnitTag("foo/0")} @@ -493,12 +529,12 @@ } type logTemplate struct { - EnvUUID string - Entity names.Tag - Module string - Location string - Level loggo.Level - Message string + ModelUUID string + Entity names.Tag + Module string + Location string + Level loggo.Level + Message string } // writeLogs creates count log messages at the current time using @@ -513,29 +549,33 @@ // endTime using the supplied template. As well as writing to the logs // collection, entries are also made into the fake oplog collection. func (s *LogTailerSuite) writeLogsT(c *gc.C, startTime, endTime time.Time, count int, lt logTemplate) { - s.normaliseLogTemplate(&lt) - interval := endTime.Sub(startTime) / time.Duration(count) t := startTime for i := 0; i < count; i++ { - err := state.WriteLogWithOplog( - s.oplogColl, - lt.EnvUUID, - lt.Entity, - t, - lt.Module, - lt.Location, - lt.Level, - lt.Message, - ) + doc := s.logTemplateToDoc(lt, t) + err := s.writeLogToOplog(doc) + c.Assert(err, jc.ErrorIsNil) + err = s.logsColl.Insert(doc) c.Assert(err, jc.ErrorIsNil) t = t.Add(interval) } } +// writeLogToOplog writes out a log record to a (probably fake) +// oplog collection. +func (s *LogTailerSuite) writeLogToOplog(doc interface{}) error { + return s.oplogColl.Insert(bson.D{ + {"ts", bson.MongoTimestamp(time.Now().Unix() << 32)}, // an approximation which will do + {"h", rand.Int63()}, // again, a suitable fake + {"op", "i"}, // this will always be an insert + {"ns", "logs.logs"}, + {"o", doc}, + }) +} + func (s *LogTailerSuite) normaliseLogTemplate(lt *logTemplate) { - if lt.EnvUUID == "" { - lt.EnvUUID = s.State.EnvironUUID() + if lt.ModelUUID == "" { + lt.ModelUUID = s.State.ModelUUID() } if lt.Entity == nil { lt.Entity = names.NewMachineTag("0") @@ -554,6 +594,19 @@ } } +func (s *LogTailerSuite) logTemplateToDoc(lt logTemplate, t time.Time) interface{} { + s.normaliseLogTemplate(&lt) + return state.MakeLogDoc( + lt.ModelUUID, + lt.Entity, + t, + lt.Module, + lt.Location, + lt.Level, + lt.Message, + ) +} + func (s *LogTailerSuite) assertTailer(c *gc.C, tailer state.LogTailer, expectedCount int, lt logTemplate) { s.normaliseLogTemplate(&lt) === modified file 'src/github.com/juju/juju/state/machine.go' --- src/github.com/juju/juju/state/machine.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/machine.go 2016-03-22 15:18:22 +0000 @@ -42,27 +42,28 @@ const ( _ MachineJob = iota JobHostUnits - JobManageEnviron + JobManageModel JobManageNetworking - - // Deprecated in 1.18.
- JobManageStateDeprecated -) - -var jobNames = map[MachineJob]multiwatcher.MachineJob{ - JobHostUnits: multiwatcher.JobHostUnits, - JobManageEnviron: multiwatcher.JobManageEnviron, - JobManageNetworking: multiwatcher.JobManageNetworking, - - // Deprecated in 1.18. - JobManageStateDeprecated: multiwatcher.JobManageStateDeprecated, -} +) + +var ( + jobNames = map[MachineJob]multiwatcher.MachineJob{ + JobHostUnits: multiwatcher.JobHostUnits, + JobManageModel: multiwatcher.JobManageModel, + JobManageNetworking: multiwatcher.JobManageNetworking, + } + jobMigrationValue = map[MachineJob]string{ + JobHostUnits: "host-units", + JobManageModel: "api-server", + JobManageNetworking: "manage-networking", + } +) // AllJobs returns all supported machine jobs. func AllJobs() []MachineJob { return []MachineJob{ JobHostUnits, - JobManageEnviron, + JobManageModel, JobManageNetworking, } } @@ -84,6 +85,15 @@ return jujuJobs } +// MigrationValue converts the state job into a useful human readable +// string for model migration. +func (job MachineJob) MigrationValue() string { + if value, ok := jobMigrationValue[job]; ok { + return value + } + return "unknown" +} + func (job MachineJob) String() string { return string(job.ToParams()) } @@ -97,7 +107,7 @@ type machineDoc struct { DocID string `bson:"_id"` Id string `bson:"machineid"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Nonce string Series string ContainerType string @@ -142,6 +152,10 @@ // Placement is the placement directive that should be used when provisioning // an instance for the machine. Placement string `bson:",omitempty"` + + // StopMongoUntilVersion holds the version that must be checked to + // know if mongo must be stopped. + StopMongoUntilVersion string `bson:",omitempty"` } func newMachine(st *State, doc *machineDoc) *Machine { @@ -153,7 +167,7 @@ } func wantsVote(jobs []MachineJob, noVote bool) bool { - return hasJob(jobs, JobManageEnviron) && !noVote + return hasJob(jobs, JobManageModel) && !noVote } // Id returns the machine id. @@ -191,7 +205,7 @@ DocID string `bson:"_id"` MachineId string `bson:"machineid"` InstanceId instance.Id `bson:"instanceid"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Status string `bson:"status,omitempty"` Arch *string `bson:"arch,omitempty"` Mem *uint64 `bson:"mem,omitempty"` @@ -262,7 +276,7 @@ return m.doc.Jobs } -// WantsVote reports whether the machine is a state server +// WantsVote reports whether the machine is a controller // that wants to take part in peer voting. func (m *Machine) WantsVote() bool { return wantsVote(m.doc.Jobs, m.doc.NoVote) @@ -291,9 +305,31 @@ return nil } -// IsManager returns true if the machine has JobManageEnviron. +// SetStopMongoUntilVersion sets a version that is to be checked against +// the agent config before deciding if mongo must be started on a +// state server. +func (m *Machine) SetStopMongoUntilVersion(v mongo.Version) error { + ops := []txn.Op{{ + C: machinesC, + Id: m.doc.DocID, + Update: bson.D{{"$set", bson.D{{"stopmongountilversion", v.String()}}}}, + }} + if err := m.st.runTransaction(ops); err != nil { + return fmt.Errorf("cannot set StopMongoUntilVersion %v: %v", m, onAbort(err, ErrDead)) + } + m.doc.StopMongoUntilVersion = v.String() + return nil +} + +// StopMongoUntilVersion returns the current minimum version that +// is required for this machine to have mongo running. 
+func (m *Machine) StopMongoUntilVersion() (mongo.Version, error) { + return mongo.NewVersion(m.doc.StopMongoUntilVersion) +} + +// IsManager returns true if the machine has JobManageModel. func (m *Machine) IsManager() bool { - return hasJob(m.doc.Jobs, JobManageEnviron) + return hasJob(m.doc.Jobs, JobManageModel) } // IsManual returns true if the machine was manually provisioned. @@ -308,7 +344,7 @@ // case we need to check if its provider type is "manual". // We also check for "null", which is an alias for manual. if m.doc.Id == "0" { - cfg, err := m.st.EnvironConfig() + cfg, err := m.st.ModelConfig() if err != nil { return false, err } @@ -363,11 +399,11 @@ } // SetMongoPassword sets the password the agent responsible for the machine -// should use to communicate with the state servers. Previous passwords +// should use to communicate with the controllers. Previous passwords // are invalidated. func (m *Machine) SetMongoPassword(password string) error { if !m.IsManager() { - return errors.NotSupportedf("setting mongo password for non-state server machine %v", m) + return errors.NotSupportedf("setting mongo password for non-controller machine %v", m) } return mongo.SetAdminMongoPassword(m.st.session, m.Tag().String(), password) } @@ -431,7 +467,7 @@ // Destroy sets the machine lifecycle to Dying if it is Alive. It does // nothing otherwise. Destroy will fail if the machine has principal -// units assigned, or if the machine has JobManageEnviron. +// units assigned, or if the machine has JobManageModel. // If the machine has assigned units, Destroy will return // a HasAssignedUnitsError. func (m *Machine) Destroy() error { @@ -441,22 +477,33 @@ // ForceDestroy queues the machine for complete removal, including the // destruction of all units and containers on the machine. func (m *Machine) ForceDestroy() error { - if !m.IsManager() { - ops := []txn.Op{{ - C: machinesC, - Id: m.doc.DocID, - Assert: bson.D{{"jobs", bson.D{{"$nin", []MachineJob{JobManageEnviron}}}}}, - }, m.st.newCleanupOp(cleanupForceDestroyedMachine, m.doc.Id)} - if err := m.st.runTransaction(ops); err != txn.ErrAborted { - return err - } - } - return fmt.Errorf("machine %s is required by the environment", m.doc.Id) + ops, err := m.forceDestroyOps() + if err != nil { + return errors.Trace(err) + } + if err := m.st.runTransaction(ops); err != txn.ErrAborted { + return errors.Trace(err) + } + return nil +} + +var managerMachineError = errors.New("machine is required by the model") + +func (m *Machine) forceDestroyOps() ([]txn.Op, error) { + if m.IsManager() { + return nil, errors.Trace(managerMachineError) + } + + return []txn.Op{{ + C: machinesC, + Id: m.doc.DocID, + Assert: bson.D{{"jobs", bson.D{{"$nin", []MachineJob{JobManageModel}}}}}, + }, m.st.newCleanupOp(cleanupForceDestroyedMachine, m.doc.Id)}, nil } // EnsureDead sets the machine lifecycle to Dead if it is Alive or Dying. // It does nothing otherwise. EnsureDead will fail if the machine has -// principal units assigned, or if the machine has JobManageEnviron. +// principal units assigned, or if the machine has JobManageModel. // If the machine has assigned units, EnsureDead will return // a HasAssignedUnitsError. func (m *Machine) EnsureDead() error { @@ -593,7 +640,7 @@ // one intended to determine the cause of failure of the preceding attempt. 
buildTxn := func(attempt int) ([]txn.Op, error) { advanceAsserts := bson.D{ - {"jobs", bson.D{{"$nin", []MachineJob{JobManageEnviron}}}}, + {"jobs", bson.D{{"$nin", []MachineJob{JobManageModel}}}}, {"hasvote", bson.D{{"$ne", true}}}, } // Grab a fresh copy of the machine data. @@ -625,11 +672,11 @@ } // Check that the machine does not have any responsibilities that // prevent a lifecycle change. - if hasJob(m.doc.Jobs, JobManageEnviron) { - // (NOTE: When we enable multiple JobManageEnviron machines, + if hasJob(m.doc.Jobs, JobManageModel) { + // (NOTE: When we enable multiple JobManageModel machines, // this restriction will be lifted, but we will assert that the // machine is not voting) - return nil, fmt.Errorf("machine %s is required by the environment", m.doc.Id) + return nil, fmt.Errorf("machine %s is required by the model", m.doc.Id) } if m.doc.HasVote { return nil, fmt.Errorf("machine %s is a voting replica set member", m.doc.Id) } @@ -926,7 +973,7 @@ // It returns the started pinger. func (m *Machine) SetAgentPresence() (*presence.Pinger, error) { presenceCollection := m.st.getPresence() - p := presence.NewPinger(presenceCollection, m.st.environTag, m.globalKey()) + p := presence.NewPinger(presenceCollection, m.st.modelTag, m.globalKey()) err := p.Start() if err != nil { return nil, err } @@ -934,10 +981,10 @@ // We perform a manual sync here so that the // presence pinger has the most up-to-date information when it // starts. This ensures that commands run immediately after bootstrap - // like status or ensure-availability will have an accurate values + // like status or enable-ha will have accurate values // for agent-state. // - // TODO: Does not work for multiple state servers. Trigger a sync across all state servers. + // TODO: Does not work for multiple controllers. Trigger a sync across all controllers. if m.IsManager() { m.st.pwatcher.Sync() } @@ -1055,7 +1102,7 @@ DocID: m.doc.DocID, MachineId: m.doc.Id, InstanceId: id, - EnvUUID: m.doc.EnvUUID, + ModelUUID: m.doc.ModelUUID, Arch: characteristics.Arch, Mem: characteristics.Mem, RootDisk: characteristics.RootDisk, @@ -1366,7 +1413,7 @@ } // Update addresses now.
- envConfig, err := m.st.EnvironConfig() + envConfig, err := m.st.ModelConfig() if err != nil { return err } @@ -1501,9 +1548,9 @@ if args.InterfaceName == "" { return nil, fmt.Errorf("interface name must be not empty") } - doc := newNetworkInterfaceDoc(m.doc.Id, m.st.EnvironUUID(), args) + doc := newNetworkInterfaceDoc(m.doc.Id, m.st.ModelUUID(), args) ops := []txn.Op{ - assertEnvAliveOp(m.st.EnvironUUID()), + assertModelAliveOp(m.st.ModelUUID()), { C: networksC, Id: m.st.docID(args.NetworkName), === modified file 'src/github.com/juju/juju/state/machine_test.go' --- src/github.com/juju/juju/state/machine_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/machine_test.go 2016-03-22 15:18:22 +0000 @@ -39,14 +39,14 @@ func (s *MachineSuite) SetUpTest(c *gc.C) { s.ConnSuite.SetUpTest(c) - s.policy.GetConstraintsValidator = func(*config.Config) (constraints.Validator, error) { + s.policy.GetConstraintsValidator = func(*config.Config, state.SupportedArchitecturesQuerier) (constraints.Validator, error) { validator := constraints.NewValidator() validator.RegisterConflicts([]string{constraints.InstanceType}, []string{constraints.Mem}) validator.RegisterUnsupported([]string{constraints.CpuPower}) return validator, nil } var err error - s.machine0, err = s.State.AddMachine("quantal", state.JobManageEnviron) + s.machine0, err = s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) s.machine, err = s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -186,7 +186,7 @@ } func (s *MachineSuite) TestMachineIsManualBootstrap(c *gc.C) { - cfg, err := s.State.EnvironConfig() + cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) c.Assert(cfg.Type(), gc.Not(gc.Equals), "null") c.Assert(s.machine.Id(), gc.Equals, "1") @@ -194,7 +194,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(manual, jc.IsFalse) attrs := map[string]interface{}{"type": "null"} - err = s.State.UpdateEnvironConfig(attrs, nil, nil) + err = s.State.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) manual, err = s.machine0.IsManual() c.Assert(err, jc.ErrorIsNil) @@ -241,15 +241,15 @@ c.Assert(container.IsContainer(), jc.IsTrue) } -func (s *MachineSuite) TestLifeJobManageEnviron(c *gc.C) { - // A JobManageEnviron machine must never advance lifecycle. +func (s *MachineSuite) TestLifeJobManageModel(c *gc.C) { + // A JobManageModel machine must never advance lifecycle. m := s.machine0 err := m.Destroy() - c.Assert(err, gc.ErrorMatches, "machine 0 is required by the environment") + c.Assert(err, gc.ErrorMatches, "machine 0 is required by the model") err = m.ForceDestroy() - c.Assert(err, gc.ErrorMatches, "machine 0 is required by the environment") + c.Assert(err, gc.ErrorMatches, "machine is required by the model") err = m.EnsureDead() - c.Assert(err, gc.ErrorMatches, "machine 0 is required by the environment") + c.Assert(err, gc.ErrorMatches, "machine 0 is required by the model") } func (s *MachineSuite) TestLifeMachineWithContainer(c *gc.C) { @@ -327,6 +327,28 @@ c.Assert(err, jc.Satisfies, errors.IsNotFound) } +func (s *MachineSuite) TestDestroyOps(c *gc.C) { + m := s.Factory.MakeMachine(c, nil) + ops, err := state.ForceDestroyMachineOps(m) + c.Assert(err, jc.ErrorIsNil) + c.Assert(ops, gc.NotNil) +} + +func (s *MachineSuite) TestDestroyOpsForManagerFails(c *gc.C) { + // s.Factory does not allow us to make a manager machine, so we grab one + // from State ... 
+ machines, err := s.State.AllMachines() + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(machines), jc.GreaterThan, 0) + m := machines[0] + c.Assert(m.IsManager(), jc.IsTrue) + + // ... and assert that we cannot get the destroy ops for it. + ops, err := state.ForceDestroyMachineOps(m) + c.Assert(err, jc.Satisfies, state.IsManagerMachineError) + c.Assert(ops, gc.IsNil) +} + func (s *MachineSuite) TestDestroyAbort(c *gc.C) { defer state.SetBeforeHooks(c, s.State, func() { c.Assert(s.machine.Destroy(), gc.IsNil) @@ -592,7 +614,7 @@ func (s *MachineSuite) TestSetMongoPassword(c *gc.C) { info := testing.NewMongoInfo() - st, err := state.Open(s.envTag, info, testing.NewDialOpts(), state.Policy(nil)) + st, err := state.Open(s.modelTag, info, testing.NewDialOpts(), state.Policy(nil)) c.Assert(err, jc.ErrorIsNil) defer func() { // Remove the admin password so that the test harness can reset the state. @@ -617,13 +639,13 @@ // Check that we cannot log in with the wrong password. info.Tag = ent.Tag() info.Password = "bar" - err = tryOpenState(s.envTag, info) + err = tryOpenState(s.modelTag, info) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) c.Check(err, gc.ErrorMatches, `cannot log in to admin database as "machine-0": unauthorized mongo access: .*`) // Check that we can log in with the correct password. info.Password = "foo" - st1, err := state.Open(s.envTag, info, testing.NewDialOpts(), state.Policy(nil)) + st1, err := state.Open(s.modelTag, info, testing.NewDialOpts(), state.Policy(nil)) c.Assert(err, jc.ErrorIsNil) defer st1.Close() @@ -636,18 +658,18 @@ // Check that we cannot log in with the old password. info.Password = "foo" - err = tryOpenState(s.envTag, info) + err = tryOpenState(s.modelTag, info) c.Check(errors.Cause(err), jc.Satisfies, errors.IsUnauthorized) c.Check(err, gc.ErrorMatches, `cannot log in to admin database as "machine-0": unauthorized mongo access: .*`) // Check that we can log in with the correct password. info.Password = "bar" - err = tryOpenState(s.envTag, info) + err = tryOpenState(s.modelTag, info) c.Assert(err, jc.ErrorIsNil) // Check that the administrator can still log in. info.Tag, info.Password = nil, "admin-secret" - err = tryOpenState(s.envTag, info) + err = tryOpenState(s.modelTag, info) c.Assert(err, jc.ErrorIsNil) } @@ -657,7 +679,7 @@ }) } -func (s *MachineSuite) TestSetPasswordPreEnvUUID(c *gc.C) { +func (s *MachineSuite) TestSetPasswordPreModelUUID(c *gc.C) { // Ensure that SetPassword works for machines even when the env // UUID upgrade migrations haven't run yet. type oldMachineDoc struct { @@ -1369,9 +1391,9 @@ // Machine.WatchHardwareCharacteristics // Service.Watch // Unit.Watch - // State.WatchForEnvironConfigChanges + // State.WatchForModelConfigChanges // Unit.WatchConfigSettings - testWatcherDiesWhenStateCloses(c, s.envTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { m, err := st.Machine(s.machine.Id()) c.Assert(err, jc.ErrorIsNil) w := m.Watch() @@ -1381,7 +1403,7 @@ } func (s *MachineSuite) TestWatchPrincipalUnits(c *gc.C) { - // TODO(mjs) - ENVUUID - test with multiple environments with + // TODO(mjs) - MODELUUID - test with multiple models with // identically named units and ensure there's no leakage. // Start a watch on an empty machine; check no units reported. 
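TestDestroyOps and TestDestroyOpsForManagerFails above exercise the machine.go refactor from earlier in this diff: ForceDestroy now delegates to forceDestroyOps, which rejects manager machines up front with the distinct managerMachineError instead of the old catch-all "required by the environment" message. A sketch of the behaviour under test, assuming ForceDestroyMachineOps and IsManagerMachineError are the package's test exports for forceDestroyOps and managerMachineError (their export_test.go wiring is not part of this diff):

    ops, err := state.ForceDestroyMachineOps(m)
    if state.IsManagerMachineError(err) {
        // m runs JobManageModel and cannot be force-destroyed
        return err
    }
    // otherwise ops holds the assert that m has no JobManageModel job
    // plus the cleanup op that tears down its units and containers.

Splitting operation construction from transaction execution is what lets the error path be tested without running a transaction at all.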
@@ -1479,7 +1501,7 @@ func (s *MachineSuite) TestWatchPrincipalUnitsDiesOnStateClose(c *gc.C) { // This test is testing logic in watcher.unitsWatcher, which // is also used by Unit.WatchSubordinateUnits. - testWatcherDiesWhenStateCloses(c, s.envTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { m, err := st.Machine(s.machine.Id()) c.Assert(err, jc.ErrorIsNil) w := m.WatchPrincipalUnits() @@ -1583,7 +1605,7 @@ } func (s *MachineSuite) TestWatchUnitsDiesOnStateClose(c *gc.C) { - testWatcherDiesWhenStateCloses(c, s.envTag, func(c *gc.C, st *state.State) waiter { + testWatcherDiesWhenStateCloses(c, s.modelTag, func(c *gc.C, st *state.State) waiter { m, err := st.Machine(s.machine.Id()) c.Assert(err, jc.ErrorIsNil) w := m.WatchUnits() @@ -1592,12 +1614,12 @@ }) } -func (s *MachineSuite) TestConstraintsFromEnvironment(c *gc.C) { +func (s *MachineSuite) TestConstraintsFromModel(c *gc.C) { econs1 := constraints.MustParse("mem=1G") econs2 := constraints.MustParse("mem=2G") - // A newly-created machine gets a copy of the environment constraints. - err := s.State.SetEnvironConstraints(econs1) + // A newly-created machine gets a copy of the model constraints. + err := s.State.SetModelConstraints(econs1) c.Assert(err, jc.ErrorIsNil) machine1, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -1605,8 +1627,8 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(mcons1, gc.DeepEquals, econs1) - // Change environment constraints and add a new machine. - err = s.State.SetEnvironConstraints(econs2) + // Change model constraints and add a new machine. + err = s.State.SetModelConstraints(econs2) c.Assert(err, jc.ErrorIsNil) machine2, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) @@ -1877,7 +1899,7 @@ // Now simulate prefer-ipv6: true c.Assert( - s.State.UpdateEnvironConfig( + s.State.UpdateModelConfig( map[string]interface{}{"prefer-ipv6": true}, nil, nil, ), @@ -2515,148 +2537,6 @@ } } -func (s *MachineSuite) TestWatchInterfaces(c *gc.C) { - // Provision the machine. - networks := []state.NetworkInfo{{ - Name: "net1", - ProviderId: "net1", - CIDR: "0.1.2.0/24", - VLANTag: 0, - }, { - Name: "vlan42", - ProviderId: "vlan42", - CIDR: "0.2.2.0/24", - VLANTag: 42, - }} - interfaces := []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - Disabled: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }} - err := s.machine.SetInstanceInfo("umbrella/0", "fake_nonce", nil, networks, interfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - // Read dynamically generated document Ids. - ifaces, err := s.machine.NetworkInterfaces() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ifaces, gc.HasLen, 3) - - // Start network interface watcher. - w := s.machine.WatchInterfaces() - defer testing.AssertStop(c, w) - wc := testing.NewNotifyWatcherC(c, s.State, w) - wc.AssertOneChange() - - // Disable the first interface. - err = ifaces[0].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Disable the first interface again, should not report. - err = ifaces[0].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Enable the second interface, should report, because it was initially disabled. 
- err = ifaces[1].Enable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Disable two interfaces at once, check that both are reported. - err = ifaces[1].Disable() - c.Assert(err, jc.ErrorIsNil) - err = ifaces[2].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Enable the first interface. - err = ifaces[0].Enable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Enable the first interface again, should not report. - err = ifaces[0].Enable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Remove the network interface. - err = ifaces[0].Remove() - c.Assert(err, jc.ErrorIsNil) - wc.AssertOneChange() - - // Add the new interface. - _, _ = addNetworkAndInterface( - c, s.State, s.machine, - "net2", "net2", "0.5.2.0/24", 0, false, - "aa:bb:cc:dd:ee:f2", "eth2") - wc.AssertOneChange() - - // Provision another machine, should not report - machine2, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - interfaces2 := []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:e0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:e1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }} - err = machine2.SetInstanceInfo("m-too", "fake_nonce", nil, networks, interfaces2, nil, nil) - c.Assert(err, jc.ErrorIsNil) - c.Assert(ifaces, gc.HasLen, 3) - wc.AssertNoChange() - - // Read dynamically generated document Ids. - ifaces2, err := machine2.NetworkInterfaces() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ifaces2, gc.HasLen, 3) - - // Disable the first interface on the second machine, should not report. - err = ifaces2[0].Disable() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Remove the network interface on the second machine, should not report. - err = ifaces2[0].Remove() - c.Assert(err, jc.ErrorIsNil) - wc.AssertNoChange() - - // Stop watcher; check Changes chan closed. - testing.AssertStop(c, w) - wc.AssertClosed() -} - -func (s *MachineSuite) TestWatchInterfacesDiesOnStateClose(c *gc.C) { - testWatcherDiesWhenStateCloses(c, s.envTag, func(c *gc.C, st *state.State) waiter { - m, err := st.Machine(s.machine.Id()) - c.Assert(err, jc.ErrorIsNil) - w := m.WatchInterfaces() - <-w.Changes() - return w - }) -} - func (s *MachineSuite) TestMachineAgentTools(c *gc.C) { m, err := s.State.AddMachine("quantal", state.JobHostUnits) c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/state/meterstatus.go' --- src/github.com/juju/juju/state/meterstatus.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/meterstatus.go 2016-03-22 15:18:22 +0000 @@ -73,10 +73,10 @@ ) type meterStatusDoc struct { - DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` - Code string `bson:"code"` - Info string `bson:"info"` + DocID string `bson:"_id"` + ModelUUID string `bson:"model-uuid"` + Code string `bson:"code"` + Info string `bson:"info"` } // SetMeterStatus sets the meter status for the unit. @@ -127,7 +127,7 @@ // createMeterStatusOp returns the operation needed to create the meter status // document associated with the given globalKey. 
func createMeterStatusOp(st *State, globalKey string, doc *meterStatusDoc) txn.Op { - doc.EnvUUID = st.EnvironUUID() + doc.ModelUUID = st.ModelUUID() return txn.Op{ C: meterStatusC, Id: st.docID(globalKey), === modified file 'src/github.com/juju/juju/state/meterstatus_test.go' --- src/github.com/juju/juju/state/meterstatus_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/meterstatus_test.go 2016-03-22 15:18:22 +0000 @@ -45,14 +45,14 @@ c.Assert(status.Code, gc.Equals, state.MeterGreen) } -func (s *MeterStateSuite) TestMeterStatusIncludesEnvUUID(c *gc.C) { +func (s *MeterStateSuite) TestMeterStatusIncludesModelUUID(c *gc.C) { jujuDB := s.MgoSuite.Session.DB("juju") meterStatus := jujuDB.C("meterStatus") var docs []bson.M err := meterStatus.Find(nil).All(&docs) c.Assert(err, jc.ErrorIsNil) c.Assert(docs, gc.HasLen, 1) - c.Assert(docs[0]["env-uuid"], gc.Equals, s.State.EnvironUUID()) + c.Assert(docs[0]["model-uuid"], gc.Equals, s.State.ModelUUID()) } func (s *MeterStateSuite) TestSetMeterStatusIncorrect(c *gc.C) { === modified file 'src/github.com/juju/juju/state/metrics.go' --- src/github.com/juju/juju/state/metrics.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/metrics.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" "gopkg.in/mgo.v2/txn" @@ -34,10 +34,11 @@ type metricBatchDoc struct { UUID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Unit string `bson:"unit"` CharmUrl string `bson:"charmurl"` Sent bool `bson:"sent"` + DeleteTime time.Time `bson:"delete-time"` Created time.Time `bson:"created"` Metrics []Metric `bson:"metrics"` Credentials []byte `bson:"credentials"` @@ -75,39 +76,53 @@ // BatchParam contains the properties of the metrics batch used when creating a metrics // batch. type BatchParam struct { - UUID string - CharmURL *charm.URL - Created time.Time - Metrics []Metric - Credentials []byte + UUID string + CharmURL string + Created time.Time + Metrics []Metric + Unit names.UnitTag } -// addMetrics adds a new batch of metrics to the database. -func (st *State) addMetrics(unitTag names.UnitTag, batch BatchParam) (*MetricBatch, error) { +// AddMetrics adds a new batch of metrics to the database. 
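+//
+// A minimal usage sketch; the batch values shown here are illustrative
+// only, not part of this change:
+//
+//     batch, err := st.AddMetrics(BatchParam{
+//         UUID:     utils.MustNewUUID().String(),
+//         Created:  time.Now(),
+//         CharmURL: "local:quantal/metered",
+//         Metrics:  []Metric{{Key: "pings", Value: "1", Time: time.Now()}},
+//         Unit:     names.NewUnitTag("metered/0"),
+//     })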
+func (st *State) AddMetrics(batch BatchParam) (*MetricBatch, error) {
 	if len(batch.Metrics) == 0 {
 		return nil, errors.New("cannot add a batch of 0 metrics")
 	}
+	charmURL, err := charm.ParseURL(batch.CharmURL)
+	if err != nil {
+		return nil, errors.NewNotValid(err, "could not parse charm URL")
+	}
+
+	unit, err := st.Unit(batch.Unit.Id())
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	service, err := unit.Service()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
 	metric := &MetricBatch{
 		st: st,
 		doc: metricBatchDoc{
 			UUID:        batch.UUID,
-			EnvUUID:     st.EnvironUUID(),
-			Unit:        unitTag.Id(),
-			CharmUrl:    batch.CharmURL.String(),
+			ModelUUID:   st.ModelUUID(),
+			Unit:        batch.Unit.Id(),
+			CharmUrl:    charmURL.String(),
 			Sent:        false,
 			Created:     batch.Created,
 			Metrics:     batch.Metrics,
-			Credentials: batch.Credentials,
-		}}
+			Credentials: service.MetricCredentials(),
+		},
+	}
 	if err := metric.validate(); err != nil {
 		return nil, err
 	}
 	buildTxn := func(attempt int) ([]txn.Op, error) {
 		if attempt > 0 {
-			notDead, err := isNotDead(st, unitsC, unitTag.Id())
+			notDead, err := isNotDead(st, unitsC, batch.Unit.Id())
 			if err != nil || !notDead {
-				return nil, errors.NotFoundf(unitTag.Id())
+				return nil, errors.NotFoundf(batch.Unit.Id())
 			}
 			exists, err := st.MetricBatch(batch.UUID)
 			if exists != nil && err == nil {
@@ -119,7 +134,7 @@
 		}
 		ops := []txn.Op{{
 			C:      unitsC,
-			Id:     st.docID(unitTag.Id()),
+			Id:     st.docID(batch.Unit.Id()),
 			Assert: notDeadDoc,
 		}, {
 			C:      metricsC,
@@ -129,7 +144,7 @@
 		}}
 		return ops, nil
 	}
-	err := st.run(buildTxn)
+	err = st.run(buildTxn)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
@@ -137,11 +152,11 @@
 	return metric, nil
 }
 
-// MetricBatches returns all metric batches currently stored in state.
+// AllMetricBatches returns all metric batches currently stored in state.
 // TODO (tasdomas): this method is currently only used in the uniter worker test -
 //                  it needs to be modified to restrict the scope of the values it
 //                  returns if it is to be used outside of tests.
-func (st *State) MetricBatches() ([]MetricBatch, error) {
+func (st *State) AllMetricBatches() ([]MetricBatch, error) {
 	c, closer := st.getCollection(metricsC)
 	defer closer()
 	docs := []metricBatchDoc{}
@@ -156,6 +171,47 @@
 	return results, nil
 }
 
+func (st *State) queryLocalMetricBatches(query bson.M) ([]MetricBatch, error) {
+	c, closer := st.getCollection(metricsC)
+	defer closer()
+	docs := []metricBatchDoc{}
+	if query == nil {
+		query = bson.M{}
+	}
+	query["charmurl"] = bson.M{"$regex": "^local:"}
+	err := c.Find(query).All(&docs)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	results := make([]MetricBatch, len(docs))
+	for i, doc := range docs {
+		results[i] = MetricBatch{st: st, doc: doc}
+	}
+	return results, nil
+}
+
+// MetricBatchesForUnit returns metric batches for the given unit.
+func (st *State) MetricBatchesForUnit(unit string) ([]MetricBatch, error) {
+	return st.queryLocalMetricBatches(bson.M{"unit": unit})
+}
+
+// MetricBatchesForService returns metric batches for the given service.
+func (st *State) MetricBatchesForService(service string) ([]MetricBatch, error) {
+	svc, err := st.Service(service)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	units, err := svc.AllUnits()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	unitNames := make([]bson.M, len(units))
+	for i, u := range units {
+		unitNames[i] = bson.M{"unit": u.Name()}
+	}
+	return st.queryLocalMetricBatches(bson.M{"$or": unitNames})
+}
+
 // MetricBatch returns the metric batch with the given id.
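+//
+// A minimal lookup sketch; the uuid variable is assumed to hold a batch
+// UUID recorded earlier:
+//
+//     batch, err := st.MetricBatch(uuid)
+//     if err != nil {
+//         return errors.Trace(err)
+//     }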
 func (st *State) MetricBatch(id string) (*MetricBatch, error) {
 	c, closer := st.getCollection(metricsC)
@@ -174,17 +230,17 @@
 // CleanupOldMetrics looks for metrics that are 24 hours old (or older)
 // and have been sent. Any metrics it finds are deleted.
 func (st *State) CleanupOldMetrics() error {
-	age := time.Now().Add(-(CleanupAge))
-	metricsLogger.Tracef("cleaning up metrics created before %v", age)
+	now := time.Now()
 	metrics, closer := st.getCollection(metricsC)
 	defer closer()
 	// Nothing else in the system will interact with sent metrics, and nothing needs
 	// to watch them either; so in this instance it's safe to do an end run around the
 	// mgo/txn package. See State.cleanupRelationSettings for a similar situation.
 	metricsW := metrics.Writeable()
+	// TODO (mattyw) iter over this.
 	info, err := metricsW.RemoveAll(bson.M{
-		"sent":    true,
-		"created": bson.M{"$lte": age},
+		"sent":        true,
+		"delete-time": bson.M{"$lte": now},
 	})
 	if err == nil {
 		metricsLogger.Tracef("cleanup removed %d metrics", info.Removed)
@@ -246,9 +302,9 @@
 	return m.doc.UUID
 }
 
-// EnvUUID returns the environment UUID this metric applies to.
-func (m *MetricBatch) EnvUUID() string {
-	return m.doc.EnvUUID
+// ModelUUID returns the model UUID this metric applies to.
+func (m *MetricBatch) ModelUUID() string {
+	return m.doc.ModelUUID
 }
 
 // Unit returns the name of the unit this metric was generated in.
@@ -279,14 +335,17 @@
 	return result
 }
 
-// SetSent sets the sent flag to true
-func (m *MetricBatch) SetSent() error {
-	ops := setSentOps([]string{m.UUID()})
+// SetSent marks the metric as having been sent at
+// the specified time.
+func (m *MetricBatch) SetSent(t time.Time) error {
+	deleteTime := t.UTC().Add(CleanupAge)
+	ops := setSentOps([]string{m.UUID()}, deleteTime)
 	if err := m.st.runTransaction(ops); err != nil {
 		return errors.Annotatef(err, "cannot set metric sent for metric %q", m.UUID())
 	}
 	m.doc.Sent = true
+	m.doc.DeleteTime = deleteTime
 	return nil
 }
 
@@ -295,14 +354,14 @@
 	return m.doc.Credentials
 }
 
-func setSentOps(batchUUIDs []string) []txn.Op {
+func setSentOps(batchUUIDs []string, deleteTime time.Time) []txn.Op {
 	ops := make([]txn.Op, len(batchUUIDs))
 	for i, u := range batchUUIDs {
 		ops[i] = txn.Op{
 			C:      metricsC,
 			Id:     u,
 			Assert: txn.DocExists,
-			Update: bson.M{"$set": bson.M{"sent": true}},
+			Update: bson.M{"$set": bson.M{"sent": true, "delete-time": deleteTime}},
 		}
 	}
 	return ops
@@ -310,7 +369,8 @@
 
 // SetMetricBatchesSent sets sent on each MetricBatch corresponding to the uuids provided.
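+//
+// A minimal usage sketch; uuids is assumed to hold the UUIDs of batches
+// that were delivered successfully:
+//
+//     if err := st.SetMetricBatchesSent(uuids); err != nil {
+//         return errors.Trace(err)
+//     }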
func (st *State) SetMetricBatchesSent(batchUUIDs []string) error { - ops := setSentOps(batchUUIDs) + deleteTime := time.Now().UTC().Add(CleanupAge) + ops := setSentOps(batchUUIDs, deleteTime) if err := st.runTransaction(ops); err != nil { return errors.Annotatef(err, "cannot set metric sent in bulk call") } === modified file 'src/github.com/juju/juju/state/metrics_test.go' --- src/github.com/juju/juju/state/metrics_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/metrics_test.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ "time" "github.com/juju/errors" + "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" @@ -31,18 +32,49 @@ func (s *MetricSuite) TestAddNoMetrics(c *gc.C) { now := state.NowToTheSecond() - _, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{}) + _, err := s.State.AddMetrics(state.BatchParam{ + UUID: utils.MustNewUUID().String(), + CharmURL: s.meteredCharm.URL().String(), + Created: now, + Metrics: []state.Metric{}, + Unit: s.unit.UnitTag(), + }) c.Assert(err, gc.ErrorMatches, "cannot add a batch of 0 metrics") } +func removeUnit(c *gc.C, unit *state.Unit) { + ensureUnitDead(c, unit) + err := unit.Remove() + c.Assert(err, jc.ErrorIsNil) +} + +func ensureUnitDead(c *gc.C, unit *state.Unit) { + err := unit.EnsureDead() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *MetricSuite) assertAddUnit(c *gc.C) { + s.meteredCharm = s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + s.service = s.Factory.MakeService(c, &factory.ServiceParams{Charm: s.meteredCharm}) + s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Service: s.service, SetCharmURL: true}) +} + func (s *MetricSuite) TestAddMetric(c *gc.C) { now := state.NowToTheSecond() - envUUID := s.State.EnvironUUID() + modelUUID := s.State.ModelUUID() m := state.Metric{"pings", "5", now} - metricBatch, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + metricBatch, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatch.Unit(), gc.Equals, "metered/0") - c.Assert(metricBatch.EnvUUID(), gc.Equals, envUUID) + c.Assert(metricBatch.ModelUUID(), gc.Equals, modelUUID) c.Assert(metricBatch.CharmURL(), gc.Equals, "cs:quantal/metered") c.Assert(metricBatch.Sent(), jc.IsFalse) c.Assert(metricBatch.Created(), gc.Equals, now) @@ -65,47 +97,55 @@ c.Assert(metric.Time.Equal(now), jc.IsTrue) } -func assertUnitRemoved(c *gc.C, unit *state.Unit) { - assertUnitDead(c, unit) - err := unit.Remove() - c.Assert(err, jc.ErrorIsNil) -} - -func assertUnitDead(c *gc.C, unit *state.Unit) { - err := unit.EnsureDead() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *MetricSuite) assertAddUnit(c *gc.C) { - s.meteredCharm = s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) - s.service = s.Factory.MakeService(c, &factory.ServiceParams{Charm: s.meteredCharm}) - s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Service: s.service, SetCharmURL: true}) -} - func (s *MetricSuite) TestAddMetricNonExistentUnit(c *gc.C) { - assertUnitRemoved(c, s.unit) + removeUnit(c, s.unit) now := state.NowToTheSecond() m := state.Metric{"pings", "5", now} - _, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) - c.Assert(err, gc.ErrorMatches, 
`metered/0 not found`) + unitTag := names.NewUnitTag("test/0") + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: unitTag, + }, + ) + c.Assert(err, gc.ErrorMatches, ".*not found") } func (s *MetricSuite) TestAddMetricDeadUnit(c *gc.C) { - assertUnitDead(c, s.unit) + ensureUnitDead(c, s.unit) now := state.NowToTheSecond() m := state.Metric{"pings", "5", now} - _, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, gc.ErrorMatches, `metered/0 not found`) } func (s *MetricSuite) TestSetMetricSent(c *gc.C) { now := state.NowToTheSecond() m := state.Metric{"pings", "5", now} - added, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + added, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) saved, err := s.State.MetricBatch(added.UUID()) c.Assert(err, jc.ErrorIsNil) - err = saved.SetSent() + err = saved.SetSent(time.Now()) c.Assert(err, jc.ErrorIsNil) c.Assert(saved.Sent(), jc.IsTrue) saved, err = s.State.MetricBatch(added.UUID()) @@ -115,20 +155,44 @@ func (s *MetricSuite) TestCleanupMetrics(c *gc.C) { oldTime := time.Now().Add(-(time.Hour * 25)) + now := time.Now() m := state.Metric{"pings", "5", oldTime} - oldMetric1, err := s.unit.AddMetrics(utils.MustNewUUID().String(), oldTime, "", []state.Metric{m}) - c.Assert(err, jc.ErrorIsNil) - oldMetric1.SetSent() - - oldMetric2, err := s.unit.AddMetrics(utils.MustNewUUID().String(), oldTime, "", []state.Metric{m}) - c.Assert(err, jc.ErrorIsNil) - oldMetric2.SetSent() - - now := time.Now() + oldMetric1, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + oldMetric1.SetSent(time.Now().Add(-25 * time.Hour)) + + oldMetric2, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + oldMetric2.SetSent(time.Now().Add(-25 * time.Hour)) + m = state.Metric{"pings", "5", now} - newMetric, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + newMetric, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - newMetric.SetSent() + newMetric.SetSent(time.Now()) err = s.State.CleanupOldMetrics() c.Assert(err, jc.ErrorIsNil) @@ -150,14 +214,30 @@ func (s *MetricSuite) TestCleanupMetricsIgnoreNotSent(c *gc.C) { oldTime := time.Now().Add(-(time.Hour * 25)) m := state.Metric{"pings", "5", oldTime} - oldMetric, err := s.unit.AddMetrics(utils.MustNewUUID().String(), oldTime, "", []state.Metric{m}) + oldMetric, err := s.State.AddMetrics( + state.BatchParam{ + UUID: 
utils.MustNewUUID().String(), + Created: oldTime, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) now := time.Now() m = state.Metric{"pings", "5", now} - newMetric, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + newMetric, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - newMetric.SetSent() + newMetric.SetSent(time.Now()) err = s.State.CleanupOldMetrics() c.Assert(err, jc.ErrorIsNil) @@ -168,12 +248,20 @@ c.Assert(err, jc.ErrorIsNil) } -func (s *MetricSuite) TestMetricBatches(c *gc.C) { +func (s *MetricSuite) TestAllMetricBatches(c *gc.C) { now := state.NowToTheSecond() m := state.Metric{"pings", "5", now} - _, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - metricBatches, err := s.State.MetricBatches() + metricBatches, err := s.State.AllMetricBatches() c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatches, gc.HasLen, 1) c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") @@ -182,14 +270,22 @@ c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) } -func (s *MetricSuite) TestMetricBatchesCustomCharmURLAndUUID(c *gc.C) { +func (s *MetricSuite) TestAllMetricBatchesCustomCharmURLAndUUID(c *gc.C) { now := state.NowToTheSecond() m := state.Metric{"pings", "5", now} uuid := utils.MustNewUUID().String() charmUrl := "cs:quantal/metered" - _, err := s.unit.AddMetrics(uuid, now, charmUrl, []state.Metric{m}) + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: uuid, + Created: now, + CharmURL: charmUrl, + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - metricBatches, err := s.State.MetricBatches() + metricBatches, err := s.State.AllMetricBatches() c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatches, gc.HasLen, 1) c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") @@ -204,9 +300,17 @@ m := state.Metric{"pings", "5", now} err := s.service.SetMetricCredentials([]byte("hello there")) c.Assert(err, gc.IsNil) - _, err = s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - metricBatches, err := s.State.MetricBatches() + metricBatches, err := s.State.AllMetricBatches() c.Assert(err, jc.ErrorIsNil) c.Assert(metricBatches, gc.HasLen, 1) c.Assert(metricBatches[0].Credentials(), gc.DeepEquals, []byte("hello there")) @@ -315,7 +419,7 @@ "assert valid metric fails on dying unit", []state.Metric{{"pings", "1", now}}, dyingUnit, - "metered-service/1 not found", + "unit \"metered-service/1\" not found", }, { "assert charm doesn't implement key returns error", []state.Metric{{"not-implemented", "1", now}}, @@ -331,10 +435,30 @@ []state.Metric{{"pings", "3.141592653589793238462643383279", now}}, meteredUnit, `metric value is too large`, + }, { + "negative value returns error", + []state.Metric{{"pings", 
"-42.0", now}}, + meteredUnit, + `invalid value: value must be greater or equal to zero, got -42.0`, + }, { + "non-float value returns an error", + []state.Metric{{"pings", "abcd", now}}, + meteredUnit, + `invalid value type: expected float, got "abcd"`, }} for i, t := range tests { c.Logf("test %d: %s", i, t.about) - _, err := t.unit.AddMetrics(utils.MustNewUUID().String(), now, "", t.metrics) + chURL, ok := t.unit.CharmURL() + c.Assert(ok, jc.IsTrue) + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: chURL.String(), + Metrics: t.metrics, + Unit: t.unit.UnitTag(), + }, + ) if t.err == "" { c.Assert(err, jc.ErrorIsNil) } else { @@ -346,19 +470,35 @@ func (s *MetricSuite) TestMetricsAcrossEnvironments(c *gc.C) { now := state.NowToTheSecond().Add(-48 * time.Hour) m := state.Metric{"pings", "5", now} - m1, err := s.unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + m1, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - st := s.Factory.MakeEnvironment(c, nil) + st := s.Factory.MakeModel(c, nil) defer st.Close() f := factory.NewFactory(st) meteredCharm := f.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) service := f.MakeService(c, &factory.ServiceParams{Charm: meteredCharm}) unit := f.MakeUnit(c, &factory.UnitParams{Service: service, SetCharmURL: true}) - m2, err := unit.AddMetrics(utils.MustNewUUID().String(), now, "", []state.Metric{m}) + m2, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - batches, err := s.State.MetricBatches() + batches, err := s.State.AllMetricBatches() c.Assert(err, jc.ErrorIsNil) c.Assert(batches, gc.HasLen, 2) @@ -370,9 +510,9 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(toSend, gc.HasLen, 2) - err = m1.SetSent() + err = m1.SetSent(time.Now().Add(-25 * time.Hour)) c.Assert(err, jc.ErrorIsNil) - err = m2.SetSent() + err = m2.SetSent(time.Now().Add(-25 * time.Hour)) c.Assert(err, jc.ErrorIsNil) sent, err := s.State.CountOfSentMetrics() @@ -382,7 +522,7 @@ err = s.State.CleanupOldMetrics() c.Assert(err, jc.ErrorIsNil) - batches, err = s.State.MetricBatches() + batches, err = s.State.AllMetricBatches() c.Assert(err, jc.ErrorIsNil) c.Assert(batches, gc.HasLen, 0) } @@ -390,9 +530,267 @@ func (s *MetricSuite) TestAddMetricDuplicateUUID(c *gc.C) { now := state.NowToTheSecond() mUUID := utils.MustNewUUID().String() - _, err := s.unit.AddMetrics(mUUID, now, "", []state.Metric{{"pings", "5", now}}) + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: mUUID, + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{{"pings", "5", now}}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, jc.ErrorIsNil) - _, err = s.unit.AddMetrics(mUUID, now, "", []state.Metric{{"pings", "10", now}}) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: mUUID, + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{{"pings", "10", now}}, + Unit: s.unit.UnitTag(), + }, + ) c.Assert(err, gc.ErrorMatches, "metrics batch .* already exists") } + +func (s *MetricSuite) TestAddBuiltInMetric(c *gc.C) { + tests := []struct { + about string + value 
string + expectedError string + }{{ + about: "adding a positive value must succeed", + value: "5", + }, { + about: "negative values return an error", + value: "-42.0", + expectedError: "invalid value: value must be greater or equal to zero, got -42.0", + }, { + about: "non-float values return an error", + value: "abcd", + expectedError: `invalid value type: expected float, got "abcd"`, + }, { + about: "long values return an error", + value: "1234567890123456789012345678901234567890", + expectedError: "metric value is too large", + }, + } + for _, test := range tests { + c.Logf("running test: %v", test.about) + now := state.NowToTheSecond() + modelUUID := s.State.ModelUUID() + m := state.Metric{"juju-units", test.value, now} + metricBatch, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + if test.expectedError == "" { + c.Assert(err, jc.ErrorIsNil) + c.Assert(metricBatch.Unit(), gc.Equals, "metered/0") + c.Assert(metricBatch.ModelUUID(), gc.Equals, modelUUID) + c.Assert(metricBatch.CharmURL(), gc.Equals, "cs:quantal/metered") + c.Assert(metricBatch.Sent(), jc.IsFalse) + c.Assert(metricBatch.Created(), gc.Equals, now) + c.Assert(metricBatch.Metrics(), gc.HasLen, 1) + + metric := metricBatch.Metrics()[0] + c.Assert(metric.Key, gc.Equals, "juju-units") + c.Assert(metric.Value, gc.Equals, test.value) + c.Assert(metric.Time.Equal(now), jc.IsTrue) + + saved, err := s.State.MetricBatch(metricBatch.UUID()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(saved.Unit(), gc.Equals, "metered/0") + c.Assert(metricBatch.CharmURL(), gc.Equals, "cs:quantal/metered") + c.Assert(saved.Sent(), jc.IsFalse) + c.Assert(saved.Metrics(), gc.HasLen, 1) + metric = saved.Metrics()[0] + c.Assert(metric.Key, gc.Equals, "juju-units") + c.Assert(metric.Value, gc.Equals, test.value) + c.Assert(metric.Time.Equal(now), jc.IsTrue) + } else { + c.Assert(err, gc.ErrorMatches, test.expectedError) + } + } +} + +func (s *MetricSuite) TestUnitMetricBatchesReturnsJustLocal(c *gc.C) { + now := state.NowToTheSecond() + m := state.Metric{"pings", "5", now} + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + localMeteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + service := s.Factory.MakeService(c, &factory.ServiceParams{Name: "localmetered", Charm: localMeteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Service: service, SetCharmURL: true}) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: localMeteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: unit.UnitTag(), + }, + ) + + c.Assert(err, jc.ErrorIsNil) + metricBatches, err := s.State.MetricBatchesForUnit("metered/0") + c.Assert(metricBatches, gc.HasLen, 0) + metricBatches, err = s.State.MetricBatchesForUnit("localmetered/0") + c.Assert(metricBatches, gc.HasLen, 1) +} + +type MetricLocalCharmSuite struct { + ConnSuite + unit *state.Unit + service *state.Service + meteredCharm *state.Charm +} + +var _ = gc.Suite(&MetricLocalCharmSuite{}) + +func (s *MetricLocalCharmSuite) SetUpTest(c *gc.C) { + s.ConnSuite.SetUpTest(c) + s.assertAddLocalUnit(c) +} + +func (s *MetricLocalCharmSuite) 
assertAddLocalUnit(c *gc.C) { + s.meteredCharm = s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "local:quantal/metered"}) + s.service = s.Factory.MakeService(c, &factory.ServiceParams{Charm: s.meteredCharm}) + s.unit = s.Factory.MakeUnit(c, &factory.UnitParams{Service: s.service, SetCharmURL: true}) +} + +func (s *MetricLocalCharmSuite) TestUnitMetricBatches(c *gc.C) { + now := state.NowToTheSecond() + m := state.Metric{"pings", "5", now} + m2 := state.Metric{"pings", "10", now} + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + newUnit, err := s.service.AddUnit() + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m2}, + Unit: newUnit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + metricBatches, err := s.State.MetricBatchesForUnit("metered/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metricBatches, gc.HasLen, 1) + c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") + c.Assert(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") + c.Assert(metricBatches[0].Sent(), jc.IsFalse) + c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) + c.Assert(metricBatches[0].Metrics()[0].Value, gc.Equals, "5") + + metricBatches, err = s.State.MetricBatchesForUnit("metered/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metricBatches, gc.HasLen, 1) + c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/1") + c.Assert(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") + c.Assert(metricBatches[0].Sent(), jc.IsFalse) + c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) + c.Assert(metricBatches[0].Metrics()[0].Value, gc.Equals, "10") +} + +func (s *MetricLocalCharmSuite) TestServiceMetricBatches(c *gc.C) { + now := state.NowToTheSecond() + m := state.Metric{"pings", "5", now} + m2 := state.Metric{"pings", "10", now} + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + newUnit, err := s.service.AddUnit() + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m2}, + Unit: newUnit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + + metricBatches, err := s.State.MetricBatchesForService("metered") + c.Assert(err, jc.ErrorIsNil) + c.Assert(metricBatches, gc.HasLen, 2) + + c.Assert(metricBatches[0].Unit(), gc.Equals, "metered/0") + c.Assert(metricBatches[0].CharmURL(), gc.Equals, "local:quantal/metered") + c.Assert(metricBatches[0].Sent(), jc.IsFalse) + c.Assert(metricBatches[0].Metrics(), gc.HasLen, 1) + c.Assert(metricBatches[0].Metrics()[0].Value, gc.Equals, "5") + + c.Assert(metricBatches[1].Unit(), gc.Equals, "metered/1") + c.Assert(metricBatches[1].CharmURL(), gc.Equals, "local:quantal/metered") + c.Assert(metricBatches[1].Sent(), jc.IsFalse) + c.Assert(metricBatches[1].Metrics(), gc.HasLen, 1) + c.Assert(metricBatches[1].Metrics()[0].Value, gc.Equals, "10") +} + +func (s *MetricLocalCharmSuite) TestUnitMetricBatchesReturnsJustLocal(c *gc.C) { + now := state.NowToTheSecond() 
+ m := state.Metric{"pings", "5", now} + _, err := s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: s.meteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: s.unit.UnitTag(), + }, + ) + c.Assert(err, jc.ErrorIsNil) + csMeteredCharm := s.Factory.MakeCharm(c, &factory.CharmParams{Name: "metered", URL: "cs:quantal/metered"}) + service := s.Factory.MakeService(c, &factory.ServiceParams{Name: "csmetered", Charm: csMeteredCharm}) + unit := s.Factory.MakeUnit(c, &factory.UnitParams{Service: service, SetCharmURL: true}) + _, err = s.State.AddMetrics( + state.BatchParam{ + UUID: utils.MustNewUUID().String(), + Created: now, + CharmURL: csMeteredCharm.URL().String(), + Metrics: []state.Metric{m}, + Unit: unit.UnitTag(), + }, + ) + + c.Assert(err, jc.ErrorIsNil) + metricBatches, err := s.State.MetricBatchesForUnit("metered/0") + c.Assert(metricBatches, gc.HasLen, 1) + metricBatches, err = s.State.MetricBatchesForUnit("csmetered/0") + c.Assert(metricBatches, gc.HasLen, 0) +} === modified file 'src/github.com/juju/juju/state/metricsmanager.go' --- src/github.com/juju/juju/state/metricsmanager.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/metricsmanager.go 2016-03-22 15:18:22 +0000 @@ -29,7 +29,7 @@ type metricsManagerDoc struct { DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` LastSuccessfulSend time.Time `bson:"lastsuccessfulsend"` ConsecutiveErrors int `bson:"consecutiveerrors"` GracePeriod time.Duration `bson:"graceperiod"` @@ -40,9 +40,9 @@ return m.doc.DocID } -// EnvUUID returns the environment UUID of the Metrics Manager. -func (m *MetricsManager) EnvUUID() string { - return m.doc.EnvUUID +// ModelUUID returns the model UUID of the Metrics Manager. +func (m *MetricsManager) ModelUUID() string { + return m.doc.ModelUUID } // LastSuccessfulSend returns the time of the last successful send. @@ -76,7 +76,7 @@ st: st, doc: metricsManagerDoc{ DocID: st.docID(metricsManagerKey), - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), LastSuccessfulSend: time.Time{}, ConsecutiveErrors: 0, GracePeriod: defaultGracePeriod, === modified file 'src/github.com/juju/juju/state/metricsmanager_test.go' --- src/github.com/juju/juju/state/metricsmanager_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/metricsmanager_test.go 2016-03-22 15:18:22 +0000 @@ -23,11 +23,11 @@ func (s *metricsManagerSuite) TestDefaultsWritten(c *gc.C) { mm, err := s.State.MetricsManager() c.Assert(err, jc.ErrorIsNil) - c.Assert(mm.DocID(), gc.Equals, fmt.Sprintf("%s:metricsManagerKey", s.State.EnvironUUID())) + c.Assert(mm.DocID(), gc.Equals, fmt.Sprintf("%s:metricsManagerKey", s.State.ModelUUID())) c.Assert(mm.LastSuccessfulSend(), gc.DeepEquals, time.Time{}) c.Assert(mm.ConsecutiveErrors(), gc.Equals, 0) c.Assert(mm.GracePeriod(), gc.Equals, 24*7*time.Hour) - c.Assert(mm.EnvUUID(), gc.Equals, s.State.EnvironUUID()) + c.Assert(mm.ModelUUID(), gc.Equals, s.State.ModelUUID()) } func (s *metricsManagerSuite) TestMetricsManagerCreatesThenReturns(c *gc.C) { === modified file 'src/github.com/juju/juju/state/minimumunits.go' --- src/github.com/juju/juju/state/minimumunits.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/minimumunits.go 2016-03-22 15:18:22 +0000 @@ -25,7 +25,7 @@ // the referred entity type is always the Service. 
DocID string `bson:"_id"` ServiceName string - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` Revno int } @@ -82,7 +82,7 @@ Assert: txn.DocMissing, Insert: &minUnitsDoc{ ServiceName: serviceName, - EnvUUID: service.st.EnvironUUID(), + ModelUUID: service.st.ModelUUID(), }, }) } === added file 'src/github.com/juju/juju/state/model.go' --- src/github.com/juju/juju/state/model.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/state/model.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,628 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state + +import ( + "fmt" + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jujutxn "github.com/juju/txn" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/environs/config" + "github.com/juju/juju/version" +) + +// modelGlobalKey is the key for the model, its +// settings and constraints. +const modelGlobalKey = "e" + +// Model represents the state of a model. +type Model struct { + // st is not necessarily the state of this model. Though it is + // usually safe to assume that it is. The only times it isn't is when we + // get models other than the current one - which is mostly in + // controller api endpoints. + st *State + doc modelDoc +} + +// modelDoc represents the internal state of the model in MongoDB. +type modelDoc struct { + UUID string `bson:"_id"` + Name string + Life Life + Owner string `bson:"owner"` + ServerUUID string `bson:"server-uuid"` + TimeOfDying time.Time `bson:"time-of-dying"` + TimeOfDeath time.Time `bson:"time-of-death"` + + // LatestAvailableTools is a string representing the newest version + // found while checking streams for new versions. + LatestAvailableTools string `bson:"available-tools,omitempty"` +} + +// ControllerModel returns the model that was bootstrapped. +// This is the only model that can have controller machines. +// The owner of this model is also considered "special", in that +// they are the only user that is able to create other users (until we +// have more fine grained permissions), and they cannot be disabled. +func (st *State) ControllerModel() (*Model, error) { + ssinfo, err := st.ControllerInfo() + if err != nil { + return nil, errors.Annotate(err, "could not get controller info") + } + + models, closer := st.getCollection(modelsC) + defer closer() + + env := &Model{st: st} + uuid := ssinfo.ModelTag.Id() + if err := env.refresh(models.FindId(uuid)); err != nil { + return nil, errors.Trace(err) + } + return env, nil +} + +// Model returns the model entity. +func (st *State) Model() (*Model, error) { + models, closer := st.getCollection(modelsC) + defer closer() + + env := &Model{st: st} + uuid := st.modelTag.Id() + if err := env.refresh(models.FindId(uuid)); err != nil { + return nil, errors.Trace(err) + } + return env, nil +} + +// GetModel looks for the model identified by the uuid passed in. +func (st *State) GetModel(tag names.ModelTag) (*Model, error) { + models, closer := st.getCollection(modelsC) + defer closer() + + env := &Model{st: st} + if err := env.refresh(models.FindId(tag.Id())); err != nil { + return nil, errors.Trace(err) + } + return env, nil +} + +// AllModels returns all the models in the system. 
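+//
+// A minimal usage sketch (the logging shown is purely illustrative):
+//
+//     models, err := st.AllModels()
+//     if err != nil {
+//         return errors.Trace(err)
+//     }
+//     for _, model := range models {
+//         logger.Debugf("model %q (%s)", model.Name(), model.UUID())
+//     }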
+func (st *State) AllModels() ([]*Model, error) { + models, closer := st.getCollection(modelsC) + defer closer() + + var envDocs []modelDoc + err := models.Find(nil).All(&envDocs) + if err != nil { + return nil, err + } + + result := make([]*Model, len(envDocs)) + for i, doc := range envDocs { + result[i] = &Model{st: st, doc: doc} + } + + return result, nil +} + +// NewModel creates a new model with its own UUID and +// prepares it for use. Model and State instances for the new +// model are returned. +// +// The controller model's UUID is attached to the new +// model's document. Having the server UUIDs stored with each +// model document means that we have a way to represent external +// models, perhaps for future use around cross model +// relations. +func (st *State) NewModel(cfg *config.Config, owner names.UserTag) (_ *Model, _ *State, err error) { + if owner.IsLocal() { + if _, err := st.User(owner); err != nil { + return nil, nil, errors.Annotate(err, "cannot create model") + } + } + + ssEnv, err := st.ControllerModel() + if err != nil { + return nil, nil, errors.Annotate(err, "could not load controller model") + } + + uuid, ok := cfg.UUID() + if !ok { + return nil, nil, errors.Errorf("model uuid was not supplied") + } + newState, err := st.ForModel(names.NewModelTag(uuid)) + if err != nil { + return nil, nil, errors.Annotate(err, "could not create state for new model") + } + defer func() { + if err != nil { + newState.Close() + } + }() + + ops, err := newState.envSetupOps(cfg, uuid, ssEnv.UUID(), owner) + if err != nil { + return nil, nil, errors.Annotate(err, "failed to create new model") + } + err = newState.runTransaction(ops) + if err == txn.ErrAborted { + + // We have a unique key restriction on the "owner" and "name" fields, + // which will cause the insert to fail if there is another record with + // the same "owner" and "name" in the collection. If the txn is + // aborted, check if it is due to the unique key restriction. + models, closer := st.getCollection(modelsC) + defer closer() + envCount, countErr := models.Find(bson.D{ + {"owner", owner.Canonical()}, + {"name", cfg.Name()}}, + ).Count() + if countErr != nil { + err = errors.Trace(countErr) + } else if envCount > 0 { + err = errors.AlreadyExistsf("model %q for %s", cfg.Name(), owner.Canonical()) + } else { + err = errors.New("model already exists") + } + } + if err != nil { + return nil, nil, errors.Trace(err) + } + + newEnv, err := newState.Model() + if err != nil { + return nil, nil, errors.Trace(err) + } + + return newEnv, newState, nil +} + +// Tag returns a name identifying the model. +// The returned name will be different from other Tag values returned +// by any other entities from the same state. +func (m *Model) Tag() names.Tag { + return m.ModelTag() +} + +// ModelTag is the concrete model tag for this model. +func (m *Model) ModelTag() names.ModelTag { + return names.NewModelTag(m.doc.UUID) +} + +// ControllerTag is the model tag for the controller that the model is +// running within. +func (m *Model) ControllerTag() names.ModelTag { + return names.NewModelTag(m.doc.ServerUUID) +} + +// UUID returns the universally unique identifier of the model. +func (m *Model) UUID() string { + return m.doc.UUID +} + +// ControllerUUID returns the universally unique identifier of the controller +// in which the model is running. +func (m *Model) ControllerUUID() string { + return m.doc.ServerUUID +} + +// Name returns the human friendly name of the model. 
+func (m *Model) Name() string {
+	return m.doc.Name
+}
+
+// Life returns whether the model is Alive, Dying or Dead.
+func (m *Model) Life() Life {
+	return m.doc.Life
+}
+
+// TimeOfDying returns when the model Life was set to Dying.
+func (m *Model) TimeOfDying() time.Time {
+	return m.doc.TimeOfDying
+}
+
+// TimeOfDeath returns when the model Life was set to Dead.
+func (m *Model) TimeOfDeath() time.Time {
+	return m.doc.TimeOfDeath
+}
+
+// Owner returns the tag representing the owner of the model.
+// The owner is the user that created the model.
+func (m *Model) Owner() names.UserTag {
+	return names.NewUserTag(m.doc.Owner)
+}
+
+// Config returns the config for the model.
+func (m *Model) Config() (*config.Config, error) {
+	if m.st.modelTag.Id() == m.UUID() {
+		return m.st.ModelConfig()
+	}
+	envState := m.st
+	if envState.modelTag != m.ModelTag() {
+		// The active model isn't the same as the model
+		// we are querying.
+		var err error
+		envState, err = m.st.ForModel(m.ModelTag())
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		defer envState.Close()
+	}
+	return envState.ModelConfig()
+}
+
+// UpdateLatestToolsVersion looks up the latest available version of
+// the juju tools and updates the model document with it.
+func (m *Model) UpdateLatestToolsVersion(ver version.Number) error {
+	v := ver.String()
+	// TODO(perrito666): I need to assert here that there isn't a newer
+	// version in place.
+	ops := []txn.Op{{
+		C:      modelsC,
+		Id:     m.doc.UUID,
+		Update: bson.D{{"$set", bson.D{{"available-tools", v}}}},
+	}}
+	err := m.st.runTransaction(ops)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return m.Refresh()
+}
+
+// LatestToolsVersion returns the newest version found in the last
+// check in the streams.
+// Bear in mind that the check was performed filtering only
+// new patches for the current major.minor. (major.minor.patch)
+func (m *Model) LatestToolsVersion() version.Number {
+	ver := m.doc.LatestAvailableTools
+	if ver == "" {
+		return version.Zero
+	}
+	v, err := version.Parse(ver)
+	if err != nil {
+		// The value was stored by a valid version, but if the
+		// data has since become corrupt it is not worth failing
+		// over it.
+		return version.Zero
+	}
+	return v
+}
+
+// globalKey returns the global database key for the model.
+func (m *Model) globalKey() string {
+	return modelGlobalKey
+}
+
+// Refresh refreshes the model document against the stored state.
+func (m *Model) Refresh() error {
+	models, closer := m.st.getCollection(modelsC)
+	defer closer()
+	return m.refresh(models.FindId(m.UUID()))
+}
+
+func (m *Model) refresh(query *mgo.Query) error {
+	err := query.One(&m.doc)
+	if err == mgo.ErrNotFound {
+		return errors.NotFoundf("model")
+	}
+	return err
+}
+
+// Users returns a slice of all users for this model.
+func (m *Model) Users() ([]*ModelUser, error) {
+	if m.st.ModelUUID() != m.UUID() {
+		return nil, errors.New("cannot lookup model users outside the current model")
+	}
+	coll, closer := m.st.getCollection(modelUsersC)
+	defer closer()
+
+	var userDocs []modelUserDoc
+	err := coll.Find(nil).All(&userDocs)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var modelUsers []*ModelUser
+	for _, doc := range userDocs {
+		modelUsers = append(modelUsers, &ModelUser{
+			st:  m.st,
+			doc: doc,
+		})
+	}
+
+	return modelUsers, nil
+}
+
+// Destroy sets the model's lifecycle to Dying, preventing
+// addition of services or machines to state.
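+//
+// A minimal usage sketch:
+//
+//     model, err := st.Model()
+//     if err != nil {
+//         return errors.Trace(err)
+//     }
+//     if err := model.Destroy(); err != nil {
+//         return errors.Trace(err) // refused while hosted models remain
+//     }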
+func (m *Model) Destroy() (err error) {
+	return m.destroy(false)
+}
+
+// DestroyIncludingHosted sets the model's lifecycle to Dying, preventing addition of
+// services or machines to state. If this model is a controller hosting
+// other models, they will also be destroyed.
+func (m *Model) DestroyIncludingHosted() error {
+	return m.destroy(true)
+}
+
+func (m *Model) destroy(destroyHostedModels bool) (err error) {
+	defer errors.DeferredAnnotatef(&err, "failed to destroy model")
+
+	buildTxn := func(attempt int) ([]txn.Op, error) {
+
+		// On the first attempt, we assume memory state is recent
+		// enough to try using...
+		if attempt != 0 {
+			// ...but on subsequent attempts, we read fresh model
+			// state from the DB, fetching a new Model rather than
+			// refreshing in place, as detailed in doc/hacking-state.txt.
+			if m, err = m.st.GetModel(m.ModelTag()); err != nil {
+				return nil, errors.Trace(err)
+			}
+		}
+
+		ops, err := m.destroyOps(destroyHostedModels)
+		if err == errModelNotAlive {
+			return nil, jujutxn.ErrNoOperations
+		} else if err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		return ops, nil
+	}
+
+	st := m.st
+	if m.UUID() != m.st.ModelUUID() {
+		st, err = m.st.ForModel(m.ModelTag())
+		if err != nil {
+			return errors.Trace(err)
+		}
+		defer st.Close()
+	}
+	return st.run(buildTxn)
+}
+
+// errModelNotAlive is a signal emitted from destroyOps to indicate
+// that model destruction is already underway.
+var errModelNotAlive = errors.New("model is no longer alive")
+
+type hasHostedModelsError int
+
+func (e hasHostedModelsError) Error() string {
+	return fmt.Sprintf("hosting %d other models", e)
+}
+
+// IsHasHostedModelsError reports whether the given error was caused by
+// an attempt to destroy a controller model that still hosts other models.
+func IsHasHostedModelsError(err error) bool {
+	_, ok := errors.Cause(err).(hasHostedModelsError)
+	return ok
+}
+
+// destroyOps returns the txn operations necessary to begin model
+// destruction, or an error indicating why it can't.
+func (m *Model) destroyOps(destroyHostedModels bool) ([]txn.Op, error) {
+	// Ensure we're using the model's state.
+	st := m.st
+	var err error
+	if m.UUID() != m.st.ModelUUID() {
+		st, err = m.st.ForModel(m.ModelTag())
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		defer st.Close()
+	}
+
+	if m.Life() != Alive {
+		return nil, errModelNotAlive
+	}
+
+	err = m.ensureDestroyable()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	uuid := m.UUID()
+	ops := []txn.Op{{
+		C:      modelsC,
+		Id:     uuid,
+		Assert: isModelAliveDoc,
+		Update: bson.D{{"$set", bson.D{
+			{"life", Dying},
+			{"time-of-dying", nowToTheSecond()},
+		}}},
+	}}
+	if uuid == m.doc.ServerUUID && !destroyHostedModels {
+		if count, err := hostedModelCount(st); err != nil {
+			return nil, errors.Trace(err)
+		} else if count != 0 {
+			return nil, errors.Trace(hasHostedModelsError(count))
+		}
+		ops = append(ops, assertNoHostedModelsOp())
+	} else {
+		// Otherwise there is no hosted-model check to make; we just
+		// need to make sure we update the refcount.
+		ops = append(ops, decHostedModelCountOp())
+	}
+
+	// Because txn operations execute in order, and may encounter
+	// arbitrarily long delays, we need to make sure every op
+	// causes a state change that's still consistent; so we make
+	// sure the cleanup ops are the last thing that will execute.
+	if uuid == m.doc.ServerUUID {
+		cleanupOp := st.newCleanupOp(cleanupModelsForDyingController, uuid)
+		ops = append(ops, cleanupOp)
+	}
+	cleanupMachinesOp := st.newCleanupOp(cleanupMachinesForDyingModel, uuid)
+	ops = append(ops, cleanupMachinesOp)
+	cleanupServicesOp := st.newCleanupOp(cleanupServicesForDyingModel, uuid)
+	ops = append(ops, cleanupServicesOp)
+	return ops, nil
+}
+
+// checkManualMachines checks if any of the machines in the slice were
+// manually provisioned, and are non-manager machines. These machines
+// must (currently) be manually destroyed via destroy-machine before
+// destroy-model can successfully complete.
+func checkManualMachines(machines []*Machine) error {
+	var ids []string
+	for _, m := range machines {
+		if m.IsManager() {
+			continue
+		}
+		manual, err := m.IsManual()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		if manual {
+			ids = append(ids, m.Id())
+		}
+	}
+	if len(ids) > 0 {
+		return errors.Errorf("manually provisioned machines must first be destroyed with `juju destroy-machine %s`", strings.Join(ids, " "))
+	}
+	return nil
+}
+
+// ensureDestroyable returns an error if any manual machines or persistent
+// volumes are found.
+func (m *Model) ensureDestroyable() error {
+
+	// TODO(waigani) bug #1475212: Model destroy can miss manual
+	// machines. We need to be able to assert the absence of these as
+	// part of the destroy txn, but in order to do this manual machines
+	// need to add refcounts to their models.
+
+	// Check for manual machines. We bail out if there are any,
+	// to stop the user from prematurely hobbling the model.
+	machines, err := m.st.AllMachines()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	if err := checkManualMachines(machines); err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+// createModelOp returns the operation needed to create
+// a model document with the given name and UUID.
+func createModelOp(st *State, owner names.UserTag, name, uuid, server string) txn.Op {
+	doc := &modelDoc{
+		UUID:       uuid,
+		Name:       name,
+		Life:       Alive,
+		Owner:      owner.Canonical(),
+		ServerUUID: server,
+	}
+	return txn.Op{
+		C:      modelsC,
+		Id:     uuid,
+		Assert: txn.DocMissing,
+		Insert: doc,
+	}
+}
+
+const hostedModelCountKey = "hostedModelCount"
+
+type hostedModelCountDoc struct {
+
+	// RefCount is the number of models in the Juju system. We do not count
+	// the system model.
+	RefCount int `bson:"refcount"`
+}
+
+func assertNoHostedModelsOp() txn.Op {
+	return txn.Op{
+		C:      controllersC,
+		Id:     hostedModelCountKey,
+		Assert: bson.D{{"refcount", 0}},
+	}
+}
+
+func incHostedModelCountOp() txn.Op {
+	return HostedModelCountOp(1)
+}
+
+func decHostedModelCountOp() txn.Op {
+	return HostedModelCountOp(-1)
+}
+
+// HostedModelCountOp returns a txn.Op that adjusts the count of hosted
+// models by the given (positive or negative) amount.
+func HostedModelCountOp(amount int) txn.Op {
+	return txn.Op{
+		C:  controllersC,
+		Id: hostedModelCountKey,
+		Update: bson.M{
+			"$inc": bson.M{"refcount": amount},
+		},
+	}
+}
+
+func hostedModelCount(st *State) (int, error) {
+	var doc hostedModelCountDoc
+	controllers, closer := st.getCollection(controllersC)
+	defer closer()
+
+	if err := controllers.Find(bson.D{{"_id", hostedModelCountKey}}).One(&doc); err != nil {
+		return 0, errors.Trace(err)
+	}
+	return doc.RefCount, nil
+}
+
+// createUniqueOwnerModelNameOp returns the operation needed to create
+// a usermodelnameC document with the given owner and model name.
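+//
+// A minimal usage sketch; owner, cfg, uuid and controllerUUID are assumed
+// to describe the new model being created:
+//
+//     ops := []txn.Op{
+//         createUniqueOwnerModelNameOp(owner, cfg.Name()),
+//         createModelOp(st, owner, cfg.Name(), uuid, controllerUUID),
+//     }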
+func createUniqueOwnerModelNameOp(owner names.UserTag, envName string) txn.Op { + return txn.Op{ + C: usermodelnameC, + Id: userModelNameIndex(owner.Canonical(), envName), + Assert: txn.DocMissing, + Insert: bson.M{}, + } +} + +// assertAliveOp returns a txn.Op that asserts the model is alive. +func (m *Model) assertAliveOp() txn.Op { + return assertModelAliveOp(m.UUID()) +} + +// assertModelAliveOp returns a txn.Op that asserts the given +// model UUID refers to an Alive model. +func assertModelAliveOp(modelUUID string) txn.Op { + return txn.Op{ + C: modelsC, + Id: modelUUID, + Assert: isModelAliveDoc, + } +} + +// isModelAlive is a model-specific version of isAliveDoc. +// +// Model documents from versions of Juju prior to 1.17 +// do not have the life field; if it does not exist, it should +// be considered to have the value Alive. +// +// TODO(mjs) - this should be removed with existing uses replaced with +// isAliveDoc. A DB migration should convert nil to Alive. +var isModelAliveDoc = bson.D{ + {"life", bson.D{{"$in", []interface{}{Alive, nil}}}}, +} + +func checkModeLife(st *State) error { + env, err := st.Model() + if (err == nil && env.Life() != Alive) || errors.IsNotFound(err) { + return errors.New("model is no longer alive") + } else if err != nil { + return errors.Annotate(err, "unable to read model") + } + return nil +} === added file 'src/github.com/juju/juju/state/model_test.go' --- src/github.com/juju/juju/state/model_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/state/model_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,656 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package state_test + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs/config" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type ModelSuite struct { + ConnSuite +} + +var _ = gc.Suite(&ModelSuite{}) + +func (s *ModelSuite) TestModel(c *gc.C) { + model, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + expectedTag := names.NewModelTag(model.UUID()) + c.Assert(model.Tag(), gc.Equals, expectedTag) + c.Assert(model.ControllerTag(), gc.Equals, expectedTag) + c.Assert(model.Name(), gc.Equals, "testenv") + c.Assert(model.Owner(), gc.Equals, s.Owner) + c.Assert(model.Life(), gc.Equals, state.Alive) + c.Assert(model.TimeOfDying().IsZero(), jc.IsTrue) + c.Assert(model.TimeOfDeath().IsZero(), jc.IsTrue) +} + +func (s *ModelSuite) TestModelDestroy(c *gc.C) { + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + now := state.NowToTheSecond() + s.PatchValue(&state.NowToTheSecond, func() time.Time { + return now + }) + + err = env.Destroy() + c.Assert(err, jc.ErrorIsNil) + err = env.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(env.Life(), gc.Equals, state.Dying) + c.Assert(env.TimeOfDying().UTC(), gc.Equals, now.UTC()) + c.Assert(env.TimeOfDeath().IsZero(), jc.IsTrue) +} + +func (s *ModelSuite) TestNewModelNonExistentLocalUser(c *gc.C) { + cfg, _ := s.createTestEnvConfig(c) + owner := names.NewUserTag("non-existent@local") + + _, _, err := s.State.NewModel(cfg, owner) + c.Assert(err, gc.ErrorMatches, `cannot create model: user "non-existent" not found`) +} + +func (s *ModelSuite) TestNewModelSameUserSameNameFails(c *gc.C) { + cfg, _ := s.createTestEnvConfig(c) + owner := s.Factory.MakeUser(c, nil).UserTag() 
+ + // Create the first model. + _, st1, err := s.State.NewModel(cfg, owner) + c.Assert(err, jc.ErrorIsNil) + defer st1.Close() + + // Attempt to create another model with a different UUID but the + // same owner and name as the first. + newUUID, err := utils.NewUUID() + c.Assert(err, jc.ErrorIsNil) + cfg2 := testing.CustomModelConfig(c, testing.Attrs{ + "name": cfg.Name(), + "uuid": newUUID.String(), + }) + _, _, err = s.State.NewModel(cfg2, owner) + errMsg := fmt.Sprintf("model %q for %s already exists", cfg2.Name(), owner.Canonical()) + c.Assert(err, gc.ErrorMatches, errMsg) + c.Assert(errors.IsAlreadyExists(err), jc.IsTrue) + + // Remove the first model. + env1, err := st1.Model() + c.Assert(err, jc.ErrorIsNil) + err = env1.Destroy() + c.Assert(err, jc.ErrorIsNil) + // Destroy only sets the model to dying and RemoveAllModelDocs can + // only be called on a dead model. Normally, the environ's lifecycle + // would be set to dead after machines and services have been cleaned up. + err = state.SetModelLifeDead(st1, env1.ModelTag().Id()) + c.Assert(err, jc.ErrorIsNil) + err = st1.RemoveAllModelDocs() + c.Assert(err, jc.ErrorIsNil) + + // We should now be able to create the other model. + env2, st2, err := s.State.NewModel(cfg2, owner) + c.Assert(err, jc.ErrorIsNil) + defer st2.Close() + c.Assert(env2, gc.NotNil) + c.Assert(st2, gc.NotNil) +} + +func (s *ModelSuite) TestNewModel(c *gc.C) { + cfg, uuid := s.createTestEnvConfig(c) + owner := names.NewUserTag("test@remote") + + env, st, err := s.State.NewModel(cfg, owner) + c.Assert(err, jc.ErrorIsNil) + defer st.Close() + + modelTag := names.NewModelTag(uuid) + assertEnvMatches := func(env *state.Model) { + c.Assert(env.UUID(), gc.Equals, modelTag.Id()) + c.Assert(env.Tag(), gc.Equals, modelTag) + c.Assert(env.ControllerTag(), gc.Equals, s.modelTag) + c.Assert(env.Owner(), gc.Equals, owner) + c.Assert(env.Name(), gc.Equals, "testing") + c.Assert(env.Life(), gc.Equals, state.Alive) + } + assertEnvMatches(env) + + // Since the model tag for the State connection is different, + // asking for this model through FindEntity returns a not found error. 
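+	//
+	// Orientation, a sketch (not from the original source):
+	//
+	//	m, _ := st.Model()           // the connection's own model
+	//	m, _ = st.GetModel(tag)      // any model, looked up by tag
+	//	_, err := st.FindEntity(tag) // resolves only within st's model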
+	env, err = s.State.GetModel(modelTag)
+	c.Assert(err, jc.ErrorIsNil)
+	assertEnvMatches(env)
+
+	env, err = st.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	assertEnvMatches(env)
+
+	_, err = s.State.FindEntity(modelTag)
+	c.Assert(err, jc.Satisfies, errors.IsNotFound)
+
+	entity, err := st.FindEntity(modelTag)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(entity.Tag(), gc.Equals, modelTag)
+
+	// Ensure the model is functional by adding a machine
+	_, err = st.AddMachine("quantal", state.JobHostUnits)
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestControllerModel(c *gc.C) {
+	env, err := s.State.ControllerModel()
+	c.Assert(err, jc.ErrorIsNil)
+
+	expectedTag := names.NewModelTag(env.UUID())
+	c.Assert(env.Tag(), gc.Equals, expectedTag)
+	c.Assert(env.ControllerTag(), gc.Equals, expectedTag)
+	c.Assert(env.Name(), gc.Equals, "testenv")
+	c.Assert(env.Owner(), gc.Equals, s.Owner)
+	c.Assert(env.Life(), gc.Equals, state.Alive)
+}
+
+func (s *ModelSuite) TestControllerModelAccessibleFromOtherModels(c *gc.C) {
+	cfg, _ := s.createTestEnvConfig(c)
+	_, st, err := s.State.NewModel(cfg, names.NewUserTag("test@remote"))
+	c.Assert(err, jc.ErrorIsNil)
+	defer st.Close()
+
+	env, err := st.ControllerModel()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env.Tag(), gc.Equals, s.modelTag)
+	c.Assert(env.Name(), gc.Equals, "testenv")
+	c.Assert(env.Owner(), gc.Equals, s.Owner)
+	c.Assert(env.Life(), gc.Equals, state.Alive)
+}
+
+func (s *ModelSuite) TestConfigForControllerEnv(c *gc.C) {
+	otherState := s.Factory.MakeModel(c, &factory.ModelParams{Name: "other"})
+	defer otherState.Close()
+
+	env, err := otherState.GetModel(s.modelTag)
+	c.Assert(err, jc.ErrorIsNil)
+
+	conf, err := env.Config()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(conf.Name(), gc.Equals, "testenv")
+	uuid, ok := conf.UUID()
+	c.Assert(ok, jc.IsTrue)
+	c.Assert(uuid, gc.Equals, s.modelTag.Id())
+}
+
+func (s *ModelSuite) TestConfigForOtherEnv(c *gc.C) {
+	otherState := s.Factory.MakeModel(c, &factory.ModelParams{Name: "other"})
+	defer otherState.Close()
+	otherEnv, err := otherState.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	// By getting the model through a different state connection,
+	// the underlying state pointer in the *state.Model struct has
+	// a different model tag.
+	env, err := s.State.GetModel(otherEnv.ModelTag())
+	c.Assert(err, jc.ErrorIsNil)
+
+	conf, err := env.Config()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(conf.Name(), gc.Equals, "other")
+	uuid, ok := conf.UUID()
+	c.Assert(ok, jc.IsTrue)
+	c.Assert(uuid, gc.Equals, otherEnv.UUID())
+}
+
+// createTestEnvConfig returns a new model config and its UUID for testing.
+func (s *ModelSuite) createTestEnvConfig(c *gc.C) (*config.Config, string) {
+	uuid, err := utils.NewUUID()
+	c.Assert(err, jc.ErrorIsNil)
+	return testing.CustomModelConfig(c, testing.Attrs{
+		"name": "testing",
+		"uuid": uuid.String(),
+	}), uuid.String()
+}
+
+func (s *ModelSuite) TestModelConfigSameEnvAsState(c *gc.C) {
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	cfg, err := env.Config()
+	c.Assert(err, jc.ErrorIsNil)
+	uuid, exists := cfg.UUID()
+	c.Assert(exists, jc.IsTrue)
+	c.Assert(uuid, gc.Equals, s.State.ModelUUID())
+}
+
+func (s *ModelSuite) TestModelConfigDifferentEnvThanState(c *gc.C) {
+	otherState := s.Factory.MakeModel(c, nil)
+	defer otherState.Close()
+	env, err := otherState.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	cfg, err := env.Config()
+	c.Assert(err, jc.ErrorIsNil)
+	uuid, exists := cfg.UUID()
+	c.Assert(exists, jc.IsTrue)
+	c.Assert(uuid, gc.Equals, env.UUID())
+	c.Assert(uuid, gc.Not(gc.Equals), s.State.ModelUUID())
+}
+
+func (s *ModelSuite) TestDestroyControllerModel(c *gc.C) {
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	err = env.Destroy()
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestDestroyOtherModel(c *gc.C) {
+	st2 := s.Factory.MakeModel(c, nil)
+	defer st2.Close()
+	env, err := st2.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	err = env.Destroy()
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestDestroyControllerModelFails(c *gc.C) {
+	st2 := s.Factory.MakeModel(c, nil)
+	defer st2.Close()
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env.Destroy(), gc.ErrorMatches, "failed to destroy model: hosting 1 other models")
+}
+
+func (s *ModelSuite) TestDestroyControllerAndHostedModels(c *gc.C) {
+	st2 := s.Factory.MakeModel(c, nil)
+	defer st2.Close()
+
+	controllerEnv, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(controllerEnv.DestroyIncludingHosted(), jc.ErrorIsNil)
+
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env.Life(), gc.Equals, state.Dying)
+
+	assertNeedsCleanup(c, s.State)
+	assertCleanupRuns(c, s.State)
+
+	env2, err := st2.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env2.Life(), gc.Equals, state.Dying)
+
+	c.Assert(st2.ProcessDyingModel(), jc.ErrorIsNil)
+
+	c.Assert(env2.Refresh(), jc.ErrorIsNil)
+	c.Assert(env2.Life(), gc.Equals, state.Dead)
+
+	c.Assert(s.State.ProcessDyingModel(), jc.ErrorIsNil)
+	c.Assert(env.Refresh(), jc.ErrorIsNil)
+	c.Assert(env.Life(), gc.Equals, state.Dead)
+}
+
+func (s *ModelSuite) TestDestroyControllerAndHostedModelsWithResources(c *gc.C) {
+	otherSt := s.Factory.MakeModel(c, nil)
+	defer otherSt.Close()
+
+	assertEnv := func(env *state.Model, st *state.State, life state.Life, expectedMachines, expectedServices int) {
+		c.Assert(env.Refresh(), jc.ErrorIsNil)
+		c.Assert(env.Life(), gc.Equals, life)
+
+		machines, err := st.AllMachines()
+		c.Assert(err, jc.ErrorIsNil)
+		c.Assert(machines, gc.HasLen, expectedMachines)
+
+		services, err := st.AllServices()
+		c.Assert(err, jc.ErrorIsNil)
+		c.Assert(services, gc.HasLen, expectedServices)
+	}
+
+	// add some machines and services
+	otherEnv, err := otherSt.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	_, err = otherSt.AddMachine("quantal", state.JobHostUnits)
+	c.Assert(err, jc.ErrorIsNil)
+	service := s.Factory.MakeService(c, &factory.ServiceParams{Creator: otherEnv.Owner()})
+	ch, _, err := service.Charm()
+	c.Assert(err, jc.ErrorIsNil)
+
+	args := state.AddServiceArgs{
+		Name:  service.Name(),
+		Owner: service.GetOwnerTag(),
+		Charm: ch,
+	}
+	service, err = otherSt.AddService(args)
+	c.Assert(err, jc.ErrorIsNil)
+
+	controllerEnv, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(controllerEnv.DestroyIncludingHosted(), jc.ErrorIsNil)
+
+	assertCleanupRuns(c, s.State)
+	assertDoesNotNeedCleanup(c, s.State)
+	assertAllMachinesDeadAndRemove(c, s.State)
+	assertEnv(controllerEnv, s.State, state.Dying, 0, 0)
+
+	err = s.State.ProcessDyingModel()
+	c.Assert(err, gc.ErrorMatches, `one or more hosted models are not yet dead`)
+
+	assertCleanupCount(c, otherSt, 3)
+	assertAllMachinesDeadAndRemove(c, otherSt)
+	assertEnv(otherEnv, otherSt, state.Dying, 0, 0)
+	c.Assert(otherSt.ProcessDyingModel(), jc.ErrorIsNil)
+
+	c.Assert(otherEnv.Refresh(), jc.ErrorIsNil)
+	c.Assert(otherEnv.Life(), gc.Equals, state.Dead)
+
+	c.Assert(s.State.ProcessDyingModel(), jc.ErrorIsNil)
+	c.Assert(controllerEnv.Refresh(), jc.ErrorIsNil)
+	c.Assert(controllerEnv.Life(), gc.Equals, state.Dead)
+}
+
+func (s *ModelSuite) TestDestroyControllerModelRace(c *gc.C) {
+	// Simulate a model being added just before the remove txn is
+	// called.
+	defer state.SetBeforeHooks(c, s.State, func() {
+		blocker := s.Factory.MakeModel(c, nil)
+		err := blocker.Close()
+		c.Check(err, jc.ErrorIsNil)
+	}).Check()
+
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env.Destroy(), gc.ErrorMatches, "failed to destroy model: hosting 1 other models")
+}
+
+func (s *ModelSuite) TestDestroyControllerAlreadyDyingRaceNoOp(c *gc.C) {
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Simulate a model being destroyed by another client just before
+	// the remove txn is called.
+	defer state.SetBeforeHooks(c, s.State, func() {
+		c.Assert(env.Destroy(), jc.ErrorIsNil)
+	}).Check()
+
+	c.Assert(env.Destroy(), jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestDestroyControllerAlreadyDyingNoOp(c *gc.C) {
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Assert(env.Destroy(), jc.ErrorIsNil)
+	c.Assert(env.Destroy(), jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestProcessDyingServerEnvironTransitionDyingToDead(c *gc.C) {
+	s.assertDyingEnvironTransitionDyingToDead(c, s.State)
+}
+
+func (s *ModelSuite) TestProcessDyingHostedEnvironTransitionDyingToDead(c *gc.C) {
+	st := s.Factory.MakeModel(c, nil)
+	defer st.Close()
+	s.assertDyingEnvironTransitionDyingToDead(c, st)
+}
+
+func (s *ModelSuite) assertDyingEnvironTransitionDyingToDead(c *gc.C, st *state.State) {
+	env, err := st.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	// ProcessDyingModel is called by a worker after Destroy is called. To
+	// avoid a race, we jump the gun here and test immediately after the
+	// model has been set to dying.
+	defer state.SetAfterHooks(c, st, func() {
+		c.Assert(env.Refresh(), jc.ErrorIsNil)
+		c.Assert(env.Life(), gc.Equals, state.Dying)
+
+		c.Assert(st.ProcessDyingModel(), jc.ErrorIsNil)
+
+		c.Assert(env.Refresh(), jc.ErrorIsNil)
+		c.Assert(env.Life(), gc.Equals, state.Dead)
+	}).Check()
+
+	c.Assert(env.Destroy(), jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestProcessDyingEnvironWithMachinesAndServicesNoOp(c *gc.C) {
+	st := s.Factory.MakeModel(c, nil)
+	defer st.Close()
+
+	// calling ProcessDyingModel on a live environ should fail.
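+	//
+	// Lifecycle recap (a sketch of the transitions these tests drive;
+	// not from the original source):
+	//
+	//	env.Destroy()            // Alive -> Dying
+	//	st.ProcessDyingModel()   // Dying -> Dead, only once the model is empty
+	//	st.RemoveAllModelDocs()  // allowed only on a Dead model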
+	err := st.ProcessDyingModel()
+	c.Assert(err, gc.ErrorMatches, "model is not dying")
+
+	// add some machines and services
+	env, err := st.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	_, err = st.AddMachine("quantal", state.JobHostUnits)
+	c.Assert(err, jc.ErrorIsNil)
+	service := s.Factory.MakeService(c, &factory.ServiceParams{Creator: env.Owner()})
+	ch, _, err := service.Charm()
+	c.Assert(err, jc.ErrorIsNil)
+	args := state.AddServiceArgs{
+		Name:  service.Name(),
+		Owner: service.GetOwnerTag(),
+		Charm: ch,
+	}
+	service, err = st.AddService(args)
+	c.Assert(err, jc.ErrorIsNil)
+
+	assertEnv := func(life state.Life, expectedMachines, expectedServices int) {
+		c.Assert(env.Refresh(), jc.ErrorIsNil)
+		c.Assert(env.Life(), gc.Equals, life)
+
+		machines, err := st.AllMachines()
+		c.Assert(err, jc.ErrorIsNil)
+		c.Assert(machines, gc.HasLen, expectedMachines)
+
+		services, err := st.AllServices()
+		c.Assert(err, jc.ErrorIsNil)
+		c.Assert(services, gc.HasLen, expectedServices)
+	}
+
+	// Simulate processing a dying environment after the environment is set
+	// to dying, but before the cleanup has removed machines and services.
+	defer state.SetAfterHooks(c, st, func() {
+		assertEnv(state.Dying, 1, 1)
+		err := st.ProcessDyingModel()
+		c.Assert(err, gc.ErrorMatches, `model not empty, found 1 machine\(s\)`)
+		assertEnv(state.Dying, 1, 1)
+	}).Check()
+
+	c.Assert(env.Destroy(), jc.ErrorIsNil)
+}
+
+func (s *ModelSuite) TestProcessDyingControllerEnvironWithHostedEnvsNoOp(c *gc.C) {
+	st := s.Factory.MakeModel(c, nil)
+	defer st.Close()
+
+	controllerEnv, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(controllerEnv.DestroyIncludingHosted(), jc.ErrorIsNil)
+
+	err = s.State.ProcessDyingModel()
+	c.Assert(err, gc.ErrorMatches, `one or more hosted models are not yet dead`)
+
+	c.Assert(controllerEnv.Refresh(), jc.ErrorIsNil)
+	c.Assert(controllerEnv.Life(), gc.Equals, state.Dying)
+}
+
+func (s *ModelSuite) TestListModelUsers(c *gc.C) {
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	expected := addModelUsers(c, s.State)
+	obtained, err := env.Users()
+	c.Assert(err, gc.IsNil)
+
+	assertObtainedUsersMatchExpectedUsers(c, obtained, expected)
+}
+
+func (s *ModelSuite) TestMisMatchedEnvs(c *gc.C) {
+	// create another model
+	otherEnvState := s.Factory.MakeModel(c, nil)
+	defer otherEnvState.Close()
+	otherEnv, err := otherEnvState.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	// get that model from State
+	env, err := s.State.GetModel(otherEnv.ModelTag())
+	c.Assert(err, jc.ErrorIsNil)
+
+	// check that the Users method errors
+	users, err := env.Users()
+	c.Assert(users, gc.IsNil)
+	c.Assert(err, gc.ErrorMatches, "cannot lookup model users outside the current model")
+}
+
+func (s *ModelSuite) TestListUsersTwoModels(c *gc.C) {
+	env, err := s.State.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	otherEnvState := s.Factory.MakeModel(c, nil)
+	defer otherEnvState.Close()
+	otherEnv, err := otherEnvState.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Add users to both models
+	expectedUsers := addModelUsers(c, s.State)
+	expectedUsersOtherEnv := addModelUsers(c, otherEnvState)
+
+	// test that only the expected users are listed for each model
+	obtainedUsers, err := env.Users()
+	c.Assert(err, jc.ErrorIsNil)
+	assertObtainedUsersMatchExpectedUsers(c, obtainedUsers, expectedUsers)
+
+	obtainedUsersOtherEnv, err := otherEnv.Users()
+	c.Assert(err, jc.ErrorIsNil)
+	assertObtainedUsersMatchExpectedUsers(c, obtainedUsersOtherEnv, expectedUsersOtherEnv)
+}
+
+func addModelUsers(c *gc.C, st *state.State) (expected []*state.ModelUser) {
+	// get the model owner
+	testAdmin := names.NewUserTag("test-admin")
+	owner, err := st.ModelUser(testAdmin)
+	c.Assert(err, jc.ErrorIsNil)
+
+	f := factory.NewFactory(st)
+	return []*state.ModelUser{
+		// we expect the owner to be an existing model user
+		owner,
+		// add new users to the model
+		f.MakeModelUser(c, nil),
+		f.MakeModelUser(c, nil),
+		f.MakeModelUser(c, nil),
+	}
+}
+
+func assertObtainedUsersMatchExpectedUsers(c *gc.C, obtainedUsers, expectedUsers []*state.ModelUser) {
+	c.Assert(len(obtainedUsers), gc.Equals, len(expectedUsers))
+	for i, obtained := range obtainedUsers {
+		c.Assert(obtained.ModelTag().Id(), gc.Equals, expectedUsers[i].ModelTag().Id())
+		c.Assert(obtained.UserName(), gc.Equals, expectedUsers[i].UserName())
+		c.Assert(obtained.DisplayName(), gc.Equals, expectedUsers[i].DisplayName())
+		c.Assert(obtained.CreatedBy(), gc.Equals, expectedUsers[i].CreatedBy())
+	}
+}
+
+func (s *ModelSuite) TestAllModels(c *gc.C) {
+	s.Factory.MakeModel(c, &factory.ModelParams{
+		Name: "test", Owner: names.NewUserTag("bob@remote")}).Close()
+	s.Factory.MakeModel(c, &factory.ModelParams{
+		Name: "test", Owner: names.NewUserTag("mary@remote")}).Close()
+	envs, err := s.State.AllModels()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(envs, gc.HasLen, 3)
+	var obtained []string
+	for _, env := range envs {
+		obtained = append(obtained, fmt.Sprintf("%s/%s", env.Owner().Canonical(), env.Name()))
+	}
+	expected := []string{
+		"test-admin@local/testenv",
+		"bob@remote/test",
+		"mary@remote/test",
+	}
+	c.Assert(obtained, jc.SameContents, expected)
+}
+
+func (s *ModelSuite) TestHostedModelCount(c *gc.C) {
+	c.Assert(state.HostedModelCount(c, s.State), gc.Equals, 0)
+
+	st1 := s.Factory.MakeModel(c, nil)
+	defer st1.Close()
+	c.Assert(state.HostedModelCount(c, s.State), gc.Equals, 1)
+
+	st2 := s.Factory.MakeModel(c, nil)
+	defer st2.Close()
+	c.Assert(state.HostedModelCount(c, s.State), gc.Equals, 2)
+
+	env1, err := st1.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env1.Destroy(), jc.ErrorIsNil)
+	c.Assert(state.HostedModelCount(c, s.State), gc.Equals, 1)
+
+	env2, err := st2.Model()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(env2.Destroy(), jc.ErrorIsNil)
+	c.Assert(state.HostedModelCount(c, s.State), gc.Equals, 0)
+}
+
+func assertCleanupRuns(c *gc.C, st *state.State) {
+	err := st.Cleanup()
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func assertNeedsCleanup(c *gc.C, st *state.State) {
+	actual, err := st.NeedsCleanup()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(actual, jc.IsTrue)
+}
+
+func assertDoesNotNeedCleanup(c *gc.C, st *state.State) {
+	actual, err := st.NeedsCleanup()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(actual, jc.IsFalse)
+}
+
+// assertCleanupCount is useful because certain cleanups cause other cleanups
+// to be queued; it makes more sense to just run cleanup repeatedly until the
+// expected number of passes has drained the queue than to unpick exactly
+// which cleanups each object's destruction enqueues.
+func assertCleanupCount(c *gc.C, st *state.State, count int) {
+	for i := 0; i < count; i++ {
+		c.Logf("checking cleanups %d", i)
+		assertNeedsCleanup(c, st)
+		assertCleanupRuns(c, st)
+	}
+	assertDoesNotNeedCleanup(c, st)
+}
+
+// The provisioner will remove dead machines once their backing instances are
+// stopped. For the tests, we remove them directly.
+func assertAllMachinesDeadAndRemove(c *gc.C, st *state.State) {
+	machines, err := st.AllMachines()
+	c.Assert(err, jc.ErrorIsNil)
+	for _, m := range machines {
+		if m.IsManager() {
+			continue
+		}
+		if _, isContainer := m.ParentId(); isContainer {
+			continue
+		}
+		manual, err := m.IsManual()
+		c.Assert(err, jc.ErrorIsNil)
+		if manual {
+			continue
+		}
+
+		c.Assert(m.Life(), gc.Equals, state.Dead)
+		c.Assert(m.Remove(), jc.ErrorIsNil)
+	}
+}
=== added file 'src/github.com/juju/juju/state/modelmigration.go'
--- src/github.com/juju/juju/state/modelmigration.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/modelmigration.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,401 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package state
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/mgo.v2/txn"
+
+	migration "github.com/juju/juju/core/modelmigration"
+)
+
+// This file contains functionality for managing the state documents
+// used by Juju to track model migrations.
+
+// ModelMigration represents the state of a migration attempt for a
+// model.
+type ModelMigration struct {
+	st        *State
+	doc       modelMigDoc
+	statusDoc modelMigStatusDoc
+}
+
+// modelMigDoc holds parameters of a migration attempt for a
+// model. These are written into modelMigrationsC.
+type modelMigDoc struct {
+	// Id holds migration document key. It has the format
+	// "uuid:sequence".
+	Id string `bson:"_id"`
+
+	// The UUID of the model being migrated.
+	ModelUUID string `bson:"model-uuid"`
+
+	// InitiatedBy holds the username of the user that triggered the
+	// migration. It should be in "user@domain" format.
+	InitiatedBy string `bson:"initiated-by"`
+
+	// TargetController holds the UUID of the target controller.
+	TargetController string `bson:"target-controller"`
+
+	// TargetAddrs holds the host:port values for the target API
+	// server.
+	TargetAddrs []string `bson:"target-addrs"`
+
+	// TargetCACert holds the certificate to validate the target API
+	// server's TLS certificate.
+	TargetCACert string `bson:"target-cacert"`
+
+	// TargetEntityTag holds a string representation of the tag to
+	// authenticate to the target controller with.
+	TargetEntityTag string `bson:"target-entity"`
+
+	// TargetPassword holds the password to use with TargetEntityTag
+	// when authenticating.
+	TargetPassword string `bson:"target-password"`
+}
+
+// modelMigStatusDoc tracks the progress of a migration attempt for a
+// model. These are written into modelMigrationStatusC.
+//
+// There is exactly one document in modelMigrationStatusC for each
+// document in modelMigrationsC. Separating them allows for watching
+// for new model migrations without being woken up for each model
+// migration status change.
+type modelMigStatusDoc struct {
+	// These use the same ids as modelMigrationsC:
+	// "uuid:sequence".
+	Id string `bson:"_id"`
+
+	// StartTime holds the time the migration started (stored as per
+	// UnixNano).
+	StartTime int64 `bson:"start-time"`
+
+	// SuccessTime holds the time the migration reached the SUCCESS
+	// phase (stored as per UnixNano).
+	SuccessTime int64 `bson:"success-time"`
+
+	// EndTime holds the time the migration reached a terminal (end)
+	// phase (stored as per UnixNano).
+	EndTime int64 `bson:"end-time"`
+
+	// Phase holds the current migration phase. This should be one of
+	// the string representations of the core/modelmigration.Phase
+	// constants.
+	Phase string `bson:"phase"`
+
+	// PhaseChangedTime holds the time that Phase last changed (stored
+	// as per UnixNano).
+	PhaseChangedTime int64 `bson:"phase-changed-time"`
+
+	// StatusMessage holds a human readable message about the
+	// migration's progress.
+	StatusMessage string `bson:"status-message"`
+}
+
+// Id returns a unique identifier for the model migration.
+func (mig *ModelMigration) Id() string {
+	return mig.doc.Id
+}
+
+// ModelUUID returns the UUID for the model being migrated.
+func (mig *ModelMigration) ModelUUID() string {
+	return mig.doc.ModelUUID
+}
+
+// StartTime returns the time when the migration was started.
+func (mig *ModelMigration) StartTime() time.Time {
+	return unixNanoToTime0(mig.statusDoc.StartTime)
+}
+
+// SuccessTime returns the time when the migration reached
+// SUCCESS.
+func (mig *ModelMigration) SuccessTime() time.Time {
+	return unixNanoToTime0(mig.statusDoc.SuccessTime)
+}
+
+// EndTime returns the time when the migration reached DONE or
+// REAPFAILED.
+func (mig *ModelMigration) EndTime() time.Time {
+	return unixNanoToTime0(mig.statusDoc.EndTime)
+}
+
+// Phase returns the migration's phase.
+func (mig *ModelMigration) Phase() (migration.Phase, error) {
+	phase, ok := migration.ParsePhase(mig.statusDoc.Phase)
+	if !ok {
+		return phase, errors.Errorf("invalid phase in DB: %v", mig.statusDoc.Phase)
+	}
+	return phase, nil
+}
+
+// PhaseChangedTime returns the time when the migration's phase last
+// changed.
+func (mig *ModelMigration) PhaseChangedTime() time.Time {
+	return unixNanoToTime0(mig.statusDoc.PhaseChangedTime)
+}
+
+// StatusMessage returns human readable text about the current
+// progress of the migration.
+func (mig *ModelMigration) StatusMessage() string {
+	return mig.statusDoc.StatusMessage
+}
+
+// InitiatedBy returns the username of the user that initiated the
+// migration.
+func (mig *ModelMigration) InitiatedBy() string {
+	return mig.doc.InitiatedBy
+}
+
+// TargetInfo returns the details required to connect to the
+// migration's target controller.
+func (mig *ModelMigration) TargetInfo() (*migration.TargetInfo, error) {
+	entityTag, err := names.ParseTag(mig.doc.TargetEntityTag)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return &migration.TargetInfo{
+		ControllerTag: names.NewModelTag(mig.doc.TargetController),
+		Addrs:         mig.doc.TargetAddrs,
+		CACert:        mig.doc.TargetCACert,
+		EntityTag:     entityTag,
+		Password:      mig.doc.TargetPassword,
+	}, nil
+}
+
+// SetPhase sets the phase of the migration. An error will be returned
+// if the new phase does not follow the current phase or if the
+// migration is no longer active.
+func (mig *ModelMigration) SetPhase(nextPhase migration.Phase) error {
+	now := GetClock().Now().UnixNano()
+
+	phase, err := mig.Phase()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	if nextPhase == phase {
+		return nil // Already at that phase. Nothing to do.
+	}
+	if !phase.CanTransitionTo(nextPhase) {
+		return errors.Errorf("illegal phase change: %s -> %s", phase, nextPhase)
+	}
+
+	nextDoc := mig.statusDoc
+	nextDoc.Phase = nextPhase.String()
+	nextDoc.PhaseChangedTime = now
+	update := bson.M{
+		"phase":              nextDoc.Phase,
+		"phase-changed-time": now,
+	}
+	if nextPhase == migration.SUCCESS {
+		nextDoc.SuccessTime = now
+		update["success-time"] = now
+	}
+	var ops []txn.Op
+	if nextPhase.IsTerminal() {
+		nextDoc.EndTime = now
+		update["end-time"] = now
+		ops = append(ops, txn.Op{
+			C:      modelMigrationsActiveC,
+			Id:     mig.doc.ModelUUID,
+			Assert: txn.DocExists,
+			Remove: true,
+		})
+	}
+
+	ops = append(ops, txn.Op{
+		C:      modelMigrationStatusC,
+		Id:     mig.statusDoc.Id,
+		Update: bson.M{"$set": update},
+		// Ensure phase hasn't changed underneath us
+		Assert: bson.M{"phase": mig.statusDoc.Phase},
+	})
+
+	if err := mig.st.runTransaction(ops); err == txn.ErrAborted {
+		return errors.New("phase already changed")
+	} else if err != nil {
+		return errors.Annotate(err, "failed to update phase")
+	}
+
+	mig.statusDoc = nextDoc
+	return nil
+}
+
+// SetStatusMessage sets some human readable text about the current
+// progress of the migration.
+func (mig *ModelMigration) SetStatusMessage(text string) error {
+	ops := []txn.Op{{
+		C:      modelMigrationStatusC,
+		Id:     mig.statusDoc.Id,
+		Update: bson.M{"$set": bson.M{"status-message": text}},
+		Assert: txn.DocExists,
+	}}
+	if err := mig.st.runTransaction(ops); err != nil {
+		return errors.Annotate(err, "failed to set migration status")
+	}
+	mig.statusDoc.StatusMessage = text
+	return nil
+}
+
+// Refresh updates the contents of the ModelMigration from the underlying
+// state.
+func (mig *ModelMigration) Refresh() error {
+	// Only the status document is updated. The modelMigDoc is static
+	// after creation.
+	statusColl, closer := mig.st.getCollection(modelMigrationStatusC)
+	defer closer()
+	var statusDoc modelMigStatusDoc
+	err := statusColl.FindId(mig.doc.Id).One(&statusDoc)
+	if err == mgo.ErrNotFound {
+		return errors.NotFoundf("migration status")
+	} else if err != nil {
+		return errors.Annotate(err, "migration status lookup failed")
+	}
+
+	mig.statusDoc = statusDoc
+	return nil
+}
+
+// ModelMigrationSpec holds the information required to create a
+// ModelMigration instance.
+type ModelMigrationSpec struct {
+	InitiatedBy string
+	TargetInfo  migration.TargetInfo
+}
+
+// Validate returns an error if the ModelMigrationSpec contains bad
+// data. Nil is returned otherwise.
+func (spec *ModelMigrationSpec) Validate() error {
+	if !names.IsValidUser(spec.InitiatedBy) {
+		return errors.NotValidf("InitiatedBy")
+	}
+	return spec.TargetInfo.Validate()
+}
+
+// CreateModelMigration initialises state that tracks a model
+// migration. It will return an error if there is already a
+// model migration in progress.
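+//
+// A sketch of intended use (the values below are illustrative only):
+//
+//	mig, err := CreateModelMigration(hostedSt, ModelMigrationSpec{
+//		InitiatedBy: "admin",
+//		TargetInfo: migration.TargetInfo{
+//			ControllerTag: targetControllerTag,
+//			Addrs:         []string{"10.0.0.1:17070"},
+//			CACert:        caCert,
+//			EntityTag:     names.NewUserTag("admin"),
+//			Password:      password,
+//		},
+//	})
+//	if err == nil {
+//		err = mig.SetPhase(migration.READONLY)
+//	}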
+func CreateModelMigration(st *State, spec ModelMigrationSpec) (*ModelMigration, error) {
+	if st.IsController() {
+		return nil, errors.New("controllers can't be migrated")
+	}
+	if err := spec.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	now := GetClock().Now().UnixNano()
+	modelUUID := st.ModelUUID()
+	var doc modelMigDoc
+	var statusDoc modelMigStatusDoc
+	buildTxn := func(int) ([]txn.Op, error) {
+		if isActive, err := IsModelMigrationActive(st, modelUUID); err != nil {
+			return nil, errors.Trace(err)
+		} else if isActive {
+			return nil, errors.New("already in progress")
+		}
+
+		seq, err := st.sequence("modelmigration")
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		id := fmt.Sprintf("%s:%d", modelUUID, seq)
+		doc = modelMigDoc{
+			Id:               id,
+			ModelUUID:        modelUUID,
+			InitiatedBy:      spec.InitiatedBy,
+			TargetController: spec.TargetInfo.ControllerTag.Id(),
+			TargetAddrs:      spec.TargetInfo.Addrs,
+			TargetCACert:     spec.TargetInfo.CACert,
+			TargetEntityTag:  spec.TargetInfo.EntityTag.String(),
+			TargetPassword:   spec.TargetInfo.Password,
+		}
+		statusDoc = modelMigStatusDoc{
+			Id:               id,
+			StartTime:        now,
+			Phase:            migration.QUIESCE.String(),
+			PhaseChangedTime: now,
+		}
+		return []txn.Op{{
+			C:      modelMigrationsC,
+			Id:     doc.Id,
+			Assert: txn.DocMissing,
+			Insert: &doc,
+		}, {
+			C:      modelMigrationStatusC,
+			Id:     statusDoc.Id,
+			Assert: txn.DocMissing,
+			Insert: &statusDoc,
+		}, {
+			C:      modelMigrationsActiveC,
+			Id:     modelUUID,
+			Assert: txn.DocMissing,
+			Insert: bson.M{"id": doc.Id},
+		}}, nil
+	}
+	if err := st.run(buildTxn); err != nil {
+		return nil, errors.Annotate(err, "failed to create migration")
+	}
+
+	return &ModelMigration{
+		doc:       doc,
+		statusDoc: statusDoc,
+		st:        st,
+	}, nil
+}
+
+// GetModelMigration returns the most recent ModelMigration for a
+// model (if any).
+func GetModelMigration(st *State) (*ModelMigration, error) {
+	migColl, closer := st.getCollection(modelMigrationsC)
+	defer closer()
+
+	query := migColl.Find(bson.M{"model-uuid": st.ModelUUID()})
+	query = query.Sort("-_id").Limit(1)
+	var doc modelMigDoc
+	err := query.One(&doc)
+	if err == mgo.ErrNotFound {
+		return nil, errors.NotFoundf("migration")
+	} else if err != nil {
+		return nil, errors.Annotate(err, "migration lookup failed")
+	}
+
+	statusColl, closer := st.getCollection(modelMigrationStatusC)
+	defer closer()
+	var statusDoc modelMigStatusDoc
+	err = statusColl.FindId(doc.Id).One(&statusDoc)
+	if err != nil {
+		return nil, errors.Annotate(err, "failed to find status document")
+	}
+
+	return &ModelMigration{
+		doc:       doc,
+		statusDoc: statusDoc,
+		st:        st,
+	}, nil
+}
+
+// IsModelMigrationActive returns true if a migration is in progress for
+// the given model.
+func IsModelMigrationActive(st *State, modelUUID string) (bool, error) {
+	active, closer := st.getCollection(modelMigrationsActiveC)
+	defer closer()
+	n, err := active.FindId(modelUUID).Count()
+	if err != nil {
+		return false, errors.Trace(err)
+	}
+	return n > 0, nil
+}
+
+func unixNanoToTime0(i int64) time.Time {
+	if i == 0 {
+		return time.Time{}
+	}
+	return time.Unix(0, i)
+}
=== added file 'src/github.com/juju/juju/state/modelmigration_test.go'
--- src/github.com/juju/juju/state/modelmigration_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/modelmigration_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,416 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package state_test + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + migration "github.com/juju/juju/core/modelmigration" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" +) + +type ModelMigrationSuite struct { + ConnSuite + State2 *state.State + clock *coretesting.Clock + stdSpec state.ModelMigrationSpec +} + +var _ = gc.Suite(new(ModelMigrationSuite)) + +func (s *ModelMigrationSuite) SetUpTest(c *gc.C) { + s.clock = coretesting.NewClock(time.Now().Truncate(time.Second)) + s.PatchValue(&state.GetClock, func() clock.Clock { + return s.clock + }) + + s.ConnSuite.SetUpTest(c) + + // Create a hosted model to migrate. + s.State2 = s.Factory.MakeModel(c, nil) + s.AddCleanup(func(*gc.C) { s.State2.Close() }) + + // Plausible migration arguments to test with. + s.stdSpec = state.ModelMigrationSpec{ + InitiatedBy: "admin", + TargetInfo: migration.TargetInfo{ + ControllerTag: s.State.ModelTag(), + Addrs: []string{"1.2.3.4:5555", "4.3.2.1:6666"}, + CACert: "cert", + EntityTag: names.NewUserTag("user"), + Password: "password", + }, + } +} + +func (s *ModelMigrationSuite) TestCreate(c *gc.C) { + mig, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + + c.Check(mig.ModelUUID(), gc.Equals, s.State2.ModelUUID()) + c.Check(mig.Id(), gc.Equals, mig.ModelUUID()+":0") + + c.Check(mig.StartTime(), gc.Equals, s.clock.Now()) + + c.Check(mig.SuccessTime().IsZero(), jc.IsTrue) + c.Check(mig.EndTime().IsZero(), jc.IsTrue) + c.Check(mig.StatusMessage(), gc.Equals, "") + c.Check(mig.InitiatedBy(), gc.Equals, "admin") + + info, err := mig.TargetInfo() + c.Assert(err, jc.ErrorIsNil) + c.Check(*info, jc.DeepEquals, s.stdSpec.TargetInfo) + + assertPhase(c, mig, migration.QUIESCE) + c.Check(mig.PhaseChangedTime(), gc.Equals, mig.StartTime()) + + assertMigrationActive(c, s.State2) +} + +func (s *ModelMigrationSuite) TestIdSequencesAreIndependent(c *gc.C) { + st2 := s.State2 + st3 := s.Factory.MakeModel(c, nil) + s.AddCleanup(func(*gc.C) { st3.Close() }) + + mig2, err := state.CreateModelMigration(st2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + c.Check(mig2.Id(), gc.Equals, st2.ModelUUID()+":0") + + mig3, err := state.CreateModelMigration(st3, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + c.Check(mig3.Id(), gc.Equals, st3.ModelUUID()+":0") +} + +func (s *ModelMigrationSuite) TestIdSequencesIncrement(c *gc.C) { + createAndAbort := func() string { + mig, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + c.Check(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) + return mig.Id() + } + + modelUUID := s.State2.ModelUUID() + c.Check(createAndAbort(), gc.Equals, modelUUID+":0") + c.Check(createAndAbort(), gc.Equals, modelUUID+":1") + c.Check(createAndAbort(), gc.Equals, modelUUID+":2") +} + +func (s *ModelMigrationSuite) TestIdSequencesIncrementOnlyWhenNecessary(c *gc.C) { + // Ensure that sequence numbers aren't "used up" unnecessarily + // when the create txn is going to fail. + modelUUID := s.State2.ModelUUID() + + mig, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + c.Check(mig.Id(), gc.Equals, modelUUID+":0") + + // This attempt will fail because a migration is already in + // progress. + _, err = state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, gc.ErrorMatches, ".+already in progress") + + // Now abort the migration and create another. 
The Id sequence + // should have only incremented by 1. + c.Assert(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) + + mig, err = state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + c.Check(mig.Id(), gc.Equals, modelUUID+":1") +} + +func (s *ModelMigrationSuite) TestSpecValidation(c *gc.C) { + tests := []struct { + label string + tweakSpec func(*state.ModelMigrationSpec) + errorPattern string + }{{ + "empty InitiatedBy", + func(spec *state.ModelMigrationSpec) { + spec.InitiatedBy = "" + }, + "InitiatedBy not valid", + }, { + "invalid InitiatedBy", + func(spec *state.ModelMigrationSpec) { + spec.InitiatedBy = "!" + }, + "InitiatedBy not valid", + }, { + "TargetInfo is validated", + func(spec *state.ModelMigrationSpec) { + spec.TargetInfo.Password = "" + }, + "empty Password not valid", + }} + for _, test := range tests { + c.Logf("---- %s -----------", test.label) + + // Set up spec. + spec := s.stdSpec + test.tweakSpec(&spec) + + // Check Validate directly. + err := spec.Validate() + c.Check(errors.IsNotValid(err), jc.IsTrue) + c.Check(err, gc.ErrorMatches, test.errorPattern) + + // Ensure that CreateModelMigration rejects the bad spec too. + mig, err := state.CreateModelMigration(s.State2, spec) + c.Check(mig, gc.IsNil) + c.Check(errors.IsNotValid(err), jc.IsTrue) + c.Check(err, gc.ErrorMatches, test.errorPattern) + } +} + +func (s *ModelMigrationSuite) TestCreateWithControllerModel(c *gc.C) { + mig, err := state.CreateModelMigration( + s.State, // This is the State for the controller + s.stdSpec, + ) + c.Check(mig, gc.IsNil) + c.Check(err, gc.ErrorMatches, "controllers can't be migrated") +} + +func (s *ModelMigrationSuite) TestCreateMigrationInProgress(c *gc.C) { + mig, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(mig, gc.Not(gc.IsNil)) + c.Assert(err, jc.ErrorIsNil) + + mig2, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Check(mig2, gc.IsNil) + c.Check(err, gc.ErrorMatches, "failed to create migration: already in progress") +} + +func (s *ModelMigrationSuite) TestCreateMigrationRace(c *gc.C) { + defer state.SetBeforeHooks(c, s.State2, func() { + mig, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(mig, gc.Not(gc.IsNil)) + c.Assert(err, jc.ErrorIsNil) + }).Check() + + mig, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Check(mig, gc.IsNil) + c.Check(err, gc.ErrorMatches, "failed to create migration: already in progress") +} + +func (s *ModelMigrationSuite) TestGet(c *gc.C) { + mig1, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + + mig2, err := state.GetModelMigration(s.State2) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(mig1.Id(), gc.Equals, mig2.Id()) +} + +func (s *ModelMigrationSuite) TestGetNotExist(c *gc.C) { + mig, err := state.GetModelMigration(s.State2) + c.Check(mig, gc.IsNil) + c.Check(errors.IsNotFound(err), jc.IsTrue) +} + +func (s *ModelMigrationSuite) TestGetsLatestAttempt(c *gc.C) { + modelUUID := s.State2.ModelUUID() + + for i := 0; i < 10; i++ { + c.Logf("loop %d", i) + _, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + + mig, err := state.GetModelMigration(s.State2) + c.Check(mig.Id(), gc.Equals, fmt.Sprintf("%s:%d", modelUUID, i)) + + c.Assert(mig.SetPhase(migration.ABORT), jc.ErrorIsNil) + } +} + +func (s *ModelMigrationSuite) TestRefresh(c *gc.C) { + mig1, err := state.CreateModelMigration(s.State2, s.stdSpec) + c.Assert(err, jc.ErrorIsNil) + + mig2, err := state.GetModelMigration(s.State2) + 
c.Assert(err, jc.ErrorIsNil)
+
+	err = mig1.SetPhase(migration.READONLY)
+	c.Assert(err, jc.ErrorIsNil)
+
+	assertPhase(c, mig2, migration.QUIESCE)
+	err = mig2.Refresh()
+	c.Assert(err, jc.ErrorIsNil)
+	assertPhase(c, mig2, migration.READONLY)
+}
+
+func (s *ModelMigrationSuite) TestSuccessfulPhaseTransitions(c *gc.C) {
+	st := s.State2
+
+	mig, err := state.CreateModelMigration(st, s.stdSpec)
+	c.Assert(mig, gc.Not(gc.IsNil))
+	c.Assert(err, jc.ErrorIsNil)
+
+	mig2, err := state.GetModelMigration(st)
+	c.Assert(err, jc.ErrorIsNil)
+
+	phases := []migration.Phase{
+		migration.READONLY,
+		migration.PRECHECK,
+		migration.IMPORT,
+		migration.VALIDATION,
+		migration.SUCCESS,
+		migration.LOGTRANSFER,
+		migration.REAP,
+		migration.DONE,
+	}
+
+	var successTime time.Time
+	for _, phase := range phases[:len(phases)-1] {
+		err := mig.SetPhase(phase)
+		c.Assert(err, jc.ErrorIsNil)
+
+		assertPhase(c, mig, phase)
+		c.Assert(mig.PhaseChangedTime(), gc.Equals, s.clock.Now())
+
+		// Check success timestamp is set only when SUCCESS is
+		// reached.
+		if phase < migration.SUCCESS {
+			c.Assert(mig.SuccessTime().IsZero(), jc.IsTrue)
+		} else {
+			if phase == migration.SUCCESS {
+				successTime = s.clock.Now()
+			}
+			c.Assert(mig.SuccessTime(), gc.Equals, successTime)
+		}
+
+		// Check still marked as active.
+		assertMigrationActive(c, s.State2)
+		c.Assert(mig.EndTime().IsZero(), jc.IsTrue)
+
+		// Ensure change was persisted.
+		c.Assert(mig2.Refresh(), jc.ErrorIsNil)
+		assertPhase(c, mig2, phase)
+
+		s.clock.Advance(time.Millisecond)
+	}
+
+	// Now move to the final phase (DONE) and ensure fields are set as
+	// expected.
+	err = mig.SetPhase(migration.DONE)
+	c.Assert(err, jc.ErrorIsNil)
+	assertPhase(c, mig, migration.DONE)
+	s.assertMigrationCleanedUp(c, mig)
+}
+
+func (s *ModelMigrationSuite) TestABORTCleanup(c *gc.C) {
+	mig, err := state.CreateModelMigration(s.State2, s.stdSpec)
+	c.Assert(err, jc.ErrorIsNil)
+
+	s.clock.Advance(time.Millisecond)
+	c.Assert(mig.SetPhase(migration.ABORT), jc.ErrorIsNil)
+
+	s.assertMigrationCleanedUp(c, mig)
+}
+
+func (s *ModelMigrationSuite) TestREAPFAILEDCleanup(c *gc.C) {
+	mig, err := state.CreateModelMigration(s.State2, s.stdSpec)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Advance the migration to REAPFAILED.
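+	//
+	// Reaching any terminal phase (DONE, ABORT or REAPFAILED) also removes
+	// the model's entry from modelMigrationsActiveC (see SetPhase), so a
+	// check along these lines reports false afterwards (sketch):
+	//
+	//	active, err := state.IsModelMigrationActive(st, st.ModelUUID())
+	//	// err == nil && active == false once REAPFAILED is reached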
+	phases := []migration.Phase{
+		migration.READONLY,
+		migration.PRECHECK,
+		migration.IMPORT,
+		migration.VALIDATION,
+		migration.SUCCESS,
+		migration.LOGTRANSFER,
+		migration.REAP,
+		migration.REAPFAILED,
+	}
+	for _, phase := range phases {
+		s.clock.Advance(time.Millisecond)
+		c.Assert(mig.SetPhase(phase), jc.ErrorIsNil)
+	}
+
+	s.assertMigrationCleanedUp(c, mig)
+}
+
+func (s *ModelMigrationSuite) assertMigrationCleanedUp(c *gc.C, mig *state.ModelMigration) {
+	c.Assert(mig.PhaseChangedTime(), gc.Equals, s.clock.Now())
+	c.Assert(mig.EndTime(), gc.Equals, s.clock.Now())
+	assertMigrationNotActive(c, s.State2)
+}
+
+func (s *ModelMigrationSuite) TestIllegalPhaseTransition(c *gc.C) {
+	mig, err := state.CreateModelMigration(s.State2, s.stdSpec)
+	c.Assert(err, jc.ErrorIsNil)
+
+	err = mig.SetPhase(migration.SUCCESS)
+	c.Check(err, gc.ErrorMatches, "illegal phase change: QUIESCE -> SUCCESS")
+}
+
+func (s *ModelMigrationSuite) TestPhaseChangeRace(c *gc.C) {
+	mig, err := state.CreateModelMigration(s.State2, s.stdSpec)
+	c.Assert(mig, gc.Not(gc.IsNil))
+	c.Assert(err, jc.ErrorIsNil)
+
+	defer state.SetBeforeHooks(c, s.State2, func() {
+		mig, err := state.GetModelMigration(s.State2)
+		c.Assert(err, jc.ErrorIsNil)
+		c.Assert(mig.SetPhase(migration.READONLY), jc.ErrorIsNil)
+	}).Check()
+
+	err = mig.SetPhase(migration.READONLY)
+	c.Assert(err, gc.ErrorMatches, "phase already changed")
+	assertPhase(c, mig, migration.QUIESCE)
+
+	// After a refresh, the phase change should succeed.
+	c.Assert(mig.Refresh(), jc.ErrorIsNil)
+	err = mig.SetPhase(migration.READONLY)
+	c.Assert(err, jc.ErrorIsNil)
+	assertPhase(c, mig, migration.READONLY)
+}
+
+func (s *ModelMigrationSuite) TestStatusMessage(c *gc.C) {
+	mig, err := state.CreateModelMigration(s.State2, s.stdSpec)
+	c.Assert(mig, gc.Not(gc.IsNil))
+	c.Assert(err, jc.ErrorIsNil)
+
+	mig2, err := state.GetModelMigration(s.State2)
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Check(mig.StatusMessage(), gc.Equals, "")
+	c.Check(mig2.StatusMessage(), gc.Equals, "")
+
+	err = mig.SetStatusMessage("foo bar")
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Check(mig.StatusMessage(), gc.Equals, "foo bar")
+
+	c.Assert(mig2.Refresh(), jc.ErrorIsNil)
+	c.Check(mig2.StatusMessage(), gc.Equals, "foo bar")
+}
+
+func assertPhase(c *gc.C, mig *state.ModelMigration, phase migration.Phase) {
+	actualPhase, err := mig.Phase()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Check(actualPhase, gc.Equals, phase)
+}
+
+func assertMigrationActive(c *gc.C, st *state.State) {
+	c.Check(isMigrationActive(c, st), jc.IsTrue)
+}
+
+func assertMigrationNotActive(c *gc.C, st *state.State) {
+	c.Check(isMigrationActive(c, st), jc.IsFalse)
+}
+
+func isMigrationActive(c *gc.C, st *state.State) bool {
+	isActive, err := state.IsModelMigrationActive(st, st.ModelUUID())
+	c.Assert(err, jc.ErrorIsNil)
+	return isActive
+}
=== added file 'src/github.com/juju/juju/state/modeluser.go'
--- src/github.com/juju/juju/state/modeluser.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/modeluser.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,328 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package state
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/mgo.v2/txn"
+)
+
+// ModelUser represents a user's access to a model. Whereas a user
+// might be remote or might have access to several models, a ModelUser
+// always represents a single user's access to a single model.
+// There should be no more than one ModelUser per user, per model.
+type ModelUser struct {
+	st  *State
+	doc modelUserDoc
+}
+
+type modelUserDoc struct {
+	ID          string    `bson:"_id"`
+	ModelUUID   string    `bson:"model-uuid"`
+	UserName    string    `bson:"user"`
+	DisplayName string    `bson:"displayname"`
+	CreatedBy   string    `bson:"createdby"`
+	DateCreated time.Time `bson:"datecreated"`
+	ReadOnly    bool      `bson:"readonly"`
+}
+
+// modelUserLastConnectionDoc is updated by the apiserver whenever the user
+// connects over the API. This update is not done using mgo.txn so the values
+// could well change underneath a normal transaction and as such, it should
+// NEVER appear in any transaction asserts. It is really informational only as
+// far as everyone except the api server is concerned.
+type modelUserLastConnectionDoc struct {
+	ID             string    `bson:"_id"`
+	ModelUUID      string    `bson:"model-uuid"`
+	UserName       string    `bson:"user"`
+	LastConnection time.Time `bson:"last-connection"`
+}
+
+// ID returns the ID of the model user.
+func (e *ModelUser) ID() string {
+	return e.doc.ID
+}
+
+// ModelTag returns the model tag of the model user.
+func (e *ModelUser) ModelTag() names.ModelTag {
+	return names.NewModelTag(e.doc.ModelUUID)
+}
+
+// UserTag returns the tag for the model user.
+func (e *ModelUser) UserTag() names.UserTag {
+	return names.NewUserTag(e.doc.UserName)
+}
+
+// UserName returns the user name of the model user.
+func (e *ModelUser) UserName() string {
+	return e.doc.UserName
+}
+
+// DisplayName returns the display name of the model user.
+func (e *ModelUser) DisplayName() string {
+	return e.doc.DisplayName
+}
+
+// CreatedBy returns the user who created the model user.
+func (e *ModelUser) CreatedBy() string {
+	return e.doc.CreatedBy
+}
+
+// DateCreated returns the date the model user was created in UTC.
+func (e *ModelUser) DateCreated() time.Time {
+	return e.doc.DateCreated.UTC()
+}
+
+// ReadOnly reports whether the user has only read access to the model,
+// rather than write access.
+func (e *ModelUser) ReadOnly() bool {
+	return e.doc.ReadOnly
+}
+
+// LastConnection returns when this ModelUser last connected through the API
+// in UTC. The resulting time will be zero if the user has never logged in.
+func (e *ModelUser) LastConnection() (time.Time, error) {
+	lastConnections, closer := e.st.getRawCollection(modelUserLastConnectionC)
+	defer closer()
+
+	username := strings.ToLower(e.UserName())
+	var lastConn modelUserLastConnectionDoc
+	err := lastConnections.FindId(e.st.docID(username)).Select(bson.D{{"last-connection", 1}}).One(&lastConn)
+	if err != nil {
+		if err == mgo.ErrNotFound {
+			err = errors.Wrap(err, NeverConnectedError(e.UserName()))
+		}
+		return time.Time{}, errors.Trace(err)
+	}
+
+	return lastConn.LastConnection.UTC(), nil
+}
+
+// NeverConnectedError is used to indicate that a user has never connected to
+// a model.
+type NeverConnectedError string
+
+// Error returns the error string for a user who has never connected to a
+// model.
+func (e NeverConnectedError) Error() string {
+	return `never connected: "` + string(e) + `"`
+}
+
+// IsNeverConnectedError returns true if err is of type NeverConnectedError.
+func IsNeverConnectedError(err error) bool {
+	_, ok := errors.Cause(err).(NeverConnectedError)
+	return ok
+}
+
+// UpdateLastConnection updates the last connection time of the model user.
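+// As noted on modelUserLastConnectionDoc, the write deliberately bypasses
+// mgo/txn, so the recorded time must never be used in transaction asserts.
+// A sketch of a call site (as the API server might record a login; the
+// surrounding code is assumed):
+//
+//	if err := modelUser.UpdateLastConnection(); err != nil {
+//		logger.Errorf("recording last connection: %v", err)
+//	}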
+func (e *ModelUser) UpdateLastConnection() error {
+	return e.updateLastConnection(nowToTheSecond())
+}
+
+func (e *ModelUser) updateLastConnection(when time.Time) error {
+	lastConnections, closer := e.st.getCollection(modelUserLastConnectionC)
+	defer closer()
+
+	lastConnectionsW := lastConnections.Writeable()
+
+	// Update the safe mode of the underlying session to not require
+	// write majority, nor sync to disk.
+	session := lastConnectionsW.Underlying().Database.Session
+	session.SetSafe(&mgo.Safe{})
+
+	lastConn := modelUserLastConnectionDoc{
+		ID:             e.st.docID(strings.ToLower(e.UserName())),
+		ModelUUID:      e.ModelTag().Id(),
+		UserName:       e.UserName(),
+		LastConnection: when,
+	}
+	_, err := lastConnectionsW.UpsertId(lastConn.ID, lastConn)
+	return errors.Trace(err)
+}
+
+// ModelUser returns the ModelUser for the given user tag.
+func (st *State) ModelUser(user names.UserTag) (*ModelUser, error) {
+	modelUser := &ModelUser{st: st}
+	modelUsers, closer := st.getCollection(modelUsersC)
+	defer closer()
+
+	username := strings.ToLower(user.Canonical())
+	err := modelUsers.FindId(username).One(&modelUser.doc)
+	if err == mgo.ErrNotFound {
+		return nil, errors.NotFoundf("model user %q", user.Canonical())
+	}
+	// DateCreated is inserted as UTC, but read out as local time. So we
+	// convert it back to UTC here.
+	modelUser.doc.DateCreated = modelUser.doc.DateCreated.UTC()
+	return modelUser, nil
+}
+
+// ModelUserSpec defines the attributes that can be set when adding a new
+// model user.
+type ModelUserSpec struct {
+	User        names.UserTag
+	CreatedBy   names.UserTag
+	DisplayName string
+	ReadOnly    bool
+}
+
+// AddModelUser adds a new user to the database.
+func (st *State) AddModelUser(spec ModelUserSpec) (*ModelUser, error) {
+	// Ensure local user exists in state before adding them as a model user.
+	if spec.User.IsLocal() {
+		localUser, err := st.User(spec.User)
+		if err != nil {
+			return nil, errors.Annotate(err, fmt.Sprintf("user %q does not exist locally", spec.User.Name()))
+		}
+		if spec.DisplayName == "" {
+			spec.DisplayName = localUser.DisplayName()
+		}
+	}
+
+	// Ensure local createdBy user exists.
+	if spec.CreatedBy.IsLocal() {
+		if _, err := st.User(spec.CreatedBy); err != nil {
+			return nil, errors.Annotatef(err, "createdBy user %q does not exist locally", spec.CreatedBy.Name())
+		}
+	}
+
+	modelUUID := st.ModelUUID()
+	op := createModelUserOp(modelUUID, spec.User, spec.CreatedBy, spec.DisplayName, nowToTheSecond(), spec.ReadOnly)
+	err := st.runTransaction([]txn.Op{op})
+	if err == txn.ErrAborted {
+		err = errors.AlreadyExistsf("model user %q", spec.User.Canonical())
+	}
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	// Re-read from DB to get the multi-model updated values.
+	return st.ModelUser(spec.User)
+}
+
+// modelUserID returns the document id of the model user
+func modelUserID(user names.UserTag) string {
+	username := user.Canonical()
+	return strings.ToLower(username)
+}
+
+func createModelUserOp(modelUUID string, user, createdBy names.UserTag, displayName string, dateCreated time.Time, readOnly bool) txn.Op {
+	creatorname := createdBy.Canonical()
+	doc := &modelUserDoc{
+		ID:          modelUserID(user),
+		ModelUUID:   modelUUID,
+		UserName:    user.Canonical(),
+		DisplayName: displayName,
+		ReadOnly:    readOnly,
+		CreatedBy:   creatorname,
+		DateCreated: dateCreated,
+	}
+	return txn.Op{
+		C:      modelUsersC,
+		Id:     modelUserID(user),
+		Assert: txn.DocMissing,
+		Insert: doc,
+	}
+}
+
+// RemoveModelUser removes a user from the database.
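+// Removing a user that has no access reports errors.NotFound, via the
+// aborted txn assert below. A minimal sketch of caller-side handling
+// (the user tag is illustrative):
+//
+//	err := st.RemoveModelUser(names.NewUserTag("bob@local"))
+//	if errors.IsNotFound(err) {
+//		// the user had no access to this model
+//	}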
+func (st *State) RemoveModelUser(user names.UserTag) error {
+	ops := []txn.Op{{
+		C:      modelUsersC,
+		Id:     modelUserID(user),
+		Assert: txn.DocExists,
+		Remove: true,
+	}}
+	err := st.runTransaction(ops)
+	if err == txn.ErrAborted {
+		err = errors.NewNotFound(err, fmt.Sprintf("env user %q does not exist", user.Canonical()))
+	}
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// UserModel contains information about a model that a
+// user has access to.
+type UserModel struct {
+	*Model
+	User names.UserTag
+}
+
+// LastConnection returns the last time the user has connected to the
+// model.
+func (e *UserModel) LastConnection() (time.Time, error) {
+	lastConnections, lastConnCloser := e.st.getRawCollection(modelUserLastConnectionC)
+	defer lastConnCloser()
+
+	lastConnDoc := modelUserLastConnectionDoc{}
+	id := ensureModelUUID(e.ModelTag().Id(), strings.ToLower(e.User.Canonical()))
+	err := lastConnections.FindId(id).Select(bson.D{{"last-connection", 1}}).One(&lastConnDoc)
+	if (err != nil && err != mgo.ErrNotFound) || lastConnDoc.LastConnection.IsZero() {
+		return time.Time{}, errors.Trace(NeverConnectedError(e.User.Canonical()))
+	}
+
+	return lastConnDoc.LastConnection, nil
+}
+
+// ModelsForUser returns a list of models that the user
+// is able to access.
+func (st *State) ModelsForUser(user names.UserTag) ([]*UserModel, error) {
+	// Since there are no groups at this stage, the simplest way to get all
+	// the models that a particular user can see is to look through the
+	// model user collection. A raw collection is required to support
+	// queries across multiple models.
+	modelUsers, userCloser := st.getRawCollection(modelUsersC)
+	defer userCloser()
+
+	// TODO: consider adding an index to the modelUsers collection on the username.
+	var userSlice []modelUserDoc
+	err := modelUsers.Find(bson.D{{"user", user.Canonical()}}).Select(bson.D{{"model-uuid", 1}, {"_id", 1}}).All(&userSlice)
+	if err != nil {
+		return nil, err
+	}
+
+	var result []*UserModel
+	for _, doc := range userSlice {
+		modelTag := names.NewModelTag(doc.ModelUUID)
+		env, err := st.GetModel(modelTag)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		result = append(result, &UserModel{Model: env, User: user})
+	}
+
+	return result, nil
+}
+
+// IsControllerAdministrator returns true if the user specified has access to the
+// controller model (the system model).
+func (st *State) IsControllerAdministrator(user names.UserTag) (bool, error) {
+	ssinfo, err := st.ControllerInfo()
+	if err != nil {
+		return false, errors.Annotate(err, "could not get controller info")
+	}
+
+	serverUUID := ssinfo.ModelTag.Id()
+
+	modelUsers, userCloser := st.getRawCollection(modelUsersC)
+	defer userCloser()
+
+	count, err := modelUsers.Find(bson.D{
+		{"model-uuid", serverUUID},
+		{"user", user.Canonical()},
+	}).Count()
+	if err != nil {
+		return false, errors.Trace(err)
+	}
+	return count == 1, nil
+}
=== added file 'src/github.com/juju/juju/state/modeluser_test.go'
--- src/github.com/juju/juju/state/modeluser_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/modeluser_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,372 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package state_test + +import ( + "fmt" + "sort" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/state" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" +) + +type ModelUserSuite struct { + ConnSuite +} + +var _ = gc.Suite(&ModelUserSuite{}) + +func (s *ModelUserSuite) TestAddModelUser(c *gc.C) { + now := state.NowToTheSecond() + user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", NoModelUser: true}) + createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) + modelUser, err := s.State.AddModelUser(state.ModelUserSpec{ + User: user.UserTag(), CreatedBy: createdBy.UserTag()}) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(modelUser.ID(), gc.Equals, fmt.Sprintf("%s:validusername@local", s.modelTag.Id())) + c.Assert(modelUser.ModelTag(), gc.Equals, s.modelTag) + c.Assert(modelUser.UserName(), gc.Equals, "validusername@local") + c.Assert(modelUser.DisplayName(), gc.Equals, user.DisplayName()) + c.Assert(modelUser.ReadOnly(), jc.IsFalse) + c.Assert(modelUser.CreatedBy(), gc.Equals, "createdby@local") + c.Assert(modelUser.DateCreated().Equal(now) || modelUser.DateCreated().After(now), jc.IsTrue) + when, err := modelUser.LastConnection() + c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) + c.Assert(when.IsZero(), jc.IsTrue) + + modelUser, err = s.State.ModelUser(user.UserTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(modelUser.ID(), gc.Equals, fmt.Sprintf("%s:validusername@local", s.modelTag.Id())) + c.Assert(modelUser.ModelTag(), gc.Equals, s.modelTag) + c.Assert(modelUser.UserName(), gc.Equals, "validusername@local") + c.Assert(modelUser.DisplayName(), gc.Equals, user.DisplayName()) + c.Assert(modelUser.ReadOnly(), jc.IsFalse) + c.Assert(modelUser.CreatedBy(), gc.Equals, "createdby@local") + c.Assert(modelUser.DateCreated().Equal(now) || modelUser.DateCreated().After(now), jc.IsTrue) + when, err = modelUser.LastConnection() + c.Assert(err, jc.Satisfies, state.IsNeverConnectedError) + c.Assert(when.IsZero(), jc.IsTrue) +} + +func (s *ModelUserSuite) TestAddReadOnlyModelUser(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", NoModelUser: true}) + createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) + modelUser, err := s.State.AddModelUser(state.ModelUserSpec{ + User: user.UserTag(), CreatedBy: createdBy.UserTag(), ReadOnly: true}) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(modelUser.UserName(), gc.Equals, "validusername@local") + c.Assert(modelUser.DisplayName(), gc.Equals, user.DisplayName()) + c.Assert(modelUser.ReadOnly(), jc.IsTrue) + + // Make sure that it is set when we read the user out. 
+ modelUser, err = s.State.ModelUser(user.UserTag()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(modelUser.UserName(), gc.Equals, "validusername@local") + c.Assert(modelUser.ReadOnly(), jc.IsTrue) +} + +func (s *ModelUserSuite) TestCaseUserNameVsId(c *gc.C) { + model, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + user, err := s.State.AddModelUser(state.ModelUserSpec{ + User: names.NewUserTag("Bob@RandomProvider"), + CreatedBy: model.Owner()}) + c.Assert(err, gc.IsNil) + c.Assert(user.UserName(), gc.Equals, "Bob@RandomProvider") + c.Assert(user.ID(), gc.Equals, state.DocID(s.State, "bob@randomprovider")) +} + +func (s *ModelUserSuite) TestCaseSensitiveModelUserErrors(c *gc.C) { + model, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: "Bob@ubuntuone"}) + + _, err = s.State.AddModelUser(state.ModelUserSpec{ + User: names.NewUserTag("boB@ubuntuone"), + CreatedBy: model.Owner()}) + c.Assert(err, gc.ErrorMatches, `model user "boB@ubuntuone" already exists`) + c.Assert(errors.IsAlreadyExists(err), jc.IsTrue) +} + +func (s *ModelUserSuite) TestCaseInsensitiveLookupInMultiEnvirons(c *gc.C) { + assertIsolated := func(st1, st2 *state.State, usernames ...string) { + f := factory.NewFactory(st1) + expectedUser := f.MakeModelUser(c, &factory.ModelUserParams{User: usernames[0]}) + + // assert case insensitive lookup for each username + for _, username := range usernames { + userTag := names.NewUserTag(username) + obtainedUser, err := st1.ModelUser(userTag) + c.Assert(err, jc.ErrorIsNil) + c.Assert(obtainedUser, gc.DeepEquals, expectedUser) + + _, err = st2.ModelUser(userTag) + c.Assert(errors.IsNotFound(err), jc.IsTrue) + } + } + + otherSt := s.Factory.MakeModel(c, nil) + defer otherSt.Close() + assertIsolated(s.State, otherSt, + "Bob@UbuntuOne", + "bob@ubuntuone", + "BOB@UBUNTUONE", + ) + assertIsolated(otherSt, s.State, + "Sam@UbuntuOne", + "sam@ubuntuone", + "SAM@UBUNTUONE", + ) +} + +func (s *ModelUserSuite) TestAddModelDisplayName(c *gc.C) { + modelUserDefault := s.Factory.MakeModelUser(c, nil) + c.Assert(modelUserDefault.DisplayName(), gc.Matches, "display name-[0-9]*") + + modelUser := s.Factory.MakeModelUser(c, &factory.ModelUserParams{DisplayName: "Override user display name"}) + c.Assert(modelUser.DisplayName(), gc.Equals, "Override user display name") +} + +func (s *ModelUserSuite) TestAddModelNoUserFails(c *gc.C) { + createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"}) + _, err := s.State.AddModelUser(state.ModelUserSpec{ + User: names.NewLocalUserTag("validusername"), + CreatedBy: createdBy.UserTag()}) + c.Assert(err, gc.ErrorMatches, `user "validusername" does not exist locally: user "validusername" not found`) +} + +func (s *ModelUserSuite) TestAddModelNoCreatedByUserFails(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername"}) + _, err := s.State.AddModelUser(state.ModelUserSpec{ + User: user.UserTag(), + CreatedBy: names.NewLocalUserTag("createdby")}) + c.Assert(err, gc.ErrorMatches, `createdBy user "createdby" does not exist locally: user "createdby" not found`) +} + +func (s *ModelUserSuite) TestRemoveModelUser(c *gc.C) { + user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validUsername"}) + _, err := s.State.ModelUser(user.UserTag()) + c.Assert(err, jc.ErrorIsNil) + + err = s.State.RemoveModelUser(user.UserTag()) + c.Assert(err, jc.ErrorIsNil) + + _, err = s.State.ModelUser(user.UserTag()) + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s 
*ModelUserSuite) TestRemoveModelUserFails(c *gc.C) {
+	user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true})
+	err := s.State.RemoveModelUser(user.UserTag())
+	c.Assert(err, jc.Satisfies, errors.IsNotFound)
+}
+
+func (s *ModelUserSuite) TestUpdateLastConnection(c *gc.C) {
+	now := state.NowToTheSecond()
+	createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"})
+	user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", Creator: createdBy.Tag()})
+	modelUser, err := s.State.ModelUser(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+	err = modelUser.UpdateLastConnection()
+	c.Assert(err, jc.ErrorIsNil)
+	when, err := modelUser.LastConnection()
+	c.Assert(err, jc.ErrorIsNil)
+	// It is possible that the update is done over a second boundary, so we need
+	// to check for after now as well as equal.
+	c.Assert(when.After(now) || when.Equal(now), jc.IsTrue)
+}
+
+func (s *ModelUserSuite) TestUpdateLastConnectionTwoModelUsers(c *gc.C) {
+	now := state.NowToTheSecond()
+
+	// Create a user and add them to the initial model.
+	createdBy := s.Factory.MakeUser(c, &factory.UserParams{Name: "createdby"})
+	user := s.Factory.MakeUser(c, &factory.UserParams{Name: "validusername", Creator: createdBy.Tag()})
+	modelUser, err := s.State.ModelUser(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Create a second model and add the same user to this.
+	st2 := s.Factory.MakeModel(c, nil)
+	defer st2.Close()
+	modelUser2, err := st2.AddModelUser(state.ModelUserSpec{
+		User:      user.UserTag(),
+		CreatedBy: createdBy.UserTag()})
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Now we have two model users with the same username. Ensure we get
+	// separate last connections.
+
+	// Connect modelUser and get last connection.
+	err = modelUser.UpdateLastConnection()
+	c.Assert(err, jc.ErrorIsNil)
+	when, err := modelUser.LastConnection()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(when.After(now) || when.Equal(now), jc.IsTrue)
+
+	// Try to get last connection for modelUser2. As they have never connected,
+	// we expect to get an error.
+	_, err = modelUser2.LastConnection()
+	c.Assert(err, gc.ErrorMatches, `never connected: "validusername@local"`)
+
+	// Connect modelUser2 and get last connection.
+	err = modelUser2.UpdateLastConnection()
+	c.Assert(err, jc.ErrorIsNil)
+	when, err = modelUser2.LastConnection()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(when.After(now) || when.Equal(now), jc.IsTrue)
+}
+
+func (s *ModelUserSuite) TestModelsForUserNone(c *gc.C) {
+	tag := names.NewUserTag("non-existent@remote")
+	models, err := s.State.ModelsForUser(tag)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(models, gc.HasLen, 0)
+}
+
+func (s *ModelUserSuite) TestModelsForUserNewLocalUser(c *gc.C) {
+	user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true})
+	models, err := s.State.ModelsForUser(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(models, gc.HasLen, 0)
+}
+
+func (s *ModelUserSuite) TestModelsForUser(c *gc.C) {
+	user := s.Factory.MakeUser(c, nil)
+	models, err := s.State.ModelsForUser(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(models, gc.HasLen, 1)
+	c.Assert(models[0].UUID(), gc.Equals, s.State.ModelUUID())
+	when, err := models[0].LastConnection()
+	c.Assert(err, jc.Satisfies, state.IsNeverConnectedError)
+	c.Assert(when.IsZero(), jc.IsTrue)
+}
+
+func (s *ModelUserSuite) newEnvWithOwner(c *gc.C, name string, owner names.UserTag) *state.Model {
+	// Don't use the factory to call MakeModel because it may at some
+	// time in the future be modified to do additional things. Instead call
+	// the state method directly to create a model to make sure that
+	// the owner is able to access the model.
+	uuid, err := utils.NewUUID()
+	c.Assert(err, jc.ErrorIsNil)
+	cfg := testing.CustomModelConfig(c, testing.Attrs{
+		"name": name,
+		"uuid": uuid.String(),
+	})
+	model, st, err := s.State.NewModel(cfg, owner)
+	c.Assert(err, jc.ErrorIsNil)
+	defer st.Close()
+	return model
+}
+
+func (s *ModelUserSuite) TestModelsForUserEnvOwner(c *gc.C) {
+	owner := names.NewUserTag("external@remote")
+	model := s.newEnvWithOwner(c, "test-model", owner)
+
+	models, err := s.State.ModelsForUser(owner)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(models, gc.HasLen, 1)
+	s.checkSameModel(c, models[0].Model, model)
+}
+
+func (s *ModelUserSuite) checkSameModel(c *gc.C, env1, env2 *state.Model) {
+	c.Check(env1.Name(), gc.Equals, env2.Name())
+	c.Check(env1.UUID(), gc.Equals, env2.UUID())
+}
+
+func (s *ModelUserSuite) newEnvWithUser(c *gc.C, name string, user names.UserTag) *state.Model {
+	envState := s.Factory.MakeModel(c, &factory.ModelParams{Name: name})
+	defer envState.Close()
+	newEnv, err := envState.Model()
+	c.Assert(err, jc.ErrorIsNil)
+
+	_, err = envState.AddModelUser(state.ModelUserSpec{
+		User: user, CreatedBy: newEnv.Owner()})
+	c.Assert(err, jc.ErrorIsNil)
+	return newEnv
+}
+
+func (s *ModelUserSuite) TestModelsForUserOfNewEnv(c *gc.C) {
+	userTag := names.NewUserTag("external@remote")
+	model := s.newEnvWithUser(c, "test-model", userTag)
+
+	models, err := s.State.ModelsForUser(userTag)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(models, gc.HasLen, 1)
+	s.checkSameModel(c, models[0].Model, model)
+}
+
+func (s *ModelUserSuite) TestModelsForUserMultiple(c *gc.C) {
+	userTag := names.NewUserTag("external@remote")
+	expected := []*state.Model{
+		s.newEnvWithUser(c, "user1", userTag),
+		s.newEnvWithUser(c, "user2", userTag),
+		s.newEnvWithUser(c, "user3", userTag),
+		s.newEnvWithOwner(c, "owner1", userTag),
+		s.newEnvWithOwner(c, "owner2", userTag),
+	}
+	sort.Sort(UUIDOrder(expected))
+
+	models, err := s.State.ModelsForUser(userTag)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(models, gc.HasLen, len(expected))
+	sort.Sort(userUUIDOrder(models))
+	for i := range expected {
+		s.checkSameModel(c, models[i].Model, expected[i])
+	}
+}
+
+func (s *ModelUserSuite) TestIsControllerAdministrator(c *gc.C) {
+	isAdmin, err := s.State.IsControllerAdministrator(s.Owner)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(isAdmin, jc.IsTrue)
+
+	user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true})
+	isAdmin, err = s.State.IsControllerAdministrator(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(isAdmin, jc.IsFalse)
+
+	s.Factory.MakeModelUser(c, &factory.ModelUserParams{User: user.UserTag().Canonical()})
+	isAdmin, err = s.State.IsControllerAdministrator(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(isAdmin, jc.IsTrue)
+}
+
+func (s *ModelUserSuite) TestIsControllerAdministratorFromOtherState(c *gc.C) {
+	user := s.Factory.MakeUser(c, &factory.UserParams{NoModelUser: true})
+
+	otherState := s.Factory.MakeModel(c, &factory.ModelParams{Owner: user.UserTag()})
+	defer otherState.Close()
+
+	isAdmin, err := otherState.IsControllerAdministrator(user.UserTag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(isAdmin, jc.IsFalse)
+
+	isAdmin, err = otherState.IsControllerAdministrator(s.Owner)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(isAdmin, jc.IsTrue)
+}
+
+// UUIDOrder is used to sort the models into a stable order
+type UUIDOrder []*state.Model
+
+func (a UUIDOrder) Len() int           { return len(a) }
+func (a UUIDOrder) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a UUIDOrder) Less(i, j int) bool { return a[i].UUID() < a[j].UUID() }
+
+// userUUIDOrder is used to sort the UserModels into a stable order
+type userUUIDOrder []*state.UserModel
+
+func (a userUUIDOrder) Len() int           { return len(a) }
+func (a userUUIDOrder) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a userUUIDOrder) Less(i, j int) bool { return a[i].UUID() < a[j].UUID() }

=== modified file 'src/github.com/juju/juju/state/mongo.go'
--- src/github.com/juju/juju/state/mongo.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/state/mongo.go	2016-03-22 15:18:22 +0000
@@ -4,9 +4,13 @@
 package state
 
 import (
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"github.com/juju/replicaset"
 	jujutxn "github.com/juju/txn"
 
 	"github.com/juju/juju/mongo"
+	"github.com/juju/juju/network"
 )
 
 // environMongo implements state/lease.Mongo to expose environ-filtered mongo
@@ -24,3 +28,84 @@
 func (m *environMongo) RunTransaction(buildTxn jujutxn.TransactionSource) error {
 	return m.state.run(buildTxn)
 }
+
+// Mongo Upgrade
+
+// HAMember holds information that identifies one member
+// of HA.
+type HAMember struct {
+	Tag           string
+	PublicAddress network.Address
+	Series        string
+}
+
+// UpgradeMongoParams holds information that identifies
+// the machines that are part of HA.
+type UpgradeMongoParams struct {
+	RsMembers []replicaset.Member
+
+	Master  HAMember
+	Members []HAMember
+}
+
+// SetUpgradeMongoMode writes a value in the state server to be picked up
+// by api servers to know that there is an upgrade ready to happen.
+func (st *State) SetUpgradeMongoMode(v mongo.Version) (UpgradeMongoParams, error) {
+	currentInfo, err := st.ControllerInfo()
+	if err != nil {
+		return UpgradeMongoParams{}, errors.Annotate(err, "could not obtain current controller information")
+	}
+	result := UpgradeMongoParams{}
+	machines := []*Machine{}
+	for _, mID := range currentInfo.VotingMachineIds {
+		m, err := st.Machine(mID)
+		if err != nil {
+			return UpgradeMongoParams{}, errors.Annotate(err, "cannot change all the replicas")
+		}
+		isMaster, err := mongo.IsMaster(st.session, m)
+		if err != nil {
+			return UpgradeMongoParams{}, errors.Annotatef(err, "cannot determine if machine %q is master", mID)
+		}
+		paddr, err := m.PublicAddress()
+		if err != nil {
+			return UpgradeMongoParams{}, errors.Annotatef(err, "cannot obtain public address for machine: %v", m)
+		}
+		tag := m.Tag()
+		mtag := tag.(names.MachineTag)
+		member := HAMember{
+			Tag:           mtag.Id(),
+			PublicAddress: paddr,
+			Series:        m.Series(),
+		}
+		if isMaster {
+			result.Master = member
+		} else {
+			result.Members = append(result.Members, member)
+		}
+		machines = append(machines, m)
+	}
+	rsMembers, err := replicaset.CurrentMembers(st.session)
+	if err != nil {
+		return UpgradeMongoParams{}, errors.Annotate(err, "cannot obtain current replicaset members")
+	}
+	masterRs, err := replicaset.MasterHostPort(st.session)
+	if err != nil {
+		return UpgradeMongoParams{}, errors.Annotate(err, "cannot determine master on replicaset members")
+	}
+	for _, m := range rsMembers {
+		if m.Address != masterRs {
+			result.RsMembers = append(result.RsMembers, m)
+		}
+	}
+	for _, m := range machines {
+		if err := m.SetStopMongoUntilVersion(v); err != nil {
+			return UpgradeMongoParams{}, errors.Annotate(err, "cannot trigger replica shutdown")
+		}
+	}
+	return result, nil
+}
+
+// ResumeReplication will add all passed members to replicaset.
+func (st *State) ResumeReplication(members []replicaset.Member) error {
+	return replicaset.Add(st.session, members...)
+}

=== added file 'src/github.com/juju/juju/state/multienv.go'
--- src/github.com/juju/juju/state/multienv.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/multienv.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,151 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package state
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+	"gopkg.in/mgo.v2/bson"
+)
+
+// This file contains utility functions related to documents and
+// collections that contain data for multiple models.
+
+// ensureModelUUID returns a model UUID prefixed document ID. The
+// prefix is only added if it isn't already there.
+func ensureModelUUID(modelUUID, id string) string {
+	prefix := modelUUID + ":"
+	if strings.HasPrefix(id, prefix) {
+		return id
+	}
+	return prefix + id
+}
+
+// ensureModelUUIDIfString will call ensureModelUUID, but only if the id
+// is a string. The id will be left untouched otherwise.
+func ensureModelUUIDIfString(modelUUID string, id interface{}) interface{} {
+	if id, ok := id.(string); ok {
+		return ensureModelUUID(modelUUID, id)
+	}
+	return id
+}
+
+// splitDocID returns the two parts of a model UUID prefixed
+// document ID. If the id is not in the expected format the final
+// return value will be false.
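
(Editor's aside, not part of the diff: the two mongo-upgrade helpers completed above are meant to bracket an upgrade of mongod on the HA machines. A minimal sketch of the intended call sequence, assuming an open *State in st and a target mongo.Version in v; the actual upgrade work on each machine is elided:)

	// Pause replication on the voting machines and learn who is master.
	params, err := st.SetUpgradeMongoMode(v)
	if err != nil {
		return errors.Trace(err)
	}
	// ... upgrade mongod on params.Master, then on each of params.Members ...
	// Finally, re-add the paused members to the replicaset.
	if err := st.ResumeReplication(params.RsMembers); err != nil {
		return errors.Trace(err)
	}
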
+func splitDocID(id string) (string, string, bool) {
+	parts := strings.SplitN(id, ":", 2)
+	if len(parts) != 2 {
+		return "", "", false
+	}
+	return parts[0], parts[1], true
+}
+
+const modelUUIDRequired = 1
+const noModelUUIDInInput = 2
+
+// mungeDocForMultiEnv takes the value of a txn.Op Insert or $set
+// Update and modifies it to be multi-model safe, returning the
+// modified document.
+func mungeDocForMultiEnv(doc interface{}, modelUUID string, modelUUIDFlags int) (bson.D, error) {
+	var bDoc bson.D
+	var err error
+	if doc != nil {
+		bDoc, err = toBsonD(doc)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	modelUUIDSeen := false
+	for i, elem := range bDoc {
+		switch elem.Name {
+		case "_id":
+			if id, ok := elem.Value.(string); ok {
+				bDoc[i].Value = ensureModelUUID(modelUUID, id)
+			} else if subquery, ok := elem.Value.(bson.D); ok {
+				munged, err := mungeIDSubQueryForMultiEnv(subquery, modelUUID)
+				if err != nil {
+					return nil, errors.Trace(err)
+				}
+				bDoc[i].Value = munged
+			}
+		case "model-uuid":
+			if modelUUIDFlags&noModelUUIDInInput > 0 {
+				return nil, errors.New("model-uuid is added automatically and should not be provided")
+			}
+			modelUUIDSeen = true
+			if elem.Value == "" {
+				bDoc[i].Value = modelUUID
+			} else if elem.Value != modelUUID {
+				return nil, errors.Errorf(`bad "model-uuid" value: expected %s, got %s`, modelUUID, elem.Value)
+			}
+		}
+	}
+	if modelUUIDFlags&modelUUIDRequired > 0 && !modelUUIDSeen {
+		bDoc = append(bDoc, bson.DocElem{"model-uuid", modelUUID})
+	}
+	return bDoc, nil
+}
+
+func mungeIDSubQueryForMultiEnv(doc interface{}, modelUUID string) (bson.D, error) {
+	var bDoc bson.D
+	var err error
+	if doc != nil {
+		bDoc, err = toBsonD(doc)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	for i, elem := range bDoc {
+		switch elem.Name {
+		case "$in":
+			var ids []string
+			switch values := elem.Value.(type) {
+			case []string:
+				ids = values
+			case []interface{}:
+				for _, value := range values {
+					id, ok := value.(string)
+					if !ok {
+						continue
+					}
+					ids = append(ids, id)
+				}
+				if len(ids) != len(values) {
+					// We expect the type to be consistently string, so...
+					continue
+				}
+			default:
+				continue
+			}
+
+			var fullIDs []string
+			for _, id := range ids {
+				fullID := ensureModelUUID(modelUUID, id)
+				fullIDs = append(fullIDs, fullID)
+			}
+			bDoc[i].Value = fullIDs
+		}
+	}
+	return bDoc, nil
+}
+
+// toBsonD converts an arbitrary value to a bson.D via marshaling
+// through BSON. This is still done even if the input is already a
+// bson.D so that we end up with a copy of the input.
+func toBsonD(doc interface{}) (bson.D, error) {
+	bytes, err := bson.Marshal(doc)
+	if err != nil {
+		return nil, errors.Annotate(err, "bson marshaling failed")
+	}
+	var out bson.D
+	err = bson.Unmarshal(bytes, &out)
+	if err != nil {
+		return nil, errors.Annotate(err, "bson unmarshaling failed")
+	}
+	return out, nil
+}

=== modified file 'src/github.com/juju/juju/state/multiwatcher.go'
--- src/github.com/juju/juju/state/multiwatcher.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/state/multiwatcher.go	2016-03-22 15:18:22 +0000
@@ -336,7 +336,7 @@
 }
 
 // newStore returns a Store instance holding information about the
-// current state of all entities in the environment.
+// current state of all entities in the model.
 // It is only exposed here for testing purposes.
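
(Editor's aside on the multienv.go helpers above: they implement a simple "<model-uuid>:<local-id>" convention for documents shared across models. A hypothetical round trip, with an invented UUID for illustration:)

	// ensureModelUUID is idempotent: the prefix is added at most once.
	id := ensureModelUUID("fd0f57d3-ef1c", "machine-0") // "fd0f57d3-ef1c:machine-0"
	id = ensureModelUUID("fd0f57d3-ef1c", id)           // unchanged
	// splitDocID recovers both halves; ok is false for unprefixed ids.
	uuid, local, ok := splitDocID(id) // "fd0f57d3-ef1c", "machine-0", true
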
func newStore() *multiwatcherStore { return &multiwatcherStore{ === modified file 'src/github.com/juju/juju/state/multiwatcher/multiwatcher.go' --- src/github.com/juju/juju/state/multiwatcher/multiwatcher.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/multiwatcher/multiwatcher.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ "fmt" "time" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/constraints" "github.com/juju/juju/instance" @@ -34,12 +34,12 @@ // EntityId uniquely identifies an entity being tracked by the // multiwatcherStore. type EntityId struct { - Kind string - EnvUUID string - Id string + Kind string + ModelUUID string + Id string } -// Delta holds details of a change to the environment. +// Delta holds details of a change to the model. type Delta struct { // If Removed is true, the entity has been removed; // otherwise it has been created or changed. @@ -90,8 +90,8 @@ return fmt.Errorf("Unexpected operation %q", operation) } switch entityKind { - case "environment": - d.Entity = new(EnvironmentInfo) + case "model": + d.Entity = new(ModelInfo) case "machine": d.Entity = new(MachineInfo) case "service": @@ -112,23 +112,10 @@ return json.Unmarshal(elements[2], &d.Entity) } -// When remote units leave scope, their ids will be noted in the -// Departed field, and no further events will be sent for those units. -type RelationUnitsChange struct { - Changed map[string]UnitSettings - Departed []string -} - -// UnitSettings holds information about a service unit's settings -// within a relation. -type UnitSettings struct { - Version int64 -} - // MachineInfo holds the information about a machine // that is tracked by multiwatcherStore. type MachineInfo struct { - EnvUUID string + ModelUUID string Id string InstanceId string Status Status @@ -146,12 +133,12 @@ } // EntityId returns a unique identifier for a machine across -// environments. +// models. func (i *MachineInfo) EntityId() EntityId { return EntityId{ - Kind: "machine", - EnvUUID: i.EnvUUID, - Id: i.Id, + Kind: "machine", + ModelUUID: i.ModelUUID, + Id: i.Id, } } @@ -169,7 +156,7 @@ // ServiceInfo holds the information about a service that is tracked // by multiwatcherStore. type ServiceInfo struct { - EnvUUID string + ModelUUID string Name string Exposed bool CharmURL string @@ -183,19 +170,19 @@ } // EntityId returns a unique identifier for a service across -// environments. +// models. func (i *ServiceInfo) EntityId() EntityId { return EntityId{ - Kind: "service", - EnvUUID: i.EnvUUID, - Id: i.Name, + Kind: "service", + ModelUUID: i.ModelUUID, + Id: i.Name, } } // UnitInfo holds the information about a unit // that is tracked by multiwatcherStore. type UnitInfo struct { - EnvUUID string + ModelUUID string Name string Service string Series string @@ -216,19 +203,19 @@ } // EntityId returns a unique identifier for a unit across -// environments. +// models. func (i *UnitInfo) EntityId() EntityId { return EntityId{ - Kind: "unit", - EnvUUID: i.EnvUUID, - Id: i.Name, + Kind: "unit", + ModelUUID: i.ModelUUID, + Id: i.Name, } } // ActionInfo holds the information about a action that is tracked by // multiwatcherStore. type ActionInfo struct { - EnvUUID string + ModelUUID string Id string Receiver string Name string @@ -242,19 +229,19 @@ } // EntityId returns a unique identifier for an action across -// environments. +// models. 
 func (i *ActionInfo) EntityId() EntityId {
 	return EntityId{
-		Kind:    "action",
-		EnvUUID: i.EnvUUID,
-		Id:      i.Id,
+		Kind:      "action",
+		ModelUUID: i.ModelUUID,
+		Id:        i.Id,
 	}
 }
 
 // RelationInfo holds the information about a relation that is tracked
 // by multiwatcherStore.
 type RelationInfo struct {
-	EnvUUID   string
+	ModelUUID string
 	Key       string
 	Id        int
 	Endpoints []Endpoint
@@ -267,30 +254,30 @@
 }
 
 // EntityId returns a unique identifier for a relation across
-// environments.
+// models.
 func (i *RelationInfo) EntityId() EntityId {
 	return EntityId{
-		Kind:    "relation",
-		EnvUUID: i.EnvUUID,
-		Id:      i.Key,
+		Kind:      "relation",
+		ModelUUID: i.ModelUUID,
+		Id:        i.Key,
 	}
 }
 
 // AnnotationInfo holds the information about an annotation that is
 // tracked by multiwatcherStore.
 type AnnotationInfo struct {
-	EnvUUID     string
+	ModelUUID   string
 	Tag         string
 	Annotations map[string]string
 }
 
 // EntityId returns a unique identifier for an annotation across
-// environments.
+// models.
 func (i *AnnotationInfo) EntityId() EntityId {
 	return EntityId{
-		Kind:    "annotation",
-		EnvUUID: i.EnvUUID,
-		Id:      i.Tag,
+		Kind:      "annotation",
+		ModelUUID: i.ModelUUID,
+		Id:        i.Tag,
 	}
 }
 
@@ -300,16 +287,13 @@
 
 const (
 	JobHostUnits        MachineJob = "JobHostUnits"
-	JobManageEnviron    MachineJob = "JobManageEnviron"
+	JobManageModel      MachineJob = "JobManageModel"
 	JobManageNetworking MachineJob = "JobManageNetworking"
-
-	// Deprecated in 1.18
-	JobManageStateDeprecated MachineJob = "JobManageState"
 )
 
 // NeedsState returns true if the job requires a state connection.
 func (job MachineJob) NeedsState() bool {
-	return job == JobManageEnviron
+	return job == JobManageModel
 }
 
 // AnyJobNeedsState returns true if any of the provided jobs
@@ -326,24 +310,24 @@
 // BlockInfo holds the information about a block that is tracked by
 // multiwatcherStore.
 type BlockInfo struct {
-	EnvUUID string
-	Id      string
-	Type    BlockType
-	Message string
-	Tag     string
+	ModelUUID string
+	Id        string
+	Type      BlockType
+	Message   string
+	Tag       string
 }
 
 // EntityId returns a unique identifier for a block across
-// environments.
+// models.
 func (i *BlockInfo) EntityId() EntityId {
 	return EntityId{
-		Kind:    "block",
-		EnvUUID: i.EnvUUID,
-		Id:      i.Id,
+		Kind:      "block",
+		ModelUUID: i.ModelUUID,
+		Id:        i.Id,
 	}
 }
 
-// BlockType values define environment block type.
+// BlockType values define model block type.
 type BlockType string
 
 const (
@@ -357,21 +341,21 @@
 	BlockChange BlockType = "BlockChange"
 )
 
-// EnvironmentInfo holds the information about an environment that is
+// ModelInfo holds the information about a model that is
 // tracked by multiwatcherStore.
-type EnvironmentInfo struct {
-	EnvUUID    string
+type ModelInfo struct {
+	ModelUUID  string
 	Name       string
 	Life       Life
 	Owner      string
 	ServerUUID string
 }
 
-// EntityId returns a unique identifier for an environment.
+// EntityId returns a unique identifier for a model.
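
(Editor's aside: after this rename, every info struct keys itself by the triple (Kind, ModelUUID, Id), so the same machine id in two models yields distinct store entries. A small illustration with invented UUIDs:)

	m0 := &multiwatcher.MachineInfo{ModelUUID: "uuid0", Id: "1"}
	m1 := &multiwatcher.MachineInfo{ModelUUID: "uuid1", Id: "1"}
	// Distinct EntityIds despite the shared machine id:
	//   {Kind: "machine", ModelUUID: "uuid0", Id: "1"}
	//   {Kind: "machine", ModelUUID: "uuid1", Id: "1"}
	distinct := m0.EntityId() != m1.EntityId() // true
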
+func (i *ModelInfo) EntityId() EntityId { return EntityId{ - Kind: "environment", - EnvUUID: i.EnvUUID, - Id: i.EnvUUID, + Kind: "model", + ModelUUID: i.ModelUUID, + Id: i.ModelUUID, } } === modified file 'src/github.com/juju/juju/state/multiwatcher/multiwatcher_internal_test.go' --- src/github.com/juju/juju/state/multiwatcher/multiwatcher_internal_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/multiwatcher/multiwatcher_internal_test.go 2016-03-22 15:18:22 +0000 @@ -18,7 +18,7 @@ _ EntityInfo = (*AnnotationInfo)(nil) _ EntityInfo = (*BlockInfo)(nil) _ EntityInfo = (*ActionInfo)(nil) - _ EntityInfo = (*EnvironmentInfo)(nil) + _ EntityInfo = (*ModelInfo)(nil) ) type ConstantsSuite struct{} @@ -33,7 +33,6 @@ c.Assert(AnyJobNeedsState(), jc.IsFalse) c.Assert(AnyJobNeedsState(JobHostUnits), jc.IsFalse) c.Assert(AnyJobNeedsState(JobManageNetworking), jc.IsFalse) - c.Assert(AnyJobNeedsState(JobManageStateDeprecated), jc.IsFalse) - c.Assert(AnyJobNeedsState(JobManageEnviron), jc.IsTrue) - c.Assert(AnyJobNeedsState(JobHostUnits, JobManageEnviron), jc.IsTrue) + c.Assert(AnyJobNeedsState(JobManageModel), jc.IsTrue) + c.Assert(AnyJobNeedsState(JobHostUnits, JobManageModel), jc.IsTrue) } === modified file 'src/github.com/juju/juju/state/multiwatcher_internal_test.go' --- src/github.com/juju/juju/state/multiwatcher_internal_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/multiwatcher_internal_test.go 2016-03-22 15:18:22 +0000 @@ -93,8 +93,8 @@ }, { about: "mark removed on existing entry", change: func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}) - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"}) + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}) + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "1"}) StoreIncRef(all, multiwatcher.EntityId{"machine", "uuid", "0"}) all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"}) }, @@ -102,13 +102,13 @@ expectContents: []entityEntry{{ creationRevno: 2, revno: 2, - info: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"}, + info: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "1"}, }, { creationRevno: 1, revno: 3, refCount: 1, removed: true, - info: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}, + info: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}, }}, }, { about: "mark removed on nonexistent entry", @@ -118,12 +118,12 @@ }, { about: "mark removed on already marked entry", change: func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}) - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"}) + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}) + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "1"}) StoreIncRef(all, multiwatcher.EntityId{"machine", "uuid", "0"}) all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"}) all.Update(&multiwatcher.MachineInfo{ - EnvUUID: "uuid", + ModelUUID: "uuid", Id: "1", InstanceId: "i-1", }) @@ -135,12 +135,12 @@ revno: 3, refCount: 1, removed: true, - info: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}, + info: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}, }, { creationRevno: 2, revno: 4, info: &multiwatcher.MachineInfo{ - EnvUUID: "uuid", + ModelUUID: "uuid", Id: "1", InstanceId: "i-1", }, @@ -148,14 +148,14 @@ }, { about: "mark removed on entry with zero ref count", change: func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}) + 
all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}) all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"}) }, expectRevno: 2, }, { about: "delete entry", change: func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}) + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}) all.delete(multiwatcher.EntityId{"machine", "uuid", "0"}) }, expectRevno: 1, @@ -206,8 +206,8 @@ var deltas []multiwatcher.Delta for i := 0; i < 3; i++ { m := &multiwatcher.MachineInfo{ - EnvUUID: "uuid", - Id: fmt.Sprint(i), + ModelUUID: "uuid", + Id: fmt.Sprint(i), } a.Update(m) deltas = append(deltas, multiwatcher.Delta{Entity: m}) @@ -225,7 +225,7 @@ // Update one machine and check we see the changes. rev := a.latestRevno m1 := &multiwatcher.MachineInfo{ - EnvUUID: "uuid", + ModelUUID: "uuid", Id: "1", InstanceId: "foo", } @@ -237,14 +237,14 @@ StoreIncRef(a, multiwatcher.EntityId{"machine", "uuid", "0"}) // Remove another machine and check we see it's removed. - m0 := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"} + m0 := &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"} a.Remove(m0.EntityId()) // Check that something that never saw m0 does not get // informed of its removal (even those the removed entity // is still in the list. c.Assert(a.ChangesSince(0), gc.DeepEquals, []multiwatcher.Delta{{ - Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "2"}, + Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "2"}, }, { Entity: m1, }}) @@ -264,7 +264,7 @@ func (s *storeSuite) TestGet(c *gc.C) { a := newStore() - m := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"} + m := &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"} a.Update(m) c.Assert(a.Get(m.EntityId()), gc.Equals, m) @@ -335,7 +335,7 @@ // If the Multiwatcher hasn't seen the item, then we shouldn't // decrement its ref count when it is stopped. sm := newStoreManager(newTestBacking(nil)) - mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"} + mi := &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"} sm.all.Update(mi) StoreIncRef(sm.all, multiwatcher.EntityId{"machine", "uuid", "0"}) w := &Multiwatcher{all: sm} @@ -355,7 +355,7 @@ // we shouldn't decrement its ref count when it is stopped. sm := newStoreManager(newTestBacking(nil)) - mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"} + mi := &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"} sm.all.Update(mi) id := multiwatcher.EntityId{"machine", "uuid", "0"} @@ -378,7 +378,7 @@ // If the Multiwatcher has already seen the item removed, then // we should decrement its ref count when it is stopped. sm := newStoreManager(newTestBacking(nil)) - mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"} + mi := &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"} sm.all.Update(mi) StoreIncRef(sm.all, multiwatcher.EntityId{"machine", "uuid", "0"}) w := &Multiwatcher{all: sm} @@ -396,7 +396,7 @@ // If the Multiwatcher hasn't seen the item at all, it should // leave the ref count untouched. 
sm := newStoreManager(newTestBacking(nil)) - mi := &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"} + mi := &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"} sm.all.Update(mi) StoreIncRef(sm.all, multiwatcher.EntityId{"machine", "uuid", "0"}) w := &Multiwatcher{all: sm} @@ -412,20 +412,20 @@ var respondTestChanges = [...]func(all *multiwatcherStore){ func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}) - }, - func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "1"}) - }, - func(all *multiwatcherStore) { - all.Update(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "2"}) + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}) + }, + func(all *multiwatcherStore) { + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "1"}) + }, + func(all *multiwatcherStore) { + all.Update(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "2"}) }, func(all *multiwatcherStore) { all.Remove(multiwatcher.EntityId{"machine", "uuid", "0"}) }, func(all *multiwatcherStore) { all.Update(&multiwatcher.MachineInfo{ - EnvUUID: "uuid", + ModelUUID: "uuid", Id: "1", InstanceId: "i-1", }) @@ -440,8 +440,8 @@ creationRevno: 3, revno: 3, info: &multiwatcher.MachineInfo{ - EnvUUID: "uuid", - Id: "2", + ModelUUID: "uuid", + Id: "2", }, }} respondTestFinalRevno = int64(len(respondTestChanges)) @@ -623,9 +623,9 @@ func (*storeManagerSuite) TestRun(c *gc.C) { b := newTestBacking([]multiwatcher.EntityInfo{ - &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}, - &multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "logging"}, - &multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "wordpress"}, + &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}, + &multiwatcher.ServiceInfo{ModelUUID: "uuid", Name: "logging"}, + &multiwatcher.ServiceInfo{ModelUUID: "uuid", Name: "wordpress"}, }) sm := newStoreManager(b) defer func() { @@ -633,29 +633,29 @@ }() w := &Multiwatcher{all: sm} checkNext(c, w, []multiwatcher.Delta{ - {Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}}, - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "logging"}}, - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid", Name: "wordpress"}}, + {Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid", Name: "logging"}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid", Name: "wordpress"}}, }, "") - b.updateEntity(&multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0", InstanceId: "i-0"}) + b.updateEntity(&multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0", InstanceId: "i-0"}) checkNext(c, w, []multiwatcher.Delta{ - {Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0", InstanceId: "i-0"}}, + {Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0", InstanceId: "i-0"}}, }, "") b.deleteEntity(multiwatcher.EntityId{"machine", "uuid", "0"}) checkNext(c, w, []multiwatcher.Delta{ - {Removed: true, Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid", Id: "0"}}, + {Removed: true, Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid", Id: "0"}}, }, "") } func (*storeManagerSuite) TestMultipleEnvironments(c *gc.C) { b := newTestBacking([]multiwatcher.EntityInfo{ - &multiwatcher.MachineInfo{EnvUUID: "uuid0", Id: "0"}, - &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging"}, - &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "wordpress"}, - &multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0"}, - &multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "logging"}, - 
&multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "wordpress"}, - &multiwatcher.MachineInfo{EnvUUID: "uuid2", Id: "0"}, + &multiwatcher.MachineInfo{ModelUUID: "uuid0", Id: "0"}, + &multiwatcher.ServiceInfo{ModelUUID: "uuid0", Name: "logging"}, + &multiwatcher.ServiceInfo{ModelUUID: "uuid0", Name: "wordpress"}, + &multiwatcher.MachineInfo{ModelUUID: "uuid1", Id: "0"}, + &multiwatcher.ServiceInfo{ModelUUID: "uuid1", Name: "logging"}, + &multiwatcher.ServiceInfo{ModelUUID: "uuid1", Name: "wordpress"}, + &multiwatcher.MachineInfo{ModelUUID: "uuid2", Id: "0"}, }) sm := newStoreManager(b) defer func() { @@ -663,25 +663,25 @@ }() w := &Multiwatcher{all: sm} checkNext(c, w, []multiwatcher.Delta{ - {Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid0", Id: "0"}}, - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging"}}, - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "wordpress"}}, - {Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0"}}, - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "logging"}}, - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid1", Name: "wordpress"}}, - {Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid2", Id: "0"}}, + {Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid0", Id: "0"}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid0", Name: "logging"}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid0", Name: "wordpress"}}, + {Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid1", Id: "0"}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid1", Name: "logging"}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid1", Name: "wordpress"}}, + {Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid2", Id: "0"}}, }, "") - b.updateEntity(&multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0", InstanceId: "i-0"}) + b.updateEntity(&multiwatcher.MachineInfo{ModelUUID: "uuid1", Id: "0", InstanceId: "i-0"}) checkNext(c, w, []multiwatcher.Delta{ - {Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid1", Id: "0", InstanceId: "i-0"}}, + {Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid1", Id: "0", InstanceId: "i-0"}}, }, "") b.deleteEntity(multiwatcher.EntityId{"machine", "uuid2", "0"}) checkNext(c, w, []multiwatcher.Delta{ - {Removed: true, Entity: &multiwatcher.MachineInfo{EnvUUID: "uuid2", Id: "0"}}, + {Removed: true, Entity: &multiwatcher.MachineInfo{ModelUUID: "uuid2", Id: "0"}}, }, "") - b.updateEntity(&multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging", Exposed: true}) + b.updateEntity(&multiwatcher.ServiceInfo{ModelUUID: "uuid0", Name: "logging", Exposed: true}) checkNext(c, w, []multiwatcher.Delta{ - {Entity: &multiwatcher.ServiceInfo{EnvUUID: "uuid0", Name: "logging", Exposed: true}}, + {Entity: &multiwatcher.ServiceInfo{ModelUUID: "uuid0", Name: "logging", Exposed: true}}, }, "") } @@ -823,14 +823,14 @@ } func (b *storeManagerTestBacking) Changed(all *multiwatcherStore, change watcher.Change) error { - envUUID, changeId, ok := splitDocID(change.Id.(string)) + modelUUID, changeId, ok := splitDocID(change.Id.(string)) if !ok { return errors.Errorf("unexpected id format: %v", change.Id) } id := multiwatcher.EntityId{ - Kind: change.C, - EnvUUID: envUUID, - Id: changeId, + Kind: change.C, + ModelUUID: modelUUID, + Id: changeId, } info, err := b.fetch(id) if err == mgo.ErrNotFound { @@ -896,7 +896,7 @@ if b.watchc != nil { b.watchc <- watcher.Change{ C: id.Kind, - Id: ensureEnvUUID(id.EnvUUID, id.Id), + Id: ensureModelUUID(id.ModelUUID, id.Id), Revno: b.txnRevno, // This is actually ignored, but fill it in anyway. 
} } @@ -916,7 +916,7 @@ if b.watchc != nil { b.watchc <- watcher.Change{ C: id.Kind, - Id: ensureEnvUUID(id.EnvUUID, id.Id), + Id: ensureModelUUID(id.ModelUUID, id.Id), Revno: -1, } } === modified file 'src/github.com/juju/juju/state/networkinterfaces.go' --- src/github.com/juju/juju/state/networkinterfaces.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/networkinterfaces.go 2016-03-22 15:18:22 +0000 @@ -48,7 +48,7 @@ // a given network. type networkInterfaceDoc struct { Id bson.ObjectId `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` MACAddress string `bson:"macaddress"` InterfaceName string `bson:"interfacename"` NetworkName string `bson:"networkname"` @@ -182,10 +182,10 @@ return &NetworkInterface{st, *doc} } -func newNetworkInterfaceDoc(machineID, envUUID string, args NetworkInterfaceInfo) *networkInterfaceDoc { +func newNetworkInterfaceDoc(machineID, modelUUID string, args NetworkInterfaceInfo) *networkInterfaceDoc { return &networkInterfaceDoc{ Id: bson.NewObjectId(), - EnvUUID: envUUID, + ModelUUID: modelUUID, MachineId: machineID, MACAddress: args.MACAddress, InterfaceName: args.InterfaceName, @@ -206,10 +206,10 @@ if err != nil { return err } - ops = append(ops, assertEnvAliveOp(ni.st.EnvironUUID())) + ops = append(ops, assertModelAliveOp(ni.st.ModelUUID())) err = ni.st.runTransaction(ops) if err != nil { - if err := checkEnvLife(ni.st); err != nil { + if err := checkModeLife(ni.st); err != nil { return errors.Trace(err) } return onAbort(err, errors.NotFoundf("network interface")) === modified file 'src/github.com/juju/juju/state/networks.go' --- src/github.com/juju/juju/state/networks.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/networks.go 2016-03-22 15:18:22 +0000 @@ -42,7 +42,7 @@ // Name is the network's name. It should be one of the machine's // included networks. Name string `bson:"name"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` ProviderId string `bson:"providerid"` CIDR string `bson:"cidr"` VLANTag int `bson:"vlantag"` @@ -55,7 +55,7 @@ func (st *State) newNetworkDoc(args NetworkInfo) *networkDoc { return &networkDoc{ DocID: st.docID(args.Name), - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), Name: args.Name, ProviderId: string(args.ProviderId), CIDR: args.CIDR, === modified file 'src/github.com/juju/juju/state/open.go' --- src/github.com/juju/juju/state/open.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/open.go 2016-03-22 15:18:22 +0000 @@ -20,26 +20,26 @@ // Open connects to the server described by the given // info, waits for it to be initialized, and returns a new State -// representing the environment connected to. +// representing the model connected to. // // A policy may be provided, which will be used to validate and // modify behaviour of certain operations in state. A nil policy // may be provided. // // Open returns unauthorizedError if access is unauthorized. 
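
(Editor's aside: the hunk below changes Open to take a names.ModelTag. A minimal connection sketch under the new signature; mongoInfo is assumed to be a populated *mongo.MongoInfo, and mongo.DefaultDialOpts is assumed to be available as elsewhere in this tree:)

	st, err := state.Open(names.NewModelTag(modelUUID), mongoInfo, mongo.DefaultDialOpts(), nil)
	if err != nil {
		return errors.Trace(err)
	}
	defer st.Close()
	// A nil Policy is permitted, per the doc comment above.
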
-func Open(tag names.EnvironTag, info *mongo.MongoInfo, opts mongo.DialOpts, policy Policy) (*State, error) {
+func Open(tag names.ModelTag, info *mongo.MongoInfo, opts mongo.DialOpts, policy Policy) (*State, error) {
 	st, err := open(tag, info, opts, policy)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
-	if _, err := st.Environment(); err != nil {
+	if _, err := st.Model(); err != nil {
 		if err := st.Close(); err != nil {
-			logger.Errorf("error closing state for unreadable environment %s: %v", tag.Id(), err)
+			logger.Errorf("error closing state for unreadable model %s: %v", tag.Id(), err)
 		}
-		return nil, errors.Annotatef(err, "cannot read environment %s", tag.Id())
+		return nil, errors.Annotatef(err, "cannot read model %s", tag.Id())
 	}
 
-	// State should only be Opened on behalf of a state server environ; all
+	// State should only be Opened on behalf of a controller environ; all
 	// other *States should be created via ForEnviron.
 	if err := st.start(tag); err != nil {
 		return nil, errors.Trace(err)
@@ -47,7 +47,7 @@
 	return st, nil
 }
 
-func open(tag names.EnvironTag, info *mongo.MongoInfo, opts mongo.DialOpts, policy Policy) (*State, error) {
+func open(tag names.ModelTag, info *mongo.MongoInfo, opts mongo.DialOpts, policy Policy) (*State, error) {
 	logger.Infof("opening state, mongo addresses: %q; entity %v", info.Addrs, info.Tag)
 	logger.Debugf("dialing mongo")
 	session, err := mongo.DialWithInfo(info.Info, opts)
@@ -64,17 +64,17 @@
 	logger.Debugf("mongodb login successful")
 
 	// In rare circumstances, we may be upgrading from pre-1.23, and not have the
-	// environment UUID available. In that case we need to infer what it might be;
+	// model UUID available. In that case we need to infer what it might be;
 	// we depend on the assumption that this is the only circumstance in which
 	// the UUID might not be known.
 	if tag.Id() == "" {
-		logger.Warningf("creating state without environment tag; inferring bootstrap environment")
-		ssInfo, err := readRawStateServerInfo(session)
+		logger.Warningf("creating state without model tag; inferring bootstrap model")
+		ssInfo, err := readRawControllerInfo(session)
 		if err != nil {
 			session.Close()
 			return nil, errors.Trace(err)
 		}
-		tag = ssInfo.EnvironmentTag
+		tag = ssInfo.ModelTag
 	}
 
 	st, err := newState(tag, session, info, policy)
@@ -101,15 +101,15 @@
 }
 
 // Initialize sets up an initial empty state and returns it.
-// This needs to be performed only once for the initial state server environment.
+// This needs to be performed only once for the initial controller model.
 // It returns unauthorizedError if access is unauthorized.
 func Initialize(owner names.UserTag, info *mongo.MongoInfo, cfg *config.Config, opts mongo.DialOpts, policy Policy) (_ *State, err error) {
 	uuid, ok := cfg.UUID()
 	if !ok {
-		return nil, errors.Errorf("environment uuid was not supplied")
+		return nil, errors.Errorf("model uuid was not supplied")
 	}
-	envTag := names.NewEnvironTag(uuid)
-	st, err := open(envTag, info, opts, policy)
+	modelTag := names.NewModelTag(uuid)
+	st, err := open(modelTag, info, opts, policy)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
@@ -121,18 +121,18 @@
 		}
 	}()
 
-	// A valid environment is used as a signal that the
+	// A valid model is used as a signal that the
 	// state has already been initialized. If this is the case
 	// do nothing.
- if _, err := st.Environment(); err == nil { + if _, err := st.Model(); err == nil { return nil, errors.New("already initialized") } else if !errors.IsNotFound(err) { return nil, errors.Trace(err) } - // When creating the state server environment, the new environment - // UUID is also used as the state server UUID. - logger.Infof("initializing state server environment %s", uuid) + // When creating the controller model, the new model + // UUID is also used as the controller UUID. + logger.Infof("initializing controller model %s", uuid) ops, err := st.envSetupOps(cfg, uuid, uuid, owner) if err != nil { return nil, errors.Trace(err) @@ -140,60 +140,60 @@ ops = append(ops, createInitialUserOp(st, owner, info.Password), txn.Op{ - C: stateServersC, - Id: environGlobalKey, + C: controllersC, + Id: modelGlobalKey, Assert: txn.DocMissing, - Insert: &stateServersDoc{ - EnvUUID: st.EnvironUUID(), + Insert: &controllersDoc{ + ModelUUID: st.ModelUUID(), }, }, txn.Op{ - C: stateServersC, + C: controllersC, Id: apiHostPortsKey, Assert: txn.DocMissing, Insert: &apiHostPortsDoc{}, }, txn.Op{ - C: stateServersC, + C: controllersC, Id: stateServingInfoKey, Assert: txn.DocMissing, Insert: &StateServingInfo{}, }, txn.Op{ - C: stateServersC, - Id: hostedEnvCountKey, + C: controllersC, + Id: hostedModelCountKey, Assert: txn.DocMissing, - Insert: &hostedEnvCountDoc{}, + Insert: &hostedModelCountDoc{}, }, ) if err := st.runTransaction(ops); err != nil { return nil, errors.Trace(err) } - if err := st.start(envTag); err != nil { + if err := st.start(modelTag); err != nil { return nil, errors.Trace(err) } return st, nil } -func (st *State) envSetupOps(cfg *config.Config, envUUID, serverUUID string, owner names.UserTag) ([]txn.Op, error) { - if err := checkEnvironConfig(cfg); err != nil { +func (st *State) envSetupOps(cfg *config.Config, modelUUID, serverUUID string, owner names.UserTag) ([]txn.Op, error) { + if err := checkModelConfig(cfg); err != nil { return nil, errors.Trace(err) } - // When creating the state server environment, the new environment - // UUID is also used as the state server UUID. + // When creating the controller model, the new model + // UUID is also used as the controller UUID. if serverUUID == "" { - serverUUID = envUUID + serverUUID = modelUUID } - envUserOp, _ := createEnvUserOpAndDoc(envUUID, owner, owner, owner.Name()) + modelUserOp := createModelUserOp(modelUUID, owner, owner, owner.Name(), nowToTheSecond(), false) ops := []txn.Op{ - createConstraintsOp(st, environGlobalKey, constraints.Value{}), - createSettingsOp(st, environGlobalKey, cfg.AllAttrs()), - incHostedEnvironCountOp(), - createEnvironmentOp(st, owner, cfg.Name(), envUUID, serverUUID), - createUniqueOwnerEnvNameOp(owner, cfg.Name()), - envUserOp, + createConstraintsOp(st, modelGlobalKey, constraints.Value{}), + createSettingsOp(modelGlobalKey, cfg.AllAttrs()), + incHostedModelCountOp(), + createModelOp(st, owner, cfg.Name(), modelUUID, serverUUID), + createUniqueOwnerModelNameOp(owner, cfg.Name()), + modelUserOp, } return ops, nil } @@ -229,12 +229,12 @@ } // newState creates an incomplete *State, with a configured watcher but no -// pwatcher, leadershipManager, or serverTag. You must start() the returned +// pwatcher, leadershipManager, or controllerTag. You must start() the returned // *State before it will function correctly. 
-func newState(environTag names.EnvironTag, session *mgo.Session, mongoInfo *mongo.MongoInfo, policy Policy) (_ *State, resultErr error) {
+func newState(modelTag names.ModelTag, session *mgo.Session, mongoInfo *mongo.MongoInfo, policy Policy) (_ *State, resultErr error) {
 	// Set up database.
 	rawDB := session.DB(jujuDB)
-	database, err := allCollections().Load(rawDB, environTag.Id())
+	database, err := allCollections().Load(rawDB, modelTag.Id())
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
@@ -244,12 +244,12 @@
 
 	// Create State.
 	return &State{
-		environTag: environTag,
-		mongoInfo:  mongoInfo,
-		session:    session,
-		database:   database,
-		policy:     policy,
-		watcher:    watcher.New(rawDB.C(txnLogC)),
+		modelTag:  modelTag,
+		mongoInfo: mongoInfo,
+		session:   session,
+		database:  database,
+		policy:    policy,
+		watcher:   watcher.New(rawDB.C(txnLogC)),
 	}, nil
 }
 
@@ -284,15 +284,19 @@
 		st.leadershipManager.Kill()
 		handle("leadership manager", st.leadershipManager.Wait())
 	}
+	if st.singularManager != nil {
+		st.singularManager.Kill()
+		handle("singular manager", st.singularManager.Wait())
+	}
 	st.mu.Lock()
 	if st.allManager != nil {
 		handle("allwatcher manager", st.allManager.Stop())
 	}
-	if st.allEnvManager != nil {
-		handle("allenvwatcher manager", st.allEnvManager.Stop())
+	if st.allModelManager != nil {
+		handle("allModelWatcher manager", st.allModelManager.Stop())
 	}
-	if st.allEnvWatcherBacking != nil {
-		handle("allenvwatcher backing", st.allEnvWatcherBacking.Release())
+	if st.allModelWatcherBacking != nil {
+		handle("allModelWatcher backing", st.allModelWatcherBacking.Release())
 	}
 	st.session.Close()
 	st.mu.Unlock()

=== added file 'src/github.com/juju/juju/state/package_test.go'
--- src/github.com/juju/juju/state/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/state/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,19 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package state_test
+
+import (
+	stdtesting "testing"
+
+	"github.com/juju/testing"
+
+	coretesting "github.com/juju/juju/testing"
+)
+
+func TestPackage(t *stdtesting.T) {
+	if testing.RaceEnabled {
+		t.Skip("skipping package under -race, see LP 1519095")
+	}
+	coretesting.MgoTestPackage(t)
+}

=== modified file 'src/github.com/juju/juju/state/payloads.go'
--- src/github.com/juju/juju/state/payloads.go	2016-03-14 14:26:14 +0000
+++ src/github.com/juju/juju/state/payloads.go	2016-03-22 15:18:22 +0000
@@ -12,7 +12,7 @@
 // TODO(ericsnow) Track juju-level status in the status collection.
 
 // EnvPayloads exposes high-level interaction with all payloads
-// in an environment.
+// in a model.
 type EnvPayloads interface {
 	// ListAll builds the list of registered payloads in the env and returns it.
ListAll() ([]payload.FullPayloadInfo, error) === modified file 'src/github.com/juju/juju/state/payloads_test.go' --- src/github.com/juju/juju/state/payloads_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/state/payloads_test.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "github.com/juju/errors" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/component/all" "github.com/juju/juju/payload" === modified file 'src/github.com/juju/juju/state/persistence.go' --- src/github.com/juju/juju/state/persistence.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/persistence.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,9 @@ "github.com/juju/errors" jujutxn "github.com/juju/txn" "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/txn" + + "github.com/juju/juju/state/storage" ) // Persistence exposes persistence-layer functionality of State. @@ -14,12 +17,21 @@ // One populates doc with the document corresponding to the given // ID. Missing documents result in errors.NotFound. One(collName, id string, doc interface{}) error + // All populates docs with the list of the documents corresponding // to the provided query. All(collName string, query, docs interface{}) error + // Run runs the transaction generated by the provided factory // function. It may be retried several times. Run(transactions jujutxn.TransactionSource) error + + // NewStorage returns a new blob storage for the environment. + NewStorage() storage.Storage + + // IncCharmModifiedVersionOps returns the operations necessary to increment + // the CharmModifiedVersion field for the given service. + IncCharmModifiedVersionOps(serviceID string) []txn.Op } type statePersistence struct { @@ -64,3 +76,18 @@ } return nil } + +// NewStorage returns a new blob storage for the environment. +func (sp *statePersistence) NewStorage() storage.Storage { + envUUID := sp.st.ModelUUID() + // TODO(ericsnow) Copy the session? + session := sp.st.session + store := storage.NewStorage(envUUID, session) + return store +} + +// IncCharmModifiedVersionOps returns the operations necessary to increment the +// CharmModifiedVersion field for the given service. +func (sp *statePersistence) IncCharmModifiedVersionOps(serviceID string) []txn.Op { + return incCharmModifiedVersionOps(serviceID) +} === modified file 'src/github.com/juju/juju/state/policy.go' --- src/github.com/juju/juju/state/policy.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/policy.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,7 @@ "github.com/juju/juju/constraints" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" + "github.com/juju/juju/state/cloudimagemetadata" ) // Policy is an interface provided to State that may @@ -34,9 +35,9 @@ // or an error. EnvironCapability(*config.Config) (EnvironCapability, error) - // ConstraintsValidator takes a *config.Config and returns a - // constraints.Validator or an error. - ConstraintsValidator(*config.Config) (constraints.Validator, error) + // ConstraintsValidator takes a *config.Config and SupportedArchitecturesQuerier + // to return a constraints.Validator or an error. + ConstraintsValidator(*config.Config, SupportedArchitecturesQuerier) (constraints.Validator, error) // InstanceDistributor takes a *config.Config and returns an // InstanceDistributor or an error. 
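
(Editor's aside: the ConstraintsValidator change above threads a SupportedArchitecturesQuerier through to the policy. A hypothetical implementation sketch; myPolicy is invented, and RegisterVocabulary and constraints.Arch are assumed from the juju constraints package:)

	func (p *myPolicy) ConstraintsValidator(cfg *config.Config, q state.SupportedArchitecturesQuerier) (constraints.Validator, error) {
		validator := constraints.NewValidator()
		// Limit the "arch" constraint vocabulary to architectures for
		// which cloud image metadata actually exists.
		arches, err := q.SupportedArchitectures("released", "")
		if err != nil {
			return nil, errors.Trace(err)
		}
		validator.RegisterVocabulary(constraints.Arch, arches)
		return validator, nil
	}
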
@@ -48,7 +49,7 @@
 type Prechecker interface {
 	// PrecheckInstance performs a preflight check on the specified
 	// series and constraints, ensuring that they are possibly valid for
-	// creating an instance in this environment.
+	// creating an instance in this model.
 	//
 	// PrecheckInstance is best effort, and not guaranteed to eliminate
 	// all invalid parameters. If PrecheckInstance returns nil, it is not
@@ -64,14 +65,14 @@
 }
 
 // EnvironCapability implements access to metadata about the capabilities
-// of an environment.
+// of a model.
 type EnvironCapability interface {
 	// SupportedArchitectures returns the image architectures which can
-	// be hosted by this environment.
+	// be hosted by this model.
 	SupportedArchitectures() ([]string, error)
 
 	// SupportsUnitAssignment returns an error which, if non-nil, indicates
-	// that the environment does not support unit placement. If the environment
+	// that the model does not support unit placement. If the model
 	// does not support unit placement, then machines may not be created
 	// without units, and units cannot be placed explicitly.
 	SupportsUnitPlacement() error
@@ -83,7 +84,7 @@
 	if st.policy == nil {
 		return nil
 	}
-	cfg, err := st.EnvironConfig()
+	cfg, err := st.ModelConfig()
 	if err != nil {
 		return err
 	}
@@ -101,16 +102,19 @@
 
 func (st *State) constraintsValidator() (constraints.Validator, error) {
 	// Default behaviour is to simply use a standard validator with
-	// no environment specific behaviour built in.
+	// no model-specific behaviour built in.
 	defaultValidator := constraints.NewValidator()
 	if st.policy == nil {
 		return defaultValidator, nil
 	}
-	cfg, err := st.EnvironConfig()
+	cfg, err := st.ModelConfig()
 	if err != nil {
 		return nil, err
 	}
-	validator, err := st.policy.ConstraintsValidator(cfg)
+	validator, err := st.policy.ConstraintsValidator(
+		cfg,
+		&cloudimagemetadata.MetadataArchitectureQuerier{st.CloudImageMetadataStorage},
+	)
 	if errors.IsNotImplemented(err) {
 		return defaultValidator, nil
 	} else if err != nil {
@@ -129,7 +133,7 @@
 	if err != nil {
 		return constraints.Value{}, err
 	}
-	envCons, err := st.EnvironConstraints()
+	envCons, err := st.ModelConstraints()
 	if err != nil {
 		return constraints.Value{}, err
 	}
@@ -137,7 +141,7 @@
 }
 
 // validateConstraints returns an error if the given constraints are not valid for the
-// current environment, and also any unsupported attributes.
+// current model, and also any unsupported attributes.
 func (st *State) validateConstraints(cons constraints.Value) ([]string, error) {
 	validator, err := st.constraintsValidator()
 	if err != nil {
@@ -172,7 +176,7 @@
 	if st.policy == nil {
 		return nil
 	}
-	cfg, err := st.EnvironConfig()
+	cfg, err := st.ModelConfig()
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -205,3 +209,11 @@
 	// a new machine will be allocated.
 	DistributeInstances(candidates, distributionGroup []instance.Id) ([]instance.Id, error)
 }
+
+// SupportedArchitecturesQuerier implements access to stored cloud image metadata
+// to retrieve a collection of supported architectures.
+type SupportedArchitecturesQuerier interface {
+	// SupportedArchitectures returns a collection of unique architectures
+	// from cloud image metadata that satisfy passed in filtering parameters.
+ SupportedArchitectures(stream, region string) ([]string, error) +} === modified file 'src/github.com/juju/juju/state/pool.go' --- src/github.com/juju/juju/state/pool.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/pool.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ ) // NewStatePool returns a new StatePool instance. It takes a State -// connected to the system (state server environment). +// connected to the system (controller model). func NewStatePool(systemState *State) *StatePool { return &StatePool{ systemState: systemState, @@ -19,7 +19,7 @@ } } -// StatePool is a simple cache of State instances for multiple environments. +// StatePool is a simple cache of State instances for multiple models. type StatePool struct { systemState *State // mu protects pool @@ -27,26 +27,26 @@ pool map[string]*State } -// Get returns a State for a given environment from the pool, creating +// Get returns a State for a given model from the pool, creating // one if required. -func (p *StatePool) Get(envUUID string) (*State, error) { - if envUUID == p.systemState.EnvironUUID() { +func (p *StatePool) Get(modelUUID string) (*State, error) { + if modelUUID == p.systemState.ModelUUID() { return p.systemState, nil } p.mu.Lock() defer p.mu.Unlock() - st, ok := p.pool[envUUID] + st, ok := p.pool[modelUUID] if ok { return st, nil } - st, err := p.systemState.ForEnviron(names.NewEnvironTag(envUUID)) + st, err := p.systemState.ForModel(names.NewModelTag(modelUUID)) if err != nil { - return nil, errors.Annotatef(err, "failed to create state for environment %v", envUUID) + return nil, errors.Annotatef(err, "failed to create state for model %v", modelUUID) } - p.pool[envUUID] = st + p.pool[modelUUID] = st return st, nil } === modified file 'src/github.com/juju/juju/state/pool_test.go' --- src/github.com/juju/juju/state/pool_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/pool_test.go 2016-03-22 15:18:22 +0000 @@ -13,55 +13,55 @@ type statePoolSuite struct { statetesting.StateSuite - State1, State2 *state.State - EnvUUID, EnvUUID1, EnvUUID2 string + State1, State2 *state.State + ModelUUID, ModelUUID1, ModelUUID2 string } var _ = gc.Suite(&statePoolSuite{}) func (s *statePoolSuite) SetUpTest(c *gc.C) { s.StateSuite.SetUpTest(c) - s.EnvUUID = s.State.EnvironUUID() + s.ModelUUID = s.State.ModelUUID() - s.State1 = s.Factory.MakeEnvironment(c, nil) + s.State1 = s.Factory.MakeModel(c, nil) s.AddCleanup(func(*gc.C) { s.State1.Close() }) - s.EnvUUID1 = s.State1.EnvironUUID() + s.ModelUUID1 = s.State1.ModelUUID() - s.State2 = s.Factory.MakeEnvironment(c, nil) + s.State2 = s.Factory.MakeModel(c, nil) s.AddCleanup(func(*gc.C) { s.State2.Close() }) - s.EnvUUID2 = s.State2.EnvironUUID() + s.ModelUUID2 = s.State2.ModelUUID() } func (s *statePoolSuite) TestGet(c *gc.C) { p := state.NewStatePool(s.State) defer p.Close() - st1, err := p.Get(s.EnvUUID1) + st1, err := p.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) - c.Assert(st1.EnvironUUID(), gc.Equals, s.EnvUUID1) + c.Assert(st1.ModelUUID(), gc.Equals, s.ModelUUID1) - st2, err := p.Get(s.EnvUUID2) + st2, err := p.Get(s.ModelUUID2) c.Assert(err, jc.ErrorIsNil) - c.Assert(st2.EnvironUUID(), gc.Equals, s.EnvUUID2) + c.Assert(st2.ModelUUID(), gc.Equals, s.ModelUUID2) // Check that the same instances are returned // when a State for the same env is re-requested. 
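The behaviour asserted in the surrounding tests reduces to a few lines; a sketch, where systemState and modelUUID stand in for values from the suite:

    pool := state.NewStatePool(systemState)
    defer pool.Close()
    st1, err := pool.Get(modelUUID) // opens a State for the model and caches it
    st2, err := pool.Get(modelUUID) // cache hit: st2 == st1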
- st1_, err := p.Get(s.EnvUUID1) + st1_, err := p.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) c.Assert(st1_, gc.Equals, st1) - st2_, err := p.Get(s.EnvUUID2) + st2_, err := p.Get(s.ModelUUID2) c.Assert(err, jc.ErrorIsNil) c.Assert(st2_, gc.Equals, st2) } -func (s *statePoolSuite) TestGetWithStateServerEnv(c *gc.C) { +func (s *statePoolSuite) TestGetWithControllerEnv(c *gc.C) { p := state.NewStatePool(s.State) defer p.Close() - // When a State for the state server env is requested, the same + // When a State for the controller env is requested, the same // State that was original passed in should be returned. - st0, err := p.Get(s.EnvUUID) + st0, err := p.Get(s.ModelUUID) c.Assert(err, jc.ErrorIsNil) c.Assert(st0, gc.Equals, s.State) } @@ -79,27 +79,27 @@ defer p.Close() // Get some State instances. - st1, err := p.Get(s.EnvUUID1) + st1, err := p.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) - st2, err := p.Get(s.EnvUUID1) + st2, err := p.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) // Now close them. err = p.Close() c.Assert(err, jc.ErrorIsNil) - // Confirm that state server State isn't closed. - _, err = s.State.Environment() + // Confirm that controller State isn't closed. + _, err = s.State.Model() c.Assert(err, jc.ErrorIsNil) // Ensure that new ones are returned if further States are // requested. - st1_, err := p.Get(s.EnvUUID1) + st1_, err := p.Get(s.ModelUUID1) c.Assert(err, jc.ErrorIsNil) c.Assert(st1_, gc.Not(gc.Equals), st1) - st2_, err := p.Get(s.EnvUUID2) + st2_, err := p.Get(s.ModelUUID2) c.Assert(err, jc.ErrorIsNil) c.Assert(st2_, gc.Not(gc.Equals), st2) } === modified file 'src/github.com/juju/juju/state/ports.go' --- src/github.com/juju/juju/state/ports.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/state/ports.go 2016-03-22 15:18:22 +0000 @@ -142,7 +142,7 @@ // portsDoc represents the state of ports opened on machines for networks type portsDoc struct { DocID string `bson:"_id"` - EnvUUID string `bson:"env-uuid"` + ModelUUID string `bson:"model-uuid"` MachineID string `bson:"machine-id"` NetworkName string `bson:"network-name"` Ports []PortRange `bson:"ports"` @@ -200,7 +200,7 @@ buildTxn := func(attempt int) ([]txn.Op, error) { if attempt > 0 { - if err := checkEnvLife(p.st); err != nil { + if err := checkModeLife(p.st); err != nil { return nil, errors.Trace(err) } if err = ports.Refresh(); errors.IsNotFound(err) { @@ -230,7 +230,7 @@ } ops := []txn.Op{ - assertEnvAliveOp(p.st.EnvironUUID()), + assertModelAliveOp(p.st.ModelUUID()), } if ports.areNew { // Create a new document. @@ -537,7 +537,7 @@ DocID: st.docID(key), MachineID: machineId, NetworkName: networkName, - EnvUUID: st.EnvironUUID(), + ModelUUID: st.ModelUUID(), } ports = &Ports{st, doc, true} } else if err != nil { === modified file 'src/github.com/juju/juju/state/prechecker_test.go' --- src/github.com/juju/juju/state/prechecker_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/state/prechecker_test.go 2016-03-22 15:18:22 +0000 @@ -49,7 +49,7 @@ func (s *PrecheckerSuite) TestPrecheckInstance(c *gc.C) { // PrecheckInstance should be called with the specified // series and placement, and the specified constraints - // merged with the environment constraints, when attempting + // merged with the model constraints, when attempting // to create an instance. 
envCons := constraints.MustParse("mem=4G") placement := "abc123" @@ -100,7 +100,7 @@ } func (s *PrecheckerSuite) addOneMachine(c *gc.C, envCons constraints.Value, placement string) (state.MachineTemplate, error) { - err := s.State.SetEnvironConstraints(envCons) + err := s.State.SetModelConstraints(envCons) c.Assert(err, jc.ErrorIsNil) oneJob := []state.MachineJob{state.JobHostUnits} extraCons := constraints.MustParse("cpu-cores=4") @@ -119,7 +119,7 @@ InstanceId: instance.Id("bootstrap"), Series: "precise", Nonce: agent.BootstrapNonce, - Jobs: []state.MachineJob{state.JobManageEnviron}, + Jobs: []state.MachineJob{state.JobManageModel}, Placement: "anyoldthing", } _, err := s.State.AddOneMachine(template) === modified file 'src/github.com/juju/juju/state/presence/presence.go' --- src/github.com/juju/juju/state/presence/presence.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/state/presence/presence.go 2016-03-22 15:18:22 +0000 @@ -30,17 +30,17 @@ } // docIDInt64 generates a globally unique id value -// where the environment uuid is prefixed to the +// where the model uuid is prefixed to the // given int64 localID. -func docIDInt64(envUUID string, localID int64) string { - return envUUID + ":" + strconv.FormatInt(localID, 10) +func docIDInt64(modelUUID string, localID int64) string { + return modelUUID + ":" + strconv.FormatInt(localID, 10) } // docIDStr generates a globally unique id value -// where the environment uuid is prefixed to the +// where the model uuid is prefixed to the // given string localID. -func docIDStr(envUUID string, localID string) string { - return envUUID + ":" + localID +func docIDStr(modelUUID string, localID string) string { + return modelUUID + ":" + localID } // The implementation works by assigning a unique sequence number to each @@ -48,13 +48,13 @@ // periodically updating the current time slot document with its // sequence number so that watchers can tell it is alive. // -// There is only one time slot document per time slot, per environment. The +// There is only one time slot document per time slot, per model. 
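Concretely, the two helpers above produce IDs of the following shape (the UUID is abbreviated and the local IDs are illustrative):

    docIDStr("deafbeef-...", "a")  // -> "deafbeef-...:a"
    docIDInt64("deafbeef-...", 42) // -> "deafbeef-...:42"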
The // internal implementation of the time slot document is as follows: // // { -// "_id": [... a large span of the original diff is elided here: the remainder of state/presence/presence.go and the beginning of the tools/lxdclient cert test file, whose SetUpTest populates s.certPEM ...] + s.keyPEM = []byte("") +} + +func (s *certSuite) TestNewCert(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, s.keyPEM) + + checkCert(c, cert, s.certPEM, s.keyPEM) +} + +func (s *certSuite) TestValidateOkay(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, s.keyPEM) + err := cert.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *certSuite) TestValidateMissingCertPEM(c *gc.C) { + cert := lxdclient.NewCert(nil, s.keyPEM) + err := cert.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *certSuite) TestValidateMissingKeyPEM(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, nil) + err := cert.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *certSuite) TestWriteCertPEM(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, s.keyPEM) + var pemfile bytes.Buffer + err := cert.WriteCertPEM(&pemfile) + c.Assert(err, jc.ErrorIsNil) + + c.Check(pemfile.String(), gc.Equals, string(s.certPEM)) +} + +func (s *certSuite) TestWriteKeyPEM(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, s.keyPEM) + var pemfile bytes.Buffer + err := cert.WriteKeyPEM(&pemfile) + c.Assert(err, jc.ErrorIsNil) + + c.Check(pemfile.String(), gc.Equals, string(s.keyPEM)) +} + +func (s *certSuite) TestWritePEMs(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, s.keyPEM) + var pemfile bytes.Buffer + err := cert.WriteCertPEM(&pemfile) + c.Assert(err, jc.ErrorIsNil) + err = cert.WriteKeyPEM(&pemfile) + c.Assert(err, jc.ErrorIsNil) + + expected := string(s.certPEM) + string(s.keyPEM) + c.Check(pemfile.String(), gc.Equals, expected) +} + +func (s *certSuite) TestFingerprint(c *gc.C) { + certPEM := []byte(testCertPEM) + cert := lxdclient.NewCert(certPEM, nil) + fingerprint, err := cert.Fingerprint() + c.Assert(err, jc.ErrorIsNil) + + c.Check(fingerprint, gc.Equals, testCertFingerprint) +} + +func (s *certSuite) TestX509Okay(c *gc.C) { + certPEM := []byte(testCertPEM) + cert := lxdclient.NewCert(certPEM, nil) + x509Cert, err := cert.X509() + c.Assert(err, jc.ErrorIsNil) + + block, _ := pem.Decode(certPEM) + c.Assert(block, gc.NotNil) + c.Check(string(x509Cert.Raw), gc.Equals, string(block.Bytes)) +} + +func (s *certSuite) TestX509ZeroValue(c *gc.C) { + var cert lxdclient.Cert + _, err := cert.X509() + + c.Check(err, gc.ErrorMatches, `invalid cert PEM \(0 bytes\)`) +} + +func (s *certSuite) TestX509BadPEM(c *gc.C) { + cert := lxdclient.NewCert(s.certPEM, s.keyPEM) + _, err := cert.X509() + + c.Check(err, gc.ErrorMatches, `invalid cert PEM \(\d+ bytes\)`) +} + +type certFunctionalSuite struct { + lxdclient.BaseSuite +} + +func checkCert(c *gc.C, cert lxdclient.Cert, certPEM, keyPEM []byte) { + c.Check(cert, jc.DeepEquals, lxdclient.Cert{ + CertPEM: certPEM, + KeyPEM: keyPEM, + }) + c.Check(string(cert.CertPEM), gc.Equals, string(certPEM)) + c.Check(string(cert.KeyPEM), gc.Equals, string(keyPEM)) +} + +func checkValidCert(c *gc.C, cert *lxdclient.Cert) { + c.Assert(cert, gc.NotNil) + + _, err := tls.X509KeyPair(cert.CertPEM, cert.KeyPEM) + c.Check(err, jc.ErrorIsNil) + + block, remainder := pem.Decode(cert.CertPEM) + c.Check(block.Type, gc.Equals, "CERTIFICATE") + c.Check(remainder, gc.HasLen, 0) + + block, remainder = pem.Decode(cert.KeyPEM) + c.Check(block.Type, gc.Equals, "RSA PRIVATE KEY") + c.Check(remainder, gc.HasLen, 0) +} + +const ( + testCertFingerprint = "1c5156027fe71cfd0f7db807123e6873879f0f9754e08eab151f224783b2bff0" + testCertPEM = ` +-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQEFjWOkN8qXNbWKtveG5ddTANBgkqhkiG9w0BAQsFADA2 +MRwwGgYDVQQKExNsaW51eGNvbnRhaW5lcnMub3JnMRYwFAYDVQQDDA1lc25vd0Bm +dXJpb3VzMB4XDTE1MTAwMTIxMjAyMloXDTI1MDkyODIxMjAyMlowNjEcMBoGA1UE +ChMTbGludXhjb250YWluZXJzLm9yZzEWMBQGA1UEAwwNZXNub3dAZnVyaW91czCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMQgSXXaZMWImOP6IFBy/3E6 +JFHwrgy5YMqRikoernt5cMr838nNdNLW9woBIVRZfZIFbAjf38PGBQYAs/4G/WIt +oydFp37JASsjPCEa/9I9WdIvm1+HpL7p7KjY/0bzcCZY8PbnUY98XGmWAdR38wY6 +S79Q8kDE6iOWls/zwndwlPPGoQlrOaITyzcl9aurH9ZZc4aoRz9DeKiPEXwYD9rl +TMYPOVYu+YvN/UHOnzpFxYXJw1o5upvvF2QOHEm6kuYq/8azv0Iu+cOR1+Ok08Y+ +IGpXAkqqINf4qKWqd3/xq/ltkGpt/RfuUaMtbTbpU1UpLFsw7jkI5tGJarsXQZQP +mw0auh63Ty9y7MdKluy44HcFsuttGeeihXp6oHz2IqEOYzbFh1wlJfIUFFkmJ3lY +p81tA8A5Y7o/Il4aL+DudIzF8MmTHhElSZYF74KUVt/eiyQikUn/CjlGXzNfi/NC +J8yIbR1HCDLAsWg1a1CvGdKBBi4VH2w9yI9HsNm4hvcF/nQojPNxqlbHDZ7lVESN +tZZYDWACPUow9y8IQiVcI0hgAK1o/sxRWqt2URnz09iv3zNsOu/Y0oNyOJSrVeOq +bObbt9dcifOkDx09uG7A4i7pOk9lD/zIXx8o9Zkw0D/1HLYyE+jNz1V6zEnUDem8 +cRTMPAvAE6JQtR8zyckVAgMBAAGjgdswgdgwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwgaIGA1UdEQSBmjCBl4IHZnVy +aW91c4IRMTkyLjE2OC4yNy4xMTMvMjSCHGZlODA6OjVlNTE6NGZmZjpmZWRjOmM1 +ZmQvNjSCCzEwLjAuMy4xLzI0ghtmZTgwOjpkNDZhOmFmZjpmZWY2OjUzOTgvNjSC +EDE5Mi4xNjguMTIyLjEvMjSCDzE5Mi4xNjguNjQuMS8yNIIOMTcyLjE3LjQyLjEv +MTYwDQYJKoZIhvcNAQELBQADggIBADg+1q7OT/euwJIIGjvgo/UfIr7Xj//Sarfx +UcF6Qq125G2ZWb8epkB/sqoAerVpI0tRQX4G1sZvSe67sQvQDj17VHit9IrE14dY +A0xA77wWZThRKX/yyTSUhFBU8QYEVPi72D31NgcDY3Ppy6wBvcIjv4vWedeTdgrb +w09x/auAcvOl87bQXOduRl6xVoXu+mXwhjoK1rMrcqlPW6xcVn6yTWLODPNbAyx8 +xvaeHwKf67sIF/IBeRNoeVvuw6fANEGINB/JIaW5l6TwHakGaXBLOCe1dC6f7t5O +Zj9Kb5IS6YMbxUVKnzFLtEty4vPN/pDeLPrJt00wvvbA0SrMpM+M8gspKrQsJ3Oz +GiuXnLorumhOUXT7UQqw2gZ4FE/WA3W0LlIlpPuAbgZKRecJjilmnRPHa9+9hSXX +BmxTLbEvz87PrrsoVR9K5R261ciAFdFiE7Jbh15qUm4qXYHT9QgJeXnDtV/bxO+Y +Rrh9WfSP8x0SKrAoO7uhjI9Y276c8+etF0EY8u/+joqS8cZbOLXMuafgtF5E1trd +QNRHwiIhEUVqctdguzMHbhFfKthq6vP8qhWNOF6FowZgSg+Q5Tvm1jaU++BNPqWi +Zxy0qbMLRW8i/ABuTmzqtS3AHTtIFgdHx+BeT4W9LwU2dsO3Ijni2Rutmuz04rT+ +zxBNMbP3 +-----END CERTIFICATE----- +` +) === added file 'src/github.com/juju/juju/tools/lxdclient/client.go' --- src/github.com/juju/juju/tools/lxdclient/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,95 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "net" + + "github.com/lxc/lxd" + lxdshared "github.com/lxc/lxd/shared" + + "github.com/juju/errors" + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("juju.tools.lxdclient") + +// Client is a high-level wrapper around the LXD API client. +type Client struct { + *serverConfigClient + *certClient + *profileClient + *instanceClient + *imageClient +} + +// Connect opens an API connection to LXD and returns a high-level +// Client wrapper around that connection. 
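A sketch of typical use, assuming the local Unix-socket remote; the "juju" namespace value is hypothetical:

    cfg := Config{Namespace: "juju", Remote: Local}
    client, err := Connect(cfg) // validates cfg, then dials the remote
    if err != nil {
        return errors.Trace(err)
    }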
+func Connect(cfg Config) (*Client, error) { + if err := cfg.Validate(); err != nil { + return nil, errors.Trace(err) + } + + remote := cfg.Remote.ID() + + raw, err := newRawClient(cfg) + if err != nil { + return nil, errors.Trace(err) + } + + conn := &Client{ + serverConfigClient: &serverConfigClient{raw}, + certClient: &certClient{raw}, + profileClient: &profileClient{raw}, + instanceClient: &instanceClient{raw, remote}, + imageClient: &imageClient{raw}, + } + return conn, nil +} + +var lxdNewClient = lxd.NewClient +var lxdNewClientFromInfo = lxd.NewClientFromInfo +var lxdLoadConfig = lxd.LoadConfig + +func newRawClient(cfg Config) (*lxd.Client, error) { + logger.Debugf("using LXD remote %q", cfg.Remote.ID()) + remote := cfg.Remote.ID() + host := cfg.Remote.Host + if remote == remoteIDForLocal || host == "" { + host = "unix://" + lxdshared.VarPath("unix.socket") + } else { + _, _, err := net.SplitHostPort(host) + if err != nil { + // There is no port here + host = net.JoinHostPort(host, lxdshared.DefaultPort) + } + } + + clientCert := "" + if cfg.Remote.Cert != nil && cfg.Remote.Cert.CertPEM != nil { + clientCert = string(cfg.Remote.Cert.CertPEM) + } + + clientKey := "" + if cfg.Remote.Cert != nil && cfg.Remote.Cert.KeyPEM != nil { + clientKey = string(cfg.Remote.Cert.KeyPEM) + } + + client, err := lxdNewClientFromInfo(lxd.ConnectInfo{ + Name: cfg.Remote.ID(), + Addr: host, + ClientPEMCert: clientCert, + ClientPEMKey: clientKey, + ServerPEMCert: cfg.Remote.ServerPEMCert, + }) + if err != nil { + if remote == remoteIDForLocal { + return nil, errors.Annotate(err, "can't connect to the local LXD server") + } + return nil, errors.Trace(err) + } + return client, nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_cert.go' --- src/github.com/juju/juju/tools/lxdclient/client_cert.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_cert.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "crypto/x509" + + "github.com/juju/errors" + "github.com/lxc/lxd/shared" +) + +type rawCertClient interface { + CertificateList() ([]shared.CertInfo, error) + CertificateAdd(cert *x509.Certificate, name string) error + CertificateRemove(fingerprint string) error +} + +type certClient struct { + raw rawCertClient +} + +// AddCert adds the given certificate to the server. +func (c certClient) AddCert(cert Cert) error { + x509Cert, err := cert.X509() + if err != nil { + return errors.Trace(err) + } + + if err := c.raw.CertificateAdd(x509Cert, cert.Name); err != nil { + return errors.Trace(err) + } + + return nil +} + +// ListCerts returns the list of cert fingerprints from the server. +func (c certClient) ListCerts() ([]string, error) { + certs, err := c.raw.CertificateList() + if err != nil { + return nil, errors.Trace(err) + } + + var fingerprints []string + for _, cert := range certs { + fingerprints = append(fingerprints, cert.Fingerprint) + } + return fingerprints, nil +} + +// RemoveCert removes the cert from the server. +func (c certClient) RemoveCert(cert *Cert) error { + fingerprint, err := cert.Fingerprint() + if err != nil { + return errors.Trace(err) + } + + if err := c.raw.CertificateRemove(fingerprint); err != nil { + return errors.Trace(err) + } + return nil +} + +// RemoveCertByFingerprint removes the cert from the server. 
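Together these methods cover the trust workflow that config.go below builds on; a sketch, where client is a *Client from Connect and cert is a populated Cert:

    if err := client.AddCert(cert); err != nil {
        return errors.Trace(err)
    }
    fingerprints, err := client.ListCerts()
    // ... later, revoke the certificate by fingerprint:
    err = client.RemoveCertByFingerprint(fingerprints[0])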
+func (c certClient) RemoveCertByFingerprint(fingerprint string) error { + if err := c.raw.CertificateRemove(fingerprint); err != nil { + return errors.Trace(err) + } + return nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_image.go' --- src/github.com/juju/juju/tools/lxdclient/client_image.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_image.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,56 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + + "github.com/juju/errors" +) + +type rawImageClient interface { + ListAliases() (shared.ImageAliases, error) +} + +type imageClient struct { + raw rawImageClient +} + +func (i imageClient) EnsureImageExists(series string) error { + name := i.ImageNameForSeries(series) + + aliases, err := i.raw.ListAliases() + if err != nil { + return err + } + + for _, alias := range aliases { + if alias.Description == name { + return nil + } + } + + /* "ubuntu" here is cloud-images.ubuntu.com's "releases" stream; + * "ubuntu-daily" would be the daily stream + */ + ubuntu, err := lxdNewClient(&lxd.DefaultConfig, "ubuntu") + if err != nil { + return err + } + + client, ok := i.raw.(*lxd.Client) + if !ok { + return errors.Errorf("can't use a fake client as target") + } + + return ubuntu.CopyImage(series, client, false, []string{name}, false, true, nil) +} + +// A common place to compute image names (aliases) based on the series +func (i imageClient) ImageNameForSeries(series string) string { + return "ubuntu-" + series +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_instance.go' --- src/github.com/juju/juju/tools/lxdclient/client_instance.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_instance.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,338 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/gorilla/websocket" + "github.com/juju/errors" + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + + "github.com/juju/juju/network" +) + +// TODO(ericsnow) We probably need to address some of the things that +// get handled in container/lxc/clonetemplate.go. + +type rawInstanceClient interface { + ListContainers() ([]shared.ContainerInfo, error) + ContainerInfo(name string) (*shared.ContainerInfo, error) + Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*lxd.Response, error) + Action(name string, action shared.ContainerAction, timeout int, force bool, stateful bool) (*lxd.Response, error) + Exec(name string, cmd []string, env map[string]string, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser, controlHandler func(*lxd.Client, *websocket.Conn)) (int, error) + Delete(name string) (*lxd.Response, error) + + WaitForSuccess(waitURL string) error + ContainerState(name string) (*shared.ContainerState, error) +} + +type instanceClient struct { + raw rawInstanceClient + remote string +} + +func (client *instanceClient) addInstance(spec InstanceSpec) error { + imageRemote := spec.ImageRemote + if imageRemote == "" { + imageRemote = client.remote + } + + imageAlias := spec.Image + if imageAlias == "" { + // TODO(ericsnow) Do not have a default?
+ imageAlias = "ubuntu" + } + + var profiles *[]string + if len(spec.Profiles) > 0 { + profiles = &spec.Profiles + } + + // TODO(ericsnow) Copy the image first? + + config := spec.config() + resp, err := client.raw.Init(spec.Name, imageRemote, imageAlias, profiles, config, spec.Ephemeral) + if err != nil { + return errors.Trace(err) + } + + // Init is an async operation, since the tar -xvf (or whatever) might + // take a while; the result is an LXD operation id, which we can just + // wait on until it is finished. + if err := client.raw.WaitForSuccess(resp.Operation); err != nil { + // TODO(ericsnow) Handle different failures (from the async + // operation) differently? + return errors.Trace(err) + } + + return nil +} + +type execFailure struct { + cmd string + code int + stderr string +} + +// Error returns the string representation of the error. +func (err execFailure) Error() string { + return fmt.Sprintf("got non-zero code from %q: (%d) %s", err.cmd, err.code, err.stderr) +} + +func (client *instanceClient) exec(spec InstanceSpec, cmd []string) error { + var env map[string]string + + cmdStr := strings.Join(cmd, " ") + fmt.Println("running", cmdStr) + + var input, output closingBuffer + stdin, stdout, stderr := &input, &output, &output + rc, err := client.raw.Exec(spec.Name, cmd, env, stdin, stdout, stderr, nil) + if err != nil { + return errors.Trace(err) + } else if rc != 0 { + msg := output.String() + if msg == "" { + msg = "" + } + err := &execFailure{ + cmd: cmdStr, + code: rc, + stderr: msg, + } + return errors.Trace(err) + } + + return nil +} + +func (client *instanceClient) chmod(spec InstanceSpec, filename string, mode os.FileMode) error { + cmd := []string{ + "/bin/chmod", + fmt.Sprintf("%s", mode), + filename, + } + + if err := client.exec(spec, cmd); err != nil { + return errors.Trace(err) + } + return nil +} + +func (client *instanceClient) startInstance(spec InstanceSpec) error { + timeout := -1 + force := false + resp, err := client.raw.Action(spec.Name, shared.Start, timeout, force, false) + if err != nil { + return errors.Trace(err) + } + + if err := client.raw.WaitForSuccess(resp.Operation); err != nil { + // TODO(ericsnow) Handle different failures (from the async + // operation) differently? + return errors.Trace(err) + } + + return nil +} + +// AddInstance creates a new instance based on the spec's data and +// returns it. The instance will be created using the client. +func (client *instanceClient) AddInstance(spec InstanceSpec) (*Instance, error) { + if err := client.addInstance(spec); err != nil { + return nil, errors.Trace(err) + } + + if err := client.startInstance(spec); err != nil { + if err := client.removeInstance(spec.Name); err != nil { + logger.Errorf("could not remove container %q after starting it failed", spec.Name) + } + return nil, errors.Trace(err) + } + + inst, err := client.Instance(spec.Name) + if err != nil { + return nil, errors.Trace(err) + } + inst.spec = &spec + + return inst, nil +} + +// Instance gets the up-to-date info about the given instance +// and returns it. 
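Because AddInstance above performs the Init/Start sequence and removes the container again when the start fails, a caller only supplies a spec; a sketch with hypothetical field values:

    spec := InstanceSpec{
        Name:     "juju-machine-0",
        Image:    "ubuntu-trusty",
        Profiles: []string{"default"},
    }
    inst, err := client.AddInstance(spec) // Init, then Start; rolled back if Start fails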
+func (client *instanceClient) Instance(name string) (*Instance, error) { + info, err := client.raw.ContainerInfo(name) + if err != nil { + return nil, errors.Trace(err) + } + + inst := newInstance(info, nil) + return inst, nil +} + +func (client *instanceClient) Status(name string) (string, error) { + info, err := client.raw.ContainerInfo(name) + if err != nil { + return "", errors.Trace(err) + } + + return info.Status, nil +} + +// Instances sends a request to the API for a list of all instances +// (in the Client's namespace) for which the name starts with the +// provided prefix. The result is also limited to those instances with +// one of the specified statuses (if any). +func (client *instanceClient) Instances(prefix string, statuses ...string) ([]Instance, error) { + infos, err := client.raw.ListContainers() + if err != nil { + return nil, errors.Trace(err) + } + + var insts []Instance + for _, info := range infos { + name := info.Name + if prefix != "" && !strings.HasPrefix(name, prefix) { + continue + } + if len(statuses) > 0 && !checkStatus(info, statuses) { + continue + } + + inst := newInstance(&info, nil) + insts = append(insts, *inst) + } + return insts, nil +} + +func checkStatus(info shared.ContainerInfo, statuses []string) bool { + for _, status := range statuses { + statusCode := allStatuses[status] + if info.StatusCode == statusCode { + return true + } + } + return false +} + +// removeInstance sends a request to the API to remove the instance +// with the provided ID. The call blocks until the instance is removed +// (or the request fails). +func (client *instanceClient) removeInstance(name string) error { + info, err := client.raw.ContainerInfo(name) + if err != nil { + return errors.Trace(err) + } + + //if info.Status.StatusCode != 0 && info.Status.StatusCode != shared.Stopped { + if info.StatusCode != shared.Stopped { + timeout := -1 + force := true + resp, err := client.raw.Action(name, shared.Stop, timeout, force, false) + if err != nil { + return errors.Trace(err) + } + + if err := client.raw.WaitForSuccess(resp.Operation); err != nil { + // TODO(ericsnow) Handle different failures (from the async + // operation) differently? + return errors.Trace(err) + } + } + + resp, err := client.raw.Delete(name) + if err != nil { + return errors.Trace(err) + } + + if err := client.raw.WaitForSuccess(resp.Operation); err != nil { + // TODO(ericsnow) Handle different failures (from the async + // operation) differently? + return errors.Trace(err) + } + + return nil +} + +// RemoveInstances sends a request to the API to terminate all +// instances (in the Client's namespace) that match one of the +// provided IDs. If a prefix is provided, only IDs that start with the +// prefix will be considered. The call blocks until all the instances +// are removed or the request fails. +func (client *instanceClient) RemoveInstances(prefix string, names ...string) error { + if len(names) == 0 { + return nil + } + + instances, err := client.Instances(prefix) + if err != nil { + return errors.Annotatef(err, "while removing instances %v", names) + } + + var failed []string + for _, name := range names { + if !checkInstanceName(name, instances) { + // We ignore unknown instance names. 
+ continue + } + + if err := client.removeInstance(name); err != nil { + failed = append(failed, name) + logger.Errorf("while removing instance %q: %v", name, err) + } + } + if len(failed) != 0 { + return errors.Errorf("some instance removals failed: %v", failed) + } + return nil +} + +func checkInstanceName(name string, instances []Instance) bool { + for _, inst := range instances { + if inst.Name == name { + return true + } + } + return false +} + +// Addresses returns the list of network.Addresses for this instance. It +// converts the information that LXD tracks into the Juju network model. +func (client *instanceClient) Addresses(name string) ([]network.Address, error) { + state, err := client.raw.ContainerState(name) + if err != nil { + return nil, err + } + + networks := state.Network + if networks == nil { + return []network.Address{}, nil + } + + addrs := []network.Address{} + + for _, net := range networks { + for _, addr := range net.Addresses { + if err != nil { + return nil, err + } + + addr := network.NewAddress(addr.Address) + if addr.Scope == network.ScopeLinkLocal || addr.Scope == network.ScopeMachineLocal { + logger.Tracef("for container %q ignoring address %v", name, addr) + continue + } + addrs = append(addrs, addr) + } + } + return addrs, nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_instance_test.go' --- src/github.com/juju/juju/tools/lxdclient/client_instance_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_instance_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,125 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient_test + +import ( + jc "github.com/juju/testing/checkers" + lxdshared "github.com/lxc/lxd/shared" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/network" + jujutesting "github.com/juju/juju/testing" + "github.com/juju/juju/tools/lxdclient" +) + +type addressesSuite struct { + jujutesting.BaseSuite +} + +var _ = gc.Suite(&addressesSuite{}) + +type addressTester struct { + // Stub out all the APIs so we conform to the interface, + // we only implement the ones that we are going to be testing + lxdclient.RawInstanceClient + + ContainerStateResult *lxdshared.ContainerState +} + +func (a *addressTester) ContainerState(name string) (*lxdshared.ContainerState, error) { + return a.ContainerStateResult, nil +} + +var _ lxdclient.RawInstanceClient = (*addressTester)(nil) + +// containerStateSample was captured from a real response +var containerStateSample = lxdshared.ContainerState{ + Status: "Running", + StatusCode: lxdshared.Running, + Disk: map[string]lxdshared.ContainerStateDisk{}, + Memory: lxdshared.ContainerStateMemory{ + Usage: 66486272, + UsagePeak: 92405760, + SwapUsage: 0, + SwapUsagePeak: 0, + }, + Network: map[string]lxdshared.ContainerStateNetwork{ + "eth0": lxdshared.ContainerStateNetwork{ + Addresses: []lxdshared.ContainerStateNetworkAddress{ + lxdshared.ContainerStateNetworkAddress{ + Family: "inet", + Address: "10.0.3.173", + Netmask: "24", + Scope: "global", + }, + lxdshared.ContainerStateNetworkAddress{ + Family: "inet6", + Address: "fe80::216:3eff:fe3b:e582", + Netmask: "64", + Scope: "link", + }, + }, + Counters: lxdshared.ContainerStateNetworkCounters{ + BytesReceived: 16352, + BytesSent: 6238, + PacketsReceived: 69, + PacketsSent: 59, + }, + Hwaddr: "00:16:3e:3b:e5:82", + HostName: "vethYIEDPS", + Mtu: 1500, + State: "up", + Type: "broadcast", + }, + "lo": lxdshared.ContainerStateNetwork{ +
Addresses: []lxdshared.ContainerStateNetworkAddress{ + lxdshared.ContainerStateNetworkAddress{ + Family: "inet", + Address: "127.0.0.1", + Netmask: "8", + Scope: "local", + }, + lxdshared.ContainerStateNetworkAddress{ + Family: "inet6", + Address: "::1", + Netmask: "128", + Scope: "local", + }, + }, + Counters: lxdshared.ContainerStateNetworkCounters{ + BytesReceived: 0, + BytesSent: 0, + PacketsReceived: 0, + PacketsSent: 0, + }, + Hwaddr: "", + HostName: "", + Mtu: 65536, + State: "up", + Type: "loopback", + }, + }, + Pid: 46072, + Processes: 19, +} + +func (s *addressesSuite) TestAddresses(c *gc.C) { + raw := &addressTester{ + ContainerStateResult: &containerStateSample, + } + client := lxdclient.NewInstanceClient(raw) + addrs, err := client.Addresses("test") + c.Assert(err, jc.ErrorIsNil) + // We should filter out the MachineLocal addresses 127.0.0.1 and [::1] + // and filter out the LinkLocal address [fe80::216:3eff:fe3b:e582] + c.Check(addrs, jc.DeepEquals, []network.Address{ + { + Value: "10.0.3.173", + Type: network.IPv4Address, + Scope: network.ScopeCloudLocal, + }, + }) +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_profile.go' --- src/github.com/juju/juju/tools/lxdclient/client_profile.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_profile.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "github.com/juju/errors" +) + +type rawProfileClient interface { + ProfileCreate(name string) error + ListProfiles() ([]string, error) + SetProfileConfigItem(name, key, value string) error +} + +type profileClient struct { + raw rawProfileClient +} + +// CreateProfile attempts to create a new lxc profile and set the given config. +func (p profileClient) CreateProfile(name string, config map[string]string) error { + if err := p.raw.ProfileCreate(name); err != nil { + //TODO(wwitzel3) use HasProfile to generate a more useful AlreadyExists error + return errors.Trace(err) + } + + for k, v := range config { + if err := p.raw.SetProfileConfigItem(name, k, v); err != nil { + return errors.Trace(err) + } + } + return nil +} + +// HasProfile returns true/false if the profile exists. +func (p profileClient) HasProfile(name string) (bool, error) { + profiles, err := p.raw.ListProfiles() + if err != nil { + return false, errors.Trace(err) + } + for _, profile := range profiles { + if profile == name { + return true, nil + } + } + return false, nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_raw.go' --- src/github.com/juju/juju/tools/lxdclient/client_raw.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_raw.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,139 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "crypto/x509" + "io" + "os" + + "github.com/gorilla/websocket" + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" +) + +// These interfaces facilitate mocking out the LXD API during tests. +// See https://github.com/lxc/lxd/blob/master/client.go +// and https://github.com/lxc/lxd/blob/master/specs/rest-api.md. + +// TODO(ericsnow) Move this to a test suite. +var _ rawClientWrapperFull = (*lxd.Client)(nil) + +// rawClientWrapperFull exposes all methods of lxd.Client. 
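The compile-time assertion above is what keeps this mirror honest as the LXD client grows; the same trick applies to the narrower per-feature interfaces used by the wrappers, for example:

    // Fails to compile if lxd.Client ever stops satisfying the image subset.
    var _ rawImageClient = (*lxd.Client)(nil)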
+type rawClientWrapperFull interface { + rawServerMethods + rawImageMethods + rawAliasMethods + rawContainerMethods + rawProfileMethods +} + +type rawServerMethods interface { + // info + Finger() error + ServerStatus() (*shared.ServerState, error) + + // config + GetServerConfigString() ([]string, error) + SetServerConfig(key string, value string) (*lxd.Response, error) + + // connection + WaitFor(waitURL string) (*shared.Operation, error) + WaitForSuccess(waitURL string) error + + // auth + AmTrusted() bool +} + +type rawCertMethods interface { + AddMyCertToServer(pwd string) error + CertificateList() ([]shared.CertInfo, error) + CertificateAdd(cert *x509.Certificate, name string) error + CertificateRemove(fingerprint string) error +} + +type rawImageMethods interface { + // info/meta + ListImages() ([]shared.ImageInfo, error) + GetImageInfo(image string) (*shared.ImageInfo, error) + //PutImageProperties(name string, p shared.ImageProperties) error + + // image data (create, upload, download, destroy) + CopyImage(image string, dest *lxd.Client, copy_aliases bool, aliases []string, public bool, autoUpdate bool, progresHandler func(string)) error + ImageFromContainer(cname string, public bool, aliases []string, properties map[string]string) (string, error) + DeleteImage(image string) error +} + +type rawAliasMethods interface { + // info + ListAliases() (shared.ImageAliases, error) + IsAlias(alias string) (bool, error) + + // alias data (upload, download, destroy) + PostAlias(alias string, desc string, target string) error + GetAlias(alias string) string + DeleteAlias(alias string) error +} + +type rawContainerMethods interface { + // info/meta + ListContainers() ([]shared.ContainerInfo, error) + //Rename(name string, newName string) (*lxd.Response, error) + ContainerState(name string) (*shared.ContainerState, error) + + // container data (create, actions, destroy) + Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*lxd.Response, error) + LocalCopy(source string, name string, config map[string]string, profiles []string, ephemeral bool) (*lxd.Response, error) + MigrateFrom(name string, operation string, certificate string, secrets map[string]string, architecture string, config map[string]string, devices shared.Devices, profiles []string, baseImage string, ephemeral bool) (*lxd.Response, error) + Action(name string, action shared.ContainerAction, timeout int, force bool, stateful bool) (*lxd.Response, error) + Delete(name string) (*lxd.Response, error) + + // exec + Exec(name string, cmd []string, env map[string]string, stdin io.ReadCloser, stdout io.WriteCloser, stderr io.WriteCloser, controlHandler func(*lxd.Client, *websocket.Conn)) (int, error) + + // files + PushFile(container string, p string, gid int, uid int, mode os.FileMode, buf io.ReadSeeker) error + PullFile(container string, p string) (int, int, os.FileMode, io.ReadCloser, error) + + // config + GetContainerConfig(container string) ([]string, error) + SetContainerConfig(container, key, value string) error + UpdateContainerConfig(container string, st shared.BriefContainerInfo) error + + // devices + ContainerListDevices(container string) ([]string, error) + ContainerDeviceDelete(container, devname string) (*lxd.Response, error) + ContainerDeviceAdd(container, devname, devtype string, props []string) (*lxd.Response, error) + + // snapshots + RestoreSnapshot(container string, snapshotName string, stateful bool) (*lxd.Response, error) + Snapshot(container string, 
snapshotName string, stateful bool) (*lxd.Response, error) + ListSnapshots(container string) ([]shared.SnapshotInfo, error) +} + +type rawProfileMethods interface { + // info + ListProfiles() ([]string, error) + + // profile data (create, upload, destroy) + ProfileCreate(p string) error + ProfileCopy(name, newname string, dest *lxd.Client) error + PutProfile(name string, profile shared.ProfileConfig) error + ProfileDelete(p string) error + + // apply + ApplyProfile(container, profile string) (*lxd.Response, error) + + // config + ProfileConfig(name string) (*shared.ProfileConfig, error) + GetProfileConfig(profile string) (map[string]string, error) + SetProfileConfigItem(profile, key, value string) error + + // devices + ProfileListDevices(profile string) ([]string, error) + ProfileDeviceDelete(profile, devname string) (*lxd.Response, error) + ProfileDeviceAdd(profile, devname, devtype string, props []string) (*lxd.Response, error) +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_serverconfig.go' --- src/github.com/juju/juju/tools/lxdclient/client_serverconfig.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_serverconfig.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "github.com/juju/errors" + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" +) + +type rawServerConfigClient interface { + SetServerConfig(key string, value string) (*lxd.Response, error) + + WaitForSuccess(waitURL string) error + ServerStatus() (*shared.ServerState, error) +} + +type serverConfigClient struct { + raw rawServerConfigClient +} + +// SetConfig sets the given value in the server's config. +func (c serverConfigClient) SetConfig(key, value string) error { + resp, err := c.raw.SetServerConfig(key, value) + if err != nil { + return errors.Trace(err) + } + + if resp.Operation != "" { + if err := c.raw.WaitForSuccess(resp.Operation); err != nil { + // TODO(ericsnow) Handle different failures (from the async + // operation) differently? + return errors.Trace(err) + } + } + + return nil +} + +func (c serverConfigClient) ServerStatus() (*shared.ServerState, error) { + return c.raw.ServerStatus() +} === added file 'src/github.com/juju/juju/tools/lxdclient/client_test.go' --- src/github.com/juju/juju/tools/lxdclient/client_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,56 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + "github.com/lxc/lxd" + gc "gopkg.in/check.v1" +) + +type ConnectSuite struct { + testing.IsolationSuite +} + +func (cs ConnectSuite) TestLocalConnectError(c *gc.C) { + cs.PatchValue(&lxdNewClientFromInfo, fakeNewClientFromInfo) + cs.PatchValue(&lxdLoadConfig, fakeLoadConfig) + + cfg := Config{ + Remote: Local, + } + _, err := Connect(cfg) + + // Yes, the error message actually matters here... this is being displayed + // to the user.
+ c.Assert(err, gc.ErrorMatches, "can't connect to the local LXD server.*") +} + +func (cs ConnectSuite) TestRemoteConnectError(c *gc.C) { + cs.PatchValue(&lxdNewClientFromInfo, fakeNewClientFromInfo) + cs.PatchValue(&lxdLoadConfig, fakeLoadConfig) + + cfg := Config{ + Remote: Remote{ + Name: "foo", + Host: "a.b.c", + }, + } + _, err := Connect(cfg) + + c.Assert(errors.Cause(err), gc.Equals, testerr) +} + +var testerr = errors.Errorf("boo!") + +func fakeNewClientFromInfo(config *lxd.Config, remote string) (*lxd.Client, error) { + return nil, testerr +} + +func fakeLoadConfig() (*lxd.Config, error) { + return nil, nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/config.go' --- src/github.com/juju/juju/tools/lxdclient/config.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,115 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "github.com/juju/errors" +) + +// Config contains the config values used for a connection to the LXD API. +type Config struct { + // Namespace identifies the namespace to associate with containers + // and other resources with which the client interacts. It may be + // blank. + Namespace string + + // Remote identifies the remote server to which the client should + // connect. For the default "remote" use Local. + Remote Remote +} + +// WithDefaults updates a copy of the config with default values +// where needed. +func (cfg Config) WithDefaults() (Config, error) { + // We leave a blank namespace alone. + // Also, note that cfg is a value receiver, so it is an implicit copy. + + var err error + cfg.Remote, err = cfg.Remote.WithDefaults() + if err != nil { + return cfg, errors.Trace(err) + } + + return cfg, nil +} + +// Validate checks the client's fields for invalid values. +func (cfg Config) Validate() error { + // TODO(ericsnow) Check cfg.Namespace (if provided)? + + // TODO(ericsnow) Check cfg.Dirname (if provided)? + + // TODO(ericsnow) Check cfg.Filename (if provided)? + + if err := cfg.Remote.Validate(); err != nil { + return errors.Trace(err) + } + + return nil +} + +// UsingTCPRemote converts the config into a "non-local" version. An +// already non-local remote is left alone. +// +// For a "local" remote (see Local), the remote is changed to one +// with the host set to the IP address of the local lxcbr0 bridge +// interface. The LXD server is also set up for remote access, exposing +// the TCP port and adding a certificate for remote access. +func (cfg Config) UsingTCPRemote() (Config, error) { + // Note that cfg is a value receiver, so it is an implicit copy. + + if !cfg.Remote.isLocal() { + return cfg, nil + } + + remote, err := cfg.Remote.UsingTCP() + if err != nil { + return cfg, errors.Trace(err) + } + + // Update the server config and authorized certs. + serverCert, err := prepareRemote(cfg, *remote.Cert) + if err != nil { + return cfg, errors.Trace(err) + } + // Note: jam 2016-02-25 setting ServerPEMCert feels like something + // that would have been done in UsingTCP.
However, we can't know the + // server's certificate until we've actually connected to it, which + // happens in prepareRemote + remote.ServerPEMCert = serverCert + + cfg.Remote = remote + return cfg, nil +} + +func prepareRemote(cfg Config, newCert Cert) (string, error) { + client, err := Connect(cfg) + if err != nil { + return "", errors.Trace(err) + } + + // Make sure the LXD service is configured to listen to local https + // requests, rather than only via the Unix socket. + // TODO: jam 2016-02-25 This tells LXD to listen on all addresses, + // which does expose the LXD to outside requests. It would + // probably be better to only tell LXD to listen for requests on + // the loopback and LXC bridges that we are using. + if err := client.SetConfig("core.https_address", "[::]"); err != nil { + return "", errors.Trace(err) + } + + // Make sure the LXD service will allow our certificate to connect + if err := client.AddCert(newCert); err != nil { + return "", errors.Trace(err) + } + + st, err := client.ServerStatus() + if err != nil { + return "", errors.Trace(err) + } + + return st.Environment.Certificate, nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/config_test.go' --- src/github.com/juju/juju/tools/lxdclient/config_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/config_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,196 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/tools/lxdclient" +) + +var ( + _ = gc.Suite(&configSuite{}) + _ = gc.Suite(&configFunctionalSuite{}) +) + +type configBaseSuite struct { + lxdclient.BaseSuite + + remote lxdclient.Remote +} + +func (s *configBaseSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + + s.remote = lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: s.Cert, + } +} + +type configSuite struct { + configBaseSuite +} + +func (s *configSuite) TestWithDefaultsOkay(c *gc.C) { + cfg := lxdclient.Config{ + Namespace: "my-ns", + Remote: s.remote, + } + updated, err := cfg.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updated, jc.DeepEquals, cfg) +} + +func (s *configSuite) TestWithDefaultsMissingRemote(c *gc.C) { + cfg := lxdclient.Config{ + Namespace: "my-ns", + } + updated, err := cfg.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updated, jc.DeepEquals, lxdclient.Config{ + Namespace: "my-ns", + Remote: lxdclient.Local, + }) +} + +func (s *configSuite) TestValidateOkay(c *gc.C) { + cfg := lxdclient.Config{ + Namespace: "my-ns", + Remote: s.remote, + } + err := cfg.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *configSuite) TestValidateOnlyRemote(c *gc.C) { + cfg := lxdclient.Config{ + Namespace: "", + Remote: s.remote, + } + err := cfg.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *configSuite) TestValidateMissingRemote(c *gc.C) { + cfg := lxdclient.Config{ + Namespace: "my-ns", + } + err := cfg.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *configSuite) TestValidateZeroValue(c *gc.C) { + var cfg lxdclient.Config + err := cfg.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *configSuite) TestWriteOkay(c *gc.C) { + c.Skip("not implemented yet") + // TODO(ericsnow) Finish! 
+} + +func (s *configSuite) TestWriteRemoteAlreadySet(c *gc.C) { + c.Skip("not implemented yet") + // TODO(ericsnow) Finish! +} + +func (s *configSuite) TestUsingTCPRemoteOkay(c *gc.C) { + // TODO(ericsnow) Finish! +} + +func (s *configSuite) TestUsingTCPRemoteNoop(c *gc.C) { + cfg := lxdclient.Config{ + Namespace: "my-ns", + Remote: s.remote, + } + nonlocal, err := cfg.UsingTCPRemote() + c.Assert(err, jc.ErrorIsNil) + + c.Check(nonlocal, jc.DeepEquals, cfg) +} + +type configFunctionalSuite struct { + configBaseSuite + + client *lxdclient.Client +} + +func (s *configFunctionalSuite) SetUpTest(c *gc.C) { + s.configBaseSuite.SetUpTest(c) + + s.client = newLocalClient(c) + + if s.client != nil { + origCerts, err := s.client.ListCerts() + c.Assert(err, jc.ErrorIsNil) + s.AddCleanup(func(c *gc.C) { + certs, err := s.client.ListCerts() + c.Assert(err, jc.ErrorIsNil) + + orig := set.NewStrings(origCerts...) + added := set.NewStrings(certs...).Difference(orig) + for _, fingerprint := range added.Values() { + err := s.client.RemoveCertByFingerprint(fingerprint) + if err != nil { + c.Logf("could not remove cert %q: %v", fingerprint, err) + } + } + }) + } +} + +func (s *configFunctionalSuite) TestUsingTCPRemote(c *gc.C) { + if s.client == nil { + c.Skip("LXD not running locally") + } + + cfg := lxdclient.Config{ + Namespace: "my-ns", + Remote: lxdclient.Local, + } + nonlocal, err := cfg.UsingTCPRemote() + c.Assert(err, jc.ErrorIsNil) + + checkValidRemote(c, &nonlocal.Remote) + c.Check(nonlocal, jc.DeepEquals, lxdclient.Config{ + Namespace: "my-ns", + Remote: lxdclient.Remote{ + Name: lxdclient.Local.Name, + Host: nonlocal.Remote.Host, + Cert: nonlocal.Remote.Cert, + ServerPEMCert: nonlocal.Remote.ServerPEMCert, + }, + }) + c.Check(nonlocal.Remote.Host, gc.Not(gc.Equals), "") + c.Check(nonlocal.Remote.Cert.CertPEM, gc.Not(gc.Equals), "") + c.Check(nonlocal.Remote.Cert.KeyPEM, gc.Not(gc.Equals), "") + c.Check(nonlocal.Remote.ServerPEMCert, gc.Not(gc.Equals), "") + // TODO(ericsnow) Check that the server has the certs. +} + +func newLocalClient(c *gc.C) *lxdclient.Client { + client, err := lxdclient.Connect(lxdclient.Config{ + Namespace: "my-ns", + Remote: lxdclient.Local, + }) + if err != nil { + c.Log(err) + return nil + } + return client +} === added file 'src/github.com/juju/juju/tools/lxdclient/export_test.go' --- src/github.com/juju/juju/tools/lxdclient/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,17 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +var NewInstanceSummary = newInstanceSummary + +type RawInstanceClient rawInstanceClient + +func NewInstanceClient(raw RawInstanceClient) *instanceClient { + return &instanceClient{ + raw: rawInstanceClient(raw), + remote: "", + } +} === added file 'src/github.com/juju/juju/tools/lxdclient/instance.go' --- src/github.com/juju/juju/tools/lxdclient/instance.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/instance.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,266 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "fmt" + "math" + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/utils/arch" + "github.com/lxc/lxd/shared" +) + +// Constants related to user metadata. 
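Everything Juju attaches to a container goes under LXD's "user" config namespace so it cannot collide with LXD's own keys; with the constants and helpers defined just below, the round trip looks like this (a sketch):

    key := resolveConfigKey(UserdataKey, MetadataNamespace) // "user.user-data"
    ns, name := splitConfigKey(key)                         // "user", "user-data"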
+const ( + MetadataNamespace = "user" + + // This is defined by the cloud-init code: + // http://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/view/head:/cloudinit/sources/ + // http://cloudinit.readthedocs.org/en/latest/ + // Also see https://github.com/lxc/lxd/blob/master/specs/configuration.md. + UserdataKey = "user-data" + + megabyte = 1024 * 1024 +) + +func resolveConfigKey(name string, namespace ...string) string { + parts := append(namespace, name) + return strings.Join(parts, ".") +} + +func splitConfigKey(key string) (string, string) { + parts := strings.SplitN(key, ".", 2) + if len(parts) == 1 { + return "", parts[0] + } + return parts[0], parts[1] +} + +// AliveStatuses are the LXD statuses that indicate a container is "alive". +var AliveStatuses = []string{ + // TODO(ericsnow) Also support StatusOK, StatusPending, and StatusThawed? + StatusStarting, + StatusStarted, + StatusRunning, +} + +// InstanceSpec holds all the information needed to create a new LXD +// container. +type InstanceSpec struct { + // Name is the "name" of the instance. + Name string + + // Image is the name of the image to use. + Image string + + // ImageRemote identifies the remote to use for images. By default + // the client's remote is used. + ImageRemote string + + // Profiles are the names of the container profiles to apply to the + // new container, in order. + Profiles []string + + // Ephemeral indicates whether or not the container should be + // destroyed when the LXD host is restarted. + Ephemeral bool + + // Metadata is the instance metadata. + Metadata map[string]string + + // TODO(ericsnow) Other possible fields: + // Disks + // Networks + // Metadata + // Tags +} + +func (spec InstanceSpec) config() map[string]string { + return resolveMetadata(spec.Metadata) +} + +func (spec InstanceSpec) info(namespace string) *shared.ContainerInfo { + name := spec.Name + if namespace != "" { + name = namespace + "-" + name + } + + return &shared.ContainerInfo{ + Architecture: "", + Config: spec.config(), + CreationDate: time.Time{}, + Devices: shared.Devices{}, + Ephemeral: spec.Ephemeral, + ExpandedConfig: map[string]string{}, + ExpandedDevices: shared.Devices{}, + Name: name, + Profiles: spec.Profiles, + Status: "", + StatusCode: 0, + } +} + +// Summary builds an InstanceSummary based on the spec and returns it. +func (spec InstanceSpec) Summary(namespace string) InstanceSummary { + info := spec.info(namespace) + return newInstanceSummary(info) +} + +// InstanceHardware describes the hardware characteristics of an LXC container. +type InstanceHardware struct { + // Architecture is the CPU architecture. + Architecture string + + // NumCores is the number of CPU cores. + NumCores uint + + // MemoryMB is the memory allocation for the container. + MemoryMB uint + + // RootDiskMB is the size of the root disk, in MB. + RootDiskMB uint64 +} + +// InstanceSummary captures all the data needed by Instance. +type InstanceSummary struct { + // Name is the "name" of the instance. + Name string + + // Status holds the status of the instance at a certain point in time. + Status string + + // Hardware describes the instance's hardware characteristics. + Hardware InstanceHardware + + // Metadata is the instance metadata.
+ Metadata map[string]string +} + +func newInstanceSummary(info *shared.ContainerInfo) InstanceSummary { + archStr := arch.NormaliseArch(info.Architecture) + + var numCores uint = 0 // default to all + if raw := info.Config["limits.cpu"]; raw != "" { + fmt.Sscanf(raw, "%d", &numCores) + } + + var mem uint = 0 // default to all + if raw := info.Config["limits.memory"]; raw != "" { + result, err := shared.ParseByteSizeString(raw) + if err != nil { + logger.Errorf("failed to parse %s into bytes, ignoring err: %s", raw, err) + mem = 0 + } else { + // We're going to put it into MemoryMB, so adjust by a megabyte + result = result / megabyte + if result > math.MaxUint32 { + logger.Errorf("byte string %s overflowed uint32", raw) + mem = math.MaxUint32 + } else { + mem = uint(result) + } + } + } + + // TODO(ericsnow) Factor this out into a function. + statusStr := info.Status + for status, code := range allStatuses { + if info.StatusCode == code { + statusStr = status + break + } + } + + metadata := extractMetadata(info.Config) + + return InstanceSummary{ + Name: info.Name, + Status: statusStr, + Metadata: metadata, + Hardware: InstanceHardware{ + Architecture: archStr, + NumCores: numCores, + MemoryMB: mem, + }, + } +} + +// Instance represents a single realized LXD container. +type Instance struct { + InstanceSummary + + // spec is the InstanceSpec used to create this instance. + spec *InstanceSpec +} + +func newInstance(info *shared.ContainerInfo, spec *InstanceSpec) *Instance { + summary := newInstanceSummary(info) + return NewInstance(summary, spec) +} + +// NewInstance builds an instance from the provided summary and spec +// and returns it. +func NewInstance(summary InstanceSummary, spec *InstanceSpec) *Instance { + if spec != nil { + // Make a copy. + val := *spec + spec = &val + } + return &Instance{ + InstanceSummary: summary, + spec: spec, + } +} + +// Status returns a string identifying the status of the instance. +func (gi Instance) Status() string { + return gi.InstanceSummary.Status +} + +// CurrentStatus returns a string identifying the status of the instance. +func (gi Instance) CurrentStatus(client *Client) (string, error) { + // TODO(ericsnow) Do this a better way? + + inst, err := client.Instance(gi.Name) + if err != nil { + return "", errors.Trace(err) + } + return inst.Status(), nil +} + +// Metadata returns the user-specified metadata for the instance. +func (gi Instance) Metadata() map[string]string { + // TODO(ericsnow) return a copy? + return gi.InstanceSummary.Metadata +} + +func resolveMetadata(metadata map[string]string) map[string]string { + config := make(map[string]string) + + for name, val := range metadata { + key := resolveConfigKey(name, MetadataNamespace) + config[key] = val + } + + return config +} + +func extractMetadata(config map[string]string) map[string]string { + metadata := make(map[string]string) + + for key, val := range config { + namespace, name := splitConfigKey(key) + if namespace != MetadataNamespace { + continue + } + metadata[name] = val + } + + return metadata +} === added file 'src/github.com/juju/juju/tools/lxdclient/instance_test.go' --- src/github.com/juju/juju/tools/lxdclient/instance_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/instance_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,102 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxdclient_test
+
+import (
+    "fmt"
+    "math"
+    "time"
+
+    jc "github.com/juju/testing/checkers"
+    lxdshared "github.com/lxc/lxd/shared"
+    gc "gopkg.in/check.v1"
+
+    jujutesting "github.com/juju/juju/testing"
+    "github.com/juju/juju/tools/lxdclient"
+)
+
+type instanceSuite struct {
+    jujutesting.BaseSuite
+}
+
+var _ = gc.Suite(&instanceSuite{})
+
+var templateContainerInfo = lxdshared.ContainerInfo{
+    Architecture: "x86_64",
+    Config: map[string]string{
+        "limits.cpu":     "2",
+        "limits.memory":  "256MB",
+        "user.something": "something value",
+    },
+    CreationDate:    time.Now(),
+    Devices:         nil,
+    Ephemeral:       false,
+    ExpandedConfig:  nil,
+    ExpandedDevices: nil,
+    Name:            "container-name",
+    Profiles:        []string{""},
+    Status:          lxdshared.Starting.String(),
+    StatusCode:      lxdshared.Starting,
+}
+
+func (s *instanceSuite) TestNewInstanceSummaryTemplate(c *gc.C) {
+    archStr, err := lxdshared.ArchitectureName(lxdshared.ARCH_64BIT_INTEL_X86)
+    c.Assert(err, jc.ErrorIsNil)
+    c.Check(templateContainerInfo.Architecture, gc.Equals, archStr)
+    summary := lxdclient.NewInstanceSummary(&templateContainerInfo)
+    c.Check(summary.Name, gc.Equals, "container-name")
+    c.Check(summary.Status, gc.Equals, lxdclient.StatusStarting)
+    c.Check(summary.Hardware.Architecture, gc.Equals, "amd64")
+    c.Check(summary.Hardware.NumCores, gc.Equals, uint(2))
+    c.Check(summary.Hardware.MemoryMB, gc.Equals, uint(256))
+    // Not implemented yet, so always zero.
+    c.Check(summary.Hardware.RootDiskMB, gc.Equals, uint64(0))
+    c.Check(summary.Metadata, gc.DeepEquals, map[string]string{"something": "something value"})
+}
+
+func infoWithMemory(mem string) *lxdshared.ContainerInfo {
+    info := templateContainerInfo
+    info.Config = map[string]string{
+        "limits.memory": mem,
+    }
+    return &info
+}
+
+func (s *instanceSuite) TestNewInstanceSummaryMemory(c *gc.C) {
+    // No suffix
+    summary := lxdclient.NewInstanceSummary(infoWithMemory("128"))
+    c.Check(summary.Hardware.MemoryMB, gc.Equals, uint(0))
+    // Invalid integer
+    summary = lxdclient.NewInstanceSummary(infoWithMemory("blah"))
+    c.Check(summary.Hardware.MemoryMB, gc.Equals, uint(0))
+    // Too big to fit in uint
+    tooBig := fmt.Sprintf("%vMB", uint64(math.MaxUint32)+1)
+    summary = lxdclient.NewInstanceSummary(infoWithMemory(tooBig))
+    c.Check(summary.Hardware.MemoryMB, gc.Equals, uint(math.MaxUint32))
+    // Just small enough to fit
+    justEnough := fmt.Sprintf("%vMB", uint(math.MaxUint32)-1)
+    summary = lxdclient.NewInstanceSummary(infoWithMemory(justEnough))
+    c.Check(summary.Hardware.MemoryMB, gc.Equals, uint(math.MaxUint32-1))
+}
+
+func infoWithArchitecture(arch int) *lxdshared.ContainerInfo {
+    info := templateContainerInfo
+    info.Architecture, _ = lxdshared.ArchitectureName(arch)
+    return &info
+}
+
+func (s *instanceSuite) TestNewInstanceSummaryArchitectures(c *gc.C) {
+    summary := lxdclient.NewInstanceSummary(infoWithArchitecture(lxdshared.ARCH_32BIT_INTEL_X86))
+    c.Check(summary.Hardware.Architecture, gc.Equals, "i386")
+    summary = lxdclient.NewInstanceSummary(infoWithArchitecture(lxdshared.ARCH_64BIT_INTEL_X86))
+    c.Check(summary.Hardware.Architecture, gc.Equals, "amd64")
+    summary = lxdclient.NewInstanceSummary(infoWithArchitecture(lxdshared.ARCH_64BIT_POWERPC_LITTLE_ENDIAN))
+    c.Check(summary.Hardware.Architecture, gc.Equals, "ppc64el")
+    info := templateContainerInfo
+    info.Architecture = "unknown"
+    summary = lxdclient.NewInstanceSummary(&info)
+    c.Check(summary.Hardware.Architecture, gc.Equals, "unknown")
+}
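For illustration, a minimal sketch of how a caller outside the test suite might drive the same spec-to-summary path, assuming only the exported lxdclient names shown above; the container name, image, and metadata values are hypothetical:

    package main

    import (
        "fmt"

        "github.com/juju/juju/tools/lxdclient"
    )

    func main() {
        // Metadata keys are stored in the container config under the
        // "user." namespace, e.g. "user-data" becomes "user.user-data".
        spec := lxdclient.InstanceSpec{
            Name:     "spam",          // hypothetical container name
            Image:    "ubuntu-trusty", // hypothetical image name
            Profiles: []string{"default"},
            Metadata: map[string]string{lxdclient.UserdataKey: "#cloud-config\n"},
        }

        // Summary renders the spec as it would look once realized; the
        // namespace, if any, is prefixed onto the container name, and the
        // metadata round-trips through the "user." config keys.
        summary := spec.Summary("juju")
        fmt.Println(summary.Name)                            // juju-spam
        fmt.Println(summary.Metadata[lxdclient.UserdataKey]) // #cloud-config
    }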
=== added file 'src/github.com/juju/juju/tools/lxdclient/lxd_client.go'
--- src/github.com/juju/juju/tools/lxdclient/lxd_client.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/tools/lxdclient/lxd_client.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,48 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxdclient
+
+import (
+    "github.com/lxc/lxd/shared"
+)
+
+// The various status values used for LXD.
+const (
+    StatusStarting = "Starting"
+    StatusStarted  = "Started"
+    StatusRunning  = "Running"
+    StatusFreezing = "Freezing"
+    StatusFrozen   = "Frozen"
+    StatusThawed   = "Thawed"
+    StatusStopping = "Stopping"
+    StatusStopped  = "Stopped"
+
+    StatusOperationCreated = "Operation created"
+    StatusPending          = "Pending"
+    StatusAborting         = "Aborting"
+    StatusCancelling       = "Canceling"
+    StatusCancelled        = "Canceled"
+    StatusSuccess          = "Success"
+    StatusFailure          = "Failure"
+)
+
+var allStatuses = map[string]shared.StatusCode{
+    StatusStarting:         shared.Starting,
+    StatusStarted:          shared.Started,
+    StatusRunning:          shared.Running,
+    StatusFreezing:         shared.Freezing,
+    StatusFrozen:           shared.Frozen,
+    StatusThawed:           shared.Thawed,
+    StatusStopping:         shared.Stopping,
+    StatusStopped:          shared.Stopped,
+    StatusOperationCreated: shared.OperationCreated,
+    StatusPending:          shared.Pending,
+    StatusAborting:         shared.Aborting,
+    StatusCancelling:       shared.Cancelling,
+    StatusCancelled:        shared.Cancelled,
+    StatusSuccess:          shared.Success,
+    StatusFailure:          shared.Failure,
+}
=== added file 'src/github.com/juju/juju/tools/lxdclient/package_test.go'
--- src/github.com/juju/juju/tools/lxdclient/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/tools/lxdclient/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,16 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxdclient_test
+
+import (
+    "testing"
+
+    gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+    gc.TestingT(t)
+}
=== added file 'src/github.com/juju/juju/tools/lxdclient/remote.go'
--- src/github.com/juju/juju/tools/lxdclient/remote.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/tools/lxdclient/remote.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,177 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build go1.3
+
+package lxdclient
+
+import (
+    "github.com/juju/errors"
+    "github.com/juju/utils"
+    lxdshared "github.com/lxc/lxd/shared"
+
+    "github.com/juju/juju/container/lxc"
+)
+
+const (
+    // remoteLocalName is a specific remote name in the default LXD config.
+    // See https://github.com/lxc/lxd/blob/master/config.go:defaultRemote.
+    remoteLocalName  = "local"
+    remoteIDForLocal = remoteLocalName
+)
+
+// Local is LXD's default "remote". Essentially it is an unencrypted,
+// unauthenticated connection to localhost over a unix socket.
+// However it does require users to be in the lxd group.
+var Local = Remote{
+    Name: remoteLocalName,
+    Host: "", // If Host is empty we will translate it into the local Unix socket
+    // No certificates are used when connecting to the Unix socket
+    Cert:          nil,
+    ServerPEMCert: "",
+}
+
+// Remote describes a LXD "remote" server for a client. In
+// particular it holds the information needed for the client
+// to connect to the remote.
+type Remote struct {
+    // Name is a label for this remote.
+    Name string
+
+    // Host identifies the host to which the client should connect.
+    // An empty string is interpreted as:
+    // "localhost over a unix socket (unencrypted)".
+    Host string
+
+    // Cert holds the TLS certificate data for the client to use.
+    Cert *Cert
+
+    // ServerPEMCert is the certificate to be supplied as the acceptable
+    // server certificate when connecting to the remote.
+    ServerPEMCert string
+}
+
+// isLocal determines if the remote is the implicit "local" remote,
+// an unencrypted, unauthenticated unix socket to a locally running LXD.
+func (r Remote) isLocal() bool {
+    return r.Host == Local.Host
+}
+
+// ID identifies the remote to the raw LXD client code. For the
+// non-local case this is Remote.Name. For the local case it is the
+// remote name that LXD special-cases for the local unix socket.
+func (r Remote) ID() string {
+    if r.isLocal() {
+        return remoteIDForLocal
+    }
+    return r.Name
+}
+
+// WithDefaults updates a copy of the remote with default values
+// where needed.
+func (r Remote) WithDefaults() (Remote, error) {
+    // Note that r is a value receiver, so it is an implicit copy.
+
+    if r.isLocal() {
+        return r.withLocalDefaults(), nil
+    }
+
+    if r.Cert == nil {
+        certPEM, keyPEM, err := lxdshared.GenerateMemCert()
+        if err != nil {
+            return r, errors.Trace(err)
+        }
+        cert := NewCert(certPEM, keyPEM)
+        r.Cert = &cert
+    }
+
+    cert, err := r.Cert.WithDefaults()
+    if err != nil {
+        return r, errors.Trace(err)
+    }
+    r.Cert = &cert
+
+    return r, nil
+}
+
+func (r Remote) withLocalDefaults() Remote {
+    if r.Name == "" {
+        r.Name = remoteLocalName
+    }
+
+    // TODO(ericsnow) Set r.Cert to nil?
+
+    return r
+}
+
+// Validate checks the Remote fields for invalid values.
+func (r Remote) Validate() error {
+    if r.Name == "" {
+        return errors.NotValidf("remote missing name")
+    }
+
+    if r.isLocal() {
+        if err := r.validateLocal(); err != nil {
+            return errors.Trace(err)
+        }
+        return nil
+    }
+
+    // TODO(ericsnow) Ensure the host is a valid hostname or address?
+
+    if r.Cert == nil {
+        return errors.NotValidf("remote without cert")
+    } else if err := r.Cert.Validate(); err != nil {
+        return errors.Trace(err)
+    }
+
+    return nil
+}
+
+func (r Remote) validateLocal() error {
+    if r.Cert != nil {
+        return errors.NotValidf("hostless remote with cert")
+    }
+
+    return nil
+}
+
+// UsingTCP converts the remote into a non-local version. For
+// non-local remotes this is a no-op.
+//
+// For a "local" remote (see Local), the remote is changed to one
+// with the host set to the IP address of the local lxcbr0 bridge
+// interface. The remote is also set up for remote access, setting
+// the cert if not already set.
+func (r Remote) UsingTCP() (Remote, error) {
+    // Note that r is a value receiver, so it is an implicit copy.
+
+    if !r.isLocal() {
+        return r, nil
+    }
+
+    // TODO: jam 2016-02-25 This should be updated for systems that are
+    // space aware, as we may not be just using the default LXC
+    // bridge.
+    netIF := lxc.DefaultLxcBridge
+    addr, err := utils.GetAddressForInterface(netIF)
+    if err != nil {
+        return r, errors.Trace(err)
+    }
+    r.Host = addr
+
+    r, err = r.WithDefaults()
+    if err != nil {
+        return r, errors.Trace(err)
+    }
+
+    // TODO(ericsnow) Change r.Name if "local"? Prepend "juju-"?
+
+    return r, nil
+}
+
+// TODO(ericsnow) Add a "Connect(Config)" method that connects
+// to the remote and returns the corresponding Client.
+
+// TODO(ericsnow) Add a "Register" method to Client that adds the remote
+// to the client's remotes?
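For illustration, a minimal sketch of how a caller might assemble a usable remote with the helpers above, assuming only the lxdclient API shown in this file; the remote name and host address are hypothetical:

    package main

    import (
        "fmt"
        "log"

        "github.com/juju/juju/tools/lxdclient"
    )

    func main() {
        // A non-local remote: WithDefaults generates a client certificate
        // (via GenerateMemCert) when none is supplied, after which the
        // remote should pass Validate.
        remote := lxdclient.Remote{
            Name: "my-remote",     // hypothetical name
            Host: "10.0.4.1:8443", // hypothetical address
        }
        remote, err := remote.WithDefaults()
        if err != nil {
            log.Fatal(err)
        }
        if err := remote.Validate(); err != nil {
            log.Fatal(err)
        }
        fmt.Println(remote.ID()) // my-remote

        // The implicit local remote (unix socket) can be converted for
        // TCP access; UsingTCP fills in the lxcbr0 bridge address.
        nonlocal, err := lxdclient.Local.UsingTCP()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(nonlocal.Host)
    }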
=== added file 'src/github.com/juju/juju/tools/lxdclient/remote_test.go' --- src/github.com/juju/juju/tools/lxdclient/remote_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/remote_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,295 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient_test + +import ( + "net" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/container/lxc" + "github.com/juju/juju/tools/lxdclient" +) + +var ( + _ = gc.Suite(&remoteSuite{}) + _ = gc.Suite(&remoteFunctionalSuite{}) +) + +type remoteSuite struct { + lxdclient.BaseSuite +} + +func (s *remoteSuite) TestWithDefaultsNoop(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: s.Cert, + } + updated, err := remote.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + err = updated.Validate() + + c.Check(err, jc.ErrorIsNil) + c.Check(updated, jc.DeepEquals, remote) +} + +func (s *remoteSuite) TestWithDefaultsMissingName(c *gc.C) { + remote := lxdclient.Remote{ + Name: "", + Host: "some-host", + Cert: s.Cert, + } + updated, err := remote.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + + c.Check(updated, jc.DeepEquals, remote) // Name is not updated. +} + +// TODO(ericsnow) Move this test to a functional suite. +func (s *remoteSuite) TestWithDefaultsMissingCert(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: nil, + } + err := remote.Validate() + c.Assert(err, gc.NotNil) // Make sure the original is invalid. + + updated, err := remote.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + err = updated.Validate() + + c.Check(err, jc.ErrorIsNil) + updated.Cert = nil // Validate ensured that the cert was okay. 
+ c.Check(updated, jc.DeepEquals, lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: nil, + }) +} + +func (s *remoteSuite) TestWithDefaultsZeroValue(c *gc.C) { + var remote lxdclient.Remote + updated, err := remote.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + err = updated.Validate() + + c.Check(err, jc.ErrorIsNil) + c.Check(updated, jc.DeepEquals, lxdclient.Remote{ + Name: "local", + Host: "", + Cert: nil, + }) +} + +func (s *remoteSuite) TestWithDefaultsLocalNoop(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-local", + Host: "", + Cert: nil, + } + updated, err := remote.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + err = updated.Validate() + + c.Check(err, jc.ErrorIsNil) + c.Check(updated, jc.DeepEquals, lxdclient.Remote{ + Name: "my-local", + Host: "", + Cert: nil, + }) +} + +func (s *remoteSuite) TestWithDefaultsLocalMissingName(c *gc.C) { + remote := lxdclient.Remote{ + Name: "", + Host: "", + Cert: nil, + } + updated, err := remote.WithDefaults() + c.Assert(err, jc.ErrorIsNil) + err = updated.Validate() + + c.Check(err, jc.ErrorIsNil) + c.Check(updated, jc.DeepEquals, lxdclient.Remote{ + Name: "local", + Host: "", + Cert: nil, + }) +} + +func (s *remoteSuite) TestValidateOkay(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: s.Cert, + } + err := remote.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *remoteSuite) TestValidateZeroValue(c *gc.C) { + var remote lxdclient.Remote + err := remote.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *remoteSuite) TestValidateMissingName(c *gc.C) { + remote := lxdclient.Remote{ + Name: "", + Host: "some-host", + Cert: s.Cert, + } + err := remote.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *remoteSuite) TestValidateMissingCert(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: nil, + } + err := remote.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *remoteSuite) TestValidateBadCert(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: &lxdclient.Cert{}, + } + err := remote.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *remoteSuite) TestValidateLocalOkay(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-local", + Host: "", + Cert: nil, + } + err := remote.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *remoteSuite) TestValidateLocalMissingName(c *gc.C) { + remote := lxdclient.Remote{ + Name: "", + Host: "", + Cert: nil, + } + err := remote.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *remoteSuite) TestValidateLocalWithCert(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-local", + Host: "", + Cert: &lxdclient.Cert{}, + } + err := remote.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *remoteSuite) TestLocal(c *gc.C) { + expected := lxdclient.Remote{ + Name: "local", + Host: "", + Cert: nil, + } + c.Check(lxdclient.Local, jc.DeepEquals, expected) +} + +func (s *remoteSuite) TestIDOkay(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: s.Cert, + } + id := remote.ID() + + c.Check(id, gc.Equals, "my-remote") +} + +func (s *remoteSuite) TestIDLocal(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "", + Cert: s.Cert, + } + id := remote.ID() + + c.Check(id, gc.Equals, "local") +} + +func (s *remoteSuite) TestUsingTCPOkay(c *gc.C) { + c.Skip("not implemented 
yet") + // TODO(ericsnow) Finish this! +} + +func (s *remoteSuite) TestUsingTCPNoop(c *gc.C) { + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "some-host", + Cert: s.Cert, + } + nonlocal, err := remote.UsingTCP() + c.Assert(err, jc.ErrorIsNil) + + c.Check(nonlocal, jc.DeepEquals, remote) +} + +type remoteFunctionalSuite struct { + lxdclient.BaseSuite +} + +func (s *remoteFunctionalSuite) TestUsingTCP(c *gc.C) { + if _, err := net.InterfaceByName(lxc.DefaultLxcBridge); err != nil { + c.Skip("network bridge interface not found") + } + + remote := lxdclient.Remote{ + Name: "my-remote", + Host: "", + Cert: nil, + } + nonlocal, err := remote.UsingTCP() + c.Assert(err, jc.ErrorIsNil) + + checkValidRemote(c, &nonlocal) + c.Check(nonlocal, jc.DeepEquals, lxdclient.Remote{ + Name: "my-remote", + Host: nonlocal.Host, + Cert: nonlocal.Cert, + }) +} + +func checkValidRemote(c *gc.C, remote *lxdclient.Remote) { + c.Check(remote.Host, jc.Satisfies, isValidAddr) + checkValidCert(c, remote.Cert) +} + +func isValidAddr(value interface{}) bool { + addr, ok := value.(string) + if !ok { + return false + } + return net.ParseIP(addr) != nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/testing_test.go' --- src/github.com/juju/juju/tools/lxdclient/testing_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/testing_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,136 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "crypto/x509" + "os" + + "github.com/juju/errors" + "github.com/juju/testing" + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + gc "gopkg.in/check.v1" +) + +type BaseSuite struct { + testing.IsolationSuite + + Stub *testing.Stub + Client *stubClient + Cert *Cert +} + +func (s *BaseSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.Stub = &testing.Stub{} + s.Client = &stubClient{stub: s.Stub} + s.Cert = &Cert{ + Name: "some cert", + CertPEM: []byte(""), + KeyPEM: []byte(""), + } +} + +type stubClient struct { + stub *testing.Stub + + Instance *shared.ContainerState + Instances []shared.ContainerInfo + ReturnCode int + Response *lxd.Response +} + +func (s *stubClient) WaitForSuccess(waitURL string) error { + s.stub.AddCall("WaitForSuccess", waitURL) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubClient) SetServerConfig(key string, value string) (*lxd.Response, error) { + s.stub.AddCall("SetServerConfig", key, value) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Response, nil +} + +func (s *stubClient) CertificateAdd(cert *x509.Certificate, name string) error { + s.stub.AddCall("CertificateAdd", cert, name) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *stubClient) ContainerState(name string) (*shared.ContainerState, error) { + s.stub.AddCall("ContainerState", name) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Instance, nil +} + +func (s *stubClient) ListContainers() ([]shared.ContainerInfo, error) { + s.stub.AddCall("ListContainers") + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Instances, nil +} + +func (s *stubClient) Init(name, remote, image string, profiles *[]string, ephem bool) (*lxd.Response, error) { + s.stub.AddCall("AddInstance", name, remote, image, profiles, ephem) + if 
err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Response, nil +} + +func (s *stubClient) Delete(name string) (*lxd.Response, error) { + s.stub.AddCall("Delete", name) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Response, nil +} + +func (s *stubClient) Action(name string, action shared.ContainerAction, timeout int, force bool) (*lxd.Response, error) { + s.stub.AddCall("Action", name, action, timeout, force) + if err := s.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return s.Response, nil +} + +func (s *stubClient) Exec(name string, cmd []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File) (int, error) { + s.stub.AddCall("Exec", name, cmd, env, stdin, stdout, stderr) + if err := s.stub.NextErr(); err != nil { + return -1, errors.Trace(err) + } + + return s.ReturnCode, nil +} + +func (s *stubClient) SetContainerConfig(name, key, value string) error { + s.stub.AddCall("SetContainerConfig", name, key, value) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} === added file 'src/github.com/juju/juju/tools/lxdclient/utils.go' --- src/github.com/juju/juju/tools/lxdclient/utils.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/tools/lxdclient/utils.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,62 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build go1.3 + +package lxdclient + +import ( + "bytes" + + "github.com/juju/errors" + "github.com/juju/utils/series" + + "github.com/juju/juju/service" + "github.com/juju/juju/service/common" +) + +type closingBuffer struct { + bytes.Buffer +} + +// Close implements io.Closer. +func (closingBuffer) Close() error { + return nil +} + +// IsInstalledLocally returns true if LXD is installed locally. +func IsInstalledLocally() (bool, error) { + names, err := service.ListServices() + if err != nil { + return false, errors.Trace(err) + } + for _, name := range names { + if name == "lxd" { + return true, nil + } + } + return false, nil +} + +// IsRunningLocally returns true if LXD is running locally. +func IsRunningLocally() (bool, error) { + installed, err := IsInstalledLocally() + if err != nil { + return installed, errors.Trace(err) + } + if !installed { + return false, nil + } + + svc, err := service.NewService("lxd", common.Conf{}, series.HostSeries()) + if err != nil { + return false, errors.Trace(err) + } + + running, err := svc.Running() + if err != nil { + return running, errors.Trace(err) + } + + return running, nil +} === removed file 'src/github.com/juju/juju/upgrades/agentconfig.go' --- src/github.com/juju/juju/upgrades/agentconfig.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/agentconfig.go 1970-01-01 00:00:00 +0000 @@ -1,152 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package upgrades - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/juju/errors" - "github.com/juju/utils" - "github.com/juju/utils/symlink" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state/multiwatcher" -) - -var ( - rootLogDir = "/var/log" - rootSpoolDir = "/var/spool/rsyslog" -) - -var chownPath = utils.ChownPath - -var isLocalEnviron = func(envConfig *config.Config) bool { - return envConfig.Type() == "local" -} - -func migrateLocalProviderAgentConfig(context Context) error { - st := context.State() - if st == nil { - logger.Debugf("no state connection, no migration required") - // We're running on a different node than the state server. - return nil - } - envConfig, err := st.EnvironConfig() - if err != nil { - return fmt.Errorf("failed to read current config: %v", err) - } - if !isLocalEnviron(envConfig) { - logger.Debugf("not a local environment, no migration required") - return nil - } - attrs := envConfig.AllAttrs() - rootDir, _ := attrs["root-dir"].(string) - sharedStorageDir := filepath.Join(rootDir, "shared-storage") - // In case these two are empty we need to set them and update the - // environment config. - namespace, _ := attrs["namespace"].(string) - container, _ := attrs["container"].(string) - - if namespace == "" { - username := os.Getenv("USER") - if username == "root" { - // sudo was probably called, get the original user. - username = os.Getenv("SUDO_USER") - } - if username == "" { - return fmt.Errorf("cannot get current user from the environment: %v", os.Environ()) - } - namespace = username + "-" + envConfig.Name() - } - if container == "" { - container = "lxc" - } - - dataDir := rootDir - localLogDir := filepath.Join(rootDir, "log") - // rsyslogd is restricted to write to /var/log - logDir := fmt.Sprintf("%s/juju-%s", rootLogDir, namespace) - jobs := []multiwatcher.MachineJob{multiwatcher.JobManageEnviron} - values := map[string]string{ - agent.Namespace: namespace, - // ContainerType is empty on the bootstrap node. - agent.ContainerType: "", - agent.AgentServiceName: "juju-agent-" + namespace, - } - deprecatedValues := []string{ - "SHARED_STORAGE_ADDR", - "SHARED_STORAGE_DIR", - } - - // Remove shared-storage dir if there. - if err := os.RemoveAll(sharedStorageDir); err != nil { - return fmt.Errorf("cannot remove deprecated %q: %v", sharedStorageDir, err) - } - - // We need to create the dirs if they don't exist. - if err := os.MkdirAll(dataDir, 0755); err != nil { - return fmt.Errorf("cannot create dataDir %q: %v", dataDir, err) - } - // We always recreate the logDir to make sure it's empty. - if err := os.RemoveAll(logDir); err != nil { - return fmt.Errorf("cannot remove logDir %q: %v", logDir, err) - } - if err := os.MkdirAll(logDir, 0755); err != nil { - return fmt.Errorf("cannot create logDir %q: %v", logDir, err) - } - // Reconfigure rsyslog as needed: - // 1. logDir must be owned by syslog:adm - // 2. Remove old rsyslog spool config - // 3. 
Relink logs to the new logDir - if err := chownPath(logDir, "syslog"); err != nil { - return err - } - spoolConfig := fmt.Sprintf("%s/machine-0-%s", rootSpoolDir, namespace) - if err := os.RemoveAll(spoolConfig); err != nil { - return fmt.Errorf("cannot remove %q: %v", spoolConfig, err) - } - allMachinesLog := filepath.Join(logDir, "all-machines.log") - if err := symlink.New(allMachinesLog, localLogDir+"/"); err != nil && !os.IsExist(err) { - return fmt.Errorf("cannot symlink %q to %q: %v", allMachinesLog, localLogDir, err) - } - machine0Log := filepath.Join(localLogDir, "machine-0.log") - if err := symlink.New(machine0Log, logDir+"/"); err != nil && !os.IsExist(err) { - return fmt.Errorf("cannot symlink %q to %q: %v", machine0Log, logDir, err) - } - - newCfg := map[string]interface{}{ - "namespace": namespace, - "container": container, - } - if err := st.UpdateEnvironConfig(newCfg, nil, nil); err != nil { - return fmt.Errorf("cannot update environment config: %v", err) - } - - return context.AgentConfig().Migrate(agent.MigrateParams{ - DataDir: dataDir, - LogDir: logDir, - Jobs: jobs, - Values: values, - DeleteValues: deprecatedValues, - }) -} - -func addEnvironmentUUIDToAgentConfig(context Context) error { - if context.AgentConfig().Environment().Id() != "" { - logger.Infof("environment uuid already set in agent config") - return nil - } - - environTag, err := context.APIState().EnvironTag() - if err != nil { - return errors.Annotate(err, "no environment uuid set on api") - } - - return context.AgentConfig().Migrate(agent.MigrateParams{ - Environment: environTag, - }) -} === removed file 'src/github.com/juju/juju/upgrades/agentconfig_test.go' --- src/github.com/juju/juju/upgrades/agentconfig_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/agentconfig_test.go 1970-01-01 00:00:00 +0000 @@ -1,292 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs/config" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" - "github.com/juju/juju/upgrades" - "github.com/juju/juju/version" -) - -type migrateLocalProviderAgentConfigSuite struct { - jujutesting.JujuConnSuite - - config agent.ConfigSetterWriter - ctx upgrades.Context -} - -var _ = gc.Suite(&migrateLocalProviderAgentConfigSuite{}) - -func (s *migrateLocalProviderAgentConfigSuite) SetUpTest(c *gc.C) { - if runtime.GOOS == "windows" { - c.Skip("No need to test local provider on windows") - } - s.JujuConnSuite.SetUpTest(c) - // Make sure we fallback to SUDO_USER if USER is root. 
- s.PatchEnvironment("USER", "root") - s.PatchEnvironment("SUDO_USER", "user") - s.PatchValue(upgrades.RootLogDir, c.MkDir()) - s.PatchValue(upgrades.RootSpoolDir, c.MkDir()) - s.PatchValue(&agent.DefaultDataDir, c.MkDir()) - s.PatchValue(upgrades.ChownPath, func(_, _ string) error { return nil }) - s.PatchValue(upgrades.IsLocalEnviron, func(_ *config.Config) bool { return true }) -} - -func (s *migrateLocalProviderAgentConfigSuite) primeConfig(c *gc.C, st *state.State, job state.MachineJob, tag names.Tag) { - rootDir := c.MkDir() - sharedStorageDir := filepath.Join(rootDir, "shared-storage") - c.Assert(os.MkdirAll(sharedStorageDir, 0755), gc.IsNil) - localLogDir := filepath.Join(rootDir, "log") - c.Assert(os.MkdirAll(localLogDir, 0755), gc.IsNil) - - initialConfig, err := agent.NewAgentConfig(agent.AgentConfigParams{ - Tag: tag, - Password: "blah", - CACert: testing.CACert, - StateAddresses: []string{"localhost:1111"}, - DataDir: agent.DefaultDataDir, - LogDir: agent.DefaultLogDir, - UpgradedToVersion: version.MustParse("1.16.0"), - Environment: s.State.EnvironTag(), - Values: map[string]string{ - "SHARED_STORAGE_ADDR": "blah", - "SHARED_STORAGE_DIR": sharedStorageDir, - }, - }) - c.Assert(err, jc.ErrorIsNil) - s.config = initialConfig - c.Assert(s.config.Write(), gc.IsNil) - - apiState, _ := s.OpenAPIAsNewMachine(c, job) - s.ctx = &mockContext{ - realAgentConfig: initialConfig, - apiState: apiState, - state: st, - } - - newCfg := (map[string]interface{}{ - "root-dir": rootDir, - }) - err = s.State.UpdateEnvironConfig(newCfg, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *migrateLocalProviderAgentConfigSuite) assertConfigProcessed(c *gc.C) { - envConfig, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - allAttrs := envConfig.AllAttrs() - - namespace, _ := allAttrs["namespace"].(string) - c.Assert(namespace, gc.Equals, "user-dummyenv") - container, _ := allAttrs["container"].(string) - c.Assert(container, gc.Equals, "lxc") - - expectedDataDir, _ := allAttrs["root-dir"].(string) - expectedSharedStorageDir := filepath.Join(expectedDataDir, "shared-storage") - _, err = os.Lstat(expectedSharedStorageDir) - c.Assert(err, gc.NotNil) - c.Assert(err, jc.Satisfies, os.IsNotExist) - expectedLogDir := filepath.Join(*upgrades.RootLogDir, "juju-"+namespace) - expectedJobs := []multiwatcher.MachineJob{multiwatcher.JobManageEnviron} - tag := s.ctx.AgentConfig().Tag() - - // We need to read the actual migrated agent config. 
- configFilePath := agent.ConfigPath(expectedDataDir, tag) - agentConfig, err := agent.ReadConfig(configFilePath) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(agentConfig.DataDir(), gc.Equals, expectedDataDir) - c.Assert(agentConfig.LogDir(), gc.Equals, expectedLogDir) - c.Assert(agentConfig.Jobs(), gc.DeepEquals, expectedJobs) - c.Assert(agentConfig.Value("SHARED_STORAGE_ADDR"), gc.Equals, "") - c.Assert(agentConfig.Value("SHARED_STORAGE_DIR"), gc.Equals, "") - c.Assert(agentConfig.Value(agent.Namespace), gc.Equals, namespace) - agentService := "juju-agent-user-dummyenv" - c.Assert(agentConfig.Value(agent.AgentServiceName), gc.Equals, agentService) - c.Assert(agentConfig.Value(agent.ContainerType), gc.Equals, "") -} - -func (s *migrateLocalProviderAgentConfigSuite) assertConfigNotProcessed(c *gc.C) { - envConfig, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - allAttrs := envConfig.AllAttrs() - - namespace, _ := allAttrs["namespace"].(string) - c.Assert(namespace, gc.Equals, "") - container, _ := allAttrs["container"].(string) - c.Assert(container, gc.Equals, "") - - rootDir, _ := allAttrs["root-dir"].(string) - expectedSharedStorageDir := filepath.Join(rootDir, "shared-storage") - _, err = os.Lstat(expectedSharedStorageDir) - c.Assert(err, jc.ErrorIsNil) - tag := s.ctx.AgentConfig().Tag() - - // We need to read the actual migrated agent config. - configFilePath := agent.ConfigPath(agent.DefaultDataDir, tag) - agentConfig, err := agent.ReadConfig(configFilePath) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(agentConfig.DataDir(), gc.Equals, agent.DefaultDataDir) - c.Assert(agentConfig.LogDir(), gc.Equals, agent.DefaultLogDir) - c.Assert(agentConfig.Jobs(), gc.HasLen, 0) - c.Assert(agentConfig.Value("SHARED_STORAGE_ADDR"), gc.Equals, "blah") - c.Assert(agentConfig.Value("SHARED_STORAGE_DIR"), gc.Equals, expectedSharedStorageDir) - c.Assert(agentConfig.Value(agent.Namespace), gc.Equals, "") - c.Assert(agentConfig.Value(agent.AgentServiceName), gc.Equals, "") - c.Assert(agentConfig.Value(agent.ContainerType), gc.Equals, "") -} -func (s *migrateLocalProviderAgentConfigSuite) TestMigrateStateServer(c *gc.C) { - s.primeConfig(c, s.State, state.JobManageEnviron, names.NewMachineTag("0")) - err := upgrades.MigrateLocalProviderAgentConfig(s.ctx) - c.Assert(err, jc.ErrorIsNil) - err = s.config.Write() - c.Assert(err, jc.ErrorIsNil) - s.assertConfigProcessed(c) -} - -func (s *migrateLocalProviderAgentConfigSuite) TestMigrateNonLocalEnvNotDone(c *gc.C) { - s.PatchValue(upgrades.IsLocalEnviron, func(_ *config.Config) bool { return false }) - s.primeConfig(c, s.State, state.JobManageEnviron, names.NewMachineTag("0")) - err := upgrades.MigrateLocalProviderAgentConfig(s.ctx) - c.Assert(err, jc.ErrorIsNil) - err = s.config.Write() - c.Assert(err, jc.ErrorIsNil) - s.assertConfigNotProcessed(c) -} - -func (s *migrateLocalProviderAgentConfigSuite) TestMigrateWithoutStateConnectionNotDone(c *gc.C) { - s.primeConfig(c, nil, state.JobManageEnviron, names.NewMachineTag("0")) - err := upgrades.MigrateLocalProviderAgentConfig(s.ctx) - c.Assert(err, jc.ErrorIsNil) - err = s.config.Write() - c.Assert(err, jc.ErrorIsNil) - s.assertConfigNotProcessed(c) -} - -func (s *migrateLocalProviderAgentConfigSuite) TestIdempotent(c *gc.C) { - s.primeConfig(c, s.State, state.JobManageEnviron, names.NewMachineTag("0")) - err := upgrades.MigrateLocalProviderAgentConfig(s.ctx) - c.Assert(err, jc.ErrorIsNil) - err = s.config.Write() - c.Assert(err, jc.ErrorIsNil) - s.assertConfigProcessed(c) - - err = 
upgrades.MigrateLocalProviderAgentConfig(s.ctx) - c.Assert(err, jc.ErrorIsNil) - err = s.config.Write() - c.Assert(err, jc.ErrorIsNil) - s.assertConfigProcessed(c) -} - -type migrateAgentEnvUUIDSuite struct { - jujutesting.JujuConnSuite - machine *state.Machine - password string - ctx *mockContext -} - -var _ = gc.Suite(&migrateAgentEnvUUIDSuite{}) - -func (s *migrateAgentEnvUUIDSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.PatchValue(&agent.DefaultLogDir, c.MkDir()) - s.PatchValue(&agent.DefaultDataDir, c.MkDir()) - s.primeConfig(c) -} - -func (s *migrateAgentEnvUUIDSuite) primeConfig(c *gc.C) { - s.machine, s.password = s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{ - Nonce: "a nonce", - }) - initialConfig, err := agent.NewAgentConfig(agent.AgentConfigParams{ - Tag: s.machine.Tag(), - Password: s.password, - CACert: testing.CACert, - StateAddresses: []string{"localhost:1111"}, - DataDir: agent.DefaultDataDir, - LogDir: agent.DefaultLogDir, - UpgradedToVersion: version.MustParse("1.22.0"), - Environment: s.State.EnvironTag(), - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(initialConfig.Write(), gc.IsNil) - - apiState, _ := s.OpenAPIAsNewMachine(c) - s.ctx = &mockContext{ - realAgentConfig: initialConfig, - apiState: apiState, - state: s.State, - } -} - -func (s *migrateAgentEnvUUIDSuite) removeEnvUUIDFromAgentConfig(c *gc.C) { - // Read the file in as simple map[string]interface{} and delete - // the element, and write it back out again. - - // First step, read the file contents. - filename := agent.ConfigPath(agent.DefaultDataDir, s.machine.Tag()) - data, err := ioutil.ReadFile(filename) - c.Assert(err, jc.ErrorIsNil) - c.Logf("Data in:\n\n%s\n", data) - - // Parse it into the map. - var content map[string]interface{} - err = goyaml.Unmarshal(data, &content) - c.Assert(err, jc.ErrorIsNil) - - // Remove the environment value, and marshal back into bytes. - delete(content, "environment") - data, err = goyaml.Marshal(content) - c.Assert(err, jc.ErrorIsNil) - - // Write the yaml back out remembering to add the format prefix. - data = append([]byte("# format 1.18\n"), data...) - c.Logf("Data out:\n\n%s\n", data) - err = ioutil.WriteFile(filename, data, 0644) - c.Assert(err, jc.ErrorIsNil) - - // Reset test attributes. - cfg, err := agent.ReadConfig(filename) - c.Assert(err, jc.ErrorIsNil) - s.ctx.realAgentConfig = cfg -} - -func (s *migrateAgentEnvUUIDSuite) TestAgentEnvironmentUUID(c *gc.C) { - c.Assert(s.ctx.realAgentConfig.Environment(), gc.Equals, s.State.EnvironTag()) -} - -func (s *migrateAgentEnvUUIDSuite) TestRemoveFuncWorks(c *gc.C) { - s.removeEnvUUIDFromAgentConfig(c) - c.Assert(s.ctx.realAgentConfig.Environment().Id(), gc.Equals, "") -} - -func (s *migrateAgentEnvUUIDSuite) TestMigrationStep(c *gc.C) { - s.removeEnvUUIDFromAgentConfig(c) - upgrades.AddEnvironmentUUIDToAgentConfig(s.ctx) - c.Assert(s.ctx.realAgentConfig.Environment(), gc.Equals, s.State.EnvironTag()) -} - -func (s *migrateAgentEnvUUIDSuite) TestMigrationStepIdempotent(c *gc.C) { - s.removeEnvUUIDFromAgentConfig(c) - upgrades.AddEnvironmentUUIDToAgentConfig(s.ctx) - upgrades.AddEnvironmentUUIDToAgentConfig(s.ctx) - c.Assert(s.ctx.realAgentConfig.Environment(), gc.Equals, s.State.EnvironTag()) -} === removed file 'src/github.com/juju/juju/upgrades/block.go' --- src/github.com/juju/juju/upgrades/block.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/block.go 1970-01-01 00:00:00 +0000 @@ -1,94 +0,0 @@ -// Copyright 2015 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "github.com/juju/errors" - - "github.com/juju/juju/api/block" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" -) - -func moveBlocksFromEnvironToState(context Context) error { - logger.Infof("recording existing blocks") - st := context.State() - if st == nil { - logger.Debugf("no state connection, no block recording required") - // We're running on a different node than the state server. - return nil - } - blocks, err := getCurrentBlocks(st) - - if err != nil { - return errors.Trace(err) - } - err = upgradeBlocks(context, blocks) - if err != nil { - return errors.Trace(err) - } - err = removeBlockEnvVar(st) - if err != nil { - return errors.Trace(err) - } - return nil -} - -func upgradeBlocks(context Context, blocks []string) error { - if len(blocks) == 0 { - // no existing blocks = nothing to do here :) - return nil - } - blockClient := block.NewClient(context.APIState()) - for _, one := range blocks { - err := blockClient.SwitchBlockOn(one, "") - if err != nil { - return errors.Annotatef(err, "switching on %v", one) - } - } - return nil -} - -func getCurrentBlocks(st *state.State) ([]string, error) { - cfg, err := getEnvironConfig(st) - if err != nil { - return nil, errors.Trace(err) - } - return getBlocks(cfg), nil -} - -func getEnvironConfig(st *state.State) (*config.Config, error) { - envConfig, err := st.EnvironConfig() - if err != nil { - return nil, errors.Annotatef(err, "reading current config") - } - return envConfig, nil -} - -func getBlocks(cfg *config.Config) []string { - var blocks []string - addBlock := func(aType string) { - blocks = append(blocks, aType) - } - - if cfg.PreventAllChanges() { - addBlock(state.ChangeBlock.String()) - } - if cfg.PreventRemoveObject() { - addBlock(state.RemoveBlock.String()) - } - if cfg.PreventDestroyEnvironment() { - addBlock(state.DestroyBlock.String()) - } - return blocks -} - -func removeBlockEnvVar(st *state.State) error { - removeAttrs := []string{ - config.PreventAllChangesKey, - config.PreventDestroyEnvironmentKey, - config.PreventRemoveObjectKey, - } - return st.UpdateEnvironConfig(map[string]interface{}{}, removeAttrs, nil) -} === removed file 'src/github.com/juju/juju/upgrades/block_test.go' --- src/github.com/juju/juju/upgrades/block_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/block_test.go 1970-01-01 00:00:00 +0000 @@ -1,88 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package upgrades_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/api/block" - "github.com/juju/juju/juju" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/upgrades" -) - -type blockSuite struct { - jujutesting.JujuConnSuite - ctx upgrades.Context - blockClient *block.Client -} - -var _ = gc.Suite(&blockSuite{}) - -func (s *blockSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - conn, err := juju.NewAPIState(s.AdminUserTag(c), s.Environ, api.DialOpts{}) - c.Assert(err, jc.ErrorIsNil) - s.AddCleanup(func(*gc.C) { conn.Close() }) - - s.ctx = &mockContext{ - agentConfig: &mockAgentConfig{dataDir: s.DataDir()}, - apiState: conn, - state: s.State, - } - s.blockClient = block.NewClient(conn) -} - -func (s *blockSuite) TestUpdateBlocksNone(c *gc.C) { - err := upgrades.MoveBlocksFromEnvironToState(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.ensureBlocksUpdated(c, nil) - s.ensureBlocksRemovedFromEnvConfig(c) -} - -func (s *blockSuite) ensureBlocksRemovedFromEnvConfig(c *gc.C) { - cfg, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - attrs := cfg.AllAttrs() - _, exists := attrs["block-destroy-environment"] - c.Assert(exists, jc.IsFalse) - _, exists = attrs["block-remove-object"] - c.Assert(exists, jc.IsFalse) - _, exists = attrs["block-all-changes"] - c.Assert(exists, jc.IsFalse) -} - -func (s *blockSuite) ensureBlocksUpdated(c *gc.C, expected []string) { - blocks, err := s.blockClient.List() - c.Assert(err, jc.ErrorIsNil) - - var types []string - for _, ablock := range blocks { - types = append(types, ablock.Type) - } - c.Assert(types, jc.SameContents, expected) -} - -func (s *blockSuite) TestUpgradeBlocks(c *gc.C) { - err := s.State.UpdateEnvironConfig(map[string]interface{}{ - "block-destroy-environment": true, - "block-remove-object": true, - "block-all-changes": true, - }, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.MoveBlocksFromEnvironToState(s.ctx) - - c.Assert(err, jc.ErrorIsNil) - s.ensureBlocksUpdated(c, []string{ - state.ChangeBlock.String(), - state.DestroyBlock.String(), - state.RemoveBlock.String(), - }) - s.ensureBlocksRemovedFromEnvConfig(c) -} === removed file 'src/github.com/juju/juju/upgrades/charmstorage.go' --- src/github.com/juju/juju/upgrades/charmstorage.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/charmstorage.go 1970-01-01 00:00:00 +0000 @@ -1,140 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "path/filepath" - - "github.com/juju/errors" - "github.com/juju/utils" - "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/agent" - "github.com/juju/juju/provider" - "github.com/juju/juju/state" - "github.com/juju/juju/state/storage" -) - -var ( - charmBundleURL = (*state.Charm).BundleURL - charmStoragePath = (*state.Charm).StoragePath - stateAddCharmStoragePaths = state.AddCharmStoragePaths -) - -// migrateCharmStorage copies uploaded charms from provider storage -// to environment storage, and then adds the storage path into the -// charm's document in state. 
-func migrateCharmStorage(st *state.State, agentConfig agent.Config) error { - logger.Debugf("migrating charms to environment storage") - charms, err := st.AllCharms() - if err != nil { - return err - } - services, err := st.AllServices() - if err != nil { - return err - } - usedCharms := make(set.Strings) - for _, service := range services { - url, _ := service.CharmURL() - usedCharms.Add(url.String()) - - units, err := service.AllUnits() - if err != nil { - return err - } - for _, unit := range units { - if url, ok := unit.CharmURL(); ok { - usedCharms.Add(url.String()) - } - } - } - storage := storage.NewStorage(st.EnvironUUID(), st.MongoSession()) - - // Local and manual provider host storage on the state server's - // filesystem, and serve via HTTP storage. The storage worker - // doesn't run yet, so we just open the files directly. - fetchCharmArchive := fetchCharmArchive - providerType := agentConfig.Value(agent.ProviderType) - if providerType == provider.Local || provider.IsManual(providerType) { - storageDir := agentConfig.Value(agent.StorageDir) - fetchCharmArchive = localstorage{storageDir}.fetchCharmArchive - } - - storagePaths := make(map[*charm.URL]string) - for _, ch := range charms { - if ch.IsPlaceholder() { - logger.Debugf("skipping %s, placeholder charm", ch.URL()) - continue - } - if !ch.IsUploaded() { - logger.Debugf("skipping %s, not uploaded to provider storage", ch.URL()) - continue - } - curl := ch.URL() - if !usedCharms.Contains(curl.String()) { - logger.Debugf("skipping %s, not used by any service or unit", ch.URL()) - continue - } - if charmStoragePath(ch) != "" { - logger.Debugf("skipping %s, already in environment storage", ch.URL()) - continue - } - url := charmBundleURL(ch) - if url == nil { - logger.Debugf("skipping %s, has no bundle URL", ch.URL()) - continue - } - uuid, err := utils.NewUUID() - if err != nil { - return err - } - data, err := fetchCharmArchive(url) - if err != nil { - return err - } - - storagePath := fmt.Sprintf("charms/%s-%s", curl, uuid) - logger.Debugf("uploading %s to %q in environment storage", curl, storagePath) - err = storage.Put(storagePath, bytes.NewReader(data), int64(len(data))) - if err != nil { - return errors.Annotatef(err, "failed to upload %s to storage", curl) - } - storagePaths[curl] = storagePath - } - - return stateAddCharmStoragePaths(st, storagePaths) -} - -func fetchCharmArchive(url *url.URL) ([]byte, error) { - client := utils.GetNonValidatingHTTPClient() - resp, err := client.Get(url.String()) - if err != nil { - return nil, errors.Annotatef(err, "cannot get %q", url) - } - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, errors.Annotatef(err, "cannot read charm archive") - } - if resp.StatusCode != http.StatusOK { - return nil, errors.Errorf("cannot get %q: %s %s", url, resp.Status, body) - } - return body, nil -} - -type localstorage struct { - storageDir string -} - -func (s localstorage) fetchCharmArchive(url *url.URL) ([]byte, error) { - path := filepath.Join(s.storageDir, url.Path) - return ioutil.ReadFile(path) -} === removed file 'src/github.com/juju/juju/upgrades/charmstorage_test.go' --- src/github.com/juju/juju/upgrades/charmstorage_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/charmstorage_test.go 1970-01-01 00:00:00 +0000 @@ -1,169 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package upgrades_test - -import ( - "io/ioutil" - "net/url" - "path/filepath" - "strings" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils/set" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/state/storage" - "github.com/juju/juju/testing/factory" - "github.com/juju/juju/upgrades" -) - -type migrateCharmStorageSuite struct { - jujutesting.JujuConnSuite - - bundleURLs map[string]*url.URL -} - -var _ = gc.Suite(&migrateCharmStorageSuite{}) - -func (s *migrateCharmStorageSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.bundleURLs = make(map[string]*url.URL) - - s.PatchValue(upgrades.CharmBundleURL, func(ch *state.Charm) *url.URL { - return s.bundleURLs[ch.URL().String()] - }) - s.PatchValue(upgrades.CharmStoragePath, func(ch *state.Charm) string { - // pretend none of the charms have storage paths - return "" - }) -} - -func (s *migrateCharmStorageSuite) TestMigrateCharmStorage(c *gc.C) { - stor := s.Environ.(environs.EnvironStorage).Storage() - err := stor.Put("somewhere", strings.NewReader("abc"), 3) - c.Assert(err, jc.ErrorIsNil) - - dummyCharm := s.AddTestingCharm(c, "dummy") - s.AddTestingService(c, "dummy", dummyCharm) - dummyCharmURL, err := stor.URL("somewhere") - c.Assert(err, jc.ErrorIsNil) - url, err := url.Parse(dummyCharmURL) - c.Assert(err, jc.ErrorIsNil) - s.bundleURLs[dummyCharm.URL().String()] = url - - s.testMigrateCharmStorage(c, dummyCharm.URL(), &mockAgentConfig{}) -} - -func (s *migrateCharmStorageSuite) TestMigrateCharmStorageLocalstorage(c *gc.C) { - storageDir := c.MkDir() - err := ioutil.WriteFile(filepath.Join(storageDir, "somewhere"), []byte("abc"), 0644) - c.Assert(err, jc.ErrorIsNil) - - dummyCharm := s.AddTestingCharm(c, "dummy") - url := &url.URL{Scheme: "https", Host: "localhost:8040", Path: "/somewhere"} - s.AddTestingService(c, "dummy", dummyCharm) - s.bundleURLs[dummyCharm.URL().String()] = url - - s.testMigrateCharmStorage(c, dummyCharm.URL(), &mockAgentConfig{ - values: map[string]string{ - agent.ProviderType: "local", - agent.StorageDir: storageDir, - }, - }) -} - -func (s *migrateCharmStorageSuite) testMigrateCharmStorage(c *gc.C, curl *charm.URL, agentConfig agent.Config) { - curlPlaceholder := charm.MustParseURL("cs:quantal/dummy-1") - err := s.State.AddStoreCharmPlaceholder(curlPlaceholder) - c.Assert(err, jc.ErrorIsNil) - - curlPending := charm.MustParseURL("cs:quantal/missing-123") - _, err = s.State.PrepareStoreCharmUpload(curlPending) - c.Assert(err, jc.ErrorIsNil) - - var storagePath string - var called bool - s.PatchValue(upgrades.StateAddCharmStoragePaths, func(st *state.State, storagePaths map[*charm.URL]string) error { - c.Assert(storagePaths, gc.HasLen, 1) - for k, v := range storagePaths { - c.Assert(k.String(), gc.Equals, curl.String()) - storagePath = v - } - called = true - return nil - }) - err = upgrades.MigrateCharmStorage(s.State, agentConfig) - c.Assert(err, jc.ErrorIsNil) - c.Assert(called, jc.IsTrue) - - storage := storage.NewStorage(s.State.EnvironUUID(), s.State.MongoSession()) - r, length, err := storage.Get(storagePath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(r, gc.NotNil) - defer r.Close() - c.Assert(length, gc.Equals, int64(3)) - data, err := ioutil.ReadAll(r) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, "abc") -} - -func (s *migrateCharmStorageSuite) TestMigrateCharmStorageIdempotency(c 
*gc.C) {
-    // If MigrateCharmStorage is called a second time, it will
-    // leave alone the charms that have already been migrated.
-    // The final step of migration is a transactional update
-    // of the charm document in state, which is what we base
-    // the decision on.
-    s.PatchValue(upgrades.CharmStoragePath, func(ch *state.Charm) string {
-        return "alreadyset"
-    })
-    dummyCharm := s.AddTestingCharm(c, "dummy")
-    s.AddTestingService(c, "dummy", dummyCharm)
-    var called bool
-    s.PatchValue(upgrades.StateAddCharmStoragePaths, func(st *state.State, storagePaths map[*charm.URL]string) error {
-        c.Assert(storagePaths, gc.HasLen, 0)
-        called = true
-        return nil
-    })
-    err := upgrades.MigrateCharmStorage(s.State, &mockAgentConfig{})
-    c.Assert(err, jc.ErrorIsNil)
-    c.Assert(called, jc.IsTrue)
-}
-
-func (s *migrateCharmStorageSuite) TestMigrateCharmUnused(c *gc.C) {
-    s.AddTestingCharm(c, "dummy") // used by nothing
-    ch2 := s.Factory.MakeCharm(c, &factory.CharmParams{
-        Name:     "mysql",
-        Revision: "1",
-    }) // used by unit, not service
-    ch3 := s.Factory.MakeCharm(c, &factory.CharmParams{
-        Name:     "mysql",
-        Revision: "2",
-    }) // used by service, not unit
-    mysql := s.AddTestingService(c, "mysql", ch2)
-    s.Factory.MakeUnit(c, &factory.UnitParams{
-        Service:     mysql,
-        SetCharmURL: true,
-    })
-    err := mysql.SetCharm(ch3, false)
-    c.Assert(err, jc.ErrorIsNil)
-
-    expect := make(set.Strings)
-    expect.Add(ch2.URL().String())
-    expect.Add(ch3.URL().String())
-    s.PatchValue(upgrades.CharmStoragePath, func(ch *state.Charm) string {
-        curl := ch.URL().String()
-        c.Check(curl, jc.Satisfies, expect.Contains)
-        expect.Remove(curl)
-        return "alreadyset"
-    })
-
-    err = upgrades.MigrateCharmStorage(s.State, &mockAgentConfig{})
-    c.Assert(err, jc.ErrorIsNil)
-    c.Assert(expect, gc.HasLen, 0) // emptied by CharmStoragePath
-}
=== modified file 'src/github.com/juju/juju/upgrades/contexts.go'
--- src/github.com/juju/juju/upgrades/contexts.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/upgrades/contexts.go 2016-03-22 15:18:22 +0000
@@ -12,10 +12,15 @@
 // Context provides the dependencies used when executing upgrade steps.
 type Context interface {
     // APIState returns an API connection to state.
+    //
+    // TODO(mjs) - for 2.0, this should return a base.APICaller
+    // instead of api.Connection once the 1.x upgrade steps have been
+    // removed. Upgrade steps should not be able to close the API
+    // connection.
     APIState() api.Connection
 
     // State returns a connection to state. This will be non-nil
-    // only in the context of a state server.
+    // only in the context of a controller.
     State() *state.State
 
     // AgentConfig returns the agent config for the machine that is being
=== removed file 'src/github.com/juju/juju/upgrades/customimagemetadata.go'
--- src/github.com/juju/juju/upgrades/customimagemetadata.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/upgrades/customimagemetadata.go 1970-01-01 00:00:00 +0000
@@ -1,76 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
- -package upgrades - -import ( - "bytes" - "io/ioutil" - - "github.com/juju/errors" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/filestorage" - "github.com/juju/juju/environs/storage" - "github.com/juju/juju/provider" - "github.com/juju/juju/state" - statestorage "github.com/juju/juju/state/storage" -) - -var newStateStorage = statestorage.NewStorage - -// migrateCustomImageMetadata copies uploaded image metadata from provider -// storage to environment storage, preserving paths. -func migrateCustomImageMetadata(st *state.State, agentConfig agent.Config) error { - logger.Debugf("migrating custom image metadata to environment storage") - estor := newStateStorage(st.EnvironUUID(), st.MongoSession()) - - // Local and manual provider host storage on the state server's - // filesystem, and serve via HTTP storage. The storage worker - // doesn't run yet, so we just open the files directly. - var pstor storage.StorageReader - providerType := agentConfig.Value(agent.ProviderType) - if providerType == provider.Local || provider.IsManual(providerType) { - storageDir := agentConfig.Value(agent.StorageDir) - var err error - pstor, err = filestorage.NewFileStorageReader(storageDir) - if err != nil { - return errors.Annotate(err, "cannot get local filesystem storage reader") - } - } else { - var err error - pstor, err = environs.LegacyStorage(st) - if errors.IsNotSupported(err) { - return nil - } else if err != nil { - return errors.Annotate(err, "cannot get provider storage") - } - } - - paths, err := pstor.List(storage.BaseImagesPath) - if err != nil { - return err - } - for _, path := range paths { - logger.Infof("migrating image metadata at path %q", path) - data, err := readImageMetadata(pstor, path) - if err != nil { - return errors.Annotate(err, "failed to read image metadata") - } - err = estor.Put(path, bytes.NewReader(data), int64(len(data))) - if err != nil { - return errors.Annotate(err, "failed to write image metadata") - } - } - return nil -} - -func readImageMetadata(stor storage.StorageReader, path string) ([]byte, error) { - r, err := stor.Get(path) - if err != nil { - return nil, err - } - defer r.Close() - return ioutil.ReadAll(r) -} === removed file 'src/github.com/juju/juju/upgrades/customimagemetadata_test.go' --- src/github.com/juju/juju/upgrades/customimagemetadata_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/customimagemetadata_test.go 1970-01-01 00:00:00 +0000 @@ -1,61 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package upgrades_test - -import ( - "bytes" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/mgo.v2" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/filestorage" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state/storage" - statetesting "github.com/juju/juju/state/testing" - "github.com/juju/juju/upgrades" -) - -type migrateCustomImageMetadataStorageSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&migrateCustomImageMetadataStorageSuite{}) - -func (s *migrateCustomImageMetadataStorageSuite) TestMigrateCustomImageMetadata(c *gc.C) { - stor := s.Environ.(environs.EnvironStorage).Storage() - for path, content := range customImageMetadata { - err := stor.Put(path, bytes.NewReader(content), int64(len(content))) - c.Assert(err, jc.ErrorIsNil) - } - s.testMigrateCustomImageMetadata(c, &mockAgentConfig{}) -} - -func (s *migrateCustomImageMetadataStorageSuite) TestMigrateCustomImageMetadataLocalstorage(c *gc.C) { - storageDir := c.MkDir() - stor, err := filestorage.NewFileStorageWriter(storageDir) - c.Assert(err, jc.ErrorIsNil) - for path, content := range customImageMetadata { - err := stor.Put(path, bytes.NewReader(content), int64(len(content))) - c.Assert(err, jc.ErrorIsNil) - } - s.testMigrateCustomImageMetadata(c, &mockAgentConfig{ - values: map[string]string{ - agent.ProviderType: "local", - agent.StorageDir: storageDir, - }, - }) -} - -func (s *migrateCustomImageMetadataStorageSuite) testMigrateCustomImageMetadata(c *gc.C, agentConfig agent.Config) { - var stor statetesting.MapStorage - s.PatchValue(upgrades.NewStateStorage, func(string, *mgo.Session) storage.Storage { - return &stor - }) - err := upgrades.MigrateCustomImageMetadata(s.State, agentConfig) - c.Assert(err, jc.ErrorIsNil) - c.Assert(stor.Map, gc.DeepEquals, customImageMetadata) -} === removed file 'src/github.com/juju/juju/upgrades/deprecatedenvsettings.go' --- src/github.com/juju/juju/upgrades/deprecatedenvsettings.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/upgrades/deprecatedenvsettings.go 1970-01-01 00:00:00 +0000 @@ -1,19 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -func processDeprecatedEnvSettings(context Context) error { - st := context.State() - removeAttrs := []string{ - "public-bucket", - "public-bucket-region", - "public-bucket-url", - "default-image-id", - "default-instance-type", - "shared-storage-port", - "tools-url", - } - // TODO (wallyworld) - delete lxc-use-clone in 1.22 - return st.UpdateEnvironConfig(map[string]interface{}{}, removeAttrs, nil) -} === removed file 'src/github.com/juju/juju/upgrades/deprecatedenvsettings_test.go' --- src/github.com/juju/juju/upgrades/deprecatedenvsettings_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/upgrades/deprecatedenvsettings_test.go 1970-01-01 00:00:00 +0000 @@ -1,84 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package upgrades_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/upgrades" -) - -type processDeprecatedEnvSettingsSuite struct { - jujutesting.JujuConnSuite - ctx upgrades.Context -} - -var _ = gc.Suite(&processDeprecatedEnvSettingsSuite{}) - -func (s *processDeprecatedEnvSettingsSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - s.ctx = &mockContext{ - agentConfig: &mockAgentConfig{dataDir: s.DataDir()}, - apiState: apiState, - state: s.State, - } - // Add in old environment settings. - newCfg := map[string]interface{}{ - "public-bucket": "foo", - "public-bucket-region": "bar", - "public-bucket-url": "shazbot", - "default-instance-type": "vulch", - "default-image-id": "1234", - "shared-storage-port": 1234, - "tools-url": "some.special.url.com", - } - err := s.State.UpdateEnvironConfig(newCfg, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *processDeprecatedEnvSettingsSuite) TestEnvSettingsSet(c *gc.C) { - cfg, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - allAttrs := cfg.AllAttrs() - c.Assert(allAttrs["public-bucket"], gc.Equals, "foo") - c.Assert(allAttrs["public-bucket-region"], gc.Equals, "bar") - c.Assert(allAttrs["public-bucket-url"], gc.Equals, "shazbot") - c.Assert(allAttrs["default-instance-type"], gc.Equals, "vulch") - c.Assert(allAttrs["default-image-id"], gc.Equals, "1234") - c.Assert(allAttrs["shared-storage-port"], gc.Equals, 1234) - c.Assert(allAttrs["tools-url"], gc.Equals, "some.special.url.com") -} - -func (s *processDeprecatedEnvSettingsSuite) assertConfigProcessed(c *gc.C) { - cfg, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - allAttrs := cfg.AllAttrs() - for _, deprecated := range []string{ - "public-bucket", "public-bucket-region", "public-bucket-url", - "default-image-id", "default-instance-type", "shared-storage-port", "tools-url", - } { - _, ok := allAttrs[deprecated] - c.Assert(ok, jc.IsFalse) - } -} - -func (s *processDeprecatedEnvSettingsSuite) TestOldConfigRemoved(c *gc.C) { - err := upgrades.ProcessDeprecatedEnvSettings(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertConfigProcessed(c) -} - -func (s *processDeprecatedEnvSettingsSuite) TestIdempotent(c *gc.C) { - err := upgrades.ProcessDeprecatedEnvSettings(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertConfigProcessed(c) - - err = upgrades.ProcessDeprecatedEnvSettings(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertConfigProcessed(c) -} === modified file 'src/github.com/juju/juju/upgrades/doc.go' --- src/github.com/juju/juju/upgrades/doc.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/upgrades/doc.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,6 @@ // PerformUpgrade, which is invoked on each node by the machine agent with: // fromVersion - the Juju version from which the upgrade is occurring // target - the type of Juju node being upgraded -// context - provides API access to Juju state servers +// context - provides API access to Juju controllers // package upgrades === removed file 'src/github.com/juju/juju/upgrades/dotprofile.go' --- src/github.com/juju/juju/upgrades/dotprofile.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/upgrades/dotprofile.go 1970-01-01 00:00:00 +0000 @@ -1,37 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
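The doc.go change above describes PerformUpgrade's three inputs: the version being upgraded from, the node type being upgraded, and a context. A simplified sketch of how those inputs gate which steps run (these types are illustrative, not the juju ones; real version comparison is semantic, but plain string comparison suffices for the demo values):

    package main

    import "fmt"

    type target string

    type step struct {
        description string
        targets     []target
    }

    type operation struct {
        targetVersion string // the version this operation upgrades to
        steps         []step
    }

    // stepsToRun selects the steps of every operation strictly newer than
    // fromVersion whose target list includes the node being upgraded.
    func stepsToRun(fromVersion string, node target, ops []operation) []step {
        var out []step
        for _, op := range ops {
            if op.targetVersion <= fromVersion {
                continue // already applied during a previous upgrade
            }
            for _, s := range op.steps {
                for _, t := range s.targets {
                    if t == node {
                        out = append(out, s)
                        break
                    }
                }
            }
        }
        return out
    }

    func main() {
        ops := []operation{
            {"1.25.0", []step{{"add instance tags", []target{"controller"}}}},
            {"1.26.0", []step{{"update distro info", []target{"controller", "machine"}}}},
        }
        for _, s := range stepsToRun("1.25.2", "machine", ops) {
            fmt.Println("would run:", s.description)
        }
        // Only the 1.26.0 machine-targeted step is selected.
    }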
-
-package upgrades
-
-import (
-    "fmt"
-
-    "github.com/juju/utils/exec"
-)
-
-// As of the middle of the 1.17 cycle, the proxy settings are written out to
-// /home/ubuntu/.juju-proxy both by cloud-init and the machine environ worker.
-// An older version of juju that has been upgraded will get the proxy settings
-// written out to the .juju-proxy file, but the .profile for the ubuntu user
-// wouldn't have been updated to source this file.
-//
-// This upgrade step adds the line sourcing that file to .profile if it is
-// missing.
-func ensureUbuntuDotProfileSourcesProxyFile(context Context) error {
-    // We look to see if the proxy line is there already, as the manual
-    // provider may have added it already. The ubuntu user may not exist
-    // (local provider only).
-    command := fmt.Sprintf(""+
-        `([ ! -e %s/.profile ] || grep -q '.juju-proxy' %s/.profile) || `+
-        `printf '\n# Added by juju\n[ -f "$HOME/.juju-proxy" ] && . "$HOME/.juju-proxy"\n' >> %s/.profile`,
-        ubuntuHome, ubuntuHome, ubuntuHome)
-    logger.Tracef("command: %s", command)
-    result, err := exec.RunCommands(exec.RunParams{
-        Commands: command,
-    })
-    if err != nil {
-        return err
-    }
-    logger.Tracef("stdout: %s", result.Stdout)
-    return nil
-}
=== removed file 'src/github.com/juju/juju/upgrades/dotprofile_test.go'
--- src/github.com/juju/juju/upgrades/dotprofile_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/upgrades/dotprofile_test.go 1970-01-01 00:00:00 +0000
@@ -1,87 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades_test
-
-import (
-    "io/ioutil"
-    "path"
-    "runtime"
-
-    "github.com/juju/loggo"
-    jc "github.com/juju/testing/checkers"
-    gc "gopkg.in/check.v1"
-
-    "github.com/juju/juju/testing"
-    "github.com/juju/juju/upgrades"
-)
-
-type ensureDotProfileSuite struct {
-    testing.FakeJujuHomeSuite
-    home string
-    ctx  upgrades.Context
-}
-
-var _ = gc.Suite(&ensureDotProfileSuite{})
-
-func (s *ensureDotProfileSuite) SetUpTest(c *gc.C) {
-    //TODO(bogdanteleaga): Fix these on windows
-    if runtime.GOOS == "windows" {
-        c.Skip("bug 1403084: tests use bash scripts, will be fixed later on windows")
-    }
-    s.FakeJujuHomeSuite.SetUpTest(c)
-
-    loggo.GetLogger("juju.upgrade").SetLogLevel(loggo.TRACE)
-
-    s.home = c.MkDir()
-    s.PatchValue(upgrades.UbuntuHome, s.home)
-    s.ctx = &mockContext{}
-}
-
-const expectedLine = `
-# Added by juju
-[ -f "$HOME/.juju-proxy" ] && . 
"$HOME/.juju-proxy" -` - -func (s *ensureDotProfileSuite) writeDotProfile(c *gc.C, content string) { - dotProfile := path.Join(s.home, ".profile") - err := ioutil.WriteFile(dotProfile, []byte(content), 0644) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *ensureDotProfileSuite) assertProfile(c *gc.C, content string) { - dotProfile := path.Join(s.home, ".profile") - data, err := ioutil.ReadFile(dotProfile) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, content) -} - -func (s *ensureDotProfileSuite) TestSourceAdded(c *gc.C) { - s.writeDotProfile(c, "") - err := upgrades.EnsureUbuntuDotProfileSourcesProxyFile(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertProfile(c, expectedLine) -} - -func (s *ensureDotProfileSuite) TestIdempotent(c *gc.C) { - s.writeDotProfile(c, "") - err := upgrades.EnsureUbuntuDotProfileSourcesProxyFile(s.ctx) - c.Assert(err, jc.ErrorIsNil) - err = upgrades.EnsureUbuntuDotProfileSourcesProxyFile(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertProfile(c, expectedLine) -} - -func (s *ensureDotProfileSuite) TestProfileUntouchedIfJujuProxyInSource(c *gc.C) { - content := "source .juju-proxy\n" - s.writeDotProfile(c, content) - err := upgrades.EnsureUbuntuDotProfileSourcesProxyFile(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertProfile(c, content) -} - -func (s *ensureDotProfileSuite) TestSkippedIfDotProfileDoesntExist(c *gc.C) { - err := upgrades.EnsureUbuntuDotProfileSourcesProxyFile(s.ctx) - c.Assert(err, jc.ErrorIsNil) - c.Assert(path.Join(s.home, ".profile"), jc.DoesNotExist) -} === modified file 'src/github.com/juju/juju/upgrades/environconfig.go' --- src/github.com/juju/juju/upgrades/environconfig.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/environconfig.go 2016-03-22 15:18:22 +0000 @@ -14,34 +14,34 @@ // environConfigUpdater is an interface used atomically write environment // config changes to the global state. type environConfigUpdater interface { - // UpdateEnvironConfig atomically updates and removes environment + // UpdateModelConfig atomically updates and removes environment // config attributes to the global state. - UpdateEnvironConfig(map[string]interface{}, []string, state.ValidateConfigFunc) error + UpdateModelConfig(map[string]interface{}, []string, state.ValidateConfigFunc) error } // environConfigReader is an interface used to read the current environment // config from global state. type environConfigReader interface { - // EnvironConfig reads the current environment config from global + // ModelConfig reads the current environment config from global // state. 
- EnvironConfig() (*config.Config, error) + ModelConfig() (*config.Config, error) } -func upgradeEnvironConfig( +func upgradeModelConfig( reader environConfigReader, updater environConfigUpdater, registry environs.ProviderRegistry, ) error { - cfg, err := reader.EnvironConfig() + cfg, err := reader.ModelConfig() if err != nil { - return errors.Annotate(err, "reading environment config") + return errors.Annotate(err, "reading model config") } provider, err := registry.Provider(cfg.Type()) if err != nil { return errors.Annotate(err, "getting provider") } - upgrader, ok := provider.(environs.EnvironConfigUpgrader) + upgrader, ok := provider.(environs.ModelConfigUpgrader) if !ok { logger.Debugf("provider %q has no upgrades", cfg.Type()) return nil @@ -58,7 +58,7 @@ removedAttrs = append(removedAttrs, key) } } - if err := updater.UpdateEnvironConfig(newAttrs, removedAttrs, nil); err != nil { + if err := updater.UpdateModelConfig(newAttrs, removedAttrs, nil); err != nil { return errors.Annotate(err, "updating config in state") } return nil === modified file 'src/github.com/juju/juju/upgrades/environconfig_test.go' --- src/github.com/juju/juju/upgrades/environconfig_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/environconfig_test.go 2016-03-22 15:18:22 +0000 @@ -17,81 +17,81 @@ "github.com/juju/juju/upgrades" ) -type upgradeEnvironConfigSuite struct { +type upgradeModelConfigSuite struct { coretesting.BaseSuite stub testing.Stub cfg *config.Config - reader upgrades.EnvironConfigReader - updater upgrades.EnvironConfigUpdater + reader upgrades.ModelConfigReader + updater upgrades.ModelConfigUpdater registry *mockProviderRegistry } -var _ = gc.Suite(&upgradeEnvironConfigSuite{}) +var _ = gc.Suite(&upgradeModelConfigSuite{}) -func (s *upgradeEnvironConfigSuite) SetUpSuite(c *gc.C) { +func (s *upgradeModelConfigSuite) SetUpSuite(c *gc.C) { s.BaseSuite.SetUpTest(c) } -func (s *upgradeEnvironConfigSuite) SetUpTest(c *gc.C) { +func (s *upgradeModelConfigSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.stub = testing.Stub{} - s.cfg = coretesting.EnvironConfig(c) + s.cfg = coretesting.ModelConfig(c) s.registry = &mockProviderRegistry{ providers: make(map[string]environs.EnvironProvider), } s.reader = environConfigFunc(func() (*config.Config, error) { - s.stub.AddCall("EnvironConfig") + s.stub.AddCall("ModelConfig") return s.cfg, s.stub.NextErr() }) - s.updater = updateEnvironConfigFunc(func( + s.updater = updateModelConfigFunc(func( update map[string]interface{}, remove []string, validate state.ValidateConfigFunc, ) error { - s.stub.AddCall("UpdateEnvironConfig", update, remove, validate) + s.stub.AddCall("UpdateModelConfig", update, remove, validate) return s.stub.NextErr() }) } -func (s *upgradeEnvironConfigSuite) TestUpgradeEnvironConfigEnvironConfigError(c *gc.C) { +func (s *upgradeModelConfigSuite) TestUpgradeModelConfigModelConfigError(c *gc.C) { s.stub.SetErrors(errors.New("cannot read environ config")) - err := upgrades.UpgradeEnvironConfig(s.reader, s.updater, s.registry) - c.Assert(err, gc.ErrorMatches, "reading environment config: cannot read environ config") - s.stub.CheckCallNames(c, "EnvironConfig") + err := upgrades.UpgradeModelConfig(s.reader, s.updater, s.registry) + c.Assert(err, gc.ErrorMatches, "reading model config: cannot read environ config") + s.stub.CheckCallNames(c, "ModelConfig") } -func (s *upgradeEnvironConfigSuite) TestUpgradeEnvironConfigProviderNotRegistered(c *gc.C) { +func (s *upgradeModelConfigSuite) 
TestUpgradeModelConfigProviderNotRegistered(c *gc.C) { s.registry.SetErrors(errors.New(`no registered provider for "someprovider"`)) - err := upgrades.UpgradeEnvironConfig(s.reader, s.updater, s.registry) + err := upgrades.UpgradeModelConfig(s.reader, s.updater, s.registry) c.Assert(err, gc.ErrorMatches, `getting provider: no registered provider for "someprovider"`) - s.stub.CheckCallNames(c, "EnvironConfig") + s.stub.CheckCallNames(c, "ModelConfig") } -func (s *upgradeEnvironConfigSuite) TestUpgradeEnvironConfigProviderNotConfigUpgrader(c *gc.C) { +func (s *upgradeModelConfigSuite) TestUpgradeModelConfigProviderNotConfigUpgrader(c *gc.C) { s.registry.providers["someprovider"] = &mockEnvironProvider{} - err := upgrades.UpgradeEnvironConfig(s.reader, s.updater, s.registry) + err := upgrades.UpgradeModelConfig(s.reader, s.updater, s.registry) c.Assert(err, jc.ErrorIsNil) s.registry.CheckCalls(c, []testing.StubCall{{ FuncName: "Provider", Args: []interface{}{"someprovider"}, }}) - s.stub.CheckCallNames(c, "EnvironConfig") + s.stub.CheckCallNames(c, "ModelConfig") } -func (s *upgradeEnvironConfigSuite) TestUpgradeEnvironConfigProviderConfigUpgrader(c *gc.C) { +func (s *upgradeModelConfigSuite) TestUpgradeModelConfigProviderConfigUpgrader(c *gc.C) { var err error s.cfg, err = s.cfg.Apply(map[string]interface{}{"test-key": "test-value"}) c.Assert(err, jc.ErrorIsNil) - s.registry.providers["someprovider"] = &mockEnvironConfigUpgrader{ + s.registry.providers["someprovider"] = &mockModelConfigUpgrader{ upgradeConfig: func(cfg *config.Config) (*config.Config, error) { return cfg.Remove([]string{"test-key"}) }, } - err = upgrades.UpgradeEnvironConfig(s.reader, s.updater, s.registry) + err = upgrades.UpgradeModelConfig(s.reader, s.updater, s.registry) c.Assert(err, jc.ErrorIsNil) - s.stub.CheckCallNames(c, "EnvironConfig", "UpdateEnvironConfig") + s.stub.CheckCallNames(c, "ModelConfig", "UpdateModelConfig") updateCall := s.stub.Calls()[1] expectedAttrs := s.cfg.AllAttrs() delete(expectedAttrs, "test-key") @@ -101,28 +101,28 @@ c.Assert(updateCall.Args[2], gc.IsNil) } -func (s *upgradeEnvironConfigSuite) TestUpgradeEnvironConfigUpgradeConfigError(c *gc.C) { - s.registry.providers["someprovider"] = &mockEnvironConfigUpgrader{ +func (s *upgradeModelConfigSuite) TestUpgradeModelConfigUpgradeConfigError(c *gc.C) { + s.registry.providers["someprovider"] = &mockModelConfigUpgrader{ upgradeConfig: func(cfg *config.Config) (*config.Config, error) { return nil, errors.New("cannot upgrade config") }, } - err := upgrades.UpgradeEnvironConfig(s.reader, s.updater, s.registry) + err := upgrades.UpgradeModelConfig(s.reader, s.updater, s.registry) c.Assert(err, gc.ErrorMatches, "upgrading config: cannot upgrade config") - s.stub.CheckCallNames(c, "EnvironConfig") + s.stub.CheckCallNames(c, "ModelConfig") } -func (s *upgradeEnvironConfigSuite) TestUpgradeEnvironConfigUpdateConfigError(c *gc.C) { +func (s *upgradeModelConfigSuite) TestUpgradeModelConfigUpdateConfigError(c *gc.C) { s.stub.SetErrors(nil, errors.New("cannot update environ config")) - s.registry.providers["someprovider"] = &mockEnvironConfigUpgrader{ + s.registry.providers["someprovider"] = &mockModelConfigUpgrader{ upgradeConfig: func(cfg *config.Config) (*config.Config, error) { return cfg, nil }, } - err := upgrades.UpgradeEnvironConfig(s.reader, s.updater, s.registry) + err := upgrades.UpgradeModelConfig(s.reader, s.updater, s.registry) c.Assert(err, gc.ErrorMatches, "updating config in state: cannot update environ config") - s.stub.CheckCallNames(c, 
"EnvironConfig", "UpdateEnvironConfig") + s.stub.CheckCallNames(c, "ModelConfig", "UpdateModelConfig") updateCall := s.stub.Calls()[1] c.Assert(updateCall.Args, gc.HasLen, 3) c.Assert(updateCall.Args[0], jc.DeepEquals, s.cfg.AllAttrs()) @@ -132,13 +132,13 @@ type environConfigFunc func() (*config.Config, error) -func (f environConfigFunc) EnvironConfig() (*config.Config, error) { +func (f environConfigFunc) ModelConfig() (*config.Config, error) { return f() } -type updateEnvironConfigFunc func(map[string]interface{}, []string, state.ValidateConfigFunc) error +type updateModelConfigFunc func(map[string]interface{}, []string, state.ValidateConfigFunc) error -func (f updateEnvironConfigFunc) UpdateEnvironConfig( +func (f updateModelConfigFunc) UpdateModelConfig( update map[string]interface{}, remove []string, validate state.ValidateConfigFunc, ) error { return f(update, remove, validate) @@ -160,12 +160,12 @@ environs.EnvironProvider } -type mockEnvironConfigUpgrader struct { +type mockModelConfigUpgrader struct { mockEnvironProvider upgradeConfig func(*config.Config) (*config.Config, error) } -func (u *mockEnvironConfigUpgrader) UpgradeConfig(cfg *config.Config) (*config.Config, error) { +func (u *mockModelConfigUpgrader) UpgradeConfig(cfg *config.Config) (*config.Config, error) { u.MethodCall(u, "UpgradeConfig", cfg) return u.upgradeConfig(cfg) } === modified file 'src/github.com/juju/juju/upgrades/export_test.go' --- src/github.com/juju/juju/upgrades/export_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/export_test.go 2016-03-22 15:18:22 +0000 @@ -6,65 +6,17 @@ import "github.com/juju/juju/environs" var ( - UpgradeOperations = &upgradeOperations - StateUpgradeOperations = &stateUpgradeOperations - UbuntuHome = &ubuntuHome - RootLogDir = &rootLogDir - RootSpoolDir = &rootSpoolDir - CharmBundleURL = &charmBundleURL - CharmStoragePath = &charmStoragePath - StateAddCharmStoragePaths = &stateAddCharmStoragePaths - NewStateStorage = &newStateStorage - StateToolsStorage = &stateToolsStorage - AddAZToInstData = &addAZToInstData - - ChownPath = &chownPath - IsLocalEnviron = &isLocalEnviron - OsRemove = &osRemove - ExecRunCommands = &execRunCommands - - // 118 upgrade functions - StepsFor118 = stepsFor118 - EnsureLockDirExistsAndUbuntuWritable = ensureLockDirExistsAndUbuntuWritable - EnsureSystemSSHKey = ensureSystemSSHKey - EnsureUbuntuDotProfileSourcesProxyFile = ensureUbuntuDotProfileSourcesProxyFile - UpdateRsyslogPort = updateRsyslogPort - ProcessDeprecatedEnvSettings = processDeprecatedEnvSettings - MigrateLocalProviderAgentConfig = migrateLocalProviderAgentConfig - - // 121 upgrade functions - MigrateCharmStorage = migrateCharmStorage - MigrateCustomImageMetadata = migrateCustomImageMetadata - MigrateToolsStorage = migrateToolsStorage - - // 122 upgrade functions - EnsureSystemSSHKeyRedux = ensureSystemSSHKeyRedux - UpdateAuthorizedKeysForSystemIdentity = updateAuthorizedKeysForSystemIdentity - AddAvaililityZoneToInstanceData = addAvaililityZoneToInstanceData - - // 123 upgrade functions - AddEnvironmentUUIDToAgentConfig = addEnvironmentUUIDToAgentConfig - AddDefaultStoragePools = addDefaultStoragePools - MoveBlocksFromEnvironToState = moveBlocksFromEnvironToState - - // 124 upgrade functions - MoveSyslogConfig = moveSyslogConfig - CopyFile = copyFile - - // 125 upgrade functions - AddInstanceTags = addInstanceTags - RemoveJujudpass = removeJujudpass - AddJujuRegKey = addJujuRegKey - CleanToolsStorage = cleanToolsStorage + UpgradeOperations = &upgradeOperations + 
StateUpgradeOperations = &stateUpgradeOperations
 )

-type EnvironConfigUpdater environConfigUpdater
-type EnvironConfigReader environConfigReader
+type ModelConfigUpdater environConfigUpdater
+type ModelConfigReader environConfigReader

-func UpgradeEnvironConfig(
-    reader EnvironConfigReader,
-    updater EnvironConfigUpdater,
+func UpgradeModelConfig(
+    reader ModelConfigReader,
+    updater ModelConfigUpdater,
     registry environs.ProviderRegistry,
 ) error {
-    return upgradeEnvironConfig(reader, updater, registry)
+    return upgradeModelConfig(reader, updater, registry)
 }
=== removed file 'src/github.com/juju/juju/upgrades/instance.go'
--- src/github.com/juju/juju/upgrades/instance.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/upgrades/instance.go 1970-01-01 00:00:00 +0000
@@ -1,18 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades
-
-import (
-    "github.com/juju/errors"
-
-    "github.com/juju/juju/state"
-    "github.com/juju/juju/state/utils"
-)
-
-var addAZToInstData = state.AddAvailabilityZoneToInstanceData
-
-func addAvaililityZoneToInstanceData(context Context) error {
-    err := addAZToInstData(context.State(), utils.AvailabilityZone)
-    return errors.Trace(err)
-}
=== removed file 'src/github.com/juju/juju/upgrades/instance_test.go'
--- src/github.com/juju/juju/upgrades/instance_test.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/upgrades/instance_test.go 1970-01-01 00:00:00 +0000
@@ -1,44 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades_test
-
-import (
-    jc "github.com/juju/testing/checkers"
-    gc "gopkg.in/check.v1"
-
-    "github.com/juju/juju/instance"
-    "github.com/juju/juju/state"
-    "github.com/juju/juju/testing"
-    "github.com/juju/juju/upgrades"
-)
-
-type instanceSuite struct {
-    testing.FakeJujuHomeSuite
-    ctx upgrades.Context
-}
-
-var _ = gc.Suite(&instanceSuite{})
-
-func (s *instanceSuite) SetUpTest(c *gc.C) {
-    s.FakeJujuHomeSuite.SetUpTest(c)
-
-    s.ctx = &mockContext{}
-}
-
-func (s *instanceSuite) TestAddAvaililityZoneToInstanceData(c *gc.C) {
-    var stArg *state.State
-    s.PatchValue(upgrades.AddAZToInstData,
-        func(st *state.State, azFunc func(*state.State, instance.Id) (string, error)) error {
-            stArg = st
-            // We can't compare functions for equality so we trust that
-            // azFunc is correct.
-            return nil
-        },
-    )
-
-    err := upgrades.AddAvaililityZoneToInstanceData(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-
-    c.Check(stArg, gc.Equals, s.ctx.State())
-}
=== removed file 'src/github.com/juju/juju/upgrades/lockdirectory.go'
--- src/github.com/juju/juju/upgrades/lockdirectory.go 2014-08-20 15:00:12 +0000
+++ src/github.com/juju/juju/upgrades/lockdirectory.go 1970-01-01 00:00:00 +0000
@@ -1,41 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades
-
-import (
-    "fmt"
-    "path"
-
-    "github.com/juju/utils/exec"
-)
-
-var ubuntuHome = "/home/ubuntu"
-
-// Previously the lock directory was created when the uniter started. This
-// allows serialization of all of the hook execution across units running on a
-// single machine. This lock directory is now also used by the juju-run
-// command on the host machine. juju-run also gets a lock on the hook
-// execution fslock prior to execution. However, the lock directory was owned
-// by root, and the juju-run process was being executed by the ubuntu user, so
-// we need to change the ownership of the lock directory to ubuntu:ubuntu.
-// Also we need to make sure that this directory exists on machines with no
-// units.
-func ensureLockDirExistsAndUbuntuWritable(context Context) error {
-    lockDir := path.Join(context.AgentConfig().DataDir(), "locks")
-    // We only try to change ownership if there is an ubuntu user
-    // defined, and we determine this by the existence of the home dir.
-    command := fmt.Sprintf(""+
-        "mkdir -p %s\n"+
-        "[ -e %s ] && chown ubuntu:ubuntu %s\n",
-        lockDir, ubuntuHome, lockDir)
-    logger.Tracef("command: %s", command)
-    result, err := exec.RunCommands(exec.RunParams{
-        Commands: command,
-    })
-    if err != nil {
-        return err
-    }
-    logger.Tracef("stdout: %s", result.Stdout)
-    return nil
-}
=== removed file 'src/github.com/juju/juju/upgrades/lockdirectory_test.go'
--- src/github.com/juju/juju/upgrades/lockdirectory_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/upgrades/lockdirectory_test.go 1970-01-01 00:00:00 +0000
@@ -1,98 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades_test
-
-import (
-    "fmt"
-    "io/ioutil"
-    "path/filepath"
-    "runtime"
-
-    "github.com/juju/loggo"
-    jc "github.com/juju/testing/checkers"
-    gc "gopkg.in/check.v1"
-
-    "github.com/juju/juju/testing"
-    "github.com/juju/juju/upgrades"
-)
-
-type ensureLockDirSuite struct {
-    testing.FakeJujuHomeSuite
-    bin     string
-    home    string
-    datadir string
-    lockdir string
-    ctx     upgrades.Context
-}
-
-var _ = gc.Suite(&ensureLockDirSuite{})
-
-// fakecommand outputs its arguments to stdout for verification
-var fakecommand = `#!/bin/bash
-
-echo $@ | tee $0.args
-`
-
-func (s *ensureLockDirSuite) SetUpTest(c *gc.C) {
-    //TODO(bogdanteleaga): Fix this on windows
-    if runtime.GOOS == "windows" {
-        c.Skip("bug 1403084: tests use bash scripts, will be fixed later on windows")
-    }
-    s.FakeJujuHomeSuite.SetUpTest(c)
-
-    s.bin = c.MkDir()
-    s.PatchEnvPathPrepend(s.bin)
-
-    err := ioutil.WriteFile(
-        filepath.Join(s.bin, "chown"),
-        []byte(fakecommand), 0777)
-    c.Assert(err, jc.ErrorIsNil)
-
-    loggo.GetLogger("juju.upgrade").SetLogLevel(loggo.TRACE)
-
-    s.home = c.MkDir()
-    s.PatchValue(upgrades.UbuntuHome, s.home)
-
-    s.datadir = c.MkDir()
-    s.lockdir = filepath.Join(s.datadir, "locks")
-    s.ctx = &mockContext{agentConfig: &mockAgentConfig{dataDir: s.datadir}}
-}
-
-func (s *ensureLockDirSuite) assertChownCalled(c *gc.C) {
-    bytes, err := ioutil.ReadFile(filepath.Join(s.bin, "chown.args"))
-    c.Assert(err, jc.ErrorIsNil)
-    c.Assert(string(bytes), gc.Equals, fmt.Sprintf("ubuntu:ubuntu %s\n", s.lockdir))
-}
-
-func (s *ensureLockDirSuite) assertNoChownCalled(c *gc.C) {
-    c.Assert(filepath.Join(s.bin, "chown.args"), jc.DoesNotExist)
-}
-
-func (s *ensureLockDirSuite) TestLockDirCreated(c *gc.C) {
-    err := upgrades.EnsureLockDirExistsAndUbuntuWritable(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-
-    c.Assert(s.lockdir, jc.IsDirectory)
-    s.assertChownCalled(c)
-}
-
-func (s *ensureLockDirSuite) TestIdempotent(c *gc.C) {
-    err := upgrades.EnsureLockDirExistsAndUbuntuWritable(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-
-    err = upgrades.EnsureLockDirExistsAndUbuntuWritable(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-
-    c.Assert(s.lockdir, jc.IsDirectory)
-    s.assertChownCalled(c)
-}
-
-func (s *ensureLockDirSuite) TestNoChownIfNoHome(c *gc.C) {
-    s.PatchValue(upgrades.UbuntuHome, filepath.Join(s.home, 
"not-exist")) - err := upgrades.EnsureLockDirExistsAndUbuntuWritable(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(s.lockdir, jc.IsDirectory) - s.assertNoChownCalled(c) -} === modified file 'src/github.com/juju/juju/upgrades/operations.go' --- src/github.com/juju/juju/upgrades/operations.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/operations.go 2016-03-22 15:18:22 +0000 @@ -16,36 +16,8 @@ var stateUpgradeOperations = func() []Operation { steps := []Operation{ upgradeToVersion{ - version.MustParse("1.18.0"), - stateStepsFor118(), - }, - upgradeToVersion{ - version.MustParse("1.21.0"), - stateStepsFor121(), - }, - upgradeToVersion{ - version.MustParse("1.22.0"), - stateStepsFor122(), - }, - upgradeToVersion{ - version.MustParse("1.23.0"), - stateStepsFor123(), - }, - upgradeToVersion{ - version.MustParse("1.24.0"), - stateStepsFor124(), - }, - upgradeToVersion{ - version.MustParse("1.24.4"), - stateStepsFor1244(), - }, - upgradeToVersion{ - version.MustParse("1.25.0"), - stateStepsFor125(), - }, - upgradeToVersion{ - version.MustParse("1.25.2"), - stateStepsFor1252(), + version.MustParse("1.26.0"), + stateStepsFor126(), }, } return steps @@ -57,24 +29,8 @@ var upgradeOperations = func() []Operation { steps := []Operation{ upgradeToVersion{ - version.MustParse("1.18.0"), - stepsFor118(), - }, - upgradeToVersion{ - version.MustParse("1.22.0"), - stepsFor122(), - }, - upgradeToVersion{ - version.MustParse("1.23.0"), - stepsFor123(), - }, - upgradeToVersion{ - version.MustParse("1.24.0"), - stepsFor124(), - }, - upgradeToVersion{ - version.MustParse("1.25.0"), - stepsFor125(), + version.MustParse("1.26.0"), + stepsFor126(), }, } return steps @@ -88,11 +44,11 @@ } func newStateUpgradeOpsIterator(from version.Number) *opsIterator { - return newOpsIterator(from, version.Current.Number, stateUpgradeOperations()) + return newOpsIterator(from, version.Current, stateUpgradeOperations()) } func newUpgradeOpsIterator(from version.Number) *opsIterator { - return newOpsIterator(from, version.Current.Number, upgradeOperations()) + return newOpsIterator(from, version.Current, upgradeOperations()) } func newOpsIterator(from, to version.Number, ops []Operation) *opsIterator { === added file 'src/github.com/juju/juju/upgrades/preupgradesteps.go' --- src/github.com/juju/juju/upgrades/preupgradesteps.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/upgrades/preupgradesteps.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades + +import ( + "github.com/dustin/go-humanize" + "github.com/juju/errors" + "github.com/juju/utils/du" + "github.com/juju/utils/packaging/manager" + "github.com/juju/utils/series" + + "github.com/juju/juju/agent" + "github.com/juju/juju/state" +) + +// PreUpgradeSteps runs various checks and prepares for performing an upgrade. +// If any check fails, an error is returned which aborts the upgrade. +func PreUpgradeSteps(st *state.State, agentConf agent.Config, isController, isMaster bool) error { + if err := checkDiskSpace(agentConf.DataDir()); err != nil { + return err + } + if isController { + // Update distro info in case the new Juju controller version + // is aware of new supported series. We'll keep going if this + // fails, and the user can manually update it if they need to. 
+ logger.Infof("updating distro-info") + if err := updateDistroInfo(); err != nil { + logger.Warningf("failed to update distro-info: %v", err) + } + } + return nil +} + +// We'll be conservative and require at least 250MiB of disk space for an upgrade. +var MinDiskSpaceMib = uint64(250) + +func checkDiskSpace(dir string) error { + usage := du.NewDiskUsage(dir) + free := usage.Free() + if free < uint64(MinDiskSpaceMib*humanize.MiByte) { + return errors.Errorf("not enough free disk space for upgrade: %s available, require %dMiB", + humanize.IBytes(free), MinDiskSpaceMib) + } + return nil +} + +func updateDistroInfo() error { + pm := manager.NewAptPackageManager() + if err := pm.Update(); err != nil { + return errors.Annotate(err, "updating package list") + } + if err := pm.Install("distro-info"); err != nil { + return errors.Annotate(err, "updating distro-info package") + } + return series.UpdateSeriesVersions() +} === added file 'src/github.com/juju/juju/upgrades/preupgradesteps_test.go' --- src/github.com/juju/juju/upgrades/preupgradesteps_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/upgrades/preupgradesteps_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,63 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades_test + +import ( + "os/exec" + + "github.com/dustin/go-humanize" + jc "github.com/juju/testing/checkers" + pkgmgr "github.com/juju/utils/packaging/manager" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/upgrades" +) + +type preupgradechecksSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&preupgradechecksSuite{}) + +func (s *preupgradechecksSuite) TestCheckFreeDiskSpace(c *gc.C) { + // Expect an impossibly large amount of free disk. + s.PatchValue(&upgrades.MinDiskSpaceMib, uint64(humanize.PiByte/humanize.MiByte)) + err := upgrades.PreUpgradeSteps(nil, &mockAgentConfig{dataDir: "/"}, false, false) + c.Assert(err, gc.ErrorMatches, "not enough free disk space for upgrade: .*") +} + +func (s *preupgradechecksSuite) TestUpdateDistroInfo(c *gc.C) { + s.PatchValue(&upgrades.MinDiskSpaceMib, uint64(0)) + expectedAptCommandArgs := [][]string{ + {"update"}, + {"install", "distro-info"}, + } + + commandChan := s.HookCommandOutput(&pkgmgr.CommandOutput, nil, nil) + err := upgrades.PreUpgradeSteps(nil, &mockAgentConfig{dataDir: "/"}, true, false) + c.Assert(err, jc.ErrorIsNil) + + var commands []*exec.Cmd + for i := 0; i < cap(expectedAptCommandArgs)+1; i++ { + select { + case cmd := <-commandChan: + commands = append(commands, cmd) + default: + break + } + } + if len(commands) != len(expectedAptCommandArgs) { + c.Fatalf("expected %d commands, got %d", len(expectedAptCommandArgs), len(commands)) + } + + assertAptCommand := func(cmd *exec.Cmd, tailArgs ...string) { + args := cmd.Args + c.Assert(len(args), jc.GreaterThan, len(tailArgs)) + c.Assert(args[0], gc.Equals, "apt-get") + c.Assert(args[len(args)-len(tailArgs):], gc.DeepEquals, tailArgs) + } + assertAptCommand(commands[0], "update") + assertAptCommand(commands[1], "install", "distro-info") +} === added file 'src/github.com/juju/juju/upgrades/providerchanges.go' --- src/github.com/juju/juju/upgrades/providerchanges.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/upgrades/providerchanges.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package upgrades + +import ( + "github.com/juju/errors" + "github.com/juju/juju/environs" + "github.com/juju/juju/provider" + "github.com/juju/juju/version" +) + +func upgradeProviderChanges(env environs.Environ, reader environConfigReader, ver version.Number) error { + cfg, err := reader.ModelConfig() + if err != nil { + return errors.Annotate(err, "reading model config") + } + + upgrader, ok := env.(provider.Upgradeable) + if !ok { + logger.Debugf("provider %q has no upgrades", cfg.Type()) + return nil + } + + if err := upgrader.RunUpgradeStepsFor(ver); err != nil { + return errors.Annotate(err, "running upgrade steps") + } + return nil +} === removed file 'src/github.com/juju/juju/upgrades/rsysloggnutls.go' --- src/github.com/juju/juju/upgrades/rsysloggnutls.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/rsysloggnutls.go 1970-01-01 00:00:00 +0000 @@ -1,27 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "github.com/juju/utils/packaging/manager" - - "github.com/juju/juju/version" -) - -// getPackageManager is a helper function which returns the -// package manager implementation for the current system. -func getPackageManager() (manager.PackageManager, error) { - return manager.NewPackageManager(version.Current.Series) -} - -// installRsyslogGnutls installs the rsyslog-gnutls package, -// which is required for our rsyslog configuration from 1.18.0. -func installRsyslogGnutls(context Context) error { - pacman, err := getPackageManager() - if err != nil { - return err - } - - return pacman.Install("rsyslog-gnutls") -} === removed file 'src/github.com/juju/juju/upgrades/rsyslogport.go' --- src/github.com/juju/juju/upgrades/rsyslogport.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/rsyslogport.go 1970-01-01 00:00:00 +0000 @@ -1,31 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "fmt" - - "github.com/juju/juju/environs/config" - "github.com/juju/juju/mongo" - "github.com/juju/juju/state" -) - -func updateRsyslogPort(context Context) error { - agentConfig := context.AgentConfig() - info, ok := agentConfig.MongoInfo() - if !ok { - return fmt.Errorf("Failed to get MongoInfo") - } - // we need to re-open state with a nil policy so we can bypass - // validation, as the syslog-port is normally immutable - st, err := state.Open(agentConfig.Environment(), info, mongo.DefaultDialOpts(), nil) - if err != nil { - return err - } - defer st.Close() - attrs := map[string]interface{}{ - "syslog-port": config.DefaultSyslogPort, - } - return st.UpdateEnvironConfig(attrs, nil, nil) -} === removed file 'src/github.com/juju/juju/upgrades/rsyslogport_test.go' --- src/github.com/juju/juju/upgrades/rsyslogport_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/rsyslogport_test.go 1970-01-01 00:00:00 +0000 @@ -1,56 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
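upgradeProviderChanges above uses the optional-interface pattern: the capability is discovered with a type assertion, and a provider that does not implement it is skipped without error. A self-contained sketch of that pattern with illustrative types (not the juju environs/provider interfaces):

    package main

    import "fmt"

    type environ interface {
        Type() string
    }

    // upgradeable is the optional capability; only environs that implement
    // it take part in provider-specific upgrades.
    type upgradeable interface {
        RunUpgradeStepsFor(ver string) error
    }

    type plainEnv struct{}

    func (plainEnv) Type() string { return "plain" }

    type fancyEnv struct{}

    func (fancyEnv) Type() string { return "fancy" }
    func (fancyEnv) RunUpgradeStepsFor(ver string) error {
        fmt.Println("running provider upgrade steps for", ver)
        return nil
    }

    func runProviderUpgrades(env environ, ver string) error {
        upgrader, ok := env.(upgradeable)
        if !ok {
            // Not an error: most providers simply have nothing to do.
            fmt.Printf("provider %q has no upgrades\n", env.Type())
            return nil
        }
        return upgrader.RunUpgradeStepsFor(ver)
    }

    func main() {
        runProviderUpgrades(plainEnv{}, "1.26.0") // skipped
        runProviderUpgrades(fancyEnv{}, "1.26.0") // runs
    }

Making the capability optional keeps the upgrade machinery decoupled from individual providers: adding provider-specific steps never forces every other provider to grow a stub implementation.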
-
-package upgrades_test
-
-import (
-    jc "github.com/juju/testing/checkers"
-    gc "gopkg.in/check.v1"
-
-    "github.com/juju/juju/environs/config"
-    jujutesting "github.com/juju/juju/juju/testing"
-    "github.com/juju/juju/state"
-    "github.com/juju/juju/upgrades"
-)
-
-type rsyslogPortSuite struct {
-    jujutesting.JujuConnSuite
-    ctx upgrades.Context
-}
-
-var _ = gc.Suite(&rsyslogPortSuite{})
-
-func (s *rsyslogPortSuite) SetUpTest(c *gc.C) {
-    s.JujuConnSuite.SetUpTest(c)
-    apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron)
-    s.ctx = &mockContext{
-        agentConfig: &mockAgentConfig{
-            dataDir:    s.DataDir(),
-            mongoInfo:  s.MongoInfo(c),
-            environTag: s.State.EnvironTag(),
-        },
-        apiState: apiState,
-        state:    s.State,
-    }
-    cfg, err := s.State.EnvironConfig()
-    c.Assert(err, jc.ErrorIsNil)
-    c.Assert(cfg.SyslogPort(), gc.Not(gc.Equals), config.DefaultSyslogPort)
-}
-
-func (s *rsyslogPortSuite) TestSyslogPortChanged(c *gc.C) {
-    err := upgrades.UpdateRsyslogPort(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-    cfg, err := s.State.EnvironConfig()
-    c.Assert(err, jc.ErrorIsNil)
-    c.Assert(cfg.SyslogPort(), gc.Equals, config.DefaultSyslogPort)
-}
-
-func (s *rsyslogPortSuite) TestIdempotent(c *gc.C) {
-    err := upgrades.UpdateRsyslogPort(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-    err = upgrades.UpdateRsyslogPort(s.ctx)
-    c.Assert(err, jc.ErrorIsNil)
-    cfg, err := s.State.EnvironConfig()
-    c.Assert(err, jc.ErrorIsNil)
-    c.Assert(cfg.SyslogPort(), gc.Equals, config.DefaultSyslogPort)
-}
=== removed file 'src/github.com/juju/juju/upgrades/steps118.go'
--- src/github.com/juju/juju/upgrades/steps118.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/upgrades/steps118.go 1970-01-01 00:00:00 +0000
@@ -1,51 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades
-
-// stateStepsFor118 returns upgrade steps from Juju 1.18 that manipulate state directly.
-func stateStepsFor118() []Step {
-    return []Step{
-        &upgradeStep{
-            description: "update rsyslog port",
-            targets:     []Target{StateServer},
-            run:         updateRsyslogPort,
-        },
-        &upgradeStep{
-            description: "remove deprecated environment config settings",
-            targets:     []Target{StateServer},
-            run:         processDeprecatedEnvSettings,
-        },
-        &upgradeStep{
-            description: "migrate local provider agent config",
-            targets:     []Target{StateServer},
-            run:         migrateLocalProviderAgentConfig,
-        },
-    }
-}
-
-// stepsFor118 returns upgrade steps for Juju 1.18 that operate via the API.
-func stepsFor118() []Step {
-    return []Step{
-        &upgradeStep{
-            description: "make $DATADIR/locks owned by ubuntu:ubuntu",
-            targets:     []Target{AllMachines},
-            run:         ensureLockDirExistsAndUbuntuWritable,
-        },
-        &upgradeStep{
-            description: "generate system ssh key",
-            targets:     []Target{StateServer},
-            run:         ensureSystemSSHKey,
-        },
-        &upgradeStep{
-            description: "install rsyslog-gnutls",
-            targets:     []Target{AllMachines},
-            run:         installRsyslogGnutls,
-        },
-        &upgradeStep{
-            description: "make /home/ubuntu/.profile source .juju-proxy file",
-            targets:     []Target{AllMachines},
-            run:         ensureUbuntuDotProfileSourcesProxyFile,
-        },
-    }
-}
=== removed file 'src/github.com/juju/juju/upgrades/steps118_test.go'
--- src/github.com/juju/juju/upgrades/steps118_test.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/upgrades/steps118_test.go 1970-01-01 00:00:00 +0000
@@ -1,36 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades_test
-
-import (
-    gc "gopkg.in/check.v1"
-
-    "github.com/juju/juju/testing"
-    "github.com/juju/juju/version"
-)
-
-type steps118Suite struct {
-    testing.BaseSuite
-}
-
-var _ = gc.Suite(&steps118Suite{})
-
-func (s *steps118Suite) TestStateStepsFor118(c *gc.C) {
-    expected := []string{
-        "update rsyslog port",
-        "remove deprecated environment config settings",
-        "migrate local provider agent config",
-    }
-    assertStateSteps(c, version.MustParse("1.18.0"), expected)
-}
-
-func (s *steps118Suite) TestStepsFor118(c *gc.C) {
-    expected := []string{
-        "make $DATADIR/locks owned by ubuntu:ubuntu",
-        "generate system ssh key",
-        "install rsyslog-gnutls",
-        "make /home/ubuntu/.profile source .juju-proxy file",
-    }
-    assertSteps(c, version.MustParse("1.18.0"), expected)
-}
=== removed file 'src/github.com/juju/juju/upgrades/steps121.go'
--- src/github.com/juju/juju/upgrades/steps121.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/upgrades/steps121.go 1970-01-01 00:00:00 +0000
@@ -1,163 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package upgrades
-
-import (
-    "github.com/juju/juju/state"
-)
-
-// stateStepsFor121 returns upgrade steps from Juju 1.21 that manipulate state directly.
-func stateStepsFor121() []Step {
-    return []Step{
-        &upgradeStep{
-            description: "add environment uuid to state server doc",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvironmentUUIDToStateServerDoc(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "set environment owner and server uuid",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.SetOwnerAndServerUUIDForEnvironment(context.State())
-            },
-        },
-
-        &upgradeStep{
-            description: "migrate machine instanceId into instanceData",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.MigrateMachineInstanceIdToInstanceData(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all machine docs",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvUUIDToMachines(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all instanceData docs",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvUUIDToInstanceData(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all containerRef docs",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvUUIDToContainerRefs(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all service docs",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvUUIDToServices(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all unit docs",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvUUIDToUnits(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all reboot docs",
-            targets:     []Target{DatabaseMaster},
-            run: func(context Context) error {
-                return state.AddEnvUUIDToReboots(context.State())
-            },
-        },
-        &upgradeStep{
-            description: "prepend the environment UUID to the ID of all relations docs",
-            targets:     []Target{DatabaseMaster},
-            run: 
func(context Context) error { - return state.AddEnvUUIDToRelations(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all relationscopes docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToRelationScopes(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all minUnit docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToMinUnits(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all cleanup docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToCleanups(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all sequence docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToSequences(context.State()) - }, - }, - - &upgradeStep{ - description: "rename the user LastConnection field to LastLogin", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.MigrateUserLastConnectionToLastLogin(context.State()) - }, - }, - &upgradeStep{ - description: "add all users in state as environment users", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddStateUsersAsEnvironUsers(context.State()) - }, - }, - &upgradeStep{ - description: "migrate custom image metadata into environment storage", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return migrateCustomImageMetadata(context.State(), context.AgentConfig()) - }, - }, - &upgradeStep{ - description: "migrate tools into environment storage", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return migrateToolsStorage(context.State(), context.AgentConfig()) - }, - }, - &upgradeStep{ - description: "migrate individual unit ports to openedPorts collection", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.MigrateUnitPortsToOpenedPorts(context.State()) - }, - }, - &upgradeStep{ - description: "create entries in meter status collection for existing units", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.CreateUnitMeterStatus(context.State()) - }, - }, - &upgradeStep{ - description: "migrate machine jobs into ones with JobManageNetworking based on rules", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.MigrateJobManageNetworking(context.State()) - }, - }, - } -} === removed file 'src/github.com/juju/juju/upgrades/steps121_test.go' --- src/github.com/juju/juju/upgrades/steps121_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/steps121_test.go 1970-01-01 00:00:00 +0000 @@ -1,56 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades_test - -import ( - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type steps121Suite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&steps121Suite{}) - -func (s *steps121Suite) TestStateStepsFor121(c *gc.C) { - expected := []string{ - // Environment UUID related migrations should come first as - // other upgrade steps may rely on them. 
- "add environment uuid to state server doc", - "set environment owner and server uuid", - // It is important to keep the order of the following three steps: - // 1.migrate machine instanceId, 2. Add env ID to machine docs, 3. - // Add env ID to instanceData docs. If the order changes, bad things - // will happen. - "migrate machine instanceId into instanceData", - "prepend the environment UUID to the ID of all machine docs", - "prepend the environment UUID to the ID of all instanceData docs", - "prepend the environment UUID to the ID of all containerRef docs", - "prepend the environment UUID to the ID of all service docs", - "prepend the environment UUID to the ID of all unit docs", - "prepend the environment UUID to the ID of all reboot docs", - "prepend the environment UUID to the ID of all relations docs", - "prepend the environment UUID to the ID of all relationscopes docs", - "prepend the environment UUID to the ID of all minUnit docs", - "prepend the environment UUID to the ID of all cleanup docs", - "prepend the environment UUID to the ID of all sequence docs", - - // Non-environment UUID upgrade steps follow. - "rename the user LastConnection field to LastLogin", - "add all users in state as environment users", - "migrate custom image metadata into environment storage", - "migrate tools into environment storage", - "migrate individual unit ports to openedPorts collection", - "create entries in meter status collection for existing units", - "migrate machine jobs into ones with JobManageNetworking based on rules", - } - assertStateSteps(c, version.MustParse("1.21.0"), expected) -} - -func (s *steps121Suite) TestStepsFor121(c *gc.C) { - assertSteps(c, version.MustParse("1.21.0"), []string{}) -} === removed file 'src/github.com/juju/juju/upgrades/steps122.go' --- src/github.com/juju/juju/upgrades/steps122.go 2015-06-05 17:40:37 +0000 +++ src/github.com/juju/juju/upgrades/steps122.go 1970-01-01 00:00:00 +0000 @@ -1,118 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "github.com/juju/juju/state" -) - -// stateStepsFor122 returns upgrade steps form Juju 1.22 that manipulate state directly. 
-func stateStepsFor122() []Step { - return []Step{ - &upgradeStep{ - description: "prepend the environment UUID to the ID of all charm docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToCharms(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all settings docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToSettings(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all settingsRefs docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToSettingsRefs(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all networks docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToNetworks(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all requestedNetworks docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToRequestedNetworks(context.State()) - }, - }, - &upgradeStep{ - description: "prepend the environment UUID to the ID of all networkInterfaces docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToNetworkInterfaces(context.State()) - }, - }, &upgradeStep{ - description: "prepend the environment UUID to the ID of all statuses docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToStatuses(context.State()) - }, - }, &upgradeStep{ - description: "prepend the environment UUID to the ID of all annotations docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToAnnotations(context.State()) - }, - }, &upgradeStep{ - description: "prepend the environment UUID to the ID of all constraints docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToConstraints(context.State()) - }, - }, &upgradeStep{ - description: "prepend the environment UUID to the ID of all meterStatus docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToMeterStatus(context.State()) - }, - }, &upgradeStep{ - description: "prepend the environment UUID to the ID of all openPorts docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToOpenPorts(context.State()) - }, - }, &upgradeStep{ - description: "fix environment UUID for minUnits docs", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.FixMinUnitsEnvUUID(context.State()) - }, - }, &upgradeStep{ - description: "fix sequence documents", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.FixSequenceFields(context.State()) - }, - }, &upgradeStep{ - description: "update system identity in state", - targets: []Target{DatabaseMaster}, - run: ensureSystemSSHKeyRedux, - }, - &upgradeStep{ - description: "set AvailZone in instanceData", - targets: []Target{DatabaseMaster}, - run: addAvaililityZoneToInstanceData, - }, - } -} - -// stepsFor122 returns upgrade steps form Juju 1.22 that only need the API. 
-func stepsFor122() []Step { - return []Step{ - &upgradeStep{ - description: "update the authorized keys for the system identity", - targets: []Target{DatabaseMaster}, - run: updateAuthorizedKeysForSystemIdentity, - }, - } -} === removed file 'src/github.com/juju/juju/upgrades/steps122_test.go' --- src/github.com/juju/juju/upgrades/steps122_test.go 2015-06-05 17:40:37 +0000 +++ src/github.com/juju/juju/upgrades/steps122_test.go 1970-01-01 00:00:00 +0000 @@ -1,47 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades_test - -import ( - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type steps122Suite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&steps122Suite{}) - -func (s *steps122Suite) TestStateStepsFor122(c *gc.C) { - expected := []string{ - // Environment UUID related migrations should come first as - // other upgrade steps may rely on them. - "prepend the environment UUID to the ID of all charm docs", - "prepend the environment UUID to the ID of all settings docs", - "prepend the environment UUID to the ID of all settingsRefs docs", - "prepend the environment UUID to the ID of all networks docs", - "prepend the environment UUID to the ID of all requestedNetworks docs", - "prepend the environment UUID to the ID of all networkInterfaces docs", - "prepend the environment UUID to the ID of all statuses docs", - "prepend the environment UUID to the ID of all annotations docs", - "prepend the environment UUID to the ID of all constraints docs", - "prepend the environment UUID to the ID of all meterStatus docs", - "prepend the environment UUID to the ID of all openPorts docs", - "fix environment UUID for minUnits docs", - "fix sequence documents", - "update system identity in state", - "set AvailZone in instanceData", - } - assertStateSteps(c, version.MustParse("1.22.0"), expected) -} - -func (s *steps122Suite) TestStepsFor122(c *gc.C) { - expected := []string{ - "update the authorized keys for the system identity", - } - assertSteps(c, version.MustParse("1.22.0"), expected) -} === removed file 'src/github.com/juju/juju/upgrades/steps123.go' --- src/github.com/juju/juju/upgrades/steps123.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/steps123.go 1970-01-01 00:00:00 +0000 @@ -1,88 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "github.com/juju/juju/state" -) - -// stateStepsFor123 returns upgrade steps for Juju 1.23 that manipulate state directly. 
-func stateStepsFor123() []Step { - return []Step{ - &upgradeStep{ - description: "add default storage pools", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return addDefaultStoragePools(context.State()) - }, - }, - &upgradeStep{ - description: "drop old mongo indexes", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.DropOldIndexesv123(context.State()) - }, - }, &upgradeStep{ - description: "migrate envuuid to env-uuid in envUsersC", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddEnvUUIDToEnvUsersDoc(context.State()) - }, - }, - &upgradeStep{ - description: "move blocks from environment to state", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return moveBlocksFromEnvironToState(context) - }, - }, &upgradeStep{ - description: "insert userenvnameC doc for each environment", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddUniqueOwnerEnvNameForEnvirons(context.State()) - }, - }, &upgradeStep{ - description: "add name field to users and lowercase _id field", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddNameFieldLowerCaseIdOfUsers(context.State()) - }, - }, &upgradeStep{ - description: "add life field to IP addresses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddLifeFieldOfIPAddresses(context.State()) - }, - }, &upgradeStep{ - description: "add instance id field to IP addresses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddInstanceIdFieldOfIPAddresses(context.State()) - }, - }, &upgradeStep{ - description: "lower case _id of envUsers", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.LowerCaseEnvUsersID(context.State()) - }, - }, &upgradeStep{ - description: "add leadership settings documents for all services", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddLeadershipSettingsDocs(context.State()) - }, - }, - } -} - -// stepsFor123 returns upgrade steps for Juju 1.23 that only need the API. -func stepsFor123() []Step { - return []Step{ - &upgradeStep{ - description: "add environment UUID to agent config", - targets: []Target{AllMachines}, - run: addEnvironmentUUIDToAgentConfig, - }, - } -} === removed file 'src/github.com/juju/juju/upgrades/steps123_test.go' --- src/github.com/juju/juju/upgrades/steps123_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/steps123_test.go 1970-01-01 00:00:00 +0000 @@ -1,40 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
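Many of the removed 1.21–1.23 state steps share one shape: rewrite every document ID in a collection to carry the environment UUID, so several environments can share one set of collections. A toy sketch of that rewrite on a plain map (purely illustrative; the real steps operate on MongoDB documents via the state package):

    package main

    import "fmt"

    // addEnvUUIDToIDs returns a copy of docs in which every document ID
    // gains an "<env-uuid>:" prefix.
    func addEnvUUIDToIDs(docs map[string]string, envUUID string) map[string]string {
        migrated := make(map[string]string, len(docs))
        for id, doc := range docs {
            migrated[envUUID+":"+id] = doc
        }
        return migrated
    }

    func main() {
        docs := map[string]string{"machine-0": "...", "machine-1": "..."}
        fmt.Println(addEnvUUIDToIDs(docs, "6e33f7c8"))
        // every key now carries the "6e33f7c8:" prefix
    }

This is also why the ordering comments in the test files above matter: once IDs are rewritten, any later step that looks up documents by their old IDs would silently find nothing.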
- -package upgrades_test - -import ( - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type steps123Suite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&steps123Suite{}) - -func (s *steps123Suite) TestStateStepsFor123(c *gc.C) { - expected := []string{ - "add default storage pools", - "drop old mongo indexes", - "migrate envuuid to env-uuid in envUsersC", - "move blocks from environment to state", - "insert userenvnameC doc for each environment", - "add name field to users and lowercase _id field", - "add life field to IP addresses", - "add instance id field to IP addresses", - "lower case _id of envUsers", - "add leadership settings documents for all services", - } - assertStateSteps(c, version.MustParse("1.23.0"), expected) -} - -func (s *steps123Suite) TestStepsFor123(c *gc.C) { - expected := []string{ - "add environment UUID to agent config", - } - assertSteps(c, version.MustParse("1.23.0"), expected) -} === removed file 'src/github.com/juju/juju/upgrades/steps124.go' --- src/github.com/juju/juju/upgrades/steps124.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/steps124.go 1970-01-01 00:00:00 +0000 @@ -1,173 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/juju/juju/state" -) - -// stateStepsFor124 returns upgrade steps for Juju 1.24 that manipulate state directly. -func stateStepsFor124() []Step { - return []Step{ - &upgradeStep{ - description: "add block device documents for existing machines", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddDefaultBlockDevicesDocs(context.State()) - }}, - &upgradeStep{ - description: "move service.UnitSeq to sequence collection", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.MoveServiceUnitSeqToSequence(context.State()) - }}, - &upgradeStep{ - description: "add instance id field to IP addresses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddInstanceIdFieldOfIPAddresses(context.State()) - }}, - &upgradeStep{ - description: "add UUID field to IP addresses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddUUIDToIPAddresses(context.State()) - }, - }, - &upgradeStep{ - description: "migrate charm archives into environment storage", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return migrateCharmStorage(context.State(), context.AgentConfig()) - }, - }, - &upgradeStep{ - description: "change entityid field on status history to globalkey", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.ChangeStatusHistoryEntityId(context.State()) - }, - }, - &upgradeStep{ - description: "change updated field on statushistory from time to int", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.ChangeStatusHistoryUpdatedType(context.State()) - }, - }, - &upgradeStep{ - description: "change updated field on status from time to int", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.ChangeStatusUpdatedType(context.State()) - }, - }, - &upgradeStep{ - description: "add preferred addresses to machines", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return 
state.AddPreferredAddressesToMachines(context.State()) - }, - }, - } -} - -// stepsFor124 returns upgrade steps for Juju 1.24 that only need the API. -func stepsFor124() []Step { - return []Step{ - &upgradeStep{ - description: "move syslog config from LogDir to DataDir", - targets: []Target{AllMachines}, - run: moveSyslogConfig, - }, - } -} - -// stateStepsFor1244 returns upgrade steps for Juju 1.24.4 that manipulate state directly. -func stateStepsFor1244() []Step { - return []Step{ - &upgradeStep{ - description: "add missing service statuses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddMissingServiceStatuses(context.State()) - }, - }, - } -} - -func moveSyslogConfig(context Context) error { - config := context.AgentConfig() - logdir := config.LogDir() - datadir := config.DataDir() - - // these values were copied from - // github.com/juju/juju/utils/syslog/config.go - // Yes this is bad, but it is only needed once, ever, for an - // upgrade, so it didn't seem worth exporting those values. - files := []string{ - "ca-cert.pem", - "rsyslog-cert.pem", - "rsyslog-key.pem", - "logrotate.conf", - "logrotate.run", - } - var errs []string - for _, f := range files { - oldpath := filepath.Join(logdir, f) - newpath := filepath.Join(datadir, f) - if err := copyFile(newpath, oldpath); err != nil { - errs = append(errs, err.Error()) - continue - } - if err := osRemove(oldpath); err != nil { - // Don't fail the step if we can't get rid of the old files. - // We don't actually care if they still exist or not. - logger.Warningf("Can't delete old config file %q: %s", oldpath, err) - } - } - if len(errs) > 0 { - return fmt.Errorf("error(s) while moving old syslog config files: %s", strings.Join(errs, "\n")) - } - return nil -} - -// for testing... of course. -var osRemove = os.Remove - -// copyFile copies a file from one location to another. It won't overwrite -// existing files and will return nil in this case. This is used instead of -// os.Rename because os.Rename won't work across partitions. -func copyFile(to, from string) error { - logger.Debugf("Copying %q to %q", from, to) - orig, err := os.Open(from) - if os.IsNotExist(err) { - logger.Debugf("Old file %q does not exist, skipping.", from) - // original doesn't exist, that's fine. - return nil - } - if err != nil { - return err - } - defer orig.Close() - info, err := orig.Stat() - if err != nil { - return err - } - target, err := os.OpenFile(to, os.O_CREATE|os.O_WRONLY|os.O_EXCL, info.Mode()) - if os.IsExist(err) { - return nil - } - if err != nil { - return err - } - defer target.Close() - if _, err := io.Copy(target, orig); err != nil { - return err - } - return nil -} === removed file 'src/github.com/juju/juju/upgrades/steps124_test.go' --- src/github.com/juju/juju/upgrades/steps124_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/upgrades/steps124_test.go 1970-01-01 00:00:00 +0000 @@ -1,217 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.
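The copyFile helper above leans on os.OpenFile with the O_EXCL flag so that an existing destination is detected atomically at creation time rather than checked first and raced against. A runnable sketch of the same copy-without-overwrite idiom in isolation (the paths in main are placeholders):

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // copyIfAbsent copies src to dst unless dst already exists.
    // O_EXCL makes creation fail with an os.IsExist error instead of clobbering.
    func copyIfAbsent(dst, src string) error {
        in, err := os.Open(src)
        if os.IsNotExist(err) {
            return nil // nothing to copy
        }
        if err != nil {
            return err
        }
        defer in.Close()

        info, err := in.Stat()
        if err != nil {
            return err
        }
        out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_EXCL, info.Mode())
        if os.IsExist(err) {
            return nil // keep the existing destination untouched
        }
        if err != nil {
            return err
        }
        defer out.Close()
        _, err = io.Copy(out, in)
        return err
    }

    func main() {
        fmt.Println(copyIfAbsent("/tmp/dst.conf", "/tmp/src.conf"))
    }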
- -package upgrades_test - -import ( - "io/ioutil" - "os" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/agent" - "github.com/juju/juju/testing" - "github.com/juju/juju/upgrades" - "github.com/juju/juju/version" -) - -type steps124Suite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&steps124Suite{}) - -func (s *steps124Suite) TestStateStepsFor124(c *gc.C) { - expected := []string{ - "add block device documents for existing machines", - "move service.UnitSeq to sequence collection", - "add instance id field to IP addresses", - "add UUID field to IP addresses", - "migrate charm archives into environment storage", - "change entityid field on status history to globalkey", - "change updated field on statushistory from time to int", - "change updated field on status from time to int", - "add preferred addresses to machines", - } - assertStateSteps(c, version.MustParse("1.24.0"), expected) -} - -func (s *steps124Suite) TestStateStepsFor1244(c *gc.C) { - expected := []string{ - "add missing service statuses", - } - assertStateSteps(c, version.MustParse("1.24.4"), expected) -} - -func (s *steps124Suite) TestStepsFor124(c *gc.C) { - expected := []string{ - "move syslog config from LogDir to DataDir", - } - assertSteps(c, version.MustParse("1.24.0"), expected) -} - -func (s *steps124Suite) TestCopyFileNew(c *gc.C) { - src := c.MkDir() - dest := c.MkDir() - srcdata := []byte("new data!") - - // test that a file in src dir and not in dest dir gets copied. - - newSrc := filepath.Join(src, "new") - err := ioutil.WriteFile(newSrc, srcdata, 0644) - c.Assert(err, jc.ErrorIsNil) - - newDest := filepath.Join(dest, "new") - - err = upgrades.CopyFile(newDest, newSrc) - c.Assert(err, jc.ErrorIsNil) - - srcb, err := ioutil.ReadFile(newSrc) - c.Assert(err, jc.ErrorIsNil) - destb, err := ioutil.ReadFile(newDest) - c.Assert(err, jc.ErrorIsNil) - // convert to string and use Equals because we'll get a better failure message - c.Assert(string(destb), gc.Equals, string(srcb)) -} - -func (s *steps124Suite) TestCopyFileExisting(c *gc.C) { - src := c.MkDir() - dest := c.MkDir() - srcdata := []byte("new data!") - destdata := []byte("old data!") - - exSrc := filepath.Join(src, "existing") - exDest := filepath.Join(dest, "existing") - - err := ioutil.WriteFile(exSrc, srcdata, 0644) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(exDest, destdata, 0644) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.CopyFile(exDest, exSrc) - c.Assert(err, jc.ErrorIsNil) - - // assert we haven't changed the destination - b, err := ioutil.ReadFile(exDest) - - c.Assert(err, jc.ErrorIsNil) - // convert to string because we'll get a better failure message - c.Assert(string(b), gc.Equals, string(destdata)) -} - -func (s *steps124Suite) TestMoveSyslogConfigDefault(c *gc.C) { - logdir := c.MkDir() - datadir := c.MkDir() - data := []byte("data!") - files := []string{ - "ca-cert.pem", - "rsyslog-cert.pem", - "rsyslog-key.pem", - "logrotate.conf", - "logrotate.run", - } - for _, f := range files { - err := ioutil.WriteFile(filepath.Join(logdir, f), data, 0644) - c.Assert(err, jc.ErrorIsNil) - } - - ctx := fakeContext{cfg: fakeConfig{logdir: logdir, datadir: datadir}} - err := upgrades.MoveSyslogConfig(ctx) - c.Assert(err, jc.ErrorIsNil) - - for _, f := range files { - _, err := os.Stat(filepath.Join(datadir, f)) - c.Assert(err, jc.ErrorIsNil) - _, err = os.Stat(filepath.Join(logdir, f)) - c.Assert(err, jc.Satisfies, os.IsNotExist) - } -} - -func (s *steps124Suite) 
TestMoveSyslogConfig(c *gc.C) { - logdir := c.MkDir() - datadir := c.MkDir() - data := []byte("data!") - files := []string{ - "logrotate.conf", - "logrotate.run", - } - - // ensure that we don't overwrite an existing file in datadir, and don't - // error out if one of the files exists in datadir but not logdir. - - err := ioutil.WriteFile(filepath.Join(logdir, "logrotate.conf"), data, 0644) - c.Assert(err, jc.ErrorIsNil) - - err = ioutil.WriteFile(filepath.Join(datadir, "logrotate.run"), data, 0644) - c.Assert(err, jc.ErrorIsNil) - - differentData := []byte("different") - existing := filepath.Join(datadir, "logrotate.conf") - err = ioutil.WriteFile(existing, differentData, 0644) - c.Assert(err, jc.ErrorIsNil) - - ctx := fakeContext{cfg: fakeConfig{logdir: logdir, datadir: datadir}} - err = upgrades.MoveSyslogConfig(ctx) - c.Assert(err, jc.ErrorIsNil) - - for _, f := range files { - _, err := os.Stat(filepath.Join(datadir, f)) - c.Assert(err, jc.ErrorIsNil) - _, err = os.Stat(filepath.Join(logdir, f)) - c.Assert(err, jc.Satisfies, os.IsNotExist) - } - - b, err := ioutil.ReadFile(existing) - c.Assert(err, jc.ErrorIsNil) - // convert to string because we'll get a better failure message - c.Assert(string(b), gc.Equals, string(differentData)) - -} - -func (s *steps124Suite) TestMoveSyslogConfigCantDeleteOld(c *gc.C) { - logdir := c.MkDir() - datadir := c.MkDir() - file := filepath.Join(logdir, "logrotate.conf") - - // ensure that we don't error out if we can't remove the old file. - s.PatchValue(upgrades.OsRemove, func(string) error { return os.ErrPermission }) - - err := ioutil.WriteFile(file, []byte("data!"), 0644) - c.Assert(err, jc.ErrorIsNil) - - ctx := fakeContext{cfg: fakeConfig{logdir: logdir, datadir: datadir}} - err = upgrades.MoveSyslogConfig(ctx) - c.Assert(err, jc.ErrorIsNil) - - // should still exist in both places (i.e. check we didn't screw up the test) - _, err = os.Stat(file) - c.Assert(err, jc.ErrorIsNil) - _, err = os.Stat(filepath.Join(datadir, "logrotate.conf")) - c.Assert(err, jc.ErrorIsNil) -} - -type fakeContext struct { - upgrades.Context - cfg fakeConfig -} - -func (f fakeContext) AgentConfig() agent.ConfigSetter { - return f.cfg -} - -type fakeConfig struct { - agent.ConfigSetter - logdir string - datadir string -} - -func (f fakeConfig) LogDir() string { - return f.logdir -} - -func (f fakeConfig) DataDir() string { - return f.datadir -} === removed file 'src/github.com/juju/juju/upgrades/steps125.go' --- src/github.com/juju/juju/upgrades/steps125.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/steps125.go 1970-01-01 00:00:00 +0000 @@ -1,180 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "strings" - - "github.com/juju/errors" - "github.com/juju/juju/cloudconfig" - "github.com/juju/juju/environs" - "github.com/juju/juju/juju/osenv" - "github.com/juju/juju/state" - "github.com/juju/juju/version" - "github.com/juju/utils/exec" -) - -// stateStepsFor125 returns upgrade steps for Juju 1.25 that manipulate state directly.
-func stateStepsFor125() []Step { - return []Step{ - &upgradeStep{ - description: "set hosted environment count to number of hosted environments", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.SetHostedEnvironCount(context.State()) - }, - }, - &upgradeStep{ - description: "tag machine instances", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - st := context.State() - machines, err := st.AllMachines() - if err != nil { - return errors.Trace(err) - } - cfg, err := st.EnvironConfig() - if err != nil { - return errors.Trace(err) - } - env, err := environs.New(cfg) - if err != nil { - return errors.Trace(err) - } - return addInstanceTags(env, machines) - }, - }, - &upgradeStep{ - description: "add missing env-uuid to statuses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddMissingEnvUUIDOnStatuses(context.State()) - }, - }, - &upgradeStep{ - description: "add attachmentCount to volume", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddVolumeAttachmentCount(context.State()) - }}, - &upgradeStep{ - description: "add attachmentCount to filesystem", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddFilesystemsAttachmentCount(context.State()) - }}, - &upgradeStep{ - description: "add binding to volume", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddBindingToVolumes(context.State()) - }}, - &upgradeStep{ - description: "add binding to filesystem", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddBindingToFilesystems(context.State()) - }}, - &upgradeStep{ - description: "add status to volume", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddVolumeStatus(context.State()) - }}, - &upgradeStep{ - description: "add preferred addresses to machines", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddPreferredAddressesToMachines(context.State()) - }, - }, - &upgradeStep{ - description: "upgrade environment config", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - // TODO(axw) updateEnvironConfig should be - // called for all upgrades, to decouple this - // package from provider-specific upgrades. - st := context.State() - return upgradeEnvironConfig(st, st, environs.GlobalProviderRegistry()) - }, - }, - &upgradeStep{ - description: "move lastlogin and last connection to their own collections", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.MigrateLastLoginAndLastConnection(context.State()) - }}, - &upgradeStep{ - description: "sets an unknown unit status for missing unit statuses", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return state.AddMissingUnitStatus(context.State()) - }}, - } -} - -// stateStepsFor1252 returns upgrade steps for Juju 1.25.2 that manipulate state directly. -func stateStepsFor1252() []Step { - return []Step{ - &upgradeStep{ - description: "remove invalid tools metadata from state", - targets: []Target{DatabaseMaster}, - run: func(context Context) error { - return cleanToolsStorage(context.State()) - }}, - } -} - -// stepsFor125 returns upgrade steps for Juju 1.25 that only need the API. 
-func stepsFor125() []Step { - return []Step{ - &upgradeStep{ - description: "remove Jujud.pass file on windows", - targets: []Target{HostMachine}, - run: removeJujudpass, - }, - &upgradeStep{ - description: "add juju registry key", - targets: []Target{HostMachine}, - run: addJujuRegKey, - }, - } -} - -// removeJujudpass removes a file that is no longer used on versions >1.25. -// The Jujud.pass file was created during cloud init, so if it still exists -// we know its location for sure. -func removeJujudpass(context Context) error { - if version.Current.OS == version.Windows { - fileLocation := "C:\\Juju\\Jujud.pass" - if err := osRemove(fileLocation); err != nil { - // Don't fail the step if we can't get rid of the old files. - // We don't actually care if they still exist or not. - logger.Warningf("can't delete old password file %q: %s", fileLocation, err) - } - } - return nil -} - -var execRunCommands = exec.RunCommands - -// addJujuRegKey tries to create the same key that is now created during cloudinit -// on machines running version 1.25 or later. -// Since support for ACLs in golang is quite disastrous at the moment, and they're -// not especially easy to use, this is done using the exact same steps used in cloudinit. -func addJujuRegKey(context Context) error { - if version.Current.OS == version.Windows { - cmds := cloudconfig.CreateJujuRegistryKeyCmds() - _, err := execRunCommands(exec.RunParams{ - Commands: strings.Join(cmds, "\n"), - }) - if err != nil { - return errors.Annotate(err, "could not create juju registry key") - } - logger.Infof("created juju registry key at %s", osenv.JujuRegistryKey) - return nil - } - return nil -} === removed file 'src/github.com/juju/juju/upgrades/steps125_test.go' --- src/github.com/juju/juju/upgrades/steps125_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/steps125_test.go 1970-01-01 00:00:00 +0000 @@ -1,161 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.
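Both steps above reach the operating system through package-level function variables (osRemove, execRunCommands) rather than calling os.Remove or exec.RunCommands directly; the removed tests below swap those variables for fakes with PatchValue and restore them afterwards. A self-contained sketch of that test-seam pattern, with invented names:

    package main

    import (
        "fmt"
        "os"
    )

    // osRemove is a seam: production code calls through the variable,
    // and a test can reassign it to a fake, then restore it.
    var osRemove = os.Remove

    // cleanupObsoleteFile tolerates deletion failures, matching the
    // "warn but don't fail the upgrade" behaviour of the steps above.
    func cleanupObsoleteFile(path string) error {
        if err := osRemove(path); err != nil && !os.IsNotExist(err) {
            fmt.Printf("warning: could not delete %q: %v\n", path, err)
        }
        return nil
    }

    func main() {
        // In a test: old := osRemove; osRemove = fakeRemove; defer func() { osRemove = old }()
        _ = cleanupObsoleteFile("/tmp/obsolete.pass") // placeholder path
    }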
- -package upgrades_test - -import ( - "errors" - "strings" - - gc "gopkg.in/check.v1" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils/exec" - - "github.com/juju/juju/cloudconfig" - "github.com/juju/juju/testing" - "github.com/juju/juju/upgrades" - "github.com/juju/juju/version" -) - -type steps125Suite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&steps125Suite{}) - -func (s *steps125Suite) TestStateStepsFor125(c *gc.C) { - expected := []string{ - "set hosted environment count to number of hosted environments", - "tag machine instances", - "add missing env-uuid to statuses", - "add attachmentCount to volume", - "add attachmentCount to filesystem", - "add binding to volume", - "add binding to filesystem", - "add status to volume", - "add preferred addresses to machines", - "upgrade environment config", - "move lastlogin and last connection to their own collections", - "sets an unknown unit status for missing unit statuses", - } - assertStateSteps(c, version.MustParse("1.25.0"), expected) -} - -func (s *steps125Suite) TestStateStepsFor1252(c *gc.C) { - expected := []string{ - "remove invalid tools metadata from state", - } - assertStateSteps(c, version.MustParse("1.25.2"), expected) -} - -func (s *steps125Suite) TestStepsFor125(c *gc.C) { - expected := []string{ - "remove Jujud.pass file on windows", - "add juju registry key", - } - assertSteps(c, version.MustParse("1.25.0"), expected) -} - -type mockOSRemove struct { - called bool - path string - shouldFail bool -} - -func (m *mockOSRemove) osRemove(path string) error { - m.called = true - m.path = path - if m.shouldFail { - return errors.New("i done error'd") - } - return nil -} - -var removeFileTests = []struct { - os version.OSType - callExpected bool - shouldFail bool -}{ - { - os: version.Ubuntu, - callExpected: false, - shouldFail: false, - }, - { - os: version.Windows, - callExpected: true, - shouldFail: false, - }, - { - os: version.Windows, - callExpected: true, - shouldFail: true, - }, -} - -func (s *steps125Suite) TestRemoveJujudPass(c *gc.C) { - for _, t := range removeFileTests { - mock := &mockOSRemove{shouldFail: t.shouldFail} - s.PatchValue(upgrades.OsRemove, mock.osRemove) - s.PatchValue(&version.Current.OS, t.os) - err := upgrades.RemoveJujudpass(nil) - c.Assert(err, jc.ErrorIsNil) - c.Assert(mock.called, gc.Equals, t.callExpected) - } -} - -type mockRunCmds struct { - c *gc.C - commands string - called bool - shouldFail bool -} - -func (m *mockRunCmds) runCommands(params exec.RunParams) (*exec.ExecResponse, error) { - m.called = true - m.c.Assert(params.Commands, gc.Equals, strings.Join(cloudconfig.CreateJujuRegistryKeyCmds(), "\n")) - if m.shouldFail { - return nil, errors.New("derp") - } - return nil, nil -} - -var addRegKeyTests = []struct { - os version.OSType - callExpected bool - shouldFail bool - errMessage string -}{ - { - os: version.Ubuntu, - callExpected: false, - shouldFail: false, - }, - { - os: version.Windows, - callExpected: true, - shouldFail: false, - }, - { - os: version.Windows, - callExpected: true, - shouldFail: true, - errMessage: "could not create juju registry key: derp", - }, -} - -func (s *steps125Suite) TestAddJujuRegKey(c *gc.C) { - for _, t := range addRegKeyTests { - mock := &mockRunCmds{shouldFail: t.shouldFail, c: c} - s.PatchValue(upgrades.ExecRunCommands, mock.runCommands) - s.PatchValue(&version.Current.OS, t.os) - err := upgrades.AddJujuRegKey(nil) - if t.shouldFail { - c.Assert(err, gc.ErrorMatches, t.errMessage) - } else { - c.Assert(err, jc.ErrorIsNil) - } 
- c.Assert(mock.called, gc.Equals, t.callExpected) - } -} === added file 'src/github.com/juju/juju/upgrades/steps126.go' --- src/github.com/juju/juju/upgrades/steps126.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/upgrades/steps126.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgrades + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/environs" + "github.com/juju/juju/state" + "github.com/juju/juju/state/utils" + "github.com/juju/juju/version" +) + +// stepsFor126 returns upgrade steps for Juju 1.26. +func stepsFor126() []Step { + return []Step{} +} + +// stateStepsFor126 returns upgrade steps for Juju 1.26 that manipulate state directly. +func stateStepsFor126() []Step { + return []Step{ + &upgradeStep{ + description: "add the version field to all settings docs", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return state.MigrateSettingsSchema(context.State()) + }, + }, + &upgradeStep{ + description: "add status to filesystem", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return state.AddFilesystemStatus(context.State()) + }, + }, + &upgradeStep{ + description: "upgrade model config", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + // TODO(axw) updateModelConfig should be + // called for all upgrades, to decouple this + // package from provider-specific upgrades. + st := context.State() + return upgradeModelConfig(st, st, environs.GlobalProviderRegistry()) + }, + }, + //TODO(perrito666) make this an unconditional upgrade step. + // it would be ideal not to have to modify this package whenever we add provider upgrade steps. + &upgradeStep{ + description: "provider side upgrades", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + st := context.State() + env, err := utils.GetEnviron(st) + if err != nil { + return errors.Annotate(err, "getting provider for upgrade") + } + return upgradeProviderChanges(env, st, version.Number{Major: 1, Minor: 26}) + }, + }, + &upgradeStep{ + description: "update machine preferred addresses", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return state.AddPreferredAddressesToMachines(context.State()) + }, + }, + &upgradeStep{ + description: "add default endpoint bindings to services", + targets: []Target{DatabaseMaster}, + run: func(context Context) error { + return state.AddDefaultEndpointBindingsToServices(context.State()) + }, + }, + } +} === added file 'src/github.com/juju/juju/upgrades/steps126_test.go' --- src/github.com/juju/juju/upgrades/steps126_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/upgrades/steps126_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
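steps126.go above threads an explicit version.Number{Major: 1, Minor: 26} into the provider upgrade hook so that provider-specific changes can be gated on the version being upgraded to. A rough, illustrative sketch of that kind of version-gated dispatch (this is not Juju's upgradeProviderChanges, whose internals are not shown here; all names are invented):

    package main

    import "fmt"

    // Number is a trimmed stand-in for a semantic version.
    type Number struct {
        Major, Minor int
    }

    func newerThan(a, b Number) bool {
        return a.Major > b.Major || (a.Major == b.Major && a.Minor > b.Minor)
    }

    // providerUpgrade is one provider-specific change, tagged with the
    // release that introduced it.
    type providerUpgrade struct {
        introducedIn Number
        apply        func() error
    }

    func runProviderUpgrades(target Number, ups []providerUpgrade) error {
        for _, u := range ups {
            if newerThan(u.introducedIn, target) {
                continue // belongs to a later release; skip it
            }
            if err := u.apply(); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        err := runProviderUpgrades(Number{1, 26}, []providerUpgrade{
            {Number{1, 26}, func() error { fmt.Println("change for 1.26"); return nil }},
            {Number{1, 27}, func() error { fmt.Println("change for 1.27"); return nil }}, // skipped
        })
        fmt.Println("err:", err)
    }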
+ +package upgrades_test + +import ( + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/version" +) + +type steps126Suite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&steps126Suite{}) + +func (s *steps126Suite) TestStepsFor126(c *gc.C) { + expected := []string{} + assertSteps(c, version.MustParse("1.26.0"), expected) +} + +func (s *steps126Suite) TestStateStepsFor126(c *gc.C) { + expected := []string{ + "add the version field to all settings docs", + "add status to filesystem", + "upgrade model config", + "provider side upgrades", + "update machine preferred addresses", + "add default endpoint bindings to services", + } + assertStateSteps(c, version.MustParse("1.26.0"), expected) +} === removed file 'src/github.com/juju/juju/upgrades/storage.go' --- src/github.com/juju/juju/upgrades/storage.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/storage.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "github.com/juju/juju/state" - "github.com/juju/juju/storage/poolmanager" -) - -func addDefaultStoragePools(st *state.State) error { - settings := state.NewStateSettings(st) - return poolmanager.AddDefaultStoragePools(settings) -} === removed file 'src/github.com/juju/juju/upgrades/storage_test.go' --- src/github.com/juju/juju/upgrades/storage_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/storage_test.go 1970-01-01 00:00:00 +0000 @@ -1,34 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/provider/ec2" - "github.com/juju/juju/state" - "github.com/juju/juju/storage/poolmanager" - "github.com/juju/juju/upgrades" -) - -type defaultStoragePoolsSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&defaultStoragePoolsSuite{}) - -func (s *defaultStoragePoolsSuite) TestDefaultStoragePools(c *gc.C) { - err := upgrades.AddDefaultStoragePools(s.State) - settings := state.NewStateSettings(s.State) - err = poolmanager.AddDefaultStoragePools(settings) - c.Assert(err, jc.ErrorIsNil) - pm := poolmanager.New(settings) - for _, pName := range []string{"ebs-ssd"} { - p, err := pm.Get(pName) - c.Assert(err, jc.ErrorIsNil) - c.Assert(p.Provider(), gc.Equals, ec2.EBS_ProviderType) - } -} === removed file 'src/github.com/juju/juju/upgrades/systemsshkey.go' --- src/github.com/juju/juju/upgrades/systemsshkey.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/upgrades/systemsshkey.go 1970-01-01 00:00:00 +0000 @@ -1,234 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "io/ioutil" - "os" - - "github.com/juju/errors" - "gopkg.in/mgo.v2/txn" - - "github.com/juju/juju/api/keymanager" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state" - "github.com/juju/juju/utils/ssh" -) - -func ensureSystemSSHKey(context Context) error { - privateKey, publicKey, err := readOrMakeSystemIdentity(context) - if err != nil { - return errors.Trace(err) - } - if publicKey == "" { - // privateKey was read from disk, so it exists. 
- return nil - } - if err := updateAuthorizedKeys(context, publicKey); err != nil { - return errors.Trace(err) - } - if err := writeSystemIdentity(context, privateKey); err != nil { - return errors.Trace(err) - } - return nil -} - -// Somewhere in the 1.20 cycle the system-identity was added to the -// state serving info collection in the database, however no migration -// step was added to take the identity file from disk and put it into the -// new value in the database. -func ensureSystemSSHKeyRedux(context Context) error { - // If there is a system-identity in the database already, we don't need to - // do anything. - stateInfo, err := context.State().StateServingInfo() - if err != nil { - logger.Errorf("failed to read state serving info: %v", err) - return errors.Trace(err) - } - if stateInfo.SystemIdentity != "" { - logger.Infof("state serving info has a system identity already, all good") - // We are good. One exists already. - // Make sure that the agent thinks that it is the same. - return updateSystemIdentityInAgentConfig(context, stateInfo.SystemIdentity) - } - - privateKey, publicKey, err := readOrMakeSystemIdentity(context) - if err != nil { - logger.Errorf("failed to read or make system identity: %v", err) - return errors.Trace(err) - } - - if err := state.SetSystemIdentity(context.State(), privateKey); err != nil { - if errors.Cause(err) == txn.ErrAborted { - logger.Errorf("someone else has set system identity already") - // Another state server upgrading concurrently has updated - // the system identity so it is no longer empty. So discard - // anything that was created, reread the system info and write - // out the file. We also assume that the other upgrade has - // updated the authorized keys already. - stateInfo, err := context.State().StateServingInfo() - if err != nil { - logger.Errorf("failed to read state serving info: %v", err) - return errors.Trace(err) - } - if stateInfo.SystemIdentity == "" { - logger.Errorf("but the transaction said it would be there...") - return errors.New("system identity is not set") - } - if err := writeSystemIdentity(context, stateInfo.SystemIdentity); err != nil { - logger.Errorf("failed to write the system identity file: %v", err) - return errors.Trace(err) - } - return updateSystemIdentityInAgentConfig(context, stateInfo.SystemIdentity) - } - - logger.Errorf("failed to set system identity: %v", err) - return errors.Annotate(err, "cannot set state serving info") - } - - if publicKey != "" { - if err := writeSystemIdentity(context, privateKey); err != nil { - return errors.Trace(err) - } - } - return updateSystemIdentityInAgentConfig(context, privateKey) -} - -// updateAuthorizedKeysForSystemIdentity makes sure that the authorized keys -// list is up to date with the system identity. Due to changes in the way -// upgrades are done in 1.22, this part, which uses the API had to be split -// from the first part which used the state connection. -func updateAuthorizedKeysForSystemIdentity(context Context) error { - agentInfo, ok := context.AgentConfig().StateServingInfo() - if !ok { - return errors.New("missing state serving info for the agent") - } - publicKey, err := ssh.PublicKey([]byte(agentInfo.SystemIdentity), config.JujuSystemKey) - if err != nil { - return errors.Trace(err) - } - return errors.Trace(updateAuthorizedKeys(context, publicKey)) -} - -func updateAuthorizedKeys(context Context, publicKey string) error { - // Look for an existing authorized key. 
- logger.Infof("setting new authorized key for %q", publicKey) - keyManager := keymanager.NewClient(context.APIState()) - - result, err := keyManager.ListKeys(ssh.FullKeys, config.JujuSystemKey) - if err != nil { - return errors.Trace(err) - } - if result[0].Error != nil { - return errors.Trace(result[0].Error) - } - keys := result[0].Result - - // Loop through the keys. If we find a key that matches the publicKey - // then we are good, and done. If the comment on the key is for the system identity - // but it is not the same, remove it. - var keysToRemove []string - for _, key := range keys { - // The list of keys returned don't have carriage returns, but the - // publicKey does, so add one one before testing for equality. - if (key + "\n") == publicKey { - logger.Infof("system identity key already in authorized list") - return nil - } - - fingerprint, comment, err := ssh.KeyFingerprint(key) - if err != nil { - // Log the error, but it doesn't stop us doing what we need to do. - logger.Errorf("bad key in authorized keys: %v", err) - } else if comment == config.JujuSystemKey { - keysToRemove = append(keysToRemove, fingerprint) - } - } - if keysToRemove != nil { - logger.Infof("removing %d keys", len(keysToRemove)) - results, err := keyManager.DeleteKeys(config.JujuSystemKey, keysToRemove...) - if err != nil { - // Log the error but continue. - logger.Errorf("failed to remove keys: %v", err) - } else { - for _, err := range results { - if err.Error != nil { - // Log the error but continue. - logger.Errorf("failed to remove key: %v", err.Error) - } - } - } - } - - errResults, err := keyManager.AddKeys(config.JujuSystemKey, publicKey) - if err != nil { - return errors.Annotate(err, "failed to update authorised keys with new system key") - } - if err := errResults[0].Error; err != nil { - return errors.Annotate(err, "failed to update authorised keys with new system key") - } - return nil -} - -func updateSystemIdentityInAgentConfig(context Context, systemIdentity string) error { - agentInfo, ok := context.AgentConfig().StateServingInfo() - if !ok { - return errors.New("missing state serving info for the agent") - } - if agentInfo.SystemIdentity != systemIdentity { - agentInfo.SystemIdentity = systemIdentity - context.AgentConfig().SetStateServingInfo(agentInfo) - } - return nil -} - -func readOrMakeSystemIdentity(context Context) (privateKey, publicKey string, err error) { - identityFile := context.AgentConfig().SystemIdentityPath() - // Don't generate a key unless we have to. - keyExists, err := systemKeyExists(identityFile) - if err != nil { - return "", "", errors.Annotate(err, "failed to check system key exists") - } - if keyExists { - logger.Infof("key exists, reading contents") - - // Read the contents. 
- contents, err := ioutil.ReadFile(identityFile) - if err != nil { - return "", "", errors.Trace(err) - } - // If we are just reading the private key, - return string(contents), "", nil - } - - logger.Infof("generating new key") - privateKey, publicKey, err = ssh.GenerateKey(config.JujuSystemKey) - if err != nil { - return "", "", errors.Annotate(err, "failed to create system key") - } - return privateKey, publicKey, nil -} - -func writeSystemIdentity(context Context, privateKey string) error { - identityFile := context.AgentConfig().SystemIdentityPath() - logger.Infof("writing system identity to %q", identityFile) - if err := ioutil.WriteFile(identityFile, []byte(privateKey), 0600); err != nil { - return errors.Annotate(err, "failed to write identity file") - } - return nil -} - -func systemKeyExists(identityFile string) (bool, error) { - _, err := os.Stat(identityFile) - if err == nil { - logger.Infof("identity file %q exists", identityFile) - return true, nil - } - if !os.IsNotExist(err) { - logger.Infof("error looking for identity file %q: %v", identityFile, err) - return false, err - } - logger.Infof("identity file %q does not exist", identityFile) - return false, nil -} === removed file 'src/github.com/juju/juju/upgrades/systemsshkey_test.go' --- src/github.com/juju/juju/upgrades/systemsshkey_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/upgrades/systemsshkey_test.go 1970-01-01 00:00:00 +0000 @@ -1,241 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades_test - -import ( - "io/ioutil" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/environs/config" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/testing" - "github.com/juju/juju/upgrades" - "github.com/juju/juju/utils/ssh" -) - -type systemSSHKeySuiteBase struct { - jujutesting.JujuConnSuite - ctx upgrades.Context -} - -func (s *systemSSHKeySuiteBase) keyFile() string { - return filepath.Join(s.DataDir(), "system-identity") -} - -func (s *systemSSHKeySuiteBase) assertKeyCreation(c *gc.C) string { - c.Assert(s.keyFile(), jc.IsNonEmptyFile) - - // Check the private key from the system identify file. - contents, err := ioutil.ReadFile(s.keyFile()) - c.Assert(err, jc.ErrorIsNil) - privateKey := string(contents) - c.Check(privateKey, jc.HasPrefix, "-----BEGIN RSA PRIVATE KEY-----\n") - c.Check(privateKey, jc.HasSuffix, "-----END RSA PRIVATE KEY-----\n") - return privateKey -} - -func (s *systemSSHKeySuiteBase) assertHasPublicKeyInAuth(c *gc.C, privateKey string) { - publicKey, err := ssh.PublicKey([]byte(privateKey), config.JujuSystemKey) - c.Assert(err, jc.ErrorIsNil) - // Check the public key from the auth keys config. - cfg, err := s.JujuConnSuite.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - authKeys := ssh.SplitAuthorisedKeys(cfg.AuthorizedKeys()) - // The dummy env is created with 1 fake key. We check that another has been added. 
- c.Assert(authKeys, gc.HasLen, 2) - c.Check(authKeys[1]+"\n", gc.Equals, publicKey) -} - -type systemSSHKeySuite struct { - systemSSHKeySuiteBase -} - -var _ = gc.Suite(&systemSSHKeySuite{}) - -func (s *systemSSHKeySuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - s.ctx = &mockContext{ - agentConfig: &mockAgentConfig{dataDir: s.DataDir()}, - apiState: apiState, - } - - c.Assert(s.keyFile(), jc.DoesNotExist) - // Bootstrap adds juju-system-key; remove it. - err := s.State.UpdateEnvironConfig(map[string]interface{}{ - "authorized-keys": testing.FakeAuthKeys, - }, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *systemSSHKeySuite) TestSystemKeyCreated(c *gc.C) { - err := upgrades.EnsureSystemSSHKey(s.ctx) - c.Assert(err, jc.ErrorIsNil) - pk := s.assertKeyCreation(c) - s.assertHasPublicKeyInAuth(c, pk) -} - -func (s *systemSSHKeySuite) TestIdempotent(c *gc.C) { - err := upgrades.EnsureSystemSSHKey(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - privateKey, err := ioutil.ReadFile(s.keyFile()) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.EnsureSystemSSHKey(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - // Ensure we haven't generated the key again a second time. - privateKey2, err := ioutil.ReadFile(s.keyFile()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(privateKey, gc.DeepEquals, privateKey2) -} - -type systemSSHKeyReduxSuite struct { - systemSSHKeySuiteBase -} - -var _ = gc.Suite(&systemSSHKeyReduxSuite{}) - -func (s *systemSSHKeyReduxSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - // no api state. - s.ctx = &mockContext{ - agentConfig: &mockAgentConfig{dataDir: s.DataDir()}, - state: s.State, - } - c.Assert(s.keyFile(), jc.DoesNotExist) - // Bootstrap adds juju-system-key; remove it. - err := s.State.UpdateEnvironConfig(map[string]interface{}{ - "authorized-keys": testing.FakeAuthKeys, - }, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *systemSSHKeyReduxSuite) TestReduxSystemKeyCreated(c *gc.C) { - err := upgrades.EnsureSystemSSHKeyRedux(s.ctx) - c.Assert(err, jc.ErrorIsNil) - s.assertKeyCreation(c) - - // Config authorized keys should be unaltered. - cfg, err := s.JujuConnSuite.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - c.Assert(cfg.AuthorizedKeys(), gc.Equals, testing.FakeAuthKeys) -} - -func (s *systemSSHKeyReduxSuite) TestReduxUpdatesAgentConfig(c *gc.C) { - err := upgrades.EnsureSystemSSHKeyRedux(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - info, _ := s.ctx.AgentConfig().StateServingInfo() - c.Assert(info.SystemIdentity, gc.Not(gc.Equals), "") -} - -func (s *systemSSHKeyReduxSuite) TestReduxIdempotent(c *gc.C) { - err := upgrades.EnsureSystemSSHKeyRedux(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - privateKey, err := ioutil.ReadFile(s.keyFile()) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.EnsureSystemSSHKeyRedux(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - // Ensure we haven't generated the key again a second time. 
- privateKey2, err := ioutil.ReadFile(s.keyFile()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(privateKey, gc.DeepEquals, privateKey2) -} - -func (s *systemSSHKeyReduxSuite) TestReduxExistsInStateServingInfo(c *gc.C) { - err := state.SetSystemIdentity(s.State, "ssh-private-key") - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.EnsureSystemSSHKeyRedux(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - info, err := s.State.StateServingInfo() - c.Assert(err, jc.ErrorIsNil) - c.Assert(info.SystemIdentity, gc.Equals, "ssh-private-key") -} - -func (s *systemSSHKeyReduxSuite) TestReduxExistsOnDisk(c *gc.C) { - err := ioutil.WriteFile(s.keyFile(), []byte("ssh-private-key"), 0600) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.EnsureSystemSSHKeyRedux(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - info, err := s.State.StateServingInfo() - c.Assert(err, jc.ErrorIsNil) - c.Assert(info.SystemIdentity, gc.Equals, "ssh-private-key") -} - -type updateAuthKeysSuite struct { - systemSSHKeySuiteBase - systemIdentity string -} - -var _ = gc.Suite(&updateAuthKeysSuite{}) - -func (s *updateAuthKeysSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - mockAgent := &mockAgentConfig{dataDir: s.DataDir()} - // The ensure system ssh redux has already run. - err := upgrades.EnsureSystemSSHKeyRedux(&mockContext{ - agentConfig: mockAgent, - state: s.State, - }) - c.Assert(err, jc.ErrorIsNil) - s.systemIdentity = s.assertKeyCreation(c) - - apiState, _ := s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - s.ctx = &mockContext{ - agentConfig: mockAgent, - apiState: apiState, - } - // Bootstrap adds juju-system-key; remove it. - err = s.State.UpdateEnvironConfig(map[string]interface{}{ - "authorized-keys": testing.FakeAuthKeys, - }, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *updateAuthKeysSuite) TestUpgradeStep(c *gc.C) { - err := upgrades.UpdateAuthorizedKeysForSystemIdentity(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - s.assertHasPublicKeyInAuth(c, s.systemIdentity) -} - -func (s *updateAuthKeysSuite) TestIdempotent(c *gc.C) { - err := upgrades.UpdateAuthorizedKeysForSystemIdentity(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.UpdateAuthorizedKeysForSystemIdentity(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - s.assertHasPublicKeyInAuth(c, s.systemIdentity) -} - -func (s *updateAuthKeysSuite) TestReplacesWrongKey(c *gc.C) { - // Put a wrong key in there. - _, publicKey, err := ssh.GenerateKey(config.JujuSystemKey) - c.Assert(err, jc.ErrorIsNil) - keys := testing.FakeAuthKeys + "\n" + publicKey - err = s.State.UpdateEnvironConfig(map[string]interface{}{ - "authorized-keys": keys, - }, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.UpdateAuthorizedKeysForSystemIdentity(s.ctx) - c.Assert(err, jc.ErrorIsNil) - - s.assertHasPublicKeyInAuth(c, s.systemIdentity) -} === removed file 'src/github.com/juju/juju/upgrades/tags.go' --- src/github.com/juju/juju/upgrades/tags.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/tags.go 1970-01-01 00:00:00 +0000 @@ -1,58 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
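The system-identity code and tests above hinge on idempotency: key material is generated only when the identity file is absent, written with mode 0600, and a second run is a pure read. A compact sketch of that read-or-create flow, with a placeholder generator standing in for ssh.GenerateKey:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // readOrCreateIdentity returns the identity file's contents, generating
    // and writing the key only if the file does not exist yet.
    func readOrCreateIdentity(path string, generate func() ([]byte, error)) ([]byte, error) {
        data, err := ioutil.ReadFile(path)
        if err == nil {
            return data, nil // already present: never regenerate
        }
        if !os.IsNotExist(err) {
            return nil, err
        }
        key, err := generate()
        if err != nil {
            return nil, err
        }
        // 0600: private key material must not be group- or world-readable.
        if err := ioutil.WriteFile(path, key, 0600); err != nil {
            return nil, err
        }
        return key, nil
    }

    func main() {
        dir, _ := ioutil.TempDir("", "identity")
        path := filepath.Join(dir, "system-identity")
        gen := func() ([]byte, error) { return []byte("-----BEGIN RSA PRIVATE KEY-----\n"), nil }
        k1, _ := readOrCreateIdentity(path, gen)
        k2, _ := readOrCreateIdentity(path, gen) // second call reads, never regenerates
        fmt.Println(string(k1) == string(k2))    // true
    }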
- -package upgrades - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/cloudconfig/instancecfg" - "github.com/juju/juju/environs" - "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" -) - -func addInstanceTags(env environs.Environ, machines []*state.Machine) error { - cfg := env.Config() - tagger, ok := env.(environs.InstanceTagger) - if !ok { - logger.Debugf("environment type %q does not support instance tagging", cfg.Type()) - return nil - } - - // Tag each top-level, provisioned machine. - logger.Infof("adding tags to existing machine instances") - for _, m := range machines { - if names.IsContainerMachine(m.Id()) { - continue - } - isManual, err := m.IsManual() - if err != nil { - return errors.Annotatef(err, "determining if machine %v is manually provisioned", m.Id()) - } - if isManual { - continue - } - instId, err := m.InstanceId() - if errors.IsNotProvisioned(err) { - continue - } else if err != nil { - return errors.Annotatef(err, "getting instance ID for machine %v", m.Id()) - } - - stateMachineJobs := m.Jobs() - paramsMachineJobs := make([]multiwatcher.MachineJob, len(stateMachineJobs)) - for i, job := range stateMachineJobs { - paramsMachineJobs[i] = job.ToParams() - } - - tags := instancecfg.InstanceTags(cfg, paramsMachineJobs) - logger.Infof("tagging instance %v: %v", instId, tags) - if err := tagger.TagInstance(instId, tags); err != nil { - return errors.Annotatef(err, "tagging instance %v for machine %v", instId, m.Id()) - } - } - - return nil -} === removed file 'src/github.com/juju/juju/upgrades/tags_test.go' --- src/github.com/juju/juju/upgrades/tags_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/tags_test.go 1970-01-01 00:00:00 +0000 @@ -1,144 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
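addInstanceTags above discovers whether a provider supports tagging with a type assertion to an optional interface rather than a capability flag: environments that implement InstanceTagger get tagged, anything else is skipped with a debug message. The same idiom in isolation (the interface and environment types here are invented for the sketch):

    package main

    import "fmt"

    // Environ is the broad interface every provider implements.
    type Environ interface {
        Name() string
    }

    // InstanceTagger is an optional capability some providers also implement.
    type InstanceTagger interface {
        TagInstance(id string, tags map[string]string) error
    }

    type basicEnv struct{ name string }

    func (e basicEnv) Name() string { return e.name }

    type taggingEnv struct{ basicEnv }

    func (e taggingEnv) TagInstance(id string, tags map[string]string) error {
        fmt.Printf("%s: tagged %s with %v\n", e.name, id, tags)
        return nil
    }

    func tagIfSupported(env Environ, id string, tags map[string]string) error {
        tagger, ok := env.(InstanceTagger) // capability check, no flags needed
        if !ok {
            fmt.Printf("%s does not support instance tagging; skipping\n", env.Name())
            return nil
        }
        return tagger.TagInstance(id, tags)
    }

    func main() {
        tags := map[string]string{"juju-env-uuid": "deadbeef"}
        tagIfSupported(basicEnv{"manual"}, "inst-0", tags)
        tagIfSupported(taggingEnv{basicEnv{"ec2"}}, "inst-1", tags)
    }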
- -package upgrades_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/instance" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" - "github.com/juju/juju/testing" - "github.com/juju/juju/upgrades" -) - -type tagsSuite struct { - statetesting.StateSuite - stateServer, unprovisioned, provisioned, container *state.Machine -} - -var _ = gc.Suite(&tagsSuite{}) - -func (s *tagsSuite) SetUpTest(c *gc.C) { - s.StateSuite.SetUpTest(c) - - var err error - s.stateServer, err = s.State.AddMachine("quantal", state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) - err = s.stateServer.SetProvisioned("inst-0", "nonce-0", nil) - c.Assert(err, jc.ErrorIsNil) - - s.unprovisioned, err = s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - - s.provisioned, err = s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - err = s.provisioned.SetProvisioned("inst-1", "nonce-1", nil) - c.Assert(err, jc.ErrorIsNil) - - s.container, err = s.State.AddMachineInsideMachine(state.MachineTemplate{ - Series: "quantal", - Jobs: []state.MachineJob{state.JobHostUnits}, - }, s.provisioned.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - err = s.container.SetProvisioned("inst-2", "nonce-2", nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *tagsSuite) TestAddInstanceTagsSupportsTagging(c *gc.C) { - env := &testEnvironWithTagging{ - testEnviron: testEnviron{ - cfg: testing.CustomEnvironConfig(c, testing.Attrs{ - "resource-tags": "abc=123", - }), - }, - } - err := upgrades.AddInstanceTags(env, []*state.Machine{ - s.stateServer, s.unprovisioned, s.provisioned, s.container, - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(env.calls, jc.DeepEquals, []tagInstanceArgs{{ - "inst-0", map[string]string{ - "juju-is-state": "true", - "juju-env-uuid": testing.EnvironmentTag.Id(), - "abc": "123", - }, - }, { - "inst-1", map[string]string{ - "juju-env-uuid": testing.EnvironmentTag.Id(), - "abc": "123", - }, - }}) -} - -func (s *tagsSuite) TestAddInstanceTagsIgnoresManuallyProvisionedMachines(c *gc.C) { - env := &testEnvironWithTagging{ - testEnviron: testEnviron{ - cfg: testing.CustomEnvironConfig(c, testing.Attrs{ - "resource-tags": "abc=123", - }), - }, - } - - manuallyProvisioned, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - err = manuallyProvisioned.SetProvisioned("inst-10", "manual:", nil) - c.Assert(err, jc.ErrorIsNil) - - err = upgrades.AddInstanceTags(env, []*state.Machine{ - s.stateServer, - manuallyProvisioned, - s.provisioned}) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(env.calls, jc.DeepEquals, []tagInstanceArgs{{ - // for s.stateServer - "inst-0", map[string]string{ - "juju-is-state": "true", - "juju-env-uuid": testing.EnvironmentTag.Id(), - "abc": "123", - }, - }, // for s.provisioned - { - "inst-1", map[string]string{ - "juju-env-uuid": testing.EnvironmentTag.Id(), - "abc": "123", - }, - }}) -} - -func (s *tagsSuite) TestAddInstanceTagsDoesNotSupportTagging(c *gc.C) { - env := &testEnviron{cfg: testing.CustomEnvironConfig(c, nil)} - err := upgrades.AddInstanceTags(env, []*state.Machine{ - s.stateServer, s.unprovisioned, s.provisioned, s.container, - }) - c.Assert(err, jc.ErrorIsNil) -} - -type testEnviron struct { - environs.Environ - cfg *config.Config -} - -func (e *testEnviron) Config() *config.Config { - return e.cfg -} - -type tagInstanceArgs struct { - id 
instance.Id - tags map[string]string -} - -type testEnvironWithTagging struct { - testEnviron - calls []tagInstanceArgs -} - -func (e *testEnvironWithTagging) TagInstance(id instance.Id, tags map[string]string) error { - e.calls = append(e.calls, tagInstanceArgs{id, tags}) - return nil -} === removed file 'src/github.com/juju/juju/upgrades/toolstorage.go' --- src/github.com/juju/juju/upgrades/toolstorage.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/toolstorage.go 1970-01-01 00:00:00 +0000 @@ -1,163 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package upgrades - -import ( - "bytes" - "io" - - "github.com/juju/errors" - "github.com/juju/utils" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/filestorage" - "github.com/juju/juju/environs/simplestreams" - "github.com/juju/juju/environs/storage" - envtools "github.com/juju/juju/environs/tools" - "github.com/juju/juju/provider" - "github.com/juju/juju/state" - "github.com/juju/juju/state/toolstorage" - "github.com/juju/juju/tools" -) - -var stateToolsStorage = (*state.State).ToolsStorage - -// migrateToolsStorage copies tools from provider storage to -// environment storage. -func migrateToolsStorage(st *state.State, agentConfig agent.Config) error { - logger.Debugf("migrating tools to environment storage") - - tstor, err := stateToolsStorage(st) - if err != nil { - return errors.Annotate(err, "cannot get tools storage") - } - defer tstor.Close() - - // The local and manual providers host storage on the state server's - // filesystem, and serve it via HTTP. The storage worker - // doesn't run yet, so we just open the files directly. - var stor storage.StorageReader - providerType := agentConfig.Value(agent.ProviderType) - if providerType == provider.Local || provider.IsManual(providerType) { - storageDir := agentConfig.Value(agent.StorageDir) - var err error - stor, err = filestorage.NewFileStorageReader(storageDir) - if err != nil { - return errors.Annotate(err, "cannot get local filesystem storage reader") - } - } else { - var err error - stor, err = environs.LegacyStorage(st) - if errors.IsNotSupported(err) { - return nil - } else if err != nil { - return errors.Annotate(err, "cannot get provider storage") - } - } - - // Search provider storage for tools. - datasource := storage.NewStorageSimpleStreamsDataSource("provider storage", stor, storage.BaseToolsPath) - toolsList, err := envtools.FindToolsForCloud( - []simplestreams.DataSource{datasource}, - simplestreams.CloudSpec{}, - envtools.ReleasedStream, - -1, -1, tools.Filter{}) - switch err { - case nil: - break - case tools.ErrNoMatches, envtools.ErrNoTools: - // No tools in provider storage: nothing to do. - return nil - default: - return errors.Annotate(err, "cannot find tools in provider storage") - } - - for _, agentTools := range toolsList { - // Sanity-check tools metadata.
- if err := validateAgentTools(agentTools); err != nil { - logger.Debugf("ignoring invalid agent tools %v: %v", agentTools.Version, err) - continue - } - logger.Infof("migrating %v tools to environment storage", agentTools.Version) - data, err := fetchToolsArchive(stor, envtools.LegacyReleaseDirectory, agentTools) - if errors.IsNotFound(err) { - logger.Debugf("ignoring missing agent tools %v: %v", agentTools.Version, err) - continue - } else if isErrInvalidMetadata(err) { - logger.Debugf("ignoring invalid agent tools %v: %v", agentTools.Version, err) - continue - } else if err != nil { - // Failed to fetch tools. Ignore them, in which case - // Juju will fetch them externally again, or the user - // must upload/sync them again. - logger.Debugf("ignoring agent tools %v: failed to fetch tools: %v", agentTools.Version, err) - continue - } - err = tstor.AddTools(bytes.NewReader(data), toolstorage.Metadata{ - Version: agentTools.Version, - Size: agentTools.Size, - SHA256: agentTools.SHA256, - }) - if err != nil { - return errors.Annotatef(err, "failed to add %v tools to environment storage", agentTools.Version) - } - } - return nil -} - -func validateAgentTools(agentTools *tools.Tools) error { - // Neither of these should be possible because simplestreams - // barfs if release/arch are not set. We'll be pedantic here - // in case of changes. - v := agentTools.Version - if v.Series == "" { - return errors.New("series not set") - } - if v.Arch == "" { - return errors.New("arch not set") - } - return nil -} - -type errInvalidMetadata struct { - error -} - -func isErrInvalidMetadata(err error) bool { - _, ok := err.(errInvalidMetadata) - return ok -} - -// cleanToolsStorage removes invalid tools from environment storage. -func cleanToolsStorage(st *state.State) error { - logger.Debugf("removing invalid tools from environment storage") - tstor, err := stateToolsStorage(st) - if err != nil { - return errors.Annotate(err, "cannot get tools storage") - } - defer tstor.Close() - return tstor.RemoveInvalid() -} - -func fetchToolsArchive(stor storage.StorageReader, toolsDir string, agentTools *tools.Tools) ([]byte, error) { - r, err := stor.Get(envtools.StorageName(agentTools.Version, toolsDir)) - if err != nil { - return nil, err - } - defer r.Close() - - var buf bytes.Buffer - hash, size, err := utils.ReadSHA256(io.TeeReader(r, &buf)) - if err != nil { - return nil, err - } - if hash != agentTools.SHA256 { - return nil, errInvalidMetadata{errors.New("hash mismatch")} - } - if size != agentTools.Size { - return nil, errInvalidMetadata{errors.New("size mismatch")} - } - return buf.Bytes(), nil -} === removed file 'src/github.com/juju/juju/upgrades/toolstorage_test.go' --- src/github.com/juju/juju/upgrades/toolstorage_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/toolstorage_test.go 1970-01-01 00:00:00 +0000 @@ -1,197 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
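fetchToolsArchive above reads each archive exactly once: io.TeeReader copies the stream into a buffer while the hash is computed, and the buffered bytes are only released if both the SHA-256 and the size agree with the stored metadata. A standalone sketch of that single-pass verify-while-buffering trick using only the standard library:

    package main

    import (
        "bytes"
        "crypto/sha256"
        "encoding/hex"
        "errors"
        "fmt"
        "io"
        "strings"
    )

    // readVerified buffers r while hashing it, and releases the bytes only
    // if both the SHA-256 and the size match the expected metadata.
    func readVerified(r io.Reader, wantSHA256 string, wantSize int64) ([]byte, error) {
        var buf bytes.Buffer
        h := sha256.New()
        size, err := io.Copy(h, io.TeeReader(r, &buf)) // one pass: hash and buffer together
        if err != nil {
            return nil, err
        }
        if hex.EncodeToString(h.Sum(nil)) != wantSHA256 {
            return nil, errors.New("hash mismatch")
        }
        if size != wantSize {
            return nil, errors.New("size mismatch")
        }
        return buf.Bytes(), nil
    }

    func main() {
        payload := "fake tools archive"
        sum := sha256.Sum256([]byte(payload))
        data, err := readVerified(strings.NewReader(payload), hex.EncodeToString(sum[:]), int64(len(payload)))
        fmt.Println(len(data), err) // 18 <nil>
    }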
- -package upgrades_test - -import ( - "errors" - "io" - "strings" - - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/filestorage" - envtesting "github.com/juju/juju/environs/testing" - envtools "github.com/juju/juju/environs/tools" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/provider/dummy" - "github.com/juju/juju/state" - "github.com/juju/juju/state/toolstorage" - "github.com/juju/juju/testing" - coretools "github.com/juju/juju/tools" - "github.com/juju/juju/upgrades" - "github.com/juju/juju/version" -) - -type migrateToolsStorageSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&migrateToolsStorageSuite{}) - -func (s *migrateToolsStorageSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) -} - -var migrateToolsVersions = []version.Binary{ - version.MustParseBinary("1.2.3-precise-amd64"), - version.MustParseBinary("2.3.4-trusty-ppc64el"), -} - -func (s *migrateToolsStorageSuite) TestMigrateToolsStorageNoTools(c *gc.C) { - fakeToolsStorage := &fakeToolsStorage{ - stored: make(map[version.Binary]toolstorage.Metadata), - } - s.PatchValue(upgrades.StateToolsStorage, func(*state.State) (toolstorage.StorageCloser, error) { - return fakeToolsStorage, nil - }) - - stor := s.Environ.(environs.EnvironStorage).Storage() - envtesting.RemoveFakeTools(c, stor, "releases") - envtesting.RemoveFakeToolsMetadata(c, stor) - err := upgrades.MigrateToolsStorage(s.State, &mockAgentConfig{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(fakeToolsStorage.stored, gc.HasLen, 0) -} - -func (s *migrateToolsStorageSuite) TestMigrateToolsStorage(c *gc.C) { - stor := s.Environ.(environs.EnvironStorage).Storage() - envtesting.RemoveFakeTools(c, stor, "releases") - tools := envtesting.AssertUploadFakeToolsVersions(c, stor, "releases", "released", migrateToolsVersions...) - s.testMigrateToolsStorage(c, &mockAgentConfig{}, tools) -} - -func (s *migrateToolsStorageSuite) TestMigrateToolsStorageLocalstorage(c *gc.C) { - storageDir := c.MkDir() - stor, err := filestorage.NewFileStorageWriter(storageDir) - c.Assert(err, jc.ErrorIsNil) - tools := envtesting.AssertUploadFakeToolsVersions(c, stor, "releases", "released", migrateToolsVersions...) - for _, providerType := range []string{"local", "manual"} { - config := &mockAgentConfig{ - values: map[string]string{ - agent.ProviderType: providerType, - agent.StorageDir: storageDir, - }, - } - s.testMigrateToolsStorage(c, config, tools) - } -} - -func (s *migrateToolsStorageSuite) TestMigrateToolsStorageBadSHA256(c *gc.C) { - fakeToolsStorage := s.installFakeToolsStorage() - stor := s.Environ.(environs.EnvironStorage).Storage() - envtesting.AssertUploadFakeToolsVersions(c, stor, "releases", "released", migrateToolsVersions...) - // Overwrite one of the tools archives with junk, so the hash does not match. 
- err := stor.Put( - envtools.StorageName(migrateToolsVersions[0], "releases"), - strings.NewReader("junk"), - 4, - ) - c.Assert(err, jc.ErrorIsNil) - err = upgrades.MigrateToolsStorage(s.State, &mockAgentConfig{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(migrateToolsVersions[1], jc.Satisfies, fakeToolsStorage.contains) - c.Assert(fakeToolsStorage.stored, gc.HasLen, 1) -} - -func (s *migrateToolsStorageSuite) TestMigrateToolsStorageMissing(c *gc.C) { - fakeToolsStorage := s.installFakeToolsStorage() - stor := s.Environ.(environs.EnvironStorage).Storage() - envtesting.AssertUploadFakeToolsVersions(c, stor, "releases", "released", migrateToolsVersions...) - // Remove one of the tools archives (but not the metadata). - err := stor.Remove(envtools.StorageName(migrateToolsVersions[0], "releases")) - c.Assert(err, jc.ErrorIsNil) - err = upgrades.MigrateToolsStorage(s.State, &mockAgentConfig{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(migrateToolsVersions[1], jc.Satisfies, fakeToolsStorage.contains) - c.Assert(fakeToolsStorage.stored, gc.HasLen, 1) -} - -func (s *migrateToolsStorageSuite) TestMigrateToolsStorageReadFails(c *gc.C) { - fakeToolsStorage := s.installFakeToolsStorage() - stor := s.Environ.(environs.EnvironStorage).Storage() - envtesting.AssertUploadFakeToolsVersions(c, stor, "releases", "released", migrateToolsVersions...) - - storageErr := errors.New("no tools for you") - dummy.Poison(stor, envtools.StorageName(migrateToolsVersions[0], "releases"), storageErr) - - err := upgrades.MigrateToolsStorage(s.State, &mockAgentConfig{}) - c.Assert(err, jc.ErrorIsNil) - c.Assert(migrateToolsVersions[1], jc.Satisfies, fakeToolsStorage.contains) - c.Assert(fakeToolsStorage.stored, gc.HasLen, 1) -} - -func (s *migrateToolsStorageSuite) testMigrateToolsStorage(c *gc.C, agentConfig agent.Config, tools []*coretools.Tools) { - fakeToolsStorage := s.installFakeToolsStorage() - err := upgrades.MigrateToolsStorage(s.State, agentConfig) - c.Assert(err, jc.ErrorIsNil) - c.Assert(fakeToolsStorage.stored, gc.DeepEquals, map[version.Binary]toolstorage.Metadata{ - tools[0].Version: toolstorage.Metadata{ - Version: tools[0].Version, - Size: tools[0].Size, - SHA256: tools[0].SHA256, - }, - tools[1].Version: toolstorage.Metadata{ - Version: tools[1].Version, - Size: tools[1].Size, - SHA256: tools[1].SHA256, - }, - }) -} - -func (s *migrateToolsStorageSuite) installFakeToolsStorage() *fakeToolsStorage { - fakeToolsStorage := &fakeToolsStorage{ - stored: make(map[version.Binary]toolstorage.Metadata), - } - s.PatchValue(upgrades.StateToolsStorage, func(*state.State) (toolstorage.StorageCloser, error) { - return fakeToolsStorage, nil - }) - return fakeToolsStorage -} - -type cleanToolsStorageSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&cleanToolsStorageSuite{}) - -func (s *cleanToolsStorageSuite) TestCleanToolsStorage(c *gc.C) { - fakeToolsStorage := &fakeToolsStorage{} - s.PatchValue(upgrades.StateToolsStorage, func(*state.State) (toolstorage.StorageCloser, error) { - return fakeToolsStorage, nil - }) - fakeToolsStorage.SetErrors(errors.New("woop")) - err := upgrades.CleanToolsStorage(nil) - c.Assert(err, gc.ErrorMatches, "woop") -} - -type fakeToolsStorage struct { - gitjujutesting.Stub - toolstorage.Storage - stored map[version.Binary]toolstorage.Metadata -} - -func (s *fakeToolsStorage) Close() error { - s.MethodCall(s, "Close") - return s.NextErr() -} - -func (s *fakeToolsStorage) AddTools(r io.Reader, meta toolstorage.Metadata) error { - s.MethodCall(s, "AddTools", r, meta) - 
s.stored[meta.Version] = meta - return s.NextErr() -} - -func (s *fakeToolsStorage) RemoveInvalid() error { - s.MethodCall(s, "RemoveInvalid") - return s.NextErr() -} - -func (s *fakeToolsStorage) contains(v version.Binary) bool { - _, ok := s.stored[v] - return ok -} === modified file 'src/github.com/juju/juju/upgrades/upgrade.go' --- src/github.com/juju/juju/upgrades/upgrade.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/upgrade.go 2016-03-22 15:18:22 +0000 @@ -49,10 +49,10 @@ // HostMachine is a machine on which units are deployed. HostMachine = Target("hostMachine") - // StateServer is a machine participating in a Juju state server cluster. - StateServer = Target("stateServer") + // Controller is a machine participating in a Juju controller cluster. + Controller = Target("controller") - // DatabaseMaster is a StateServer that has the master database, and as such + // DatabaseMaster is a Controller that has the master database, and as such // is the only target that should run database schema upgrade steps. DatabaseMaster = Target("databaseMaster") ) @@ -112,7 +112,7 @@ func hasStateTarget(targets []Target) bool { for _, target := range targets { - if target == StateServer || target == DatabaseMaster { + if target == Controller || target == DatabaseMaster { return true } } === modified file 'src/github.com/juju/juju/upgrades/upgrade_test.go' --- src/github.com/juju/juju/upgrades/upgrade_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/upgrades/upgrade_test.go 2016-03-22 15:18:22 +0000 @@ -160,7 +160,7 @@ values map[string]string mongoInfo *mongo.MongoInfo servingInfo params.StateServingInfo - environTag names.EnvironTag + modelTag names.ModelTag } func (mock *mockAgentConfig) Tag() names.Tag { @@ -203,8 +203,8 @@ mock.servingInfo = info } -func (mock *mockAgentConfig) Environment() names.EnvironTag { - return mock.environTag +func (mock *mockAgentConfig) Model() names.ModelTag { + return mock.modelTag } func stateUpgradeOperations() []upgrades.Operation { @@ -212,23 +212,23 @@ &mockUpgradeOperation{ targetVersion: version.MustParse("1.11.0"), steps: []upgrades.Step{ - newUpgradeStep("state step 1 - 1.11.0", upgrades.StateServer), - newUpgradeStep("state step 2 error", upgrades.StateServer), - newUpgradeStep("state step 3 - 1.11.0", upgrades.StateServer), + newUpgradeStep("state step 1 - 1.11.0", upgrades.Controller), + newUpgradeStep("state step 2 error", upgrades.Controller), + newUpgradeStep("state step 3 - 1.11.0", upgrades.Controller), }, }, &mockUpgradeOperation{ targetVersion: version.MustParse("1.21.0"), steps: []upgrades.Step{ newUpgradeStep("state step 1 - 1.21.0", upgrades.DatabaseMaster), - newUpgradeStep("state step 2 - 1.21.0", upgrades.StateServer), + newUpgradeStep("state step 2 - 1.21.0", upgrades.Controller), }, }, &mockUpgradeOperation{ targetVersion: version.MustParse("1.22.0"), steps: []upgrades.Step{ newUpgradeStep("state step 1 - 1.22.0", upgrades.DatabaseMaster), - newUpgradeStep("state step 2 - 1.22.0", upgrades.StateServer), + newUpgradeStep("state step 2 - 1.22.0", upgrades.Controller), }, }, } @@ -250,7 +250,7 @@ steps: []upgrades.Step{ newUpgradeStep("step 1 - 1.16.0", upgrades.HostMachine), newUpgradeStep("step 2 - 1.16.0", upgrades.HostMachine), - newUpgradeStep("step 3 - 1.16.0", upgrades.StateServer), + newUpgradeStep("step 3 - 1.16.0", upgrades.Controller), }, }, &mockUpgradeOperation{ @@ -263,14 +263,14 @@ targetVersion: version.MustParse("1.17.1"), steps: []upgrades.Step{ newUpgradeStep("step 1 - 1.17.1", 
upgrades.HostMachine), - newUpgradeStep("step 2 - 1.17.1", upgrades.StateServer), + newUpgradeStep("step 2 - 1.17.1", upgrades.Controller), }, }, &mockUpgradeOperation{ targetVersion: version.MustParse("1.18.0"), steps: []upgrades.Step{ newUpgradeStep("step 1 - 1.18.0", upgrades.HostMachine), - newUpgradeStep("step 2 - 1.18.0", upgrades.StateServer), + newUpgradeStep("step 2 - 1.18.0", upgrades.Controller), }, }, &mockUpgradeOperation{ @@ -278,7 +278,7 @@ steps: []upgrades.Step{ newUpgradeStep("step 1 - 1.20.0", upgrades.AllMachines), newUpgradeStep("step 2 - 1.20.0", upgrades.HostMachine), - newUpgradeStep("step 3 - 1.20.0", upgrades.StateServer), + newUpgradeStep("step 3 - 1.20.0", upgrades.Controller), }, }, &mockUpgradeOperation{ @@ -291,7 +291,7 @@ targetVersion: version.MustParse("1.22.0"), steps: []upgrades.Step{ // Separate targets used intentionally - newUpgradeStep("step 1 - 1.22.0", upgrades.StateServer, upgrades.HostMachine), + newUpgradeStep("step 1 - 1.22.0", upgrades.Controller, upgrades.HostMachine), newUpgradeStep("step 2 - 1.22.0", upgrades.AllMachines), }, }, @@ -362,9 +362,7 @@ if test.toVersion != "" { toVersion = version.MustParse(test.toVersion) } - vers := version.Current - vers.Number = toVersion - s.PatchValue(&version.Current, vers) + s.PatchValue(&version.Current, toVersion) result := upgrades.AreUpgradesDefined(fromVersion) c.Check(result, gc.Equals, test.expected) } @@ -405,7 +403,7 @@ { about: "incompatible targets excluded", fromVersion: "1.17.1", - targets: targets(upgrades.StateServer), + targets: targets(upgrades.Controller), expectedSteps: []string{"step 2 - 1.18.0"}, }, { @@ -419,13 +417,13 @@ about: "allMachines matches everything", fromVersion: "1.18.1", toVersion: "1.20.0", - targets: targets(upgrades.StateServer), + targets: targets(upgrades.Controller), expectedSteps: []string{"step 1 - 1.20.0", "step 3 - 1.20.0"}, }, { about: "state step error aborts, subsequent state steps not run", fromVersion: "1.10.0", - targets: targets(upgrades.StateServer), + targets: targets(upgrades.Controller), expectedSteps: []string{"state step 1 - 1.11.0"}, err: "state step 2 error: upgrade error occurred", }, @@ -439,14 +437,14 @@ { about: "default from version is 1.16", fromVersion: "", - targets: targets(upgrades.StateServer), + targets: targets(upgrades.Controller), expectedSteps: []string{"step 2 - 1.17.1", "step 2 - 1.18.0"}, }, { - about: "state servers don't get database master", + about: "controllers don't get database master", fromVersion: "1.20.0", toVersion: "1.21.0", - targets: targets(upgrades.StateServer), + targets: targets(upgrades.Controller), expectedSteps: []string{"state step 2 - 1.21.0", "step 1 - 1.21.0"}, }, { @@ -460,7 +458,7 @@ about: "all state steps are run first", fromVersion: "1.20.0", toVersion: "1.22.0", - targets: targets(upgrades.DatabaseMaster, upgrades.StateServer), + targets: targets(upgrades.DatabaseMaster, upgrades.Controller), expectedSteps: []string{ "state step 1 - 1.21.0", "state step 2 - 1.21.0", "state step 1 - 1.22.0", "state step 2 - 1.22.0", @@ -472,7 +470,7 @@ about: "machine with multiple targets - each step only run once", fromVersion: "1.20.0", toVersion: "1.21.0", - targets: targets(upgrades.HostMachine, upgrades.StateServer), + targets: targets(upgrades.HostMachine, upgrades.Controller), expectedSteps: []string{"state step 2 - 1.21.0", "step 1 - 1.21.0"}, }, { @@ -486,7 +484,7 @@ about: "machine and step with multiple targets - each step only run once", fromVersion: "1.21.0", toVersion: "1.22.0", - targets: 
targets(upgrades.HostMachine, upgrades.StateServer), + targets: targets(upgrades.HostMachine, upgrades.Controller), expectedSteps: []string{"state step 2 - 1.22.0", "step 1 - 1.22.0", "step 2 - 1.22.0"}, }, { @@ -550,9 +548,7 @@ if test.toVersion != "" { toVersion = version.MustParse(test.toVersion) } - vers := version.Current - vers.Number = toVersion - s.PatchValue(&version.Current, vers) + s.PatchValue(&version.Current, toVersion) err := upgrades.PerformUpgrade(fromVersion, test.targets, ctx) if test.err == "" { c.Check(err, jc.ErrorIsNil) @@ -572,7 +568,7 @@ } func (s *contextStep) Targets() []upgrades.Target { - return []upgrades.Target{upgrades.StateServer} + return []upgrades.Target{upgrades.Controller} } func (s *contextStep) Run(context upgrades.Context) error { @@ -619,9 +615,9 @@ func (s *upgradeSuite) checkContextRestriction(c *gc.C, expectedPanic string) { fromVersion := version.MustParse("1.20.0") type fakeAgentConfigSetter struct{ agent.ConfigSetter } - ctx := upgrades.NewContext(fakeAgentConfigSetter{}, new(api.State), new(state.State)) + ctx := upgrades.NewContext(fakeAgentConfigSetter{}, nil, new(state.State)) c.Assert( - func() { upgrades.PerformUpgrade(fromVersion, targets(upgrades.StateServer), ctx) }, + func() { upgrades.PerformUpgrade(fromVersion, targets(upgrades.Controller), ctx) }, gc.PanicMatches, expectedPanic, ) } @@ -652,7 +648,7 @@ c.Assert(apiCount, gc.Equals, 1) } - check(upgrades.StateServer, 1) + check(upgrades.Controller, 1) check(upgrades.DatabaseMaster, 1) check(upgrades.AllMachines, 0) check(upgrades.HostMachine, 0) @@ -672,13 +668,17 @@ func (s *upgradeSuite) TestStateUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.StateUpgradeOperations)()) c.Assert(versions, gc.DeepEquals, []string{ - "1.18.0", "1.21.0", "1.22.0", "1.23.0", "1.24.0", "1.24.4", "1.25.0", "1.25.2", + // TODO(axw) change to 2.0 when we update version + "1.26.0", }) } func (s *upgradeSuite) TestUpgradeOperationsVersions(c *gc.C) { versions := extractUpgradeVersions(c, (*upgrades.UpgradeOperations)()) - c.Assert(versions, gc.DeepEquals, []string{"1.18.0", "1.22.0", "1.23.0", "1.24.0", "1.25.0"}) + c.Assert(versions, gc.DeepEquals, []string{ + // TODO(axw) change to 2.0 when we update version + "1.26.0", + }) } func extractUpgradeVersions(c *gc.C, ops []upgrades.Operation) []string { === removed file 'src/github.com/juju/juju/upgrades/util_unix_test.go' --- src/github.com/juju/juju/upgrades/util_unix_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/util_unix_test.go 1970-01-01 00:00:00 +0000 @@ -1,12 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -// +build !windows - -package upgrades_test - -var customImageMetadata = map[string][]byte{ - "images/abc": []byte("abc"), - "images/def/ghi": []byte("xyz"), -} === removed file 'src/github.com/juju/juju/upgrades/util_windows_test.go' --- src/github.com/juju/juju/upgrades/util_windows_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/upgrades/util_windows_test.go 1970-01-01 00:00:00 +0000 @@ -1,12 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. 
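Stepping back to the upgrade.go hunks above: the StateServer to Controller rename touches every step table because an upgrade step declares the machine roles it applies to through Targets(). A hedged sketch of a controller-only step, based only on the two methods visible on contextStep further above (the real upgrades.Step interface may require more, such as a description; flushCacheStep is a hypothetical name):

	// flushCacheStep is a hypothetical step that would run on controllers only.
	type flushCacheStep struct{}

	func (flushCacheStep) Targets() []upgrades.Target {
		return []upgrades.Target{upgrades.Controller}
	}

	func (flushCacheStep) Run(context upgrades.Context) error {
		// A real step would use the context to reach agent config,
		// the API connection, or state.
		return nil
	}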
- -// +build windows - -package upgrades_test - -var customImageMetadata = map[string][]byte{ - "images\\abc": []byte("abc"), - "images\\def\\ghi": []byte("xyz"), -} === removed file 'src/github.com/juju/juju/utils/exec.go' --- src/github.com/juju/juju/utils/exec.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/utils/exec.go 1970-01-01 00:00:00 +0000 @@ -1,49 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package utils - -import ( - "os" - "os/exec" - "strings" - - "github.com/juju/errors" -) - -// RunCommand execs the provided command. -func RunCommand(cmd string, args ...string) error { - command := exec.Command(cmd, args...) - out, err := command.CombinedOutput() - if err == nil { - return nil - } - if _, ok := err.(*exec.ExitError); ok && len(out) > 0 { - return errors.Errorf( - "error executing %q: %s", - cmd, - strings.Replace(string(out), "\n", "; ", -1), - ) - } - return errors.Annotatef(err, "error executing %q", cmd) -} - -// IsCmdNotFoundErr returns true if the provided error indicates that the -// command passed to exec.LookPath or exec.Command was not found. -func IsCmdNotFoundErr(err error) bool { - err = errors.Cause(err) - if os.IsNotExist(err) { - // Executable could not be found, go 1.3 and later - return true - } - if err == exec.ErrNotFound { - return true - } - if execErr, ok := err.(*exec.Error); ok { - // Executable could not be found, go 1.2 - if os.IsNotExist(execErr.Err) || execErr.Err == exec.ErrNotFound { - return true - } - } - return false -} === added directory 'src/github.com/juju/juju/utils/filelock' === added file 'src/github.com/juju/juju/utils/filelock/export_test.go' --- src/github.com/juju/juju/utils/filelock/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/utils/filelock/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package filelock + +// IsLocked is used just to see if the local lock instance is locked, and +// is only required for use in tests. +func IsLocked(lock *Lock) bool { + return lock.lockFile != nil +} === added file 'src/github.com/juju/juju/utils/filelock/flock.go' --- src/github.com/juju/juju/utils/filelock/flock.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/utils/filelock/flock.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build !windows + +package filelock + +import ( + "syscall" +) + +func flockLock(fd int) (err error) { + return syscall.Flock(fd, syscall.LOCK_EX) +} + +func flockUnlock(fd int) (err error) { + return syscall.Flock(fd, syscall.LOCK_UN) +} === added file 'src/github.com/juju/juju/utils/filelock/flock_windows.go' --- src/github.com/juju/juju/utils/filelock/flock_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/utils/filelock/flock_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+// +build windows
+
+package filelock
+
+import "fmt"
+
+func flockLock(fd int) (err error) {
+	return fmt.Errorf("not implemented")
+}
+
+func flockUnlock(fd int) (err error) {
+	return fmt.Errorf("not implemented")
+}
=== added file 'src/github.com/juju/juju/utils/filelock/lock.go'
--- src/github.com/juju/juju/utils/filelock/lock.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/utils/filelock/lock.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,82 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package filelock provides a machine-wide file lock.
+package filelock
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+
+	"github.com/juju/loggo"
+)
+
+const (
+	// NameRegexp specifies the regular expression used to identify valid lock names.
+	NameRegexp = "^[a-z]+[a-z0-9.-]*$"
+)
+
+var (
+	validName = regexp.MustCompile(NameRegexp)
+	logger    = loggo.GetLogger("juju.utils.filelock")
+)
+
+// Lock represents a machine-wide file lock.
+type Lock struct {
+	name     string
+	lockDir  string
+	lockFile *os.File
+}
+
+// NewLock returns a new lock with the given name, using the given lock
+// directory, without acquiring it. The lock name must match the regular
+// expression defined by NameRegexp.
+func NewLock(dir, name string) (*Lock, error) {
+	if !validName.MatchString(name) {
+		return nil, fmt.Errorf("Invalid lock name %q. Names must match %q", name, NameRegexp)
+	}
+	lockDir := filepath.Join(dir, name)
+	lock := &Lock{
+		name:    name,
+		lockDir: lockDir,
+	}
+	// Ensure the lockDir exists.
+	if err := os.MkdirAll(lockDir, 0755); err != nil {
+		return nil, err
+	}
+	return lock, nil
+}
+
+// Lock blocks until it is able to acquire the lock. It is good behaviour to
+// provide a message that is output in debugging information.
+func (lock *Lock) Lock(message string) error {
+	f, err := os.Open(lock.lockDir)
+	if err != nil {
+		return err
+	}
+	fd := int(f.Fd())
+	if err := flockLock(fd); err != nil {
+		f.Close()
+		return err
+	}
+	logger.Infof("acquired lock %q, %s", lock.name, message)
+	lock.lockFile = f
+	return nil
+}
+
+// Unlock releases a held lock.
+func (lock *Lock) Unlock() error {
+	if lock.lockFile == nil {
+		return nil
+	}
+	fd := int(lock.lockFile.Fd())
+	err := flockUnlock(fd)
+	if err == nil {
+		logger.Infof("released lock %q", lock.name)
+		lock.lockFile.Close()
+		lock.lockFile = nil
+	}
+	return err
+}
=== added file 'src/github.com/juju/juju/utils/filelock/lock_test.go'
--- src/github.com/juju/juju/utils/filelock/lock_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/utils/filelock/lock_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,167 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package filelock_test
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"sync/atomic"
+	"time"
+
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	coretesting "github.com/juju/juju/testing"
+	"github.com/juju/juju/utils/filelock"
+)
+
+type flockSuite struct{}
+
+var _ = gc.Suite(&flockSuite{})
+
+// This test also happens to test that locks can get created when the
+// lock directory doesn't exist.
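The API that lock.go adds above is deliberately small: NewLock only validates the name and creates the lock directory, Lock blocks in flock(2) until the exclusive lock is granted (on Windows it simply fails as not implemented), and Unlock is a no-op when the lock is not held. A minimal usage sketch, with the directory and message as illustrative values; the tests that follow exercise the same acquire/release sequence:

	lock, err := filelock.NewLock("/var/lib/juju/locks", "uniter-hook")
	if err != nil {
		return err
	}
	if err := lock.Lock("running hook"); err != nil { // blocks until granted
		return err
	}
	defer lock.Unlock()
	// Critical section: only one process on this machine holds the lock here.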
+func (s *flockSuite) TestValidNamesLockDir(c *gc.C) { + + for _, name := range []string{ + "a", + "longer", + "longer-with.special-characters", + } { + dir := c.MkDir() + _, err := filelock.NewLock(dir, name) + c.Assert(err, jc.ErrorIsNil) + } +} + +func (s *flockSuite) TestInvalidNames(c *gc.C) { + + for _, name := range []string{ + ".start", + "-start", + "NoCapitals", + "no+plus", + "no/slash", + "no\\backslash", + "no$dollar", + "no:colon", + } { + dir := c.MkDir() + _, err := filelock.NewLock(dir, name) + c.Assert(err, gc.ErrorMatches, "Invalid lock name .*") + } +} + +func (s *flockSuite) TestNewLockWithExistingDir(c *gc.C) { + dir := c.MkDir() + err := os.MkdirAll(dir, 0755) + c.Assert(err, jc.ErrorIsNil) + _, err = filelock.NewLock(dir, "special") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *flockSuite) TestLockBlocks(c *gc.C) { + + dir := c.MkDir() + lock1, err := filelock.NewLock(dir, "testing") + c.Assert(err, jc.ErrorIsNil) + c.Assert(filelock.IsLocked(lock1), jc.IsFalse) + lock2, err := filelock.NewLock(dir, "testing") + c.Assert(err, jc.ErrorIsNil) + c.Assert(filelock.IsLocked(lock2), jc.IsFalse) + + acquired := make(chan struct{}) + err = lock1.Lock("") + c.Assert(err, jc.ErrorIsNil) + c.Assert(filelock.IsLocked(lock1), jc.IsTrue) + + go func() { + lock2.Lock("") + c.Assert(filelock.IsLocked(lock2), jc.IsTrue) + acquired <- struct{}{} + close(acquired) + }() + + // Waiting for something not to happen is inherently hard... + select { + case <-acquired: + c.Fatalf("Unexpected lock acquisition") + case <-time.After(coretesting.ShortWait): + // all good + } + + err = lock1.Unlock() + c.Assert(err, jc.ErrorIsNil) + c.Assert(filelock.IsLocked(lock1), jc.IsFalse) + + select { + case <-acquired: + // all good + case <-time.After(coretesting.LongWait): + c.Fatalf("Expected lock acquisition") + } +} + +func (s *flockSuite) TestUnlock(c *gc.C) { + dir := c.MkDir() + lock, err := filelock.NewLock(dir, "testing") + c.Assert(err, jc.ErrorIsNil) + err = lock.Lock("test") + c.Assert(err, jc.ErrorIsNil) + c.Assert(filelock.IsLocked(lock), jc.IsTrue) + + err = lock.Unlock() + c.Assert(err, jc.ErrorIsNil) + c.Assert(filelock.IsLocked(lock), jc.IsFalse) +} + +func (s *flockSuite) TestStress(c *gc.C) { + const lockAttempts = 200 + const concurrentLocks = 10 + + var counter = new(int64) + // Use atomics to update lockState to make sure the lock isn't held by + // someone else. A value of 1 means locked, 0 means unlocked. + var lockState = new(int32) + var done = make(chan struct{}) + defer close(done) + + dir := c.MkDir() + + var stress = func(name string) { + defer func() { done <- struct{}{} }() + lock, err := filelock.NewLock(dir, "testing") + if err != nil { + c.Errorf("Failed to create a new lock") + return + } + for i := 0; i < lockAttempts; i++ { + err = lock.Lock(name) + c.Assert(err, jc.ErrorIsNil) + state := atomic.AddInt32(lockState, 1) + c.Assert(state, gc.Equals, int32(1)) + // Tell the go routine scheduler to give a slice to someone else + // while we have this locked. + runtime.Gosched() + // need to decrement prior to unlock to avoid the race of someone + // else grabbing the lock before we decrement the state. 
+ atomic.AddInt32(lockState, -1) + err = lock.Unlock() + c.Assert(err, jc.ErrorIsNil) + // increment the general counter + atomic.AddInt64(counter, 1) + } + } + + for i := 0; i < concurrentLocks; i++ { + go stress(fmt.Sprintf("Lock %d", i)) + } + for i := 0; i < concurrentLocks; i++ { + <-done + } + c.Assert(*counter, gc.Equals, int64(lockAttempts*concurrentLocks)) +} === added file 'src/github.com/juju/juju/utils/filelock/package_test.go' --- src/github.com/juju/juju/utils/filelock/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/utils/filelock/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package filelock_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === removed file 'src/github.com/juju/juju/utils/logging.go' --- src/github.com/juju/juju/utils/logging.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/logging.go 1970-01-01 00:00:00 +0000 @@ -1,25 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package utils - -import ( - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/utils/featureflag" - - "github.com/juju/juju/feature" -) - -var logger = loggo.GetLogger("juju.utils") - -// LoggedErrorStack is a developer helper function that will cause the error -// stack of the error to be printed out at error severity if and only if the -// "log-error-stack" feature flag has been specified. The passed in error -// is also the return value of this function. -func LoggedErrorStack(err error) error { - if featureflag.Enabled(feature.LogErrorStack) { - logger.Errorf("error stack:\n%s", errors.ErrorStack(err)) - } - return err -} === removed file 'src/github.com/juju/juju/utils/logging_test.go' --- src/github.com/juju/juju/utils/logging_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/logging_test.go 1970-01-01 00:00:00 +0000 @@ -1,37 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package utils_test - -import ( - "errors" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/feature" - "github.com/juju/juju/testing" - "github.com/juju/juju/utils" -) - -type logSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&logSuite{}) - -func (*logSuite) TestFlagNotSet(c *gc.C) { - err := errors.New("test error") - err2 := utils.LoggedErrorStack(err) - c.Assert(err, gc.Equals, err2) - c.Assert(c.GetTestLog(), gc.Equals, "") -} - -func (s *logSuite) TestFlagSet(c *gc.C) { - s.SetFeatureFlags(feature.LogErrorStack) - err := errors.New("test error") - err2 := utils.LoggedErrorStack(err) - c.Assert(err, gc.Equals, err2) - expected := "ERROR juju.utils error stack:\ntest error" - c.Assert(c.GetTestLog(), jc.Contains, expected) -} === removed file 'src/github.com/juju/juju/utils/network.go' --- src/github.com/juju/juju/utils/network.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/network.go 1970-01-01 00:00:00 +0000 @@ -1,56 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package utils - -import ( - "net" - "time" - - "github.com/juju/errors" - "github.com/juju/utils" -) - -var ( - // The defaults below are best suited to retries associated - // with disk I/O timeouts, eg database operations. 
- // Use the NetworkOperationWithRetries() variant to explicitly - // use retry values better suited to different scenarios. - - // DefaultNetworkOperationRetryDelay is the default time - // to wait between operation retries. - DefaultNetworkOperationRetryDelay = 30 * time.Second - - // DefaultNetworkOperationAttempts is the default number - // of attempts before giving up. - DefaultNetworkOperationAttempts = 10 -) - -// NetworkOperationWithDefaultRetries calls the supplied function and if it returns a -// network error which is temporary, will retry a number of times before giving up. -// A default attempt strategy is used. -func NetworkOperationWitDefaultRetries(networkOp func() error, description string) func() error { - attempt := utils.AttemptStrategy{ - Delay: DefaultNetworkOperationRetryDelay, - Min: DefaultNetworkOperationAttempts, - } - return NetworkOperationWithRetries(attempt, networkOp, description) -} - -// NetworkOperationWithRetries calls the supplied function and if it returns a -// network error which is temporary, will retry a number of times before giving up. -func NetworkOperationWithRetries(strategy utils.AttemptStrategy, networkOp func() error, description string) func() error { - return func() error { - for a := strategy.Start(); a.Next(); { - err := networkOp() - if !a.HasNext() || err == nil { - return errors.Trace(err) - } - if networkErr, ok := errors.Cause(err).(net.Error); !ok || !networkErr.Temporary() { - return errors.Trace(err) - } - logger.Debugf("%q error, will retry: %v", description, err) - } - panic("unreachable") - } -} === removed file 'src/github.com/juju/juju/utils/network_test.go' --- src/github.com/juju/juju/utils/network_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/network_test.go 1970-01-01 00:00:00 +0000 @@ -1,103 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
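Before the removed tests: NetworkOperationWithRetries above returns a closure, so the retry strategy is bound once and the operation can be re-invoked wherever the closure is called, and only errors whose cause is a temporary net.Error trigger a retry. A usage sketch against the signature shown above, written as if from within the package; the dial target is illustrative and errors.Trace is the juju/errors helper already used in the removed file:

	strategy := utils.AttemptStrategy{
		Delay: 5 * time.Second,
		Min:   3,
	}
	op := NetworkOperationWithRetries(strategy, func() error {
		conn, err := net.Dial("tcp", "controller:17070")
		if err != nil {
			return errors.Trace(err) // temporary net.Errors are retried
		}
		return conn.Close()
	}, "dial controller")
	err := op()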
- -package utils_test - -import ( - "time" - - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/utils" -) - -type networkSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&networkSuite{}) - -func (s *networkSuite) TestOpSuccess(c *gc.C) { - isCalled := false - f := func() error { - isCalled = true - return nil - } - err := utils.NetworkOperationWitDefaultRetries(f, "do it")() - c.Assert(err, jc.ErrorIsNil) - c.Assert(isCalled, jc.IsTrue) -} - -func (s *networkSuite) TestOpFailureNoRetry(c *gc.C) { - s.PatchValue(&utils.DefaultNetworkOperationRetryDelay, 1*time.Millisecond) - netErr := &netError{false} - callCount := 0 - f := func() error { - callCount++ - return netErr - } - err := utils.NetworkOperationWitDefaultRetries(f, "do it")() - c.Assert(errors.Cause(err), gc.Equals, netErr) - c.Assert(callCount, gc.Equals, 1) -} - -func (s *networkSuite) TestOpFailureRetries(c *gc.C) { - s.PatchValue(&utils.DefaultNetworkOperationRetryDelay, 1*time.Millisecond) - netErr := &netError{true} - callCount := 0 - f := func() error { - callCount++ - return netErr - } - err := utils.NetworkOperationWitDefaultRetries(f, "do it")() - c.Assert(errors.Cause(err), gc.Equals, netErr) - c.Assert(callCount, gc.Equals, 10) -} - -func (s *networkSuite) TestOpNestedFailureRetries(c *gc.C) { - s.PatchValue(&utils.DefaultNetworkOperationRetryDelay, 1*time.Millisecond) - netErr := &netError{true} - callCount := 0 - f := func() error { - callCount++ - return errors.Annotate(errors.Trace(netErr), "create a wrapped error") - } - err := utils.NetworkOperationWitDefaultRetries(f, "do it")() - c.Assert(errors.Cause(err), gc.Equals, netErr) - c.Assert(callCount, gc.Equals, 10) -} - -func (s *networkSuite) TestOpSucceedsAfterRetries(c *gc.C) { - s.PatchValue(&utils.DefaultNetworkOperationRetryDelay, 1*time.Millisecond) - netErr := &netError{true} - callCount := 0 - f := func() error { - callCount++ - if callCount == 5 { - return nil - } - return netErr - } - err := utils.NetworkOperationWitDefaultRetries(f, "do it")() - c.Assert(err, jc.ErrorIsNil) - c.Assert(callCount, gc.Equals, 5) -} - -type netError struct { - temporary bool -} - -func (e *netError) Error() string { - return "network error" -} - -func (e *netError) Temporary() bool { - return e.temporary -} - -func (e *netError) Timeout() bool { - return false -} === removed file 'src/github.com/juju/juju/utils/package_test.go' --- src/github.com/juju/juju/utils/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package utils_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func Test(t *testing.T) { - gc.TestingT(t) -} === removed directory 'src/github.com/juju/juju/utils/ssh' === removed file 'src/github.com/juju/juju/utils/ssh/authorisedkeys.go' --- src/github.com/juju/juju/utils/ssh/authorisedkeys.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/ssh/authorisedkeys.go 1970-01-01 00:00:00 +0000 @@ -1,333 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package ssh - -import ( - "fmt" - "io/ioutil" - "os" - "os/user" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - - "github.com/juju/loggo" - "github.com/juju/utils" - "golang.org/x/crypto/ssh" -) - -var logger = loggo.GetLogger("juju.utils.ssh") - -type ListMode bool - -var ( - FullKeys ListMode = true - Fingerprints ListMode = false -) - -const ( - authKeysFile = "authorized_keys" -) - -type AuthorisedKey struct { - Type string - Key []byte - Comment string -} - -func authKeysDir(username string) (string, error) { - homeDir, err := utils.UserHomeDir(username) - if err != nil { - return "", err - } - homeDir, err = utils.NormalizePath(homeDir) - if err != nil { - return "", err - } - return filepath.Join(homeDir, ".ssh"), nil -} - -// ParseAuthorisedKey parses a non-comment line from an -// authorized_keys file and returns the constituent parts. -// Based on description in "man sshd". -func ParseAuthorisedKey(line string) (*AuthorisedKey, error) { - key, comment, _, _, err := ssh.ParseAuthorizedKey([]byte(line)) - if err != nil { - return nil, fmt.Errorf("invalid authorized_key %q", line) - } - return &AuthorisedKey{ - Type: key.Type(), - Key: key.Marshal(), - Comment: comment, - }, nil -} - -// SplitAuthorisedKeys extracts a key slice from the specified key data, -// by splitting the key data into lines and ignoring comments and blank lines. -func SplitAuthorisedKeys(keyData string) []string { - var keys []string - for _, key := range strings.Split(string(keyData), "\n") { - key = strings.Trim(key, " \r") - if len(key) == 0 { - continue - } - if key[0] == '#' { - continue - } - keys = append(keys, key) - } - return keys -} - -func readAuthorisedKeys(username string) ([]string, error) { - keyDir, err := authKeysDir(username) - if err != nil { - return nil, err - } - sshKeyFile := filepath.Join(keyDir, authKeysFile) - logger.Debugf("reading authorised keys file %s", sshKeyFile) - keyData, err := ioutil.ReadFile(sshKeyFile) - if os.IsNotExist(err) { - return []string{}, nil - } - if err != nil { - return nil, fmt.Errorf("reading ssh authorised keys file: %v", err) - } - var keys []string - for _, key := range strings.Split(string(keyData), "\n") { - if len(strings.Trim(key, " \r")) == 0 { - continue - } - keys = append(keys, key) - } - return keys, nil -} - -func writeAuthorisedKeys(username string, keys []string) error { - keyDir, err := authKeysDir(username) - if err != nil { - return err - } - err = os.MkdirAll(keyDir, os.FileMode(0755)) - if err != nil { - return fmt.Errorf("cannot create ssh key directory: %v", err) - } - keyData := strings.Join(keys, "\n") + "\n" - - // Get perms to use on auth keys file - sshKeyFile := filepath.Join(keyDir, authKeysFile) - perms := os.FileMode(0644) - info, err := os.Stat(sshKeyFile) - if err == nil { - perms = info.Mode().Perm() - } - - logger.Debugf("writing authorised keys file %s", sshKeyFile) - err = utils.AtomicWriteFile(sshKeyFile, []byte(keyData), perms) - if err != nil { - return err - } - - // TODO (wallyworld) - what to do on windows (if anything) - // TODO(dimitern) - no need to use user.Current() if username - // is "" - it will use the current user anyway. - if runtime.GOOS != "windows" { - // Ensure the resulting authorised keys file has its ownership - // set to the specified username. - var u *user.User - if username == "" { - u, err = user.Current() - } else { - u, err = user.Lookup(username) - } - if err != nil { - return err - } - // chown requires ints but user.User has strings for windows. 
- uid, err := strconv.Atoi(u.Uid) - if err != nil { - return err - } - gid, err := strconv.Atoi(u.Gid) - if err != nil { - return err - } - err = os.Chown(sshKeyFile, uid, gid) - if err != nil { - return err - } - } - return nil -} - -// We need a mutex because updates to the authorised keys file are done by -// reading the contents, updating, and writing back out. So only one caller -// at a time can use either Add, Delete, List. -var mutex sync.Mutex - -// AddKeys adds the specified ssh keys to the authorized_keys file for user. -// Returns an error if there is an issue with *any* of the supplied keys. -func AddKeys(user string, newKeys ...string) error { - mutex.Lock() - defer mutex.Unlock() - existingKeys, err := readAuthorisedKeys(user) - if err != nil { - return err - } - for _, newKey := range newKeys { - fingerprint, comment, err := KeyFingerprint(newKey) - if err != nil { - return err - } - if comment == "" { - return fmt.Errorf("cannot add ssh key without comment") - } - for _, key := range existingKeys { - existingFingerprint, existingComment, err := KeyFingerprint(key) - if err != nil { - // Only log a warning if the unrecognised key line is not a comment. - if key[0] != '#' { - logger.Warningf("invalid existing ssh key %q: %v", key, err) - } - continue - } - if existingFingerprint == fingerprint { - return fmt.Errorf("cannot add duplicate ssh key: %v", fingerprint) - } - if existingComment == comment { - return fmt.Errorf("cannot add ssh key with duplicate comment: %v", comment) - } - } - } - sshKeys := append(existingKeys, newKeys...) - return writeAuthorisedKeys(user, sshKeys) -} - -// DeleteKeys removes the specified ssh keys from the authorized ssh keys file for user. -// keyIds may be either key comments or fingerprints. -// Returns an error if there is an issue with *any* of the keys to delete. -func DeleteKeys(user string, keyIds ...string) error { - mutex.Lock() - defer mutex.Unlock() - existingKeyData, err := readAuthorisedKeys(user) - if err != nil { - return err - } - // Build up a map of keys indexed by fingerprint, and fingerprints indexed by comment - // so we can easily get the key represented by each keyId, which may be either a fingerprint - // or comment. - var keysToWrite []string - var sshKeys = make(map[string]string) - var keyComments = make(map[string]string) - for _, key := range existingKeyData { - fingerprint, comment, err := KeyFingerprint(key) - if err != nil { - logger.Debugf("keeping unrecognised existing ssh key %q: %v", key, err) - keysToWrite = append(keysToWrite, key) - continue - } - sshKeys[fingerprint] = key - if comment != "" { - keyComments[comment] = fingerprint - } - } - for _, keyId := range keyIds { - // assume keyId may be a fingerprint - fingerprint := keyId - _, ok := sshKeys[keyId] - if !ok { - // keyId is a comment - fingerprint, ok = keyComments[keyId] - } - if !ok { - return fmt.Errorf("cannot delete non existent key: %v", keyId) - } - delete(sshKeys, fingerprint) - } - for _, key := range sshKeys { - keysToWrite = append(keysToWrite, key) - } - if len(keysToWrite) == 0 { - return fmt.Errorf("cannot delete all keys") - } - return writeAuthorisedKeys(user, keysToWrite) -} - -// ReplaceKeys writes the specified ssh keys to the authorized_keys file for user, -// replacing any that are already there. -// Returns an error if there is an issue with *any* of the supplied keys. 
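Taken together, AddKeys, DeleteKeys and ListKeys above form a small, mutex-serialized CRUD surface over a user's authorized_keys file; ReplaceKeys, defined immediately below, rewrites the whole file while preserving unrecognised lines. A usage sketch (the key literal is abbreviated; an empty user means the current user, and AddKeys rejects keys without a comment):

	if err := ssh.AddKeys("", "ssh-rsa AAAAB3... bob@example.com"); err != nil {
		return err
	}
	keys, err := ssh.ListKeys("", ssh.Fingerprints)
	if err != nil {
		return err
	}
	fmt.Printf("fingerprints: %v\n", keys)
	// Keys may be deleted by comment or by fingerprint, but never all of them.
	if err := ssh.DeleteKeys("", "bob@example.com"); err != nil {
		return err
	}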
-func ReplaceKeys(user string, newKeys ...string) error { - mutex.Lock() - defer mutex.Unlock() - - existingKeyData, err := readAuthorisedKeys(user) - if err != nil { - return err - } - var existingNonKeyLines []string - for _, line := range existingKeyData { - _, _, err := KeyFingerprint(line) - if err != nil { - existingNonKeyLines = append(existingNonKeyLines, line) - } - } - return writeAuthorisedKeys(user, append(existingNonKeyLines, newKeys...)) -} - -// ListKeys returns either the full keys or key comments from the authorized ssh keys file for user. -func ListKeys(user string, mode ListMode) ([]string, error) { - mutex.Lock() - defer mutex.Unlock() - keyData, err := readAuthorisedKeys(user) - if err != nil { - return nil, err - } - var keys []string - for _, key := range keyData { - fingerprint, comment, err := KeyFingerprint(key) - if err != nil { - // Only log a warning if the unrecognised key line is not a comment. - if key[0] != '#' { - logger.Warningf("ignoring invalid ssh key %q: %v", key, err) - } - continue - } - if mode == FullKeys { - keys = append(keys, key) - } else { - shortKey := fingerprint - if comment != "" { - shortKey += fmt.Sprintf(" (%s)", comment) - } - keys = append(keys, shortKey) - } - } - return keys, nil -} - -// Any ssh key added to the authorised keys list by Juju will have this prefix. -// This allows Juju to know which keys have been added externally and any such keys -// will always be retained by Juju when updating the authorised keys file. -const JujuCommentPrefix = "Juju:" - -func EnsureJujuComment(key string) string { - ak, err := ParseAuthorisedKey(key) - // Just return an invalid key as is. - if err != nil { - logger.Warningf("invalid Juju ssh key %s: %v", key, err) - return key - } - if ak.Comment == "" { - return key + " " + JujuCommentPrefix + "sshkey" - } else { - // Add the Juju prefix to the comment if necessary. - if !strings.HasPrefix(ak.Comment, JujuCommentPrefix) { - commentIndex := strings.LastIndex(key, ak.Comment) - return key[:commentIndex] + JujuCommentPrefix + ak.Comment - } - } - return key -} === removed file 'src/github.com/juju/juju/utils/ssh/authorisedkeys_test.go' --- src/github.com/juju/juju/utils/ssh/authorisedkeys_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/authorisedkeys_test.go 1970-01-01 00:00:00 +0000 @@ -1,275 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh_test - -import ( - "encoding/base64" - "strings" - - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" - sshtesting "github.com/juju/juju/utils/ssh/testing" -) - -type AuthorisedKeysKeysSuite struct { - gitjujutesting.FakeHomeSuite -} - -const ( - // We'll use the current user for ssh tests. 
- testSSHUser = "" -) - -var _ = gc.Suite(&AuthorisedKeysKeysSuite{}) - -func writeAuthKeysFile(c *gc.C, keys []string) { - err := ssh.WriteAuthorisedKeys(testSSHUser, keys) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *AuthorisedKeysKeysSuite) TestListKeys(c *gc.C) { - keys := []string{ - sshtesting.ValidKeyOne.Key + " user@host", - sshtesting.ValidKeyTwo.Key, - } - writeAuthKeysFile(c, keys) - keys, err := ssh.ListKeys(testSSHUser, ssh.Fingerprints) - c.Assert(err, jc.ErrorIsNil) - c.Assert( - keys, gc.DeepEquals, - []string{sshtesting.ValidKeyOne.Fingerprint + " (user@host)", sshtesting.ValidKeyTwo.Fingerprint}) -} - -func (s *AuthorisedKeysKeysSuite) TestListKeysFull(c *gc.C) { - keys := []string{ - sshtesting.ValidKeyOne.Key + " user@host", - sshtesting.ValidKeyTwo.Key + " anotheruser@host", - } - writeAuthKeysFile(c, keys) - actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, keys) -} - -func (s *AuthorisedKeysKeysSuite) TestAddNewKey(c *gc.C) { - key := sshtesting.ValidKeyOne.Key + " user@host" - err := ssh.AddKeys(testSSHUser, key) - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, []string{key}) -} - -func (s *AuthorisedKeysKeysSuite) TestAddMoreKeys(c *gc.C) { - firstKey := sshtesting.ValidKeyOne.Key + " user@host" - writeAuthKeysFile(c, []string{firstKey}) - moreKeys := []string{ - sshtesting.ValidKeyTwo.Key + " anotheruser@host", - sshtesting.ValidKeyThree.Key + " yetanotheruser@host", - } - err := ssh.AddKeys(testSSHUser, moreKeys...) - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, append([]string{firstKey}, moreKeys...)) -} - -func (s *AuthorisedKeysKeysSuite) TestAddDuplicateKey(c *gc.C) { - key := sshtesting.ValidKeyOne.Key + " user@host" - err := ssh.AddKeys(testSSHUser, key) - c.Assert(err, jc.ErrorIsNil) - moreKeys := []string{ - sshtesting.ValidKeyOne.Key + " user@host", - sshtesting.ValidKeyTwo.Key + " yetanotheruser@host", - } - err = ssh.AddKeys(testSSHUser, moreKeys...) - c.Assert(err, gc.ErrorMatches, "cannot add duplicate ssh key: "+sshtesting.ValidKeyOne.Fingerprint) -} - -func (s *AuthorisedKeysKeysSuite) TestAddDuplicateComment(c *gc.C) { - key := sshtesting.ValidKeyOne.Key + " user@host" - err := ssh.AddKeys(testSSHUser, key) - c.Assert(err, jc.ErrorIsNil) - moreKeys := []string{ - sshtesting.ValidKeyTwo.Key + " user@host", - sshtesting.ValidKeyThree.Key + " yetanotheruser@host", - } - err = ssh.AddKeys(testSSHUser, moreKeys...) - c.Assert(err, gc.ErrorMatches, "cannot add ssh key with duplicate comment: user@host") -} - -func (s *AuthorisedKeysKeysSuite) TestAddKeyWithoutComment(c *gc.C) { - keys := []string{ - sshtesting.ValidKeyOne.Key + " user@host", - sshtesting.ValidKeyTwo.Key, - } - err := ssh.AddKeys(testSSHUser, keys...) 
- c.Assert(err, gc.ErrorMatches, "cannot add ssh key without comment") -} - -func (s *AuthorisedKeysKeysSuite) TestAddKeepsUnrecognised(c *gc.C) { - writeAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, "invalid-key"}) - anotherKey := sshtesting.ValidKeyTwo.Key + " anotheruser@host" - err := ssh.AddKeys(testSSHUser, anotherKey) - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ReadAuthorisedKeys(testSSHUser) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, []string{sshtesting.ValidKeyOne.Key, "invalid-key", anotherKey}) -} - -func (s *AuthorisedKeysKeysSuite) TestDeleteKeys(c *gc.C) { - firstKey := sshtesting.ValidKeyOne.Key + " user@host" - anotherKey := sshtesting.ValidKeyTwo.Key - thirdKey := sshtesting.ValidKeyThree.Key + " anotheruser@host" - writeAuthKeysFile(c, []string{firstKey, anotherKey, thirdKey}) - err := ssh.DeleteKeys(testSSHUser, "user@host", sshtesting.ValidKeyTwo.Fingerprint) - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, []string{thirdKey}) -} - -func (s *AuthorisedKeysKeysSuite) TestDeleteKeysKeepsUnrecognised(c *gc.C) { - firstKey := sshtesting.ValidKeyOne.Key + " user@host" - writeAuthKeysFile(c, []string{firstKey, sshtesting.ValidKeyTwo.Key, "invalid-key"}) - err := ssh.DeleteKeys(testSSHUser, "user@host") - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ReadAuthorisedKeys(testSSHUser) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, []string{"invalid-key", sshtesting.ValidKeyTwo.Key}) -} - -func (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentComment(c *gc.C) { - firstKey := sshtesting.ValidKeyOne.Key + " user@host" - writeAuthKeysFile(c, []string{firstKey}) - err := ssh.DeleteKeys(testSSHUser, "someone@host") - c.Assert(err, gc.ErrorMatches, "cannot delete non existent key: someone@host") -} - -func (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentFingerprint(c *gc.C) { - firstKey := sshtesting.ValidKeyOne.Key + " user@host" - writeAuthKeysFile(c, []string{firstKey}) - err := ssh.DeleteKeys(testSSHUser, sshtesting.ValidKeyTwo.Fingerprint) - c.Assert(err, gc.ErrorMatches, "cannot delete non existent key: "+sshtesting.ValidKeyTwo.Fingerprint) -} - -func (s *AuthorisedKeysKeysSuite) TestDeleteLastKeyForbidden(c *gc.C) { - keys := []string{ - sshtesting.ValidKeyOne.Key + " user@host", - sshtesting.ValidKeyTwo.Key + " yetanotheruser@host", - } - writeAuthKeysFile(c, keys) - err := ssh.DeleteKeys(testSSHUser, "user@host", sshtesting.ValidKeyTwo.Fingerprint) - c.Assert(err, gc.ErrorMatches, "cannot delete all keys") -} - -func (s *AuthorisedKeysKeysSuite) TestReplaceKeys(c *gc.C) { - firstKey := sshtesting.ValidKeyOne.Key + " user@host" - anotherKey := sshtesting.ValidKeyTwo.Key - writeAuthKeysFile(c, []string{firstKey, anotherKey}) - - // replaceKey is created without a comment so test that - // ReplaceKeys handles keys without comments. This is - // because existing keys may not have a comment and - // ReplaceKeys is used to rewrite the entire authorized_keys - // file when adding new keys. 
- replaceKey := sshtesting.ValidKeyThree.Key - err := ssh.ReplaceKeys(testSSHUser, replaceKey) - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, []string{replaceKey}) -} - -func (s *AuthorisedKeysKeysSuite) TestReplaceKeepsUnrecognised(c *gc.C) { - writeAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, "invalid-key"}) - anotherKey := sshtesting.ValidKeyTwo.Key + " anotheruser@host" - err := ssh.ReplaceKeys(testSSHUser, anotherKey) - c.Assert(err, jc.ErrorIsNil) - actual, err := ssh.ReadAuthorisedKeys(testSSHUser) - c.Assert(err, jc.ErrorIsNil) - c.Assert(actual, gc.DeepEquals, []string{"invalid-key", anotherKey}) -} - -func (s *AuthorisedKeysKeysSuite) TestEnsureJujuComment(c *gc.C) { - sshKey := sshtesting.ValidKeyOne.Key - for _, test := range []struct { - key string - expected string - }{ - {"invalid-key", "invalid-key"}, - {sshKey, sshKey + " Juju:sshkey"}, - {sshKey + " user@host", sshKey + " Juju:user@host"}, - {sshKey + " Juju:user@host", sshKey + " Juju:user@host"}, - {sshKey + " " + sshKey[3:5], sshKey + " Juju:" + sshKey[3:5]}, - } { - actual := ssh.EnsureJujuComment(test.key) - c.Assert(actual, gc.Equals, test.expected) - } -} - -func (s *AuthorisedKeysKeysSuite) TestSplitAuthorisedKeys(c *gc.C) { - sshKey := sshtesting.ValidKeyOne.Key - for _, test := range []struct { - keyData string - expected []string - }{ - {"", nil}, - {sshKey, []string{sshKey}}, - {sshKey + "\n", []string{sshKey}}, - {sshKey + "\n\n", []string{sshKey}}, - {sshKey + "\n#comment\n", []string{sshKey}}, - {sshKey + "\n #comment\n", []string{sshKey}}, - {sshKey + "\ninvalid\n", []string{sshKey, "invalid"}}, - } { - actual := ssh.SplitAuthorisedKeys(test.keyData) - c.Assert(actual, gc.DeepEquals, test.expected) - } -} - -func b64decode(c *gc.C, s string) []byte { - b, err := base64.StdEncoding.DecodeString(s) - c.Assert(err, jc.ErrorIsNil) - return b -} - -func (s *AuthorisedKeysKeysSuite) TestParseAuthorisedKey(c *gc.C) { - for i, test := range []struct { - line string - key []byte - comment string - err string - }{{ - line: sshtesting.ValidKeyOne.Key, - key: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]), - }, { - line: sshtesting.ValidKeyOne.Key + " a b c", - key: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]), - comment: "a b c", - }, { - line: "ssh-xsa blah", - err: "invalid authorized_key \"ssh-xsa blah\"", - }, { - // options should be skipped - line: `no-pty,principals="\"",command="\!" ` + sshtesting.ValidKeyOne.Key, - key: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]), - }, { - line: "ssh-rsa", - err: "invalid authorized_key \"ssh-rsa\"", - }} { - c.Logf("test %d: %s", i, test.line) - ak, err := ssh.ParseAuthorisedKey(test.line) - if test.err != "" { - c.Assert(err, gc.ErrorMatches, test.err) - } else { - c.Assert(err, jc.ErrorIsNil) - c.Assert(ak, gc.Not(gc.IsNil)) - c.Assert(ak.Key, gc.DeepEquals, test.key) - c.Assert(ak.Comment, gc.Equals, test.comment) - } - } -} === removed file 'src/github.com/juju/juju/utils/ssh/clientkeys.go' --- src/github.com/juju/juju/utils/ssh/clientkeys.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/ssh/clientkeys.go 1970-01-01 00:00:00 +0000 @@ -1,183 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package ssh - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/juju/utils" - "github.com/juju/utils/set" - "golang.org/x/crypto/ssh" -) - -const clientKeyName = "juju_id_rsa" - -// PublicKeySuffix is the file extension for public key files. -const PublicKeySuffix = ".pub" - -var ( - clientKeysMutex sync.Mutex - - // clientKeys is a cached map of private key filenames - // to ssh.Signers. The private keys are those loaded - // from the client key directory, passed to LoadClientKeys. - clientKeys map[string]ssh.Signer -) - -// LoadClientKeys loads the client SSH keys from the -// specified directory, and caches them as a process-wide -// global. If the directory does not exist, it is created; -// if the directory did not exist, or contains no keys, it -// is populated with a new key pair. -// -// If the directory exists, then all pairs of files where one -// has the same name as the other + ".pub" will be loaded as -// private/public key pairs. -// -// Calls to LoadClientKeys will clear the previously loaded -// keys, and recompute the keys. -func LoadClientKeys(dir string) error { - clientKeysMutex.Lock() - defer clientKeysMutex.Unlock() - dir, err := utils.NormalizePath(dir) - if err != nil { - return err - } - if _, err := os.Stat(dir); err == nil { - keys, err := loadClientKeys(dir) - if err != nil { - return err - } else if len(keys) > 0 { - clientKeys = keys - return nil - } - // Directory exists but contains no keys; - // fall through and create one. - } - if err := os.MkdirAll(dir, 0700); err != nil { - return err - } - keyfile, key, err := generateClientKey(dir) - if err != nil { - os.RemoveAll(dir) - return err - } - clientKeys = map[string]ssh.Signer{keyfile: key} - return nil -} - -// ClearClientKeys clears the client keys cached in memory. -func ClearClientKeys() { - clientKeysMutex.Lock() - defer clientKeysMutex.Unlock() - clientKeys = nil -} - -func generateClientKey(dir string) (keyfile string, key ssh.Signer, err error) { - private, public, err := GenerateKey("juju-client-key") - if err != nil { - return "", nil, err - } - clientPrivateKey, err := ssh.ParsePrivateKey([]byte(private)) - if err != nil { - return "", nil, err - } - privkeyFilename := filepath.Join(dir, clientKeyName) - if err = ioutil.WriteFile(privkeyFilename, []byte(private), 0600); err != nil { - return "", nil, err - } - if err := ioutil.WriteFile(privkeyFilename+PublicKeySuffix, []byte(public), 0600); err != nil { - os.Remove(privkeyFilename) - return "", nil, err - } - return privkeyFilename, clientPrivateKey, nil -} - -func loadClientKeys(dir string) (map[string]ssh.Signer, error) { - publicKeyFiles, err := publicKeyFiles(dir) - if err != nil { - return nil, err - } - keys := make(map[string]ssh.Signer, len(publicKeyFiles)) - for _, filename := range publicKeyFiles { - filename = filename[:len(filename)-len(PublicKeySuffix)] - data, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - keys[filename], err = ssh.ParsePrivateKey(data) - if err != nil { - return nil, fmt.Errorf("parsing key file %q: %v", filename, err) - } - } - return keys, nil -} - -// privateKeys returns the private keys loaded by LoadClientKeys. -func privateKeys() (signers []ssh.Signer) { - clientKeysMutex.Lock() - defer clientKeysMutex.Unlock() - for _, key := range clientKeys { - signers = append(signers, key) - } - return signers -} - -// PrivateKeyFiles returns the filenames of private SSH keys loaded by -// LoadClientKeys. 
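LoadClientKeys above either loads every private/public pair found in the directory or bootstraps a fresh juju_id_rsa pair, caching the result process-wide until ClearClientKeys or a subsequent LoadClientKeys call; PrivateKeyFiles, which follows, exposes the cached filenames. A usage sketch of the removed API:

	if err := ssh.LoadClientKeys("~/.juju/ssh"); err != nil {
		return err
	}
	// Feed the cached identities to an ssh invocation or client config.
	for _, priv := range ssh.PrivateKeyFiles() {
		fmt.Println("identity:", priv, "public key:", priv+ssh.PublicKeySuffix)
	}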
-func PrivateKeyFiles() []string { - clientKeysMutex.Lock() - defer clientKeysMutex.Unlock() - keyfiles := make([]string, 0, len(clientKeys)) - for f := range clientKeys { - keyfiles = append(keyfiles, f) - } - return keyfiles -} - -// PublicKeyFiles returns the filenames of public SSH keys loaded by -// LoadClientKeys. -func PublicKeyFiles() []string { - privkeys := PrivateKeyFiles() - pubkeys := make([]string, len(privkeys)) - for i, priv := range privkeys { - pubkeys[i] = priv + PublicKeySuffix - } - return pubkeys -} - -// publicKeyFiles returns the filenames of public SSH keys -// in the specified directory (all the files ending with .pub). -func publicKeyFiles(clientKeysDir string) ([]string, error) { - if clientKeysDir == "" { - return nil, nil - } - var keys []string - dir, err := os.Open(clientKeysDir) - if err != nil { - return nil, err - } - names, err := dir.Readdirnames(-1) - dir.Close() - if err != nil { - return nil, err - } - candidates := set.NewStrings(names...) - for _, name := range names { - if !strings.HasSuffix(name, PublicKeySuffix) { - continue - } - // If the private key filename also exists, add the file. - priv := name[:len(name)-len(PublicKeySuffix)] - if candidates.Contains(priv) { - keys = append(keys, filepath.Join(dir.Name(), name)) - } - } - return keys, nil -} === removed file 'src/github.com/juju/juju/utils/ssh/clientkeys_test.go' --- src/github.com/juju/juju/utils/ssh/clientkeys_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/clientkeys_test.go 1970-01-01 00:00:00 +0000 @@ -1,106 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh_test - -import ( - "io/ioutil" - "os" - - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" -) - -type ClientKeysSuite struct { - gitjujutesting.FakeHomeSuite -} - -var _ = gc.Suite(&ClientKeysSuite{}) - -func (s *ClientKeysSuite) SetUpTest(c *gc.C) { - s.FakeHomeSuite.SetUpTest(c) - s.AddCleanup(func(*gc.C) { ssh.ClearClientKeys() }) - generateKeyRestorer := overrideGenerateKey(c) - s.AddCleanup(func(*gc.C) { generateKeyRestorer.Restore() }) -} - -func checkFiles(c *gc.C, obtained, expected []string) { - var err error - for i, e := range expected { - expected[i], err = utils.NormalizePath(e) - c.Assert(err, jc.ErrorIsNil) - } - c.Assert(obtained, jc.SameContents, expected) -} - -func checkPublicKeyFiles(c *gc.C, expected ...string) { - keys := ssh.PublicKeyFiles() - checkFiles(c, keys, expected) -} - -func checkPrivateKeyFiles(c *gc.C, expected ...string) { - keys := ssh.PrivateKeyFiles() - checkFiles(c, keys, expected) -} - -func (s *ClientKeysSuite) TestPublicKeyFiles(c *gc.C) { - // LoadClientKeys will create the specified directory - // and populate it with a key pair. - err := ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub") - // All files ending with .pub in the client key dir get picked up. - priv, pub, err := ssh.GenerateKey("whatever") - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever.pub"), []byte(pub), 0600) - c.Assert(err, jc.ErrorIsNil) - err = ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - // The new public key won't be observed until the - // corresponding private key exists. 
- checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub") - err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever"), []byte(priv), 0600) - c.Assert(err, jc.ErrorIsNil) - err = ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub", "~/.juju/ssh/whatever.pub") -} - -func (s *ClientKeysSuite) TestPrivateKeyFiles(c *gc.C) { - // Create/load client keys. They will be cached in memory: - // any files added to the directory will not be considered - // unless LoadClientKeys is called again. - err := ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") - priv, pub, err := ssh.GenerateKey("whatever") - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever"), []byte(priv), 0600) - c.Assert(err, jc.ErrorIsNil) - err = ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - // The new private key won't be observed until the - // corresponding public key exists. - checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") - err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever.pub"), []byte(pub), 0600) - c.Assert(err, jc.ErrorIsNil) - // new keys won't be reported until we call LoadClientKeys again - checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub") - checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") - err = ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub", "~/.juju/ssh/whatever.pub") - checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa", "~/.juju/ssh/whatever") -} - -func (s *ClientKeysSuite) TestLoadClientKeysDirExists(c *gc.C) { - err := os.MkdirAll(gitjujutesting.HomePath(".juju", "ssh"), 0755) - c.Assert(err, jc.ErrorIsNil) - err = ssh.LoadClientKeys("~/.juju/ssh") - c.Assert(err, jc.ErrorIsNil) - checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") -} === removed file 'src/github.com/juju/juju/utils/ssh/export_test.go' --- src/github.com/juju/juju/utils/ssh/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,15 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh - -var ( - ReadAuthorisedKeys = readAuthorisedKeys - WriteAuthorisedKeys = writeAuthorisedKeys - InitDefaultClient = initDefaultClient - DefaultIdentities = &defaultIdentities - SSHDial = &sshDial - RSAGenerateKey = &rsaGenerateKey - TestCopyReader = copyReader - TestNewCmd = newCmd -) === removed file 'src/github.com/juju/juju/utils/ssh/fakes_test.go' --- src/github.com/juju/juju/utils/ssh/fakes_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/fakes_test.go 1970-01-01 00:00:00 +0000 @@ -1,125 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package ssh_test - -import ( - "bytes" - "io" - "io/ioutil" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" -) - -type fakeClient struct { - calls []string - hostArg string - commandArg []string - optionsArg *ssh.Options - copyArgs []string - - err error - cmd *ssh.Cmd - impl fakeCommandImpl -} - -func (cl *fakeClient) checkCalls(c *gc.C, host string, command []string, options *ssh.Options, copyArgs []string, calls ...string) { - c.Check(cl.hostArg, gc.Equals, host) - c.Check(cl.commandArg, jc.DeepEquals, command) - c.Check(cl.optionsArg, gc.Equals, options) - c.Check(cl.copyArgs, jc.DeepEquals, copyArgs) - c.Check(cl.calls, jc.DeepEquals, calls) -} - -func (cl *fakeClient) Command(host string, command []string, options *ssh.Options) *ssh.Cmd { - cl.calls = append(cl.calls, "Command") - cl.hostArg = host - cl.commandArg = command - cl.optionsArg = options - cmd := cl.cmd - if cmd == nil { - cmd = ssh.TestNewCmd(&cl.impl) - } - return cmd -} - -func (cl *fakeClient) Copy(args []string, options *ssh.Options) error { - cl.calls = append(cl.calls, "Copy") - cl.copyArgs = args - cl.optionsArg = options - return cl.err -} - -type bufferWriter struct { - bytes.Buffer -} - -func (*bufferWriter) Close() error { - return nil -} - -type fakeCommandImpl struct { - calls []string - stdinArg io.Reader - stdoutArg io.Writer - stderrArg io.Writer - stdinData bufferWriter - - err error - stdinRaw io.Reader - stdoutRaw io.Writer - stderrRaw io.Writer - stdoutData bytes.Buffer - stderrData bytes.Buffer -} - -func (ci *fakeCommandImpl) checkCalls(c *gc.C, stdin io.Reader, stdout, stderr io.Writer, calls ...string) { - c.Check(ci.stdinArg, gc.Equals, stdin) - c.Check(ci.stdoutArg, gc.Equals, stdout) - c.Check(ci.stderrArg, gc.Equals, stderr) - c.Check(ci.calls, jc.DeepEquals, calls) -} - -func (ci *fakeCommandImpl) checkStdin(c *gc.C, data string) { - c.Check(ci.stdinData.String(), gc.Equals, data) -} - -func (ci *fakeCommandImpl) Start() error { - ci.calls = append(ci.calls, "Start") - return ci.err -} - -func (ci *fakeCommandImpl) Wait() error { - ci.calls = append(ci.calls, "Wait") - return ci.err -} - -func (ci *fakeCommandImpl) Kill() error { - ci.calls = append(ci.calls, "Kill") - return ci.err -} - -func (ci *fakeCommandImpl) SetStdio(stdin io.Reader, stdout, stderr io.Writer) { - ci.calls = append(ci.calls, "SetStdio") - ci.stdinArg = stdin - ci.stdoutArg = stdout - ci.stderrArg = stderr -} - -func (ci *fakeCommandImpl) StdinPipe() (io.WriteCloser, io.Reader, error) { - ci.calls = append(ci.calls, "StdinPipe") - return &ci.stdinData, ci.stdinRaw, ci.err -} - -func (ci *fakeCommandImpl) StdoutPipe() (io.ReadCloser, io.Writer, error) { - ci.calls = append(ci.calls, "StdoutPipe") - return ioutil.NopCloser(&ci.stdoutData), ci.stdoutRaw, ci.err -} - -func (ci *fakeCommandImpl) StderrPipe() (io.ReadCloser, io.Writer, error) { - ci.calls = append(ci.calls, "StderrPipe") - return ioutil.NopCloser(&ci.stderrData), ci.stderrRaw, ci.err -} === removed file 'src/github.com/juju/juju/utils/ssh/fingerprint.go' --- src/github.com/juju/juju/utils/ssh/fingerprint.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/utils/ssh/fingerprint.go 1970-01-01 00:00:00 +0000 @@ -1,31 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh - -import ( - "bytes" - "crypto/md5" - "fmt" -) - -// KeyFingerprint returns the fingerprint and comment for the specified key -// in authorized_key format. 
Fingerprints are generated according to RFC4716. -// See http://www.ietf.org/rfc/rfc4716.txt, section 4. -func KeyFingerprint(key string) (fingerprint, comment string, err error) { - ak, err := ParseAuthorisedKey(key) - if err != nil { - return "", "", fmt.Errorf("generating key fingerprint: %v", err) - } - hash := md5.New() - hash.Write(ak.Key) - sum := hash.Sum(nil) - var buf bytes.Buffer - for i := 0; i < hash.Size(); i++ { - if i > 0 { - buf.WriteByte(':') - } - buf.WriteString(fmt.Sprintf("%02x", sum[i])) - } - return buf.String(), ak.Comment, nil -} === removed file 'src/github.com/juju/juju/utils/ssh/fingerprint_test.go' --- src/github.com/juju/juju/utils/ssh/fingerprint_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/fingerprint_test.go 1970-01-01 00:00:00 +0000 @@ -1,37 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh_test - -import ( - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" - sshtesting "github.com/juju/juju/utils/ssh/testing" -) - -type FingerprintSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&FingerprintSuite{}) - -func (s *FingerprintSuite) TestKeyFingerprint(c *gc.C) { - keys := []sshtesting.SSHKey{ - sshtesting.ValidKeyOne, - sshtesting.ValidKeyTwo, - sshtesting.ValidKeyThree, - } - for _, k := range keys { - fingerprint, _, err := ssh.KeyFingerprint(k.Key) - c.Assert(err, jc.ErrorIsNil) - c.Assert(fingerprint, gc.Equals, k.Fingerprint) - } -} - -func (s *FingerprintSuite) TestKeyFingerprintError(c *gc.C) { - _, _, err := ssh.KeyFingerprint("invalid key") - c.Assert(err, gc.ErrorMatches, `generating key fingerprint: invalid authorized_key "invalid key"`) -} === removed file 'src/github.com/juju/juju/utils/ssh/generate.go' --- src/github.com/juju/juju/utils/ssh/generate.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/ssh/generate.go 1970-01-01 00:00:00 +0000 @@ -1,65 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "strings" - - "github.com/juju/errors" - "golang.org/x/crypto/ssh" -) - -// rsaGenerateKey allows for tests to patch out rsa key generation -var rsaGenerateKey = rsa.GenerateKey - -// KeyBits is used to determine the number of bits to use for the RSA keys -// created using the GenerateKey function. -var KeyBits = 2048 - -// GenerateKey makes a 2048 bit RSA no-passphrase SSH capable key. The bit -// size is actually controlled by the KeyBits var. The private key returned is -// encoded to ASCII using the PKCS1 encoding. The public key is suitable to -// be added into an authorized_keys file, and has the comment passed in as the -// comment part of the key. -func GenerateKey(comment string) (private, public string, err error) { - key, err := rsaGenerateKey(rand.Reader, KeyBits) - if err != nil { - return "", "", errors.Trace(err) - } - - identity := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(key), - }) - - public, err = PublicKey(identity, comment) - if err != nil { - return "", "", errors.Trace(err) - } - - return string(identity), public, nil -}
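GenerateKey above and PublicKey just below are the whole key-creation story: an RSA key is minted, PEM-encoded as PKCS#1, then re-parsed to derive the authorized_keys line. A condensed, self-contained sketch of that round trip (the 512-bit size and "some-comment" are illustrative values, not what the package uses):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"strings"

	"golang.org/x/crypto/ssh"
)

func main() {
	// 512 bits keeps the sketch fast; the real code uses KeyBits (2048).
	key, err := rsa.GenerateKey(rand.Reader, 512)
	if err != nil {
		panic(err)
	}
	// PKCS#1 PEM block, as GenerateKey emits.
	private := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	// Re-parse the PEM and render the authorized_keys form, as PublicKey does.
	signer, err := ssh.ParsePrivateKey(private)
	if err != nil {
		panic(err)
	}
	authKey := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(signer.PublicKey())))
	fmt.Printf("%s some-comment\n", authKey)
}
```

- -// PublicKey returns the public key for any private key. The public key is -// suitable to be added into an authorized_keys file, and has the comment -// passed in as the comment part of the key.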
-func PublicKey(privateKey []byte, comment string) (string, error) { - signer, err := ssh.ParsePrivateKey(privateKey) - if err != nil { - return "", errors.Annotate(err, "failed to load key") - } - - auth_key := string(ssh.MarshalAuthorizedKey(signer.PublicKey())) - // Strip off the trailing new line so we can add a comment. - auth_key = strings.TrimSpace(auth_key) - public := fmt.Sprintf("%s %s\n", auth_key, comment) - - return public, nil -} === removed file 'src/github.com/juju/juju/utils/ssh/generate_test.go' --- src/github.com/juju/juju/utils/ssh/generate_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/generate_test.go 1970-01-01 00:00:00 +0000 @@ -1,53 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh_test - -import ( - "crypto/rsa" - "io" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" -) - -type GenerateSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&GenerateSuite{}) - -var pregeneratedKey *rsa.PrivateKey - -// overrideGenerateKey patches out rsa.GenerateKey to create a single testing -// key which is saved and used between tests to save computation time. -func overrideGenerateKey(c *gc.C) testing.Restorer { - restorer := testing.PatchValue(ssh.RSAGenerateKey, func(random io.Reader, bits int) (*rsa.PrivateKey, error) { - if pregeneratedKey != nil { - return pregeneratedKey, nil - } - // Ignore requested bits and just use 512 bits for speed - key, err := rsa.GenerateKey(random, 512) - if err != nil { - return nil, err - } - key.Precompute() - pregeneratedKey = key - return key, nil - }) - return restorer -} - -func (s *GenerateSuite) TestGenerate(c *gc.C) { - defer overrideGenerateKey(c).Restore() - private, public, err := ssh.GenerateKey("some-comment") - - c.Check(err, jc.ErrorIsNil) - c.Check(private, jc.HasPrefix, "-----BEGIN RSA PRIVATE KEY-----\n") - c.Check(private, jc.HasSuffix, "-----END RSA PRIVATE KEY-----\n") - c.Check(public, jc.HasPrefix, "ssh-rsa ") - c.Check(public, jc.HasSuffix, " some-comment\n") -} === removed file 'src/github.com/juju/juju/utils/ssh/package_test.go' --- src/github.com/juju/juju/utils/ssh/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/github.com/juju/juju/utils/ssh/run.go' --- src/github.com/juju/juju/utils/ssh/run.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/utils/ssh/run.go 1970-01-01 00:00:00 +0000 @@ -1,165 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh - -import ( - "bytes" - "os/exec" - "strings" - "syscall" - "time" - - "github.com/juju/errors" - "github.com/juju/utils/clock" - utilexec "github.com/juju/utils/exec" -) - -// ExecParams are used for the parameters for ExecuteCommandOnMachine. -type ExecParams struct { - IdentityFile string - Host string - Command string - Timeout time.Duration -} - -// StartCommandOnMachine executes the command on the given host. The -// command is run in a Bash shell over an SSH connection. All output -// is captured. 
A RunningCmd is returned that may be used to wait -// for the command to finish running. -func StartCommandOnMachine(params ExecParams) (*RunningCmd, error) { - // execute bash accepting commands on stdin - if params.Host == "" { - return nil, errors.Errorf("missing host address") - } - logger.Debugf("execute on %s", params.Host) - - var options Options - if params.IdentityFile != "" { - options.SetIdentities(params.IdentityFile) - } - command := Command(params.Host, []string{"/bin/bash", "-s"}, &options) - - // Run the command. - running := &RunningCmd{ - SSHCmd: command, - } - command.Stdout = &running.Stdout - command.Stderr = &running.Stderr - command.Stdin = strings.NewReader(params.Command + "\n") - if err := command.Start(); err != nil { - return nil, errors.Trace(err) - } - - return running, nil -} - -// RunningCmd represents a command that has been started. -type RunningCmd struct { - // SSHCmd is the command that was started. - SSHCmd *Cmd - - // Stdout and Stderr are the output streams the command is using. - Stdout bytes.Buffer - Stderr bytes.Buffer -} - -// Wait waits for the command to complete and returns the result. -func (cmd *RunningCmd) Wait() (result utilexec.ExecResponse, _ error) { - defer func() { - // Gather as much as we have from stdout and stderr. - result.Stdout = cmd.Stdout.Bytes() - result.Stderr = cmd.Stderr.Bytes() - }() - - err := cmd.SSHCmd.Wait() - logger.Debugf("command.Wait finished (err: %v)", err) - code, err := getExitCode(err) - if err != nil { - return result, errors.Trace(err) - } - - result.Code = code - return result, nil -} - -// TODO(ericsnow) Add RunningCmd.WaitAbortable(abortChan <-chan error) ... -// based on WaitWithTimeout and update WaitWithTimeout to use it. We -// could make it WaitAbortable(abortChans ...<-chan error), which would -// require using reflect.Select(). Then that could simply replace Wait(). -// It may make more sense, however, to have a helper function: -// Wait(cmd T, abortChans ...<-chan error) ... - -// Cancelled is an error indicating that a command timed out. -var Cancelled = errors.New("command timed out") - -// WaitWithCancel waits for the command to complete and returns the result. If -// cancel is closed before the result is returned, the command is killed -// and Cancelled is returned. -func (cmd *RunningCmd) WaitWithCancel(cancel <-chan struct{}) (utilexec.ExecResponse, error) { - var result utilexec.ExecResponse - - done := make(chan error, 1) - go func() { - defer close(done) - waitResult, err := cmd.Wait() - result = waitResult - done <- err - }() - - select { - case err := <-done: - return result, errors.Trace(err) - case <-cancel: - logger.Infof("killing the command due to cancellation") - cmd.SSHCmd.Kill() - - <-done // Ensure that the original cmd.Wait() call completed. - cmd.SSHCmd.Wait() // Finalize cmd.SSHCmd, if necessary. - return result, Cancelled - } -} - -func getExitCode(err error) (int, error) { - if err == nil { - return 0, nil - } - err = errors.Cause(err) - if ee, ok := err.(*exec.ExitError); ok { - raw := ee.ProcessState.Sys() - status, ok := raw.(syscall.WaitStatus) - if !ok { - logger.Errorf("unexpected type %T from ProcessState.Sys()", raw) - } else if status.Exited() { - // A non-zero return code isn't considered an error here. - return status.ExitStatus(), nil - } - } - return -1, err -}
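getExitCode above leans on the os/exec contract: a non-zero remote exit surfaces as an *exec.ExitError wrapping a syscall.WaitStatus, and only a genuine execution failure is treated as an error. A self-contained sketch of that unwrapping (the exitCode helper name and the /bin/sh command are illustrative; runs on any Unix-like host):

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// exitCode reproduces the getExitCode logic: a clean exit yields the
// status code; anything else is passed through as an error.
func exitCode(err error) (int, error) {
	if err == nil {
		return 0, nil
	}
	if ee, ok := err.(*exec.ExitError); ok {
		if status, ok := ee.ProcessState.Sys().(syscall.WaitStatus); ok && status.Exited() {
			return status.ExitStatus(), nil
		}
	}
	return -1, err
}

func main() {
	err := exec.Command("/bin/sh", "-c", "exit 42").Run()
	code, err := exitCode(err)
	fmt.Println(code, err) // prints: 42 <nil>
}
```

- -// ExecuteCommandOnMachine will execute the command passed through on -// the host specified. This is done using ssh, and passing the commands -// through /bin/bash.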
If the command is not finished within the timeout -// specified, an error is returned. Any output captured during that time -// is also returned in the remote response. -func ExecuteCommandOnMachine(args ExecParams) (utilexec.ExecResponse, error) { - var result utilexec.ExecResponse - - cmd, err := StartCommandOnMachine(args) - if err != nil { - return result, errors.Trace(err) - } - - cancel := make(chan struct{}) - go func() { - <-clock.WallClock.After(args.Timeout) - close(cancel) - }() - result, err = cmd.WaitWithCancel(cancel) - if err != nil { - return result, errors.Trace(err) - } - - return result, nil -} === removed file 'src/github.com/juju/juju/utils/ssh/run_test.go' --- src/github.com/juju/juju/utils/ssh/run_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/ssh/run_test.go 1970-01-01 00:00:00 +0000 @@ -1,132 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" -) - -const ( - shortWait = 50 * time.Millisecond - longWait = 10 * time.Second -) - -type ExecuteSSHCommandSuite struct { - testing.IsolationSuite - originalPath string - testbin string - fakessh string -} - -var _ = gc.Suite(&ExecuteSSHCommandSuite{}) - -func (s *ExecuteSSHCommandSuite) SetUpSuite(c *gc.C) { - s.originalPath = os.Getenv("PATH") - s.IsolationSuite.SetUpSuite(c) -} - -func (s *ExecuteSSHCommandSuite) SetUpTest(c *gc.C) { - if runtime.GOOS == "windows" { - c.Skip("issue 1403084: Tests use OpenSSH only") - } - s.IsolationSuite.SetUpTest(c) - err := os.Setenv("PATH", s.originalPath) - c.Assert(err, jc.ErrorIsNil) - s.testbin = c.MkDir() - s.fakessh = filepath.Join(s.testbin, "ssh") - s.PatchEnvPathPrepend(s.testbin) -} - -func (s *ExecuteSSHCommandSuite) fakeSSH(c *gc.C, cmd string) { - err := ioutil.WriteFile(s.fakessh, []byte(cmd), 0755) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *ExecuteSSHCommandSuite) TestCaptureOutput(c *gc.C) { - s.fakeSSH(c, echoSSH) - - response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{ - Host: "hostname", - Command: "sudo apt-get update\nsudo apt-get upgrade", - Timeout: longWait, - }) - - c.Assert(err, jc.ErrorIsNil) - c.Assert(response.Code, gc.Equals, 0) - c.Assert(string(response.Stdout), gc.Equals, "sudo apt-get update\nsudo apt-get upgrade\n") - c.Assert(string(response.Stderr), gc.Equals, - "-o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 hostname /bin/bash -s\n") -} - -func (s *ExecuteSSHCommandSuite) TestIdentityFile(c *gc.C) { - s.fakeSSH(c, echoSSH) - - response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{ - IdentityFile: "identity-file", - Host: "hostname", - Timeout: longWait, - }) - - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(response.Stderr), jc.Contains, " -i identity-file ") -} - -func (s *ExecuteSSHCommandSuite) TestTimoutCaptureOutput(c *gc.C) { - s.fakeSSH(c, slowSSH) - - response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{ - IdentityFile: "identity-file", - Host: "hostname", - Command: "ignored", - Timeout: shortWait, - }) - - c.Check(err, gc.ErrorMatches, "command timed out") - c.Assert(response.Code, gc.Equals, 0) - c.Assert(string(response.Stdout), gc.Equals, "stdout\n") - c.Assert(string(response.Stderr), gc.Equals, "stderr\n") -} - -func (s *ExecuteSSHCommandSuite) TestCapturesReturnCode(c *gc.C) { - 
s.fakeSSH(c, passthroughSSH) - - response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{ - IdentityFile: "identity-file", - Host: "hostname", - Command: "echo stdout; exit 42", - Timeout: longWait, - }) - - c.Check(err, jc.ErrorIsNil) - c.Assert(response.Code, gc.Equals, 42) - c.Assert(string(response.Stdout), gc.Equals, "stdout\n") - c.Assert(string(response.Stderr), gc.Equals, "") -} - -// echoSSH outputs the command args to stderr, and copies stdin to stdout -var echoSSH = `#!/bin/bash -# Write the args to stderr -echo "$*" >&2 -cat /dev/stdin -` - -// slowSSH sleeps for a while after outputting some text to stdout and stderr -var slowSSH = `#!/bin/bash -echo "stderr" >&2 -echo "stdout" -sleep 5s -` - -// passthroughSSH creates an ssh that executes stdin. -var passthroughSSH = `#!/bin/bash -s` === removed file 'src/github.com/juju/juju/utils/ssh/ssh.go' --- src/github.com/juju/juju/utils/ssh/ssh.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/ssh.go 1970-01-01 00:00:00 +0000 @@ -1,269 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// Package ssh contains utilities for dealing with SSH connections, -// key management, and so on. All SSH-based command executions in -// Juju should use the Command/ScpCommand functions in this package. -// -package ssh - -import ( - "bytes" - "errors" - "io" - "os/exec" - "syscall" - - "github.com/juju/cmd" - je "github.com/juju/errors" -) - -// Options is a client-implementation independent SSH options set. -type Options struct { - // proxyCommand specifies the command to - // execute to proxy SSH traffic through. - proxyCommand []string - // ssh server port; zero means use the default (22) - port int - // no PTY forced by default - allocatePTY bool - // password authentication is disallowed by default - passwordAuthAllowed bool - // identities is a sequence of paths to private key/identity files - // to use when attempting to log in. A client implementation may attempt - // additional identities, but must give preference to these - identities []string - // knownHostsFile is a path to a file in which to save the host's - // fingerprint. - knownHostsFile string -} - -// SetProxyCommand sets a command to execute to proxy traffic through. -func (o *Options) SetProxyCommand(command ...string) { - o.proxyCommand = append([]string{}, command...) -} - -// SetPort sets the SSH server port to connect to. -func (o *Options) SetPort(port int) { - o.port = port -} - -// EnablePTY forces the allocation of a pseudo-TTY. -// -// Forcing a pseudo-TTY is required, for example, for sudo -// prompts on the target host. -func (o *Options) EnablePTY() { - o.allocatePTY = true -} - -// SetKnownHostsFile sets the host's fingerprint to be saved in the given file. -// -// Host fingerprints are saved in ~/.ssh/known_hosts by default. -func (o *Options) SetKnownHostsFile(file string) { - o.knownHostsFile = file -} - -// AllowPasswordAuthentication allows the SSH -// client to prompt the user for a password. -// -// Password authentication is disallowed by default. -func (o *Options) AllowPasswordAuthentication() { - o.passwordAuthAllowed = true -} - -// SetIdentities sets a sequence of paths to private key/identity files -// to use when attempting login. Client implementations may attempt to -// use additional identities, but must give preference to the ones -// specified here. -func (o *Options) SetIdentities(identityFiles ...string) { - o.identities = append([]string{}, identityFiles...)
-} - -// Client is an interface for SSH clients to implement -type Client interface { - // Command returns a Command for executing a command - // on the specified host. Each Command is executed - // within its own SSH session. - // - // Host is specified in the format [user@]host. - Command(host string, command []string, options *Options) *Cmd - - // Copy copies file(s) between local and remote host(s). - // Paths are specified in the scp format, [[user@]host:]path. If - // any extra arguments are specified in extraArgs, they are passed - // verbatim. - Copy(args []string, options *Options) error -} - -// Cmd represents a command to be (or being) executed -// on a remote host. -type Cmd struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - impl command -} - -func newCmd(impl command) *Cmd { - return &Cmd{impl: impl} -} - -// CombinedOutput runs the command, and returns the -// combined stdout/stderr output and result of -// executing the command. -func (c *Cmd) CombinedOutput() ([]byte, error) { - if c.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if c.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b bytes.Buffer - c.Stdout = &b - c.Stderr = &b - err := c.Run() - return b.Bytes(), err -} - -// Output runs the command, and returns the stdout -// output and result of executing the command. -func (c *Cmd) Output() ([]byte, error) { - if c.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - c.Stdout = &b - err := c.Run() - return b.Bytes(), err -} - -// Run runs the command, and returns the result as an error. -func (c *Cmd) Run() error { - if err := c.Start(); err != nil { - return err - } - err := c.Wait() - if exitError, ok := err.(*exec.ExitError); ok && exitError != nil { - status := exitError.ProcessState.Sys().(syscall.WaitStatus) - if status.Exited() { - return cmd.NewRcPassthroughError(status.ExitStatus()) - } - } - return err -} - -// Start starts the command running, but does not wait for -// it to complete. If the command could not be started, an -// error is returned. -func (c *Cmd) Start() error { - c.impl.SetStdio(c.Stdin, c.Stdout, c.Stderr) - return c.impl.Start() -} - -// Wait waits for the started command to complete, -// and returns the result as an error. -func (c *Cmd) Wait() error { - return c.impl.Wait() -} - -// Kill kills the started command. -func (c *Cmd) Kill() error { - return c.impl.Kill() -} - -// StdinPipe creates a pipe and connects it to -// the command's stdin. The read end of the pipe -// is assigned to c.Stdin. -func (c *Cmd) StdinPipe() (io.WriteCloser, error) { - wc, r, err := c.impl.StdinPipe() - if err != nil { - return nil, err - } - c.Stdin = r - return wc, nil -} - -// StdoutPipe creates a pipe and connects it to -// the command's stdout. The write end of the pipe -// is assigned to c.Stdout. -func (c *Cmd) StdoutPipe() (io.ReadCloser, error) { - rc, w, err := c.impl.StdoutPipe() - if err != nil { - return nil, err - } - c.Stdout = w - return rc, nil -} - -// StderrPipe creates a pipe and connects it to -// the command's stderr. The write end of the pipe -// is assigned to c.Stderr. -func (c *Cmd) StderrPipe() (io.ReadCloser, error) { - rc, w, err := c.impl.StderrPipe() - if err != nil { - return nil, err - } - c.Stderr = w - return rc, nil -} - -// command is an implementation-specific representation of a -// command prepared to execute against a specific host. 
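The unexported command interface, declared next, is the seam between Cmd's stdio plumbing above and the two backends (the OpenSSH subprocess and the go.crypto session). A toy in-memory backend, sketched here purely for illustration and not part of the package (fakes_test.go above implements the same contract more fully for the test suite):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// nopCommand is a do-nothing backend that satisfies the same
// seven-method contract as the unexported command interface below.
type nopCommand struct {
	stdout io.Writer
}

func (c *nopCommand) Start() error {
	// Pretend the remote side produced some output.
	if c.stdout != nil {
		fmt.Fprintln(c.stdout, "hello from fake backend")
	}
	return nil
}

func (c *nopCommand) Wait() error { return nil }
func (c *nopCommand) Kill() error { return nil }

func (c *nopCommand) SetStdio(stdin io.Reader, stdout, stderr io.Writer) {
	c.stdout = stdout
}

func (c *nopCommand) StdinPipe() (io.WriteCloser, io.Reader, error) {
	return nil, nil, fmt.Errorf("not supported")
}
func (c *nopCommand) StdoutPipe() (io.ReadCloser, io.Writer, error) {
	return nil, nil, fmt.Errorf("not supported")
}
func (c *nopCommand) StderrPipe() (io.ReadCloser, io.Writer, error) {
	return nil, nil, fmt.Errorf("not supported")
}

func main() {
	var buf bytes.Buffer
	c := &nopCommand{}
	c.SetStdio(nil, &buf, nil) // mirror what Cmd.Start does
	c.Start()
	c.Wait()
	fmt.Print(buf.String())
}
```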
-type command interface { - Start() error - Wait() error - Kill() error - SetStdio(stdin io.Reader, stdout, stderr io.Writer) - StdinPipe() (io.WriteCloser, io.Reader, error) - StdoutPipe() (io.ReadCloser, io.Writer, error) - StderrPipe() (io.ReadCloser, io.Writer, error) -} - -// DefaultClient is the default SSH client for the process. -// -// If the OpenSSH client is found in $PATH, then it will be -// used for DefaultClient; otherwise, DefaultClient will use -// an embedded client based on go.crypto/ssh. -var DefaultClient Client - -// chosenClient holds the type of SSH client created for -// DefaultClient, so that we can log it in Command or Copy. -var chosenClient string - -func init() { - initDefaultClient() -} - -func initDefaultClient() { - if client, err := NewOpenSSHClient(); err == nil { - DefaultClient = client - chosenClient = "OpenSSH" - } else if client, err := NewGoCryptoClient(); err == nil { - DefaultClient = client - chosenClient = "go.crypto (embedded)" - } -} - -// Command is a short-cut for DefaultClient.Command. -func Command(host string, command []string, options *Options) *Cmd { - logger.Debugf("using %s ssh client", chosenClient) - return DefaultClient.Command(host, command, options) -} - -// Copy is a short-cut for DefaultClient.Copy. -func Copy(args []string, options *Options) error { - logger.Debugf("using %s ssh client", chosenClient) - return DefaultClient.Copy(args, options) -} - -// CopyReader sends the reader's data to a file on the remote host over SSH. -func CopyReader(host, filename string, r io.Reader, options *Options) error { - logger.Debugf("using %s ssh client", chosenClient) - return copyReader(DefaultClient, host, filename, r, options) -} - -func copyReader(client Client, host, filename string, r io.Reader, options *Options) error { - cmd := client.Command(host, []string{"cat - > " + filename}, options) - cmd.Stdin = r - return je.Trace(cmd.Run()) -} === removed file 'src/github.com/juju/juju/utils/ssh/ssh_gocrypto.go' --- src/github.com/juju/juju/utils/ssh/ssh_gocrypto.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/utils/ssh/ssh_gocrypto.go 1970-01-01 00:00:00 +0000 @@ -1,243 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh - -import ( - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "os/user" - "strconv" - "strings" - - "github.com/juju/utils" - "golang.org/x/crypto/ssh" -) - -const sshDefaultPort = 22 - -// GoCryptoClient is an implementation of Client that -// uses the embedded go.crypto/ssh SSH client. -// -// GoCryptoClient is intentionally limited in the -// functionality that it enables, as it is currently -// intended to be used only for non-interactive command -// execution. -type GoCryptoClient struct { - signers []ssh.Signer -} - -// NewGoCryptoClient creates a new GoCryptoClient. -// -// If no signers are specified, NewGoCryptoClient will -// use the private key generated by LoadClientKeys. -func NewGoCryptoClient(signers ...ssh.Signer) (*GoCryptoClient, error) { - return &GoCryptoClient{signers: signers}, nil -} - -// Command implements Client.Command. -func (c *GoCryptoClient) Command(host string, command []string, options *Options) *Cmd { - shellCommand := utils.CommandString(command...) 
- signers := c.signers - if len(signers) == 0 { - signers = privateKeys() - } - user, host := splitUserHost(host) - port := sshDefaultPort - var proxyCommand []string - if options != nil { - if options.port != 0 { - port = options.port - } - proxyCommand = options.proxyCommand - } - logger.Tracef(`running (equivalent of): ssh "%s@%s" -p %d '%s'`, user, host, port, shellCommand) - return &Cmd{impl: &goCryptoCommand{ - signers: signers, - user: user, - addr: net.JoinHostPort(host, strconv.Itoa(port)), - command: shellCommand, - proxyCommand: proxyCommand, - }} -} - -// Copy implements Client.Copy. -// -// Copy is currently unimplemented, and will always return an error. -func (c *GoCryptoClient) Copy(args []string, options *Options) error { - return fmt.Errorf("scp command is not implemented (OpenSSH scp not available in PATH)") -} - -type goCryptoCommand struct { - signers []ssh.Signer - user string - addr string - command string - proxyCommand []string - stdin io.Reader - stdout io.Writer - stderr io.Writer - client *ssh.Client - sess *ssh.Session -} - -var sshDial = ssh.Dial - -var sshDialWithProxy = func(addr string, proxyCommand []string, config *ssh.ClientConfig) (*ssh.Client, error) { - if len(proxyCommand) == 0 { - return sshDial("tcp", addr, config) - } - // User has specified a proxy. Create a pipe and - // redirect the proxy command's stdin/stdout to it. - host, port, err := net.SplitHostPort(addr) - if err != nil { - host = addr - } - for i, arg := range proxyCommand { - arg = strings.Replace(arg, "%h", host, -1) - if port != "" { - arg = strings.Replace(arg, "%p", port, -1) - } - arg = strings.Replace(arg, "%r", config.User, -1) - proxyCommand[i] = arg - } - client, server := net.Pipe() - logger.Tracef(`executing proxy command %q`, proxyCommand) - cmd := exec.Command(proxyCommand[0], proxyCommand[1:]...) 
- cmd.Stdin = server - cmd.Stdout = server - cmd.Stderr = os.Stderr - if err := cmd.Start(); err != nil { - return nil, err - } - conn, chans, reqs, err := ssh.NewClientConn(client, addr, config) - if err != nil { - return nil, err - } - return ssh.NewClient(conn, chans, reqs), nil -} - -func (c *goCryptoCommand) ensureSession() (*ssh.Session, error) { - if c.sess != nil { - return c.sess, nil - } - if len(c.signers) == 0 { - return nil, fmt.Errorf("no private keys available") - } - if c.user == "" { - currentUser, err := user.Current() - if err != nil { - return nil, fmt.Errorf("getting current user: %v", err) - } - c.user = currentUser.Username - } - config := &ssh.ClientConfig{ - User: c.user, - Auth: []ssh.AuthMethod{ - ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { - return c.signers, nil - }), - }, - } - client, err := sshDialWithProxy(c.addr, c.proxyCommand, config) - if err != nil { - return nil, err - } - sess, err := client.NewSession() - if err != nil { - client.Close() - return nil, err - } - c.client = client - c.sess = sess - c.sess.Stdin = c.stdin - c.sess.Stdout = c.stdout - c.sess.Stderr = c.stderr - return sess, nil -} - -func (c *goCryptoCommand) Start() error { - sess, err := c.ensureSession() - if err != nil { - return err - } - if c.command == "" { - return sess.Shell() - } - return sess.Start(c.command) -} - -func (c *goCryptoCommand) Close() error { - if c.sess == nil { - return nil - } - err0 := c.sess.Close() - err1 := c.client.Close() - if err0 == nil { - err0 = err1 - } - c.sess = nil - c.client = nil - return err0 -} - -func (c *goCryptoCommand) Wait() error { - if c.sess == nil { - return fmt.Errorf("Command has not been started") - } - err := c.sess.Wait() - c.Close() - return err -} - -func (c *goCryptoCommand) Kill() error { - if c.sess == nil { - return fmt.Errorf("Command has not been started") - } - return c.sess.Signal(ssh.SIGKILL) -} - -func (c *goCryptoCommand) SetStdio(stdin io.Reader, stdout, stderr io.Writer) { - c.stdin = stdin - c.stdout = stdout - c.stderr = stderr -} - -func (c *goCryptoCommand) StdinPipe() (io.WriteCloser, io.Reader, error) { - sess, err := c.ensureSession() - if err != nil { - return nil, nil, err - } - wc, err := sess.StdinPipe() - return wc, sess.Stdin, err -} - -func (c *goCryptoCommand) StdoutPipe() (io.ReadCloser, io.Writer, error) { - sess, err := c.ensureSession() - if err != nil { - return nil, nil, err - } - wc, err := sess.StdoutPipe() - return ioutil.NopCloser(wc), sess.Stdout, err -} - -func (c *goCryptoCommand) StderrPipe() (io.ReadCloser, io.Writer, error) { - sess, err := c.ensureSession() - if err != nil { - return nil, nil, err - } - wc, err := sess.StderrPipe() - return ioutil.NopCloser(wc), sess.Stderr, err -} - -func splitUserHost(s string) (user, host string) { - userHost := strings.SplitN(s, "@", 2) - if len(userHost) == 2 { - return userHost[0], userHost[1] - } - return "", userHost[0] -} === removed file 'src/github.com/juju/juju/utils/ssh/ssh_gocrypto_test.go' --- src/github.com/juju/juju/utils/ssh/ssh_gocrypto_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/utils/ssh/ssh_gocrypto_test.go 1970-01-01 00:00:00 +0000 @@ -1,199 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
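The proxy support in sshDialWithProxy above boils down to one trick: run the ProxyCommand as a child process and hand go.crypto one end of an in-memory net.Pipe instead of a TCP connection. A condensed sketch of that trick (the dialViaProxy name, the nc argv, the address, and the no-auth ClientConfig are all placeholders; the %h/%p/%r substitution is elided):

```go
package main

import (
	"fmt"
	"net"
	"os"
	"os/exec"

	"golang.org/x/crypto/ssh"
)

// dialViaProxy connects to addr by piping all SSH traffic through
// proxyArgv's stdin/stdout, mirroring sshDialWithProxy above.
func dialViaProxy(addr string, proxyArgv []string, config *ssh.ClientConfig) (*ssh.Client, error) {
	client, server := net.Pipe()
	cmd := exec.Command(proxyArgv[0], proxyArgv[1:]...)
	cmd.Stdin = server
	cmd.Stdout = server
	cmd.Stderr = os.Stderr
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	// Run the SSH handshake over the pipe rather than a socket.
	conn, chans, reqs, err := ssh.NewClientConn(client, addr, config)
	if err != nil {
		return nil, err
	}
	return ssh.NewClient(conn, chans, reqs), nil
}

func main() {
	config := &ssh.ClientConfig{User: "ubuntu"} // no auth configured: illustration only
	if _, err := dialViaProxy("10.0.0.1:22", []string{"nc", "10.0.0.1", "22"}, config); err != nil {
		fmt.Println(err)
	}
}
```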
- -package ssh_test - -import ( - "encoding/binary" - "errors" - "fmt" - "io/ioutil" - "net" - "os/exec" - "path/filepath" - "sync" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - cryptossh "golang.org/x/crypto/ssh" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" -) - -var ( - testCommand = []string{"echo", "$abc"} - testCommandFlat = `echo "\$abc"` -) - -type sshServer struct { - cfg *cryptossh.ServerConfig - listener net.Listener - client *cryptossh.Client -} - -func newServer(c *gc.C) *sshServer { - private, _, err := ssh.GenerateKey("test-server") - c.Assert(err, jc.ErrorIsNil) - key, err := cryptossh.ParsePrivateKey([]byte(private)) - c.Assert(err, jc.ErrorIsNil) - server := &sshServer{ - cfg: &cryptossh.ServerConfig{}, - } - server.cfg.AddHostKey(key) - server.listener, err = net.Listen("tcp", "127.0.0.1:0") - c.Assert(err, jc.ErrorIsNil) - return server -} - -func (s *sshServer) run(c *gc.C) { - netconn, err := s.listener.Accept() - c.Assert(err, jc.ErrorIsNil) - defer func() { - err := netconn.Close() - c.Assert(err, jc.ErrorIsNil) - }() - conn, chans, reqs, err := cryptossh.NewServerConn(netconn, s.cfg) - c.Assert(err, jc.ErrorIsNil) - s.client = cryptossh.NewClient(conn, chans, reqs) - var wg sync.WaitGroup - defer wg.Wait() - sessionChannels := s.client.HandleChannelOpen("session") - c.Assert(sessionChannels, gc.NotNil) - for newChannel := range sessionChannels { - c.Assert(newChannel.ChannelType(), gc.Equals, "session") - channel, reqs, err := newChannel.Accept() - c.Assert(err, jc.ErrorIsNil) - wg.Add(1) - go func() { - defer wg.Done() - defer channel.Close() - for req := range reqs { - switch req.Type { - case "exec": - c.Assert(req.WantReply, jc.IsTrue) - n := binary.BigEndian.Uint32(req.Payload[:4]) - command := string(req.Payload[4 : n+4]) - c.Assert(command, gc.Equals, testCommandFlat) - req.Reply(true, nil) - channel.Write([]byte("abc value\n")) - _, err := channel.SendRequest("exit-status", false, cryptossh.Marshal(&struct{ n uint32 }{0})) - c.Assert(err, jc.ErrorIsNil) - return - default: - c.Fatalf("Unexpected request type: %v", req.Type) - } - } - }() - } -} - -type SSHGoCryptoCommandSuite struct { - testing.IsolationSuite - client ssh.Client -} - -var _ = gc.Suite(&SSHGoCryptoCommandSuite{}) - -func (s *SSHGoCryptoCommandSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - generateKeyRestorer := overrideGenerateKey(c) - s.AddCleanup(func(*gc.C) { generateKeyRestorer.Restore() }) - client, err := ssh.NewGoCryptoClient() - c.Assert(err, jc.ErrorIsNil) - s.client = client -} - -func (s *SSHGoCryptoCommandSuite) TestNewGoCryptoClient(c *gc.C) { - _, err := ssh.NewGoCryptoClient() - c.Assert(err, jc.ErrorIsNil) - private, _, err := ssh.GenerateKey("test-client") - c.Assert(err, jc.ErrorIsNil) - key, err := cryptossh.ParsePrivateKey([]byte(private)) - c.Assert(err, jc.ErrorIsNil) - _, err = ssh.NewGoCryptoClient(key) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *SSHGoCryptoCommandSuite) TestClientNoKeys(c *gc.C) { - client, err := ssh.NewGoCryptoClient() - c.Assert(err, jc.ErrorIsNil) - cmd := client.Command("0.1.2.3", []string{"echo", "123"}, nil) - _, err = cmd.Output() - c.Assert(err, gc.ErrorMatches, "no private keys available") - defer ssh.ClearClientKeys() - err = ssh.LoadClientKeys(c.MkDir()) - c.Assert(err, jc.ErrorIsNil) - - s.PatchValue(ssh.SSHDial, func(network, address string, cfg *cryptossh.ClientConfig) (*cryptossh.Client, error) { - return nil, errors.New("ssh.Dial failed") - }) - cmd = 
client.Command("0.1.2.3", []string{"echo", "123"}, nil) - _, err = cmd.Output() - // error message differs based on whether using cgo or not - c.Assert(err, gc.ErrorMatches, "ssh.Dial failed") -} - -func (s *SSHGoCryptoCommandSuite) TestCommand(c *gc.C) { - private, _, err := ssh.GenerateKey("test-server") - c.Assert(err, jc.ErrorIsNil) - key, err := cryptossh.ParsePrivateKey([]byte(private)) - client, err := ssh.NewGoCryptoClient(key) - c.Assert(err, jc.ErrorIsNil) - server := newServer(c) - var opts ssh.Options - opts.SetPort(server.listener.Addr().(*net.TCPAddr).Port) - cmd := client.Command("127.0.0.1", testCommand, &opts) - checkedKey := false - server.cfg.PublicKeyCallback = func(conn cryptossh.ConnMetadata, pubkey cryptossh.PublicKey) (*cryptossh.Permissions, error) { - c.Check(pubkey, gc.DeepEquals, key.PublicKey()) - checkedKey = true - return nil, nil - } - go server.run(c) - out, err := cmd.Output() - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(out), gc.Equals, "abc value\n") - c.Assert(checkedKey, jc.IsTrue) -} - -func (s *SSHGoCryptoCommandSuite) TestCopy(c *gc.C) { - client, err := ssh.NewGoCryptoClient() - c.Assert(err, jc.ErrorIsNil) - err = client.Copy([]string{"0.1.2.3:b", c.MkDir()}, nil) - c.Assert(err, gc.ErrorMatches, `scp command is not implemented \(OpenSSH scp not available in PATH\)`) -} - -func (s *SSHGoCryptoCommandSuite) TestProxyCommand(c *gc.C) { - realNetcat, err := exec.LookPath("nc") - if err != nil { - c.Skip("skipping test, couldn't find netcat: %v") - return - } - netcat := filepath.Join(c.MkDir(), "nc") - err = ioutil.WriteFile(netcat, []byte("#!/bin/sh\necho $0 \"$@\" > $0.args && exec "+realNetcat+" \"$@\""), 0755) - c.Assert(err, jc.ErrorIsNil) - - private, _, err := ssh.GenerateKey("test-server") - c.Assert(err, jc.ErrorIsNil) - key, err := cryptossh.ParsePrivateKey([]byte(private)) - client, err := ssh.NewGoCryptoClient(key) - c.Assert(err, jc.ErrorIsNil) - server := newServer(c) - var opts ssh.Options - port := server.listener.Addr().(*net.TCPAddr).Port - opts.SetProxyCommand(netcat, "-q0", "%h", "%p") - opts.SetPort(port) - cmd := client.Command("127.0.0.1", testCommand, &opts) - server.cfg.PublicKeyCallback = func(_ cryptossh.ConnMetadata, pubkey cryptossh.PublicKey) (*cryptossh.Permissions, error) { - return nil, nil - } - go server.run(c) - out, err := cmd.Output() - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(out), gc.Equals, "abc value\n") - // Ensure the proxy command was executed with the appropriate arguments. - data, err := ioutil.ReadFile(netcat + ".args") - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(data), gc.Equals, fmt.Sprintf("%s -q0 127.0.0.1 %v\n", netcat, port)) -} === removed file 'src/github.com/juju/juju/utils/ssh/ssh_openssh.go' --- src/github.com/juju/juju/utils/ssh/ssh_openssh.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/utils/ssh/ssh_openssh.go 1970-01-01 00:00:00 +0000 @@ -1,196 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package ssh - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "strings" - - "github.com/juju/utils" -) - -var opensshCommonOptions = []string{"-o", "StrictHostKeyChecking no"} - -// default identities will not be attempted if -// -i is specified and they are not explcitly -// included. 
-var defaultIdentities = []string{ - "~/.ssh/identity", - "~/.ssh/id_rsa", - "~/.ssh/id_dsa", - "~/.ssh/id_ecdsa", -} - -type opensshCommandKind int - -const ( - sshKind opensshCommandKind = iota - scpKind -) - -// sshpassWrap wraps the command/args with sshpass if it is found in $PATH -// and the SSHPASS environment variable is set. Otherwise, the original -// command/args are returned. -func sshpassWrap(cmd string, args []string) (string, []string) { - if os.Getenv("SSHPASS") != "" { - if path, err := exec.LookPath("sshpass"); err == nil { - return path, append([]string{"-e", cmd}, args...) - } - } - return cmd, args -} - -// OpenSSHClient is an implementation of Client that -// uses the ssh and scp executables found in $PATH. -type OpenSSHClient struct{} - -// NewOpenSSHClient creates a new OpenSSHClient. -// If the ssh and scp programs cannot be found -// in $PATH, then an error is returned. -func NewOpenSSHClient() (*OpenSSHClient, error) { - var c OpenSSHClient - if _, err := exec.LookPath("ssh"); err != nil { - return nil, err - } - if _, err := exec.LookPath("scp"); err != nil { - return nil, err - } - return &c, nil -} - -func opensshOptions(options *Options, commandKind opensshCommandKind) []string { - args := append([]string{}, opensshCommonOptions...) - if options == nil { - options = &Options{} - } - if len(options.proxyCommand) > 0 { - args = append(args, "-o", "ProxyCommand "+utils.CommandString(options.proxyCommand...)) - } - if !options.passwordAuthAllowed { - args = append(args, "-o", "PasswordAuthentication no") - } - - // We must set ServerAliveInterval or the server may - // think we've become unresponsive on long running - // command executions such as "apt-get upgrade". - args = append(args, "-o", "ServerAliveInterval 30") - - if options.allocatePTY { - args = append(args, "-t", "-t") // twice to force - } - if options.knownHostsFile != "" { - args = append(args, "-o", "UserKnownHostsFile "+utils.CommandString(options.knownHostsFile)) - } - identities := append([]string{}, options.identities...) - if pk := PrivateKeyFiles(); len(pk) > 0 { - // Add client keys as implicit identities - identities = append(identities, pk...) - } - // If any identities are specified, the - // default ones must be explicitly specified. - if len(identities) > 0 { - for _, identity := range defaultIdentities { - path, err := utils.NormalizePath(identity) - if err != nil { - logger.Warningf("failed to normalize path %q: %v", identity, err) - continue - } - if _, err := os.Stat(path); err == nil { - identities = append(identities, path) - } - } - } - for _, identity := range identities { - args = append(args, "-i", identity) - } - if options.port != 0 { - port := fmt.Sprint(options.port) - if commandKind == scpKind { - // scp uses -P instead of -p (-p means preserve). - args = append(args, "-P", port) - } else { - args = append(args, "-p", port) - } - } - return args -} - -// Command implements Client.Command. -func (c *OpenSSHClient) Command(host string, command []string, options *Options) *Cmd { - args := opensshOptions(options, sshKind) - args = append(args, host) - if len(command) > 0 { - args = append(args, command...) - } - bin, args := sshpassWrap("ssh", args) - logger.Tracef("running: %s %s", bin, utils.CommandString(args...)) - return &Cmd{impl: &opensshCmd{exec.Command(bin, args...)}} -} - -// Copy implements Client.Copy. 
-func (c *OpenSSHClient) Copy(args []string, userOptions *Options) error { - var options Options - if userOptions != nil { - options = *userOptions - options.allocatePTY = false // doesn't make sense for scp - } - allArgs := opensshOptions(&options, scpKind) - allArgs = append(allArgs, args...) - bin, allArgs := sshpassWrap("scp", allArgs) - cmd := exec.Command(bin, allArgs...) - var stderr bytes.Buffer - cmd.Stderr = &stderr - logger.Tracef("running: %s %s", bin, utils.CommandString(args...)) - if err := cmd.Run(); err != nil { - stderr := strings.TrimSpace(stderr.String()) - if len(stderr) > 0 { - err = fmt.Errorf("%v (%v)", err, stderr) - } - return err - } - return nil -} - -type opensshCmd struct { - *exec.Cmd -} - -func (c *opensshCmd) SetStdio(stdin io.Reader, stdout, stderr io.Writer) { - c.Stdin, c.Stdout, c.Stderr = stdin, stdout, stderr -} - -func (c *opensshCmd) StdinPipe() (io.WriteCloser, io.Reader, error) { - wc, err := c.Cmd.StdinPipe() - if err != nil { - return nil, nil, err - } - return wc, c.Stdin, nil -} - -func (c *opensshCmd) StdoutPipe() (io.ReadCloser, io.Writer, error) { - rc, err := c.Cmd.StdoutPipe() - if err != nil { - return nil, nil, err - } - return rc, c.Stdout, nil -} - -func (c *opensshCmd) StderrPipe() (io.ReadCloser, io.Writer, error) { - rc, err := c.Cmd.StderrPipe() - if err != nil { - return nil, nil, err - } - return rc, c.Stderr, nil -} - -func (c *opensshCmd) Kill() error { - if c.Process == nil { - return fmt.Errorf("process has not been started") - } - return c.Process.Kill() -} === removed file 'src/github.com/juju/juju/utils/ssh/ssh_test.go' --- src/github.com/juju/juju/utils/ssh/ssh_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/ssh/ssh_test.go 1970-01-01 00:00:00 +0000 @@ -1,235 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -// +build !windows - -package ssh_test - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/juju/cmd" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/ssh" -) - -const ( - echoCommand = "/bin/echo" - echoScript = "#!/bin/sh\n" + echoCommand + " $0 \"$@\" | /usr/bin/tee $0.args" -) - -type SSHCommandSuite struct { - testing.IsolationSuite - originalPath string - testbin string - fakessh string - fakescp string - client ssh.Client -} - -var _ = gc.Suite(&SSHCommandSuite{}) - -func (s *SSHCommandSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.testbin = c.MkDir() - s.fakessh = filepath.Join(s.testbin, "ssh") - s.fakescp = filepath.Join(s.testbin, "scp") - err := ioutil.WriteFile(s.fakessh, []byte(echoScript), 0755) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(s.fakescp, []byte(echoScript), 0755) - c.Assert(err, jc.ErrorIsNil) - s.PatchEnvPathPrepend(s.testbin) - s.client, err = ssh.NewOpenSSHClient() - c.Assert(err, jc.ErrorIsNil) - s.PatchValue(ssh.DefaultIdentities, nil) -} - -func (s *SSHCommandSuite) command(args ...string) *ssh.Cmd { - return s.commandOptions(args, nil) -} - -func (s *SSHCommandSuite) commandOptions(args []string, opts *ssh.Options) *ssh.Cmd { - return s.client.Command("localhost", args, opts) -} - -func (s *SSHCommandSuite) assertCommandArgs(c *gc.C, cmd *ssh.Cmd, expected string) { - out, err := cmd.Output() - c.Assert(err, jc.ErrorIsNil) - c.Assert(strings.TrimSpace(string(out)), gc.Equals, expected) -} - -func (s *SSHCommandSuite) TestDefaultClient(c *gc.C) { - ssh.InitDefaultClient() - c.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.OpenSSHClient{}) - s.PatchEnvironment("PATH", "") - ssh.InitDefaultClient() - c.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.GoCryptoClient{}) -} - -func (s *SSHCommandSuite) TestCommandSSHPass(c *gc.C) { - // First create a fake sshpass, but don't set $SSHPASS - fakesshpass := filepath.Join(s.testbin, "sshpass") - err := ioutil.WriteFile(fakesshpass, []byte(echoScript), 0755) - s.assertCommandArgs(c, s.command(echoCommand, "123"), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", - s.fakessh, echoCommand), - ) - // Now set $SSHPASS. - s.PatchEnvironment("SSHPASS", "anyoldthing") - s.assertCommandArgs(c, s.command(echoCommand, "123"), - fmt.Sprintf("%s -e ssh -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", - fakesshpass, echoCommand), - ) - // Finally, remove sshpass from $PATH. 
- err = os.Remove(fakesshpass) - c.Assert(err, jc.ErrorIsNil) - s.assertCommandArgs(c, s.command(echoCommand, "123"), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommand(c *gc.C) { - s.assertCommandArgs(c, s.command(echoCommand, "123"), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommandEnablePTY(c *gc.C) { - var opts ssh.Options - opts.EnablePTY() - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -t -t localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommandSetKnownHostsFile(c *gc.C) { - var opts ssh.Options - opts.SetKnownHostsFile("/tmp/known hosts") - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -o UserKnownHostsFile \"/tmp/known hosts\" localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommandAllowPasswordAuthentication(c *gc.C) { - var opts ssh.Options - opts.AllowPasswordAuthentication() - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o ServerAliveInterval 30 localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommandIdentities(c *gc.C) { - var opts ssh.Options - opts.SetIdentities("x", "y") - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommandPort(c *gc.C) { - var opts ssh.Options - opts.SetPort(2022) - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -p 2022 localhost %s 123", - s.fakessh, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCopy(c *gc.C) { - var opts ssh.Options - opts.EnablePTY() - opts.AllowPasswordAuthentication() - opts.SetIdentities("x", "y") - opts.SetPort(2022) - err := s.client.Copy([]string{"/tmp/blah", "foo@bar.com:baz"}, &opts) - c.Assert(err, jc.ErrorIsNil) - out, err := ioutil.ReadFile(s.fakescp + ".args") - c.Assert(err, jc.ErrorIsNil) - // EnablePTY has no effect for Copy - c.Assert(string(out), gc.Equals, s.fakescp+" -o StrictHostKeyChecking no -o ServerAliveInterval 30 -i x -i y -P 2022 /tmp/blah foo@bar.com:baz\n") - - // Try passing extra args - err = s.client.Copy([]string{"/tmp/blah", "foo@bar.com:baz", "-r", "-v"}, &opts) - c.Assert(err, jc.ErrorIsNil) - out, err = ioutil.ReadFile(s.fakescp + ".args") - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(out), gc.Equals, s.fakescp+" -o StrictHostKeyChecking no -o ServerAliveInterval 30 -i x -i y -P 2022 /tmp/blah foo@bar.com:baz -r -v\n") - - // Try interspersing extra args - err = s.client.Copy([]string{"-r", "/tmp/blah", "-v", "foo@bar.com:baz"}, &opts) - c.Assert(err, jc.ErrorIsNil) - out, err = ioutil.ReadFile(s.fakescp + ".args") - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(out), gc.Equals, s.fakescp+" -o 
StrictHostKeyChecking no -o ServerAliveInterval 30 -i x -i y -P 2022 -r /tmp/blah -v foo@bar.com:baz\n") -} - -func (s *SSHCommandSuite) TestCommandClientKeys(c *gc.C) { - defer overrideGenerateKey(c).Restore() - clientKeysDir := c.MkDir() - defer ssh.ClearClientKeys() - err := ssh.LoadClientKeys(clientKeysDir) - c.Assert(err, jc.ErrorIsNil) - ck := filepath.Join(clientKeysDir, "juju_id_rsa") - var opts ssh.Options - opts.SetIdentities("x", "y") - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y -i %s localhost %s 123", - s.fakessh, ck, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCommandError(c *gc.C) { - var opts ssh.Options - err := ioutil.WriteFile(s.fakessh, []byte("#!/bin/sh\nexit 42"), 0755) - c.Assert(err, jc.ErrorIsNil) - command := s.client.Command("ignored", []string{echoCommand, "foo"}, &opts) - err = command.Run() - c.Assert(cmd.IsRcPassthroughError(err), jc.IsTrue) -} - -func (s *SSHCommandSuite) TestCommandDefaultIdentities(c *gc.C) { - var opts ssh.Options - tempdir := c.MkDir() - def1 := filepath.Join(tempdir, "def1") - def2 := filepath.Join(tempdir, "def2") - s.PatchValue(ssh.DefaultIdentities, []string{def1, def2}) - // If no identities are specified, then the defaults aren't added. - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", - s.fakessh, echoCommand), - ) - // If identities are specified, then the defaults must be added. - // Only the defaults that exist on disk will be added. - err := ioutil.WriteFile(def2, nil, 0644) - c.Assert(err, jc.ErrorIsNil) - opts.SetIdentities("x", "y") - s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), - fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y -i %s localhost %s 123", - s.fakessh, def2, echoCommand), - ) -} - -func (s *SSHCommandSuite) TestCopyReader(c *gc.C) { - client := &fakeClient{} - r := bytes.NewBufferString("") - - err := ssh.TestCopyReader(client, "foo@bar.com:baz", "/tmp/blah", r, nil) - c.Assert(err, jc.ErrorIsNil) - - client.checkCalls(c, "foo@bar.com:baz", []string{"cat - > /tmp/blah"}, nil, nil, "Command") - client.impl.checkCalls(c, r, nil, nil, "SetStdio", "Start", "Wait") -}
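Each SSHKey fixture in the removed testing package (below) pairs an authorized_keys entry with the MD5 fingerprint that KeyFingerprint, removed above, is expected to produce: the colon-separated hex digest of the raw key blob. A sketch of that derivation, using x/crypto's parser in place of Juju's ParseAuthorisedKey (the fingerprint helper and the placeholder key are illustrative only):

```go
package main

import (
	"crypto/md5"
	"fmt"
	"strings"

	"golang.org/x/crypto/ssh"
)

// fingerprint renders the RFC 4716 style colon-separated MD5 digest
// of an authorized_keys entry, matching KeyFingerprint's output form.
func fingerprint(authorizedKey string) (string, error) {
	key, _, _, _, err := ssh.ParseAuthorizedKey([]byte(authorizedKey))
	if err != nil {
		return "", err
	}
	// key.Marshal() is the same wire-format blob KeyFingerprint hashes.
	sum := md5.Sum(key.Marshal())
	parts := make([]string, len(sum))
	for i, b := range sum {
		parts[i] = fmt.Sprintf("%02x", b)
	}
	return strings.Join(parts, ":"), nil
}

func main() {
	// A real authorized_keys line goes here; this truncated placeholder will not parse.
	fp, err := fingerprint("ssh-rsa AAAAB3NzaC1yc2E... test-comment")
	fmt.Println(fp, err)
}
```

=== removed directory 'src/github.com/juju/juju/utils/ssh/testing' === removed file 'src/github.com/juju/juju/utils/ssh/testing/keys.go' --- src/github.com/juju/juju/utils/ssh/testing/keys.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/ssh/testing/keys.go 1970-01-01 00:00:00 +0000 @@ -1,84 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.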
- -package testing - -type SSHKey struct { - Key string - Fingerprint string -} - -var ( - ValidKeyOne = SSHKey{ - `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEX/dPu4PmtvgK3La9zioCEDrJ` + - `yUr6xEIK7Pr+rLgydcqWTU/kt7w7gKjOw4vvzgHfjKl09CWyvgb+y5dCiTk` + - `9MxI+erGNhs3pwaoS+EavAbawB7iEqYyTep3YaJK+4RJ4OX7ZlXMAIMrTL+` + - `UVrK89t56hCkFYaAgo3VY+z6rb/b3bDBYtE1Y2tS7C3au73aDgeb9psIrSV` + - `86ucKBTl5X62FnYiyGd++xCnLB6uLximM5OKXfLzJQNS/QyZyk12g3D8y69` + - `Xw1GzCSKX1u1+MQboyf0HJcG2ryUCLHdcDVppApyHx2OLq53hlkQ/yxdflD` + - `qCqAE4j+doagSsIfC1T2T`, - "86:ed:1b:cd:26:a0:a3:4c:27:35:49:60:95:b7:0f:68", - } - - ValidKeyTwo = SSHKey{ - `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNC6zK8UMazlVgp8en8N7m7H/Y6` + - `DoMWbmPFjXYRXu6iQJJ18hCtsfMe63E5/PBaOjDT8am0Sx3Eqn4ZzpWMj+z` + - `knTcSd8xnMHYYxH2HStRWC1akTe4tTno2u2mqzjKd8f62URPtIocYCNRBls` + - `9yjnq9SogI5EXgcx6taQcrIFcIK0SlthxxcMVSlLpnbReujW65JHtiMqoYA` + - `OIALyO+Rkmtvb/ObmViDnwCKCN1up/xWt6J10MrAUtpI5b4prqG7FOqVMM/` + - `zdgrVg6rUghnzdYeQ8QMyEv4mVSLzX0XIPcxorkl9q06s5mZmAzysEbKZCO` + - `aXcLeNlXx/nkmuWslYCJ`, - "2f:fb:b0:65:68:c8:4e:a6:1b:a6:4b:8d:14:0b:40:79", - } - - ValidKeyThree = SSHKey{ - `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpGj1JMjGjAFt5wjARbIORyjQ/c` + - `ZAiDyDHe/w8qmLKUG2KTs6586QqqM6DKPZiYesrzXqvZsWYV4B6OjLM1sxq` + - `WjeDIl56PSnJ0+KP8pUV9KTkkKtRXxAoNg/II4l69e05qGffj9AcQ/7JPxx` + - `eL14Ulvh/a69r3uVkw1UGVk9Bwm4eCOSCqKalYLA1k5da6crEAXn9hiXLGs` + - `S9dOn3Lsqj5tK31aaUncue+a3iKb7R5LRFflDizzNS+h8tPuANQflOjOhR0` + - `Vas0BsurgISseZZ0NIMISyWhZpr0eOBWA/YruN9r++kYPOnDy0eMaOVGLO7` + - `SQwJ/6QHvf73yksJTncz`, - "1d:cf:ab:66:8a:f6:77:fb:4c:b2:59:6f:12:cf:cb:2f", - } - - ValidKeyFour = SSHKey{ - `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCSEDMH5RyjGtEMIqM2RiPYYQgUK` + - `9wdHCo1/AXkuQ7m1iVjHhACp8Oawf2Grn7hO4e0JUn5FaEZOnDj/9HB2VPw` + - `EDGBwSN1caVC3yrTVkqQcsxBY9nTV+spQQMsePOdUZALcoEilvAcLRETbyn` + - `rybaS2bfzpqbA9MEEaKQKLKGdgqiMdNXAj5I/ik/BPp0ziOMlMl1A1zilnS` + - `UXubs1U49WWV0A70vAASvZVTXr3zrPAmstH+9Ik6FdpeE99um08FXxKYWqZ` + - `6rZF1M6L1/SqC7ediYdVgRCoti85kKhi7fZBzwrGcCnxer+D0GFz++KDSNS` + - `iAnVZxyXhmBrwnR6Q/v7`, - "37:99:ab:96:c4:e8:f8:0b:0d:04:3e:1e:ee:66:e8:9e", - } - - ValidKeyMulti = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDW+8zWO6qqXrHlcMK7obliuYp7D` + - `vZBsK6rHlnbeV5Hh38Qn0GUX4Ahm6XeQ/NSx53wqkBQDGOJFY3s4w1a/hbd` + - `PyLM2/yFXCYsj5FRf01JmUjAzWhuJMH9ViqzD//l4v8cR/pHC2B8PD6abKd` + - `mIH+yLI9Cl3C4ICMKteG54egsUyboBOVKCDIKmWRLAak6sE5DPpqKF53NvD` + - `cuDufWtaCfVAOrq6NW8wSQ7PAvfDh8gsG5uvZjY3gcWl9yI3EJVGFHcdxcv` + - `4LtQI8mKdeg3JoufnEmeBJTZMoo83Gru5Z7tjv8J4JTUeQpd9uCCED1JAMe` + - `cJSKgQ2gZMTbTshobpHr` + "\n" + - `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSgfrzyGpE5eLiXusvLcxEmoE6e` + - `SMUDvTW1dd2BZgfvUVwq+toQdZ6C0C1JmbC3X563n8fmKVUAQGo5JavzABG` + - `Kpy90L3cwoGCFtb+A28YsT+bfuP+LdnCbFXm9c3DPJQx6Dch8prnDtzRjRV` + - `CorbPvm35NY73liUXVF6g58Owlx5rWtb8OnoTh5KQps9JTSfyNckdV9bFxP` + - `7bZvMyRYW5X33KaA+CQGpTNAKDHruSuKdAdaS6rBIZRvzzzSCF28BWwFL7Z` + - `ghQo0ADlUMnqIeQ58nwRImZHpmvadsZi47aMKFeykk4JQUQlwjbM0xGi0uj` + - `+hlaqGYbNo0Evcjn23cj` - - PartValidKeyMulti = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZRvG2miYVkbWOr2I+9xHWXqALb` + - `eBcyxAlYtbjxBRwrq8oFOw9vtIIZSO0r1FM6+JHzKhLSiPCMR/PK78ZqPgZ` + - `fia8Y7cEZKaUWLtZUAl0RF9w8EtsA/2gpuLZErjcoIx6fzfEYFCJcLgcQSc` + - `RlKG8VZT6tWIjvoLj9ki6unkG5YGmapkT60afhf3/vd7pCJO/uyszkQ9qU8` + - `odUDTTlwftpJtUb8xGmzpEZJTgk1lbZKlZm5pVXwjNEodH7Je88RBzR7PBB` + - `Jct+vf8wVJ/UEFXCnamvHLanJTcJIi/I5qRlKns65Bwb8M0HszPYmvTfFRD` + - `ZLi3sPUmw6PJCJ0SgATd` + "\n" + - `ssh-rsa bad key` - - MultiInvalid = `ssh-rsa bad key` + "\n" + - `ssh-rsa also 
bad` - - EmptyKeyMulti = "" -) === removed directory 'src/github.com/juju/juju/utils/syslog' === removed file 'src/github.com/juju/juju/utils/syslog/config.go' --- src/github.com/juju/juju/utils/syslog/config.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/syslog/config.go 1970-01-01 00:00:00 +0000 @@ -1,357 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package syslog - -import ( - "bytes" - "fmt" - "io/ioutil" - "path/filepath" - "strings" - "text/template" -) - -// tagOffset represents the substring start value for the tag to return -// the logfileName value from the syslogtag. Substrings in syslog are -// indexed from 1, hence the + 1. -const tagOffset = len("juju-") + 1 - -// The rsyslog conf for state server nodes. -// Messages are gathered from other nodes and accumulated in an all-machines.log file. -// -// The apparmor profile is quite strict about where rsyslog can write files. -// Instead of poking with the profile, the local provider now logs to -// {{logDir}}-{{user}}-{{env name}}/all-machines.log, and a symlink is made -// in the local provider log dir to point to that file. By -// default rsyslog creates files with 0644, but in the ubuntu package, the -// setting is changed to 0640. Using a new action directive (new as in -// not-legacy), we can specify the file create mode so it doesn't use -// the default. -// -// I would dearly love to write the filtering action as follows to avoid setting -// and resetting the global $FileCreateMode, but alas, precise doesn't support it -// -// if $syslogtag startswith "juju{{namespace}}-" then -// action(type="omfile" -// File="{{logDir}}{{namespace}}/all-machines.log" -// Template="JujuLogFormat{{namespace}}" -// FileCreateMode="0644") -// & stop -// -// Instead we need to mess with the global FileCreateMode. We set it back -// to the ubuntu default after defining our rule. -const stateServerRsyslogTemplate = ` -$ModLoad imuxsock -$ModLoad imfile - -# Messages received from remote rsyslog machines have messages prefixed with a space, -# so add one in for local messages too if needed. -$template JujuLogFormat{{namespace}},"%syslogtag:{{tagStart}}:$%%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n" - -$template LongTagForwardFormat,"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% %syslogtag%%msg:::sp-if-no-1st-sp%%msg%" - -{{range $i, $stateServerIP := stateServerHosts}} -# start: Forwarding rule for {{$stateServerIP}} -$ActionQueueType LinkedList -$ActionQueueFileName {{logfileName}}{{namespace}}_{{$i}} -$ActionResumeRetryCount -1 -$ActionQueueSaveOnShutdown on -$ActionQueueMaxDiskSpace 512M -$DefaultNetstreamDriver gtls -$DefaultNetstreamDriverCAFile {{tlsCACertPath}} -$ActionSendStreamDriverAuthMode anon -$ActionSendStreamDriverMode 1 # run driver in TLS-only mode - -:syslogtag, startswith, "juju{{namespace}}-" @@{{$stateServerIP}}:{{portNumber}};LongTagForwardFormat -# end: Forwarding rule for {{$stateServerIP}} -{{end}} -:syslogtag, startswith, "juju{{namespace}}-" stop - -$FileCreateMode 0600 - -# Maximum size for the log on this outchannel is 512MB -# The command to execute when an outchannel as reached its size limit cannot accept any arguments -# that is why we have created the helper script for executing logrotate. 
-$outchannel logRotation,{{logDir}}/all-machines.log,536870912,{{logrotateHelperPath}}
-
-$RuleSet remote
-$FileCreateMode 0600
-:syslogtag, startswith, "juju{{namespace}}-" :omfile:$logRotation;JujuLogFormat{{namespace}}
-:syslogtag, startswith, "juju{{namespace}}-" stop
-$FileCreateMode 0600
-
-$InputFilePersistStateInterval 50
-$InputFilePollInterval 5
-$InputFileName {{logfilePath}}
-$InputFileTag juju{{namespace}}-{{logfileName}}:
-$InputFileStateFile {{logfileName}}{{namespace}}
-$InputRunFileMonitor
-
-$ModLoad imtcp
-$DefaultNetstreamDriver gtls
-$DefaultNetstreamDriverCAFile {{tlsCACertPath}}
-$DefaultNetstreamDriverCertFile {{tlsCertPath}}
-$DefaultNetstreamDriverKeyFile {{tlsKeyPath}}
-$InputTCPServerStreamDriverAuthMode anon
-$InputTCPServerStreamDriverMode 1 # run driver in TLS-only mode
-$InputTCPMaxSessions 10000 # default is 200, all agents connect to all rsyslog daemons
-
-$InputTCPServerBindRuleset remote
-$InputTCPServerRun {{portNumber}}
-
-# switch back to default ruleset for further rules
-$RuleSet RSYSLOG_DefaultRuleset
-`
-
-// The rsyslog conf for non-state server nodes.
-// Messages are forwarded to the state server node.
-//
-// Each forwarding rule must be repeated in full for every state server and
-// each rule must use a unique ActionQueueFileName.
-// See: http://www.rsyslog.com/doc/rsyslog_reliable_forwarding.html
-const nodeRsyslogTemplate = `
-$ModLoad imuxsock
-$ModLoad imfile
-
-$InputFilePersistStateInterval 50
-$InputFilePollInterval 5
-$InputFileName {{logfilePath}}
-$InputFileTag juju{{namespace}}-{{logfileName}}:
-$InputFileStateFile {{logfileName}}{{namespace}}
-$InputRunFileMonitor
-{{range $i, $stateServerIP := stateServerHosts}}
-# start: Forwarding rule for {{$stateServerIP}}
-$ActionQueueType LinkedList
-$ActionQueueFileName {{logfileName}}{{namespace}}_{{$i}}
-$ActionResumeRetryCount -1
-$ActionQueueSaveOnShutdown on
-$ActionQueueMaxDiskSpace 512M
-$DefaultNetstreamDriver gtls
-$DefaultNetstreamDriverCAFile {{tlsCACertPath}}
-$ActionSendStreamDriverAuthMode anon
-$ActionSendStreamDriverMode 1 # run driver in TLS-only mode
-
-$template LongTagForwardFormat,"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% %syslogtag%%msg:::sp-if-no-1st-sp%%msg%"
-:syslogtag, startswith, "juju{{namespace}}-" @@{{$stateServerIP}}:{{portNumber}};LongTagForwardFormat
-# end: Forwarding rule for {{$stateServerIP}}
-{{end}}
-& ~
-`
-
-// The logrotate conf for state server nodes.
-// The default size is 512MB, ensuring that the log + one rotation
-// will never take up more than 1GB of space.
-//
-// The size set in the logrotate configuration below is deliberately low;
-// see the comment inside the config itself.
-const logrotateConf = `
-{{.LogDir}}/all-machines.log {
- # rsyslogd informs logrotate when to rotate.
- # The size specified here must be less than or equal to the log size
- # when rsyslogd informs logrotate, or logrotate will take no action.
- # The size value is otherwise unimportant.
- size 1K
- # maximum of one old file
- rotate 1
- # counting old files starts at 1 rather than 0
- start 1
- # ensure new file is created with the correct permissions
- create 600
- # reload rsyslog after rotation so it will use the new file
- postrotate
- service rsyslog reload
- endscript
-}
-`
-
-var logrotateConfTemplate = template.Must(template.New("logrotate.conf").Parse(logrotateConf))
-
-// The logrotate helper script for state server nodes.
-// We specify a state file to ensure we have the proper permissions.
-const logrotateHelper = `
-/usr/sbin/logrotate -s {{.LogDir}}/logrotate.state {{.LogrotateConfPath}}
-`
-
-var logrotateHelperTemplate = template.Must(template.New("logrotate.run").Parse(logrotateHelper))
-
-// nodeRsyslogTemplateTLSHeader is prepended to
-// nodeRsyslogTemplate if TLS is to be used.
-const nodeRsyslogTemplateTLSHeader = `
-`
-
-const (
- defaultConfigDir = "/etc/rsyslog.d"
- defaultCACertFileName = "ca-cert.pem"
- defaultServerCertFileName = "rsyslog-cert.pem"
- defaultServerKeyFileName = "rsyslog-key.pem"
- defaultLogrotateConfFileName = "logrotate.conf"
- defaultLogrotateHelperFileName = "logrotate.run"
-)
-
-// SyslogConfigRenderer instances are used to generate a rsyslog conf file.
-type SyslogConfigRenderer interface {
- Render() ([]byte, error)
-}
-
-// SyslogConfig provides a means to configure and generate rsyslog conf files for
-// the state server nodes and unit nodes.
-// rsyslog is configured to tail the specified log file.
-type SyslogConfig struct {
- // the template representing the config file contents.
- configTemplate string
- // the directory where the config file is written.
- ConfigDir string
- // the config file name.
- ConfigFileName string
- // the name of the log file to tail.
- LogFileName string
- // the name of the logrotate configuration file.
- LogrotateConfFileName string
- // the name of the script that executes the logrotate command.
- LogrotateHelperFileName string
- // the addresses of the state server to which messages should be forwarded.
- StateServerAddresses []string
- // CA certificate file name.
- CACertFileName string
- // Server certificate file name.
- ServerCertFileName string
- // Server private key file name.
- ServerKeyFileName string
- // the port number for the listener
- Port int
- // the directory for the logfiles
- LogDir string
- // namespace is used when there are multiple environments on one machine
- Namespace string
- // directory where juju stores its config files
- JujuConfigDir string
-}
-
-// NewForwardConfig creates a SyslogConfig instance used on unit nodes to forward log entries
-// to the state server nodes.
-func NewForwardConfig(cfg *SyslogConfig) {
- cfg.configTemplate = nodeRsyslogTemplate
-}
-
-// NewAccumulateConfig creates a SyslogConfig instance used to accumulate log entries from the
-// various unit nodes.
-func NewAccumulateConfig(cfg *SyslogConfig) {
- cfg.configTemplate = stateServerRsyslogTemplate
-}
-
-func either(a, b string) string {
- if a != "" {
- return a
- }
- return b
-}
-
-func (slConfig *SyslogConfig) renderNamespace() string {
- if slConfig.Namespace == "" {
- return ""
- }
- return "-" + slConfig.Namespace
-}
-
-func (slConfig *SyslogConfig) ConfigFilePath() string {
- dir := either(slConfig.ConfigDir, defaultConfigDir)
- return filepath.Join(dir, slConfig.ConfigFileName)
-}
-
-func (slConfig *SyslogConfig) CACertPath() string {
- filename := either(slConfig.CACertFileName, defaultCACertFileName)
- return filepath.Join(slConfig.JujuConfigDir, filename)
-}
-
-func (slConfig *SyslogConfig) ServerCertPath() string {
- filename := either(slConfig.ServerCertFileName, defaultServerCertFileName)
- return filepath.Join(slConfig.JujuConfigDir, filename)
-}
-
-func (slConfig *SyslogConfig) ServerKeyPath() string {
- filename := either(slConfig.ServerKeyFileName, defaultServerKeyFileName)
- return filepath.Join(slConfig.JujuConfigDir, filename)
-}
-
-// LogrotateConfPath returns the entire logrotate.conf path including filename.
-func (slConfig *SyslogConfig) LogrotateConfPath() string { - filename := either(slConfig.LogrotateConfFileName, defaultLogrotateConfFileName) - return filepath.Join(slConfig.JujuConfigDir, filename) -} - -// LogrotateHelperPath returns the entire logrotate.helper path including filename. -func (slConfig *SyslogConfig) LogrotateHelperPath() string { - filename := either(slConfig.LogrotateHelperFileName, defaultLogrotateHelperFileName) - return filepath.Join(slConfig.JujuConfigDir, filename) -} - -// LogrotateConfFile returns a ready to write to disk byte array of the logrotate.conf file. -func (slConfig *SyslogConfig) LogrotateConfFile() ([]byte, error) { - return slConfig.logrotateRender(logrotateConfTemplate) -} - -// LogrotateHelperFile returns a ready to write to disk byte array of the logrotate.helper file. -func (slConfig *SyslogConfig) LogrotateHelperFile() ([]byte, error) { - return slConfig.logrotateRender(logrotateHelperTemplate) -} - -func (slConfig *SyslogConfig) logrotateRender(t *template.Template) ([]byte, error) { - var buffer bytes.Buffer - if err := t.Execute(&buffer, slConfig); err != nil { - return nil, err - } - return buffer.Bytes(), nil -} - -// Render generates the rsyslog config. -func (slConfig *SyslogConfig) Render() ([]byte, error) { - var stateServerHosts = func() []string { - var hosts []string - for _, addr := range slConfig.StateServerAddresses { - parts := strings.Split(addr, ":") - hosts = append(hosts, parts[0]) - } - return hosts - } - - var logFilePath = func() string { - return fmt.Sprintf("%s/%s.log", slConfig.LogDir, slConfig.LogFileName) - } - - t := template.New("syslogConfig") - t.Funcs(template.FuncMap{ - "logfileName": func() string { return slConfig.LogFileName }, - "stateServerHosts": stateServerHosts, - "logfilePath": logFilePath, - "portNumber": func() int { return slConfig.Port }, - "logDir": func() string { return slConfig.LogDir }, - "namespace": slConfig.renderNamespace, - "tagStart": func() int { return tagOffset + len(slConfig.renderNamespace()) }, - "tlsCACertPath": slConfig.CACertPath, - "tlsCertPath": slConfig.ServerCertPath, - "tlsKeyPath": slConfig.ServerKeyPath, - "logrotateHelperPath": slConfig.LogrotateHelperPath, - }) - - // Process the rsyslog config template and echo to the conf file. - p, err := t.Parse(slConfig.configTemplate) - if err != nil { - return nil, err - } - var confBuf bytes.Buffer - if err := p.Execute(&confBuf, nil); err != nil { - return nil, err - } - return confBuf.Bytes(), nil -} - -// Write generates and writes the rsyslog config. -func (slConfig *SyslogConfig) Write() error { - data, err := slConfig.Render() - if err != nil { - return err - } - err = ioutil.WriteFile(slConfig.ConfigFilePath(), data, 0644) - return err -} === removed file 'src/github.com/juju/juju/utils/syslog/config_test.go' --- src/github.com/juju/juju/utils/syslog/config_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/utils/syslog/config_test.go 1970-01-01 00:00:00 +0000 @@ -1,178 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
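Two notes on the code above. First, the tag-offset arithmetic in Render's FuncMap: rsyslog substrings are 1-indexed, so with an empty namespace tagStart is tagOffset + 0 = 6, and %syslogtag:6:$% applied to the tag "juju-machine-0:" keeps everything from the sixth character onwards, i.e. "machine-0:"; with namespace "ns", renderNamespace() yields "-ns" and tagStart becomes 6 + 3 = 9, stripping the longer "juju-ns-" prefix the same way. Second, the removed API was driven roughly as follows; this is a sketch only, with illustrative paths and port:

    cfg := &syslog.SyslogConfig{
        LogFileName:          "machine-0",
        LogDir:               "/var/log/juju",
        JujuConfigDir:        "/var/lib/juju",
        Port:                 6514,
        StateServerAddresses: []string{"10.0.0.1:6514"},
        ConfigDir:            "/etc/rsyslog.d",
        ConfigFileName:       "25-juju.conf",
    }
    syslog.NewAccumulateConfig(cfg) // state server; NewForwardConfig on unit nodes
    if err := cfg.Write(); err != nil { // Render() plus a write to ConfigFilePath()
        return err
    }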
- -package syslog_test - -import ( - "io/ioutil" - "path/filepath" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/agent" - "github.com/juju/juju/utils/syslog" - syslogtesting "github.com/juju/juju/utils/syslog/testing" -) - -type syslogConfigSuite struct { - testing.IsolationSuite - configDir string -} - -var _ = gc.Suite(&syslogConfigSuite{}) - -func (s *syslogConfigSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.configDir = c.MkDir() -} - -func (s *syslogConfigSuite) assertRsyslogConfigPath(c *gc.C, slConfig *syslog.SyslogConfig) { - slConfig.ConfigDir = s.configDir - slConfig.ConfigFileName = "rsyslog.conf" - c.Assert(slConfig.ConfigFilePath(), gc.Equals, filepath.Join(s.configDir, "rsyslog.conf")) -} - -func (s *syslogConfigSuite) assertRsyslogConfigContents(c *gc.C, slConfig *syslog.SyslogConfig, expectedConf string) { - data, err := slConfig.Render() - c.Assert(err, jc.ErrorIsNil) - if len(data) == 0 { - c.Fatal("got empty data from render") - } - d := string(data) - if d != expectedConf { - diff(c, d, expectedConf) - c.Fail() - } -} - -func args() syslogtesting.TemplateArgs { - return syslogtesting.TemplateArgs{ - MachineTag: "some-machine", - LogDir: agent.DefaultLogDir, - DataDir: agent.DefaultDataDir, - Port: 8888, - Server: "server", - } -} - -func cfg() *syslog.SyslogConfig { - return &syslog.SyslogConfig{ - LogFileName: "some-machine", - LogDir: agent.DefaultLogDir, - JujuConfigDir: agent.DefaultDataDir, - Port: 8888, - StateServerAddresses: []string{"server"}, - } -} - -func (s *syslogConfigSuite) TestAccumulateConfigRender(c *gc.C) { - cfg := cfg() - syslog.NewAccumulateConfig(cfg) - s.assertRsyslogConfigContents( - c, - cfg, - syslogtesting.ExpectedAccumulateSyslogConf(c, args()), - ) -} - -func (s *syslogConfigSuite) TestAccumulateConfigWrite(c *gc.C) { - syslogConfigRenderer := cfg() - syslog.NewAccumulateConfig(syslogConfigRenderer) - syslogConfigRenderer.ConfigDir = s.configDir - syslogConfigRenderer.ConfigFileName = "rsyslog.conf" - s.assertRsyslogConfigPath(c, syslogConfigRenderer) - err := syslogConfigRenderer.Write() - c.Assert(err, jc.ErrorIsNil) - syslogConfData, err := ioutil.ReadFile(syslogConfigRenderer.ConfigFilePath()) - c.Assert(err, jc.ErrorIsNil) - c.Assert( - string(syslogConfData), - gc.Equals, - syslogtesting.ExpectedAccumulateSyslogConf(c, args()), - ) -} - -func (s *syslogConfigSuite) TestAccumulateConfigRenderWithNamespace(c *gc.C) { - cfg := cfg() - cfg.Namespace = "namespace" - cfg.JujuConfigDir = cfg.JujuConfigDir + "-" + cfg.Namespace - cfg.LogDir = cfg.LogDir + "-" + cfg.Namespace - - args := args() - args.Namespace = "namespace" - syslog.NewAccumulateConfig(cfg) - s.assertRsyslogConfigContents( - c, - cfg, - syslogtesting.ExpectedAccumulateSyslogConf(c, args), - ) -} - -func (s *syslogConfigSuite) TestForwardConfigRender(c *gc.C) { - cfg := cfg() - syslog.NewForwardConfig(cfg) - s.assertRsyslogConfigContents( - c, - cfg, - syslogtesting.ExpectedForwardSyslogConf(c, args()), - ) -} - -func (s *syslogConfigSuite) TestForwardConfigRenderWithNamespace(c *gc.C) { - cfg := cfg() - cfg.Namespace = "namespace" - args := args() - args.Namespace = "namespace" - syslog.NewForwardConfig(cfg) - s.assertRsyslogConfigContents( - c, - cfg, - syslogtesting.ExpectedForwardSyslogConf(c, args), - ) -} - -func (s *syslogConfigSuite) TestForwardConfigWrite(c *gc.C) { - syslogConfigRenderer := cfg() - syslogConfigRenderer.ConfigDir = s.configDir - 
syslogConfigRenderer.ConfigFileName = "rsyslog.conf"
- syslog.NewForwardConfig(syslogConfigRenderer)
- s.assertRsyslogConfigPath(c, syslogConfigRenderer)
- err := syslogConfigRenderer.Write()
- c.Assert(err, jc.ErrorIsNil)
- syslogConfData, err := ioutil.ReadFile(syslogConfigRenderer.ConfigFilePath())
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(
- string(syslogConfData),
- gc.Equals,
- syslogtesting.ExpectedForwardSyslogConf(c, args()),
- )
-}
-
-func diff(c *gc.C, got, exp string) {
- expR := []rune(exp)
- gotR := []rune(got)
- for x := 0; x < len(expR); x++ {
- if x >= len(gotR) {
- c.Log("String obtained is truncated version of expected.")
- c.Errorf("Expected: %s, got: %s", exp, got)
- return
- }
- if expR[x] != gotR[x] {
- c.Logf("Diff at offset %d", x)
- gotDiff := string(gotR[x:min(x+50, len(gotR))])
- expDiff := string(expR[x:min(x+50, len(expR))])
- c.Logf("Diff at offset - obtained: %#v\nexpected: %#v", gotDiff, expDiff)
- c.Assert(got, gc.Equals, exp)
- return
- }
- }
-}
-
-func min(x, y int) int {
- if x < y {
- return x
- }
- return y
-} === removed file 'src/github.com/juju/juju/utils/syslog/package_test.go'
--- src/github.com/juju/juju/utils/syslog/package_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/utils/syslog/package_test.go 1970-01-01 00:00:00 +0000
@@ -1,19 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package syslog_test
-
-import (
- "runtime"
- "testing"
-
- gc "gopkg.in/check.v1"
-)
-
-func TestPackage(t *testing.T) {
- //TODO(bogdanteleaga): Fix this on windows
- if runtime.GOOS == "windows" {
- t.Skip("bug 1403084: Skipping rsyslog tests on windows")
- }
- gc.TestingT(t)
-} === removed file 'src/github.com/juju/juju/utils/syslog/service.go'
--- src/github.com/juju/juju/utils/syslog/service.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/utils/syslog/service.go 1970-01-01 00:00:00 +0000
@@ -1,37 +0,0 @@
-// Copyright 2013 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package syslog
-
-import (
- "os"
-
- "github.com/juju/errors"
-
- "github.com/juju/juju/service"
-)
-
-const svcName = "rsyslog"
-
-// These are patched out during tests.
-var (
- getEuid = func() int {
- return os.Geteuid()
- }
- restart = func(name string) error {
- return service.Restart(name)
- }
-)
-
-// Restart restarts the "rsyslog" service using the local host's
-// currently running init system (e.g. upstart, systemd). If the caller
-// is not a superuser then Restart returns an error.
-func Restart() error {
- if getEuid() == 0 {
- if err := restart(svcName); err != nil {
- return errors.Annotatef(err, "failed to restart service %q", svcName)
- }
- return nil
- }
- return errors.Errorf("must be root")
-} === removed file 'src/github.com/juju/juju/utils/syslog/service_test.go'
--- src/github.com/juju/juju/utils/syslog/service_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/utils/syslog/service_test.go 1970-01-01 00:00:00 +0000
@@ -1,47 +0,0 @@
-// Copyright 2015 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
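The getEuid and restart variables in service.go above exist purely as test seams; the BaseSuite later in this diff swaps them for testing.Stub-backed fakes. The same pattern in miniature, inside a suite's SetUpTest (a sketch, not a verbatim excerpt):

    s.PatchValue(&getEuid, func() int { return 0 })                // pretend to be root
    s.PatchValue(&restart, func(name string) error { return nil }) // never touch init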
- -package syslog_test - -import ( - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/utils/syslog" -) - -type serviceSuite struct { - syslog.BaseSuite -} - -var _ = gc.Suite(&serviceSuite{}) - -func (s *serviceSuite) TestRestartRoot(c *gc.C) { - s.Stub.Euid = 0 - - err := syslog.Restart() - c.Assert(err, jc.ErrorIsNil) - - s.Stub.CheckCallNames(c, "Geteuid", "Restart") -} - -func (s *serviceSuite) TestRestartNotRoot(c *gc.C) { - s.Stub.Euid = 1000 - - err := syslog.Restart() - - c.Check(err, gc.ErrorMatches, `.*must be root.*`) - s.Stub.CheckCallNames(c, "Geteuid") -} - -func (s *serviceSuite) TestRestartError(c *gc.C) { - s.Stub.Euid = 0 - failure := errors.New("") - s.Stub.SetErrors(nil, failure) // Geteuid, Restart - - err := syslog.Restart() - - c.Check(errors.Cause(err), gc.Equals, failure) - s.Stub.CheckCallNames(c, "Geteuid", "Restart") -} === removed directory 'src/github.com/juju/juju/utils/syslog/testing' === removed file 'src/github.com/juju/juju/utils/syslog/testing/syslogconf.go' --- src/github.com/juju/juju/utils/syslog/testing/syslogconf.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/syslog/testing/syslogconf.go 1970-01-01 00:00:00 +0000 @@ -1,139 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package testing - -import ( - "bytes" - "text/template" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" -) - -var expectedAccumulateSyslogConfTemplate = ` -$ModLoad imuxsock -$ModLoad imfile - -# Messages received from remote rsyslog machines have messages prefixed with a space, -# so add one in for local messages too if needed. -$template JujuLogFormat{{.Namespace}},"%syslogtag:{{.Offset}}:$%%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n" - -$template LongTagForwardFormat,"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% %syslogtag%%msg:::sp-if-no-1st-sp%%msg%" - - -# start: Forwarding rule for {{.Server}} -$ActionQueueType LinkedList -$ActionQueueFileName {{.MachineTag}}{{.Namespace}}_0 -$ActionResumeRetryCount -1 -$ActionQueueSaveOnShutdown on -$ActionQueueMaxDiskSpace 512M -$DefaultNetstreamDriver gtls -$DefaultNetstreamDriverCAFile {{.DataDir}}{{.Namespace}}/ca-cert.pem -$ActionSendStreamDriverAuthMode anon -$ActionSendStreamDriverMode 1 # run driver in TLS-only mode - -:syslogtag, startswith, "juju{{.Namespace}}-" @@{{.Server}}:{{.Port}};LongTagForwardFormat -# end: Forwarding rule for {{.Server}} - -:syslogtag, startswith, "juju{{.Namespace}}-" stop - -$FileCreateMode 0600 - -# Maximum size for the log on this outchannel is 512MB -# The command to execute when an outchannel as reached its size limit cannot accept any arguments -# that is why we have created the helper script for executing logrotate. 
-$outchannel logRotation,{{.LogDir}}{{.Namespace}}/all-machines.log,536870912,{{.DataDir}}{{.Namespace}}/logrotate.run - -$RuleSet remote -$FileCreateMode 0600 -:syslogtag, startswith, "juju{{.Namespace}}-" :omfile:$logRotation;JujuLogFormat{{.Namespace}} -:syslogtag, startswith, "juju{{.Namespace}}-" stop -$FileCreateMode 0600 - -$InputFilePersistStateInterval 50 -$InputFilePollInterval 5 -$InputFileName {{.LogDir}}{{.Namespace}}/{{.MachineTag}}.log -$InputFileTag juju{{.Namespace}}-{{.MachineTag}}: -$InputFileStateFile {{.MachineTag}}{{.Namespace}} -$InputRunFileMonitor - -$ModLoad imtcp -$DefaultNetstreamDriver gtls -$DefaultNetstreamDriverCAFile {{.DataDir}}{{.Namespace}}/ca-cert.pem -$DefaultNetstreamDriverCertFile {{.DataDir}}{{.Namespace}}/rsyslog-cert.pem -$DefaultNetstreamDriverKeyFile {{.DataDir}}{{.Namespace}}/rsyslog-key.pem -$InputTCPServerStreamDriverAuthMode anon -$InputTCPServerStreamDriverMode 1 # run driver in TLS-only mode -$InputTCPMaxSessions 10000 # default is 200, all agents connect to all rsyslog daemons - -$InputTCPServerBindRuleset remote -$InputTCPServerRun {{.Port}} - -# switch back to default ruleset for further rules -$RuleSet RSYSLOG_DefaultRuleset -` - -type TemplateArgs struct { - MachineTag string - LogDir string - DataDir string - Namespace string - Server string - Port int - Offset int -} - -// ExpectedAccumulateSyslogConf returns the expected content for a rsyslog file on a state server. -func ExpectedAccumulateSyslogConf(c *gc.C, args TemplateArgs) string { - if args.Namespace != "" { - args.Namespace = "-" + args.Namespace - } - args.Offset = len("juju-") + len(args.Namespace) + 1 - t := template.Must(template.New("").Parse(expectedAccumulateSyslogConfTemplate)) - var conf bytes.Buffer - err := t.Execute(&conf, args) - c.Assert(err, jc.ErrorIsNil) - return conf.String() -} - -var expectedForwardSyslogConfTemplate = ` -$ModLoad imuxsock -$ModLoad imfile - -$InputFilePersistStateInterval 50 -$InputFilePollInterval 5 -$InputFileName {{.LogDir}}/{{.MachineTag}}.log -$InputFileTag juju{{.Namespace}}-{{.MachineTag}}: -$InputFileStateFile {{.MachineTag}}{{.Namespace}} -$InputRunFileMonitor - -# start: Forwarding rule for {{.Server}} -$ActionQueueType LinkedList -$ActionQueueFileName {{.MachineTag}}{{.Namespace}}_0 -$ActionResumeRetryCount -1 -$ActionQueueSaveOnShutdown on -$ActionQueueMaxDiskSpace 512M -$DefaultNetstreamDriver gtls -$DefaultNetstreamDriverCAFile {{.DataDir}}/ca-cert.pem -$ActionSendStreamDriverAuthMode anon -$ActionSendStreamDriverMode 1 # run driver in TLS-only mode - -$template LongTagForwardFormat,"<%PRI%>%TIMESTAMP:::date-rfc3339% %HOSTNAME% %syslogtag%%msg:::sp-if-no-1st-sp%%msg%" -:syslogtag, startswith, "juju{{.Namespace}}-" @@{{.Server}}:{{.Port}};LongTagForwardFormat -# end: Forwarding rule for {{.Server}} - -& ~ -` - -// ExpectedForwardSyslogConf returns the expected content for a rsyslog file on a host machine. -func ExpectedForwardSyslogConf(c *gc.C, args TemplateArgs) string { - if args.Namespace != "" { - args.Namespace = "-" + args.Namespace - } - t := template.Must(template.New("").Parse(expectedForwardSyslogConfTemplate)) - var conf bytes.Buffer - err := t.Execute(&conf, args) - c.Assert(err, jc.ErrorIsNil) - return conf.String() -} === removed file 'src/github.com/juju/juju/utils/syslog/testing_test.go' --- src/github.com/juju/juju/utils/syslog/testing_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/utils/syslog/testing_test.go 1970-01-01 00:00:00 +0000 @@ -1,47 +0,0 @@ -// Copyright 2015 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package syslog - -import ( - "github.com/juju/testing" - gc "gopkg.in/check.v1" -) - -// Stub stubs out the external functions used in the syslog package. -type Stub struct { - testing.Stub - - Euid int -} - -// Geteuid is a stub for os.Geteuid. -func (s *Stub) Geteuid() int { - s.AddCall("Geteuid") - - // Pop off the err, even though we don't return it. - s.NextErr() - return s.Euid -} - -// Restart is a stub for service.Restart. -func (s *Stub) Restart(name string) error { - s.AddCall("Restart", name) - - return s.NextErr() -} - -// BaseSuite is the base suite for use in tests in the syslog package. -type BaseSuite struct { - testing.IsolationSuite - - Stub *Stub -} - -func (s *BaseSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - - s.Stub = &Stub{} - s.PatchValue(&getEuid, s.Stub.Geteuid) - s.PatchValue(&restart, s.Stub.Restart) -} === removed file 'src/github.com/juju/juju/utils/utils.go' --- src/github.com/juju/juju/utils/utils.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/utils/utils.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package utils - -// Must panics if the provided error is not nil. -func Must(err error) { - if err != nil { - panic(err) - } -} === modified file 'src/github.com/juju/juju/version/current_test.go' --- src/github.com/juju/juju/version/current_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/current_test.go 2016-03-22 15:18:22 +0000 @@ -7,9 +7,9 @@ "os/exec" "runtime" + "github.com/juju/utils/os" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" - - "github.com/juju/juju/version" ) type CurrentSuite struct{} @@ -17,11 +17,12 @@ var _ = gc.Suite(&CurrentSuite{}) func (*CurrentSuite) TestCurrentSeries(c *gc.C) { - s := version.Current.Series + s := series.HostSeries() if s == "unknown" { s = "n/a" } out, err := exec.Command("lsb_release", "-c").CombinedOutput() + if err != nil { // If the command fails (for instance if we're running on some other // platform) then CurrentSeries should be unknown. @@ -31,17 +32,16 @@ case "windows": c.Check(s, gc.Matches, `win2012hvr2|win2012hv|win2012|win2012r2|win8|win81|win7`) default: - c.Assert(s, gc.Equals, "n/a") + current_os, err := series.GetOSFromSeries(s) + c.Assert(err, gc.IsNil) + if s != "n/a" { + // There is no lsb_release command on CentOS. + if current_os == os.CentOS { + c.Check(s, gc.Matches, `centos7`) + } + } } } else { - os, err := version.GetOSFromSeries(s) - c.Assert(err, gc.IsNil) - // There is no lsb_release command on CentOS. - switch os { - case version.CentOS: - c.Check(s, gc.Matches, `centos7`) - default: - c.Assert(string(out), gc.Equals, "Codename:\t"+s+"\n") - } + c.Assert(string(out), gc.Equals, "Codename:\t"+s+"\n") } } === removed file 'src/github.com/juju/juju/version/export_test.go' --- src/github.com/juju/juju/version/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,24 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
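The reworked current_test.go above now resolves host information through the external github.com/juju/utils/series and github.com/juju/utils/os packages rather than juju's own version package; the shape of that lookup is roughly:

    s := series.HostSeries()                 // e.g. "trusty"
    osType, err := series.GetOSFromSeries(s) // e.g. os.Ubuntu
    if err == nil && osType == os.CentOS {
        // CentOS images carry no lsb_release command.
    }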
- -package version - -var ( - DistroInfo = &distroInfo - ReadSeries = readSeries - KernelToMajor = kernelToMajor - MacOSXSeriesFromKernelVersion = macOSXSeriesFromKernelVersion - MacOSXSeriesFromMajorVersion = macOSXSeriesFromMajorVersion - OSReleaseFile = &osReleaseFile -) - -func SetSeriesVersions(value map[string]string) func() { - origVersions := seriesVersions - origUpdated := updatedseriesVersions - seriesVersions = value - updatedseriesVersions = len(value) != 0 - return func() { - seriesVersions = origVersions - updatedseriesVersions = origUpdated - } -} === removed file 'src/github.com/juju/juju/version/export_windows_test.go' --- src/github.com/juju/juju/version/export_windows_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/version/export_windows_test.go 1970-01-01 00:00:00 +0000 @@ -1,10 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -package version - -var ( - CurrentVersionKey = ¤tVersionKey - OSVersion = osVersion -) === removed file 'src/github.com/juju/juju/version/osversion.go' --- src/github.com/juju/juju/version/osversion.go 2016-03-14 14:42:57 +0000 +++ src/github.com/juju/juju/version/osversion.go 1970-01-01 00:00:00 +0000 @@ -1,147 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package version - -import ( - "fmt" - "io/ioutil" - "strconv" - "strings" - - "github.com/juju/errors" - "github.com/juju/loggo" -) - -var logger = loggo.GetLogger("juju.version") - -// mustOSVersion will panic if the osVersion is "unknown" due -// to an error. -// -// If you want to avoid the panic, call osVersion and handle -// the error. -func mustOSVersion() string { - version, err := osVersion() - if err != nil { - panic("osVersion reported an error: " + err.Error()) - } - return version -} - -// MustOSFromSeries will panic if the series represents an "unknown" -// operating system -func MustOSFromSeries(series string) OSType { - operatingSystem, err := GetOSFromSeries(series) - if err != nil { - panic("osVersion reported an error: " + err.Error()) - } - return operatingSystem -} - -func readOSRelease() (map[string]string, error) { - values := map[string]string{} - - contents, err := ioutil.ReadFile(osReleaseFile) - if err != nil { - return values, err - } - releaseDetails := strings.Split(string(contents), "\n") - for _, val := range releaseDetails { - c := strings.SplitN(val, "=", 2) - if len(c) != 2 { - continue - } - values[c[0]] = strings.Trim(c[1], "\t '\"") - } - id, ok := values["ID"] - if !ok { - return values, errors.New("OS release file is missing ID") - } - if _, ok := values["VERSION_ID"]; !ok { - values["VERSION_ID"], ok = defaultVersionIDs[id] - if !ok { - return values, errors.New("OS release file is missing VERSION_ID") - } - } - return values, nil -} - -func getValue(from map[string]string, val string) (string, error) { - for serie, ver := range from { - if ver == val { - return serie, nil - } - } - return "unknown", errors.New("Could not determine series") -} - -func readSeries() (string, error) { - values, err := readOSRelease() - if err != nil { - return "unknown", err - } - updateSeriesVersionsOnce() - switch values["ID"] { - case strings.ToLower(Ubuntu.String()): - return getValue(ubuntuSeries, values["VERSION_ID"]) - case strings.ToLower(Arch.String()): - return getValue(archSeries, values["VERSION_ID"]) - case strings.ToLower(CentOS.String()): - codename := fmt.Sprintf("%s%s", values["ID"], 
values["VERSION_ID"]) - return getValue(centosSeries, codename) - default: - return "unknown", nil - } -} - -// kernelToMajor takes a dotted version and returns just the Major portion -func kernelToMajor(getKernelVersion func() (string, error)) (int, error) { - fullVersion, err := getKernelVersion() - if err != nil { - return 0, err - } - parts := strings.SplitN(fullVersion, ".", 2) - majorVersion, err := strconv.ParseInt(parts[0], 10, 32) - if err != nil { - return 0, err - } - return int(majorVersion), nil -} - -func macOSXSeriesFromKernelVersion(getKernelVersion func() (string, error)) (string, error) { - majorVersion, err := kernelToMajor(getKernelVersion) - if err != nil { - logger.Infof("unable to determine OS version: %v", err) - return "unknown", err - } - return macOSXSeriesFromMajorVersion(majorVersion) -} - -// TODO(jam): 2014-05-06 https://launchpad.net/bugs/1316593 -// we should have a system file that we can read so this can be updated without -// recompiling Juju. For now, this is a lot easier, and also solves the fact -// that we want to populate version.Current.Series during init() time, before -// we've potentially read that information from anywhere else -// macOSXSeries maps from the Darwin Kernel Major Version to the Mac OSX -// series. -var macOSXSeries = map[int]string{ - 15: "elcapitan", - 14: "yosemite", - 13: "mavericks", - 12: "mountainlion", - 11: "lion", - 10: "snowleopard", - 9: "leopard", - 8: "tiger", - 7: "panther", - 6: "jaguar", - 5: "puma", -} - -func macOSXSeriesFromMajorVersion(majorVersion int) (string, error) { - series, ok := macOSXSeries[majorVersion] - if !ok { - return "unknown", errors.Errorf("unknown series %q", series) - } - return series, nil -} === removed file 'src/github.com/juju/juju/version/osversion_darwin.go' --- src/github.com/juju/juju/version/osversion_darwin.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/osversion_darwin.go 1970-01-01 00:00:00 +0000 @@ -1,17 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package version - -import ( - "syscall" -) - -func sysctlVersion() (string, error) { - return syscall.Sysctl("kern.osrelease") -} - -// osVersion returns the best approximation to what version this machine is. -func osVersion() (string, error) { - return macOSXSeriesFromKernelVersion(sysctlVersion) -} === removed file 'src/github.com/juju/juju/version/osversion_darwin_test.go' --- src/github.com/juju/juju/version/osversion_darwin_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/osversion_darwin_test.go 1970-01-01 00:00:00 +0000 @@ -1,31 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package version - -import ( - jc "github.com/juju/testing/checkers" - "github.com/juju/utils/set" - gc "gopkg.in/check.v1" -) - -type macOSXVersionSuite struct{} - -var _ = gc.Suite(&macOSXVersionSuite{}) - -func (*macOSXVersionSuite) TestGetSysctlVersionPlatform(c *gc.C) { - // Test that sysctlVersion returns something that looks like a dotted revision number - releaseVersion, err := sysctlVersion() - c.Assert(err, jc.ErrorIsNil) - c.Check(releaseVersion, gc.Matches, `\d+\..*`) -} - -func (s *macOSXVersionSuite) TestOSVersion(c *gc.C) { - knownSeries := make(set.Strings) - for _, series := range macOSXSeries { - knownSeries.Add(series) - } - version, err := osVersion() - c.Assert(err, jc.ErrorIsNil) - c.Check(version, jc.Satisfies, knownSeries.Contains) -} === removed file 'src/github.com/juju/juju/version/osversion_linux.go' --- src/github.com/juju/juju/version/osversion_linux.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/osversion_linux.go 1970-01-01 00:00:00 +0000 @@ -1,8 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package version - -func osVersion() (string, error) { - return readSeries() -} === removed file 'src/github.com/juju/juju/version/osversion_linux_test.go' --- src/github.com/juju/juju/version/osversion_linux_test.go 2015-04-14 14:11:54 +0000 +++ src/github.com/juju/juju/version/osversion_linux_test.go 1970-01-01 00:00:00 +0000 @@ -1,60 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package version_test - -import ( - "io/ioutil" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type linuxVersionSuite struct { - testing.BaseSuite -} - -var futureReleaseFileContents = `NAME="Ubuntu" -VERSION="99.04 LTS, Star Trek" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu spock (99.04 LTS)" -VERSION_ID="99.04" -` - -var distroInfoContents = `version,codename,series,created,release,eol,eol-server -12.04 LTS,Precise Pangolin,precise,2011-10-13,2012-04-26,2017-04-26 -99.04,Star Trek,spock,2364-04-25,2364-10-17,2365-07-17 -` - -var _ = gc.Suite(&linuxVersionSuite{}) - -func (s *linuxVersionSuite) SetUpTest(c *gc.C) { - cleanup := version.SetSeriesVersions(make(map[string]string)) - s.AddCleanup(func(*gc.C) { cleanup() }) -} - -func (s *linuxVersionSuite) TestOSVersion(c *gc.C) { - // Set up fake /etc/os-release file from the future. - d := c.MkDir() - release := filepath.Join(d, "future-release") - s.PatchValue(version.OSReleaseFile, release) - err := ioutil.WriteFile(release, []byte(futureReleaseFileContents), 0666) - c.Assert(err, jc.ErrorIsNil) - - // Set up fake /usr/share/distro-info/ubuntu.csv, also from the future. - distroInfo := filepath.Join(d, "ubuntu.csv") - err = ioutil.WriteFile(distroInfo, []byte(distroInfoContents), 0644) - c.Assert(err, jc.ErrorIsNil) - s.PatchValue(version.DistroInfo, distroInfo) - - // Ensure the future series can be read even though Juju doesn't - // know about it. - version, err := version.ReadSeries() - c.Assert(err, jc.ErrorIsNil) - c.Assert(version, gc.Equals, "spock") -} === removed file 'src/github.com/juju/juju/version/osversion_test.go' --- src/github.com/juju/juju/version/osversion_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/version/osversion_test.go 1970-01-01 00:00:00 +0000 @@ -1,210 +0,0 @@ -// Copyright 2014 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package version_test - -import ( - "fmt" - "io/ioutil" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type readSeriesSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&readSeriesSuite{}) - -type kernelVersionSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&kernelVersionSuite{}) - -var readSeriesTests = []struct { - contents string - series string - err string -}{{ - `NAME="Ubuntu" -VERSION="12.04.5 LTS, Precise Pangolin" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu precise (12.04.5 LTS)" -VERSION_ID="12.04" -`, - "precise", - "", -}, { - `NAME="Ubuntu" -ID=ubuntu -VERSION_ID= "12.04" `, - "precise", - "", -}, { - `NAME='Ubuntu' -ID='ubuntu' -VERSION_ID='12.04' -`, - "precise", - "", -}, { - `NAME="CentOS Linux" -ID="centos" -VERSION_ID="7" -`, - "centos7", - "", -}, { - `NAME="Arch Linux" -ID=arch -PRETTY_NAME="Arch Linux" -ANSI_COLOR="0;36" -HOME_URL="https://www.archlinux.org/" -SUPPORT_URL="https://bbs.archlinux.org/" -BUG_REPORT_URL="https://bugs.archlinux.org/" -`, - "arch", - "", -}, { - `NAME="Ubuntu" -VERSION="14.04.1 LTS, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu 14.04.1 LTS" -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" -`, - "trusty", - "", -}, { - "", - "unknown", - "OS release file is missing ID", -}, { - `NAME="CentOS Linux" -ID="centos" -`, - "unknown", - "OS release file is missing VERSION_ID", -}, { - `NAME="SuSE Linux" -ID="SuSE" -VERSION_ID="12" -`, - "unknown", - "", -}, -} - -func (s *readSeriesSuite) TestReadSeries(c *gc.C) { - d := c.MkDir() - f := filepath.Join(d, "foo") - s.PatchValue(version.OSReleaseFile, f) - for i, t := range readSeriesTests { - c.Logf("test %d", i) - err := ioutil.WriteFile(f, []byte(t.contents), 0666) - c.Assert(err, jc.ErrorIsNil) - series, err := version.ReadSeries() - if t.err == "" { - c.Assert(err, jc.ErrorIsNil) - } else { - c.Assert(err, gc.ErrorMatches, t.err) - } - - c.Assert(series, gc.Equals, t.series) - } -} - -func sysctlMacOS10dot9dot2() (string, error) { - // My 10.9.2 Mac gives "13.1.0" as the kernel version - return "13.1.0", nil -} - -func sysctlError() (string, error) { - return "", fmt.Errorf("no such syscall") -} - -func (*kernelVersionSuite) TestKernelToMajorVersion(c *gc.C) { - majorVersion, err := version.KernelToMajor(sysctlMacOS10dot9dot2) - c.Assert(err, jc.ErrorIsNil) - c.Check(majorVersion, gc.Equals, 13) -} - -func (*kernelVersionSuite) TestKernelToMajorVersionError(c *gc.C) { - majorVersion, err := version.KernelToMajor(sysctlError) - c.Assert(err, gc.ErrorMatches, "no such syscall") - c.Check(majorVersion, gc.Equals, 0) -} - -func (*kernelVersionSuite) TestKernelToMajorVersionNoDots(c *gc.C) { - majorVersion, err := version.KernelToMajor(func() (string, error) { - return "1234", nil - }) - c.Assert(err, jc.ErrorIsNil) - c.Check(majorVersion, gc.Equals, 1234) -} - -func (*kernelVersionSuite) TestKernelToMajorVersionNotInt(c *gc.C) { - majorVersion, err := version.KernelToMajor(func() (string, error) { - return "a.b.c", nil - }) - c.Assert(err, gc.ErrorMatches, `strconv.ParseInt: parsing "a": invalid syntax`) - c.Check(majorVersion, gc.Equals, 0) -} - -func (*kernelVersionSuite) TestKernelToMajorVersionEmpty(c *gc.C) { - majorVersion, err := version.KernelToMajor(func() (string, error) { - return "", 
nil - }) - c.Assert(err, gc.ErrorMatches, `strconv.ParseInt: parsing "": invalid syntax`) - c.Check(majorVersion, gc.Equals, 0) -} - -func (*kernelVersionSuite) TestMacOSXSeriesFromKernelVersion(c *gc.C) { - series, err := version.MacOSXSeriesFromKernelVersion(sysctlMacOS10dot9dot2) - c.Assert(err, jc.ErrorIsNil) - c.Check(series, gc.Equals, "mavericks") -} - -func (*kernelVersionSuite) TestMacOSXSeriesFromKernelVersionError(c *gc.C) { - // We suppress the actual error in favor of returning "unknown", but we - // do log the error - series, err := version.MacOSXSeriesFromKernelVersion(sysctlError) - c.Assert(err, gc.ErrorMatches, "no such syscall") - c.Assert(series, gc.Equals, "unknown") - c.Check(c.GetTestLog(), gc.Matches, ".* juju.version unable to determine OS version: no such syscall\n") -} - -func (*kernelVersionSuite) TestMacOSXSeries(c *gc.C) { - tests := []struct { - version int - series string - err string - }{ - {version: 13, series: "mavericks"}, - {version: 12, series: "mountainlion"}, - {version: 14, series: "yosemite"}, - {version: 15, series: "elcapitan"}, - {version: 16, series: "unknown", err: `unknown series ""`}, - {version: 4, series: "unknown", err: `unknown series ""`}, - {version: 0, series: "unknown", err: `unknown series ""`}, - } - for _, test := range tests { - series, err := version.MacOSXSeriesFromMajorVersion(test.version) - if test.err != "" { - c.Assert(err, gc.ErrorMatches, test.err) - } else { - c.Assert(err, jc.ErrorIsNil) - } - c.Check(series, gc.Equals, test.series) - } -} === removed file 'src/github.com/juju/juju/version/osversion_windows.go' --- src/github.com/juju/juju/version/osversion_windows.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/version/osversion_windows.go 1970-01-01 00:00:00 +0000 @@ -1,48 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. - -package version - -import ( - "strings" - - "github.com/gabriel-samfira/sys/windows/registry" - "github.com/juju/errors" -) - -// currentVersionKey is defined as a variable instead of a constant -// to allow overwriting during testing -var currentVersionKey = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion" - -func getVersionFromRegistry() (string, error) { - k, err := registry.OpenKey(registry.LOCAL_MACHINE, currentVersionKey, registry.QUERY_VALUE) - if err != nil { - return "", err - } - defer k.Close() - s, _, err := k.GetStringValue("ProductName") - if err != nil { - return "", err - } - - return s, nil -} - -func osVersion() (string, error) { - ver, err := getVersionFromRegistry() - if err != nil { - return "unknown", err - } - if val, ok := windowsVersions[ver]; ok { - return val, nil - } - for _, value := range windowsVersionMatchOrder { - if strings.HasPrefix(ver, value) { - if val, ok := windowsVersions[value]; ok { - return val, nil - } - } - } - return "unknown", errors.Errorf("unknown series %q", ver) -} === removed file 'src/github.com/juju/juju/version/osversion_windows_test.go' --- src/github.com/juju/juju/version/osversion_windows_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/version/osversion_windows_test.go 1970-01-01 00:00:00 +0000 @@ -1,112 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Copyright 2015 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. 
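A worked example of the fallback path in osVersion above: the registry ProductName "Windows Server 2012 R2 Datacenter" has no exact entry in windowsVersions, but the ordered scan of windowsVersionMatchOrder (both defined in supportedseries.go below) matches the prefix "Windows Server 2012 R2" before the shorter "Windows Server 2012", so the series resolves to "win2012r2". This is why the match order must list longer names first.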
- -package version_test - -import ( - "fmt" - - "github.com/juju/juju/version" - "github.com/juju/utils" - - "github.com/gabriel-samfira/sys/windows/registry" - "github.com/juju/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" -) - -type windowsVersionSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&windowsVersionSuite{}) - -var versionTests = []struct { - version string - want string -}{ - { - "Hyper-V Server 2012 R2", - "win2012hvr2", - }, - { - "Hyper-V Server 2012", - "win2012hv", - }, - { - "Windows Server 2012 R2", - "win2012r2", - }, - { - "Windows Server 2012", - "win2012", - }, - { - "Windows Server 2012 R2 Datacenter", - "win2012r2", - }, - { - "Windows Server 2012 Standard", - "win2012", - }, - { - "Windows Storage Server 2012 R2", - "win2012r2", - }, - { - "Windows Storage Server 2012 Standard", - "win2012", - }, - { - "Windows Storage Server 2012 R2 Standard", - "win2012r2", - }, - { - "Windows 7 Home", - "win7", - }, - { - "Windows 8 Pro", - "win8", - }, - { - "Windows 8.1 Pro", - "win81", - }, -} - -func (s *windowsVersionSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - salt, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - regKey := fmt.Sprintf(`SOFTWARE\JUJU\%s`, salt) - s.PatchValue(version.CurrentVersionKey, regKey) - - k, _, err := registry.CreateKey(registry.LOCAL_MACHINE, *version.CurrentVersionKey, registry.ALL_ACCESS) - c.Assert(err, jc.ErrorIsNil) - - err = k.Close() - c.Assert(err, jc.ErrorIsNil) - - s.AddCleanup(func(*gc.C) { - registry.DeleteKey(registry.LOCAL_MACHINE, *version.CurrentVersionKey) - }) -} - -func (s *windowsVersionSuite) TestOSVersion(c *gc.C) { - for _, value := range versionTests { - k, err := registry.OpenKey(registry.LOCAL_MACHINE, *version.CurrentVersionKey, registry.ALL_ACCESS) - c.Assert(err, jc.ErrorIsNil) - - err = k.SetStringValue("ProductName", value.version) - c.Assert(err, jc.ErrorIsNil) - - err = k.Close() - c.Assert(err, jc.ErrorIsNil) - - ver, err := version.OSVersion() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ver, gc.Equals, value.want) - } -} === removed file 'src/github.com/juju/juju/version/supportedseries.go' --- src/github.com/juju/juju/version/supportedseries.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/version/supportedseries.go 1970-01-01 00:00:00 +0000 @@ -1,290 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package version - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" - "sync" - - "github.com/juju/errors" -) - -type OSType int - -const ( - Unknown OSType = iota - Ubuntu - Windows - OSX - CentOS - Arch -) - -func (t OSType) String() string { - switch t { - case Ubuntu: - return "Ubuntu" - case Windows: - return "Windows" - case OSX: - return "OSX" - case CentOS: - return "CentOS" - case Arch: - return "Arch" - } - return "Unknown" -} - -type unknownOSForSeriesError string - -func (e unknownOSForSeriesError) Error() string { - return `unknown OS for series: "` + string(e) + `"` -} - -// IsUnknownOSForSeriesError returns true if err is of type unknownOSForSeriesError. -func IsUnknownOSForSeriesError(err error) bool { - _, ok := errors.Cause(err).(unknownOSForSeriesError) - return ok -} - -type unknownSeriesVersionError string - -func (e unknownSeriesVersionError) Error() string { - return `unknown version for series: "` + string(e) + `"` -} - -// IsUnknownSeriesVersionError returns true if err is of type unknownSeriesVersionError. 
-func IsUnknownSeriesVersionError(err error) bool {
- _, ok := errors.Cause(err).(unknownSeriesVersionError)
- return ok
-}
-
-var defaultVersionIDs = map[string]string{
- "arch": "rolling",
-}
-
-// seriesVersions provides a mapping between series names and versions.
-// The values here are current as of the time of writing. On Ubuntu systems, we update
-// these values from /usr/share/distro-info/ubuntu.csv to ensure we have the latest values.
-// On non-Ubuntu systems, these values provide a nice fallback option.
-// Exported so tests can change the values to ensure the distro-info lookup works.
-var seriesVersions = map[string]string{
- "precise": "12.04",
- "quantal": "12.10",
- "raring": "13.04",
- "saucy": "13.10",
- "trusty": "14.04",
- "utopic": "14.10",
- "vivid": "15.04",
- "wily": "15.10",
- "xenial": "16.04",
- "win2012hvr2": "win2012hvr2",
- "win2012hv": "win2012hv",
- "win2012r2": "win2012r2",
- "win2012": "win2012",
- "win7": "win7",
- "win8": "win8",
- "win81": "win81",
- "win10": "win10",
- "centos7": "centos7",
- "arch": "rolling",
-}
-
-var centosSeries = map[string]string{
- "centos7": "centos7",
-}
-
-var archSeries = map[string]string{
- "arch": "rolling",
-}
-
-var ubuntuSeries = map[string]string{
- "precise": "12.04",
- "quantal": "12.10",
- "raring": "13.04",
- "saucy": "13.10",
- "trusty": "14.04",
- "utopic": "14.10",
- "vivid": "15.04",
- "wily": "15.10",
- "xenial": "16.04",
-}
-
-// Windows versions come in various flavors:
-// Standard, Datacenter, etc. We use string prefix matching to map them to one
-// of the following. Specify the longest name in a particular series first.
-// For example, if we have "Win 2012" and "Win 2012 R2", we specify "Win 2012 R2" first.
-// We need to make sure we manually update this list with each new windows release.
-var windowsVersionMatchOrder = []string{
- "Hyper-V Server 2012 R2",
- "Hyper-V Server 2012",
- "Windows Server 2012 R2",
- "Windows Server 2012",
- "Windows Storage Server 2012 R2",
- "Windows Storage Server 2012",
- "Windows 7",
- "Windows 8.1",
- "Windows 8",
- "Windows 10",
-}
-
-// windowsVersions is a mapping consisting of the output from
-// the following WMI query: (gwmi Win32_OperatingSystem).Name
-var windowsVersions = map[string]string{
- "Hyper-V Server 2012 R2": "win2012hvr2",
- "Hyper-V Server 2012": "win2012hv",
- "Windows Server 2012 R2": "win2012r2",
- "Windows Server 2012": "win2012",
- "Windows Storage Server 2012 R2": "win2012r2",
- "Windows Storage Server 2012": "win2012",
- "Windows 7": "win7",
- "Windows 8.1": "win81",
- "Windows 8": "win8",
- "Windows 10": "win10",
-}
-
-var distroInfo = "/usr/share/distro-info/ubuntu.csv"
-
-// GetOSFromSeries will return the operating system based
-// on the series that is passed to it.
-func GetOSFromSeries(series string) (OSType, error) {
- if series == "" {
- return Unknown, errors.NotValidf("series %q", series)
- }
- if _, ok := ubuntuSeries[series]; ok {
- return Ubuntu, nil
- }
- if _, ok := centosSeries[series]; ok {
- return CentOS, nil
- }
- if _, ok := archSeries[series]; ok {
- return Arch, nil
- }
- for _, val := range windowsVersions {
- if val == series {
- return Windows, nil
- }
- }
- for _, val := range macOSXSeries {
- if val == series {
- return OSX, nil
- }
- }
- return Unknown, errors.Trace(unknownOSForSeriesError(series))
-}
-
-var (
- seriesVersionsMutex sync.Mutex
- updatedseriesVersions bool
-)
-
-// SeriesVersion returns the version for the specified series.
-func SeriesVersion(series string) (string, error) { - if series == "" { - panic("cannot pass empty series to SeriesVersion()") - } - seriesVersionsMutex.Lock() - defer seriesVersionsMutex.Unlock() - if vers, ok := seriesVersions[series]; ok { - return vers, nil - } - updateSeriesVersionsOnce() - if vers, ok := seriesVersions[series]; ok { - return vers, nil - } - - return "", errors.Trace(unknownSeriesVersionError(series)) -} - -// SupportedSeries returns the series on which we can run Juju workloads. -func SupportedSeries() []string { - seriesVersionsMutex.Lock() - defer seriesVersionsMutex.Unlock() - updateSeriesVersionsOnce() - var series []string - for s := range seriesVersions { - series = append(series, s) - } - return series -} - -// OSSupportedSeries returns the series of the specified OS on which we -// can run Juju workloads. -func OSSupportedSeries(os OSType) []string { - var osSeries []string - for _, series := range SupportedSeries() { - seriesOS, err := GetOSFromSeries(series) - if err != nil || seriesOS != os { - continue - } - osSeries = append(osSeries, series) - } - return osSeries -} - -// UpdateSeriesVersions forces an update of the series versions by querying -// distro-info if possible. -func UpdateSeriesVersions() error { - seriesVersionsMutex.Lock() - defer seriesVersionsMutex.Unlock() - return updateDistroInfo() -} - -func updateSeriesVersionsOnce() { - if !updatedseriesVersions { - err := updateDistroInfo() - if err != nil { - logger.Warningf("failed to update distro info: %v", err) - } - updatedseriesVersions = true - } -} - -// updateDistroInfo updates seriesVersions from /usr/share/distro-info/ubuntu.csv if possible. -func updateDistroInfo() error { - // We need to find the series version eg 12.04 from the series eg precise. Use the information found in - // /usr/share/distro-info/ubuntu.csv provided by distro-info-data package. - f, err := os.Open(distroInfo) - if err != nil { - // On non-Ubuntu systems this file won't exist but that's expected. - return nil - } - defer f.Close() - bufRdr := bufio.NewReader(f) - // Only find info for precise or later. - // TODO: only add in series that are supported (i.e. before end of life) - preciseOrLaterFound := false - for { - line, err := bufRdr.ReadString('\n') - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("reading distro info file: %v", err) - } - // lines are of the form: "12.04 LTS,Precise Pangolin,precise,2011-10-13,2012-04-26,2017-04-26" - parts := strings.Split(line, ",") - // Ignore any malformed lines. - if len(parts) < 3 { - continue - } - series := parts[2] - if series == "precise" { - preciseOrLaterFound = true - } - if series != "precise" && !preciseOrLaterFound { - continue - } - // The numeric version may contain an LTS moniker so strip that out. - seriesInfo := strings.Split(parts[0], " ") - seriesVersions[series] = seriesInfo[0] - ubuntuSeries[series] = seriesInfo[0] - } - return nil -} === removed file 'src/github.com/juju/juju/version/supportedseries_linux_test.go' --- src/github.com/juju/juju/version/supportedseries_linux_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/supportedseries_linux_test.go 1970-01-01 00:00:00 +0000 @@ -1,63 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.
- -package version_test - -import ( - "io/ioutil" - "path/filepath" - "sort" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/version" -) - -func (s *supportedSeriesSuite) TestSeriesVersion(c *gc.C) { - // There is no distro-info on Windows or CentOS. - if version.Current.OS != version.Ubuntu { - c.Skip("This test is only relevant on Ubuntu.") - } - vers, err := version.SeriesVersion("precise") - if err != nil && err.Error() == `invalid series "precise"` { - c.Fatalf(`Unable to lookup series "precise", you may need to: apt-get install distro-info`) - } - c.Assert(err, jc.ErrorIsNil) - c.Assert(vers, gc.Equals, "12.04") -} - -func (s *supportedSeriesSuite) TestSupportedSeries(c *gc.C) { - d := c.MkDir() - filename := filepath.Join(d, "ubuntu.csv") - err := ioutil.WriteFile(filename, []byte(distInfoData), 0644) - c.Assert(err, jc.ErrorIsNil) - s.PatchValue(version.DistroInfo, filename) - - expectedSeries := []string{"precise", "quantal", "raring", "saucy"} - series := version.SupportedSeries() - sort.Strings(series) - c.Assert(series, gc.DeepEquals, expectedSeries) -} - -const distInfoData = `version,codename,series,created,release,eol,eol-server -4.10,Warty Warthog,warty,2004-03-05,2004-10-20,2006-04-30 -5.04,Hoary Hedgehog,hoary,2004-10-20,2005-04-08,2006-10-31 -5.10,Breezy Badger,breezy,2005-04-08,2005-10-12,2007-04-13 -6.06 LTS,Dapper Drake,dapper,2005-10-12,2006-06-01,2009-07-14,2011-06-01 -6.10,Edgy Eft,edgy,2006-06-01,2006-10-26,2008-04-25 -7.04,Feisty Fawn,feisty,2006-10-26,2007-04-19,2008-10-19 -7.10,Gutsy Gibbon,gutsy,2007-04-19,2007-10-18,2009-04-18 -8.04 LTS,Hardy Heron,hardy,2007-10-18,2008-04-24,2011-05-12,2013-05-09 -8.10,Intrepid Ibex,intrepid,2008-04-24,2008-10-30,2010-04-30 -9.04,Jaunty Jackalope,jaunty,2008-10-30,2009-04-23,2010-10-23 -9.10,Karmic Koala,karmic,2009-04-23,2009-10-29,2011-04-29 -10.04 LTS,Lucid Lynx,lucid,2009-10-29,2010-04-29,2013-05-09,2015-04-29 -10.10,Maverick Meerkat,maverick,2010-04-29,2010-10-10,2012-04-10 -11.04,Natty Narwhal,natty,2010-10-10,2011-04-28,2012-10-28 -11.10,Oneiric Ocelot,oneiric,2011-04-28,2011-10-13,2013-05-09 -12.04 LTS,Precise Pangolin,precise,2011-10-13,2012-04-26,2017-04-26 -12.10,Quantal Quetzal,quantal,2012-04-26,2012-10-18,2014-04-18 -13.04,Raring Ringtail,raring,2012-10-18,2013-04-25,2014-01-27 -13.10,Saucy Salamander,saucy,2013-04-25,2013-10-17,2014-07-17 -` === removed file 'src/github.com/juju/juju/version/supportedseries_test.go' --- src/github.com/juju/juju/version/supportedseries_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/version/supportedseries_test.go 1970-01-01 00:00:00 +0000 @@ -1,89 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package version_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type supportedSeriesSuite struct { - testing.BaseSuite - cleanup func() -} - -var _ = gc.Suite(&supportedSeriesSuite{}) - -func (s *supportedSeriesSuite) SetUpTest(c *gc.C) { - s.cleanup = version.SetSeriesVersions(make(map[string]string)) -} - -func (s *supportedSeriesSuite) TearDownTest(c *gc.C) { - s.cleanup() -} - -var getOSFromSeriesTests = []struct { - series string - want version.OSType - err string -}{{ - series: "precise", - want: version.Ubuntu, -}, { - series: "win2012r2", - want: version.Windows, -}, { - series: "mountainlion", - want: version.OSX, -}, { - series: "centos7", - want: version.CentOS, -}, { - series: "arch", - want: version.Arch, -}, { - series: "", - err: "series \"\" not valid", -}, -} - -func (s *supportedSeriesSuite) TestGetOSFromSeries(c *gc.C) { - for _, t := range getOSFromSeriesTests { - got, err := version.GetOSFromSeries(t.series) - if t.err != "" { - c.Assert(err, gc.ErrorMatches, t.err) - } else { - c.Check(err, jc.ErrorIsNil) - c.Assert(got, gc.Equals, t.want) - } - } -} - -func (s *supportedSeriesSuite) TestUnknownOSFromSeries(c *gc.C) { - _, err := version.GetOSFromSeries("Xuanhuaceratops") - c.Assert(err, jc.Satisfies, version.IsUnknownOSForSeriesError) - c.Assert(err, gc.ErrorMatches, `unknown OS for series: "Xuanhuaceratops"`) -} - -func (s *supportedSeriesSuite) TestOSSupportedSeries(c *gc.C) { - version.SetSeriesVersions(map[string]string{ - "trusty": "14.04", - "utopic": "14.10", - "win7": "win7", - "win81": "win81", - "centos7": "centos7", - "arch": "rolling", - }) - series := version.OSSupportedSeries(version.Ubuntu) - c.Assert(series, jc.SameContents, []string{"trusty", "utopic"}) - series = version.OSSupportedSeries(version.Windows) - c.Assert(series, jc.SameContents, []string{"win7", "win81"}) - series = version.OSSupportedSeries(version.CentOS) - c.Assert(series, jc.SameContents, []string{"centos7"}) - series = version.OSSupportedSeries(version.Arch) - c.Assert(series, jc.SameContents, []string{"arch"}) -} === removed file 'src/github.com/juju/juju/version/supportedseries_windows_test.go' --- src/github.com/juju/juju/version/supportedseries_windows_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/version/supportedseries_windows_test.go 1970-01-01 00:00:00 +0000 @@ -1,58 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Copyright 2014 Cloudbase Solutions SRL -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package version_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/version" -) - -type supportedSeriesWindowsSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&supportedSeriesWindowsSuite{}) - -func (s *supportedSeriesWindowsSuite) TestSeriesVersion(c *gc.C) { - vers, err := version.SeriesVersion("win8") - if err != nil { - c.Assert(err, gc.Not(gc.ErrorMatches), `invalid series "win8"`, gc.Commentf(`unable to lookup series "win8"`)) - } else { - c.Assert(err, jc.ErrorIsNil) - } - c.Assert(err, jc.ErrorIsNil) - c.Assert(vers, gc.Equals, "win8") -} - -func (s *supportedSeriesWindowsSuite) TestSupportedSeries(c *gc.C) { - expectedSeries := []string{ - "arch", - "centos7", - - "precise", - "quantal", - "raring", - "saucy", - "trusty", - "utopic", - "vivid", - "wily", - "xenial", - - "win10", - "win2012", - "win2012hv", - "win2012hvr2", - "win2012r2", - "win7", - "win8", - "win81", - } - series := version.SupportedSeries() - c.Assert(series, jc.SameContents, expectedSeries) -} === modified file 'src/github.com/juju/juju/version/version.go' --- src/github.com/juju/juju/version/version.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/version/version.go 2016-03-22 15:18:22 +0000 @@ -12,19 +12,17 @@ "os" "path/filepath" "regexp" - "runtime" "strconv" "strings" + "github.com/juju/utils/series" "gopkg.in/mgo.v2/bson" - - "github.com/juju/juju/juju/arch" ) // The presence and format of this constant is very important. // The debian/rules build recipe uses this value for the version // number of the release package. -const version = "1.25.4" +const version = "2.0-beta2" // The version that we switched over from old style numbering to new style. var switchOverVersion = MustParse("1.19.9") @@ -33,19 +31,10 @@ // the linux type release version. var osReleaseFile = "/etc/os-release" -var osVers = mustOSVersion() - // Current gives the current version of the system. If the file // "FORCE-VERSION" is present in the same directory as the running // binary, it will override this. -var Current = Binary{ - Number: MustParse(version), - Series: osVers, - Arch: arch.HostArch(), - OS: MustOSFromSeries(osVers), -} - -var Compiler = runtime.Compiler +var Current = MustParse(version) func init() { toolsDir := filepath.Dir(os.Args[0]) @@ -56,7 +45,7 @@ } return } - Current.Number = MustParse(strings.TrimSpace(string(v))) + Current = MustParse(strings.TrimSpace(string(v))) } // Number represents a juju version. 
 When bugs are fixed the patch number is @@ -85,7 +74,6 @@ Number Series string Arch string - OS OSType } func (v Binary) String() string { @@ -131,23 +119,24 @@ return nil } -// GetYAML implements goyaml.Getter -func (v Binary) GetYAML() (tag string, value interface{}) { - return "", v.String() +// MarshalYAML implements the yaml.v2 Marshaler interface +func (v Binary) MarshalYAML() (interface{}, error) { + return v.String(), nil } -// SetYAML implements goyaml.Setter -func (vp *Binary) SetYAML(tag string, value interface{}) bool { - vstr := fmt.Sprintf("%v", value) - if vstr == "" { - return false +// UnmarshalYAML implements the yaml.v2 Unmarshaler interface +func (vp *Binary) UnmarshalYAML(unmarshal func(interface{}) error) error { + var vstr string + err := unmarshal(&vstr) + if err != nil { + return err } v, err := ParseBinary(vstr) if err != nil { - return false + return err } *vp = v - return true + return nil } var ( @@ -191,8 +180,7 @@ } v.Series = m[7] v.Arch = m[8] - var err error - v.OS, err = GetOSFromSeries(v.Series) + _, err := series.GetOSFromSeries(v.Series) return v, err } @@ -309,23 +297,24 @@ return nil } -// GetYAML implements goyaml.Getter -func (v Number) GetYAML() (tag string, value interface{}) { - return "", v.String() +// MarshalYAML implements the yaml.v2 Marshaler interface +func (v Number) MarshalYAML() (interface{}, error) { + return v.String(), nil } -// SetYAML implements goyaml.Setter -func (vp *Number) SetYAML(tag string, value interface{}) bool { - vstr := fmt.Sprintf("%v", value) - if vstr == "" { - return false +// UnmarshalYAML implements the yaml.v2 Unmarshaler interface +func (vp *Number) UnmarshalYAML(unmarshal func(interface{}) error) error { + var vstr string + err := unmarshal(&vstr) + if err != nil { + return err } v, err := Parse(vstr) if err != nil { - return false + return err } *vp = v - return true + return nil } func isOdd(x int) bool { @@ -343,17 +332,6 @@ return v.Tag != "" || v.Build > 0 } -// ReleaseVersion looks for the value of VERSION_ID in the content of -// the os-release. If the value is not found, the file is not found, or -// an error occurs reading the file, an empty string is returned. -func ReleaseVersion() string { - release, err := readOSRelease() - if err != nil { - return "" - } - return release["VERSION_ID"] -} - -// ParseMajorMinor takes an argument of the form "major.minor" and returns ints major and minor.
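// // For example (an illustrative sketch, not part of this change): // // major, minor, err := ParseMajorMinor("1.25") // major == 1, minor == 25 // // Anything other than two dot-separated integers yields a non-nil error.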
func ParseMajorMinor(vers string) (int, int, error) { parts := strings.Split(vers, ".") === modified file 'src/github.com/juju/juju/version/version_test.go' --- src/github.com/juju/juju/version/version_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/version/version_test.go 2016-03-22 15:18:22 +0000 @@ -5,15 +5,12 @@ import ( "encoding/json" - "io/ioutil" - "path/filepath" - "runtime" "strings" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/testing" "github.com/juju/juju/version" @@ -168,7 +165,6 @@ }, Series: series, Arch: arch, - OS: version.Ubuntu, } } @@ -222,7 +218,6 @@ Number: test.expect, Series: "trusty", Arch: "amd64", - OS: version.Ubuntu, } if test.err != "" { c.Assert(err, gc.ErrorMatches, strings.Replace(test.err, "version", "binary version", 1)) @@ -324,73 +319,3 @@ } } } - -func (s *suite) TestUseFastLXC(c *gc.C) { - for i, test := range []struct { - message string - releaseContent string - expected string - }{{ - message: "missing release file", - }, { - message: "OS release file is missing ID", - releaseContent: "some junk\nand more junk", - }, { - message: "precise release", - releaseContent: ` -NAME="Ubuntu" -VERSION="12.04 LTS, Precise" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu 12.04.3 LTS" -VERSION_ID="12.04" -`, - expected: "12.04", - }, { - message: "trusty release", - releaseContent: ` -NAME="Ubuntu" -VERSION="14.04.1 LTS, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu 14.04.1 LTS" -VERSION_ID="14.04" -`, - expected: "14.04", - }, { - message: "minimal trusty release", - releaseContent: ` -ID=ubuntu -VERSION_ID="14.04" -`, - expected: "14.04", - }, { - message: "minimal unstable unicorn", - releaseContent: ` -ID=ubuntu -VERSION_ID="14.10" -`, - expected: "14.10", - }, { - message: "minimal jaunty", - releaseContent: ` -ID=ubuntu -VERSION_ID="9.10" -`, - expected: "9.10", - }} { - c.Logf("%v: %v", i, test.message) - filename := filepath.Join(c.MkDir(), "os-release") - s.PatchValue(version.OSReleaseFile, filename) - if test.releaseContent != "" { - err := ioutil.WriteFile(filename, []byte(test.releaseContent+"\n"), 0644) - c.Assert(err, jc.ErrorIsNil) - } - value := version.ReleaseVersion() - c.Assert(value, gc.Equals, test.expected) - } -} - -func (s *suite) TestCompiler(c *gc.C) { - c.Assert(version.Compiler, gc.Equals, runtime.Compiler) -} === added directory 'src/github.com/juju/juju/watcher' === added file 'src/github.com/juju/juju/watcher/entities.go' --- src/github.com/juju/juju/watcher/entities.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher + +// EntitiesWatcher conveniently ties a StringsChannel to the worker.Worker that +// represents its validity. +// +// It purports to deliver strings that can be parsed as tags, but since it +// doesn't actually produce tags today we may as well make it compatible with +// StringsWatcher so we can use it with a StringsHandler. In an ideal world +// we'd have something like `type EntitiesChannel <-chan []names.Tag` instead.
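+// +// A minimal consumption sketch (illustrative only: watchEntities is a +// hypothetical stand-in for whatever facade call returns an EntitiesWatcher, +// and names.ParseTag is the expected way to recover tags from the strings): +// +// w, err := watchEntities() +// if err != nil { +// return errors.Trace(err) +// } +// for { +// select { +// case changes, ok := <-w.Changes(): +// if !ok { +// return errors.New("watcher closed") // per the CoreWatcher docs +// } +// for _, s := range changes { +// tag, err := names.ParseTag(s) +// // handle tag / err here ... +// } +// } +// }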
+type EntitiesWatcher interface { + CoreWatcher + Changes() StringsChannel +} === added file 'src/github.com/juju/juju/watcher/interface.go' --- src/github.com/juju/juju/watcher/interface.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/interface.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher + +import ( + "github.com/juju/juju/worker" +) + +// CoreWatcher encodes some features of a watcher. The most obvious one: +// +// Changes() <-chan +// +// ...can't be expressed cleanly; and this is annoying because every such chan +// needs to share common behaviours for the abstraction to be generally helpful. +// The critical features of a Changes chan are as follows: +// +// * The channel should never be closed. +// * The channel should send a single baseline value, representing the change +// from a nil state; and subsequently send values representing deltas from +// whatever had previously been sent. +// * The channel should really never be closed. Many existing watchers *do* +// close their channels when the watcher stops; this is harmful because it +// mixes lifetime-handling into change-handling at the cost of clarity (and +// in some cases correctness). So long as a watcher implements Worker, it +// can be safely managed with the worker/catacomb package; of course, all +// sensible clients will still check for closed channels (never trust a +// contract...) but can treat that scenario as a simple error. +// +// To convert a state/watcher.Watcher to a CoreWatcher, ensure that the watcher +// no longer closes its Changes() channel; and replace Stop() and Err() with the +// usual worker boilerplate. Namely: +// +// // Kill is part of the worker.Worker interface. +// func (w *watcher) Kill() { +// w.tomb.Kill(nil) +// } +// +// // Wait is part of the worker.Worker interface. +// func (w *watcher) Wait() error { +// return w.tomb.Wait() +// } +// +// Tests using state/testing/{$Kind}WatcherC should be converted to use the +// equivalents in watcher/watchertest. +type CoreWatcher interface { + worker.Worker +} === added directory 'src/github.com/juju/juju/watcher/legacy' === added file 'src/github.com/juju/juju/watcher/legacy/doc.go' --- src/github.com/juju/juju/watcher/legacy/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +/* +Package legacy contains state-watcher-tuned worker harnesses; the canonical +implementations are in the watcher package, but aren't type-compatible with +original-style watchers -- such as those returned from state methods -- which +we still have a couple of uses for (and the certupdater use might even be +legitimate). +*/ +package legacy === added file 'src/github.com/juju/juju/watcher/legacy/export_test.go' --- src/github.com/juju/juju/watcher/legacy/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package legacy + +import ( + "github.com/juju/juju/state/watcher" +) + +func SetEnsureErr(f func(watcher.Errer) error) { + if f == nil { + ensureErr = watcher.EnsureErr + } else { + ensureErr = f + } +} + +func EnsureErr() func(watcher.Errer) error { + return ensureErr +} === added file 'src/github.com/juju/juju/watcher/legacy/notifyworker.go' --- src/github.com/juju/juju/watcher/legacy/notifyworker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/notifyworker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,107 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package legacy + +import ( + "launchpad.net/tomb" + + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" + "github.com/juju/juju/worker" +) + +// ensureErr is defined as a variable to allow the test suite +// to override it. +var ensureErr = watcher.EnsureErr + +// notifyWorker is the internal implementation of the Worker +// interface, using a NotifyWatcher for handling changes. +type notifyWorker struct { + tomb tomb.Tomb + handler NotifyWatchHandler +} + +// NotifyWatchHandler implements the business logic that is triggered +// as part of watching a NotifyWatcher. +type NotifyWatchHandler interface { + // SetUp will be called once, and should return the watcher that will + // be used to trigger subsequent Handle()s. SetUp can return a watcher + // even if there is an error, and the notify worker will make sure + // to stop the watcher. + SetUp() (state.NotifyWatcher, error) + + // TearDown should clean up any resources that are left around. + TearDown() error + + // Handle is called whenever the watcher returned from SetUp sends a value + // on its Changes() channel. The done channel will be closed if and when + // the worker is being interrupted to finish. Any worker should avoid any + // bare channel reads or writes, but instead use a select with the done + // channel. + Handle(done <-chan struct{}) error +} + +// NewNotifyWorker starts a new worker running the business logic from +// the handler. The worker loop is started in another goroutine as a +// side effect of calling this. +func NewNotifyWorker(handler NotifyWatchHandler) worker.Worker { + nw := &notifyWorker{ + handler: handler, + } + + go func() { + defer nw.tomb.Done() + nw.tomb.Kill(nw.loop()) + }() + return nw +} + +// Kill is part of the worker.Worker interface. +func (nw *notifyWorker) Kill() { + nw.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (nw *notifyWorker) Wait() error { + return nw.tomb.Wait() +} + +type tearDowner interface { + TearDown() error +} + +// propagateTearDown tears down the handler, but ensures any error is +// propagated through the tomb's Kill method.
+func propagateTearDown(handler tearDowner, t *tomb.Tomb) { + if err := handler.TearDown(); err != nil { + t.Kill(err) + } +} + +func (nw *notifyWorker) loop() error { + w, err := nw.handler.SetUp() + if err != nil { + if w != nil { + // We don't bother to propagate an error, because we + // already have an error. + w.Stop() + } + return err + } + defer propagateTearDown(nw.handler, &nw.tomb) + defer watcher.Stop(w, &nw.tomb) + for { + select { + case <-nw.tomb.Dying(): + return tomb.ErrDying + case _, ok := <-w.Changes(): + if !ok { + return ensureErr(w) + } + if err := nw.handler.Handle(nw.tomb.Dying()); err != nil { + return err + } + } + } +} === added file 'src/github.com/juju/juju/watcher/legacy/notifyworker_test.go' --- src/github.com/juju/juju/watcher/legacy/notifyworker_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/notifyworker_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,352 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package legacy_test + +import ( + "fmt" + "sync" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher/legacy" + "github.com/juju/juju/worker" +) + +type NotifyWorkerSuite struct { + coretesting.BaseSuite + worker worker.Worker + actor *notifyHandler +} + +var _ = gc.Suite(&NotifyWorkerSuite{}) + +func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) { + nh := &notifyHandler{ + actions: nil, + handled: make(chan struct{}, 1), + setupError: setupError, + teardownError: teardownError, + handlerError: handlerError, + watcher: &testNotifyWatcher{ + changes: make(chan struct{}), + }, + setupDone: make(chan struct{}), + } + w := legacy.NewNotifyWorker(nh) + select { + case <-nh.setupDone: + case <-time.After(coretesting.ShortWait): + c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest") + } + return nh, w +} + +func (s *NotifyWorkerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.actor, s.worker = newNotifyHandlerWorker(c, nil, nil, nil) +} + +func (s *NotifyWorkerSuite) TearDownTest(c *gc.C) { + legacy.SetEnsureErr(nil) + s.stopWorker(c) + s.BaseSuite.TearDownTest(c) +} + +type notifyHandler struct { + actions []string + mu sync.Mutex + handled chan struct{} + setupError error + teardownError error + handlerError error + watcher *testNotifyWatcher + setupDone chan struct{} +} + +func (nh *notifyHandler) SetUp() (state.NotifyWatcher, error) { + defer func() { nh.setupDone <- struct{}{} }() + nh.mu.Lock() + defer nh.mu.Unlock() + nh.actions = append(nh.actions, "setup") + if nh.watcher == nil { + return nil, nh.setupError + } + return nh.watcher, nh.setupError +} + +func (nh *notifyHandler) TearDown() error { + nh.mu.Lock() + defer nh.mu.Unlock() + nh.actions = append(nh.actions, "teardown") + if nh.handled != nil { + close(nh.handled) + } + return nh.teardownError +} + +func (nh *notifyHandler) Handle(_ <-chan struct{}) error { + nh.mu.Lock() + defer nh.mu.Unlock() + nh.actions = append(nh.actions, "handler") + if nh.handled != nil { + // Unlock while we are waiting for the send + nh.mu.Unlock() + nh.handled <- struct{}{} + nh.mu.Lock() + } + return nh.handlerError +} + +func (nh *notifyHandler) CheckActions(c *gc.C, actions ...string) { + nh.mu.Lock() + defer nh.mu.Unlock() + c.Check(nh.actions,
gc.DeepEquals, actions) +} + +// During teardown we try to stop the worker, but don't hang the test suite if +// Stop never returns +func (s *NotifyWorkerSuite) stopWorker(c *gc.C) { + if s.worker == nil { + return + } + done := make(chan error) + go func() { + done <- worker.Stop(s.worker) + }() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Check(err, jc.ErrorIsNil) + s.actor = nil + s.worker = nil +} + +type testNotifyWatcher struct { + state.NotifyWatcher + mu sync.Mutex + changes chan struct{} + stopped bool + stopError error +} + +func (tnw *testNotifyWatcher) Changes() <-chan struct{} { + return tnw.changes +} + +func (tnw *testNotifyWatcher) Err() error { + return tnw.stopError +} + +func (tnw *testNotifyWatcher) Stop() error { + tnw.mu.Lock() + defer tnw.mu.Unlock() + if !tnw.stopped { + close(tnw.changes) + } + tnw.stopped = true + return tnw.stopError +} + +func (tnw *testNotifyWatcher) SetStopError(err error) { + tnw.mu.Lock() + tnw.stopError = err + tnw.mu.Unlock() +} + +func (tnw *testNotifyWatcher) TriggerChange(c *gc.C) { + select { + case tnw.changes <- struct{}{}: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out trying to trigger a change") + } +} + +func waitForTimeout(c *gc.C, ch <-chan error, timeout time.Duration) error { + select { + case err := <-ch: + return err + case <-time.After(timeout): + c.Errorf("timed out waiting to receive a change after %s", timeout) + } + return nil +} + +func waitShort(c *gc.C, w worker.Worker) error { + done := make(chan error) + go func() { + done <- w.Wait() + }() + return waitForTimeout(c, done, coretesting.ShortWait) +} + +func waitForHandledNotify(c *gc.C, handled chan struct{}) { + select { + case <-handled: + case <-time.After(coretesting.LongWait): + c.Errorf("handled failed to signal after %s", coretesting.LongWait) + } +} + +func (s *NotifyWorkerSuite) TestKill(c *gc.C) { + s.worker.Kill() + err := waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *NotifyWorkerSuite) TestStop(c *gc.C) { + err := worker.Stop(s.worker) + c.Assert(err, jc.ErrorIsNil) + // After stop, Wait should return right away + err = waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *NotifyWorkerSuite) TestWait(c *gc.C) { + done := make(chan error) + go func() { + done <- s.worker.Wait() + }() + // Wait should not return until we've killed the worker + select { + case err := <-done: + c.Errorf("Wait() didn't wait until we stopped it: %v", err) + case <-time.After(coretesting.ShortWait): + } + s.worker.Kill() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *NotifyWorkerSuite) TestCallSetUpAndTearDown(c *gc.C) { + // After calling NewNotifyWorker, we should have called setup + s.actor.CheckActions(c, "setup") + // If we kill the worker, it should notice, and call teardown + s.worker.Kill() + err := waitShort(c, s.worker) + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") + c.Check(s.actor.watcher.stopped, jc.IsTrue) +} + +func (s *NotifyWorkerSuite) TestChangesTriggerHandler(c *gc.C) { + s.actor.CheckActions(c, "setup") + s.actor.watcher.TriggerChange(c) + waitForHandledNotify(c, s.actor.handled) + s.actor.CheckActions(c, "setup", "handler") + s.actor.watcher.TriggerChange(c) + waitForHandledNotify(c, s.actor.handled) + s.actor.watcher.TriggerChange(c) + waitForHandledNotify(c, s.actor.handled) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler") + c.Assert(worker.Stop(s.worker), gc.IsNil) + 
s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown") +} + +func (s *NotifyWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { + // Stop the worker and SetUp again, this time with an error + s.stopWorker(c) + actor, w := newNotifyHandlerWorker(c, fmt.Errorf("my special error"), nil, nil) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my special error") + // TearDown is not called on SetUp error. + actor.CheckActions(c, "setup") + c.Check(actor.watcher.stopped, jc.IsTrue) +} + +func (s *NotifyWorkerSuite) TestWatcherStopFailurePropagates(c *gc.C) { + s.actor.watcher.SetStopError(fmt.Errorf("error while stopping watcher")) + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "error while stopping watcher") + // We've already stopped the worker, don't let teardown notice the + // worker is in an error state + s.worker = nil +} + +func (s *NotifyWorkerSuite) TestCleanRunNoticesTearDownError(c *gc.C) { + s.actor.teardownError = fmt.Errorf("failed to tear down watcher") + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "failed to tear down watcher") + s.worker = nil +} + +func (s *NotifyWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { + s.stopWorker(c) + actor, w := newNotifyHandlerWorker(c, nil, fmt.Errorf("my handling error"), nil) + actor.watcher.TriggerChange(c) + waitForHandledNotify(c, actor.handled) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my handling error") + actor.CheckActions(c, "setup", "handler", "teardown") + c.Check(actor.watcher.stopped, jc.IsTrue) +} + +func (s *NotifyWorkerSuite) TestNoticesStoppedWatcher(c *gc.C) { + // The default closedHandler doesn't panic if you have a genuine error + // (because it assumes you want to propagate a real error and then + // restart). + s.actor.watcher.SetStopError(fmt.Errorf("Stopped Watcher")) + s.actor.watcher.Stop() + err := waitShort(c, s.worker) + c.Check(err, gc.ErrorMatches, "Stopped Watcher") + s.actor.CheckActions(c, "setup", "teardown") + // Worker is stopped, don't fail TearDownTest + s.worker = nil +} + +func noopHandler(watcher.Errer) error { + return nil +} + +type CannedErrer struct { + err error +} + +func (c CannedErrer) Err() error { + return c.err +} + +func (s *NotifyWorkerSuite) TestDefaultClosedHandler(c *gc.C) { + // Roundabout check for function equality. + // Is this test really worth it? + c.Assert(fmt.Sprintf("%p", legacy.EnsureErr()), gc.Equals, fmt.Sprintf("%p", watcher.EnsureErr)) +} + +func (s *NotifyWorkerSuite) TestErrorsOnStillAliveButClosedChannel(c *gc.C) { + foundErr := fmt.Errorf("did not get an error") + triggeredHandler := func(errer watcher.Errer) error { + foundErr = errer.Err() + return foundErr + } + legacy.SetEnsureErr(triggeredHandler) + s.actor.watcher.SetStopError(tomb.ErrStillAlive) + s.actor.watcher.Stop() + err := waitShort(c, s.worker) + c.Check(foundErr, gc.Equals, tomb.ErrStillAlive) + // ErrStillAlive is trapped by the Stop logic and gets turned into a + // 'nil' when stopping. However TestDefaultClosedHandler can assert + // that it would have triggered a panic.
+ c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") + // Worker is stopped, don't fail TearDownTest + s.worker = nil +} + +func (s *NotifyWorkerSuite) TestErrorsOnClosedChannel(c *gc.C) { + foundErr := fmt.Errorf("did not get an error") + triggeredHandler := func(errer watcher.Errer) error { + foundErr = errer.Err() + return foundErr + } + legacy.SetEnsureErr(triggeredHandler) + s.actor.watcher.Stop() + err := waitShort(c, s.worker) + // If the foundErr is nil, we would have panicked (see TestDefaultClosedHandler) + c.Check(foundErr, gc.IsNil) + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") +} === added file 'src/github.com/juju/juju/watcher/legacy/package_test.go' --- src/github.com/juju/juju/watcher/legacy/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package legacy_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/watcher/legacy/stringsworker.go' --- src/github.com/juju/juju/watcher/legacy/stringsworker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/stringsworker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,87 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package legacy + +import ( + "launchpad.net/tomb" + + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" + "github.com/juju/juju/worker" +) + +// stringsWorker is the internal implementation of the Worker +// interface, using a StringsWatcher for handling changes. +type stringsWorker struct { + tomb tomb.Tomb + handler StringsWatchHandler +} + +// StringsWatchHandler implements the business logic triggered as part +// of watching a StringsWatcher. +type StringsWatchHandler interface { + // SetUp will be called once, and should return the watcher that will + // be used to trigger subsequent Handle()s. SetUp can return a watcher + // even if there is an error, and the strings worker will make sure + // to stop the watcher. + SetUp() (state.StringsWatcher, error) + + // TearDown should clean up any resources that are left around. + TearDown() error + + // Handle is called whenever the watcher returned from SetUp sends a value + // on its Changes() channel. + Handle(changes []string) error +} + +// NewStringsWorker starts a new worker running the business logic +// from the handler. The worker loop is started in another goroutine +// as a side effect of calling this. +func NewStringsWorker(handler StringsWatchHandler) worker.Worker { + sw := &stringsWorker{ + handler: handler, + } + go func() { + defer sw.tomb.Done() + sw.tomb.Kill(sw.loop()) + }() + return sw +} + +// Kill is part of the worker.Worker interface. +func (sw *stringsWorker) Kill() { + sw.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (sw *stringsWorker) Wait() error { + return sw.tomb.Wait() +} + +func (sw *stringsWorker) loop() error { + w, err := sw.handler.SetUp() + if err != nil { + if w != nil { + // We don't bother to propagate an error, because we + // already have an error.
+ w.Stop() + } + return err + } + defer propagateTearDown(sw.handler, &sw.tomb) + defer watcher.Stop(w, &sw.tomb) + for { + select { + case <-sw.tomb.Dying(): + return tomb.ErrDying + case changes, ok := <-w.Changes(): + if !ok { + return ensureErr(w) + } + if err := sw.handler.Handle(changes); err != nil { + return err + } + } + } +} === added file 'src/github.com/juju/juju/watcher/legacy/stringsworker_test.go' --- src/github.com/juju/juju/watcher/legacy/stringsworker_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/legacy/stringsworker_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,317 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package legacy_test + +import ( + "fmt" + "sync" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + "github.com/juju/juju/state" + "github.com/juju/juju/state/watcher" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher/legacy" + "github.com/juju/juju/worker" +) + +type stringsWorkerSuite struct { + coretesting.BaseSuite + worker worker.Worker + actor *stringsHandler +} + +var _ = gc.Suite(&stringsWorkerSuite{}) + +func newStringsHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*stringsHandler, worker.Worker) { + sh := &stringsHandler{ + actions: nil, + handled: make(chan []string, 1), + setupError: setupError, + teardownError: teardownError, + handlerError: handlerError, + watcher: &testStringsWatcher{ + changes: make(chan []string), + }, + setupDone: make(chan struct{}), + } + w := legacy.NewStringsWorker(sh) + select { + case <-sh.setupDone: + case <-time.After(coretesting.ShortWait): + c.Error("Failed waiting for stringsHandler.Setup to be called during SetUpTest") + } + return sh, w +} + +func (s *stringsWorkerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.actor, s.worker = newStringsHandlerWorker(c, nil, nil, nil) +} + +func (s *stringsWorkerSuite) TearDownTest(c *gc.C) { + s.stopWorker(c) + s.BaseSuite.TearDownTest(c) +} + +type stringsHandler struct { + actions []string + mu sync.Mutex + // Signal handled when we get a handle() call + handled chan []string + setupError error + teardownError error + handlerError error + watcher *testStringsWatcher + setupDone chan struct{} +} + +func (sh *stringsHandler) SetUp() (state.StringsWatcher, error) { + defer func() { sh.setupDone <- struct{}{} }() + sh.mu.Lock() + defer sh.mu.Unlock() + sh.actions = append(sh.actions, "setup") + if sh.watcher == nil { + return nil, sh.setupError + } + return sh.watcher, sh.setupError +} + +func (sh *stringsHandler) TearDown() error { + sh.mu.Lock() + defer sh.mu.Unlock() + sh.actions = append(sh.actions, "teardown") + if sh.handled != nil { + close(sh.handled) + } + return sh.teardownError +} + +func (sh *stringsHandler) Handle(changes []string) error { + sh.mu.Lock() + defer sh.mu.Unlock() + sh.actions = append(sh.actions, "handler") + if sh.handled != nil { + // Unlock while we are waiting for the send + sh.mu.Unlock() + sh.handled <- changes + sh.mu.Lock() + } + return sh.handlerError +} + +func (sh *stringsHandler) CheckActions(c *gc.C, actions ...string) { + sh.mu.Lock() + defer sh.mu.Unlock() + c.Check(sh.actions, gc.DeepEquals, actions) +} + +// During teardown we try to stop the worker, but don't hang the test suite if +// Stop never returns +func (s *stringsWorkerSuite) stopWorker(c *gc.C) { + if s.worker == nil { + return + } + done := make(chan error) + go func() { + done <- 
worker.Stop(s.worker) + }() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Check(err, jc.ErrorIsNil) + s.actor = nil + s.worker = nil +} + +type testStringsWatcher struct { + state.StringsWatcher + mu sync.Mutex + changes chan []string + stopped bool + stopError error +} + +func (tsw *testStringsWatcher) Changes() <-chan []string { + return tsw.changes +} + +func (tsw *testStringsWatcher) Err() error { + return tsw.stopError +} + +func (tsw *testStringsWatcher) Stop() error { + tsw.mu.Lock() + defer tsw.mu.Unlock() + if !tsw.stopped { + close(tsw.changes) + } + tsw.stopped = true + return tsw.stopError +} + +func (tsw *testStringsWatcher) SetStopError(err error) { + tsw.mu.Lock() + tsw.stopError = err + tsw.mu.Unlock() +} + +func (tsw *testStringsWatcher) TriggerChange(c *gc.C, changes []string) { + select { + case tsw.changes <- changes: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out trying to trigger a change") + } +} + +func waitForHandledStrings(c *gc.C, handled chan []string, expect []string) { + select { + case changes := <-handled: + c.Assert(changes, gc.DeepEquals, expect) + case <-time.After(coretesting.LongWait): + c.Errorf("handled failed to signal after %s", coretesting.LongWait) + } +} + +func (s *stringsWorkerSuite) TestKill(c *gc.C) { + s.worker.Kill() + err := waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *stringsWorkerSuite) TestStop(c *gc.C) { + err := worker.Stop(s.worker) + c.Assert(err, jc.ErrorIsNil) + // After stop, Wait should return right away + err = waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *stringsWorkerSuite) TestWait(c *gc.C) { + done := make(chan error) + go func() { + done <- s.worker.Wait() + }() + // Wait should not return until we've killed the worker + select { + case err := <-done: + c.Errorf("Wait() didn't wait until we stopped it: %v", err) + case <-time.After(coretesting.ShortWait): + } + s.worker.Kill() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *stringsWorkerSuite) TestCallSetUpAndTearDown(c *gc.C) { + // After calling NewStringsWorker, we should have called setup + s.actor.CheckActions(c, "setup") + // If we kill the worker, it should notice, and call teardown + s.worker.Kill() + err := waitShort(c, s.worker) + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") + c.Check(s.actor.watcher.stopped, jc.IsTrue) +} + +func (s *stringsWorkerSuite) TestChangesTriggerHandler(c *gc.C) { + s.actor.CheckActions(c, "setup") + s.actor.watcher.TriggerChange(c, []string{"aa", "bb"}) + waitForHandledStrings(c, s.actor.handled, []string{"aa", "bb"}) + s.actor.CheckActions(c, "setup", "handler") + s.actor.watcher.TriggerChange(c, []string{"cc", "dd"}) + waitForHandledStrings(c, s.actor.handled, []string{"cc", "dd"}) + s.actor.watcher.TriggerChange(c, []string{"ee", "ff"}) + waitForHandledStrings(c, s.actor.handled, []string{"ee", "ff"}) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler") + c.Assert(worker.Stop(s.worker), gc.IsNil) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown") +} + +func (s *stringsWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { + // Stop the worker and SetUp again, this time with an error + s.stopWorker(c) + actor, w := newStringsHandlerWorker(c, fmt.Errorf("my special error"), nil, nil) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my special error") + // TearDown is not called on SetUp error. 
+ actor.CheckActions(c, "setup") + c.Check(actor.watcher.stopped, jc.IsTrue) +} + +func (s *stringsWorkerSuite) TestWatcherStopFailurePropagates(c *gc.C) { + s.actor.watcher.SetStopError(fmt.Errorf("error while stopping watcher")) + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "error while stopping watcher") + // We've already stopped the worker, don't let teardown notice the + // worker is in an error state + s.worker = nil +} + +func (s *stringsWorkerSuite) TestCleanRunNoticesTearDownError(c *gc.C) { + s.actor.teardownError = fmt.Errorf("failed to tear down watcher") + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "failed to tear down watcher") + s.worker = nil +} + +func (s *stringsWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { + s.stopWorker(c) + actor, w := newStringsHandlerWorker(c, nil, fmt.Errorf("my handling error"), nil) + actor.watcher.TriggerChange(c, []string{"aa", "bb"}) + waitForHandledStrings(c, actor.handled, []string{"aa", "bb"}) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my handling error") + actor.CheckActions(c, "setup", "handler", "teardown") + c.Check(actor.watcher.stopped, jc.IsTrue) +} + +func (s *stringsWorkerSuite) TestNoticesStoppedWatcher(c *gc.C) { + // The default closedHandler doesn't panic if you have a genuine error + // (because it assumes you want to propagate a real error and then + // restart). + s.actor.watcher.SetStopError(fmt.Errorf("Stopped Watcher")) + s.actor.watcher.Stop() + err := waitShort(c, s.worker) + c.Check(err, gc.ErrorMatches, "Stopped Watcher") + s.actor.CheckActions(c, "setup", "teardown") + // Worker is stopped, don't fail TearDownTest + s.worker = nil +} + +func (s *stringsWorkerSuite) TestErrorsOnStillAliveButClosedChannel(c *gc.C) { + foundErr := fmt.Errorf("did not get an error") + triggeredHandler := func(errer watcher.Errer) error { + foundErr = errer.Err() + return foundErr + } + legacy.SetEnsureErr(triggeredHandler) + s.actor.watcher.SetStopError(tomb.ErrStillAlive) + s.actor.watcher.Stop() + err := waitShort(c, s.worker) + c.Check(foundErr, gc.Equals, tomb.ErrStillAlive) + // ErrStillAlive is trapped by the Stop logic and gets turned into a + // 'nil' when stopping. However TestDefaultClosedHandler can assert + // that it would have triggered an error. + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") + // Worker is stopped, don't fail TearDownTest + s.worker = nil +} + +func (s *stringsWorkerSuite) TestErrorsOnClosedChannel(c *gc.C) { + foundErr := fmt.Errorf("did not get an error") + triggeredHandler := func(errer watcher.Errer) error { + foundErr = errer.Err() + return foundErr + } + legacy.SetEnsureErr(triggeredHandler) + s.actor.watcher.Stop() + err := waitShort(c, s.worker) + // If the foundErr is nil, we would have panicked (see TestDefaultClosedHandler) + c.Check(foundErr, gc.IsNil) + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") +} === added file 'src/github.com/juju/juju/watcher/machinestorageids.go' --- src/github.com/juju/juju/watcher/machinestorageids.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/machinestorageids.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher + +// MachineStorageId associates a machine entity with a storage entity.
They're +// expressed as tags because they arrived here as a move, not a change; ideally +// a MachineStorageIdsWatcher would return them in a more model-appropriate +// format (i.e. not as strings-that-probably-parse-to-tags). +type MachineStorageId struct { + MachineTag string + AttachmentTag string +} + +// MachineStorageIdsChannel is a change channel as described in the CoreWatcher +// docs. +// +// It reports additions and removals to a set of attachments; and lifecycle +// changes within the active set. +type MachineStorageIdsChannel <-chan []MachineStorageId + +// MachineStorageIdsWatcher conveniently ties a MachineStorageIdsChannel to the +// worker.Worker that represents its validity. +type MachineStorageIdsWatcher interface { + CoreWatcher + Changes() MachineStorageIdsChannel +} === added file 'src/github.com/juju/juju/watcher/notify.go' --- src/github.com/juju/juju/watcher/notify.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/notify.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,139 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/worker/catacomb" +) + +// NotifyChannel is a change channel as described in the CoreWatcher docs. +// +// It sends a single value to indicate that the watch is active, and subsequent +// values whenever the value(s) under observation change(s). +type NotifyChannel <-chan struct{} + +// NotifyWatcher conveniently ties a NotifyChannel to the worker.Worker that +// represents its validity. +type NotifyWatcher interface { + CoreWatcher + Changes() NotifyChannel +} + +// NotifyHandler defines the operation of a NotifyWorker. +type NotifyHandler interface { + + // SetUp is called once when creating a NotifyWorker. It must return a + // NotifyWatcher or an error. The NotifyHandler takes responsibility for + // stopping any returned watcher and handling any errors. + SetUp() (NotifyWatcher, error) + + // Handle is called whenever a value is received from the NotifyWatcher + // returned by SetUp. If it returns an error, the NotifyWorker will be + // stopped. + // + // If Handle runs any blocking operations it must pass through, or select + // on, the supplied abort channel; this channel will be closed when the + // NotifyWorker is killed. An aborted Handle should not return an error. + Handle(abort <-chan struct{}) error + + // TearDown is called once when stopping a NotifyWorker, whether or not + // SetUp succeeded. It need not concern itself with the NotifyWatcher, but + // must clean up any other resources created in SetUp or Handle. + TearDown() error +} + +// NotifyConfig holds the direct dependencies of a NotifyWorker. +type NotifyConfig struct { + Handler NotifyHandler +} + +// Validate returns an error if the config cannot start a NotifyWorker. +func (config NotifyConfig) Validate() error { + if config.Handler == nil { + return errors.NotValidf("nil Handler") + } + return nil +} + +// NewNotifyWorker starts a new worker that runs a NotifyHandler. +func NewNotifyWorker(config NotifyConfig) (*NotifyWorker, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + nw := &NotifyWorker{ + config: config, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &nw.catacomb, + Work: nw.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return nw, nil +} + +// NotifyWorker is a worker that wraps a NotifyWatcher.
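+// +// A minimal usage sketch (illustrative only: myHandler stands for any +// NotifyHandler implementation and is not part of this package): +// +// w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ +// Handler: myHandler, +// }) +// if err != nil { +// return errors.Trace(err) +// } +// // NotifyWorker is a worker.Worker, so the usual lifecycle applies: +// defer worker.Stop(w) +// +// The worker runs until its watcher closes or errors, Handle returns an +// error, or it is killed; TearDown is always given a chance to clean up.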
+type NotifyWorker struct { + config NotifyConfig + catacomb catacomb.Catacomb +} + +func (nw *NotifyWorker) loop() (err error) { + changes := nw.setUp() + defer nw.tearDown(err) + abort := nw.catacomb.Dying() + for { + select { + case <-abort: + return nw.catacomb.ErrDying() + case _, ok := <-changes: + if !ok { + return errors.New("change channel closed") + } + err = nw.config.Handler.Handle(abort) + if err != nil { + return err + } + } + } +} + +// setUp calls the handler's SetUp method; registers any returned watcher with +// the worker's catacomb; and returns the watcher's changes channel. Any errors +// encountered kill the worker and cause a nil channel to be returned. +func (nw *NotifyWorker) setUp() NotifyChannel { + watcher, err := nw.config.Handler.SetUp() + if err != nil { + nw.catacomb.Kill(err) + } + if watcher == nil { + nw.catacomb.Kill(errors.New("handler returned nil watcher")) + } else if err := nw.catacomb.Add(watcher); err != nil { + nw.catacomb.Kill(err) + } else { + return watcher.Changes() + } + return nil +} + +// tearDown kills the worker with the supplied error; and then kills it with +// any error returned by the handler's TearDown method. +func (nw *NotifyWorker) tearDown(err error) { + nw.catacomb.Kill(err) + err = nw.config.Handler.TearDown() + nw.catacomb.Kill(err) +} + +// Kill is part of the worker.Worker interface. +func (nw *NotifyWorker) Kill() { + nw.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (nw *NotifyWorker) Wait() error { + return nw.catacomb.Wait() +} === added file 'src/github.com/juju/juju/watcher/notify_test.go' --- src/github.com/juju/juju/watcher/notify_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/notify_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,314 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package watcher_test + +import ( + "sync" + "time" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" +) + +type notifyWorkerSuite struct { + coretesting.BaseSuite + worker worker.Worker + actor *notifyHandler +} + +var _ = gc.Suite(&notifyWorkerSuite{}) + +func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) { + nh := &notifyHandler{ + actions: nil, + handled: make(chan struct{}, 1), + setupError: setupError, + teardownError: teardownError, + handlerError: handlerError, + watcher: newTestNotifyWatcher(), + setupDone: make(chan struct{}), + } + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{Handler: nh}) + c.Assert(err, jc.ErrorIsNil) + select { + case <-nh.setupDone: + case <-time.After(coretesting.ShortWait): + c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest") + } + return nh, w +} + +func (s *notifyWorkerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.actor, s.worker = newNotifyHandlerWorker(c, nil, nil, nil) +} + +func (s *notifyWorkerSuite) TearDownTest(c *gc.C) { + s.stopWorker(c) + s.BaseSuite.TearDownTest(c) +} + +type notifyHandler struct { + actions []string + mu sync.Mutex + // Signal handled when we get a handle() call + handled chan struct{} + setupError error + teardownError error + handlerError error + watcher *testNotifyWatcher + setupDone chan struct{} +} + +func (nh *notifyHandler) SetUp() (watcher.NotifyWatcher, error) { + defer func() { nh.setupDone <- struct{}{} }() + nh.mu.Lock() + defer nh.mu.Unlock() + nh.actions = append(nh.actions, "setup") + if nh.watcher == nil { + return nil, nh.setupError + } + return nh.watcher, nh.setupError +} + +func (nh *notifyHandler) TearDown() error { + nh.mu.Lock() + defer nh.mu.Unlock() + nh.actions = append(nh.actions, "teardown") + if nh.handled != nil { + close(nh.handled) + } + return nh.teardownError +} + +func (nh *notifyHandler) Handle(_ <-chan struct{}) error { + nh.mu.Lock() + defer nh.mu.Unlock() + nh.actions = append(nh.actions, "handler") + if nh.handled != nil { + // Unlock while we are waiting for the send + nh.mu.Unlock() + nh.handled <- struct{}{} + nh.mu.Lock() + } + return nh.handlerError +} + +func (nh *notifyHandler) CheckActions(c *gc.C, actions ...string) { + nh.mu.Lock() + defer nh.mu.Unlock() + c.Check(nh.actions, gc.DeepEquals, actions) +} + +// During teardown we try to stop the worker, but don't hang the test suite if +// Stop never returns +func (s *notifyWorkerSuite) stopWorker(c *gc.C) { + if s.worker == nil { + return + } + done := make(chan error) + go func() { + done <- worker.Stop(s.worker) + }() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Check(err, jc.ErrorIsNil) + s.actor = nil + s.worker = nil +} + +func newTestNotifyWatcher() *testNotifyWatcher { + w := &testNotifyWatcher{ + changes: make(chan struct{}), + } + go func() { + defer w.tomb.Done() + <-w.tomb.Dying() + }() + return w +} + +type testNotifyWatcher struct { + tomb tomb.Tomb + changes chan struct{} + mu sync.Mutex + stopError error +} + +func (tnw *testNotifyWatcher) Changes() watcher.NotifyChannel { + return tnw.changes +} + +func (tnw *testNotifyWatcher) Kill() { + tnw.mu.Lock() + tnw.tomb.Kill(tnw.stopError) + tnw.mu.Unlock() +} + +func (tnw *testNotifyWatcher) Wait() error { + return tnw.tomb.Wait() +} + +func (tnw *testNotifyWatcher)
Stopped() bool { + select { + case <-tnw.tomb.Dead(): + return true + default: + return false + } +} + +func (tnw *testNotifyWatcher) SetStopError(err error) { + tnw.mu.Lock() + tnw.stopError = err + tnw.mu.Unlock() +} + +func (tnw *testNotifyWatcher) TriggerChange(c *gc.C) { + select { + case tnw.changes <- struct{}{}: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out trying to trigger a change") + } +} + +func waitForTimeout(c *gc.C, ch <-chan error, timeout time.Duration) error { + select { + case err := <-ch: + return err + case <-time.After(timeout): + c.Errorf("timed out waiting to receive a change after %s", timeout) + } + return nil +} + +func waitShort(c *gc.C, w worker.Worker) error { + done := make(chan error) + go func() { + done <- w.Wait() + }() + return waitForTimeout(c, done, coretesting.ShortWait) +} + +func waitForHandledNotify(c *gc.C, handled chan struct{}) { + select { + case <-handled: + case <-time.After(coretesting.LongWait): + c.Errorf("handled failed to signal after %s", coretesting.LongWait) + } +} + +func (s *notifyWorkerSuite) TestKill(c *gc.C) { + s.worker.Kill() + err := waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *notifyWorkerSuite) TestStop(c *gc.C) { + err := worker.Stop(s.worker) + c.Assert(err, jc.ErrorIsNil) + // After stop, Wait should return right away + err = waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *notifyWorkerSuite) TestWait(c *gc.C) { + done := make(chan error) + go func() { + done <- s.worker.Wait() + }() + // Wait should not return until we've killed the worker + select { + case err := <-done: + c.Errorf("Wait() didn't wait until we stopped it: %v", err) + case <-time.After(coretesting.ShortWait): + } + s.worker.Kill() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *notifyWorkerSuite) TestCallSetUpAndTearDown(c *gc.C) { + // After calling NewNotifyWorker, we should have called setup + s.actor.CheckActions(c, "setup") + // If we kill the worker, it should notice, and call teardown + s.worker.Kill() + err := waitShort(c, s.worker) + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") + c.Check(s.actor.watcher.Stopped(), jc.IsTrue) +} + +func (s *notifyWorkerSuite) TestChangesTriggerHandler(c *gc.C) { + s.actor.CheckActions(c, "setup") + s.actor.watcher.TriggerChange(c) + waitForHandledNotify(c, s.actor.handled) + s.actor.CheckActions(c, "setup", "handler") + s.actor.watcher.TriggerChange(c) + waitForHandledNotify(c, s.actor.handled) + s.actor.watcher.TriggerChange(c) + waitForHandledNotify(c, s.actor.handled) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler") + c.Assert(worker.Stop(s.worker), gc.IsNil) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown") +} + +func (s *notifyWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { + // Stop the worker and SetUp again, this time with an error + s.stopWorker(c) + actor, w := newNotifyHandlerWorker(c, errors.New("my special error"), nil, errors.New("teardown")) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my special error") + actor.CheckActions(c, "setup", "teardown") + c.Check(actor.watcher.Stopped(), jc.IsTrue) +} + +func (s *notifyWorkerSuite) TestWatcherStopFailurePropagates(c *gc.C) { + s.actor.watcher.SetStopError(errors.New("error while stopping watcher")) + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "error while stopping watcher") + // We've already stopped the worker, don't 
let teardown notice the + // worker is in an error state + s.worker = nil +} + +func (s *notifyWorkerSuite) TestCleanRunNoticesTearDownError(c *gc.C) { + s.actor.teardownError = errors.New("failed to tear down watcher") + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "failed to tear down watcher") + s.worker = nil +} + +func (s *notifyWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { + s.stopWorker(c) + actor, w := newNotifyHandlerWorker(c, nil, errors.New("my handling error"), nil) + actor.watcher.TriggerChange(c) + waitForHandledNotify(c, actor.handled) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my handling error") + actor.CheckActions(c, "setup", "handler", "teardown") + c.Check(actor.watcher.Stopped(), jc.IsTrue) +} + +func (s *notifyWorkerSuite) TestNoticesStoppedWatcher(c *gc.C) { + s.actor.watcher.SetStopError(errors.New("Stopped Watcher")) + s.actor.watcher.Kill() + err := waitShort(c, s.worker) + c.Check(err, gc.ErrorMatches, "Stopped Watcher") + s.actor.CheckActions(c, "setup", "teardown") + s.worker = nil +} + +func (s *notifyWorkerSuite) TestErrorsOnClosedChannel(c *gc.C) { + close(s.actor.watcher.changes) + err := waitShort(c, s.worker) + c.Check(err, gc.ErrorMatches, "change channel closed") + s.actor.CheckActions(c, "setup", "teardown") + s.worker = nil +} === added file 'src/github.com/juju/juju/watcher/package_test.go' --- src/github.com/juju/juju/watcher/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/watcher/relationunits.go' --- src/github.com/juju/juju/watcher/relationunits.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/relationunits.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,40 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher + +// UnitSettings specifies the version of some unit's settings in some relation. +type UnitSettings struct { + Version int64 +} + +// RelationUnitsChange describes the membership and settings of; or changes to; +// some relation scope. +type RelationUnitsChange struct { + + // Changed holds a set of units that are known to be in scope, and the + // latest known settings version for each. + Changed map[string]UnitSettings + + // Departed holds a set of units that have previously been reported to + // be in scope, but which no longer are. + Departed []string +} + +// RelationUnitsChannel is a change channel as described in the CoreWatcher docs. +// +// It sends a single value representing the current membership of a relation +// scope; and the versions of the settings documents for each; and subsequent +// values representing entry, settings-change, and departure for units in that +// scope. +// +// It feeds the joined-changed-departed logic in worker/uniter, but these events +// do not map 1:1 with hooks. +type RelationUnitsChannel <-chan RelationUnitsChange + +// RelationUnitsWatcher conveniently ties a RelationUnitsChannel to the +// worker.Worker that represents its validity. 
+type RelationUnitsWatcher interface { + CoreWatcher + Changes() RelationUnitsChannel +} === added file 'src/github.com/juju/juju/watcher/strings.go' --- src/github.com/juju/juju/watcher/strings.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/strings.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,142 @@ +// Copyright 2013-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/worker/catacomb" +) + +// StringsChannel is a change channel as described in the CoreWatcher docs. +// +// It sends a single value indicating a baseline set of values, and subsequent +// values representing additions, changes, and/or removals of those values. The +// precise semantics may depend upon the individual watcher. +type StringsChannel <-chan []string + +// StringsWatcher conveniently ties a StringsChannel to the worker.Worker that +// represents its validity. +type StringsWatcher interface { + CoreWatcher + Changes() StringsChannel +} + +// StringsHandler defines the operation of a StringsWorker. +type StringsHandler interface { + + // SetUp is called once when creating a StringsWorker. It must return a + // StringsWatcher or an error. The StringsHandler takes responsibility for + // stopping any returned watcher and handling any errors. + SetUp() (StringsWatcher, error) + + // Handle is called with every value received from the StringsWatcher + // returned by SetUp. If it returns an error, the StringsWorker will be + // stopped. + // + // If Handle runs any blocking operations it must pass through, or select + // on, the supplied abort channel; this channel will be closed when the + // StringsWorker is killed. An aborted Handle should not return an error. + Handle(abort <-chan struct{}, changes []string) error + + // TearDown is called once when stopping a StringsWorker, whether or not + // SetUp succeeded. It need not concern itself with the StringsWatcher, but + // must clean up any other resources created in SetUp or Handle. + TearDown() error +} + +// StringsConfig holds the direct dependencies of a StringsWorker. +type StringsConfig struct { + Handler StringsHandler +} + +// Validate returns an error if the config cannot start a StringsWorker. +func (config StringsConfig) Validate() error { + if config.Handler == nil { + return errors.NotValidf("nil Handler") + } + return nil +} + +// NewStringsWorker starts a new worker that runs a StringsHandler. +func NewStringsWorker(config StringsConfig) (*StringsWorker, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + sw := &StringsWorker{ + config: config, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &sw.catacomb, + Work: sw.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return sw, nil +} + +// StringsWorker is a worker that wraps a StringsWatcher. +type StringsWorker struct { + config StringsConfig + catacomb catacomb.Catacomb +} + +func (sw *StringsWorker) loop() (err error) { + changes := sw.setUp() + defer sw.tearDown(err) + abort := sw.catacomb.Dying() + for { + select { + case <-abort: + return sw.catacomb.ErrDying() + case strings, ok := <-changes: + if !ok { + return errors.New("change channel closed") + } + err = sw.config.Handler.Handle(abort, strings) + if err != nil { + return err + } + } + } +} + +// setUp calls the handler's SetUp method; registers any returned watcher with +// the worker's catacomb; and returns the watcher's changes channel. 
Any errors +// encountered kill the worker and cause a nil channel to be returned. +func (sw *StringsWorker) setUp() StringsChannel { + watcher, err := sw.config.Handler.SetUp() + if err != nil { + sw.catacomb.Kill(err) + } + if watcher == nil { + sw.catacomb.Kill(errors.New("handler returned nil watcher")) + } else { + if err := sw.catacomb.Add(watcher); err != nil { + sw.catacomb.Kill(err) + } else { + return watcher.Changes() + } + } + return nil +} + +// tearDown kills the worker with the supplied error; and then kills it with +// any error returned by the handler's TearDown method. +func (sw *StringsWorker) tearDown(err error) { + sw.catacomb.Kill(err) + err = sw.config.Handler.TearDown() + sw.catacomb.Kill(err) +} + +// Kill is part of the worker.Worker interface. +func (sw *StringsWorker) Kill() { + sw.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (sw *StringsWorker) Wait() error { + return sw.catacomb.Wait() +} === added file 'src/github.com/juju/juju/watcher/strings_test.go' --- src/github.com/juju/juju/watcher/strings_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/strings_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,301 @@ +// Copyright 2013-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watcher_test + +import ( + "sync" + "time" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" +) + +type stringsWorkerSuite struct { + coretesting.BaseSuite + worker worker.Worker + actor *stringsHandler +} + +var _ = gc.Suite(&stringsWorkerSuite{}) + +func newStringsHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*stringsHandler, worker.Worker) { + sh := &stringsHandler{ + actions: nil, + handled: make(chan []string, 1), + setupError: setupError, + teardownError: teardownError, + handlerError: handlerError, + watcher: newTestStringsWatcher(), + setupDone: make(chan struct{}), + } + w, err := watcher.NewStringsWorker(watcher.StringsConfig{Handler: sh}) + c.Assert(err, jc.ErrorIsNil) + select { + case <-sh.setupDone: + case <-time.After(coretesting.ShortWait): + c.Error("Failed waiting for stringsHandler.Setup to be called during SetUpTest") + } + return sh, w +} + +func (s *stringsWorkerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.actor, s.worker = newStringsHandlerWorker(c, nil, nil, nil) +} + +func (s *stringsWorkerSuite) TearDownTest(c *gc.C) { + s.stopWorker(c) + s.BaseSuite.TearDownTest(c) +} + +type stringsHandler struct { + actions []string + mu sync.Mutex + // Signal handled when we get a handle() call + handled chan []string + setupError error + teardownError error + handlerError error + watcher *testStringsWatcher + setupDone chan struct{} +} + +func (sh *stringsHandler) SetUp() (watcher.StringsWatcher, error) { + defer func() { sh.setupDone <- struct{}{} }() + sh.mu.Lock() + defer sh.mu.Unlock() + sh.actions = append(sh.actions, "setup") + if sh.watcher == nil { + return nil, sh.setupError + } + return sh.watcher, sh.setupError +} + +func (sh *stringsHandler) TearDown() error { + sh.mu.Lock() + defer sh.mu.Unlock() + sh.actions = append(sh.actions, "teardown") + if sh.handled != nil { + close(sh.handled) + } + return sh.teardownError +} + +func (sh *stringsHandler) Handle(_ <-chan struct{}, changes []string) error { + sh.mu.Lock() + defer sh.mu.Unlock() + sh.actions = 
append(sh.actions, "handler") + if sh.handled != nil { + // Unlock while we are waiting for the send + sh.mu.Unlock() + sh.handled <- changes + sh.mu.Lock() + } + return sh.handlerError +} + +func (sh *stringsHandler) CheckActions(c *gc.C, actions ...string) { + sh.mu.Lock() + defer sh.mu.Unlock() + c.Check(sh.actions, gc.DeepEquals, actions) +} + +// During teardown we try to stop the worker, but don't hang the test suite if +// Stop never returns +func (s *stringsWorkerSuite) stopWorker(c *gc.C) { + if s.worker == nil { + return + } + done := make(chan error) + go func() { + done <- worker.Stop(s.worker) + }() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Check(err, jc.ErrorIsNil) + s.actor = nil + s.worker = nil +} + +func newTestStringsWatcher() *testStringsWatcher { + w := &testStringsWatcher{ + changes: make(chan []string), + } + go func() { + defer w.tomb.Done() + <-w.tomb.Dying() + }() + return w +} + +type testStringsWatcher struct { + tomb tomb.Tomb + changes chan []string + mu sync.Mutex + stopError error +} + +func (tsw *testStringsWatcher) Changes() watcher.StringsChannel { + return tsw.changes +} + +func (tsw *testStringsWatcher) Kill() { + tsw.mu.Lock() + tsw.tomb.Kill(tsw.stopError) + tsw.mu.Unlock() +} + +func (tsw *testStringsWatcher) Wait() error { + return tsw.tomb.Wait() +} + +func (tsw *testStringsWatcher) Stopped() bool { + select { + case <-tsw.tomb.Dead(): + return true + default: + return false + } +} + +func (tsw *testStringsWatcher) SetStopError(err error) { + tsw.mu.Lock() + tsw.stopError = err + tsw.mu.Unlock() +} + +func (tsw *testStringsWatcher) TriggerChange(c *gc.C, changes []string) { + select { + case tsw.changes <- changes: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out trying to trigger a change") + } +} + +func waitForHandledStrings(c *gc.C, handled chan []string, expect []string) { + select { + case changes := <-handled: + c.Assert(changes, gc.DeepEquals, expect) + case <-time.After(coretesting.LongWait): + c.Errorf("handled failed to signal after %s", coretesting.LongWait) + } +} + +func (s *stringsWorkerSuite) TestKill(c *gc.C) { + s.worker.Kill() + err := waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *stringsWorkerSuite) TestStop(c *gc.C) { + err := worker.Stop(s.worker) + c.Assert(err, jc.ErrorIsNil) + // After stop, Wait should return right away + err = waitShort(c, s.worker) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *stringsWorkerSuite) TestWait(c *gc.C) { + done := make(chan error) + go func() { + done <- s.worker.Wait() + }() + // Wait should not return until we've killed the worker + select { + case err := <-done: + c.Errorf("Wait() didn't wait until we stopped it: %v", err) + case <-time.After(coretesting.ShortWait): + } + s.worker.Kill() + err := waitForTimeout(c, done, coretesting.LongWait) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *stringsWorkerSuite) TestCallSetUpAndTearDown(c *gc.C) { + // After calling NewStringsWorker, we should have called setup + s.actor.CheckActions(c, "setup") + // If we kill the worker, it should notice, and call teardown + s.worker.Kill() + err := waitShort(c, s.worker) + c.Check(err, jc.ErrorIsNil) + s.actor.CheckActions(c, "setup", "teardown") + c.Check(s.actor.watcher.Stopped(), jc.IsTrue) +} + +func (s *stringsWorkerSuite) TestChangesTriggerHandler(c *gc.C) { + s.actor.CheckActions(c, "setup") + s.actor.watcher.TriggerChange(c, []string{"aa", "bb"}) + waitForHandledStrings(c, s.actor.handled, []string{"aa", "bb"}) + s.actor.CheckActions(c, "setup", 
"handler") + s.actor.watcher.TriggerChange(c, []string{"cc", "dd"}) + waitForHandledStrings(c, s.actor.handled, []string{"cc", "dd"}) + s.actor.watcher.TriggerChange(c, []string{"ee", "ff"}) + waitForHandledStrings(c, s.actor.handled, []string{"ee", "ff"}) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler") + c.Assert(worker.Stop(s.worker), gc.IsNil) + s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown") +} + +func (s *stringsWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { + // Stop the worker and SetUp again, this time with an error + s.stopWorker(c) + actor, w := newStringsHandlerWorker(c, errors.New("my special error"), nil, nil) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my special error") + actor.CheckActions(c, "setup", "teardown") + c.Check(actor.watcher.Stopped(), jc.IsTrue) +} + +func (s *stringsWorkerSuite) TestWatcherStopFailurePropagates(c *gc.C) { + s.actor.watcher.SetStopError(errors.New("error while stopping watcher")) + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "error while stopping watcher") + // We've already stopped the worker, don't let teardown notice the + // worker is in an error state + s.worker = nil +} + +func (s *stringsWorkerSuite) TestCleanRunNoticesTearDownError(c *gc.C) { + s.actor.teardownError = errors.New("failed to tear down watcher") + s.worker.Kill() + c.Assert(s.worker.Wait(), gc.ErrorMatches, "failed to tear down watcher") + s.worker = nil +} + +func (s *stringsWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { + s.stopWorker(c) + actor, w := newStringsHandlerWorker(c, nil, errors.New("my handling error"), nil) + actor.watcher.TriggerChange(c, []string{"aa", "bb"}) + waitForHandledStrings(c, actor.handled, []string{"aa", "bb"}) + err := waitShort(c, w) + c.Check(err, gc.ErrorMatches, "my handling error") + actor.CheckActions(c, "setup", "handler", "teardown") + c.Check(actor.watcher.Stopped(), jc.IsTrue) +} + +func (s *stringsWorkerSuite) TestNoticesStoppedWatcher(c *gc.C) { + // The default closedHandler doesn't panic if you have a genuine error + // (because it assumes you want to propagate a real error and then + // restart + s.actor.watcher.SetStopError(errors.New("Stopped Watcher")) + s.actor.watcher.Kill() + err := waitShort(c, s.worker) + c.Check(err, gc.ErrorMatches, "Stopped Watcher") + s.actor.CheckActions(c, "setup", "teardown") + // Worker is stopped, don't fail TearDownTest + s.worker = nil +} + +func (s *stringsWorkerSuite) TestErrorsOnClosedChannel(c *gc.C) { + close(s.actor.watcher.changes) + err := waitShort(c, s.worker) + c.Check(err, gc.ErrorMatches, "change channel closed") + s.actor.CheckActions(c, "setup", "teardown") + s.worker = nil +} === added directory 'src/github.com/juju/juju/watcher/watchertest' === added file 'src/github.com/juju/juju/watcher/watchertest/notify.go' --- src/github.com/juju/juju/watcher/watchertest/notify.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/watchertest/notify.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,81 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package watchertest + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" +) + +func NewNotifyWatcherC(c *gc.C, watcher watcher.NotifyWatcher, preAssert func()) NotifyWatcherC { + if preAssert == nil { + preAssert = func() {} + } + return NotifyWatcherC{ + C: c, + Watcher: watcher, + PreAssert: preAssert, + } +} + +type NotifyWatcherC struct { + *gc.C + Watcher watcher.NotifyWatcher + PreAssert func() +} + +// AssertOneChange fails if no change is sent before a long time has passed; or +// if, subsequent to that, any further change is sent before a short time has +// passed. +func (c NotifyWatcherC) AssertOneChange() { + c.PreAssert() + select { + case _, ok := <-c.Watcher.Changes(): + c.Assert(ok, jc.IsTrue) + case <-time.After(testing.LongWait): + c.Fatalf("watcher did not send change") + } + c.AssertNoChange() +} + +// AssertNoChange fails if it manages to read a value from Changes before a +// short time has passed. +func (c NotifyWatcherC) AssertNoChange() { + c.PreAssert() + select { + case _, ok := <-c.Watcher.Changes(): + c.Fatalf("watcher sent unexpected change: (_, %v)", ok) + case <-time.After(testing.ShortWait): + } +} + +// AssertStops Kills the watcher and asserts (1) that Wait completes without +// error before a long time has passed; and (2) that Changes remains open but +// no values are being sent. +func (c NotifyWatcherC) AssertStops() { + c.Watcher.Kill() + wait := make(chan error) + go func() { + c.PreAssert() + wait <- c.Watcher.Wait() + }() + select { + case <-time.After(testing.LongWait): + c.Fatalf("watcher never stopped") + case err := <-wait: + c.Assert(err, jc.ErrorIsNil) + } + + c.PreAssert() + select { + case _, ok := <-c.Watcher.Changes(): + c.Fatalf("watcher sent unexpected change: (_, %v)", ok) + default: + } +} === added file 'src/github.com/juju/juju/watcher/watchertest/relationunits.go' --- src/github.com/juju/juju/watcher/watchertest/relationunits.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/watchertest/relationunits.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,106 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watchertest + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" +) + +// NewRelationUnitsWatcherC returns a RelationUnitsWatcherC that +// checks for aggressive event coalescence. +func NewRelationUnitsWatcherC(c *gc.C, w watcher.RelationUnitsWatcher, preAssert func()) RelationUnitsWatcherC { + if preAssert == nil { + preAssert = func() {} + } + return RelationUnitsWatcherC{ + C: c, + PreAssert: preAssert, + Watcher: w, + settingsVersions: make(map[string]int64), + } +} + +type RelationUnitsWatcherC struct { + *gc.C + Watcher watcher.RelationUnitsWatcher + PreAssert func() + settingsVersions map[string]int64 +} + +func (c RelationUnitsWatcherC) AssertNoChange() { + c.PreAssert() + select { + case actual, ok := <-c.Watcher.Changes(): + c.Fatalf("watcher sent unexpected change: (%#v, %v)", actual, ok) + case <-time.After(testing.ShortWait): + } +} + +// AssertChange asserts the given changes were reported by the watcher, +// but does not assume there are no following changes. +func (c RelationUnitsWatcherC) AssertChange(changed []string, departed []string) { + // Get all items in changed in a map for easy lookup. 
+ changedNames := make(map[string]bool) + for _, name := range changed { + changedNames[name] = true + } + c.PreAssert() + timeout := time.After(testing.LongWait) + select { + case actual, ok := <-c.Watcher.Changes(): + c.Assert(ok, jc.IsTrue) + c.Assert(actual.Changed, gc.HasLen, len(changed)) + // Because the versions can change, we only need to make sure + // the keys match, not the contents (UnitSettings == txnRevno). + for k, settings := range actual.Changed { + _, ok := changedNames[k] + c.Assert(ok, jc.IsTrue) + oldVer, ok := c.settingsVersions[k] + if !ok { + // This is the first time we see this unit, so + // save the settings version for later. + c.settingsVersions[k] = settings.Version + } else { + // Already seen; make sure the version increased. + if settings.Version <= oldVer { + c.Fatalf("expected unit settings version > %d (got %d)", oldVer, settings.Version) + } + } + } + c.Assert(actual.Departed, jc.SameContents, departed) + case <-timeout: + c.Fatalf("watcher did not send change") + } +} + +// AssertStops Kills the watcher and asserts (1) that Wait completes without +// error before a long time has passed; and (2) that Changes remains open but +// no values are being sent. +func (c RelationUnitsWatcherC) AssertStops() { + c.Watcher.Kill() + wait := make(chan error) + go func() { + c.PreAssert() + wait <- c.Watcher.Wait() + }() + select { + case <-time.After(testing.LongWait): + c.Fatalf("watcher never stopped") + case err := <-wait: + c.Assert(err, jc.ErrorIsNil) + } + + c.PreAssert() + select { + case change, ok := <-c.Watcher.Changes(): + c.Fatalf("watcher sent unexpected change: (%#v, %v)", change, ok) + default: + } +} === added file 'src/github.com/juju/juju/watcher/watchertest/strings.go' --- src/github.com/juju/juju/watcher/watchertest/strings.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/watcher/watchertest/strings.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,144 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package watchertest + +import ( + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" +) + +func NewStringsWatcherC(c *gc.C, watcher watcher.StringsWatcher, preAssert func()) StringsWatcherC { + if preAssert == nil { + preAssert = func() {} + } + return StringsWatcherC{ + C: c, + Watcher: watcher, + PreAssert: preAssert, + } +} + +type StringsWatcherC struct { + *gc.C + Watcher watcher.StringsWatcher + PreAssert func() +} + +// AssertChanges fails if it cannot read a value from Changes despite waiting a +// long time. It logs, but does not check, the received changes; but will fail +// if the Changes chan is closed. +func (c StringsWatcherC) AssertChanges() { + c.PreAssert() + select { + case change, ok := <-c.Watcher.Changes(): + c.Logf("received change: %#v", change) + c.Assert(ok, jc.IsTrue) + case <-time.After(testing.LongWait): + c.Fatalf("watcher did not send change") + } + c.AssertNoChange() +} + +// AssertNoChange fails if it manages to read a value from Changes before a +// short time has passed. 
+func (c StringsWatcherC) AssertNoChange() { + c.PreAssert() + select { + case change, ok := <-c.Watcher.Changes(): + c.Fatalf("watcher sent unexpected change: (%#v, %v)", change, ok) + case <-time.After(testing.ShortWait): + } +} + +// AssertStops Kills the watcher and asserts (1) that Wait completes without +// error before a long time has passed; and (2) that Changes remains open but +// no values are being sent. +func (c StringsWatcherC) AssertStops() { + c.Watcher.Kill() + wait := make(chan error) + go func() { + c.PreAssert() + wait <- c.Watcher.Wait() + }() + select { + case <-time.After(testing.LongWait): + c.Fatalf("watcher never stopped") + case err := <-wait: + c.Assert(err, jc.ErrorIsNil) + } + + c.PreAssert() + select { + case change, ok := <-c.Watcher.Changes(): + c.Fatalf("watcher sent unexpected change: (%#v, %v)", change, ok) + default: + } +} + +func (c StringsWatcherC) AssertChange(expect ...string) { + c.assertChange(false, expect...) +} + +func (c StringsWatcherC) AssertChangeInSingleEvent(expect ...string) { + c.assertChange(true, expect...) +} + +// AssertChangeMaybeIncluding verifies that there is a change that may +// contain zero to all of the passed in strings, and no other changes. +func (c StringsWatcherC) AssertChangeMaybeIncluding(expect ...string) { + maxCount := len(expect) + actual := c.collectChanges(true, maxCount) + + if maxCount == 0 { + c.Assert(actual, gc.HasLen, 0) + } else { + actualCount := len(actual) + c.Assert(actualCount <= maxCount, jc.IsTrue, gc.Commentf("expected at most %d, got %d", maxCount, actualCount)) + unexpected := set.NewStrings(actual...).Difference(set.NewStrings(expect...)) + c.Assert(unexpected.Values(), gc.HasLen, 0) + } +} + +// assertChange asserts the given list of changes was reported by +// the watcher, but does not assume there are no following changes. +func (c StringsWatcherC) assertChange(single bool, expect ...string) { + actual := c.collectChanges(single, len(expect)) + if len(expect) == 0 { + c.Assert(actual, gc.HasLen, 0) + } else { + c.Assert(actual, jc.SameContents, expect) + } +} + +// collectChanges gets up to the max number of changes within the +// testing.LongWait period. +func (c StringsWatcherC) collectChanges(single bool, max int) []string { + timeout := time.After(testing.LongWait) + var actual []string + gotOneChange := false +loop: + for { + c.PreAssert() + select { + case changes, ok := <-c.Watcher.Changes(): + c.Assert(ok, jc.IsTrue) + gotOneChange = true + actual = append(actual, changes...) + if single || len(actual) >= max { + break loop + } + case <-timeout: + if !gotOneChange { + c.Fatalf("watcher did not send change") + } + } + } + return actual +} === added file 'src/github.com/juju/juju/worker/addresser/manifold.go' --- src/github.com/juju/juju/worker/addresser/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/addresser/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package addresser + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/addresser" + "github.com/juju/juju/api/base" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig describes the resources used by the addresser worker. +type ManifoldConfig util.ApiManifoldConfig + +// Manifold returns a Manifold that encapsulates the addresser worker. 
+func Manifold(config ManifoldConfig) dependency.Manifold { + return util.ApiManifold( + util.ApiManifoldConfig(config), + manifoldStart, + ) +} + +// manifoldStart creates an addresser worker, given a base.APICaller. +func manifoldStart(apiCaller base.APICaller) (worker.Worker, error) { + api := addresser.NewAPI(apiCaller) + w, err := NewWorker(api) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} === modified file 'src/github.com/juju/juju/worker/addresser/package_test.go' --- src/github.com/juju/juju/worker/addresser/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/addresser/package_test.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,14 @@ import ( stdtesting "testing" + "github.com/juju/testing" + coretesting "github.com/juju/juju/testing" ) func TestPackage(t *stdtesting.T) { + if testing.RaceEnabled { + t.Skip("skipping package under -race, see LP 1519191") + } coretesting.MgoTestPackage(t) } === modified file 'src/github.com/juju/juju/worker/addresser/worker.go' --- src/github.com/juju/juju/worker/addresser/worker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/addresser/worker.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,8 @@ "github.com/juju/loggo" apiaddresser "github.com/juju/juju/api/addresser" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -32,16 +32,18 @@ logger.Debugf("address deallocation not supported; not starting worker") return worker.FinishedWorker{}, nil } - ah := &addresserHandler{ - api: api, + handler := &addresserHandler{api: api} + aw, err := watcher.NewStringsWorker(watcher.StringsConfig{ + Handler: handler, + }) + if err != nil { + return nil, errors.Trace(err) } - aw := worker.NewStringsWorker(ah) return aw, nil } // SetUp is part of the StringsWorker interface. func (a *addresserHandler) SetUp() (watcher.StringsWatcher, error) { - // WatchIPAddresses returns an EntityWatcher which is a StringsWatcher. return a.api.WatchIPAddresses() } @@ -50,8 +52,8 @@ return nil } -// Handle is part of the Worker interface. -func (a *addresserHandler) Handle(watcherTags []string) error { +// Handle is part of the StringsWorker interface. +func (a *addresserHandler) Handle(_ <-chan struct{}, watcherTags []string) error { // Changed IP address lives are reported, clean them up. err := a.api.CleanupIPAddresses() if err != nil { === modified file 'src/github.com/juju/juju/worker/addresser/worker_test.go' --- src/github.com/juju/juju/worker/addresser/worker_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/addresser/worker_test.go 2016-03-22 15:18:22 +0000 @@ -48,7 +48,7 @@ // Unbreak dummy provider methods. 
s.AssertConfigParameterUpdated(c, "broken", "") - s.APIConnection, _ = s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + s.APIConnection, _ = s.OpenAPIAsNewMachine(c, state.JobManageModel) s.API = s.APIConnection.Addresser() machineA, err := s.State.AddMachine("quantal", state.JobHostUnits) @@ -156,7 +156,7 @@ func (s *workerSuite) makeReleaseOp(digit int) dummy.OpReleaseAddress { return dummy.OpReleaseAddress{ - Env: "dummyenv", + Env: "dummymodel", InstanceId: "foo", SubnetId: "foobar", Address: network.NewAddress(fmt.Sprintf("0.1.2.%d", digit)), === modified file 'src/github.com/juju/juju/worker/apiaddressupdater/apiaddressupdater.go' --- src/github.com/juju/juju/worker/apiaddressupdater/apiaddressupdater.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apiaddressupdater/apiaddressupdater.go 2016-03-22 15:18:22 +0000 @@ -6,10 +6,11 @@ import ( "fmt" + "github.com/juju/errors" "github.com/juju/loggo" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -39,25 +40,46 @@ // NewAPIAddressUpdater returns a worker.Worker that watches for changes to // API addresses and then sets them on the APIAddressSetter. -func NewAPIAddressUpdater(addresser APIAddresser, setter APIAddressSetter) worker.Worker { - return worker.NewNotifyWorker(&APIAddressUpdater{ +// TODO(fwereade): this should have a config struct, and some validation. +func NewAPIAddressUpdater(addresser APIAddresser, setter APIAddressSetter) (worker.Worker, error) { + handler := &APIAddressUpdater{ addresser: addresser, setter: setter, + } + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: handler, }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } +// SetUp is part of the watcher.NotifyHandler interface. func (c *APIAddressUpdater) SetUp() (watcher.NotifyWatcher, error) { return c.addresser.WatchAPIHostPorts() } +// Handle is part of the watcher.NotifyHandler interface. func (c *APIAddressUpdater) Handle(_ <-chan struct{}) error { addresses, err := c.addresser.APIHostPorts() if err != nil { return fmt.Errorf("error getting addresses: %v", err) } + // Filter out any LXC bridge addresses. See LP bug #1416928. hpsToSet := make([][]network.HostPort, 0, len(addresses)) for _, hostPorts := range addresses { + // First try to keep only addresses in the default space that all API servers are on. + defaultSpaceHP, ok := network.SelectHostPortBySpace(hostPorts, network.DefaultSpace) + if ok { + hpsToSet = append(hpsToSet, []network.HostPort{defaultSpaceHP}) + continue + } else { + // As a fallback, use the old behavior. + logger.Warningf("cannot determine API addresses by space %q (using all as fallback)", network.DefaultSpace) + } + // Strip ports, filter, then add ports again. filtered := network.FilterLXCAddresses(network.HostsWithoutPort(hostPorts)) hps := make([]network.HostPort, 0, len(filtered)) @@ -72,12 +94,14 @@ hpsToSet = append(hpsToSet, hps) } } + logger.Debugf("updating API hostPorts to %+v", hpsToSet) if err := c.setter.SetAPIHostPorts(hpsToSet); err != nil { return fmt.Errorf("error setting addresses: %v", err) } return nil } +// TearDown is part of the watcher.NotifyHandler interface. 
func (c *APIAddressUpdater) TearDown() error { return nil } === modified file 'src/github.com/juju/juju/worker/apiaddressupdater/apiaddressupdater_test.go' --- src/github.com/juju/juju/worker/apiaddressupdater/apiaddressupdater_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apiaddressupdater/apiaddressupdater_test.go 2016-03-22 15:18:22 +0000 @@ -48,7 +48,8 @@ func (s *APIAddressUpdaterSuite) TestStartStop(c *gc.C) { st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - worker := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), &apiAddressSetter{}) + worker, err := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), &apiAddressSetter{}) + c.Assert(err, jc.ErrorIsNil) worker.Kill() c.Assert(worker.Wait(), gc.IsNil) } @@ -62,7 +63,8 @@ setter := &apiAddressSetter{servers: make(chan [][]network.HostPort, 1)} st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - worker := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), setter) + worker, err := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), setter) + c.Assert(err, jc.ErrorIsNil) defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() @@ -78,7 +80,8 @@ func (s *APIAddressUpdaterSuite) TestAddressChange(c *gc.C) { setter := &apiAddressSetter{servers: make(chan [][]network.HostPort, 1)} st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - worker := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), setter) + worker, err := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), setter) + c.Assert(err, jc.ErrorIsNil) defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() s.BackingState.StartSync() @@ -93,7 +96,7 @@ case servers := <-setter.servers: c.Assert(servers, gc.HasLen, 0) } - err := s.State.SetAPIHostPorts(updatedServers) + err = s.State.SetAPIHostPorts(updatedServers) c.Assert(err, jc.ErrorIsNil) s.BackingState.StartSync() select { @@ -138,7 +141,8 @@ setter := &apiAddressSetter{servers: make(chan [][]network.HostPort, 1)} st, _ := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - worker := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), setter) + worker, err := apiaddressupdater.NewAPIAddressUpdater(st.Machiner(), setter) + c.Assert(err, jc.ErrorIsNil) defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() s.BackingState.StartSync() === modified file 'src/github.com/juju/juju/worker/apiaddressupdater/manifold.go' --- src/github.com/juju/juju/worker/apiaddressupdater/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apiaddressupdater/manifold.go 2016-03-22 15:18:22 +0000 @@ -5,6 +5,7 @@ import ( "github.com/juju/errors" + "github.com/juju/juju/api/machiner" "github.com/juju/names" "github.com/juju/juju/agent" @@ -16,26 +17,33 @@ ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. -type ManifoldConfig util.AgentApiManifoldConfig +type ManifoldConfig util.PostUpgradeManifoldConfig // Manifold returns a dependency manifold that runs an API address updater worker, // using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - return util.AgentApiManifold(util.AgentApiManifoldConfig(config), newWorker) + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) } // newWorker trivially wraps NewAPIAddressUpdater for use in a util.AgentApiManifold. 
-// It's not tested at the moment, because the scaffolding necessary to test these 5 -// lines outweighs them by several times for very little confirmatory power; in the -// long term, all APIAddressUpdaters should be constructed via a manifold, and the -// tests can be updated to reflect that. +// It's not tested at the moment, because the scaffolding necessary is too +// unwieldy/distracting to introduce at this point. var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { - // TODO(fwereade): why on *earth* do we use the *uniter* facade for this - // worker? This code really ought to work anywhere... tag := a.CurrentConfig().Tag() - unitTag, ok := tag.(names.UnitTag) - if !ok { - return nil, errors.Errorf("expected a unit tag; got %q", tag) - } - return NewAPIAddressUpdater(uniter.NewState(apiCaller, unitTag), agent.APIHostPortsSetter{a}), nil + var facade APIAddresser + switch apiTag := tag.(type) { + case names.UnitTag: + facade = uniter.NewState(apiCaller, apiTag) + case names.MachineTag: + facade = machiner.NewState(apiCaller) + default: + return nil, errors.Errorf("expected a unit or machine tag; got %q", tag) + } + + setter := agent.APIHostPortsSetter{a} + w, err := NewAPIAddressUpdater(facade, setter) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } === modified file 'src/github.com/juju/juju/worker/apicaller/export_test.go' --- src/github.com/juju/juju/worker/apicaller/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apicaller/export_test.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,5 @@ package apicaller var ( - OpenConnection = &openConnection - OpenAPIForAgent = &apiOpen - CheckProvisionedStrategy = &checkProvisionedStrategy + OpenConnection = &openConnection ) === modified file 'src/github.com/juju/juju/worker/apicaller/manifold.go' --- src/github.com/juju/juju/worker/apicaller/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apicaller/manifold.go 2016-03-22 15:18:22 +0000 @@ -7,16 +7,15 @@ "github.com/juju/errors" "github.com/juju/juju/agent" + "github.com/juju/juju/api" "github.com/juju/juju/api/base" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" - "github.com/juju/juju/worker/gate" ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. type ManifoldConfig struct { - AgentName string - APIInfoGateName string + AgentName string } // Manifold returns a manifold whose worker wraps an API connection made on behalf of @@ -25,7 +24,6 @@ return dependency.Manifold{ Inputs: []string{ config.AgentName, - config.APIInfoGateName, }, Output: outputFunc, Start: startFunc(config), @@ -38,10 +36,6 @@ return func(getResource dependency.GetResourceFunc) (worker.Worker, error) { // Get dependencies and open a connection. - var gate gate.Unlocker - if err := getResource(config.APIInfoGateName, &gate); err != nil { - return nil, err - } var a agent.Agent if err := getResource(config.AgentName, &a); err != nil { return nil, err @@ -53,38 +47,44 @@ // Add the environment uuid to agent config if not present. 
currentConfig := a.CurrentConfig() - if currentConfig.Environment().Id() == "" { + if currentConfig.Model().Id() == "" { err := a.ChangeConfig(func(setter agent.ConfigSetter) error { - environTag, err := conn.EnvironTag() + modelTag, err := conn.ModelTag() if err != nil { - return errors.Annotate(err, "no environment uuid set on api") + return errors.Annotate(err, "no model uuid set on api") } return setter.Migrate(agent.MigrateParams{ - Environment: environTag, + Model: modelTag, }) }) if err != nil { - logger.Warningf("unable to save environment uuid: %v", err) + logger.Warningf("unable to save model uuid: %v", err) // Not really fatal, just annoying. } } - // Now we know the agent config has been fixed up, notify everyone - // else who might depend upon its stability/correctness. - gate.Unlock() - // Return the worker. return newApiConnWorker(conn) } } -// outputFunc extracts a base.APICaller from a *apiConnWorker. +// outputFunc extracts an API connection from a *apiConnWorker. func outputFunc(in worker.Worker, out interface{}) error { inWorker, _ := in.(*apiConnWorker) - outPointer, _ := out.(*base.APICaller) - if inWorker == nil || outPointer == nil { - return errors.Errorf("expected %T->%T; got %T->%T", inWorker, outPointer, in, out) - } - *outPointer = inWorker.conn + if inWorker == nil { + return errors.Errorf("in should be a %T; got %T", inWorker, in) + } + + switch outPointer := out.(type) { + case *base.APICaller: + *outPointer = inWorker.conn + case *api.Connection: + // Using api.Connection is strongly discouraged as consumers + // of this API connection should not be able to close it. This + // option is only available to support legacy upgrade steps. + *outPointer = inWorker.conn + default: + return errors.Errorf("out should be *base.APICaller or *api.Connection; got %T", out) + } return nil } === modified file 'src/github.com/juju/juju/worker/apicaller/manifold_test.go' --- src/github.com/juju/juju/worker/apicaller/manifold_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apicaller/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -25,7 +25,6 @@ testing.Stub manifold dependency.Manifold agent *mockAgent - gate *mockGate conn *mockConn getResource dependency.GetResourceFunc } @@ -36,20 +35,15 @@ s.IsolationSuite.SetUpTest(c) s.Stub = testing.Stub{} s.manifold = apicaller.Manifold(apicaller.ManifoldConfig{ - AgentName: "agent-name", - APIInfoGateName: "api-info-gate-name", + AgentName: "agent-name", }) s.agent = &mockAgent{ stub: &s.Stub, - env: coretesting.EnvironmentTag, - } - s.gate = &mockGate{ - stub: &s.Stub, + env: coretesting.ModelTag, } s.getResource = dt.StubGetResource(dt.StubResources{ - "agent-name": dt.StubResource{Output: s.agent}, - "api-info-gate-name": dt.StubResource{Output: s.gate}, + "agent-name": dt.StubResource{Output: s.agent}, }) // Watch out for this: it uses its own Stub because Close calls are made from @@ -70,25 +64,12 @@ } func (s *ManifoldSuite) TestInputs(c *gc.C) { - c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent-name", "api-info-gate-name"}) + c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent-name"}) } func (s *ManifoldSuite) TestStartMissingAgent(c *gc.C) { getResource := dt.StubGetResource(dt.StubResources{ - "agent-name": dt.StubResource{Error: dependency.ErrMissing}, - "api-info-gate-name": dt.StubResource{Output: s.gate}, - }) - - worker, err := s.manifold.Start(getResource) - c.Check(worker, gc.IsNil) - c.Check(err, gc.Equals, dependency.ErrMissing) - s.CheckCalls(c, nil) -} - -func (s 
*ManifoldSuite) TestStartMissingGate(c *gc.C) { - getResource := dt.StubGetResource(dt.StubResources{ - "agent-name": dt.StubResource{Output: s.agent}, - "api-info-gate-name": dt.StubResource{Error: dependency.ErrMissing}, + "agent-name": dt.StubResource{Error: dependency.ErrMissing}, }) worker, err := s.manifold.Start(getResource) @@ -116,13 +97,11 @@ s.CheckCalls(c, []testing.StubCall{{ FuncName: "openConnection", Args: []interface{}{s.agent}, - }, { - FuncName: "Unlock", }}) } func (s *ManifoldSuite) setupMutatorTest(c *gc.C) agent.ConfigMutator { - s.agent.env = names.EnvironTag{} + s.agent.env = names.ModelTag{} s.conn.stub = &s.Stub // will be unsafe if worker stopped before test finished s.SetErrors( nil, // openConnection, @@ -133,7 +112,7 @@ c.Assert(err, jc.ErrorIsNil) s.AddCleanup(func(c *gc.C) { assertStop(c, worker) }) - s.CheckCallNames(c, "openConnection", "ChangeConfig", "Unlock") + s.CheckCallNames(c, "openConnection", "ChangeConfig") changeArgs := s.Calls()[1].Args c.Assert(changeArgs, gc.HasLen, 1) s.ResetCalls() @@ -147,11 +126,11 @@ err := mutator(mockSetter) c.Check(err, jc.ErrorIsNil) s.CheckCalls(c, []testing.StubCall{{ - FuncName: "EnvironTag", + FuncName: "ModelTag", }, { FuncName: "Migrate", Args: []interface{}{agent.MigrateParams{ - Environment: coretesting.EnvironmentTag, + Model: coretesting.ModelTag, }}, }}) } @@ -161,9 +140,9 @@ s.SetErrors(errors.New("no tag for you")) err := mutator(nil) - c.Check(err, gc.ErrorMatches, "no environment uuid set on api: no tag for you") + c.Check(err, gc.ErrorMatches, "no model uuid set on api: no tag for you") s.CheckCalls(c, []testing.StubCall{{ - FuncName: "EnvironTag", + FuncName: "ModelTag", }}) } @@ -175,11 +154,11 @@ err := mutator(mockSetter) c.Check(err, gc.ErrorMatches, "migrate failure") s.CheckCalls(c, []testing.StubCall{{ - FuncName: "EnvironTag", + FuncName: "ModelTag", }, { FuncName: "Migrate", Args: []interface{}{agent.MigrateParams{ - Environment: coretesting.EnvironmentTag, + Model: coretesting.ModelTag, }}, }}) } @@ -239,13 +218,18 @@ err := s.manifold.Output(worker, &apicaller) c.Check(err, jc.ErrorIsNil) c.Check(apicaller, gc.Equals, s.conn) + + var conn api.Connection + err = s.manifold.Output(worker, &conn) + c.Check(err, jc.ErrorIsNil) + c.Check(conn, gc.Equals, s.conn) } func (s *ManifoldSuite) TestOutputBadWorker(c *gc.C) { var apicaller base.APICaller err := s.manifold.Output(dummyWorker{}, &apicaller) c.Check(apicaller, gc.IsNil) - c.Check(err.Error(), gc.Equals, "expected *apicaller.apiConnWorker->*base.APICaller; got apicaller_test.dummyWorker->*base.APICaller") + c.Check(err.Error(), gc.Equals, "in should be a *apicaller.apiConnWorker; got apicaller_test.dummyWorker") } func (s *ManifoldSuite) TestOutputBadTarget(c *gc.C) { @@ -254,5 +238,5 @@ var apicaller interface{} err := s.manifold.Output(worker, &apicaller) c.Check(apicaller, gc.IsNil) - c.Check(err.Error(), gc.Equals, "expected *apicaller.apiConnWorker->*base.APICaller; got *apicaller.apiConnWorker->*interface {}") + c.Check(err.Error(), gc.Equals, "out should be *base.APICaller or *api.Connection; got *interface {}") } === modified file 'src/github.com/juju/juju/worker/apicaller/open.go' --- src/github.com/juju/juju/worker/apicaller/open.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/apicaller/open.go 2016-03-22 15:18:22 +0000 @@ -17,7 +17,7 @@ ) var ( - apiOpen = openAPIForAgent + apiOpen = api.Open checkProvisionedStrategy = utils.AttemptStrategy{ Total: 1 * time.Minute, @@ -25,19 +25,6 @@ } ) -// openAPIForAgent 
exists to handle the edge case that exists -when an environment is jumping several versions and doesn't -yet have the environment UUID cached in the agent config. -This happens only the first time an agent tries to connect -after an upgrade. If there is no environment UUID set, then -use login version 1. -func openAPIForAgent(info *api.Info, opts api.DialOpts) (api.Connection, error) { - if info.EnvironTag.Id() == "" { - return api.OpenWithVersion(info, opts, 1) - } - return api.Open(info, opts) -} - // OpenAPIState opens the API using the given information. The agent's // password is changed if the fallback password was used to connect to // the API. @@ -77,7 +64,7 @@ if !usedOldPassword { // Call set password with the current password. If we've recently - // become a state server, this will fix up our credentials in mongo. + // become a controller, this will fix up our credentials in mongo. if err := entity.SetPassword(info.Password); err != nil { return nil, errors.Annotate(err, "can't reset agent password") } === modified file 'src/github.com/juju/juju/worker/apicaller/util_test.go' --- src/github.com/juju/juju/worker/apicaller/util_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/apicaller/util_test.go 2016-03-22 15:18:22 +0000 @@ -18,7 +18,7 @@ type mockAgent struct { agent.Agent stub *testing.Stub - env names.EnvironTag + env names.ModelTag } func (mock *mockAgent) CurrentConfig() agent.Config { @@ -32,10 +32,10 @@ type dummyConfig struct { agent.Config - env names.EnvironTag + env names.ModelTag } -func (dummy dummyConfig) Environment() names.EnvironTag { +func (dummy dummyConfig) Model() names.ModelTag { return dummy.env } @@ -55,12 +55,12 @@ broken chan struct{} } -func (mock *mockConn) EnvironTag() (names.EnvironTag, error) { - mock.stub.AddCall("EnvironTag") +func (mock *mockConn) ModelTag() (names.ModelTag, error) { + mock.stub.AddCall("ModelTag") if err := mock.stub.NextErr(); err != nil { - return names.EnvironTag{}, err + return names.ModelTag{}, err } - return coretesting.EnvironmentTag, nil + return coretesting.ModelTag, nil } func (mock *mockConn) Broken() <-chan struct{} { === added file 'src/github.com/juju/juju/worker/authenticationworker/manifold.go' --- src/github.com/juju/juju/worker/authenticationworker/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/authenticationworker/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package authenticationworker + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/keyupdater" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines the names of the manifolds on which a Manifold will depend. +type ManifoldConfig util.PostUpgradeManifoldConfig + +// Manifold returns a dependency manifold that runs an authenticationworker worker, +// using the resource names defined in the supplied config. 
+func Manifold(config ManifoldConfig) dependency.Manifold { + + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) +} + +func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + apiConn, ok := apiCaller.(api.Connection) + if !ok { + return nil, errors.New("unable to obtain api.Connection") + } + + w, err := NewWorker(keyupdater.NewState(apiConn), a.CurrentConfig()) + if err != nil { + return nil, errors.Annotate(err, "cannot start ssh auth-keys updater worker") + } + return w, nil +} === added file 'src/github.com/juju/juju/worker/authenticationworker/package_test.go' --- src/github.com/juju/juju/worker/authenticationworker/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/authenticationworker/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package authenticationworker_test + +import ( + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/worker/authenticationworker/worker.go' --- src/github.com/juju/juju/worker/authenticationworker/worker.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/authenticationworker/worker.go 2016-03-22 15:18:22 +0000 @@ -9,14 +9,14 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" + "github.com/juju/utils/os" "github.com/juju/utils/set" + "github.com/juju/utils/ssh" "launchpad.net/tomb" "github.com/juju/juju/agent" "github.com/juju/juju/api/keyupdater" - "github.com/juju/juju/api/watcher" - "github.com/juju/juju/utils/ssh" - "github.com/juju/juju/version" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -37,17 +37,27 @@ nonJujuKeys []string } -var _ worker.NotifyWatchHandler = (*keyupdaterWorker)(nil) - // NewWorker returns a worker that keeps track of // the machine's authorised ssh keys and ensures the // ~/.ssh/authorized_keys file is up to date. -func NewWorker(st *keyupdater.State, agentConfig agent.Config) worker.Worker { - if version.Current.OS == version.Windows { - return worker.NewNoOpWorker() - } - kw := &keyupdaterWorker{st: st, tag: agentConfig.Tag().(names.MachineTag)} - return worker.NewNotifyWorker(kw) +func NewWorker(st *keyupdater.State, agentConfig agent.Config) (worker.Worker, error) { + machineTag, ok := agentConfig.Tag().(names.MachineTag) + if !ok { + return nil, errors.NotValidf("machine tag %v", agentConfig.Tag()) + } + if os.HostOS() == os.Windows { + return worker.NewNoOpWorker(), nil + } + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: &keyupdaterWorker{ + st: st, + tag: machineTag, + }, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } // SetUp is defined on the worker.NotifyWatchHandler interface. 
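The handler/worker pattern used throughout this section (SetUp returning a watcher, Handle called once per event with an abort channel, TearDown on the way out) is the same in authenticationworker, apiaddressupdater, and addresser. The sketch below is illustrative only, assuming the watcher package as defined above; exampleFacade, exampleHandler, WatchSomething, ReadSomething, and newExampleWorker are hypothetical names, not code in this tree.

// Illustrative sketch only; hypothetical names, not part of this tree.
package example

import (
	"github.com/juju/errors"

	"github.com/juju/juju/watcher"
	"github.com/juju/juju/worker"
)

// exampleFacade stands in for any API facade exposing a NotifyWatcher.
type exampleFacade interface {
	WatchSomething() (watcher.NotifyWatcher, error)
	ReadSomething() error
}

// exampleHandler implements watcher.NotifyHandler.
type exampleHandler struct {
	facade exampleFacade
}

// SetUp returns the watcher whose events drive Handle; the NotifyWorker
// takes responsibility for killing the watcher and waiting for it.
func (h *exampleHandler) SetUp() (watcher.NotifyWatcher, error) {
	return h.facade.WatchSomething()
}

// Handle runs once per event; a non-nil error stops the worker. The abort
// channel closes when the worker is killed, so blocking work should select
// on it and return nil when aborted.
func (h *exampleHandler) Handle(abort <-chan struct{}) error {
	select {
	case <-abort:
		return nil
	default:
		return h.facade.ReadSomething()
	}
}

// TearDown releases anything created in SetUp or Handle; the watcher
// itself is already the worker's responsibility.
func (h *exampleHandler) TearDown() error {
	return nil
}

// newExampleWorker mirrors the constructors above (NewWorker in
// authenticationworker, NewAPIAddressUpdater): wrap the handler in a
// NotifyConfig and let NewNotifyWorker run it.
func newExampleWorker(facade exampleFacade) (worker.Worker, error) {
	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
		Handler: &exampleHandler{facade: facade},
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return w, nil
}
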
=== modified file 'src/github.com/juju/juju/worker/authenticationworker/worker_test.go' --- src/github.com/juju/juju/worker/authenticationworker/worker_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/authenticationworker/worker_test.go 2016-03-22 15:18:22 +0000 @@ -6,11 +6,12 @@ import ( "runtime" "strings" - stdtesting "testing" "time" "github.com/juju/names" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/ssh" + sshtesting "github.com/juju/utils/ssh/testing" gc "gopkg.in/check.v1" "github.com/juju/juju/agent" @@ -19,24 +20,10 @@ jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/utils/ssh" - sshtesting "github.com/juju/juju/utils/ssh/testing" "github.com/juju/juju/worker" "github.com/juju/juju/worker/authenticationworker" ) -// worstCase is used for timeouts when timing out -// will fail the test. Raising this value should -// not affect the overall running time of the tests -// unless they fail. -const worstCase = 5 * time.Second - -func TestAll(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - -var _ = gc.Suite(&workerSuite{}) - type workerSuite struct { jujutesting.JujuConnSuite stateMachine *state.Machine @@ -47,6 +34,8 @@ existingKeys []string } +var _ = gc.Suite(&workerSuite{}) + func (s *workerSuite) SetUpTest(c *gc.C) { //TODO(bogdanteleaga): Fix this on windows if runtime.GOOS == "windows" { @@ -72,7 +61,7 @@ var apiRoot api.Connection apiRoot, s.machine = s.OpenAPIAsNewMachine(c) c.Assert(apiRoot, gc.NotNil) - s.keyupdaterApi = apiRoot.KeyUpdater() + s.keyupdaterApi = keyupdater.NewState(apiRoot) c.Assert(s.keyupdaterApi, gc.NotNil) } @@ -96,13 +85,13 @@ func (s *workerSuite) setAuthorisedKeys(c *gc.C, keys ...string) { keyStr := strings.Join(keys, "\n") - err := s.BackingState.UpdateEnvironConfig(map[string]interface{}{"authorized-keys": keyStr}, nil, nil) + err := s.BackingState.UpdateModelConfig(map[string]interface{}{"authorized-keys": keyStr}, nil, nil) c.Assert(err, jc.ErrorIsNil) s.BackingState.StartSync() } func (s *workerSuite) waitSSHKeys(c *gc.C, expected []string) { - timeout := time.After(worstCase) + timeout := time.After(coretesting.LongWait) for { select { case <-timeout: @@ -121,7 +110,8 @@ } func (s *workerSuite) TestKeyUpdateRetainsExisting(c *gc.C) { - authWorker := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) newKey := sshtesting.ValidKeyThree.Key + " user@host" @@ -134,7 +124,8 @@ newKey := sshtesting.ValidKeyThree.Key + " user@host" s.setAuthorisedKeys(c, newKey) - authWorker := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) newKeyWithCommentPrefix := sshtesting.ValidKeyThree.Key + " Juju:user@host" @@ -142,7 +133,8 @@ } func (s *workerSuite) TestDeleteKey(c *gc.C) { - authWorker := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) // Add 
another key @@ -157,7 +149,8 @@ } func (s *workerSuite) TestMultipleChanges(c *gc.C) { - authWorker := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) s.waitSSHKeys(c, append(s.existingKeys, s.existingEnvKey)) @@ -170,7 +163,8 @@ } func (s *workerSuite) TestWorkerRestart(c *gc.C) { - authWorker := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err := authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) s.waitSSHKeys(c, append(s.existingKeys, s.existingEnvKey)) @@ -181,7 +175,8 @@ s.setAuthorisedKeys(c, sshtesting.ValidKeyThree.Key+" yetanother@host") // Restart the worker and check that the ssh auth keys are as expected. - authWorker = authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + authWorker, err = authenticationworker.NewWorker(s.keyupdaterApi, agentConfig(c, s.machine.Tag().(names.MachineTag))) + c.Assert(err, jc.ErrorIsNil) defer stop(c, authWorker) yetAnotherKeyWithCommentPrefix := sshtesting.ValidKeyThree.Key + " Juju:yetanother@host" === added directory 'src/github.com/juju/juju/worker/catacomb' === added file 'src/github.com/juju/juju/worker/catacomb/catacomb.go' --- src/github.com/juju/juju/worker/catacomb/catacomb.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/catacomb/catacomb.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,254 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package catacomb + +import ( + "fmt" + "sync" + "sync/atomic" + + "github.com/juju/errors" + "launchpad.net/tomb" + + "github.com/juju/juju/worker" +) + +// Catacomb is a variant of tomb.Tomb with its own internal goroutine, designed +// for coordinating the lifetimes of private workers needed by a single parent. +// +// As a client, you should only ever create zero values; these should be used +// with Invoke to manage a parent task. No Catacomb methods are meaningful +// until the catacomb has been started with a successful Invoke. +// +// See the package documentation for more detailed discussion and usage notes. +type Catacomb struct { + tomb tomb.Tomb + wg sync.WaitGroup + adds chan worker.Worker + dirty int32 +} + +// Plan defines the strategy for an Invoke. +type Plan struct { + + // Site must point to an unused Catacomb. + Site *Catacomb + + // Work will be run on a new goroutine, and tracked by Site. + Work func() error + + // Init contains additional workers for which Site must be responsible. + Init []worker.Worker +} + +// Validate returns an error if the plan cannot be used. It doesn't check for +// reused catacombs: plan validity is necessary but not sufficient to determine +// that an Invoke will succeed. +func (plan Plan) Validate() error { + if plan.Site == nil { + return errors.NotValidf("nil Site") + } + if plan.Work == nil { + return errors.NotValidf("nil Work") + } + for i, w := range plan.Init { + if w == nil { + return errors.NotValidf("nil Init item %d", i) + } + } + return nil +} + +// Invoke uses the plan's catacomb to run the work func. It will return an +// error if the plan is not valid, or if the catacomb has already been used. 
+// If Invoke returns no error, the catacomb is now controlling the work func, +// and its exported methods can be called safely. +// +// Invoke takes responsibility for all workers in plan.Init, *whether or not +// it succeeds*. +func Invoke(plan Plan) (err error) { + + defer func() { + if err != nil { + stopWorkers(plan.Init) + } + }() + + if err := plan.Validate(); err != nil { + return errors.Trace(err) + } + catacomb := plan.Site + if !atomic.CompareAndSwapInt32(&catacomb.dirty, 0, 1) { + return errors.Errorf("catacomb %p has already been used", catacomb) + } + catacomb.adds = make(chan worker.Worker) + + // Add the Init workers right away, so the client can't induce data races + // by modifying the slice post-return. + for _, w := range plan.Init { + catacomb.add(w) + } + + // This goroutine listens for added workers until the catacomb is Killed. + // We ensure the wg can't complete until we know no new workers will be + // added. + catacomb.wg.Add(1) + go func() { + defer catacomb.wg.Done() + for { + select { + case <-catacomb.tomb.Dying(): + return + case w := <-catacomb.adds: + catacomb.add(w) + } + } + }() + + // This goroutine runs the work func and stops the catacomb with its error; + // and waits for the listen goroutine and all added workers to complete + // before marking the catacomb's tomb Dead. + go func() { + defer catacomb.tomb.Done() + defer catacomb.wg.Wait() + catacomb.Kill(plan.Work()) + }() + return nil +} + +// stopWorkers stops all non-nil workers in the supplied slice, and swallows +// all errors. This is consistent, for now, because Catacomb swallows all +// errors but the first; as we come to rank or log errors, this must change +// to accommodate better practices. +func stopWorkers(workers []worker.Worker) { + for _, w := range workers { + if w != nil { + worker.Stop(w) + } + } +} + +// Add causes the supplied worker's lifetime to be bound to the catacomb's, +// relieving the client of responsibility for Kill()ing it and Wait()ing for an +// error, *whether or not this method succeeds*. If the method returns an error, +// it always indicates that the catacomb is shutting down; the value will either +// be the error from the (now-stopped) worker, or catacomb.ErrDying(). +// +// If the worker completes without error, the catacomb will continue unaffected; +// otherwise the catacomb's tomb will be killed with the returned error. This +// allows clients to freely Kill() workers that have been Add()ed; any errors +// encountered will still kill the catacomb, so the workers stay under control +// until the last moment, and so can be managed pretty casually once they've +// been added. + +// +// Don't try to add a worker to its own catacomb; that'll deadlock the shutdown +// procedure. I don't think there's much we can do about that. +func (catacomb *Catacomb) Add(w worker.Worker) error { + select { + case <-catacomb.tomb.Dying(): + if err := worker.Stop(w); err != nil { + return errors.Trace(err) + } + return catacomb.ErrDying() + case catacomb.adds <- w: + // Note that we don't need to wait for confirmation here. This depends + // on the catacomb.wg.Add() for the listen loop, which ensures the wg + // won't complete until no more adds can be received. + return nil + } +} + +// add starts two goroutines that (1) kill the catacomb's tomb with any +// error encountered by the worker; and (2) kill the worker when the +// catacomb starts dying. 
+func (catacomb *Catacomb) add(w worker.Worker) { + + // The coordination via stopped is not reliably observable, and hence not + // tested, but it's yucky to leave the second goroutine running when we + // don't need to. + stopped := make(chan struct{}) + catacomb.wg.Add(1) + go func() { + defer catacomb.wg.Done() + defer close(stopped) + if err := w.Wait(); err != nil { + catacomb.Kill(err) + } + }() + go func() { + select { + case <-stopped: + case <-catacomb.tomb.Dying(): + w.Kill() + } + }() +} + +// Dying returns a channel that will be closed when Kill is called. +func (catacomb *Catacomb) Dying() <-chan struct{} { + return catacomb.tomb.Dying() +} + +// Dead returns a channel that will be closed when Invoke has completed (and +// thus when subsequent calls to Wait() are known not to block). +func (catacomb *Catacomb) Dead() <-chan struct{} { + return catacomb.tomb.Dead() +} + +// Wait blocks until Invoke completes, and returns the first non-nil and +// non-tomb.ErrDying error passed to Kill before Invoke finished. +func (catacomb *Catacomb) Wait() error { + return catacomb.tomb.Wait() +} + +// Kill kills the Catacomb's internal tomb with the supplied error, or one +// derived from it. +// * if it's caused by this catacomb's ErrDying, it passes on tomb.ErrDying. +// * if it's tomb.ErrDying, or caused by another catacomb's ErrDying, it passes +// on a new error complaining about the misuse. +// * all other errors are passed on unmodified. +// It's always safe to call Kill, but errors passed to Kill after the catacomb +// is dead will be ignored. +func (catacomb *Catacomb) Kill(err error) { + if err == tomb.ErrDying { + err = errors.New("bad catacomb Kill: tomb.ErrDying") + } + cause := errors.Cause(err) + if match, ok := cause.(dyingError); ok { + if catacomb != match.catacomb { + err = errors.Errorf("bad catacomb Kill: other catacomb's ErrDying") + } else { + err = tomb.ErrDying + } + } + + // TODO(fwereade) it's pretty clear that this ought to be a Kill(nil), and + // the catacomb should be responsible for ranking errors, just like the + // dependency.Engine does, rather than determining priority by scheduling + // alone. + catacomb.tomb.Kill(err) +} + +// ErrDying returns an error that can be used to Kill *this* catacomb without +// overwriting nil errors. It should only be used when the catacomb is already +// known to be dying; calling this method at any other time will return a +// different error, indicating client misuse. +func (catacomb *Catacomb) ErrDying() error { + select { + case <-catacomb.tomb.Dying(): + return dyingError{catacomb} + default: + return errors.New("bad catacomb ErrDying: still alive") + } +} + +// dyingError holds a reference to the catacomb that created it. +type dyingError struct { + catacomb *Catacomb +} + +// Error is part of the error interface. +func (err dyingError) Error() string { + return fmt.Sprintf("catacomb %p is dying", err.catacomb) +} === added file 'src/github.com/juju/juju/worker/catacomb/catacomb_test.go' --- src/github.com/juju/juju/worker/catacomb/catacomb_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/catacomb/catacomb_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,478 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
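The Invoke contract is worth a concrete illustration: Init workers belong to the catacomb from the moment Invoke is called, whether or not it succeeds, and returning the catacomb's own ErrDying from Work preserves whatever error first killed the catacomb. A minimal sketch, using only the API defined above; the run function and the child worker are hypothetical:

	func run(child worker.Worker) error {
		var site catacomb.Catacomb
		err := catacomb.Invoke(catacomb.Plan{
			Site: &site,
			// Invoke owns child from here on; if Invoke fails,
			// child has already been stopped.
			Init: []worker.Worker{child},
			Work: func() error {
				<-site.Dying()
				return site.ErrDying()
			},
		})
		if err != nil {
			return errors.Trace(err)
		}
		site.Kill(nil)     // stops Work and child alike
		return site.Wait() // nil, unless child failed first
	}

Calling site.ErrDying() is safe there precisely because the catacomb is already dying; calling it on an alive catacomb returns a "bad catacomb ErrDying: still alive" error instead.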
+ +package catacomb_test + +import ( + "sync" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" +) + +type CatacombSuite struct { + testing.IsolationSuite + fix *fixture +} + +var _ = gc.Suite(&CatacombSuite{}) + +func (s *CatacombSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.fix = &fixture{cleaner: s} +} + +func (s *CatacombSuite) TestStartsAlive(c *gc.C) { + s.fix.run(c, func() { + s.fix.assertNotDying(c) + s.fix.assertNotDead(c) + }) +} + +func (s *CatacombSuite) TestKillClosesDying(c *gc.C) { + s.fix.run(c, func() { + s.fix.catacomb.Kill(nil) + s.fix.assertDying(c) + }) +} + +func (s *CatacombSuite) TestKillDoesNotCloseDead(c *gc.C) { + s.fix.run(c, func() { + s.fix.catacomb.Kill(nil) + s.fix.assertNotDead(c) + }) +} + +func (s *CatacombSuite) TestFinishTaskStopsCompletely(c *gc.C) { + s.fix.run(c, func() {}) + + s.fix.assertDying(c) + s.fix.assertDead(c) +} + +func (s *CatacombSuite) TestKillNil(c *gc.C) { + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(nil) + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestKillNonNilOverwritesNil(c *gc.C) { + second := errors.New("blah") + + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(nil) + s.fix.catacomb.Kill(second) + }) + c.Check(err, gc.Equals, second) +} + +func (s *CatacombSuite) TestKillNilDoesNotOverwriteNonNil(c *gc.C) { + first := errors.New("blib") + + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(first) + s.fix.catacomb.Kill(nil) + }) + c.Check(err, gc.Equals, first) +} + +func (s *CatacombSuite) TestKillNonNilDoesNotOverwriteNonNil(c *gc.C) { + first := errors.New("blib") + second := errors.New("blob") + + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(first) + s.fix.catacomb.Kill(second) + }) + c.Check(err, gc.Equals, first) +} + +func (s *CatacombSuite) TestAliveErrDyingDifferent(c *gc.C) { + s.fix.run(c, func() { + notDying := s.fix.catacomb.ErrDying() + c.Check(notDying, gc.ErrorMatches, "bad catacomb ErrDying: still alive") + + s.fix.catacomb.Kill(nil) + dying := s.fix.catacomb.ErrDying() + c.Check(dying, gc.ErrorMatches, "catacomb 0x[0-9a-f]+ is dying") + }) +} + +func (s *CatacombSuite) TestKillAliveErrDying(c *gc.C) { + var notDying error + + err := s.fix.run(c, func() { + notDying = s.fix.catacomb.ErrDying() + s.fix.catacomb.Kill(notDying) + }) + c.Check(err, gc.Equals, notDying) +} + +func (s *CatacombSuite) TestKillErrDyingDoesNotOverwriteNil(c *gc.C) { + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(nil) + errDying := s.fix.catacomb.ErrDying() + s.fix.catacomb.Kill(errDying) + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestKillErrDyingDoesNotOverwriteNonNil(c *gc.C) { + first := errors.New("FRIST!") + + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(first) + errDying := s.fix.catacomb.ErrDying() + s.fix.catacomb.Kill(errDying) + }) + c.Check(err, gc.Equals, first) +} + +func (s *CatacombSuite) TestKillCauseErrDyingDoesNotOverwriteNil(c *gc.C) { + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(nil) + errDying := s.fix.catacomb.ErrDying() + disguised := errors.Annotatef(errDying, "disguised") + s.fix.catacomb.Kill(disguised) + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestKillCauseErrDyingDoesNotOverwriteNonNil(c *gc.C) { + first := errors.New("FRIST!") + + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(first) + errDying := 
s.fix.catacomb.ErrDying() + disguised := errors.Annotatef(errDying, "disguised") + s.fix.catacomb.Kill(disguised) + }) + c.Check(err, gc.Equals, first) +} + +func (s *CatacombSuite) TestKillTombErrDying(c *gc.C) { + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(tomb.ErrDying) + }) + c.Check(err, gc.ErrorMatches, "bad catacomb Kill: tomb.ErrDying") +} + +func (s *CatacombSuite) TestKillErrDyingFromOtherCatacomb(c *gc.C) { + fix2 := &fixture{} + fix2.run(c, func() {}) + errDying := fix2.catacomb.ErrDying() + + err := s.fix.run(c, func() { + s.fix.catacomb.Kill(errDying) + }) + c.Check(err, gc.ErrorMatches, "bad catacomb Kill: other catacomb's ErrDying") +} + +func (s *CatacombSuite) TestStopsAddedWorker(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + + err := s.fix.run(c, func() { + s.fix.assertAddAlive(c, w) + }) + c.Check(err, jc.ErrorIsNil) + w.assertDead(c) +} + +func (s *CatacombSuite) TestStopsInitWorker(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + + err := s.fix.run(c, func() { + w.waitStillAlive(c) + }, w) + c.Check(err, jc.ErrorIsNil) + w.assertDead(c) +} + +func (s *CatacombSuite) TestStoppedWorkerErrorOverwritesNil(c *gc.C) { + expect := errors.New("splot") + w := s.fix.startErrorWorker(c, expect) + + err := s.fix.run(c, func() { + s.fix.assertAddAlive(c, w) + }) + c.Check(err, gc.Equals, expect) + w.assertDead(c) +} + +func (s *CatacombSuite) TestStoppedWorkerErrorDoesNotOverwriteNonNil(c *gc.C) { + expect := errors.New("splot") + w := s.fix.startErrorWorker(c, errors.New("not interesting")) + + err := s.fix.run(c, func() { + s.fix.assertAddAlive(c, w) + s.fix.catacomb.Kill(expect) + }) + c.Check(err, gc.Equals, expect) + w.assertDead(c) +} + +func (s *CatacombSuite) TestAddWhenDyingStopsWorker(c *gc.C) { + err := s.fix.run(c, func() { + w := s.fix.startErrorWorker(c, nil) + s.fix.catacomb.Kill(nil) + expect := s.fix.catacomb.ErrDying() + + err := s.fix.catacomb.Add(w) + c.Assert(err, gc.Equals, expect) + w.assertDead(c) + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestAddWhenDyingReturnsWorkerError(c *gc.C) { + err := s.fix.run(c, func() { + expect := errors.New("squelch") + w := s.fix.startErrorWorker(c, expect) + s.fix.catacomb.Kill(nil) + + actual := s.fix.catacomb.Add(w) + c.Assert(errors.Cause(actual), gc.Equals, expect) + w.assertDead(c) + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestAddWhenDeadStopsWorker(c *gc.C) { + s.fix.run(c, func() {}) + expect := s.fix.catacomb.ErrDying() + + w := s.fix.startErrorWorker(c, nil) + err := s.fix.catacomb.Add(w) + c.Assert(err, gc.Equals, expect) + w.assertDead(c) +} + +func (s *CatacombSuite) TestAddWhenDeadReturnsWorkerError(c *gc.C) { + s.fix.run(c, func() {}) + + expect := errors.New("squelch") + w := s.fix.startErrorWorker(c, expect) + actual := s.fix.catacomb.Add(w) + c.Assert(errors.Cause(actual), gc.Equals, expect) + w.assertDead(c) +} + +func (s *CatacombSuite) TestFailAddedWorkerKills(c *gc.C) { + expect := errors.New("blarft") + w := s.fix.startErrorWorker(c, expect) + + err := s.fix.run(c, func() { + s.fix.assertAddAlive(c, w) + w.Kill() + s.fix.waitDying(c) + }) + c.Check(err, gc.Equals, expect) + w.assertDead(c) +} + +func (s *CatacombSuite) TestAddFailedWorkerKills(c *gc.C) { + expect := errors.New("blarft") + w := s.fix.startErrorWorker(c, expect) + w.stop() + + err := s.fix.run(c, func() { + err := s.fix.catacomb.Add(w) + c.Assert(err, jc.ErrorIsNil) + s.fix.waitDying(c) + }) + c.Check(err, gc.Equals, expect) +} + +func (s *CatacombSuite) 
TestInitFailedWorkerKills(c *gc.C) { + expect := errors.New("blarft") + w := s.fix.startErrorWorker(c, expect) + w.stop() + + err := s.fix.run(c, func() { + s.fix.waitDying(c) + }, w) + c.Check(err, gc.Equals, expect) +} + +func (s *CatacombSuite) TestFinishAddedWorkerDoesNotKill(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + + err := s.fix.run(c, func() { + s.fix.assertAddAlive(c, w) + w.Kill() + + w2 := s.fix.startErrorWorker(c, nil) + s.fix.assertAddAlive(c, w2) + }) + c.Check(err, jc.ErrorIsNil) + w.assertDead(c) +} + +func (s *CatacombSuite) TestAddFinishedWorkerDoesNotKill(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + w.stop() + + err := s.fix.run(c, func() { + err := s.fix.catacomb.Add(w) + c.Assert(err, jc.ErrorIsNil) + + w2 := s.fix.startErrorWorker(c, nil) + s.fix.assertAddAlive(c, w2) + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestInitFinishedWorkerDoesNotKill(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + w.stop() + + err := s.fix.run(c, func() { + w2 := s.fix.startErrorWorker(c, nil) + s.fix.assertAddAlive(c, w2) + }, w) + c.Check(err, jc.ErrorIsNil) +} + +func (s *CatacombSuite) TestStress(c *gc.C) { + const workerCount = 1000 + workers := make([]*errorWorker, 0, workerCount) + + // Just add a whole bunch of workers... + err := s.fix.run(c, func() { + for i := 0; i < workerCount; i++ { + w := s.fix.startErrorWorker(c, errors.Errorf("error %d", i)) + err := s.fix.catacomb.Add(w) + c.Check(err, jc.ErrorIsNil) + workers = append(workers, w) + } + }) + + // ...and check that one of them killed the catacomb when it shut down; + // and that all of them have been stopped. + c.Check(err, gc.ErrorMatches, "error [0-9]+") + for _, w := range workers { + defer w.assertDead(c) + } +} + +func (s *CatacombSuite) TestStressAddKillRaces(c *gc.C) { + const workerCount = 500 + + // This construct lets us run a bunch of funcs "simultaneously"... + var wg sync.WaitGroup + block := make(chan struct{}) + together := func(f func()) { + wg.Add(1) + go func() { + defer wg.Done() + <-block + f() + }() + } + + // ...so we can queue up a whole bunch of adds/kills... + errFailed := errors.New("pow") + w := s.fix.startErrorWorker(c, errFailed) + err := s.fix.run(c, func() { + for i := 0; i < workerCount; i++ { + together(func() { + // NOTE: we reuse the same worker, largely for brevity's sake; + // the important thing is that it already exists so we can hit + // Add() as soon as possible, just like the Kill() below. + if err := s.fix.catacomb.Add(w); err != nil { + cause := errors.Cause(err) + c.Check(cause, gc.Equals, errFailed) + } + }) + together(func() { + s.fix.catacomb.Kill(errFailed) + }) + } + + // ...then activate them all and see what happens. 
+ close(block) + wg.Wait() + }) + cause := errors.Cause(err) + c.Check(cause, gc.Equals, errFailed) +} + +func (s *CatacombSuite) TestReusedCatacomb(c *gc.C) { + var site catacomb.Catacomb + err := catacomb.Invoke(catacomb.Plan{ + Site: &site, + Work: func() error { return nil }, + }) + c.Check(err, jc.ErrorIsNil) + err = site.Wait() + c.Check(err, jc.ErrorIsNil) + + w := s.fix.startErrorWorker(c, nil) + err = catacomb.Invoke(catacomb.Plan{ + Site: &site, + Work: func() error { return nil }, + Init: []worker.Worker{w}, + }) + c.Check(err, gc.ErrorMatches, "catacomb 0x[0-9a-f]+ has already been used") + w.assertDead(c) +} + +func (s *CatacombSuite) TestPlanBadSite(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + plan := catacomb.Plan{ + Work: func() error { panic("no") }, + Init: []worker.Worker{w}, + } + checkInvalid(c, plan, "nil Site not valid") + w.assertDead(c) +} + +func (s *CatacombSuite) TestPlanBadWork(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + plan := catacomb.Plan{ + Site: &catacomb.Catacomb{}, + Init: []worker.Worker{w}, + } + checkInvalid(c, plan, "nil Work not valid") + w.assertDead(c) +} + +func (s *CatacombSuite) TestPlanBadInit(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + plan := catacomb.Plan{ + Site: &catacomb.Catacomb{}, + Work: func() error { panic("no") }, + Init: []worker.Worker{w, nil}, + } + checkInvalid(c, plan, "nil Init item 1 not valid") + w.assertDead(c) +} + +func (s *CatacombSuite) TestPlanDataRace(c *gc.C) { + w := s.fix.startErrorWorker(c, nil) + plan := catacomb.Plan{ + Site: &catacomb.Catacomb{}, + Work: func() error { return nil }, + Init: []worker.Worker{w}, + } + err := catacomb.Invoke(plan) + c.Assert(err, jc.ErrorIsNil) + + plan.Init[0] = nil +} + +func checkInvalid(c *gc.C, plan catacomb.Plan, match string) { + check := func(err error) { + c.Check(err, gc.ErrorMatches, match) + c.Check(err, jc.Satisfies, errors.IsNotValid) + } + check(plan.Validate()) + check(catacomb.Invoke(plan)) +} === added file 'src/github.com/juju/juju/worker/catacomb/doc.go' --- src/github.com/juju/juju/worker/catacomb/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/catacomb/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,139 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +/* +Catacomb leverages tomb.Tomb to bind the lifetimes of, and track the errors +of, a group of related workers. It's intended to be close to a drop-in +replacement for a Tomb: if you're implementing a worker, the only differences +should be (1) a slightly different creation dance; and (2) you can later call +.Add(aWorker) to bind the worker's lifetime to the catacomb's, and cause errors +from that worker to be exposed via the catacomb. Oh, and there's no global +ErrDying to induce surprising panics when misused. + +This approach costs many extra goroutines over tomb.v2, but is slightly more +robust because Catacomb.Add() verifies worker registration, and is thus safer +than Tomb.Go(); and, of course, because it's designed to integrate with the +worker.Worker model already common in juju. + +Note that a Catacomb is *not* a worker itself, despite the internal goroutine; +it's a tool to help you construct workers, just like tomb.Tomb. + +The canonical expected construction of a catacomb-based worker is as follows: + + type someWorker struct { + config Config + catacomb catacomb.Catacomb + // more fields... 
+ } + + func NewWorker(config Config) (worker.Worker, error) { + + // This chunk is exactly as you'd expect for a tomb worker: just + // create the instance with an implicit zero catacomb. + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + w := &someWorker{ + config: config, + // more fields... + } + + // Here, instead of starting one's own boilerplate goroutine, just + // hand responsibility over to the catacomb package. Evidently, it's + // pretty hard to get this code wrong, so some might think it'd be ok + // to write a panicky `MustInvoke(*Catacomb, func() error)`; please + // don't do this in juju. (Anything that can go wrong will. Let's not + // tempt fate.) + err := catacomb.Invoke(catacomb.Plan{ + Site: &w.catacomb, + Work: w.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil + } + +...with the standard Kill and Wait implementations just as expected: + + func (w *someWorker) Kill() { + w.catacomb.Kill(nil) + } + + func (w *someWorker) Wait() error { + return w.catacomb.Wait() + } + +...and the ability for loop code to create workers and bind their lifetimes +to the parent without risking the common misuse of a deferred watcher.Stop() +that targets the parent's tomb -- which risks causing an initiating loop error +to be overwritten by a later error from the Stop. Thus, while the Add in: + + func (w *someWorker) loop() error { + watch, err := w.config.Facade.WatchSomething() + if err != nil { + return errors.Annotate(err, "cannot watch something") + } + if err := w.catacomb.Add(watch); err != nil { + // Note that Add takes responsibility for the supplied worker; + // if the catacomb can't accept the worker (because it's already + // dying) it will stop the worker and directly return any error + // thus encountered. + return errors.Trace(err) + } + + for { + select { + case <-w.catacomb.Dying(): + // The other important difference is that there's no package- + // level ErrDying -- it's just too risky. Catacombs supply + // their own ErrDying errors, and won't panic when they see them + // coming from other catacombs. + return w.catacomb.ErrDying() + case change, ok := <-watch.Changes(): + if !ok { + // Note: as discussed below, watcher.EnsureErr is an + // antipattern. To actually write this code, we need to + // (1) turn watchers into workers and (2) stop watchers + // closing their channels on error. + return errors.New("something watch failed") + } + if err := w.handle(change); err != nil { + return errors.Trace(err) + } + } + } + } + +...is not *obviously* superior to `defer watcher.Stop(watch, &w.tomb)`, it +does in fact behave better; and, furthermore, is more amenable to future +extension (watcher.Stop is fine *if* the watcher is started in NewWorker, +and deferred to run *after* the tomb is killed with the loop error; but that +becomes unwieldy when more than one watcher/worker is needed, and profoundly +tedious when the set is either large or dynamic). + +And that's not even getting into the issues with `watcher.EnsureErr`: this +exists entirely because we picked a strange interface for watchers (Stop and +Err, instead of Kill and Wait) that's not amenable to clean error-gathering; +so we decided to signal worker errors with a closed change channel. + +This solved the immediate problem, but caused us to add EnsureErr to make sure +we still failed with *some* error if the watcher closed the chan without error: +either because it broke its contract, or if some *other* component stopped the +watcher cleanly. 
That is not ideal: it would be far better *never* to close. +Then we can expect clients to Add the watch to a catacomb to handle lifetime, +and they can expect the Changes channel to deliver changes alone. + +Of course, client code still has to handle closed channels: once the scope of +a chan gets beyond a single type, all users have to be properly paranoid, and +e.g. expect channels to be closed even when the contract explicitly says they +won't. But that's easy to track, and easy to handle -- just return an error +complaining that the watcher broke its contract. Done. + +It's also important to note that you can easily manage dynamic workers: once +you've Add()ed the worker you can freely Kill() it at any time; so long as it +cleans itself up successfully, and returns no error from Wait(), it will be +silently unregistered and leave the catacomb otherwise unaffected. And that +might happen in the loop goroutine; but it'll work just fine from anywhere. +*/ +package catacomb === added file 'src/github.com/juju/juju/worker/catacomb/fixture_test.go' --- src/github.com/juju/juju/worker/catacomb/fixture_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/catacomb/fixture_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,134 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package catacomb_test + +import ( + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" +) + +type cleaner interface { + AddCleanup(testing.CleanupFunc) +} + +type fixture struct { + catacomb catacomb.Catacomb + cleaner cleaner +} + +func (fix *fixture) run(c *gc.C, task func(), init ...worker.Worker) error { + err := catacomb.Invoke(catacomb.Plan{ + Site: &fix.catacomb, + Work: func() error { task(); return nil }, + Init: init, + }) + c.Assert(err, jc.ErrorIsNil) + + select { + case <-fix.catacomb.Dead(): + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } + return fix.catacomb.Wait() +} + +func (fix *fixture) waitDying(c *gc.C) { + select { + case <-fix.catacomb.Dying(): + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out; still alive") + } +} + +func (fix *fixture) assertDying(c *gc.C) { + select { + case <-fix.catacomb.Dying(): + default: + c.Fatalf("still alive") + } +} + +func (fix *fixture) assertNotDying(c *gc.C) { + select { + case <-fix.catacomb.Dying(): + c.Fatalf("already dying") + default: + } +} + +func (fix *fixture) assertDead(c *gc.C) { + select { + case <-fix.catacomb.Dead(): + default: + c.Fatalf("not dead") + } +} + +func (fix *fixture) assertNotDead(c *gc.C) { + select { + case <-fix.catacomb.Dead(): + c.Fatalf("already dead") + default: + } +} + +func (fix *fixture) assertAddAlive(c *gc.C, w *errorWorker) { + err := fix.catacomb.Add(w) + c.Assert(err, jc.ErrorIsNil) + w.waitStillAlive(c) +} + +func (fix *fixture) startErrorWorker(c *gc.C, err error) *errorWorker { + ew := &errorWorker{} + go func() { + defer ew.tomb.Done() + defer ew.tomb.Kill(err) + <-ew.tomb.Dying() + }() + fix.cleaner.AddCleanup(func(_ *gc.C) { + ew.stop() + }) + return ew +} + +type errorWorker struct { + tomb tomb.Tomb +} + +func (ew *errorWorker) Kill() { + ew.tomb.Kill(nil) +} + +func (ew *errorWorker) Wait() error { + return ew.tomb.Wait() +} + +func (ew *errorWorker) stop() { + ew.Kill() + ew.Wait() +} + +func (ew *errorWorker) waitStillAlive(c 
*gc.C) { + select { + case <-ew.tomb.Dying(): + c.Fatalf("already dying") + case <-time.After(coretesting.ShortWait): + } +} + +func (ew *errorWorker) assertDead(c *gc.C) { + select { + case <-ew.tomb.Dead(): + default: + c.Fatalf("not yet dead") + } +} === added file 'src/github.com/juju/juju/worker/catacomb/package_test.go' --- src/github.com/juju/juju/worker/catacomb/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/catacomb/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package catacomb_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === modified file 'src/github.com/juju/juju/worker/certupdater/certupdater.go' --- src/github.com/juju/juju/worker/certupdater/certupdater.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/certupdater/certupdater.go 2016-03-22 15:18:22 +0000 @@ -10,27 +10,27 @@ "github.com/juju/loggo" "github.com/juju/utils/set" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cert" "github.com/juju/juju/environs/config" "github.com/juju/juju/network" "github.com/juju/juju/state" + "github.com/juju/juju/watcher/legacy" "github.com/juju/juju/worker" ) var logger = loggo.GetLogger("juju.worker.certupdater") -// CertificateUpdater is responsible for generating state server certificates. +// CertificateUpdater is responsible for generating controller certificates. // -// In practice, CertificateUpdater is used by a state server's machine agent to watch +// In practice, CertificateUpdater is used by a controller's machine agent to watch // that server's machines addresses in state, and write a new certificate to the // agent's config file. type CertificateUpdater struct { addressWatcher AddressWatcher getter StateServingInfoGetter setter StateServingInfoSetter - configGetter EnvironConfigGetter + configGetter ModelConfigGetter hostPortsGetter APIHostPortsGetter addresses []network.Address } @@ -42,10 +42,10 @@ Addresses() (addresses []network.Address) } -// EnvironConfigGetter is an interface that is provided to NewCertificateUpdater +// ModelConfigGetter is an interface that is provided to NewCertificateUpdater // which can be used to get environment config. -type EnvironConfigGetter interface { - EnvironConfig() (*config.Config, error) +type ModelConfigGetter interface { + ModelConfig() (*config.Config, error) } // StateServingInfoGetter is an interface that is provided to NewCertificateUpdater @@ -59,18 +59,18 @@ type StateServingInfoSetter func(info params.StateServingInfo, done <-chan struct{}) error // APIHostPortsGetter is an interface that is provided to NewCertificateUpdater -// whose APIHostPorts method will be invoked to get state server addresses. +// whose APIHostPorts method will be invoked to get controller addresses. type APIHostPortsGetter interface { APIHostPorts() ([][]network.HostPort, error) } // NewCertificateUpdater returns a worker.Worker that watches for changes to -// machine addresses and then generates a new state server certificate with those +// machine addresses and then generates a new controller certificate with those // addresses in the certificate's SAN value. 
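The switch from EnvironConfigGetter to ModelConfigGetter is mechanical: any type with a matching ModelConfig method satisfies it. A minimal sketch of a conforming stub, modelled on the mockConfigGetter in the certupdater tests below (the type name is illustrative):

	type stubConfigGetter struct{}

	// ModelConfig satisfies certupdater.ModelConfigGetter.
	func (stubConfigGetter) ModelConfig() (*config.Config, error) {
		return config.New(config.NoDefaults, coretesting.FakeConfig())
	}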
func NewCertificateUpdater(addressWatcher AddressWatcher, getter StateServingInfoGetter, - configGetter EnvironConfigGetter, hostPortsGetter APIHostPortsGetter, setter StateServingInfoSetter, + configGetter ModelConfigGetter, hostPortsGetter APIHostPortsGetter, setter StateServingInfoSetter, ) worker.Worker { - return worker.NewNotifyWorker(&CertificateUpdater{ + return legacy.NewNotifyWorker(&CertificateUpdater{ addressWatcher: addressWatcher, configGetter: configGetter, hostPortsGetter: hostPortsGetter, @@ -80,7 +80,7 @@ } // SetUp is defined on the NotifyWatchHandler interface. -func (c *CertificateUpdater) SetUp() (watcher.NotifyWatcher, error) { +func (c *CertificateUpdater) SetUp() (state.NotifyWatcher, error) { // Populate certificate SAN with any addresses we know about now. apiHostPorts, err := c.hostPortsGetter.APIHostPorts() if err != nil { @@ -131,13 +131,13 @@ return nil } // Grab the env config and update a copy with ca cert private key. - envConfig, err := c.configGetter.EnvironConfig() + envConfig, err := c.configGetter.ModelConfig() if err != nil { - return errors.Annotate(err, "cannot read environment config") + return errors.Annotate(err, "cannot read model config") } envConfig, err = envConfig.Apply(map[string]interface{}{"ca-private-key": caPrivateKey}) if err != nil { - return errors.Annotate(err, "cannot add CA private key to environment config") + return errors.Annotate(err, "cannot add CA private key to model config") } // For backwards compatibility, we must include "anything", "juju-apiserver" @@ -161,10 +161,10 @@ return nil } - // Generate a new state server certificate with the machine addresses in the SAN value. - newCert, newKey, err := envConfig.GenerateStateServerCertAndKey(newServerAddrs) + // Generate a new controller certificate with the machine addresses in the SAN value. + newCert, newKey, err := envConfig.GenerateControllerCertAndKey(newServerAddrs) if err != nil { - return errors.Annotate(err, "cannot generate state server certificate") + return errors.Annotate(err, "cannot generate controller certificate") } stateInfo.Cert = string(newCert) stateInfo.PrivateKey = string(newKey) @@ -172,7 +172,7 @@ if err != nil { return errors.Annotate(err, "cannot write agent config") } - logger.Infof("State Server cerificate addresses updated to %q", newServerAddrs) + logger.Infof("controller certificate addresses updated to %q", newServerAddrs) return nil } === modified file 'src/github.com/juju/juju/worker/certupdater/certupdater_test.go' --- src/github.com/juju/juju/worker/certupdater/certupdater_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/certupdater/certupdater_test.go 2016-03-22 15:18:22 +0000 @@ -90,7 +90,7 @@ type mockConfigGetter struct{} -func (g *mockConfigGetter) EnvironConfig() (*config.Config, error) { +func (g *mockConfigGetter) ModelConfig() (*config.Config, error) { return config.New(config.NoDefaults, coretesting.FakeConfig()) } === added directory 'src/github.com/juju/juju/worker/charmrevision' === added directory 'src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold' === added file 'src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/manifold.go' --- src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package charmrevisionmanifold + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/utils/clock" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/charmrevisionupdater" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/charmrevision" + "github.com/juju/juju/worker/dependency" +) + +// ManifoldConfig describes how to create a worker that checks for updates +// available to deployed charms in an environment. +type ManifoldConfig struct { + + // The named dependencies will be exposed to the start func as resources. + APICallerName string + ClockName string + + // The remaining dependencies will be used with the resources to configure + // and create the worker. The period must be greater than 0; the NewFacade + // and NewWorker fields must not be nil. charmrevision.NewWorker, and + // NewAPIFacade, are suitable implementations for most clients. + Period time.Duration + NewFacade func(base.APICaller) (Facade, error) + NewWorker func(charmrevision.Config) (worker.Worker, error) +} + +// Manifold returns a dependency.Manifold that runs a charm revision worker +// according to the supplied configuration. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{ + config.APICallerName, + config.ClockName, + }, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + var clock clock.Clock + if err := getResource(config.ClockName, &clock); err != nil { + return nil, errors.Trace(err) + } + var apiCaller base.APICaller + if err := getResource(config.APICallerName, &apiCaller); err != nil { + return nil, errors.Trace(err) + } + facade, err := config.NewFacade(apiCaller) + if err != nil { + return nil, errors.Annotatef(err, "cannot create facade") + } + + worker, err := config.NewWorker(charmrevision.Config{ + RevisionUpdater: facade, + Clock: clock, + Period: config.Period, + }) + if err != nil { + return nil, errors.Annotatef(err, "cannot create worker") + } + return worker, nil + }, + } +} + +// NewAPIFacade returns a Facade backed by the supplied APICaller. +func NewAPIFacade(apiCaller base.APICaller) (Facade, error) { + return charmrevisionupdater.NewState(apiCaller), nil +} + +// Facade has all the controller methods used by the charm revision worker. +type Facade interface { + charmrevision.RevisionUpdater +} === added file 'src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/manifold_test.go' --- src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,175 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
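ManifoldConfig deliberately separates resource names from constructor funcs, so the start func can be exercised with stubs (as the tests below do). As a hedged sketch of how such a manifold might be declared in an agent's manifold map -- the map keys are illustrative, and the 24-hour period simply mirrors the interval of the charmrevisionworker removed later in this diff:

	manifolds := dependency.Manifolds{
		"charm-revision-updater": charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{
			APICallerName: "api-caller",
			ClockName:     "clock",
			Period:        24 * time.Hour,
			NewFacade:     charmrevisionmanifold.NewAPIFacade,
			NewWorker:     charmrevision.NewWorker,
		}),
	}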
+ +package charmrevisionmanifold_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/charmrevision" + "github.com/juju/juju/worker/charmrevision/charmrevisionmanifold" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" +) + +type ManifoldSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) TestManifold(c *gc.C) { + manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{ + APICallerName: "billy", + ClockName: "bob", + }) + + c.Check(manifold.Inputs, jc.DeepEquals, []string{"billy", "bob"}) + c.Check(manifold.Start, gc.NotNil) + c.Check(manifold.Output, gc.IsNil) +} + +func (s *ManifoldSuite) TestMissingAPICaller(c *gc.C) { + manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{ + APICallerName: "api-caller", + ClockName: "clock", + }) + + _, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "api-caller": dt.StubResource{Error: dependency.ErrMissing}, + "clock": dt.StubResource{Output: fakeClock{}}, + })) + c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (s *ManifoldSuite) TestMissingClock(c *gc.C) { + manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{ + APICallerName: "api-caller", + ClockName: "clock", + }) + + _, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "api-caller": dt.StubResource{Output: fakeAPICaller{}}, + "clock": dt.StubResource{Error: dependency.ErrMissing}, + })) + c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (s *ManifoldSuite) TestNewFacadeError(c *gc.C) { + fakeAPICaller := &fakeAPICaller{} + + stub := testing.Stub{} + manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{ + APICallerName: "api-caller", + ClockName: "clock", + NewFacade: func(apiCaller base.APICaller) (charmrevisionmanifold.Facade, error) { + stub.AddCall("NewFacade", apiCaller) + return nil, errors.New("blefgh") + }, + }) + + _, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "api-caller": dt.StubResource{Output: fakeAPICaller}, + "clock": dt.StubResource{Output: fakeClock{}}, + })) + c.Check(err, gc.ErrorMatches, "cannot create facade: blefgh") + stub.CheckCalls(c, []testing.StubCall{{ + "NewFacade", []interface{}{fakeAPICaller}, + }}) +} + +func (s *ManifoldSuite) TestNewWorkerError(c *gc.C) { + fakeClock := &fakeClock{} + fakeFacade := &fakeFacade{} + fakeAPICaller := &fakeAPICaller{} + + stub := testing.Stub{} + manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{ + APICallerName: "api-caller", + ClockName: "clock", + NewFacade: func(apiCaller base.APICaller) (charmrevisionmanifold.Facade, error) { + stub.AddCall("NewFacade", apiCaller) + return fakeFacade, nil + }, + NewWorker: func(config charmrevision.Config) (worker.Worker, error) { + stub.AddCall("NewWorker", config) + return nil, errors.New("snrght") + }, + }) + + _, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "api-caller": dt.StubResource{Output: fakeAPICaller}, + "clock": dt.StubResource{Output: fakeClock}, + })) + c.Check(err, gc.ErrorMatches, "cannot create worker: snrght") + stub.CheckCalls(c, []testing.StubCall{{ + "NewFacade", []interface{}{fakeAPICaller}, + }, { + "NewWorker", 
[]interface{}{charmrevision.Config{ + RevisionUpdater: fakeFacade, + Clock: fakeClock, + }}, + }}) +} + +func (s *ManifoldSuite) TestSuccess(c *gc.C) { + fakeClock := &fakeClock{} + fakeFacade := &fakeFacade{} + fakeWorker := &fakeWorker{} + fakeAPICaller := &fakeAPICaller{} + + stub := testing.Stub{} + manifold := charmrevisionmanifold.Manifold(charmrevisionmanifold.ManifoldConfig{ + APICallerName: "api-caller", + ClockName: "clock", + Period: 10 * time.Minute, + NewFacade: func(apiCaller base.APICaller) (charmrevisionmanifold.Facade, error) { + stub.AddCall("NewFacade", apiCaller) + return fakeFacade, nil + }, + NewWorker: func(config charmrevision.Config) (worker.Worker, error) { + stub.AddCall("NewWorker", config) + return fakeWorker, nil + }, + }) + + w, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "api-caller": dt.StubResource{Output: fakeAPICaller}, + "clock": dt.StubResource{Output: fakeClock}, + })) + c.Check(w, gc.Equals, fakeWorker) + c.Check(err, jc.ErrorIsNil) + stub.CheckCalls(c, []testing.StubCall{{ + "NewFacade", []interface{}{fakeAPICaller}, + }, { + "NewWorker", []interface{}{charmrevision.Config{ + Period: 10 * time.Minute, + RevisionUpdater: fakeFacade, + Clock: fakeClock, + }}, + }}) +} + +type fakeAPICaller struct { + base.APICaller +} + +type fakeClock struct { + clock.Clock +} + +type fakeWorker struct { + worker.Worker +} + +type fakeFacade struct { + charmrevisionmanifold.Facade +} === added file 'src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/package_test.go' --- src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/charmrevisionmanifold/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrevisionmanifold_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/charmrevision/package_test.go' --- src/github.com/juju/juju/worker/charmrevision/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrevision_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/charmrevision/validate_test.go' --- src/github.com/juju/juju/worker/charmrevision/validate_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/validate_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,70 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
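The fake types at the end of manifold_test.go lean on Go interface embedding: embedding the interface satisfies it at compile time while implementing none of its methods, so any unexpected call panics and fails the test loudly. The same idiom in isolation, with a hypothetical interface:

	type Doer interface {
		Do() error
	}

	// fakeDoer satisfies Doer without implementing it; calling Do on
	// the zero value panics, which is exactly what a test wants for
	// methods it never expects to run.
	type fakeDoer struct {
		Doer
	}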
+ +package charmrevision_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/worker/charmrevision" +) + +type ValidateSuite struct { + testing.IsolationSuite + config charmrevision.Config +} + +var _ = gc.Suite(&ValidateSuite{}) + +func (s *ValidateSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.config = charmrevision.Config{ + RevisionUpdater: struct{ charmrevision.RevisionUpdater }{}, + Clock: struct{ clock.Clock }{}, + Period: time.Hour, + } +} + +func (s *ValidateSuite) TestValid(c *gc.C) { + err := s.config.Validate() + c.Check(err, jc.ErrorIsNil) +} + +func (s *ValidateSuite) TestNilRevisionUpdater(c *gc.C) { + s.config.RevisionUpdater = nil + s.checkNotValid(c, "nil RevisionUpdater not valid") +} + +func (s *ValidateSuite) TestNilClock(c *gc.C) { + s.config.Clock = nil + s.checkNotValid(c, "nil Clock not valid") +} + +func (s *ValidateSuite) TestBadPeriods(c *gc.C) { + for i, period := range []time.Duration{ + 0, -time.Nanosecond, -time.Hour, + } { + c.Logf("test %d", i) + s.config.Period = period + s.checkNotValid(c, "non-positive Period not valid") + } +} + +func (s *ValidateSuite) checkNotValid(c *gc.C, match string) { + check := func(err error) { + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, match) + } + err := s.config.Validate() + check(err) + + worker, err := charmrevision.NewWorker(s.config) + c.Check(worker, gc.IsNil) + check(err) +} === added file 'src/github.com/juju/juju/worker/charmrevision/worker.go' --- src/github.com/juju/juju/worker/charmrevision/worker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/worker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,106 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrevision + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/utils/clock" + "launchpad.net/tomb" + + "github.com/juju/juju/worker" +) + +// RevisionUpdater exposes the "single" capability required by the worker. +// As the worker gains more responsibilities, it will likely need more; see +// storageprovisioner for a helpful model to grow towards. +type RevisionUpdater interface { + + // UpdateLatestRevisions causes the environment to be scanned, the charm + // store to be interrogated, and model representations of updated charms + // to be stored in the environment. + // + // That is sufficiently complex that the logic should be implemented by + // the worker, not directly on the apiserver; as this functionality needs + // to change/mature, please migrate responsibilities down to the worker + // and grow this interface to match. + UpdateLatestRevisions() error +} + +// Config defines the operation of a charm revision updater worker. +type Config struct { + + // RevisionUpdater is the worker's view of the controller. + RevisionUpdater RevisionUpdater + + // Clock is the worker's view of time. + Clock clock.Clock + + // Period is the time between charm revision updates. + Period time.Duration +} + +// Validate returns an error if the configuration cannot be expected +// to start a functional worker. 
+func (config Config) Validate() error { + if config.RevisionUpdater == nil { + return errors.NotValidf("nil RevisionUpdater") + } + if config.Clock == nil { + return errors.NotValidf("nil Clock") + } + if config.Period <= 0 { + return errors.NotValidf("non-positive Period") + } + return nil +} + +// NewWorker returns a worker that calls UpdateLatestRevisions on the +// configured RevisionUpdater, once when started and subsequently every +// Period. +func NewWorker(config Config) (worker.Worker, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + w := &revisionUpdateWorker{ + config: config, + } + go func() { + defer w.tomb.Done() + w.tomb.Kill(w.loop()) + }() + return w, nil +} + +type revisionUpdateWorker struct { + tomb tomb.Tomb + config Config +} + +func (ruw *revisionUpdateWorker) loop() error { + var delay time.Duration + for { + select { + case <-ruw.tomb.Dying(): + return tomb.ErrDying + case <-ruw.config.Clock.After(delay): + err := ruw.config.RevisionUpdater.UpdateLatestRevisions() + if err != nil { + return errors.Trace(err) + } + } + delay = ruw.config.Period + } +} + +// Kill is part of the worker.Worker interface. +func (ruw *revisionUpdateWorker) Kill() { + ruw.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (ruw *revisionUpdateWorker) Wait() error { + return ruw.tomb.Wait() +} === added file 'src/github.com/juju/juju/worker/charmrevision/worker_test.go' --- src/github.com/juju/juju/worker/charmrevision/worker_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/charmrevision/worker_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,158 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrevision_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/charmrevision" +) + +type WorkerSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&WorkerSuite{}) + +func (s *WorkerSuite) TestUpdatesImmediately(c *gc.C) { + fix := newFixture(time.Minute) + fix.cleanTest(c, func(_ worker.Worker) { + fix.waitCall(c) + fix.waitNoCall(c) + }) + fix.revisionUpdater.stub.CheckCallNames(c, "UpdateLatestRevisions") +} + +func (s *WorkerSuite) TestNoMoreUpdatesUntilPeriod(c *gc.C) { + fix := newFixture(time.Minute) + fix.cleanTest(c, func(_ worker.Worker) { + fix.waitCall(c) + fix.clock.Advance(time.Minute - time.Nanosecond) + fix.waitNoCall(c) + }) + fix.revisionUpdater.stub.CheckCallNames(c, "UpdateLatestRevisions") +} + +func (s *WorkerSuite) TestUpdatesAfterPeriod(c *gc.C) { + fix := newFixture(time.Minute) + fix.cleanTest(c, func(_ worker.Worker) { + fix.waitCall(c) + fix.clock.Advance(time.Minute) + fix.waitCall(c) + fix.waitNoCall(c) + }) + fix.revisionUpdater.stub.CheckCallNames(c, "UpdateLatestRevisions", "UpdateLatestRevisions") +} + +func (s *WorkerSuite) TestImmediateUpdateError(c *gc.C) { + fix := newFixture(time.Minute) + fix.revisionUpdater.stub.SetErrors( + errors.New("no updates for you"), + ) + fix.dirtyTest(c, func(w worker.Worker) { + fix.waitCall(c) + c.Check(w.Wait(), gc.ErrorMatches, "no updates for you") + fix.waitNoCall(c) + }) + fix.revisionUpdater.stub.CheckCallNames(c, "UpdateLatestRevisions") +} + +func (s *WorkerSuite) TestDelayedUpdateError(c *gc.C) { + fix := newFixture(time.Minute) + fix.revisionUpdater.stub.SetErrors( 
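The loop above is what gives NewWorker its contract: delay starts at zero, so the first UpdateLatestRevisions happens immediately, and delay is only set to Period once that call succeeds. A minimal construction sketch; the facade value is assumed to come from something like charmrevisionmanifold.NewAPIFacade, and clock.WallClock is the production clock from github.com/juju/utils/clock:

	w, err := charmrevision.NewWorker(charmrevision.Config{
		RevisionUpdater: facade,
		Clock:           clock.WallClock,
		Period:          24 * time.Hour,
	})
	if err != nil {
		return errors.Trace(err)
	}
	// w updates charm revisions immediately, then every 24 hours, until
	// Kill()ed; Wait() reports the first UpdateLatestRevisions error.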
+ nil, + errors.New("no more updates for you"), + ) + fix.dirtyTest(c, func(w worker.Worker) { + fix.waitCall(c) + fix.clock.Advance(time.Minute) + fix.waitCall(c) + c.Check(w.Wait(), gc.ErrorMatches, "no more updates for you") + fix.waitNoCall(c) + }) + fix.revisionUpdater.stub.CheckCallNames(c, "UpdateLatestRevisions", "UpdateLatestRevisions") +} + +// workerFixture isolates a charmrevision worker for testing. +type workerFixture struct { + revisionUpdater mockRevisionUpdater + clock *coretesting.Clock + period time.Duration +} + +func newFixture(period time.Duration) workerFixture { + return workerFixture{ + revisionUpdater: newMockRevisionUpdater(), + clock: coretesting.NewClock(time.Now()), + period: period, + } +} + +type testFunc func(worker.Worker) + +func (fix workerFixture) cleanTest(c *gc.C, test testFunc) { + fix.runTest(c, test, true) +} + +func (fix workerFixture) dirtyTest(c *gc.C, test testFunc) { + fix.runTest(c, test, false) +} + +func (fix workerFixture) runTest(c *gc.C, test testFunc, checkWaitErr bool) { + w, err := charmrevision.NewWorker(charmrevision.Config{ + RevisionUpdater: fix.revisionUpdater, + Clock: fix.clock, + Period: fix.period, + }) + c.Assert(err, jc.ErrorIsNil) + defer func() { + err := worker.Stop(w) + if checkWaitErr { + c.Check(err, jc.ErrorIsNil) + } + }() + test(w) +} + +func (fix workerFixture) waitCall(c *gc.C) { + select { + case <-fix.revisionUpdater.calls: + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } +} + +func (fix workerFixture) waitNoCall(c *gc.C) { + select { + case <-fix.revisionUpdater.calls: + c.Fatalf("unexpected revisionUpdater call") + case <-time.After(coretesting.ShortWait): + } +} + +// mockRevisionUpdater records (and notifies of) calls made to UpdateLatestRevisions. +type mockRevisionUpdater struct { + stub *testing.Stub + calls chan struct{} +} + +func newMockRevisionUpdater() mockRevisionUpdater { + return mockRevisionUpdater{ + stub: &testing.Stub{}, + calls: make(chan struct{}, 1000), + } +} + +func (mock mockRevisionUpdater) UpdateLatestRevisions() error { + mock.stub.AddCall("UpdateLatestRevisions") + mock.calls <- struct{}{} + return mock.stub.NextErr() +} === removed directory 'src/github.com/juju/juju/worker/charmrevisionworker' === removed file 'src/github.com/juju/juju/worker/charmrevisionworker/export_test.go' --- src/github.com/juju/juju/worker/charmrevisionworker/export_test.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/worker/charmrevisionworker/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,6 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmrevisionworker - -var Interval = &interval === removed file 'src/github.com/juju/juju/worker/charmrevisionworker/revisionupdater.go' --- src/github.com/juju/juju/worker/charmrevisionworker/revisionupdater.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/charmrevisionworker/revisionupdater.go 1970-01-01 00:00:00 +0000 @@ -1,90 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmrevisionworker - -import ( - "fmt" - "time" - - "github.com/juju/errors" - "github.com/juju/loggo" - "launchpad.net/tomb" - - "github.com/juju/juju/api/charmrevisionupdater" - "github.com/juju/juju/worker" -) - -var logger = loggo.GetLogger("juju.worker.charmrevisionworker") - -// interval sets how often the resuming is called. 
-var interval = 24 * time.Hour - -var _ worker.Worker = (*RevisionUpdateWorker)(nil) - -// RevisionUpdateWorker is responsible for a periodical retrieval of charm versions -// from the charm store, and recording the revision status for deployed charms. -type RevisionUpdateWorker struct { - st *charmrevisionupdater.State - tomb tomb.Tomb -} - -// NewRevisionUpdateWorker periodically retrieves charm versions from the charm store. -func NewRevisionUpdateWorker(st *charmrevisionupdater.State) *RevisionUpdateWorker { - ruw := &RevisionUpdateWorker{st: st} - go func() { - defer ruw.tomb.Done() - ruw.tomb.Kill(ruw.loop()) - }() - return ruw -} - -func (ruw *RevisionUpdateWorker) String() string { - return fmt.Sprintf("charm version lookup worker") -} - -// Stop stops the worker. -func (ruw *RevisionUpdateWorker) Stop() error { - ruw.tomb.Kill(nil) - return ruw.tomb.Wait() -} - -// Kill is defined on the worker.Worker interface. -func (ruw *RevisionUpdateWorker) Kill() { - ruw.tomb.Kill(nil) -} - -// Wait is defined on the worker.Worker interface. -func (ruw *RevisionUpdateWorker) Wait() error { - return ruw.tomb.Wait() -} - -func (ruw *RevisionUpdateWorker) loop() error { - err := ruw.updateVersions() - if err != nil { - return err - } - for { - select { - case <-ruw.tomb.Dying(): - return tomb.ErrDying - case <-time.After(interval): - err := ruw.updateVersions() - if err != nil { - return err - } - } - } -} - -func (ruw *RevisionUpdateWorker) updateVersions() error { - return UpdateVersions(ruw) -} - -var UpdateVersions = func(ruw *RevisionUpdateWorker) error { - if err := ruw.st.UpdateLatestRevisions(); err != nil { - logger.Errorf("cannot process charms: %v", err) - return errors.Annotatef(err, "failed updating charms") - } - return nil -} === removed file 'src/github.com/juju/juju/worker/charmrevisionworker/revisionupdater_test.go' --- src/github.com/juju/juju/worker/charmrevisionworker/revisionupdater_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/charmrevisionworker/revisionupdater_test.go 1970-01-01 00:00:00 +0000 @@ -1,130 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charmrevisionworker_test - -import ( - stdtesting "testing" - "time" - - "github.com/juju/errors" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/api" - "github.com/juju/juju/apiserver/charmrevisionupdater/testing" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/testcharms" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/charmrevisionworker" -) - -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - -type RevisionUpdateSuite struct { - testing.CharmSuite - jujutesting.JujuConnSuite - - st api.Connection - versionUpdater *charmrevisionworker.RevisionUpdateWorker -} - -var _ = gc.Suite(&RevisionUpdateSuite{}) - -func (s *RevisionUpdateSuite) SetUpSuite(c *gc.C) { - c.Assert(*charmrevisionworker.Interval, gc.Equals, 24*time.Hour) - s.JujuConnSuite.SetUpSuite(c) - s.CharmSuite.SetUpSuite(c, &s.JujuConnSuite) -} - -func (s *RevisionUpdateSuite) TearDownSuite(c *gc.C) { - s.JujuConnSuite.TearDownSuite(c) -} - -func (s *RevisionUpdateSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.CharmSuite.SetUpTest(c) - - machine, err := s.State.AddMachine("quantal", state.JobManageEnviron) - c.Assert(err, jc.ErrorIsNil) - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = machine.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - err = machine.SetProvisioned("i-manager", "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - s.st = s.OpenAPIAsMachine(c, machine.Tag(), password, "fake_nonce") - c.Assert(s.st, gc.NotNil) -} - -func (s *RevisionUpdateSuite) TearDownTest(c *gc.C) { - s.JujuConnSuite.TearDownTest(c) -} - -func (s *RevisionUpdateSuite) runUpdater(c *gc.C, updateInterval time.Duration) { - s.PatchValue(charmrevisionworker.Interval, updateInterval) - revisionUpdaterState := s.st.CharmRevisionUpdater() - c.Assert(revisionUpdaterState, gc.NotNil) - - s.versionUpdater = charmrevisionworker.NewRevisionUpdateWorker(revisionUpdaterState) - s.AddCleanup(func(c *gc.C) { s.versionUpdater.Stop() }) -} - -func (s *RevisionUpdateSuite) checkCharmRevision(c *gc.C, expectedRev int) bool { - checkRevision := func() bool { - curl := charm.MustParseURL("cs:quantal/mysql") - placeholder, err := s.State.LatestPlaceholderCharm(curl) - return err == nil && placeholder.String() == curl.WithRevision(expectedRev).String() - } - - success := false - for attempt := coretesting.LongAttempt.Start(); attempt.Next(); { - if success = checkRevision(); success { - break - } - } - return success -} - -func (s *RevisionUpdateSuite) TestVersionUpdateRunsInitially(c *gc.C) { - s.SetupScenario(c) - - // Run the updater with a long update interval to ensure only the initial - // update on startup is run. - s.runUpdater(c, time.Hour) - c.Assert(s.checkCharmRevision(c, 23), jc.IsTrue) -} - -func (s *RevisionUpdateSuite) TestVersionUpdateRunsPeriodically(c *gc.C) { - s.SetupScenario(c) - - // Start the updater and check the initial status. - s.runUpdater(c, 5*time.Millisecond) - c.Assert(s.checkCharmRevision(c, 23), jc.IsTrue) - - // Make some changes - id := charm.MustParseReference("~who/quantal/mysql-24") - ch := testcharms.Repo.CharmArchive(c.MkDir(), id.Name) - s.Server.UploadCharm(c, ch, id, true) - // Check the results of the latest changes. 
- c.Assert(s.checkCharmRevision(c, 24), jc.IsTrue) -} - -func (s *RevisionUpdateSuite) TestDiesOnError(c *gc.C) { - mockUpdate := func(ruw *charmrevisionworker.RevisionUpdateWorker) error { - return errors.New("boo") - } - s.PatchValue(&charmrevisionworker.UpdateVersions, mockUpdate) - - revisionUpdaterState := s.st.CharmRevisionUpdater() - c.Assert(revisionUpdaterState, gc.NotNil) - - versionUpdater := charmrevisionworker.NewRevisionUpdateWorker(revisionUpdaterState) - err := versionUpdater.Stop() - c.Assert(errors.Cause(err), gc.ErrorMatches, "boo") -} === modified file 'src/github.com/juju/juju/worker/cleaner/cleaner.go' --- src/github.com/juju/juju/worker/cleaner/cleaner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/cleaner/cleaner.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,10 @@ package cleaner import ( + "github.com/juju/errors" "github.com/juju/loggo" - "github.com/juju/juju/api/watcher" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -24,8 +25,14 @@ // NewCleaner returns a worker.Worker that runs state.Cleanup() // if the CleanupWatcher signals documents marked for deletion. -func NewCleaner(st StateCleaner) worker.Worker { - return worker.NewNotifyWorker(&Cleaner{st}) +func NewCleaner(st StateCleaner) (worker.Worker, error) { + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: &Cleaner{st: st}, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } func (c *Cleaner) SetUp() (watcher.NotifyWatcher, error) { === modified file 'src/github.com/juju/juju/worker/cleaner/cleaner_test.go' --- src/github.com/juju/juju/worker/cleaner/cleaner_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/cleaner/cleaner_test.go 2016-03-22 15:18:22 +0000 @@ -5,22 +5,18 @@ import ( "errors" - stdtesting "testing" "time" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" + "launchpad.net/tomb" - "github.com/juju/juju/api/watcher" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" "github.com/juju/juju/worker/cleaner" ) -func TestPackage(t *stdtesting.T) { - gc.TestingT(t) -} - type CleanerSuite struct { coretesting.BaseSuite mockState *cleanerMock @@ -28,14 +24,12 @@ var _ = gc.Suite(&CleanerSuite{}) -var _ worker.NotifyWatchHandler = (*cleaner.Cleaner)(nil) - func (s *CleanerSuite) SetUpTest(c *gc.C) { s.BaseSuite.SetUpTest(c) s.mockState = &cleanerMock{ calls: make(chan string), } - s.mockState.watcher = newMockNotifyWatcher(nil) + s.mockState.watcher = s.newMockNotifyWatcher(nil) } func (s *CleanerSuite) AssertReceived(c *gc.C, expect string) { @@ -56,7 +50,8 @@ } func (s *CleanerSuite) TestCleaner(c *gc.C) { - cln := cleaner.NewCleaner(s.mockState) + cln, err := cleaner.NewCleaner(s.mockState) + c.Assert(err, jc.ErrorIsNil) defer func() { c.Assert(worker.Stop(cln), jc.ErrorIsNil) }() s.AssertReceived(c, "WatchCleanups") @@ -68,29 +63,74 @@ func (s *CleanerSuite) TestWatchCleanupsError(c *gc.C) { s.mockState.err = []error{errors.New("hello")} - cln := cleaner.NewCleaner(s.mockState) + cln, err := cleaner.NewCleaner(s.mockState) + c.Assert(err, jc.ErrorIsNil) s.AssertReceived(c, "WatchCleanups") s.AssertEmpty(c) - err := worker.Stop(cln) + err = worker.Stop(cln) c.Assert(err, gc.ErrorMatches, "hello") } func (s *CleanerSuite) TestCleanupError(c *gc.C) { s.mockState.err = []error{nil, errors.New("hello")} - cln := cleaner.NewCleaner(s.mockState) + cln, err := cleaner.NewCleaner(s.mockState) + c.Assert(err, jc.ErrorIsNil) s.AssertReceived(c, 
"WatchCleanups") s.AssertReceived(c, "Cleanup") - err := worker.Stop(cln) + err = worker.Stop(cln) c.Assert(err, jc.ErrorIsNil) log := c.GetTestLog() c.Assert(log, jc.Contains, "ERROR juju.worker.cleaner cannot cleanup state: hello") } +func (s *CleanerSuite) newMockNotifyWatcher(err error) *mockNotifyWatcher { + m := &mockNotifyWatcher{ + changes: make(chan struct{}, 1), + err: err, + } + go func() { + defer m.tomb.Done() + defer m.tomb.Kill(m.err) + <-m.tomb.Dying() + }() + s.AddCleanup(func(c *gc.C) { + err := worker.Stop(m) + c.Check(err, jc.ErrorIsNil) + }) + m.Change() + return m +} + +type mockNotifyWatcher struct { + watcher.NotifyWatcher + + tomb tomb.Tomb + err error + changes chan struct{} +} + +func (m *mockNotifyWatcher) Kill() { + m.tomb.Kill(nil) +} + +func (m *mockNotifyWatcher) Wait() error { + return m.tomb.Wait() +} + +func (m *mockNotifyWatcher) Changes() watcher.NotifyChannel { + return m.changes +} + +func (m *mockNotifyWatcher) Change() { + m.changes <- struct{}{} +} + // cleanerMock is used to check the // calls of Cleanup() and WatchCleanups() type cleanerMock struct { + cleaner.StateCleaner watcher *mockNotifyWatcher calls chan string err []error @@ -113,40 +153,3 @@ m.calls <- "WatchCleanups" return m.watcher, m.getError() } - -var _ cleaner.StateCleaner = (*cleanerMock)(nil) - -type mockNotifyWatcher struct { - watcher.NotifyWatcher - - err error - changes chan struct{} -} - -func newMockNotifyWatcher(err error) *mockNotifyWatcher { - m := &mockNotifyWatcher{ - changes: make(chan struct{}, 1), - err: err, - } - m.Change() - return m -} - -func (m *mockNotifyWatcher) Err() error { - return m.err -} - -func (m *mockNotifyWatcher) Changes() <-chan struct{} { - if m.err != nil { - close(m.changes) - } - return m.changes -} - -func (m *mockNotifyWatcher) Stop() error { - return m.err -} - -func (m *mockNotifyWatcher) Change() { - m.changes <- struct{}{} -} === added file 'src/github.com/juju/juju/worker/cleaner/manifold.go' --- src/github.com/juju/juju/worker/cleaner/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/cleaner/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cleaner + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/cleaner" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig describes the resources used by the cleanup worker. +type ManifoldConfig util.ApiManifoldConfig + +// Manifold returns a Manifold that encapsulates the cleanup worker. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.ApiManifold( + util.ApiManifoldConfig(config), + manifoldStart, + ) +} + +// manifoldStart creates a cleaner worker, given a base.APICaller. +func manifoldStart(apiCaller base.APICaller) (worker.Worker, error) { + api := cleaner.NewAPI(apiCaller) + w, err := NewCleaner(api) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} === added file 'src/github.com/juju/juju/worker/cleaner/package_test.go' --- src/github.com/juju/juju/worker/cleaner/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/cleaner/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package cleaner_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === modified file 'src/github.com/juju/juju/worker/conv2state/converter.go' --- src/github.com/juju/juju/worker/conv2state/converter.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/conv2state/converter.go 2016-03-22 15:18:22 +0000 @@ -9,17 +9,16 @@ "github.com/juju/names" apimachiner "github.com/juju/juju/api/machiner" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/worker" + "github.com/juju/juju/watcher" ) var logger = loggo.GetLogger("juju.worker.conv2state") // New returns a new notify watch handler that will convert the given machine & -// agent to a state server. -func New(m *apimachiner.State, agent Agent) worker.NotifyWatchHandler { +// agent to a controller. +func New(m *apimachiner.State, agent Agent) watcher.NotifyHandler { return &converter{machiner: wrapper{m}, agent: agent} } === modified file 'src/github.com/juju/juju/worker/conv2state/converter_test.go' --- src/github.com/juju/juju/worker/conv2state/converter_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/conv2state/converter_test.go 2016-03-22 15:18:22 +0000 @@ -62,7 +62,7 @@ func (s Suite) TestHandle(c *gc.C) { a := &fakeAgent{tag: names.NewMachineTag("1")} - jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageEnviron} + jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageModel} m := &fakeMachine{ jobs: ¶ms.JobsResult{Jobs: jobs}, } @@ -92,7 +92,7 @@ func (Suite) TestHandleJobsError(c *gc.C) { a := &fakeAgent{tag: names.NewMachineTag("1")} - jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageEnviron} + jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageModel} m := &fakeMachine{ jobs: ¶ms.JobsResult{Jobs: jobs}, jobsErr: errors.New("foo"), @@ -111,7 +111,7 @@ tag: names.NewMachineTag("1"), restartErr: errors.New("foo"), } - jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageEnviron} + jobs := []multiwatcher.MachineJob{multiwatcher.JobHostUnits, multiwatcher.JobManageModel} m := &fakeMachine{ jobs: ¶ms.JobsResult{Jobs: jobs}, } === modified file 'src/github.com/juju/juju/worker/conv2state/fakes_test.go' --- src/github.com/juju/juju/worker/conv2state/fakes_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/conv2state/fakes_test.go 2016-03-22 15:18:22 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/names" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" ) type fakeMachiner struct { @@ -41,15 +41,14 @@ type fakeWatcher struct{} -func (fakeWatcher) Changes() <-chan struct{} { - return nil -} - -func (fakeWatcher) Stop() error { - return nil -} - -func (fakeWatcher) Err() error { +func (fakeWatcher) Changes() watcher.NotifyChannel { + return nil +} + +func (fakeWatcher) Kill() { +} + +func (fakeWatcher) Wait() error { return nil } === modified file 'src/github.com/juju/juju/worker/dependency/doc.go' --- src/github.com/juju/juju/worker/dependency/doc.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/dependency/doc.go 2016-03-22 15:18:22 +0000 @@ -5,8 +5,9 @@ The dependency package exists to address a general problem with shared resources and the management of their lifetimes. 
Many kinds of software handle these issues -with more or less felicity, but it's particularly important the juju (a distributed -system that needs to be very fault-tolerant) handle them clearly and sanely. +with more or less felicity, but it's particularly important that juju (which is +a distributed system that needs to be very fault-tolerant) handle them clearly +and sanely. Background ---------- @@ -63,7 +64,8 @@ * The singleton is dangerous specifically because its dependency interactions are unclear. Absolute clarity of dependencies, as provided by the nesting approaches, - is in fact critical. + is in fact critical; but the sheer convenience of the singleton is alluring, and + reminds us that the approach we take must remain easy to use. The various nesting approaches give easy access to directly-available resources, which is great, but will fail as soon as you have a sufficiently sophisticated @@ -82,6 +84,10 @@ The package is intended to implement the following developer stories: + * As a developer trying to understand the codebase, I want to know what workers + are running in an agent at any given time. + * As a developer, I want to be prevented from introducing dependency cycles + into my application. * As a developer, I want to provide a service provided by some worker to one or more client workers. * As a developer, I want to write a service that consumes one or more other @@ -114,17 +120,15 @@ so, when there are bugs, we're more likely to shake them out in automated testing before they hit users. -We'd also like to implement these stories, which go together, and should be -added when their absence becomes inconvenient: +We'd maybe also like to implement this story: - * As a developer, I want to be prevented from introducing dependency cycles - into my application. [NOT DONE] - * As a developer trying to understand the codebase, I want to know what workers - are running in an agent at any given time. [NOT DONE] * As a developer, I want to add and remove groups of workers atomically, e.g. - when starting the set of state-server workers for a hosted environ; or when + when starting the set of controller workers for a hosted environ; or when starting the set of workers used by a single unit. [NOT DONE] +...but there's no urgent use case yet, and it's not certain to be superior to an +engine-nesting approach. + Solution -------- @@ -146,20 +150,68 @@ will be restarted whenever there is a material change to their accessible dependencies. +When the weight of manifolds in a single engine becomes inconvenient, group them +and run them inside nested dependency.Engines; the Report() method on the top- level engine will collect information from (directly-) contained engines, so at +least there's still some observability; but there may also be call to pass +actual dependencies down from one engine to another, and that'll demand careful +thought. + Usage ----- In each worker package, write a `manifold.go` containing the following: + // ManifoldConfig holds the information necessary to configure the worker + // controlled by a Manifold. type ManifoldConfig struct { + // The names of the various dependencies, e.g. APICallerName string MachineLockName string + + // Any other required top-level configuration, e.g. + Period time.Duration } + // Manifold returns a manifold that controls the operation of a worker + // responsible for <whatever>, configured as supplied. func Manifold(config ManifoldConfig) dependency.Manifold { // Your code here...
+ return dependency.Manifold{ + + // * certainly include each of your configured dependency names, + // getResource will only expose them if you declare them here. + Inputs: []string{config.APICallerName, config.MachineLockName}, + + // * certainly include a start func, it will panic if you don't. + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + // You presumably want to get your dependencies, and you almost + // certainly want to be closed over `config`... + var apicaller base.APICaller + if err := getResource(config.APICallerName, &apicaller); err != nil { + return nil, err + } + var machineLock *fslock.Lock + if err := getResource(config.MachineLockName, &machineLock); err != nil { + return nil, err + } + return newSomethingWorker(apicaller, machineLock, config.Period) + }, + + // * output func is not obligatory, and should be skipped if you + // don't know what you'll be exposing or to whom. + // * see `worker/machinelock`, `worker/gate`, `worker/util`, and + // `worker/dependency/testing` for examples of output funcs. + // * if you do supply an output func, be sure to document it on the + // Manifold func; for example: + // + // // Manifold exposes Foo and Bar resources, which can be + // // accessed by passing a *Foo or a *Bar in the output + // // parameter of its dependencies' getResource calls. + Output: nil, + } } ...and take care to construct your manifolds *only* via that function; *all* @@ -171,6 +223,44 @@ definition of manifolds that depend on an API caller; on an agent; or on both. +Testing +------- + +The `worker/dependency/testing` package, commonly imported as "dt", exposes a +`StubResource` that is helpful for testing `Start` funcs in decent isolation, +with mocked dependencies. Tests for `Inputs` and `Output` are generally pretty +specific to their precise context and don't seem to benefit much from +generalisation. + + +Special considerations +---------------------- + +The nodes in your *dependency* graph must be acyclic; this does not imply that +the *information flow* must be acyclic. Indeed, it is common for separate +components to need to synchronise their actions; but the implementation of +Engine makes it inconvenient for either one to depend on the other (and +impossible for both to do so). + +When a set of manifolds needs to encode a set of services whose information flow +is not acyclic, apparent A->B->A cycles can be broken by introducing a new +shared dependency C to mediate the information flow. That is, A and B can then +separately depend upon C; and C itself can start a degenerate worker that never +errors of its own accord (a minimal sketch of one such manifold follows at the +end of this section). + +For examples of this technique, search for usage of `worker/util.NewValueWorker` +(which is generally used inside other manifolds to pass snippets of agent config +down to workers that don't have a good reason to see, or write, the full agent +config); and `worker/gate.Manifold`, which is for one-way coordination between +workers which should not be started until some other worker has completed some +task. + +Please be careful when coordinating workers like this; the gate manifold in +particular is effectively just another lock, and it'd be trivial to construct +a set of gate-users that can deadlock one another. All the usual considerations +when working with locks still apply.
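+To make the shape of that C manifold concrete, here is one possible minimal
+sketch (an illustration only: `sharedChannelWorker` and its `events` channel
+are invented names for this example, not part of any real package):
+
+    // sharedChannelWorker never errors of its own accord; its only job is to
+    // own the channel it exposes, and to run until it is killed.
+    type sharedChannelWorker struct {
+        tomb   tomb.Tomb
+        events chan struct{}
+    }
+
+    func newSharedChannelWorker() *sharedChannelWorker {
+        w := &sharedChannelWorker{events: make(chan struct{})}
+        go func() {
+            defer w.tomb.Done()
+            <-w.tomb.Dying()
+        }()
+        return w
+    }
+
+    func (w *sharedChannelWorker) Kill()       { w.tomb.Kill(nil) }
+    func (w *sharedChannelWorker) Wait() error { return w.tomb.Wait() }
+
+    // Manifold returns a manifold whose worker mediates between A and B: each
+    // can declare it as an input and retrieve the shared channel, and neither
+    // needs to depend upon the other.
+    func Manifold() dependency.Manifold {
+        return dependency.Manifold{
+            Start: func(_ dependency.GetResourceFunc) (worker.Worker, error) {
+                return newSharedChannelWorker(), nil
+            },
+            Output: func(in worker.Worker, out interface{}) error {
+                inWorker, ok := in.(*sharedChannelWorker)
+                if !ok {
+                    return errors.Errorf("expected *sharedChannelWorker; got %T", in)
+                }
+                outPointer, ok := out.(*chan struct{})
+                if !ok {
+                    return errors.Errorf("expected *chan struct{}; got %T", out)
+                }
+                *outPointer = inWorker.events
+                return nil
+            },
+        }
+    }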
+ + Concerns and mitigations thereof -------------------------------- === modified file 'src/github.com/juju/juju/worker/dependency/engine.go' --- src/github.com/juju/juju/worker/dependency/engine.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/dependency/engine.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ "github.com/juju/errors" "github.com/juju/loggo" + "github.com/juju/utils/set" "launchpad.net/tomb" "github.com/juju/juju/worker" @@ -17,34 +18,39 @@ // EngineConfig defines the parameters needed to create a new engine. type EngineConfig struct { - // IsFatal allows errors generated by workers to stop the engine. + + // IsFatal returns true when passed an error that should stop the engine. + // It must not be nil. IsFatal IsFatalFunc - // MoreImportant allows fatal errors to be ranked according to importance. - MoreImportant MoreImportantFunc + // WorstError returns the more important of two fatal errors passed to it, + // and is used to determine which fatal error to report when there's more + // than one. It must not be nil. + WorstError WorstErrorFunc // ErrorDelay controls how long the engine waits before restarting a worker - // that encountered an unknown error. + // that encountered an unknown error. It must not be negative. ErrorDelay time.Duration // BounceDelay controls how long the engine waits before restarting a worker - // that was deliberately shut down because its dependencies changed. + // that was deliberately shut down because its dependencies changed. It must + // not be negative. BounceDelay time.Duration } -// Validate checks the config values are sensible. +// Validate returns an error if any field is invalid. func (config *EngineConfig) Validate() error { if config.IsFatal == nil { - return errors.New("engineconfig validation failed: IsFatal not specified") - } - if config.MoreImportant == nil { - return errors.New("engineconfig validation failed: MoreImportant not specified") - } - if config.ErrorDelay <= 0 { - return errors.New("engineconfig validation failed: ErrorDelay needs to be >= 0") - } - if config.BounceDelay <= 0 { - return errors.New("engineconfig validation failed: BounceDelay needs to be >= 0") + return errors.New("IsFatal not specified") + } + if config.WorstError == nil { + return errors.New("WorstError not specified") + } + if config.ErrorDelay < 0 { + return errors.New("ErrorDelay is negative") + } + if config.BounceDelay < 0 { + return errors.New("BounceDelay is negative") } return nil } @@ -56,18 +62,19 @@ // any error from Wait(). func NewEngine(config EngineConfig) (Engine, error) { if err := config.Validate(); err != nil { - return nil, errors.Trace(err) + return nil, errors.Annotatef(err, "invalid config") } engine := &engine{ config: config, - manifolds: map[string]Manifold{}, + manifolds: Manifolds{}, dependents: map[string][]string{}, current: map[string]workerInfo{}, install: make(chan installTicket), started: make(chan startedTicket), stopped: make(chan stoppedTicket), + report: make(chan reportTicket), } go func() { defer engine.tomb.Done() @@ -79,19 +86,24 @@ // engine maintains workers corresponding to its installed manifolds, and // restarts them whenever their inputs change. type engine struct { - tomb tomb.Tomb // config contains values passed in as config when the engine was created. config EngineConfig - // worstError is used to track the most important error we've received from a - // manifold. 
We use tomb.Tomb to track engine life cycle but the first error - // we get is not necessarily the most important one. - // Using moreImportant we rank errors and return the worst error. + // As usual, we use tomb.Tomb to track the lifecycle and error state of the + // engine worker itself; but we *only* report *internal* errors via the tomb. + // Fatal errors received from workers are *not* used to kill the tomb; they + // are tracked separately, and will only be exposed to the client when the + // engine's tomb has completed its job and encountered no errors. + tomb tomb.Tomb + + // worstError is used to track the most important fatal error we've received + // from any manifold. This should be the only place fatal errors are stored; + // they must *not* be passed into the tomb. worstError error // manifolds holds the installed manifolds by name. - manifolds map[string]Manifold + manifolds Manifolds // dependents holds, for each named manifold, those that depend on it. dependents map[string][]string @@ -99,11 +111,12 @@ // current holds the active worker information for each installed manifold. current map[string]workerInfo - // install, started, and stopped each communicate requests and changes into + // install, started, report and stopped each communicate requests and changes into // the loop goroutine. install chan installTicket started chan startedTicket stopped chan stoppedTicket + report chan reportTicket } // loop serializes manifold install operations and worker start/stop notifications. @@ -123,20 +136,20 @@ for name := range engine.current { engine.requestStop(name) } + case ticket := <-engine.report: + // This is safe so long as the Report method reads the result. + ticket.result <- engine.liveReport() case ticket := <-engine.install: // This is safe so long as the Install method reads the result. ticket.result <- engine.gotInstall(ticket.name, ticket.manifold) case ticket := <-engine.started: - engine.gotStarted(ticket.name, ticket.worker) + engine.gotStarted(ticket.name, ticket.worker, ticket.resourceLog) case ticket := <-engine.stopped: - engine.gotStopped(ticket.name, ticket.error) + engine.gotStopped(ticket.name, ticket.error, ticket.resourceLog) } if engine.isDying() { - if engine.allStopped() { - if engine.worstError == nil { - return tomb.ErrDying - } - return engine.worstError + if engine.allOthersStopped() { + return tomb.ErrDying } } } @@ -149,10 +162,69 @@ // Wait is part of the worker.Worker interface. func (engine *engine) Wait() error { - <-engine.tomb.Dead() + if tombError := engine.tomb.Wait(); tombError != nil { + return tombError + } return engine.worstError } +// Report is part of the Reporter interface. +func (engine *engine) Report() map[string]interface{} { + report := make(chan map[string]interface{}) + select { + case engine.report <- reportTicket{report}: + // This is safe so long as the loop sends a result. + return <-report + case <-engine.tomb.Dead(): + // Note that we don't abort on Dying as we usually would; the + // oneShotDying approach in loop means that it can continue to + // process requests until the last possible moment. Only once + // loop has exited do we fall back to this report. + return map[string]interface{}{ + KeyState: "stopped", + KeyError: engine.Wait(), + KeyManifolds: engine.manifoldsReport(), + } + } +} + +// liveReport collects and returns information about the engine, its manifolds, +// and their workers. It must only be called from the loop goroutine. 
+func (engine *engine) liveReport() map[string]interface{} { + var reportError error + state := "started" + if engine.isDying() { + state = "stopping" + if tombError := engine.tomb.Err(); tombError != nil { + reportError = tombError + } else { + reportError = engine.worstError + } + } + return map[string]interface{}{ + KeyState: state, + KeyError: reportError, + KeyManifolds: engine.manifoldsReport(), + } +} + +// manifoldsReport collects and returns information about the engine's manifolds +// and their workers. Until the tomb is Dead, it should only be called from the +// loop goroutine; after that, it's goroutine-safe. +func (engine *engine) manifoldsReport() map[string]interface{} { + manifolds := map[string]interface{}{} + for name, info := range engine.current { + manifolds[name] = map[string]interface{}{ + KeyState: info.state(), + KeyError: info.err, + KeyInputs: engine.manifolds[name].Inputs, + KeyReport: info.report(), + KeyResourceLog: resourceLogReport(info.resourceLog), + } + } + return manifolds +} + // Install is part of the Engine interface. func (engine *engine) Install(name string, manifold Manifold) error { result := make(chan error) @@ -168,10 +240,13 @@ // gotInstall handles the params originally supplied to Install. It must only be // called from the loop goroutine. func (engine *engine) gotInstall(name string, manifold Manifold) error { - logger.Infof("installing %q manifold...", name) + logger.Tracef("installing %q manifold...", name) if _, found := engine.manifolds[name]; found { return errors.Errorf("%q manifold already installed", name) } + if err := engine.checkAcyclic(name, manifold); err != nil { + return errors.Annotatef(err, "cannot install %q manifold", name) + } engine.manifolds[name] = manifold for _, input := range manifold.Inputs { engine.dependents[input] = append(engine.dependents[input], name) @@ -181,6 +256,31 @@ return nil } +// uninstall removes the named manifold from the engine's records. +func (engine *engine) uninstall(name string) { + // Note that we *don't* want to remove dependents[name] -- all those other + // manifolds do still depend on this, and another manifold with the same + // name might be installed in the future -- but we do want to remove the + // named manifold from all *values* in the dependents map. + for dName, dependents := range engine.dependents { + depSet := set.NewStrings(dependents...) + depSet.Remove(name) + engine.dependents[dName] = depSet.Values() + } + delete(engine.current, name) + delete(engine.manifolds, name) +} + +// checkAcyclic returns an error if the introduction of the supplied manifold +// would cause the dependency graph to contain cycles. +func (engine *engine) checkAcyclic(name string, manifold Manifold) error { + manifolds := Manifolds{name: manifold} + for name, manifold := range engine.manifolds { + manifolds[name] = manifold + } + return Validate(manifolds) +} + // requestStart invokes a runWorker goroutine for the manifold with the supplied // name. It must only be called from the loop goroutine. func (engine *engine) requestStart(name string, delay time.Duration) { @@ -199,7 +299,7 @@ // Final check that we're not shutting down yet... if engine.isDying() { - logger.Debugf("not starting %q manifold worker (shutting down)", name) + logger.Tracef("not starting %q manifold worker (shutting down)", name) return } @@ -207,15 +307,15 @@ // goroutine based on current known state. 
info.starting = true engine.current[name] = info - getResource := engine.getResourceFunc(name, manifold.Inputs) - go engine.runWorker(name, delay, manifold.Start, getResource) + resourceGetter := engine.resourceGetter(name, manifold.Inputs) + go engine.runWorker(name, delay, manifold.Start, resourceGetter) } -// getResourceFunc returns a GetResourceFunc backed by a snapshot of current +// resourceGetter returns a resourceGetter backed by a snapshot of current // worker state, restricted to those workers declared in inputs. It must only // be called from the loop goroutine; see inside for a detailed discussion of // why we took this approach. -func (engine *engine) getResourceFunc(name string, inputs []string) GetResourceFunc { +func (engine *engine) resourceGetter(name string, inputs []string) *resourceGetter { // We snapshot the resources available at invocation time, rather than adding an // additional communicate-resource-request channel. The latter approach is not // unreasonable... but is prone to inelegant scrambles when starting several @@ -243,7 +343,7 @@ // * Install manifold A; loop starts worker A // * Install manifold B; loop starts worker B with empty resource snapshot // * A communicates its worker back to loop; main thread bounces B - // * B's StartFunc asks for A, gets nothing, returns ErrUnmetDependencies + // * B's StartFunc asks for A, gets nothing, returns ErrMissing // // * loop restarts worker B with an up-to-date snapshot, B works fine // // We assume that, in the common case, most workers run without error most @@ -257,72 +357,85 @@ // Those may indeed suffer the occasional extra bounce as the system comes // to stability as it starts, or after a change; but workers *must* be // written for resilience in the face of arbitrary bounces *anyway*, so it - // shouldn't be harmful + // shouldn't be harmful. outputs := map[string]OutputFunc{} workers := map[string]worker.Worker{} for _, resourceName := range inputs { outputs[resourceName] = engine.manifolds[resourceName].Output workers[resourceName] = engine.current[resourceName].worker } - return func(resourceName string, out interface{}) error { - logger.Debugf("%q manifold requested %q resource", name, resourceName) - input := workers[resourceName] - if input == nil { - // No worker running (or not declared). - return ErrMissing - } - convert := outputs[resourceName] - if convert == nil { - // No conversion func available... - if out != nil { - // ...and the caller wants a resource. - return ErrMissing - } - // ...but it's ok, because the caller depends on existence only. - return nil - } - return convert(input, out) + return &resourceGetter{ + clientName: name, + expired: make(chan struct{}), + workers: workers, + outputs: outputs, + } } // runWorker starts the supplied manifold's worker and communicates it back to the // loop goroutine; waits for worker completion; and communicates any error encountered // back to the loop goroutine. It must not be run on the loop goroutine. -func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) { +func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, resourceGetter *resourceGetter) { + + errAborted := errors.New("aborted before delay elapsed") + + startAfterDelay := func() (worker.Worker, error) { + // NOTE: the resourceGetter will expire *after* the worker is started.
+ // This is tolerable because + // 1) we'll still correctly block access attempts most of the time + // 2) failing to block them won't cause data races anyway + // 3) it's not worth complicating the interface for every client just + // to eliminate the possibility of one harmlessly dumb interaction. + defer resourceGetter.expire() + logger.Tracef("starting %q manifold worker in %s...", name, delay) + select { + case <-time.After(delay): + case <-engine.tomb.Dying(): + return nil, errAborted + } + logger.Tracef("starting %q manifold worker", name) + return start(resourceGetter.getResource) + } + startWorkerAndWait := func() error { - logger.Infof("starting %q manifold worker in %s...", name, delay) - select { - case <-time.After(delay): - case <-engine.tomb.Dying(): - logger.Debugf("not starting %q manifold worker (shutting down)", name) - return tomb.ErrDying - } - - logger.Debugf("starting %q manifold worker", name) - worker, err := start(getResource) - if err != nil { - logger.Warningf("failed to start %q manifold worker: %v", name, err) + worker, err := startAfterDelay() + switch errors.Cause(err) { + case errAborted: + return nil + case nil: + logger.Tracef("running %q manifold worker", name) + default: + logger.Tracef("failed to start %q manifold worker: %v", name, err) return err } - - logger.Debugf("running %q manifold worker", name) select { case <-engine.tomb.Dying(): - logger.Debugf("stopping %q manifold worker (shutting down)", name) + logger.Tracef("stopping %q manifold worker (shutting down)", name) + // Doesn't matter whether worker == engine: if we're already Dying + // then cleanly Kill()ing ourselves again won't hurt anything. worker.Kill() - case engine.started <- startedTicket{name, worker}: - logger.Debugf("registered %q manifold worker", name) - } + case engine.started <- startedTicket{name, worker, resourceGetter.accessLog}: + logger.Tracef("registered %q manifold worker", name) + } + if worker == engine { + // We mustn't Wait() for ourselves to complete here, or we'll + // deadlock. But we should wait until we're Dying, because we + // need this func to keep running to keep the self manifold + // accessible as a resource. + <-engine.tomb.Dying() + return tomb.ErrDying + } + return worker.Wait() } // We may or may not send on started, but we *must* send on stopped. - engine.stopped <- stoppedTicket{name, startWorkerAndWait()} + engine.stopped <- stoppedTicket{name, startWorkerAndWait(), resourceGetter.accessLog} } // gotStarted updates the engine to reflect the creation of a worker. It must // only be called from the loop goroutine. -func (engine *engine) gotStarted(name string, worker worker.Worker) { +func (engine *engine) gotStarted(name string, worker worker.Worker, resourceLog []resourceAccess) { // Copy current info; check preconditions and abort the workers if we've // already been asked to stop it. info := engine.current[name] @@ -331,14 +444,15 @@ engine.tomb.Kill(errors.Errorf("fatal: unexpected %q manifold worker start", name)) fallthrough case info.stopping, engine.isDying(): - logger.Debugf("%q manifold worker no longer required", name) + logger.Tracef("%q manifold worker no longer required", name) worker.Kill() default: // It's fine to use this worker; update info and copy back. 
- logger.Infof("%q manifold worker started", name) - info.starting = false - info.worker = worker - engine.current[name] = info + logger.Debugf("%q manifold worker started", name) + engine.current[name] = workerInfo{ + worker: worker, + resourceLog: resourceLog, + } // Any manifold that declares this one as an input needs to be restarted. engine.bounceDependents(name) @@ -347,24 +461,25 @@ // gotStopped updates the engine to reflect the demise of (or failure to create) // a worker. It must only be called from the loop goroutine. -func (engine *engine) gotStopped(name string, err error) { - logger.Infof("%q manifold worker stopped: %v", name, err) +func (engine *engine) gotStopped(name string, err error, resourceLog []resourceAccess) { + logger.Debugf("%q manifold worker stopped: %v", name, err) // Copy current info and check for reasons to stop the engine. info := engine.current[name] if info.stopped() { engine.tomb.Kill(errors.Errorf("fatal: unexpected %q manifold worker stop", name)) } else if engine.config.IsFatal(err) { - if engine.worstError == nil { - engine.worstError = engine.config.MoreImportant(err, engine.worstError) - } - engine.tomb.Kill(err) + engine.worstError = engine.config.WorstError(err, engine.worstError) + engine.tomb.Kill(nil) } // Reset engine info; and bail out if we can be sure there's no need to bounce. - engine.current[name] = workerInfo{} + engine.current[name] = workerInfo{ + err: err, + resourceLog: resourceLog, + } if engine.isDying() { - logger.Debugf("permanently stopped %q manifold worker (shutting down)", name) + logger.Tracef("permanently stopped %q manifold worker (shutting down)", name) return } @@ -374,7 +489,7 @@ engine.requestStart(name, engine.config.BounceDelay) } else { // If we didn't stop it ourselves, we need to interpret the error. - switch err { + switch errors.Cause(err) { case nil: // Nothing went wrong; the task completed successfully. Nothing // needs to be done (unless the inputs change, in which case it @@ -383,8 +498,15 @@ // The task can't even start with the current state. Nothing more // can be done (until the inputs change, in which case we retry // anyway). + case ErrBounce: + // The task exited but wanted to restart immediately. + engine.requestStart(name, engine.config.BounceDelay) + case ErrUninstall: + // The task should never run again, and can be removed completely. + engine.uninstall(name) default: // Something went wrong but we don't know what. Try again soon. + logger.Errorf("%q manifold worker returned unexpected error: %v", name, err) engine.requestStart(name, engine.config.ErrorDelay) } } @@ -425,11 +547,12 @@ } } -// allStopped returns true if no workers are running or starting. It must only +// allOthersStopped returns true if no workers (other than the engine itself, +// if it happens to have been injected) are running or starting. It must only // be called from the loop goroutine. -func (engine *engine) allStopped() bool { +func (engine *engine) allOthersStopped() bool { for _, info := range engine.current { - if !info.stopped() { + if !info.stopped() && info.worker != engine { return false } } @@ -440,7 +563,7 @@ // stops every started one (and trusts the rest of the engine to restart them). // It must only be called from the loop goroutine. 
func (engine *engine) bounceDependents(name string) { - logger.Debugf("restarting dependents of %q manifold", name) + logger.Tracef("restarting dependents of %q manifold", name) for _, dependentName := range engine.dependents[name] { if engine.current[dependentName].stopped() { engine.requestStart(dependentName, engine.config.BounceDelay) @@ -453,9 +576,11 @@ // workerInfo stores what an engine's loop goroutine needs to know about the // worker for a given Manifold. type workerInfo struct { - starting bool - stopping bool - worker worker.Worker + starting bool + stopping bool + worker worker.Worker + err error + resourceLog []resourceAccess } // stopped returns true unless the worker is either assigned or starting. @@ -469,6 +594,28 @@ return true } +// state returns the latest known state of the worker, for use in reports. +func (info workerInfo) state() string { + switch { + case info.starting: + return "starting" + case info.stopping: + return "stopping" + case info.worker != nil: + return "started" + } + return "stopped" +} + +// report returns any available report from the worker. If the worker is not +// a Reporter, or is not present, this method will return nil. +func (info workerInfo) report() map[string]interface{} { + if reporter, ok := info.worker.(Reporter); ok { + return reporter.Report() + } + return nil +} + // installTicket is used by engine to induce installation of a named manifold // and pass on any errors encountered in the process. type installTicket struct { @@ -480,13 +627,21 @@ // startedTicket is used by engine to notify the loop of the creation of the // worker for a particular manifold. type startedTicket struct { - name string - worker worker.Worker + name string + worker worker.Worker + resourceLog []resourceAccess } // stoppedTicket is used by engine to notify the loop of the demise of (or // failure to create) the worker for a particular manifold. type stoppedTicket struct { - name string - error error + name string + error error + resourceLog []resourceAccess +} + +// reportTicket is used by the engine to notify the loop that a status report +// should be generated. 
+type reportTicket struct { + result chan map[string]interface{} } === modified file 'src/github.com/juju/juju/worker/dependency/engine_test.go' --- src/github.com/juju/juju/worker/dependency/engine_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/dependency/engine_test.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,6 @@ "time" "github.com/juju/errors" - "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -17,43 +16,11 @@ ) type EngineSuite struct { - testing.IsolationSuite - engine dependency.Engine + engineFixture } var _ = gc.Suite(&EngineSuite{}) -func (s *EngineSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.startEngine(c, nothingFatal) -} - -func (s *EngineSuite) TearDownTest(c *gc.C) { - s.stopEngine(c) - s.IsolationSuite.TearDownTest(c) -} - -func (s *EngineSuite) startEngine(c *gc.C, isFatal dependency.IsFatalFunc) { - config := dependency.EngineConfig{ - IsFatal: isFatal, - MoreImportant: func(err0, err1 error) error { return err0 }, - ErrorDelay: coretesting.ShortWait / 2, - BounceDelay: coretesting.ShortWait / 10, - } - - e, err := dependency.NewEngine(config) - c.Assert(err, jc.ErrorIsNil) - s.engine = e -} - -func (s *EngineSuite) stopEngine(c *gc.C) { - if s.engine != nil { - err := worker.Stop(s.engine) - s.engine = nil - c.Check(err, jc.ErrorIsNil) - } -} - func (s *EngineSuite) TestInstallConvenienceWrapper(c *gc.C) { mh1 := newManifoldHarness() mh2 := newManifoldHarness() @@ -118,6 +85,21 @@ mh.AssertNoStart(c) } +func (s *EngineSuite) TestInstallCycle(c *gc.C) { + + // Install a worker with an unmet dependency. + mh1 := newManifoldHarness("robin-hood") + err := s.engine.Install("friar-tuck", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertNoStart(c) + + // Can't install another worker that creates a dependency cycle. + mh2 := newManifoldHarness("friar-tuck") + err = s.engine.Install("robin-hood", mh2.Manifold()) + c.Assert(err, gc.ErrorMatches, `cannot install "robin-hood" manifold: cycle detected at .*`) + mh2.AssertNoStart(c) +} + func (s *EngineSuite) TestInstallAlreadyStopped(c *gc.C) { // Shut down the engine. @@ -410,13 +392,64 @@ mh2.AssertOneStart(c) } -// TestErrMoreImportant starts an engine with two -// manifolds that always error with fatal errors. We test that the -// most important error is the one returned by the engine -// This test uses manifolds whose workers ignore fatal errors. -// We want this behvaiour so that we don't race over which fatal +func (s *EngineSuite) TestErrBounce(c *gc.C) { + + // Start a simple dependency. + mh1 := newManifoldHarness() + err := s.engine.Install("some-task", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertOneStart(c) + + // Start its dependent. + mh2 := newResourceIgnoringManifoldHarness("some-task") + err = s.engine.Install("another-task", mh2.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh2.AssertOneStart(c) + + // The parent requests bounce causing both to restart. + // Note(mjs): the lack of a restart delay is not specifically + // tested as I can't think of a reliable way to do this. + mh1.InjectError(c, dependency.ErrBounce) + mh1.AssertOneStart(c) + mh2.AssertStart(c) // Might restart more than once +} + +func (s *EngineSuite) TestErrUninstall(c *gc.C) { + + // Start a simple dependency. + mh1 := newManifoldHarness() + err := s.engine.Install("some-task", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertOneStart(c) + + // Start its dependent. 
Note that in this case we want to record all start + // attempts, even if there are resource errors. + mh2 := newResourceIgnoringManifoldHarness("some-task") + err = s.engine.Install("another-task", mh2.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh2.AssertOneStart(c) + + // Uninstall the dependency; it should not be restarted, but its dependent should. + mh1.InjectError(c, dependency.ErrUninstall) + mh1.AssertNoStart(c) + mh2.AssertOneStart(c) + + // Installing a new some-task manifold restarts the dependent. + mh3 := newManifoldHarness() + err = s.engine.Install("some-task", mh3.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh3.AssertOneStart(c) + mh2.AssertOneStart(c) +} + +// TestWorstError starts an engine with two manifolds that always error +// with fatal errors. We test that the most important error is the one +// returned by the engine. +// +// This test uses manifolds whose workers ignore kill requests. We want +// this (dangerous!) behaviour so that we don't race over which fatal // error is seen by the engine first. -func (s *EngineSuite) TestErrMoreImportant(c *gc.C) { +func (s *EngineSuite) TestWorstError(c *gc.C) { // Setup the errors, their importance, and the function // that decides. importantError := errors.New("an important error") @@ -428,10 +461,10 @@ // Start a new engine with moreImportant configured config := dependency.EngineConfig{ - IsFatal: allFatal, - MoreImportant: moreImportant, - ErrorDelay: coretesting.ShortWait / 2, - BounceDelay: coretesting.ShortWait / 10, + IsFatal: allFatal, + WorstError: moreImportant, + ErrorDelay: coretesting.ShortWait / 2, + BounceDelay: coretesting.ShortWait / 10, } engine, err := dependency.NewEngine(config) c.Assert(err, jc.ErrorIsNil) @@ -450,44 +483,100 @@ mh2.InjectError(c, importantError) err = engine.Wait() - c.Assert(err, gc.ErrorMatches, importantError.Error()) + c.Check(err, gc.ErrorMatches, importantError.Error()) + report := engine.Report() + c.Check(report["error"], gc.ErrorMatches, importantError.Error()) } func (s *EngineSuite) TestConfigValidate(c *gc.C) { validIsFatal := func(error) bool { return true } - validMoreImportant := func(err0, err1 error) error { return err0 } + validWorstError := func(err0, err1 error) error { return err0 } validErrorDelay := time.Second validBounceDelay := time.Second + tests := []struct { about string config dependency.EngineConfig err string - }{ - { - "IsFatal invalid", - dependency.EngineConfig{nil, validMoreImportant, validErrorDelay, validBounceDelay}, - "engineconfig validation failed: IsFatal not specified", - }, - { - "MoreImportant invalid", - dependency.EngineConfig{validIsFatal, nil, validErrorDelay, validBounceDelay}, - "engineconfig validation failed: MoreImportant not specified", - }, - { - "ErrorDelay invalid", - dependency.EngineConfig{validIsFatal, validMoreImportant, -time.Second, validBounceDelay}, - "engineconfig validation failed: ErrorDelay needs to be >= 0", - }, - { - "BounceDelay invalid", - dependency.EngineConfig{validIsFatal, validMoreImportant, validErrorDelay, -time.Second}, - "engineconfig validation failed: BounceDelay needs to be >= 0", - }, - } + }{{ + "IsFatal invalid", + dependency.EngineConfig{nil, validWorstError, validErrorDelay, validBounceDelay}, + "IsFatal not specified", + }, { + "WorstError invalid", + dependency.EngineConfig{validIsFatal, nil, validErrorDelay, validBounceDelay}, + "WorstError not specified", + }, { + "ErrorDelay invalid", + dependency.EngineConfig{validIsFatal, validWorstError, -time.Second, validBounceDelay}, + 
"ErrorDelay is negative", + }, { + "BounceDelay invalid", + dependency.EngineConfig{validIsFatal, validWorstError, validErrorDelay, -time.Second}, + "BounceDelay is negative", + }} for i, test := range tests { - c.Logf("running test %d: %v", i, test.about) - err := test.config.Validate() - c.Assert(err, gc.ErrorMatches, test.err) - } + c.Logf("test %d: %v", i, test.about) + + c.Logf("config validation...") + validateErr := test.config.Validate() + c.Check(validateErr, gc.ErrorMatches, test.err) + + c.Logf("engine creation...") + engine, createErr := dependency.NewEngine(test.config) + c.Check(engine, gc.IsNil) + c.Check(createErr, gc.ErrorMatches, "invalid config: "+test.err) + } +} + +func (s *EngineSuite) TestValidateEmptyManifolds(c *gc.C) { + err := dependency.Validate(dependency.Manifolds{}) + c.Check(err, jc.ErrorIsNil) +} + +func (s *EngineSuite) TestValidateTrivialCycle(c *gc.C) { + err := dependency.Validate(dependency.Manifolds{ + "a": dependency.Manifold{Inputs: []string{"a"}}, + }) + c.Check(err.Error(), gc.Equals, `cycle detected at "a" (considering: map[a:true])`) +} + +func (s *EngineSuite) TestValidateComplexManifolds(c *gc.C) { + + // Create a bunch of manifolds with tangled but acyclic dependencies; check + // that they pass validation. + manifolds := dependency.Manifolds{ + "root1": dependency.Manifold{}, + "root2": dependency.Manifold{}, + "mid1": dependency.Manifold{Inputs: []string{"root1"}}, + "mid2": dependency.Manifold{Inputs: []string{"root1", "root2"}}, + "leaf1": dependency.Manifold{Inputs: []string{"root2", "mid1"}}, + "leaf2": dependency.Manifold{Inputs: []string{"root1", "mid2"}}, + "leaf3": dependency.Manifold{Inputs: []string{"root1", "root2", "mid1", "mid2"}}, + } + err := dependency.Validate(manifolds) + c.Check(err, jc.ErrorIsNil) + + // Introduce a cycle; check the manifolds no longer validate. + manifolds["root1"] = dependency.Manifold{Inputs: []string{"leaf1"}} + err = dependency.Validate(manifolds) + c.Check(err, gc.ErrorMatches, "cycle detected at .*") +} + +func (s *EngineSuite) TestTracedErrMissing(c *gc.C) { + + // Install a worker with an unmet dependency, check it doesn't start + // (because the implementation returns a Trace()d ErrMissing). + mh1 := newTracedManifoldHarness("later-task") + err := s.engine.Install("some-task", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertNoStart(c) + + // Install its dependency; check both start. + mh2 := newTracedManifoldHarness() + err = s.engine.Install("later-task", mh2.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh2.AssertOneStart(c) + mh1.AssertOneStart(c) } === added file 'src/github.com/juju/juju/worker/dependency/flag.go' --- src/github.com/juju/juju/worker/dependency/flag.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/dependency/flag.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package dependency + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/worker" +) + +// Flag represents a single boolean used to determine whether a given +// manifold worker should run. +type Flag interface { + + // Check returns the flag's value. Check calls must *always* return + // the same value for a given instatiation of the type implementing + // Flag. + Check() bool +} + +// WithFlag returns a manifold, based on that supplied, which will only run +// a worker when the named flag manifold's worker is active and set. 
+func WithFlag(base Manifold, flagName string) Manifold { + return Manifold{ + Inputs: append(base.Inputs, flagName), + Start: flagWrap(base.Start, flagName), + Output: base.Output, + } +} + +// flagWrap returns a StartFunc that will return ErrMissing if the named flag +// resource is not active or not set. +func flagWrap(inner StartFunc, flagName string) StartFunc { + return func(getResource GetResourceFunc) (worker.Worker, error) { + var flag Flag + if err := getResource(flagName, &flag); err != nil { + return nil, errors.Trace(err) + } + if !flag.Check() { + return nil, ErrMissing + } + + worker, err := inner(getResource) + if err != nil { + return nil, errors.Trace(err) + } + return worker, nil + } +} === added file 'src/github.com/juju/juju/worker/dependency/flag_test.go' --- src/github.com/juju/juju/worker/dependency/flag_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/dependency/flag_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,110 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package dependency_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" +) + +type FlagSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&FlagSuite{}) + +func (s *FlagSuite) TestEmptyInputs(c *gc.C) { + wrapped := dependency.WithFlag(dependency.Manifold{}, "blob") + c.Check(wrapped.Inputs, jc.DeepEquals, []string{"blob"}) +} + +func (s *FlagSuite) TestNonEmptyInputs(c *gc.C) { + base := dependency.Manifold{ + Inputs: []string{"foo", "bar"}, + } + wrapped := dependency.WithFlag(base, "blib") + expect := []string{"foo", "bar", "blib"} + c.Check(wrapped.Inputs, jc.DeepEquals, expect) +} + +func (s *FlagSuite) TestEmptyOutput(c *gc.C) { + wrapped := dependency.WithFlag(dependency.Manifold{}, "blob") + c.Check(wrapped.Output, gc.IsNil) +} + +func (s *FlagSuite) TestNonEmptyOutput(c *gc.C) { + output := func(_ worker.Worker, _ interface{}) error { + panic("splat") + } + base := dependency.Manifold{ + Output: output, + } + wrapped := dependency.WithFlag(base, "blah") + tryOutput := func() { + wrapped.Output(nil, nil) + } + c.Check(tryOutput, gc.PanicMatches, "splat") +} + +func (s *FlagSuite) TestStartMissingFlag(c *gc.C) { + wrapped := dependency.WithFlag(dependency.Manifold{}, "foo") + getResource := dt.StubGetResource(dt.StubResources{ + "foo": dt.StubResource{Error: dependency.ErrMissing}, + }) + worker, err := wrapped.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (s *FlagSuite) TestStartNotFlag(c *gc.C) { + wrapped := dependency.WithFlag(dependency.Manifold{}, "foo") + getResource := dt.StubGetResource(dt.StubResources{ + "foo": dt.StubResource{Output: true}, + }) + worker, err := wrapped.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.ErrorMatches, `cannot set true into \*dependency.Flag`) +} + +func (s *FlagSuite) TestStartFalseFlag(c *gc.C) { + wrapped := dependency.WithFlag(dependency.Manifold{}, "foo") + getResource := dt.StubGetResource(dt.StubResources{ + "foo": dt.StubResource{Output: stubFlag(false)}, + }) + worker, err := wrapped.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (s *FlagSuite) TestStartTrueFlag(c *gc.C) { + expectWorker := 
&stubWorker{} + base := dependency.Manifold{ + Start: func(_ dependency.GetResourceFunc) (worker.Worker, error) { + return expectWorker, nil + }, + } + wrapped := dependency.WithFlag(base, "foo") + getResource := dt.StubGetResource(dt.StubResources{ + "foo": dt.StubResource{Output: stubFlag(true)}, + }) + worker, err := wrapped.Start(getResource) + c.Check(worker, gc.Equals, expectWorker) + c.Check(err, jc.ErrorIsNil) +} + +type stubFlag bool + +func (flag stubFlag) Check() bool { + return bool(flag) +} + +type stubWorker struct { + worker.Worker +} === modified file 'src/github.com/juju/juju/worker/dependency/interface.go' --- src/github.com/juju/juju/worker/dependency/interface.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/dependency/interface.go 2016-03-22 15:18:22 +0000 @@ -9,17 +9,28 @@ "github.com/juju/juju/worker" ) -// Engine is a mechanism for persistently running named workers and managing -// dependencies between them. +// Engine is a mechanism for persistently running named workers, managing the +// dependencies between them, and reporting on their status. type Engine interface { - // Install causes the Engine to accept responsibility for maintaining a + // Engine's primary purpose is to implement Installer responsibilities. + Installer + + // Engine also exposes human-comprehensible status data to its clients. + Reporter + + // Engine is itself a Worker. + worker.Worker +} + +// Installer takes responsibility for persistently running named workers and +// managing the dependencies between them. +type Installer interface { + + // Install causes the implementor to accept responsibility for maintaining a // worker corresponding to the supplied manifold, restarting it when it // fails and when its inputs' workers change, until the Engine shuts down. Install(name string, manifold Manifold) error - - // Engine is just another Worker. - worker.Worker } // Manifold defines the behaviour of a node in an Engine's dependency graph. It's @@ -34,29 +45,55 @@ Inputs []string // Start is used to create a worker for the manifold. It must not be nil. + // The supplied GetResourceFunc will return ErrMissing for any dependency + // not named in Inputs, and will cease to function immediately after the + // StartFunc returns: do not store references to it. + // + // Note that, while Start must exist, it doesn't *have* to *start* a worker + // (although it must return either a worker or an error). That is to say: in + // *some* circumstances, it's ok to wrap a worker under the management of a + // separate component (e.g. the `worker/agent` Manifold itself) but this + // approach should only be used: + // + // * as a last resort; and + // * with clear justification. + // + // ...because it's a deliberate, and surprising, subversion of the dependency + // model; and is thus much harder to reason about and implement correctly. In + // particular, if you write a surprising start func, you can't safely declare + // any inputs at all. Start StartFunc // Output is used to implement a GetResourceFunc for manifolds that declare // a dependency on this one; it can be nil if your manifold is a leaf node, // or if it exposes no services to its dependents. + // + // If you implement an Output func, be especially careful to expose sensible + // types. Your `out` param should almost always be a pointer to an interface; + // and, furthermore, to an interface that does *not* satisfy `worker.Worker`. 
+	//
+	// (Consider the interface segregation principle: the *Engine* is responsible
+	// for the lifetimes of the backing workers, and for handling their errors.
+	// Exposing those levers to your dependents as well can only encourage them
+	// to use them, and vastly complicate the possible interactions.)
+	//
+	// And if your various possible clients might use different sets of features,
+	// please keep those interfaces segregated as well: prefer to accept [a *Foo
+	// or a *Bar] rather than just [a *FooBar] -- unless all your clients really
+	// do want a FooBar resource.
+	//
+	// Even if the Engine itself didn't bother to track the types exposed per
+	// dependency, it's still a useful prophylactic against complexity -- so
+	// that when reading manifold code, it should be immediately clear both what
+	// your dependencies *are* (by reading the names in the manifold config)
+	// and what they *do* for you (by reading the start func and observing the
+	// types in play).
 	Output OutputFunc
 }
 
 // Manifolds conveniently represents several Manifolds.
 type Manifolds map[string]Manifold
 
-// Install is a convenience function for installing multiple manifolds into an
-// engine at once. It returns the first error it encounters (and installs no more
-// manifolds).
-func Install(engine Engine, manifolds Manifolds) error {
-	for name, manifold := range manifolds {
-		if err := engine.Install(name, manifold); err != nil {
-			return errors.Trace(err)
-		}
-	}
-	return nil
-}
-
 // StartFunc returns a worker or an error. All the worker's dependencies should
 // be taken from the supplied GetResourceFunc; if no worker can be started due
 // to unmet dependencies, it should return ErrMissing, in which case it will
@@ -80,6 +117,17 @@
 // because that's a lot of implementation hassle for little practical gain.
 var ErrMissing = errors.New("dependency not available")
 
+// ErrBounce can be returned by a StartFunc or a worker to indicate to
+// the engine that it should be restarted immediately, instead of
+// waiting for ErrorDelay. This is useful for workers which restart
+// themselves to alert dependents that an output has changed.
+var ErrBounce = errors.New("restart immediately")
+
+// ErrUninstall can be returned by a StartFunc or a worker to indicate to the
+// engine that it can/should never run again, and that the originating manifold
+// should be completely removed.
+var ErrUninstall = errors.New("resource permanently unavailable")
+
 // OutputFunc is a type coercion function for a worker generated by a StartFunc.
 // When passed an out pointer to a type it recognises, it will assign a suitable
 // value and return no error.
@@ -90,5 +138,68 @@
 // its workers, shut itself down, and return the original fatal error via Wait().
 type IsFatalFunc func(err error) bool
 
-// MoreImportantFunc is used to determine which of two errors is more important.
-type MoreImportantFunc func(err0, err1 error) error
+// WorstErrorFunc is used to rank fatal errors, to allow an Engine to return the
+// single most important error it's encountered.
+type WorstErrorFunc func(err0, err1 error) error
+
+// Reporter defines an interface for extracting human-relevant information
+// from a worker.
+type Reporter interface {
+
+	// Report returns a map describing the state of the receiver. It is expected
+	// to be goroutine-safe.
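+	//
+	// A minimal sketch of the sort of value a Report can return -- someWorker
+	// is a hypothetical type, and the exact shape is illustrative only:
+	//
+	//     func (w *someWorker) Report() map[string]interface{} {
+	//         return map[string]interface{}{
+	//             KeyState: "started",
+	//             KeyError: nil,
+	//         }
+	//     }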
+ // + // It is polite and helpful to use the Key* constants and conventions defined + // and described in this package, where appropriate, but that's for the + // convenience of the humans that read the reports; we don't and shouldn't + // have any code that depends on particular Report formats. + Report() map[string]interface{} +} + +// The Key constants describe the constant features of an Engine's Report. +const ( + + // KeyState applies to a worker; possible values are "starting", "started", + // "stopping", or "stopped". Or it might be something else, in distant + // Reporter implementations; don't make assumptions. + KeyState = "state" + + // KeyError holds some relevant error. In the case of an Engine, this will be: + // * any internal error indicating incorrect operation; or + // * the most important fatal error encountered by any worker; or + // * nil, if none of the above apply; + // ...and the value should not be presumed to be stable until the engine + // state is "stopped". + // + // In the case of a manifold, it will always hold the most recent error + // returned by the associated worker (or its start func); and will be + // rewritten whenever a worker state is set to "started" or "stopped". + // + // In the case of a resource access, it holds any error encountered when + // trying to find or convert the resource. + KeyError = "error" + + // KeyManifolds holds a map of manifold name to further data (including + // dependency inputs; current worker state; and any relevant report/error + // for the associated current/recent worker.) + KeyManifolds = "manifolds" + + // KeyReport holds an arbitrary map of information returned by a manifold + // Worker that is also a Reporter. + KeyReport = "report" + + // KeyInputs holds the names of the manifolds on which this one depends. + KeyInputs = "inputs" + + // KeyResourceLog holds a slice representing the calls the current worker + // made to its getResource func; the type of the output param; and any + // error encountered. + KeyResourceLog = "resource-log" + + // KeyName holds the name of some resource. + KeyName = "name" + + // KeyType holds a string representation of the type by which a resource + // was accessed. + KeyType = "type" +) === added file 'src/github.com/juju/juju/worker/dependency/reporter_test.go' --- src/github.com/juju/juju/worker/dependency/reporter_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/dependency/reporter_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,167 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package dependency_test + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/dependency" +) + +type ReportSuite struct { + engineFixture +} + +var _ = gc.Suite(&ReportSuite{}) + +func (s *ReportSuite) TestReportStarted(c *gc.C) { + report := s.engine.Report() + c.Check(report, jc.DeepEquals, map[string]interface{}{ + "state": "started", + "error": nil, + "manifolds": map[string]interface{}{}, + }) +} + +func (s *ReportSuite) TestReportStopped(c *gc.C) { + s.engine.Kill() + err := s.engine.Wait() + c.Check(err, jc.ErrorIsNil) + report := s.engine.Report() + c.Check(report, jc.DeepEquals, map[string]interface{}{ + "state": "stopped", + "error": nil, + "manifolds": map[string]interface{}{}, + }) +} + +func (s *ReportSuite) TestReportStopping(c *gc.C) { + mh1 := newErrorIgnoringManifoldHarness() + err := s.engine.Install("task", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + defer func() { + s.engine.Kill() + mh1.InjectError(c, nil) + err := s.engine.Wait() + c.Check(err, jc.ErrorIsNil) + }() + mh1.AssertOneStart(c) + + // It may take a short time for the main loop to notice + // the change and stop the "task" worker. + s.engine.Kill() + var isTaskStopping = func(report map[string]interface{}) bool { + manifolds := report["manifolds"].(map[string]interface{}) + task := manifolds["task"].(map[string]interface{}) + switch taskState := task["state"]; taskState { + case "started": + return false + case "stopping": + return true + default: + c.Fatalf("unexpected task state: %v", taskState) + } + panic("unreachable") + } + + var report map[string]interface{} + for i := 0; i < 3; i++ { + report = s.engine.Report() + if isTaskStopping(report) { + break + } + time.Sleep(coretesting.ShortWait) + } + c.Check(report, jc.DeepEquals, map[string]interface{}{ + "state": "stopping", + "error": nil, + "manifolds": map[string]interface{}{ + "task": map[string]interface{}{ + "state": "stopping", + "error": nil, + "inputs": ([]string)(nil), + "resource-log": []map[string]interface{}{}, + "report": map[string]interface{}{ + "key1": "hello there", + }, + }, + }, + }) +} + +func (s *ReportSuite) TestReportInputs(c *gc.C) { + mh1 := newManifoldHarness() + err := s.engine.Install("task", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertOneStart(c) + + mh2 := newManifoldHarness("task") + err = s.engine.Install("another task", mh2.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh2.AssertOneStart(c) + + report := s.engine.Report() + c.Check(report, jc.DeepEquals, map[string]interface{}{ + "state": "started", + "error": nil, + "manifolds": map[string]interface{}{ + "task": map[string]interface{}{ + "state": "started", + "error": nil, + "inputs": ([]string)(nil), + "resource-log": []map[string]interface{}{}, + "report": map[string]interface{}{ + "key1": "hello there", + }, + }, + "another task": map[string]interface{}{ + "state": "started", + "error": nil, + "inputs": []string{"task"}, + "resource-log": []map[string]interface{}{{ + "name": "task", + "type": "", + "error": nil, + }}, + "report": map[string]interface{}{ + "key1": "hello there", + }, + }, + }, + }) +} +func (s *ReportSuite) TestReportError(c *gc.C) { + mh1 := newManifoldHarness("missing") + manifold := mh1.Manifold() + err := s.engine.Install("task", manifold) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertNoStart(c) + + s.engine.Kill() + err = s.engine.Wait() + c.Check(err, jc.ErrorIsNil) + + report := s.engine.Report() + c.Check(report, 
jc.DeepEquals, map[string]interface{}{
+		"state": "stopped",
+		"error": nil,
+		"manifolds": map[string]interface{}{
+			"task": map[string]interface{}{
+				"state":  "stopped",
+				"error":  dependency.ErrMissing,
+				"inputs": []string{"missing"},
+				"resource-log": []map[string]interface{}{{
+					"name":  "missing",
+					"type":  "",
+					"error": dependency.ErrMissing,
+				}},
+				"report": (map[string]interface{})(nil),
+			},
+		},
+	})
+}
=== added file 'src/github.com/juju/juju/worker/dependency/resource.go'
--- src/github.com/juju/juju/worker/dependency/resource.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/dependency/resource.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,106 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package dependency
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/worker"
+)
+
+// resourceGetter encapsulates a snapshot of workers and output funcs and exposes
+// a getResource method that can be used as a GetResourceFunc.
+type resourceGetter struct {
+
+	// clientName is the name of the manifold for whose convenience this exists.
+	clientName string
+
+	// expired is closed when the resourceGetter should no longer be used.
+	expired chan struct{}
+
+	// workers holds the snapshot of manifold workers.
+	workers map[string]worker.Worker
+
+	// outputs holds the snapshot of manifold output funcs.
+	outputs map[string]OutputFunc
+
+	// accessLog holds the names and types of resource requests, and any error
+	// encountered. It does not include requests made after expiry.
+	accessLog []resourceAccess
+}
+
+// expire closes the expired channel. Calling it more than once will panic.
+func (rg *resourceGetter) expire() {
+	close(rg.expired)
+}
+
+// getResource is intended for use as the GetResourceFunc passed into the Start
+// func of the client manifold.
+func (rg *resourceGetter) getResource(resourceName string, out interface{}) error {
+	logger.Tracef("%q manifold requested %q resource", rg.clientName, resourceName)
+	select {
+	case <-rg.expired:
+		return errors.New("expired resourceGetter: cannot be used outside Start func")
+	default:
+		err := rg.rawAccess(resourceName, out)
+		rg.accessLog = append(rg.accessLog, resourceAccess{
+			name: resourceName,
+			as:   fmt.Sprintf("%T", out),
+			err:  err,
+		})
+		return err
+	}
+}
+
+// rawAccess is a GetResourceFunc that neither checks expiry nor records access.
+func (rg *resourceGetter) rawAccess(resourceName string, out interface{}) error {
+	input := rg.workers[resourceName]
+	if input == nil {
+		// No worker running (or not declared).
+		return ErrMissing
+	}
+	if out == nil {
+		// No conversion necessary.
+		return nil
+	}
+	convert := rg.outputs[resourceName]
+	if convert == nil {
+		// Conversion required, no func available.
+		return ErrMissing
+	}
+	return convert(input, out)
+}
+
+// resourceAccess describes a call made to (*resourceGetter).getResource.
+type resourceAccess struct {
+
+	// name is the name of the resource requested.
+	name string
+
+	// as is the string representation of the type of the out param.
+	as string
+
+	// err is any error returned from rawAccess.
+	err error
+}
+
+// report returns a convenient representation of ra.
+func (ra resourceAccess) report() map[string]interface{} {
+	return map[string]interface{}{
+		KeyName:  ra.name,
+		KeyType:  ra.as,
+		KeyError: ra.err,
+	}
+}
+
+// resourceLogReport returns a convenient representation of accessLog.
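+// Each entry in the result holds the KeyName, KeyType and KeyError values
+// recorded for one getResource call; for example (shape as asserted in the
+// reporter tests):
+//
+//     map[string]interface{}{"name": "task", "type": "", "error": nil}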
+func resourceLogReport(accessLog []resourceAccess) []map[string]interface{} { + result := make([]map[string]interface{}, len(accessLog)) + for i, access := range accessLog { + result[i] = access.report() + } + return result +} === added file 'src/github.com/juju/juju/worker/dependency/self_test.go' --- src/github.com/juju/juju/worker/dependency/self_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/dependency/self_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package dependency_test + +import ( + "fmt" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" +) + +type SelfSuite struct { + engineFixture +} + +var _ = gc.Suite(&SelfSuite{}) + +func (s *SelfSuite) TestInputs(c *gc.C) { + manifold := dependency.SelfManifold(s.engine) + c.Check(manifold.Inputs, gc.HasLen, 0) +} + +func (s *SelfSuite) TestStart(c *gc.C) { + manifold := dependency.SelfManifold(s.engine) + engine, err := manifold.Start(nil) + c.Check(err, jc.ErrorIsNil) + c.Check(engine, gc.Equals, s.engine) +} + +func (s *SelfSuite) TestOutputBadInput(c *gc.C) { + manifold := dependency.SelfManifold(s.engine) + var input dependency.Engine + err := manifold.Output(input, nil) + c.Check(err, gc.ErrorMatches, "unexpected input worker") +} + +func (s *SelfSuite) TestOutputBadOutput(c *gc.C) { + manifold := dependency.SelfManifold(s.engine) + var unknown interface{} + err := manifold.Output(s.engine, &unknown) + c.Check(err, gc.ErrorMatches, "out should be a \\*Installer or a \\*Reporter; is .*") + c.Check(unknown, gc.IsNil) +} + +func (s *SelfSuite) TestOutputReporter(c *gc.C) { + manifold := dependency.SelfManifold(s.engine) + var reporter dependency.Reporter + err := manifold.Output(s.engine, &reporter) + c.Check(err, jc.ErrorIsNil) + c.Check(reporter, gc.Equals, s.engine) +} + +func (s *SelfSuite) TestOutputInstaller(c *gc.C) { + manifold := dependency.SelfManifold(s.engine) + var installer dependency.Installer + err := manifold.Output(s.engine, &installer) + c.Check(err, jc.ErrorIsNil) + c.Check(installer, gc.Equals, s.engine) +} + +func (s *SelfSuite) TestActuallyWorks(c *gc.C) { + + // Create and install a manifold with an unsatisfied dependency. + mh1 := newManifoldHarness("self") + err := s.engine.Install("dependent", mh1.Manifold()) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertNoStart(c) + + // Install an engine inside itself; once it's "started", dependent will + // be restarted. + manifold := dependency.SelfManifold(s.engine) + err = s.engine.Install("self", manifold) + c.Assert(err, jc.ErrorIsNil) + mh1.AssertOneStart(c) + + // Check we can still stop it (with a timeout -- injudicious + // implementation changes could induce deadlocks). + done := make(chan struct{}) + go func() { + err := worker.Stop(s.engine) + c.Check(err, jc.ErrorIsNil) + close(done) + }() + select { + case <-done: + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } +} + +func (s *SelfSuite) TestStress(c *gc.C) { + + // Repeatedly install a manifold inside itself. + manifold := dependency.SelfManifold(s.engine) + for i := 0; i < 100; i++ { + go s.engine.Install(fmt.Sprintf("self-%d", i), manifold) + } + + // Check we can still stop it (with a timeout -- injudicious + // implementation changes could induce deadlocks). 
+ done := make(chan struct{}) + go func() { + err := worker.Stop(s.engine) + c.Check(err, jc.ErrorIsNil) + close(done) + }() + select { + case <-done: + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } +} === added file 'src/github.com/juju/juju/worker/dependency/util.go' --- src/github.com/juju/juju/worker/dependency/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/dependency/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,98 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package dependency + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/worker" +) + +// Install is a convenience function for installing multiple manifolds into an +// Installer at once. It returns the first error it encounters (and installs no +// more manifolds). +func Install(installer Installer, manifolds Manifolds) error { + for name, manifold := range manifolds { + if err := installer.Install(name, manifold); err != nil { + return errors.Trace(err) + } + } + return nil +} + +// Validate will return an error if the dependency graph defined by the supplied +// manifolds contains any cycles. +func Validate(manifolds Manifolds) error { + inputs := make(map[string][]string) + for name, manifold := range manifolds { + inputs[name] = manifold.Inputs + } + return validator{ + inputs: inputs, + doing: make(map[string]bool), + done: make(map[string]bool), + }.run() +} + +// validator implements a topological sort of the nodes defined in inputs; it +// doesn't actually produce sorted nodes, but rather exists to return an error +// if it determines that the nodes cannot be sorted (and hence a cycle exists). +type validator struct { + inputs map[string][]string + doing map[string]bool + done map[string]bool +} + +func (v validator) run() error { + for node := range v.inputs { + if err := v.visit(node); err != nil { + return errors.Trace(err) + } + } + return nil +} + +func (v validator) visit(node string) error { + if v.doing[node] { + return errors.Errorf("cycle detected at %q (considering: %v)", node, v.doing) + } + if !v.done[node] { + v.doing[node] = true + for _, input := range v.inputs[node] { + if err := v.visit(input); err != nil { + // Tracing this error will not help anyone. + return err + } + } + v.done[node] = true + v.doing[node] = false + } + return nil +} + +// SelfManifold returns a manifold exposing a running dependency engine's +// Installer and Reporter services. The returned manifold is intended for +// installation into the engine it wraps; installing it into other engines +// may have surprising effects. 
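+//
+// A minimal usage sketch; the "self" name is a convention assumed here, not
+// mandated by the package, and config stands for a valid EngineConfig:
+//
+//     engine, err := NewEngine(config)
+//     if err != nil {
+//         return errors.Trace(err)
+//     }
+//     if err := engine.Install("self", SelfManifold(engine)); err != nil {
+//         return errors.Trace(err)
+//     }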
+func SelfManifold(engine Engine) Manifold { + return Manifold{ + Start: func(_ GetResourceFunc) (worker.Worker, error) { + return engine, nil + }, + Output: func(in worker.Worker, out interface{}) error { + if in != engine { + return errors.New("unexpected input worker") + } + switch outPtr := out.(type) { + case *Installer: + *outPtr = engine + case *Reporter: + *outPtr = engine + default: + return errors.Errorf("out should be a *Installer or a *Reporter; is %#v", out) + } + return nil + }, + } +} === modified file 'src/github.com/juju/juju/worker/dependency/util_test.go' --- src/github.com/juju/juju/worker/dependency/util_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/dependency/util_test.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,9 @@ import ( "time" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "launchpad.net/tomb" @@ -14,89 +17,176 @@ "github.com/juju/juju/worker/dependency" ) +type engineFixture struct { + testing.IsolationSuite + engine dependency.Engine +} + +func (s *engineFixture) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.startEngine(c, nothingFatal) +} + +func (s *engineFixture) TearDownTest(c *gc.C) { + s.stopEngine(c) + s.IsolationSuite.TearDownTest(c) +} + +func (s *engineFixture) startEngine(c *gc.C, isFatal dependency.IsFatalFunc) { + if s.engine != nil { + c.Fatalf("original engine not stopped") + } + config := dependency.EngineConfig{ + IsFatal: isFatal, + WorstError: func(err0, err1 error) error { return err0 }, + ErrorDelay: coretesting.ShortWait / 2, + BounceDelay: coretesting.ShortWait / 10, + } + + e, err := dependency.NewEngine(config) + c.Assert(err, jc.ErrorIsNil) + s.engine = e +} + +func (s *engineFixture) stopEngine(c *gc.C) { + if s.engine != nil { + err := worker.Stop(s.engine) + s.engine = nil + c.Check(err, jc.ErrorIsNil) + } +} + type manifoldHarness struct { inputs []string errors chan error starts chan struct{} + requireResources bool ignoreExternalKill bool } func newManifoldHarness(inputs ...string) *manifoldHarness { return &manifoldHarness{ - inputs: inputs, - errors: make(chan error, 1000), - starts: make(chan struct{}, 1000), - ignoreExternalKill: false, + inputs: inputs, + errors: make(chan error, 1000), + starts: make(chan struct{}, 1000), + requireResources: true, } } +func newResourceIgnoringManifoldHarness(inputs ...string) *manifoldHarness { + mh := newManifoldHarness(inputs...) + mh.requireResources = false + return mh +} + // newErrorIgnoringManifoldHarness starts a minimal worker that ignores // fatal errors - and will never die. // This is potentially nasty, but it's useful in tests where we want // to generate fatal errors but not race on which one the engine see first. func newErrorIgnoringManifoldHarness(inputs ...string) *manifoldHarness { - return &manifoldHarness{ - inputs: inputs, - errors: make(chan error, 1000), - starts: make(chan struct{}, 1000), - ignoreExternalKill: true, - } + mh := newManifoldHarness(inputs...) 
+ mh.ignoreExternalKill = true + return mh } -func (ews *manifoldHarness) Manifold() dependency.Manifold { +func (mh *manifoldHarness) Manifold() dependency.Manifold { return dependency.Manifold{ - Inputs: ews.inputs, - Start: ews.start, + Inputs: mh.inputs, + Start: mh.start, } } -func (ews *manifoldHarness) start(getResource dependency.GetResourceFunc) (worker.Worker, error) { - for _, resourceName := range ews.inputs { + +func (mh *manifoldHarness) start(getResource dependency.GetResourceFunc) (worker.Worker, error) { + for _, resourceName := range mh.inputs { if err := getResource(resourceName, nil); err != nil { - return nil, err + if mh.requireResources { + return nil, err + } } } - w := &minimalWorker{tomb.Tomb{}, ews.ignoreExternalKill} + w := &minimalWorker{tomb.Tomb{}, mh.ignoreExternalKill} go func() { defer w.tomb.Done() - ews.starts <- struct{}{} + mh.starts <- struct{}{} select { case <-w.tombDying(): - case err := <-ews.errors: + case err := <-mh.errors: w.tomb.Kill(err) } }() return w, nil } -func (ews *manifoldHarness) AssertOneStart(c *gc.C) { - ews.AssertStart(c) - ews.AssertNoStart(c) +func (mh *manifoldHarness) AssertOneStart(c *gc.C) { + mh.AssertStart(c) + mh.AssertNoStart(c) } -func (ews *manifoldHarness) AssertStart(c *gc.C) { +func (mh *manifoldHarness) AssertStart(c *gc.C) { select { - case <-ews.starts: + case <-mh.starts: case <-time.After(coretesting.LongWait): c.Fatalf("never started") } } -func (ews *manifoldHarness) AssertNoStart(c *gc.C) { +func (mh *manifoldHarness) AssertNoStart(c *gc.C) { select { case <-time.After(coretesting.ShortWait): - case <-ews.starts: + case <-mh.starts: c.Fatalf("started unexpectedly") } } -func (ews *manifoldHarness) InjectError(c *gc.C, err error) { +func (mh *manifoldHarness) InjectError(c *gc.C, err error) { select { - case ews.errors <- err: + case mh.errors <- err: case <-time.After(coretesting.LongWait): c.Fatalf("never sent") } } +func newTracedManifoldHarness(inputs ...string) *tracedManifoldHarness { + return &tracedManifoldHarness{ + &manifoldHarness{ + inputs: inputs, + errors: make(chan error, 1000), + starts: make(chan struct{}, 1000), + ignoreExternalKill: false, + }, + } +} + +type tracedManifoldHarness struct { + *manifoldHarness +} + +func (mh *tracedManifoldHarness) Manifold() dependency.Manifold { + return dependency.Manifold{ + Inputs: mh.inputs, + Start: mh.start, + } +} + +func (mh *tracedManifoldHarness) start(getResource dependency.GetResourceFunc) (worker.Worker, error) { + for _, resourceName := range mh.inputs { + if err := getResource(resourceName, nil); err != nil { + return nil, errors.Trace(err) + } + } + w := &minimalWorker{tomb.Tomb{}, mh.ignoreExternalKill} + go func() { + defer w.tomb.Done() + mh.starts <- struct{}{} + select { + case <-w.tombDying(): + case err := <-mh.errors: + w.tomb.Kill(err) + } + }() + return w, nil +} + type minimalWorker struct { tomb tomb.Tomb ignoreExternalKill bool @@ -117,6 +207,12 @@ return w.tomb.Wait() } +func (w *minimalWorker) Report() map[string]interface{} { + return map[string]interface{}{ + "key1": "hello there", + } +} + func startMinimalWorker(_ dependency.GetResourceFunc) (worker.Worker, error) { w := &minimalWorker{} go func() { === modified file 'src/github.com/juju/juju/worker/deployer/deployer.go' --- src/github.com/juju/juju/worker/deployer/deployer.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/deployer/deployer.go 2016-03-22 15:18:22 +0000 @@ -14,8 +14,8 @@ "github.com/juju/juju/agent" apideployer 
"github.com/juju/juju/api/deployer" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -54,13 +54,19 @@ // NewDeployer returns a Worker that deploys and recalls unit agents // via ctx, taking a machine id to operate on. -func NewDeployer(st *apideployer.State, ctx Context) worker.Worker { +func NewDeployer(st *apideployer.State, ctx Context) (worker.Worker, error) { d := &Deployer{ st: st, ctx: ctx, deployed: make(set.Strings), } - return worker.NewStringsWorker(d) + w, err := watcher.NewStringsWorker(watcher.StringsConfig{ + Handler: d, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } func (d *Deployer) SetUp() (watcher.StringsWatcher, error) { @@ -91,7 +97,7 @@ return machineUnitsWatcher, nil } -func (d *Deployer) Handle(unitNames []string) error { +func (d *Deployer) Handle(_ <-chan struct{}, unitNames []string) error { for _, unitName := range unitNames { if err := d.changed(unitName); err != nil { return err === modified file 'src/github.com/juju/juju/worker/deployer/deployer_test.go' --- src/github.com/juju/juju/worker/deployer/deployer_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/deployer/deployer_test.go 2016-03-22 15:18:22 +0000 @@ -4,10 +4,8 @@ package deployer_test import ( - "runtime" "sort" "strings" - stdtesting "testing" "time" "github.com/juju/errors" @@ -23,14 +21,6 @@ "github.com/juju/juju/worker/deployer" ) -func TestPackage(t *stdtesting.T) { - //TODO(bogdanteleaga): Fix this on windows - if runtime.GOOS == "windows" { - t.Skip("bug 1403084: Currently does not work under windows") - } - coretesting.MgoTestPackage(t) -} - type deployerSuite struct { jujutesting.JujuConnSuite SimpleToolsFixture @@ -42,8 +32,6 @@ var _ = gc.Suite(&deployerSuite{}) -var _ worker.StringsWatchHandler = (*deployer.Deployer)(nil) - func (s *deployerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) s.SimpleToolsFixture.SetUp(c, s.DataDir()) @@ -61,7 +49,9 @@ func (s *deployerSuite) makeDeployerAndContext(c *gc.C) (worker.Worker, deployer.Context) { // Create a deployer acting on behalf of the machine. ctx := s.getContextForMachine(c, s.machine.Tag()) - return deployer.NewDeployer(s.deployerState, ctx), ctx + deployer, err := deployer.NewDeployer(s.deployerState, ctx) + c.Assert(err, jc.ErrorIsNil) + return deployer, ctx } func (s *deployerSuite) TestDeployRecallRemovePrincipals(c *gc.C) { === added file 'src/github.com/juju/juju/worker/deployer/manifold.go' --- src/github.com/juju/juju/worker/deployer/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/deployer/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,76 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package deployer + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + apideployer "github.com/juju/juju/api/deployer" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines the names of the manifolds on which a Manifold will depend. 
+type ManifoldConfig struct { + util.PostUpgradeManifoldConfig + NewDeployContext func(st *apideployer.State, agentConfig agent.Config) Context +} + +// Manifold returns a dependency manifold that runs a deployer worker, +// using the resource names defined in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + + // newWorker trivially wraps NewDeployer for use in a util.PostUpgradeManifold. + // + // It's not tested at the moment, because the scaffolding + // necessary is too unwieldy/distracting to introduce at this point. + newWorker := func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + cfg := a.CurrentConfig() + // Grab the tag and ensure that it's for a machine. + tag, ok := cfg.Tag().(names.MachineTag) + if !ok { + return nil, errors.New("agent's tag is not a machine tag") + } + + // Get API connection. + apiConn, ok := apiCaller.(api.Connection) + if !ok { + return nil, errors.New("unable to obtain api.Connection") + } + + // Get the machine agent's jobs. + entity, err := apiConn.Agent().Entity(tag) + if err != nil { + return nil, err + } + + var isUnitHoster bool + for _, job := range entity.Jobs() { + if job == multiwatcher.JobHostUnits { + isUnitHoster = true + break + } + } + + if !isUnitHoster { + return nil, dependency.ErrUninstall + } + + apiDeployer := apiConn.Deployer() + context := config.NewDeployContext(apiDeployer, cfg) + w, err := NewDeployer(apiDeployer, context) + if err != nil { + return nil, errors.Annotate(err, "cannot start unit agent deployer worker") + } + return w, nil + } + + return util.PostUpgradeManifold(config.PostUpgradeManifoldConfig, newWorker) +} === added file 'src/github.com/juju/juju/worker/deployer/package_test.go' --- src/github.com/juju/juju/worker/deployer/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/deployer/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package deployer_test + +import ( + "runtime" + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + //TODO(bogdanteleaga): Fix this on windows + if runtime.GOOS == "windows" { + t.Skip("bug 1403084: Currently does not work under windows") + } + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/worker/deployer/simple.go' --- src/github.com/juju/juju/worker/deployer/simple.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/deployer/simple.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,8 @@ "github.com/juju/errors" "github.com/juju/names" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" "github.com/juju/utils/shell" "github.com/juju/juju/agent" @@ -31,7 +33,7 @@ // SimpleContext is a Context that manages unit deployments on the local system. type SimpleContext struct { - // api is used to get the current state server addresses at the time the + // api is used to get the current controller addresses at the time the // given unit is deployed. 
api APICalls @@ -108,11 +110,14 @@ tag := names.NewUnitTag(unitName) dataDir := ctx.agentConfig.DataDir() logDir := ctx.agentConfig.LogDir() - // TODO(dfc) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } toolsDir := tools.ToolsDir(dataDir, tag.String()) defer removeOnErr(&err, toolsDir) - // TODO(dfc) - _, err = tools.ChangeAgentTools(dataDir, tag.String(), version.Current) + _, err = tools.ChangeAgentTools(dataDir, tag.String(), current) if err != nil { return errors.Trace(err) } @@ -127,13 +132,16 @@ namespace := ctx.agentConfig.Value(agent.Namespace) conf, err := agent.NewAgentConfig( agent.AgentConfigParams{ - DataDir: dataDir, - LogDir: logDir, - UpgradedToVersion: version.Current.Number, + Paths: agent.Paths{ + DataDir: dataDir, + LogDir: logDir, + MetricsSpoolDir: agent.DefaultPaths.MetricsSpoolDir, + }, + UpgradedToVersion: version.Current, Tag: tag, Password: initialPassword, Nonce: "unused", - Environment: ctx.agentConfig.Environment(), + Model: ctx.agentConfig.Model(), // TODO: remove the state addresses here and test when api only. StateAddresses: result.StateAddresses, APIAddresses: result.APIAddresses, === modified file 'src/github.com/juju/juju/worker/deployer/simple_test.go' --- src/github.com/juju/juju/worker/deployer/simple_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/deployer/simple_test.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,8 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/agent" @@ -156,14 +158,19 @@ func (fix *SimpleToolsFixture) SetUp(c *gc.C, dataDir string) { fix.dataDir = dataDir fix.logDir = c.MkDir() - toolsDir := tools.SharedToolsDir(fix.dataDir, version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + toolsDir := tools.SharedToolsDir(fix.dataDir, current) err := os.MkdirAll(toolsDir, 0755) c.Assert(err, jc.ErrorIsNil) jujudPath := filepath.Join(toolsDir, "jujud") err = ioutil.WriteFile(jujudPath, []byte(fakeJujud), 0755) c.Assert(err, jc.ErrorIsNil) toolsPath := filepath.Join(toolsDir, "downloaded-tools.txt") - testTools := coretools.Tools{Version: version.Current, URL: "http://testing.invalid/tools"} + testTools := coretools.Tools{Version: current, URL: "http://testing.invalid/tools"} data, err := json.Marshal(testTools) c.Assert(err, jc.ErrorIsNil) err = ioutil.WriteFile(toolsPath, data, 0644) @@ -315,8 +322,8 @@ return nil } -func (mock *mockConfig) Environment() names.EnvironTag { - return testing.EnvironmentTag +func (mock *mockConfig) Model() names.ModelTag { + return testing.ModelTag } func (mock *mockConfig) CACert() string { === added directory 'src/github.com/juju/juju/worker/discoverspaces' === added file 'src/github.com/juju/juju/worker/discoverspaces/discoverspaces.go' --- src/github.com/juju/juju/worker/discoverspaces/discoverspaces.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/discoverspaces/discoverspaces.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,239 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
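+//
+// As a quick illustration of the space-name mangling performed by
+// convertSpaceName below, these input/expected pairs are lifted from the
+// table in worker_test.go:
+//
+//     "Foo Thing"         -> "foo-thing"
+//     "foo^9*//++!!!!"    -> "foo9"
+//     "-foo--foo----bar-" -> "foo-foo-bar"
+//     "---^^&*()!"        -> "empty"
+//     "foo"               -> "foo-3"  (when "foo" and "foo-2" already exist)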
+
+package discoverspaces
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/names"
+	"github.com/juju/utils/set"
+	"launchpad.net/tomb"
+
+	"github.com/juju/juju/api/discoverspaces"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/network"
+	"github.com/juju/juju/worker"
+)
+
+var logger = loggo.GetLogger("juju.discoverspaces")
+
+type discoverspacesWorker struct {
+	api               *discoverspaces.API
+	tomb              tomb.Tomb
+	discoveringSpaces chan struct{}
+}
+
+var dashPrefix = regexp.MustCompile("^-*")
+var dashSuffix = regexp.MustCompile("-*$")
+var multipleDashes = regexp.MustCompile("--+")
+
+func convertSpaceName(name string, existing set.Strings) string {
+	// First lower case and replace spaces with dashes.
+	name = strings.Replace(name, " ", "-", -1)
+	name = strings.ToLower(name)
+	// Replace any character that isn't in the set "-", "a-z", "0-9".
+	name = network.SpaceInvalidChars.ReplaceAllString(name, "")
+	// Get rid of any dashes at the start as that isn't valid.
+	name = dashPrefix.ReplaceAllString(name, "")
+	// And any at the end.
+	name = dashSuffix.ReplaceAllString(name, "")
+	// Replace multiple dashes with a single dash.
+	name = multipleDashes.ReplaceAllString(name, "-")
+	// Special case: the name contained only dashes or invalid characters.
+	if name == "" {
+		name = "empty"
+	}
+	// If this name is in use add a numerical suffix.
+	if existing.Contains(name) {
+		counter := 2
+		for existing.Contains(name + fmt.Sprintf("-%d", counter)) {
+			counter++
+		}
+		name = name + fmt.Sprintf("-%d", counter)
+	}
+	return name
+}
+
+// NewWorker returns a worker that records provider spaces and subnets in
+// state, together with a channel that is closed once the initial discovery
+// attempt has completed.
+func NewWorker(api *discoverspaces.API) (worker.Worker, chan struct{}) {
+	dw := &discoverspacesWorker{
+		api:               api,
+		discoveringSpaces: make(chan struct{}),
+	}
+	go func() {
+		defer dw.tomb.Done()
+		dw.tomb.Kill(dw.loop())
+	}()
+	return dw, dw.discoveringSpaces
+}
+
+func (dw *discoverspacesWorker) Kill() {
+	dw.tomb.Kill(nil)
+}
+
+func (dw *discoverspacesWorker) Wait() error {
+	return dw.tomb.Wait()
+}
+
+func (dw *discoverspacesWorker) loop() (err error) {
+	ensureClosed := func() {
+		select {
+		case <-dw.discoveringSpaces:
+			// Already closed.
+			return
+		default:
+			close(dw.discoveringSpaces)
+		}
+	}
+	defer ensureClosed()
+	modelCfg, err := dw.api.ModelConfig()
+	if err != nil {
+		return err
+	}
+	model, err := environs.New(modelCfg)
+	if err != nil {
+		return err
+	}
+	networkingModel, ok := environs.SupportsNetworking(model)
+
+	if ok {
+		err = dw.handleSubnets(networkingModel)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	}
+	close(dw.discoveringSpaces)
+
+	// TODO(mfoord): we'll have a watcher here checking if we need to
+	// update the spaces/subnets definition.
+	dying := dw.tomb.Dying()
+	for {
+		select {
+		case <-dying:
+			return nil
+		}
+	}
+}
+
+func (dw *discoverspacesWorker) handleSubnets(env environs.NetworkingEnviron) error {
+	ok, err := env.SupportsSpaceDiscovery()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if !ok {
+		// Nothing to do.
+ return nil + } + providerSpaces, err := env.Spaces() + if err != nil { + return errors.Trace(err) + } + listSpacesResult, err := dw.api.ListSpaces() + if err != nil { + return errors.Trace(err) + } + + stateSubnets, err := dw.api.ListSubnets(params.SubnetsFilters{}) + if err != nil { + return errors.Trace(err) + } + stateSubnetIds := make(set.Strings) + for _, subnet := range stateSubnets.Results { + stateSubnetIds.Add(subnet.ProviderId) + } + stateSpaceMap := make(map[string]params.ProviderSpace) + spaceNames := make(set.Strings) + for _, space := range listSpacesResult.Results { + stateSpaceMap[space.ProviderId] = space + spaceNames.Add(space.Name) + } + + // TODO(mfoord): we need to delete spaces and subnets that no longer + // exist, so long as they're not in use. + for _, space := range providerSpaces { + // Check if the space is already in state, in which case we know + // its name. + stateSpace, ok := stateSpaceMap[string(space.ProviderId)] + var spaceTag names.SpaceTag + if ok { + spaceName := stateSpace.Name + if !names.IsValidSpace(spaceName) { + // Can only happen if an invalid name is stored + // in state. + logger.Errorf("space %q has an invalid name, ignoring", spaceName) + continue + + } + spaceTag = names.NewSpaceTag(spaceName) + + } else { + // The space is new, we need to create a valid name for it + // in state. + spaceName := string(space.ProviderId) + // Convert the name into a valid name that isn't already in + // use. + spaceName = convertSpaceName(spaceName, spaceNames) + spaceNames.Add(spaceName) + spaceTag = names.NewSpaceTag(spaceName) + // We need to create the space. + args := params.CreateSpacesParams{ + Spaces: []params.CreateSpaceParams{{ + Public: false, + SpaceTag: spaceTag.String(), + ProviderId: string(space.ProviderId), + }}} + result, err := dw.api.CreateSpaces(args) + if err != nil { + logger.Errorf("error creating space %v", err) + return errors.Trace(err) + } + if len(result.Results) != 1 { + return errors.Errorf("unexpected number of results from CreateSpaces, should be 1: %v", result) + } + if result.Results[0].Error != nil { + return errors.Errorf("error from CreateSpaces: %v", result.Results[0].Error) + } + } + // TODO(mfoord): currently no way of removing subnets, or + // changing the space they're in, so we can only add ones we + // don't already know about. 
+		logger.Debugf("Created space %v with %v subnets", spaceTag.String(), len(space.Subnets))
+		for _, subnet := range space.Subnets {
+			if stateSubnetIds.Contains(string(subnet.ProviderId)) {
+				continue
+			}
+			zones := subnet.AvailabilityZones
+			if len(zones) == 0 {
+				zones = []string{"default"}
+			}
+			args := params.AddSubnetsParams{
+				Subnets: []params.AddSubnetParams{{
+					SubnetProviderId: string(subnet.ProviderId),
+					SpaceTag:         spaceTag.String(),
+					Zones:            zones,
+				}}}
+			logger.Tracef("Adding subnet %v", subnet.CIDR)
+			result, err := dw.api.AddSubnets(args)
+			if err != nil {
+				logger.Errorf("error creating subnet %v", err)
+				return errors.Trace(err)
+			}
+			if len(result.Results) != 1 {
+				return errors.Errorf("unexpected number of results from AddSubnets, should be 1: %v", result)
+			}
+			if result.Results[0].Error != nil {
+				logger.Errorf("error creating subnet %v", result.Results[0].Error)
+				return errors.Errorf("error creating subnet %v", result.Results[0].Error)
+			}
+		}
+	}
+	return nil
+}
=== added file 'src/github.com/juju/juju/worker/discoverspaces/export_test.go'
--- src/github.com/juju/juju/worker/discoverspaces/export_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/discoverspaces/export_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,6 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package discoverspaces
+
+var ConvertSpaceName = convertSpaceName
=== added file 'src/github.com/juju/juju/worker/discoverspaces/package_test.go'
--- src/github.com/juju/juju/worker/discoverspaces/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/discoverspaces/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package discoverspaces_test
+
+import (
+	stdtesting "testing"
+
+	"github.com/juju/juju/testing"
+)
+
+func TestAll(t *stdtesting.T) {
+	testing.MgoTestPackage(t)
+}
=== added file 'src/github.com/juju/juju/worker/discoverspaces/worker_test.go'
--- src/github.com/juju/juju/worker/discoverspaces/worker_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/discoverspaces/worker_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,332 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package discoverspaces_test
+
+import (
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils/set"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/api"
+	apidiscoverspaces "github.com/juju/juju/api/discoverspaces"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/network"
+	"github.com/juju/juju/provider/common"
+	"github.com/juju/juju/provider/dummy"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/discoverspaces"
+)
+
+type workerSuite struct {
+	testing.JujuConnSuite
+
+	Worker  worker.Worker
+	OpsChan chan dummy.Operation
+
+	APIConnection    api.Connection
+	API              *apidiscoverspaces.API
+	spacesDiscovered chan struct{}
+}
+
+var _ = gc.Suite(&workerSuite{})
+
+func (s *workerSuite) SetUpTest(c *gc.C) {
+	s.JujuConnSuite.SetUpTest(c)
+
+	// Unbreak dummy provider methods.
+	s.AssertConfigParameterUpdated(c, "broken", "")
+
+	s.APIConnection, _ = s.OpenAPIAsNewMachine(c, state.JobManageModel)
+	s.API = s.APIConnection.DiscoverSpaces()
+
+	s.OpsChan = make(chan dummy.Operation, 10)
+	dummy.Listen(s.OpsChan)
+	s.spacesDiscovered = nil
+}
+
+func (s *workerSuite) startWorker() {
+	s.Worker, s.spacesDiscovered = discoverspaces.NewWorker(s.API)
+}
+
+func (s *workerSuite) TearDownTest(c *gc.C) {
+	if s.Worker != nil {
+		c.Assert(worker.Stop(s.Worker), jc.ErrorIsNil)
+	}
+	s.JujuConnSuite.TearDownTest(c)
+}
+
+func (s *workerSuite) TestConvertSpaceName(c *gc.C) {
+	empty := set.Strings{}
+	nameTests := []struct {
+		name     string
+		existing set.Strings
+		expected string
+	}{
+		{"foo", empty, "foo"},
+		{"foo1", empty, "foo1"},
+		{"Foo Thing", empty, "foo-thing"},
+		{"foo^9*//++!!!!", empty, "foo9"},
+		{"--Foo", empty, "foo"},
+		{"---^^&*()!", empty, "empty"},
+		{" ", empty, "empty"},
+		{"", empty, "empty"},
+		{"foo\u2318", empty, "foo"},
+		{"foo--", empty, "foo"},
+		{"-foo--foo----bar-", empty, "foo-foo-bar"},
+		{"foo-", set.NewStrings("foo", "bar", "baz"), "foo-2"},
+		{"foo", set.NewStrings("foo", "foo-2"), "foo-3"},
+		{"---", set.NewStrings("empty"), "empty-2"},
+	}
+	for _, test := range nameTests {
+		result := discoverspaces.ConvertSpaceName(test.name, test.existing)
+		c.Check(result, gc.Equals, test.expected)
+	}
+}
+
+func (s *workerSuite) TestWorkerIsStringsWorker(c *gc.C) {
+	s.startWorker()
+	c.Assert(s.Worker, gc.Not(gc.FitsTypeOf), worker.FinishedWorker{})
+}
+
+func (s *workerSuite) assertSpaceDiscoveryCompleted(c *gc.C) {
+	c.Assert(s.spacesDiscovered, gc.NotNil)
+	select {
+	case <-s.spacesDiscovered:
+		// The channel was closed as it should be.
+		return
+	default:
+		c.Fatalf("Space discovery channel not closed")
+	}
+}
+
+func (s *workerSuite) TestWorkerSupportsNetworkingFalse(c *gc.C) {
+	// We set SupportsSpaceDiscovery to true so that spaces *would* be
+	// discovered if networking was supported. So we know that if they're
+	// not discovered it must be because networking is not supported.
+	dummy.SetSupportsSpaceDiscovery(true)
+	noNetworking := func(environs.Environ) (environs.NetworkingEnviron, bool) {
+		return nil, false
+	}
+	s.PatchValue(&environs.SupportsNetworking, noNetworking)
+	s.startWorker()
+
+	// No spaces will have been created, worker does nothing.
+	for a := common.ShortAttempt.Start(); a.Next(); {
+		spaces, err := s.State.AllSpaces()
+		c.Assert(err, jc.ErrorIsNil)
+		if len(spaces) != 0 {
+			c.Fatalf("spaces should not be created, we have %v", len(spaces))
+		}
+		if !a.HasNext() {
+			break
+		}
+	}
+	s.assertSpaceDiscoveryCompleted(c)
+}
+
+func (s *workerSuite) TestWorkerSupportsSpaceDiscoveryFalse(c *gc.C) {
+	s.startWorker()
+
+	// No spaces will have been created, worker does nothing.
+	for a := common.ShortAttempt.Start(); a.Next(); {
+		spaces, err := s.State.AllSpaces()
+		c.Assert(err, jc.ErrorIsNil)
+		if len(spaces) != 0 {
+			c.Fatalf("spaces should not be created, we have %v", len(spaces))
+		}
+		if !a.HasNext() {
+			break
+		}
+	}
+	s.assertSpaceDiscoveryCompleted(c)
+}
+
+func (s *workerSuite) TestWorkerDiscoversSpaces(c *gc.C) {
+	dummy.SetSupportsSpaceDiscovery(true)
+	s.startWorker()
+	for a := common.ShortAttempt.Start(); a.Next(); {
+		var found bool
+		select {
+		case <-s.spacesDiscovered:
+			// The channel was closed so discovery has completed.
+ found = true + } + if found { + break + } + if !a.HasNext() { + c.Fatalf("discovery not completed") + } + } + + spaces, err := s.State.AllSpaces() + c.Assert(err, jc.ErrorIsNil) + c.Assert(spaces, gc.HasLen, 4) + expectedSpaces := []network.SpaceInfo{{ + Name: "foo", + ProviderId: network.Id("foo"), + Subnets: []network.SubnetInfo{{ + ProviderId: network.Id("1"), + CIDR: "192.168.1.0/24", + AvailabilityZones: []string{"zone1"}, + }, { + ProviderId: network.Id("2"), + CIDR: "192.168.2.0/24", + AvailabilityZones: []string{"zone1"}, + }}}, { + Name: "another-foo-99", + ProviderId: network.Id("Another Foo 99!"), + Subnets: []network.SubnetInfo{{ + ProviderId: network.Id("3"), + CIDR: "192.168.3.0/24", + AvailabilityZones: []string{"zone1"}, + }}}, { + Name: "foo-2", + ProviderId: network.Id("foo-"), + Subnets: []network.SubnetInfo{{ + ProviderId: network.Id("4"), + CIDR: "192.168.4.0/24", + AvailabilityZones: []string{"zone1"}, + }}}, { + Name: "empty", + ProviderId: network.Id("---"), + Subnets: []network.SubnetInfo{{ + ProviderId: network.Id("5"), + CIDR: "192.168.5.0/24", + AvailabilityZones: []string{"zone1"}, + }}}} + expectedSpaceMap := make(map[string]network.SpaceInfo) + for _, space := range expectedSpaces { + expectedSpaceMap[space.Name] = space + } + for _, space := range spaces { + expected, ok := expectedSpaceMap[space.Name()] + if !c.Check(ok, jc.IsTrue) { + continue + } + c.Check(space.ProviderId(), gc.Equals, expected.ProviderId) + subnets, err := space.Subnets() + if !c.Check(err, jc.ErrorIsNil) { + continue + } + if !c.Check(len(subnets), gc.Equals, len(expected.Subnets)) { + continue + } + for i, subnet := range subnets { + expectedSubnet := expected.Subnets[i] + c.Check(subnet.ProviderId(), gc.Equals, expectedSubnet.ProviderId) + c.Check([]string{subnet.AvailabilityZone()}, jc.DeepEquals, expectedSubnet.AvailabilityZones) + c.Check(subnet.CIDR(), gc.Equals, expectedSubnet.CIDR) + } + } + s.assertSpaceDiscoveryCompleted(c) +} + +func (s *workerSuite) TestWorkerIdempotent(c *gc.C) { + dummy.SetSupportsSpaceDiscovery(true) + s.startWorker() + var err error + var spaces []*state.Space + for a := common.ShortAttempt.Start(); a.Next(); { + spaces, err = s.State.AllSpaces() + if err != nil { + break + } + if len(spaces) == 4 { + // All spaces have been created. + break + } + if !a.HasNext() { + c.Fatalf("spaces not imported") + } + } + c.Assert(err, jc.ErrorIsNil) + newWorker, _ := discoverspaces.NewWorker(s.API) + + // This ensures that the worker can handle re-importing without error. + defer func() { + c.Assert(worker.Stop(newWorker), jc.ErrorIsNil) + }() + + // Check that no extra spaces are imported. 
+ for a := common.ShortAttempt.Start(); a.Next(); { + spaces, err = s.State.AllSpaces() + if err != nil { + break + } + if len(spaces) != 4 { + c.Fatalf("unexpected number of spaces: %v", len(spaces)) + } + if !a.HasNext() { + break + } + } +} + +func (s *workerSuite) TestSupportsSpaceDiscoveryBroken(c *gc.C) { + s.AssertConfigParameterUpdated(c, "broken", "SupportsSpaceDiscovery") + + newWorker, spacesDiscovered := discoverspaces.NewWorker(s.API) + s.spacesDiscovered = spacesDiscovered + err := worker.Stop(newWorker) + c.Assert(err, gc.ErrorMatches, "dummy.SupportsSpaceDiscovery is broken") + s.assertSpaceDiscoveryCompleted(c) +} + +func (s *workerSuite) TestSpacesBroken(c *gc.C) { + dummy.SetSupportsSpaceDiscovery(true) + s.AssertConfigParameterUpdated(c, "broken", "Spaces") + + newWorker, spacesDiscovered := discoverspaces.NewWorker(s.API) + s.spacesDiscovered = spacesDiscovered + err := worker.Stop(newWorker) + c.Assert(err, gc.ErrorMatches, "dummy.Spaces is broken") + s.assertSpaceDiscoveryCompleted(c) +} + +func (s *workerSuite) TestWorkerIgnoresExistingSpacesAndSubnets(c *gc.C) { + dummy.SetSupportsSpaceDiscovery(true) + spaceTag := names.NewSpaceTag("foo") + args := params.CreateSpacesParams{ + Spaces: []params.CreateSpaceParams{{ + Public: false, + SpaceTag: spaceTag.String(), + ProviderId: "foo", + }}} + result, err := s.API.CreateSpaces(args) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.Results, gc.HasLen, 1) + c.Assert(result.Results[0].Error, gc.IsNil) + + subnetArgs := params.AddSubnetsParams{ + Subnets: []params.AddSubnetParams{{ + SubnetProviderId: "1", + SpaceTag: spaceTag.String(), + Zones: []string{"zone1"}, + }}} + subnetResult, err := s.API.AddSubnets(subnetArgs) + c.Assert(err, jc.ErrorIsNil) + c.Assert(subnetResult.Results, gc.HasLen, 1) + c.Assert(subnetResult.Results[0].Error, gc.IsNil) + + s.startWorker() + for a := common.ShortAttempt.Start(); a.Next(); { + spaces, err := s.State.AllSpaces() + if err != nil { + break + } + if len(spaces) == 4 { + // All spaces have been created. + break + } + if !a.HasNext() { + c.Fatalf("spaces not imported") + } + } + c.Assert(err, jc.ErrorIsNil) +} === modified file 'src/github.com/juju/juju/worker/diskmanager/diskmanager.go' --- src/github.com/juju/juju/worker/diskmanager/diskmanager.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/diskmanager/diskmanager.go 2016-03-22 15:18:22 +0000 @@ -41,7 +41,7 @@ // NewWorker returns a worker that lists block devices // attached to the machine, and records them in state. 
-func NewWorker(l ListBlockDevicesFunc, b BlockDeviceSetter) worker.Worker { +var NewWorker = func(l ListBlockDevicesFunc, b BlockDeviceSetter) worker.Worker { var old []storage.BlockDevice f := func(stop <-chan struct{}) error { return doWork(l, b, &old) === modified file 'src/github.com/juju/juju/worker/diskmanager/diskmanager_unsupported.go' --- src/github.com/juju/juju/worker/diskmanager/diskmanager_unsupported.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/diskmanager/diskmanager_unsupported.go 2016-03-22 15:18:22 +0000 @@ -6,8 +6,9 @@ package diskmanager import ( + "runtime" + "github.com/juju/juju/storage" - "github.com/juju/juju/version" ) var blockDeviceInUse = func(storage.BlockDevice) (bool, error) { @@ -22,7 +23,7 @@ func init() { logger.Infof( "block device support has not been implemented for %s", - version.Current.OS, + runtime.GOOS, ) DefaultListBlockDevices = listBlockDevices } === modified file 'src/github.com/juju/juju/worker/diskmanager/export_test.go' --- src/github.com/juju/juju/worker/diskmanager/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/diskmanager/export_test.go 2016-03-22 15:18:22 +0000 @@ -7,4 +7,5 @@ ListBlockDevices = listBlockDevices BlockDeviceInUse = &blockDeviceInUse DoWork = doWork + NewWorkerFunc = newWorker ) === added file 'src/github.com/juju/juju/worker/diskmanager/manifold.go' --- src/github.com/juju/juju/worker/diskmanager/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/diskmanager/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package diskmanager + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + apidiskmanager "github.com/juju/juju/api/diskmanager" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines the names of the manifolds on which a Manifold will depend. +type ManifoldConfig util.PostUpgradeManifoldConfig + +// Manifold returns a dependency manifold that runs a diskmanager worker, +// using the resource names defined in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) +} + +// newWorker trivially wraps NewWorker for use in a util.PostUpgradeManifold. +func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + t := a.CurrentConfig().Tag() + tag, ok := t.(names.MachineTag) + if !ok { + return nil, errors.Errorf("expected MachineTag, got %#v", t) + } + + api := apidiskmanager.NewState(apiCaller, tag) + + return NewWorker(DefaultListBlockDevices, api), nil +} === added file 'src/github.com/juju/juju/worker/diskmanager/manifold_test.go' --- src/github.com/juju/juju/worker/diskmanager/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/diskmanager/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,88 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package diskmanager_test + +import ( + apidiskmanager "github.com/juju/juju/api/diskmanager" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + basetesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/state/multiwatcher" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/diskmanager" +) + +type manifoldSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&manifoldSuite{}) + +func (s *manifoldSuite) TestMachineDiskmanager(c *gc.C) { + + called := false + + apiCaller := basetesting.APICallerFunc( + func(objType string, + version int, + id, request string, + a, response interface{}, + ) error { + + // We don't test the api call. We test that NewWorker is + // passed the expected arguments. + return nil + }) + + s.PatchValue(&diskmanager.NewWorker, func(l diskmanager.ListBlockDevicesFunc, b diskmanager.BlockDeviceSetter) worker.Worker { + called = true + + c.Assert(l, gc.FitsTypeOf, diskmanager.DefaultListBlockDevices) + c.Assert(b, gc.NotNil) + + api, ok := b.(*apidiskmanager.State) + c.Assert(ok, jc.IsTrue) + c.Assert(api, gc.NotNil) + + return nil + }) + + a := &dummyAgent{ + tag: names.NewMachineTag("1"), + jobs: []multiwatcher.MachineJob{ + multiwatcher.JobManageModel, + }, + } + + _, err := diskmanager.NewWorkerFunc(a, apiCaller) + c.Assert(err, jc.ErrorIsNil) + c.Assert(called, jc.IsTrue) +} + +type dummyAgent struct { + agent.Agent + tag names.Tag + jobs []multiwatcher.MachineJob +} + +func (a dummyAgent) CurrentConfig() agent.Config { + return dummyCfg{ + tag: a.tag, + jobs: a.jobs, + } +} + +type dummyCfg struct { + agent.Config + tag names.Tag + jobs []multiwatcher.MachineJob +} + +func (c dummyCfg) Tag() names.Tag { + return c.tag +} === added directory 'src/github.com/juju/juju/worker/environ' === removed file 'src/github.com/juju/juju/worker/environ.go' --- src/github.com/juju/juju/worker/environ.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/environ.go 1970-01-01 00:00:00 +0000 @@ -1,149 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package worker - -import ( - "sync" - - "github.com/juju/errors" - "github.com/juju/loggo" - "launchpad.net/tomb" - - apiwatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/state/watcher" -) - -var ErrTerminateAgent = errors.New("agent should be terminated") -var ErrRebootMachine = errors.New("machine needs to reboot") -var ErrShutdownMachine = errors.New("machine needs to shutdown") - -var loadedInvalid = func() {} - -var logger = loggo.GetLogger("juju.worker") - -// EnvironConfigGetter interface defines a way to read the environment -// configuration. -type EnvironConfigGetter interface { - EnvironConfig() (*config.Config, error) -} - -// TODO(rog) remove WaitForEnviron, as we now should always -// start with a valid environ config. - -// WaitForEnviron waits for an valid environment to arrive from -// the given watcher. It terminates with tomb.ErrDying if -// it receives a value on dying. 
-func WaitForEnviron(w apiwatcher.NotifyWatcher, st EnvironConfigGetter, dying <-chan struct{}) (environs.Environ, error) { - for { - select { - case <-dying: - return nil, tomb.ErrDying - case _, ok := <-w.Changes(): - if !ok { - return nil, watcher.EnsureErr(w) - } - config, err := st.EnvironConfig() - if err != nil { - return nil, err - } - environ, err := environs.New(config) - if err == nil { - return environ, nil - } - logger.Errorf("loaded invalid environment configuration: %v", err) - loadedInvalid() - } - } -} - -// EnvironConfigObserver interface defines a way to read the -// environment configuration and watch for changes. -type EnvironConfigObserver interface { - EnvironConfigGetter - WatchForEnvironConfigChanges() (apiwatcher.NotifyWatcher, error) -} - -// EnvironObserver watches the current environment configuration -// and makes it available. It discards invalid environment -// configurations. -type EnvironObserver struct { - tomb tomb.Tomb - environWatcher apiwatcher.NotifyWatcher - st EnvironConfigObserver - mu sync.Mutex - environ environs.Environ -} - -// NewEnvironObserver waits for the environment to have a valid -// environment configuration and returns a new environment observer. -// While waiting for the first environment configuration, it will -// return with tomb.ErrDying if it receives a value on dying. -func NewEnvironObserver(st EnvironConfigObserver) (*EnvironObserver, error) { - config, err := st.EnvironConfig() - if err != nil { - return nil, err - } - environ, err := environs.New(config) - if err != nil { - return nil, errors.Annotate(err, "cannot create an environment") - } - environWatcher, err := st.WatchForEnvironConfigChanges() - if err != nil { - return nil, errors.Annotate(err, "cannot watch environment config") - } - obs := &EnvironObserver{ - st: st, - environ: environ, - environWatcher: environWatcher, - } - go func() { - defer obs.tomb.Done() - defer watcher.Stop(environWatcher, &obs.tomb) - obs.tomb.Kill(obs.loop()) - }() - return obs, nil -} - -func (obs *EnvironObserver) loop() error { - for { - select { - case <-obs.tomb.Dying(): - return nil - case _, ok := <-obs.environWatcher.Changes(): - if !ok { - return watcher.EnsureErr(obs.environWatcher) - } - } - config, err := obs.st.EnvironConfig() - if err != nil { - logger.Warningf("error reading environment config: %v", err) - continue - } - environ, err := environs.New(config) - if err != nil { - logger.Warningf("error creating an environment: %v", err) - continue - } - obs.mu.Lock() - obs.environ = environ - obs.mu.Unlock() - } -} - -// Environ returns the most recent valid Environ. -func (obs *EnvironObserver) Environ() environs.Environ { - obs.mu.Lock() - defer obs.mu.Unlock() - return obs.environ -} - -func (obs *EnvironObserver) Kill() { - obs.tomb.Kill(nil) -} - -func (obs *EnvironObserver) Wait() error { - return obs.tomb.Wait() -} === added file 'src/github.com/juju/juju/worker/environ/environ.go' --- src/github.com/juju/juju/worker/environ/environ.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/environ.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,127 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package environ + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker/catacomb" +) + +// ConfigGetter exposes a model configuration to its clients. 
+type ConfigGetter interface { + ModelConfig() (*config.Config, error) +} + +// ConfigObserver exposes a model configuration and a watch constructor +// that allows clients to be informed of changes to the configuration. +type ConfigObserver interface { + ConfigGetter + WatchForModelConfigChanges() (watcher.NotifyWatcher, error) +} + +// Config describes the dependencies of a Tracker. +// +// It's arguable that it should be called TrackerConfig, because of the heavy +// use of model config in this package. +type Config struct { + Observer ConfigObserver +} + +// Validate returns an error if the config cannot be used to start a Tracker. +func (config Config) Validate() error { + if config.Observer == nil { + return errors.NotValidf("nil Observer") + } + return nil +} + +// Tracker loads an environment, makes it available to clients, and updates +// the environment in response to config changes until it is killed. +type Tracker struct { + config Config + catacomb catacomb.Catacomb + environ environs.Environ +} + +// NewTracker loads an environment from the observer and returns a new Tracker, +// or an error if anything goes wrong. If a tracker is returned, its Environ() +// method is immediately usable. +// +// The caller is responsible for Kill()ing the returned Tracker and Wait()ing +// for any errors it might return. +func NewTracker(config Config) (*Tracker, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + modelConfig, err := config.Observer.ModelConfig() + if err != nil { + return nil, errors.Annotate(err, "cannot read environ config") + } + environ, err := environs.New(modelConfig) + if err != nil { + return nil, errors.Annotate(err, "cannot create environ") + } + + t := &Tracker{ + config: config, + environ: environ, + } + err = catacomb.Invoke(catacomb.Plan{ + Site: &t.catacomb, + Work: t.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return t, nil +} + +// Environ returns the encapsulated Environ. It will continue to be updated in +// the background for as long as the Tracker continues to run. +func (t *Tracker) Environ() environs.Environ { + return t.environ +} + +func (t *Tracker) loop() error { + environWatcher, err := t.config.Observer.WatchForModelConfigChanges() + if err != nil { + return errors.Annotate(err, "cannot watch environ config") + } + if err := t.catacomb.Add(environWatcher); err != nil { + return errors.Trace(err) + } + for { + logger.Debugf("waiting for environ watch notification") + select { + case <-t.catacomb.Dying(): + return t.catacomb.ErrDying() + case _, ok := <-environWatcher.Changes(): + if !ok { + return errors.New("environ config watch closed") + } + } + logger.Debugf("reloading environ config") + modelConfig, err := t.config.Observer.ModelConfig() + if err != nil { + return errors.Annotate(err, "cannot read environ config") + } + if err = t.environ.SetConfig(modelConfig); err != nil { + return errors.Annotate(err, "cannot update environ config") + } + } +} + +// Kill is part of the worker.Worker interface. +func (t *Tracker) Kill() { + t.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (t *Tracker) Wait() error { + return t.catacomb.Wait() +} === added file 'src/github.com/juju/juju/worker/environ/environ_test.go' --- src/github.com/juju/juju/worker/environ/environ_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/environ_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,205 @@ +// Copyright 2012, 2013 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package environ_test + +import ( + "time" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/environ" + "github.com/juju/juju/worker/workertest" +) + +type TrackerSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&TrackerSuite{}) + +func (s *TrackerSuite) TestValidateObserver(c *gc.C) { + config := environ.Config{} + check := func(err error) { + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, "nil Observer not valid") + } + + err := config.Validate() + check(err) + + tracker, err := environ.NewTracker(config) + c.Check(tracker, gc.IsNil) + check(err) +} + +func (s *TrackerSuite) TestModelConfigFails(c *gc.C) { + fix := &fixture{ + observerErrs: []error{ + errors.New("no yuo"), + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Check(err, gc.ErrorMatches, "cannot read environ config: no yuo") + c.Check(tracker, gc.IsNil) + context.CheckCallNames(c, "ModelConfig") + }) + +} + +func (s *TrackerSuite) TestModelConfigInvalid(c *gc.C) { + fix := &fixture{ + initialConfig: coretesting.Attrs{ + "type": "unknown", + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Check(err, gc.ErrorMatches, `cannot create environ: no registered provider for "unknown"`) + c.Check(tracker, gc.IsNil) + context.CheckCallNames(c, "ModelConfig") + }) + +} + +func (s *TrackerSuite) TestModelConfigValid(c *gc.C) { + fix := &fixture{ + initialConfig: coretesting.Attrs{ + "name": "this-particular-name", + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Assert(err, jc.ErrorIsNil) + defer workertest.CleanKill(c, tracker) + + gotEnviron := tracker.Environ() + c.Assert(gotEnviron, gc.NotNil) + c.Check(gotEnviron.Config().Name(), gc.Equals, "this-particular-name") + }) +} + +func (s *TrackerSuite) TestWatchFails(c *gc.C) { + fix := &fixture{ + observerErrs: []error{ + nil, errors.New("grrk splat"), + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Assert(err, jc.ErrorIsNil) + defer workertest.DirtyKill(c, tracker) + + err = workertest.CheckKilled(c, tracker) + c.Check(err, gc.ErrorMatches, "cannot watch environ config: grrk splat") + context.CheckCallNames(c, "ModelConfig", "WatchForModelConfigChanges") + }) +} + +func (s *TrackerSuite) TestWatchCloses(c *gc.C) { + fix := &fixture{} + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Assert(err, jc.ErrorIsNil) + defer workertest.DirtyKill(c, tracker) + + context.CloseNotify() + err = workertest.CheckKilled(c, tracker) + c.Check(err, gc.ErrorMatches, "environ config watch closed") + context.CheckCallNames(c, "ModelConfig", "WatchForModelConfigChanges") + }) +} + +func (s *TrackerSuite) TestWatchedModelConfigFails(c *gc.C) { + fix := &fixture{ + observerErrs: []error{ + nil, nil, errors.New("blam ouch"), + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Check(err, jc.ErrorIsNil) + defer workertest.DirtyKill(c, tracker) + + context.SendNotify() + err = workertest.CheckKilled(c, 
tracker) + c.Check(err, gc.ErrorMatches, "cannot read environ config: blam ouch") + context.CheckCallNames(c, "ModelConfig", "WatchForModelConfigChanges", "ModelConfig") + }) +} + +func (s *TrackerSuite) TestWatchedModelConfigIncompatible(c *gc.C) { + fix := &fixture{ + initialConfig: coretesting.Attrs{ + "broken": "SetConfig", + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Check(err, jc.ErrorIsNil) + defer workertest.DirtyKill(c, tracker) + + context.SendNotify() + err = workertest.CheckKilled(c, tracker) + c.Check(err, gc.ErrorMatches, "cannot update environ config: dummy.SetConfig is broken") + context.CheckCallNames(c, "ModelConfig", "WatchForModelConfigChanges", "ModelConfig") + }) +} + +func (s *TrackerSuite) TestWatchedModelConfigUpdates(c *gc.C) { + fix := &fixture{ + initialConfig: coretesting.Attrs{ + "name": "original-name", + }, + } + fix.Run(c, func(context *runContext) { + tracker, err := environ.NewTracker(environ.Config{ + Observer: context, + }) + c.Check(err, jc.ErrorIsNil) + defer workertest.CleanKill(c, tracker) + + context.SetConfig(c, coretesting.Attrs{ + "name": "updated-name", + }) + gotEnviron := tracker.Environ() + c.Assert(gotEnviron.Config().Name(), gc.Equals, "original-name") + + timeout := time.After(coretesting.LongWait) + attempt := time.After(0) + context.SendNotify() + for { + select { + case <-attempt: + name := gotEnviron.Config().Name() + if name == "original-name" { + attempt = time.After(coretesting.ShortWait) + continue + } + c.Check(name, gc.Equals, "updated-name") + case <-timeout: + c.Fatalf("timed out waiting for environ to be updated") + } + break + } + context.CheckCallNames(c, "ModelConfig", "WatchForModelConfigChanges", "ModelConfig") + }) +} === added file 'src/github.com/juju/juju/worker/environ/fixture_test.go' --- src/github.com/juju/juju/worker/environ/fixture_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/fixture_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,125 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package environ_test + +import ( + "sync" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/environs/config" + "github.com/juju/juju/provider/dummy" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/workertest" +) + +type fixture struct { + watcherErr error + observerErrs []error + initialConfig map[string]interface{} +} + +func (fix *fixture) Run(c *gc.C, test func(*runContext)) { + watcher := newNotifyWatcher(fix.watcherErr) + defer workertest.DirtyKill(c, watcher) + context := &runContext{ + config: newModelConfig(c, fix.initialConfig), + watcher: watcher, + } + context.stub.SetErrors(fix.observerErrs...) + test(context) +} + +type runContext struct { + mu sync.Mutex + stub testing.Stub + config map[string]interface{} + watcher *notifyWatcher +} + +// SetConfig updates the configuration returned by ModelConfig. +func (context *runContext) SetConfig(c *gc.C, extraAttrs coretesting.Attrs) { + context.mu.Lock() + defer context.mu.Unlock() + context.config = newModelConfig(c, extraAttrs) +} + +// ModelConfig is part of the environ.ConfigObserver interface. 
+func (context *runContext) ModelConfig() (*config.Config, error) { + context.mu.Lock() + defer context.mu.Unlock() + context.stub.AddCall("ModelConfig") + if err := context.stub.NextErr(); err != nil { + return nil, err + } + return config.New(config.NoDefaults, context.config) +} + +// KillNotify kills the watcher returned from WatchForModelConfigChanges with +// the error configured in the enclosing fixture. +func (context *runContext) KillNotify() { + context.watcher.Kill() +} + +// SendNotify sends a value on the channel used by WatchForModelConfigChanges +// results. +func (context *runContext) SendNotify() { + context.watcher.changes <- struct{}{} +} + +// CloseNotify closes the channel used by WatchForModelConfigChanges results. +func (context *runContext) CloseNotify() { + close(context.watcher.changes) +} + +// WatchForModelConfigChanges is part of the environ.ConfigObserver interface. +func (context *runContext) WatchForModelConfigChanges() (watcher.NotifyWatcher, error) { + context.mu.Lock() + defer context.mu.Unlock() + context.stub.AddCall("WatchForModelConfigChanges") + if err := context.stub.NextErr(); err != nil { + return nil, err + } + return context.watcher, nil +} + +func (context *runContext) CheckCallNames(c *gc.C, names ...string) { + context.mu.Lock() + defer context.mu.Unlock() + context.stub.CheckCallNames(c, names...) +} + +// newNotifyWatcher returns a watcher.NotifyWatcher that will fail with the +// supplied error when Kill()ed. +func newNotifyWatcher(err error) *notifyWatcher { + return &notifyWatcher{ + Worker: workertest.NewErrorWorker(err), + changes: make(chan struct{}, 1000), + } +} + +type notifyWatcher struct { + worker.Worker + changes chan struct{} +} + +// Changes is part of the watcher.NotifyWatcher interface. +func (w *notifyWatcher) Changes() watcher.NotifyChannel { + return w.changes +} + +// newModelConfig returns an environment config map with the supplied attrs +// (on top of some default set), or fails the test. +func newModelConfig(c *gc.C, extraAttrs coretesting.Attrs) map[string]interface{} { + attrs := dummy.SampleConfig() + attrs["broken"] = "" + attrs["state-id"] = "42" + for k, v := range extraAttrs { + attrs[k] = v + } + return coretesting.CustomModelConfig(c, attrs).AllAttrs() +} === added file 'src/github.com/juju/juju/worker/environ/manifold.go' --- src/github.com/juju/juju/worker/environ/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,54 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package environ + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/environs" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig describes the resources used by a Tracker. +type ManifoldConfig util.ApiManifoldConfig + +// Manifold returns a Manifold that encapsulates a *Tracker and exposes it as +// an environs.Environ resource. +func Manifold(config ManifoldConfig) dependency.Manifold { + manifold := util.ApiManifold( + util.ApiManifoldConfig(config), + manifoldStart, + ) + manifold.Output = manifoldOutput + return manifold +} + +// manifoldStart creates a *Tracker given a base.APICaller.
+func manifoldStart(apiCaller base.APICaller) (worker.Worker, error) { + w, err := NewTracker(Config{ + Observer: agent.NewState(apiCaller), + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} + +// manifoldOutput extracts an environs.Environ resource from a *Tracker. +func manifoldOutput(in worker.Worker, out interface{}) error { + inTracker, ok := in.(*Tracker) + if !ok { + return errors.Errorf("expected *environ.Tracker, got %T", in) + } + outEnviron, ok := out.(*environs.Environ) + if !ok { + return errors.Errorf("expected *environs.Environ, got %T", out) + } + *outEnviron = inTracker.Environ() + return nil +} === added file 'src/github.com/juju/juju/worker/environ/package_test.go' --- src/github.com/juju/juju/worker/environ/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package environ_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/environ/wait.go' --- src/github.com/juju/juju/worker/environ/wait.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/wait.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package environ + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + + "github.com/juju/juju/environs" + "github.com/juju/juju/watcher" +) + +// TODO(fwereade) remove WaitForEnviron, use a manifold-managed Tracker to share +// a single environs.Environ among firewaller, instancepoller, provisioner. + +var logger = loggo.GetLogger("juju.worker.environ") + +// ErrWaitAborted is returned from WaitForEnviron when the wait is terminated by +// closing the abort chan. +var ErrWaitAborted = errors.New("environ wait aborted") + +// WaitForEnviron waits for a valid environment to arrive from the given +// watcher. It terminates with ErrWaitAborted if it receives a value on abort. +// +// In practice, it shouldn't wait at all: juju *should* never deliver invalid +// environ configs. Regardless, it should be considered deprecated; clients +// should prefer to access an Environ via a shared Tracker. +// +// It never takes responsibility for the supplied watcher; the client remains +// responsible for detecting and handling any watcher errors that may occur, +// whether this func succeeds or fails. +func WaitForEnviron(w watcher.NotifyWatcher, getter ConfigGetter, abort <-chan struct{}) (environs.Environ, error) { + for { + select { + case <-abort: + return nil, ErrWaitAborted + case _, ok := <-w.Changes(): + if !ok { + return nil, errors.New("environ config watch closed") + } + config, err := getter.ModelConfig() + if err != nil { + return nil, errors.Annotate(err, "cannot read environ config") + } + environ, err := environs.New(config) + if err == nil { + return environ, nil + } + logger.Errorf("loaded invalid environment configuration: %v", err) + } + } +} === added file 'src/github.com/juju/juju/worker/environ/wait_test.go' --- src/github.com/juju/juju/worker/environ/wait_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/environ/wait_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,135 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
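Between them, environ.go, manifold.go and wait.go above offer two routes to an environs.Environ: the long-lived, self-updating Tracker, and the deprecated blocking WaitForEnviron. A usage sketch, assuming an observer value implementing ConfigObserver and a caller-owned abort channel (both illustrative, not from this diff):

    // Hypothetical callers; "observer" and "abort" are stand-ins.
    package client

    import (
    	"github.com/juju/errors"

    	"github.com/juju/juju/environs"
    	"github.com/juju/juju/worker"
    	"github.com/juju/juju/worker/environ"
    )

    // trackedEnviron starts a Tracker and hands back the shared Environ.
    // The caller must eventually Kill() the returned worker and Wait() for it.
    func trackedEnviron(observer environ.ConfigObserver) (environs.Environ, worker.Worker, error) {
    	tracker, err := environ.NewTracker(environ.Config{Observer: observer})
    	if err != nil {
    		return nil, nil, errors.Trace(err)
    	}
    	return tracker.Environ(), tracker, nil
    }

    // waitedEnviron blocks until a first valid config arrives, per the
    // deprecated path. WaitForEnviron never adopts the watcher, so the
    // caller remains responsible for stopping w and handling its errors.
    func waitedEnviron(observer environ.ConfigObserver, abort <-chan struct{}) (environs.Environ, error) {
    	w, err := observer.WatchForModelConfigChanges()
    	if err != nil {
    		return nil, errors.Trace(err)
    	}
    	return environ.WaitForEnviron(w, observer, abort)
    }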
+ +package environ_test + +import ( + "time" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/environ" + "github.com/juju/juju/worker/workertest" +) + +type WaitSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&WaitSuite{}) + +func (s *WaitSuite) TestWaitAborted(c *gc.C) { + fix := &fixture{} + fix.Run(c, func(context *runContext) { + abort := make(chan struct{}) + done := make(chan struct{}) + go func() { + defer close(done) + env, err := environ.WaitForEnviron(context.watcher, nil, abort) + c.Check(env, gc.IsNil) + c.Check(err, gc.Equals, environ.ErrWaitAborted) + }() + + close(abort) + select { + case <-done: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out waiting for abort") + } + workertest.CheckAlive(c, context.watcher) + }) +} + +func (s *WaitSuite) TestWatchClosed(c *gc.C) { + fix := &fixture{} + fix.Run(c, func(context *runContext) { + abort := make(chan struct{}) + defer close(abort) + + done := make(chan struct{}) + go func() { + defer close(done) + env, err := environ.WaitForEnviron(context.watcher, nil, abort) + c.Check(env, gc.IsNil) + c.Check(err, gc.ErrorMatches, "environ config watch closed") + }() + + context.CloseNotify() + select { + case <-done: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out waiting for failure") + } + workertest.CheckAlive(c, context.watcher) + }) +} + +func (s *WaitSuite) TestConfigError(c *gc.C) { + fix := &fixture{ + observerErrs: []error{ + errors.New("biff zonk"), + }, + } + fix.Run(c, func(context *runContext) { + abort := make(chan struct{}) + defer close(abort) + + done := make(chan struct{}) + go func() { + defer close(done) + env, err := environ.WaitForEnviron(context.watcher, context, abort) + c.Check(env, gc.IsNil) + c.Check(err, gc.ErrorMatches, "cannot read environ config: biff zonk") + }() + + context.SendNotify() + select { + case <-done: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out waiting for failure") + } + workertest.CheckAlive(c, context.watcher) + }) +} + +func (s *WaitSuite) TestIgnoresBadConfig(c *gc.C) { + fix := &fixture{ + initialConfig: coretesting.Attrs{ + "type": "unknown", + }, + } + fix.Run(c, func(context *runContext) { + abort := make(chan struct{}) + defer close(abort) + + done := make(chan struct{}) + go func() { + defer close(done) + env, err := environ.WaitForEnviron(context.watcher, context, abort) + if c.Check(err, jc.ErrorIsNil) { + c.Check(env.Config().Name(), gc.Equals, "expected-name") + } + }() + + context.SendNotify() + select { + case <-time.After(coretesting.ShortWait): + case <-done: + c.Errorf("completed unexpectedly") + } + + context.SetConfig(c, coretesting.Attrs{ + "name": "expected-name", + }) + context.SendNotify() + select { + case <-done: + case <-time.After(coretesting.LongWait): + c.Errorf("timed out waiting for success") + } + workertest.CheckAlive(c, context.watcher) + }) +} === removed file 'src/github.com/juju/juju/worker/environ_test.go' --- src/github.com/juju/juju/worker/environ_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/environ_test.go 1970-01-01 00:00:00 +0000 @@ -1,262 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
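The new suites above lean on the workertest helpers throughout. A consolidated sketch of the idiom, with semantics inferred from the usage here: CleanKill stops a worker and asserts a nil error, DirtyKill only guarantees death, CheckKilled waits for death and returns the exit error, and CheckAlive asserts the worker keeps running.

    // Illustrative only; exerciseWorker is a hypothetical helper.
    package environ_test

    import (
    	gc "gopkg.in/check.v1"

    	"github.com/juju/juju/worker"
    	"github.com/juju/juju/worker/workertest"
    )

    // exerciseWorker assumes the caller has already triggered the failure
    // (if any) whose message is expected in expectErr.
    func exerciseWorker(c *gc.C, w worker.Worker, expectErr string) {
    	if expectErr == "" {
    		defer workertest.CleanKill(c, w) // stopping must succeed cleanly
    		workertest.CheckAlive(c, w)      // and it must still be running now
    		return
    	}
    	defer workertest.DirtyKill(c, w) // guarantee death even if checks fail
    	err := workertest.CheckKilled(c, w)
    	c.Check(err, gc.ErrorMatches, expectErr)
    }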
- -package worker_test - -import ( - "errors" - "strings" - "sync" - stdtesting "testing" - "time" - - "github.com/juju/loggo" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "launchpad.net/tomb" - - apiwatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/environs" - "github.com/juju/juju/environs/config" - "github.com/juju/juju/provider/dummy" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker" -) - -func TestPackage(t *stdtesting.T) { - gc.TestingT(t) -} - -type environSuite struct { - coretesting.BaseSuite - - st *fakeState -} - -var _ = gc.Suite(&environSuite{}) - -func (s *environSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - s.st = &fakeState{ - Stub: &testing.Stub{}, - changes: make(chan struct{}, 100), - } -} - -func (s *environSuite) TestStop(c *gc.C) { - s.st.SetErrors( - nil, // WatchForEnvironConfigChanges - errors.New("err1"), // Changes (closing the channel) - ) - s.st.SetConfig(c, coretesting.Attrs{ - "type": "invalid", - }) - - w, err := s.st.WatchForEnvironConfigChanges() - c.Assert(err, jc.ErrorIsNil) - defer stopWatcher(c, w) - stop := make(chan struct{}) - close(stop) // close immediately so the loop exits. - done := make(chan error) - go func() { - env, err := worker.WaitForEnviron(w, s.st, stop) - c.Check(env, gc.IsNil) - done <- err - }() - select { - case <-worker.LoadedInvalid: - c.Errorf("expected changes watcher to be closed") - case err := <-done: - c.Assert(err, gc.Equals, tomb.ErrDying) - case <-time.After(coretesting.LongWait): - c.Fatalf("timeout waiting for the WaitForEnviron to stop") - } - s.st.CheckCallNames(c, "WatchForEnvironConfigChanges", "Changes") -} - -func stopWatcher(c *gc.C, w apiwatcher.NotifyWatcher) { - err := w.Stop() - c.Check(err, jc.ErrorIsNil) -} - -func (s *environSuite) TestInvalidConfig(c *gc.C) { - s.st.SetConfig(c, coretesting.Attrs{ - "type": "unknown", - }) - - w, err := s.st.WatchForEnvironConfigChanges() - c.Assert(err, jc.ErrorIsNil) - defer stopWatcher(c, w) - done := make(chan environs.Environ) - go func() { - env, err := worker.WaitForEnviron(w, s.st, nil) - c.Check(err, jc.ErrorIsNil) - done <- env - }() - <-worker.LoadedInvalid - s.st.CheckCallNames(c, - "WatchForEnvironConfigChanges", - "Changes", - "EnvironConfig", - "Changes", - ) -} - -func (s *environSuite) TestErrorWhenEnvironIsInvalid(c *gc.C) { - s.st.SetConfig(c, coretesting.Attrs{ - "type": "unknown", - }) - - obs, err := worker.NewEnvironObserver(s.st) - c.Assert(err, gc.ErrorMatches, - `cannot create an environment: no registered provider for "unknown"`, - ) - c.Assert(obs, gc.IsNil) - s.st.CheckCallNames(c, "EnvironConfig") -} - -func (s *environSuite) TestEnvironmentChanges(c *gc.C) { - s.st.SetConfig(c, nil) - - logc := make(logChan, 1009) - c.Assert(loggo.RegisterWriter("testing", logc, loggo.WARNING), gc.IsNil) - defer loggo.RemoveWriter("testing") - - obs, err := worker.NewEnvironObserver(s.st) - c.Assert(err, jc.ErrorIsNil) - - env := obs.Environ() - s.st.AssertConfig(c, env.Config()) - - // Change to an invalid configuration and check - // that the observer's environment remains the same. 
- originalConfig, err := s.st.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - - s.st.SetConfig(c, coretesting.Attrs{ - "type": "invalid", - }) - - // Wait for the observer to register the invalid environment -loop: - for { - select { - case msg := <-logc: - if strings.Contains(msg, "error creating an environment") { - break loop - } - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting to see broken environment") - } - } - // Check that the returned environ is still the same. - env = obs.Environ() - c.Assert(env.Config().AllAttrs(), jc.DeepEquals, originalConfig.AllAttrs()) - - // Change the environment back to a valid configuration - // with a different name and check that we see it. - s.st.SetConfig(c, coretesting.Attrs{ - "name": "a-new-name", - }) - - for a := coretesting.LongAttempt.Start(); a.Next(); { - env := obs.Environ() - if !a.HasNext() { - c.Fatalf("timed out waiting for new environ") - } - if env.Config().Name() == "a-new-name" { - break - } - } -} - -type logChan chan string - -func (logc logChan) Write(level loggo.Level, name, filename string, line int, timestamp time.Time, message string) { - logc <- message -} - -type fakeState struct { - *testing.Stub - apiwatcher.NotifyWatcher - - mu sync.Mutex - - changes chan struct{} - config map[string]interface{} -} - -var _ worker.EnvironConfigObserver = (*fakeState)(nil) - -// WatchForEnvironConfigChanges implements EnvironConfigObserver. -func (s *fakeState) WatchForEnvironConfigChanges() (apiwatcher.NotifyWatcher, error) { - s.MethodCall(s, "WatchForEnvironConfigChanges") - if err := s.NextErr(); err != nil { - return nil, err - } - return s, nil -} - -// EnvironConfig implements EnvironConfigObserver. -func (s *fakeState) EnvironConfig() (*config.Config, error) { - s.mu.Lock() - defer s.mu.Unlock() - - s.MethodCall(s, "EnvironConfig") - if err := s.NextErr(); err != nil { - return nil, err - } - return config.New(config.NoDefaults, s.config) -} - -// SetConfig changes the stored environment config with the given -// extraAttrs and triggers a change for the watcher. -func (s *fakeState) SetConfig(c *gc.C, extraAttrs coretesting.Attrs) { - s.mu.Lock() - defer s.mu.Unlock() - - attrs := dummy.SampleConfig() - for k, v := range extraAttrs { - attrs[k] = v - } - - // Simulate it's prepared. - attrs["broken"] = "" - attrs["state-id"] = "42" - - s.config = coretesting.CustomEnvironConfig(c, attrs).AllAttrs() - s.changes <- struct{}{} -} - -// Err implements apiwatcher.NotifyWatcher. -func (s *fakeState) Err() error { - s.MethodCall(s, "Err") - return s.NextErr() -} - -// Stop implements apiwatcher.NotifyWatcher. -func (s *fakeState) Stop() error { - s.MethodCall(s, "Stop") - return s.NextErr() -} - -// Changes implements apiwatcher.NotifyWatcher. -func (s *fakeState) Changes() <-chan struct{} { - s.mu.Lock() - defer s.mu.Unlock() - - s.MethodCall(s, "Changes") - if err := s.NextErr(); err != nil && s.changes != nil { - close(s.changes) // simulate the watcher died. 
- s.changes = nil - } - return s.changes -} - -func (s *fakeState) AssertConfig(c *gc.C, expected *config.Config) { - s.mu.Lock() - defer s.mu.Unlock() - - c.Assert(s.config, jc.DeepEquals, expected.AllAttrs()) -} === removed directory 'src/github.com/juju/juju/worker/envworkermanager' === removed file 'src/github.com/juju/juju/worker/envworkermanager/envworkermanager.go' --- src/github.com/juju/juju/worker/envworkermanager/envworkermanager.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/envworkermanager/envworkermanager.go 1970-01-01 00:00:00 +0000 @@ -1,153 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package envworkermanager - -import ( - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - "gopkg.in/mgo.v2" - "launchpad.net/tomb" - - cmdutil "github.com/juju/juju/cmd/jujud/util" - "github.com/juju/juju/state" - "github.com/juju/juju/worker" -) - -var logger = loggo.GetLogger("juju.worker.envworkermanager") - -// NewEnvWorkerManager returns a Worker which manages a worker which -// needs to run on a per environment basis. It takes a function which will -// be called to start a worker for a new environment. This worker -// will be killed when an environment goes away. -func NewEnvWorkerManager( - st InitialState, - startEnvWorker func(InitialState, *state.State) (worker.Worker, error), -) worker.Worker { - m := &envWorkerManager{ - st: st, - startEnvWorker: startEnvWorker, - } - m.runner = worker.NewRunner(cmdutil.IsFatal, cmdutil.MoreImportant) - go func() { - defer m.tomb.Done() - m.tomb.Kill(m.loop()) - }() - return m -} - -// InitialState defines the State functionality used by -// envWorkerManager and/or could be useful to startEnvWorker -// funcs. It mainly exists to support testing. -type InitialState interface { - WatchEnvironments() state.StringsWatcher - ForEnviron(names.EnvironTag) (*state.State, error) - GetEnvironment(names.EnvironTag) (*state.Environment, error) - EnvironUUID() string - Machine(string) (*state.Machine, error) - MongoSession() *mgo.Session -} - -type envWorkerManager struct { - runner worker.Runner - tomb tomb.Tomb - st InitialState - startEnvWorker func(InitialState, *state.State) (worker.Worker, error) -} - -// Kill satisfies the Worker interface. -func (m *envWorkerManager) Kill() { - m.tomb.Kill(nil) -} - -// Wait satisfies the Worker interface. -func (m *envWorkerManager) Wait() error { - return m.tomb.Wait() -} - -func (m *envWorkerManager) loop() error { - go func() { - // When the runner stops, make sure we stop the envWorker as well - m.tomb.Kill(m.runner.Wait()) - }() - defer func() { - // When we return, make sure that we kill - // the runner and wait for it. - m.runner.Kill() - m.tomb.Kill(m.runner.Wait()) - }() - w := m.st.WatchEnvironments() - defer w.Stop() - for { - select { - case uuids := <-w.Changes(): - // One or more environments have changed. 
- for _, uuid := range uuids { - if err := m.envHasChanged(uuid); err != nil { - return errors.Trace(err) - } - } - case <-m.tomb.Dying(): - return tomb.ErrDying - } - } -} - -func (m *envWorkerManager) envHasChanged(uuid string) error { - envTag := names.NewEnvironTag(uuid) - envAlive, err := m.isEnvAlive(envTag) - if err != nil { - return errors.Trace(err) - } - if envAlive { - err = m.envIsAlive(envTag) - } else { - err = m.envIsDead(envTag) - } - return errors.Trace(err) -} - -func (m *envWorkerManager) envIsAlive(envTag names.EnvironTag) error { - return m.runner.StartWorker(envTag.Id(), func() (worker.Worker, error) { - st, err := m.st.ForEnviron(envTag) - if err != nil { - return nil, errors.Annotatef(err, "failed to open state for environment %s", envTag.Id()) - } - closeState := func() { - err := st.Close() - if err != nil { - logger.Errorf("error closing state for env %s: %v", envTag.Id(), err) - } - } - - envRunner, err := m.startEnvWorker(m.st, st) - if err != nil { - closeState() - return nil, errors.Trace(err) - } - - // Close State when the runner for the environment is done. - go func() { - envRunner.Wait() - closeState() - }() - - return envRunner, nil - }) -} - -func (m *envWorkerManager) envIsDead(envTag names.EnvironTag) error { - err := m.runner.StopWorker(envTag.Id()) - return errors.Trace(err) -} - -func (m *envWorkerManager) isEnvAlive(tag names.EnvironTag) (bool, error) { - env, err := m.st.GetEnvironment(tag) - if errors.IsNotFound(err) { - return false, nil - } else if err != nil { - return false, errors.Annotatef(err, "error loading environment %s", tag.Id()) - } - return env.Life() == state.Alive, nil -} === removed file 'src/github.com/juju/juju/worker/envworkermanager/envworkermanager_test.go' --- src/github.com/juju/juju/worker/envworkermanager/envworkermanager_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/envworkermanager/envworkermanager_test.go 1970-01-01 00:00:00 +0000 @@ -1,402 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package envworkermanager_test - -import ( - stdtesting "testing" - "time" - - "github.com/juju/errors" - cmdutil "github.com/juju/juju/cmd/jujud/util" - "github.com/juju/loggo" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "launchpad.net/tomb" - - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" - "github.com/juju/juju/testing" - "github.com/juju/juju/testing/factory" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/envworkermanager" -) - -func TestPackage(t *stdtesting.T) { - testing.MgoTestPackage(t) -} - -var _ = gc.Suite(&suite{}) - -type suite struct { - statetesting.StateSuite - factory *factory.Factory - runnerC chan *fakeRunner - startErr error -} - -func (s *suite) SetUpTest(c *gc.C) { - s.StateSuite.SetUpTest(c) - s.factory = factory.NewFactory(s.State) - s.runnerC = make(chan *fakeRunner, 1) - s.startErr = nil -} - -func (s *suite) makeEnvironment(c *gc.C) *state.State { - st := s.factory.MakeEnvironment(c, nil) - s.AddCleanup(func(*gc.C) { st.Close() }) - return st -} - -func (s *suite) TestStartsWorkersForPreExistingEnvs(c *gc.C) { - moreState := s.makeEnvironment(c) - - var seenEnvs []string - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - defer m.Kill() - for _, r := range s.seeRunnersStart(c, 2) { - seenEnvs = append(seenEnvs, r.envUUID) - } - c.Assert(seenEnvs, jc.SameContents, - []string{s.State.EnvironUUID(), moreState.EnvironUUID()}, - ) -} - -func (s *suite) TestStartsWorkersForNewEnv(c *gc.C) { - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - defer m.Kill() - s.seeRunnersStart(c, 1) // Runner for state server env - - // Create another environment and watch a runner be created for it. - st2 := s.makeEnvironment(c) - runner := s.seeRunnersStart(c, 1)[0] - c.Assert(runner.envUUID, gc.Equals, st2.EnvironUUID()) -} - -func (s *suite) TestStopsWorkersWhenEnvGoesAway(c *gc.C) { - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - defer m.Kill() - runner0 := s.seeRunnersStart(c, 1)[0] - - // Create an environment and grab the runner for it. - otherState := s.makeEnvironment(c) - runner1 := s.seeRunnersStart(c, 1)[0] - - // Destroy the new environment. - env, err := otherState.Environment() - c.Assert(err, jc.ErrorIsNil) - err = env.Destroy() - c.Assert(err, jc.ErrorIsNil) - - // See that the first runner is still running but the runner for - // the new environment is stopped. - s.State.StartSync() - select { - case <-runner0.tomb.Dying(): - c.Fatal("first runner should not die here") - case <-runner1.tomb.Dying(): - break - case <-time.After(testing.LongWait): - c.Fatal("timed out waiting for runner to die") - } - - // Make sure the first runner doesn't get stopped. - s.State.StartSync() - select { - case <-runner0.tomb.Dying(): - c.Fatal("first runner should not die here") - case <-time.After(testing.ShortWait): - break - } -} - -func (s *suite) TestKillPropagates(c *gc.C) { - s.makeEnvironment(c) - - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - runners := s.seeRunnersStart(c, 2) - c.Assert(runners[0].killed, jc.IsFalse) - c.Assert(runners[1].killed, jc.IsFalse) - - m.Kill() - err := waitOrFatal(c, m.Wait) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(runners[0].killed, jc.IsTrue) - c.Assert(runners[1].killed, jc.IsTrue) -} - -// stateWithFailingGetEnvironment wraps a *state.State, overriding the -// GetEnvironment to generate an error. 
-type stateWithFailingGetEnvironment struct { - *stateWithFakeWatcher - shouldFail bool -} - -func newStateWithFailingGetEnvironment(realSt *state.State) *stateWithFailingGetEnvironment { - return &stateWithFailingGetEnvironment{ - stateWithFakeWatcher: newStateWithFakeWatcher(realSt), - shouldFail: false, - } -} -func (s *stateWithFailingGetEnvironment) GetEnvironment(tag names.EnvironTag) (*state.Environment, error) { - if s.shouldFail { - return nil, errors.New("unable to GetEnvironment") - } - return s.State.GetEnvironment(tag) -} - -func (s *suite) TestLoopExitKillsRunner(c *gc.C) { - // If something causes EnvWorkerManager.loop to exit that isn't Kill() then it should stop the runner. - // Currently the best way to cause this is to make - // m.st.GetEnvironment(tag) fail with any error other than NotFound - st := newStateWithFailingGetEnvironment(s.State) - uuid := st.EnvironUUID() - m := envworkermanager.NewEnvWorkerManager(st, s.startEnvWorker) - defer m.Kill() - - // First time: runners started - st.sendEnvChange(uuid) - runners := s.seeRunnersStart(c, 1) - c.Assert(runners[0].killed, jc.IsFalse) - - // Now we start failing - st.shouldFail = true - st.sendEnvChange(uuid) - - // This should kill the manager - err := waitOrFatal(c, m.Wait) - c.Assert(err, gc.ErrorMatches, "error loading environment .*: unable to GetEnvironment") - - // And that should kill all the runners - c.Assert(runners[0].killed, jc.IsTrue) -} - -func (s *suite) TestWorkerErrorIsPropagatedWhenKilled(c *gc.C) { - st := newStateWithFakeWatcher(s.State) - started := make(chan struct{}, 1) - m := envworkermanager.NewEnvWorkerManager(st, func(envworkermanager.InitialState, *state.State) (worker.Worker, error) { - c.Logf("starting worker") - started <- struct{}{} - return &errorWhenKilledWorker{ - err: &cmdutil.FatalError{"an error"}, - }, nil - }) - st.sendEnvChange(st.EnvironUUID()) - s.State.StartSync() - <-started - m.Kill() - err := m.Wait() - c.Assert(err, gc.ErrorMatches, "an error") -} - -type errorWhenKilledWorker struct { - tomb tomb.Tomb - err error -} - -var logger = loggo.GetLogger("juju.worker.envworkermanager") - -func (w *errorWhenKilledWorker) Kill() { - w.tomb.Kill(w.err) - logger.Errorf("errorWhenKilledWorker dying with error %v", w.err) - w.tomb.Done() -} - -func (w *errorWhenKilledWorker) Wait() error { - err := w.tomb.Wait() - logger.Errorf("errorWhenKilledWorker wait -> error %v", err) - return err -} - -func (s *suite) TestNothingHappensWhenEnvIsSeenAgain(c *gc.C) { - // This could happen if there's a change to an environment doc but - // it's otherwise still alive (unlikely but possible). - st := newStateWithFakeWatcher(s.State) - uuid := st.EnvironUUID() - - m := envworkermanager.NewEnvWorkerManager(st, s.startEnvWorker) - defer m.Kill() - - // First time: runners started - st.sendEnvChange(uuid) - s.seeRunnersStart(c, 1) - - // Second time: no runners started - st.sendEnvChange(uuid) - s.checkNoRunnersStart(c) -} - -func (s *suite) TestNothingHappensWhenUnknownEnvReported(c *gc.C) { - // This could perhaps happen when an environment is dying just as - // the EnvWorkerManager is coming up (unlikely but possible). - st := newStateWithFakeWatcher(s.State) - - m := envworkermanager.NewEnvWorkerManager(st, s.startEnvWorker) - defer m.Kill() - - st.sendEnvChange("unknown-env-uuid") - s.checkNoRunnersStart(c) - - // Existing environment still works. 
- st.sendEnvChange(st.EnvironUUID()) - s.seeRunnersStart(c, 1) -} - -func (s *suite) TestFatalErrorKillsEnvWorkerManager(c *gc.C) { - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - runner := s.seeRunnersStart(c, 1)[0] - - runner.tomb.Kill(worker.ErrTerminateAgent) - runner.tomb.Done() - - err := waitOrFatal(c, m.Wait) - c.Assert(errors.Cause(err), gc.Equals, worker.ErrTerminateAgent) -} - -func (s *suite) TestNonFatalErrorCausesRunnerRestart(c *gc.C) { - s.PatchValue(&worker.RestartDelay, time.Millisecond) - - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - defer m.Kill() - runner0 := s.seeRunnersStart(c, 1)[0] - - runner0.tomb.Kill(errors.New("trivial")) - runner0.tomb.Done() - - s.seeRunnersStart(c, 1) -} - -func (s *suite) TestStateIsClosedIfStartEnvWorkersFails(c *gc.C) { - // If State is not closed when startEnvWorker errors, MgoSuite's - // dirty socket detection will pick up the leaked socket and - // panic. - s.startErr = worker.ErrTerminateAgent // This will make envWorkerManager exit. - m := envworkermanager.NewEnvWorkerManager(s.State, s.startEnvWorker) - waitOrFatal(c, m.Wait) -} - -func (s *suite) seeRunnersStart(c *gc.C, expectedCount int) []*fakeRunner { - if expectedCount < 1 { - panic("expectedCount must be >= 1") - } - s.State.StartSync() - runners := make([]*fakeRunner, 0, expectedCount) - for { - select { - case r := <-s.runnerC: - c.Assert(r.ssEnvUUID, gc.Equals, s.State.EnvironUUID()) - - runners = append(runners, r) - if len(runners) == expectedCount { - s.checkNoRunnersStart(c) // Check no more runners start - return runners - } - case <-time.After(testing.LongWait): - c.Fatal("timed out waiting for runners to be started") - } - } -} - -func (s *suite) checkNoRunnersStart(c *gc.C) { - s.State.StartSync() - for { - select { - case <-s.runnerC: - c.Fatal("saw runner creation when expecting none") - case <-time.After(testing.ShortWait): - return - } - } -} - -// startEnvWorker is passed to NewEnvWorkerManager in these tests. It -// creates fake Runner instances when envWorkerManager starts workers -// for an environment. -func (s *suite) startEnvWorker(ssSt envworkermanager.InitialState, st *state.State) (worker.Worker, error) { - if s.startErr != nil { - return nil, s.startErr - } - runner := &fakeRunner{ - ssEnvUUID: ssSt.EnvironUUID(), - envUUID: st.EnvironUUID(), - } - s.runnerC <- runner - return runner, nil -} - -func waitOrFatal(c *gc.C, wait func() error) error { - errC := make(chan error) - go func() { - errC <- wait() - }() - - select { - case err := <-errC: - return err - case <-time.After(testing.LongWait): - c.Fatal("waited too long") - } - return nil -} - -// fakeRunner minimally implements the worker.Worker interface. It -// doesn't actually run anything, recording some execution details for -// testing. -type fakeRunner struct { - tomb tomb.Tomb - ssEnvUUID string - envUUID string - killed bool -} - -func (r *fakeRunner) Kill() { - r.killed = true - r.tomb.Done() -} - -func (r *fakeRunner) Wait() error { - return r.tomb.Wait() -} - -func newStateWithFakeWatcher(realSt *state.State) *stateWithFakeWatcher { - return &stateWithFakeWatcher{ - State: realSt, - envWatcher: &fakeEnvWatcher{ - changes: make(chan []string), - }, - } -} - -// stateWithFakeWatcher wraps a *state.State, overriding the -// WatchEnvironments method to allow control over the reported -// environment lifecycle events for testing. 
-// -// Use sendEnvChange to cause an environment event to be emitted by -// the watcher returned by WatchEnvironments. -type stateWithFakeWatcher struct { - *state.State - envWatcher *fakeEnvWatcher -} - -func (s *stateWithFakeWatcher) WatchEnvironments() state.StringsWatcher { - return s.envWatcher -} - -func (s *stateWithFakeWatcher) sendEnvChange(uuids ...string) { - s.envWatcher.changes <- uuids -} - -type fakeEnvWatcher struct { - state.StringsWatcher - changes chan []string -} - -func (w *fakeEnvWatcher) Stop() error { - return nil -} - -func (w *fakeEnvWatcher) Changes() <-chan []string { - return w.changes -} === added file 'src/github.com/juju/juju/worker/errors.go' --- src/github.com/juju/juju/worker/errors.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/errors.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package worker + +import ( + "github.com/juju/errors" +) + +// These errors are returned by various specific workers in the hope that they +// will have some specific effect on the top-level agent running that worker. +// +// It should be clear that they don't belong here, and certainly shouldn't be +// used as they are today: e.g. a uniter has *no fricking idea* whether its +// host agent should shut down. A uniter can return ErrUnitDead, and its host +// might need to respond to that, perhaps by returning an error specific to +// *its* host; depending on these values punching right through N layers (but +// only when we want them to!) is kinda terrible. +var ( + ErrTerminateAgent = errors.New("agent should be terminated") + ErrRebootMachine = errors.New("machine needs to reboot") + ErrShutdownMachine = errors.New("machine needs to shutdown") +) === removed file 'src/github.com/juju/juju/worker/export_test.go' --- src/github.com/juju/juju/worker/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,32 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
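The sentinel errors collected in errors.go above are matched by identity, which is why the removed envworkermanager test compared errors.Cause(err) against worker.ErrTerminateAgent. A sketch of the intended consumption at the top of an agent, with hypothetical handler bodies:

    // Hypothetical agent-side handling; only the comparison idiom is
    // taken from this diff.
    package agenterrors

    import (
    	"github.com/juju/errors"

    	"github.com/juju/juju/worker"
    )

    func handleWorkerExit(err error) {
    	switch errors.Cause(err) {
    	case worker.ErrTerminateAgent:
    		// stop the agent entirely
    	case worker.ErrRebootMachine:
    		// schedule a machine reboot
    	case worker.ErrShutdownMachine:
    		// schedule a machine shutdown
    	default:
    		// fall back to the normal restart policy
    	}
    }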
- -package worker - -import ( - "github.com/juju/juju/state/watcher" -) - -var LoadedInvalid = make(chan struct{}) - -func init() { - loadedInvalid = func() { - LoadedInvalid <- struct{}{} - } -} - -func SetEnsureErr(f func(watcher.Errer) error) { - if f == nil { - ensureErr = watcher.EnsureErr - } else { - ensureErr = f - } -} - -func EnsureErr() func(watcher.Errer) error { - return ensureErr -} - -func ExtractWorkers(workers Workers) ([]string, map[string]func() (Worker, error)) { - return workers.ids, workers.funcs -} === modified file 'src/github.com/juju/juju/worker/firewaller/firewaller.go' --- src/github.com/juju/juju/worker/firewaller/firewaller.go 2015-06-05 17:40:37 +0000 +++ src/github.com/juju/juju/worker/firewaller/firewaller.go 2016-03-22 15:18:22 +0000 @@ -8,17 +8,17 @@ "github.com/juju/errors" "github.com/juju/names" - "launchpad.net/tomb" - apifirewaller "github.com/juju/juju/api/firewaller" - apiwatcher "github.com/juju/juju/api/watcher" + "github.com/juju/juju/api/firewaller" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" "github.com/juju/juju/network" - "github.com/juju/juju/state/watcher" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" + "github.com/juju/juju/worker/environ" ) type machineRanges map[network.PortRange]bool @@ -27,12 +27,12 @@ // machines and reflects those changes onto the backing environment. // Uses Firewaller API V1. type Firewaller struct { - tomb tomb.Tomb - st *apifirewaller.State + catacomb catacomb.Catacomb + st *firewaller.State environ environs.Environ - environWatcher apiwatcher.NotifyWatcher - machinesWatcher apiwatcher.StringsWatcher - portsWatcher apiwatcher.StringsWatcher + modelWatcher watcher.NotifyWatcher + machinesWatcher watcher.StringsWatcher + portsWatcher watcher.StringsWatcher machineds map[names.MachineTag]*machineData unitsChange chan *unitsChange unitds map[names.UnitTag]*unitData @@ -45,7 +45,7 @@ // NewFirewaller returns a new Firewaller or a new FirewallerV0, // depending on what the API supports. 
-func NewFirewaller(st *apifirewaller.State) (_ worker.Worker, err error) { +func NewFirewaller(st *firewaller.State) (worker.Worker, error) { fw := &Firewaller{ st: st, machineds: make(map[names.MachineTag]*machineData), @@ -55,82 +55,99 @@ exposedChange: make(chan *exposedChange), machinePorts: make(map[names.MachineTag]machineRanges), } - defer func() { - if err != nil { - fw.stopWatchers() - } - }() - - fw.environWatcher, err = st.WatchForEnvironConfigChanges() - if err != nil { - return nil, err - } - - fw.machinesWatcher, err = st.WatchEnvironMachines() - if err != nil { - return nil, err - } - - fw.portsWatcher, err = st.WatchOpenedPorts() - if err != nil { - return nil, errors.Annotatef(err, "failed to start ports watcher") - } - logger.Debugf("started watching opened port ranges for the environment") + err := catacomb.Invoke(catacomb.Plan{ + Site: &fw.catacomb, + Work: fw.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return fw, nil +} + +func (fw *Firewaller) setUp() error { + var err error + fw.modelWatcher, err = fw.st.WatchForModelConfigChanges() + if err != nil { + return errors.Trace(err) + } + if err := fw.catacomb.Add(fw.modelWatcher); err != nil { + return errors.Trace(err) + } // We won't "wait" actually, because the environ is already // available and has a guaranteed valid config, but until // WaitForEnviron goes away, this code needs to stay. - fw.environ, err = worker.WaitForEnviron(fw.environWatcher, fw.st, fw.tomb.Dying()) + fw.environ, err = environ.WaitForEnviron(fw.modelWatcher, fw.st, fw.catacomb.Dying()) if err != nil { - return nil, err + if err == environ.ErrWaitAborted { + return fw.catacomb.ErrDying() + } + return errors.Trace(err) } - switch fw.environ.Config().FirewallMode() { case config.FwGlobal: fw.globalMode = true fw.globalPortRef = make(map[network.PortRange]int) case config.FwNone: logger.Warningf("stopping firewaller - firewall-mode is %q", config.FwNone) - return nil, errors.Errorf("firewaller is disabled when firewall-mode is %q", config.FwNone) - } - - go func() { - defer fw.tomb.Done() - fw.tomb.Kill(fw.loop()) - }() - return fw, nil + // XXX(fwereade): shouldn't this be nil? Nothing wrong, nothing to do, + // now that we've logged there's no further reason to complain or retry. 
+ return errors.Errorf("firewaller is disabled when firewall-mode is %q", config.FwNone) + } + + fw.machinesWatcher, err = fw.st.WatchModelMachines() + if err != nil { + return errors.Trace(err) + } + if err := fw.catacomb.Add(fw.machinesWatcher); err != nil { + return errors.Trace(err) + } + + fw.portsWatcher, err = fw.st.WatchOpenedPorts() + if err != nil { + return errors.Annotatef(err, "failed to start ports watcher") + } + if err := fw.catacomb.Add(fw.portsWatcher); err != nil { + return errors.Trace(err) + } + + logger.Debugf("started watching opened port ranges for the environment") + return nil } -var _ worker.Worker = (*Firewaller)(nil) - func (fw *Firewaller) loop() error { - defer fw.stopWatchers() - + if err := fw.setUp(); err != nil { + return errors.Trace(err) + } var reconciled bool - portsChange := fw.portsWatcher.Changes() for { select { - case <-fw.tomb.Dying(): - return tomb.ErrDying - case _, ok := <-fw.environWatcher.Changes(): + case <-fw.catacomb.Dying(): + return fw.catacomb.ErrDying() + case _, ok := <-fw.modelWatcher.Changes(): logger.Debugf("got environ config changes") if !ok { - return watcher.EnsureErr(fw.environWatcher) + return errors.New("environment configuration watcher closed") } - config, err := fw.st.EnvironConfig() + config, err := fw.st.ModelConfig() if err != nil { - return err + return errors.Trace(err) } if err := fw.environ.SetConfig(config); err != nil { + // XXX(fwereade): surely this is an error? probably moot, will + // hopefully be replaced with EnvironObserver. logger.Errorf("loaded invalid environment configuration: %v", err) } case change, ok := <-fw.machinesWatcher.Changes(): if !ok { - return watcher.EnsureErr(fw.machinesWatcher) + return errors.New("machines watcher closed") } for _, machineId := range change { - fw.machineLifeChanged(names.NewMachineTag(machineId)) + if err := fw.machineLifeChanged(names.NewMachineTag(machineId)); err != nil { + return err + } } if !reconciled { reconciled = true @@ -141,12 +158,12 @@ err = fw.reconcileInstances() } if err != nil { - return err + return errors.Trace(err) } } case change, ok := <-portsChange: if !ok { - return watcher.EnsureErr(fw.portsWatcher) + return errors.New("ports watcher closed") } for _, portsGlobalKey := range change { machineTag, networkTag, err := parsePortsKey(portsGlobalKey) @@ -159,7 +176,7 @@ } case change := <-fw.unitsChange: if err := fw.unitsChanged(change); err != nil { - return err + return errors.Trace(err) } case change := <-fw.exposedChange: change.serviced.exposed = change.exposed @@ -192,14 +209,25 @@ } unitw, err := m.WatchUnits() if err != nil { - return err + return errors.Trace(err) + } + // XXX(fwereade): this is the best of a bunch of bad options. We've started + // the watch, so we're responsible for it; but we (probably?) need to do this + // little dance below to update the machined data on the fw loop goroutine, + // whence it's usually accessed, before we start the machined watchLoop + // below. That catacomb *should* be the only one responsible -- and it *is* + // responsible -- but having it in the main fw catacomb as well does no harm, + // and greatly simplifies the code below (which would otherwise have to + // manage unitw lifetime and errors manually). 
+ if err := fw.catacomb.Add(unitw); err != nil { + return errors.Trace(err) } select { - case <-fw.tomb.Dying(): - return tomb.ErrDying + case <-fw.catacomb.Dying(): + return fw.catacomb.ErrDying() case change, ok := <-unitw.Changes(): if !ok { - return watcher.EnsureErr(unitw) + return errors.New("machine units watcher closed") } fw.machineds[tag] = machined err = fw.unitsChanged(&unitsChange{machined, change}) @@ -208,15 +236,24 @@ return errors.Annotatef(err, "cannot respond to units changes for %q", tag) } } - go machined.watchLoop(unitw) + + err = catacomb.Invoke(catacomb.Plan{ + Site: &machined.catacomb, + Work: func() error { + return machined.watchLoop(unitw) + }, + }) + if err != nil { + delete(fw.machineds, tag) + return errors.Trace(err) + } return nil } // startUnit creates a new data value for tracking details of the unit -// and starts watching the unit for port changes. The provided -// machineTag must be the tag for the machine the unit was last +// The provided machineTag must be the tag for the machine the unit was last // observed to be assigned to. -func (fw *Firewaller) startUnit(unit *apifirewaller.Unit, machineTag names.MachineTag) error { +func (fw *Firewaller) startUnit(unit *firewaller.Unit, machineTag names.MachineTag) error { service, err := unit.Service() if err != nil { return err @@ -267,7 +304,7 @@ // startService creates a new data value for tracking details of the // service and starts watching the service for exposure changes. -func (fw *Firewaller) startService(service *apifirewaller.Service) error { +func (fw *Firewaller) startService(service *firewaller.Service) error { exposed, err := service.IsExposed() if err != nil { return err @@ -278,8 +315,19 @@ exposed: exposed, unitds: make(map[names.UnitTag]*unitData), } + err = catacomb.Invoke(catacomb.Plan{ + Site: &serviced.catacomb, + Work: func() error { + return serviced.watchLoop(exposed) + }, + }) + if err != nil { + return errors.Trace(err) + } + if err := fw.catacomb.Add(serviced); err != nil { + return errors.Trace(err) + } fw.serviceds[service.Tag()] = serviced - go serviced.watchLoop(serviced.exposed) return nil } @@ -637,12 +685,14 @@ fw.forgetUnit(unitd) } if err := fw.flushMachine(machined); err != nil { - return err + return errors.Trace(err) } + + // Unusually, it's fine to ignore this error, because we know the machined + // is being tracked in fw.catacomb. But we do still want to wait until the + // watch loop has stopped before we nuke the last data and return. + worker.Stop(machined) delete(fw.machineds, machined.tag) - if err := machined.Stop(); err != nil { - return err - } logger.Debugf("stopped watching %q", machined.tag) return nil } @@ -652,56 +702,39 @@ serviced := unitd.serviced machined := unitd.machined + // If it's the last unit in the service, we'll need to stop the serviced. + stoppedService := false + if len(serviced.unitds) == 1 { + if _, found := serviced.unitds[unitd.tag]; found { + // Unusually, it's fine to ignore this error, because we know the + // serviced is being tracked in fw.catacomb. But we do still want + // to wait until the watch loop has stopped before we nuke the last + // data and return. + worker.Stop(serviced) + stoppedService = true + } + } + // Clean up after stopping. delete(fw.unitds, unitd.tag) delete(machined.unitds, unitd.tag) delete(serviced.unitds, unitd.tag) - if len(serviced.unitds) == 0 { - // Stop service data after all units are removed. 
- if err := serviced.Stop(); err != nil { - logger.Errorf("service watcher %q returned error when stopping: %v", serviced.service.Name(), err) - } - delete(fw.serviceds, serviced.service.Tag()) - } -} - -// stopWatchers stops all the firewaller's watchers. -func (fw *Firewaller) stopWatchers() { - if fw.environWatcher != nil { - watcher.Stop(fw.environWatcher, &fw.tomb) - } - if fw.machinesWatcher != nil { - watcher.Stop(fw.machinesWatcher, &fw.tomb) - } - if fw.portsWatcher != nil { - watcher.Stop(fw.portsWatcher, &fw.tomb) - } - for _, serviced := range fw.serviceds { - if serviced != nil { - watcher.Stop(serviced, &fw.tomb) - } - } - for _, machined := range fw.machineds { - if machined != nil { - watcher.Stop(machined, &fw.tomb) - } - } -} - -// Err returns the reason why the firewaller has stopped or tomb.ErrStillAlive -// when it is still alive. -func (fw *Firewaller) Err() (reason error) { - return fw.tomb.Err() -} - -// Kill implements worker.Worker.Kill. + logger.Debugf("stopped watching %q", unitd.tag) + if stoppedService { + serviceTag := serviced.service.Tag() + delete(fw.serviceds, serviceTag) + logger.Debugf("stopped watching %q", serviceTag) + } +} + +// Kill is part of the worker.Worker interface. func (fw *Firewaller) Kill() { - fw.tomb.Kill(nil) + fw.catacomb.Kill(nil) } -// Wait implements worker.Worker.Wait. +// Wait is part of the worker.Worker interface. func (fw *Firewaller) Wait() error { - return fw.tomb.Wait() + return fw.catacomb.Wait() } // unitsChange contains the changed units for one specific machine. @@ -712,7 +745,7 @@ // machineData holds machine details and watches units added or removed. type machineData struct { - tomb tomb.Tomb + catacomb catacomb.Catacomb fw *Firewaller tag names.MachineTag unitds map[names.UnitTag]*unitData @@ -721,47 +754,47 @@ definedPorts map[network.PortRange]names.UnitTag } -func (md *machineData) machine() (*apifirewaller.Machine, error) { +func (md *machineData) machine() (*firewaller.Machine, error) { return md.fw.st.Machine(md.tag) } // watchLoop watches the machine for units added or removed. -func (md *machineData) watchLoop(unitw apiwatcher.StringsWatcher) { - defer md.tomb.Done() - defer watcher.Stop(unitw, &md.tomb) +func (md *machineData) watchLoop(unitw watcher.StringsWatcher) error { + if err := md.catacomb.Add(unitw); err != nil { + return errors.Trace(err) + } for { select { - case <-md.tomb.Dying(): - return + case <-md.catacomb.Dying(): + return md.catacomb.ErrDying() case change, ok := <-unitw.Changes(): if !ok { - _, err := md.machine() - if !params.IsCodeNotFound(err) { - md.fw.tomb.Kill(watcher.EnsureErr(unitw)) - } - return + return errors.New("machine units watcher closed") } select { case md.fw.unitsChange <- &unitsChange{md, change}: - case <-md.tomb.Dying(): - return + case <-md.catacomb.Dying(): + return md.catacomb.ErrDying() } } } } -// Stop stops the machine watching. -func (md *machineData) Stop() error { - md.tomb.Kill(nil) - return md.tomb.Wait() -} - -// unitData holds unit details and watches port changes. +// Kill is part of the worker.Worker interface. +func (md *machineData) Kill() { + md.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (md *machineData) Wait() error { + return md.catacomb.Wait() +} + +// unitData holds unit details. 
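With every sub-worker registered in the catacomb, the old stopWatchers/Err plumbing disappears and shutdown reduces to the standard Kill-then-Wait contract; worker.Stop, as used above when forgetting a machine or service, is just that contract packaged up. A sketch of the equivalent, assuming only the worker.Worker interface:

    func stop(w worker.Worker) error {
        w.Kill()        // ask the worker to shut down
        return w.Wait() // block until it has, collecting its error
    }

Discarding the result, as forgetMachine and forgetUnit do, is safe here precisely because the stopped worker is also tracked by fw.catacomb, which will surface any error.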
type unitData struct { - tomb tomb.Tomb fw *Firewaller tag names.UnitTag - unit *apifirewaller.Unit + unit *firewaller.Unit serviced *serviceData machined *machineData } @@ -774,59 +807,62 @@ // serviceData holds service details and watches exposure changes. type serviceData struct { - tomb tomb.Tomb - fw *Firewaller - service *apifirewaller.Service - exposed bool - unitds map[names.UnitTag]*unitData + catacomb catacomb.Catacomb + fw *Firewaller + service *firewaller.Service + exposed bool + unitds map[names.UnitTag]*unitData } // watchLoop watches the service's exposed flag for changes. -func (sd *serviceData) watchLoop(exposed bool) { - defer sd.tomb.Done() - w, err := sd.service.Watch() +func (sd *serviceData) watchLoop(exposed bool) error { + serviceWatcher, err := sd.service.Watch() if err != nil { - sd.fw.tomb.Kill(err) - return - } - defer watcher.Stop(w, &sd.tomb) + return errors.Trace(err) + } + if err := sd.catacomb.Add(serviceWatcher); err != nil { + return errors.Trace(err) + } for { select { - case <-sd.tomb.Dying(): - return - case _, ok := <-w.Changes(): + case <-sd.catacomb.Dying(): + return sd.catacomb.ErrDying() + case _, ok := <-serviceWatcher.Changes(): if !ok { - sd.fw.tomb.Kill(watcher.EnsureErr(w)) - return + return errors.New("service watcher closed") } if err := sd.service.Refresh(); err != nil { if !params.IsCodeNotFound(err) { - sd.fw.tomb.Kill(err) + return errors.Trace(err) } - return + return nil } change, err := sd.service.IsExposed() if err != nil { - sd.fw.tomb.Kill(err) - return + return errors.Trace(err) } if change == exposed { continue } + exposed = change select { case sd.fw.exposedChange <- &exposedChange{sd, change}: - case <-sd.tomb.Dying(): - return + case <-sd.catacomb.Dying(): + return sd.catacomb.ErrDying() } } } } -// Stop stops the service watching. -func (sd *serviceData) Stop() error { - sd.tomb.Kill(nil) - return sd.tomb.Wait() +// Kill is part of the worker.Worker interface. +func (sd *serviceData) Kill() { + sd.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (sd *serviceData) Wait() error { + return sd.catacomb.Wait() } // diffRanges returns all the port rangess that exist in A but not B. === modified file 'src/github.com/juju/juju/worker/firewaller/firewaller_test.go' --- src/github.com/juju/juju/worker/firewaller/firewaller_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/firewaller/firewaller_test.go 2016-03-22 15:18:22 +0000 @@ -47,7 +47,7 @@ s.charm = s.AddTestingCharm(c, "dummy") // Create a manager machine and login to the API. - machine, err := s.State.AddMachine("quantal", state.JobManageEnviron) + machine, err := s.State.AddMachine("quantal", state.JobManageModel) c.Assert(err, jc.ErrorIsNil) password, err := utils.RandomPassword() c.Assert(err, jc.ErrorIsNil) @@ -114,7 +114,7 @@ } func (s *firewallerBaseSuite) addUnit(c *gc.C, svc *state.Service) (*state.Unit, *state.Machine) { - units, err := juju.AddUnits(s.State, svc, 1, "") + units, err := juju.AddUnits(s.State, svc, 1, nil) c.Assert(err, jc.ErrorIsNil) u := units[0] id, err := u.AssignedMachineId() @@ -571,6 +571,39 @@ c.Assert(err, jc.ErrorIsNil) } +func (s *InstanceModeSuite) TestStartWithStateOpenPortsBroken(c *gc.C) { + svc := s.AddTestingService(c, "wordpress", s.charm) + err := svc.SetExposed() + c.Assert(err, jc.ErrorIsNil) + u, m := s.addUnit(c, svc) + inst := s.startInstance(c, m) + + err = u.OpenPort("tcp", 80) + c.Assert(err, jc.ErrorIsNil) + + // Nothing open without firewaller. 
+ s.assertPorts(c, inst, m.Id(), nil) + dummy.SetInstanceBroken(inst, "OpenPorts") + + // Starting the firewaller should attempt to open the ports, + // and fail due to the method being broken. + fw, err := firewaller.NewFirewaller(s.firewaller) + c.Assert(err, jc.ErrorIsNil) + + errc := make(chan error, 1) + go func() { errc <- fw.Wait() }() + s.BackingState.StartSync() + select { + case err := <-errc: + c.Assert(err, gc.ErrorMatches, + `cannot respond to units changes for "machine-1": dummyInstance.OpenPorts is broken`) + case <-time.After(coretesting.LongWait): + fw.Kill() + fw.Wait() + c.Fatal("timed out waiting for firewaller to stop") + } +} + type GlobalModeSuite struct { firewallerBaseSuite } @@ -798,8 +831,22 @@ s.firewallerBaseSuite.JujuConnSuite.TearDownTest(c) } -func (s *NoneModeSuite) TestDoesNotStartAtAll(c *gc.C) { +func (s *NoneModeSuite) TestStopImmediatelyWhenModeNone(c *gc.C) { fw, err := firewaller.NewFirewaller(s.firewaller) - c.Assert(err, gc.ErrorMatches, `firewaller is disabled when firewall-mode is "none"`) - c.Assert(fw, gc.IsNil) + c.Assert(err, jc.ErrorIsNil) + defer func() { + fw.Kill() + fw.Wait() + }() + + wait := make(chan error) + go func() { + wait <- fw.Wait() + }() + select { + case err := <-wait: + c.Assert(err, gc.ErrorMatches, `firewaller is disabled when firewall-mode is "none"`) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } } === added file 'src/github.com/juju/juju/worker/firewaller/manifold.go' --- src/github.com/juju/juju/worker/firewaller/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/firewaller/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package firewaller + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/firewaller" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig describes the resources used by the firewaller worker. +type ManifoldConfig util.ApiManifoldConfig + +// Manifold returns a Manifold that encapsulates the firewaller worker. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.ApiManifold( + util.ApiManifoldConfig(config), + manifoldStart, + ) +} + +// manifoldStart creates a firewaller worker, given a base.APICaller. +func manifoldStart(apiCaller base.APICaller) (worker.Worker, error) { + api := firewaller.NewState(apiCaller) + w, err := NewFirewaller(api) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} === added directory 'src/github.com/juju/juju/worker/fortress' === added file 'src/github.com/juju/juju/worker/fortress/fortress.go' --- src/github.com/juju/juju/worker/fortress/fortress.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/fortress/fortress.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,153 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package fortress + +import ( + "sync" + + "github.com/juju/errors" + "launchpad.net/tomb" +) + +// fortress coordinates between clients that access it as a Guard and as a Guest. +type fortress struct { + tomb tomb.Tomb + guardTickets chan guardTicket + guestTickets chan guestTicket +} + +// newFortress returns a new, locked, fortress. The caller is responsible for +// ensuring it somehow gets Kill()ed, and for handling any error returned by +// Wait(). 
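Both new tests above observe a worker's death the same way: run Wait on a goroutine, then select between its result and a timeout. The shape, with the expected message purely illustrative:

    errc := make(chan error, 1)
    go func() { errc <- fw.Wait() }()
    select {
    case err := <-errc:
        c.Assert(err, gc.ErrorMatches, "some expected failure")
    case <-time.After(coretesting.LongWait):
        c.Fatalf("timed out waiting for worker to stop")
    }

Buffering errc by one lets the goroutine exit even if the test fails first.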
+func newFortress() *fortress { + f := &fortress{ + guardTickets: make(chan guardTicket), + guestTickets: make(chan guestTicket), + } + go func() { + defer f.tomb.Done() + f.tomb.Kill(f.loop()) + }() + return f +} + +// Kill is part of the worker.Worker interface. +func (f *fortress) Kill() { + f.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (f *fortress) Wait() error { + return f.tomb.Wait() +} + +// Unlock is part of the Guard interface. +func (f *fortress) Unlock() error { + return f.allowGuests(true, nil) +} + +// Lockdown is part of the Guard interface. +func (f *fortress) Lockdown(abort Abort) error { + return f.allowGuests(false, abort) +} + +// Visit is part of the Guest interface. +func (f *fortress) Visit(visit Visit, abort Abort) error { + result := make(chan error) + select { + case <-f.tomb.Dying(): + return errors.New("fortress worker shutting down") + case <-abort: + return ErrAborted + case f.guestTickets <- guestTicket{visit, result}: + return <-result + } +} + +// allowGuests communicates Guard-interface requests to the main loop. +func (f *fortress) allowGuests(allowGuests bool, abort Abort) error { + result := make(chan error) + select { + case <-f.tomb.Dying(): + return errors.New("fortress worker shutting down") + case f.guardTickets <- guardTicket{allowGuests, abort, result}: + return <-result + } +} + +// loop waits for a Guard to unlock the fortress, and then runs visit funcs in +// parallel until a Guard locks it down again; at which point, it waits for all +// outstanding visits to complete, and reverts to its original state. +func (f *fortress) loop() error { + var active sync.WaitGroup + defer active.Wait() + + // guestTickets will be set on Unlock and cleared at the start of Lockdown. + var guestTickets <-chan guestTicket + for { + select { + case <-f.tomb.Dying(): + return tomb.ErrDying + case ticket := <-guestTickets: + active.Add(1) + go ticket.complete(active.Done) + case ticket := <-f.guardTickets: + // guard ticket requests are idempotent; it's not worth building + // the extra mechanism needed to (1) complain about abuse but + // (2) remain comprehensible and functional in the face of aborted + // Lockdowns. + if ticket.allowGuests { + guestTickets = f.guestTickets + } else { + guestTickets = nil + } + go ticket.complete(active.Wait) + } + } +} + +// guardTicket communicates between the Guard interface and the main loop. +type guardTicket struct { + allowGuests bool + abort Abort + result chan<- error +} + +// complete unconditionally sends a single value on ticket.result; either nil +// (when the desired state is reached) or ErrAborted (when the ticket's Abort +// is closed). It should be called on its own goroutine. +func (ticket guardTicket) complete(waitLockedDown func()) { + var result error + defer func() { + ticket.result <- result + }() + + done := make(chan struct{}) + go func() { + // If we're locking down, we should wait for all Visits to complete. + // If not, Visits are already being accepted and we're already done. + if !ticket.allowGuests { + waitLockedDown() + } + close(done) + }() + select { + case <-done: + case <-ticket.abort: + result = ErrAborted + } +} + +// guestTicket communicates between the Guest interface and the main loop. +type guestTicket struct { + visit Visit + result chan<- error +} + +// complete unconditionally sends any error returned from the Visit func, then +// calls the finished func. It should be called on its own goroutine. 
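The loop above gates guest admission with the classic nil-channel trick: receiving from a nil channel blocks forever, so assigning nil to the local guestTickets variable disables that select case without any extra state. Reduced to its essentials (types as defined above):

    var guestTickets <-chan guestTicket // nil => locked, no visits admitted
    for {
        select {
        case ticket := <-guestTickets: // never fires while guestTickets is nil
            go ticket.complete(active.Done)
        case ticket := <-f.guardTickets:
            if ticket.allowGuests {
                guestTickets = f.guestTickets // enable the case
            } else {
                guestTickets = nil // disable it again
            }
            go ticket.complete(active.Wait)
        }
    }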
+func (ticket guestTicket) complete(finished func()) { + defer finished() + ticket.result <- ticket.visit() +} === added file 'src/github.com/juju/juju/worker/fortress/fortress_test.go' --- src/github.com/juju/juju/worker/fortress/fortress_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/fortress/fortress_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,319 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package fortress_test + +import ( + "sync" + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/fortress" +) + +type FortressSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&FortressSuite{}) + +func (s *FortressSuite) TestOutputBadSource(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + var dummy struct{ worker.Worker } + var out fortress.Guard + err := fix.manifold.Output(dummy, &out) + c.Check(err, gc.ErrorMatches, "in should be \\*fortress\\.fortress; is .*") + c.Check(out, gc.IsNil) +} + +func (s *FortressSuite) TestOutputBadTarget(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + var out interface{} + err := fix.manifold.Output(fix.worker, &out) + c.Check(err.Error(), gc.Equals, "out should be *fortress.Guest or *fortress.Guard; is *interface {}") + c.Check(out, gc.IsNil) +} + +func (s *FortressSuite) TestStoppedUnlock(c *gc.C) { + fix := newFixture(c) + fix.TearDown(c) + + err := fix.Guard(c).Unlock() + c.Check(err, gc.ErrorMatches, "fortress worker shutting down") +} + +func (s *FortressSuite) TestStoppedLockdown(c *gc.C) { + fix := newFixture(c) + fix.TearDown(c) + + err := fix.Guard(c).Lockdown(nil) + c.Check(err, gc.ErrorMatches, "fortress worker shutting down") +} + +func (s *FortressSuite) TestStoppedVisit(c *gc.C) { + fix := newFixture(c) + fix.TearDown(c) + + err := fix.Guest(c).Visit(nil, nil) + c.Check(err, gc.ErrorMatches, "fortress worker shutting down") +} + +func (s *FortressSuite) TestStartsLocked(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + AssertLocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestInitialLockdown(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + err := fix.Guard(c).Lockdown(nil) + c.Check(err, jc.ErrorIsNil) + AssertLocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestInitialUnlock(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + err := fix.Guard(c).Unlock() + c.Check(err, jc.ErrorIsNil) + AssertUnlocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestDoubleUnlock(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + guard := fix.Guard(c) + err := guard.Unlock() + c.Check(err, jc.ErrorIsNil) + + err = guard.Unlock() + c.Check(err, jc.ErrorIsNil) + AssertUnlocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestDoubleLockdown(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + guard := fix.Guard(c) + err := guard.Unlock() + c.Check(err, jc.ErrorIsNil) + err = guard.Lockdown(nil) + c.Check(err, jc.ErrorIsNil) + + err = guard.Lockdown(nil) + c.Check(err, jc.ErrorIsNil) + AssertLocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestWorkersIndependent(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + // Create a separate worker and associated guard from the same manifold. 
+ worker2, err := fix.manifold.Start(nil) + c.Assert(err, jc.ErrorIsNil) + defer CheckStop(c, worker2) + var guard2 fortress.Guard + err = fix.manifold.Output(worker2, &guard2) + c.Assert(err, jc.ErrorIsNil) + + // Unlock the separate worker; check the original worker is unaffected. + err = guard2.Unlock() + c.Assert(err, jc.ErrorIsNil) + AssertLocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestVisitError(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + err := fix.Guard(c).Unlock() + c.Check(err, jc.ErrorIsNil) + + err = fix.Guest(c).Visit(badVisit, nil) + c.Check(err, gc.ErrorMatches, "bad!") +} + +func (s *FortressSuite) TestVisitSuccess(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + err := fix.Guard(c).Unlock() + c.Check(err, jc.ErrorIsNil) + + err = fix.Guest(c).Visit(func() error { return nil }, nil) + c.Check(err, jc.ErrorIsNil) +} + +func (s *FortressSuite) TestConcurrentVisit(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + err := fix.Guard(c).Unlock() + c.Check(err, jc.ErrorIsNil) + guest := fix.Guest(c) + + // Start a bunch of concurrent, blocking, Visits. + const count = 10 + var started sync.WaitGroup + finishes := make(chan int, count) + unblocked := make(chan struct{}) + for i := 0; i < count; i++ { + started.Add(1) + go func(i int) { + visit := func() error { + started.Done() + <-unblocked + return nil + } + err := guest.Visit(visit, nil) + c.Check(err, jc.ErrorIsNil) + finishes <- i + + }(i) + } + started.Wait() + + // Just for fun, make sure a separate Visit still works as expected. + AssertUnlocked(c, guest) + + // Unblock them all, and wait for them all to complete. + close(unblocked) + timeout := time.After(coretesting.LongWait) + seen := make(map[int]bool) + for i := 0; i < count; i++ { + select { + case finished := <-finishes: + c.Logf("visit %d finished", finished) + seen[finished] = true + case <-timeout: + c.Errorf("timed out waiting for %dth result", i) + } + } + c.Check(seen, gc.HasLen, count) +} + +func (s *FortressSuite) TestUnlockUnblocksVisit(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + // Start a Visit on a locked fortress, and check it's blocked. + visited := make(chan error, 1) + go func() { + visited <- fix.Guest(c).Visit(badVisit, nil) + }() + select { + case err := <-visited: + c.Fatalf("unexpected Visit result: %v", err) + case <-time.After(coretesting.ShortWait): + } + + // Unlock the fortress, and check the Visit is unblocked. + err := fix.Guard(c).Unlock() + c.Assert(err, jc.ErrorIsNil) + select { + case err := <-visited: + c.Check(err, gc.ErrorMatches, "bad!") + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } +} + +func (s *FortressSuite) TestVisitUnblocksLockdown(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + // Start a long Visit to an unlocked fortress. + unblockVisit := fix.startBlockingVisit(c) + defer close(unblockVisit) + + // Start a Lockdown call, and check that nothing progresses... + locked := make(chan error, 1) + go func() { + locked <- fix.Guard(c).Lockdown(nil) + }() + select { + case err := <-locked: + c.Fatalf("unexpected Lockdown result: %v", err) + case <-time.After(coretesting.ShortWait): + } + + // ...including new Visits. + AssertLocked(c, fix.Guest(c)) + + // Complete the running Visit, and check that the Lockdown completes too. 
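The tests above exercise the fortress from both sides; stripped of the test scaffolding, client code would look roughly like this (abort handling elided, names taken from the interfaces defined later in this patch):

    // Guard side: open the fortress.
    if err := guard.Unlock(); err != nil {
        return err
    }
    // Guest side: run a func under the fortress's protection.
    err := guest.Visit(func() error {
        // non-exclusive access to the protected resource happens here
        return nil
    }, abort)
    // Guard side: block new visits and wait for running ones.
    if err := guard.Lockdown(abort); err != nil {
        return err
    }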
+ unblockVisit <- struct{}{} + select { + case err := <-locked: + c.Check(err, jc.ErrorIsNil) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } +} + +func (s *FortressSuite) TestAbortedLockdownStillLocks(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + // Start a long Visit to an unlocked fortress. + unblockVisit := fix.startBlockingVisit(c) + defer close(unblockVisit) + + // Start a Lockdown call, and check that nothing progresses... + locked := make(chan error, 1) + abort := make(chan struct{}) + go func() { + locked <- fix.Guard(c).Lockdown(abort) + }() + select { + case err := <-locked: + c.Fatalf("unexpected Lockdown result: %v", err) + case <-time.After(coretesting.ShortWait): + } + + // ...then abort the lockdown. + close(abort) + select { + case err := <-locked: + c.Check(err, gc.Equals, fortress.ErrAborted) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out") + } + + // Check the fortress is already locked, even as the old visit continues. + AssertLocked(c, fix.Guest(c)) +} + +func (s *FortressSuite) TestAbortedLockdownUnlock(c *gc.C) { + fix := newFixture(c) + defer fix.TearDown(c) + + // Start a long Visit to an unlocked fortress. + unblockVisit := fix.startBlockingVisit(c) + defer close(unblockVisit) + + // Start and abort a Lockdown. + abort := make(chan struct{}) + close(abort) + guard := fix.Guard(c) + err := guard.Lockdown(abort) + c.Assert(err, gc.Equals, fortress.ErrAborted) + + // Unlock the fortress again, leaving the original visit running, and + // check that new Visits are immediately accepted. + err = guard.Unlock() + c.Assert(err, jc.ErrorIsNil) + AssertUnlocked(c, fix.Guest(c)) +} === added file 'src/github.com/juju/juju/worker/fortress/interface.go' --- src/github.com/juju/juju/worker/fortress/interface.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/fortress/interface.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +/* +Package fortress implements a convenient metaphor for an RWLock. + +A "fortress" is constructed via a manifold's Start func, and accessed via its +Output func as either a Guard or a Guest. To begin with, it's considered to be +locked, and inaccessible to Guests; when the Guard Unlocks it, the Guests can +Visit it until the Guard calls Lockdown. At that point, new Visits are blocked, +and existing Visits are allowed to complete; the Lockdown returns once all +Guests' Visits have completed. + +The original motivating use case was for a component to mediate charm directory +access between the uniter and the metrics collector. The metrics collector must +be free to run its own independent hooks while the uniter is active; but metrics +hooks and charm upgrades cannot be allowed to tread on one another's toes. +*/ +package fortress + +import ( + "github.com/juju/errors" +) + +// Guard manages Guest access to a fortress. +type Guard interface { + + // Unlock unblocks all Guest.Visit calls. + Unlock() error + + // Lockdown blocks new Guest.Visit calls, and waits for existing calls to + // complete; it will return ErrAborted if the supplied Abort is closed + // before lockdown is complete. In this situation, the fortress will + // remain closed to new visits, but may still be executing pre-existing + // ones; you need to wait for a Lockdown to complete successfully before + // you can infer exclusive access. 
+ Lockdown(Abort) error +} + +// Guest allows clients to Visit a fortress when it's unlocked; that is, to +// get non-exclusive access to whatever resource is being protected for the +// duration of the supplied Visit func. +type Guest interface { + + // Visit waits until the fortress is unlocked, then runs the supplied + // Visit func. It will return ErrAborted if the supplied Abort is closed + // before the Visit is started. + Visit(Visit, Abort) error +} + +// Visit is an operation that can be performed by a Guest. +type Visit func() error + +// Abort is a channel that can be closed to abort a blocking operation. +type Abort <-chan struct{} + +// ErrAborted is used to confirm clean termination of a blocking operation. +var ErrAborted = errors.New("fortress operation aborted") === added file 'src/github.com/juju/juju/worker/fortress/manifold.go' --- src/github.com/juju/juju/worker/fortress/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/fortress/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package fortress + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" +) + +// Manifold returns a dependency.Manifold that runs a fortress. +// +// Clients should access the fortress resource via Guard and/or Guest pointers. +// Guest.Visit calls will block until a Guard.Unlock call is made; Guard.Lockdown +// calls will block new Guest.Visits and wait until all active Visits complete. +// +// If multiple clients act as guards, the fortress' state at any time will be +// determined by whichever guard last ran an operation; that is to say, it will +// be impossible to reliably tell from outside. So please don't do that. +func Manifold() dependency.Manifold { + return dependency.Manifold{ + Start: func(_ dependency.GetResourceFunc) (worker.Worker, error) { + return newFortress(), nil + }, + Output: func(in worker.Worker, out interface{}) error { + inFortress, _ := in.(*fortress) + if inFortress == nil { + return errors.Errorf("in should be %T; is %T", inFortress, in) + } + switch outPointer := out.(type) { + case *Guard: + *outPointer = inFortress + case *Guest: + *outPointer = inFortress + default: + return errors.Errorf("out should be *fortress.Guest or *fortress.Guard; is %T", out) + } + return nil + }, + } +} === added file 'src/github.com/juju/juju/worker/fortress/package_test.go' --- src/github.com/juju/juju/worker/fortress/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/fortress/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package fortress_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/fortress/util_test.go' --- src/github.com/juju/juju/worker/fortress/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/fortress/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,133 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
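A fortress is always reached through the manifold's Output func, never by holding the *fortress directly; the test fixture below demonstrates the retrieval pattern, which in client code would look approximately like:

    var guard fortress.Guard
    if err := manifold.Output(w, &guard); err != nil {
        return err // w was not a *fortress
    }
    var guest fortress.Guest
    if err := manifold.Output(w, &guest); err != nil {
        return err
    }

Here w is whatever worker the manifold's Start func returned; in a running agent the dependency engine performs this plumbing.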
+
+package fortress_test
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	coretesting "github.com/juju/juju/testing"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+	"github.com/juju/juju/worker/fortress"
+)
+
+// fixture holds a fortress worker and the manifold whence it sprang.
+type fixture struct {
+	manifold dependency.Manifold
+	worker worker.Worker
+}
+
+// newFixture returns a new fixture with a running worker. The caller
+// takes responsibility for stopping the worker (most easily accomplished
+// by deferring a TearDown).
+func newFixture(c *gc.C) *fixture {
+	manifold := fortress.Manifold()
+	worker, err := manifold.Start(nil)
+	c.Assert(err, jc.ErrorIsNil)
+	return &fixture{
+		manifold: manifold,
+		worker: worker,
+	}
+}
+
+// TearDown stops the worker and checks it encountered no errors.
+func (fix *fixture) TearDown(c *gc.C) {
+	CheckStop(c, fix.worker)
+}
+
+// Guard returns a fortress.Guard backed by the fixture's worker.
+func (fix *fixture) Guard(c *gc.C) (out fortress.Guard) {
+	err := fix.manifold.Output(fix.worker, &out)
+	c.Assert(err, jc.ErrorIsNil)
+	return out
+}
+
+// Guest returns a fortress.Guest backed by the fixture's worker.
+func (fix *fixture) Guest(c *gc.C) (out fortress.Guest) {
+	err := fix.manifold.Output(fix.worker, &out)
+	c.Assert(err, jc.ErrorIsNil)
+	return out
+}
+
+// startBlockingVisit Unlocks the fortress; starts a Visit and waits for it to
+// be invoked; then leaves that Visit blocking, and returns a channel on which
+// you (1) *can* send a value to unblock the visit but (2) *must* defer a close
+// (in case your test fails before sending, in which case we still want to stop
+// the visit).
+func (fix *fixture) startBlockingVisit(c *gc.C) chan<- struct{} {
+	err := fix.Guard(c).Unlock()
+	c.Assert(err, jc.ErrorIsNil)
+	visitStarted := make(chan struct{}, 1)
+	defer close(visitStarted)
+	unblockVisit := make(chan struct{}, 1)
+	go func() {
+		err := fix.Guest(c).Visit(func() error {
+			visitStarted <- struct{}{}
+			<-unblockVisit
+			return nil
+		}, nil)
+		c.Check(err, jc.ErrorIsNil)
+	}()
+	select {
+	case <-visitStarted:
+	case <-time.After(coretesting.LongWait):
+		c.Fatalf("visit never started")
+	}
+	return unblockVisit
+}
+
+// AssertUnlocked checks that the supplied Guest can Visit its fortress.
+func AssertUnlocked(c *gc.C, guest fortress.Guest) {
+	visited := make(chan error)
+	go func() {
+		visited <- guest.Visit(badVisit, nil)
+	}()
+
+	select {
+	case err := <-visited:
+		c.Assert(err, gc.ErrorMatches, "bad!")
+	case <-time.After(coretesting.LongWait):
+		c.Fatalf("abort never handled")
+	}
+}
+
+// AssertLocked checks that the supplied Guest's Visit calls are blocked
+// (and can be cancelled via Abort).
+func AssertLocked(c *gc.C, guest fortress.Guest) {
+	visited := make(chan error)
+	abort := make(chan struct{})
+	go func() {
+		visited <- guest.Visit(badVisit, abort)
+	}()
+
+	// NOTE(fwereade): this isn't about interacting with a timer; it's about
+	// making sure other goroutines have had ample opportunity to do stuff.
+	delay := time.After(coretesting.ShortWait)
+	for {
+		select {
+		case <-delay:
+			delay = nil
+			close(abort)
+		case err := <-visited:
+			c.Assert(err, gc.Equals, fortress.ErrAborted)
+			return
+		case <-time.After(coretesting.LongWait):
+			c.Fatalf("timed out")
+		}
+	}
+}
+
+// CheckStop stops the worker and checks it encountered no error.
+func CheckStop(c *gc.C, w worker.Worker) {
+	c.Check(worker.Stop(w), jc.ErrorIsNil)
+}
+
+// badVisit is a Visit that always fails.
+func badVisit() error {
+	return errors.New("bad!")
+}

=== modified file 'src/github.com/juju/juju/worker/gate/interface.go'
--- src/github.com/juju/juju/worker/gate/interface.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/worker/gate/interface.go	2016-03-22 15:18:22 +0000
@@ -14,6 +14,13 @@
 // Waiter is used to wait for a shared gate to be unlocked.
 type Waiter interface {
 	Unlocked() <-chan struct{}
+	IsUnlocked() bool
+}
+
+// Lock combines the Waiter and Unlocker interfaces.
+type Lock interface {
+	Waiter
+	Unlocker
 }
 
 // AlreadyUnlocked is a Waiter that always reports its gate to be unlocked.
@@ -25,3 +32,8 @@
 	close(ch)
 	return ch
 }
+
+// IsUnlocked is part of the Waiter interface.
+func (AlreadyUnlocked) IsUnlocked() bool {
+	return true
+}

=== modified file 'src/github.com/juju/juju/worker/gate/manifold.go'
--- src/github.com/juju/juju/worker/gate/manifold.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/worker/gate/manifold.go	2016-03-22 15:18:22 +0000
@@ -15,24 +15,24 @@
 
 // Manifold returns a dependency.Manifold that wraps a single channel, shared
 // across all workers returned by the start func; it can be used to synchronize
-// operations acrosss manifolds that lack direct dependency relationships.
+// operations across manifolds that lack direct dependency relationships.
 //
 // The output func accepts an out pointer to either an Unlocker or a Waiter.
 func Manifold() dependency.Manifold {
-
-	// mu and ch are shared across all workers started by the returned manifold.
-	// In normal operation, there will only be one such worker at a time; but if
-	// multiple workers somehow run in parallel, mu should prevent panic and/or
-	// confusion.
-	mu := new(sync.Mutex)
-	ch := make(chan struct{})
-
+	return ManifoldEx(NewLock())
+}
+
+// ManifoldEx does the same thing as Manifold but takes the
+// Lock which is used to wait on or unlock the gate. This
+// allows code running outside of a dependency engine managed worker
+// to monitor or unlock the gate.
+//
+// TODO(mjs) - this can likely go away once all machine agent workers
+// are running inside the dependency engine.
+func ManifoldEx(lock Lock) dependency.Manifold {
 	return dependency.Manifold{
 		Start: func(_ dependency.GetResourceFunc) (worker.Worker, error) {
-			w := &gate{
-				mu: mu,
-				ch: ch,
-			}
+			w := &gate{lock: lock}
 			go func() {
 				defer w.tomb.Done()
 				<-w.tomb.Dying()
@@ -46,22 +46,69 @@
 			}
 			switch outPointer := out.(type) {
 			case *Unlocker:
-				*outPointer = inWorker
+				*outPointer = inWorker.lock
 			case *Waiter:
-				*outPointer = inWorker
+				*outPointer = inWorker.lock
+			case *Lock:
+				*outPointer = inWorker.lock
 			default:
-				return errors.Errorf("out should be a pointer to an Unlocker or a Waiter; is %#v", out)
+				return errors.Errorf("out should be a *Unlocker, *Waiter, *Lock; is %#v", out)
 			}
 			return nil
 		},
 	}
}
-// gate implements Waiter, Unlocker, and worker.Worker.
+// NewLock returns a new Lock for the gate manifold, suitable for
+// passing to ManifoldEx. It can be safely unlocked and monitored by
+// code running inside or outside of the dependency engine.
+func NewLock() Lock {
+	return &lock{
+		// mu and ch are shared across all workers started by the returned manifold.
+		// In normal operation, there will only be one such worker at a time; but if
+		// multiple workers somehow run in parallel, mu should prevent panic and/or
+		// confusion.
+		mu: new(sync.Mutex),
+		ch: make(chan struct{}),
+	}
+}
+
+// lock implements Unlocker and Waiter.
+type lock struct {
+	mu *sync.Mutex
+	ch chan struct{}
+}
+
+// Unlock implements Unlocker.
+func (l *lock) Unlock() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	select {
+	case <-l.ch:
+	default:
+		close(l.ch)
+	}
+}
+
+// Unlocked implements Waiter.
+func (l *lock) Unlocked() <-chan struct{} {
+	return l.ch
+}
+
+// IsUnlocked implements Waiter.
+func (l *lock) IsUnlocked() bool {
+	select {
+	case <-l.ch:
+		return true
+	default:
+		return false
+	}
+}
+
+// gate implements a degenerate worker that holds a Lock.
 type gate struct {
 	tomb tomb.Tomb
-	mu *sync.Mutex
-	ch chan struct{}
+	lock Lock
 }
 
 // Kill is part of the worker.Worker interface.
@@ -73,19 +120,3 @@
 func (w *gate) Wait() error {
 	return w.tomb.Wait()
 }
-
-// Unlocked is part of the Waiter interface.
-func (w *gate) Unlocked() <-chan struct{} {
-	return w.ch
-}
-
-// Unlock is part of the Unlocker interface.
-func (w *gate) Unlock() {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	select {
-	case <-w.ch:
-	default:
-		close(w.ch)
-	}
-}

=== modified file 'src/github.com/juju/juju/worker/gate/manifold_test.go'
--- src/github.com/juju/juju/worker/gate/manifold_test.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/worker/gate/manifold_test.go	2016-03-22 15:18:22 +0000
@@ -70,6 +70,17 @@
 	assertUnlocked(c, w)
 }
 
+func (s *ManifoldSuite) TestLockOutput(c *gc.C) {
+	var lock gate.Lock
+	err := s.manifold.Output(s.worker, &lock)
+	c.Assert(err, jc.ErrorIsNil)
+
+	w := waiter(c, s.manifold, s.worker)
+	assertLocked(c, w)
+	lock.Unlock()
+	assertUnlocked(c, w)
+}
+
 func (s *ManifoldSuite) TestDifferentManifoldWorkersUnconnected(c *gc.C) {
 	manifold2 := gate.Manifold()
 	worker2, err := manifold2.Start(nil)
@@ -88,6 +99,26 @@
 	assertUnlocked(c, w)
 }
 
+func (s *ManifoldSuite) TestManifoldEx(c *gc.C) {
+	lock := gate.NewLock()
+
+	manifold := gate.ManifoldEx(lock)
+	var waiter1 gate.Waiter = lock
+	var unlocker1 gate.Unlocker = lock
+
+	worker, err := manifold.Start(nil)
+	c.Assert(err, jc.ErrorIsNil)
+	defer checkStop(c, worker)
+	waiter2 := waiter(c, manifold, worker)
+
+	assertLocked(c, waiter1)
+	assertLocked(c, waiter2)
+
+	unlocker1.Unlock()
+	assertUnlocked(c, waiter1)
+	assertUnlocked(c, waiter2)
+}
+
 func unlocker(c *gc.C, m dependency.Manifold, w worker.Worker) gate.Unlocker {
 	var unlocker gate.Unlocker
 	err := m.Output(w, &unlocker)
@@ -105,6 +136,7 @@
 }
 
 func assertLocked(c *gc.C, waiter gate.Waiter) {
+	c.Assert(waiter.IsUnlocked(), jc.IsFalse)
 	select {
 	case <-waiter.Unlocked():
 		c.Fatalf("expected gate to be locked")
@@ -113,6 +145,7 @@
 }
 
 func assertUnlocked(c *gc.C, waiter gate.Waiter) {
+	c.Assert(waiter.IsUnlocked(), jc.IsTrue)
 	select {
 	case <-waiter.Unlocked():
 	default:

=== added directory 'src/github.com/juju/juju/worker/identityfilewriter'
=== added file 'src/github.com/juju/juju/worker/identityfilewriter/manifold.go'
--- src/github.com/juju/juju/worker/identityfilewriter/manifold.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/identityfilewriter/manifold.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,65 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
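TestManifoldEx above demonstrates the point of the gate refactoring: the same Lock value now works from inside and outside the dependency engine. Roughly:

    lock := gate.NewLock()
    manifold := gate.ManifoldEx(lock)
    // Engine-managed code reaches the gate via manifold.Output...
    // ...while external code can poke the very same lock directly:
    if !lock.IsUnlocked() {
        lock.Unlock()
    }
    <-lock.Unlocked() // returns immediately once unlocked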
+ +package identityfilewriter + +import ( + "errors" + + "github.com/juju/names" + + "github.com/juju/juju/agent" + apiagent "github.com/juju/juju/api/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines the names of the manifolds on which a Manifold will depend. +type ManifoldConfig util.PostUpgradeManifoldConfig + +// Manifold returns a dependency manifold that runs an identity file writer worker, +// using the resource names defined in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) +} + +// newWorker trivially wraps NewWorker for use in a util.PostUpgradeManifold. +func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + cfg := a.CurrentConfig() + + // Grab the tag and ensure that it's for a machine. + tag, ok := cfg.Tag().(names.MachineTag) + if !ok { + return nil, errors.New("this manifold may only be used inside a machine agent") + } + + // Get the machine agent's jobs. + entity, err := apiagent.NewState(apiCaller).Entity(tag) + if err != nil { + return nil, err + } + + var isModelManager bool + for _, job := range entity.Jobs() { + if job == multiwatcher.JobManageModel { + isModelManager = true + break + } + } + + if !isModelManager { + return nil, dependency.ErrMissing + } + + return NewWorker(cfg) +} + +var NewWorker = func(agentConfig agent.Config) (worker.Worker, error) { + inner := func(<-chan struct{}) error { + return agent.WriteSystemIdentityFile(agentConfig) + } + return worker.NewSimpleWorker(inner), nil +} === added file 'src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go' --- src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/identityfilewriter/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
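NewWorker above is deliberately an exported package variable rather than a plain function: the manifold test (below) patches it to observe that, and only that, the manifold decided to start a worker. The patching idiom, using the juju/testing suite's PatchValue:

    s.PatchValue(&identityfilewriter.NewWorker,
        func(a agent.Config) (worker.Worker, error) {
            s.newCalled = true
            return nil, nil
        },
    )

Returning a nil worker is tolerable only because these tests never run the result.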
+ +package identityfilewriter_test + +import ( + "github.com/juju/juju/worker/dependency" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + apitesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/identityfilewriter" + workertesting "github.com/juju/juju/worker/testing" +) + +type ManifoldSuite struct { + testing.IsolationSuite + newCalled bool +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.newCalled = false + s.PatchValue(&identityfilewriter.NewWorker, + func(a agent.Config) (worker.Worker, error) { + s.newCalled = true + return nil, nil + }, + ) +} + +func (s *ManifoldSuite) TestMachine(c *gc.C) { + config := identityfilewriter.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + identityfilewriter.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + mockAPICaller(multiwatcher.JobManageModel)) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.newCalled, jc.IsTrue) +} + +func (s *ManifoldSuite) TestMachineNotModelManagerErrors(c *gc.C) { + config := identityfilewriter.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + identityfilewriter.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + mockAPICaller(multiwatcher.JobHostUnits)) + c.Assert(err, gc.Equals, dependency.ErrMissing) + c.Assert(s.newCalled, jc.IsFalse) +} + +func (s *ManifoldSuite) TestNonMachineAgent(c *gc.C) { + config := identityfilewriter.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + identityfilewriter.Manifold(config), + &fakeAgent{tag: names.NewUnitTag("foo/0")}, + mockAPICaller("")) + c.Assert(err, gc.ErrorMatches, "this manifold may only be used inside a machine agent") + c.Assert(s.newCalled, jc.IsFalse) +} + +type fakeAgent struct { + agent.Agent + tag names.Tag +} + +func (a *fakeAgent) CurrentConfig() agent.Config { + return &fakeConfig{tag: a.tag} +} + +type fakeConfig struct { + agent.Config + tag names.Tag +} + +func (c *fakeConfig) Tag() names.Tag { + return c.tag +} + +func mockAPICaller(job multiwatcher.MachineJob) apitesting.APICallerFunc { + return apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { + if res, ok := result.(*params.AgentGetEntitiesResults); ok { + res.Entities = []params.AgentGetEntitiesResult{ + {Jobs: []multiwatcher.MachineJob{ + job, + }}} + } + return nil + }) +} === added file 'src/github.com/juju/juju/worker/identityfilewriter/package_test.go' --- src/github.com/juju/juju/worker/identityfilewriter/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/identityfilewriter/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package identityfilewriter_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added directory 'src/github.com/juju/juju/worker/imagemetadataworker' === added file 'src/github.com/juju/juju/worker/imagemetadataworker/metadataupdater.go' --- src/github.com/juju/juju/worker/imagemetadataworker/metadataupdater.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/imagemetadataworker/metadataupdater.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,25 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package imagemetadataworker + +import ( + "time" + + "github.com/juju/juju/api/imagemetadata" + "github.com/juju/juju/worker" +) + +// updatePublicImageMetadataPeriod is how frequently we check for +// public image metadata updates. +const updatePublicImageMetadataPeriod = time.Hour * 24 + +// NewWorker returns a worker that lists published cloud +// images metadata, and records them in state. +func NewWorker(cl *imagemetadata.Client) worker.Worker { + // TODO (anastasiamac 2015-09-02) Bug#1491353 - don't ignore stop channel. + f := func(stop <-chan struct{}) error { + return cl.UpdateFromPublishedImages() + } + return worker.NewPeriodicWorker(f, updatePublicImageMetadataPeriod, worker.NewTimer) +} === added file 'src/github.com/juju/juju/worker/imagemetadataworker/metadataupdater_test.go' --- src/github.com/juju/juju/worker/imagemetadataworker/metadataupdater_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/imagemetadataworker/metadataupdater_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,37 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package imagemetadataworker_test + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" + "github.com/juju/juju/worker/imagemetadataworker" +) + +var _ = gc.Suite(&imageMetadataUpdateSuite{}) + +type imageMetadataUpdateSuite struct { + baseMetadataSuite +} + +func (s *imageMetadataUpdateSuite) TestWorker(c *gc.C) { + done := make(chan struct{}) + client := s.ImageClient(done) + + w := imagemetadataworker.NewWorker(client) + + defer w.Wait() + defer w.Kill() + + select { + case <-done: + case <-time.After(testing.LongWait): + c.Fatalf("timed out waiting for images metadata to update") + } + c.Assert(s.apiCalled, jc.IsTrue) +} === added file 'src/github.com/juju/juju/worker/imagemetadataworker/package_test.go' --- src/github.com/juju/juju/worker/imagemetadataworker/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/imagemetadataworker/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,37 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
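worker.NewPeriodicWorker, used above, turns a single-shot func into a worker that reruns it on a fixed cadence. Defining another such worker follows the same recipe (the period and body here are illustrative only):

    const interval = 15 * time.Minute
    poll := func(stop <-chan struct{}) error {
        // one round of work; a well-behaved func would also honour stop,
        // which the worker above currently ignores (see its TODO).
        return nil
    }
    w := worker.NewPeriodicWorker(poll, interval, worker.NewTimer)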
+
+package imagemetadataworker_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+
+	apitesting "github.com/juju/juju/api/base/testing"
+	"github.com/juju/juju/api/imagemetadata"
+	coretesting "github.com/juju/juju/testing"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}
+
+type baseMetadataSuite struct {
+	coretesting.BaseSuite
+	apiCalled bool
+}
+
+func (s *baseMetadataSuite) ImageClient(done chan struct{}) *imagemetadata.Client {
+	closer := apitesting.APICallerFunc(func(objType string, version int, id, request string, a, result interface{}) error {
+		s.apiCalled = false
+		if request == "UpdateFromPublishedImages" {
+			s.apiCalled = true
+			close(done)
+			return nil
+		}
+		return nil
+	})
+
+	return imagemetadata.NewClient(closer)
+}

=== modified file 'src/github.com/juju/juju/worker/instancepoller/aggregate.go'
--- src/github.com/juju/juju/worker/instancepoller/aggregate.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/worker/instancepoller/aggregate.go	2016-03-22 15:18:22 +0000
@@ -48,12 +48,17 @@
 
 func (a *aggregator) instanceInfo(id instance.Id) (instanceInfo, error) {
 	reply := make(chan instanceInfoReply)
-	a.reqc <- instanceInfoReq{
-		instId: id,
-		reply: reply,
+	reqc := a.reqc
+	for {
+		select {
+		case <-a.tomb.Dying():
+			return instanceInfo{}, errors.New("instanceInfo call aborted")
+		case reqc <- instanceInfoReq{id, reply}:
+			reqc = nil
+		case r := <-reply:
+			return r.info, r.err
+		}
 	}
-	r := <-reply
-	return r.info, r.err
 }
 
 var gatherTime = 3 * time.Second
@@ -88,7 +93,11 @@
 			} else {
 				reply.info, reply.err = a.instInfo(req.instId, insts[i])
 			}
-			req.reply <- reply
+			select {
+			case <-a.tomb.Dying():
+				return tomb.ErrDying
+			case req.reply <- reply:
+			}
 		}
 		reqs = nil
 	}

=== modified file 'src/github.com/juju/juju/worker/instancepoller/aggregate_test.go'
--- src/github.com/juju/juju/worker/instancepoller/aggregate_test.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/worker/instancepoller/aggregate_test.go	2016-03-22 15:18:22 +0000
@@ -132,6 +132,60 @@
 	c.Assert(len(testGetter.ids), gc.DeepEquals, 2)
 }
 
+// notifyingInstanceGetter wraps testInstanceGetter, notifying via
+// a channel when Instances() is called.
+type notifyingInstanceGetter struct {
+	testInstanceGetter
+	instancesc chan bool
+}
+
+func (g *notifyingInstanceGetter) Instances(ids []instance.Id) ([]instance.Instance, error) {
+	g.instancesc <- true
+	return g.testInstanceGetter.Instances(ids)
+}
+
+func (s *aggregateSuite) TestDyingWhileHandlingRequest(c *gc.C) {
+	// This tests a regression where the aggregator couldn't shut down
+	// if the tomb was killed while a request was being handled,
+	// leaving the reply channel unread.
+
+	s.PatchValue(&gatherTime, 30*time.Millisecond)
+
+	// Set up the aggregator with the instance getter.
+	testGetter := &notifyingInstanceGetter{instancesc: make(chan bool)}
+	testGetter.newTestInstance("foo", "foobar", []string{"127.0.0.1", "192.168.1.1"})
+	aggregator := newAggregator(testGetter)
+
+	// Make a request with a reply channel that will never be read.
+	req := instanceInfoReq{
+		reply: make(chan instanceInfoReply),
+		instId: instance.Id("foo"),
+	}
+	aggregator.reqc <- req
+
+	// Wait for Instances to be called.
+	select {
+	case <-testGetter.instancesc:
+	case <-time.After(testing.LongWait):
+		c.Fatal("Instances() not called")
+	}
+
+	// Now we know the request is being handled - kill the aggregator.
+	aggregator.Kill()
+	done := make(chan error)
+	go func() {
+		done <- aggregator.Wait()
+	}()
+
+	// The aggregator should stop.
+ select { + case err := <-done: + c.Assert(err, jc.ErrorIsNil) + case <-time.After(testing.LongWait): + c.Fatal("aggregator didn't stop") + } +} + type batchingInstanceGetter struct { testInstanceGetter wg sync.WaitGroup === modified file 'src/github.com/juju/juju/worker/instancepoller/machine_test.go' --- src/github.com/juju/juju/worker/instancepoller/machine_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/instancepoller/machine_test.go 2016-03-22 15:18:22 +0000 @@ -52,7 +52,7 @@ time.Sleep(coretesting.ShortWait) killMachineLoop(c, m, context.dyingc, died) - c.Assert(context.killAllErr, gc.Equals, nil) + c.Assert(context.killErr, gc.Equals, nil) c.Assert(m.addresses, gc.DeepEquals, testAddrs) c.Assert(m.setAddressCount, gc.Equals, 1) c.Assert(m.instStatus, gc.Equals, "running") @@ -141,43 +141,10 @@ time.Sleep(coretesting.ShortWait) killMachineLoop(c, m, context.dyingc, died) - c.Assert(context.killAllErr, gc.Equals, nil) + c.Assert(context.killErr, gc.Equals, nil) return int(count) } -func (s *machineSuite) TestSinglePollWhenInstancInfoUnimplemented(c *gc.C) { - s.PatchValue(&ShortPoll, 1*time.Millisecond) - s.PatchValue(&LongPoll, 1*time.Millisecond) - count := int32(0) - getInstanceInfo := func(id instance.Id) (instanceInfo, error) { - c.Check(id, gc.Equals, instance.Id("i1234")) - atomic.AddInt32(&count, 1) - err := ¶ms.Error{ - Code: params.CodeNotImplemented, - Message: "instance address not implemented", - } - return instanceInfo{}, err - } - context := &testMachineContext{ - getInstanceInfo: getInstanceInfo, - dyingc: make(chan struct{}), - } - m := &testMachine{ - tag: names.NewMachineTag("99"), - instanceId: "i1234", - refresh: func() error { return nil }, - life: params.Alive, - } - died := make(chan machine) - - go runMachine(context, m, nil, died) - - time.Sleep(coretesting.ShortWait) - killMachineLoop(c, m, context.dyingc, died) - c.Assert(context.killAllErr, gc.Equals, nil) - c.Assert(count, gc.Equals, int32(1)) -} - func (*machineSuite) TestChangedRefreshes(c *gc.C) { context := &testMachineContext{ getInstanceInfo: instanceInfoGetter(c, "i1234", testAddrs, "running", nil), @@ -280,7 +247,7 @@ case <-time.After(coretesting.LongWait): c.Fatalf("timed out waiting for machine to die") } - c.Assert(context.killAllErr, gc.ErrorMatches, ".*"+expectErr.Error()) + c.Assert(context.killErr, gc.ErrorMatches, ".*"+expectErr.Error()) } func killMachineLoop(c *gc.C, m machine, dying chan struct{}, died <-chan machine) { @@ -304,16 +271,16 @@ } type testMachineContext struct { - killAllErr error + killErr error getInstanceInfo func(instance.Id) (instanceInfo, error) dyingc chan struct{} } -func (context *testMachineContext) killAll(err error) { +func (context *testMachineContext) kill(err error) { if err == nil { - panic("killAll with nil error") + panic("kill with nil error") } - context.killAllErr = err + context.killErr = err } func (context *testMachineContext) instanceInfo(id instance.Id) (instanceInfo, error) { @@ -324,6 +291,10 @@ return context.dyingc } +func (context *testMachineContext) errDying() error { + return nil +} + type testMachine struct { instanceId instance.Id instanceIdErr error === added file 'src/github.com/juju/juju/worker/instancepoller/manifold.go' --- src/github.com/juju/juju/worker/instancepoller/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/instancepoller/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
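The aggregate.go changes shown a little earlier close the regression this test pins down: every blocking channel operation in the aggregator is now paired with the tomb, on both the request and the reply side, so an unread reply can no longer wedge shutdown. The core pattern:

    select {
    case <-a.tomb.Dying():
        return tomb.ErrDying
    case req.reply <- reply:
    }

On the calling side, the one-shot request send is disabled after it succeeds by setting the local reqc copy to nil, leaving only the reply and dying cases live.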
+ +package instancepoller + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/instancepoller" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig describes the resources used by the instancepoller worker. +type ManifoldConfig util.ApiManifoldConfig + +// Manifold returns a Manifold that encapsulates the instancepoller worker. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.ApiManifold( + util.ApiManifoldConfig(config), + manifoldStart, + ) +} + +// manifoldStart creates an instancepoller worker, given a base.APICaller. +func manifoldStart(apiCaller base.APICaller) (worker.Worker, error) { + api := instancepoller.NewAPI(apiCaller) + w, err := NewWorker(api) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} === added file 'src/github.com/juju/juju/worker/instancepoller/package_test.go' --- src/github.com/juju/juju/worker/instancepoller/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/instancepoller/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// TODO(wallyworld) - move to instancepoller_test +package instancepoller + +import ( + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} === modified file 'src/github.com/juju/juju/worker/instancepoller/updater.go' --- src/github.com/juju/juju/worker/instancepoller/updater.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/instancepoller/updater.go 2016-03-22 15:18:22 +0000 @@ -7,16 +7,17 @@ "fmt" "time" + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" "github.com/juju/juju/network" - "github.com/juju/juju/state/watcher" + "github.com/juju/juju/watcher" ) -var logger = loggo.GetLogger("juju.worker.instanceupdater") +var logger = loggo.GetLogger("juju.worker.instancepoller") // ShortPoll and LongPoll hold the polling intervals for the instance // updater. When a machine has no address or is not started, it will be @@ -51,10 +52,18 @@ status string } +// lifetimeContext was extracted to allow the various context clients to get +// the benefits of the catacomb encapsulating everything that should happen +// here. A clean implementation would almost certainly not need this. +type lifetimeContext interface { + kill(error) + dying() <-chan struct{} + errDying() error +} + type machineContext interface { - killAll(err error) + lifetimeContext instanceInfo(id instance.Id) (instanceInfo, error) - dying() <-chan struct{} } type machineAddress struct { @@ -62,16 +71,10 @@ addresses []network.Address } -type machinesWatcher interface { - Changes() <-chan []string - Err() error - Stop() error -} - type updaterContext interface { + lifetimeContext newMachineContext() machineContext getMachine(tag names.MachineTag) (machine, error) - dying() <-chan struct{} } type updater struct { @@ -84,29 +87,27 @@ // machinesWatcher and starts machine goroutines to deal with them, // using the provided newMachineContext function to create the // appropriate context for each new machine tag. 
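The lifetimeContext extraction above lets the same loop code run under a tomb today and a catacomb after the worker.go changes later in this patch. Every potentially blocking operation inside the loop pairs with dying(); an illustrative helper (not in the patch) makes the shape explicit:

    // sendOrDying performs a blocking send unless the context
    // starts dying first.
    func sendOrDying(ctx lifetimeContext, c chan<- struct{}) error {
        select {
        case <-ctx.dying():
            return ctx.errDying()
        case c <- struct{}{}:
            return nil
        }
    }

This is the dance watchMachinesLoop now performs when notifying an existing machine goroutine of a change.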
-func watchMachinesLoop(context updaterContext, w machinesWatcher) (err error) { +func watchMachinesLoop(context updaterContext, machinesWatcher watcher.StringsWatcher) (err error) { p := &updater{ context: context, machines: make(map[names.MachineTag]chan struct{}), machineDead: make(chan machine), } defer func() { - if stopErr := w.Stop(); stopErr != nil { - if err == nil { - err = fmt.Errorf("error stopping watcher: %v", stopErr) - } else { - logger.Warningf("ignoring error when stopping watcher: %v", stopErr) - } - } + // TODO(fwereade): is this a home-grown sync.WaitGroup or something? + // strongly suspect these machine goroutines could be managed rather + // less opaquely if we made them all workers. for len(p.machines) > 0 { delete(p.machines, (<-p.machineDead).Tag()) } }() for { select { - case ids, ok := <-w.Changes(): + case <-p.context.dying(): + return p.context.errDying() + case ids, ok := <-machinesWatcher.Changes(): if !ok { - return watcher.EnsureErr(w) + return errors.New("machines watcher closed") } tags := make([]names.MachineTag, len(ids)) for i := range ids { @@ -117,8 +118,6 @@ } case m := <-p.machineDead: delete(p.machines, m.Tag()) - case <-p.context.dying(): - return nil } } } @@ -148,7 +147,11 @@ p.machines[tag] = c go runMachine(p.context.newMachineContext(), m, c, p.machineDead) } else { - c <- struct{}{} + select { + case <-p.context.dying(): + return p.context.errDying() + case c <- struct{}{}: + } } } return nil @@ -170,7 +173,7 @@ } }() if err := machineLoop(context, m, changed); err != nil { - context.killAll(err) + context.kill(err) } } @@ -184,18 +187,7 @@ if pollInstance { instInfo, err := pollInstanceInfo(context, m) if err != nil && !params.IsCodeNotProvisioned(err) { - // If the provider doesn't implement Addresses/Status now, - // it never will until we're upgraded, so don't bother - // asking any more. We could use less resources - // by taking down the entire worker, but this is easier for now - // (and hopefully the local provider will implement - // Addresses/Status in the not-too-distant future), - // so we won't need to worry about this case at all. - if params.IsCodeNotImplemented(err) { - pollInterval = 365 * 24 * time.Hour - } else { - return err - } + return err } machineStatus := params.StatusPending if err == nil { @@ -216,10 +208,10 @@ pollInstance = false } select { + case <-context.dying(): + return context.errDying() case <-time.After(pollInterval): pollInstance = true - case <-context.dying(): - return nil case <-changed: if err := m.Refresh(); err != nil { return err @@ -245,6 +237,7 @@ } instInfo, err = context.instanceInfo(instId) if err != nil { + // TODO (anastasiamac 2016-02-01) This does not look like it needs to be removed now. 
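machineLoop's cadence logic survives the rewrite: broadly, it polls at ShortPoll while a machine has no address or is not started, relaxes to LongPoll once it has, and now returns errDying instead of nil when the context dies. Stripped down:

    pollInterval := ShortPoll
    for {
        // ...poll the instance, adjust pollInterval...
        select {
        case <-context.dying():
            return context.errDying()
        case <-time.After(pollInterval):
            // time to poll again
        case <-changed:
            // machine changed in state; refresh and re-evaluate
        }
    }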
if params.IsCodeNotImplemented(err) { return instInfo, err } === modified file 'src/github.com/juju/juju/worker/instancepoller/updater_test.go' --- src/github.com/juju/juju/worker/instancepoller/updater_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/instancepoller/updater_test.go 2016-03-22 15:18:22 +0000 @@ -5,8 +5,6 @@ package instancepoller import ( - "errors" - stdtesting "testing" "time" "github.com/juju/names" @@ -15,41 +13,15 @@ "github.com/juju/juju/apiserver/params" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" ) -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - var _ = gc.Suite(&updaterSuite{}) type updaterSuite struct { coretesting.BaseSuite } -func (*updaterSuite) TestStopsWatcher(c *gc.C) { - context := &testUpdaterContext{ - dyingc: make(chan struct{}), - } - expectErr := errors.New("some error") - watcher := &testMachinesWatcher{ - changes: make(chan []string), - err: expectErr, - } - done := make(chan error) - go func() { - done <- watchMachinesLoop(context, watcher) - }() - close(context.dyingc) - select { - case err := <-done: - c.Assert(err, gc.ErrorMatches, ".*"+expectErr.Error()) - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting for watchMachinesLoop to terminate") - } - c.Assert(watcher.stopped, jc.IsTrue) -} - func (*updaterSuite) TestWatchMachinesWaitsForMachinePollers(c *gc.C) { // We can't see that the machine pollers are still alive directly, // but we can make the machine's Refresh method block, @@ -116,7 +88,6 @@ case <-time.After(coretesting.LongWait): c.Fatalf("timed out waiting for watchMachinesLoop to terminate") } - c.Assert(watcher.stopped, jc.IsTrue) } func (s *updaterSuite) TestManualMachinesIgnored(c *gc.C) { @@ -172,10 +143,10 @@ case <-time.After(coretesting.LongWait): c.Fatalf("timed out waiting for watchMachinesLoop to terminate") } - c.Assert(watcher.stopped, jc.IsTrue) } type testUpdaterContext struct { + updaterContext newMachineContextFunc func() machineContext getMachineFunc func(tag names.MachineTag) (machine, error) dyingc chan struct{} @@ -193,21 +164,20 @@ return context.dyingc } +func (context *testUpdaterContext) errDying() error { + return nil +} + type testMachinesWatcher struct { - stopped bool + watcher.StringsWatcher changes chan []string err error } -func (w *testMachinesWatcher) Changes() <-chan []string { +func (w *testMachinesWatcher) Changes() watcher.StringsChannel { return w.changes } -func (w *testMachinesWatcher) Stop() error { - w.stopped = true - return w.err -} - -func (w *testMachinesWatcher) Err() error { +func (w *testMachinesWatcher) Wait() error { return w.err } === modified file 'src/github.com/juju/juju/worker/instancepoller/worker.go' --- src/github.com/juju/juju/worker/instancepoller/worker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/instancepoller/worker.go 2016-03-22 15:18:22 +0000 @@ -4,78 +4,103 @@ package instancepoller import ( + "github.com/juju/errors" "github.com/juju/names" - "launchpad.net/tomb" - apiinstancepoller "github.com/juju/juju/api/instancepoller" - apiwatcher "github.com/juju/juju/api/watcher" + "github.com/juju/juju/api/instancepoller" + "github.com/juju/juju/instance" "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" + "github.com/juju/juju/worker/environ" ) type updaterWorker struct { - st *apiinstancepoller.API - tomb tomb.Tomb - *aggregator - - observer *worker.EnvironObserver + st *instancepoller.API + aggregator *aggregator + catacomb 
catacomb.Catacomb } // NewWorker returns a worker that keeps track of // the machines in the state and polls their instance // addresses and status periodically to keep them up to date. -func NewWorker(st *apiinstancepoller.API) worker.Worker { +func NewWorker(st *instancepoller.API) (worker.Worker, error) { u := &updaterWorker{ st: st, } - // wait for environment - go func() { - defer u.tomb.Done() - u.tomb.Kill(u.loop()) - }() - return u + err := catacomb.Invoke(catacomb.Plan{ + Site: &u.catacomb, + Work: u.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return u, nil } +// Kill is part of the worker.Worker interface. func (u *updaterWorker) Kill() { - u.tomb.Kill(nil) + u.catacomb.Kill(nil) } +// Wait is part of the worker.Worker interface. func (u *updaterWorker) Wait() error { - return u.tomb.Wait() + return u.catacomb.Wait() } func (u *updaterWorker) loop() (err error) { - u.observer, err = worker.NewEnvironObserver(u.st) + + // TODO(fwereade): get this as a resource from a dependency.Engine. + tracker, err := environ.NewTracker(environ.Config{ + Observer: u.st, + }) if err != nil { - return err - } - u.aggregator = newAggregator(u.observer.Environ()) + return errors.Trace(err) + } + if err := u.catacomb.Add(tracker); err != nil { + return errors.Trace(err) + } + u.aggregator = newAggregator(tracker.Environ()) + if err := u.catacomb.Add(u.aggregator); err != nil { + return errors.Trace(err) + } logger.Infof("instance poller received initial environment configuration") - defer func() { - obsErr := worker.Stop(u.observer) - if err == nil { - err = obsErr - } - }() - var w apiwatcher.StringsWatcher - w, err = u.st.WatchEnvironMachines() + + watcher, err := u.st.WatchModelMachines() if err != nil { return err } - return watchMachinesLoop(u, w) + if err := u.catacomb.Add(watcher); err != nil { + return errors.Trace(err) + } + return watchMachinesLoop(u, watcher) } +// newMachineContext is part of the updaterContext interface. func (u *updaterWorker) newMachineContext() machineContext { return u } +// getMachine is part of the machineContext interface. func (u *updaterWorker) getMachine(tag names.MachineTag) (machine, error) { return u.st.Machine(tag) } +// instanceInfo is part of the machineContext interface. +func (u *updaterWorker) instanceInfo(id instance.Id) (instanceInfo, error) { + return u.aggregator.instanceInfo(id) +} + +// kill is part of the lifetimeContext interface. +func (u *updaterWorker) kill(err error) { + u.catacomb.Kill(err) +} + +// dying is part of the lifetimeContext interface. func (u *updaterWorker) dying() <-chan struct{} { - return u.tomb.Dying() + return u.catacomb.Dying() } -func (u *updaterWorker) killAll(err error) { - u.tomb.Kill(err) +// errDying is part of the lifetimeContext interface.
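With the catacomb wired in above, the result of NewWorker is managed like any other worker: Kill, then Wait, with Wait surfacing whatever error killed the catacomb. A usage sketch, assuming api is a live *instancepoller.API (the deferred-shutdown shape is the caller's choice, not part of this change):

    w, err := NewWorker(api)
    if err != nil {
        return errors.Trace(err)
    }
    defer func() {
        w.Kill()
        if err := w.Wait(); err != nil {
            logger.Errorf("instance poller stopped: %v", err)
        }
    }()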
+func (u *updaterWorker) errDying() error { + return u.catacomb.ErrDying() } === modified file 'src/github.com/juju/juju/worker/instancepoller/worker_test.go' --- src/github.com/juju/juju/worker/instancepoller/worker_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/instancepoller/worker_test.go 2016-03-22 15:18:22 +0000 @@ -36,7 +36,7 @@ func (s *workerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.apiSt, _ = s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + s.apiSt, _ = s.OpenAPIAsNewMachine(c, state.JobManageModel) s.api = s.apiSt.InstancePoller() } @@ -57,7 +57,8 @@ s.PatchValue(&gatherTime, 10*time.Millisecond) machines, insts := s.setupScenario(c) s.State.StartSync() - w := NewWorker(s.api) + w, err := NewWorker(s.api) + c.Assert(err, jc.ErrorIsNil) defer func() { c.Assert(worker.Stop(w), gc.IsNil) }() === removed file 'src/github.com/juju/juju/worker/leadership/export_test.go' --- src/github.com/juju/juju/worker/leadership/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/leadership/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "github.com/juju/juju/worker" -) - -var NewManifoldWorker = &newManifoldWorker - -func DummyTrackerWorker() worker.Worker { - // yes, this is entirely unsafe to *use*. It's just to get something - // of the right type to use in the manifold's Output tests. - return &tracker{} -} === removed file 'src/github.com/juju/juju/worker/leadership/interface.go' --- src/github.com/juju/juju/worker/leadership/interface.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/leadership/interface.go 1970-01-01 00:00:00 +0000 @@ -1,57 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package leadership - -import ( - "time" - - "github.com/juju/juju/worker" -) - -// Ticket is used to communicate leadership status to Tracker clients. -type Ticket interface { - - // Wait returns true if its Tracker is prepared to guarantee leadership - // for some period from the ticket request. The guaranteed duration depends - // upon the Tracker. - Wait() bool - - // Ready returns a channel that will be closed when a result is available - // to Wait(), and is helpful for clients that want to select rather than - // block on long-waiting tickets. - Ready() <-chan struct{} -} - -// Tracker allows clients to discover current leadership status by attempting to -// claim it for themselves. -type Tracker interface { - - // ServiceName returns the name of the service for which leadership claims - // are made. - ServiceName() string - - // ClaimDuration returns the duration for which a Ticket's true Wait result - // is guaranteed valid. - ClaimDuration() time.Duration - - // ClaimLeader will return a Ticket which, when Wait()ed for, will return - // true if leadership is guaranteed for at least the tracker's duration from - // the time the ticket was issued. Leadership claims should be resolved - // relatively quickly. - ClaimLeader() Ticket - - // WaitLeader will return a Ticket which, when Wait()ed for, will block - // until the tracker attains leadership. - WaitLeader() Ticket - - // WaitMinion will return a Ticket which, when Wait()ed for, will block - // until the tracker's future leadership can no longer be guaranteed. - WaitMinion() Ticket -} - -// TrackerWorker embeds the Tracker and worker.Worker interfaces. 
-type TrackerWorker interface { - worker.Worker - Tracker -} === modified file 'src/github.com/juju/juju/worker/leadership/manifold.go' --- src/github.com/juju/juju/worker/leadership/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/leadership/manifold.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,7 @@ "github.com/juju/juju/agent" "github.com/juju/juju/api/base" "github.com/juju/juju/api/leadership" + coreleadership "github.com/juju/juju/core/leadership" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" ) @@ -49,30 +50,33 @@ if err := getResource(config.APICallerName, &apiCaller); err != nil { return nil, err } - return newManifoldWorker(agent, apiCaller, config.LeadershipGuarantee) + return NewManifoldWorker(agent, apiCaller, config.LeadershipGuarantee) } } -// newManifoldWorker wraps NewTrackerWorker for the convenience of startFunc. It +// NewManifoldWorker wraps NewTracker for the convenience of startFunc. It // exists primarily to be patched out via NewManifoldWorker for ease of testing, -// and is not itself directly tested; once all NewTrackerWorker clients have been -// replaced with manifolds, the tests can be tidied up a bit. -var newManifoldWorker = func(agent agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { +// and is not itself directly tested. It would almost certainly be better to +// pass the constructor dependencies in as explicit manifold config. +var NewManifoldWorker = func(agent agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { tag := agent.CurrentConfig().Tag() unitTag, ok := tag.(names.UnitTag) if !ok { return nil, fmt.Errorf("expected a unit tag; got %q", tag) } claimer := leadership.NewClient(apiCaller) - return NewTrackerWorker(unitTag, claimer, guarantee), nil + return NewTracker(unitTag, claimer, guarantee), nil } -// outputFunc extracts the Tracker from a *tracker passed in as a Worker. +// outputFunc extracts the coreleadership.Tracker from a *Tracker passed in as a Worker. 
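On the consuming side, a dependent manifold lists this one among its inputs and asks the dependency engine for a coreleadership.Tracker; the engine satisfies that request through outputFunc below. A sketch of such a start func (the resource name "leadership-tracker" and the newSomeWorker constructor are invented for illustration):

    func startFunc(getResource dependency.GetResourceFunc) (worker.Worker, error) {
        var tracker coreleadership.Tracker
        if err := getResource("leadership-tracker", &tracker); err != nil {
            return nil, err
        }
        // newSomeWorker stands in for whatever worker needs leadership info.
        return newSomeWorker(tracker), nil
    }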
func outputFunc(in worker.Worker, out interface{}) error { - inWorker, _ := in.(*tracker) - outPointer, _ := out.(*Tracker) - if inWorker == nil || outPointer == nil { - return errors.Errorf("expected %T->%T; got %T->%T", inWorker, outPointer, in, out) + inWorker, _ := in.(*Tracker) + if inWorker == nil { + return errors.Errorf("expected *Tracker input; got %T", in) + } + outPointer, _ := out.(*coreleadership.Tracker) + if outPointer == nil { + return errors.Errorf("expected *leadership.Tracker output; got %T", out) } *outPointer = inWorker return nil === modified file 'src/github.com/juju/juju/worker/leadership/manifold_test.go' --- src/github.com/juju/juju/worker/leadership/manifold_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/leadership/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,7 @@ "github.com/juju/juju/agent" "github.com/juju/juju/api/base" + coreleadership "github.com/juju/juju/core/leadership" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" dt "github.com/juju/juju/worker/dependency/testing" @@ -70,7 +71,7 @@ "agent-name": dt.StubResource{Output: dummyAgent}, "api-caller-name": dt.StubResource{Output: dummyApiCaller}, }) - s.PatchValue(leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { + s.PatchValue(&leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { s.AddCall("newManifoldWorker", a, apiCaller, guarantee) return nil, errors.New("blammo") }) @@ -92,7 +93,7 @@ "api-caller-name": dt.StubResource{Output: dummyApiCaller}, }) dummyWorker := &dummyWorker{} - s.PatchValue(leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { + s.PatchValue(&leadership.NewManifoldWorker, func(a agent.Agent, apiCaller base.APICaller, guarantee time.Duration) (worker.Worker, error) { s.AddCall("newManifoldWorker", a, apiCaller, guarantee) return dummyWorker, nil }) @@ -108,21 +109,21 @@ func (s *ManifoldSuite) TestOutputBadTarget(c *gc.C) { var target interface{} - err := s.manifold.Output(leadership.DummyTrackerWorker(), &target) + err := s.manifold.Output(&leadership.Tracker{}, &target) c.Check(target, gc.IsNil) - c.Check(err.Error(), gc.Equals, "expected *leadership.tracker->*leadership.Tracker; got *leadership.tracker->*interface {}") + c.Check(err.Error(), gc.Equals, "expected *leadership.Tracker output; got *interface {}") } func (s *ManifoldSuite) TestOutputBadWorker(c *gc.C) { - var target leadership.Tracker + var target coreleadership.Tracker err := s.manifold.Output(&dummyWorker{}, &target) c.Check(target, gc.IsNil) - c.Check(err.Error(), gc.Equals, "expected *leadership.tracker->*leadership.Tracker; got *leadership_test.dummyWorker->*leadership.Tracker") + c.Check(err.Error(), gc.Equals, "expected *Tracker input; got *leadership_test.dummyWorker") } func (s *ManifoldSuite) TestOutputSuccess(c *gc.C) { - source := leadership.DummyTrackerWorker() - var target leadership.Tracker + source := &leadership.Tracker{} + var target coreleadership.Tracker err := s.manifold.Output(source, &target) c.Check(err, jc.ErrorIsNil) c.Check(target, gc.Equals, source) === modified file 'src/github.com/juju/juju/worker/leadership/package_test.go' --- src/github.com/juju/juju/worker/leadership/package_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/leadership/package_test.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,14 @@ 
import ( stdtesting "testing" + "github.com/juju/testing" + gc "gopkg.in/check.v1" ) func TestPackage(t *stdtesting.T) { + if testing.RaceEnabled { + t.Skip("skipping package under -race, see LP 1518806") + } gc.TestingT(t) } === modified file 'src/github.com/juju/juju/worker/leadership/tracker.go' --- src/github.com/juju/juju/worker/leadership/tracker.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/leadership/tracker.go 2016-03-22 15:18:22 +0000 @@ -11,13 +11,12 @@ "github.com/juju/names" "launchpad.net/tomb" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" ) var logger = loggo.GetLogger("juju.worker.leadership") -// tracker implements TrackerWorker. -type tracker struct { +type Tracker struct { tomb tomb.Tomb claimer leadership.Claimer unitName string @@ -34,18 +33,18 @@ waitingMinion []chan bool } -// NewTrackerWorker returns a TrackerWorker that attempts to claim and retain -// service leadership for the supplied unit. It will claim leadership for twice -// the supplied duration, and once it's leader it will renew leadership every +// NewTracker returns a *Tracker that attempts to claim and retain service +// leadership for the supplied unit. It will claim leadership for twice the +// supplied duration, and once it's leader it will renew leadership every // time the duration elapses. // Thus, successful leadership claims on the resulting Tracker will guarantee -// leadership for the duration supplied here without generating additional calls -// to the supplied manager (which may very well be on the other side of a -// network connection). -func NewTrackerWorker(tag names.UnitTag, claimer leadership.Claimer, duration time.Duration) TrackerWorker { +// leadership for the duration supplied here without generating additional +// calls to the supplied manager (which may very well be on the other side of +// a network connection). +func NewTracker(tag names.UnitTag, claimer leadership.Claimer, duration time.Duration) *Tracker { unitName := tag.Id() serviceName, _ := names.UnitService(unitName) - t := &tracker{ + t := &Tracker{ unitName: unitName, serviceName: serviceName, claimer: claimer, @@ -85,41 +84,41 @@ } // Kill is part of the worker.Worker interface. -func (t *tracker) Kill() { +func (t *Tracker) Kill() { t.tomb.Kill(nil) } // Wait is part of the worker.Worker interface. -func (t *tracker) Wait() error { +func (t *Tracker) Wait() error { return t.tomb.Wait() } -// ServiceName is part of the Tracker interface. -func (t *tracker) ServiceName() string { +// ServiceName is part of the leadership.Tracker interface. +func (t *Tracker) ServiceName() string { return t.serviceName } -// ClaimDuration is part of the Tracker interface. -func (t *tracker) ClaimDuration() time.Duration { +// ClaimDuration is part of the leadership.Tracker interface. +func (t *Tracker) ClaimDuration() time.Duration { return t.duration } -// ClaimLeader is part of the Tracker interface. -func (t *tracker) ClaimLeader() Ticket { +// ClaimLeader is part of the leadership.Tracker interface. +func (t *Tracker) ClaimLeader() leadership.Ticket { return t.submit(t.claimTickets) } -// WaitLeader is part of the Tracker interface. -func (t *tracker) WaitLeader() Ticket { +// WaitLeader is part of the leadership.Tracker interface. +func (t *Tracker) WaitLeader() leadership.Ticket { return t.submit(t.waitLeaderTickets) } -// WaitMinion is part of the Tracker interface. -func (t *tracker) WaitMinion() Ticket { +// WaitMinion is part of the leadership.Tracker interface. 
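The three ticket methods above share one usage pattern, spelled out on the interface now living in core/leadership: request a ticket, then either block on Wait or select on Ready. A sketch for a client that must remain interruptible (abort is the client's own shutdown channel):

    ticket := tracker.ClaimLeader()
    select {
    case <-ticket.Ready():
        if ticket.Wait() {
            // Leadership is guaranteed for ClaimDuration from request time.
        }
    case <-abort:
        // Gave up waiting; the ticket can simply be discarded.
    }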
+func (t *Tracker) WaitMinion() leadership.Ticket { return t.submit(t.waitMinionTickets) } -func (t *tracker) loop() error { +func (t *Tracker) loop() error { logger.Debugf("%s making initial claim for %s leadership", t.unitName, t.serviceName) if err := t.refresh(); err != nil { return errors.Trace(err) @@ -159,9 +158,9 @@ } } -// refresh makes a leadership request, and updates tracker state to conform to +// refresh makes a leadership request, and updates Tracker state to conform to // latest known reality. -func (t *tracker) refresh() error { +func (t *Tracker) refresh() error { logger.Debugf("checking %s for %s leadership", t.unitName, t.serviceName) leaseDuration := 2 * t.duration untilTime := time.Now().Add(leaseDuration) @@ -176,7 +175,7 @@ } // setLeader arranges for lease renewal. -func (t *tracker) setLeader(untilTime time.Time) error { +func (t *Tracker) setLeader(untilTime time.Time) error { logger.Debugf("%s confirmed for %s leadership until %s", t.unitName, t.serviceName, untilTime) renewTime := untilTime.Add(-t.duration) logger.Infof("%s will renew %s leadership at %s", t.unitName, t.serviceName, renewTime) @@ -197,7 +196,7 @@ } // setMinion arranges for lease acquisition when there's an opportunity. -func (t *tracker) setMinion() error { +func (t *Tracker) setMinion() error { logger.Infof("%s leadership for %s denied", t.serviceName, t.unitName) t.isMinion = true t.renewLease = nil @@ -214,7 +213,7 @@ // close the claimLease channel and trigger a leadership claim on the // main loop; if anything's gone seriously wrong we'll find out right // away and shut down anyway. (And if this goroutine outlives the - // tracker, it keeps it around as a zombie, but I don't see a way + // Tracker, it keeps it around as a zombie, but I don't see a way // around that...) }() } @@ -231,8 +230,8 @@ return nil } -// isLeader returns true if leadership is guaranteed for the tracker's duration. -func (t *tracker) isLeader() (bool, error) { +// isLeader returns true if leadership is guaranteed for the Tracker's duration. +func (t *Tracker) isLeader() (bool, error) { if !t.isMinion { // Last time we looked, we were leader. select { @@ -253,7 +252,7 @@ // resolveClaim will send true on the supplied channel if leadership can be // successfully verified, and will always close it whether or not it sent. -func (t *tracker) resolveClaim(ticketCh chan bool) error { +func (t *Tracker) resolveClaim(ticketCh chan bool) error { logger.Debugf("resolving %s leadership ticket for %s...", t.serviceName, t.unitName) defer close(ticketCh) if leader, err := t.isLeader(); err != nil { @@ -267,11 +266,11 @@ } // resolveWaitLeader will send true on the supplied channel if leadership can be -// guaranteed for the tracker's duration. It will then close the channel. If +// guaranteed for the Tracker's duration. It will then close the channel. If // leadership cannot be guaranteed, the channel is left untouched until either -// the termination of the tracker or the next invocation of setLeader; at which +// the termination of the Tracker or the next invocation of setLeader; at which // point true is sent if applicable, and the channel is closed. -func (t *tracker) resolveWaitLeader(ticketCh chan bool) error { +func (t *Tracker) resolveWaitLeader(ticketCh chan bool) error { var dontClose bool defer func() { if !dontClose { @@ -293,8 +292,8 @@ } // resolveWaitMinion will close the supplied channel as soon as leadership cannot -// be guaranteed beyond the tracker's duration. 
-func (t *tracker) resolveWaitMinion(ticketCh chan bool) error { +// be guaranteed beyond the Tracker's duration. +func (t *Tracker) resolveWaitMinion(ticketCh chan bool) error { var dontClose bool defer func() { if !dontClose { @@ -315,7 +314,7 @@ } -func (t *tracker) sendTrue(ticketCh chan bool) error { +func (t *Tracker) sendTrue(ticketCh chan bool) error { select { case <-t.tomb.Dying(): return tomb.ErrDying @@ -324,7 +323,7 @@ } } -func (t *tracker) submit(tickets chan chan bool) Ticket { +func (t *Tracker) submit(tickets chan chan bool) leadership.Ticket { ticketCh := make(chan bool, 1) select { case <-t.tomb.Dying(): @@ -339,7 +338,7 @@ return ticket } -// ticket is used with tracker to communicate leadership status back to a client. +// ticket is used by Tracker to communicate leadership status back to a client. type ticket struct { ch chan bool ready chan struct{} @@ -348,19 +347,19 @@ func (t *ticket) run() { defer close(t.ready) - // This is only safe/sane because the tracker promises to close all pending + // This is only safe/sane because the Tracker promises to close all pending // ticket channels when it shuts down. if <-t.ch { t.success = true } } -// Ready is part of the Ticket interface. +// Ready is part of the leadership.Ticket interface. func (t *ticket) Ready() <-chan struct{} { return t.ready } -// Wait is part of the Ticket interface. +// Wait is part of the leadership.Ticket interface. func (t *ticket) Wait() bool { <-t.ready return t.success === modified file 'src/github.com/juju/juju/worker/leadership/tracker_test.go' --- src/github.com/juju/juju/worker/leadership/tracker_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/leadership/tracker_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - coreleadership "github.com/juju/juju/leadership" + coreleadership "github.com/juju/juju/core/leadership" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" "github.com/juju/juju/worker/leadership" @@ -65,13 +65,13 @@ } func (s *TrackerSuite) TestServiceName(c *gc.C) { - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) c.Assert(tracker.ServiceName(), gc.Equals, "led-service") } func (s *TrackerSuite) TestOnLeaderSuccess(c *gc.C) { - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the ticket succeeds. @@ -89,7 +89,7 @@ func (s *TrackerSuite) TestOnLeaderFailure(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the ticket fails. @@ -116,7 +116,7 @@ func (s *TrackerSuite) TestOnLeaderError(c *gc.C) { s.claimer.Stub.SetErrors(errors.New("pow")) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer worker.Stop(tracker) // Check the ticket fails. 
@@ -135,7 +135,7 @@ func (s *TrackerSuite) TestLoseLeadership(c *gc.C) { s.claimer.Stub.SetErrors(nil, coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the first ticket succeeds. @@ -172,7 +172,7 @@ func (s *TrackerSuite) TestGainLeadership(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. @@ -211,7 +211,7 @@ s.claimer.Stub.SetErrors( coreleadership.ErrClaimDenied, nil, coreleadership.ErrClaimDenied, nil, ) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. @@ -260,7 +260,7 @@ } func (s *TrackerSuite) TestWaitLeaderAlreadyLeader(c *gc.C) { - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the ticket succeeds. @@ -278,7 +278,7 @@ func (s *TrackerSuite) TestWaitLeaderBecomeLeader(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. @@ -315,7 +315,7 @@ func (s *TrackerSuite) TestWaitLeaderNeverBecomeLeader(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket fails. @@ -347,7 +347,7 @@ func (s *TrackerSuite) TestWaitMinionAlreadyMinion(c *gc.C) { s.claimer.Stub.SetErrors(coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check initial ticket is closed immediately. @@ -370,7 +370,7 @@ func (s *TrackerSuite) TestWaitMinionBecomeMinion(c *gc.C) { s.claimer.Stub.SetErrors(nil, coreleadership.ErrClaimDenied, nil) - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) // Check the first ticket stays open. @@ -406,7 +406,7 @@ } func (s *TrackerSuite) TestWaitMinionNeverBecomeMinion(c *gc.C) { - tracker := leadership.NewTrackerWorker(s.unitTag, s.claimer, trackerDuration) + tracker := leadership.NewTracker(s.unitTag, s.claimer, trackerDuration) defer assertStop(c, tracker) ticket := tracker.WaitMinion() @@ -434,7 +434,7 @@ }}) } -func assertClaimLeader(c *gc.C, tracker leadership.Tracker, expect bool) { +func assertClaimLeader(c *gc.C, tracker *leadership.Tracker, expect bool) { // Grab a ticket... 
ticket := tracker.ClaimLeader() @@ -443,7 +443,7 @@ assertTicket(c, ticket, expect) } -func assertWaitLeader(c *gc.C, tracker leadership.Tracker, expect bool) { +func assertWaitLeader(c *gc.C, tracker *leadership.Tracker, expect bool) { ticket := tracker.WaitLeader() if expect { assertTicket(c, ticket, true) @@ -459,7 +459,7 @@ } } -func assertWaitMinion(c *gc.C, tracker leadership.Tracker, expect bool) { +func assertWaitMinion(c *gc.C, tracker *leadership.Tracker, expect bool) { ticket := tracker.WaitMinion() if expect { assertTicket(c, ticket, false) @@ -475,7 +475,7 @@ } } -func assertTicket(c *gc.C, ticket leadership.Ticket, expect bool) { +func assertTicket(c *gc.C, ticket coreleadership.Ticket, expect bool) { // Wait for the ticket to give a value... select { case <-time.After(coretesting.LongWait): === modified file 'src/github.com/juju/juju/worker/leadership/util_test.go' --- src/github.com/juju/juju/worker/leadership/util_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/leadership/util_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "github.com/juju/testing" - "github.com/juju/juju/leadership" + "github.com/juju/juju/core/leadership" ) type StubClaimer struct { === added directory 'src/github.com/juju/juju/worker/lease' === added file 'src/github.com/juju/juju/worker/lease/block.go' --- src/github.com/juju/juju/worker/lease/block.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/block.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease + +// block is used to deliver lease-expiry-notification requests to a manager's +// loop goroutine on behalf of BlockUntilLeadershipReleased. +type block struct { + leaseName string + unblock chan struct{} + abort <-chan struct{} +} + +// invoke sends the block request on the supplied channel, and waits for the +// unblock channel to be closed. +func (b block) invoke(ch chan<- block) error { + for { + select { + case <-b.abort: + return errStopped + case ch <- b: + ch = nil + case <-b.unblock: + return nil + } + } +} + +// blocks is used to keep track of expiry-notification channels for +// each lease name. +type blocks map[string][]chan struct{} + +// add records the block's unblock channel under the block's lease name. +func (b blocks) add(block block) { + b[block.leaseName] = append(b[block.leaseName], block.unblock) +} + +// unblock closes all channels added under the supplied name and removes +// them from blocks. +func (b blocks) unblock(leaseName string) { + unblocks := b[leaseName] + delete(b, leaseName) + for _, unblock := range unblocks { + close(unblock) + } +} === added file 'src/github.com/juju/juju/worker/lease/check.go' --- src/github.com/juju/juju/worker/lease/check.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/check.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,76 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease + +import ( + "github.com/juju/errors" +) + +// token implements lease.Token. +type token struct { + leaseName string + holderName string + secretary Secretary + checks chan<- check + abort <-chan struct{} +} + +// Check is part of the lease.Token interface. +func (t token) Check(trapdoorKey interface{}) error { + + // This validation, which could be done at Token creation time, is deferred + // until this point for historical reasons. 
In particular, this code was + // extracted from a *leadership* implementation which has a LeadershipCheck + // method returning a token; if it returned an error as well it would seem + // to imply that the method implemented a check itself, rather than a check + // factory. + // + // Fixing that would be great but seems out of scope. + if err := t.secretary.CheckLease(t.leaseName); err != nil { + return errors.Annotatef(err, "cannot check lease %q", t.leaseName) + } + if err := t.secretary.CheckHolder(t.holderName); err != nil { + return errors.Annotatef(err, "cannot check holder %q", t.holderName) + } + return check{ + leaseName: t.leaseName, + holderName: t.holderName, + trapdoorKey: trapdoorKey, + response: make(chan error), + abort: t.abort, + }.invoke(t.checks) +} + +// check is used to deliver lease-check requests to a manager's loop +// goroutine on behalf of a token (as returned by LeadershipCheck). +type check struct { + leaseName string + holderName string + trapdoorKey interface{} + response chan error + abort <-chan struct{} +} + +// invoke sends the check on the supplied channel and waits for an error +// response. +func (c check) invoke(ch chan<- check) error { + for { + select { + case <-c.abort: + return errStopped + case ch <- c: + ch = nil + case err := <-c.response: + return errors.Trace(err) + } + } +} + +// respond notifies the originating invoke of completion status. +func (c check) respond(err error) { + select { + case <-c.abort: + case c.response <- err: + } +} === added file 'src/github.com/juju/juju/worker/lease/claim.go' --- src/github.com/juju/juju/worker/lease/claim.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/claim.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease + +import ( + "time" + + "github.com/juju/juju/core/lease" +) + +// claim is used to deliver lease-claim requests to a manager's loop +// goroutine on behalf of ClaimLeadership. +type claim struct { + leaseName string + holderName string + duration time.Duration + response chan bool + abort <-chan struct{} +} + +// invoke sends the claim on the supplied channel and waits for a response. +func (c claim) invoke(ch chan<- claim) error { + for { + select { + case <-c.abort: + return errStopped + case ch <- c: + ch = nil + case success := <-c.response: + if !success { + return lease.ErrClaimDenied + } + return nil + } + } +} + +// respond causes the supplied success value to be sent back to invoke. +func (c claim) respond(success bool) { + select { + case <-c.abort: + case c.response <- success: + } +} === added file 'src/github.com/juju/juju/worker/lease/config.go' --- src/github.com/juju/juju/worker/lease/config.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,63 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/utils/clock" + + "github.com/juju/juju/core/lease" +) + +// Secretary is responsible for validating the sanity of lease and holder names +// before bothering the manager with them. +type Secretary interface { + + // CheckLease returns an error if the supplied lease name is not valid. + CheckLease(name string) error + + // CheckHolder returns an error if the supplied holder name is not valid.
+ CheckHolder(name string) error + + // CheckDuration returns an error if the supplied duration is not valid. + CheckDuration(duration time.Duration) error +} + +// ManagerConfig contains the resources and information required to create a +// Manager. +type ManagerConfig struct { + + // Secretary is responsible for validating lease names and holder names. + Secretary Secretary + + // Client is responsible for recording, retrieving, and expiring leases. + Client lease.Client + + // Clock is responsible for reporting the passage of time. + Clock clock.Clock + + // MaxSleep is the longest time the Manager should sleep before + // refreshing its client's leases and checking for expiries. + MaxSleep time.Duration +} + +// Validate returns an error if the configuration contains invalid information +// or missing resources. +func (config ManagerConfig) Validate() error { + if config.Secretary == nil { + return errors.NotValidf("nil Secretary") + } + if config.Client == nil { + return errors.NotValidf("nil Client") + } + if config.Clock == nil { + return errors.NotValidf("nil Clock") + } + if config.MaxSleep <= 0 { + return errors.NotValidf("non-positive MaxSleep") + } + return nil +} === added file 'src/github.com/juju/juju/worker/lease/fixture_test.go' --- src/github.com/juju/juju/worker/lease/fixture_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/fixture_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease_test + +import ( + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + corelease "github.com/juju/juju/core/lease" + "github.com/juju/juju/testing" + "github.com/juju/juju/worker/lease" +) + +const ( + defaultMaxSleep = time.Hour + almostOneSecond = time.Second - time.Nanosecond +) + +var ( + defaultClockStart time.Time +) + +func init() { + // We pick a time with a comfortable h:m:s component but: + // (1) past the int32 unix epoch limit; + // (2) at a 5ns offset to make sure we're not discarding precision; + // (3) in a weird time zone. + value := "2073-03-03T01:00:00.000000005-08:40" + var err error + defaultClockStart, err = time.Parse(time.RFC3339Nano, value) + if err != nil { + panic(err) + } +} + +// offset returns the result of defaultClockStart.Add(d); it exists to make +// expiry tests easier to write. +func offset(d time.Duration) time.Time { + return defaultClockStart.Add(d) +} + +// almostSeconds returns a duration smaller than the supplied number of +// seconds by one nanosecond. +func almostSeconds(seconds int) time.Duration { + if seconds < 1 { + panic("unexpected") + } + return (time.Second * time.Duration(seconds)) - time.Nanosecond +} + +// Fixture allows us to test a *lease.Manager with a usefully-mocked +// clock.Clock and corelease.Client. +type Fixture struct { + + // leases contains the leases the corelease.Client should report when the + // test starts up. + leases map[string]corelease.Info + + // expectCalls contains the calls that should be made to the corelease.Client + // in the course of a test. By specifying a callback you can cause the + // reported leases to change. + expectCalls []call + + // expectDirty should be set for tests that purposefully abuse the manager + // to the extent that it returns an error on Wait(); tests that don't set + // this flag will check that the manager's shutdown error is nil.
+ expectDirty bool +} + +// RunTest sets up a Manager and a Clock and passes them into the supplied +// test function. The manager will be cleaned up afterwards. +func (fix *Fixture) RunTest(c *gc.C, test func(*lease.Manager, *testing.Clock)) { + clock := testing.NewClock(defaultClockStart) + client := NewClient(fix.leases, fix.expectCalls) + manager, err := lease.NewManager(lease.ManagerConfig{ + Clock: clock, + Client: client, + Secretary: Secretary{}, + MaxSleep: defaultMaxSleep, + }) + c.Assert(err, jc.ErrorIsNil) + defer func() { + // Dirty tests will probably have stopped the manager anyway, but no + // sense leaving them around if things aren't exactly as we expect. + manager.Kill() + err := manager.Wait() + if !fix.expectDirty { + c.Check(err, jc.ErrorIsNil) + } + }() + defer client.Wait(c) + waitAlarms(c, clock, 1) + test(manager, clock) +} + +func waitAlarms(c *gc.C, clock *testing.Clock, count int) { + timeout := time.After(testing.LongWait) + for i := 0; i < count; i++ { + select { + case <-clock.Alarms(): + case <-timeout: + c.Fatalf("timed out waiting for %dth alarm set", i) + } + } +} === added file 'src/github.com/juju/juju/worker/lease/manager.go' --- src/github.com/juju/juju/worker/lease/manager.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/manager.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,257 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease + +import ( + "sort" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/utils/clock" + + "github.com/juju/juju/core/lease" + "github.com/juju/juju/worker/catacomb" +) + +var logger = loggo.GetLogger("juju.worker.lease") + +// errStopped is returned to clients when an operation cannot complete because +// the manager has started (and possibly finished) shutdown. +var errStopped = errors.New("lease manager stopped") + +// NewManager returns a new *Manager configured as supplied. The caller takes +// responsibility for killing, and handling errors from, the returned Worker. +func NewManager(config ManagerConfig) (*Manager, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + manager := &Manager{ + config: config, + claims: make(chan claim), + checks: make(chan check), + blocks: make(chan block), + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &manager.catacomb, + Work: manager.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return manager, nil +} + +// Manager implements lease.Claimer, lease.Checker, and worker.Worker. +type Manager struct { + catacomb catacomb.Catacomb + + // config collects all external configuration and dependencies. + config ManagerConfig + + // claims is used to deliver lease claim requests to the loop. + claims chan claim + + // checks is used to deliver lease check requests to the loop. + checks chan check + + // blocks is used to deliver expiry block requests to the loop. + blocks chan block +} + +// Kill is part of the worker.Worker interface. +func (manager *Manager) Kill() { + manager.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (manager *Manager) Wait() error { + return manager.catacomb.Wait() +} + +// loop runs until the manager is stopped. 
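Each public Manager method packages its arguments into one of the request structs above (claim, check, block) and delivers it to the loop on the matching channel; every request carries the catacomb's Dying channel as its abort, so a caller can never wedge against a stopping manager. Client-side this collapses to an ordinary call, as in this sketch mirroring the claim tests below:

    err := manager.Claim("redis", "redis/0", time.Minute)
    switch errors.Cause(err) {
    case nil:
        // The lease is held for at least a minute.
    case lease.ErrClaimDenied:
        // Someone else holds the lease.
    default:
        // Invalid arguments, or the manager stopped.
    }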
+func (manager *Manager) loop() error { + blocks := make(blocks) + for { + if err := manager.choose(blocks); err != nil { + return errors.Trace(err) + } + + leases := manager.config.Client.Leases() + for leaseName := range blocks { + if _, found := leases[leaseName]; !found { + blocks.unblock(leaseName) + } + } + } +} + +// choose breaks the select out of loop to make the blocking logic clearer. +func (manager *Manager) choose(blocks blocks) error { + select { + case <-manager.catacomb.Dying(): + return manager.catacomb.ErrDying() + case <-manager.nextTick(): + return manager.tick() + case claim := <-manager.claims: + return manager.handleClaim(claim) + case check := <-manager.checks: + return manager.handleCheck(check) + case block := <-manager.blocks: + blocks.add(block) + return nil + } +} + +// Claim is part of the lease.Claimer interface. +func (manager *Manager) Claim(leaseName, holderName string, duration time.Duration) error { + if err := manager.config.Secretary.CheckLease(leaseName); err != nil { + return errors.Annotatef(err, "cannot claim lease %q", leaseName) + } + if err := manager.config.Secretary.CheckHolder(holderName); err != nil { + return errors.Annotatef(err, "cannot claim lease for holder %q", holderName) + } + if err := manager.config.Secretary.CheckDuration(duration); err != nil { + return errors.Annotatef(err, "cannot claim lease for %s", duration) + } + return claim{ + leaseName: leaseName, + holderName: holderName, + duration: duration, + response: make(chan bool), + abort: manager.catacomb.Dying(), + }.invoke(manager.claims) +} + +// handleClaim processes and responds to the supplied claim. It will only return +// unrecoverable errors; mere failure to claim just indicates a bad request, and +// is communicated back to the claim's originator. +func (manager *Manager) handleClaim(claim claim) error { + client := manager.config.Client + request := lease.Request{claim.holderName, claim.duration} + err := lease.ErrInvalid + for err == lease.ErrInvalid { + select { + case <-manager.catacomb.Dying(): + return manager.catacomb.ErrDying() + default: + info, found := client.Leases()[claim.leaseName] + switch { + case !found: + err = client.ClaimLease(claim.leaseName, request) + case info.Holder == claim.holderName: + err = client.ExtendLease(claim.leaseName, request) + default: + claim.respond(false) + return nil + } + } + } + if err != nil { + return errors.Trace(err) + } + claim.respond(true) + return nil +} + +// Token is part of the lease.Checker interface. +func (manager *Manager) Token(leaseName, holderName string) lease.Token { + return token{ + leaseName: leaseName, + holderName: holderName, + secretary: manager.config.Secretary, + checks: manager.checks, + abort: manager.catacomb.Dying(), + } +} + +// handleCheck processes and responds to the supplied check. It will only return +// unrecoverable errors; mere untruth of the assertion just indicates a bad +// request, and is communicated back to the check's originator. 
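From the holder's side the token is a cheap revalidation handle: create it once with Token and call Check whenever the guarantee matters. A sketch with a nil trapdoor key, as the check tests below use (ErrNotHeld is the interesting cause):

    token := manager.Token("redis", "redis/0")
    if err := token.Check(nil); err != nil {
        if errors.Cause(err) == lease.ErrNotHeld {
            // The named holder has lost, or never had, the lease.
        }
        // Otherwise: invalid names, or the manager stopped.
    }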
+func (manager *Manager) handleCheck(check check) error { + client := manager.config.Client + info, found := client.Leases()[check.leaseName] + if !found || info.Holder != check.holderName { + if err := client.Refresh(); err != nil { + return errors.Trace(err) + } + info, found = client.Leases()[check.leaseName] + } + + var response error + if !found || info.Holder != check.holderName { + response = lease.ErrNotHeld + } else if check.trapdoorKey != nil { + response = info.Trapdoor(check.trapdoorKey) + } + check.respond(errors.Trace(response)) + return nil +} + +// WaitUntilExpired is part of the lease.Claimer interface. +func (manager *Manager) WaitUntilExpired(leaseName string) error { + if err := manager.config.Secretary.CheckLease(leaseName); err != nil { + return errors.Annotatef(err, "cannot wait for lease %q expiry", leaseName) + } + return block{ + leaseName: leaseName, + unblock: make(chan struct{}), + abort: manager.catacomb.Dying(), + }.invoke(manager.blocks) +} + +// nextTick returns a channel that will send a value at some point when +// we expect to have to do some work; either because at least one lease +// may be ready to expire, or because enough time has passed that +// it's worth checking for stalled collaborators. +func (manager *Manager) nextTick() <-chan time.Time { + now := manager.config.Clock.Now() + nextTick := now.Add(manager.config.MaxSleep) + for _, info := range manager.config.Client.Leases() { + if info.Expiry.After(nextTick) { + continue + } + nextTick = info.Expiry + } + logger.Debugf("waking to check leases at %s", nextTick) + return clock.Alarm(manager.config.Clock, nextTick) +} + +// tick snapshots recent leases and expires any that it can. There +// might be none that need attention; or those that do might already +// have been extended or expired by someone else; so ErrInvalid is +// expected, and ignored, comfortable that the client will have been +// updated in the background; and that we'll see fresh info when we +// subsequently check nextTick(). +// +// It will return only unrecoverable errors. +func (manager *Manager) tick() error { + logger.Tracef("refreshing leases...") + client := manager.config.Client + if err := client.Refresh(); err != nil { + return errors.Trace(err) + } + leases := client.Leases() + + // Sort lease names so we expire in a predictable order for the tests. + names := make([]string, 0, len(leases)) + for name := range leases { + names = append(names, name) + } + sort.Strings(names) + + logger.Tracef("expiring leases...") + now := manager.config.Clock.Now() + for _, name := range names { + if leases[name].Expiry.After(now) { + continue + } + switch err := client.ExpireLease(name); err { + case nil, lease.ErrInvalid: + default: + return errors.Trace(err) + } + } + return nil +} === added file 'src/github.com/juju/juju/worker/lease/manager_block_test.go' --- src/github.com/juju/juju/worker/lease/manager_block_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/manager_block_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,236 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package lease_test + +import ( + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + corelease "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/lease" +) + +type WaitUntilExpiredSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&WaitUntilExpiredSuite{}) + +func (s *WaitUntilExpiredSuite) TestLeadershipNotHeld(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + blockTest := newBlockTest(manager, "redis") + err := blockTest.assertUnblocked(c) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *WaitUntilExpiredSuite) TestLeadershipExpires(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + blockTest := newBlockTest(manager, "redis") + blockTest.assertBlocked(c) + + // Trigger expiry. + clock.Advance(time.Second) + err := blockTest.assertUnblocked(c) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *WaitUntilExpiredSuite) TestLeadershipChanged(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/99", + Expiry: offset(time.Minute), + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + blockTest := newBlockTest(manager, "redis") + blockTest.assertBlocked(c) + + // Trigger abortive expiry. + clock.Advance(time.Second) + blockTest.assertBlocked(c) + }) +} + +func (s *WaitUntilExpiredSuite) TestLeadershipExpiredEarly(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "Refresh", + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + blockTest := newBlockTest(manager, "redis") + blockTest.assertBlocked(c) + + // Induce a refresh by making an unexpected check; it turns out the + // lease had already been expired by someone else. 
+ manager.Token("redis", "redis/99").Check(nil) + err := blockTest.assertUnblocked(c) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *WaitUntilExpiredSuite) TestMultiple(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + "store": corelease.Info{ + Holder: "store/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + leases["store"] = corelease.Info{ + Holder: "store/9", + Expiry: offset(time.Minute), + } + }, + }, { + method: "ExpireLease", + args: []interface{}{"store"}, + err: corelease.ErrInvalid, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + redisTest1 := newBlockTest(manager, "redis") + redisTest1.assertBlocked(c) + redisTest2 := newBlockTest(manager, "redis") + redisTest2.assertBlocked(c) + storeTest1 := newBlockTest(manager, "store") + storeTest1.assertBlocked(c) + storeTest2 := newBlockTest(manager, "store") + storeTest2.assertBlocked(c) + + // Induce attempted expiry; redis was expired already, store was + // refreshed and not expired. + clock.Advance(time.Second) + err := redisTest2.assertUnblocked(c) + c.Check(err, jc.ErrorIsNil) + err = redisTest1.assertUnblocked(c) + c.Check(err, jc.ErrorIsNil) + storeTest2.assertBlocked(c) + storeTest1.assertBlocked(c) + }) +} + +func (s *WaitUntilExpiredSuite) TestKillManager(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + blockTest := newBlockTest(manager, "redis") + blockTest.assertBlocked(c) + + manager.Kill() + err := blockTest.assertUnblocked(c) + c.Check(err, gc.ErrorMatches, "lease manager stopped") + }) +} + +// blockTest wraps a goroutine running WaitUntilExpired, and fails if it's used +// more than a second after creation (which should be *plenty* of time). +type blockTest struct { + manager *lease.Manager + leaseName string + done chan error + abort <-chan time.Time +} + +// newBlockTest starts a test goroutine blocking until the manager confirms +// expiry of the named lease. +func newBlockTest(manager *lease.Manager, leaseName string) *blockTest { + bt := &blockTest{ + manager: manager, + leaseName: leaseName, + done: make(chan error), + abort: time.After(time.Second), + } + go func() { + select { + case <-bt.abort: + case bt.done <- bt.manager.WaitUntilExpired(bt.leaseName): + } + }() + return bt +} + +func (bt *blockTest) assertBlocked(c *gc.C) { + select { + case err := <-bt.done: + c.Fatalf("unblocked unexpectedly with %v", err) + default: + } +} + +func (bt *blockTest) assertUnblocked(c *gc.C) error { + select { + case err := <-bt.done: + return err + case <-bt.abort: + c.Fatalf("timed out before unblocking") + } + panic("unreachable") +} === added file 'src/github.com/juju/juju/worker/lease/manager_check_test.go' --- src/github.com/juju/juju/worker/lease/manager_check_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/manager_check_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,129 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
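The blockTest harness above generalizes to any blocking API under test: run the call in a goroutine feeding a done channel, then assert with a select against a timeout. Reduced to a standalone sketch (blockingCall stands in for the method under test; note that, unlike blockTest, this shape leaks the goroutine if the timeout fires):

    done := make(chan error)
    go func() { done <- blockingCall() }()
    select {
    case err := <-done:
        c.Check(err, jc.ErrorIsNil) // unblocked; assert on the result
    case <-time.After(coretesting.LongWait):
        c.Fatalf("timed out waiting for call to return")
    }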
+ +package lease_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + corelease "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/lease" +) + +type TokenSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&TokenSuite{}) + +func (s *TokenSuite) TestSuccess(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + Trapdoor: corelease.LockedTrapdoor, + }, + }, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + err := token.Check(nil) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *TokenSuite) TestMissingRefresh_Success(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "Refresh", + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + Trapdoor: corelease.LockedTrapdoor, + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + err := token.Check(nil) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *TokenSuite) TestOtherHolderRefresh_Success(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "Refresh", + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + Trapdoor: corelease.LockedTrapdoor, + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + err := token.Check(nil) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *TokenSuite) TestRefresh_Failure_Missing(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "Refresh", + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + err := token.Check(nil) + c.Check(errors.Cause(err), gc.Equals, corelease.ErrNotHeld) + }) +} + +func (s *TokenSuite) TestRefresh_Failure_OtherHolder(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "Refresh", + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/1", + Expiry: offset(time.Second), + Trapdoor: corelease.LockedTrapdoor, + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + err := token.Check(nil) + c.Check(errors.Cause(err), gc.Equals, corelease.ErrNotHeld) + }) +} + +func (s *TokenSuite) TestRefresh_Error(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "Refresh", + err: errors.New("crunch squish"), + }}, + expectDirty: true, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + c.Check(token.Check(nil), gc.ErrorMatches, "lease manager stopped") + err := manager.Wait() + c.Check(err, gc.ErrorMatches, "crunch squish") + }) +} === added file 'src/github.com/juju/juju/worker/lease/manager_claim_test.go' --- src/github.com/juju/juju/worker/lease/manager_claim_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/manager_claim_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,205 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package lease_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + corelease "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/lease" +) + +type ClaimSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&ClaimSuite{}) + +func (s *ClaimSuite) TestClaimLease_Success(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "ClaimLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *ClaimSuite) TestClaimLease_Success_SameHolder(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "ClaimLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + } + }, + }, { + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *ClaimSuite) TestClaimLease_Failure_OtherHolder(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "ClaimLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/1", + Expiry: offset(time.Second), + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, gc.Equals, corelease.ErrClaimDenied) + }) +} + +func (s *ClaimSuite) TestClaimLease_Failure_Error(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "ClaimLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + err: errors.New("lol borken"), + }}, + expectDirty: true, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, gc.ErrorMatches, "lease manager stopped") + err = manager.Wait() + c.Check(err, gc.ErrorMatches, "lol borken") + }) +} + +func (s *ClaimSuite) TestExtendLease_Success(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *ClaimSuite) TestExtendLease_Success_Expired(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }, { + method: "ClaimLease", + args: 
[]interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *ClaimSuite) TestExtendLease_Failure_OtherHolder(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/1", + Expiry: offset(time.Second), + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, gc.Equals, corelease.ErrClaimDenied) + }) +} + +func (s *ClaimSuite) TestExtendLease_Failure_Error(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + err: errors.New("boom splat"), + }}, + expectDirty: true, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, gc.ErrorMatches, "lease manager stopped") + err = manager.Wait() + c.Check(err, gc.ErrorMatches, "boom splat") + }) +} + +func (s *ClaimSuite) TestOtherHolder_Failure(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/1", + Expiry: offset(time.Second), + }, + }, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("redis", "redis/0", time.Minute) + c.Check(err, gc.Equals, corelease.ErrClaimDenied) + }) +} === added file 'src/github.com/juju/juju/worker/lease/manager_expire_test.go' --- src/github.com/juju/juju/worker/lease/manager_expire_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/manager_expire_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,332 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package lease_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + corelease "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/lease" +) + +type ExpireSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&ExpireSuite{}) + +func (s *ExpireSuite) TestStartup_ExpiryInPast(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{Expiry: offset(-time.Second)}, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(_ *lease.Manager, _ *coretesting.Clock) {}) +} + +func (s *ExpireSuite) TestStartup_ExpiryInFuture(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{Expiry: offset(time.Second)}, + }, + } + fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + clock.Advance(almostSeconds(1)) + }) +} + +func (s *ExpireSuite) TestStartup_ExpiryInFuture_TimePasses(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{Expiry: offset(time.Second)}, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + clock.Advance(time.Second) + }) +} + +func (s *ExpireSuite) TestStartup_NoExpiry_NotLongEnough(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + clock.Advance(almostSeconds(3600)) + }) +} + +func (s *ExpireSuite) TestStartup_NoExpiry_LongEnough(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "goose": corelease.Info{Expiry: offset(3 * time.Hour)}, + }, + expectCalls: []call{{ + method: "Refresh", + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Expiry: offset(time.Minute), + } + }, + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + clock.Advance(time.Hour) + }) +} + +func (s *ExpireSuite) TestExpire_ErrInvalid_Expired(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{Expiry: offset(time.Second)}, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + clock.Advance(time.Second) + }) +} + +func (s *ExpireSuite) TestExpire_ErrInvalid_Updated(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{Expiry: offset(time.Second)}, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{Expiry: offset(time.Minute)} + }, + }}, + } + fix.RunTest(c, func(_ *lease.Manager, clock *coretesting.Clock) { + clock.Advance(time.Second) + }) +} + +func (s 
*ExpireSuite) TestExpire_OtherError(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{Expiry: offset(time.Second)}, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + err: errors.New("snarfblat hobalob"), + }}, + expectDirty: true, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + clock.Advance(time.Second) + err := manager.Wait() + c.Check(err, gc.ErrorMatches, "snarfblat hobalob") + }) +} + +func (s *ExpireSuite) TestClaim_ExpiryInFuture(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "ClaimLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(63 * time.Second), + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + // Ask for a minute, actually get 63s. Don't expire early. + err := manager.Claim("redis", "redis/0", time.Minute) + c.Assert(err, jc.ErrorIsNil) + clock.Advance(almostSeconds(63)) + }) +} + +func (s *ExpireSuite) TestClaim_ExpiryInFuture_TimePasses(c *gc.C) { + fix := &Fixture{ + expectCalls: []call{{ + method: "ClaimLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(63 * time.Second), + } + }, + }, { + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + // Ask for a minute, actually get 63s. Expire on time. + err := manager.Claim("redis", "redis/0", time.Minute) + c.Assert(err, jc.ErrorIsNil) + clock.Advance(63 * time.Second) + }) +} + +func (s *ExpireSuite) TestExtend_ExpiryInFuture(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(63 * time.Second), + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + // Ask for a minute, actually get 63s. Don't expire early. + err := manager.Claim("redis", "redis/0", time.Minute) + c.Assert(err, jc.ErrorIsNil) + clock.Advance(almostSeconds(63)) + }) +} + +func (s *ExpireSuite) TestExtend_ExpiryInFuture_TimePasses(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + }, + expectCalls: []call{{ + method: "ExtendLease", + args: []interface{}{"redis", corelease.Request{"redis/0", time.Minute}}, + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(63 * time.Second), + } + }, + }, { + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + // Ask for a minute, actually get 63s. Expire on time. 
+ err := manager.Claim("redis", "redis/0", time.Minute) + c.Assert(err, jc.ErrorIsNil) + clock.Advance(63 * time.Second) + }) +} + +func (s *ExpireSuite) TestExpire_Multiple(c *gc.C) { + fix := &Fixture{ + leases: map[string]corelease.Info{ + "redis": corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + }, + "store": corelease.Info{ + Holder: "store/3", + Expiry: offset(5 * time.Second), + }, + "tokumx": corelease.Info{ + Holder: "tokumx/5", + Expiry: offset(10 * time.Second), // will not expire. + }, + "ultron": corelease.Info{ + Holder: "ultron/7", + Expiry: offset(5 * time.Second), + }, + "vvvvvv": corelease.Info{ + Holder: "vvvvvv/2", + Expiry: offset(time.Second), // would expire, but errors first. + }, + }, + expectCalls: []call{{ + method: "Refresh", + }, { + method: "ExpireLease", + args: []interface{}{"redis"}, + callback: func(leases map[string]corelease.Info) { + delete(leases, "redis") + }, + }, { + method: "ExpireLease", + args: []interface{}{"store"}, + err: corelease.ErrInvalid, + callback: func(leases map[string]corelease.Info) { + delete(leases, "store") + }, + }, { + method: "ExpireLease", + args: []interface{}{"ultron"}, + err: errors.New("what is this?"), + }}, + expectDirty: true, + } + fix.RunTest(c, func(manager *lease.Manager, clock *coretesting.Clock) { + clock.Advance(5 * time.Second) + err := manager.Wait() + c.Check(err, gc.ErrorMatches, "what is this\\?") + }) +} === added file 'src/github.com/juju/juju/worker/lease/manager_validation_test.go' --- src/github.com/juju/juju/worker/lease/manager_validation_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/manager_validation_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,162 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package lease_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + corelease "github.com/juju/juju/core/lease" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/lease" +) + +type ValidationSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&ValidationSuite{}) + +func (s *ValidationSuite) TestMissingClient(c *gc.C) { + manager, err := lease.NewManager(lease.ManagerConfig{ + Clock: struct{ clock.Clock }{}, + Secretary: struct{ lease.Secretary }{}, + MaxSleep: time.Minute, + }) + c.Check(err, gc.ErrorMatches, "nil Client not valid") + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(manager, gc.IsNil) +} + +func (s *ValidationSuite) TestMissingClock(c *gc.C) { + manager, err := lease.NewManager(lease.ManagerConfig{ + Client: struct{ corelease.Client }{}, + Secretary: struct{ lease.Secretary }{}, + MaxSleep: time.Minute, + }) + c.Check(err, gc.ErrorMatches, "nil Clock not valid") + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(manager, gc.IsNil) +} + +func (s *ValidationSuite) TestMissingSecretary(c *gc.C) { + manager, err := lease.NewManager(lease.ManagerConfig{ + Client: struct{ corelease.Client }{}, + Clock: struct{ clock.Clock }{}, + }) + c.Check(err, gc.ErrorMatches, "nil Secretary not valid") + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(manager, gc.IsNil) +} + +func (s *ValidationSuite) TestMissingMaxSleep(c *gc.C) { + manager, err := lease.NewManager(lease.ManagerConfig{ + Client: NewClient(nil, nil), + Secretary: struct{ lease.Secretary }{}, + Clock: coretesting.NewClock(time.Now()), + }) + c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(manager, gc.IsNil) +} + +func (s *ValidationSuite) TestNegativeMaxSleep(c *gc.C) { + manager, err := lease.NewManager(lease.ManagerConfig{ + Client: NewClient(nil, nil), + Clock: coretesting.NewClock(time.Now()), + Secretary: struct{ lease.Secretary }{}, + MaxSleep: -time.Nanosecond, + }) + c.Check(err, gc.ErrorMatches, "non-positive MaxSleep not valid") + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(manager, gc.IsNil) +} + +func (s *ValidationSuite) TestClaim_LeaseName(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("INVALID", "bar/0", time.Minute) + c.Check(err, gc.ErrorMatches, `cannot claim lease "INVALID": name not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + }) +} + +func (s *ValidationSuite) TestClaim_HolderName(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("foo", "INVALID", time.Minute) + c.Check(err, gc.ErrorMatches, `cannot claim lease for holder "INVALID": name not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + }) +} + +func (s *ValidationSuite) TestClaim_Duration(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.Claim("foo", "bar/0", time.Second) + c.Check(err, gc.ErrorMatches, `cannot claim lease for 1s: time not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + }) +} + +func (s *ValidationSuite) TestToken_LeaseName(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("INVALID", "bar/0") + err := token.Check(nil) + c.Check(err, 
gc.ErrorMatches, `cannot check lease "INVALID": name not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + }) +} + +func (s *ValidationSuite) TestToken_HolderName(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("foo", "INVALID") + err := token.Check(nil) + c.Check(err, gc.ErrorMatches, `cannot check holder "INVALID": name not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + }) +} + +func (s *ValidationSuite) TestToken_OutPtr(c *gc.C) { + expectKey := "bad" + expectErr := errors.New("bad") + + fix := &Fixture{ + expectCalls: []call{{ + method: "Refresh", + callback: func(leases map[string]corelease.Info) { + leases["redis"] = corelease.Info{ + Holder: "redis/0", + Expiry: offset(time.Second), + Trapdoor: func(gotKey interface{}) error { + c.Check(gotKey, gc.Equals, &expectKey) + return expectErr + }, + } + }, + }}, + } + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + token := manager.Token("redis", "redis/0") + err := token.Check(&expectKey) + cause := errors.Cause(err) + c.Check(cause, gc.Equals, expectErr) + }) +} + +func (s *ValidationSuite) TestWaitUntilExpired_LeaseName(c *gc.C) { + fix := &Fixture{} + fix.RunTest(c, func(manager *lease.Manager, _ *coretesting.Clock) { + err := manager.WaitUntilExpired("INVALID") + c.Check(err, gc.ErrorMatches, `cannot wait for lease "INVALID" expiry: name not valid`) + c.Check(err, jc.Satisfies, errors.IsNotValid) + }) +} === added file 'src/github.com/juju/juju/worker/lease/package_test.go' --- src/github.com/juju/juju/worker/lease/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/lease/util_test.go' --- src/github.com/juju/juju/worker/lease/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/lease/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,159 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package lease_test + +import ( + "fmt" + "time" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/core/lease" +) + +// Secretary implements lease.Secretary for testing purposes. +type Secretary struct{} + +// CheckLease is part of the lease.Secretary interface. +func (Secretary) CheckLease(name string) error { + return checkName(name) +} + +// CheckHolder is part of the lease.Secretary interface. +func (Secretary) CheckHolder(name string) error { + return checkName(name) +} + +func checkName(name string) error { + if name == "INVALID" { + return errors.NotValidf("name") + } + return nil +} + +// CheckDuration is part of the lease.Secretary interface. +func (Secretary) CheckDuration(duration time.Duration) error { + if duration != time.Minute { + return errors.NotValidf("time") + } + return nil +} + +// Client implements corelease.Client for testing purposes. +type Client struct { + leases map[string]lease.Info + expect []call + failed string + done chan struct{} +} + +// NewClient initializes and returns a new client configured to report +// the supplied leases and expect the supplied calls. 
+func NewClient(leases map[string]lease.Info, expect []call) *Client {
+	if leases == nil {
+		leases = make(map[string]lease.Info)
+	}
+	done := make(chan struct{})
+	if len(expect) == 0 {
+		close(done)
+	}
+	return &Client{
+		leases: leases,
+		expect: expect,
+		done:   done,
+	}
+}
+
+// Wait will return when all expected calls have been made, or fail the test
+// if they don't happen within a second. (You control the clock; your tests
+// should pass in *way* less than a second of wall-clock time.)
+func (client *Client) Wait(c *gc.C) {
+	select {
+	case <-client.done:
+		if client.failed != "" {
+			c.Fatalf("%s", client.failed)
+		}
+	case <-time.After(time.Second):
+		c.Fatalf("Client test took way too long")
+	}
+}
+
+// Leases is part of the lease.Client interface.
+func (client *Client) Leases() map[string]lease.Info {
+	result := make(map[string]lease.Info)
+	for k, v := range client.leases {
+		result[k] = v
+	}
+	return result
+}
+
+// call implements the bulk of the lease.Client interface.
+func (client *Client) call(method string, args []interface{}) error {
+	select {
+	case <-client.done:
+		return errors.Errorf("Client method called after test complete: %s %v", method, args)
+	default:
+		defer func() {
+			if len(client.expect) == 0 || client.failed != "" {
+				close(client.done)
+			}
+		}()
+	}
+
+	expect := client.expect[0]
+	client.expect = client.expect[1:]
+	if expect.callback != nil {
+		expect.callback(client.leases)
+	}
+
+	if method == expect.method {
+		if ok, _ := jc.DeepEqual(args, expect.args); ok {
+			return expect.err
+		}
+	}
+	client.failed = fmt.Sprintf("unexpected Client call:\n  actual: %s %v\n  expect: %s %v",
+		method, args, expect.method, expect.args,
+	)
+	return errors.New(client.failed)
+}
+
+// ClaimLease is part of the corelease.Client interface.
+func (client *Client) ClaimLease(name string, request lease.Request) error {
+	return client.call("ClaimLease", []interface{}{name, request})
+}
+
+// ExtendLease is part of the corelease.Client interface.
+func (client *Client) ExtendLease(name string, request lease.Request) error {
+	return client.call("ExtendLease", []interface{}{name, request})
+}
+
+// ExpireLease is part of the corelease.Client interface.
+func (client *Client) ExpireLease(name string) error {
+	return client.call("ExpireLease", []interface{}{name})
+}
+
+// Refresh is part of the lease.Client interface.
+func (client *Client) Refresh() error {
+	return client.call("Refresh", nil)
+}
+
+// call defines an expected method call on a Client; it encodes:
+type call struct {
+
+	// method is the name of the method.
+	method string
+
+	// args holds the expected arguments.
+	args []interface{}
+
+	// err is the error to return.
+	err error
+
+	// callback, if non-nil, will be passed the internal leases map, for
+	// modification if desired. Otherwise you can use it to, e.g., assert
+	// clock time.
+	callback func(leases map[string]lease.Info)
+}
=== removed directory 'src/github.com/juju/juju/worker/localstorage'
=== removed file 'src/github.com/juju/juju/worker/localstorage/config.go'
--- src/github.com/juju/juju/worker/localstorage/config.go	2014-08-20 15:00:12 +0000
+++ src/github.com/juju/juju/worker/localstorage/config.go	1970-01-01 00:00:00 +0000
@@ -1,116 +0,0 @@
-// Copyright 2013 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
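The test Client above is a scripted stub: calls must arrive in exactly the order of the expect slice, anything off-script records a failure, and the done channel doubles as a completion signal. The core of that idea, distilled out of the lease-specific details (all names invented):

    package sketch

    import (
        "fmt"
        "reflect"
    )

    // step is one expected call in a script: a method name, the arguments
    // it must be called with, and the error to hand back.
    type step struct {
        method string
        args   []interface{}
        err    error
    }

    // Script fails fast on any call that deviates from the expected
    // sequence, so a test failure points at the first wrong call rather
    // than at a mysterious later symptom.
    type Script struct {
        steps  []step
        failed error
    }

    func (s *Script) call(method string, args ...interface{}) error {
        if s.failed != nil {
            return s.failed
        }
        if len(s.steps) == 0 {
            s.failed = fmt.Errorf("unexpected call after script complete: %s %v", method, args)
            return s.failed
        }
        next := s.steps[0]
        s.steps = s.steps[1:]
        if method != next.method || !reflect.DeepEqual(args, next.args) {
            s.failed = fmt.Errorf("unexpected call:\n  actual: %s %v\n  expect: %s %v",
                method, args, next.method, next.args)
            return s.failed
        }
        return next.err
    }

    // Done reports whether the whole script ran, and what went wrong if not.
    func (s *Script) Done() error {
        if s.failed != nil {
            return s.failed
        }
        if n := len(s.steps); n > 0 {
            return fmt.Errorf("%d scripted calls never happened", n)
        }
        return nil
    }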
- -package localstorage - -import ( - goyaml "gopkg.in/yaml.v1" - - "github.com/juju/juju/agent" -) - -const ( - // TODO(axw) 2013-09-25 bug #1230131 - // Move these variables out of agent when we can do upgrades in - // the right place. In this case, the local provider should do - // the envvar-to-agent.conf migration. - StorageDir = agent.StorageDir - StorageAddr = agent.StorageAddr - StorageCACert = "StorageCACert" - StorageCAKey = "StorageCAKey" - StorageHostnames = "StorageHostnames" - StorageAuthKey = "StorageAuthKey" -) - -// LocalStorageConfig is an interface that, if implemented, may be used -// to configure a machine agent for use with the localstorage worker in -// this package. -type LocalStorageConfig interface { - StorageDir() string - StorageAddr() string -} - -// LocalTLSStorageConfig is an interface that extends LocalStorageConfig -// to support serving storage over TLS. -type LocalTLSStorageConfig interface { - LocalStorageConfig - - // StorageCACert is the CA certificate in PEM format. - StorageCACert() string - - // StorageCAKey is the CA private key in PEM format. - StorageCAKey() string - - // StorageHostnames is the set of hostnames that will - // be assigned to the storage server's certificate. - StorageHostnames() []string - - // StorageAuthKey is the key that clients must present - // to perform modifying operations. - StorageAuthKey() string -} - -type config struct { - storageDir string - storageAddr string - caCertPEM string - caKeyPEM string - hostnames []string - authkey string -} - -// StoreConfig takes a LocalStorageConfig (or derivative interface), -// and stores it in a map[string]string suitable for updating an -// agent.Config's key/value map. -func StoreConfig(storageConfig LocalStorageConfig) (map[string]string, error) { - kv := make(map[string]string) - kv[StorageDir] = storageConfig.StorageDir() - kv[StorageAddr] = storageConfig.StorageAddr() - if tlsConfig, ok := storageConfig.(LocalTLSStorageConfig); ok { - if authkey := tlsConfig.StorageAuthKey(); authkey != "" { - kv[StorageAuthKey] = authkey - } - if cert := tlsConfig.StorageCACert(); cert != "" { - kv[StorageCACert] = cert - } - if key := tlsConfig.StorageCAKey(); key != "" { - kv[StorageCAKey] = key - } - if hostnames := tlsConfig.StorageHostnames(); len(hostnames) > 0 { - data, err := goyaml.Marshal(hostnames) - if err != nil { - return nil, err - } - kv[StorageHostnames] = string(data) - } - } - return kv, nil -} - -func loadConfig(agentConfig agent.Config) (*config, error) { - config := &config{ - storageDir: agentConfig.Value(StorageDir), - storageAddr: agentConfig.Value(StorageAddr), - authkey: agentConfig.Value(StorageAuthKey), - } - - caCertPEM := agentConfig.Value(StorageCACert) - if len(caCertPEM) > 0 { - config.caCertPEM = caCertPEM - } - - caKeyPEM := agentConfig.Value(StorageCAKey) - if len(caKeyPEM) > 0 { - config.caKeyPEM = caKeyPEM - } - - hostnames := agentConfig.Value(StorageHostnames) - if len(hostnames) > 0 { - err := goyaml.Unmarshal([]byte(hostnames), &config.hostnames) - if err != nil { - return nil, err - } - } - - return config, nil -} === removed file 'src/github.com/juju/juju/worker/localstorage/config_test.go' --- src/github.com/juju/juju/worker/localstorage/config_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/localstorage/config_test.go 1970-01-01 00:00:00 +0000 @@ -1,111 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
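The removed config.go above has one trick worth noting: agent.Config only stores flat string key/values, so StoreConfig YAML-encodes the hostname list into a single value and loadConfig decodes it again on the way out. Reduced to its essentials, using the same gopkg.in/yaml.v1 package the file imports (helper names are invented):

    package sketch

    import (
        goyaml "gopkg.in/yaml.v1"
    )

    // storeHostnames flattens a string slice into a single YAML-encoded
    // value, the way StoreConfig does, so it fits a string-to-string map.
    func storeHostnames(kv map[string]string, hostnames []string) error {
        if len(hostnames) == 0 {
            return nil
        }
        data, err := goyaml.Marshal(hostnames)
        if err != nil {
            return err
        }
        kv["StorageHostnames"] = string(data)
        return nil
    }

    // loadHostnames reverses storeHostnames, tolerating an absent key.
    func loadHostnames(kv map[string]string) ([]string, error) {
        var hostnames []string
        if raw := kv["StorageHostnames"]; raw != "" {
            if err := goyaml.Unmarshal([]byte(raw), &hostnames); err != nil {
                return nil, err
            }
        }
        return hostnames, nil
    }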
- -package localstorage_test - -import ( - stdtesting "testing" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" - - "github.com/juju/juju/worker/localstorage" -) - -type configSuite struct{} - -var _ = gc.Suite(&configSuite{}) - -func TestPackage(t *stdtesting.T) { - gc.TestingT(t) -} - -type localStorageConfig struct { - storageDir string - storageAddr string -} - -func (c *localStorageConfig) StorageDir() string { - return c.storageDir -} - -func (c *localStorageConfig) StorageAddr() string { - return c.storageAddr -} - -type localTLSStorageConfig struct { - localStorageConfig - caCertPEM string - caKeyPEM string - hostnames []string - authkey string -} - -func (c *localTLSStorageConfig) StorageCACert() string { - return c.caCertPEM -} - -func (c *localTLSStorageConfig) StorageCAKey() string { - return c.caKeyPEM -} - -func (c *localTLSStorageConfig) StorageHostnames() []string { - return c.hostnames -} - -func (c *localTLSStorageConfig) StorageAuthKey() string { - return c.authkey -} - -func (*configSuite) TestStoreConfig(c *gc.C) { - var config localStorageConfig - m, err := localstorage.StoreConfig(&config) - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, map[string]string{ - localstorage.StorageDir: "", - localstorage.StorageAddr: "", - }) - - config.storageDir = "a" - config.storageAddr = "b" - m, err = localstorage.StoreConfig(&config) - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, map[string]string{ - localstorage.StorageDir: config.storageDir, - localstorage.StorageAddr: config.storageAddr, - }) -} - -func (*configSuite) TestStoreConfigTLS(c *gc.C) { - var config localTLSStorageConfig - m, err := localstorage.StoreConfig(&config) - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, map[string]string{ - localstorage.StorageDir: "", - localstorage.StorageAddr: "", - }) - - config.storageDir = "a" - config.storageAddr = "b" - config.caCertPEM = "heyhey" - config.caKeyPEM = "hoho" - config.hostnames = []string{"easy", "as", "1.2.3"} - config.authkey = "password" - m, err = localstorage.StoreConfig(&config) - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, map[string]string{ - localstorage.StorageDir: config.storageDir, - localstorage.StorageAddr: config.storageAddr, - localstorage.StorageCACert: string(config.caCertPEM), - localstorage.StorageCAKey: string(config.caKeyPEM), - localstorage.StorageHostnames: mustMarshalYAML(c, config.hostnames), - localstorage.StorageAuthKey: config.authkey, - }) -} - -func mustMarshalYAML(c *gc.C, v interface{}) string { - data, err := goyaml.Marshal(v) - c.Assert(err, jc.ErrorIsNil) - return string(data) -} === removed file 'src/github.com/juju/juju/worker/localstorage/worker.go' --- src/github.com/juju/juju/worker/localstorage/worker.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/worker/localstorage/worker.go 1970-01-01 00:00:00 +0000 @@ -1,88 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package localstorage - -import ( - "net" - - "github.com/juju/loggo" - "launchpad.net/tomb" - - "github.com/juju/juju/agent" - "github.com/juju/juju/environs/filestorage" - "github.com/juju/juju/environs/httpstorage" - "github.com/juju/juju/worker" -) - -var logger = loggo.GetLogger("juju.worker.localstorage") - -type storageWorker struct { - config agent.Config - tomb tomb.Tomb -} - -func NewWorker(config agent.Config) worker.Worker { - w := &storageWorker{config: config} - go func() { - defer w.tomb.Done() - w.tomb.Kill(w.waitForDeath()) - }() - return w -} - -// Kill implements worker.Worker.Kill. -func (s *storageWorker) Kill() { - s.tomb.Kill(nil) -} - -// Wait implements worker.Worker.Wait. -func (s *storageWorker) Wait() error { - return s.tomb.Wait() -} - -func (s *storageWorker) serveStorage(storageAddr, storageDir string, config *config) (net.Listener, error) { - authenticated := len(config.caCertPEM) > 0 && len(config.caKeyPEM) > 0 - scheme := "http://" - if authenticated { - scheme = "https://" - } - logger.Infof("serving storage from %s to %s%s", storageDir, scheme, storageAddr) - storage, err := filestorage.NewFileStorageWriter(storageDir) - if err != nil { - return nil, err - } - if authenticated { - return httpstorage.ServeTLS( - storageAddr, - storage, - config.caCertPEM, - config.caKeyPEM, - config.hostnames, - config.authkey, - ) - } - return httpstorage.Serve(storageAddr, storage) -} - -func (s *storageWorker) waitForDeath() error { - config, err := loadConfig(s.config) - if err != nil { - logger.Errorf("error loading config: %v", err) - return err - } - - storageListener, err := s.serveStorage(config.storageAddr, config.storageDir, config) - if err != nil { - logger.Errorf("error with local storage: %v", err) - return err - } - defer storageListener.Close() - - logger.Infof("storage routines started, awaiting death") - - <-s.tomb.Dying() - - logger.Infof("dying, closing storage listeners") - return tomb.ErrDying -} === modified file 'src/github.com/juju/juju/worker/logger/logger.go' --- src/github.com/juju/juju/worker/logger/logger.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/logger/logger.go 2016-03-22 15:18:22 +0000 @@ -4,11 +4,12 @@ package logger import ( + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/juju/agent" "github.com/juju/juju/api/logger" - "github.com/juju/juju/api/watcher" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -22,18 +23,22 @@ lastConfig string } -var _ worker.NotifyWatchHandler = (*Logger)(nil) - // NewLogger returns a worker.Worker that uses the notify watcher returned // from the setup. 
-func NewLogger(api *logger.State, agentConfig agent.Config) worker.Worker { +func NewLogger(api *logger.State, agentConfig agent.Config) (worker.Worker, error) { logger := &Logger{ api: api, agentConfig: agentConfig, lastConfig: loggo.LoggerInfo(), } log.Debugf("initial log config: %q", logger.lastConfig) - return worker.NewNotifyWorker(logger) + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: logger, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } func (logger *Logger) setLogging() { === modified file 'src/github.com/juju/juju/worker/logger/logger_test.go' --- src/github.com/juju/juju/worker/logger/logger_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logger/logger_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,6 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/agent" - "github.com/juju/juju/api" apilogger "github.com/juju/juju/api/logger" "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" @@ -29,7 +28,6 @@ type LoggerSuite struct { testing.JujuConnSuite - apiRoot api.Connection loggerApi *apilogger.State machine *state.Machine } @@ -38,10 +36,11 @@ func (s *LoggerSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.apiRoot, s.machine = s.OpenAPIAsNewMachine(c) + apiConn, machine := s.OpenAPIAsNewMachine(c) // Create the machiner API facade. - s.loggerApi = s.apiRoot.Logger() + s.loggerApi = apilogger.NewState(apiConn) c.Assert(s.loggerApi, gc.NotNil) + s.machine = machine } func (s *LoggerSuite) waitLoggingInfo(c *gc.C, expected string) { @@ -77,7 +76,9 @@ func (s *LoggerSuite) makeLogger(c *gc.C) (worker.Worker, *mockConfig) { config := agentConfig(c, s.machine.Tag()) - return logger.NewLogger(s.loggerApi, config), config + w, err := logger.NewLogger(s.loggerApi, config) + c.Assert(err, jc.ErrorIsNil) + return w, config } func (s *LoggerSuite) TestRunStop(c *gc.C) { @@ -86,7 +87,7 @@ } func (s *LoggerSuite) TestInitialState(c *gc.C) { - config, err := s.State.EnvironConfig() + config, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) expected := config.LoggingConfig() === modified file 'src/github.com/juju/juju/worker/logger/manifold.go' --- src/github.com/juju/juju/worker/logger/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logger/manifold.go 2016-03-22 15:18:22 +0000 @@ -14,17 +14,17 @@ // ManifoldConfig defines the names of the manifolds on which a // Manifold will depend. -type ManifoldConfig util.AgentApiManifoldConfig +type ManifoldConfig util.PostUpgradeManifoldConfig // Manifold returns a dependency manifold that runs a logger // worker, using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - return util.AgentApiManifold(util.AgentApiManifoldConfig(config), newWorker) + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) } -// newWorker trivially wraps NewLogger to specialise an AgentApiManifold. +// newWorker trivially wraps NewLogger to specialise a PostUpgradeManifold. 
var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { currentConfig := a.CurrentConfig() loggerFacade := logger.NewState(apiCaller) - return NewLogger(loggerFacade, currentConfig), nil + return NewLogger(loggerFacade, currentConfig) } === modified file 'src/github.com/juju/juju/worker/logsender/bufferedlogwriter.go' --- src/github.com/juju/juju/worker/logsender/bufferedlogwriter.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logsender/bufferedlogwriter.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,6 @@ "time" "github.com/juju/errors" - "github.com/juju/juju/feature" "github.com/juju/loggo" "github.com/juju/utils/deque" ) @@ -36,10 +35,6 @@ // InstallBufferedLogWriter creates a new BufferedLogWriter, registers // it with Loggo and returns its output channel. func InstallBufferedLogWriter(maxLen int) (LogRecordCh, error) { - if !feature.IsDbLogEnabled() { - return nil, nil - } - writer := NewBufferedLogWriter(maxLen) err := loggo.RegisterWriter(writerName, writer, loggo.TRACE) if err != nil { @@ -51,10 +46,6 @@ // UninstallBufferedLogWriter removes the BufferedLogWriter previously // installed by InstallBufferedLogWriter and closes it. func UninstallBufferedLogWriter() error { - if !feature.IsDbLogEnabled() { - return nil - } - writer, _, err := loggo.RemoveWriter(writerName) if err != nil { return errors.Annotate(err, "failed to uninstall log buffering") === modified file 'src/github.com/juju/juju/worker/logsender/bufferedlogwriter_test.go' --- src/github.com/juju/juju/worker/logsender/bufferedlogwriter_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logsender/bufferedlogwriter_test.go 2016-03-22 15:18:22 +0000 @@ -130,8 +130,6 @@ } func (s *bufferedLogWriterSuite) TestInstallBufferedLogWriter(c *gc.C) { - s.SetFeatureFlags("db-log") - logsCh, err := logsender.InstallBufferedLogWriter(10) c.Assert(err, jc.ErrorIsNil) defer logsender.UninstallBufferedLogWriter() @@ -153,8 +151,6 @@ } func (s *bufferedLogWriterSuite) TestUninstallBufferedLogWriter(c *gc.C) { - s.SetFeatureFlags("db-log") - _, err := logsender.InstallBufferedLogWriter(10) c.Assert(err, jc.ErrorIsNil) @@ -166,19 +162,6 @@ c.Assert(err, gc.ErrorMatches, "failed to uninstall log buffering: .+") } -func (s *bufferedLogWriterSuite) TestInstallBufferedLogWriterNoFeatureFlag(c *gc.C) { - logsCh, err := logsender.InstallBufferedLogWriter(10) - c.Assert(err, jc.ErrorIsNil) - c.Assert(logsCh, gc.IsNil) -} - -func (s *bufferedLogWriterSuite) TestUninstallBufferedLogWriterNoFeatureFlag(c *gc.C) { - err := logsender.UninstallBufferedLogWriter() - // With the feature flag, uninstalling without first installing - // would result in an error. - c.Assert(err, jc.ErrorIsNil) -} - func (s *bufferedLogWriterSuite) writeAndReceive(c *gc.C) { now := time.Now() s.writer.Write(loggo.INFO, "module", "filename", 99, now, "message") === modified file 'src/github.com/juju/juju/worker/logsender/manifold.go' --- src/github.com/juju/juju/worker/logsender/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logsender/manifold.go 2016-03-22 15:18:22 +0000 @@ -5,43 +5,27 @@ import ( "github.com/juju/juju/agent" - "github.com/juju/juju/feature" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/logsender" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" - "github.com/juju/juju/worker/gate" + "github.com/juju/juju/worker/util" ) -// ManifoldConfig defines the names of the manifolds on which a -// Manifold will depend. 
+// ManifoldConfig defines the names of the manifolds on which a Manifold will +// depend. type ManifoldConfig struct { - AgentName string - APIInfoGateName string - LogSource LogRecordCh + util.PostUpgradeManifoldConfig + LogSource LogRecordCh } // Manifold returns a dependency manifold that runs a logger // worker, using the resource names defined in the supplied config. func Manifold(config ManifoldConfig) dependency.Manifold { - return dependency.Manifold{ - Inputs: []string{ - config.AgentName, - config.APIInfoGateName, - }, - Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { - if !feature.IsDbLogEnabled() { - logger.Warningf("log sender manifold disabled by feature flag") - return nil, dependency.ErrMissing - } - var gate gate.Waiter - if err := getResource(config.APIInfoGateName, &gate); err != nil { - return nil, err - } - var agent agent.Agent - if err := getResource(config.AgentName, &agent); err != nil { - return nil, err - } - return New(config.LogSource, gate, agent), nil - }, + newWorker := func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + return New(config.LogSource, logsender.NewAPI(apiCaller)), nil } + + return util.PostUpgradeManifold(config.PostUpgradeManifoldConfig, newWorker) } === modified file 'src/github.com/juju/juju/worker/logsender/worker.go' --- src/github.com/juju/juju/worker/logsender/worker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logsender/worker.go 2016-03-22 15:18:22 +0000 @@ -4,21 +4,14 @@ package logsender import ( - "encoding/json" "fmt" - "time" "github.com/juju/errors" "github.com/juju/loggo" - "github.com/juju/utils" - "golang.org/x/net/websocket" - "github.com/juju/juju/agent" - "github.com/juju/juju/api" - "github.com/juju/juju/apiserver" + "github.com/juju/juju/api/logsender" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker" - "github.com/juju/juju/worker/gate" ) const loggerName = "juju.worker.logsender" @@ -27,30 +20,23 @@ // New starts a logsender worker which reads log message structs from // a channel and sends them to the JES via the logsink API. -func New(logs LogRecordCh, apiInfoGate gate.Waiter, agent agent.Agent) worker.Worker { +func New(logs LogRecordCh, logSenderAPI *logsender.API) worker.Worker { loop := func(stop <-chan struct{}) error { - logger.Debugf("started log-sender worker; waiting for api info") - select { - case <-apiInfoGate.Unlocked(): - case <-stop: - return nil - } - - logger.Debugf("dialing log-sender connection") - apiInfo, ok := agent.CurrentConfig().APIInfo() - if !ok { - return errors.New("API info not available") - } - conn, err := dialLogsinkAPI(apiInfo) + logWriter, err := logSenderAPI.LogWriter() if err != nil { return errors.Annotate(err, "logsender dial failed") } - defer conn.Close() - + defer logWriter.Close() for { select { case rec := <-logs: - err := sendLogRecord(conn, rec.Time, rec.Module, rec.Location, rec.Level, rec.Message) + err := logWriter.WriteLog(¶ms.LogRecord{ + Time: rec.Time, + Module: rec.Module, + Location: rec.Location, + Level: rec.Level, + Message: rec.Message, + }) if err != nil { return errors.Trace(err) } @@ -70,8 +56,12 @@ // periods. The maximum in-memory log buffer is // quite large (see the InstallBufferedLogWriter // call in jujuDMain). 
- err := sendLogRecord(conn, rec.Time, loggerName, "", loggo.WARNING, - fmt.Sprintf("%d log messages dropped due to lack of API connectivity", rec.DroppedAfter)) + err := logWriter.WriteLog(¶ms.LogRecord{ + Time: rec.Time, + Module: loggerName, + Level: loggo.WARNING, + Message: fmt.Sprintf("%d log messages dropped due to lack of API connectivity", rec.DroppedAfter), + }) if err != nil { return errors.Trace(err) } @@ -84,50 +74,3 @@ } return worker.NewSimpleWorker(loop) } - -func dialLogsinkAPI(apiInfo *api.Info) (*websocket.Conn, error) { - // TODO(mjs) Most of this should be extracted to be shared for - // connections to both /log (debuglog) and /logsink. - header := utils.BasicAuthHeader(apiInfo.Tag.String(), apiInfo.Password) - header.Set("X-Juju-Nonce", apiInfo.Nonce) - conn, err := api.Connect(apiInfo, "/logsink", header, api.DialOpts{}) - if err != nil { - return nil, errors.Annotate(err, "failed to connect to logsink API") - } - - // Read the initial error and translate to a real error. - // Read up to the first new line character. We can't use bufio here as it - // reads too much from the reader. - line := make([]byte, 4096) - n, err := conn.Read(line) - if err != nil { - return nil, errors.Annotate(err, "unable to read initial response") - } - line = line[0:n] - - var errResult params.ErrorResult - err = json.Unmarshal(line, &errResult) - if err != nil { - return nil, errors.Annotate(err, "unable to unmarshal initial response") - } - if errResult.Error != nil { - return nil, errors.Annotatef(errResult.Error, "initial server error") - } - - return conn, nil -} - -func sendLogRecord(conn *websocket.Conn, ts time.Time, module, location string, level loggo.Level, msg string) error { - err := websocket.JSON.Send(conn, &apiserver.LogMessage{ - Time: ts, - Module: module, - Location: location, - Level: level, - Message: msg, - }) - // Note: due to the fire-and-forget nature of the - // logsink API, it is possible that when the - // connection dies, any logs that were "in-flight" - // will not be recorded on the server side. - return errors.Annotate(err, "logsink connection failed") -} === modified file 'src/github.com/juju/juju/worker/logsender/worker_test.go' --- src/github.com/juju/juju/worker/logsender/worker_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/logsender/worker_test.go 2016-03-22 15:18:22 +0000 @@ -8,65 +8,57 @@ "time" "github.com/juju/loggo" + "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" - "github.com/juju/juju/agent" "github.com/juju/juju/api" + apilogsender "github.com/juju/juju/api/logsender" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/testing" "github.com/juju/juju/testing/factory" - "github.com/juju/juju/worker/gate" "github.com/juju/juju/worker/logsender" ) type workerSuite struct { jujutesting.JujuConnSuite - apiInfo *api.Info + + // machineTag holds the tag of a machine created + // for the test. + machineTag names.Tag + + // APIState holds an API connection authenticated + // as the above machine. + APIState api.Connection } var _ = gc.Suite(&workerSuite{}) func (s *workerSuite) SetUpTest(c *gc.C) { - s.SetInitialFeatureFlags("db-log") s.JujuConnSuite.SetUpTest(c) // Create a machine for the client to log in as. 
nonce := "some-nonce" machine, password := s.Factory.MakeMachineReturningPassword(c, &factory.MachineParams{Nonce: nonce}) - s.apiInfo = s.APIInfo(c) - s.apiInfo.Tag = machine.Tag() - s.apiInfo.Password = password - s.apiInfo.Nonce = nonce -} - -func (s *workerSuite) agent() agent.Agent { - return &mockAgent{apiInfo: s.apiInfo} -} - -func (s *workerSuite) TestLockedGate(c *gc.C) { - - // Set a bad password to induce an error if we connect. - s.apiInfo.Password = "lol-borken" - - // Run a logsender worker. - logsCh := make(chan *logsender.LogRecord) - worker := logsender.New(logsCh, lockedGate{}, s.agent()) - - // At the end of the test, make sure we never tried to connect. - defer func() { - worker.Kill() - c.Check(worker.Wait(), jc.ErrorIsNil) - }() - - // Give it a chance to ignore the gate and read the log channel. - select { - case <-time.After(testing.ShortWait): - case logsCh <- &logsender.LogRecord{}: - c.Fatalf("read log channel without waiting for gate") - } + apiInfo := s.APIInfo(c) + apiInfo.Tag = machine.Tag() + apiInfo.Password = password + apiInfo.Nonce = nonce + st, err := api.Open(apiInfo, api.DefaultDialOpts()) + c.Assert(err, gc.IsNil) + s.APIState = st + s.machineTag = machine.Tag() +} + +func (s *workerSuite) TearDownTest(c *gc.C) { + s.APIState.Close() + s.JujuConnSuite.TearDownTest(c) +} + +func (s *workerSuite) logSenderAPI() *apilogsender.API { + return apilogsender.NewAPI(s.APIState) } func (s *workerSuite) TestLogSending(c *gc.C) { @@ -74,7 +66,7 @@ logsCh := make(chan *logsender.LogRecord, logCount) // Start the logsender worker. - worker := logsender.New(logsCh, gate.AlreadyUnlocked{}, s.agent()) + worker := logsender.New(logsCh, s.logSenderAPI()) defer func() { worker.Kill() c.Check(worker.Wait(), jc.ErrorIsNil) @@ -98,8 +90,8 @@ expectedDocs = append(expectedDocs, bson.M{ "t": ts, - "e": s.State.EnvironUUID(), - "n": s.apiInfo.Tag.String(), + "e": s.State.ModelUUID(), + "n": s.machineTag.String(), "m": "logsender-test", "l": location, "v": int(loggo.INFO), @@ -131,7 +123,7 @@ logsCh := make(logsender.LogRecordCh) // Start the logsender worker. - worker := logsender.New(logsCh, gate.AlreadyUnlocked{}, s.agent()) + worker := logsender.New(logsCh, s.logSenderAPI()) defer func() { worker.Kill() c.Check(worker.Wait(), jc.ErrorIsNil) @@ -180,8 +172,8 @@ delete(docs[1], "_id") c.Assert(docs[1], gc.DeepEquals, bson.M{ "t": ts, // Should share timestamp with previous message. 
- "e": s.State.EnvironUUID(), - "n": s.apiInfo.Tag.String(), + "e": s.State.ModelUUID(), + "n": s.machineTag.String(), "m": "juju.worker.logsender", "l": "", "v": int(loggo.WARNING), @@ -189,23 +181,3 @@ }) c.Assert(docs[2]["x"], gc.Equals, "message1") } - -type mockAgent struct { - agent.Agent - agent.Config - apiInfo *api.Info -} - -func (a *mockAgent) CurrentConfig() agent.Config { - return a -} - -func (a *mockAgent) APIInfo() (*api.Info, bool) { - return a.apiInfo, true -} - -type lockedGate struct{} - -func (lockedGate) Unlocked() <-chan struct{} { - return nil -} === modified file 'src/github.com/juju/juju/worker/machinelock/manifold_test.go' --- src/github.com/juju/juju/worker/machinelock/manifold_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/machinelock/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -37,7 +37,7 @@ "agent-name": dt.StubResource{Output: &dummyAgent{}}, }) - lock, err := fslock.NewLock(c.MkDir(), "test-lock") + lock, err := fslock.NewLock(c.MkDir(), "test-lock", fslock.Defaults()) c.Assert(err, jc.ErrorIsNil) s.lock = lock s.PatchValue(machinelock.CreateLock, func(dataDir string) (*fslock.Lock, error) { === modified file 'src/github.com/juju/juju/worker/machinelock/package_test.go' --- src/github.com/juju/juju/worker/machinelock/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/machinelock/package_test.go 2016-03-22 15:18:22 +0000 @@ -4,11 +4,11 @@ package machinelock_test import ( - stdtesting "testing" + "testing" - coretesting "github.com/juju/juju/testing" + gc "gopkg.in/check.v1" ) -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) +func TestPackage(t *testing.T) { + gc.TestingT(t) } === modified file 'src/github.com/juju/juju/worker/machiner/machiner.go' --- src/github.com/juju/juju/worker/machiner/machiner.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/machiner/machiner.go 2016-03-22 15:18:22 +0000 @@ -9,9 +9,9 @@ "github.com/juju/loggo" "github.com/juju/names" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -58,12 +58,18 @@ // // The machineDead function will be called immediately after the machine's // lifecycle is updated to Dead. 
-func NewMachiner(cfg Config) (worker.Worker, error) { +var NewMachiner = func(cfg Config) (worker.Worker, error) { if err := cfg.Validate(); err != nil { return nil, errors.Annotate(err, "validating config") } - mr := &Machiner{config: cfg} - return worker.NewNotifyWorker(mr), nil + handler := &Machiner{config: cfg} + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: handler, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } func (mr *Machiner) SetUp() (watcher.NotifyWatcher, error) { === modified file 'src/github.com/juju/juju/worker/machiner/machiner_test.go' --- src/github.com/juju/juju/worker/machiner/machiner_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/machiner/machiner_test.go 2016-03-22 15:18:22 +0000 @@ -27,6 +27,10 @@ "github.com/juju/juju/worker/machiner" ) +func TestPackage(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} + type MachinerSuite struct { coretesting.BaseSuite accessor *mockMachineAccessor @@ -58,11 +62,17 @@ MachineAccessor: &mockMachineAccessor{}, }) c.Assert(err, gc.ErrorMatches, "validating config: unspecified Tag not valid") - _, err = machiner.NewMachiner(machiner.Config{ + + w, err := machiner.NewMachiner(machiner.Config{ MachineAccessor: &mockMachineAccessor{}, Tag: names.NewMachineTag("123"), }) c.Assert(err, jc.ErrorIsNil) + + // must stop the worker to prevent a data race when cleanup suite + // rolls back the patches + err = stopWorker(w) + c.Assert(err, jc.ErrorIsNil) } func (s *MachinerSuite) TestMachinerMachineNotFound(c *gc.C) { @@ -206,10 +216,6 @@ Args: []interface{}{s.machineTag}, }}) - s.accessor.machine.watcher.CheckCalls(c, []gitjujutesting.StubCall{ - {FuncName: "Changes"}, {FuncName: "Changes"}, {FuncName: "Stop"}, - }) - s.accessor.machine.CheckCalls(c, []gitjujutesting.StubCall{{ FuncName: "SetMachineAddresses", Args: []interface{}{ @@ -249,10 +255,6 @@ // unless they fail. const worstCase = 5 * time.Second -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - type MachinerStateSuite struct { testing.JujuConnSuite @@ -306,8 +308,6 @@ } } -var _ worker.NotifyWatchHandler = (*machiner.Machiner)(nil) - func (s *MachinerStateSuite) TestNotFoundOrUnauthorized(c *gc.C) { mr, err := machiner.NewMachiner(machiner.Config{ machiner.APIMachineAccessor{s.machinerState}, === added file 'src/github.com/juju/juju/worker/machiner/manifold.go' --- src/github.com/juju/juju/worker/machiner/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/machiner/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package machiner + +import ( + "github.com/juju/errors" + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + apimachiner "github.com/juju/juju/api/machiner" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" + "github.com/juju/names" +) + +// ManifoldConfig defines the names of the manifolds on which a +// Manifold will depend. +type ManifoldConfig struct { + util.PostUpgradeManifoldConfig + WriteUninstallFile func() error +} + +// Manifold returns a dependency manifold that runs a machiner worker, using +// the resource names defined in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + + // TODO(waigani) This function is currently covered by functional tests + // under the machine agent. 
Add unit tests once infrastructure to do so is + // in place. + + // newWorker trivially wraps NewMachiner to specialise a PostUpgradeManifold. + var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + currentConfig := a.CurrentConfig() + + apiConn, ok := apiCaller.(api.Connection) + if !ok { + return nil, errors.New("unable to obtain api.Connection") + } + + envConfig, err := apiConn.Agent().ModelConfig() + if err != nil { + return nil, errors.Errorf("cannot read environment config: %v", err) + } + + ignoreMachineAddresses, _ := envConfig.IgnoreMachineAddresses() + // Containers only have machine addresses, so we can't ignore them. + tag := currentConfig.Tag() + if names.IsContainerMachine(tag.Id()) { + ignoreMachineAddresses = false + } + if ignoreMachineAddresses { + logger.Infof("machine addresses not used, only addresses from provider") + } + accessor := APIMachineAccessor{apimachiner.NewState(apiCaller)} + w, err := NewMachiner(Config{ + MachineAccessor: accessor, + Tag: tag.(names.MachineTag), + ClearMachineAddressesOnStart: ignoreMachineAddresses, + NotifyMachineDead: func() error { + return config.WriteUninstallFile() + }, + }) + if err != nil { + return nil, errors.Annotate(err, "cannot start machiner worker") + } + return w, err + } + + return util.PostUpgradeManifold(config.PostUpgradeManifoldConfig, newWorker) +} === modified file 'src/github.com/juju/juju/worker/machiner/mock_test.go' --- src/github.com/juju/juju/worker/machiner/mock_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/machiner/mock_test.go 2016-03-22 15:18:22 +0000 @@ -7,30 +7,24 @@ "github.com/juju/names" gitjujutesting "github.com/juju/testing" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker/machiner" ) type mockWatcher struct { - gitjujutesting.Stub changes chan struct{} } -func (w *mockWatcher) Changes() <-chan struct{} { - w.MethodCall(w, "Changes") +func (w *mockWatcher) Changes() watcher.NotifyChannel { return w.changes } -func (w *mockWatcher) Stop() error { - w.MethodCall(w, "Stop") - return w.NextErr() -} +func (w *mockWatcher) Kill() {} -func (w *mockWatcher) Err() error { - w.MethodCall(w, "Err") - return w.NextErr() +func (w *mockWatcher) Wait() error { + return nil } type mockMachine struct { === modified file 'src/github.com/juju/juju/worker/machiner/state.go' --- src/github.com/juju/juju/worker/machiner/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/machiner/state.go 2016-03-22 15:18:22 +0000 @@ -4,11 +4,12 @@ import ( "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/juju/api/machiner" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" - "github.com/juju/names" + "github.com/juju/juju/watcher" ) type MachineAccessor interface { === added directory 'src/github.com/juju/juju/worker/meterstatus' === added file 'src/github.com/juju/juju/worker/meterstatus/connected.go' --- src/github.com/juju/juju/worker/meterstatus/connected.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/connected.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,116 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
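Both the machiner above and the meterstatus worker below are built around the same three-method contract: SetUp acquires a watcher, Handle runs once per change notification, and TearDown releases whatever SetUp acquired. A simplified sketch of the loop that watcher.NewNotifyWorker presumably runs around such a handler; the real implementation adds tomb-based lifecycle management and a richer watcher type, and here the watcher is reduced to a bare channel:

    package sketch

    import "errors"

    // NotifyHandler is the contract shared by the machiner and meterstatus
    // handlers: SetUp acquires a watcher, Handle reacts to each change,
    // TearDown releases whatever SetUp acquired.
    type NotifyHandler interface {
        SetUp() (<-chan struct{}, error)
        Handle(abort <-chan struct{}) error
        TearDown() error
    }

    // Loop is roughly what a notify worker does with such a handler;
    // the error from the deferred TearDown is dropped for brevity.
    func Loop(h NotifyHandler, dying <-chan struct{}) error {
        changes, err := h.SetUp()
        if err != nil {
            return err
        }
        defer h.TearDown()
        for {
            select {
            case <-dying:
                return nil
            case _, ok := <-changes:
                if !ok {
                    return errors.New("watcher channel closed")
                }
                if err := h.Handle(dying); err != nil {
                    return err
                }
            }
        }
    }

Passing the dying channel into Handle is what lets a long-running hook (like the meter-status-changed hook below) abort promptly when the worker is killed.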
+
+package meterstatus
+
+import (
+	"github.com/juju/errors"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/api/meterstatus"
+	"github.com/juju/juju/watcher"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/uniter/runner/context"
+)
+
+// connectedStatusHandler implements the watcher.NotifyHandler interface.
+type connectedStatusHandler struct {
+	config ConnectedConfig
+
+	code string
+	info string
+}
+
+// ConnectedConfig contains all the dependencies required to create a new connected status worker.
+type ConnectedConfig struct {
+	Runner    HookRunner
+	StateFile *StateFile
+	Status    meterstatus.MeterStatusClient
+}
+
+// Validate validates the config structure and returns an error on failure.
+func (c ConnectedConfig) Validate() error {
+	if c.Runner == nil {
+		return errors.New("hook runner not provided")
+	}
+	if c.StateFile == nil {
+		return errors.New("state file not provided")
+	}
+	if c.Status == nil {
+		return errors.New("meter status API client not provided")
+	}
+	return nil
+}
+
+// NewConnectedStatusWorker creates a new worker that monitors the meter status of the
+// unit and runs the meter-status-changed hook appropriately.
+func NewConnectedStatusWorker(cfg ConnectedConfig) (worker.Worker, error) {
+	handler, err := NewConnectedStatusHandler(cfg)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return watcher.NewNotifyWorker(watcher.NotifyConfig{
+		Handler: handler,
+	})
+}
+
+// NewConnectedStatusHandler creates a new meter status handler for handling meter status
+// changes as provided by the API.
+func NewConnectedStatusHandler(cfg ConnectedConfig) (watcher.NotifyHandler, error) {
+	if err := cfg.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	w := &connectedStatusHandler{
+		config: cfg,
+	}
+	return w, nil
+}
+
+// SetUp is part of the watcher.NotifyHandler interface.
+func (w *connectedStatusHandler) SetUp() (watcher.NotifyWatcher, error) {
+	var err error
+	w.code, w.info, _, err = w.config.StateFile.Read()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return w.config.Status.WatchMeterStatus()
+}
+
+// TearDown is part of the watcher.NotifyHandler interface.
+func (w *connectedStatusHandler) TearDown() error {
+	return nil
+}
+
+// Handle is part of the watcher.NotifyHandler interface.
+func (w *connectedStatusHandler) Handle(abort <-chan struct{}) error { + logger.Debugf("got meter status change signal from watcher") + currentCode, currentInfo, err := w.config.Status.MeterStatus() + if err != nil { + return errors.Trace(err) + } + if currentCode == w.code && currentInfo == w.info { + logger.Tracef("meter status (%q, %q) matches stored information (%q, %q), skipping", currentCode, currentInfo, w.code, w.info) + return nil + } + w.applyStatus(currentCode, currentInfo, abort) + w.code, w.info = currentCode, currentInfo + err = w.config.StateFile.Write(w.code, w.info, nil) + if err != nil { + return errors.Annotate(err, "failed to record meter status worker state") + } + return nil +} + +func (w *connectedStatusHandler) applyStatus(code, info string, abort <-chan struct{}) { + logger.Tracef("applying meter status change: %q (%q)", code, info) + err := w.config.Runner.RunHook(code, info, abort) + cause := errors.Cause(err) + switch { + case context.IsMissingHookError(cause): + logger.Infof("skipped %q hook (missing)", string(hooks.MeterStatusChanged)) + case err != nil: + logger.Errorf("meter status worker encountered hook error: %v", err) + } +} === added file 'src/github.com/juju/juju/worker/meterstatus/connected_test.go' --- src/github.com/juju/juju/worker/meterstatus/connected_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/connected_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,208 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus_test + +import ( + "fmt" + "path" + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/fslock" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/meterstatus" + "github.com/juju/juju/worker/uniter/runner/context" +) + +type ConnectedWorkerSuite struct { + coretesting.BaseSuite + + stub *testing.Stub + + dataDir string + lock *fslock.Lock + msClient *stubMeterStatusClient +} + +var _ = gc.Suite(&ConnectedWorkerSuite{}) + +func (s *ConnectedWorkerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.stub = &testing.Stub{} + + s.dataDir = c.MkDir() + + s.msClient = newStubMeterStatusClient(s.stub) +} + +func assertSignal(c *gc.C, signal <-chan struct{}) { + select { + case <-signal: + case <-time.After(coretesting.LongWait): + c.Fatal("timed out waiting for signal") + } +} + +func assertNoSignal(c *gc.C, signal <-chan struct{}) { + select { + case <-signal: + c.Fatal("unexpected signal") + case <-time.After(coretesting.ShortWait): + } +} + +func (s *ConnectedWorkerSuite) TestConfigValidation(c *gc.C) { + tests := []struct { + cfg meterstatus.ConnectedConfig + expected string + }{{ + cfg: meterstatus.ConnectedConfig{ + Status: s.msClient, + StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), + }, + expected: "hook runner not provided", + }, { + cfg: meterstatus.ConnectedConfig{ + StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), + Runner: &stubRunner{stub: s.stub}, + }, + expected: "meter status API client not provided", + }, { + cfg: meterstatus.ConnectedConfig{ + Status: s.msClient, + Runner: &stubRunner{stub: s.stub}, + }, + expected: "state file not provided", + }} + for i, test := range tests { + c.Logf("running test %d", i) + err := test.cfg.Validate() + c.Assert(err, gc.ErrorMatches, test.expected) + } +} + +// TestStatusHandlerDoesNotRerunNoChange ensures that the handler 
does not execute the hook if it +// detects no actual meter status change. +func (s *ConnectedWorkerSuite) TestStatusHandlerDoesNotRerunNoChange(c *gc.C) { + handler, err := meterstatus.NewConnectedStatusHandler( + meterstatus.ConnectedConfig{ + Runner: &stubRunner{stub: s.stub}, + StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), + Status: s.msClient, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(handler, gc.NotNil) + _, err = handler.SetUp() + c.Assert(err, jc.ErrorIsNil) + + err = handler.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + err = handler.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "WatchMeterStatus", "MeterStatus", "RunHook", "MeterStatus") +} + +// TestStatusHandlerRunsHookOnChanges ensures that the handler runs the meter-status-changed hook +// if an actual meter status change is detected. +func (s *ConnectedWorkerSuite) TestStatusHandlerRunsHookOnChanges(c *gc.C) { + handler, err := meterstatus.NewConnectedStatusHandler( + meterstatus.ConnectedConfig{ + Runner: &stubRunner{stub: s.stub}, + StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), + Status: s.msClient, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(handler, gc.NotNil) + _, err = handler.SetUp() + c.Assert(err, jc.ErrorIsNil) + + handler.Handle(nil) + s.msClient.SetStatus("RED") + handler.Handle(nil) + + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCallNames(c, "WatchMeterStatus", "MeterStatus", "RunHook", "MeterStatus", "RunHook") +} + +// TestStatusHandlerHandlesHookMissingError tests that the handler does not report errors +// caused by a missing meter-status-changed hook. +func (s *ConnectedWorkerSuite) TestStatusHandlerHandlesHookMissingError(c *gc.C) { + s.stub.SetErrors(context.NewMissingHookError("meter-status-changed")) + handler, err := meterstatus.NewConnectedStatusHandler( + meterstatus.ConnectedConfig{ + Runner: &stubRunner{stub: s.stub}, + StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), + Status: s.msClient, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(handler, gc.NotNil) + _, err = handler.SetUp() + c.Assert(err, jc.ErrorIsNil) + + err = handler.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCallNames(c, "WatchMeterStatus", "MeterStatus", "RunHook") +} + +// TestStatusHandlerHandlesRandomHookError tests that the meter status handler does not return +// errors encountered while executing the hook. +func (s *ConnectedWorkerSuite) TestStatusHandlerHandlesRandomHookError(c *gc.C) { + s.stub.SetErrors(fmt.Errorf("blah")) + handler, err := meterstatus.NewConnectedStatusHandler( + meterstatus.ConnectedConfig{ + Runner: &stubRunner{stub: s.stub}, + StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")), + Status: s.msClient, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(handler, gc.NotNil) + _, err = handler.SetUp() + c.Assert(err, jc.ErrorIsNil) + + err = handler.Handle(nil) + c.Assert(err, jc.ErrorIsNil) + + s.stub.CheckCallNames(c, "WatchMeterStatus", "MeterStatus", "RunHook") +} + +// TestStatusHandlerDoesNotRerunAfterRestart tests that the status handler will not rerun a meter-status-changed +// hook if it is restarted, but no actual changes are recorded. 
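+// The handler relies on the state file written by the previous run: SetUp
+// re-reads the recorded code and info, so restarting against an unchanged
+// meter status produces no additional RunHook call.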
+func (s *ConnectedWorkerSuite) TestStatusHandlerDoesNotRerunAfterRestart(c *gc.C) {
+	handler, err := meterstatus.NewConnectedStatusHandler(
+		meterstatus.ConnectedConfig{
+			Runner:    &stubRunner{stub: s.stub},
+			StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
+			Status:    s.msClient,
+		})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(handler, gc.NotNil)
+	_, err = handler.SetUp()
+	c.Assert(err, jc.ErrorIsNil)
+
+	err = handler.Handle(nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	s.stub.CheckCallNames(c, "WatchMeterStatus", "MeterStatus", "RunHook")
+	s.stub.ResetCalls()
+
+	// Create a new handler (imitating worker restart).
+	handler, err = meterstatus.NewConnectedStatusHandler(
+		meterstatus.ConnectedConfig{
+			Runner:    &stubRunner{stub: s.stub},
+			StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
+			Status:    s.msClient})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(handler, gc.NotNil)
+	_, err = handler.SetUp()
+	c.Assert(err, jc.ErrorIsNil)
+
+	err = handler.Handle(nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	s.stub.CheckCallNames(c, "WatchMeterStatus", "MeterStatus")
+}

=== added file 'src/github.com/juju/juju/worker/meterstatus/context.go'
--- src/github.com/juju/juju/worker/meterstatus/context.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/meterstatus/context.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,93 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package meterstatus
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/worker/uniter/runner/context"
+	"github.com/juju/juju/worker/uniter/runner/jujuc"
+)
+
+type limitedContext struct {
+	jujuc.RestrictedContext
+
+	env map[string]string
+
+	unitName string
+	id       string
+}
+
+// NewLimitedContext creates a new context that implements just the bare minimum
+// of the jujuc.Context interface.
+func NewLimitedContext(unitName string) *limitedContext {
+	id := fmt.Sprintf("%s-%s-%d", unitName, "meter-status", rand.New(rand.NewSource(time.Now().Unix())).Int63())
+	return &limitedContext{unitName: unitName, id: id}
+}
+
+// HookVars implements runner.Context.
+func (ctx *limitedContext) HookVars(paths context.Paths) ([]string, error) {
+	vars := []string{
+		"JUJU_CHARM_DIR=" + paths.GetCharmDir(),
+		"JUJU_CONTEXT_ID=" + ctx.id,
+		"JUJU_AGENT_SOCKET=" + paths.GetJujucSocket(),
+		"JUJU_UNIT_NAME=" + ctx.unitName,
+	}
+	for key, val := range ctx.env {
+		vars = append(vars, fmt.Sprintf("%s=%s", key, val))
+	}
+	return append(vars, context.OSDependentEnvVars(paths)...), nil
+}
+
+// SetEnvVars sets additional environment variables to be exported by the context.
+func (ctx *limitedContext) SetEnvVars(vars map[string]string) {
+	if ctx.env == nil {
+		ctx.env = vars
+		return
+	}
+	for key, val := range vars {
+		ctx.env[key] = val
+	}
+}
+
+// UnitName implements runner.Context.
+func (ctx *limitedContext) UnitName() string {
+	return ctx.unitName
+}
+
+// SetProcess implements runner.Context.
+func (ctx *limitedContext) SetProcess(process context.HookProcess) {}
+
+// ActionData implements runner.Context.
+func (ctx *limitedContext) ActionData() (*context.ActionData, error) {
+	return nil, jujuc.ErrRestrictedContext
+}
+
+// Flush implements runner.Context.
+func (ctx *limitedContext) Flush(_ string, err error) error {
+	return err
+}
+
+// HasExecutionSetUnitStatus implements runner.Context.
+func (ctx *limitedContext) HasExecutionSetUnitStatus() bool { return false } + +// ResetExecutionSetUnitStatus implements runner.Context. +func (ctx *limitedContext) ResetExecutionSetUnitStatus() {} + +// Id implements runner.Context. +func (ctx *limitedContext) Id() string { return ctx.id } + +// Prepare implements runner.Context. +func (ctx *limitedContext) Prepare() error { + return jujuc.ErrRestrictedContext +} + +// Component implements runner.Context. +func (ctx *limitedContext) Component(name string) (jujuc.ContextComponent, error) { + return nil, errors.NotFoundf("context component %q", name) +} === added file 'src/github.com/juju/juju/worker/meterstatus/context_test.go' --- src/github.com/juju/juju/worker/meterstatus/context_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/context_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus_test + +import ( + "runtime" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/keyvalues" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/worker/meterstatus" +) + +type ContextSuite struct{} + +var _ = gc.Suite(&ContextSuite{}) + +type dummyPaths struct{} + +func (*dummyPaths) GetToolsDir() string { return "/dummy/tools" } +func (*dummyPaths) GetCharmDir() string { return "/dummy/charm" } +func (*dummyPaths) GetJujucSocket() string { return "/dummy/jujuc.sock" } +func (*dummyPaths) GetMetricsSpoolDir() string { return "/dummy/spool" } +func (*dummyPaths) ComponentDir(name string) string { return "/dummy/" + name } + +func (s *ContextSuite) TestHookContextEnv(c *gc.C) { + ctx := meterstatus.NewLimitedContext("u/0") + paths := &dummyPaths{} + vars, err := ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + varMap, err := keyvalues.Parse(vars, true) + c.Assert(err, jc.ErrorIsNil) + c.Assert(varMap["JUJU_AGENT_SOCKET"], gc.Equals, "/dummy/jujuc.sock") + c.Assert(varMap["JUJU_UNIT_NAME"], gc.Equals, "u/0") + key := "PATH" + if runtime.GOOS == "windows" { + key = "Path" + } + c.Assert(varMap[key], gc.Not(gc.Equals), "") +} + +func (s *ContextSuite) TestHookContextSetEnv(c *gc.C) { + ctx := meterstatus.NewLimitedContext("u/0") + setVars := map[string]string{ + "somekey": "somevalue", + "anotherkey": "anothervalue", + } + ctx.SetEnvVars(setVars) + paths := &dummyPaths{} + vars, err := ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + varMap, err := keyvalues.Parse(vars, true) + c.Assert(err, jc.ErrorIsNil) + for key, value := range setVars { + c.Assert(varMap[key], gc.Equals, value) + } + c.Assert(varMap["JUJU_AGENT_SOCKET"], gc.Equals, "/dummy/jujuc.sock") + c.Assert(varMap["JUJU_UNIT_NAME"], gc.Equals, "u/0") +} === added file 'src/github.com/juju/juju/worker/meterstatus/isolated.go' --- src/github.com/juju/juju/worker/meterstatus/isolated.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/isolated.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,158 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
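Before the worker itself, it may help to see the core timing shape it implements: wait on two timers and escalate when each fires. A toy, stdlib-only sketch (shortened durations and plain time.After instead of the juju clock interface; names are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		amberGrace = 50 * time.Millisecond  // stand-in for defaultAmberGracePeriod
		redGrace   = 120 * time.Millisecond // stand-in for defaultRedGracePeriod
	)
	disconnectedAt := time.Now()
	amber := time.After(amberGrace)
	red := time.After(redGrace)
	for fired := 0; fired < 2; fired++ {
		select {
		case <-amber:
			fmt.Println("escalate to AMBER after", time.Since(disconnectedAt))
		case <-red:
			fmt.Println("escalate to RED after", time.Since(disconnectedAt))
		}
	}
}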
+
+package meterstatus
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils/clock"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+	"launchpad.net/tomb"
+
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/uniter/runner/context"
+)
+
+const (
+	// defaultAmberGracePeriod is the time that the unit is allowed to
+	// function without a working API connection before its meter
+	// status is switched to AMBER.
+	defaultAmberGracePeriod = time.Minute * 5
+
+	// defaultRedGracePeriod is the time that a unit is allowed to function
+	// without a working API connection before its meter status is
+	// switched to RED.
+	defaultRedGracePeriod = time.Minute * 15
+)
+
+// WorkerState defines all the possible states the isolatedStatusWorker can be in.
+type WorkerState int
+
+const (
+	Uninitialized WorkerState = iota
+	WaitingAmber  // Waiting for a signal to switch to AMBER status.
+	WaitingRed    // Waiting for a signal to switch to RED status.
+	Done          // No more transitions to perform.
+)
+
+// IsolatedConfig stores all the dependencies required to create an isolated meter status worker.
+type IsolatedConfig struct {
+	Runner           HookRunner
+	StateFile        *StateFile
+	Clock            clock.Clock
+	AmberGracePeriod time.Duration
+	RedGracePeriod   time.Duration
+	TriggerFactory   TriggerCreator
+}
+
+// Validate validates the config structure and returns an error on failure.
+func (c IsolatedConfig) Validate() error {
+	if c.Runner == nil {
+		return errors.New("hook runner not provided")
+	}
+	if c.StateFile == nil {
+		return errors.New("state file not provided")
+	}
+	if c.Clock == nil {
+		return errors.New("clock not provided")
+	}
+	if c.AmberGracePeriod <= 0 {
+		return errors.New("invalid amber grace period")
+	}
+	if c.RedGracePeriod <= 0 {
+		return errors.New("invalid red grace period")
+	}
+	if c.AmberGracePeriod >= c.RedGracePeriod {
+		return errors.New("amber grace period must be shorter than the red grace period")
+	}
+	return nil
+}
+
+// isolatedStatusWorker is a worker that is instantiated by the
+// meter status manifold when the API connection is unavailable.
+// Its main function is to escalate the meter status of the unit
+// to amber and later to red.
+type isolatedStatusWorker struct {
+	config IsolatedConfig
+
+	tomb tomb.Tomb
+}
+
+// NewIsolatedStatusWorker creates a new status worker that runs without an API connection.
+func NewIsolatedStatusWorker(cfg IsolatedConfig) (worker.Worker, error) {
+	if err := cfg.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	w := &isolatedStatusWorker{
+		config: cfg,
+	}
+	go func() {
+		defer w.tomb.Done()
+		w.tomb.Kill(w.loop())
+	}()
+	return w, nil
+}
+
+func (w *isolatedStatusWorker) loop() error {
+	code, info, disconnected, err := w.config.StateFile.Read()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	// Disconnected time has not been recorded yet.
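+	// If so, treat the current time as the disconnection time and start
+	// out waiting for the amber transition.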
+ if disconnected == nil { + disconnected = &Disconnected{w.config.Clock.Now().Unix(), WaitingAmber} + } + + amberSignal, redSignal := w.config.TriggerFactory(disconnected.State, code, disconnected.When(), w.config.Clock, w.config.AmberGracePeriod, w.config.RedGracePeriod) + for { + select { + case <-w.tomb.Dying(): + return tomb.ErrDying + case <-redSignal: + logger.Debugf("triggering meter status transition to RED due to loss of connection") + currentCode := "RED" + currentInfo := "unit agent has been disconnected" + + w.applyStatus(currentCode, currentInfo) + code, info = currentCode, currentInfo + disconnected.State = Done + case <-amberSignal: + logger.Debugf("triggering meter status transition to AMBER due to loss of connection") + currentCode := "AMBER" + currentInfo := "unit agent has been disconnected" + + w.applyStatus(currentCode, currentInfo) + code, info = currentCode, currentInfo + disconnected.State = WaitingRed + } + err := w.config.StateFile.Write(code, info, disconnected) + if err != nil { + return errors.Annotate(err, "failed to record meter status worker state") + } + } +} + +func (w *isolatedStatusWorker) applyStatus(code, info string) { + logger.Tracef("applying meter status change: %q (%q)", code, info) + err := w.config.Runner.RunHook(code, info, w.tomb.Dying()) + cause := errors.Cause(err) + switch { + case context.IsMissingHookError(cause): + logger.Infof("skipped %q hook (missing)", string(hooks.MeterStatusChanged)) + case err != nil: + logger.Errorf("meter status worker encountered hook error: %v", err) + } +} + +// Kill is part of the worker.Worker interface. +func (w *isolatedStatusWorker) Kill() { + w.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (w *isolatedStatusWorker) Wait() error { + return w.tomb.Wait() +} === added file 'src/github.com/juju/juju/worker/meterstatus/isolated_test.go' --- src/github.com/juju/juju/worker/meterstatus/isolated_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/isolated_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,145 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package meterstatus_test
+
+import (
+	"fmt"
+	"path"
+	"time"
+
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils/clock"
+	"github.com/juju/utils/fslock"
+	gc "gopkg.in/check.v1"
+
+	coretesting "github.com/juju/juju/testing"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/meterstatus"
+	"github.com/juju/juju/worker/uniter/runner/context"
+)
+
+const (
+	AmberGracePeriod = time.Minute
+	RedGracePeriod   = time.Minute * 5
+)
+
+type IsolatedWorkerSuite struct {
+	coretesting.BaseSuite
+
+	stub *testing.Stub
+
+	dataDir string
+	lock    *fslock.Lock
+
+	clk *coretesting.Clock
+
+	hookRan         chan struct{}
+	triggersCreated chan struct{}
+
+	worker worker.Worker
+}
+
+var _ = gc.Suite(&IsolatedWorkerSuite{})
+
+func (s *IsolatedWorkerSuite) SetUpTest(c *gc.C) {
+	s.BaseSuite.SetUpTest(c)
+	s.stub = &testing.Stub{}
+
+	s.dataDir = c.MkDir()
+
+	s.hookRan = make(chan struct{})
+	s.triggersCreated = make(chan struct{})
+
+	triggerFactory := func(state meterstatus.WorkerState, status string, disconnectedAt time.Time, clk clock.Clock, amber time.Duration, red time.Duration) (<-chan time.Time, <-chan time.Time) {
+		select {
+		case s.triggersCreated <- struct{}{}:
+		case <-time.After(coretesting.LongWait):
+			c.Fatalf("failed to signal trigger creation")
+		}
+		return meterstatus.GetTriggers(state, status, disconnectedAt, clk, amber, red)
+	}
+
+	s.clk = coretesting.NewClock(time.Now())
+	wrk, err := meterstatus.NewIsolatedStatusWorker(
+		meterstatus.IsolatedConfig{
+			Runner:           &stubRunner{stub: s.stub, ran: s.hookRan},
+			StateFile:        meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
+			Clock:            s.clk,
+			AmberGracePeriod: AmberGracePeriod,
+			RedGracePeriod:   RedGracePeriod,
+			TriggerFactory:   triggerFactory,
+		})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(wrk, gc.NotNil)
+	s.worker = wrk
+}
+
+func (s *IsolatedWorkerSuite) TearDownTest(c *gc.C) {
+	s.worker.Kill()
+	err := s.worker.Wait()
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *IsolatedWorkerSuite) TestConfigValidation(c *gc.C) {
+	tests := []struct {
+		cfg      meterstatus.IsolatedConfig
+		expected string
+	}{{
+		cfg: meterstatus.IsolatedConfig{
+			Runner:    &stubRunner{stub: s.stub},
+			StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
+		},
+		expected: "clock not provided",
+	}, {
+		cfg: meterstatus.IsolatedConfig{
+			Clock:     coretesting.NewClock(time.Now()),
+			StateFile: meterstatus.NewStateFile(path.Join(s.dataDir, "meter-status.yaml")),
+		},
+		expected: "hook runner not provided",
+	}, {
+		cfg: meterstatus.IsolatedConfig{
+			Clock:  coretesting.NewClock(time.Now()),
+			Runner: &stubRunner{stub: s.stub},
+		},
+		expected: "state file not provided",
+	}}
+	for i, test := range tests {
+		c.Logf("running test %d", i)
+		err := test.cfg.Validate()
+		c.Assert(err, gc.ErrorMatches, test.expected)
+	}
+}
+
+func (s *IsolatedWorkerSuite) TestTriggering(c *gc.C) {
+	assertSignal(c, s.triggersCreated)
+	s.clk.Advance(AmberGracePeriod + time.Second)
+	assertSignal(c, s.hookRan)
+	s.clk.Advance(RedGracePeriod + time.Second)
+	assertSignal(c, s.hookRan)
+
+	s.stub.CheckCallNames(c, "RunHook", "RunHook")
+}
+
+// TestMissingHookError tests that errors caused by missing hooks do not stop the worker.
+func (s *IsolatedWorkerSuite) TestMissingHookError(c *gc.C) { + s.stub.SetErrors(context.NewMissingHookError("meter-status-changed")) + + assertSignal(c, s.triggersCreated) + s.clk.Advance(AmberGracePeriod + time.Second) + assertSignal(c, s.hookRan) + + s.stub.CheckCallNames(c, "RunHook") +} + +// TestRandomHookError tests that errors returned by hooks do not stop the worker. +func (s *IsolatedWorkerSuite) TestRandomHookError(c *gc.C) { + s.stub.SetErrors(fmt.Errorf("blah")) + + assertSignal(c, s.triggersCreated) + s.clk.Advance(AmberGracePeriod + time.Second) + assertSignal(c, s.hookRan) + + s.stub.CheckCallNames(c, "RunHook") +} === added file 'src/github.com/juju/juju/worker/meterstatus/manifold.go' --- src/github.com/juju/juju/worker/meterstatus/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,103 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package meterstatus provides a worker that executes the meter-status-changed hook +// periodically. +package meterstatus + +import ( + "path" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "github.com/juju/utils/clock" + "github.com/juju/utils/fslock" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/meterstatus" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" +) + +var ( + logger = loggo.GetLogger("juju.worker.meterstatus") +) + +// ManifoldConfig identifies the resource names upon which the status manifold depends. +type ManifoldConfig struct { + AgentName string + APICallerName string + MachineLockName string + + NewHookRunner func(names.UnitTag, *fslock.Lock, agent.Config) HookRunner + NewMeterStatusAPIClient func(base.APICaller, names.UnitTag) meterstatus.MeterStatusClient + + NewConnectedStatusWorker func(ConnectedConfig) (worker.Worker, error) + NewIsolatedStatusWorker func(IsolatedConfig) (worker.Worker, error) +} + +// Manifold returns a status manifold. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{ + config.AgentName, + config.APICallerName, + config.MachineLockName, + }, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + return newStatusWorker(config, getResource) + }, + } +} + +func newStatusWorker(config ManifoldConfig, getResource dependency.GetResourceFunc) (worker.Worker, error) { + var agent agent.Agent + if err := getResource(config.AgentName, &agent); err != nil { + return nil, err + } + + var machineLock *fslock.Lock + if err := getResource(config.MachineLockName, &machineLock); err != nil { + return nil, err + } + + tag := agent.CurrentConfig().Tag() + unitTag, ok := tag.(names.UnitTag) + if !ok { + return nil, errors.Errorf("expected unit tag, got %v", tag) + } + + agentConfig := agent.CurrentConfig() + stateFile := NewStateFile(path.Join(agentConfig.DataDir(), "meter-status.yaml")) + runner := config.NewHookRunner(unitTag, machineLock, agentConfig) + + // If we don't have a valid APICaller, start a meter status + // worker that works without an API connection. 
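+	// A dependency.ErrMissing for the API caller is expected while the
+	// connection is unavailable; any other error aborts the start attempt.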
+ var apiCaller base.APICaller + err := getResource(config.APICallerName, &apiCaller) + if errors.Cause(err) == dependency.ErrMissing { + logger.Tracef("API caller dependency not available, starting isolated meter status worker.") + cfg := IsolatedConfig{ + Runner: runner, + StateFile: stateFile, + Clock: clock.WallClock, + AmberGracePeriod: defaultAmberGracePeriod, + RedGracePeriod: defaultRedGracePeriod, + TriggerFactory: GetTriggers, + } + return config.NewIsolatedStatusWorker(cfg) + } else if err != nil { + return nil, err + } + logger.Tracef("Starting connected meter status worker.") + status := config.NewMeterStatusAPIClient(apiCaller, unitTag) + + cfg := ConnectedConfig{ + Runner: runner, + StateFile: stateFile, + Status: status, + } + return config.NewConnectedStatusWorker(cfg) +} === added file 'src/github.com/juju/juju/worker/meterstatus/manifold_test.go' --- src/github.com/juju/juju/worker/meterstatus/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,276 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus_test + +import ( + "sync" + "time" + + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/fslock" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + msapi "github.com/juju/juju/api/meterstatus" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/meterstatus" + "github.com/juju/juju/worker/uniter/runner" +) + +type ManifoldSuite struct { + coretesting.BaseSuite + + stub *testing.Stub + + dataDir string + + manifoldConfig meterstatus.ManifoldConfig + manifold dependency.Manifold + dummyResources dt.StubResources + getResource dependency.GetResourceFunc +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.stub = &testing.Stub{} + + s.manifoldConfig = meterstatus.ManifoldConfig{ + AgentName: "agent-name", + APICallerName: "apicaller-name", + MachineLockName: "machine-lock-name", + NewHookRunner: meterstatus.NewHookRunner, + NewMeterStatusAPIClient: msapi.NewClient, + + NewConnectedStatusWorker: meterstatus.NewConnectedStatusWorker, + NewIsolatedStatusWorker: meterstatus.NewIsolatedStatusWorker, + } + s.manifold = meterstatus.Manifold(s.manifoldConfig) + s.dataDir = c.MkDir() + + locksDir := c.MkDir() + lock, err := fslock.NewLock(locksDir, "machine-lock", fslock.Defaults()) + c.Assert(err, jc.ErrorIsNil) + + s.dummyResources = dt.StubResources{ + "agent-name": dt.StubResource{Output: &dummyAgent{dataDir: s.dataDir}}, + "apicaller-name": dt.StubResource{Output: &dummyAPICaller{}}, + "machine-lock-name": dt.StubResource{Output: lock}, + } + s.getResource = dt.StubGetResource(s.dummyResources) +} + +// TestInputs ensures the collect manifold has the expected defined inputs. +func (s *ManifoldSuite) TestInputs(c *gc.C) { + c.Check(s.manifold.Inputs, jc.DeepEquals, []string{ + "agent-name", "apicaller-name", "machine-lock-name", + }) +} + +// TestStartMissingDeps ensures that the manifold correctly handles a missing +// resource dependency. 
+func (s *ManifoldSuite) TestStartMissingDeps(c *gc.C) {
+	for _, missingDep := range []string{
+		"agent-name", "machine-lock-name",
+	} {
+		testResources := dt.StubResources{}
+		for k, v := range s.dummyResources {
+			if k == missingDep {
+				testResources[k] = dt.StubResource{Error: dependency.ErrMissing}
+			} else {
+				testResources[k] = v
+			}
+		}
+		getResource := dt.StubGetResource(testResources)
+		worker, err := s.manifold.Start(getResource)
+		c.Check(worker, gc.IsNil)
+		c.Check(err, gc.Equals, dependency.ErrMissing)
+	}
+}
+
+type PatchedManifoldSuite struct {
+	coretesting.BaseSuite
+	msClient       *stubMeterStatusClient
+	manifoldConfig meterstatus.ManifoldConfig
+	stub           *testing.Stub
+	dummyResources dt.StubResources
+}
+
+func (s *PatchedManifoldSuite) SetUpTest(c *gc.C) {
+	s.BaseSuite.SetUpTest(c)
+
+	s.stub = &testing.Stub{}
+	s.msClient = &stubMeterStatusClient{stub: s.stub, changes: make(chan struct{})}
+	newMSClient := func(_ base.APICaller, _ names.UnitTag) msapi.MeterStatusClient {
+		return s.msClient
+	}
+	newHookRunner := func(_ names.UnitTag, _ *fslock.Lock, _ agent.Config) meterstatus.HookRunner {
+		return &stubRunner{stub: s.stub}
+	}
+
+	s.manifoldConfig = meterstatus.ManifoldConfig{
+		AgentName:               "agent-name",
+		APICallerName:           "apicaller-name",
+		MachineLockName:         "machine-lock-name",
+		NewHookRunner:           newHookRunner,
+		NewMeterStatusAPIClient: newMSClient,
+	}
+}
+
+// TestStatusWorkerStarts ensures that the manifold correctly sets up the connected worker.
+func (s *PatchedManifoldSuite) TestStatusWorkerStarts(c *gc.C) {
+	var called bool
+	s.manifoldConfig.NewConnectedStatusWorker = func(cfg meterstatus.ConnectedConfig) (worker.Worker, error) {
+		called = true
+		return meterstatus.NewConnectedStatusWorker(cfg)
+	}
+	manifold := meterstatus.Manifold(s.manifoldConfig)
+	getResource := dt.StubGetResource(s.dummyResources)
+	worker, err := manifold.Start(getResource)
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(worker, gc.NotNil)
+	worker.Kill()
+	err = worker.Wait()
+	c.Assert(err, jc.ErrorIsNil)
+	s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus")
+}
+
+// TestIsolatedWorker ensures that the manifold correctly sets up the isolated worker.
+func (s *PatchedManifoldSuite) TestIsolatedWorker(c *gc.C) {
+	delete(s.dummyResources, "apicaller-name")
+	var called bool
+	s.manifoldConfig.NewIsolatedStatusWorker = func(cfg meterstatus.IsolatedConfig) (worker.Worker, error) {
+		called = true
+		return meterstatus.NewIsolatedStatusWorker(cfg)
+	}
+	manifold := meterstatus.Manifold(s.manifoldConfig)
+	getResource := dt.StubGetResource(s.dummyResources)
+	worker, err := manifold.Start(getResource)
+	c.Assert(called, jc.IsTrue)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(worker, gc.NotNil)
+	worker.Kill()
+	err = worker.Wait()
+	c.Assert(err, jc.ErrorIsNil)
+	s.stub.CheckCallNames(c, "MeterStatus", "RunHook", "WatchMeterStatus")
+}
+
+type dummyAgent struct {
+	agent.Agent
+	dataDir string
+}
+
+func (a dummyAgent) CurrentConfig() agent.Config {
+	return &dummyAgentConfig{dataDir: a.dataDir}
+}
+
+type dummyAgentConfig struct {
+	agent.Config
+	dataDir string
+}
+
+// Tag implements agent.AgentConfig.
+func (ac dummyAgentConfig) Tag() names.Tag {
+	return names.NewUnitTag("u/0")
+}
+
+// DataDir implements agent.AgentConfig.
+func (ac dummyAgentConfig) DataDir() string { + return ac.dataDir +} + +type dummyAPICaller struct { + base.APICaller +} + +func (dummyAPICaller) BestFacadeVersion(facade string) int { + return 42 +} + +type stubMeterStatusClient struct { + sync.RWMutex + stub *testing.Stub + changes chan struct{} + code string +} + +func newStubMeterStatusClient(stub *testing.Stub) *stubMeterStatusClient { + changes := make(chan struct{}) + return &stubMeterStatusClient{stub: stub, changes: changes} +} + +func (s *stubMeterStatusClient) SignalStatus(codes ...string) { + if len(codes) == 0 { + codes = []string{s.code} + } + for _, code := range codes { + s.SetStatus(code) + select { + case s.changes <- struct{}{}: + case <-time.After(coretesting.LongWait): + panic("timed out signaling meter status change") + } + } +} + +func (s *stubMeterStatusClient) SetStatus(code string) { + s.Lock() + defer s.Unlock() + s.code = code +} + +func (s *stubMeterStatusClient) MeterStatus() (string, string, error) { + s.RLock() + defer s.RUnlock() + s.stub.MethodCall(s, "MeterStatus") + if s.code == "" { + return "GREEN", "", nil + } else { + return s.code, "", nil + } + +} + +func (s *stubMeterStatusClient) WatchMeterStatus() (watcher.NotifyWatcher, error) { + s.stub.MethodCall(s, "WatchMeterStatus") + return s, nil +} + +func (s *stubMeterStatusClient) Changes() watcher.NotifyChannel { + return s.changes +} + +func (s *stubMeterStatusClient) Kill() { +} + +func (s *stubMeterStatusClient) Wait() error { + return nil +} + +type stubRunner struct { + runner.Runner + stub *testing.Stub + ran chan struct{} +} + +func (r *stubRunner) RunHook(code, info string, abort <-chan struct{}) error { + r.stub.MethodCall(r, "RunHook", code, info) + if r.ran != nil { + select { + case r.ran <- struct{}{}: + case <-time.After(coretesting.LongWait): + panic("timed out signaling hook run") + } + } + return r.stub.NextErr() +} === added file 'src/github.com/juju/juju/worker/meterstatus/package_test.go' --- src/github.com/juju/juju/worker/meterstatus/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/meterstatus/runner.go' --- src/github.com/juju/juju/worker/meterstatus/runner.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/runner.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,83 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus + +import ( + "fmt" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/utils/fslock" + "gopkg.in/juju/charm.v6-unstable/hooks" + "launchpad.net/tomb" + + "github.com/juju/juju/agent" + "github.com/juju/juju/worker/uniter" + "github.com/juju/juju/worker/uniter/runner" +) + +// HookRunner implements the functionality necessary to run a meter-status-changed hook. +type HookRunner interface { + RunHook(string, string, <-chan struct{}) error +} + +// hookRunner implements functionality for running a hook. 
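+// It serialises hook execution with the machine-level fslock, so at most
+// one hook runs on the machine at a time.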
+type hookRunner struct { + machineLock *fslock.Lock + config agent.Config + tag names.UnitTag +} + +func NewHookRunner(tag names.UnitTag, lock *fslock.Lock, config agent.Config) HookRunner { + return &hookRunner{ + tag: tag, + machineLock: lock, + config: config, + } +} + +// acquireExecutionLock acquires the machine-level execution lock and returns a function to be used +// to unlock it. +func (w *hookRunner) acquireExecutionLock(interrupt <-chan struct{}) (func() error, error) { + message := "running meter-status-changed hook" + logger.Tracef("lock: %v", message) + checkTomb := func() error { + select { + case <-interrupt: + return tomb.ErrDying + default: + return nil + } + } + message = fmt.Sprintf("%s: %s", w.tag.String(), message) + if err := w.machineLock.LockWithFunc(message, checkTomb); err != nil { + return nil, err + } + return func() error { + logger.Tracef("unlock: %v", message) + return w.machineLock.Unlock() + }, nil +} + +func (w *hookRunner) RunHook(code, info string, interrupt <-chan struct{}) (runErr error) { + unitTag := w.tag + paths := uniter.NewPaths(w.config.DataDir(), unitTag) + ctx := NewLimitedContext(unitTag.String()) + ctx.SetEnvVars(map[string]string{ + "JUJU_METER_STATUS": code, + "JUJU_METER_INFO": info, + }) + r := runner.NewRunner(ctx, paths) + unlock, err := w.acquireExecutionLock(interrupt) + if err != nil { + return errors.Annotate(err, "failed to acquire machine lock") + } + defer func() { + unlockErr := unlock() + if unlockErr != nil { + logger.Criticalf("hook run resulted in error %v; unlock failure error: %v", runErr, unlockErr) + } + }() + return r.RunHook(string(hooks.MeterStatusChanged)) +} === added file 'src/github.com/juju/juju/worker/meterstatus/state.go' --- src/github.com/juju/juju/worker/meterstatus/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,64 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus + +import ( + "os" + "time" + + "github.com/juju/errors" + "github.com/juju/utils" +) + +// StateFile holds the meter status on disk. +type StateFile struct { + path string +} + +// NewStateFile creates a new file for persistent storage of +// the meter status. +func NewStateFile(path string) *StateFile { + return &StateFile{path: path} +} + +type state struct { + Code string `yaml:"status-code"` + Info string `yaml:"status-info"` + Disconnected *Disconnected `yaml:"disconnected,omitempty"` +} + +// Disconnected stores the information relevant to the inactive meter status worker. +type Disconnected struct { + Disconnected int64 `yaml:"disconnected-at,omitempty"` + State WorkerState `yaml:"disconnected-state,omitempty"` +} + +// When returns the time when the unit was disconnected. +func (d Disconnected) When() time.Time { + return time.Unix(d.Disconnected, 0) +} + +// Read reads the current meter status information from disk. +func (f *StateFile) Read() (string, string, *Disconnected, error) { + var st state + if err := utils.ReadYaml(f.path, &st); err != nil { + if os.IsNotExist(err) { + return "", "", nil, nil + } + return "", "", nil, errors.Trace(err) + } + + return st.Code, st.Info, st.Disconnected, nil +} + +// Write stores the supplied status information to disk. 
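+// The resulting YAML has this shape (values illustrative):
+//
+//	status-code: AMBER
+//	status-info: unit agent has been disconnected
+//	disconnected:
+//	  disconnected-at: 1458656302
+//	  disconnected-state: 2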
+func (f *StateFile) Write(code, info string, disconnected *Disconnected) error {
+	st := state{
+		Code:         code,
+		Info:         info,
+		Disconnected: disconnected,
+	}
+
+	return errors.Trace(utils.WriteYaml(f.path, st))
+}

=== added file 'src/github.com/juju/juju/worker/meterstatus/state_test.go'
--- src/github.com/juju/juju/worker/meterstatus/state_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/meterstatus/state_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,72 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package meterstatus_test
+
+import (
+	"path"
+	"time"
+
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/worker/meterstatus"
+)
+
+type StateFileSuite struct {
+	path  string
+	state *meterstatus.StateFile
+}
+
+var _ = gc.Suite(&StateFileSuite{})
+
+func (t *StateFileSuite) SetUpTest(c *gc.C) {
+	t.path = path.Join(c.MkDir(), "state.yaml")
+	t.state = meterstatus.NewStateFile(t.path)
+}
+
+func (t *StateFileSuite) TestReadNonExist(c *gc.C) {
+	code, info, disconnected, err := t.state.Read()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(code, gc.Equals, "")
+	c.Assert(info, gc.Equals, "")
+	c.Assert(disconnected, gc.IsNil)
+}
+
+func (t *StateFileSuite) TestWriteRead(c *gc.C) {
+	code := "GREEN"
+	info := "some message"
+	err := t.state.Write(code, info, nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	rCode, rInfo, _, err := t.state.Read()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(rCode, gc.Equals, code)
+	c.Assert(rInfo, gc.Equals, info)
+}
+
+func (t *StateFileSuite) TestWriteReadExtra(c *gc.C) {
+	code := "GREEN"
+	info := "some message"
+	err := t.state.Write(code, info, nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	rCode, rInfo, rDisconnected, err := t.state.Read()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(rCode, gc.Equals, code)
+	c.Assert(rInfo, gc.Equals, info)
+	c.Assert(rDisconnected, gc.IsNil)
+
+	disconnected := meterstatus.Disconnected{
+		Disconnected: time.Now().Unix(),
+		State:        meterstatus.WaitingRed,
+	}
+
+	err = t.state.Write(code, info, &disconnected)
+	c.Assert(err, jc.ErrorIsNil)
+
+	rCode, rInfo, rDisconnected, err = t.state.Read()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(rCode, gc.Equals, code)
+	c.Assert(rInfo, gc.Equals, info)
+	c.Assert(*rDisconnected, gc.DeepEquals, disconnected)
+}

=== added file 'src/github.com/juju/juju/worker/meterstatus/triggers.go'
--- src/github.com/juju/juju/worker/meterstatus/triggers.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/meterstatus/triggers.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,62 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package meterstatus
+
+import (
+	"time"
+
+	"github.com/juju/utils/clock"
+)
+
+type TriggerCreator func(WorkerState, string, time.Time, clock.Clock, time.Duration, time.Duration) (<-chan time.Time, <-chan time.Time)
+
+// GetTriggers returns the signal channels for state transitions based on the current state.
+// It controls the transitions of the inactive meter status worker.
+//
+// In a simple case, the transitions are trivial:
+//
+// D------------------A----------------------R--------------------->
+//
+// D - disconnect time
+// A - amber status triggered
+// R - red status triggered
+//
+// The problem arises from the fact that the lifetime of the worker can
+// be interrupted, possibly with significant portions of the duration missing.
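+//
+// For example, with a 10 minute amber and a 30 minute red grace period, a
+// worker restarted in state WaitingAmber 12 minutes after disconnection
+// receives both signals: the amber timer is created with a negative
+// duration and so fires immediately, while the red timer fires 18 minutes
+// later.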
+func GetTriggers( + wst WorkerState, + status string, + disconnectedAt time.Time, + clk clock.Clock, + amberGracePeriod time.Duration, + redGracePeriod time.Duration) (<-chan time.Time, <-chan time.Time) { + + now := clk.Now() + + if wst == Done { + return nil, nil + } + + if wst <= WaitingAmber && status == "RED" { + // If the current status is already RED, we don't want to deescalate. + wst = WaitingRed + // } else if wst <= WaitingAmber && now.Sub(disconnectedAt) >= amberGracePeriod { + // If we missed the transition to amber, activate it. + // wst = WaitingRed + } else if wst < Done && now.Sub(disconnectedAt) >= redGracePeriod { + // If we missed the transition to amber and it's time to transition to RED, go straight to RED. + wst = WaitingRed + } + + if wst == WaitingRed { + redSignal := clk.After(redGracePeriod - now.Sub(disconnectedAt)) + return nil, redSignal + } + if wst == WaitingAmber || wst == Uninitialized { + amberSignal := clk.After(amberGracePeriod - now.Sub(disconnectedAt)) + redSignal := clk.After(redGracePeriod - now.Sub(disconnectedAt)) + return amberSignal, redSignal + } + return nil, nil +} === added file 'src/github.com/juju/juju/worker/meterstatus/triggers_test.go' --- src/github.com/juju/juju/worker/meterstatus/triggers_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/meterstatus/triggers_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,162 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package meterstatus_test + +import ( + "time" + + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/meterstatus" +) + +type TriggersSuite struct { + coretesting.BaseSuite +} + +var _ = gc.Suite(&TriggersSuite{}) + +var fudge = time.Second + +const ( + testAmberGracePeriod = time.Minute * 10 + testRedGracePeriod = time.Minute * 30 +) + +func (*TriggersSuite) TestTriggerCreation(c *gc.C) { + now := time.Now() + tests := []struct { + description string + worker meterstatus.WorkerState + status string + disconnected time.Time + now clock.Clock + check func(*gc.C, <-chan time.Time, <-chan time.Time) + }{{ + "normal start, unit status is green", + meterstatus.Uninitialized, + "GREEN", + now, + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.NotNil) + c.Check(red, gc.NotNil) + }}, { + "normal start, unit status is amber", + meterstatus.Uninitialized, + "AMBER", + now, + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.NotNil) + c.Check(red, gc.NotNil) + }}, { + "normal start, unit status is RED", + meterstatus.Uninitialized, + "RED", + now, + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.IsNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is green, amber status not yet triggered", + meterstatus.WaitingAmber, + "GREEN", + now, + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.NotNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is amber, amber status not yet triggered", + meterstatus.WaitingAmber, + "AMBER", + now, + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.NotNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is red, amber status not yet triggered", + meterstatus.WaitingAmber, + "RED", + now, + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { 
+ c.Check(amber, gc.IsNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is green, amber status trigger time passed", + meterstatus.WaitingAmber, + "GREEN", + now.Add(-(testAmberGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.NotNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is amber, amber status trigger time passed", + meterstatus.WaitingAmber, + "AMBER", + now.Add(-(testAmberGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.NotNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is red, amber status trigger time passed", + meterstatus.WaitingAmber, + "RED", + now.Add(-(testAmberGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.IsNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is amber, amber status has been triggered", + meterstatus.WaitingRed, + "AMBER", + now.Add(-(testAmberGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.IsNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is amber, red status trigger time has passed", + meterstatus.WaitingRed, + "AMBER", + now.Add(-(testRedGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.IsNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is red, red status trigger time has passed", + meterstatus.WaitingRed, + "RED", + now.Add(-(testRedGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.IsNil) + c.Check(red, gc.NotNil) + }}, { + "restart, unit status is red, red status has been triggered", + meterstatus.Done, + "RED", + now.Add(-(testRedGracePeriod + fudge)), + coretesting.NewClock(now), + func(c *gc.C, amber, red <-chan time.Time) { + c.Check(amber, gc.IsNil) + c.Check(red, gc.IsNil) + }}} + + for i, test := range tests { + c.Logf("%d: %s", i, test.description) + signalAmber, signalRed := meterstatus.GetTriggers(test.worker, test.status, test.disconnected, test.now, testAmberGracePeriod, testRedGracePeriod) + test.check(c, signalAmber, signalRed) + } +} === added directory 'src/github.com/juju/juju/worker/metrics' === added directory 'src/github.com/juju/juju/worker/metrics/collect' === added file 'src/github.com/juju/juju/worker/metrics/collect/context.go' --- src/github.com/juju/juju/worker/metrics/collect/context.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/collect/context.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package collect + +import ( + "fmt" + "math/rand" + "time" + + "github.com/juju/errors" + + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter/runner/context" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type hookContext struct { + jujuc.RestrictedContext + + unitName string + id string + recorder spool.MetricRecorder +} + +func newHookContext(unitName string, recorder spool.MetricRecorder) *hookContext { + id := fmt.Sprintf("%s-%s-%d", unitName, "collect-metrics", rand.New(rand.NewSource(time.Now().Unix())).Int63()) + return &hookContext{unitName: unitName, id: id, recorder: recorder} +} + +// HookVars implements runner.Context. 
+func (ctx *hookContext) HookVars(paths context.Paths) ([]string, error) { + vars := []string{ + "JUJU_CHARM_DIR=" + paths.GetCharmDir(), + "JUJU_CONTEXT_ID=" + ctx.id, + "JUJU_AGENT_SOCKET=" + paths.GetJujucSocket(), + "JUJU_UNIT_NAME=" + ctx.unitName, + } + return append(vars, context.OSDependentEnvVars(paths)...), nil +} + +// UnitName implements runner.Context. +func (ctx *hookContext) UnitName() string { + return ctx.unitName +} + +// Flush implements runner.Context. +func (ctx *hookContext) Flush(process string, ctxErr error) (err error) { + return ctx.recorder.Close() +} + +// AddMetric implements runner.Context. +func (ctx *hookContext) AddMetric(key string, value string, created time.Time) error { + return ctx.recorder.AddMetric(key, value, created) +} + +// addJujuUnitsMetric adds the juju-units built in metric if it +// is defined for this context. +func (ctx *hookContext) addJujuUnitsMetric() error { + if ctx.recorder.IsDeclaredMetric("juju-units") { + err := ctx.recorder.AddMetric("juju-units", "1", time.Now().UTC()) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// SetProcess implements runner.Context. +func (ctx *hookContext) SetProcess(process context.HookProcess) {} + +// ActionData implements runner.Context. +func (ctx *hookContext) ActionData() (*context.ActionData, error) { + return nil, jujuc.ErrRestrictedContext +} + +// HasExecutionSetUnitStatus implements runner.Context. +func (ctx *hookContext) HasExecutionSetUnitStatus() bool { return false } + +// ResetExecutionSetUnitStatus implements runner.Context. +func (ctx *hookContext) ResetExecutionSetUnitStatus() {} + +// Id implements runner.Context. +func (ctx *hookContext) Id() string { return ctx.id } + +// Prepare implements runner.Context. +func (ctx *hookContext) Prepare() error { + return jujuc.ErrRestrictedContext +} + +// Component implements runner.Context. +func (ctx *hookContext) Component(name string) (jujuc.ContextComponent, error) { + return nil, errors.NotFoundf("context component %q", name) +} === added file 'src/github.com/juju/juju/worker/metrics/collect/context_test.go' --- src/github.com/juju/juju/worker/metrics/collect/context_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/collect/context_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package collect_test + +import ( + "runtime" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/keyvalues" + gc "gopkg.in/check.v1" + corecharm "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/worker/metrics/collect" +) + +type ContextSuite struct { + recorder *dummyRecorder +} + +var _ = gc.Suite(&ContextSuite{}) + +func (s *ContextSuite) SetUpTest(c *gc.C) { + s.recorder = &dummyRecorder{ + charmURL: "local:quantal/metered-1", + unitTag: "u/0", + metrics: map[string]corecharm.Metric{ + "pings": corecharm.Metric{ + Type: corecharm.MetricTypeGauge, + Description: "pings-desc", + }, + }, + } +} + +func (s *ContextSuite) TestCtxDeclaredMetric(c *gc.C) { + ctx := collect.NewHookContext("u/0", s.recorder) + err := ctx.AddMetric("pings", "1", time.Now()) + c.Assert(err, jc.ErrorIsNil) + err = ctx.Flush("", nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.recorder.closed, jc.IsTrue) + c.Assert(s.recorder.batches, gc.HasLen, 1) + c.Assert(s.recorder.batches[0].Metrics, gc.HasLen, 1) + c.Assert(s.recorder.batches[0].Metrics[0].Key, gc.Equals, "pings") + c.Assert(s.recorder.batches[0].Metrics[0].Value, gc.Equals, "1") +} + +type dummyPaths struct{} + +func (*dummyPaths) GetToolsDir() string { return "/dummy/tools" } +func (*dummyPaths) GetCharmDir() string { return "/dummy/charm" } +func (*dummyPaths) GetJujucSocket() string { return "/dummy/jujuc.sock" } +func (*dummyPaths) GetMetricsSpoolDir() string { return "/dummy/spool" } +func (*dummyPaths) ComponentDir(name string) string { return "/dummy/" + name } + +func (s *ContextSuite) TestHookContextEnv(c *gc.C) { + ctx := collect.NewHookContext("u/0", s.recorder) + paths := &dummyPaths{} + vars, err := ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + varMap, err := keyvalues.Parse(vars, true) + c.Assert(err, jc.ErrorIsNil) + c.Assert(varMap["JUJU_AGENT_SOCKET"], gc.Equals, "/dummy/jujuc.sock") + c.Assert(varMap["JUJU_UNIT_NAME"], gc.Equals, "u/0") + key := "PATH" + if runtime.GOOS == "windows" { + key = "Path" + } + c.Assert(varMap[key], gc.Not(gc.Equals), "") +} === added file 'src/github.com/juju/juju/worker/metrics/collect/export_test.go' --- src/github.com/juju/juju/worker/metrics/collect/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/collect/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package collect + +import ( + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter/runner" +) + +var ( + // NewCollect allows patching the function that creates the metric collection + // entity. + NewCollect = newCollect + + // NewRecorder allows patching the function that creates the metric recorder. + NewRecorder = &newRecorder + + // NewHookContext returns a new hook context used to collect metrics. + // It is exported here for calling from tests, but not patching. + NewHookContext = newHookContext + + // ReadCharm reads the charm directory and returns the charm url and + // metrics declared by the charm. + ReadCharm = &readCharm + + // NewSocketListener creates a new socket listener with the provided + // socket path and connection handler. + NewSocketListener = &newSocketListener +) + +// Ensure hookContext is a runner.Context. 
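+// The var assignment below is the usual compile-time check: it fails to
+// build if *hookContext ever stops satisfying runner.Context.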
+var _ runner.Context = (*hookContext)(nil)
+
+type handlerSetterStopper interface {
+	SetHandler(spool.ConnectionHandler)
+	Stop()
+}
+
+// NewSocketListenerFnc returns a replacement for newSocketListener that
+// wires the provided handler to the given listener, for use in tests.
+func NewSocketListenerFnc(listener handlerSetterStopper) func(string, spool.ConnectionHandler) (stopper, error) {
+	return func(_ string, handler spool.ConnectionHandler) (stopper, error) {
+		listener.SetHandler(handler)
+		return listener, nil
+	}
+}

=== added file 'src/github.com/juju/juju/worker/metrics/collect/handler.go'
--- src/github.com/juju/juju/worker/metrics/collect/handler.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/collect/handler.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package collect
+
+import (
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	corecharm "gopkg.in/juju/charm.v6-unstable"
+
+	"github.com/juju/juju/worker/metrics/spool"
+)
+
+// handlerConfig stores configuration values for the connection handler.
+type handlerConfig struct {
+	unitTag        names.UnitTag
+	charmURL       *corecharm.URL
+	validMetrics   map[string]corecharm.Metric
+	metricsFactory spool.MetricFactory
+	runner         *hookRunner
+}
+
+func newHandler(config handlerConfig) *handler {
+	return &handler{config: config}
+}
+
+type handler struct {
+	config handlerConfig
+}
+
+// Handle triggers the collect-metrics hook, records the collected metrics
+// to the spool, and writes the outcome ("ok" or the error message) to the
+// connection before closing it.
+func (h *handler) Handle(c net.Conn) (err error) {
+	defer func() {
+		if err != nil {
+			fmt.Fprintf(c, "%v\n", err.Error())
+		} else {
+			fmt.Fprintf(c, "ok\n")
+		}
+		c.Close()
+	}()
+	err = c.SetDeadline(time.Now().Add(spool.DefaultTimeout))
+	if err != nil {
+		return errors.Annotate(err, "failed to set the deadline")
+	}
+	recorder, err := h.config.metricsFactory.Recorder(
+		h.config.validMetrics,
+		h.config.charmURL.String(),
+		h.config.unitTag.String(),
+	)
+	if err != nil {
+		return errors.Annotate(err, "failed to create the metric recorder")
+	}
+	defer recorder.Close()
+	err = h.config.runner.do(recorder)
+	if err != nil {
+		return errors.Annotate(err, "failed to collect metrics")
+	}
+	return nil
+}

=== added file 'src/github.com/juju/juju/worker/metrics/collect/handler_test.go'
--- src/github.com/juju/juju/worker/metrics/collect/handler_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/collect/handler_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,220 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
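
Handle above implements a one-line status protocol: the server runs the hook, then writes either "ok" or the error text and closes the connection. A minimal Unix-socket client for that protocol might look as follows (a sketch; the socket path and helper name are illustrative, the Windows named-pipe variant is analogous, and the bufio, fmt, net, and strings imports are assumed):

    // triggerCollect dials the collect-metrics socket and interprets the
    // single status line written by handler.Handle.
    func triggerCollect(socketPath string) error {
    	conn, err := net.Dial("unix", socketPath)
    	if err != nil {
    		return err
    	}
    	defer conn.Close()
    	status, err := bufio.NewReader(conn).ReadString('\n')
    	if err != nil {
    		return err
    	}
    	if s := strings.TrimSpace(status); s != "ok" {
    		return fmt.Errorf("collect-metrics failed: %s", s)
    	}
    	return nil
    }
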
+ +package collect_test + +import ( + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + corecharm "gopkg.in/juju/charm.v6-unstable" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/metrics/collect" + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter/runner/context" +) + +type handlerSuite struct { + coretesting.BaseSuite + + manifoldConfig collect.ManifoldConfig + manifold dependency.Manifold + dataDir string + dummyResources dt.StubResources + getResource dependency.GetResourceFunc + recorder *dummyRecorder + listener *mockListener +} + +var _ = gc.Suite(&handlerSuite{}) + +func (s *handlerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.manifoldConfig = collect.ManifoldConfig{ + AgentName: "agent-name", + MetricSpoolName: "metric-spool-name", + CharmDirName: "charmdir-name", + } + s.manifold = collect.Manifold(s.manifoldConfig) + s.dataDir = c.MkDir() + + // create unit agent base dir so that hooks can run. + err := os.MkdirAll(filepath.Join(s.dataDir, "agents", "unit-u-0"), 0777) + c.Assert(err, jc.ErrorIsNil) + + s.recorder = &dummyRecorder{ + charmURL: "local:trusty/metered-1", + unitTag: "metered/0", + metrics: map[string]corecharm.Metric{ + "pings": corecharm.Metric{ + Description: "test metric", + Type: corecharm.MetricTypeAbsolute, + }, + "juju-units": corecharm.Metric{}, + }, + } + + s.dummyResources = dt.StubResources{ + "agent-name": dt.StubResource{Output: &dummyAgent{dataDir: s.dataDir}}, + "metric-spool-name": dt.StubResource{Output: &mockMetricFactory{recorder: s.recorder}}, + "charmdir-name": dt.StubResource{Output: &dummyCharmdir{aborted: false}}, + } + s.getResource = dt.StubGetResource(s.dummyResources) + + s.PatchValue(collect.NewRecorder, + func(_ names.UnitTag, _ context.Paths, _ spool.MetricFactory) (spool.MetricRecorder, error) { + // Return a dummyRecorder here, because otherwise a real one + // *might* get instantiated and error out, if the periodic worker + // happens to fire before the worker shuts down (as seen in + // LP:#1497355). 
+			return &dummyRecorder{
+				charmURL: "local:trusty/metered-1",
+				unitTag:  "metered/0",
+				metrics: map[string]corecharm.Metric{
+					"pings": corecharm.Metric{
+						Description: "test metric",
+						Type:        corecharm.MetricTypeAbsolute,
+					},
+					"juju-units": corecharm.Metric{},
+				},
+			}, nil
+		},
+	)
+	s.PatchValue(collect.ReadCharm,
+		func(_ names.UnitTag, _ context.Paths) (*corecharm.URL, map[string]corecharm.Metric, error) {
+			return corecharm.MustParseURL("local:trusty/metered-1"),
+				map[string]corecharm.Metric{
+					"pings":      corecharm.Metric{Description: "test metric", Type: corecharm.MetricTypeAbsolute},
+					"juju-units": corecharm.Metric{},
+				}, nil
+		},
+	)
+	s.listener = &mockListener{}
+	s.PatchValue(collect.NewSocketListener, collect.NewSocketListenerFnc(s.listener))
+}
+
+func (s *handlerSuite) TestListenerStart(c *gc.C) {
+	getResource := dt.StubGetResource(s.dummyResources)
+	worker, err := s.manifold.Start(getResource)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(worker, gc.NotNil)
+	c.Assert(s.listener.Calls(), gc.HasLen, 0)
+	worker.Kill()
+	err = worker.Wait()
+	c.Assert(err, jc.ErrorIsNil)
+	s.listener.CheckCall(c, 0, "Stop")
+}
+
+func (s *handlerSuite) TestJujuUnitsBuiltinMetric(c *gc.C) {
+	getResource := dt.StubGetResource(s.dummyResources)
+	worker, err := s.manifold.Start(getResource)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(worker, gc.NotNil)
+	c.Assert(s.listener.Calls(), gc.HasLen, 0)
+
+	conn, err := s.listener.trigger()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(conn.Calls(), gc.HasLen, 3)
+	conn.CheckCall(c, 2, "Close")
+
+	responseString := strings.Trim(string(conn.data), " \n\t")
+	c.Assert(responseString, gc.Equals, "ok")
+	c.Assert(s.recorder.batches, gc.HasLen, 1)
+
+	worker.Kill()
+	err = worker.Wait()
+	c.Assert(err, jc.ErrorIsNil)
+	s.listener.CheckCall(c, 0, "Stop")
+}
+
+func (s *handlerSuite) TestHandlerError(c *gc.C) {
+	getResource := dt.StubGetResource(s.dummyResources)
+	worker, err := s.manifold.Start(getResource)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(worker, gc.NotNil)
+	c.Assert(s.listener.Calls(), gc.HasLen, 0)
+
+	s.recorder.err = "well, this is embarrassing"
+
+	conn, err := s.listener.trigger()
+	c.Assert(err, gc.ErrorMatches, "failed to collect metrics: error adding 'juju-units' metric: well, this is embarrassing")
+	c.Assert(conn.Calls(), gc.HasLen, 3)
+	conn.CheckCall(c, 2, "Close")
+
+	responseString := strings.Trim(string(conn.data), " \n\t")
+	c.Assert(responseString, gc.Matches, ".*well, this is embarrassing")
+	c.Assert(s.recorder.batches, gc.HasLen, 0)
+
+	worker.Kill()
+	err = worker.Wait()
+	c.Assert(err, jc.ErrorIsNil)
+	s.listener.CheckCall(c, 0, "Stop")
+}
+
+type mockListener struct {
+	testing.Stub
+	handler spool.ConnectionHandler
+}
+
+func (l *mockListener) trigger() (*mockConnection, error) {
+	conn := &mockConnection{}
+	err := l.handler.Handle(conn)
+	if err != nil {
+		return conn, err
+	}
+	return conn, nil
+}
+
+// Stop implements the stopper interface.
+func (l *mockListener) Stop() {
+	l.AddCall("Stop")
+}
+
+func (l *mockListener) SetHandler(handler spool.ConnectionHandler) {
+	l.handler = handler
+}
+
+type mockConnection struct {
+	net.Conn
+	testing.Stub
+	data []byte
+}
+
+// SetDeadline implements the net.Conn interface.
+func (c *mockConnection) SetDeadline(t time.Time) error {
+	c.AddCall("SetDeadline", t)
+	return nil
+}
+
+// Write implements the net.Conn interface.
+func (c *mockConnection) Write(data []byte) (int, error) {
+	c.AddCall("Write", data)
+	c.data = data
+	return len(data), nil
+}
+
+// Close implements the net.Conn interface.
+func (c *mockConnection) Close() error {
+	c.AddCall("Close")
+	return nil
+}
+
+type mockMetricFactory struct {
+	spool.MetricFactory
+	recorder *dummyRecorder
+}
+
+// Recorder implements the spool.MetricFactory interface.
+func (f *mockMetricFactory) Recorder(metrics map[string]corecharm.Metric, charmURL, unitTag string) (spool.MetricRecorder, error) {
+	return f.recorder, nil
+}

=== added file 'src/github.com/juju/juju/worker/metrics/collect/manifold.go'
--- src/github.com/juju/juju/worker/metrics/collect/manifold.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/collect/manifold.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,251 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package collect provides a worker that executes the collect-metrics hook
+// periodically, as long as the workload has been started (between start and
+// stop hooks). collect-metrics executes in its own execution context, which is
+// restricted to avoid contention with uniter "lifecycle" hooks.
+package collect
+
+import (
+	"fmt"
+	"path"
+	"sync"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/names"
+	"github.com/juju/utils/os"
+	corecharm "gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/agent"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+	"github.com/juju/juju/worker/fortress"
+	"github.com/juju/juju/worker/metrics/spool"
+	"github.com/juju/juju/worker/uniter"
+	"github.com/juju/juju/worker/uniter/charm"
+	"github.com/juju/juju/worker/uniter/runner"
+	"github.com/juju/juju/worker/uniter/runner/context"
+)
+
+const (
+	defaultPeriod     = 5 * time.Minute
+	defaultSocketName = "metrics-collect.socket"
+)
+
+var (
+	logger = loggo.GetLogger("juju.worker.metrics.collect")
+
+	// errMetricsNotDefined is returned when the charm the uniter is running
+	// does not declare any metrics.
+	errMetricsNotDefined = errors.New("no metrics defined")
+
+	// readCharm reads the charm directory and extracts the declared metrics
+	// and the charm URL.
+	readCharm = func(unitTag names.UnitTag, paths context.Paths) (*corecharm.URL, map[string]corecharm.Metric, error) {
+		ch, err := corecharm.ReadCharm(paths.GetCharmDir())
+		if err != nil {
+			return nil, nil, errors.Annotatef(err, "failed to read charm from: %v", paths.GetCharmDir())
+		}
+		chURL, err := charm.ReadCharmURL(path.Join(paths.GetCharmDir(), charm.CharmURLPath))
+		if err != nil {
+			return nil, nil, errors.Trace(err)
+		}
+		charmMetrics := map[string]corecharm.Metric{}
+		if ch.Metrics() != nil {
+			charmMetrics = ch.Metrics().Metrics
+		}
+		return chURL, charmMetrics, nil
+	}
+
+	// newRecorder returns a struct that implements the spool.MetricRecorder
+	// interface.
+	newRecorder = func(unitTag names.UnitTag, paths context.Paths, metricFactory spool.MetricFactory) (spool.MetricRecorder, error) {
+		chURL, charmMetrics, err := readCharm(unitTag, paths)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		if len(charmMetrics) == 0 {
+			return nil, errMetricsNotDefined
+		}
+		return metricFactory.Recorder(charmMetrics, chURL.String(), unitTag.String())
+	}
+
+	newSocketListener = func(path string, handler spool.ConnectionHandler) (stopper, error) {
+		return spool.NewSocketListener(path, handler)
+	}
+)
+
+type stopper interface {
+	Stop()
+}
+
+// ManifoldConfig identifies the resource names upon which the collect
+// manifold depends, along with an optional collection period.
+type ManifoldConfig struct {
+	Period *time.Duration
+
+	AgentName       string
+	MetricSpoolName string
+	CharmDirName    string
+}
+
+// Manifold returns a collect-metrics manifold.
+func Manifold(config ManifoldConfig) dependency.Manifold {
+	return dependency.Manifold{
+		Inputs: []string{
+			config.AgentName,
+			config.MetricSpoolName,
+			config.CharmDirName,
+		},
+		Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
+			collector, err := newCollect(config, getResource)
+			if err != nil {
+				return nil, err
+			}
+			return spool.NewPeriodicWorker(collector.Do, collector.period, worker.NewTimer, collector.stop), nil
+		},
+	}
+}
+
+// socketName returns the socket path for the collect-metrics listener: a
+// named pipe on Windows, a file in the agent base directory elsewhere.
+func socketName(baseDir, unitTag string) string {
+	if os.HostOS() == os.Windows {
+		return fmt.Sprintf(`\\.\pipe\collect-metrics-%s`, unitTag)
+	}
+	return path.Join(baseDir, defaultSocketName)
+}
+
+// newCollect constructs a collect worker from the manifold config and its
+// resources.
+func newCollect(config ManifoldConfig, getResource dependency.GetResourceFunc) (*collect, error) {
+	period := defaultPeriod
+	if config.Period != nil {
+		period = *config.Period
+	}
+
+	var agent agent.Agent
+	if err := getResource(config.AgentName, &agent); err != nil {
+		return nil, err
+	}
+
+	var metricFactory spool.MetricFactory
+	err := getResource(config.MetricSpoolName, &metricFactory)
+	if err != nil {
+		return nil, err
+	}
+
+	var charmdir fortress.Guest
+	err = getResource(config.CharmDirName, &charmdir)
+	if err != nil {
+		return nil, err
+	}
+
+	agentConfig := agent.CurrentConfig()
+	tag := agentConfig.Tag()
+	unitTag, ok := tag.(names.UnitTag)
+	if !ok {
+		return nil, errors.Errorf("expected a unit tag, got %v", tag)
+	}
+	paths := uniter.NewWorkerPaths(agentConfig.DataDir(), unitTag, "metrics-collect")
+	runner := &hookRunner{
+		unitTag: unitTag.String(),
+		paths:   paths,
+	}
+	var listener stopper
+	charmURL, validMetrics, err := readCharm(unitTag, paths)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	if len(validMetrics) > 0 && charmURL.Schema == "local" {
+		h := newHandler(handlerConfig{
+			unitTag:        unitTag,
+			charmURL:       charmURL,
+			validMetrics:   validMetrics,
+			metricsFactory: metricFactory,
+			runner:         runner,
+		})
+		listener, err = newSocketListener(socketName(paths.State.BaseDir, unitTag.String()), h)
+		if err != nil {
+			return nil, err
+		}
+	}
+	collector := &collect{
+		period:        period,
+		agent:         agent,
+		metricFactory: metricFactory,
+		charmdir:      charmdir,
+		listener:      listener,
+		runner:        runner,
+	}
+
+	return collector, nil
+}
+
+type collect struct {
+	period        time.Duration
+	agent         agent.Agent
+	metricFactory spool.MetricFactory
+	charmdir      fortress.Guest
+	listener      stopper
+	runner        *hookRunner
+}
+
+func (w *collect) stop() {
+	if w.listener != nil {
+		w.listener.Stop()
+	}
+}
+
+// Do satisfies the worker.PeriodicWorkerCall function type.
+func (w *collect) Do(stop <-chan struct{}) error { + config := w.agent.CurrentConfig() + tag := config.Tag() + unitTag, ok := tag.(names.UnitTag) + if !ok { + return errors.Errorf("expected a unit tag, got %v", tag) + } + paths := uniter.NewWorkerPaths(config.DataDir(), unitTag, "metrics-collect") + + recorder, err := newRecorder(unitTag, paths, w.metricFactory) + if errors.Cause(err) == errMetricsNotDefined { + logger.Tracef("%v", err) + return nil + } else if err != nil { + return errors.Annotate(err, "failed to instantiate metric recorder") + } + + err = w.charmdir.Visit(func() error { + return w.runner.do(recorder) + }, stop) + if err == fortress.ErrAborted { + logger.Tracef("cannot execute collect-metrics: %v", err) + return nil + } + return err +} + +type hookRunner struct { + m sync.Mutex + + unitTag string + paths uniter.Paths +} + +func (h *hookRunner) do(recorder spool.MetricRecorder) error { + h.m.Lock() + defer h.m.Unlock() + logger.Tracef("recording metrics") + + ctx := newHookContext(h.unitTag, recorder) + err := ctx.addJujuUnitsMetric() + if err != nil { + return errors.Annotatef(err, "error adding 'juju-units' metric") + } + + r := runner.NewRunner(ctx, h.paths) + err = r.RunHook(string(hooks.CollectMetrics)) + if err != nil { + return errors.Annotatef(err, "error running 'collect-metrics' hook") + } + return nil +} === added file 'src/github.com/juju/juju/worker/metrics/collect/manifold_test.go' --- src/github.com/juju/juju/worker/metrics/collect/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/collect/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,292 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package collect_test + +import ( + "os" + "path/filepath" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + corecharm "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/agent" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/fortress" + "github.com/juju/juju/worker/metrics/collect" + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter/runner/context" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type ManifoldSuite struct { + coretesting.BaseSuite + + dataDir string + oldLcAll string + + manifoldConfig collect.ManifoldConfig + manifold dependency.Manifold + dummyResources dt.StubResources + getResource dependency.GetResourceFunc +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.manifoldConfig = collect.ManifoldConfig{ + AgentName: "agent-name", + MetricSpoolName: "metric-spool-name", + CharmDirName: "charmdir-name", + } + s.manifold = collect.Manifold(s.manifoldConfig) + s.dataDir = c.MkDir() + + // create unit agent base dir so that hooks can run. 
+ err := os.MkdirAll(filepath.Join(s.dataDir, "agents", "unit-u-0"), 0777) + c.Assert(err, jc.ErrorIsNil) + + s.dummyResources = dt.StubResources{ + "agent-name": dt.StubResource{Output: &dummyAgent{dataDir: s.dataDir}}, + "metric-spool-name": dt.StubResource{Output: &dummyMetricFactory{}}, + "charmdir-name": dt.StubResource{Output: &dummyCharmdir{aborted: false}}, + } + s.getResource = dt.StubGetResource(s.dummyResources) +} + +// TestInputs ensures the collect manifold has the expected defined inputs. +func (s *ManifoldSuite) TestInputs(c *gc.C) { + c.Check(s.manifold.Inputs, jc.DeepEquals, []string{ + "agent-name", "metric-spool-name", "charmdir-name", + }) +} + +// TestStartMissingDeps ensures that the manifold correctly handles a missing +// resource dependency. +func (s *ManifoldSuite) TestStartMissingDeps(c *gc.C) { + for _, missingDep := range []string{ + "agent-name", "metric-spool-name", "charmdir-name", + } { + testResources := dt.StubResources{} + for k, v := range s.dummyResources { + if k == missingDep { + testResources[k] = dt.StubResource{Error: dependency.ErrMissing} + } else { + testResources[k] = v + } + } + getResource := dt.StubGetResource(testResources) + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.Equals, dependency.ErrMissing) + } +} + +// TestCollectWorkerStarts ensures that the manifold correctly sets up the worker. +func (s *ManifoldSuite) TestCollectWorkerStarts(c *gc.C) { + s.PatchValue(collect.NewRecorder, + func(_ names.UnitTag, _ context.Paths, _ spool.MetricFactory) (spool.MetricRecorder, error) { + // Return a dummyRecorder here, because otherwise a real one + // *might* get instantiated and error out, if the periodic worker + // happens to fire before the worker shuts down (as seen in + // LP:#1497355). + return &dummyRecorder{ + charmURL: "cs:ubuntu-1", + unitTag: "ubuntu/0", + }, nil + }) + s.PatchValue(collect.ReadCharm, + func(_ names.UnitTag, _ context.Paths) (*corecharm.URL, map[string]corecharm.Metric, error) { + return corecharm.MustParseURL("cs:ubuntu-1"), map[string]corecharm.Metric{"pings": corecharm.Metric{Description: "test metric", Type: corecharm.MetricTypeAbsolute}}, nil + }) + getResource := dt.StubGetResource(s.dummyResources) + worker, err := s.manifold.Start(getResource) + c.Assert(err, jc.ErrorIsNil) + c.Assert(worker, gc.NotNil) + worker.Kill() + err = worker.Wait() + c.Assert(err, jc.ErrorIsNil) +} + +// TestJujuUnitsBuiltinMetric tests that the juju-units built-in metric is collected +// with a mock implementation of newRecorder. 
+func (s *ManifoldSuite) TestJujuUnitsBuiltinMetric(c *gc.C) { + recorder := &dummyRecorder{ + charmURL: "cs:wordpress-37", + unitTag: "wp/0", + isDeclaredMetric: true, + } + s.PatchValue(collect.NewRecorder, + func(_ names.UnitTag, _ context.Paths, _ spool.MetricFactory) (spool.MetricRecorder, error) { + return recorder, nil + }) + s.PatchValue(collect.ReadCharm, + func(_ names.UnitTag, _ context.Paths) (*corecharm.URL, map[string]corecharm.Metric, error) { + return corecharm.MustParseURL("cs:wordpress-37"), map[string]corecharm.Metric{"pings": corecharm.Metric{Description: "test metric", Type: corecharm.MetricTypeAbsolute}}, nil + }) + collectEntity, err := collect.NewCollect(s.manifoldConfig, s.getResource) + c.Assert(err, jc.ErrorIsNil) + err = collectEntity.Do(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(recorder.closed, jc.IsTrue) + c.Assert(recorder.batches, gc.HasLen, 1) + c.Assert(recorder.batches[0].CharmURL, gc.Equals, "cs:wordpress-37") + c.Assert(recorder.batches[0].UnitTag, gc.Equals, "wp/0") + c.Assert(recorder.batches[0].Metrics, gc.HasLen, 1) + c.Assert(recorder.batches[0].Metrics[0].Key, gc.Equals, "juju-units") + c.Assert(recorder.batches[0].Metrics[0].Value, gc.Equals, "1") +} + +// TestAvailability tests that the charmdir resource is properly checked. +func (s *ManifoldSuite) TestAvailability(c *gc.C) { + recorder := &dummyRecorder{ + charmURL: "cs:wordpress-37", + unitTag: "wp/0", + isDeclaredMetric: true, + } + s.PatchValue(collect.NewRecorder, + func(_ names.UnitTag, _ context.Paths, _ spool.MetricFactory) (spool.MetricRecorder, error) { + return recorder, nil + }) + s.PatchValue(collect.ReadCharm, + func(_ names.UnitTag, _ context.Paths) (*corecharm.URL, map[string]corecharm.Metric, error) { + return corecharm.MustParseURL("cs:wordpress-37"), map[string]corecharm.Metric{"pings": corecharm.Metric{Description: "test metric", Type: corecharm.MetricTypeAbsolute}}, nil + }) + charmdir := &dummyCharmdir{aborted: true} + s.dummyResources["charmdir-name"] = dt.StubResource{Output: charmdir} + getResource := dt.StubGetResource(s.dummyResources) + collectEntity, err := collect.NewCollect(s.manifoldConfig, getResource) + c.Assert(err, jc.ErrorIsNil) + err = collectEntity.Do(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(recorder.batches, gc.HasLen, 0) + + charmdir = &dummyCharmdir{aborted: false} + s.dummyResources["charmdir-name"] = dt.StubResource{Output: charmdir} + getResource = dt.StubGetResource(s.dummyResources) + collectEntity, err = collect.NewCollect(s.manifoldConfig, getResource) + c.Assert(err, jc.ErrorIsNil) + err = collectEntity.Do(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(recorder.closed, jc.IsTrue) + c.Assert(recorder.batches, gc.HasLen, 1) +} + +// TestNoMetricsDeclared tests that if metrics are not declared, none are +// collected, not even builtin. 
+func (s *ManifoldSuite) TestNoMetricsDeclared(c *gc.C) { + recorder := &dummyRecorder{ + charmURL: "cs:wordpress-37", + unitTag: "wp/0", + isDeclaredMetric: false, + } + s.PatchValue(collect.NewRecorder, + func(_ names.UnitTag, _ context.Paths, _ spool.MetricFactory) (spool.MetricRecorder, error) { + return recorder, nil + }) + s.PatchValue(collect.ReadCharm, + func(_ names.UnitTag, _ context.Paths) (*corecharm.URL, map[string]corecharm.Metric, error) { + return corecharm.MustParseURL("cs:wordpress-37"), map[string]corecharm.Metric{}, nil + }) + collectEntity, err := collect.NewCollect(s.manifoldConfig, s.getResource) + c.Assert(err, jc.ErrorIsNil) + err = collectEntity.Do(nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(recorder.closed, jc.IsTrue) + c.Assert(recorder.batches, gc.HasLen, 0) +} + +type dummyAgent struct { + agent.Agent + dataDir string +} + +func (a dummyAgent) CurrentConfig() agent.Config { + return &dummyAgentConfig{dataDir: a.dataDir} +} + +type dummyAgentConfig struct { + agent.Config + dataDir string +} + +// Tag implements agent.AgentConfig. +func (ac dummyAgentConfig) Tag() names.Tag { + return names.NewUnitTag("u/0") +} + +// DataDir implements agent.AgentConfig. +func (ac dummyAgentConfig) DataDir() string { + return ac.dataDir +} + +type dummyCharmdir struct { + fortress.Guest + + aborted bool +} + +func (a *dummyCharmdir) Visit(visit fortress.Visit, _ fortress.Abort) error { + if a.aborted { + return fortress.ErrAborted + } + return visit() +} + +type dummyMetricFactory struct { + spool.MetricFactory +} + +type dummyRecorder struct { + spool.MetricRecorder + + // inputs + charmURL, unitTag string + metrics map[string]corecharm.Metric + isDeclaredMetric bool + err string + + // outputs + closed bool + batches []spool.MetricBatch +} + +func (r *dummyRecorder) AddMetric(key, value string, created time.Time) error { + if r.err != "" { + return errors.New(r.err) + } + then := time.Date(2015, 8, 20, 15, 48, 0, 0, time.UTC) + r.batches = append(r.batches, spool.MetricBatch{ + CharmURL: r.charmURL, + UUID: utils.MustNewUUID().String(), + Created: then, + Metrics: []jujuc.Metric{{ + Key: key, + Value: value, + Time: then, + }}, + UnitTag: r.unitTag, + }) + return nil +} + +func (r *dummyRecorder) IsDeclaredMetric(key string) bool { + if r.isDeclaredMetric { + return true + } + _, ok := r.metrics[key] + return ok +} + +func (r *dummyRecorder) Close() error { + r.closed = true + return nil +} === added file 'src/github.com/juju/juju/worker/metrics/collect/package_test.go' --- src/github.com/juju/juju/worker/metrics/collect/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/collect/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package collect_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added directory 'src/github.com/juju/juju/worker/metrics/sender' === added file 'src/github.com/juju/juju/worker/metrics/sender/export_test.go' --- src/github.com/juju/juju/worker/metrics/sender/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/sender/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
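
The sender package opening here drains the spool that collect fills: read all batches, hand them to the controller, and delete only what the controller acknowledged. Reduced to a sketch against the spool interfaces shown later in this diff (the send callback stands in for the API client, the helper name is illustrative, and error handling is trimmed):

    // drain is an illustrative reduction of the sender's cycle, not diff code.
    func drain(factory spool.MetricFactory, send func([]spool.MetricBatch) (map[string]error, error)) error {
    	reader, err := factory.Reader()
    	if err != nil {
    		return err
    	}
    	defer reader.Close()
    	batches, err := reader.Read()
    	if err != nil {
    		return err
    	}
    	results, err := send(batches)
    	if err != nil {
    		return err
    	}
    	for uuid, result := range results {
    		if result == nil { // acknowledged: safe to drop the local copy
    			_ = reader.Remove(uuid) // best effort; duplicates resolve server-side
    		}
    	}
    	return nil
    }
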
+ +package sender + +var ( + NewSender = newSender + NewMetricAdderClient = newMetricAdderClient + SocketName = &socketName +) === added file 'src/github.com/juju/juju/worker/metrics/sender/manifold.go' --- src/github.com/juju/juju/worker/metrics/sender/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/sender/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,80 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package sender + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/metricsadder" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter" +) + +var ( + logger = loggo.GetLogger("juju.worker.metrics.sender") + newMetricAdderClient = func(apiCaller base.APICaller) metricsadder.MetricsAdderClient { + return metricsadder.NewClient(apiCaller) + } +) + +const ( + period = time.Minute * 5 +) + +// ManifoldConfig defines configuration of a metric sender manifold. +type ManifoldConfig struct { + AgentName string + APICallerName string + MetricSpoolName string +} + +// Manifold creates a metric sender manifold. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{ + config.AgentName, + config.APICallerName, + config.MetricSpoolName, + }, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + var apicaller base.APICaller + var factory spool.MetricFactory + err := getResource(config.APICallerName, &apicaller) + if err != nil { + return nil, errors.Trace(err) + } + err = getResource(config.MetricSpoolName, &factory) + if err != nil { + return nil, errors.Trace(err) + } + var agent agent.Agent + if err := getResource(config.AgentName, &agent); err != nil { + return nil, err + } + agentConfig := agent.CurrentConfig() + tag := agentConfig.Tag() + unitTag, ok := tag.(names.UnitTag) + if !ok { + return nil, errors.Errorf("expected a unit tag, got %v", tag) + } + paths := uniter.NewWorkerPaths(agentConfig.DataDir(), unitTag, "metrics-send") + + client := newMetricAdderClient(apicaller) + + s, err := newSender(client, factory, paths.State.BaseDir, unitTag.String()) + if err != nil { + return nil, errors.Trace(err) + } + return spool.NewPeriodicWorker(s.Do, period, worker.NewTimer, s.stop), nil + }, + } +} === added file 'src/github.com/juju/juju/worker/metrics/sender/manifold_test.go' --- src/github.com/juju/juju/worker/metrics/sender/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/sender/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,159 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
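
For orientation, constructing the manifold defined above yields a three-input dependency; the resource names here are examples, and it is the dependency engine (construction elided) that calls Start once all inputs can be satisfied:

    manifold := sender.Manifold(sender.ManifoldConfig{
    	AgentName:       "agent",
    	APICallerName:   "api-caller",
    	MetricSpoolName: "metric-spool",
    })
    // manifold.Inputs is []string{"agent", "api-caller", "metric-spool"};
    // the engine resolves each name and hands it to Start via getResource.
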
+ +package sender_test + +import ( + "net/url" + "os" + "path/filepath" + + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/httprequest" + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/metricsadder" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/metrics/sender" + "github.com/juju/juju/worker/metrics/spool" +) + +type ManifoldSuite struct { + testing.IsolationSuite + factory spool.MetricFactory + client metricsadder.MetricsAdderClient + manifold dependency.Manifold + getResource dependency.GetResourceFunc +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + spoolDir := c.MkDir() + s.IsolationSuite.SetUpTest(c) + s.factory = &stubMetricFactory{ + &testing.Stub{}, + spoolDir, + } + + testAPIClient := func(apiCaller base.APICaller) metricsadder.MetricsAdderClient { + return newTestAPIMetricSender() + } + s.PatchValue(&sender.NewMetricAdderClient, testAPIClient) + + s.manifold = sender.Manifold(sender.ManifoldConfig{ + AgentName: "agent", + APICallerName: "api-caller", + MetricSpoolName: "metric-spool", + }) + + dataDir := c.MkDir() + // create unit agent base dir so that hooks can run. + err := os.MkdirAll(filepath.Join(dataDir, "agents", "unit-u-0"), 0777) + c.Assert(err, jc.ErrorIsNil) + + s.getResource = dt.StubGetResource(dt.StubResources{ + "agent": dt.StubResource{Output: &dummyAgent{dataDir: dataDir}}, + "api-caller": dt.StubResource{Output: &stubAPICaller{&testing.Stub{}}}, + "metric-spool": dt.StubResource{Output: s.factory}, + }) +} + +func (s *ManifoldSuite) TestInputs(c *gc.C) { + c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent", "api-caller", "metric-spool"}) +} + +func (s *ManifoldSuite) TestStartMissingAPICaller(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "api-caller": dt.StubResource{Error: dependency.ErrMissing}, + "metric-spool": dt.StubResource{Output: s.factory}, + }) + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.ErrorMatches, dependency.ErrMissing.Error()) +} + +func (s *ManifoldSuite) TestStartMissingAgent(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "agent": dt.StubResource{Error: dependency.ErrMissing}, + "api-caller": dt.StubResource{Output: &stubAPICaller{&testing.Stub{}}}, + "metric-spool": dt.StubResource{Output: s.factory}, + }) + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.ErrorMatches, dependency.ErrMissing.Error()) +} + +func (s *ManifoldSuite) TestStartSuccess(c *gc.C) { + s.setupWorkerTest(c) +} + +func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker { + worker, err := s.manifold.Start(s.getResource) + c.Check(err, jc.ErrorIsNil) + s.AddCleanup(func(c *gc.C) { + worker.Kill() + err := worker.Wait() + c.Check(err, jc.ErrorIsNil) + }) + return worker +} + +var _ base.APICaller = (*stubAPICaller)(nil) + +type stubAPICaller struct { + *testing.Stub +} + +func (s *stubAPICaller) APICall(objType string, version int, id, request string, params, response interface{}) error { + s.MethodCall(s, "APICall", objType, version, id, request, params, response) + return nil +} + +func (s *stubAPICaller) BestFacadeVersion(facade string) int { + s.MethodCall(s, "BestFacadeVersion", facade) + return 42 +} + +func (s *stubAPICaller) ModelTag() 
(names.ModelTag, error) { + s.MethodCall(s, "ModelTag") + return names.NewModelTag("foobar"), nil +} + +func (s *stubAPICaller) ConnectStream(string, url.Values) (base.Stream, error) { + panic("should not be called") +} + +func (s *stubAPICaller) HTTPClient() (*httprequest.Client, error) { + panic("should not be called") +} + +type dummyAgent struct { + agent.Agent + dataDir string +} + +func (a dummyAgent) CurrentConfig() agent.Config { + return &dummyAgentConfig{dataDir: a.dataDir} +} + +type dummyAgentConfig struct { + agent.Config + dataDir string +} + +// Tag implements agent.AgentConfig. +func (ac dummyAgentConfig) Tag() names.Tag { + return names.NewUnitTag("u/0") +} + +// DataDir implements agent.AgentConfig. +func (ac dummyAgentConfig) DataDir() string { + return ac.dataDir +} === added file 'src/github.com/juju/juju/worker/metrics/sender/package_test.go' --- src/github.com/juju/juju/worker/metrics/sender/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/sender/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package sender_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/metrics/sender/sender.go' --- src/github.com/juju/juju/worker/metrics/sender/sender.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/sender/sender.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,125 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package sender contains the implementation of the metric +// sender manifold. +package sender + +import ( + "fmt" + "net" + "path" + "time" + + "github.com/juju/errors" + "github.com/juju/utils/os" + + "github.com/juju/juju/api/metricsadder" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/metrics/spool" +) + +const ( + defaultSocketName = "metrics-send.socket" +) + +type stopper interface { + Stop() +} + +type sender struct { + client metricsadder.MetricsAdderClient + factory spool.MetricFactory + listener stopper +} + +// Do sends metrics from the metric spool to the +// controller via an api call. +func (s *sender) Do(stop <-chan struct{}) error { + reader, err := s.factory.Reader() + if err != nil { + return errors.Trace(err) + } + defer reader.Close() + return s.sendMetrics(reader) +} + +func (s *sender) sendMetrics(reader spool.MetricReader) error { + batches, err := reader.Read() + if err != nil { + logger.Warningf("failed to open the metric reader: %v", err) + return errors.Trace(err) + } + var sendBatches []params.MetricBatchParam + for _, batch := range batches { + sendBatches = append(sendBatches, spool.APIMetricBatch(batch)) + } + results, err := s.client.AddMetricBatches(sendBatches) + if err != nil { + logger.Warningf("could not send metrics: %v", err) + return errors.Trace(err) + } + for batchUUID, resultErr := range results { + // if we fail to send any metric batch we log a warning with the assumption that + // the unsent metric batches remain in the spool directory and will be sent to the + // controller when the network partition is restored. 
+ if _, ok := resultErr.(*params.Error); ok || params.IsCodeAlreadyExists(resultErr) { + err = reader.Remove(batchUUID) + if err != nil { + logger.Warningf("could not remove batch %q from spool: %v", batchUUID, err) + } + } else { + logger.Warningf("failed to send batch %q: %v", batchUUID, resultErr) + } + } + return nil +} + +// Handle sends metrics from the spool directory to the +// controller. +func (s *sender) Handle(c net.Conn) (err error) { + defer func() { + if err != nil { + fmt.Fprintf(c, "%v\n", err) + } else { + fmt.Fprintf(c, "ok\n") + } + c.Close() + }() + err = c.SetDeadline(time.Now().Add(spool.DefaultTimeout)) + if err != nil { + return errors.Annotate(err, "failed to set the deadline") + } + reader, err := s.factory.Reader() + if err != nil { + return errors.Trace(err) + } + defer reader.Close() + return s.sendMetrics(reader) +} + +func (s *sender) stop() { + if s.listener != nil { + s.listener.Stop() + } +} + +var socketName = func(baseDir, unitTag string) string { + if os.HostOS() == os.Windows { + return fmt.Sprintf(`\\.\pipe\send-metrics-%s`, unitTag) + } + return path.Join(baseDir, defaultSocketName) +} + +func newSender(client metricsadder.MetricsAdderClient, factory spool.MetricFactory, baseDir, unitTag string) (*sender, error) { + s := &sender{ + client: client, + factory: factory, + } + listener, err := spool.NewSocketListener(socketName(baseDir, unitTag), s) + if err != nil { + return nil, errors.Trace(err) + } + s.listener = listener + return s, nil +} === added file 'src/github.com/juju/juju/worker/metrics/sender/sender_test.go' --- src/github.com/juju/juju/worker/metrics/sender/sender_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/sender/sender_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,319 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
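
One subtlety in the sendMetrics result handling above is worth spelling out: per batch, success appears to arrive as a typed nil *params.Error (the test double below makes this explicit), so the type assertion treats success and an already-exists response alike as final outcomes, and only untyped errors leave the batch spooled. In sketch form:

    if _, ok := resultErr.(*params.Error); ok || params.IsCodeAlreadyExists(resultErr) {
    	// Final outcome: the controller has the batch (success is a typed
    	// nil *params.Error), so the local copy can be removed.
    } else {
    	// Transient failure: keep the batch spooled for the next run.
    }
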
+ +package sender_test + +import ( + "errors" + "fmt" + "io" + "net" + "path" + "runtime" + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + corecharm "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/metrics/sender" + "github.com/juju/juju/worker/metrics/spool" +) + +var _ = gc.Suite(&senderSuite{}) + +type senderSuite struct { + spoolDir string + socketDir string + metricfactory spool.MetricFactory +} + +func (s *senderSuite) SetUpTest(c *gc.C) { + s.spoolDir = c.MkDir() + s.socketDir = c.MkDir() + + s.metricfactory = &stubMetricFactory{ + &testing.Stub{}, + s.spoolDir, + } + + declaredMetrics := map[string]corecharm.Metric{ + "pings": corecharm.Metric{Description: "test pings", Type: corecharm.MetricTypeAbsolute}, + } + recorder, err := s.metricfactory.Recorder(declaredMetrics, "local:trusty/testcharm", "testcharm/0") + c.Assert(err, jc.ErrorIsNil) + + err = recorder.AddMetric("pings", "50", time.Now()) + c.Assert(err, jc.ErrorIsNil) + + err = recorder.Close() + c.Assert(err, jc.ErrorIsNil) + + reader, err := s.metricfactory.Reader() + c.Assert(err, jc.ErrorIsNil) + batches, err := reader.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 1) + + testing.PatchValue(sender.SocketName, func(_, _ string) string { + return sockPath(c) + }) +} + +func (s *senderSuite) TestHandler(c *gc.C) { + apiSender := newTestAPIMetricSender() + tmpDir := c.MkDir() + metricFactory := &stubMetricFactory{ + &testing.Stub{}, + tmpDir, + } + + declaredMetrics := map[string]corecharm.Metric{ + "pings": corecharm.Metric{Description: "test pings", Type: corecharm.MetricTypeAbsolute}, + } + recorder, err := metricFactory.Recorder(declaredMetrics, "local:trusty/testcharm", "testcharm/0") + c.Assert(err, jc.ErrorIsNil) + + err = recorder.AddMetric("pings", "50", time.Now()) + c.Assert(err, jc.ErrorIsNil) + + err = recorder.Close() + c.Assert(err, jc.ErrorIsNil) + + metricSender, err := sender.NewSender(apiSender, s.metricfactory, s.socketDir, "") + c.Assert(err, jc.ErrorIsNil) + + conn := &mockConnection{data: []byte(fmt.Sprintf("%v\n", tmpDir))} + err = metricSender.Handle(conn) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(apiSender.batches, gc.HasLen, 1) + c.Assert(apiSender.batches[0].Tag, gc.Equals, "testcharm/0") + c.Assert(apiSender.batches[0].Batch.CharmURL, gc.Equals, "local:trusty/testcharm") + c.Assert(apiSender.batches[0].Batch.Metrics, gc.HasLen, 1) + c.Assert(apiSender.batches[0].Batch.Metrics[0].Key, gc.Equals, "pings") + c.Assert(apiSender.batches[0].Batch.Metrics[0].Value, gc.Equals, "50") +} + +func (s *senderSuite) TestMetricSendingSuccess(c *gc.C) { + apiSender := newTestAPIMetricSender() + + metricSender, err := sender.NewSender(apiSender, s.metricfactory, s.socketDir, "test-unit-0") + c.Assert(err, jc.ErrorIsNil) + stopCh := make(chan struct{}) + err = metricSender.Do(stopCh) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(apiSender.batches, gc.HasLen, 1) + + reader, err := spool.NewJSONMetricReader(s.spoolDir) + c.Assert(err, jc.ErrorIsNil) + batches, err := reader.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 0) +} + +func (s *senderSuite) TestSendingGetDuplicate(c *gc.C) { + apiSender := newTestAPIMetricSender() + + apiErr := ¶ms.Error{Message: "already exists", Code: params.CodeAlreadyExists} + select { + case apiSender.errors <- apiErr: + default: + c.Fatalf("blocked error channel") + } + + metricSender, err := sender.NewSender(apiSender, 
s.metricfactory, s.socketDir, "test-unit-0") + c.Assert(err, jc.ErrorIsNil) + stopCh := make(chan struct{}) + err = metricSender.Do(stopCh) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(apiSender.batches, gc.HasLen, 1) + + reader, err := spool.NewJSONMetricReader(s.spoolDir) + c.Assert(err, jc.ErrorIsNil) + batches, err := reader.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 0) +} + +func (s *senderSuite) TestSendingFails(c *gc.C) { + apiSender := newTestAPIMetricSender() + + select { + case apiSender.sendError <- errors.New("something went wrong"): + default: + c.Fatalf("blocked error channel") + } + + metricSender, err := sender.NewSender(apiSender, s.metricfactory, s.socketDir, "test-unit-0") + c.Assert(err, jc.ErrorIsNil) + stopCh := make(chan struct{}) + err = metricSender.Do(stopCh) + c.Assert(err, gc.ErrorMatches, "something went wrong") + + c.Assert(apiSender.batches, gc.HasLen, 1) + + reader, err := spool.NewJSONMetricReader(s.spoolDir) + c.Assert(err, jc.ErrorIsNil) + batches, err := reader.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 1) +} + +func (s *senderSuite) TestNoSpoolDirectory(c *gc.C) { + apiSender := newTestAPIMetricSender() + + metricfactory := &stubMetricFactory{ + &testing.Stub{}, + "/some/random/spool/dir", + } + + metricSender, err := sender.NewSender(apiSender, metricfactory, s.socketDir, "") + c.Assert(err, jc.ErrorIsNil) + stopCh := make(chan struct{}) + err = metricSender.Do(stopCh) + c.Assert(err, gc.ErrorMatches, `failed to open spool directory "/some/random/spool/dir": .*`) + + c.Assert(apiSender.batches, gc.HasLen, 0) +} + +func (s *senderSuite) TestNoMetricsToSend(c *gc.C) { + apiSender := newTestAPIMetricSender() + + newTmpSpoolDir := c.MkDir() + metricfactory := &stubMetricFactory{ + &testing.Stub{}, + newTmpSpoolDir, + } + + metricSender, err := sender.NewSender(apiSender, metricfactory, s.socketDir, "test-unit-0") + c.Assert(err, jc.ErrorIsNil) + stopCh := make(chan struct{}) + err = metricSender.Do(stopCh) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(apiSender.batches, gc.HasLen, 0) +} + +func newTestAPIMetricSender() *testAPIMetricSender { + return &testAPIMetricSender{errors: make(chan error, 1), sendError: make(chan error, 1)} +} + +type testAPIMetricSender struct { + batches []params.MetricBatchParam + errors chan error + sendError chan error +} + +func (t *testAPIMetricSender) AddMetricBatches(batches []params.MetricBatchParam) (map[string]error, error) { + t.batches = batches + + var err error + select { + case e := <-t.errors: + err = e + default: + err = (*params.Error)(nil) + } + + var sendErr error + select { + case e := <-t.sendError: + sendErr = e + default: + sendErr = nil + } + + errors := make(map[string]error) + for _, b := range batches { + errors[b.Batch.UUID] = err + } + return errors, sendErr +} + +type stubMetricFactory struct { + *testing.Stub + spoolDir string +} + +func (s *stubMetricFactory) Recorder(declaredMetrics map[string]corecharm.Metric, charmURL, unitTag string) (spool.MetricRecorder, error) { + s.MethodCall(s, "Recorder", declaredMetrics, charmURL, unitTag) + config := spool.MetricRecorderConfig{ + SpoolDir: s.spoolDir, + Metrics: declaredMetrics, + CharmURL: charmURL, + UnitTag: unitTag, + } + + return spool.NewJSONMetricRecorder(config) +} + +func (s *stubMetricFactory) Reader() (spool.MetricReader, error) { + s.MethodCall(s, "Reader") + return spool.NewJSONMetricReader(s.spoolDir) + +} + +type mockConnection struct { + net.Conn + testing.Stub + data []byte +} + +// 
SetDeadline implements the net.Conn interface. +func (c *mockConnection) SetDeadline(t time.Time) error { + c.AddCall("SetDeadline", t) + return nil +} + +// Write implements the net.Conn interface. +func (c *mockConnection) Write(data []byte) (int, error) { + c.AddCall("Write", data) + c.data = data + return len(data), nil +} + +// Close implements the net.Conn interface. +func (c *mockConnection) Close() error { + c.AddCall("Close") + return nil +} + +func (c mockConnection) eof() bool { + return len(c.data) == 0 +} + +func (c *mockConnection) readByte() byte { + b := c.data[0] + c.data = c.data[1:] + return b +} + +func (c *mockConnection) Read(p []byte) (n int, err error) { + if c.eof() { + err = io.EOF + return + } + if cp := cap(p); cp > 0 { + for n < cp { + p[n] = c.readByte() + n++ + if c.eof() { + break + } + } + } + return +} + +func sockPath(c *gc.C) string { + sockPath := path.Join(c.MkDir(), "test.listener") + if runtime.GOOS == "windows" { + return `\\.\pipe` + sockPath[2:] + } + return sockPath +} === added directory 'src/github.com/juju/juju/worker/metrics/spool' === added file 'src/github.com/juju/juju/worker/metrics/spool/export_test.go' --- src/github.com/juju/juju/worker/metrics/spool/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/spool/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package spool + +var ( + NewFactory = &newFactory +) === added file 'src/github.com/juju/juju/worker/metrics/spool/listener.go' --- src/github.com/juju/juju/worker/metrics/spool/listener.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/spool/listener.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package spool + +import ( + "net" + "time" + + "github.com/juju/errors" + "launchpad.net/tomb" + + "github.com/juju/juju/juju/sockets" + "github.com/juju/juju/worker" +) + +const ( + // DefaultTimeout specifies the default socket read and write timeout. + DefaultTimeout = 3 * time.Second +) + +// ConnectionHandler defines the method needed to handle socket connections. +type ConnectionHandler interface { + Handle(net.Conn) error +} + +type socketListener struct { + listener net.Listener + t tomb.Tomb + + handler ConnectionHandler +} + +// NewSocketListener returns a new socket listener struct. +func NewSocketListener(socketPath string, handler ConnectionHandler) (*socketListener, error) { + listener, err := sockets.Listen(socketPath) + if err != nil { + return nil, errors.Trace(err) + } + sListener := &socketListener{listener: listener, handler: handler} + go func() { + defer sListener.t.Done() + sListener.t.Kill(sListener.loop()) + }() + return sListener, nil +} + +// Stop closes the listener and releases all resources +// used by the socketListener. 
+func (l *socketListener) Stop() {
+	l.t.Kill(nil)
+	err := l.listener.Close()
+	if err != nil {
+		logger.Errorf("failed to close the metrics listener: %v", err)
+	}
+	err = l.t.Wait()
+	if err != nil {
+		logger.Errorf("failed waiting for all goroutines to finish: %v", err)
+	}
+}
+
+func (l *socketListener) loop() (_err error) {
+	defer func() {
+		// Stop kills the tomb before closing the listener, so an accept
+		// error during shutdown is expected; report a clean exit instead.
+		select {
+		case <-l.t.Dying():
+			_err = nil
+		default:
+		}
+	}()
+	for {
+		conn, err := l.listener.Accept()
+		if err != nil {
+			return errors.Trace(err)
+		}
+		go func() {
+			if err := l.handler.Handle(conn); err != nil {
+				// Log the error and keep accepting connections.
+				logger.Errorf("request handling failed: %v", err)
+			}
+		}()
+	}
+}
+
+// NewPeriodicWorker returns a periodic worker that calls the given stop
+// function when it is killed.
+func NewPeriodicWorker(do worker.PeriodicWorkerCall, period time.Duration, newTimer func(time.Duration) worker.PeriodicTimer, stop func()) worker.Worker {
+	return &periodicWorker{
+		Worker: worker.NewPeriodicWorker(do, period, newTimer),
+		stop:   stop,
+	}
+}
+
+type periodicWorker struct {
+	worker.Worker
+	stop func()
+}
+
+// Kill implements the worker.Worker interface.
+func (w *periodicWorker) Kill() {
+	w.stop()
+	w.Worker.Kill()
+}

=== added file 'src/github.com/juju/juju/worker/metrics/spool/listener_test.go'
--- src/github.com/juju/juju/worker/metrics/spool/listener_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/spool/listener_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,73 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package spool_test
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"path/filepath"
+	"runtime"
+
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/worker/metrics/spool"
+)
+
+var _ = gc.Suite(&listenerSuite{})
+
+type stopper interface {
+	Stop()
+}
+
+type listenerSuite struct {
+	socketPath string
+	handler    *mockHandler
+	listener   stopper
+}
+
+func sockPath(c *gc.C) string {
+	sockPath := filepath.Join(c.MkDir(), "test.listener")
+	if runtime.GOOS == "windows" {
+		return `\\.\pipe` + sockPath[2:]
+	}
+	return sockPath
+}
+
+func (s *listenerSuite) SetUpTest(c *gc.C) {
+	s.handler = &mockHandler{}
+	s.socketPath = sockPath(c)
+	listener, err := spool.NewSocketListener(s.socketPath, s.handler)
+	c.Assert(err, jc.ErrorIsNil)
+	s.listener = listener
+}
+
+func (s *listenerSuite) TearDownTest(c *gc.C) {
+	s.listener.Stop()
+}
+
+func (s *listenerSuite) TestDial(c *gc.C) {
+	readCloser, err := dial(s.socketPath)
+	c.Assert(err, jc.ErrorIsNil)
+	defer readCloser.Close()
+
+	data, err := ioutil.ReadAll(readCloser)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(string(data), gc.Equals, "Hello socket.")
+	s.handler.CheckCall(c, 0, "Handle")
+}
+
+type mockHandler struct {
+	testing.Stub
+}
+
+// Handle implements the spool.ConnectionHandler interface.
+func (h *mockHandler) Handle(c net.Conn) error {
+	defer c.Close()
+	h.AddCall("Handle")
+	fmt.Fprintf(c, "Hello socket.")
+	return nil
+}

=== added file 'src/github.com/juju/juju/worker/metrics/spool/listener_unix_test.go'
--- src/github.com/juju/juju/worker/metrics/spool/listener_unix_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/spool/listener_unix_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,21 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
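
NewSocketListener above accepts any ConnectionHandler, so a toy handler is enough to serve a socket; the dial helpers defined next supply the platform-specific client side. A self-contained sketch (socket path and helper names illustrative; net, io, and the spool package are the only imports assumed):

    // echoHandler implements spool.ConnectionHandler: echo one connection,
    // then close it.
    type echoHandler struct{}

    func (echoHandler) Handle(c net.Conn) error {
    	defer c.Close()
    	_, err := io.Copy(c, c) // echo whatever the client writes
    	return err
    }

    // startEcho serves echoHandler on the given path until Stop is called;
    // the anonymous interface mirrors the tests' stopper interface.
    func startEcho(socketPath string) (interface{ Stop() }, error) {
    	return spool.NewSocketListener(socketPath, echoHandler{})
    }
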
+
+// +build !windows
+
+package spool_test
+
+import (
+	"io"
+	"net"
+
+	"github.com/juju/errors"
+)
+
+func dial(socketPath string) (io.ReadCloser, error) {
+	conn, err := net.Dial("unix", socketPath)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return conn, nil
+}

=== added file 'src/github.com/juju/juju/worker/metrics/spool/listener_win_test.go'
--- src/github.com/juju/juju/worker/metrics/spool/listener_win_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/spool/listener_win_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,21 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build windows
+
+package spool_test
+
+import (
+	"io"
+
+	"github.com/juju/errors"
+	"gopkg.in/natefinch/npipe.v2"
+)
+
+func dial(socketPath string) (io.ReadCloser, error) {
+	conn, err := npipe.Dial(socketPath)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return conn, nil
+}

=== added file 'src/github.com/juju/juju/worker/metrics/spool/manifold.go'
--- src/github.com/juju/juju/worker/metrics/spool/manifold.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/spool/manifold.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,132 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package spool contains the implementation of a
+// worker that extracts the spool directory path from the agent
+// config and enables other workers to write and read
+// metrics to and from the spool directory using a writer
+// and a reader.
+package spool
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	corecharm "gopkg.in/juju/charm.v6-unstable"
+	"launchpad.net/tomb"
+
+	"github.com/juju/juju/agent"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+	"github.com/juju/juju/worker/util"
+)
+
+// MetricRecorder records metrics to a spool directory.
+type MetricRecorder interface {
+	// AddMetric records a metric with the specified key, value and creation
+	// time to a spool directory.
+	AddMetric(key, value string, created time.Time) error
+	// Close implements io.Closer.
+	Close() error
+	// IsDeclaredMetric returns true if the metric recorder
+	// is permitted to store metrics with the specified key.
+	IsDeclaredMetric(key string) bool
+}
+
+// MetricReader reads metrics from a spool directory.
+type MetricReader interface {
+	// Read returns all metric batches stored in the spool directory.
+	Read() ([]MetricBatch, error)
+	// Remove removes the metric batch with the specified uuid
+	// from the spool directory.
+	Remove(uuid string) error
+	// Close implements io.Closer.
+	Close() error
+}
+
+// MetricFactory contains the metrics reader and recorder factories.
+type MetricFactory interface {
+	// Recorder returns a new MetricRecorder.
+	Recorder(metrics map[string]corecharm.Metric, charmURL, unitTag string) (MetricRecorder, error)
+
+	// Reader returns a new MetricReader.
+	Reader() (MetricReader, error)
+}
+
+type factory struct {
+	spoolDir string
+}
+
+// Reader implements the MetricFactory interface.
+func (f *factory) Reader() (MetricReader, error) {
+	return NewJSONMetricReader(f.spoolDir)
+}
+
+// Recorder implements the MetricFactory interface.
+func (f *factory) Recorder(declaredMetrics map[string]corecharm.Metric, charmURL, unitTag string) (MetricRecorder, error) {
+	return NewJSONMetricRecorder(MetricRecorderConfig{
+		SpoolDir: f.spoolDir,
+		Metrics:  declaredMetrics,
+		CharmURL: charmURL,
+		UnitTag:  unitTag,
+	})
+}
+
+var newFactory = func(spoolDir string) MetricFactory {
+	return &factory{spoolDir: spoolDir}
+}
+
+// ManifoldConfig specifies the names a spool directory manifold should use
+// to address its dependencies.
+type ManifoldConfig util.AgentManifoldConfig
+
+// Manifold returns a dependency.Manifold that extracts the metrics
+// spool directory path from the agent.
+func Manifold(config ManifoldConfig) dependency.Manifold {
+	manifold := util.AgentManifold(util.AgentManifoldConfig(config), newWorker)
+	manifold.Output = outputFunc
+	return manifold
+}
+
+// newWorker creates a degenerate worker that provides access to a
+// MetricFactory rooted at the agent's metrics spool directory.
+func newWorker(a agent.Agent) (worker.Worker, error) {
+	metricsSpoolDir := a.CurrentConfig().MetricsSpoolDir()
+	err := checkSpoolDir(metricsSpoolDir)
+	if err != nil {
+		return nil, errors.Annotatef(err, "error checking spool directory %q", metricsSpoolDir)
+	}
+	w := &spoolWorker{factory: newFactory(metricsSpoolDir)}
+	go func() {
+		defer w.tomb.Done()
+		<-w.tomb.Dying()
+	}()
+	return w, nil
+}
+
+// outputFunc extracts a MetricFactory from a *spoolWorker.
+func outputFunc(in worker.Worker, out interface{}) error {
+	inWorker, _ := in.(*spoolWorker)
+	outPointer, _ := out.(*MetricFactory)
+	if inWorker == nil || outPointer == nil {
+		return errors.Errorf("expected %T->%T; got %T->%T", inWorker, outPointer, in, out)
+	}
+	*outPointer = inWorker.factory
+	return nil
+}
+
+// spoolWorker is a worker that provides a MetricFactory.
+type spoolWorker struct {
+	tomb    tomb.Tomb
+	factory MetricFactory
+}
+
+// Kill is part of the worker.Worker interface.
+func (w *spoolWorker) Kill() {
+	w.tomb.Kill(nil)
+}
+
+// Wait is part of the worker.Worker interface.
+func (w *spoolWorker) Wait() error {
+	return w.tomb.Wait()
+}

=== added file 'src/github.com/juju/juju/worker/metrics/spool/manifold_test.go'
--- src/github.com/juju/juju/worker/metrics/spool/manifold_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/metrics/spool/manifold_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,131 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
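
outputFunc above is what makes the spool worker useful to its dependents: any manifold that names this worker as an input can pull a MetricFactory out of it, exactly as the collect and sender manifolds earlier in this diff do. From inside a dependent Start function, the pattern looks like this (the resource name and the declaredMetrics, charmURL, and unitTag values are illustrative):

    var factory spool.MetricFactory
    if err := getResource("metric-spool", &factory); err != nil {
    	return nil, err
    }
    recorder, err := factory.Recorder(declaredMetrics, charmURL, unitTag)
    if err != nil {
    	return nil, err
    }
    defer recorder.Close()
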
+ +package spool_test + +import ( + "io/ioutil" + "path/filepath" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/metrics/spool" +) + +type ManifoldSuite struct { + testing.IsolationSuite + factory *stubFactory + manifold dependency.Manifold + getResource dependency.GetResourceFunc + spoolDir string +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.factory = &stubFactory{} + s.PatchValue(spool.NewFactory, s.factory.newFactory) + s.manifold = spool.Manifold(spool.ManifoldConfig{ + AgentName: "agent-name", + }) + s.spoolDir = c.MkDir() + s.getResource = dt.StubGetResource(dt.StubResources{ + "agent-name": dt.StubResource{Output: &dummyAgent{spoolDir: s.spoolDir}}, + }) +} + +func (s *ManifoldSuite) TestInputs(c *gc.C) { + c.Check(s.manifold.Inputs, jc.DeepEquals, []string{"agent-name"}) +} + +func (s *ManifoldSuite) TestStartMissingAgent(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "agent-name": dt.StubResource{Error: dependency.ErrMissing}, + }) + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.Equals, dependency.ErrMissing) +} + +func (s *ManifoldSuite) TestStartSuccess(c *gc.C) { + s.setupWorkerTest(c) +} + +func (s *ManifoldSuite) TestOutputSuccess(c *gc.C) { + worker := s.setupWorkerTest(c) + var factory spool.MetricFactory + err := s.manifold.Output(worker, &factory) + c.Check(err, jc.ErrorIsNil) + s.factory.CheckCall(c, 0, "newFactory", s.spoolDir) +} + +func (s *ManifoldSuite) setupWorkerTest(c *gc.C) worker.Worker { + worker, err := s.manifold.Start(s.getResource) + c.Check(err, jc.ErrorIsNil) + s.AddCleanup(func(c *gc.C) { + worker.Kill() + err := worker.Wait() + c.Check(err, jc.ErrorIsNil) + }) + return worker +} + +func (s *ManifoldSuite) TestOutputBadTarget(c *gc.C) { + worker := s.setupWorkerTest(c) + var spoolDirPlaceholder interface{} + err := s.manifold.Output(worker, &spoolDirPlaceholder) + c.Check(err.Error(), gc.Equals, "expected *spool.spoolWorker->*spool.MetricFactory; got *spool.spoolWorker->*interface {}") + c.Check(spoolDirPlaceholder, gc.IsNil) +} + +func (s *ManifoldSuite) TestCannotCreateSpoolDir(c *gc.C) { + c.Assert(ioutil.WriteFile(filepath.Join(s.spoolDir, "x"), nil, 0666), jc.ErrorIsNil) + spoolDir := filepath.Join(s.spoolDir, "x", "y") + getResource := dt.StubGetResource(dt.StubResources{ + "agent-name": dt.StubResource{Output: &dummyAgent{spoolDir: spoolDir}}, + }) + w, err := s.manifold.Start(getResource) + c.Check(err, gc.ErrorMatches, ".*error checking spool directory.*") + + var factory spool.MetricFactory + err = s.manifold.Output(w, &factory) + c.Check(err.Error(), gc.Equals, "expected *spool.spoolWorker->*spool.MetricFactory; got ->*spool.MetricFactory") +} + +type dummyAgent struct { + agent.Agent + spoolDir string +} + +func (a dummyAgent) CurrentConfig() agent.Config { + return &dummyAgentConfig{spoolDir: a.spoolDir} +} + +type dummyAgentConfig struct { + agent.Config + spoolDir string +} + +func (ac dummyAgentConfig) MetricsSpoolDir() string { + return ac.spoolDir +} + +type dummyFactory struct { + spool.MetricFactory +} + +type stubFactory struct { + testing.Stub +} + +func (s *stubFactory) newFactory(spoolDir string) spool.MetricFactory { + s.AddCall("newFactory", 
spoolDir) + return &dummyFactory{} +} === added file 'src/github.com/juju/juju/worker/metrics/spool/metrics.go' --- src/github.com/juju/juju/worker/metrics/spool/metrics.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/spool/metrics.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,377 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package spool + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/utils" + corecharm "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +var logger = loggo.GetLogger("juju.worker.uniter.metrics") + +type metricFile struct { + *os.File + finalName string +} + +func createMetricFile(path string) (*metricFile, error) { + dir, base := filepath.Dir(path), filepath.Base(path) + if !filepath.IsAbs(dir) { + return nil, errors.Errorf("not an absolute path: %q", path) + } + + workUUID, err := utils.NewUUID() + if err != nil { + return nil, errors.Trace(err) + } + workName := filepath.Join(dir, fmt.Sprintf(".%s.inc-%s", base, workUUID.String())) + + f, err := os.Create(workName) + if err != nil { + return nil, errors.Trace(err) + } + return &metricFile{File: f, finalName: path}, nil +} + +// Close implements io.Closer. +func (f *metricFile) Close() error { + err := f.File.Close() + if err != nil { + return errors.Trace(err) + } + ok, err := utils.MoveFile(f.Name(), f.finalName) + if err != nil { + // ok can be true even when there is an error completing the move, on + // platforms that implement it in multiple steps that can fail + // separately. POSIX for example, uses link(2) to claim the new + // location atomically, followed by an unlink(2) to release the old + // location. + if !ok { + return errors.Trace(err) + } + logger.Errorf("failed to remove temporary file %q: %v", f.Name(), err) + } + return nil +} + +// MetricBatch stores the information relevant to a single metrics batch. +type MetricBatch struct { + CharmURL string `json:"charmurl"` + UUID string `json:"uuid"` + Created time.Time `json:"created"` + Metrics []jujuc.Metric `json:"metrics"` + UnitTag string `json:"unit-tag"` +} + +// APIMetricBatch converts the specified MetricBatch to a params.MetricBatch, +// which can then be sent to the controller. +func APIMetricBatch(batch MetricBatch) params.MetricBatchParam { + metrics := make([]params.Metric, len(batch.Metrics)) + for i, metric := range batch.Metrics { + metrics[i] = params.Metric{Key: metric.Key, Value: metric.Value, Time: metric.Time} + } + return params.MetricBatchParam{ + Tag: batch.UnitTag, + Batch: params.MetricBatch{ + UUID: batch.UUID, + CharmURL: batch.CharmURL, + Created: batch.Created, + Metrics: metrics, + }, + } +} + +// MetricMetadata is used to store metadata for the current metric batch. +type MetricMetadata struct { + CharmURL string `json:"charmurl"` + UUID string `json:"uuid"` + Created time.Time `json:"created"` + UnitTag string `json:"unit-tag"` +} + +// JSONMetricRecorder implements the MetricsRecorder interface +// and writes metrics to a spool directory for store-and-forward. 
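+//
+// For illustration, each completed batch ends up as a pair of files in the
+// spool directory:
+//
+//	<spool-dir>/<batch-uuid>       JSON-encoded jujuc.Metric values
+//	<spool-dir>/<batch-uuid>.meta  JSON-encoded MetricMetadata
+//
+// Both are first written to a hidden ".<name>.inc-<uuid>" work file and only
+// renamed into place on Close, so a reader never observes a partial file.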
+type JSONMetricRecorder struct { + spoolDir string + validMetrics map[string]corecharm.Metric + charmURL string + uuid utils.UUID + created time.Time + unitTag string + + lock sync.Mutex + + file io.Closer + enc *json.Encoder +} + +// MetricRecorderConfig stores configuration data for a metrics recorder. +type MetricRecorderConfig struct { + SpoolDir string + Metrics map[string]corecharm.Metric + CharmURL string + UnitTag string +} + +// NewJSONMetricRecorder creates a new JSON metrics recorder. +func NewJSONMetricRecorder(config MetricRecorderConfig) (rec *JSONMetricRecorder, rErr error) { + mbUUID, err := utils.NewUUID() + if err != nil { + return nil, errors.Trace(err) + } + + recorder := &JSONMetricRecorder{ + spoolDir: config.SpoolDir, + uuid: mbUUID, + charmURL: config.CharmURL, + created: time.Now().UTC(), + validMetrics: config.Metrics, + unitTag: config.UnitTag, + } + if err := recorder.open(); err != nil { + return nil, errors.Trace(err) + } + return recorder, nil +} + +// Close implements the MetricsRecorder interface. +func (m *JSONMetricRecorder) Close() error { + m.lock.Lock() + defer m.lock.Unlock() + + err := m.file.Close() + if err != nil { + return errors.Trace(err) + } + + // We have an exclusive lock on this metric batch here, because + // metricsFile.Close was able to rename the final filename atomically. + // + // Now write the meta file so that JSONMetricReader discovers a finished + // pair of files. + err = m.recordMetaData() + if err != nil { + return errors.Trace(err) + } + + return nil +} + +// AddMetric implements the MetricsRecorder interface. +func (m *JSONMetricRecorder) AddMetric(key, value string, created time.Time) error { + err := m.validateMetric(key, value) + if err != nil { + return errors.Trace(err) + } + m.lock.Lock() + defer m.lock.Unlock() + return errors.Trace(m.enc.Encode(jujuc.Metric{Key: key, Value: value, Time: created})) +} + +func (m *JSONMetricRecorder) validateMetric(key, value string) error { + if !m.IsDeclaredMetric(key) { + return errors.Errorf("metric key %q not declared by the charm", key) + } + // The largest number of digits that can be returned by strconv.FormatFloat is 24, so + // choose an arbitrary limit somewhat higher than that. + if len(value) > 30 { + return fmt.Errorf("metric value is too large") + } + fValue, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("invalid value type: expected float, got %q", value) + } + if fValue < 0 { + return fmt.Errorf("invalid value: value must be greater or equal to zero, got %v", value) + } + return nil +} + +// IsDeclaredMetric returns true if the metric recorder is permitted to store this metric. +// Returns false if the uniter using this recorder doesn't define this metric. 
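+//
+// For illustration, with only "pings" declared, AddMetric behaves as
+// follows (mirroring the validation rules above):
+//
+//	r.AddMetric("pings", "5", time.Now())   // stored
+//	r.AddMetric("pongs", "5", time.Now())   // error: key not declared by the charm
+//	r.AddMetric("pings", "abc", time.Now()) // error: expected float
+//	r.AddMetric("pings", "-1", time.Now())  // error: value must be greater or equal to zero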
+func (m *JSONMetricRecorder) IsDeclaredMetric(key string) bool { + _, ok := m.validMetrics[key] + return ok +} + +func (m *JSONMetricRecorder) open() error { + dataFile := filepath.Join(m.spoolDir, m.uuid.String()) + if _, err := os.Stat(dataFile); err != nil && !os.IsNotExist(err) { + if err != nil { + return errors.Annotatef(err, "failed to stat file %s", dataFile) + } + return errors.Errorf("file %s already exists", dataFile) + } + + dataWriter, err := createMetricFile(dataFile) + if err != nil { + return errors.Trace(err) + } + m.file = dataWriter + m.enc = json.NewEncoder(dataWriter) + return nil +} + +func checkSpoolDir(path string) error { + if _, err := os.Stat(path); os.IsNotExist(err) { + err := os.MkdirAll(path, 0755) + if err != nil { + return errors.Trace(err) + } + } else if err != nil { + return errors.Trace(err) + } + return nil +} + +func (m *JSONMetricRecorder) recordMetaData() error { + metaFile := filepath.Join(m.spoolDir, fmt.Sprintf("%s.meta", m.uuid.String())) + if _, err := os.Stat(metaFile); !os.IsNotExist(err) { + if err != nil { + return errors.Annotatef(err, "failed to stat file %s", metaFile) + } + return errors.Errorf("file %s already exists", metaFile) + } + + metadata := MetricMetadata{ + CharmURL: m.charmURL, + UUID: m.uuid.String(), + Created: m.created, + UnitTag: m.unitTag, + } + // The use of a metricFile here ensures that the JSONMetricReader will only + // find a fully-written metafile. + metaWriter, err := createMetricFile(metaFile) + if err != nil { + return errors.Trace(err) + } + defer metaWriter.Close() + enc := json.NewEncoder(metaWriter) + err = enc.Encode(metadata) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// JSONMetricsReader reads metrics batches stored in the spool directory. +type JSONMetricReader struct { + dir string +} + +// NewJSONMetricsReader creates a new JSON metrics reader for the specified spool directory. +func NewJSONMetricReader(spoolDir string) (*JSONMetricReader, error) { + if _, err := os.Stat(spoolDir); err != nil { + return nil, errors.Annotatef(err, "failed to open spool directory %q", spoolDir) + } + return &JSONMetricReader{ + dir: spoolDir, + }, nil +} + +// Read implements the MetricsReader interface. +// Due to the way the batches are stored in the file system, +// they will be returned in an arbitrary order. This does not affect the behavior. +func (r *JSONMetricReader) Read() ([]MetricBatch, error) { + var batches []MetricBatch + + walker := func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Trace(err) + } + if info.IsDir() && path != r.dir { + return filepath.SkipDir + } else if !strings.HasSuffix(info.Name(), ".meta") { + return nil + } + + batch, err := decodeBatch(path) + if err != nil { + return errors.Trace(err) + } + batch.Metrics, err = decodeMetrics(filepath.Join(r.dir, batch.UUID)) + if err != nil { + return errors.Trace(err) + } + if len(batch.Metrics) > 0 { + batches = append(batches, batch) + } + return nil + } + if err := filepath.Walk(r.dir, walker); err != nil { + return nil, errors.Trace(err) + } + return batches, nil +} + +// Remove implements the MetricsReader interface. 
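+//
+// A sender would typically drain the spool with a read / transmit / remove
+// loop; a sketch, where send is a hypothetical delivery helper:
+//
+//	batches, err := reader.Read()
+//	if err != nil {
+//		return errors.Trace(err)
+//	}
+//	for _, b := range batches {
+//		if err := send(APIMetricBatch(b)); err != nil {
+//			break // leave the batch spooled; retry on the next run
+//		}
+//		if err := reader.Remove(b.UUID); err != nil {
+//			return errors.Trace(err)
+//		}
+//	}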
+func (r *JSONMetricReader) Remove(uuid string) error { + metaFile := filepath.Join(r.dir, fmt.Sprintf("%s.meta", uuid)) + dataFile := filepath.Join(r.dir, uuid) + err := os.Remove(metaFile) + if err != nil && !os.IsNotExist(err) { + return errors.Trace(err) + } + err = os.Remove(dataFile) + if err != nil { + return errors.Trace(err) + } + return nil +} + +// Close implements the MetricsReader interface. +func (r *JSONMetricReader) Close() error { + return nil +} + +func decodeBatch(file string) (MetricBatch, error) { + var batch MetricBatch + f, err := os.Open(file) + if err != nil { + return MetricBatch{}, errors.Trace(err) + } + defer f.Close() + dec := json.NewDecoder(f) + err = dec.Decode(&batch) + if err != nil { + return MetricBatch{}, errors.Trace(err) + } + return batch, nil +} + +func decodeMetrics(file string) ([]jujuc.Metric, error) { + var metrics []jujuc.Metric + f, err := os.Open(file) + if err != nil { + return nil, errors.Trace(err) + } + defer f.Close() + dec := json.NewDecoder(f) + for { + var metric jujuc.Metric + err := dec.Decode(&metric) + if err == io.EOF { + break + } else if err != nil { + return nil, errors.Trace(err) + } + metrics = append(metrics, metric) + } + return metrics, nil +} === added file 'src/github.com/juju/juju/worker/metrics/spool/metrics_file_test.go' --- src/github.com/juju/juju/worker/metrics/spool/metrics_file_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/spool/metrics_file_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package spool + +import ( + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +type metricFileSuite struct { + spoolDir string +} + +var _ = gc.Suite(&metricFileSuite{}) + +func (s *metricFileSuite) SetUpTest(c *gc.C) { + s.spoolDir = c.MkDir() +} + +func cleanupFile(f *metricFile) { + if f != nil { + f.File.Close() + } +} + +func (s *metricFileSuite) TestRenameOnClose(c *gc.C) { + fileName := filepath.Join(s.spoolDir, "foo") + mf, err := createMetricFile(fileName) + c.Assert(err, gc.IsNil) + + _, err = io.CopyN(mf, rand.Reader, 78666) + c.Assert(err, gc.IsNil) + + _, err = os.Stat(fileName) + c.Assert(os.IsNotExist(err), jc.IsTrue) + + err = mf.Close() + c.Assert(err, gc.IsNil) + + st, err := os.Stat(fileName) + c.Assert(err, gc.IsNil) + c.Assert(st.Size(), gc.Equals, int64(78666)) +} + +func (s *metricFileSuite) TestContention(c *gc.C) { + fileName := filepath.Join(s.spoolDir, "foo") + mf1, err := createMetricFile(fileName) + c.Assert(err, gc.IsNil) + mf2, err := createMetricFile(fileName) + c.Assert(err, gc.IsNil) + + _, err = fmt.Fprint(mf1, "emacs") + c.Assert(err, gc.IsNil) + _, err = fmt.Fprint(mf2, "vi") + c.Assert(err, gc.IsNil) + + _, err = os.Stat(fileName) + c.Assert(os.IsNotExist(err), jc.IsTrue) + + err = mf2.Close() + c.Assert(err, gc.IsNil) + err = mf1.Close() + c.Assert(err, gc.NotNil) + + st, err := os.Stat(fileName) + c.Assert(err, gc.IsNil) + c.Assert(st.Size(), gc.Equals, int64(2)) + contents, err := ioutil.ReadFile(fileName) + c.Assert(err, gc.IsNil) + c.Assert(contents, gc.DeepEquals, []byte("vi")) +} === added file 'src/github.com/juju/juju/worker/metrics/spool/metrics_test.go' --- src/github.com/juju/juju/worker/metrics/spool/metrics_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/spool/metrics_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,290 @@ +// 
Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package spool_test + +import ( + "path/filepath" + "runtime" + "time" + + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + corecharm "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type metricsBatchSuite struct { +} + +var _ = gc.Suite(&metricsBatchSuite{}) + +func (s *metricsBatchSuite) TestAPIMetricBatch(c *gc.C) { + batches := []spool.MetricBatch{{ + CharmURL: "local:trusty/test-charm", + UUID: "test-uuid", + Created: time.Now(), + Metrics: []jujuc.Metric{ + { + Key: "test-key-1", + Value: "test-value-1", + Time: time.Now(), + }, { + Key: "test-key-2", + Value: "test-value-2", + Time: time.Now(), + }, + }, + }, { + CharmURL: "local:trusty/test-charm", + UUID: "test-uuid", + Created: time.Now(), + Metrics: []jujuc.Metric{}, + }, + } + for _, batch := range batches { + apiBatch := spool.APIMetricBatch(batch) + c.Assert(apiBatch.Batch.UUID, gc.DeepEquals, batch.UUID) + c.Assert(apiBatch.Batch.CharmURL, gc.DeepEquals, batch.CharmURL) + c.Assert(apiBatch.Batch.Created, gc.DeepEquals, batch.Created) + c.Assert(len(apiBatch.Batch.Metrics), gc.Equals, len(batch.Metrics)) + for i, metric := range batch.Metrics { + c.Assert(metric.Key, gc.DeepEquals, apiBatch.Batch.Metrics[i].Key) + c.Assert(metric.Value, gc.DeepEquals, apiBatch.Batch.Metrics[i].Value) + c.Assert(metric.Time, gc.DeepEquals, apiBatch.Batch.Metrics[i].Time) + } + } +} + +func osDependentSockPath(c *gc.C) string { + sockPath := filepath.Join(c.MkDir(), "test.sock") + if runtime.GOOS == "windows" { + return `\\.\pipe` + sockPath[2:] + } + return sockPath +} + +// testPaths implements Paths for tests that do touch the filesystem. 
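+//
+// The jujuc socket path in particular differs by platform; for illustration,
+// osDependentSockPath above yields paths of these shapes:
+//
+//	/tmp/check-123/test.sock      on unix (a unix socket path)
+//	\\.\pipe\check-123\test.sock  on windows (the drive prefix is replaced by \\.\pipe)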
+type testPaths struct { + tools string + charm string + socket string + metricsspool string +} + +func newTestPaths(c *gc.C) testPaths { + return testPaths{ + tools: c.MkDir(), + charm: c.MkDir(), + socket: osDependentSockPath(c), + metricsspool: c.MkDir(), + } +} + +func (p testPaths) GetMetricsSpoolDir() string { + return p.metricsspool +} + +func (p testPaths) GetToolsDir() string { + return p.tools +} + +func (p testPaths) GetCharmDir() string { + return p.charm +} + +func (p testPaths) GetJujucSocket() string { + return p.socket +} + +type metricsRecorderSuite struct { + testing.IsolationSuite + + paths testPaths + unitTag string +} + +var _ = gc.Suite(&metricsRecorderSuite{}) + +func (s *metricsRecorderSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.paths = newTestPaths(c) + s.unitTag = names.NewUnitTag("test-unit/0").String() +} + +func (s *metricsRecorderSuite) TestInit(c *gc.C) { + w, err := spool.NewJSONMetricRecorder( + spool.MetricRecorderConfig{ + SpoolDir: s.paths.GetMetricsSpoolDir(), + Metrics: map[string]corecharm.Metric{"pings": corecharm.Metric{}}, + CharmURL: "local:precise/wordpress", + UnitTag: s.unitTag, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(w, gc.NotNil) + err = w.AddMetric("pings", "5", time.Now()) + c.Assert(err, jc.ErrorIsNil) + err = w.Close() + c.Assert(err, jc.ErrorIsNil) + + r, err := spool.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) + c.Assert(err, jc.ErrorIsNil) + batches, err := r.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 1) + batch := batches[0] + c.Assert(batch.CharmURL, gc.Equals, "local:precise/wordpress") + c.Assert(batch.UUID, gc.Not(gc.Equals), "") + c.Assert(batch.Metrics, gc.HasLen, 1) + c.Assert(batch.Metrics[0].Key, gc.Equals, "pings") + c.Assert(batch.Metrics[0].Value, gc.Equals, "5") + c.Assert(batch.UnitTag, gc.Equals, s.unitTag) + + err = r.Close() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *metricsRecorderSuite) TestMetricValidation(c *gc.C) { + tests := []struct { + about string + key string + value string + expectedError string + }{{ + about: "metric not declared", + key: "pings", + value: "5", + expectedError: `metric key "pings" not declared by the charm`, + }, { + about: "non float metrics", + key: "pongs", + value: "abcd", + expectedError: `invalid value type: expected float, got "abcd"`, + }, { + about: "negative value", + key: "pongs", + value: "-5.0", + expectedError: `invalid value: value must be greater or equal to zero, got -5.0`, + }, { + about: "large value", + key: "pongs", + value: "1234567890123456789012345678901234567890", + expectedError: `metric value is too large`, + }, + } + + for _, test := range tests { + w, err := spool.NewJSONMetricRecorder( + spool.MetricRecorderConfig{ + SpoolDir: s.paths.GetMetricsSpoolDir(), + Metrics: map[string]corecharm.Metric{ + "juju-units": corecharm.Metric{}, + "pongs": corecharm.Metric{ + Type: corecharm.MetricTypeAbsolute, + }, + }, + CharmURL: "local:precise/wordpress", + UnitTag: s.unitTag, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(w, gc.NotNil) + + c.Logf("running test: %s", test.about) + err = w.AddMetric(test.key, test.value, time.Now()) + if test.expectedError != "" { + c.Assert(err, gc.ErrorMatches, test.expectedError) + err = w.Close() + c.Assert(err, jc.ErrorIsNil) + + r, err := spool.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) + c.Assert(err, jc.ErrorIsNil) + batches, err := r.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 0) + } else { + c.Assert(err, jc.ErrorIsNil) + err = 
w.Close() + c.Assert(err, jc.ErrorIsNil) + } + } +} + +type metricsReaderSuite struct { + paths testPaths + unitTag string + + w *spool.JSONMetricRecorder +} + +var _ = gc.Suite(&metricsReaderSuite{}) + +func (s *metricsReaderSuite) SetUpTest(c *gc.C) { + s.paths = newTestPaths(c) + s.unitTag = names.NewUnitTag("test-unit/0").String() + + var err error + s.w, err = spool.NewJSONMetricRecorder( + spool.MetricRecorderConfig{ + SpoolDir: s.paths.GetMetricsSpoolDir(), + Metrics: map[string]corecharm.Metric{"pings": corecharm.Metric{}}, + CharmURL: "local:precise/wordpress", + UnitTag: s.unitTag, + }) + + c.Assert(err, jc.ErrorIsNil) + err = s.w.AddMetric("pings", "5", time.Now()) + c.Assert(err, jc.ErrorIsNil) + err = s.w.Close() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *metricsReaderSuite) TestTwoSimultaneousReaders(c *gc.C) { + r, err := spool.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) + c.Assert(err, jc.ErrorIsNil) + + r2, err := spool.NewJSONMetricReader(c.MkDir()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r2, gc.NotNil) + err = r2.Close() + c.Assert(err, jc.ErrorIsNil) + err = r.Close() + c.Assert(err, jc.ErrorIsNil) + +} + +func (s *metricsReaderSuite) TestUnblockedReaders(c *gc.C) { + r, err := spool.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) + c.Assert(err, jc.ErrorIsNil) + err = r.Close() + c.Assert(err, jc.ErrorIsNil) + + r2, err := spool.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r2, gc.NotNil) + err = r2.Close() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *metricsReaderSuite) TestRemoval(c *gc.C) { + r, err := spool.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) + c.Assert(err, jc.ErrorIsNil) + + batches, err := r.Read() + c.Assert(err, jc.ErrorIsNil) + for _, batch := range batches { + err := r.Remove(batch.UUID) + c.Assert(err, jc.ErrorIsNil) + } + err = r.Close() + c.Assert(err, jc.ErrorIsNil) + + batches, err = r.Read() + c.Assert(err, jc.ErrorIsNil) + c.Assert(batches, gc.HasLen, 0) + err = r.Close() + c.Assert(err, jc.ErrorIsNil) +} === added file 'src/github.com/juju/juju/worker/metrics/spool/package_test.go' --- src/github.com/juju/juju/worker/metrics/spool/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metrics/spool/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package spool_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/metricworker/manifold.go' --- src/github.com/juju/juju/worker/metricworker/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/metricworker/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package metricworker + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/metricsmanager" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig describes the resources used by metrics workers. +type ManifoldConfig util.ApiManifoldConfig + +// Manifold returns a Manifold that encapsulates various metrics workers. 
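+//
+// A sketch of wiring it into an engine's manifold map (the manifold name and
+// the APICallerName field are illustrative assumptions):
+//
+//	manifolds := dependency.Manifolds{
+//		"metric-worker": metricworker.Manifold(metricworker.ManifoldConfig{
+//			APICallerName: "api-caller",
+//		}),
+//	}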
+func Manifold(config ManifoldConfig) dependency.Manifold { + return util.ApiManifold( + util.ApiManifoldConfig(config), + manifoldStart, + ) +} + +// manifoldStart creates a runner for the metrics workers, given a base.APICaller. +func manifoldStart(apiCaller base.APICaller) (worker.Worker, error) { + client, err := metricsmanager.NewClient(apiCaller) + if err != nil { + return nil, errors.Trace(err) + } + w, err := NewMetricsManager(client) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} === modified file 'src/github.com/juju/juju/worker/metricworker/metricmanager.go' --- src/github.com/juju/juju/worker/metricworker/metricmanager.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/metricworker/metricmanager.go 2016-03-22 15:18:22 +0000 @@ -12,6 +12,8 @@ // NewMetricsManager creates a runner that will run the metricsmanagement workers. func NewMetricsManager(client metricsmanager.MetricsManagerClient) (worker.Runner, error) { + // TODO(fwereade): break this out into separate manifolds (with their own facades). + // Periodic workers automatically retry so none should return an error. If they do // it's ok to restart them individually. isFatal := func(error) bool { @@ -21,7 +23,7 @@ moreImportant := func(error, error) bool { return false } - runner := worker.NewRunner(isFatal, moreImportant) + runner := worker.NewRunner(isFatal, moreImportant, worker.RestartDelay) err := runner.StartWorker("sender", func() (worker.Worker, error) { return NewSender(client), nil }) === modified file 'src/github.com/juju/juju/worker/minunitsworker/minunitsworker.go' --- src/github.com/juju/juju/worker/minunitsworker/minunitsworker.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/minunitsworker/minunitsworker.go 2016-03-22 15:18:22 +0000 @@ -6,8 +6,8 @@ import ( "github.com/juju/loggo" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/state" + "github.com/juju/juju/watcher/legacy" "github.com/juju/juju/worker" ) @@ -23,10 +23,10 @@ // minimum required number of units for a service is increased. 
 func NewMinUnitsWorker(st *state.State) worker.Worker {
 	mu := &MinUnitsWorker{st: st}
-	return worker.NewStringsWorker(mu)
+	return legacy.NewStringsWorker(mu)
 }
 
-func (mu *MinUnitsWorker) SetUp() (watcher.StringsWatcher, error) {
+func (mu *MinUnitsWorker) SetUp() (state.StringsWatcher, error) {
 	return mu.st.WatchMinUnits(), nil
 }

=== modified file 'src/github.com/juju/juju/worker/minunitsworker/minunitsworker_test.go'
--- src/github.com/juju/juju/worker/minunitsworker/minunitsworker_test.go	2015-03-26 15:54:39 +0000
+++ src/github.com/juju/juju/worker/minunitsworker/minunitsworker_test.go	2016-03-22 15:18:22 +0000
@@ -4,7 +4,6 @@
 package minunitsworker_test
 
 import (
-	stdtesting "testing"
 	"time"
 
 	"github.com/juju/loggo"
@@ -19,18 +18,12 @@
 
 var logger = loggo.GetLogger("juju.worker.minunitsworker_test")
 
-func TestPackage(t *stdtesting.T) {
-	coretesting.MgoTestPackage(t)
-}
-
 type minUnitsWorkerSuite struct {
 	testing.JujuConnSuite
 }
 
 var _ = gc.Suite(&minUnitsWorkerSuite{})
 
-var _ worker.StringsWatchHandler = (*minunitsworker.MinUnitsWorker)(nil)
-
 func (s *minUnitsWorkerSuite) TestMinUnitsWorker(c *gc.C) {
 	mu := minunitsworker.NewMinUnitsWorker(s.State)
 	defer func() { c.Assert(worker.Stop(mu), gc.IsNil) }()

=== added file 'src/github.com/juju/juju/worker/minunitsworker/package_test.go'
--- src/github.com/juju/juju/worker/minunitsworker/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/minunitsworker/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package minunitsworker_test
+
+import (
+	stdtesting "testing"
+
+	coretesting "github.com/juju/juju/testing"
+)
+
+func TestPackage(t *stdtesting.T) {
+	coretesting.MgoTestPackage(t)
+}

=== added directory 'src/github.com/juju/juju/worker/modelworkermanager'
=== added file 'src/github.com/juju/juju/worker/modelworkermanager/export_test.go'
--- src/github.com/juju/juju/worker/modelworkermanager/export_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/modelworkermanager/export_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,8 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package modelworkermanager
+
+func DyingModelWorkerId(uuid string) string {
+	return dyingModelWorkerId(uuid)
+}

=== added file 'src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go'
--- src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,209 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package modelworkermanager
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/names"
+	"gopkg.in/mgo.v2"
+	"launchpad.net/tomb"
+
+	cmdutil "github.com/juju/juju/cmd/jujud/util"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/worker"
+)
+
+var logger = loggo.GetLogger("juju.worker.modelworkermanager")
+
+type modelWorkersCreator func(InitialState, *state.State) (worker.Worker, error)
+
+// NewModelWorkerManager returns a Worker that manages workers which
+// need to run on a per-model basis. It takes a function which will
+// be called to start a worker for a new model. Such a worker
+// will be killed when a model goes away.
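+//
+// An illustrative call (the two worker constructors are placeholders
+// matching the modelWorkersCreator signature):
+//
+//	m := NewModelWorkerManager(st,
+//		startModelWorkers,      // run while a model is alive
+//		startDyingModelWorkers, // run once a model starts dying
+//		worker.RestartDelay,
+//	)
+//	defer func() { m.Kill(); m.Wait() }()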
+func NewModelWorkerManager(
+	st InitialState,
+	startModelWorker modelWorkersCreator,
+	dyingModelWorker modelWorkersCreator,
+	delay time.Duration,
+) worker.Worker {
+	m := &modelWorkerManager{
+		st:               st,
+		startModelWorker: startModelWorker,
+		dyingModelWorker: dyingModelWorker,
+	}
+	m.runner = worker.NewRunner(cmdutil.IsFatal, cmdutil.MoreImportant, delay)
+	go func() {
+		defer m.tomb.Done()
+		m.tomb.Kill(m.loop())
+	}()
+	return m
+}
+
+// InitialState defines the State functionality used by
+// modelWorkerManager and/or could be useful to startModelWorker
+// funcs. It mainly exists to support testing.
+type InitialState interface {
+	WatchModels() state.StringsWatcher
+	ForModel(names.ModelTag) (*state.State, error)
+	GetModel(names.ModelTag) (*state.Model, error)
+	ModelUUID() string
+	Machine(string) (*state.Machine, error)
+	MongoSession() *mgo.Session
+}
+
+type modelWorkerManager struct {
+	runner           worker.Runner
+	tomb             tomb.Tomb
+	st               InitialState
+	startModelWorker modelWorkersCreator
+	dyingModelWorker modelWorkersCreator
+}
+
+// Kill satisfies the Worker interface.
+func (m *modelWorkerManager) Kill() {
+	m.tomb.Kill(nil)
+}
+
+// Wait satisfies the Worker interface.
+func (m *modelWorkerManager) Wait() error {
+	return m.tomb.Wait()
+}
+
+func (m *modelWorkerManager) loop() error {
+	go func() {
+		// When the runner stops, make sure we stop the model workers as well.
+		m.tomb.Kill(m.runner.Wait())
+	}()
+	defer func() {
+		// When we return, make sure that we kill
+		// the runner and wait for it.
+		m.runner.Kill()
+		m.tomb.Kill(m.runner.Wait())
+	}()
+	w := m.st.WatchModels()
+	defer w.Stop()
+	for {
+		select {
+		case uuids := <-w.Changes():
+			// One or more models have changed.
+			for _, uuid := range uuids {
+				if err := m.modelHasChanged(uuid); err != nil {
+					return errors.Trace(err)
+				}
+			}
+		case <-m.tomb.Dying():
+			return tomb.ErrDying
+		}
+	}
+}
+
+func (m *modelWorkerManager) modelHasChanged(uuid string) error {
+	modelTag := names.NewModelTag(uuid)
+	env, err := m.st.GetModel(modelTag)
+	if errors.IsNotFound(err) {
+		return m.modelNotFound(modelTag)
+	} else if err != nil {
+		return errors.Annotatef(err, "error loading model %s", modelTag.Id())
+	}
+
+	switch env.Life() {
+	case state.Alive:
+		err = m.envIsAlive(modelTag)
+	case state.Dying:
+		err = m.modelIsDying(modelTag)
+	case state.Dead:
+		err = m.envIsDead(modelTag)
+	}
+
+	return errors.Trace(err)
+}
+
+func (m *modelWorkerManager) envIsAlive(modelTag names.ModelTag) error {
+	return m.runner.StartWorker(modelTag.Id(), func() (worker.Worker, error) {
+		st, err := m.st.ForModel(modelTag)
+		if err != nil {
+			return nil, errors.Annotatef(err, "failed to open state for model %s", modelTag.Id())
+		}
+		closeState := func() {
+			err := st.Close()
+			if err != nil {
+				logger.Errorf("error closing state for model %s: %v", modelTag.Id(), err)
+			}
+		}
+
+		envRunner, err := m.startModelWorker(m.st, st)
+		if err != nil {
+			closeState()
+			return nil, errors.Trace(err)
+		}
+
+		// Close State when the runner for the model is done.
+		go func() {
+			envRunner.Wait()
+			closeState()
+		}()
+
+		return envRunner, nil
+	})
+}
+
+func dyingModelWorkerId(uuid string) string {
+	return "dying" + ":" + uuid
+}
+
+// modelNotFound stops all workers for that model.
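+//
+// For reference, model lifecycle changes map onto runner actions like so:
+//
+//	Alive     -> StartWorker(uuid)
+//	Dying     -> StartWorker("dying:" + uuid)
+//	Dead      -> StopWorker(uuid)
+//	not found -> StopWorker(uuid) and StopWorker("dying:" + uuid)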
+func (m *modelWorkerManager) modelNotFound(modelTag names.ModelTag) error { + uuid := modelTag.Id() + if err := m.runner.StopWorker(uuid); err != nil { + return errors.Trace(err) + } + if err := m.runner.StopWorker(dyingModelWorkerId(uuid)); err != nil { + return errors.Trace(err) + } + return nil +} + +func (m *modelWorkerManager) modelIsDying(modelTag names.ModelTag) error { + id := dyingModelWorkerId(modelTag.Id()) + return m.runner.StartWorker(id, func() (worker.Worker, error) { + st, err := m.st.ForModel(modelTag) + if err != nil { + return nil, errors.Annotatef(err, "failed to open state for model %s", modelTag.Id()) + } + closeState := func() { + err := st.Close() + if err != nil { + logger.Errorf("error closing state for model %s: %v", modelTag.Id(), err) + } + } + + dyingRunner, err := m.dyingModelWorker(m.st, st) + if err != nil { + closeState() + return nil, errors.Trace(err) + } + + // Close State when the runner for the model is done. + go func() { + dyingRunner.Wait() + closeState() + }() + + return dyingRunner, nil + }) +} + +func (m *modelWorkerManager) envIsDead(modelTag names.ModelTag) error { + uuid := modelTag.Id() + err := m.runner.StopWorker(uuid) + if err != nil { + return errors.Trace(err) + } + + return nil +} === added file 'src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go' --- src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/modelworkermanager/modelworkermanager_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,461 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package modelworkermanager_test + +import ( + stdtesting "testing" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "launchpad.net/tomb" + + cmdutil "github.com/juju/juju/cmd/jujud/util" + "github.com/juju/juju/state" + statetesting "github.com/juju/juju/state/testing" + "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/modelworkermanager" +) + +func TestPackage(t *stdtesting.T) { + testing.MgoTestPackage(t) +} + +var _ = gc.Suite(&suite{}) + +type suite struct { + statetesting.StateSuite + factory *factory.Factory + runnerC chan *fakeRunner + startErr error +} + +func (s *suite) SetUpTest(c *gc.C) { + s.StateSuite.SetUpTest(c) + s.factory = factory.NewFactory(s.State) + s.runnerC = make(chan *fakeRunner, 1) + s.startErr = nil +} + +func (s *suite) TearDownTest(c *gc.C) { + close(s.runnerC) + s.StateSuite.TearDownTest(c) +} + +func (s *suite) MakeModel(c *gc.C) *state.State { + st := s.factory.MakeModel(c, nil) + s.AddCleanup(func(*gc.C) { st.Close() }) + return st +} + +func destroyEnvironment(c *gc.C, st *state.State) { + env, err := st.Model() + c.Assert(err, jc.ErrorIsNil) + err = env.Destroy() + c.Assert(err, jc.ErrorIsNil) +} + +func (s *suite) TestStartsWorkersForPreExistingEnvs(c *gc.C) { + moreState := s.MakeModel(c) + + var seenEnvs []string + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + for _, r := range s.seeRunnersStart(c, 2) { + seenEnvs = append(seenEnvs, r.modelUUID) + } + + c.Assert(seenEnvs, jc.SameContents, + []string{s.State.ModelUUID(), moreState.ModelUUID()}, + ) + + destroyEnvironment(c, moreState) + dyingRunner := s.seeRunnersStart(c, 1)[0] + 
c.Assert(dyingRunner.modelUUID, gc.Equals, moreState.ModelUUID()) +} + +func (s *suite) TestStartsWorkersForNewEnv(c *gc.C) { + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + s.seeRunnersStart(c, 1) // Runner for controller env + + // Create another environment and watch a runner be created for it. + st2 := s.MakeModel(c) + runner := s.seeRunnersStart(c, 1)[0] + c.Assert(runner.modelUUID, gc.Equals, st2.ModelUUID()) +} + +func (s *suite) TestStopsWorkersWhenEnvGoesAway(c *gc.C) { + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + runner0 := s.seeRunnersStart(c, 1)[0] + + // Create an environment and grab the runner for it. + otherState := s.MakeModel(c) + runner1 := s.seeRunnersStart(c, 1)[0] + + // Set environment to dying. + destroyEnvironment(c, otherState) + s.seeRunnersStart(c, 1) // dying env runner + + // Set environment to dead. + err := otherState.ProcessDyingModel() + c.Assert(err, jc.ErrorIsNil) + + // See that the first runner is still running but the runner for + // the new environment is stopped. + s.State.StartSync() + select { + case <-runner0.tomb.Dead(): + c.Fatal("first runner should not die here") + case <-runner1.tomb.Dead(): + break + case <-time.After(testing.LongWait): + c.Fatal("timed out waiting for runner to die") + } + + // Make sure the first runner doesn't get stopped. + s.State.StartSync() + select { + case <-runner0.tomb.Dead(): + c.Fatal("first runner should not die here") + case <-time.After(testing.ShortWait): + break + } +} + +func (s *suite) TestKillPropagates(c *gc.C) { + otherSt := s.MakeModel(c) + + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + runners := s.seeRunnersStart(c, 2) + c.Assert(runners[0].killed, jc.IsFalse) + c.Assert(runners[1].killed, jc.IsFalse) + + destroyEnvironment(c, otherSt) + dyingRunner := s.seeRunnersStart(c, 1)[0] + c.Assert(dyingRunner.killed, jc.IsFalse) + + m.Kill() + err := waitOrFatal(c, m.Wait) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(runners[0].killed, jc.IsTrue) + c.Assert(runners[1].killed, jc.IsTrue) + c.Assert(dyingRunner.killed, jc.IsTrue) +} + +// stateWithFailingGetEnvironment wraps a *state.State, overriding the +// GetModel to generate an error. +type stateWithFailingGetEnvironment struct { + *stateWithFakeWatcher + shouldFail bool +} + +func newStateWithFailingGetEnvironment(realSt *state.State) *stateWithFailingGetEnvironment { + return &stateWithFailingGetEnvironment{ + stateWithFakeWatcher: newStateWithFakeWatcher(realSt), + shouldFail: false, + } +} + +func (s *stateWithFailingGetEnvironment) GetModel(tag names.ModelTag) (*state.Model, error) { + if s.shouldFail { + return nil, errors.New("unable to GetModel") + } + return s.State.GetModel(tag) +} + +func (s *suite) TestLoopExitKillsRunner(c *gc.C) { + // If something causes EnvWorkerManager.loop to exit that isn't Kill() then it should stop the runner. 
+ // Currently the best way to cause this is to make + // m.st.GetModel(tag) fail with any error other than NotFound + otherSt := s.MakeModel(c) + st := newStateWithFailingGetEnvironment(s.State) + uuid := st.ModelUUID() + m := modelworkermanager.NewModelWorkerManager(st, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + + // First time: runners started + st.sendEnvChange(uuid) + runners := s.seeRunnersStart(c, 1) + c.Assert(runners[0].killed, jc.IsFalse) + + destroyEnvironment(c, otherSt) + st.sendEnvChange(otherSt.ModelUUID()) + dyingRunner := s.seeRunnersStart(c, 1)[0] + c.Assert(dyingRunner.killed, jc.IsFalse) + + // Now we start failing + st.shouldFail = true + st.sendEnvChange(uuid) + + // This should kill the manager + err := waitOrFatal(c, m.Wait) + c.Assert(err, gc.ErrorMatches, "error loading model .*: unable to GetModel") + + // And that should kill all the runners + c.Assert(runners[0].killed, jc.IsTrue) + c.Assert(dyingRunner.killed, jc.IsTrue) +} + +func (s *suite) TestWorkerErrorIsPropagatedWhenKilled(c *gc.C) { + st := newStateWithFakeWatcher(s.State) + started := make(chan struct{}, 1) + m := modelworkermanager.NewModelWorkerManager(st, func(modelworkermanager.InitialState, *state.State) (worker.Worker, error) { + c.Logf("starting worker") + started <- struct{}{} + return &errorWhenKilledWorker{ + err: &cmdutil.FatalError{"an error"}, + }, nil + }, s.dyingEnvWorker, time.Millisecond) + st.sendEnvChange(st.ModelUUID()) + s.State.StartSync() + <-started + m.Kill() + err := m.Wait() + c.Assert(err, gc.ErrorMatches, "an error") +} + +type errorWhenKilledWorker struct { + tomb tomb.Tomb + err error +} + +var logger = loggo.GetLogger("juju.worker.modelworkermanager") + +func (w *errorWhenKilledWorker) Kill() { + w.tomb.Kill(w.err) + logger.Errorf("errorWhenKilledWorker dying with error %v", w.err) + w.tomb.Done() +} + +func (w *errorWhenKilledWorker) Wait() error { + err := w.tomb.Wait() + logger.Errorf("errorWhenKilledWorker wait -> error %v", err) + return err +} + +func (s *suite) TestNothingHappensWhenEnvIsSeenAgain(c *gc.C) { + // This could happen if there's a change to an environment doc but + // it's otherwise still alive (unlikely but possible). + st := newStateWithFakeWatcher(s.State) + uuid := st.ModelUUID() + otherSt := s.MakeModel(c) + otherUUID := otherSt.ModelUUID() + + m := modelworkermanager.NewModelWorkerManager(st, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + + // First time: runners started + st.sendEnvChange(uuid) + st.sendEnvChange(otherUUID) + s.seeRunnersStart(c, 2) + + // Second time: no runners started + st.sendEnvChange(uuid) + s.checkNoRunnersStart(c) + + destroyEnvironment(c, otherSt) + st.sendEnvChange(otherUUID) + s.seeRunnersStart(c, 1) + + st.sendEnvChange(otherUUID) + s.checkNoRunnersStart(c) +} + +func (s *suite) TestNothingHappensWhenUnknownEnvReported(c *gc.C) { + // This could perhaps happen when an environment is dying just as + // the EnvWorkerManager is coming up (unlikely but possible). + st := newStateWithFakeWatcher(s.State) + + m := modelworkermanager.NewModelWorkerManager(st, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + + st.sendEnvChange("unknown-model-uuid") + s.checkNoRunnersStart(c) + + // Existing environment still works. 
+ st.sendEnvChange(st.ModelUUID()) + s.seeRunnersStart(c, 1) +} + +func (s *suite) TestFatalErrorKillsEnvWorkerManager(c *gc.C) { + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + runner := s.seeRunnersStart(c, 1)[0] + + runner.tomb.Kill(worker.ErrTerminateAgent) + runner.tomb.Done() + + err := waitOrFatal(c, m.Wait) + c.Assert(errors.Cause(err), gc.Equals, worker.ErrTerminateAgent) +} + +func (s *suite) TestNonFatalErrorCausesRunnerRestart(c *gc.C) { + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + defer m.Kill() + runner0 := s.seeRunnersStart(c, 1)[0] + + runner0.tomb.Kill(errors.New("trivial")) + runner0.tomb.Done() + + s.seeRunnersStart(c, 1) +} + +func (s *suite) TestStateIsClosedIfStartEnvWorkersFails(c *gc.C) { + // If State is not closed when startEnvWorker errors, MgoSuite's + // dirty socket detection will pick up the leaked socket and + // panic. + s.startErr = worker.ErrTerminateAgent // This will make envWorkerManager exit. + m := modelworkermanager.NewModelWorkerManager(s.State, s.startEnvWorker, s.dyingEnvWorker, time.Millisecond) + waitOrFatal(c, m.Wait) +} + +func (s *suite) TestdyingEnvWorkerId(c *gc.C) { + c.Assert(modelworkermanager.DyingModelWorkerId("uuid"), gc.Equals, "dying:uuid") +} + +func (s *suite) seeRunnersStart(c *gc.C, expectedCount int) []*fakeRunner { + if expectedCount < 1 { + c.Fatal("expectedCount must be >= 1") + } + s.State.StartSync() + runners := make([]*fakeRunner, 0, expectedCount) + for { + select { + case r := <-s.runnerC: + c.Assert(r.ssModelUUID, gc.Equals, s.State.ModelUUID()) + + runners = append(runners, r) + if len(runners) == expectedCount { + s.checkNoRunnersStart(c) // Check no more runners start + return runners + } + case <-time.After(testing.LongWait): + c.Fatal("timed out waiting for runners to be started") + } + } +} + +func (s *suite) checkNoRunnersStart(c *gc.C) { + s.State.StartSync() + for { + select { + case <-s.runnerC: + c.Fatal("saw runner creation when expecting none") + case <-time.After(testing.ShortWait): + return + } + } +} + +// startEnvWorker is passed to NewModelWorkerManager in these tests. It +// creates fake Runner instances when envWorkerManager starts workers +// for an alive environment. +func (s *suite) startEnvWorker(ssSt modelworkermanager.InitialState, st *state.State) (worker.Worker, error) { + if s.startErr != nil { + return nil, s.startErr + } + runner := &fakeRunner{ + ssModelUUID: ssSt.ModelUUID(), + modelUUID: st.ModelUUID(), + } + s.runnerC <- runner + return runner, nil +} + +// dyingEnvWorker is passed to NewModelWorkerManager in these tests. It +// creates a fake Runner instance when envWorkerManager starts workers for a +// dying or dead environment. +func (s *suite) dyingEnvWorker(ssSt modelworkermanager.InitialState, st *state.State) (worker.Worker, error) { + if s.startErr != nil { + return nil, s.startErr + } + runner := &fakeRunner{ + ssModelUUID: ssSt.ModelUUID(), + modelUUID: st.ModelUUID(), + } + s.runnerC <- runner + return runner, nil +} + +func waitOrFatal(c *gc.C, wait func() error) error { + errC := make(chan error) + go func() { + errC <- wait() + }() + + select { + case err := <-errC: + return err + case <-time.After(testing.LongWait): + c.Fatal("waited too long") + } + return nil +} + +// fakeRunner minimally implements the worker.Worker interface. It +// doesn't actually run anything, recording some execution details for +// testing. 
+type fakeRunner struct {
+	tomb        tomb.Tomb
+	ssModelUUID string
+	modelUUID   string
+	killed      bool
+}
+
+func (r *fakeRunner) Kill() {
+	r.killed = true
+	r.tomb.Done()
+}
+
+func (r *fakeRunner) Wait() error {
+	return r.tomb.Wait()
+}
+
+func newStateWithFakeWatcher(realSt *state.State) *stateWithFakeWatcher {
+	return &stateWithFakeWatcher{
+		State: realSt,
+		envWatcher: &fakeEnvWatcher{
+			changes: make(chan []string),
+		},
+	}
+}
+
+// stateWithFakeWatcher wraps a *state.State, overriding the
+// WatchModels method to allow control over the reported
+// model lifecycle events for testing.
+//
+// Use sendEnvChange to cause a model event to be emitted by
+// the watcher returned by WatchModels.
+type stateWithFakeWatcher struct {
+	*state.State
+	envWatcher *fakeEnvWatcher
+}
+
+func (s *stateWithFakeWatcher) WatchModels() state.StringsWatcher {
+	return s.envWatcher
+}
+
+func (s *stateWithFakeWatcher) sendEnvChange(uuids ...string) {
+	s.envWatcher.changes <- uuids
+}
+
+type fakeEnvWatcher struct {
+	state.StringsWatcher
+	changes chan []string
+}
+
+func (w *fakeEnvWatcher) Stop() error {
+	return nil
+}
+
+func (w *fakeEnvWatcher) Changes() <-chan []string {
+	return w.changes
+}

=== added directory 'src/github.com/juju/juju/worker/mongoupgrader'
=== added file 'src/github.com/juju/juju/worker/mongoupgrader/worker.go'
--- src/github.com/juju/juju/worker/mongoupgrader/worker.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/mongoupgrader/worker.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,91 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package mongoupgrader
+
+import (
+	"net"
+	"strconv"
+
+	"github.com/juju/errors"
+	"github.com/juju/juju/mongo"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/worker"
+	"github.com/juju/loggo"
+	"github.com/juju/replicaset"
+)
+
+var logger = loggo.GetLogger("juju.worker.mongoupgrader")
+
+// StopMongo represents a function that can issue a stop
+// to a running mongo service.
+type StopMongo func(mongo.Version, bool) error
+
+// New returns a worker, or an error in case of failure.
+// This worker watches the machine's mongo upgrade information
+// in state and changes the agent configuration accordingly.
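+//
+// An illustrative sketch of a StopMongo callback (stopMongoService is a
+// hypothetical helper and the policy shown is an assumption, not part of
+// this package):
+//
+//	maybeStop := func(v mongo.Version, isMaster bool) error {
+//		if isMaster {
+//			return nil // let the master keep running for now
+//		}
+//		return stopMongoService(v)
+//	}
+//	w, err := New(st, "0", maybeStop)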
+func New(st *state.State, machineID string, maybeStopMongo StopMongo) (worker.Worker, error) { + upgradeWorker := func(stopch <-chan struct{}) error { + return upgradeMongoWatcher(st, stopch, machineID, maybeStopMongo) + } + return worker.NewSimpleWorker(upgradeWorker), nil +} + +func upgradeMongoWatcher(st *state.State, stopch <-chan struct{}, machineID string, maybeStopMongo StopMongo) error { + m, err := st.Machine(machineID) + if err != nil { + return errors.Annotatef(err, "cannot start watcher for machine %q", machineID) + } + watch := m.Watch() + defer func() { + watch.Kill() + watch.Wait() + }() + + for { + select { + case <-watch.Changes(): + if err := m.Refresh(); err != nil { + return errors.Annotate(err, "cannot refresh machine information") + } + if !m.IsManager() { + continue + } + expectedVersion, err := m.StopMongoUntilVersion() + if err != nil { + return errors.Annotate(err, "cannot obtain minimum version of mongo") + } + if expectedVersion == mongo.Mongo24 { + continue + } + var isMaster bool + isMaster, err = mongo.IsMaster(st.MongoSession(), m) + if err != nil { + return errors.Annotatef(err, "cannot determine if machine %q is master", machineID) + } + + err = maybeStopMongo(expectedVersion, isMaster) + if err != nil { + return errors.Annotate(err, "cannot determine if mongo must be stopped") + } + if !isMaster { + addrs := make([]string, len(m.Addresses())) + ssi, err := st.StateServingInfo() + if err != nil { + return errors.Annotate(err, "cannot obtain state serving info to stop mongo") + } + for i, addr := range m.Addresses() { + addrs[i] = net.JoinHostPort(addr.Value, strconv.Itoa(ssi.StatePort)) + } + if err := replicaset.Remove(st.MongoSession(), addrs...); err != nil { + return errors.Annotatef(err, "cannot remove %q from replicaset", m.Id()) + } + if err := m.SetStopMongoUntilVersion(mongo.Mongo24); err != nil { + return errors.Annotate(err, "cannot reset stop mongo flag") + } + } + case <-stopch: + return nil + } + } +} === removed directory 'src/github.com/juju/juju/worker/networker' === removed file 'src/github.com/juju/juju/worker/networker/configfiles.go' --- src/github.com/juju/juju/worker/networker/configfiles.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/networker/configfiles.go 1970-01-01 00:00:00 +0000 @@ -1,226 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - - "github.com/juju/utils" - - "github.com/juju/juju/network" -) - -// ConfigFile defines operations on a network config file for a single -// network interface. -type ConfigFile interface { - // InterfaceName returns the inteface name for this config file. - InterfaceName() string - - // FileName returns the full path for storing this config file on - // disk. - FileName() string - - // InterfaceInfo returns the network.InterfaceInfo associated with - // this config file. - InterfaceInfo() network.InterfaceInfo - - // ReadData opens the underlying config file and populates the - // data. - ReadData() error - - // Data returns the original raw contents of this config file. - Data() []byte - - // RenderManaged generates network config based on the known - // network.InterfaceInfo and returns it. - RenderManaged() []byte - - // NeedsUpdating returns true if this config file needs to be - // written to disk. - NeedsUpdating() bool - - // IsPendingRemoval returns true if this config file needs to be - // removed. 
- IsPendingRemoval() bool - - // IsManaged returns true if this config file is managed by Juju. - IsManaged() bool - - // UpdateData updates the internally stored raw contents of this - // config file, and sets the "needs updating" internal flag, - // returning true, if newData is different. If newData is the same - // as the old or the interface is not managed, returns false and - // does not change anything. - UpdateData(newData []byte) bool - - // MarkForRemoval marks this config file as pending for removal, - // if the interface is managed. - MarkForRemoval() - - // Apply updates the config file data (if it needs updating), - // removes the file (if it's marked removal), or does nothing. - Apply() error -} - -// ManagedHeader is the header of a network config file managed by Juju. -const ManagedHeader = "# Managed by Juju, please don't change.\n\n" - -// RenderMainConfig generates a managed main config file, which -// includes *.cfg individual config files inside configSubDir (i.e. -// /etc/network/interfaces). -func RenderMainConfig(configSubDir string) []byte { - var data bytes.Buffer - globSpec := fmt.Sprintf("%s/*.cfg", configSubDir) - logger.Debugf("rendering main network config to include %q", globSpec) - fmt.Fprintf(&data, ManagedHeader) - fmt.Fprintf(&data, "source %s\n\n", globSpec) - return data.Bytes() -} - -// configFile implement ConfigFile. -type configFile struct { - // interfaceName holds the name of the network interface. - interfaceName string - - // fileName holds the full path to the config file on disk. - fileName string - - // interfaceInfo holds the network information about this - // interface, known by the API server. - interfaceInfo network.InterfaceInfo - - // data holds the raw file contents of the underlying file. - data []byte - - // needsUpdating is true when the interface config has changed and - // needs to be written back to disk. - needsUpdating bool - - // pendingRemoval is true when the interface config file is about - // to be removed. - pendingRemoval bool -} - -var _ ConfigFile = (*configFile)(nil) - -// InterfaceName implements ConfigFile.InterfaceName(). -func (f *configFile) InterfaceName() string { - return f.interfaceName -} - -// FileName implements ConfigFile.FileName(). -func (f *configFile) FileName() string { - return f.fileName -} - -// ReadData implements ConfigFile.ReadData(). -func (f *configFile) ReadData() error { - data, err := ioutil.ReadFile(f.fileName) - if err != nil { - return err - } - f.UpdateData(data) - return nil -} - -// InterfaceInfo implements ConfigFile.InterfaceInfo(). -func (f *configFile) InterfaceInfo() network.InterfaceInfo { - return f.interfaceInfo -} - -// Data implements ConfigFile.Data(). -func (f *configFile) Data() []byte { - return f.data -} - -// RenderManaged implements ConfigFile.RenderManaged(). -// -// TODO(dimitern) Once container addressability work has progressed -// enough, modify this to render the config taking all fields of -// network.InterfaceInfo into account. -func (f *configFile) RenderManaged() []byte { - var data bytes.Buffer - actualName := f.interfaceInfo.ActualInterfaceName() - logger.Debugf("rendering managed config for %q", actualName) - fmt.Fprintf(&data, ManagedHeader) - fmt.Fprintf(&data, "auto %s\n", actualName) - fmt.Fprintf(&data, "iface %s inet dhcp\n", actualName) - - // Add vlan-raw-device line for VLAN interfaces. - if f.interfaceInfo.IsVLAN() { - // network.InterfaceInfo.InterfaceName is always the physical - // device name, i.e. "eth1" for VLAN interface "eth1.42". 
- fmt.Fprintf(&data, "\tvlan-raw-device %s\n", f.interfaceInfo.InterfaceName) - } - fmt.Fprintf(&data, "\n") - return data.Bytes() -} - -// NeedsUpdating implements ConfigFile.NeedsUpdating(). -func (f *configFile) NeedsUpdating() bool { - return f.needsUpdating -} - -// IsPendingRemoval implements ConfigFile.IsPendingRemoval(). -func (f *configFile) IsPendingRemoval() bool { - return f.pendingRemoval -} - -// IsManaged implements ConfigFile.IsManaged() -func (f *configFile) IsManaged() bool { - return len(f.data) > 0 && bytes.HasPrefix(f.data, []byte(ManagedHeader)) -} - -// UpdateData implements ConfigFile.UpdateData(). -func (f *configFile) UpdateData(newData []byte) bool { - if bytes.Equal(f.data, newData) { - // Not changed. - if f.interfaceName == "" { - // This is the main config. - logger.Debugf("main network config not changed") - } else { - logger.Debugf("network config for %q not changed", f.interfaceName) - } - return false - } - f.data = make([]byte, len(newData)) - copy(f.data, newData) - f.needsUpdating = true - return true -} - -// MarkForRemoval implements ConfigFile.MarkForRemoval(). -func (f *configFile) MarkForRemoval() { - f.pendingRemoval = true -} - -// Apply implements ConfigFile.Apply(). -func (f *configFile) Apply() error { - if f.needsUpdating { - err := utils.AtomicWriteFile(f.fileName, f.data, 0644) - if err != nil { - logger.Errorf("failed to write file %q: %v", f.fileName, err) - return err - } - if f.interfaceName == "" { - logger.Debugf("updated main network config %q", f.fileName) - } else { - logger.Debugf("updated network config %q for %q", f.fileName, f.interfaceName) - } - f.needsUpdating = false - } - if f.pendingRemoval { - err := os.Remove(f.fileName) - if err != nil { - logger.Errorf("failed to remove file %q: %v", f.fileName, err) - return err - } - logger.Debugf("removed config %q for %q", f.fileName, f.interfaceName) - f.pendingRemoval = false - } - return nil -} === removed file 'src/github.com/juju/juju/worker/networker/configfiles_test.go' --- src/github.com/juju/juju/worker/networker/configfiles_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/networker/configfiles_test.go 1970-01-01 00:00:00 +0000 @@ -1,175 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - "io/ioutil" - "os" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/network" - "github.com/juju/juju/testing" - "github.com/juju/juju/worker/networker" -) - -type configFilesSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&configFilesSuite{}) - -func (s *configFilesSuite) TestSimpleGetters(c *gc.C) { - info := network.InterfaceInfo{ - InterfaceName: "blah", - } - data := []byte("some data") - cf := networker.NewConfigFile("ethX", "/some/path", info, data) - c.Assert(cf.InterfaceName(), gc.Equals, "ethX") - c.Assert(cf.FileName(), gc.Equals, "/some/path") - c.Assert(cf.InterfaceInfo(), jc.DeepEquals, info) - c.Assert(cf.Data(), jc.DeepEquals, data) - c.Assert(cf.NeedsUpdating(), jc.IsFalse) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - c.Assert(cf.IsManaged(), jc.IsFalse) -} - -func (s *configFilesSuite) TestRenderManaged(c *gc.C) { - info := network.InterfaceInfo{ - InterfaceName: "ethX", - VLANTag: 42, - } - cf := networker.NewConfigFile("ethX", "/some/path", info, nil) - data := cf.RenderManaged() - expectedVLAN := ` -# Managed by Juju, please don't change. 
- -auto ethX.42 -iface ethX.42 inet dhcp - vlan-raw-device ethX - -`[1:] - c.Assert(string(data), jc.DeepEquals, expectedVLAN) - - expectedNormal := ` -# Managed by Juju, please don't change. - -auto ethX -iface ethX inet dhcp - -`[1:] - info.VLANTag = 0 - cf = networker.NewConfigFile("ethX", "/some/path", info, nil) - data = cf.RenderManaged() - c.Assert(string(data), jc.DeepEquals, expectedNormal) -} - -func (s *configFilesSuite) TestUpdateData(c *gc.C) { - cf := networker.NewConfigFile("ethX", "", network.InterfaceInfo{}, nil) - assertData := func(expectData []byte, expectNeedsUpdating bool) { - c.Assert(string(cf.Data()), jc.DeepEquals, string(expectData)) - c.Assert(cf.NeedsUpdating(), gc.Equals, expectNeedsUpdating) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - } - - assertData(nil, false) - - result := cf.UpdateData(nil) - c.Assert(result, jc.IsFalse) - assertData(nil, false) - - newData := []byte("new data") - result = cf.UpdateData(newData) - c.Assert(result, jc.IsTrue) - assertData(newData, true) - - newData = []byte("newer data") - result = cf.UpdateData(newData) - c.Assert(result, jc.IsTrue) - assertData(newData, true) -} - -func (s *configFilesSuite) TestReadData(c *gc.C) { - data := []byte("some\ndata\nhere") - testFile := filepath.Join(c.MkDir(), "test") - defer os.Remove(testFile) - - err := ioutil.WriteFile(testFile, data, 0644) - c.Assert(err, jc.ErrorIsNil) - cf := networker.NewConfigFile("ethX", testFile, network.InterfaceInfo{}, nil) - err = cf.ReadData() - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(cf.Data()), jc.DeepEquals, string(data)) - c.Assert(cf.NeedsUpdating(), jc.IsTrue) -} - -func (s *configFilesSuite) TestMarkForRemoval(c *gc.C) { - cf := networker.NewConfigFile("ethX", "", network.InterfaceInfo{}, nil) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - c.Assert(cf.NeedsUpdating(), jc.IsFalse) - cf.MarkForRemoval() - c.Assert(cf.IsPendingRemoval(), jc.IsTrue) - c.Assert(cf.NeedsUpdating(), jc.IsFalse) -} - -func (s *configFilesSuite) TestIsManaged(c *gc.C) { - info := network.InterfaceInfo{ - InterfaceName: "ethX", - } - cf := networker.NewConfigFile("ethX", "", info, nil) - c.Assert(cf.IsManaged(), jc.IsFalse) // always false when no data - c.Assert(cf.UpdateData([]byte("blah")), jc.IsTrue) - c.Assert(cf.IsManaged(), jc.IsFalse) // false if header is missing - c.Assert(cf.UpdateData(cf.RenderManaged()), jc.IsTrue) - c.Assert(cf.IsManaged(), jc.IsTrue) -} - -func (s *configFilesSuite) TestApply(c *gc.C) { - data := []byte("some\ndata\nhere") - testFile := filepath.Join(c.MkDir(), "test") - defer os.Remove(testFile) - - cf := networker.NewConfigFile("ethX", testFile, network.InterfaceInfo{}, data) - c.Assert(cf.NeedsUpdating(), jc.IsFalse) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - c.Assert(string(cf.Data()), jc.DeepEquals, string(data)) - - newData := []byte("new\ndata") - c.Assert(cf.UpdateData(newData), jc.IsTrue) - c.Assert(cf.NeedsUpdating(), jc.IsTrue) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - - err := cf.Apply() - c.Assert(err, jc.ErrorIsNil) - c.Assert(cf.NeedsUpdating(), jc.IsFalse) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - - readData, err := ioutil.ReadFile(testFile) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(readData), jc.DeepEquals, string(newData)) - - cf.MarkForRemoval() - c.Assert(cf.NeedsUpdating(), jc.IsFalse) - c.Assert(cf.IsPendingRemoval(), jc.IsTrue) - err = cf.Apply() - c.Assert(err, jc.ErrorIsNil) - c.Assert(cf.NeedsUpdating(), jc.IsFalse) - c.Assert(cf.IsPendingRemoval(), jc.IsFalse) - - _, err = 
os.Stat(testFile) - c.Assert(err, jc.Satisfies, os.IsNotExist) -} - -func (s *configFilesSuite) TestRenderMainConfig(c *gc.C) { - expect := ` -# Managed by Juju, please don't change. - -source /some/path/*.cfg - -`[1:] - data := networker.RenderMainConfig("/some/path") - c.Assert(string(data), jc.DeepEquals, expect) -} === removed file 'src/github.com/juju/juju/worker/networker/export_test.go' --- src/github.com/juju/juju/worker/networker/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/networker/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,32 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker - -import ( - "github.com/juju/names" - - "github.com/juju/juju/network" -) - -// NewConfigFile is a helper used to create a *configFile for testing. -func NewConfigFile(interfaceName, fileName string, info network.InterfaceInfo, data []byte) ConfigFile { - return &configFile{ - interfaceName: interfaceName, - fileName: fileName, - interfaceInfo: info, - data: data, - } -} - -// IsRunningInLXC is a helper for testing isRunningInLXC. -func IsRunningInLXC(machineId string) bool { - nw := &Networker{tag: names.NewMachineTag(machineId)} - return nw.isRunningInLXC() -} - -// IsVLANModuleLoaded returns whether the 8021q kernel module has been -// loaded. -func (nw *Networker) IsVLANModuleLoaded() bool { - return nw.isVLANSupportInstalled -} === removed file 'src/github.com/juju/juju/worker/networker/networker.go' --- src/github.com/juju/juju/worker/networker/networker.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/networker/networker.go 1970-01-01 00:00:00 +0000 @@ -1,526 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker - -import ( - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "sort" - "strings" - - "github.com/juju/loggo" - "github.com/juju/names" - "launchpad.net/tomb" - - "github.com/juju/juju/agent" - apinetworker "github.com/juju/juju/api/networker" - apiwatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/network" - "github.com/juju/juju/state/watcher" - "github.com/juju/juju/worker" -) - -var logger = loggo.GetLogger("juju.networker") - -// DefaultConfigBaseDir is the usual root directory where the -// network configuration is kept. -const DefaultConfigBaseDir = "/etc/network" - -// Networker configures network interfaces on the machine, as needed. -type Networker struct { - tomb tomb.Tomb - - st apinetworker.State - tag names.MachineTag - - // isVLANSupportInstalled is set to true when the VLAN kernel - // module 8021q was installed. - isVLANSupportInstalled bool - - // intrusiveMode determines whether to write any changes - // to the network config (intrusive mode) or not (non-intrusive mode). - intrusiveMode bool - - // configBaseDir is the root directory where the networking - // config is kept (usually /etc/network). - configBaseDir string - - // primaryInterface is the name of the primary network interface - // on the machine (usually "eth0"). - primaryInterface string - - // loopbackInterface is the name of the loopback interface on the - // machine (usually "lo"). - loopbackInterface string - - // configFiles holds all loaded network config files, using the - // full file path as key. - configFiles map[string]*configFile - - // interfaceInfo holds the info for all network interfaces - // discovered via the API, using the interface name as key.
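// Note added for clarity (not from the original source): the keys here
// are network.InterfaceInfo.ActualInterfaceName() values, so a VLAN NIC
// appears as "eth1.42" rather than its raw device "eth1"; see
// fetchInterfaceInfo below.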
- interfaceInfo map[string]network.InterfaceInfo - - // interfaces holds all known network interfaces on the machine, - // using their name as key. - interfaces map[string]net.Interface - - // commands holds generated scripts (e.g. for bringing interfaces - // up or down, etc.) that have not been executed yet. - commands []string -} - -var _ worker.Worker = (*Networker)(nil) - -// NewNetworker returns a Worker that handles machine networking -// configuration. If there is no <configBaseDir>/interfaces file, an -// error is returned. -func NewNetworker( - st apinetworker.State, - agentConfig agent.Config, - intrusiveMode bool, - configBaseDir string, -) (*Networker, error) { - tag, ok := agentConfig.Tag().(names.MachineTag) - if !ok { - // This should never happen, as there is a check for it in the - // machine agent. - return nil, fmt.Errorf("expected names.MachineTag, got %T", agentConfig.Tag()) - } - nw := &Networker{ - st: st, - tag: tag, - intrusiveMode: intrusiveMode, - configBaseDir: configBaseDir, - configFiles: make(map[string]*configFile), - interfaceInfo: make(map[string]network.InterfaceInfo), - interfaces: make(map[string]net.Interface), - } - go func() { - defer nw.tomb.Done() - nw.tomb.Kill(nw.loop()) - }() - return nw, nil -} - -// Kill implements Worker.Kill(). -func (nw *Networker) Kill() { - nw.tomb.Kill(nil) -} - -// Wait implements Worker.Wait(). -func (nw *Networker) Wait() error { - return nw.tomb.Wait() -} - -// ConfigBaseDir returns the root directory where the networking config is -// kept. Usually, this is /etc/network. -func (nw *Networker) ConfigBaseDir() string { - return nw.configBaseDir -} - -// ConfigSubDir returns the directory where individual config files -// for each network interface are kept. Usually, this is -// /etc/network/interfaces.d. -func (nw *Networker) ConfigSubDir() string { - return filepath.Join(nw.ConfigBaseDir(), "interfaces.d") -} - -// ConfigFile returns the full path to the network config file for the -// given interface. If interfaceName is "", the path to the main -// network config file is returned (usually, this is -// /etc/network/interfaces). -func (nw *Networker) ConfigFile(interfaceName string) string { - if interfaceName == "" { - return filepath.Join(nw.ConfigBaseDir(), "interfaces") - } - return filepath.Join(nw.ConfigSubDir(), interfaceName+".cfg") -} - -// IntrusiveMode returns whether the networker is changing networking -// configuration files (intrusive mode) or won't modify them on the -// machine (non-intrusive mode). -func (nw *Networker) IntrusiveMode() bool { - return nw.intrusiveMode -} - -// IsPrimaryInterfaceOrLoopback returns whether the given -// interfaceName matches the primary or loopback network interface. -func (nw *Networker) IsPrimaryInterfaceOrLoopback(interfaceName string) bool { - return interfaceName == nw.primaryInterface || - interfaceName == nw.loopbackInterface -} - -// loop is the worker's main loop. -func (nw *Networker) loop() error { - // TODO(dimitern) Networker is disabled until we have time to fix - // it so it's not overwriting /etc/network/interfaces - // indiscriminately for containers and possibly other cases.
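// Note added for clarity (not from the original source): the
// log-and-return pair below short-circuits loop(), so the watcher setup
// and the change-handling select that follow are intentionally dead
// code until the worker is re-enabled.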
- logger.Infof("networker is disabled - not starting on machine %q", nw.tag) - return nil - - logger.Debugf("starting on machine %q", nw.tag) - if !nw.IntrusiveMode() { - logger.Warningf("running in non-intrusive mode - no commands or changes to network config will be done") - } - w, err := nw.init() - if err != nil { - if w != nil { - // We don't bother to propagate an error, because we - // already have an error - w.Stop() - } - return err - } - defer watcher.Stop(w, &nw.tomb) - logger.Debugf("initialized and started watching") - for { - select { - case <-nw.tomb.Dying(): - logger.Debugf("shutting down") - return tomb.ErrDying - case _, ok := <-w.Changes(): - logger.Debugf("got change notification") - if !ok { - return watcher.EnsureErr(w) - } - if err := nw.handle(); err != nil { - return err - } - } - } -} - -// init initializes the worker and starts a watcher for monitoring -// network interface changes. -func (nw *Networker) init() (apiwatcher.NotifyWatcher, error) { - // Discover all interfaces on the machine and populate internal - // maps, reading existing config files as well, and fetch the - // network info from the API.. - if err := nw.updateInterfaces(); err != nil { - return nil, err - } - - // Apply changes (i.e. write managed config files and load the - // VLAN module if needed). - if err := nw.applyAndExecute(); err != nil { - return nil, err - } - return nw.st.WatchInterfaces(nw.tag) -} - -// handle processes changes to network interfaces in state. -func (nw *Networker) handle() error { - // Update interfaces and config files as needed. - if err := nw.updateInterfaces(); err != nil { - return err - } - - // Bring down disabled interfaces. - nw.prepareDownCommands() - - // Bring up configured interfaces. - nw.prepareUpCommands() - - // Apply any needed changes to config and run generated commands. - if err := nw.applyAndExecute(); err != nil { - return err - } - return nil -} - -// updateInterfaces discovers all known network interfaces on the -// machine and caches the result internally. -func (nw *Networker) updateInterfaces() error { - interfaces, err := Interfaces() - if err != nil { - return fmt.Errorf("cannot retrieve network interfaces: %v", err) - } - logger.Debugf("updated machine network interfaces info") - - // Read the main config file first. - mainConfig := nw.ConfigFile("") - if _, ok := nw.configFiles[mainConfig]; !ok { - if err := nw.readConfig("", mainConfig); err != nil { - return err - } - } - - // Populate the internal maps for interfaces and configFiles and - // find the primary interface. - nw.interfaces = make(map[string]net.Interface) - for _, iface := range interfaces { - logger.Debugf( - "found interface %q with index %d and flags %s", - iface.Name, - iface.Index, - iface.Flags.String(), - ) - nw.interfaces[iface.Name] = iface - fullPath := nw.ConfigFile(iface.Name) - if _, ok := nw.configFiles[fullPath]; !ok { - if err := nw.readConfig(iface.Name, fullPath); err != nil { - return err - } - } - if iface.Flags&net.FlagLoopback != 0 && nw.loopbackInterface == "" { - nw.loopbackInterface = iface.Name - logger.Debugf("loopback interface is %q", iface.Name) - continue - } - - // The first enabled, non-loopback interface should be the - // primary. - if iface.Flags&net.FlagUp != 0 && nw.primaryInterface == "" { - nw.primaryInterface = iface.Name - logger.Debugf("primary interface is %q", iface.Name) - } - } - - // Fetch network info from the API and generate managed config as - // needed. 
- if err := nw.fetchInterfaceInfo(); err != nil { - return err - } - - return nil -} - -// fetchInterfaceInfo makes an API call to get all known -// *network.InterfaceInfo entries for each interface on the machine. -// If there are any VLAN interfaces to set up, it also generates -// commands to load the kernel 8021q VLAN module, if not already -// loaded and when not running inside an LXC container. -func (nw *Networker) fetchInterfaceInfo() error { - interfaceInfo, err := nw.st.MachineNetworkConfig(nw.tag) - if err != nil { - logger.Errorf("failed to retrieve network info: %v", err) - return err - } - logger.Debugf("fetched known network info from state") - - haveVLANs := false - nw.interfaceInfo = make(map[string]network.InterfaceInfo) - for _, info := range interfaceInfo { - actualName := info.ActualInterfaceName() - logger.Debugf( - "have network info for %q: MAC=%q, disabled: %v, vlan-tag: %d", - actualName, - info.MACAddress, - info.Disabled, - info.VLANTag, - ) - if info.IsVLAN() { - haveVLANs = true - } - nw.interfaceInfo[actualName] = info - fullPath := nw.ConfigFile(actualName) - cfgFile, ok := nw.configFiles[fullPath] - if !ok { - // We have info for an interface which was not - // discovered on the machine, so we need to add it to the - // list of managed interfaces. - logger.Debugf("no config for %q but network info exists; will generate", actualName) - if err := nw.readConfig(actualName, fullPath); err != nil { - return err - } - cfgFile = nw.configFiles[fullPath] - } - cfgFile.interfaceInfo = info - - // Make sure we generate managed config, in case it changed. - cfgFile.UpdateData(cfgFile.RenderManaged()) - - nw.configFiles[fullPath] = cfgFile - } - - // Generate managed main config file. - cfgFile := nw.configFiles[nw.ConfigFile("")] - cfgFile.UpdateData(RenderMainConfig(nw.ConfigSubDir())) - - if !haveVLANs { - return nil - } - - if !nw.isVLANSupportInstalled { - if nw.isRunningInLXC() { - msg := "running inside LXC: " - msg += "cannot load the required 8021q kernel module for VLAN support; " - msg += "please ensure it is loaded on the host" - logger.Warningf(msg) - return nil - } - nw.prepareVLANModule() - nw.isVLANSupportInstalled = true - logger.Debugf("need to load VLAN 8021q kernel module") - } - return nil -} - -// applyAndExecute updates or removes config files as needed, runs -// all accumulated pending commands and, if all commands succeed, -// resets the commands slice. If the networker is running in -// non-intrusive mode, nothing is changed. -func (nw *Networker) applyAndExecute() error { - if !nw.IntrusiveMode() { - logger.Warningf("running in non-intrusive mode - no changes made") - return nil - } - - // Create the config subdir, if needed. - configSubDir := nw.ConfigSubDir() - if _, err := os.Stat(configSubDir); err != nil { - if err := os.Mkdir(configSubDir, 0755); err != nil { - logger.Errorf("failed to create directory %q: %v", configSubDir, err) - return err - } - } - - // Read config subdir contents and remove any non-managed files. - files, err := ioutil.ReadDir(configSubDir) - if err != nil { - logger.Errorf("failed to read directory %q: %v", configSubDir, err) - return err - } - for _, info := range files { - if !info.Mode().IsRegular() { - // Skip special files and directories.
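// Note added for clarity (not from the original source): the loop below
// deletes any regular file in interfaces.d that has no entry in
// nw.configFiles, so stale or foreign *.cfg files do not survive an
// applyAndExecute pass in intrusive mode.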
- continue - } - fullPath := filepath.Join(configSubDir, info.Name()) - if _, ok := nw.configFiles[fullPath]; !ok { - if err := os.Remove(fullPath); err != nil { - logger.Errorf("failed to remove non-managed config %q: %v", fullPath, err) - return err - } - } - } - - // Apply all changes needed for each config file. - logger.Debugf("applying changes to config files as needed") - for _, cfgFile := range nw.configFiles { - if err := cfgFile.Apply(); err != nil { - return err - } - } - if len(nw.commands) > 0 { - logger.Debugf("executing commands %v", nw.commands) - if err := ExecuteCommands(nw.commands); err != nil { - return err - } - nw.commands = []string{} - } - return nil -} - -// isRunningInLXC returns whether the worker is running inside an LXC -// container or not. When running in LXC containers, we should not -// attempt to modprobe anything, as it's not possible and leads to -// run-time errors. See http://pad.lv/1353443. -func (nw *Networker) isRunningInLXC() bool { - // In case of nested containers, we need to check - // the last nesting level to ensure it's not LXC. - machineId := strings.ToLower(nw.tag.Id()) - parts := strings.Split(machineId, "/") - return len(parts) > 2 && parts[len(parts)-2] == "lxc" -} - -// prepareVLANModule generates the necessary commands to load the VLAN -// kernel module 8021q. -func (nw *Networker) prepareVLANModule() { - commands := []string{ - `dpkg-query -s vlan || apt-get --option Dpkg::Options::=--force-confold --assume-yes install vlan`, - `lsmod | grep -q 8021q || modprobe 8021q`, - `grep -q 8021q /etc/modules || echo 8021q >> /etc/modules`, - `vconfig set_name_type DEV_PLUS_VID_NO_PAD`, - } - nw.commands = append(nw.commands, commands...) -} - -// prepareUpCommands generates ifup commands to bring the needed -// interfaces up. -func (nw *Networker) prepareUpCommands() { - bringUp := []string{} - logger.Debugf("preparing to bring interfaces up") - for name, info := range nw.interfaceInfo { - if nw.IsPrimaryInterfaceOrLoopback(name) { - logger.Debugf("skipping primary or loopback interface %q", name) - continue - } - fullPath := nw.ConfigFile(name) - cfgFile := nw.configFiles[fullPath] - if info.Disabled && !cfgFile.IsPendingRemoval() { - cfgFile.MarkForRemoval() - logger.Debugf("disabled %q marked for removal", name) - } else if !info.Disabled && !InterfaceIsUp(name) { - bringUp = append(bringUp, name) - logger.Debugf("will bring %q up", name) - } - } - - // Sort interfaces to ensure raw interfaces go before their - // virtual dependents (i.e. VLANs) - sort.Sort(sort.StringSlice(bringUp)) - for _, name := range bringUp { - nw.commands = append(nw.commands, "ifup "+name) - } -} - -// prepareDownCommands generates ifdown commands to bring the needed -// interfaces down. -func (nw *Networker) prepareDownCommands() { - bringDown := []string{} - logger.Debugf("preparing to bring interfaces down") - for _, cfgFile := range nw.configFiles { - name := cfgFile.InterfaceName() - if name == "" { - // Skip the main config file. - continue - } - if nw.IsPrimaryInterfaceOrLoopback(name) { - logger.Debugf("skipping primary or loopback interface %q", name) - continue - } - info := cfgFile.InterfaceInfo() - if info.Disabled { - if InterfaceIsUp(name) { - bringDown = append(bringDown, name) - logger.Debugf("will bring %q down", name) - } - if !cfgFile.IsPendingRemoval() { - cfgFile.MarkForRemoval() - logger.Debugf("disabled %q marked for removal", name) - } - } - } - - // Sort interfaces to ensure raw interfaces go after their virtual - // dependents (i.e.
VLANs) - sort.Sort(sort.Reverse(sort.StringSlice(bringDown))) - for _, name := range bringDown { - nw.commands = append(nw.commands, "ifdown "+name) - } -} - -// readConfig populates the configFiles map with an entry for the -// given interface and filename, and tries to read the file. If the -// config file is missing, that's OK, as it will be generated later -// and it's not considered an error. If configFiles already contains -// an entry for fileName, nothing is changed. -func (nw *Networker) readConfig(interfaceName, fileName string) error { - cfgFile := &configFile{ - interfaceName: interfaceName, - fileName: fileName, - } - if err := cfgFile.ReadData(); !os.IsNotExist(err) && err != nil { - return err - } - if _, ok := nw.configFiles[fileName]; !ok { - nw.configFiles[fileName] = cfgFile - } - return nil -} === removed file 'src/github.com/juju/juju/worker/networker/networker_test.go' --- src/github.com/juju/juju/worker/networker/networker_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/networker/networker_test.go 1970-01-01 00:00:00 +0000 @@ -1,453 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - "net" - "os" - "path/filepath" - "strings" - "time" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - "github.com/juju/utils/set" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/agent" - "github.com/juju/juju/api" - apinetworker "github.com/juju/juju/api/networker" - "github.com/juju/juju/instance" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/networker" -) - -type networkerSuite struct { - testing.JujuConnSuite - - stateMachine *state.Machine - stateNetworks []state.NetworkInfo - stateInterfaces []state.NetworkInterfaceInfo - - upInterfaces set.Strings - interfacesWithAddress set.Strings - machineInterfaces []net.Interface - vlanModuleLoaded bool - lastCommands chan []string - - apiState api.Connection - apiFacade apinetworker.State -} - -var _ = gc.Suite(&networkerSuite{}) - -func (s *networkerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - - // Setup testing state. - s.setUpNetworks(c) - s.setUpMachine(c) - - s.machineInterfaces = []net.Interface{ - {Index: 1, MTU: 65535, Name: "lo", Flags: net.FlagUp | net.FlagLoopback}, - {Index: 2, MTU: 1500, Name: "eth0", Flags: net.FlagUp}, - {Index: 3, MTU: 1500, Name: "eth1"}, - {Index: 4, MTU: 1500, Name: "eth2"}, - } - s.PatchValue(&networker.InterfaceIsUp, func(name string) bool { - return s.upInterfaces.Contains(name) - }) - s.PatchValue(&networker.InterfaceHasAddress, func(name string) bool { - return s.interfacesWithAddress.Contains(name) - }) - s.PatchValue(&networker.ExecuteCommands, func(commands []string) error { - return s.executeCommandsHook(c, commands) - }) - s.PatchValue(&networker.Interfaces, func() ([]net.Interface, error) { - return s.machineInterfaces, nil - }) - - // Create the networker API facade. 
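// Note added for clarity (not from the original source): the PatchValue
// calls above swap out the package-level function variables from
// utils.go (Interfaces, InterfaceIsUp, InterfaceHasAddress,
// ExecuteCommands), so the suite drives the worker entirely against
// fakes and no real ifup, ifdown or modprobe runs on the test host.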
- s.apiFacade = s.apiState.Networker() - c.Assert(s.apiFacade, gc.NotNil) -} - -func (s *networkerSuite) TestStartStop(c *gc.C) { - nw := s.newNetworker(c, true) - c.Assert(worker.Stop(nw), gc.IsNil) -} - -func (s *networkerSuite) TestConfigPaths(c *gc.C) { - nw, configDir := s.newCustomNetworker(c, s.apiFacade, s.stateMachine.Id(), true, true) - defer worker.Stop(nw) - - c.Assert(nw.ConfigBaseDir(), gc.Equals, configDir) - subdir := filepath.Join(configDir, "interfaces.d") - c.Assert(nw.ConfigSubDir(), gc.Equals, subdir) - c.Assert(nw.ConfigFile(""), gc.Equals, filepath.Join(configDir, "interfaces")) - c.Assert(nw.ConfigFile("ethX.42"), gc.Equals, filepath.Join(subdir, "ethX.42.cfg")) -} - -func (s *networkerSuite) TestSafeNetworkerCannotWriteConfig(c *gc.C) { - c.Skip("enable once the networker is enabled again") - - nw := s.newNetworker(c, false) - defer worker.Stop(nw) - c.Assert(nw.IntrusiveMode(), jc.IsFalse) - - select { - case cmds := <-s.lastCommands: - c.Fatalf("no commands expected, got %v", cmds) - case <-time.After(coretesting.ShortWait): - s.assertNoConfig(c, nw, "", "lo", "eth0", "eth1", "eth1.42", "eth0.69") - } -} - -func (s *networkerSuite) TestNormalNetworkerCanWriteConfigAndLoadsVLANModule(c *gc.C) { - c.Skip("enable once the networker is enabled again") - - nw := s.newNetworker(c, true) - defer worker.Stop(nw) - c.Assert(nw.IntrusiveMode(), jc.IsTrue) - - select { - case <-s.lastCommands: - // Running the VLAN module loading commands is one of the first - // things the worker does, so if that happened, we can assume - // commands are executed. - c.Assert(s.vlanModuleLoaded, jc.IsTrue) - c.Assert(nw.IsVLANModuleLoaded(), jc.IsTrue) - case <-time.After(coretesting.ShortWait): - c.Fatalf("commands expected but not executed") - } - c.Assert(nw.IsPrimaryInterfaceOrLoopback("lo"), jc.IsTrue) - c.Assert(nw.IsPrimaryInterfaceOrLoopback("eth0"), jc.IsTrue) - s.assertHaveConfig(c, nw, "", "eth0", "eth1", "eth1.42", "eth0.69") -} - -func (s *networkerSuite) TestPrimaryOrLoopbackInterfacesAreSkipped(c *gc.C) { - c.Skip("enable once the networker is enabled again") - - // Reset what's considered up, so we can test eth0 and lo are not - // touched. - s.upInterfaces = make(set.Strings) - s.interfacesWithAddress = make(set.Strings) - - nw, _ := s.newCustomNetworker(c, s.apiFacade, s.stateMachine.Id(), true, false) - defer worker.Stop(nw) - - timeout := time.After(coretesting.LongWait) - for { - select { - case <-s.lastCommands: - if !s.vlanModuleLoaded { - // Running the VLAN module loading commands is one of the first - // things the worker does, so if it hasn't happened, we wait a bit more. - continue - } - c.Assert(s.upInterfaces.Contains("lo"), jc.IsFalse) - c.Assert(s.upInterfaces.Contains("eth0"), jc.IsFalse) - if s.upInterfaces.Contains("eth1") { - // If we run ifup eth1, we successfully skipped lo and - // eth0. - s.assertHaveConfig(c, nw, "", "eth0", "eth1", "eth1.42", "eth0.69") - return - } - case <-timeout: - c.Fatalf("commands expected but not executed") - } - } -} - -func (s *networkerSuite) TestDisabledInterfacesAreBroughtDown(c *gc.C) { - c.Skip("enable once the networker is enabled again") - - // Simulate eth1 is up and then disable it, so we can test it's - // brought down. Also test the VLAN interface eth1.42 is also - // brought down, as its physical interface eth1 is disabled.
- s.upInterfaces = set.NewStrings("lo", "eth0", "eth1") - s.interfacesWithAddress = set.NewStrings("lo", "eth0", "eth1") - s.machineInterfaces[2].Flags |= net.FlagUp - ifaces, err := s.stateMachine.NetworkInterfaces() - c.Assert(err, jc.ErrorIsNil) - err = ifaces[1].Disable() - c.Assert(err, jc.ErrorIsNil) - // We verify that disabling the parent physical interface leads - // to any VLAN interfaces depending on it getting disabled as well. - err = ifaces[2].Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(ifaces[2].IsDisabled(), jc.IsTrue) - - nw, _ := s.newCustomNetworker(c, s.apiFacade, s.stateMachine.Id(), true, false) - defer worker.Stop(nw) - - timeout := time.After(coretesting.LongWait) - for { - select { - case cmds := <-s.lastCommands: - if !strings.Contains(strings.Join(cmds, " "), "ifdown") { - // No down commands yet, keep waiting. - continue - } - c.Assert(s.upInterfaces.Contains("eth1"), jc.IsFalse) - c.Assert(s.machineInterfaces[2].Flags&net.FlagUp, gc.Equals, net.Flags(0)) - c.Assert(s.upInterfaces.Contains("eth1.42"), jc.IsFalse) - s.assertNoConfig(c, nw, "eth1", "eth1.42") - s.assertHaveConfig(c, nw, "", "eth0", "eth0.69") - return - case <-timeout: - c.Fatalf("commands expected but not executed") - } - } -} - -func (s *networkerSuite) TestIsRunningInLXC(c *gc.C) { - tests := []struct { - machineId string - result bool - }{ - {"0", false}, - {"1/lxc/0", true}, - {"2/kvm/1", false}, - {"3/lxc/0/lxc/1", true}, - {"4/lxc/0/kvm/1", false}, - {"5/lxc/1/kvm/1/lxc/3", true}, - } - for i, t := range tests { - c.Logf("test %d: %q -> %v", i, t.machineId, t.result) - c.Check(networker.IsRunningInLXC(t.machineId), gc.Equals, t.result) - } -} - -func (s *networkerSuite) TestNoModprobeWhenRunningInLXC(c *gc.C) { - c.Skip("enable once the networker is enabled again") - - // Create a new container. - template := state.MachineTemplate{ - Series: coretesting.FakeDefaultSeries, - Jobs: []state.MachineJob{state.JobHostUnits}, - } - lxcMachine, err := s.State.AddMachineInsideMachine(template, s.stateMachine.Id(), instance.LXC) - c.Assert(err, jc.ErrorIsNil) - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = lxcMachine.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - lxcInterfaces := []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:02:f0", - InterfaceName: "eth0.123", - NetworkName: "vlan123", - IsVirtual: true, - Disabled: false, - }} - s.machineInterfaces = []net.Interface{ - {Index: 1, MTU: 65535, Name: "lo", Flags: net.FlagUp | net.FlagLoopback}, - {Index: 2, MTU: 1500, Name: "eth0", Flags: net.FlagUp}, - } - - err = lxcMachine.SetInstanceInfo("i-am-lxc", "fake_nonce", nil, s.stateNetworks, lxcInterfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - // Log in to the API as the machine agent of lxcMachine. - lxcState := s.OpenAPIAsMachine(c, lxcMachine.Tag(), password, "fake_nonce") - c.Assert(lxcState, gc.NotNil) - lxcFacade := lxcState.Networker() - c.Assert(lxcFacade, gc.NotNil) - - // Create and set up the networker for the LXC machine.
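// Note added for clarity (not from the original source): lxcMachine's id
// contains an "/lxc/" segment, so isRunningInLXC reports true and
// fetchInterfaceInfo logs a warning instead of queueing modprobe
// commands; the vlanModuleLoaded and IsVLANModuleLoaded assertions below
// pin that behaviour down.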
- nw, _ := s.newCustomNetworker(c, lxcFacade, lxcMachine.Id(), true, true) - defer worker.Stop(nw) - - timeout := time.After(coretesting.LongWait) - for { - select { - case cmds := <-s.lastCommands: - if !s.upInterfaces.Contains("eth0.123") { - c.Fatalf("expected command ifup eth0.123, got %v", cmds) - } - c.Assert(s.vlanModuleLoaded, jc.IsFalse) - c.Assert(nw.IsVLANModuleLoaded(), jc.IsFalse) - s.assertHaveConfig(c, nw, "", "eth0.123") - s.assertNoConfig(c, nw, "lo", "eth0") - return - case <-timeout: - c.Fatalf("no commands executed!") - } - } -} - -type mockConfig struct { - agent.Config - tag names.Tag -} - -func (mock *mockConfig) Tag() names.Tag { - return mock.tag -} - -func agentConfig(machineId string) agent.Config { - return &mockConfig{tag: names.NewMachineTag(machineId)} -} - -// Create several networks. -func (s *networkerSuite) setUpNetworks(c *gc.C) { - s.stateNetworks = []state.NetworkInfo{{ - Name: "net1", - ProviderId: "net1", - CIDR: "0.1.2.0/24", - VLANTag: 0, - }, { - Name: "vlan42", - ProviderId: "vlan42", - CIDR: "0.2.2.0/24", - VLANTag: 42, - }, { - Name: "vlan69", - ProviderId: "vlan69", - CIDR: "0.3.2.0/24", - VLANTag: 69, - }, { - Name: "vlan123", - ProviderId: "vlan123", - CIDR: "0.4.2.0/24", - VLANTag: 123, - }, { - Name: "net2", - ProviderId: "net2", - CIDR: "0.5.2.0/24", - VLANTag: 0, - }} -} - -func (s *networkerSuite) setUpMachine(c *gc.C) { - var err error - s.stateMachine, err = s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = s.stateMachine.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - s.stateInterfaces = []state.NetworkInterfaceInfo{{ - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1", - NetworkName: "net1", - IsVirtual: false, - }, { - MACAddress: "aa:bb:cc:dd:ee:f1", - InterfaceName: "eth1.42", - NetworkName: "vlan42", - IsVirtual: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f0", - InterfaceName: "eth0.69", - NetworkName: "vlan69", - IsVirtual: true, - }, { - MACAddress: "aa:bb:cc:dd:ee:f2", - InterfaceName: "eth2", - NetworkName: "net2", - IsVirtual: false, - }} - err = s.stateMachine.SetInstanceInfo("i-am", "fake_nonce", nil, s.stateNetworks, s.stateInterfaces, nil, nil) - c.Assert(err, jc.ErrorIsNil) - s.apiState = s.OpenAPIAsMachine(c, s.stateMachine.Tag(), password, "fake_nonce") - c.Assert(s.apiState, gc.NotNil) -} - -func (s *networkerSuite) executeCommandsHook(c *gc.C, commands []string) error { - markUp := func(name string, isUp bool) { - for i, iface := range s.machineInterfaces { - if iface.Name == name { - if isUp { - iface.Flags |= net.FlagUp - } else { - iface.Flags &= ^net.FlagUp - } - s.machineInterfaces[i] = iface - return - } - } - } - for _, cmd := range commands { - args := strings.Split(cmd, " ") - if len(args) >= 2 { - what, name := args[0], args[1] - switch what { - case "ifup": - s.upInterfaces.Add(name) - s.interfacesWithAddress.Add(name) - markUp(name, true) - c.Logf("bringing %q up", name) - case "ifdown": - s.upInterfaces.Remove(name) - s.interfacesWithAddress.Remove(name) - markUp(name, false) - c.Logf("bringing %q down", name) - } - } - if strings.Contains(cmd, "modprobe 8021q") { - s.vlanModuleLoaded = true - c.Logf("VLAN module loaded") - } - } - // Send the commands without blocking. 
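// Note added for clarity (not from the original source): the select with
// a default case below makes the send non-blocking, so commands are
// dropped when no test is reading s.lastCommands and the hook can never
// deadlock the worker goroutine.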
- select { - case s.lastCommands <- commands: - default: - } - return nil -} - -func (s *networkerSuite) newCustomNetworker( - c *gc.C, - facade apinetworker.State, - machineId string, - intrusiveMode bool, - initInterfaces bool, -) (*networker.Networker, string) { - if initInterfaces { - s.upInterfaces = set.NewStrings("lo", "eth0") - s.interfacesWithAddress = set.NewStrings("lo", "eth0") - } - s.lastCommands = make(chan []string) - s.vlanModuleLoaded = false - configDir := c.MkDir() - - nw, err := networker.NewNetworker(facade, agentConfig(machineId), intrusiveMode, configDir) - c.Assert(err, jc.ErrorIsNil) - c.Assert(nw, gc.NotNil) - - return nw, configDir -} - -func (s *networkerSuite) newNetworker(c *gc.C, canWriteConfig bool) *networker.Networker { - nw, _ := s.newCustomNetworker(c, s.apiFacade, s.stateMachine.Id(), canWriteConfig, true) - return nw -} - -func (s *networkerSuite) assertNoConfig(c *gc.C, nw *networker.Networker, interfaceNames ...string) { - for _, name := range interfaceNames { - fullPath := nw.ConfigFile(name) - _, err := os.Stat(fullPath) - c.Assert(err, jc.Satisfies, os.IsNotExist) - } -} - -func (s *networkerSuite) assertHaveConfig(c *gc.C, nw *networker.Networker, interfaceNames ...string) { - for _, name := range interfaceNames { - fullPath := nw.ConfigFile(name) - _, err := os.Stat(fullPath) - c.Assert(err, jc.ErrorIsNil) - } -} === removed file 'src/github.com/juju/juju/worker/networker/package_test.go' --- src/github.com/juju/juju/worker/networker/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/networker/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - stdtesting "testing" - - "github.com/juju/juju/testing" -) - -func Test(t *stdtesting.T) { - testing.MgoTestPackage(t) -} === removed file 'src/github.com/juju/juju/worker/networker/utils.go' --- src/github.com/juju/juju/worker/networker/utils.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/networker/utils.go 1970-01-01 00:00:00 +0000 @@ -1,77 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker - -import ( - "fmt" - "net" - - "github.com/juju/utils/exec" -) - -// Functions defined here for easier patching when testing. -var ( - ExecuteCommands = executeCommands - Interfaces = interfaces - InterfaceIsUp = interfaceIsUp - InterfaceHasAddress = interfaceHasAddress -) - -// executeCommands executes a batch of commands one by one. -func executeCommands(commands []string) error { - for _, command := range commands { - result, err := exec.RunCommands(exec.RunParams{ - Commands: command, - WorkingDir: "/", - }) - if err != nil { - return fmt.Errorf("failed to execute %q: %v", command, err) - } - if result.Code != 0 { - return fmt.Errorf( - "command %q failed (code: %d, stdout: %s, stderr: %s)", - command, result.Code, result.Stdout, result.Stderr) - } - logger.Debugf("command %q (code: %d, stdout: %s, stderr: %s)", - command, result.Code, result.Stdout, result.Stderr) - } - return nil -} - -// interfaceIsUp returns whether the given network interface is up. -func interfaceIsUp(interfaceName string) bool { - iface, err := net.InterfaceByName(interfaceName) - if err != nil { - // Log as warning, because with virtual interfaces, there - // might be pending commands to execute that actually create - // the interface first before we can look it up.
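// Sketch added for clarity (not from the original source): the exported
// variables above exist purely as test seams. A test could stub one out
// along these lines, restoring the original afterwards:
//
//	orig := networker.InterfaceIsUp
//	networker.InterfaceIsUp = func(string) bool { return true }
//	defer func() { networker.InterfaceIsUp = orig }()
//
// The suites in this package use testing.PatchValue for the same effect.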
- logger.Warningf("cannot tell if %q is up: %v", interfaceName, err) - return false - } - return (iface.Flags & net.FlagUp) != 0 -} - -// interfaceHasAddress whether the given network interface has at -// least one assigned address. -func interfaceHasAddress(interfaceName string) bool { - iface, err := net.InterfaceByName(interfaceName) - if err != nil { - // Log as warning, because with virtual interfaces, there - // might be pending commands to execute that actually create - // the interface first before we can look it up. - logger.Warningf("cannot tell if %q has addresses: %v", interfaceName, err) - return false - } - addrs, err := iface.Addrs() - if err != nil { - logger.Errorf("cannot get addresses for network interface %q: %v", interfaceName, err) - return false - } - return len(addrs) != 0 -} - -// interfaces returns all known network interfaces on the machine. -func interfaces() ([]net.Interface, error) { - return net.Interfaces() -} === removed file 'src/github.com/juju/juju/worker/networker/utils_test.go' --- src/github.com/juju/juju/worker/networker/utils_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/networker/utils_test.go 1970-01-01 00:00:00 +0000 @@ -1,37 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package networker_test - -import ( - "runtime" - - gc "gopkg.in/check.v1" - - "github.com/juju/juju/testing" - "github.com/juju/juju/worker/networker" -) - -type utilsSuite struct { - testing.BaseSuite -} - -var _ = gc.Suite(&utilsSuite{}) - -func (s *utilsSuite) TestExecuteCommands(c *gc.C) { - //TODO(bogdanteleaga): Fix this on windows - if runtime.GOOS == "windows" { - c.Skip("bug 1403084: test uses bash scripts, will fix later on windows") - } - commands := []string{ - "echo start", - "sh -c 'echo STDOUT; echo STDERR >&2; exit 123'", - "echo end", - "exit 111", - } - err := networker.ExecuteCommands(commands) - expected := "command \"sh -c 'echo STDOUT; echo STDERR >&2; exit 123'\" failed " + - "(code: 123, stdout: STDOUT\n, stderr: STDERR\n)" - c.Assert(err, gc.NotNil) - c.Assert(err.Error(), gc.Equals, expected) -} === removed file 'src/github.com/juju/juju/worker/notifyworker.go' --- src/github.com/juju/juju/worker/notifyworker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/notifyworker.go 1970-01-01 00:00:00 +0000 @@ -1,108 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package worker - -import ( - "launchpad.net/tomb" - - apiWatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/state/watcher" -) - -// ensureErr is defined as a variable to allow the test suite -// to override it. -var ensureErr = watcher.EnsureErr - -// notifyWorker is the internal implementation of the Worker -// interface, using a NotifyWatcher for handling changes. -type notifyWorker struct { - tomb tomb.Tomb - - // handler is what will handle when events are triggered - handler NotifyWatchHandler -} - -// NotifyWatchHandler implements the business logic that is triggered -// as part of watching a NotifyWatcher. -type NotifyWatchHandler interface { - // SetUp starts the handler, this should create the watcher we - // will be waiting on for more events. SetUp can return a Watcher - // even if there is an error, and the notify Worker will make sure - // to stop the watcher. 
- SetUp() (apiWatcher.NotifyWatcher, error) - - // TearDown should clean up any resources that are left around - TearDown() error - - // Handle is called when the Watcher has indicated there are changes; do - // whatever work is necessary to process it. The done channel is closed if - // the worker is being interrupted to finish. Any worker should avoid any - // bare channel reads or writes, but instead use a select with the done - // channel. - Handle(done <-chan struct{}) error -} - -// NewNotifyWorker starts a new worker running the business logic from -// the handler. The worker loop is started in another goroutine as a -// side effect of calling this. -func NewNotifyWorker(handler NotifyWatchHandler) Worker { - nw := &notifyWorker{ - handler: handler, - } - - go func() { - defer nw.tomb.Done() - nw.tomb.Kill(nw.loop()) - }() - return nw -} - -// Kill the loop with no error -func (nw *notifyWorker) Kill() { - nw.tomb.Kill(nil) -} - -// Wait for the looping to finish -func (nw *notifyWorker) Wait() error { - return nw.tomb.Wait() -} - -type tearDowner interface { - TearDown() error -} - -// propagateTearDown tears down the handler, but ensures any error is -// propagated through the tomb's Kill method. -func propagateTearDown(handler tearDowner, t *tomb.Tomb) { - if err := handler.TearDown(); err != nil { - t.Kill(err) - } -} - -func (nw *notifyWorker) loop() error { - w, err := nw.handler.SetUp() - if err != nil { - if w != nil { - // We don't bother to propagate an error, because we - // already have an error - w.Stop() - } - return err - } - defer propagateTearDown(nw.handler, &nw.tomb) - defer watcher.Stop(w, &nw.tomb) - for { - select { - case <-nw.tomb.Dying(): - return tomb.ErrDying - case _, ok := <-w.Changes(): - if !ok { - return ensureErr(w) - } - if err := nw.handler.Handle(nw.tomb.Dying()); err != nil { - return err - } - } - } -} === removed file 'src/github.com/juju/juju/worker/notifyworker_test.go' --- src/github.com/juju/juju/worker/notifyworker_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/notifyworker_test.go 1970-01-01 00:00:00 +0000 @@ -1,355 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.
- -package worker_test - -import ( - "fmt" - "sync" - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "launchpad.net/tomb" - - apiWatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/state/watcher" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker" -) - -type notifyWorkerSuite struct { - coretesting.BaseSuite - worker worker.Worker - actor *notifyHandler -} - -var _ = gc.Suite(¬ifyWorkerSuite{}) - -func newNotifyHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*notifyHandler, worker.Worker) { - nh := ¬ifyHandler{ - actions: nil, - handled: make(chan struct{}, 1), - setupError: setupError, - teardownError: teardownError, - handlerError: handlerError, - watcher: &testNotifyWatcher{ - changes: make(chan struct{}), - }, - setupDone: make(chan struct{}), - } - w := worker.NewNotifyWorker(nh) - select { - case <-nh.setupDone: - case <-time.After(coretesting.ShortWait): - c.Error("Failed waiting for notifyHandler.Setup to be called during SetUpTest") - } - return nh, w -} - -func (s *notifyWorkerSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - s.actor, s.worker = newNotifyHandlerWorker(c, nil, nil, nil) -} - -func (s *notifyWorkerSuite) TearDownTest(c *gc.C) { - worker.SetEnsureErr(nil) - s.stopWorker(c) - s.BaseSuite.TearDownTest(c) -} - -type notifyHandler struct { - actions []string - mu sync.Mutex - // Signal handled when we get a handle() call - handled chan struct{} - setupError error - teardownError error - handlerError error - watcher *testNotifyWatcher - setupDone chan struct{} -} - -var _ worker.NotifyWatchHandler = (*notifyHandler)(nil) - -func (nh *notifyHandler) SetUp() (apiWatcher.NotifyWatcher, error) { - defer func() { nh.setupDone <- struct{}{} }() - nh.mu.Lock() - defer nh.mu.Unlock() - nh.actions = append(nh.actions, "setup") - if nh.watcher == nil { - return nil, nh.setupError - } - return nh.watcher, nh.setupError -} - -func (nh *notifyHandler) TearDown() error { - nh.mu.Lock() - defer nh.mu.Unlock() - nh.actions = append(nh.actions, "teardown") - if nh.handled != nil { - close(nh.handled) - } - return nh.teardownError -} - -func (nh *notifyHandler) Handle(_ <-chan struct{}) error { - nh.mu.Lock() - defer nh.mu.Unlock() - nh.actions = append(nh.actions, "handler") - if nh.handled != nil { - // Unlock while we are waiting for the send - nh.mu.Unlock() - nh.handled <- struct{}{} - nh.mu.Lock() - } - return nh.handlerError -} - -func (nh *notifyHandler) CheckActions(c *gc.C, actions ...string) { - nh.mu.Lock() - defer nh.mu.Unlock() - c.Check(nh.actions, gc.DeepEquals, actions) -} - -// During teardown we try to stop the worker, but don't hang the test suite if -// Stop never returns -func (s *notifyWorkerSuite) stopWorker(c *gc.C) { - if s.worker == nil { - return - } - done := make(chan error) - go func() { - done <- worker.Stop(s.worker) - }() - err := waitForTimeout(c, done, coretesting.LongWait) - c.Check(err, jc.ErrorIsNil) - s.actor = nil - s.worker = nil -} - -type testNotifyWatcher struct { - mu sync.Mutex - changes chan struct{} - stopped bool - stopError error -} - -var _ apiWatcher.NotifyWatcher = (*testNotifyWatcher)(nil) - -func (tnw *testNotifyWatcher) Changes() <-chan struct{} { - return tnw.changes -} - -func (tnw *testNotifyWatcher) Err() error { - return tnw.stopError -} - -func (tnw *testNotifyWatcher) Stop() error { - tnw.mu.Lock() - defer tnw.mu.Unlock() - if !tnw.stopped { - close(tnw.changes) - } - tnw.stopped = true - return tnw.stopError -} - -func (tnw 
*testNotifyWatcher) SetStopError(err error) { - tnw.mu.Lock() - tnw.stopError = err - tnw.mu.Unlock() -} - -func (tnw *testNotifyWatcher) TriggerChange(c *gc.C) { - select { - case tnw.changes <- struct{}{}: - case <-time.After(coretesting.LongWait): - c.Errorf("timed out trying to trigger a change") - } -} - -func waitForTimeout(c *gc.C, ch <-chan error, timeout time.Duration) error { - select { - case err := <-ch: - return err - case <-time.After(timeout): - c.Errorf("timed out waiting to receive a change after %s", timeout) - } - return nil -} - -func waitShort(c *gc.C, w worker.Worker) error { - done := make(chan error) - go func() { - done <- w.Wait() - }() - return waitForTimeout(c, done, coretesting.ShortWait) -} - -func waitForHandledNotify(c *gc.C, handled chan struct{}) { - select { - case <-handled: - case <-time.After(coretesting.LongWait): - c.Errorf("handled failed to signal after %s", coretesting.LongWait) - } -} - -func (s *notifyWorkerSuite) TestKill(c *gc.C) { - s.worker.Kill() - err := waitShort(c, s.worker) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *notifyWorkerSuite) TestStop(c *gc.C) { - err := worker.Stop(s.worker) - c.Assert(err, jc.ErrorIsNil) - // After stop, Wait should return right away - err = waitShort(c, s.worker) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *notifyWorkerSuite) TestWait(c *gc.C) { - done := make(chan error) - go func() { - done <- s.worker.Wait() - }() - // Wait should not return until we've killed the worker - select { - case err := <-done: - c.Errorf("Wait() didn't wait until we stopped it: %v", err) - case <-time.After(coretesting.ShortWait): - } - s.worker.Kill() - err := waitForTimeout(c, done, coretesting.LongWait) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *notifyWorkerSuite) TestCallSetUpAndTearDown(c *gc.C) { - // After calling NewNotifyWorker, we should have called setup - s.actor.CheckActions(c, "setup") - // If we kill the worker, it should notice, and call teardown - s.worker.Kill() - err := waitShort(c, s.worker) - c.Check(err, jc.ErrorIsNil) - s.actor.CheckActions(c, "setup", "teardown") - c.Check(s.actor.watcher.stopped, jc.IsTrue) -} - -func (s *notifyWorkerSuite) TestChangesTriggerHandler(c *gc.C) { - s.actor.CheckActions(c, "setup") - s.actor.watcher.TriggerChange(c) - waitForHandledNotify(c, s.actor.handled) - s.actor.CheckActions(c, "setup", "handler") - s.actor.watcher.TriggerChange(c) - waitForHandledNotify(c, s.actor.handled) - s.actor.watcher.TriggerChange(c) - waitForHandledNotify(c, s.actor.handled) - s.actor.CheckActions(c, "setup", "handler", "handler", "handler") - c.Assert(worker.Stop(s.worker), gc.IsNil) - s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown") -} - -func (s *notifyWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { - // Stop the worker and SetUp again, this time with an error - s.stopWorker(c) - actor, w := newNotifyHandlerWorker(c, fmt.Errorf("my special error"), nil, nil) - err := waitShort(c, w) - c.Check(err, gc.ErrorMatches, "my special error") - // TearDown is not called on SetUp error. 
- actor.CheckActions(c, "setup") - c.Check(actor.watcher.stopped, jc.IsTrue) -} - -func (s *notifyWorkerSuite) TestWatcherStopFailurePropagates(c *gc.C) { - s.actor.watcher.SetStopError(fmt.Errorf("error while stopping watcher")) - s.worker.Kill() - c.Assert(s.worker.Wait(), gc.ErrorMatches, "error while stopping watcher") - // We've already stopped the worker, don't let teardown notice the - // worker is in an error state - s.worker = nil -} - -func (s *notifyWorkerSuite) TestCleanRunNoticesTearDownError(c *gc.C) { - s.actor.teardownError = fmt.Errorf("failed to tear down watcher") - s.worker.Kill() - c.Assert(s.worker.Wait(), gc.ErrorMatches, "failed to tear down watcher") - s.worker = nil -} - -func (s *notifyWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { - s.stopWorker(c) - actor, w := newNotifyHandlerWorker(c, nil, fmt.Errorf("my handling error"), nil) - actor.watcher.TriggerChange(c) - waitForHandledNotify(c, actor.handled) - err := waitShort(c, w) - c.Check(err, gc.ErrorMatches, "my handling error") - actor.CheckActions(c, "setup", "handler", "teardown") - c.Check(actor.watcher.stopped, jc.IsTrue) -} - -func (s *notifyWorkerSuite) TestNoticesStoppedWatcher(c *gc.C) { - // The default closedHandler doesn't panic if you have a genuine error - // (because it assumes you want to propagate a real error and then - // restart - s.actor.watcher.SetStopError(fmt.Errorf("Stopped Watcher")) - s.actor.watcher.Stop() - err := waitShort(c, s.worker) - c.Check(err, gc.ErrorMatches, "Stopped Watcher") - s.actor.CheckActions(c, "setup", "teardown") - // Worker is stopped, don't fail TearDownTest - s.worker = nil -} - -func noopHandler(watcher.Errer) error { - return nil -} - -type CannedErrer struct { - err error -} - -func (c CannedErrer) Err() error { - return c.err -} - -func (s *notifyWorkerSuite) TestDefaultClosedHandler(c *gc.C) { - // Roundabout check for function equality. - // Is this test really worth it? - c.Assert(fmt.Sprintf("%p", worker.EnsureErr()), gc.Equals, fmt.Sprintf("%p", watcher.EnsureErr)) -} - -func (s *notifyWorkerSuite) TestErrorsOnStillAliveButClosedChannel(c *gc.C) { - foundErr := fmt.Errorf("did not get an error") - triggeredHandler := func(errer watcher.Errer) error { - foundErr = errer.Err() - return foundErr - } - worker.SetEnsureErr(triggeredHandler) - s.actor.watcher.SetStopError(tomb.ErrStillAlive) - s.actor.watcher.Stop() - err := waitShort(c, s.worker) - c.Check(foundErr, gc.Equals, tomb.ErrStillAlive) - // ErrStillAlive is trapped by the Stop logic and gets turned into a - // 'nil' when stopping. However TestDefaultClosedHandler can assert - // that it would have triggered a panic. 
- c.Check(err, jc.ErrorIsNil) - s.actor.CheckActions(c, "setup", "teardown") - // Worker is stopped, don't fail TearDownTest - s.worker = nil -} - -func (s *notifyWorkerSuite) TestErrorsOnClosedChannel(c *gc.C) { - foundErr := fmt.Errorf("did not get an error") - triggeredHandler := func(errer watcher.Errer) error { - foundErr = errer.Err() - return foundErr - } - worker.SetEnsureErr(triggeredHandler) - s.actor.watcher.Stop() - err := waitShort(c, s.worker) - // If the foundErr is nil, we would have panic-ed (see TestDefaultClosedHandler) - c.Check(foundErr, gc.IsNil) - c.Check(err, jc.ErrorIsNil) - s.actor.CheckActions(c, "setup", "teardown") -} === added file 'src/github.com/juju/juju/worker/package_test.go' --- src/github.com/juju/juju/worker/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package worker_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === modified file 'src/github.com/juju/juju/worker/peergrouper/desired.go' --- src/github.com/juju/juju/worker/peergrouper/desired.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/peergrouper/desired.go 2016-03-22 15:18:22 +0000 @@ -41,7 +41,7 @@ logger.Debugf("maxId: %v", maxId) // We may find extra peer group members if the machines - // have been removed or their state server status removed. + // have been removed or their controller status removed. // This should only happen if they had been set to non-voting // before removal, in which case we want to remove it // from the members list. If we find a member that's still configured === modified file 'src/github.com/juju/juju/worker/peergrouper/desired_test.go' --- src/github.com/juju/juju/worker/peergrouper/desired_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/peergrouper/desired_test.go 2016-03-22 15:18:22 +0000 @@ -130,7 +130,7 @@ expectVoting: []bool{false, true, false}, expectMembers: mkMembers("1 2v 3", ipVersion), }, { - about: "machines removed as state server -> removed from members", + about: "machines removed as controller -> removed from members", machines: mkMachines("11v", ipVersion), members: mkMembers("1v 2 3", ipVersion), statuses: mkStatuses("1p 2s 3s", ipVersion), === modified file 'src/github.com/juju/juju/worker/peergrouper/initiate.go' --- src/github.com/juju/juju/worker/peergrouper/initiate.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/peergrouper/initiate.go 2016-03-22 15:18:22 +0000 @@ -83,6 +83,10 @@ // is useful for testing purposes and also when debugging cases of faulty replica sets var ErrReplicaSetAlreadyInitiated = errors.New("replicaset is already initiated") +func isNotUnreachableError(err error) bool { + return err.Error() != "no reachable servers" +} + // attemptInitiateMongoServer attempts to initiate the replica set. 
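// Note added for clarity (not part of the original change): with this
// patch the replica set pre-check only runs when force is false and
// either BuildInfo fails or the reported Mongo version is below 3; on
// Mongo 3+ the CurrentConfig lookup is skipped, cfg stays nil, and
// initiation proceeds straight to replicaset.Initiate.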
func attemptInitiateMongoServer(dialInfo *mgo.DialInfo, memberHostPort string, force bool) error { session, err := mgo.DialWithInfo(dialInfo) @@ -91,13 +95,24 @@ } defer session.Close() session.SetSocketTimeout(mongo.SocketTimeout) - cfg, err := replicaset.CurrentConfig(session) - if err != nil && err != mgo.ErrNotFound { - return errors.Errorf("cannot get replica set configuration: %v", err) - } - if !force && err == nil && len(cfg.Members) > 0 { - logger.Infof("replica set configuration found: %#v", cfg) - return ErrReplicaSetAlreadyInitiated + + if !force { + bInfo, err := session.BuildInfo() + if err != nil && isNotUnreachableError(err) { + return errors.Annotate(err, "cannot determine mongo build information") + } + var cfg *replicaset.Config + if err != nil || !bInfo.VersionAtLeast(3) { + cfg, err = replicaset.CurrentConfig(session) + if err != nil && err != mgo.ErrNotFound { + return errors.Errorf("cannot get replica set configuration: %v", err) + } + } + + if cfg != nil && len(cfg.Members) > 0 { + logger.Infof("replica set configuration found: %#v", cfg) + return ErrReplicaSetAlreadyInitiated + } } return replicaset.Initiate( === removed file 'src/github.com/juju/juju/worker/peergrouper/initiate_test.go' --- src/github.com/juju/juju/worker/peergrouper/initiate_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/peergrouper/initiate_test.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package peergrouper_test - -import ( - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/peergrouper" -) - -type InitiateSuite struct { - coretesting.BaseSuite -} - -var _ = gc.Suite(&InitiateSuite{}) - -// TODO(natefinch) add a test that InitiateMongoServer works when -// we support upgrading of existing environments. - -func (s *InitiateSuite) TestInitiateReplicaSet(c *gc.C) { - var err error - inst := &gitjujutesting.MgoInstance{Params: []string{"--replSet", "juju"}} - err = inst.Start(coretesting.Certs) - c.Assert(err, jc.ErrorIsNil) - defer inst.Destroy() - - info := inst.DialInfo() - args := peergrouper.InitiateMongoParams{ - DialInfo: info, - MemberHostPort: inst.Addr(), - } - - err = peergrouper.MaybeInitiateMongoServer(args) - c.Assert(err, jc.ErrorIsNil) - - // This would return a mgo.QueryError if a ReplicaSet - // configuration already existed but we tried to create - // one with replicaset.Initiate again. 
- // ErrReplicaSetAlreadyInitiated is not a failure but an - // indication that we tried to initiate an initiated rs - err = peergrouper.MaybeInitiateMongoServer(args) - c.Assert(err, gc.Equals, peergrouper.ErrReplicaSetAlreadyInitiated) - - // Make sure running InitiateMongoServer without forcing will behave - // in the same way as MaybeInitiateMongoServer - err = peergrouper.InitiateMongoServer(args, false) - c.Assert(err, gc.Equals, peergrouper.ErrReplicaSetAlreadyInitiated) - - // Assert that passing Force to initiate will re-create the replicaset - // even though it exists already - err = peergrouper.InitiateMongoServer(args, true) - c.Assert(err, jc.ErrorIsNil) - - // TODO test login -} === modified file 'src/github.com/juju/juju/worker/peergrouper/mock_test.go' --- src/github.com/juju/juju/worker/peergrouper/mock_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/peergrouper/mock_test.go 2016-03-22 15:18:22 +0000 @@ -27,13 +27,13 @@ // that we don't want to directly depend on in unit tests. type fakeState struct { - mu sync.Mutex - errors errorPatterns - machines map[string]*fakeMachine - stateServers voyeur.Value // of *state.StateServerInfo - statuses voyeur.Value // of statuses collection - session *fakeMongoSession - check func(st *fakeState) error + mu sync.Mutex + errors errorPatterns + machines map[string]*fakeMachine + controllers voyeur.Value // of *state.ControllerInfo + statuses voyeur.Value // of statuses collection + session *fakeMongoSession + check func(st *fakeState) error } var ( @@ -105,7 +105,7 @@ machines: make(map[string]*fakeMachine), } st.session = newFakeMongoSession(st, &st.errors) - st.stateServers.Set(&state.StateServerInfo{}) + st.controllers.Set(&state.ControllerInfo{}) return st } @@ -209,24 +209,24 @@ delete(st.machines, id) } -func (st *fakeState) setStateServers(ids ...string) { - st.stateServers.Set(&state.StateServerInfo{ +func (st *fakeState) setControllers(ids ...string) { + st.controllers.Set(&state.ControllerInfo{ MachineIds: ids, }) } -func (st *fakeState) StateServerInfo() (*state.StateServerInfo, error) { - if err := st.errors.errorFor("State.StateServerInfo"); err != nil { +func (st *fakeState) ControllerInfo() (*state.ControllerInfo, error) { + if err := st.errors.errorFor("State.ControllerInfo"); err != nil { return nil, err } - return deepCopy(st.stateServers.Get()).(*state.StateServerInfo), nil -} - -func (st *fakeState) WatchStateServerInfo() state.NotifyWatcher { - return WatchValue(&st.stateServers) -} - -func (st *fakeState) WatchStateServerStatusChanges() state.StringsWatcher { + return deepCopy(st.controllers.Get()).(*state.ControllerInfo), nil +} + +func (st *fakeState) WatchControllerInfo() state.NotifyWatcher { + return WatchValue(&st.controllers) +} + +func (st *fakeState) WatchControllerStatusChanges() state.StringsWatcher { return WatchStrings(&st.statuses) } === modified file 'src/github.com/juju/juju/worker/peergrouper/publish_test.go' --- src/github.com/juju/juju/worker/peergrouper/publish_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/peergrouper/publish_test.go 2016-03-22 15:18:22 +0000 @@ -74,3 +74,14 @@ check(true, ipV4First, ipV6First) check(true, ipV6First, ipV6First) } + +func (s *publishSuite) TestPublisherRejectsNoServers(c *gc.C) { + check := func(preferIPv6 bool) { + var mock mockAPIHostPortsSetter + statePublish := newPublisher(&mock, preferIPv6) + err := statePublish.PublishAPIServers(nil, nil) + c.Assert(err, gc.ErrorMatches, "no api servers specified") + } + 
check(false) + check(true) +} === modified file 'src/github.com/juju/juju/worker/peergrouper/suite_test.go' --- src/github.com/juju/juju/worker/peergrouper/suite_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/peergrouper/suite_test.go 2016-03-22 15:18:22 +0000 @@ -4,11 +4,11 @@ package peergrouper import ( - stdtesting "testing" + "testing" - "github.com/juju/juju/testing" + gc "gopkg.in/check.v1" ) -func TestPackage(t *stdtesting.T) { - testing.MgoTestPackage(t) +func TestPackage(t *testing.T) { + gc.TestingT(t) } === modified file 'src/github.com/juju/juju/worker/peergrouper/worker.go' --- src/github.com/juju/juju/worker/peergrouper/worker.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/peergrouper/worker.go 2016-03-22 15:18:22 +0000 @@ -21,9 +21,9 @@ type stateInterface interface { Machine(id string) (stateMachine, error) - WatchStateServerInfo() state.NotifyWatcher - StateServerInfo() (*state.StateServerInfo, error) - WatchStateServerStatusChanges() state.StringsWatcher + WatchControllerInfo() state.NotifyWatcher + WatchControllerStatusChanges() state.StringsWatcher + ControllerInfo() (*state.ControllerInfo, error) MongoSession() mongoSession } @@ -47,7 +47,7 @@ } type publisherInterface interface { - // publish publishes information about the given state servers + // publish publishes information about the given controllers // to whomsoever it may concern. When it is called there // is no guarantee that any of the information has actually changed. publishAPIServers(apiServers [][]network.HostPort, instanceIds []instance.Id) error @@ -103,7 +103,7 @@ notifyCh chan notifyFunc // machines holds the set of machines we are currently - // watching (all the state server machines). Each one has an + // watching (all the controller machines). Each one has an // associated goroutine that // watches attributes of that machine. machines map[string]*machine @@ -116,7 +116,7 @@ // New returns a new worker that maintains the mongo replica set // with respect to the given state. func New(st *state.State) (worker.Worker, error) { - cfg, err := st.EnvironConfig() + cfg, err := st.ModelConfig() if err != nil { return nil, err } @@ -158,7 +158,7 @@ } func (w *pgWorker) loop() error { - infow := w.watchStateServerInfo() + infow := w.watchControllerInfo() defer infow.stop() var updateChan <-chan time.Time @@ -274,7 +274,7 @@ func (w *pgWorker) updateReplicaset() error { info, err := w.peerGroupInfo() if err != nil { - return err + return errors.Annotate(err, "cannot get peergrouper info") } members, voting, err := desiredPeerGroup(info) if err != nil { @@ -318,7 +318,7 @@ } } if err := setHasVote(added, true); err != nil { - return err + return errors.Annotate(err, "cannot set HasVote added") } if members != nil { if err := w.st.MongoSession().Set(members); err != nil { @@ -332,7 +332,7 @@ logger.Infof("successfully changed replica set to %#v", members) } if err := setHasVote(removed, false); err != nil { - return err + return errors.Annotate(err, "cannot set HasVote removed") } return nil } @@ -365,19 +365,19 @@ return nil } -// serverInfoWatcher watches the state server info and +// serverInfoWatcher watches the controller info and // notifies the worker when it changes. 
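[Editor's note] The errors.Annotate calls introduced in worker.go above change the error strings the worker surfaces, which is why the expectations later in worker_test.go grow prefixes such as "cannot get peergrouper info:". A minimal illustration of the wrapping format, assuming github.com/juju/errors with sample messages:

    package main

    import (
    	"fmt"

    	"github.com/juju/errors"
    )

    func main() {
    	// Annotate prefixes new context onto the underlying error, joined
    	// by ": ", so callers see the full chain.
    	base := errors.New("cannot get replica set status: sample")
    	wrapped := errors.Annotate(base, "cannot get peergrouper info")
    	fmt.Println(wrapped)
    	// Output: cannot get peergrouper info: cannot get replica set status: sample
    }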
type serverInfoWatcher struct { worker *pgWorker - stateServerWatcher state.NotifyWatcher + controllerWatcher state.NotifyWatcher machineStatusWatcher state.StringsWatcher } -func (w *pgWorker) watchStateServerInfo() *serverInfoWatcher { +func (w *pgWorker) watchControllerInfo() *serverInfoWatcher { infow := &serverInfoWatcher{ worker: w, - stateServerWatcher: w.st.WatchStateServerInfo(), - machineStatusWatcher: w.st.WatchStateServerStatusChanges(), + controllerWatcher: w.st.WatchControllerInfo(), + machineStatusWatcher: w.st.WatchControllerStatusChanges(), } w.start(infow.loop) return infow @@ -391,9 +391,9 @@ return infow.machineStatusWatcher.Err() } infow.worker.notify(infow.updateMachines) - case _, ok := <-infow.stateServerWatcher.Changes(): + case _, ok := <-infow.controllerWatcher.Changes(): if !ok { - return infow.stateServerWatcher.Err() + return infow.controllerWatcher.Err() } infow.worker.notify(infow.updateMachines) case <-infow.worker.tomb.Dying(): @@ -404,18 +404,18 @@ func (infow *serverInfoWatcher) stop() { infow.machineStatusWatcher.Stop() - infow.stateServerWatcher.Stop() + infow.controllerWatcher.Stop() } // updateMachines is a notifyFunc that updates the current -// machines when the state server info has changed. +// machines when the controller info has changed. func (infow *serverInfoWatcher) updateMachines() (bool, error) { - info, err := infow.worker.st.StateServerInfo() + info, err := infow.worker.st.ControllerInfo() if err != nil { - return false, fmt.Errorf("cannot get state server info: %v", err) + return false, fmt.Errorf("cannot get controller info: %v", err) } changed := false - // Stop machine goroutines that no longer correspond to state server + // Stop machine goroutines that no longer correspond to controller // machines. for _, m := range infow.worker.machines { if !inStrings(m.id, info.MachineIds) { @@ -435,9 +435,9 @@ if errors.IsNotFound(err) { // If the machine isn't found, it must have been // removed and will soon enough be removed - // from the state server list. This will probably + // from the controller list. This will probably // never happen, but we'll code defensively anyway. - logger.Warningf("machine %q from state server list not found", id) + logger.Warningf("machine %q from controller list not found", id) continue } return false, fmt.Errorf("cannot get machine %q: %v", id, err) @@ -521,7 +521,7 @@ if errors.IsNotFound(err) { // We want to be robust when the machine // state is out of date with respect to the - // state server info, so if the machine + // controller info, so if the machine // has been removed, just assume that // no change has happened - the machine // loop will be stopped very soon anyway. === modified file 'src/github.com/juju/juju/worker/peergrouper/worker_test.go' --- src/github.com/juju/juju/worker/peergrouper/worker_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/peergrouper/worker_test.go 2016-03-22 15:18:22 +0000 @@ -86,7 +86,7 @@ )) } st.machine("10").SetHasVote(true) - st.setStateServers(ids...) + st.setControllers(ids...) st.session.Set(mkMembers("0v", ipVersion)) st.session.setStatus(mkStatuses("0p", ipVersion)) st.check = checkInvariants @@ -137,7 +137,7 @@ // Add another machine. 
m13 := st.addMachine("13", false) m13.setStateHostPort(fmt.Sprintf(ipVersion.formatHostPort, 13, mongoPort)) - st.setStateServers("10", "11", "12", "13") + st.setControllers("10", "11", "12", "13") c.Logf("waiting for new member to be added") mustNext(c, memberWatcher) @@ -162,7 +162,7 @@ c.Logf("removing old machine") // Remove the old machine. st.removeMachine("10") - st.setStateServers("11", "12", "13") + st.setControllers("11", "12", "13") // Check that it's removed from the members. c.Logf("waiting for removal") @@ -175,7 +175,7 @@ DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) { st := NewFakeState() - // Simulate a state where we have four state servers, + // Simulate a state where we have four controllers, // one has gone down, and we're replacing it: // 0 - hasvote true, wantsvote false, down // 1 - hasvote true, wantsvote true @@ -222,7 +222,7 @@ // has-vote status to false and exit. select { case err := <-done: - c.Assert(err, gc.ErrorMatches, `cannot set voting status of "[0-9]+" to false: frood`) + c.Assert(err, gc.ErrorMatches, `cannot set HasVote removed: cannot set voting status of "[0-9]+" to false: frood`) case <-time.After(coretesting.LongWait): c.Fatalf("timed out waiting for worker to exit") } @@ -305,17 +305,17 @@ err error expectErr string }{{ - errPattern: "State.StateServerInfo", - expectErr: "cannot get state server info: sample", + errPattern: "State.ControllerInfo", + expectErr: "cannot get controller info: sample", }, { errPattern: "Machine.SetHasVote 11 true", - expectErr: `cannot set voting status of "11" to true: sample`, + expectErr: `cannot set HasVote added: cannot set voting status of "11" to true: sample`, }, { errPattern: "Session.CurrentStatus", - expectErr: "cannot get replica set status: sample", + expectErr: "cannot get peergrouper info: cannot get replica set status: sample", }, { errPattern: "Session.CurrentMembers", - expectErr: "cannot get replica set members: sample", + expectErr: "cannot get peergrouper info: cannot get replica set members: sample", }, { errPattern: "State.Machine *", expectErr: `cannot get machine "10": sample`, @@ -382,7 +382,7 @@ return f(apiServers, instanceIds) } -func (s *workerSuite) TestStateServersArePublished(c *gc.C) { +func (s *workerSuite) TestControllersArePublished(c *gc.C) { DoTestForIPv4AndIPv6(func(ipVersion TestIPVersion) { publishCh := make(chan [][]network.HostPort) publish := func(apiServers [][]network.HostPort, instanceIds []instance.Id) error { === modified file 'src/github.com/juju/juju/worker/provisioner/container_initialisation.go' --- src/github.com/juju/juju/worker/provisioner/container_initialisation.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/provisioner/container_initialisation.go 2016-03-22 15:18:22 +0000 @@ -15,14 +15,15 @@ "github.com/juju/juju/agent" apiprovisioner "github.com/juju/juju/api/provisioner" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/container" "github.com/juju/juju/container/kvm" "github.com/juju/juju/container/lxc" + "github.com/juju/juju/container/lxd" "github.com/juju/juju/environs" "github.com/juju/juju/instance" "github.com/juju/juju/state" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -65,7 +66,7 @@ // NewContainerSetupHandler returns a StringsWatchHandler which is notified when // containers are created on the given machine. 
-func NewContainerSetupHandler(params ContainerSetupParams) worker.StringsWatchHandler { +func NewContainerSetupHandler(params ContainerSetupParams) watcher.StringsHandler { return &ContainerSetup{ runner: params.Runner, imageURLGetter: params.ImageURLGetter, @@ -96,7 +97,7 @@ // Handle is called whenever containers change on the machine being watched. // Machines start out with no containers so the first time Handle is called, // it will be because a container has been added. -func (cs *ContainerSetup) Handle(containerIds []string) (resultError error) { +func (cs *ContainerSetup) Handle(_ <-chan struct{}, containerIds []string) (resultError error) { // Consume the initial watcher event. if len(containerIds) == 0 { return nil @@ -308,6 +309,23 @@ logger.Errorf("failed to create new kvm broker") return nil, nil, nil, err } + case instance.LXD: + series, err := cs.machine.Series() + if err != nil { + return nil, nil, nil, err + } + + initialiser = lxd.NewContainerInitialiser(series) + broker, err = NewLxdBroker( + cs.provisioner, + cs.config, + managerConfig, + cs.enableNAT, + ) + if err != nil { + logger.Errorf("failed to create new lxd broker") + return nil, nil, nil, err + } default: return nil, nil, nil, fmt.Errorf("unknown container type: %v", containerType) } @@ -324,11 +342,6 @@ managerConfigResult, err := provisioner.ContainerManagerConfig( params.ContainerManagerConfigParams{Type: containerType}, ) - if params.IsCodeNotImplemented(err) { - // We currently don't support upgrading; - // revert to the old configuration. - managerConfigResult.ManagerConfig = container.ManagerConfig{container.ConfigName: container.DefaultNamespace} - } if err != nil { return nil, err } @@ -369,7 +382,11 @@ // already been added to the machine. It will see that the // container does not have an instance yet and create one. return runner.StartWorker(workerName, func() (worker.Worker, error) { - return NewContainerProvisioner(containerType, provisioner, config, broker, toolsFinder), nil + w, err := NewContainerProvisioner(containerType, provisioner, config, broker, toolsFinder) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil }) } === modified file 'src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go' --- src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/provisioner/container_initialisation_test.go 2016-03-22 15:18:22 +0000 @@ -10,13 +10,17 @@ "os/exec" "path/filepath" "runtime" + "sync/atomic" "github.com/juju/names" "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" "github.com/juju/utils/featureflag" "github.com/juju/utils/fslock" + jujuos "github.com/juju/utils/os" "github.com/juju/utils/packaging/manager" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/agent" @@ -26,13 +30,13 @@ "github.com/juju/juju/environs" "github.com/juju/juju/feature" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/juju/osenv" "github.com/juju/juju/provider/dummy" "github.com/juju/juju/state" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" "github.com/juju/juju/worker/provisioner" ) @@ -77,11 +81,13 @@ // Set up provisioner for the state machine. 
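[Editor's note] The hunk above moves the handler onto the juju/juju/watcher API, whose Handle method takes an abort channel ahead of the changed ids. A sketch of a minimal conforming handler; the interface shape (SetUp/Handle/TearDown) is inferred from the calls in this patch and should be treated as an approximation, not the package's authoritative definition:

    package sketch

    import (
    	"github.com/juju/juju/watcher"
    )

    // noopHandler sketches the watcher.StringsHandler contract as used in
    // this patch: Handle receives an abort channel plus the changed ids.
    type noopHandler struct{}

    func (noopHandler) SetUp() (watcher.StringsWatcher, error) {
    	// A real handler would return the watcher whose changes feed Handle.
    	return nil, nil
    }

    func (noopHandler) Handle(_ <-chan struct{}, containerIds []string) error {
    	// React to the changed container ids here.
    	return nil
    }

    func (noopHandler) TearDown() error { return nil }

Such a handler is then run via watcher.NewStringsWorker(watcher.StringsConfig{Handler: handler}), as the updated test in this patch shows.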
s.agentConfig = s.AgentConfigForTag(c, names.NewMachineTag("0")) - s.p = provisioner.NewEnvironProvisioner(s.provisioner, s.agentConfig) + var err error + s.p, err = provisioner.NewEnvironProvisioner(s.provisioner, s.agentConfig) + c.Assert(err, jc.ErrorIsNil) // Create a new container initialisation lock. s.initLockDir = c.MkDir() - initLock, err := fslock.NewLock(s.initLockDir, "container-init") + initLock, err := fslock.NewLock(s.initLockDir, "container-init", fslock.Defaults()) c.Assert(err, jc.ErrorIsNil) s.initLock = initLock @@ -91,13 +97,15 @@ } func (s *ContainerSetupSuite) TearDownTest(c *gc.C) { - stop(c, s.p) + if s.p != nil { + stop(c, s.p) + } s.CommonProvisionerSuite.TearDownTest(c) } -func (s *ContainerSetupSuite) setupContainerWorker(c *gc.C, tag names.MachineTag) (worker.StringsWatchHandler, worker.Runner) { +func (s *ContainerSetupSuite) setupContainerWorker(c *gc.C, tag names.MachineTag) (watcher.StringsHandler, worker.Runner) { testing.PatchExecutable(c, s, "ubuntu-cloudimg-query", containertesting.FakeLxcURLScript) - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, worker.RestartDelay) pr := s.st.Provisioner() machine, err := pr.Machine(tag) c.Assert(err, jc.ErrorIsNil) @@ -118,7 +126,9 @@ } handler := provisioner.NewContainerSetupHandler(params) runner.StartWorker(watcherName, func() (worker.Worker, error) { - return worker.NewStringsWorker(handler), nil + return watcher.NewStringsWorker(watcher.StringsConfig{ + Handler: handler, + }) }) return handler, runner } @@ -150,13 +160,13 @@ c *gc.C, host *state.Machine, ctype instance.ContainerType) { // A stub worker callback to record what happens. - provisionerStarted := false + var provisionerStarted uint32 startProvisionerWorker := func(runner worker.Runner, containerType instance.ContainerType, pr *apiprovisioner.State, cfg agent.Config, broker environs.InstanceBroker, toolsFinder provisioner.ToolsFinder) error { c.Assert(containerType, gc.Equals, ctype) c.Assert(cfg.Tag(), gc.Equals, host.Tag()) - provisionerStarted = true + atomic.StoreUint32(&provisionerStarted, 1) return nil } s.PatchValue(&provisioner.StartProvisioner, startProvisionerWorker) @@ -166,11 +176,13 @@ <-s.aptCmdChan // the container worker should have created the provisioner - c.Assert(provisionerStarted, jc.IsTrue) + c.Assert(atomic.LoadUint32(&provisionerStarted) > 0, jc.IsTrue) } func (s *ContainerSetupSuite) TestContainerProvisionerStarted(c *gc.C) { - for _, ctype := range instance.ContainerTypes { + // Specifically ignore LXD here, if present in instance.ContainerTypes. + containerTypes := []instance.ContainerType{instance.LXC, instance.KVM} + for _, ctype := range containerTypes { // create a machine to host the container. 
m, err := s.BackingState.AddOneMachine(state.MachineTemplate{ Series: coretesting.FakeDefaultSeries, @@ -178,9 +190,14 @@ Constraints: s.defaultConstraints, }) c.Assert(err, jc.ErrorIsNil) - err = m.SetSupportedContainers([]instance.ContainerType{instance.LXC, instance.KVM}) + err = m.SetSupportedContainers(containerTypes) c.Assert(err, jc.ErrorIsNil) - err = m.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err = m.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) s.assertContainerProvisionerStarted(c, m, ctype) } @@ -201,16 +218,16 @@ } func (s *ContainerSetupSuite) testContainerConstraintsArch(c *gc.C, containerType instance.ContainerType, expectArch string) { - var called bool + var called uint32 s.PatchValue(provisioner.GetToolsFinder, func(*apiprovisioner.State) provisioner.ToolsFinder { - return toolsFinderFunc(func(v version.Number, series string, arch *string) (tools.List, error) { - called = true - c.Assert(arch, gc.NotNil) - c.Assert(*arch, gc.Equals, expectArch) - result := version.Current - result.Number = v - result.Series = series - result.Arch = *arch + return toolsFinderFunc(func(v version.Number, series string, arch string) (tools.List, error) { + atomic.StoreUint32(&called, 1) + c.Assert(arch, gc.Equals, expectArch) + result := version.Binary{ + Number: v, + Arch: arch, + Series: series, + } return tools.List{{Version: result}}, nil }) }) @@ -218,8 +235,7 @@ s.PatchValue(&provisioner.StartProvisioner, func(runner worker.Runner, containerType instance.ContainerType, pr *apiprovisioner.State, cfg agent.Config, broker environs.InstanceBroker, toolsFinder provisioner.ToolsFinder) error { - amd64 := arch.AMD64 - toolsFinder.FindTools(version.Current.Number, version.Current.Series, &amd64) + toolsFinder.FindTools(version.Current, series.HostSeries(), arch.AMD64) return nil }) @@ -232,12 +248,17 @@ c.Assert(err, jc.ErrorIsNil) err = m.SetSupportedContainers([]instance.ContainerType{containerType}) c.Assert(err, jc.ErrorIsNil) - err = m.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err = m.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) s.createContainer(c, m, containerType) <-s.aptCmdChan - c.Assert(called, jc.IsTrue) + c.Assert(atomic.LoadUint32(&called) > 0, jc.IsTrue) } func (s *ContainerSetupSuite) TestLxcContainerUsesImageURL(c *gc.C) { @@ -250,7 +271,12 @@ c.Assert(err, jc.ErrorIsNil) err = m.SetSupportedContainers([]instance.ContainerType{instance.LXC, instance.KVM}) c.Assert(err, jc.ErrorIsNil) - err = m.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err = m.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) brokerCalled := false @@ -280,7 +306,12 @@ expect("any-old-thing") } -func (s *ContainerSetupSuite) assertContainerInitialised(c *gc.C, ctype instance.ContainerType, packages [][]string, addressable bool) { +type ContainerInstance struct { + ctype instance.ContainerType + packages [][]string +} + +func (s *ContainerSetupSuite) assertContainerInitialised(c *gc.C, cont ContainerInstance, addressable bool) { // A noop worker callback. 
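[Editor's note] The switch from a plain bool to a uint32 with sync/atomic in the stub callbacks above is a data-race fix: the callback fires on the worker's goroutine while the assertion runs on the test's. A standalone illustration of the pattern, using only the standard library:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    func main() {
    	var called uint32
    	done := make(chan struct{})
    	go func() {
    		// The worker goroutine records the call without a race.
    		atomic.StoreUint32(&called, 1)
    		close(done)
    	}()
    	<-done
    	// The test goroutine observes it the same way.
    	fmt.Println(atomic.LoadUint32(&called) > 0) // true
    }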
startProvisionerWorker := func(runner worker.Runner, containerType instance.ContainerType, pr *apiprovisioner.State, cfg agent.Config, broker environs.InstanceBroker, @@ -289,29 +320,52 @@ } s.PatchValue(&provisioner.StartProvisioner, startProvisionerWorker) + current_os, err := series.GetOSFromSeries(series.HostSeries()) + c.Assert(err, jc.ErrorIsNil) + + var ser string + var expected_initial []string + switch current_os { + case jujuos.CentOS: + ser = "centos7" + expected_initial = []string{ + "yum", "--assumeyes", "--debuglevel=1", "install"} + default: + ser = "precise" + expected_initial = []string{ + "apt-get", "--option=Dpkg::Options::=--force-confold", + "--option=Dpkg::options::=--force-unsafe-io", "--assume-yes", "--quiet", + "install"} + } + // create a machine to host the container. m, err := s.BackingState.AddOneMachine(state.MachineTemplate{ - Series: "precise", // precise requires special apt parameters, so we use that series here. + Series: ser, // precise requires special apt parameters, so we use that series here. Jobs: []state.MachineJob{state.JobHostUnits}, Constraints: s.defaultConstraints, }) c.Assert(err, jc.ErrorIsNil) err = m.SetSupportedContainers([]instance.ContainerType{instance.LXC, instance.KVM}) c.Assert(err, jc.ErrorIsNil) - err = m.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err = m.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) // Before starting /etc/default/lxc-net should be missing. c.Assert(s.fakeLXCNet, jc.DoesNotExist) - s.createContainer(c, m, ctype) + s.createContainer(c, m, cont.ctype) // Only feature-flagged addressable containers modify lxc-net. if addressable { // After initialisation starts, but before running the - // initializer, lxc-net should be created if ctype is LXC, as the + // initializer, lxc-net should be created if cont.ctype is LXC, as the // dummy provider supports static address allocation by default. - if ctype == instance.LXC { + if cont.ctype == instance.LXC { AssertFileContains(c, s.fakeLXCNet, provisioner.EtcDefaultLXCNet) defer os.Remove(s.fakeLXCNet) } else { @@ -319,30 +373,20 @@ } } - for _, pack := range packages { + for _, pack := range cont.packages { cmd := <-s.aptCmdChan - expected := []string{ - "apt-get", "--option=Dpkg::Options::=--force-confold", - "--option=Dpkg::options::=--force-unsafe-io", "--assume-yes", "--quiet", - "install"} - expected = append(expected, pack...) + + expected := append(expected_initial, pack...) 
c.Assert(cmd.Args, gc.DeepEquals, expected) } } func (s *ContainerSetupSuite) TestContainerInitialised(c *gc.C) { - for _, test := range []struct { - ctype instance.ContainerType - packages [][]string - }{ - {instance.LXC, [][]string{ - []string{"--target-release", "precise-updates/cloud-tools", "lxc"}, - []string{"--target-release", "precise-updates/cloud-tools", "cloud-image-utils"}}}, - {instance.KVM, [][]string{ - []string{"uvtool-libvirt"}, - []string{"uvtool"}}}, - } { - s.assertContainerInitialised(c, test.ctype, test.packages, false) + cont, err := getContainerInstance() + c.Assert(err, jc.ErrorIsNil) + + for _, test := range cont { + s.assertContainerInitialised(c, test, false) } } @@ -353,7 +397,12 @@ Constraints: s.defaultConstraints, }) c.Assert(err, jc.ErrorIsNil) - err = m.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err = m.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) err = os.RemoveAll(s.initLockDir) @@ -365,7 +414,7 @@ _, err = handler.SetUp() c.Assert(err, jc.ErrorIsNil) - err = handler.Handle([]string{"0/lxc/0"}) + err = handler.Handle(nil, []string{"0/lxc/0"}) c.Assert(err, gc.ErrorMatches, ".*failed to acquire initialization lock:.*") } @@ -481,9 +530,9 @@ c.Assert(err, jc.Satisfies, os.IsNotExist) } -type toolsFinderFunc func(v version.Number, series string, arch *string) (tools.List, error) +type toolsFinderFunc func(v version.Number, series string, arch string) (tools.List, error) -func (t toolsFinderFunc) FindTools(v version.Number, series string, arch *string) (tools.List, error) { +func (t toolsFinderFunc) FindTools(v version.Number, series string, arch string) (tools.List, error) { return t(v, series, arch) } @@ -501,16 +550,47 @@ } func (s *AddressableContainerSetupSuite) TestContainerInitialised(c *gc.C) { - for _, test := range []struct { - ctype instance.ContainerType - packages [][]string - }{ - {instance.LXC, [][]string{{"--target-release", "precise-updates/cloud-tools", "lxc"}, {"--target-release", "precise-updates/cloud-tools", "cloud-image-utils"}}}, - {instance.KVM, [][]string{{"uvtool-libvirt"}, {"uvtool"}}}, - } { + cont, err := getContainerInstance() + c.Assert(err, jc.ErrorIsNil) + + for _, test := range cont { s.enableFeatureFlag() - s.assertContainerInitialised(c, test.ctype, test.packages, true) - } + s.assertContainerInitialised(c, test, true) + } +} + +func getContainerInstance() (cont []ContainerInstance, err error) { + current_os, err := series.GetOSFromSeries(series.HostSeries()) + if err != nil { + return nil, err + } + + switch current_os { + case jujuos.CentOS: + cont = []ContainerInstance{ + {instance.LXC, [][]string{ + {"lxc"}, + {"cloud-image-utils"}, + }}, + {instance.KVM, [][]string{ + {"uvtool-libvirt"}, + {"uvtool"}, + }}, + } + default: + cont = []ContainerInstance{ + {instance.LXC, [][]string{ + {"--target-release", "precise-updates/cloud-tools", "lxc"}, + {"--target-release", "precise-updates/cloud-tools", "cloud-image-utils"}, + }}, + {instance.KVM, [][]string{ + {"uvtool-libvirt"}, + {"uvtool"}, + }}, + } + } + + return cont, nil } // LXCDefaultMTUSuite only contains tests depending on the @@ -539,7 +619,12 @@ c.Assert(err, jc.ErrorIsNil) err = m.SetSupportedContainers([]instance.ContainerType{instance.LXC, instance.KVM}) c.Assert(err, jc.ErrorIsNil) - err = m.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err 
= m.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) brokerCalled := false === modified file 'src/github.com/juju/juju/worker/provisioner/export_test.go' --- src/github.com/juju/juju/worker/provisioner/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/provisioner/export_test.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,9 @@ import ( "reflect" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/environs/config" "github.com/juju/juju/network" + "github.com/juju/juju/watcher" ) func SetObserver(p Provisioner, observer chan<- *config.Config) { @@ -29,22 +29,25 @@ } var ( - ContainerManagerConfig = containerManagerConfig - GetToolsFinder = &getToolsFinder - SysctlConfig = &sysctlConfig - ResolvConf = &resolvConf - LocalDNSServers = localDNSServers - MustParseTemplate = mustParseTemplate - RunTemplateCommand = runTemplateCommand - IptablesRules = &iptablesRules - NetInterfaces = &netInterfaces - InterfaceAddrs = &interfaceAddrs - DiscoverPrimaryNIC = discoverPrimaryNIC - ConfigureContainerNetwork = configureContainerNetwork - MaybeOverrideDefaultLXCNet = maybeOverrideDefaultLXCNet - EtcDefaultLXCNetPath = &etcDefaultLXCNetPath - EtcDefaultLXCNet = etcDefaultLXCNet - MaxInstanceRetryDelay = &maxInstanceRetryDelay + ContainerManagerConfig = containerManagerConfig + GetToolsFinder = &getToolsFinder + SysctlConfig = &sysctlConfig + ResolvConf = &resolvConf + LocalDNSServers = localDNSServers + MustParseTemplate = mustParseTemplate + RunTemplateCommand = runTemplateCommand + IptablesRules = &iptablesRules + NetInterfaceByName = &netInterfaceByName + NetInterfaces = &netInterfaces + InterfaceAddrs = &interfaceAddrs + DiscoverPrimaryNIC = discoverPrimaryNIC + DiscoverIPv4InterfaceAddress = discoverIPv4InterfaceAddress + ConfigureContainerNetwork = configureContainerNetwork + MaybeOverrideDefaultLXCNet = maybeOverrideDefaultLXCNet + EtcDefaultLXCNetPath = &etcDefaultLXCNetPath + EtcDefaultLXCNet = etcDefaultLXCNet + RetryStrategyDelay = &retryStrategyDelay + RetryStrategyCount = &retryStrategyCount ) const ( === modified file 'src/github.com/juju/juju/worker/provisioner/kvm-broker.go' --- src/github.com/juju/juju/worker/provisioner/kvm-broker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/provisioner/kvm-broker.go 2016-03-22 15:18:22 +0000 @@ -25,12 +25,14 @@ managerConfig container.ManagerConfig, enableNAT bool, ) (environs.InstanceBroker, error) { + namespace := maybeGetManagerConfigNamespaces(managerConfig) manager, err := kvm.NewContainerManager(managerConfig) if err != nil { return nil, err } return &kvmBroker{ manager: manager, + namespace: namespace, api: api, agentConfig: agentConfig, enableNAT: enableNAT, @@ -39,6 +41,7 @@ type kvmBroker struct { manager container.Manager + namespace string api APICalls agentConfig agent.Config enableNAT bool @@ -60,29 +63,22 @@ if bridgeDevice == "" { bridgeDevice = kvm.DefaultKvmBridge } - if !environs.AddressAllocationEnabled() { - logger.Debugf( - "address allocation feature flag not enabled; using DHCP for container %q", - machineId, - ) + + preparedInfo, err := prepareOrGetContainerInterfaceInfo( + broker.api, + machineId, + bridgeDevice, + true, // allocate if possible, do not maintain existing. + broker.enableNAT, + args.NetworkInfo, + kvmLogger, + ) + if err != nil { + // It's not fatal (yet) if we couldn't pre-allocate addresses for the + // container. 
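[Editor's note] Several of the updated tests above assemble the agent's binary version by hand because version.Current is now a bare version.Number on this branch. A condensed sketch of that recurring pattern, assuming the same imports the test files use:

    package sketch

    import (
    	"github.com/juju/utils/arch"
    	"github.com/juju/utils/series"

    	"github.com/juju/juju/version"
    )

    // currentBinary reassembles the full binary version from the host's
    // architecture and series, as the updated SetAgentVersion call sites do.
    func currentBinary() version.Binary {
    	return version.Binary{
    		Number: version.Current,
    		Arch:   arch.HostArch(),
    		Series: series.HostSeries(),
    	}
    }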
+ logger.Warningf("failed to prepare container %q network config: %v", machineId, err) } else { - logger.Debugf("trying to allocate static IP for container %q", machineId) - - allocatedInfo, err := configureContainerNetwork( - machineId, - bridgeDevice, - broker.api, - args.NetworkInfo, - true, // allocate a new address. - broker.enableNAT, - ) - if err != nil { - // It's fine, just ignore it. The effect will be that the - // container won't have a static address configured. - logger.Infof("not allocating static IP for container %q: %v", machineId, err) - } else { - args.NetworkInfo = allocatedInfo - } + args.NetworkInfo = preparedInfo } // Unlike with LXC, we don't override the default MTU to use. @@ -130,6 +126,33 @@ }, nil } +// MaintainInstance ensures the container's host has the required iptables and +// routing rules to make the container visible to both the host and other +// machines on the same subnet. This is important mostly when address allocation +// feature flag is enabled, as otherwise we don't create additional iptables +// rules or routes. +func (broker *kvmBroker) MaintainInstance(args environs.StartInstanceParams) error { + machineID := args.InstanceConfig.MachineId + + // Default to using the host network until we can configure. + bridgeDevice := broker.agentConfig.Value(agent.LxcBridge) + if bridgeDevice == "" { + bridgeDevice = kvm.DefaultKvmBridge + } + + // There's no InterfaceInfo we expect to get below. + _, err := prepareOrGetContainerInterfaceInfo( + broker.api, + machineID, + bridgeDevice, + false, // maintain, do not allocate. + broker.enableNAT, + args.NetworkInfo, + kvmLogger, + ) + return err +} + // StopInstances shuts down the given instances. func (broker *kvmBroker) StopInstances(ids ...instance.Id) error { // TODO: potentially parallelise. @@ -139,6 +162,7 @@ kvmLogger.Errorf("container did not stop: %v", err) return err } + maybeReleaseContainerAddresses(broker.api, id, broker.namespace, kvmLogger) } return nil } @@ -147,31 +171,3 @@ func (broker *kvmBroker) AllInstances() (result []instance.Instance, err error) { return broker.manager.ListContainers() } - -// MaintainInstance checks that the container's host has the required iptables and routing -// rules to make the container visible to both the host and other machines on the same subnet. -func (broker *kvmBroker) MaintainInstance(args environs.StartInstanceParams) error { - machineId := args.InstanceConfig.MachineId - if !environs.AddressAllocationEnabled() { - kvmLogger.Debugf("address allocation disabled: Not running maintenance for kvm with machineId: %s", - machineId) - return nil - } - - kvmLogger.Debugf("running maintenance for kvm with machineId: %s", machineId) - - // Default to using the host network until we can configure. - bridgeDevice := broker.agentConfig.Value(agent.LxcBridge) - if bridgeDevice == "" { - bridgeDevice = kvm.DefaultKvmBridge - } - _, err := configureContainerNetwork( - machineId, - bridgeDevice, - broker.api, - args.NetworkInfo, - false, // don't allocate a new address. 
- broker.enableNAT, - ) - return err -} === modified file 'src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go' --- src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/provisioner/kvm-broker_test.go 2016-03-22 15:18:22 +0000 @@ -15,6 +15,7 @@ "github.com/juju/names" gitjujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/arch" gc "gopkg.in/check.v1" "github.com/juju/juju/agent" @@ -27,7 +28,6 @@ "github.com/juju/juju/feature" "github.com/juju/juju/instance" instancetest "github.com/juju/juju/instance/testing" - "github.com/juju/juju/juju/arch" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" @@ -82,14 +82,14 @@ var err error s.agentConfig, err = agent.NewAgentConfig( agent.AgentConfigParams{ - DataDir: "/not/used/here", + Paths: agent.NewPathsWithDefaults(agent.Paths{DataDir: "/not/used/here"}), Tag: names.NewUnitTag("ubuntu/1"), - UpgradedToVersion: version.Current.Number, + UpgradedToVersion: version.Current, Password: "dummy-secret", Nonce: "nonce", APIAddresses: []string{"10.0.0.1:1234"}, CACert: coretesting.CACert, - Environment: coretesting.EnvironmentTag, + Model: coretesting.ModelTag, }) c.Assert(err, jc.ErrorIsNil) s.api = NewFakeAPI() @@ -162,6 +162,9 @@ machineId := "1/kvm/0" kvm := s.startInstance(c, machineId) s.api.CheckCalls(c, []gitjujutesting.StubCall{{ + FuncName: "PrepareContainerInterfaceInfo", + Args: []interface{}{names.NewMachineTag("1-kvm-0")}, + }, { FuncName: "ContainerConfig", }}) c.Assert(kvm.Id(), gc.Equals, instance.Id("juju-machine-1-kvm-0")) @@ -359,12 +362,14 @@ broker, err := provisioner.NewKvmBroker(s.provisioner, agentConfig, managerConfig, false) c.Assert(err, jc.ErrorIsNil) toolsFinder := (*provisioner.GetToolsFinder)(s.provisioner) - return provisioner.NewContainerProvisioner(instance.KVM, s.provisioner, agentConfig, broker, toolsFinder) + w, err := provisioner.NewContainerProvisioner(instance.KVM, s.provisioner, agentConfig, broker, toolsFinder) + c.Assert(err, jc.ErrorIsNil) + return w } func (s *kvmProvisionerSuite) TestProvisionerStartStop(c *gc.C) { p := s.newKvmProvisioner(c) - c.Assert(p.Stop(), gc.IsNil) + stop(c, p) } func (s *kvmProvisionerSuite) TestDoesNotStartEnvironMachines(c *gc.C) { === added file 'src/github.com/juju/juju/worker/provisioner/logging.go' --- src/github.com/juju/juju/worker/provisioner/logging.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/provisioner/logging.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner + +import ( + "github.com/juju/errors" + "github.com/juju/utils/featureflag" + + "github.com/juju/juju/feature" +) + +// loggedErrorStack is a developer helper function that will cause the error +// stack of the error to be printed out at error severity if and only if the +// "log-error-stack" feature flag has been specified. The passed in error +// is also the return value of this function. 
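[Editor's note] A hypothetical call site for the loggedErrorStack helper defined below, showing the intended usage pattern; doSomething is an illustrative stub, and the "log-error-stack" flag name comes from the helper's doc comment:

    package provisioner

    import (
    	"github.com/juju/errors"
    )

    // startBrokerSketch combines errors.Trace with loggedErrorStack: the
    // stack is captured either way, but only logged when the
    // "log-error-stack" feature flag is set.
    func startBrokerSketch() error {
    	if err := doSomething(); err != nil {
    		return loggedErrorStack(errors.Trace(err))
    	}
    	return nil
    }

    // doSomething is a stub standing in for real broker setup work.
    func doSomething() error { return nil }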
+func loggedErrorStack(err error) error { + if featureflag.Enabled(feature.LogErrorStack) { + logger.Errorf("error stack:\n%s", errors.ErrorStack(err)) + } + return err +} === added file 'src/github.com/juju/juju/worker/provisioner/logging_test.go' --- src/github.com/juju/juju/worker/provisioner/logging_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/provisioner/logging_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner + +import ( + "errors" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/feature" + jujutesting "github.com/juju/juju/testing" +) + +type logSuite struct { + testing.LoggingSuite + jujutesting.JujuOSEnvSuite +} + +func (l *logSuite) SetUpTest(c *gc.C) { + l.LoggingSuite.SetUpTest(c) + l.JujuOSEnvSuite.SetUpTest(c) +} + +var _ = gc.Suite(&logSuite{}) + +func (*logSuite) TestFlagNotSet(c *gc.C) { + err := errors.New("test error") + err2 := loggedErrorStack(err) + c.Assert(err, gc.Equals, err2) + c.Assert(c.GetTestLog(), gc.Equals, "") +} + +func (s *logSuite) TestFlagSet(c *gc.C) { + s.SetFeatureFlags(feature.LogErrorStack) + err := errors.New("test error") + err2 := loggedErrorStack(err) + c.Assert(err, gc.Equals, err2) + expected := "ERROR juju.provisioner error stack:\ntest error" + c.Assert(c.GetTestLog(), jc.Contains, expected) +} === modified file 'src/github.com/juju/juju/worker/provisioner/lxc-broker.go' --- src/github.com/juju/juju/worker/provisioner/lxc-broker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/provisioner/lxc-broker.go 2016-03-22 15:18:22 +0000 @@ -15,7 +15,9 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" + "github.com/juju/utils/arch" "github.com/juju/utils/exec" + "github.com/juju/utils/set" "github.com/juju/juju/agent" apiprovisioner "github.com/juju/juju/api/provisioner" @@ -25,7 +27,6 @@ "github.com/juju/juju/container/lxc" "github.com/juju/juju/environs" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/network" "github.com/juju/juju/storage/looputil" "github.com/juju/juju/tools" @@ -40,6 +41,7 @@ ContainerConfig() (params.ContainerConfig, error) PrepareContainerInterfaceInfo(names.MachineTag) ([]network.InterfaceInfo, error) GetContainerInterfaceInfo(names.MachineTag) ([]network.InterfaceInfo, error) + ReleaseContainerAddresses(names.MachineTag) error } var _ APICalls = (*apiprovisioner.State)(nil) @@ -55,6 +57,7 @@ enableNAT bool, defaultMTU int, ) (environs.InstanceBroker, error) { + namespace := maybeGetManagerConfigNamespaces(managerConfig) manager, err := lxc.NewContainerManager( managerConfig, imageURLGetter, looputil.NewLoopDeviceManager(), ) @@ -63,6 +66,7 @@ } return &lxcBroker{ manager: manager, + namespace: namespace, api: api, agentConfig: agentConfig, enableNAT: enableNAT, @@ -72,6 +76,7 @@ type lxcBroker struct { manager container.Manager + namespace string api APICalls agentConfig agent.Config enableNAT bool @@ -93,28 +98,22 @@ bridgeDevice = lxc.DefaultLxcBridge } - if !environs.AddressAllocationEnabled() { - logger.Debugf( - "address allocation feature flag not enabled; using DHCP for container %q", - machineId, - ) + preparedInfo, err := prepareOrGetContainerInterfaceInfo( + broker.api, + machineId, + bridgeDevice, + true, // allocate if possible, do not maintain existing. 
+		broker.enableNAT,
+		args.NetworkInfo,
+		lxcLogger,
+	)
+	if err != nil {
+		// It's not fatal (yet) if we couldn't pre-allocate addresses for the
+		// container.
+		logger.Warningf("failed to prepare container %q network config: %v", machineId, err)
 	} else {
-		logger.Debugf("trying to allocate static IP for container %q", machineId)
-		allocatedInfo, err := configureContainerNetwork(
-			machineId,
-			bridgeDevice,
-			broker.api,
-			args.NetworkInfo,
-			true, // allocate a new address.
-			broker.enableNAT,
-		)
-		if err != nil {
-			// It's fine, just ignore it. The effect will be that the
-			// container won't have a static address configured.
-			logger.Infof("not allocating static IP for container %q: %v", machineId, err)
-		} else {
-			args.NetworkInfo = allocatedInfo
-		}
+		args.NetworkInfo = preparedInfo
+	}

 	network := container.BridgeNetworkConfig(bridgeDevice, broker.defaultMTU, args.NetworkInfo)
@@ -176,6 +175,33 @@
 	}, nil
 }

+// MaintainInstance ensures the container's host has the required iptables and
+// routing rules to make the container visible to both the host and other
+// machines on the same subnet. This matters mostly when the address allocation
+// feature flag is enabled, as otherwise we don't create additional iptables
+// rules or routes.
+func (broker *lxcBroker) MaintainInstance(args environs.StartInstanceParams) error {
+	machineID := args.InstanceConfig.MachineId
+
+	// Default to using the host network until we can configure.
+	bridgeDevice := broker.agentConfig.Value(agent.LxcBridge)
+	if bridgeDevice == "" {
+		bridgeDevice = lxc.DefaultLxcBridge
+	}
+
+	// There's no InterfaceInfo we expect to get below.
+	_, err := prepareOrGetContainerInterfaceInfo(
+		broker.api,
+		machineID,
+		bridgeDevice,
+		false, // maintain, do not allocate.
+		broker.enableNAT,
+		args.NetworkInfo,
+		lxcLogger,
+	)
+	return err
+}
+
 // StopInstances shuts down the given instances.
 func (broker *lxcBroker) StopInstances(ids ...instance.Id) error {
 	// TODO: potentially parallelise.
@@ -185,6 +211,7 @@
 			lxcLogger.Errorf("container did not stop: %v", err)
 			return err
 		}
+		maybeReleaseContainerAddresses(broker.api, id, broker.namespace, lxcLogger)
 	}
 	return nil
 }
@@ -199,10 +226,9 @@
 }

 // FindTools is defined on the ToolsFinder interface.
-func (h hostArchToolsFinder) FindTools(v version.Number, series string, _ *string) (tools.List, error) {
+func (h hostArchToolsFinder) FindTools(v version.Number, series, _ string) (tools.List, error) {
 	// Override the arch constraint with the arch of the host.
-	arch := arch.HostArch()
-	return h.f.FindTools(v, series, &arch)
+	return h.f.FindTools(v, series, arch.HostArch())
 }

 // resolvConf is the full path to the resolv.conf file on the local
@@ -462,13 +488,55 @@
 }

 var (
-	netInterfaces  = net.Interfaces
-	interfaceAddrs = (*net.Interface).Addrs
+	netInterfaceByName = net.InterfaceByName
+	netInterfaces      = net.Interfaces
+	interfaceAddrs     = (*net.Interface).Addrs
 )

-// discoverPrimaryNIC returns the name of the first network interface
-// on the machine which is up and has address, along with the first
-// address it has.
+// discoverIPv4InterfaceAddress returns the address for ifaceName
+// (e.g., br-eth1). This method is a stop-gap measure to unblock
+// master CI failures and will be removed once multi-NIC container
+// support is landed from the maas-spaces2 feature branch.
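[Editor's note] discoverIPv4InterfaceAddress, whose body follows, tries each interface address as a plain IP first, falls back to CIDR notation, and skips IPv6 in both forms. A standalone sketch of that parsing order using only the standard library; parseIPv4 is an illustrative name, not part of the patch:

    package main

    import (
    	"fmt"
    	"net"
    )

    // parseIPv4 mirrors the parsing order used by the helper: plain IP
    // first, then CIDR, rejecting IPv6 either way.
    func parseIPv4(s string) (net.IP, error) {
    	ip := net.ParseIP(s)
    	if ip == nil {
    		var err error
    		ip, _, err = net.ParseCIDR(s)
    		if err != nil {
    			return nil, err
    		}
    	}
    	if ip.To4() == nil {
    		return nil, fmt.Errorf("not an IPv4 address: %q", s)
    	}
    	return ip, nil
    }

    func main() {
    	fmt.Println(parseIPv4("192.168.1.42/24")) // 192.168.1.42 <nil>
    	fmt.Println(parseIPv4("f000::1"))         // <nil> not an IPv4 address: "f000::1"
    }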
+func discoverIPv4InterfaceAddress(ifaceName string) (*network.Address, error) { + iface, err := netInterfaceByName(ifaceName) + if err != nil { + return nil, errors.Annotatef(err, "cannot get interface %q", ifaceName) + } + + addrs, err := interfaceAddrs(iface) + + if err != nil { + return nil, errors.Annotatef(err, "cannot get network addresses for interface %q", ifaceName) + } + + for _, addr := range addrs { + // Check if it's an IP or a CIDR. + + ip := net.ParseIP(addr.String()) + + if ip != nil && ip.To4() == nil { + logger.Debugf("skipping IPv6 address: %q", ip) + continue + } + + if ip == nil { + // Try a CIDR. + ip, _, err = net.ParseCIDR(addr.String()) + if ip != nil && ip.To4() == nil { + logger.Debugf("skipping IPv6 address: %q", ip) + continue + } + if err != nil { + return nil, errors.Annotatef(err, "cannot parse address %q", addr) + } + } + logger.Tracef("network interface %q has address %q", ifaceName, ip) + addr := network.NewAddress(ip.String()) + return &addr, nil + } + return nil, errors.Errorf("no addresses found for %q", ifaceName) +} + func discoverPrimaryNIC() (string, network.Address, error) { interfaces, err := netInterfaces() if err != nil { @@ -596,30 +664,133 @@ return finalIfaceInfo, nil } -// MaintainInstance checks that the container's host has the required iptables and routing -// rules to make the container visible to both the host and other machines on the same subnet. -func (broker *lxcBroker) MaintainInstance(args environs.StartInstanceParams) error { - machineId := args.InstanceConfig.MachineId - if !environs.AddressAllocationEnabled() { - lxcLogger.Debugf("address allocation disabled: Not running maintenance for lxc container with machineId: %s", - machineId) - return nil - } - - lxcLogger.Debugf("running maintenance for lxc container with machineId: %s", machineId) - - // Default to using the host network until we can configure. - bridgeDevice := broker.agentConfig.Value(agent.LxcBridge) - if bridgeDevice == "" { - bridgeDevice = lxc.DefaultLxcBridge - } - _, err := configureContainerNetwork( - machineId, - bridgeDevice, - broker.api, - args.NetworkInfo, - false, // don't allocate a new address. 
-		broker.enableNAT,
+func maybeGetManagerConfigNamespaces(managerConfig container.ManagerConfig) string {
+	if len(managerConfig) == 0 {
+		return ""
+	}
+	if namespace, ok := managerConfig[container.ConfigName]; ok {
+		return namespace
+	}
+	return ""
+}
+
+func prepareOrGetContainerInterfaceInfo(
+	api APICalls,
+	machineID string,
+	bridgeDevice string,
+	allocateOrMaintain bool,
+	enableNAT bool,
+	startingNetworkInfo []network.InterfaceInfo,
+	log loggo.Logger,
+) ([]network.InterfaceInfo, error) {
+	maintain := !allocateOrMaintain
+
+	if environs.AddressAllocationEnabled() {
+		if maintain {
+			log.Debugf("running maintenance for container %q", machineID)
+		} else {
+			log.Debugf("trying to allocate static IP for container %q", machineID)
+		}
+
+		allocatedInfo, err := configureContainerNetwork(
+			machineID,
+			bridgeDevice,
+			api,
+			startingNetworkInfo,
+			allocateOrMaintain,
+			enableNAT,
+		)
+		if err != nil && !maintain {
+			log.Infof("not allocating static IP for container %q: %v", machineID, err)
+		}
+		return allocatedInfo, err
+	}
+
+	if maintain {
+		log.Debugf("address allocation disabled: not running maintenance for machine %q", machineID)
+		return nil, nil
+	}
+
+	log.Debugf("address allocation feature flag not enabled; using DHCP for container %q", machineID)
+
+	// In case we're running on MAAS 1.8+ with devices support, we'll still
+	// call PrepareContainerInterfaceInfo(), but we'll ignore a NotSupported
+	// error if we get it (which means we're not using MAAS 1.8+).
+	containerTag := names.NewMachineTag(machineID)
+	preparedInfo, err := api.PrepareContainerInterfaceInfo(containerTag)
+	if err != nil && errors.IsNotSupported(err) {
+		log.Warningf("new container %q not registered as device: not running on MAAS 1.8+", machineID)
+		return nil, nil
+	} else if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	dnsServers, searchDomain, dnsErr := localDNSServers()
+
+	if dnsErr != nil {
+		return nil, errors.Trace(dnsErr)
+	}
+
+	bridgeDeviceAddress, err := discoverIPv4InterfaceAddress(bridgeDevice)
+
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	for i := range preparedInfo {
+		preparedInfo[i].DNSServers = dnsServers
+		preparedInfo[i].DNSSearch = searchDomain
+		if preparedInfo[i].GatewayAddress.Value == "" {
+			preparedInfo[i].GatewayAddress = *bridgeDeviceAddress
+		}
+	}
+
+	log.Tracef("PrepareContainerInterfaceInfo returned %#v", preparedInfo)
+	// Most likely there will be only one item in the list, but check
+	// all of them for forward compatibility.
+	macAddresses := set.NewStrings()
+	for _, prepInfo := range preparedInfo {
+		macAddresses.Add(prepInfo.MACAddress)
+	}
+	log.Infof(
+		"new container %q registered as a MAAS device with MAC address(es) %v",
+		machineID, macAddresses.SortedValues(),
+	)
-	return err
+	return preparedInfo, nil
+}
+
+func maybeReleaseContainerAddresses(
+	api APICalls,
+	instanceID instance.Id,
+	namespace string,
+	log loggo.Logger,
+) {
+	if environs.AddressAllocationEnabled() {
+		// The addresser worker will take care of the addresses.
+		return
+	}
+	// If we're not using addressable containers, we might still have used a
+	// MAAS 1.8+ device to register the container when provisioning. In that
+	// case we need to attempt releasing the device, but ignore a NotSupported
+	// error (when we're not using MAAS 1.8+).
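[Editor's note] The body that follows derives the machine tag back from the instance id by trimming the container manager's namespace prefix. A standalone sketch of that conversion using github.com/juju/names; the sample values are illustrative only:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/juju/names"
    )

    // containerTagFromInstanceID mirrors the conversion below: strip the
    // "<namespace>-" prefix from the instance id and parse the remainder
    // as a machine tag.
    func containerTagFromInstanceID(instanceID, namespace string) (names.MachineTag, error) {
    	tagString := strings.TrimPrefix(instanceID, namespace+"-")
    	return names.ParseMachineTag(tagString)
    }

    func main() {
    	// A container instance id in the form the lxc broker produces.
    	tag, err := containerTagFromInstanceID("juju-machine-1-lxc-0", "juju")
    	fmt.Println(tag, err) // machine-1-lxc-0 <nil>
    }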
+	namespacePrefix := fmt.Sprintf("%s-", namespace)
+	tagString := strings.TrimPrefix(string(instanceID), namespacePrefix)
+	containerTag, err := names.ParseMachineTag(tagString)
+	if err != nil {
+		// Not a reason to cause StopInstances to fail though.
+		log.Warningf("unexpected container tag %q: %v", instanceID, err)
+		return
+	}
+	err = api.ReleaseContainerAddresses(containerTag)
+	switch {
+	case err == nil:
+		log.Infof("released all addresses for container %q", containerTag.Id())
+	case errors.IsNotSupported(err):
+		log.Warningf("not releasing all addresses for container %q: %v", containerTag.Id(), err)
+	default:
+		log.Warningf(
+			"unexpected error trying to release container %q addresses: %v",
+			containerTag.Id(), err,
+		)
+	}
+}

=== modified file 'src/github.com/juju/juju/worker/provisioner/lxc-broker_test.go'
--- src/github.com/juju/juju/worker/provisioner/lxc-broker_test.go	2016-03-14 14:26:14 +0000
+++ src/github.com/juju/juju/worker/provisioner/lxc-broker_test.go	2016-03-22 15:18:22 +0000
@@ -17,6 +17,7 @@
 	"github.com/juju/names"
 	gitjujutesting "github.com/juju/testing"
 	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils/arch"
 	"github.com/juju/utils/set"
 	gc "gopkg.in/check.v1"
@@ -35,7 +36,6 @@
 	"github.com/juju/juju/feature"
 	"github.com/juju/juju/instance"
 	instancetest "github.com/juju/juju/instance/testing"
-	"github.com/juju/juju/juju/arch"
 	jujutesting "github.com/juju/juju/juju/testing"
 	"github.com/juju/juju/network"
 	"github.com/juju/juju/state"
@@ -92,14 +92,14 @@
 	var err error
 	s.agentConfig, err = agent.NewAgentConfig(
 		agent.AgentConfigParams{
-			DataDir:           "/not/used/here",
+			Paths:             agent.NewPathsWithDefaults(agent.Paths{DataDir: "/not/used/here"}),
 			Tag:               names.NewMachineTag("1"),
-			UpgradedToVersion: version.Current.Number,
+			UpgradedToVersion: version.Current,
 			Password:          "dummy-secret",
 			Nonce:             "nonce",
 			APIAddresses:      []string{"10.0.0.1:1234"},
 			CACert:            coretesting.CACert,
-			Environment:       coretesting.EnvironmentTag,
+			Model:             coretesting.ModelTag,
 		})
 	c.Assert(err, jc.ErrorIsNil)
 	managerConfig := container.ManagerConfig{
@@ -193,6 +193,9 @@
 	machineId := "1/lxc/0"
 	lxc := s.startInstance(c, machineId, nil)
 	s.api.CheckCalls(c, []gitjujutesting.StubCall{{
+		FuncName: "PrepareContainerInterfaceInfo",
+		Args:     []interface{}{names.NewMachineTag("1-lxc-0")},
+	}, {
 		FuncName: "ContainerConfig",
 	}})
 	c.Assert(lxc.Id(), gc.Equals, instance.Id("juju-machine-1-lxc-0"))
@@ -314,6 +317,9 @@
 	machineId := "1/lxc/0"
 	lxc := s.startInstance(c, machineId, nil)
 	s.api.CheckCalls(c, []gitjujutesting.StubCall{{
+		FuncName: "PrepareContainerInterfaceInfo",
+		Args:     []interface{}{names.NewMachineTag("1-lxc-0")},
+	}, {
 		FuncName: "ContainerConfig",
 	}})
 	c.Assert(lxc.Id(), gc.Equals, instance.Id("juju-machine-1-lxc-0"))
@@ -328,13 +334,12 @@
 	AssertFileContains(c, lxc_conf, expect...)
} -func (s *lxcBrokerSuite) TestStartInstancePopulatesNetworkInfo(c *gc.C) { - s.SetFeatureFlags(feature.AddressAllocation) +func (s *lxcBrokerSuite) startInstancePopulatesNetworkInfo(c *gc.C) (*environs.StartInstanceResult, error) { s.PatchValue(provisioner.InterfaceAddrs, func(i *net.Interface) ([]net.Addr, error) { return []net.Addr{&fakeAddr{"0.1.2.1/24"}}, nil }) fakeResolvConf := filepath.Join(c.MkDir(), "resolv.conf") - err := ioutil.WriteFile(fakeResolvConf, []byte("nameserver ns1.dummy\n"), 0644) + err := ioutil.WriteFile(fakeResolvConf, []byte("nameserver ns1.dummy\nnameserver ns2.dummy\nsearch dummy\n"), 0644) c.Assert(err, jc.ErrorIsNil) s.PatchValue(provisioner.ResolvConf, fakeResolvConf) @@ -343,11 +348,16 @@ Version: version.MustParseBinary("2.3.4-quantal-amd64"), URL: "http://tools.testing.invalid/2.3.4-quantal-amd64.tgz", }} - result, err := s.broker.StartInstance(environs.StartInstanceParams{ + return s.broker.StartInstance(environs.StartInstanceParams{ Constraints: constraints.Value{}, Tools: possibleTools, InstanceConfig: instanceConfig, }) +} + +func (s *lxcBrokerSuite) TestStartInstancePopulatesNetworkInfoWithAddressAllocation(c *gc.C) { + s.SetFeatureFlags(feature.AddressAllocation) + result, err := s.startInstancePopulatesNetworkInfo(c) c.Assert(err, jc.ErrorIsNil) c.Assert(result.NetworkInfo, gc.HasLen, 1) iface := result.NetworkInfo[0] @@ -358,7 +368,8 @@ ConfigType: network.ConfigStatic, InterfaceName: "eth0", // generated from the device index. MACAddress: "aa:bb:cc:dd:ee:ff", - DNSServers: network.NewAddresses("ns1.dummy"), + DNSServers: network.NewAddresses("ns1.dummy", "ns2.dummy"), + DNSSearch: "dummy", Address: network.NewAddress("0.1.2.3"), GatewayAddress: network.NewAddress("0.1.2.1"), NetworkName: network.DefaultPrivate, @@ -366,6 +377,25 @@ }) } +func (s *lxcBrokerSuite) TestStartInstancePopulatesNetworkInfoWithoutAddressAllocation(c *gc.C) { + s.SetFeatureFlags() + result, err := s.startInstancePopulatesNetworkInfo(c) + c.Assert(err, jc.ErrorIsNil) + c.Assert(result.NetworkInfo, gc.HasLen, 1) + iface := result.NetworkInfo[0] + c.Assert(err, jc.ErrorIsNil) + c.Assert(iface, jc.DeepEquals, network.InterfaceInfo{ + DeviceIndex: 0, + CIDR: "0.1.2.0/24", + InterfaceName: "dummy0", // generated from the device index. 
+ MACAddress: "aa:bb:cc:dd:ee:ff", + DNSServers: network.NewAddresses("ns1.dummy", "ns2.dummy"), + DNSSearch: "dummy", + Address: network.NewAddress("0.1.2.3"), + GatewayAddress: network.NewAddress("0.1.2.1"), + }) +} + func (s *lxcBrokerSuite) TestStopInstance(c *gc.C) { lxc0 := s.startInstance(c, "1/lxc/0", nil) lxc1 := s.startInstance(c, "1/lxc/1", nil) @@ -740,6 +770,99 @@ gitjujutesting.AssertEchoArgs(c, "ip", "route", "add", "0.1.2.3", "dev", "bridge") } +func (s *lxcBrokerSuite) patchNetInterfaceByName(c *gc.C, interfaceName string) { + s.PatchValue(provisioner.NetInterfaceByName, func(name string) (*net.Interface, error) { + if interfaceName != name { + return nil, errors.New("no such network interface") + } + return &net.Interface{ + Index: 0, + Name: name, + Flags: net.FlagUp, + }, nil + }) +} + +func (s *lxcBrokerSuite) patchNetInterfaceByNameAddrs(c *gc.C, interfaceName string, fakeAddrs ...string) { + addrs := make([]net.Addr, len(fakeAddrs)) + + for i, a := range fakeAddrs { + addrs[i] = &fakeAddr{a} + } + + s.PatchValue(provisioner.InterfaceAddrs, func(i *net.Interface) ([]net.Addr, error) { + c.Assert(i.Name, gc.Matches, interfaceName) + return addrs, nil + }) +} + +func (s *lxcBrokerSuite) checkDiscoverIPv4InterfaceAddressesFails(c *gc.C, ifaceName, expectedError string, fakeAddrs ...string) { + s.patchNetInterfaceByName(c, ifaceName) + s.patchNetInterfaceByNameAddrs(c, ifaceName, fakeAddrs...) + addr, err := provisioner.DiscoverIPv4InterfaceAddress(ifaceName) + c.Assert(err, gc.ErrorMatches, expectedError) + c.Assert(addr, gc.IsNil) +} + +func (s *lxcBrokerSuite) checkDiscoverIPv4InterfaceAddress(c *gc.C, ifaceName, expectedAddress string, fakeAddrs ...string) { + s.patchNetInterfaceByName(c, ifaceName) + s.patchNetInterfaceByNameAddrs(c, ifaceName, fakeAddrs...) 
+ addr, err := provisioner.DiscoverIPv4InterfaceAddress(ifaceName) + c.Assert(err, gc.IsNil) + c.Assert(addr, gc.Not(gc.IsNil)) + c.Assert(*addr, gc.Equals, network.NewAddress(expectedAddress)) +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameUnknownInterfaceNameError(c *gc.C) { + s.patchNetInterfaceByName(c, "fake") + addr, err := provisioner.DiscoverIPv4InterfaceAddress("missing") + c.Assert(err, gc.ErrorMatches, `cannot get interface "missing": no such network interface`) + c.Assert(addr, gc.IsNil) +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameAddressError(c *gc.C) { + s.patchNetInterfaceByName(c, "fake") + s.PatchValue(provisioner.InterfaceAddrs, func(i *net.Interface) ([]net.Addr, error) { + c.Assert(i.Name, gc.Matches, "fake") + return nil, errors.New("boom!") + }) + addr, err := provisioner.DiscoverIPv4InterfaceAddress("fake") + c.Assert(err, gc.ErrorMatches, `cannot get network addresses for interface "fake": boom!`) + c.Assert(addr, gc.IsNil) +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameInvalidAddr(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddressesFails(c, "fake", `cannot parse address "fakeAddr": invalid CIDR address: fakeAddr`, "") +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameZeroAddresses(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddressesFails(c, "fake", `no addresses found for "fake"`) +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameIPv6CIDRAddrError(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddressesFails(c, "fake", `cannot parse address "f000::/": invalid CIDR address: f000::/`, "f000::/") +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameOnlyHasIPv6AddrError(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddressesFails(c, "fake", `no addresses found for "fake"`, "::1", "f000::1/1") +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameIPv4CIDRAddrError(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddressesFails(c, "fake", `cannot parse address "192.168.1.42/42": invalid CIDR address: 192.168.1.42/42`, "192.168.1.42/42") +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameSuccessWithCIDRAddress(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddress(c, "fake", "192.168.1.42", "192.168.1.42/24") +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameSuccess(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddress(c, "fake", "192.168.1.42", "192.168.1.42") +} + +func (s *lxcBrokerSuite) TestDiscoverIPv4InterfaceByNameMixtureOfIPv6AndIPv4Success(c *gc.C) { + s.checkDiscoverIPv4InterfaceAddress(c, "fake", "192.168.1.42", "::1", "f000::1", "192.168.1.42") +} + func (s *lxcBrokerSuite) TestDiscoverPrimaryNICNetInterfacesError(c *gc.C) { s.PatchValue(provisioner.NetInterfaces, func() ([]net.Interface, error) { return nil, errors.New("boom!") @@ -1047,12 +1170,14 @@ broker, err := provisioner.NewLxcBroker(s.provisioner, agentConfig, managerConfig, &containertesting.MockURLGetter{}, false, 0) c.Assert(err, jc.ErrorIsNil) toolsFinder := (*provisioner.GetToolsFinder)(s.provisioner) - return provisioner.NewContainerProvisioner(instance.LXC, s.provisioner, agentConfig, broker, toolsFinder) + w, err := provisioner.NewContainerProvisioner(instance.LXC, s.provisioner, agentConfig, broker, toolsFinder) + c.Assert(err, jc.ErrorIsNil) + return w } func (s *lxcProvisionerSuite) TestProvisionerStartStop(c *gc.C) { p := s.newLxcProvisioner(c) - c.Assert(p.Stop(), gc.IsNil) + stop(c, p) } func (s *lxcProvisionerSuite) TestDoesNotStartEnvironMachines(c *gc.C) { @@ -1098,7 +1223,7 @@ c.Assert(err, jc.ErrorIsNil) 
defaultTools := version.Binary{ - Number: version.Current.Number, + Number: version.Current, Arch: arch.HostArch(), Series: coretesting.FakeDefaultSeries, } @@ -1187,3 +1312,11 @@ } return []network.InterfaceInfo{f.fakeInterfaceInfo}, nil } + +func (f *fakeAPI) ReleaseContainerAddresses(tag names.MachineTag) error { + f.MethodCall(f, "ReleaseContainerAddresses", tag) + if err := f.NextErr(); err != nil { + return err + } + return nil +} === added file 'src/github.com/juju/juju/worker/provisioner/lxd-broker.go' --- src/github.com/juju/juju/worker/provisioner/lxd-broker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/provisioner/lxd-broker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,162 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + + "github.com/juju/juju/agent" + "github.com/juju/juju/cloudconfig/instancecfg" + "github.com/juju/juju/container" + "github.com/juju/juju/container/lxd" + "github.com/juju/juju/environs" + "github.com/juju/juju/instance" +) + +var lxdLogger = loggo.GetLogger("juju.provisioner.lxd") + +var _ environs.InstanceBroker = (*lxdBroker)(nil) + +func NewLxdBroker( + api APICalls, + agentConfig agent.Config, + managerConfig container.ManagerConfig, + enableNAT bool, +) (environs.InstanceBroker, error) { + namespace := maybeGetManagerConfigNamespaces(managerConfig) + manager, err := lxd.NewContainerManager(managerConfig) + if err != nil { + return nil, err + } + + return &lxdBroker{ + manager, + namespace, + api, + agentConfig, + enableNAT, + }, nil +} + +type lxdBroker struct { + manager container.Manager + namespace string + api APICalls + agentConfig agent.Config + enableNAT bool +} + +func (broker *lxdBroker) StartInstance(args environs.StartInstanceParams) (*environs.StartInstanceResult, error) { + if args.InstanceConfig.HasNetworks() { + return nil, errors.New("starting lxd containers with networks is not supported yet") + } + machineId := args.InstanceConfig.MachineId + bridgeDevice := broker.agentConfig.Value(agent.LxcBridge) + if bridgeDevice == "" { + bridgeDevice = lxd.DefaultLxdBridge + } + + preparedInfo, err := prepareOrGetContainerInterfaceInfo( + broker.api, + machineId, + bridgeDevice, + true, // allocate if possible, do not maintain existing. + broker.enableNAT, + args.NetworkInfo, + lxdLogger, + ) + if err != nil { + // It's not fatal (yet) if we couldn't pre-allocate addresses for the + // container. 
+ logger.Warningf("failed to prepare container %q network config: %v", machineId, err) + } else { + args.NetworkInfo = preparedInfo + } + + network := container.BridgeNetworkConfig(bridgeDevice, 0, args.NetworkInfo) + + series := args.Tools.OneSeries() + args.InstanceConfig.MachineContainerType = instance.LXD + args.InstanceConfig.Tools = args.Tools[0] + + config, err := broker.api.ContainerConfig() + if err != nil { + lxdLogger.Errorf("failed to get container config: %v", err) + return nil, err + } + + if err := instancecfg.PopulateInstanceConfig( + args.InstanceConfig, + config.ProviderType, + config.AuthorizedKeys, + config.SSLHostnameVerification, + config.Proxy, + config.AptProxy, + config.AptMirror, + config.PreferIPv6, + config.EnableOSRefreshUpdate, + config.EnableOSUpgrade, + ); err != nil { + lxdLogger.Errorf("failed to populate machine config: %v", err) + return nil, err + } + + storageConfig := &container.StorageConfig{} + inst, hardware, err := broker.manager.CreateContainer(args.InstanceConfig, series, network, storageConfig) + if err != nil { + return nil, err + } + + return &environs.StartInstanceResult{ + Instance: inst, + Hardware: hardware, + NetworkInfo: network.Interfaces, + }, nil +} + +func (broker *lxdBroker) StopInstances(ids ...instance.Id) error { + // TODO: potentially parallelise. + for _, id := range ids { + lxdLogger.Infof("stopping lxd container for instance: %s", id) + if err := broker.manager.DestroyContainer(id); err != nil { + lxdLogger.Errorf("container did not stop: %v", err) + return err + } + maybeReleaseContainerAddresses(broker.api, id, broker.namespace, lxdLogger) + } + return nil +} + +// AllInstances only returns running containers. +func (broker *lxdBroker) AllInstances() (result []instance.Instance, err error) { + return broker.manager.ListContainers() +} + +// MaintainInstance ensures the container's host has the required iptables and +// routing rules to make the container visible to both the host and other +// machines on the same subnet. This is important mostly when address allocation +// feature flag is enabled, as otherwise we don't create additional iptables +// rules or routes. +func (broker *lxdBroker) MaintainInstance(args environs.StartInstanceParams) error { + machineID := args.InstanceConfig.MachineId + + // Default to using the host network until we can configure. + bridgeDevice := broker.agentConfig.Value(agent.LxcBridge) + if bridgeDevice == "" { + bridgeDevice = lxd.DefaultLxdBridge + } + + // There's no InterfaceInfo we expect to get below. + _, err := prepareOrGetContainerInterfaceInfo( + broker.api, + machineID, + bridgeDevice, + false, // maintain, do not allocate. + broker.enableNAT, + args.NetworkInfo, + lxdLogger, + ) + return err +} === added file 'src/github.com/juju/juju/worker/provisioner/manifold.go' --- src/github.com/juju/juju/worker/provisioner/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/provisioner/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + apiprovisioner "github.com/juju/juju/api/provisioner" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" +) + +// ManifoldConfig defines an environment provisioner's dependencies. 
It's not + // currently clear whether it'll be easier to extend this type to include all + // provisioners, or to create separate (Environ|Container)Manifold[Config]s; + // for now we dodge the question because we don't need container provisioners + // in dependency engines. Yet. +type ManifoldConfig struct { + AgentName string + APICallerName string +} + +// Manifold creates a manifold that runs an environment provisioner. See the +// ManifoldConfig type for discussion about how this can/should evolve. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{config.AgentName, config.APICallerName}, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + var agent agent.Agent + if err := getResource(config.AgentName, &agent); err != nil { + return nil, errors.Trace(err) + } + var apiCaller base.APICaller + if err := getResource(config.APICallerName, &apiCaller); err != nil { + return nil, errors.Trace(err) + } + api := apiprovisioner.NewState(apiCaller) + config := agent.CurrentConfig() + w, err := NewEnvironProvisioner(api, config) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil + }, + } +} === added file 'src/github.com/juju/juju/worker/provisioner/manifold_test.go' --- src/github.com/juju/juju/worker/provisioner/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/provisioner/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package provisioner_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/provisioner" +) + +type ManifoldSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) TestManifold(c *gc.C) { + manifold := provisioner.Manifold(provisioner.ManifoldConfig{ + AgentName: "jeff", + APICallerName: "barry", + }) + + c.Check(manifold.Inputs, jc.DeepEquals, []string{"jeff", "barry"}) + c.Check(manifold.Output, gc.IsNil) + c.Check(manifold.Start, gc.NotNil) + // manifold.Start is tested extensively via direct use in provisioner_test +} + +func (s *ManifoldSuite) TestMissingAgent(c *gc.C) { + manifold := provisioner.Manifold(provisioner.ManifoldConfig{ + AgentName: "agent", + APICallerName: "api-caller", + }) + w, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "agent": dt.StubResource{Error: dependency.ErrMissing}, + "api-caller": dt.StubResource{Output: struct{ base.APICaller }{}}, + })) + c.Check(w, gc.IsNil) + c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} + +func (s *ManifoldSuite) TestMissingAPICaller(c *gc.C) { + manifold := provisioner.Manifold(provisioner.ManifoldConfig{ + AgentName: "agent", + APICallerName: "api-caller", + }) + w, err := manifold.Start(dt.StubGetResource(dt.StubResources{ + "agent": dt.StubResource{Output: struct{ agent.Agent }{}}, + "api-caller": dt.StubResource{Error: dependency.ErrMissing}, + })) + c.Check(w, gc.IsNil) + c.Check(errors.Cause(err), gc.Equals, dependency.ErrMissing) +} === modified file 'src/github.com/juju/juju/worker/provisioner/package_test.go' --- src/github.com/juju/juju/worker/provisioner/package_test.go 2014-08-20 15:00:12 +0000 +++
src/github.com/juju/juju/worker/provisioner/package_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,6 @@ "github.com/juju/juju/testing" ) -func Test(t *stdtesting.T) { +func TestPackage(t *stdtesting.T) { testing.MgoTestPackage(t) } === modified file 'src/github.com/juju/juju/worker/provisioner/provisioner.go' --- src/github.com/juju/juju/worker/provisioner/provisioner.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/provisioner/provisioner.go 2016-03-22 15:18:22 +0000 @@ -5,22 +5,22 @@ import ( "sync" + "time" "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "launchpad.net/tomb" "github.com/juju/juju/agent" apiprovisioner "github.com/juju/juju/api/provisioner" - apiwatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/environmentserver/authentication" + "github.com/juju/juju/controller/authentication" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" - "github.com/juju/juju/state/watcher" - "github.com/juju/juju/utils" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" + "github.com/juju/juju/worker/environ" ) var logger = loggo.GetLogger("juju.provisioner") @@ -29,12 +29,16 @@ var _ Provisioner = (*environProvisioner)(nil) var _ Provisioner = (*containerProvisioner)(nil) +var ( + retryStrategyDelay = 10 * time.Second + retryStrategyCount = 3 +) + // Provisioner represents a running provisioner worker. type Provisioner interface { worker.Worker - Stop() error - getMachineWatcher() (apiwatcher.StringsWatcher, error) - getRetryWatcher() (apiwatcher.NotifyWatcher, error) + getMachineWatcher() (watcher.StringsWatcher, error) + getRetryWatcher() (watcher.NotifyWatcher, error) } // environProvisioner represents a running provisioning worker for machine nodes @@ -61,7 +65,23 @@ agentConfig agent.Config broker environs.InstanceBroker toolsFinder ToolsFinder - tomb tomb.Tomb + catacomb catacomb.Catacomb +} + +// RetryStrategy defines the retry behavior when encountering a retryable +// error during provisioning. +type RetryStrategy struct { + retryDelay time.Duration + retryCount int +} + +// NewRetryStrategy returns a new retry strategy with the specified delay and +// count for use with retryable provisioning errors. +func NewRetryStrategy(delay time.Duration, count int) RetryStrategy { + return RetryStrategy{ + retryDelay: delay, + retryCount: count, + } } // configObserver is implemented so that tests can see @@ -80,27 +100,14 @@ o.Unlock() } -// Err returns the reason why the provisioner has stopped or tomb.ErrStillAlive -// when it is still alive. -func (p *provisioner) Err() (reason error) { - return p.tomb.Err() -} - // Kill implements worker.Worker.Kill. func (p *provisioner) Kill() { - p.tomb.Kill(nil) + p.catacomb.Kill(nil) } // Wait implements worker.Worker.Wait. func (p *provisioner) Wait() error { - return p.tomb.Wait() -} - -// Stop stops the provisioner and returns any error encountered while -// provisioning. -func (p *provisioner) Stop() error { - p.tomb.Kill(nil) - return p.tomb.Wait() + return p.catacomb.Wait() } // getToolsFinder returns a ToolsFinder for the provided State. 
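For orientation: the RetryStrategy introduced above is just a (delay, count) pair that the provisioner task consumes when StartInstance fails with a retryable error. The following is a minimal, self-contained sketch of that pattern, not juju code; the RetryStrategy mirror and the unreliableStart stand-in are illustrative assumptions.

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // RetryStrategy mirrors the (delay, count) pair added in provisioner.go.
    type RetryStrategy struct {
    	retryDelay time.Duration
    	retryCount int
    }

    func main() {
    	strategy := RetryStrategy{retryDelay: 10 * time.Millisecond, retryCount: 3}
    	attempts := 0
    	// unreliableStart is an illustrative stand-in for broker.StartInstance:
    	// it fails twice, then succeeds.
    	unreliableStart := func() error {
    		attempts++
    		if attempts < 3 {
    			return errors.New("transient failure")
    		}
    		return nil
    	}
    	err := unreliableStart()
    	// Retry up to retryCount more times, waiting retryDelay between tries,
    	// mirroring the loop the diff adds to provisionerTask.startMachine.
    	for count := strategy.retryCount; err != nil && count > 0; count-- {
    		time.Sleep(strategy.retryDelay)
    		err = unreliableStart()
    	}
    	fmt.Printf("attempts=%d err=%v\n", attempts, err)
    }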
@@ -128,19 +135,19 @@ tag := p.agentConfig.Tag() machineTag, ok := tag.(names.MachineTag) if !ok { - errors.Errorf("expacted names.MachineTag, got %T", tag) + errors.Errorf("expected names.MachineTag, got %T", tag) } - envCfg, err := p.st.EnvironConfig() + envCfg, err := p.st.ModelConfig() if err != nil { - return nil, errors.Annotate(err, "could not retrieve the environment config.") + return nil, errors.Annotate(err, "could not retrieve the model config.") } secureServerConnection := false if info, ok := p.agentConfig.StateServingInfo(); ok { secureServerConnection = info.CAPrivateKey != "" } - task := NewProvisionerTask( + task, err := NewProvisionerTask( machineTag, harvestMode, p.st, @@ -151,14 +158,18 @@ auth, envCfg.ImageStream(), secureServerConnection, + RetryStrategy{retryDelay: retryStrategyDelay, retryCount: retryStrategyCount}, ) + if err != nil { + return nil, errors.Trace(err) + } return task, nil } // NewEnvironProvisioner returns a new Provisioner for an environment. // When new machines are added to the state, it allocates instances // from the environment and allocates them to the new machines. -func NewEnvironProvisioner(st *apiprovisioner.State, agentConfig agent.Config) Provisioner { +func NewEnvironProvisioner(st *apiprovisioner.State, agentConfig agent.Config) (Provisioner, error) { p := &environProvisioner{ provisioner: provisioner{ st: st, @@ -168,75 +179,83 @@ } p.Provisioner = p logger.Tracef("Starting environ provisioner for %q", p.agentConfig.Tag()) - go func() { - defer p.tomb.Done() - p.tomb.Kill(errors.Cause(p.loop())) - }() - return p + + err := catacomb.Invoke(catacomb.Plan{ + Site: &p.catacomb, + Work: p.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return p, nil } func (p *environProvisioner) loop() error { - var environConfigChanges <-chan struct{} - environWatcher, err := p.st.WatchForEnvironConfigChanges() + var modelConfigChanges <-chan struct{} + modelWatcher, err := p.st.WatchForModelConfigChanges() if err != nil { - return utils.LoggedErrorStack(errors.Trace(err)) - } - environConfigChanges = environWatcher.Changes() - defer watcher.Stop(environWatcher, &p.tomb) + return loggedErrorStack(errors.Trace(err)) + } + if err := p.catacomb.Add(modelWatcher); err != nil { + return errors.Trace(err) + } + modelConfigChanges = modelWatcher.Changes() - p.environ, err = worker.WaitForEnviron(environWatcher, p.st, p.tomb.Dying()) + p.environ, err = environ.WaitForEnviron(modelWatcher, p.st, p.catacomb.Dying()) if err != nil { - return utils.LoggedErrorStack(errors.Trace(err)) + if err == environ.ErrWaitAborted { + return p.catacomb.ErrDying() + } + return loggedErrorStack(errors.Trace(err)) } p.broker = p.environ - harvestMode := p.environ.Config().ProvisionerHarvestMode() + modelConfig := p.environ.Config() + p.configObserver.notify(modelConfig) + harvestMode := modelConfig.ProvisionerHarvestMode() task, err := p.getStartTask(harvestMode) if err != nil { - return utils.LoggedErrorStack(errors.Trace(err)) - } - defer watcher.Stop(task, &p.tomb) + return loggedErrorStack(errors.Trace(err)) + } + if err := p.catacomb.Add(task); err != nil { + return errors.Trace(err) + } for { select { - case <-p.tomb.Dying(): - return tomb.ErrDying - case <-task.Dying(): - err := task.Err() - logger.Errorf("environ provisioner died: %v", err) - return err - case _, ok := <-environConfigChanges: + case <-p.catacomb.Dying(): + return p.catacomb.ErrDying() + case _, ok := <-modelConfigChanges: if !ok { - return watcher.EnsureErr(environWatcher) + return 
errors.New("model configuration watcher closed") } - environConfig, err := p.st.EnvironConfig() + modelConfig, err := p.st.ModelConfig() if err != nil { - logger.Errorf("cannot load environment configuration: %v", err) - return err - } - if err := p.setConfig(environConfig); err != nil { - logger.Errorf("loaded invalid environment configuration: %v", err) - } - task.SetHarvestMode(environConfig.ProvisionerHarvestMode()) + return errors.Annotate(err, "cannot load model configuration") + } + if err := p.setConfig(modelConfig); err != nil { + return errors.Annotate(err, "loaded invalid model configuration") + } + task.SetHarvestMode(modelConfig.ProvisionerHarvestMode()) } } } -func (p *environProvisioner) getMachineWatcher() (apiwatcher.StringsWatcher, error) { - return p.st.WatchEnvironMachines() +func (p *environProvisioner) getMachineWatcher() (watcher.StringsWatcher, error) { + return p.st.WatchModelMachines() } -func (p *environProvisioner) getRetryWatcher() (apiwatcher.NotifyWatcher, error) { +func (p *environProvisioner) getRetryWatcher() (watcher.NotifyWatcher, error) { return p.st.WatchMachineErrorRetry() } // setConfig updates the environment configuration and notifies // the config observer. -func (p *environProvisioner) setConfig(environConfig *config.Config) error { - if err := p.environ.SetConfig(environConfig); err != nil { +func (p *environProvisioner) setConfig(modelConfig *config.Config) error { + if err := p.environ.SetConfig(modelConfig); err != nil { return err } - p.configObserver.notify(environConfig) + p.configObserver.notify(modelConfig) return nil } @@ -249,7 +268,7 @@ agentConfig agent.Config, broker environs.InstanceBroker, toolsFinder ToolsFinder, -) Provisioner { +) (Provisioner, error) { p := &containerProvisioner{ provisioner: provisioner{ @@ -262,53 +281,55 @@ } p.Provisioner = p logger.Tracef("Starting %s provisioner for %q", p.containerType, p.agentConfig.Tag()) - go func() { - defer p.tomb.Done() - p.tomb.Kill(p.loop()) - }() - return p + + err := catacomb.Invoke(catacomb.Plan{ + Site: &p.catacomb, + Work: p.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return p, nil } func (p *containerProvisioner) loop() error { - var environConfigChanges <-chan struct{} - environWatcher, err := p.st.WatchForEnvironConfigChanges() + modelWatcher, err := p.st.WatchForModelConfigChanges() if err != nil { - return err - } - environConfigChanges = environWatcher.Changes() - defer watcher.Stop(environWatcher, &p.tomb) + return errors.Trace(err) + } + if err := p.catacomb.Add(modelWatcher); err != nil { + return errors.Trace(err) + } - config, err := p.st.EnvironConfig() + modelConfig, err := p.st.ModelConfig() if err != nil { return err } - harvestMode := config.ProvisionerHarvestMode() + p.configObserver.notify(modelConfig) + harvestMode := modelConfig.ProvisionerHarvestMode() task, err := p.getStartTask(harvestMode) if err != nil { return err } - defer watcher.Stop(task, &p.tomb) + if err := p.catacomb.Add(task); err != nil { + return errors.Trace(err) + } for { select { - case <-p.tomb.Dying(): - return tomb.ErrDying - case <-task.Dying(): - err := task.Err() - logger.Errorf("%s provisioner died: %v", p.containerType, err) - return err - case _, ok := <-environConfigChanges: + case <-p.catacomb.Dying(): + return p.catacomb.ErrDying() + case _, ok := <-modelWatcher.Changes(): if !ok { - return watcher.EnsureErr(environWatcher) + return errors.New("model configuratioon watch closed") } - environConfig, err := p.st.EnvironConfig() + modelConfig, err := 
p.st.ModelConfig() if err != nil { - logger.Errorf("cannot load environment configuration: %v", err) - return err + return errors.Annotate(err, "cannot load model configuration") } - p.configObserver.notify(environConfig) - task.SetHarvestMode(environConfig.ProvisionerHarvestMode()) + p.configObserver.notify(modelConfig) + task.SetHarvestMode(modelConfig.ProvisionerHarvestMode()) } } } @@ -329,7 +350,7 @@ return p.machine, nil } -func (p *containerProvisioner) getMachineWatcher() (apiwatcher.StringsWatcher, error) { +func (p *containerProvisioner) getMachineWatcher() (watcher.StringsWatcher, error) { machine, err := p.getMachine() if err != nil { return nil, err @@ -337,6 +358,6 @@ return machine.WatchContainers(p.containerType) } -func (p *containerProvisioner) getRetryWatcher() (apiwatcher.NotifyWatcher, error) { +func (p *containerProvisioner) getRetryWatcher() (watcher.NotifyWatcher, error) { return nil, errors.NotImplementedf("getRetryWatcher") } === modified file 'src/github.com/juju/juju/worker/provisioner/provisioner_task.go' --- src/github.com/juju/juju/worker/provisioner/provisioner_task.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/provisioner/provisioner_task.go 2016-03-22 15:18:22 +0000 @@ -12,31 +12,28 @@ "github.com/juju/names" "github.com/juju/utils" "github.com/juju/utils/set" - "launchpad.net/tomb" apiprovisioner "github.com/juju/juju/api/provisioner" - apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/cloudconfig/instancecfg" "github.com/juju/juju/constraints" - "github.com/juju/juju/environmentserver/authentication" + "github.com/juju/juju/controller/authentication" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/imagemetadata" "github.com/juju/juju/environs/simplestreams" "github.com/juju/juju/instance" "github.com/juju/juju/network" - "github.com/juju/juju/state/watcher" "github.com/juju/juju/storage" coretools "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" ) type ProvisionerTask interface { worker.Worker - Stop() error - Dying() <-chan struct{} - Err() error // SetHarvestMode sets a flag to indicate how the provisioner task // should harvest machines. See config.HarvestMode for @@ -53,15 +50,11 @@ // provisioned instances. type ToolsFinder interface { // FindTools returns a list of tools matching the specified - // version and series, and optionally arch. - FindTools(version version.Number, series string, arch *string) (coretools.List, error) + // version, series, and architecture. If arch is empty, the + // implementation is expected to use a well documented default. 
+ FindTools(version version.Number, series string, arch string) (coretools.List, error) } -var ( - maxInstanceRetryDelay = 60 - maxInstanceRetryCount = 5 -) - var _ MachineGetter = (*apiprovisioner.State)(nil) var _ ToolsFinder = (*apiprovisioner.State)(nil) @@ -70,52 +63,61 @@ harvestMode config.HarvestMode, machineGetter MachineGetter, toolsFinder ToolsFinder, - machineWatcher apiwatcher.StringsWatcher, - retryWatcher apiwatcher.NotifyWatcher, + machineWatcher watcher.StringsWatcher, + retryWatcher watcher.NotifyWatcher, broker environs.InstanceBroker, auth authentication.AuthenticationProvider, imageStream string, secureServerConnection bool, -) ProvisionerTask { + retryStartInstanceStrategy RetryStrategy, +) (ProvisionerTask, error) { + machineChanges := machineWatcher.Changes() + workers := []worker.Worker{machineWatcher} + var retryChanges watcher.NotifyChannel + if retryWatcher != nil { + retryChanges = retryWatcher.Changes() + workers = append(workers, retryWatcher) + } task := &provisionerTask{ - machineTag: machineTag, - machineGetter: machineGetter, - toolsFinder: toolsFinder, - machineWatcher: machineWatcher, - retryWatcher: retryWatcher, - broker: broker, - auth: auth, - harvestMode: harvestMode, - harvestModeChan: make(chan config.HarvestMode, 1), - machines: make(map[string]*apiprovisioner.Machine), - imageStream: imageStream, - secureServerConnection: secureServerConnection, - } - go func() { - defer task.tomb.Done() - err := task.loop() - switch cause := errors.Cause(err); cause { - case tomb.ErrDying: - err = cause - } - task.tomb.Kill(err) - }() - return task + machineTag: machineTag, + machineGetter: machineGetter, + toolsFinder: toolsFinder, + machineChanges: machineChanges, + retryChanges: retryChanges, + broker: broker, + auth: auth, + harvestMode: harvestMode, + harvestModeChan: make(chan config.HarvestMode, 1), + machines: make(map[string]*apiprovisioner.Machine), + imageStream: imageStream, + secureServerConnection: secureServerConnection, + retryStartInstanceStrategy: retryStartInstanceStrategy, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &task.catacomb, + Work: task.loop, + Init: workers, + }) + if err != nil { + return nil, errors.Trace(err) + } + return task, nil } type provisionerTask struct { - machineTag names.MachineTag - machineGetter MachineGetter - toolsFinder ToolsFinder - machineWatcher apiwatcher.StringsWatcher - retryWatcher apiwatcher.NotifyWatcher - broker environs.InstanceBroker - tomb tomb.Tomb - auth authentication.AuthenticationProvider - imageStream string - secureServerConnection bool - harvestMode config.HarvestMode - harvestModeChan chan config.HarvestMode + machineTag names.MachineTag + machineGetter MachineGetter + toolsFinder ToolsFinder + machineChanges watcher.StringsChannel + retryChanges watcher.NotifyChannel + broker environs.InstanceBroker + catacomb catacomb.Catacomb + auth authentication.AuthenticationProvider + imageStream string + secureServerConnection bool + harvestMode config.HarvestMode + harvestModeChan chan config.HarvestMode + retryStartInstanceStrategy RetryStrategy // instance id -> instance instances map[instance.Id]instance.Instance // machine id -> machine @@ -124,30 +126,15 @@ // Kill implements worker.Worker.Kill. func (task *provisionerTask) Kill() { - task.tomb.Kill(nil) + task.catacomb.Kill(nil) } // Wait implements worker.Worker.Wait. 
func (task *provisionerTask) Wait() error { - return task.tomb.Wait() -} - -func (task *provisionerTask) Stop() error { - task.Kill() - return task.Wait() -} - -func (task *provisionerTask) Dying() <-chan struct{} { - return task.tomb.Dying() -} - -func (task *provisionerTask) Err() error { - return task.tomb.Err() + return task.catacomb.Wait() } func (task *provisionerTask) loop() error { - logger.Infof("Starting up provisioner task %s", task.machineTag) - defer watcher.Stop(task.machineWatcher, &task.tomb) // Don't allow the harvesting mode to change until we have read at // least one set of changes, which will populate the task.machines @@ -155,27 +142,22 @@ // as unknown. var harvestModeChan chan config.HarvestMode - // Not all provisioners have a retry channel. - var retryChan <-chan struct{} - if task.retryWatcher != nil { - retryChan = task.retryWatcher.Changes() - } - // When the watcher is started, it will have the initial changes be all // the machines that are relevant. Also, since this is available straight // away, we know there will be some changes right off the bat. for { select { - case <-task.tomb.Dying(): + case <-task.catacomb.Dying(): logger.Infof("Shutting down provisioner task %s", task.machineTag) - return tomb.ErrDying - case ids, ok := <-task.machineWatcher.Changes(): + return task.catacomb.ErrDying() + case ids, ok := <-task.machineChanges: if !ok { - return watcher.EnsureErr(task.machineWatcher) + return errors.New("machine watcher closed channel") } if err := task.processMachines(ids); err != nil { return errors.Annotate(err, "failed to process updated machines") } + // We've seen a set of changes. Enable modification of // harvesting mode. harvestModeChan = task.harvestModeChan @@ -183,18 +165,15 @@ if harvestMode == task.harvestMode { break } - logger.Infof("harvesting mode changed to %s", harvestMode) task.harvestMode = harvestMode - if harvestMode.HarvestUnknown() { - logger.Infof("harvesting unknown machines") if err := task.processMachines(nil); err != nil { return errors.Annotate(err, "failed to process machines after safe mode disabled") } } - case <-retryChan: + case <-task.retryChanges: if err := task.processMachinesWithTransientErrors(); err != nil { return errors.Annotate(err, "failed to process machines with transient errors") } @@ -206,7 +185,7 @@ func (task *provisionerTask) SetHarvestMode(mode config.HarvestMode) { select { case task.harvestModeChan <- mode: - case <-task.Dying(): + case <-task.catacomb.Dying(): } } @@ -577,6 +556,7 @@ }, } } + var subnetsToZones map[network.Id][]string if provisioningInfo.SubnetsToZones != nil { // Convert subnet provider ids from string to network.Id. 
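The tomb-to-catacomb conversion above repeats one shape in each worker: catacomb.Invoke starts the loop, Init and catacomb.Add bind child workers (watchers, tasks) to the parent's lifetime, and Kill/Wait delegate to the catacomb. A condensed sketch of that shape follows; someWorker is an assumed stand-in, while the catacomb calls themselves are exactly the ones this diff uses.

    package example

    import (
    	"github.com/juju/errors"

    	"github.com/juju/juju/worker"
    	"github.com/juju/juju/worker/catacomb"
    )

    // someWorker is an illustrative stand-in for provisioner/provisionerTask.
    type someWorker struct {
    	catacomb catacomb.Catacomb
    }

    func newSomeWorker(children ...worker.Worker) (*someWorker, error) {
    	w := &someWorker{}
    	// Init registers already-started workers (e.g. watchers) so their
    	// lifetimes are bound to the catacomb, as NewProvisionerTask does.
    	if err := catacomb.Invoke(catacomb.Plan{
    		Site: &w.catacomb,
    		Work: w.loop,
    		Init: children,
    	}); err != nil {
    		return nil, errors.Trace(err)
    	}
    	return w, nil
    }

    func (w *someWorker) loop() error {
    	for {
    		select {
    		case <-w.catacomb.Dying():
    			// ErrDying propagates the kill reason without masking it.
    			return w.catacomb.ErrDying()
    		}
    	}
    }

    // Kill and Wait delegate to the catacomb, satisfying worker.Worker.
    func (w *someWorker) Kill()       { w.catacomb.Kill(nil) }
    func (w *someWorker) Wait() error { return w.catacomb.Wait() }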
@@ -586,6 +566,27 @@ } } + var endpointBindings map[string]network.Id + if len(provisioningInfo.EndpointBindings) != 0 { + endpointBindings = make(map[string]network.Id) + for endpoint, space := range provisioningInfo.EndpointBindings { + endpointBindings[endpoint] = network.Id(space) + } + } + possibleImageMetadata := make([]*imagemetadata.ImageMetadata, len(provisioningInfo.ImageMetadata)) + for i, metadata := range provisioningInfo.ImageMetadata { + possibleImageMetadata[i] = &imagemetadata.ImageMetadata{ + Id: metadata.ImageId, + Arch: metadata.Arch, + RegionAlias: metadata.Region, + RegionName: metadata.Region, + Storage: metadata.RootStorageType, + Stream: metadata.Stream, + VirtType: metadata.VirtType, + Version: metadata.Version, + } + } + return environs.StartInstanceParams{ Constraints: provisioningInfo.Constraints, Tools: possibleTools, @@ -594,6 +595,8 @@ DistributionGroup: machine.DistributionGroup, Volumes: volumes, SubnetsToZones: subnetsToZones, + EndpointBindings: endpointBindings, + ImageMetadata: possibleImageMetadata, }, nil } @@ -613,7 +616,7 @@ func (task *provisionerTask) startMachines(machines []*apiprovisioner.Machine) error { for _, m := range machines { - pInfo, err := task.blockUntilProvisioned(m.ProvisioningInfo) + pInfo, err := m.ProvisioningInfo() if err != nil { return task.setErrorStatus("fetching provisioning info for machine %q: %v", m, err) } @@ -625,10 +628,15 @@ assocProvInfoAndMachCfg(pInfo, instanceCfg) + var arch string + if pInfo.Constraints.Arch != nil { + arch = *pInfo.Constraints.Arch + } + possibleTools, err := task.toolsFinder.FindTools( - version.Current.Number, + version.Current, pInfo.Series, - pInfo.Constraints.Arch, + arch, ) if err != nil { return task.setErrorStatus("cannot find tools for machine %q: %v", m, err) @@ -667,6 +675,21 @@ } visitedNetworks := set.NewStrings() for _, info := range networkInfo { + // TODO(dimitern): The following few fields are required, but no longer + // matter and will be dropped or changed soon as part of making spaces + // and subnets usable across the board. + if info.NetworkName == "" { + info.NetworkName = network.DefaultPrivate + } + if info.ProviderId == "" { + info.ProviderId = network.DefaultPrivate + } + if info.CIDR == "" { + // TODO(dimitern): This is only when NOT using addressable + // containers, as we don't fetch the subnet details, but since + // networks in state are going away real soon, it's not important. + info.CIDR = "0.0.0.0/32" + } if !names.IsValidNetwork(info.NetworkName) { return nil, nil, errors.Errorf("invalid network name %q", info.NetworkName) } @@ -691,41 +714,27 @@ return networks, ifaces, nil } -func min(a, b int) int { - if a <= b { - return a - } - return b -} - func (task *provisionerTask) startMachine( machine *apiprovisioner.Machine, provisioningInfo *params.ProvisioningInfo, startInstanceParams environs.StartInstanceParams, ) error { + result, err := task.broker.StartInstance(startInstanceParams) if err != nil { - // If the broker has indicated that the error encountered is temporary and - // subsequent attempts may succeed, then retry as requested. - retryErr, ok := instance.GetRetryableCreationError(errors.Cause(err)) - if !ok || retryErr.RetryCount() <= 0 { - // Not a retryable error. Set the state to error, so the - // machine will be skipped next time until the error is - // resolved, but don't return an error; just keep going with - // the other machines. 
+ if !instance.IsRetryableCreationError(errors.Cause(err)) { + // Set the state to error, so the machine will be skipped next + // time until the error is resolved, but don't return an + // error; just keep going with the other machines. return task.setErrorStatus("cannot start instance for machine %q: %v", machine, err) } - - count := min(retryErr.RetryCount(), maxInstanceRetryCount) - delay := min(retryErr.RetryDelay(), maxInstanceRetryDelay) - - logger.Infof("retryable error received on start instance - retrying instance creation %d times with a %ds delay", count, delay) - for ; count > 0; count-- { - if delay > 0 { + logger.Infof("retryable error received on start instance: %v", err) + for count := task.retryStartInstanceStrategy.retryCount; count > 0; count-- { + if task.retryStartInstanceStrategy.retryDelay > 0 { select { - case <-task.tomb.Dying(): - return tomb.ErrDying - case <-time.After(time.Duration(delay) * time.Second): + case <-task.catacomb.Dying(): + return task.catacomb.ErrDying() + case <-time.After(task.retryStartInstanceStrategy.retryDelay): } } @@ -733,10 +742,10 @@ if err == nil { break } - if !instance.IsRetryableCreationError(errors.Cause(err)) || count == 1 { - // If we encountered a non-retryable error, or this is our last attempt, - // report that starting the instance failed. - return task.setErrorStatus("cannot start instance for machine after a retry %q: %v", machine, err) + // If this was the last attempt and an error was received, set the error + // status on the machine. + if count == 1 { + return task.setErrorStatus("cannot start instance for machine %q: %v", machine, err) } } } @@ -757,9 +766,7 @@ // for each interface, so we can later manage interfaces // dynamically at run-time. err = machine.SetInstanceInfo(inst.Id(), nonce, hardware, networks, ifaces, volumes, volumeAttachments) - if err != nil && params.IsCodeNotImplemented(err) { - return fmt.Errorf("cannot provision instance %v for machine %q with networks: not implemented", inst.Id(), machine) - } else if err == nil { + if err == nil { logger.Infof( "started machine %s as instance %s with hardware %q, networks %v, interfaces %v, volumes %v, volume attachments %v, subnets to zones %v", machine, inst.Id(), hardware, @@ -835,30 +842,3 @@ } return result } - -// ProvisioningInfo is new in 1.20; wait for the API server to be -// upgraded so we don't spew errors on upgrade. 
-func (task *provisionerTask) blockUntilProvisioned( - provision func() (*params.ProvisioningInfo, error), -) (*params.ProvisioningInfo, error) { - - var pInfo *params.ProvisioningInfo - var err error - for { - if pInfo, err = provision(); err == nil { - break - } - if params.IsCodeNotImplemented(err) { - logger.Infof("waiting for state server to be upgraded") - select { - case <-task.tomb.Dying(): - return nil, tomb.ErrDying - case <-time.After(15 * time.Second): - continue - } - } - return nil, err - } - - return pInfo, nil -} === modified file 'src/github.com/juju/juju/worker/provisioner/provisioner_test.go' --- src/github.com/juju/juju/worker/provisioner/provisioner_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/provisioner/provisioner_test.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,8 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" "github.com/juju/utils/set" gc "gopkg.in/check.v1" @@ -22,20 +24,20 @@ "github.com/juju/juju/apiserver/params" apiserverprovisioner "github.com/juju/juju/apiserver/provisioner" "github.com/juju/juju/constraints" - "github.com/juju/juju/environmentserver/authentication" + "github.com/juju/juju/controller/authentication" "github.com/juju/juju/environs" "github.com/juju/juju/environs/config" "github.com/juju/juju/environs/filestorage" "github.com/juju/juju/environs/imagemetadata" + imagetesting "github.com/juju/juju/environs/imagemetadata/testing" envtesting "github.com/juju/juju/environs/testing" "github.com/juju/juju/environs/tools" "github.com/juju/juju/instance" - "github.com/juju/juju/juju/arch" "github.com/juju/juju/juju/testing" - "github.com/juju/juju/mongo" "github.com/juju/juju/network" "github.com/juju/juju/provider/dummy" "github.com/juju/juju/state" + "github.com/juju/juju/state/cloudimagemetadata" "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/storage" "github.com/juju/juju/storage/poolmanager" @@ -44,6 +46,8 @@ coretesting "github.com/juju/juju/testing" coretools "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/worker" + dt "github.com/juju/juju/worker/dependency/testing" "github.com/juju/juju/worker/provisioner" ) @@ -67,7 +71,7 @@ attrs := map[string]interface{}{ config.ProvisionerHarvestModeKey: config.HarvestAll.String(), } - err := s.State.UpdateEnvironConfig(attrs, nil, nil) + err := s.State.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) s.BackingState.StartSync() @@ -119,13 +123,30 @@ s.JujuConnSuite.SetUpTest(c) + // We do not want to pull published image metadata for tests... + imagetesting.PatchOfficialDataSources(&s.CleanupSuite, "") + // We want an image to start test instances + err := s.State.CloudImageMetadataStorage.SaveMetadata([]cloudimagemetadata.Metadata{{ + cloudimagemetadata.MetadataAttributes{ + Region: "region", + Series: "trusty", + Arch: "amd64", + VirtType: "", + RootStorageType: "", + Source: "test", + }, + 10, + "-999", + }}) + c.Assert(err, jc.ErrorIsNil) + // Create the operations channel with more than enough space // for those tests that don't listen on it. 
op := make(chan dummy.Operation, 500) dummy.Listen(op) s.op = op - cfg, err := s.State.EnvironConfig() + cfg, err := s.State.ModelConfig() c.Assert(err, jc.ErrorIsNil) s.cfg = cfg @@ -140,12 +161,17 @@ Series: "quantal", Nonce: agent.BootstrapNonce, InstanceId: dummy.BootstrapInstanceId, - Jobs: []state.MachineJob{state.JobManageEnviron}, + Jobs: []state.MachineJob{state.JobManageModel}, }) c.Assert(err, jc.ErrorIsNil) c.Assert(machine.Id(), gc.Equals, "0") - err = machine.SetAgentVersion(version.Current) + current := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + err = machine.SetAgentVersion(current) c.Assert(err, jc.ErrorIsNil) password, err := utils.RandomPassword() @@ -160,45 +186,9 @@ c.Assert(s.provisioner, gc.NotNil) } -// breakDummyProvider changes the environment config in state in a way -// that causes the given environMethod of the dummy provider to return -// an error, which is also returned as a message to be checked. -func breakDummyProvider(c *gc.C, st *state.State, environMethod string) string { - attrs := map[string]interface{}{"broken": environMethod} - err := st.UpdateEnvironConfig(attrs, nil, nil) - c.Assert(err, jc.ErrorIsNil) - return fmt.Sprintf("dummy.%s is broken", environMethod) -} - -// invalidateEnvironment alters the environment configuration -// so the Settings returned from the watcher will not pass -// validation. -func (s *CommonProvisionerSuite) invalidateEnvironment(c *gc.C) { - st, err := state.Open(s.State.EnvironTag(), s.MongoInfo(c), mongo.DefaultDialOpts(), state.Policy(nil)) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - attrs := map[string]interface{}{"type": "unknown"} - err = st.UpdateEnvironConfig(attrs, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -// fixEnvironment undoes the work of invalidateEnvironment. -func (s *CommonProvisionerSuite) fixEnvironment(c *gc.C) error { - st, err := state.Open(s.State.EnvironTag(), s.MongoInfo(c), mongo.DefaultDialOpts(), state.Policy(nil)) - c.Assert(err, jc.ErrorIsNil) - defer st.Close() - attrs := map[string]interface{}{"type": s.cfg.AllAttrs()["type"]} - return st.UpdateEnvironConfig(attrs, nil, nil) -} - -// stopper is stoppable. -type stopper interface { - Stop() error -} - -// stop stops a stopper. -func stop(c *gc.C, s stopper) { - c.Assert(s.Stop(), jc.ErrorIsNil) +// stop stops a Worker. 
+func stop(c *gc.C, w worker.Worker) { + c.Assert(worker.Stop(w), jc.ErrorIsNil) } func (s *CommonProvisionerSuite) startUnknownInstance(c *gc.C, id string) instance.Instance { @@ -258,7 +248,7 @@ c.Assert(o.Networks, jc.DeepEquals, networks) c.Assert(o.NetworkInfo, jc.DeepEquals, networkInfo) c.Assert(o.Volumes, jc.DeepEquals, volumes) - c.Assert(o.AgentEnvironment["SECURE_STATESERVER_CONNECTION"], gc.Equals, strconv.FormatBool(secureServerConnection)) + c.Assert(o.AgentEnvironment["SECURE_CONTROLLER_CONNECTION"], gc.Equals, strconv.FormatBool(secureServerConnection)) var jobs []multiwatcher.MachineJob for _, job := range m.Jobs() { @@ -268,8 +258,8 @@ if checkPossibleTools != nil { for _, t := range o.PossibleTools { - url := fmt.Sprintf("https://%s/environment/%s/tools/%s", - s.st.Addr(), coretesting.EnvironmentTag.Id(), t.Version) + url := fmt.Sprintf("https://%s/model/%s/tools/%s", + s.st.Addr(), coretesting.ModelTag.Id(), t.Version) c.Check(t.URL, gc.Equals, url) t.URL = "" } @@ -437,7 +427,19 @@ func (s *CommonProvisionerSuite) newEnvironProvisioner(c *gc.C) provisioner.Provisioner { machineTag := names.NewMachineTag("0") agentConfig := s.AgentConfigForTag(c, machineTag) - return provisioner.NewEnvironProvisioner(s.provisioner, agentConfig) + getResource := dt.StubGetResource(dt.StubResources{ + "agent": dt.StubResource{Output: mockAgent{config: agentConfig}}, + "api-caller": dt.StubResource{Output: s.st}, + }) + manifold := provisioner.Manifold(provisioner.ManifoldConfig{ + AgentName: "agent", + APICallerName: "api-caller", + }) + untyped, err := manifold.Start(getResource) + c.Assert(err, jc.ErrorIsNil) + typed, ok := untyped.(provisioner.Provisioner) + c.Assert(ok, jc.IsTrue) + return typed } func (s *CommonProvisionerSuite) addMachine() (*state.Machine, error) { @@ -452,8 +454,8 @@ }) } -func (s *CommonProvisionerSuite) ensureAvailability(c *gc.C, n int) []*state.Machine { - changes, err := s.BackingState.EnsureAvailability(n, s.defaultConstraints, coretesting.FakeDefaultSeries, nil) +func (s *CommonProvisionerSuite) enableHA(c *gc.C, n int) []*state.Machine { + changes, err := s.BackingState.EnableHA(n, s.defaultConstraints, coretesting.FakeDefaultSeries, nil) c.Assert(err, jc.ErrorIsNil) added := make([]*state.Machine, len(changes.Added)) for i, mid := range changes.Added { @@ -466,7 +468,7 @@ func (s *ProvisionerSuite) TestProvisionerStartStop(c *gc.C) { p := s.newEnvironProvisioner(c) - c.Assert(p.Stop(), jc.ErrorIsNil) + stop(c, p) } func (s *ProvisionerSuite) TestSimple(c *gc.C) { @@ -508,7 +510,9 @@ // Set a current version that does not match the // agent-version in the environ config. currentVersion := version.MustParseBinary("1.2.3-quantal-arm64") - s.PatchValue(&version.Current, currentVersion) + s.PatchValue(&arch.HostArch, func() string { return currentVersion.Arch }) + s.PatchValue(&series.HostSeries, func() string { return currentVersion.Series }) + s.PatchValue(&version.Current, currentVersion.Number) // Upload some plausible matches, and some that should be filtered out. compatibleVersion := version.MustParseBinary("1.2.3-quantal-amd64") @@ -576,44 +580,13 @@ s.checkNoOperations(c) } -func (s *ProvisionerSuite) TestProvisionerSetsErrorStatusWhenStartInstanceFailed(c *gc.C) { - brokenMsg := breakDummyProvider(c, s.State, "StartInstance") - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // Check that an instance is not provisioned when the machine is created... 
- m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - s.checkNoOperations(c) - - t0 := time.Now() - for time.Since(t0) < coretesting.LongWait { - // And check the machine status is set to error. - statusInfo, err := m.Status() - c.Assert(err, jc.ErrorIsNil) - if statusInfo.Status == state.StatusPending { - time.Sleep(coretesting.ShortWait) - continue - } - c.Assert(statusInfo.Status, gc.Equals, state.StatusError) - c.Assert(statusInfo.Message, gc.Equals, brokenMsg) - break - } - - // Unbreak the environ config. - err = s.fixEnvironment(c) - c.Assert(err, jc.ErrorIsNil) - - // Restart the PA to make sure the machine is skipped again. - stop(c, p) - p = s.newEnvironProvisioner(c) - defer stop(c, p) - s.checkNoOperations(c) -} - func (s *ProvisionerSuite) TestProvisionerFailedStartInstanceWithInjectedCreationError(c *gc.C) { + // Set the retry delay to 0, and retry count to 2 to keep tests short + s.PatchValue(provisioner.RetryStrategyDelay, 0*time.Second) + s.PatchValue(provisioner.RetryStrategyCount, 2) + // create the error injection channel - errorInjectionChannel := make(chan error, 2) + errorInjectionChannel := make(chan error, 3) p := s.newEnvironProvisioner(c) defer stop(c, p) @@ -622,10 +595,10 @@ cleanup := dummy.PatchTransientErrorInjectionChannel(errorInjectionChannel) defer cleanup() - retryableError := instance.NewRetryableCreationError("container failed to start and was destroyed", 3, 0) + retryableError := instance.NewRetryableCreationError("container failed to start and was destroyed") destroyError := errors.New("container failed to start and failed to destroy: manual cleanup of containers needed") - // Send both error messages to make sure that the provisioner gives up retrying - // once a non-retryable error is returned. + // send the error message three times, because the provisioner will retry twice as patched above. 
+ errorInjectionChannel <- retryableError errorInjectionChannel <- retryableError errorInjectionChannel <- destroyError @@ -651,6 +624,10 @@ } func (s *ProvisionerSuite) TestProvisionerSucceedStartInstanceWithInjectedRetryableCreationError(c *gc.C) { + // Set the retry delay to 0, and retry count to 2 to keep tests short + s.PatchValue(provisioner.RetryStrategyDelay, 0*time.Second) + s.PatchValue(provisioner.RetryStrategyCount, 2) + // create the error injection channel errorInjectionChannel := make(chan error, 1) c.Assert(errorInjectionChannel, gc.NotNil) @@ -664,7 +641,7 @@ // send the error message once // - instance creation should succeed - retryableError := instance.NewRetryableCreationError("container failed to start and was destroyed", 3, 0) + retryableError := instance.NewRetryableCreationError("container failed to start and was destroyed") errorInjectionChannel <- retryableError m, err := s.addMachine() @@ -673,6 +650,10 @@ } func (s *ProvisionerSuite) TestProvisionerSucceedStartInstanceWithInjectedWrappedRetryableCreationError(c *gc.C) { + // Set the retry delay to 0, and retry count to 1 to keep tests short + s.PatchValue(provisioner.RetryStrategyDelay, 0*time.Second) + s.PatchValue(provisioner.RetryStrategyCount, 1) + // create the error injection channel errorInjectionChannel := make(chan error, 1) c.Assert(errorInjectionChannel, gc.NotNil) @@ -686,7 +667,7 @@ // send the error message once // - instance creation should succeed - retryableError := errors.Wrap(errors.New(""), instance.NewRetryableCreationError("container failed to start and was destroyed", 1, 0)) + retryableError := errors.Wrap(errors.New(""), instance.NewRetryableCreationError("container failed to start and was destroyed")) errorInjectionChannel <- retryableError m, err := s.addMachine() @@ -707,7 +688,6 @@ defer cleanup() // send the error message once - // - instance creation should succeed nonRetryableError := errors.New("some nonretryable error") errorInjectionChannel <- nonRetryableError @@ -732,117 +712,6 @@ c.Fatal("Test took too long to complete") } -func (s *ProvisionerSuite) TestProvisionerNoRetriesForZeroRetryCount(c *gc.C) { - // create the error injection channel - errorInjectionChannel := make(chan error, 1) - - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // patch the dummy provider error injection channel - cleanup := dummy.PatchTransientErrorInjectionChannel(errorInjectionChannel) - defer cleanup() - - retryableError := instance.NewRetryableCreationError("not really a retryable error", 0, 0) - errorInjectionChannel <- retryableError - - m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - s.checkNoOperations(c) - - t0 := time.Now() - for time.Since(t0) < coretesting.LongWait { - // And check the machine status is set to error. - statusInfo, err := m.Status() - c.Assert(err, jc.ErrorIsNil) - if statusInfo.Status == state.StatusPending { - time.Sleep(coretesting.ShortWait) - continue - } - c.Assert(statusInfo.Status, gc.Equals, state.StatusError) - // check that the status matches the error message - c.Assert(statusInfo.Message, gc.Equals, "not really a retryable error") - return - } - c.Fatal("Test took too long to complete") -} - -func (s *ProvisionerSuite) TestProvisionerRetryableErrorTriggersMultipleAttempts(c *gc.C) { - // Create the error injection channel. Inject 7 errors to - // verify that we give up after the max number of retries (5). 
- errorInjectionChannel := make(chan error, 7) - - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // patch the dummy provider error injection channel - cleanup := dummy.PatchTransientErrorInjectionChannel(errorInjectionChannel) - defer cleanup() - - for i := 0; i < 7; i++ { - msg := fmt.Sprintf("failure: %d", i) - retryableError := instance.NewRetryableCreationError(msg, 10, 0) - errorInjectionChannel <- retryableError - } - - m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - s.checkNoOperations(c) - - t0 := time.Now() - for time.Since(t0) < coretesting.LongWait { - // And check the machine status is set to error. - statusInfo, err := m.Status() - c.Assert(err, jc.ErrorIsNil) - if statusInfo.Status == state.StatusPending { - time.Sleep(coretesting.ShortWait) - continue - } - c.Assert(statusInfo.Status, gc.Equals, state.StatusError) - // check that the status matches the error message - c.Assert(statusInfo.Message, gc.Equals, "failure: 5") - return - } - c.Fatal("Test took too long to complete") -} - -func (s *ProvisionerSuite) TestProvisionerRetryableErrorMultipleAttemptsHonorsMaxDelay(c *gc.C) { - // Create the error injection channel. - errorInjectionChannel := make(chan error, 1) - - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // patch the dummy provider error injection channel - cleanup := dummy.PatchTransientErrorInjectionChannel(errorInjectionChannel) - defer cleanup() - - retryableError := instance.NewRetryableCreationError("retry with long delay error", 1, 600) - errorInjectionChannel <- retryableError - - m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - s.PatchValue(provisioner.MaxInstanceRetryDelay, 10) - - t0 := time.Now() - for time.Since(t0) < (30 * time.Second) { - // Check if the machine has been provisioned. - _, err := m.InstanceId() - if errors.IsNotProvisioned(err) { - continue - } - c.Assert(err, jc.ErrorIsNil) - - // Check that we delayed at least 10 seconds. - elapsed := time.Since(t0) - c.Assert(elapsed, jc.GreaterThan, (10 * time.Second)) - s.checkStartInstanceNoSecureConnection(c, m) - return - } - c.Fatal("Test took too long to complete") -} - func (s *ProvisionerSuite) TestProvisionerStopRetryingIfDying(c *gc.C) { // Create the error injection channel and inject // a retryable error @@ -855,8 +724,7 @@ cleanup := dummy.PatchTransientErrorInjectionChannel(errorInjectionChannel) defer cleanup() - // Inject a retry error with a delay longer than coretesting.LongWait - retryableError := instance.NewRetryableCreationError("retryable error", 10, 30) + retryableError := instance.NewRetryableCreationError("container failed to start and was destroyed") errorInjectionChannel <- retryableError m, err := s.addMachine() @@ -1081,12 +949,12 @@ func (s *ProvisionerSuite) TestProvisioningMachinesWithSpacesSuccess(c *gc.C) { p := s.newEnvironProvisioner(c) - defer p.Stop() + defer stop(c, p) // Add the spaces used in constraints. - _, err := s.State.AddSpace("space1", nil, false) + _, err := s.State.AddSpace("space1", "", nil, false) c.Assert(err, jc.ErrorIsNil) - _, err = s.State.AddSpace("space2", nil, false) + _, err = s.State.AddSpace("space2", "", nil, false) c.Assert(err, jc.ErrorIsNil) // Add 1 subnet into space1, and 2 into space2. 
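The retry tests above all share one fixture: errors sent on the dummy provider's injection channel are returned, in order, by subsequent StartInstance calls. A compressed, hypothetical fragment showing the shape of that setup follows; the queued errors and abbreviated assertions are illustrative, while the patching and constructor calls are the ones these tests use.

    func (s *ProvisionerSuite) exampleErrorInjection(c *gc.C) {
    	// Queue one transient failure and one fatal one: the provisioner
    	// retries through the first and sets an error status on the second.
    	errorInjectionChannel := make(chan error, 2)
    	cleanup := dummy.PatchTransientErrorInjectionChannel(errorInjectionChannel)
    	defer cleanup()

    	errorInjectionChannel <- instance.NewRetryableCreationError("transient failure")
    	errorInjectionChannel <- errors.New("fatal failure")

    	_, err := s.addMachine()
    	c.Assert(err, jc.ErrorIsNil)
    	s.checkNoOperations(c)
    }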
@@ -1183,7 +1051,7 @@ } func (s *ProvisionerSuite) TestProvisioningMachinesFailsWithEmptySpaces(c *gc.C) { - _, err := s.State.AddSpace("empty", nil, false) + _, err := s.State.AddSpace("empty", "", nil, false) c.Assert(err, jc.ErrorIsNil) cons := constraints.MustParse( s.defaultConstraints.String(), "spaces=empty", @@ -1212,7 +1080,7 @@ c.Assert(err, jc.ErrorIsNil) p := s.newEnvironProvisioner(c) - defer p.Stop() + defer stop(c, p) // Add and provision a machine with volumes specified. requestedVolumes := []state.MachineVolumeParams{{ @@ -1249,62 +1117,6 @@ s.waitRemoved(c, m) } -func (s *ProvisionerSuite) TestProvisioningDoesNotOccurWithAnInvalidEnvironment(c *gc.C) { - s.invalidateEnvironment(c) - - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // try to create a machine - _, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - // the PA should not create it - s.checkNoOperations(c) -} - -func (s *ProvisionerSuite) TestProvisioningOccursWithFixedEnvironment(c *gc.C) { - s.invalidateEnvironment(c) - - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // try to create a machine - m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - // the PA should not create it - s.checkNoOperations(c) - - err = s.fixEnvironment(c) - c.Assert(err, jc.ErrorIsNil) - - s.checkStartInstanceNoSecureConnection(c, m) -} - -func (s *ProvisionerSuite) TestProvisioningDoesOccurAfterInvalidEnvironmentPublished(c *gc.C) { - s.PatchValue(provisioner.GetToolsFinder, func(*apiprovisioner.State) provisioner.ToolsFinder { - return mockToolsFinder{} - }) - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // place a new machine into the state - m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - s.checkStartInstanceNoSecureConnection(c, m) - - s.invalidateEnvironment(c) - - // create a second machine - m, err = s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - // the PA should create it using the old environment - s.checkStartInstanceNoSecureConnection(c, m) -} - func (s *ProvisionerSuite) TestProvisioningDoesNotProvisionTheSameMachineAfterRestart(c *gc.C) { p := s.newEnvironProvisioner(c) defer stop(c, p) @@ -1362,55 +1174,6 @@ c.Assert(m0.Life(), gc.Equals, state.Dying) } -func (s *ProvisionerSuite) TestProvisioningRecoversAfterInvalidEnvironmentPublished(c *gc.C) { - s.PatchValue(provisioner.GetToolsFinder, func(*apiprovisioner.State) provisioner.ToolsFinder { - return mockToolsFinder{} - }) - p := s.newEnvironProvisioner(c) - defer stop(c, p) - - // place a new machine into the state - m, err := s.addMachine() - c.Assert(err, jc.ErrorIsNil) - s.checkStartInstanceNoSecureConnection(c, m) - - s.invalidateEnvironment(c) - s.BackingState.StartSync() - - // create a second machine - m, err = s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - // the PA should create it using the old environment - s.checkStartInstanceNoSecureConnection(c, m) - - err = s.fixEnvironment(c) - c.Assert(err, jc.ErrorIsNil) - - // insert our observer - cfgObserver := make(chan *config.Config, 1) - provisioner.SetObserver(p, cfgObserver) - - err = s.State.UpdateEnvironConfig(map[string]interface{}{"secret": "beef"}, nil, nil) - c.Assert(err, jc.ErrorIsNil) - - s.BackingState.StartSync() - - // wait for the PA to load the new configuration - select { - case <-cfgObserver: - case <-time.After(coretesting.LongWait): - c.Fatalf("PA did not action config change") - } - - // create a third machine - m, err = s.addMachine() - c.Assert(err, jc.ErrorIsNil) - - // the PA should create it using the new environment - 
s.checkStartInstanceCustom(c, m, "beef", s.defaultConstraints, nil, nil, nil, nil, false, nil, true) -} - type mockMachineGetter struct{} func (*mockMachineGetter) Machine(names.MachineTag) (*apiprovisioner.Machine, error) { @@ -1442,7 +1205,7 @@ &mockToolsFinder{}, ) defer func() { - err := task.Stop() + err := worker.Stop(task) c.Assert(err, gc.ErrorMatches, ".*failed to get machine.*") }() s.checkNoOperations(c) @@ -1462,14 +1225,16 @@ toolsFinder provisioner.ToolsFinder, ) provisioner.ProvisionerTask { - machineWatcher, err := s.provisioner.WatchEnvironMachines() + machineWatcher, err := s.provisioner.WatchModelMachines() c.Assert(err, jc.ErrorIsNil) retryWatcher, err := s.provisioner.WatchMachineErrorRetry() c.Assert(err, jc.ErrorIsNil) auth, err := authentication.NewAPIAuthenticator(s.provisioner) c.Assert(err, jc.ErrorIsNil) - return provisioner.NewProvisionerTask( + retryStrategy := provisioner.NewRetryStrategy(0*time.Second, 0) + + w, err := provisioner.NewProvisionerTask( names.NewMachineTag("0"), harvestingMethod, machineGetter, @@ -1480,7 +1245,10 @@ auth, imagemetadata.ReleasedStream, true, + retryStrategy, ) + c.Assert(err, jc.ErrorIsNil) + return w } func (s *ProvisionerSuite) TestHarvestNoneReapsNothing(c *gc.C) { @@ -1630,7 +1398,7 @@ task := s.newProvisionerTask(c, config.HarvestAll, broker, s.provisioner, mockToolsFinder{}) defer stop(c, task) - added := s.ensureAvailability(c, 3) + added := s.enableHA(c, 3) c.Assert(added, gc.HasLen, 2) byId := make(map[string]*state.Machine) for _, m := range added { @@ -1666,13 +1434,22 @@ type mockToolsFinder struct { } -func (f mockToolsFinder) FindTools(number version.Number, series string, a *string) (coretools.List, error) { +func (f mockToolsFinder) FindTools(number version.Number, series string, a string) (coretools.List, error) { v, err := version.ParseBinary(fmt.Sprintf("%s-%s-%s", number, series, arch.HostArch())) if err != nil { return nil, err } - if a != nil { - v.Arch = *a + if a != "" { + v.Arch = a } return coretools.List{&coretools.Tools{Version: v}}, nil } + +type mockAgent struct { + agent.Agent + config agent.Config +} + +func (mock mockAgent) CurrentConfig() agent.Config { + return mock.config +} === modified file 'src/github.com/juju/juju/worker/proxyupdater/manifold.go' --- src/github.com/juju/juju/worker/proxyupdater/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/proxyupdater/manifold.go 2016-03-22 15:18:22 +0000 @@ -1,30 +1,40 @@ -// Copyright 2015 Canonical Ltd. +// Copyright 2016 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package proxyupdater import ( + "github.com/juju/errors" + "github.com/juju/juju/agent" "github.com/juju/juju/api/base" - "github.com/juju/juju/api/environment" + "github.com/juju/juju/api/proxyupdater" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" "github.com/juju/juju/worker/util" + "github.com/juju/names" ) // ManifoldConfig defines the names of the manifolds on which a Manifold will depend. -type ManifoldConfig util.ApiManifoldConfig +type ManifoldConfig util.PostUpgradeManifoldConfig // Manifold returns a dependency manifold that runs a proxy updater worker, // using the api connection resource named in the supplied config. 
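
// newProvisionerTask above now threads retry behaviour through an explicit
// provisioner.NewRetryStrategy(0*time.Second, 0) value, so unit tests never
// sleep between attempts. A sketch of what such a value plausibly bundles;
// the type and field names here are assumptions for illustration, not the
// provisioner package's actual definition.
package retrystrategysketch

import "time"

// RetryStrategy pairs the delay between provisioning retries with how many
// retries to attempt before marking the machine as failed.
type RetryStrategy struct {
	retryDelay time.Duration
	retryCount int
}

// NewRetryStrategy mirrors the constructor shape used by the test suite;
// zero values make retries immediate and effectively single-shot.
func NewRetryStrategy(delay time.Duration, count int) RetryStrategy {
	return RetryStrategy{retryDelay: delay, retryCount: count}
}
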
func Manifold(config ManifoldConfig) dependency.Manifold { - return util.ApiManifold(util.ApiManifoldConfig(config), newWorker) + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) } // newWorker is not currently tested; it should eventually replace New as the // package's exposed factory func, and then all tests should pass through it. -func newWorker(apiCaller base.APICaller) (worker.Worker, error) { +func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + agentConfig := a.CurrentConfig() + switch tag := agentConfig.Tag().(type) { + case names.MachineTag, names.UnitTag: + default: + return nil, errors.Errorf("unknown agent type: %T", tag) + } + // TODO(fwereade): This shouldn't be an "environment" facade, it // should be specific to the proxyupdater, and be watching for // *proxy settings* changes, not just watching the "environment". - return New(environment.NewFacade(apiCaller), false), nil + return NewWorker(proxyupdater.NewFacade(apiCaller)) } === added file 'src/github.com/juju/juju/worker/proxyupdater/manifold_test.go' --- src/github.com/juju/juju/worker/proxyupdater/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/proxyupdater/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package proxyupdater_test + +import ( + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + apiproxyupdater "github.com/juju/juju/api/proxyupdater" + "github.com/juju/juju/worker" + proxyup "github.com/juju/juju/worker/proxyupdater" + workertesting "github.com/juju/juju/worker/testing" +) + +type ManifoldSuite struct { + testing.IsolationSuite + newCalled bool +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.newCalled = false + s.PatchValue(&proxyup.NewWorker, + func(_ *apiproxyupdater.Facade) (worker.Worker, error) { + s.newCalled = true + return nil, nil + }, + ) +} + +func (s *ManifoldSuite) TestMachineShouldWrite(c *gc.C) { + config := proxyup.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + proxyup.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.newCalled, jc.IsTrue) +} + +func (s *ManifoldSuite) TestMachineShouldntWrite(c *gc.C) { + config := proxyup.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + proxyup.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.newCalled, jc.IsTrue) +} + +func (s *ManifoldSuite) TestUnit(c *gc.C) { + config := proxyup.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + proxyup.Manifold(config), + &fakeAgent{tag: names.NewUnitTag("foo/0")}, + nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.newCalled, jc.IsTrue) +} + +func (s *ManifoldSuite) TestNonAgent(c *gc.C) { + config := proxyup.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + proxyup.Manifold(config), + &fakeAgent{tag: names.NewUserTag("foo")}, + nil) + c.Assert(err, gc.ErrorMatches, "unknown agent type:.+") + c.Assert(s.newCalled, jc.IsFalse) +} + +type fakeAgent struct { + agent.Agent + tag names.Tag 
+}
+
+func (a *fakeAgent) CurrentConfig() agent.Config {
+	return &fakeConfig{tag: a.tag}
+}
+
+type fakeConfig struct {
+	agent.Config
+	tag names.Tag
+}
+
+func (c *fakeConfig) Tag() names.Tag {
+	return c.tag
+}

=== modified file 'src/github.com/juju/juju/worker/proxyupdater/proxyupdater.go'
--- src/github.com/juju/juju/worker/proxyupdater/proxyupdater.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/worker/proxyupdater/proxyupdater.go	2016-03-22 15:18:22 +0000
@@ -8,16 +8,18 @@
 	"io/ioutil"
 	"path"
 
+	"github.com/juju/errors"
 	"github.com/juju/loggo"
 	"github.com/juju/utils"
 	"github.com/juju/utils/exec"
+	"github.com/juju/utils/os"
 	"github.com/juju/utils/packaging/commands"
 	"github.com/juju/utils/packaging/config"
 	proxyutils "github.com/juju/utils/proxy"
+	"github.com/juju/utils/series"
 
-	"github.com/juju/juju/api/environment"
-	"github.com/juju/juju/api/watcher"
-	"github.com/juju/juju/version"
+	apiproxyupdater "github.com/juju/juju/api/proxyupdater"
+	"github.com/juju/juju/watcher"
 	"github.com/juju/juju/worker"
 )
 
@@ -45,11 +47,10 @@
 // changes are apt proxy configuration and the juju proxies stored in the juju
 // proxy file.
 type proxyWorker struct {
-	api      *environment.Facade
+	api      *apiproxyupdater.Facade
 	aptProxy proxyutils.Settings
 	proxy    proxyutils.Settings
 
-	writeSystemFiles bool
 	// The whole point of the first value is to make sure that the files
 	// are written out the first time through, even if they are the same as
 	// "last" time, as the initial value for last time is the zeroed struct.
@@ -61,37 +62,32 @@
 	first bool
 }
 
-var _ worker.NotifyWatchHandler = (*proxyWorker)(nil)
-
-// New returns a worker.Worker that updates proxy environment variables for the
-// process; and, if writeSystemFiles is true, for the whole machine.
-var New = func(api *environment.Facade, writeSystemFiles bool) worker.Worker {
-	logger.Debugf("write system files: %v", writeSystemFiles)
+// NewWorker returns a worker.Worker that updates proxy environment variables for the
+// process and for the whole machine.
+var NewWorker = func(api *apiproxyupdater.Facade) (worker.Worker, error) {
 	envWorker := &proxyWorker{
-		api:              api,
-		writeSystemFiles: writeSystemFiles,
-		first:            true,
-	}
-	return worker.NewNotifyWorker(envWorker)
+		api:   api,
+		first: true,
+	}
+	w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{
+		Handler: envWorker,
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return w, nil
 }
 
 func (w *proxyWorker) writeEnvironmentFile() error {
-	// Writing the environment file is handled by executing the script for two
-	// primary reasons:
-	//
-	// 1: In order to have the local provider specify the environment settings
-	// for the machine agent running on the host, this worker needs to run,
-	// but it shouldn't be touching any files on the disk. If however there is
-	// an ubuntu user, it will. This shouldn't be a problem.
-	//
-	// 2: On cloud-instance ubuntu images, the ubuntu user is uid 1000, but in
+	// Writing the environment file is handled by executing the script:
+	//
+	// On cloud-instance ubuntu images, the ubuntu user is uid 1000, but in
 	// the situation where the ubuntu user has been created as a part of the
 	// manual provisioning process, the user will exist, and will not have the
 	// same uid/gid as the default cloud image.
 	//
-	// It is easier to shell out to check both these things, and is also the
-	// same way that the file is written in the cloud-init process, so
-	// consistency FTW.
+ // It is easier to shell out to check, and is also the same way that the file + // is written in the cloud-init process, so consistency FTW. filePath := path.Join(ProxyDirectory, ProxyFile) result, err := exec.RunCommands(exec.RunParams{ Commands: fmt.Sprintf( @@ -133,12 +129,13 @@ } func (w *proxyWorker) writeEnvironment() error { - osystem, err := version.GetOSFromSeries(version.Current.Series) + // TODO(dfc) this should be replaced with a switch on os.HostOS() + osystem, err := series.GetOSFromSeries(series.HostSeries()) if err != nil { return err } switch osystem { - case version.Windows: + case os.Windows: return w.writeEnvironmentToRegistry() default: return w.writeEnvironmentFile() @@ -150,11 +147,9 @@ if proxySettings != w.proxy || w.first { logger.Debugf("new proxy settings %#v", proxySettings) w.proxy = proxySettings - if w.writeSystemFiles { - if err := w.writeEnvironment(); err != nil { - // It isn't really fatal, but we should record it. - logger.Errorf("error writing proxy environment file: %v", err) - } + if err := w.writeEnvironment(); err != nil { + // It isn't really fatal, but we should record it. + logger.Errorf("error writing proxy environment file: %v", err) } } } @@ -162,11 +157,11 @@ // getPackageCommander is a helper function which returns the // package commands implementation for the current system. func getPackageCommander() (commands.PackageCommander, error) { - return commands.NewPackageCommander(version.Current.Series) + return commands.NewPackageCommander(series.HostSeries()) } func (w *proxyWorker) handleAptProxyValues(aptSettings proxyutils.Settings) error { - if w.writeSystemFiles && (aptSettings != w.aptProxy || w.first) { + if aptSettings != w.aptProxy || w.first { logger.Debugf("new apt proxy settings %#v", aptSettings) paccmder, err := getPackageCommander() if err != nil { @@ -186,7 +181,7 @@ } func (w *proxyWorker) onChange() error { - env, err := w.api.EnvironConfig() + env, err := w.api.ModelConfig() if err != nil { return err } @@ -208,7 +203,7 @@ } w.first = false Started() - return w.api.WatchForEnvironConfigChanges() + return w.api.WatchForModelConfigChanges() } // Handle is defined on the worker.NotifyWatchHandler interface. === modified file 'src/github.com/juju/juju/worker/proxyupdater/proxyupdater_test.go' --- src/github.com/juju/juju/worker/proxyupdater/proxyupdater_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/proxyupdater/proxyupdater_test.go 2016-03-22 15:18:22 +0000 @@ -15,15 +15,15 @@ "github.com/juju/utils/packaging/commands" pacconfig "github.com/juju/utils/packaging/config" "github.com/juju/utils/proxy" + "github.com/juju/utils/series" gc "gopkg.in/check.v1" "github.com/juju/juju/api" - "github.com/juju/juju/api/environment" + apiproxyupdater "github.com/juju/juju/api/proxyupdater" "github.com/juju/juju/environs/config" jujutesting "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testing" - "github.com/juju/juju/version" "github.com/juju/juju/worker" "github.com/juju/juju/worker/proxyupdater" ) @@ -31,9 +31,9 @@ type ProxyUpdaterSuite struct { jujutesting.JujuConnSuite - apiRoot api.Connection - environmentAPI *environment.Facade - machine *state.Machine + apiRoot api.Connection + proxyUpdaterAPI *apiproxyupdater.Facade + machine *state.Machine proxyFile string started chan struct{} @@ -53,8 +53,8 @@ s.JujuConnSuite.SetUpTest(c) s.apiRoot, s.machine = s.OpenAPIAsNewMachine(c) // Create the environment API facade. 
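
// NewWorker above now builds the proxy updater from a handler plus
// watcher.NewNotifyWorker(watcher.NotifyConfig{...}) instead of the old
// worker.NewNotifyWorker(handler). A stdlib-only sketch of the underlying
// pattern - a loop that calls a handler once per change notification - with
// all names being illustrative stand-ins rather than juju's watcher API.
package notifysketch

// notifyHandler is the contract a worker like proxyWorker fulfils: SetUp
// returns the channel of change notifications, Handle reacts to one change,
// TearDown cleans up.
type notifyHandler interface {
	SetUp() (<-chan struct{}, error)
	Handle() error
	TearDown() error
}

// runNotify drives handler until the notification channel closes or a call
// fails, mirroring the life cycle the proxy updater relies on.
func runNotify(handler notifyHandler) error {
	changes, err := handler.SetUp()
	if err != nil {
		return err
	}
	defer handler.TearDown() // any teardown error is deliberately dropped here
	for range changes {
		if err := handler.Handle(); err != nil {
			return err
		}
	}
	return nil
}
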
- s.environmentAPI = s.apiRoot.Environment() - c.Assert(s.environmentAPI, gc.NotNil) + s.proxyUpdaterAPI = apiproxyupdater.NewFacade(s.apiRoot) + c.Assert(s.proxyUpdaterAPI, gc.NotNil) proxyDir := c.MkDir() s.PatchValue(&proxyupdater.ProxyDirectory, proxyDir) @@ -113,8 +113,10 @@ } func (s *ProxyUpdaterSuite) TestRunStop(c *gc.C) { - updater := proxyupdater.New(s.environmentAPI, false) - c.Assert(worker.Stop(updater), gc.IsNil) + updater, err := proxyupdater.NewWorker(s.proxyUpdaterAPI) + c.Assert(err, jc.ErrorIsNil) + err = worker.Stop(updater) + c.Assert(err, jc.ErrorIsNil) } func (s *ProxyUpdaterSuite) updateConfig(c *gc.C) (proxy.Settings, proxy.Settings) { @@ -143,7 +145,7 @@ attrs[k] = v } - err := s.State.UpdateEnvironConfig(attrs, nil, nil) + err := s.State.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) return proxySettings, aptProxySettings @@ -152,13 +154,14 @@ func (s *ProxyUpdaterSuite) TestInitialState(c *gc.C) { proxySettings, aptProxySettings := s.updateConfig(c) - updater := proxyupdater.New(s.environmentAPI, true) + updater, err := proxyupdater.NewWorker(s.proxyUpdaterAPI) + c.Assert(err, jc.ErrorIsNil) defer worker.Stop(updater) s.waitProxySettings(c, proxySettings) s.waitForFile(c, s.proxyFile, proxySettings.AsScriptEnvironment()+"\n") - paccmder, err := commands.NewPackageCommander(version.Current.Series) + paccmder, err := commands.NewPackageCommander(series.HostSeries()) c.Assert(err, jc.ErrorIsNil) s.waitForFile(c, pacconfig.AptProxyConfigFile, paccmder.ProxyConfigContents(aptProxySettings)+"\n") } @@ -166,14 +169,15 @@ func (s *ProxyUpdaterSuite) TestWriteSystemFiles(c *gc.C) { proxySettings, aptProxySettings := s.updateConfig(c) - updater := proxyupdater.New(s.environmentAPI, true) + updater, err := proxyupdater.NewWorker(s.proxyUpdaterAPI) + c.Assert(err, jc.ErrorIsNil) defer worker.Stop(updater) s.waitForPostSetup(c) s.waitProxySettings(c, proxySettings) s.waitForFile(c, s.proxyFile, proxySettings.AsScriptEnvironment()+"\n") - paccmder, err := commands.NewPackageCommander(version.Current.Series) + paccmder, err := commands.NewPackageCommander(series.HostSeries()) c.Assert(err, jc.ErrorIsNil) s.waitForFile(c, pacconfig.AptProxyConfigFile, paccmder.ProxyConfigContents(aptProxySettings)+"\n") } @@ -190,7 +194,8 @@ proxySettings, _ := s.updateConfig(c) - updater := proxyupdater.New(s.environmentAPI, true) + updater, err := proxyupdater.NewWorker(s.proxyUpdaterAPI) + c.Assert(err, jc.ErrorIsNil) defer worker.Stop(updater) s.waitForPostSetup(c) s.waitProxySettings(c, proxySettings) @@ -204,15 +209,3 @@ assertEnv("ftp_proxy", proxySettings.Ftp) assertEnv("no_proxy", proxySettings.NoProxy) } - -func (s *ProxyUpdaterSuite) TestDontWriteSystemFiles(c *gc.C) { - proxySettings, _ := s.updateConfig(c) - - updater := proxyupdater.New(s.environmentAPI, false) - defer worker.Stop(updater) - s.waitForPostSetup(c) - - s.waitProxySettings(c, proxySettings) - c.Assert(pacconfig.AptProxyConfigFile, jc.DoesNotExist) - c.Assert(s.proxyFile, jc.DoesNotExist) -} === added file 'src/github.com/juju/juju/worker/reboot/manifold.go' --- src/github.com/juju/juju/worker/reboot/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/reboot/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
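
// The proxyupdater tests above wait for the proxy file to contain the
// settings rendered by AsScriptEnvironment, then assert the individual
// http_proxy, https_proxy, ftp_proxy and no_proxy values. A stdlib-only
// sketch of that rendering; Settings here is an illustrative stand-in for
// github.com/juju/utils/proxy.Settings, whose real output may differ.
package proxyenvsketch

import (
	"fmt"
	"strings"
)

// Settings mirrors the four values the tests assert on via assertEnv.
type Settings struct {
	Http, Https, Ftp, NoProxy string
}

// asScriptEnvironment renders export lines suitable for sourcing from a
// shell profile, one per configured value.
func asScriptEnvironment(s Settings) string {
	var lines []string
	add := func(name, value string) {
		if value != "" {
			lines = append(lines, fmt.Sprintf("export %s=%s", name, value))
		}
	}
	add("http_proxy", s.Http)
	add("https_proxy", s.Https)
	add("ftp_proxy", s.Ftp)
	add("no_proxy", s.NoProxy)
	return strings.Join(lines, "\n")
}
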
+ +package reboot + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + cmdutil "github.com/juju/juju/cmd/jujud/util" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines the names of the manifolds on which a Manifold will depend. +type ManifoldConfig util.PostUpgradeManifoldConfig + +// Manifold returns a dependency manifold that runs a reboot worker, +// using the resource names defined in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) +} + +// newWorker trivially wraps NewReboot for use in a util.PostUpgradeManifold. +// +// TODO(mjs) - It's not tested at the moment, because the scaffolding +// necessary is too unwieldy/distracting to introduce at this point. +func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + apiConn, ok := apiCaller.(api.Connection) + if !ok { + return nil, errors.New("unable to obtain api.Connection") + } + rebootState, err := apiConn.Reboot() + if err != nil { + return nil, errors.Trace(err) + } + lock, err := cmdutil.HookExecutionLock(cmdutil.DataDir) + if err != nil { + return nil, errors.Trace(err) + } + w, err := NewReboot(rebootState, a.CurrentConfig(), lock) + if err != nil { + return nil, errors.Annotate(err, "cannot start reboot worker") + } + return w, nil +} === modified file 'src/github.com/juju/juju/worker/reboot/reboot.go' --- src/github.com/juju/juju/worker/reboot/reboot.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/reboot/reboot.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,8 @@ "github.com/juju/juju/agent" "github.com/juju/juju/api/reboot" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" ) @@ -18,8 +18,6 @@ const RebootMessage = "preparing for reboot" -var _ worker.NotifyWatchHandler = (*Reboot)(nil) - // The reboot worker listens for changes to the reboot flag and // exists with worker.ErrRebootMachine if the machine should reboot or // with worker.ErrShutdownMachine if it should shutdown. 
This will be picked @@ -42,7 +40,13 @@ tag: tag, machineLock: machineLock, } - return worker.NewNotifyWorker(r), nil + w, err := watcher.NewNotifyWorker(watcher.NotifyConfig{ + Handler: r, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } func (r *Reboot) checkForRebootState() error { === modified file 'src/github.com/juju/juju/worker/reboot/reboot_test.go' --- src/github.com/juju/juju/worker/reboot/reboot_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/reboot/reboot_test.go 2016-03-22 15:18:22 +0000 @@ -44,8 +44,6 @@ var _ = gc.Suite(&rebootSuite{}) -var _ worker.NotifyWatchHandler = (*reboot.Reboot)(nil) - func (s *rebootSuite) SetUpTest(c *gc.C) { var err error template := state.MachineTemplate{ @@ -75,7 +73,7 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(s.ctRebootState, gc.NotNil) - lock, err := fslock.NewLock(c.MkDir(), "fake") + lock, err := fslock.NewLock(c.MkDir(), "fake", fslock.Defaults()) c.Assert(err, jc.ErrorIsNil) s.lock = lock } === added file 'src/github.com/juju/juju/worker/resumer/manifold.go' --- src/github.com/juju/juju/worker/resumer/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/resumer/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resumer + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/api/base" + apiresumer "github.com/juju/juju/api/resumer" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines the names of the manifolds on which a Manifold will depend. +type ManifoldConfig util.PostUpgradeManifoldConfig + +// Manifold returns a dependency manifold that runs a resumer worker, +// using the api connection resource named in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + return util.PostUpgradeManifold(util.PostUpgradeManifoldConfig(config), newWorker) +} + +func newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + cfg := a.CurrentConfig() + // Grab the tag and ensure that it's for a machine. + tag, ok := cfg.Tag().(names.MachineTag) + if !ok { + return nil, errors.New("this manifold may only be used inside a machine agent") + } + + // Get API connection. + apiConn, ok := apiCaller.(api.Connection) + if !ok { + return nil, errors.New("unable to obtain api.Connection") + } + + // Get the machine agent's jobs. + entity, err := apiConn.Agent().Entity(tag) + if err != nil { + return nil, err + } + + var isModelManager bool + for _, job := range entity.Jobs() { + if job == multiwatcher.JobManageModel { + isModelManager = true + break + } + } + if !isModelManager { + return nil, dependency.ErrMissing + } + + return NewResumer(apiresumer.NewAPI(apiCaller)), nil +} === added file 'src/github.com/juju/juju/worker/resumer/manifold_test.go' --- src/github.com/juju/juju/worker/resumer/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/resumer/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
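
// The test doubles in this branch (fakeAgent, fakeConfig and fakeAPIConn in
// the file below) all use the same Go idiom: embed the interface being faked
// and override only the methods the test exercises. Any other method call
// dispatches to the nil embedded value and panics, which quickly flags
// untested paths. A stdlib-only sketch of the idiom with illustrative names.
package embedfakesketch

// Store is a stand-in for a wide production interface.
type Store interface {
	Get(key string) (string, error)
	Put(key, value string) error
	Close() error
}

// fakeStore embeds Store so it satisfies the whole interface, but only Get
// is actually implemented; Put and Close would panic if reached.
type fakeStore struct {
	Store
	value string
}

var _ Store = (*fakeStore)(nil) // fakeStore satisfies Store via embedding

func (f *fakeStore) Get(key string) (string, error) {
	return f.value, nil
}
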
+ +package resumer_test + +import ( + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + apiagent "github.com/juju/juju/api/agent" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + resumer "github.com/juju/juju/worker/resumer" + workertesting "github.com/juju/juju/worker/testing" +) + +type ManifoldSuite struct { + testing.IsolationSuite + newCalled bool +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.newCalled = false + s.PatchValue(&resumer.NewResumer, + func(tr resumer.TransactionResumer) worker.Worker { + s.newCalled = true + return nil + }, + ) +} + +func (s *ManifoldSuite) TestMachine(c *gc.C) { + config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + resumer.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + &fakeAPIConn{machineJob: multiwatcher.JobManageModel}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.newCalled, jc.IsTrue) +} + +func (s *ManifoldSuite) TestMachineNonManagerErrors(c *gc.C) { + config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + resumer.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + &fakeAPIConn{machineJob: multiwatcher.JobHostUnits}) + c.Assert(err, gc.Equals, dependency.ErrMissing) + c.Assert(s.newCalled, jc.IsFalse) +} + +func (s *ManifoldSuite) TestUnitErrors(c *gc.C) { + config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + resumer.Manifold(config), + &fakeAgent{tag: names.NewUnitTag("foo/0")}, + &fakeAPIConn{}) + c.Assert(err, gc.ErrorMatches, "this manifold may only be used inside a machine agent") + c.Assert(s.newCalled, jc.IsFalse) +} + +func (s *ManifoldSuite) TestNonAgentErrors(c *gc.C) { + config := resumer.ManifoldConfig(workertesting.PostUpgradeManifoldTestConfig()) + _, err := workertesting.RunPostUpgradeManifold( + resumer.Manifold(config), + &fakeAgent{tag: names.NewUserTag("foo")}, + &fakeAPIConn{}) + c.Assert(err, gc.ErrorMatches, "this manifold may only be used inside a machine agent") + c.Assert(s.newCalled, jc.IsFalse) +} + +type fakeAgent struct { + agent.Agent + tag names.Tag +} + +func (a *fakeAgent) CurrentConfig() agent.Config { + return &fakeConfig{tag: a.tag} +} + +type fakeConfig struct { + agent.Config + tag names.Tag +} + +func (c *fakeConfig) Tag() names.Tag { + return c.tag +} + +type fakeAPIConn struct { + api.Connection + machineJob multiwatcher.MachineJob +} + +func (f *fakeAPIConn) APICall(objType string, version int, id, request string, args interface{}, response interface{}) error { + if res, ok := response.(*params.AgentGetEntitiesResults); ok { + res.Entities = []params.AgentGetEntitiesResult{ + {Jobs: []multiwatcher.MachineJob{f.machineJob}}, + } + } + + return nil +} + +func (*fakeAPIConn) BestFacadeVersion(facade string) int { + return 42 +} + +func (f *fakeAPIConn) Agent() *apiagent.State { + return apiagent.NewState(f) +} === modified file 'src/github.com/juju/juju/worker/resumer/resumer.go' --- src/github.com/juju/juju/worker/resumer/resumer.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/worker/resumer/resumer.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ "fmt" 
"time" + "github.com/juju/juju/worker" "github.com/juju/loggo" "launchpad.net/tomb" ) @@ -33,7 +34,7 @@ } // NewResumer periodically resumes pending transactions. -func NewResumer(tr TransactionResumer) *Resumer { +var NewResumer = func(tr TransactionResumer) worker.Worker { rr := &Resumer{tr: tr} go func() { defer rr.tomb.Done() @@ -50,11 +51,6 @@ rr.tomb.Kill(nil) } -func (rr *Resumer) Stop() error { - rr.tomb.Kill(nil) - return rr.tomb.Wait() -} - func (rr *Resumer) Wait() error { return rr.tomb.Wait() } === modified file 'src/github.com/juju/juju/worker/resumer/resumer_test.go' --- src/github.com/juju/juju/worker/resumer/resumer_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/resumer/resumer_test.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,8 @@ "sync" "time" + "github.com/juju/juju/worker" + "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" @@ -37,8 +39,8 @@ } func (s *ResumerSuite) TestRunStopWithMockState(c *gc.C) { - rr := resumer.NewResumer(s.mockState) - c.Assert(rr.Stop(), gc.IsNil) + w := resumer.NewResumer(s.mockState) + c.Assert(worker.Stop(w), gc.IsNil) } func (s *ResumerSuite) TestResumerCalls(c *gc.C) { @@ -48,8 +50,10 @@ resumer.SetInterval(testInterval) defer resumer.RestoreInterval() - rr := resumer.NewResumer(s.mockState) - defer func() { c.Assert(rr.Stop(), gc.IsNil) }() + w := resumer.NewResumer(s.mockState) + defer func() { + c.Assert(worker.Stop(w), gc.IsNil) + }() time.Sleep(10 * testInterval) @@ -67,14 +71,19 @@ resumer.SetInterval(testInterval) defer resumer.RestoreInterval() - rr := resumer.NewResumer(s.mockState) - defer func() { c.Assert(rr.Stop(), gc.IsNil) }() + w := resumer.NewResumer(s.mockState) + defer func() { + c.Assert(worker.Stop(w), gc.IsNil) + }() // For 4 intervals between 2 and 3 calls should be made. time.Sleep(4 * testInterval) s.mockState.CheckNumCallsBetween(c, 2, 3) } +// TODO(waigani) This could be a simpler and more robust if the resumer took a +// Clock. + // transactionResumerMock is used to check the // calls of ResumeTransactions(). type transactionResumerMock struct { === removed directory 'src/github.com/juju/juju/worker/rsyslog' === removed file 'src/github.com/juju/juju/worker/rsyslog/export_test.go' --- src/github.com/juju/juju/worker/rsyslog/export_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/rsyslog/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog - -var ( - RestartRsyslog = &restartRsyslog - LogDir = &logDir - RsyslogConfDir = &rsyslogConfDir - LookupUser = &lookupUser - DialSyslog = &dialSyslog - SyslogTargets = &syslogTargets - NewRsyslogConfigHandler = newRsyslogConfigHandler -) === removed file 'src/github.com/juju/juju/worker/rsyslog/manifold.go' --- src/github.com/juju/juju/worker/rsyslog/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/rsyslog/manifold.go 1970-01-01 00:00:00 +0000 @@ -1,53 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog - -import ( - "github.com/juju/juju/agent" - "github.com/juju/juju/api/base" - "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/feature" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/dependency" - "github.com/juju/juju/worker/util" - "github.com/juju/utils/featureflag" -) - -// ManifoldConfig defines the names of the manifolds on which a -// Manifold will depend. 
-type ManifoldConfig util.AgentApiManifoldConfig - -// Manifold returns a dependency manifold that runs an rsyslog -// worker, using the resource names defined in the supplied config. -func Manifold(config ManifoldConfig) dependency.Manifold { - return util.AgentApiManifold(util.AgentApiManifoldConfig(config), newWorker) -} - -// newWorker exists to wrap NewRsyslogConfigWorker in a format convenient for an -// AgentApiManifold. -// TODO(fwereade) 2015-05-11 Eventually, the method should be the sole accessible -// package factory function -- as part of the manifold -- and all tests should -// thus be routed through it. -var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { - if featureflag.Enabled(feature.DisableRsyslog) { - logger.Warningf("rsyslog manifold disabled by feature flag") - return nil, dependency.ErrMissing - } - - agentConfig := a.CurrentConfig() - tag := agentConfig.Tag() - namespace := agentConfig.Value(agent.Namespace) - addrs, err := agentConfig.APIAddresses() - if err != nil { - return nil, err - } - return NewRsyslogConfigWorker( - rsyslog.NewState(apiCaller), - RsyslogModeForwarding, - tag, - namespace, - addrs, - agentConfig.DataDir(), - ) -} === removed file 'src/github.com/juju/juju/worker/rsyslog/rsyslog_common_test.go' --- src/github.com/juju/juju/worker/rsyslog/rsyslog_common_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/rsyslog/rsyslog_common_test.go 1970-01-01 00:00:00 +0000 @@ -1,144 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package rsyslog_test - -import ( - "crypto/tls" - "io/ioutil" - "os" - "path/filepath" - "sync" - stdtesting "testing" - "time" - - "github.com/juju/syslog" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/rsyslog" -) - -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} - -type RsyslogSuite struct { - jujutesting.JujuConnSuite - - st api.Connection - machine *state.Machine - mu sync.Mutex // protects dialTags - dialTags []string -} - -var _ = gc.Suite(&RsyslogSuite{}) - -func waitForFile(c *gc.C, file string) { - timeout := time.After(coretesting.LongWait) - for { - select { - case <-timeout: - c.Fatalf("timed out waiting for %s to be written", file) - case <-time.After(coretesting.ShortWait): - if _, err := os.Stat(file); err == nil { - return - } - } - } -} - -func (s *RsyslogSuite) SetUpSuite(c *gc.C) { - s.JujuConnSuite.SetUpSuite(c) - // TODO(waigani) 2014-03-19 bug 1294462 - // Add patch for suite functions - restore := testing.PatchValue(rsyslog.LookupUser, func(username string) (uid, gid int, err error) { - // worker will not attempt to chown files if uid/gid is 0 - return 0, 0, nil - }) - s.AddSuiteCleanup(func(*gc.C) { restore() }) -} - -func (s *RsyslogSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.PatchValue(rsyslog.RestartRsyslog, func() error { return nil }) - s.PatchValue(rsyslog.DialSyslog, func(network, raddr string, priority syslog.Priority, tag string, tlsCfg *tls.Config) (*syslog.Writer, error) { - s.mu.Lock() - s.dialTags = append(s.dialTags, tag) - s.mu.Unlock() - return &syslog.Writer{}, nil - }) - s.PatchValue(rsyslog.LogDir, c.MkDir()) - s.PatchValue(rsyslog.RsyslogConfDir, c.MkDir()) - - s.mu.Lock() 
- s.dialTags = nil - s.mu.Unlock() - s.st, s.machine = s.OpenAPIAsNewMachine(c, state.JobManageEnviron) - err := s.machine.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *RsyslogSuite) TestModeForwarding(c *gc.C) { - err := s.APIState.Client().EnvironmentSet(map[string]interface{}{ - "rsyslog-ca-cert": coretesting.CACert, - "rsyslog-ca-key": coretesting.CAKey, - }) - c.Assert(err, jc.ErrorIsNil) - st, m := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - addrs := []string{"0.1.2.3", "0.2.4.6"} - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeForwarding, m.Tag(), "foo", addrs, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - defer func() { c.Assert(worker.Wait(), gc.IsNil) }() - defer worker.Kill() - - // We should get a ca-cert.pem with the contents introduced into state config. - dirname := filepath.Join(s.ConfDir()+"-foo", "rsyslog") - waitForFile(c, filepath.Join(dirname, "ca-cert.pem")) - caCertPEM, err := ioutil.ReadFile(filepath.Join(dirname, "ca-cert.pem")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(caCertPEM), gc.DeepEquals, coretesting.CACert) - - c.Assert(*rsyslog.SyslogTargets, gc.HasLen, 2) - s.mu.Lock() - tags := s.dialTags - s.mu.Unlock() - for _, dialTag := range tags { - c.Check(dialTag, gc.Equals, "juju-foo-"+m.Tag().String()) - } -} - -func (s *RsyslogSuite) TestNoNamespace(c *gc.C) { - err := s.APIState.Client().EnvironmentSet(map[string]interface{}{ - "rsyslog-ca-cert": coretesting.CACert, - "rsyslog-ca-key": coretesting.CAKey, - }) - c.Assert(err, jc.ErrorIsNil) - st, m := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - addrs := []string{"0.1.2.3", "0.2.4.6"} - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeForwarding, m.Tag(), "", addrs, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - defer func() { c.Assert(worker.Wait(), gc.IsNil) }() - defer worker.Kill() - - // We should get a ca-cert.pem with the contents introduced into state config. - dirname := filepath.Join(s.ConfDir(), "rsyslog") - waitForFile(c, filepath.Join(dirname, "ca-cert.pem")) - caCertPEM, err := ioutil.ReadFile(filepath.Join(dirname, "ca-cert.pem")) - c.Assert(err, jc.ErrorIsNil) - c.Assert(string(caCertPEM), gc.DeepEquals, coretesting.CACert) - - c.Assert(*rsyslog.SyslogTargets, gc.HasLen, 2) - s.mu.Lock() - tags := s.dialTags - s.mu.Unlock() - for _, dialTag := range tags { - c.Check(dialTag, gc.Equals, "juju-"+m.Tag().String()) - } -} === removed file 'src/github.com/juju/juju/worker/rsyslog/rsyslog_test.go' --- src/github.com/juju/juju/worker/rsyslog/rsyslog_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/rsyslog/rsyslog_test.go 1970-01-01 00:00:00 +0000 @@ -1,262 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
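
// The removed rsyslog tests relied on a juju-wide testing convention visible
// in export_test.go above: side-effecting dependencies (restarting rsyslog,
// dialling syslog, target directories) live in package-level function vars,
// and export_test.go exposes pointers to them so tests can swap in harmless
// fakes via PatchValue. A stdlib-only sketch of the seam; all names here are
// illustrative.
package patchvarsketch

// restartService is a seam: production code calls through the var, and
// tests reassign it.
var restartService = func() error {
	// A real implementation would shell out to restart a daemon.
	return nil
}

// reloadConfig is the code under test; it restarts through the seam.
func reloadConfig() error {
	return restartService()
}

// In an export_test.go the seam is exported for the test package, e.g.
//
//	var RestartService = &restartService
//
// and a test patches it:
//
//	*patchvarsketch.RestartService = func() error { called = true; return nil }
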
-// +build !windows - -package rsyslog_test - -import ( - "crypto/x509" - "encoding/pem" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - "github.com/juju/juju/cert" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/utils/syslog" - "github.com/juju/juju/worker/rsyslog" -) - -func waitForRestart(c *gc.C, restarted chan struct{}) { - timeout := time.After(coretesting.LongWait) - for { - select { - case <-timeout: - c.Fatalf("timed out waiting for rsyslog to be restarted") - case <-restarted: - return - } - } -} - -func assertPathExists(c *gc.C, path string) { - _, err := os.Stat(path) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *RsyslogSuite) TestStartStop(c *gc.C) { - st, m := s.OpenAPIAsNewMachine(c, state.JobHostUnits) - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeForwarding, m.Tag(), "", []string{"0.1.2.3"}, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - worker.Kill() - c.Assert(worker.Wait(), gc.IsNil) -} - -func (s *RsyslogSuite) TestTearDown(c *gc.C) { - st, m := s.st, s.machine - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeAccumulate, m.Tag(), "", []string{"0.1.2.3"}, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - confFile := filepath.Join(*rsyslog.RsyslogConfDir, "25-juju.conf") - // On worker teardown, the rsyslog config file should be removed. - defer func() { - _, err := os.Stat(confFile) - c.Assert(err, jc.Satisfies, os.IsNotExist) - }() - defer func() { c.Assert(worker.Wait(), gc.IsNil) }() - defer worker.Kill() - waitForFile(c, confFile) -} - -func (s *RsyslogSuite) TestRsyslogCert(c *gc.C) { - st, m := s.st, s.machine - err := s.machine.SetProviderAddresses(network.NewAddress("example.com")) - c.Assert(err, jc.ErrorIsNil) - - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeAccumulate, m.Tag(), "", []string{"0.1.2.3"}, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - defer func() { c.Assert(worker.Wait(), gc.IsNil) }() - defer worker.Kill() - filename := filepath.Join(s.ConfDir(), "rsyslog", "rsyslog-cert.pem") - waitForFile(c, filename) - - rsyslogCertPEM, err := ioutil.ReadFile(filename) - c.Assert(err, jc.ErrorIsNil) - - cert, err := cert.ParseCert(string(rsyslogCertPEM)) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(cert.DNSNames, gc.DeepEquals, []string{"example.com", "*"}) - - subject := cert.Subject - c.Assert(subject.CommonName, gc.Equals, "*") - c.Assert(subject.Organization, gc.DeepEquals, []string{"juju"}) - - issuer := cert.Issuer - c.Assert(issuer.CommonName, gc.Equals, "juju-generated CA for environment \"rsyslog\"") - c.Assert(issuer.Organization, gc.DeepEquals, []string{"juju"}) -} - -func (s *RsyslogSuite) TestModeAccumulate(c *gc.C) { - st, m := s.st, s.machine - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeAccumulate, m.Tag(), "", nil, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - defer func() { c.Assert(worker.Wait(), gc.IsNil) }() - defer worker.Kill() - dirname := filepath.Join(s.ConfDir(), "rsyslog") - waitForFile(c, filepath.Join(dirname, "ca-cert.pem")) - - // We should have ca-cert.pem, rsyslog-cert.pem, and rsyslog-key.pem. 
- caCertPEM, err := ioutil.ReadFile(filepath.Join(dirname, "ca-cert.pem")) - c.Assert(err, jc.ErrorIsNil) - rsyslogCertPEM, err := ioutil.ReadFile(filepath.Join(dirname, "rsyslog-cert.pem")) - c.Assert(err, jc.ErrorIsNil) - rsyslogKeyPEM, err := ioutil.ReadFile(filepath.Join(dirname, "rsyslog-key.pem")) - c.Assert(err, jc.ErrorIsNil) - - _, _, err = cert.ParseCertAndKey(string(rsyslogCertPEM), string(rsyslogKeyPEM)) - c.Assert(err, jc.ErrorIsNil) - err = cert.Verify(string(rsyslogCertPEM), string(caCertPEM), time.Now().UTC()) - c.Assert(err, jc.ErrorIsNil) - - // Verify rsyslog configuration. - waitForFile(c, filepath.Join(*rsyslog.RsyslogConfDir, "25-juju.conf")) - rsyslogConf, err := ioutil.ReadFile(filepath.Join(*rsyslog.RsyslogConfDir, "25-juju.conf")) - c.Assert(err, jc.ErrorIsNil) - - syslogPort := s.Environ.Config().SyslogPort() - - syslogConfig := &syslog.SyslogConfig{ - LogFileName: m.Tag().String(), - LogDir: *rsyslog.LogDir, - Port: syslogPort, - Namespace: "", - StateServerAddresses: []string{}, - } - - syslog.NewAccumulateConfig(syslogConfig) - syslogConfig.ConfigDir = *rsyslog.RsyslogConfDir - syslogConfig.JujuConfigDir = filepath.Join(s.ConfDir(), "rsyslog") - rendered, err := syslogConfig.Render() - c.Assert(err, jc.ErrorIsNil) - - c.Assert(string(rsyslogConf), gc.DeepEquals, string(rendered)) - - // Verify logrotate files - assertPathExists(c, filepath.Join(dirname, "logrotate.conf")) - assertPathExists(c, filepath.Join(dirname, "logrotate.run")) - -} - -func (s *RsyslogSuite) TestAccumulateHA(c *gc.C) { - m := s.machine - - syslogConfig := &syslog.SyslogConfig{ - LogFileName: m.Tag().String(), - LogDir: *rsyslog.LogDir, - Port: 6541, - Namespace: "", - StateServerAddresses: []string{"192.168.1", "127.0.0.1"}, - } - - syslog.NewAccumulateConfig(syslogConfig) - syslogConfig.JujuConfigDir = filepath.Join(s.ConfDir(), "rsyslog") - rendered, err := syslogConfig.Render() - c.Assert(err, jc.ErrorIsNil) - - stateServer1Config := ":syslogtag, startswith, \"juju-\" @@192.168.1:6541;LongTagForwardFormat" - stateServer2Config := ":syslogtag, startswith, \"juju-\" @@127.0.0.1:6541;LongTagForwardFormat" - - c.Assert(strings.Contains(string(rendered), stateServer1Config), jc.IsTrue) - c.Assert(strings.Contains(string(rendered), stateServer2Config), jc.IsTrue) -} - -// TestModeAccumulateCertsExist is a regression test for -// https://bugs.launchpad.net/juju-core/+bug/1464335, -// where the CA certs existing (in local provider) at -// bootstrap caused the worker to not publish to state. -func (s *RsyslogSuite) TestModeAccumulateCertsExistOnDisk(c *gc.C) { - dirname := filepath.Join(s.ConfDir(), "rsyslog") - err := os.MkdirAll(dirname, 0755) - c.Assert(err, jc.ErrorIsNil) - err = ioutil.WriteFile(filepath.Join(dirname, "ca-cert.pem"), nil, 0644) - c.Assert(err, jc.ErrorIsNil) - - st, m := s.st, s.machine - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), rsyslog.RsyslogModeAccumulate, m.Tag(), "", nil, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - // The worker should create certs and publish to state during setup, - // so we can kill and wait and be confident that the task is done. - worker.Kill() - c.Assert(worker.Wait(), jc.ErrorIsNil) - - // The CA cert and key should have been published to state. - cfg, err := s.State.EnvironConfig() - c.Assert(err, jc.ErrorIsNil) - c.Assert(cfg.AllAttrs()["rsyslog-ca-cert"], gc.NotNil) - c.Assert(cfg.AllAttrs()["rsyslog-ca-key"], gc.NotNil) - - // ca-cert.pem isn't updated on disk until the worker reacts to the - // state change. 
Let's just ensure that rsyslog-ca-cert is a valid - // certificate, and no the zero-length string we wrote to ca-cert.pem. - caCertPEM := cfg.AllAttrs()["rsyslog-ca-cert"].(string) - c.Assert(err, jc.ErrorIsNil) - block, _ := pem.Decode([]byte(caCertPEM)) - c.Assert(block, gc.NotNil) - _, err = x509.ParseCertificate(block.Bytes) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *RsyslogSuite) TestNamespace(c *gc.C) { - st := s.st - // set the rsyslog cert - err := s.APIState.Client().EnvironmentSet(map[string]interface{}{"rsyslog-ca-cert": coretesting.CACert}) - c.Assert(err, jc.ErrorIsNil) - - // namespace only takes effect in filenames - // for machine-0; all others assume isolation. - s.testNamespace(c, st, names.NewMachineTag("0"), "", "25-juju.conf", *rsyslog.LogDir) - s.testNamespace(c, st, names.NewMachineTag("0"), "mynamespace", "25-juju-mynamespace.conf", *rsyslog.LogDir+"-mynamespace") - s.testNamespace(c, st, names.NewMachineTag("1"), "", "25-juju.conf", *rsyslog.LogDir) - s.testNamespace(c, st, names.NewMachineTag("1"), "mynamespace", "25-juju.conf", *rsyslog.LogDir) - s.testNamespace(c, st, names.NewUnitTag("myservice/0"), "", "26-juju-unit-myservice-0.conf", *rsyslog.LogDir) - s.testNamespace(c, st, names.NewUnitTag("myservice/0"), "mynamespace", "26-juju-unit-myservice-0.conf", *rsyslog.LogDir) -} - -// testNamespace starts a worker and ensures that -// the rsyslog config file has the expected filename, -// and the appropriate log dir is used. -func (s *RsyslogSuite) testNamespace(c *gc.C, st api.Connection, tag names.Tag, namespace, expectedFilename, expectedLogDir string) { - restarted := make(chan struct{}, 2) // once for create, once for teardown - s.PatchValue(rsyslog.RestartRsyslog, func() error { - restarted <- struct{}{} - return nil - }) - - err := os.MkdirAll(expectedLogDir, 0755) - c.Assert(err, jc.ErrorIsNil) - worker, err := rsyslog.NewRsyslogConfigWorker(st.Rsyslog(), - rsyslog.RsyslogModeAccumulate, tag, namespace, []string{"0.1.2.3"}, s.ConfDir()) - c.Assert(err, jc.ErrorIsNil) - defer func() { c.Assert(worker.Wait(), gc.IsNil) }() - defer worker.Kill() - - // change the API HostPorts to trigger an rsyslog restart - newHostPorts := network.NewHostPorts(6541, "127.0.0.1") - err = s.State.SetAPIHostPorts([][]network.HostPort{newHostPorts}) - c.Assert(err, jc.ErrorIsNil) - - // Wait for rsyslog to be restarted, so we can check to see - // what the name of the config file is. - waitForRestart(c, restarted) - - // Ensure that ca-cert.pem gets written to the expected log dir. - dirname := filepath.Join(s.ConfDir(), "rsyslog") - waitForFile(c, filepath.Join(dirname, "ca-cert.pem")) - - dir, err := os.Open(*rsyslog.RsyslogConfDir) - c.Assert(err, jc.ErrorIsNil) - names, err := dir.Readdirnames(-1) - dir.Close() - c.Assert(err, jc.ErrorIsNil) - c.Assert(names, gc.HasLen, 1) - c.Assert(names[0], gc.Equals, expectedFilename) -} === removed file 'src/github.com/juju/juju/worker/rsyslog/worker.go' --- src/github.com/juju/juju/worker/rsyslog/worker.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/rsyslog/worker.go 1970-01-01 00:00:00 +0000 @@ -1,508 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
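
// TestModeAccumulateCertsExistOnDisk above validates the published CA with
// the two-step stdlib dance the test shows: pem.Decode to recover the DER
// block, then x509.ParseCertificate. A self-contained sketch of that check,
// with an illustrative helper name.
package pemchecksketch

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// parseCertPEM returns the parsed certificate, or an error if the input does
// not begin with a well-formed PEM-encoded certificate.
func parseCertPEM(certPEM []byte) (*x509.Certificate, error) {
	block, _ := pem.Decode(certPEM)
	if block == nil {
		return nil, errors.New("no PEM block found")
	}
	return x509.ParseCertificate(block.Bytes)
}
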
- -package rsyslog - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "net" - "os" - "os/user" - "path/filepath" - "runtime" - "strconv" - "time" - - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - rsyslog "github.com/juju/syslog" - "github.com/juju/utils" - - "github.com/juju/juju/agent" - apirsyslog "github.com/juju/juju/api/rsyslog" - "github.com/juju/juju/api/watcher" - "github.com/juju/juju/cert" - "github.com/juju/juju/utils/syslog" - "github.com/juju/juju/version" - "github.com/juju/juju/worker" -) - -var logger = loggo.GetLogger("juju.worker.rsyslog") - -var ( - rsyslogConfDir = "/etc/rsyslog.d" - logDir = agent.DefaultLogDir - syslogTargets = []*rsyslog.Writer{} -) - -// RsyslogMode describes how to configure rsyslog. -type RsyslogMode int - -const ( - RsyslogModeInvalid RsyslogMode = iota - // RsyslogModeForwarding is the mode in which - // rsyslog will be configured to forward logging - // to state servers. - RsyslogModeForwarding - // RsyslogModeAccumulate is the mode in which - // rsyslog will be configured to accumulate logging - // from other machines into an "all-machines.log". - RsyslogModeAccumulate -) - -// RsyslogConfigHandler implements worker.NotifyWatchHandler, watching -// environment configuration changes and generating new rsyslog -// configuration. -type RsyslogConfigHandler struct { - st *apirsyslog.State - mode RsyslogMode - syslogConfig *syslog.SyslogConfig - rsyslogConfPath string - tag names.Tag - // We store the syslog-port and rsyslog-ca-cert - // values after writing the rsyslog configuration, - // so we can decide whether a change has occurred. - syslogPort int - rsyslogCACert string - rsyslogCAKey string -} - -// certPair holds the path and contents for a certificate. -type certPair struct { - path string - data string -} - -var _ worker.NotifyWatchHandler = (*RsyslogConfigHandler)(nil) - -func syslogUser() string { - var user string - switch version.Current.OS { - case version.CentOS: - user = "root" - default: - user = "syslog" - } - - return user -} - -var NewRsyslogConfigWorker = newRsyslogConfigWorker - -// newRsyslogConfigWorker returns a worker.Worker that uses -// WatchForRsyslogChanges and updates rsyslog configuration based -// on changes. The worker will remove the configuration file -// on teardown. 
-func newRsyslogConfigWorker(st *apirsyslog.State, mode RsyslogMode, tag names.Tag, namespace string, stateServerAddrs []string, jujuConfigDir string) (worker.Worker, error) { - if version.Current.OS == version.Windows && mode == RsyslogModeAccumulate { - return worker.NewNoOpWorker(), nil - } - handler, err := newRsyslogConfigHandler(st, mode, tag, namespace, stateServerAddrs, jujuConfigDir) - if err != nil { - return nil, err - } - logger.Debugf("starting rsyslog worker mode %v for %q %q", mode, tag, namespace) - return worker.NewNotifyWorker(handler), nil -} - -func newRsyslogConfigHandler(st *apirsyslog.State, mode RsyslogMode, tag names.Tag, namespace string, stateServerAddrs []string, jujuConfigDir string) (*RsyslogConfigHandler, error) { - if namespace != "" { - jujuConfigDir += "-" + namespace - } - jujuConfigDir = filepath.Join(jujuConfigDir, "rsyslog") - if err := os.MkdirAll(jujuConfigDir, 0755); err != nil { - return nil, errors.Trace(err) - } - - syslogConfig := &syslog.SyslogConfig{ - LogFileName: tag.String(), - LogDir: logDir, - JujuConfigDir: jujuConfigDir, - Port: 0, - Namespace: namespace, - StateServerAddresses: stateServerAddrs, - } - if mode == RsyslogModeAccumulate { - syslog.NewAccumulateConfig(syslogConfig) - } else { - syslog.NewForwardConfig(syslogConfig) - } - - // Historically only machine-0 includes the namespace in the log - // dir/file; for backwards compatibility we continue the tradition. - if tag != names.NewMachineTag("0") { - namespace = "" - } - switch tag := tag.(type) { - case names.MachineTag: - if namespace == "" { - syslogConfig.ConfigFileName = "25-juju.conf" - } else { - syslogConfig.ConfigFileName = fmt.Sprintf("25-juju-%s.conf", namespace) - } - default: - syslogConfig.ConfigFileName = fmt.Sprintf("26-juju-%s.conf", tag) - } - - syslogConfig.ConfigDir = rsyslogConfDir - syslogConfig.LogDir = logDir - if namespace != "" { - syslogConfig.LogDir += "-" + namespace - } - - return &RsyslogConfigHandler{ - st: st, - mode: mode, - syslogConfig: syslogConfig, - tag: tag, - }, nil -} - -func (h *RsyslogConfigHandler) SetUp() (watcher.NotifyWatcher, error) { - if h.mode == RsyslogModeAccumulate { - if err := h.ensureCA(); err != nil { - return nil, errors.Annotate(err, "failed to write rsyslog certificates") - } - - if err := h.ensureLogrotate(); err != nil { - return nil, errors.Annotate(err, "failed to write rsyslog logrotate scripts") - } - - } - return h.st.WatchForRsyslogChanges(h.tag.String()) -} - -var restartRsyslog = syslog.Restart -var dialSyslog = rsyslog.Dial - -func (h *RsyslogConfigHandler) TearDown() error { - if err := os.Remove(h.syslogConfig.ConfigFilePath()); err == nil { - restartRsyslog() - } - return nil -} - -// composeTLS generates a new client certificate for connecting to the rsyslog server. -// We explicitly set the ServerName field, this ensures that even if we are connecting -// via an IP address and are using an old certificate (pre 1.20.9), we can still -// successfully connect. 
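
// composeTLS below pins RootCAs to the juju-generated CA and fixes
// ServerName, so a client dialling by IP address still validates the server
// certificate against the expected name. A stdlib-only sketch of dialling
// with such a config; the function name and arguments are illustrative.
package tlsdialsketch

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
)

// dialWithCA connects to addr, trusting only caCertPEM and validating the
// server certificate against serverName instead of the dialled address.
func dialWithCA(addr, serverName string, caCertPEM []byte) (*tls.Conn, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caCertPEM) {
		return nil, errors.New("no usable certificates in CA PEM")
	}
	return tls.Dial("tcp", addr, &tls.Config{
		RootCAs:    pool,
		ServerName: serverName,
	})
}
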
-func (h *RsyslogConfigHandler) composeTLS(caCert string) (*tls.Config, error) { - cert := x509.NewCertPool() - ok := cert.AppendCertsFromPEM([]byte(caCert)) - if !ok { - return nil, errors.Errorf("Failed to parse rsyslog root certificate") - } - return &tls.Config{ - RootCAs: cert, - ServerName: "juju-rsyslog", - }, nil -} - -func (h *RsyslogConfigHandler) replaceRemoteLogger(caCert string) error { - tlsConf, err := h.composeTLS(caCert) - if err != nil { - return err - } - - var newLoggers []*rsyslog.Writer - var wrapLoggers []io.Writer - for _, j := range h.syslogConfig.StateServerAddresses { - host, _, err := net.SplitHostPort(j) - if err != nil { - // No port was found - host = j - } - target := net.JoinHostPort(host, strconv.Itoa(h.syslogConfig.Port)) - namespace := h.syslogConfig.Namespace - if namespace != "" { - namespace = "-" + namespace - } - logTag := "juju" + namespace + "-" + h.tag.String() - logger.Debugf("making syslog connection for %q to %s", logTag, target) - writer, err := dialSyslog("tcp", target, rsyslog.LOG_DEBUG, logTag, tlsConf) - if err != nil { - return err - } - wrapLoggers = append(wrapLoggers, writer) - newLoggers = append(newLoggers, writer) - } - wapper := io.MultiWriter(wrapLoggers...) - writer := loggo.NewSimpleWriter(wapper, &loggo.DefaultFormatter{}) - - loggo.RemoveWriter("syslog") - err = loggo.RegisterWriter("syslog", writer, loggo.TRACE) - if err != nil { - return err - } - - // Close old targets - for _, j := range syslogTargets { - if err := j.Close(); err != nil { - logger.Warningf("Failed to close syslog writer: %s", err) - } - } - // record new targets - syslogTargets = newLoggers - return nil -} - -func (h *RsyslogConfigHandler) Handle(_ <-chan struct{}) error { - cfg, err := h.st.GetRsyslogConfig(h.tag.String()) - if err != nil { - return errors.Annotate(err, "cannot get environ config") - } - - rsyslogCACert := cfg.CACert - if rsyslogCACert == "" { - return nil - } - - rsyslogCAKey := cfg.CAKey - if rsyslogCAKey == "" { - return nil - } - - h.syslogConfig.Port = cfg.Port - if h.mode == RsyslogModeForwarding { - if err := writeFileAtomic(h.syslogConfig.CACertPath(), []byte(rsyslogCACert), 0644, 0, 0); err != nil { - return errors.Annotate(err, "cannot write CA certificate") - } - if err := h.replaceRemoteLogger(rsyslogCACert); err != nil { - return err - } - } else { - rsyslogCertPEM, rsyslogKeyPEM, err := h.rsyslogServerCerts(rsyslogCACert, rsyslogCAKey) - if err != nil { - return errors.Trace(err) - } - - if err := writeCertificates([]certPair{ - {h.syslogConfig.ServerCertPath(), rsyslogCertPEM}, - {h.syslogConfig.ServerKeyPath(), rsyslogKeyPEM}, - {h.syslogConfig.CACertPath(), rsyslogCACert}, - }); err != nil { - return errors.Trace(err) - } - - data, err := h.syslogConfig.Render() - if err != nil { - return errors.Annotate(err, "failed to render rsyslog configuration file") - } - if err := writeFileAtomic(h.syslogConfig.ConfigFilePath(), []byte(data), 0644, 0, 0); err != nil { - return errors.Annotate(err, "failed to write rsyslog configuration file") - } - logger.Debugf("Reloading rsyslog configuration") - if err := restartRsyslog(); err != nil { - logger.Errorf("failed to reload rsyslog configuration") - return errors.Annotate(err, "cannot restart rsyslog") - } - } - // Record config values so we don't try again. - // Do this last so we recover from intermittent - // failures. 
- h.syslogPort = cfg.Port - h.rsyslogCACert = rsyslogCACert - h.rsyslogCAKey = rsyslogCAKey - return nil -} - -var lookupUser = func(username string) (uid, gid int, err error) { - u, err := user.Lookup(username) - if err != nil { - return -1, -1, err - } - uid, err = strconv.Atoi(u.Uid) - if err != nil { - return -1, -1, err - } - gid, err = strconv.Atoi(u.Gid) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -func localIPS() ([]string, error) { - var ips []string - addrs, err := net.InterfaceAddrs() - if err != nil { - return nil, err - } - for _, j := range addrs { - ip, _, err := net.ParseCIDR(j.String()) - if err != nil { - return nil, err - } - if ip.IsLoopback() { - continue - } - ips = append(ips, ip.String()) - } - return ips, nil -} - -func (h *RsyslogConfigHandler) rsyslogHosts() ([]string, error) { - var hosts []string - cfg, err := h.st.GetRsyslogConfig(h.tag.String()) - if err != nil { - return nil, err - } - for _, j := range cfg.HostPorts { - if j.Value != "" { - hosts = append(hosts, j.Address.Value) - } - } - - // Explicitly add the '*' wildcard host. This will ensure that rsyslog - // clients will always be able to connect even if their hostnames and/or IPAddresses - // are changed. This also ensures we can continue to use SSL for our rsyslog connections - // and we can avoid having to use the skipVerify flag. - hosts = append(hosts, "*") - - return hosts, nil -} - -// ensureCA ensures that a CA certificate and key exist in state, -// to be picked up by all rsyslog workers in the environment. -func (h *RsyslogConfigHandler) ensureCA() error { - // We never write the CA key to local disk, so - // we must check state to know whether or not - // we need to generate new certs and keys. - cfg, err := h.st.GetRsyslogConfig(h.tag.String()) - if err != nil { - return errors.Annotate(err, "cannot get environ config") - } - if cfg.CACert != "" && cfg.CAKey != "" { - return nil - } - - // Generate a new CA and server cert/key pairs, and - // publish to state. Rsyslog workers will observe - // this and generate certificates and keys for - // rsyslog in response. - expiry := time.Now().UTC().AddDate(10, 0, 0) - caCertPEM, caKeyPEM, err := cert.NewCA("rsyslog", expiry) - if err != nil { - return err - } - return h.st.SetRsyslogCert(caCertPEM, caKeyPEM) -} - -// writeCertificates persists any certPair to disk. If any -// of the write attempts fail it will return an error immediately. -// It is up to the caller to ensure the order of pairs represents -// a suitable order in the case of failue. -func writeCertificates(pairs []certPair) error { - // Files must be chowned to syslog:adm. - syslogUid, syslogGid, err := lookupUser(syslogUser()) - if err != nil { - return err - } - - for _, pair := range pairs { - if err := writeFileAtomic(pair.path, []byte(pair.data), 0600, syslogUid, syslogGid); err != nil { - return err - } - } - return nil -} - -// rsyslogServerCerts generates new certificates for RsyslogConfigHandler -// using the provider caCert and caKey. This is used during the setup of the -// rsyslog worker as well as when handling any changes to the rsyslog configuration, -// usually adding and removing of state machines through ensure-availability. 
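
// Every file the removed worker wrote went through writeFileAtomic (defined
// below), which delegates to utils.AtomicWriteFileAndChange. The essence of
// such helpers is write-to-temp-then-rename, so readers never observe a
// half-written config. A stdlib-only sketch of that core, without the
// chown/chmod handling juju layers on top.
package atomicwritesketch

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to a temporary file in path's directory, then
// renames it into place; on POSIX systems the rename is atomic.
func atomicWriteFile(path string, data []byte, mode os.FileMode) error {
	dir := filepath.Dir(path)
	tmp, err := ioutil.TempFile(dir, ".tmp-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // fails harmlessly once the rename succeeds
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(mode); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}
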
-func (h *RsyslogConfigHandler) rsyslogServerCerts(caCert, caKey string) (string, string, error) { - if caCert == "" { - return "", "", errors.New("CACert is not set") - } - if caKey == "" { - return "", "", errors.New("CAKey is not set") - } - - expiry := time.Now().UTC().AddDate(10, 0, 0) - // Add rsyslog servers in the subjectAltName so we can - // successfully validate when connectiong via SSL - hosts, err := h.rsyslogHosts() - if err != nil { - return "", "", err - } - // Add local IPs to SAN. When connecting via IP address, - // the client will validate the server against any IP in - // the subjectAltName. We add all local ips to make sure - // this does not cause an error - ips, err := localIPS() - if err != nil { - return "", "", err - } - hosts = append(hosts, ips...) - return cert.NewServer(caCert, caKey, expiry, hosts) -} - -// ensureLogrotate ensures that the logrotate -// configuration file and logrotate helper script -// exist in the log directory and creates them if they do not. -func (h *RsyslogConfigHandler) ensureLogrotate() error { - // Files must be chowned to syslog - syslogUid, syslogGid, err := lookupUser(syslogUser()) - if err != nil { - return err - } - - logrotateConfPath := h.syslogConfig.LogrotateConfPath() - // check for the logrotate conf - if _, err := os.Stat(logrotateConfPath); os.IsNotExist(err) { - logrotateConfFile, err := h.syslogConfig.LogrotateConfFile() - if err != nil { - return err - } - // create the logrotate conf - if err := writeFileAtomic(logrotateConfPath, logrotateConfFile, 0600, syslogUid, syslogGid); err != nil { - return err - } - } else { - return err - } - - logrotateHelperPath := h.syslogConfig.LogrotateHelperPath() - // check for the logrotate helper - if _, err := os.Stat(logrotateHelperPath); os.IsNotExist(err) { - logrotateHelperFile, err := h.syslogConfig.LogrotateHelperFile() - if err != nil { - return err - } - // create the logrotate helper - if err := writeFileAtomic(logrotateHelperPath, logrotateHelperFile, 0700, syslogUid, syslogGid); err != nil { - return err - } - } else { - return err - } - - return nil -} - -func writeFileAtomic(path string, data []byte, mode os.FileMode, uid, gid int) error { - chmodAndChown := func(f *os.File) error { - // f.Chmod() and f.Chown() are not implemented on Windows - // There is currently no good way of doing file permission - // management for Windows, directly from Go. The behavior of os.Chmod() - // is different from its linux implementation. - if runtime.GOOS == "windows" { - return nil - } - if err := f.Chmod(mode); err != nil { - return err - } - if uid != 0 { - if err := f.Chown(uid, gid); err != nil { - return err - } - } - return nil - } - return utils.AtomicWriteFileAndChange(path, data, chmodAndChown) -} === modified file 'src/github.com/juju/juju/worker/runner.go' --- src/github.com/juju/juju/worker/runner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/runner.go 2016-03-22 15:18:22 +0000 @@ -7,12 +7,15 @@ "time" "github.com/juju/errors" + "github.com/juju/loggo" "launchpad.net/tomb" ) +var logger = loggo.GetLogger("juju.worker") + // RestartDelay holds the length of time that a worker // will wait between exiting and restarting. -var RestartDelay = 3 * time.Second +const RestartDelay = 3 * time.Second // Worker is implemented by a running worker. 
type Worker interface { @@ -41,10 +44,12 @@ startedc chan startInfo isFatal func(error) bool moreImportant func(err0, err1 error) bool + + // restartDelay holds the length of time that a worker + // will wait between exiting and restarting. + restartDelay time.Duration } -var _ Runner = (*runner)(nil) - type startReq struct { id string start func() (Worker, error) @@ -70,7 +75,7 @@ // The function isFatal(err) returns whether err is a fatal error. The // function moreImportant(err0, err1) returns whether err0 is considered // more important than err1. -func NewRunner(isFatal func(error) bool, moreImportant func(err0, err1 error) bool) Runner { +func NewRunner(isFatal func(error) bool, moreImportant func(err0, err1 error) bool, restartDelay time.Duration) Runner { runner := &runner{ startc: make(chan startReq), stopc: make(chan string), @@ -78,6 +83,7 @@ startedc: make(chan startInfo), isFatal: isFatal, moreImportant: moreImportant, + restartDelay: restartDelay, } go func() { defer runner.tomb.Done() @@ -172,7 +178,7 @@ if info == nil { workers[req.id] = &workerInfo{ start: req.start, - restartDelay: RestartDelay, + restartDelay: runner.restartDelay, } go runner.runWorker(0, req.id, req.start) break @@ -232,7 +238,7 @@ break } go runner.runWorker(workerInfo.restartDelay, info.id, workerInfo.start) - workerInfo.restartDelay = RestartDelay + workerInfo.restartDelay = runner.restartDelay } } } === modified file 'src/github.com/juju/juju/worker/runner_test.go' --- src/github.com/juju/juju/worker/runner_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/runner_test.go 2016-03-22 15:18:22 +0000 @@ -41,14 +41,8 @@ return false } -func (s *runnerSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - // Avoid patching RestartDealy to zero, as it changes worker behaviour. 
- s.PatchValue(&worker.RestartDelay, time.Duration(time.Millisecond)) -} - func (*runnerSuite) TestOneWorkerStart(c *gc.C) { - runner := worker.NewRunner(noneFatal, noImportance) + runner := worker.NewRunner(noneFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() err := runner.StartWorker("id", testWorkerStart(starter)) c.Assert(err, jc.ErrorIsNil) @@ -59,7 +53,7 @@ } func (*runnerSuite) TestOneWorkerFinish(c *gc.C) { - runner := worker.NewRunner(noneFatal, noImportance) + runner := worker.NewRunner(noneFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() err := runner.StartWorker("id", testWorkerStart(starter)) c.Assert(err, jc.ErrorIsNil) @@ -73,7 +67,7 @@ } func (*runnerSuite) TestOneWorkerRestart(c *gc.C) { - runner := worker.NewRunner(noneFatal, noImportance) + runner := worker.NewRunner(noneFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() err := runner.StartWorker("id", testWorkerStart(starter)) c.Assert(err, jc.ErrorIsNil) @@ -91,7 +85,7 @@ } func (*runnerSuite) TestOneWorkerStartFatalError(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() starter.startErr = errors.New("cannot start test task") err := runner.StartWorker("id", testWorkerStart(starter)) @@ -101,7 +95,7 @@ } func (*runnerSuite) TestOneWorkerDieFatalError(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() err := runner.StartWorker("id", testWorkerStart(starter)) c.Assert(err, jc.ErrorIsNil) @@ -114,7 +108,7 @@ } func (*runnerSuite) TestOneWorkerStartStop(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() err := runner.StartWorker("id", testWorkerStart(starter)) c.Assert(err, jc.ErrorIsNil) @@ -126,7 +120,7 @@ } func (*runnerSuite) TestOneWorkerStopFatalError(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) starter := newTestWorkerStarter() starter.stopErr = errors.New("stop error") err := runner.StartWorker("id", testWorkerStart(starter)) @@ -139,8 +133,7 @@ } func (*runnerSuite) TestOneWorkerStartWhenStopping(c *gc.C) { - worker.RestartDelay = 3 * time.Second - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, 3*time.Second) starter := newTestWorkerStarter() starter.stopWait = make(chan struct{}) @@ -166,8 +159,8 @@ } func (*runnerSuite) TestOneWorkerRestartDelay(c *gc.C) { - worker.RestartDelay = 100 * time.Millisecond - runner := worker.NewRunner(noneFatal, noImportance) + const delay = 100 * time.Millisecond + runner := worker.NewRunner(noneFatal, noImportance, delay) starter := newTestWorkerStarter() err := runner.StartWorker("id", testWorkerStart(starter)) c.Assert(err, jc.ErrorIsNil) @@ -177,8 +170,8 @@ t0 := time.Now() starter.assertStarted(c, true) restartDuration := time.Since(t0) - if restartDuration < worker.RestartDelay { - c.Fatalf("restart delay was not respected; got %v want %v", restartDuration, worker.RestartDelay) + if restartDuration < delay { + c.Fatalf("restart delay was not respected; got %v want %v", restartDuration, delay) } c.Assert(worker.Stop(runner), gc.IsNil) } @@ -194,7 +187,7 @@ return err0.(errorLevel) > err1.(errorLevel) } id := func(i 
int) string { return fmt.Sprint(i) } - runner := worker.NewRunner(allFatal, moreImportant) + runner := worker.NewRunner(allFatal, moreImportant, time.Millisecond) for i := 0; i < 10; i++ { starter := newTestWorkerStarter() starter.stopErr = errorLevel(i) @@ -208,19 +201,19 @@ } func (*runnerSuite) TestStartWorkerWhenDead(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) c.Assert(worker.Stop(runner), gc.IsNil) c.Assert(runner.StartWorker("foo", nil), gc.Equals, worker.ErrDead) } func (*runnerSuite) TestStopWorkerWhenDead(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) c.Assert(worker.Stop(runner), gc.IsNil) c.Assert(runner.StopWorker("foo"), gc.Equals, worker.ErrDead) } func (*runnerSuite) TestAllWorkersStoppedWhenOneDiesWithFatalError(c *gc.C) { - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) var starters []*testWorkerStarter for i := 0; i < 10; i++ { starter := newTestWorkerStarter() @@ -244,7 +237,7 @@ // Original deadlock problem that this tests for: // A worker dies with fatal error while another worker // is inside start(). runWorker can't send startInfo on startedc. - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) slowStarter := newTestWorkerStarter() // make the startNotify channel synchronous so @@ -282,7 +275,7 @@ // A worker tries to call StartWorker in its start function // at the same time another worker dies with a fatal error. // It might not be able to send on startc. - runner := worker.NewRunner(allFatal, noImportance) + runner := worker.NewRunner(allFatal, noImportance, time.Millisecond) selfStarter := newTestWorkerStarter() // make the startNotify channel synchronous so @@ -448,14 +441,6 @@ } } -func (*workersSuite) TestNewWorkers(c *gc.C) { - workers := worker.NewWorkers() - ids, funcs := worker.ExtractWorkers(workers) - - c.Check(ids, gc.HasLen, 0) - c.Check(funcs, gc.HasLen, 0) -} - func (*workersSuite) TestIDsOkay(c *gc.C) { newWorker := func() (worker.Worker, error) { return nil, nil } @@ -476,25 +461,6 @@ c.Check(ids, gc.HasLen, 0) } -func (s *workersSuite) TestAddOkay(c *gc.C) { - workers := worker.NewWorkers() - expected := []string{"spam", "eggs"} - for _, id := range expected { - err := workers.Add(id, s.newWorkerFunc(id)) - c.Assert(err, jc.ErrorIsNil) - } - ids, funcs := worker.ExtractWorkers(workers) - - c.Check(ids, jc.DeepEquals, expected) - // We can't compare functions so we work around it. - for _, newWorker := range funcs { - newWorker() - } - sort.Strings(s.calls) - sort.Strings(expected) - c.Check(s.calls, jc.DeepEquals, expected) -} - func (*workersSuite) TestAddAlreadyRegistered(c *gc.C) { newWorker := func() (worker.Worker, error) { return nil, nil } === modified file 'src/github.com/juju/juju/worker/simpleworker.go' --- src/github.com/juju/juju/worker/simpleworker.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/juju/worker/simpleworker.go 2016-03-22 15:18:22 +0000 @@ -3,9 +3,7 @@ package worker -import ( - "launchpad.net/tomb" -) +import "launchpad.net/tomb" // simpleWorker implements the worker returned by NewSimpleWorker. 
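The NewRunner change running through the hunks above replaces the package-level delay variable with an injected parameter. A sketch of the updated construction pattern, with placeholder error predicates (the real call sites pass noneFatal/allFatal-style functions, as in the tests):

package example

import "github.com/juju/juju/worker"

// newExampleRunner mirrors the updated call sites: the isFatal and
// moreImportant predicates are placeholders, and the third argument is
// the per-runner restart delay (worker.RestartDelay is now a const).
func newExampleRunner() worker.Runner {
	isFatal := func(error) bool { return false }
	moreImportant := func(err0, err1 error) bool { return false }
	return worker.NewRunner(isFatal, moreImportant, worker.RestartDelay)
}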
type simpleWorker struct { === modified file 'src/github.com/juju/juju/worker/singular/mongo_test.go' --- src/github.com/juju/juju/worker/singular/mongo_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/singular/mongo_test.go 2016-03-22 15:18:22 +0000 @@ -174,10 +174,9 @@ localHostPort: a.hostPort, session: session, } - runner := worker.NewRunner( - connectionIsFatal(mc), - func(err0, err1 error) bool { return true }, - ) + + fn := func(err0, err1 error) bool { return true } + runner := worker.NewRunner(connectionIsFatal(mc), fn, worker.RestartDelay) singularRunner, err := singular.New(runner, mc) if err != nil { return nil, fmt.Errorf("cannot start singular runner: %v", err) === modified file 'src/github.com/juju/juju/worker/singular/singular_test.go' --- src/github.com/juju/juju/worker/singular/singular_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/singular/singular_test.go 2016-03-22 15:18:22 +0000 @@ -172,6 +172,7 @@ return err == errFatal }, func(err0, err1 error) bool { return true }, + worker.RestartDelay, ) } === removed file 'src/github.com/juju/juju/worker/statushistorypruner/export_test.go' --- src/github.com/juju/juju/worker/statushistorypruner/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/statushistorypruner/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,18 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package statushistorypruner - -import ( - "github.com/juju/juju/state" - "github.com/juju/juju/worker" -) - -func NewPruneWorker(st *state.State, params *HistoryPrunerParams, t worker.NewTimerFunc, psh pruneHistoryFunc) worker.Worker { - w := &pruneWorker{ - st: st, - params: params, - pruner: psh, - } - return worker.NewPeriodicWorker(w.doPruning, w.params.PruneInterval, t) -} === added file 'src/github.com/juju/juju/worker/statushistorypruner/manifold.go' --- src/github.com/juju/juju/worker/statushistorypruner/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/statushistorypruner/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package statushistorypruner + +import ( + "time" + + "github.com/juju/errors" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/statushistory" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" +) + +// ManifoldConfig describes the resources and configuration on which the +// statushistorypruner worker depends. +type ManifoldConfig struct { + APICallerName string + MaxLogsPerEntity uint + PruneInterval time.Duration + NewTimer worker.NewTimerFunc +} + +// Manifold returns a Manifold that encapsulates the statushistorypruner worker. 
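A sketch of calling the new Manifold constructor; the "api-caller" resource name and the numeric values are illustrative assumptions, not part of this change:

package example

import (
	"time"

	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/dependency"
	"github.com/juju/juju/worker/statushistorypruner"
)

// exampleManifold wires the pruner into a dependency engine. Only
// APICallerName must match a real engine resource; worker.NewTimer is
// the default timer factory referenced elsewhere in this diff.
func exampleManifold() dependency.Manifold {
	return statushistorypruner.Manifold(statushistorypruner.ManifoldConfig{
		APICallerName:    "api-caller",
		MaxLogsPerEntity: 100,
		PruneInterval:    5 * time.Minute,
		NewTimer:         worker.NewTimer,
	})
}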
+func Manifold(config ManifoldConfig) dependency.Manifold {
+	return dependency.Manifold{
+		Inputs: []string{config.APICallerName},
+		Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) {
+			var apiCaller base.APICaller
+			if err := getResource(config.APICallerName, &apiCaller); err != nil {
+				return nil, errors.Trace(err)
+			}
+
+			facade := statushistory.NewFacade(apiCaller)
+			prunerConfig := Config{
+				Facade:           facade,
+				MaxLogsPerEntity: config.MaxLogsPerEntity,
+				PruneInterval:    config.PruneInterval,
+				NewTimer:         config.NewTimer,
+			}
+			w, err := New(prunerConfig)
+			if err != nil {
+				return nil, errors.Trace(err)
+			}
+			return w, nil
+		},
+	}
+}

=== modified file 'src/github.com/juju/juju/worker/statushistorypruner/worker.go'
--- src/github.com/juju/juju/worker/statushistorypruner/worker.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/worker/statushistorypruner/worker.go	2016-03-22 15:18:22 +0000
@@ -8,50 +8,53 @@

	"github.com/juju/errors"

-	"github.com/juju/juju/state"
	"github.com/juju/juju/worker"
)

// HistoryPrunerParams specifies how history logs should be pruned.
type HistoryPrunerParams struct {
	// TODO(perrito666) We might want to have some sort of limitation of the collection size too.
-	MaxLogsPerState int
-	PruneInterval   time.Duration
-}
-
-const DefaultMaxLogsPerState = 100
-const DefaultPruneInterval = 5 * time.Minute
-
-// NewHistoryPrunerParams returns a HistoryPrunerParams initialized with default parameter.
-func NewHistoryPrunerParams() *HistoryPrunerParams {
-	return &HistoryPrunerParams{
-		MaxLogsPerState: DefaultMaxLogsPerState,
-		PruneInterval:   DefaultPruneInterval,
-	}
-}
-
-type pruneHistoryFunc func(*state.State, int) error
-
-type pruneWorker struct {
-	st     *state.State
-	params *HistoryPrunerParams
-	pruner pruneHistoryFunc
+	MaxLogsPerEntity int
+	PruneInterval    time.Duration
+}
+
+// Facade represents an API that implements status history pruning.
+type Facade interface {
+	Prune(int) error
+}
+
+// Config holds all necessary attributes to start a pruner worker.
+type Config struct {
+	Facade           Facade
+	MaxLogsPerEntity uint
+	PruneInterval    time.Duration
+	NewTimer         worker.NewTimerFunc
+}
+
+// Validate will err unless basic requirements for a valid
+// config are met.
+func (c *Config) Validate() error {
+	if c.Facade == nil {
+		return errors.New("missing Facade")
+	}
+	if c.NewTimer == nil {
+		return errors.New("missing Timer")
+	}
+	return nil
}

// New returns a worker.Worker for the history pruner.
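Direct construction takes the same shape; a sketch with a stub Facade standing in for the statushistory API client (the stub is illustrative, a real caller would use statushistory.NewFacade as Manifold does above):

package example

import (
	"time"

	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/statushistorypruner"
)

// stubFacade satisfies the Facade interface above.
type stubFacade struct{}

func (stubFacade) Prune(maxLogs int) error { return nil }

func newExamplePruner() (worker.Worker, error) {
	return statushistorypruner.New(statushistorypruner.Config{
		Facade:           stubFacade{},
		MaxLogsPerEntity: 100,
		PruneInterval:    5 * time.Minute,
		NewTimer:         worker.NewTimer,
	})
}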
-func New(st *state.State, params *HistoryPrunerParams) worker.Worker {
-	w := &pruneWorker{
-		st:     st,
-		params: params,
-		pruner: state.PruneStatusHistory,
-	}
-	return worker.NewPeriodicWorker(w.doPruning, w.params.PruneInterval, worker.NewTimer)
-}
+func New(conf Config) (worker.Worker, error) {
+	if err := conf.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	doPruning := func(stop <-chan struct{}) error {
+		err := conf.Facade.Prune(int(conf.MaxLogsPerEntity))
+		if err != nil {
+			return errors.Trace(err)
+		}
+		return nil
+	}
-func (w *pruneWorker) doPruning(stop <-chan struct{}) error {
-	err := w.pruner(w.st, w.params.MaxLogsPerState)
-	if err != nil {
-		return errors.Trace(err)
-	}
-	return nil
+	return worker.NewPeriodicWorker(doPruning, conf.PruneInterval, conf.NewTimer), nil
}

=== modified file 'src/github.com/juju/juju/worker/statushistorypruner/worker_test.go'
--- src/github.com/juju/juju/worker/statushistorypruner/worker_test.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/worker/statushistorypruner/worker_test.go	2016-03-22 15:18:22 +0000
@@ -10,19 +10,102 @@
	jc "github.com/juju/testing/checkers"
	gc "gopkg.in/check.v1"
-	"github.com/juju/juju/state"
	coretesting "github.com/juju/juju/testing"
	"github.com/juju/juju/worker"
	"github.com/juju/juju/worker/statushistorypruner"
)

+type statusHistoryPrunerSuite struct {
+	coretesting.BaseSuite
+}
+
+var _ = gc.Suite(&statusHistoryPrunerSuite{})
+
+func (s *statusHistoryPrunerSuite) TestWorkerCallsPrune(c *gc.C) {
+	fakeTimer := newMockTimer(coretesting.LongWait)
+
+	fakeTimerFunc := func(d time.Duration) worker.PeriodicTimer {
+		// the timer should be constructed with a duration of 0
+		// because we intend it to run once before waiting.
+		c.Assert(d, gc.Equals, 0*time.Nanosecond)
+		return fakeTimer
+	}
+	facade := newFakeFacade()
+	conf := statushistorypruner.Config{
+		Facade:           facade,
+		MaxLogsPerEntity: 3,
+		PruneInterval:    coretesting.ShortWait,
+		NewTimer:         fakeTimerFunc,
+	}
+
+	pruner, err := statushistorypruner.New(conf)
+	c.Check(err, jc.ErrorIsNil)
+	s.AddCleanup(func(*gc.C) {
+		c.Assert(worker.Stop(pruner), jc.ErrorIsNil)
+	})
+
+	err = fakeTimer.fire()
+	c.Check(err, jc.ErrorIsNil)
+
+	var passedLogs int
+	select {
+	case passedLogs = <-facade.passedMaxLogs:
+	case <-time.After(coretesting.LongWait):
+		c.Fatal("timed out waiting for passed logs to pruner")
+	}
+	c.Assert(passedLogs, gc.Equals, 3)
+
+	// Reset will have been called with the actual PruneInterval
+	var period time.Duration
+	select {
+	case period = <-fakeTimer.period:
+	case <-time.After(coretesting.LongWait):
+		c.Fatal("timed out waiting for period reset by pruner")
+	}
+	c.Assert(period, gc.Equals, coretesting.ShortWait)
+}
+
+func (s *statusHistoryPrunerSuite) TestWorkerWontCallPruneBeforeFiringTimer(c *gc.C) {
+	fakeTimer := newMockTimer(coretesting.LongWait)
+
+	fakeTimerFunc := func(d time.Duration) worker.PeriodicTimer {
+		// the timer should be constructed with a duration of 0
+		// because we intend it to run once before waiting.
+ c.Assert(d, gc.Equals, 0*time.Nanosecond) + return fakeTimer + } + facade := newFakeFacade() + conf := statushistorypruner.Config{ + Facade: facade, + MaxLogsPerEntity: 3, + PruneInterval: coretesting.ShortWait, + NewTimer: fakeTimerFunc, + } + + pruner, err := statushistorypruner.New(conf) + c.Check(err, jc.ErrorIsNil) + s.AddCleanup(func(*gc.C) { + c.Assert(worker.Stop(pruner), jc.ErrorIsNil) + }) + + select { + case <-facade.passedMaxLogs: + c.Fatal("called before firing timer.") + case <-time.After(coretesting.LongWait): + } +} + type mockTimer struct { - period time.Duration + period chan time.Duration c chan time.Time } func (t *mockTimer) Reset(d time.Duration) bool { - t.period = d + select { + case t.period <- d: + case <-time.After(coretesting.LongWait): + panic("timed out waiting for timer to reset") + } return true } @@ -39,49 +122,28 @@ return nil } -func newMockTimer(d time.Duration) worker.PeriodicTimer { - return &mockTimer{period: d, +func newMockTimer(d time.Duration) *mockTimer { + return &mockTimer{period: make(chan time.Duration, 1), c: make(chan time.Time), } } -var _ = gc.Suite(&statusHistoryPrunerSuite{}) - -type statusHistoryPrunerSuite struct { - coretesting.BaseSuite -} - -func (s *statusHistoryPrunerSuite) TestWorker(c *gc.C) { - var passedMaxLogs int - fakePruner := func(_ *state.State, maxLogs int) error { - passedMaxLogs = maxLogs - return nil - } - params := statushistorypruner.HistoryPrunerParams{ - MaxLogsPerState: 3, - PruneInterval: coretesting.ShortWait, - } - fakeTimer := newMockTimer(coretesting.LongWait) - - fakeTimerFunc := func(d time.Duration) worker.PeriodicTimer { - // construction of timer should be with 0 because we intend it to - // run once before waiting. - c.Assert(d, gc.Equals, 0*time.Nanosecond) - return fakeTimer - } - pruner := statushistorypruner.NewPruneWorker( - &state.State{}, - ¶ms, - fakeTimerFunc, - fakePruner, - ) - s.AddCleanup(func(*gc.C) { - pruner.Kill() - c.Assert(pruner.Wait(), jc.ErrorIsNil) - }) - err := fakeTimer.(*mockTimer).fire() - c.Check(err, jc.ErrorIsNil) - c.Assert(passedMaxLogs, gc.Equals, 3) - // Reset will have been called with the actual PruneInterval - c.Assert(fakeTimer.(*mockTimer).period, gc.Equals, coretesting.ShortWait) +type fakeFacade struct { + passedMaxLogs chan int +} + +func newFakeFacade() *fakeFacade { + return &fakeFacade{ + passedMaxLogs: make(chan int, 1), + } +} + +// Prune implements Facade +func (f *fakeFacade) Prune(maxLogs int) error { + select { + case f.passedMaxLogs <- maxLogs: + case <-time.After(coretesting.LongWait): + return errors.New("timed out waiting for facade call Prune to run") + } + return nil } === modified file 'src/github.com/juju/juju/worker/storageprovisioner/blockdevices.go' --- src/github.com/juju/juju/worker/storageprovisioner/blockdevices.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/blockdevices.go 2016-03-22 15:18:22 +0000 @@ -15,11 +15,18 @@ // machine have been seen to have changed. This triggers a refresh of all // block devices for attached volumes backing pending filesystems. 
func machineBlockDevicesChanged(ctx *context) error { - if len(ctx.pendingFilesystems) == 0 { + if len(ctx.incompleteFilesystemParams) == 0 { return nil } - volumeTags := make([]names.VolumeTag, 0, len(ctx.pendingFilesystems)) - for _, params := range ctx.pendingFilesystems { + volumeTags := make([]names.VolumeTag, 0, len(ctx.incompleteFilesystemParams)) + // We only need to query volumes for incomplete filesystems, + // and not incomplete filesystem attachments, because a + // filesystem attachment cannot exist without a filesystem. + // Therefore, the block device must have existed before + // the filesystem attachment. Upon restarting the worker, + // witnessing an already-provisioned filesystem will trigger + // a refresh of the block device for the backing volume. + for _, params := range ctx.incompleteFilesystemParams { if params.Volume == (names.VolumeTag{}) { // Filesystem is not volume-backed. continue @@ -56,7 +63,7 @@ // refreshVolumeBlockDevices refreshes the block devices for the specified // volumes. func refreshVolumeBlockDevices(ctx *context, volumeTags []names.VolumeTag) error { - machineTag, ok := ctx.scope.(names.MachineTag) + machineTag, ok := ctx.config.Scope.(names.MachineTag) if !ok { // This function should only be called by machine-scoped // storage provisioners. @@ -69,13 +76,27 @@ AttachmentTag: volumeTag.String(), } } - results, err := ctx.volumeAccessor.VolumeBlockDevices(ids) + results, err := ctx.config.Volumes.VolumeBlockDevices(ids) if err != nil { return errors.Annotate(err, "refreshing volume block devices") } for i, result := range results { if result.Error == nil { ctx.volumeBlockDevices[volumeTags[i]] = result.Result + for _, params := range ctx.incompleteFilesystemParams { + if params.Volume == volumeTags[i] { + updatePendingFilesystem(ctx, params) + } + } + for id, params := range ctx.incompleteFilesystemAttachmentParams { + filesystem, ok := ctx.filesystems[params.Filesystem] + if !ok { + continue + } + if filesystem.Volume == volumeTags[i] { + updatePendingFilesystemAttachment(ctx, id, params) + } + } } else if params.IsCodeNotProvisioned(result.Error) || params.IsCodeNotFound(result.Error) { // Either the volume (attachment) isn't provisioned, // or the corresponding block device is not yet known. === modified file 'src/github.com/juju/juju/worker/storageprovisioner/common.go' --- src/github.com/juju/juju/worker/storageprovisioner/common.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/common.go 2016-03-22 15:18:22 +0000 @@ -13,13 +13,14 @@ "github.com/juju/juju/environs/config" "github.com/juju/juju/storage" "github.com/juju/juju/storage/provider/registry" + "github.com/juju/juju/watcher" ) // storageEntityLife queries the lifecycle state of each specified // storage entity (volume or filesystem), and then partitions the // tags by them. 
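The storageEntityLife helper declared just below partitions tags by lifecycle state, and that three-way split drives the handlers later in this diff. A reduced, self-contained sketch of the shape, with simplified stand-in types:

package main

import "fmt"

type life string

const (
	alive life = "alive"
	dying life = "dying"
	dead  life = "dead"
)

// partitionByLife mirrors storageEntityLife's shape: a single pass
// that buckets each tag by its reported lifecycle state.
func partitionByLife(tags []string, lookup map[string]life) (aliveTags, dyingTags, deadTags []string) {
	for _, tag := range tags {
		switch lookup[tag] {
		case alive:
			aliveTags = append(aliveTags, tag)
		case dying:
			dyingTags = append(dyingTags, tag)
		case dead:
			deadTags = append(deadTags, tag)
		}
	}
	return aliveTags, dyingTags, deadTags
}

func main() {
	lookup := map[string]life{"filesystem-0": alive, "filesystem-1": dying, "filesystem-2": dead}
	a, dy, de := partitionByLife([]string{"filesystem-0", "filesystem-1", "filesystem-2"}, lookup)
	fmt.Println(a, dy, de) // [filesystem-0] [filesystem-1] [filesystem-2]
}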
func storageEntityLife(ctx *context, tags []names.Tag) (alive, dying, dead []names.Tag, _ error) { - lifeResults, err := ctx.life.Life(tags) + lifeResults, err := ctx.config.Life.Life(tags) if err != nil { return nil, nil, nil, errors.Annotate(err, "getting storage entity life") } @@ -47,7 +48,7 @@ func attachmentLife(ctx *context, ids []params.MachineStorageId) ( alive, dying, dead []params.MachineStorageId, _ error, ) { - lifeResults, err := ctx.life.AttachmentLife(ids) + lifeResults, err := ctx.config.Life.AttachmentLife(ids) if err != nil { return nil, nil, nil, errors.Annotate(err, "getting machine attachment life") } @@ -76,7 +77,7 @@ return nil } logger.Debugf("removing entities: %v", tags) - errorResults, err := ctx.life.Remove(tags) + errorResults, err := ctx.config.Life.Remove(tags) if err != nil { return errors.Annotate(err, "removing storage entities") } @@ -93,7 +94,7 @@ if len(ids) == 0 { return nil } - errorResults, err := ctx.life.RemoveAttachments(ids) + errorResults, err := ctx.config.Life.RemoveAttachments(ids) if err != nil { return errors.Annotate(err, "removing attachments") } @@ -112,7 +113,7 @@ // the status fails the error is logged but otherwise ignored. func setStatus(ctx *context, statuses []params.EntityStatusArgs) { if len(statuses) > 0 { - if err := ctx.statusSetter.SetStatus(statuses); err != nil { + if err := ctx.config.Status.SetStatus(statuses); err != nil { logger.Errorf("failed to set status: %v", err) } } @@ -187,3 +188,14 @@ } return provider, sourceConfig, nil } + +func copyMachineStorageIds(src []watcher.MachineStorageId) []params.MachineStorageId { + dst := make([]params.MachineStorageId, len(src)) + for i, msid := range src { + dst[i] = params.MachineStorageId{ + MachineTag: msid.MachineTag, + AttachmentTag: msid.AttachmentTag, + } + } + return dst +} === added file 'src/github.com/juju/juju/worker/storageprovisioner/config.go' --- src/github.com/juju/juju/worker/storageprovisioner/config.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,63 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/utils/clock" +) + +// Config holds configuration and dependencies for a storageprovisioner worker. +type Config struct { + Scope names.Tag + StorageDir string + Volumes VolumeAccessor + Filesystems FilesystemAccessor + Life LifecycleManager + Environ ModelAccessor + Machines MachineAccessor + Status StatusSetter + Clock clock.Clock +} + +// Validate returns an error if the config cannot be relied upon to start a worker. 
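The contract is easiest to see against a zero value; a minimal sketch (the expected message matches the config_test.go expectations below):

package main

import (
	"fmt"

	"github.com/juju/juju/worker/storageprovisioner"
)

func main() {
	// A zero-valued Config has a nil Scope, so validation fails fast.
	var config storageprovisioner.Config
	fmt.Println(config.Validate()) // nil Scope not valid
}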
+func (config Config) Validate() error { + switch config.Scope.(type) { + case nil: + return errors.NotValidf("nil Scope") + case names.ModelTag: + if config.StorageDir != "" { + return errors.NotValidf("environ Scope with non-empty StorageDir") + } + case names.MachineTag: + if config.StorageDir == "" { + return errors.NotValidf("machine Scope with empty StorageDir") + } + default: + return errors.NotValidf("%T Scope", config.Scope) + } + if config.Volumes == nil { + return errors.NotValidf("nil Volumes") + } + if config.Filesystems == nil { + return errors.NotValidf("nil Filesystems") + } + if config.Life == nil { + return errors.NotValidf("nil Life") + } + if config.Environ == nil { + return errors.NotValidf("nil Environ") + } + if config.Machines == nil { + return errors.NotValidf("nil Machines") + } + if config.Status == nil { + return errors.NotValidf("nil Status") + } + if config.Clock == nil { + return errors.NotValidf("nil Clock") + } + return nil +} === added file 'src/github.com/juju/juju/worker/storageprovisioner/config_test.go' --- src/github.com/juju/juju/worker/storageprovisioner/config_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/config_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,134 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner_test + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/storageprovisioner" +) + +type ConfigSuite struct { + testing.IsolationSuite + + // This is a bit unexpected: these tests should mutate the stored + // config, and then call the checkNotValid method. + config storageprovisioner.Config +} + +var _ = gc.Suite(&ConfigSuite{}) + +func (s *ConfigSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.config = validEnvironConfig() +} + +func (s *ConfigSuite) TestNilScope(c *gc.C) { + s.config.Scope = nil + s.checkNotValid(c, "nil Scope not valid") +} + +func (s *ConfigSuite) TestInvalidScope(c *gc.C) { + s.config.Scope = names.NewServiceTag("boo") + s.checkNotValid(c, ".* Scope not valid") +} + +func (s *ConfigSuite) TestEnvironScopeStorageDir(c *gc.C) { + s.config.StorageDir = "surprise!" 
+ s.checkNotValid(c, "environ Scope with non-empty StorageDir not valid") +} + +func (s *ConfigSuite) TestMachineScopeStorageDir(c *gc.C) { + s.config = validMachineConfig() + s.config.StorageDir = "" + s.checkNotValid(c, "machine Scope with empty StorageDir not valid") +} + +func (s *ConfigSuite) TestNilVolumes(c *gc.C) { + s.config.Volumes = nil + s.checkNotValid(c, "nil Volumes not valid") +} + +func (s *ConfigSuite) TestNilFilesystems(c *gc.C) { + s.config.Filesystems = nil + s.checkNotValid(c, "nil Filesystems not valid") +} + +func (s *ConfigSuite) TestNilLife(c *gc.C) { + s.config.Life = nil + s.checkNotValid(c, "nil Life not valid") +} + +func (s *ConfigSuite) TestNilEnviron(c *gc.C) { + s.config.Environ = nil + s.checkNotValid(c, "nil Environ not valid") +} + +func (s *ConfigSuite) TestNilMachines(c *gc.C) { + s.config.Machines = nil + s.checkNotValid(c, "nil Machines not valid") +} + +func (s *ConfigSuite) TestNilStatus(c *gc.C) { + s.config.Status = nil + s.checkNotValid(c, "nil Status not valid") +} + +func (s *ConfigSuite) TestNilClock(c *gc.C) { + s.config.Clock = nil + s.checkNotValid(c, "nil Clock not valid") +} + +func (s *ConfigSuite) checkNotValid(c *gc.C, match string) { + err := s.config.Validate() + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, match) +} + +func validEnvironConfig() storageprovisioner.Config { + config := almostValidConfig() + config.Scope = coretesting.ModelTag + return config +} + +func validMachineConfig() storageprovisioner.Config { + config := almostValidConfig() + config.Scope = names.NewMachineTag("123/lxc/7") + config.StorageDir = "storage-dir" + return config +} + +func almostValidConfig() storageprovisioner.Config { + // gofmt doesn't seem to want to let me one-line any of these + // except the last one, so I'm standardising on multi-line. + return storageprovisioner.Config{ + Volumes: struct { + storageprovisioner.VolumeAccessor + }{}, + Filesystems: struct { + storageprovisioner.FilesystemAccessor + }{}, + Life: struct { + storageprovisioner.LifecycleManager + }{}, + Environ: struct { + storageprovisioner.ModelAccessor + }{}, + Machines: struct { + storageprovisioner.MachineAccessor + }{}, + Status: struct { + storageprovisioner.StatusSetter + }{}, + Clock: struct { + clock.Clock + }{}, + } +} === added file 'src/github.com/juju/juju/worker/storageprovisioner/filesystem_events.go' --- src/github.com/juju/juju/worker/storageprovisioner/filesystem_events.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/filesystem_events.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,488 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/instance" + "github.com/juju/juju/storage" + "github.com/juju/juju/watcher" +) + +// filesystemsChanged is called when the lifecycle states of the filesystems +// with the provided IDs have been seen to have changed. 
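The handler declared below batches the alive, dying, and dead tags into a single query and then splits the results back by count. That slicing idiom in isolation:

package main

import "fmt"

func main() {
	alive := []string{"filesystem-0", "filesystem-1"}
	dying := []string{"filesystem-2"}
	dead := []string{"filesystem-3"}

	// One batched query preserves ordering: alive, then dying, then dead.
	all := append(append(append([]string{}, alive...), dying...), dead...)
	results := make([]string, len(all)) // stand-in for the API results

	// Split the flat result slice back into groups by count.
	aliveResults := results[:len(alive)]
	dyingResults := results[len(alive) : len(alive)+len(dying)]
	deadResults := results[len(alive)+len(dying):]
	fmt.Println(len(aliveResults), len(dyingResults), len(deadResults)) // 2 1 1
}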
+func filesystemsChanged(ctx *context, changes []string) error { + tags := make([]names.Tag, len(changes)) + for i, change := range changes { + tags[i] = names.NewFilesystemTag(change) + } + alive, dying, dead, err := storageEntityLife(ctx, tags) + if err != nil { + return errors.Trace(err) + } + logger.Debugf("filesystems alive: %v, dying: %v, dead: %v", alive, dying, dead) + if len(alive)+len(dying)+len(dead) == 0 { + return nil + } + + // Get filesystem information for filesystems, so we can provision, + // deprovision, attach and detach. + filesystemTags := make([]names.FilesystemTag, 0, len(alive)+len(dying)+len(dead)) + for _, tag := range alive { + filesystemTags = append(filesystemTags, tag.(names.FilesystemTag)) + } + for _, tag := range dying { + filesystemTags = append(filesystemTags, tag.(names.FilesystemTag)) + } + for _, tag := range dead { + filesystemTags = append(filesystemTags, tag.(names.FilesystemTag)) + } + filesystemResults, err := ctx.config.Filesystems.Filesystems(filesystemTags) + if err != nil { + return errors.Annotatef(err, "getting filesystem information") + } + + aliveFilesystemTags := filesystemTags[:len(alive)] + dyingFilesystemTags := filesystemTags[len(alive) : len(alive)+len(dying)] + deadFilesystemTags := filesystemTags[len(alive)+len(dying):] + aliveFilesystemResults := filesystemResults[:len(alive)] + dyingFilesystemResults := filesystemResults[len(alive) : len(alive)+len(dying)] + deadFilesystemResults := filesystemResults[len(alive)+len(dying):] + + if err := processDeadFilesystems(ctx, deadFilesystemTags, deadFilesystemResults); err != nil { + return errors.Annotate(err, "deprovisioning filesystems") + } + if err := processDyingFilesystems(ctx, dyingFilesystemTags, dyingFilesystemResults); err != nil { + return errors.Annotate(err, "processing dying filesystems") + } + if err := processAliveFilesystems(ctx, aliveFilesystemTags, aliveFilesystemResults); err != nil { + return errors.Annotate(err, "provisioning filesystems") + } + return nil +} + +// filesystemAttachmentsChanged is called when the lifecycle states of the filesystem +// attachments with the provided IDs have been seen to have changed. +func filesystemAttachmentsChanged(ctx *context, watcherIds []watcher.MachineStorageId) error { + ids := copyMachineStorageIds(watcherIds) + alive, dying, dead, err := attachmentLife(ctx, ids) + if err != nil { + return errors.Trace(err) + } + logger.Debugf("filesystem attachment alive: %v, dying: %v, dead: %v", alive, dying, dead) + if len(dead) != 0 { + // We should not see dead filesystem attachments; + // attachments go directly from Dying to removed. + logger.Warningf("unexpected dead filesystem attachments: %v", dead) + } + if len(alive)+len(dying) == 0 { + return nil + } + + // Get filesystem information for alive and dying filesystem attachments, so + // we can attach/detach. + ids = append(alive, dying...) + filesystemAttachmentResults, err := ctx.config.Filesystems.FilesystemAttachments(ids) + if err != nil { + return errors.Annotatef(err, "getting filesystem attachment information") + } + + // Deprovision Dying filesystem attachments. + dyingFilesystemAttachmentResults := filesystemAttachmentResults[len(alive):] + if err := processDyingFilesystemAttachments(ctx, dying, dyingFilesystemAttachmentResults); err != nil { + return errors.Annotate(err, "destroying filesystem attachments") + } + + // Provision Alive filesystem attachments. 
+ aliveFilesystemAttachmentResults := filesystemAttachmentResults[:len(alive)] + if err := processAliveFilesystemAttachments(ctx, alive, aliveFilesystemAttachmentResults); err != nil { + return errors.Annotate(err, "creating filesystem attachments") + } + + return nil +} + +// processDyingFilesystems processes the FilesystemResults for Dying filesystems, +// removing them from provisioning-pending as necessary. +func processDyingFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error { + for _, tag := range tags { + removePendingFilesystem(ctx, tag) + } + return nil +} + +func updateFilesystem(ctx *context, info storage.Filesystem) { + ctx.filesystems[info.Tag] = info + for id, params := range ctx.incompleteFilesystemAttachmentParams { + if params.FilesystemId == "" && id.AttachmentTag == info.Tag.String() { + updatePendingFilesystemAttachment(ctx, id, params) + } + } +} + +func updatePendingFilesystem(ctx *context, params storage.FilesystemParams) { + if params.Volume != (names.VolumeTag{}) { + // The filesystem is volume-backed: we must watch for + // the corresponding block device. This will trigger a + // one-time (for the volume) forced update of block + // devices. If the block device is not immediately + // available, then we rely on the watcher. The forced + // update is necessary in case the block device was + // added to state already, and we didn't observe it. + if _, ok := ctx.volumeBlockDevices[params.Volume]; !ok { + ctx.pendingVolumeBlockDevices.Add(params.Volume) + ctx.incompleteFilesystemParams[params.Tag] = params + return + } + } + delete(ctx.incompleteFilesystemParams, params.Tag) + scheduleOperations(ctx, &createFilesystemOp{args: params}) +} + +func removePendingFilesystem(ctx *context, tag names.FilesystemTag) { + delete(ctx.incompleteFilesystemParams, tag) + ctx.schedule.Remove(tag) +} + +// updatePendingFilesystemAttachment adds the given filesystem attachment params to +// either the incomplete set or the schedule. If the params are incomplete +// due to a missing instance ID, updatePendingFilesystemAttachment will request +// that the machine be watched so its instance ID can be learned. +func updatePendingFilesystemAttachment( + ctx *context, + id params.MachineStorageId, + params storage.FilesystemAttachmentParams, +) { + var incomplete bool + filesystem, ok := ctx.filesystems[params.Filesystem] + if !ok { + incomplete = true + } else { + params.FilesystemId = filesystem.FilesystemId + if filesystem.Volume != (names.VolumeTag{}) { + // The filesystem is volume-backed: if the filesystem + // was created in another session, then the block device + // may not have been seen yet. We must wait for the block + // device watcher to trigger. + if _, ok := ctx.volumeBlockDevices[filesystem.Volume]; !ok { + incomplete = true + } + } + } + if params.InstanceId == "" { + watchMachine(ctx, params.Machine) + incomplete = true + } + if params.FilesystemId == "" { + incomplete = true + } + if incomplete { + ctx.incompleteFilesystemAttachmentParams[id] = params + return + } + delete(ctx.incompleteFilesystemAttachmentParams, id) + scheduleOperations(ctx, &attachFilesystemOp{args: params}) +} + +// removePendingFilesystemAttachment removes the specified pending filesystem +// attachment from the incomplete set and/or the schedule if it exists +// there. 
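The update/removePending helpers around this point implement two-stage bookkeeping: parameters wait in an incomplete set until every prerequisite is known, then move to the operation schedule. A reduced model of that shape, with simplified stand-in types:

package main

import "fmt"

type attachParams struct {
	filesystemID string
	instanceID   string
}

type pending struct {
	incomplete map[string]attachParams
	scheduled  []string
}

// updatePending mirrors updatePendingFilesystemAttachment's shape:
// params with a missing prerequisite stay in the incomplete set;
// complete params are removed from it and scheduled.
func (p *pending) updatePending(id string, params attachParams) {
	if params.filesystemID == "" || params.instanceID == "" {
		p.incomplete[id] = params // keep waiting for the missing detail
		return
	}
	delete(p.incomplete, id)
	p.scheduled = append(p.scheduled, id) // ready for attachment
}

func main() {
	p := &pending{incomplete: make(map[string]attachParams)}
	p.updatePending("machine-0:filesystem-0", attachParams{})
	p.updatePending("machine-0:filesystem-0", attachParams{filesystemID: "fs-0", instanceID: "i-123"})
	fmt.Println(len(p.incomplete), p.scheduled) // 0 [machine-0:filesystem-0]
}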
+func removePendingFilesystemAttachment(ctx *context, id params.MachineStorageId) { + delete(ctx.incompleteFilesystemAttachmentParams, id) + ctx.schedule.Remove(id) +} + +// processDeadFilesystems processes the FilesystemResults for Dead filesystems, +// deprovisioning filesystems and removing from state as necessary. +func processDeadFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error { + for _, tag := range tags { + removePendingFilesystem(ctx, tag) + } + var destroy []names.FilesystemTag + var remove []names.Tag + for i, result := range filesystemResults { + tag := tags[i] + if result.Error == nil { + logger.Debugf("filesystem %s is provisioned, queuing for deprovisioning", tag.Id()) + filesystem, err := filesystemFromParams(result.Result) + if err != nil { + return errors.Annotate(err, "getting filesystem info") + } + updateFilesystem(ctx, filesystem) + destroy = append(destroy, tag) + continue + } + if params.IsCodeNotProvisioned(result.Error) { + logger.Debugf("filesystem %s is not provisioned, queuing for removal", tag.Id()) + remove = append(remove, tag) + continue + } + return errors.Annotatef(result.Error, "getting filesystem information for filesystem %s", tag.Id()) + } + if len(destroy) > 0 { + ops := make([]scheduleOp, len(destroy)) + for i, tag := range destroy { + ops[i] = &destroyFilesystemOp{tag: tag} + } + scheduleOperations(ctx, ops...) + } + if err := removeEntities(ctx, remove); err != nil { + return errors.Annotate(err, "removing filesystems from state") + } + return nil +} + +// processDyingFilesystemAttachments processes the FilesystemAttachmentResults for +// Dying filesystem attachments, detaching filesystems and updating state as necessary. +func processDyingFilesystemAttachments( + ctx *context, + ids []params.MachineStorageId, + filesystemAttachmentResults []params.FilesystemAttachmentResult, +) error { + for _, id := range ids { + removePendingFilesystemAttachment(ctx, id) + } + detach := make([]params.MachineStorageId, 0, len(ids)) + remove := make([]params.MachineStorageId, 0, len(ids)) + for i, result := range filesystemAttachmentResults { + id := ids[i] + if result.Error == nil { + detach = append(detach, id) + continue + } + if params.IsCodeNotProvisioned(result.Error) { + remove = append(remove, id) + continue + } + return errors.Annotatef(result.Error, "getting information for filesystem attachment %v", id) + } + if len(detach) > 0 { + attachmentParams, err := filesystemAttachmentParams(ctx, detach) + if err != nil { + return errors.Trace(err) + } + ops := make([]scheduleOp, len(attachmentParams)) + for i, p := range attachmentParams { + ops[i] = &detachFilesystemOp{args: p} + } + scheduleOperations(ctx, ops...) + } + if err := removeAttachments(ctx, remove); err != nil { + return errors.Annotate(err, "removing attachments from state") + } + return nil +} + +// processAliveFilesystems processes the FilesystemResults for Alive filesystems, +// provisioning filesystems and setting the info in state as necessary. +func processAliveFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error { + // Filter out the already-provisioned filesystems. + pending := make([]names.FilesystemTag, 0, len(tags)) + for i, result := range filesystemResults { + tag := tags[i] + if result.Error == nil { + // Filesystem is already provisioned: skip. 
+ logger.Debugf("filesystem %q is already provisioned, nothing to do", tag.Id()) + filesystem, err := filesystemFromParams(result.Result) + if err != nil { + return errors.Annotate(err, "getting filesystem info") + } + updateFilesystem(ctx, filesystem) + if filesystem.Volume != (names.VolumeTag{}) { + // Ensure that volume-backed filesystems' block + // devices are present even after creating the + // filesystem, so that attachments can be made. + maybeAddPendingVolumeBlockDevice(ctx, filesystem.Volume) + } + continue + } + if !params.IsCodeNotProvisioned(result.Error) { + return errors.Annotatef( + result.Error, "getting filesystem information for filesystem %q", tag.Id(), + ) + } + // The filesystem has not yet been provisioned, so record its tag + // to enquire about parameters below. + pending = append(pending, tag) + } + if len(pending) == 0 { + return nil + } + params, err := filesystemParams(ctx, pending) + if err != nil { + return errors.Annotate(err, "getting filesystem params") + } + for _, params := range params { + updatePendingFilesystem(ctx, params) + } + return nil +} + +func maybeAddPendingVolumeBlockDevice(ctx *context, v names.VolumeTag) { + if _, ok := ctx.volumeBlockDevices[v]; !ok { + ctx.pendingVolumeBlockDevices.Add(v) + } +} + +// processAliveFilesystemAttachments processes the FilesystemAttachmentResults +// for Alive filesystem attachments, attaching filesystems and setting the info +// in state as necessary. +func processAliveFilesystemAttachments( + ctx *context, + ids []params.MachineStorageId, + filesystemAttachmentResults []params.FilesystemAttachmentResult, +) error { + // Filter out the already-attached. + pending := make([]params.MachineStorageId, 0, len(ids)) + for i, result := range filesystemAttachmentResults { + if result.Error == nil { + // Filesystem attachment is already provisioned: if we + // didn't (re)attach in this session, then we must do + // so now. + action := "nothing to do" + if _, ok := ctx.filesystemAttachments[ids[i]]; !ok { + // Not yet (re)attached in this session. + pending = append(pending, ids[i]) + action = "will reattach" + } + logger.Debugf( + "%s is already attached to %s, %s", + ids[i].AttachmentTag, ids[i].MachineTag, action, + ) + removePendingFilesystemAttachment(ctx, ids[i]) + continue + } + if !params.IsCodeNotProvisioned(result.Error) { + return errors.Annotatef( + result.Error, "getting information for attachment %v", ids[i], + ) + } + // The filesystem has not yet been attached, so + // record its tag to enquire about parameters below. + pending = append(pending, ids[i]) + } + if len(pending) == 0 { + return nil + } + params, err := filesystemAttachmentParams(ctx, pending) + if err != nil { + return errors.Trace(err) + } + for i, params := range params { + updatePendingFilesystemAttachment(ctx, pending[i], params) + } + return nil +} + +// filesystemAttachmentParams obtains the specified attachments' parameters. 
+func filesystemAttachmentParams( + ctx *context, ids []params.MachineStorageId, +) ([]storage.FilesystemAttachmentParams, error) { + paramsResults, err := ctx.config.Filesystems.FilesystemAttachmentParams(ids) + if err != nil { + return nil, errors.Annotate(err, "getting filesystem attachment params") + } + attachmentParams := make([]storage.FilesystemAttachmentParams, len(ids)) + for i, result := range paramsResults { + if result.Error != nil { + return nil, errors.Annotate(result.Error, "getting filesystem attachment parameters") + } + params, err := filesystemAttachmentParamsFromParams(result.Result) + if err != nil { + return nil, errors.Annotate(err, "getting filesystem attachment parameters") + } + attachmentParams[i] = params + } + return attachmentParams, nil +} + +// filesystemParams obtains the specified filesystems' parameters. +func filesystemParams(ctx *context, tags []names.FilesystemTag) ([]storage.FilesystemParams, error) { + paramsResults, err := ctx.config.Filesystems.FilesystemParams(tags) + if err != nil { + return nil, errors.Annotate(err, "getting filesystem params") + } + allParams := make([]storage.FilesystemParams, len(tags)) + for i, result := range paramsResults { + if result.Error != nil { + return nil, errors.Annotate(result.Error, "getting filesystem parameters") + } + params, err := filesystemParamsFromParams(result.Result) + if err != nil { + return nil, errors.Annotate(err, "getting filesystem parameters") + } + allParams[i] = params + } + return allParams, nil +} + +func filesystemFromParams(in params.Filesystem) (storage.Filesystem, error) { + filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) + if err != nil { + return storage.Filesystem{}, errors.Trace(err) + } + var volumeTag names.VolumeTag + if in.VolumeTag != "" { + volumeTag, err = names.ParseVolumeTag(in.VolumeTag) + if err != nil { + return storage.Filesystem{}, errors.Trace(err) + } + } + return storage.Filesystem{ + filesystemTag, + volumeTag, + storage.FilesystemInfo{ + in.Info.FilesystemId, + in.Info.Size, + }, + }, nil +} + +func filesystemParamsFromParams(in params.FilesystemParams) (storage.FilesystemParams, error) { + filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) + if err != nil { + return storage.FilesystemParams{}, errors.Trace(err) + } + var volumeTag names.VolumeTag + if in.VolumeTag != "" { + volumeTag, err = names.ParseVolumeTag(in.VolumeTag) + if err != nil { + return storage.FilesystemParams{}, errors.Trace(err) + } + } + providerType := storage.ProviderType(in.Provider) + return storage.FilesystemParams{ + filesystemTag, + volumeTag, + in.Size, + providerType, + in.Attributes, + in.Tags, + }, nil +} + +func filesystemAttachmentParamsFromParams(in params.FilesystemAttachmentParams) (storage.FilesystemAttachmentParams, error) { + machineTag, err := names.ParseMachineTag(in.MachineTag) + if err != nil { + return storage.FilesystemAttachmentParams{}, errors.Trace(err) + } + filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) + if err != nil { + return storage.FilesystemAttachmentParams{}, errors.Trace(err) + } + return storage.FilesystemAttachmentParams{ + AttachmentParams: storage.AttachmentParams{ + Provider: storage.ProviderType(in.Provider), + Machine: machineTag, + InstanceId: instance.Id(in.InstanceId), + ReadOnly: in.ReadOnly, + }, + Filesystem: filesystemTag, + FilesystemId: in.FilesystemId, + Path: in.MountPoint, + }, nil +} === added file 'src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go' --- 
src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/filesystem_ops.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,533 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner + +import ( + "path/filepath" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/storage" +) + +// createFilesystems creates filesystems with the specified parameters. +func createFilesystems(ctx *context, ops map[names.FilesystemTag]*createFilesystemOp) error { + filesystemParams := make([]storage.FilesystemParams, 0, len(ops)) + for _, op := range ops { + filesystemParams = append(filesystemParams, op.args) + } + paramsBySource, filesystemSources, err := filesystemParamsBySource( + ctx.modelConfig, ctx.config.StorageDir, + filesystemParams, ctx.managedFilesystemSource, + ) + if err != nil { + return errors.Trace(err) + } + var reschedule []scheduleOp + var filesystems []storage.Filesystem + var statuses []params.EntityStatusArgs + for sourceName, filesystemParams := range paramsBySource { + logger.Debugf("creating filesystems: %v", filesystemParams) + filesystemSource := filesystemSources[sourceName] + validFilesystemParams, validationErrors := validateFilesystemParams( + filesystemSource, filesystemParams, + ) + for i, err := range validationErrors { + if err == nil { + continue + } + statuses = append(statuses, params.EntityStatusArgs{ + Tag: filesystemParams[i].Tag.String(), + Status: params.StatusError, + Info: err.Error(), + }) + logger.Debugf( + "failed to validate parameters for %s: %v", + names.ReadableString(filesystemParams[i].Tag), err, + ) + } + filesystemParams = validFilesystemParams + if len(filesystemParams) == 0 { + continue + } + results, err := filesystemSource.CreateFilesystems(filesystemParams) + if err != nil { + return errors.Annotatef(err, "creating filesystems from source %q", sourceName) + } + for i, result := range results { + statuses = append(statuses, params.EntityStatusArgs{ + Tag: filesystemParams[i].Tag.String(), + Status: params.StatusAttaching, + }) + status := &statuses[len(statuses)-1] + if result.Error != nil { + // Reschedule the filesystem creation. + reschedule = append(reschedule, ops[filesystemParams[i].Tag]) + + // Note: we keep the status as "pending" to indicate + // that we will retry. When we distinguish between + // transient and permanent errors, we will set the + // status to "error" for permanent errors. + status.Status = params.StatusPending + status.Info = result.Error.Error() + logger.Debugf( + "failed to create %s: %v", + names.ReadableString(filesystemParams[i].Tag), + result.Error, + ) + continue + } + filesystems = append(filesystems, *result.Filesystem) + } + } + scheduleOperations(ctx, reschedule...) + setStatus(ctx, statuses) + if len(filesystems) == 0 { + return nil + } + // TODO(axw) we need to be able to list filesystems in the provider, + // by environment, so that we can "harvest" them if they're + // unknown. This will take care of killing filesystems that we fail + // to record in state. 
+ errorResults, err := ctx.config.Filesystems.SetFilesystemInfo(filesystemsFromStorage(filesystems)) + if err != nil { + return errors.Annotate(err, "publishing filesystems to state") + } + for i, result := range errorResults { + if result.Error != nil { + logger.Errorf( + "publishing filesystem %s to state: %v", + filesystems[i].Tag.Id(), + result.Error, + ) + } + } + for _, v := range filesystems { + updateFilesystem(ctx, v) + } + return nil +} + +// attachFilesystems creates filesystem attachments with the specified parameters. +func attachFilesystems(ctx *context, ops map[params.MachineStorageId]*attachFilesystemOp) error { + filesystemAttachmentParams := make([]storage.FilesystemAttachmentParams, 0, len(ops)) + for _, op := range ops { + args := op.args + if args.Path == "" { + args.Path = filepath.Join(ctx.config.StorageDir, args.Filesystem.Id()) + } + filesystemAttachmentParams = append(filesystemAttachmentParams, args) + } + paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource( + ctx.modelConfig, + ctx.config.StorageDir, + filesystemAttachmentParams, + ctx.filesystems, + ctx.managedFilesystemSource, + ) + if err != nil { + return errors.Trace(err) + } + var reschedule []scheduleOp + var filesystemAttachments []storage.FilesystemAttachment + var statuses []params.EntityStatusArgs + for sourceName, filesystemAttachmentParams := range paramsBySource { + logger.Debugf("attaching filesystems: %+v", filesystemAttachmentParams) + filesystemSource := filesystemSources[sourceName] + results, err := filesystemSource.AttachFilesystems(filesystemAttachmentParams) + if err != nil { + return errors.Annotatef(err, "attaching filesystems from source %q", sourceName) + } + for i, result := range results { + p := filesystemAttachmentParams[i] + statuses = append(statuses, params.EntityStatusArgs{ + Tag: p.Filesystem.String(), + Status: params.StatusAttached, + }) + status := &statuses[len(statuses)-1] + if result.Error != nil { + // Reschedule the filesystem attachment. + id := params.MachineStorageId{ + MachineTag: p.Machine.String(), + AttachmentTag: p.Filesystem.String(), + } + reschedule = append(reschedule, ops[id]) + + // Note: we keep the status as "attaching" to + // indicate that we will retry. When we distinguish + // between transient and permanent errors, we will + // set the status to "error" for permanent errors. + status.Status = params.StatusAttaching + status.Info = result.Error.Error() + logger.Debugf( + "failed to attach %s to %s: %v", + names.ReadableString(p.Filesystem), + names.ReadableString(p.Machine), + result.Error, + ) + continue + } + filesystemAttachments = append(filesystemAttachments, *result.FilesystemAttachment) + } + } + scheduleOperations(ctx, reschedule...) + setStatus(ctx, statuses) + if err := setFilesystemAttachmentInfo(ctx, filesystemAttachments); err != nil { + return errors.Trace(err) + } + return nil +} + +// destroyFilesystems destroys filesystems with the specified parameters. 
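createFilesystems and attachFilesystems above, and destroyFilesystems below, share one retry shape: a failed result re-queues its scheduleOp and records a status entry instead of aborting the batch. Schematically, with simplified stand-in types:

package main

import "fmt"

type opResult struct {
	tag string
	err error
}

// splitResults mirrors the loop shape shared by the filesystem ops:
// failures are queued for reschedule and reported as status entries,
// successes are collected for publication to state.
func splitResults(results []opResult) (done, reschedule, statuses []string) {
	for _, r := range results {
		if r.err != nil {
			reschedule = append(reschedule, r.tag) // retried later, not fatal
			statuses = append(statuses, r.tag+": "+r.err.Error())
			continue
		}
		done = append(done, r.tag)
	}
	return done, reschedule, statuses
}

func main() {
	done, retry, statuses := splitResults([]opResult{
		{tag: "filesystem-0"},
		{tag: "filesystem-1", err: fmt.Errorf("provider unavailable")},
	})
	fmt.Println(done, retry, statuses)
}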
+func destroyFilesystems(ctx *context, ops map[names.FilesystemTag]*destroyFilesystemOp) error { + tags := make([]names.FilesystemTag, 0, len(ops)) + for tag := range ops { + tags = append(tags, tag) + } + filesystemParams, err := filesystemParams(ctx, tags) + if err != nil { + return errors.Trace(err) + } + paramsBySource, filesystemSources, err := filesystemParamsBySource( + ctx.modelConfig, ctx.config.StorageDir, + filesystemParams, ctx.managedFilesystemSource, + ) + if err != nil { + return errors.Trace(err) + } + var remove []names.Tag + var reschedule []scheduleOp + var statuses []params.EntityStatusArgs + for sourceName, filesystemParams := range paramsBySource { + logger.Debugf("destroying filesystems from %q: %v", sourceName, filesystemParams) + filesystemSource := filesystemSources[sourceName] + validFilesystemParams, validationErrors := validateFilesystemParams(filesystemSource, filesystemParams) + for i, err := range validationErrors { + if err == nil { + continue + } + statuses = append(statuses, params.EntityStatusArgs{ + Tag: filesystemParams[i].Tag.String(), + Status: params.StatusError, + Info: err.Error(), + }) + logger.Debugf( + "failed to validate parameters for %s: %v", + names.ReadableString(filesystemParams[i].Tag), err, + ) + } + filesystemParams = validFilesystemParams + if len(filesystemParams) == 0 { + continue + } + filesystemIds := make([]string, len(filesystemParams)) + for i, filesystemParams := range filesystemParams { + filesystem, ok := ctx.filesystems[filesystemParams.Tag] + if !ok { + return errors.NotFoundf("filesystem %s", filesystemParams.Tag.Id()) + } + filesystemIds[i] = filesystem.FilesystemId + } + errs, err := filesystemSource.DestroyFilesystems(filesystemIds) + if err != nil { + return errors.Trace(err) + } + for i, err := range errs { + tag := filesystemParams[i].Tag + if err == nil { + remove = append(remove, tag) + continue + } + // Failed to destroy filesystem; reschedule and update status. + reschedule = append(reschedule, ops[tag]) + statuses = append(statuses, params.EntityStatusArgs{ + Tag: tag.String(), + Status: params.StatusDestroying, + Info: err.Error(), + }) + } + } + scheduleOperations(ctx, reschedule...) + setStatus(ctx, statuses) + if err := removeEntities(ctx, remove); err != nil { + return errors.Annotate(err, "removing filesystems from state") + } + return nil +} + +// detachFilesystems destroys filesystem attachments with the specified parameters. 
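// [editor's note] Like the other operations in this file, detachment splits
// per-attachment results into two sets: successes are removed from state,
// failures are rescheduled. A compact sketch of the split (hypothetical
// helper; the real loop below also records per-entity status updates):
//
//	func splitDetachResults(ids []params.MachineStorageId, errs []error) (remove, retry []params.MachineStorageId) {
//		for i, err := range errs {
//			if err != nil {
//				retry = append(retry, ids[i]) // reschedule with backoff
//				continue
//			}
//			remove = append(remove, ids[i]) // safe to remove from state
//		}
//		return remove, retry
//	}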
+func detachFilesystems(ctx *context, ops map[params.MachineStorageId]*detachFilesystemOp) error {
+	filesystemAttachmentParams := make([]storage.FilesystemAttachmentParams, 0, len(ops))
+	for _, op := range ops {
+		filesystemAttachmentParams = append(filesystemAttachmentParams, op.args)
+	}
+	paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(
+		ctx.modelConfig, ctx.config.StorageDir,
+		filesystemAttachmentParams,
+		ctx.filesystems,
+		ctx.managedFilesystemSource,
+	)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	var reschedule []scheduleOp
+	var statuses []params.EntityStatusArgs
+	var remove []params.MachineStorageId
+	for sourceName, filesystemAttachmentParams := range paramsBySource {
+		logger.Debugf("detaching filesystems: %+v", filesystemAttachmentParams)
+		filesystemSource := filesystemSources[sourceName]
+		errs, err := filesystemSource.DetachFilesystems(filesystemAttachmentParams)
+		if err != nil {
+			return errors.Annotatef(err, "detaching filesystems from source %q", sourceName)
+		}
+		for i, err := range errs {
+			p := filesystemAttachmentParams[i]
+			statuses = append(statuses, params.EntityStatusArgs{
+				Tag: p.Filesystem.String(),
+				// TODO(axw) when we support multiple
+				// attachments, we'll have to check if
+				// there are any other attachments
+				// before setting the status to "detached".
+				Status: params.StatusDetached,
+			})
+			id := params.MachineStorageId{
+				MachineTag:    p.Machine.String(),
+				AttachmentTag: p.Filesystem.String(),
+			}
+			status := &statuses[len(statuses)-1]
+			if err != nil {
+				reschedule = append(reschedule, ops[id])
+				status.Status = params.StatusDetaching
+				status.Info = err.Error()
+				logger.Debugf(
+					"failed to detach %s from %s: %v",
+					names.ReadableString(p.Filesystem),
+					names.ReadableString(p.Machine),
+					err,
+				)
+				continue
+			}
+			remove = append(remove, id)
+		}
+	}
+	scheduleOperations(ctx, reschedule...)
+	setStatus(ctx, statuses)
+	if err := removeAttachments(ctx, remove); err != nil {
+		return errors.Annotate(err, "removing attachments from state")
+	}
+	for _, id := range remove {
+		delete(ctx.filesystemAttachments, id)
+	}
+	return nil
+}
+
+// filesystemParamsBySource separates the filesystem parameters by filesystem source.
+func filesystemParamsBySource(
+	environConfig *config.Config,
+	baseStorageDir string,
+	params []storage.FilesystemParams,
+	managedFilesystemSource storage.FilesystemSource,
+) (map[string][]storage.FilesystemParams, map[string]storage.FilesystemSource, error) {
+	// TODO(axw) later we may have multiple instantiations (sources)
+	// for a storage provider, e.g. multiple Ceph installations. For
+	// now we assume a single source for each provider type, with no
+	// configuration.
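	// [editor's note] The dispatch below chooses one of three outcomes
	// per provider: volume-backed filesystems use the machine-managed
	// source, non-dynamic providers map to a nil source (the machine
	// provisioner creates those at instance-creation time), and
	// everything else uses the provider's own source. In miniature
	// (sketch only, not the real API):
	//
	//	func pickSource(volumeBacked, dynamic bool, managed, provider storage.FilesystemSource) storage.FilesystemSource {
	//		if volumeBacked {
	//			return managed // filesystem lives on an attached volume
	//		}
	//		if !dynamic {
	//			return nil // left to the machine provisioner
	//		}
	//		return provider
	//	}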
+ filesystemSources := make(map[string]storage.FilesystemSource) + for _, params := range params { + sourceName := string(params.Provider) + if _, ok := filesystemSources[sourceName]; ok { + continue + } + if params.Volume != (names.VolumeTag{}) { + filesystemSources[sourceName] = managedFilesystemSource + continue + } + filesystemSource, err := filesystemSource( + environConfig, baseStorageDir, sourceName, params.Provider, + ) + if errors.Cause(err) == errNonDynamic { + filesystemSource = nil + } else if err != nil { + return nil, nil, errors.Annotate(err, "getting filesystem source") + } + filesystemSources[sourceName] = filesystemSource + } + paramsBySource := make(map[string][]storage.FilesystemParams) + for _, params := range params { + sourceName := string(params.Provider) + filesystemSource := filesystemSources[sourceName] + if filesystemSource == nil { + // Ignore nil filesystem sources; this means that the + // filesystem should be created by the machine-provisioner. + continue + } + paramsBySource[sourceName] = append(paramsBySource[sourceName], params) + } + return paramsBySource, filesystemSources, nil +} + +// validateFilesystemParams validates a collection of filesystem parameters. +func validateFilesystemParams( + filesystemSource storage.FilesystemSource, + filesystemParams []storage.FilesystemParams, +) ([]storage.FilesystemParams, []error) { + valid := make([]storage.FilesystemParams, 0, len(filesystemParams)) + results := make([]error, len(filesystemParams)) + for i, params := range filesystemParams { + err := filesystemSource.ValidateFilesystemParams(params) + if err == nil { + valid = append(valid, params) + } + results[i] = err + } + return valid, results +} + +// filesystemAttachmentParamsBySource separates the filesystem attachment parameters by filesystem source. +func filesystemAttachmentParamsBySource( + environConfig *config.Config, + baseStorageDir string, + params []storage.FilesystemAttachmentParams, + filesystems map[names.FilesystemTag]storage.Filesystem, + managedFilesystemSource storage.FilesystemSource, +) (map[string][]storage.FilesystemAttachmentParams, map[string]storage.FilesystemSource, error) { + // TODO(axw) later we may have multiple instantiations (sources) + // for a storage provider, e.g. multiple Ceph installations. For + // now we assume a single source for each provider type, with no + // configuration. + filesystemSources := make(map[string]storage.FilesystemSource) + paramsBySource := make(map[string][]storage.FilesystemAttachmentParams) + for _, params := range params { + sourceName := string(params.Provider) + paramsBySource[sourceName] = append(paramsBySource[sourceName], params) + if _, ok := filesystemSources[sourceName]; ok { + continue + } + filesystem := filesystems[params.Filesystem] + if filesystem.Volume != (names.VolumeTag{}) { + filesystemSources[sourceName] = managedFilesystemSource + continue + } + filesystemSource, err := filesystemSource( + environConfig, baseStorageDir, sourceName, params.Provider, + ) + if err != nil { + return nil, nil, errors.Annotate(err, "getting filesystem source") + } + filesystemSources[sourceName] = filesystemSource + } + return paramsBySource, filesystemSources, nil +} + +func setFilesystemAttachmentInfo(ctx *context, filesystemAttachments []storage.FilesystemAttachment) error { + if len(filesystemAttachments) == 0 { + return nil + } + // TODO(axw) we need to be able to list filesystem attachments in the + // provider, by environment, so that we can "harvest" them if they're + // unknown. 
This will take care of killing filesystems that we fail to + // record in state. + errorResults, err := ctx.config.Filesystems.SetFilesystemAttachmentInfo( + filesystemAttachmentsFromStorage(filesystemAttachments), + ) + if err != nil { + return errors.Annotate(err, "publishing filesystems to state") + } + for i, result := range errorResults { + if result.Error != nil { + return errors.Annotatef( + result.Error, "publishing attachment of %s to %s to state", + names.ReadableString(filesystemAttachments[i].Filesystem), + names.ReadableString(filesystemAttachments[i].Machine), + ) + } + // Record the filesystem attachment in the context. + id := params.MachineStorageId{ + MachineTag: filesystemAttachments[i].Machine.String(), + AttachmentTag: filesystemAttachments[i].Filesystem.String(), + } + ctx.filesystemAttachments[id] = filesystemAttachments[i] + removePendingFilesystemAttachment(ctx, id) + } + return nil +} + +func filesystemsFromStorage(in []storage.Filesystem) []params.Filesystem { + out := make([]params.Filesystem, len(in)) + for i, f := range in { + paramsFilesystem := params.Filesystem{ + f.Tag.String(), + "", + params.FilesystemInfo{ + f.FilesystemId, + f.Size, + }, + } + if f.Volume != (names.VolumeTag{}) { + paramsFilesystem.VolumeTag = f.Volume.String() + } + out[i] = paramsFilesystem + } + return out +} + +func filesystemAttachmentsFromStorage(in []storage.FilesystemAttachment) []params.FilesystemAttachment { + out := make([]params.FilesystemAttachment, len(in)) + for i, f := range in { + out[i] = params.FilesystemAttachment{ + f.Filesystem.String(), + f.Machine.String(), + params.FilesystemAttachmentInfo{ + f.Path, + f.ReadOnly, + }, + } + } + return out +} + +type createFilesystemOp struct { + exponentialBackoff + args storage.FilesystemParams +} + +func (op *createFilesystemOp) key() interface{} { + return op.args.Tag +} + +type destroyFilesystemOp struct { + exponentialBackoff + tag names.FilesystemTag +} + +func (op *destroyFilesystemOp) key() interface{} { + return op.tag +} + +type attachFilesystemOp struct { + exponentialBackoff + args storage.FilesystemAttachmentParams +} + +func (op *attachFilesystemOp) key() interface{} { + return params.MachineStorageId{ + MachineTag: op.args.Machine.String(), + AttachmentTag: op.args.Filesystem.String(), + } +} + +type detachFilesystemOp struct { + exponentialBackoff + args storage.FilesystemAttachmentParams +} + +func (op *detachFilesystemOp) key() interface{} { + return params.MachineStorageId{ + MachineTag: op.args.Machine.String(), + AttachmentTag: op.args.Filesystem.String(), + } +} === removed file 'src/github.com/juju/juju/worker/storageprovisioner/filesystems.go' --- src/github.com/juju/juju/worker/storageprovisioner/filesystems.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/filesystems.go 1970-01-01 00:00:00 +0000 @@ -1,766 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storageprovisioner - -import ( - "path/filepath" - - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/instance" - "github.com/juju/juju/storage" -) - -// filesystemsChanged is called when the lifecycle states of the filesystems -// with the provided IDs have been seen to have changed. 
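// [editor's note on the removed implementation below] filesystemsChanged
// partitioned the changed tags by lifecycle, issued one combined query, and
// then sliced the results back apart by the same counts. The slicing
// invariant, as a sketch:
//
//	all := make([]names.FilesystemTag, 0, len(alive)+len(dying)+len(dead))
//	all = append(append(append(all, alive...), dying...), dead...)
//	results, _ := accessor.Filesystems(all)
//	aliveResults := results[:len(alive)]
//	dyingResults := results[len(alive) : len(alive)+len(dying)]
//	deadResults := results[len(alive)+len(dying):]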
-func filesystemsChanged(ctx *context, changes []string) error { - tags := make([]names.Tag, len(changes)) - for i, change := range changes { - tags[i] = names.NewFilesystemTag(change) - } - alive, dying, dead, err := storageEntityLife(ctx, tags) - if err != nil { - return errors.Trace(err) - } - logger.Debugf("filesystems alive: %v, dying: %v, dead: %v", alive, dying, dead) - if len(alive)+len(dying)+len(dead) == 0 { - return nil - } - - // Get filesystem information for filesystems, so we can provision, - // deprovision, attach and detach. - filesystemTags := make([]names.FilesystemTag, 0, len(alive)+len(dying)+len(dead)) - for _, tag := range alive { - filesystemTags = append(filesystemTags, tag.(names.FilesystemTag)) - } - for _, tag := range dying { - filesystemTags = append(filesystemTags, tag.(names.FilesystemTag)) - } - for _, tag := range dead { - filesystemTags = append(filesystemTags, tag.(names.FilesystemTag)) - } - filesystemResults, err := ctx.filesystemAccessor.Filesystems(filesystemTags) - if err != nil { - return errors.Annotatef(err, "getting filesystem information") - } - - aliveFilesystemTags := filesystemTags[:len(alive)] - dyingFilesystemTags := filesystemTags[len(alive) : len(alive)+len(dying)] - deadFilesystemTags := filesystemTags[len(alive)+len(dying):] - aliveFilesystemResults := filesystemResults[:len(alive)] - dyingFilesystemResults := filesystemResults[len(alive) : len(alive)+len(dying)] - deadFilesystemResults := filesystemResults[len(alive)+len(dying):] - - if err := processDeadFilesystems(ctx, deadFilesystemTags, deadFilesystemResults); err != nil { - return errors.Annotate(err, "deprovisioning filesystems") - } - if err := processDyingFilesystems(ctx, dyingFilesystemTags, dyingFilesystemResults); err != nil { - return errors.Annotate(err, "processing dying filesystems") - } - if err := processAliveFilesystems(ctx, aliveFilesystemTags, aliveFilesystemResults); err != nil { - return errors.Annotate(err, "provisioning filesystems") - } - return nil -} - -// filesystemAttachmentsChanged is called when the lifecycle states of the filesystem -// attachments with the provided IDs have been seen to have changed. -func filesystemAttachmentsChanged(ctx *context, ids []params.MachineStorageId) error { - alive, dying, dead, err := attachmentLife(ctx, ids) - if err != nil { - return errors.Trace(err) - } - logger.Debugf("filesystem attachment alive: %v, dying: %v, dead: %v", alive, dying, dead) - if len(dead) != 0 { - // We should not see dead filesystem attachments; - // attachments go directly from Dying to removed. - logger.Debugf("unexpected dead filesystem attachments: %v", dead) - } - if len(alive)+len(dying) == 0 { - return nil - } - - // Get filesystem information for alive and dying filesystem attachments, so - // we can attach/detach. - ids = append(alive, dying...) - filesystemAttachmentResults, err := ctx.filesystemAccessor.FilesystemAttachments(ids) - if err != nil { - return errors.Annotatef(err, "getting filesystem attachment information") - } - - // Deprovision Dying filesystem attachments. - dyingFilesystemAttachmentResults := filesystemAttachmentResults[len(alive):] - if err := processDyingFilesystemAttachments(ctx, dying, dyingFilesystemAttachmentResults); err != nil { - return errors.Annotate(err, "destroying filesystem attachments") - } - - // Provision Alive filesystem attachments. 
- aliveFilesystemAttachmentResults := filesystemAttachmentResults[:len(alive)] - if err := processAliveFilesystemAttachments(ctx, alive, aliveFilesystemAttachmentResults); err != nil { - return errors.Annotate(err, "creating filesystem attachments") - } - - return nil -} - -// processDyingFilesystems processes the FilesystemResults for Dying filesystems, -// removing them from provisioning-pending as necessary, and storing the current -// filesystem info for provisioned filesystems so that attachments may be destroyed. -func processDyingFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error { - for _, tag := range tags { - delete(ctx.pendingFilesystems, tag) - } - for i, result := range filesystemResults { - tag := tags[i] - if result.Error == nil { - filesystem, err := filesystemFromParams(result.Result) - if err != nil { - return errors.Annotate(err, "getting filesystem info") - } - ctx.filesystems[tag] = filesystem - } else if !params.IsCodeNotProvisioned(result.Error) { - return errors.Annotatef(result.Error, "getting information for filesystem %s", tag.Id()) - } - } - return nil -} - -// processDeadFilesystems processes the FilesystemResults for Dead filesystems, -// deprovisioning filesystems and removing from state as necessary. -func processDeadFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error { - for _, tag := range tags { - delete(ctx.pendingFilesystems, tag) - } - var destroy []names.FilesystemTag - var remove []names.Tag - for i, result := range filesystemResults { - tag := tags[i] - if result.Error == nil { - logger.Debugf("filesystem %s is provisioned, queuing for deprovisioning", tag.Id()) - filesystem, err := filesystemFromParams(result.Result) - if err != nil { - return errors.Annotate(err, "getting filesystem info") - } - ctx.filesystems[tag] = filesystem - destroy = append(destroy, tag) - continue - } - if params.IsCodeNotProvisioned(result.Error) { - logger.Debugf("filesystem %s is not provisioned, queuing for removal", tag.Id()) - remove = append(remove, tag) - continue - } - return errors.Annotatef(result.Error, "getting filesystem information for filesystem %s", tag.Id()) - } - if len(destroy)+len(remove) == 0 { - return nil - } - if len(destroy) > 0 { - errorResults, err := destroyFilesystems(ctx, destroy) - if err != nil { - return errors.Annotate(err, "destroying filesystems") - } - for i, tag := range destroy { - if err := errorResults[i]; err != nil { - return errors.Annotatef(err, "destroying %s", names.ReadableString(tag)) - } - remove = append(remove, tag) - } - } - if err := removeEntities(ctx, remove); err != nil { - return errors.Annotate(err, "removing filesystems from state") - } - return nil -} - -// processDyingFilesystemAttachments processes the FilesystemAttachmentResults for -// Dying filesystem attachments, detaching filesystems and updating state as necessary. 
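// [editor's note on the removed implementation] A dying attachment was
// triaged by whether it had ever been provisioned: provisioned attachments
// must be detached first, while never-provisioned ones can be removed from
// state immediately. Sketch of that triage (names as in the code below):
//
//	for i, res := range results {
//		switch {
//		case res.Error == nil:
//			detach = append(detach, ids[i]) // provisioned: detach first
//		case params.IsCodeNotProvisioned(res.Error):
//			remove = append(remove, ids[i]) // never provisioned: just remove
//		default:
//			return errors.Trace(res.Error) // unexpected lookup failure
//		}
//	}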
-func processDyingFilesystemAttachments( - ctx *context, - ids []params.MachineStorageId, - filesystemAttachmentResults []params.FilesystemAttachmentResult, -) error { - if len(ids) == 0 { - return nil - } - for _, id := range ids { - delete(ctx.pendingFilesystemAttachments, id) - } - detach := make([]params.MachineStorageId, 0, len(ids)) - remove := make([]params.MachineStorageId, 0, len(ids)) - for i, result := range filesystemAttachmentResults { - id := ids[i] - if result.Error == nil { - detach = append(detach, id) - continue - } - if params.IsCodeNotProvisioned(result.Error) { - remove = append(remove, id) - continue - } - return errors.Annotatef(result.Error, "getting information for filesystem attachment %v", id) - } - if len(detach) > 0 { - attachmentParams, err := filesystemAttachmentParams(ctx, detach) - if err != nil { - return errors.Trace(err) - } - for i, params := range attachmentParams { - ctx.pendingDyingFilesystemAttachments[detach[i]] = params - } - } - if len(remove) > 0 { - if err := removeAttachments(ctx, remove); err != nil { - return errors.Annotate(err, "removing attachments from state") - } - } - return nil -} - -// processAliveFilesystems processes the FilesystemResults for Alive filesystems, -// provisioning filesystems and setting the info in state as necessary. -func processAliveFilesystems(ctx *context, tags []names.FilesystemTag, filesystemResults []params.FilesystemResult) error { - // Filter out the already-provisioned filesystems. - pending := make([]names.FilesystemTag, 0, len(tags)) - for i, result := range filesystemResults { - tag := tags[i] - if result.Error == nil { - // Filesystem is already provisioned: skip. - logger.Debugf("filesystem %q is already provisioned, nothing to do", tag.Id()) - filesystem, err := filesystemFromParams(result.Result) - if err != nil { - return errors.Annotate(err, "getting filesystem info") - } - ctx.filesystems[tag] = filesystem - if filesystem.Volume != (names.VolumeTag{}) { - // Ensure that volume-backed filesystems' block - // devices are present even after creating the - // filesystem, so that attachments can be made. - maybeAddPendingVolumeBlockDevice(ctx, filesystem.Volume) - } - continue - } - if !params.IsCodeNotProvisioned(result.Error) { - return errors.Annotatef( - result.Error, "getting filesystem information for filesystem %q", tag.Id(), - ) - } - // The filesystem has not yet been provisioned, so record its tag - // to enquire about parameters below. - pending = append(pending, tag) - } - if len(pending) == 0 { - return nil - } - paramsResults, err := ctx.filesystemAccessor.FilesystemParams(pending) - if err != nil { - return errors.Annotate(err, "getting filesystem params") - } - for i, result := range paramsResults { - if result.Error != nil { - return errors.Annotate(result.Error, "getting filesystem parameters") - } - params, err := filesystemParamsFromParams(result.Result) - if err != nil { - return errors.Annotate(err, "getting filesystem parameters") - } - ctx.pendingFilesystems[pending[i]] = params - if params.Volume != (names.VolumeTag{}) { - // The filesystem is volume-backed: we must watch for - // the corresponding block device. This will trigger a - // one-time (for the volume) forced update of block - // devices. If the block device is not immediately - // available, then we rely on the watcher. The forced - // update is necessary in case the block device was - // added to state already, and we didn't observe it. 
- maybeAddPendingVolumeBlockDevice(ctx, params.Volume) - } - } - return nil -} - -func maybeAddPendingVolumeBlockDevice(ctx *context, v names.VolumeTag) { - if _, ok := ctx.volumeBlockDevices[v]; !ok { - ctx.pendingVolumeBlockDevices.Add(v) - } -} - -// processPendingFilesystems creates as many of the pending filesystems -// as possible, first ensuring that their prerequisites have been met. -func processPendingFilesystems(ctx *context) error { - if len(ctx.pendingFilesystems) == 0 { - logger.Tracef("no pending filesystems") - return nil - } - ready := make([]storage.FilesystemParams, 0, len(ctx.pendingFilesystems)) - for tag, filesystemParams := range ctx.pendingFilesystems { - if filesystemParams.Volume != (names.VolumeTag{}) { - // The filesystem is backed by a volume; ensure that - // the volume is attached by virtue of there being a - // matching block device on the machine. - if _, ok := ctx.volumeBlockDevices[filesystemParams.Volume]; !ok { - logger.Debugf( - "filesystem %v backing-volume %v is not attached yet", - filesystemParams.Tag.Id(), - filesystemParams.Volume.Id(), - ) - continue - } - } - ready = append(ready, filesystemParams) - delete(ctx.pendingFilesystems, tag) - } - if len(ready) == 0 { - return nil - } - filesystems, err := createFilesystems(ctx, ready) - if err != nil { - return errors.Annotate(err, "creating filesystems") - } - if err := setFilesystemInfo(ctx, filesystems); err != nil { - return errors.Trace(err) - } - return nil -} - -func setFilesystemInfo(ctx *context, filesystems []storage.Filesystem) error { - if len(filesystems) == 0 { - return nil - } - // TODO(axw) we need to be able to list filesystems in the provider, - // by environment, so that we can "harvest" them if they're - // unknown. This will take care of killing filesystems that we fail - // to record in state. - errorResults, err := ctx.filesystemAccessor.SetFilesystemInfo( - filesystemsFromStorage(filesystems), - ) - if err != nil { - return errors.Annotate(err, "publishing filesystems to state") - } - for i, result := range errorResults { - if result.Error != nil { - return errors.Annotatef( - result.Error, "publishing filesystem %s to state", - filesystems[i].Tag.Id(), - ) - } - ctx.filesystems[filesystems[i].Tag] = filesystems[i] - } - return nil -} - -// processAliveFilesystemAttachments processes the FilesystemAttachmentResults -// for Alive filesystem attachments, attaching filesystems and setting the info -// in state as necessary. -func processAliveFilesystemAttachments( - ctx *context, - ids []params.MachineStorageId, - filesystemAttachmentResults []params.FilesystemAttachmentResult, -) error { - // Filter out the already-attached. - pending := make([]params.MachineStorageId, 0, len(ids)) - for i, result := range filesystemAttachmentResults { - if result.Error == nil { - delete(ctx.pendingFilesystemAttachments, ids[i]) - // Filesystem attachment is already provisioned: if we - // didn't (re)attach in this session, then we must do - // so now. - action := "nothing to do" - if _, ok := ctx.filesystemAttachments[ids[i]]; !ok { - // Not yet (re)attached in this session. 
- pending = append(pending, ids[i]) - action = "will reattach" - } - logger.Debugf( - "%s is already attached to %s, %s", - ids[i].AttachmentTag, ids[i].MachineTag, action, - ) - continue - } - if !params.IsCodeNotProvisioned(result.Error) { - return errors.Annotatef( - result.Error, "getting information for attachment %v", ids[i], - ) - } - // The filesystem has not yet been attached, so - // record its tag to enquire about parameters below. - pending = append(pending, ids[i]) - } - if len(pending) == 0 { - return nil - } - params, err := filesystemAttachmentParams(ctx, pending) - if err != nil { - return errors.Trace(err) - } - for i, params := range params { - if params.InstanceId == "" { - watchMachine(ctx, params.Machine) - } - ctx.pendingFilesystemAttachments[pending[i]] = params - } - return nil -} - -// filesystemAttachmentParams obtains the specified attachments' parameters. -func filesystemAttachmentParams( - ctx *context, ids []params.MachineStorageId, -) ([]storage.FilesystemAttachmentParams, error) { - paramsResults, err := ctx.filesystemAccessor.FilesystemAttachmentParams(ids) - if err != nil { - return nil, errors.Annotate(err, "getting filesystem attachment params") - } - attachmentParams := make([]storage.FilesystemAttachmentParams, len(ids)) - for i, result := range paramsResults { - if result.Error != nil { - return nil, errors.Annotate(result.Error, "getting filesystem attachment parameters") - } - params, err := filesystemAttachmentParamsFromParams(result.Result) - if err != nil { - return nil, errors.Annotate(err, "getting filesystem attachment parameters") - } - attachmentParams[i] = params - } - return attachmentParams, nil -} - -func processPendingFilesystemAttachments(ctx *context) error { - if len(ctx.pendingFilesystemAttachments) == 0 { - logger.Tracef("no pending filesystem attachments") - return nil - } - ready := make([]storage.FilesystemAttachmentParams, 0, len(ctx.pendingFilesystemAttachments)) - for id, params := range ctx.pendingFilesystemAttachments { - filesystem, ok := ctx.filesystems[params.Filesystem] - if !ok { - logger.Debugf("filesystem %v has not been provisioned yet", params.Filesystem.Id()) - continue - } - if filesystem.Volume != (names.VolumeTag{}) { - // The filesystem is volume-backed: if the filesystem - // was created in another session, then the block device - // may not have been seen yet. We must wait for the block - // device watcher to trigger. 
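	// [editor's note] The checks in this loop form a readiness gate: the
	// filesystem must be provisioned, a volume-backed filesystem's block
	// device must be visible, and the machine must have an instance ID,
	// before defaults are filled in. Condensed (sketch only, with
	// invented boolean names):
	//
	//	if !fsKnown || (volumeBacked && !blockDeviceSeen) || instanceID == "" {
	//		continue // stay pending; a later watcher event retries
	//	}
	//	if mountPoint == "" {
	//		mountPoint = filepath.Join(storageDir, fsID) // default location
	//	}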
- if _, ok := ctx.volumeBlockDevices[filesystem.Volume]; !ok { - logger.Debugf( - "filesystem %v backing-volume %v is not attached yet", - filesystem.Tag.Id(), - filesystem.Volume.Id(), - ) - continue - } - } - if params.InstanceId == "" { - logger.Debugf("machine %v has not been provisioned yet", params.Machine.Id()) - continue - } - if params.Path == "" { - params.Path = filepath.Join(ctx.storageDir, params.Filesystem.Id()) - } - params.FilesystemId = filesystem.FilesystemId - ready = append(ready, params) - delete(ctx.pendingFilesystemAttachments, id) - } - if len(ready) == 0 { - return nil - } - filesystemAttachments, err := createFilesystemAttachments(ctx, ready) - if err != nil { - return errors.Annotate(err, "creating filesystem attachments") - } - if err := setFilesystemAttachmentInfo(ctx, filesystemAttachments); err != nil { - return errors.Trace(err) - } - return nil -} - -func processPendingDyingFilesystemAttachments(ctx *context) error { - if len(ctx.pendingDyingFilesystemAttachments) == 0 { - logger.Tracef("no pending, dying filesystem attachments") - return nil - } - var detach []storage.FilesystemAttachmentParams - var remove []params.MachineStorageId - for id, params := range ctx.pendingDyingFilesystemAttachments { - if _, ok := ctx.filesystems[params.Filesystem]; !ok { - // Wait until the filesystem info is known. - continue - } - delete(ctx.pendingDyingFilesystemAttachments, id) - detach = append(detach, params) - remove = append(remove, id) - } - if len(detach) == 0 { - return nil - } - if err := detachFilesystems(ctx, detach); err != nil { - return errors.Annotate(err, "detaching filesystems") - } - if err := removeAttachments(ctx, remove); err != nil { - return errors.Annotate(err, "removing attachments from state") - } - return nil -} - -func setFilesystemAttachmentInfo(ctx *context, filesystemAttachments []storage.FilesystemAttachment) error { - if len(filesystemAttachments) == 0 { - return nil - } - // TODO(axw) we need to be able to list filesystem attachments in the - // provider, by environment, so that we can "harvest" them if they're - // unknown. This will take care of killing filesystems that we fail to - // record in state. - errorResults, err := ctx.filesystemAccessor.SetFilesystemAttachmentInfo( - filesystemAttachmentsFromStorage(filesystemAttachments), - ) - if err != nil { - return errors.Annotate(err, "publishing filesystems to state") - } - for i, result := range errorResults { - if result.Error != nil { - return errors.Annotatef( - result.Error, "publishing attachment of %s to %s to state", - names.ReadableString(filesystemAttachments[i].Filesystem), - names.ReadableString(filesystemAttachments[i].Machine), - ) - } - // Record the filesystem attachment in the context. - ctx.filesystemAttachments[params.MachineStorageId{ - MachineTag: filesystemAttachments[i].Machine.String(), - AttachmentTag: filesystemAttachments[i].Filesystem.String(), - }] = filesystemAttachments[i] - } - return nil -} - -// createFilesystems creates filesystems with the specified parameters. -func createFilesystems(ctx *context, params []storage.FilesystemParams) ([]storage.Filesystem, error) { - // TODO(axw) later we may have multiple instantiations (sources) - // for a storage provider, e.g. multiple Ceph installations. For - // now we assume a single source for each provider type, with no - // configuration. - - // Create filesystem sources. 
- filesystemSources := make(map[string]storage.FilesystemSource) - for _, params := range params { - sourceName := string(params.Provider) - if _, ok := filesystemSources[sourceName]; ok { - continue - } - if params.Volume != (names.VolumeTag{}) { - filesystemSources[sourceName] = ctx.managedFilesystemSource - continue - } - filesystemSource, err := filesystemSource( - ctx.environConfig, ctx.storageDir, sourceName, params.Provider, - ) - if err != nil { - return nil, errors.Annotate(err, "getting filesystem source") - } - filesystemSources[sourceName] = filesystemSource - } - - // Validate and gather filesystem parameters. - paramsBySource := make(map[string][]storage.FilesystemParams) - for _, params := range params { - sourceName := string(params.Provider) - filesystemSource := filesystemSources[sourceName] - err := filesystemSource.ValidateFilesystemParams(params) - if err != nil { - // TODO(axw) we should set an error status for params.Tag - // here, and we should retry periodically. - logger.Errorf("ignoring invalid filesystem: %v", err) - continue - } - paramsBySource[sourceName] = append(paramsBySource[sourceName], params) - } - - var allFilesystems []storage.Filesystem - for sourceName, params := range paramsBySource { - logger.Debugf("creating filesystems: %v", params) - filesystemSource := filesystemSources[sourceName] - filesystems, err := filesystemSource.CreateFilesystems(params) - if err != nil { - return nil, errors.Annotatef(err, "creating filesystems from source %q", sourceName) - } - allFilesystems = append(allFilesystems, filesystems...) - } - return allFilesystems, nil -} - -// createFilesystemAttachments creates filesystem attachments with the specified parameters. -func createFilesystemAttachments( - ctx *context, - params []storage.FilesystemAttachmentParams, -) ([]storage.FilesystemAttachment, error) { - paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(ctx, params) - if err != nil { - return nil, errors.Trace(err) - } - var allFilesystemAttachments []storage.FilesystemAttachment - for sourceName, params := range paramsBySource { - logger.Debugf("attaching filesystems: %v", params) - filesystemSource := filesystemSources[sourceName] - filesystemAttachments, err := filesystemSource.AttachFilesystems(params) - if err != nil { - return nil, errors.Annotatef(err, "attaching filesystems from source %q", sourceName) - } - allFilesystemAttachments = append(allFilesystemAttachments, filesystemAttachments...) 
- } - return allFilesystemAttachments, nil -} - -func destroyFilesystems(ctx *context, tags []names.FilesystemTag) ([]error, error) { - // TODO(axw) add storage.FilesystemSource.DestroyFilesystems - return make([]error, len(tags)), nil -} - -func detachFilesystems(ctx *context, attachments []storage.FilesystemAttachmentParams) error { - paramsBySource, filesystemSources, err := filesystemAttachmentParamsBySource(ctx, attachments) - if err != nil { - return errors.Trace(err) - } - for sourceName, params := range paramsBySource { - logger.Debugf("detaching filesystems: %v", params) - filesystemSource := filesystemSources[sourceName] - if err := filesystemSource.DetachFilesystems(params); err != nil { - return errors.Annotatef(err, "detaching filesystems from source %q", sourceName) - } - } - return nil -} - -func filesystemAttachmentParamsBySource( - ctx *context, params []storage.FilesystemAttachmentParams, -) (map[string][]storage.FilesystemAttachmentParams, map[string]storage.FilesystemSource, error) { - // TODO(axw) later we may have multiple instantiations (sources) - // for a storage provider, e.g. multiple Ceph installations. For - // now we assume a single source for each provider type, with no - // configuration. - filesystemSources := make(map[string]storage.FilesystemSource) - paramsBySource := make(map[string][]storage.FilesystemAttachmentParams) - for _, params := range params { - sourceName := string(params.Provider) - paramsBySource[sourceName] = append(paramsBySource[sourceName], params) - if _, ok := filesystemSources[sourceName]; ok { - continue - } - filesystem := ctx.filesystems[params.Filesystem] - if filesystem.Volume != (names.VolumeTag{}) { - filesystemSources[sourceName] = ctx.managedFilesystemSource - continue - } - filesystemSource, err := filesystemSource( - ctx.environConfig, ctx.storageDir, sourceName, params.Provider, - ) - if err != nil { - return nil, nil, errors.Annotate(err, "getting filesystem source") - } - filesystemSources[sourceName] = filesystemSource - } - return paramsBySource, filesystemSources, nil -} - -func filesystemsFromStorage(in []storage.Filesystem) []params.Filesystem { - out := make([]params.Filesystem, len(in)) - for i, f := range in { - paramsFilesystem := params.Filesystem{ - f.Tag.String(), - "", - params.FilesystemInfo{ - f.FilesystemId, - f.Size, - }, - } - if f.Volume != (names.VolumeTag{}) { - paramsFilesystem.VolumeTag = f.Volume.String() - } - out[i] = paramsFilesystem - } - return out -} - -func filesystemAttachmentsFromStorage(in []storage.FilesystemAttachment) []params.FilesystemAttachment { - out := make([]params.FilesystemAttachment, len(in)) - for i, f := range in { - out[i] = params.FilesystemAttachment{ - f.Filesystem.String(), - f.Machine.String(), - params.FilesystemAttachmentInfo{ - f.Path, - f.ReadOnly, - }, - } - } - return out -} - -func filesystemFromParams(in params.Filesystem) (storage.Filesystem, error) { - filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) - if err != nil { - return storage.Filesystem{}, errors.Trace(err) - } - var volumeTag names.VolumeTag - if in.VolumeTag != "" { - volumeTag, err = names.ParseVolumeTag(in.VolumeTag) - if err != nil { - return storage.Filesystem{}, errors.Trace(err) - } - } - return storage.Filesystem{ - filesystemTag, - volumeTag, - storage.FilesystemInfo{ - in.Info.FilesystemId, - in.Info.Size, - }, - }, nil -} - -func filesystemParamsFromParams(in params.FilesystemParams) (storage.FilesystemParams, error) { - filesystemTag, err := 
names.ParseFilesystemTag(in.FilesystemTag) - if err != nil { - return storage.FilesystemParams{}, errors.Trace(err) - } - var volumeTag names.VolumeTag - if in.VolumeTag != "" { - volumeTag, err = names.ParseVolumeTag(in.VolumeTag) - if err != nil { - return storage.FilesystemParams{}, errors.Trace(err) - } - } - providerType := storage.ProviderType(in.Provider) - return storage.FilesystemParams{ - filesystemTag, - volumeTag, - in.Size, - providerType, - in.Attributes, - in.Tags, - }, nil -} - -func filesystemAttachmentParamsFromParams(in params.FilesystemAttachmentParams) (storage.FilesystemAttachmentParams, error) { - machineTag, err := names.ParseMachineTag(in.MachineTag) - if err != nil { - return storage.FilesystemAttachmentParams{}, errors.Trace(err) - } - filesystemTag, err := names.ParseFilesystemTag(in.FilesystemTag) - if err != nil { - return storage.FilesystemAttachmentParams{}, errors.Trace(err) - } - return storage.FilesystemAttachmentParams{ - AttachmentParams: storage.AttachmentParams{ - Provider: storage.ProviderType(in.Provider), - Machine: machineTag, - InstanceId: instance.Id(in.InstanceId), - ReadOnly: in.ReadOnly, - }, - Filesystem: filesystemTag, - FilesystemId: in.FilesystemId, - Path: in.MountPoint, - }, nil -} === modified file 'src/github.com/juju/juju/worker/storageprovisioner/machines.go' --- src/github.com/juju/juju/worker/storageprovisioner/machines.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/machines.go 2016-03-22 15:18:22 +0000 @@ -6,11 +6,11 @@ import ( "github.com/juju/errors" "github.com/juju/names" - "launchpad.net/tomb" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/instance" - "github.com/juju/juju/state/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" ) // watchMachine starts a machine watcher if there is not already one for the @@ -21,8 +21,14 @@ if ok { return } - w := newMachineWatcher(ctx.machineAccessor, tag, ctx.machineChanges) - ctx.machines[tag] = w + w, err := newMachineWatcher(ctx.config.Machines, tag, ctx.machineChanges) + if err != nil { + ctx.kill(errors.Trace(err)) + } else if err := ctx.addWorker(w); err != nil { + ctx.kill(errors.Trace(err)) + } else { + ctx.machines[tag] = w + } } // refreshMachine refreshes the specified machine's instance ID. 
If it is set, @@ -34,13 +40,11 @@ return errors.Errorf("machine %s is not being watched", tag.Id()) } stopAndRemove := func() error { - if err := w.stop(); err != nil { - return errors.Annotate(err, "stopping machine watcher") - } + worker.Stop(w) delete(ctx.machines, tag) return nil } - results, err := ctx.machineAccessor.InstanceIds([]names.MachineTag{tag}) + results, err := ctx.config.Machines.InstanceIds([]names.MachineTag{tag}) if err != nil { return errors.Annotate(err, "getting machine instance ID") } @@ -75,17 +79,17 @@ params.InstanceId = instanceId updatePendingVolumeAttachment(ctx, id, params) } - for id, params := range ctx.pendingFilesystemAttachments { + for id, params := range ctx.incompleteFilesystemAttachmentParams { if params.Machine != tag || params.InstanceId != "" { continue } params.InstanceId = instanceId - ctx.pendingFilesystemAttachments[id] = params + updatePendingFilesystemAttachment(ctx, id, params) } } type machineWatcher struct { - tomb tomb.Tomb + catacomb catacomb.Catacomb accessor MachineAccessor tag names.MachineTag instanceId instance.Id @@ -96,22 +100,20 @@ accessor MachineAccessor, tag names.MachineTag, out chan<- names.MachineTag, -) *machineWatcher { +) (*machineWatcher, error) { w := &machineWatcher{ accessor: accessor, tag: tag, out: out, } - go func() { - defer w.tomb.Done() - w.tomb.Kill(w.loop()) - }() - return w -} - -func (mw *machineWatcher) stop() error { - mw.tomb.Kill(nil) - return mw.tomb.Wait() + err := catacomb.Invoke(catacomb.Plan{ + Site: &w.catacomb, + Work: w.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } func (mw *machineWatcher) loop() error { @@ -119,16 +121,19 @@ if err != nil { return errors.Annotate(err, "watching machine") } + if err := mw.catacomb.Add(w); err != nil { + return errors.Trace(err) + } logger.Debugf("watching machine %s", mw.tag.Id()) defer logger.Debugf("finished watching machine %s", mw.tag.Id()) var out chan<- names.MachineTag for { select { - case <-mw.tomb.Dying(): - return tomb.ErrDying + case <-mw.catacomb.Dying(): + return mw.catacomb.ErrDying() case _, ok := <-w.Changes(): if !ok { - return watcher.EnsureErr(w) + return errors.New("machine watcher closed") } out = mw.out case out <- mw.tag: @@ -136,3 +141,13 @@ } } } + +// Kill is part of the worker.Worker interface. +func (mw *machineWatcher) Kill() { + mw.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (mw *machineWatcher) Wait() error { + return mw.catacomb.Wait() +} === added file 'src/github.com/juju/juju/worker/storageprovisioner/manifold.go' --- src/github.com/juju/juju/worker/storageprovisioner/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,64 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner + +import ( + "path/filepath" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/utils/clock" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/storageprovisioner" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/util" +) + +// ManifoldConfig defines a storage provisioner's configuration and dependencies. +type ManifoldConfig struct { + util.PostUpgradeManifoldConfig + Clock clock.Clock +} + +// Manifold returns a dependency.Manifold that runs a storage provisioner. 
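// [editor's note] A hypothetical registration of this manifold in a
// dependency engine, for orientation (baseConfig is assumed to be built
// elsewhere; only ManifoldConfig and Manifold come from this diff):
//
//	manifolds := dependency.Manifolds{
//		"storage-provisioner": storageprovisioner.Manifold(storageprovisioner.ManifoldConfig{
//			PostUpgradeManifoldConfig: baseConfig,
//			Clock:                     clock.WallClock,
//		}),
//	}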
+func Manifold(config ManifoldConfig) dependency.Manifold { + newWorker := func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + if config.Clock == nil { + return nil, dependency.ErrMissing + } + + cfg := a.CurrentConfig() + api, err := storageprovisioner.NewState(apiCaller, cfg.Tag()) + if err != nil { + return nil, errors.Trace(err) + } + + tag, ok := cfg.Tag().(names.MachineTag) + if !ok { + return nil, errors.Errorf("this manifold may only be used inside a machine agent") + } + + storageDir := filepath.Join(cfg.DataDir(), "storage") + w, err := NewStorageProvisioner(Config{ + Scope: tag, + StorageDir: storageDir, + Volumes: api, + Filesystems: api, + Life: api, + Environ: api, + Machines: api, + Status: api, + Clock: config.Clock, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil + } + + return util.PostUpgradeManifold(config.PostUpgradeManifoldConfig, newWorker) +} === added file 'src/github.com/juju/juju/worker/storageprovisioner/manifold_test.go' --- src/github.com/juju/juju/worker/storageprovisioner/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,141 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner_test + +import ( + "time" + + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + apiagent "github.com/juju/juju/api/agent" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state/multiwatcher" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/storageprovisioner" + workertesting "github.com/juju/juju/worker/testing" +) + +type ManifoldSuite struct { + testing.IsolationSuite + newCalled bool +} + +var ( + defaultClockStart time.Time + _ = gc.Suite(&ManifoldSuite{}) +) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.newCalled = false + s.PatchValue(&storageprovisioner.NewStorageProvisioner, + func(config storageprovisioner.Config) (worker.Worker, error) { + s.newCalled = true + return nil, nil + }, + ) +} + +func (s *ManifoldSuite) TestMachine(c *gc.C) { + config := storageprovisioner.ManifoldConfig{ + PostUpgradeManifoldConfig: workertesting.PostUpgradeManifoldTestConfig(), + Clock: coretesting.NewClock(defaultClockStart), + } + _, err := workertesting.RunPostUpgradeManifold( + storageprovisioner.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + &fakeAPIConn{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.newCalled, jc.IsTrue) +} + +func (s *ManifoldSuite) TestMissingClock(c *gc.C) { + config := storageprovisioner.ManifoldConfig{ + PostUpgradeManifoldConfig: workertesting.PostUpgradeManifoldTestConfig(), + // Clock: coretesting.NewClock(defaultClockStart), + } + _, err := workertesting.RunPostUpgradeManifold( + storageprovisioner.Manifold(config), + &fakeAgent{tag: names.NewMachineTag("42")}, + &fakeAPIConn{}) + c.Assert(err, gc.Equals, dependency.ErrMissing) + c.Assert(s.newCalled, jc.IsFalse) +} + +func (s *ManifoldSuite) TestUnit(c *gc.C) { + config := storageprovisioner.ManifoldConfig{ + PostUpgradeManifoldConfig: workertesting.PostUpgradeManifoldTestConfig(), + Clock: coretesting.NewClock(defaultClockStart), + } + _, err := workertesting.RunPostUpgradeManifold( + storageprovisioner.Manifold(config), + 
&fakeAgent{tag: names.NewUnitTag("foo/0")}, + &fakeAPIConn{}) + c.Assert(err, gc.ErrorMatches, "expected ModelTag or MachineTag, got names.UnitTag") + c.Assert(s.newCalled, jc.IsFalse) +} + +func (s *ManifoldSuite) TestNonAgent(c *gc.C) { + config := storageprovisioner.ManifoldConfig{ + PostUpgradeManifoldConfig: workertesting.PostUpgradeManifoldTestConfig(), + Clock: coretesting.NewClock(defaultClockStart), + } + _, err := workertesting.RunPostUpgradeManifold( + storageprovisioner.Manifold(config), + &fakeAgent{tag: names.NewUserTag("foo")}, + &fakeAPIConn{}) + c.Assert(err, gc.ErrorMatches, "expected ModelTag or MachineTag, got names.UserTag") + c.Assert(s.newCalled, jc.IsFalse) +} + +type fakeAgent struct { + agent.Agent + tag names.Tag +} + +func (a *fakeAgent) CurrentConfig() agent.Config { + return &fakeConfig{tag: a.tag} +} + +type fakeConfig struct { + agent.Config + tag names.Tag +} + +func (c *fakeConfig) Tag() names.Tag { + return c.tag +} + +func (_ fakeConfig) DataDir() string { + return "/path/to/data/dir" +} + +type fakeAPIConn struct { + api.Connection + machineJob multiwatcher.MachineJob +} + +func (f *fakeAPIConn) APICall(objType string, version int, id, request string, args interface{}, response interface{}) error { + if res, ok := response.(*params.AgentGetEntitiesResults); ok { + res.Entities = []params.AgentGetEntitiesResult{ + {Jobs: []multiwatcher.MachineJob{f.machineJob}}, + } + } + + return nil +} + +func (*fakeAPIConn) BestFacadeVersion(facade string) int { + return 42 +} + +func (f *fakeAPIConn) Agent() *apiagent.State { + return apiagent.NewState(f) +} === modified file 'src/github.com/juju/juju/worker/storageprovisioner/mock_test.go' --- src/github.com/juju/juju/worker/storageprovisioner/mock_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/mock_test.go 2016-03-22 15:18:22 +0000 @@ -11,15 +11,16 @@ "github.com/juju/errors" "github.com/juju/names" gitjujutesting "github.com/juju/testing" + "github.com/juju/utils/clock" gc "gopkg.in/check.v1" - apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs/config" "github.com/juju/juju/instance" "github.com/juju/juju/storage" "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" ) const attachedVolumeId = "1" @@ -40,81 +41,83 @@ AttachmentTag: "volume-1", } +type mockWatcher struct{} + +func (mockWatcher) Kill() {} +func (mockWatcher) Wait() error { return nil } + +func newMockNotifyWatcher() *mockNotifyWatcher { + return &mockNotifyWatcher{ + changes: make(chan struct{}, 1), + } +} + type mockNotifyWatcher struct { + mockWatcher changes chan struct{} } -func (*mockNotifyWatcher) Stop() error { - return nil -} - -func (*mockNotifyWatcher) Err() error { - return nil -} - -func (w *mockNotifyWatcher) Changes() <-chan struct{} { +func (w *mockNotifyWatcher) Changes() watcher.NotifyChannel { return w.changes } +func newMockStringsWatcher() *mockStringsWatcher { + return &mockStringsWatcher{ + changes: make(chan []string, 1), + } +} + type mockStringsWatcher struct { + mockWatcher changes chan []string } -func (*mockStringsWatcher) Stop() error { - return nil -} - -func (*mockStringsWatcher) Err() error { - return nil -} - -func (w *mockStringsWatcher) Changes() <-chan []string { +func (w *mockStringsWatcher) Changes() watcher.StringsChannel { return w.changes } +func newMockAttachmentsWatcher() *mockAttachmentsWatcher { + return &mockAttachmentsWatcher{ + changes: make(chan 
[]watcher.MachineStorageId, 1), + } +} + type mockAttachmentsWatcher struct { - changes chan []params.MachineStorageId -} - -func (*mockAttachmentsWatcher) Stop() error { - return nil -} - -func (*mockAttachmentsWatcher) Err() error { - return nil -} - -func (w *mockAttachmentsWatcher) Changes() <-chan []params.MachineStorageId { + mockWatcher + changes chan []watcher.MachineStorageId +} + +func (w *mockAttachmentsWatcher) Changes() watcher.MachineStorageIdsChannel { return w.changes } -type mockEnvironAccessor struct { +type mockModelAccessor struct { watcher *mockNotifyWatcher mu sync.Mutex cfg *config.Config } -func (e *mockEnvironAccessor) WatchForEnvironConfigChanges() (apiwatcher.NotifyWatcher, error) { +func (e *mockModelAccessor) WatchForModelConfigChanges() (watcher.NotifyWatcher, error) { return e.watcher, nil } -func (e *mockEnvironAccessor) EnvironConfig() (*config.Config, error) { +func (e *mockModelAccessor) ModelConfig() (*config.Config, error) { e.mu.Lock() cfg := e.cfg e.mu.Unlock() return cfg, nil } -func (e *mockEnvironAccessor) setConfig(cfg *config.Config) { +func (e *mockModelAccessor) setConfig(cfg *config.Config) { e.mu.Lock() e.cfg = cfg e.mu.Unlock() } -func newMockEnvironAccessor(c *gc.C) *mockEnvironAccessor { - return &mockEnvironAccessor{ - watcher: &mockNotifyWatcher{make(chan struct{}, 1)}, - cfg: testing.EnvironConfig(c), +func newMockModelAccessor(c *gc.C) *mockModelAccessor { + return &mockModelAccessor{ + watcher: newMockNotifyWatcher(), + cfg: testing.ModelConfig(c), } } @@ -142,15 +145,15 @@ return v } -func (w *mockVolumeAccessor) WatchVolumes() (apiwatcher.StringsWatcher, error) { +func (w *mockVolumeAccessor) WatchVolumes() (watcher.StringsWatcher, error) { return w.volumesWatcher, nil } -func (w *mockVolumeAccessor) WatchVolumeAttachments() (apiwatcher.MachineStorageIdsWatcher, error) { +func (w *mockVolumeAccessor) WatchVolumeAttachments() (watcher.MachineStorageIdsWatcher, error) { return w.attachmentsWatcher, nil } -func (w *mockVolumeAccessor) WatchBlockDevices(tag names.MachineTag) (apiwatcher.NotifyWatcher, error) { +func (w *mockVolumeAccessor) WatchBlockDevices(tag names.MachineTag) (watcher.NotifyWatcher, error) { return w.blockDevicesWatcher, nil } @@ -257,9 +260,9 @@ func newMockVolumeAccessor() *mockVolumeAccessor { return &mockVolumeAccessor{ - volumesWatcher: &mockStringsWatcher{make(chan []string, 1)}, - attachmentsWatcher: &mockAttachmentsWatcher{make(chan []params.MachineStorageId, 1)}, - blockDevicesWatcher: &mockNotifyWatcher{make(chan struct{}, 1)}, + volumesWatcher: newMockStringsWatcher(), + attachmentsWatcher: newMockAttachmentsWatcher(), + blockDevicesWatcher: newMockNotifyWatcher(), provisionedMachines: make(map[string]instance.Id), provisionedVolumes: make(map[string]params.Volume), provisionedAttachments: make(map[params.MachineStorageId]params.VolumeAttachment), @@ -289,11 +292,11 @@ return f } -func (w *mockFilesystemAccessor) WatchFilesystems() (apiwatcher.StringsWatcher, error) { +func (w *mockFilesystemAccessor) WatchFilesystems() (watcher.StringsWatcher, error) { return w.filesystemsWatcher, nil } -func (w *mockFilesystemAccessor) WatchFilesystemAttachments() (apiwatcher.MachineStorageIdsWatcher, error) { +func (w *mockFilesystemAccessor) WatchFilesystemAttachments() (watcher.MachineStorageIdsWatcher, error) { return w.attachmentsWatcher, nil } @@ -326,30 +329,26 @@ } func (v *mockFilesystemAccessor) FilesystemParams(filesystems []names.FilesystemTag) ([]params.FilesystemParamsResult, error) { - var result 
[]params.FilesystemParamsResult - for _, tag := range filesystems { - if _, ok := v.provisionedFilesystems[tag.String()]; ok { - result = append(result, params.FilesystemParamsResult{ - Error: ¶ms.Error{Message: "already provisioned"}, - }) - } else { - filesystemParams := params.FilesystemParams{ - FilesystemTag: tag.String(), - Size: 1024, - Provider: "dummy", - Tags: map[string]string{ - "very": "fancy", - }, - } - if _, ok := names.FilesystemMachine(tag); ok { - // place all volume-backed filesystems on machine-scoped - // volumes with the same ID as the filesystem. - filesystemParams.VolumeTag = names.NewVolumeTag(tag.Id()).String() - } - result = append(result, params.FilesystemParamsResult{Result: filesystemParams}) - } + results := make([]params.FilesystemParamsResult, len(filesystems)) + for i, tag := range filesystems { + // Parameters are returned regardless of whether the filesystem + // exists; this is to support destruction. + filesystemParams := params.FilesystemParams{ + FilesystemTag: tag.String(), + Size: 1024, + Provider: "dummy", + Tags: map[string]string{ + "very": "fancy", + }, + } + if _, ok := names.FilesystemMachine(tag); ok { + // place all volume-backed filesystems on machine-scoped + // volumes with the same ID as the filesystem. + filesystemParams.VolumeTag = names.NewVolumeTag(tag.Id()).String() + } + results[i] = params.FilesystemParamsResult{Result: filesystemParams} } - return result, nil + return results, nil } func (f *mockFilesystemAccessor) FilesystemAttachmentParams(ids []params.MachineStorageId) ([]params.FilesystemAttachmentParamsResult, error) { @@ -370,20 +369,23 @@ } func (f *mockFilesystemAccessor) SetFilesystemInfo(filesystems []params.Filesystem) ([]params.ErrorResult, error) { - return f.setFilesystemInfo(filesystems) + if f.setFilesystemInfo != nil { + return f.setFilesystemInfo(filesystems) + } + return make([]params.ErrorResult, len(filesystems)), nil } func (f *mockFilesystemAccessor) SetFilesystemAttachmentInfo(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { if f.setFilesystemAttachmentInfo != nil { return f.setFilesystemAttachmentInfo(filesystemAttachments) } - return nil, nil + return make([]params.ErrorResult, len(filesystemAttachments)), nil } func newMockFilesystemAccessor() *mockFilesystemAccessor { return &mockFilesystemAccessor{ - filesystemsWatcher: &mockStringsWatcher{make(chan []string, 1)}, - attachmentsWatcher: &mockAttachmentsWatcher{make(chan []params.MachineStorageId, 1)}, + filesystemsWatcher: newMockStringsWatcher(), + attachmentsWatcher: newMockAttachmentsWatcher(), provisionedMachines: make(map[string]instance.Id), provisionedFilesystems: make(map[string]params.Filesystem), provisionedAttachments: make(map[params.MachineStorageId]params.FilesystemAttachment), @@ -452,14 +454,18 @@ storage.Provider dynamic bool - volumeSourceFunc func(*config.Config, *storage.Config) (storage.VolumeSource, error) - filesystemSourceFunc func(*config.Config, *storage.Config) (storage.FilesystemSource, error) - createVolumesFunc func([]storage.VolumeParams) ([]storage.CreateVolumesResult, error) - attachVolumesFunc func([]storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) - detachVolumesFunc func([]storage.VolumeAttachmentParams) ([]error, error) - detachFilesystemsFunc func([]storage.FilesystemAttachmentParams) error - destroyVolumesFunc func([]string) ([]error, error) - validateVolumeParamsFunc func(storage.VolumeParams) error + volumeSourceFunc func(*config.Config, 
*storage.Config) (storage.VolumeSource, error) + filesystemSourceFunc func(*config.Config, *storage.Config) (storage.FilesystemSource, error) + createVolumesFunc func([]storage.VolumeParams) ([]storage.CreateVolumesResult, error) + createFilesystemsFunc func([]storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) + attachVolumesFunc func([]storage.VolumeAttachmentParams) ([]storage.AttachVolumesResult, error) + attachFilesystemsFunc func([]storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) + detachVolumesFunc func([]storage.VolumeAttachmentParams) ([]error, error) + detachFilesystemsFunc func([]storage.FilesystemAttachmentParams) ([]error, error) + destroyVolumesFunc func([]string) ([]error, error) + destroyFilesystemsFunc func([]string) ([]error, error) + validateVolumeParamsFunc func(storage.VolumeParams) error + validateFilesystemParamsFunc func(storage.FilesystemParams) error } type dummyVolumeSource struct { @@ -567,56 +573,75 @@ return make([]error, len(params)), nil } -func (*dummyFilesystemSource) ValidateFilesystemParams(params storage.FilesystemParams) error { +func (s *dummyFilesystemSource) ValidateFilesystemParams(params storage.FilesystemParams) error { + if s.provider != nil && s.provider.validateFilesystemParamsFunc != nil { + return s.provider.validateFilesystemParamsFunc(params) + } return nil } // CreateFilesystems makes some filesystems that we can check later to ensure things went as expected. -func (s *dummyFilesystemSource) CreateFilesystems(params []storage.FilesystemParams) ([]storage.Filesystem, error) { +func (s *dummyFilesystemSource) CreateFilesystems(params []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { + if s.provider != nil && s.provider.createFilesystemsFunc != nil { + return s.provider.createFilesystemsFunc(params) + } + paramsCopy := make([]storage.FilesystemParams, len(params)) copy(paramsCopy, params) s.createFilesystemsArgs = append(s.createFilesystemsArgs, paramsCopy) - var filesystems []storage.Filesystem - for _, p := range params { - filesystems = append(filesystems, storage.Filesystem{ + results := make([]storage.CreateFilesystemsResult, len(params)) + for i, p := range params { + results[i].Filesystem = &storage.Filesystem{ Tag: p.Tag, FilesystemInfo: storage.FilesystemInfo{ Size: p.Size, FilesystemId: "id-" + p.Tag.Id(), }, - }) - } - return filesystems, nil + } + } + return results, nil +} + +// DestroyFilesystems destroys filesystems. +func (s *dummyFilesystemSource) DestroyFilesystems(filesystemIds []string) ([]error, error) { + if s.provider.destroyFilesystemsFunc != nil { + return s.provider.destroyFilesystemsFunc(filesystemIds) + } + return make([]error, len(filesystemIds)), nil } // AttachFilesystems attaches filesystems to machines. 
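// Like the other source methods reworked in this diff, AttachFilesystems
// now reports one result per input rather than failing the whole batch; a
// minimal sketch of the presumed convention (attachOne is a hypothetical
// helper, not part of this change):
//
//    results := make([]storage.AttachFilesystemsResult, len(params))
//    for i, p := range params {
//        attachment, err := attachOne(p) // hypothetical single-attachment helper
//        if err != nil {
//            results[i].Error = err // per-entry failure; the bulk call itself succeeds
//            continue
//        }
//        results[i].FilesystemAttachment = attachment
//    }
//    return results, nil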
-func (*dummyFilesystemSource) AttachFilesystems(params []storage.FilesystemAttachmentParams) ([]storage.FilesystemAttachment, error) { - var filesystemAttachments []storage.FilesystemAttachment - for _, p := range params { +func (s *dummyFilesystemSource) AttachFilesystems(params []storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) { + if s.provider != nil && s.provider.attachFilesystemsFunc != nil { + return s.provider.attachFilesystemsFunc(params) + } + + results := make([]storage.AttachFilesystemsResult, len(params)) + for i, p := range params { if p.FilesystemId == "" { panic("AttachFilesystems called with unprovisioned filesystem") } if p.InstanceId == "" { panic("AttachFilesystems called with unprovisioned machine") } - filesystemAttachments = append(filesystemAttachments, storage.FilesystemAttachment{ + results[i].FilesystemAttachment = &storage.FilesystemAttachment{ p.Filesystem, p.Machine, storage.FilesystemAttachmentInfo{ Path: "/srv/" + p.FilesystemId, }, - }) + } } - return filesystemAttachments, nil + return results, nil } // DetachFilesystems detaches filesystems from machines. -func (s *dummyFilesystemSource) DetachFilesystems(params []storage.FilesystemAttachmentParams) error { +func (s *dummyFilesystemSource) DetachFilesystems(params []storage.FilesystemAttachmentParams) ([]error, error) { if s.provider.detachFilesystemsFunc != nil { return s.provider.detachFilesystemsFunc(params) } - return nil + return make([]error, len(params)), nil } type mockManagedFilesystemSource struct { @@ -628,31 +653,32 @@ return nil } -func (s *mockManagedFilesystemSource) CreateFilesystems(args []storage.FilesystemParams) ([]storage.Filesystem, error) { - var filesystems []storage.Filesystem - for _, arg := range args { +func (s *mockManagedFilesystemSource) CreateFilesystems(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { + results := make([]storage.CreateFilesystemsResult, len(args)) + for i, arg := range args { blockDevice, ok := s.blockDevices[arg.Volume] if !ok { - return nil, errors.Errorf("filesystem %v's backing-volume is not attached", arg.Tag.Id()) + results[i].Error = errors.Errorf("filesystem %v's backing-volume is not attached", arg.Tag.Id()) + continue } - filesystems = append(filesystems, storage.Filesystem{ + results[i].Filesystem = &storage.Filesystem{ Tag: arg.Tag, FilesystemInfo: storage.FilesystemInfo{ Size: blockDevice.Size, FilesystemId: blockDevice.DeviceName, }, - }) + } } - return filesystems, nil -} - -func (s *mockManagedFilesystemSource) DestroyFilesystems(filesystemIds []string) []error { - return make([]error, len(filesystemIds)) -} - -func (s *mockManagedFilesystemSource) AttachFilesystems(args []storage.FilesystemAttachmentParams) ([]storage.FilesystemAttachment, error) { - var filesystemAttachments []storage.FilesystemAttachment - for _, arg := range args { + return results, nil +} + +func (s *mockManagedFilesystemSource) DestroyFilesystems(filesystemIds []string) ([]error, error) { + return make([]error, len(filesystemIds)), nil +} + +func (s *mockManagedFilesystemSource) AttachFilesystems(args []storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) { + results := make([]storage.AttachFilesystemsResult, len(args)) + for i, arg := range args { if arg.FilesystemId == "" { panic("AttachFilesystems called with unprovisioned filesystem") } @@ -661,26 +687,28 @@ } filesystem, ok := s.filesystems[arg.Filesystem] if !ok { - return nil, errors.Errorf("filesystem %v has not been 
created", arg.Filesystem.Id()) + results[i].Error = errors.Errorf("filesystem %v has not been created", arg.Filesystem.Id()) + continue } blockDevice, ok := s.blockDevices[filesystem.Volume] if !ok { - return nil, errors.Errorf("filesystem %v's backing-volume is not attached", filesystem.Tag.Id()) + results[i].Error = errors.Errorf("filesystem %v's backing-volume is not attached", filesystem.Tag.Id()) + continue } - filesystemAttachments = append(filesystemAttachments, storage.FilesystemAttachment{ + results[i].FilesystemAttachment = &storage.FilesystemAttachment{ arg.Filesystem, arg.Machine, storage.FilesystemAttachmentInfo{ Path: "/mnt/" + blockDevice.DeviceName, ReadOnly: arg.ReadOnly, }, - }) + } } - return filesystemAttachments, nil + return results, nil } -func (s *mockManagedFilesystemSource) DetachFilesystems(params []storage.FilesystemAttachmentParams) error { - return errors.NotImplementedf("DetachFilesystems") +func (s *mockManagedFilesystemSource) DetachFilesystems(params []storage.FilesystemAttachmentParams) ([]error, error) { + return nil, errors.NotImplementedf("DetachFilesystems") } type mockMachineAccessor struct { @@ -688,7 +716,7 @@ watcher *mockNotifyWatcher } -func (a *mockMachineAccessor) WatchMachine(names.MachineTag) (apiwatcher.NotifyWatcher, error) { +func (a *mockMachineAccessor) WatchMachine(names.MachineTag) (watcher.NotifyWatcher, error) { return a.watcher, nil } @@ -710,29 +738,30 @@ func newMockMachineAccessor(c *gc.C) *mockMachineAccessor { return &mockMachineAccessor{ instanceIds: make(map[names.MachineTag]instance.Id), - watcher: &mockNotifyWatcher{make(chan struct{}, 1)}, + watcher: newMockNotifyWatcher(), } } type mockClock struct { gitjujutesting.Stub - now time.Time - nowFunc func() time.Time - afterFunc func(time.Duration) <-chan time.Time + now time.Time + onNow func() time.Time + onAfter func(time.Duration) <-chan time.Time + onAfterFunc func(time.Duration, func()) clock.Timer } func (c *mockClock) Now() time.Time { c.MethodCall(c, "Now") - if c.nowFunc != nil { - return c.nowFunc() + if c.onNow != nil { + return c.onNow() } return c.now } func (c *mockClock) After(d time.Duration) <-chan time.Time { c.MethodCall(c, "After", d) - if c.afterFunc != nil { - return c.afterFunc(d) + if c.onAfter != nil { + return c.onAfter(d) } if d > 0 { c.now = c.now.Add(d) @@ -742,6 +771,17 @@ return ch } +func (c *mockClock) AfterFunc(d time.Duration, f func()) clock.Timer { + c.MethodCall(c, "AfterFunc", d, f) + if c.onAfterFunc != nil { + return c.onAfterFunc(d, f) + } + if d > 0 { + c.now = c.now.Add(d) + } + return time.AfterFunc(0, f) +} + type mockStatusSetter struct { args []params.EntityStatusArgs setStatus func([]params.EntityStatusArgs) error === modified file 'src/github.com/juju/juju/worker/storageprovisioner/schedule.go' --- src/github.com/juju/juju/worker/storageprovisioner/schedule.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/schedule.go 2016-03-22 15:18:22 +0000 @@ -25,7 +25,7 @@ if len(ops) == 0 { return } - now := ctx.time.Now() + now := ctx.config.Clock.Now() for _, op := range ops { k := op.key() d := op.delay() === modified file 'src/github.com/juju/juju/worker/storageprovisioner/storageprovisioner.go' --- src/github.com/juju/juju/worker/storageprovisioner/storageprovisioner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/storageprovisioner.go 2016-03-22 15:18:22 +0000 @@ -1,23 +1,44 @@ // Copyright 2015 Canonical Ltd. 
// Licensed under the AGPLv3, see LICENCE file for details. +// Package storageprovisioner provides a worker that manages the provisioning +// and deprovisioning of storage volumes and filesystems, and attaching them +// to and detaching them from machines. +// +// A storage provisioner worker is run on each model manager, which +// manages model-scoped storage such as the cloud provider's virtual +// disk services. In addition, each machine agent runs a +// machine-storage provisioner worker that manages storage scoped to that machine, +// such as loop devices, temporary filesystems (tmpfs), and rootfs. +// +// The storage provisioner worker comprises the following major +// components: +// - a set of watchers for provisioning and attachment events +// - a schedule of pending operations +// - event-handling code fed by the watchers, which identifies +// interesting changes (unprovisioned -> provisioned, etc.), +// ensures prerequisites are met (e.g. volume and machine are both +// provisioned before attachment is attempted), and populates +// operations into the schedule +// - operation execution code fed by the schedule, which groups +// operations to make bulk calls to storage providers; updates +// status; and reschedules operations upon failure +// package storageprovisioner import ( "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" - "github.com/juju/utils/clock" "github.com/juju/utils/set" - "launchpad.net/tomb" - apiwatcher "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/environs/config" - "github.com/juju/juju/state/watcher" "github.com/juju/juju/storage" "github.com/juju/juju/storage/provider" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" "github.com/juju/juju/worker/storageprovisioner/internal/schedule" ) @@ -30,15 +51,15 @@ type VolumeAccessor interface { // WatchBlockDevices watches for changes to the block devices of the // specified machine. - WatchBlockDevices(names.MachineTag) (apiwatcher.NotifyWatcher, error) + WatchBlockDevices(names.MachineTag) (watcher.NotifyWatcher, error) // WatchVolumes watches for changes to volumes that this storage // provisioner is responsible for. - WatchVolumes() (apiwatcher.StringsWatcher, error) + WatchVolumes() (watcher.StringsWatcher, error) // WatchVolumeAttachments watches for changes to volume attachments // that this storage provisioner is responsible for. - WatchVolumeAttachments() (apiwatcher.MachineStorageIdsWatcher, error) + WatchVolumeAttachments() (watcher.MachineStorageIdsWatcher, error) // Volumes returns details of volumes with the specified tags. Volumes([]names.VolumeTag) ([]params.VolumeResult, error) @@ -72,11 +93,11 @@ type FilesystemAccessor interface { // WatchFilesystems watches for changes to filesystems that this // storage provisioner is responsible for. - WatchFilesystems() (apiwatcher.StringsWatcher, error) + WatchFilesystems() (watcher.StringsWatcher, error) // WatchFilesystemAttachments watches for changes to filesystem attachments // that this storage provisioner is responsible for. - WatchFilesystemAttachments() (apiwatcher.MachineStorageIdsWatcher, error) + WatchFilesystemAttachments() (watcher.MachineStorageIdsWatcher, error) // Filesystems returns details of filesystems with the specified tags. Filesystems([]names.FilesystemTag) ([]params.FilesystemResult, error) @@ -105,7 +126,7 @@ // worker to perform machine related operations.
type MachineAccessor interface { // WatchMachine watches for changes to the specified machine. - WatchMachine(names.MachineTag) (apiwatcher.NotifyWatcher, error) + WatchMachine(names.MachineTag) (watcher.NotifyWatcher, error) // InstanceIds returns the instance IDs of each machine. InstanceIds([]names.MachineTag) ([]params.StringResult, error) @@ -135,16 +156,16 @@ SetStatus([]params.EntityStatusArgs) error } -// EnvironAccessor defines an interface used to enable a storage provisioner -// worker to watch changes to and read environment config, to use when +// ModelAccessor defines an interface used to enable a storage provisioner +// worker to watch changes to and read model config, to use when // provisioning storage. -type EnvironAccessor interface { - // WatchForEnvironConfigChanges returns a watcher that will be notified - // whenever the environment config changes in state. - WatchForEnvironConfigChanges() (apiwatcher.NotifyWatcher, error) +type ModelAccessor interface { + // WatchForModelConfigChanges returns a watcher that will be notified + // whenever the model config changes in state. + WatchForModelConfigChanges() (watcher.NotifyWatcher, error) - // EnvironConfig returns the current environment config. - EnvironConfig() (*config.Config, error) + // ModelConfig returns the current model config. + ModelConfig() (*config.Config, error) } // NewStorageProvisioner returns a Worker which manages @@ -152,217 +173,189 @@ // of first-class volumes and filesystems. // // Machine-scoped storage workers will be provided with -// a storage directory, while environment-scoped workers +// a storage directory, while model-scoped workers // will not. If the directory path is non-empty, then it // will be passed to the storage source via its config. -func NewStorageProvisioner( - scope names.Tag, - storageDir string, - v VolumeAccessor, - f FilesystemAccessor, - l LifecycleManager, - e EnvironAccessor, - m MachineAccessor, - s StatusSetter, - clock clock.Clock, -) worker.Worker { - w := &storageprovisioner{ - scope: scope, - storageDir: storageDir, - volumes: v, - filesystems: f, - life: l, - environ: e, - machines: m, - status: s, - clock: clock, - } - go func() { - defer w.tomb.Done() - err := w.loop() - if err != tomb.ErrDying { - logger.Errorf("%s", err) - } - w.tomb.Kill(err) - }() - return w +var NewStorageProvisioner = func(config Config) (worker.Worker, error) { + if err := config.Validate(); err != nil { + return nil, errors.Trace(err) + } + w := &storageProvisioner{ + config: config, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &w.catacomb, + Work: w.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil } -type storageprovisioner struct { - tomb tomb.Tomb - scope names.Tag - storageDir string - volumes VolumeAccessor - filesystems FilesystemAccessor - life LifecycleManager - environ EnvironAccessor - machines MachineAccessor - status StatusSetter - clock clock.Clock +type storageProvisioner struct { + catacomb catacomb.Catacomb + config Config } // Kill implements Worker.Kill(). -func (w *storageprovisioner) Kill() { - w.tomb.Kill(nil) +func (w *storageProvisioner) Kill() { + w.catacomb.Kill(nil) } // Wait implements Worker.Wait(). 
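// Kill and Wait delegate the worker's lifetime to the catacomb: anything
// registered via catacomb.Add is killed and waited for when the worker
// stops, which is what allows the explicit tomb plumbing in this file to
// be removed. A minimal construction sketch, assuming the Config fields
// exercised by the tests (the storage directory path is illustrative only):
//
//    w, err := NewStorageProvisioner(Config{
//        Scope:       names.NewMachineTag("0"),
//        StorageDir:  "/var/lib/juju/storage", // hypothetical path
//        Volumes:     volumes,
//        Filesystems: filesystems,
//        Life:        life,
//        Environ:     modelAccessor,
//        Machines:    machines,
//        Status:      statusSetter,
//        Clock:       clock.WallClock,
//    })
//    if err != nil {
//        return errors.Trace(err)
//    }
//    defer worker.Stop(w) // Kill, then Wait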
-func (w *storageprovisioner) Wait() error { - return w.tomb.Wait() +func (w *storageProvisioner) Wait() error { + return w.catacomb.Wait() } -func (w *storageprovisioner) loop() error { - var environConfigChanges <-chan struct{} - var volumesWatcher apiwatcher.StringsWatcher - var filesystemsWatcher apiwatcher.StringsWatcher - var volumesChanges <-chan []string - var filesystemsChanges <-chan []string - var volumeAttachmentsWatcher apiwatcher.MachineStorageIdsWatcher - var filesystemAttachmentsWatcher apiwatcher.MachineStorageIdsWatcher - var volumeAttachmentsChanges <-chan []params.MachineStorageId - var filesystemAttachmentsChanges <-chan []params.MachineStorageId - var machineBlockDevicesWatcher apiwatcher.NotifyWatcher - var machineBlockDevicesChanges <-chan struct{} +func (w *storageProvisioner) loop() error { + var ( + volumesChanges watcher.StringsChannel + filesystemsChanges watcher.StringsChannel + volumeAttachmentsChanges watcher.MachineStorageIdsChannel + filesystemAttachmentsChanges watcher.MachineStorageIdsChannel + machineBlockDevicesChanges <-chan struct{} + ) machineChanges := make(chan names.MachineTag) - environConfigWatcher, err := w.environ.WatchForEnvironConfigChanges() + modelConfigWatcher, err := w.config.Environ.WatchForModelConfigChanges() if err != nil { - return errors.Annotate(err, "watching environ config") - } - defer watcher.Stop(environConfigWatcher, &w.tomb) - environConfigChanges = environConfigWatcher.Changes() + return errors.Annotate(err, "watching model config") + } + if err := w.catacomb.Add(modelConfigWatcher); err != nil { + return errors.Trace(err) + } // Machine-scoped provisioners need to watch block devices, to create // volume-backed filesystems. - if machineTag, ok := w.scope.(names.MachineTag); ok { - machineBlockDevicesWatcher, err = w.volumes.WatchBlockDevices(machineTag) + if machineTag, ok := w.config.Scope.(names.MachineTag); ok { + machineBlockDevicesWatcher, err := w.config.Volumes.WatchBlockDevices(machineTag) if err != nil { return errors.Annotate(err, "watching block devices") } - defer watcher.Stop(machineBlockDevicesWatcher, &w.tomb) + if err := w.catacomb.Add(machineBlockDevicesWatcher); err != nil { + return errors.Trace(err) + } machineBlockDevicesChanges = machineBlockDevicesWatcher.Changes() } - // The other watchers are started dynamically; stop only if started. 
- defer w.maybeStopWatcher(volumesWatcher) - defer w.maybeStopWatcher(volumeAttachmentsWatcher) - defer w.maybeStopWatcher(filesystemsWatcher) - defer w.maybeStopWatcher(filesystemAttachmentsWatcher) - startWatchers := func() error { - var err error - volumesWatcher, err = w.volumes.WatchVolumes() + volumesWatcher, err := w.config.Volumes.WatchVolumes() if err != nil { return errors.Annotate(err, "watching volumes") } - filesystemsWatcher, err = w.filesystems.WatchFilesystems() + if err := w.catacomb.Add(volumesWatcher); err != nil { + return errors.Trace(err) + } + volumesChanges = volumesWatcher.Changes() + + filesystemsWatcher, err := w.config.Filesystems.WatchFilesystems() if err != nil { return errors.Annotate(err, "watching filesystems") } - volumeAttachmentsWatcher, err = w.volumes.WatchVolumeAttachments() + if err := w.catacomb.Add(filesystemsWatcher); err != nil { + return errors.Trace(err) + } + filesystemsChanges = filesystemsWatcher.Changes() + + volumeAttachmentsWatcher, err := w.config.Volumes.WatchVolumeAttachments() if err != nil { return errors.Annotate(err, "watching volume attachments") } - filesystemAttachmentsWatcher, err = w.filesystems.WatchFilesystemAttachments() + if err := w.catacomb.Add(volumeAttachmentsWatcher); err != nil { + return errors.Trace(err) + } + volumeAttachmentsChanges = volumeAttachmentsWatcher.Changes() + + filesystemAttachmentsWatcher, err := w.config.Filesystems.WatchFilesystemAttachments() if err != nil { return errors.Annotate(err, "watching filesystem attachments") } - volumesChanges = volumesWatcher.Changes() - filesystemsChanges = filesystemsWatcher.Changes() - volumeAttachmentsChanges = volumeAttachmentsWatcher.Changes() + if err := w.catacomb.Add(filesystemAttachmentsWatcher); err != nil { + return errors.Trace(err) + } filesystemAttachmentsChanges = filesystemAttachmentsWatcher.Changes() return nil } ctx := context{ - scope: w.scope, - storageDir: w.storageDir, - volumeAccessor: w.volumes, - filesystemAccessor: w.filesystems, - life: w.life, - machineAccessor: w.machines, - statusSetter: w.status, - time: w.clock, - volumes: make(map[names.VolumeTag]storage.Volume), - volumeAttachments: make(map[params.MachineStorageId]storage.VolumeAttachment), - volumeBlockDevices: make(map[names.VolumeTag]storage.BlockDevice), - filesystems: make(map[names.FilesystemTag]storage.Filesystem), - filesystemAttachments: make(map[params.MachineStorageId]storage.FilesystemAttachment), - machines: make(map[names.MachineTag]*machineWatcher), - machineChanges: machineChanges, - schedule: schedule.NewSchedule(w.clock), - pendingVolumeBlockDevices: make(set.Tags), - incompleteVolumeParams: make(map[names.VolumeTag]storage.VolumeParams), - incompleteVolumeAttachmentParams: make(map[params.MachineStorageId]storage.VolumeAttachmentParams), - pendingFilesystems: make(map[names.FilesystemTag]storage.FilesystemParams), - pendingFilesystemAttachments: make(map[params.MachineStorageId]storage.FilesystemAttachmentParams), - pendingDyingFilesystemAttachments: make(map[params.MachineStorageId]storage.FilesystemAttachmentParams), + kill: w.catacomb.Kill, + addWorker: w.catacomb.Add, + config: w.config, + volumes: make(map[names.VolumeTag]storage.Volume), + volumeAttachments: make(map[params.MachineStorageId]storage.VolumeAttachment), + volumeBlockDevices: make(map[names.VolumeTag]storage.BlockDevice), + filesystems: make(map[names.FilesystemTag]storage.Filesystem), + filesystemAttachments: make(map[params.MachineStorageId]storage.FilesystemAttachment), + machines: 
make(map[names.MachineTag]*machineWatcher), + machineChanges: machineChanges, + schedule: schedule.NewSchedule(w.config.Clock), + incompleteVolumeParams: make(map[names.VolumeTag]storage.VolumeParams), + incompleteVolumeAttachmentParams: make(map[params.MachineStorageId]storage.VolumeAttachmentParams), + incompleteFilesystemParams: make(map[names.FilesystemTag]storage.FilesystemParams), + incompleteFilesystemAttachmentParams: make(map[params.MachineStorageId]storage.FilesystemAttachmentParams), + pendingVolumeBlockDevices: make(set.Tags), } ctx.managedFilesystemSource = newManagedFilesystemSource( ctx.volumeBlockDevices, ctx.filesystems, ) - defer func() { - for _, w := range ctx.machines { - w.stop() - } - }() - for { - // Check if any pending operations can be fulfilled. - if err := processPending(&ctx); err != nil { - return errors.Trace(err) + + // Check if block devices need to be refreshed. + if err := processPendingVolumeBlockDevices(&ctx); err != nil { + return errors.Annotate(err, "processing pending block devices") } select { - case <-w.tomb.Dying(): - return tomb.ErrDying - case _, ok := <-environConfigChanges: + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case _, ok := <-modelConfigWatcher.Changes(): if !ok { - return watcher.EnsureErr(environConfigWatcher) + return errors.New("model config watcher closed") } - environConfig, err := w.environ.EnvironConfig() + modelConfig, err := w.config.Environ.ModelConfig() if err != nil { - return errors.Annotate(err, "getting environ config") + return errors.Annotate(err, "getting model config") } - if ctx.environConfig == nil { - // We've received the initial environ config, + if ctx.modelConfig == nil { + // We've received the initial model config, // so we can begin provisioning storage. if err := startWatchers(); err != nil { return err } } - ctx.environConfig = environConfig + ctx.modelConfig = modelConfig case changes, ok := <-volumesChanges: if !ok { - return watcher.EnsureErr(volumesWatcher) + return errors.New("volumes watcher closed") } if err := volumesChanged(&ctx, changes); err != nil { return errors.Trace(err) } case changes, ok := <-volumeAttachmentsChanges: if !ok { - return watcher.EnsureErr(volumeAttachmentsWatcher) + return errors.New("volume attachments watcher closed") } if err := volumeAttachmentsChanged(&ctx, changes); err != nil { return errors.Trace(err) } case changes, ok := <-filesystemsChanges: if !ok { - return watcher.EnsureErr(filesystemsWatcher) + return errors.New("filesystems watcher closed") } if err := filesystemsChanged(&ctx, changes); err != nil { return errors.Trace(err) } case changes, ok := <-filesystemAttachmentsChanges: if !ok { - return watcher.EnsureErr(filesystemAttachmentsWatcher) + return errors.New("filesystem attachments watcher closed") } if err := filesystemAttachmentsChanged(&ctx, changes); err != nil { return errors.Trace(err) } case _, ok := <-machineBlockDevicesChanges: if !ok { - return watcher.EnsureErr(machineBlockDevicesWatcher) + return errors.New("machine block devices watcher closed") } if err := machineBlockDevicesChanged(&ctx); err != nil { return errors.Trace(err) @@ -380,32 +373,17 @@ } } -// processPending checks if the pending operations' prerequisites have -// been met, and processes them if so. -func processPending(ctx *context) error { - if err := processPendingVolumeBlockDevices(ctx); err != nil { - return errors.Annotate(err, "processing pending block devices") - } - // TODO(axw) below should be handled by processSchedule.
- if err := processPendingFilesystems(ctx); err != nil { - return errors.Annotate(err, "processing pending filesystems") - } - if err := processPendingDyingFilesystemAttachments(ctx); err != nil { - return errors.Annotate(err, "processing pending, dying filesystem attachments") - } - if err := processPendingFilesystemAttachments(ctx); err != nil { - return errors.Annotate(err, "processing pending filesystem attachments") - } - return nil -} - // processSchedule executes scheduled operations. func processSchedule(ctx *context) error { - ready := ctx.schedule.Ready(ctx.time.Now()) + ready := ctx.schedule.Ready(ctx.config.Clock.Now()) createVolumeOps := make(map[names.VolumeTag]*createVolumeOp) destroyVolumeOps := make(map[names.VolumeTag]*destroyVolumeOp) attachVolumeOps := make(map[params.MachineStorageId]*attachVolumeOp) detachVolumeOps := make(map[params.MachineStorageId]*detachVolumeOp) + createFilesystemOps := make(map[names.FilesystemTag]*createFilesystemOp) + destroyFilesystemOps := make(map[names.FilesystemTag]*destroyFilesystemOp) + attachFilesystemOps := make(map[params.MachineStorageId]*attachFilesystemOp) + detachFilesystemOps := make(map[params.MachineStorageId]*detachFilesystemOp) for _, item := range ready { op := item.(scheduleOp) key := op.key() @@ -418,6 +396,14 @@ attachVolumeOps[key.(params.MachineStorageId)] = op case *detachVolumeOp: detachVolumeOps[key.(params.MachineStorageId)] = op + case *createFilesystemOp: + createFilesystemOps[key.(names.FilesystemTag)] = op + case *destroyFilesystemOp: + destroyFilesystemOps[key.(names.FilesystemTag)] = op + case *attachFilesystemOp: + attachFilesystemOps[key.(params.MachineStorageId)] = op + case *detachFilesystemOp: + detachFilesystemOps[key.(params.MachineStorageId)] = op } } if len(destroyVolumeOps) > 0 { @@ -440,25 +426,34 @@ return errors.Annotate(err, "attaching volumes") } } + if len(destroyFilesystemOps) > 0 { + if err := destroyFilesystems(ctx, destroyFilesystemOps); err != nil { + return errors.Annotate(err, "destroying filesystems") + } + } + if len(createFilesystemOps) > 0 { + if err := createFilesystems(ctx, createFilesystemOps); err != nil { + return errors.Annotate(err, "creating filesystems") + } + } + if len(detachFilesystemOps) > 0 { + if err := detachFilesystems(ctx, detachFilesystemOps); err != nil { + return errors.Annotate(err, "detaching filesystems") + } + } + if len(attachFilesystemOps) > 0 { + if err := attachFilesystems(ctx, attachFilesystemOps); err != nil { + return errors.Annotate(err, "attaching filesystems") + } + } return nil } -func (p *storageprovisioner) maybeStopWatcher(w watcher.Stopper) { - if w != nil { - watcher.Stop(w, &p.tomb) - } -} - type context struct { - scope names.Tag - environConfig *config.Config - storageDir string - volumeAccessor VolumeAccessor - filesystemAccessor FilesystemAccessor - life LifecycleManager - machineAccessor MachineAccessor - statusSetter StatusSetter - time clock.Clock + kill func(error) + addWorker func(worker.Worker) error + config Config + modelConfig *config.Config // volumes contains information about provisioned volumes. volumes map[names.VolumeTag]storage.Volume @@ -504,22 +499,28 @@ // map and a volume attachment operation is scheduled. incompleteVolumeAttachmentParams map[params.MachineStorageId]storage.VolumeAttachmentParams + // incompleteFilesystemParams contains incomplete parameters for + // filesystems. + // + // Filesystem parameters are incomplete when they lack information + // about the initial attachment. 
Once the initial attachment + // information is available, the parameters are removed from this + // map and a filesystem creation operation is scheduled. + incompleteFilesystemParams map[names.FilesystemTag]storage.FilesystemParams + + // incompleteFilesystemAttachmentParams contains incomplete parameters + // for filesystem attachments + // + // Filesystem attachment parameters are incomplete when they lack + // information about the associated filesystem or machine. Once this + // information is available, the parameters are removed from this + // map and a filesystem attachment operation is scheduled. + incompleteFilesystemAttachmentParams map[params.MachineStorageId]storage.FilesystemAttachmentParams + // pendingVolumeBlockDevices contains the tags of volumes about whose // block devices we wish to enquire. pendingVolumeBlockDevices set.Tags - // pendingFilesystems contains parameters for filesystems that are - // yet to be created. - pendingFilesystems map[names.FilesystemTag]storage.FilesystemParams - - // pendingFilesystemAttachments contains parameters for filesystem attachments - // that are yet to be created. - pendingFilesystemAttachments map[params.MachineStorageId]storage.FilesystemAttachmentParams - - // pendingDyingFilesystemAttachments contains parameters for filesystem attachments - // that are to be destroyed. - pendingDyingFilesystemAttachments map[params.MachineStorageId]storage.FilesystemAttachmentParams - // managedFilesystemSource is a storage.FilesystemSource that // manages filesystems backed by volumes attached to the host // machine. === modified file 'src/github.com/juju/juju/worker/storageprovisioner/storageprovisioner_test.go' --- src/github.com/juju/juju/worker/storageprovisioner/storageprovisioner_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/storageprovisioner_test.go 2016-03-22 15:18:22 +0000 @@ -4,9 +4,9 @@ package storageprovisioner_test import ( - "errors" "time" + "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils/clock" @@ -18,6 +18,7 @@ "github.com/juju/juju/storage" "github.com/juju/juju/storage/provider/registry" coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" "github.com/juju/juju/worker" "github.com/juju/juju/worker/storageprovisioner" ) @@ -55,21 +56,27 @@ } func (s *storageProvisionerSuite) TestStartStop(c *gc.C) { - worker := storageprovisioner.NewStorageProvisioner( - coretesting.EnvironmentTag, - "dir", - newMockVolumeAccessor(), - newMockFilesystemAccessor(), - &mockLifecycleManager{}, - newMockEnvironAccessor(c), - newMockMachineAccessor(c), - &mockStatusSetter{}, - &mockClock{}, - ) + worker, err := storageprovisioner.NewStorageProvisioner(storageprovisioner.Config{ + Scope: coretesting.ModelTag, + Volumes: newMockVolumeAccessor(), + Filesystems: newMockFilesystemAccessor(), + Life: &mockLifecycleManager{}, + Environ: newMockModelAccessor(c), + Machines: newMockMachineAccessor(c), + Status: &mockStatusSetter{}, + Clock: &mockClock{}, + }) + c.Assert(err, jc.ErrorIsNil) + worker.Kill() c.Assert(worker.Wait(), gc.IsNil) } +func (s *storageProvisionerSuite) TestInvalidConfig(c *gc.C) { + _, err := storageprovisioner.NewStorageProvisioner(almostValidConfig()) + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + func (s *storageProvisionerSuite) TestVolumeAdded(c *gc.C) { expectedVolumes := []params.Volume{{ VolumeTag: "volume-1", @@ -123,7 +130,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer 
worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }, { MachineTag: "machine-1", AttachmentTag: "volume-2", @@ -183,7 +190,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }} assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment set") @@ -224,7 +231,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }} volumeAccessor.volumesWatcher.changes <- []string{"1"} @@ -265,6 +272,76 @@ }) } +func (s *storageProvisionerSuite) TestCreateFilesystemRetry(c *gc.C) { + filesystemInfoSet := make(chan interface{}) + filesystemAccessor := newMockFilesystemAccessor() + filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") + filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { + defer close(filesystemInfoSet) + return make([]params.ErrorResult, len(filesystems)), nil + } + + // mockClock's After will progress the current time by the specified + // duration and signal the channel immediately. + clock := &mockClock{} + var createFilesystemTimes []time.Time + + s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { + createFilesystemTimes = append(createFilesystemTimes, clock.Now()) + if len(createFilesystemTimes) < 10 { + return []storage.CreateFilesystemsResult{{Error: errors.New("badness")}}, nil + } + return []storage.CreateFilesystemsResult{{ + Filesystem: &storage.Filesystem{Tag: args[0].Tag}, + }}, nil + } + + args := &workerArgs{filesystems: filesystemAccessor, clock: clock} + worker := newStorageProvisioner(c, args) + defer func() { c.Assert(worker.Wait(), gc.IsNil) }() + defer worker.Kill() + + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ + MachineTag: "machine-1", AttachmentTag: "filesystem-1", + }} + filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} + args.environ.watcher.changes <- struct{}{} + waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") + c.Assert(createFilesystemTimes, gc.HasLen, 10) + + // The first attempt should have been immediate: T0.
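// The remaining attempts should follow the provisioner's retry backoff;
// judging by the delays asserted below, it doubles from 30 seconds up to
// a 30-minute ceiling. A sketch of that presumed schedule:
//
//    delay := 30 * time.Second
//    for i := 1; i < 10; i++ {
//        // attempt i fires one delay after attempt i-1
//        delay *= 2
//        if delay > 30*time.Minute {
//            delay = 30 * time.Minute
//        }
//    }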
+ c.Assert(createFilesystemTimes[0], gc.Equals, time.Time{}) + + delays := make([]time.Duration, len(createFilesystemTimes)-1) + for i := range createFilesystemTimes[1:] { + delays[i] = createFilesystemTimes[i+1].Sub(createFilesystemTimes[i]) + } + c.Assert(delays, jc.DeepEquals, []time.Duration{ + 30 * time.Second, + 1 * time.Minute, + 2 * time.Minute, + 4 * time.Minute, + 8 * time.Minute, + 16 * time.Minute, + 30 * time.Minute, // ceiling reached + 30 * time.Minute, + 30 * time.Minute, + }) + + c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "pending", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: ""}, + }) +} + func (s *storageProvisionerSuite) TestAttachVolumeRetry(c *gc.C) { volumeInfoSet := make(chan interface{}) volumeAccessor := newMockVolumeAccessor() @@ -305,7 +382,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }} volumeAccessor.volumesWatcher.changes <- []string{"1"} @@ -348,6 +425,89 @@ }) } +func (s *storageProvisionerSuite) TestAttachFilesystemRetry(c *gc.C) { + filesystemInfoSet := make(chan interface{}) + filesystemAccessor := newMockFilesystemAccessor() + filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") + filesystemAccessor.setFilesystemInfo = func(filesystems []params.Filesystem) ([]params.ErrorResult, error) { + defer close(filesystemInfoSet) + return make([]params.ErrorResult, len(filesystems)), nil + } + filesystemAttachmentInfoSet := make(chan interface{}) + filesystemAccessor.setFilesystemAttachmentInfo = func(filesystemAttachments []params.FilesystemAttachment) ([]params.ErrorResult, error) { + defer close(filesystemAttachmentInfoSet) + return make([]params.ErrorResult, len(filesystemAttachments)), nil + } + + // mockClock's After will progress the current time by the specified + // duration and signal the channel immediately.
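// In other words, no wall-clock time passes in this test: each After(d)
// advances the fake clock by d and fires immediately, so the asserted
// delays fall straight out of clock.Now(). A sketch of the mock's presumed
// behaviour (its definition, partly elided above, is in mocks_test.go):
//
//    func (c *mockClock) After(d time.Duration) <-chan time.Time {
//        if d > 0 {
//            c.now = c.now.Add(d) // advance fake time instead of sleeping
//        }
//        ch := make(chan time.Time, 1)
//        ch <- c.now // signal at once
//        return ch
//    }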
+ clock := &mockClock{} + var attachFilesystemTimes []time.Time + + s.provider.attachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]storage.AttachFilesystemsResult, error) { + attachFilesystemTimes = append(attachFilesystemTimes, clock.Now()) + if len(attachFilesystemTimes) < 10 { + return []storage.AttachFilesystemsResult{{Error: errors.New("badness")}}, nil + } + return []storage.AttachFilesystemsResult{{ + FilesystemAttachment: &storage.FilesystemAttachment{ + args[0].Filesystem, + args[0].Machine, + storage.FilesystemAttachmentInfo{ + Path: "/oh/over/there", + }, + }, + }}, nil + } + + args := &workerArgs{filesystems: filesystemAccessor, clock: clock} + worker := newStorageProvisioner(c, args) + defer func() { c.Assert(worker.Wait(), gc.IsNil) }() + defer worker.Kill() + + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ + MachineTag: "machine-1", AttachmentTag: "filesystem-1", + }} + filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} + args.environ.watcher.changes <- struct{}{} + waitChannel(c, filesystemInfoSet, "waiting for filesystem info to be set") + waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") + c.Assert(attachFilesystemTimes, gc.HasLen, 10) + + // The first attempt should have been immediate: T0. + c.Assert(attachFilesystemTimes[0], gc.Equals, time.Time{}) + + delays := make([]time.Duration, len(attachFilesystemTimes)-1) + for i := range attachFilesystemTimes[1:] { + delays[i] = attachFilesystemTimes[i+1].Sub(attachFilesystemTimes[i]) + } + c.Assert(delays, jc.DeepEquals, []time.Duration{ + 30 * time.Second, + 1 * time.Minute, + 2 * time.Minute, + 4 * time.Minute, + 8 * time.Minute, + 16 * time.Minute, + 30 * time.Minute, // ceiling reached + 30 * time.Minute, + 30 * time.Minute, + }) + + c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ + {Tag: "filesystem-1", Status: "attaching", Info: ""}, // CreateFilesystems + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, // AttachFilesystems + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attaching", Info: "badness"}, + {Tag: "filesystem-1", Status: "attached", Info: ""}, + }) +} + func (s *storageProvisionerSuite) TestValidateVolumeParams(c *gc.C) { volumeAccessor := newMockVolumeAccessor() volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") @@ -409,7 +569,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }, { MachineTag: "machine-1", AttachmentTag: "volume-2", @@ -449,6 +609,107 @@ }) } +func (s *storageProvisionerSuite) TestValidateFilesystemParams(c *gc.C) { + filesystemAccessor := newMockFilesystemAccessor() + filesystemAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") + filesystemAccessor.provisionedFilesystems["filesystem-3"] = params.Filesystem{FilesystemTag: "filesystem-3"} 
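// filesystem-3 above is provisioned but has no provider ID recorded, while
// filesystem-4 below carries one ("fs-id"); only the latter can therefore
// reach the provider's DestroyFilesystems, which the assertions at the end
// of this test depend on.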
+ filesystemAccessor.provisionedFilesystems["filesystem-4"] = params.Filesystem{ + FilesystemTag: "filesystem-4", + Info: params.FilesystemInfo{FilesystemId: "fs-id"}, + } + + var validateCalls int + validated := make(chan interface{}, 1) + s.provider.validateFilesystemParamsFunc = func(p storage.FilesystemParams) error { + validateCalls++ + validated <- p + switch p.Tag.String() { + case "filesystem-1", "filesystem-3": + return errors.New("something is wrong") + } + return nil + } + + life := func(tags []names.Tag) ([]params.LifeResult, error) { + results := make([]params.LifeResult, len(tags)) + for i := range results { + switch tags[i].String() { + case "filesystem-3", "filesystem-4": + results[i].Life = params.Dead + default: + results[i].Life = params.Alive + } + } + return results, nil + } + + createdFilesystems := make(chan interface{}, 1) + s.provider.createFilesystemsFunc = func(args []storage.FilesystemParams) ([]storage.CreateFilesystemsResult, error) { + createdFilesystems <- args + if len(args) != 1 { + return nil, errors.New("expected one argument") + } + return []storage.CreateFilesystemsResult{{ + Filesystem: &storage.Filesystem{Tag: args[0].Tag}, + }}, nil + } + + destroyedFilesystems := make(chan interface{}, 1) + s.provider.destroyFilesystemsFunc = func(filesystemIds []string) ([]error, error) { + destroyedFilesystems <- filesystemIds + return make([]error, len(filesystemIds)), nil + } + + args := &workerArgs{ + filesystems: filesystemAccessor, + life: &mockLifecycleManager{ + life: life, + }, + } + worker := newStorageProvisioner(c, args) + defer func() { c.Assert(worker.Wait(), gc.IsNil) }() + defer worker.Kill() + + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ + MachineTag: "machine-1", AttachmentTag: "filesystem-1", + }, { + MachineTag: "machine-1", AttachmentTag: "filesystem-2", + }} + filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} + args.environ.watcher.changes <- struct{}{} + waitChannel(c, validated, "waiting for filesystem parameter validation") + assertNoEvent(c, createdFilesystems, "filesystem created") + c.Assert(validateCalls, gc.Equals, 1) + + // Failure to create filesystem-1 should not block creation filesystem-2. + filesystemAccessor.filesystemsWatcher.changes <- []string{"2"} + waitChannel(c, validated, "waiting for filesystem parameter validation") + createFilesystemParams := waitChannel(c, createdFilesystems, "filesystem created").([]storage.FilesystemParams) + c.Assert(createFilesystemParams, gc.HasLen, 1) + c.Assert(createFilesystemParams[0].Tag.String(), gc.Equals, "filesystem-2") + c.Assert(validateCalls, gc.Equals, 2) + + filesystemAccessor.filesystemsWatcher.changes <- []string{"3"} + waitChannel(c, validated, "waiting for filesystem parameter validation") + assertNoEvent(c, destroyedFilesystems, "filesystem destroyed") + c.Assert(validateCalls, gc.Equals, 3) + + // Failure to destroy filesystem-3 should not block creation of filesystem-4. 
+ filesystemAccessor.filesystemsWatcher.changes <- []string{"4"} + waitChannel(c, validated, "waiting for filesystem parameter validation") + destroyFilesystemParams := waitChannel(c, destroyedFilesystems, "filesystem destroyed").([]string) + c.Assert(destroyFilesystemParams, jc.DeepEquals, []string{"fs-id"}) + c.Assert(validateCalls, gc.Equals, 4) + + c.Assert(args.statusSetter.args, jc.DeepEquals, []params.EntityStatusArgs{ + {Tag: "filesystem-1", Status: "error", Info: "something is wrong"}, + {Tag: "filesystem-2", Status: "attaching"}, + {Tag: "filesystem-3", Status: "error", Info: "something is wrong"}, + // destroyed filesystems are removed immediately, + // so there is no status update. + }) +} + func (s *storageProvisionerSuite) TestFilesystemAdded(c *gc.C) { expectedFilesystems := []params.Filesystem{{ FilesystemTag: "filesystem-1", @@ -588,7 +849,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }, { MachineTag: "machine-1", AttachmentTag: "volume-2", @@ -604,7 +865,10 @@ c.Assert(allVolumeAttachments, jc.SameContents, expectedVolumeAttachments) // Reattachment should only happen once per session. - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{alreadyAttached} + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ + MachineTag: "machine-0", + AttachmentTag: "volume-1", + }} assertNoEvent(c, volumeAttachmentInfoSet, "volume attachment info set") } @@ -667,7 +931,7 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "filesystem-1", }, { MachineTag: "machine-1", AttachmentTag: "filesystem-2", @@ -684,7 +948,10 @@ c.Assert(allFilesystemAttachments, jc.SameContents, expectedFilesystemAttachments) // Reattachment should only happen once per session. 
- filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{alreadyAttached} + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ + MachineTag: "machine-0", + AttachmentTag: "filesystem-1", + }} assertNoEvent(c, filesystemAttachmentInfoSet, "filesystem attachment info set") } @@ -786,7 +1053,7 @@ DeviceName: "xvdf1", Size: 123, } - filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-0", AttachmentTag: "filesystem-0-0", }} @@ -807,7 +1074,7 @@ }}) } -func (s *storageProvisionerSuite) TestUpdateEnvironConfig(c *gc.C) { +func (s *storageProvisionerSuite) TestUpdateModelConfig(c *gc.C) { volumeAccessor := newMockVolumeAccessor() volumeAccessor.provisionedMachines["machine-1"] = instance.Id("already-provisioned-1") s.provider.volumeSourceFunc = func(envConfig *config.Config, sourceConfig *storage.Config) (storage.VolumeSource, error) { @@ -966,7 +1233,7 @@ defer worker.Wait() defer worker.Kill() - args.volumes.attachmentsWatcher.changes <- []params.MachineStorageId{{ + args.volumes.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-0", AttachmentTag: "volume-0", }} args.environ.watcher.changes <- struct{}{} @@ -1039,13 +1306,13 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }} volumeAccessor.volumesWatcher.changes <- []string{"1"} args.environ.watcher.changes <- struct{}{} waitChannel(c, volumeAttachmentInfoSet, "waiting for volume attachments to be set") - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "volume-1", }} waitChannel(c, detached, "waiting for volume to be detached") @@ -1056,7 +1323,8 @@ machine := names.NewMachineTag("1") volume := names.NewVolumeTag("1") attachmentId := params.MachineStorageId{ - MachineTag: machine.String(), AttachmentTag: volume.String(), + MachineTag: machine.String(), + AttachmentTag: volume.String(), } volumeAccessor := newMockVolumeAccessor() volumeAccessor.provisionedAttachments[attachmentId] = params.VolumeAttachment{ @@ -1108,7 +1376,10 @@ volumeAccessor.volumesWatcher.changes <- []string{volume.Id()} args.environ.watcher.changes <- struct{}{} - volumeAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{attachmentId} + volumeAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ + MachineTag: machine.String(), + AttachmentTag: volume.String(), + }} waitChannel(c, removed, "waiting for attachment to be removed") c.Assert(detachVolumeTimes, gc.HasLen, 10) @@ -1163,7 +1434,7 @@ defer worker.Wait() defer worker.Kill() - args.filesystems.attachmentsWatcher.changes <- []params.MachineStorageId{{ + args.filesystems.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-0", AttachmentTag: "filesystem-0", }} args.environ.watcher.changes <- struct{}{} @@ -1201,12 +1472,12 @@ } detached := make(chan interface{}) - s.provider.detachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) error { + s.provider.detachFilesystemsFunc = func(args []storage.FilesystemAttachmentParams) ([]error, error) { c.Assert(args, gc.HasLen, 1) 
c.Assert(args[0].Machine.String(), gc.Equals, expectedAttachmentIds[0].MachineTag) c.Assert(args[0].Filesystem.String(), gc.Equals, expectedAttachmentIds[0].AttachmentTag) defer close(detached) - return nil + return make([]error, len(args)), nil } removed := make(chan interface{}) @@ -1236,13 +1507,13 @@ defer func() { c.Assert(worker.Wait(), gc.IsNil) }() defer worker.Kill() - filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "filesystem-1", }} filesystemAccessor.filesystemsWatcher.changes <- []string{"1"} args.environ.watcher.changes <- struct{}{} waitChannel(c, filesystemAttachmentInfoSet, "waiting for filesystem attachments to be set") - filesystemAccessor.attachmentsWatcher.changes <- []params.MachineStorageId{{ + filesystemAccessor.attachmentsWatcher.changes <- []watcher.MachineStorageId{{ MachineTag: "machine-1", AttachmentTag: "filesystem-1", }} waitChannel(c, detached, "waiting for filesystem to be detached") @@ -1441,8 +1712,13 @@ if args == nil { args = &workerArgs{} } - if args.scope == nil { - args.scope = coretesting.EnvironmentTag + var storageDir string + switch args.scope.(type) { + case names.MachineTag: + storageDir = "storage-dir" + case names.ModelTag: + case nil: + args.scope = coretesting.ModelTag } if args.volumes == nil { args.volumes = newMockVolumeAccessor() @@ -1454,7 +1730,7 @@ args.life = &mockLifecycleManager{} } if args.environ == nil { - args.environ = newMockEnvironAccessor(c) + args.environ = newMockModelAccessor(c) } if args.machines == nil { args.machines = newMockMachineAccessor(c) @@ -1465,17 +1741,19 @@ if args.statusSetter == nil { args.statusSetter = &mockStatusSetter{} } - return storageprovisioner.NewStorageProvisioner( - args.scope, - "storage-dir", - args.volumes, - args.filesystems, - args.life, - args.environ, - args.machines, - args.statusSetter, - args.clock, - ) + worker, err := storageprovisioner.NewStorageProvisioner(storageprovisioner.Config{ + Scope: args.scope, + StorageDir: storageDir, + Volumes: args.volumes, + Filesystems: args.filesystems, + Life: args.life, + Environ: args.environ, + Machines: args.machines, + Status: args.statusSetter, + Clock: args.clock, + }) + c.Assert(err, jc.ErrorIsNil) + return worker } type workerArgs struct { @@ -1483,7 +1761,7 @@ volumes *mockVolumeAccessor filesystems *mockFilesystemAccessor life *mockLifecycleManager - environ *mockEnvironAccessor + environ *mockModelAccessor machines *mockMachineAccessor clock clock.Clock statusSetter *mockStatusSetter === added file 'src/github.com/juju/juju/worker/storageprovisioner/volume_events.go' --- src/github.com/juju/juju/worker/storageprovisioner/volume_events.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/volume_events.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,500 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storageprovisioner + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/instance" + "github.com/juju/juju/storage" + "github.com/juju/juju/watcher" +) + +// volumesChanged is called when the lifecycle states of the volumes +// with the provided IDs have been seen to have changed. 
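// The handler partitions the changed tags by life, then leans on the bulk
// Volumes call returning results in input order so that the combined
// alive-then-dead query can be sliced positionally; a sketch of the
// invariant the code below relies on:
//
//    volumeTags := append(aliveTags, deadTags...) // alive first, dead after
//    results, err := ctx.config.Volumes.Volumes(volumeTags)
//    if err != nil {
//        return errors.Annotate(err, "getting volume information")
//    }
//    // results[i] corresponds to volumeTags[i]:
//    aliveResults := results[:len(aliveTags)]
//    deadResults := results[len(aliveTags):]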
+func volumesChanged(ctx *context, changes []string) error { + tags := make([]names.Tag, len(changes)) + for i, change := range changes { + tags[i] = names.NewVolumeTag(change) + } + alive, dying, dead, err := storageEntityLife(ctx, tags) + if err != nil { + return errors.Trace(err) + } + logger.Debugf("volumes alive: %v, dying: %v, dead: %v", alive, dying, dead) + if err := processDyingVolumes(ctx, dying); err != nil { + return errors.Annotate(err, "processing dying volumes") + } + if len(alive)+len(dead) == 0 { + return nil + } + + // Get volume information for alive and dead volumes, so + // we can provision/deprovision. + volumeTags := make([]names.VolumeTag, 0, len(alive)+len(dead)) + for _, tag := range alive { + volumeTags = append(volumeTags, tag.(names.VolumeTag)) + } + for _, tag := range dead { + volumeTags = append(volumeTags, tag.(names.VolumeTag)) + } + volumeResults, err := ctx.config.Volumes.Volumes(volumeTags) + if err != nil { + return errors.Annotate(err, "getting volume information") + } + if err := processDeadVolumes(ctx, volumeTags[len(alive):], volumeResults[len(alive):]); err != nil { + return errors.Annotate(err, "deprovisioning volumes") + } + if err := processAliveVolumes(ctx, alive, volumeResults[:len(alive)]); err != nil { + return errors.Annotate(err, "provisioning volumes") + } + return nil +} + +// volumeAttachmentsChanged is called when the lifecycle states of the volume +// attachments with the provided IDs have been seen to have changed. +func volumeAttachmentsChanged(ctx *context, watcherIds []watcher.MachineStorageId) error { + ids := copyMachineStorageIds(watcherIds) + alive, dying, dead, err := attachmentLife(ctx, ids) + if err != nil { + return errors.Trace(err) + } + logger.Debugf("volume attachments alive: %v, dying: %v, dead: %v", alive, dying, dead) + if len(dead) != 0 { + // We should not see dead volume attachments; + // attachments go directly from Dying to removed. + logger.Warningf("unexpected dead volume attachments: %v", dead) + } + if len(alive)+len(dying) == 0 { + return nil + } + + // Get volume information for alive and dying volume attachments, so + // we can attach/detach. + ids = append(alive, dying...) + volumeAttachmentResults, err := ctx.config.Volumes.VolumeAttachments(ids) + if err != nil { + return errors.Annotate(err, "getting volume attachment information") + } + + // Deprovision Dying volume attachments. + dyingVolumeAttachmentResults := volumeAttachmentResults[len(alive):] + if err := processDyingVolumeAttachments(ctx, dying, dyingVolumeAttachmentResults); err != nil { + return errors.Annotate(err, "deprovisioning volume attachments") + } + + // Provision Alive volume attachments. + aliveVolumeAttachmentResults := volumeAttachmentResults[:len(alive)] + if err := processAliveVolumeAttachments(ctx, alive, aliveVolumeAttachmentResults); err != nil { + return errors.Annotate(err, "provisioning volume attachments") + } + + return nil +} + +// processDyingVolumes processes the VolumeResults for Dying volumes, +// removing them from provisioning-pending as necessary. +func processDyingVolumes(ctx *context, tags []names.Tag) error { + for _, tag := range tags { + removePendingVolume(ctx, tag.(names.VolumeTag)) + } + return nil +} + +// updateVolume updates the context with the given volume info.
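// Freshly learned volume info can also unblock attachments: any entry in
// ctx.incompleteVolumeAttachmentParams that was waiting only on this
// volume's provider ID has its VolumeId filled in and is promoted onto the
// schedule via updatePendingVolumeAttachment, as the loop below shows.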
+func updateVolume(ctx *context, info storage.Volume) { + ctx.volumes[info.Tag] = info + for id, params := range ctx.incompleteVolumeAttachmentParams { + if params.VolumeId == "" && id.AttachmentTag == info.Tag.String() { + params.VolumeId = info.VolumeId + updatePendingVolumeAttachment(ctx, id, params) + } + } +} + +// updatePendingVolume adds the given volume params to either the incomplete +// set or the schedule. If the params are incomplete due to a missing instance +// ID, updatePendingVolume will request that the machine be watched so its +// instance ID can be learned. +func updatePendingVolume(ctx *context, params storage.VolumeParams) { + if params.Attachment.InstanceId == "" { + watchMachine(ctx, params.Attachment.Machine) + ctx.incompleteVolumeParams[params.Tag] = params + } else { + delete(ctx.incompleteVolumeParams, params.Tag) + scheduleOperations(ctx, &createVolumeOp{args: params}) + } +} + +// removePendingVolume removes the specified pending volume from the +// incomplete set and/or the schedule if it exists there. +func removePendingVolume(ctx *context, tag names.VolumeTag) { + delete(ctx.incompleteVolumeParams, tag) + ctx.schedule.Remove(tag) +} + +// updatePendingVolumeAttachment adds the given volume attachment params to +// either the incomplete set or the schedule. If the params are incomplete +// due to a missing instance ID, updatePendingVolumeAttachment will request +// that the machine be watched so its instance ID can be learned. +func updatePendingVolumeAttachment( + ctx *context, + id params.MachineStorageId, + params storage.VolumeAttachmentParams, +) { + if params.InstanceId == "" { + watchMachine(ctx, params.Machine) + } else if params.VolumeId != "" { + delete(ctx.incompleteVolumeAttachmentParams, id) + scheduleOperations(ctx, &attachVolumeOp{args: params}) + return + } + ctx.incompleteVolumeAttachmentParams[id] = params +} + +// removePendingVolumeAttachment removes the specified pending volume +// attachment from the incomplete set and/or the schedule if it exists +// there. +func removePendingVolumeAttachment(ctx *context, id params.MachineStorageId) { + delete(ctx.incompleteVolumeAttachmentParams, id) + ctx.schedule.Remove(id) +} + +// processDeadVolumes processes the VolumeResults for Dead volumes, +// deprovisioning volumes and removing from state as necessary. +func processDeadVolumes(ctx *context, tags []names.VolumeTag, volumeResults []params.VolumeResult) error { + for _, tag := range tags { + removePendingVolume(ctx, tag) + } + var destroy []names.VolumeTag + var remove []names.Tag + for i, result := range volumeResults { + tag := tags[i] + if result.Error == nil { + logger.Debugf("volume %s is provisioned, queuing for deprovisioning", tag.Id()) + volume, err := volumeFromParams(result.Result) + if err != nil { + return errors.Annotate(err, "getting volume info") + } + updateVolume(ctx, volume) + destroy = append(destroy, tag) + continue + } + if params.IsCodeNotProvisioned(result.Error) { + logger.Debugf("volume %s is not provisioned, queuing for removal", tag.Id()) + remove = append(remove, tag) + continue + } + return errors.Annotatef(result.Error, "getting volume information for volume %s", tag.Id()) + } + if len(destroy) > 0 { + ops := make([]scheduleOp, len(destroy)) + for i, tag := range destroy { + ops[i] = &destroyVolumeOp{tag: tag} + } + scheduleOperations(ctx, ops...) 
+ } + if err := removeEntities(ctx, remove); err != nil { + return errors.Annotate(err, "removing volumes from state") + } + return nil +} + +// processDyingVolumeAttachments processes the VolumeAttachmentResults for +// Dying volume attachments, detaching volumes and updating state as necessary. +func processDyingVolumeAttachments( + ctx *context, + ids []params.MachineStorageId, + volumeAttachmentResults []params.VolumeAttachmentResult, +) error { + for _, id := range ids { + removePendingVolumeAttachment(ctx, id) + } + detach := make([]params.MachineStorageId, 0, len(ids)) + remove := make([]params.MachineStorageId, 0, len(ids)) + for i, result := range volumeAttachmentResults { + id := ids[i] + if result.Error == nil { + detach = append(detach, id) + continue + } + if params.IsCodeNotProvisioned(result.Error) { + remove = append(remove, id) + continue + } + return errors.Annotatef(result.Error, "getting information for volume attachment %v", id) + } + if len(detach) > 0 { + attachmentParams, err := volumeAttachmentParams(ctx, detach) + if err != nil { + return errors.Trace(err) + } + ops := make([]scheduleOp, len(attachmentParams)) + for i, p := range attachmentParams { + ops[i] = &detachVolumeOp{args: p} + } + scheduleOperations(ctx, ops...) + } + if err := removeAttachments(ctx, remove); err != nil { + return errors.Annotate(err, "removing attachments from state") + } + for _, id := range remove { + delete(ctx.volumeAttachments, id) + } + return nil +} + +// processAliveVolumes processes the VolumeResults for Alive volumes, +// provisioning volumes and setting the info in state as necessary. +func processAliveVolumes(ctx *context, tags []names.Tag, volumeResults []params.VolumeResult) error { + // Filter out the already-provisioned volumes. + pending := make([]names.VolumeTag, 0, len(tags)) + for i, result := range volumeResults { + volumeTag := tags[i].(names.VolumeTag) + if result.Error == nil { + // Volume is already provisioned: skip. + logger.Debugf("volume %q is already provisioned, nothing to do", tags[i].Id()) + volume, err := volumeFromParams(result.Result) + if err != nil { + return errors.Annotate(err, "getting volume info") + } + updateVolume(ctx, volume) + removePendingVolume(ctx, volumeTag) + continue + } + if !params.IsCodeNotProvisioned(result.Error) { + return errors.Annotatef( + result.Error, "getting volume information for volume %q", tags[i].Id(), + ) + } + // The volume has not yet been provisioned, so record its tag + // to enquire about parameters below. + pending = append(pending, volumeTag) + } + if len(pending) == 0 { + return nil + } + volumeParams, err := volumeParams(ctx, pending) + if err != nil { + return errors.Annotate(err, "getting volume params") + } + for _, params := range volumeParams { + updatePendingVolume(ctx, params) + } + return nil +} + +// processAliveVolumeAttachments processes the VolumeAttachmentResults +// for Alive volume attachments, attaching volumes and setting the info +// in state as necessary. +func processAliveVolumeAttachments( + ctx *context, + ids []params.MachineStorageId, + volumeAttachmentResults []params.VolumeAttachmentResult, +) error { + // Filter out the already-attached. + pending := make([]params.MachineStorageId, 0, len(ids)) + for i, result := range volumeAttachmentResults { + if result.Error == nil { + // Volume attachment is already provisioned: if we + // didn't (re)attach in this session, then we must + // do so now. 
+ action := "nothing to do" + if _, ok := ctx.volumeAttachments[ids[i]]; !ok { + // Not yet (re)attached in this session. + pending = append(pending, ids[i]) + action = "will reattach" + } + logger.Debugf( + "%s is already attached to %s, %s", + ids[i].AttachmentTag, ids[i].MachineTag, action, + ) + removePendingVolumeAttachment(ctx, ids[i]) + continue + } + if !params.IsCodeNotProvisioned(result.Error) { + return errors.Annotatef( + result.Error, "getting information for attachment %v", ids[i], + ) + } + // The volume has not yet been provisioned, so record its tag + // to enquire about parameters below. + pending = append(pending, ids[i]) + } + if len(pending) == 0 { + return nil + } + params, err := volumeAttachmentParams(ctx, pending) + if err != nil { + return errors.Trace(err) + } + for i, params := range params { + if volume, ok := ctx.volumes[params.Volume]; ok { + params.VolumeId = volume.VolumeId + } + updatePendingVolumeAttachment(ctx, pending[i], params) + } + return nil +} + +// volumeAttachmentParams obtains the specified attachments' parameters. +func volumeAttachmentParams( + ctx *context, ids []params.MachineStorageId, +) ([]storage.VolumeAttachmentParams, error) { + paramsResults, err := ctx.config.Volumes.VolumeAttachmentParams(ids) + if err != nil { + return nil, errors.Annotate(err, "getting volume attachment params") + } + attachmentParams := make([]storage.VolumeAttachmentParams, len(ids)) + for i, result := range paramsResults { + if result.Error != nil { + return nil, errors.Annotate(result.Error, "getting volume attachment parameters") + } + params, err := volumeAttachmentParamsFromParams(result.Result) + if err != nil { + return nil, errors.Annotate(err, "getting volume attachment parameters") + } + attachmentParams[i] = params + } + return attachmentParams, nil +} + +// volumeParams obtains the specified volumes' parameters. 
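Stepping back, the updatePendingVolume and updatePendingVolumeAttachment functions above implement a two-stage pending scheme: params that are missing a prerequisite (an instance ID or a volume ID that will arrive later) wait in an incomplete map, and only fully specified params are moved onto the operation schedule. A reduced sketch of that state machine, with invented names and a plain slice standing in for the schedule:

	package main

	import "fmt"

	type volumeParams struct {
		Tag        string
		InstanceID string // prerequisite learned asynchronously
	}

	type pendingState struct {
		incomplete map[string]volumeParams
		schedule   []volumeParams
	}

	// updatePending parks params until their prerequisite is known;
	// re-delivery with the prerequisite filled in promotes the parked
	// entry onto the schedule and drops it from the incomplete set.
	func (s *pendingState) updatePending(p volumeParams) {
		if p.InstanceID == "" {
			s.incomplete[p.Tag] = p
			return
		}
		delete(s.incomplete, p.Tag)
		s.schedule = append(s.schedule, p)
	}

	func main() {
		s := &pendingState{incomplete: make(map[string]volumeParams)}
		s.updatePending(volumeParams{Tag: "volume-0"})                     // parked
		s.updatePending(volumeParams{Tag: "volume-0", InstanceID: "i-42"}) // scheduled
		fmt.Println(len(s.incomplete), len(s.schedule)) // prints: 0 1
	}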
+func volumeParams(ctx *context, tags []names.VolumeTag) ([]storage.VolumeParams, error) { + paramsResults, err := ctx.config.Volumes.VolumeParams(tags) + if err != nil { + return nil, errors.Annotate(err, "getting volume params") + } + allParams := make([]storage.VolumeParams, len(tags)) + for i, result := range paramsResults { + if result.Error != nil { + return nil, errors.Annotate(result.Error, "getting volume parameters") + } + params, err := volumeParamsFromParams(result.Result) + if err != nil { + return nil, errors.Annotate(err, "getting volume parameters") + } + allParams[i] = params + } + return allParams, nil +} + +func volumesFromStorage(in []storage.Volume) []params.Volume { + out := make([]params.Volume, len(in)) + for i, v := range in { + out[i] = params.Volume{ + v.Tag.String(), + params.VolumeInfo{ + v.VolumeId, + v.HardwareId, + v.Size, + v.Persistent, + }, + } + } + return out +} + +func volumeAttachmentsFromStorage(in []storage.VolumeAttachment) []params.VolumeAttachment { + out := make([]params.VolumeAttachment, len(in)) + for i, v := range in { + out[i] = params.VolumeAttachment{ + v.Volume.String(), + v.Machine.String(), + params.VolumeAttachmentInfo{ + v.DeviceName, + v.DeviceLink, + v.BusAddress, + v.ReadOnly, + }, + } + } + return out +} + +func volumeFromParams(in params.Volume) (storage.Volume, error) { + volumeTag, err := names.ParseVolumeTag(in.VolumeTag) + if err != nil { + return storage.Volume{}, errors.Trace(err) + } + return storage.Volume{ + volumeTag, + storage.VolumeInfo{ + in.Info.VolumeId, + in.Info.HardwareId, + in.Info.Size, + in.Info.Persistent, + }, + }, nil +} + +func volumeParamsFromParams(in params.VolumeParams) (storage.VolumeParams, error) { + volumeTag, err := names.ParseVolumeTag(in.VolumeTag) + if err != nil { + return storage.VolumeParams{}, errors.Trace(err) + } + providerType := storage.ProviderType(in.Provider) + + var attachment *storage.VolumeAttachmentParams + if in.Attachment != nil { + if in.Attachment.Provider != in.Provider { + return storage.VolumeParams{}, errors.Errorf( + "storage provider mismatch: volume (%q), attachment (%q)", + in.Provider, in.Attachment.Provider, + ) + } + if in.Attachment.VolumeTag != in.VolumeTag { + return storage.VolumeParams{}, errors.Errorf( + "volume tag mismatch: volume (%q), attachment (%q)", + in.VolumeTag, in.Attachment.VolumeTag, + ) + } + machineTag, err := names.ParseMachineTag(in.Attachment.MachineTag) + if err != nil { + return storage.VolumeParams{}, errors.Annotate( + err, "parsing attachment machine tag", + ) + } + attachment = &storage.VolumeAttachmentParams{ + AttachmentParams: storage.AttachmentParams{ + Provider: providerType, + Machine: machineTag, + InstanceId: instance.Id(in.Attachment.InstanceId), + ReadOnly: in.Attachment.ReadOnly, + }, + Volume: volumeTag, + } + } + return storage.VolumeParams{ + volumeTag, + in.Size, + providerType, + in.Attributes, + in.Tags, + attachment, + }, nil +} + +func volumeAttachmentParamsFromParams(in params.VolumeAttachmentParams) (storage.VolumeAttachmentParams, error) { + machineTag, err := names.ParseMachineTag(in.MachineTag) + if err != nil { + return storage.VolumeAttachmentParams{}, errors.Trace(err) + } + volumeTag, err := names.ParseVolumeTag(in.VolumeTag) + if err != nil { + return storage.VolumeAttachmentParams{}, errors.Trace(err) + } + return storage.VolumeAttachmentParams{ + AttachmentParams: storage.AttachmentParams{ + Provider: storage.ProviderType(in.Provider), + Machine: machineTag, + InstanceId: instance.Id(in.InstanceId), + 
ReadOnly: in.ReadOnly, + }, + Volume: volumeTag, + VolumeId: in.VolumeId, + }, nil +} === modified file 'src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go' --- src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/volume_ops.go 2016-03-22 15:18:22 +0000 @@ -19,7 +19,7 @@ volumeParams = append(volumeParams, op.args) } paramsBySource, volumeSources, err := volumeParamsBySource( - ctx.environConfig, ctx.storageDir, volumeParams, + ctx.modelConfig, ctx.config.StorageDir, volumeParams, ) if err != nil { return errors.Trace(err) @@ -93,7 +93,7 @@ // by environment, so that we can "harvest" them if they're // unknown. This will take care of killing volumes that we fail // to record in state. - errorResults, err := ctx.volumeAccessor.SetVolumeInfo(volumesFromStorage(volumes)) + errorResults, err := ctx.config.Volumes.SetVolumeInfo(volumesFromStorage(volumes)) if err != nil { return errors.Annotate(err, "publishing volumes to state") } @@ -128,7 +128,7 @@ volumeAttachmentParams = append(volumeAttachmentParams, op.args) } paramsBySource, volumeSources, err := volumeAttachmentParamsBySource( - ctx.environConfig, ctx.storageDir, volumeAttachmentParams, + ctx.modelConfig, ctx.config.StorageDir, volumeAttachmentParams, ) if err != nil { return errors.Trace(err) @@ -194,7 +194,7 @@ return errors.Trace(err) } paramsBySource, volumeSources, err := volumeParamsBySource( - ctx.environConfig, ctx.storageDir, volumeParams, + ctx.modelConfig, ctx.config.StorageDir, volumeParams, ) if err != nil { return errors.Trace(err) @@ -266,7 +266,7 @@ volumeAttachmentParams = append(volumeAttachmentParams, op.args) } paramsBySource, volumeSources, err := volumeAttachmentParamsBySource( - ctx.environConfig, ctx.storageDir, volumeAttachmentParams, + ctx.modelConfig, ctx.config.StorageDir, volumeAttachmentParams, ) if err != nil { return errors.Trace(err) @@ -415,7 +415,7 @@ // provider, by environment, so that we can "harvest" them if they're // unknown. This will take care of killing volumes that we fail to // record in state. - errorResults, err := ctx.volumeAccessor.SetVolumeAttachmentInfo( + errorResults, err := ctx.config.Volumes.SetVolumeAttachmentInfo( volumeAttachmentsFromStorage(volumeAttachments), ) if err != nil { === removed file 'src/github.com/juju/juju/worker/storageprovisioner/volumes.go' --- src/github.com/juju/juju/worker/storageprovisioner/volumes.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/storageprovisioner/volumes.go 1970-01-01 00:00:00 +0000 @@ -1,498 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storageprovisioner - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/instance" - "github.com/juju/juju/storage" -) - -// volumesChanged is called when the lifecycle states of the volumes -// with the provided IDs have been seen to have changed. 
-func volumesChanged(ctx *context, changes []string) error { - tags := make([]names.Tag, len(changes)) - for i, change := range changes { - tags[i] = names.NewVolumeTag(change) - } - alive, dying, dead, err := storageEntityLife(ctx, tags) - if err != nil { - return errors.Trace(err) - } - logger.Debugf("volumes alive: %v, dying: %v, dead: %v", alive, dying, dead) - if err := processDyingVolumes(ctx, dying); err != nil { - return errors.Annotate(err, "processing dying volumes") - } - if len(alive)+len(dead) == 0 { - return nil - } - - // Get volume information for alive and dead volumes, so - // we can provision/deprovision. - volumeTags := make([]names.VolumeTag, 0, len(alive)+len(dead)) - for _, tag := range alive { - volumeTags = append(volumeTags, tag.(names.VolumeTag)) - } - for _, tag := range dead { - volumeTags = append(volumeTags, tag.(names.VolumeTag)) - } - volumeResults, err := ctx.volumeAccessor.Volumes(volumeTags) - if err != nil { - return errors.Annotatef(err, "getting volume information") - } - if err := processDeadVolumes(ctx, volumeTags[len(alive):], volumeResults[len(alive):]); err != nil { - return errors.Annotate(err, "deprovisioning volumes") - } - if err := processAliveVolumes(ctx, alive, volumeResults[:len(alive)]); err != nil { - return errors.Annotate(err, "provisioning volumes") - } - return nil -} - -// volumeAttachmentsChanged is called when the lifecycle states of the volume -// attachments with the provided IDs have been seen to have changed. -func volumeAttachmentsChanged(ctx *context, ids []params.MachineStorageId) error { - alive, dying, dead, err := attachmentLife(ctx, ids) - if err != nil { - return errors.Trace(err) - } - logger.Debugf("volume attachments alive: %v, dying: %v, dead: %v", alive, dying, dead) - if len(dead) != 0 { - // We should not see dead volume attachments; - // attachments go directly from Dying to removed. - logger.Debugf("unexpected dead volume attachments: %v", dead) - } - if len(alive)+len(dying) == 0 { - return nil - } - - // Get volume information for alive and dying volume attachments, so - // we can attach/detach. - ids = append(alive, dying...) - volumeAttachmentResults, err := ctx.volumeAccessor.VolumeAttachments(ids) - if err != nil { - return errors.Annotatef(err, "getting volume attachment information") - } - - // Deprovision Dying volume attachments. - dyingVolumeAttachmentResults := volumeAttachmentResults[len(alive):] - if err := processDyingVolumeAttachments(ctx, dying, dyingVolumeAttachmentResults); err != nil { - return errors.Annotate(err, "deprovisioning volume attachments") - } - - // Provision Alive volume attachments. - aliveVolumeAttachmentResults := volumeAttachmentResults[:len(alive)] - if err := processAliveVolumeAttachments(ctx, alive, aliveVolumeAttachmentResults); err != nil { - return errors.Annotate(err, "provisioning volumes") - } - - return nil -} - -// processDyingVolumes processes the VolumeResults for Dying volumes, -// removing them from provisioning-pending as necessary. -func processDyingVolumes(ctx *context, tags []names.Tag) error { - for _, tag := range tags { - removePendingVolume(ctx, tag.(names.VolumeTag)) - } - return nil -} - -// updateVolume updates the context with the given volume info. 
-func updateVolume(ctx *context, info storage.Volume) { - ctx.volumes[info.Tag] = info - for id, params := range ctx.incompleteVolumeAttachmentParams { - if params.VolumeId == "" && id.AttachmentTag == info.Tag.String() { - params.VolumeId = info.VolumeId - updatePendingVolumeAttachment(ctx, id, params) - } - } -} - -// updatePendingVolume adds the given volume params to either the incomplete -// set or the schedule. If the params are incomplete due to a missing instance -// ID, updatePendingVolume will request that the machine be watched so its -// instance ID can be learned. -func updatePendingVolume(ctx *context, params storage.VolumeParams) { - if params.Attachment.InstanceId == "" { - watchMachine(ctx, params.Attachment.Machine) - ctx.incompleteVolumeParams[params.Tag] = params - } else { - delete(ctx.incompleteVolumeParams, params.Tag) - scheduleOperations(ctx, &createVolumeOp{args: params}) - } -} - -// removePendingVolume removes the specified pending volume from the -// incomplete set and/or the schedule if it exists there. -func removePendingVolume(ctx *context, tag names.VolumeTag) { - delete(ctx.incompleteVolumeParams, tag) - ctx.schedule.Remove(tag) -} - -// updatePendingVolumeAttachment adds the given volume attachment params to -// either the incomplete set or the schedule. If the params are incomplete -// due to a missing instance ID, updatePendingVolumeAttachment will request -// that the machine be watched so its instance ID can be learned. -func updatePendingVolumeAttachment( - ctx *context, - id params.MachineStorageId, - params storage.VolumeAttachmentParams, -) { - if params.InstanceId == "" { - watchMachine(ctx, params.Machine) - } else if params.VolumeId != "" { - delete(ctx.incompleteVolumeAttachmentParams, id) - scheduleOperations(ctx, &attachVolumeOp{args: params}) - return - } - ctx.incompleteVolumeAttachmentParams[id] = params -} - -// removePendingVolumeAttachment removes the specified pending volume -// attachment from the incomplete set and/or the schedule if it exists -// there. -func removePendingVolumeAttachment(ctx *context, id params.MachineStorageId) { - delete(ctx.incompleteVolumeAttachmentParams, id) - ctx.schedule.Remove(id) -} - -// processDeadVolumes processes the VolumeResults for Dead volumes, -// deprovisioning volumes and removing from state as necessary. -func processDeadVolumes(ctx *context, tags []names.VolumeTag, volumeResults []params.VolumeResult) error { - for _, tag := range tags { - removePendingVolume(ctx, tag) - } - var destroy []names.VolumeTag - var remove []names.Tag - for i, result := range volumeResults { - tag := tags[i] - if result.Error == nil { - logger.Debugf("volume %s is provisioned, queuing for deprovisioning", tag.Id()) - volume, err := volumeFromParams(result.Result) - if err != nil { - return errors.Annotate(err, "getting volume info") - } - updateVolume(ctx, volume) - destroy = append(destroy, tag) - continue - } - if params.IsCodeNotProvisioned(result.Error) { - logger.Debugf("volume %s is not provisioned, queuing for removal", tag.Id()) - remove = append(remove, tag) - continue - } - return errors.Annotatef(result.Error, "getting volume information for volume %s", tag.Id()) - } - if len(destroy) > 0 { - ops := make([]scheduleOp, len(destroy)) - for i, tag := range destroy { - ops[i] = &destroyVolumeOp{tag: tag} - } - scheduleOperations(ctx, ops...) 
- } - if err := removeEntities(ctx, remove); err != nil { - return errors.Annotate(err, "removing volumes from state") - } - return nil -} - -// processDyingVolumeAttachments processes the VolumeAttachmentResults for -// Dying volume attachments, detaching volumes and updating state as necessary. -func processDyingVolumeAttachments( - ctx *context, - ids []params.MachineStorageId, - volumeAttachmentResults []params.VolumeAttachmentResult, -) error { - for _, id := range ids { - removePendingVolumeAttachment(ctx, id) - } - detach := make([]params.MachineStorageId, 0, len(ids)) - remove := make([]params.MachineStorageId, 0, len(ids)) - for i, result := range volumeAttachmentResults { - id := ids[i] - if result.Error == nil { - detach = append(detach, id) - continue - } - if params.IsCodeNotProvisioned(result.Error) { - remove = append(remove, id) - continue - } - return errors.Annotatef(result.Error, "getting information for volume attachment %v", id) - } - if len(detach) > 0 { - attachmentParams, err := volumeAttachmentParams(ctx, detach) - if err != nil { - return errors.Trace(err) - } - ops := make([]scheduleOp, len(attachmentParams)) - for i, p := range attachmentParams { - ops[i] = &detachVolumeOp{args: p} - } - scheduleOperations(ctx, ops...) - } - if err := removeAttachments(ctx, remove); err != nil { - return errors.Annotate(err, "removing attachments from state") - } - for _, id := range remove { - delete(ctx.volumeAttachments, id) - } - return nil -} - -// processAliveVolumes processes the VolumeResults for Alive volumes, -// provisioning volumes and setting the info in state as necessary. -func processAliveVolumes(ctx *context, tags []names.Tag, volumeResults []params.VolumeResult) error { - // Filter out the already-provisioned volumes. - pending := make([]names.VolumeTag, 0, len(tags)) - for i, result := range volumeResults { - volumeTag := tags[i].(names.VolumeTag) - if result.Error == nil { - // Volume is already provisioned: skip. - logger.Debugf("volume %q is already provisioned, nothing to do", tags[i].Id()) - volume, err := volumeFromParams(result.Result) - if err != nil { - return errors.Annotate(err, "getting volume info") - } - updateVolume(ctx, volume) - removePendingVolume(ctx, volumeTag) - continue - } - if !params.IsCodeNotProvisioned(result.Error) { - return errors.Annotatef( - result.Error, "getting volume information for volume %q", tags[i].Id(), - ) - } - // The volume has not yet been provisioned, so record its tag - // to enquire about parameters below. - pending = append(pending, volumeTag) - } - if len(pending) == 0 { - return nil - } - volumeParams, err := volumeParams(ctx, pending) - if err != nil { - return errors.Annotate(err, "getting volume params") - } - for _, params := range volumeParams { - updatePendingVolume(ctx, params) - } - return nil -} - -// processAliveVolumeAttachments processes the VolumeAttachmentResults -// for Alive volume attachments, attaching volumes and setting the info -// in state as necessary. -func processAliveVolumeAttachments( - ctx *context, - ids []params.MachineStorageId, - volumeAttachmentResults []params.VolumeAttachmentResult, -) error { - // Filter out the already-attached. - pending := make([]params.MachineStorageId, 0, len(ids)) - for i, result := range volumeAttachmentResults { - if result.Error == nil { - // Volume attachment is already provisioned: if we - // didn't (re)attach in this session, then we must - // do so now. 
- action := "nothing to do" - if _, ok := ctx.volumeAttachments[ids[i]]; !ok { - // Not yet (re)attached in this session. - pending = append(pending, ids[i]) - action = "will reattach" - } - logger.Debugf( - "%s is already attached to %s, %s", - ids[i].AttachmentTag, ids[i].MachineTag, action, - ) - removePendingVolumeAttachment(ctx, ids[i]) - continue - } - if !params.IsCodeNotProvisioned(result.Error) { - return errors.Annotatef( - result.Error, "getting information for attachment %v", ids[i], - ) - } - // The volume has not yet been provisioned, so record its tag - // to enquire about parameters below. - pending = append(pending, ids[i]) - } - if len(pending) == 0 { - return nil - } - params, err := volumeAttachmentParams(ctx, pending) - if err != nil { - return errors.Trace(err) - } - for i, params := range params { - if volume, ok := ctx.volumes[params.Volume]; ok { - params.VolumeId = volume.VolumeId - } - updatePendingVolumeAttachment(ctx, pending[i], params) - } - return nil -} - -// volumeAttachmentParams obtains the specified attachments' parameters. -func volumeAttachmentParams( - ctx *context, ids []params.MachineStorageId, -) ([]storage.VolumeAttachmentParams, error) { - paramsResults, err := ctx.volumeAccessor.VolumeAttachmentParams(ids) - if err != nil { - return nil, errors.Annotate(err, "getting volume attachment params") - } - attachmentParams := make([]storage.VolumeAttachmentParams, len(ids)) - for i, result := range paramsResults { - if result.Error != nil { - return nil, errors.Annotate(result.Error, "getting volume attachment parameters") - } - params, err := volumeAttachmentParamsFromParams(result.Result) - if err != nil { - return nil, errors.Annotate(err, "getting volume attachment parameters") - } - attachmentParams[i] = params - } - return attachmentParams, nil -} - -// volumeParams obtains the specified volumes' parameters. 
-func volumeParams(ctx *context, tags []names.VolumeTag) ([]storage.VolumeParams, error) { - paramsResults, err := ctx.volumeAccessor.VolumeParams(tags) - if err != nil { - return nil, errors.Annotate(err, "getting volume params") - } - allParams := make([]storage.VolumeParams, len(tags)) - for i, result := range paramsResults { - if result.Error != nil { - return nil, errors.Annotate(result.Error, "getting volume parameters") - } - params, err := volumeParamsFromParams(result.Result) - if err != nil { - return nil, errors.Annotate(err, "getting volume parameters") - } - allParams[i] = params - } - return allParams, nil -} - -func volumesFromStorage(in []storage.Volume) []params.Volume { - out := make([]params.Volume, len(in)) - for i, v := range in { - out[i] = params.Volume{ - v.Tag.String(), - params.VolumeInfo{ - v.VolumeId, - v.HardwareId, - v.Size, - v.Persistent, - }, - } - } - return out -} - -func volumeAttachmentsFromStorage(in []storage.VolumeAttachment) []params.VolumeAttachment { - out := make([]params.VolumeAttachment, len(in)) - for i, v := range in { - out[i] = params.VolumeAttachment{ - v.Volume.String(), - v.Machine.String(), - params.VolumeAttachmentInfo{ - v.DeviceName, - v.DeviceLink, - v.BusAddress, - v.ReadOnly, - }, - } - } - return out -} - -func volumeFromParams(in params.Volume) (storage.Volume, error) { - volumeTag, err := names.ParseVolumeTag(in.VolumeTag) - if err != nil { - return storage.Volume{}, errors.Trace(err) - } - return storage.Volume{ - volumeTag, - storage.VolumeInfo{ - in.Info.VolumeId, - in.Info.HardwareId, - in.Info.Size, - in.Info.Persistent, - }, - }, nil -} - -func volumeParamsFromParams(in params.VolumeParams) (storage.VolumeParams, error) { - volumeTag, err := names.ParseVolumeTag(in.VolumeTag) - if err != nil { - return storage.VolumeParams{}, errors.Trace(err) - } - providerType := storage.ProviderType(in.Provider) - - var attachment *storage.VolumeAttachmentParams - if in.Attachment != nil { - if in.Attachment.Provider != in.Provider { - return storage.VolumeParams{}, errors.Errorf( - "storage provider mismatch: volume (%q), attachment (%q)", - in.Provider, in.Attachment.Provider, - ) - } - if in.Attachment.VolumeTag != in.VolumeTag { - return storage.VolumeParams{}, errors.Errorf( - "volume tag mismatch: volume (%q), attachment (%q)", - in.VolumeTag, in.Attachment.VolumeTag, - ) - } - machineTag, err := names.ParseMachineTag(in.Attachment.MachineTag) - if err != nil { - return storage.VolumeParams{}, errors.Annotate( - err, "parsing attachment machine tag", - ) - } - attachment = &storage.VolumeAttachmentParams{ - AttachmentParams: storage.AttachmentParams{ - Provider: providerType, - Machine: machineTag, - InstanceId: instance.Id(in.Attachment.InstanceId), - ReadOnly: in.Attachment.ReadOnly, - }, - Volume: volumeTag, - } - } - return storage.VolumeParams{ - volumeTag, - in.Size, - providerType, - in.Attributes, - in.Tags, - attachment, - }, nil -} - -func volumeAttachmentParamsFromParams(in params.VolumeAttachmentParams) (storage.VolumeAttachmentParams, error) { - machineTag, err := names.ParseMachineTag(in.MachineTag) - if err != nil { - return storage.VolumeAttachmentParams{}, errors.Trace(err) - } - volumeTag, err := names.ParseVolumeTag(in.VolumeTag) - if err != nil { - return storage.VolumeAttachmentParams{}, errors.Trace(err) - } - return storage.VolumeAttachmentParams{ - AttachmentParams: storage.AttachmentParams{ - Provider: storage.ProviderType(in.Provider), - Machine: machineTag, - InstanceId: instance.Id(in.InstanceId), - 
ReadOnly: in.ReadOnly, - }, - Volume: volumeTag, - VolumeId: in.VolumeId, - }, nil -} === removed file 'src/github.com/juju/juju/worker/stringsworker.go' --- src/github.com/juju/juju/worker/stringsworker.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/stringsworker.go 1970-01-01 00:00:00 +0000 @@ -1,88 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package worker - -import ( - "launchpad.net/tomb" - - apiWatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/state/watcher" -) - -// stringsWorker is the internal implementation of the Worker -// interface, using a StringsWatcher for handling changes. -type stringsWorker struct { - tomb tomb.Tomb - - // handler is what will be called when events are triggered. - handler StringsWatchHandler -} - -// StringsWatchHandler implements the business logic triggered as part -// of watching a StringsWatcher. -type StringsWatchHandler interface { - // SetUp starts the handler, this should create the watcher we - // will be waiting on for more events. SetUp can return a Watcher - // even if there is an error, and strings Worker will make sure to - // stop the watcher. - SetUp() (apiWatcher.StringsWatcher, error) - - // TearDown should cleanup any resources that are left around - TearDown() error - - // Handle is called when the Watcher has indicated there are - // changes, do whatever work is necessary to process it - Handle(changes []string) error -} - -// NewStringsWorker starts a new worker running the business logic -// from the handler. The worker loop is started in another goroutine -// as a side effect of calling this. -func NewStringsWorker(handler StringsWatchHandler) Worker { - sw := &stringsWorker{ - handler: handler, - } - go func() { - defer sw.tomb.Done() - sw.tomb.Kill(sw.loop()) - }() - return sw -} - -// Kill the loop with no-error -func (sw *stringsWorker) Kill() { - sw.tomb.Kill(nil) -} - -// Wait for the looping to finish -func (sw *stringsWorker) Wait() error { - return sw.tomb.Wait() -} - -func (sw *stringsWorker) loop() error { - w, err := sw.handler.SetUp() - if err != nil { - if w != nil { - // We don't bother to propagate an error, because we - // already have an error - w.Stop() - } - return err - } - defer propagateTearDown(sw.handler, &sw.tomb) - defer watcher.Stop(w, &sw.tomb) - for { - select { - case <-sw.tomb.Dying(): - return tomb.ErrDying - case changes, ok := <-w.Changes(): - if !ok { - return ensureErr(w) - } - if err := sw.handler.Handle(changes); err != nil { - return err - } - } - } -} === removed file 'src/github.com/juju/juju/worker/stringsworker_test.go' --- src/github.com/juju/juju/worker/stringsworker_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/stringsworker_test.go 1970-01-01 00:00:00 +0000 @@ -1,319 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package worker_test - -import ( - "fmt" - "sync" - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "launchpad.net/tomb" - - apiWatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/state/watcher" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker" -) - -type stringsWorkerSuite struct { - coretesting.BaseSuite - worker worker.Worker - actor *stringsHandler -} - -var _ = gc.Suite(&stringsWorkerSuite{}) - -func newStringsHandlerWorker(c *gc.C, setupError, handlerError, teardownError error) (*stringsHandler, worker.Worker) { - sh := &stringsHandler{ - actions: nil, - handled: make(chan []string, 1), - setupError: setupError, - teardownError: teardownError, - handlerError: handlerError, - watcher: &testStringsWatcher{ - changes: make(chan []string), - }, - setupDone: make(chan struct{}), - } - w := worker.NewStringsWorker(sh) - select { - case <-sh.setupDone: - case <-time.After(coretesting.ShortWait): - c.Error("Failed waiting for stringsHandler.Setup to be called during SetUpTest") - } - return sh, w -} - -func (s *stringsWorkerSuite) SetUpTest(c *gc.C) { - s.BaseSuite.SetUpTest(c) - s.actor, s.worker = newStringsHandlerWorker(c, nil, nil, nil) -} - -func (s *stringsWorkerSuite) TearDownTest(c *gc.C) { - s.stopWorker(c) - s.BaseSuite.TearDownTest(c) -} - -type stringsHandler struct { - actions []string - mu sync.Mutex - // Signal handled when we get a handle() call - handled chan []string - setupError error - teardownError error - handlerError error - watcher *testStringsWatcher - setupDone chan struct{} -} - -var _ worker.StringsWatchHandler = (*stringsHandler)(nil) - -func (sh *stringsHandler) SetUp() (apiWatcher.StringsWatcher, error) { - defer func() { sh.setupDone <- struct{}{} }() - sh.mu.Lock() - defer sh.mu.Unlock() - sh.actions = append(sh.actions, "setup") - if sh.watcher == nil { - return nil, sh.setupError - } - return sh.watcher, sh.setupError -} - -func (sh *stringsHandler) TearDown() error { - sh.mu.Lock() - defer sh.mu.Unlock() - sh.actions = append(sh.actions, "teardown") - if sh.handled != nil { - close(sh.handled) - } - return sh.teardownError -} - -func (sh *stringsHandler) Handle(changes []string) error { - sh.mu.Lock() - defer sh.mu.Unlock() - sh.actions = append(sh.actions, "handler") - if sh.handled != nil { - // Unlock while we are waiting for the send - sh.mu.Unlock() - sh.handled <- changes - sh.mu.Lock() - } - return sh.handlerError -} - -func (sh *stringsHandler) CheckActions(c *gc.C, actions ...string) { - sh.mu.Lock() - defer sh.mu.Unlock() - c.Check(sh.actions, gc.DeepEquals, actions) -} - -// During teardown we try to stop the worker, but don't hang the test suite if -// Stop never returns -func (s *stringsWorkerSuite) stopWorker(c *gc.C) { - if s.worker == nil { - return - } - done := make(chan error) - go func() { - done <- worker.Stop(s.worker) - }() - err := waitForTimeout(c, done, coretesting.LongWait) - c.Check(err, jc.ErrorIsNil) - s.actor = nil - s.worker = nil -} - -type testStringsWatcher struct { - mu sync.Mutex - changes chan []string - stopped bool - stopError error -} - -var _ apiWatcher.StringsWatcher = (*testStringsWatcher)(nil) - -func (tsw *testStringsWatcher) Changes() <-chan []string { - return tsw.changes -} - -func (tsw *testStringsWatcher) Err() error { - return tsw.stopError -} - -func (tsw *testStringsWatcher) Stop() error { - tsw.mu.Lock() - defer tsw.mu.Unlock() - if !tsw.stopped { - close(tsw.changes) - } - tsw.stopped = true - return tsw.stopError -} - -func (tsw 
*testStringsWatcher) SetStopError(err error) { - tsw.mu.Lock() - tsw.stopError = err - tsw.mu.Unlock() -} - -func (tsw *testStringsWatcher) TriggerChange(c *gc.C, changes []string) { - select { - case tsw.changes <- changes: - case <-time.After(coretesting.LongWait): - c.Errorf("timed out trying to trigger a change") - } -} - -func waitForHandledStrings(c *gc.C, handled chan []string, expect []string) { - select { - case changes := <-handled: - c.Assert(changes, gc.DeepEquals, expect) - case <-time.After(coretesting.LongWait): - c.Errorf("handled failed to signal after %s", coretesting.LongWait) - } -} - -func (s *stringsWorkerSuite) TestKill(c *gc.C) { - s.worker.Kill() - err := waitShort(c, s.worker) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *stringsWorkerSuite) TestStop(c *gc.C) { - err := worker.Stop(s.worker) - c.Assert(err, jc.ErrorIsNil) - // After stop, Wait should return right away - err = waitShort(c, s.worker) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *stringsWorkerSuite) TestWait(c *gc.C) { - done := make(chan error) - go func() { - done <- s.worker.Wait() - }() - // Wait should not return until we've killed the worker - select { - case err := <-done: - c.Errorf("Wait() didn't wait until we stopped it: %v", err) - case <-time.After(coretesting.ShortWait): - } - s.worker.Kill() - err := waitForTimeout(c, done, coretesting.LongWait) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *stringsWorkerSuite) TestCallSetUpAndTearDown(c *gc.C) { - // After calling NewStringsWorker, we should have called setup - s.actor.CheckActions(c, "setup") - // If we kill the worker, it should notice, and call teardown - s.worker.Kill() - err := waitShort(c, s.worker) - c.Check(err, jc.ErrorIsNil) - s.actor.CheckActions(c, "setup", "teardown") - c.Check(s.actor.watcher.stopped, jc.IsTrue) -} - -func (s *stringsWorkerSuite) TestChangesTriggerHandler(c *gc.C) { - s.actor.CheckActions(c, "setup") - s.actor.watcher.TriggerChange(c, []string{"aa", "bb"}) - waitForHandledStrings(c, s.actor.handled, []string{"aa", "bb"}) - s.actor.CheckActions(c, "setup", "handler") - s.actor.watcher.TriggerChange(c, []string{"cc", "dd"}) - waitForHandledStrings(c, s.actor.handled, []string{"cc", "dd"}) - s.actor.watcher.TriggerChange(c, []string{"ee", "ff"}) - waitForHandledStrings(c, s.actor.handled, []string{"ee", "ff"}) - s.actor.CheckActions(c, "setup", "handler", "handler", "handler") - c.Assert(worker.Stop(s.worker), gc.IsNil) - s.actor.CheckActions(c, "setup", "handler", "handler", "handler", "teardown") -} - -func (s *stringsWorkerSuite) TestSetUpFailureStopsWithTearDown(c *gc.C) { - // Stop the worker and SetUp again, this time with an error - s.stopWorker(c) - actor, w := newStringsHandlerWorker(c, fmt.Errorf("my special error"), nil, nil) - err := waitShort(c, w) - c.Check(err, gc.ErrorMatches, "my special error") - // TearDown is not called on SetUp error. 
- actor.CheckActions(c, "setup") - c.Check(actor.watcher.stopped, jc.IsTrue) -} - -func (s *stringsWorkerSuite) TestWatcherStopFailurePropagates(c *gc.C) { - s.actor.watcher.SetStopError(fmt.Errorf("error while stopping watcher")) - s.worker.Kill() - c.Assert(s.worker.Wait(), gc.ErrorMatches, "error while stopping watcher") - // We've already stopped the worker, don't let teardown notice the - // worker is in an error state - s.worker = nil -} - -func (s *stringsWorkerSuite) TestCleanRunNoticesTearDownError(c *gc.C) { - s.actor.teardownError = fmt.Errorf("failed to tear down watcher") - s.worker.Kill() - c.Assert(s.worker.Wait(), gc.ErrorMatches, "failed to tear down watcher") - s.worker = nil -} - -func (s *stringsWorkerSuite) TestHandleErrorStopsWorkerAndWatcher(c *gc.C) { - s.stopWorker(c) - actor, w := newStringsHandlerWorker(c, nil, fmt.Errorf("my handling error"), nil) - actor.watcher.TriggerChange(c, []string{"aa", "bb"}) - waitForHandledStrings(c, actor.handled, []string{"aa", "bb"}) - err := waitShort(c, w) - c.Check(err, gc.ErrorMatches, "my handling error") - actor.CheckActions(c, "setup", "handler", "teardown") - c.Check(actor.watcher.stopped, jc.IsTrue) -} - -func (s *stringsWorkerSuite) TestNoticesStoppedWatcher(c *gc.C) { - // The default closedHandler doesn't panic if you have a genuine error - // (because it assumes you want to propagate a real error and then - // restart - s.actor.watcher.SetStopError(fmt.Errorf("Stopped Watcher")) - s.actor.watcher.Stop() - err := waitShort(c, s.worker) - c.Check(err, gc.ErrorMatches, "Stopped Watcher") - s.actor.CheckActions(c, "setup", "teardown") - // Worker is stopped, don't fail TearDownTest - s.worker = nil -} - -func (s *stringsWorkerSuite) TestErrorsOnStillAliveButClosedChannel(c *gc.C) { - foundErr := fmt.Errorf("did not get an error") - triggeredHandler := func(errer watcher.Errer) error { - foundErr = errer.Err() - return foundErr - } - worker.SetEnsureErr(triggeredHandler) - s.actor.watcher.SetStopError(tomb.ErrStillAlive) - s.actor.watcher.Stop() - err := waitShort(c, s.worker) - c.Check(foundErr, gc.Equals, tomb.ErrStillAlive) - // ErrStillAlive is trapped by the Stop logic and gets turned into a - // 'nil' when stopping. However TestDefaultClosedHandler can assert - // that it would have triggered an error. - c.Check(err, jc.ErrorIsNil) - s.actor.CheckActions(c, "setup", "teardown") - // Worker is stopped, don't fail TearDownTest - s.worker = nil -} - -func (s *stringsWorkerSuite) TestErrorsOnClosedChannel(c *gc.C) { - foundErr := fmt.Errorf("did not get an error") - triggeredHandler := func(errer watcher.Errer) error { - foundErr = errer.Err() - return foundErr - } - worker.SetEnsureErr(triggeredHandler) - s.actor.watcher.Stop() - err := waitShort(c, s.worker) - // If the foundErr is nil, we would have panic-ed (see TestDefaultClosedHandler) - c.Check(foundErr, gc.IsNil) - c.Check(err, jc.ErrorIsNil) - s.actor.CheckActions(c, "setup", "teardown") -} === added file 'src/github.com/juju/juju/worker/terminationworker/manifold.go' --- src/github.com/juju/juju/worker/terminationworker/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/terminationworker/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
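The manifold defined in the file below is the degenerate case: no inputs, and a Start func that ignores its resource getter. For contrast, a manifold with dependencies declares them by name and fetches each one inside Start. The sketch below mimics that engine contract with local stand-in types; none of it is the real dependency package API.

	package main

	import "fmt"

	// getResourceFunc mimics the engine handing named resources to Start.
	type getResourceFunc func(name string, out *string) error

	// manifold pairs declared inputs with a start function, echoing the
	// shape of a dependency-engine manifold.
	type manifold struct {
		inputs []string
		start  func(getResourceFunc) (string, error)
	}

	func main() {
		resources := map[string]string{"agent-name": "machine-0 agent"}
		getResource := func(name string, out *string) error {
			v, ok := resources[name]
			if !ok {
				return fmt.Errorf("missing resource %q", name)
			}
			*out = v
			return nil
		}

		m := manifold{
			inputs: []string{"agent-name"},
			start: func(gr getResourceFunc) (string, error) {
				var agent string
				if err := gr("agent-name", &agent); err != nil {
					return "", err
				}
				return "worker bound to " + agent, nil
			},
		}

		w, err := m.start(getResource)
		fmt.Println(w, err) // worker bound to machine-0 agent <nil>
	}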
+
+package terminationworker
+
+import (
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+)
+
+// Manifold returns a manifold whose worker returns ErrTerminateAgent
+// if a termination signal is received by the process it's running in.
+func Manifold() dependency.Manifold {
+	return dependency.Manifold{
+		Start: func(dependency.GetResourceFunc) (worker.Worker, error) {
+			return NewWorker(), nil
+		},
+	}
+}
=== modified file 'src/github.com/juju/juju/worker/terminationworker/worker_test.go'
--- src/github.com/juju/juju/worker/terminationworker/worker_test.go	2016-03-14 14:26:14 +0000
+++ src/github.com/juju/juju/worker/terminationworker/worker_test.go	2016-03-22 15:18:22 +0000
@@ -37,8 +37,8 @@
 }
 
 func (s *TerminationWorkerSuite) TearDownTest(c *gc.C) {
+	signal.Stop(s.c)
 	close(s.c)
-	signal.Stop(s.c)
 	s.BaseSuite.TearDownTest(c)
 }
=== added file 'src/github.com/juju/juju/worker/testing/postupgrade.go'
--- src/github.com/juju/juju/worker/testing/postupgrade.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/testing/postupgrade.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,56 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package testing
+
+import (
+	"github.com/juju/juju/agent"
+	"github.com/juju/juju/api/base"
+	basetesting "github.com/juju/juju/api/base/testing"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+	dt "github.com/juju/juju/worker/dependency/testing"
+	"github.com/juju/juju/worker/util"
+)
+
+// PostUpgradeManifoldTestConfig returns a PostUpgradeManifoldConfig
+// suitable for use with RunPostUpgradeManifold.
+func PostUpgradeManifoldTestConfig() util.PostUpgradeManifoldConfig {
+	return util.PostUpgradeManifoldConfig{
+		AgentName:         "agent-name",
+		APICallerName:     "api-caller-name",
+		UpgradeWaiterName: "upgradewaiter-name",
+	}
+}
+
+// RunPostUpgradeManifold is useful for testing manifolds based on
+// PostUpgradeManifold. It takes the manifold, sets up the resources
+// required to successfully pass PostUpgradeManifold's checks and then
+// runs the manifold start func.
+//
+// An agent and apiCaller may be optionally provided. If they are nil,
+// dummy barely-good-enough defaults will be used (these dummies are
+// fine if not actually used for much).
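The worker_test.go hunk above is a genuine ordering fix, not a cosmetic one: signal.Stop must run before close, because os/signal may still be delivering into the channel, and a send on a closed channel panics. Once Stop returns, the channel is guaranteed to receive no more signals, which makes the close safe. In miniature:

	package main

	import (
		"os"
		"os/signal"
		"syscall"
	)

	func main() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGTERM)

		// Teardown: unregister first, so the signal package can no longer
		// send into c, then close. Closing before Stop risks a "send on
		// closed channel" panic if a signal arrives in the window between
		// the two calls.
		signal.Stop(c)
		close(c)
	}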
+func RunPostUpgradeManifold( + manifold dependency.Manifold, agent agent.Agent, apiCaller base.APICaller, +) (worker.Worker, error) { + if agent == nil { + agent = new(dummyAgent) + } + if apiCaller == nil { + apiCaller = basetesting.APICallerFunc( + func(string, int, string, string, interface{}, interface{}) error { + return nil + }) + } + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Output: true}, + "agent-name": dt.StubResource{Output: agent}, + "api-caller-name": dt.StubResource{Output: apiCaller}, + }) + return manifold.Start(getResource) +} + +type dummyAgent struct { + agent.Agent +} === modified file 'src/github.com/juju/juju/worker/txnpruner/txnpruner.go' --- src/github.com/juju/juju/worker/txnpruner/txnpruner.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/txnpruner/txnpruner.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ "time" "github.com/juju/errors" + "github.com/juju/juju/worker" ) === added directory 'src/github.com/juju/juju/worker/undertaker' === added file 'src/github.com/juju/juju/worker/undertaker/export_test.go' --- src/github.com/juju/juju/worker/undertaker/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/undertaker/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker + +const ( + RIPTime = ripTime +) === added file 'src/github.com/juju/juju/worker/undertaker/mock_test.go' --- src/github.com/juju/juju/worker/undertaker/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/undertaker/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
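The mock client in the file that follows drives its tests through an unbuffered calls channel: each mocked API method announces itself by name, so the test goroutine can block until the worker actually reaches that step and can assert on call order at the same time. The rendezvous idiom, reduced to its core:

	package main

	import "fmt"

	type mock struct{ calls chan string }

	// ProcessThing reports itself on the calls channel once it has run;
	// with an unbuffered channel this also blocks until the test side
	// is ready to observe the call.
	func (m *mock) ProcessThing() error {
		defer func() { m.calls <- "ProcessThing" }()
		return nil
	}

	func main() {
		m := &mock{calls: make(chan string)}
		go m.ProcessThing() // stands in for the worker under test

		// Receiving both synchronizes with the worker goroutine and
		// lets the test assert on the order of observed calls.
		if got, want := <-m.calls, "ProcessThing"; got != want {
			panic(fmt.Sprintf("got %q, want %q", got, want))
		}
		fmt.Println("observed ProcessThing")
	}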
+
+package undertaker_test
+
+import (
+	"sync"
+	"time"
+
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/environs/config"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/watcher"
+)
+
+type clientModel struct {
+	Life                   state.Life
+	TimeOfDeath            *time.Time
+	UUID                   string
+	IsSystem               bool
+	HasMachinesAndServices bool
+	Removed                bool
+}
+
+type mockClient struct {
+	calls     chan string
+	lock      sync.RWMutex
+	mockModel clientModel
+	watcher   watcher.NotifyWatcher
+	cfg       *config.Config
+}
+
+func (m *mockClient) mockCall(call string) {
+	m.calls <- call
+}
+
+func (m *mockClient) ProcessDyingModel() error {
+	defer m.mockCall("ProcessDyingModel")
+	if m.mockModel.HasMachinesAndServices {
+		return errors.Errorf("found documents for model with uuid %s: 1 cleanups doc, 1 constraints doc, 1 leases doc, 1 modelusers doc, 1 settings doc", m.mockModel.UUID)
+	}
+	m.mockModel.Life = state.Dead
+	t := time.Now()
+	m.mockModel.TimeOfDeath = &t
+
+	return nil
+}
+
+func (m *mockClient) RemoveModel() error {
+	defer m.mockCall("RemoveModel")
+	m.mockModel.Removed = true
+	return nil
+}
+
+func (m *mockClient) ModelInfo() (params.UndertakerModelInfoResult, error) {
+	defer m.mockCall("ModelInfo")
+	result := params.UndertakerModelInfo{
+		Life:        params.Life(m.mockModel.Life.String()),
+		UUID:        m.mockModel.UUID,
+		Name:        "dummy",
+		GlobalName:  "bob/dummy",
+		IsSystem:    m.mockModel.IsSystem,
+		TimeOfDeath: m.mockModel.TimeOfDeath,
+	}
+	return params.UndertakerModelInfoResult{Result: result}, nil
+}
+
+func (m *mockClient) ModelConfig() (*config.Config, error) {
+	return m.cfg, nil
+}
+
+func (m *mockClient) WatchModelResources() (watcher.NotifyWatcher, error) {
+	return m.watcher, nil
+}
+
+type mockModelResourceWatcher struct {
+	events    chan struct{}
+	closeOnce sync.Once
+	err       error
+}
+
+func (w *mockModelResourceWatcher) Changes() watcher.NotifyChannel {
+	return w.events
+}
+
+func (w *mockModelResourceWatcher) Kill() {
+	w.closeOnce.Do(func() { close(w.events) })
+}
+
+func (w *mockModelResourceWatcher) Wait() error {
+	return w.err
+}
=== added file 'src/github.com/juju/juju/worker/undertaker/package_test.go'
--- src/github.com/juju/juju/worker/undertaker/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/undertaker/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package undertaker_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *stdtesting.T) {
+	gc.TestingT(t)
+}
=== added file 'src/github.com/juju/juju/worker/undertaker/undertaker.go'
--- src/github.com/juju/juju/worker/undertaker/undertaker.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/undertaker/undertaker.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,169 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package undertaker
+
+import (
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	uc "github.com/juju/utils/clock"
+
+	apiundertaker "github.com/juju/juju/api/undertaker"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/environs"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/catacomb"
+)
+
+var logger = loggo.GetLogger("juju.worker.undertaker")
+
+// ripTime is the time to wait after a model has been set to
+// dead, before removing all model docs.
+const ripTime = 24 * time.Hour
+
+// NewUndertaker returns a worker which processes a dying model.
+func NewUndertaker(client apiundertaker.UndertakerClient, clock uc.Clock) (worker.Worker, error) {
+	u := &undertaker{
+		client: client,
+		clock:  clock,
+	}
+	err := catacomb.Invoke(catacomb.Plan{
+		Site: &u.catacomb,
+		Work: u.run,
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return u, nil
+}
+
+type undertaker struct {
+	catacomb catacomb.Catacomb
+	client   apiundertaker.UndertakerClient
+	clock    uc.Clock
+}
+
+func (u *undertaker) run() error {
+	result, err := u.client.ModelInfo()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	if result.Error != nil {
+		return errors.Trace(result.Error)
+	}
+	modelInfo := result.Result
+
+	if modelInfo.Life == params.Alive {
+		return errors.Errorf("undertaker worker should not be started for an alive model: %q", modelInfo.GlobalName)
+	}
+
+	if modelInfo.Life == params.Dying {
+		// Process the dying model. This blocks until the model
+		// is dead.
+		if err := u.processDyingModel(); err != nil {
+			return errors.Trace(err)
+		}
+	}
+
+	// If the model is neither alive nor dying, it must be dead.
+
+	if modelInfo.IsSystem {
+		// Nothing to do. We don't remove model docs for a controller
+		// model.
+		return nil
+	}
+
+	err = u.destroyProviderModel()
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	tod := u.clock.Now()
+	if modelInfo.TimeOfDeath != nil {
+		// If TimeOfDeath is not nil, the model was already dead
+		// before the worker was started. So we use the recorded time of
+		// death. This may happen if the system is rebooted after a
+		// model is set to dead, but before the model docs are
+		// removed.
+		tod = *modelInfo.TimeOfDeath
+	}
+
+	// Process the dead model.
+	return u.processDeadModel(tod)
+}
+
+// Kill is part of the worker.Worker interface.
+func (u *undertaker) Kill() {
+	u.catacomb.Kill(nil)
+}
+
+// Wait is part of the worker.Worker interface.
+func (u *undertaker) Wait() error {
+	return u.catacomb.Wait()
+}
+
+func (u *undertaker) processDyingModel() error {
+	// ProcessDyingModel will fail quite a few times before it succeeds,
+	// as it is woken up whenever a machine or service changes. We ignore
+	// the error here and rely on the logging inside ProcessDyingModel.
+	if err := u.client.ProcessDyingModel(); err == nil {
+		return nil
+	}
+
+	watcher, err := u.client.WatchModelResources()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	defer watcher.Kill() // The watcher is not needed once this func returns.
+	if err := u.catacomb.Add(watcher); err != nil {
+		return errors.Trace(err)
+	}
+
+	for {
+		select {
+		case <-u.catacomb.Dying():
+			return u.catacomb.ErrDying()
+		case _, ok := <-watcher.Changes():
+			if !ok {
+				return errors.New("model resources watcher failed")
+			}
+			err := u.client.ProcessDyingModel()
+			if err == nil {
+				// ProcessDyingModel succeeded. We're done.
+				return nil
+			}
+			// Yes, we ignore the error. See comment above.
+ } + } +} + +func (u *undertaker) destroyProviderModel() error { + cfg, err := u.client.ModelConfig() + if err != nil { + return errors.Trace(err) + } + env, err := environs.New(cfg) + if err != nil { + return errors.Trace(err) + } + err = env.Destroy() + return errors.Trace(err) +} + +func (u *undertaker) processDeadModel(timeOfDeath time.Time) error { + timeDead := u.clock.Now().Sub(timeOfDeath) + wait := ripTime - timeDead + if wait < 0 { + wait = 0 + } + + select { + case <-u.catacomb.Dying(): + return u.catacomb.ErrDying() + case <-u.clock.After(wait): + err := u.client.RemoveModel() + return errors.Annotate(err, "could not remove all docs for dead model") + } +} === added file 'src/github.com/juju/juju/worker/undertaker/undertaker_test.go' --- src/github.com/juju/juju/worker/undertaker/undertaker_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/undertaker/undertaker_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,281 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package undertaker_test + +import ( + "sync" + "sync/atomic" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/cmd/modelcmd" + "github.com/juju/juju/environs" + "github.com/juju/juju/environs/config" + "github.com/juju/juju/environs/configstore" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/juju/provider/dummy" + "github.com/juju/juju/state" + "github.com/juju/juju/testing" + "github.com/juju/juju/worker/undertaker" +) + +type undertakerSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&undertakerSuite{}) + +type clock struct { + // advanceDurationAfterNow is the duration to advance the clock after the + // next call to Now(). 
+ advanceDurationAfterNow int64 + + *testing.Clock +} + +func (c *clock) Now() time.Time { + now := c.Clock.Now() + d := atomic.LoadInt64(&c.advanceDurationAfterNow) + if d != 0 { + c.Clock.Advance(time.Duration(d)) + atomic.StoreInt64(&c.advanceDurationAfterNow, 0) + } + + return now +} + +func (c *clock) advanceAfterNextNow(d time.Duration) { + atomic.StoreInt64(&c.advanceDurationAfterNow, int64(d)) +} + +func (s *undertakerSuite) TestAPICalls(c *gc.C) { + cfg, uuid := dummyCfgAndUUID(c) + client := &mockClient{ + calls: make(chan string), + mockModel: clientModel{ + Life: state.Dying, + UUID: uuid, + HasMachinesAndServices: true, + }, + cfg: cfg, + watcher: &mockModelResourceWatcher{ + events: make(chan struct{}), + }, + } + + startTime := time.Date(2015, time.September, 1, 17, 2, 1, 0, time.UTC) + mClock := &clock{ + Clock: testing.NewClock(startTime), + } + + wg := sync.WaitGroup{} + wg.Add(1) + + go func() { + defer wg.Done() + for _, test := range []struct { + call string + callback func() + }{{ + call: "ModelInfo", + }, { + call: "ProcessDyingModel", + callback: func() { + c.Check(client.mockModel.Life, gc.Equals, state.Dying) + c.Check(client.mockModel.TimeOfDeath, gc.IsNil) + client.mockModel.HasMachinesAndServices = false + client.watcher.(*mockModelResourceWatcher).events <- struct{}{} + mClock.advanceAfterNextNow(undertaker.RIPTime) + }}, { + call: "ProcessDyingModel", + callback: func() { + c.Check(client.mockModel.Life, gc.Equals, state.Dead) + c.Check(client.mockModel.TimeOfDeath, gc.NotNil) + }}, { + call: "RemoveModel", + callback: func() { + oneDayLater := startTime.Add(undertaker.RIPTime) + c.Check(mClock.Now().Equal(oneDayLater), jc.IsTrue) + c.Check(client.mockModel.Removed, gc.Equals, true) + }}, + } { + select { + case call := <-client.calls: + c.Check(call, gc.Equals, test.call) + if test.callback != nil { + test.callback() + } + case <-time.After(testing.LongWait): + c.Fatalf("timed out waiting for API call: %q", test.call) + } + } + }() + + worker, err := undertaker.NewUndertaker(client, mClock) + c.Assert(err, jc.ErrorIsNil) + defer worker.Kill() + + wg.Wait() + + assertNoMoreCalls(c, client) +} + +func (s *undertakerSuite) TestRemoveModelDocsNotCalledForController(c *gc.C) { + mockWatcher := &mockModelResourceWatcher{ + events: make(chan struct{}, 1), + } + uuid, err := utils.NewUUID() + c.Assert(err, jc.ErrorIsNil) + client := &mockClient{ + calls: make(chan string, 1), + mockModel: clientModel{ + Life: state.Dying, + UUID: uuid.String(), + IsSystem: true, + }, + watcher: mockWatcher, + } + startTime := time.Date(2015, time.September, 1, 17, 2, 1, 0, time.UTC) + mClock := &clock{ + Clock: testing.NewClock(startTime), + } + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + + for _, test := range []struct { + call string + callback func() + }{{ + call: "ModelInfo", + callback: func() { + mockWatcher.events <- struct{}{} + }, + }, { + call: "ProcessDyingModel", + callback: func() { + c.Assert(client.mockModel.Life, gc.Equals, state.Dead) + c.Assert(client.mockModel.TimeOfDeath, gc.NotNil) + + mClock.advanceAfterNextNow(undertaker.RIPTime) + }, + }, + } { + select { + case call := <-client.calls: + c.Assert(call, gc.Equals, test.call) + if test.callback != nil { + test.callback() + } + case <-time.After(testing.LongWait): + c.Fatalf("timed out waiting for API call: %q", test.call) + } + } + }() + + worker, err := undertaker.NewUndertaker(client, mClock) + c.Assert(err, jc.ErrorIsNil) + defer worker.Kill() + + wg.Wait() + + assertNoMoreCalls(c, 
client) +} + +func (s *undertakerSuite) TestRemoveModelOnRebootCalled(c *gc.C) { + startTime := time.Date(2015, time.September, 1, 17, 2, 1, 0, time.UTC) + mClock := testing.NewClock(startTime) + halfDayEarlier := mClock.Now().Add(-12 * time.Hour) + + cfg, uuid := dummyCfgAndUUID(c) + client := &mockClient{ + calls: make(chan string, 1), + // Mimic the situation where the worker is started after the + // model has been set to dead 12hrs ago. + mockModel: clientModel{ + Life: state.Dead, + UUID: uuid, + TimeOfDeath: &halfDayEarlier, + }, + cfg: cfg, + } + + wg := sync.WaitGroup{} + wg.Add(1) + + // We expect RemoveModel not to be called, as we have to wait another + // 12hrs. + go func() { + defer wg.Done() + for _, test := range []struct { + call string + callback func() + }{{ + call: "ModelInfo", + callback: func() { + // As model was set to dead 12hrs earlier, assert that the + // undertaker picks up where it left off and RemoveModel + // is called 12hrs later. + mClock.Advance(12 * time.Hour) + }, + }, { + call: "RemoveModel", + callback: func() { + c.Assert(client.mockModel.Removed, gc.Equals, true) + }}, + } { + select { + case call := <-client.calls: + c.Assert(call, gc.Equals, test.call) + if test.callback != nil { + test.callback() + } + case <-time.After(testing.LongWait): + c.Fatalf("timed out waiting for API call: %q", test.call) + } + } + }() + + worker, err := undertaker.NewUndertaker(client, mClock) + c.Assert(err, jc.ErrorIsNil) + defer worker.Kill() + + wg.Wait() + + assertNoMoreCalls(c, client) +} + +func assertNoMoreCalls(c *gc.C, client *mockClient) { + select { + case call := <-client.calls: + c.Fatalf("unexpected API call: %q", call) + case <-time.After(testing.ShortWait): + } +} + +func dummyCfgAndUUID(c *gc.C) (*config.Config, string) { + cfg := testingEnvConfig(c) + uuid, ok := cfg.UUID() + c.Assert(ok, jc.IsTrue) + return cfg, uuid +} + +// testingEnvConfig prepares an environment configuration using +// the dummy provider. +func testingEnvConfig(c *gc.C) *config.Config { + cfg, err := config.New(config.NoDefaults, dummy.SampleConfig()) + c.Assert(err, jc.ErrorIsNil) + env, err := environs.Prepare( + modelcmd.BootstrapContext(testing.Context(c)), configstore.NewMem(), + jujuclienttesting.NewMemStore(), + "dummycontroller", environs.PrepareForBootstrapParams{Config: cfg}, + ) + c.Assert(err, jc.ErrorIsNil) + return env.Config() +} === added directory 'src/github.com/juju/juju/worker/unitassigner' === added file 'src/github.com/juju/juju/worker/unitassigner/package_test.go' --- src/github.com/juju/juju/worker/unitassigner/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/unitassigner/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package unitassigner + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/unitassigner/unitassigner.go' --- src/github.com/juju/juju/worker/unitassigner/unitassigner.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/unitassigner/unitassigner.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package unitassigner + +import ( + "github.com/juju/errors" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/loggo" + "github.com/juju/names" +) + +var logger = loggo.GetLogger("juju.worker.unitassigner") + +type UnitAssigner interface { + AssignUnits(tags []names.UnitTag) ([]error, error) + WatchUnitAssignments() (watcher.StringsWatcher, error) + SetAgentStatus(args params.SetStatus) error +} + +func New(ua UnitAssigner) (worker.Worker, error) { + return watcher.NewStringsWorker(watcher.StringsConfig{ + Handler: unitAssignerHandler{api: ua}, + }) +} + +type unitAssignerHandler struct { + api UnitAssigner +} + +func (u unitAssignerHandler) SetUp() (watcher.StringsWatcher, error) { + return u.api.WatchUnitAssignments() +} + +func (u unitAssignerHandler) Handle(_ <-chan struct{}, ids []string) error { + logger.Tracef("Handling unit assignments: %q", ids) + if len(ids) == 0 { + return nil + } + + units := make([]names.UnitTag, len(ids)) + for i, id := range ids { + if !names.IsValidUnit(id) { + return errors.Errorf("%q is not a valid unit id", id) + } + units[i] = names.NewUnitTag(id) + } + + results, err := u.api.AssignUnits(units) + if err != nil { + return err + } + + failures := map[string]error{} + + logger.Tracef("Unit assignment results: %q", results) + // errors are returned in the same order as the ids given. Any errors from + // the assign units call must be reported as error statuses on the + // respective units (though the assignments will be retried). Not found + // errors indicate that the unit was removed before the assignment was + // requested, which can be safely ignored. + for i, err := range results { + if err != nil && !errors.IsNotFound(err) { + failures[units[i].String()] = err + } + } + + if len(failures) > 0 { + args := params.SetStatus{ + Entities: make([]params.EntityStatusArgs, len(failures)), + } + + x := 0 + for unit, err := range failures { + args.Entities[x] = params.EntityStatusArgs{ + Tag: unit, + Status: params.StatusError, + Info: err.Error(), + } + x++ + } + + return u.api.SetAgentStatus(args) + } + return nil +} + +func (unitAssignerHandler) TearDown() error { + return nil +} === added file 'src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go' --- src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/unitassigner/unitassigner_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,91 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package unitassigner + +import ( + "errors" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" +) + +var _ = gc.Suite(testsuite{}) + +type testsuite struct{} + +func (testsuite) TestSetup(c *gc.C) { + f := &fakeAPI{} + ua := unitAssignerHandler{api: f} + _, err := ua.SetUp() + c.Assert(err, jc.ErrorIsNil) + c.Assert(f.calledWatch, jc.IsTrue) + + f.err = errors.New("boo") + _, err = ua.SetUp() + c.Assert(err, gc.Equals, f.err) +} + +func (testsuite) TestHandle(c *gc.C) { + f := &fakeAPI{} + ua := unitAssignerHandler{api: f} + ids := []string{"foo/0", "bar/0"} + err := ua.Handle(nil, ids) + c.Assert(err, jc.ErrorIsNil) + c.Assert(f.assignTags, gc.DeepEquals, []names.UnitTag{ + names.NewUnitTag("foo/0"), + names.NewUnitTag("bar/0"), + }) + + f.err = errors.New("boo") + err = ua.Handle(nil, ids) + c.Assert(err, gc.Equals, f.err) +} + +func (testsuite) TestHandleError(c *gc.C) { + e := errors.New("some error") + f := &fakeAPI{assignErrs: []error{e}} + ua := unitAssignerHandler{api: f} + ids := []string{"foo/0", "bar/0"} + err := ua.Handle(nil, ids) + c.Assert(err, jc.ErrorIsNil) + c.Assert(f.assignTags, gc.DeepEquals, []names.UnitTag{ + names.NewUnitTag("foo/0"), + names.NewUnitTag("bar/0"), + }) + c.Assert(f.status.Entities, gc.NotNil) + entities := f.status.Entities + c.Assert(entities, gc.HasLen, 1) + c.Assert(entities[0], gc.DeepEquals, params.EntityStatusArgs{ + Tag: "unit-foo-0", + Status: params.StatusError, + Info: e.Error(), + }) +} + +type fakeAPI struct { + calledWatch bool + assignTags []names.UnitTag + err error + status params.SetStatus + assignErrs []error +} + +func (f *fakeAPI) AssignUnits(tags []names.UnitTag) ([]error, error) { + f.assignTags = tags + return f.assignErrs, f.err +} + +func (f *fakeAPI) WatchUnitAssignments() (watcher.StringsWatcher, error) { + f.calledWatch = true + return nil, f.err +} + +func (f *fakeAPI) SetAgentStatus(args params.SetStatus) error { + f.status = args + return f.err +} === added directory 'src/github.com/juju/juju/worker/uniter/actions' === added file 'src/github.com/juju/juju/worker/uniter/actions/package_test.go' --- src/github.com/juju/juju/worker/uniter/actions/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/actions/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package actions_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/uniter/actions/resolver.go' --- src/github.com/juju/juju/worker/uniter/actions/resolver.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/actions/resolver.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,68 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package actions + +import ( + "github.com/juju/loggo" + + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +var logger = loggo.GetLogger("juju.worker.uniter.actions") + +type actionsResolver struct{} + +// NewResolver returns a new resolver that determines which action-related operation +// should be run based on local and remote uniter states.
+// +// TODO(axw) 2015-10-27 #1510333 +// Use the same method as in the runcommands resolver +// for updating the remote state snapshot when an +// action is completed. +func NewResolver() resolver.Resolver { + return &actionsResolver{} +} + +func nextAction(pendingActions []string, completedActions map[string]struct{}) (string, error) { + for _, action := range pendingActions { + if _, ok := completedActions[action]; !ok { + return action, nil + } + } + return "", resolver.ErrNoOperation +} + +// NextOp implements the resolver.Resolver interface. +func (r *actionsResolver) NextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + nextAction, err := nextAction(remoteState.Actions, localState.CompletedActions) + if err != nil { + return nil, err + } + switch localState.Kind { + case operation.RunHook: + // We can still run actions if the unit is in a hook error state. + if localState.Step == operation.Pending { + return opFactory.NewAction(nextAction) + } + case operation.RunAction: + // TODO(fwereade): we *should* handle interrupted actions, and make sure + // they're marked as failed, but that's not for now. + if localState.Hook != nil { + logger.Infof("found incomplete action %q; ignoring", localState.ActionId) + logger.Infof("recommitting prior %q hook", localState.Hook.Kind) + return opFactory.NewSkipHook(*localState.Hook) + } else { + logger.Infof("%q hook is nil", operation.RunAction) + } + case operation.Continue: + return opFactory.NewAction(nextAction) + } + return nil, resolver.ErrNoOperation +} === added file 'src/github.com/juju/juju/worker/uniter/actions/resolver_test.go' --- src/github.com/juju/juju/worker/uniter/actions/resolver_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/actions/resolver_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package actions_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/worker/uniter/actions" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +type actionsSuite struct{} + +var _ = gc.Suite(&actionsSuite{}) + +func (s *actionsSuite) TestNoActions(c *gc.C) { + actionResolver := actions.NewResolver() + localState := resolver.LocalState{} + remoteState := remotestate.Snapshot{} + _, err := actionResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, gc.DeepEquals, resolver.ErrNoOperation) +} + +func (s *actionsSuite) TestActionStateKindContinue(c *gc.C) { + actionResolver := actions.NewResolver() + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := remotestate.Snapshot{ + Actions: []string{"actionA", "actionB"}, + } + op, err := actionResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op, jc.DeepEquals, mockOp("actionA")) +} + +func (s *actionsSuite) TestActionRunHook(c *gc.C) { + actionResolver := actions.NewResolver() + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + }, + } + remoteState := remotestate.Snapshot{ + Actions: []string{"actionA", "actionB"}, + } + op, err := actionResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op, jc.DeepEquals, mockOp("actionA")) +} + +func (s *actionsSuite) TestNextAction(c *gc.C) { + actionResolver := actions.NewResolver() + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + CompletedActions: map[string]struct{}{"actionA": struct{}{}}, + } + remoteState := remotestate.Snapshot{ + Actions: []string{"actionA", "actionB"}, + } + op, err := actionResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op, jc.DeepEquals, mockOp("actionB")) +} + +type mockOperations struct { + operation.Factory +} + +func (m *mockOperations) NewAction(id string) (operation.Operation, error) { + return mockOp(id), nil +} + +func mockOp(name string) operation.Operation { + return &mockOperation{name: name} +} + +type mockOperation struct { + operation.Operation + name string +} + +func (op *mockOperation) String() string { + return op.name +} === added file 'src/github.com/juju/juju/worker/uniter/agent.go' --- src/github.com/juju/juju/worker/uniter/agent.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/agent.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package uniter + +import ( + "github.com/juju/juju/apiserver/params" +) + +// setAgentStatus sets the unit's status if it has changed since last time this method was called. +func setAgentStatus(u *Uniter, status params.Status, info string, data map[string]interface{}) error { + u.setStatusMutex.Lock() + defer u.setStatusMutex.Unlock() + if u.lastReportedStatus == status && u.lastReportedMessage == info { + return nil + } + u.lastReportedStatus = status + u.lastReportedMessage = info + logger.Debugf("[AGENT-STATUS] %s: %s", status, info) + return u.unit.SetAgentStatus(status, info, data) +} + +// reportAgentError reports if there was an error performing an agent operation. 
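+// It is a no-op when err is nil, so call sites need not check first; a
+// hypothetical call site, for illustration only:
+//
+//	reportAgentError(u, "running operation", err)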
+func reportAgentError(u *Uniter, userMessage string, err error) { + // If a non-nil error is reported (e.g. due to an operation failing), + // set the agent status to Failed. + if err == nil { + return + } + err2 := setAgentStatus(u, params.StatusFailed, userMessage, nil) + if err2 != nil { + logger.Errorf("updating agent status: %v", err2) + } +} === modified file 'src/github.com/juju/juju/worker/uniter/charm/bundles.go' --- src/github.com/juju/juju/worker/uniter/charm/bundles.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/bundles.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/errors" "github.com/juju/utils" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/downloader" ) === modified file 'src/github.com/juju/juju/worker/uniter/charm/bundles_test.go' --- src/github.com/juju/juju/worker/uniter/charm/bundles_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/bundles_test.go 2016-03-22 15:18:22 +0000 @@ -18,7 +18,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api" "github.com/juju/juju/api/uniter" === modified file 'src/github.com/juju/juju/worker/uniter/charm/charm.go' --- src/github.com/juju/juju/worker/uniter/charm/charm.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/charm.go 2016-03-22 15:18:22 +0000 @@ -10,14 +10,14 @@ "github.com/juju/loggo" "github.com/juju/utils" "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) var logger = loggo.GetLogger("juju.worker.uniter.charm") -// charmURLPath is the path within a charm directory to which Deployers +// CharmURLPath is the path within a charm directory to which Deployers // commonly write the charm URL of the latest deployed charm. -const charmURLPath = ".juju-charm" +const CharmURLPath = ".juju-charm" // Bundle allows access to a charm's files. type Bundle interface { === modified file 'src/github.com/juju/juju/worker/uniter/charm/charm_test.go' --- src/github.com/juju/juju/worker/uniter/charm/charm_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/charm_test.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils/set" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/testcharms" "github.com/juju/juju/worker/uniter/charm" === modified file 'src/github.com/juju/juju/worker/uniter/charm/converter.go' --- src/github.com/juju/juju/worker/uniter/charm/converter.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/converter.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/utils/set" "github.com/juju/utils/symlink" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) // NewDeployer returns a Deployer of whatever kind is currently in use for the @@ -58,7 +58,7 @@ // Ensure that the staged charm matches the deployed charm: it's possible // that the uniter was stopped after staging, but before deploying, a new // bundle. 
- deployedURL, err := ReadCharmURL(manifestDeployer.CharmPath(charmURLPath)) + deployedURL, err := ReadCharmURL(manifestDeployer.CharmPath(CharmURLPath)) if err != nil && !os.IsNotExist(err) { return err } @@ -138,7 +138,7 @@ return err } switch relPath { - case ".", charmURLPath: + case ".", CharmURLPath: return nil case ".git": err = filepath.SkipDir === modified file 'src/github.com/juju/juju/worker/uniter/charm/git.go' --- src/github.com/juju/juju/worker/uniter/charm/git.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/git.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "path/filepath" "strings" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) // GitDir exposes a specialized subset of git operations on a directory. @@ -231,11 +231,11 @@ // ReadCharmURL reads the charm identity file from the GitDir. func (d *GitDir) ReadCharmURL() (*charm.URL, error) { - path := filepath.Join(d.path, charmURLPath) + path := filepath.Join(d.path, CharmURLPath) return ReadCharmURL(path) } // WriteCharmURL writes the charm identity file into the GitDir. func (d *GitDir) WriteCharmURL(url *charm.URL) error { - return WriteCharmURL(filepath.Join(d.path, charmURLPath), url) + return WriteCharmURL(filepath.Join(d.path, CharmURLPath), url) } === modified file 'src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go' --- src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/git_deployer_test.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils/symlink" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/charm" === modified file 'src/github.com/juju/juju/worker/uniter/charm/git_test.go' --- src/github.com/juju/juju/worker/uniter/charm/git_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/git_test.go 2016-03-22 15:18:22 +0000 @@ -13,7 +13,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/charm" === modified file 'src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go' --- src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/charm/manifest_deployer.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,7 @@ "github.com/juju/utils" "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) const ( @@ -76,7 +76,7 @@ } // Detect and resolve state of charm directory. 
- baseURL, baseManifest, err := d.loadManifest(charmURLPath) + baseURL, baseManifest, err := d.loadManifest(CharmURLPath) if err != nil && !os.IsNotExist(err) { return err } @@ -146,7 +146,7 @@ func (d *manifestDeployer) finishDeploy() error { logger.Debugf("finishing deploy of charm %q", d.staged.url) oldPath := d.CharmPath(deployingURLPath) - newPath := d.CharmPath(charmURLPath) + newPath := d.CharmPath(CharmURLPath) return utils.ReplaceFile(oldPath, newPath) } === removed file 'src/github.com/juju/juju/worker/uniter/export_test.go' --- src/github.com/juju/juju/worker/uniter/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,65 +0,0 @@ -// Copyright 2013, 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "fmt" - "time" - - "github.com/juju/juju/testing" -) - -func SetUniterObserver(u *Uniter, observer UniterExecutionObserver) { - u.observer = observer -} - -var ( - ActiveCollectMetricsTimer = &activeCollectMetricsTimer - ActiveSendMetricsTimer = &activeSendMetricsTimer - IdleWaitTime = &idleWaitTime - LeadershipGuarantee = &leadershipGuarantee -) - -// manualTicker will be used to generate collect-metrics events -// in a time-independent manner for testing. -type ManualTicker struct { - c chan time.Time -} - -// Tick sends a signal on the ticker channel. -func (t *ManualTicker) Tick() error { - select { - case t.c <- time.Now(): - case <-time.After(testing.LongWait): - return fmt.Errorf("ticker channel blocked") - } - return nil -} - -// ReturnTimer can be used to replace the metrics signal generator. -func (t *ManualTicker) ReturnTimer(now, lastRun time.Time, interval time.Duration) <-chan time.Time { - return t.c -} - -func NewManualTicker() *ManualTicker { - return &ManualTicker{ - c: make(chan time.Time), - } -} - -func NewTestingMetricsTimerChooser(collector TimedSignal, sender TimedSignal) *timerChooser { - return &timerChooser{ - collector: collector, - sender: sender, - inactive: inactiveMetricsTimer, - } -} - -func UpdateStatusSignal(now, lastSignal time.Time, interval time.Duration) <-chan time.Time { - return updateStatusSignal(now, lastSignal, interval) -} - -func ActiveCollectMetricsSignal(now, lastSignal time.Time, interval time.Duration) <-chan time.Time { - return activeCollectMetricsTimer(now, lastSignal, interval) -} === removed directory 'src/github.com/juju/juju/worker/uniter/filter' === removed file 'src/github.com/juju/juju/worker/uniter/filter/export_test.go' --- src/github.com/juju/juju/worker/uniter/filter/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/filter/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,10 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package filter - -func DummyFilter() Filter { - // This should, obviously, not be used except for type tests that don't - // try to do anything with it (eg TestOutput*). - return &filter{} -} === removed file 'src/github.com/juju/juju/worker/uniter/filter/filter.go' --- src/github.com/juju/juju/worker/uniter/filter/filter.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/filter/filter.go 1970-01-01 00:00:00 +0000 @@ -1,789 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package filter - -import ( - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5" - "launchpad.net/tomb" - - "github.com/juju/juju/api/uniter" - apiwatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state/watcher" - "github.com/juju/juju/worker" -) - -var filterLogger = loggo.GetLogger("juju.worker.uniter.filter") - -// filter collects unit, service, and service config information from separate -// state watchers, and presents it as events on channels designed specifically -// for the convenience of the uniter. -type filter struct { - st *uniter.State - tomb tomb.Tomb - - // outUnitDying is closed when the unit's life becomes Dying. - outUnitDying chan struct{} - - // The out*On chans are used to deliver events to clients. - // The out* chans, when set to the corresponding out*On chan (rather than - // nil) indicate that an event of the appropriate type is ready to send - // to the client. - outConfig chan struct{} - outConfigOn chan struct{} - outAction chan string - outActionOn chan string - outLeaderSettings chan struct{} - outLeaderSettingsOn chan struct{} - outUpgrade chan *charm.URL - outUpgradeOn chan *charm.URL - outResolved chan params.ResolvedMode - outResolvedOn chan params.ResolvedMode - outRelations chan []int - outRelationsOn chan []int - outMeterStatus chan struct{} - outMeterStatusOn chan struct{} - outStorage chan []names.StorageTag - outStorageOn chan []names.StorageTag - // The want* chans are used to indicate that the filter should send - // events if it has them available. - wantForcedUpgrade chan bool - wantResolved chan struct{} - wantLeaderSettings chan bool - - // discardConfig is used to indicate that any pending config event - // should be discarded. - discardConfig chan struct{} - - // discardLeaderSettings is used to indicate any pending Leader - // Settings event should be discarded. - discardLeaderSettings chan struct{} - - // setCharm is used to request that the unit's charm URL be set to - // a new value. This must be done in the filter's goroutine, so - // that config watches can be stopped and restarted pointing to - // the new charm URL. If we don't stop the watch before the - // (potentially) last reference to that settings document is - // removed, we'll see spurious errors (and even in the best case, - // we risk getting notifications for the wrong settings version). - setCharm chan *charm.URL - - // didSetCharm is used to report back after setting a charm URL. - didSetCharm chan struct{} - - // clearResolved is used to request that the unit's resolved flag - // be cleared. This must be done on the filter's goroutine so that - // it can immediately trigger the unit change handler, and thus - // ensure that subsequent requests for resolved events -- that land - // before the next watcher update for the unit -- do not erroneously - // send out stale values. - clearResolved chan struct{} - - // didClearResolved is used to report back after clearing the resolved - // flag. - didClearResolved chan struct{} - - // The following fields hold state that is collected while running, - // and used to detect interesting changes to express as events.
- unit *uniter.Unit - life params.Life - resolved params.ResolvedMode - service *uniter.Service - upgradeFrom serviceCharm - upgradeAvailable serviceCharm - upgrade *charm.URL - relations []int - storage []names.StorageTag - actionsPending []string - nextAction string - - // meterStatusCode and meterStatusInfo reflect the meter status values of the unit. - meterStatusCode string - meterStatusInfo string -} - -// NewFilter returns a filter that handles state changes pertaining to the -// supplied unit. -func NewFilter(st *uniter.State, unitTag names.UnitTag) (Filter, error) { - f := &filter{ - st: st, - outUnitDying: make(chan struct{}), - outConfigOn: make(chan struct{}), - outActionOn: make(chan string), - outLeaderSettingsOn: make(chan struct{}), - outUpgradeOn: make(chan *charm.URL), - outResolvedOn: make(chan params.ResolvedMode), - outRelationsOn: make(chan []int), - outMeterStatusOn: make(chan struct{}), - outStorageOn: make(chan []names.StorageTag), - wantForcedUpgrade: make(chan bool), - wantResolved: make(chan struct{}), - wantLeaderSettings: make(chan bool), - discardConfig: make(chan struct{}), - discardLeaderSettings: make(chan struct{}), - setCharm: make(chan *charm.URL), - didSetCharm: make(chan struct{}), - clearResolved: make(chan struct{}), - didClearResolved: make(chan struct{}), - } - go func() { - defer f.tomb.Done() - err := f.loop(unitTag) - filterLogger.Errorf("%v", err) - f.tomb.Kill(err) - }() - return f, nil -} - -func (f *filter) Stop() error { - f.tomb.Kill(nil) - return f.tomb.Wait() -} - -func (f *filter) Dead() <-chan struct{} { - return f.tomb.Dead() -} - -func (f *filter) Wait() error { - return f.tomb.Wait() -} - -func (f *filter) Kill() { - f.tomb.Kill(nil) -} - -// UnitDying returns a channel which is closed when the Unit enters a Dying state. -func (f *filter) UnitDying() <-chan struct{} { - return f.outUnitDying -} - -// UpgradeEvents returns a channel that will receive a new charm URL whenever an -// upgrade is indicated. Events should not be read until the baseline state -// has been specified by calling WantUpgradeEvent. -func (f *filter) UpgradeEvents() <-chan *charm.URL { - return f.outUpgradeOn -} - -// ResolvedEvents returns a channel that may receive a ResolvedMode when the -// unit's Resolved value changes, or when an event is explicitly requested. -// A ResolvedNone state will never generate events, but ResolvedRetryHooks and -// ResolvedNoHooks will always be delivered as described. -func (f *filter) ResolvedEvents() <-chan params.ResolvedMode { - return f.outResolvedOn -} - -// MeterStatusEvents returns a channel that will receive a signal when the unit's -// meter status changes. -func (f *filter) MeterStatusEvents() <-chan struct{} { - return f.outMeterStatusOn -} - -// ConfigEvents returns a channel that will receive a signal whenever the service's -// configuration changes, or when an event is explicitly requested. -func (f *filter) ConfigEvents() <-chan struct{} { - return f.outConfigOn -} - -// ActionEvents returns a channel that will receive a signal whenever the unit -// receives new Actions. -func (f *filter) ActionEvents() <-chan string { - return f.outActionOn -} - -// RelationsEvents returns a channel that will receive the ids of all the service's -// relations whose Life status has changed. -func (f *filter) RelationsEvents() <-chan []int { - return f.outRelationsOn -} - -// StorageEvents returns a channel that will receive the tags of all the unit's -// associated storage instances. 
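-// Note that the payload is cumulative: storageChanged (below) unions newly
-// observed tags into the existing set, so each event carries the sorted list
-// of all tags seen so far rather than just the new ones.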
-func (f *filter) StorageEvents() <-chan []names.StorageTag { - return f.outStorageOn -} - -// WantUpgradeEvent controls whether the filter will generate upgrade -// events for unforced service charm changes. -func (f *filter) WantUpgradeEvent(mustForce bool) { - select { - case <-f.tomb.Dying(): - case f.wantForcedUpgrade <- mustForce: - } -} - -// SetCharm notifies the filter that the unit is running a new -// charm. It causes the unit's charm URL to be set in state, and the -// following changes to the filter's behaviour: -// -// * Upgrade events will only be generated for charms different to -// that supplied; -// * A fresh relations event will be generated containing every relation -// the service is participating in; -// * A fresh configuration event will be generated, and subsequent -// events will only be sent in response to changes in the version -// of the service's settings that is specific to that charm. -// -// SetCharm blocks until the charm URL is set in state, returning any -// error that occurred. -func (f *filter) SetCharm(curl *charm.URL) error { - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - case f.setCharm <- curl: - } - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - case <-f.didSetCharm: - return nil - } -} - -// WantResolvedEvent indicates that the filter should send a resolved event -// if one is available. -func (f *filter) WantResolvedEvent() { - select { - case <-f.tomb.Dying(): - case f.wantResolved <- nothing: - } -} - -// ClearResolved notifies the filter that a resolved event has been handled -// and should not be reported again. -func (f *filter) ClearResolved() error { - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - case f.clearResolved <- nothing: - } - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - case <-f.didClearResolved: - filterLogger.Debugf("resolved clear completed") - return nil - } -} - -// LeaderSettingsEvents returns a channel that will receive an event whenever -// there is a leader settings change. Events can be temporarily suspended by -// calling WantLeaderSettingsEvents(false), and then reenabled by calling -// WantLeaderSettingsEvents(true) -func (f *filter) LeaderSettingsEvents() <-chan struct{} { - return f.outLeaderSettingsOn -} - -// DiscardLeaderSettingsEvent can be called to discard any pending -// LeaderSettingsEvents. This is used by code that saw a LeaderSettings change, -// and has been prepping for a response. Just before they request the current -// LeaderSettings, they can discard any other pending changes, since they know -// they will be handling all changes that have occurred before right now. -func (f *filter) DiscardLeaderSettingsEvent() { - select { - case <-f.tomb.Dying(): - case f.discardLeaderSettings <- nothing: - } -} - -// WantLeaderSettingsEvents can be used to enable/disable events being sent on -// the LeaderSettingsEvents() channel. This is used when an agent notices that -// it is the leader, it wants to disable getting events for changes that it is -// generating. Calling this with sendEvents=false disables getting change -// events. Calling this with sendEvents=true will enable future changes, and -// queues up an immediate event so that the agent will refresh its information -// for any events it might have missed while it thought it was the leader. 
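-// For illustration, the intended call pattern (hypothetical caller) is:
-//
-//	f.WantLeaderSettingsEvents(false) // gained leadership; we write the settings
-//	f.WantLeaderSettingsEvents(true)  // lost leadership; resume change events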
-func (f *filter) WantLeaderSettingsEvents(sendEvents bool) { - select { - case <-f.tomb.Dying(): - case f.wantLeaderSettings <- sendEvents: - } -} - -// DiscardConfigEvent indicates that the filter should discard any pending -// config event. -func (f *filter) DiscardConfigEvent() { - select { - case <-f.tomb.Dying(): - case f.discardConfig <- nothing: - } -} - -func (f *filter) maybeStopWatcher(w watcher.Stopper) { - if w != nil { - watcher.Stop(w, &f.tomb) - } -} - -func (f *filter) loop(unitTag names.UnitTag) (err error) { - // TODO(dfc) named return value is a time bomb - defer func() { - if params.IsCodeNotFoundOrCodeUnauthorized(err) { - err = worker.ErrTerminateAgent - } - }() - if f.unit, err = f.st.Unit(unitTag); err != nil { - return err - } - if err = f.unitChanged(); err != nil { - return err - } - if err = f.meterStatusChanged(); err != nil { - return err - } - f.service, err = f.unit.Service() - if err != nil { - return err - } - if err = f.serviceChanged(); err != nil { - return err - } - unitw, err := f.unit.Watch() - if err != nil { - return err - } - defer f.maybeStopWatcher(unitw) - servicew, err := f.service.Watch() - if err != nil { - return err - } - defer f.maybeStopWatcher(servicew) - // configw and relationsw can get restarted, so we need to use - // their eventual values in the defer calls. - var configw apiwatcher.NotifyWatcher - var configChanges <-chan struct{} - curl, err := f.unit.CharmURL() - if err == nil { - configw, err = f.unit.WatchConfigSettings() - if err != nil { - return err - } - configChanges = configw.Changes() - f.upgradeFrom.url = curl - } else if err != uniter.ErrNoCharmURLSet { - filterLogger.Errorf("unit charm: %v", err) - return err - } - defer f.maybeStopWatcher(configw) - actionsw, err := f.unit.WatchActionNotifications() - if err != nil { - return err - } - f.actionsPending = make([]string, 0) - defer f.maybeStopWatcher(actionsw) - relationsw, err := f.service.WatchRelations() - if err != nil { - return err - } - defer f.maybeStopWatcher(relationsw) - meterStatusw, err := f.unit.WatchMeterStatus() - if err != nil { - return err - } - defer f.maybeStopWatcher(meterStatusw) - addressesw, err := f.unit.WatchAddresses() - if err != nil { - return err - } - defer watcher.Stop(addressesw, &f.tomb) - storagew, err := f.unit.WatchStorage() - if err != nil { - return err - } - defer watcher.Stop(storagew, &f.tomb) - leaderSettingsw, err := f.st.LeadershipSettings.WatchLeadershipSettings(f.service.Tag().Id()) - if err != nil { - return err - } - defer watcher.Stop(leaderSettingsw, &f.tomb) - - // Ignore external requests for leader settings behaviour until we see the first change. - var discardLeaderSettings <-chan struct{} - var wantLeaderSettings <-chan bool - // By default we send all leaderSettings onwards. - sendLeaderSettings := true - - // Config events cannot be meaningfully discarded until one is available; - // once we receive the initial config and address changes, we unblock - // discard requests by setting this channel to its namesake on f. 
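- // (A receive from a nil channel blocks forever, so while discardConfig
- // remains nil the corresponding select case below can never fire;
- // maybePrepareConfigEvent arms it by assigning f.discardConfig to it.)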
- var discardConfig chan struct{} - var seenConfigChange bool - var seenAddressChange bool - maybePrepareConfigEvent := func() { - if !seenAddressChange { - filterLogger.Debugf("no address change seen yet, skipping config event") - return - } - if !seenConfigChange { - filterLogger.Debugf("no config change seen yet, skipping config event") - return - } - filterLogger.Debugf("preparing new config event") - f.outConfig = f.outConfigOn - discardConfig = f.discardConfig - } - - for { - var ok bool - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - - // Handle watcher changes. - case _, ok = <-unitw.Changes(): - filterLogger.Debugf("got unit change") - if !ok { - return watcher.EnsureErr(unitw) - } - if err = f.unitChanged(); err != nil { - return err - } - case _, ok = <-servicew.Changes(): - filterLogger.Debugf("got service change") - if !ok { - return watcher.EnsureErr(servicew) - } - if err = f.serviceChanged(); err != nil { - return err - } - case _, ok = <-configChanges: - filterLogger.Debugf("got config change") - if !ok { - return watcher.EnsureErr(configw) - } - seenConfigChange = true - maybePrepareConfigEvent() - case _, ok = <-addressesw.Changes(): - filterLogger.Debugf("got address change") - if !ok { - return watcher.EnsureErr(addressesw) - } - seenAddressChange = true - maybePrepareConfigEvent() - case _, ok = <-meterStatusw.Changes(): - filterLogger.Debugf("got meter status change") - if !ok { - return watcher.EnsureErr(meterStatusw) - } - if err = f.meterStatusChanged(); err != nil { - return errors.Trace(err) - } - case ids, ok := <-actionsw.Changes(): - filterLogger.Debugf("got %d actions", len(ids)) - if !ok { - return watcher.EnsureErr(actionsw) - } - f.actionsPending = append(f.actionsPending, ids...) - f.nextAction = f.getNextAction() - case keys, ok := <-relationsw.Changes(): - filterLogger.Debugf("got relations change") - if !ok { - return watcher.EnsureErr(relationsw) - } - var ids []int - for _, key := range keys { - relationTag := names.NewRelationTag(key) - rel, err := f.st.Relation(relationTag) - if params.IsCodeNotFoundOrCodeUnauthorized(err) { - // If it's actually gone, this unit cannot have entered - // scope, and therefore never needs to know about it. - } else if err != nil { - return err - } else { - ids = append(ids, rel.Id()) - } - } - f.relationsChanged(ids) - case ids, ok := <-storagew.Changes(): - filterLogger.Debugf("got storage change") - if !ok { - return watcher.EnsureErr(storagew) - } - tags := make([]names.StorageTag, len(ids)) - for i, id := range ids { - tag := names.NewStorageTag(id) - tags[i] = tag - } - f.storageChanged(tags) - case _, ok = <-leaderSettingsw.Changes(): - filterLogger.Debugf("got leader settings change: ok=%t", ok) - if !ok { - return watcher.EnsureErr(leaderSettingsw) - } - if sendLeaderSettings { - // only send the leader settings changed event - // if it hasn't been explicitly disabled - f.outLeaderSettings = f.outLeaderSettingsOn - } else { - filterLogger.Debugf("not sending leader settings change (want=false)") - } - discardLeaderSettings = f.discardLeaderSettings - wantLeaderSettings = f.wantLeaderSettings - - // Send events on active out chans. 
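- // (This is the send side of the same nil-channel idiom: sending on a nil
- // channel blocks forever, so each case below stays disabled until its out*
- // field is armed with the matching out*On channel; most cases then disarm
- // themselves by setting the field back to nil once the event is delivered.)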
- case f.outUpgrade <- f.upgrade: - filterLogger.Debugf("sent upgrade event") - f.outUpgrade = nil - case f.outResolved <- f.resolved: - filterLogger.Debugf("sent resolved event") - f.outResolved = nil - case f.outConfig <- nothing: - filterLogger.Debugf("sent config event") - f.outConfig = nil - case f.outLeaderSettings <- nothing: - filterLogger.Debugf("sent leader settings event") - f.outLeaderSettings = nil - case f.outAction <- f.nextAction: - f.nextAction = f.getNextAction() - filterLogger.Debugf("sent action event") - case f.outRelations <- f.relations: - filterLogger.Debugf("sent relations event") - f.outRelations = nil - f.relations = nil - case f.outMeterStatus <- nothing: - filterLogger.Debugf("sent meter status change event") - f.outMeterStatus = nil - case f.outStorage <- f.storage: - filterLogger.Debugf("sent storage event") - f.outStorage = nil - f.storage = nil - - // Handle explicit requests. - case curl := <-f.setCharm: - filterLogger.Debugf("changing charm to %q", curl) - // We need to restart the config watcher after setting the - // charm, because service config settings are distinct for - // different service charms. - if configw != nil { - if err := configw.Stop(); err != nil { - return err - } - } - if err := f.unit.SetCharmURL(curl); err != nil { - filterLogger.Debugf("failed setting charm url %q: %v", curl, err) - return err - } - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - case f.didSetCharm <- nothing: - } - configw, err = f.unit.WatchConfigSettings() - if err != nil { - return err - } - configChanges = configw.Changes() - - // Restart the relations watcher. - if err := relationsw.Stop(); err != nil { - return err - } - relationsw, err = f.service.WatchRelations() - if err != nil { - return err - } - - f.upgradeFrom.url = curl - if err = f.upgradeChanged(); err != nil { - return err - } - case force := <-f.wantForcedUpgrade: - filterLogger.Debugf("want forced upgrade %v", force) - f.upgradeFrom.force = force - if err = f.upgradeChanged(); err != nil { - return err - } - case <-f.wantResolved: - filterLogger.Debugf("want resolved event") - if f.resolved != params.ResolvedNone { - f.outResolved = f.outResolvedOn - } - case sendEvents := <-wantLeaderSettings: - filterLogger.Debugf("want leader settings event: %t", sendEvents) - sendLeaderSettings = sendEvents - if sendEvents { - // go ahead and send an event right now, - // they're waiting for us - f.outLeaderSettings = f.outLeaderSettingsOn - } else { - // Make sure we don't have a pending event - f.outLeaderSettings = nil - } - case <-f.clearResolved: - filterLogger.Debugf("resolved event handled") - f.outResolved = nil - if err := f.unit.ClearResolved(); err != nil { - return err - } - if err := f.unitChanged(); err != nil { - return err - } - select { - case <-f.tomb.Dying(): - return tomb.ErrDying - case f.didClearResolved <- nothing: - } - case <-discardConfig: - filterLogger.Debugf("discarded config event") - f.outConfig = nil - case <-discardLeaderSettings: - filterLogger.Debugf("discarded leader settings event") - f.outLeaderSettings = nil - } - } -} - -// meterStatusChanged responds to changes in the unit's meter status.
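-// The guard on a non-empty meterStatusCode below means the first reading,
-// taken during loop setup, only records the values; an event is sent to
-// MeterStatusEvents() for subsequent changes.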
-func (f *filter) meterStatusChanged() error { - code, info, err := f.unit.MeterStatus() - if err != nil { - return errors.Trace(err) - } - if f.meterStatusCode != code || f.meterStatusInfo != info { - if f.meterStatusCode != "" { - f.outMeterStatus = f.outMeterStatusOn - } - f.meterStatusCode = code - f.meterStatusInfo = info - } - return nil -} - -// unitChanged responds to changes in the unit. -func (f *filter) unitChanged() error { - if err := f.unit.Refresh(); err != nil { - return err - } - if f.life != f.unit.Life() { - switch f.life = f.unit.Life(); f.life { - case params.Dying: - filterLogger.Infof("unit is dying") - close(f.outUnitDying) - f.outUpgrade = nil - case params.Dead: - filterLogger.Infof("unit is dead") - return worker.ErrTerminateAgent - } - } - resolved, err := f.unit.Resolved() - if err != nil { - return err - } - if resolved != f.resolved { - f.resolved = resolved - if f.resolved != params.ResolvedNone { - f.outResolved = f.outResolvedOn - } - } - return nil -} - -// serviceChanged responds to changes in the service. -func (f *filter) serviceChanged() error { - if err := f.service.Refresh(); err != nil { - return err - } - url, force, err := f.service.CharmURL() - if err != nil { - return err - } - f.upgradeAvailable = serviceCharm{url, force} - switch f.service.Life() { - case params.Dying: - if err := f.unit.Destroy(); err != nil { - return err - } - case params.Dead: - filterLogger.Infof("service is dead") - return worker.ErrTerminateAgent - } - return f.upgradeChanged() -} - -// upgradeChanged responds to changes in the service or in the -// upgrade requests that define which charm changes should be -// delivered as upgrades. -func (f *filter) upgradeChanged() (err error) { - if f.life != params.Alive { - filterLogger.Debugf("charm check skipped, unit is dying") - f.outUpgrade = nil - return nil - } - if f.upgradeFrom.url == nil { - filterLogger.Debugf("charm check skipped, not yet installed.") - f.outUpgrade = nil - return nil - } - if *f.upgradeAvailable.url != *f.upgradeFrom.url { - if f.upgradeAvailable.force || !f.upgradeFrom.force { - filterLogger.Debugf("preparing new upgrade event") - if f.upgrade == nil || *f.upgrade != *f.upgradeAvailable.url { - f.upgrade = f.upgradeAvailable.url - } - f.outUpgrade = f.outUpgradeOn - return nil - } - } - filterLogger.Debugf("no new charm event") - f.outUpgrade = nil - return nil -} - -// relationsChanged responds to service relation changes. -func (f *filter) relationsChanged(changed []int) { - ids := set.NewInts(f.relations...) - for _, id := range changed { - ids.Add(id) - } - if len(f.relations) != len(ids) { - f.relations = ids.SortedValues() - f.outRelations = f.outRelationsOn - } -} - -// storageChanged responds to unit storage changes. -func (f *filter) storageChanged(changed []names.StorageTag) { - tags := set.NewTags() // f.storage is []StorageTag, not []Tag - for _, tag := range f.storage { - tags.Add(tag) - } - for _, tag := range changed { - tags.Add(tag) - } - if len(f.storage) != len(tags) { - storage := make([]names.StorageTag, len(tags)) - for i, tag := range tags.SortedValues() { - storage[i] = tag.(names.StorageTag) - } - f.storage = storage - f.outStorage = f.outStorageOn - } -} - -func (f *filter) getNextAction() string { - if len(f.actionsPending) > 0 { - actionId := f.actionsPending[0] - - f.outAction = f.outActionOn - f.actionsPending = f.actionsPending[1:] - - return actionId - } else { - f.outAction = nil - } - - return "" -} - -// serviceCharm holds information about a charm.
-type serviceCharm struct { - url *charm.URL - force bool -} - -// nothing is marginally more pleasant to read than "struct{}{}". -var nothing = struct{}{} === removed file 'src/github.com/juju/juju/worker/uniter/filter/filter_test.go' --- src/github.com/juju/juju/worker/uniter/filter/filter_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/filter/filter_test.go 1970-01-01 00:00:00 +0000 @@ -1,720 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package filter_test - -import ( - "fmt" - "time" - - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "launchpad.net/tomb" - - "github.com/juju/juju/api" - apiuniter "github.com/juju/juju/api/uniter" - "github.com/juju/juju/apiserver/params" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - statetesting "github.com/juju/juju/state/testing" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/uniter/filter" -) - -type FilterSuite struct { - jujutesting.JujuConnSuite - wordpress *state.Service - unit *state.Unit - mysqlcharm *state.Charm - wpcharm *state.Charm - machine *state.Machine - - st api.Connection - uniter *apiuniter.State -} - -var _ = gc.Suite(&FilterSuite{}) - -func (s *FilterSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - s.wpcharm = s.AddTestingCharm(c, "wordpress") - s.wordpress = s.AddTestingService(c, "wordpress", s.wpcharm) - var err error - s.unit, err = s.wordpress.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = s.unit.AssignToNewMachine() - c.Assert(err, jc.ErrorIsNil) - mid, err := s.unit.AssignedMachineId() - c.Assert(err, jc.ErrorIsNil) - s.machine, err = s.State.Machine(mid) - c.Assert(err, jc.ErrorIsNil) - err = s.machine.SetProvisioned("i-exist", "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - s.APILogin(c, s.unit) -} - -func (s *FilterSuite) APILogin(c *gc.C, unit *state.Unit) { - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = unit.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - s.st = s.OpenAPIAs(c, unit.Tag(), password) - s.uniter, err = s.st.Uniter() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.uniter, gc.NotNil) -} - -// notifyAsserterC creates a coretesting.NotifyAsserterC that will sync the -// state before running our assertions. -func (s *FilterSuite) notifyAsserterC(c *gc.C, ch <-chan struct{}) coretesting.NotifyAsserterC { - return coretesting.NotifyAsserterC{ - Precond: s.BackingState.StartSync, - C: c, - Chan: ch, - } -} - -// contentAsserterC creates a coretesting.ContentAsserterC that will sync the -// state before running our assertions. -func (s *FilterSuite) contentAsserterC(c *gc.C, ch interface{}) coretesting.ContentAsserterC { - return coretesting.ContentAsserterC{ - Precond: s.BackingState.StartSync, - C: c, - Chan: ch, - } -} - -// EvilSync starts a state sync (ensuring that any changes will be delivered to -// the internal watchers "soon") -- and then waits "a while" so that we can be -// reasonably certain that the events have made it through the api server and -// then delivered from the api-level watcher to the filter itself. 
-// -// It's important to be clear that this *is* evil, and we should be testing -// with a mocked-out watcher we can control directly; the only reason this -// method exists is because we already perpetrated this crime -- but not -// consistently -- and we're concentrating the evil in one place. -func (s *FilterSuite) EvilSync() { - s.BackingState.StartSync() - time.Sleep(250 * time.Millisecond) -} - -func (s *FilterSuite) TestUnitDeath(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer f.Stop() // no AssertStop, we test for an error below - dyingC := s.notifyAsserterC(c, f.UnitDying()) - dyingC.AssertNoReceive() - - // Irrelevant change. - err = s.unit.SetResolved(state.ResolvedRetryHooks) - c.Assert(err, jc.ErrorIsNil) - dyingC.AssertNoReceive() - - // Set dying. - err = s.unit.SetAgentStatus(state.StatusIdle, "", nil) - c.Assert(err, jc.ErrorIsNil) - err = s.unit.Destroy() - c.Assert(err, jc.ErrorIsNil) - dyingC.AssertClosed() - - // Another irrelevant change. - err = s.unit.ClearResolved() - c.Assert(err, jc.ErrorIsNil) - dyingC.AssertClosed() - - // Set dead. - err = s.unit.EnsureDead() - c.Assert(err, jc.ErrorIsNil) - s.assertAgentTerminates(c, f) -} - -func (s *FilterSuite) TestUnitRemoval(c *gc.C) { - coretesting.SkipIfI386(c, "lp:1425569") - - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer f.Stop() // no AssertStop, we test for an error below - - // short-circuit to remove because no status set. - err = s.unit.Destroy() - c.Assert(err, jc.ErrorIsNil) - s.assertAgentTerminates(c, f) -} - -// Ensure we get a signal on f.Dead() -func (s *FilterSuite) assertFilterDies(c *gc.C, f filter.Filter) { - deadC := s.notifyAsserterC(c, f.Dead()) - deadC.AssertClosed() -} - -func (s *FilterSuite) assertAgentTerminates(c *gc.C, f filter.Filter) { - s.assertFilterDies(c, f) - c.Assert(f.Wait(), gc.Equals, worker.ErrTerminateAgent) -} - -func (s *FilterSuite) TestServiceDeath(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - dyingC := s.notifyAsserterC(c, f.UnitDying()) - dyingC.AssertNoReceive() - - err = s.unit.SetAgentStatus(state.StatusIdle, "", nil) - c.Assert(err, jc.ErrorIsNil) - err = s.wordpress.Destroy() - c.Assert(err, jc.ErrorIsNil) - - timeout := time.After(coretesting.LongWait) -loop: - for { - select { - case <-f.UnitDying(): - break loop - case <-time.After(coretesting.ShortWait): - s.BackingState.StartSync() - case <-timeout: - c.Fatalf("dead not detected") - } - } - err = s.unit.Refresh() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.unit.Life(), gc.Equals, state.Dying) - - // Can't set s.wordpress to Dead while it still has units. -} - -func (s *FilterSuite) TestResolvedEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - resolvedC := s.contentAsserterC(c, f.ResolvedEvents()) - resolvedC.AssertNoReceive() - - // Request an event; no interesting event is available. - f.WantResolvedEvent() - resolvedC.AssertNoReceive() - - // Change the unit in an irrelevant way; no events. - err = s.unit.SetAgentStatus(state.StatusError, "blarg", nil) - c.Assert(err, jc.ErrorIsNil) - resolvedC.AssertNoReceive() - - // Change the unit's resolved to an interesting value; new event received. 
- err = s.unit.SetResolved(state.ResolvedRetryHooks) - c.Assert(err, jc.ErrorIsNil) - resolvedC.AssertOneValue(params.ResolvedRetryHooks) - - // Ask for the event again, and check it's resent. - f.WantResolvedEvent() - resolvedC.AssertOneValue(params.ResolvedRetryHooks) - - // Clear the resolved status *via the filter*; check not resent... - err = f.ClearResolved() - c.Assert(err, jc.ErrorIsNil) - resolvedC.AssertNoReceive() - - // ...even when requested. - f.WantResolvedEvent() - resolvedC.AssertNoReceive() - - // Induce several events; only latest state is reported. - err = s.unit.SetResolved(state.ResolvedRetryHooks) - c.Assert(err, jc.ErrorIsNil) - err = f.ClearResolved() - c.Assert(err, jc.ErrorIsNil) - err = s.unit.SetResolved(state.ResolvedNoHooks) - c.Assert(err, jc.ErrorIsNil) - resolvedC.AssertOneValue(params.ResolvedNoHooks) -} - -func (s *FilterSuite) TestCharmUpgradeEvents(c *gc.C) { - oldCharm := s.AddTestingCharm(c, "upgrade1") - svc := s.AddTestingService(c, "upgradetest", oldCharm) - unit, err := svc.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = unit.AssignToNewMachine() - c.Assert(err, jc.ErrorIsNil) - - s.APILogin(c, unit) - - f, err := filter.NewFilter(s.uniter, unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - // No initial event is sent. - upgradeC := s.contentAsserterC(c, f.UpgradeEvents()) - upgradeC.AssertNoReceive() - - // Setting a charm generates no new events if it already matches. - err = f.SetCharm(oldCharm.URL()) - c.Assert(err, jc.ErrorIsNil) - upgradeC.AssertNoReceive() - - // Explicitly request an event relative to the existing state; nothing. - f.WantUpgradeEvent(false) - upgradeC.AssertNoReceive() - - // Change the service in an irrelevant way; no events. - err = svc.SetExposed() - c.Assert(err, jc.ErrorIsNil) - upgradeC.AssertNoReceive() - - // Change the service's charm; new event received. - newCharm := s.AddTestingCharm(c, "upgrade2") - err = svc.SetCharm(newCharm, false) - c.Assert(err, jc.ErrorIsNil) - upgradeC.AssertOneValue(newCharm.URL()) - - // Request a new *unforced* upgrade event; we should see one. - f.WantUpgradeEvent(false) - upgradeC.AssertOneValue(newCharm.URL()) - - // Request only *forced* upgrade events; nothing. - f.WantUpgradeEvent(true) - upgradeC.AssertNoReceive() - - // But when we have a forced upgrade to the same URL, no new event. - err = svc.SetCharm(oldCharm, true) - c.Assert(err, jc.ErrorIsNil) - upgradeC.AssertNoReceive() - - // ...but a *forced* change to a different URL should generate an event. - err = svc.SetCharm(newCharm, true) - c.Assert(err, jc.ErrorIsNil) - upgradeC.AssertOneValue(newCharm.URL()) -} - -func (s *FilterSuite) TestConfigEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - err = s.machine.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - // Test no changes before the charm URL is set. - configC := s.notifyAsserterC(c, f.ConfigEvents()) - configC.AssertNoReceive() - - // Set the charm URL to trigger config events. - err = f.SetCharm(s.wpcharm.URL()) - c.Assert(err, jc.ErrorIsNil) - s.EvilSync() - configC.AssertOneReceive() - - // Change the config; new event received.
- changeConfig := func(title interface{}) { - err := s.wordpress.UpdateConfigSettings(charm.Settings{ - "blog-title": title, - }) - c.Assert(err, jc.ErrorIsNil) - } - changeConfig("20,000 leagues in the cloud") - configC.AssertOneReceive() - - // Change the config a few more times, then reset the events. We sync to - // make sure the events have arrived in the watcher -- and then wait a - // little longer, to allow for the delay while the events are coalesced - // -- before we tell it to discard all received events. This would be - // much better tested by controlling a mocked-out watcher directly, but - // that's a bit inconvenient for this change. - changeConfig(nil) - changeConfig("the curious incident of the dog in the cloud") - s.EvilSync() - f.DiscardConfigEvent() - configC.AssertNoReceive() - - // Change the addresses of the unit's assigned machine; new event received. - err = s.machine.SetProviderAddresses(network.NewAddress("0.1.2.4")) - c.Assert(err, jc.ErrorIsNil) - s.BackingState.StartSync() - configC.AssertOneReceive() - - // Check that a filter's initial event works with DiscardConfigEvent - // as expected. - f, err = filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - s.BackingState.StartSync() - f.DiscardConfigEvent() - configC.AssertNoReceive() - - // Further changes are still collapsed as appropriate. - changeConfig("forsooth") - changeConfig("imagination failure") - configC.AssertOneReceive() -} - -func (s *FilterSuite) TestInitialAddressEventIgnored(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - err = s.machine.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - // We should not get any config-change events until - // setting the charm URL. - configC := s.notifyAsserterC(c, f.ConfigEvents()) - configC.AssertNoReceive() - - // Set the charm URL to trigger config events. - err = f.SetCharm(s.wpcharm.URL()) - c.Assert(err, jc.ErrorIsNil) - - // We should get one config-change event only. - s.EvilSync() - configC.AssertOneReceive() -} - -func (s *FilterSuite) TestConfigAndAddressEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - // Set the charm URL to trigger config events. - err = f.SetCharm(s.wpcharm.URL()) - c.Assert(err, jc.ErrorIsNil) - - // Changing the machine addresses should also result in - // a config-change event. - err = s.machine.SetProviderAddresses( - network.NewAddress("0.1.2.3"), - ) - c.Assert(err, jc.ErrorIsNil) - - configC := s.notifyAsserterC(c, f.ConfigEvents()) - - // Config and address events should be coalesced. Start - // the synchronisation and sleep a bit to give the filter - // a chance to pick them both up. - s.EvilSync() - configC.AssertOneReceive() -} - -func (s *FilterSuite) TestConfigAndAddressEventsDiscarded(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - // There should be no pending changes yet - configC := s.notifyAsserterC(c, f.ConfigEvents()) - configC.AssertNoReceive() - - // Change the machine addresses. - err = s.machine.SetProviderAddresses(network.NewAddress("0.1.2.3")) - c.Assert(err, jc.ErrorIsNil) - - // Set the charm URL to trigger config events. 
- err = f.SetCharm(s.wpcharm.URL()) - c.Assert(err, jc.ErrorIsNil) - - // We should not receive any config-change events. - s.EvilSync() - f.DiscardConfigEvent() - configC.AssertNoReceive() -} - -func getAssertActionChange(actionC coretesting.ContentAsserterC) func(ids []string) { - // This calls AssertReceive N times for N ids, but allows the - // ids to come back in any order. - return func(ids []string) { - expected := make(map[string]int) - seen := make(map[string]int) - for _, id := range ids { - expected[id] += 1 - actionId := actionC.AssertReceive().(string) - seen[actionId] += 1 - } - actionC.C.Assert(seen, jc.DeepEquals, expected) - - // Ensure that there are no other items remaining - actionC.AssertNoReceive() - } -} - -func getAddAction(s *FilterSuite, c *gc.C) func(name string) string { - return func(name string) string { - newAction, err := s.State.EnqueueAction(s.unit.Tag(), name, nil) - // newAction, err := s.unit.AddAction(name, nil) - c.Assert(err, jc.ErrorIsNil) - newId := newAction.Id() - return newId - } -} - -func (s *FilterSuite) TestActionEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - actionC := s.contentAsserterC(c, f.ActionEvents()) - addAction := getAddAction(s, c) - assertChange := getAssertActionChange(actionC) - - // Test no changes before Actions are added for the Unit. - actionC.AssertNoReceive() - - // Add a new action; event occurs - testId := addAction("fakeaction") - assertChange([]string{testId}) - - // Make sure bundled events arrive properly. - testIds := make([]string, 5) - for i := 0; i < 5; i++ { - testIds[i] = addAction("fakeaction") - } - assertChange(testIds) -} - -func (s *FilterSuite) TestPreexistingActions(c *gc.C) { - addAction := getAddAction(s, c) - - // Add an Action before the Filter has been created and see if - // it arrives properly. - - testId := addAction("snapshot") - - // Now create the Filter and see whether the Action comes in as expected. - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - actionC := s.contentAsserterC(c, f.ActionEvents()) - assertChange := getAssertActionChange(actionC) - assertChange([]string{testId}) - - // Let's make sure there were no duplicates. - actionC.AssertNoReceive() -} - -func (s *FilterSuite) TestCharmErrorEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer f.Stop() // no AssertStop, we test for an error below - - configC := s.notifyAsserterC(c, f.ConfigEvents()) - - // Check setting an invalid charm URL does not send events. - err = f.SetCharm(charm.MustParseURL("cs:missing/one-1")) - c.Assert(err, gc.Equals, tomb.ErrDying) - configC.AssertNoReceive() - s.assertFilterDies(c, f) - - // Filter died after the error, so restart it. - f, err = filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer f.Stop() // no AssertStop, we test for an error below - - // Check with a nil charm URL, again no changes. 
- err = f.SetCharm(nil) - c.Assert(err, gc.Equals, tomb.ErrDying) - configC.AssertNoReceive() - s.assertFilterDies(c, f) -} - -func (s *FilterSuite) TestRelationsEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - relationsC := s.contentAsserterC(c, f.RelationsEvents()) - relationsC.AssertNoReceive() - - // Add a couple of relations; check the event. - rel0 := s.addRelation(c) - rel1 := s.addRelation(c) - c.Assert(relationsC.AssertOneReceive(), gc.DeepEquals, []int{0, 1}) - - // Add another relation, and change another's Life (by entering scope before - // Destroy, thereby setting the relation to Dying); check event. - s.addRelation(c) - ru0, err := rel0.Unit(s.unit) - c.Assert(err, jc.ErrorIsNil) - err = ru0.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - err = rel0.Destroy() - c.Assert(err, jc.ErrorIsNil) - c.Assert(relationsC.AssertOneReceive(), gc.DeepEquals, []int{0, 2}) - - // Remove a relation completely; check no event, because the relation - // could not have been removed if the unit was in scope, and therefore - // the uniter never needs to hear about it. - err = rel1.Destroy() - c.Assert(err, jc.ErrorIsNil) - relationsC.AssertNoReceive() - err = f.Stop() - c.Assert(err, jc.ErrorIsNil) - - // Start a new filter, check initial event. - f, err = filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - relationsC = s.contentAsserterC(c, f.RelationsEvents()) - c.Assert(relationsC.AssertOneReceive(), gc.DeepEquals, []int{0, 2}) - - // Check setting the charm URL generates all new relation events. - err = f.SetCharm(s.wpcharm.URL()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(relationsC.AssertOneReceive(), gc.DeepEquals, []int{0, 2}) -} - -func (s *FilterSuite) addRelation(c *gc.C) *state.Relation { - if s.mysqlcharm == nil { - s.mysqlcharm = s.AddTestingCharm(c, "mysql") - } - rels, err := s.wordpress.Relations() - c.Assert(err, jc.ErrorIsNil) - svcName := fmt.Sprintf("mysql%d", len(rels)) - s.AddTestingService(c, svcName, s.mysqlcharm) - eps, err := s.State.InferEndpoints(svcName, "wordpress") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) - c.Assert(err, jc.ErrorIsNil) - return rel -} - -func (s *FilterSuite) TestMeterStatusEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - meterC := s.notifyAsserterC(c, f.MeterStatusEvents()) - // Initial meter status does not trigger event. - meterC.AssertNoReceive() - - // Set unit meter status to trigger event. - err = s.unit.SetMeterStatus("GREEN", "Operating normally.") - c.Assert(err, jc.ErrorIsNil) - meterC.AssertOneReceive() - - // Make sure bundled events arrive properly. 
- for i := 0; i < 5; i++ { - err = s.unit.SetMeterStatus("RED", fmt.Sprintf("Update %d.", i)) - c.Assert(err, jc.ErrorIsNil) - } - meterC.AssertOneReceive() -} - -func (s *FilterSuite) TestStorageEvents(c *gc.C) { - storageCharm := s.AddTestingCharm(c, "storage-block2") - svc := s.AddTestingServiceWithStorage(c, "storage-block2", storageCharm, map[string]state.StorageConstraints{ - "multi1to10": state.StorageConstraints{Pool: "loop", Size: 1024, Count: 1}, - "multi2up": state.StorageConstraints{Pool: "loop", Size: 2048, Count: 2}, - }) - unit, err := svc.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = unit.AssignToNewMachine() - c.Assert(err, jc.ErrorIsNil) - s.APILogin(c, unit) - - f, err := filter.NewFilter(s.uniter, unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - storageC := s.contentAsserterC(c, f.StorageEvents()) - c.Assert(storageC.AssertOneReceive(), gc.DeepEquals, []names.StorageTag{ - names.NewStorageTag("multi1to10/0"), - names.NewStorageTag("multi2up/1"), - names.NewStorageTag("multi2up/2"), - }) - - err = s.State.DestroyStorageInstance(names.NewStorageTag("multi2up/1")) - c.Assert(err, jc.ErrorIsNil) - err = s.State.Cleanup() - c.Assert(err, jc.ErrorIsNil) - c.Assert(storageC.AssertOneReceive(), gc.DeepEquals, []names.StorageTag{ - names.NewStorageTag("multi2up/1"), - }) -} - -func (s *FilterSuite) setLeaderSetting(c *gc.C, key, value string) { - err := s.wordpress.UpdateLeaderSettings(successToken{}, map[string]string{key: value}) - c.Assert(err, jc.ErrorIsNil) -} - -type successToken struct{} - -func (successToken) Check(interface{}) error { - return nil -} - -func (s *FilterSuite) TestLeaderSettingsEventsSendsChanges(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - leaderSettingsC := s.notifyAsserterC(c, f.LeaderSettingsEvents()) - // Assert that we get the initial event - leaderSettingsC.AssertOneReceive() - - // And any time we make changes to the leader settings, we get an event - s.setLeaderSetting(c, "foo", "bar-1") - leaderSettingsC.AssertOneReceive() - - // And multiple changes to settings still get collapsed into a single event - s.setLeaderSetting(c, "foo", "bar-2") - s.setLeaderSetting(c, "foo", "bar-3") - s.setLeaderSetting(c, "foo", "bar-4") - s.EvilSync() - leaderSettingsC.AssertOneReceive() -} - -func (s *FilterSuite) TestWantLeaderSettingsEvents(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer statetesting.AssertStop(c, f) - - leaderSettingsC := s.notifyAsserterC(c, f.LeaderSettingsEvents()) - - // Suppress the initial event - f.WantLeaderSettingsEvents(false) - leaderSettingsC.AssertNoReceive() - - // Also suppresses actual changes - s.setLeaderSetting(c, "foo", "baz-1") - s.EvilSync() - leaderSettingsC.AssertNoReceive() - - // Re-enabling the events gives us an immediate change - f.WantLeaderSettingsEvents(true) - leaderSettingsC.AssertOneReceive() - - // And also delivers events when actual changes are made - s.setLeaderSetting(c, "foo", "baz-2") - s.EvilSync() - leaderSettingsC.AssertOneReceive() - - // Setting a value to the same thing doesn't trigger a change - s.setLeaderSetting(c, "foo", "baz-2") - s.EvilSync() - leaderSettingsC.AssertNoReceive() - -} - -func (s *FilterSuite) TestDiscardLeaderSettingsEvent(c *gc.C) { - f, err := filter.NewFilter(s.uniter, s.unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - defer
statetesting.AssertStop(c, f) - - leaderSettingsC := s.notifyAsserterC(c, f.LeaderSettingsEvents()) - // Discard the initial event - f.DiscardLeaderSettingsEvent() - leaderSettingsC.AssertNoReceive() - - // However, it has not permanently disabled change events, another - // change still shows up - s.setLeaderSetting(c, "foo", "bing-1") - s.EvilSync() - leaderSettingsC.AssertOneReceive() - - // But at any point we can discard them - s.setLeaderSetting(c, "foo", "bing-2") - s.EvilSync() - f.DiscardLeaderSettingsEvent() - leaderSettingsC.AssertNoReceive() -} === removed file 'src/github.com/juju/juju/worker/uniter/filter/interface.go' --- src/github.com/juju/juju/worker/uniter/filter/interface.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/filter/interface.go 1970-01-01 00:00:00 +0000 @@ -1,117 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package filter - -import ( - "github.com/juju/names" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/params" -) - -// Filter is responsible for delivering events relevant to a unit agent in a -// form that can be consumed conveniently. -type Filter interface { - - // Stop shuts down the filter and returns any error encountered in the process. - Stop() error - - // Dead returns a channel that will close when the filter has shut down. - Dead() <-chan struct{} - - // Wait blocks until the filter has shut down, and returns any error - // encountered in the process. - Wait() error - - // Kill causes the filter to start shutting down if it has not already done so. - Kill() - - // UnitDying returns a channel which is closed when the Unit enters a Dying state. - UnitDying() <-chan struct{} - - // UpgradeEvents returns a channel that will receive a new charm URL whenever an - // upgrade is indicated. Events should not be read until the baseline state - // has been specified by calling WantUpgradeEvent. - UpgradeEvents() <-chan *charm.URL - - // ResolvedEvents returns a channel that may receive a ResolvedMode when the - // unit's Resolved value changes, or when an event is explicitly requested. - // A ResolvedNone state will never generate events, but ResolvedRetryHooks and - // ResolvedNoHooks will always be delivered as described. - ResolvedEvents() <-chan params.ResolvedMode - - // MeterStatusEvents returns a channel that will receive a signal when the unit's - // meter status changes. - MeterStatusEvents() <-chan struct{} - - // ConfigEvents returns a channel that will receive a signal whenever the service's - // configuration changes, or when an event is explicitly requested. - ConfigEvents() <-chan struct{} - - // ActionEvents returns a channel that will receive a signal whenever the unit - // receives new Actions. - ActionEvents() <-chan string - - // RelationsEvents returns a channel that will receive the ids of all the service's - // relations whose Life status has changed. - RelationsEvents() <-chan []int - - // StorageEvents returns a channel that will receive the tags of all the unit's - // associated storage instances whose Life status has changed. - StorageEvents() <-chan []names.StorageTag - - // WantUpgradeEvent controls whether the filter will generate upgrade - // events for unforced service charm changes. - WantUpgradeEvent(mustForce bool) - - // SetCharm notifies the filter that the unit is running a new - // charm. 
It causes the unit's charm URL to be set in state, and the - // following changes to the filter's behaviour: - // - // * Upgrade events will only be generated for charms different to - // that supplied; - // * A fresh relations event will be generated containing every relation - // the service is participating in; - // * A fresh configuration event will be generated, and subsequent - // events will only be sent in response to changes in the version - // of the service's settings that is specific to that charm. - // - // SetCharm blocks until the charm URL is set in state, returning any - // error that occurred. - SetCharm(curl *charm.URL) error - - // WantResolvedEvent indicates that the filter should send a resolved event - // if one is available. - WantResolvedEvent() - - // ClearResolved notifies the filter that a resolved event has been handled - // and should not be reported again. - ClearResolved() error - - // DiscardConfigEvent indicates that the filter should discard any pending - // config event. - DiscardConfigEvent() - - // LeaderSettingsEvents returns a channel that will receive an event whenever - // there is a leader settings change. Events can be temporarily suspended by - // calling WantLeaderSettingsEvents(false), and then reenabled by calling - // WantLeaderSettingsEvents(true) - LeaderSettingsEvents() <-chan struct{} - - // DiscardLeaderSettingsEvent can be called to discard any pending - // LeaderSettingsEvents. This is used by code that saw a LeaderSettings change, - // and has been prepping for a response. Just before they request the current - // LeaderSettings, they can discard any other pending changes, since they know - // they will be handling all changes that have occurred before right now. - DiscardLeaderSettingsEvent() - - // WantLeaderSettingsEvents can be used to enable/disable events being sent on - // the LeaderSettingsEvents() channel. This is used when an agent notices that - // it is the leader, it wants to disable getting events for changes that it is - // generating. Calling this with sendEvents=false disables getting change - // events. Calling this with sendEvents=true will enable future changes, and - // queues up an immediate event so that the agent will refresh its information - // for any events it might have missed while it thought it was the leader. - WantLeaderSettingsEvents(sendEvents bool) -} === removed file 'src/github.com/juju/juju/worker/uniter/filter/package_test.go' --- src/github.com/juju/juju/worker/uniter/filter/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/filter/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package filter_test - -import ( - stdtesting "testing" - - coretesting "github.com/juju/juju/testing" -) - -func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) -} === modified file 'src/github.com/juju/juju/worker/uniter/hook/hook.go' --- src/github.com/juju/juju/worker/uniter/hook/hook.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/hook/hook.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "fmt" "github.com/juju/names" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" ) // TODO(fwereade): move these definitions to juju/charm/hooks. @@ -28,7 +28,7 @@ RelationId int `yaml:"relation-id,omitempty"` // RemoteUnit is the name of the unit that triggered the hook. 
It is only - // set when Kind inicates a relation hook other than relation-broken. + // set when Kind indicates a relation hook other than relation-broken. RemoteUnit string `yaml:"remote-unit,omitempty"` // ChangeVersion identifies the most recent unit settings change @@ -47,7 +47,8 @@ return fmt.Errorf("%q hook requires a remote unit", hi.Kind) } fallthrough - case hooks.Install, hooks.Start, hooks.ConfigChanged, hooks.UpgradeCharm, hooks.Stop, hooks.RelationBroken, hooks.CollectMetrics, hooks.MeterStatusChanged, hooks.UpdateStatus: + case hooks.Install, hooks.Start, hooks.ConfigChanged, hooks.UpgradeCharm, hooks.Stop, hooks.RelationBroken, + hooks.CollectMetrics, hooks.MeterStatusChanged, hooks.UpdateStatus: return nil case hooks.Action: return fmt.Errorf("hooks.Kind Action is deprecated") === modified file 'src/github.com/juju/juju/worker/uniter/hook/hook_test.go' --- src/github.com/juju/juju/worker/uniter/hook/hook_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/hook/hook_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/hook" === modified file 'src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go' --- src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/hook/hooktesting/source.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,7 @@ package hooktesting import ( - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "launchpad.net/tomb" "github.com/juju/juju/worker/uniter/hook" === modified file 'src/github.com/juju/juju/worker/uniter/hook/listsource_test.go' --- src/github.com/juju/juju/worker/uniter/hook/listsource_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/hook/listsource_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/hook/hooktesting" === modified file 'src/github.com/juju/juju/worker/uniter/hook/peeker_test.go' --- src/github.com/juju/juju/worker/uniter/hook/peeker_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/hook/peeker_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" === modified file 'src/github.com/juju/juju/worker/uniter/hook/sender_test.go' --- src/github.com/juju/juju/worker/uniter/hook/sender_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/hook/sender_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" === added directory 'src/github.com/juju/juju/worker/uniter/leadership' === added file 'src/github.com/juju/juju/worker/uniter/leadership/resolver.go' --- src/github.com/juju/juju/worker/uniter/leadership/resolver.go 1970-01-01 00:00:00 +0000 +++ 
src/github.com/juju/juju/worker/uniter/leadership/resolver.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package leadership + +import ( + "github.com/juju/loggo" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +var logger = loggo.GetLogger("juju.worker.uniter.leadership") + +type leadershipResolver struct { +} + +// NewResolver returns a new leadership resolver. +func NewResolver() resolver.Resolver { + return &leadershipResolver{} +} + +// NextOp is defined on the Resolver interface. +func (l *leadershipResolver) NextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + + // TODO(wallyworld) - maybe this can occur before install + if !localState.Installed { + return nil, resolver.ErrNoOperation + } + + // Check for any leadership change, and enact it if possible. + logger.Tracef("checking leadership status") + + // If we've already accepted leadership, we don't need to do it again. + canAcceptLeader := !localState.Leader + if remoteState.Life == params.Dying { + canAcceptLeader = false + } else { + // If we're in an unexpected mode (eg pending hook) we shouldn't try either. + if localState.Kind != operation.Continue { + canAcceptLeader = false + } + } + + switch { + case remoteState.Leader && canAcceptLeader: + return opFactory.NewAcceptLeadership() + + // If we're the leader but should not be any longer, or + // if the unit is dying, we should resign leadership. + case localState.Leader && (!remoteState.Leader || remoteState.Life == params.Dying): + return opFactory.NewResignLeadership() + } + + if localState.Kind == operation.Continue { + // We want to run the leader settings hook if we're + // not the leader and the settings have changed. 
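// A follower whose recorded LeaderSettingsVersion lags the remote
// snapshot catches up by running leader-settings-changed; a leader
// skips the hook, since it produced the settings change itself.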
+ if !localState.Leader && localState.LeaderSettingsVersion != remoteState.LeaderSettingsVersion { + return opFactory.NewRunHook(hook.Info{Kind: hook.LeaderSettingsChanged}) + } + } + + logger.Tracef("leadership status is up-to-date") + return nil, resolver.ErrNoOperation +} === modified file 'src/github.com/juju/juju/worker/uniter/manifold.go' --- src/github.com/juju/juju/worker/uniter/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/manifold.go 2016-03-22 15:18:22 +0000 @@ -6,14 +6,16 @@ import ( "github.com/juju/errors" "github.com/juju/names" + "github.com/juju/utils/clock" "github.com/juju/utils/fslock" "github.com/juju/juju/agent" "github.com/juju/juju/api/base" "github.com/juju/juju/api/uniter" + "github.com/juju/juju/core/leadership" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" - "github.com/juju/juju/worker/leadership" + "github.com/juju/juju/worker/fortress" "github.com/juju/juju/worker/uniter/operation" ) @@ -24,6 +26,7 @@ APICallerName string MachineLockName string LeadershipTrackerName string + CharmDirName string } // Manifold returns a dependency manifold that runs a uniter worker, @@ -35,6 +38,7 @@ config.APICallerName, config.LeadershipTrackerName, config.MachineLockName, + config.CharmDirName, }, Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { @@ -58,6 +62,10 @@ if err := getResource(config.LeadershipTrackerName, &leadershipTracker); err != nil { return nil, err } + var charmDirGuard fortress.Guard + if err := getResource(config.CharmDirName, &charmDirGuard); err != nil { + return nil, err + } // Configure and start the uniter. config := agent.CurrentConfig() @@ -67,16 +75,21 @@ return nil, errors.Errorf("expected a unit tag, got %v", tag) } uniterFacade := uniter.NewState(apiCaller, unitTag) - return NewUniter(&UniterParams{ + uniter, err := NewUniter(&UniterParams{ UniterFacade: uniterFacade, UnitTag: unitTag, LeadershipTracker: leadershipTracker, DataDir: config.DataDir(), MachineLock: machineLock, - MetricsTimerChooser: NewMetricsTimerChooser(), + CharmDirGuard: charmDirGuard, UpdateStatusSignal: NewUpdateStatusTimer(), NewOperationExecutor: operation.NewExecutor, - }), nil + Clock: clock.WallClock, + }) + if err != nil { + return nil, errors.Trace(err) + } + return uniter, nil }, } } === removed directory 'src/github.com/juju/juju/worker/uniter/metrics' === removed file 'src/github.com/juju/juju/worker/uniter/metrics.go' --- src/github.com/juju/juju/worker/uniter/metrics.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/metrics.go 1970-01-01 00:00:00 +0000 @@ -1,72 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "time" - - corecharm "gopkg.in/juju/charm.v5" -) - -const ( - // interval at which the unit's metrics should be collected - metricsPollInterval = 5 * time.Minute - - // interval at which the uniter sends metrics to the state server. - metricsSendInterval = 5 * time.Minute -) - -// activeCollectMetricsTimer returns a channel that will signal the collect metrics hook -// as close to interval after the last run as possible. 
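// For example, with a 5 minute interval and a last run 3 minutes ago,
// the returned channel fires after roughly 2 minutes (the interval
// minus the time already elapsed):
//
//	signal := activeCollectMetricsTimer(now, lastRun, 5*time.Minute)
//	select {
//	case <-signal:
//		// run the collect-metrics hook, then record a new last-run time
//	}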
-var activeCollectMetricsTimer = func(now, lastRun time.Time, interval time.Duration) <-chan time.Time { - waitDuration := interval - now.Sub(lastRun) - logger.Debugf("metrics waiting for %v", waitDuration) - return time.After(waitDuration) -} - -var activeSendMetricsTimer = activeCollectMetricsTimer - -// inactiveMetricsTimer is the default metrics signal generation function, that -// returns no signal. It will be used in charms that do not declare metrics. -func inactiveMetricsTimer(_, _ time.Time, _ time.Duration) <-chan time.Time { - return nil -} - -// timerChooser allows modeAbide to choose a proper timer for metrics -// depending on the charm. -type timerChooser struct { - collector TimedSignal - sender TimedSignal - inactive TimedSignal -} - -// getCollectMetricsTimer returns a timer used to trigger the collect-metrics hook, -// given the supplied charm. -func (t *timerChooser) getCollectMetricsTimer(ch corecharm.Charm) TimedSignal { - metrics := ch.Metrics() - if metrics != nil && len(metrics.Metrics) > 0 { - return t.collector - } - return t.inactive -} - -// getSendMetricsTimer returns a timer used to trigger sending metrics -// to the state server, given the supplied charm. -func (t *timerChooser) getSendMetricsTimer(ch corecharm.Charm) TimedSignal { - metrics := ch.Metrics() - if metrics != nil && len(metrics.Metrics) > 0 { - return t.sender - } - return t.inactive -} - -// NewMetricsTimerChooser returns a timerChooser for -// collect-metrics hook and the send-metrics operation. -func NewMetricsTimerChooser() *timerChooser { - return &timerChooser{ - collector: activeCollectMetricsTimer, - sender: activeSendMetricsTimer, - inactive: inactiveMetricsTimer, - } -} === removed file 'src/github.com/juju/juju/worker/uniter/metrics/metrics.go' --- src/github.com/juju/juju/worker/uniter/metrics/metrics.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/metrics/metrics.go 1970-01-01 00:00:00 +0000 @@ -1,346 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package metrics - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/juju/errors" - "github.com/juju/juju/apiserver/params" - "github.com/juju/loggo" - "github.com/juju/utils" - corecharm "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -var logger = loggo.GetLogger("juju.worker.uniter.metrics") - -type metricFile struct { - *os.File - finalName string -} - -func createMetricFile(path string) (*metricFile, error) { - dir, base := filepath.Dir(path), filepath.Base(path) - if !filepath.IsAbs(dir) { - return nil, errors.Errorf("not an absolute path: %q", path) - } - - workUUID, err := utils.NewUUID() - if err != nil { - return nil, errors.Trace(err) - } - workName := filepath.Join(dir, fmt.Sprintf(".%s.inc-%s", base, workUUID.String())) - - f, err := os.Create(workName) - if err != nil { - return nil, errors.Trace(err) - } - return &metricFile{File: f, finalName: path}, nil -} - -// Close implements io.Closer. -func (f *metricFile) Close() error { - err := f.File.Close() - if err != nil { - return errors.Trace(err) - } - ok, err := utils.MoveFile(f.Name(), f.finalName) - if err != nil { - // ok can be true even when there is an error completing the move, on - // platforms that implement it in multiple steps that can fail - // separately. 
POSIX for example, uses link(2) to claim the new - // location atomically, followed by an unlink(2) to release the old - // location. - if !ok { - return errors.Trace(err) - } - logger.Errorf("failed to remove temporary file %q: %v", f.Name(), err) - } - return nil -} - -// MetricBatch stores the information relevant to a single metrics batch. -type MetricBatch struct { - CharmURL string `json:"charmurl"` - UUID string `json:"uuid"` - Created time.Time `json:"created"` - Metrics []jujuc.Metric `json:"metrics"` -} - -// APIMetricBatch converts the specified MetricBatch to a params.MetricBatch, -// which can then be sent to the state server. -func APIMetricBatch(batch MetricBatch) params.MetricBatch { - metrics := make([]params.Metric, len(batch.Metrics)) - for i, metric := range batch.Metrics { - metrics[i] = params.Metric{Key: metric.Key, Value: metric.Value, Time: metric.Time} - } - return params.MetricBatch{ - UUID: batch.UUID, - CharmURL: batch.CharmURL, - Created: batch.Created, - Metrics: metrics, - } -} - -// MetricsMetadata is used to store metadata for the current metric batch. -type MetricsMetadata struct { - CharmURL string `json:"charmurl"` - UUID string `json:"uuid"` - Created time.Time `json:"created"` -} - -// JSONMetricRecorder implements the MetricsRecorder interface -// and writes metrics to a spool directory for store-and-forward. -type JSONMetricRecorder struct { - spoolDir string - validMetrics map[string]corecharm.Metric - charmURL string - uuid utils.UUID - created time.Time - - lock sync.Mutex - - file io.Closer - enc *json.Encoder -} - -// NewJSONMetricRecorder creates a new JSON metrics recorder. -// It checks whether the metrics spool directory exists and creates it if it does not. It -// then tries up to 3 times to find an unused metric batch UUID. -func NewJSONMetricRecorder(spoolDir string, metrics map[string]corecharm.Metric, charmURL string) (rec *JSONMetricRecorder, rErr error) { - if err := checkSpoolDir(spoolDir); err != nil { - return nil, errors.Trace(err) - } - - mbUUID, err := utils.NewUUID() - if err != nil { - return nil, errors.Trace(err) - } - - recorder := &JSONMetricRecorder{ - spoolDir: spoolDir, - uuid: mbUUID, - charmURL: charmURL, - created: time.Now().UTC(), - validMetrics: metrics, - } - if err := recorder.open(); err != nil { - return nil, errors.Trace(err) - } - return recorder, nil -} - -// Close implements the MetricsRecorder interface. -func (m *JSONMetricRecorder) Close() error { - m.lock.Lock() - defer m.lock.Unlock() - - err := m.file.Close() - if err != nil { - return errors.Trace(err) - } - - // We have an exclusive lock on this metric batch here, because - // metricFile.Close was able to rename the final filename atomically. - // - // Now write the meta file so that JSONMetricReader discovers a finished - // pair of files. - err = m.recordMetaData() - if err != nil { - return errors.Trace(err) - } - - return nil -} - -// AddMetric implements the MetricsRecorder interface. -func (m *JSONMetricRecorder) AddMetric(key, value string, created time.Time) error { - if !m.IsDeclaredMetric(key) { - return errors.Errorf("metric key %q not declared by the charm", key) - } - m.lock.Lock() - defer m.lock.Unlock() - return errors.Trace(m.enc.Encode(jujuc.Metric{Key: key, Value: value, Time: created})) -} - -// IsDeclaredMetric returns true if the metric recorder is permitted to store this metric. -// It returns false if the charm does not declare this metric.
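// For example, a recorder created with the declared-metrics map
// map[string]corecharm.Metric{"pings": {}} accepts only that key
// (hypothetical values, mirroring the recorder tests further below):
//
//	recorder.IsDeclaredMetric("pings") // true
//	recorder.IsDeclaredMetric("cpu")   // false: not declared by the charm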
-func (m *JSONMetricRecorder) IsDeclaredMetric(key string) bool { - _, ok := m.validMetrics[key] - return ok -} - -func (m *JSONMetricRecorder) open() error { - dataFile := filepath.Join(m.spoolDir, m.uuid.String()) - if _, err := os.Stat(dataFile); err != nil && !os.IsNotExist(err) { - if err != nil { - return errors.Annotatef(err, "failed to stat file %s", dataFile) - } - return errors.Errorf("file %s already exists", dataFile) - } - - dataWriter, err := createMetricFile(dataFile) - if err != nil { - return errors.Trace(err) - } - m.file = dataWriter - m.enc = json.NewEncoder(dataWriter) - return nil -} - -func checkSpoolDir(path string) error { - if _, err := os.Stat(path); os.IsNotExist(err) { - err := os.MkdirAll(path, 0755) - if err != nil { - return errors.Trace(err) - } - } else if err != nil { - return errors.Trace(err) - } - return nil -} - -func (m *JSONMetricRecorder) recordMetaData() error { - metaFile := filepath.Join(m.spoolDir, fmt.Sprintf("%s.meta", m.uuid.String())) - if _, err := os.Stat(metaFile); !os.IsNotExist(err) { - if err != nil { - return errors.Annotatef(err, "failed to stat file %s", metaFile) - } - return errors.Errorf("file %s already exists", metaFile) - } - - metadata := MetricsMetadata{ - CharmURL: m.charmURL, - UUID: m.uuid.String(), - Created: m.created, - } - // The use of a metricFile here ensures that the JSONMetricReader will only - // find a fully-written metafile. - metaWriter, err := createMetricFile(metaFile) - if err != nil { - return errors.Trace(err) - } - defer metaWriter.Close() - enc := json.NewEncoder(metaWriter) - err = enc.Encode(metadata) - if err != nil { - return errors.Trace(err) - } - return nil -} - -// JSONMetricsReader reads metrics batches stored in the spool directory. -type JSONMetricReader struct { - dir string -} - -// NewJSONMetricsReader creates a new JSON metrics reader for the specified spool directory. -func NewJSONMetricReader(spoolDir string) (*JSONMetricReader, error) { - if _, err := os.Stat(spoolDir); err != nil { - return nil, errors.Annotatef(err, "failed to open spool directory %q", spoolDir) - } - return &JSONMetricReader{ - dir: spoolDir, - }, nil -} - -// Read implements the MetricsReader interface. -// Due to the way the batches are stored in the file system, -// they will be returned in an arbitrary order. This does not affect the behavior. -func (r *JSONMetricReader) Read() ([]MetricBatch, error) { - var batches []MetricBatch - - walker := func(path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Trace(err) - } - if info.IsDir() && path != r.dir { - return filepath.SkipDir - } else if !strings.HasSuffix(info.Name(), ".meta") { - return nil - } - - batch, err := decodeBatch(path) - if err != nil { - return errors.Trace(err) - } - batch.Metrics, err = decodeMetrics(filepath.Join(r.dir, batch.UUID)) - if err != nil { - return errors.Trace(err) - } - if len(batch.Metrics) > 0 { - batches = append(batches, batch) - } - return nil - } - if err := filepath.Walk(r.dir, walker); err != nil { - return nil, errors.Trace(err) - } - return batches, nil -} - -// Remove implements the MetricsReader interface. 
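// Typical consumption of the spool, sketched with error handling elided
// and a caller-provided send function (the shipping transport is not
// part of this package): read every complete batch, ship it, then
// remove it by UUID. Because the .meta file is only renamed into place
// after the data file is closed, Read never observes a half-written batch.
//
//	batches, _ := reader.Read()
//	for _, b := range batches {
//		send(APIMetricBatch(b)) // send is an illustrative transport call
//		reader.Remove(b.UUID)
//	}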
-func (r *JSONMetricReader) Remove(uuid string) error { - metaFile := filepath.Join(r.dir, fmt.Sprintf("%s.meta", uuid)) - dataFile := filepath.Join(r.dir, uuid) - err := os.Remove(metaFile) - if err != nil && !os.IsNotExist(err) { - return errors.Trace(err) - } - err = os.Remove(dataFile) - if err != nil { - return errors.Trace(err) - } - return nil -} - -// Close implements the MetricsReader interface. -func (r *JSONMetricReader) Close() error { - return nil -} - -func decodeBatch(file string) (MetricBatch, error) { - var batch MetricBatch - f, err := os.Open(file) - if err != nil { - return MetricBatch{}, errors.Trace(err) - } - defer f.Close() - dec := json.NewDecoder(f) - err = dec.Decode(&batch) - if err != nil { - return MetricBatch{}, errors.Trace(err) - } - return batch, nil -} - -func decodeMetrics(file string) ([]jujuc.Metric, error) { - var metrics []jujuc.Metric - f, err := os.Open(file) - if err != nil { - return nil, errors.Trace(err) - } - defer f.Close() - dec := json.NewDecoder(f) - for { - var metric jujuc.Metric - err := dec.Decode(&metric) - if err == io.EOF { - break - } else if err != nil { - return nil, errors.Trace(err) - } - metrics = append(metrics, metric) - } - return metrics, nil -} === removed file 'src/github.com/juju/juju/worker/uniter/metrics/metrics_file_test.go' --- src/github.com/juju/juju/worker/uniter/metrics/metrics_file_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/metrics/metrics_file_test.go 1970-01-01 00:00:00 +0000 @@ -1,79 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package metrics - -import ( - "crypto/rand" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" -) - -type metricFileSuite struct { - spoolDir string -} - -var _ = gc.Suite(&metricFileSuite{}) - -func (s *metricFileSuite) SetUpTest(c *gc.C) { - s.spoolDir = c.MkDir() -} - -func cleanupFile(f *metricFile) { - if f != nil { - f.File.Close() - } -} - -func (s *metricFileSuite) TestRenameOnClose(c *gc.C) { - fileName := filepath.Join(s.spoolDir, "foo") - mf, err := createMetricFile(fileName) - c.Assert(err, gc.IsNil) - - _, err = io.CopyN(mf, rand.Reader, 78666) - c.Assert(err, gc.IsNil) - - _, err = os.Stat(fileName) - c.Assert(os.IsNotExist(err), jc.IsTrue) - - err = mf.Close() - c.Assert(err, gc.IsNil) - - st, err := os.Stat(fileName) - c.Assert(err, gc.IsNil) - c.Assert(st.Size(), gc.Equals, int64(78666)) -} - -func (s *metricFileSuite) TestContention(c *gc.C) { - fileName := filepath.Join(s.spoolDir, "foo") - mf1, err := createMetricFile(fileName) - c.Assert(err, gc.IsNil) - mf2, err := createMetricFile(fileName) - c.Assert(err, gc.IsNil) - - _, err = fmt.Fprint(mf1, "emacs") - c.Assert(err, gc.IsNil) - _, err = fmt.Fprint(mf2, "vi") - c.Assert(err, gc.IsNil) - - _, err = os.Stat(fileName) - c.Assert(os.IsNotExist(err), jc.IsTrue) - - err = mf2.Close() - c.Assert(err, gc.IsNil) - err = mf1.Close() - c.Assert(err, gc.NotNil) - - st, err := os.Stat(fileName) - c.Assert(err, gc.IsNil) - c.Assert(st.Size(), gc.Equals, int64(2)) - contents, err := ioutil.ReadFile(fileName) - c.Assert(err, gc.IsNil) - c.Assert(contents, gc.DeepEquals, []byte("vi")) -} === removed file 'src/github.com/juju/juju/worker/uniter/metrics/metrics_test.go' --- src/github.com/juju/juju/worker/uniter/metrics/metrics_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/metrics/metrics_test.go 1970-01-01 00:00:00 +0000 @@ -1,226 +0,0 
@@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package metrics_test - -import ( - "path/filepath" - "runtime" - "time" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/worker/uniter/metrics" - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -type MetricsBatchSuite struct { -} - -var _ = gc.Suite(&MetricsBatchSuite{}) - -func (s *MetricsBatchSuite) TestAPIMetricBatch(c *gc.C) { - batches := []metrics.MetricBatch{{ - CharmURL: "local:trusty/test-charm", - UUID: "test-uuid", - Created: time.Now(), - Metrics: []jujuc.Metric{ - { - Key: "test-key-1", - Value: "test-value-1", - Time: time.Now(), - }, { - Key: "test-key-2", - Value: "test-value-2", - Time: time.Now(), - }, - }, - }, { - CharmURL: "local:trusty/test-charm", - UUID: "test-uuid", - Created: time.Now(), - Metrics: []jujuc.Metric{}, - }, - } - for _, batch := range batches { - apiBatch := metrics.APIMetricBatch(batch) - c.Assert(apiBatch.UUID, gc.DeepEquals, batch.UUID) - c.Assert(apiBatch.CharmURL, gc.DeepEquals, batch.CharmURL) - c.Assert(apiBatch.Created, gc.DeepEquals, batch.Created) - c.Assert(len(apiBatch.Metrics), gc.Equals, len(batch.Metrics)) - for i, metric := range batch.Metrics { - c.Assert(metric.Key, gc.DeepEquals, apiBatch.Metrics[i].Key) - c.Assert(metric.Value, gc.DeepEquals, apiBatch.Metrics[i].Value) - c.Assert(metric.Time, gc.DeepEquals, apiBatch.Metrics[i].Time) - } - } -} - -func osDependentSockPath(c *gc.C) string { - sockPath := filepath.Join(c.MkDir(), "test.sock") - if runtime.GOOS == "windows" { - return `\\.\pipe` + sockPath[2:] - } - return sockPath -} - -// testPaths implements Paths for tests that do touch the filesystem. 
-type testPaths struct { - tools string - charm string - socket string - metricsspool string -} - -func newTestPaths(c *gc.C) testPaths { - return testPaths{ - tools: c.MkDir(), - charm: c.MkDir(), - socket: osDependentSockPath(c), - metricsspool: c.MkDir(), - } -} - -func (p testPaths) GetMetricsSpoolDir() string { - return p.metricsspool -} - -func (p testPaths) GetToolsDir() string { - return p.tools -} - -func (p testPaths) GetCharmDir() string { - return p.charm -} - -func (p testPaths) GetJujucSocket() string { - return p.socket -} - -type MetricsRecorderSuite struct { - testing.IsolationSuite - - paths testPaths -} - -var _ = gc.Suite(&MetricsRecorderSuite{}) - -func (s *MetricsRecorderSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.paths = newTestPaths(c) -} - -func (s *MetricsRecorderSuite) TestInit(c *gc.C) { - w, err := metrics.NewJSONMetricRecorder(s.paths.GetMetricsSpoolDir(), map[string]corecharm.Metric{"pings": corecharm.Metric{}}, "local:precise/wordpress") - c.Assert(err, jc.ErrorIsNil) - c.Assert(w, gc.NotNil) - err = w.AddMetric("pings", "5", time.Now()) - c.Assert(err, jc.ErrorIsNil) - err = w.Close() - c.Assert(err, jc.ErrorIsNil) - - r, err := metrics.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) - c.Assert(err, jc.ErrorIsNil) - batches, err := r.Read() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 1) - batch := batches[0] - c.Assert(batch.CharmURL, gc.Equals, "local:precise/wordpress") - c.Assert(batch.UUID, gc.Not(gc.Equals), "") - c.Assert(batch.Metrics, gc.HasLen, 1) - c.Assert(batch.Metrics[0].Key, gc.Equals, "pings") - c.Assert(batch.Metrics[0].Value, gc.Equals, "5") - - err = r.Close() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *MetricsRecorderSuite) TestUnknownMetricKey(c *gc.C) { - w, err := metrics.NewJSONMetricRecorder(s.paths.GetMetricsSpoolDir(), map[string]corecharm.Metric{}, "local:precise/wordpress") - c.Assert(err, jc.ErrorIsNil) - c.Assert(w, gc.NotNil) - err = w.AddMetric("pings", "5", time.Now()) - c.Assert(err, gc.ErrorMatches, `metric key "pings" not declared by the charm`) - err = w.Close() - c.Assert(err, jc.ErrorIsNil) - - r, err := metrics.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) - c.Assert(err, jc.ErrorIsNil) - batches, err := r.Read() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 0) -} - -type MetricsReaderSuite struct { - paths testPaths - - w *metrics.JSONMetricRecorder -} - -var _ = gc.Suite(&MetricsReaderSuite{}) - -func (s *MetricsReaderSuite) SetUpTest(c *gc.C) { - s.paths = newTestPaths(c) - - var err error - s.w, err = metrics.NewJSONMetricRecorder( - s.paths.GetMetricsSpoolDir(), - map[string]corecharm.Metric{"pings": corecharm.Metric{}}, - "local:precise/wordpress") - - c.Assert(err, jc.ErrorIsNil) - err = s.w.AddMetric("pings", "5", time.Now()) - c.Assert(err, jc.ErrorIsNil) - err = s.w.Close() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *MetricsReaderSuite) TestTwoSimultaneousReaders(c *gc.C) { - r, err := metrics.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) - c.Assert(err, jc.ErrorIsNil) - - r2, err := metrics.NewJSONMetricReader(c.MkDir()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(r2, gc.NotNil) - err = r2.Close() - c.Assert(err, jc.ErrorIsNil) - err = r.Close() - c.Assert(err, jc.ErrorIsNil) - -} - -func (s *MetricsReaderSuite) TestUnblockedReaders(c *gc.C) { - r, err := metrics.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) - c.Assert(err, jc.ErrorIsNil) - err = r.Close() - c.Assert(err, jc.ErrorIsNil) - - r2, err := 
metrics.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) - c.Assert(err, jc.ErrorIsNil) - c.Assert(r2, gc.NotNil) - err = r2.Close() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *MetricsReaderSuite) TestRemoval(c *gc.C) { - r, err := metrics.NewJSONMetricReader(s.paths.GetMetricsSpoolDir()) - c.Assert(err, jc.ErrorIsNil) - - batches, err := r.Read() - c.Assert(err, jc.ErrorIsNil) - for _, batch := range batches { - err := r.Remove(batch.UUID) - c.Assert(err, jc.ErrorIsNil) - } - err = r.Close() - c.Assert(err, jc.ErrorIsNil) - - batches, err = r.Read() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 0) - err = r.Close() - c.Assert(err, jc.ErrorIsNil) -} === removed file 'src/github.com/juju/juju/worker/uniter/metrics/package_test.go' --- src/github.com/juju/juju/worker/uniter/metrics/package_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/metrics/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package metrics_test - -import ( - stdtesting "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *stdtesting.T) { - gc.TestingT(t) -} === added file 'src/github.com/juju/juju/worker/uniter/mock_test.go' --- src/github.com/juju/juju/worker/uniter/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,38 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package uniter_test + +import ( + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/relation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" + "github.com/juju/juju/worker/uniter/storage" +) + +type dummyRelations struct { + relation.Relations +} + +func (*dummyRelations) NextHook(_ resolver.LocalState, _ remotestate.Snapshot) (hook.Info, error) { + return hook.Info{}, resolver.ErrNoOperation +} + +type dummyStorageAccessor struct { + storage.StorageAccessor +} + +func (*dummyStorageAccessor) UnitStorageAttachments(_ names.UnitTag) ([]params.StorageAttachmentId, error) { + return nil, nil +} + +type nopResolver struct{} + +func (nopResolver) NextOp(resolver.LocalState, remotestate.Snapshot, operation.Factory) (operation.Operation, error) { + return nil, resolver.ErrNoOperation +} === removed file 'src/github.com/juju/juju/worker/uniter/modes.go' --- src/github.com/juju/juju/worker/uniter/modes.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/modes.go 1970-01-01 00:00:00 +0000 @@ -1,581 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "fmt" - "time" - - "github.com/juju/errors" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" - "launchpad.net/tomb" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state/watcher" - "github.com/juju/juju/worker" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/operation" -) - -// setAgentStatus sets the unit's status if it has changed since last time this method was called. 
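// Repeated identical updates are dropped, so the mode loops below can
// call this freely; only genuine transitions reach the API server. For
// example (illustrative):
//
//	setAgentStatus(u, params.StatusIdle, "", nil)               // reported
//	setAgentStatus(u, params.StatusIdle, "", nil)               // no-op: unchanged
//	setAgentStatus(u, params.StatusFailed, "hook failed", nil)  // reported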
-func setAgentStatus(u *Uniter, status params.Status, info string, data map[string]interface{}) error { - u.setStatusMutex.Lock() - defer u.setStatusMutex.Unlock() - if u.lastReportedStatus == status && u.lastReportedMessage == info { - return nil - } - u.lastReportedStatus = status - u.lastReportedMessage = info - logger.Debugf("[AGENT-STATUS] %s: %s", status, info) - return u.unit.SetAgentStatus(status, info, data) -} - -// reportAgentError reports if there was an error performing an agent operation. -func reportAgentError(u *Uniter, userMessage string, err error) { - // If a non-nil error is reported (e.g. due to an operation failing), - // set the agent status to Failed. - if err == nil { - return - } - err2 := setAgentStatus(u, params.StatusFailed, userMessage, nil) - if err2 != nil { - logger.Errorf("updating agent status: %v", err2) - } -} - -// Mode defines the signature of the functions that implement the possible -// states of a running Uniter. -type Mode func(u *Uniter) (Mode, error) - -// ModeContinue determines what action to take based on persistent uniter state. -func ModeContinue(u *Uniter) (next Mode, err error) { - defer modeContext("ModeContinue", &err)() - opState := u.operationState() - - // Resume interrupted deployment operations. - if opState.Kind == operation.Install { - logger.Infof("resuming charm install") - return ModeInstalling(opState.CharmURL) - } else if opState.Kind == operation.Upgrade { - logger.Infof("resuming charm upgrade") - return ModeUpgrading(opState.CharmURL), nil - } - - // If we got this far, we should have an installed charm, - // so initialize the metrics timers according to what's - // currently deployed. - if err := u.initializeMetricsTimers(); err != nil { - return nil, errors.Trace(err) - } - - // Check for any leadership change, and enact it if possible. - logger.Infof("checking leadership status") - // If we've already accepted leadership, we don't need to do it again. - canAcceptLeader := !opState.Leader - select { - // If the unit's shutting down, we shouldn't accept it. - case <-u.f.UnitDying(): - canAcceptLeader = false - default: - // If we're in an unexpected mode (eg pending hook) we shouldn't try either. - if opState.Kind != operation.Continue { - canAcceptLeader = false - } - } - - // NOTE: the Wait() looks scary, but a ClaimLeadership ticket should always - // complete quickly; worst-case is API latency time, but it's designed that - // it should be vanishingly rare to hit that code path. - isLeader := u.leadershipTracker.ClaimLeader().Wait() - var creator creator - switch { - case isLeader && canAcceptLeader: - creator = newAcceptLeadershipOp() - case opState.Leader && !isLeader: - creator = newResignLeadershipOp() - } - if creator != nil { - return continueAfter(u, creator) - } - logger.Infof("leadership status is up-to-date") - - switch opState.Kind { - case operation.RunAction: - // TODO(fwereade): we *should* handle interrupted actions, and make sure - // they're marked as failed, but that's not for now. 
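// The recovery here is deliberately conservative: the interrupted
// action is ignored rather than retried, and any hook that was in
// flight beforehand is recommitted as a skip operation, so local state
// converges without re-running the hook.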
- if opState.Hook != nil { - logger.Infof("found incomplete action %q; ignoring", opState.ActionId) - logger.Infof("recommitting prior %q hook", opState.Hook.Kind) - creator = newSkipHookOp(*opState.Hook) - } else { - logger.Infof("%q hook is nil", operation.RunAction) - } - case operation.RunHook: - switch opState.Step { - case operation.Pending: - logger.Infof("awaiting error resolution for %q hook", opState.Hook.Kind) - return ModeHookError, nil - case operation.Queued: - logger.Infof("found queued %q hook", opState.Hook.Kind) - // Ensure storage-attached hooks are run before install - // or upgrade hooks. - switch opState.Hook.Kind { - case hooks.UpgradeCharm: - // Force a refresh of all storage attachments, - // so we find out about new ones introduced - // by the charm upgrade. - if err := u.storage.Refresh(); err != nil { - return nil, errors.Trace(err) - } - fallthrough - case hooks.Install: - if err := waitStorage(u); err != nil { - return nil, errors.Trace(err) - } - } - creator = newRunHookOp(*opState.Hook) - case operation.Done: - logger.Infof("committing %q hook", opState.Hook.Kind) - creator = newSkipHookOp(*opState.Hook) - } - case operation.Continue: - if opState.Stopped { - logger.Infof("opState.Stopped == true; transition to ModeTerminating") - return ModeTerminating, nil - } - logger.Infof("no operations in progress; waiting for changes") - return ModeAbide, nil - default: - return nil, errors.Errorf("unknown operation kind %v", opState.Kind) - } - return continueAfter(u, creator) -} - -// ModeInstalling is responsible for the initial charm deployment. If an install -// operation were to set an appropriate status, it shouldn't be necessary; but see -// ModeUpgrading for discussion relevant to both. -func ModeInstalling(curl *charm.URL) (next Mode, err error) { - name := fmt.Sprintf("ModeInstalling %s", curl) - return func(u *Uniter) (next Mode, err error) { - defer modeContext(name, &err)() - return continueAfter(u, newInstallOp(curl)) - }, nil -} - -// ModeUpgrading is responsible for upgrading the charm. It shouldn't really -// need to be a mode at all -- it's just running a single operation -- but -// it's not safe to call it inside arbitrary other modes, because failing to -// pass through ModeContinue on the way out could cause a queued hook to be -// accidentally skipped. -func ModeUpgrading(curl *charm.URL) Mode { - name := fmt.Sprintf("ModeUpgrading %s", curl) - return func(u *Uniter) (next Mode, err error) { - defer modeContext(name, &err)() - return continueAfter(u, newUpgradeOp(curl)) - } -} - -// ModeTerminating marks the unit dead and returns ErrTerminateAgent. -func ModeTerminating(u *Uniter) (next Mode, err error) { - defer modeContext("ModeTerminating", &err)() - w, err := u.unit.Watch() - if err != nil { - return nil, errors.Trace(err) - } - defer watcher.Stop(w, &u.tomb) - - // Upon unit termination we attempt to send any leftover metrics one last time. If we fail, there is nothing - // else we can do but log the error. 
- sendErr := u.runOperation(newSendMetricsOp()) - if sendErr != nil { - logger.Warningf("failed to send metrics: %v", sendErr) - } - - for { - select { - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case creator := <-u.runCommands: - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - case actionId := <-u.f.ActionEvents(): - creator := newActionOp(actionId) - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - case _, ok := <-w.Changes(): - if !ok { - return nil, watcher.EnsureErr(w) - } - if err := u.unit.Refresh(); err != nil { - return nil, errors.Trace(err) - } - if hasSubs, err := u.unit.HasSubordinates(); err != nil { - return nil, errors.Trace(err) - } else if hasSubs { - continue - } - // The unit is known to be Dying; so if it didn't have subordinates - // just above, it can't acquire new ones before this call. - if err := u.unit.EnsureDead(); err != nil { - return nil, errors.Trace(err) - } - return nil, worker.ErrTerminateAgent - } - } -} - -// ModeAbide is the Uniter's usual steady state. It watches for and responds to: -// * service configuration changes -// * charm upgrade requests -// * relation changes -// * unit death -// * acquisition or loss of service leadership -func ModeAbide(u *Uniter) (next Mode, err error) { - defer modeContext("ModeAbide", &err)() - opState := u.operationState() - if opState.Kind != operation.Continue { - return nil, errors.Errorf("insane uniter state: %#v", opState) - } - if err := u.deployer.Fix(); err != nil { - return nil, errors.Trace(err) - } - - if !opState.Leader && !u.ranLeaderSettingsChanged { - creator := newSimpleRunHookOp(hook.LeaderSettingsChanged) - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - } - - if !u.ranConfigChanged { - return continueAfter(u, newSimpleRunHookOp(hooks.ConfigChanged)) - } - if !opState.Started { - return continueAfter(u, newSimpleRunHookOp(hooks.Start)) - } - u.f.WantUpgradeEvent(false) - u.relations.StartHooks() - defer func() { - if e := u.relations.StopHooks(); e != nil { - if err == nil { - err = e - } else { - logger.Errorf("error while stopping hooks: %v", e) - } - } - }() - - select { - case <-u.f.UnitDying(): - return modeAbideDyingLoop(u) - default: - } - return modeAbideAliveLoop(u) -} - -// idleWaitTime is the time after which, if there are no uniter events, -// the agent state becomes idle. -var idleWaitTime = 2 * time.Second - -// modeAbideAliveLoop handles all state changes for ModeAbide when the unit -// is in an Alive state. -func modeAbideAliveLoop(u *Uniter) (Mode, error) { - var leaderElected, leaderDeposed <-chan struct{} - for { - // We expect one or none of these vars to be non-nil; and if none - // are, we set the one that should trigger when our leadership state - // differs from what we have recorded locally. 
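[Editorial note: ModeAbide's deferred StopHooks call above resolves the classic two-error problem: a cleanup failure becomes the returned error only when the body itself succeeded, and is merely logged otherwise. A self-contained sketch of that convention, with invented names:

package main

import (
	"errors"
	"fmt"
)

// withCleanup runs body, then cleanup; a cleanup failure is returned only
// when body itself succeeded, otherwise it is logged and the primary
// error wins.
func withCleanup(body, cleanup func() error) (err error) {
	defer func() {
		if e := cleanup(); e != nil {
			if err == nil {
				err = e
			} else {
				fmt.Println("error while cleaning up:", e)
			}
		}
	}()
	return body()
}

func main() {
	err := withCleanup(
		func() error { return errors.New("primary failure") },
		func() error { return errors.New("cleanup failure") },
	)
	fmt.Println(err) // primary failure wins; cleanup failure was logged
}

End of note.]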
- if leaderElected == nil && leaderDeposed == nil { - if u.operationState().Leader { - logger.Infof("waiting to lose leadership") - leaderDeposed = u.leadershipTracker.WaitMinion().Ready() - } else { - logger.Infof("waiting to gain leadership") - leaderElected = u.leadershipTracker.WaitLeader().Ready() - } - } - - // collect-metrics hook - lastCollectMetrics := time.Unix(u.operationState().CollectMetricsTime, 0) - collectMetricsSignal := u.collectMetricsAt( - time.Now(), lastCollectMetrics, metricsPollInterval, - ) - - lastSentMetrics := time.Unix(u.operationState().SendMetricsTime, 0) - sendMetricsSignal := u.sendMetricsAt( - time.Now(), lastSentMetrics, metricsSendInterval, - ) - - // update-status hook - lastUpdateStatus := time.Unix(u.operationState().UpdateStatusTime, 0) - updateStatusSignal := u.updateStatusAt( - time.Now(), lastUpdateStatus, statusPollInterval, - ) - - var creator creator - select { - case <-time.After(idleWaitTime): - if err := setAgentStatus(u, params.StatusIdle, "", nil); err != nil { - return nil, errors.Trace(err) - } - continue - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case <-u.f.UnitDying(): - return modeAbideDyingLoop(u) - case curl := <-u.f.UpgradeEvents(): - return ModeUpgrading(curl), nil - case ids := <-u.f.RelationsEvents(): - creator = newUpdateRelationsOp(ids) - case creator = <-u.runCommands: - case actionId := <-u.f.ActionEvents(): - creator = newActionOp(actionId) - case tags := <-u.f.StorageEvents(): - creator = newUpdateStorageOp(tags) - case <-u.f.ConfigEvents(): - creator = newSimpleRunHookOp(hooks.ConfigChanged) - case <-u.f.MeterStatusEvents(): - creator = newSimpleRunHookOp(hooks.MeterStatusChanged) - case <-collectMetricsSignal: - creator = newSimpleRunHookOp(hooks.CollectMetrics) - case <-sendMetricsSignal: - creator = newSendMetricsOp() - case <-updateStatusSignal: - creator = newSimpleRunHookOp(hooks.UpdateStatus) - case hookInfo := <-u.relations.Hooks(): - creator = newRunHookOp(hookInfo) - case hookInfo := <-u.storage.Hooks(): - creator = newRunHookOp(hookInfo) - case <-leaderElected: - // This operation queues a hook, better to let ModeContinue pick up - // after it than to duplicate queued-hook handling here. - return continueAfter(u, newAcceptLeadershipOp()) - case <-leaderDeposed: - leaderDeposed = nil - creator = newResignLeadershipOp() - case <-u.f.LeaderSettingsEvents(): - creator = newSimpleRunHookOp(hook.LeaderSettingsChanged) - } - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - } -} - -// modeAbideDyingLoop handles the proper termination of all relations in -// response to a Dying unit. -func modeAbideDyingLoop(u *Uniter) (next Mode, err error) { - if err := u.unit.Refresh(); err != nil { - return nil, errors.Trace(err) - } - if err = u.unit.DestroyAllSubordinates(); err != nil { - return nil, errors.Trace(err) - } - if err := u.relations.SetDying(); err != nil { - return nil, errors.Trace(err) - } - if u.operationState().Leader { - if err := u.runOperation(newResignLeadershipOp()); err != nil { - return nil, errors.Trace(err) - } - // TODO(fwereade): we ought to inform the tracker that we're shutting down - // (and no longer wish to continue renewing our lease) so that the tracker - // can then report minionhood at all times, and thus prevent the is-leader - // and leader-set hook tools from acting in a correct but misleading way - // (ie continuing to act as though leader after leader-deposed has run). 
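[Editorial note: the three periodic hooks above (collect-metrics, send-metrics, update-status) are all driven the same way: a last-run timestamp is persisted in operation state, and each loop iteration derives a fresh signal channel from it. A sketch of that derivation assuming a time.After-based implementation; the real signal functions are injected into the Uniter and are not shown in this diff:

package main

import (
	"fmt"
	"time"
)

// signalAt returns a channel that fires once interval has elapsed since
// lastRun; if it already has, the channel fires almost immediately.
func signalAt(now, lastRun time.Time, interval time.Duration) <-chan time.Time {
	wait := interval - now.Sub(lastRun)
	if wait < 0 {
		wait = 0
	}
	return time.After(wait)
}

func main() {
	last := time.Now().Add(-3 * time.Second) // pretend the hook last ran 3s ago
	select {
	case t := <-signalAt(time.Now(), last, time.Second):
		fmt.Println("hook due at", t)
	case <-time.After(time.Second):
		fmt.Println("not due yet")
	}
}

End of note.]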
- } - if err := u.storage.SetDying(); err != nil { - return nil, errors.Trace(err) - } - for { - if len(u.relations.GetInfo()) == 0 && u.storage.Empty() { - return continueAfter(u, newSimpleRunHookOp(hooks.Stop)) - } - var creator creator - select { - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case creator = <-u.runCommands: - case actionId := <-u.f.ActionEvents(): - creator = newActionOp(actionId) - case <-u.f.ConfigEvents(): - creator = newSimpleRunHookOp(hooks.ConfigChanged) - case <-u.f.LeaderSettingsEvents(): - creator = newSimpleRunHookOp(hook.LeaderSettingsChanged) - case hookInfo := <-u.relations.Hooks(): - creator = newRunHookOp(hookInfo) - case hookInfo := <-u.storage.Hooks(): - creator = newRunHookOp(hookInfo) - } - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - } -} - -// waitStorage waits until all storage attachments are provisioned -// and their hooks processed. -func waitStorage(u *Uniter) error { - if u.storage.Pending() == 0 { - return nil - } - logger.Infof("waiting for storage attachments") - for u.storage.Pending() > 0 { - var creator creator - select { - case <-u.tomb.Dying(): - return tomb.ErrDying - case <-u.f.UnitDying(): - // Unit is shutting down; no need to handle any - // more storage-attached hooks. We will process - // required storage-detaching hooks in ModeAbideDying. - return nil - case tags := <-u.f.StorageEvents(): - creator = newUpdateStorageOp(tags) - case hookInfo := <-u.storage.Hooks(): - creator = newRunHookOp(hookInfo) - } - if err := u.runOperation(creator); err != nil { - return errors.Trace(err) - } - } - logger.Infof("storage attachments ready") - return nil -} - -// ModeHookError is responsible for watching and responding to: -// * user resolution of hook errors -// * forced charm upgrade requests -// * loss of service leadership -func ModeHookError(u *Uniter) (next Mode, err error) { - defer modeContext("ModeHookError", &err)() - opState := u.operationState() - if opState.Kind != operation.RunHook || opState.Step != operation.Pending { - return nil, errors.Errorf("insane uniter state: %#v", u.operationState()) - } - - // Create error information for status. - hookInfo := *opState.Hook - hookName := string(hookInfo.Kind) - statusData := map[string]interface{}{} - if hookInfo.Kind.IsRelation() { - statusData["relation-id"] = hookInfo.RelationId - if hookInfo.RemoteUnit != "" { - statusData["remote-unit"] = hookInfo.RemoteUnit - } - relationName, err := u.relations.Name(hookInfo.RelationId) - if err != nil { - return nil, errors.Trace(err) - } - hookName = fmt.Sprintf("%s-%s", relationName, hookInfo.Kind) - } - statusData["hook"] = hookName - statusMessage := fmt.Sprintf("hook failed: %q", hookName) - - // Run the select loop. - u.f.WantResolvedEvent() - u.f.WantUpgradeEvent(true) - var leaderDeposed <-chan struct{} - if opState.Leader { - leaderDeposed = u.leadershipTracker.WaitMinion().Ready() - } - for { - // The spec says we should set the workload status to Error, but that's crazy talk. - // It's the agent itself that should be in Error state. So we'll ensure the model is - // correct and translate before the user sees the data. - // ie a charm hook error results in agent error status, but is presented as a workload error. 
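[Editorial note: waitStorage above is a bounded drain loop: keep servicing storage events until nothing is pending, bailing out early if the unit starts dying. Reduced to its skeleton, with illustrative names and an in-memory pending counter:

package main

import "fmt"

// waitPending services events until no attachments remain pending,
// mirroring the shape of waitStorage above.
func waitPending(pending *int, events <-chan string, dying <-chan struct{}) error {
	for *pending > 0 {
		select {
		case <-dying:
			return fmt.Errorf("aborted: unit dying")
		case ev := <-events:
			fmt.Println("handled", ev)
			*pending-- // each storage-attached hook reduces the pending count
		}
	}
	return nil
}

func main() {
	pending := 2
	events := make(chan string, 2)
	events <- "storage-attached/0"
	events <- "storage-attached/1"
	if err := waitPending(&pending, events, nil); err == nil {
		fmt.Println("all storage attachments ready")
	}
}

End of note.]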
- if err = setAgentStatus(u, params.StatusError, statusMessage, statusData); err != nil { - return nil, errors.Trace(err) - } - select { - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case curl := <-u.f.UpgradeEvents(): - return ModeUpgrading(curl), nil - case rm := <-u.f.ResolvedEvents(): - var creator creator - switch rm { - case params.ResolvedRetryHooks: - creator = newRetryHookOp(hookInfo) - case params.ResolvedNoHooks: - creator = newSkipHookOp(hookInfo) - default: - return nil, errors.Errorf("unknown resolved mode %q", rm) - } - err := u.runOperation(creator) - if errors.Cause(err) == operation.ErrHookFailed { - continue - } else if err != nil { - return nil, errors.Trace(err) - } - return ModeContinue, nil - case creator := <-u.runCommands: - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - case actionId := <-u.f.ActionEvents(): - if err := u.runOperation(newActionOp(actionId)); err != nil { - return nil, errors.Trace(err) - } - case <-leaderDeposed: - // This should trigger at most once -- we can't reaccept leadership while - // in an error state. - leaderDeposed = nil - if err := u.runOperation(newResignLeadershipOp()); err != nil { - return nil, errors.Trace(err) - } - } - } -} - -// ModeConflicted is responsible for watching and responding to: -// * user resolution of charm upgrade conflicts -// * forced charm upgrade requests -func ModeConflicted(curl *charm.URL) Mode { - return func(u *Uniter) (next Mode, err error) { - defer modeContext("ModeConflicted", &err)() - // TODO(mue) Add helpful data here too in later CL. - // The spec says we should set the workload status to Error, but that's crazy talk. - // It's the agent itself that should be in Error state. So we'll ensure the model is - // correct and translate before the user sees the data. - // ie a charm upgrade error results in agent error status, but is presented as a workload error. - if err := setAgentStatus(u, params.StatusError, "upgrade failed", nil); err != nil { - return nil, errors.Trace(err) - } - u.f.WantResolvedEvent() - u.f.WantUpgradeEvent(true) - var creator creator - select { - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case curl = <-u.f.UpgradeEvents(): - creator = newRevertUpgradeOp(curl) - case <-u.f.ResolvedEvents(): - creator = newResolvedUpgradeOp(curl) - } - return continueAfter(u, creator) - } -} - -// modeContext returns a function that implements logging and common error -// manipulation for Mode funcs. -func modeContext(name string, err *error) func() { - logger.Infof("%s starting", name) - return func() { - logger.Infof("%s exiting", name) - *err = errors.Annotatef(*err, name) - } -} - -// continueAfter is commonly used at the end of a Mode func to execute the -// operation returned by creator and return ModeContinue (or any error). 
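[Editorial note: modeContext is worth calling out: one helper gives every Mode consistent entry/exit logging plus error annotation, via a deferred closure over a named return value. A minimal reimplementation using only the standard library; the original uses juju's errors.Annotatef, which likewise leaves a nil error untouched:

package main

import "fmt"

// modeContext logs entry immediately and returns a closure that, when
// deferred, logs exit and prefixes any returned error with the mode name.
func modeContext(name string, err *error) func() {
	fmt.Println(name, "starting")
	return func() {
		fmt.Println(name, "exiting")
		if *err != nil {
			*err = fmt.Errorf("%s: %w", name, *err)
		}
	}
}

func someMode() (err error) {
	defer modeContext("ModeExample", &err)()
	return fmt.Errorf("boom")
}

func main() {
	fmt.Println(someMode()) // ModeExample: boom
}

End of note.]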
-func continueAfter(u *Uniter, creator creator) (Mode, error) { - if err := u.runOperation(creator); err != nil { - return nil, errors.Trace(err) - } - return ModeContinue, nil -} === modified file 'src/github.com/juju/juju/worker/uniter/op_callbacks.go' --- src/github.com/juju/juju/worker/uniter/op_callbacks.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/op_callbacks.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,8 @@ "github.com/juju/errors" "github.com/juju/names" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/charm" @@ -45,9 +45,11 @@ // TODO(axw) if the agent is not installed yet, // set the status to "preparing storage". case hi.Kind == hooks.ConfigChanged: - opc.u.f.DiscardConfigEvent() + // TODO(axw) + //opc.u.f.DiscardConfigEvent() case hi.Kind == hook.LeaderSettingsChanged: - opc.u.f.DiscardLeaderSettingsEvent() + // TODO(axw) + //opc.u.f.DiscardLeaderSettingsEvent() } return name, nil } @@ -59,21 +61,12 @@ return opc.u.relations.CommitHook(hi) case hi.Kind.IsStorage(): return opc.u.storage.CommitHook(hi) - case hi.Kind == hooks.ConfigChanged: - opc.u.ranConfigChanged = true - case hi.Kind == hook.LeaderSettingsChanged: - opc.u.ranLeaderSettingsChanged = true } return nil } -// UpdateRelations is part of the operation.Callbacks interface. -func (opc *operationCallbacks) UpdateRelations(ids []int) error { - return opc.u.relations.Update(ids) -} - func notifyHook(hook string, ctx runner.Context, method func(string)) { - if r, ok := ctx.HookRelation(); ok { + if r, err := ctx.HookRelation(); err == nil { remote, _ := ctx.RemoteUnitName() if remote != "" { remote = " " + remote @@ -121,17 +114,7 @@ // SetCurrentCharm is part of the operation.Callbacks interface. func (opc *operationCallbacks) SetCurrentCharm(charmURL *corecharm.URL) error { - return opc.u.f.SetCharm(charmURL) -} - -// ClearResolvedFlag is part of the operation.Callbacks interface. -func (opc *operationCallbacks) ClearResolvedFlag() error { - return opc.u.f.ClearResolved() -} - -// InitializeMetricsTimers is part of the operation.Callbacks interface. -func (opc *operationCallbacks) InitializeMetricsTimers() error { - return opc.u.initializeMetricsTimers() + return opc.u.unit.SetCharmURL(charmURL) } // SetExecutingStatus is part of the operation.Callbacks interface. === removed file 'src/github.com/juju/juju/worker/uniter/op_plumbing.go' --- src/github.com/juju/juju/worker/uniter/op_plumbing.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/op_plumbing.go 1970-01-01 00:00:00 +0000 @@ -1,113 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "github.com/juju/names" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/operation" -) - -// creator exists primarily to make the implementation of the Mode funcs more -// readable -- the general pattern is to switch to get a creator func (which -// doesn't allow for the possibility of error) and then to pass the chosen -// creator down to runOperation (which can then consistently create and run -// all the operations in the same way). 
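[Editorial note: the removed plumbing that follows is dozens of near-identical closures, all instances of the creator pattern the comment above describes: each helper captures its arguments in a closure, so the caller can construct and run operations uniformly through a factory without per-call error handling. A compressed sketch with toy types:

package main

import "fmt"

type Operation interface{ String() string }

type Factory interface {
	NewRunHook(kind string) (Operation, error)
}

// creator defers both construction and its possible error to runOperation.
type creator func(Factory) (Operation, error)

func newSimpleRunHookOp(kind string) creator {
	return func(f Factory) (Operation, error) { return f.NewRunHook(kind) }
}

type fakeOp struct{ name string }

func (o fakeOp) String() string { return o.name }

type fakeFactory struct{}

func (fakeFactory) NewRunHook(kind string) (Operation, error) {
	return fakeOp{"run " + kind + " hook"}, nil
}

// runOperation creates and runs an operation the same way every time,
// whichever creator was chosen.
func runOperation(f Factory, c creator) error {
	op, err := c(f)
	if err != nil {
		return err
	}
	fmt.Println("running:", op)
	return nil
}

func main() {
	_ = runOperation(fakeFactory{}, newSimpleRunHookOp("config-changed"))
}

End of note.]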
-type creator func(factory operation.Factory) (operation.Operation, error) - -// The following creator functions are all just dumb plumbing to support the -// Mode funcs. - -func newInstallOp(charmURL *charm.URL) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewInstall(charmURL) - } -} - -func newUpgradeOp(charmURL *charm.URL) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewUpgrade(charmURL) - } -} - -func newRevertUpgradeOp(charmURL *charm.URL) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewRevertUpgrade(charmURL) - } -} - -func newResolvedUpgradeOp(charmURL *charm.URL) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewResolvedUpgrade(charmURL) - } -} - -func newSimpleRunHookOp(kind hooks.Kind) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewRunHook(hook.Info{Kind: kind}) - } -} - -func newRunHookOp(hookInfo hook.Info) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewRunHook(hookInfo) - } -} - -func newRetryHookOp(hookInfo hook.Info) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewRetryHook(hookInfo) - } -} - -func newSkipHookOp(hookInfo hook.Info) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewSkipHook(hookInfo) - } -} - -func newCommandsOp(args operation.CommandArgs, sendResponse operation.CommandResponseFunc) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewCommands(args, sendResponse) - } -} - -func newActionOp(actionId string) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewAction(actionId) - } -} - -func newUpdateRelationsOp(ids []int) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewUpdateRelations(ids) - } -} - -func newUpdateStorageOp(tags []names.StorageTag) creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewUpdateStorage(tags) - } -} - -func newAcceptLeadershipOp() creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewAcceptLeadership() - } -} - -func newResignLeadershipOp() creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewResignLeadership() - } -} - -func newSendMetricsOp() creator { - return func(factory operation.Factory) (operation.Operation, error) { - return factory.NewSendMetrics() - } -} === modified file 'src/github.com/juju/juju/worker/uniter/operation/deploy.go' --- src/github.com/juju/juju/worker/uniter/operation/deploy.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/deploy.go 2016-03-22 15:18:22 +0000 @@ -7,8 +7,8 @@ "fmt" "github.com/juju/errors" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/charm" "github.com/juju/juju/worker/uniter/hook" @@ -43,7 +43,7 @@ return fmt.Sprintf("%s%s %s", prefix, verb, d.charmURL) } -// Prepare downloads and verifies the charm, and informs the state server +// Prepare downloads and verifies the charm, and informs the 
controller // that the unit will be using it. If the supplied state indicates that a // hook was pending, that hook is recorded in the returned state. // Prepare is part of the Operation interface. @@ -71,10 +71,10 @@ // note: yes, this *should* be in Prepare, not Execute. Before we can safely // write out local state referencing the charm url (by returning the new // State to the Executor, below), we have to register our interest in that - // charm on the state server. If we neglected to do so, the operation could - // race with a new service-charm-url change on the state server, and lead to + // charm on the controller. If we neglected to do so, the operation could + // race with a new service-charm-url change on the controller, and lead to // failures on resume in which we try to obtain archive info for a charm that - // has already been removed from the state server. + // has already been removed from the controller. if err := d.callbacks.SetCurrentCharm(d.charmURL); err != nil { return nil, errors.Trace(err) } @@ -96,9 +96,6 @@ // Commit restores state for any interrupted hook, or queues an install or // upgrade-charm hook if no hook was interrupted. func (d *deploy) Commit(state State) (*State, error) { - if err := d.callbacks.InitializeMetricsTimers(); err != nil { - return nil, errors.Trace(err) - } change := &stateChange{ Kind: RunHook, } === modified file 'src/github.com/juju/juju/worker/uniter/operation/deploy_test.go' --- src/github.com/juju/juju/worker/uniter/operation/deploy_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/deploy_test.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,8 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/charm" "github.com/juju/juju/worker/uniter/hook" @@ -25,11 +25,9 @@ type newDeploy func(operation.Factory, *corecharm.URL) (operation.Operation, error) func (s *DeploySuite) testPrepareAlreadyDone( - c *gc.C, newDeploy newDeploy, kind operation.Kind, expectClearResolvedFlag bool, + c *gc.C, newDeploy newDeploy, kind operation.Kind, ) { - callbacks := &DeployCallbacks{ - MockClearResolvedFlag: &MockNoArgs{}, - } + callbacks := &DeployCallbacks{} factory := operation.NewFactory(operation.FactoryParams{Callbacks: callbacks}) op, err := newDeploy(factory, curl("cs:quantal/hive-23")) c.Assert(err, jc.ErrorIsNil) @@ -40,14 +38,12 @@ }) c.Check(newState, gc.IsNil) c.Check(errors.Cause(err), gc.Equals, operation.ErrSkipExecute) - c.Check(callbacks.MockClearResolvedFlag.called, gc.Equals, expectClearResolvedFlag) } func (s *DeploySuite) TestPrepareAlreadyDone_Install(c *gc.C) { s.testPrepareAlreadyDone(c, (operation.Factory).NewInstall, operation.Install, - false, ) } @@ -55,7 +51,6 @@ s.testPrepareAlreadyDone(c, (operation.Factory).NewUpgrade, operation.Upgrade, - false, ) } @@ -63,7 +58,6 @@ s.testPrepareAlreadyDone(c, (operation.Factory).NewRevertUpgrade, operation.Upgrade, - true, ) } @@ -71,37 +65,13 @@ s.testPrepareAlreadyDone(c, (operation.Factory).NewResolvedUpgrade, operation.Upgrade, - true, ) } -func (s *DeploySuite) testClearResolvedFlagError(c *gc.C, newDeploy newDeploy) { - callbacks := &DeployCallbacks{ - MockClearResolvedFlag: &MockNoArgs{err: errors.New("blort")}, - } - factory := operation.NewFactory(operation.FactoryParams{Callbacks: callbacks}) - op, err := newDeploy(factory, 
curl("cs:quantal/hive-23")) - c.Assert(err, jc.ErrorIsNil) - newState, err := op.Prepare(operation.State{}) - c.Check(newState, gc.IsNil) - c.Check(err, gc.ErrorMatches, "blort") - c.Check(callbacks.MockClearResolvedFlag.called, jc.IsTrue) -} - -func (s *DeploySuite) TestClearResolvedFlagError_RevertUpgrade(c *gc.C) { - s.testClearResolvedFlagError(c, (operation.Factory).NewRevertUpgrade) -} - -func (s *DeploySuite) TestClearResolvedFlagError_ResolvedUpgrade(c *gc.C) { - s.testClearResolvedFlagError(c, (operation.Factory).NewResolvedUpgrade) -} - func (s *DeploySuite) testNotifyDeployerError( c *gc.C, newDeploy newDeploy, expectNotifyRevert bool, ) { - callbacks := &DeployCallbacks{ - MockClearResolvedFlag: &MockNoArgs{}, - } + callbacks := &DeployCallbacks{} deployer := &MockDeployer{} expectCall := &MockNoArgs{err: errors.New("snh")} if expectNotifyRevert { @@ -132,8 +102,7 @@ func (s *DeploySuite) testPrepareArchiveInfoError(c *gc.C, newDeploy newDeploy) { callbacks := &DeployCallbacks{ - MockClearResolvedFlag: &MockNoArgs{}, - MockGetArchiveInfo: &MockGetArchiveInfo{err: errors.New("pew")}, + MockGetArchiveInfo: &MockGetArchiveInfo{err: errors.New("pew")}, } deployer := &MockDeployer{ MockNotifyRevert: &MockNoArgs{}, @@ -170,8 +139,7 @@ func (s *DeploySuite) testPrepareStageError(c *gc.C, newDeploy newDeploy) { callbacks := &DeployCallbacks{ - MockClearResolvedFlag: &MockNoArgs{}, - MockGetArchiveInfo: &MockGetArchiveInfo{info: &MockBundleInfo{}}, + MockGetArchiveInfo: &MockGetArchiveInfo{info: &MockBundleInfo{}}, } deployer := &MockDeployer{ MockNotifyRevert: &MockNoArgs{}, @@ -212,9 +180,8 @@ func (s *DeploySuite) testPrepareSetCharmError(c *gc.C, newDeploy newDeploy) { callbacks := &DeployCallbacks{ - MockClearResolvedFlag: &MockNoArgs{}, - MockGetArchiveInfo: &MockGetArchiveInfo{}, - MockSetCurrentCharm: &MockSetCurrentCharm{err: errors.New("blargh")}, + MockGetArchiveInfo: &MockGetArchiveInfo{}, + MockSetCurrentCharm: &MockSetCurrentCharm{err: errors.New("blargh")}, } deployer := &MockDeployer{ MockNotifyRevert: &MockNoArgs{}, @@ -359,12 +326,10 @@ newDeploy, overwriteState, operation.State{ - Kind: operation.Upgrade, - Step: operation.Pending, - CharmURL: curl("cs:quantal/nyancat-4"), - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.Upgrade, + Step: operation.Pending, + CharmURL: curl("cs:quantal/nyancat-4"), + Started: true, }, ) } @@ -391,9 +356,8 @@ newState, err := op.Execute(operation.State{}) c.Check(newState, gc.IsNil) c.Check(err, gc.ErrorMatches, "cannot deploy charm cs:quantal/nyancat-4") - errURL, ok := operation.DeployConflictCharmURL(err) + ok := operation.IsDeployConflictError(err) c.Check(ok, jc.IsTrue) - c.Check(errURL, gc.DeepEquals, charmURL) c.Check(deployer.MockDeploy.called, jc.IsTrue) } @@ -562,43 +526,15 @@ newDeploy, overwriteState, operation.State{ - Kind: operation.Upgrade, - Step: operation.Done, - CharmURL: curl("cs:quantal/lol-1"), - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.Upgrade, + Step: operation.Done, + CharmURL: curl("cs:quantal/lol-1"), + Started: true, }, ) } } -func (s *DeploySuite) testCommitMetricsError(c *gc.C, newDeploy newDeploy) { - callbacks := NewDeployCommitCallbacks(errors.New("glukh")) - factory := operation.NewFactory(operation.FactoryParams{Callbacks: callbacks}) - op, err := newDeploy(factory, curl("cs:quantal/x-0")) - c.Assert(err, jc.ErrorIsNil) - newState, err := op.Commit(operation.State{}) - c.Check(err, gc.ErrorMatches, 
"glukh") - c.Check(newState, gc.IsNil) -} - -func (s *DeploySuite) TestCommitMetricsError_Install(c *gc.C) { - s.testCommitMetricsError(c, (operation.Factory).NewInstall) -} - -func (s *DeploySuite) TestCommitMetricsError_Upgrade(c *gc.C) { - s.testCommitMetricsError(c, (operation.Factory).NewUpgrade) -} - -func (s *DeploySuite) TestCommitMetricsError_RevertUpgrade(c *gc.C) { - s.testCommitMetricsError(c, (operation.Factory).NewRevertUpgrade) -} - -func (s *DeploySuite) TestCommitMetricsError_ResolvedUpgrade(c *gc.C) { - s.testCommitMetricsError(c, (operation.Factory).NewResolvedUpgrade) -} - func (s *DeploySuite) TestCommitQueueInstallHook(c *gc.C) { callbacks := NewDeployCommitCallbacks(nil) factory := operation.NewFactory(operation.FactoryParams{Callbacks: callbacks}) === modified file 'src/github.com/juju/juju/worker/uniter/operation/errors.go' --- src/github.com/juju/juju/worker/uniter/operation/errors.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/errors.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "fmt" "github.com/juju/errors" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" ) var ( @@ -32,11 +32,9 @@ return &deployConflictError{charmURL} } -// DeployConflictCharmURL returns the charm URL used to create the supplied -// deploy conflict error, and a bool indicating success. -func DeployConflictCharmURL(err error) (*corecharm.URL, bool) { - if e, ok := err.(*deployConflictError); ok { - return e.charmURL, true - } - return nil, false +// IsDeployConflictError returns true if the error is a +// deploy conflict error. +func IsDeployConflictError(err error) bool { + _, ok := err.(*deployConflictError) + return ok } === modified file 'src/github.com/juju/juju/worker/uniter/operation/executor.go' --- src/github.com/juju/juju/worker/uniter/operation/executor.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/executor.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "fmt" "github.com/juju/errors" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" ) type executorStep struct { @@ -65,7 +65,7 @@ // Run is part of the Executor interface. func (x *executor) Run(op Operation) (runErr error) { - logger.Infof("running operation %v", op) + logger.Debugf("running operation %v", op) if op.NeedsGlobalMachineLock() { unlock, err := x.acquireMachineLock(fmt.Sprintf("executing operation: %s", op.String())) @@ -99,13 +99,13 @@ // Skip is part of the Executor interface. 
func (x *executor) Skip(op Operation) error { - logger.Infof("skipping operation %v", op) + logger.Debugf("skipping operation %v", op) return x.do(op, stepCommit) } func (x *executor) do(op Operation, step executorStep) (err error) { message := step.message(op) - logger.Infof(message) + logger.Debugf(message) newState, firstErr := step.run(op, *x.state) if newState != nil { writeErr := x.writeState(*newState) === modified file 'src/github.com/juju/juju/worker/uniter/operation/executor_test.go' --- src/github.com/juju/juju/worker/uniter/operation/executor_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/executor_test.go 2016-03-22 15:18:22 +0000 @@ -11,8 +11,8 @@ jc "github.com/juju/testing/checkers" ft "github.com/juju/testing/filetesting" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" @@ -503,8 +503,7 @@ func (mock *mockLockFunc) newFailingLock() func(string) (func() error, error) { return func(string) (func() error, error) { mock.noStepsCalledOnLock = mock.op.prepare.called == false && - mock.op.commit.called == false && - mock.op.prepare.called == false + mock.op.commit.called == false return nil, errors.New("wat") } @@ -515,8 +514,7 @@ mock.calledLock = true // Ensure that when we lock no operation has been called mock.noStepsCalledOnLock = mock.op.prepare.called == false && - mock.op.commit.called == false && - mock.op.prepare.called == false + mock.op.commit.called == false return func() error { // Record steps called when unlocking mock.stepsCalledOnUnlock = []bool{mock.op.prepare.called, === modified file 'src/github.com/juju/juju/worker/uniter/operation/factory.go' --- src/github.com/juju/juju/worker/uniter/operation/factory.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/factory.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/errors" "github.com/juju/names" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/worker/uniter/charm" "github.com/juju/juju/worker/uniter/hook" @@ -20,7 +20,6 @@ Callbacks Callbacks StorageUpdater StorageUpdater Abort <-chan struct{} - MetricSender apiMetricSender MetricSpoolDir string } @@ -36,18 +35,6 @@ config FactoryParams } -// newResolved wraps the supplied operation such that it will clear the uniter -// resolve flag before executing. -func (f *factory) newResolved(wrapped Operation) (Operation, error) { - if wrapped == nil { - return nil, errors.New("operation required") - } - return &resolvedOperation{ - Operation: wrapped, - callbacks: f.config.Callbacks, - }, nil -} - // newDeploy is the common code for creating arbitrary deploy operations. func (f *factory) newDeploy(kind Kind, charmURL *corecharm.URL, revert, resolved bool) (Operation, error) { if charmURL == nil { @@ -78,20 +65,12 @@ // NewRevertUpgrade is part of the Factory interface. func (f *factory) NewRevertUpgrade(charmURL *corecharm.URL) (Operation, error) { - charmOp, err := f.newDeploy(Upgrade, charmURL, true, false) - if err != nil { - return nil, err - } - return f.newResolved(charmOp) + return f.newDeploy(Upgrade, charmURL, true, false) } // NewResolvedUpgrade is part of the Factory interface. 
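[Editorial note: the executor touched above runs every operation through a strict three-phase protocol: Prepare may veto or amend state, Execute does the work, Commit records the outcome, and any state a phase returns is persisted before the next phase runs. A schematic of that contract, heavily simplified; the real executor also handles ErrSkipExecute and the global machine lock:

package main

import "fmt"

type State struct{ Step string }

type Operation interface {
	Prepare(State) (*State, error)
	Execute(State) (*State, error)
	Commit(State) (*State, error)
}

type executor struct{ state State }

// step runs one phase and persists any state change it returns.
func (x *executor) step(run func(State) (*State, error)) error {
	newState, err := run(x.state)
	if newState != nil {
		x.state = *newState // in juju this is written to disk before continuing
		fmt.Println("persisted state:", x.state.Step)
	}
	return err
}

func (x *executor) Run(op Operation) error {
	for _, phase := range []struct {
		name string
		run  func(State) (*State, error)
	}{{"prepare", op.Prepare}, {"execute", op.Execute}, {"commit", op.Commit}} {
		if err := x.step(phase.run); err != nil {
			return fmt.Errorf("%s: %w", phase.name, err)
		}
	}
	return nil
}

type noop struct{}

func (noop) Prepare(s State) (*State, error) { s.Step = "pending"; return &s, nil }
func (noop) Execute(s State) (*State, error) { s.Step = "done"; return &s, nil }
func (noop) Commit(State) (*State, error)    { return nil, nil }

func main() {
	fmt.Println((&executor{}).Run(noop{}))
}

End of note.]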
func (f *factory) NewResolvedUpgrade(charmURL *corecharm.URL) (Operation, error) { - charmOp, err := f.newDeploy(Upgrade, charmURL, false, true) - if err != nil { - return nil, err - } - return f.newResolved(charmOp) + return f.newDeploy(Upgrade, charmURL, false, true) } // NewRunHook is part of the Factory interface. @@ -106,22 +85,13 @@ }, nil } -// NewRetryHook is part of the Factory interface. -func (f *factory) NewRetryHook(hookInfo hook.Info) (Operation, error) { - hookOp, err := f.NewRunHook(hookInfo) - if err != nil { - return nil, err - } - return f.newResolved(hookOp) -} - // NewSkipHook is part of the Factory interface. func (f *factory) NewSkipHook(hookInfo hook.Info) (Operation, error) { hookOp, err := f.NewRunHook(hookInfo) if err != nil { return nil, err } - return f.newResolved(&skipOperation{hookOp}) + return &skipOperation{hookOp}, nil } // NewAction is part of the Factory interface. @@ -158,14 +128,6 @@ }, nil } -// NewUpdateRelations is part of the Factory interface. -func (f *factory) NewUpdateRelations(ids []int) (Operation, error) { - return &updateRelations{ - ids: ids, - callbacks: f.config.Callbacks, - }, nil -} - // NewUpdateStorage is part of the Factory interface. func (f *factory) NewUpdateStorage(tags []names.StorageTag) (Operation, error) { return &updateStorage{ @@ -183,11 +145,3 @@ func (f *factory) NewAcceptLeadership() (Operation, error) { return &acceptLeadership{}, nil } - -// NewSendMetrics is part of the Factory interface. -func (f *factory) NewSendMetrics() (Operation, error) { - return &sendMetrics{ - sender: f.config.MetricSender, - spoolDir: f.config.MetricSpoolDir, - }, nil -} === modified file 'src/github.com/juju/juju/worker/uniter/operation/factory_test.go' --- src/github.com/juju/juju/worker/uniter/operation/factory_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/factory_test.go 2016-03-22 15:18:22 +0000 @@ -8,8 +8,8 @@ jc "github.com/juju/testing/checkers" utilexec "github.com/juju/utils/exec" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" @@ -70,14 +70,14 @@ func (s *FactorySuite) TestNewRevertUpgradeString(c *gc.C) { s.testNewDeployString(c, (operation.Factory).NewRevertUpgrade, - "clear resolved flag and switch upgrade to", + "switch upgrade to", ) } func (s *FactorySuite) TestNewResolvedUpgradeString(c *gc.C) { s.testNewDeployString(c, (operation.Factory).NewResolvedUpgrade, - "clear resolved flag and continue upgrade to", + "continue upgrade to", ) } @@ -179,10 +179,6 @@ s.testNewHookError(c, (operation.Factory).NewRunHook) } -func (s *FactorySuite) TestNewHookError_Retry(c *gc.C) { - s.testNewHookError(c, (operation.Factory).NewRetryHook) -} - func (s *FactorySuite) TestNewHookError_Skip(c *gc.C) { s.testNewHookError(c, (operation.Factory).NewSkipHook) } @@ -193,15 +189,6 @@ c.Check(op.String(), gc.Equals, "run install hook") } -func (s *FactorySuite) TestNewHookString_Retry(c *gc.C) { - op, err := s.factory.NewRetryHook(hook.Info{ - Kind: hooks.RelationBroken, - RelationId: 123, - }) - c.Check(err, jc.ErrorIsNil) - c.Check(op.String(), gc.Equals, "clear resolved flag and run relation-broken (123) hook") -} - func (s *FactorySuite) TestNewHookString_Skip(c *gc.C) { op, err := s.factory.NewSkipHook(hook.Info{ Kind: hooks.RelationJoined, @@ -209,13 +196,7 @@ RelationId: 123, }) 
c.Check(err, jc.ErrorIsNil) - c.Check(op.String(), gc.Equals, "clear resolved flag and skip run relation-joined (123; foo/22) hook") -} - -func (s *FactorySuite) TestNewUpdateRelationsString(c *gc.C) { - op, err := s.factory.NewUpdateRelations([]int{1, 2, 3}) - c.Check(err, jc.ErrorIsNil) - c.Check(op.String(), gc.Equals, "update relations [1 2 3]") + c.Check(op.String(), gc.Equals, "skip run relation-joined (123; foo/22) hook") } func (s *FactorySuite) TestNewAcceptLeadershipString(c *gc.C) { === modified file 'src/github.com/juju/juju/worker/uniter/operation/interface.go' --- src/github.com/juju/juju/worker/uniter/operation/interface.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/interface.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "github.com/juju/loggo" "github.com/juju/names" utilexec "github.com/juju/utils/exec" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/worker/uniter/charm" "github.com/juju/juju/worker/uniter/hook" @@ -83,12 +83,8 @@ // NewRunHook creates an operation to execute the supplied hook. NewRunHook(hookInfo hook.Info) (Operation, error) - // NewRetryHook creates an operation to clear the unit's resolved flag, and - // re-execute the supplied hook. - NewRetryHook(hookInfo hook.Info) (Operation, error) - - // NewSkipHook creates an operation to clear the unit's resolved flag, and - // mark the supplied hook as completed successfully. + // NewSkipHook creates an operation to mark the supplied hook as + // completed successfully, without executing the hook. NewSkipHook(hookInfo hook.Info) (Operation, error) // NewAction creates an operation to execute the supplied action. @@ -99,10 +95,6 @@ // func. NewCommands(args CommandArgs, sendResponse CommandResponseFunc) (Operation, error) - // NewUpdateRelations creates an operation to ensure the supplied relation - // ids are known and tracked. - NewUpdateRelations(ids []int) (Operation, error) - // NewUpdateStorage creates an operation to ensure the supplied storage // tags are known and tracked. NewUpdateStorage(tags []names.StorageTag) (Operation, error) @@ -114,10 +106,6 @@ // NewResignLeadership creates an operation to ensure the uniter does not // act as service leader. NewResignLeadership() (Operation, error) - - // NewSendMetrics creates an operation that sends all metrics collected - // by the unit. - NewSendMetrics() (Operation, error) } // CommandArgs stores the arguments for a Command operation. @@ -149,19 +137,11 @@ // SetExecutingStatus sets the agent state to "Executing" with a message. SetExecutingStatus(string) error - // UpdateRelations exists so that we can encapsulate it in an operation. - UpdateRelations(ids []int) error - // NotifyHook* exist so that we can defer worrying about how to untangle the // callbacks inserted for uniter_test. They're only used by RunHook operations. NotifyHookCompleted(string, runner.Context) NotifyHookFailed(string, runner.Context) - // InitializeMetricsTimers ensures that the collect-metrics hook timer is - // up to date given the current deployed charm. It's only used in deploy - // operations. - InitializeMetricsTimers() error - // The following methods exist primarily to allow us to test operation code // without using a live api connection. @@ -175,14 +155,9 @@ // SetCurrentCharm records intent to deploy a given charm. 
It must be called // *before* recording local state referencing that charm, to ensure there's - // no path by which the state server can legitimately garbage collect that + // no path by which the controller can legitimately garbage collect that // charm or the service's settings for it. It's only used by Deploy operations. SetCurrentCharm(charmURL *corecharm.URL) error - - // ClearResolvedFlag notifies the state server that the uniter has accepted - // the resolved attempt and is trying to progress. It's only used by Resolved - // operations (which we generally expect to wrap other operations). - ClearResolvedFlag() error } // StorageUpdater is an interface used for updating local knowledge of storage === modified file 'src/github.com/juju/juju/worker/uniter/operation/leader_test.go' --- src/github.com/juju/juju/worker/uniter/operation/leader_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/leader_test.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" @@ -83,19 +83,17 @@ c.Check(err, gc.Equals, operation.ErrSkipExecute) newState, err := op.Commit(operation.State{ - Kind: operation.Continue, - Started: true, - CollectMetricsTime: 1234567, - Hook: &hook.Info{Kind: hooks.Install}, + Kind: operation.Continue, + Started: true, + Hook: &hook.Info{Kind: hooks.Install}, }) c.Check(err, jc.ErrorIsNil) c.Check(newState, gc.DeepEquals, &operation.State{ - Kind: operation.RunHook, - Step: operation.Queued, - Hook: &hook.Info{Kind: hook.LeaderElected}, - Leader: true, - Started: true, - CollectMetricsTime: 1234567, + Kind: operation.RunHook, + Step: operation.Queued, + Hook: &hook.Info{Kind: hook.LeaderElected}, + Leader: true, + Started: true, }) } === removed file 'src/github.com/juju/juju/worker/uniter/operation/metrics.go' --- src/github.com/juju/juju/worker/uniter/operation/metrics.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/metrics.go 1970-01-01 00:00:00 +0000 @@ -1,89 +0,0 @@ -// Copyright 2014-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package operation - -import ( - "fmt" - "time" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/worker/uniter/metrics" -) - -// apiMetricSender is used to send metrics to the state server. Its default implementation is -// *uniter.Unit. -type apiMetricSender interface { - AddMetricBatches(batches []params.MetricBatch) (map[string]error, error) -} - -// metricsReader is used to read metrics batches stored by the metrics recorder -// and remove metrics batches that have been marked as succesfully sent. -type metricsReader interface { - Read() ([]metrics.MetricBatch, error) - Remove(uuid string) error - Close() error -} - -type sendMetrics struct { - DoesNotRequireMachineLock - spoolDir string - sender apiMetricSender -} - -// String implements the Operation interface. -func (op *sendMetrics) String() string { - return fmt.Sprintf("sending metrics") -} - -// Prepare implements the Operation interface. -func (op *sendMetrics) Prepare(state State) (*State, error) { - return &state, nil -} - -// Execute implements the Operation interface. -// Execute will try to read any metric batches stored in the spool directory -// and send them to the state server. 
-func (op *sendMetrics) Execute(state State) (*State, error) { - reader, err := metrics.NewJSONMetricReader(op.spoolDir) - if err != nil { - logger.Warningf("failed to create a metric reader: %v", err) - return &state, nil - } - - batches, err := reader.Read() - if err != nil { - logger.Warningf("failed to open the metric reader: %v", err) - return &state, nil - } - defer reader.Close() - var sendBatches []params.MetricBatch - for _, batch := range batches { - sendBatches = append(sendBatches, metrics.APIMetricBatch(batch)) - } - results, err := op.sender.AddMetricBatches(sendBatches) - if err != nil { - logger.Warningf("could not send metrics: %v", err) - return &state, nil - } - for batchUUID, resultErr := range results { - // if we fail to send any metric batch we log a warning with the assumption that - // the unsent metric batches remain in the spool directory and will be sent to the - // state server when the network partition is restored. - if _, ok := resultErr.(*params.Error); ok || params.IsCodeAlreadyExists(resultErr) { - err = reader.Remove(batchUUID) - if err != nil { - logger.Warningf("could not remove batch %q from spool: %v", batchUUID, err) - } - } else { - logger.Warningf("failed to send batch %q: %v", batchUUID, resultErr) - } - } - return &state, nil -} - -// Commit implements the Operation interface. -func (op *sendMetrics) Commit(state State) (*State, error) { - state.SendMetricsTime = time.Now().Unix() - return &state, nil -} === removed file 'src/github.com/juju/juju/worker/uniter/operation/metrics_test.go' --- src/github.com/juju/juju/worker/uniter/operation/metrics_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/metrics_test.go 1970-01-01 00:00:00 +0000 @@ -1,209 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
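[Editorial note: the removed sendMetrics operation implements a spool-and-forward protocol: batches persisted on disk are read, pushed to the API, and deleted only for results indicating the server has them (success or already-exists); anything else stays spooled for the next attempt, so a network partition loses nothing. A stripped-down sketch of that acknowledge-then-delete loop, using in-memory stand-ins for the JSON spool reader and the API sender:

package main

import "fmt"

type batch struct {
	UUID string
	Data string
}

// flushSpool sends every spooled batch and removes only those the server
// acknowledged; unacknowledged batches stay behind to be retried later.
func flushSpool(spool map[string]batch, send func([]batch) map[string]error) {
	var out []batch
	for _, b := range spool {
		out = append(out, b)
	}
	for uuid, err := range send(out) {
		if err == nil { // acknowledged (the real code also accepts "already exists")
			delete(spool, uuid)
		} else {
			fmt.Printf("keeping %s for retry: %v\n", uuid, err)
		}
	}
}

func main() {
	spool := map[string]batch{
		"a": {UUID: "a", Data: "pings=50"},
		"b": {UUID: "b", Data: "pings=51"},
	}
	send := func(bs []batch) map[string]error {
		results := make(map[string]error)
		for _, b := range bs {
			if b.UUID == "b" {
				results[b.UUID] = fmt.Errorf("network flake")
			} else {
				results[b.UUID] = nil
			}
		}
		return results
	}
	flushSpool(spool, send)
	fmt.Println("still spooled:", len(spool)) // 1
}

End of note.]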
- -package operation_test - -import ( - "errors" - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/worker/uniter/metrics" - "github.com/juju/juju/worker/uniter/operation" -) - -var _ = gc.Suite(&MetricsOperationSuite{}) - -type MetricsOperationSuite struct { - spoolDir string -} - -func (s *MetricsOperationSuite) SetUpTest(c *gc.C) { - s.spoolDir = c.MkDir() - - declaredMetrics := map[string]corecharm.Metric{ - "pings": corecharm.Metric{Description: "test pings", Type: corecharm.MetricTypeAbsolute}, - } - recorder, err := metrics.NewJSONMetricRecorder(s.spoolDir, declaredMetrics, "local:trusty/testcharm") - c.Assert(err, jc.ErrorIsNil) - - err = recorder.AddMetric("pings", "50", time.Now()) - c.Assert(err, jc.ErrorIsNil) - - err = recorder.Close() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *MetricsOperationSuite) TestMetricSendingSuccess(c *gc.C) { - apiSender := newTestAPIMetricSender() - - factory := operation.NewFactory(operation.FactoryParams{ - MetricSender: apiSender, - MetricSpoolDir: s.spoolDir, - }) - - sendOperation, err := factory.NewSendMetrics() - c.Assert(err, gc.IsNil) - - _, err = sendOperation.Prepare(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - _, err = sendOperation.Execute(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(apiSender.batches, gc.HasLen, 1) - - reader, err := metrics.NewJSONMetricReader(s.spoolDir) - c.Assert(err, gc.IsNil) - batches, err := reader.Read() - c.Assert(err, gc.IsNil) - c.Assert(batches, gc.HasLen, 0) -} - -func (s *MetricsOperationSuite) TestSendingGetDuplicate(c *gc.C) { - apiSender := newTestAPIMetricSender() - - factory := operation.NewFactory(operation.FactoryParams{ - MetricSender: apiSender, - MetricSpoolDir: s.spoolDir, - }) - - sendOperation, err := factory.NewSendMetrics() - c.Assert(err, gc.IsNil) - - _, err = sendOperation.Prepare(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - apiErr := ¶ms.Error{Message: "already exists", Code: params.CodeAlreadyExists} - select { - case apiSender.errors <- apiErr: - default: - c.Fatalf("blocked error channel") - } - - _, err = sendOperation.Execute(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(apiSender.batches, gc.HasLen, 1) - - reader, err := metrics.NewJSONMetricReader(s.spoolDir) - c.Assert(err, gc.IsNil) - batches, err := reader.Read() - c.Assert(err, gc.IsNil) - c.Assert(batches, gc.HasLen, 0) -} - -func (s *MetricsOperationSuite) TestSendingFails(c *gc.C) { - apiSender := newTestAPIMetricSender() - - factory := operation.NewFactory(operation.FactoryParams{ - MetricSender: apiSender, - MetricSpoolDir: s.spoolDir, - }) - - sendOperation, err := factory.NewSendMetrics() - c.Assert(err, gc.IsNil) - - _, err = sendOperation.Prepare(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - select { - case apiSender.sendError <- errors.New("something went wrong"): - default: - c.Fatalf("blocked error channel") - } - - _, err = sendOperation.Execute(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(apiSender.batches, gc.HasLen, 1) - - reader, err := metrics.NewJSONMetricReader(s.spoolDir) - c.Assert(err, gc.IsNil) - batches, err := reader.Read() - c.Assert(err, gc.IsNil) - c.Assert(batches, gc.HasLen, 1) -} - -func (s *MetricsOperationSuite) TestNoSpoolDirectory(c *gc.C) { - apiSender := newTestAPIMetricSender() - - factory := operation.NewFactory(operation.FactoryParams{ - MetricSender: apiSender, - 
MetricSpoolDir: "/some/random/spool/dir", - }) - - sendOperation, err := factory.NewSendMetrics() - c.Assert(err, gc.IsNil) - - _, err = sendOperation.Prepare(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - _, err = sendOperation.Execute(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(apiSender.batches, gc.HasLen, 0) -} - -func (s *MetricsOperationSuite) TestNoMetricsToSend(c *gc.C) { - apiSender := newTestAPIMetricSender() - - newTmpSpoolDir := c.MkDir() - - factory := operation.NewFactory(operation.FactoryParams{ - MetricSender: apiSender, - MetricSpoolDir: newTmpSpoolDir, - }) - - sendOperation, err := factory.NewSendMetrics() - c.Assert(err, gc.IsNil) - - _, err = sendOperation.Prepare(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - _, err = sendOperation.Execute(operation.State{}) - c.Assert(err, jc.ErrorIsNil) - - c.Assert(apiSender.batches, gc.HasLen, 0) -} - -func newTestAPIMetricSender() *testAPIMetricSender { - return &testAPIMetricSender{errors: make(chan error, 1), sendError: make(chan error, 1)} -} - -type testAPIMetricSender struct { - batches []params.MetricBatch - errors chan error - sendError chan error -} - -// AddMetricsBatches implements the operation.apiMetricsSender interface. -func (t *testAPIMetricSender) AddMetricBatches(batches []params.MetricBatch) (map[string]error, error) { - t.batches = batches - - var err error - select { - case e := <-t.errors: - err = e - default: - err = (*params.Error)(nil) - } - - var sendErr error - select { - case e := <-t.sendError: - sendErr = e - default: - sendErr = nil - } - - errors := make(map[string]error) - for _, b := range batches { - errors[b.UUID] = err - } - return errors, sendErr -} === removed file 'src/github.com/juju/juju/worker/uniter/operation/relations.go' --- src/github.com/juju/juju/worker/uniter/operation/relations.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/relations.go 1970-01-01 00:00:00 +0000 @@ -1,40 +0,0 @@ -// Copyright 2014-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package operation - -import ( - "fmt" -) - -type updateRelations struct { - ids []int - - callbacks Callbacks - - DoesNotRequireMachineLock -} - -// String is part of the Operation interface. -func (ur *updateRelations) String() string { - return fmt.Sprintf("update relations %v", ur.ids) -} - -// Prepare does nothing. -// Prepare is part of the Operation interface. -func (ur *updateRelations) Prepare(_ State) (*State, error) { - return nil, nil -} - -// Execute ensures the operation's relation ids are known and tracked. This -// doesn't directly change any persistent state. -// Execute is part of the Operation interface. -func (ur *updateRelations) Execute(_ State) (*State, error) { - return nil, ur.callbacks.UpdateRelations(ur.ids) -} - -// Commit does nothing. -// Commit is part of the Operation interface. -func (ur *updateRelations) Commit(_ State) (*State, error) { - return nil, nil -} === removed file 'src/github.com/juju/juju/worker/uniter/operation/relations_test.go' --- src/github.com/juju/juju/worker/uniter/operation/relations_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/relations_test.go 1970-01-01 00:00:00 +0000 @@ -1,78 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
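[Editorial note: testAPIMetricSender above leans on a small testing idiom: buffered error channels read with select/default, so a test can optionally pre-load a failure for the next call without the fake ever blocking the code under test. The idiom in isolation, as a hypothetical fake:

package main

import (
	"errors"
	"fmt"
)

type fakeSender struct {
	errs chan error // buffered; tests may pre-load one error
}

// Send returns a pre-loaded error if the test injected one, nil otherwise;
// the default case keeps the fake from ever blocking.
func (f *fakeSender) Send(payload string) error {
	select {
	case err := <-f.errs:
		return err
	default:
		return nil
	}
}

func main() {
	f := &fakeSender{errs: make(chan error, 1)}
	fmt.Println(f.Send("one")) // <nil>
	f.errs <- errors.New("injected failure")
	fmt.Println(f.Send("two"))   // injected failure
	fmt.Println(f.Send("three")) // <nil> again
}

End of note.]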
- -package operation_test - -import ( - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/worker/uniter/operation" -) - -type UpdateRelationsSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&UpdateRelationsSuite{}) - -func (s *UpdateRelationsSuite) TestPrepare(c *gc.C) { - factory := operation.NewFactory(operation.FactoryParams{}) - op, err := factory.NewUpdateRelations(nil) - c.Assert(err, jc.ErrorIsNil) - state, err := op.Prepare(operation.State{}) - c.Check(err, jc.ErrorIsNil) - c.Check(state, gc.IsNil) -} - -func (s *UpdateRelationsSuite) TestExecuteError(c *gc.C) { - callbacks := &UpdateRelationsCallbacks{ - MockUpdateRelations: &MockUpdateRelations{err: errors.New("quack")}, - } - factory := operation.NewFactory(operation.FactoryParams{Callbacks: callbacks}) - op, err := factory.NewUpdateRelations([]int{3, 2, 1}) - c.Assert(err, jc.ErrorIsNil) - state, err := op.Prepare(operation.State{}) - c.Check(err, jc.ErrorIsNil) - c.Check(state, gc.IsNil) - - state, err = op.Execute(operation.State{}) - c.Check(err, gc.ErrorMatches, "quack") - c.Check(state, gc.IsNil) - c.Check(callbacks.MockUpdateRelations.gotIds, jc.DeepEquals, &[]int{3, 2, 1}) -} - -func (s *UpdateRelationsSuite) TestExecuteSuccess(c *gc.C) { - callbacks := &UpdateRelationsCallbacks{ - MockUpdateRelations: &MockUpdateRelations{}, - } - factory := operation.NewFactory(operation.FactoryParams{Callbacks: callbacks}) - op, err := factory.NewUpdateRelations([]int{3, 2, 1}) - c.Assert(err, jc.ErrorIsNil) - state, err := op.Prepare(operation.State{}) - c.Check(err, jc.ErrorIsNil) - c.Check(state, gc.IsNil) - - state, err = op.Execute(operation.State{}) - c.Check(err, jc.ErrorIsNil) - c.Check(state, gc.IsNil) - c.Check(callbacks.MockUpdateRelations.gotIds, jc.DeepEquals, &[]int{3, 2, 1}) -} - -func (s *UpdateRelationsSuite) TestCommit(c *gc.C) { - factory := operation.NewFactory(operation.FactoryParams{}) - op, err := factory.NewUpdateRelations(nil) - c.Assert(err, jc.ErrorIsNil) - state, err := op.Commit(operation.State{}) - c.Check(err, jc.ErrorIsNil) - c.Check(state, gc.IsNil) -} - -func (s *UpdateRelationsSuite) TestDoesNotNeedGlobalMachineLock(c *gc.C) { - factory := operation.NewFactory(operation.FactoryParams{}) - op, err := factory.NewUpdateRelations(nil) - c.Assert(err, jc.ErrorIsNil) - c.Assert(op.NeedsGlobalMachineLock(), jc.IsFalse) -} === removed file 'src/github.com/juju/juju/worker/uniter/operation/resolved.go' --- src/github.com/juju/juju/worker/uniter/operation/resolved.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/resolved.go 1970-01-01 00:00:00 +0000 @@ -1,28 +0,0 @@ -// Copyright 2014-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package operation - -import ( - "fmt" - - "github.com/juju/errors" -) - -type resolvedOperation struct { - Operation - callbacks Callbacks -} - -// String is part of the Operation interface. -func (op *resolvedOperation) String() string { - return fmt.Sprintf("clear resolved flag and %s", op.Operation) -} - -// Prepare is part of the Operation interface. 
-func (op *resolvedOperation) Prepare(state State) (*State, error) { - if err := op.callbacks.ClearResolvedFlag(); err != nil { - return nil, errors.Trace(err) - } - return op.Operation.Prepare(state) -} === modified file 'src/github.com/juju/juju/worker/uniter/operation/runaction_test.go' --- src/github.com/juju/juju/worker/uniter/operation/runaction_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/runaction_test.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,12 @@ "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" ) type RunActionSuite struct { @@ -100,7 +101,7 @@ } func (s *RunActionSuite) TestPrepareCtxCalled(c *gc.C) { - ctx := &MockContext{actionData: &runner.ActionData{Name: "some-action-name"}} + ctx := &MockContext{actionData: &context.ActionData{Name: "some-action-name"}} runnerFactory := &MockRunnerFactory{ MockNewActionRunner: &MockNewActionRunner{ runner: &MockRunner{ @@ -121,7 +122,7 @@ } func (s *RunActionSuite) TestPrepareCtxError(c *gc.C) { - ctx := &MockContext{actionData: &runner.ActionData{Name: "some-action-name"}} + ctx := &MockContext{actionData: &context.ActionData{Name: "some-action-name"}} ctx.SetErrors(errors.New("ctx prepare error")) runnerFactory := &MockRunnerFactory{ MockNewActionRunner: &MockNewActionRunner{ @@ -171,13 +172,11 @@ newState, err := op.Prepare(overwriteState) c.Assert(err, jc.ErrorIsNil) c.Assert(newState, jc.DeepEquals, &operation.State{ - Kind: operation.RunAction, - Step: operation.Pending, - ActionId: &someActionId, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - Hook: &hook.Info{Kind: hooks.Install}, + Kind: operation.RunAction, + Step: operation.Pending, + ActionId: &someActionId, + Started: true, + Hook: &hook.Info{Kind: hooks.Install}, }) c.Assert(*runnerFactory.MockNewActionRunner.gotActionId, gc.Equals, someActionId) } @@ -198,13 +197,11 @@ description: "preserves appropriate fields", before: overwriteState, after: operation.State{ - Kind: operation.RunAction, - Step: operation.Done, - ActionId: &someActionId, - Hook: &hook.Info{Kind: hooks.Install}, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.RunAction, + Step: operation.Done, + ActionId: &someActionId, + Hook: &hook.Info{Kind: hooks.Install}, + Started: true, }, }} @@ -244,40 +241,32 @@ }, { description: "preserves only appropriate fields, no hook", before: operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - CharmURL: curl("cs:quantal/wordpress-2"), - ActionId: &randomActionId, + Kind: operation.Continue, + Step: operation.Pending, + Started: true, + CharmURL: curl("cs:quantal/wordpress-2"), + ActionId: &randomActionId, }, after: operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.Continue, + Step: operation.Pending, + Started: true, }, }, { description: "preserves only appropriate fields, with hook", before: operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - CharmURL: 
curl("cs:quantal/wordpress-2"), - ActionId: &randomActionId, - Hook: &hook.Info{Kind: hooks.Install}, + Kind: operation.Continue, + Step: operation.Pending, + Started: true, + CharmURL: curl("cs:quantal/wordpress-2"), + ActionId: &randomActionId, + Hook: &hook.Info{Kind: hooks.Install}, }, after: operation.State{ - Kind: operation.RunHook, - Step: operation.Pending, - Hook: &hook.Info{Kind: hooks.Install}, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.RunHook, + Step: operation.Pending, + Hook: &hook.Info{Kind: hooks.Install}, + Started: true, }, }} === modified file 'src/github.com/juju/juju/worker/uniter/operation/runcommands.go' --- src/github.com/juju/juju/worker/uniter/operation/runcommands.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/runcommands.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "github.com/juju/errors" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" ) type runCommands struct { @@ -39,7 +40,7 @@ // Prepare ensures the commands can be run. It never returns a state change. // Prepare is part of the Operation interface. func (rc *runCommands) Prepare(state State) (*State, error) { - rnr, err := rc.runnerFactory.NewCommandRunner(runner.CommandInfo{ + rnr, err := rc.runnerFactory.NewCommandRunner(context.CommandInfo{ RelationId: rc.args.RelationId, RemoteUnitName: rc.args.RemoteUnitName, ForceRemoteUnit: rc.args.ForceRemoteUnit, @@ -66,14 +67,16 @@ response, err := rc.runner.RunCommands(rc.args.Commands) switch err { - case runner.ErrRequeueAndReboot: + case context.ErrRequeueAndReboot: logger.Warningf("cannot requeue external commands") fallthrough - case runner.ErrReboot: + case context.ErrReboot: + rc.sendResponse(response, nil) err = ErrNeedsReboot + default: + rc.sendResponse(response, err) } - rc.sendResponse(response, err) - return nil, nil + return nil, err } // Commit does nothing. 
=== modified file 'src/github.com/juju/juju/worker/uniter/operation/runcommands_test.go' --- src/github.com/juju/juju/worker/uniter/operation/runcommands_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/runcommands_test.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ gc "gopkg.in/check.v1" "github.com/juju/juju/worker/uniter/operation" - "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" ) type RunCommandsSuite struct { @@ -34,7 +34,7 @@ newState, err := op.Prepare(operation.State{}) c.Assert(err, gc.ErrorMatches, "blooey") c.Assert(newState, gc.IsNil) - c.Assert(*runnerFactory.MockNewCommandRunner.gotInfo, gc.Equals, runner.CommandInfo{ + c.Assert(*runnerFactory.MockNewCommandRunner.gotInfo, gc.Equals, context.CommandInfo{ RelationId: 123, RemoteUnitName: "foo/456", ForceRemoteUnit: true, @@ -60,7 +60,7 @@ newState, err := op.Prepare(operation.State{}) c.Assert(err, jc.ErrorIsNil) c.Assert(newState, gc.IsNil) - c.Assert(*runnerFactory.MockNewCommandRunner.gotInfo, gc.Equals, runner.CommandInfo{ + c.Assert(*runnerFactory.MockNewCommandRunner.gotInfo, gc.Equals, context.CommandInfo{ RelationId: 123, RemoteUnitName: "foo/456", ForceRemoteUnit: true, @@ -92,7 +92,7 @@ } func (s *RunCommandsSuite) TestExecuteRebootErrors(c *gc.C) { - for _, sendErr := range []error{runner.ErrRequeueAndReboot, runner.ErrReboot} { + for _, sendErr := range []error{context.ErrRequeueAndReboot, context.ErrReboot} { runnerFactory := NewRunCommandsRunnerFactory( &utilexec.ExecResponse{Code: 101}, sendErr, ) @@ -109,10 +109,10 @@ newState, err := op.Execute(operation.State{}) c.Assert(newState, gc.IsNil) - c.Assert(err, jc.ErrorIsNil) + c.Assert(err, gc.Equals, operation.ErrNeedsReboot) c.Assert(*runnerFactory.MockNewCommandRunner.runner.MockRunCommands.gotCommands, gc.Equals, "do something") c.Assert(*sendResponse.gotResponse, gc.DeepEquals, &utilexec.ExecResponse{Code: 101}) - c.Assert(*sendResponse.gotErr, gc.Equals, operation.ErrNeedsReboot) + c.Assert(*sendResponse.gotErr, jc.ErrorIsNil) } } @@ -133,7 +133,7 @@ newState, err := op.Execute(operation.State{}) c.Assert(newState, gc.IsNil) - c.Assert(err, jc.ErrorIsNil) + c.Assert(err, gc.ErrorMatches, "sneh") c.Assert(*runnerFactory.MockNewCommandRunner.runner.MockRunCommands.gotCommands, gc.Equals, "do something") c.Assert(*sendResponse.gotResponse, gc.IsNil) c.Assert(*sendResponse.gotErr, gc.ErrorMatches, "sneh") === modified file 'src/github.com/juju/juju/worker/uniter/operation/runhook.go' --- src/github.com/juju/juju/worker/uniter/operation/runhook.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/runhook.go 2016-03-22 15:18:22 +0000 @@ -5,14 +5,14 @@ import ( "fmt" - "time" "github.com/juju/errors" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" "github.com/juju/juju/worker/uniter/runner/jujuc" ) @@ -94,13 +94,13 @@ err := rh.runner.RunHook(rh.name) cause := errors.Cause(err) switch { - case runner.IsMissingHookError(cause): + case context.IsMissingHookError(cause): ranHook = false err = nil - case cause == runner.ErrRequeueAndReboot: + case cause == context.ErrRequeueAndReboot: step = Queued fallthrough - case cause == runner.ErrReboot: + case cause == context.ErrReboot: err = ErrNeedsReboot case err == nil: default: @@ -167,6 +167,7 @@ if 
hasRunStatusSet { break } + logger.Debugf("unit %v has started but has not yet set status", ctx.UnitName()) // We've finished the start hook and the charm has not updated its // own status so we'll set it to unknown. err = rh.runner.Context().SetUnitStatus(jujuc.StatusInfo{ @@ -213,14 +214,12 @@ newState := change.apply(state) switch rh.info.Kind { + case hooks.Install: + newState.Installed = true case hooks.Start: newState.Started = true case hooks.Stop: newState.Stopped = true - case hooks.CollectMetrics: - newState.CollectMetricsTime = time.Now().Unix() - case hooks.UpdateStatus: - newState.UpdateStatusTime = time.Now().Unix() } return newState, nil === modified file 'src/github.com/juju/juju/worker/uniter/operation/runhook_test.go' --- src/github.com/juju/juju/worker/uniter/operation/runhook_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/runhook_test.go 2016-03-22 15:18:22 +0000 @@ -4,17 +4,15 @@ package operation_test import ( - "time" - "github.com/juju/errors" "github.com/juju/testing" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" - "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" "github.com/juju/juju/worker/uniter/runner/jujuc" ) @@ -26,36 +24,11 @@ type newHook func(operation.Factory, hook.Info) (operation.Operation, error) -func (s *RunHookSuite) testClearResolvedFlagError(c *gc.C, newHook newHook) { - callbacks := &PrepareHookCallbacks{ - MockClearResolvedFlag: &MockNoArgs{err: errors.New("biff")}, - } - factory := operation.NewFactory(operation.FactoryParams{ - Callbacks: callbacks, - }) - op, err := newHook(factory, hook.Info{Kind: hooks.ConfigChanged}) - c.Assert(err, jc.ErrorIsNil) - - newState, err := op.Prepare(operation.State{}) - c.Check(newState, gc.IsNil) - c.Check(callbacks.MockClearResolvedFlag.called, jc.IsTrue) - c.Check(err, gc.ErrorMatches, "biff") -} - -func (s *RunHookSuite) TestClearResolvedFlagError_Retry(c *gc.C) { - s.testClearResolvedFlagError(c, (operation.Factory).NewRetryHook) -} - -func (s *RunHookSuite) TestClearResolvedFlagError_Skip(c *gc.C) { - s.testClearResolvedFlagError(c, (operation.Factory).NewSkipHook) -} - func (s *RunHookSuite) testPrepareHookError( c *gc.C, newHook newHook, expectClearResolvedFlag, expectSkip bool, ) { callbacks := &PrepareHookCallbacks{ - MockPrepareHook: &MockPrepareHook{err: errors.New("pow")}, - MockClearResolvedFlag: &MockNoArgs{}, + MockPrepareHook: &MockPrepareHook{err: errors.New("pow")}, } factory := operation.NewFactory(operation.FactoryParams{ Callbacks: callbacks, @@ -65,7 +38,6 @@ newState, err := op.Prepare(operation.State{}) c.Check(newState, gc.IsNil) - c.Check(callbacks.MockClearResolvedFlag.called, gc.Equals, expectClearResolvedFlag) if expectSkip { c.Check(err, gc.Equals, operation.ErrSkipExecute) c.Check(callbacks.MockPrepareHook.gotHook, gc.IsNil) @@ -80,8 +52,7 @@ func (s *RunHookSuite) TestPrepareHookCtxCalled(c *gc.C) { ctx := &MockContext{} callbacks := &PrepareHookCallbacks{ - MockPrepareHook: &MockPrepareHook{}, - MockClearResolvedFlag: &MockNoArgs{}, + MockPrepareHook: &MockPrepareHook{}, } runnerFactory := &MockRunnerFactory{ MockNewHookRunner: &MockNewHookRunner{ @@ -109,8 +80,7 @@ ctx := &MockContext{} ctx.SetErrors(errors.New("ctx prepare error")) callbacks := &PrepareHookCallbacks{ - MockPrepareHook: &MockPrepareHook{}, - 
MockClearResolvedFlag: &MockNoArgs{}, + MockPrepareHook: &MockPrepareHook{}, } runnerFactory := &MockRunnerFactory{ MockNewHookRunner: &MockNewHookRunner{ @@ -138,10 +108,6 @@ s.testPrepareHookError(c, (operation.Factory).NewRunHook, false, false) } -func (s *RunHookSuite) TestPrepareHookError_Retry(c *gc.C) { - s.testPrepareHookError(c, (operation.Factory).NewRetryHook, true, false) -} - func (s *RunHookSuite) TestPrepareHookError_Skip(c *gc.C) { s.testPrepareHookError(c, (operation.Factory).NewSkipHook, true, true) } @@ -170,10 +136,6 @@ s.testPrepareRunnerError(c, (operation.Factory).NewRunHook) } -func (s *RunHookSuite) TestPrepareRunnerError_Retry(c *gc.C) { - s.testPrepareRunnerError(c, (operation.Factory).NewRetryHook) -} - func (s *RunHookSuite) testPrepareSuccess( c *gc.C, newHook newHook, before, after operation.State, ) { @@ -192,42 +154,28 @@ } func (s *RunHookSuite) TestPrepareSuccess_BlankSlate(c *gc.C) { - for i, newHook := range []newHook{ + s.testPrepareSuccess(c, (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, - } { - c.Logf("variant %d", i) - s.testPrepareSuccess(c, - newHook, - operation.State{}, - operation.State{ - Kind: operation.RunHook, - Step: operation.Pending, - Hook: &hook.Info{Kind: hooks.ConfigChanged}, - }, - ) - } + operation.State{}, + operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Hook: &hook.Info{Kind: hooks.ConfigChanged}, + }, + ) } func (s *RunHookSuite) TestPrepareSuccess_Preserve(c *gc.C) { - for i, newHook := range []newHook{ + s.testPrepareSuccess(c, (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, - } { - c.Logf("variant %d", i) - s.testPrepareSuccess(c, - newHook, - overwriteState, - operation.State{ - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - Kind: operation.RunHook, - Step: operation.Pending, - Hook: &hook.Info{Kind: hooks.ConfigChanged}, - }, - ) - } + overwriteState, + operation.State{ + Started: true, + Kind: operation.RunHook, + Step: operation.Pending, + Hook: &hook.Info{Kind: hooks.ConfigChanged}, + }, + ) } func (s *RunHookSuite) getExecuteRunnerTest(c *gc.C, newHook newHook, kind hooks.Kind, runErr error) (operation.Operation, *ExecuteHookCallbacks, *MockRunnerFactory) { @@ -246,11 +194,11 @@ return op, callbacks, runnerFactory } -func (s *RunHookSuite) testExecuteMissingHookError(c *gc.C, newHook newHook) { - runErr := runner.NewMissingHookError("blah-blah") +func (s *RunHookSuite) TestExecuteMissingHookError(c *gc.C) { + runErr := context.NewMissingHookError("blah-blah") for _, kind := range hooks.UnitHooks() { c.Logf("hook %v", kind) - op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, newHook, kind, runErr) + op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, (operation.Factory).NewRunHook, kind, runErr) _, err := op.Prepare(operation.State{}) c.Assert(err, jc.ErrorIsNil) @@ -271,17 +219,9 @@ } } -func (s *RunHookSuite) TestExecuteMissingHookError_Run(c *gc.C) { - s.testExecuteMissingHookError(c, (operation.Factory).NewRunHook) -} - -func (s *RunHookSuite) TestExecuteMissingHookError_Retry(c *gc.C) { - s.testExecuteMissingHookError(c, (operation.Factory).NewRetryHook) -} - -func (s *RunHookSuite) testExecuteRequeueRebootError(c *gc.C, newHook newHook) { - runErr := runner.ErrRequeueAndReboot - op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, newHook, hooks.ConfigChanged, runErr) +func (s *RunHookSuite) TestExecuteRequeueRebootError(c *gc.C) { + runErr := context.ErrRequeueAndReboot + op, callbacks, 
runnerFactory := s.getExecuteRunnerTest(c, (operation.Factory).NewRunHook, hooks.ConfigChanged, runErr) _, err := op.Prepare(operation.State{}) c.Assert(err, jc.ErrorIsNil) @@ -298,17 +238,9 @@ c.Assert(callbacks.MockNotifyHookFailed.gotName, gc.IsNil) } -func (s *RunHookSuite) TestExecuteRequeueRebootError_Run(c *gc.C) { - s.testExecuteRequeueRebootError(c, (operation.Factory).NewRunHook) -} - -func (s *RunHookSuite) TestExecuteRequeueRebootError_Retry(c *gc.C) { - s.testExecuteRequeueRebootError(c, (operation.Factory).NewRetryHook) -} - -func (s *RunHookSuite) testExecuteRebootError(c *gc.C, newHook newHook) { - runErr := runner.ErrReboot - op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, newHook, hooks.ConfigChanged, runErr) +func (s *RunHookSuite) TestExecuteRebootError(c *gc.C) { + runErr := context.ErrReboot + op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, (operation.Factory).NewRunHook, hooks.ConfigChanged, runErr) _, err := op.Prepare(operation.State{}) c.Assert(err, jc.ErrorIsNil) @@ -325,17 +257,9 @@ c.Assert(callbacks.MockNotifyHookFailed.gotName, gc.IsNil) } -func (s *RunHookSuite) TestExecuteRebootError_Run(c *gc.C) { - s.testExecuteRebootError(c, (operation.Factory).NewRunHook) -} - -func (s *RunHookSuite) TestExecuteRebootError_Retry(c *gc.C) { - s.testExecuteRebootError(c, (operation.Factory).NewRetryHook) -} - -func (s *RunHookSuite) testExecuteOtherError(c *gc.C, newHook newHook) { +func (s *RunHookSuite) TestExecuteOtherError(c *gc.C) { runErr := errors.New("graaargh") - op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, newHook, hooks.ConfigChanged, runErr) + op, callbacks, runnerFactory := s.getExecuteRunnerTest(c, (operation.Factory).NewRunHook, hooks.ConfigChanged, runErr) _, err := op.Prepare(operation.State{}) c.Assert(err, jc.ErrorIsNil) @@ -348,18 +272,10 @@ c.Assert(callbacks.MockNotifyHookCompleted.gotName, gc.IsNil) } -func (s *RunHookSuite) TestExecuteOtherError_Run(c *gc.C) { - s.testExecuteOtherError(c, (operation.Factory).NewRunHook) -} - -func (s *RunHookSuite) TestExecuteOtherError_Retry(c *gc.C) { - s.testExecuteOtherError(c, (operation.Factory).NewRetryHook) -} - func (s *RunHookSuite) testExecuteSuccess( - c *gc.C, newHook newHook, before, after operation.State, setStatusCalled bool, + c *gc.C, before, after operation.State, setStatusCalled bool, ) { - op, callbacks, f := s.getExecuteRunnerTest(c, newHook, hooks.ConfigChanged, nil) + op, callbacks, f := s.getExecuteRunnerTest(c, (operation.Factory).NewRunHook, hooks.ConfigChanged, nil) f.MockNewHookRunner.runner.MockRunHook.setStatusCalled = setStatusCalled midState, err := op.Prepare(before) c.Assert(err, jc.ErrorIsNil) @@ -372,52 +288,36 @@ } func (s *RunHookSuite) TestExecuteSuccess_BlankSlate(c *gc.C) { - for i, newHook := range []newHook{ - (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, - } { - c.Logf("variant %d", i) - s.testExecuteSuccess(c, - newHook, - operation.State{}, - operation.State{ - Kind: operation.RunHook, - Step: operation.Done, - Hook: &hook.Info{Kind: hooks.ConfigChanged}, - StatusSet: true, - }, - true, - ) - } + s.testExecuteSuccess(c, + operation.State{}, + operation.State{ + Kind: operation.RunHook, + Step: operation.Done, + Hook: &hook.Info{Kind: hooks.ConfigChanged}, + StatusSet: true, + }, + true, + ) } func (s *RunHookSuite) TestExecuteSuccess_Preserve(c *gc.C) { - for i, newHook := range []newHook{ - (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, - } { - c.Logf("variant %d", i) - s.testExecuteSuccess(c, 
- newHook, - overwriteState, - operation.State{ - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - Kind: operation.RunHook, - Step: operation.Done, - Hook: &hook.Info{Kind: hooks.ConfigChanged}, - StatusSet: true, - }, - true, - ) - } + s.testExecuteSuccess(c, + overwriteState, + operation.State{ + Started: true, + Kind: operation.RunHook, + Step: operation.Done, + Hook: &hook.Info{Kind: hooks.ConfigChanged}, + StatusSet: true, + }, + true, + ) } func (s *RunHookSuite) testExecuteThenCharmStatus( - c *gc.C, newHook newHook, before, after operation.State, kind hooks.Kind, setStatusCalled bool, + c *gc.C, before, after operation.State, kind hooks.Kind, setStatusCalled bool, ) { - op, _, f := s.getExecuteRunnerTest(c, newHook, kind, nil) + op, _, f := s.getExecuteRunnerTest(c, (operation.Factory).NewRunHook, kind, nil) f.MockNewHookRunner.runner.MockRunHook.setStatusCalled = setStatusCalled midState, err := op.Prepare(before) c.Assert(err, jc.ErrorIsNil) @@ -487,38 +387,23 @@ func (s *RunHookSuite) TestBeforeHookStatus(c *gc.C) { for _, kind := range hooks.UnitHooks() { c.Logf("hook %v", kind) - for i, newHook := range []newHook{ - (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, - } { - c.Logf("variant %d", i) - s.testBeforeHookExecute(c, newHook, kind) - } + s.testBeforeHookExecute(c, (operation.Factory).NewRunHook, kind) } } func (s *RunHookSuite) testExecuteHookWithSetStatus(c *gc.C, kind hooks.Kind, setStatusCalled bool) { - for i, newHook := range []newHook{ - (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, - } { - c.Logf("variant %d", i) - s.testExecuteThenCharmStatus(c, - newHook, - overwriteState, - operation.State{ - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - Kind: operation.RunHook, - Step: operation.Done, - Hook: &hook.Info{Kind: kind}, - StatusSet: setStatusCalled, - }, - kind, - setStatusCalled, - ) - } + s.testExecuteThenCharmStatus(c, + overwriteState, + operation.State{ + Started: true, + Kind: operation.RunHook, + Step: operation.Done, + Hook: &hook.Info{Kind: kind}, + StatusSet: setStatusCalled, + }, + kind, + setStatusCalled, + ) } func (s *RunHookSuite) TestExecuteHookWithSetStatus(c *gc.C) { @@ -548,10 +433,6 @@ s.testCommitError(c, (operation.Factory).NewRunHook) } -func (s *RunHookSuite) TestCommitError_Retry(c *gc.C) { - s.testCommitError(c, (operation.Factory).NewRetryHook) -} - func (s *RunHookSuite) TestCommitError_Skip(c *gc.C) { s.testCommitError(c, (operation.Factory).NewSkipHook) } @@ -574,7 +455,6 @@ func (s *RunHookSuite) TestCommitSuccess_ConfigChanged_QueueStartHook(c *gc.C) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -594,7 +474,6 @@ func (s *RunHookSuite) TestCommitSuccess_ConfigChanged_Preserve(c *gc.C) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -603,11 +482,9 @@ hook.Info{Kind: hooks.ConfigChanged}, overwriteState, operation.State{ - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - Kind: operation.Continue, - Step: operation.Pending, + Started: true, + Kind: operation.Continue, + Step: operation.Pending, }, ) } @@ -616,7 +493,6 @@ func (s *RunHookSuite) TestCommitSuccess_Start_SetStarted(c *gc.C) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - 
(operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -636,7 +512,6 @@ func (s *RunHookSuite) TestCommitSuccess_Start_Preserve(c *gc.C) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -645,11 +520,9 @@ hook.Info{Kind: hooks.Start}, overwriteState, operation.State{ - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - Kind: operation.Continue, - Step: operation.Pending, + Started: true, + Kind: operation.Continue, + Step: operation.Pending, }, ) } @@ -658,7 +531,6 @@ func (s *RunHookSuite) testQueueHook_BlankSlate(c *gc.C, cause hooks.Kind) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -686,7 +558,6 @@ func (s *RunHookSuite) testQueueHook_Preserve(c *gc.C, cause hooks.Kind) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -702,13 +573,11 @@ hook.Info{Kind: cause}, overwriteState, operation.State{ - Kind: operation.RunHook, - Step: operation.Queued, - Started: true, - Stopped: cause == hooks.Stop, - Hook: hi, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.RunHook, + Step: operation.Queued, + Started: true, + Stopped: cause == hooks.Stop, + Hook: hi, }, ) } @@ -725,7 +594,6 @@ func (s *RunHookSuite) testQueueNothing_BlankSlate(c *gc.C, hookInfo hook.Info) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -734,9 +602,10 @@ hookInfo, operation.State{}, operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - Stopped: hookInfo.Kind == hooks.Stop, + Installed: hookInfo.Kind == hooks.Install, + Kind: operation.Continue, + Step: operation.Pending, + Stopped: hookInfo.Kind == hooks.Stop, }, ) } @@ -745,7 +614,6 @@ func (s *RunHookSuite) testQueueNothing_Preserve(c *gc.C, hookInfo hook.Info) { for i, newHook := range []newHook{ (operation.Factory).NewRunHook, - (operation.Factory).NewRetryHook, (operation.Factory).NewSkipHook, } { c.Logf("variant %d", i) @@ -754,12 +622,11 @@ hookInfo, overwriteState, operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - Started: true, - Stopped: hookInfo.Kind == hooks.Stop, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, + Kind: operation.Continue, + Step: operation.Pending, + Installed: hookInfo.Kind == hooks.Install, + Started: true, + Stopped: hookInfo.Kind == hooks.Stop, }, ) } @@ -843,90 +710,6 @@ }) } -func (s *RunHookSuite) testCommitSuccess_UpdateStatusTime(c *gc.C, newHook newHook) { - callbacks := &CommitHookCallbacks{ - MockCommitHook: &MockCommitHook{}, - } - factory := operation.NewFactory(operation.FactoryParams{ - Callbacks: callbacks, - }) - op, err := newHook(factory, hook.Info{Kind: hooks.UpdateStatus}) - c.Assert(err, jc.ErrorIsNil) - - nowBefore := time.Now().Unix() - newState, err := op.Commit(overwriteState) - c.Assert(err, jc.ErrorIsNil) - - nowAfter := time.Now().Unix() - nowWritten := newState.UpdateStatusTime - c.Logf("%d <= %d <= %d", nowBefore, nowWritten, nowAfter) - c.Check(nowBefore <= nowWritten, jc.IsTrue) - c.Check(nowWritten <= nowAfter, jc.IsTrue) - - // Check the other fields match. 
- newState.UpdateStatusTime = 0 - c.Check(newState, gc.DeepEquals, &operation.State{ - Started: true, - Kind: operation.Continue, - Step: operation.Pending, - CollectMetricsTime: 1234567, - }) -} - -func (s *RunHookSuite) TestCommitSuccess_UpdateStatusTime_Run(c *gc.C) { - s.testCommitSuccess_UpdateStatusTime(c, (operation.Factory).NewRunHook) -} - -func (s *RunHookSuite) TestCommitSuccess_UpdateStatusTime_Retry(c *gc.C) { - s.testCommitSuccess_UpdateStatusTime(c, (operation.Factory).NewRetryHook) -} - -func (s *RunHookSuite) TestCommitSuccess_UpdateStatusTime_Skip(c *gc.C) { - s.testCommitSuccess_UpdateStatusTime(c, (operation.Factory).NewSkipHook) -} - -func (s *RunHookSuite) testCommitSuccess_CollectMetricsTime(c *gc.C, newHook newHook) { - callbacks := &CommitHookCallbacks{ - MockCommitHook: &MockCommitHook{}, - } - factory := operation.NewFactory(operation.FactoryParams{ - Callbacks: callbacks, - }) - op, err := newHook(factory, hook.Info{Kind: hooks.CollectMetrics}) - c.Assert(err, jc.ErrorIsNil) - - nowBefore := time.Now().Unix() - newState, err := op.Commit(overwriteState) - c.Assert(err, jc.ErrorIsNil) - - nowAfter := time.Now().Unix() - nowWritten := newState.CollectMetricsTime - c.Logf("%d <= %d <= %d", nowBefore, nowWritten, nowAfter) - c.Check(nowBefore <= nowWritten, jc.IsTrue) - c.Check(nowWritten <= nowAfter, jc.IsTrue) - - // Check the other fields match. - newState.CollectMetricsTime = 0 - c.Check(newState, gc.DeepEquals, &operation.State{ - Started: true, - Kind: operation.Continue, - Step: operation.Pending, - UpdateStatusTime: 1234567, - }) -} - -func (s *RunHookSuite) TestCommitSuccess_CollectMetricsTime_Run(c *gc.C) { - s.testCommitSuccess_CollectMetricsTime(c, (operation.Factory).NewRunHook) -} - -func (s *RunHookSuite) TestCommitSuccess_CollectMetricsTime_Retry(c *gc.C) { - s.testCommitSuccess_CollectMetricsTime(c, (operation.Factory).NewRetryHook) -} - -func (s *RunHookSuite) TestCommitSuccess_CollectMetricsTime_Skip(c *gc.C) { - s.testCommitSuccess_CollectMetricsTime(c, (operation.Factory).NewSkipHook) -} - func (s *RunHookSuite) testNeedsGlobalMachineLock(c *gc.C, newHook newHook, expected bool) { factory := operation.NewFactory(operation.FactoryParams{}) op, err := newHook(factory, hook.Info{Kind: hooks.ConfigChanged}) @@ -938,10 +721,6 @@ s.testNeedsGlobalMachineLock(c, (operation.Factory).NewRunHook, true) } -func (s *RunHookSuite) TestNeedsGlobalMachineLock_Retry(c *gc.C) { - s.testNeedsGlobalMachineLock(c, (operation.Factory).NewRetryHook, true) -} - func (s *RunHookSuite) TestNeedsGlobalMachineLock_Skip(c *gc.C) { s.testNeedsGlobalMachineLock(c, (operation.Factory).NewSkipHook, false) } === modified file 'src/github.com/juju/juju/worker/uniter/operation/state.go' --- src/github.com/juju/juju/worker/uniter/operation/state.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/state.go 2016-03-22 15:18:22 +0000 @@ -5,11 +5,10 @@ import ( "os" - "time" "github.com/juju/errors" "github.com/juju/utils" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/worker/uniter/hook" ) @@ -66,6 +65,9 @@ // Stopped indicates whether the stop hook has run. Stopped bool `yaml:"stopped"` + // Installed indicates whether the install hook has run. + Installed bool `yaml:"installed"` + // StatusSet indicates whether the charm being deployed has ever invoked // the status-set hook tool. 
StatusSet bool `yaml:"status-set"` @@ -91,19 +93,6 @@ // Charm describes the charm being deployed by an Install or Upgrade // operation, and is otherwise blank. CharmURL *charm.URL `yaml:"charm,omitempty"` - - // CollectMetricsTime records the time the collect metrics hook was last run. - // It's set to nil if the hook was not run at all. Recording time as int64 - // because the yaml encoder cannot encode the time.Time struct. - CollectMetricsTime int64 `yaml:"collectmetricstime,omitempty"` - - // SendMetricsTime records the time when metrics were last sent to the - // state server (see also CollectMetricsTime). - SendMetricsTime int64 `yaml:"sendmetricstime,omitempty"` - - // UpdateStatusTime records the time the update status hook was last run. - // It's set to nil if the hook was not run at all. - UpdateStatusTime int64 `yaml:"updatestatustime,omitempty"` } // validate returns an error if the state violates expectations. @@ -114,7 +103,7 @@ hasCharm := st.CharmURL != nil switch st.Kind { case Install: - if hasHook { + if st.Installed { return errors.New("unexpected hook info with Kind Install") } fallthrough @@ -168,10 +157,6 @@ return nil } -func (st State) CollectedMetricsAt() time.Time { - return time.Unix(st.CollectMetricsTime, 0) -} - // stateChange is useful for a variety of Operation implementations. type stateChange struct { Kind Kind @@ -220,7 +205,7 @@ // Write stores the supplied state to the file. func (f *StateFile) Write(st *State) error { if err := st.validate(); err != nil { - panic(err) + return errors.Trace(err) } return utils.WriteYaml(f.path, st) } === modified file 'src/github.com/juju/juju/worker/uniter/operation/state_test.go' --- src/github.com/juju/juju/worker/uniter/operation/state_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/state_test.go 2016-03-22 15:18:22 +0000 @@ -9,8 +9,8 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" @@ -44,9 +44,11 @@ // Install operation. 
{ st: operation.State{ - Kind: operation.Install, - Step: operation.Pending, - Hook: &hook.Info{Kind: hooks.ConfigChanged}, + Kind: operation.Install, + Installed: true, + Step: operation.Pending, + CharmURL: stcurl, + Hook: &hook.Info{Kind: hooks.ConfigChanged}, }, err: `unexpected hook info with Kind Install`, }, { @@ -182,10 +184,9 @@ err: `unexpected action id`, }, { st: operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - CollectMetricsTime: 98765432, - Leader: true, + Kind: operation.Continue, + Step: operation.Pending, + Leader: true, }, }, } @@ -197,19 +198,18 @@ file := operation.NewStateFile(path) _, err := file.Read() c.Assert(err, gc.Equals, operation.ErrNoStateFile) - write := func() { - err := file.Write(&t.st) + + err = file.Write(&t.st) + if t.err == "" { c.Assert(err, jc.ErrorIsNil) - } - if t.err != "" { - c.Assert(write, gc.PanicMatches, "invalid operation state: "+t.err) + } else { + c.Assert(err, gc.ErrorMatches, "invalid operation state: "+t.err) err := utils.WriteYaml(path, &t.st) c.Assert(err, jc.ErrorIsNil) _, err = file.Read() c.Assert(err, gc.ErrorMatches, `cannot read ".*": invalid operation state: `+t.err) continue } - write() st, err := file.Read() c.Assert(err, jc.ErrorIsNil) c.Assert(st, jc.DeepEquals, &t.st) === modified file 'src/github.com/juju/juju/worker/uniter/operation/util_test.go' --- src/github.com/juju/juju/worker/uniter/operation/util_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/operation/util_test.go 2016-03-22 15:18:22 +0000 @@ -7,13 +7,14 @@ "github.com/juju/errors" "github.com/juju/testing" utilexec "github.com/juju/utils/exec" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/charm" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/operation" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" "github.com/juju/juju/worker/uniter/runner/jujuc" ) @@ -42,7 +43,6 @@ operation.Callbacks *MockGetArchiveInfo *MockSetCurrentCharm - MockClearResolvedFlag *MockNoArgs MockInitializeMetricsTimers *MockNoArgs } @@ -54,10 +54,6 @@ return cb.MockSetCurrentCharm.Call(charmURL) } -func (cb *DeployCallbacks) ClearResolvedFlag() error { - return cb.MockClearResolvedFlag.Call() -} - func (cb *DeployCallbacks) InitializeMetricsTimers() error { return cb.MockInitializeMetricsTimers.Call() } @@ -163,18 +159,13 @@ type PrepareHookCallbacks struct { operation.Callbacks *MockPrepareHook - MockClearResolvedFlag *MockNoArgs - executingMessage string + executingMessage string } func (cb *PrepareHookCallbacks) PrepareHook(hookInfo hook.Info) (string, error) { return cb.MockPrepareHook.Call(hookInfo) } -func (cb *PrepareHookCallbacks) ClearResolvedFlag() error { - return cb.MockClearResolvedFlag.Call() -} - func (cb *PrepareHookCallbacks) SetExecutingStatus(message string) error { cb.executingMessage = message return nil @@ -265,12 +256,12 @@ } type MockNewCommandRunner struct { - gotInfo *runner.CommandInfo + gotInfo *context.CommandInfo runner *MockRunner err error } -func (mock *MockNewCommandRunner) Call(info runner.CommandInfo) (runner.Runner, error) { +func (mock *MockNewCommandRunner) Call(info context.CommandInfo) (runner.Runner, error) { mock.gotInfo = &info return mock.runner, mock.err } @@ -289,19 +280,19 @@ return f.MockNewHookRunner.Call(hookInfo) } -func (f *MockRunnerFactory) 
NewCommandRunner(commandInfo runner.CommandInfo) (runner.Runner, error) { +func (f *MockRunnerFactory) NewCommandRunner(commandInfo context.CommandInfo) (runner.Runner, error) { return f.MockNewCommandRunner.Call(commandInfo) } type MockContext struct { runner.Context testing.Stub - actionData *runner.ActionData + actionData *context.ActionData setStatusCalled bool status jujuc.StatusInfo } -func (mock *MockContext) ActionData() (*runner.ActionData, error) { +func (mock *MockContext) ActionData() (*context.ActionData, error) { if mock.actionData == nil { return nil, errors.New("not an action context") } @@ -322,6 +313,10 @@ return nil } +func (mock *MockContext) UnitName() string { + return "unit/0" +} + func (mock *MockContext) UnitStatus() (*jujuc.StatusInfo, error) { return &mock.status, nil } @@ -389,9 +384,8 @@ func NewDeployCallbacks() *DeployCallbacks { return &DeployCallbacks{ - MockGetArchiveInfo: &MockGetArchiveInfo{info: &MockBundleInfo{}}, - MockSetCurrentCharm: &MockSetCurrentCharm{}, - MockClearResolvedFlag: &MockNoArgs{}, + MockGetArchiveInfo: &MockGetArchiveInfo{info: &MockBundleInfo{}}, + MockSetCurrentCharm: &MockSetCurrentCharm{}, } } @@ -411,8 +405,7 @@ func NewPrepareHookCallbacks() *PrepareHookCallbacks { return &PrepareHookCallbacks{ - MockPrepareHook: &MockPrepareHook{nil, "some-hook-name", nil}, - MockClearResolvedFlag: &MockNoArgs{}, + MockPrepareHook: &MockPrepareHook{nil, "some-hook-name", nil}, } } @@ -422,7 +415,7 @@ runner: &MockRunner{ MockRunAction: &MockRunAction{err: runErr}, context: &MockContext{ - actionData: &runner.ActionData{Name: "some-action-name"}, + actionData: &context.ActionData{Name: "some-action-name"}, }, }, }, @@ -465,14 +458,12 @@ var someActionId = "f47ac10b-58cc-4372-a567-0e02b2c3d479" var randomActionId = "9f484882-2f18-4fd2-967d-db9663db7bea" var overwriteState = operation.State{ - Kind: operation.Continue, - Step: operation.Pending, - Started: true, - CollectMetricsTime: 1234567, - UpdateStatusTime: 1234567, - CharmURL: curl("cs:quantal/wordpress-2"), - ActionId: &randomActionId, - Hook: &hook.Info{Kind: hooks.Install}, + Kind: operation.Continue, + Step: operation.Pending, + Started: true, + CharmURL: curl("cs:quantal/wordpress-2"), + ActionId: &randomActionId, + Hook: &hook.Info{Kind: hooks.Install}, } var someCommandArgs = operation.CommandArgs{ Commands: "do something", === modified file 'src/github.com/juju/juju/worker/uniter/package_test.go' --- src/github.com/juju/juju/worker/uniter/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/package_test.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,9 @@ import ( stdtesting "testing" - coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing" ) func TestPackage(t *stdtesting.T) { - coretesting.MgoTestPackage(t) + testing.MgoTestPackage(t) } === modified file 'src/github.com/juju/juju/worker/uniter/paths.go' --- src/github.com/juju/juju/worker/uniter/paths.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/paths.go 2016-03-22 15:18:22 +0000 @@ -9,9 +9,9 @@ "path/filepath" "github.com/juju/names" + "github.com/juju/utils/os" "github.com/juju/juju/agent/tools" - "github.com/juju/juju/version" ) // Paths represents the set of filesystem paths a uniter worker has reason to @@ -108,16 +108,29 @@ // NewPaths returns the set of filesystem paths that the supplied unit should // use, given the supplied root juju data directory path. 
func NewPaths(dataDir string, unitTag names.UnitTag) Paths { + return NewWorkerPaths(dataDir, unitTag, "") +}
+// NewWorkerPaths returns the set of filesystem paths that the supplied unit worker should +// use, given the supplied root juju data directory path and worker identifier. +// Distinct worker identifiers ensure that runtime paths of different workers do not interfere. +func NewWorkerPaths(dataDir string, unitTag names.UnitTag, worker string) Paths { join := filepath.Join baseDir := join(dataDir, "agents", unitTag.String()) stateDir := join(baseDir, "state") socket := func(name string, abstract bool) string { - if version.Current.OS == version.Windows { - return fmt.Sprintf(`\\.\pipe\%s-%s`, unitTag, name) + if os.HostOS() == os.Windows { + base := fmt.Sprintf("%s", unitTag) + if worker != "" { + base = fmt.Sprintf("%s-%s", unitTag, worker) + } + return fmt.Sprintf(`\\.\pipe\%s-%s`, base, name) } path := join(baseDir, name+".socket") + if worker != "" { + path = join(baseDir, fmt.Sprintf("%s-%s.socket", worker, name)) + } if abstract { path = "@" + path } === modified file 'src/github.com/juju/juju/worker/uniter/paths_test.go' --- src/github.com/juju/juju/worker/uniter/paths_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/paths_test.go 2016-03-22 15:18:22 +0000 @@ -9,9 +9,9 @@ "github.com/juju/names" "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/os" gc "gopkg.in/check.v1" - "github.com/juju/juju/version" "github.com/juju/juju/worker/uniter" ) @@ -29,7 +29,7 @@ } func (s *PathsSuite) TestWindows(c *gc.C) { - s.PatchValue(&version.Current.OS, version.Windows) + s.PatchValue(&os.HostOS, func() os.OSType { return os.Windows }) dataDir := c.MkDir() unitTag := names.NewUnitTag("some-service/323") @@ -56,8 +56,37 @@ }) } +func (s *PathsSuite) TestWorkerPathsWindows(c *gc.C) { + s.PatchValue(&os.HostOS, func() os.OSType { return os.Windows }) + + dataDir := c.MkDir() + unitTag := names.NewUnitTag("some-service/323") + worker := "some-worker" + paths := uniter.NewWorkerPaths(dataDir, unitTag, worker) + + relData := relPathFunc(dataDir) + relAgent := relPathFunc(relData("agents", "unit-some-service-323")) + c.Assert(paths, jc.DeepEquals, uniter.Paths{ + ToolsDir: relData("tools/unit-some-service-323"), + Runtime: uniter.RuntimePaths{ + JujuRunSocket: `\\.\pipe\unit-some-service-323-some-worker-run`, + JujucServerSocket: `\\.\pipe\unit-some-service-323-some-worker-agent`, + }, + State: uniter.StatePaths{ + BaseDir: relAgent(), + CharmDir: relAgent("charm"), + OperationsFile: relAgent("state", "uniter"), + RelationsDir: relAgent("state", "relations"), + BundlesDir: relAgent("state", "bundles"), + DeployerDir: relAgent("state", "deployer"), + StorageDir: relAgent("state", "storage"), + MetricsSpoolDir: relAgent("state", "spool", "metrics"), + }, + }) +} + func (s *PathsSuite) TestOther(c *gc.C) { - s.PatchValue(&version.Current.OS, version.OSType(-1)) + s.PatchValue(&os.HostOS, func() os.OSType { return os.Unknown }) dataDir := c.MkDir() unitTag := names.NewUnitTag("some-service/323") @@ -84,6 +113,35 @@ }) } +func (s *PathsSuite) TestWorkerPaths(c *gc.C) { + s.PatchValue(&os.HostOS, func() os.OSType { return os.Unknown }) + + dataDir := c.MkDir() + unitTag := names.NewUnitTag("some-service/323") + worker := "worker-id" + paths := uniter.NewWorkerPaths(dataDir, unitTag, worker) + + relData := relPathFunc(dataDir) + relAgent := relPathFunc(relData("agents", "unit-some-service-323")) + c.Assert(paths, jc.DeepEquals, 
uniter.Paths{ + ToolsDir: relData("tools/unit-some-service-323"), + Runtime: uniter.RuntimePaths{ + JujuRunSocket: relAgent(worker + "-run.socket"), + JujucServerSocket: "@" + relAgent(worker+"-agent.socket"), + }, + State: uniter.StatePaths{ + BaseDir: relAgent(), + CharmDir: relAgent("charm"), + OperationsFile: relAgent("state", "uniter"), + RelationsDir: relAgent("state", "relations"), + BundlesDir: relAgent("state", "bundles"), + DeployerDir: relAgent("state", "deployer"), + StorageDir: relAgent("state", "storage"), + MetricsSpoolDir: relAgent("state", "spool", "metrics"), + }, + }) +} + func (s *PathsSuite) TestContextInterface(c *gc.C) { paths := uniter.Paths{ ToolsDir: "/path/to/tools", === removed file 'src/github.com/juju/juju/worker/uniter/relation/dyingsource.go' --- src/github.com/juju/juju/worker/uniter/relation/dyingsource.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/dyingsource.go 1970-01-01 00:00:00 +0000 @@ -1,52 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package relation - -import ( - "sort" - - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/worker/uniter/hook" -) - -// NewDyingHookSource returns a new hook.Source that generates all hooks -// necessary to clean up the supplied initial relation hook state, while -// preserving the guarantees Juju makes about hook execution order. -func NewDyingHookSource(initial *State) hook.Source { - var list []hook.Info - - // Honour any expected relation-changed hook. - if initial.ChangedPending != "" { - list = append(list, hook.Info{ - Kind: hooks.RelationChanged, - RelationId: initial.RelationId, - RemoteUnit: initial.ChangedPending, - ChangeVersion: initial.Members[initial.ChangedPending], - }) - } - - // Depart in consistent order, mainly for testing purposes. - departs := []string{} - for name := range initial.Members { - departs = append(departs, name) - } - sort.Strings(departs) - for _, name := range departs { - list = append(list, hook.Info{ - Kind: hooks.RelationDeparted, - RelationId: initial.RelationId, - RemoteUnit: name, - ChangeVersion: initial.Members[name], - }) - } - - // Finally break the relation. - list = append(list, hook.Info{ - Kind: hooks.RelationBroken, - RelationId: initial.RelationId, - }) - - return hook.NewListSource(list) -} === removed file 'src/github.com/juju/juju/worker/uniter/relation/dyingsource_test.go' --- src/github.com/juju/juju/worker/uniter/relation/dyingsource_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/dyingsource_test.go 1970-01-01 00:00:00 +0000 @@ -1,27 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package relation_test - -import ( - gc "gopkg.in/check.v1" - - "github.com/juju/juju/worker/uniter/relation" -) - -type DyingSourceSuite struct{} - -var _ = gc.Suite(&DyingSourceSuite{}) - -func (s *DyingSourceSuite) TestDyingHookSource(c *gc.C) { - for i, t := range dyingHookQueueTests { - c.Logf("test %d: %s", i, t.summary) - q := relation.NewDyingHookSource(t.initial) - for i, step := range t.steps { - c.Logf(" step %d", i) - step.checkDirect(c, q) - } - expect{}.checkDirect(c, q) - q.Stop() - } -} === removed file 'src/github.com/juju/juju/worker/uniter/relation/hookqueue.go' --- src/github.com/juju/juju/worker/uniter/relation/hookqueue.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/hookqueue.go 1970-01-01 00:00:00 +0000 @@ -1,37 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package relation - -import ( - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/worker/uniter/hook" -) - -// HookQueue exists to keep the package interface stable. -type HookQueue interface { - hook.Sender -} - -// NewAliveHookQueue exists to keep the package interface stable; it wraps the -// result of NewLiveHookSource in a HookSender. -func NewAliveHookQueue(initial *State, out chan<- hook.Info, w RelationUnitsWatcher) HookQueue { - source := NewLiveHookSource(initial, w) - return hook.NewSender(out, source) -} - -// NewDyingHookQueue exists to keep the package interface stable; it wraps the -// result of NewDyingHookSource in a HookSender. -func NewDyingHookQueue(initial *State, out chan<- hook.Info) HookQueue { - source := NewDyingHookSource(initial) - return hook.NewSender(out, source) -} - -// RelationUnitsWatcher produces RelationUnitsChange events until stopped, or -// until it encounters an error. It must not close its Changes channel without -// signalling an error via Stop and Err. -type RelationUnitsWatcher interface { - Err() error - Stop() error - Changes() <-chan multiwatcher.RelationUnitsChange -} === removed file 'src/github.com/juju/juju/worker/uniter/relation/hookqueue_test.go' --- src/github.com/juju/juju/worker/uniter/relation/hookqueue_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/hookqueue_test.go 1970-01-01 00:00:00 +0000 @@ -1,327 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package relation_test - -import ( - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/state/multiwatcher" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/relation" -) - -type HookQueueSuite struct{} - -var _ = gc.Suite(&HookQueueSuite{}) - -type msi map[string]int64 - -type hookQueueTest struct { - summary string - initial *relation.State - steps []checker -} - -func fullTest(summary string, steps ...checker) hookQueueTest { - return hookQueueTest{summary, &relation.State{21345, nil, ""}, steps} -} - -func reconcileTest(summary string, members msi, joined string, steps ...checker) hookQueueTest { - return hookQueueTest{summary, &relation.State{21345, members, joined}, steps} -} - -var aliveHookQueueTests = []hookQueueTest{ - fullTest( - "Empty initial change causes no hooks.", - send{nil, nil}, - ), fullTest( - "Joined and changed are both run when unit is first detected.", - send{msi{"u/0": 0}, nil}, - expect{hooks.RelationJoined, "u/0", 0}, - expect{hooks.RelationChanged, "u/0", 0}, - ), fullTest( - "Automatic changed is run with latest settings.", - send{msi{"u/0": 0}, nil}, - expect{hooks.RelationJoined, "u/0", 0}, - send{msi{"u/0": 7}, nil}, - expect{hooks.RelationChanged, "u/0", 7}, - ), fullTest( - "Joined is also run with latest settings.", - send{msi{"u/0": 0}, nil}, - send{msi{"u/0": 7}, nil}, - expect{hooks.RelationJoined, "u/0", 7}, - expect{hooks.RelationChanged, "u/0", 7}, - ), fullTest( - "Nothing happens if a unit departs before its joined is run.", - send{msi{"u/0": 0}, nil}, - send{msi{"u/0": 7}, nil}, - send{nil, []string{"u/0"}}, - ), fullTest( - "A changed is run after a joined, even if a departed is known.", - send{msi{"u/0": 0}, nil}, - expect{hooks.RelationJoined, "u/0", 0}, - send{nil, []string{"u/0"}}, - expect{hooks.RelationChanged, "u/0", 0}, - expect{hooks.RelationDeparted, "u/0", 0}, - ), fullTest( - "A departed replaces a changed.", - send{msi{"u/0": 0}, nil}, - advance{2}, - send{msi{"u/0": 7}, nil}, - send{nil, []string{"u/0"}}, - expect{hooks.RelationDeparted, "u/0", 7}, - ), fullTest( - "Changed events are ignored if the version has not changed.", - send{msi{"u/0": 0}, nil}, - advance{2}, - send{msi{"u/0": 0}, nil}, - ), fullTest( - "Redundant changed events are elided.", - send{msi{"u/0": 0}, nil}, - advance{2}, - send{msi{"u/0": 3}, nil}, - send{msi{"u/0": 7}, nil}, - send{msi{"u/0": 79}, nil}, - expect{hooks.RelationChanged, "u/0", 79}, - ), fullTest( - "Latest hooks are run in the original unit order.", - send{msi{"u/0": 0, "u/1": 1}, nil}, - advance{4}, - send{msi{"u/0": 3}, nil}, - send{msi{"u/1": 7}, nil}, - send{nil, []string{"u/0"}}, - expect{hooks.RelationDeparted, "u/0", 3}, - expect{hooks.RelationChanged, "u/1", 7}, - ), fullTest( - "Test everything we can think of at the same time.", - send{msi{"u/0": 0, "u/1": 0, "u/2": 0, "u/3": 0, "u/4": 0}, nil}, - advance{6}, - // u/0, u/1, u/2 are now up to date; u/3, u/4 are untouched. - send{msi{"u/0": 1}, nil}, - send{msi{"u/1": 1, "u/2": 1, "u/3": 1, "u/5": 0}, []string{"u/0", "u/4"}}, - send{msi{"u/3": 2}, nil}, - // - Finish off the rest of the initial state, ignoring u/4, but using - // the latest known settings. 
- expect{hooks.RelationJoined, "u/3", 2}, - expect{hooks.RelationChanged, "u/3", 2}, - // - u/0 was queued for change by the first RUC, but this change is - // no longer relevant; it's departed in the second RUC, so we run - // that hook instead. - expect{hooks.RelationDeparted, "u/0", 1}, - // - Handle the remaining changes in the second RUC, still ignoring u/4. - // We do run new changed hooks for u/1 and u/2, because the latest settings - // are newer than those used in their original changed events. - expect{hooks.RelationChanged, "u/1", 1}, - expect{hooks.RelationChanged, "u/2", 1}, - expect{hooks.RelationJoined, "u/5", 0}, - expect{hooks.RelationChanged, "u/5", 0}, - // - Ignore the third RUC, because the original joined/changed on u/3 - // was executed after we got the latest settings version. - ), reconcileTest( - "Check that matching settings versions cause no changes.", - msi{"u/0": 0}, "", - send{msi{"u/0": 0}, nil}, - ), reconcileTest( - "Check that new settings versions cause appropriate changes.", - msi{"u/0": 0}, "", - send{msi{"u/0": 1}, nil}, - expect{hooks.RelationChanged, "u/0", 1}, - ), reconcileTest( - "Check that a just-joined unit gets its changed hook run first.", - msi{"u/0": 0}, "u/0", - send{msi{"u/0": 0}, nil}, - expect{hooks.RelationChanged, "u/0", 0}, - ), reconcileTest( - "Check that missing units are queued for depart as early as possible.", - msi{"u/0": 0}, "", - send{msi{"u/1": 0}, nil}, - expect{hooks.RelationDeparted, "u/0", 0}, - expect{hooks.RelationJoined, "u/1", 0}, - expect{hooks.RelationChanged, "u/1", 0}, - ), reconcileTest( - "Double-check that a pending changed happens before an injected departed.", - msi{"u/0": 0}, "u/0", - send{nil, nil}, - expect{hooks.RelationChanged, "u/0", 0}, - expect{hooks.RelationDeparted, "u/0", 0}, - ), reconcileTest( - "Check that missing units don't slip in front of required changed hooks.", - msi{"u/0": 0}, "u/0", - send{msi{"u/1": 0}, nil}, - expect{hooks.RelationChanged, "u/0", 0}, - expect{hooks.RelationDeparted, "u/0", 0}, - expect{hooks.RelationJoined, "u/1", 0}, - expect{hooks.RelationChanged, "u/1", 0}, - ), -} - -func (s *HookQueueSuite) TestAliveHookQueue(c *gc.C) { - for i, t := range aliveHookQueueTests[4:5] { - c.Logf("test %d: %s", i, t.summary) - out := make(chan hook.Info) - in := make(chan multiwatcher.RelationUnitsChange) - ruw := &RUW{in, false} - q := relation.NewAliveHookQueue(t.initial, out, ruw) - for i, step := range t.steps { - c.Logf(" step %d", i) - step.check(c, in, out) - } - expect{}.check(c, in, out) - q.Stop() - c.Assert(ruw.stopped, jc.IsTrue) - } -} - -var dyingHookQueueTests = []hookQueueTest{ - fullTest( - "Empty state just gets a broken hook.", - expect{hook: hooks.RelationBroken}, - ), reconcileTest( - "Each current member is departed before broken is sent.", - msi{"u/1": 7, "u/4": 33}, "", - expect{hooks.RelationDeparted, "u/1", 7}, - expect{hooks.RelationDeparted, "u/4", 33}, - expect{hook: hooks.RelationBroken}, - ), reconcileTest( - "If there's a pending changed, that must still be respected.", - msi{"u/0": 3}, "u/0", - expect{hooks.RelationChanged, "u/0", 3}, - expect{hooks.RelationDeparted, "u/0", 3}, - expect{hook: hooks.RelationBroken}, - ), -} - -func (s *HookQueueSuite) TestDyingHookQueue(c *gc.C) { - for i, t := range dyingHookQueueTests { - c.Logf("test %d: %s", i, t.summary) - out := make(chan hook.Info) - q := relation.NewDyingHookQueue(t.initial, out) - for i, step := range t.steps { - c.Logf(" step %d", i) - step.check(c, nil, out) - } - expect{}.check(c, nil, 
out) - q.Stop() - } -} - -// RUW exists entirely to send RelationUnitsChanged events to a tested -// HookQueue in a synchronous and predictable fashion. -type RUW struct { - in chan multiwatcher.RelationUnitsChange - stopped bool -} - -func (w *RUW) Changes() <-chan multiwatcher.RelationUnitsChange { - return w.in -} - -func (w *RUW) Stop() error { - close(w.in) - w.stopped = true - return nil -} - -func (w *RUW) Err() error { - return nil -} - -type checker interface { - check(c *gc.C, in chan multiwatcher.RelationUnitsChange, out chan hook.Info) - checkDirect(c *gc.C, q hook.Source) -} - -type send struct { - changed msi - departed []string -} - -func (d send) event() multiwatcher.RelationUnitsChange { - ruc := multiwatcher.RelationUnitsChange{Changed: map[string]multiwatcher.UnitSettings{}} - for name, version := range d.changed { - ruc.Changed[name] = multiwatcher.UnitSettings{Version: version} - } - for _, name := range d.departed { - ruc.Departed = append(ruc.Departed, name) - } - return ruc -} - -func (d send) check(c *gc.C, in chan multiwatcher.RelationUnitsChange, out chan hook.Info) { - in <- d.event() -} - -func (d send) checkDirect(c *gc.C, q hook.Source) { - q.(interface { - Update(change multiwatcher.RelationUnitsChange) error - }).Update(d.event()) -} - -type advance struct { - count int -} - -func (d advance) check(c *gc.C, in chan multiwatcher.RelationUnitsChange, out chan hook.Info) { - for i := 0; i < d.count; i++ { - select { - case <-out: - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting for event %d", i) - } - } -} - -func (d advance) checkDirect(c *gc.C, q hook.Source) { - for i := 0; i < d.count; i++ { - c.Assert(q.Empty(), jc.IsFalse) - q.Pop() - } -} - -type expect struct { - hook hooks.Kind - unit string - version int64 -} - -func (d expect) info() hook.Info { - return hook.Info{ - Kind: d.hook, - RelationId: 21345, - RemoteUnit: d.unit, - ChangeVersion: d.version, - } -} - -func (d expect) check(c *gc.C, in chan multiwatcher.RelationUnitsChange, out chan hook.Info) { - if d.hook == "" { - select { - case unexpected := <-out: - c.Fatalf("got %#v", unexpected) - case <-time.After(coretesting.ShortWait): - } - return - } - select { - case actual := <-out: - c.Assert(actual, jc.DeepEquals, d.info()) - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting for %#v", d.info()) - } -} - -func (d expect) checkDirect(c *gc.C, q hook.Source) { - if d.hook == "" { - c.Check(q.Empty(), jc.IsTrue) - } else { - c.Check(q.Empty(), jc.IsFalse) - c.Check(q.Next(), jc.DeepEquals, d.info()) - q.Pop() - } -} === removed file 'src/github.com/juju/juju/worker/uniter/relation/livesource.go' --- src/github.com/juju/juju/worker/uniter/relation/livesource.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/livesource.go 1970-01-01 00:00:00 +0000 @@ -1,292 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package relation - -import ( - "sort" - "sync" - - "github.com/juju/errors" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/worker/uniter/hook" -) - -// liveSource maintains a minimal queue of hooks that need to be run to reflect -// relation state changes exposed via a RelationUnitsWatcher. -type liveSource struct { - relationId int - - // info holds information about all units that were added to the - // queue and haven't had a "relation-departed" event popped. 
This - // means the unit may be in info and not currently in the queue - // itself. - info map[string]*unitInfo - - // head and tail are the ends of the queue. - head, tail *unitInfo - - // changedPending, if not empty, indicates that the most recently - // popped event was a "relation-joined" for the named unit, and - // therefore that the next event must be a "relation-changed" - // for that same unit. - // If changedPending is not empty, the queue is considered non- - // empty, even if head is nil. - changedPending string - - started bool - watcher RelationUnitsWatcher - changes chan hook.SourceChange -} - -// unitInfo holds unit information for management by liveSource. -type unitInfo struct { - // unit holds the name of the unit. - unit string - - // version and settings hold the most recent settings known - // to the AliveHookQueue. - version int64 - - // joined is set to true when a "relation-joined" is popped for this unit. - joined bool - - // hookKind holds the current idea of the next hook that should - // be run for the unit, and is empty if and only if the unit - // is not queued. - hookKind hooks.Kind - - // prev and next define the position in the queue of the - // unit's next hook. - prev, next *unitInfo -} - -// NewLiveHookSource returns a new HookSource that aggregates the values -// obtained from the w watcher and generates the hooks that must be executed -// in the unit. It guarantees that the stream of hooks will respect the -// guarantees Juju makes about hook execution order. If any values have -// previously been received from w's Changes channel, the Source's -// behaviour is undefined. -func NewLiveHookSource(initial *State, w RelationUnitsWatcher) hook.Source { - info := map[string]*unitInfo{} - for unit, version := range initial.Members { - info[unit] = &unitInfo{ - unit: unit, - version: version, - joined: true, - } - } - s := &liveSource{ - watcher: w, - info: info, - relationId: initial.RelationId, - changedPending: initial.ChangedPending, - changes: make(chan hook.SourceChange), - } - go func() { - defer close(s.changes) - // w's out channel will be closed when the source is Stop()ped. - // We use a waitgroup to ensure the current change is processed - // before any more changes are accepted. - var wg sync.WaitGroup - for c := range w.Changes() { - wg.Add(1) - s.changes <- func() error { - defer wg.Done() - return s.Update(c) - } - wg.Wait() - } - }() - return s -} - -// Changes returns a channel sending a stream of hook.SourceChange events -// that need to be Applied in order for the source to function correctly. -// In particular, the first event represents the ideal state of the relation, -// and must be delivered for the source to be able to calculate the desired -// hooks. -func (q *liveSource) Changes() <-chan hook.SourceChange { - return q.changes -} - -// Stop cleans up the liveSource's resources and stops sending changes. -func (q *liveSource) Stop() error { - return q.watcher.Stop() -} - -// Update modifies the queue such that the hook.Info values it sends will -// reflect the supplied change. -func (q *liveSource) Update(change multiwatcher.RelationUnitsChange) error { - if !q.started { - q.started = true - // The first event represents the ideal final state of the system. - // If it contains any Departed notifications, it cannot be one of - // those -- most likely the watcher was not a fresh one -- and we're - // completely hosed. 
- if len(change.Departed) != 0 { - return errors.Errorf("hook source watcher sent bad event: %#v", change) - } - // Anyway, before we can generate actual hooks, we have to generate - // departed hooks for any previously-known members not reflected in - // the ideal state, and insert those at the head of the queue. The - // easiest way to do this is to inject a departure update for those - // missing members before processing the ideal state. - departs := multiwatcher.RelationUnitsChange{} - for unit := range q.info { - if _, found := change.Changed[unit]; !found { - departs.Departed = append(departs.Departed, unit) - } - } - q.update(departs) - } - q.update(change) - return nil -} - -// Empty returns true if the queue is empty. -func (q *liveSource) Empty() bool { - // If the first event has not yet been delivered, we cannot correctly - // determine the schedule, so we pretend to be empty rather than expose - // an incorrect hook. - if !q.started { - return true - } - return q.head == nil && q.changedPending == "" -} - -// Next returns the next hook.Info value to send. It will panic if the queue is -// empty. -func (q *liveSource) Next() hook.Info { - if q.Empty() { - panic("queue is empty") - } - var unit string - var kind hooks.Kind - if q.changedPending != "" { - unit = q.changedPending - kind = hooks.RelationChanged - } else { - unit = q.head.unit - kind = q.head.hookKind - } - version := q.info[unit].version - return hook.Info{ - Kind: kind, - RelationId: q.relationId, - RemoteUnit: unit, - ChangeVersion: version, - } -} - -// Pop advances the queue. It will panic if the queue is already empty. -func (q *liveSource) Pop() { - if q.Empty() { - panic("queue is empty") - } - if q.changedPending != "" { - if q.info[q.changedPending].hookKind == hooks.RelationChanged { - // We just ran this very hook; no sense keeping it queued. - q.unqueue(q.changedPending) - } - q.changedPending = "" - } else { - old := *q.head - q.unqueue(q.head.unit) - if old.hookKind == hooks.RelationJoined { - q.changedPending = old.unit - q.info[old.unit].joined = true - } else if old.hookKind == hooks.RelationDeparted { - delete(q.info, old.unit) - } - } -} - -func (q *liveSource) update(change multiwatcher.RelationUnitsChange) { - // Enforce consistent addition order, mainly for testing purposes. - changedUnits := []string{} - for unit := range change.Changed { - changedUnits = append(changedUnits, unit) - } - sort.Strings(changedUnits) - - for _, unit := range changedUnits { - settings := change.Changed[unit] - info, found := q.info[unit] - if !found { - info = &unitInfo{unit: unit} - q.info[unit] = info - q.queue(unit, hooks.RelationJoined) - } else if info.hookKind != hooks.RelationJoined { - if settings.Version != info.version { - q.queue(unit, hooks.RelationChanged) - } else { - q.unqueue(unit) - } - } - info.version = settings.Version - } - - for _, unit := range change.Departed { - if q.info[unit].hookKind == hooks.RelationJoined { - q.unqueue(unit) - } else { - q.queue(unit, hooks.RelationDeparted) - } - } -} - -// queue sets the next hook to be run for the named unit, and places it -// at the tail of the queue if it is not already queued. It will panic -// if the unit is not in q.info. -func (q *liveSource) queue(unit string, kind hooks.Kind) { - // If the unit is not in the queue, place it at the tail. - info := q.info[unit] - if info.hookKind == "" { - info.prev = q.tail - if q.tail != nil { - q.tail.next = info - } - q.tail = info - - // If the queue is empty, the tail is also the head. 
- if q.head == nil { - q.head = info - } - } - info.hookKind = kind -} - -// unqueue removes the named unit from the queue. It is fine to -// unqueue a unit that is not in the queue, but it will panic if -// the unit is not in q.info. -func (q *liveSource) unqueue(unit string) { - if q.head == nil { - // The queue is empty, nothing to do. - return - } - - // Get the unit info and clear its next action. - info := q.info[unit] - if info.hookKind == "" { - // The unit is not in the queue, nothing to do. - return - } - info.hookKind = "" - - // Update queue pointers. - if info.prev == nil { - q.head = info.next - } else { - info.prev.next = info.next - } - if info.next == nil { - q.tail = info.prev - } else { - info.next.prev = info.prev - } - info.prev = nil - info.next = nil -} === removed file 'src/github.com/juju/juju/worker/uniter/relation/livesource_test.go' --- src/github.com/juju/juju/worker/uniter/relation/livesource_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/livesource_test.go 1970-01-01 00:00:00 +0000 @@ -1,31 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package relation_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/state/multiwatcher" - "github.com/juju/juju/worker/uniter/relation" -) - -type LiveSourceSuite struct{} - -var _ = gc.Suite(&LiveSourceSuite{}) - -func (s *LiveSourceSuite) TestLiveHookSource(c *gc.C) { - for i, t := range aliveHookQueueTests { - c.Logf("test %d: %s", i, t.summary) - ruw := &RUW{make(chan multiwatcher.RelationUnitsChange), false} - q := relation.NewLiveHookSource(t.initial, ruw) - for i, step := range t.steps { - c.Logf(" step %d", i) - step.checkDirect(c, q) - } - expect{}.checkDirect(c, q) - q.Stop() - c.Assert(ruw.stopped, jc.IsTrue) - } -} === added file 'src/github.com/juju/juju/worker/uniter/relation/mock_test.go' --- src/github.com/juju/juju/worker/uniter/relation/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,44 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package relation_test + +import ( + "fmt" + + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" +) + +type mockOperations struct { + operation.Factory +} + +func (m *mockOperations) NewRunHook(hookInfo hook.Info) (operation.Operation, error) { + return &mockOperation{hookInfo}, nil +} + +type mockOperation struct { + hookInfo hook.Info +} + +func (m *mockOperation) String() string { + return fmt.Sprintf("run hook %v on unit with relation %d", + m.hookInfo.Kind, m.hookInfo.RelationId) +} + +func (m *mockOperation) NeedsGlobalMachineLock() bool { + return false +} + +func (m *mockOperation) Prepare(state operation.State) (*operation.State, error) { + return &state, nil +} + +func (m *mockOperation) Execute(state operation.State) (*operation.State, error) { + return &state, nil +} + +func (m *mockOperation) Commit(state operation.State) (*operation.State, error) { + return &state, nil +} === modified file 'src/github.com/juju/juju/worker/uniter/relation/package_test.go' --- src/github.com/juju/juju/worker/uniter/relation/package_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/package_test.go 2016-03-22 15:18:22 +0000 @@ -1,4 +1,4 @@ -// Copyright 2014 Canonical Ltd. +// Copyright 2014, 2015 Canonical Ltd. 
// Licensed under the AGPLv3, see LICENCE file for details. package relation_test @@ -6,9 +6,9 @@ import ( stdtesting "testing" - gc "gopkg.in/check.v1" + coretesting "github.com/juju/juju/testing" ) func TestPackage(t *stdtesting.T) { - gc.TestingT(t) + coretesting.MgoTestPackage(t) } === removed file 'src/github.com/juju/juju/worker/uniter/relation/relation.go' --- src/github.com/juju/juju/worker/uniter/relation/relation.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/relation.go 1970-01-01 00:00:00 +0000 @@ -1,234 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// relation implements persistent local storage of a unit's relation state, and -// translation of relation changes into hooks that need to be run. -package relation - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/juju/errors" - "github.com/juju/utils" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/worker/uniter/hook" -) - -// State describes the state of a relation. -type State struct { - // RelationId identifies the relation. - RelationId int - - // Members is a map from unit name to the last change version - // for which a hook.Info was delivered on the output channel. - Members map[string]int64 - - // ChangedPending indicates that a "relation-changed" hook for the given - // unit name must be the first hook.Info to be sent to the output channel. - ChangedPending string -} - -// copy returns an independent copy of the state. -func (s *State) copy() *State { - copy := &State{ - RelationId: s.RelationId, - ChangedPending: s.ChangedPending, - } - if s.Members != nil { - copy.Members = map[string]int64{} - for m, v := range s.Members { - copy.Members[m] = v - } - } - return copy -} - -// Validate returns an error if the supplied hook.Info does not represent -// a valid change to the relation state. Hooks must always be validated -// against the current state before they are run, to ensure that the system -// meets its guarantees about hook execution order. -func (s *State) Validate(hi hook.Info) (err error) { - defer errors.DeferredAnnotatef(&err, "inappropriate %q for %q", hi.Kind, hi.RemoteUnit) - if hi.RelationId != s.RelationId { - return fmt.Errorf("expected relation %d, got relation %d", s.RelationId, hi.RelationId) - } - if s.Members == nil { - return fmt.Errorf(`relation is broken and cannot be changed further`) - } - unit, kind := hi.RemoteUnit, hi.Kind - if kind == hooks.RelationBroken { - if len(s.Members) == 0 { - return nil - } - return fmt.Errorf(`cannot run "relation-broken" while units still present`) - } - if s.ChangedPending != "" { - if unit != s.ChangedPending || kind != hooks.RelationChanged { - return fmt.Errorf(`expected "relation-changed" for %q`, s.ChangedPending) - } - } else if _, joined := s.Members[unit]; joined && kind == hooks.RelationJoined { - return fmt.Errorf("unit already joined") - } else if !joined && kind != hooks.RelationJoined { - return fmt.Errorf("unit has not joined") - } - return nil -} - -// StateDir is a filesystem-backed representation of the state of a -// relation. Concurrent modifications to the underlying state directory -// will have undefined consequences. -type StateDir struct { - // path identifies the directory holding persistent state. 
- path string
-
- // state is the cached state of the directory, which is guaranteed
- // to be synchronized with the true state so long as no concurrent
- // changes are made to the directory.
- state State
-}
-
-// State returns the current state of the relation.
-func (d *StateDir) State() *State {
- return d.state.copy()
-}
-
-// ReadStateDir loads a StateDir from the subdirectory of dirPath named
-// for the supplied RelationId. If the directory does not exist, no error
-// is returned.
-func ReadStateDir(dirPath string, relationId int) (d *StateDir, err error) {
- d = &StateDir{
- filepath.Join(dirPath, strconv.Itoa(relationId)),
- State{relationId, map[string]int64{}, ""},
- }
- defer errors.DeferredAnnotatef(&err, "cannot load relation state from %q", d.path)
- if _, err := os.Stat(d.path); os.IsNotExist(err) {
- return d, nil
- } else if err != nil {
- return nil, err
- }
- fis, err := ioutil.ReadDir(d.path)
- if err != nil {
- return nil, err
- }
- for _, fi := range fis {
- // Entries with names ending in "-" followed by an integer must be
- // files containing valid unit data; all other names are ignored.
- name := fi.Name()
- i := strings.LastIndex(name, "-")
- if i == -1 {
- continue
- }
- svcName := name[:i]
- unitId := name[i+1:]
- if _, err := strconv.Atoi(unitId); err != nil {
- continue
- }
- unitName := svcName + "/" + unitId
- var info diskInfo
- if err = utils.ReadYaml(filepath.Join(d.path, name), &info); err != nil {
- return nil, fmt.Errorf("invalid unit file %q: %v", name, err)
- }
- if info.ChangeVersion == nil {
- return nil, fmt.Errorf(`invalid unit file %q: "changed-version" not set`, name)
- }
- d.state.Members[unitName] = *info.ChangeVersion
- if info.ChangedPending {
- if d.state.ChangedPending != "" {
- return nil, fmt.Errorf("%q and %q both have pending changed hooks", d.state.ChangedPending, unitName)
- }
- d.state.ChangedPending = unitName
- }
- }
- return d, nil
-}
-
-// ReadAllStateDirs loads and returns every StateDir persisted directly inside
-// the supplied dirPath. If dirPath does not exist, no error is returned.
-func ReadAllStateDirs(dirPath string) (dirs map[int]*StateDir, err error) {
- defer errors.DeferredAnnotatef(&err, "cannot load relations state from %q", dirPath)
- if _, err := os.Stat(dirPath); os.IsNotExist(err) {
- return nil, nil
- } else if err != nil {
- return nil, err
- }
- fis, err := ioutil.ReadDir(dirPath)
- if err != nil {
- return nil, err
- }
- dirs = map[int]*StateDir{}
- for _, fi := range fis {
- // Entries with integer names must be directories containing StateDir
- // data; all other names will be ignored.
- relationId, err := strconv.Atoi(fi.Name())
- if err != nil {
- // This doesn't look like a relation.
- continue
- }
- dir, err := ReadStateDir(dirPath, relationId)
- if err != nil {
- return nil, err
- }
- dirs[relationId] = dir
- }
- return dirs, nil
-}
-
-// Ensure creates the directory if it does not already exist.
-func (d *StateDir) Ensure() error {
- return os.MkdirAll(d.path, 0755)
-}
-
-// Write atomically writes to disk the relation state change in hi.
-// It must be called after the respective hook was executed successfully.
-// Write doesn't validate hi but guarantees that successive writes of
-// the same hi are idempotent.
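-//
-// A minimal usage sketch (dir and hi are caller-supplied values; as
-// noted above, validation remains the caller's responsibility):
-//
-//	if err := dir.State().Validate(hi); err != nil {
-//		return err
-//	}
-//	// ... run the hook ...
-//	if err := dir.Write(hi); err != nil {
-//		return err // retrying Write with the same hi is safe
-//	}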
-func (d *StateDir) Write(hi hook.Info) (err error) { - defer errors.DeferredAnnotatef(&err, "failed to write %q hook info for %q on state directory", hi.Kind, hi.RemoteUnit) - if hi.Kind == hooks.RelationBroken { - return d.Remove() - } - name := strings.Replace(hi.RemoteUnit, "/", "-", 1) - path := filepath.Join(d.path, name) - if hi.Kind == hooks.RelationDeparted { - if err = os.Remove(path); err != nil && !os.IsNotExist(err) { - return err - } - // If atomic delete succeeded, update own state. - delete(d.state.Members, hi.RemoteUnit) - return nil - } - di := diskInfo{&hi.ChangeVersion, hi.Kind == hooks.RelationJoined} - if err := utils.WriteYaml(path, &di); err != nil { - return err - } - // If write was successful, update own state. - d.state.Members[hi.RemoteUnit] = hi.ChangeVersion - if hi.Kind == hooks.RelationJoined { - d.state.ChangedPending = hi.RemoteUnit - } else { - d.state.ChangedPending = "" - } - return nil -} - -// Remove removes the directory if it exists and is empty. -func (d *StateDir) Remove() error { - if err := os.Remove(d.path); err != nil && !os.IsNotExist(err) { - return err - } - // If atomic delete succeeded, update own state. - d.state.Members = nil - return nil -} - -// diskInfo defines the relation unit data serialization. -type diskInfo struct { - ChangeVersion *int64 `yaml:"change-version"` - ChangedPending bool `yaml:"changed-pending,omitempty"` -} === removed file 'src/github.com/juju/juju/worker/uniter/relation/relation_test.go' --- src/github.com/juju/juju/worker/uniter/relation/relation_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/relation_test.go 1970-01-01 00:00:00 +0000 @@ -1,353 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package relation_test - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/relation" -) - -type StateDirSuite struct{} - -var _ = gc.Suite(&StateDirSuite{}) - -func (s *StateDirSuite) TestReadStateDirEmpty(c *gc.C) { - basedir := c.MkDir() - reldir := filepath.Join(basedir, "123") - - dir, err := relation.ReadStateDir(basedir, 123) - c.Assert(err, jc.ErrorIsNil) - state := dir.State() - c.Assert(state.RelationId, gc.Equals, 123) - c.Assert(msi(state.Members), gc.DeepEquals, msi{}) - c.Assert(state.ChangedPending, gc.Equals, "") - - _, err = os.Stat(reldir) - c.Assert(err, jc.Satisfies, os.IsNotExist) - - err = dir.Ensure() - c.Assert(err, jc.ErrorIsNil) - fi, err := os.Stat(reldir) - c.Assert(err, jc.ErrorIsNil) - c.Assert(fi, jc.Satisfies, os.FileInfo.IsDir) -} - -func (s *StateDirSuite) TestReadStateDirValid(c *gc.C) { - basedir := c.MkDir() - reldir := setUpDir(c, basedir, "123", map[string]string{ - "foo-bar-1": "change-version: 99\n", - "foo-bar-1.preparing": "change-version: 100\n", - "baz-qux-7": "change-version: 101\nchanged-pending: true\n", - "nonsensical": "blah", - "27": "blah", - }) - setUpDir(c, reldir, "ignored", nil) - - dir, err := relation.ReadStateDir(basedir, 123) - c.Assert(err, jc.ErrorIsNil) - state := dir.State() - c.Assert(state.RelationId, gc.Equals, 123) - c.Assert(msi(state.Members), gc.DeepEquals, msi{"foo-bar/1": 99, "baz-qux/7": 101}) - c.Assert(state.ChangedPending, gc.Equals, "baz-qux/7") -} - -var badRelationsTests = []struct { - contents map[string]string - subdirs []string - err string -}{ - { - nil, []string{"foo-bar-1"}, - `.* (is a directory|handle is invalid.)`, - }, { - map[string]string{"foo-1": "'"}, nil, - `invalid unit file "foo-1": YAML error: .*`, - }, { - map[string]string{"foo-1": "blah: blah\n"}, nil, - `invalid unit file "foo-1": "changed-version" not set`, - }, { - map[string]string{ - "foo-1": "change-version: 123\nchanged-pending: true\n", - "foo-2": "change-version: 456\nchanged-pending: true\n", - }, nil, - `"foo/1" and "foo/2" both have pending changed hooks`, - }, -} - -func (s *StateDirSuite) TestBadRelations(c *gc.C) { - for i, t := range badRelationsTests { - c.Logf("test %d", i) - basedir := c.MkDir() - reldir := setUpDir(c, basedir, "123", t.contents) - for _, subdir := range t.subdirs { - setUpDir(c, reldir, subdir, nil) - } - _, err := relation.ReadStateDir(basedir, 123) - expect := `cannot load relation state from ".*": ` + t.err - c.Assert(err, gc.ErrorMatches, expect) - } -} - -var defaultMembers = msi{"foo/1": 0, "foo/2": 0} - -// writeTests verify the behaviour of sequences of HookInfos on a relation -// state that starts off containing defaultMembers. -var writeTests = []struct { - hooks []hook.Info - members msi - pending string - err string - deleted bool -}{ - // Verify that valid changes work. 
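- // (Each sequence below starts from defaultMembers, i.e. foo/1 and
- // foo/2 both at change version 0.)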
- { - hooks: []hook.Info{ - {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/1", ChangeVersion: 1}, - }, - members: msi{"foo/1": 1, "foo/2": 0}, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, - }, - members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, - pending: "foo/3", - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, - {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/3"}, - }, - members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, - }, - members: msi{"foo/2": 0}, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, - }, - members: msi{"foo/1": 0, "foo/2": 0}, - pending: "foo/1", - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, - {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/1"}, - }, - members: msi{"foo/1": 0, "foo/2": 0}, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/2"}, - {Kind: hooks.RelationBroken, RelationId: 123}, - }, - deleted: true, - }, - // Verify detection of various error conditions. - { - hooks: []hook.Info{ - {Kind: hooks.RelationJoined, RelationId: 456, RemoteUnit: "foo/1"}, - }, - err: "expected relation 123, got relation 456", - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/4"}, - }, - members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, - pending: "foo/3", - err: `expected "relation-changed" for "foo/3"`, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, - {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/1"}, - }, - members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, - pending: "foo/3", - err: `expected "relation-changed" for "foo/3"`, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, - }, - err: "unit already joined", - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/3"}, - }, - err: "unit has not joined", - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/3"}, - }, - err: "unit has not joined", - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationBroken, RelationId: 123}, - }, - err: `cannot run "relation-broken" while units still present`, - }, { - hooks: []hook.Info{ - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, - {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/2"}, - {Kind: hooks.RelationBroken, RelationId: 123}, - {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, - }, - err: `relation is broken and cannot be changed further`, - deleted: true, - }, -} - -func (s *StateDirSuite) TestWrite(c *gc.C) { - for i, t := range writeTests { - c.Logf("test %d", i) - basedir := c.MkDir() - setUpDir(c, basedir, "123", map[string]string{ - "foo-1": "change-version: 0\n", - "foo-2": "change-version: 0\n", - }) - dir, err := relation.ReadStateDir(basedir, 123) - c.Assert(err, jc.ErrorIsNil) - for i, hi := range 
t.hooks { - c.Logf(" hook %d", i) - if i == len(t.hooks)-1 && t.err != "" { - err = dir.State().Validate(hi) - expect := fmt.Sprintf(`inappropriate %q for %q: %s`, hi.Kind, hi.RemoteUnit, t.err) - c.Assert(err, gc.ErrorMatches, expect) - } else { - err = dir.State().Validate(hi) - c.Assert(err, jc.ErrorIsNil) - err = dir.Write(hi) - c.Assert(err, jc.ErrorIsNil) - // Check that writing the same change again is OK. - err = dir.Write(hi) - c.Assert(err, jc.ErrorIsNil) - } - } - members := t.members - if members == nil && !t.deleted { - members = defaultMembers - } - assertState(c, dir, basedir, 123, members, t.pending, t.deleted) - } -} - -func (s *StateDirSuite) TestRemove(c *gc.C) { - basedir := c.MkDir() - dir, err := relation.ReadStateDir(basedir, 1) - c.Assert(err, jc.ErrorIsNil) - err = dir.Ensure() - c.Assert(err, jc.ErrorIsNil) - err = dir.Remove() - c.Assert(err, jc.ErrorIsNil) - err = dir.Remove() - c.Assert(err, jc.ErrorIsNil) - - setUpDir(c, basedir, "99", map[string]string{ - "foo-1": "change-version: 0\n", - }) - dir, err = relation.ReadStateDir(basedir, 99) - c.Assert(err, jc.ErrorIsNil) - err = dir.Remove() - // Windows message is The directory is not empty - // Unix message is directory not empty - c.Assert(err, gc.ErrorMatches, ".* directory (is )?not empty.?") -} - -type ReadAllStateDirsSuite struct{} - -var _ = gc.Suite(&ReadAllStateDirsSuite{}) - -func (s *ReadAllStateDirsSuite) TestNoDir(c *gc.C) { - basedir := c.MkDir() - relsdir := filepath.Join(basedir, "relations") - - dirs, err := relation.ReadAllStateDirs(relsdir) - c.Assert(err, jc.ErrorIsNil) - c.Assert(dirs, gc.HasLen, 0) - - _, err = os.Stat(relsdir) - c.Assert(err, jc.Satisfies, os.IsNotExist) -} - -func (s *ReadAllStateDirsSuite) TestBadStateDir(c *gc.C) { - basedir := c.MkDir() - relsdir := setUpDir(c, basedir, "relations", nil) - setUpDir(c, relsdir, "123", map[string]string{ - "bad-0": "blah: blah\n", - }) - _, err := relation.ReadAllStateDirs(relsdir) - c.Assert(err, gc.ErrorMatches, `cannot load relations state from .*: cannot load relation state from .*: invalid unit file "bad-0": "changed-version" not set`) -} - -func (s *ReadAllStateDirsSuite) TestReadAllStateDirs(c *gc.C) { - basedir := c.MkDir() - relsdir := setUpDir(c, basedir, "relations", map[string]string{ - "ignored": "blah", - "foo-bar-123": "gibberish", - }) - setUpDir(c, relsdir, "123", map[string]string{ - "foo-0": "change-version: 1\n", - "foo-1": "change-version: 2\nchanged-pending: true\n", - "gibberish": "gibberish", - }) - setUpDir(c, relsdir, "456", map[string]string{ - "bar-0": "change-version: 3\n", - "bar-1": "change-version: 4\n", - }) - setUpDir(c, relsdir, "789", nil) - setUpDir(c, relsdir, "onethousand", map[string]string{ - "baz-0": "change-version: 3\n", - "baz-1": "change-version: 4\n", - }) - - dirs, err := relation.ReadAllStateDirs(relsdir) - c.Assert(err, jc.ErrorIsNil) - for id, dir := range dirs { - c.Logf("%d: %#v", id, dir) - } - assertState(c, dirs[123], relsdir, 123, msi{"foo/0": 1, "foo/1": 2}, "foo/1", false) - assertState(c, dirs[456], relsdir, 456, msi{"bar/0": 3, "bar/1": 4}, "", false) - assertState(c, dirs[789], relsdir, 789, msi{}, "", false) - c.Assert(dirs, gc.HasLen, 3) -} - -func setUpDir(c *gc.C, basedir, name string, contents map[string]string) string { - reldir := filepath.Join(basedir, name) - err := os.Mkdir(reldir, 0777) - c.Assert(err, jc.ErrorIsNil) - for name, content := range contents { - path := filepath.Join(reldir, name) - err := ioutil.WriteFile(path, []byte(content), 0777) - c.Assert(err, 
jc.ErrorIsNil)
- }
- return reldir
-}
-
-func assertState(c *gc.C, dir *relation.StateDir, relsdir string, relationId int, members msi, pending string, deleted bool) {
- expect := &relation.State{
- RelationId: relationId,
- Members: map[string]int64(members),
- ChangedPending: pending,
- }
- c.Assert(dir.State(), gc.DeepEquals, expect)
- if deleted {
- _, err := os.Stat(filepath.Join(relsdir, strconv.Itoa(relationId)))
- c.Assert(err, jc.Satisfies, os.IsNotExist)
- } else {
- fresh, err := relation.ReadStateDir(relsdir, relationId)
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(fresh.State(), gc.DeepEquals, expect)
- }
-}
=== added file 'src/github.com/juju/juju/worker/uniter/relation/relationer.go'
--- src/github.com/juju/juju/worker/uniter/relation/relationer.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/relation/relationer.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,111 @@
+// Copyright 2012-2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package relation
+
+import (
+ "fmt"
+
+ "gopkg.in/juju/charm.v6-unstable/hooks"
+
+ apiuniter "github.com/juju/juju/api/uniter"
+ "github.com/juju/juju/worker/uniter/hook"
+ "github.com/juju/juju/worker/uniter/runner/context"
+)
+
+// Relationer manages a unit's presence in a relation.
+type Relationer struct {
+ ru *apiuniter.RelationUnit
+ dir *StateDir
+ dying bool
+}
+
+// NewRelationer creates a new Relationer. The unit will not join the
+// relation until explicitly requested.
+func NewRelationer(ru *apiuniter.RelationUnit, dir *StateDir) *Relationer {
+ return &Relationer{
+ ru: ru,
+ dir: dir,
+ }
+}
+
+// ContextInfo returns a representation of the Relationer's current state.
+func (r *Relationer) ContextInfo() *context.RelationInfo {
+ members := r.dir.State().Members
+ memberNames := make([]string, 0, len(members))
+ for memberName := range members {
+ memberNames = append(memberNames, memberName)
+ }
+ return &context.RelationInfo{r.ru, memberNames}
+}
+
+// IsImplicit returns whether the local relation endpoint is implicit. Implicit
+// relations do not run hooks.
+func (r *Relationer) IsImplicit() bool {
+ return r.ru.Endpoint().IsImplicit()
+}
+
+// Join initializes local state and causes the unit to enter its relation
+// scope, allowing its counterpart units to detect its presence and settings
+// changes. The local state directory is not created until needed.
+func (r *Relationer) Join() error {
+ if r.dying {
+ panic("dying relationer must not join!")
+ }
+ // We need to make sure the state directory exists before we join the
+ // relation, lest a subsequent ReadAllStateDirs report local state that
+ // doesn't include relations recorded in remote state.
+ if err := r.dir.Ensure(); err != nil {
+ return err
+ }
+ // uniter.RelationUnit.EnterScope() sets the unit's private address
+ // internally automatically, so no need to set it here.
+ return r.ru.EnterScope()
+}
+
+// SetDying informs the relationer that the unit is departing the relation,
+// and that the only hooks it should send henceforth are -departed hooks,
+// until the relation is empty, followed by a -broken hook.
+func (r *Relationer) SetDying() error {
+ if r.IsImplicit() {
+ r.dying = true
+ return r.die()
+ }
+ r.dying = true
+ return nil
+}
+
+// die is run when the relationer has no further responsibilities; it leaves
+// relation scope, and removes the local relation state directory.
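+// Scope is left before the local state is removed, so a failure in
+// LeaveScope leaves the state directory in place for a later retry.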
+func (r *Relationer) die() error { + if err := r.ru.LeaveScope(); err != nil { + return err + } + return r.dir.Remove() +} + +// PrepareHook checks that the relation is in a state such that it makes +// sense to execute the supplied hook, and ensures that the relation context +// contains the latest relation state as communicated in the hook.Info. It +// returns the name of the hook that must be run. +func (r *Relationer) PrepareHook(hi hook.Info) (hookName string, err error) { + if r.IsImplicit() { + panic("implicit relations must not run hooks") + } + if err = r.dir.State().Validate(hi); err != nil { + return + } + name := r.ru.Endpoint().Name + return fmt.Sprintf("%s-%s", name, hi.Kind), nil +} + +// CommitHook persists the fact of the supplied hook's completion. +func (r *Relationer) CommitHook(hi hook.Info) error { + if r.IsImplicit() { + panic("implicit relations must not run hooks") + } + if hi.Kind == hooks.RelationBroken { + return r.die() + } + return r.dir.Write(hi) +} === added file 'src/github.com/juju/juju/worker/uniter/relation/relationer_test.go' --- src/github.com/juju/juju/worker/uniter/relation/relationer_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/relationer_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,352 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package relation_test + +import ( + "strconv" + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + ft "github.com/juju/testing/filetesting" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/api" + apiuniter "github.com/juju/juju/api/uniter" + jujutesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/network" + "github.com/juju/juju/state" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/relation" +) + +type RelationerSuite struct { + jujutesting.JujuConnSuite + hooks chan hook.Info + svc *state.Service + rel *state.Relation + dir *relation.StateDir + dirPath string + + st api.Connection + uniter *apiuniter.State + apiRelUnit *apiuniter.RelationUnit +} + +var _ = gc.Suite(&RelationerSuite{}) + +func (s *RelationerSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + var err error + s.svc = s.AddTestingService(c, "u", s.AddTestingCharm(c, "riak")) + c.Assert(err, jc.ErrorIsNil) + rels, err := s.svc.Relations() + c.Assert(err, jc.ErrorIsNil) + c.Assert(rels, gc.HasLen, 1) + s.rel = rels[0] + _, unit := s.AddRelationUnit(c, "u/0") + s.dirPath = c.MkDir() + s.dir, err = relation.ReadStateDir(s.dirPath, s.rel.Id()) + c.Assert(err, jc.ErrorIsNil) + s.hooks = make(chan hook.Info) + + password, err := utils.RandomPassword() + c.Assert(err, jc.ErrorIsNil) + err = unit.SetPassword(password) + c.Assert(err, jc.ErrorIsNil) + s.st = s.OpenAPIAs(c, unit.Tag(), password) + s.uniter, err = s.st.Uniter() + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.uniter, gc.NotNil) + + apiUnit, err := s.uniter.Unit(unit.Tag().(names.UnitTag)) + c.Assert(err, jc.ErrorIsNil) + apiRel, err := s.uniter.Relation(s.rel.Tag().(names.RelationTag)) + c.Assert(err, jc.ErrorIsNil) + s.apiRelUnit, err = apiRel.Unit(apiUnit) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *RelationerSuite) AddRelationUnit(c *gc.C, name string) (*state.RelationUnit, *state.Unit) { + u, err := s.svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + c.Assert(u.Name(), 
gc.Equals, name) + machine, err := s.State.AddMachine("quantal", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + err = u.AssignToMachine(machine) + c.Assert(err, jc.ErrorIsNil) + privateAddr := network.NewScopedAddress( + strings.Replace(name, "/", "-", 1)+".testing.invalid", network.ScopeCloudLocal, + ) + err = machine.SetProviderAddresses(privateAddr) + c.Assert(err, jc.ErrorIsNil) + ru, err := s.rel.Unit(u) + c.Assert(err, jc.ErrorIsNil) + return ru, u +} + +func (s *RelationerSuite) TestStateDir(c *gc.C) { + // Create the relationer; check its state dir is not created. + r := relation.NewRelationer(s.apiRelUnit, s.dir) + path := strconv.Itoa(s.rel.Id()) + ft.Removed{path}.Check(c, s.dirPath) + + // Join the relation; check the dir was created. + err := r.Join() + c.Assert(err, jc.ErrorIsNil) + ft.Dir{path, 0755}.Check(c, s.dirPath) + + // Prepare to depart the relation; check the dir is still there. + hi := hook.Info{Kind: hooks.RelationBroken} + _, err = r.PrepareHook(hi) + c.Assert(err, jc.ErrorIsNil) + ft.Dir{path, 0755}.Check(c, s.dirPath) + + // Actually depart it; check the dir is removed. + err = r.CommitHook(hi) + c.Assert(err, jc.ErrorIsNil) + ft.Removed{path}.Check(c, s.dirPath) +} + +func (s *RelationerSuite) TestEnterLeaveScope(c *gc.C) { + ru1, _ := s.AddRelationUnit(c, "u/1") + r := relation.NewRelationer(s.apiRelUnit, s.dir) + + // u/1 does not consider u/0 to be alive. + w := ru1.Watch() + defer stop(c, w) + s.State.StartSync() + ch, ok := <-w.Changes() + c.Assert(ok, jc.IsTrue) + c.Assert(ch.Changed, gc.HasLen, 0) + c.Assert(ch.Departed, gc.HasLen, 0) + + // u/0 enters scope; u/1 observes it. + err := r.Join() + c.Assert(err, jc.ErrorIsNil) + s.State.StartSync() + select { + case ch, ok := <-w.Changes(): + c.Assert(ok, jc.IsTrue) + c.Assert(ch.Changed, gc.HasLen, 1) + _, found := ch.Changed["u/0"] + c.Assert(found, jc.IsTrue) + c.Assert(ch.Departed, gc.HasLen, 0) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out waiting for presence detection") + } + + // re-Join is no-op. + err = r.Join() + c.Assert(err, jc.ErrorIsNil) + // TODO(jam): This would be a great to replace with statetesting.NotifyWatcherC + s.State.StartSync() + select { + case ch, ok := <-w.Changes(): + c.Fatalf("got unexpected change: %#v, %#v", ch, ok) + case <-time.After(coretesting.ShortWait): + } + + // u/0 leaves scope; u/1 observes it. + hi := hook.Info{Kind: hooks.RelationBroken} + _, err = r.PrepareHook(hi) + c.Assert(err, jc.ErrorIsNil) + + err = r.CommitHook(hi) + c.Assert(err, jc.ErrorIsNil) + s.State.StartSync() + select { + case ch, ok := <-w.Changes(): + c.Assert(ok, jc.IsTrue) + c.Assert(ch.Changed, gc.HasLen, 0) + c.Assert(ch.Departed, gc.DeepEquals, []string{"u/0"}) + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out waiting for absence detection") + } +} + +func (s *RelationerSuite) TestPrepareCommitHooks(c *gc.C) { + r := relation.NewRelationer(s.apiRelUnit, s.dir) + err := r.Join() + c.Assert(err, jc.ErrorIsNil) + + assertMembers := func(expect map[string]int64) { + c.Assert(s.dir.State().Members, jc.DeepEquals, expect) + expectNames := make([]string, 0, len(expect)) + for name := range expect { + expectNames = append(expectNames, name) + } + c.Assert(r.ContextInfo().MemberNames, jc.SameContents, expectNames) + } + assertMembers(map[string]int64{}) + + // Check preparing an invalid hook changes nothing. 
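+ // (An invalid hook here is one that fails State.Validate: u/1 has
+ // not joined yet, so a relation-changed for it must be rejected.)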
+ changed := hook.Info{
+ Kind: hooks.RelationChanged,
+ RemoteUnit: "u/1",
+ ChangeVersion: 7,
+ }
+ _, err = r.PrepareHook(changed)
+ c.Assert(err, gc.ErrorMatches, `inappropriate "relation-changed" for "u/1": unit has not joined`)
+ assertMembers(map[string]int64{})
+
+ // Check preparing a valid hook updates neither the context nor persistent
+ // relation state.
+ joined := hook.Info{
+ Kind: hooks.RelationJoined,
+ RemoteUnit: "u/1",
+ }
+ name, err := r.PrepareHook(joined)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(name, gc.Equals, "ring-relation-joined")
+ assertMembers(map[string]int64{})
+
+ // Check that preparing the following hook fails as before...
+ _, err = r.PrepareHook(changed)
+ c.Assert(err, gc.ErrorMatches, `inappropriate "relation-changed" for "u/1": unit has not joined`)
+ assertMembers(map[string]int64{})
+
+ // ...but that committing the previous hook updates the persistent
+ // relation state...
+ err = r.CommitHook(joined)
+ c.Assert(err, jc.ErrorIsNil)
+ assertMembers(map[string]int64{"u/1": 0})
+
+ // ...and allows us to prepare the next hook...
+ name, err = r.PrepareHook(changed)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(name, gc.Equals, "ring-relation-changed")
+ assertMembers(map[string]int64{"u/1": 0})
+
+ // ...and commit it.
+ err = r.CommitHook(changed)
+ c.Assert(err, jc.ErrorIsNil)
+ assertMembers(map[string]int64{"u/1": 7})
+
+ // To verify implied behaviour above, prepare a new joined hook with
+ // missing membership information, and check relation context
+ // membership is still not updated...
+ joined.RemoteUnit = "u/2"
+ joined.ChangeVersion = 3
+ name, err = r.PrepareHook(joined)
+ c.Assert(err, jc.ErrorIsNil)
+ c.Assert(name, gc.Equals, "ring-relation-joined")
+ assertMembers(map[string]int64{"u/1": 7})
+
+ // ...until commit, at which point so is relation state.
+ err = r.CommitHook(joined)
+ c.Assert(err, jc.ErrorIsNil)
+ assertMembers(map[string]int64{"u/1": 7, "u/2": 3})
+}
+
+func (s *RelationerSuite) TestSetDying(c *gc.C) {
+ ru1, u := s.AddRelationUnit(c, "u/1")
+ settings := map[string]interface{}{"unit": "settings"}
+ err := ru1.EnterScope(settings)
+ c.Assert(err, jc.ErrorIsNil)
+ r := relation.NewRelationer(s.apiRelUnit, s.dir)
+ err = r.Join()
+ c.Assert(err, jc.ErrorIsNil)
+
+ // Change Life to Dying and check the results.
+ err = r.SetDying()
+ c.Assert(err, jc.ErrorIsNil)
+
+ // Check that we cannot rejoin the relation.
+ f := func() { r.Join() }
+ c.Assert(f, gc.PanicMatches, "dying relationer must not join!")
+
+ // Simulate a RelationBroken hook.
+ err = r.CommitHook(hook.Info{Kind: hooks.RelationBroken})
+ c.Assert(err, jc.ErrorIsNil)
+
+ // Check that the relation state has been broken.
+ err = s.dir.State().Validate(hook.Info{Kind: hooks.RelationBroken})
+ c.Assert(err, gc.ErrorMatches, ".*: relation is broken and cannot be changed further")
+
+ // Check that it left scope, by leaving scope on the other side and destroying
+ // the relation.
+ err = ru1.LeaveScope()
+ c.Assert(err, jc.ErrorIsNil)
+ err = u.Destroy()
+ c.Assert(err, jc.ErrorIsNil)
+ err = u.Refresh()
+ c.Assert(err, jc.Satisfies, errors.IsNotFound)
+}
+
+type stopper interface {
+ Stop() error
+}
+
+func stop(c *gc.C, s stopper) {
+ c.Assert(s.Stop(), gc.IsNil)
+}
+
+type RelationerImplicitSuite struct {
+ jujutesting.JujuConnSuite
+}
+
+var _ = gc.Suite(&RelationerImplicitSuite{})
+
+func (s *RelationerImplicitSuite) TestImplicitRelationer(c *gc.C) {
+ // Create a relationer for an implicit endpoint (mysql:juju-info).
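+ // (juju-info is one of the implicit endpoints every service provides,
+ // so IsImplicit should hold and hooks must never run for it.)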
+ mysql := s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql")) + u, err := mysql.AddUnit() + c.Assert(err, jc.ErrorIsNil) + machine, err := s.State.AddMachine("quantal", state.JobHostUnits) + c.Assert(err, jc.ErrorIsNil) + err = u.AssignToMachine(machine) + c.Assert(err, jc.ErrorIsNil) + err = machine.SetProviderAddresses(network.NewScopedAddress("blah", network.ScopeCloudLocal)) + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) + eps, err := s.State.InferEndpoints("logging", "mysql") + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + relsDir := c.MkDir() + dir, err := relation.ReadStateDir(relsDir, rel.Id()) + c.Assert(err, jc.ErrorIsNil) + + password, err := utils.RandomPassword() + c.Assert(err, jc.ErrorIsNil) + err = u.SetPassword(password) + c.Assert(err, jc.ErrorIsNil) + st := s.OpenAPIAs(c, u.Tag(), password) + uniterState, err := st.Uniter() + c.Assert(err, jc.ErrorIsNil) + c.Assert(uniterState, gc.NotNil) + + apiUnit, err := uniterState.Unit(u.Tag().(names.UnitTag)) + c.Assert(err, jc.ErrorIsNil) + apiRel, err := uniterState.Relation(rel.Tag().(names.RelationTag)) + c.Assert(err, jc.ErrorIsNil) + apiRelUnit, err := apiRel.Unit(apiUnit) + c.Assert(err, jc.ErrorIsNil) + + r := relation.NewRelationer(apiRelUnit, dir) + c.Assert(r, jc.Satisfies, (*relation.Relationer).IsImplicit) + + // Hooks are not allowed. + f := func() { r.PrepareHook(hook.Info{}) } + c.Assert(f, gc.PanicMatches, "implicit relations must not run hooks") + f = func() { r.CommitHook(hook.Info{}) } + c.Assert(f, gc.PanicMatches, "implicit relations must not run hooks") + + // Set it to Dying; check that the dir is removed immediately. + err = r.SetDying() + c.Assert(err, jc.ErrorIsNil) + path := strconv.Itoa(rel.Id()) + ft.Removed{path}.Check(c, relsDir) + + err = rel.Destroy() + c.Assert(err, jc.ErrorIsNil) + err = rel.Refresh() + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} === added file 'src/github.com/juju/juju/worker/uniter/relation/relations.go' --- src/github.com/juju/juju/worker/uniter/relation/relations.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/relations.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,489 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package relation + +import ( + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "github.com/juju/utils/set" + corecharm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/api/uniter" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" + "github.com/juju/juju/worker/uniter/runner/context" +) + +var logger = loggo.GetLogger("juju.worker.uniter.relation") + +// Relations exists to encapsulate relation state and operations behind an +// interface for the benefit of future refactoring. +type Relations interface { + // Name returns the name of the relation with the supplied id, or an error + // if the relation is unknown. + Name(id int) (string, error) + + // PrepareHook returns the name of the supplied relation hook, or an error + // if the hook is unknown or invalid given current state. 
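+ // For example, a relation-joined hook for an endpoint named "db"
+ // resolves to the hook name "db-relation-joined".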
+ PrepareHook(hookInfo hook.Info) (string, error) + + // CommitHook persists the state change encoded in the supplied relation + // hook, or returns an error if the hook is unknown or invalid given + // current relation state. + CommitHook(hookInfo hook.Info) error + + // GetInfo returns information about current relation state. + GetInfo() map[int]*context.RelationInfo + + // NextHook returns details on the next hook to execute, based on the local + // and remote states. + NextHook(resolver.LocalState, remotestate.Snapshot) (hook.Info, error) +} + +// NewRelationsResolver returns a new Resolver that handles differences in +// relation state. +func NewRelationsResolver(r Relations) resolver.Resolver { + return &relationsResolver{r} +} + +type relationsResolver struct { + relations Relations +} + +// NextOp implements resolver.Resolver. +func (s *relationsResolver) NextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + hook, err := s.relations.NextHook(localState, remoteState) + if err != nil { + return nil, errors.Trace(err) + } + return opFactory.NewRunHook(hook) +} + +// relations implements Relations. +type relations struct { + st *uniter.State + unit *uniter.Unit + charmDir string + relationsDir string + relationers map[int]*Relationer + abort <-chan struct{} +} + +// NewRelations returns a new Relations instance. +func NewRelations(st *uniter.State, tag names.UnitTag, charmDir, relationsDir string, abort <-chan struct{}) (Relations, error) { + unit, err := st.Unit(tag) + if err != nil { + return nil, errors.Trace(err) + } + r := &relations{ + st: st, + unit: unit, + charmDir: charmDir, + relationsDir: relationsDir, + relationers: make(map[int]*Relationer), + abort: abort, + } + if err := r.init(); err != nil { + return nil, errors.Trace(err) + } + return r, nil +} + +// init reconciles the local relation state dirs with the remote state of +// the corresponding relations. It's only expected to be called while a +// *relations is being created. +func (r *relations) init() error { + joinedRelationTags, err := r.unit.JoinedRelations() + if err != nil { + return errors.Trace(err) + } + joinedRelations := make(map[int]*uniter.Relation) + for _, tag := range joinedRelationTags { + relation, err := r.st.Relation(tag) + if err != nil { + return errors.Trace(err) + } + joinedRelations[relation.Id()] = relation + } + knownDirs, err := ReadAllStateDirs(r.relationsDir) + if err != nil { + return errors.Trace(err) + } + for id, dir := range knownDirs { + if rel, ok := joinedRelations[id]; ok { + if err := r.add(rel, dir); err != nil { + return errors.Trace(err) + } + } else if err := dir.Remove(); err != nil { + return errors.Trace(err) + } + } + for id, rel := range joinedRelations { + if _, ok := knownDirs[id]; ok { + continue + } + dir, err := ReadStateDir(r.relationsDir, id) + if err != nil { + return errors.Trace(err) + } + if err := r.add(rel, dir); err != nil { + return errors.Trace(err) + } + } + return nil +} + +// NextHook implements Relations. +func (r *relations) NextHook( + localState resolver.LocalState, + remoteState remotestate.Snapshot, +) (hook.Info, error) { + + if remoteState.Life == params.Dying { + // The unit is Dying, so make sure all subordinates are dying. 
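+ // Only container-scoped relations are forced to Dying below; others
+ // are left to wind down through normal remote-state changes.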
+ var destroyAllSubordinates bool
+ for relationId, relationSnapshot := range remoteState.Relations {
+ if relationSnapshot.Life != params.Alive {
+ continue
+ }
+ relationer, ok := r.relationers[relationId]
+ if !ok {
+ continue
+ }
+ if relationer.ru.Endpoint().Scope == corecharm.ScopeContainer {
+ relationSnapshot.Life = params.Dying
+ remoteState.Relations[relationId] = relationSnapshot
+ destroyAllSubordinates = true
+ }
+ }
+ if destroyAllSubordinates {
+ if err := r.unit.DestroyAllSubordinates(); err != nil {
+ return hook.Info{}, errors.Trace(err)
+ }
+ }
+ }
+
+ // Add/remove local relation state; enter and leave scope as necessary.
+ if err := r.update(remoteState.Relations); err != nil {
+ return hook.Info{}, errors.Trace(err)
+ }
+
+ if localState.Kind != operation.Continue {
+ return hook.Info{}, resolver.ErrNoOperation
+ }
+
+ // See if any of the relations have operations to perform.
+ for relationId, relationSnapshot := range remoteState.Relations {
+ relationer, ok := r.relationers[relationId]
+ if !ok || relationer.IsImplicit() {
+ continue
+ }
+ var remoteBroken bool
+ if remoteState.Life == params.Dying || relationSnapshot.Life == params.Dying {
+ relationSnapshot = remotestate.RelationSnapshot{}
+ remoteBroken = true
+ // TODO(axw) if relation is implicit, leave scope & remove.
+ }
+ // If either the unit or the relation is Dying,
+ // then the relation should be broken.
+ hook, err := nextRelationHook(relationer.dir.State(), relationSnapshot, remoteBroken)
+ if err == resolver.ErrNoOperation {
+ continue
+ }
+ return hook, err
+ }
+ return hook.Info{}, resolver.ErrNoOperation
+}
+
+// nextRelationHook returns the next hook op that should be executed in the
+// relation characterised by the supplied local and remote state, or
+// resolver.ErrNoOperation if no hooks need to be executed.
+func nextRelationHook(
+ local *State,
+ remote remotestate.RelationSnapshot,
+ remoteBroken bool,
+) (hook.Info, error) {
+
+ // If there's a guaranteed next hook, return that.
+ relationId := local.RelationId
+ if local.ChangedPending != "" {
+ unitName := local.ChangedPending
+ return hook.Info{
+ Kind: hooks.RelationChanged,
+ RelationId: relationId,
+ RemoteUnit: unitName,
+ ChangeVersion: remote.Members[unitName],
+ }, nil
+ }
+
+ // Get the union of all relevant units, and sort them, so we produce events
+ // in a consistent order (largely for the convenience of the tests).
+ allUnitNames := set.NewStrings()
+ for unitName := range local.Members {
+ allUnitNames.Add(unitName)
+ }
+ for unitName := range remote.Members {
+ allUnitNames.Add(unitName)
+ }
+ sortedUnitNames := allUnitNames.SortedValues()
+
+ // If there are any locally known units that are no longer reflected in
+ // remote state, depart them.
+ for _, unitName := range sortedUnitNames {
+ changeVersion, found := local.Members[unitName]
+ if !found {
+ continue
+ }
+ if _, found := remote.Members[unitName]; !found {
+ return hook.Info{
+ Kind: hooks.RelationDeparted,
+ RelationId: relationId,
+ RemoteUnit: unitName,
+ ChangeVersion: changeVersion,
+ }, nil
+ }
+ }
+
+ // If the relation's meant to be broken, break it.
+ if remoteBroken {
+ return hook.Info{
+ Kind: hooks.RelationBroken,
+ RelationId: relationId,
+ }, nil
+ }
+
+ // If there are any remote units not locally known, join them.
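+ // (The precedence implemented by this function is: pending changed,
+ // then departed, then broken, then joined, then remaining changed.)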
+ for _, unitName := range sortedUnitNames {
+ changeVersion, found := remote.Members[unitName]
+ if !found {
+ continue
+ }
+ if _, found := local.Members[unitName]; !found {
+ return hook.Info{
+ Kind: hooks.RelationJoined,
+ RelationId: relationId,
+ RemoteUnit: unitName,
+ ChangeVersion: changeVersion,
+ }, nil
+ }
+ }
+
+ // Finally scan for remote units whose latest version is not reflected
+ // in local state.
+ for _, unitName := range sortedUnitNames {
+ remoteChangeVersion, found := remote.Members[unitName]
+ if !found {
+ continue
+ }
+ localChangeVersion, found := local.Members[unitName]
+ if !found {
+ continue
+ }
+ // NOTE(axw) we use != and not > to cater for the
+ // use of the relation settings document's txn-revno
+ // as the version. When model-uuid migration occurs, the
+ // document is recreated, resetting txn-revno.
+ if remoteChangeVersion != localChangeVersion {
+ return hook.Info{
+ Kind: hooks.RelationChanged,
+ RelationId: relationId,
+ RemoteUnit: unitName,
+ ChangeVersion: remoteChangeVersion,
+ }, nil
+ }
+ }
+
+ // Nothing left to do for this relation.
+ return hook.Info{}, resolver.ErrNoOperation
+}
+
+// Name is part of the Relations interface.
+func (r *relations) Name(id int) (string, error) {
+ relationer, found := r.relationers[id]
+ if !found {
+ return "", errors.Errorf("unknown relation: %d", id)
+ }
+ return relationer.ru.Endpoint().Name, nil
+}
+
+// PrepareHook is part of the Relations interface.
+func (r *relations) PrepareHook(hookInfo hook.Info) (string, error) {
+ if !hookInfo.Kind.IsRelation() {
+ return "", errors.Errorf("not a relation hook: %#v", hookInfo)
+ }
+ relationer, found := r.relationers[hookInfo.RelationId]
+ if !found {
+ return "", errors.Errorf("unknown relation: %d", hookInfo.RelationId)
+ }
+ return relationer.PrepareHook(hookInfo)
+}
+
+// CommitHook is part of the Relations interface.
+func (r *relations) CommitHook(hookInfo hook.Info) error {
+ if !hookInfo.Kind.IsRelation() {
+ return errors.Errorf("not a relation hook: %#v", hookInfo)
+ }
+ relationer, found := r.relationers[hookInfo.RelationId]
+ if !found {
+ return errors.Errorf("unknown relation: %d", hookInfo.RelationId)
+ }
+ if hookInfo.Kind == hooks.RelationBroken {
+ delete(r.relationers, hookInfo.RelationId)
+ }
+ return relationer.CommitHook(hookInfo)
+}
+
+// GetInfo is part of the Relations interface.
+func (r *relations) GetInfo() map[int]*context.RelationInfo {
+ relationInfos := map[int]*context.RelationInfo{}
+ for id, relationer := range r.relationers {
+ relationInfos[id] = relationer.ContextInfo()
+ }
+ return relationInfos
+}
+
+func (r *relations) update(remote map[int]remotestate.RelationSnapshot) error {
+ for id, relationSnapshot := range remote {
+ if _, found := r.relationers[id]; found {
+ // We've seen this relation before. The only changes
+ // we care about are to the lifecycle state, and to
+ // the member settings versions. We handle differences
+ // in settings in nextRelationHook.
+ if relationSnapshot.Life == params.Dying {
+ if err := r.setDying(id); err != nil {
+ return errors.Trace(err)
+ }
+ }
+ continue
+ }
+ // Relations that are not alive are simply skipped, because they
+ // were not previously known anyway.
+ if relationSnapshot.Life != params.Alive {
+ continue
+ }
+ rel, err := r.st.RelationById(id)
+ if err != nil {
+ if params.IsCodeNotFoundOrCodeUnauthorized(err) {
+ continue
+ }
+ return errors.Trace(err)
+ }
+ // Make sure we ignore relations not implemented by the unit's charm.
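+ // (ReadCharmDir parses the charm's metadata from charmDir; endpoints
+ // the charm does not declare are logged and skipped.)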
+ ch, err := corecharm.ReadCharmDir(r.charmDir) + if err != nil { + return errors.Trace(err) + } + if ep, err := rel.Endpoint(); err != nil { + return errors.Trace(err) + } else if !ep.ImplementedBy(ch) { + logger.Warningf("skipping relation with unknown endpoint %q", ep.Name) + continue + } + dir, err := ReadStateDir(r.relationsDir, id) + if err != nil { + return errors.Trace(err) + } + addErr := r.add(rel, dir) + if addErr == nil { + continue + } + removeErr := dir.Remove() + if !params.IsCodeCannotEnterScope(addErr) { + return errors.Trace(addErr) + } + if removeErr != nil { + return errors.Trace(removeErr) + } + } + if ok, err := r.unit.IsPrincipal(); err != nil { + return errors.Trace(err) + } else if ok { + return nil + } + // If no Alive relations remain between a subordinate unit's service + // and its principal's service, the subordinate must become Dying. + for _, relationer := range r.relationers { + scope := relationer.ru.Endpoint().Scope + if scope == corecharm.ScopeContainer && !relationer.dying { + return nil + } + } + return r.unit.Destroy() +} + +// add causes the unit agent to join the supplied relation, and to +// store persistent state in the supplied dir. It will block until the +// operation succeeds or fails; or until the abort chan is closed, in +// which case it will return resolver.ErrLoopAborted. +func (r *relations) add(rel *uniter.Relation, dir *StateDir) (err error) { + logger.Infof("joining relation %q", rel) + ru, err := rel.Unit(r.unit) + if err != nil { + return errors.Trace(err) + } + relationer := NewRelationer(ru, dir) + unitWatcher, err := r.unit.Watch() + if err != nil { + return errors.Trace(err) + } + defer func() { + if e := worker.Stop(unitWatcher); e != nil { + if err == nil { + err = e + } else { + logger.Errorf("while stopping unit watcher: %v", e) + } + } + }() + for { + select { + case <-r.abort: + // Should this be a different error? e.g. resolver.ErrAborted, that + // Loop translates into ErrLoopAborted? + return resolver.ErrLoopAborted + case _, ok := <-unitWatcher.Changes(): + if !ok { + return errors.New("unit watcher closed") + } + err := relationer.Join() + if params.IsCodeCannotEnterScopeYet(err) { + logger.Infof("cannot enter scope for relation %q; waiting for subordinate to be removed", rel) + continue + } else if err != nil { + return errors.Trace(err) + } + logger.Infof("joined relation %q", rel) + r.relationers[rel.Id()] = relationer + return nil + } + } +} + +// setDying notifies the relationer identified by the supplied id that the +// only hook executions to be requested should be those necessary to cleanly +// exit the relation. +func (r *relations) setDying(id int) error { + relationer, found := r.relationers[id] + if !found { + return nil + } + if err := relationer.SetDying(); err != nil { + return errors.Trace(err) + } + if relationer.IsImplicit() { + delete(r.relationers, id) + } + return nil +} === added file 'src/github.com/juju/juju/worker/uniter/relation/relations_test.go' --- src/github.com/juju/juju/worker/uniter/relation/relations_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/relations_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,524 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package relation_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync/atomic" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" + + apitesting "github.com/juju/juju/api/base/testing" + "github.com/juju/juju/api/uniter" + "github.com/juju/juju/apiserver/common" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/state/multiwatcher" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/relation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +/* +TODO(wallyworld) +DO NOT COPY THE METHODOLOGY USED IN THESE TESTS. +We want to write unit tests without resorting to JujuConnSuite. +However, the current api/uniter code uses structs instead of +interfaces for its component model, and it's not possible to +implement a stub uniter api at the model level due to the way +the domain objects reference each other. + +The best we can do for now is to stub out the facade caller and +return curated values for each API call. +*/ + +type relationsSuite struct { + coretesting.BaseSuite + + stateDir string + relationsDir string +} + +var _ = gc.Suite(&relationsSuite{}) + +type apiCall struct { + request string + args interface{} + result interface{} + err error +} + +func uniterApiCall(request string, args, result interface{}, err error) apiCall { + return apiCall{ + request: request, + args: args, + result: result, + err: err, + } +} + +func mockAPICaller(c *gc.C, callNumber *int32, apiCalls ...apiCall) apitesting.APICallerFunc { + apiCaller := apitesting.APICallerFunc(func(objType string, version int, id, request string, arg, result interface{}) error { + switch objType { + case "NotifyWatcher": + return nil + case "Uniter": + index := int(atomic.AddInt32(callNumber, 1)) - 1 + c.Check(index < len(apiCalls), jc.IsTrue) + call := apiCalls[index] + c.Logf("request %d, %s", index, request) + c.Check(version, gc.Equals, 3) + c.Check(id, gc.Equals, "") + c.Check(request, gc.Equals, call.request) + c.Check(arg, jc.DeepEquals, call.args) + if call.err != nil { + return common.ServerError(call.err) + } + testing.PatchValue(result, call.result) + default: + c.Fail() + } + return nil + }) + return apiCaller +} + +var minimalMetadata = ` +name: wordpress +summary: "test" +description: "test" +requires: + mysql: db +`[1:] + +func (s *relationsSuite) SetUpTest(c *gc.C) { + s.stateDir = filepath.Join(c.MkDir(), "charm") + err := os.MkdirAll(s.stateDir, 0755) + c.Assert(err, jc.ErrorIsNil) + err = ioutil.WriteFile(filepath.Join(s.stateDir, "metadata.yaml"), []byte(minimalMetadata), 0755) + c.Assert(err, jc.ErrorIsNil) + s.relationsDir = filepath.Join(c.MkDir(), "relations") +} + +func assertNumCalls(c *gc.C, numCalls *int32, expected int32) { + v := atomic.LoadInt32(numCalls) + c.Assert(v, gc.Equals, expected) +} + +func (s *relationsSuite) setupRelations(c *gc.C) relation.Relations { + unitTag := names.NewUnitTag("wordpress/0") + abort := make(chan struct{}) + + var numCalls int32 + unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} + apiCaller := mockAPICaller(c, &numCalls, + uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + 
uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + ) + st := uniter.NewState(apiCaller, unitTag) + r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, &numCalls, 2) + return r +} + +func (s *relationsSuite) TestNewRelationsNoRelations(c *gc.C) { + r := s.setupRelations(c) + //No relations created. + c.Assert(r.GetInfo(), gc.HasLen, 0) +} + +func (s *relationsSuite) TestNewRelationsWithExistingRelations(c *gc.C) { + unitTag := names.NewUnitTag("wordpress/0") + abort := make(chan struct{}) + + var numCalls int32 + unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} + relationUnits := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-wordpress.db#mysql.db", Unit: "unit-wordpress-0"}, + }} + relationResults := params.RelationResults{ + Results: []params.RelationResult{ + { + Id: 1, + Key: "wordpress:db mysql:db", + Life: params.Alive, + Endpoint: multiwatcher.Endpoint{ + ServiceName: "wordpress", + Relation: charm.Relation{Name: "mysql", Role: charm.RoleProvider, Interface: "db"}, + }}, + }, + } + + apiCaller := mockAPICaller(c, &numCalls, + uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{"relation-wordpress:db mysql:db"}}}}, nil), + uniterApiCall("Relation", relationUnits, relationResults, nil), + uniterApiCall("Relation", relationUnits, relationResults, nil), + uniterApiCall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), + uniterApiCall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + ) + st := uniter.NewState(apiCaller, unitTag) + r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, &numCalls, 6) + + info := r.GetInfo() + c.Assert(info, gc.HasLen, 1) + oneInfo := info[1] + c.Assert(oneInfo.RelationUnit.Relation().Tag(), gc.Equals, names.NewRelationTag("wordpress:db mysql:db")) + c.Assert(oneInfo.RelationUnit.Endpoint(), jc.DeepEquals, uniter.Endpoint{ + Relation: charm.Relation{Name: "mysql", Role: "provider", Interface: "db", Optional: false, Limit: 0, Scope: ""}, + }) + c.Assert(oneInfo.MemberNames, gc.HasLen, 0) +} + +func (s *relationsSuite) TestNextOpNothing(c *gc.C) { + unitTag := names.NewUnitTag("wordpress/0") + abort := make(chan struct{}) + + var numCalls int32 + unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} + apiCaller := mockAPICaller(c, &numCalls, + uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), + ) + st := uniter.NewState(apiCaller, unitTag) + r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, &numCalls, 2) + + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := 
remotestate.Snapshot{} + relationsResolver := relation.NewRelationsResolver(r) + _, err = relationsResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(errors.Cause(err), gc.Equals, resolver.ErrNoOperation) +} + +func relationJoinedApiCalls() []apiCall { + unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} + relationResults := params.RelationResults{ + Results: []params.RelationResult{ + { + Id: 1, + Key: "wordpress:db mysql:db", + Life: params.Alive, + Endpoint: multiwatcher.Endpoint{ + ServiceName: "wordpress", + Relation: charm.Relation{Name: "mysql", Role: charm.RoleRequirer, Interface: "db", Scope: "global"}, + }}, + }, + } + relationUnits := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-wordpress.db#mysql.db", Unit: "unit-wordpress-0"}, + }} + apiCalls := []apiCall{ + uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterApiCall("RelationById", params.RelationIds{RelationIds: []int{1}}, relationResults, nil), + uniterApiCall("Relation", relationUnits, relationResults, nil), + uniterApiCall("Relation", relationUnits, relationResults, nil), + uniterApiCall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), + uniterApiCall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), + } + return apiCalls +} + +func (s *relationsSuite) assertHookRelationJoined(c *gc.C, numCalls *int32, apiCalls ...apiCall) relation.Relations { + unitTag := names.NewUnitTag("wordpress/0") + abort := make(chan struct{}) + + apiCaller := mockAPICaller(c, numCalls, apiCalls...) + st := uniter.NewState(apiCaller, unitTag) + r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, numCalls, 2) + + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := remotestate.Snapshot{ + Relations: map[int]remotestate.RelationSnapshot{ + 1: remotestate.RelationSnapshot{ + Life: params.Alive, + Members: map[string]int64{ + "wordpress": 1, + }, + }, + }, + } + relationsResolver := relation.NewRelationsResolver(r) + op, err := relationsResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, numCalls, 8) + c.Assert(op.String(), gc.Equals, "run hook relation-joined on unit with relation 1") + + // Commit the operation so we save local state for any next operation. + _, err = r.PrepareHook(op.(*mockOperation).hookInfo) + c.Assert(err, jc.ErrorIsNil) + err = r.CommitHook(op.(*mockOperation).hookInfo) + c.Assert(err, jc.ErrorIsNil) + return r +} + +func (s *relationsSuite) TestHookRelationJoined(c *gc.C) { + var numCalls int32 + s.assertHookRelationJoined(c, &numCalls, relationJoinedApiCalls()...) 
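+	// assertHookRelationJoined asserts that NextOp yields
+	// "run hook relation-joined on unit with relation 1" and commits the
+	// hook, which leaves a relation-changed hook pending in local state.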
+}
+
+func (s *relationsSuite) assertHookRelationChanged(
+	c *gc.C, r relation.Relations,
+	remoteRelationSnapshot remotestate.RelationSnapshot,
+	numCalls *int32,
+) {
+	numCallsBefore := *numCalls
+	localState := resolver.LocalState{
+		State: operation.State{
+			Kind: operation.Continue,
+		},
+	}
+	remoteState := remotestate.Snapshot{
+		Relations: map[int]remotestate.RelationSnapshot{
+			1: remoteRelationSnapshot,
+		},
+	}
+	relationsResolver := relation.NewRelationsResolver(r)
+	op, err := relationsResolver.NextOp(localState, remoteState, &mockOperations{})
+	c.Assert(err, jc.ErrorIsNil)
+	assertNumCalls(c, numCalls, numCallsBefore+1)
+	c.Assert(op.String(), gc.Equals, "run hook relation-changed on unit with relation 1")
+
+	// Commit the operation so we save local state for any next operation.
+	_, err = r.PrepareHook(op.(*mockOperation).hookInfo)
+	c.Assert(err, jc.ErrorIsNil)
+	err = r.CommitHook(op.(*mockOperation).hookInfo)
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func getPrincipalApiCalls(numCalls int32) []apiCall {
+	unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}}
+	result := make([]apiCall, numCalls)
+	for i := int32(0); i < numCalls; i++ {
+		result[i] = uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil)
+	}
+	return result
+}
+
+func (s *relationsSuite) TestHookRelationChanged(c *gc.C) {
+	var numCalls int32
+	apiCalls := relationJoinedApiCalls()
+	apiCalls = append(apiCalls, getPrincipalApiCalls(3)...)
+	r := s.assertHookRelationJoined(c, &numCalls, apiCalls...)
+
+	// There will be an initial relation-changed regardless of
+	// members, due to the "changed pending" local persistent
+	// state.
+	s.assertHookRelationChanged(c, r, remotestate.RelationSnapshot{
+		Life: params.Alive,
+	}, &numCalls)
+
+	// wordpress starts at 1; changing to 2 should trigger a
+	// relation-changed hook.
+	s.assertHookRelationChanged(c, r, remotestate.RelationSnapshot{
+		Life: params.Alive,
+		Members: map[string]int64{
+			"wordpress": 2,
+		},
+	}, &numCalls)
+
+	// NOTE(axw) this is a test for the temporary fix for lp:1495542.
+	//
+	// wordpress is at 2; changing to 1 should trigger a
+	// relation-changed hook. This is to cater for the scenario
+	// where the relation settings document is removed and
+	// recreated, thus resetting the txn-revno.
+	s.assertHookRelationChanged(c, r, remotestate.RelationSnapshot{
+		Life: params.Alive,
+		Members: map[string]int64{
+			"wordpress": 1,
+		},
+	}, &numCalls)
+}
+
+func (s *relationsSuite) assertHookRelationDeparted(c *gc.C, numCalls *int32, apiCalls ...apiCall) relation.Relations {
+	r := s.assertHookRelationJoined(c, numCalls, apiCalls...)
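+	// The join above left a "changed pending" entry in local state, and
+	// State.Validate requires that hook to run next; drain it here before
+	// driving the relation-departed hook.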
+ s.assertHookRelationChanged(c, r, remotestate.RelationSnapshot{ + Life: params.Alive, + }, numCalls) + numCallsBefore := *numCalls + + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := remotestate.Snapshot{ + Relations: map[int]remotestate.RelationSnapshot{ + 1: remotestate.RelationSnapshot{ + Life: params.Dying, + Members: map[string]int64{ + "wordpress": 1, + }, + }, + }, + } + relationsResolver := relation.NewRelationsResolver(r) + op, err := relationsResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, numCalls, numCallsBefore+1) + c.Assert(op.String(), gc.Equals, "run hook relation-departed on unit with relation 1") + + // Commit the operation so we save local state for any next operation. + _, err = r.PrepareHook(op.(*mockOperation).hookInfo) + c.Assert(err, jc.ErrorIsNil) + err = r.CommitHook(op.(*mockOperation).hookInfo) + c.Assert(err, jc.ErrorIsNil) + return r +} + +func (s *relationsSuite) TestHookRelationDeparted(c *gc.C) { + var numCalls int32 + apiCalls := relationJoinedApiCalls() + + apiCalls = append(apiCalls, getPrincipalApiCalls(2)...) + s.assertHookRelationDeparted(c, &numCalls, apiCalls...) +} + +func (s *relationsSuite) TestHookRelationBroken(c *gc.C) { + var numCalls int32 + apiCalls := relationJoinedApiCalls() + + apiCalls = append(apiCalls, getPrincipalApiCalls(3)...) + r := s.assertHookRelationDeparted(c, &numCalls, apiCalls...) + + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := remotestate.Snapshot{ + Relations: map[int]remotestate.RelationSnapshot{ + 1: remotestate.RelationSnapshot{ + Life: params.Dying, + }, + }, + } + relationsResolver := relation.NewRelationsResolver(r) + op, err := relationsResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + assertNumCalls(c, &numCalls, 11) + c.Assert(op.String(), gc.Equals, "run hook relation-broken on unit with relation 1") +} + +func (s *relationsSuite) TestCommitHook(c *gc.C) { + var numCalls int32 + apiCalls := relationJoinedApiCalls() + relationUnits := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-wordpress.db#mysql.db", Unit: "unit-wordpress-0"}, + }} + apiCalls = append(apiCalls, + uniterApiCall("LeaveScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + ) + stateFile := filepath.Join(s.relationsDir, "1", "wordpress") + c.Assert(stateFile, jc.DoesNotExist) + r := s.assertHookRelationJoined(c, &numCalls, apiCalls...) 
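+
+	// The committed relation-joined hook should have written a state file
+	// for the remote unit, recording its change version and the pending
+	// relation-changed hook (see the diskInfo layout in state.go).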
+ + data, err := ioutil.ReadFile(stateFile) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, "change-version: 1\nchanged-pending: true\n") + + err = r.CommitHook(hook.Info{ + Kind: hooks.RelationChanged, + RemoteUnit: "wordpress", + RelationId: 1, + ChangeVersion: 2, + }) + c.Assert(err, jc.ErrorIsNil) + data, err = ioutil.ReadFile(stateFile) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(data), gc.Equals, "change-version: 2\n") + + err = r.CommitHook(hook.Info{ + Kind: hooks.RelationDeparted, + RemoteUnit: "wordpress", + RelationId: 1, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(stateFile, jc.DoesNotExist) +} + +func (s *relationsSuite) TestImplicitRelationNoHooks(c *gc.C) { + unitTag := names.NewUnitTag("wordpress/0") + abort := make(chan struct{}) + + unitEntity := params.Entities{Entities: []params.Entity{params.Entity{Tag: "unit-wordpress-0"}}} + relationResults := params.RelationResults{ + Results: []params.RelationResult{ + { + Id: 1, + Key: "wordpress:juju-info juju-info:juju-info", + Life: params.Alive, + Endpoint: multiwatcher.Endpoint{ + ServiceName: "wordpress", + Relation: charm.Relation{Name: "juju-info", Role: charm.RoleProvider, Interface: "juju-info", Scope: "global"}, + }}, + }, + } + relationUnits := params.RelationUnits{RelationUnits: []params.RelationUnit{ + {Relation: "relation-wordpress.juju-info#juju-info.juju-info", Unit: "unit-wordpress-0"}, + }} + apiCalls := []apiCall{ + uniterApiCall("Life", unitEntity, params.LifeResults{Results: []params.LifeResult{{Life: params.Alive}}}, nil), + uniterApiCall("JoinedRelations", unitEntity, params.StringsResults{Results: []params.StringsResult{{Result: []string{}}}}, nil), + uniterApiCall("RelationById", params.RelationIds{RelationIds: []int{1}}, relationResults, nil), + uniterApiCall("Relation", relationUnits, relationResults, nil), + uniterApiCall("Relation", relationUnits, relationResults, nil), + uniterApiCall("Watch", unitEntity, params.NotifyWatchResults{Results: []params.NotifyWatchResult{{NotifyWatcherId: "1"}}}, nil), + uniterApiCall("EnterScope", relationUnits, params.ErrorResults{Results: []params.ErrorResult{{}}}, nil), + uniterApiCall("GetPrincipal", unitEntity, params.StringBoolResults{Results: []params.StringBoolResult{{Result: "", Ok: false}}}, nil), + } + + var numCalls int32 + apiCaller := mockAPICaller(c, &numCalls, apiCalls...) + st := uniter.NewState(apiCaller, unitTag) + r, err := relation.NewRelations(st, unitTag, s.stateDir, s.relationsDir, abort) + c.Assert(err, jc.ErrorIsNil) + + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := remotestate.Snapshot{ + Relations: map[int]remotestate.RelationSnapshot{ + 1: remotestate.RelationSnapshot{ + Life: params.Alive, + Members: map[string]int64{ + "wordpress": 1, + }, + }, + }, + } + relationsResolver := relation.NewRelationsResolver(r) + _, err = relationsResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(errors.Cause(err), gc.Equals, resolver.ErrNoOperation) +} === added file 'src/github.com/juju/juju/worker/uniter/relation/state.go' --- src/github.com/juju/juju/worker/uniter/relation/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,234 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
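+
+// A minimal sketch of the intended call pattern, assuming a caller that
+// validates each hook before running it (all names refer to the API
+// defined below):
+//
+//	dir, err := ReadStateDir(relationsDir, relationId)
+//	if err != nil { ... }
+//	if err := dir.State().Validate(hi); err != nil { ... }
+//	// ... run the hook ...
+//	if err := dir.Write(hi); err != nil { ... }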
+
+// Package relation implements persistent local storage of a unit's relation
+// state, and translation of relation changes into hooks that need to be run.
+package relation
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/worker/uniter/hook"
+)
+
+// State describes the state of a relation.
+type State struct {
+	// RelationId identifies the relation.
+	RelationId int
+
+	// Members is a map from unit name to the last change version
+	// for which a hook.Info was delivered.
+	Members map[string]int64
+
+	// ChangedPending indicates that a "relation-changed" hook for the given
+	// unit name must be the next hook.Info delivered.
+	ChangedPending string
+}
+
+// copy returns an independent copy of the state.
+func (s *State) copy() *State {
+	copy := &State{
+		RelationId:     s.RelationId,
+		ChangedPending: s.ChangedPending,
+	}
+	if s.Members != nil {
+		copy.Members = map[string]int64{}
+		for m, v := range s.Members {
+			copy.Members[m] = v
+		}
+	}
+	return copy
+}
+
+// Validate returns an error if the supplied hook.Info does not represent
+// a valid change to the relation state. Hooks must always be validated
+// against the current state before they are run, to ensure that the system
+// meets its guarantees about hook execution order.
+func (s *State) Validate(hi hook.Info) (err error) {
+	defer errors.DeferredAnnotatef(&err, "inappropriate %q for %q", hi.Kind, hi.RemoteUnit)
+	if hi.RelationId != s.RelationId {
+		return fmt.Errorf("expected relation %d, got relation %d", s.RelationId, hi.RelationId)
+	}
+	if s.Members == nil {
+		return fmt.Errorf(`relation is broken and cannot be changed further`)
+	}
+	unit, kind := hi.RemoteUnit, hi.Kind
+	if kind == hooks.RelationBroken {
+		if len(s.Members) == 0 {
+			return nil
+		}
+		return fmt.Errorf(`cannot run "relation-broken" while units still present`)
+	}
+	if s.ChangedPending != "" {
+		if unit != s.ChangedPending || kind != hooks.RelationChanged {
+			return fmt.Errorf(`expected "relation-changed" for %q`, s.ChangedPending)
+		}
+	} else if _, joined := s.Members[unit]; joined && kind == hooks.RelationJoined {
+		return fmt.Errorf("unit already joined")
+	} else if !joined && kind != hooks.RelationJoined {
+		return fmt.Errorf("unit has not joined")
+	}
+	return nil
+}
+
+// StateDir is a filesystem-backed representation of the state of a
+// relation. Concurrent modifications to the underlying state directory
+// will have undefined consequences.
+type StateDir struct {
+	// path identifies the directory holding persistent state.
+	path string
+
+	// state is the cached state of the directory, which is guaranteed
+	// to be synchronized with the true state so long as no concurrent
+	// changes are made to the directory.
+	state State
+}
+
+// State returns the current state of the relation.
+func (d *StateDir) State() *State {
+	return d.state.copy()
+}
+
+// ReadStateDir loads a StateDir from the subdirectory of dirPath named
+// for the supplied RelationId.
If the directory does not exist, no error
+// is returned.
+func ReadStateDir(dirPath string, relationId int) (d *StateDir, err error) {
+	d = &StateDir{
+		filepath.Join(dirPath, strconv.Itoa(relationId)),
+		State{relationId, map[string]int64{}, ""},
+	}
+	defer errors.DeferredAnnotatef(&err, "cannot load relation state from %q", d.path)
+	if _, err := os.Stat(d.path); os.IsNotExist(err) {
+		return d, nil
+	} else if err != nil {
+		return nil, err
+	}
+	fis, err := ioutil.ReadDir(d.path)
+	if err != nil {
+		return nil, err
+	}
+	for _, fi := range fis {
+		// Entries with names ending in "-" followed by an integer must be
+		// files containing valid unit data; all other names are ignored.
+		name := fi.Name()
+		i := strings.LastIndex(name, "-")
+		if i == -1 {
+			continue
+		}
+		svcName := name[:i]
+		unitId := name[i+1:]
+		if _, err := strconv.Atoi(unitId); err != nil {
+			continue
+		}
+		unitName := svcName + "/" + unitId
+		var info diskInfo
+		if err = utils.ReadYaml(filepath.Join(d.path, name), &info); err != nil {
+			return nil, fmt.Errorf("invalid unit file %q: %v", name, err)
+		}
+		if info.ChangeVersion == nil {
+			return nil, fmt.Errorf(`invalid unit file %q: "changed-version" not set`, name)
+		}
+		d.state.Members[unitName] = *info.ChangeVersion
+		if info.ChangedPending {
+			if d.state.ChangedPending != "" {
+				return nil, fmt.Errorf("%q and %q both have pending changed hooks", d.state.ChangedPending, unitName)
+			}
+			d.state.ChangedPending = unitName
+		}
+	}
+	return d, nil
+}
+
+// ReadAllStateDirs loads and returns every StateDir persisted directly inside
+// the supplied dirPath. If dirPath does not exist, no error is returned.
+func ReadAllStateDirs(dirPath string) (dirs map[int]*StateDir, err error) {
+	defer errors.DeferredAnnotatef(&err, "cannot load relations state from %q", dirPath)
+	if _, err := os.Stat(dirPath); os.IsNotExist(err) {
+		return nil, nil
+	} else if err != nil {
+		return nil, err
+	}
+	fis, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return nil, err
+	}
+	dirs = map[int]*StateDir{}
+	for _, fi := range fis {
+		// Entries with integer names must be directories containing StateDir
+		// data; all other names will be ignored.
+		relationId, err := strconv.Atoi(fi.Name())
+		if err != nil {
+			// This doesn't look like a relation.
+			continue
+		}
+		dir, err := ReadStateDir(dirPath, relationId)
+		if err != nil {
+			return nil, err
+		}
+		dirs[relationId] = dir
+	}
+	return dirs, nil
+}
+
+// Ensure creates the directory if it does not already exist.
+func (d *StateDir) Ensure() error {
+	return os.MkdirAll(d.path, 0755)
+}
+
+// Write atomically writes to disk the relation state change in hi.
+// It must be called after the respective hook was executed successfully.
+// Write doesn't validate hi but guarantees that successive writes of
+// the same hi are idempotent.
+func (d *StateDir) Write(hi hook.Info) (err error) {
+	defer errors.DeferredAnnotatef(&err, "failed to write %q hook info for %q on state directory", hi.Kind, hi.RemoteUnit)
+	if hi.Kind == hooks.RelationBroken {
+		return d.Remove()
+	}
+	name := strings.Replace(hi.RemoteUnit, "/", "-", 1)
+	path := filepath.Join(d.path, name)
+	if hi.Kind == hooks.RelationDeparted {
+		if err = os.Remove(path); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+		// If atomic delete succeeded, update own state.
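+		// Dropping the member entry keeps the cached state in step with
+		// the directory contents: once every remote unit has departed,
+		// Members is empty and Validate will accept a relation-broken hook.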
+ delete(d.state.Members, hi.RemoteUnit) + return nil + } + di := diskInfo{&hi.ChangeVersion, hi.Kind == hooks.RelationJoined} + if err := utils.WriteYaml(path, &di); err != nil { + return err + } + // If write was successful, update own state. + d.state.Members[hi.RemoteUnit] = hi.ChangeVersion + if hi.Kind == hooks.RelationJoined { + d.state.ChangedPending = hi.RemoteUnit + } else { + d.state.ChangedPending = "" + } + return nil +} + +// Remove removes the directory if it exists and is empty. +func (d *StateDir) Remove() error { + if err := os.Remove(d.path); err != nil && !os.IsNotExist(err) { + return err + } + // If atomic delete succeeded, update own state. + d.state.Members = nil + return nil +} + +// diskInfo defines the relation unit data serialization. +type diskInfo struct { + ChangeVersion *int64 `yaml:"change-version"` + ChangedPending bool `yaml:"changed-pending,omitempty"` +} === added file 'src/github.com/juju/juju/worker/uniter/relation/state_test.go' --- src/github.com/juju/juju/worker/uniter/relation/state_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/relation/state_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,355 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package relation_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/relation" +) + +type StateDirSuite struct{} + +type msi map[string]int64 + +var _ = gc.Suite(&StateDirSuite{}) + +func (s *StateDirSuite) TestReadStateDirEmpty(c *gc.C) { + basedir := c.MkDir() + reldir := filepath.Join(basedir, "123") + + dir, err := relation.ReadStateDir(basedir, 123) + c.Assert(err, jc.ErrorIsNil) + state := dir.State() + c.Assert(state.RelationId, gc.Equals, 123) + c.Assert(msi(state.Members), gc.DeepEquals, msi{}) + c.Assert(state.ChangedPending, gc.Equals, "") + + _, err = os.Stat(reldir) + c.Assert(err, jc.Satisfies, os.IsNotExist) + + err = dir.Ensure() + c.Assert(err, jc.ErrorIsNil) + fi, err := os.Stat(reldir) + c.Assert(err, jc.ErrorIsNil) + c.Assert(fi, jc.Satisfies, os.FileInfo.IsDir) +} + +func (s *StateDirSuite) TestReadStateDirValid(c *gc.C) { + basedir := c.MkDir() + reldir := setUpDir(c, basedir, "123", map[string]string{ + "foo-bar-1": "change-version: 99\n", + "foo-bar-1.preparing": "change-version: 100\n", + "baz-qux-7": "change-version: 101\nchanged-pending: true\n", + "nonsensical": "blah", + "27": "blah", + }) + setUpDir(c, reldir, "ignored", nil) + + dir, err := relation.ReadStateDir(basedir, 123) + c.Assert(err, jc.ErrorIsNil) + state := dir.State() + c.Assert(state.RelationId, gc.Equals, 123) + c.Assert(msi(state.Members), gc.DeepEquals, msi{"foo-bar/1": 99, "baz-qux/7": 101}) + c.Assert(state.ChangedPending, gc.Equals, "baz-qux/7") +} + +var badRelationsTests = []struct { + contents map[string]string + subdirs []string + err string +}{ + { + nil, []string{"foo-bar-1"}, + `.* (is a directory|handle is invalid.)`, + }, { + map[string]string{"foo-1": "'"}, nil, + `invalid unit file "foo-1": yaml: found unexpected end of stream`, + }, { + map[string]string{"foo-1": "blah: blah\n"}, nil, + `invalid unit file "foo-1": "changed-version" not set`, + }, { + map[string]string{ + "foo-1": "change-version: 123\nchanged-pending: true\n", + "foo-2": "change-version: 456\nchanged-pending: true\n", + }, nil, + 
`"foo/1" and "foo/2" both have pending changed hooks`, + }, +} + +func (s *StateDirSuite) TestBadRelations(c *gc.C) { + for i, t := range badRelationsTests { + c.Logf("test %d", i) + basedir := c.MkDir() + reldir := setUpDir(c, basedir, "123", t.contents) + for _, subdir := range t.subdirs { + setUpDir(c, reldir, subdir, nil) + } + _, err := relation.ReadStateDir(basedir, 123) + expect := `cannot load relation state from ".*": ` + t.err + c.Assert(err, gc.ErrorMatches, expect) + } +} + +var defaultMembers = msi{"foo/1": 0, "foo/2": 0} + +// writeTests verify the behaviour of sequences of HookInfos on a relation +// state that starts off containing defaultMembers. +var writeTests = []struct { + hooks []hook.Info + members msi + pending string + err string + deleted bool +}{ + // Verify that valid changes work. + { + hooks: []hook.Info{ + {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/1", ChangeVersion: 1}, + }, + members: msi{"foo/1": 1, "foo/2": 0}, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, + }, + members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, + pending: "foo/3", + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, + {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/3"}, + }, + members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, + }, + members: msi{"foo/2": 0}, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, + }, + members: msi{"foo/1": 0, "foo/2": 0}, + pending: "foo/1", + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, + {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/1"}, + }, + members: msi{"foo/1": 0, "foo/2": 0}, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/2"}, + {Kind: hooks.RelationBroken, RelationId: 123}, + }, + deleted: true, + }, + // Verify detection of various error conditions. 
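+	// In each case below, every hook but the last is expected to validate
+	// and write cleanly; only the final hook.Info should be rejected by
+	// Validate with the stated error (see TestWrite).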
+ { + hooks: []hook.Info{ + {Kind: hooks.RelationJoined, RelationId: 456, RemoteUnit: "foo/1"}, + }, + err: "expected relation 123, got relation 456", + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/4"}, + }, + members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, + pending: "foo/3", + err: `expected "relation-changed" for "foo/3"`, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/3"}, + {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/1"}, + }, + members: msi{"foo/1": 0, "foo/2": 0, "foo/3": 0}, + pending: "foo/3", + err: `expected "relation-changed" for "foo/3"`, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, + }, + err: "unit already joined", + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationChanged, RelationId: 123, RemoteUnit: "foo/3"}, + }, + err: "unit has not joined", + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/3"}, + }, + err: "unit has not joined", + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationBroken, RelationId: 123}, + }, + err: `cannot run "relation-broken" while units still present`, + }, { + hooks: []hook.Info{ + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/1"}, + {Kind: hooks.RelationDeparted, RelationId: 123, RemoteUnit: "foo/2"}, + {Kind: hooks.RelationBroken, RelationId: 123}, + {Kind: hooks.RelationJoined, RelationId: 123, RemoteUnit: "foo/1"}, + }, + err: `relation is broken and cannot be changed further`, + deleted: true, + }, +} + +func (s *StateDirSuite) TestWrite(c *gc.C) { + for i, t := range writeTests { + c.Logf("test %d", i) + basedir := c.MkDir() + setUpDir(c, basedir, "123", map[string]string{ + "foo-1": "change-version: 0\n", + "foo-2": "change-version: 0\n", + }) + dir, err := relation.ReadStateDir(basedir, 123) + c.Assert(err, jc.ErrorIsNil) + for i, hi := range t.hooks { + c.Logf(" hook %d", i) + if i == len(t.hooks)-1 && t.err != "" { + err = dir.State().Validate(hi) + expect := fmt.Sprintf(`inappropriate %q for %q: %s`, hi.Kind, hi.RemoteUnit, t.err) + c.Assert(err, gc.ErrorMatches, expect) + } else { + err = dir.State().Validate(hi) + c.Assert(err, jc.ErrorIsNil) + err = dir.Write(hi) + c.Assert(err, jc.ErrorIsNil) + // Check that writing the same change again is OK. 
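+				// Write documents that successive writes of the same
+				// hook.Info are idempotent, so repeating the write
+				// must succeed and leave the state unchanged.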
+ err = dir.Write(hi) + c.Assert(err, jc.ErrorIsNil) + } + } + members := t.members + if members == nil && !t.deleted { + members = defaultMembers + } + assertState(c, dir, basedir, 123, members, t.pending, t.deleted) + } +} + +func (s *StateDirSuite) TestRemove(c *gc.C) { + basedir := c.MkDir() + dir, err := relation.ReadStateDir(basedir, 1) + c.Assert(err, jc.ErrorIsNil) + err = dir.Ensure() + c.Assert(err, jc.ErrorIsNil) + err = dir.Remove() + c.Assert(err, jc.ErrorIsNil) + err = dir.Remove() + c.Assert(err, jc.ErrorIsNil) + + setUpDir(c, basedir, "99", map[string]string{ + "foo-1": "change-version: 0\n", + }) + dir, err = relation.ReadStateDir(basedir, 99) + c.Assert(err, jc.ErrorIsNil) + err = dir.Remove() + // Windows message is The directory is not empty + // Unix message is directory not empty + c.Assert(err, gc.ErrorMatches, ".* directory (is )?not empty.?") +} + +type ReadAllStateDirsSuite struct{} + +var _ = gc.Suite(&ReadAllStateDirsSuite{}) + +func (s *ReadAllStateDirsSuite) TestNoDir(c *gc.C) { + basedir := c.MkDir() + relsdir := filepath.Join(basedir, "relations") + + dirs, err := relation.ReadAllStateDirs(relsdir) + c.Assert(err, jc.ErrorIsNil) + c.Assert(dirs, gc.HasLen, 0) + + _, err = os.Stat(relsdir) + c.Assert(err, jc.Satisfies, os.IsNotExist) +} + +func (s *ReadAllStateDirsSuite) TestBadStateDir(c *gc.C) { + basedir := c.MkDir() + relsdir := setUpDir(c, basedir, "relations", nil) + setUpDir(c, relsdir, "123", map[string]string{ + "bad-0": "blah: blah\n", + }) + _, err := relation.ReadAllStateDirs(relsdir) + c.Assert(err, gc.ErrorMatches, `cannot load relations state from .*: cannot load relation state from .*: invalid unit file "bad-0": "changed-version" not set`) +} + +func (s *ReadAllStateDirsSuite) TestReadAllStateDirs(c *gc.C) { + basedir := c.MkDir() + relsdir := setUpDir(c, basedir, "relations", map[string]string{ + "ignored": "blah", + "foo-bar-123": "gibberish", + }) + setUpDir(c, relsdir, "123", map[string]string{ + "foo-0": "change-version: 1\n", + "foo-1": "change-version: 2\nchanged-pending: true\n", + "gibberish": "gibberish", + }) + setUpDir(c, relsdir, "456", map[string]string{ + "bar-0": "change-version: 3\n", + "bar-1": "change-version: 4\n", + }) + setUpDir(c, relsdir, "789", nil) + setUpDir(c, relsdir, "onethousand", map[string]string{ + "baz-0": "change-version: 3\n", + "baz-1": "change-version: 4\n", + }) + + dirs, err := relation.ReadAllStateDirs(relsdir) + c.Assert(err, jc.ErrorIsNil) + for id, dir := range dirs { + c.Logf("%d: %#v", id, dir) + } + assertState(c, dirs[123], relsdir, 123, msi{"foo/0": 1, "foo/1": 2}, "foo/1", false) + assertState(c, dirs[456], relsdir, 456, msi{"bar/0": 3, "bar/1": 4}, "", false) + assertState(c, dirs[789], relsdir, 789, msi{}, "", false) + c.Assert(dirs, gc.HasLen, 3) +} + +func setUpDir(c *gc.C, basedir, name string, contents map[string]string) string { + reldir := filepath.Join(basedir, name) + err := os.Mkdir(reldir, 0777) + c.Assert(err, jc.ErrorIsNil) + for name, content := range contents { + path := filepath.Join(reldir, name) + err := ioutil.WriteFile(path, []byte(content), 0777) + c.Assert(err, jc.ErrorIsNil) + } + return reldir +} + +func assertState(c *gc.C, dir *relation.StateDir, relsdir string, relationId int, members msi, pending string, deleted bool) { + expect := &relation.State{ + RelationId: relationId, + Members: map[string]int64(members), + ChangedPending: pending, + } + c.Assert(dir.State(), gc.DeepEquals, expect) + if deleted { + _, err := os.Stat(filepath.Join(relsdir, 
strconv.Itoa(relationId))) + c.Assert(err, jc.Satisfies, os.IsNotExist) + } else { + fresh, err := relation.ReadStateDir(relsdir, relationId) + c.Assert(err, jc.ErrorIsNil) + c.Assert(fresh.State(), gc.DeepEquals, expect) + } +} === removed file 'src/github.com/juju/juju/worker/uniter/relationer.go' --- src/github.com/juju/juju/worker/uniter/relationer.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relationer.go 1970-01-01 00:00:00 +0000 @@ -1,154 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "fmt" - - "gopkg.in/juju/charm.v5/hooks" - - apiuniter "github.com/juju/juju/api/uniter" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/relation" - "github.com/juju/juju/worker/uniter/runner" -) - -// Relationer manages a unit's presence in a relation. -type Relationer struct { - ru *apiuniter.RelationUnit - dir *relation.StateDir - queue relation.HookQueue - hooks chan<- hook.Info - dying bool -} - -// NewRelationer creates a new Relationer. The unit will not join the -// relation until explicitly requested. -func NewRelationer(ru *apiuniter.RelationUnit, dir *relation.StateDir, hooks chan<- hook.Info) *Relationer { - return &Relationer{ - ru: ru, - dir: dir, - hooks: hooks, - } -} - -// ContextInfo returns a represention of r's current state. -func (r *Relationer) ContextInfo() *runner.RelationInfo { - members := r.dir.State().Members - memberNames := make([]string, 0, len(members)) - for memberName := range members { - memberNames = append(memberNames, memberName) - } - return &runner.RelationInfo{r.ru, memberNames} -} - -// IsImplicit returns whether the local relation endpoint is implicit. Implicit -// relations do not run hooks. -func (r *Relationer) IsImplicit() bool { - return r.ru.Endpoint().IsImplicit() -} - -// Join initializes local state and causes the unit to enter its relation -// scope, allowing its counterpart units to detect its presence and settings -// changes. Local state directory is not created until needed. -func (r *Relationer) Join() error { - if r.dying { - panic("dying relationer must not join!") - } - // We need to make sure the state directory exists before we join the - // relation, lest a subsequent ReadAllStateDirs report local state that - // doesn't include relations recorded in remote state. - if err := r.dir.Ensure(); err != nil { - return err - } - // uniter.RelationUnit.EnterScope() sets the unit's private address - // internally automatically, so no need to set it here. - return r.ru.EnterScope() -} - -// SetDying informs the relationer that the unit is departing the relation, -// and that the only hooks it should send henceforth are -departed hooks, -// until the relation is empty, followed by a -broken hook. -func (r *Relationer) SetDying() error { - if r.IsImplicit() { - r.dying = true - return r.die() - } - if r.queue != nil { - if err := r.StopHooks(); err != nil { - return err - } - defer r.StartHooks() - } - r.dying = true - return nil -} - -// die is run when the relationer has no further responsibilities; it leaves -// relation scope, and removes the local relation state directory. -func (r *Relationer) die() error { - if err := r.ru.LeaveScope(); err != nil { - return err - } - return r.dir.Remove() -} - -// StartHooks starts watching the relation, and sending hook.Info events on the -// hooks channel. It will panic if called when already responding to relation -// changes. 
-func (r *Relationer) StartHooks() error { - if r.IsImplicit() { - return nil - } - if r.queue != nil { - panic("hooks already started!") - } - if r.dying { - r.queue = relation.NewDyingHookQueue(r.dir.State(), r.hooks) - } else { - w, err := r.ru.Watch() - if err != nil { - return err - } - r.queue = relation.NewAliveHookQueue(r.dir.State(), r.hooks, w) - } - return nil -} - -// StopHooks ensures that the relationer is not watching the relation, or sending -// hook.Info events on the hooks channel. -func (r *Relationer) StopHooks() error { - if r.queue == nil { - return nil - } - queue := r.queue - r.queue = nil - return queue.Stop() -} - -// PrepareHook checks that the relation is in a state such that it makes -// sense to execute the supplied hook, and ensures that the relation context -// contains the latest relation state as communicated in the hook.Info. It -// returns the name of the hook that must be run. -func (r *Relationer) PrepareHook(hi hook.Info) (hookName string, err error) { - if r.IsImplicit() { - panic("implicit relations must not run hooks") - } - if err = r.dir.State().Validate(hi); err != nil { - return - } - name := r.ru.Endpoint().Name - return fmt.Sprintf("%s-%s", name, hi.Kind), nil -} - -// CommitHook persists the fact of the supplied hook's completion. -func (r *Relationer) CommitHook(hi hook.Info) error { - if r.IsImplicit() { - panic("implicit relations must not run hooks") - } - if hi.Kind == hooks.RelationBroken { - return r.die() - } - return r.dir.Write(hi) -} === removed file 'src/github.com/juju/juju/worker/uniter/relationer_test.go' --- src/github.com/juju/juju/worker/uniter/relationer_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/relationer_test.go 1970-01-01 00:00:00 +0000 @@ -1,487 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package uniter_test - -import ( - "strconv" - "strings" - "time" - - "github.com/juju/errors" - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - ft "github.com/juju/testing/filetesting" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/api" - apiuniter "github.com/juju/juju/api/uniter" - jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/relation" -) - -type RelationerSuite struct { - jujutesting.JujuConnSuite - hooks chan hook.Info - svc *state.Service - rel *state.Relation - dir *relation.StateDir - dirPath string - - st api.Connection - uniter *apiuniter.State - apiRelUnit *apiuniter.RelationUnit -} - -var _ = gc.Suite(&RelationerSuite{}) - -func (s *RelationerSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - var err error - s.svc = s.AddTestingService(c, "u", s.AddTestingCharm(c, "riak")) - c.Assert(err, jc.ErrorIsNil) - rels, err := s.svc.Relations() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rels, gc.HasLen, 1) - s.rel = rels[0] - _, unit := s.AddRelationUnit(c, "u/0") - s.dirPath = c.MkDir() - s.dir, err = relation.ReadStateDir(s.dirPath, s.rel.Id()) - c.Assert(err, jc.ErrorIsNil) - s.hooks = make(chan hook.Info) - - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = unit.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - s.st = s.OpenAPIAs(c, unit.Tag(), password) - s.uniter, err = s.st.Uniter() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.uniter, gc.NotNil) - - apiUnit, err := s.uniter.Unit(unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - apiRel, err := s.uniter.Relation(s.rel.Tag().(names.RelationTag)) - c.Assert(err, jc.ErrorIsNil) - s.apiRelUnit, err = apiRel.Unit(apiUnit) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *RelationerSuite) AddRelationUnit(c *gc.C, name string) (*state.RelationUnit, *state.Unit) { - u, err := s.svc.AddUnit() - c.Assert(err, jc.ErrorIsNil) - c.Assert(u.Name(), gc.Equals, name) - machine, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - err = u.AssignToMachine(machine) - c.Assert(err, jc.ErrorIsNil) - privateAddr := network.NewScopedAddress( - strings.Replace(name, "/", "-", 1)+".testing.invalid", network.ScopeCloudLocal, - ) - err = machine.SetProviderAddresses(privateAddr) - c.Assert(err, jc.ErrorIsNil) - ru, err := s.rel.Unit(u) - c.Assert(err, jc.ErrorIsNil) - return ru, u -} - -func (s *RelationerSuite) TestStateDir(c *gc.C) { - // Create the relationer; check its state dir is not created. - r := uniter.NewRelationer(s.apiRelUnit, s.dir, s.hooks) - path := strconv.Itoa(s.rel.Id()) - ft.Removed{path}.Check(c, s.dirPath) - - // Join the relation; check the dir was created. - err := r.Join() - c.Assert(err, jc.ErrorIsNil) - ft.Dir{path, 0755}.Check(c, s.dirPath) - - // Prepare to depart the relation; check the dir is still there. - hi := hook.Info{Kind: hooks.RelationBroken} - _, err = r.PrepareHook(hi) - c.Assert(err, jc.ErrorIsNil) - ft.Dir{path, 0755}.Check(c, s.dirPath) - - // Actually depart it; check the dir is removed. 
- err = r.CommitHook(hi) - c.Assert(err, jc.ErrorIsNil) - ft.Removed{path}.Check(c, s.dirPath) -} - -func (s *RelationerSuite) TestEnterLeaveScope(c *gc.C) { - ru1, _ := s.AddRelationUnit(c, "u/1") - r := uniter.NewRelationer(s.apiRelUnit, s.dir, s.hooks) - - // u/1 does not consider u/0 to be alive. - w := ru1.Watch() - defer stop(c, w) - s.State.StartSync() - ch, ok := <-w.Changes() - c.Assert(ok, jc.IsTrue) - c.Assert(ch.Changed, gc.HasLen, 0) - c.Assert(ch.Departed, gc.HasLen, 0) - - // u/0 enters scope; u/1 observes it. - err := r.Join() - c.Assert(err, jc.ErrorIsNil) - s.State.StartSync() - select { - case ch, ok := <-w.Changes(): - c.Assert(ok, jc.IsTrue) - c.Assert(ch.Changed, gc.HasLen, 1) - _, found := ch.Changed["u/0"] - c.Assert(found, jc.IsTrue) - c.Assert(ch.Departed, gc.HasLen, 0) - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting for presence detection") - } - - // re-Join is no-op. - err = r.Join() - c.Assert(err, jc.ErrorIsNil) - // TODO(jam): This would be a great to replace with statetesting.NotifyWatcherC - s.State.StartSync() - select { - case ch, ok := <-w.Changes(): - c.Fatalf("got unexpected change: %#v, %#v", ch, ok) - case <-time.After(coretesting.ShortWait): - } - - // u/0 leaves scope; u/1 observes it. - hi := hook.Info{Kind: hooks.RelationBroken} - _, err = r.PrepareHook(hi) - c.Assert(err, jc.ErrorIsNil) - - err = r.CommitHook(hi) - c.Assert(err, jc.ErrorIsNil) - s.State.StartSync() - select { - case ch, ok := <-w.Changes(): - c.Assert(ok, jc.IsTrue) - c.Assert(ch.Changed, gc.HasLen, 0) - c.Assert(ch.Departed, gc.DeepEquals, []string{"u/0"}) - case <-time.After(worstCase): - c.Fatalf("timed out waiting for absence detection") - } -} - -func (s *RelationerSuite) TestStartStopHooks(c *gc.C) { - ru1, _ := s.AddRelationUnit(c, "u/1") - ru2, _ := s.AddRelationUnit(c, "u/2") - r := uniter.NewRelationer(s.apiRelUnit, s.dir, s.hooks) - c.Assert(r.IsImplicit(), jc.IsFalse) - err := r.Join() - c.Assert(err, jc.ErrorIsNil) - - // Check no hooks are being sent. - s.assertNoHook(c) - - // Start hooks, and check that still no changes are sent. - r.StartHooks() - defer stopHooks(c, r) - s.assertNoHook(c) - - // Check we can't start hooks again. - f := func() { r.StartHooks() } - c.Assert(f, gc.PanicMatches, "hooks already started!") - - // Join u/1 to the relation, and check that we receive the expected hooks. - settings := map[string]interface{}{"unit": "settings"} - err = ru1.EnterScope(settings) - c.Assert(err, jc.ErrorIsNil) - s.assertHook(c, hook.Info{ - Kind: hooks.RelationJoined, - RemoteUnit: "u/1", - }) - s.assertHook(c, hook.Info{ - Kind: hooks.RelationChanged, - RemoteUnit: "u/1", - }) - s.assertNoHook(c) - - // Stop hooks, make more changes, check no events. - err = r.StopHooks() - c.Assert(err, jc.ErrorIsNil) - err = ru1.LeaveScope() - c.Assert(err, jc.ErrorIsNil) - err = ru2.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - node, err := ru2.Settings() - c.Assert(err, jc.ErrorIsNil) - node.Set("private-address", "roehampton") - _, err = node.Write() - c.Assert(err, jc.ErrorIsNil) - s.assertNoHook(c) - - // Stop hooks again to verify safety. - err = r.StopHooks() - c.Assert(err, jc.ErrorIsNil) - s.assertNoHook(c) - - // Start them again, and check we get the expected events sent. 
- r.StartHooks() - defer stopHooks(c, r) - s.assertHook(c, hook.Info{ - Kind: hooks.RelationDeparted, - RemoteUnit: "u/1", - }) - s.assertHook(c, hook.Info{ - Kind: hooks.RelationJoined, - ChangeVersion: 1, - RemoteUnit: "u/2", - }) - s.assertHook(c, hook.Info{ - Kind: hooks.RelationChanged, - ChangeVersion: 1, - RemoteUnit: "u/2", - }) - s.assertNoHook(c) - - // Stop them again, just to be sure. - err = r.StopHooks() - c.Assert(err, jc.ErrorIsNil) - s.assertNoHook(c) -} - -func (s *RelationerSuite) TestPrepareCommitHooks(c *gc.C) { - r := uniter.NewRelationer(s.apiRelUnit, s.dir, s.hooks) - err := r.Join() - c.Assert(err, jc.ErrorIsNil) - - assertMembers := func(expect map[string]int64) { - c.Assert(s.dir.State().Members, jc.DeepEquals, expect) - expectNames := make([]string, 0, len(expect)) - for name := range expect { - expectNames = append(expectNames, name) - } - c.Assert(r.ContextInfo().MemberNames, jc.SameContents, expectNames) - } - assertMembers(map[string]int64{}) - - // Check preparing an invalid hook changes nothing. - changed := hook.Info{ - Kind: hooks.RelationChanged, - RemoteUnit: "u/1", - ChangeVersion: 7, - } - _, err = r.PrepareHook(changed) - c.Assert(err, gc.ErrorMatches, `inappropriate "relation-changed" for "u/1": unit has not joined`) - assertMembers(map[string]int64{}) - - // Check preparing a valid hook updates neither the context nor persistent - // relation state. - joined := hook.Info{ - Kind: hooks.RelationJoined, - RemoteUnit: "u/1", - } - name, err := r.PrepareHook(joined) - c.Assert(err, jc.ErrorIsNil) - c.Assert(name, gc.Equals, "ring-relation-joined") - assertMembers(map[string]int64{}) - - // Check that preparing the following hook fails as before... - _, err = r.PrepareHook(changed) - c.Assert(err, gc.ErrorMatches, `inappropriate "relation-changed" for "u/1": unit has not joined`) - assertMembers(map[string]int64{}) - - // ...but that committing the previous hook updates the persistent - // relation state... - err = r.CommitHook(joined) - c.Assert(err, jc.ErrorIsNil) - assertMembers(map[string]int64{"u/1": 0}) - - // ...and allows us to prepare the next hook... - name, err = r.PrepareHook(changed) - c.Assert(err, jc.ErrorIsNil) - c.Assert(name, gc.Equals, "ring-relation-changed") - assertMembers(map[string]int64{"u/1": 0}) - - // ...and commit it. - err = r.CommitHook(changed) - c.Assert(err, jc.ErrorIsNil) - assertMembers(map[string]int64{"u/1": 7}) - - // To verify implied behaviour above, prepare a new joined hook with - // missing membership information, and check relation context - // membership is stil not updated... - joined.RemoteUnit = "u/2" - joined.ChangeVersion = 3 - name, err = r.PrepareHook(joined) - c.Assert(err, jc.ErrorIsNil) - c.Assert(name, gc.Equals, "ring-relation-joined") - assertMembers(map[string]int64{"u/1": 7}) - - // ...until commit, at which point so is relation state. 
- err = r.CommitHook(joined) - c.Assert(err, jc.ErrorIsNil) - assertMembers(map[string]int64{"u/1": 7, "u/2": 3}) -} - -func (s *RelationerSuite) TestSetDying(c *gc.C) { - ru1, _ := s.AddRelationUnit(c, "u/1") - settings := map[string]interface{}{"unit": "settings"} - err := ru1.EnterScope(settings) - c.Assert(err, jc.ErrorIsNil) - r := uniter.NewRelationer(s.apiRelUnit, s.dir, s.hooks) - err = r.Join() - c.Assert(err, jc.ErrorIsNil) - r.StartHooks() - defer stopHooks(c, r) - s.assertHook(c, hook.Info{ - Kind: hooks.RelationJoined, - RemoteUnit: "u/1", - }) - - // While a changed hook is still pending, the relation (or possibly the unit, - // pending lifecycle work), changes Life to Dying, and the relationer is - // informed. - err = r.SetDying() - c.Assert(err, jc.ErrorIsNil) - - // Check that we cannot rejoin the relation. - f := func() { r.Join() } - c.Assert(f, gc.PanicMatches, "dying relationer must not join!") - - // ...but the hook stream continues, sending the required changed hook for - // u/1 before moving on to a departed, despite the fact that its pinger is - // still running, and closing with a broken. - s.assertHook(c, hook.Info{Kind: hooks.RelationChanged, RemoteUnit: "u/1"}) - s.assertHook(c, hook.Info{Kind: hooks.RelationDeparted, RemoteUnit: "u/1"}) - s.assertHook(c, hook.Info{Kind: hooks.RelationBroken}) - - // Check that the relation state has been broken. - err = s.dir.State().Validate(hook.Info{Kind: hooks.RelationBroken}) - c.Assert(err, gc.ErrorMatches, ".*: relation is broken and cannot be changed further") -} - -func (s *RelationerSuite) assertNoHook(c *gc.C) { - s.BackingState.StartSync() - select { - case hi, ok := <-s.hooks: - c.Fatalf("got unexpected hook info %#v (%t)", hi, ok) - case <-time.After(coretesting.ShortWait): - } -} - -func (s *RelationerSuite) assertHook(c *gc.C, expect hook.Info) { - s.BackingState.StartSync() - // We must ensure the local state dir exists first. - c.Assert(s.dir.Ensure(), gc.IsNil) - select { - case hi, ok := <-s.hooks: - c.Assert(ok, jc.IsTrue) - expect.ChangeVersion = hi.ChangeVersion - c.Assert(hi, gc.DeepEquals, expect) - c.Assert(s.dir.Write(hi), gc.Equals, nil) - case <-time.After(coretesting.LongWait): - c.Fatalf("timed out waiting for %#v", expect) - } -} - -type stopper interface { - Stop() error -} - -func stop(c *gc.C, s stopper) { - c.Assert(s.Stop(), gc.IsNil) -} - -func stopHooks(c *gc.C, r *uniter.Relationer) { - c.Assert(r.StopHooks(), gc.IsNil) -} - -type RelationerImplicitSuite struct { - jujutesting.JujuConnSuite -} - -var _ = gc.Suite(&RelationerImplicitSuite{}) - -func (s *RelationerImplicitSuite) TestImplicitRelationer(c *gc.C) { - // Create a relationer for an implicit endpoint (mysql:juju-info). - mysql := s.AddTestingService(c, "mysql", s.AddTestingCharm(c, "mysql")) - u, err := mysql.AddUnit() - c.Assert(err, jc.ErrorIsNil) - machine, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - err = u.AssignToMachine(machine) - c.Assert(err, jc.ErrorIsNil) - err = machine.SetProviderAddresses(network.NewScopedAddress("blah", network.ScopeCloudLocal)) - c.Assert(err, jc.ErrorIsNil) - s.AddTestingService(c, "logging", s.AddTestingCharm(c, "logging")) - eps, err := s.State.InferEndpoints("logging", "mysql") - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) 
- c.Assert(err, jc.ErrorIsNil) - relsDir := c.MkDir() - dir, err := relation.ReadStateDir(relsDir, rel.Id()) - c.Assert(err, jc.ErrorIsNil) - hooks := make(chan hook.Info) - - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = u.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - st := s.OpenAPIAs(c, u.Tag(), password) - uniterState, err := st.Uniter() - c.Assert(err, jc.ErrorIsNil) - c.Assert(uniterState, gc.NotNil) - - apiUnit, err := uniterState.Unit(u.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - apiRel, err := uniterState.Relation(rel.Tag().(names.RelationTag)) - c.Assert(err, jc.ErrorIsNil) - apiRelUnit, err := apiRel.Unit(apiUnit) - c.Assert(err, jc.ErrorIsNil) - - r := uniter.NewRelationer(apiRelUnit, dir, hooks) - c.Assert(r, jc.Satisfies, (*uniter.Relationer).IsImplicit) - - // Join the relation. - err = r.Join() - c.Assert(err, jc.ErrorIsNil) - sub, err := s.State.Unit("logging/0") - c.Assert(err, jc.ErrorIsNil) - - // Join the other side; check no hooks are sent. - r.StartHooks() - defer func() { c.Assert(r.StopHooks(), gc.IsNil) }() - subru, err := rel.Unit(sub) - c.Assert(err, jc.ErrorIsNil) - err = subru.EnterScope(map[string]interface{}{"some": "data"}) - c.Assert(err, jc.ErrorIsNil) - s.State.StartSync() - select { - case <-time.After(coretesting.ShortWait): - case <-hooks: - c.Fatalf("unexpected hook generated") - } - - // Set it to Dying; check that the dir is removed immediately. - err = r.SetDying() - c.Assert(err, jc.ErrorIsNil) - path := strconv.Itoa(rel.Id()) - ft.Removed{path}.Check(c, relsDir) - - // Check that it left scope, by leaving scope on the other side and destroying - // the relation. - err = subru.LeaveScope() - c.Assert(err, jc.ErrorIsNil) - err = rel.Destroy() - c.Assert(err, jc.ErrorIsNil) - err = rel.Refresh() - c.Assert(err, jc.Satisfies, errors.IsNotFound) - - // Verify that no other hooks were sent at any stage. - select { - case <-hooks: - c.Fatalf("unexpected hook generated") - default: - } -} === removed file 'src/github.com/juju/juju/worker/uniter/relations.go' --- src/github.com/juju/juju/worker/uniter/relations.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/relations.go 1970-01-01 00:00:00 +0000 @@ -1,349 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "github.com/juju/errors" - "github.com/juju/names" - corecharm "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" - "launchpad.net/tomb" - - "github.com/juju/juju/api/uniter" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state/watcher" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/relation" - "github.com/juju/juju/worker/uniter/runner" -) - -// Relations exists to encapsulate relation state and operations behind an -// interface for the benefit of future refactoring. -type Relations interface { - - // Name returns the name of the relation with the supplied id, or an error - // if the relation is unknown. - Name(id int) (string, error) - - // Hooks returns the channel on which relation hook execution requests - // are sent. - Hooks() <-chan hook.Info - - // StartHooks starts sending hook execution requests on the Hooks channel. - StartHooks() - - // StopHooks stops sending hook execution requests on the Hooks channel. - StopHooks() error - - // PrepareHook returns the name of the supplied relation hook, or an error - // if the hook is unknown or invalid given current state. 
- PrepareHook(hookInfo hook.Info) (string, error) - - // CommitHook persists the state change encoded in the supplied relation - // hook, or returns an error if the hook is unknown or invalid given - // current relation state. - CommitHook(hookInfo hook.Info) error - - // GetInfo returns information about current relation state. - GetInfo() map[int]*runner.RelationInfo - - // Update checks for and responds to changes in the life states of the - // relations with the supplied ids. If any id corresponds to an alive - // relation that is not already recorded, the unit will enter scope for - // that relation and start its hook queue. - Update(ids []int) error - - // SetDying notifies all known relations that the only hooks to be requested - // should be those necessary to cleanly exit the relation. - SetDying() error -} - -// relations implements Relations. -type relations struct { - st *uniter.State - unit *uniter.Unit - charmDir string - relationsDir string - relationers map[int]*Relationer - relationHooks chan hook.Info - abort <-chan struct{} -} - -func newRelations(st *uniter.State, tag names.UnitTag, paths Paths, abort <-chan struct{}) (*relations, error) { - unit, err := st.Unit(tag) - if err != nil { - return nil, errors.Trace(err) - } - r := &relations{ - st: st, - unit: unit, - charmDir: paths.State.CharmDir, - relationsDir: paths.State.RelationsDir, - relationers: make(map[int]*Relationer), - relationHooks: make(chan hook.Info), - abort: abort, - } - if err := r.init(); err != nil { - return nil, errors.Trace(err) - } - return r, nil -} - -// init reconciles the local relation state dirs with the remote state of -// the corresponding relations. It's only expected to be called while a -// *relations is being created. -func (r *relations) init() error { - joinedRelationTags, err := r.unit.JoinedRelations() - if err != nil { - return errors.Trace(err) - } - joinedRelations := make(map[int]*uniter.Relation) - for _, tag := range joinedRelationTags { - relation, err := r.st.Relation(tag) - if err != nil { - return errors.Trace(err) - } - joinedRelations[relation.Id()] = relation - } - knownDirs, err := relation.ReadAllStateDirs(r.relationsDir) - if err != nil { - return errors.Trace(err) - } - for id, dir := range knownDirs { - if rel, ok := joinedRelations[id]; ok { - if err := r.add(rel, dir); err != nil { - return errors.Trace(err) - } - } else if err := dir.Remove(); err != nil { - return errors.Trace(err) - } - } - for id, rel := range joinedRelations { - if _, ok := knownDirs[id]; ok { - continue - } - dir, err := relation.ReadStateDir(r.relationsDir, id) - if err != nil { - return errors.Trace(err) - } - if err := r.add(rel, dir); err != nil { - return errors.Trace(err) - } - } - return nil -} - -// Name is part of the Relations interface. -func (r *relations) Name(id int) (string, error) { - relationer, found := r.relationers[id] - if !found { - return "", errors.Errorf("unknown relation: %d", id) - } - return relationer.ru.Endpoint().Name, nil -} - -// Hooks is part of the Relations interface. -func (r *relations) Hooks() <-chan hook.Info { - return r.relationHooks -} - -// StartHooks is part of the Relations interface. -func (r *relations) StartHooks() { - for _, relationer := range r.relationers { - relationer.StartHooks() - } -} - -// StopHooks is part of the Relations interface. 
-func (r *relations) StopHooks() (err error) { - for _, relationer := range r.relationers { - if e := relationer.StopHooks(); e != nil { - if err == nil { - err = e - } else { - logger.Errorf("additional error while stopping hooks: %v", e) - } - } - } - return err -} - -// PrepareHook is part of the Relations interface. -func (r *relations) PrepareHook(hookInfo hook.Info) (string, error) { - if !hookInfo.Kind.IsRelation() { - return "", errors.Errorf("not a relation hook: %#v", hookInfo) - } - relationer, found := r.relationers[hookInfo.RelationId] - if !found { - return "", errors.Errorf("unknown relation: %d", hookInfo.RelationId) - } - return relationer.PrepareHook(hookInfo) -} - -// CommitHook is part of the Relations interface. -func (r *relations) CommitHook(hookInfo hook.Info) error { - if !hookInfo.Kind.IsRelation() { - return errors.Errorf("not a relation hook: %#v", hookInfo) - } - relationer, found := r.relationers[hookInfo.RelationId] - if !found { - return errors.Errorf("unknown relation: %d", hookInfo.RelationId) - } - if hookInfo.Kind == hooks.RelationBroken { - delete(r.relationers, hookInfo.RelationId) - } - return relationer.CommitHook(hookInfo) -} - -// GetInfo is part of the Relations interface. -func (r *relations) GetInfo() map[int]*runner.RelationInfo { - relationInfos := map[int]*runner.RelationInfo{} - for id, relationer := range r.relationers { - relationInfos[id] = relationer.ContextInfo() - } - return relationInfos -} - -// Update is part of the Relations interface. -func (r *relations) Update(ids []int) error { - for _, id := range ids { - if relationer, found := r.relationers[id]; found { - rel := relationer.ru.Relation() - if err := rel.Refresh(); err != nil { - return errors.Annotatef(err, "cannot update relation %q", rel) - } - if rel.Life() == params.Dying { - if err := r.setDying(id); err != nil { - return errors.Trace(err) - } - } - continue - } - // Relations that are not alive are simply skipped, because they - // were not previously known anyway. - rel, err := r.st.RelationById(id) - if err != nil { - if params.IsCodeNotFoundOrCodeUnauthorized(err) { - continue - } - return errors.Trace(err) - } - if rel.Life() != params.Alive { - continue - } - // Make sure we ignore relations not implemented by the unit's charm. - ch, err := corecharm.ReadCharmDir(r.charmDir) - if err != nil { - return errors.Trace(err) - } - if ep, err := rel.Endpoint(); err != nil { - return errors.Trace(err) - } else if !ep.ImplementedBy(ch) { - logger.Warningf("skipping relation with unknown endpoint %q", ep.Name) - continue - } - dir, err := relation.ReadStateDir(r.relationsDir, id) - if err != nil { - return errors.Trace(err) - } - err = r.add(rel, dir) - if err == nil { - r.relationers[id].StartHooks() - continue - } - e := dir.Remove() - if !params.IsCodeCannotEnterScope(err) { - return errors.Trace(err) - } - if e != nil { - return errors.Trace(e) - } - } - if ok, err := r.unit.IsPrincipal(); err != nil { - return errors.Trace(err) - } else if ok { - return nil - } - // If no Alive relations remain between a subordinate unit's service - // and its principal's service, the subordinate must become Dying. - for _, relationer := range r.relationers { - scope := relationer.ru.Endpoint().Scope - if scope == corecharm.ScopeContainer && !relationer.dying { - return nil - } - } - return r.unit.Destroy() -} - -// SetDying is part of the Relations interface. -// should be those necessary to cleanly exit the relation. 
-func (r *relations) SetDying() error { - for id := range r.relationers { - if err := r.setDying(id); err != nil { - return err - } - } - return nil -} - -// add causes the unit agent to join the supplied relation, and to -// store persistent state in the supplied dir. -func (r *relations) add(rel *uniter.Relation, dir *relation.StateDir) (err error) { - logger.Infof("joining relation %q", rel) - ru, err := rel.Unit(r.unit) - if err != nil { - return errors.Trace(err) - } - relationer := NewRelationer(ru, dir, r.relationHooks) - w, err := r.unit.Watch() - if err != nil { - return errors.Trace(err) - } - defer func() { - if e := w.Stop(); e != nil { - if err == nil { - err = e - } else { - logger.Errorf("error stopping unit watcher: %v", e) - } - } - }() - for { - select { - case <-r.abort: - return tomb.ErrDying - case _, ok := <-w.Changes(): - if !ok { - return watcher.EnsureErr(w) - } - err := relationer.Join() - if params.IsCodeCannotEnterScopeYet(err) { - logger.Infof("cannot enter scope for relation %q; waiting for subordinate to be removed", rel) - continue - } else if err != nil { - return errors.Trace(err) - } - logger.Infof("joined relation %q", rel) - r.relationers[rel.Id()] = relationer - return nil - } - } -} - -// setDying notifies the relationer identified by the supplied id that the -// only hook executions to be requested should be those necessary to cleanly -// exit the relation. -func (r *relations) setDying(id int) error { - relationer, found := r.relationers[id] - if !found { - return nil - } - if err := relationer.SetDying(); err != nil { - return errors.Trace(err) - } - if relationer.IsImplicit() { - delete(r.relationers, id) - } - return nil -} === added directory 'src/github.com/juju/juju/worker/uniter/remotestate' === added file 'src/github.com/juju/juju/worker/uniter/remotestate/interface.go' --- src/github.com/juju/juju/worker/uniter/remotestate/interface.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/interface.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package remotestate + +import ( + "github.com/juju/juju/worker" +) + +type Watcher interface { + // RemoteStateChanged returns a channel which is signalled + // whenever the remote state is changed. + RemoteStateChanged() <-chan struct{} + + // Snapshot returns the current snapshot of the remote state. + Snapshot() Snapshot + + worker.Worker +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/mock_test.go' --- src/github.com/juju/juju/worker/uniter/remotestate/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,322 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
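+// The types below are hand-rolled test doubles for the State, Unit, Service +// and watcher interfaces consumed by this package; each mock watcher exposes +// its changes channel directly so tests can inject events at precisely +// controlled points.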
+ +package remotestate_test + +import ( + "sync" + + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker/uniter/remotestate" +) + +func newMockWatcher() *mockWatcher { + return &mockWatcher{ + stopped: make(chan struct{}), + } +} + +type mockWatcher struct { + mu sync.Mutex + stopped chan struct{} +} + +func (w *mockWatcher) Kill() { + w.mu.Lock() + defer w.mu.Unlock() + if !w.Stopped() { + close(w.stopped) + } +} + +func (w *mockWatcher) Wait() error { + <-w.stopped + return nil +} + +func (w *mockWatcher) Stopped() bool { + select { + case <-w.stopped: + return true + default: + return false + } +} + +func newMockNotifyWatcher() *mockNotifyWatcher { + return &mockNotifyWatcher{ + mockWatcher: newMockWatcher(), + changes: make(chan struct{}, 1), + } +} + +type mockNotifyWatcher struct { + *mockWatcher + changes chan struct{} +} + +func (w *mockNotifyWatcher) Changes() watcher.NotifyChannel { + return w.changes +} + +func newMockStringsWatcher() *mockStringsWatcher { + return &mockStringsWatcher{ + mockWatcher: newMockWatcher(), + changes: make(chan []string, 1), + } +} + +type mockStringsWatcher struct { + *mockWatcher + changes chan []string +} + +func (w *mockStringsWatcher) Changes() watcher.StringsChannel { + return w.changes +} + +func newMockRelationUnitsWatcher() *mockRelationUnitsWatcher { + return &mockRelationUnitsWatcher{ + mockWatcher: newMockWatcher(), + changes: make(chan watcher.RelationUnitsChange, 1), + } +} + +type mockRelationUnitsWatcher struct { + *mockWatcher + changes chan watcher.RelationUnitsChange +} + +func (w *mockRelationUnitsWatcher) Changes() watcher.RelationUnitsChannel { + return w.changes +} + +type mockState struct { + unit mockUnit + relations map[names.RelationTag]*mockRelation + storageAttachment map[params.StorageAttachmentId]params.StorageAttachment + relationUnitsWatchers map[names.RelationTag]*mockRelationUnitsWatcher + storageAttachmentWatchers map[names.StorageTag]*mockNotifyWatcher +} + +func (st *mockState) Relation(tag names.RelationTag) (remotestate.Relation, error) { + r, ok := st.relations[tag] + if !ok { + return nil, &params.Error{Code: params.CodeNotFound} + } + return r, nil +} + +func (st *mockState) StorageAttachment( + storageTag names.StorageTag, unitTag names.UnitTag, +) (params.StorageAttachment, error) { + if unitTag != st.unit.tag { + return params.StorageAttachment{}, &params.Error{Code: params.CodeNotFound} + } + attachment, ok := st.storageAttachment[params.StorageAttachmentId{ + UnitTag: unitTag.String(), + StorageTag: storageTag.String(), + }] + if !ok { + return params.StorageAttachment{}, &params.Error{Code: params.CodeNotFound} + } + if attachment.Kind == params.StorageKindUnknown { + return params.StorageAttachment{}, &params.Error{Code: params.CodeNotProvisioned} + } + return attachment, nil +} + +func (st *mockState) StorageAttachmentLife( + ids []params.StorageAttachmentId, +) ([]params.LifeResult, error) { + results := make([]params.LifeResult, len(ids)) + for i, id := range ids { + attachment, ok := st.storageAttachment[id] + if !ok { + results[i] = params.LifeResult{ + Error: &params.Error{Code: params.CodeNotFound}, + } + continue + } + results[i] = params.LifeResult{Life: attachment.Life} + } + return results, nil +} + +func (st *mockState) Unit(tag names.UnitTag) (remotestate.Unit, error) { + if tag != st.unit.tag { + return nil, &params.Error{Code: params.CodeNotFound} + } +
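// The tag matches the one configured unit; hand back a pointer to the + // shared mock so tests can mutate it in place between events. +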
return &st.unit, nil +} + +func (st *mockState) WatchRelationUnits( + relationTag names.RelationTag, unitTag names.UnitTag, +) (watcher.RelationUnitsWatcher, error) { + if unitTag != st.unit.tag { + return nil, &params.Error{Code: params.CodeNotFound} + } + watcher, ok := st.relationUnitsWatchers[relationTag] + if !ok { + return nil, &params.Error{Code: params.CodeNotFound} + } + return watcher, nil +} + +func (st *mockState) WatchStorageAttachment( + storageTag names.StorageTag, unitTag names.UnitTag, +) (watcher.NotifyWatcher, error) { + if unitTag != st.unit.tag { + return nil, &params.Error{Code: params.CodeNotFound} + } + watcher, ok := st.storageAttachmentWatchers[storageTag] + if !ok { + return nil, &params.Error{Code: params.CodeNotFound} + } + return watcher, nil +} + +type mockUnit struct { + tag names.UnitTag + life params.Life + resolved params.ResolvedMode + service mockService + unitWatcher *mockNotifyWatcher + addressesWatcher *mockNotifyWatcher + configSettingsWatcher *mockNotifyWatcher + storageWatcher *mockStringsWatcher + actionWatcher *mockStringsWatcher +} + +func (u *mockUnit) Life() params.Life { + return u.life +} + +func (u *mockUnit) Refresh() error { + return nil +} + +func (u *mockUnit) Resolved() (params.ResolvedMode, error) { + return u.resolved, nil +} + +func (u *mockUnit) Service() (remotestate.Service, error) { + return &u.service, nil +} + +func (u *mockUnit) Tag() names.UnitTag { + return u.tag +} + +func (u *mockUnit) Watch() (watcher.NotifyWatcher, error) { + return u.unitWatcher, nil +} + +func (u *mockUnit) WatchAddresses() (watcher.NotifyWatcher, error) { + return u.addressesWatcher, nil +} + +func (u *mockUnit) WatchConfigSettings() (watcher.NotifyWatcher, error) { + return u.configSettingsWatcher, nil +} + +func (u *mockUnit) WatchStorage() (watcher.StringsWatcher, error) { + return u.storageWatcher, nil +} + +func (u *mockUnit) WatchActionNotifications() (watcher.StringsWatcher, error) { + return u.actionWatcher, nil +} + +type mockService struct { + tag names.ServiceTag + life params.Life + curl *charm.URL + charmModifiedVersion int + forceUpgrade bool + serviceWatcher *mockNotifyWatcher + leaderSettingsWatcher *mockNotifyWatcher + relationsWatcher *mockStringsWatcher +} + +func (s *mockService) CharmModifiedVersion() (int, error) { + return s.charmModifiedVersion, nil +} + +func (s *mockService) CharmURL() (*charm.URL, bool, error) { + return s.curl, s.forceUpgrade, nil +} + +func (s *mockService) Life() params.Life { + return s.life +} + +func (s *mockService) Refresh() error { + return nil +} + +func (s *mockService) Tag() names.ServiceTag { + return s.tag +} + +func (s *mockService) Watch() (watcher.NotifyWatcher, error) { + return s.serviceWatcher, nil +} + +func (s *mockService) WatchLeadershipSettings() (watcher.NotifyWatcher, error) { + return s.leaderSettingsWatcher, nil +} + +func (s *mockService) WatchRelations() (watcher.StringsWatcher, error) { + return s.relationsWatcher, nil +} + +type mockRelation struct { + id int + life params.Life +} + +func (r *mockRelation) Id() int { + return r.id +} + +func (r *mockRelation) Life() params.Life { + return r.life +} + +type mockLeadershipTracker struct { + leadership.Tracker + claimTicket mockTicket + leaderTicket mockTicket + minionTicket mockTicket +} + +func (mock *mockLeadershipTracker) ClaimLeader() leadership.Ticket { + return &mock.claimTicket +} + +func (mock *mockLeadershipTracker) WaitLeader() leadership.Ticket { + return &mock.leaderTicket +} + +func (mock *mockLeadershipTracker) WaitMinion()
leadership.Ticket { + return &mock.minionTicket +} + +type mockTicket struct { + ch chan struct{} + result bool +} + +func (t *mockTicket) Ready() <-chan struct{} { + return t.ch +} + +func (t *mockTicket) Wait() bool { + return t.result +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/package_test.go' --- src/github.com/juju/juju/worker/uniter/remotestate/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package remotestate_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/relationunits.go' --- src/github.com/juju/juju/worker/uniter/remotestate/relationunits.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/relationunits.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package remotestate + +import ( + "github.com/juju/errors" + + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" +) + +type relationUnitsWatcher struct { + catacomb catacomb.Catacomb + relationId int + changes watcher.RelationUnitsChannel + out chan<- relationUnitsChange +} + +type relationUnitsChange struct { + relationId int + watcher.RelationUnitsChange +} + +// newRelationUnitsWatcher creates a new worker that takes values from the +// supplied watcher's Changes chan, annotates them with the supplied relation +// id, and delivers them on the supplied out chan. +// +// The caller releases responsibility for stopping the supplied watcher and +// waiting for errors, *whether or not this method succeeds*. +func newRelationUnitsWatcher( + relationId int, + watcher watcher.RelationUnitsWatcher, + out chan<- relationUnitsChange, +) (*relationUnitsWatcher, error) { + ruw := &relationUnitsWatcher{ + relationId: relationId, + changes: watcher.Changes(), + out: out, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &ruw.catacomb, + Work: ruw.loop, + Init: []worker.Worker{watcher}, + }) + if err != nil { + return nil, errors.Trace(err) + } + return ruw, nil +} + +// Kill is part of the worker.Worker interface. +func (w *relationUnitsWatcher) Kill() { + w.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (w *relationUnitsWatcher) Wait() error { + return w.catacomb.Wait() +} + +func (w *relationUnitsWatcher) loop() error { + for { + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case change, ok := <-w.changes: + if !ok { + return errors.New("watcher closed channel") + } + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case w.out <- relationUnitsChange{w.relationId, change}: + } + } + } +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/snapshot.go' --- src/github.com/juju/juju/worker/uniter/remotestate/snapshot.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/snapshot.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,85 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
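+ +// Note that the *Version fields below are plain counters: the watcher bumps +// one whenever the corresponding event arrives, and observers react by +// comparing them against the versions captured in an earlier Snapshot rather +// than by interpreting the values themselves.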
+ +package remotestate + +import ( + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" +) + +// Snapshot is a snapshot of the remote state of the unit. +type Snapshot struct { + // Life is the lifecycle state of the unit. + Life params.Life + + // Relations contains the lifecycle states of + // each of the service's relations, keyed by + // relation IDs. + Relations map[int]RelationSnapshot + + // Storage contains the lifecycle and attached + // states of each of the unit's storage attachments. + Storage map[names.StorageTag]StorageSnapshot + + // CharmModifiedVersion increments whenever the service's charm or + // one of its resources changes. + CharmModifiedVersion int + + // CharmURL is the charm URL that the unit is + // expected to run. + CharmURL *charm.URL + + // ForceCharmUpgrade reports whether the unit + // should upgrade even in an error state. + ForceCharmUpgrade bool + + // ResolvedMode reports the method of resolving + // hook execution errors. + ResolvedMode params.ResolvedMode + + // RetryHookVersion increments each time a failed + // hook is meant to be retried if ResolvedMode is + // set to ResolvedNone. + RetryHookVersion int + + // ConfigVersion is the last published version of + // the unit's config settings. + ConfigVersion int + + // Leader indicates whether or not the unit is the + // elected leader. + Leader bool + + // LeaderSettingsVersion is the last published + // version of the leader settings for the service. + LeaderSettingsVersion int + + // UpdateStatusVersion increments each time an + // update-status hook is supposed to run. + UpdateStatusVersion int + + // Actions is the list of pending actions to + // be performed by this unit. + Actions []string + + // Commands is the list of IDs of commands to be + // executed by this unit. + Commands []string +} + +type RelationSnapshot struct { + Life params.Life + Members map[string]int64 +} + +// StorageSnapshot has information relating to a storage +// instance belonging to a unit. +type StorageSnapshot struct { + Kind params.StorageKind + Life params.Life + Attached bool + Location string +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/state.go' --- src/github.com/juju/juju/worker/uniter/remotestate/state.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
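+ +// Illustrative wiring (assumed, not shown in this diff): given an established +// uniter API facade st, remotestate.NewAPIState(st) yields the narrow State +// below, which is what a WatcherConfig is expected to carry.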
+ +package remotestate + +import ( + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/api/uniter" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" +) + +type State interface { + Relation(names.RelationTag) (Relation, error) + StorageAttachment(names.StorageTag, names.UnitTag) (params.StorageAttachment, error) + StorageAttachmentLife([]params.StorageAttachmentId) ([]params.LifeResult, error) + Unit(names.UnitTag) (Unit, error) + WatchRelationUnits(names.RelationTag, names.UnitTag) (watcher.RelationUnitsWatcher, error) + WatchStorageAttachment(names.StorageTag, names.UnitTag) (watcher.NotifyWatcher, error) +} + +type Unit interface { + Life() params.Life + Refresh() error + Resolved() (params.ResolvedMode, error) + Service() (Service, error) + Tag() names.UnitTag + Watch() (watcher.NotifyWatcher, error) + WatchAddresses() (watcher.NotifyWatcher, error) + WatchConfigSettings() (watcher.NotifyWatcher, error) + WatchStorage() (watcher.StringsWatcher, error) + WatchActionNotifications() (watcher.StringsWatcher, error) +} + +type Service interface { + // CharmModifiedVersion returns a revision number for the charm that + // increments whenever the charm or a resource for the charm changes. + CharmModifiedVersion() (int, error) + // CharmURL returns the URL of the charm for this service, along with + // whether a charm upgrade should be forced. + CharmURL() (*charm.URL, bool, error) + // Life returns the lifecycle state of the service. + Life() params.Life + // Refresh syncs this value with the API server. + Refresh() error + // Tag returns the tag for this service. + Tag() names.ServiceTag + // Watch returns a watcher that fires when this service changes. + Watch() (watcher.NotifyWatcher, error) + // WatchLeadershipSettings returns a watcher that fires when the leadership + // settings for this service change. + WatchLeadershipSettings() (watcher.NotifyWatcher, error) + // WatchRelations returns a watcher that fires when the relations on this + // service change. + WatchRelations() (watcher.StringsWatcher, error) +} + +type Relation interface { + Id() int + Life() params.Life +} + +func NewAPIState(st *uniter.State) State { + return apiState{st} +} + +type apiState struct { + *uniter.State +} + +type apiUnit struct { + *uniter.Unit +} + +type apiService struct { + *uniter.Service +} + +type apiRelation struct { + *uniter.Relation +} + +func (st apiState) Relation(tag names.RelationTag) (Relation, error) { + r, err := st.State.Relation(tag) + return apiRelation{r}, err +} + +func (st apiState) Unit(tag names.UnitTag) (Unit, error) { + u, err := st.State.Unit(tag) + return apiUnit{u}, err +} + +func (u apiUnit) Service() (Service, error) { + s, err := u.Unit.Service() + return apiService{s}, err +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/storagewatcher.go' --- src/github.com/juju/juju/worker/uniter/remotestate/storagewatcher.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/storagewatcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,133 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package remotestate + +import ( + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" +) + +type StorageAccessor interface { + // StorageAttachment returns the storage attachment with the specified + // unit and storage tags.
+ StorageAttachment(names.StorageTag, names.UnitTag) (params.StorageAttachment, error) +} + +// newStorageAttachmentWatcher creates a new worker that wakes on input from +// the supplied watcher's Changes chan, finds out more about them, and delivers +// them on the supplied out chan. +// +// The caller releases responsibility for stopping the supplied watcher and +// waiting for errors, *whether or not this method succeeds*. +func newStorageAttachmentWatcher( + st StorageAccessor, + watcher watcher.NotifyWatcher, + unitTag names.UnitTag, + storageTag names.StorageTag, + out chan<- storageAttachmentChange, +) (*storageAttachmentWatcher, error) { + s := &storageAttachmentWatcher{ + st: st, + changes: watcher.Changes(), + out: out, + storageTag: storageTag, + unitTag: unitTag, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &s.catacomb, + Work: s.loop, + Init: []worker.Worker{watcher}, + }) + if err != nil { + return nil, errors.Trace(err) + } + return s, nil +} + +// storageAttachmentWatcher watches for changes to the attachment status of +// the storage with the specified tag and sends the tag to the specified channel +// when a change occurs. +type storageAttachmentWatcher struct { + catacomb catacomb.Catacomb + + st StorageAccessor + changes watcher.NotifyChannel + storageTag names.StorageTag + unitTag names.UnitTag + out chan<- storageAttachmentChange +} + +type storageAttachmentChange struct { + Tag names.StorageTag + Snapshot StorageSnapshot +} + +func getStorageSnapshot( + st StorageAccessor, + storageTag names.StorageTag, + unitTag names.UnitTag, +) (StorageSnapshot, error) { + attachment, err := st.StorageAttachment(storageTag, unitTag) + if err != nil { + return StorageSnapshot{}, errors.Annotate(err, "refreshing storage details") + } + snapshot := StorageSnapshot{ + Life: attachment.Life, + Kind: attachment.Kind, + Attached: true, + Location: attachment.Location, + } + return snapshot, nil +} + +func (s *storageAttachmentWatcher) loop() error { + for { + select { + case <-s.catacomb.Dying(): + return s.catacomb.ErrDying() + case _, ok := <-s.changes: + if !ok { + return errors.New("storage attachment watcher closed") + } + snapshot, err := getStorageSnapshot( + s.st, s.storageTag, s.unitTag, + ) + if params.IsCodeNotFound(err) { + // The storage attachment was removed + // from state, so we can stop watching. + return nil + } else if params.IsCodeNotProvisioned(err) { + // We do not care about unattached + // storage here. + continue + } else if err != nil { + return err + } + change := storageAttachmentChange{ + s.storageTag, + snapshot, + } + select { + case <-s.catacomb.Dying(): + return s.catacomb.ErrDying() + case s.out <- change: + } + } + } +} + +// Kill is part of the worker.Worker interface. +func (s *storageAttachmentWatcher) Kill() { + s.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (s *storageAttachmentWatcher) Wait() error { + return s.catacomb.Wait() +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/utils_test.go' --- src/github.com/juju/juju/worker/uniter/remotestate/utils_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/utils_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
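+ +// assertNoNotifyEvent and assertNotifyEvent encode the two timing idioms used +// throughout these tests: a non-event is proven by waiting out a ShortWait, +// while a genuine event is allowed up to a LongWait to arrive before the +// test fails.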
+ +package remotestate_test + +import ( + "time" + + gc "gopkg.in/check.v1" + + "github.com/juju/juju/testing" +) + +func assertNoNotifyEvent(c *gc.C, ch <-chan struct{}, event string) { + select { + case <-ch: + c.Fatalf("unexpected " + event) + case <-time.After(testing.ShortWait): + } +} + +func assertNotifyEvent(c *gc.C, ch <-chan struct{}, activity string) { + select { + case <-ch: + case <-time.After(testing.LongWait): + c.Fatalf("timed out " + activity) + panic("unreachable") + } +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/watcher.go' --- src/github.com/juju/juju/worker/uniter/remotestate/watcher.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/watcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,727 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package remotestate + +import ( + "sync" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/catacomb" +) + +var logger = loggo.GetLogger("juju.worker.uniter.remotestate") + +// RemoteStateWatcher collects unit, service, and service config information +// from separate state watchers, and updates a Snapshot which is sent on a +// channel upon change. +type RemoteStateWatcher struct { + st State + unit Unit + service Service + relations map[names.RelationTag]*relationUnitsWatcher + relationUnitsChanges chan relationUnitsChange + storageAttachmentWatchers map[names.StorageTag]*storageAttachmentWatcher + storageAttachmentChanges chan storageAttachmentChange + leadershipTracker leadership.Tracker + updateStatusChannel func() <-chan time.Time + commandChannel <-chan string + retryHookChannel <-chan struct{} + + catacomb catacomb.Catacomb + + out chan struct{} + mu sync.Mutex + current Snapshot +} + +// WatcherConfig holds configuration parameters for the +// remote state watcher. +type WatcherConfig struct { + State State + LeadershipTracker leadership.Tracker + UpdateStatusChannel func() <-chan time.Time + CommandChannel <-chan string + RetryHookChannel <-chan struct{} + UnitTag names.UnitTag +} + +// NewWatcher returns a RemoteStateWatcher that handles state changes pertaining to the +// supplied unit. +func NewWatcher(config WatcherConfig) (*RemoteStateWatcher, error) { + w := &RemoteStateWatcher{ + st: config.State, + relations: make(map[names.RelationTag]*relationUnitsWatcher), + relationUnitsChanges: make(chan relationUnitsChange), + storageAttachmentWatchers: make(map[names.StorageTag]*storageAttachmentWatcher), + storageAttachmentChanges: make(chan storageAttachmentChange), + leadershipTracker: config.LeadershipTracker, + updateStatusChannel: config.UpdateStatusChannel, + commandChannel: config.CommandChannel, + retryHookChannel: config.RetryHookChannel, + // Note: it is important that the out channel be buffered! + // The remote state watcher will perform a non-blocking send + // on the channel to wake up the observer. It is non-blocking + // so that we coalesce events while the observer is busy. 
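+ // A send that finds the buffer full is simply dropped; nothing is + // lost, because the observer always takes a fresh Snapshot after + // draining the channel.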
+ out: make(chan struct{}, 1), + current: Snapshot{ + Relations: make(map[int]RelationSnapshot), + Storage: make(map[names.StorageTag]StorageSnapshot), + }, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &w.catacomb, + Work: func() error { + return w.loop(config.UnitTag) + }, + }) + if err != nil { + return nil, errors.Trace(err) + } + return w, nil +} + +// Kill is part of the worker.Worker interface. +func (w *RemoteStateWatcher) Kill() { + w.catacomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (w *RemoteStateWatcher) Wait() error { + return w.catacomb.Wait() +} + +func (w *RemoteStateWatcher) RemoteStateChanged() <-chan struct{} { + return w.out +} + +func (w *RemoteStateWatcher) Snapshot() Snapshot { + w.mu.Lock() + defer w.mu.Unlock() + snapshot := w.current + snapshot.Relations = make(map[int]RelationSnapshot) + for id, relationSnapshot := range w.current.Relations { + snapshot.Relations[id] = relationSnapshot + } + snapshot.Storage = make(map[names.StorageTag]StorageSnapshot) + for tag, storageSnapshot := range w.current.Storage { + snapshot.Storage[tag] = storageSnapshot + } + snapshot.Actions = make([]string, len(w.current.Actions)) + copy(snapshot.Actions, w.current.Actions) + snapshot.Commands = make([]string, len(w.current.Commands)) + copy(snapshot.Commands, w.current.Commands) + return snapshot +} + +func (w *RemoteStateWatcher) ClearResolvedMode() { + w.mu.Lock() + w.current.ResolvedMode = params.ResolvedNone + w.mu.Unlock() +} + +func (w *RemoteStateWatcher) CommandCompleted(completed string) { + w.mu.Lock() + defer w.mu.Unlock() + for i, id := range w.current.Commands { + if id != completed { + continue + } + w.current.Commands = append( + w.current.Commands[:i], + w.current.Commands[i+1:]..., + ) + break + } +} + +func (w *RemoteStateWatcher) setUp(unitTag names.UnitTag) (err error) { + // TODO(dfc) named return value is a time bomb + // TODO(axw) move this logic. 
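+ // If the unit or its service has been removed, or this agent is no + // longer authorized to read them, there is nothing left to watch: + // translate the API error into ErrTerminateAgent so the agent worker + // shuts down.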
+ defer func() { + cause := errors.Cause(err) + if params.IsCodeNotFoundOrCodeUnauthorized(cause) { + err = worker.ErrTerminateAgent + } + }() + if w.unit, err = w.st.Unit(unitTag); err != nil { + return errors.Trace(err) + } + w.service, err = w.unit.Service() + if err != nil { + return errors.Trace(err) + } + return nil +} + +func (w *RemoteStateWatcher) loop(unitTag names.UnitTag) (err error) { + if err := w.setUp(unitTag); err != nil { + return errors.Trace(err) + } + + var requiredEvents int + + var seenUnitChange bool + unitw, err := w.unit.Watch() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(unitw); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenServiceChange bool + servicew, err := w.service.Watch() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(servicew); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenConfigChange bool + configw, err := w.unit.WatchConfigSettings() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(configw); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenRelationsChange bool + relationsw, err := w.service.WatchRelations() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(relationsw); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenAddressesChange bool + addressesw, err := w.unit.WatchAddresses() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(addressesw); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenStorageChange bool + storagew, err := w.unit.WatchStorage() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(storagew); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenLeaderSettingsChange bool + leaderSettingsw, err := w.service.WatchLeadershipSettings() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(leaderSettingsw); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenActionsChange bool + actionsw, err := w.unit.WatchActionNotifications() + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(actionsw); err != nil { + return errors.Trace(err) + } + requiredEvents++ + + var seenLeadershipChange bool + // There's no watcher for this per se; we wait on a channel + // returned by the leadership tracker. + requiredEvents++ + + var eventsObserved int + observedEvent := func(flag *bool) { + if !*flag { + *flag = true + eventsObserved++ + } + } + + // fire will, once the first event for each watcher has + // been observed, send a signal on the out channel. + fire := func() { + if eventsObserved != requiredEvents { + return + } + select { + case w.out <- struct{}{}: + default: + } + } + + // Check the initial leadership status, and then we can flip-flop + // waiting on leader or minion to trigger the changed event. 
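+ // At most one of waitLeader and waitMinion is non-nil at any time: while + // the unit is leader we wait to become a minion, and vice versa, so each + // leadership transition fires exactly one of the cases below.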
+ var waitLeader, waitMinion <-chan struct{} + claimLeader := w.leadershipTracker.ClaimLeader() + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case <-claimLeader.Ready(): + isLeader := claimLeader.Wait() + w.leadershipChanged(isLeader) + if isLeader { + waitMinion = w.leadershipTracker.WaitMinion().Ready() + } else { + waitLeader = w.leadershipTracker.WaitLeader().Ready() + } + observedEvent(&seenLeadershipChange) + } + + for { + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + + case _, ok := <-unitw.Changes(): + logger.Debugf("got unit change") + if !ok { + return errors.New("unit watcher closed") + } + if err := w.unitChanged(); err != nil { + return errors.Trace(err) + } + observedEvent(&seenUnitChange) + + case _, ok := <-servicew.Changes(): + logger.Debugf("got service change") + if !ok { + return errors.New("service watcher closed") + } + if err := w.serviceChanged(); err != nil { + return errors.Trace(err) + } + observedEvent(&seenServiceChange) + + case _, ok := <-configw.Changes(): + logger.Debugf("got config change: ok=%t", ok) + if !ok { + return errors.New("config watcher closed") + } + if err := w.configChanged(); err != nil { + return errors.Trace(err) + } + observedEvent(&seenConfigChange) + + case _, ok := <-addressesw.Changes(): + logger.Debugf("got address change: ok=%t", ok) + if !ok { + return errors.New("addresses watcher closed") + } + if err := w.addressesChanged(); err != nil { + return errors.Trace(err) + } + observedEvent(&seenAddressesChange) + + case _, ok := <-leaderSettingsw.Changes(): + logger.Debugf("got leader settings change: ok=%t", ok) + if !ok { + return errors.New("leader settings watcher closed") + } + if err := w.leaderSettingsChanged(); err != nil { + return errors.Trace(err) + } + observedEvent(&seenLeaderSettingsChange) + + case actions, ok := <-actionsw.Changes(): + logger.Debugf("got action change: %v ok=%t", actions, ok) + if !ok { + return errors.New("actions watcher closed") + } + if err := w.actionsChanged(actions); err != nil { + return errors.Trace(err) + } + observedEvent(&seenActionsChange) + + case keys, ok := <-relationsw.Changes(): + logger.Debugf("got relations change: ok=%t", ok) + if !ok { + return errors.New("relations watcher closed") + } + if err := w.relationsChanged(keys); err != nil { + return errors.Trace(err) + } + observedEvent(&seenRelationsChange) + + case keys, ok := <-storagew.Changes(): + logger.Debugf("got storage change: %v ok=%t", keys, ok) + if !ok { + return errors.New("storage watcher closed") + } + if err := w.storageChanged(keys); err != nil { + return errors.Trace(err) + } + observedEvent(&seenStorageChange) + + case <-waitMinion: + logger.Debugf("got leadership change: minion") + if err := w.leadershipChanged(false); err != nil { + return errors.Trace(err) + } + waitMinion = nil + waitLeader = w.leadershipTracker.WaitLeader().Ready() + + case <-waitLeader: + logger.Debugf("got leadership change: leader") + if err := w.leadershipChanged(true); err != nil { + return errors.Trace(err) + } + waitLeader = nil + waitMinion = w.leadershipTracker.WaitMinion().Ready() + + case change := <-w.storageAttachmentChanges: + logger.Debugf("storage attachment change %v", change) + if err := w.storageAttachmentChanged(change); err != nil { + return errors.Trace(err) + } + + case change := <-w.relationUnitsChanges: + logger.Debugf("got a relation units change: %v", change) + if err := w.relationUnitsChanged(change); err != nil { + return errors.Trace(err) + } + + case 
<-w.updateStatusChannel(): + logger.Debugf("update status timer triggered") + if err := w.updateStatusChanged(); err != nil { + return errors.Trace(err) + } + + case id := <-w.commandChannel: + logger.Debugf("command enqueued: %v", id) + if err := w.commandsChanged(id); err != nil { + return err + } + + case <-w.retryHookChannel: + logger.Debugf("retry hook timer triggered") + if err := w.retryHookTimerTriggered(); err != nil { + return err + } + } + + // Something changed. + fire() + } +} + +// updateStatusChanged is called when the update status timer expires. +func (w *RemoteStateWatcher) updateStatusChanged() error { + w.mu.Lock() + w.current.UpdateStatusVersion++ + w.mu.Unlock() + return nil +} + +// commandsChanged is called when a command is enqueued. +func (w *RemoteStateWatcher) commandsChanged(id string) error { + w.mu.Lock() + w.current.Commands = append(w.current.Commands, id) + w.mu.Unlock() + return nil +} + +// retryHookTimerTriggered is called when the retry hook timer expires. +func (w *RemoteStateWatcher) retryHookTimerTriggered() error { + w.mu.Lock() + w.current.RetryHookVersion++ + w.mu.Unlock() + return nil +} + +// unitChanged responds to changes in the unit. +func (w *RemoteStateWatcher) unitChanged() error { + if err := w.unit.Refresh(); err != nil { + return errors.Trace(err) + } + resolved, err := w.unit.Resolved() + if err != nil { + return errors.Trace(err) + } + w.mu.Lock() + defer w.mu.Unlock() + w.current.Life = w.unit.Life() + w.current.ResolvedMode = resolved + return nil +} + +// serviceChanged responds to changes in the service. +func (w *RemoteStateWatcher) serviceChanged() error { + if err := w.service.Refresh(); err != nil { + return errors.Trace(err) + } + url, force, err := w.service.CharmURL() + if err != nil { + return errors.Trace(err) + } + ver, err := w.service.CharmModifiedVersion() + if err != nil { + return errors.Trace(err) + } + w.mu.Lock() + w.current.CharmURL = url + w.current.ForceCharmUpgrade = force + w.current.CharmModifiedVersion = ver + w.mu.Unlock() + return nil +} + +func (w *RemoteStateWatcher) configChanged() error { + w.mu.Lock() + w.current.ConfigVersion++ + w.mu.Unlock() + return nil +} + +func (w *RemoteStateWatcher) addressesChanged() error { + w.mu.Lock() + w.current.ConfigVersion++ + w.mu.Unlock() + return nil +} + +func (w *RemoteStateWatcher) leaderSettingsChanged() error { + w.mu.Lock() + w.current.LeaderSettingsVersion++ + w.mu.Unlock() + return nil +} + +func (w *RemoteStateWatcher) leadershipChanged(isLeader bool) error { + w.mu.Lock() + w.current.Leader = isLeader + w.mu.Unlock() + return nil +} + +// relationsChanged responds to service relation changes. +func (w *RemoteStateWatcher) relationsChanged(keys []string) error { + w.mu.Lock() + defer w.mu.Unlock() + for _, key := range keys { + relationTag := names.NewRelationTag(key) + rel, err := w.st.Relation(relationTag) + if params.IsCodeNotFoundOrCodeUnauthorized(err) { + // If it's actually gone, this unit cannot have entered + // scope, and therefore never needs to know about it. 
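+ // Stop any running relation units watcher for the departed relation + // and drop it from both the live set and the snapshot.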
+ if ruw, ok := w.relations[relationTag]; ok { + worker.Stop(ruw) + delete(w.relations, relationTag) + delete(w.current.Relations, ruw.relationId) + } + } else if err != nil { + return errors.Trace(err) + } else { + if _, ok := w.relations[relationTag]; ok { + relationSnapshot := w.current.Relations[rel.Id()] + relationSnapshot.Life = rel.Life() + w.current.Relations[rel.Id()] = relationSnapshot + continue + } + ruw, err := w.st.WatchRelationUnits(relationTag, w.unit.Tag()) + if err != nil { + return errors.Trace(err) + } + // Because of the delay before handing off responsibility to + // newRelationUnitsWatcher below, add to our own catacomb to + // ensure errors get picked up if they happen. + if err := w.catacomb.Add(ruw); err != nil { + return errors.Trace(err) + } + if err := w.watchRelationUnits(rel, relationTag, ruw); err != nil { + return errors.Trace(err) + } + } + } + return nil +} + +// watchRelationUnits starts watching the relation units for the given +// relation, waits for its first event, and records the information in +// the current snapshot. +func (w *RemoteStateWatcher) watchRelationUnits( + rel Relation, relationTag names.RelationTag, ruw watcher.RelationUnitsWatcher, +) error { + relationSnapshot := RelationSnapshot{ + Life: rel.Life(), + Members: make(map[string]int64), + } + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case change, ok := <-ruw.Changes(): + if !ok { + return errors.New("relation units watcher closed") + } + for unit, settings := range change.Changed { + relationSnapshot.Members[unit] = settings.Version + } + } + innerRUW, err := newRelationUnitsWatcher(rel.Id(), ruw, w.relationUnitsChanges) + if err != nil { + return errors.Trace(err) + } + if err := w.catacomb.Add(innerRUW); err != nil { + return errors.Trace(err) + } + w.current.Relations[rel.Id()] = relationSnapshot + w.relations[relationTag] = innerRUW + return nil +} + +// relationUnitsChanged responds to relation units changes. +func (w *RemoteStateWatcher) relationUnitsChanged(change relationUnitsChange) error { + w.mu.Lock() + defer w.mu.Unlock() + snapshot, ok := w.current.Relations[change.relationId] + if !ok { + return nil + } + for unit, settings := range change.Changed { + snapshot.Members[unit] = settings.Version + } + for _, unit := range change.Departed { + delete(snapshot.Members, unit) + } + return nil +} + +// storageAttachmentChanged responds to storage attachment changes. +func (w *RemoteStateWatcher) storageAttachmentChanged(change storageAttachmentChange) error { + w.mu.Lock() + w.current.Storage[change.Tag] = change.Snapshot + w.mu.Unlock() + return nil +} + +func (w *RemoteStateWatcher) actionsChanged(actions []string) error { + w.mu.Lock() + defer w.mu.Unlock() + w.current.Actions = append(w.current.Actions, actions...) + return nil +} + +// storageChanged responds to unit storage changes. 
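+// Attachment life is fetched in a single bulk StorageAttachmentLife call; a +// NotFound result stops and removes any existing attachment watcher, while a +// previously unseen tag gets a new watcher whose first event is folded into +// the snapshot before this method returns.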
+func (w *RemoteStateWatcher) storageChanged(keys []string) error { + tags := make([]names.StorageTag, len(keys)) + for i, key := range keys { + tags[i] = names.NewStorageTag(key) + } + ids := make([]params.StorageAttachmentId, len(keys)) + for i, tag := range tags { + ids[i] = params.StorageAttachmentId{ + StorageTag: tag.String(), + UnitTag: w.unit.Tag().String(), + } + } + results, err := w.st.StorageAttachmentLife(ids) + if err != nil { + return errors.Trace(err) + } + + w.mu.Lock() + defer w.mu.Unlock() + + for i, result := range results { + tag := tags[i] + if result.Error == nil { + if storageSnapshot, ok := w.current.Storage[tag]; ok { + // We've previously started a watcher for this storage + // attachment, so all we needed to do was update the + // lifecycle state. + storageSnapshot.Life = result.Life + w.current.Storage[tag] = storageSnapshot + continue + } + // We haven't seen this storage attachment before, so start + // a watcher now; add it to our catacomb in case of mishap; + // and wait for the initial event. + saw, err := w.st.WatchStorageAttachment(tag, w.unit.Tag()) + if err != nil { + return errors.Annotate(err, "watching storage attachment") + } + if err := w.catacomb.Add(saw); err != nil { + return errors.Trace(err) + } + if err := w.watchStorageAttachment(tag, result.Life, saw); err != nil { + return errors.Trace(err) + } + } else if params.IsCodeNotFound(result.Error) { + if watcher, ok := w.storageAttachmentWatchers[tag]; ok { + // already under catacomb management, any error tracked already + worker.Stop(watcher) + delete(w.storageAttachmentWatchers, tag) + } + delete(w.current.Storage, tag) + } else { + return errors.Annotatef( + result.Error, "getting life of %s attachment", + names.ReadableString(tag), + ) + } + } + return nil +} + +// watchStorageAttachment starts watching the storage attachment with +// the specified storage tag, waits for its first event, and records +// the information in the current snapshot. +func (w *RemoteStateWatcher) watchStorageAttachment( + tag names.StorageTag, + life params.Life, + saw watcher.NotifyWatcher, +) error { + var storageSnapshot StorageSnapshot + select { + case <-w.catacomb.Dying(): + return w.catacomb.ErrDying() + case _, ok := <-saw.Changes(): + if !ok { + return errors.New("storage attachment watcher closed") + } + var err error + storageSnapshot, err = getStorageSnapshot(w.st, tag, w.unit.Tag()) + if params.IsCodeNotProvisioned(err) { + // If the storage is unprovisioned, we still want to + // record the attachment, but we'll mark it as + // unattached. This allows the uniter to wait for + // pending storage attachments to be provisioned. + storageSnapshot = StorageSnapshot{Life: life} + } else if err != nil { + return errors.Annotatef(err, "processing initial storage attachment change") + } + } + innerSAW, err := newStorageAttachmentWatcher( + w.st, saw, w.unit.Tag(), tag, w.storageAttachmentChanges, + ) + if err != nil { + return errors.Trace(err) + } + w.current.Storage[tag] = storageSnapshot + w.storageAttachmentWatchers[tag] = innerSAW + return nil +} === added file 'src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go' --- src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/remotestate/watcher_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,521 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
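+ +// The suite below wires a mockState, a mockLeadershipTracker and a manual +// test clock into a real RemoteStateWatcher, then drives it by sending on +// the mock watchers' channels: inject an event, assert that exactly one +// RemoteStateChanged signal arrives, and compare the resulting Snapshot.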
+ +package remotestate_test + +import ( + "time" + + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/testing" + "github.com/juju/juju/watcher" + "github.com/juju/juju/worker/uniter/remotestate" +) + +type WatcherSuite struct { + testing.BaseSuite + + st *mockState + leadership *mockLeadershipTracker + watcher *remotestate.RemoteStateWatcher + clock *testing.Clock +} + +// Duration is arbitrary, we'll trigger the ticker +// by advancing the clock past the duration. +var statusTickDuration = 10 * time.Second + +var _ = gc.Suite(&WatcherSuite{}) + +func (s *WatcherSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.st = &mockState{ + unit: mockUnit{ + tag: names.NewUnitTag("mysql/0"), + life: params.Alive, + service: mockService{ + tag: names.NewServiceTag("mysql"), + life: params.Alive, + curl: charm.MustParseURL("cs:trusty/mysql"), + charmModifiedVersion: 5, + serviceWatcher: newMockNotifyWatcher(), + leaderSettingsWatcher: newMockNotifyWatcher(), + relationsWatcher: newMockStringsWatcher(), + }, + unitWatcher: newMockNotifyWatcher(), + addressesWatcher: newMockNotifyWatcher(), + configSettingsWatcher: newMockNotifyWatcher(), + storageWatcher: newMockStringsWatcher(), + actionWatcher: newMockStringsWatcher(), + }, + relations: make(map[names.RelationTag]*mockRelation), + storageAttachment: make(map[params.StorageAttachmentId]params.StorageAttachment), + relationUnitsWatchers: make(map[names.RelationTag]*mockRelationUnitsWatcher), + storageAttachmentWatchers: make(map[names.StorageTag]*mockNotifyWatcher), + } + + s.leadership = &mockLeadershipTracker{ + claimTicket: mockTicket{make(chan struct{}, 1), true}, + leaderTicket: mockTicket{make(chan struct{}, 1), true}, + minionTicket: mockTicket{make(chan struct{}, 1), true}, + } + + s.clock = testing.NewClock(time.Now()) + statusTicker := func() <-chan time.Time { + return s.clock.After(statusTickDuration) + } + + w, err := remotestate.NewWatcher(remotestate.WatcherConfig{ + State: s.st, + LeadershipTracker: s.leadership, + UnitTag: s.st.unit.tag, + UpdateStatusChannel: statusTicker, + }) + c.Assert(err, jc.ErrorIsNil) + s.watcher = w +} + +func (s *WatcherSuite) TearDownTest(c *gc.C) { + if s.watcher != nil { + s.watcher.Kill() + err := s.watcher.Wait() + c.Assert(err, jc.ErrorIsNil) + } +} + +func (s *WatcherSuite) TestInitialSnapshot(c *gc.C) { + snap := s.watcher.Snapshot() + c.Assert(snap, jc.DeepEquals, remotestate.Snapshot{ + Relations: map[int]remotestate.RelationSnapshot{}, + Storage: map[names.StorageTag]remotestate.StorageSnapshot{}, + }) +} + +func (s *WatcherSuite) TestInitialSignal(c *gc.C) { + // There should not be a remote state change until + // we've seen all of the top-level notifications. 
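+ // Nine initial events are required in all: unit, addresses, config, + // storage, actions, service, leader settings, relations, and the + // leadership claim.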
+ s.st.unit.unitWatcher.changes <- struct{}{} + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") + + s.st.unit.addressesWatcher.changes <- struct{}{} + s.st.unit.configSettingsWatcher.changes <- struct{}{} + s.st.unit.storageWatcher.changes <- []string{} + s.st.unit.actionWatcher.changes <- []string{} + s.st.unit.service.serviceWatcher.changes <- struct{}{} + s.st.unit.service.leaderSettingsWatcher.changes <- struct{}{} + s.st.unit.service.relationsWatcher.changes <- []string{} + s.leadership.claimTicket.ch <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") +} + +func signalAll(st *mockState, l *mockLeadershipTracker) { + st.unit.unitWatcher.changes <- struct{}{} + st.unit.addressesWatcher.changes <- struct{}{} + st.unit.configSettingsWatcher.changes <- struct{}{} + st.unit.storageWatcher.changes <- []string{} + st.unit.actionWatcher.changes <- []string{} + st.unit.service.serviceWatcher.changes <- struct{}{} + st.unit.service.leaderSettingsWatcher.changes <- struct{}{} + st.unit.service.relationsWatcher.changes <- []string{} + l.claimTicket.ch <- struct{}{} +} + +func (s *WatcherSuite) TestSnapshot(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + snap := s.watcher.Snapshot() + c.Assert(snap, jc.DeepEquals, remotestate.Snapshot{ + Life: s.st.unit.life, + Relations: map[int]remotestate.RelationSnapshot{}, + Storage: map[names.StorageTag]remotestate.StorageSnapshot{}, + CharmModifiedVersion: s.st.unit.service.charmModifiedVersion, + CharmURL: s.st.unit.service.curl, + ForceCharmUpgrade: s.st.unit.service.forceUpgrade, + ResolvedMode: s.st.unit.resolved, + ConfigVersion: 2, // config settings and addresses + LeaderSettingsVersion: 1, + Leader: true, + }) +} + +func (s *WatcherSuite) TestRemoteStateChanged(c *gc.C) { + assertOneChange := func() { + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") + } + + signalAll(s.st, s.leadership) + assertOneChange() + initial := s.watcher.Snapshot() + + s.st.unit.life = params.Dying + s.st.unit.unitWatcher.changes <- struct{}{} + assertOneChange() + c.Assert(s.watcher.Snapshot().Life, gc.Equals, params.Dying) + + s.st.unit.addressesWatcher.changes <- struct{}{} + assertOneChange() + c.Assert(s.watcher.Snapshot().ConfigVersion, gc.Equals, initial.ConfigVersion+1) + + s.st.unit.configSettingsWatcher.changes <- struct{}{} + assertOneChange() + c.Assert(s.watcher.Snapshot().ConfigVersion, gc.Equals, initial.ConfigVersion+2) + + s.st.unit.storageWatcher.changes <- []string{} + assertOneChange() + + s.st.unit.service.forceUpgrade = true + s.st.unit.service.serviceWatcher.changes <- struct{}{} + assertOneChange() + c.Assert(s.watcher.Snapshot().ForceCharmUpgrade, jc.IsTrue) + + s.st.unit.service.leaderSettingsWatcher.changes <- struct{}{} + assertOneChange() + c.Assert(s.watcher.Snapshot().LeaderSettingsVersion, gc.Equals, initial.LeaderSettingsVersion+1) + + s.st.unit.service.relationsWatcher.changes <- []string{} + assertOneChange() + + s.clock.Advance(statusTickDuration + 1) + assertOneChange() +} + +func (s *WatcherSuite) TestActionsReceived(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + s.st.unit.actionWatcher.changes <- []string{"an-action"} + assertNotifyEvent(c, 
s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Actions, gc.DeepEquals, []string{"an-action"}) +} + +func (s *WatcherSuite) TestClearResolvedMode(c *gc.C) { + s.st.unit.resolved = params.ResolvedRetryHooks + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + snap := s.watcher.Snapshot() + c.Assert(snap.ResolvedMode, gc.Equals, params.ResolvedRetryHooks) + + s.watcher.ClearResolvedMode() + snap = s.watcher.Snapshot() + c.Assert(snap.ResolvedMode, gc.Equals, params.ResolvedNone) +} + +func (s *WatcherSuite) TestLeadershipChanged(c *gc.C) { + s.leadership.claimTicket.result = false + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Leader, jc.IsFalse) + + s.leadership.leaderTicket.ch <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Leader, jc.IsTrue) + + s.leadership.minionTicket.ch <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Leader, jc.IsFalse) +} + +func (s *WatcherSuite) TestLeadershipMinionUnchanged(c *gc.C) { + s.leadership.claimTicket.result = false + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + // Initially minion, so triggering minion should have no effect. + s.leadership.minionTicket.ch <- struct{}{} + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") +} + +func (s *WatcherSuite) TestLeadershipLeaderUnchanged(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + // Initially leader, so triggering leader should have no effect. + s.leadership.leaderTicket.ch <- struct{}{} + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") +} + +func (s *WatcherSuite) TestStorageChanged(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + storageTag0 := names.NewStorageTag("blob/0") + storageAttachmentId0 := params.StorageAttachmentId{ + UnitTag: s.st.unit.tag.String(), + StorageTag: storageTag0.String(), + } + storageTag0Watcher := newMockNotifyWatcher() + s.st.storageAttachmentWatchers[storageTag0] = storageTag0Watcher + s.st.storageAttachment[storageAttachmentId0] = params.StorageAttachment{ + UnitTag: storageAttachmentId0.UnitTag, + StorageTag: storageAttachmentId0.StorageTag, + Life: params.Alive, + Kind: params.StorageKindUnknown, // unprovisioned + Location: "nowhere", + } + + storageTag1 := names.NewStorageTag("blob/1") + storageAttachmentId1 := params.StorageAttachmentId{ + UnitTag: s.st.unit.tag.String(), + StorageTag: storageTag1.String(), + } + storageTag1Watcher := newMockNotifyWatcher() + s.st.storageAttachmentWatchers[storageTag1] = storageTag1Watcher + s.st.storageAttachment[storageAttachmentId1] = params.StorageAttachment{ + UnitTag: storageAttachmentId1.UnitTag, + StorageTag: storageAttachmentId1.StorageTag, + Life: params.Dying, + Kind: params.StorageKindBlock, + Location: "malta", + } + + // We should not see any event until the storage attachment watchers + // return their initial events. 
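+ // blob/0 was left unprovisioned (StorageKindUnknown), so it should be + // reported alive but unattached; blob/1 is provisioned, so its full + // attachment details should appear in the snapshot.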
+ s.st.unit.storageWatcher.changes <- []string{"blob/0", "blob/1"} + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") + storageTag0Watcher.changes <- struct{}{} + storageTag1Watcher.changes <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + c.Assert(s.watcher.Snapshot().Storage, jc.DeepEquals, map[names.StorageTag]remotestate.StorageSnapshot{ + storageTag0: remotestate.StorageSnapshot{ + Life: params.Alive, + }, + storageTag1: remotestate.StorageSnapshot{ + Life: params.Dying, + Kind: params.StorageKindBlock, + Attached: true, + Location: "malta", + }, + }) + + s.st.storageAttachment[storageAttachmentId0] = params.StorageAttachment{ + UnitTag: storageAttachmentId0.UnitTag, + StorageTag: storageAttachmentId0.StorageTag, + Life: params.Dying, + Kind: params.StorageKindFilesystem, + Location: "somewhere", + } + delete(s.st.storageAttachment, storageAttachmentId1) + storageTag0Watcher.changes <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + s.st.unit.storageWatcher.changes <- []string{"blob/1"} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Storage, jc.DeepEquals, map[names.StorageTag]remotestate.StorageSnapshot{ + storageTag0: remotestate.StorageSnapshot{ + Life: params.Dying, + Attached: true, + Kind: params.StorageKindFilesystem, + Location: "somewhere", + }, + }) +} + +func (s *WatcherSuite) TestStorageUnattachedChanged(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + storageTag0 := names.NewStorageTag("blob/0") + storageAttachmentId0 := params.StorageAttachmentId{ + UnitTag: s.st.unit.tag.String(), + StorageTag: storageTag0.String(), + } + storageTag0Watcher := newMockNotifyWatcher() + s.st.storageAttachmentWatchers[storageTag0] = storageTag0Watcher + s.st.storageAttachment[storageAttachmentId0] = params.StorageAttachment{ + UnitTag: storageAttachmentId0.UnitTag, + StorageTag: storageAttachmentId0.StorageTag, + Life: params.Alive, + Kind: params.StorageKindUnknown, // unprovisioned + } + + s.st.unit.storageWatcher.changes <- []string{"blob/0"} + storageTag0Watcher.changes <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + c.Assert(s.watcher.Snapshot().Storage, jc.DeepEquals, map[names.StorageTag]remotestate.StorageSnapshot{ + storageTag0: remotestate.StorageSnapshot{ + Life: params.Alive, + }, + }) + + s.st.storageAttachment[storageAttachmentId0] = params.StorageAttachment{ + UnitTag: storageAttachmentId0.UnitTag, + StorageTag: storageAttachmentId0.StorageTag, + Life: params.Dying, + } + // The storage is still unattached; triggering the storage-specific + // watcher should not cause any event to be emitted. 
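+	// (Clarifying note, not in the original test: blob/0 counts as
+	// unattached because its Kind is still params.StorageKindUnknown,
+	// i.e. the attachment is unprovisioned; only the unit's top-level
+	// storage watcher, triggered below, propagates its Life change
+	// into the snapshot.)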
+ storageTag0Watcher.changes <- struct{}{} + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") + s.st.unit.storageWatcher.changes <- []string{"blob/0"} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Storage, jc.DeepEquals, map[names.StorageTag]remotestate.StorageSnapshot{ + storageTag0: remotestate.StorageSnapshot{ + Life: params.Dying, + }, + }) +} + +func (s *WatcherSuite) TestStorageAttachmentRemoved(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + storageTag0 := names.NewStorageTag("blob/0") + storageAttachmentId0 := params.StorageAttachmentId{ + UnitTag: s.st.unit.tag.String(), + StorageTag: storageTag0.String(), + } + storageTag0Watcher := newMockNotifyWatcher() + s.st.storageAttachmentWatchers[storageTag0] = storageTag0Watcher + s.st.storageAttachment[storageAttachmentId0] = params.StorageAttachment{ + UnitTag: storageAttachmentId0.UnitTag, + StorageTag: storageAttachmentId0.StorageTag, + Life: params.Dying, + Kind: params.StorageKindUnknown, // unprovisioned + } + + s.st.unit.storageWatcher.changes <- []string{"blob/0"} + storageTag0Watcher.changes <- struct{}{} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + c.Assert(s.watcher.Snapshot().Storage, jc.DeepEquals, map[names.StorageTag]remotestate.StorageSnapshot{ + storageTag0: remotestate.StorageSnapshot{ + Life: params.Dying, + }, + }) + + // Removing the storage attachment and then triggering the storage- + // specific watcher should not cause an event to be emitted, but it + // will cause that watcher to stop running. Triggering the top-level + // storage watcher will remove it and update the snapshot. + delete(s.st.storageAttachment, storageAttachmentId0) + storageTag0Watcher.changes <- struct{}{} + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") + c.Assert(storageTag0Watcher.Stopped(), jc.IsTrue) + s.st.unit.storageWatcher.changes <- []string{"blob/0"} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Storage, gc.HasLen, 0) +} + +func (s *WatcherSuite) TestStorageChangedNotFoundInitially(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + // blob/0 is initially in state, but is removed between the + // watcher signal and the uniter querying it. This should + // not cause the watcher to raise an error. + s.st.unit.storageWatcher.changes <- []string{"blob/0"} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Storage, gc.HasLen, 0) +} + +func (s *WatcherSuite) TestRelationsChanged(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + relationTag := names.NewRelationTag("mysql:peer") + s.st.relations[relationTag] = &mockRelation{ + id: 123, life: params.Alive, + } + s.st.relationUnitsWatchers[relationTag] = newMockRelationUnitsWatcher() + s.st.unit.service.relationsWatcher.changes <- []string{relationTag.Id()} + + // There should not be any signal until the relation units watcher has + // returned its initial event also. 
+ assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "remote state change") + s.st.relationUnitsWatchers[relationTag].changes <- watcher.RelationUnitsChange{ + Changed: map[string]watcher.UnitSettings{"mysql/1": {1}, "mysql/2": {2}}, + } + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert( + s.watcher.Snapshot().Relations, + jc.DeepEquals, + map[int]remotestate.RelationSnapshot{ + 123: remotestate.RelationSnapshot{ + Life: params.Alive, + Members: map[string]int64{"mysql/1": 1, "mysql/2": 2}, + }, + }, + ) + + // If a relation is known, then updating it does not require any input + // from the relation units watcher. + s.st.relations[relationTag].life = params.Dying + s.st.unit.service.relationsWatcher.changes <- []string{relationTag.Id()} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Relations[123].Life, gc.Equals, params.Dying) + + // If a relation is not found, then it should be removed from the + // snapshot and its relation units watcher stopped. + delete(s.st.relations, relationTag) + s.st.unit.service.relationsWatcher.changes <- []string{relationTag.Id()} + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().Relations, gc.HasLen, 0) + c.Assert(s.st.relationUnitsWatchers[relationTag].Stopped(), jc.IsTrue) +} + +func (s *WatcherSuite) TestRelationUnitsChanged(c *gc.C) { + signalAll(s.st, s.leadership) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + relationTag := names.NewRelationTag("mysql:peer") + s.st.relations[relationTag] = &mockRelation{ + id: 123, life: params.Alive, + } + s.st.relationUnitsWatchers[relationTag] = newMockRelationUnitsWatcher() + + s.st.unit.service.relationsWatcher.changes <- []string{relationTag.Id()} + s.st.relationUnitsWatchers[relationTag].changes <- watcher.RelationUnitsChange{ + Changed: map[string]watcher.UnitSettings{"mysql/1": {1}}, + } + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + s.st.relationUnitsWatchers[relationTag].changes <- watcher.RelationUnitsChange{ + Changed: map[string]watcher.UnitSettings{"mysql/1": {2}, "mysql/2": {1}}, + } + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert( + s.watcher.Snapshot().Relations[123].Members, + jc.DeepEquals, + map[string]int64{"mysql/1": 2, "mysql/2": 1}, + ) + + s.st.relationUnitsWatchers[relationTag].changes <- watcher.RelationUnitsChange{ + Departed: []string{"mysql/1", "mysql/42"}, + } + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert( + s.watcher.Snapshot().Relations[123].Members, + jc.DeepEquals, + map[string]int64{"mysql/2": 1}, + ) +} + +func (s *WatcherSuite) TestUpdateStatusTicker(c *gc.C) { + signalAll(s.st, s.leadership) + initial := s.watcher.Snapshot() + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + + // Advance the clock past the trigger time. + s.clock.Advance(11 * time.Second) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().UpdateStatusVersion, gc.Equals, initial.UpdateStatusVersion+1) + + // Advance again but not past the trigger time. 
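+	// (Arithmetic note, not in the original test: assuming the suite's
+	// update-status tick interval is 10s, as the advances suggest, the
+	// 11s advance above crossed the first trigger, this 6s advance
+	// stops short of the next one, and the further 5s below crosses it.)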
+ s.clock.Advance(6 * time.Second) + assertNoNotifyEvent(c, s.watcher.RemoteStateChanged(), "unexpected remote state change") + c.Assert(s.watcher.Snapshot().UpdateStatusVersion, gc.Equals, initial.UpdateStatusVersion+1) + + // And we hit the trigger time. + s.clock.Advance(5 * time.Second) + assertNotifyEvent(c, s.watcher.RemoteStateChanged(), "waiting for remote state change") + c.Assert(s.watcher.Snapshot().UpdateStatusVersion, gc.Equals, initial.UpdateStatusVersion+2) +} === added directory 'src/github.com/juju/juju/worker/uniter/resolver' === added file 'src/github.com/juju/juju/worker/uniter/resolver.go' --- src/github.com/juju/juju/worker/uniter/resolver.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,284 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package uniter + +import ( + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +// ResolverConfig defines configuration for the uniter resolver. +type ResolverConfig struct { + ClearResolved func() error + ReportHookError func(hook.Info) error + FixDeployer func() error + StartRetryHookTimer func() + StopRetryHookTimer func() + Leadership resolver.Resolver + Actions resolver.Resolver + Relations resolver.Resolver + Storage resolver.Resolver + Commands resolver.Resolver +} + +type uniterResolver struct { + config ResolverConfig + retryHookTimerStarted bool +} + +// NewUniterResolver returns a new resolver.Resolver for the uniter. +func NewUniterResolver(cfg ResolverConfig) resolver.Resolver { + return &uniterResolver{ + config: cfg, + retryHookTimerStarted: false, + } +} + +func (s *uniterResolver) NextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + if remoteState.Life == params.Dead || localState.Stopped { + return nil, resolver.ErrTerminate + } + + if localState.Kind == operation.Upgrade { + if localState.Conflicted { + return s.nextOpConflicted(localState, remoteState, opFactory) + } + logger.Infof("resuming charm upgrade") + return opFactory.NewUpgrade(localState.CharmURL) + } + + if localState.Restart { + // We've just run the upgrade op, which will change the + // unit's charm URL. We need to restart the resolver + // loop so that we start watching the correct events. + return nil, resolver.ErrRestart + } + + if localState.Kind == operation.Continue { + if err := s.config.FixDeployer(); err != nil { + return nil, errors.Trace(err) + } + } + + if s.retryHookTimerStarted && (localState.Kind != operation.RunHook || localState.Step != operation.Pending) { + // The hook-retry timer is running, but there is no pending + // hook operation. We're not in an error state, so stop the + // timer now to reset the backoff state. 
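+		// (Illustrative scenario, not in the original comment: a hook
+		// failed and the timer was started; the unit was then resolved
+		// and the retried hook succeeded, so localState.Kind is back to
+		// Continue and the timer is stopped here so that the next
+		// failure starts its backoff from scratch.)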
+ s.config.StopRetryHookTimer() + s.retryHookTimerStarted = false + } + + op, err := s.config.Leadership.NextOp(localState, remoteState, opFactory) + if errors.Cause(err) != resolver.ErrNoOperation { + return op, err + } + + op, err = s.config.Actions.NextOp(localState, remoteState, opFactory) + if errors.Cause(err) != resolver.ErrNoOperation { + return op, err + } + + op, err = s.config.Commands.NextOp(localState, remoteState, opFactory) + if errors.Cause(err) != resolver.ErrNoOperation { + return op, err + } + + op, err = s.config.Storage.NextOp(localState, remoteState, opFactory) + if errors.Cause(err) != resolver.ErrNoOperation { + return op, err + } + + switch localState.Kind { + case operation.RunHook: + switch localState.Step { + case operation.Pending: + logger.Infof("awaiting error resolution for %q hook", localState.Hook.Kind) + return s.nextOpHookError(localState, remoteState, opFactory) + + case operation.Queued: + logger.Infof("found queued %q hook", localState.Hook.Kind) + if localState.Hook.Kind == hooks.Install { + // Special case: handle install in nextOp, + // so we do nothing when the unit is dying. + return s.nextOp(localState, remoteState, opFactory) + } + return opFactory.NewRunHook(*localState.Hook) + + case operation.Done: + logger.Infof("committing %q hook", localState.Hook.Kind) + return opFactory.NewSkipHook(*localState.Hook) + + default: + return nil, errors.Errorf("unknown operation step %v", localState.Step) + } + + case operation.Continue: + logger.Infof("no operations in progress; waiting for changes") + return s.nextOp(localState, remoteState, opFactory) + + default: + return nil, errors.Errorf("unknown operation kind %v", localState.Kind) + } +} + +// nextOpConflicted is called after an upgrade operation has failed, and hasn't +// yet been resolved or reverted. When in this mode, the resolver will only +// consider those two possibilities for progressing. +func (s *uniterResolver) nextOpConflicted( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + if remoteState.ResolvedMode != params.ResolvedNone { + if err := s.config.ClearResolved(); err != nil { + return nil, errors.Trace(err) + } + return opFactory.NewResolvedUpgrade(localState.CharmURL) + } + if remoteState.ForceCharmUpgrade && charmModified(localState, remoteState) { + return opFactory.NewRevertUpgrade(remoteState.CharmURL) + } + return nil, resolver.ErrWaiting +} + +func (s *uniterResolver) nextOpHookError( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + + // Report the hook error. + if err := s.config.ReportHookError(*localState.Hook); err != nil { + return nil, errors.Trace(err) + } + + if remoteState.ForceCharmUpgrade && charmModified(localState, remoteState) { + return opFactory.NewUpgrade(remoteState.CharmURL) + } + + switch remoteState.ResolvedMode { + case params.ResolvedNone: + if remoteState.RetryHookVersion > localState.RetryHookVersion { + // We've been asked to retry: clear the hook timer + // started state so we'll restart it if this fails. + // + // If the hook fails again, we'll re-enter this method + // with the retry hook versions equal and restart the + // timer. If the hook succeeds, we'll enter nextOp + // and stop the timer. + s.retryHookTimerStarted = false + return opFactory.NewRunHook(*localState.Hook) + } + if !s.retryHookTimerStarted { + // We haven't yet started a retry timer, so start one + // now. 
If we retry and fail, retryHookTimerStarted is + // cleared so that we'll still start it again. + s.config.StartRetryHookTimer() + s.retryHookTimerStarted = true + } + return nil, resolver.ErrNoOperation + case params.ResolvedRetryHooks: + s.config.StopRetryHookTimer() + s.retryHookTimerStarted = false + if err := s.config.ClearResolved(); err != nil { + return nil, errors.Trace(err) + } + return opFactory.NewRunHook(*localState.Hook) + case params.ResolvedNoHooks: + s.config.StopRetryHookTimer() + s.retryHookTimerStarted = false + if err := s.config.ClearResolved(); err != nil { + return nil, errors.Trace(err) + } + return opFactory.NewSkipHook(*localState.Hook) + default: + return nil, errors.Errorf( + "unknown resolved mode %q", remoteState.ResolvedMode, + ) + } +} + +func charmModified(local resolver.LocalState, remote remotestate.Snapshot) bool { + if *local.CharmURL != *remote.CharmURL { + logger.Debugf("upgrade from %v to %v", local.CharmURL, remote.CharmURL) + return true + } + + if local.CharmModifiedVersion != remote.CharmModifiedVersion { + logger.Debugf("upgrade from CharmModifiedVersion %v to %v", local.CharmModifiedVersion, remote.CharmModifiedVersion) + return true + } + return false +} + +func (s *uniterResolver) nextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + + switch remoteState.Life { + case params.Alive: + case params.Dying: + // Normally we handle relations last, but if we're dying we + // must ensure that all relations are broken first. + op, err := s.config.Relations.NextOp(localState, remoteState, opFactory) + if errors.Cause(err) != resolver.ErrNoOperation { + return op, err + } + + // We're not in a hook error and the unit is Dying, + // so we should proceed to tear down. + // + // TODO(axw) move logic for cascading destruction of + // subordinates, relation units and storage + // attachments into state, via cleanups. + if localState.Started { + return opFactory.NewRunHook(hook.Info{Kind: hooks.Stop}) + } + fallthrough + case params.Dead: + // The unit is dying/dead and stopped, so tell the uniter + // to terminate. + return nil, resolver.ErrTerminate + } + + // Now that storage hooks have run at least once, before anything else, + // we need to run the install hook. + // TODO(cmars): remove !localState.Started. It's here as a temporary + // measure because unit agent upgrades aren't being performed yet. + if !localState.Installed && !localState.Started { + return opFactory.NewRunHook(hook.Info{Kind: hooks.Install}) + } + + if charmModified(localState, remoteState) { + return opFactory.NewUpgrade(remoteState.CharmURL) + } + + if localState.ConfigVersion != remoteState.ConfigVersion { + return opFactory.NewRunHook(hook.Info{Kind: hooks.ConfigChanged}) + } + + op, err := s.config.Relations.NextOp(localState, remoteState, opFactory) + if errors.Cause(err) != resolver.ErrNoOperation { + return op, err + } + + // UpdateStatus hook runs if nothing else needs to. + if localState.UpdateStatusVersion != remoteState.UpdateStatusVersion { + return opFactory.NewRunHook(hook.Info{Kind: hooks.UpdateStatus}) + } + + return nil, resolver.ErrNoOperation +} === added file 'src/github.com/juju/juju/worker/uniter/resolver/export_test.go' --- src/github.com/juju/juju/worker/uniter/resolver/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resolver
+
+import "github.com/juju/juju/worker/uniter/operation"
+
+type ResolverOpFactory struct {
+	*resolverOpFactory
+}
+
+func NewResolverOpFactory(f operation.Factory) ResolverOpFactory {
+	return ResolverOpFactory{&resolverOpFactory{
+		Factory:    f,
+		LocalState: &LocalState{},
+	}}
+}
+
+var UpdateCharmDir = updateCharmDir

=== added file 'src/github.com/juju/juju/worker/uniter/resolver/func.go'
--- src/github.com/juju/juju/worker/uniter/resolver/func.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/resolver/func.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,23 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resolver
+
+import (
+	"github.com/juju/juju/worker/uniter/operation"
+	"github.com/juju/juju/worker/uniter/remotestate"
+)
+
+// ResolverFunc is an adapter that allows an ordinary function to be
+// used as a Resolver, in the same spirit as http.HandlerFunc.
+type ResolverFunc func(
+	LocalState,
+	remotestate.Snapshot,
+	operation.Factory,
+) (operation.Operation, error)
+
+// NextOp is part of the Resolver interface; it simply calls f.
+func (f ResolverFunc) NextOp(
+	local LocalState,
+	remote remotestate.Snapshot,
+	opFactory operation.Factory,
+) (operation.Operation, error) {
+	return f(local, remote, opFactory)
+}

=== added file 'src/github.com/juju/juju/worker/uniter/resolver/interface.go'
--- src/github.com/juju/juju/worker/uniter/resolver/interface.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/resolver/interface.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,97 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resolver
+
+import (
+	"github.com/juju/errors"
+	"gopkg.in/juju/charm.v6-unstable"
+
+	"github.com/juju/juju/worker/uniter/operation"
+	"github.com/juju/juju/worker/uniter/remotestate"
+)
+
+// ErrNoOperation is used to indicate that there are no
+// currently pending operations to run.
+var ErrNoOperation = errors.New("no operations")
+
+// ErrWaiting indicates that the resolver loop should
+// not execute any more operations until a remote state
+// event has occurred.
+var ErrWaiting = errors.New("waiting for remote state change")
+
+// ErrRestart indicates that the resolver loop should
+// be restarted with a new remote state watcher.
+var ErrRestart = errors.New("restarting resolver")
+
+// ErrTerminate is used when the unit has been marked
+// as dead and so there will never be any more
+// operations to run for that unit.
+var ErrTerminate = errors.New("terminate resolver")
+
+// Resolver instances use local (as is) and remote (to be) state
+// to provide operations to run in order to progress towards
+// the desired state.
+type Resolver interface {
+	// NextOp returns the next operation to run to reconcile
+	// the local state with the remote, desired state. The
+	// operations returned must be created using the given
+	// operation.Factory.
+	//
+	// This method must return ErrNoOperation if there are no
+	// operations to perform.
+	//
+	// By returning ErrTerminate, the resolver indicates that
+	// it will never have any more operations to perform,
+	// and the caller can cease calling.
+	NextOp(
+		LocalState,
+		remotestate.Snapshot,
+		operation.Factory,
+	) (operation.Operation, error)
+}
+
+// LocalState is a cache of the state of the local unit, as needed by the
+// Uniter. It is generally compared to the remote state, which holds the
+// expected state of the unit as stored in the controller.
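+//
+// (Illustrative sketch, not part of the original comment: a resolver
+// typically compares the two states field by field, e.g.
+//
+//     if local.ConfigVersion != remote.ConfigVersion {
+//         return opFactory.NewRunHook(hook.Info{Kind: hooks.ConfigChanged})
+//     }
+//
+// which is essentially what the uniter's own resolver does; committing
+// the hook then records the remote version in LocalState.)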
+type LocalState struct {
+	operation.State
+
+	// CharmModifiedVersion increases any time the charm,
+	// or any part of it, is changed in some way.
+	CharmModifiedVersion int
+
+	// CharmURL reports the currently installed charm URL. This is set
+	// by the committing of deploy (install/upgrade) ops.
+	CharmURL *charm.URL
+
+	// Conflicted indicates that the uniter is in a conflicted state,
+	// and needs either resolution or a forced upgrade to continue.
+	Conflicted bool
+
+	// Restart indicates that the resolver should exit with ErrRestart
+	// at the earliest opportunity.
+	Restart bool
+
+	// UpdateStatusVersion is the version of update status from remotestate.Snapshot
+	// for which an update-status hook has been committed.
+	UpdateStatusVersion int
+
+	// RetryHookVersion is the version of hook-retries from
+	// remotestate.Snapshot for which a hook has been retried.
+	RetryHookVersion int
+
+	// ConfigVersion is the version of config from remotestate.Snapshot
+	// for which a config-changed hook has been committed.
+	ConfigVersion int
+
+	// LeaderSettingsVersion is the version of leader settings from
+	// remotestate.Snapshot for which a leader-settings-changed hook has
+	// been committed.
+	LeaderSettingsVersion int
+
+	// CompletedActions is the set of actions that have been completed.
+	// This is used to prevent us re-running actions requested by the
+	// controller.
+	CompletedActions map[string]struct{}
+}

=== added file 'src/github.com/juju/juju/worker/uniter/resolver/locker_test.go'
--- src/github.com/juju/juju/worker/uniter/resolver/locker_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/resolver/locker_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,93 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package resolver_test + +import ( + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/worker/fortress" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/resolver" +) + +type GuardSuite struct { + guard *mockCharmDirGuard +} + +var _ = gc.Suite(&GuardSuite{}) + +func (s *GuardSuite) SetUpTest(c *gc.C) { + s.guard = &mockCharmDirGuard{} +} + +func (s *GuardSuite) checkCall(c *gc.C, state operation.State, call string) { + err := resolver.UpdateCharmDir(state, s.guard, nil) + c.Assert(err, jc.ErrorIsNil) + s.guard.CheckCallNames(c, call) +} + +func (s *GuardSuite) TestLockdownEmptyState(c *gc.C) { + s.checkCall(c, operation.State{}, "Lockdown") +} + +func (s *GuardSuite) TestLockdownNotStarted(c *gc.C) { + s.checkCall(c, operation.State{Started: false}, "Lockdown") +} + +func (s *GuardSuite) TestLockdownStartStopInvalid(c *gc.C) { + s.checkCall(c, operation.State{Started: true, Stopped: true}, "Lockdown") +} + +func (s *GuardSuite) TestLockdownInstall(c *gc.C) { + s.checkCall(c, operation.State{Started: true, Stopped: false, Kind: operation.Install}, "Lockdown") +} + +func (s *GuardSuite) TestLockdownUpgrade(c *gc.C) { + s.checkCall(c, operation.State{Started: true, Stopped: false, Kind: operation.Upgrade}, "Lockdown") +} + +func (s *GuardSuite) TestLockdownRunHookUpgradeCharm(c *gc.C) { + s.checkCall(c, operation.State{ + Started: true, + Stopped: false, + Kind: operation.RunHook, + Hook: &hook.Info{ + Kind: hooks.UpgradeCharm, + }, + }, "Lockdown") +} + +func (s *GuardSuite) TestUnlockStarted(c *gc.C) { + s.checkCall(c, operation.State{Started: true, Stopped: false}, "Unlock") +} + +func (s *GuardSuite) TestUnlockStartedContinue(c *gc.C) { + s.checkCall(c, operation.State{Started: true, Stopped: false, Kind: operation.Continue}, "Unlock") +} + +func (s *GuardSuite) TestUnlockStartedRunAction(c *gc.C) { + s.checkCall(c, operation.State{Started: true, Stopped: false, Kind: operation.RunAction}, "Unlock") +} + +func (s *GuardSuite) TestUnlockConfigChanged(c *gc.C) { + s.checkCall(c, operation.State{ + Started: true, + Stopped: false, + Kind: operation.RunHook, + Hook: &hook.Info{ + Kind: hooks.ConfigChanged, + }, + }, "Unlock") +} + +func (s *GuardSuite) TestLockdownAbortArg(c *gc.C) { + abort := make(fortress.Abort) + err := resolver.UpdateCharmDir(operation.State{}, s.guard, abort) + c.Assert(err, jc.ErrorIsNil) + s.guard.CheckCalls(c, []testing.StubCall{{FuncName: "Lockdown", Args: []interface{}{abort}}}) +} === added file 'src/github.com/juju/juju/worker/uniter/resolver/loop.go' --- src/github.com/juju/juju/worker/uniter/resolver/loop.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver/loop.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,124 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resolver + +import ( + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/worker/fortress" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" +) + +// ErrLoopAborted is used to signal that the loop is exiting because it +// received a value on its config's Abort chan. +var ErrLoopAborted = errors.New("resolver loop aborted") + +// LoopConfig contains configuration parameters for the resolver loop. 
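+//
+// (Illustrative wiring, not part of the original comment; loop_test.go
+// assembles one like
+//
+//     err := resolver.Loop(resolver.LoopConfig{
+//         Resolver:      r,
+//         Watcher:       watcher,
+//         Executor:      executor,
+//         Factory:       opFactory,
+//         Abort:         abort,
+//         CharmDirGuard: guard,
+//     }, &localState)
+//
+// where OnIdle is the only optional callback.)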
+type LoopConfig struct { + Resolver Resolver + Watcher remotestate.Watcher + Executor operation.Executor + Factory operation.Factory + Abort <-chan struct{} + OnIdle func() error + CharmDirGuard fortress.Guard +} + +// Loop repeatedly waits for remote state changes, feeding the local and +// remote state to the provided Resolver to generate Operations which are +// then run with the provided Executor. +// +// The provided "onIdle" function will be called when the loop is waiting +// for remote state changes due to a lack of work to perform. It will not +// be called when a change is anticipated (i.e. due to ErrWaiting). +// +// The resolver loop can be controlled in the following ways: +// - if the "abort" channel is signalled, then the loop will +// exit with ErrLoopAborted +// - if the resolver returns ErrWaiting, then no operations +// will be executed until the remote state has changed +// again +// - if the resolver returns ErrNoOperation, then "onIdle" +// will be invoked and the loop will wait until the remote +// state has changed again +// - if the resolver, onIdle, or executor return some other +// error, the loop will exit immediately +func Loop(cfg LoopConfig, localState *LocalState) error { + rf := &resolverOpFactory{Factory: cfg.Factory, LocalState: localState} + + // Initialize charmdir availability before entering the loop in case we're recovering from a restart. + err := updateCharmDir(cfg.Executor.State(), cfg.CharmDirGuard, cfg.Abort) + if err != nil { + return errors.Trace(err) + } + + for { + rf.RemoteState = cfg.Watcher.Snapshot() + rf.LocalState.State = cfg.Executor.State() + + op, err := cfg.Resolver.NextOp(*rf.LocalState, rf.RemoteState, rf) + for err == nil { + logger.Tracef("running op: %v", op) + if err := cfg.Executor.Run(op); err != nil { + return errors.Trace(err) + } + // Refresh snapshot, in case remote state + // changed between operations. + rf.RemoteState = cfg.Watcher.Snapshot() + rf.LocalState.State = cfg.Executor.State() + + err = updateCharmDir(rf.LocalState.State, cfg.CharmDirGuard, cfg.Abort) + if err != nil { + return errors.Trace(err) + } + + op, err = cfg.Resolver.NextOp(*rf.LocalState, rf.RemoteState, rf) + } + + switch errors.Cause(err) { + case nil: + case ErrWaiting: + // If a resolver is waiting for events to + // complete, the agent is not idle. + case ErrNoOperation: + if cfg.OnIdle != nil { + if err := cfg.OnIdle(); err != nil { + return errors.Trace(err) + } + } + default: + return err + } + + select { + case <-cfg.Abort: + return ErrLoopAborted + case <-cfg.Watcher.RemoteStateChanged(): + } + } +} + +// updateCharmDir sets charm directory availability for sharing among +// concurrent workers according to local operation state. +func updateCharmDir(opState operation.State, guard fortress.Guard, abort fortress.Abort) error { + var changing bool + + // Determine if the charm content is changing. 
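+	// (Illustrative summary, not in the original comment: combining this
+	// check with the availability rule below gives, for example,
+	//     started=true  stopped=false kind=Continue -> Unlock
+	//     started=true  stopped=false kind=Upgrade  -> Lockdown
+	//     started=false                             -> Lockdown
+	// matching the cases exercised in locker_test.go.)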
+ if opState.Kind == operation.Install || opState.Kind == operation.Upgrade { + changing = true + } else if opState.Kind == operation.RunHook && opState.Hook != nil && opState.Hook.Kind == hooks.UpgradeCharm { + changing = true + } + + available := opState.Started && !opState.Stopped && !changing + logger.Tracef("charmdir: available=%v opState: started=%v stopped=%v changing=%v", + available, opState.Started, opState.Stopped, changing) + if available { + return guard.Unlock() + } else { + return guard.Lockdown(abort) + } +} === added file 'src/github.com/juju/juju/worker/uniter/resolver/loop_test.go' --- src/github.com/juju/juju/worker/uniter/resolver/loop_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver/loop_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,215 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resolver_test + +import ( + "errors" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/testing" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +type LoopSuite struct { + testing.BaseSuite + + resolver resolver.Resolver + watcher *mockRemoteStateWatcher + opFactory *mockOpFactory + executor *mockOpExecutor + charmURL *charm.URL + abort chan struct{} + onIdle func() error +} + +var _ = gc.Suite(&LoopSuite{}) + +func (s *LoopSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.resolver = resolver.ResolverFunc(func(resolver.LocalState, remotestate.Snapshot, operation.Factory) (operation.Operation, error) { + return nil, resolver.ErrNoOperation + }) + s.watcher = &mockRemoteStateWatcher{ + changes: make(chan struct{}, 1), + } + s.opFactory = &mockOpFactory{} + s.executor = &mockOpExecutor{} + s.charmURL = charm.MustParseURL("cs:trusty/mysql") + s.abort = make(chan struct{}) +} + +func (s *LoopSuite) loop() (resolver.LocalState, error) { + localState := resolver.LocalState{ + CharmURL: s.charmURL, + } + err := resolver.Loop(resolver.LoopConfig{ + Resolver: s.resolver, + Factory: s.opFactory, + Watcher: s.watcher, + Executor: s.executor, + Abort: s.abort, + OnIdle: s.onIdle, + CharmDirGuard: &mockCharmDirGuard{}, + }, &localState) + return localState, err +} + +func (s *LoopSuite) TestAbort(c *gc.C) { + close(s.abort) + _, err := s.loop() + c.Assert(err, gc.Equals, resolver.ErrLoopAborted) +} + +func (s *LoopSuite) TestOnIdle(c *gc.C) { + onIdleCh := make(chan interface{}, 1) + s.onIdle = func() error { + onIdleCh <- nil + return nil + } + + done := make(chan interface{}, 1) + go func() { + _, err := s.loop() + done <- err + }() + + waitChannel(c, onIdleCh, "waiting for onIdle") + s.watcher.changes <- struct{}{} + waitChannel(c, onIdleCh, "waiting for onIdle") + close(s.abort) + + err := waitChannel(c, done, "waiting for loop to exit") + c.Assert(err, gc.Equals, resolver.ErrLoopAborted) + + select { + case <-onIdleCh: + c.Fatal("unexpected onIdle call") + default: + } +} + +func (s *LoopSuite) TestOnIdleError(c *gc.C) { + s.onIdle = func() error { + return errors.New("onIdle failed") + } + close(s.abort) + _, err := s.loop() + c.Assert(err, gc.ErrorMatches, "onIdle failed") +} + +func (s *LoopSuite) TestErrWaitingNoOnIdle(c *gc.C) { + var onIdleCalled bool + s.onIdle = func() error { + onIdleCalled = true + return nil + } + s.resolver = 
resolver.ResolverFunc(func( + _ resolver.LocalState, + _ remotestate.Snapshot, + _ operation.Factory, + ) (operation.Operation, error) { + return nil, resolver.ErrWaiting + }) + close(s.abort) + _, err := s.loop() + c.Assert(err, gc.Equals, resolver.ErrLoopAborted) + c.Assert(onIdleCalled, jc.IsFalse) +} + +func (s *LoopSuite) TestInitialFinalLocalState(c *gc.C) { + var local resolver.LocalState + s.resolver = resolver.ResolverFunc(func( + l resolver.LocalState, + _ remotestate.Snapshot, + _ operation.Factory, + ) (operation.Operation, error) { + local = l + return nil, resolver.ErrNoOperation + }) + + close(s.abort) + lastLocal, err := s.loop() + c.Assert(err, gc.Equals, resolver.ErrLoopAborted) + c.Assert(local, jc.DeepEquals, resolver.LocalState{ + CharmURL: s.charmURL, + }) + c.Assert(lastLocal, jc.DeepEquals, local) +} + +func (s *LoopSuite) TestLoop(c *gc.C) { + var resolverCalls int + theOp := &mockOp{} + s.resolver = resolver.ResolverFunc(func( + _ resolver.LocalState, + _ remotestate.Snapshot, + _ operation.Factory, + ) (operation.Operation, error) { + resolverCalls++ + switch resolverCalls { + // On the first call, return an operation. + case 1: + return theOp, nil + // On the second call, simulate having + // no operations to perform, at which + // point we'll wait for a remote state + // change. + case 2: + s.watcher.changes <- struct{}{} + break + // On the third call, kill the loop. + case 3: + close(s.abort) + break + } + return nil, resolver.ErrNoOperation + }) + + _, err := s.loop() + c.Assert(err, gc.Equals, resolver.ErrLoopAborted) + c.Assert(resolverCalls, gc.Equals, 3) + s.executor.CheckCallNames(c, "State", "State", "Run", "State", "State") + c.Assert(s.executor.Calls()[2].Args, jc.SameContents, []interface{}{theOp}) +} + +func (s *LoopSuite) TestRunFails(c *gc.C) { + s.executor.SetErrors(errors.New("Run fails")) + s.resolver = resolver.ResolverFunc(func( + _ resolver.LocalState, + _ remotestate.Snapshot, + _ operation.Factory, + ) (operation.Operation, error) { + return mockOp{}, nil + }) + _, err := s.loop() + c.Assert(err, gc.ErrorMatches, "Run fails") +} + +func (s *LoopSuite) TestNextOpFails(c *gc.C) { + s.resolver = resolver.ResolverFunc(func( + _ resolver.LocalState, + _ remotestate.Snapshot, + _ operation.Factory, + ) (operation.Operation, error) { + return nil, errors.New("NextOp fails") + }) + _, err := s.loop() + c.Assert(err, gc.ErrorMatches, "NextOp fails") +} + +func waitChannel(c *gc.C, ch <-chan interface{}, activity string) interface{} { + select { + case v := <-ch: + return v + case <-time.After(coretesting.LongWait): + c.Fatalf("timed out " + activity) + panic("unreachable") + } +} === added file 'src/github.com/juju/juju/worker/uniter/resolver/mock_test.go' --- src/github.com/juju/juju/worker/uniter/resolver/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package resolver_test
+
+import (
+	"github.com/juju/testing"
+	"gopkg.in/juju/charm.v6-unstable"
+
+	"github.com/juju/juju/worker/fortress"
+	"github.com/juju/juju/worker/uniter/hook"
+	"github.com/juju/juju/worker/uniter/operation"
+	"github.com/juju/juju/worker/uniter/remotestate"
+)
+
+type mockRemoteStateWatcher struct {
+	remotestate.RemoteStateWatcher
+	changes  chan struct{}
+	snapshot remotestate.Snapshot
+}
+
+func (w *mockRemoteStateWatcher) RemoteStateChanged() <-chan struct{} {
+	return w.changes
+}
+
+func (w *mockRemoteStateWatcher) Snapshot() remotestate.Snapshot {
+	return w.snapshot
+}
+
+type mockOpFactory struct {
+	operation.Factory
+	testing.Stub
+	op mockOp
+}
+
+func (f *mockOpFactory) NewUpgrade(charmURL *charm.URL) (operation.Operation, error) {
+	f.MethodCall(f, "NewUpgrade", charmURL)
+	return f.op, f.NextErr()
+}
+
+func (f *mockOpFactory) NewRevertUpgrade(charmURL *charm.URL) (operation.Operation, error) {
+	f.MethodCall(f, "NewRevertUpgrade", charmURL)
+	return f.op, f.NextErr()
+}
+
+func (f *mockOpFactory) NewResolvedUpgrade(charmURL *charm.URL) (operation.Operation, error) {
+	f.MethodCall(f, "NewResolvedUpgrade", charmURL)
+	return f.op, f.NextErr()
+}
+
+func (f *mockOpFactory) NewRunHook(info hook.Info) (operation.Operation, error) {
+	f.MethodCall(f, "NewRunHook", info)
+	return f.op, f.NextErr()
+}
+
+func (f *mockOpFactory) NewSkipHook(info hook.Info) (operation.Operation, error) {
+	f.MethodCall(f, "NewSkipHook", info)
+	return f.op, f.NextErr()
+}
+
+func (f *mockOpFactory) NewAction(id string) (operation.Operation, error) {
+	f.MethodCall(f, "NewAction", id)
+	return f.op, f.NextErr()
+}
+
+type mockOpExecutor struct {
+	operation.Executor
+	testing.Stub
+	st operation.State
+}
+
+func (e *mockOpExecutor) State() operation.State {
+	e.MethodCall(e, "State")
+	return e.st
+}
+
+func (e *mockOpExecutor) Run(op operation.Operation) error {
+	e.MethodCall(e, "Run", op)
+	return e.NextErr()
+}
+
+type mockOp struct {
+	operation.Operation
+	commit func(operation.State) (*operation.State, error)
+}
+
+func (op mockOp) Commit(st operation.State) (*operation.State, error) {
+	if op.commit != nil {
+		return op.commit(st)
+	}
+	return &st, nil
+}
+
+type mockCharmDirGuard struct {
+	fortress.Guard
+	testing.Stub
+}
+
+func (l *mockCharmDirGuard) Unlock() error {
+	l.MethodCall(l, "Unlock")
+	return l.NextErr()
+}
+
+func (l *mockCharmDirGuard) Lockdown(abort fortress.Abort) error {
+	l.MethodCall(l, "Lockdown", abort)
+	return l.NextErr()
+}

=== added file 'src/github.com/juju/juju/worker/uniter/resolver/opfactory.go'
--- src/github.com/juju/juju/worker/uniter/resolver/opfactory.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/resolver/opfactory.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,172 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resolver
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/worker/uniter/hook"
+	"github.com/juju/juju/worker/uniter/operation"
+	"github.com/juju/juju/worker/uniter/remotestate"
+)
+
+var logger = loggo.GetLogger("juju.worker.uniter.resolver")
+
+// resolverOpFactory wraps an operation.Factory such that ops that affect
+// local state will, when committed, update the embedded LocalState struct
+// to reflect the change made by the operation.
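+//
+// (Illustrative example, not part of the original comment: if a
+// config-changed hook is created while RemoteState.ConfigVersion is 3,
+// committing that hook sets LocalState.ConfigVersion to 3 even if the
+// remote version has moved on since; the next loop iteration then sees
+// the newer version and queues another config-changed hook.)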
+// +// The wrapped operations embed information specific to the remote state +// snapshot that was used to create the operation. Thus, remote state changes +// observed between the time the operation was created and committed do not +// affect the operation; and the local state change will not prevent further +// operations from being enqueued to achieve the new remote state. +type resolverOpFactory struct { + operation.Factory + + LocalState *LocalState + RemoteState remotestate.Snapshot +} + +func (s *resolverOpFactory) NewRunHook(info hook.Info) (operation.Operation, error) { + op, err := s.Factory.NewRunHook(info) + if err != nil { + return nil, errors.Trace(err) + } + return s.wrapHookOp(op, info), nil +} + +func (s *resolverOpFactory) NewSkipHook(info hook.Info) (operation.Operation, error) { + op, err := s.Factory.NewSkipHook(info) + if err != nil { + return nil, errors.Trace(err) + } + return s.wrapHookOp(op, info), nil +} + +func (s *resolverOpFactory) NewUpgrade(charmURL *charm.URL) (operation.Operation, error) { + op, err := s.Factory.NewUpgrade(charmURL) + if err != nil { + return nil, errors.Trace(err) + } + return s.wrapUpgradeOp(op, charmURL), nil +} + +func (s *resolverOpFactory) NewRevertUpgrade(charmURL *charm.URL) (operation.Operation, error) { + op, err := s.Factory.NewRevertUpgrade(charmURL) + if err != nil { + return nil, errors.Trace(err) + } + return s.wrapUpgradeOp(op, charmURL), nil +} + +func (s *resolverOpFactory) NewResolvedUpgrade(charmURL *charm.URL) (operation.Operation, error) { + op, err := s.Factory.NewResolvedUpgrade(charmURL) + if err != nil { + return nil, errors.Trace(err) + } + return s.wrapUpgradeOp(op, charmURL), nil +} + +func (s *resolverOpFactory) NewAction(id string) (operation.Operation, error) { + op, err := s.Factory.NewAction(id) + if err != nil { + return nil, errors.Trace(err) + } + f := func() { + if s.LocalState.CompletedActions == nil { + s.LocalState.CompletedActions = make(map[string]struct{}) + } + s.LocalState.CompletedActions[id] = struct{}{} + s.LocalState.CompletedActions = trimCompletedActions(s.RemoteState.Actions, s.LocalState.CompletedActions) + } + op = onCommitWrapper{op, f} + return op, nil +} + +func trimCompletedActions(pendingActions []string, completedActions map[string]struct{}) map[string]struct{} { + newCompletedActions := map[string]struct{}{} + for _, pendingAction := range pendingActions { + if _, ok := completedActions[pendingAction]; ok { + newCompletedActions[pendingAction] = struct{}{} + } + } + return newCompletedActions +} + +func (s *resolverOpFactory) wrapUpgradeOp(op operation.Operation, charmURL *charm.URL) operation.Operation { + charmModifiedVersion := s.RemoteState.CharmModifiedVersion + return onCommitWrapper{op, func() { + s.LocalState.CharmURL = charmURL + s.LocalState.Restart = true + s.LocalState.Conflicted = false + s.LocalState.CharmModifiedVersion = charmModifiedVersion + }} +} + +func (s *resolverOpFactory) wrapHookOp(op operation.Operation, info hook.Info) operation.Operation { + switch info.Kind { + case hooks.ConfigChanged: + v := s.RemoteState.ConfigVersion + op = onCommitWrapper{op, func() { + s.LocalState.ConfigVersion = v + }} + case hooks.LeaderSettingsChanged: + v := s.RemoteState.LeaderSettingsVersion + op = onCommitWrapper{op, func() { + s.LocalState.LeaderSettingsVersion = v + }} + } + + charmModifiedVersion := s.RemoteState.CharmModifiedVersion + updateStatusVersion := s.RemoteState.UpdateStatusVersion + op = onCommitWrapper{op, func() { + // Update UpdateStatusVersion so that 
the update-status
+		// hook only fires after the next timer signal.
+		s.LocalState.UpdateStatusVersion = updateStatusVersion
+		s.LocalState.CharmModifiedVersion = charmModifiedVersion
+	}}
+
+	retryHookVersion := s.RemoteState.RetryHookVersion
+	op = onPrepareWrapper{op, func() {
+		// Update RetryHookVersion so that we don't attempt to
+		// retry a hook more than once between timer signals.
+		//
+		// We need to do this in Prepare, rather than Commit,
+		// in case the retried hook fails.
+		s.LocalState.RetryHookVersion = retryHookVersion
+	}}
+	return op
+}
+
+type onCommitWrapper struct {
+	operation.Operation
+	onCommit func()
+}
+
+func (op onCommitWrapper) Commit(state operation.State) (*operation.State, error) {
+	st, err := op.Operation.Commit(state)
+	if err != nil {
+		return nil, err
+	}
+	op.onCommit()
+	return st, nil
+}
+
+type onPrepareWrapper struct {
+	operation.Operation
+	onPrepare func()
+}
+
+func (op onPrepareWrapper) Prepare(state operation.State) (*operation.State, error) {
+	st, err := op.Operation.Prepare(state)
+	if err != nil {
+		return nil, err
+	}
+	op.onPrepare()
+	return st, nil
+}

=== added file 'src/github.com/juju/juju/worker/uniter/resolver/opfactory_test.go'
--- src/github.com/juju/juju/worker/uniter/resolver/opfactory_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/resolver/opfactory_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,210 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package resolver_test
+
+import (
+	"errors"
+
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/testing"
+	"github.com/juju/juju/worker/uniter/hook"
+	"github.com/juju/juju/worker/uniter/operation"
+	"github.com/juju/juju/worker/uniter/remotestate"
+	"github.com/juju/juju/worker/uniter/resolver"
+)
+
+type ResolverOpFactorySuite struct {
+	testing.BaseSuite
+	opFactory *mockOpFactory
+}
+
+var _ = gc.Suite(&ResolverOpFactorySuite{})
+
+func (s *ResolverOpFactorySuite) SetUpTest(c *gc.C) {
+	s.BaseSuite.SetUpTest(c)
+	s.opFactory = &mockOpFactory{}
+}
+
+func (s *ResolverOpFactorySuite) TestInitialState(c *gc.C) {
+	f := resolver.NewResolverOpFactory(s.opFactory)
+	c.Assert(f.LocalState, jc.DeepEquals, &resolver.LocalState{})
+	c.Assert(f.RemoteState, jc.DeepEquals, remotestate.Snapshot{})
+}
+
+func (s *ResolverOpFactorySuite) TestUpdateStatusChanged(c *gc.C) {
+	s.testUpdateStatusChanged(c, resolver.ResolverOpFactory.NewRunHook)
+	s.testUpdateStatusChanged(c, resolver.ResolverOpFactory.NewSkipHook)
+}
+
+func (s *ResolverOpFactorySuite) testUpdateStatusChanged(
+	c *gc.C, meth func(resolver.ResolverOpFactory, hook.Info) (operation.Operation, error),
+) {
+	f := resolver.NewResolverOpFactory(s.opFactory)
+	f.RemoteState.UpdateStatusVersion = 1
+
+	op, err := meth(f, hook.Info{Kind: hooks.UpdateStatus})
+	c.Assert(err, jc.ErrorIsNil)
+	f.RemoteState.UpdateStatusVersion = 2
+
+	_, err = op.Commit(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Local state's UpdateStatusVersion should be set to what
+	// RemoteState's UpdateStatusVersion was when the operation
+	// was constructed.
+	c.Assert(f.LocalState.UpdateStatusVersion, gc.Equals, 1)
+}
+
+func (s *ResolverOpFactorySuite) TestConfigChanged(c *gc.C) {
+	s.testConfigChanged(c, resolver.ResolverOpFactory.NewRunHook)
+	s.testConfigChanged(c, resolver.ResolverOpFactory.NewSkipHook)
+}
+
+func (s *ResolverOpFactorySuite) TestNewHookError(c *gc.C) {
+	s.opFactory.SetErrors(
+		errors.New("NewRunHook fails"),
+		errors.New("NewSkipHook fails"),
+	)
+	f := resolver.NewResolverOpFactory(s.opFactory)
+	_, err := f.NewRunHook(hook.Info{Kind: hooks.ConfigChanged})
+	c.Assert(err, gc.ErrorMatches, "NewRunHook fails")
+	_, err = f.NewSkipHook(hook.Info{Kind: hooks.ConfigChanged})
+	c.Assert(err, gc.ErrorMatches, "NewSkipHook fails")
+}
+
+func (s *ResolverOpFactorySuite) testConfigChanged(
+	c *gc.C, meth func(resolver.ResolverOpFactory, hook.Info) (operation.Operation, error),
+) {
+	f := resolver.NewResolverOpFactory(s.opFactory)
+	f.RemoteState.ConfigVersion = 1
+	f.RemoteState.UpdateStatusVersion = 3
+
+	op, err := meth(f, hook.Info{Kind: hooks.ConfigChanged})
+	c.Assert(err, jc.ErrorIsNil)
+	f.RemoteState.ConfigVersion = 2
+	f.RemoteState.UpdateStatusVersion = 4
+
+	_, err = op.Commit(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Local state's ConfigVersion should be set to what
+	// RemoteState's ConfigVersion was when the operation
+	// was constructed.
+	c.Assert(f.LocalState.ConfigVersion, gc.Equals, 1)
+	c.Assert(f.LocalState.UpdateStatusVersion, gc.Equals, 3)
+}
+
+func (s *ResolverOpFactorySuite) TestLeaderSettingsChanged(c *gc.C) {
+	s.testLeaderSettingsChanged(c, resolver.ResolverOpFactory.NewRunHook)
+	s.testLeaderSettingsChanged(c, resolver.ResolverOpFactory.NewSkipHook)
+}
+
+func (s *ResolverOpFactorySuite) testLeaderSettingsChanged(
+	c *gc.C, meth func(resolver.ResolverOpFactory, hook.Info) (operation.Operation, error),
+) {
+	f := resolver.NewResolverOpFactory(s.opFactory)
+	f.RemoteState.LeaderSettingsVersion = 1
+	f.RemoteState.UpdateStatusVersion = 3
+
+	op, err := meth(f, hook.Info{Kind: hooks.LeaderSettingsChanged})
+	c.Assert(err, jc.ErrorIsNil)
+	f.RemoteState.LeaderSettingsVersion = 2
+	f.RemoteState.UpdateStatusVersion = 4
+
+	_, err = op.Commit(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Local state's LeaderSettingsVersion should be set to what
+	// RemoteState's LeaderSettingsVersion was when the operation
+	// was constructed.
+ c.Assert(f.LocalState.LeaderSettingsVersion, gc.Equals, 1) + c.Assert(f.LocalState.UpdateStatusVersion, gc.Equals, 3) +} + +func (s *ResolverOpFactorySuite) TestUpgrade(c *gc.C) { + s.testUpgrade(c, resolver.ResolverOpFactory.NewUpgrade) + s.testUpgrade(c, resolver.ResolverOpFactory.NewRevertUpgrade) + s.testUpgrade(c, resolver.ResolverOpFactory.NewResolvedUpgrade) +} + +func (s *ResolverOpFactorySuite) testUpgrade( + c *gc.C, meth func(resolver.ResolverOpFactory, *charm.URL) (operation.Operation, error), +) { + f := resolver.NewResolverOpFactory(s.opFactory) + f.LocalState.Conflicted = true + curl := charm.MustParseURL("cs:trusty/mysql") + op, err := meth(f, curl) + c.Assert(err, jc.ErrorIsNil) + _, err = op.Commit(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(f.LocalState.CharmURL, jc.DeepEquals, curl) + c.Assert(f.LocalState.Conflicted, jc.IsFalse) +} + +func (s *ResolverOpFactorySuite) TestNewUpgradeError(c *gc.C) { + curl := charm.MustParseURL("cs:trusty/mysql") + s.opFactory.SetErrors( + errors.New("NewUpgrade fails"), + errors.New("NewRevertUpgrade fails"), + errors.New("NewResolvedUpgrade fails"), + ) + f := resolver.NewResolverOpFactory(s.opFactory) + _, err := f.NewUpgrade(curl) + c.Assert(err, gc.ErrorMatches, "NewUpgrade fails") + _, err = f.NewRevertUpgrade(curl) + c.Assert(err, gc.ErrorMatches, "NewRevertUpgrade fails") + _, err = f.NewResolvedUpgrade(curl) + c.Assert(err, gc.ErrorMatches, "NewResolvedUpgrade fails") +} + +func (s *ResolverOpFactorySuite) TestCommitError(c *gc.C) { + f := resolver.NewResolverOpFactory(s.opFactory) + curl := charm.MustParseURL("cs:trusty/mysql") + s.opFactory.op.commit = func(operation.State) (*operation.State, error) { + return nil, errors.New("Commit fails") + } + op, err := f.NewUpgrade(curl) + c.Assert(err, jc.ErrorIsNil) + _, err = op.Commit(operation.State{}) + c.Assert(err, gc.ErrorMatches, "Commit fails") + // Local state should not have been updated. We use the same code + // internally for all operations, so it suffices to test just the + // upgrade case. + c.Assert(f.LocalState.CharmURL, gc.IsNil) +} + +func (s *ResolverOpFactorySuite) TestActionsCommit(c *gc.C) { + f := resolver.NewResolverOpFactory(s.opFactory) + f.RemoteState.Actions = []string{"action 1", "action 2", "action 3"} + f.LocalState.CompletedActions = map[string]struct{}{} + op, err := f.NewAction("action 1") + c.Assert(err, jc.ErrorIsNil) + _, err = op.Commit(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(f.LocalState.CompletedActions, gc.DeepEquals, map[string]struct{}{ + "action 1": struct{}{}, + }) +} + +func (s *ResolverOpFactorySuite) TestActionsTrimming(c *gc.C) { + f := resolver.NewResolverOpFactory(s.opFactory) + f.RemoteState.Actions = []string{"c", "d"} + f.LocalState.CompletedActions = map[string]struct{}{ + "a": struct{}{}, + "b": struct{}{}, + "c": struct{}{}, + } + op, err := f.NewAction("d") + c.Assert(err, jc.ErrorIsNil) + _, err = op.Commit(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(f.LocalState.CompletedActions, gc.DeepEquals, map[string]struct{}{ + "c": struct{}{}, + "d": struct{}{}, + }) +} === added file 'src/github.com/juju/juju/worker/uniter/resolver/package_test.go' --- src/github.com/juju/juju/worker/uniter/resolver/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package resolver_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/uniter/resolver_test.go' --- src/github.com/juju/juju/worker/uniter/resolver_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/resolver_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,229 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package uniter_test + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/uniter" + uniteractions "github.com/juju/juju/worker/uniter/actions" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/leadership" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/relation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" + "github.com/juju/juju/worker/uniter/storage" +) + +type resolverSuite struct { + stub testing.Stub + charmModifiedVersion int + charmURL *charm.URL + remoteState remotestate.Snapshot + opFactory operation.Factory + resolver resolver.Resolver + + clearResolved func() error + reportHookError func(hook.Info) error +} + +var _ = gc.Suite(&resolverSuite{}) + +func (s *resolverSuite) SetUpTest(c *gc.C) { + s.stub = testing.Stub{} + s.charmURL = charm.MustParseURL("cs:precise/mysql-2") + s.remoteState = remotestate.Snapshot{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + } + s.opFactory = operation.NewFactory(operation.FactoryParams{}) + + attachments, err := storage.NewAttachments(&dummyStorageAccessor{}, names.NewUnitTag("u/0"), c.MkDir(), nil) + c.Assert(err, jc.ErrorIsNil) + + s.clearResolved = func() error { + return errors.New("unexpected resolved") + } + + s.reportHookError = func(hook.Info) error { + return errors.New("unexpected report hook error") + } + + s.resolver = uniter.NewUniterResolver(uniter.ResolverConfig{ + ClearResolved: func() error { return s.clearResolved() }, + ReportHookError: func(info hook.Info) error { return s.reportHookError(info) }, + FixDeployer: func() error { return nil }, + StartRetryHookTimer: func() { s.stub.AddCall("StartRetryHookTimer") }, + StopRetryHookTimer: func() { s.stub.AddCall("StopRetryHookTimer") }, + Leadership: leadership.NewResolver(), + Actions: uniteractions.NewResolver(), + Relations: relation.NewRelationsResolver(&dummyRelations{}), + Storage: storage.NewResolver(attachments), + Commands: nopResolver{}, + }) +} + +// TestStartedNotInstalled tests whether the Started flag overrides the +// Installed flag being unset, in the event of an unexpected inconsistency in +// local state. +func (s *resolverSuite) TestStartedNotInstalled(c *gc.C) { + localState := resolver.LocalState{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.Continue, + Installed: false, + Started: true, + }, + } + _, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) +} + +// TestNotStartedNotInstalled tests whether the next operation for an +// uninstalled local state is an install hook operation. 
+func (s *resolverSuite) TestNotStartedNotInstalled(c *gc.C) { + localState := resolver.LocalState{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.Continue, + Installed: false, + Started: false, + }, + } + op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op.String(), gc.Equals, "run install hook") +} + +func (s *resolverSuite) TestHookErrorStartRetryTimer(c *gc.C) { + s.reportHookError = func(hook.Info) error { return nil } + localState := resolver.LocalState{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Installed: true, + Started: true, + Hook: &hook.Info{ + Kind: hooks.ConfigChanged, + }, + }, + } + // Run the resolver twice; we should start the hook retry + // timer on the first time through, no change on the second. + _, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer") + + _, err = s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer") // no change +} + +func (s *resolverSuite) TestHookErrorStartRetryTimerAgain(c *gc.C) { + s.reportHookError = func(hook.Info) error { return nil } + localState := resolver.LocalState{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Installed: true, + Started: true, + Hook: &hook.Info{ + Kind: hooks.ConfigChanged, + }, + }, + } + + _, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer") + + s.remoteState.RetryHookVersion = 1 + op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op.String(), gc.Equals, "run config-changed hook") + s.stub.CheckCallNames(c, "StartRetryHookTimer") // no change + localState.RetryHookVersion = 1 + + _, err = s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer", "StartRetryHookTimer") +} + +func (s *resolverSuite) TestResolvedRetryHooksStopRetryTimer(c *gc.C) { + // Resolving a failed hook should stop the retry timer. + s.testResolveHookErrorStopRetryTimer(c, params.ResolvedRetryHooks) +} + +func (s *resolverSuite) TestResolvedNoHooksStopRetryTimer(c *gc.C) { + // Resolving a failed hook should stop the retry timer. 
+ s.testResolveHookErrorStopRetryTimer(c, params.ResolvedNoHooks) +} + +func (s *resolverSuite) testResolveHookErrorStopRetryTimer(c *gc.C, mode params.ResolvedMode) { + s.stub.ResetCalls() + s.clearResolved = func() error { return nil } + s.reportHookError = func(hook.Info) error { return nil } + localState := resolver.LocalState{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Installed: true, + Started: true, + Hook: &hook.Info{ + Kind: hooks.ConfigChanged, + }, + }, + } + + _, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer") + + s.remoteState.ResolvedMode = mode + _, err = s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, jc.ErrorIsNil) + s.stub.CheckCallNames(c, "StartRetryHookTimer", "StopRetryHookTimer") +} + +func (s *resolverSuite) TestRunHookStopRetryTimer(c *gc.C) { + s.reportHookError = func(hook.Info) error { return nil } + localState := resolver.LocalState{ + CharmModifiedVersion: s.charmModifiedVersion, + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Installed: true, + Started: true, + Hook: &hook.Info{ + Kind: hooks.ConfigChanged, + }, + }, + } + + _, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer") + + localState.Kind = operation.Continue + _, err = s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, gc.Equals, resolver.ErrNoOperation) + s.stub.CheckCallNames(c, "StartRetryHookTimer", "StopRetryHookTimer") +} === added directory 'src/github.com/juju/juju/worker/uniter/runcommands' === added file 'src/github.com/juju/juju/worker/uniter/runcommands/mock_test.go' --- src/github.com/juju/juju/worker/uniter/runcommands/mock_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runcommands/mock_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
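The mocks in the following file rely on Go interface embedding: mockRunnerFactory embeds runner.Factory and implements only NewCommandRunner, so any other Factory method the code under test might call panics on the nil embedded interface instead of silently succeeding. A generic sketch of the technique, using an invented Store interface:

	package main

	import "fmt"

	type Store interface {
		Get(key string) (string, error)
		Put(key, value string) error
	}

	// mockStore satisfies Store via the embedded interface value; only
	// Get is overridden, so an unexpected Put call panics rather than
	// passing unnoticed.
	type mockStore struct {
		Store
		get func(string) (string, error)
	}

	func (m *mockStore) Get(key string) (string, error) { return m.get(key) }

	func main() {
		m := &mockStore{get: func(string) (string, error) { return "value", nil }}
		v, _ := m.Get("key")
		fmt.Println(v)
	}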
+ +package runcommands_test + +import ( + "github.com/juju/testing" + "github.com/juju/utils/exec" + + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" +) + +type mockRunnerFactory struct { + runner.Factory + newCommandRunner func(context.CommandInfo) (runner.Runner, error) +} + +func (f *mockRunnerFactory) NewCommandRunner(info context.CommandInfo) (runner.Runner, error) { + return f.newCommandRunner(info) +} + +type mockRunner struct { + runner.Runner + runCommands func(string) (*exec.ExecResponse, error) +} + +func (r *mockRunner) Context() runner.Context { + return &mockRunnerContext{} +} + +func (r *mockRunner) RunCommands(commands string) (*exec.ExecResponse, error) { + return r.runCommands(commands) +} + +type mockRunnerContext struct { + runner.Context +} + +func (*mockRunnerContext) Prepare() error { + return nil +} + +type mockCallbacks struct { + testing.Stub + operation.Callbacks +} + +func (c *mockCallbacks) SetExecutingStatus(status string) error { + c.MethodCall(c, "SetExecutingStatus", status) + return c.NextErr() +} === added file 'src/github.com/juju/juju/worker/uniter/runcommands/package_test.go' --- src/github.com/juju/juju/worker/uniter/runcommands/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runcommands/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package runcommands_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/uniter/runcommands/runcommands.go' --- src/github.com/juju/juju/worker/uniter/runcommands/runcommands.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runcommands/runcommands.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,121 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package runcommands + +import ( + "fmt" + "sync" + + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +// Commands is an interface providing a means of storing and retrieving +// arguments for running commands. +type Commands interface { + // AddCommand adds the given command arguments and response function + // and returns a unique identifier. + AddCommand(operation.CommandArgs, operation.CommandResponseFunc) string + + // GetCommand returns the command arguments and response function + // with the specified ID, as registered in AddCommand. + GetCommand(id string) (operation.CommandArgs, operation.CommandResponseFunc) + + // RemoveCommand removes the command arguments and response function + // associated with the specified ID. 
+ RemoveCommand(id string) +} + +type commands struct { + mu sync.Mutex + nextId int + pending map[string]command +} + +type command struct { + args operation.CommandArgs + response operation.CommandResponseFunc +} + +func NewCommands() Commands { + return &commands{pending: make(map[string]command)} +} + +func (c *commands) AddCommand(args operation.CommandArgs, response operation.CommandResponseFunc) string { + c.mu.Lock() + defer c.mu.Unlock() + id := fmt.Sprint(c.nextId) + c.nextId++ + c.pending[id] = command{args, response} + return id +} + +func (c *commands) RemoveCommand(id string) { + c.mu.Lock() + delete(c.pending, id) + c.mu.Unlock() +} + +func (c *commands) GetCommand(id string) (operation.CommandArgs, operation.CommandResponseFunc) { + c.mu.Lock() + defer c.mu.Unlock() + command := c.pending[id] + return command.args, command.response +} + +// commandsResolver is a Resolver that returns operations to run pending +// commands. When a command is completed, the "commandCompleted" callback +// is invoked to remove the pending command from the remote state. +type commandsResolver struct { + commands Commands + commandCompleted func(id string) +} + +// NewCommandsResolver returns a new Resolver that returns operations to +// execute "juju run" commands. +// +// The returned resolver's NextOp method will return operations to execute +// run commands whenever the remote state's "Commands" is non-empty, by +// taking the first ID in the sequence and fetching the command arguments +// from the Commands interface passed into this function. When the command +// execution operation is committed, the ID of the command is passed to the +// "commandCompleted" callback. +func NewCommandsResolver(commands Commands, commandCompleted func(string)) resolver.Resolver { + return &commandsResolver{commands, commandCompleted} +} + +// NextOp is part of the resolver.Resolver interface. +func (s *commandsResolver) NextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + if len(remoteState.Commands) == 0 { + return nil, resolver.ErrNoOperation + } + id := remoteState.Commands[0] + op, err := opFactory.NewCommands(s.commands.GetCommand(id)) + if err != nil { + return nil, err + } + commandCompleted := func() { + s.commands.RemoveCommand(id) + s.commandCompleted(id) + } + return &commandCompleter{op, commandCompleted}, nil +} + +type commandCompleter struct { + operation.Operation + commandCompleted func() +} + +func (c *commandCompleter) Commit(st operation.State) (*operation.State, error) { + result, err := c.Operation.Commit(st) + if err == nil { + c.commandCompleted() + } + return result, err +} === added file 'src/github.com/juju/juju/worker/uniter/runcommands/runcommands_test.go' --- src/github.com/juju/juju/worker/uniter/runcommands/runcommands_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runcommands/runcommands_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,245 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
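Before the tests, a minimal sketch of how the registry and resolver above fit together; the types come from the operation and exec packages already imported in this diff, and error handling is elided:

	cmds := runcommands.NewCommands()

	// Register the command arguments together with a callback that will
	// receive the execution result; AddCommand returns a unique ID.
	id := cmds.AddCommand(operation.CommandArgs{
		Commands: "echo hello",
	}, func(resp *exec.ExecResponse, err error) {
		// deliver resp/err back to the waiting caller
	})

	// The commands resolver sees the ID in remote state, fetches the
	// arguments to build the operation, and once that operation commits
	// it removes the entry and reports completion.
	args, respond := cmds.GetCommand(id)
	_ = args
	respond(&exec.ExecResponse{}, nil)
	cmds.RemoveCommand(id)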
+ +package runcommands_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/exec" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" + "github.com/juju/juju/worker/uniter/runcommands" + "github.com/juju/juju/worker/uniter/runner" + runnercontext "github.com/juju/juju/worker/uniter/runner/context" +) + +type runcommandsSuite struct { + charmURL *charm.URL + remoteState remotestate.Snapshot + mockRunner mockRunner + callbacks *mockCallbacks + opFactory operation.Factory + resolver resolver.Resolver + commands runcommands.Commands + runCommands func(string) (*exec.ExecResponse, error) + commandCompleted func(string) +} + +var _ = gc.Suite(&runcommandsSuite{}) + +func (s *runcommandsSuite) SetUpTest(c *gc.C) { + s.charmURL = charm.MustParseURL("cs:precise/mysql-2") + s.remoteState = remotestate.Snapshot{ + CharmURL: s.charmURL, + } + s.mockRunner = mockRunner{runCommands: func(commands string) (*exec.ExecResponse, error) { + return s.runCommands(commands) + }} + s.callbacks = &mockCallbacks{} + s.opFactory = operation.NewFactory(operation.FactoryParams{ + Callbacks: s.callbacks, + RunnerFactory: &mockRunnerFactory{ + newCommandRunner: func(info runnercontext.CommandInfo) (runner.Runner, error) { + return &s.mockRunner, nil + }, + }, + }) + + s.commands = runcommands.NewCommands() + s.commandCompleted = nil + s.resolver = runcommands.NewCommandsResolver( + s.commands, func(id string) { + if s.commandCompleted != nil { + s.commandCompleted(id) + } + }, + ) +} + +func (s *runcommandsSuite) TestRunCommands(c *gc.C) { + localState := resolver.LocalState{ + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.Continue, + }, + } + id := s.commands.AddCommand(operation.CommandArgs{ + Commands: "echo foxtrot", + }, func(*exec.ExecResponse, error) {}) + s.remoteState.Commands = []string{id} + op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op.String(), gc.Equals, "run commands (0)") +} + +func (s *runcommandsSuite) TestRunCommandsCallbacks(c *gc.C) { + var completed []string + s.commandCompleted = func(id string) { + completed = append(completed, id) + } + + var run []string + s.runCommands = func(commands string) (*exec.ExecResponse, error) { + run = append(run, commands) + return &exec.ExecResponse{}, nil + } + localState := resolver.LocalState{ + CharmURL: s.charmURL, + State: operation.State{ + Kind: operation.Continue, + }, + } + + id := s.commands.AddCommand(operation.CommandArgs{ + Commands: "echo foxtrot", + }, func(*exec.ExecResponse, error) {}) + s.remoteState.Commands = []string{id} + + op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op.String(), gc.Equals, "run commands (0)") + + _, err = op.Prepare(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(run, gc.HasLen, 0) + c.Assert(completed, gc.HasLen, 0) + + _, err = op.Execute(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(run, jc.DeepEquals, []string{"echo foxtrot"}) + c.Assert(completed, gc.HasLen, 0) + + _, err = op.Commit(operation.State{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(completed, jc.DeepEquals, []string{id}) +} + +func (s *runcommandsSuite) TestRunCommandsCommitErrorNoCompletedCallback(c *gc.C) { + // Override opFactory with one that creates run command 
+	// operations with failing Commit methods.
+	s.opFactory = commitErrorOpFactory{s.opFactory}
+
+	var completed []string
+	s.commandCompleted = func(id string) {
+		completed = append(completed, id)
+	}
+
+	var run []string
+	s.runCommands = func(commands string) (*exec.ExecResponse, error) {
+		run = append(run, commands)
+		return &exec.ExecResponse{}, nil
+	}
+	localState := resolver.LocalState{
+		CharmURL: s.charmURL,
+		State: operation.State{
+			Kind: operation.Continue,
+		},
+	}
+
+	id := s.commands.AddCommand(operation.CommandArgs{
+		Commands: "echo foxtrot",
+	}, func(*exec.ExecResponse, error) {})
+	s.remoteState.Commands = []string{id}
+
+	op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(op.String(), gc.Equals, "run commands (0)")
+
+	_, err = op.Prepare(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+
+	_, err = op.Execute(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(run, jc.DeepEquals, []string{"echo foxtrot"})
+	c.Assert(completed, gc.HasLen, 0)
+
+	_, err = op.Commit(operation.State{})
+	c.Assert(err, gc.ErrorMatches, "Commit failed")
+	// commandCompleted is not called if Commit fails
+	c.Assert(completed, gc.HasLen, 0)
+}
+
+func (s *runcommandsSuite) TestRunCommandsError(c *gc.C) {
+	localState := resolver.LocalState{
+		CharmURL: s.charmURL,
+		State: operation.State{
+			Kind: operation.Continue,
+		},
+	}
+	s.runCommands = func(commands string) (*exec.ExecResponse, error) {
+		return nil, errors.Errorf("executing commands: %s", commands)
+	}
+
+	var execErr error
+	id := s.commands.AddCommand(operation.CommandArgs{
+		Commands: "echo foxtrot",
+	}, func(_ *exec.ExecResponse, err error) {
+		execErr = err
+	})
+	s.remoteState.Commands = []string{id}
+
+	op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(op.String(), gc.Equals, "run commands (0)")
+
+	_, err = op.Prepare(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+
+	_, err = op.Execute(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(execErr, gc.ErrorMatches, "executing commands: echo foxtrot")
+}
+
+func (s *runcommandsSuite) TestRunCommandsStatus(c *gc.C) {
+	localState := resolver.LocalState{
+		CharmURL: s.charmURL,
+		State: operation.State{
+			Kind: operation.Continue,
+		},
+	}
+
+	id := s.commands.AddCommand(operation.CommandArgs{
+		Commands: "echo foxtrot",
+	}, func(*exec.ExecResponse, error) {})
+	s.remoteState.Commands = []string{id}
+
+	op, err := s.resolver.NextOp(localState, s.remoteState, s.opFactory)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(op.String(), gc.Equals, "run commands (0)")
+	s.callbacks.CheckCalls(c, nil /* no calls */)
+
+	_, err = op.Prepare(operation.State{})
+	c.Assert(err, jc.ErrorIsNil)
+	s.callbacks.CheckCalls(c, nil /* no calls */)
+
+	s.callbacks.SetErrors(errors.New("cannot set status"))
+	_, err = op.Execute(operation.State{})
+	c.Assert(err, gc.ErrorMatches, "cannot set status")
+	s.callbacks.CheckCallNames(c, "SetExecutingStatus")
+	s.callbacks.CheckCall(c, 0, "SetExecutingStatus", "running commands")
+}
+
+type commitErrorOpFactory struct {
+	operation.Factory
+}
+
+func (f commitErrorOpFactory) NewCommands(args operation.CommandArgs, sendResponse operation.CommandResponseFunc) (operation.Operation, error) {
+	op, err := f.Factory.NewCommands(args, sendResponse)
+	if err == nil {
+		op = commitErrorOperation{op}
+	}
+	return op, err
+}
+
+type commitErrorOperation struct {
+	operation.Operation
+} + +func (commitErrorOperation) Commit(operation.State) (*operation.State, error) { + return nil, errors.New("Commit failed") +} === modified file 'src/github.com/juju/juju/worker/uniter/runlistener.go' --- src/github.com/juju/juju/worker/uniter/runlistener.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/runlistener.go 2016-03-22 15:18:22 +0000 @@ -11,14 +11,21 @@ "net/rpc" "sync" + "launchpad.net/tomb" + "github.com/juju/errors" "github.com/juju/utils/exec" "github.com/juju/juju/juju/sockets" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/runcommands" ) const JujuRunEndpoint = "JujuRunServer.RunCommands" +var errCommandAborted = errors.New("command execution aborted") + // RunCommandsArgs stores the arguments for a RunCommands call. type RunCommandsArgs struct { // Commands is the arbitrary commands to execute on the unit @@ -38,10 +45,30 @@ RunCommands(RunCommandsArgs RunCommandsArgs) (results *exec.ExecResponse, err error) } +// RunListenerConfig contains the configuration for a RunListener. +type RunListenerConfig struct { + // SocketPath is the path of the socket to listen on for run commands. + SocketPath string + + // CommandRunner is the CommandRunner that will run commands. + CommandRunner CommandRunner +} + +func (cfg *RunListenerConfig) Validate() error { + if cfg.SocketPath == "" { + return errors.NotValidf("SocketPath unspecified") + } + if cfg.CommandRunner == nil { + return errors.NotValidf("CommandRunner unspecified") + } + return nil +} + // RunListener is responsible for listening on the network connection and -// seting up the rpc server on that net connection. Also starts the go routine +// setting up the rpc server on that net connection. Also starts the go routine // that listens and hands off the work. type RunListener struct { + RunListenerConfig listener net.Listener server *rpc.Server closed chan struct{} @@ -49,42 +76,27 @@ wg sync.WaitGroup } -// The JujuRunServer is the entity that has the methods that are called over -// the rpc connection. -type JujuRunServer struct { - runner CommandRunner -} - -// RunCommands delegates the actual running to the runner and populates the -// response structure. -func (r *JujuRunServer) RunCommands(args RunCommandsArgs, result *exec.ExecResponse) error { - logger.Debugf("RunCommands: %+v", args) - runResult, err := r.runner.RunCommands(args) - if err != nil { - return errors.Annotate(err, "r.runner.RunCommands") - } - *result = *runResult - return err -} - // NewRunListener returns a new RunListener that is listening on given // socket or named pipe passed in. If a valid RunListener is returned, is // has the go routine running, and should be closed by the creator // when they are done with it. 
-func NewRunListener(runner CommandRunner, socketPath string) (*RunListener, error) { - server := rpc.NewServer() - if err := server.Register(&JujuRunServer{runner}); err != nil { +func NewRunListener(cfg RunListenerConfig) (*RunListener, error) { + if err := cfg.Validate(); err != nil { return nil, errors.Trace(err) } - listener, err := sockets.Listen(socketPath) + listener, err := sockets.Listen(cfg.SocketPath) if err != nil { return nil, errors.Trace(err) } runListener := &RunListener{ - listener: listener, - server: server, - closed: make(chan struct{}), - closing: make(chan struct{}), + RunListenerConfig: cfg, + listener: listener, + server: rpc.NewServer(), + closed: make(chan struct{}), + closing: make(chan struct{}), + } + if err := runListener.server.Register(&JujuRunServer{runListener}); err != nil { + return nil, errors.Trace(err) } go runListener.Run() return runListener, nil @@ -122,9 +134,155 @@ // Close immediately stops accepting connections, and blocks until all existing // connections have been closed. -func (s *RunListener) Close() { +func (s *RunListener) Close() error { + defer func() { + <-s.closed + logger.Debugf("juju-run listener stopped") + }() close(s.closing) - s.listener.Close() - <-s.closed - logger.Debugf("juju-run listener stopped") + return s.listener.Close() +} + +// RunCommands executes the supplied commands in a hook context. +func (r *RunListener) RunCommands(args RunCommandsArgs) (results *exec.ExecResponse, err error) { + logger.Tracef("run commands: %s", args.Commands) + return r.CommandRunner.RunCommands(args) +} + +// newRunListenerWrapper returns a worker that will Close the supplied run +// listener when the worker is killed. The Wait() method will never return +// an error -- NewRunListener just drops the Run error on the floor and that's +// not what I'm fixing here. +func newRunListenerWrapper(rl *RunListener) worker.Worker { + rlw := &runListenerWrapper{rl: rl} + go func() { + defer rlw.tomb.Done() + defer rlw.tearDown() + <-rlw.tomb.Dying() + }() + return rlw +} + +type runListenerWrapper struct { + tomb tomb.Tomb + rl *RunListener +} + +func (rlw *runListenerWrapper) tearDown() { + if err := rlw.rl.Close(); err != nil { + logger.Warningf("error closing runlistener: %v", err) + } +} + +// Kill is part of the worker.Worker interface. +func (rlw *runListenerWrapper) Kill() { + rlw.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (rlw *runListenerWrapper) Wait() error { + return rlw.tomb.Wait() +} + +// The JujuRunServer is the entity that has the methods that are called over +// the rpc connection. +type JujuRunServer struct { + runner CommandRunner +} + +// RunCommands delegates the actual running to the runner and populates the +// response structure. +func (r *JujuRunServer) RunCommands(args RunCommandsArgs, result *exec.ExecResponse) error { + logger.Debugf("RunCommands: %+v", args) + runResult, err := r.runner.RunCommands(args) + if err != nil { + return errors.Annotate(err, "r.runner.RunCommands") + } + *result = *runResult + return err +} + +// ChannelCommandRunnerConfig contains the configuration for a ChannelCommandRunner. +type ChannelCommandRunnerConfig struct { + // Abort is a channel that will be closed when the runner should abort + // the execution of run commands. + Abort <-chan struct{} + + // Commands is used to add commands received from the listener. + Commands runcommands.Commands + + // CommandChannel will be sent the IDs of commands added to Commands. 
+ CommandChannel chan<- string +} + +func (cfg ChannelCommandRunnerConfig) Validate() error { + if cfg.Abort == nil { + return errors.NotValidf("Abort unspecified") + } + if cfg.Commands == nil { + return errors.NotValidf("Commands unspecified") + } + if cfg.CommandChannel == nil { + return errors.NotValidf("CommandChannel unspecified") + } + return nil +} + +// ChannelCommandRunner is a CommandRunner that registers command +// arguments in a runcommands.Commands, sends the returned IDs to +// a channel and waits for response callbacks. +type ChannelCommandRunner struct { + config ChannelCommandRunnerConfig +} + +// NewChannelCommandRunner returns a new ChannelCommandRunner with the +// given configuration. +func NewChannelCommandRunner(cfg ChannelCommandRunnerConfig) (*ChannelCommandRunner, error) { + if err := cfg.Validate(); err != nil { + return nil, errors.Trace(err) + } + return &ChannelCommandRunner{cfg}, nil +} + +// RunCommands executes the supplied run commands by registering the +// arguments in a runcommands.Commands, and then sending the returned +// ID to a channel and waiting for a response callback. +func (c *ChannelCommandRunner) RunCommands(args RunCommandsArgs) (results *exec.ExecResponse, err error) { + type responseInfo struct { + response *exec.ExecResponse + err error + } + + // NOTE(axw) the response channel must be synchronous so that the + // response is received before the uniter resumes operation, and + // potentially aborts. This prevents a race when rebooting. + responseChan := make(chan responseInfo) + responseFunc := func(response *exec.ExecResponse, err error) { + select { + case <-c.config.Abort: + case responseChan <- responseInfo{response, err}: + } + } + + id := c.config.Commands.AddCommand( + operation.CommandArgs{ + Commands: args.Commands, + RelationId: args.RelationId, + RemoteUnitName: args.RemoteUnitName, + ForceRemoteUnit: args.ForceRemoteUnit, + }, + responseFunc, + ) + select { + case <-c.config.Abort: + return nil, errCommandAborted + case c.config.CommandChannel <- id: + } + + select { + case <-c.config.Abort: + return nil, errCommandAborted + case response := <-responseChan: + return response.response, response.err + } } === modified file 'src/github.com/juju/juju/worker/uniter/runlistener_test.go' --- src/github.com/juju/juju/worker/uniter/runlistener_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runlistener_test.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,7 @@ "github.com/juju/juju/juju/sockets" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter" + "github.com/juju/juju/worker/uniter/runcommands" ) type ListenerSuite struct { @@ -38,11 +39,14 @@ // Mirror the params to uniter.NewRunListener, but add cleanup to close it. 
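For context, a sketch of how the pieces above are wired together; it mirrors the ChannelCommandRunnerSuite below. In the uniter proper, the command channel feeds remote state and the response callback fires when the resulting operation commits:

	abort := make(chan struct{})
	commands := runcommands.NewCommands()
	commandChannel := make(chan string, 1)

	runner, err := uniter.NewChannelCommandRunner(uniter.ChannelCommandRunnerConfig{
		Abort:          abort,
		Commands:       commands,
		CommandChannel: commandChannel,
	})
	if err != nil {
		// invalid configuration
	}

	// RunCommands blocks until the command's response callback fires,
	// or returns errCommandAborted if abort is closed first.
	resp, err := runner.RunCommands(uniter.RunCommandsArgs{
		Commands: "echo hello",
	})
	_ = resp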
func (s *ListenerSuite) NewRunListener(c *gc.C) *uniter.RunListener { - listener, err := uniter.NewRunListener(&mockRunner{c}, s.socketPath) + listener, err := uniter.NewRunListener(uniter.RunListenerConfig{ + SocketPath: s.socketPath, + CommandRunner: &mockRunner{c}, + }) c.Assert(err, jc.ErrorIsNil) c.Assert(listener, gc.NotNil) s.AddCleanup(func(*gc.C) { - listener.Close() + c.Assert(listener.Close(), jc.ErrorIsNil) }) return listener } @@ -52,11 +56,7 @@ c.Skip("bug 1403084: Current named pipes implementation does not support this") } s.NewRunListener(c) - - listener, err := uniter.NewRunListener(&mockRunner{}, s.socketPath) - c.Assert(err, jc.ErrorIsNil) - c.Assert(listener, gc.NotNil) - listener.Close() + s.NewRunListener(c) } func (s *ListenerSuite) TestClientCall(c *gc.C) { @@ -81,6 +81,38 @@ c.Assert(result.Code, gc.Equals, 42) } +type ChannelCommandRunnerSuite struct { + testing.BaseSuite + abort chan struct{} + commands runcommands.Commands + commandChannel chan string + runner *uniter.ChannelCommandRunner +} + +var _ = gc.Suite(&ChannelCommandRunnerSuite{}) + +func (s *ChannelCommandRunnerSuite) SetUpTest(c *gc.C) { + s.BaseSuite.SetUpTest(c) + s.abort = make(chan struct{}, 1) + s.commands = runcommands.NewCommands() + s.commandChannel = make(chan string, 1) + runner, err := uniter.NewChannelCommandRunner(uniter.ChannelCommandRunnerConfig{ + Abort: s.abort, + Commands: s.commands, + CommandChannel: s.commandChannel, + }) + c.Assert(err, jc.ErrorIsNil) + s.runner = runner +} + +func (s *ChannelCommandRunnerSuite) TestCommandsAborted(c *gc.C) { + close(s.abort) + _, err := s.runner.RunCommands(uniter.RunCommandsArgs{ + Commands: "some-command", + }) + c.Assert(err, gc.ErrorMatches, "command execution aborted") +} + type mockRunner struct { c *gc.C } === removed file 'src/github.com/juju/juju/worker/uniter/runner/action.go' --- src/github.com/juju/juju/worker/uniter/runner/action.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/action.go 1970-01-01 00:00:00 +0000 @@ -1,73 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner - -import ( - "github.com/juju/names" -) - -// ActionData contains the tag, parameters, and results of an Action. -type ActionData struct { - Name string - Tag names.ActionTag - Params map[string]interface{} - Failed bool - ResultsMessage string - ResultsMap map[string]interface{} -} - -// NewActionData builds a suitable ActionData struct with no nil members. -// this should only be called in the event that an Action hook is being requested. -func newActionData(name string, tag *names.ActionTag, params map[string]interface{}) *ActionData { - return &ActionData{ - Name: name, - Tag: *tag, - Params: params, - ResultsMap: map[string]interface{}{}, - } -} - -// actionStatus messages define the possible states of a completed Action. -const ( - actionStatusInit = "init" - actionStatusFailed = "fail" -) - -// addValueToMap adds the given value to the map on which the method is run. -// This allows us to merge maps such as {foo: {bar: baz}} and {foo: {baz: faz}} -// into {foo: {bar: baz, baz: faz}}. -func addValueToMap(keys []string, value string, target map[string]interface{}) { - next := target - - for i := range keys { - // if we are on last key set the value. - // shouldn't be a problem. overwrites existing vals. 
- if i == len(keys)-1 { - next[keys[i]] = value - break - } - - if iface, ok := next[keys[i]]; ok { - switch typed := iface.(type) { - case map[string]interface{}: - // If we already had a map inside, keep - // stepping through. - next = typed - default: - // If we didn't, then overwrite value - // with a map and iterate with that. - m := map[string]interface{}{} - next[keys[i]] = m - next = m - } - continue - } - - // Otherwise, it wasn't present, so make it and step - // into. - m := map[string]interface{}{} - next[keys[i]] = m - next = m - } -} === modified file 'src/github.com/juju/juju/worker/uniter/runner/args.go' --- src/github.com/juju/juju/worker/uniter/runner/args.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/args.go 2016-03-22 15:18:22 +0000 @@ -10,7 +10,8 @@ "path/filepath" "strings" - "github.com/juju/juju/version" + "github.com/juju/juju/worker/uniter/runner/context" + jujuos "github.com/juju/utils/os" ) var windowsSuffixOrder = []string{ @@ -24,7 +25,7 @@ hookFile, err := exec.LookPath(hook) if err != nil { if ee, ok := err.(*exec.Error); ok && os.IsNotExist(ee.Err) { - return "", &missingHookError{hook} + return "", context.NewMissingHookError(hook) } return "", err } @@ -37,7 +38,7 @@ // being default. func searchHook(charmDir, hook string) (string, error) { hookFile := filepath.Join(charmDir, hook) - if version.Current.OS != version.Windows { + if jujuos.HostOS() != jujuos.Windows { // we are not running on windows, // there is no need to look for suffixed hooks return lookPath(hookFile) @@ -46,7 +47,7 @@ file := fmt.Sprintf("%s%s", hookFile, suffix) foundHook, err := lookPath(file) if err != nil { - if IsMissingHookError(err) { + if context.IsMissingHookError(err) { // look for next suffix continue } @@ -54,7 +55,7 @@ } return foundHook, nil } - return "", &missingHookError{hook} + return "", context.NewMissingHookError(hook) } // hookCommand constructs an appropriate command to be passed to @@ -64,7 +65,7 @@ // and propagate error levels (-File). .cmd and .bat files can be run // directly. 
func hookCommand(hook string) []string { - if version.Current.OS != version.Windows { + if jujuos.HostOS() != jujuos.Windows { // we are not running on windows, // just return the hook name return []string{hook} === modified file 'src/github.com/juju/juju/worker/uniter/runner/args_test.go' --- src/github.com/juju/juju/worker/uniter/runner/args_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/args_test.go 2016-03-22 15:18:22 +0000 @@ -9,9 +9,9 @@ envtesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" + "github.com/juju/utils/os" gc "gopkg.in/check.v1" - "github.com/juju/juju/version" "github.com/juju/juju/worker/uniter/runner" ) @@ -20,7 +20,7 @@ var _ = gc.Suite(&WindowsHookSuite{}) func (s *WindowsHookSuite) TestHookCommandPowerShellScript(c *gc.C) { - restorer := envtesting.PatchValue(&version.Current.OS, version.Windows) + restorer := envtesting.PatchValue(&os.HostOS, func() os.OSType { return os.Windows }) defer restorer() hookname := "powerShellScript.ps1" @@ -37,7 +37,7 @@ } func (s *WindowsHookSuite) TestHookCommandNotPowerShellScripts(c *gc.C) { - restorer := envtesting.PatchValue(&version.Current.OS, version.Windows) + restorer := envtesting.PatchValue(&os.HostOS, func() os.OSType { return os.Windows }) defer restorer() cmdhook := "somehook.cmd" @@ -51,7 +51,7 @@ if runtime.GOOS == "windows" { c.Skip("Cannot search for executables without extension on windows") } - restorer := envtesting.PatchValue(&version.Current.OS, version.Ubuntu) + restorer := envtesting.PatchValue(&os.HostOS, func() os.OSType { return os.Ubuntu }) defer restorer() charmDir := c.MkDir() @@ -69,7 +69,7 @@ } func (s *WindowsHookSuite) TestSearchHookWindows(c *gc.C) { - restorer := envtesting.PatchValue(&version.Current.OS, version.Windows) + restorer := envtesting.PatchValue(&os.HostOS, func() os.OSType { return os.Windows }) defer restorer() charmDir := c.MkDir() @@ -85,7 +85,7 @@ } func (s *WindowsHookSuite) TestSearchHookWindowsError(c *gc.C) { - restorer := envtesting.PatchValue(&version.Current.OS, version.Windows) + restorer := envtesting.PatchValue(&os.HostOS, func() os.OSType { return os.Windows }) defer restorer() charmDir := c.MkDir() === removed file 'src/github.com/juju/juju/worker/uniter/runner/cache.go' --- src/github.com/juju/juju/worker/uniter/runner/cache.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/cache.go 1970-01-01 00:00:00 +0000 @@ -1,97 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner - -import ( - "sort" - - "github.com/juju/juju/apiserver/params" -) - -// SettingsFunc returns the relation settings for a unit. -type SettingsFunc func(unitName string) (params.Settings, error) - -// SettingsMap is a map from unit name to relation settings. -type SettingsMap map[string]params.Settings - -// RelationCache stores a relation's remote unit membership and settings. -// Member settings are stored until invalidated or removed by name; settings -// of non-member units are stored only until the cache is pruned. -type RelationCache struct { - // readSettings is used to get settings data if when not already present. - readSettings SettingsFunc - // members' keys define the relation's membership; non-nil values hold - // cached settings. - members SettingsMap - // others is a short-term cache for non-member settings. 
- others SettingsMap -} - -// NewRelationCache creates a new RelationCache that will use the supplied -// SettingsFunc to populate itself on demand. Initial membership is determined -// by memberNames. -func NewRelationCache(readSettings SettingsFunc, memberNames []string) *RelationCache { - cache := &RelationCache{ - readSettings: readSettings, - } - cache.Prune(memberNames) - return cache -} - -// Prune resets the membership to the supplied list, and discards the settings -// of all non-member units. -func (cache *RelationCache) Prune(memberNames []string) { - newMembers := SettingsMap{} - for _, memberName := range memberNames { - newMembers[memberName] = cache.members[memberName] - } - cache.members = newMembers - cache.others = SettingsMap{} -} - -// MemberNames returns the names of the remote units present in the relation. -func (cache *RelationCache) MemberNames() (memberNames []string) { - for memberName := range cache.members { - memberNames = append(memberNames, memberName) - } - sort.Strings(memberNames) - return memberNames -} - -// Settings returns the settings of the named remote unit. It's valid to get -// the settings of any unit that has ever been in the relation. -func (cache *RelationCache) Settings(unitName string) (params.Settings, error) { - settings, isMember := cache.members[unitName] - if settings == nil { - if !isMember { - settings = cache.others[unitName] - } - if settings == nil { - var err error - settings, err = cache.readSettings(unitName) - if err != nil { - return nil, err - } - } - } - if isMember { - cache.members[unitName] = settings - } else { - cache.others[unitName] = settings - } - return settings, nil -} - -// InvalidateMember ensures that the named remote unit will be considered a -// member of the relation, and that the next attempt to read its settings will -// use fresh data. -func (cache *RelationCache) InvalidateMember(memberName string) { - cache.members[memberName] = nil -} - -// RemoveMember ensures that the named remote unit will not be considered a -// member of the relation, -func (cache *RelationCache) RemoveMember(memberName string) { - delete(cache.members, memberName) -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/cache_test.go' --- src/github.com/juju/juju/worker/uniter/runner/cache_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/cache_test.go 1970-01-01 00:00:00 +0000 @@ -1,215 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
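The removed cache had a small but subtle contract, exercised at length by the tests below. For reference, a sketch of its behaviour as it stood before this change (readSettings stands in for the API call that fetches a unit's relation settings):

	readSettings := func(unitName string) (params.Settings, error) {
		return params.Settings{"foo": "bar"}, nil
	}
	cache := runner.NewRelationCache(readSettings, []string{"u/1"})

	s, _ := cache.Settings("u/1") // first read calls readSettings
	s, _ = cache.Settings("u/1")  // second read is served from the cache
	_ = s

	cache.Prune([]string{"u/1"}) // members keep their cached settings
	cache.RemoveMember("u/1")    // membership and settings are dropped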
- -package runner_test - -import ( - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/worker/uniter/runner" -) - -type settingsResult struct { - settings params.Settings - err error -} - -type RelationCacheSuite struct { - testing.IsolationSuite - calls []string - results []settingsResult -} - -var _ = gc.Suite(&RelationCacheSuite{}) - -func (s *RelationCacheSuite) SetUpTest(c *gc.C) { - s.calls = []string{} - s.results = []settingsResult{} -} - -func (s *RelationCacheSuite) ReadSettings(unitName string) (params.Settings, error) { - result := s.results[len(s.calls)] - s.calls = append(s.calls, unitName) - return result.settings, result.err -} - -func (s *RelationCacheSuite) TestCreateEmpty(c *gc.C) { - cache := runner.NewRelationCache(s.ReadSettings, nil) - c.Assert(cache.MemberNames(), gc.HasLen, 0) - c.Assert(s.calls, gc.HasLen, 0) -} - -func (s *RelationCacheSuite) TestCreateWithMembers(c *gc.C) { - cache := runner.NewRelationCache(s.ReadSettings, []string{"u/3", "u/2", "u/1"}) - c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"u/1", "u/2", "u/3"}) - c.Assert(s.calls, gc.HasLen, 0) -} - -func (s *RelationCacheSuite) TestInvalidateMemberChangesMembership(c *gc.C) { - cache := runner.NewRelationCache(s.ReadSettings, nil) - cache.InvalidateMember("foo/1") - c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"foo/1"}) - cache.InvalidateMember("foo/2") - c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"foo/1", "foo/2"}) - cache.InvalidateMember("foo/2") - c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"foo/1", "foo/2"}) - c.Assert(s.calls, gc.HasLen, 0) -} - -func (s *RelationCacheSuite) TestRemoveMemberChangesMembership(c *gc.C) { - cache := runner.NewRelationCache(s.ReadSettings, []string{"x/2"}) - cache.RemoveMember("x/1") - c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"x/2"}) - cache.RemoveMember("x/2") - c.Assert(cache.MemberNames(), gc.HasLen, 0) - c.Assert(s.calls, gc.HasLen, 0) -} - -func (s *RelationCacheSuite) TestPruneChangesMembership(c *gc.C) { - cache := runner.NewRelationCache(s.ReadSettings, []string{"u/1", "u/2", "u/3"}) - cache.Prune([]string{"u/3", "u/4", "u/5"}) - c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"u/3", "u/4", "u/5"}) - c.Assert(s.calls, gc.HasLen, 0) -} - -func (s *RelationCacheSuite) TestSettingsPropagatesError(c *gc.C) { - s.results = []settingsResult{{ - nil, errors.New("blam"), - }} - cache := runner.NewRelationCache(s.ReadSettings, nil) - - settings, err := cache.Settings("whatever") - c.Assert(settings, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "blam") - c.Assert(s.calls, jc.DeepEquals, []string{"whatever"}) -} - -func (s *RelationCacheSuite) TestSettingsCachesMemberSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, []string{"x/2"}) - - for i := 0; i < 2; i++ { - settings, err := cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) - } -} - -func (s *RelationCacheSuite) TestInvalidateMemberUncachesMemberSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }, { - params.Settings{"baz": "qux"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, []string{"x/2"}) - - settings, err := cache.Settings("x/2") - c.Assert(err, 
jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) - - cache.InvalidateMember("x/2") - settings, err = cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) -} - -func (s *RelationCacheSuite) TestInvalidateMemberUncachesOtherSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }, { - params.Settings{"baz": "qux"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, nil) - - settings, err := cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) - - cache.InvalidateMember("x/2") - settings, err = cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) -} - -func (s *RelationCacheSuite) TestRemoveMemberUncachesMemberSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }, { - params.Settings{"baz": "qux"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, []string{"x/2"}) - - settings, err := cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) - - cache.RemoveMember("x/2") - settings, err = cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) -} - -func (s *RelationCacheSuite) TestSettingsCachesOtherSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, nil) - - for i := 0; i < 2; i++ { - settings, err := cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) - } -} - -func (s *RelationCacheSuite) TestPrunePreservesMemberSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, []string{"foo/2"}) - - settings, err := cache.Settings("foo/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"foo/2"}) - - cache.Prune([]string{"foo/2"}) - settings, err = cache.Settings("foo/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"foo/2"}) -} - -func (s *RelationCacheSuite) TestPruneUncachesOtherSettings(c *gc.C) { - s.results = []settingsResult{{ - params.Settings{"foo": "bar"}, nil, - }, { - params.Settings{"baz": "qux"}, nil, - }} - cache := runner.NewRelationCache(s.ReadSettings, nil) - - settings, err := cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) - - cache.Prune(nil) - settings, err = cache.Settings("x/2") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) - c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) -} === added directory 
'src/github.com/juju/juju/worker/uniter/runner/context' === removed file 'src/github.com/juju/juju/worker/uniter/runner/context.go' --- src/github.com/juju/juju/worker/uniter/runner/context.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context.go 1970-01-01 00:00:00 +0000 @@ -1,787 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner - -import ( - "fmt" - "os" - "strings" - "sync" - "time" - - "github.com/juju/errors" - "github.com/juju/loggo" - "github.com/juju/names" - "github.com/juju/utils/proxy" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/api/base" - "github.com/juju/juju/api/uniter" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" - "github.com/juju/juju/worker/uniter/metrics" - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -var logger = loggo.GetLogger("juju.worker.uniter.context") -var mutex = sync.Mutex{} -var ErrIsNotLeader = errors.Errorf("this unit is not the leader") - -// ComponentConfig holds all the information related to a hook context -// needed by components. -type ComponentConfig struct { - // UnitName is the name of the unit. - UnitName string - // DataDir is the component's data directory. - DataDir string - // APICaller is the API caller the component may use. - APICaller base.APICaller -} - -// ComponentFunc is a factory function for Context components. -type ComponentFunc func(ComponentConfig) (jujuc.ContextComponent, error) - -var registeredComponentFuncs = map[string]ComponentFunc{} - -// Add the named component factory func to the registry. -func RegisterComponentFunc(name string, f ComponentFunc) error { - if _, ok := registeredComponentFuncs[name]; ok { - return errors.AlreadyExistsf("%s", name) - } - registeredComponentFuncs[name] = f - return nil -} - -// meterStatus describes the unit's meter status. -type meterStatus struct { - code string - info string -} - -// MetricsRecorder is used to store metrics supplied by the add-metric command. -type MetricsRecorder interface { - AddMetric(key, value string, created time.Time) error - IsDeclaredMetric(key string) bool - Close() error -} - -// metricsReader is used to read metrics batches stored by the metrics recorder -// and remove metrics batches that have been marked as succesfully sent. -type metricsReader interface { - Open() ([]metrics.MetricBatch, error) - Remove(uuid string) error - Close() error -} - -// HookContext is the implementation of jujuc.Context. -type HookContext struct { - unit *uniter.Unit - - // state is the handle to the uniter State so that HookContext can make - // API calls on the stateservice. - // NOTE: We would like to be rid of the fake-remote-Unit and switch - // over fully to API calls on State. This adds that ability, but we're - // not fully there yet. - state *uniter.State - - // LeadershipContext supplies several jujuc.Context methods. - LeadershipContext - - // privateAddress is the cached value of the unit's private - // address. - privateAddress string - - // publicAddress is the cached value of the unit's public - // address. - publicAddress string - - // availabilityzone is the cached value of the unit's availability zone name. - availabilityzone string - - // configSettings holds the service configuration. - configSettings charm.Settings - - // id identifies the context. - id string - - // actionData contains the values relevant to the run of an Action: - // its tag, its parameters, and its results. 
- actionData *ActionData - - // uuid is the universally unique identifier of the environment. - uuid string - - // envName is the human friendly name of the environment. - envName string - - // unitName is the human friendly name of the local unit. - unitName string - - // status is the status of the local unit. - status *jujuc.StatusInfo - - // relationId identifies the relation for which a relation hook is - // executing. If it is -1, the context is not running a relation hook; - // otherwise, its value must be a valid key into the relations map. - relationId int - - // remoteUnitName identifies the changing unit of the executing relation - // hook. It will be empty if the context is not running a relation hook, - // or if it is running a relation-broken hook. - remoteUnitName string - - // relations contains the context for every relation the unit is a member - // of, keyed on relation id. - relations map[int]*ContextRelation - - // apiAddrs contains the API server addresses. - apiAddrs []string - - // proxySettings are the current proxy settings that the uniter knows about. - proxySettings proxy.Settings - - // metricsRecorder is used to write metrics batches to a storage (usually a file). - metricsRecorder MetricsRecorder - - // definedMetrics specifies the metrics the charm has defined in its metrics.yaml file. - definedMetrics *charm.Metrics - - // meterStatus is the status of the unit's metering. - meterStatus *meterStatus - - // pendingPorts contains a list of port ranges to be opened or - // closed when the current hook is committed. - pendingPorts map[PortRange]PortRangeInfo - - // machinePorts contains cached information about all opened port - // ranges on the unit's assigned machine, mapped to the unit that - // opened each range and the relevant relation. - machinePorts map[network.PortRange]params.RelationUnit - - // assignedMachineTag contains the tag of the unit's assigned - // machine. - assignedMachineTag names.MachineTag - - // process is the process of the command that is being run in the local context, - // like a juju-run command or a hook - process *os.Process - - // rebootPriority tells us when the hook wants to reboot. If rebootPriority is jujuc.RebootNow - // the hook will be killed and requeued - rebootPriority jujuc.RebootPriority - - // storage provides access to the information about storage attached to the unit. - storage StorageContextAccessor - - // storageId is the tag of the storage instance associated with the running hook. - storageTag names.StorageTag - - // hasRunSetStatus is true if a call to the status-set was made during the - // invocation of a hook. - // This attribute is persisted to local uniter state at the end of the hook - // execution so that the uniter can ultimately decide if it needs to update - // a charm's workload status, or if the charm has already taken care of it. - hasRunStatusSet bool - - // storageAddConstraints is a collection of storage constraints - // keyed on storage name as specified in the charm. - // This collection will be added to the unit on successful - // hook run, so the actual add will happen in a flush. - storageAddConstraints map[string][]params.StorageConstraints - - componentDir func(string) string - componentFuncs map[string]ComponentFunc -} - -// Component implements jujuc.Context. 
-func (ctx *HookContext) Component(name string) (jujuc.ContextComponent, error) { - compCtxFunc, ok := ctx.componentFuncs[name] - if !ok { - return nil, errors.NotFoundf("context component %q", name) - } - - facade := ctx.state.Facade() - config := ComponentConfig{ - UnitName: ctx.unit.Name(), - DataDir: ctx.componentDir(name), - APICaller: facade.RawAPICaller(), - } - compCtx, err := compCtxFunc(config) - if err != nil { - return nil, errors.Trace(err) - } - return compCtx, nil -} - -func (ctx *HookContext) RequestReboot(priority jujuc.RebootPriority) error { - var err error - if priority == jujuc.RebootNow { - // At this point, the hook should be running - err = ctx.killCharmHook() - } - - switch err { - case nil, ErrNoProcess: - // ErrNoProcess almost certainly means we are running in debug hooks - ctx.SetRebootPriority(priority) - } - return err -} - -func (ctx *HookContext) GetRebootPriority() jujuc.RebootPriority { - mutex.Lock() - defer mutex.Unlock() - return ctx.rebootPriority -} - -func (ctx *HookContext) SetRebootPriority(priority jujuc.RebootPriority) { - mutex.Lock() - defer mutex.Unlock() - ctx.rebootPriority = priority -} - -func (ctx *HookContext) GetProcess() *os.Process { - mutex.Lock() - defer mutex.Unlock() - return ctx.process -} - -func (ctx *HookContext) SetProcess(process *os.Process) { - mutex.Lock() - defer mutex.Unlock() - ctx.process = process -} - -func (ctx *HookContext) Id() string { - return ctx.id -} - -func (ctx *HookContext) UnitName() string { - return ctx.unitName -} - -// UnitStatus will return the status for the current Unit. -func (ctx *HookContext) UnitStatus() (*jujuc.StatusInfo, error) { - if ctx.status == nil { - var err error - status, err := ctx.unit.UnitStatus() - if err != nil { - return nil, err - } - ctx.status = &jujuc.StatusInfo{ - Status: string(status.Status), - Info: status.Info, - Data: status.Data, - } - } - return ctx.status, nil -} - -// ServiceStatus returns the status for the service and all the units on -// the service to which this context unit belongs, only if this unit is -// the leader. -func (ctx *HookContext) ServiceStatus() (jujuc.ServiceStatusInfo, error) { - var err error - isLeader, err := ctx.IsLeader() - if err != nil { - return jujuc.ServiceStatusInfo{}, errors.Annotatef(err, "cannot determine leadership") - } - if !isLeader { - return jujuc.ServiceStatusInfo{}, ErrIsNotLeader - } - service, err := ctx.unit.Service() - if err != nil { - return jujuc.ServiceStatusInfo{}, errors.Trace(err) - } - status, err := service.Status(ctx.unit.Name()) - if err != nil { - return jujuc.ServiceStatusInfo{}, errors.Trace(err) - } - us := make([]jujuc.StatusInfo, len(status.Units)) - i := 0 - for t, s := range status.Units { - us[i] = jujuc.StatusInfo{ - Tag: t, - Status: string(s.Status), - Info: s.Info, - Data: s.Data, - } - i++ - } - return jujuc.ServiceStatusInfo{ - Service: jujuc.StatusInfo{ - Tag: service.Tag().String(), - Status: string(status.Service.Status), - Info: status.Service.Info, - Data: status.Service.Data, - }, - Units: us, - }, nil -} - -// SetUnitStatus will set the given status for this unit. -func (ctx *HookContext) SetUnitStatus(status jujuc.StatusInfo) error { - ctx.hasRunStatusSet = true - logger.Debugf("[WORKLOAD-STATUS] %s: %s", status.Status, status.Info) - return ctx.unit.SetUnitStatus( - params.Status(status.Status), - status.Info, - status.Data, - ) -} - -// SetServiceStatus will set the given status to the service to which this -// unit's belong, only if this unit is the leader. 
-func (ctx *HookContext) SetServiceStatus(status jujuc.StatusInfo) error { - logger.Debugf("[SERVICE-STATUS] %s: %s", status.Status, status.Info) - isLeader, err := ctx.IsLeader() - if err != nil { - return errors.Annotatef(err, "cannot determine leadership") - } - if !isLeader { - return ErrIsNotLeader - } - - service, err := ctx.unit.Service() - if err != nil { - return errors.Trace(err) - } - return service.SetStatus( - ctx.unit.Name(), - params.Status(status.Status), - status.Info, - status.Data, - ) -} - -func (ctx *HookContext) HasExecutionSetUnitStatus() bool { - return ctx.hasRunStatusSet -} - -func (ctx *HookContext) ResetExecutionSetUnitStatus() { - ctx.hasRunStatusSet = false -} - -func (ctx *HookContext) PublicAddress() (string, bool) { - return ctx.publicAddress, ctx.publicAddress != "" -} - -func (ctx *HookContext) PrivateAddress() (string, bool) { - return ctx.privateAddress, ctx.privateAddress != "" -} - -func (ctx *HookContext) AvailabilityZone() (string, bool) { - return ctx.availabilityzone, ctx.availabilityzone != "" -} - -func (ctx *HookContext) StorageTags() []names.StorageTag { - return ctx.storage.StorageTags() -} - -func (ctx *HookContext) HookStorage() (jujuc.ContextStorageAttachment, bool) { - return ctx.Storage(ctx.storageTag) -} - -func (ctx *HookContext) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, bool) { - return ctx.storage.Storage(tag) -} - -func (ctx *HookContext) AddUnitStorage(cons map[string]params.StorageConstraints) { - // All storage constraints are accumulated before context is flushed. - if ctx.storageAddConstraints == nil { - ctx.storageAddConstraints = make( - map[string][]params.StorageConstraints, - len(cons)) - } - for storage, newConstraints := range cons { - // Multiple calls for the same storage are accumulated as well. - ctx.storageAddConstraints[storage] = append( - ctx.storageAddConstraints[storage], - newConstraints) - } -} - -func (ctx *HookContext) OpenPorts(protocol string, fromPort, toPort int) error { - return tryOpenPorts( - protocol, fromPort, toPort, - ctx.unit.Tag(), - ctx.machinePorts, ctx.pendingPorts, - ) -} - -func (ctx *HookContext) ClosePorts(protocol string, fromPort, toPort int) error { - return tryClosePorts( - protocol, fromPort, toPort, - ctx.unit.Tag(), - ctx.machinePorts, ctx.pendingPorts, - ) -} - -func (ctx *HookContext) OpenedPorts() []network.PortRange { - var unitRanges []network.PortRange - for portRange, relUnit := range ctx.machinePorts { - if relUnit.Unit == ctx.unit.Tag().String() { - unitRanges = append(unitRanges, portRange) - } - } - network.SortPortRanges(unitRanges) - return unitRanges -} - -func (ctx *HookContext) ConfigSettings() (charm.Settings, error) { - if ctx.configSettings == nil { - var err error - ctx.configSettings, err = ctx.unit.ConfigSettings() - if err != nil { - return nil, err - } - } - result := charm.Settings{} - for name, value := range ctx.configSettings { - result[name] = value - } - return result, nil -} - -// ActionName returns the name of the action. -func (ctx *HookContext) ActionName() (string, error) { - if ctx.actionData == nil { - return "", errors.New("not running an action") - } - return ctx.actionData.Name, nil -} - -// ActionParams simply returns the arguments to the Action. -func (ctx *HookContext) ActionParams() (map[string]interface{}, error) { - if ctx.actionData == nil { - return nil, errors.New("not running an action") - } - return ctx.actionData.Params, nil -} - -// SetActionMessage sets a message for the Action, usually an error message. 
-func (ctx *HookContext) SetActionMessage(message string) error { - if ctx.actionData == nil { - return errors.New("not running an action") - } - ctx.actionData.ResultsMessage = message - return nil -} - -// SetActionFailed sets the fail state of the action. -func (ctx *HookContext) SetActionFailed() error { - if ctx.actionData == nil { - return errors.New("not running an action") - } - ctx.actionData.Failed = true - return nil -} - -// UpdateActionResults inserts new values for use with action-set and -// action-fail. The results struct will be delivered to the state server -// upon completion of the Action. It returns an error if not called on an -// Action-containing HookContext. -func (ctx *HookContext) UpdateActionResults(keys []string, value string) error { - if ctx.actionData == nil { - return errors.New("not running an action") - } - addValueToMap(keys, value, ctx.actionData.ResultsMap) - return nil -} - -func (ctx *HookContext) HookRelation() (jujuc.ContextRelation, bool) { - return ctx.Relation(ctx.relationId) -} - -func (ctx *HookContext) RemoteUnitName() (string, bool) { - return ctx.remoteUnitName, ctx.remoteUnitName != "" -} - -func (ctx *HookContext) Relation(id int) (jujuc.ContextRelation, bool) { - r, found := ctx.relations[id] - return r, found -} - -func (ctx *HookContext) RelationIds() []int { - ids := []int{} - for id := range ctx.relations { - ids = append(ids, id) - } - return ids -} - -// AddMetric adds metrics to the hook context. -func (ctx *HookContext) AddMetric(key, value string, created time.Time) error { - if ctx.metricsRecorder == nil || ctx.definedMetrics == nil { - return errors.New("metrics disabled") - } - - err := ctx.definedMetrics.ValidateMetric(key, value) - if err != nil { - return errors.Annotatef(err, "invalid metric %q", key) - } - - err = ctx.metricsRecorder.AddMetric(key, value, created) - if err != nil { - return errors.Annotate(err, "failed to store metric") - } - return nil -} - -// ActionData returns the context's internal action data. It's meant to be -// transitory; it exists to allow uniter and runner code to keep working as -// it did; it should be considered deprecated, and not used by new clients. -func (c *HookContext) ActionData() (*ActionData, error) { - if c.actionData == nil { - return nil, errors.New("not running an action") - } - return c.actionData, nil -} - -// HookVars returns an os.Environ-style list of strings necessary to run a hook -// such that it can know what environment it's operating in, and can call back -// into context. 
-func (context *HookContext) HookVars(paths Paths) []string { - vars := context.proxySettings.AsEnvironmentValues() - vars = append(vars, - "CHARM_DIR="+paths.GetCharmDir(), // legacy, embarrassing - "JUJU_CHARM_DIR="+paths.GetCharmDir(), - "JUJU_CONTEXT_ID="+context.id, - "JUJU_AGENT_SOCKET="+paths.GetJujucSocket(), - "JUJU_UNIT_NAME="+context.unitName, - "JUJU_ENV_UUID="+context.uuid, - "JUJU_ENV_NAME="+context.envName, - "JUJU_API_ADDRESSES="+strings.Join(context.apiAddrs, " "), - "JUJU_METER_STATUS="+context.meterStatus.code, - "JUJU_METER_INFO="+context.meterStatus.info, - "JUJU_MACHINE_ID="+context.assignedMachineTag.Id(), - "JUJU_AVAILABILITY_ZONE="+context.availabilityzone, - ) - if r, found := context.HookRelation(); found { - vars = append(vars, - "JUJU_RELATION="+r.Name(), - "JUJU_RELATION_ID="+r.FakeId(), - "JUJU_REMOTE_UNIT="+context.remoteUnitName, - ) - } - if context.actionData != nil { - vars = append(vars, - "JUJU_ACTION_NAME="+context.actionData.Name, - "JUJU_ACTION_UUID="+context.actionData.Tag.Id(), - "JUJU_ACTION_TAG="+context.actionData.Tag.String(), - ) - } - return append(vars, osDependentEnvVars(paths)...) -} - -func (ctx *HookContext) handleReboot(err *error) { - logger.Infof("handling reboot") - rebootPriority := ctx.GetRebootPriority() - switch rebootPriority { - case jujuc.RebootSkip: - return - case jujuc.RebootAfterHook: - // Reboot should happen only after hook has finished. - if *err != nil { - return - } - *err = ErrReboot - case jujuc.RebootNow: - *err = ErrRequeueAndReboot - } - err2 := ctx.unit.SetUnitStatus(params.StatusRebooting, "", nil) - if err2 != nil { - logger.Errorf("updating agent status: %v", err2) - } - reqErr := ctx.unit.RequestReboot() - if reqErr != nil { - *err = reqErr - } -} - -// addJujuUnitsMetric adds the juju-units built in metric if it -// is defined for this context. -func (ctx *HookContext) addJujuUnitsMetric() error { - if ctx.metricsRecorder.IsDeclaredMetric("juju-units") { - err := ctx.metricsRecorder.AddMetric("juju-units", "1", time.Now().UTC()) - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -// Prepare implements the Context interface. -func (ctx *HookContext) Prepare() error { - if ctx.actionData != nil { - err := ctx.state.ActionBegin(ctx.actionData.Tag) - if err != nil { - return errors.Trace(err) - } - } - return nil -} - -// Flush implements the Context interface. -func (ctx *HookContext) Flush(process string, ctxErr error) (err error) { - // A non-existant metricsRecorder simply means that metrics were disabled - // for this hook run. - if ctx.metricsRecorder != nil { - err := ctx.addJujuUnitsMetric() - if err != nil { - return errors.Trace(err) - } - err = ctx.metricsRecorder.Close() - if err != nil { - return errors.Trace(err) - } - } - - writeChanges := ctxErr == nil - - // In the case of Actions, handle any errors using finalizeAction. - if ctx.actionData != nil { - // If we had an error in err at this point, it's part of the - // normal behavior of an Action. Errors which happen during - // the finalize should be handed back to the uniter. Close - // over the existing err, clear it, and only return errors - // which occur during the finalize, e.g. API call errors. - defer func(ctxErr error) { - err = ctx.finalizeAction(ctxErr, err) - }(ctxErr) - ctxErr = nil - } else { - // TODO(gsamfira): Just for now, reboot will not be supported in actions. 
- defer ctx.handleReboot(&err) - } - - for id, rctx := range ctx.relations { - if writeChanges { - if e := rctx.WriteSettings(); e != nil { - e = errors.Errorf( - "could not write settings from %q to relation %d: %v", - process, id, e, - ) - logger.Errorf("%v", e) - if ctxErr == nil { - ctxErr = e - } - } - } - } - - for rangeKey, rangeInfo := range ctx.pendingPorts { - if writeChanges { - var e error - var op string - if rangeInfo.ShouldOpen { - e = ctx.unit.OpenPorts( - rangeKey.Ports.Protocol, - rangeKey.Ports.FromPort, - rangeKey.Ports.ToPort, - ) - op = "open" - } else { - e = ctx.unit.ClosePorts( - rangeKey.Ports.Protocol, - rangeKey.Ports.FromPort, - rangeKey.Ports.ToPort, - ) - op = "close" - } - if e != nil { - e = errors.Annotatef(e, "cannot %s %v", op, rangeKey.Ports) - logger.Errorf("%v", e) - if ctxErr == nil { - ctxErr = e - } - } - } - } - - // add storage to unit dynamically - if len(ctx.storageAddConstraints) > 0 && writeChanges { - err := ctx.unit.AddStorage(ctx.storageAddConstraints) - if err != nil { - err = errors.Annotatef(err, "cannot add storage") - logger.Errorf("%v", err) - if ctxErr == nil { - ctxErr = err - } - } - } - - // TODO (tasdomas) 2014 09 03: context finalization needs to modified to apply all - // changes in one api call to minimize the risk - // of partial failures. - - if !writeChanges { - return ctxErr - } - - return ctxErr -} - -// finalizeAction passes back the final status of an Action hook to state. -// It wraps any errors which occurred in normal behavior of the Action run; -// only errors passed in unhandledErr will be returned. -func (ctx *HookContext) finalizeAction(err, unhandledErr error) error { - // TODO (binary132): synchronize with gsamfira's reboot logic - message := ctx.actionData.ResultsMessage - results := ctx.actionData.ResultsMap - tag := ctx.actionData.Tag - status := params.ActionCompleted - if ctx.actionData.Failed { - status = params.ActionFailed - } - - // If we had an action error, we'll simply encapsulate it in the response - // and discard the error state. Actions should not error the uniter. - if err != nil { - message = err.Error() - if IsMissingHookError(err) { - message = fmt.Sprintf("action not implemented on unit %q", ctx.unitName) - } - status = params.ActionFailed - } - - callErr := ctx.state.ActionFinish(tag, status, results, message) - if callErr != nil { - unhandledErr = errors.Wrap(unhandledErr, callErr) - } - return unhandledErr -} - -// killCharmHook tries to kill the current running charm hook. -func (ctx *HookContext) killCharmHook() error { - proc := ctx.GetProcess() - if proc == nil { - // nothing to kill - return ErrNoProcess - } - logger.Infof("trying to kill context process %d", proc.Pid) - - tick := time.After(0) - timeout := time.After(30 * time.Second) - for { - // We repeatedly try to kill the process until we fail; this is - // because we don't control the *Process, and our clients expect - // to be able to Wait(); so we can't Wait. We could do better, - // but not with a single implementation across all platforms. - // TODO(gsamfira): come up with a better cross-platform approach. 
- select { - case <-tick: - err := proc.Kill() - if err != nil { - logger.Infof("kill returned: %s", err) - logger.Infof("assuming already killed") - return nil - } - case <-timeout: - return errors.Errorf("failed to kill context process %d", proc.Pid) - } - logger.Infof("waiting for context process %d to die", proc.Pid) - tick = time.After(100 * time.Millisecond) - } -} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/action.go' --- src/github.com/juju/juju/worker/uniter/runner/context/action.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/action.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,73 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context + +import ( + "github.com/juju/names" +) + +// ActionData contains the tag, parameters, and results of an Action. +type ActionData struct { + Name string + Tag names.ActionTag + Params map[string]interface{} + Failed bool + ResultsMessage string + ResultsMap map[string]interface{} +} + +// NewActionData builds a suitable ActionData struct with no nil members. +// This should only be called in the event that an Action hook is being requested. +func NewActionData(name string, tag *names.ActionTag, params map[string]interface{}) *ActionData { + return &ActionData{ + Name: name, + Tag: *tag, + Params: params, + ResultsMap: map[string]interface{}{}, + } +} + +// actionStatus messages define the possible states of a completed Action. +const ( + actionStatusInit = "init" + actionStatusFailed = "fail" +) + +// addValueToMap adds the given value to the target map, nested under the +// given sequence of keys. This allows us to merge maps such as +// {foo: {bar: baz}} and {foo: {baz: faz}} into {foo: {bar: baz, baz: faz}}. +func addValueToMap(keys []string, value string, target map[string]interface{}) { + next := target + + for i := range keys { + // If we are on the last key, set the value; this deliberately + // overwrites any existing value. + if i == len(keys)-1 { + next[keys[i]] = value + break + } + + if iface, ok := next[keys[i]]; ok { + switch typed := iface.(type) { + case map[string]interface{}: + // If we already had a map inside, keep + // stepping through. + next = typed + default: + // If we didn't, then overwrite value + // with a map and iterate with that. + m := map[string]interface{}{} + next[keys[i]] = m + next = m + } + continue + } + + // Otherwise, it wasn't present, so make it and step + // into it. + m := map[string]interface{}{} + next[keys[i]] = m + next = m + } +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/cache.go' --- src/github.com/juju/juju/worker/uniter/runner/context/cache.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/cache.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context + +import ( + "sort" + + "github.com/juju/juju/apiserver/params" +) + +// SettingsFunc returns the relation settings for a unit. +type SettingsFunc func(unitName string) (params.Settings, error) + +// SettingsMap is a map from unit name to relation settings. +type SettingsMap map[string]params.Settings + +// RelationCache stores a relation's remote unit membership and settings. +// Member settings are stored until invalidated or removed by name; settings +// of non-member units are stored only until the cache is pruned.
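[Editor's note — illustrative example, not part of the diff] A worked example of the addValueToMap helper defined in action.go above: successive calls merge key paths into one nested results map, and a shorter path overwrites any nested map at its leaf.

    // Within package context (addValueToMap is unexported):
    results := map[string]interface{}{}
    addValueToMap([]string{"outcome", "status"}, "ok", results)
    addValueToMap([]string{"outcome", "bytes"}, "42", results)
    // results == {"outcome": {"status": "ok", "bytes": "42"}}
    addValueToMap([]string{"outcome"}, "done", results)
    // results == {"outcome": "done"}  (the nested map is overwritten)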
+type RelationCache struct { + // readSettings is used to get settings data when it is not already present. + readSettings SettingsFunc + // members' keys define the relation's membership; non-nil values hold + // cached settings. + members SettingsMap + // others is a short-term cache for non-member settings. + others SettingsMap +} + +// NewRelationCache creates a new RelationCache that will use the supplied +// SettingsFunc to populate itself on demand. Initial membership is determined +// by memberNames. +func NewRelationCache(readSettings SettingsFunc, memberNames []string) *RelationCache { + cache := &RelationCache{ + readSettings: readSettings, + } + cache.Prune(memberNames) + return cache +} + +// Prune resets the membership to the supplied list, and discards the settings +// of all non-member units. +func (cache *RelationCache) Prune(memberNames []string) { + newMembers := SettingsMap{} + for _, memberName := range memberNames { + newMembers[memberName] = cache.members[memberName] + } + cache.members = newMembers + cache.others = SettingsMap{} +} + +// MemberNames returns the names of the remote units present in the relation. +func (cache *RelationCache) MemberNames() (memberNames []string) { + for memberName := range cache.members { + memberNames = append(memberNames, memberName) + } + sort.Strings(memberNames) + return memberNames +} + +// Settings returns the settings of the named remote unit. It's valid to get +// the settings of any unit that has ever been in the relation. +func (cache *RelationCache) Settings(unitName string) (params.Settings, error) { + settings, isMember := cache.members[unitName] + if settings == nil { + if !isMember { + settings = cache.others[unitName] + } + if settings == nil { + var err error + settings, err = cache.readSettings(unitName) + if err != nil { + return nil, err + } + } + } + if isMember { + cache.members[unitName] = settings + } else { + cache.others[unitName] = settings + } + return settings, nil +} + +// InvalidateMember ensures that the named remote unit will be considered a +// member of the relation, and that the next attempt to read its settings will +// use fresh data. +func (cache *RelationCache) InvalidateMember(memberName string) { + cache.members[memberName] = nil +} + +// RemoveMember ensures that the named remote unit will not be considered a +// member of the relation. +func (cache *RelationCache) RemoveMember(memberName string) { + delete(cache.members, memberName) +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/cache_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/cache_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/cache_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,215 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
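[Editor's note — illustrative sketch, not part of the diff] Usage of the RelationCache just defined, showing the member/non-member split; the readSettings stub below is a hypothetical stand-in for the API call:

    readSettings := func(unitName string) (params.Settings, error) {
        // Hypothetical stand-in for a remote settings read.
        return params.Settings{"host": unitName + ".example"}, nil
    }
    cache := NewRelationCache(readSettings, []string{"wordpress/0"})

    s1, _ := cache.Settings("wordpress/0") // member: read once, then cached
    s2, _ := cache.Settings("mysql/0")     // non-member: cached only until Prune
    cache.Prune([]string{"wordpress/0"})   // keeps wordpress/0, drops mysql/0
    cache.InvalidateMember("wordpress/0")  // forces a fresh read next time
    _, _ = s1, s2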
+ +package context_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/worker/uniter/runner/context" +) + +type settingsResult struct { + settings params.Settings + err error +} + +type RelationCacheSuite struct { + testing.IsolationSuite + calls []string + results []settingsResult +} + +var _ = gc.Suite(&RelationCacheSuite{}) + +func (s *RelationCacheSuite) SetUpTest(c *gc.C) { + s.calls = []string{} + s.results = []settingsResult{} +} + +func (s *RelationCacheSuite) ReadSettings(unitName string) (params.Settings, error) { + result := s.results[len(s.calls)] + s.calls = append(s.calls, unitName) + return result.settings, result.err +} + +func (s *RelationCacheSuite) TestCreateEmpty(c *gc.C) { + cache := context.NewRelationCache(s.ReadSettings, nil) + c.Assert(cache.MemberNames(), gc.HasLen, 0) + c.Assert(s.calls, gc.HasLen, 0) +} + +func (s *RelationCacheSuite) TestCreateWithMembers(c *gc.C) { + cache := context.NewRelationCache(s.ReadSettings, []string{"u/3", "u/2", "u/1"}) + c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"u/1", "u/2", "u/3"}) + c.Assert(s.calls, gc.HasLen, 0) +} + +func (s *RelationCacheSuite) TestInvalidateMemberChangesMembership(c *gc.C) { + cache := context.NewRelationCache(s.ReadSettings, nil) + cache.InvalidateMember("foo/1") + c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"foo/1"}) + cache.InvalidateMember("foo/2") + c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"foo/1", "foo/2"}) + cache.InvalidateMember("foo/2") + c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"foo/1", "foo/2"}) + c.Assert(s.calls, gc.HasLen, 0) +} + +func (s *RelationCacheSuite) TestRemoveMemberChangesMembership(c *gc.C) { + cache := context.NewRelationCache(s.ReadSettings, []string{"x/2"}) + cache.RemoveMember("x/1") + c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"x/2"}) + cache.RemoveMember("x/2") + c.Assert(cache.MemberNames(), gc.HasLen, 0) + c.Assert(s.calls, gc.HasLen, 0) +} + +func (s *RelationCacheSuite) TestPruneChangesMembership(c *gc.C) { + cache := context.NewRelationCache(s.ReadSettings, []string{"u/1", "u/2", "u/3"}) + cache.Prune([]string{"u/3", "u/4", "u/5"}) + c.Assert(cache.MemberNames(), jc.DeepEquals, []string{"u/3", "u/4", "u/5"}) + c.Assert(s.calls, gc.HasLen, 0) +} + +func (s *RelationCacheSuite) TestSettingsPropagatesError(c *gc.C) { + s.results = []settingsResult{{ + nil, errors.New("blam"), + }} + cache := context.NewRelationCache(s.ReadSettings, nil) + + settings, err := cache.Settings("whatever") + c.Assert(settings, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "blam") + c.Assert(s.calls, jc.DeepEquals, []string{"whatever"}) +} + +func (s *RelationCacheSuite) TestSettingsCachesMemberSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, []string{"x/2"}) + + for i := 0; i < 2; i++ { + settings, err := cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) + } +} + +func (s *RelationCacheSuite) TestInvalidateMemberUncachesMemberSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }, { + params.Settings{"baz": "qux"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, []string{"x/2"}) + + settings, err := cache.Settings("x/2") 
+ c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) + + cache.InvalidateMember("x/2") + settings, err = cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) +} + +func (s *RelationCacheSuite) TestInvalidateMemberUncachesOtherSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }, { + params.Settings{"baz": "qux"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, nil) + + settings, err := cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) + + cache.InvalidateMember("x/2") + settings, err = cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) +} + +func (s *RelationCacheSuite) TestRemoveMemberUncachesMemberSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }, { + params.Settings{"baz": "qux"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, []string{"x/2"}) + + settings, err := cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) + + cache.RemoveMember("x/2") + settings, err = cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) +} + +func (s *RelationCacheSuite) TestSettingsCachesOtherSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, nil) + + for i := 0; i < 2; i++ { + settings, err := cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) + } +} + +func (s *RelationCacheSuite) TestPrunePreservesMemberSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, []string{"foo/2"}) + + settings, err := cache.Settings("foo/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"foo/2"}) + + cache.Prune([]string{"foo/2"}) + settings, err = cache.Settings("foo/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"foo/2"}) +} + +func (s *RelationCacheSuite) TestPruneUncachesOtherSettings(c *gc.C) { + s.results = []settingsResult{{ + params.Settings{"foo": "bar"}, nil, + }, { + params.Settings{"baz": "qux"}, nil, + }} + cache := context.NewRelationCache(s.ReadSettings, nil) + + settings, err := cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"foo": "bar"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2"}) + + cache.Prune(nil) + settings, err = cache.Settings("x/2") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, jc.DeepEquals, params.Settings{"baz": "qux"}) + c.Assert(s.calls, jc.DeepEquals, []string{"x/2", "x/2"}) +} === added file 
'src/github.com/juju/juju/worker/uniter/runner/context/context.go' --- src/github.com/juju/juju/worker/uniter/runner/context/context.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/context.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,787 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package context contains the ContextFactory and Context definitions. Context implements +// jujuc.Context and is used together with uniter.Runner to run hooks, commands and actions. +package context + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "github.com/juju/utils/clock" + "github.com/juju/utils/proxy" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/api/base" + "github.com/juju/juju/api/uniter" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +// Paths exposes the paths needed by Context. +type Paths interface { + + // GetToolsDir returns the filesystem path to the directory containing + // the hook tool symlinks. + GetToolsDir() string + + // GetCharmDir returns the filesystem path to the directory in which + // the charm is installed. + GetCharmDir() string + + // GetJujucSocket returns the path to the socket used by the hook tools + // to communicate back to the executing uniter process. It might be a + // filesystem path, or it might be abstract. + GetJujucSocket() string + + // GetMetricsSpoolDir returns the path to a metrics spool dir, used + // to store metrics recorded during a single hook run. + GetMetricsSpoolDir() string + + // ComponentDir returns the filesystem path to the directory + // containing all data files for a component. + ComponentDir(name string) string +} + +var logger = loggo.GetLogger("juju.worker.uniter.context") +var mutex = sync.Mutex{} + +// ErrIsNotLeader is returned by leader-only operations when this unit is not +// the service leader. +var ErrIsNotLeader = errors.Errorf("this unit is not the leader") + +// ComponentConfig holds all the information related to a hook context +// needed by components. +type ComponentConfig struct { + // UnitName is the name of the unit. + UnitName string + // DataDir is the component's data directory. + DataDir string + // APICaller is the API caller the component may use. + APICaller base.APICaller +} + +// ComponentFunc is a factory function for Context components. +type ComponentFunc func(ComponentConfig) (jujuc.ContextComponent, error) + +var registeredComponentFuncs = map[string]ComponentFunc{} + +// RegisterComponentFunc adds the named component factory func to the registry. +func RegisterComponentFunc(name string, f ComponentFunc) error { + if _, ok := registeredComponentFuncs[name]; ok { + return errors.AlreadyExistsf("%s", name) + } + registeredComponentFuncs[name] = f + return nil +} + +// meterStatus describes the unit's meter status. +type meterStatus struct { + code string + info string +} + +// HookProcess is an interface representing a process running a hook. +type HookProcess interface { + Pid() int + Kill() error +} + +// HookContext is the implementation of jujuc.Context. +type HookContext struct { + unit *uniter.Unit + + // state is the handle to the uniter State so that HookContext can make + // API calls on the state service. + // NOTE: We would like to be rid of the fake-remote-Unit and switch + // over fully to API calls on State. This adds that ability, but we're + // not fully there yet.
+ state *uniter.State + + // LeadershipContext supplies several jujuc.Context methods. + LeadershipContext + + // privateAddress is the cached value of the unit's private + // address. + privateAddress string + + // publicAddress is the cached value of the unit's public + // address. + publicAddress string + + // availabilityzone is the cached value of the unit's availability zone name. + availabilityzone string + + // configSettings holds the service configuration. + configSettings charm.Settings + + // id identifies the context. + id string + + // actionData contains the values relevant to the run of an Action: + // its tag, its parameters, and its results. + actionData *ActionData + + // uuid is the universally unique identifier of the environment. + uuid string + + // envName is the human friendly name of the environment. + envName string + + // unitName is the human friendly name of the local unit. + unitName string + + // status is the status of the local unit. + status *jujuc.StatusInfo + + // relationId identifies the relation for which a relation hook is + // executing. If it is -1, the context is not running a relation hook; + // otherwise, its value must be a valid key into the relations map. + relationId int + + // remoteUnitName identifies the changing unit of the executing relation + // hook. It will be empty if the context is not running a relation hook, + // or if it is running a relation-broken hook. + remoteUnitName string + + // relations contains the context for every relation the unit is a member + // of, keyed on relation id. + relations map[int]*ContextRelation + + // apiAddrs contains the API server addresses. + apiAddrs []string + + // proxySettings are the current proxy settings that the uniter knows about. + proxySettings proxy.Settings + + // meterStatus is the status of the unit's metering. + meterStatus *meterStatus + + // pendingPorts contains a list of port ranges to be opened or + // closed when the current hook is committed. + pendingPorts map[PortRange]PortRangeInfo + + // machinePorts contains cached information about all opened port + // ranges on the unit's assigned machine, mapped to the unit that + // opened each range and the relevant relation. + machinePorts map[network.PortRange]params.RelationUnit + + // assignedMachineTag contains the tag of the unit's assigned + // machine. + assignedMachineTag names.MachineTag + + // process is the process of the command that is being run in the local context, + // like a juju-run command or a hook. + process HookProcess + + // rebootPriority tells us when the hook wants to reboot. If rebootPriority is + // jujuc.RebootNow, the hook will be killed and requeued. + rebootPriority jujuc.RebootPriority + + // storage provides access to the information about storage attached to the unit. + storage StorageContextAccessor + + // storageTag is the tag of the storage instance associated with the running hook. + storageTag names.StorageTag + + // hasRunStatusSet is true if a call to status-set was made during the + // invocation of a hook. + // This attribute is persisted to local uniter state at the end of the hook + // execution so that the uniter can ultimately decide if it needs to update + // a charm's workload status, or if the charm has already taken care of it. + hasRunStatusSet bool + + // storageAddConstraints is a collection of storage constraints + // keyed on storage name as specified in the charm.
+ // This collection will be added to the unit on successful + // hook run, so the actual add will happen in a flush. + storageAddConstraints map[string][]params.StorageConstraints + + // clock is used for any time operations. + clock clock.Clock + + componentDir func(string) string + componentFuncs map[string]ComponentFunc +} + +// Component implements jujuc.Context. +func (ctx *HookContext) Component(name string) (jujuc.ContextComponent, error) { + compCtxFunc, ok := ctx.componentFuncs[name] + if !ok { + return nil, errors.NotFoundf("context component %q", name) + } + + facade := ctx.state.Facade() + config := ComponentConfig{ + UnitName: ctx.unit.Name(), + DataDir: ctx.componentDir(name), + APICaller: facade.RawAPICaller(), + } + compCtx, err := compCtxFunc(config) + if err != nil { + return nil, errors.Trace(err) + } + return compCtx, nil +} + +func (ctx *HookContext) RequestReboot(priority jujuc.RebootPriority) error { + // Must set reboot priority first, because killing the hook + // process will trigger the completion of the hook. If killing + // the hook fails, then we can reset the priority. + ctx.SetRebootPriority(priority) + + var err error + if priority == jujuc.RebootNow { + // At this point, the hook should be running + err = ctx.killCharmHook() + } + + switch err { + case nil, ErrNoProcess: + // ErrNoProcess almost certainly means we are running in debug hooks + default: + ctx.SetRebootPriority(jujuc.RebootSkip) + } + return err +} + +func (ctx *HookContext) GetRebootPriority() jujuc.RebootPriority { + mutex.Lock() + defer mutex.Unlock() + return ctx.rebootPriority +} + +func (ctx *HookContext) SetRebootPriority(priority jujuc.RebootPriority) { + mutex.Lock() + defer mutex.Unlock() + ctx.rebootPriority = priority +} + +func (ctx *HookContext) GetProcess() HookProcess { + mutex.Lock() + defer mutex.Unlock() + return ctx.process +} + +func (ctx *HookContext) SetProcess(process HookProcess) { + mutex.Lock() + defer mutex.Unlock() + ctx.process = process +} + +func (ctx *HookContext) Id() string { + return ctx.id +} + +func (ctx *HookContext) UnitName() string { + return ctx.unitName +} + +// UnitStatus will return the status for the current Unit. +func (ctx *HookContext) UnitStatus() (*jujuc.StatusInfo, error) { + if ctx.status == nil { + var err error + status, err := ctx.unit.UnitStatus() + if err != nil { + return nil, err + } + ctx.status = &jujuc.StatusInfo{ + Status: string(status.Status), + Info: status.Info, + Data: status.Data, + } + } + return ctx.status, nil +} + +// ServiceStatus returns the status for the service and all the units on +// the service to which this context unit belongs, only if this unit is +// the leader. 
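[Editor's note — hypothetical call site, not part of the diff] The ServiceStatus implementation that follows, like SetServiceStatus, is leadership-gated; callers should treat ErrIsNotLeader as an expected condition rather than a failure:

    info, err := ctx.ServiceStatus()
    switch {
    case err == ErrIsNotLeader:
        // Not the leader: fall back to unit-level status.
    case err != nil:
        // A real failure (API error, etc.).
    default:
        // info.Service and info.Units describe the whole service.
        _ = info
    }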
+func (ctx *HookContext) ServiceStatus() (jujuc.ServiceStatusInfo, error) { + var err error + isLeader, err := ctx.IsLeader() + if err != nil { + return jujuc.ServiceStatusInfo{}, errors.Annotatef(err, "cannot determine leadership") + } + if !isLeader { + return jujuc.ServiceStatusInfo{}, ErrIsNotLeader + } + service, err := ctx.unit.Service() + if err != nil { + return jujuc.ServiceStatusInfo{}, errors.Trace(err) + } + status, err := service.Status(ctx.unit.Name()) + if err != nil { + return jujuc.ServiceStatusInfo{}, errors.Trace(err) + } + us := make([]jujuc.StatusInfo, len(status.Units)) + i := 0 + for t, s := range status.Units { + us[i] = jujuc.StatusInfo{ + Tag: t, + Status: string(s.Status), + Info: s.Info, + Data: s.Data, + } + i++ + } + return jujuc.ServiceStatusInfo{ + Service: jujuc.StatusInfo{ + Tag: service.Tag().String(), + Status: string(status.Service.Status), + Info: status.Service.Info, + Data: status.Service.Data, + }, + Units: us, + }, nil +} + +// SetUnitStatus will set the given status for this unit. +func (ctx *HookContext) SetUnitStatus(status jujuc.StatusInfo) error { + ctx.hasRunStatusSet = true + logger.Tracef("[WORKLOAD-STATUS] %s: %s", status.Status, status.Info) + return ctx.unit.SetUnitStatus( + params.Status(status.Status), + status.Info, + status.Data, + ) +} + +// SetServiceStatus will set the given status for the service to which this +// unit belongs, only if this unit is the leader. +func (ctx *HookContext) SetServiceStatus(status jujuc.StatusInfo) error { + logger.Tracef("[SERVICE-STATUS] %s: %s", status.Status, status.Info) + isLeader, err := ctx.IsLeader() + if err != nil { + return errors.Annotatef(err, "cannot determine leadership") + } + if !isLeader { + return ErrIsNotLeader + } + + service, err := ctx.unit.Service() + if err != nil { + return errors.Trace(err) + } + return service.SetStatus( + ctx.unit.Name(), + params.Status(status.Status), + status.Info, + status.Data, + ) +} + +func (ctx *HookContext) HasExecutionSetUnitStatus() bool { + return ctx.hasRunStatusSet +} + +func (ctx *HookContext) ResetExecutionSetUnitStatus() { + ctx.hasRunStatusSet = false +} + +func (ctx *HookContext) PublicAddress() (string, error) { + if ctx.publicAddress == "" { + return "", errors.NotFoundf("public address") + } + return ctx.publicAddress, nil +} + +func (ctx *HookContext) PrivateAddress() (string, error) { + if ctx.privateAddress == "" { + return "", errors.NotFoundf("private address") + } + return ctx.privateAddress, nil +} + +func (ctx *HookContext) AvailabilityZone() (string, error) { + if ctx.availabilityzone == "" { + return "", errors.NotFoundf("availability zone") + } + return ctx.availabilityzone, nil +} + +func (ctx *HookContext) StorageTags() ([]names.StorageTag, error) { + return ctx.storage.StorageTags() +} + +func (ctx *HookContext) HookStorage() (jujuc.ContextStorageAttachment, error) { + return ctx.Storage(ctx.storageTag) +} + +func (ctx *HookContext) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, error) { + return ctx.storage.Storage(tag) +} + +func (ctx *HookContext) AddUnitStorage(cons map[string]params.StorageConstraints) error { + // All storage constraints are accumulated before context is flushed. + if ctx.storageAddConstraints == nil { + ctx.storageAddConstraints = make( + map[string][]params.StorageConstraints, + len(cons)) + } + for storage, newConstraints := range cons { + // Multiple calls for the same storage are accumulated as well.
+ ctx.storageAddConstraints[storage] = append( + ctx.storageAddConstraints[storage], + newConstraints) + } + return nil +} + +func (ctx *HookContext) OpenPorts(protocol string, fromPort, toPort int) error { + return tryOpenPorts( + protocol, fromPort, toPort, + ctx.unit.Tag(), + ctx.machinePorts, ctx.pendingPorts, + ) +} + +func (ctx *HookContext) ClosePorts(protocol string, fromPort, toPort int) error { + return tryClosePorts( + protocol, fromPort, toPort, + ctx.unit.Tag(), + ctx.machinePorts, ctx.pendingPorts, + ) +} + +func (ctx *HookContext) OpenedPorts() []network.PortRange { + var unitRanges []network.PortRange + for portRange, relUnit := range ctx.machinePorts { + if relUnit.Unit == ctx.unit.Tag().String() { + unitRanges = append(unitRanges, portRange) + } + } + network.SortPortRanges(unitRanges) + return unitRanges +} + +func (ctx *HookContext) ConfigSettings() (charm.Settings, error) { + if ctx.configSettings == nil { + var err error + ctx.configSettings, err = ctx.unit.ConfigSettings() + if err != nil { + return nil, err + } + } + result := charm.Settings{} + for name, value := range ctx.configSettings { + result[name] = value + } + return result, nil +} + +// ActionName returns the name of the action. +func (ctx *HookContext) ActionName() (string, error) { + if ctx.actionData == nil { + return "", errors.New("not running an action") + } + return ctx.actionData.Name, nil +} + +// ActionParams simply returns the arguments to the Action. +func (ctx *HookContext) ActionParams() (map[string]interface{}, error) { + if ctx.actionData == nil { + return nil, errors.New("not running an action") + } + return ctx.actionData.Params, nil +} + +// SetActionMessage sets a message for the Action, usually an error message. +func (ctx *HookContext) SetActionMessage(message string) error { + if ctx.actionData == nil { + return errors.New("not running an action") + } + ctx.actionData.ResultsMessage = message + return nil +} + +// SetActionFailed sets the fail state of the action. +func (ctx *HookContext) SetActionFailed() error { + if ctx.actionData == nil { + return errors.New("not running an action") + } + ctx.actionData.Failed = true + return nil +} + +// UpdateActionResults inserts new values for use with action-set and +// action-fail. The results struct will be delivered to the controller +// upon completion of the Action. It returns an error if not called on an +// Action-containing HookContext. +func (ctx *HookContext) UpdateActionResults(keys []string, value string) error { + if ctx.actionData == nil { + return errors.New("not running an action") + } + addValueToMap(keys, value, ctx.actionData.ResultsMap) + return nil +} + +func (ctx *HookContext) HookRelation() (jujuc.ContextRelation, error) { + return ctx.Relation(ctx.relationId) +} + +func (ctx *HookContext) RemoteUnitName() (string, error) { + if ctx.remoteUnitName == "" { + return "", errors.NotFoundf("remote unit") + } + return ctx.remoteUnitName, nil +} + +func (ctx *HookContext) Relation(id int) (jujuc.ContextRelation, error) { + r, found := ctx.relations[id] + if !found { + return nil, errors.NotFoundf("relation") + } + return r, nil +} + +func (ctx *HookContext) RelationIds() ([]int, error) { + ids := []int{} + for id := range ctx.relations { + ids = append(ids, id) + } + return ids, nil +} + +// AddMetric adds metrics to the hook context. 
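[Editor's note — illustrative values, not part of the diff] The HookVars method further below assembles the hook's environment; for a relation hook the resulting entries look roughly like this (all values invented for illustration):

    JUJU_CHARM_DIR=/var/lib/juju/agents/unit-u-0/charm
    JUJU_CONTEXT_ID=u/0-relation-changed-5577006791947779410
    JUJU_UNIT_NAME=u/0
    JUJU_MODEL_UUID=01234567-89ab-cdef-0123-456789abcdef
    JUJU_MACHINE_ID=1
    JUJU_RELATION=db
    JUJU_RELATION_ID=db:1
    JUJU_REMOTE_UNIT=mysql/0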
+func (ctx *HookContext) AddMetric(key, value string, created time.Time) error { + return errors.New("metrics not allowed in this context") +} + +// ActionData returns the context's internal action data. It's meant to be +// transitory; it exists to allow uniter and runner code to keep working as +// it did; it should be considered deprecated, and not used by new clients. +func (c *HookContext) ActionData() (*ActionData, error) { + if c.actionData == nil { + return nil, errors.New("not running an action") + } + return c.actionData, nil +} + +// HookVars returns an os.Environ-style list of strings necessary to run a hook +// such that it can know what environment it's operating in, and can call back +// into context. +func (context *HookContext) HookVars(paths Paths) ([]string, error) { + vars := context.proxySettings.AsEnvironmentValues() + vars = append(vars, + "CHARM_DIR="+paths.GetCharmDir(), // legacy, embarrassing + "JUJU_CHARM_DIR="+paths.GetCharmDir(), + "JUJU_CONTEXT_ID="+context.id, + "JUJU_AGENT_SOCKET="+paths.GetJujucSocket(), + "JUJU_UNIT_NAME="+context.unitName, + "JUJU_MODEL_UUID="+context.uuid, + "JUJU_MODEL_NAME="+context.envName, + "JUJU_API_ADDRESSES="+strings.Join(context.apiAddrs, " "), + "JUJU_METER_STATUS="+context.meterStatus.code, + "JUJU_METER_INFO="+context.meterStatus.info, + "JUJU_MACHINE_ID="+context.assignedMachineTag.Id(), + "JUJU_AVAILABILITY_ZONE="+context.availabilityzone, + ) + if r, err := context.HookRelation(); err == nil { + vars = append(vars, + "JUJU_RELATION="+r.Name(), + "JUJU_RELATION_ID="+r.FakeId(), + "JUJU_REMOTE_UNIT="+context.remoteUnitName, + ) + } else if !errors.IsNotFound(err) { + return nil, errors.Trace(err) + } + if context.actionData != nil { + vars = append(vars, + "JUJU_ACTION_NAME="+context.actionData.Name, + "JUJU_ACTION_UUID="+context.actionData.Tag.Id(), + "JUJU_ACTION_TAG="+context.actionData.Tag.String(), + ) + } + return append(vars, OSDependentEnvVars(paths)...), nil +} + +func (ctx *HookContext) handleReboot(err *error) { + logger.Tracef("checking for reboot request") + rebootPriority := ctx.GetRebootPriority() + switch rebootPriority { + case jujuc.RebootSkip: + return + case jujuc.RebootAfterHook: + // Reboot should happen only after hook has finished. + if *err != nil { + return + } + *err = ErrReboot + case jujuc.RebootNow: + *err = ErrRequeueAndReboot + } + err2 := ctx.unit.SetUnitStatus(params.StatusRebooting, "", nil) + if err2 != nil { + logger.Errorf("updating agent status: %v", err2) + } + reqErr := ctx.unit.RequestReboot() + if reqErr != nil { + *err = reqErr + } +} + +// Prepare implements the Context interface. +func (ctx *HookContext) Prepare() error { + if ctx.actionData != nil { + err := ctx.state.ActionBegin(ctx.actionData.Tag) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +// Flush implements the Context interface. +func (ctx *HookContext) Flush(process string, ctxErr error) (err error) { + writeChanges := ctxErr == nil + + // In the case of Actions, handle any errors using finalizeAction. + if ctx.actionData != nil { + // If we had an error in err at this point, it's part of the + // normal behavior of an Action. Errors which happen during + // the finalize should be handed back to the uniter. Close + // over the existing err, clear it, and only return errors + // which occur during the finalize, e.g. API call errors. 
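// [Editor's note] Concretely: once the deferred finalizeAction below has run,
// any hook-side failure has been folded into the recorded action result, and
// only errors raised during finalization itself (e.g. the ActionFinish API
// call) propagate back to the uniter.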
+ defer func(ctxErr error) { + err = ctx.finalizeAction(ctxErr, err) + }(ctxErr) + ctxErr = nil + } else { + // TODO(gsamfira): Just for now, reboot will not be supported in actions. + defer ctx.handleReboot(&err) + } + + for id, rctx := range ctx.relations { + if writeChanges { + if e := rctx.WriteSettings(); e != nil { + e = errors.Errorf( + "could not write settings from %q to relation %d: %v", + process, id, e, + ) + logger.Errorf("%v", e) + if ctxErr == nil { + ctxErr = e + } + } + } + } + + for rangeKey, rangeInfo := range ctx.pendingPorts { + if writeChanges { + var e error + var op string + if rangeInfo.ShouldOpen { + e = ctx.unit.OpenPorts( + rangeKey.Ports.Protocol, + rangeKey.Ports.FromPort, + rangeKey.Ports.ToPort, + ) + op = "open" + } else { + e = ctx.unit.ClosePorts( + rangeKey.Ports.Protocol, + rangeKey.Ports.FromPort, + rangeKey.Ports.ToPort, + ) + op = "close" + } + if e != nil { + e = errors.Annotatef(e, "cannot %s %v", op, rangeKey.Ports) + logger.Errorf("%v", e) + if ctxErr == nil { + ctxErr = e + } + } + } + } + + // Add storage to the unit dynamically. + if len(ctx.storageAddConstraints) > 0 && writeChanges { + err := ctx.unit.AddStorage(ctx.storageAddConstraints) + if err != nil { + err = errors.Annotatef(err, "cannot add storage") + logger.Errorf("%v", err) + if ctxErr == nil { + ctxErr = err + } + } + } + + // TODO (tasdomas) 2014 09 03: context finalization needs to be modified to apply all + // changes in one API call to minimize the risk + // of partial failures. + + return ctxErr +} + +// finalizeAction passes back the final status of an Action hook to state. +// It wraps any errors which occurred in normal behavior of the Action run; +// only errors passed in unhandledErr will be returned. +func (ctx *HookContext) finalizeAction(err, unhandledErr error) error { + // TODO (binary132): synchronize with gsamfira's reboot logic + message := ctx.actionData.ResultsMessage + results := ctx.actionData.ResultsMap + tag := ctx.actionData.Tag + status := params.ActionCompleted + if ctx.actionData.Failed { + status = params.ActionFailed + } + + // If we had an action error, we'll simply encapsulate it in the response + // and discard the error state. Actions should not error the uniter. + if err != nil { + message = err.Error() + if IsMissingHookError(err) { + message = fmt.Sprintf("action not implemented on unit %q", ctx.unitName) + } + status = params.ActionFailed + } + + callErr := ctx.state.ActionFinish(tag, status, results, message) + if callErr != nil { + unhandledErr = errors.Wrap(unhandledErr, callErr) + } + return unhandledErr +} + +// killCharmHook tries to kill the current running charm hook. +func (ctx *HookContext) killCharmHook() error { + proc := ctx.GetProcess() + if proc == nil { + // nothing to kill + return ErrNoProcess + } + logger.Infof("trying to kill context process %v", proc.Pid()) + + tick := ctx.clock.After(0) + timeout := ctx.clock.After(30 * time.Second) + for { + // We repeatedly try to kill the process until we fail; this is + // because we don't control the *Process, and our clients expect + // to be able to Wait(); so we can't Wait. We could do better, + // but not with a single implementation across all platforms. + // TODO(gsamfira): come up with a better cross-platform approach.
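// [Editor's note] The loop below fires immediately (ctx.clock.After(0)) and
// then retries Kill every 100ms; an error from Kill is interpreted as the
// process already being gone. The 30-second timer armed above bounds the
// whole loop.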
+ select { + case <-tick: + err := proc.Kill() + if err != nil { + logger.Infof("kill returned: %s", err) + logger.Infof("assuming already killed") + return nil + } + case <-timeout: + return errors.Errorf("failed to kill context process %v", proc.Pid()) + } + logger.Infof("waiting for context process %v to die", proc.Pid()) + tick = ctx.clock.After(100 * time.Millisecond) + } +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/context_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/context_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/context_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,425 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context_test + +import ( + "errors" + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + "github.com/juju/juju/state" + "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type InterfaceSuite struct { + HookContextSuite + stub testing.Stub +} + +var _ = gc.Suite(&InterfaceSuite{}) + +func (s *InterfaceSuite) TestUnitName(c *gc.C) { + ctx := s.GetContext(c, -1, "") + c.Assert(ctx.UnitName(), gc.Equals, "u/0") +} + +func (s *InterfaceSuite) TestHookRelation(c *gc.C) { + ctx := s.GetContext(c, -1, "") + r, err := ctx.HookRelation() + c.Assert(err, gc.ErrorMatches, ".*") + c.Assert(r, gc.IsNil) +} + +func (s *InterfaceSuite) TestRemoteUnitName(c *gc.C) { + ctx := s.GetContext(c, -1, "") + name, err := ctx.RemoteUnitName() + c.Assert(err, gc.ErrorMatches, ".*") + c.Assert(name, gc.Equals, "") +} + +func (s *InterfaceSuite) TestRelationIds(c *gc.C) { + ctx := s.GetContext(c, -1, "") + relIds, err := ctx.RelationIds() + c.Assert(err, jc.ErrorIsNil) + c.Assert(relIds, gc.HasLen, 2) + r, err := ctx.Relation(0) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Name(), gc.Equals, "db") + c.Assert(r.FakeId(), gc.Equals, "db:0") + r, err = ctx.Relation(123) + c.Assert(err, gc.ErrorMatches, ".*") + c.Assert(r, gc.IsNil) +} + +func (s *InterfaceSuite) TestRelationContext(c *gc.C) { + ctx := s.GetContext(c, 1, "") + r, err := ctx.HookRelation() + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Name(), gc.Equals, "db") + c.Assert(r.FakeId(), gc.Equals, "db:1") +} + +func (s *InterfaceSuite) TestRelationContextWithRemoteUnitName(c *gc.C) { + ctx := s.GetContext(c, 1, "u/123") + name, err := ctx.RemoteUnitName() + c.Assert(err, jc.ErrorIsNil) + c.Assert(name, gc.Equals, "u/123") +} + +func (s *InterfaceSuite) TestAddingMetricsInWrongContext(c *gc.C) { + ctx := s.GetContext(c, 1, "u/123") + err := ctx.AddMetric("key", "123", time.Now()) + c.Assert(err, gc.ErrorMatches, "metrics not allowed in this context") +} + +func (s *InterfaceSuite) TestAvailabilityZone(c *gc.C) { + ctx := s.GetContext(c, -1, "") + zone, err := ctx.AvailabilityZone() + c.Check(err, jc.ErrorIsNil) + c.Check(zone, gc.Equals, "a-zone") +} + +func (s *InterfaceSuite) TestUnitStatus(c *gc.C) { + ctx := s.GetContext(c, -1, "") + defer context.PatchCachedStatus(ctx.(runner.Context), "maintenance", "working", map[string]interface{}{"hello": "world"})() + status, err := ctx.UnitStatus() + c.Check(err, jc.ErrorIsNil) + c.Check(status.Status, gc.Equals, "maintenance") + c.Check(status.Info, gc.Equals, "working") 
+ c.Check(status.Data, gc.DeepEquals, map[string]interface{}{"hello": "world"}) +} + +func (s *InterfaceSuite) TestSetUnitStatus(c *gc.C) { + ctx := s.GetContext(c, -1, "") + status := jujuc.StatusInfo{ + Status: "maintenance", + Info: "doing work", + } + err := ctx.SetUnitStatus(status) + c.Check(err, jc.ErrorIsNil) + unitStatus, err := ctx.UnitStatus() + c.Check(err, jc.ErrorIsNil) + c.Check(unitStatus.Status, gc.Equals, "maintenance") + c.Check(unitStatus.Info, gc.Equals, "doing work") + c.Check(unitStatus.Data, gc.DeepEquals, map[string]interface{}{}) +} + +func (s *InterfaceSuite) TestSetUnitStatusUpdatesFlag(c *gc.C) { + ctx := s.GetContext(c, -1, "") + c.Assert(ctx.(runner.Context).HasExecutionSetUnitStatus(), jc.IsFalse) + status := jujuc.StatusInfo{ + Status: "maintenance", + Info: "doing work", + } + err := ctx.SetUnitStatus(status) + c.Check(err, jc.ErrorIsNil) + c.Assert(ctx.(runner.Context).HasExecutionSetUnitStatus(), jc.IsTrue) +} + +func (s *InterfaceSuite) TestUnitStatusCaching(c *gc.C) { + ctx := s.GetContext(c, -1, "") + status, err := ctx.UnitStatus() + c.Check(err, jc.ErrorIsNil) + c.Check(status.Status, gc.Equals, "unknown") + c.Check(status.Data, gc.DeepEquals, map[string]interface{}{}) + + // Change remote state. + err = s.unit.SetStatus(state.StatusActive, "it works", nil) + c.Assert(err, jc.ErrorIsNil) + + // Local view is unchanged. + status, err = ctx.UnitStatus() + c.Check(err, jc.ErrorIsNil) + c.Check(status.Status, gc.Equals, "unknown") + c.Check(status.Data, gc.DeepEquals, map[string]interface{}{}) +} + +func (s *InterfaceSuite) TestUnitCaching(c *gc.C) { + ctx := s.GetContext(c, -1, "") + pr, err := ctx.PrivateAddress() + c.Assert(err, jc.ErrorIsNil) + c.Assert(pr, gc.Equals, "u-0.testing.invalid") + pa, err := ctx.PublicAddress() + c.Assert(err, jc.ErrorIsNil) + // Initially the public address is the same as the private address since + // the "most public" address is chosen. + c.Assert(pr, gc.Equals, pa) + + // Change remote state. + err = s.machine.SetProviderAddresses( + network.NewScopedAddress("blah.testing.invalid", network.ScopePublic), + ) + c.Assert(err, jc.ErrorIsNil) + + // Local view is unchanged. + pr, err = ctx.PrivateAddress() + c.Assert(err, jc.ErrorIsNil) + c.Assert(pr, gc.Equals, "u-0.testing.invalid") + pa, err = ctx.PublicAddress() + c.Assert(err, jc.ErrorIsNil) + c.Assert(pr, gc.Equals, pa) +} + +func (s *InterfaceSuite) TestConfigCaching(c *gc.C) { + ctx := s.GetContext(c, -1, "") + settings, err := ctx.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{"blog-title": "My Title"}) + + // Change remote config. + err = s.service.UpdateConfigSettings(charm.Settings{ + "blog-title": "Something Else", + }) + c.Assert(err, jc.ErrorIsNil) + + // Local view is not changed. + settings, err = ctx.ConfigSettings() + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, charm.Settings{"blog-title": "My Title"}) +} + +// TestNonActionCallsToActionMethodsFail does exactly what its name says: +// it simply makes sure that Action-related calls to HookContexts with a nil +// actionData member error out correctly. 
+func (s *InterfaceSuite) TestNonActionCallsToActionMethodsFail(c *gc.C) { + ctx := context.HookContext{} + _, err := ctx.ActionParams() + c.Check(err, gc.ErrorMatches, "not running an action") + err = ctx.SetActionFailed() + c.Check(err, gc.ErrorMatches, "not running an action") + err = ctx.SetActionMessage("foo") + c.Check(err, gc.ErrorMatches, "not running an action") + err = ctx.UpdateActionResults([]string{"1", "2", "3"}, "value") + c.Check(err, gc.ErrorMatches, "not running an action") +} + +// TestUpdateActionResults demonstrates that UpdateActionResults functions +// as expected. +func (s *InterfaceSuite) TestUpdateActionResults(c *gc.C) { + tests := []struct { + initial map[string]interface{} + keys []string + value string + expected map[string]interface{} + }{{ + initial: map[string]interface{}{}, + keys: []string{"foo"}, + value: "bar", + expected: map[string]interface{}{ + "foo": "bar", + }, + }, { + initial: map[string]interface{}{ + "foo": "bar", + }, + keys: []string{"foo", "bar"}, + value: "baz", + expected: map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": "baz", + }, + }, + }, { + initial: map[string]interface{}{ + "foo": map[string]interface{}{ + "bar": "baz", + }, + }, + keys: []string{"foo"}, + value: "bar", + expected: map[string]interface{}{ + "foo": "bar", + }, + }} + + for i, t := range tests { + c.Logf("UpdateActionResults test %d: %#v: %#v", i, t.keys, t.value) + hctx := context.GetStubActionContext(t.initial) + err := hctx.UpdateActionResults(t.keys, t.value) + c.Assert(err, jc.ErrorIsNil) + actionData, err := hctx.ActionData() + c.Assert(err, jc.ErrorIsNil) + c.Assert(actionData.ResultsMap, jc.DeepEquals, t.expected) + } +} + +// TestSetActionFailed ensures SetActionFailed works properly. +func (s *InterfaceSuite) TestSetActionFailed(c *gc.C) { + hctx := context.GetStubActionContext(nil) + err := hctx.SetActionFailed() + c.Assert(err, jc.ErrorIsNil) + actionData, err := hctx.ActionData() + c.Assert(err, jc.ErrorIsNil) + c.Check(actionData.Failed, jc.IsTrue) +} + +// TestSetActionMessage ensures SetActionMessage works properly. +func (s *InterfaceSuite) TestSetActionMessage(c *gc.C) { + hctx := context.GetStubActionContext(nil) + err := hctx.SetActionMessage("because reasons") + c.Assert(err, jc.ErrorIsNil) + actionData, err := hctx.ActionData() + c.Check(err, jc.ErrorIsNil) + c.Check(actionData.ResultsMessage, gc.Equals, "because reasons") +} + +func (s *InterfaceSuite) TestRequestRebootAfterHook(c *gc.C) { + var killed bool + p := &mockProcess{func() error { + killed = true + return nil + }} + ctx := s.GetContext(c, -1, "").(*context.HookContext) + ctx.SetProcess(p) + err := ctx.RequestReboot(jujuc.RebootAfterHook) + c.Assert(err, jc.ErrorIsNil) + c.Assert(killed, jc.IsFalse) + priority := ctx.GetRebootPriority() + c.Assert(priority, gc.Equals, jujuc.RebootAfterHook) +} + +func (s *InterfaceSuite) TestRequestRebootNow(c *gc.C) { + ctx := s.GetContext(c, -1, "").(*context.HookContext) + + var stub testing.Stub + var p *mockProcess + p = &mockProcess{func() error { + // Reboot priority should be set before the process + // is killed, or else the client waiting for the + // process to exit will race with the setting of + // the priority. 
+ priority := ctx.GetRebootPriority() + c.Assert(priority, gc.Equals, jujuc.RebootNow) + return stub.NextErr() + }} + stub.SetErrors(errors.New("process is already dead")) + ctx.SetProcess(p) + + err := ctx.RequestReboot(jujuc.RebootNow) + c.Assert(err, jc.ErrorIsNil) + + // Everything went well, so priority should still be RebootNow. + priority := ctx.GetRebootPriority() + c.Assert(priority, gc.Equals, jujuc.RebootNow) +} + +func (s *InterfaceSuite) TestRequestRebootNowTimeout(c *gc.C) { + ctx := s.GetContext(c, -1, "").(*context.HookContext) + + var advanced bool + var p *mockProcess + p = &mockProcess{func() error { + // Reboot priority should be set before the process + // is killed, or else the client waiting for the + // process to exit will race with the setting of + // the priority. + priority := ctx.GetRebootPriority() + c.Assert(priority, gc.Equals, jujuc.RebootNow) + if !advanced { + advanced = true + s.clock.Advance(time.Hour) // force timeout + } + return nil + }} + ctx.SetProcess(p) + + err := ctx.RequestReboot(jujuc.RebootNow) + c.Assert(err, gc.ErrorMatches, "failed to kill context process 123") + + // RequestReboot failed, so priority should revert to RebootSkip. + priority := ctx.GetRebootPriority() + c.Assert(priority, gc.Equals, jujuc.RebootSkip) +} + +func (s *InterfaceSuite) TestRequestRebootNowNoProcess(c *gc.C) { + // A normal hook run or a juju-run command will record the *os.Process + // object of the running command, in HookContext. When requesting a + // reboot with the --now flag, the process is killed and only + // then will we set the reboot priority. This test basically simulates + // the case when the process calling juju-reboot is not recorded. + ctx := context.HookContext{} + err := ctx.RequestReboot(jujuc.RebootNow) + c.Assert(err, gc.ErrorMatches, "no process to kill") + priority := ctx.GetRebootPriority() + c.Assert(priority, gc.Equals, jujuc.RebootNow) +} + +func (s *InterfaceSuite) TestStorageAddConstraints(c *gc.C) { + expected := map[string][]params.StorageConstraints{ + "data": []params.StorageConstraints{ + params.StorageConstraints{}, + }, + } + + ctx := context.HookContext{} + addStorageToContext(&ctx, "data", params.StorageConstraints{}) + assertStorageAddInContext(c, ctx, expected) +} + +var two = uint64(2) + +func (s *InterfaceSuite) TestStorageAddConstraintsSameStorage(c *gc.C) { + expected := map[string][]params.StorageConstraints{ + "data": []params.StorageConstraints{ + params.StorageConstraints{}, + params.StorageConstraints{Count: &two}, + }, + } + + ctx := context.HookContext{} + addStorageToContext(&ctx, "data", params.StorageConstraints{}) + addStorageToContext(&ctx, "data", params.StorageConstraints{Count: &two}) + assertStorageAddInContext(c, ctx, expected) +} + +func (s *InterfaceSuite) TestStorageAddConstraintsDifferentStorage(c *gc.C) { + expected := map[string][]params.StorageConstraints{ + "data": []params.StorageConstraints{params.StorageConstraints{}}, + "diff": []params.StorageConstraints{ + params.StorageConstraints{Count: &two}}, + } + + ctx := context.HookContext{} + addStorageToContext(&ctx, "data", params.StorageConstraints{}) + addStorageToContext(&ctx, "diff", params.StorageConstraints{Count: &two}) + assertStorageAddInContext(c, ctx, expected) +} + +func addStorageToContext(ctx *context.HookContext, + name string, + cons params.StorageConstraints, +) { + addOne := map[string]params.StorageConstraints{name: cons} + ctx.AddUnitStorage(addOne) +} + +func assertStorageAddInContext(c *gc.C, + ctx context.HookContext, 
expected map[string][]params.StorageConstraints,
+) {
+	obtained := context.StorageAddConstraints(&ctx)
+	c.Assert(len(obtained), gc.Equals, len(expected))
+	for k, v := range obtained {
+		c.Assert(v, jc.SameContents, expected[k])
+	}
+}
+
+type mockProcess struct {
+	kill func() error
+}
+
+func (p *mockProcess) Kill() error {
+	return p.kill()
+}
+
+func (p *mockProcess) Pid() int {
+	return 123
+} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/contextfactory.go' --- src/github.com/juju/juju/worker/uniter/runner/context/contextfactory.go	1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/contextfactory.go	2016-03-22 15:18:22 +0000 @@ -0,0 +1,354 @@ +// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"github.com/juju/utils/clock"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/api/uniter"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/core/leadership"
+	"github.com/juju/juju/worker/uniter/hook"
+	"github.com/juju/juju/worker/uniter/runner/jujuc"
+)
+
+// CommandInfo specifies the information necessary to run a command.
+type CommandInfo struct {
+	// RelationId is the relation context to execute the commands in.
+	RelationId int
+	// RemoteUnitName is the remote unit for the relation context.
+	RemoteUnitName string
+	// ForceRemoteUnit skips unit inference and existence validation.
+	ForceRemoteUnit bool
+}
+
+// ContextFactory represents a long-lived object that can create execution contexts
+// relevant to a specific unit.
+type ContextFactory interface {
+	// CommandContext creates a new context for running a juju command.
+	CommandContext(commandInfo CommandInfo) (*HookContext, error)
+
+	// HookContext creates a new context for running a juju hook.
+	HookContext(hookInfo hook.Info) (*HookContext, error)
+
+	// ActionContext creates a new context for running a juju action.
+	ActionContext(actionData *ActionData) (*HookContext, error)
+}
+
+// StorageContextAccessor is an interface providing access to StorageContexts
+// for a jujuc.Context.
+type StorageContextAccessor interface {
+
+	// StorageTags returns the tags of storage instances attached to
+	// the unit.
+	StorageTags() ([]names.StorageTag, error)
+
+	// Storage returns the jujuc.ContextStorageAttachment with the
+	// supplied tag if it was found, and whether it was found.
+	Storage(names.StorageTag) (jujuc.ContextStorageAttachment, error)
+}
+
+// RelationsFunc is used to get snapshots of relation membership at context
+// creation time.
+type RelationsFunc func() map[int]*RelationInfo
+
+type contextFactory struct {
+	// API connection fields; unit should be deprecated, but isn't yet.
+	unit    *uniter.Unit
+	state   *uniter.State
+	tracker leadership.Tracker
+
+	// Fields that shouldn't change in a factory's lifetime.
+	paths      Paths
+	modelUUID  string
+	envName    string
+	machineTag names.MachineTag
+	storage    StorageContextAccessor
+	clock      clock.Clock
+	zone       string
+
+	// Callback to get relation state snapshot.
+	getRelationInfos RelationsFunc
+	relationCaches   map[int]*RelationCache
+
+	// For generating "unique" context ids.
+	rand *rand.Rand
+}
+
+// NewContextFactory returns a ContextFactory capable of creating execution
+// contexts backed by the supplied unit's API connection.
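+//
+// A typical construction looks roughly like the following (an illustrative
+// sketch only: `st` stands for an established uniter API connection, and
+// the other arguments are whatever the calling worker already holds):
+//
+//	f, err := NewContextFactory(st, unitTag, tracker, getRelationInfos,
+//		storageAccessor, paths, clock.WallClock)
+//	if err != nil {
+//		return errors.Trace(err)
+//	}
+//	ctx, err := f.HookContext(hook.Info{Kind: hooks.Install})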
+func NewContextFactory( + state *uniter.State, + unitTag names.UnitTag, + tracker leadership.Tracker, + getRelationInfos RelationsFunc, + storage StorageContextAccessor, + paths Paths, + clock clock.Clock, +) ( + ContextFactory, error, +) { + unit, err := state.Unit(unitTag) + if err != nil { + return nil, errors.Trace(err) + } + machineTag, err := unit.AssignedMachine() + if err != nil { + return nil, errors.Trace(err) + } + model, err := state.Model() + if err != nil { + return nil, errors.Trace(err) + } + + zone, err := unit.AvailabilityZone() + if err != nil { + return nil, errors.Trace(err) + } + f := &contextFactory{ + unit: unit, + state: state, + tracker: tracker, + paths: paths, + modelUUID: model.UUID(), + envName: model.Name(), + machineTag: machineTag, + getRelationInfos: getRelationInfos, + relationCaches: map[int]*RelationCache{}, + storage: storage, + rand: rand.New(rand.NewSource(time.Now().Unix())), + clock: clock, + zone: zone, + } + return f, nil +} + +// newId returns a probably-unique identifier for a new context, containing the +// supplied string. +func (f *contextFactory) newId(name string) string { + return fmt.Sprintf("%s-%s-%d", f.unit.Name(), name, f.rand.Int63()) +} + +// coreContext creates a new context with all unspecialised fields filled in. +func (f *contextFactory) coreContext() (*HookContext, error) { + leadershipContext := newLeadershipContext( + f.state.LeadershipSettings, + f.tracker, + ) + ctx := &HookContext{ + unit: f.unit, + state: f.state, + LeadershipContext: leadershipContext, + uuid: f.modelUUID, + envName: f.envName, + unitName: f.unit.Name(), + assignedMachineTag: f.machineTag, + relations: f.getContextRelations(), + relationId: -1, + pendingPorts: make(map[PortRange]PortRangeInfo), + storage: f.storage, + clock: f.clock, + componentDir: f.paths.ComponentDir, + componentFuncs: registeredComponentFuncs, + availabilityzone: f.zone, + } + if err := f.updateContext(ctx); err != nil { + return nil, err + } + return ctx, nil +} + +// ActionContext is part of the ContextFactory interface. +func (f *contextFactory) ActionContext(actionData *ActionData) (*HookContext, error) { + if actionData == nil { + return nil, errors.New("nil actionData specified") + } + ctx, err := f.coreContext() + if err != nil { + return nil, errors.Trace(err) + } + ctx.actionData = actionData + ctx.id = f.newId(actionData.Name) + return ctx, nil +} + +// HookContext is part of the ContextFactory interface. +func (f *contextFactory) HookContext(hookInfo hook.Info) (*HookContext, error) { + ctx, err := f.coreContext() + if err != nil { + return nil, errors.Trace(err) + } + hookName := string(hookInfo.Kind) + if hookInfo.Kind.IsRelation() { + ctx.relationId = hookInfo.RelationId + ctx.remoteUnitName = hookInfo.RemoteUnit + relation, found := ctx.relations[hookInfo.RelationId] + if !found { + return nil, errors.Errorf("unknown relation id: %v", hookInfo.RelationId) + } + if hookInfo.Kind == hooks.RelationDeparted { + relation.cache.RemoveMember(hookInfo.RemoteUnit) + } else if hookInfo.RemoteUnit != "" { + // Clear remote settings cache for changing remote unit. 
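+			// (RemoveMember above drops a departed unit from the
+			// cache entirely, whereas InvalidateMember only discards
+			// the unit's cached settings so the next read fetches
+			// fresh values.)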
+			relation.cache.InvalidateMember(hookInfo.RemoteUnit)
+		}
+		hookName = fmt.Sprintf("%s-%s", relation.Name(), hookInfo.Kind)
+	}
+	if hookInfo.Kind.IsStorage() {
+		ctx.storageTag = names.NewStorageTag(hookInfo.StorageId)
+		if _, err := ctx.storage.Storage(ctx.storageTag); err != nil {
+			return nil, errors.Annotatef(err, "could not retrieve storage for id: %v", hookInfo.StorageId)
+		}
+		storageName, err := names.StorageName(hookInfo.StorageId)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		hookName = fmt.Sprintf("%s-%s", storageName, hookName)
+	}
+	ctx.id = f.newId(hookName)
+	return ctx, nil
+}
+
+// CommandContext is part of the ContextFactory interface.
+func (f *contextFactory) CommandContext(commandInfo CommandInfo) (*HookContext, error) {
+	ctx, err := f.coreContext()
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	relationId, remoteUnitName, err := inferRemoteUnit(ctx.relations, commandInfo)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	ctx.relationId = relationId
+	ctx.remoteUnitName = remoteUnitName
+	ctx.id = f.newId("run-commands")
+	return ctx, nil
+}
+
+// getContextRelations updates the factory's relation caches, and uses them
+// to construct ContextRelations for a fresh context.
+func (f *contextFactory) getContextRelations() map[int]*ContextRelation {
+	contextRelations := map[int]*ContextRelation{}
+	relationInfos := f.getRelationInfos()
+	relationCaches := map[int]*RelationCache{}
+	for id, info := range relationInfos {
+		relationUnit := info.RelationUnit
+		memberNames := info.MemberNames
+		cache, found := f.relationCaches[id]
+		if found {
+			cache.Prune(memberNames)
+		} else {
+			cache = NewRelationCache(relationUnit.ReadSettings, memberNames)
+		}
+		relationCaches[id] = cache
+		contextRelations[id] = NewContextRelation(relationUnit, cache)
+	}
+	f.relationCaches = relationCaches
+	return contextRelations
+}
+
+// updateContext fills in all unspecialized fields that require an API call to
+// discover.
+//
+// Approximately *every* line of code in this function represents a bug: i.e., some
+// piece of information we expose to the charm but which we fail to report changes
+// to via hooks. Furthermore, the fact that we make multiple API calls at this
+// time, rather than grabbing everything we need in one go, is unforgivably yucky.
+func (f *contextFactory) updateContext(ctx *HookContext) (err error) {
+	// The deferred closure must reassign the named return value for the
+	// trace to take effect; a bare `defer errors.Trace(err)` would be a no-op.
+	defer func() { err = errors.Trace(err) }()
+
+	ctx.apiAddrs, err = f.state.APIAddresses()
+	if err != nil {
+		return err
+	}
+	ctx.machinePorts, err = f.state.AllMachinePorts(f.machineTag)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	statusCode, statusInfo, err := f.unit.MeterStatus()
+	if err != nil {
+		return errors.Annotate(err, "could not retrieve meter status for unit")
+	}
+	ctx.meterStatus = &meterStatus{
+		code: statusCode,
+		info: statusInfo,
+	}
+
+	// TODO(fwereade) 23-10-2014 bug 1384572
+	// Nothing here should ever be getting the environ config directly.
+	environConfig, err := f.state.ModelConfig()
+	if err != nil {
+		return err
+	}
+	ctx.proxySettings = environConfig.ProxySettings()
+
+	// Calling these last, because there's a potential race: they're not guaranteed
+	// to be set in time to be needed for a hook. If they're not, we just leave them
+	// unset as we always have; this isn't great but it's about behaviour preservation.
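+	// Only the specific "no address set" API error is swallowed below;
+	// any other failure still aborts context creation.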
+ ctx.publicAddress, err = f.unit.PublicAddress() + if err != nil && !params.IsCodeNoAddressSet(err) { + return err + } + ctx.privateAddress, err = f.unit.PrivateAddress() + if err != nil && !params.IsCodeNoAddressSet(err) { + return err + } + return nil +} + +func inferRemoteUnit(rctxs map[int]*ContextRelation, info CommandInfo) (int, string, error) { + relationId := info.RelationId + hasRelation := relationId != -1 + remoteUnit := info.RemoteUnitName + hasRemoteUnit := remoteUnit != "" + + // Check baseline sanity of remote unit, if supplied. + if hasRemoteUnit { + if !names.IsValidUnit(remoteUnit) { + return -1, "", errors.Errorf(`invalid remote unit: %s`, remoteUnit) + } else if !hasRelation { + return -1, "", errors.Errorf("remote unit provided without a relation: %s", remoteUnit) + } + } + + // Check sanity of relation, if supplied, otherwise easy early return. + if !hasRelation { + return relationId, remoteUnit, nil + } + rctx, found := rctxs[relationId] + if !found { + return -1, "", errors.Errorf("unknown relation id: %d", relationId) + } + + // Past basic sanity checks; if forced, accept what we're given. + if info.ForceRemoteUnit { + return relationId, remoteUnit, nil + } + + // Infer an appropriate remote unit if we can. + possibles := rctx.UnitNames() + if remoteUnit == "" { + switch len(possibles) { + case 0: + return -1, "", errors.Errorf("cannot infer remote unit in empty relation %d", relationId) + case 1: + return relationId, possibles[0], nil + } + return -1, "", errors.Errorf("ambiguous remote unit; possibilities are %+v", possibles) + } + for _, possible := range possibles { + if remoteUnit == possible { + return relationId, remoteUnit, nil + } + } + return -1, "", errors.Errorf("unknown remote unit %s; possibilities are %+v", remoteUnit, possibles) +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/contextfactory_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,449 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package context_test
+
+import (
+	"os"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils"
+	"github.com/juju/utils/fs"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable/hooks"
+
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/core/leadership"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/storage"
+	"github.com/juju/juju/testcharms"
+	coretesting "github.com/juju/juju/testing"
+	"github.com/juju/juju/worker/uniter/hook"
+	"github.com/juju/juju/worker/uniter/runner/context"
+	runnertesting "github.com/juju/juju/worker/uniter/runner/testing"
+)
+
+type ContextFactorySuite struct {
+	HookContextSuite
+	paths      runnertesting.RealPaths
+	factory    context.ContextFactory
+	membership map[int][]string
+}
+
+var _ = gc.Suite(&ContextFactorySuite{})
+
+func (s *ContextFactorySuite) SetUpTest(c *gc.C) {
+	s.HookContextSuite.SetUpTest(c)
+	s.paths = runnertesting.NewRealPaths(c)
+	s.membership = map[int][]string{}
+
+	contextFactory, err := context.NewContextFactory(
+		s.uniter,
+		s.unit.Tag().(names.UnitTag),
+		runnertesting.FakeTracker{},
+		s.getRelationInfos,
+		s.storage,
+		s.paths,
+		coretesting.NewClock(time.Time{}),
+	)
+	c.Assert(err, jc.ErrorIsNil)
+	s.factory = contextFactory
+}
+
+func (s *ContextFactorySuite) setUpCacheMethods(c *gc.C) {
+	// The factory's caches are created lazily, so it doesn't have any at all to
+	// begin with. Creating and discarding a context lets us call updateCache
+	// without panicking. (IMO this is less invasive than making updateCache
+	// responsible for creating missing caches etc.)
+	_, err := s.factory.HookContext(hook.Info{Kind: hooks.Install})
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *ContextFactorySuite) updateCache(relId int, unitName string, settings params.Settings) {
+	context.UpdateCachedSettings(s.factory, relId, unitName, settings)
+}
+
+func (s *ContextFactorySuite) getCache(relId int, unitName string) (params.Settings, bool) {
+	return context.CachedSettings(s.factory, relId, unitName)
+}
+
+func (s *ContextFactorySuite) SetCharm(c *gc.C, name string) {
+	err := os.RemoveAll(s.paths.GetCharmDir())
+	c.Assert(err, jc.ErrorIsNil)
+	err = fs.Copy(testcharms.Repo.CharmDirPath(name), s.paths.GetCharmDir())
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *ContextFactorySuite) getRelationInfos() map[int]*context.RelationInfo {
+	info := map[int]*context.RelationInfo{}
+	for relId, relUnit := range s.apiRelunits {
+		info[relId] = &context.RelationInfo{
+			RelationUnit: relUnit,
+			MemberNames:  s.membership[relId],
+		}
+	}
+	return info
+}
+
+func (s *ContextFactorySuite) testLeadershipContextWiring(c *gc.C, createContext func() *context.HookContext) {
+	var stub testing.Stub
+	stub.SetErrors(errors.New("bam"))
+	restore := context.PatchNewLeadershipContext(
+		func(accessor context.LeadershipSettingsAccessor, tracker leadership.Tracker) context.LeadershipContext {
+			stub.AddCall("NewLeadershipContext", accessor, tracker)
+			return &StubLeadershipContext{Stub: &stub}
+		},
+	)
+	defer restore()
+
+	ctx := createContext()
+	isLeader, err := ctx.IsLeader()
+	c.Check(err, gc.ErrorMatches, "bam")
+	c.Check(isLeader, jc.IsFalse)
+
+	stub.CheckCalls(c, []testing.StubCall{{
+		FuncName: "NewLeadershipContext",
+		Args:     []interface{}{s.uniter.LeadershipSettings, runnertesting.FakeTracker{}},
+	}, {
+		FuncName: "IsLeader",
+	}})
+
+}
+
+func (s *ContextFactorySuite) TestNewHookContextLeadershipContext(c *gc.C) {
+	
s.testLeadershipContextWiring(c, func() *context.HookContext {
+		ctx, err := s.factory.HookContext(hook.Info{Kind: hooks.ConfigChanged})
+		c.Assert(err, jc.ErrorIsNil)
+		return ctx
+	})
+}
+
+func (s *ContextFactorySuite) TestNewCommandContextLeadershipContext(c *gc.C) {
+	s.testLeadershipContextWiring(c, func() *context.HookContext {
+		ctx, err := s.factory.CommandContext(context.CommandInfo{RelationId: -1})
+		c.Assert(err, jc.ErrorIsNil)
+		return ctx
+	})
}
+
+func (s *ContextFactorySuite) TestNewActionContextLeadershipContext(c *gc.C) {
+	s.testLeadershipContextWiring(c, func() *context.HookContext {
+		s.SetCharm(c, "dummy")
+		action, err := s.State.EnqueueAction(s.unit.Tag(), "snapshot", nil)
+		c.Assert(err, jc.ErrorIsNil)
+
+		actionData := &context.ActionData{
+			Name:       action.Name(),
+			Tag:        names.NewActionTag(action.Id()),
+			Params:     action.Parameters(),
+			ResultsMap: map[string]interface{}{},
+		}
+
+		ctx, err := s.factory.ActionContext(actionData)
+		c.Assert(err, jc.ErrorIsNil)
+		return ctx
+	})
+}
+
+func (s *ContextFactorySuite) TestRelationHookContext(c *gc.C) {
+	hi := hook.Info{
+		Kind:       hooks.RelationBroken,
+		RelationId: 1,
+	}
+	ctx, err := s.factory.HookContext(hi)
+	c.Assert(err, jc.ErrorIsNil)
+	s.AssertCoreContext(c, ctx)
+	s.AssertNotActionContext(c, ctx)
+	s.AssertRelationContext(c, ctx, 1, "")
+	s.AssertNotStorageContext(c, ctx)
+}
+
+func (s *ContextFactorySuite) TestNewHookContextWithStorage(c *gc.C) {
+	// We need to set up a unit that has storage metadata defined.
+	ch := s.AddTestingCharm(c, "storage-block")
+	sCons := map[string]state.StorageConstraints{
+		"data": {Pool: "", Size: 1024, Count: 1},
+	}
+	service := s.AddTestingServiceWithStorage(c, "storage-block", ch, sCons)
+	s.machine = nil // allocate a new machine
+	unit := s.AddUnit(c, service)
+
+	storageAttachments, err := s.State.UnitStorageAttachments(unit.UnitTag())
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(storageAttachments, gc.HasLen, 1)
+	storageTag := storageAttachments[0].StorageInstance()
+
+	volume, err := s.State.StorageInstanceVolume(storageTag)
+	c.Assert(err, jc.ErrorIsNil)
+	volumeTag := volume.VolumeTag()
+	machineTag := s.machine.MachineTag()
+
+	err = s.State.SetVolumeInfo(
+		volumeTag, state.VolumeInfo{
+			VolumeId: "vol-123",
+			Size:     456,
+		},
+	)
+	c.Assert(err, jc.ErrorIsNil)
+	err = s.State.SetVolumeAttachmentInfo(
+		machineTag, volumeTag, state.VolumeAttachmentInfo{
+			DeviceName: "sdb",
+		},
+	)
+	c.Assert(err, jc.ErrorIsNil)
+
+	password, err := utils.RandomPassword()
+	c.Assert(err, jc.ErrorIsNil)
+	err = unit.SetPassword(password)
+	c.Assert(err, jc.ErrorIsNil)
+	st := s.OpenAPIAs(c, unit.Tag(), password)
+	uniter, err := st.Uniter()
+	c.Assert(err, jc.ErrorIsNil)
+
+	contextFactory, err := context.NewContextFactory(
+		uniter,
+		unit.Tag().(names.UnitTag),
+		runnertesting.FakeTracker{},
+		s.getRelationInfos,
+		s.storage,
+		s.paths,
+		coretesting.NewClock(time.Time{}),
+	)
+	c.Assert(err, jc.ErrorIsNil)
+	ctx, err := contextFactory.HookContext(hook.Info{
+		Kind:      hooks.StorageAttached,
+		StorageId: "data/0",
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(ctx.UnitName(), gc.Equals, "storage-block/0")
+	s.AssertStorageContext(c, ctx, "data/0", storage.StorageAttachmentInfo{
+		Kind:     storage.StorageKindBlock,
+		Location: "/dev/sdb",
+	})
+	s.AssertNotActionContext(c, ctx)
+	s.AssertNotRelationContext(c, ctx)
+}
+
+func (s *ContextFactorySuite) TestActionContext(c *gc.C) {
+	s.SetCharm(c, "dummy")
+	action, err := s.State.EnqueueAction(s.unit.Tag(), "snapshot", nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	actionData := 
&context.ActionData{ + Name: action.Name(), + Tag: names.NewActionTag(action.Id()), + Params: action.Parameters(), + ResultsMap: map[string]interface{}{}, + } + + ctx, err := s.factory.ActionContext(actionData) + c.Assert(err, jc.ErrorIsNil) + + s.AssertCoreContext(c, ctx) + s.AssertActionContext(c, ctx) + s.AssertNotRelationContext(c, ctx) + s.AssertNotStorageContext(c, ctx) +} + +func (s *ContextFactorySuite) TestCommandContext(c *gc.C) { + ctx, err := s.factory.CommandContext(context.CommandInfo{RelationId: -1}) + c.Assert(err, jc.ErrorIsNil) + + s.AssertCoreContext(c, ctx) + s.AssertNotActionContext(c, ctx) + s.AssertNotRelationContext(c, ctx) + s.AssertNotStorageContext(c, ctx) +} + +func (s *ContextFactorySuite) TestCommandContextNoRelation(c *gc.C) { + ctx, err := s.factory.CommandContext(context.CommandInfo{RelationId: -1}) + c.Assert(err, jc.ErrorIsNil) + s.AssertCoreContext(c, ctx) + s.AssertNotActionContext(c, ctx) + s.AssertNotRelationContext(c, ctx) + s.AssertNotStorageContext(c, ctx) +} + +func (s *ContextFactorySuite) TestNewCommandContextForceNoRemoteUnit(c *gc.C) { + ctx, err := s.factory.CommandContext(context.CommandInfo{ + RelationId: 0, ForceRemoteUnit: true, + }) + c.Assert(err, jc.ErrorIsNil) + s.AssertCoreContext(c, ctx) + s.AssertNotActionContext(c, ctx) + s.AssertRelationContext(c, ctx, 0, "") + s.AssertNotStorageContext(c, ctx) +} + +func (s *ContextFactorySuite) TestNewCommandContextForceRemoteUnitMissing(c *gc.C) { + ctx, err := s.factory.CommandContext(context.CommandInfo{ + RelationId: 0, RemoteUnitName: "blah/123", ForceRemoteUnit: true, + }) + c.Assert(err, gc.IsNil) + s.AssertCoreContext(c, ctx) + s.AssertNotActionContext(c, ctx) + s.AssertRelationContext(c, ctx, 0, "blah/123") + s.AssertNotStorageContext(c, ctx) +} + +func (s *ContextFactorySuite) TestNewCommandContextInferRemoteUnit(c *gc.C) { + s.membership[0] = []string{"foo/2"} + ctx, err := s.factory.CommandContext(context.CommandInfo{RelationId: 0}) + c.Assert(err, jc.ErrorIsNil) + s.AssertCoreContext(c, ctx) + s.AssertNotActionContext(c, ctx) + s.AssertRelationContext(c, ctx, 0, "foo/2") + s.AssertNotStorageContext(c, ctx) +} + +func (s *ContextFactorySuite) TestNewHookContextPrunesNonMemberCaches(c *gc.C) { + + // Write cached member settings for a member and a non-member. + s.setUpCacheMethods(c) + s.membership[0] = []string{"rel0/0"} + s.updateCache(0, "rel0/0", params.Settings{"keep": "me"}) + s.updateCache(0, "rel0/1", params.Settings{"drop": "me"}) + + ctx, err := s.factory.HookContext(hook.Info{Kind: hooks.Install}) + c.Assert(err, jc.ErrorIsNil) + + settings0, found := s.getCache(0, "rel0/0") + c.Assert(found, jc.IsTrue) + c.Assert(settings0, jc.DeepEquals, params.Settings{"keep": "me"}) + + settings1, found := s.getCache(0, "rel0/1") + c.Assert(found, jc.IsFalse) + c.Assert(settings1, gc.IsNil) + + // Check the caches are being used by the context relations. + relCtx, err := ctx.Relation(0) + c.Assert(err, jc.ErrorIsNil) + + // Verify that the settings really were cached by trying to look them up. + // Nothing's really in scope, so the call would fail if they weren't. + settings0, err = relCtx.ReadSettings("rel0/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings0, jc.DeepEquals, params.Settings{"keep": "me"}) + + // Verify that the non-member settings were purged by looking them up and + // checking for the expected error. 
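+	// (For the purged non-member the read falls through to the API,
+	// which rejects it; hence the permission error asserted below.)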
+	settings1, err = relCtx.ReadSettings("rel0/1")
+	c.Assert(settings1, gc.IsNil)
+	c.Assert(err, gc.ErrorMatches, "permission denied")
+}
+
+func (s *ContextFactorySuite) TestNewHookContextRelationJoinedUpdatesRelationContextAndCaches(c *gc.C) {
+	// Write some cached settings for r/0, so we can verify the cache gets cleared.
+	s.setUpCacheMethods(c)
+	s.membership[1] = []string{"r/0"}
+	s.updateCache(1, "r/0", params.Settings{"foo": "bar"})
+
+	ctx, err := s.factory.HookContext(hook.Info{
+		Kind:       hooks.RelationJoined,
+		RelationId: 1,
+		RemoteUnit: "r/0",
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	s.AssertCoreContext(c, ctx)
+	s.AssertNotActionContext(c, ctx)
+	s.AssertNotStorageContext(c, ctx)
+	rel := s.AssertRelationContext(c, ctx, 1, "r/0")
+	c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/0"})
+	cached0, member := s.getCache(1, "r/0")
+	c.Assert(cached0, gc.IsNil)
+	c.Assert(member, jc.IsTrue)
+}
+
+func (s *ContextFactorySuite) TestNewHookContextRelationChangedUpdatesRelationContextAndCaches(c *gc.C) {
+	// Update member settings to have actual values, so we can check that
+	// the change for r/4 clears its cache but leaves r/0's alone.
+	s.setUpCacheMethods(c)
+	s.membership[1] = []string{"r/0", "r/4"}
+	s.updateCache(1, "r/0", params.Settings{"foo": "bar"})
+	s.updateCache(1, "r/4", params.Settings{"baz": "qux"})
+
+	ctx, err := s.factory.HookContext(hook.Info{
+		Kind:       hooks.RelationChanged,
+		RelationId: 1,
+		RemoteUnit: "r/4",
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	s.AssertCoreContext(c, ctx)
+	s.AssertNotActionContext(c, ctx)
+	s.AssertNotStorageContext(c, ctx)
+	rel := s.AssertRelationContext(c, ctx, 1, "r/4")
+	c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/0", "r/4"})
+	cached0, member := s.getCache(1, "r/0")
+	c.Assert(cached0, jc.DeepEquals, params.Settings{"foo": "bar"})
+	c.Assert(member, jc.IsTrue)
+	cached4, member := s.getCache(1, "r/4")
+	c.Assert(cached4, gc.IsNil)
+	c.Assert(member, jc.IsTrue)
+}
+
+func (s *ContextFactorySuite) TestNewHookContextRelationDepartedUpdatesRelationContextAndCaches(c *gc.C) {
+	// Update member settings to have actual values, so we can check that
+	// the depart for r/0 leaves r/4's cache alone (while discarding r/0's).
+	s.setUpCacheMethods(c)
+	s.membership[1] = []string{"r/0", "r/4"}
+	s.updateCache(1, "r/0", params.Settings{"foo": "bar"})
+	s.updateCache(1, "r/4", params.Settings{"baz": "qux"})
+
+	ctx, err := s.factory.HookContext(hook.Info{
+		Kind:       hooks.RelationDeparted,
+		RelationId: 1,
+		RemoteUnit: "r/0",
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	s.AssertCoreContext(c, ctx)
+	s.AssertNotActionContext(c, ctx)
+	s.AssertNotStorageContext(c, ctx)
+	rel := s.AssertRelationContext(c, ctx, 1, "r/0")
+	c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/4"})
+	cached0, member := s.getCache(1, "r/0")
+	c.Assert(cached0, gc.IsNil)
+	c.Assert(member, jc.IsFalse)
+	cached4, member := s.getCache(1, "r/4")
+	c.Assert(cached4, jc.DeepEquals, params.Settings{"baz": "qux"})
+	c.Assert(member, jc.IsTrue)
+}
+
+func (s *ContextFactorySuite) TestNewHookContextRelationBrokenRetainsCaches(c *gc.C) {
+	// Note that this is bizarre and unrealistic, because we would never usually
+	// run relation-broken on a non-empty relation. But verifying that the settings
+	// stick around allows us to confirm that there's no special handling for that
+	// hook -- as there should not be, because the relation caches will be discarded
+	// for the *next* hook, which will be constructed with the current set of known
+	// relations and ignore everything else.
+	s.setUpCacheMethods(c)
+	s.membership[1] = []string{"r/0", "r/4"}
+	s.updateCache(1, "r/0", params.Settings{"foo": "bar"})
+	s.updateCache(1, "r/4", params.Settings{"baz": "qux"})
+
+	ctx, err := s.factory.HookContext(hook.Info{
+		Kind:       hooks.RelationBroken,
+		RelationId: 1,
+	})
+	c.Assert(err, jc.ErrorIsNil)
+	rel := s.AssertRelationContext(c, ctx, 1, "")
+	c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/0", "r/4"})
+	cached0, member := s.getCache(1, "r/0")
+	c.Assert(cached0, jc.DeepEquals, params.Settings{"foo": "bar"})
+	c.Assert(member, jc.IsTrue)
+	cached4, member := s.getCache(1, "r/4")
+	c.Assert(cached4, jc.DeepEquals, params.Settings{"baz": "qux"})
+	c.Assert(member, jc.IsTrue)
+}
+
+type StubLeadershipContext struct {
+	context.LeadershipContext
+	*testing.Stub
+}
+
+func (stub *StubLeadershipContext) IsLeader() (bool, error) {
+	stub.MethodCall(stub, "IsLeader")
+	return false, stub.NextErr()
+} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/env.go' --- src/github.com/juju/juju/worker/uniter/runner/context/env.go	1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/env.go	2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2012-2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context
+
+import (
+	"os"
+	"path/filepath"
+
+	jujuos "github.com/juju/utils/os"
+)
+
+// OSDependentEnvVars returns the OS-dependent environment variables that
+// should be set for a hook context.
+func OSDependentEnvVars(paths Paths) []string {
+	switch jujuos.HostOS() {
+	case jujuos.Windows:
+		return windowsEnv(paths)
+	case jujuos.Ubuntu:
+		return ubuntuEnv(paths)
+	case jujuos.CentOS:
+		return centosEnv(paths)
+	}
+	return nil
+}
+
+func appendPath(paths Paths) []string {
+	return []string{
+		"PATH=" + paths.GetToolsDir() + ":" + os.Getenv("PATH"),
+	}
+}
+
+func ubuntuEnv(paths Paths) []string {
+	path := appendPath(paths)
+	env := []string{
+		"APT_LISTCHANGES_FRONTEND=none",
+		"DEBIAN_FRONTEND=noninteractive",
+	}
+	env = append(env, path...)
+	return env
+}
+
+func centosEnv(paths Paths) []string {
+	return appendPath(paths)
+}
+
+// windowsEnv adds windows-specific environment variables. PSModulePath
+// helps hooks use normal imports instead of dot sourcing modules; it is
+// a convenience variable. Note that the PATH variable delimiter on
+// Windows is a semicolon instead of a colon.
+func windowsEnv(paths Paths) []string {
+	charmDir := paths.GetCharmDir()
+	charmModules := filepath.Join(charmDir, "lib", "Modules")
+	return []string{
+		"Path=" + paths.GetToolsDir() + ";" + os.Getenv("Path"),
+		"PSModulePath=" + os.Getenv("PSModulePath") + ";" + charmModules,
+	}
+} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/env_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/env_test.go	1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/env_test.go	2016-03-22 15:18:22 +0000 @@ -0,0 +1,149 @@ +// Copyright 2012-2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package context_test + +import ( + "os" + "path/filepath" + "runtime" + "sort" + + "github.com/juju/names" + envtesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/keyvalues" + "github.com/juju/utils/proxy" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/worker/uniter/runner/context" + jujuos "github.com/juju/utils/os" +) + +type EnvSuite struct { + envtesting.IsolationSuite +} + +var _ = gc.Suite(&EnvSuite{}) + +func (s *EnvSuite) assertVars(c *gc.C, actual []string, expect ...[]string) { + var fullExpect []string + for _, someExpect := range expect { + fullExpect = append(fullExpect, someExpect...) + } + sort.Strings(actual) + sort.Strings(fullExpect) + c.Assert(actual, jc.DeepEquals, fullExpect) +} + +func (s *EnvSuite) getPaths() (paths context.Paths, expectVars []string) { + // note: path-munging is os-dependent, not included in expectVars + return MockEnvPaths{}, []string{ + "CHARM_DIR=path-to-charm", + "JUJU_CHARM_DIR=path-to-charm", + "JUJU_AGENT_SOCKET=path-to-jujuc.socket", + } +} + +func (s *EnvSuite) getContext() (ctx *context.HookContext, expectVars []string) { + return context.NewModelHookContext( + "some-context-id", + "model-uuid-deadbeef", + "some-model-name", + "this-unit/123", + "PURPLE", + "proceed with care", + "some-zone", + []string{"he.re:12345", "the.re:23456"}, + proxy.Settings{ + Http: "some-http-proxy", + Https: "some-https-proxy", + Ftp: "some-ftp-proxy", + NoProxy: "some-no-proxy", + }, + names.NewMachineTag("42"), + ), []string{ + "JUJU_CONTEXT_ID=some-context-id", + "JUJU_MODEL_UUID=model-uuid-deadbeef", + "JUJU_MODEL_NAME=some-model-name", + "JUJU_UNIT_NAME=this-unit/123", + "JUJU_METER_STATUS=PURPLE", + "JUJU_METER_INFO=proceed with care", + "JUJU_API_ADDRESSES=he.re:12345 the.re:23456", + "JUJU_MACHINE_ID=42", + "JUJU_AVAILABILITY_ZONE=some-zone", + "http_proxy=some-http-proxy", + "HTTP_PROXY=some-http-proxy", + "https_proxy=some-https-proxy", + "HTTPS_PROXY=some-https-proxy", + "ftp_proxy=some-ftp-proxy", + "FTP_PROXY=some-ftp-proxy", + "no_proxy=some-no-proxy", + "NO_PROXY=some-no-proxy", + } +} + +func (s *EnvSuite) setRelation(ctx *context.HookContext) (expectVars []string) { + context.SetEnvironmentHookContextRelation( + ctx, 22, "an-endpoint", "that-unit/456", + ) + return []string{ + "JUJU_RELATION=an-endpoint", + "JUJU_RELATION_ID=an-endpoint:22", + "JUJU_REMOTE_UNIT=that-unit/456", + } +} + +func (s *EnvSuite) TestEnvSetsPath(c *gc.C) { + paths := context.OSDependentEnvVars(MockEnvPaths{}) + c.Assert(paths, gc.Not(gc.HasLen), 0) + vars, err := keyvalues.Parse(paths, true) + c.Assert(err, jc.ErrorIsNil) + key := "PATH" + if runtime.GOOS == "windows" { + key = "Path" + } + c.Assert(vars[key], gc.Not(gc.Equals), "") +} + +func (s *EnvSuite) TestEnvWindows(c *gc.C) { + s.PatchValue(&jujuos.HostOS, func() jujuos.OSType { return jujuos.Windows }) + os.Setenv("Path", "foo;bar") + os.Setenv("PSModulePath", "ping;pong") + windowsVars := []string{ + "Path=path-to-tools;foo;bar", + "PSModulePath=ping;pong;" + filepath.FromSlash("path-to-charm/lib/Modules"), + } + + ctx, contextVars := s.getContext() + paths, pathsVars := s.getPaths() + actualVars, err := ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + s.assertVars(c, actualVars, contextVars, pathsVars, windowsVars) + + relationVars := s.setRelation(ctx) + actualVars, err = ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + s.assertVars(c, actualVars, contextVars, pathsVars, windowsVars, relationVars) +} + +func (s *EnvSuite) TestEnvUbuntu(c *gc.C) 
{ + s.PatchValue(&jujuos.HostOS, func() jujuos.OSType { return jujuos.Ubuntu }) + os.Setenv("PATH", "foo:bar") + ubuntuVars := []string{ + "PATH=path-to-tools:foo:bar", + "APT_LISTCHANGES_FRONTEND=none", + "DEBIAN_FRONTEND=noninteractive", + } + + ctx, contextVars := s.getContext() + paths, pathsVars := s.getPaths() + actualVars, err := ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + s.assertVars(c, actualVars, contextVars, pathsVars, ubuntuVars) + + relationVars := s.setRelation(ctx) + actualVars, err = ctx.HookVars(paths) + c.Assert(err, jc.ErrorIsNil) + s.assertVars(c, actualVars, contextVars, pathsVars, ubuntuVars, relationVars) +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/errors.go' --- src/github.com/juju/juju/worker/uniter/runner/context/errors.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/errors.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context + +import ( + "github.com/juju/errors" +) + +var ErrRequeueAndReboot = errors.New("reboot now") +var ErrReboot = errors.New("reboot after hook") +var ErrNoProcess = errors.New("no process to kill") + +type missingHookError struct { + hookName string +} + +func (e *missingHookError) Error() string { + return e.hookName + " does not exist" +} + +func IsMissingHookError(err error) bool { + _, ok := err.(*missingHookError) + return ok +} + +func NewMissingHookError(hookName string) error { + return &missingHookError{hookName} +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/export_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,186 @@ +// Copyright 2012-2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/utils/clock" + "github.com/juju/utils/proxy" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/api/uniter" + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +var ( + ValidatePortRange = validatePortRange + TryOpenPorts = tryOpenPorts + TryClosePorts = tryClosePorts +) + +func NewHookContext( + unit *uniter.Unit, + state *uniter.State, + id, + uuid, + envName string, + relationId int, + remoteUnitName string, + relations map[int]*ContextRelation, + apiAddrs []string, + proxySettings proxy.Settings, + canAddMetrics bool, + charmMetrics *charm.Metrics, + actionData *ActionData, + assignedMachineTag names.MachineTag, + paths Paths, + clock clock.Clock, +) (*HookContext, error) { + ctx := &HookContext{ + unit: unit, + state: state, + id: id, + uuid: uuid, + envName: envName, + unitName: unit.Name(), + relationId: relationId, + remoteUnitName: remoteUnitName, + relations: relations, + apiAddrs: apiAddrs, + proxySettings: proxySettings, + actionData: actionData, + pendingPorts: make(map[PortRange]PortRangeInfo), + assignedMachineTag: assignedMachineTag, + clock: clock, + } + // Get and cache the addresses. 
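+	// This mirrors the lookups contextFactory.updateContext performs for
+	// real contexts, so tests can build a HookContext without a factory.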
+	var err error
+	ctx.publicAddress, err = unit.PublicAddress()
+	if err != nil && !params.IsCodeNoAddressSet(err) {
+		return nil, err
+	}
+	ctx.privateAddress, err = unit.PrivateAddress()
+	if err != nil && !params.IsCodeNoAddressSet(err) {
+		return nil, err
+	}
+	ctx.availabilityzone, err = unit.AvailabilityZone()
+	if err != nil {
+		return nil, err
+	}
+	ctx.machinePorts, err = state.AllMachinePorts(ctx.assignedMachineTag)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	statusCode, statusInfo, err := unit.MeterStatus()
+	if err != nil {
+		return nil, errors.Annotate(err, "could not retrieve meter status for unit")
+	}
+	ctx.meterStatus = &meterStatus{
+		code: statusCode,
+		info: statusInfo,
+	}
+	return ctx, nil
+}
+
+// SetEnvironmentHookContextRelation exists purely to set the fields used in hookVars.
+// It makes no assumptions about the validity of context.
+func SetEnvironmentHookContextRelation(
+	context *HookContext,
+	relationId int, endpointName, remoteUnitName string,
+) {
+	context.relationId = relationId
+	context.remoteUnitName = remoteUnitName
+	context.relations = map[int]*ContextRelation{
+		relationId: {
+			endpointName: endpointName,
+			relationId:   relationId,
+		},
+	}
+}
+
+func PatchCachedStatus(ctx jujuc.Context, status, info string, data map[string]interface{}) func() {
+	hctx := ctx.(*HookContext)
+	oldStatus := hctx.status
+	hctx.status = &jujuc.StatusInfo{
+		Status: status,
+		Info:   info,
+		Data:   data,
+	}
+	return func() {
+		hctx.status = oldStatus
+	}
+}
+
+func GetStubActionContext(in map[string]interface{}) *HookContext {
+	return &HookContext{
+		actionData: &ActionData{
+			ResultsMap: in,
+		},
+	}
+}
+
+type LeadershipContextFunc func(LeadershipSettingsAccessor, leadership.Tracker) LeadershipContext
+
+func PatchNewLeadershipContext(f LeadershipContextFunc) func() {
+	var old LeadershipContextFunc
+	old, newLeadershipContext = newLeadershipContext, f
+	return func() { newLeadershipContext = old }
+}
+
+func StorageAddConstraints(ctx *HookContext) map[string][]params.StorageConstraints {
+	return ctx.storageAddConstraints
+}
+
+// NewModelHookContext exists purely to set the fields used in hookVars.
+// The returned value is not otherwise valid.
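+// (See env_test.go in this package for a representative call that
+// exercises the resulting environment-variable plumbing end to end.)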
+func NewModelHookContext( + id, modelUUID, envName, unitName, meterCode, meterInfo, availZone string, + apiAddresses []string, proxySettings proxy.Settings, + machineTag names.MachineTag, +) *HookContext { + return &HookContext{ + id: id, + unitName: unitName, + uuid: modelUUID, + envName: envName, + apiAddrs: apiAddresses, + proxySettings: proxySettings, + meterStatus: &meterStatus{ + code: meterCode, + info: meterInfo, + }, + relationId: -1, + assignedMachineTag: machineTag, + availabilityzone: availZone, + } +} + +func ContextEnvInfo(hctx *HookContext) (name, uuid string) { + return hctx.envName, hctx.uuid +} + +func ContextMachineTag(hctx *HookContext) names.MachineTag { + return hctx.assignedMachineTag +} + +func UpdateCachedSettings(cf0 ContextFactory, relId int, unitName string, settings params.Settings) { + cf := cf0.(*contextFactory) + members := cf.relationCaches[relId].members + if members[unitName] == nil { + members[unitName] = params.Settings{} + } + for key, value := range settings { + members[unitName][key] = value + } +} + +func CachedSettings(cf0 ContextFactory, relId int, unitName string) (params.Settings, bool) { + cf := cf0.(*contextFactory) + settings, found := cf.relationCaches[relId].members[unitName] + return settings, found +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/flush_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/flush_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/flush_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,229 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + "github.com/juju/juju/worker/metrics/spool" + "github.com/juju/juju/worker/uniter/runner/context" + runnertesting "github.com/juju/juju/worker/uniter/runner/testing" +) + +type FlushContextSuite struct { + HookContextSuite + stub testing.Stub +} + +var _ = gc.Suite(&FlushContextSuite{}) + +func (s *FlushContextSuite) SetUpTest(c *gc.C) { + s.HookContextSuite.SetUpTest(c) + s.stub.ResetCalls() +} + +func (s *FlushContextSuite) TestRunHookRelationFlushingError(c *gc.C) { + ctx := s.context(c) + + // Mess with multiple relation settings. + relCtx0, err := ctx.Relation(0) + c.Assert(err, jc.ErrorIsNil) + node0, err := relCtx0.Settings() + c.Assert(err, jc.ErrorIsNil) + node0.Set("foo", "1") + relCtx1, err := ctx.Relation(1) + c.Assert(err, jc.ErrorIsNil) + node1, err := relCtx1.Settings() + c.Assert(err, jc.ErrorIsNil) + node1.Set("bar", "2") + + // Flush the context with a failure. + err = ctx.Flush("some badge", errors.New("blam pow")) + c.Assert(err, gc.ErrorMatches, "blam pow") + + // Check that the changes have not been written to state. + settings0, err := s.relunits[0].ReadSettings("u/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings0, gc.DeepEquals, map[string]interface{}{"relation-name": "db0"}) + settings1, err := s.relunits[1].ReadSettings("u/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings1, gc.DeepEquals, map[string]interface{}{"relation-name": "db1"}) +} + +func (s *FlushContextSuite) TestRunHookRelationFlushingSuccess(c *gc.C) { + ctx := s.context(c) + + // Mess with multiple relation settings. 
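+	// Settings() hands back a live settings node; writes to it stay
+	// local to this context until Flush decides their fate.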
+ relCtx0, err := ctx.Relation(0) + c.Assert(err, jc.ErrorIsNil) + node0, err := relCtx0.Settings() + c.Assert(err, jc.ErrorIsNil) + node0.Set("baz", "3") + relCtx1, err := ctx.Relation(1) + c.Assert(err, jc.ErrorIsNil) + node1, err := relCtx1.Settings() + c.Assert(err, jc.ErrorIsNil) + node1.Set("qux", "4") + + // Flush the context with a success. + err = ctx.Flush("some badge", nil) + c.Assert(err, jc.ErrorIsNil) + + // Check that the changes have been written to state. + settings0, err := s.relunits[0].ReadSettings("u/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings0, gc.DeepEquals, map[string]interface{}{ + "relation-name": "db0", + "baz": "3", + }) + settings1, err := s.relunits[1].ReadSettings("u/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings1, gc.DeepEquals, map[string]interface{}{ + "relation-name": "db1", + "qux": "4", + }) +} + +func (s *FlushContextSuite) TestRunHookOpensAndClosesPendingPorts(c *gc.C) { + // Initially, no port ranges are open on the unit or its machine. + unitRanges, err := s.unit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(unitRanges, gc.HasLen, 0) + machinePorts, err := s.machine.AllPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(machinePorts, gc.HasLen, 0) + + // Add another unit on the same machine. + otherUnit, err := s.service.AddUnit() + c.Assert(err, jc.ErrorIsNil) + err = otherUnit.AssignToMachine(s.machine) + c.Assert(err, jc.ErrorIsNil) + + // Open some ports on both units. + err = s.unit.OpenPorts("tcp", 100, 200) + c.Assert(err, jc.ErrorIsNil) + err = otherUnit.OpenPorts("udp", 200, 300) + c.Assert(err, jc.ErrorIsNil) + + unitRanges, err = s.unit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(unitRanges, jc.DeepEquals, []network.PortRange{ + {100, 200, "tcp"}, + }) + + ctx := s.context(c) + + // Try opening some ports via the context. + err = ctx.OpenPorts("tcp", 100, 200) + c.Assert(err, jc.ErrorIsNil) // duplicates are ignored + err = ctx.OpenPorts("udp", 200, 300) + c.Assert(err, gc.ErrorMatches, `cannot open 200-300/udp \(unit "u/0"\): conflicts with existing 200-300/udp \(unit "u/1"\)`) + err = ctx.OpenPorts("udp", 100, 200) + c.Assert(err, gc.ErrorMatches, `cannot open 100-200/udp \(unit "u/0"\): conflicts with existing 200-300/udp \(unit "u/1"\)`) + err = ctx.OpenPorts("udp", 10, 20) + c.Assert(err, jc.ErrorIsNil) + err = ctx.OpenPorts("tcp", 50, 100) + c.Assert(err, gc.ErrorMatches, `cannot open 50-100/tcp \(unit "u/0"\): conflicts with existing 100-200/tcp \(unit "u/0"\)`) + err = ctx.OpenPorts("tcp", 50, 80) + c.Assert(err, jc.ErrorIsNil) + err = ctx.OpenPorts("tcp", 40, 90) + c.Assert(err, gc.ErrorMatches, `cannot open 40-90/tcp \(unit "u/0"\): conflicts with 50-80/tcp requested earlier`) + + // Now try closing some ports as well. + err = ctx.ClosePorts("udp", 8080, 8088) + c.Assert(err, jc.ErrorIsNil) // not existing -> ignored + err = ctx.ClosePorts("tcp", 100, 200) + c.Assert(err, jc.ErrorIsNil) + err = ctx.ClosePorts("tcp", 100, 200) + c.Assert(err, jc.ErrorIsNil) // duplicates are ignored + err = ctx.ClosePorts("udp", 200, 300) + c.Assert(err, gc.ErrorMatches, `cannot close 200-300/udp \(opened by "u/1"\) from "u/0"`) + err = ctx.ClosePorts("tcp", 50, 80) + c.Assert(err, jc.ErrorIsNil) // still pending -> no longer pending + + // Ensure the ports are not actually changed on the unit yet. + unitRanges, err = s.unit.OpenedPorts() + c.Assert(err, jc.ErrorIsNil) + c.Assert(unitRanges, jc.DeepEquals, []network.PortRange{ + {100, 200, "tcp"}, + }) + + // Flush the context with a success. 
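+	// The expected net effect: 10-20/udp opens, the pre-existing
+	// 100-200/tcp closes, and 50-80/tcp (opened and then closed while
+	// still pending) cancels out entirely.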
+	err = ctx.Flush("some badge", nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Verify the unit ranges are now open.
+	expectUnitRanges := []network.PortRange{
+		{FromPort: 10, ToPort: 20, Protocol: "udp"},
+	}
+	unitRanges, err = s.unit.OpenedPorts()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(unitRanges, jc.DeepEquals, expectUnitRanges)
+}
+
+func (s *FlushContextSuite) TestRunHookAddStorageOnFailure(c *gc.C) {
+	ctx := s.context(c)
+	c.Assert(ctx.UnitName(), gc.Equals, "u/0")
+
+	size := uint64(1)
+	ctx.AddUnitStorage(
+		map[string]params.StorageConstraints{
+			"allecto": params.StorageConstraints{Size: &size},
+		})
+
+	// Flush the context with an error.
+	msg := "test fail run hook"
+	err := ctx.Flush("test fail run hook", errors.New(msg))
+	c.Assert(errors.Cause(err), gc.ErrorMatches, msg)
+
+	all, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(all, gc.HasLen, 0)
+}
+
+func (s *FlushContextSuite) TestRunHookAddUnitStorageOnSuccess(c *gc.C) {
+	ctx := s.context(c)
+	c.Assert(ctx.UnitName(), gc.Equals, "u/0")
+
+	size := uint64(1)
+	ctx.AddUnitStorage(
+		map[string]params.StorageConstraints{
+			"allecto": params.StorageConstraints{Size: &size},
+		})
+
+	// Flush the context with a success.
+	err := ctx.Flush("success", nil)
+	c.Assert(errors.Cause(err), gc.ErrorMatches, `.*storage "allecto" not found.*`)
+
+	all, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(all, gc.HasLen, 0)
+}
+
+func (s *HookContextSuite) context(c *gc.C) *context.HookContext {
+	uuid, err := utils.NewUUID()
+	c.Assert(err, jc.ErrorIsNil)
+	return s.getHookContext(c, uuid.String(), -1, "", noProxies)
+}
+
+func (s *FlushContextSuite) TestBuiltinMetricNotGeneratedIfNotDefined(c *gc.C) {
+	uuid := utils.MustNewUUID()
+	paths := runnertesting.NewRealPaths(c)
+	ctx := s.getMeteredHookContext(c, uuid.String(), -1, "", noProxies, true, s.metricsDefinition("pings"), paths)
+	reader, err := spool.NewJSONMetricReader(
+		paths.GetMetricsSpoolDir(),
+	)
+	c.Assert(err, jc.ErrorIsNil)
+
+	err = ctx.Flush("some badge", nil)
+	c.Assert(err, jc.ErrorIsNil)
+	batches, err := reader.Read()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(batches, gc.HasLen, 0)
+} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/leader.go' --- src/github.com/juju/juju/worker/uniter/runner/context/leader.go	1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/leader.go	2016-03-22 15:18:22 +0000 @@ -0,0 +1,111 @@ +// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context
+
+import (
+	"github.com/juju/errors"
+
+	"github.com/juju/juju/core/leadership"
+)
+
+var (
+	errIsMinion = errors.New("not the leader")
+)
+
+// LeadershipSettingsAccessor is an interface that allows us not to have
+// to use the concrete `api/uniter/LeadershipSettingsAccessor` type, thus
+// simplifying testing.
+type LeadershipSettingsAccessor interface {
+	Read(serviceName string) (map[string]string, error)
+	Merge(serviceName string, settings map[string]string) error
+}
+
+// LeadershipContext provides several jujuc.Context methods. It exists
+// separately from HookContext for clarity and ease of testing.
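+// Implementations are expected to be sticky about what they learn: once
+// a unit has been refused leadership it is treated as a minion for the
+// rest of the context's life, and leader settings are cached until a
+// write invalidates them (see leadershipContext below).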
+type LeadershipContext interface { + IsLeader() (bool, error) + LeaderSettings() (map[string]string, error) + WriteLeaderSettings(map[string]string) error +} + +type leadershipContext struct { + accessor LeadershipSettingsAccessor + tracker leadership.Tracker + serviceName string + + isMinion bool + settings map[string]string +} + +func NewLeadershipContext(accessor LeadershipSettingsAccessor, tracker leadership.Tracker) LeadershipContext { + return &leadershipContext{ + accessor: accessor, + tracker: tracker, + serviceName: tracker.ServiceName(), + } +} + +// newLeadershipContext allows us to swap out the leadership context creator for +// factory tests. +var newLeadershipContext = NewLeadershipContext + +// IsLeader is part of the jujuc.Context interface. +func (ctx *leadershipContext) IsLeader() (bool, error) { + // This doesn't technically need an error return, but that feels like a + // happy accident of the current implementation and not a reason to change + // the interface we're implementing. + err := ctx.ensureLeader() + switch err { + case nil: + return true, nil + case errIsMinion: + return false, nil + } + return false, errors.Trace(err) +} + +// WriteLeaderSettings is part of the jujuc.Context interface. +func (ctx *leadershipContext) WriteLeaderSettings(settings map[string]string) error { + // This may trigger a lease refresh; it would be desirable to use a less + // eager approach here, but we're working around a race described in + // `apiserver/leadership.LeadershipSettingsAccessor.Merge`, and as of + // 2015-02-19 it's better to stay eager. + err := ctx.ensureLeader() + if err == nil { + // Clear local settings; if we need them again we should use the values + // as merged by the server. But we don't need to get them again right now; + // the charm may not need to ask again before the hook finishes. + ctx.settings = nil + err = ctx.accessor.Merge(ctx.serviceName, settings) + } + return errors.Annotate(err, "cannot write settings") +} + +// LeaderSettings is part of the jujuc.Context interface. +func (ctx *leadershipContext) LeaderSettings() (map[string]string, error) { + if ctx.settings == nil { + var err error + ctx.settings, err = ctx.accessor.Read(ctx.serviceName) + if err != nil { + return nil, errors.Annotate(err, "cannot read settings") + } + } + result := map[string]string{} + for key, value := range ctx.settings { + result[key] = value + } + return result, nil +} + +func (ctx *leadershipContext) ensureLeader() error { + if ctx.isMinion { + return errIsMinion + } + success := ctx.tracker.ClaimLeader().Wait() + if !success { + ctx.isMinion = true + return errIsMinion + } + return nil +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/leader_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/leader_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/leader_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,335 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package context_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/worker/uniter/runner/context" +) + +type LeaderSuite struct { + testing.IsolationSuite + testing.Stub + accessor *StubLeadershipSettingsAccessor + tracker *StubTracker + context context.LeadershipContext +} + +var _ = gc.Suite(&LeaderSuite{}) + +func (s *LeaderSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.accessor = &StubLeadershipSettingsAccessor{ + Stub: &s.Stub, + } + s.tracker = &StubTracker{ + Stub: &s.Stub, + serviceName: "led-service", + } + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ServiceName", + }}, func() { + s.context = context.NewLeadershipContext(s.accessor, s.tracker) + }) +} + +func (s *LeaderSuite) CheckCalls(c *gc.C, stubCalls []testing.StubCall, f func()) { + s.Stub = testing.Stub{} + f() + s.Stub.CheckCalls(c, stubCalls) +} + +func (s *LeaderSuite) TestIsLeaderSuccess(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }}, func() { + // The first call succeeds... + s.tracker.results = []StubTicket{true} + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsTrue) + c.Check(err, jc.ErrorIsNil) + }) + + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }}, func() { + // ...and so does the second. + s.tracker.results = []StubTicket{true} + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsTrue) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *LeaderSuite) TestIsLeaderFailure(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }}, func() { + // The first call fails... + s.tracker.results = []StubTicket{false} + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsFalse) + c.Check(err, jc.ErrorIsNil) + }) + + s.CheckCalls(c, nil, func() { + // ...and the second doesn't even try. + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsFalse) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *LeaderSuite) TestIsLeaderFailureAfterSuccess(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }}, func() { + // The first call succeeds... + s.tracker.results = []StubTicket{true} + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsTrue) + c.Check(err, jc.ErrorIsNil) + }) + + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }}, func() { + // The second fails... + s.tracker.results = []StubTicket{false} + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsFalse) + c.Check(err, jc.ErrorIsNil) + }) + + s.CheckCalls(c, nil, func() { + // The third doesn't even try. + leader, err := s.context.IsLeader() + c.Check(leader, jc.IsFalse) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *LeaderSuite) TestLeaderSettingsSuccess(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "Read", + Args: []interface{}{"led-service"}, + }}, func() { + // The first call grabs the settings... + s.accessor.results = []map[string]string{{ + "some": "settings", + "of": "interest", + }} + settings, err := s.context.LeaderSettings() + c.Check(settings, jc.DeepEquals, map[string]string{ + "some": "settings", + "of": "interest", + }) + c.Check(err, jc.ErrorIsNil) + }) + + s.CheckCalls(c, nil, func() { + // The second uses the cache. 
+ settings, err := s.context.LeaderSettings() + c.Check(settings, jc.DeepEquals, map[string]string{ + "some": "settings", + "of": "interest", + }) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *LeaderSuite) TestLeaderSettingsCopyMap(c *gc.C) { + // Grab the settings to populate the cache... + s.accessor.results = []map[string]string{{ + "some": "settings", + "of": "interest", + }} + settings, err := s.context.LeaderSettings() + c.Check(err, gc.IsNil) + + // Put some nonsense into the returned settings... + settings["bad"] = "news" + + // Get the settings again and check they're as expected. + settings, err = s.context.LeaderSettings() + c.Check(settings, jc.DeepEquals, map[string]string{ + "some": "settings", + "of": "interest", + }) + c.Check(err, jc.ErrorIsNil) +} + +func (s *LeaderSuite) TestLeaderSettingsError(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "Read", + Args: []interface{}{"led-service"}, + }}, func() { + s.accessor.results = []map[string]string{nil} + s.Stub.SetErrors(errors.New("blort")) + settings, err := s.context.LeaderSettings() + c.Check(settings, gc.IsNil) + c.Check(err, gc.ErrorMatches, "cannot read settings: blort") + }) +} + +func (s *LeaderSuite) TestWriteLeaderSettingsSuccess(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }, { + FuncName: "Merge", + Args: []interface{}{"led-service", map[string]string{ + "some": "very", + "nice": "data", + }}, + }}, func() { + s.tracker.results = []StubTicket{true} + err := s.context.WriteLeaderSettings(map[string]string{ + "some": "very", + "nice": "data", + }) + c.Check(err, jc.ErrorIsNil) + }) +} + +func (s *LeaderSuite) TestWriteLeaderSettingsMinion(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }}, func() { + // The first call fails... + s.tracker.results = []StubTicket{false} + err := s.context.WriteLeaderSettings(map[string]string{"blah": "blah"}) + c.Check(err, gc.ErrorMatches, "cannot write settings: not the leader") + }) + + s.CheckCalls(c, nil, func() { + // The second doesn't even try. + err := s.context.WriteLeaderSettings(map[string]string{"blah": "blah"}) + c.Check(err, gc.ErrorMatches, "cannot write settings: not the leader") + }) +} + +func (s *LeaderSuite) TestWriteLeaderSettingsError(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }, { + FuncName: "Merge", + Args: []interface{}{"led-service", map[string]string{ + "some": "very", + "nice": "data", + }}, + }}, func() { + s.tracker.results = []StubTicket{true} + s.Stub.SetErrors(errors.New("glurk")) + err := s.context.WriteLeaderSettings(map[string]string{ + "some": "very", + "nice": "data", + }) + c.Check(err, gc.ErrorMatches, "cannot write settings: glurk") + }) +} + +func (s *LeaderSuite) TestWriteLeaderSettingsClearsCache(c *gc.C) { + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "Read", + Args: []interface{}{"led-service"}, + }}, func() { + // Start off by populating the cache... + s.accessor.results = []map[string]string{{ + "some": "settings", + "of": "interest", + }} + _, err := s.context.LeaderSettings() + c.Check(err, gc.IsNil) + }) + + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "ClaimLeader", + }, { + FuncName: "Merge", + Args: []interface{}{"led-service", map[string]string{ + "some": "very", + "nice": "data", + }}, + }}, func() { + // Write new data to the controller... 
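+		// (Merge drops the locally cached settings, so the Read call
+		// expected by the next CheckCalls proves the cache really was
+		// invalidated rather than served stale.)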
+ s.tracker.results = []StubTicket{true} + err := s.context.WriteLeaderSettings(map[string]string{ + "some": "very", + "nice": "data", + }) + c.Check(err, jc.ErrorIsNil) + }) + + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "Read", + Args: []interface{}{"led-service"}, + }}, func() { + s.accessor.results = []map[string]string{{ + "totally": "different", + "server": "decides", + }} + settings, err := s.context.LeaderSettings() + c.Check(err, gc.IsNil) + c.Check(settings, jc.DeepEquals, map[string]string{ + "totally": "different", + "server": "decides", + }) + c.Check(err, jc.ErrorIsNil) + }) +} + +type StubLeadershipSettingsAccessor struct { + *testing.Stub + results []map[string]string +} + +func (stub *StubLeadershipSettingsAccessor) Read(serviceName string) (result map[string]string, _ error) { + stub.MethodCall(stub, "Read", serviceName) + result, stub.results = stub.results[0], stub.results[1:] + return result, stub.NextErr() +} + +func (stub *StubLeadershipSettingsAccessor) Merge(serviceName string, settings map[string]string) error { + stub.MethodCall(stub, "Merge", serviceName, settings) + return stub.NextErr() +} + +type StubTracker struct { + leadership.Tracker + *testing.Stub + serviceName string + results []StubTicket +} + +func (stub *StubTracker) ServiceName() string { + stub.MethodCall(stub, "ServiceName") + return stub.serviceName +} + +func (stub *StubTracker) ClaimLeader() (result leadership.Ticket) { + stub.MethodCall(stub, "ClaimLeader") + result, stub.results = stub.results[0], stub.results[1:] + return result +} + +type StubTicket bool + +func (ticket StubTicket) Wait() bool { + return bool(ticket) +} + +func (ticket StubTicket) Ready() <-chan struct{} { + return alwaysReady +} + +var alwaysReady = make(chan struct{}) + +func init() { + close(alwaysReady) +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/package_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context_test + +import ( + stdtesting "testing" + + coretesting "github.com/juju/juju/testing" +) + +func TestPackage(t *stdtesting.T) { + coretesting.MgoTestPackage(t) +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/ports.go' --- src/github.com/juju/juju/worker/uniter/runner/context/ports.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/ports.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,173 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context + +import ( + "strings" + + "github.com/juju/errors" + "github.com/juju/names" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" +) + +// PortRangeInfo contains information about a pending open- or +// close-port operation for a port range. This is only exported for +// testing. +type PortRangeInfo struct { + ShouldOpen bool + RelationTag names.RelationTag +} + +// PortRange contains a port range and a relation id. Used as key to +// pendingRelations and is only exported for testing. +type PortRange struct { + Ports network.PortRange + RelationId int +} + +func validatePortRange(protocol string, fromPort, toPort int) (network.PortRange, error) { + // Validate the given range. 
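+	// (Editor's illustration of the normalisation and validation done here,
+	// matching the table in ports_test.go below:
+	//
+	//	pr, _ := validatePortRange("TCP", 100, 200) // ok: 100-200/tcp
+	//	_, err := validatePortRange("tcp", 0, 0)    // err: invalid port range 0-0/tcp
+	//	_, err = validatePortRange("foo", 10, 20)   // err: invalid protocol "foo"
+	// )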
+	newRange := network.PortRange{
+		Protocol: strings.ToLower(protocol),
+		FromPort: fromPort,
+		ToPort:   toPort,
+	}
+	if err := newRange.Validate(); err != nil {
+		return network.PortRange{}, err
+	}
+	return newRange, nil
+}
+
+func tryOpenPorts(
+	protocol string,
+	fromPort, toPort int,
+	unitTag names.UnitTag,
+	machinePorts map[network.PortRange]params.RelationUnit,
+	pendingPorts map[PortRange]PortRangeInfo,
+) error {
+	// TODO(dimitern) Once port ranges are linked to relations in
+	// addition to networks, refactor this function and test it
+	// better to ensure it handles relations properly.
+	relationId := -1
+
+	// Validate the given range.
+	newRange, err := validatePortRange(protocol, fromPort, toPort)
+	if err != nil {
+		return err
+	}
+	rangeKey := PortRange{
+		Ports:      newRange,
+		RelationId: relationId,
+	}
+
+	rangeInfo, isKnown := pendingPorts[rangeKey]
+	if isKnown {
+		if !rangeInfo.ShouldOpen {
+			// If the same range is already pending to be closed, just
+			// mark it as pending to be opened.
+			rangeInfo.ShouldOpen = true
+			pendingPorts[rangeKey] = rangeInfo
+		}
+		return nil
+	}
+
+	// Ensure there are no conflicts with existing ports on the
+	// machine.
+	for portRange, relUnit := range machinePorts {
+		relUnitTag, err := names.ParseUnitTag(relUnit.Unit)
+		if err != nil {
+			return errors.Annotatef(
+				err,
+				"machine ports %v contain invalid unit tag",
+				portRange,
+			)
+		}
+		if newRange.ConflictsWith(portRange) {
+			if portRange == newRange && relUnitTag == unitTag {
+				// The same unit trying to open the same range is just
+				// ignored.
+				return nil
+			}
+			return errors.Errorf(
+				"cannot open %v (unit %q): conflicts with existing %v (unit %q)",
+				newRange, unitTag.Id(), portRange, relUnitTag.Id(),
+			)
+		}
+	}
+	// Ensure other pending port ranges do not conflict with this one.
+	for rangeKey, rangeInfo := range pendingPorts {
+		if newRange.ConflictsWith(rangeKey.Ports) && rangeInfo.ShouldOpen {
+			return errors.Errorf(
+				"cannot open %v (unit %q): conflicts with %v requested earlier",
+				newRange, unitTag.Id(), rangeKey.Ports,
+			)
+		}
+	}
+
+	rangeInfo = pendingPorts[rangeKey]
+	rangeInfo.ShouldOpen = true
+	pendingPorts[rangeKey] = rangeInfo
+	return nil
+}
+
+func tryClosePorts(
+	protocol string,
+	fromPort, toPort int,
+	unitTag names.UnitTag,
+	machinePorts map[network.PortRange]params.RelationUnit,
+	pendingPorts map[PortRange]PortRangeInfo,
+) error {
+	// TODO(dimitern) Once port ranges are linked to relations in
+	// addition to networks, refactor this function and test it
+	// better to ensure it handles relations properly.
+	relationId := -1
+
+	// Validate the given range.
+	newRange, err := validatePortRange(protocol, fromPort, toPort)
+	if err != nil {
+		return err
+	}
+	rangeKey := PortRange{
+		Ports:      newRange,
+		RelationId: relationId,
+	}
+
+	rangeInfo, isKnown := pendingPorts[rangeKey]
+	if isKnown {
+		if rangeInfo.ShouldOpen {
+			// If the same range is already pending to be opened, just
+			// remove it from pending.
+			delete(pendingPorts, rangeKey)
+		}
+		return nil
+	}
+
+	// Ensure the range we're trying to close is opened on the
+	// machine.
+	relUnit, found := machinePorts[newRange]
+	if !found {
+		// Trying to close a range which is not open is ignored.
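+		// (Editor's note: together with the branch above this gives open and
+		// close requests cancellation semantics; tag is any names.UnitTag:
+		//
+		//	pending := map[PortRange]PortRangeInfo{}
+		//	_ = tryOpenPorts("tcp", 10, 20, tag, nil, pending)  // queues an open
+		//	_ = tryClosePorts("tcp", 10, 20, tag, nil, pending) // cancels it; pending is empty again
+		//	_ = tryClosePorts("tcp", 30, 40, tag, nil, pending) // never opened, so a no-op
+		// )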
+ return nil + } else if relUnit.Unit != unitTag.String() { + relUnitTag, err := names.ParseUnitTag(relUnit.Unit) + if err != nil { + return errors.Annotatef( + err, + "machine ports %v contain invalid unit tag", + newRange, + ) + } + return errors.Errorf( + "cannot close %v (opened by %q) from %q", + newRange, relUnitTag.Id(), unitTag.Id(), + ) + } + + rangeInfo = pendingPorts[rangeKey] + rangeInfo.ShouldOpen = false + pendingPorts[rangeKey] = rangeInfo + return nil +} === added file 'src/github.com/juju/juju/worker/uniter/runner/context/ports_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context/ports_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context/ports_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,273 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package context_test + +import ( + "github.com/juju/names" + envtesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" + "github.com/juju/juju/worker/uniter/runner/context" +) + +type PortsSuite struct { + envtesting.IsolationSuite +} + +var _ = gc.Suite(&PortsSuite{}) + +func (s *PortsSuite) TestValidatePortRange(c *gc.C) { + tests := []struct { + about string + proto string + ports []int + portRange network.PortRange + expectErr string + }{{ + about: "invalid range - 0-0/tcp", + proto: "tcp", + ports: []int{0, 0}, + expectErr: "invalid port range 0-0/tcp", + }, { + about: "invalid range - 0-1/tcp", + proto: "tcp", + ports: []int{0, 1}, + expectErr: "invalid port range 0-1/tcp", + }, { + about: "invalid range - -1-1/tcp", + proto: "tcp", + ports: []int{-1, 1}, + expectErr: "invalid port range -1-1/tcp", + }, { + about: "invalid range - 1-99999/tcp", + proto: "tcp", + ports: []int{1, 99999}, + expectErr: "invalid port range 1-99999/tcp", + }, { + about: "invalid range - 88888-99999/tcp", + proto: "tcp", + ports: []int{88888, 99999}, + expectErr: "invalid port range 88888-99999/tcp", + }, { + about: "invalid protocol - 1-65535/foo", + proto: "foo", + ports: []int{1, 65535}, + expectErr: `invalid protocol "foo", expected "tcp" or "udp"`, + }, { + about: "valid range - 100-200/udp", + proto: "UDP", + ports: []int{100, 200}, + portRange: network.PortRange{ + FromPort: 100, + ToPort: 200, + Protocol: "udp", + }, + }, { + about: "valid single port - 100/tcp", + proto: "TCP", + ports: []int{100, 100}, + portRange: network.PortRange{ + FromPort: 100, + ToPort: 100, + Protocol: "tcp", + }, + }} + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + portRange, err := context.ValidatePortRange( + test.proto, + test.ports[0], + test.ports[1], + ) + if test.expectErr != "" { + c.Check(err, gc.ErrorMatches, test.expectErr) + c.Check(portRange, jc.DeepEquals, network.PortRange{}) + } else { + c.Check(err, jc.ErrorIsNil) + c.Check(portRange, jc.DeepEquals, test.portRange) + } + } +} + +func makeMachinePorts( + unitName, proto string, fromPort, toPort int, +) map[network.PortRange]params.RelationUnit { + result := make(map[network.PortRange]params.RelationUnit) + portRange := network.PortRange{ + FromPort: fromPort, + ToPort: toPort, + Protocol: proto, + } + unitTag := "" + if unitName != "invalid" { + unitTag = names.NewUnitTag(unitName).String() + } else { + unitTag = unitName + } + result[portRange] = params.RelationUnit{ + Unit: unitTag, + } + return result +} + +func makePendingPorts( + proto string, 
fromPort, toPort int, shouldOpen bool,
+) map[context.PortRange]context.PortRangeInfo {
+	result := make(map[context.PortRange]context.PortRangeInfo)
+	portRange := network.PortRange{
+		FromPort: fromPort,
+		ToPort:   toPort,
+		Protocol: proto,
+	}
+	key := context.PortRange{
+		Ports:      portRange,
+		RelationId: -1,
+	}
+	result[key] = context.PortRangeInfo{
+		ShouldOpen: shouldOpen,
+	}
+	return result
+}
+
+type portsTest struct {
+	about         string
+	proto         string
+	ports         []int
+	machinePorts  map[network.PortRange]params.RelationUnit
+	pendingPorts  map[context.PortRange]context.PortRangeInfo
+	expectErr     string
+	expectPending map[context.PortRange]context.PortRangeInfo
+}
+
+func (p portsTest) withDefaults(proto string, fromPort, toPort int) portsTest {
+	if p.proto == "" {
+		p.proto = proto
+	}
+	if len(p.ports) != 2 {
+		p.ports = []int{fromPort, toPort}
+	}
+	if p.pendingPorts == nil {
+		p.pendingPorts = make(map[context.PortRange]context.PortRangeInfo)
+	}
+	return p
+}
+
+func (s *PortsSuite) TestTryOpenPorts(c *gc.C) {
+	tests := []portsTest{{
+		about:     "invalid port range",
+		ports:     []int{0, 0},
+		expectErr: "invalid port range 0-0/tcp",
+	}, {
+		about:     "invalid protocol - 10-20/foo",
+		proto:     "foo",
+		expectErr: `invalid protocol "foo", expected "tcp" or "udp"`,
+	}, {
+		about:         "open a new range (no machine ports yet)",
+		expectPending: makePendingPorts("tcp", 10, 20, true),
+	}, {
+		about:         "open an existing range (ignored)",
+		machinePorts:  makeMachinePorts("u/0", "tcp", 10, 20),
+		expectPending: map[context.PortRange]context.PortRangeInfo{},
+	}, {
+		about:         "open a range pending to be closed already",
+		pendingPorts:  makePendingPorts("tcp", 10, 20, false),
+		expectPending: makePendingPorts("tcp", 10, 20, true),
+	}, {
+		about:         "open a range pending to be opened already (ignored)",
+		pendingPorts:  makePendingPorts("tcp", 10, 20, true),
+		expectPending: makePendingPorts("tcp", 10, 20, true),
+	}, {
+		about:        "try opening a range when machine ports have an invalid unit tag",
+		machinePorts: makeMachinePorts("invalid", "tcp", 80, 90),
+		expectErr:    `machine ports 80-90/tcp contain invalid unit tag: "invalid" is not a valid tag`,
+	}, {
+		about:        "try opening a range conflicting with another unit",
+		machinePorts: makeMachinePorts("u/1", "tcp", 10, 20),
+		expectErr:    `cannot open 10-20/tcp \(unit "u/0"\): conflicts with existing 10-20/tcp \(unit "u/1"\)`,
+	}, {
+		about:         "open a range conflicting with the same unit (ignored)",
+		machinePorts:  makeMachinePorts("u/0", "tcp", 10, 20),
+		expectPending: map[context.PortRange]context.PortRangeInfo{},
+	}, {
+		about:        "try opening a range conflicting with another pending range",
+		pendingPorts: makePendingPorts("tcp", 5, 25, true),
+		expectErr:    `cannot open 10-20/tcp \(unit "u/0"\): conflicts with 5-25/tcp requested earlier`,
+	}}
+	for i, test := range tests {
+		c.Logf("test %d: %s", i, test.about)
+
+		test = test.withDefaults("tcp", 10, 20)
+		err := context.TryOpenPorts(
+			test.proto,
+			test.ports[0],
+			test.ports[1],
+			names.NewUnitTag("u/0"),
+			test.machinePorts,
+			test.pendingPorts,
+		)
+		if test.expectErr != "" {
+			c.Check(err, gc.ErrorMatches, test.expectErr)
+		} else {
+			c.Check(err, jc.ErrorIsNil)
+			c.Check(test.pendingPorts, jc.DeepEquals, test.expectPending)
+		}
+	}
+}
+
+func (s *PortsSuite) TestTryClosePorts(c *gc.C) {
+	tests := []portsTest{{
+		about:     "invalid port range",
+		ports:     []int{0, 0},
+		expectErr: "invalid port range 0-0/tcp",
+	}, {
+		about:     "invalid protocol - 10-20/foo",
+		proto:     "foo",
+		expectErr: `invalid protocol "foo", expected "tcp" or "udp"`,
+	}, {
+		about:         "close a new range (no machine ports yet; ignored)",
+		expectPending: map[context.PortRange]context.PortRangeInfo{},
+	}, {
+		about:         "close an existing range",
+		machinePorts:  makeMachinePorts("u/0", "tcp", 10, 20),
+		expectPending: makePendingPorts("tcp", 10, 20, false),
+	}, {
+		about:         "close a range pending to be opened already (removed from pending)",
+		pendingPorts:  makePendingPorts("tcp", 10, 20, true),
+		expectPending: map[context.PortRange]context.PortRangeInfo{},
+	}, {
+		about:         "close a range pending to be closed already (ignored)",
+		pendingPorts:  makePendingPorts("tcp", 10, 20, false),
+		expectPending: makePendingPorts("tcp", 10, 20, false),
+	}, {
+		about:        "try closing an existing range when machine ports have an invalid unit tag",
+		machinePorts: makeMachinePorts("invalid", "tcp", 10, 20),
+		expectErr:    `machine ports 10-20/tcp contain invalid unit tag: "invalid" is not a valid tag`,
+	}, {
+		about:        "try closing a range of another unit",
+		machinePorts: makeMachinePorts("u/1", "tcp", 10, 20),
+		expectErr:    `cannot close 10-20/tcp \(opened by "u/1"\) from "u/0"`,
+	}}
+	for i, test := range tests {
+		c.Logf("test %d: %s", i, test.about)
+
+		test = test.withDefaults("tcp", 10, 20)
+		err := context.TryClosePorts(
+			test.proto,
+			test.ports[0],
+			test.ports[1],
+			names.NewUnitTag("u/0"),
+			test.machinePorts,
+			test.pendingPorts,
+		)
+		if test.expectErr != "" {
+			c.Check(err, gc.ErrorMatches, test.expectErr)
+		} else {
+			c.Check(err, jc.ErrorIsNil)
+			c.Check(test.pendingPorts, jc.DeepEquals, test.expectPending)
+		}
+	}
+}

=== added file 'src/github.com/juju/juju/worker/uniter/runner/context/relation.go'
--- src/github.com/juju/juju/worker/uniter/runner/context/relation.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/runner/context/relation.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,85 @@
+// Copyright 2012-2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context
+
+import (
+	"fmt"
+
+	"github.com/juju/juju/api/uniter"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/worker/uniter/runner/jujuc"
+)
+
+type RelationInfo struct {
+	RelationUnit *uniter.RelationUnit
+	MemberNames  []string
+}
+
+// ContextRelation is the implementation of jujuc.ContextRelation.
+type ContextRelation struct {
+	ru           *uniter.RelationUnit
+	relationId   int
+	endpointName string
+
+	// settings allows read and write access to the relation unit settings.
+	settings *uniter.Settings
+
+	// cache holds remote unit membership and settings.
+	cache *RelationCache
+}
+
+// NewContextRelation creates a new context for the given relation unit.
+// The supplied cache tracks the relation's remote unit membership and
+// settings.
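+//
+// A typical construction, mirroring the tests in this package (editor's
+// sketch; relUnit and memberNames stand for a *uniter.RelationUnit and its
+// initial member list):
+//
+//	cache := NewRelationCache(relUnit.ReadSettings, memberNames)
+//	ctx := NewContextRelation(relUnit, cache)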
+func NewContextRelation(ru *uniter.RelationUnit, cache *RelationCache) *ContextRelation {
+	return &ContextRelation{
+		ru:           ru,
+		relationId:   ru.Relation().Id(),
+		endpointName: ru.Endpoint().Name,
+		cache:        cache,
+	}
+}
+
+func (ctx *ContextRelation) Id() int {
+	return ctx.relationId
+}
+
+func (ctx *ContextRelation) Name() string {
+	return ctx.endpointName
+}
+
+func (ctx *ContextRelation) FakeId() string {
+	return fmt.Sprintf("%s:%d", ctx.endpointName, ctx.relationId)
+}
+
+func (ctx *ContextRelation) UnitNames() []string {
+	return ctx.cache.MemberNames()
+}
+
+func (ctx *ContextRelation) ReadSettings(unit string) (settings params.Settings, err error) {
+	return ctx.cache.Settings(unit)
+}
+
+func (ctx *ContextRelation) Settings() (jujuc.Settings, error) {
+	if ctx.settings == nil {
+		node, err := ctx.ru.Settings()
+		if err != nil {
+			return nil, err
+		}
+		ctx.settings = node
+	}
+	return ctx.settings, nil
+}
+
+// WriteSettings persists all changes made to the unit's relation settings.
+func (ctx *ContextRelation) WriteSettings() (err error) {
+	if ctx.settings != nil {
+		err = ctx.settings.Write()
+	}
+	return
+}
+
+// NetworkConfig returns the network config for the relation.
+func (ctx *ContextRelation) NetworkConfig() ([]params.NetworkConfig, error) {
+	return ctx.ru.NetworkConfig()
+}

=== added file 'src/github.com/juju/juju/worker/uniter/runner/context/relation_test.go'
--- src/github.com/juju/juju/worker/uniter/runner/context/relation_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/runner/context/relation_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,178 @@
+// Copyright 2012-2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context_test
+
+import (
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/api"
+	apiuniter "github.com/juju/juju/api/uniter"
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/worker/uniter/runner/context"
+)
+
+type ContextRelationSuite struct {
+	testing.JujuConnSuite
+	svc *state.Service
+	rel *state.Relation
+	ru  *state.RelationUnit
+
+	st         api.Connection
+	uniter     *apiuniter.State
+	apiRelUnit *apiuniter.RelationUnit
+}
+
+var _ = gc.Suite(&ContextRelationSuite{})
+
+func (s *ContextRelationSuite) SetUpTest(c *gc.C) {
+	s.JujuConnSuite.SetUpTest(c)
+	machine, err := s.State.AddMachine("quantal", state.JobHostUnits)
+	c.Assert(err, jc.ErrorIsNil)
+	password, err := utils.RandomPassword()
+	c.Assert(err, jc.ErrorIsNil)
+	err = machine.SetPassword(password)
+	c.Assert(err, jc.ErrorIsNil)
+	err = machine.SetProvisioned("foo", "fake_nonce", nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	ch := s.AddTestingCharm(c, "riak")
+	s.svc = s.AddTestingService(c, "u", ch)
+	rels, err := s.svc.Relations()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(rels, gc.HasLen, 1)
+	s.rel = rels[0]
+	unit, err := s.svc.AddUnit()
+	c.Assert(err, jc.ErrorIsNil)
+	err = unit.AssignToMachine(machine)
+	c.Assert(err, jc.ErrorIsNil)
+	s.ru, err = s.rel.Unit(unit)
+	c.Assert(err, jc.ErrorIsNil)
+	err = s.ru.EnterScope(nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	password, err = utils.RandomPassword()
+	c.Assert(err, jc.ErrorIsNil)
+	err = unit.SetPassword(password)
+	c.Assert(err, jc.ErrorIsNil)
+	s.st = s.OpenAPIAs(c, unit.Tag(), password)
+	s.uniter, err = s.st.Uniter()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(s.uniter, gc.NotNil)
+
+	apiRel, err := 
s.uniter.Relation(s.rel.Tag().(names.RelationTag)) + c.Assert(err, jc.ErrorIsNil) + apiUnit, err := s.uniter.Unit(unit.Tag().(names.UnitTag)) + c.Assert(err, jc.ErrorIsNil) + s.apiRelUnit, err = apiRel.Unit(apiUnit) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *ContextRelationSuite) TestMemberCaching(c *gc.C) { + unit, err := s.svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + ru, err := s.rel.Unit(unit) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(map[string]interface{}{"blib": "blob"}) + c.Assert(err, jc.ErrorIsNil) + settings, err := ru.Settings() + c.Assert(err, jc.ErrorIsNil) + settings.Set("ping", "pong") + _, err = settings.Write() + c.Assert(err, jc.ErrorIsNil) + + cache := context.NewRelationCache(s.apiRelUnit.ReadSettings, []string{"u/1"}) + ctx := context.NewContextRelation(s.apiRelUnit, cache) + + // Check that uncached settings are read from state. + m, err := ctx.ReadSettings("u/1") + c.Assert(err, jc.ErrorIsNil) + expectMap := settings.Map() + expectSettings := convertMap(expectMap) + c.Assert(m, gc.DeepEquals, expectSettings) + + // Check that changes to state do not affect the cached settings. + settings.Set("ping", "pow") + _, err = settings.Write() + c.Assert(err, jc.ErrorIsNil) + m, err = ctx.ReadSettings("u/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(m, gc.DeepEquals, expectSettings) +} + +func (s *ContextRelationSuite) TestNonMemberCaching(c *gc.C) { + unit, err := s.svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + ru, err := s.rel.Unit(unit) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(map[string]interface{}{"blib": "blob"}) + c.Assert(err, jc.ErrorIsNil) + settings, err := ru.Settings() + c.Assert(err, jc.ErrorIsNil) + settings.Set("ping", "pong") + _, err = settings.Write() + c.Assert(err, jc.ErrorIsNil) + + cache := context.NewRelationCache(s.apiRelUnit.ReadSettings, nil) + ctx := context.NewContextRelation(s.apiRelUnit, cache) + + // Check that settings are read from state. + m, err := ctx.ReadSettings("u/1") + c.Assert(err, jc.ErrorIsNil) + expectMap := settings.Map() + expectSettings := convertMap(expectMap) + c.Assert(m, gc.DeepEquals, expectSettings) + + // Check that changes to state do not affect the obtained settings. + settings.Set("ping", "pow") + _, err = settings.Write() + c.Assert(err, jc.ErrorIsNil) + m, err = ctx.ReadSettings("u/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(m, gc.DeepEquals, expectSettings) +} + +func (s *ContextRelationSuite) TestLocalSettings(c *gc.C) { + ctx := context.NewContextRelation(s.apiRelUnit, nil) + + // Change Settings... + node, err := ctx.Settings() + c.Assert(err, jc.ErrorIsNil) + expectSettings := node.Map() + expectOldMap := convertSettings(expectSettings) + node.Set("change", "exciting") + + // ...and check it's not written to state. + settings, err := s.ru.ReadSettings("u/0") + c.Assert(err, jc.ErrorIsNil) + c.Assert(settings, gc.DeepEquals, expectOldMap) + + // Write settings... + err = ctx.WriteSettings() + c.Assert(err, jc.ErrorIsNil) + + // ...and check it was written to state. 
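+	// (Editor's note: the read-modify-write pattern under test is
+	//
+	//	node, _ := ctx.Settings()  // fetched once, then cached on the context
+	//	node.Set("change", "exciting")
+	//	_ = ctx.WriteSettings()    // only this call persists the change
+	//
+	// so state must now reflect the new value.)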
+	settings, err = s.ru.ReadSettings("u/0")
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(settings, gc.DeepEquals, map[string]interface{}{"change": "exciting"})
+}
+
+func convertSettings(settings params.Settings) map[string]interface{} {
+	result := make(map[string]interface{})
+	for k, v := range settings {
+		result[k] = v
+	}
+	return result
+}
+
+func convertMap(settingsMap map[string]interface{}) params.Settings {
+	result := make(params.Settings)
+	for k, v := range settingsMap {
+		result[k] = v.(string)
+	}
+	return result
+}

=== added file 'src/github.com/juju/juju/worker/uniter/runner/context/unitStorage_test.go'
--- src/github.com/juju/juju/worker/uniter/runner/context/unitStorage_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/runner/context/unitStorage_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,238 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context_test
+
+import (
+	"github.com/juju/names"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils"
+	"github.com/juju/utils/set"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/juju/apiserver/params"
+	"github.com/juju/juju/provider/ec2"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/storage/poolmanager"
+	"github.com/juju/juju/storage/provider"
+	"github.com/juju/juju/storage/provider/registry"
+	"github.com/juju/juju/worker/uniter/runner/context"
+)
+
+type unitStorageSuite struct {
+	HookContextSuite
+	expectedStorageNames         set.Strings
+	charmName                    string
+	initCons                     map[string]state.StorageConstraints
+	ch                           *state.Charm
+	initialStorageInstancesCount int
+}
+
+var _ = gc.Suite(&unitStorageSuite{})
+
+const (
+	testPool           = "block"
+	testPersistentPool = "block-persistent"
+)
+
+func (s *unitStorageSuite) SetUpTest(c *gc.C) {
+	s.HookContextSuite.SetUpTest(c)
+	setupTestStorageSupport(c, s.State)
+}
+
+func (s *unitStorageSuite) TestAddUnitStorage(c *gc.C) {
+	s.createStorageBlockUnit(c)
+	count := uint64(1)
+	s.assertUnitStorageAdded(c,
+		map[string]params.StorageConstraints{
+			"allecto": params.StorageConstraints{Count: &count}})
+}
+
+func (s *unitStorageSuite) TestAddUnitStorageIgnoresBlocks(c *gc.C) {
+	s.createStorageBlockUnit(c)
+	count := uint64(1)
+	s.BlockDestroyModel(c, "TestAddUnitStorageIgnoresBlocks")
+	s.BlockRemoveObject(c, "TestAddUnitStorageIgnoresBlocks")
+	s.BlockAllChanges(c, "TestAddUnitStorageIgnoresBlocks")
+	s.assertUnitStorageAdded(c,
+		map[string]params.StorageConstraints{
+			"allecto": params.StorageConstraints{Count: &count}})
+}
+
+func (s *unitStorageSuite) TestAddUnitStorageZeroCount(c *gc.C) {
+	s.createStorageBlockUnit(c)
+	cons := map[string]params.StorageConstraints{
+		"allecto": params.StorageConstraints{}}
+
+	ctx := s.addUnitStorage(c, cons)
+
+	// Flush the context with a success.
+	err := ctx.Flush("success", nil)
+	c.Assert(err, gc.ErrorMatches, `.*count must be specified.*`)
+
+	// Make sure no storage instances were added.
+	after, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, 0)
+	s.assertExistingStorage(c, after)
+}
+
+func (s *unitStorageSuite) TestAddUnitStorageWithSize(c *gc.C) {
+	s.createStorageBlockUnit(c)
+	size := uint64(1)
+	cons := map[string]params.StorageConstraints{
+		"allecto": params.StorageConstraints{Size: &size}}
+
+	ctx := s.addUnitStorage(c, cons)
+
+	// Flush the context with a success.
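+	// (Editor's note: a unit may only add more instances of storage it
+	// already has, so anything other than Count in the constraints is
+	// expected to be rejected when the context is flushed:
+	//
+	//	count := uint64(1)
+	//	ok := params.StorageConstraints{Count: &count} // accepted
+	//	// setting Size or Pool fails with ".*only count can be specified.*"
+	// )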
+	err := ctx.Flush("success", nil)
+	c.Assert(err, gc.ErrorMatches, `.*only count can be specified.*`)
+
+	// Make sure no storage instances were added.
+	after, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, 0)
+	s.assertExistingStorage(c, after)
+}
+
+func (s *unitStorageSuite) TestAddUnitStorageWithPool(c *gc.C) {
+	s.createStorageBlockUnit(c)
+	cons := map[string]params.StorageConstraints{
+		"allecto": params.StorageConstraints{Pool: "loop"}}
+
+	ctx := s.addUnitStorage(c, cons)
+
+	// Flush the context with a success.
+	err := ctx.Flush("success", nil)
+	c.Assert(err, gc.ErrorMatches, `.*only count can be specified.*`)
+
+	// Make sure no storage instances were added.
+	after, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, 0)
+	s.assertExistingStorage(c, after)
+}
+
+func (s *unitStorageSuite) TestAddUnitStorageAccumulated(c *gc.C) {
+	s.createStorageBlock2Unit(c)
+	count := uint64(1)
+	s.assertUnitStorageAdded(c,
+		map[string]params.StorageConstraints{
+			"multi2up": params.StorageConstraints{Count: &count}},
+		map[string]params.StorageConstraints{
+			"multi1to10": params.StorageConstraints{Count: &count}})
+}
+
+func (s *unitStorageSuite) TestAddUnitStorageAccumulatedSame(c *gc.C) {
+	s.createStorageBlock2Unit(c)
+	count := uint64(1)
+	s.assertUnitStorageAdded(c,
+		map[string]params.StorageConstraints{
+			"multi2up": params.StorageConstraints{Count: &count}},
+		map[string]params.StorageConstraints{
+			"multi2up": params.StorageConstraints{Count: &count}})
+}
+
+func setupTestStorageSupport(c *gc.C, s *state.State) {
+	stsetts := state.NewStateSettings(s)
+	poolManager := poolmanager.New(stsetts)
+	_, err := poolManager.Create(testPool, provider.LoopProviderType, map[string]interface{}{"it": "works"})
+	c.Assert(err, jc.ErrorIsNil)
+	_, err = poolManager.Create(testPersistentPool, ec2.EBS_ProviderType, map[string]interface{}{"persistent": true})
+	c.Assert(err, jc.ErrorIsNil)
+
+	registry.RegisterEnvironStorageProviders("dummy", ec2.EBS_ProviderType)
+	registry.RegisterEnvironStorageProviders("dummymodel", ec2.EBS_ProviderType)
+}
+
+func (s *unitStorageSuite) createStorageEnabledUnit(c *gc.C) {
+	s.ch = s.AddTestingCharm(c, s.charmName)
+	s.service = s.AddTestingServiceWithStorage(c, s.charmName, s.ch, s.initCons)
+	s.unit = s.AddUnit(c, s.service)
+
+	s.assertStorageCreated(c)
+	s.createHookSupport(c)
+}
+
+func (s *unitStorageSuite) createStorageBlockUnit(c *gc.C) {
+	s.charmName = "storage-block"
+	s.initCons = map[string]state.StorageConstraints{
+		"data": makeStorageCons("block", 1024, 1),
+	}
+	s.createStorageEnabledUnit(c)
+	s.assertStorageCreated(c)
+	s.createHookSupport(c)
+}
+
+func (s *unitStorageSuite) createStorageBlock2Unit(c *gc.C) {
+	s.charmName = "storage-block2"
+	s.initCons = map[string]state.StorageConstraints{
+		"multi1to10": makeStorageCons("loop", 0, 3),
+	}
+	s.createStorageEnabledUnit(c)
+	s.assertStorageCreated(c)
+	s.createHookSupport(c)
+}
+
+func (s *unitStorageSuite) assertStorageCreated(c *gc.C) {
+	all, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	s.initialStorageInstancesCount = len(all)
+	s.expectedStorageNames = set.NewStrings()
+	for _, one := range all {
+		s.expectedStorageNames.Add(one.StorageName())
+	}
+}
+
+func (s *unitStorageSuite) createHookSupport(c *gc.C) {
+	password, err := utils.RandomPassword()
+	c.Assert(err, jc.ErrorIsNil)
+	err = s.unit.SetPassword(password)
+	c.Assert(err, 
jc.ErrorIsNil)
+	s.st = s.OpenAPIAs(c, s.unit.Tag(), password)
+	s.uniter, err = s.st.Uniter()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(s.uniter, gc.NotNil)
+	s.apiUnit, err = s.uniter.Unit(s.unit.Tag().(names.UnitTag))
+	c.Assert(err, jc.ErrorIsNil)
+
+	err = s.unit.SetCharmURL(s.ch.URL())
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func makeStorageCons(pool string, size, count uint64) state.StorageConstraints {
+	return state.StorageConstraints{Pool: pool, Size: size, Count: count}
+}
+
+func (s *unitStorageSuite) addUnitStorage(c *gc.C, cons ...map[string]params.StorageConstraints) *context.HookContext {
+	// Get the context.
+	ctx := s.getHookContext(c, s.State.ModelUUID(), -1, "", noProxies)
+	c.Assert(ctx.UnitName(), gc.Equals, s.unit.Name())
+
+	for _, one := range cons {
+		for storage := range one {
+			s.expectedStorageNames.Add(storage)
+		}
+		ctx.AddUnitStorage(one)
+	}
+	return ctx
+}
+
+func (s *unitStorageSuite) assertUnitStorageAdded(c *gc.C, cons ...map[string]params.StorageConstraints) {
+	ctx := s.addUnitStorage(c, cons...)
+
+	// Flush the context with a success.
+	err := ctx.Flush("success", nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	after, err := s.State.AllStorageInstances()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, len(cons))
+	s.assertExistingStorage(c, after)
+}
+
+func (s *unitStorageSuite) assertExistingStorage(c *gc.C, all []state.StorageInstance) {
+	for _, one := range all {
+		c.Assert(s.expectedStorageNames.Contains(one.StorageName()), jc.IsTrue)
+	}
+}

=== added file 'src/github.com/juju/juju/worker/uniter/runner/context/util_test.go'
--- src/github.com/juju/juju/worker/uniter/runner/context/util_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/uniter/runner/context/util_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,400 @@
+// Copyright 2012-2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package context_test
+
+import (
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/juju/names"
+	jujutesting "github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils"
+	"github.com/juju/utils/proxy"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+
+	"github.com/juju/juju/api"
+	"github.com/juju/juju/api/block"
+	"github.com/juju/juju/api/uniter"
+	"github.com/juju/juju/instance"
+	"github.com/juju/juju/juju/testing"
+	"github.com/juju/juju/network"
+	"github.com/juju/juju/state"
+	"github.com/juju/juju/state/multiwatcher"
+	"github.com/juju/juju/storage"
+	coretesting "github.com/juju/juju/testing"
+	"github.com/juju/juju/worker/uniter/runner/context"
+	"github.com/juju/juju/worker/uniter/runner/jujuc"
+	runnertesting "github.com/juju/juju/worker/uniter/runner/testing"
+)
+
+var noProxies = proxy.Settings{}
+var apiAddrs = []string{"a1:123", "a2:123"}
+var expectedApiAddrs = strings.Join(apiAddrs, " ")
+
+// HookContextSuite contains shared setup for various other test suites. Test
+// methods should not be added to this type, because they'll get run repeatedly.
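+//
+// A derived suite embeds it and layers its own fixtures on top, as
+// unitStorageSuite above does (editor's sketch; mySuite is hypothetical):
+//
+//	type mySuite struct {
+//		HookContextSuite
+//	}
+//
+//	func (s *mySuite) SetUpTest(c *gc.C) {
+//		s.HookContextSuite.SetUpTest(c)
+//		// suite-specific fixtures here
+//	}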
+type HookContextSuite struct {
+	testing.JujuConnSuite
+	service  *state.Service
+	unit     *state.Unit
+	machine  *state.Machine
+	relch    *state.Charm
+	relunits map[int]*state.RelationUnit
+	storage  *runnertesting.StorageContextAccessor
+	clock    *coretesting.Clock
+
+	st             api.Connection
+	uniter         *uniter.State
+	apiUnit        *uniter.Unit
+	meteredApiUnit *uniter.Unit
+	meteredCharm   *state.Charm
+	apiRelunits    map[int]*uniter.RelationUnit
+	BlockHelper
+}
+
+func (s *HookContextSuite) SetUpTest(c *gc.C) {
+	var err error
+	s.JujuConnSuite.SetUpTest(c)
+	s.BlockHelper = NewBlockHelper(s.APIState)
+	c.Assert(s.BlockHelper, gc.NotNil)
+	s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() })
+
+	// reset
+	s.machine = nil
+
+	sch := s.AddTestingCharm(c, "wordpress")
+	s.service = s.AddTestingService(c, "u", sch)
+	s.unit = s.AddUnit(c, s.service)
+
+	s.meteredCharm = s.AddTestingCharm(c, "metered")
+	meteredService := s.AddTestingService(c, "m", s.meteredCharm)
+	meteredUnit := s.addUnit(c, meteredService)
+	err = meteredUnit.SetCharmURL(s.meteredCharm.URL())
+	c.Assert(err, jc.ErrorIsNil)
+
+	password, err := utils.RandomPassword()
+	c.Assert(err, jc.ErrorIsNil)
+	err = s.unit.SetPassword(password)
+	c.Assert(err, jc.ErrorIsNil)
+	s.st = s.OpenAPIAs(c, s.unit.Tag(), password)
+	s.uniter, err = s.st.Uniter()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(s.uniter, gc.NotNil)
+	s.apiUnit, err = s.uniter.Unit(s.unit.Tag().(names.UnitTag))
+	c.Assert(err, jc.ErrorIsNil)
+
+	err = meteredUnit.SetPassword(password)
+	c.Assert(err, jc.ErrorIsNil)
+	meteredState := s.OpenAPIAs(c, meteredUnit.Tag(), password)
+	meteredUniter, err := meteredState.Uniter()
+	c.Assert(err, jc.ErrorIsNil)
+	s.meteredApiUnit, err = meteredUniter.Unit(meteredUnit.Tag().(names.UnitTag))
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Note: The unit must always have a charm URL set, because this
+	// happens as part of the installation process (that happens
+	// before the initial install hook).
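+	// (Editor's note: the removed contextfactory.go at the end of this diff
+	// shows why — for collect-metrics hooks it resolves the charm's metrics
+	// definitions via unit.CharmURL(), which must therefore be set.)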
+ err = s.unit.SetCharmURL(sch.URL()) + c.Assert(err, jc.ErrorIsNil) + s.relch = s.AddTestingCharm(c, "mysql") + s.relunits = map[int]*state.RelationUnit{} + s.apiRelunits = map[int]*uniter.RelationUnit{} + s.AddContextRelation(c, "db0") + s.AddContextRelation(c, "db1") + + storageData0 := names.NewStorageTag("data/0") + s.storage = &runnertesting.StorageContextAccessor{ + map[names.StorageTag]*runnertesting.ContextStorage{ + storageData0: &runnertesting.ContextStorage{ + storageData0, + storage.StorageKindBlock, + "/dev/sdb", + }, + }, + } + + s.clock = coretesting.NewClock(time.Time{}) +} + +func (s *HookContextSuite) GetContext( + c *gc.C, relId int, remoteName string, +) jujuc.Context { + uuid, err := utils.NewUUID() + c.Assert(err, jc.ErrorIsNil) + return s.getHookContext( + c, uuid.String(), relId, remoteName, noProxies, + ) +} + +func (s *HookContextSuite) addUnit(c *gc.C, svc *state.Service) *state.Unit { + unit, err := svc.AddUnit() + c.Assert(err, jc.ErrorIsNil) + if s.machine != nil { + err = unit.AssignToMachine(s.machine) + c.Assert(err, jc.ErrorIsNil) + return unit + } + + err = s.State.AssignUnit(unit, state.AssignCleanEmpty) + c.Assert(err, jc.ErrorIsNil) + machineId, err := unit.AssignedMachineId() + c.Assert(err, jc.ErrorIsNil) + s.machine, err = s.State.Machine(machineId) + c.Assert(err, jc.ErrorIsNil) + zone := "a-zone" + hwc := instance.HardwareCharacteristics{ + AvailabilityZone: &zone, + } + err = s.machine.SetProvisioned("i-exist", "fake_nonce", &hwc) + c.Assert(err, jc.ErrorIsNil) + return unit +} + +func (s *HookContextSuite) AddUnit(c *gc.C, svc *state.Service) *state.Unit { + unit := s.addUnit(c, svc) + name := strings.Replace(unit.Name(), "/", "-", 1) + privateAddr := network.NewScopedAddress(name+".testing.invalid", network.ScopeCloudLocal) + err := s.machine.SetProviderAddresses(privateAddr) + c.Assert(err, jc.ErrorIsNil) + return unit +} + +func (s *HookContextSuite) AddContextRelation(c *gc.C, name string) { + s.AddTestingService(c, name, s.relch) + eps, err := s.State.InferEndpoints("u", name) + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) 
+ c.Assert(err, jc.ErrorIsNil) + ru, err := rel.Unit(s.unit) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(map[string]interface{}{"relation-name": name}) + c.Assert(err, jc.ErrorIsNil) + s.relunits[rel.Id()] = ru + apiRel, err := s.uniter.Relation(rel.Tag().(names.RelationTag)) + c.Assert(err, jc.ErrorIsNil) + apiRelUnit, err := apiRel.Unit(s.apiUnit) + c.Assert(err, jc.ErrorIsNil) + s.apiRelunits[rel.Id()] = apiRelUnit +} + +func (s *HookContextSuite) getHookContext(c *gc.C, uuid string, relid int, + remote string, proxies proxy.Settings) *context.HookContext { + if relid != -1 { + _, found := s.apiRelunits[relid] + c.Assert(found, jc.IsTrue) + } + facade, err := s.st.Uniter() + c.Assert(err, jc.ErrorIsNil) + + relctxs := map[int]*context.ContextRelation{} + for relId, relUnit := range s.apiRelunits { + cache := context.NewRelationCache(relUnit.ReadSettings, nil) + relctxs[relId] = context.NewContextRelation(relUnit, cache) + } + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + + context, err := context.NewHookContext(s.apiUnit, facade, "TestCtx", uuid, + env.Name(), relid, remote, relctxs, apiAddrs, + proxies, false, nil, nil, s.machine.Tag().(names.MachineTag), + runnertesting.NewRealPaths(c), s.clock) + c.Assert(err, jc.ErrorIsNil) + return context +} + +func (s *HookContextSuite) getMeteredHookContext(c *gc.C, uuid string, relid int, + remote string, proxies proxy.Settings, canAddMetrics bool, metrics *charm.Metrics, paths runnertesting.RealPaths) *context.HookContext { + if relid != -1 { + _, found := s.apiRelunits[relid] + c.Assert(found, jc.IsTrue) + } + facade, err := s.st.Uniter() + c.Assert(err, jc.ErrorIsNil) + + relctxs := map[int]*context.ContextRelation{} + for relId, relUnit := range s.apiRelunits { + cache := context.NewRelationCache(relUnit.ReadSettings, nil) + relctxs[relId] = context.NewContextRelation(relUnit, cache) + } + + context, err := context.NewHookContext(s.meteredApiUnit, facade, "TestCtx", uuid, + "test-model-name", relid, remote, relctxs, apiAddrs, + proxies, canAddMetrics, metrics, nil, s.machine.Tag().(names.MachineTag), + paths, s.clock) + c.Assert(err, jc.ErrorIsNil) + return context +} + +func (s *HookContextSuite) metricsDefinition(name string) *charm.Metrics { + return &charm.Metrics{Metrics: map[string]charm.Metric{name: {Type: charm.MetricTypeGauge, Description: "generated metric"}}} +} + +func (s *HookContextSuite) AssertCoreContext(c *gc.C, ctx *context.HookContext) { + c.Assert(ctx.UnitName(), gc.Equals, "u/0") + c.Assert(context.ContextMachineTag(ctx), jc.DeepEquals, names.NewMachineTag("0")) + + expect, expectErr := s.unit.PrivateAddress() + actual, actualErr := ctx.PrivateAddress() + c.Assert(actual, gc.Equals, expect.Value) + c.Assert(actualErr, jc.DeepEquals, expectErr) + + expect, expectErr = s.unit.PublicAddress() + actual, actualErr = ctx.PublicAddress() + c.Assert(actual, gc.Equals, expect.Value) + c.Assert(actualErr, jc.DeepEquals, expectErr) + + env, err := s.State.Model() + c.Assert(err, jc.ErrorIsNil) + name, uuid := context.ContextEnvInfo(ctx) + c.Assert(name, gc.Equals, env.Name()) + c.Assert(uuid, gc.Equals, env.UUID()) + + ids, err := ctx.RelationIds() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ids, gc.HasLen, 2) + + r, err := ctx.Relation(0) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Name(), gc.Equals, "db") + c.Assert(r.FakeId(), gc.Equals, "db:0") + + r, err = ctx.Relation(1) + c.Assert(err, jc.ErrorIsNil) + c.Assert(r.Name(), gc.Equals, "db") + c.Assert(r.FakeId(), gc.Equals, "db:1") + + az, err := 
ctx.AvailabilityZone() + c.Assert(err, jc.ErrorIsNil) + c.Assert(az, gc.Equals, "a-zone") +} + +func (s *HookContextSuite) AssertNotActionContext(c *gc.C, ctx *context.HookContext) { + actionData, err := ctx.ActionData() + c.Assert(actionData, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "not running an action") +} + +func (s *HookContextSuite) AssertActionContext(c *gc.C, ctx *context.HookContext) { + actionData, err := ctx.ActionData() + c.Assert(actionData, gc.NotNil) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *HookContextSuite) AssertNotStorageContext(c *gc.C, ctx *context.HookContext) { + storageAttachment, err := ctx.HookStorage() + c.Assert(storageAttachment, gc.IsNil) + c.Assert(err, gc.ErrorMatches, ".*") +} + +func (s *HookContextSuite) AssertStorageContext(c *gc.C, ctx *context.HookContext, id string, attachment storage.StorageAttachmentInfo) { + fromCache, err := ctx.HookStorage() + c.Assert(err, jc.ErrorIsNil) + c.Assert(fromCache, gc.NotNil) + c.Assert(fromCache.Tag().Id(), gc.Equals, id) + c.Assert(fromCache.Kind(), gc.Equals, attachment.Kind) + c.Assert(fromCache.Location(), gc.Equals, attachment.Location) +} + +func (s *HookContextSuite) AssertRelationContext(c *gc.C, ctx *context.HookContext, relId int, remoteUnit string) *context.ContextRelation { + actualRemoteUnit, _ := ctx.RemoteUnitName() + c.Assert(actualRemoteUnit, gc.Equals, remoteUnit) + rel, err := ctx.HookRelation() + c.Assert(err, jc.ErrorIsNil) + c.Assert(rel.Id(), gc.Equals, relId) + return rel.(*context.ContextRelation) +} + +func (s *HookContextSuite) AssertNotRelationContext(c *gc.C, ctx *context.HookContext) { + rel, err := ctx.HookRelation() + c.Assert(rel, gc.IsNil) + c.Assert(err, gc.ErrorMatches, ".*") +} + +type BlockHelper struct { + blockClient *block.Client +} + +// NewBlockHelper creates a block switch used in testing +// to manage desired juju blocks. +func NewBlockHelper(st api.Connection) BlockHelper { + return BlockHelper{ + blockClient: block.NewClient(st), + } +} + +// on switches on desired block and +// asserts that no errors were encountered. +func (s *BlockHelper) on(c *gc.C, blockType multiwatcher.BlockType, msg string) { + c.Assert(s.blockClient.SwitchBlockOn(string(blockType), msg), gc.IsNil) +} + +// BlockAllChanges switches changes block on. +// This prevents all changes to juju environment. +func (s *BlockHelper) BlockAllChanges(c *gc.C, msg string) { + s.on(c, multiwatcher.BlockChange, msg) +} + +// BlockRemoveObject switches remove block on. +// This prevents any object/entity removal on juju environment +func (s *BlockHelper) BlockRemoveObject(c *gc.C, msg string) { + s.on(c, multiwatcher.BlockRemove, msg) +} + +// BlockDestroyModel switches destroy block on. +// This prevents juju environment destruction. +func (s *BlockHelper) BlockDestroyModel(c *gc.C, msg string) { + s.on(c, multiwatcher.BlockDestroy, msg) +} + +func (s *BlockHelper) Close() { + s.blockClient.Close() +} + +// StubMetricsRecorder implements the MetricsRecorder interface. +type StubMetricsRecorder struct { + *jujutesting.Stub +} + +// AddMetric implements the MetricsRecorder interface. +func (s StubMetricsRecorder) AddMetric(key, value string, created time.Time) error { + s.AddCall("AddMetric", key, value, created) + return nil +} + +func (mr *StubMetricsRecorder) IsDeclaredMetric(key string) bool { + mr.MethodCall(mr, "IsDeclaredMetric", key) + return true +} + +// Close implements the MetricsRecorder interface. 
+func (s StubMetricsRecorder) Close() error { + s.AddCall("Close") + return nil +} + +// MockEnvPaths implements Paths for tests that don't need to actually touch +// the filesystem. +type MockEnvPaths struct{} + +func (MockEnvPaths) GetToolsDir() string { + return "path-to-tools" +} + +func (MockEnvPaths) GetCharmDir() string { + return "path-to-charm" +} + +func (MockEnvPaths) GetJujucSocket() string { + return "path-to-jujuc.socket" +} + +func (MockEnvPaths) GetMetricsSpoolDir() string { + return "path-to-metrics-spool-dir" +} + +func (MockEnvPaths) ComponentDir(name string) string { + return filepath.Join("path-to-base-dir", name) +} === removed file 'src/github.com/juju/juju/worker/uniter/runner/context_test.go' --- src/github.com/juju/juju/worker/uniter/runner/context_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/context_test.go 1970-01-01 00:00:00 +0000 @@ -1,406 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner_test - -import ( - "os" - "runtime" - "syscall" - "time" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - "github.com/juju/utils/exec" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" - "github.com/juju/juju/state" - "github.com/juju/juju/worker/uniter/runner" - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -type InterfaceSuite struct { - HookContextSuite - stub testing.Stub -} - -var _ = gc.Suite(&InterfaceSuite{}) - -func (s *InterfaceSuite) TestUnitName(c *gc.C) { - ctx := s.GetContext(c, -1, "") - c.Assert(ctx.UnitName(), gc.Equals, "u/0") -} - -func (s *InterfaceSuite) TestHookRelation(c *gc.C) { - ctx := s.GetContext(c, -1, "") - r, ok := ctx.HookRelation() - c.Assert(ok, jc.IsFalse) - c.Assert(r, gc.IsNil) -} - -func (s *InterfaceSuite) TestRemoteUnitName(c *gc.C) { - ctx := s.GetContext(c, -1, "") - name, found := ctx.RemoteUnitName() - c.Assert(found, jc.IsFalse) - c.Assert(name, gc.Equals, "") -} - -func (s *InterfaceSuite) TestRelationIds(c *gc.C) { - ctx := s.GetContext(c, -1, "") - relIds := ctx.RelationIds() - c.Assert(relIds, gc.HasLen, 2) - r, found := ctx.Relation(0) - c.Assert(found, jc.IsTrue) - c.Assert(r.Name(), gc.Equals, "db") - c.Assert(r.FakeId(), gc.Equals, "db:0") - r, found = ctx.Relation(123) - c.Assert(found, jc.IsFalse) - c.Assert(r, gc.IsNil) -} - -func (s *InterfaceSuite) TestRelationContext(c *gc.C) { - ctx := s.GetContext(c, 1, "") - r, ok := ctx.HookRelation() - c.Assert(ok, jc.IsTrue) - c.Assert(r.Name(), gc.Equals, "db") - c.Assert(r.FakeId(), gc.Equals, "db:1") -} - -func (s *InterfaceSuite) TestRelationContextWithRemoteUnitName(c *gc.C) { - ctx := s.GetContext(c, 1, "u/123") - name, found := ctx.RemoteUnitName() - c.Assert(found, jc.IsTrue) - c.Assert(name, gc.Equals, "u/123") -} - -func (s *InterfaceSuite) TestAddingMetricsWhenNotEnabledFails(c *gc.C) { - ctx := s.GetContext(c, 1, "u/123") - err := ctx.AddMetric("key", "123", time.Now()) - c.Assert(err, gc.ErrorMatches, "metrics disabled") -} - -func (s *InterfaceSuite) TestAddingMetrics(c *gc.C) { - uuid := utils.MustNewUUID() - ctx := s.getMeteredHookContext(c, uuid.String(), -1, "", noProxies, true, s.metricsDefinition("key"), NewRealPaths(c)) - cleanup := runner.PatchMetricsRecorder(ctx, &StubMetricsRecorder{&s.stub}) - defer cleanup() - - now := time.Now() - err := ctx.AddMetric("key", "123", now) - c.Assert(err, jc.ErrorIsNil) - - 
s.stub.CheckCalls(c, - []testing.StubCall{{ - FuncName: "AddMetric", - Args: []interface{}{"key", "123", now}, - }}) -} - -func (s *InterfaceSuite) TestAvailabilityZone(c *gc.C) { - ctx := s.GetContext(c, -1, "") - zone, ok := ctx.AvailabilityZone() - c.Check(ok, jc.IsTrue) - c.Check(zone, gc.Equals, "a-zone") -} - -func (s *InterfaceSuite) TestUnitStatus(c *gc.C) { - ctx := s.GetContext(c, -1, "") - defer runner.PatchCachedStatus(ctx.(runner.Context), "maintenance", "working", map[string]interface{}{"hello": "world"})() - status, err := ctx.UnitStatus() - c.Check(err, jc.ErrorIsNil) - c.Check(status.Status, gc.Equals, "maintenance") - c.Check(status.Info, gc.Equals, "working") - c.Check(status.Data, gc.DeepEquals, map[string]interface{}{"hello": "world"}) -} - -func (s *InterfaceSuite) TestSetUnitStatus(c *gc.C) { - ctx := s.GetContext(c, -1, "") - status := jujuc.StatusInfo{ - Status: "maintenance", - Info: "doing work", - } - err := ctx.SetUnitStatus(status) - c.Check(err, jc.ErrorIsNil) - unitStatus, err := ctx.UnitStatus() - c.Check(err, jc.ErrorIsNil) - c.Check(unitStatus.Status, gc.Equals, "maintenance") - c.Check(unitStatus.Info, gc.Equals, "doing work") - c.Check(unitStatus.Data, gc.DeepEquals, map[string]interface{}{}) -} - -func (s *InterfaceSuite) TestSetUnitStatusUpdatesFlag(c *gc.C) { - ctx := s.GetContext(c, -1, "") - c.Assert(ctx.(runner.Context).HasExecutionSetUnitStatus(), jc.IsFalse) - status := jujuc.StatusInfo{ - Status: "maintenance", - Info: "doing work", - } - err := ctx.SetUnitStatus(status) - c.Check(err, jc.ErrorIsNil) - c.Assert(ctx.(runner.Context).HasExecutionSetUnitStatus(), jc.IsTrue) -} - -func (s *InterfaceSuite) TestUnitStatusCaching(c *gc.C) { - ctx := s.GetContext(c, -1, "") - status, err := ctx.UnitStatus() - c.Check(err, jc.ErrorIsNil) - c.Check(status.Status, gc.Equals, "unknown") - c.Check(status.Data, gc.DeepEquals, map[string]interface{}{}) - - // Change remote state. - err = s.unit.SetStatus(state.StatusActive, "it works", nil) - c.Assert(err, jc.ErrorIsNil) - - // Local view is unchanged. - status, err = ctx.UnitStatus() - c.Check(err, jc.ErrorIsNil) - c.Check(status.Status, gc.Equals, "unknown") - c.Check(status.Data, gc.DeepEquals, map[string]interface{}{}) -} - -func (s *InterfaceSuite) TestUnitCaching(c *gc.C) { - ctx := s.GetContext(c, -1, "") - pr, ok := ctx.PrivateAddress() - c.Assert(ok, jc.IsTrue) - c.Assert(pr, gc.Equals, "u-0.testing.invalid") - pa, ok := ctx.PublicAddress() - c.Assert(ok, jc.IsTrue) - // Initially the public address is the same as the private address since - // the "most public" address is chosen. - c.Assert(pr, gc.Equals, pa) - - // Change remote state. - err := s.machine.SetProviderAddresses( - network.NewScopedAddress("blah.testing.invalid", network.ScopePublic), - ) - c.Assert(err, jc.ErrorIsNil) - - // Local view is unchanged. - pr, ok = ctx.PrivateAddress() - c.Assert(ok, jc.IsTrue) - c.Assert(pr, gc.Equals, "u-0.testing.invalid") - pa, ok = ctx.PublicAddress() - c.Assert(ok, jc.IsTrue) - c.Assert(pr, gc.Equals, pa) -} - -func (s *InterfaceSuite) TestConfigCaching(c *gc.C) { - ctx := s.GetContext(c, -1, "") - settings, err := ctx.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{"blog-title": "My Title"}) - - // Change remote config. - err = s.service.UpdateConfigSettings(charm.Settings{ - "blog-title": "Something Else", - }) - c.Assert(err, jc.ErrorIsNil) - - // Local view is not changed. 
- settings, err = ctx.ConfigSettings() - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, charm.Settings{"blog-title": "My Title"}) -} - -// TestNonActionCallsToActionMethodsFail does exactly what its name says: -// it simply makes sure that Action-related calls to HookContexts with a nil -// actionData member error out correctly. -func (s *InterfaceSuite) TestNonActionCallsToActionMethodsFail(c *gc.C) { - ctx := runner.HookContext{} - _, err := ctx.ActionParams() - c.Check(err, gc.ErrorMatches, "not running an action") - err = ctx.SetActionFailed() - c.Check(err, gc.ErrorMatches, "not running an action") - err = ctx.SetActionMessage("foo") - c.Check(err, gc.ErrorMatches, "not running an action") - err = ctx.UpdateActionResults([]string{"1", "2", "3"}, "value") - c.Check(err, gc.ErrorMatches, "not running an action") -} - -// TestUpdateActionResults demonstrates that UpdateActionResults functions -// as expected. -func (s *InterfaceSuite) TestUpdateActionResults(c *gc.C) { - tests := []struct { - initial map[string]interface{} - keys []string - value string - expected map[string]interface{} - }{{ - initial: map[string]interface{}{}, - keys: []string{"foo"}, - value: "bar", - expected: map[string]interface{}{ - "foo": "bar", - }, - }, { - initial: map[string]interface{}{ - "foo": "bar", - }, - keys: []string{"foo", "bar"}, - value: "baz", - expected: map[string]interface{}{ - "foo": map[string]interface{}{ - "bar": "baz", - }, - }, - }, { - initial: map[string]interface{}{ - "foo": map[string]interface{}{ - "bar": "baz", - }, - }, - keys: []string{"foo"}, - value: "bar", - expected: map[string]interface{}{ - "foo": "bar", - }, - }} - - for i, t := range tests { - c.Logf("UpdateActionResults test %d: %#v: %#v", i, t.keys, t.value) - hctx := runner.GetStubActionContext(t.initial) - err := hctx.UpdateActionResults(t.keys, t.value) - c.Assert(err, jc.ErrorIsNil) - actionData, err := hctx.ActionData() - c.Assert(err, jc.ErrorIsNil) - c.Assert(actionData.ResultsMap, jc.DeepEquals, t.expected) - } -} - -// TestSetActionFailed ensures SetActionFailed works properly. -func (s *InterfaceSuite) TestSetActionFailed(c *gc.C) { - hctx := runner.GetStubActionContext(nil) - err := hctx.SetActionFailed() - c.Assert(err, jc.ErrorIsNil) - actionData, err := hctx.ActionData() - c.Assert(err, jc.ErrorIsNil) - c.Check(actionData.Failed, jc.IsTrue) -} - -// TestSetActionMessage ensures SetActionMessage works properly. 
-func (s *InterfaceSuite) TestSetActionMessage(c *gc.C) { - hctx := runner.GetStubActionContext(nil) - err := hctx.SetActionMessage("because reasons") - c.Assert(err, jc.ErrorIsNil) - actionData, err := hctx.ActionData() - c.Check(err, jc.ErrorIsNil) - c.Check(actionData.ResultsMessage, gc.Equals, "because reasons") -} - -func (s *InterfaceSuite) startProcess(c *gc.C) *os.Process { - command := exec.RunParams{ - Commands: "trap 'exit 0' SIGTERM; while true;do sleep 1;done", - } - err := command.Run() - c.Assert(err, jc.ErrorIsNil) - p := command.Process() - s.AddCleanup(func(c *gc.C) { p.Kill() }) - return p -} - -func (s *InterfaceSuite) TestRequestRebootAfterHook(c *gc.C) { - if runtime.GOOS == "windows" { - c.Skip("bug 1403084: Cannot send sigterm on windows") - } - ctx := runner.HookContext{} - p := s.startProcess(c) - ctx.SetProcess(p) - err := ctx.RequestReboot(jujuc.RebootAfterHook) - c.Assert(err, jc.ErrorIsNil) - err = p.Signal(syscall.SIGTERM) - c.Assert(err, jc.ErrorIsNil) - _, err = p.Wait() - c.Assert(err, jc.ErrorIsNil) - priority := ctx.GetRebootPriority() - c.Assert(priority, gc.Equals, jujuc.RebootAfterHook) -} - -func (s *InterfaceSuite) TestRequestRebootNow(c *gc.C) { - ctx := runner.HookContext{} - p := s.startProcess(c) - ctx.SetProcess(p) - go func() { - _, err := p.Wait() - c.Assert(err, jc.ErrorIsNil) - }() - err := ctx.RequestReboot(jujuc.RebootNow) - c.Assert(err, jc.ErrorIsNil) - priority := ctx.GetRebootPriority() - c.Assert(priority, gc.Equals, jujuc.RebootNow) -} - -func (s *InterfaceSuite) TestRequestRebootNowNoProcess(c *gc.C) { - // A normal hook run or a juju-run command will record the *os.Process - // object of the running command, in HookContext. When requesting a - // reboot with the --now flag, the process is killed and only - // then will we set the reboot priority. This test basically simulates - // the case when the process calling juju-reboot is not recorded. 
- ctx := runner.HookContext{} - err := ctx.RequestReboot(jujuc.RebootNow) - c.Assert(err, gc.ErrorMatches, "no process to kill") - priority := ctx.GetRebootPriority() - c.Assert(priority, gc.Equals, jujuc.RebootNow) -} - -func (s *InterfaceSuite) TestStorageAddConstraints(c *gc.C) { - expected := map[string][]params.StorageConstraints{ - "data": []params.StorageConstraints{ - params.StorageConstraints{}, - }, - } - - ctx := runner.HookContext{} - addStorageToContext(&ctx, "data", params.StorageConstraints{}) - assertStorageAddInContext(c, ctx, expected) -} - -var two = uint64(2) - -func (s *InterfaceSuite) TestStorageAddConstraintsSameStorage(c *gc.C) { - expected := map[string][]params.StorageConstraints{ - "data": []params.StorageConstraints{ - params.StorageConstraints{}, - params.StorageConstraints{Count: &two}, - }, - } - - ctx := runner.HookContext{} - addStorageToContext(&ctx, "data", params.StorageConstraints{}) - addStorageToContext(&ctx, "data", params.StorageConstraints{Count: &two}) - assertStorageAddInContext(c, ctx, expected) -} - -func (s *InterfaceSuite) TestStorageAddConstraintsDifferentStorage(c *gc.C) { - expected := map[string][]params.StorageConstraints{ - "data": []params.StorageConstraints{params.StorageConstraints{}}, - "diff": []params.StorageConstraints{ - params.StorageConstraints{Count: &two}}, - } - - ctx := runner.HookContext{} - addStorageToContext(&ctx, "data", params.StorageConstraints{}) - addStorageToContext(&ctx, "diff", params.StorageConstraints{Count: &two}) - assertStorageAddInContext(c, ctx, expected) -} - -func addStorageToContext(ctx *runner.HookContext, - name string, - cons params.StorageConstraints, -) { - addOne := map[string]params.StorageConstraints{name: cons} - ctx.AddUnitStorage(addOne) -} - -func assertStorageAddInContext(c *gc.C, - ctx runner.HookContext, expected map[string][]params.StorageConstraints, -) { - obtained := ctx.StorageAddConstraints() - c.Assert(len(obtained), gc.Equals, len(expected)) - for k, v := range obtained { - c.Assert(v, jc.SameContents, expected[k]) - } -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/contextfactory.go' --- src/github.com/juju/juju/worker/uniter/runner/contextfactory.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/contextfactory.go 1970-01-01 00:00:00 +0000 @@ -1,367 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner - -import ( - "fmt" - "math/rand" - "time" - - "github.com/juju/errors" - "github.com/juju/names" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/api/uniter" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/worker/leadership" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/metrics" - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -// ContextFactory represents a long-lived object that can create execution contexts -// relevant to a specific unit. -type ContextFactory interface { - // CommandContext creates a new context for running a juju command. - CommandContext(commandInfo CommandInfo) (*HookContext, error) - - // HookContext creates a new context for running a juju hook. - HookContext(hookInfo hook.Info) (*HookContext, error) - - // ActionContext creates a new context for running a juju action. - ActionContext(actionData *ActionData) (*HookContext, error) -} - -// StorageContextAccessor is an interface providing access to StorageContexts -// for a jujuc.Context. 
-type StorageContextAccessor interface { - - // StorageTags returns the tags of storage instances attached to - // the unit. - StorageTags() []names.StorageTag - - // Storage returns the jujuc.ContextStorageAttachment with the - // supplied tag if it was found, and whether it was found. - Storage(names.StorageTag) (jujuc.ContextStorageAttachment, bool) -} - -// RelationsFunc is used to get snapshots of relation membership at context -// creation time. -type RelationsFunc func() map[int]*RelationInfo - -type contextFactory struct { - // API connection fields; unit should be deprecated, but isn't yet. - unit *uniter.Unit - state *uniter.State - tracker leadership.Tracker - - // Fields that shouldn't change in a factory's lifetime. - paths Paths - envUUID string - envName string - machineTag names.MachineTag - storage StorageContextAccessor - zone string - - // Callback to get relation state snapshot. - getRelationInfos RelationsFunc - relationCaches map[int]*RelationCache - - // For generating "unique" context ids. - rand *rand.Rand -} - -// NewContextFactory returns a ContextFactory capable of creating execution contexts backed -// by the supplied unit's supplied API connection. -func NewContextFactory( - state *uniter.State, - unitTag names.UnitTag, - tracker leadership.Tracker, - getRelationInfos RelationsFunc, - storage StorageContextAccessor, - paths Paths, -) ( - ContextFactory, error, -) { - unit, err := state.Unit(unitTag) - if err != nil { - return nil, errors.Trace(err) - } - machineTag, err := unit.AssignedMachine() - if err != nil { - return nil, errors.Trace(err) - } - environment, err := state.Environment() - if err != nil { - return nil, errors.Trace(err) - } - zone, err := unit.AvailabilityZone() - if err != nil { - return nil, errors.Trace(err) - } - f := &contextFactory{ - unit: unit, - state: state, - tracker: tracker, - paths: paths, - envUUID: environment.UUID(), - envName: environment.Name(), - machineTag: machineTag, - getRelationInfos: getRelationInfos, - relationCaches: map[int]*RelationCache{}, - storage: storage, - rand: rand.New(rand.NewSource(time.Now().Unix())), - zone: zone, - } - return f, nil -} - -// newId returns a probably-unique identifier for a new context, containing the -// supplied string. -func (f *contextFactory) newId(name string) string { - return fmt.Sprintf("%s-%s-%d", f.unit.Name(), name, f.rand.Int63()) -} - -// coreContext creates a new context with all unspecialised fields filled in. -func (f *contextFactory) coreContext() (*HookContext, error) { - leadershipContext := newLeadershipContext( - f.state.LeadershipSettings, - f.tracker, - ) - ctx := &HookContext{ - unit: f.unit, - state: f.state, - LeadershipContext: leadershipContext, - uuid: f.envUUID, - envName: f.envName, - unitName: f.unit.Name(), - assignedMachineTag: f.machineTag, - relations: f.getContextRelations(), - relationId: -1, - metricsRecorder: nil, - definedMetrics: nil, - pendingPorts: make(map[PortRange]PortRangeInfo), - storage: f.storage, - componentDir: f.paths.ComponentDir, - componentFuncs: registeredComponentFuncs, - availabilityzone: f.zone, - } - if err := f.updateContext(ctx); err != nil { - return nil, err - } - return ctx, nil -} - -// ActionContext is part of the ContextFactory interface. 
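//
// The runner factory drives it roughly like this (error handling elided;
// newActionData is the helper the factory uses elsewhere in this package):
//
//	actionData := newActionData(name, &tag, params)
//	ctx, err := f.ActionContext(actionData)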
-func (f *contextFactory) ActionContext(actionData *ActionData) (*HookContext, error) { - if actionData == nil { - return nil, errors.New("nil actionData specified") - } - ctx, err := f.coreContext() - if err != nil { - return nil, errors.Trace(err) - } - ctx.actionData = actionData - ctx.id = f.newId(actionData.Name) - return ctx, nil -} - -// HookContext is part of the ContextFactory interface. -func (f *contextFactory) HookContext(hookInfo hook.Info) (*HookContext, error) { - ctx, err := f.coreContext() - if err != nil { - return nil, errors.Trace(err) - } - hookName := string(hookInfo.Kind) - if hookInfo.Kind.IsRelation() { - ctx.relationId = hookInfo.RelationId - ctx.remoteUnitName = hookInfo.RemoteUnit - relation, found := ctx.relations[hookInfo.RelationId] - if !found { - return nil, errors.Errorf("unknown relation id: %v", hookInfo.RelationId) - } - if hookInfo.Kind == hooks.RelationDeparted { - relation.cache.RemoveMember(hookInfo.RemoteUnit) - } else if hookInfo.RemoteUnit != "" { - // Clear remote settings cache for changing remote unit. - relation.cache.InvalidateMember(hookInfo.RemoteUnit) - } - hookName = fmt.Sprintf("%s-%s", relation.Name(), hookInfo.Kind) - } - if hookInfo.Kind.IsStorage() { - ctx.storageTag = names.NewStorageTag(hookInfo.StorageId) - if _, found := ctx.storage.Storage(ctx.storageTag); !found { - return nil, errors.Errorf("unknown storage id: %v", hookInfo.StorageId) - } - storageName, err := names.StorageName(hookInfo.StorageId) - if err != nil { - return nil, errors.Trace(err) - } - hookName = fmt.Sprintf("%s-%s", storageName, hookName) - } - // Metrics are only sent from the collect-metrics hook. - if hookInfo.Kind == hooks.CollectMetrics { - ch, err := getCharm(f.paths.GetCharmDir()) - if err != nil { - return nil, errors.Trace(err) - } - ctx.definedMetrics = ch.Metrics() - - chURL, err := f.unit.CharmURL() - if err != nil { - return nil, errors.Trace(err) - } - - charmMetrics := map[string]charm.Metric{} - if ch.Metrics() != nil { - charmMetrics = ch.Metrics().Metrics - } - ctx.metricsRecorder, err = metrics.NewJSONMetricRecorder( - f.paths.GetMetricsSpoolDir(), - charmMetrics, - chURL.String()) - if err != nil { - return nil, errors.Trace(err) - } - } - ctx.id = f.newId(hookName) - return ctx, nil -} - -// CommandContext is part of the ContextFactory interface. -func (f *contextFactory) CommandContext(commandInfo CommandInfo) (*HookContext, error) { - ctx, err := f.coreContext() - if err != nil { - return nil, errors.Trace(err) - } - relationId, remoteUnitName, err := inferRemoteUnit(ctx.relations, commandInfo) - if err != nil { - return nil, errors.Trace(err) - } - ctx.relationId = relationId - ctx.remoteUnitName = remoteUnitName - ctx.id = f.newId("run-commands") - return ctx, nil -} - -// getContextRelations updates the factory's relation caches, and uses them -// to construct ContextRelations for a fresh context. 
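//
// Illustrative flow: if relation 1 previously reported members [r/0 r/4]
// and now reports only [r/0], its existing cache is pruned down to r/0; a
// relation seen for the first time gets a fresh cache seeded from its
// current member names.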
-func (f *contextFactory) getContextRelations() map[int]*ContextRelation { - contextRelations := map[int]*ContextRelation{} - relationInfos := f.getRelationInfos() - relationCaches := map[int]*RelationCache{} - for id, info := range relationInfos { - relationUnit := info.RelationUnit - memberNames := info.MemberNames - cache, found := f.relationCaches[id] - if found { - cache.Prune(memberNames) - } else { - cache = NewRelationCache(relationUnit.ReadSettings, memberNames) - } - relationCaches[id] = cache - contextRelations[id] = NewContextRelation(relationUnit, cache) - } - f.relationCaches = relationCaches - return contextRelations -} - -// updateContext fills in all unspecialized fields that require an API call to -// discover. -// -// Approximately *every* line of code in this function represents a bug: ie, some -// piece of information we expose to the charm but which we fail to report changes -// to via hooks. Furthermore, the fact that we make multiple API calls at this -// time, rather than grabbing everything we need in one go, is unforgivably yucky. -func (f *contextFactory) updateContext(ctx *HookContext) (err error) { - defer errors.Trace(err) - - ctx.apiAddrs, err = f.state.APIAddresses() - if err != nil { - return err - } - ctx.machinePorts, err = f.state.AllMachinePorts(f.machineTag) - if err != nil { - return errors.Trace(err) - } - - statusCode, statusInfo, err := f.unit.MeterStatus() - if err != nil { - return errors.Annotate(err, "could not retrieve meter status for unit") - } - ctx.meterStatus = &meterStatus{ - code: statusCode, - info: statusInfo, - } - - // TODO(fwereade) 23-10-2014 bug 1384572 - // Nothing here should ever be getting the environ config directly. - environConfig, err := f.state.EnvironConfig() - if err != nil { - return err - } - ctx.proxySettings = environConfig.ProxySettings() - - // Calling these last, because there's a potential race: they're not guaranteed - // to be set in time to be needed for a hook. If they're not, we just leave them - // unset as we always have; this isn't great but it's about behaviour preservation. - ctx.publicAddress, err = f.unit.PublicAddress() - if err != nil && !params.IsCodeNoAddressSet(err) { - return err - } - ctx.privateAddress, err = f.unit.PrivateAddress() - if err != nil && !params.IsCodeNoAddressSet(err) { - return err - } - return nil -} - -func inferRemoteUnit(rctxs map[int]*ContextRelation, info CommandInfo) (int, string, error) { - relationId := info.RelationId - hasRelation := relationId != -1 - remoteUnit := info.RemoteUnitName - hasRemoteUnit := remoteUnit != "" - - // Check baseline sanity of remote unit, if supplied. - if hasRemoteUnit { - if !names.IsValidUnit(remoteUnit) { - return -1, "", errors.Errorf(`invalid remote unit: %s`, remoteUnit) - } else if !hasRelation { - return -1, "", errors.Errorf("remote unit provided without a relation: %s", remoteUnit) - } - } - - // Check sanity of relation, if supplied, otherwise easy early return. - if !hasRelation { - return relationId, remoteUnit, nil - } - rctx, found := rctxs[relationId] - if !found { - return -1, "", errors.Errorf("unknown relation id: %d", relationId) - } - - // Past basic sanity checks; if forced, accept what we're given. - if info.ForceRemoteUnit { - return relationId, remoteUnit, nil - } - - // Infer an appropriate remote unit if we can. 
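//
// Concretely, for relation 1 (outcomes mirror the checks below):
//
//	members []              -> error: cannot infer remote unit in empty relation 1
//	members [foo/0]         -> inferred remote unit foo/0
//	members [foo/0 foo/1]   -> error: ambiguous remote unit; possibilities are [foo/0 foo/1]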
- possibles := rctx.UnitNames() - if remoteUnit == "" { - switch len(possibles) { - case 0: - return -1, "", errors.Errorf("cannot infer remote unit in empty relation %d", relationId) - case 1: - return relationId, possibles[0], nil - } - return -1, "", errors.Errorf("ambiguous remote unit; possibilities are %+v", possibles) - } - for _, possible := range possibles { - if remoteUnit == possible { - return relationId, remoteUnit, nil - } - } - return -1, "", errors.Errorf("unknown remote unit %s; possibilities are %+v", remoteUnit, possibles) -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/contextfactory_test.go' --- src/github.com/juju/juju/worker/uniter/runner/contextfactory_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/contextfactory_test.go 1970-01-01 00:00:00 +0000 @@ -1,203 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner_test - -import ( - "os" - "time" - - "github.com/juju/errors" - "github.com/juju/names" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils/fs" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" - - "github.com/juju/juju/testcharms" - "github.com/juju/juju/worker/leadership" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/runner" -) - -type ContextFactorySuite struct { - HookContextSuite - paths RealPaths - factory runner.ContextFactory - membership map[int][]string -} - -var _ = gc.Suite(&ContextFactorySuite{}) - -type fakeTracker struct { - leadership.Tracker -} - -func (fakeTracker) ServiceName() string { - return "service-name" -} - -func (s *ContextFactorySuite) SetUpTest(c *gc.C) { - s.HookContextSuite.SetUpTest(c) - s.paths = NewRealPaths(c) - s.membership = map[int][]string{} - - contextFactory, err := runner.NewContextFactory( - s.uniter, - s.unit.Tag().(names.UnitTag), - fakeTracker{}, - s.getRelationInfos, - s.storage, - s.paths, - ) - c.Assert(err, jc.ErrorIsNil) - s.factory = contextFactory -} - -func (s *ContextFactorySuite) SetCharm(c *gc.C, name string) { - err := os.RemoveAll(s.paths.charm) - c.Assert(err, jc.ErrorIsNil) - err = fs.Copy(testcharms.Repo.CharmDirPath(name), s.paths.charm) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *ContextFactorySuite) getRelationInfos() map[int]*runner.RelationInfo { - info := map[int]*runner.RelationInfo{} - for relId, relUnit := range s.apiRelunits { - info[relId] = &runner.RelationInfo{ - RelationUnit: relUnit, - MemberNames: s.membership[relId], - } - } - return info -} - -func (s *ContextFactorySuite) testLeadershipContextWiring(c *gc.C, createContext func() runner.Context) { - var stub testing.Stub - stub.SetErrors(errors.New("bam")) - restore := runner.PatchNewLeadershipContext( - func(accessor runner.LeadershipSettingsAccessor, tracker leadership.Tracker) runner.LeadershipContext { - stub.AddCall("NewLeadershipContext", accessor, tracker) - return &StubLeadershipContext{Stub: &stub} - }, - ) - defer restore() - - ctx := createContext() - isLeader, err := ctx.IsLeader() - c.Check(err, gc.ErrorMatches, "bam") - c.Check(isLeader, jc.IsFalse) - - stub.CheckCalls(c, []testing.StubCall{{ - FuncName: "NewLeadershipContext", - Args: []interface{}{s.uniter.LeadershipSettings, fakeTracker{}}, - }, { - FuncName: "IsLeader", - }}) - -} - -func (s *ContextFactorySuite) TestNewHookRunnerLeadershipContext(c *gc.C) { - s.testLeadershipContextWiring(c, func() runner.Context { - ctx, err := 
s.factory.HookContext(hook.Info{Kind: hooks.ConfigChanged}) - c.Assert(err, jc.ErrorIsNil) - return ctx - }) -} - -func (s *ContextFactorySuite) TestNewCommandRunnerLeadershipContext(c *gc.C) { - s.testLeadershipContextWiring(c, func() runner.Context { - ctx, err := s.factory.CommandContext(runner.CommandInfo{RelationId: -1}) - c.Assert(err, jc.ErrorIsNil) - return ctx - }) -} - -func (s *ContextFactorySuite) TestNewActionRunnerLeadershipContext(c *gc.C) { - s.testLeadershipContextWiring(c, func() runner.Context { - s.SetCharm(c, "dummy") - action, err := s.State.EnqueueAction(s.unit.Tag(), "snapshot", nil) - c.Assert(err, jc.ErrorIsNil) - - actionData := &runner.ActionData{ - Name: action.Name(), - Tag: names.NewActionTag(action.Id()), - Params: action.Parameters(), - ResultsMap: map[string]interface{}{}, - } - - ctx, err := s.factory.ActionContext(actionData) - c.Assert(err, jc.ErrorIsNil) - return ctx - }) -} - -func (s *ContextFactorySuite) TestRelationHookContext(c *gc.C) { - hi := hook.Info{ - Kind: hooks.RelationBroken, - RelationId: 1, - } - ctx, err := s.factory.HookContext(hi) - c.Assert(err, jc.ErrorIsNil) - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertRelationContext(c, ctx, 1, "") - s.AssertNotStorageContext(c, ctx) -} - -func (s *ContextFactorySuite) TestMetricsHookContext(c *gc.C) { - s.SetCharm(c, "metered") - hi := hook.Info{Kind: hooks.CollectMetrics} - ctx, err := s.factory.HookContext(hi) - c.Assert(err, jc.ErrorIsNil) - - err = ctx.AddMetric("pings", "1", time.Now()) - c.Assert(err, jc.ErrorIsNil) - - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotRelationContext(c, ctx) - s.AssertNotStorageContext(c, ctx) -} - -func (s *ContextFactorySuite) TestActionContext(c *gc.C) { - s.SetCharm(c, "dummy") - action, err := s.State.EnqueueAction(s.unit.Tag(), "snapshot", nil) - c.Assert(err, jc.ErrorIsNil) - - actionData := &runner.ActionData{ - Name: action.Name(), - Tag: names.NewActionTag(action.Id()), - Params: action.Parameters(), - ResultsMap: map[string]interface{}{}, - } - - ctx, err := s.factory.ActionContext(actionData) - c.Assert(err, jc.ErrorIsNil) - - s.AssertCoreContext(c, ctx) - s.AssertActionContext(c, ctx) - s.AssertNotRelationContext(c, ctx) - s.AssertNotStorageContext(c, ctx) -} - -func (s *ContextFactorySuite) TestCommandContext(c *gc.C) { - ctx, err := s.factory.CommandContext(runner.CommandInfo{RelationId: -1}) - c.Assert(err, jc.ErrorIsNil) - - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotRelationContext(c, ctx) - s.AssertNotStorageContext(c, ctx) -} - -type StubLeadershipContext struct { - runner.LeadershipContext - *testing.Stub -} - -func (stub *StubLeadershipContext) IsLeader() (bool, error) { - stub.MethodCall(stub, "IsLeader") - return false, stub.NextErr() -} === modified file 'src/github.com/juju/juju/worker/uniter/runner/debug/client.go' --- src/github.com/juju/juju/worker/uniter/runner/debug/client.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/debug/client.go 2016-03-22 15:18:22 +0000 @@ -7,7 +7,7 @@ "encoding/base64" "strings" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" ) type hookArgs struct { === modified file 'src/github.com/juju/juju/worker/uniter/runner/debug/server.go' --- src/github.com/juju/juju/worker/uniter/runner/debug/server.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/debug/server.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "os/exec" "github.com/juju/utils/set" - 
goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" ) // ServerSession represents a "juju debug-hooks" session. === modified file 'src/github.com/juju/juju/worker/uniter/runner/env.go' --- src/github.com/juju/juju/worker/uniter/runner/env.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/env.go 2016-03-22 15:18:22 +0000 @@ -4,58 +4,9 @@ package runner import ( - "os" - "path/filepath" "strings" - - "github.com/juju/juju/version" ) -func osDependentEnvVars(paths Paths) []string { - switch version.Current.OS { - case version.Windows: - return windowsEnv(paths) - case version.Ubuntu: - return ubuntuEnv(paths) - case version.CentOS: - return centosEnv(paths) - } - return nil -} - -func appendPath(paths Paths) []string { - return []string{ - "PATH=" + paths.GetToolsDir() + ":" + os.Getenv("PATH"), - } -} - -func ubuntuEnv(paths Paths) []string { - path := appendPath(paths) - env := []string{ - "APT_LISTCHANGES_FRONTEND=none", - "DEBIAN_FRONTEND=noninteractive", - } - env = append(env, path...) - return env -} - -func centosEnv(paths Paths) []string { - return appendPath(paths) -} - -// windowsEnv adds windows specific environment variables. PSModulePath -// helps hooks use normal imports instead of dot sourcing modules -// its a convenience variable. The PATH variable delimiter is -// a semicolon instead of a colon -func windowsEnv(paths Paths) []string { - charmDir := paths.GetCharmDir() - charmModules := filepath.Join(charmDir, "lib", "Modules") - return []string{ - "Path=" + paths.GetToolsDir() + ";" + os.Getenv("Path"), - "PSModulePath=" + os.Getenv("PSModulePath") + ";" + charmModules, - } -} - // mergeEnvironment takes in a string array representing the desired environment // and merges it with the current environment. On Windows, clearing the environment, // or having missing environment variables, may lead to standard go packages not working === modified file 'src/github.com/juju/juju/worker/uniter/runner/env_test.go' --- src/github.com/juju/juju/worker/uniter/runner/env_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/env_test.go 2016-03-22 15:18:22 +0000 @@ -5,17 +5,12 @@ import ( "os" - "path/filepath" - "sort" "strings" - "github.com/juju/names" envtesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" - "github.com/juju/utils/proxy" gc "gopkg.in/check.v1" - "github.com/juju/juju/version" "github.com/juju/juju/worker/uniter/runner" ) @@ -64,115 +59,3 @@ expected := []string{"a=baz", "b=bar", "c=omg", "foo=val2", "d=another"} c.Check(created, jc.SameContents, expected) } - -type EnvSuite struct { - envtesting.IsolationSuite -} - -var _ = gc.Suite(&EnvSuite{}) - -func (s *EnvSuite) assertVars(c *gc.C, actual []string, expect ...[]string) { - var fullExpect []string - for _, someExpect := range expect { - fullExpect = append(fullExpect, someExpect...) 
- } - sort.Strings(actual) - sort.Strings(fullExpect) - c.Assert(actual, jc.DeepEquals, fullExpect) -} - -func (s *EnvSuite) getPaths() (paths runner.Paths, expectVars []string) { - // note: path-munging is os-dependent, not included in expectVars - return MockEnvPaths{}, []string{ - "CHARM_DIR=path-to-charm", - "JUJU_CHARM_DIR=path-to-charm", - "JUJU_AGENT_SOCKET=path-to-jujuc.socket", - } -} - -func (s *EnvSuite) getContext() (ctx *runner.HookContext, expectVars []string) { - return runner.NewEnvironmentHookContext( - "some-context-id", - "env-uuid-deadbeef", - "some-env-name", - "this-unit/123", - "PURPLE", - "proceed with care", - "some-zone", - []string{"he.re:12345", "the.re:23456"}, - proxy.Settings{ - Http: "some-http-proxy", - Https: "some-https-proxy", - Ftp: "some-ftp-proxy", - NoProxy: "some-no-proxy", - }, - names.NewMachineTag("42"), - ), []string{ - "JUJU_CONTEXT_ID=some-context-id", - "JUJU_ENV_UUID=env-uuid-deadbeef", - "JUJU_ENV_NAME=some-env-name", - "JUJU_UNIT_NAME=this-unit/123", - "JUJU_METER_STATUS=PURPLE", - "JUJU_METER_INFO=proceed with care", - "JUJU_API_ADDRESSES=he.re:12345 the.re:23456", - "JUJU_MACHINE_ID=42", - "JUJU_AVAILABILITY_ZONE=some-zone", - "http_proxy=some-http-proxy", - "HTTP_PROXY=some-http-proxy", - "https_proxy=some-https-proxy", - "HTTPS_PROXY=some-https-proxy", - "ftp_proxy=some-ftp-proxy", - "FTP_PROXY=some-ftp-proxy", - "no_proxy=some-no-proxy", - "NO_PROXY=some-no-proxy", - } -} - -func (s *EnvSuite) setRelation(ctx *runner.HookContext) (expectVars []string) { - runner.SetEnvironmentHookContextRelation( - ctx, 22, "an-endpoint", "that-unit/456", - ) - return []string{ - "JUJU_RELATION=an-endpoint", - "JUJU_RELATION_ID=an-endpoint:22", - "JUJU_REMOTE_UNIT=that-unit/456", - } -} - -func (s *EnvSuite) TestEnvWindows(c *gc.C) { - s.PatchValue(&version.Current.OS, version.Windows) - os.Setenv("Path", "foo;bar") - os.Setenv("PSModulePath", "ping;pong") - windowsVars := []string{ - "Path=path-to-tools;foo;bar", - "PSModulePath=ping;pong;" + filepath.FromSlash("path-to-charm/lib/Modules"), - } - - ctx, contextVars := s.getContext() - paths, pathsVars := s.getPaths() - actualVars := ctx.HookVars(paths) - s.assertVars(c, actualVars, contextVars, pathsVars, windowsVars) - - relationVars := s.setRelation(ctx) - actualVars = ctx.HookVars(paths) - s.assertVars(c, actualVars, contextVars, pathsVars, windowsVars, relationVars) -} - -func (s *EnvSuite) TestEnvUbuntu(c *gc.C) { - s.PatchValue(&version.Current.OS, version.Ubuntu) - os.Setenv("PATH", "foo:bar") - ubuntuVars := []string{ - "PATH=path-to-tools:foo:bar", - "APT_LISTCHANGES_FRONTEND=none", - "DEBIAN_FRONTEND=noninteractive", - } - - ctx, contextVars := s.getContext() - paths, pathsVars := s.getPaths() - actualVars := ctx.HookVars(paths) - s.assertVars(c, actualVars, contextVars, pathsVars, ubuntuVars) - - relationVars := s.setRelation(ctx) - actualVars = ctx.HookVars(paths) - s.assertVars(c, actualVars, contextVars, pathsVars, ubuntuVars, relationVars) -} === modified file 'src/github.com/juju/juju/worker/uniter/runner/errors.go' --- src/github.com/juju/juju/worker/uniter/runner/errors.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/errors.go 2016-03-22 15:18:22 +0000 @@ -9,28 +9,8 @@ "github.com/juju/errors" ) -var ErrRequeueAndReboot = errors.New("reboot now") -var ErrReboot = errors.New("reboot after hook") -var ErrNoProcess = errors.New("no process to kill") var ErrActionNotAvailable = errors.New("action no longer available") -type missingHookError struct { - 
hookName string -} - -func (e *missingHookError) Error() string { - return e.hookName + " does not exist" -} - -func IsMissingHookError(err error) bool { - _, ok := err.(*missingHookError) - return ok -} - -func NewMissingHookError(hookName string) error { - return &missingHookError{hookName} -} - type badActionError struct { actionName string problem string === modified file 'src/github.com/juju/juju/worker/uniter/runner/export_test.go' --- src/github.com/juju/juju/worker/uniter/runner/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/export_test.go 2016-03-22 15:18:22 +0000 @@ -4,16 +4,7 @@ package runner import ( - "github.com/juju/errors" - "github.com/juju/names" - "github.com/juju/utils/proxy" - "gopkg.in/juju/charm.v5" - - "github.com/juju/juju/api/uniter" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/worker/leadership" - "github.com/juju/juju/worker/uniter/metrics" - "github.com/juju/juju/worker/uniter/runner/jujuc" + "github.com/juju/juju/worker/uniter/runner/context" ) var ( @@ -21,214 +12,8 @@ SearchHook = searchHook HookCommand = hookCommand LookPath = lookPath - ValidatePortRange = validatePortRange - TryOpenPorts = tryOpenPorts - TryClosePorts = tryClosePorts ) -func RunnerPaths(rnr Runner) Paths { +func RunnerPaths(rnr Runner) context.Paths { return rnr.(*runner).paths } - -type LeadershipContextFunc func(LeadershipSettingsAccessor, leadership.Tracker) LeadershipContext - -func PatchNewLeadershipContext(f LeadershipContextFunc) func() { - var old LeadershipContextFunc - old, newLeadershipContext = newLeadershipContext, f - return func() { newLeadershipContext = old } -} - -func UpdateCachedSettings(f0 Factory, relId int, unitName string, settings params.Settings) { - f := f0.(*factory) - cf := f.contextFactory.(*contextFactory) - members := cf.relationCaches[relId].members - if members[unitName] == nil { - members[unitName] = params.Settings{} - } - for key, value := range settings { - members[unitName][key] = value - } -} - -func CachedSettings(f0 Factory, relId int, unitName string) (params.Settings, bool) { - f := f0.(*factory) - cf := f.contextFactory.(*contextFactory) - settings, found := cf.relationCaches[relId].members[unitName] - return settings, found -} - -// PatchMeterStatus changes the meter status of the context. 
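//
// Test-side usage sketch (the status values here are illustrative, not
// taken from this branch):
//
//	restore := ctx.PatchMeterStatus("GREEN", "ok")
//	defer restore()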
-func (ctx *HookContext) PatchMeterStatus(code, info string) func() { - oldMeterStatus := ctx.meterStatus - ctx.meterStatus = &meterStatus{ - code: code, - info: info, - } - return func() { - ctx.meterStatus = oldMeterStatus - } -} - -func ContextEnvInfo(ctx Context) (name, uuid string) { - hctx := ctx.(*HookContext) - return hctx.envName, hctx.uuid -} - -func ContextMachineTag(ctx Context) names.MachineTag { - hctx := ctx.(*HookContext) - return hctx.assignedMachineTag -} - -func GetStubActionContext(in map[string]interface{}) *HookContext { - return &HookContext{ - actionData: &ActionData{ - ResultsMap: in, - }, - } -} - -func PatchCachedStatus(ctx Context, status, info string, data map[string]interface{}) func() { - hctx := ctx.(*HookContext) - oldStatus := hctx.status - hctx.status = &jujuc.StatusInfo{ - Status: status, - Info: info, - Data: data, - } - return func() { - hctx.status = oldStatus - } -} - -func NewHookContext( - unit *uniter.Unit, - state *uniter.State, - id, - uuid, - envName string, - relationId int, - remoteUnitName string, - relations map[int]*ContextRelation, - apiAddrs []string, - proxySettings proxy.Settings, - canAddMetrics bool, - charmMetrics *charm.Metrics, - actionData *ActionData, - assignedMachineTag names.MachineTag, - paths Paths, -) (*HookContext, error) { - ctx := &HookContext{ - unit: unit, - state: state, - id: id, - uuid: uuid, - envName: envName, - unitName: unit.Name(), - relationId: relationId, - remoteUnitName: remoteUnitName, - relations: relations, - apiAddrs: apiAddrs, - proxySettings: proxySettings, - metricsRecorder: nil, - definedMetrics: charmMetrics, - actionData: actionData, - pendingPorts: make(map[PortRange]PortRangeInfo), - assignedMachineTag: assignedMachineTag, - } - if canAddMetrics { - charmURL, err := unit.CharmURL() - if err != nil { - return nil, err - } - ctx.metricsRecorder, err = metrics.NewJSONMetricRecorder( - paths.GetMetricsSpoolDir(), - charmMetrics.Metrics, - charmURL.String()) - if err != nil { - return nil, err - } - } - // Get and cache the addresses. - var err error - ctx.publicAddress, err = unit.PublicAddress() - if err != nil && !params.IsCodeNoAddressSet(err) { - return nil, err - } - ctx.privateAddress, err = unit.PrivateAddress() - if err != nil && !params.IsCodeNoAddressSet(err) { - return nil, err - } - ctx.availabilityzone, err = unit.AvailabilityZone() - if err != nil { - return nil, err - } - ctx.machinePorts, err = state.AllMachinePorts(ctx.assignedMachineTag) - if err != nil { - return nil, errors.Trace(err) - } - - statusCode, statusInfo, err := unit.MeterStatus() - if err != nil { - return nil, errors.Annotate(err, "could not retrieve meter status for unit") - } - ctx.meterStatus = &meterStatus{ - code: statusCode, - info: statusInfo, - } - return ctx, nil -} - -// NewEnvironmentHookContext exists purely to set the fields used in hookVars. -// The returned value is not otherwise valid. -func NewEnvironmentHookContext( - id, envUUID, envName, unitName, meterCode, meterInfo, availZone string, - apiAddresses []string, proxySettings proxy.Settings, - machineTag names.MachineTag, -) *HookContext { - return &HookContext{ - id: id, - unitName: unitName, - uuid: envUUID, - envName: envName, - apiAddrs: apiAddresses, - proxySettings: proxySettings, - meterStatus: &meterStatus{ - code: meterCode, - info: meterInfo, - }, - relationId: -1, - assignedMachineTag: machineTag, - availabilityzone: availZone, - } -} - -// SetEnvironmentHookContextRelation exists purely to set the fields used in hookVars.
-// It makes no assumptions about the validity of context. -func SetEnvironmentHookContextRelation( - context *HookContext, - relationId int, endpointName, remoteUnitName string, -) { - context.relationId = relationId - context.remoteUnitName = remoteUnitName - context.relations = map[int]*ContextRelation{ - relationId: { - endpointName: endpointName, - relationId: relationId, - }, - } -} - -func (ctx *HookContext) StorageAddConstraints() map[string][]params.StorageConstraints { - return ctx.storageAddConstraints -} - -// PatchMetricsRecorder patches the metrics writer used by the context with a new -// object. -func PatchMetricsRecorder(ctx jujuc.Context, writer MetricsRecorder) func() { - hctx := ctx.(*HookContext) - oldRecorder := hctx.metricsRecorder - hctx.metricsRecorder = writer - return func() { - hctx.metricsRecorder = oldRecorder - } -} === modified file 'src/github.com/juju/juju/worker/uniter/runner/factory.go' --- src/github.com/juju/juju/worker/uniter/runner/factory.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/factory.go 2016-03-22 15:18:22 +0000 @@ -6,29 +6,21 @@ import ( "github.com/juju/errors" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/runner/context" ) -type CommandInfo struct { - // RelationId is the relation context to execute the commands in. - RelationId int - // RemoteUnitName is the remote unit for the relation context. - RemoteUnitName string - // ForceRemoteUnit skips unit inference and existence validation. - ForceRemoteUnit bool -} - // Factory represents a long-lived object that can create runners // relevant to a specific unit. type Factory interface { // NewCommandRunner returns an execution context suitable for running // an arbitrary script. - NewCommandRunner(commandInfo CommandInfo) (Runner, error) + NewCommandRunner(commandInfo context.CommandInfo) (Runner, error) // NewHookRunner returns an execution context suitable for running the // supplied hook definition (which must be valid). @@ -43,8 +35,8 @@ // charm hooks, actions and commands. func NewFactory( state *uniter.State, - paths Paths, - contextFactory ContextFactory, + paths context.Paths, + contextFactory context.ContextFactory, ) ( Factory, error, ) { @@ -58,17 +50,17 @@ } type factory struct { - contextFactory ContextFactory + contextFactory context.ContextFactory // API connection fields. state *uniter.State // Fields that shouldn't change in a factory's lifetime. - paths Paths + paths context.Paths } // NewCommandRunner exists to satisfy the Factory interface. 
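//
// A minimal call, as the tests below exercise it (RelationId -1 requests a
// context with no relation):
//
//	rnr, err := f.NewCommandRunner(context.CommandInfo{RelationId: -1})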
-func (f *factory) NewCommandRunner(commandInfo CommandInfo) (Runner, error) { +func (f *factory) NewCommandRunner(commandInfo context.CommandInfo) (Runner, error) { ctx, err := f.contextFactory.CommandContext(commandInfo) if err != nil { return nil, errors.Trace(err) @@ -122,7 +114,7 @@ return nil, &badActionError{name, err.Error()} } - actionData := newActionData(name, &tag, params) + actionData := context.NewActionData(name, &tag, params) ctx, err := f.contextFactory.ActionContext(actionData) runner := NewRunner(ctx, f.paths) return runner, nil === modified file 'src/github.com/juju/juju/worker/uniter/runner/factory_test.go' --- src/github.com/juju/juju/worker/uniter/runner/factory_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/factory_test.go 2016-03-22 15:18:22 +0000 @@ -12,104 +12,36 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" - "github.com/juju/utils/fs" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" - "github.com/juju/juju/apiserver/params" "github.com/juju/juju/state" - "github.com/juju/juju/storage" - "github.com/juju/juju/testcharms" + "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" + runnertesting "github.com/juju/juju/worker/uniter/runner/testing" ) type FactorySuite struct { - HookContextSuite - paths RealPaths - factory runner.Factory - membership map[int][]string + ContextSuite } var _ = gc.Suite(&FactorySuite{}) -func (s *FactorySuite) SetUpTest(c *gc.C) { - s.HookContextSuite.SetUpTest(c) - s.paths = NewRealPaths(c) - s.membership = map[int][]string{} - - contextFactory, err := runner.NewContextFactory( - s.uniter, - s.unit.Tag().(names.UnitTag), - fakeTracker{}, - s.getRelationInfos, - s.storage, - s.paths, - ) - c.Assert(err, jc.ErrorIsNil) - - factory, err := runner.NewFactory( - s.uniter, - s.paths, - contextFactory, - ) - c.Assert(err, jc.ErrorIsNil) - s.factory = factory -} - -func (s *FactorySuite) SetCharm(c *gc.C, name string) { - err := os.RemoveAll(s.paths.charm) - c.Assert(err, jc.ErrorIsNil) - err = fs.Copy(testcharms.Repo.CharmDirPath(name), s.paths.charm) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *FactorySuite) getRelationInfos() map[int]*runner.RelationInfo { - info := map[int]*runner.RelationInfo{} - for relId, relUnit := range s.apiRelunits { - info[relId] = &runner.RelationInfo{ - RelationUnit: relUnit, - MemberNames: s.membership[relId], - } - } - return info -} - -func (s *FactorySuite) setUpCacheMethods(c *gc.C) { - // The factory's caches are created lazily, so it doesn't have any at all to - // begin with. Creating and discarding a context lets us call updateCache - // without panicking. (IMO this is less invasive than making updateCache - // responsible for creating missing caches etc.)
- _, err := s.factory.NewHookRunner(hook.Info{Kind: hooks.Install}) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *FactorySuite) updateCache(relId int, unitName string, settings params.Settings) { - runner.UpdateCachedSettings(s.factory, relId, unitName, settings) -} - -func (s *FactorySuite) getCache(relId int, unitName string) (params.Settings, bool) { - return runner.CachedSettings(s.factory, relId, unitName) -} - func (s *FactorySuite) AssertPaths(c *gc.C, rnr runner.Runner) { c.Assert(runner.RunnerPaths(rnr), gc.DeepEquals, s.paths) } func (s *FactorySuite) TestNewCommandRunnerNoRelation(c *gc.C) { - rnr, err := s.factory.NewCommandRunner(runner.CommandInfo{RelationId: -1}) + rnr, err := s.factory.NewCommandRunner(context.CommandInfo{RelationId: -1}) c.Assert(err, jc.ErrorIsNil) s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotRelationContext(c, ctx) - s.AssertNotStorageContext(c, ctx) } func (s *FactorySuite) TestNewCommandRunnerRelationIdDoesNotExist(c *gc.C) { for _, value := range []bool{true, false} { - _, err := s.factory.NewCommandRunner(runner.CommandInfo{ + _, err := s.factory.NewCommandRunner(context.CommandInfo{ RelationId: 12, ForceRemoteUnit: value, }) c.Check(err, gc.ErrorMatches, `unknown relation id: 12`) @@ -118,7 +50,7 @@ func (s *FactorySuite) TestNewCommandRunnerRemoteUnitInvalid(c *gc.C) { for _, value := range []bool{true, false} { - _, err := s.factory.NewCommandRunner(runner.CommandInfo{ + _, err := s.factory.NewCommandRunner(context.CommandInfo{ RelationId: 0, RemoteUnitName: "blah", ForceRemoteUnit: value, }) c.Check(err, gc.ErrorMatches, `invalid remote unit: blah`) @@ -127,7 +59,7 @@ func (s *FactorySuite) TestNewCommandRunnerRemoteUnitInappropriate(c *gc.C) { for _, value := range []bool{true, false} { - _, err := s.factory.NewCommandRunner(runner.CommandInfo{ + _, err := s.factory.NewCommandRunner(context.CommandInfo{ RelationId: -1, RemoteUnitName: "blah/123", ForceRemoteUnit: value, }) c.Check(err, gc.ErrorMatches, `remote unit provided without a relation: blah/123`) @@ -135,70 +67,50 @@ } func (s *FactorySuite) TestNewCommandRunnerEmptyRelation(c *gc.C) { - _, err := s.factory.NewCommandRunner(runner.CommandInfo{RelationId: 1}) + _, err := s.factory.NewCommandRunner(context.CommandInfo{RelationId: 1}) c.Check(err, gc.ErrorMatches, `cannot infer remote unit in empty relation 1`) } func (s *FactorySuite) TestNewCommandRunnerRemoteUnitAmbiguous(c *gc.C) { s.membership[1] = []string{"foo/0", "foo/1"} - _, err := s.factory.NewCommandRunner(runner.CommandInfo{RelationId: 1}) + _, err := s.factory.NewCommandRunner(context.CommandInfo{RelationId: 1}) c.Check(err, gc.ErrorMatches, `ambiguous remote unit; possibilities are \[foo/0 foo/1\]`) } func (s *FactorySuite) TestNewCommandRunnerRemoteUnitMissing(c *gc.C) { s.membership[0] = []string{"foo/0", "foo/1"} - _, err := s.factory.NewCommandRunner(runner.CommandInfo{ + _, err := s.factory.NewCommandRunner(context.CommandInfo{ RelationId: 0, RemoteUnitName: "blah/123", }) c.Check(err, gc.ErrorMatches, `unknown remote unit blah/123; possibilities are \[foo/0 foo/1\]`) } func (s *FactorySuite) TestNewCommandRunnerForceNoRemoteUnit(c *gc.C) { - rnr, err := s.factory.NewCommandRunner(runner.CommandInfo{ + rnr, err := s.factory.NewCommandRunner(context.CommandInfo{ RelationId: 0, ForceRemoteUnit: true, }) c.Assert(err, jc.ErrorIsNil) s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - 
s.AssertRelationContext(c, ctx, 0, "") - s.AssertNotStorageContext(c, ctx) } func (s *FactorySuite) TestNewCommandRunnerForceRemoteUnitMissing(c *gc.C) { - rnr, err := s.factory.NewCommandRunner(runner.CommandInfo{ + _, err := s.factory.NewCommandRunner(context.CommandInfo{ RelationId: 0, RemoteUnitName: "blah/123", ForceRemoteUnit: true, }) c.Assert(err, gc.IsNil) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertRelationContext(c, ctx, 0, "blah/123") - s.AssertNotStorageContext(c, ctx) } func (s *FactorySuite) TestNewCommandRunnerInferRemoteUnit(c *gc.C) { s.membership[0] = []string{"foo/2"} - rnr, err := s.factory.NewCommandRunner(runner.CommandInfo{RelationId: 0}) + rnr, err := s.factory.NewCommandRunner(context.CommandInfo{RelationId: 0}) c.Assert(err, jc.ErrorIsNil) s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertRelationContext(c, ctx, 0, "foo/2") - s.AssertNotStorageContext(c, ctx) } func (s *FactorySuite) TestNewHookRunner(c *gc.C) { rnr, err := s.factory.NewHookRunner(hook.Info{Kind: hooks.ConfigChanged}) c.Assert(err, jc.ErrorIsNil) s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotRelationContext(c, ctx) - s.AssertNotStorageContext(c, ctx) } func (s *FactorySuite) TestNewHookRunnerWithBadHook(c *gc.C) { @@ -248,13 +160,14 @@ uniter, err := st.Uniter() c.Assert(err, jc.ErrorIsNil) - contextFactory, err := runner.NewContextFactory( + contextFactory, err := context.NewContextFactory( uniter, unit.Tag().(names.UnitTag), - fakeTracker{}, + runnertesting.FakeTracker{}, s.getRelationInfos, s.storage, s.paths, + testing.NewClock(time.Time{}), ) c.Assert(err, jc.ErrorIsNil) factory, err := runner.NewFactory( @@ -272,12 +185,6 @@ s.AssertPaths(c, rnr) ctx := rnr.Context() c.Assert(ctx.UnitName(), gc.Equals, "storage-block/0") - s.AssertStorageContext(c, ctx, "data/0", storage.StorageAttachmentInfo{ - Kind: storage.StorageKindBlock, - Location: "/dev/sdb", - }) - s.AssertNotActionContext(c, ctx) - s.AssertNotRelationContext(c, ctx) } func (s *FactorySuite) TestNewHookRunnerWithRelation(c *gc.C) { @@ -287,160 +194,6 @@ }) c.Assert(err, jc.ErrorIsNil) s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertRelationContext(c, ctx, 1, "") - s.AssertNotStorageContext(c, ctx) -} - -func (s *FactorySuite) TestNewHookRunnerPrunesNonMemberCaches(c *gc.C) { - - // Write cached member settings for a member and a non-member. - s.setUpCacheMethods(c) - s.membership[0] = []string{"rel0/0"} - s.updateCache(0, "rel0/0", params.Settings{"keep": "me"}) - s.updateCache(0, "rel0/1", params.Settings{"drop": "me"}) - - rnr, err := s.factory.NewHookRunner(hook.Info{Kind: hooks.Install}) - c.Assert(err, jc.ErrorIsNil) - s.AssertPaths(c, rnr) - ctx := rnr.Context() - - settings0, found := s.getCache(0, "rel0/0") - c.Assert(found, jc.IsTrue) - c.Assert(settings0, jc.DeepEquals, params.Settings{"keep": "me"}) - - settings1, found := s.getCache(0, "rel0/1") - c.Assert(found, jc.IsFalse) - c.Assert(settings1, gc.IsNil) - - // Check the caches are being used by the context relations. - relCtx, found := ctx.Relation(0) - c.Assert(found, jc.IsTrue) - - // Verify that the settings really were cached by trying to look them up. - // Nothing's really in scope, so the call would fail if they weren't. 
- settings0, err = relCtx.ReadSettings("rel0/0") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings0, jc.DeepEquals, params.Settings{"keep": "me"}) - - // Verify that the non-member settings were purged by looking them up and - // checking for the expected error. - settings1, err = relCtx.ReadSettings("rel0/1") - c.Assert(settings1, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "permission denied") -} - -func (s *FactorySuite) TestNewHookRunnerRelationJoinedUpdatesRelationContextAndCaches(c *gc.C) { - // Write some cached settings for r/0, so we can verify the cache gets cleared. - s.setUpCacheMethods(c) - s.membership[1] = []string{"r/0"} - s.updateCache(1, "r/0", params.Settings{"foo": "bar"}) - - rnr, err := s.factory.NewHookRunner(hook.Info{ - Kind: hooks.RelationJoined, - RelationId: 1, - RemoteUnit: "r/0", - }) - c.Assert(err, jc.ErrorIsNil) - s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotStorageContext(c, ctx) - rel := s.AssertRelationContext(c, ctx, 1, "r/0") - c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/0"}) - cached0, member := s.getCache(1, "r/0") - c.Assert(cached0, gc.IsNil) - c.Assert(member, jc.IsTrue) -} - -func (s *FactorySuite) TestNewHookRunnerRelationChangedUpdatesRelationContextAndCaches(c *gc.C) { - // Update member settings to have actual values, so we can check that - // the change for r/4 clears its cache but leaves r/0's alone. - s.setUpCacheMethods(c) - s.membership[1] = []string{"r/0", "r/4"} - s.updateCache(1, "r/0", params.Settings{"foo": "bar"}) - s.updateCache(1, "r/4", params.Settings{"baz": "qux"}) - - rnr, err := s.factory.NewHookRunner(hook.Info{ - Kind: hooks.RelationChanged, - RelationId: 1, - RemoteUnit: "r/4", - }) - c.Assert(err, jc.ErrorIsNil) - s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotStorageContext(c, ctx) - rel := s.AssertRelationContext(c, ctx, 1, "r/4") - c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/0", "r/4"}) - cached0, member := s.getCache(1, "r/0") - c.Assert(cached0, jc.DeepEquals, params.Settings{"foo": "bar"}) - c.Assert(member, jc.IsTrue) - cached4, member := s.getCache(1, "r/4") - c.Assert(cached4, gc.IsNil) - c.Assert(member, jc.IsTrue) -} - -func (s *FactorySuite) TestNewHookRunnerRelationDepartedUpdatesRelationContextAndCaches(c *gc.C) { - // Update member settings to have actual values, so we can check that - // the depart for r/0 leaves r/4's cache alone (while discarding r/0's). 
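//
// Cache behaviour by hook kind, as these tests pin down: relation-joined
// and relation-changed invalidate the remote unit's cached settings;
// relation-departed drops the departing unit from membership; and
// relation-broken leaves the caches untouched until the next context is
// built.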
- s.setUpCacheMethods(c) - s.membership[1] = []string{"r/0", "r/4"} - s.updateCache(1, "r/0", params.Settings{"foo": "bar"}) - s.updateCache(1, "r/4", params.Settings{"baz": "qux"}) - - rnr, err := s.factory.NewHookRunner(hook.Info{ - Kind: hooks.RelationDeparted, - RelationId: 1, - RemoteUnit: "r/0", - }) - c.Assert(err, jc.ErrorIsNil) - s.AssertPaths(c, rnr) - ctx := rnr.Context() - s.AssertCoreContext(c, ctx) - s.AssertNotActionContext(c, ctx) - s.AssertNotStorageContext(c, ctx) - rel := s.AssertRelationContext(c, ctx, 1, "r/0") - c.Assert(rel.UnitNames(), jc.DeepEquals, []string{"r/4"}) - cached0, member := s.getCache(1, "r/0") - c.Assert(cached0, gc.IsNil) - c.Assert(member, jc.IsFalse) - cached4, member := s.getCache(1, "r/4") - c.Assert(cached4, jc.DeepEquals, params.Settings{"baz": "qux"}) - c.Assert(member, jc.IsTrue) -} - -func (s *FactorySuite) TestNewHookRunnerRelationBrokenRetainsCaches(c *gc.C) { - // Note that this is bizarre and unrealistic, because we would never usually - // run relation-broken on a non-empty relation. But verifying that the settings - // stick around allows us to verify that there's no special handling for that - // hook -- as there should not be, because the relation caches will be discarded - // for the *next* hook, which will be constructed with the current set of known - // relations and ignore everything else.
c.Assert(err, jc.ErrorIsNil) -} - func (s *FactorySuite) TestNewActionRunnerGood(c *gc.C) { s.SetCharm(c, "dummy") action, err := s.State.EnqueueAction(s.unit.Tag(), "snapshot", map[string]interface{}{ @@ -501,7 +217,7 @@ ctx := rnr.Context() data, err := ctx.ActionData() c.Assert(err, jc.ErrorIsNil) - c.Assert(data, jc.DeepEquals, &runner.ActionData{ + c.Assert(data, jc.DeepEquals, &context.ActionData{ Name: "snapshot", Tag: action.ActionTag(), Params: map[string]interface{}{ @@ -509,7 +225,8 @@ }, ResultsMap: map[string]interface{}{}, }) - vars := ctx.HookVars(s.paths) + vars, err := ctx.HookVars(s.paths) + c.Assert(err, jc.ErrorIsNil) c.Assert(len(vars) > 0, jc.IsTrue, gc.Commentf("expected HookVars but found none")) combined := strings.Join(vars, "|") c.Assert(combined, gc.Matches, `(^|.*\|)JUJU_ACTION_NAME=snapshot(\|.*|$)`) === removed file 'src/github.com/juju/juju/worker/uniter/runner/flush_test.go' --- src/github.com/juju/juju/worker/uniter/runner/flush_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/flush_test.go 1970-01-01 00:00:00 +0000 @@ -1,274 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner_test - -import ( - "time" - - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" - "github.com/juju/juju/worker/uniter/metrics" - "github.com/juju/juju/worker/uniter/runner" -) - -type FlushContextSuite struct { - HookContextSuite - stub testing.Stub -} - -var _ = gc.Suite(&FlushContextSuite{}) - -func (s *FlushContextSuite) SetUpTest(c *gc.C) { - s.HookContextSuite.SetUpTest(c) - s.stub.ResetCalls() -} - -func (s *FlushContextSuite) TestRunHookRelationFlushingError(c *gc.C) { - ctx := s.context(c) - - // Mess with multiple relation settings. - relCtx0, ok := ctx.Relation(0) - c.Assert(ok, jc.IsTrue) - node0, err := relCtx0.Settings() - c.Assert(err, jc.ErrorIsNil) - node0.Set("foo", "1") - relCtx1, ok := ctx.Relation(1) - c.Assert(ok, jc.IsTrue) - node1, err := relCtx1.Settings() - c.Assert(err, jc.ErrorIsNil) - node1.Set("bar", "2") - - // Flush the context with a failure. - err = ctx.Flush("some badge", errors.New("blam pow")) - c.Assert(err, gc.ErrorMatches, "blam pow") - - // Check that the changes have not been written to state. - settings0, err := s.relunits[0].ReadSettings("u/0") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings0, gc.DeepEquals, map[string]interface{}{"relation-name": "db0"}) - settings1, err := s.relunits[1].ReadSettings("u/0") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings1, gc.DeepEquals, map[string]interface{}{"relation-name": "db1"}) -} - -func (s *FlushContextSuite) TestRunHookRelationFlushingSuccess(c *gc.C) { - ctx := s.context(c) - - // Mess with multiple relation settings. - relCtx0, ok := ctx.Relation(0) - c.Assert(ok, jc.IsTrue) - node0, err := relCtx0.Settings() - c.Assert(err, jc.ErrorIsNil) - node0.Set("baz", "3") - relCtx1, ok := ctx.Relation(1) - c.Assert(ok, jc.IsTrue) - node1, err := relCtx1.Settings() - c.Assert(err, jc.ErrorIsNil) - node1.Set("qux", "4") - - // Flush the context with a success. - err = ctx.Flush("some badge", nil) - c.Assert(err, jc.ErrorIsNil) - - // Check that the changes have been written to state. 
- settings0, err := s.relunits[0].ReadSettings("u/0") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings0, gc.DeepEquals, map[string]interface{}{ - "relation-name": "db0", - "baz": "3", - }) - settings1, err := s.relunits[1].ReadSettings("u/0") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings1, gc.DeepEquals, map[string]interface{}{ - "relation-name": "db1", - "qux": "4", - }) -} - -func (s *FlushContextSuite) TestRunHookOpensAndClosesPendingPorts(c *gc.C) { - // Initially, no port ranges are open on the unit or its machine. - unitRanges, err := s.unit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(unitRanges, gc.HasLen, 0) - machinePorts, err := s.machine.AllPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(machinePorts, gc.HasLen, 0) - - // Add another unit on the same machine. - otherUnit, err := s.service.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = otherUnit.AssignToMachine(s.machine) - c.Assert(err, jc.ErrorIsNil) - - // Open some ports on both units. - err = s.unit.OpenPorts("tcp", 100, 200) - c.Assert(err, jc.ErrorIsNil) - err = otherUnit.OpenPorts("udp", 200, 300) - c.Assert(err, jc.ErrorIsNil) - - unitRanges, err = s.unit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(unitRanges, jc.DeepEquals, []network.PortRange{ - {100, 200, "tcp"}, - }) - - ctx := s.context(c) - - // Try opening some ports via the context. - err = ctx.OpenPorts("tcp", 100, 200) - c.Assert(err, jc.ErrorIsNil) // duplicates are ignored - err = ctx.OpenPorts("udp", 200, 300) - c.Assert(err, gc.ErrorMatches, `cannot open 200-300/udp \(unit "u/0"\): conflicts with existing 200-300/udp \(unit "u/1"\)`) - err = ctx.OpenPorts("udp", 100, 200) - c.Assert(err, gc.ErrorMatches, `cannot open 100-200/udp \(unit "u/0"\): conflicts with existing 200-300/udp \(unit "u/1"\)`) - err = ctx.OpenPorts("udp", 10, 20) - c.Assert(err, jc.ErrorIsNil) - err = ctx.OpenPorts("tcp", 50, 100) - c.Assert(err, gc.ErrorMatches, `cannot open 50-100/tcp \(unit "u/0"\): conflicts with existing 100-200/tcp \(unit "u/0"\)`) - err = ctx.OpenPorts("tcp", 50, 80) - c.Assert(err, jc.ErrorIsNil) - err = ctx.OpenPorts("tcp", 40, 90) - c.Assert(err, gc.ErrorMatches, `cannot open 40-90/tcp \(unit "u/0"\): conflicts with 50-80/tcp requested earlier`) - - // Now try closing some ports as well. - err = ctx.ClosePorts("udp", 8080, 8088) - c.Assert(err, jc.ErrorIsNil) // not existing -> ignored - err = ctx.ClosePorts("tcp", 100, 200) - c.Assert(err, jc.ErrorIsNil) - err = ctx.ClosePorts("tcp", 100, 200) - c.Assert(err, jc.ErrorIsNil) // duplicates are ignored - err = ctx.ClosePorts("udp", 200, 300) - c.Assert(err, gc.ErrorMatches, `cannot close 200-300/udp \(opened by "u/1"\) from "u/0"`) - err = ctx.ClosePorts("tcp", 50, 80) - c.Assert(err, jc.ErrorIsNil) // still pending -> no longer pending - - // Ensure the ports are not actually changed on the unit yet. - unitRanges, err = s.unit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(unitRanges, jc.DeepEquals, []network.PortRange{ - {100, 200, "tcp"}, - }) - - // Flush the context with a success. - err = ctx.Flush("some badge", nil) - c.Assert(err, jc.ErrorIsNil) - - // Verify the unit ranges are now open. 
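// Net effect: 100-200/tcp was already open on the unit and is now
// pending-close; 50-80/tcp was opened and then closed within the context,
// cancelling out; only 10-20/udp survives as a pending open, so Flush
// leaves exactly that range open.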
- expectUnitRanges := []network.PortRange{ - {FromPort: 10, ToPort: 20, Protocol: "udp"}, - } - unitRanges, err = s.unit.OpenedPorts() - c.Assert(err, jc.ErrorIsNil) - c.Assert(unitRanges, jc.DeepEquals, expectUnitRanges) -} - -func (s *FlushContextSuite) TestRunHookAddStorageOnFailure(c *gc.C) { - ctx := s.context(c) - c.Assert(ctx.UnitName(), gc.Equals, "u/0") - - size := uint64(1) - ctx.AddUnitStorage( - map[string]params.StorageConstraints{ - "allecto": params.StorageConstraints{Size: &size}, - }) - - // Flush the context with an error. - msg := "test fail run hook" - err := ctx.Flush("test fail run hook", errors.New(msg)) - c.Assert(errors.Cause(err), gc.ErrorMatches, msg) - - all, err := s.State.AllStorageInstances() - c.Assert(err, jc.ErrorIsNil) - c.Assert(all, gc.HasLen, 0) -} - -func (s *FlushContextSuite) TestRunHookAddUnitStorageOnSuccess(c *gc.C) { - ctx := s.context(c) - c.Assert(ctx.UnitName(), gc.Equals, "u/0") - - size := uint64(1) - ctx.AddUnitStorage( - map[string]params.StorageConstraints{ - "allecto": params.StorageConstraints{Size: &size}, - }) - - // Flush the context with a success. - err := ctx.Flush("success", nil) - c.Assert(errors.Cause(err), gc.ErrorMatches, `.*storage "allecto" not found.*`) - - all, err := s.State.AllStorageInstances() - c.Assert(err, jc.ErrorIsNil) - c.Assert(all, gc.HasLen, 0) -} - -func (s *FlushContextSuite) TestFlushClosesMetricsRecorder(c *gc.C) { - uuid := utils.MustNewUUID() - ctx := s.getMeteredHookContext(c, uuid.String(), -1, "", noProxies, true, s.metricsDefinition("key"), NewRealPaths(c)) - - runner.PatchMetricsRecorder(ctx, &StubMetricsRecorder{&s.stub}) - - err := ctx.AddMetric("key", "value", time.Now()) - - // Flush the context with a success. - err = ctx.Flush("success", nil) - c.Assert(err, jc.ErrorIsNil) - - s.stub.CheckCallNames(c, "IsDeclaredMetric", "AddMetric", "Close") -} - -func (s *HookContextSuite) context(c *gc.C) *runner.HookContext { - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - return s.getHookContext(c, uuid.String(), -1, "", noProxies) -} - -func (s *FlushContextSuite) TestBuiltinMetric(c *gc.C) { - uuid := utils.MustNewUUID() - paths := NewRealPaths(c) - ctx := s.getMeteredHookContext(c, uuid.String(), -1, "", noProxies, true, s.metricsDefinition("juju-units"), paths) - reader, err := metrics.NewJSONMetricReader( - paths.GetMetricsSpoolDir(), - ) - - err = ctx.Flush("some badge", nil) - c.Assert(err, jc.ErrorIsNil) - batches, err := reader.Read() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 1) - c.Assert(batches[0].Metrics, gc.HasLen, 1) - c.Assert(batches[0].Metrics[0].Key, gc.Equals, "juju-units") - c.Assert(batches[0].Metrics[0].Value, gc.Equals, "1") -} - -func (s *FlushContextSuite) TestBuiltinMetricNotGeneratedIfNotDefined(c *gc.C) { - uuid := utils.MustNewUUID() - paths := NewRealPaths(c) - ctx := s.getMeteredHookContext(c, uuid.String(), -1, "", noProxies, true, s.metricsDefinition("pings"), paths) - reader, err := metrics.NewJSONMetricReader( - paths.GetMetricsSpoolDir(), - ) - - err = ctx.Flush("some badge", nil) - c.Assert(err, jc.ErrorIsNil) - batches, err := reader.Read() - c.Assert(err, jc.ErrorIsNil) - c.Assert(batches, gc.HasLen, 0) -} - -func (s *FlushContextSuite) TestRecorderIsClosedAfterBuiltIn(c *gc.C) { - uuid := utils.MustNewUUID() - paths := NewRealPaths(c) - ctx := s.getMeteredHookContext(c, uuid.String(), -1, "", noProxies, true, s.metricsDefinition("juju-units"), paths) - runner.PatchMetricsRecorder(ctx, &StubMetricsRecorder{&s.stub}) - - err := 
ctx.Flush("some badge", nil) - c.Assert(err, jc.ErrorIsNil) - s.stub.CheckCallNames(c, "IsDeclaredMetric", "AddMetric", "Close") -} === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/add-metric.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/add-metric.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/add-metric.go 2016-03-22 15:18:22 +0000 @@ -7,11 +7,10 @@ "fmt" "time" - "gopkg.in/juju/charm.v5" - "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/utils/keyvalues" + "gopkg.in/juju/charm.v6-unstable" ) // Metric represents a single metric set by the charm. === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/config-get_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/cmd" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner/jujuc" === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/context.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/context.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/context.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "github.com/juju/errors" "github.com/juju/names" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/network" @@ -67,12 +67,13 @@ type relationHookContext interface { // HookRelation returns the ContextRelation associated with the executing - // hook if it was found, and whether it was found. - HookRelation() (ContextRelation, bool) + // hook if it was found, or an error if it was not found (or is not available). + HookRelation() (ContextRelation, error) // RemoteUnitName returns the name of the remote unit the hook execution - // is associated with if it was found, and whether it was found. - RemoteUnitName() (string, bool) + // is associated with if it was found, and an error if it was not found or is not + // available. + RemoteUnitName() (string, error) } // ActionHookContext is the context for an action hook. @@ -86,7 +87,7 @@ ActionParams() (map[string]interface{}, error) // UpdateActionResults inserts new values for use with action-set. - // The results struct will be delivered to the state server upon + // The results struct will be delivered to the controller upon // completion of the Action. UpdateActionResults(keys []string, value string) error @@ -124,8 +125,9 @@ // ContextInstance is the part of a hook context related to the unit's instance. type ContextInstance interface { - // AvailabilityZone returns the executing unit's availability zone. - AvailabilityZone() (string, bool) + // AvailabilityZone returns the executing unit's availability zone or an error + // if it was not found (or is not available). + AvailabilityZone() (string, error) // RequestReboot will set the reboot flag to true on the machine agent RequestReboot(prio RebootPriority) error @@ -134,11 +136,13 @@ // ContextNetworking is the part of a hook context related to network // interface of the unit's instance. type ContextNetworking interface { - // PublicAddress returns the executing unit's public address. 
- PublicAddress() (string, bool) + // PublicAddress returns the executing unit's public address or an + // error if it is not available. + PublicAddress() (string, error) - // PrivateAddress returns the executing unit's private address. - PrivateAddress() (string, bool) + // PrivateAddress returns the executing unit's private address or an + // error if it is not available. + PrivateAddress() (string, error) // OpenPorts marks the supplied port range for opening when the // executing unit's service is exposed. @@ -182,19 +186,21 @@ // resources associated with the unit. type ContextStorage interface { // StorageTags returns a list of tags for storage instances - // attached to the unit. - StorageTags() []names.StorageTag + // attached to the unit or an error if they are not available. + StorageTags() ([]names.StorageTag, error) // Storage returns the ContextStorageAttachment with the supplied - // tag if it was found, and whether it was found. - Storage(names.StorageTag) (ContextStorageAttachment, bool) + // tag if it was found, and an error if it was not found or is not + // available to the context. + Storage(names.StorageTag) (ContextStorageAttachment, error) // HookStorage returns the storage attachment associated - // the executing hook if it was found, and whether it was found. - HookStorage() (ContextStorageAttachment, bool) + // the executing hook if it was found, and an error if it + // was not found or is not available. + HookStorage() (ContextStorageAttachment, error) // AddUnitStorage saves storage constraints in the context. - AddUnitStorage(map[string]params.StorageConstraints) + AddUnitStorage(map[string]params.StorageConstraints) error } // ContextComponents exposes modular Juju components as they relate to @@ -208,12 +214,12 @@ // ContextRelations exposes the relations associated with the unit. type ContextRelations interface { // Relation returns the relation with the supplied id if it was found, and - // whether it was found. - Relation(id int) (ContextRelation, bool) + // an error if it was not found or is not available. + Relation(id int) (ContextRelation, error) // RelationIds returns the ids of all relations the executing unit is - // currently participating in. - RelationIds() []int + // currently participating in or an error if they are not available. + RelationIds() ([]int, error) } // ContextComponent is a single modular Juju component as it relates to @@ -254,6 +260,14 @@ // ReadSettings returns the settings of any remote unit in the relation. ReadSettings(unit string) (params.Settings, error) + + // NetworkConfig returns the network configuration for the relation. + // + // TODO(dimitern): Currently, only the Address is populated, add the + // rest later. + // + // LKK Card: https://canonical.leankit.com/Boards/View/101652562/119258804 + NetworkConfig() ([]params.NetworkConfig, error) } // ContextStorageAttachment expresses the capabilities of a hook with @@ -281,15 +295,17 @@ // newRelationIdValue returns a gnuflag.Value for convenient parsing of relation // ids in ctx. 
-func newRelationIdValue(ctx Context, result *int) *relationIdValue { +func newRelationIdValue(ctx Context, result *int) (*relationIdValue, error) { v := &relationIdValue{result: result, ctx: ctx} id := -1 - if r, found := ctx.HookRelation(); found { + if r, err := ctx.HookRelation(); err == nil { id = r.Id() v.value = r.FakeId() + } else if !errors.IsNotFound(err) { + return nil, errors.Trace(err) } *result = id - return v + return v, nil } // relationIdValue implements gnuflag.Value for use in relation commands. @@ -316,8 +332,8 @@ if err != nil { return fmt.Errorf("invalid relation id") } - if _, found := v.ctx.Relation(id); !found { - return fmt.Errorf("unknown relation id") + if _, err := v.ctx.Relation(id); err != nil { + return errors.Trace(err) } *v.result = id v.value = value @@ -326,12 +342,14 @@ // newStorageIdValue returns a gnuflag.Value for convenient parsing of storage // ids in ctx. -func newStorageIdValue(ctx Context, result *names.StorageTag) *storageIdValue { +func newStorageIdValue(ctx Context, result *names.StorageTag) (*storageIdValue, error) { v := &storageIdValue{result: result, ctx: ctx} - if s, found := ctx.HookStorage(); found { + if s, err := ctx.HookStorage(); err == nil { *v.result = s.Tag() + } else if !errors.IsNotFound(err) { + return nil, errors.Trace(err) } - return v + return v, nil } // storageIdValue implements gnuflag.Value for use in storage commands. @@ -356,8 +374,8 @@ return errors.Errorf("invalid storage ID %q", value) } tag := names.NewStorageTag(value) - if _, found := v.ctx.Storage(tag); !found { - return fmt.Errorf("unknown storage ID") + if _, err := v.ctx.Storage(tag); err != nil { + return errors.Trace(err) } *v.result = tag return nil === added file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/errors.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/errors.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/errors.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package jujuc + +import ( + "github.com/juju/errors" +) + +type notAvailable struct { + errors.Err +} + +// NotAvailable returns an error which satisfies IsNotAvailable. +func NotAvailable(thing string) error { + return &notAvailable{ + errors.NewErr(thing + " is not available"), + } +} + +// IsNotAvailable reports whether err was created with NotAvailable(). +func IsNotAvailable(err error) bool { + err = errors.Cause(err) + _, ok := err.(*notAvailable) + return ok +} === added file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/errors_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/errors_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/errors_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,25 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
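The NotAvailable helper above, paired with errors.IsNotFound from github.com/juju/errors, carries the calling convention introduced across the reworked Context methods: callers branch on the kind of error instead of a dropped boolean. A minimal sketch, assuming ctx is any jujuc.Context; note that IsNotAvailable unwraps with errors.Cause, so values passed through errors.Trace still match:

	zone, err := ctx.AvailabilityZone()
	switch {
	case err == nil:
		// use zone
	case jujuc.IsNotAvailable(err):
		// the provider reports no zone for this unit; treat as unset
	default:
		return errors.Trace(err) // a real failure: propagate with a trace
	}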
+ +package jujuc_test + +import ( + gc "gopkg.in/check.v1" + + jc "github.com/juju/testing/checkers" + + "github.com/juju/juju/testing" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type ErrorsSuite struct { + testing.BaseSuite +} + +var _ = gc.Suite(&ErrorsSuite{}) + +func (t *ErrorsSuite) TestNotAvailableErr(c *gc.C) { + err := jujuc.NotAvailable("the thing") + c.Assert(err, gc.ErrorMatches, "the thing is not available") + c.Assert(jujuc.IsNotAvailable(err), jc.IsTrue) +} === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/is-leader_test.go 2016-03-22 15:18:22 +0000 @@ -21,19 +21,22 @@ var _ = gc.Suite(&isLeaderSuite{}) func (s *isLeaderSuite) TestInitError(c *gc.C) { - command, _ := jujuc.NewIsLeaderCommand(nil) - err := command.Init([]string{"blah"}) + command, err := jujuc.NewIsLeaderCommand(nil) + c.Assert(err, jc.ErrorIsNil) + err = command.Init([]string{"blah"}) c.Assert(err, gc.ErrorMatches, `unrecognized args: \["blah"\]`) } func (s *isLeaderSuite) TestInitSuccess(c *gc.C) { - command, _ := jujuc.NewIsLeaderCommand(nil) - err := command.Init(nil) + command, err := jujuc.NewIsLeaderCommand(nil) + c.Assert(err, jc.ErrorIsNil) + err = command.Init(nil) c.Assert(err, jc.ErrorIsNil) } func (s *isLeaderSuite) TestFormatError(c *gc.C) { - command, _ := jujuc.NewIsLeaderCommand(nil) + command, err := jujuc.NewIsLeaderCommand(nil) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, []string{"--format", "bad"}) c.Check(code, gc.Equals, 2) @@ -43,7 +46,8 @@ func (s *isLeaderSuite) TestIsLeaderError(c *gc.C) { jujucContext := &isLeaderContext{err: errors.New("pow")} - command, _ := jujuc.NewIsLeaderCommand(jujucContext) + command, err := jujuc.NewIsLeaderCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, nil) c.Check(code, gc.Equals, 1) @@ -86,7 +90,8 @@ func (s *isLeaderSuite) testOutput(c *gc.C, leader bool, args []string, expect string) { jujucContext := &isLeaderContext{leader: leader} - command, _ := jujuc.NewIsLeaderCommand(jujucContext) + command, err := jujuc.NewIsLeaderCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, args) c.Check(code, gc.Equals, 0) @@ -97,7 +102,8 @@ func (s *isLeaderSuite) testParseOutput(c *gc.C, leader bool, args []string, checker gc.Checker) { jujucContext := &isLeaderContext{leader: leader} - command, _ := jujuc.NewIsLeaderCommand(jujucContext) + command, err := jujuc.NewIsLeaderCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, args) c.Check(code, gc.Equals, 0) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/juju-log.go 2016-03-22 15:18:22 +0000 @@ -4,11 +4,11 @@ package jujuc import ( - "errors" "fmt" "strings" "github.com/juju/cmd" + "github.com/juju/errors" "github.com/juju/loggo" "launchpad.net/gnuflag" ) @@ -69,8 +69,10 @@ } prefix := "" - if r, found := c.ctx.HookRelation(); found { + if r, err := c.ctx.HookRelation(); err == nil { prefix = r.FakeId() + ": " + } else if !errors.IsNotFound(err) { + 
return errors.Trace(err) } logger.Logf(logLevel, "%s%s", prefix, c.Message) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-get_test.go 2016-03-22 15:18:22 +0000 @@ -16,38 +16,40 @@ type leaderGetSuite struct { jujutesting.IsolationSuite + command cmd.Command } var _ = gc.Suite(&leaderGetSuite{}) +func (s *leaderGetSuite) SetUpTest(c *gc.C) { + var err error + s.command, err = jujuc.NewLeaderGetCommand(nil) + c.Assert(err, jc.ErrorIsNil) +} + func (s *leaderGetSuite) TestInitError(c *gc.C) { - command, _ := jujuc.NewLeaderGetCommand(nil) - err := command.Init([]string{"x=x"}) + err := s.command.Init([]string{"x=x"}) c.Assert(err, gc.ErrorMatches, `invalid key "x=x"`) } func (s *leaderGetSuite) TestInitKey(c *gc.C) { - command, _ := jujuc.NewLeaderGetCommand(nil) - err := command.Init([]string{"some-key"}) + err := s.command.Init([]string{"some-key"}) c.Assert(err, jc.ErrorIsNil) } func (s *leaderGetSuite) TestInitAll(c *gc.C) { - command, _ := jujuc.NewLeaderGetCommand(nil) - err := command.Init([]string{"-"}) + err := s.command.Init([]string{"-"}) c.Assert(err, jc.ErrorIsNil) } func (s *leaderGetSuite) TestInitEmpty(c *gc.C) { - command, _ := jujuc.NewLeaderGetCommand(nil) - err := command.Init(nil) + err := s.command.Init(nil) c.Assert(err, jc.ErrorIsNil) } func (s *leaderGetSuite) TestFormatError(c *gc.C) { - command, _ := jujuc.NewLeaderGetCommand(nil) runContext := testing.Context(c) - code := cmd.Main(command, runContext, []string{"--format", "bad"}) + code := cmd.Main(s.command, runContext, []string{"--format", "bad"}) c.Check(code, gc.Equals, 2) c.Check(bufferString(runContext.Stdout), gc.Equals, "") c.Check(bufferString(runContext.Stderr), gc.Equals, `error: invalid value "bad" for flag --format: unknown format "bad"`+"\n") @@ -55,7 +57,8 @@ func (s *leaderGetSuite) TestSettingsError(c *gc.C) { jujucContext := newLeaderGetContext(errors.New("zap")) - command, _ := jujuc.NewLeaderGetCommand(jujucContext) + command, err := jujuc.NewLeaderGetCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, nil) c.Check(code, gc.Equals, 1) @@ -134,7 +137,8 @@ func (s *leaderGetSuite) testParseOutput(c *gc.C, args []string, checker gc.Checker, expect interface{}) { jujucContext := newLeaderGetContext(nil) - command, _ := jujuc.NewLeaderGetCommand(jujucContext) + command, err := jujuc.NewLeaderGetCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, args) c.Check(code, gc.Equals, 0) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-set.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-set.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-set.go 2016-03-22 15:18:22 +0000 @@ -24,7 +24,7 @@ // Info is part of the cmd.Command interface. func (c *leaderSetCommand) Info() *cmd.Info { doc := ` -leader-set immediate writes the supplied key/value pairs to the state server, +leader-set immediately writes the supplied key/value pairs to the controller, which will then inform non-leader units of the change. It will fail if called without arguments, or if called by a unit that is not currently service leader.
` === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-set_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-set_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/leader-set_test.go 2016-03-22 15:18:22 +0000 @@ -16,31 +16,36 @@ type leaderSetSuite struct { jujutesting.IsolationSuite + command cmd.Command } var _ = gc.Suite(&leaderSetSuite{}) +func (s *leaderSetSuite) SetUpTest(c *gc.C) { + var err error + s.command, err = jujuc.NewLeaderSetCommand(nil) + c.Assert(err, jc.ErrorIsNil) +} + func (s *leaderSetSuite) TestInitEmpty(c *gc.C) { - command, _ := jujuc.NewLeaderSetCommand(nil) - err := command.Init(nil) + err := s.command.Init(nil) c.Check(err, jc.ErrorIsNil) } func (s *leaderSetSuite) TestInitValues(c *gc.C) { - command, _ := jujuc.NewLeaderSetCommand(nil) - err := command.Init([]string{"foo=bar", "baz=qux"}) + err := s.command.Init([]string{"foo=bar", "baz=qux"}) c.Check(err, jc.ErrorIsNil) } func (s *leaderSetSuite) TestInitError(c *gc.C) { - command, _ := jujuc.NewLeaderSetCommand(nil) - err := command.Init([]string{"nonsense"}) + err := s.command.Init([]string{"nonsense"}) c.Check(err, gc.ErrorMatches, `expected "key=value", got "nonsense"`) } func (s *leaderSetSuite) TestWriteEmpty(c *gc.C) { jujucContext := &leaderSetContext{} - command, _ := jujuc.NewLeaderSetCommand(jujucContext) + command, err := jujuc.NewLeaderSetCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, nil) c.Check(code, gc.Equals, 0) @@ -51,7 +56,8 @@ func (s *leaderSetSuite) TestWriteValues(c *gc.C) { jujucContext := &leaderSetContext{} - command, _ := jujuc.NewLeaderSetCommand(jujucContext) + command, err := jujuc.NewLeaderSetCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, []string{"foo=bar", "baz=qux"}) c.Check(code, gc.Equals, 0) @@ -65,7 +71,8 @@ func (s *leaderSetSuite) TestWriteError(c *gc.C) { jujucContext := &leaderSetContext{err: errors.New("splat")} - command, _ := jujuc.NewLeaderSetCommand(jujucContext) + command, err := jujuc.NewLeaderSetCommand(jujucContext) + c.Assert(err, jc.ErrorIsNil) runContext := testing.Context(c) code := cmd.Main(command, runContext, []string{"foo=bar"}) c.Check(code, gc.Equals, 1) === added file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,93 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package jujuc + +import ( + "fmt" + + "github.com/juju/cmd" + "github.com/juju/errors" + "launchpad.net/gnuflag" +) + +// NetworkGetCommand implements the network-get command. +type NetworkGetCommand struct { + cmd.CommandBase + ctx Context + + RelationId int + relationIdProxy gnuflag.Value + primaryAddress bool + + out cmd.Output +} + +func NewNetworkGetCommand(ctx Context) (cmd.Command, error) { + var err error + cmd := &NetworkGetCommand{ctx: ctx} + cmd.relationIdProxy, err = newRelationIdValue(ctx, &cmd.RelationId) + if err != nil { + return nil, errors.Trace(err) + } + + return cmd, nil +} + +// Info is part of the cmd.Command interface. 
+func (c *NetworkGetCommand) Info() *cmd.Info { + args := "--primary-address" + doc := ` +network-get returns the network config for a relation. The only supported +flag for now is --primary-address, which is required and returns the IP +address the local unit should advertise as its endpoint to its peers. +` + return &cmd.Info{ + Name: "network-get", + Args: args, + Purpose: "get network config", + Doc: doc, + } +} + +// SetFlags is part of the cmd.Command interface. +func (c *NetworkGetCommand) SetFlags(f *gnuflag.FlagSet) { + c.out.AddFlags(f, "smart", cmd.DefaultFormatters) + f.Var(c.relationIdProxy, "r", "specify a relation by id") + f.Var(c.relationIdProxy, "relation", "") + f.BoolVar(&c.primaryAddress, "primary-address", false, "get the primary address for the relation") +} + +// Init is part of the cmd.Command interface. +func (c *NetworkGetCommand) Init(args []string) error { + + if c.RelationId == -1 { + return fmt.Errorf("no relation id specified") + } + + if !c.primaryAddress { + return fmt.Errorf("--primary-address is currently required") + } + + return cmd.CheckEmpty(args) +} + +func (c *NetworkGetCommand) Run(ctx *cmd.Context) error { + r, err := c.ctx.Relation(c.RelationId) + if err != nil { + return errors.Trace(err) + } + + netconfig, err := r.NetworkConfig() + if err != nil { + return err + } + if len(netconfig) < 1 { + return fmt.Errorf("no network config available") + } + + if c.primaryAddress { + return c.out.Write(ctx, netconfig[0].Address) + } + return c.out.Write(ctx, nil) +} === added file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/network-get_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,147 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
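network-get is dispatched like the other hook tools: build it against a hook context, then hand it to cmd.Main. A minimal sketch of a successful run, assuming a populated jujuc.Context hctx and a hypothetical relation id db:1 (the tests that follow do exactly this with their own fixtures):

	com, err := jujuc.NewNetworkGetCommand(hctx)
	c.Assert(err, jc.ErrorIsNil)
	ctx := testing.Context(c) // cmd.Context capturing stdout/stderr
	code := cmd.Main(com, ctx, []string{"-r", "db:1", "--primary-address"})
	// code == 0; stdout carries the first Address from the relation's NetworkConfig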
+ +package jujuc_test + +import ( + "fmt" + + "github.com/juju/cmd" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/testing" + "github.com/juju/juju/worker/uniter/runner/jujuc" + jujuctesting "github.com/juju/juju/worker/uniter/runner/jujuc/testing" +) + +type NetworkGetSuite struct { + relationSuite +} + +var _ = gc.Suite(&NetworkGetSuite{}) + +func (s *NetworkGetSuite) newHookContext(relid int) (jujuc.Context, *relationInfo) { + netConfig := []params.NetworkConfig{ + {Address: "8.8.8.8"}, + {Address: "10.0.0.1"}, + } + + hctx, info := s.relationSuite.newHookContext(relid, "remote") + info.rels[0].Units["u/0"]["private-address"] = "foo: bar\n" + info.rels[1].SetRelated("m/0", jujuctesting.Settings{"pew": "pew\npew\n"}, netConfig) + info.rels[1].SetRelated("u/1", jujuctesting.Settings{"value": "12345"}, netConfig) + return hctx, info +} + +func (s *NetworkGetSuite) TestNetworkGet(c *gc.C) { + for i, t := range []struct { + summary string + relid int + args []string + code int + out string + checkctx func(*gc.C, *cmd.Context) + }{{ + summary: "no default relation", + relid: -1, + code: 2, + out: `no relation id specified`, + }, { + summary: "explicit relation, not known", + relid: -1, + code: 2, + args: []string{"-r", "burble:123"}, + out: `invalid value "burble:123" for flag -r: relation not found`, + }, { + summary: "default relation, no --primary-address given", + relid: 1, + code: 2, + out: `--primary-address is currently required`, + }, { + summary: "explicit relation, no --primary-address given", + relid: -1, + code: 2, + args: []string{"-r", "burble:1"}, + out: `--primary-address is currently required`, + }, { + summary: "explicit relation with --primary-address", + relid: 1, + args: []string{"-r", "burble:1", "--primary-address"}, + out: "8.8.8.8", + }, { + summary: "default relation with --primary-address", + relid: 1, + args: []string{"--primary-address"}, + out: "8.8.8.8", + }} { + c.Logf("test %d: %s", i, t.summary) + hctx, _ := s.newHookContext(t.relid) + com, err := jujuc.NewCommand(hctx, cmdString("network-get")) + c.Assert(err, jc.ErrorIsNil) + ctx := testing.Context(c) + code := cmd.Main(com, ctx, t.args) + c.Check(code, gc.Equals, t.code) + if code == 0 { + c.Check(bufferString(ctx.Stderr), gc.Equals, "") + expect := t.out + if expect != "" { + expect = expect + "\n" + } + c.Check(bufferString(ctx.Stdout), gc.Equals, expect) + } else { + c.Check(bufferString(ctx.Stdout), gc.Equals, "") + expect := fmt.Sprintf(`(.|\n)*error: %s\n`, t.out) + c.Check(bufferString(ctx.Stderr), gc.Matches, expect) + } + } +} + +func (s *NetworkGetSuite) TestHelp(c *gc.C) { + + var helpTemplate = ` +usage: network-get [options] --primary-address +purpose: get network config + +options: +--format (= smart) + specify output format (json|smart|yaml) +-o, --output (= "") + specify an output file +--primary-address (= false) + get the primary address for the relation +-r, --relation (= %s) + specify a relation by id + +network-get returns the network config for a relation. The only supported +flag for now is --primary-address, which is required and returns the IP +address the local unit should advertise as its endpoint to its peers. 
+`[1:] + + for i, t := range []struct { + summary string + relid int + usage string + rel string + }{{ + summary: "no default relation", + relid: -1, + }, { + summary: "default relation", + relid: 1, + rel: "peer1:1", + }} { + c.Logf("test %d", i) + hctx, _ := s.newHookContext(t.relid) + com, err := jujuc.NewCommand(hctx, cmdString("network-get")) + c.Check(err, jc.ErrorIsNil) + ctx := testing.Context(c) + code := cmd.Main(com, ctx, []string{"--help"}) + c.Check(code, gc.Equals, 0) + + expect := fmt.Sprintf(helpTemplate, t.rel) + c.Check(bufferString(ctx.Stdout), gc.Equals, expect) + c.Check(bufferString(ctx.Stderr), gc.Equals, "") + } +} === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/reboot_test.go 2016-03-22 15:18:22 +0000 @@ -21,12 +21,14 @@ var _ = gc.Suite(&JujuRebootSuite{}) func (s *JujuRebootSuite) TestNewJujuRebootCommand(c *gc.C) { - cmd, _ := jujuc.NewJujuRebootCommand(nil) + cmd, err := jujuc.NewJujuRebootCommand(nil) + c.Assert(err, jc.ErrorIsNil) c.Assert(cmd, gc.DeepEquals, &jujuc.JujuRebootCommand{}) } func (s *JujuRebootSuite) TestInfo(c *gc.C) { - rebootCmd, _ := jujuc.NewJujuRebootCommand(nil) + rebootCmd, err := jujuc.NewJujuRebootCommand(nil) + c.Assert(err, jc.ErrorIsNil) cmdInfo := rebootCmd.Info() c.Assert(cmdInfo.Name, gc.Equals, "juju-reboot") === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ "fmt" "github.com/juju/cmd" + "github.com/juju/errors" "launchpad.net/gnuflag" "github.com/juju/juju/apiserver/params" @@ -15,26 +16,40 @@ // RelationGetCommand implements the relation-get command. type RelationGetCommand struct { cmd.CommandBase - ctx Context - RelationId int - Key string - UnitName string - out cmd.Output + ctx Context + + RelationId int + relationIdProxy gnuflag.Value + + Key string + UnitName string + out cmd.Output } func NewRelationGetCommand(ctx Context) (cmd.Command, error) { - return &RelationGetCommand{ctx: ctx}, nil + var err error + cmd := &RelationGetCommand{ctx: ctx} + cmd.relationIdProxy, err = newRelationIdValue(ctx, &cmd.RelationId) + if err != nil { + return nil, errors.Trace(err) + } + + return cmd, nil } +// Info is part of the cmd.Command interface. func (c *RelationGetCommand) Info() *cmd.Info { args := " " doc := ` relation-get prints the value of a unit's relation setting, specified by key. If no key is given, or if the key is "-", all keys and values will be printed. ` - if name, found := c.ctx.RemoteUnitName(); found { + // There's nothing we can really do about the error here. + if name, err := c.ctx.RemoteUnitName(); err == nil { args = "[ []]" doc += fmt.Sprintf("Current default unit id is %q.", name) + } else if !errors.IsNotFound(err) { + logger.Errorf("Failed to retrieve remote unit name: %v", err) } return &cmd.Info{ Name: "relation-get", @@ -44,14 +59,14 @@ } } +// SetFlags is part of the cmd.Command interface. 
func (c *RelationGetCommand) SetFlags(f *gnuflag.FlagSet) { - rV := newRelationIdValue(c.ctx, &c.RelationId) - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) - f.Var(rV, "r", "specify a relation by id") - f.Var(rV, "relation", "") + f.Var(c.relationIdProxy, "r", "specify a relation by id") + f.Var(c.relationIdProxy, "relation", "") } +// Init is part of the cmd.Command interface. func (c *RelationGetCommand) Init(args []string) error { if c.RelationId == -1 { return fmt.Errorf("no relation id specified") @@ -63,8 +78,11 @@ } args = args[1:] } - if name, found := c.ctx.RemoteUnitName(); found { + name, err := c.ctx.RemoteUnitName() + if err == nil { c.UnitName = name + } else if cause := errors.Cause(err); !errors.IsNotFound(cause) { + return errors.Trace(err) } if len(args) > 0 { c.UnitName = args[0] @@ -77,9 +95,9 @@ } func (c *RelationGetCommand) Run(ctx *cmd.Context) error { - r, found := c.ctx.Relation(c.RelationId) - if !found { - return fmt.Errorf("unknown relation id") + r, err := c.ctx.Relation(c.RelationId) + if err != nil { + return errors.Trace(err) } var settings params.Settings if c.UnitName == c.ctx.UnitName() { === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-get_test.go 2016-03-22 15:18:22 +0000 @@ -27,8 +27,8 @@ func (s *RelationGetSuite) newHookContext(relid int, remote string) (jujuc.Context, *relationInfo) { hctx, info := s.relationSuite.newHookContext(relid, remote) info.rels[0].Units["u/0"]["private-address"] = "foo: bar\n" - info.rels[1].SetRelated("m/0", jujuctesting.Settings{"pew": "pew\npew\n"}) - info.rels[1].SetRelated("u/1", jujuctesting.Settings{"value": "12345"}) + info.rels[1].SetRelated("m/0", jujuctesting.Settings{"pew": "pew\npew\n"}, nil) + info.rels[1].SetRelated("u/1", jujuctesting.Settings{"value": "12345"}, nil) return hctx, info } @@ -51,7 +51,7 @@ relid: -1, code: 2, args: []string{"-r", "burble:123"}, - out: `invalid value "burble:123" for flag -r: unknown relation id`, + out: `invalid value "burble:123" for flag -r: relation not found`, }, { summary: "default relation, no unit chosen", relid: 1, === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ "sort" "github.com/juju/cmd" + "github.com/juju/errors" "launchpad.net/gnuflag" ) @@ -20,15 +21,25 @@ } func NewRelationIdsCommand(ctx Context) (cmd.Command, error) { - return &RelationIdsCommand{ctx: ctx}, nil + name := "" + if r, err := ctx.HookRelation(); err == nil { + name = r.Name() + } else if cause := errors.Cause(err); !errors.IsNotFound(cause) { + return nil, errors.Trace(err) + } + + return &RelationIdsCommand{ctx: ctx, Name: name}, nil } func (c *RelationIdsCommand) Info() *cmd.Info { args := "" doc := "" - if r, found := c.ctx.HookRelation(); found { + if r, err := c.ctx.HookRelation(); err == nil { + // There's not much we can do about this error here. 
args = "[]" doc = fmt.Sprintf("Current default relation name is %q.", r.Name()) + } else if !errors.IsNotFound(err) { + logger.Errorf("Could not retrieve hook relation: %v", err) } return &cmd.Info{ Name: "relation-ids", @@ -43,9 +54,6 @@ } func (c *RelationIdsCommand) Init(args []string) error { - if r, found := c.ctx.HookRelation(); found { - c.Name = r.Name() - } if len(args) > 0 { c.Name = args[0] args = args[1:] @@ -57,9 +65,16 @@ func (c *RelationIdsCommand) Run(ctx *cmd.Context) error { result := []string{} - for _, id := range c.ctx.RelationIds() { - if r, found := c.ctx.Relation(id); found && r.Name() == c.Name { + ids, err := c.ctx.RelationIds() + if err != nil && !errors.IsNotFound(err) { + return errors.Trace(err) + } + for _, id := range ids { + r, err := c.ctx.Relation(id) + if err == nil && r.Name() == c.Name { result = append(result, r.FakeId()) + } else if err != nil && !errors.IsNotFound(err) { + return errors.Trace(err) } } sort.Strings(result) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-ids_test.go 2016-03-22 15:18:22 +0000 @@ -24,8 +24,8 @@ func (s *RelationIdsSuite) newHookContext(relid int, remote string) (jujuc.Context, *relationInfo) { hctx, info := s.relationSuite.newHookContext(-1, "") info.reset() - info.addRelatedServices("x", 3) - info.addRelatedServices("y", 1) + info.addRelatedServices("x", 3, nil) + info.addRelatedServices("y", 1, nil) if relid >= 0 { info.SetAsRelationHook(relid, remote) } === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list.go 2016-03-22 15:18:22 +0000 @@ -7,24 +7,35 @@ "fmt" "github.com/juju/cmd" + "github.com/juju/errors" "launchpad.net/gnuflag" ) // RelationListCommand implements the relation-list command. 
type RelationListCommand struct { cmd.CommandBase - ctx Context - RelationId int - out cmd.Output + ctx Context + RelationId int + relationIdProxy gnuflag.Value + out cmd.Output } func NewRelationListCommand(ctx Context) (cmd.Command, error) { - return &RelationListCommand{ctx: ctx}, nil + c := &RelationListCommand{ctx: ctx} + + rV, err := newRelationIdValue(c.ctx, &c.RelationId) + if err != nil { + return nil, errors.Trace(err) + } + c.relationIdProxy = rV + + return c, nil + } func (c *RelationListCommand) Info() *cmd.Info { doc := "-r must be specified when not in a relation hook" - if _, found := c.ctx.HookRelation(); found { + if _, err := c.ctx.HookRelation(); err == nil { doc = "" } return &cmd.Info{ @@ -35,11 +46,9 @@ } func (c *RelationListCommand) SetFlags(f *gnuflag.FlagSet) { - rV := newRelationIdValue(c.ctx, &c.RelationId) - c.out.AddFlags(f, "smart", cmd.DefaultFormatters) - f.Var(rV, "r", "specify a relation by id") - f.Var(rV, "relation", "") + f.Var(c.relationIdProxy, "r", "specify a relation by id") + f.Var(c.relationIdProxy, "relation", "") } func (c *RelationListCommand) Init(args []string) (err error) { @@ -50,9 +59,9 @@ } func (c *RelationListCommand) Run(ctx *cmd.Context) error { - r, found := c.ctx.Relation(c.RelationId) - if !found { - return fmt.Errorf("unknown relation id") + r, err := c.ctx.Relation(c.RelationId) + if err != nil { + return errors.Trace(err) } unitNames := r.UnitNames() if unitNames == nil { === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-list_test.go 2016-03-22 15:18:22 +0000 @@ -45,7 +45,7 @@ relid: -1, args: []string{"-r", "unknown:123"}, code: 2, - out: `invalid value "unknown:123" for flag -r: unknown relation id`, + out: `invalid value "unknown:123" for flag -r: relation not found`, }, { summary: "default relation, bad arg", relid: 1, @@ -57,7 +57,7 @@ relid: 1, args: []string{"-r", "unknown:123"}, code: 2, - out: `invalid value "unknown:123" for flag -r: unknown relation id`, + out: `invalid value "unknown:123" for flag -r: relation not found`, }, { summary: "default relation, no members", relid: 1, @@ -111,8 +111,8 @@ for i, t := range relationListTests { c.Logf("test %d: %s", i, t.summary) hctx, info := s.newHookContext(t.relid, "") - info.setRelations(0, t.members0) - info.setRelations(1, t.members1) + info.setRelations(0, t.members0, nil) + info.setRelations(1, t.members1, nil) c.Logf("%#v %#v", info.rels[t.relid], t.members1) com, err := jujuc.NewCommand(hctx, cmdString("relation-list")) c.Assert(err, jc.ErrorIsNil) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "github.com/juju/cmd" "github.com/juju/errors" "github.com/juju/utils/keyvalues" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "launchpad.net/gnuflag" ) @@ -32,15 +32,24 @@ // RelationSetCommand implements the relation-set command. 
type RelationSetCommand struct { cmd.CommandBase - ctx Context - RelationId int - Settings map[string]string - settingsFile cmd.FileVar - formatFlag string // deprecated + ctx Context + RelationId int + relationIdProxy gnuflag.Value + Settings map[string]string + settingsFile cmd.FileVar + formatFlag string // deprecated } func NewRelationSetCommand(ctx Context) (cmd.Command, error) { - return &RelationSetCommand{ctx: ctx}, nil + c := &RelationSetCommand{ctx: ctx} + + rV, err := newRelationIdValue(ctx, &c.RelationId) + if err != nil { + return nil, errors.Trace(err) + } + c.relationIdProxy = rV + + return c, nil } func (c *RelationSetCommand) Info() *cmd.Info { @@ -53,10 +62,8 @@ } func (c *RelationSetCommand) SetFlags(f *gnuflag.FlagSet) { - rV := newRelationIdValue(c.ctx, &c.RelationId) - - f.Var(rV, "r", "specify a relation by id") - f.Var(rV, "relation", "") + f.Var(c.relationIdProxy, "r", "specify a relation by id") + f.Var(c.relationIdProxy, "relation", "") c.settingsFile.SetStdin() f.Var(&c.settingsFile, "file", "file containing key-value pairs") @@ -84,27 +91,6 @@ return nil, errors.Trace(err) } - skipValidation := false // for debugging - if !skipValidation { - // Can this validation be done more simply or efficiently? - - var scalar string - if err := goyaml.Unmarshal(data, &scalar); err != nil { - return nil, errors.Trace(err) - } - if scalar != "" { - return nil, errors.Errorf("expected YAML map, got %q", scalar) - } - - var sequence []string - if err := goyaml.Unmarshal(data, &sequence); err != nil { - return nil, errors.Trace(err) - } - if len(sequence) != 0 { - return nil, errors.Errorf("expected YAML map, got %#v", sequence) - } - } - kvs := make(map[string]string) if err := goyaml.Unmarshal(data, kvs); err != nil { return nil, errors.Trace(err) @@ -145,9 +131,9 @@ return errors.Trace(err) } - r, found := c.ctx.Relation(c.RelationId) - if !found { - return fmt.Errorf("unknown relation id") + r, err := c.ctx.Relation(c.RelationId) + if err != nil { + return errors.Trace(err) } settings, err := r.Settings() if err != nil { === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation-set_test.go 2016-03-22 15:18:22 +0000 @@ -164,11 +164,11 @@ }, { ctxrelid: -1, args: []string{"-r", "2"}, - err: `invalid value "2" for flag -r: unknown relation id`, + err: `invalid value "2" for flag -r: relation not found`, }, { ctxrelid: 1, args: []string{"-r", "ignored:2"}, - err: `invalid value "ignored:2" for flag -r: unknown relation id`, + err: `invalid value "ignored:2" for flag -r: relation not found`, }, { ctxrelid: -1, err: `no relation id specified`, @@ -260,22 +260,22 @@ summary: "accidental same format as command-line", args: []string{"--file", "spam"}, content: "foo=bar ham=eggs good=bad", - err: `expected YAML map, got .*`, + err: "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `foo=bar...` into map.*", }, { summary: "scalar instead of map", args: []string{"--file", "spam"}, content: "haha", - err: `expected YAML map, got "haha"`, + err: "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `haha` into map.*", }, { summary: "sequence instead of map", args: []string{"--file", "spam"}, content: "[haha]", - err: `expected YAML map, got \[]string{"haha"}`, + err: "yaml: unmarshal errors:\n line 1: cannot unmarshal !!seq into map.*", }, { summary: "multiple maps", args: 
[]string{"--file", "spam"}, content: "{a: b}\n{c: d}", - err: `.*YAML error: .*`, + err: `.*yaml: .*`, }, { summary: "value with a space", args: []string{"--file", "spam"}, === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/relation_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/relation_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/relation_test.go 2016-03-22 15:18:22 +0000 @@ -8,6 +8,7 @@ "github.com/juju/testing" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/runner/jujuc" jujuctesting "github.com/juju/juju/worker/uniter/runner/jujuc/testing" ) @@ -22,8 +23,8 @@ settings := jujuctesting.Settings{ "private-address": "u-0.testing.invalid", } - rInfo.setNextRelation("", s.Unit, settings) // peer0 - rInfo.setNextRelation("", s.Unit, settings) // peer1 + rInfo.setNextRelation("", s.Unit, settings, nil) // peer0 + rInfo.setNextRelation("", s.Unit, settings, nil) // peer1 if relid >= 0 { rInfo.SetAsRelationHook(relid, remote) } @@ -44,7 +45,11 @@ ri.rels = nil } -func (ri *relationInfo) setNextRelation(name, unit string, settings jujuctesting.Settings) int { +func (ri *relationInfo) setNextRelation( + name, unit string, + settings jujuctesting.Settings, + netConfig []params.NetworkConfig, +) int { if ri.rels == nil { ri.rels = make(map[int]*jujuctesting.Relation) } @@ -55,25 +60,25 @@ relation := ri.SetNewRelation(id, name, ri.stub) if unit != "" { relation.UnitName = unit - relation.SetRelated(unit, settings) + relation.SetRelated(unit, settings, netConfig) } ri.rels[id] = relation return id } -func (ri *relationInfo) addRelatedServices(relname string, count int) { +func (ri *relationInfo) addRelatedServices(relname string, count int, netConfig []params.NetworkConfig) { if ri.rels == nil { ri.rels = make(map[int]*jujuctesting.Relation) } for i := 0; i < count; i++ { - ri.setNextRelation(relname, "", nil) + ri.setNextRelation(relname, "", nil, netConfig) } } -func (ri *relationInfo) setRelations(id int, members []string) { +func (ri *relationInfo) setRelations(id int, members []string, netConfig []params.NetworkConfig) { relation := ri.rels[id] relation.Reset() for _, name := range members { - relation.SetRelated(name, nil) + relation.SetRelated(name, nil, netConfig) } } === added file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/restricted.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/restricted.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/restricted.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,134 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package jujuc + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/network" +) + +// ErrRestrictedContext indicates a method is not implemented in the given context. +var ErrRestrictedContext = errors.NotImplementedf("not implemented for restricted context") + +// RestrictedContext is a base implementation for restricted contexts to embed, +// so that an error is returned for methods that are not explicitly +// implemented. +type RestrictedContext struct{} + +// ConfigSettings implements jujuc.Context. +func (*RestrictedContext) ConfigSettings() (charm.Settings, error) { return nil, ErrRestrictedContext } + +// UnitStatus implements jujuc.Context. 
+func (*RestrictedContext) UnitStatus() (*StatusInfo, error) { return nil, ErrRestrictedContext } + +// SetUnitStatus implements jujuc.Context. +func (*RestrictedContext) SetUnitStatus(StatusInfo) error { return ErrRestrictedContext } + +// ServiceStatus implements jujuc.Context. +func (*RestrictedContext) ServiceStatus() (ServiceStatusInfo, error) { + return ServiceStatusInfo{}, ErrRestrictedContext +} + +// SetServiceStatus implements jujuc.Context. +func (*RestrictedContext) SetServiceStatus(StatusInfo) error { return ErrRestrictedContext } + +// AvailabilityZone implements jujuc.Context. +func (*RestrictedContext) AvailabilityZone() (string, error) { return "", ErrRestrictedContext } + +// RequestReboot implements jujuc.Context. +func (*RestrictedContext) RequestReboot(prio RebootPriority) error { return ErrRestrictedContext } + +// PublicAddress implements jujuc.Context. +func (*RestrictedContext) PublicAddress() (string, error) { return "", ErrRestrictedContext } + +// PrivateAddress implements jujuc.Context. +func (*RestrictedContext) PrivateAddress() (string, error) { return "", ErrRestrictedContext } + +// OpenPorts implements jujuc.Context. +func (*RestrictedContext) OpenPorts(protocol string, fromPort, toPort int) error { + return ErrRestrictedContext +} + +// ClosePorts implements jujuc.Context. +func (*RestrictedContext) ClosePorts(protocol string, fromPort, toPort int) error { + return ErrRestrictedContext +} + +// OpenedPorts implements jujuc.Context. +func (*RestrictedContext) OpenedPorts() []network.PortRange { return nil } + +// IsLeader implements jujuc.Context. +func (*RestrictedContext) IsLeader() (bool, error) { return false, ErrRestrictedContext } + +// LeaderSettings implements jujuc.Context. +func (*RestrictedContext) LeaderSettings() (map[string]string, error) { + return nil, ErrRestrictedContext +} + +// WriteLeaderSettings implements jujuc.Context. +func (*RestrictedContext) WriteLeaderSettings(map[string]string) error { return ErrRestrictedContext } + +// AddMetric implements jujuc.Context. +func (*RestrictedContext) AddMetric(string, string, time.Time) error { return ErrRestrictedContext } + +// StorageTags implements jujuc.Context. +func (*RestrictedContext) StorageTags() ([]names.StorageTag, error) { return nil, ErrRestrictedContext } + +// Storage implements jujuc.Context. +func (*RestrictedContext) Storage(names.StorageTag) (ContextStorageAttachment, error) { + return nil, ErrRestrictedContext +} + +// HookStorage implements jujuc.Context. +func (*RestrictedContext) HookStorage() (ContextStorageAttachment, error) { + return nil, ErrRestrictedContext +} + +// AddUnitStorage implements jujuc.Context. +func (*RestrictedContext) AddUnitStorage(map[string]params.StorageConstraints) error { + return ErrRestrictedContext +} + +// Relation implements jujuc.Context. +func (*RestrictedContext) Relation(id int) (ContextRelation, error) { + return nil, ErrRestrictedContext +} + +// RelationIds implements jujuc.Context. +func (*RestrictedContext) RelationIds() ([]int, error) { return nil, ErrRestrictedContext } + +// HookRelation implements jujuc.Context. +func (*RestrictedContext) HookRelation() (ContextRelation, error) { + return nil, ErrRestrictedContext +} + +// RemoteUnitName implements jujuc.Context. +func (*RestrictedContext) RemoteUnitName() (string, error) { return "", ErrRestrictedContext } + +// ActionParams implements jujuc.Context. 
+func (*RestrictedContext) ActionParams() (map[string]interface{}, error) { + return nil, ErrRestrictedContext +} + +// UpdateActionResults implements jujuc.Context. +func (*RestrictedContext) UpdateActionResults(keys []string, value string) error { + return ErrRestrictedContext +} + +// SetActionMessage implements jujuc.Context. +func (*RestrictedContext) SetActionMessage(string) error { return ErrRestrictedContext } + +// SetActionFailed implements jujuc.Context. +func (*RestrictedContext) SetActionFailed() error { return ErrRestrictedContext } + +// Component implements jujuc.Context. +func (*RestrictedContext) Component(string) (ContextComponent, error) { + return nil, ErrRestrictedContext +} === added file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/restricted_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/restricted_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/restricted_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2012-2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package jujuc_test + +import ( + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type restrictedContext struct { + *jujuc.RestrictedContext +} + +// UnitName completes the jujuc.Context implementation, which the +// RestrictedContext leaves out. +func (*restrictedContext) UnitName() string { return "restricted" } + +var _ jujuc.Context = (*restrictedContext)(nil) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/server.go 2016-03-22 15:18:22 +0000 @@ -60,6 +60,7 @@ "juju-reboot" + cmdSuffix: NewJujuRebootCommand, "status-get" + cmdSuffix: NewStatusGetCommand, "status-set" + cmdSuffix: NewStatusSetCommand, + "network-get" + cmdSuffix: NewNetworkGetCommand, } var storageCommands = map[string]creator{ === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/status-get_test.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "github.com/juju/cmd" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner/jujuc" === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get.go 2016-03-22 15:18:22 +0000 @@ -15,14 +15,21 @@ // StorageGetCommand implements the storage-get command.
type StorageGetCommand struct { cmd.CommandBase - ctx Context - storageTag names.StorageTag - key string - out cmd.Output + ctx Context + storageTag names.StorageTag + storageTagProxy gnuflag.Value + key string + out cmd.Output } func NewStorageGetCommand(ctx Context) (cmd.Command, error) { - return &StorageGetCommand{ctx: ctx}, nil + c := &StorageGetCommand{ctx: ctx} + sV, err := newStorageIdValue(ctx, &c.storageTag) + if err != nil { + return nil, errors.Trace(err) + } + c.storageTagProxy = sV + return c, nil } func (c *StorageGetCommand) Info() *cmd.Info { @@ -38,9 +45,8 @@ } func (c *StorageGetCommand) SetFlags(f *gnuflag.FlagSet) { - sV := newStorageIdValue(c.ctx, &c.storageTag) c.out.AddFlags(f, "smart", cmd.DefaultFormatters) - f.Var(sV, "s", "specify a storage instance by id") + f.Var(c.storageTagProxy, "s", "specify a storage instance by id") } func (c *StorageGetCommand) Init(args []string) error { @@ -66,9 +72,9 @@ } func (c *StorageGetCommand) Run(ctx *cmd.Context) error { - storage, ok := c.ctx.Storage(c.storageTag) - if !ok { - return nil + storage, err := c.ctx.Storage(c.storageTag) + if err != nil { + return errors.Trace(err) } values := map[string]interface{}{ "kind": storage.Kind().String(), === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-get_test.go 2016-03-22 15:18:22 +0000 @@ -11,7 +11,7 @@ "github.com/juju/cmd" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner/jujuc" === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list.go 2016-03-22 15:18:22 +0000 @@ -55,7 +55,10 @@ } func (c *StorageListCommand) Run(ctx *cmd.Context) error { - tags := c.ctx.StorageTags() + tags, err := c.ctx.StorageTags() + if err != nil { + return errors.Trace(err) + } ids := make([]string, 0, len(tags)) for _, tag := range tags { id := tag.Id() === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/storage-list_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,7 @@ "github.com/juju/cmd" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - goyaml "gopkg.in/yaml.v1" + goyaml "gopkg.in/yaml.v2" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner/jujuc" === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/instance.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/instance.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/instance.go 2016-03-22 15:18:22 +0000 @@ -22,11 +22,10 @@ } // AvailabilityZone implements jujuc.ContextInstance. -func (c *ContextInstance) AvailabilityZone() (string, bool) { +func (c *ContextInstance) AvailabilityZone() (string, error) { c.stub.AddCall("AvailabilityZone") - c.stub.NextErr() - return c.info.AvailabilityZone, true + return c.info.AvailabilityZone, c.stub.NextErr() } // RequestReboot implements jujuc.ContextInstance. 
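The testing doubles above all follow one stub convention: record the call with stub.AddCall, then surface whatever error the test queued via stub.NextErr. A minimal sketch of driving that from a test, assuming instanceCtx is the ContextInstance double wired to a github.com/juju/testing Stub named stub:

	stub.SetErrors(errors.New("boom"))         // queued for the next recorded call
	_, err := instanceCtx.AvailabilityZone()   // records the call, pops the error
	c.Check(err, gc.ErrorMatches, "boom")
	stub.CheckCallNames(c, "AvailabilityZone") // the call was recorded either way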
=== modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/networking.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/networking.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/networking.go 2016-03-22 15:18:22 +0000 @@ -56,25 +56,19 @@ } // PublicAddress implements jujuc.ContextNetworking. -func (c *ContextNetworking) PublicAddress() (string, bool) { +func (c *ContextNetworking) PublicAddress() (string, error) { c.stub.AddCall("PublicAddress") - c.stub.NextErr() - - if c.info.PublicAddress == "" { - return "", false - } - return c.info.PublicAddress, true + + return c.info.PublicAddress, c.stub.NextErr() + } // PrivateAddress implements jujuc.ContextNetworking. -func (c *ContextNetworking) PrivateAddress() (string, bool) { +func (c *ContextNetworking) PrivateAddress() (string, error) { c.stub.AddCall("PrivateAddress") - c.stub.NextErr() - - if c.info.PrivateAddress == "" { - return "", false - } - return c.info.PrivateAddress, true + + return c.info.PrivateAddress, c.stub.NextErr() + } // OpenPorts implements jujuc.ContextNetworking. === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relation.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relation.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relation.go 2016-03-22 15:18:22 +0000 @@ -23,6 +23,8 @@ Units map[string]Settings // UnitName is data for jujuc.ContextRelation. UnitName string + // NetworkConfig is data for jujuc.ContextRelation. + NetworkConfig []params.NetworkConfig } // Reset clears the Relation's settings. @@ -31,11 +33,12 @@ } // SetRelated adds the relation settings for the unit. -func (r *Relation) SetRelated(name string, settings Settings) { +func (r *Relation) SetRelated(name string, settings Settings, netConfig []params.NetworkConfig) { if r.Units == nil { r.Units = make(map[string]Settings) } r.Units[name] = settings + r.NetworkConfig = netConfig } // ContextRelation is a test double for jujuc.ContextRelation. @@ -108,3 +111,13 @@ } return s.Map(), nil } + +func (r *ContextRelation) NetworkConfig() ([]params.NetworkConfig, error) { + r.stub.AddCall("NetworkConfig") + + if err := r.stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return r.info.NetworkConfig, nil +} === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relationhook.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relationhook.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relationhook.go 2016-03-22 15:18:22 +0000 @@ -4,6 +4,8 @@ package testing import ( + "github.com/juju/errors" + "github.com/juju/juju/worker/uniter/runner/jujuc" ) @@ -26,17 +28,24 @@ } // HookRelation implements jujuc.RelationHookContext. -func (c *ContextRelationHook) HookRelation() (jujuc.ContextRelation, bool) { +func (c *ContextRelationHook) HookRelation() (jujuc.ContextRelation, error) { c.stub.AddCall("HookRelation") - c.stub.NextErr() + var err error + if c.info.HookRelation == nil { + err = errors.NotFoundf("hook relation") + } - return c.info.HookRelation, c.info.HookRelation != nil + return c.info.HookRelation, err } // RemoteUnitName implements jujuc.RelationHookContext. 
-func (c *ContextRelationHook) RemoteUnitName() (string, bool) { +func (c *ContextRelationHook) RemoteUnitName() (string, error) { c.stub.AddCall("RemoteUnitName") c.stub.NextErr() + var err error + if c.info.RemoteUnitName == "" { + err = errors.NotFoundf("remote unit") + } - return c.info.RemoteUnitName, c.info.RemoteUnitName != "" + return c.info.RemoteUnitName, err } === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relations.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relations.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/relations.go 2016-03-22 15:18:22 +0000 @@ -6,8 +6,10 @@ import ( "fmt" + "github.com/juju/errors" "github.com/juju/testing" + "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/runner/jujuc" ) @@ -46,9 +48,9 @@ } // SetRelated adds the provided unit information to the relation. -func (r *Relations) SetRelated(id int, unit string, settings Settings) { +func (r *Relations) SetRelated(id int, unit string, settings Settings, netConfig []params.NetworkConfig) { relation := r.Relations[id].(*ContextRelation).info - relation.SetRelated(unit, settings) + relation.SetRelated(unit, settings, netConfig) } // ContextRelations is a test double for jujuc.ContextRelations. @@ -58,22 +60,24 @@ } // Relation implements jujuc.ContextRelations. -func (c *ContextRelations) Relation(id int) (jujuc.ContextRelation, bool) { +func (c *ContextRelations) Relation(id int) (jujuc.ContextRelation, error) { c.stub.AddCall("Relation", id) - c.stub.NextErr() - r, found := c.info.Relations[id] - return r, found + r, ok := c.info.Relations[id] + var err error + if !ok { + err = errors.NotFoundf("relation") + } + return r, err } // RelationIds implements jujuc.ContextRelations. -func (c *ContextRelations) RelationIds() []int { +func (c *ContextRelations) RelationIds() ([]int, error) { c.stub.AddCall("RelationIds") - c.stub.NextErr() ids := []int{} for id := range c.info.Relations { ids = append(ids, id) } - return ids + return ids, c.stub.NextErr() } === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/storage.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/storage.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/storage.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,7 @@ import ( "fmt" + "github.com/juju/errors" "github.com/juju/names" "github.com/juju/testing" "github.com/juju/utils/set" @@ -82,9 +83,8 @@ } // StorageTags implements jujuc.ContextStorage. -func (c *ContextStorage) StorageTags() []names.StorageTag { +func (c *ContextStorage) StorageTags() ([]names.StorageTag, error) { c.stub.AddCall("StorageTags") - c.stub.NextErr() tags := set.NewTags() for tag := range c.info.Storage { @@ -94,30 +94,32 @@ for i, tag := range tags.SortedValues() { storageTags[i] = tag.(names.StorageTag) } - return storageTags + return storageTags, c.stub.NextErr() } // Storage implements jujuc.ContextStorage. -func (c *ContextStorage) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, bool) { +func (c *ContextStorage) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, error) { c.stub.AddCall("Storage") - c.stub.NextErr() storage, ok := c.info.Storage[tag] - return storage, ok + var err error + if !ok { + err = errors.NotFoundf("storage") + } + return storage, err + } // HookStorage implements jujuc.ContextStorage. 
-func (c *ContextStorage) HookStorage() (jujuc.ContextStorageAttachment, bool) { +func (c *ContextStorage) HookStorage() (jujuc.ContextStorageAttachment, error) { c.stub.AddCall("HookStorage") - c.stub.NextErr() return c.Storage(c.info.StorageTag) } // AddUnitStorage implements jujuc.ContextStorage. -func (c *ContextStorage) AddUnitStorage(all map[string]params.StorageConstraints) { +func (c *ContextStorage) AddUnitStorage(all map[string]params.StorageConstraints) error { c.stub.AddCall("AddUnitStorage", all) - c.stub.NextErr() - c.info.AddUnitStorage(all) + return c.stub.NextErr() } === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/suite.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/suite.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/suite.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ import ( "github.com/juju/testing" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) // ContextSuite is the base suite for testing jujuc.Context-related code. === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/unit.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/unit.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/testing/unit.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,7 @@ import ( "github.com/juju/errors" - "gopkg.in/juju/charm.v5" + "gopkg.in/juju/charm.v6-unstable" ) // Unit holds the values for the hook context. === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/tools_test.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/tools_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/tools_test.go 2016-03-22 15:18:22 +0000 @@ -11,6 +11,8 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" "github.com/juju/utils/symlink" gc "gopkg.in/check.v1" @@ -28,7 +30,11 @@ func (s *ToolsSuite) SetUpTest(c *gc.C) { s.dataDir = c.MkDir() - s.toolsDir = tools.SharedToolsDir(s.dataDir, version.Current) + s.toolsDir = tools.SharedToolsDir(s.dataDir, version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + }) err := os.MkdirAll(s.toolsDir, 0755) c.Assert(err, jc.ErrorIsNil) err = symlink.New(s.toolsDir, tools.ToolsDir(s.dataDir, "unit-u-123")) === modified file 'src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go' --- src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/jujuc/unit-get.go 2016-03-22 15:18:22 +0000 @@ -4,10 +4,10 @@ package jujuc import ( - "errors" "fmt" "github.com/juju/cmd" + "github.com/juju/errors" "launchpad.net/gnuflag" ) @@ -47,14 +47,15 @@ } func (c *UnitGetCommand) Run(ctx *cmd.Context) error { - value, ok := "", false + var value string + var err error if c.Key == "private-address" { - value, ok = c.ctx.PrivateAddress() + value, err = c.ctx.PrivateAddress() } else { - value, ok = c.ctx.PublicAddress() + value, err = c.ctx.PublicAddress() } - if !ok { - return fmt.Errorf("%s not set", c.Key) + if err != nil { + return errors.Trace(err) } return c.out.Write(ctx, value) } === removed file 'src/github.com/juju/juju/worker/uniter/runner/leader.go' --- src/github.com/juju/juju/worker/uniter/runner/leader.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/leader.go 1970-01-01 00:00:00 
+0000 @@ -1,111 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner - -import ( - "github.com/juju/errors" - - "github.com/juju/juju/worker/leadership" -) - -var ( - errIsMinion = errors.New("not the leader") -) - -// LeadershipSettingsAccessor is an interface that allows us not to have -// to use the concrete `api/uniter/LeadershipSettingsAccessor` type, thus -// simplifying testing. -type LeadershipSettingsAccessor interface { - Read(serviceName string) (map[string]string, error) - Merge(serviceName string, settings map[string]string) error -} - -// LeadershipContext provides several jujuc.Context methods. It -// exists separately of HookContext for clarity, and ease of testing. -type LeadershipContext interface { - IsLeader() (bool, error) - LeaderSettings() (map[string]string, error) - WriteLeaderSettings(map[string]string) error -} - -type leadershipContext struct { - accessor LeadershipSettingsAccessor - tracker leadership.Tracker - serviceName string - - isMinion bool - settings map[string]string -} - -func NewLeadershipContext(accessor LeadershipSettingsAccessor, tracker leadership.Tracker) LeadershipContext { - return &leadershipContext{ - accessor: accessor, - tracker: tracker, - serviceName: tracker.ServiceName(), - } -} - -// newLeadershipContext allows us to swap out the leadership context creator for -// factory tests. -var newLeadershipContext = NewLeadershipContext - -// IsLeader is part of the jujuc.Context interface. -func (ctx *leadershipContext) IsLeader() (bool, error) { - // This doesn't technically need an error return, but that feels like a - // happy accident of the current implementation and not a reason to change - // the interface we're implementing. - err := ctx.ensureLeader() - switch err { - case nil: - return true, nil - case errIsMinion: - return false, nil - } - return false, errors.Trace(err) -} - -// WriteLeaderSettings is part of the jujuc.Context interface. -func (ctx *leadershipContext) WriteLeaderSettings(settings map[string]string) error { - // This may trigger a lease refresh; it would be desirable to use a less - // eager approach here, but we're working around a race described in - // `apiserver/leadership.LeadershipSettingsAccessor.Merge`, and as of - // 2015-02-19 it's better to stay eager. - err := ctx.ensureLeader() - if err == nil { - // Clear local settings; if we need them again we should use the values - // as merged by the server. But we don't need to get them again right now; - // the charm may not need to ask again before the hook finishes. - ctx.settings = nil - err = ctx.accessor.Merge(ctx.serviceName, settings) - } - return errors.Annotate(err, "cannot write settings") -} - -// LeaderSettings is part of the jujuc.Context interface. 
-func (ctx *leadershipContext) LeaderSettings() (map[string]string, error) { - if ctx.settings == nil { - var err error - ctx.settings, err = ctx.accessor.Read(ctx.serviceName) - if err != nil { - return nil, errors.Annotate(err, "cannot read settings") - } - } - result := map[string]string{} - for key, value := range ctx.settings { - result[key] = value - } - return result, nil -} - -func (ctx *leadershipContext) ensureLeader() error { - if ctx.isMinion { - return errIsMinion - } - success := ctx.tracker.ClaimLeader().Wait() - if !success { - ctx.isMinion = true - return errIsMinion - } - return nil -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/leader_test.go' --- src/github.com/juju/juju/worker/uniter/runner/leader_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/leader_test.go 1970-01-01 00:00:00 +0000 @@ -1,335 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner_test - -import ( - "github.com/juju/errors" - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/worker/leadership" - "github.com/juju/juju/worker/uniter/runner" -) - -type LeaderSuite struct { - testing.IsolationSuite - testing.Stub - accessor *StubLeadershipSettingsAccessor - tracker *StubTracker - context runner.LeadershipContext -} - -var _ = gc.Suite(&LeaderSuite{}) - -func (s *LeaderSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.accessor = &StubLeadershipSettingsAccessor{ - Stub: &s.Stub, - } - s.tracker = &StubTracker{ - Stub: &s.Stub, - serviceName: "led-service", - } - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ServiceName", - }}, func() { - s.context = runner.NewLeadershipContext(s.accessor, s.tracker) - }) -} - -func (s *LeaderSuite) CheckCalls(c *gc.C, stubCalls []testing.StubCall, f func()) { - s.Stub = testing.Stub{} - f() - s.Stub.CheckCalls(c, stubCalls) -} - -func (s *LeaderSuite) TestIsLeaderSuccess(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }}, func() { - // The first call succeeds... - s.tracker.results = []StubTicket{true} - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsTrue) - c.Check(err, jc.ErrorIsNil) - }) - - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }}, func() { - // ...and so does the second. - s.tracker.results = []StubTicket{true} - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsTrue) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *LeaderSuite) TestIsLeaderFailure(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }}, func() { - // The first call fails... - s.tracker.results = []StubTicket{false} - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsFalse) - c.Check(err, jc.ErrorIsNil) - }) - - s.CheckCalls(c, nil, func() { - // ...and the second doesn't even try. - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsFalse) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *LeaderSuite) TestIsLeaderFailureAfterSuccess(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }}, func() { - // The first call succeeds... - s.tracker.results = []StubTicket{true} - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsTrue) - c.Check(err, jc.ErrorIsNil) - }) - - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }}, func() { - // The second fails... 
- s.tracker.results = []StubTicket{false} - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsFalse) - c.Check(err, jc.ErrorIsNil) - }) - - s.CheckCalls(c, nil, func() { - // The third doesn't even try. - leader, err := s.context.IsLeader() - c.Check(leader, jc.IsFalse) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *LeaderSuite) TestLeaderSettingsSuccess(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "Read", - Args: []interface{}{"led-service"}, - }}, func() { - // The first call grabs the settings... - s.accessor.results = []map[string]string{{ - "some": "settings", - "of": "interest", - }} - settings, err := s.context.LeaderSettings() - c.Check(settings, jc.DeepEquals, map[string]string{ - "some": "settings", - "of": "interest", - }) - c.Check(err, jc.ErrorIsNil) - }) - - s.CheckCalls(c, nil, func() { - // The second uses the cache. - settings, err := s.context.LeaderSettings() - c.Check(settings, jc.DeepEquals, map[string]string{ - "some": "settings", - "of": "interest", - }) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *LeaderSuite) TestLeaderSettingsCopyMap(c *gc.C) { - // Grab the settings to populate the cache... - s.accessor.results = []map[string]string{{ - "some": "settings", - "of": "interest", - }} - settings, err := s.context.LeaderSettings() - c.Check(err, gc.IsNil) - - // Put some nonsense into the returned settings... - settings["bad"] = "news" - - // Get the settings again and check they're as expected. - settings, err = s.context.LeaderSettings() - c.Check(settings, jc.DeepEquals, map[string]string{ - "some": "settings", - "of": "interest", - }) - c.Check(err, jc.ErrorIsNil) -} - -func (s *LeaderSuite) TestLeaderSettingsError(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "Read", - Args: []interface{}{"led-service"}, - }}, func() { - s.accessor.results = []map[string]string{nil} - s.Stub.SetErrors(errors.New("blort")) - settings, err := s.context.LeaderSettings() - c.Check(settings, gc.IsNil) - c.Check(err, gc.ErrorMatches, "cannot read settings: blort") - }) -} - -func (s *LeaderSuite) TestWriteLeaderSettingsSuccess(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }, { - FuncName: "Merge", - Args: []interface{}{"led-service", map[string]string{ - "some": "very", - "nice": "data", - }}, - }}, func() { - s.tracker.results = []StubTicket{true} - err := s.context.WriteLeaderSettings(map[string]string{ - "some": "very", - "nice": "data", - }) - c.Check(err, jc.ErrorIsNil) - }) -} - -func (s *LeaderSuite) TestWriteLeaderSettingsMinion(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }}, func() { - // The first call fails... - s.tracker.results = []StubTicket{false} - err := s.context.WriteLeaderSettings(map[string]string{"blah": "blah"}) - c.Check(err, gc.ErrorMatches, "cannot write settings: not the leader") - }) - - s.CheckCalls(c, nil, func() { - // The second doesn't even try. 
- err := s.context.WriteLeaderSettings(map[string]string{"blah": "blah"}) - c.Check(err, gc.ErrorMatches, "cannot write settings: not the leader") - }) -} - -func (s *LeaderSuite) TestWriteLeaderSettingsError(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }, { - FuncName: "Merge", - Args: []interface{}{"led-service", map[string]string{ - "some": "very", - "nice": "data", - }}, - }}, func() { - s.tracker.results = []StubTicket{true} - s.Stub.SetErrors(errors.New("glurk")) - err := s.context.WriteLeaderSettings(map[string]string{ - "some": "very", - "nice": "data", - }) - c.Check(err, gc.ErrorMatches, "cannot write settings: glurk") - }) -} - -func (s *LeaderSuite) TestWriteLeaderSettingsClearsCache(c *gc.C) { - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "Read", - Args: []interface{}{"led-service"}, - }}, func() { - // Start off by populating the cache... - s.accessor.results = []map[string]string{{ - "some": "settings", - "of": "interest", - }} - _, err := s.context.LeaderSettings() - c.Check(err, gc.IsNil) - }) - - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "ClaimLeader", - }, { - FuncName: "Merge", - Args: []interface{}{"led-service", map[string]string{ - "some": "very", - "nice": "data", - }}, - }}, func() { - // Write new data to the state server... - s.tracker.results = []StubTicket{true} - err := s.context.WriteLeaderSettings(map[string]string{ - "some": "very", - "nice": "data", - }) - c.Check(err, jc.ErrorIsNil) - }) - - s.CheckCalls(c, []testing.StubCall{{ - FuncName: "Read", - Args: []interface{}{"led-service"}, - }}, func() { - s.accessor.results = []map[string]string{{ - "totally": "different", - "server": "decides", - }} - settings, err := s.context.LeaderSettings() - c.Check(err, gc.IsNil) - c.Check(settings, jc.DeepEquals, map[string]string{ - "totally": "different", - "server": "decides", - }) - c.Check(err, jc.ErrorIsNil) - }) -} - -type StubLeadershipSettingsAccessor struct { - *testing.Stub - results []map[string]string -} - -func (stub *StubLeadershipSettingsAccessor) Read(serviceName string) (result map[string]string, _ error) { - stub.MethodCall(stub, "Read", serviceName) - result, stub.results = stub.results[0], stub.results[1:] - return result, stub.NextErr() -} - -func (stub *StubLeadershipSettingsAccessor) Merge(serviceName string, settings map[string]string) error { - stub.MethodCall(stub, "Merge", serviceName, settings) - return stub.NextErr() -} - -type StubTracker struct { - leadership.Tracker - *testing.Stub - serviceName string - results []StubTicket -} - -func (stub *StubTracker) ServiceName() string { - stub.MethodCall(stub, "ServiceName") - return stub.serviceName -} - -func (stub *StubTracker) ClaimLeader() (result leadership.Ticket) { - stub.MethodCall(stub, "ClaimLeader") - result, stub.results = stub.results[0], stub.results[1:] - return result -} - -type StubTicket bool - -func (ticket StubTicket) Wait() bool { - return bool(ticket) -} - -func (ticket StubTicket) Ready() <-chan struct{} { - return alwaysReady -} - -var alwaysReady = make(chan struct{}) - -func init() { - close(alwaysReady) -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/ports.go' --- src/github.com/juju/juju/worker/uniter/runner/ports.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/ports.go 1970-01-01 00:00:00 +0000 @@ -1,173 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
-
-package runner
-
-import (
-	"strings"
-
-	"github.com/juju/errors"
-	"github.com/juju/names"
-
-	"github.com/juju/juju/apiserver/params"
-	"github.com/juju/juju/network"
-)
-
-// PortRangeInfo contains information about a pending open- or
-// close-port operation for a port range. This is only exported for
-// testing.
-type PortRangeInfo struct {
-	ShouldOpen  bool
-	RelationTag names.RelationTag
-}
-
-// PortRange contains a port range and a relation id. Used as key to
-// pendingRelations and is only exported for testing.
-type PortRange struct {
-	Ports      network.PortRange
-	RelationId int
-}
-
-func validatePortRange(protocol string, fromPort, toPort int) (network.PortRange, error) {
-	// Validate the given range.
-	newRange := network.PortRange{
-		Protocol: strings.ToLower(protocol),
-		FromPort: fromPort,
-		ToPort:   toPort,
-	}
-	if err := newRange.Validate(); err != nil {
-		return network.PortRange{}, err
-	}
-	return newRange, nil
-}
-
-func tryOpenPorts(
-	protocol string,
-	fromPort, toPort int,
-	unitTag names.UnitTag,
-	machinePorts map[network.PortRange]params.RelationUnit,
-	pendingPorts map[PortRange]PortRangeInfo,
-) error {
-	// TODO(dimitern) Once port ranges are linked to relations in
-	// addition to networks, refactor this function and test it
-	// better to ensure it handles relations properly.
-	relationId := -1
-
-	// Validate the given range.
-	newRange, err := validatePortRange(protocol, fromPort, toPort)
-	if err != nil {
-		return err
-	}
-	rangeKey := PortRange{
-		Ports:      newRange,
-		RelationId: relationId,
-	}
-
-	rangeInfo, isKnown := pendingPorts[rangeKey]
-	if isKnown {
-		if !rangeInfo.ShouldOpen {
-			// If the same range is already pending to be closed, just
-			// mark it as pending to be opened.
-			rangeInfo.ShouldOpen = true
-			pendingPorts[rangeKey] = rangeInfo
-		}
-		return nil
-	}
-
-	// Ensure there are no conflicts with existing ports on the
-	// machine.
-	for portRange, relUnit := range machinePorts {
-		relUnitTag, err := names.ParseUnitTag(relUnit.Unit)
-		if err != nil {
-			return errors.Annotatef(
-				err,
-				"machine ports %v contain invalid unit tag",
-				portRange,
-			)
-		}
-		if newRange.ConflictsWith(portRange) {
-			if portRange == newRange && relUnitTag == unitTag {
-				// The same unit trying to open the same range is just
-				// ignored.
-				return nil
-			}
-			return errors.Errorf(
-				"cannot open %v (unit %q): conflicts with existing %v (unit %q)",
-				newRange, unitTag.Id(), portRange, relUnitTag.Id(),
-			)
-		}
-	}
-	// Ensure other pending port ranges do not conflict with this one.
-	for rangeKey, rangeInfo := range pendingPorts {
-		if newRange.ConflictsWith(rangeKey.Ports) && rangeInfo.ShouldOpen {
-			return errors.Errorf(
-				"cannot open %v (unit %q): conflicts with %v requested earlier",
-				newRange, unitTag.Id(), rangeKey.Ports,
-			)
-		}
-	}
-
-	rangeInfo = pendingPorts[rangeKey]
-	rangeInfo.ShouldOpen = true
-	pendingPorts[rangeKey] = rangeInfo
-	return nil
-}
-
-func tryClosePorts(
-	protocol string,
-	fromPort, toPort int,
-	unitTag names.UnitTag,
-	machinePorts map[network.PortRange]params.RelationUnit,
-	pendingPorts map[PortRange]PortRangeInfo,
-) error {
-	// TODO(dimitern) Once port ranges are linked to relations in
-	// addition to networks, refactor this function and test it
-	// better to ensure it handles relations properly.
-	relationId := -1
-
-	// Validate the given range.
- newRange, err := validatePortRange(protocol, fromPort, toPort) - if err != nil { - return err - } - rangeKey := PortRange{ - Ports: newRange, - RelationId: relationId, - } - - rangeInfo, isKnown := pendingPorts[rangeKey] - if isKnown { - if rangeInfo.ShouldOpen { - // If the same range is already pending to be opened, just - // remove it from pending. - delete(pendingPorts, rangeKey) - } - return nil - } - - // Ensure the range we're trying to close is opened on the - // machine. - relUnit, found := machinePorts[newRange] - if !found { - // Trying to close a range which is not open is ignored. - return nil - } else if relUnit.Unit != unitTag.String() { - relUnitTag, err := names.ParseUnitTag(relUnit.Unit) - if err != nil { - return errors.Annotatef( - err, - "machine ports %v contain invalid unit tag", - newRange, - ) - } - return errors.Errorf( - "cannot close %v (opened by %q) from %q", - newRange, relUnitTag.Id(), unitTag.Id(), - ) - } - - rangeInfo = pendingPorts[rangeKey] - rangeInfo.ShouldOpen = false - pendingPorts[rangeKey] = rangeInfo - return nil -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/ports_test.go' --- src/github.com/juju/juju/worker/uniter/runner/ports_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/ports_test.go 1970-01-01 00:00:00 +0000 @@ -1,273 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner_test - -import ( - "github.com/juju/names" - envtesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/network" - "github.com/juju/juju/worker/uniter/runner" -) - -type PortsSuite struct { - envtesting.IsolationSuite -} - -var _ = gc.Suite(&PortsSuite{}) - -func (s *PortsSuite) TestValidatePortRange(c *gc.C) { - tests := []struct { - about string - proto string - ports []int - portRange network.PortRange - expectErr string - }{{ - about: "invalid range - 0-0/tcp", - proto: "tcp", - ports: []int{0, 0}, - expectErr: "invalid port range 0-0/tcp", - }, { - about: "invalid range - 0-1/tcp", - proto: "tcp", - ports: []int{0, 1}, - expectErr: "invalid port range 0-1/tcp", - }, { - about: "invalid range - -1-1/tcp", - proto: "tcp", - ports: []int{-1, 1}, - expectErr: "invalid port range -1-1/tcp", - }, { - about: "invalid range - 1-99999/tcp", - proto: "tcp", - ports: []int{1, 99999}, - expectErr: "invalid port range 1-99999/tcp", - }, { - about: "invalid range - 88888-99999/tcp", - proto: "tcp", - ports: []int{88888, 99999}, - expectErr: "invalid port range 88888-99999/tcp", - }, { - about: "invalid protocol - 1-65535/foo", - proto: "foo", - ports: []int{1, 65535}, - expectErr: `invalid protocol "foo", expected "tcp" or "udp"`, - }, { - about: "valid range - 100-200/udp", - proto: "UDP", - ports: []int{100, 200}, - portRange: network.PortRange{ - FromPort: 100, - ToPort: 200, - Protocol: "udp", - }, - }, { - about: "valid single port - 100/tcp", - proto: "TCP", - ports: []int{100, 100}, - portRange: network.PortRange{ - FromPort: 100, - ToPort: 100, - Protocol: "tcp", - }, - }} - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - portRange, err := runner.ValidatePortRange( - test.proto, - test.ports[0], - test.ports[1], - ) - if test.expectErr != "" { - c.Check(err, gc.ErrorMatches, test.expectErr) - c.Check(portRange, jc.DeepEquals, network.PortRange{}) - } else { - c.Check(err, jc.ErrorIsNil) - c.Check(portRange, 
jc.DeepEquals, test.portRange) - } - } -} - -func makeMachinePorts( - unitName, proto string, fromPort, toPort int, -) map[network.PortRange]params.RelationUnit { - result := make(map[network.PortRange]params.RelationUnit) - portRange := network.PortRange{ - FromPort: fromPort, - ToPort: toPort, - Protocol: proto, - } - unitTag := "" - if unitName != "invalid" { - unitTag = names.NewUnitTag(unitName).String() - } else { - unitTag = unitName - } - result[portRange] = params.RelationUnit{ - Unit: unitTag, - } - return result -} - -func makePendingPorts( - proto string, fromPort, toPort int, shouldOpen bool, -) map[runner.PortRange]runner.PortRangeInfo { - result := make(map[runner.PortRange]runner.PortRangeInfo) - portRange := network.PortRange{ - FromPort: fromPort, - ToPort: toPort, - Protocol: proto, - } - key := runner.PortRange{ - Ports: portRange, - RelationId: -1, - } - result[key] = runner.PortRangeInfo{ - ShouldOpen: shouldOpen, - } - return result -} - -type portsTest struct { - about string - proto string - ports []int - machinePorts map[network.PortRange]params.RelationUnit - pendingPorts map[runner.PortRange]runner.PortRangeInfo - expectErr string - expectPending map[runner.PortRange]runner.PortRangeInfo -} - -func (p portsTest) withDefaults(proto string, fromPort, toPort int) portsTest { - if p.proto == "" { - p.proto = proto - } - if len(p.ports) != 2 { - p.ports = []int{fromPort, toPort} - } - if p.pendingPorts == nil { - p.pendingPorts = make(map[runner.PortRange]runner.PortRangeInfo) - } - return p -} - -func (s *PortsSuite) TestTryOpenPorts(c *gc.C) { - tests := []portsTest{{ - about: "invalid port range", - ports: []int{0, 0}, - expectErr: "invalid port range 0-0/tcp", - }, { - about: "invalid protocol - 10-20/foo", - proto: "foo", - expectErr: `invalid protocol "foo", expected "tcp" or "udp"`, - }, { - about: "open a new range (no machine ports yet)", - expectPending: makePendingPorts("tcp", 10, 20, true), - }, { - about: "open an existing range (ignored)", - machinePorts: makeMachinePorts("u/0", "tcp", 10, 20), - expectPending: map[runner.PortRange]runner.PortRangeInfo{}, - }, { - about: "open a range pending to be closed already", - pendingPorts: makePendingPorts("tcp", 10, 20, false), - expectPending: makePendingPorts("tcp", 10, 20, true), - }, { - about: "open a range pending to be opened already (ignored)", - pendingPorts: makePendingPorts("tcp", 10, 20, true), - expectPending: makePendingPorts("tcp", 10, 20, true), - }, { - about: "try opening a range when machine ports has invalid unit tag", - machinePorts: makeMachinePorts("invalid", "tcp", 80, 90), - expectErr: `machine ports 80-90/tcp contain invalid unit tag: "invalid" is not a valid tag`, - }, { - about: "try opening a range conflicting with another unit", - machinePorts: makeMachinePorts("u/1", "tcp", 10, 20), - expectErr: `cannot open 10-20/tcp \(unit "u/0"\): conflicts with existing 10-20/tcp \(unit "u/1"\)`, - }, { - about: "open a range conflicting with the same unit (ignored)", - machinePorts: makeMachinePorts("u/0", "tcp", 10, 20), - expectPending: map[runner.PortRange]runner.PortRangeInfo{}, - }, { - about: "try opening a range conflicting with another pending range", - pendingPorts: makePendingPorts("tcp", 5, 25, true), - expectErr: `cannot open 10-20/tcp \(unit "u/0"\): conflicts with 5-25/tcp requested earlier`, - }} - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - - test = test.withDefaults("tcp", 10, 20) - err := runner.TryOpenPorts( - test.proto, - test.ports[0], - 
test.ports[1],
-			names.NewUnitTag("u/0"),
-			test.machinePorts,
-			test.pendingPorts,
-		)
-		if test.expectErr != "" {
-			c.Check(err, gc.ErrorMatches, test.expectErr)
-		} else {
-			c.Check(err, jc.ErrorIsNil)
-			c.Check(test.pendingPorts, jc.DeepEquals, test.expectPending)
-		}
-	}
-}
-
-func (s *PortsSuite) TestTryClosePorts(c *gc.C) {
-	tests := []portsTest{{
-		about:     "invalid port range",
-		ports:     []int{0, 0},
-		expectErr: "invalid port range 0-0/tcp",
-	}, {
-		about:     "invalid protocol - 10-20/foo",
-		proto:     "foo",
-		expectErr: `invalid protocol "foo", expected "tcp" or "udp"`,
-	}, {
-		about:         "close a new range (no machine ports yet; ignored)",
-		expectPending: map[runner.PortRange]runner.PortRangeInfo{},
-	}, {
-		about:         "close an existing range",
-		machinePorts:  makeMachinePorts("u/0", "tcp", 10, 20),
-		expectPending: makePendingPorts("tcp", 10, 20, false),
-	}, {
-		about:         "close a range pending to be opened already (removed from pending)",
-		pendingPorts:  makePendingPorts("tcp", 10, 20, true),
-		expectPending: map[runner.PortRange]runner.PortRangeInfo{},
-	}, {
-		about:         "close a range pending to be closed already (ignored)",
-		pendingPorts:  makePendingPorts("tcp", 10, 20, false),
-		expectPending: makePendingPorts("tcp", 10, 20, false),
-	}, {
-		about:        "try closing an existing range when machine ports has invalid unit tag",
-		machinePorts: makeMachinePorts("invalid", "tcp", 10, 20),
-		expectErr:    `machine ports 10-20/tcp contain invalid unit tag: "invalid" is not a valid tag`,
-	}, {
-		about:        "try closing a range of another unit",
-		machinePorts: makeMachinePorts("u/1", "tcp", 10, 20),
-		expectErr:    `cannot close 10-20/tcp \(opened by "u/1"\) from "u/0"`,
-	}}
-	for i, test := range tests {
-		c.Logf("test %d: %s", i, test.about)
-
-		test = test.withDefaults("tcp", 10, 20)
-		err := runner.TryClosePorts(
-			test.proto,
-			test.ports[0],
-			test.ports[1],
-			names.NewUnitTag("u/0"),
-			test.machinePorts,
-			test.pendingPorts,
-		)
-		if test.expectErr != "" {
-			c.Check(err, gc.ErrorMatches, test.expectErr)
-		} else {
-			c.Check(err, jc.ErrorIsNil)
-			c.Check(test.pendingPorts, jc.DeepEquals, test.expectPending)
-		}
-	}
-}

=== removed file 'src/github.com/juju/juju/worker/uniter/runner/relation.go'
--- src/github.com/juju/juju/worker/uniter/runner/relation.go	2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/worker/uniter/runner/relation.go	1970-01-01 00:00:00 +0000
@@ -1,80 +0,0 @@
-// Copyright 2012-2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package runner
-
-import (
-	"fmt"
-
-	"github.com/juju/juju/api/uniter"
-	"github.com/juju/juju/apiserver/params"
-	"github.com/juju/juju/worker/uniter/runner/jujuc"
-)
-
-type RelationInfo struct {
-	RelationUnit *uniter.RelationUnit
-	MemberNames  []string
-}
-
-// ContextRelation is the implementation of jujuc.ContextRelation.
-type ContextRelation struct {
-	ru           *uniter.RelationUnit
-	relationId   int
-	endpointName string
-
-	// settings allows read and write access to the relation unit settings.
-	settings *uniter.Settings
-
-	// cache holds remote unit membership and settings.
-	cache *RelationCache
-}
-
-// NewContextRelation creates a new context for the given relation unit.
-// The unit-name keys of members supply the initial membership.
-func NewContextRelation(ru *uniter.RelationUnit, cache *RelationCache) *ContextRelation { - return &ContextRelation{ - ru: ru, - relationId: ru.Relation().Id(), - endpointName: ru.Endpoint().Name, - cache: cache, - } -} - -func (ctx *ContextRelation) Id() int { - return ctx.relationId -} - -func (ctx *ContextRelation) Name() string { - return ctx.endpointName -} - -func (ctx *ContextRelation) FakeId() string { - return fmt.Sprintf("%s:%d", ctx.endpointName, ctx.relationId) -} - -func (ctx *ContextRelation) UnitNames() []string { - return ctx.cache.MemberNames() -} - -func (ctx *ContextRelation) ReadSettings(unit string) (settings params.Settings, err error) { - return ctx.cache.Settings(unit) -} - -func (ctx *ContextRelation) Settings() (jujuc.Settings, error) { - if ctx.settings == nil { - node, err := ctx.ru.Settings() - if err != nil { - return nil, err - } - ctx.settings = node - } - return ctx.settings, nil -} - -// WriteSettings persists all changes made to the unit's relation settings. -func (ctx *ContextRelation) WriteSettings() (err error) { - if ctx.settings != nil { - err = ctx.settings.Write() - } - return -} === removed file 'src/github.com/juju/juju/worker/uniter/runner/relation_test.go' --- src/github.com/juju/juju/worker/uniter/runner/relation_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/relation_test.go 1970-01-01 00:00:00 +0000 @@ -1,178 +0,0 @@ -// Copyright 2012-2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package runner_test - -import ( - "github.com/juju/names" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "github.com/juju/juju/api" - apiuniter "github.com/juju/juju/api/uniter" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/juju/testing" - "github.com/juju/juju/state" - "github.com/juju/juju/worker/uniter/runner" -) - -type ContextRelationSuite struct { - testing.JujuConnSuite - svc *state.Service - rel *state.Relation - ru *state.RelationUnit - - st api.Connection - uniter *apiuniter.State - apiRelUnit *apiuniter.RelationUnit -} - -var _ = gc.Suite(&ContextRelationSuite{}) - -func (s *ContextRelationSuite) SetUpTest(c *gc.C) { - s.JujuConnSuite.SetUpTest(c) - machine, err := s.State.AddMachine("quantal", state.JobHostUnits) - c.Assert(err, jc.ErrorIsNil) - password, err := utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = machine.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - err = machine.SetProvisioned("foo", "fake_nonce", nil) - c.Assert(err, jc.ErrorIsNil) - - ch := s.AddTestingCharm(c, "riak") - s.svc = s.AddTestingService(c, "u", ch) - rels, err := s.svc.Relations() - c.Assert(err, jc.ErrorIsNil) - c.Assert(rels, gc.HasLen, 1) - s.rel = rels[0] - unit, err := s.svc.AddUnit() - c.Assert(err, jc.ErrorIsNil) - err = unit.AssignToMachine(machine) - s.ru, err = s.rel.Unit(unit) - c.Assert(err, jc.ErrorIsNil) - err = s.ru.EnterScope(nil) - c.Assert(err, jc.ErrorIsNil) - - password, err = utils.RandomPassword() - c.Assert(err, jc.ErrorIsNil) - err = unit.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - s.st = s.OpenAPIAs(c, unit.Tag(), password) - s.uniter, err = s.st.Uniter() - c.Assert(err, jc.ErrorIsNil) - c.Assert(s.uniter, gc.NotNil) - - apiRel, err := s.uniter.Relation(s.rel.Tag().(names.RelationTag)) - c.Assert(err, jc.ErrorIsNil) - apiUnit, err := s.uniter.Unit(unit.Tag().(names.UnitTag)) - c.Assert(err, jc.ErrorIsNil) - s.apiRelUnit, err = apiRel.Unit(apiUnit) - c.Assert(err, 
jc.ErrorIsNil) -} - -func (s *ContextRelationSuite) TestMemberCaching(c *gc.C) { - unit, err := s.svc.AddUnit() - c.Assert(err, jc.ErrorIsNil) - ru, err := s.rel.Unit(unit) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(map[string]interface{}{"blib": "blob"}) - c.Assert(err, jc.ErrorIsNil) - settings, err := ru.Settings() - c.Assert(err, jc.ErrorIsNil) - settings.Set("ping", "pong") - _, err = settings.Write() - c.Assert(err, jc.ErrorIsNil) - - cache := runner.NewRelationCache(s.apiRelUnit.ReadSettings, []string{"u/1"}) - ctx := runner.NewContextRelation(s.apiRelUnit, cache) - - // Check that uncached settings are read from state. - m, err := ctx.ReadSettings("u/1") - c.Assert(err, jc.ErrorIsNil) - expectMap := settings.Map() - expectSettings := convertMap(expectMap) - c.Assert(m, gc.DeepEquals, expectSettings) - - // Check that changes to state do not affect the cached settings. - settings.Set("ping", "pow") - _, err = settings.Write() - c.Assert(err, jc.ErrorIsNil) - m, err = ctx.ReadSettings("u/1") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, expectSettings) -} - -func (s *ContextRelationSuite) TestNonMemberCaching(c *gc.C) { - unit, err := s.svc.AddUnit() - c.Assert(err, jc.ErrorIsNil) - ru, err := s.rel.Unit(unit) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(map[string]interface{}{"blib": "blob"}) - c.Assert(err, jc.ErrorIsNil) - settings, err := ru.Settings() - c.Assert(err, jc.ErrorIsNil) - settings.Set("ping", "pong") - _, err = settings.Write() - c.Assert(err, jc.ErrorIsNil) - - cache := runner.NewRelationCache(s.apiRelUnit.ReadSettings, nil) - ctx := runner.NewContextRelation(s.apiRelUnit, cache) - - // Check that settings are read from state. - m, err := ctx.ReadSettings("u/1") - c.Assert(err, jc.ErrorIsNil) - expectMap := settings.Map() - expectSettings := convertMap(expectMap) - c.Assert(m, gc.DeepEquals, expectSettings) - - // Check that changes to state do not affect the obtained settings. - settings.Set("ping", "pow") - _, err = settings.Write() - c.Assert(err, jc.ErrorIsNil) - m, err = ctx.ReadSettings("u/1") - c.Assert(err, jc.ErrorIsNil) - c.Assert(m, gc.DeepEquals, expectSettings) -} - -func (s *ContextRelationSuite) TestLocalSettings(c *gc.C) { - ctx := runner.NewContextRelation(s.apiRelUnit, nil) - - // Change Settings... - node, err := ctx.Settings() - c.Assert(err, jc.ErrorIsNil) - expectSettings := node.Map() - expectOldMap := convertSettings(expectSettings) - node.Set("change", "exciting") - - // ...and check it's not written to state. - settings, err := s.ru.ReadSettings("u/0") - c.Assert(err, jc.ErrorIsNil) - c.Assert(settings, gc.DeepEquals, expectOldMap) - - // Write settings... - err = ctx.WriteSettings() - c.Assert(err, jc.ErrorIsNil) - - // ...and check it was written to state. 
-	settings, err = s.ru.ReadSettings("u/0")
-	c.Assert(err, jc.ErrorIsNil)
-	c.Assert(settings, gc.DeepEquals, map[string]interface{}{"change": "exciting"})
-}
-
-func convertSettings(settings params.Settings) map[string]interface{} {
-	result := make(map[string]interface{})
-	for k, v := range settings {
-		result[k] = v
-	}
-	return result
-}
-
-func convertMap(settingsMap map[string]interface{}) params.Settings {
-	result := make(params.Settings)
-	for k, v := range settingsMap {
-		result[k] = v.(string)
-	}
-	return result
-}

=== modified file 'src/github.com/juju/juju/worker/uniter/runner/runner.go'
--- src/github.com/juju/juju/worker/uniter/runner/runner.go	2015-10-23 18:28:45 +0000
+++ src/github.com/juju/juju/worker/uniter/runner/runner.go	2016-03-22 15:18:22 +0000
@@ -14,11 +14,14 @@
 	"github.com/juju/loggo"
 	utilexec "github.com/juju/utils/exec"
 
-	"github.com/juju/juju/version"
+	"github.com/juju/juju/worker/uniter/runner/context"
 	"github.com/juju/juju/worker/uniter/runner/debug"
 	"github.com/juju/juju/worker/uniter/runner/jujuc"
+	jujuos "github.com/juju/utils/os"
 )
 
+var logger = loggo.GetLogger("juju.worker.uniter.runner")
+
 // Runner is responsible for invoking commands in a context.
 type Runner interface {
@@ -39,9 +42,9 @@
 type Context interface {
 	jujuc.Context
 	Id() string
-	HookVars(paths Paths) []string
-	ActionData() (*ActionData, error)
-	SetProcess(process *os.Process)
+	HookVars(paths context.Paths) ([]string, error)
+	ActionData() (*context.ActionData, error)
+	SetProcess(process context.HookProcess)
 	HasExecutionSetUnitStatus() bool
 	ResetExecutionSetUnitStatus()
@@ -49,40 +52,15 @@
 	Flush(badge string, failure error) error
 }
 
-// Paths exposes the paths needed by Runner.
-type Paths interface {
-
-	// GetToolsDir returns the filesystem path to the directory containing
-	// the hook tool symlinks.
-	GetToolsDir() string
-
-	// GetCharmDir returns the filesystem path to the directory in which
-	// the charm is installed.
-	GetCharmDir() string
-
-	// GetJujucSocket returns the path to the socket used by the hook tools
-	// to communicate back to the executing uniter process. It might be a
-	// filesystem path, or it might be abstract.
-	GetJujucSocket() string
-
-	// GetMetricsSpoolDir returns the path to a metrics spool dir, used
-	// to store metrics recorded during a single hook run.
-	GetMetricsSpoolDir() string
-
-	// ComponentDir returns the filesystem path to the directory
-	// containing all data files for a component.
-	ComponentDir(name string) string
-}
-
 // NewRunner returns a Runner backed by the supplied context and paths.
-func NewRunner(context Context, paths Paths) Runner {
+func NewRunner(context Context, paths context.Paths) Runner {
 	return &runner{context, paths}
 }
 
 // runner implements Runner.
type runner struct { context Context - paths Paths + paths context.Paths } func (runner *runner) Context() Context { @@ -97,7 +75,10 @@ } defer srv.Close() - env := runner.context.HookVars(runner.paths) + env, err := runner.context.HookVars(runner.paths) + if err != nil { + return nil, errors.Trace(err) + } command := utilexec.RunParams{ Commands: commands, WorkingDir: runner.paths.GetCharmDir(), @@ -108,7 +89,7 @@ if err != nil { return nil, err } - runner.context.SetProcess(command.Process()) + runner.context.SetProcess(hookProcess{command.Process()}) // Block and wait for process to finish result, err := command.Wait() @@ -135,8 +116,11 @@ } defer srv.Close() - env := runner.context.HookVars(runner.paths) - if version.Current.OS == version.Windows { + env, err := runner.context.HookVars(runner.paths) + if err != nil { + return errors.Trace(err) + } + if jujuos.HostOS() == jujuos.Windows { // TODO(fwereade): somehow consolidate with utils/exec? // We don't do this on the other code path, which uses exec.RunCommands, // because that already has handling for windows environment requirements. @@ -157,10 +141,6 @@ charmDir := runner.paths.GetCharmDir() hook, err := searchHook(charmDir, filepath.Join(charmLocation, hookName)) if err != nil { - if IsMissingHookError(err) { - // Missing hook is perfectly valid, but worth mentioning. - logger.Infof("skipped %q hook (not implemented)", hookName) - } return err } hookCmd := hookCommand(hook) @@ -183,7 +163,7 @@ outWriter.Close() if err == nil { // Record the *os.Process of the hook - runner.context.SetProcess(ps.Process) + runner.context.SetProcess(hookProcess{ps.Process}) // Block until execution finishes err = ps.Wait() } @@ -210,3 +190,11 @@ func (runner *runner) getLogger(hookName string) loggo.Logger { return loggo.GetLogger(fmt.Sprintf("unit.%s.%s", runner.context.UnitName(), hookName)) } + +type hookProcess struct { + *os.Process +} + +func (p hookProcess) Pid() int { + return p.Process.Pid +} === modified file 'src/github.com/juju/juju/worker/uniter/runner/runner_test.go' --- src/github.com/juju/juju/worker/uniter/runner/runner_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/runner_test.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,6 @@ import ( "fmt" "io/ioutil" - "os" "path/filepath" "runtime" "strings" @@ -15,23 +14,23 @@ "github.com/juju/errors" envtesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" - "github.com/juju/utils" + "github.com/juju/utils/proxy" gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable/hooks" + "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" + runnertesting "github.com/juju/juju/worker/uniter/runner/testing" ) type RunCommandSuite struct { - HookContextSuite + ContextSuite } var _ = gc.Suite(&RunCommandSuite{}) -func (s *RunCommandSuite) getHookContext(c *gc.C) *runner.HookContext { - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - return s.HookContextSuite.getHookContext(c, uuid.String(), -1, "", noProxies) -} +var noProxies = proxy.Settings{} func (s *RunCommandSuite) TestRunCommandsEnvStdOutAndErrAndRC(c *gc.C) { // TODO(bogdanteleaga): powershell throws another exit status code when @@ -40,8 +39,9 @@ if runtime.GOOS == "windows" { c.Skip("bug 1403084: Have to figure out a good way to output to stderr from powershell") } - ctx := s.getHookContext(c) - paths := NewRealPaths(c) + ctx, err := s.contextFactory.HookContext(hook.Info{Kind: 
hooks.ConfigChanged}) + c.Assert(err, jc.ErrorIsNil) + paths := runnertesting.NewRealPaths(c) runner := runner.NewRunner(ctx, paths) commands := ` @@ -53,13 +53,13 @@ c.Assert(err, jc.ErrorIsNil) c.Assert(result.Code, gc.Equals, 42) - c.Assert(strings.TrimRight(string(result.Stdout), "\r\n"), gc.Equals, paths.charm) + c.Assert(strings.TrimRight(string(result.Stdout), "\r\n"), gc.Equals, paths.GetCharmDir()) c.Assert(strings.TrimRight(string(result.Stderr), "\r\n"), gc.Equals, "this is standard err") c.Assert(ctx.GetProcess(), gc.NotNil) } type RunHookSuite struct { - HookContextSuite + ContextSuite } var _ = gc.Suite(&RunHookSuite{}) @@ -113,12 +113,12 @@ } func (s *RunHookSuite) TestRunHook(c *gc.C) { - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) for i, t := range runHookTests { c.Logf("\ntest %d: %s; perm %v", i, t.summary, t.spec.perm) - ctx := s.getHookContext(c, uuid.String(), t.relid, t.remote, noProxies) - paths := NewRealPaths(c) + ctx, err := s.contextFactory.HookContext(hook.Info{Kind: hooks.ConfigChanged}) + c.Assert(err, jc.ErrorIsNil) + + paths := runnertesting.NewRealPaths(c) rnr := runner.NewRunner(ctx, paths) var hookExists bool if t.spec.perm != 0 { @@ -126,15 +126,15 @@ spec.dir = "hooks" spec.name = hookName c.Logf("makeCharm %#v", spec) - makeCharm(c, spec, paths.charm) + makeCharm(c, spec, paths.GetCharmDir()) hookExists = true } t0 := time.Now() - err := rnr.RunHook("something-happened") + err = rnr.RunHook("something-happened") if t.err == "" && hookExists { c.Assert(err, jc.ErrorIsNil) } else if !hookExists { - c.Assert(runner.IsMissingHookError(err), jc.IsTrue) + c.Assert(context.IsMissingHookError(err), jc.IsTrue) } else { c.Assert(err, gc.ErrorMatches, t.err) } @@ -146,7 +146,7 @@ type MockContext struct { runner.Context - actionData *runner.ActionData + actionData *context.ActionData expectPid int flushBadge string flushFailure error @@ -157,19 +157,19 @@ return "some-unit/999" } -func (ctx *MockContext) HookVars(paths runner.Paths) []string { - return []string{"VAR=value"} +func (ctx *MockContext) HookVars(paths context.Paths) ([]string, error) { + return []string{"VAR=value"}, nil } -func (ctx *MockContext) ActionData() (*runner.ActionData, error) { +func (ctx *MockContext) ActionData() (*context.ActionData, error) { if ctx.actionData == nil { return nil, errors.New("blam") } return ctx.actionData, nil } -func (ctx *MockContext) SetProcess(process *os.Process) { - ctx.expectPid = process.Pid +func (ctx *MockContext) SetProcess(process context.HookProcess) { + ctx.expectPid = process.Pid() } func (ctx *MockContext) Prepare() error { @@ -184,18 +184,18 @@ type RunMockContextSuite struct { envtesting.IsolationSuite - paths RealPaths + paths runnertesting.RealPaths } var _ = gc.Suite(&RunMockContextSuite{}) func (s *RunMockContextSuite) SetUpTest(c *gc.C) { s.IsolationSuite.SetUpTest(c) - s.paths = NewRealPaths(c) + s.paths = runnertesting.NewRealPaths(c) } func (s *RunMockContextSuite) assertRecordedPid(c *gc.C, expectPid int) { - path := filepath.Join(s.paths.charm, "pid") + path := filepath.Join(s.paths.GetCharmDir(), "pid") content, err := ioutil.ReadFile(path) c.Assert(err, jc.ErrorIsNil) expectContent := fmt.Sprintf("%d", expectPid) @@ -211,7 +211,7 @@ dir: "hooks", name: hookName, perm: 0700, - }, s.paths.charm) + }, s.paths.GetCharmDir()) actualErr := runner.NewRunner(ctx, s.paths).RunHook("something-happened") c.Assert(actualErr, gc.Equals, expectErr) c.Assert(ctx.flushBadge, gc.Equals, "something-happened") @@ -229,7 +229,7 @@ name: 
hookName, perm: 0700, code: 123, - }, s.paths.charm) + }, s.paths.GetCharmDir()) actualErr := runner.NewRunner(ctx, s.paths).RunHook("something-happened") c.Assert(actualErr, gc.Equals, expectErr) c.Assert(ctx.flushBadge, gc.Equals, "something-happened") @@ -241,13 +241,13 @@ expectErr := errors.New("pew pew pew") ctx := &MockContext{ flushResult: expectErr, - actionData: &runner.ActionData{}, + actionData: &context.ActionData{}, } makeCharm(c, hookSpec{ dir: "actions", name: hookName, perm: 0700, - }, s.paths.charm) + }, s.paths.GetCharmDir()) actualErr := runner.NewRunner(ctx, s.paths).RunAction("something-happened") c.Assert(actualErr, gc.Equals, expectErr) c.Assert(ctx.flushBadge, gc.Equals, "something-happened") @@ -259,14 +259,14 @@ expectErr := errors.New("pew pew pew") ctx := &MockContext{ flushResult: expectErr, - actionData: &runner.ActionData{}, + actionData: &context.ActionData{}, } makeCharm(c, hookSpec{ dir: "actions", name: hookName, perm: 0700, code: 123, - }, s.paths.charm) + }, s.paths.GetCharmDir()) actualErr := runner.NewRunner(ctx, s.paths).RunAction("something-happened") c.Assert(actualErr, gc.Equals, expectErr) c.Assert(ctx.flushBadge, gc.Equals, "something-happened") === added directory 'src/github.com/juju/juju/worker/uniter/runner/testing' === added file 'src/github.com/juju/juju/worker/uniter/runner/testing/utils.go' --- src/github.com/juju/juju/worker/uniter/runner/testing/utils.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/testing/utils.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,126 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "path/filepath" + "runtime" + + "github.com/juju/errors" + "github.com/juju/names" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/core/leadership" + "github.com/juju/juju/storage" + "github.com/juju/juju/worker/uniter/runner/jujuc" +) + +type fops interface { + // MkDir provides the functionality of gc.C.MkDir(). + MkDir() string +} + +// RealPaths implements Paths for tests that do touch the filesystem. 
+type RealPaths struct { + tools string + charm string + socket string + metricsspool string + componentDirs map[string]string + fops fops +} + +func osDependentSockPath(c *gc.C) string { + sockPath := filepath.Join(c.MkDir(), "test.sock") + if runtime.GOOS == "windows" { + return `\\.\pipe` + sockPath[2:] + } + return sockPath +} + +func NewRealPaths(c *gc.C) RealPaths { + return RealPaths{ + tools: c.MkDir(), + charm: c.MkDir(), + socket: osDependentSockPath(c), + metricsspool: c.MkDir(), + componentDirs: make(map[string]string), + fops: c, + } +} + +func (p RealPaths) GetMetricsSpoolDir() string { + return p.metricsspool +} + +func (p RealPaths) GetToolsDir() string { + return p.tools +} + +func (p RealPaths) GetCharmDir() string { + return p.charm +} + +func (p RealPaths) GetJujucSocket() string { + return p.socket +} + +func (p RealPaths) ComponentDir(name string) string { + if dirname, ok := p.componentDirs[name]; ok { + return dirname + } + p.componentDirs[name] = filepath.Join(p.fops.MkDir(), name) + return p.componentDirs[name] +} + +type StorageContextAccessor struct { + CStorage map[names.StorageTag]*ContextStorage +} + +func (s *StorageContextAccessor) StorageTags() ([]names.StorageTag, error) { + tags := set.NewTags() + for tag := range s.CStorage { + tags.Add(tag) + } + storageTags := make([]names.StorageTag, len(tags)) + for i, tag := range tags.SortedValues() { + storageTags[i] = tag.(names.StorageTag) + } + return storageTags, nil +} + +func (s *StorageContextAccessor) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, error) { + storage, ok := s.CStorage[tag] + if !ok { + return nil, errors.NotFoundf("storage") + } + return storage, nil +} + +type ContextStorage struct { + CTag names.StorageTag + CKind storage.StorageKind + CLocation string +} + +func (c *ContextStorage) Tag() names.StorageTag { + return c.CTag +} + +func (c *ContextStorage) Kind() storage.StorageKind { + return c.CKind +} + +func (c *ContextStorage) Location() string { + return c.CLocation +} + +type FakeTracker struct { + leadership.Tracker +} + +func (FakeTracker) ServiceName() string { + return "service-name" +} === removed file 'src/github.com/juju/juju/worker/uniter/runner/unitStorage_test.go' --- src/github.com/juju/juju/worker/uniter/runner/unitStorage_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/unitStorage_test.go 1970-01-01 00:00:00 +0000 @@ -1,238 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
-
-package runner_test
-
-import (
- "github.com/juju/names"
- jc "github.com/juju/testing/checkers"
- "github.com/juju/utils"
- "github.com/juju/utils/set"
- gc "gopkg.in/check.v1"
-
- "github.com/juju/juju/apiserver/params"
- "github.com/juju/juju/provider/ec2"
- "github.com/juju/juju/state"
- "github.com/juju/juju/storage/poolmanager"
- "github.com/juju/juju/storage/provider"
- "github.com/juju/juju/storage/provider/registry"
- "github.com/juju/juju/worker/uniter/runner"
-)
-
-type unitStorageSuite struct {
- HookContextSuite
- expectedStorageNames set.Strings
- charmName string
- initCons map[string]state.StorageConstraints
- ch *state.Charm
- initialStorageInstancesCount int
-}
-
-var _ = gc.Suite(&unitStorageSuite{})
-
-const (
- testPool = "block"
- testPersistentPool = "block-persistent"
-)
-
-func (s *unitStorageSuite) SetUpTest(c *gc.C) {
- s.HookContextSuite.SetUpTest(c)
- setupTestStorageSupport(c, s.State)
-}
-
-func (s *unitStorageSuite) TestAddUnitStorage(c *gc.C) {
- s.createStorageBlockUnit(c)
- count := uint64(1)
- s.assertUnitStorageAdded(c,
- map[string]params.StorageConstraints{
- "allecto": params.StorageConstraints{Count: &count}})
-}
-
-func (s *unitStorageSuite) TestAddUnitStorageIgnoresBlocks(c *gc.C) {
- s.createStorageBlockUnit(c)
- count := uint64(1)
- s.BlockDestroyEnvironment(c, "TestAddUnitStorageIgnoresBlocks")
- s.BlockRemoveObject(c, "TestAddUnitStorageIgnoresBlocks")
- s.BlockAllChanges(c, "TestAddUnitStorageIgnoresBlocks")
- s.assertUnitStorageAdded(c,
- map[string]params.StorageConstraints{
- "allecto": params.StorageConstraints{Count: &count}})
-}
-
-func (s *unitStorageSuite) TestAddUnitStorageZeroCount(c *gc.C) {
- s.createStorageBlockUnit(c)
- cons := map[string]params.StorageConstraints{
- "allecto": params.StorageConstraints{}}
-
- ctx := s.addUnitStorage(c, cons)
-
- // Flush the context with a success.
- err := ctx.Flush("success", nil)
- c.Assert(err, gc.ErrorMatches, `.*count must be specified.*`)
-
- // Make sure no storage instances were added
- after, err := s.State.AllStorageInstances()
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, 0)
- s.assertExistingStorage(c, after)
-}
-
-func (s *unitStorageSuite) TestAddUnitStorageWithSize(c *gc.C) {
- s.createStorageBlockUnit(c)
- size := uint64(1)
- cons := map[string]params.StorageConstraints{
- "allecto": params.StorageConstraints{Size: &size}}
-
- ctx := s.addUnitStorage(c, cons)
-
- // Flush the context with a success.
- err := ctx.Flush("success", nil)
- c.Assert(err, gc.ErrorMatches, `.*only count can be specified.*`)
-
- // Make sure no storage instances were added
- after, err := s.State.AllStorageInstances()
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, 0)
- s.assertExistingStorage(c, after)
-}
-
-func (s *unitStorageSuite) TestAddUnitStorageWithPool(c *gc.C) {
- s.createStorageBlockUnit(c)
- cons := map[string]params.StorageConstraints{
- "allecto": params.StorageConstraints{Pool: "loop"}}
-
- ctx := s.addUnitStorage(c, cons)
-
- // Flush the context with a success.
- err := ctx.Flush("success", nil)
- c.Assert(err, gc.ErrorMatches, `.*only count can be specified.*`)
-
- // Make sure no storage instances were added
- after, err := s.State.AllStorageInstances()
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, 0)
- s.assertExistingStorage(c, after)
-}
-
-func (s *unitStorageSuite) TestAddUnitStorageAccumulated(c *gc.C) {
- s.createStorageBlock2Unit(c)
- count := uint64(1)
- s.assertUnitStorageAdded(c,
- map[string]params.StorageConstraints{
- "multi2up": params.StorageConstraints{Count: &count}},
- map[string]params.StorageConstraints{
- "multi1to10": params.StorageConstraints{Count: &count}})
-}
-
-func (s *unitStorageSuite) TestAddUnitStorageAccumulatedSame(c *gc.C) {
- s.createStorageBlock2Unit(c)
- count := uint64(1)
- s.assertUnitStorageAdded(c,
- map[string]params.StorageConstraints{
- "multi2up": params.StorageConstraints{Count: &count}},
- map[string]params.StorageConstraints{
- "multi2up": params.StorageConstraints{Count: &count}})
-}
-
-func setupTestStorageSupport(c *gc.C, s *state.State) {
- stsetts := state.NewStateSettings(s)
- poolManager := poolmanager.New(stsetts)
- _, err := poolManager.Create(testPool, provider.LoopProviderType, map[string]interface{}{"it": "works"})
- c.Assert(err, jc.ErrorIsNil)
- _, err = poolManager.Create(testPersistentPool, ec2.EBS_ProviderType, map[string]interface{}{"persistent": true})
- c.Assert(err, jc.ErrorIsNil)
-
- registry.RegisterEnvironStorageProviders("dummy", ec2.EBS_ProviderType)
- registry.RegisterEnvironStorageProviders("dummyenv", ec2.EBS_ProviderType)
-}
-
-func (s *unitStorageSuite) createStorageEnabledUnit(c *gc.C) {
- s.ch = s.AddTestingCharm(c, s.charmName)
- s.service = s.AddTestingServiceWithStorage(c, s.charmName, s.ch, s.initCons)
- s.unit = s.AddUnit(c, s.service)
-
- s.assertStorageCreated(c)
- s.createHookSupport(c)
-}
-
-func (s *unitStorageSuite) createStorageBlockUnit(c *gc.C) {
- s.charmName = "storage-block"
- s.initCons = map[string]state.StorageConstraints{
- "data": makeStorageCons("block", 1024, 1),
- }
- s.createStorageEnabledUnit(c)
- s.assertStorageCreated(c)
- s.createHookSupport(c)
-}
-
-func (s *unitStorageSuite) createStorageBlock2Unit(c *gc.C) {
- s.charmName = "storage-block2"
- s.initCons = map[string]state.StorageConstraints{
- "multi1to10": makeStorageCons("loop", 0, 3),
- }
- s.createStorageEnabledUnit(c)
- s.assertStorageCreated(c)
- s.createHookSupport(c)
-}
-
-func (s *unitStorageSuite) assertStorageCreated(c *gc.C) {
- all, err := s.State.AllStorageInstances()
- c.Assert(err, jc.ErrorIsNil)
- s.initialStorageInstancesCount = len(all)
- s.expectedStorageNames = set.NewStrings()
- for _, one := range all {
- s.expectedStorageNames.Add(one.StorageName())
- }
-}
-
-func (s *unitStorageSuite) createHookSupport(c *gc.C) {
- password, err := utils.RandomPassword()
- err = s.unit.SetPassword(password)
- c.Assert(err, jc.ErrorIsNil)
- s.st = s.OpenAPIAs(c, s.unit.Tag(), password)
- s.uniter, err = s.st.Uniter()
- c.Assert(err, jc.ErrorIsNil)
- c.Assert(s.uniter, gc.NotNil)
- s.apiUnit, err = s.uniter.Unit(s.unit.Tag().(names.UnitTag))
- c.Assert(err, jc.ErrorIsNil)
-
- err = s.unit.SetCharmURL(s.ch.URL())
- c.Assert(err, jc.ErrorIsNil)
-}
-
-func makeStorageCons(pool string, size, count uint64) state.StorageConstraints {
- return state.StorageConstraints{Pool: pool, Size: size, Count: count}
-}
-
-func (s *unitStorageSuite) addUnitStorage(c *gc.C, cons ...map[string]params.StorageConstraints) *runner.HookContext {
- // 
Get the context. - ctx := s.getHookContext(c, s.State.EnvironUUID(), -1, "", noProxies) - c.Assert(ctx.UnitName(), gc.Equals, s.unit.Name()) - - for _, one := range cons { - for storage, _ := range one { - s.expectedStorageNames.Add(storage) - } - ctx.AddUnitStorage(one) - } - return ctx -} - -func (s *unitStorageSuite) assertUnitStorageAdded(c *gc.C, cons ...map[string]params.StorageConstraints) { - ctx := s.addUnitStorage(c, cons...) - - // Flush the context with a success. - err := ctx.Flush("success", nil) - c.Assert(err, jc.ErrorIsNil) - - after, err := s.State.AllStorageInstances() - c.Assert(err, jc.ErrorIsNil) - c.Assert(len(after)-s.initialStorageInstancesCount, gc.Equals, len(cons)) - s.assertExistingStorage(c, after) -} - -func (s *unitStorageSuite) assertExistingStorage(c *gc.C, all []state.StorageInstance) { - for _, one := range all { - c.Assert(s.expectedStorageNames.Contains(one.StorageName()), jc.IsTrue) - } -} === modified file 'src/github.com/juju/juju/worker/uniter/runner/util_test.go' --- src/github.com/juju/juju/worker/uniter/runner/util_test.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/util_test.go 2016-03-22 15:18:22 +0000 @@ -1,4 +1,4 @@ -// Copyright 2012-2014 Canonical Ltd. +// Copyright 2015 Canonical Ltd. // Licensed under the AGPLv3, see LICENCE file for details. package runner_test @@ -12,152 +12,67 @@ "time" "github.com/juju/names" - jujutesting "github.com/juju/testing" jc "github.com/juju/testing/checkers" "github.com/juju/utils" - "github.com/juju/utils/proxy" - "github.com/juju/utils/set" + "github.com/juju/utils/fs" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" "github.com/juju/juju/api" - "github.com/juju/juju/api/block" "github.com/juju/juju/api/uniter" "github.com/juju/juju/instance" "github.com/juju/juju/juju/testing" "github.com/juju/juju/network" "github.com/juju/juju/state" - "github.com/juju/juju/state/multiwatcher" "github.com/juju/juju/storage" + "github.com/juju/juju/testcharms" + coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/runner" - "github.com/juju/juju/worker/uniter/runner/jujuc" + "github.com/juju/juju/worker/uniter/runner/context" + runnertesting "github.com/juju/juju/worker/uniter/runner/testing" ) -var noProxies = proxy.Settings{} var apiAddrs = []string{"a1:123", "a2:123"} -var expectedApiAddrs = strings.Join(apiAddrs, " ") - -// MockEnvPaths implements Paths for tests that don't need to actually touch -// the filesystem. -type MockEnvPaths struct{} - -func (MockEnvPaths) GetToolsDir() string { - return "path-to-tools" -} - -func (MockEnvPaths) GetCharmDir() string { - return "path-to-charm" -} - -func (MockEnvPaths) GetJujucSocket() string { - return "path-to-jujuc.socket" -} - -func (MockEnvPaths) GetMetricsSpoolDir() string { - return "path-to-metrics-spool-dir" -} - -func (MockEnvPaths) ComponentDir(name string) string { - return filepath.Join("path-to-base-dir", name) -} - -type fops interface { - // MkDir provides the functionality of gc.C.MkDir(). - MkDir() string -} - -// RealPaths implements Paths for tests that do touch the filesystem. 
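Both MockEnvPaths above and RealPaths below satisfy the runner's Paths contract. Sketched here from the five methods these doubles implement (the authoritative definition lives in the runner package and may carry additional documentation):

    type Paths interface {
        GetToolsDir() string
        GetCharmDir() string
        GetJujucSocket() string
        GetMetricsSpoolDir() string
        ComponentDir(name string) string
    }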
-type RealPaths struct { - tools string - charm string - socket string - metricsspool string - componentDirs map[string]string - fops fops -} - -func osDependentSockPath(c *gc.C) string { - sockPath := filepath.Join(c.MkDir(), "test.sock") - if runtime.GOOS == "windows" { - return `\\.\pipe` + sockPath[2:] - } - return sockPath -} - -func NewRealPaths(c *gc.C) RealPaths { - return RealPaths{ - tools: c.MkDir(), - charm: c.MkDir(), - socket: osDependentSockPath(c), - metricsspool: c.MkDir(), - componentDirs: make(map[string]string), - fops: c, - } -} - -func (p RealPaths) GetMetricsSpoolDir() string { - return p.metricsspool -} - -func (p RealPaths) GetToolsDir() string { - return p.tools -} - -func (p RealPaths) GetCharmDir() string { - return p.charm -} - -func (p RealPaths) GetJujucSocket() string { - return p.socket -} - -func (p RealPaths) ComponentDir(name string) string { - if dirname, ok := p.componentDirs[name]; ok { - return dirname - } - p.componentDirs[name] = filepath.Join(p.fops.MkDir(), name) - return p.componentDirs[name] -} - -// HookContextSuite contains shared setup for various other test suites. Test -// methods should not be added to this type, because they'll get run repeatedly. -type HookContextSuite struct { + +type ContextSuite struct { testing.JujuConnSuite - service *state.Service - unit *state.Unit - machine *state.Machine - relch *state.Charm - relunits map[int]*state.RelationUnit - storage *storageContextAccessor - - st api.Connection - uniter *uniter.State - apiUnit *uniter.Unit - meteredApiUnit *uniter.Unit - meteredCharm *state.Charm - apiRelunits map[int]*uniter.RelationUnit - BlockHelper + + paths runnertesting.RealPaths + factory runner.Factory + contextFactory context.ContextFactory + membership map[int][]string + + st api.Connection + service *state.Service + machine *state.Machine + unit *state.Unit + uniter *uniter.State + apiUnit *uniter.Unit + storage *runnertesting.StorageContextAccessor + + apiRelunits map[int]*uniter.RelationUnit + relch *state.Charm + relunits map[int]*state.RelationUnit } -func (s *HookContextSuite) SetUpTest(c *gc.C) { - var err error +func (s *ContextSuite) SetUpTest(c *gc.C) { s.JujuConnSuite.SetUpTest(c) - s.BlockHelper = NewBlockHelper(s.APIState) - c.Assert(s.BlockHelper, gc.NotNil) - s.AddCleanup(func(*gc.C) { s.BlockHelper.Close() }) - // reset s.machine = nil - sch := s.AddTestingCharm(c, "wordpress") - s.service = s.AddTestingService(c, "u", sch) + ch := s.AddTestingCharm(c, "wordpress") + s.service = s.AddTestingService(c, "u", ch) s.unit = s.AddUnit(c, s.service) - s.meteredCharm = s.AddTestingCharm(c, "metered") - meteredService := s.AddTestingService(c, "m", s.meteredCharm) - meteredUnit := s.addUnit(c, meteredService) - err = meteredUnit.SetCharmURL(s.meteredCharm.URL()) - c.Assert(err, jc.ErrorIsNil) + storageData0 := names.NewStorageTag("data/0") + s.storage = &runnertesting.StorageContextAccessor{ + map[names.StorageTag]*runnertesting.ContextStorage{ + storageData0: &runnertesting.ContextStorage{ + storageData0, + storage.StorageKindBlock, + "/dev/sdb", + }, + }, + } password, err := utils.RandomPassword() err = s.unit.SetPassword(password) @@ -169,17 +84,13 @@ s.apiUnit, err = s.uniter.Unit(s.unit.Tag().(names.UnitTag)) c.Assert(err, jc.ErrorIsNil) - err = meteredUnit.SetPassword(password) - c.Assert(err, jc.ErrorIsNil) - meteredState := s.OpenAPIAs(c, meteredUnit.Tag(), password) - meteredUniter, err := meteredState.Uniter() - s.meteredApiUnit, err = meteredUniter.Unit(meteredUnit.Tag().(names.UnitTag)) - 
c.Assert(err, jc.ErrorIsNil) + s.paths = runnertesting.NewRealPaths(c) + s.membership = map[int][]string{} // Note: The unit must always have a charm URL set, because this // happens as part of the installation process (that happens // before the initial install hook). - err = s.unit.SetCharmURL(sch.URL()) + err = s.unit.SetCharmURL(ch.URL()) c.Assert(err, jc.ErrorIsNil) s.relch = s.AddTestingCharm(c, "mysql") s.relunits = map[int]*state.RelationUnit{} @@ -187,29 +98,45 @@ s.AddContextRelation(c, "db0") s.AddContextRelation(c, "db1") - storageData0 := names.NewStorageTag("data/0") - s.storage = &storageContextAccessor{ - map[names.StorageTag]*contextStorage{ - storageData0: &contextStorage{ - storageData0, - storage.StorageKindBlock, - "/dev/sdb", - }, - }, - } -} - -func (s *HookContextSuite) GetContext( - c *gc.C, relId int, remoteName string, -) jujuc.Context { - uuid, err := utils.NewUUID() - c.Assert(err, jc.ErrorIsNil) - return s.getHookContext( - c, uuid.String(), relId, remoteName, noProxies, - ) -} - -func (s *HookContextSuite) addUnit(c *gc.C, svc *state.Service) *state.Unit { + s.contextFactory, err = context.NewContextFactory( + s.uniter, + s.unit.Tag().(names.UnitTag), + runnertesting.FakeTracker{}, + s.getRelationInfos, + s.storage, + s.paths, + coretesting.NewClock(time.Time{}), + ) + c.Assert(err, jc.ErrorIsNil) + + factory, err := runner.NewFactory( + s.uniter, + s.paths, + s.contextFactory, + ) + c.Assert(err, jc.ErrorIsNil) + s.factory = factory +} + +func (s *ContextSuite) AddContextRelation(c *gc.C, name string) { + s.AddTestingService(c, name, s.relch) + eps, err := s.State.InferEndpoints("u", name) + c.Assert(err, jc.ErrorIsNil) + rel, err := s.State.AddRelation(eps...) + c.Assert(err, jc.ErrorIsNil) + ru, err := rel.Unit(s.unit) + c.Assert(err, jc.ErrorIsNil) + err = ru.EnterScope(map[string]interface{}{"relation-name": name}) + c.Assert(err, jc.ErrorIsNil) + s.relunits[rel.Id()] = ru + apiRel, err := s.uniter.Relation(rel.Tag().(names.RelationTag)) + c.Assert(err, jc.ErrorIsNil) + apiRelUnit, err := apiRel.Unit(s.apiUnit) + c.Assert(err, jc.ErrorIsNil) + s.apiRelunits[rel.Id()] = apiRelUnit +} + +func (s *ContextSuite) AddUnit(c *gc.C, svc *state.Service) *state.Unit { unit, err := svc.AddUnit() c.Assert(err, jc.ErrorIsNil) if s.machine != nil { @@ -230,178 +157,30 @@ } err = s.machine.SetProvisioned("i-exist", "fake_nonce", &hwc) c.Assert(err, jc.ErrorIsNil) - return unit -} -func (s *HookContextSuite) AddUnit(c *gc.C, svc *state.Service) *state.Unit { - unit := s.addUnit(c, svc) name := strings.Replace(unit.Name(), "/", "-", 1) privateAddr := network.NewScopedAddress(name+".testing.invalid", network.ScopeCloudLocal) - err := s.machine.SetProviderAddresses(privateAddr) + err = s.machine.SetProviderAddresses(privateAddr) c.Assert(err, jc.ErrorIsNil) return unit } -func (s *HookContextSuite) AddContextRelation(c *gc.C, name string) { - s.AddTestingService(c, name, s.relch) - eps, err := s.State.InferEndpoints("u", name) - c.Assert(err, jc.ErrorIsNil) - rel, err := s.State.AddRelation(eps...) 
- c.Assert(err, jc.ErrorIsNil) - ru, err := rel.Unit(s.unit) - c.Assert(err, jc.ErrorIsNil) - err = ru.EnterScope(map[string]interface{}{"relation-name": name}) - c.Assert(err, jc.ErrorIsNil) - s.relunits[rel.Id()] = ru - apiRel, err := s.uniter.Relation(rel.Tag().(names.RelationTag)) - c.Assert(err, jc.ErrorIsNil) - apiRelUnit, err := apiRel.Unit(s.apiUnit) - c.Assert(err, jc.ErrorIsNil) - s.apiRelunits[rel.Id()] = apiRelUnit -} - -func (s *HookContextSuite) getHookContext(c *gc.C, uuid string, relid int, - remote string, proxies proxy.Settings) *runner.HookContext { - if relid != -1 { - _, found := s.apiRelunits[relid] - c.Assert(found, jc.IsTrue) - } - facade, err := s.st.Uniter() - c.Assert(err, jc.ErrorIsNil) - - relctxs := map[int]*runner.ContextRelation{} - for relId, relUnit := range s.apiRelunits { - cache := runner.NewRelationCache(relUnit.ReadSettings, nil) - relctxs[relId] = runner.NewContextRelation(relUnit, cache) - } - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - - context, err := runner.NewHookContext(s.apiUnit, facade, "TestCtx", uuid, - env.Name(), relid, remote, relctxs, apiAddrs, - proxies, false, nil, nil, s.machine.Tag().(names.MachineTag), - NewRealPaths(c)) - c.Assert(err, jc.ErrorIsNil) - return context -} - -func (s *HookContextSuite) getMeteredHookContext(c *gc.C, uuid string, relid int, - remote string, proxies proxy.Settings, canAddMetrics bool, metrics *charm.Metrics, paths RealPaths) *runner.HookContext { - if relid != -1 { - _, found := s.apiRelunits[relid] - c.Assert(found, jc.IsTrue) - } - facade, err := s.st.Uniter() - c.Assert(err, jc.ErrorIsNil) - - relctxs := map[int]*runner.ContextRelation{} - for relId, relUnit := range s.apiRelunits { - cache := runner.NewRelationCache(relUnit.ReadSettings, nil) - relctxs[relId] = runner.NewContextRelation(relUnit, cache) - } - - context, err := runner.NewHookContext(s.meteredApiUnit, facade, "TestCtx", uuid, - "test-env-name", relid, remote, relctxs, apiAddrs, - proxies, canAddMetrics, metrics, nil, s.machine.Tag().(names.MachineTag), - paths) - c.Assert(err, jc.ErrorIsNil) - return context -} - -func (s *HookContextSuite) metricsDefinition(name string) *charm.Metrics { - return &charm.Metrics{Metrics: map[string]charm.Metric{name: {Type: charm.MetricTypeGauge, Description: "generated metric"}}} -} - -func (s *HookContextSuite) AssertCoreContext(c *gc.C, ctx runner.Context) { - c.Assert(ctx.UnitName(), gc.Equals, "u/0") - c.Assert(runner.ContextMachineTag(ctx), jc.DeepEquals, names.NewMachineTag("0")) - - var expectAddressSet bool - expect, err := s.unit.PrivateAddress() - if err == nil { - expectAddressSet = true - } else if !network.IsNoAddress(err) { - c.Fatalf("unexpected error: %v", err) - } - actual, actualOK := ctx.PrivateAddress() - c.Assert(actual, gc.Equals, expect.Value) - c.Assert(actualOK, gc.Equals, expectAddressSet) - - expectAddressSet = false - expect, err = s.unit.PublicAddress() - if err == nil { - expectAddressSet = true - } else if !network.IsNoAddress(err) { - c.Fatalf("unexpected error: %v", err) - } - actual, actualOK = ctx.PublicAddress() - c.Assert(actual, gc.Equals, expect.Value) - c.Assert(actualOK, gc.Equals, expectAddressSet) - - env, err := s.State.Environment() - c.Assert(err, jc.ErrorIsNil) - name, uuid := runner.ContextEnvInfo(ctx) - c.Assert(name, gc.Equals, env.Name()) - c.Assert(uuid, gc.Equals, env.UUID()) - - c.Assert(ctx.RelationIds(), gc.HasLen, 2) - - r, found := ctx.Relation(0) - c.Assert(found, jc.IsTrue) - c.Assert(r.Name(), gc.Equals, "db") - 
c.Assert(r.FakeId(), gc.Equals, "db:0") - - r, found = ctx.Relation(1) - c.Assert(found, jc.IsTrue) - c.Assert(r.Name(), gc.Equals, "db") - c.Assert(r.FakeId(), gc.Equals, "db:1") - - az, exists := ctx.AvailabilityZone() - c.Assert(exists, jc.IsTrue) - c.Assert(az, gc.Equals, "a-zone") -} - -func (s *HookContextSuite) AssertNotActionContext(c *gc.C, ctx runner.Context) { - actionData, err := ctx.ActionData() - c.Assert(actionData, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "not running an action") -} - -func (s *HookContextSuite) AssertActionContext(c *gc.C, ctx runner.Context) { - actionData, err := ctx.ActionData() - c.Assert(actionData, gc.NotNil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *HookContextSuite) AssertNotStorageContext(c *gc.C, ctx runner.Context) { - storageAttachment, ok := ctx.HookStorage() - c.Assert(storageAttachment, gc.IsNil) - c.Assert(ok, jc.IsFalse) -} - -func (s *HookContextSuite) AssertStorageContext(c *gc.C, ctx runner.Context, id string, attachment storage.StorageAttachmentInfo) { - fromCache, ok := ctx.HookStorage() - c.Assert(ok, jc.IsTrue) - c.Assert(fromCache, gc.NotNil) - c.Assert(fromCache.Tag().Id(), gc.Equals, id) - c.Assert(fromCache.Kind(), gc.Equals, attachment.Kind) - c.Assert(fromCache.Location(), gc.Equals, attachment.Location) -} - -func (s *HookContextSuite) AssertRelationContext(c *gc.C, ctx runner.Context, relId int, remoteUnit string) *runner.ContextRelation { - actualRemoteUnit, _ := ctx.RemoteUnitName() - c.Assert(actualRemoteUnit, gc.Equals, remoteUnit) - rel, found := ctx.HookRelation() - c.Assert(found, jc.IsTrue) - c.Assert(rel.Id(), gc.Equals, relId) - return rel.(*runner.ContextRelation) -} - -func (s *HookContextSuite) AssertNotRelationContext(c *gc.C, ctx runner.Context) { - rel, found := ctx.HookRelation() - c.Assert(rel, gc.IsNil) - c.Assert(found, jc.IsFalse) +func (s *ContextSuite) SetCharm(c *gc.C, name string) { + err := os.RemoveAll(s.paths.GetCharmDir()) + c.Assert(err, jc.ErrorIsNil) + err = fs.Copy(testcharms.Repo.CharmDirPath(name), s.paths.GetCharmDir()) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *ContextSuite) getRelationInfos() map[int]*context.RelationInfo { + info := map[int]*context.RelationInfo{} + for relId, relUnit := range s.apiRelunits { + info[relId] = &context.RelationInfo{ + RelationUnit: relUnit, + MemberNames: s.membership[relId], + } + } + return info } // hookSpec supports makeCharm. @@ -466,106 +245,3 @@ } printf("exit %d", spec.code) } - -type storageContextAccessor struct { - storage map[names.StorageTag]*contextStorage -} - -func (s *storageContextAccessor) StorageTags() []names.StorageTag { - tags := set.NewTags() - for tag := range s.storage { - tags.Add(tag) - } - storageTags := make([]names.StorageTag, len(tags)) - for i, tag := range tags.SortedValues() { - storageTags[i] = tag.(names.StorageTag) - } - return storageTags -} - -func (s *storageContextAccessor) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, bool) { - storage, ok := s.storage[tag] - return storage, ok -} - -type contextStorage struct { - tag names.StorageTag - kind storage.StorageKind - location string -} - -func (c *contextStorage) Tag() names.StorageTag { - return c.tag -} - -func (c *contextStorage) Kind() storage.StorageKind { - return c.kind -} - -func (c *contextStorage) Location() string { - return c.location -} - -type BlockHelper struct { - blockClient *block.Client -} - -// NewBlockHelper creates a block switch used in testing -// to manage desired juju blocks. 
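The BlockHelper being removed here was used by suites such as the deleted TestAddUnitStorageIgnoresBlocks to flip environment blocks on before exercising a hook context. A minimal usage sketch, with the message string purely illustrative:

    helper := NewBlockHelper(s.APIState)
    defer helper.Close()
    helper.BlockAllChanges(c, "freeze the environment for this test")
    // ... exercise code that must respect, or deliberately bypass, the block ...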
-func NewBlockHelper(st api.Connection) BlockHelper { - return BlockHelper{ - blockClient: block.NewClient(st), - } -} - -// on switches on desired block and -// asserts that no errors were encountered. -func (s *BlockHelper) on(c *gc.C, blockType multiwatcher.BlockType, msg string) { - c.Assert(s.blockClient.SwitchBlockOn(string(blockType), msg), gc.IsNil) -} - -// BlockAllChanges switches changes block on. -// This prevents all changes to juju environment. -func (s *BlockHelper) BlockAllChanges(c *gc.C, msg string) { - s.on(c, multiwatcher.BlockChange, msg) -} - -// BlockRemoveObject switches remove block on. -// This prevents any object/entity removal on juju environment -func (s *BlockHelper) BlockRemoveObject(c *gc.C, msg string) { - s.on(c, multiwatcher.BlockRemove, msg) -} - -// BlockDestroyEnvironment switches destroy block on. -// This prevents juju environment destruction. -func (s *BlockHelper) BlockDestroyEnvironment(c *gc.C, msg string) { - s.on(c, multiwatcher.BlockDestroy, msg) -} - -func (s *BlockHelper) Close() { - s.blockClient.Close() -} - -// StubMetricsRecorder implements the MetricsRecorder interface. -type StubMetricsRecorder struct { - *jujutesting.Stub -} - -// AddMetric implements the MetricsRecorder interface. -func (s StubMetricsRecorder) AddMetric(key, value string, created time.Time) error { - s.AddCall("AddMetric", key, value, created) - return nil -} - -func (mr *StubMetricsRecorder) IsDeclaredMetric(key string) bool { - mr.MethodCall(mr, "IsDeclaredMetric", key) - return true -} - -// Close implements the MetricsRecorder interface. -func (s StubMetricsRecorder) Close() error { - s.AddCall("Close") - return nil -} - -var _ runner.MetricsRecorder = (*StubMetricsRecorder)(nil) === modified file 'src/github.com/juju/juju/worker/uniter/runner/util_unix_test.go' --- src/github.com/juju/juju/worker/uniter/runner/util_unix_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/util_unix_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,6 @@ var ( // Platform specific hook name used in runner_test.go hookName = "something-happened" - // Platform specific script used in runner_test.go echoPidScript = "echo $$ > pid" ) === modified file 'src/github.com/juju/juju/worker/uniter/runner/util_windows_test.go' --- src/github.com/juju/juju/worker/uniter/runner/util_windows_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/runner/util_windows_test.go 2016-03-22 15:18:22 +0000 @@ -9,7 +9,6 @@ var ( // Platform specific hook name used in runner_test.go hookName = "something-happened.ps1" - // Platform specific script used in runner_test.go echoPidScript = "Set-Content pid $pid" ) === removed file 'src/github.com/juju/juju/worker/uniter/status.go' --- src/github.com/juju/juju/worker/uniter/status.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/status.go 1970-01-01 00:00:00 +0000 @@ -1,24 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter - -import ( - "time" -) - -const ( - // interval at which the unit's status should be polled - statusPollInterval = 5 * time.Minute -) - -// updateStatusSignal returns a time channel that fires after a given interval. -func updateStatusSignal(now, lastSignal time.Time, interval time.Duration) <-chan time.Time { - waitDuration := interval - now.Sub(lastSignal) - return time.After(waitDuration) -} - -// NewUpdateStatusTimer returns a timed signal suitable for update-status hook. 
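The removed updateStatusSignal above anchors the next update-status poll to the previous signal rather than to the current time, so a late observation does not stretch the interval. A worked sketch using the 5-minute statusPollInterval defined in the deleted file:

    now := time.Now()
    lastSignal := now.Add(-2 * time.Minute) // last poll fired two minutes ago
    signal := updateStatusSignal(now, lastSignal, statusPollInterval)
    <-signal // fires after 5m - 2m = 3m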
-func NewUpdateStatusTimer() TimedSignal { - return updateStatusSignal -} === modified file 'src/github.com/juju/juju/worker/uniter/storage/attachments.go' --- src/github.com/juju/juju/worker/uniter/storage/attachments.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/attachments.go 2016-03-22 15:18:22 +0000 @@ -13,9 +13,8 @@ "github.com/juju/loggo" "github.com/juju/names" "github.com/juju/utils/set" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/hook" "github.com/juju/juju/worker/uniter/runner/jujuc" @@ -26,10 +25,6 @@ // StorageAccessor is an interface for accessing information about // storage attachments. type StorageAccessor interface { - // WatchStorageAttachment starts a watcher for changes to the - // storage attachment with the specified unit and storage tags. - WatchStorageAttachment(names.StorageTag, names.UnitTag) (watcher.NotifyWatcher, error) - // StorageAttachment returns details of the storage attachment // with the specified unit and storage tags. StorageAttachment(names.StorageTag, names.UnitTag) (params.StorageAttachment, error) @@ -53,6 +48,11 @@ RemoveStorageAttachment(names.StorageTag, names.UnitTag) error } +type storageAttachment struct { + *stateFile + jujuc.ContextStorageAttachment +} + // Attachments generates storage hooks in response to changes to // storage attachments, and provides access to information about // storage attachments to hooks. @@ -60,13 +60,14 @@ st StorageAccessor unitTag names.UnitTag abort <-chan struct{} - hooks chan hook.Info - storagers map[names.StorageTag]*storager storageStateDir string // pending is the set of tags for storage attachments // for which no hooks have been run. pending set.Tags + + // current storage attachments + storageAttachments map[names.StorageTag]storageAttachment } // NewAttachments returns a new Attachments. @@ -77,13 +78,12 @@ abort <-chan struct{}, ) (*Attachments, error) { a := &Attachments{ - st: st, - unitTag: tag, - abort: abort, - hooks: make(chan hook.Info), - storagers: make(map[names.StorageTag]*storager), - storageStateDir: storageStateDir, - pending: make(set.Tags), + st: st, + unitTag: tag, + abort: abort, + storageAttachments: make(map[names.StorageTag]storageAttachment), + storageStateDir: storageStateDir, + pending: make(set.Tags), } if err := a.init(); err != nil { return nil, err @@ -141,22 +141,6 @@ return nil } -// Hooks returns the channel on which storage hook execution requests -// are sent. -func (a *Attachments) Hooks() <-chan hook.Info { - return a.hooks -} - -// Stop stops all of the storagers. -func (a *Attachments) Stop() error { - for _, s := range a.storagers { - if err := s.Stop(); err != nil { - return errors.Trace(err) - } - } - return nil -} - // SetDying ensures that any unprovisioned storage attachments are removed // from state, and Pending is updated. After SetDying returns successfully, // and once Pending returns zero and Empty returns true, there will be no @@ -247,26 +231,26 @@ if err != nil { return errors.Trace(err) } - storager := a.storagers[storageTag] + _, storageKnown := a.storageAttachments[storageTag] switch life { case params.Dying: if stateFile.state.attached { // Previously ran storage-attached, so we'll - // leave the storager to handle the lifecycle + // leave the external watcher to handle the lifecycle // state change. 
- if storager == nil { - panic("missing storager for attached storage") + if !storageKnown { + panic("missing status for attached storage") } return nil } // Storage attachment hasn't previously been observed, // so we can short-circuit the removal. - err := a.removeStorageAttachment(storageTag, storager) + err := a.removeStorageAttachment(storageTag, storageKnown) return errors.Trace(err) } - if storager == nil { + if !storageKnown { a.pending.Add(storageTag) return a.add(storageTag, stateFile) } @@ -275,12 +259,10 @@ // add creates a new storager for the specified storage tag. func (a *Attachments) add(storageTag names.StorageTag, stateFile *stateFile) error { - s, err := newStorager(a.st, a.unitTag, storageTag, stateFile, a.hooks) - if err != nil { - return errors.Annotatef(err, "watching storage %q", storageTag.Id()) + a.storageAttachments[storageTag] = storageAttachment{ + stateFile: stateFile, } - a.storagers[storageTag] = s - logger.Debugf("watching storage %q", storageTag.Id()) + logger.Debugf("adding storage %q", storageTag.Id()) return nil } @@ -292,48 +274,48 @@ // Empty reports whether or not there are any active storage attachments. func (a *Attachments) Empty() bool { - return len(a.storagers) == 0 + return len(a.storageAttachments) == 0 } // Storage returns the ContextStorage with the supplied tag if it was // found, and whether it was found. -func (a *Attachments) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, bool) { - if s, ok := a.storagers[tag]; ok { - return s.Context() +func (a *Attachments) Storage(tag names.StorageTag) (jujuc.ContextStorageAttachment, error) { + if attachment, ok := a.storageAttachments[tag]; ok { + return attachment, nil } - return nil, false + return nil, errors.NotFoundf("storage") } // StorageTags returns the names.StorageTags for the active storage attachments. -func (a *Attachments) StorageTags() []names.StorageTag { +func (a *Attachments) StorageTags() ([]names.StorageTag, error) { tags := set.NewTags() - for tag := range a.storagers { + for tag := range a.storageAttachments { tags.Add(tag) } storageTags := make([]names.StorageTag, tags.Size()) for i, tag := range tags.SortedValues() { storageTags[i] = tag.(names.StorageTag) } - return storageTags + return storageTags, nil } // ValidateHook validates the hook against the current state. func (a *Attachments) ValidateHook(hi hook.Info) error { - storager, err := a.storagerForHook(hi) + storageState, err := a.storageStateForHook(hi) if err != nil { return errors.Trace(err) } - return storager.state.ValidateHook(hi) + return storageState.ValidateHook(hi) } // CommitHook persists the state change encoded in the supplied storage // hook, or returns an error if the hook is invalid given current state. 
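With the storager goroutines gone, Storage above reports unknown tags through a not-found error rather than a bare bool, so callers discriminate with the errors package. A minimal sketch (the logging line is illustrative):

    attachment, err := a.Storage(names.NewStorageTag("data/0"))
    switch {
    case errors.IsNotFound(err):
        // data/0 is not an active attachment; treat it as absent.
    case err != nil:
        // A real failure; propagate it to the caller.
    default:
        logger.Debugf("%s is %v at %s", attachment.Tag(), attachment.Kind(), attachment.Location())
    }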
func (a *Attachments) CommitHook(hi hook.Info) error { - storager, err := a.storagerForHook(hi) + storageState, err := a.storageStateForHook(hi) if err != nil { return errors.Trace(err) } - if err := storager.CommitHook(hi); err != nil { + if err := storageState.CommitHook(hi); err != nil { return err } storageTag := names.NewStorageTag(hi.StorageId) @@ -341,35 +323,32 @@ case hooks.StorageAttached: a.pending.Remove(storageTag) case hooks.StorageDetaching: - if err := a.removeStorageAttachment(storageTag, storager); err != nil { + if err := a.removeStorageAttachment(storageTag, storageState != nil); err != nil { return errors.Trace(err) } } return nil } -func (a *Attachments) removeStorageAttachment(tag names.StorageTag, s *storager) error { +func (a *Attachments) removeStorageAttachment(tag names.StorageTag, storageKnown bool) error { if err := a.st.RemoveStorageAttachment(tag, a.unitTag); err != nil { return errors.Annotate(err, "removing storage attachment") } a.pending.Remove(tag) - if s == nil { + if !storageKnown { return nil } - if err := s.Stop(); err != nil { - return errors.Trace(err) - } - delete(a.storagers, tag) + delete(a.storageAttachments, tag) return nil } -func (a *Attachments) storagerForHook(hi hook.Info) (*storager, error) { +func (a *Attachments) storageStateForHook(hi hook.Info) (*stateFile, error) { if !hi.Kind.IsStorage() { return nil, errors.Errorf("not a storage hook: %#v", hi) } - storager, ok := a.storagers[names.NewStorageTag(hi.StorageId)] + storageAttachment, ok := a.storageAttachments[names.NewStorageTag(hi.StorageId)] if !ok { return nil, errors.Errorf("unknown storage %q", hi.StorageId) } - return storager, nil + return storageAttachment.stateFile, nil } === modified file 'src/github.com/juju/juju/worker/uniter/storage/attachments_test.go' --- src/github.com/juju/juju/worker/uniter/storage/attachments_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/attachments_test.go 2016-03-22 15:18:22 +0000 @@ -7,16 +7,19 @@ "io/ioutil" "path/filepath" + "github.com/juju/errors" "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/params" corestorage "github.com/juju/juju/storage" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" "github.com/juju/juju/worker/uniter/storage" ) @@ -27,7 +30,9 @@ var _ = gc.Suite(&attachmentsSuite{}) func assertStorageTags(c *gc.C, a *storage.Attachments, tags ...names.StorageTag) { - c.Assert(a.StorageTags(), jc.SameContents, tags) + sTags, err := a.StorageTags() + c.Assert(err, jc.ErrorIsNil) + c.Assert(sTags, jc.SameContents, tags) } func (s *attachmentsSuite) TestNewAttachments(c *gc.C) { @@ -41,12 +46,8 @@ }, } - att, err := storage.NewAttachments(st, unitTag, stateDir, abort) + _, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - defer func() { - err := att.Stop() - c.Assert(err, jc.ErrorIsNil) - }() // state dir should have been created. 
c.Assert(stateDir, jc.IsDirectory) } @@ -66,19 +67,12 @@ c.Assert(u, gc.Equals, unitTag) return attachmentIds, nil }, - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - return newMockNotifyWatcher(), nil - }, } storageTag := names.NewStorageTag("data/0") withAttachments := func(f func(*storage.Attachments)) { att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - defer func() { - err := att.Stop() - c.Assert(err, jc.ErrorIsNil) - }() f(att) } @@ -139,10 +133,6 @@ c.Assert(u, gc.Equals, unitTag) return nil, nil }, - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - w := newMockNotifyWatcher() - return w, nil - }, storageAttachmentLife: func(ids []params.StorageAttachmentId) ([]params.LifeResult, error) { return []params.LifeResult{{Life: params.Dying}}, nil }, @@ -156,10 +146,6 @@ att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - defer func() { - err := att.Stop() - c.Assert(err, jc.ErrorIsNil) - }() err = att.UpdateStorage([]names.StorageTag{storageTag}) c.Assert(err, jc.ErrorIsNil) c.Assert(removed, jc.IsTrue) @@ -184,11 +170,6 @@ c.Assert(u, gc.Equals, unitTag) return nil, nil }, - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - w := newMockNotifyWatcher() - w.changes <- struct{}{} - return w, nil - }, storageAttachment: func(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { c.Assert(s, gc.Equals, storageTag) return attachment, nil @@ -197,27 +178,41 @@ att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - defer func() { - err := att.Stop() - c.Assert(err, jc.ErrorIsNil) - }() - // There should be no context for data/0 until a hook is queued. + // There should be no context for data/0 until a required remote state change occurs. 
_, ok := att.Storage(storageTag) - c.Assert(ok, jc.IsFalse) + c.Assert(ok, jc.Satisfies, errors.IsNotFound) assertStorageTags(c, att) err = att.UpdateStorage([]names.StorageTag{storageTag}) c.Assert(err, jc.ErrorIsNil) - hi := waitOneHook(c, att.Hooks()) - c.Assert(hi, gc.Equals, hook.Info{ - Kind: hooks.StorageAttached, - StorageId: storageTag.Id(), - }) assertStorageTags(c, att, storageTag) - ctx, ok := att.Storage(storageTag) - c.Assert(ok, jc.IsTrue) + storageResolver := storage.NewResolver(att) + storage.SetStorageLife(storageResolver, map[names.StorageTag]params.Life{ + storageTag: params.Alive, + }) + localState := resolver.LocalState{ + State: operation.State{ + Kind: operation.Continue, + }, + } + remoteState := remotestate.Snapshot{ + Storage: map[names.StorageTag]remotestate.StorageSnapshot{ + storageTag: remotestate.StorageSnapshot{ + Kind: params.StorageKindBlock, + Life: params.Alive, + Location: "/dev/sdb", + Attached: true, + }, + }, + } + op, err := storageResolver.NextOp(localState, remoteState, &mockOperations{}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(op.String(), gc.Equals, "run hook storage-attached") + + ctx, err := att.Storage(storageTag) + c.Assert(err, jc.ErrorIsNil) c.Assert(ctx, gc.NotNil) c.Assert(ctx.Tag(), gc.Equals, storageTag) c.Assert(ctx.Kind(), gc.Equals, corestorage.StorageKindBlock) @@ -243,11 +238,6 @@ c.Assert(u, gc.Equals, unitTag) return nil, nil }, - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - w := newMockNotifyWatcher() - w.changes <- struct{}{} - return w, nil - }, storageAttachment: func(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { c.Assert(s, gc.Equals, storageTag) return attachment, nil @@ -261,10 +251,6 @@ att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - defer func() { - err := att.Stop() - c.Assert(err, jc.ErrorIsNil) - }() err = att.UpdateStorage([]names.StorageTag{storageTag}) c.Assert(err, jc.ErrorIsNil) c.Assert(att.Pending(), gc.Equals, 1) @@ -311,11 +297,6 @@ UnitTag: unitTag.String(), }}, nil }, - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - w := newMockNotifyWatcher() - w.changes <- struct{}{} - return w, nil - }, storageAttachment: func(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { c.Assert(u, gc.Equals, unitTag) if s == storageTag0 { @@ -361,10 +342,6 @@ att, err := storage.NewAttachments(st, unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - defer func() { - err := att.Stop() - c.Assert(err, jc.ErrorIsNil) - }() c.Assert(att.Pending(), gc.Equals, 1) err = att.SetDying() @@ -414,11 +391,6 @@ c.Assert(u, gc.Equals, s.unitTag) return s.unitAttachmentIds[u], nil }, - watchStorageAttachment: func(storageTag names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - w := newMockNotifyWatcher() - w.changes <- struct{}{} - return w, nil - }, storageAttachment: func(storageTag names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { att, ok := s.attachmentsByTag[storageTag] c.Assert(ok, jc.IsTrue) @@ -435,10 +407,6 @@ var err error s.att, err = storage.NewAttachments(st, s.unitTag, stateDir, abort) c.Assert(err, jc.ErrorIsNil) - s.AddCleanup(func(c *gc.C) { - err := s.att.Stop() - c.Assert(err, jc.ErrorIsNil) - }) } func (s *attachmentsUpdateSuite) TestAttachmentsUpdateUntrackedAlive(c *gc.C) { @@ -452,11 +420,6 @@ c.Assert(err, jc.ErrorIsNil) } assertStorageTags(c, s.att, s.storageTag0) - hi := 
waitOneHook(c, s.att.Hooks()) - c.Assert(hi, gc.Equals, hook.Info{ - Kind: hooks.StorageAttached, - StorageId: s.storageTag0.Id(), - }) c.Assert(s.att.Pending(), gc.Equals, 1) } @@ -466,7 +429,6 @@ // be started. err := s.att.UpdateStorage([]names.StorageTag{s.storageTag1}) c.Assert(err, jc.ErrorIsNil) - assertNoHooks(c, s.att.Hooks()) c.Assert(s.att.Pending(), gc.Equals, 0) assertStorageTags(c, s.att) } @@ -487,11 +449,6 @@ err := s.att.Refresh() c.Assert(err, jc.ErrorIsNil) } - hi := waitOneHook(c, s.att.Hooks()) - c.Assert(hi, gc.Equals, hook.Info{ - Kind: hooks.StorageAttached, - StorageId: s.storageTag0.Id(), - }) c.Assert(s.att.Pending(), gc.Equals, 1) } @@ -513,7 +470,6 @@ s.attachmentsByTag[s.storageTag1].Life = params.Dying err = s.att.UpdateStorage([]names.StorageTag{s.storageTag1}) c.Assert(err, jc.ErrorIsNil) - assertNoHooks(c, s.att.Hooks()) err = s.att.ValidateHook(hook.Info{ Kind: hooks.StorageAttached, StorageId: s.storageTag1.Id(), === modified file 'src/github.com/juju/juju/worker/uniter/storage/export_test.go' --- src/github.com/juju/juju/worker/uniter/storage/export_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/export_test.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "github.com/juju/juju/apiserver/params" "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/runner/jujuc" + "github.com/juju/juju/worker/uniter/resolver" ) type State interface { @@ -16,14 +16,6 @@ hook.Validator } -type StorageHookQueue interface { - Empty() bool - Next() hook.Info - Pop() - Update(attachment params.StorageAttachment) error - Context() (jujuc.ContextStorageAttachment, bool) -} - func StateAttached(s State) bool { return s.(*stateFile).attached } @@ -50,24 +42,6 @@ return states, nil } -func NewStorageHookQueue( - unitTag names.UnitTag, - storageTag names.StorageTag, - attached bool, -) StorageHookQueue { - return &storageHookQueue{ - unitTag: unitTag, - storageTag: storageTag, - attached: attached, - } -} - -func NewStorageSource( - st StorageAccessor, - unitTag names.UnitTag, - storageTag names.StorageTag, - attached bool, -) (hook.Source, error) { - source, err := newStorageSource(st, unitTag, storageTag, attached) - return source, err +func SetStorageLife(resolver resolver.Resolver, life map[names.StorageTag]params.Life) { + resolver.(*storageResolver).life = life } === modified file 'src/github.com/juju/juju/worker/uniter/storage/mock_test.go' --- src/github.com/juju/juju/worker/uniter/storage/mock_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/mock_test.go 2016-03-22 15:18:22 +0000 @@ -4,22 +4,17 @@ package storage_test import ( - "time" + "fmt" "github.com/juju/names" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "launchpad.net/tomb" - "github.com/juju/juju/api/watcher" "github.com/juju/juju/apiserver/common" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" ) type mockStorageAccessor struct { - watchStorageAttachment func(names.StorageTag, names.UnitTag) (watcher.NotifyWatcher, error) storageAttachment func(names.StorageTag, names.UnitTag) (params.StorageAttachment, error) storageAttachmentLife func([]params.StorageAttachmentId) ([]params.LifeResult, error) unitStorageAttachments func(names.UnitTag) ([]params.StorageAttachmentId, error) @@ -27,10 +22,6 @@ remove func(names.StorageTag, names.UnitTag) error } -func (m *mockStorageAccessor) 
WatchStorageAttachment(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - return m.watchStorageAttachment(s, u) -} - func (m *mockStorageAccessor) StorageAttachment(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { return m.storageAttachment(s, u) } @@ -73,54 +64,38 @@ return m.remove(s, u) } -type mockNotifyWatcher struct { - tomb tomb.Tomb - changes chan struct{} -} - -func newMockNotifyWatcher() *mockNotifyWatcher { - m := &mockNotifyWatcher{ - changes: make(chan struct{}, 1), - } - go func() { - <-m.tomb.Dying() - close(m.changes) - m.tomb.Kill(tomb.ErrDying) - m.tomb.Done() - }() - return m -} - -func (m *mockNotifyWatcher) Changes() <-chan struct{} { - return m.changes -} - -func (m *mockNotifyWatcher) Stop() error { - m.tomb.Kill(nil) - return m.tomb.Wait() -} - -func (m *mockNotifyWatcher) Err() error { - return m.tomb.Err() -} - -func assertNoHooks(c *gc.C, hooks <-chan hook.Info) { - select { - case <-hooks: - c.Fatal("unexpected hook") - case <-time.After(testing.ShortWait): - } -} - -func waitOneHook(c *gc.C, hooks <-chan hook.Info) hook.Info { - var hi hook.Info - var ok bool - select { - case hi, ok = <-hooks: - c.Assert(ok, jc.IsTrue) - case <-time.After(testing.LongWait): - c.Fatal("timed out waiting for hook") - } - assertNoHooks(c, hooks) - return hi +type mockOperations struct { + operation.Factory +} + +func (m *mockOperations) NewUpdateStorage(tags []names.StorageTag) (operation.Operation, error) { + return &mockOperation{"update storage"}, nil +} + +func (m *mockOperations) NewRunHook(hookInfo hook.Info) (operation.Operation, error) { + return &mockOperation{fmt.Sprintf("run hook %v", hookInfo.Kind)}, nil +} + +type mockOperation struct { + name string +} + +func (m *mockOperation) String() string { + return m.name +} + +func (m *mockOperation) NeedsGlobalMachineLock() bool { + return false +} + +func (m *mockOperation) Prepare(state operation.State) (*operation.State, error) { + return &state, nil +} + +func (m *mockOperation) Execute(state operation.State) (*operation.State, error) { + return &state, nil +} + +func (m *mockOperation) Commit(state operation.State) (*operation.State, error) { + return &state, nil } === added file 'src/github.com/juju/juju/worker/uniter/storage/resolver.go' --- src/github.com/juju/juju/worker/uniter/storage/resolver.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/resolver.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,170 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storage + +import ( + "github.com/juju/errors" + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/storage" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" +) + +// StorageResolverOperations instances know how to make operations +// required by the resolver. +type StorageResolverOperations interface { + NewUpdateStorage(tags []names.StorageTag) (operation.Operation, error) + NewRunHook(hookInfo hook.Info) (operation.Operation, error) +} + +type storageResolver struct { + storage *Attachments + dying bool + life map[names.StorageTag]params.Life +} + +// NewResolver returns a new storage resolver. 
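mockOperations and mockOperation above stub the operation factory and the operations the new resolver drives. The shape they satisfy, sketched from the methods exercised in these tests (the real interfaces in worker/uniter/operation may declare more):

    type Operation interface {
        String() string
        NeedsGlobalMachineLock() bool
        Prepare(state State) (*State, error)
        Execute(state State) (*State, error)
        Commit(state State) (*State, error)
    }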
+func NewResolver(storage *Attachments) resolver.Resolver { + return &storageResolver{ + storage: storage, + life: make(map[names.StorageTag]params.Life), + } +} + +// NextOp is defined on the Resolver interface. +func (s *storageResolver) NextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + + var changed []names.StorageTag + for tag, storage := range remoteState.Storage { + life, ok := s.life[tag] + if !ok || life != storage.Life { + s.life[tag] = storage.Life + changed = append(changed, tag) + } + } + for tag := range s.life { + if _, ok := remoteState.Storage[tag]; !ok { + changed = append(changed, tag) + delete(s.life, tag) + } + } + if len(changed) > 0 { + return opFactory.NewUpdateStorage(changed) + } + if !localState.Installed && s.storage.Pending() == 0 { + logger.Infof("initial storage attachments ready") + } + return s.nextOp(localState, remoteState, opFactory) +} + +func (s *storageResolver) nextOp( + localState resolver.LocalState, + remoteState remotestate.Snapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + if remoteState.Life == params.Dying { + if !s.dying { + if err := s.storage.SetDying(); err != nil { + return nil, errors.Trace(err) + } + s.dying = true + } + for tag, snap := range remoteState.Storage { + snap.Life = params.Dying + remoteState.Storage[tag] = snap + } + } + + var runStorageHooks bool + switch { + case localState.Kind == operation.Continue: + // There's nothing in progress. + runStorageHooks = true + case !localState.Installed && localState.Kind == operation.RunHook && localState.Step == operation.Queued: + // The install operation completed, and there's an install + // hook queued. Run storage-attached hooks first. + runStorageHooks = true + } + + if runStorageHooks { + for tag, snap := range remoteState.Storage { + op, err := s.nextHookOp(tag, snap, opFactory) + if errors.Cause(err) == resolver.ErrNoOperation { + continue + } + return op, err + } + if s.storage.Pending() > 0 { + logger.Debugf("still pending %v", s.storage.pending) + if !localState.Installed { + return nil, resolver.ErrWaiting + } + } + } + return nil, resolver.ErrNoOperation +} + +func (s *storageResolver) nextHookOp( + tag names.StorageTag, + snap remotestate.StorageSnapshot, + opFactory operation.Factory, +) (operation.Operation, error) { + logger.Debugf("next hook op for %v: %+v", tag, snap) + if !snap.Attached { + return nil, resolver.ErrNoOperation + } + storageAttachment, ok := s.storage.storageAttachments[tag] + if !ok { + return nil, resolver.ErrNoOperation + } + switch snap.Life { + case params.Alive: + if storageAttachment.attached { + // Storage attachments currently do not change + // (apart from lifecycle) after being provisioned. + // We don't process unprovisioned storage here, + // so there's nothing to do. + return nil, resolver.ErrNoOperation + } + case params.Dying: + if !storageAttachment.attached { + // Nothing to do: attachment is dying, but + // the storage-attached hook has not been + // consumed. + return nil, resolver.ErrNoOperation + } + case params.Dead: + // Storage must have been Dying to become Dead; + // no further action is required. 
+ return nil, resolver.ErrNoOperation + } + + hookInfo := hook.Info{ + StorageId: tag.Id(), + } + if snap.Life == params.Alive { + hookInfo.Kind = hooks.StorageAttached + } else { + hookInfo.Kind = hooks.StorageDetaching + } + context := &contextStorage{ + tag: tag, + kind: storage.StorageKind(snap.Kind), + location: snap.Location, + } + storageAttachment.ContextStorageAttachment = context + s.storage.storageAttachments[tag] = storageAttachment + + logger.Debugf("queued hook: %v", hookInfo) + return opFactory.NewRunHook(hookInfo) +} === removed file 'src/github.com/juju/juju/worker/uniter/storage/source.go' --- src/github.com/juju/juju/worker/uniter/storage/source.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/source.go 1970-01-01 00:00:00 +0000 @@ -1,227 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storage - -import ( - "github.com/juju/errors" - "github.com/juju/names" - "gopkg.in/juju/charm.v5/hooks" - "launchpad.net/tomb" - - apiwatcher "github.com/juju/juju/api/watcher" - "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/state/watcher" - "github.com/juju/juju/storage" - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -// storageSource is a hook source that generates storage hooks for -// a single storage attachment. -type storageSource struct { - tomb tomb.Tomb - - *storageHookQueue - st StorageAccessor - watcher apiwatcher.NotifyWatcher - changes chan hook.SourceChange -} - -// storageHookQueue implements a subset of hook.Source, separated from -// storageSource for simpler testing. -type storageHookQueue struct { - unitTag names.UnitTag - storageTag names.StorageTag - - // attached records whether or not the storage-attached - // hook has been executed. - attached bool - - // hookInfo is the next hook.Info to return, if non-nil. - hookInfo *hook.Info - - // context contains the details of the storage attachment. - context *contextStorage -} - -// newStorageSource creates a hook source that watches for changes to, -// and generates storage hooks for, a single storage attachment. 
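The resolver added above replaces the per-attachment watcher goroutines in the source.go file being removed here: remote state is now pushed in as a snapshot, and NextOp decides which operation, if any, to run. Driving it looks like the updated attachments_test above (a sketch, with mockOperations standing in for the real factory):

    res := storage.NewResolver(att)
    localState := resolver.LocalState{State: operation.State{Kind: operation.Continue}}
    remoteState := remotestate.Snapshot{
        Storage: map[names.StorageTag]remotestate.StorageSnapshot{
            names.NewStorageTag("data/0"): {
                Kind:     params.StorageKindBlock,
                Life:     params.Alive,
                Location: "/dev/sdb",
                Attached: true,
            },
        },
    }
    op, err := res.NextOp(localState, remoteState, &mockOperations{})
    // err == nil; op.String() == "run hook storage-attached" for a fresh Alive attachment.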
-func newStorageSource( - st StorageAccessor, - unitTag names.UnitTag, - storageTag names.StorageTag, - attached bool, -) (*storageSource, error) { - w, err := st.WatchStorageAttachment(storageTag, unitTag) - if err != nil { - return nil, errors.Annotate(err, "watching storage attachment") - } - s := &storageSource{ - storageHookQueue: &storageHookQueue{ - unitTag: unitTag, - storageTag: storageTag, - attached: attached, - }, - st: st, - watcher: w, - changes: make(chan hook.SourceChange), - } - go func() { - defer s.tomb.Done() - defer watcher.Stop(w, &s.tomb) - s.tomb.Kill(s.loop()) - }() - return s, nil -} - -func (s *storageSource) loop() error { - defer close(s.changes) - - var inChanges <-chan struct{} - var outChanges chan<- hook.SourceChange - var outChange hook.SourceChange - ready := make(chan struct{}, 1) - ready <- struct{}{} - for { - select { - case <-s.tomb.Dying(): - return tomb.ErrDying - case <-ready: - inChanges = s.watcher.Changes() - case _, ok := <-inChanges: - logger.Debugf("got storage attachment change") - if !ok { - return watcher.EnsureErr(s.watcher) - } - inChanges = nil - outChanges = s.changes - outChange = func() error { - defer func() { - ready <- struct{}{} - }() - logger.Debugf("processing storage source change") - return s.update() - } - case outChanges <- outChange: - logger.Debugf("sent storage source change") - outChanges = nil - outChange = nil - } - } -} - -// Changes is part of the hook.Source interface. -func (s *storageSource) Changes() <-chan hook.SourceChange { - return s.changes -} - -// Stop is part of the hook.Source interface. -func (s *storageSource) Stop() error { - s.tomb.Kill(nil) - return s.tomb.Wait() -} - -// update is called when hook.SourceChanges are applied. -func (s *storageSource) update() error { - attachment, err := s.st.StorageAttachment(s.storageTag, s.unitTag) - if params.IsCodeNotFound(err) { - // The storage attachment was removed from state, which - // implies that the storage has been detached already. - logger.Debugf("storage attachment %q not found", s.storageTag.Id()) - return nil - } else if params.IsCodeNotProvisioned(err) { - logger.Debugf("storage attachment %q not provisioned yet", s.storageTag.Id()) - return nil - } else if err != nil { - logger.Debugf("error refreshing storage details: %v", err) - return errors.Annotate(err, "refreshing storage details") - } - return s.storageHookQueue.Update(attachment) -} - -// Empty is part of the hook.Source interface. -func (s *storageHookQueue) Empty() bool { - return s.hookInfo == nil -} - -// Next is part of the hook.Source interface. -func (s *storageHookQueue) Next() hook.Info { - if s.Empty() { - panic("source is empty") - } - return *s.hookInfo -} - -// Pop is part of the hook.Source interface. -func (s *storageHookQueue) Pop() { - if s.Empty() { - panic("source is empty") - } - if s.hookInfo.Kind == hooks.StorageAttached { - s.attached = true - } - s.hookInfo = nil -} - -// Update updates the hook queue with the freshly acquired information about -// the storage attachment. -func (s *storageHookQueue) Update(attachment params.StorageAttachment) error { - switch attachment.Life { - case params.Alive: - if s.attached { - // Storage attachments currently do not change - // (apart from lifecycle) after being provisioned. - // We don't process unprovisioned storage here, - // so there's nothing to do. - return nil - } - case params.Dying: - if !s.attached { - // Nothing to do: attachment is dying, but - // the storage-attached hook has not been - // consumed. 
- s.hookInfo = nil
- return nil
- }
- case params.Dead:
- // Storage must have been Dying to become Dead;
- // no further action is required.
- return nil
- }
-
- // Set the storage context when the first hook is generated
- // for this storager. Later, when we need to handle changing
- // storage, we'll need to have a cache in the runner like
- // we have for relations.
- if s.context == nil {
- s.context = &contextStorage{
- tag: s.storageTag,
- kind: storage.StorageKind(attachment.Kind),
- location: attachment.Location,
- }
- }
-
- if s.hookInfo == nil {
- s.hookInfo = &hook.Info{
- StorageId: s.storageTag.Id(),
- }
- }
- if attachment.Life == params.Alive {
- s.hookInfo.Kind = hooks.StorageAttached
- } else {
- s.hookInfo.Kind = hooks.StorageDetaching
- }
- logger.Debugf("queued hook: %v", s.hookInfo)
- return nil
-}
-
-// Context returns the ContextStorage for the storage that this hook queue
-// corresponds to, and whether there is any context available yet. There
-// will be context beginning from when the first hook is queued.
-func (s *storageHookQueue) Context() (jujuc.ContextStorageAttachment, bool) {
- if s.context != nil {
- return s.context, true
- }
- return nil, false
-} === removed file 'src/github.com/juju/juju/worker/uniter/storage/source_test.go'
--- src/github.com/juju/juju/worker/uniter/storage/source_test.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/juju/worker/uniter/storage/source_test.go 1970-01-01 00:00:00 +0000
@@ -1,230 +0,0 @@
-// Copyright 2015 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package storage_test
-
-import (
- "time"
-
- "github.com/juju/names"
- jc "github.com/juju/testing/checkers"
- gc "gopkg.in/check.v1"
- "gopkg.in/juju/charm.v5/hooks"
-
- "github.com/juju/juju/api/watcher"
- "github.com/juju/juju/apiserver/params"
- corestorage "github.com/juju/juju/storage"
- "github.com/juju/juju/testing"
- "github.com/juju/juju/worker/uniter/hook"
- "github.com/juju/juju/worker/uniter/storage"
-)
-
-const initiallyUnattached = false
-const initiallyAttached = true
-
-type storageHookQueueSuite struct {
- testing.BaseSuite
-}
-
-var _ = gc.Suite(&storageHookQueueSuite{})
-
-func newHookQueue(attached bool) storage.StorageHookQueue {
- return storage.NewStorageHookQueue(
- names.NewUnitTag("mysql/0"),
- names.NewStorageTag("data/0"),
- attached,
- )
-}
-
-func updateHookQueue(c *gc.C, q storage.StorageHookQueue, life params.Life) {
- err := q.Update(params.StorageAttachment{
- Life: life,
- Kind: params.StorageKindBlock,
- Location: "/dev/sdb",
- })
- c.Assert(err, jc.ErrorIsNil)
-}
-
-func (s *storageHookQueueSuite) TestStorageHookQueueAttachedHook(c *gc.C) {
- q := newHookQueue(initiallyUnattached)
- updateHookQueue(c, q, params.Alive)
- c.Assert(q.Empty(), jc.IsFalse)
- c.Assert(q.Next(), gc.Equals, hook.Info{
- Kind: hooks.StorageAttached,
- StorageId: "data/0",
- })
-}
-
-func (s *storageHookQueueSuite) TestStorageHookQueueAlreadyAttached(c *gc.C) {
- q := newHookQueue(initiallyAttached)
- updateHookQueue(c, q, params.Alive)
- // Already attached, so no hooks should have been queued.
- c.Assert(q.Empty(), jc.IsTrue) -} - -func (s *storageHookQueueSuite) TestStorageHookQueueAttachedDetach(c *gc.C) { - q := newHookQueue(initiallyAttached) - updateHookQueue(c, q, params.Dying) - c.Assert(q.Empty(), jc.IsFalse) - c.Assert(q.Next(), gc.Equals, hook.Info{ - Kind: hooks.StorageDetaching, - StorageId: "data/0", - }) -} - -func (s *storageHookQueueSuite) TestStorageHookQueueUnattachedDetach(c *gc.C) { - q := newHookQueue(initiallyUnattached) - updateHookQueue(c, q, params.Dying) - // the storage wasn't attached, so Dying short-circuits. - c.Assert(q.Empty(), jc.IsTrue) -} - -func (s *storageHookQueueSuite) TestStorageHookQueueAttachedUnconsumedDetach(c *gc.C) { - q := newHookQueue(initiallyUnattached) - updateHookQueue(c, q, params.Alive) - c.Assert(q.Next(), gc.Equals, hook.Info{ - Kind: hooks.StorageAttached, - StorageId: "data/0", - }) - // don't consume the storage-attached hook; it should then be unqueued - updateHookQueue(c, q, params.Dying) - // since the storage-attached hook wasn't consumed, Dying short-circuits. - c.Assert(q.Empty(), jc.IsTrue) -} - -func (s *storageHookQueueSuite) TestStorageHookQueueAttachDetach(c *gc.C) { - q := newHookQueue(initiallyUnattached) - updateHookQueue(c, q, params.Alive) - q.Pop() - updateHookQueue(c, q, params.Dying) - c.Assert(q.Empty(), jc.IsFalse) - c.Assert(q.Next(), gc.Equals, hook.Info{ - Kind: hooks.StorageDetaching, - StorageId: "data/0", - }) -} - -func (s *storageHookQueueSuite) TestStorageHookQueueDead(c *gc.C) { - q := newHookQueue(initiallyAttached) - updateHookQueue(c, q, params.Dying) - q.Pop() - updateHookQueue(c, q, params.Dead) - // Dead does not cause any hook to be queued. - c.Assert(q.Empty(), jc.IsTrue) -} - -func (s *storageHookQueueSuite) TestStorageHookQueueContext(c *gc.C) { - q := newHookQueue(initiallyUnattached) - _, ok := q.Context() - c.Assert(ok, jc.IsFalse) - - err := q.Update(params.StorageAttachment{ - Life: params.Alive, - Kind: params.StorageKindFilesystem, - Location: "/srv", - }) - c.Assert(err, jc.ErrorIsNil) - c.Assert(q.Empty(), jc.IsFalse) - - ctx, ok := q.Context() - c.Assert(ok, jc.IsTrue) - c.Assert(ctx, gc.NotNil) - c.Assert(ctx.Tag(), gc.Equals, names.NewStorageTag("data/0")) - c.Assert(ctx.Kind(), gc.Equals, corestorage.StorageKindFilesystem) - c.Assert(ctx.Location(), gc.Equals, "/srv") -} - -func (s *storageHookQueueSuite) TestStorageHookQueueEmpty(c *gc.C) { - q := newHookQueue(initiallyAttached) - c.Assert(q.Empty(), jc.IsTrue) - c.Assert(q.Next, gc.PanicMatches, "source is empty") - c.Assert(q.Pop, gc.PanicMatches, "source is empty") -} - -func (s *storageHookQueueSuite) TestStorageSourceStop(c *gc.C) { - unitTag := names.NewUnitTag("mysql/0") - storageTag := names.NewStorageTag("data/0") - - // Simulate remote state returning a single Alive storage attachment. - st := &mockStorageAccessor{ - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - return newMockNotifyWatcher(), nil - }, - } - - const initiallyUnattached = false - source, err := storage.NewStorageSource(st, unitTag, storageTag, initiallyUnattached) - c.Assert(err, jc.ErrorIsNil) - err = source.Stop() - c.Assert(err, jc.ErrorIsNil) -} - -func (s *storageHookQueueSuite) TestStorageSourceUpdateErrors(c *gc.C) { - unitTag := names.NewUnitTag("mysql/0") - storageTag := names.NewStorageTag("data/0") - - // Simulate remote state returning a single Alive storage attachment. 
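The storage source tests here lean on two helpers from this package's shared test fixtures that are not shown in this diff: mockStorageAccessor and newMockNotifyWatcher. As a rough sketch only, assuming the watcher.NotifyWatcher interface of this era requires Changes, Stop and Err, the notify-watcher mock would look something like:

	// Hypothetical sketch; the real helper lives in the package's test fixtures.
	type mockNotifyWatcher struct {
		changes chan struct{} // tests push change notifications in here
	}

	func newMockNotifyWatcher() *mockNotifyWatcher {
		return &mockNotifyWatcher{changes: make(chan struct{}, 1)}
	}

	func (w *mockNotifyWatcher) Changes() <-chan struct{} { return w.changes }
	func (w *mockNotifyWatcher) Stop() error              { return nil }
	func (w *mockNotifyWatcher) Err() error               { return nil }

The buffered changes channel lets a test deliver a change (w.changes <- struct{}{}) without blocking on the source consuming it.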
- var calls int - w := newMockNotifyWatcher() - st := &mockStorageAccessor{ - watchStorageAttachment: func(s names.StorageTag, u names.UnitTag) (watcher.NotifyWatcher, error) { - return w, nil - }, - storageAttachment: func(s names.StorageTag, u names.UnitTag) (params.StorageAttachment, error) { - calls++ - switch calls { - case 1: - return params.StorageAttachment{}, &params.Error{Code: params.CodeNotFound} - case 2: - return params.StorageAttachment{}, &params.Error{Code: params.CodeNotProvisioned} - case 3: - // This error should cause the source to stop with an error. - return params.StorageAttachment{}, &params.Error{ - Code: params.CodeUnauthorized, - Message: "unauthorized", - } - } - panic("unexpected call to StorageAttachment") - }, - } - - const initiallyUnattached = false - source, err := storage.NewStorageSource(st, unitTag, storageTag, initiallyUnattached) - c.Assert(err, jc.ErrorIsNil) - - assertNoSourceChange := func() { - select { - case <-source.Changes(): - c.Fatal("unexpected source change") - case <-time.After(testing.ShortWait): - } - } - waitSourceChange := func() hook.SourceChange { - select { - case ch, ok := <-source.Changes(): - c.Assert(ok, jc.IsTrue) - assertNoSourceChange() - return ch - case <-time.After(testing.LongWait): - c.Fatal("timed out waiting for source change") - panic("unreachable") - } - } - - assertNoSourceChange() - - // First change is "NotFound": not an error. - w.changes <- struct{}{} - change := waitSourceChange() - c.Assert(change(), jc.ErrorIsNil) - - // Second change is "NotProvisioned": not an error. - w.changes <- struct{}{} - change = waitSourceChange() - c.Assert(change(), jc.ErrorIsNil) - - // Third change is "Unauthorized": this *is* an error. - w.changes <- struct{}{} - change = waitSourceChange() - c.Assert(change(), gc.ErrorMatches, "refreshing storage details: unauthorized") -} === modified file 'src/github.com/juju/juju/worker/uniter/storage/state.go' --- src/github.com/juju/juju/worker/uniter/storage/state.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/state.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/errors" "github.com/juju/names" "github.com/juju/utils" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/hook" ) === modified file 'src/github.com/juju/juju/worker/uniter/storage/state_test.go' --- src/github.com/juju/juju/worker/uniter/storage/state_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/state_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/testing" "github.com/juju/juju/worker/uniter/hook" @@ -76,7 +76,8 @@ dir := c.MkDir() writeFile(c, filepath.Join(dir, "data-0"), "rubbish") _, err := storage.ReadAllStateFiles(dir) - c.Assert(err, gc.ErrorMatches, `cannot load storage state from ".*": cannot load storage "data/0" state from ".*": invalid storage state file ".*": missing 'attached'`) + c.Assert(err, gc.ErrorMatches, `cannot load storage state from ".*": cannot load storage "data/0" state from ".*": invalid storage state file ".*": yaml: unmarshal errors: +`+" line 1: cannot unmarshal !!str `rubbish` into storage.diskInfo") } func (s *stateSuite) TestReadAllStateFilesDirNotExist(c *gc.C) { @@ -133,7 +134,7 @@ dir := c.MkDir() writeFile(c, filepath.Join(dir, "data-0"), "!@#") _, err :=
storage.ReadStateFile(dir, names.NewStorageTag("data/0")) - c.Assert(err, gc.ErrorMatches, `cannot load storage "data/0" state from ".*": invalid storage state file ".*": YAML error: did not find expected whitespace or line break`) + c.Assert(err, gc.ErrorMatches, `cannot load storage "data/0" state from ".*": invalid storage state file ".*": yaml: did not find expected whitespace or line break`) writeFile(c, filepath.Join(dir, "data-0"), "icantbelieveitsnotattached: true\n") _, err = storage.ReadStateFile(dir, names.NewStorageTag("data/0")) === removed file 'src/github.com/juju/juju/worker/uniter/storage/storager.go' --- src/github.com/juju/juju/worker/uniter/storage/storager.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/storage/storager.go 1970-01-01 00:00:00 +0000 @@ -1,67 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storage - -import ( - "github.com/juju/errors" - "github.com/juju/names" - - "github.com/juju/juju/worker/uniter/hook" - "github.com/juju/juju/worker/uniter/runner/jujuc" -) - -type storager struct { - st StorageAccessor - unitTag names.UnitTag - storageTag names.StorageTag - state *stateFile - source *storageSource - sender hook.Sender -} - -// newStorager creates a new storager, watching for changes to the storage -// attachment with the specified tags, and generating hooks on the output -// channel. -func newStorager( - st StorageAccessor, - unitTag names.UnitTag, - storageTag names.StorageTag, - state *stateFile, - hooks chan<- hook.Info, -) (*storager, error) { - source, err := newStorageSource(st, unitTag, storageTag, state.attached) - if err != nil { - return nil, errors.Annotate(err, "creating storage event source") - } - sender := hook.NewSender(hooks, source) - return &storager{ - st: st, - unitTag: unitTag, - storageTag: storageTag, - state: state, - source: source, - sender: sender, - }, nil -} - -// Stop stops the storager from generating or sending any more hook events. -func (s *storager) Stop() error { - if err := s.sender.Stop(); err != nil { - return errors.Annotate(err, "stopping storage event sender") - } - return s.source.Stop() -} - -// Context returns the ContextStorage for the storage that this storager -// corresponds to, and whether there is any context available yet. There -// will be context beginning from when the first hook is queued. -func (s *storager) Context() (jujuc.ContextStorageAttachment, bool) { - return s.source.Context() -} - -// CommitHook persists the state change encoded in the supplied storage -// hook, or returns an error if the hook is invalid given current state. -func (s *storager) CommitHook(hi hook.Info) error { - return s.state.CommitHook(hi) -} === modified file 'src/github.com/juju/juju/worker/uniter/timer.go' --- src/github.com/juju/juju/worker/uniter/timer.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/timer.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,17 @@ "time" ) -// Signal is the signature of a function used to generate a -// hook signal. -type TimedSignal func(now, lastSignal time.Time, interval time.Duration) <-chan time.Time +const ( + // interval at which the unit's status should be polled + statusPollInterval = 5 * time.Minute +) + +// updateStatusSignal returns a time channel that fires after the status poll interval. +func updateStatusSignal() <-chan time.Time { + return time.After(statusPollInterval) +} + +// NewUpdateStatusTimer returns a timed signal suitable for the update-status hook.
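Because updateStatusSignal is built on time.After, each channel it returns fires exactly once; a consumer must call the signal function again after every tick. A minimal consumption sketch (the abort channel and hook runner are hypothetical, for illustration only):

	signal := NewUpdateStatusTimer()
	timeout := signal()
	for {
		select {
		case <-abort: // hypothetical shutdown channel
			return
		case <-timeout:
			queueUpdateStatusHook() // hypothetical: schedule the update-status hook
			timeout = signal()      // re-arm: a time.After channel fires only once
		}
	}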
+func NewUpdateStatusTimer() func() <-chan time.Time { + return updateStatusSignal +} === removed file 'src/github.com/juju/juju/worker/uniter/timer_test.go' --- src/github.com/juju/juju/worker/uniter/timer_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/timer_test.go 1970-01-01 00:00:00 +0000 @@ -1,70 +0,0 @@ -// Copyright 2012-2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package uniter_test - -import ( - "time" - - gc "gopkg.in/check.v1" - - coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter" -) - -type TimerSuite struct{} - -var _ = gc.Suite(&TimerSuite{}) - -func (s *TimerSuite) TestCollectMetricsTimer(c *gc.C) { - s.testTimer(c, uniter.ActiveCollectMetricsSignal) -} - -func (s *TimerSuite) TestUpdateStatusTimer(c *gc.C) { - s.testTimer(c, uniter.UpdateStatusSignal) -} - -func (*TimerSuite) testTimer(c *gc.C, s uniter.TimedSignal) { - now := time.Now() - defaultInterval := coretesting.ShortWait / 5 - testCases := []struct { - about string - now time.Time - lastRun time.Time - interval time.Duration - expectSignal bool - }{{ - "Timer firing after delay.", - now, - now.Add(-defaultInterval / 2), - defaultInterval, - true, - }, { - "Timer firing the first time.", - now, - time.Unix(0, 0), - defaultInterval, - true, - }, { - "Timer not firing soon.", - now, - now, - coretesting.ShortWait * 2, - false, - }} - - for i, t := range testCases { - c.Logf("running test %d", i) - sig := s(t.now, t.lastRun, t.interval) - select { - case <-sig: - if !t.expectSignal { - c.Errorf("not expecting a signal") - } - case <-time.After(coretesting.ShortWait): - if t.expectSignal { - c.Errorf("expected a signal") - } - } - } -} === modified file 'src/github.com/juju/juju/worker/uniter/uniter.go' --- src/github.com/juju/juju/worker/uniter/uniter.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/uniter.go 2016-03-22 15:18:22 +0000 @@ -13,29 +13,42 @@ "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/names" + "github.com/juju/utils" + "github.com/juju/utils/clock" "github.com/juju/utils/exec" "github.com/juju/utils/fslock" - corecharm "gopkg.in/juju/charm.v5" - "launchpad.net/tomb" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" - "github.com/juju/juju/version" + "github.com/juju/juju/core/leadership" "github.com/juju/juju/worker" - "github.com/juju/juju/worker/leadership" + "github.com/juju/juju/worker/catacomb" + "github.com/juju/juju/worker/fortress" + "github.com/juju/juju/worker/uniter/actions" "github.com/juju/juju/worker/uniter/charm" - "github.com/juju/juju/worker/uniter/filter" + "github.com/juju/juju/worker/uniter/hook" + uniterleadership "github.com/juju/juju/worker/uniter/leadership" "github.com/juju/juju/worker/uniter/operation" + "github.com/juju/juju/worker/uniter/relation" + "github.com/juju/juju/worker/uniter/remotestate" + "github.com/juju/juju/worker/uniter/resolver" + "github.com/juju/juju/worker/uniter/runcommands" "github.com/juju/juju/worker/uniter/runner" + "github.com/juju/juju/worker/uniter/runner/context" "github.com/juju/juju/worker/uniter/runner/jujuc" "github.com/juju/juju/worker/uniter/storage" + jujuos "github.com/juju/utils/os" ) var logger = loggo.GetLogger("juju.worker.uniter") -// leadershipGuarantee defines the period of time for which a successful call -// to the is-leader hook tool guarantees continued leadership. 
-var leadershipGuarantee = 30 * time.Second +const ( + retryTimeMin = 5 * time.Second + retryTimeMax = 5 * time.Minute + retryTimeJitter = true + retryTimeFactor = 2 +) // A UniterExecutionObserver gets the appropriate methods called when a hook // is executed and either succeeds or fails. Missing hooks don't get reported @@ -50,14 +63,13 @@ // delegated to Mode values, which are expected to react to events and direct // the uniter's responses to them. type Uniter struct { - tomb tomb.Tomb + catacomb catacomb.Catacomb st *uniter.State paths Paths - f filter.Filter unit *uniter.Unit - relations Relations - cleanups []cleanup + relations relation.Relations storage *storage.Attachments + clock clock.Clock // Cache the last reported status information // so we don't make unnecessary api calls. @@ -71,33 +83,24 @@ newOperationExecutor NewExecutorFunc leadershipTracker leadership.Tracker - - hookLock *fslock.Lock - runListener *RunListener - runCommands chan creator - - ranLeaderSettingsChanged bool - ranConfigChanged bool + charmDirGuard fortress.Guard + + hookLock *fslock.Lock + + // TODO(axw) move the runListener and run-command code outside of the + // uniter, and introduce a separate worker. Each worker would feed + // operations to a single, synchronized runner to execute. + runListener *RunListener + commands runcommands.Commands + commandChannel chan string // The execution observer is only used in tests at this stage. Should this // need to be extended, perhaps a list of observers would be needed. observer UniterExecutionObserver - // metricsTimerChooser is a struct that allows metrics to switch between - // active and inactive timers. - metricsTimerChooser *timerChooser - - // collectMetricsAt defines a function that will be used to generate signals - // for the collect-metrics hook. - collectMetricsAt TimedSignal - - // sendMetricsAt defines a function that will be used to generate signals - // to send metrics to the state server. - sendMetricsAt TimedSignal - // updateStatusAt defines a function that will be used to generate signals for // the update-status hook. - updateStatusAt TimedSignal + updateStatusAt func() <-chan time.Time } // UniterParams holds all the necessary parameters for a new Uniter. @@ -107,9 +110,14 @@ LeadershipTracker leadership.Tracker DataDir string MachineLock *fslock.Lock - MetricsTimerChooser *timerChooser - UpdateStatusSignal TimedSignal + CharmDirGuard fortress.Guard + UpdateStatusSignal func() <-chan time.Time NewOperationExecutor NewExecutorFunc + Clock clock.Clock + // TODO (mattyw, wallyworld, fwereade) Having the observer here makes this approach seem a bit more legitimate, but it isn't. + // The observer is only a stop-gap to be used in tests. A better approach would be to have the uniter tests start hooks + // that write to files, and have the tests watch the output to know that hooks have finished. + Observer UniterExecutionObserver } type NewExecutorFunc func(string, func() (*corecharm.URL, error), func(string) (func() error, error)) (operation.Executor, error) @@ -117,37 +125,28 @@ // NewUniter creates a new Uniter which will install, run, and upgrade // a charm on behalf of the unit with the given unitTag, by executing // hooks and operations provoked by changes in st.
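Construction can now fail, so NewUniter returns (*Uniter, error) rather than a bare *Uniter. A sketch of a typical call site using the UniterParams fields above (all values hypothetical; the test harness later in this diff wires up the same fields with its own fixtures):

	u, err := uniter.NewUniter(&uniter.UniterParams{
		UniterFacade:         st,    // the unit's API facade
		UnitTag:              unitTag,
		LeadershipTracker:    tracker,
		DataDir:              dataDir,
		MachineLock:          lock,
		CharmDirGuard:        guard, // fortress.Guard protecting the charm dir
		UpdateStatusSignal:   uniter.NewUpdateStatusTimer(),
		NewOperationExecutor: operation.NewExecutor,
		Clock:                clock.WallClock,
	})
	if err != nil {
		return nil, errors.Trace(err)
	}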
-func NewUniter(uniterParams *UniterParams) *Uniter { +func NewUniter(uniterParams *UniterParams) (*Uniter, error) { u := &Uniter{ st: uniterParams.UniterFacade, paths: NewPaths(uniterParams.DataDir, uniterParams.UnitTag), hookLock: uniterParams.MachineLock, leadershipTracker: uniterParams.LeadershipTracker, - metricsTimerChooser: uniterParams.MetricsTimerChooser, - collectMetricsAt: uniterParams.MetricsTimerChooser.inactive, - sendMetricsAt: uniterParams.MetricsTimerChooser.inactive, + charmDirGuard: uniterParams.CharmDirGuard, updateStatusAt: uniterParams.UpdateStatusSignal, newOperationExecutor: uniterParams.NewOperationExecutor, - runCommands: make(chan creator), - } - go func() { - defer u.tomb.Done() - defer u.runCleanups() - u.tomb.Kill(u.loop(uniterParams.UnitTag)) - }() - return u -} - -type cleanup func() error - -func (u *Uniter) addCleanup(cleanup cleanup) { - u.cleanups = append(u.cleanups, cleanup) -} - -func (u *Uniter) runCleanups() { - for _, cleanup := range u.cleanups { - u.tomb.Kill(cleanup()) - } + observer: uniterParams.Observer, + clock: uniterParams.Clock, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &u.catacomb, + Work: func() error { + return u.loop(uniterParams.UnitTag) + }, + }) + if err != nil { + return nil, errors.Trace(err) + } + return u, nil } func (u *Uniter) loop(unitTag names.UnitTag) (err error) { @@ -159,47 +158,229 @@ } logger.Infof("unit %q started", u.unit) - // Start filtering state change events for consumption by modes. - u.f, err = filter.NewFilter(u.st, unitTag) - if err != nil { - return err - } - u.addCleanup(u.f.Stop) - - // Stop the uniter if the filter fails. - go func() { u.tomb.Kill(u.f.Wait()) }() - - // Start handling leader settings events, or not, as appropriate. - u.f.WantLeaderSettingsEvents(!u.operationState().Leader) - - // Run modes until we encounter an error. - mode := ModeContinue - for err == nil { + // Install is a special case, as it must run before there + // is any remote state, and before the remote state watcher + // is started. 
+ var charmURL *corecharm.URL + var charmModifiedVersion int + opState := u.operationExecutor.State() + if opState.Kind == operation.Install { + logger.Infof("resuming charm install") + op, err := u.operationFactory.NewInstall(opState.CharmURL) + if err != nil { + return errors.Trace(err) + } + if err := u.operationExecutor.Run(op); err != nil { + return errors.Trace(err) + } + charmURL = opState.CharmURL + } else { + curl, err := u.unit.CharmURL() + if err != nil { + return errors.Trace(err) + } + charmURL = curl + svc, err := u.unit.Service() + if err != nil { + return errors.Trace(err) + } + charmModifiedVersion, err = svc.CharmModifiedVersion() + if err != nil { + return errors.Trace(err) + } + } + + var ( + watcher *remotestate.RemoteStateWatcher + watcherMu sync.Mutex + ) + + retryHookChan := make(chan struct{}, 1) + retryHookTimer := utils.NewBackoffTimer(utils.BackoffTimerConfig{ + Min: retryTimeMin, + Max: retryTimeMax, + Jitter: retryTimeJitter, + Factor: retryTimeFactor, + Func: func() { + // Don't try to send on the channel if it's already full + // This can happen if the timer fires off before the event is consumed + // by the resolver loop + select { + case retryHookChan <- struct{}{}: + default: + } + }, + Clock: u.clock, + }) + defer func() { + // Stop any send that might be pending + // before closing the channel + retryHookTimer.Reset() + close(retryHookChan) + }() + + restartWatcher := func() error { + watcherMu.Lock() + defer watcherMu.Unlock() + + if watcher != nil { + // watcher added to catacomb, will kill uniter if there's an error. + worker.Stop(watcher) + } + var err error + watcher, err = remotestate.NewWatcher( + remotestate.WatcherConfig{ + State: remotestate.NewAPIState(u.st), + LeadershipTracker: u.leadershipTracker, + UnitTag: unitTag, + UpdateStatusChannel: u.updateStatusAt, + CommandChannel: u.commandChannel, + RetryHookChannel: retryHookChan, + }) + if err != nil { + return errors.Trace(err) + } + if err := u.catacomb.Add(watcher); err != nil { + return errors.Trace(err) + } + return nil + } + + onIdle := func() error { + opState := u.operationExecutor.State() + if opState.Kind != operation.Continue { + // We should only set idle status if we're in + // the "Continue" state, which indicates that + // there is nothing to do and we're not in an + // error state. + return nil + } + return setAgentStatus(u, params.StatusIdle, "", nil) + } + + clearResolved := func() error { + if err := u.unit.ClearResolved(); err != nil { + return errors.Trace(err) + } + watcher.ClearResolvedMode() + return nil + } + + for { + if err = restartWatcher(); err != nil { + err = errors.Annotate(err, "(re)starting watcher") + break + } + + uniterResolver := NewUniterResolver(ResolverConfig{ + ClearResolved: clearResolved, + ReportHookError: u.reportHookError, + FixDeployer: u.deployer.Fix, + StartRetryHookTimer: retryHookTimer.Start, + StopRetryHookTimer: retryHookTimer.Reset, + Actions: actions.NewResolver(), + Leadership: uniterleadership.NewResolver(), + Relations: relation.NewRelationsResolver(u.relations), + Storage: storage.NewResolver(u.storage), + Commands: runcommands.NewCommandsResolver( + u.commands, watcher.CommandCompleted, + ), + }) + + // We should not do anything until there has been a change + // to the remote state. The watcher will trigger at least + // once initially. 
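The retryHookTimer configured above delivers into retryHookChan with a non-blocking send: if a tick is already pending, the new one is dropped rather than blocking the timer's callback. Isolated from the uniter, the pattern is just:

	ticks := make(chan struct{}, 1) // capacity 1: at most one pending tick

	// notify queues a tick if none is pending and otherwise drops it,
	// so the producer never blocks on a slow consumer.
	notify := func() {
		select {
		case ticks <- struct{}{}:
		default:
		}
	}

Dropping is safe here because a retry tick is effectively a level trigger: one pending tick is enough to make the resolver loop retry the failed hook.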
select { - case <-u.tomb.Dying(): - err = tomb.ErrDying - default: - mode, err = mode(u) + case <-u.catacomb.Dying(): + return u.catacomb.ErrDying() + case <-watcher.RemoteStateChanged(): + } + + localState := resolver.LocalState{ + CharmURL: charmURL, + CharmModifiedVersion: charmModifiedVersion, + } + for err == nil { + err = resolver.Loop(resolver.LoopConfig{ + Resolver: uniterResolver, + Watcher: watcher, + Executor: u.operationExecutor, + Factory: u.operationFactory, + Abort: u.catacomb.Dying(), + OnIdle: onIdle, + CharmDirGuard: u.charmDirGuard, + }, &localState) switch cause := errors.Cause(err); cause { + case nil: + // Loop back around. + case resolver.ErrLoopAborted: + err = u.catacomb.ErrDying() case operation.ErrNeedsReboot: err = worker.ErrRebootMachine - case tomb.ErrDying, worker.ErrTerminateAgent: - err = cause case operation.ErrHookFailed: - mode, err = ModeHookError, nil + // Loop back around. The resolver can tell that it is in + // an error state by inspecting the operation state. + err = nil + case resolver.ErrTerminate: + err = u.terminate() + case resolver.ErrRestart: + charmURL = localState.CharmURL + // leave err assigned, causing loop to break default: - charmURL, ok := operation.DeployConflictCharmURL(cause) - if ok { - mode, err = ModeConflicted(charmURL), nil + // We need to set conflicted from here, because error + // handling is outside of the resolver's control. + if operation.IsDeployConflictError(cause) { + localState.Conflicted = true + err = setAgentStatus(u, params.StatusError, "upgrade failed", nil) + } else { + reportAgentError(u, "resolver loop error", err) } } } + + if errors.Cause(err) != resolver.ErrRestart { + break + } } logger.Infof("unit %q shutting down: %s", u.unit, err) return err } +func (u *Uniter) terminate() error { + unitWatcher, err := u.unit.Watch() + if err != nil { + return errors.Trace(err) + } + if err := u.catacomb.Add(unitWatcher); err != nil { + return errors.Trace(err) + } + for { + select { + case <-u.catacomb.Dying(): + return u.catacomb.ErrDying() + case _, ok := <-unitWatcher.Changes(): + if !ok { + return errors.New("unit watcher closed") + } + if err := u.unit.Refresh(); err != nil { + return errors.Trace(err) + } + if hasSubs, err := u.unit.HasSubordinates(); err != nil { + return errors.Trace(err) + } else if hasSubs { + continue + } + // The unit is known to be Dying; so if it didn't have subordinates + // just above, it can't acquire new ones before this call. + if err := u.unit.EnsureDead(); err != nil { + return errors.Trace(err) + } + return worker.ErrTerminateAgent + } + } +} + func (u *Uniter) setupLocks() (err error) { if message := u.hookLock.Message(); u.hookLock.IsLocked() && message != "" { // Look to see if it was us that held the lock before. 
If it was, we @@ -236,19 +417,23 @@ if err := os.MkdirAll(u.paths.State.RelationsDir, 0755); err != nil { return errors.Trace(err) } - relations, err := newRelations(u.st, unitTag, u.paths, u.tomb.Dying()) + relations, err := relation.NewRelations( + u.st, unitTag, u.paths.State.CharmDir, + u.paths.State.RelationsDir, u.catacomb.Dying(), + ) if err != nil { return errors.Annotatef(err, "cannot create relations") } u.relations = relations storageAttachments, err := storage.NewAttachments( - u.st, unitTag, u.paths.State.StorageDir, u.tomb.Dying(), + u.st, unitTag, u.paths.State.StorageDir, u.catacomb.Dying(), ) if err != nil { return errors.Annotatef(err, "cannot create storage hook source") } u.storage = storageAttachments - u.addCleanup(storageAttachments.Stop) + u.commands = runcommands.NewCommands() + u.commandChannel = make(chan string) deployer, err := charm.NewDeployer( u.paths.State.CharmDir, @@ -259,8 +444,8 @@ return errors.Annotatef(err, "cannot create deployer") } u.deployer = &deployerProxy{deployer} - contextFactory, err := runner.NewContextFactory( - u.st, unitTag, u.leadershipTracker, u.relations.GetInfo, u.storage, u.paths, + contextFactory, err := context.NewContextFactory( + u.st, unitTag, u.leadershipTracker, u.relations.GetInfo, u.storage, u.paths, u.clock, ) if err != nil { return err @@ -269,56 +454,56 @@ u.st, u.paths, contextFactory, ) if err != nil { - return err + return errors.Trace(err) } u.operationFactory = operation.NewFactory(operation.FactoryParams{ Deployer: u.deployer, RunnerFactory: runnerFactory, Callbacks: &operationCallbacks{u}, StorageUpdater: u.storage, - Abort: u.tomb.Dying(), - MetricSender: u.unit, + Abort: u.catacomb.Dying(), MetricSpoolDir: u.paths.GetMetricsSpoolDir(), }) operationExecutor, err := u.newOperationExecutor(u.paths.State.OperationsFile, u.getServiceCharmURL, u.acquireExecutionLock) if err != nil { - return err + return errors.Trace(err) } u.operationExecutor = operationExecutor logger.Debugf("starting juju-run listener on unix:%s", u.paths.Runtime.JujuRunSocket) - u.runListener, err = NewRunListener(u, u.paths.Runtime.JujuRunSocket) - if err != nil { - return err - } - u.addCleanup(func() error { - // TODO(fwereade): RunListener returns no error on Close. This seems wrong. - u.runListener.Close() - return nil - }) + commandRunner, err := NewChannelCommandRunner(ChannelCommandRunnerConfig{ + Abort: u.catacomb.Dying(), + Commands: u.commands, + CommandChannel: u.commandChannel, + }) + if err != nil { + return errors.Annotate(err, "creating command runner") + } + u.runListener, err = NewRunListener(RunListenerConfig{ + SocketPath: u.paths.Runtime.JujuRunSocket, + CommandRunner: commandRunner, + }) + if err != nil { + return errors.Trace(err) + } + rlw := newRunListenerWrapper(u.runListener) + if err := u.catacomb.Add(rlw); err != nil { + return errors.Trace(err) + } // The socket needs to have permissions 777 in order for other users to use it. 
- if version.Current.OS != version.Windows { + if jujuos.HostOS() != jujuos.Windows { return os.Chmod(u.paths.Runtime.JujuRunSocket, 0777) } return nil } func (u *Uniter) Kill() { - u.tomb.Kill(nil) + u.catacomb.Kill(nil) } func (u *Uniter) Wait() error { - return u.tomb.Wait() -} - -func (u *Uniter) Stop() error { - u.tomb.Kill(nil) - return u.Wait() -} - -func (u *Uniter) Dead() <-chan struct{} { - return u.tomb.Dead() + return u.catacomb.Wait() } func (u *Uniter) getServiceCharmURL() (*corecharm.URL, error) { @@ -335,115 +520,56 @@ return u.operationExecutor.State() } -// initializeMetricsTimers enables the periodic collect-metrics hook -// and periodic sending of collected metrics for charms that declare metrics. -func (u *Uniter) initializeMetricsTimers() error { - charm, err := corecharm.ReadCharmDir(u.paths.State.CharmDir) - if err != nil { - return err - } - u.collectMetricsAt = u.metricsTimerChooser.getCollectMetricsTimer(charm) - u.sendMetricsAt = u.metricsTimerChooser.getSendMetricsTimer(charm) - return nil -} - // RunCommands executes the supplied commands in a hook context. func (u *Uniter) RunCommands(args RunCommandsArgs) (results *exec.ExecResponse, err error) { - logger.Tracef("run commands: %s", args.Commands) - - type responseInfo struct { - response *exec.ExecResponse - err error - } - responseChan := make(chan responseInfo, 1) - sendResponse := func(response *exec.ExecResponse, err error) { - responseChan <- responseInfo{response, err} - } - - commandArgs := operation.CommandArgs{ - Commands: args.Commands, - RelationId: args.RelationId, - RemoteUnitName: args.RemoteUnitName, - ForceRemoteUnit: args.ForceRemoteUnit, - } - - select { - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case u.runCommands <- newCommandsOp(commandArgs, sendResponse): - } - - select { - case <-u.tomb.Dying(): - return nil, tomb.ErrDying - case response := <-responseChan: - results, err := response.response, response.err - if errors.Cause(err) == operation.ErrNeedsReboot { - u.tomb.Kill(worker.ErrRebootMachine) - err = nil - } else if err != nil { - u.tomb.Kill(err) - } - return results, err - } -} - -// runOperation uses the uniter's operation factory to run the supplied creation -// func, and then runs the resulting operation. -// -// This has a number of advantages over having mode funcs use the factory and -// executor directly: -// * it cuts down on duplicated code in the mode funcs, making the logic easier -// to parse -// * it narrows the (conceptual) interface exposed to the mode funcs -- one day -// we might even be able to use a (real) interface and maybe even approach a -// point where we can run direct unit tests(!) on the modes themselves. -// * it opens a path to fixing RunCommands -- all operation creation and -// execution is done in a single place, and it's much easier to force those -// onto a single thread. -// * this can't be done quite yet, though, because relation changes are -// not yet encapsulated in operations, and that needs to happen before -// RunCommands will *actually* be goroutine-safe. 
-func (u *Uniter) runOperation(creator creator) (err error) { - errorMessage := "creating operation to run" - defer func() { - reportAgentError(u, errorMessage, err) - }() - op, err := creator(u.operationFactory) - if err != nil { - return errors.Annotatef(err, "cannot create operation") - } - errorMessage = op.String() - before := u.operationState() - defer func() { - // Check that if we lose leadership as a result of this - // operation, we want to start getting leader settings events, - // or if we gain leadership we want to stop receiving those - // events. - if after := u.operationState(); before.Leader != after.Leader { - u.f.WantLeaderSettingsEvents(before.Leader) - } - }() - return u.operationExecutor.Run(op) + // TODO(axw) drop this when we move the run-listener to an independent + // worker. This exists purely for the tests. + return u.runListener.RunCommands(args) } // acquireExecutionLock acquires the machine-level execution lock, and // returns a func that must be called to unlock it. It's used by operation.Executor // when running operations that execute external code. func (u *Uniter) acquireExecutionLock(message string) (func() error, error) { + logger.Debugf("lock: %v", message) // We want to make sure we don't block forever when locking, but take the - // Uniter's tomb into account. - checkTomb := func() error { + // Uniter's catacomb into account. + checkCatacomb := func() error { select { - case <-u.tomb.Dying(): - return tomb.ErrDying + case <-u.catacomb.Dying(): + return u.catacomb.ErrDying() default: return nil } } message = fmt.Sprintf("%s: %s", u.unit.Name(), message) - if err := u.hookLock.LockWithFunc(message, checkTomb); err != nil { + if err := u.hookLock.LockWithFunc(message, checkCatacomb); err != nil { return nil, err } - return func() error { return u.hookLock.Unlock() }, nil + return func() error { + logger.Debugf("unlock: %v", message) + return u.hookLock.Unlock() + }, nil +} + +func (u *Uniter) reportHookError(hookInfo hook.Info) error { + // Set the agent status to "error". We must do this here in case the + // hook is interrupted (e.g. unit agent crashes), rather than immediately + // after attempting a runHookOp. 
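Concretely, for relation hooks the function below composes the hook name from the relation name and the hook kind; the status it reports then looks roughly like this (a sketch with illustrative values):

	// agent status: error
	// status message: hook failed: "db-relation-changed"
	statusData := map[string]interface{}{
		"hook":        "db-relation-changed", // relationName + "-" + hookInfo.Kind
		"relation-id": 0,
		"remote-unit": "mysql/0",
	}

For non-relation hooks only the "hook" key is set.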
+ hookName := string(hookInfo.Kind) + statusData := map[string]interface{}{} + if hookInfo.Kind.IsRelation() { + statusData["relation-id"] = hookInfo.RelationId + if hookInfo.RemoteUnit != "" { + statusData["remote-unit"] = hookInfo.RemoteUnit + } + relationName, err := u.relations.Name(hookInfo.RelationId) + if err != nil { + return errors.Trace(err) + } + hookName = fmt.Sprintf("%s-%s", relationName, hookInfo.Kind) + } + statusData["hook"] = hookName + statusMessage := fmt.Sprintf("hook failed: %q", hookName) + return setAgentStatus(u, params.StatusError, statusMessage, statusData) } === modified file 'src/github.com/juju/juju/worker/uniter/uniter_test.go' --- src/github.com/juju/juju/worker/uniter/uniter_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/uniter_test.go 2016-03-22 15:18:22 +0000 @@ -20,15 +20,15 @@ ft "github.com/juju/testing/filetesting" "github.com/juju/utils/clock" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" + corecharm "gopkg.in/juju/charm.v6-unstable" "github.com/juju/juju/agent/tools" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/component/all" "github.com/juju/juju/juju/testing" "github.com/juju/juju/state" "github.com/juju/juju/testcharms" coretesting "github.com/juju/juju/testing" - "github.com/juju/juju/worker/uniter" "github.com/juju/juju/worker/uniter/operation" ) @@ -39,9 +39,7 @@ oldLcAll string unitDir string - collectMetricsTicker *uniter.ManualTicker - sendMetricsTicker *uniter.ManualTicker - updateStatusHookTicker *uniter.ManualTicker + updateStatusHookTicker *manualTicker } var _ = gc.Suite(&UniterSuite{}) @@ -79,6 +77,7 @@ return leaseClock } s.AddSuiteCleanup(func(*gc.C) { state.GetClock = oldGetClock }) + all.RegisterForServer() } func (s *UniterSuite) TearDownSuite(c *gc.C) { @@ -88,12 +87,9 @@ } func (s *UniterSuite) SetUpTest(c *gc.C) { - s.collectMetricsTicker = uniter.NewManualTicker() - s.sendMetricsTicker = uniter.NewManualTicker() - s.updateStatusHookTicker = uniter.NewManualTicker() + s.updateStatusHookTicker = newManualTicker() s.GitSuite.SetUpTest(c) s.JujuConnSuite.SetUpTest(c) - s.PatchValue(uniter.IdleWaitTime, 1*time.Millisecond) } func (s *UniterSuite) TearDownTest(c *gc.C) { @@ -117,7 +113,7 @@ c.Logf("\ntest %d: %s\n", i, t.summary) func() { defer s.Reset(c) - env, err := s.State.Environment() + env, err := s.State.Model() c.Assert(err, jc.ErrorIsNil) ctx := &context{ s: s, @@ -126,9 +122,8 @@ path: s.unitDir, dataDir: s.dataDir, charms: make(map[string][]byte), - collectMetricsTicker: s.collectMetricsTicker, - sendMetricsTicker: s.sendMetricsTicker, updateStatusHookTicker: s.updateStatusHookTicker, + charmDirGuard: &mockCharmDirGuard{}, } ctx.run(c, t.steps) }() @@ -171,13 +166,13 @@ serveCharm{}, writeFile{"charm", 0644}, createUniter{}, - waitUniterDead{err: `ModeInstalling cs:quantal/wordpress-0: executing operation "install cs:quantal/wordpress-0": open .*` + errNotDir}, + waitUniterDead{err: `executing operation "install cs:quantal/wordpress-0": open .*` + errNotDir}, ), ut( "charm cannot be downloaded", createCharm{}, // don't serve charm createUniter{}, - waitUniterDead{err: `ModeInstalling cs:quantal/wordpress-0: preparing operation "install cs:quantal/wordpress-0": failed to download charm .* 404 Not Found`}, + waitUniterDead{err: `preparing operation "install cs:quantal/wordpress-0": failed to download charm .* 404 Not Found`}, ), }) } @@ -240,6 +235,27 @@ }) } +func (s *UniterSuite) TestNoUniterUpdateStatusHookInError(c *gc.C) { + s.runUniterTests(c, 
[]uniterTest{ + ut( + "update status hook doesn't run if in error", + startupError{"start"}, + waitHooks{}, + updateStatusHookTick{}, + waitHooks{}, + + // Resolve, and the hook should run. + resolveError{state.ResolvedNoHooks}, + waitUnitAgent{ + status: params.StatusIdle, + }, + waitHooks{}, + updateStatusHookTick{}, + waitHooks{"update-status"}, + ), + }) +} + func (s *UniterSuite) TestUniterStartHook(c *gc.C) { s.runUniterTests(c, []uniterTest{ ut( @@ -351,7 +367,12 @@ statusGetter: unitStatusGetter, status: params.StatusUnknown, }, - waitHooks{"start", "config-changed"}, + // TODO(axw) confirm with fwereade that this is correct. + // Previously we would see "start", "config-changed". + // I don't think we should see another config-changed, + // since the config did not change after we resolved the + // failed one above. + waitHooks{"start"}, // If we'd accidentally retried that hook, somehow, we would get // an extra config-changed as we entered started; see that we don't. waitHooks{}, @@ -444,16 +465,10 @@ s.runUniterTests(c, []uniterTest{ // Reaction to entity deaths. ut( - "steady state service dying", - quickStart{}, - serviceDying, - waitHooks{"stop"}, - waitUniterDead{}, - ), ut( "steady state unit dying", quickStart{}, unitDying, - waitHooks{"stop"}, + waitHooks{"leader-settings-changed", "stop"}, waitUniterDead{}, ), ut( "steady state unit dead", @@ -462,22 +477,13 @@ waitUniterDead{}, waitHooks{}, ), ut( - "hook error service dying", - startupError{"start"}, - serviceDying, - verifyWaiting{}, - fixHook{"start"}, - resolveError{state.ResolvedRetryHooks}, - waitHooks{"start", "config-changed", "stop"}, - waitUniterDead{}, - ), ut( "hook error unit dying", startupError{"start"}, unitDying, verifyWaiting{}, fixHook{"start"}, resolveError{state.ResolvedRetryHooks}, - waitHooks{"start", "config-changed", "stop"}, + waitHooks{"start", "leader-settings-changed", "stop"}, waitUniterDead{}, ), ut( "hook error unit dead", @@ -509,7 +515,13 @@ waitHooks{"upgrade-charm", "config-changed"}, verifyCharm{revision: 1}, verifyRunning{}, - ), ut( + ), + }) +} + +func (s *UniterSuite) TestUniterSteadyStateUpgradeForce(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "steady state forced upgrade (identical behaviour)", quickStart{}, createCharm{revision: 1}, @@ -526,7 +538,13 @@ waitHooks{"upgrade-charm", "config-changed"}, verifyCharm{revision: 1}, verifyRunning{}, - ), ut( + ), + }) +} + +func (s *UniterSuite) TestUniterSteadyStateUpgradeResolve(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "steady state upgrade hook fail and resolve", quickStart{}, createCharm{revision: 1, badHooks: []string{"upgrade-charm"}}, @@ -556,7 +574,13 @@ }, waitHooks{"config-changed"}, verifyRunning{}, - ), ut( + ), + }) +} + +func (s *UniterSuite) TestUniterSteadyStateUpgradeRetry(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "steady state upgrade hook fail and retry", quickStart{}, createCharm{revision: 1, badHooks: []string{"upgrade-charm"}}, @@ -595,7 +619,13 @@ }, waitHooks{"upgrade-charm", "config-changed"}, verifyCharm{revision: 1}, verifyRunning{}, - ), ut( + ), + }) +} + +func (s *UniterSuite) TestUniterSteadyStateUpgradeRelations(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( + // This test does an add-relation as quickly as possible // after an upgrade-charm, in the hope that the scheduler will // deliver the events in the wrong order.
The observed @@ -620,6 +650,45 @@ }) } +func (s *UniterSuite) TestUpdateResourceCausesUpgrade(c *gc.C) { + // appendResource customises the wordpress charm's metadata, + // adding a "data" file resource. We do it here rather + // than in the charm itself to avoid modifying all of the other + // scenarios. + appendResource := func(c *gc.C, ctx *context, path string) { + f, err := os.OpenFile(filepath.Join(path, "metadata.yaml"), os.O_RDWR|os.O_APPEND, 0644) + c.Assert(err, jc.ErrorIsNil) + defer func() { + err := f.Close() + c.Assert(err, jc.ErrorIsNil) + }() + _, err = io.WriteString(f, ` +resources: + data: + Type: file + filename: filename.tgz + comment: One line that is useful when operators need to push it.`) + c.Assert(err, jc.ErrorIsNil) + } + s.runUniterTests(c, []uniterTest{ + ut( + "update resource causes upgrade", + + // These steps are just copied from quickstart with a customized + // createCharm. + createCharm{customize: appendResource}, + serveCharm{}, + createUniter{}, + waitUnitAgent{status: params.StatusIdle}, + waitHooks(startupHooks(false)), + verifyCharm{}, + + pushResource{}, + waitHooks{"upgrade-charm", "config-changed"}, + ), + }) +} + func (s *UniterSuite) TestUniterUpgradeOverwrite(c *gc.C) { //TODO(bogdanteleaga): Fix this on windows if runtime.GOOS == "windows" { @@ -716,7 +785,7 @@ }) } -func (s *UniterSuite) TestUniterErrorStateUpgrade(c *gc.C) { +func (s *UniterSuite) TestUniterErrorStateUnforcedUpgrade(c *gc.C) { s.runUniterTests(c, []uniterTest{ // Upgrade scenarios from error state. ut( @@ -747,10 +816,15 @@ info: "installing charm software", charm: 1, }, - waitHooks{"config-changed", "upgrade-charm", "config-changed"}, + waitHooks{"upgrade-charm", "config-changed"}, verifyCharm{revision: 1}, verifyRunning{}, - ), ut( + )}) +} + +func (s *UniterSuite) TestUniterErrorStateForcedUpgrade(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "error state forced upgrade", startupError{"start"}, createCharm{revision: 1}, @@ -955,22 +1029,13 @@ }, verifyCharm{revision: 2}, ), ut( - "upgrade conflict service dying", - startUpgradeError{}, - serviceDying, - verifyWaitingUpgradeError{revision: 1}, - fixUpgradeError{}, - resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed", "stop"}, - waitUniterDead{}, - ), ut( "upgrade conflict unit dying", startUpgradeError{}, unitDying, verifyWaitingUpgradeError{revision: 1}, fixUpgradeError{}, resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed", "stop"}, + waitHooks{"upgrade-charm", "config-changed", "leader-settings-changed", "stop"}, waitUniterDead{}, ), ut( "upgrade conflict unit dead", @@ -1102,20 +1167,12 @@ c.Assert(string(data), gc.Equals, "STARTDATA\n") }}, ), ugt( - "upgrade conflict service dying", - startGitUpgradeError{}, - serviceDying, - verifyWaiting{}, - resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed", "stop"}, - waitUniterDead{}, - ), ugt( "upgrade conflict unit dying", startGitUpgradeError{}, unitDying, verifyWaiting{}, resolveError{state.ResolvedNoHooks}, - waitHooks{"upgrade-charm", "config-changed", "stop"}, + waitHooks{"upgrade-charm", "config-changed", "leader-settings-changed", "stop"}, waitUniterDead{}, ), ugt( "upgrade conflict unit dead", @@ -1202,15 +1259,6 @@ relationState{removed: true}, verifyRunning{}, ), ut( - "service becomes dying while in a relation", - quickStartRelation{}, - serviceDying, - waitUniterDead{}, - waitDyingHooks, - relationState{life: state.Dying}, -
removeRelationUnit{"mysql/0"}, - relationState{removed: true}, - ), ut( "unit becomes dying while in a relation", quickStartRelation{}, unitDying, @@ -1349,179 +1397,6 @@ }) } -func (s *UniterSuite) TestUniterMeterStatusChanged(c *gc.C) { - s.runUniterTests(c, []uniterTest{ - ut( - "meter status event triggered by unit meter status change", - quickStart{}, - changeMeterStatus{"AMBER", "Investigate charm."}, - waitHooks{"meter-status-changed"}, - ), - }) -} - -func (s *UniterSuite) TestUniterSendMetricsBeforeDying(c *gc.C) { - s.runUniterTests(c, []uniterTest{ - ut( - "metrics must be sent before the unit is destroyed", - createCharm{ - customize: func(c *gc.C, ctx *context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - serveCharm{}, - createUniter{}, - waitHooks{"install", "leader-elected", "config-changed", "start"}, - addMetrics{[]string{"5", "42"}}, - unitDying, - waitUniterDead{}, - checkStateMetrics{number: 1, values: []string{"5", "42"}}, - ), - }) -} - -func (s *UniterSuite) TestUniterCollectMetrics(c *gc.C) { - s.runUniterTests(c, []uniterTest{ - ut( - "collect-metrics event triggered by manual timer", - createCharm{ - customize: func(c *gc.C, ctx *context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - serveCharm{}, - createUniter{}, - waitUnitAgent{status: params.StatusIdle}, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: params.StatusUnknown, - }, - waitHooks{"install", "leader-elected", "config-changed", "start"}, - verifyCharm{}, - collectMetricsTick{}, - waitHooks{"collect-metrics"}, - ), ut( - "collect-metrics resumed after hook error", - startupErrorWithCustomCharm{ - badHook: "config-changed", - customize: func(c *gc.C, ctx *context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - collectMetricsTick{expectFail: true}, - fixHook{"config-changed"}, - resolveError{state.ResolvedRetryHooks}, - waitUnitAgent{ - status: params.StatusIdle, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: params.StatusUnknown, - }, - waitHooks{"config-changed", "start"}, - collectMetricsTick{}, - waitHooks{"collect-metrics"}, - verifyRunning{}, - ), - ut( - "collect-metrics state maintained during uniter restart", - startupErrorWithCustomCharm{ - badHook: "config-changed", - customize: func(c *gc.C, ctx *context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - collectMetricsTick{expectFail: true}, - fixHook{"config-changed"}, - stopUniter{}, - startUniter{}, - resolveError{state.ResolvedRetryHooks}, - waitUnitAgent{ - status: params.StatusIdle, - }, - waitUnitAgent{ - statusGetter: unitStatusGetter, - status: params.StatusUnknown, - }, - waitHooks{"config-changed", "start"}, - collectMetricsTick{}, - waitHooks{"collect-metrics"}, - verifyRunning{}, - ), ut( - "collect-metrics event not triggered for non-metered charm", - quickStart{}, - collectMetricsTick{expectFail: true}, - waitHooks{}, - ), - }) -} - -func (s *UniterSuite) TestUniterSendMetrics(c *gc.C) { - s.runUniterTests(c, []uniterTest{ - ut( - "send metrics event triggered by manual timer", - createCharm{ - customize: func(c *gc.C, ctx *context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - serveCharm{}, - createUniter{}, - waitHooks{"install", "leader-elected", "config-changed", "start"}, - addMetrics{[]string{"15", "17"}}, - sendMetricsTick{}, - checkStateMetrics{number: 1, values: []string{"17", "15"}}, - ), ut( - "send-metrics resumed after hook error", - startupErrorWithCustomCharm{ - badHook: "config-changed", - customize: func(c *gc.C, ctx 
*context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - addMetrics{[]string{"15"}}, - sendMetricsTick{expectFail: true}, - fixHook{"config-changed"}, - resolveError{state.ResolvedRetryHooks}, - waitHooks{"config-changed", "start"}, - addMetrics{[]string{"17"}}, - sendMetricsTick{}, - checkStateMetrics{number: 2, values: []string{"15", "17"}}, - verifyRunning{}, - ), ut( - "send-metrics state maintained during uniter restart", - startupErrorWithCustomCharm{ - badHook: "config-changed", - customize: func(c *gc.C, ctx *context, path string) { - ctx.writeMetricsYaml(c, path) - }, - }, - collectMetricsTick{expectFail: true}, - addMetrics{[]string{"13"}}, - sendMetricsTick{expectFail: true}, - fixHook{"config-changed"}, - stopUniter{}, - startUniter{}, - resolveError{state.ResolvedRetryHooks}, - waitHooks{"config-changed", "start"}, - collectMetricsTick{}, - waitHooks{"collect-metrics"}, - addMetrics{[]string{"21"}}, - sendMetricsTick{}, - checkStateMetrics{number: 2, values: []string{"13", "21"}}, - verifyRunning{}, - ), ut( - "collect-metrics event not triggered for non-metered charm", - quickStart{}, - collectMetricsTick{expectFail: true}, - addMetrics{[]string{"21"}}, - sendMetricsTick{expectFail: true}, - waitHooks{}, - checkStateMetrics{number: 0}, - ), - }) -} - func (s *UniterSuite) TestActionEvents(c *gc.C) { s.runUniterTests(c, []uniterTest{ ut( @@ -1854,7 +1729,7 @@ waitSubordinateExists{"logging/0"}, unitDying, waitSubordinateDying{}, - waitHooks{"stop"}, + waitHooks{"leader-settings-changed", "stop"}, verifyWaiting{}, removeSubordinate{}, waitUniterDead{}, @@ -1882,12 +1757,11 @@ path: filepath.Join(s.dataDir, "agents", "unit-u-0"), dataDir: s.dataDir, charms: make(map[string][]byte), - collectMetricsTicker: s.collectMetricsTicker, - sendMetricsTicker: s.sendMetricsTicker, updateStatusHookTicker: s.updateStatusHookTicker, + charmDirGuard: &mockCharmDirGuard{}, } - addStateServerMachine(c, ctx.st) + addControllerMachine(c, ctx.st) // Create the subordinate service. 
dir := testcharms.Repo.ClonedDir(c.MkDir(), "logging") @@ -1928,7 +1802,7 @@ }) } -func (s *UniterSuite) TestReboot(c *gc.C) { +func (s *UniterSuite) TestRebootDisabledInActions(c *gc.C) { s.runUniterTests(c, []uniterTest{ ut( "test that juju-reboot disabled in actions", @@ -1959,7 +1833,12 @@ }, status: params.ActionCompleted, }}}, - ), ut( + )}) +} + +func (s *UniterSuite) TestRebootFinishesHook(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "test that juju-reboot finishes hook, and reboots", createCharm{ customize: func(c *gc.C, ctx *context, path string) { @@ -1983,7 +1862,12 @@ status: params.StatusUnknown, }, waitHooks{"leader-elected", "config-changed", "start"}, - ), ut( + )}) +} + +func (s *UniterSuite) TestRebootNowKillsHook(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "test that juju-reboot --now kills hook and exits", createCharm{ customize: func(c *gc.C, ctx *context, path string) { @@ -2007,7 +1891,12 @@ status: params.StatusUnknown, }, waitHooks{"install", "leader-elected", "config-changed", "start"}, - ), ut( + )}) +} + +func (s *UniterSuite) TestRebootDisabledOnHookError(c *gc.C) { + s.runUniterTests(c, []uniterTest{ + ut( "test juju-reboot will not happen if hook errors out", createCharm{ customize: func(c *gc.C, ctx *context, path string) { @@ -2029,7 +1918,7 @@ }) } -func (s *UniterSuite) TestJujuRunKeepsHookErrors(c *gc.C) { +func (s *UniterSuite) TestJujuRunExecutionSerialized(c *gc.C) { s.runUniterTests(c, []uniterTest{ ut( "hook failed status should stay around after juju run", @@ -2130,7 +2019,6 @@ } func (s *UniterSuite) TestLeadershipUnexpectedDepose(c *gc.C) { - s.PatchValue(uniter.LeadershipGuarantee, 2*coretesting.ShortWait) s.runUniterTests(c, []uniterTest{ ut( // NOTE: this is a strange and ugly test, intended to detect what @@ -2203,7 +2091,7 @@ waitHooks(startupHooks(false)), unitDying, // storage-detaching is not called because it was never attached - waitHooks{"stop"}, + waitHooks{"leader-settings-changed", "stop"}, verifyStorageDetached{}, waitUniterDead{}, ), ut( @@ -2224,14 +2112,10 @@ serveCharm{}, ensureStateWorker{}, createServiceAndUnit{}, + unitDying, startUniter{}, - // no hooks should be run, as storage isn't provisioned + // no hooks should be run, and unit agent should terminate waitHooks{}, - unitDying, - // TODO(axw) should we really be running startup hooks - // when the unit is dying? 
- waitHooks(startupHooks(true)), - waitHooks{"stop"}, waitUniterDead{}, ), // TODO(axw) test that storage-attached is run for new @@ -2268,7 +2152,7 @@ createUniter{executorFunc: executorFunc}, waitUnitAgent{ status: params.StatusFailed, - info: "run install hook", + info: "resolver loop error", }, expectError{".*some error occurred.*"}, ), === modified file 'src/github.com/juju/juju/worker/uniter/upgrade123.go' --- src/github.com/juju/juju/worker/uniter/upgrade123.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/upgrade123.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "github.com/juju/names" "github.com/juju/utils" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter/operation" ) @@ -28,7 +28,6 @@ default: return err } - } func getUniterStateFile(dataDir string, tag names.UnitTag) string { === modified file 'src/github.com/juju/juju/worker/uniter/upgrade123_test.go' --- src/github.com/juju/juju/worker/uniter/upgrade123_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/juju/worker/uniter/upgrade123_test.go 2016-03-22 15:18:22 +0000 @@ -11,8 +11,8 @@ jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/hooks" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" "github.com/juju/juju/worker/uniter" "github.com/juju/juju/worker/uniter/hook" === added file 'src/github.com/juju/juju/worker/uniter/upgrade126.go' --- src/github.com/juju/juju/worker/uniter/upgrade126.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/upgrade126.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package uniter + +import ( + "github.com/juju/names" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/worker/uniter/operation" +) + +// AddInstalledToUniterState sets the Installed boolean in state to true +// if the charm has been installed. The only occasion where this is not +// true is if we are currently installing. +func AddInstalledToUniterState(tag names.UnitTag, dataDir string) error { + logger.Tracef("entering upgrade step AddInstalledToUniterState") + defer logger.Tracef("leaving upgrade step AddInstalledToUniterState") + + opsFile := getUniterStateFile(dataDir, tag) + state, err := readUnsafe(opsFile) + switch err { + case nil: + return addInstalled(opsFile, state) + case operation.ErrNoStateFile: + logger.Warningf("no uniter state file found for unit %s, skipping uniter upgrade step", tag) + return nil + default: + return err + } +} + +func addInstalled(opsFile string, state *operation.State) error { + statefile := operation.NewStateFile(opsFile) + if state.Kind == operation.Install { + return nil + } + if state.Kind == operation.RunHook && state.Hook.Kind == hooks.Install { + return nil + } + if !state.Installed { + state.Installed = true + return statefile.Write(state) + } + return nil +} === added file 'src/github.com/juju/juju/worker/uniter/upgrade126_test.go' --- src/github.com/juju/juju/worker/uniter/upgrade126_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/uniter/upgrade126_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package uniter_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/hooks" + + "github.com/juju/juju/worker/uniter" + "github.com/juju/juju/worker/uniter/hook" + "github.com/juju/juju/worker/uniter/operation" +) + +func (s *upgradeStateContextSuite) TestInstalledBooleanFalseIfInstalling(c *gc.C) { + oldState := &operation.State{ + Kind: operation.Install, + Step: operation.Pending, + CharmURL: charm.MustParseURL("local:quantal/charm"), + } + err := s.statefile.Write(oldState) + c.Assert(err, jc.ErrorIsNil) + err = uniter.AddInstalledToUniterState(s.unitTag, s.datadir) + c.Assert(err, jc.ErrorIsNil) + newState := s.readState(c) + c.Assert(newState.Installed, gc.Equals, false) +} + +func (s *upgradeStateContextSuite) TestInstalledBooleanFalseIfRunHookInstalling(c *gc.C) { + oldState := &operation.State{ + Kind: operation.RunHook, + Step: operation.Pending, + Hook: &hook.Info{ + Kind: hooks.Install, + }, + } + err := s.statefile.Write(oldState) + c.Assert(err, jc.ErrorIsNil) + err = uniter.AddInstalledToUniterState(s.unitTag, s.datadir) + c.Assert(err, jc.ErrorIsNil) + newState := s.readState(c) + c.Assert(newState.Installed, gc.Equals, false) +} + +func (s *upgradeStateContextSuite) TestInstalledBooleanTrueIfInstalled(c *gc.C) { + oldState := &operation.State{ + Kind: operation.Continue, + Step: operation.Pending, + } + err := s.statefile.Write(oldState) + c.Assert(err, jc.ErrorIsNil) + err = uniter.AddInstalledToUniterState(s.unitTag, s.datadir) + c.Assert(err, jc.ErrorIsNil) + newState := s.readState(c) + c.Assert(newState.Installed, gc.Equals, true) +} === modified file 'src/github.com/juju/juju/worker/uniter/util_test.go' --- src/github.com/juju/juju/worker/uniter/util_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/util_test.go 2016-03-22 15:18:22 +0000 @@ -23,28 +23,30 @@ jc "github.com/juju/testing/checkers" ft "github.com/juju/testing/filetesting" "github.com/juju/utils" + "github.com/juju/utils/clock" utilexec "github.com/juju/utils/exec" "github.com/juju/utils/fslock" "github.com/juju/utils/proxy" gc "gopkg.in/check.v1" - corecharm "gopkg.in/juju/charm.v5" - goyaml "gopkg.in/yaml.v1" + corecharm "gopkg.in/juju/charm.v6-unstable" + goyaml "gopkg.in/yaml.v2" apiuniter "github.com/juju/juju/api/uniter" "github.com/juju/juju/apiserver/params" + "github.com/juju/juju/core/leadership" + coreleadership "github.com/juju/juju/core/leadership" "github.com/juju/juju/juju/sockets" "github.com/juju/juju/juju/testing" - coreleadership "github.com/juju/juju/leadership" "github.com/juju/juju/network" + "github.com/juju/juju/resource/resourcetesting" "github.com/juju/juju/state" "github.com/juju/juju/state/storage" "github.com/juju/juju/testcharms" coretesting "github.com/juju/juju/testing" "github.com/juju/juju/worker" - "github.com/juju/juju/worker/leadership" + "github.com/juju/juju/worker/fortress" "github.com/juju/juju/worker/uniter" "github.com/juju/juju/worker/uniter/charm" - "github.com/juju/juju/worker/uniter/metrics" "github.com/juju/juju/worker/uniter/operation" ) @@ -85,6 +87,7 @@ api *apiuniter.State leaderClaimer coreleadership.Claimer leaderTracker *mockLeaderTracker + charmDirGuard *mockCharmDirGuard charms map[string][]byte hooks []string sch *state.Charm @@ -95,9 +98,7 @@ relation *state.Relation relationUnits map[string]*state.RelationUnit subordinate *state.Unit - collectMetricsTicker *uniter.ManualTicker - sendMetricsTicker *uniter.ManualTicker - 
updateStatusHookTicker *uniter.ManualTicker + updateStatusHookTicker *manualTicker err string wg sync.WaitGroup @@ -130,7 +131,7 @@ func (ctx *context) run(c *gc.C, steps []stepper) { defer func() { if ctx.uniter != nil { - err := ctx.uniter.Stop() + err := worker.Stop(ctx.uniter) if ctx.err == "" { c.Assert(err, jc.ErrorIsNil) } else { @@ -241,7 +242,8 @@ func (ctx *context) matchHooks(c *gc.C) (match bool, overshoot bool) { ctx.mu.Lock() defer ctx.mu.Unlock() - c.Logf("ctx.hooksCompleted: %#v", ctx.hooksCompleted) + c.Logf(" actual hooks: %#v", ctx.hooksCompleted) + c.Logf("expected hooks: %#v", ctx.hooks) if len(ctx.hooksCompleted) < len(ctx.hooks) { return false, false } @@ -276,20 +278,20 @@ func (s ensureStateWorker) step(c *gc.C, ctx *context) { addresses, err := ctx.st.Addresses() if err != nil || len(addresses) == 0 { - addStateServerMachine(c, ctx.st) + addControllerMachine(c, ctx.st) } addresses, err = ctx.st.APIAddressesFromMachines() c.Assert(err, jc.ErrorIsNil) c.Assert(addresses, gc.HasLen, 1) } -func addStateServerMachine(c *gc.C, st *state.State) { - // The AddStateServerMachine call will update the API host ports +func addControllerMachine(c *gc.C, st *state.State) { + // The AddControllerMachine call will update the API host ports // to made-up addresses. We need valid addresses so that the uniter // can download charms from the API server. apiHostPorts, err := st.APIHostPorts() c.Assert(err, gc.IsNil) - testing.AddStateServerMachine(c, st) + testing.AddControllerMachine(c, st) err = st.SetAPIHostPorts(apiHostPorts) c.Assert(err, gc.IsNil) } @@ -375,7 +377,7 @@ type serveCharm struct{} func (s serveCharm) step(c *gc.C, ctx *context) { - storage := storage.NewStorage(ctx.st.EnvironUUID(), ctx.st.MongoSession()) + storage := storage.NewStorage(ctx.st.ModelUUID(), ctx.st.MongoSession()) for storagePath, data := range ctx.charms { err := storage.Put(storagePath, bytes.NewReader(data), int64(len(data))) c.Assert(err, jc.ErrorIsNil) @@ -468,7 +470,7 @@ panic(err.Error()) } locksDir := filepath.Join(ctx.dataDir, "locks") - lock, err := fslock.NewLock(locksDir, "uniter-hook-execution") + lock, err := fslock.NewLock(locksDir, "uniter-hook-execution", fslock.Defaults()) c.Assert(err, jc.ErrorIsNil) operationExecutor := operation.NewExecutor if s.newExecutorFunc != nil { @@ -476,20 +478,22 @@ } uniterParams := uniter.UniterParams{ - UniterFacade: ctx.api, - UnitTag: tag, - LeadershipTracker: ctx.leaderTracker, - DataDir: ctx.dataDir, - MachineLock: lock, - MetricsTimerChooser: uniter.NewTestingMetricsTimerChooser( - ctx.collectMetricsTicker.ReturnTimer, - ctx.sendMetricsTicker.ReturnTimer, - ), + UniterFacade: ctx.api, + UnitTag: tag, + LeadershipTracker: ctx.leaderTracker, + CharmDirGuard: ctx.charmDirGuard, + DataDir: ctx.dataDir, + MachineLock: lock, UpdateStatusSignal: ctx.updateStatusHookTicker.ReturnTimer, NewOperationExecutor: operationExecutor, + Observer: ctx, + // TODO(axw) 2015-11-02 #1512191 + // update tests that rely on timing to advance clock + // appropriately. 
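// For illustration, UpdateStatusSignal above is wired to the manualTicker
// defined at the bottom of this file, so tests fire the update-status timer
// deterministically instead of waiting on a real clock. A minimal sketch of
// the pattern, assuming the names from this file:
//
//	ticker := newManualTicker()
//	params.UpdateStatusSignal = ticker.ReturnTimer // uniter receives from this channel
//	// ... start the uniter, then trigger the hook on demand:
//	err := ticker.Tick() // sends time.Now(); errors if nothing receives within worstCase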
+ Clock: clock.WallClock, } - ctx.uniter = uniter.NewUniter(&uniterParams) - uniter.SetUniterObserver(ctx.uniter, ctx) + ctx.uniter, err = uniter.NewUniter(&uniterParams) + c.Assert(err, jc.ErrorIsNil) } type waitUniterDead struct { @@ -524,25 +528,21 @@ func (s waitUniterDead) waitDead(c *gc.C, ctx *context) error { u := ctx.uniter ctx.uniter = nil - timeout := time.After(worstCase) - for { - // The repeated StartSync is to ensure timely completion of this method - // in the case(s) where a state change causes a uniter action which - // causes a state change which causes a uniter action, in which case we - // need more than one sync. At the moment there's only one situation - // that causes this -- setting the unit's service to Dying -- but it's - // not an intrinsically insane pattern of action (and helps to simplify - // the filter code) so this test seems like a small price to pay. - ctx.s.BackingState.StartSync() - select { - case <-u.Dead(): - return u.Wait() - case <-time.After(coretesting.ShortWait): - continue - case <-timeout: - c.Fatalf("uniter still alive") - } + + wait := make(chan error, 1) + go func() { + wait <- u.Wait() + }() + + ctx.s.BackingState.StartSync() + select { + case err := <-wait: + return err + case <-time.After(worstCase): + u.Kill() + c.Fatalf("uniter still alive") } + panic("unreachable") } type stopUniter struct { @@ -556,7 +556,7 @@ return } ctx.uniter = nil - err := u.Stop() + err := worker.Stop(u) if s.err == "" { c.Assert(err, jc.ErrorIsNil) } else { @@ -780,7 +780,22 @@ if overshoot && len(s) == 0 { c.Fatalf("ran more hooks than expected") } + waitExecutionLockReleased := func() { + lock := createHookLock(c, ctx.dataDir) + if err := lock.LockWithTimeout(worstCase, "waiting for lock"); err != nil { + c.Fatalf("failed to acquire execution lock: %v", err) + } + if err := lock.Unlock(); err != nil { + c.Fatalf("failed to release execution lock: %v", err) + } + } if match { + if len(s) > 0 { + // only check for lock release if there were hooks + // run; hooks *not* running may be due to the lock + // being held. 
+ waitExecutionLockReleased() + } return } timeout := time.After(worstCase) @@ -789,6 +804,7 @@ select { case <-time.After(coretesting.ShortWait): if match, _ = ctx.matchHooks(c); match { + waitExecutionLockReleased() return } case <-timeout: @@ -896,19 +912,6 @@ c.Assert(err, jc.ErrorIsNil) } -type collectMetricsTick struct { - expectFail bool -} - -func (s collectMetricsTick) step(c *gc.C, ctx *context) { - err := ctx.collectMetricsTicker.Tick() - if s.expectFail { - c.Assert(err, gc.ErrorMatches, "ticker channel blocked") - } else { - c.Assert(err, jc.ErrorIsNil) - } -} - type updateStatusHookTick struct{} func (s updateStatusHookTick) step(c *gc.C, ctx *context) { @@ -916,75 +919,6 @@ c.Assert(err, jc.ErrorIsNil) } -type sendMetricsTick struct { - expectFail bool -} - -func (s sendMetricsTick) step(c *gc.C, ctx *context) { - err := ctx.sendMetricsTicker.Tick() - if s.expectFail { - c.Assert(err, gc.ErrorMatches, "ticker channel blocked") - - } else { - c.Assert(err, jc.ErrorIsNil) - } -} - -type addMetrics struct { - values []string -} - -func (s addMetrics) step(c *gc.C, ctx *context) { - var declaredMetrics map[string]corecharm.Metric - if ctx.sch.Metrics() != nil { - declaredMetrics = ctx.sch.Metrics().Metrics - } - spoolDir := filepath.Join(ctx.path, "state", "spool", "metrics") - - recorder, err := metrics.NewJSONMetricRecorder(spoolDir, declaredMetrics, ctx.sch.URL().String()) - c.Assert(err, jc.ErrorIsNil) - - for _, value := range s.values { - recorder.AddMetric("pings", value, time.Now()) - } - - err = recorder.Close() - c.Assert(err, jc.ErrorIsNil) -} - -type checkStateMetrics struct { - number int - values []string -} - -func (s checkStateMetrics) step(c *gc.C, ctx *context) { - timeout := time.After(worstCase) - for { - select { - case <-timeout: - c.Fatalf("specified number of metric batches not received by the state server") - case <-time.After(coretesting.ShortWait): - batches, err := ctx.st.MetricBatches() - c.Assert(err, jc.ErrorIsNil) - if len(batches) != s.number { - continue - } - for _, value := range s.values { - found := false - for _, batch := range batches { - for _, metric := range batch.Metrics() { - if metric.Key == "pings" && metric.Value == value { - found = true - } - } - } - c.Assert(found, gc.Equals, true) - } - return - } - } -} - type changeConfig map[string]interface{} func (s changeConfig) step(c *gc.C, ctx *context) { @@ -1011,7 +945,11 @@ curl := curl(s.revision) sch, err := ctx.st.Charm(curl) c.Assert(err, jc.ErrorIsNil) - err = ctx.svc.SetCharm(sch, s.forced) + cfg := state.SetCharmConfig{ + Charm: sch, + ForceUnits: s.forced, + } + err = ctx.svc.SetCharm(cfg) c.Assert(err, jc.ErrorIsNil) serveCharm{}.step(c, ctx) } @@ -1039,6 +977,22 @@ c.Assert(url, gc.DeepEquals, curl(checkRevision)) } +type pushResource struct{} + +func (s pushResource) step(c *gc.C, ctx *context) { + opened := resourcetesting.NewResource(c, >.Stub{}, "data", ctx.unit.ServiceName(), "the bytes") + + res, err := ctx.st.Resources() + c.Assert(err, jc.ErrorIsNil) + _, err = res.SetResource( + ctx.unit.ServiceName(), + opened.Username, + opened.Resource.Resource, + opened.ReadCloser, + ) + c.Assert(err, jc.ErrorIsNil) +} + type startUpgradeError struct{} func (s startUpgradeError) step(c *gc.C, ctx *context) { @@ -1090,11 +1044,12 @@ verifyWaitingSteps := []stepper{ stopUniter{}, custom{func(c *gc.C, ctx *context) { - // By setting status to Started, and waiting for the restarted uniter + // By setting status to Idle, and waiting for the restarted uniter // to reset the error 
status, we can avoid a race in which a subsequent // fixUpgradeError lands just before the restarting uniter retries the // upgrade; and thus puts us in an unexpected state for future steps. - ctx.unit.SetAgentStatus(state.StatusActive, "", nil) + err := ctx.unit.SetAgentStatus(state.StatusIdle, "", nil) + c.Check(err, jc.ErrorIsNil) }}, startUniter{}, } @@ -1337,10 +1292,6 @@ s.f(c, ctx) } -var serviceDying = custom{func(c *gc.C, ctx *context) { - c.Assert(ctx.svc.Destroy(), gc.IsNil) -}} - var relationDying = custom{func(c *gc.C, ctx *context) { c.Assert(ctx.relation.Destroy(), gc.IsNil) }} @@ -1404,7 +1355,7 @@ func createHookLock(c *gc.C, dataDir string) *fslock.Lock { lockDir := filepath.Join(dataDir, "locks") - lock, err := fslock.NewLock(lockDir, "uniter-hook-execution") + lock, err := fslock.NewLock(lockDir, "uniter-hook-execution", fslock.Defaults()) c.Assert(err, jc.ErrorIsNil) return lock } @@ -1446,7 +1397,7 @@ "ftp-proxy": s.Ftp, "no-proxy": s.NoProxy, } - err := ctx.st.UpdateEnvironConfig(attrs, nil, nil) + err := ctx.st.UpdateModelConfig(attrs, nil, nil) c.Assert(err, jc.ErrorIsNil) } @@ -1713,6 +1664,14 @@ c.Assert(verify.filename, jc.DoesNotExist) } +type mockCharmDirGuard struct{} + +// Unlock implements fortress.Guard. +func (*mockCharmDirGuard) Unlock() error { return nil } + +// Lockdown implements fortress.Guard. +func (*mockCharmDirGuard) Lockdown(_ fortress.Abort) error { return nil } + // prepareGitUniter runs a sequence of uniter tests with the manifest deployer // replacement logic patched out, simulating the effect of running an older // version of juju that exclusively used a git deployer. This is useful both @@ -1883,3 +1842,30 @@ func (s expectError) step(c *gc.C, ctx *context) { ctx.setExpectedError(s.err) } + +// manualTicker will be used to generate collect-metrics events +// in a time-independent manner for testing. +type manualTicker struct { + c chan time.Time +} + +// Tick sends a signal on the ticker channel. +func (t *manualTicker) Tick() error { + select { + case t.c <- time.Now(): + case <-time.After(worstCase): + return fmt.Errorf("ticker channel blocked") + } + return nil +} + +// ReturnTimer can be used to replace the update status signal generator. +func (t *manualTicker) ReturnTimer() <-chan time.Time { + return t.c +} + +func newManualTicker() *manualTicker { + return &manualTicker{ + c: make(chan time.Time), + } +} === modified file 'src/github.com/juju/juju/worker/uniter/util_unix_test.go' --- src/github.com/juju/juju/worker/uniter/util_unix_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/util_unix_test.go 2016-03-22 15:18:22 +0000 @@ -31,12 +31,12 @@ // Different hook file contents. 
These are used in util_test goodHook = ` #!/bin/bash --norc -juju-log $JUJU_ENV_UUID %s $JUJU_REMOTE_UNIT +juju-log $JUJU_MODEL_UUID %s $JUJU_REMOTE_UNIT `[1:] badHook = ` #!/bin/bash --norc -juju-log $JUJU_ENV_UUID fail-%s $JUJU_REMOTE_UNIT +juju-log $JUJU_MODEL_UUID fail-%s $JUJU_REMOTE_UNIT exit 1 `[1:] @@ -66,7 +66,7 @@ actions = map[string]string{ "action-log": ` #!/bin/bash --norc -juju-log $JUJU_ENV_UUID action-log +juju-log $JUJU_MODEL_UUID action-log `[1:], "snapshot": ` #!/bin/bash --norc @@ -111,7 +111,7 @@ s.runUniterTests(c, []uniterTest{ ut( - "run commands: environment", + "run commands: model", quickStart{}, runCommands{echoUnitNameToFile("run.output")}, verifyFile{filepath.Join(testDir, "run.output"), "juju run u/0\n"}, @@ -127,7 +127,7 @@ "private.address.example.com\npublic.address.example.com\n", }, ), ut( - "run commands: jujuc environment", + "run commands: jujuc model", quickStartRelation{}, relationRunCommands{ fmt.Sprintf("echo $JUJU_RELATION_ID > %s", testFile("jujuc-env.output")), === modified file 'src/github.com/juju/juju/worker/uniter/util_windows_test.go' --- src/github.com/juju/juju/worker/uniter/util_windows_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/uniter/util_windows_test.go 2016-03-22 15:18:22 +0000 @@ -30,12 +30,12 @@ // Different hook file contents. These are used in util_test goodHook = ` -juju-log.exe %%JUJU_ENV_UUID%% %s %%JUJU_REMOTE_UNIT%% +juju-log.exe %%JUJU_MODEL_UUID%% %s %%JUJU_REMOTE_UNIT%% `[1:] badHook = ` #!/bin/bash --norc -juju-log.exe %%JUJU_ENV_UUID%% fail-%s %%JUJU_REMOTE_UNIT%% +juju-log.exe %%JUJU_MODEL_UUID%% fail-%s %%JUJU_REMOTE_UNIT%% exit 1 `[1:] @@ -59,7 +59,7 @@ // Map of action files contents. These are used in util_test actions = map[string]string{ "action-log": ` -juju-log.exe %%JUJU_ENV_UUID%% action-log +juju-log.exe %%JUJU_MODEL_UUID%% action-log `[1:], "snapshot": ` action-set.exe outfile.name="snapshot-01.tar" outfile.size="10.3GB" @@ -100,7 +100,7 @@ s.runUniterTests(c, []uniterTest{ ut( - "run commands: environment", + "run commands: model", quickStart{}, runCommands{echoUnitNameToFile("run.output")}, verifyFile{filepath.Join(testDir, "run.output"), "juju run u/0\r\n"}, @@ -116,7 +116,7 @@ "private.address.example.com\r\npublic.address.example.com\r\n", }, ), ut( - "run commands: jujuc environment", + "run commands: jujuc model", quickStartRelation{}, relationRunCommands{ fmt.Sprintf("Set-Content %s $env:JUJU_RELATION_ID", testFile("jujuc-env.output")), === modified file 'src/github.com/juju/juju/worker/upgrader/manifold.go' --- src/github.com/juju/juju/worker/upgrader/manifold.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/upgrader/manifold.go 2016-03-22 15:18:22 +0000 @@ -4,38 +4,88 @@ package upgrader import ( + "github.com/juju/errors" + "github.com/juju/juju/agent" "github.com/juju/juju/api/base" "github.com/juju/juju/api/upgrader" + "github.com/juju/juju/version" "github.com/juju/juju/worker" "github.com/juju/juju/worker/dependency" - "github.com/juju/juju/worker/util" + "github.com/juju/juju/worker/gate" ) // ManifoldConfig defines the names of the manifolds on which a // Manifold will depend. -type ManifoldConfig util.AgentApiManifoldConfig +type ManifoldConfig struct { + AgentName string + APICallerName string + UpgradeStepsGateName string + UpgradeCheckGateName string + PreviousAgentVersion version.Number +} // Manifold returns a dependency manifold that runs an upgrader // worker, using the resource names defined in the supplied config. 
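// For illustration, a machine agent would populate every field, while a
// unit agent leaves the gate names empty to opt out of the upgrade
// synchronisation machinery. A sketch of the wiring (the resource name
// strings and the previousAgentVersion variable are hypothetical):
//
//	manifold := upgrader.Manifold(upgrader.ManifoldConfig{
//		AgentName:            "agent",
//		APICallerName:        "api-caller",
//		UpgradeStepsGateName: "upgrade-steps-gate", // machine agents only
//		UpgradeCheckGateName: "upgrade-check-gate", // machine agents only
//		PreviousAgentVersion: previousAgentVersion,
//	})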
func Manifold(config ManifoldConfig) dependency.Manifold { - return util.AgentApiManifold(util.AgentApiManifoldConfig(config), newWorker) -} - -// newWorker wraps NewUpgrader for the convenience of AgentApiManifold. It should -// eventually replace NewUpgrader. -var newWorker = func(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { - currentConfig := a.CurrentConfig() - upgraderFacade := upgrader.NewState(apiCaller) - return NewAgentUpgrader( - upgraderFacade, - currentConfig, - // TODO(fwereade): surely we shouldn't need both currentConfig - // *and* currentConfig.UpgradedToVersion? - currentConfig.UpgradedToVersion(), - // TODO(fwereade): these are unit-agent-specific, and very much - // unsuitable for use in a machine agent. - func() bool { return false }, - make(chan struct{}), - ), nil + inputs := []string{ + config.AgentName, + config.APICallerName, + } + // The machine agent uses these but the unit agent doesn't. + if config.UpgradeStepsGateName != "" { + inputs = append(inputs, config.UpgradeStepsGateName) + } + if config.UpgradeCheckGateName != "" { + inputs = append(inputs, config.UpgradeCheckGateName) + } + + return dependency.Manifold{ + Inputs: inputs, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + // This wraps NewUpgrader for the convenience of the manifold. It should + // eventually replace NewUpgrader. + + var agent agent.Agent + if err := getResource(config.AgentName, &agent); err != nil { + return nil, err + } + currentConfig := agent.CurrentConfig() + + var apiCaller base.APICaller + if err := getResource(config.APICallerName, &apiCaller); err != nil { + return nil, err + } + upgraderFacade := upgrader.NewState(apiCaller) + + var upgradeStepsWaiter gate.Waiter + if config.UpgradeStepsGateName == "" { + upgradeStepsWaiter = gate.NewLock() + } else { + if config.PreviousAgentVersion == version.Zero { + return nil, errors.New("previous agent version not specified") + } + if err := getResource(config.UpgradeStepsGateName, &upgradeStepsWaiter); err != nil { + return nil, err + } + } + + var initialCheckUnlocker gate.Unlocker + if config.UpgradeCheckGateName == "" { + initialCheckUnlocker = gate.NewLock() + } else { + if err := getResource(config.UpgradeCheckGateName, &initialCheckUnlocker); err != nil { + return nil, err + } + } + + return NewAgentUpgrader( + upgraderFacade, + currentConfig, + config.PreviousAgentVersion, + upgradeStepsWaiter, + initialCheckUnlocker, + ) + }, + } } === modified file 'src/github.com/juju/juju/worker/upgrader/upgrader.go' --- src/github.com/juju/juju/worker/upgrader/upgrader.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/upgrader/upgrader.go 2016-03-22 15:18:22 +0000 @@ -12,14 +12,16 @@ "github.com/juju/loggo" "github.com/juju/names" "github.com/juju/utils" - "launchpad.net/tomb" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" "github.com/juju/juju/agent" agenttools "github.com/juju/juju/agent/tools" "github.com/juju/juju/api/upgrader" - "github.com/juju/juju/state/watcher" coretools "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/worker/catacomb" + "github.com/juju/juju/worker/gate" ) // retryAfter returns a channel that receives a value @@ -33,13 +35,13 @@ // Upgrader represents a worker that watches the state for upgrade // requests. 
type Upgrader struct { - tomb tomb.Tomb - st *upgrader.State - dataDir string - tag names.Tag - origAgentVersion version.Number - areUpgradeStepsRunning func() bool - agentUpgradeComplete chan struct{} + catacomb catacomb.Catacomb + st *upgrader.State + dataDir string + tag names.Tag + origAgentVersion version.Number + upgradeStepsWaiter gate.Waiter + initialUpgradeCheckComplete gate.Unlocker } // NewAgentUpgrader returns a new upgrader worker. It watches changes to the @@ -52,32 +54,35 @@ st *upgrader.State, agentConfig agent.Config, origAgentVersion version.Number, - areUpgradeStepsRunning func() bool, - agentUpgradeComplete chan struct{}, -) *Upgrader { + upgradeStepsWaiter gate.Waiter, + initialUpgradeCheckComplete gate.Unlocker, +) (*Upgrader, error) { u := &Upgrader{ - st: st, - dataDir: agentConfig.DataDir(), - tag: agentConfig.Tag(), - origAgentVersion: origAgentVersion, - areUpgradeStepsRunning: areUpgradeStepsRunning, - agentUpgradeComplete: agentUpgradeComplete, - } - go func() { - defer u.tomb.Done() - u.tomb.Kill(u.loop()) - }() - return u + st: st, + dataDir: agentConfig.DataDir(), + tag: agentConfig.Tag(), + origAgentVersion: origAgentVersion, + upgradeStepsWaiter: upgradeStepsWaiter, + initialUpgradeCheckComplete: initialUpgradeCheckComplete, + } + err := catacomb.Invoke(catacomb.Plan{ + Site: &u.catacomb, + Work: u.loop, + }) + if err != nil { + return nil, errors.Trace(err) + } + return u, nil } // Kill implements worker.Worker.Kill. func (u *Upgrader) Kill() { - u.tomb.Kill(nil) + u.catacomb.Kill(nil) } // Wait implements worker.Worker.Wait. func (u *Upgrader) Wait() error { - return u.tomb.Wait() + return u.catacomb.Wait() } // Stop stops the upgrader and returns any @@ -92,10 +97,10 @@ func allowedTargetVersion( origAgentVersion version.Number, curVersion version.Number, - upgradeRunning bool, + upgradeStepsRunning bool, targetVersion version.Number, ) bool { - if upgradeRunning && targetVersion == origAgentVersion { + if upgradeStepsRunning && targetVersion == origAgentVersion { return true } if targetVersion.Major < curVersion.Major { @@ -107,61 +112,77 @@ return true } -// closeChannel can be called multiple times to -// close the channel without panicing. -func closeChannel(ch chan struct{}) { - select { - case <-ch: - return - default: - close(ch) - } -} - func (u *Upgrader) loop() error { // Start by reporting current tools (which includes arch/series, and is - // used by the state server in communicating the desired version below). - if err := u.st.SetVersion(u.tag.String(), version.Current); err != nil { + // used by the controller in communicating the desired version below). + if err := u.st.SetVersion(u.tag.String(), toBinaryVersion(version.Current)); err != nil { return errors.Annotate(err, "cannot set agent version") } - versionWatcher, err := u.st.WatchAPIVersion(u.tag.String()) - if err != nil { - return err - } - changes := versionWatcher.Changes() - defer watcher.Stop(versionWatcher, &u.tomb) - var retry <-chan time.Time // We don't read on the dying channel until we have received the // initial event from the API version watcher, thus ensuring // that we attempt an upgrade even if other workers are dying - // all around us. - var ( - dying <-chan struct{} - wantTools *coretools.Tools - wantVersion version.Number - ) + // all around us. Similarly, we don't want to bind the watcher + // to the catacomb's lifetime (yet!) lest we wait forever for a + // stopped watcher. 
+ // + // However, that absolutely depends on versionWatcher's guaranteed + // initial event, and we should assume that it'll break its contract + // sometime. So we allow the watcher to wait patiently for the event + // for a full minute; but after that we proceed regardless. + versionWatcher, err := u.st.WatchAPIVersion(u.tag.String()) + if err != nil { + return errors.Trace(err) + } + logger.Infof("abort check blocked until version event received") + mustProceed := time.After(time.Minute) + var dying <-chan struct{} + allowDying := func() { + if dying == nil { + logger.Infof("unblocking abort check") + mustProceed = nil + dying = u.catacomb.Dying() + if err := u.catacomb.Add(versionWatcher); err != nil { + u.catacomb.Kill(err) + } + } + } + + var retry <-chan time.Time for { select { - case _, ok := <-changes: - if !ok { - return watcher.EnsureErr(versionWatcher) - } - wantVersion, err = u.st.DesiredVersion(u.tag.String()) - if err != nil { - return err - } - logger.Infof("desired tool version: %v", wantVersion) - dying = u.tomb.Dying() + // NOTE: retry and dying both start out nil, so they can't be chosen + // first time round the loop. However... case <-retry: case <-dying: - return nil - } - if wantVersion == version.Current.Number { - closeChannel(u.agentUpgradeComplete) + return u.catacomb.ErrDying() + // ...*every* other case *must* allowDying(), before doing anything + // else, lest an error cause us to leak versionWatcher. + case <-mustProceed: + logger.Infof("version event not received after one minute") + allowDying() + case _, ok := <-versionWatcher.Changes(): + allowDying() + if !ok { + return errors.New("version watcher closed") + } + } + + wantVersion, err := u.st.DesiredVersion(u.tag.String()) + if err != nil { + return err + } + logger.Infof("desired tool version: %v", wantVersion) + + if wantVersion == version.Current { + u.initialUpgradeCheckComplete.Unlock() continue - } else if !allowedTargetVersion(u.origAgentVersion, version.Current.Number, - u.areUpgradeStepsRunning(), wantVersion) { + } else if !allowedTargetVersion( + u.origAgentVersion, + version.Current, + !u.upgradeStepsWaiter.IsUnlocked(), + wantVersion, + ) { // See also bug #1299802 where when upgrading from // 1.16 to 1.18 there is a race condition that can // cause the unit agent to upgrade, and then want to @@ -169,7 +190,7 @@ // finished upgrading. logger.Infof("desired tool version: %s is older than current %s, refusing to downgrade", wantVersion, version.Current) - closeChannel(u.agentUpgradeComplete) + u.initialUpgradeCheckComplete.Unlock() continue } logger.Infof("upgrade requested from %v to %v", version.Current, wantVersion) @@ -181,7 +202,7 @@ } // Check if tools are available for download. - wantTools, err = u.st.Tools(u.tag.String()) + wantTools, err := u.st.Tools(u.tag.String()) if err != nil { // Not being able to lookup Tools is considered fatal return err @@ -191,7 +212,7 @@ // repeatedly (causing the agent to be stopped), as long // as we have got as far as this, we will still be able to // upgrade the agent. 
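// For illustration, the retry gate in the select above relies on a Go idiom:
// a receive from a nil channel blocks forever, so while retry is nil that
// case simply can never be chosen. A minimal sketch, assuming the retryAfter
// helper declared at the top of this file:
//
//	var retry <-chan time.Time // nil: "case <-retry" is unselectable
//	// ... after a failed download:
//	retry = retryAfter() // arms the case; it fires once the retry delay passes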
- err := u.ensureTools(wantTools) + err = u.ensureTools(wantTools) if err == nil { return u.newUpgradeReadyError(wantTools.Version) } @@ -201,8 +222,11 @@ } func toBinaryVersion(vers version.Number) version.Binary { - outVers := version.Current - outVers.Number = vers + outVers := version.Binary{ + Number: vers, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } return outVers } @@ -213,7 +237,7 @@ func (u *Upgrader) newUpgradeReadyError(newVersion version.Binary) *UpgradeReadyError { return &UpgradeReadyError{ - OldTools: version.Current, + OldTools: toBinaryVersion(version.Current), NewTools: newVersion, AgentName: u.tag.String(), DataDir: u.dataDir, === modified file 'src/github.com/juju/juju/worker/upgrader/upgrader_test.go' --- src/github.com/juju/juju/worker/upgrader/upgrader_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/upgrader/upgrader_test.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,8 @@ "github.com/juju/names" jc "github.com/juju/testing/checkers" "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" "github.com/juju/utils/symlink" gc "gopkg.in/check.v1" @@ -23,12 +25,12 @@ envtesting "github.com/juju/juju/environs/testing" envtools "github.com/juju/juju/environs/tools" jujutesting "github.com/juju/juju/juju/testing" - "github.com/juju/juju/provider/dummy" "github.com/juju/juju/state" statetesting "github.com/juju/juju/state/testing" coretesting "github.com/juju/juju/testing" coretools "github.com/juju/juju/tools" "github.com/juju/juju/version" + "github.com/juju/juju/worker/gate" "github.com/juju/juju/worker/upgrader" ) @@ -43,8 +45,8 @@ state api.Connection oldRetryAfter func() <-chan time.Time confVersion version.Number - upgradeRunning bool - agentUpgradeComplete chan struct{} + upgradeStepsComplete gate.Lock + initialCheckComplete gate.Lock } type AllowedTargetVersionSuite struct{} @@ -56,14 +58,21 @@ s.JujuConnSuite.SetUpTest(c) // s.machine needs to have IsManager() so that it can get the actual // current revision to upgrade to. - s.state, s.machine = s.OpenAPIAsNewMachine(c, state.JobManageEnviron) + s.state, s.machine = s.OpenAPIAsNewMachine(c, state.JobManageModel) // Capture the value of RetryAfter, and use that captured // value in the cleanup lambda. 
oldRetryAfter := *upgrader.RetryAfter s.AddCleanup(func(*gc.C) { *upgrader.RetryAfter = oldRetryAfter }) - s.agentUpgradeComplete = make(chan struct{}) + s.upgradeStepsComplete = gate.NewLock() + s.initialCheckComplete = gate.NewLock() +} + +func (s *UpgraderSuite) patchVersion(v version.Binary) { + s.PatchValue(&arch.HostArch, func() string { return v.Arch }) + s.PatchValue(&series.HostSeries, func() string { return v.Series }) + s.PatchValue(&version.Current, v.Number) } type mockConfig struct { @@ -89,13 +98,15 @@ } func (s *UpgraderSuite) makeUpgrader(c *gc.C) *upgrader.Upgrader { - return upgrader.NewAgentUpgrader( + w, err := upgrader.NewAgentUpgrader( s.state.Upgrader(), agentConfig(s.machine.Tag(), s.DataDir()), s.confVersion, - func() bool { return s.upgradeRunning }, - s.agentUpgradeComplete, + s.upgradeStepsComplete, + s.initialCheckComplete, ) + c.Assert(err, jc.ErrorIsNil) + return w } func (s *UpgraderSuite) TestUpgraderSetsTools(c *gc.C) { @@ -104,14 +115,14 @@ c.Assert(err, jc.ErrorIsNil) stor := s.DefaultToolsStorage agentTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), vers) - s.PatchValue(&version.Current, agentTools.Version) + s.patchVersion(agentTools.Version) err = envtools.MergeAndWriteMetadata(stor, "released", "released", coretools.List{agentTools}, envtools.DoNotWriteMirrors) _, err = s.machine.AgentTools() c.Assert(err, jc.Satisfies, errors.IsNotFound) u := s.makeUpgrader(c) statetesting.AssertStop(c, u) - s.expectUpgradeChannelClosed(c) + s.expectInitialUpgradeCheckDone(c) s.machine.Refresh() gotTools, err := s.machine.AgentTools() c.Assert(err, jc.ErrorIsNil) @@ -121,7 +132,7 @@ func (s *UpgraderSuite) TestUpgraderSetVersion(c *gc.C) { vers := version.MustParseBinary("5.4.3-precise-amd64") agentTools := envtesting.PrimeTools(c, s.DefaultToolsStorage, s.DataDir(), s.Environ.Config().AgentStream(), vers) - s.PatchValue(&version.Current, agentTools.Version) + s.patchVersion(agentTools.Version) err := os.RemoveAll(filepath.Join(s.DataDir(), "tools")) c.Assert(err, jc.ErrorIsNil) @@ -132,46 +143,33 @@ u := s.makeUpgrader(c) statetesting.AssertStop(c, u) - s.expectUpgradeChannelClosed(c) + s.expectInitialUpgradeCheckDone(c) s.machine.Refresh() gotTools, err := s.machine.AgentTools() c.Assert(err, jc.ErrorIsNil) - c.Assert(gotTools, gc.DeepEquals, &coretools.Tools{Version: version.Current}) -} - -func (s *UpgraderSuite) expectUpgradeChannelClosed(c *gc.C) { - select { - case <-s.agentUpgradeComplete: - default: - c.Fail() - } -} - -func (s *UpgraderSuite) expectUpgradeChannelNotClosed(c *gc.C) { - select { - case <-s.agentUpgradeComplete: - c.Fail() - default: - } + c.Assert(gotTools, gc.DeepEquals, &coretools.Tools{Version: vers}) +} + +func (s *UpgraderSuite) expectInitialUpgradeCheckDone(c *gc.C) { + c.Assert(s.initialCheckComplete.IsUnlocked(), jc.IsTrue) +} + +func (s *UpgraderSuite) expectInitialUpgradeCheckNotDone(c *gc.C) { + c.Assert(s.initialCheckComplete.IsUnlocked(), jc.IsFalse) } func (s *UpgraderSuite) TestUpgraderUpgradesImmediately(c *gc.C) { stor := s.DefaultToolsStorage oldTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.3-precise-amd64")) - s.PatchValue(&version.Current, oldTools.Version) + s.patchVersion(oldTools.Version) newTools := envtesting.AssertUploadFakeToolsVersions( c, stor, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.5-precise-amd64"))[0] err := statetesting.SetAgentVersion(s.State, 
newTools.Version.Number) c.Assert(err, jc.ErrorIsNil) - // Make the download take a while so that we verify that - // the download happens before the upgrader checks if - // it's been stopped. - dummy.SetStorageDelay(coretesting.ShortWait) - u := s.makeUpgrader(c) err = u.Stop() - s.expectUpgradeChannelNotClosed(c) + s.expectInitialUpgradeCheckNotDone(c) envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{ AgentName: s.machine.Tag().String(), OldTools: oldTools.Version, @@ -180,15 +178,15 @@ }) foundTools, err := agenttools.ReadTools(s.DataDir(), newTools.Version) c.Assert(err, jc.ErrorIsNil) - newTools.URL = fmt.Sprintf("https://%s/environment/%s/tools/5.4.5-precise-amd64", - s.APIState.Addr(), coretesting.EnvironmentTag.Id()) + newTools.URL = fmt.Sprintf("https://%s/model/%s/tools/5.4.5-precise-amd64", + s.APIState.Addr(), coretesting.ModelTag.Id()) envtesting.CheckTools(c, foundTools, newTools) } func (s *UpgraderSuite) TestUpgraderRetryAndChanged(c *gc.C) { stor := s.DefaultToolsStorage oldTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.3-precise-amd64")) - s.PatchValue(&version.Current, oldTools.Version) + s.patchVersion(oldTools.Version) newTools := envtesting.AssertUploadFakeToolsVersions( c, stor, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.5-precise-amd64"))[0] err := statetesting.SetAgentVersion(s.State, newTools.Version.Number) @@ -203,7 +201,7 @@ c.Assert(err, jc.ErrorIsNil) u := s.makeUpgrader(c) defer u.Stop() - s.expectUpgradeChannelNotClosed(c) + s.expectInitialUpgradeCheckNotDone(c) for i := 0; i < 3; i++ { select { @@ -247,7 +245,7 @@ stor := s.DefaultToolsStorage newToolsBinary := "5.4.3-precise-amd64" newTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary(newToolsBinary)) - s.PatchValue(&version.Current, newTools.Version) + s.patchVersion(newTools.Version) err := envtools.MergeAndWriteMetadata(stor, "released", "released", coretools.List{newTools}, envtools.DoNotWriteMirrors) c.Assert(err, jc.ErrorIsNil) ugErr := &upgrader.UpgradeReadyError{ @@ -266,7 +264,7 @@ func (s *UpgraderSuite) TestUsesAlreadyDownloadedToolsIfAvailable(c *gc.C) { oldVersion := version.MustParseBinary("1.2.3-quantal-amd64") - s.PatchValue(&version.Current, oldVersion) + s.patchVersion(oldVersion) newVersion := version.MustParseBinary("5.4.3-quantal-amd64") err := statetesting.SetAgentVersion(s.State, newVersion.Number) @@ -279,7 +277,7 @@ u := s.makeUpgrader(c) err = u.Stop() - s.expectUpgradeChannelNotClosed(c) + s.expectInitialUpgradeCheckNotDone(c) envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{ AgentName: s.machine.Tag().String(), @@ -292,7 +290,7 @@ func (s *UpgraderSuite) TestUpgraderRefusesToDowngradeMinorVersions(c *gc.C) { stor := s.DefaultToolsStorage origTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.3-precise-amd64")) - s.PatchValue(&version.Current, origTools.Version) + s.patchVersion(origTools.Version) downgradeTools := envtesting.AssertUploadFakeToolsVersions( c, stor, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.3.3-precise-amd64"))[0] err := statetesting.SetAgentVersion(s.State, downgradeTools.Version.Number) @@ -300,7 +298,7 @@ u := s.makeUpgrader(c) err = u.Stop() - s.expectUpgradeChannelClosed(c) + s.expectInitialUpgradeCheckDone(c) // If the 
upgrade would have triggered, we would have gotten an // UpgradeReadyError, since it was skipped, we get no error c.Check(err, jc.ErrorIsNil) @@ -314,17 +312,15 @@ func (s *UpgraderSuite) TestUpgraderAllowsDowngradingPatchVersions(c *gc.C) { stor := s.DefaultToolsStorage origTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.3-precise-amd64")) - s.PatchValue(&version.Current, origTools.Version) + s.patchVersion(origTools.Version) downgradeTools := envtesting.AssertUploadFakeToolsVersions( c, stor, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.2-precise-amd64"))[0] err := statetesting.SetAgentVersion(s.State, downgradeTools.Version.Number) c.Assert(err, jc.ErrorIsNil) - dummy.SetStorageDelay(coretesting.ShortWait) - u := s.makeUpgrader(c) err = u.Stop() - s.expectUpgradeChannelNotClosed(c) + s.expectInitialUpgradeCheckNotDone(c) envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{ AgentName: s.machine.Tag().String(), OldTools: origTools.Version, @@ -333,8 +329,8 @@ }) foundTools, err := agenttools.ReadTools(s.DataDir(), downgradeTools.Version) c.Assert(err, jc.ErrorIsNil) - downgradeTools.URL = fmt.Sprintf("https://%s/environment/%s/tools/5.4.2-precise-amd64", - s.APIState.Addr(), coretesting.EnvironmentTag.Id()) + downgradeTools.URL = fmt.Sprintf("https://%s/model/%s/tools/5.4.2-precise-amd64", + s.APIState.Addr(), coretesting.ModelTag.Id()) envtesting.CheckTools(c, foundTools, downgradeTools) } @@ -342,21 +338,18 @@ // note: otherwise illegal version jump downgradeVersion := version.MustParseBinary("5.3.0-precise-amd64") s.confVersion = downgradeVersion.Number - s.upgradeRunning = true stor := s.DefaultToolsStorage origTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.3-precise-amd64")) - s.PatchValue(&version.Current, origTools.Version) + s.patchVersion(origTools.Version) downgradeTools := envtesting.AssertUploadFakeToolsVersions( c, stor, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), downgradeVersion)[0] err := statetesting.SetAgentVersion(s.State, downgradeVersion.Number) c.Assert(err, jc.ErrorIsNil) - dummy.SetStorageDelay(coretesting.ShortWait) - u := s.makeUpgrader(c) err = u.Stop() - s.expectUpgradeChannelNotClosed(c) + s.expectInitialUpgradeCheckNotDone(c) envtesting.CheckUpgraderReadyError(c, err, &upgrader.UpgradeReadyError{ AgentName: s.machine.Tag().String(), OldTools: origTools.Version, @@ -365,29 +358,27 @@ }) foundTools, err := agenttools.ReadTools(s.DataDir(), downgradeTools.Version) c.Assert(err, jc.ErrorIsNil) - downgradeTools.URL = fmt.Sprintf("https://%s/environment/%s/tools/5.3.0-precise-amd64", - s.APIState.Addr(), coretesting.EnvironmentTag.Id()) + downgradeTools.URL = fmt.Sprintf("https://%s/model/%s/tools/5.3.0-precise-amd64", + s.APIState.Addr(), coretesting.ModelTag.Id()) envtesting.CheckTools(c, foundTools, downgradeTools) } func (s *UpgraderSuite) TestUpgraderRefusesDowngradeToOrigVersionIfUpgradeNotInProgress(c *gc.C) { downgradeVersion := version.MustParseBinary("5.3.0-precise-amd64") s.confVersion = downgradeVersion.Number - s.upgradeRunning = false + s.upgradeStepsComplete.Unlock() stor := s.DefaultToolsStorage origTools := envtesting.PrimeTools(c, stor, s.DataDir(), s.Environ.Config().AgentStream(), version.MustParseBinary("5.4.3-precise-amd64")) - s.PatchValue(&version.Current, origTools.Version) + s.patchVersion(origTools.Version) 
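// For illustration: the Unlock above marks the upgrade steps as already
// complete, so allowedTargetVersion sees upgradeStepsRunning == false and the
// upgrader must refuse the downgrade to the original version. The gate API
// these tests lean on is small:
//
//	lock := gate.NewLock()
//	lock.IsUnlocked() // => false
//	lock.Unlock()
//	lock.IsUnlocked() // => true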
envtesting.AssertUploadFakeToolsVersions( c, stor, s.Environ.Config().AgentStream(), s.Environ.Config().AgentStream(), downgradeVersion) err := statetesting.SetAgentVersion(s.State, downgradeVersion.Number) c.Assert(err, jc.ErrorIsNil) - dummy.SetStorageDelay(coretesting.ShortWait) - u := s.makeUpgrader(c) err = u.Stop() - s.expectUpgradeChannelClosed(c) + s.expectInitialUpgradeCheckDone(c) // If the upgrade would have triggered, we would have gotten an // UpgradeReadyError, since it was skipped, we get no error === added directory 'src/github.com/juju/juju/worker/upgradesteps' === added file 'src/github.com/juju/juju/worker/upgradesteps/manifold.go' --- src/github.com/juju/juju/worker/upgradesteps/manifold.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/upgradesteps/manifold.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgradesteps + +import ( + "errors" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/state" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + "github.com/juju/juju/worker/gate" + "github.com/juju/names" +) + +// ManifoldConfig defines the names of the manifolds on which a +// Manifold will depend. +type ManifoldConfig struct { + AgentName string + APICallerName string + UpgradeStepsGateName string + OpenStateForUpgrade func() (*state.State, func(), error) + PreUpgradeSteps func(*state.State, agent.Config, bool, bool) error +} + +// Manifold returns a dependency manifold that runs an upgrader +// worker, using the resource names defined in the supplied config. +func Manifold(config ManifoldConfig) dependency.Manifold { + return dependency.Manifold{ + Inputs: []string{ + config.AgentName, + config.APICallerName, + config.UpgradeStepsGateName, + }, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + // Sanity checks + if config.OpenStateForUpgrade == nil { + return nil, errors.New("missing OpenStateForUpgrade in config") + } + if config.PreUpgradeSteps == nil { + return nil, errors.New("missing PreUpgradeSteps in config") + } + + // Get machine agent. + var agent agent.Agent + if err := getResource(config.AgentName, &agent); err != nil { + return nil, err + } + + // Grab the tag and ensure that it's for a machine. + tag, ok := agent.CurrentConfig().Tag().(names.MachineTag) + if !ok { + return nil, errors.New("agent's tag is not a machine tag") + } + + // Get API connection. + var apiConn api.Connection + if err := getResource(config.APICallerName, &apiConn); err != nil { + return nil, err + } + + // Get the machine agent's jobs. + entity, err := apiConn.Agent().Entity(tag) + if err != nil { + return nil, err + } + jobs := entity.Jobs() + + // Get machine instance for setting status on. + machine, err := apiConn.Machiner().Machine(tag) + if err != nil { + return nil, err + } + + // Get upgradesteps completed lock. 
+ var upgradeStepsLock gate.Lock + if err := getResource(config.UpgradeStepsGateName, &upgradeStepsLock); err != nil { + return nil, err + } + + return NewWorker( + upgradeStepsLock, + agent, + apiConn, + jobs, + config.OpenStateForUpgrade, + config.PreUpgradeSteps, + machine, + ) + }, + } +} === added file 'src/github.com/juju/juju/worker/upgradesteps/package_test.go' --- src/github.com/juju/juju/worker/upgradesteps/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/upgradesteps/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgradesteps + +import ( + stdtesting "testing" + + "github.com/juju/juju/testing" +) + +func TestAll(t *stdtesting.T) { + testing.MgoTestPackage(t) +} === added file 'src/github.com/juju/juju/worker/upgradesteps/worker.go' --- src/github.com/juju/juju/worker/upgradesteps/worker.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/upgradesteps/worker.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,465 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package upgradesteps + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + "github.com/juju/utils" + "launchpad.net/tomb" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api" + "github.com/juju/juju/apiserver/params" + cmdutil "github.com/juju/juju/cmd/jujud/util" + "github.com/juju/juju/mongo" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + "github.com/juju/juju/upgrades" + "github.com/juju/juju/version" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/gate" + "github.com/juju/juju/wrench" +) + +var logger = loggo.GetLogger("juju.worker.upgradesteps") + +var ( + PerformUpgrade = upgrades.PerformUpgrade // Allow patching + + // The maximum time a master controller will wait for other + // controllers to come up and indicate they are ready to begin + // running upgrade steps. + UpgradeStartTimeoutMaster = time.Minute * 15 + + // The maximum time a secondary controller will wait for other + // controllers to come up and indicate they are ready to begin + // running upgrade steps. This is effectively "forever" because we + // don't really want secondaries to ever give up once they've + // indicated that they're ready to upgrade. It's up to the master + // to abort the upgrade if required. + // + // This should get reduced when/if master re-elections are + // introduced, in case a master fails to come up for + // upgrade. + UpgradeStartTimeoutSecondary = time.Hour * 4 +) + +// NewLock creates a gate.Lock to be used to synchronise workers which +// need to start after upgrades have completed. If no upgrade steps +// are required the Lock is unlocked and the version in the agent's +// configuration is updated to the currently running version. +// +// The returned Lock should be passed to NewWorker. +func NewLock(a agent.Agent) (gate.Lock, error) { + lock := gate.NewLock() + + if wrench.IsActive("machine-agent", "always-try-upgrade") { + // Always enter upgrade mode. This allows testing of upgrades + // even when there are actually no upgrade steps to run.
+ return lock, nil + } + + err := a.ChangeConfig(func(agentConfig agent.ConfigSetter) error { + if !upgrades.AreUpgradesDefined(agentConfig.UpgradedToVersion()) { + logger.Infof("no upgrade steps required or upgrade steps for %v "+ + "have already been run.", version.Current) + lock.Unlock() + + // Even if no upgrade is required the version number in + // the agent's config still needs to be bumped. + agentConfig.SetUpgradedToVersion(version.Current) + } + return nil + }) + if err != nil { + return nil, err + } + return lock, nil +} + +// StatusSetter defines the single method required to set an agent's +// status. +type StatusSetter interface { + SetStatus(status params.Status, info string, data map[string]interface{}) error +} + +// NewWorker returns a new instance of the upgradesteps worker. It +// will run any required steps to upgrade to the currently running +// Juju version. +func NewWorker( + upgradeComplete gate.Lock, + agent agent.Agent, + apiConn api.Connection, + jobs []multiwatcher.MachineJob, + openState func() (*state.State, func(), error), + preUpgradeSteps func(st *state.State, agentConf agent.Config, isController, isMasterServer bool) error, + machine StatusSetter, +) (worker.Worker, error) { + tag, ok := agent.CurrentConfig().Tag().(names.MachineTag) + if !ok { + return nil, errors.New("machine agent's tag is not a MachineTag") + } + w := &upgradesteps{ + upgradeComplete: upgradeComplete, + agent: agent, + apiConn: apiConn, + jobs: jobs, + openState: openState, + preUpgradeSteps: preUpgradeSteps, + machine: machine, + tag: tag, + } + go func() { + defer w.tomb.Done() + w.tomb.Kill(w.run()) + }() + return w, nil +} + +type upgradesteps struct { + tomb tomb.Tomb + upgradeComplete gate.Lock + agent agent.Agent + apiConn api.Connection + jobs []multiwatcher.MachineJob + openState func() (*state.State, func(), error) + preUpgradeSteps func(st *state.State, agentConf agent.Config, isController, isMaster bool) error + machine StatusSetter + + fromVersion version.Number + toVersion version.Number + tag names.MachineTag + isMaster bool + isController bool + st *state.State +} + +// Kill is part of the worker.Worker interface. +func (w *upgradesteps) Kill() { + w.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (w *upgradesteps) Wait() error { + return w.tomb.Wait() +} + +type apiLostDuringUpgrade struct { + err error +} + +func (e *apiLostDuringUpgrade) Error() string { + return fmt.Sprintf("API connection lost during upgrade: %v", e.err) +} + +func isAPILostDuringUpgrade(err error) bool { + _, ok := err.(*apiLostDuringUpgrade) + return ok +} + +func (w *upgradesteps) run() error { + if wrench.IsActive("machine-agent", "fail-upgrade-start") { + return nil // Make the worker stop + } + + if w.upgradeComplete.IsUnlocked() { + // Our work is already done (we're probably being restarted + // because the API connection has gone down), so do nothing. + return nil + } + + w.fromVersion = w.agent.CurrentConfig().UpgradedToVersion() + w.toVersion = version.Current + if w.fromVersion == w.toVersion { + logger.Infof("upgrade to %v already completed.", w.toVersion) + w.upgradeComplete.Unlock() + return nil + } + + // If the machine agent is a controller, flag that state + // needs to be opened before running upgrade steps + for _, job := range w.jobs { + if job == multiwatcher.JobManageModel { + w.isController = true + } + } + + // We need a *state.State for upgrades. 
We open it independently + // of StateWorker, because we have no guarantees about when + // and how often StateWorker might run. + if w.isController { + var closer func() + var err error + if w.st, closer, err = w.openState(); err != nil { + return err + } + defer closer() + + if w.isMaster, err = IsMachineMaster(w.st, w.tag.Id()); err != nil { + return errors.Trace(err) + } + } + + if err := w.runUpgrades(); err != nil { + // Only return an error from the worker if the connection to + // state went away (possible mongo master change). Returning + // an error when the connection is lost will cause the agent + // to restart. + // + // For other errors, the error is not returned because we want + // the machine agent to stay running in an error state waiting + // for user intervention. + if isAPILostDuringUpgrade(err) { + return err + } + w.reportUpgradeFailure(err, false) + + } else { + // Upgrade succeeded - signal that the upgrade is complete. + logger.Infof("upgrade to %v completed successfully.", w.toVersion) + w.machine.SetStatus(params.StatusStarted, "", nil) + w.upgradeComplete.Unlock() + } + return nil +} + +// runUpgrades runs the upgrade operations for each job type and +// updates the agent's upgradedToVersion on success. +func (w *upgradesteps) runUpgrades() error { + upgradeInfo, err := w.prepareForUpgrade() + if err != nil { + return err + } + + if wrench.IsActive("machine-agent", "fail-upgrade") { + return errors.New("wrench") + } + + if err := w.agent.ChangeConfig(w.runUpgradeSteps); err != nil { + return err + } + + if err := w.finaliseUpgrade(upgradeInfo); err != nil { + return err + } + return nil +} + +func (w *upgradesteps) prepareForUpgrade() (*state.UpgradeInfo, error) { + logger.Infof("checking that upgrade can proceed") + if err := w.preUpgradeSteps(w.st, w.agent.CurrentConfig(), w.st != nil, w.isMaster); err != nil { + return nil, errors.Annotatef(err, "%s cannot be upgraded", names.ReadableString(w.tag)) + } + + if !w.isController { + return nil, nil + } + + logger.Infof("signalling that this controller is ready for upgrade") + info, err := w.st.EnsureUpgradeInfo(w.tag.Id(), w.fromVersion, w.toVersion) + if err != nil { + return nil, errors.Trace(err) + } + + // Controllers need to wait for other controllers to be ready + // to run the upgrade steps. + logger.Infof("waiting for other controllers to be ready for upgrade") + if err := w.waitForOtherControllers(info); err != nil { + if err == tomb.ErrDying { + logger.Warningf(`stopped waiting for other controllers: %v`, err) + return nil, err + } + logger.Errorf(`aborted wait for other controllers: %v`, err) + // If master, trigger a rollback to the previous agent version.
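// (Lowering the model's desired agent version means agents that have already
// restarted into the new tools see an allowed downgrade back to the original
// version, so the model converges on the pre-upgrade tools after an abort.)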
+ if w.isMaster { + logger.Errorf("downgrading model agent version to %v due to aborted upgrade", + w.fromVersion) + if rollbackErr := w.st.SetModelAgentVersion(w.fromVersion); rollbackErr != nil { + logger.Errorf("rollback failed: %v", rollbackErr) + return nil, errors.Annotate(rollbackErr, "failed to roll back desired agent version") + } + } + return nil, errors.Annotate(err, "aborted wait for other controllers") + } + if w.isMaster { + logger.Infof("finished waiting - all controllers are ready to run upgrade steps") + } else { + logger.Infof("finished waiting - the master has completed its upgrade steps") + } + return info, nil +} + +func (w *upgradesteps) waitForOtherControllers(info *state.UpgradeInfo) error { + watcher := info.Watch() + defer watcher.Stop() + + maxWait := getUpgradeStartTimeout(w.isMaster) + timeout := time.After(maxWait) + for { + select { + case <-watcher.Changes(): + if err := info.Refresh(); err != nil { + return errors.Trace(err) + } + if w.isMaster { + if ready, err := info.AllProvisionedControllersReady(); err != nil { + return errors.Trace(err) + } else if ready { + // All controllers ready to start upgrade + err := info.SetStatus(state.UpgradeRunning) + return errors.Trace(err) + } + } else { + if info.Status() == state.UpgradeFinishing { + // Master is done, ok to proceed + return nil + } + } + case <-timeout: + if w.isMaster { + if err := info.Abort(); err != nil { + return errors.Annotate(err, "unable to abort upgrade") + } + } + return errors.Errorf("timed out after %s", maxWait) + case <-w.tomb.Dying(): + return tomb.ErrDying + } + + } +} + +// runUpgradeSteps runs the required upgrade steps for the machine +// agent, retrying on failure. The agent's UpgradedToVersion is set +// once the upgrade is complete. +// +// This function conforms to the agent.ConfigMutator type and is +// designed to be called via a machine agent's ChangeConfig method. +func (w *upgradesteps) runUpgradeSteps(agentConfig agent.ConfigSetter) error { + var upgradeErr error + w.machine.SetStatus(params.StatusStarted, fmt.Sprintf("upgrading to %v", w.toVersion), nil) + + context := upgrades.NewContext(agentConfig, w.apiConn, w.st) + logger.Infof("starting upgrade from %v to %v for %q", w.fromVersion, w.toVersion, w.tag) + + targets := jobsToTargets(w.jobs, w.isMaster) + attempts := getUpgradeRetryStrategy() + for attempt := attempts.Start(); attempt.Next(); { + upgradeErr = PerformUpgrade(w.fromVersion, targets, context) + if upgradeErr == nil { + break + } + if cmdutil.ConnectionIsDead(logger, w.apiConn) { + // API connection has gone away - abort! + return &apiLostDuringUpgrade{upgradeErr} + } + if attempt.HasNext() { + w.reportUpgradeFailure(upgradeErr, true) + } + } + if upgradeErr != nil { + return upgradeErr + } + agentConfig.SetUpgradedToVersion(w.toVersion) + return nil +} + +func (w *upgradesteps) reportUpgradeFailure(err error, willRetry bool) { + retryText := "will retry" + if !willRetry { + retryText = "giving up" + } + logger.Errorf("upgrade from %v to %v for %q failed (%s): %v", + w.fromVersion, w.toVersion, w.tag, retryText, err) + w.machine.SetStatus(params.StatusError, + fmt.Sprintf("upgrade to %v failed (%s): %v", w.toVersion, retryText, err), nil) +} + +func (w *upgradesteps) finaliseUpgrade(info *state.UpgradeInfo) error { + if !w.isController { + return nil + } + + if w.isMaster { + // Tell other controllers that the master has completed its + // upgrade steps. 
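// For illustration, the coordination steps visible in this worker, in the
// order they occur during a successful multi-controller upgrade:
//
//	info, _ := st.EnsureUpgradeInfo(id, from, to) // each controller signals ready
//	// master, once AllProvisionedControllersReady() reports true:
//	info.SetStatus(state.UpgradeRunning)
//	// master, after its own upgrade steps succeed:
//	info.SetStatus(state.UpgradeFinishing)
//	// every controller, after its steps succeed:
//	info.SetControllerDone(id)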
+ if err := info.SetStatus(state.UpgradeFinishing); err != nil { + return errors.Annotate(err, "upgrade done but") + } + } + + if err := info.SetControllerDone(w.tag.Id()); err != nil { + return errors.Annotate(err, "upgrade done but failed to synchronise") + } + + return nil +} + +func getUpgradeStartTimeout(isMaster bool) time.Duration { + if wrench.IsActive("machine-agent", "short-upgrade-timeout") { + // This duration is fairly arbitrary. During manual testing it + // avoids the normal long wait but still provides a small + // window to check the environment status and logs before the + // timeout is triggered. + return time.Minute + } + + if isMaster { + return UpgradeStartTimeoutMaster + } + return UpgradeStartTimeoutSecondary +} + +var IsMachineMaster = func(st *state.State, machineId string) (bool, error) { + if st == nil { + // If there is no state, we aren't a master. + return false, nil + } + // Not calling the agent openState method as it does other checks + // we really don't care about here. All we need here is the machine + // so we can determine if we are the master or not. + machine, err := st.Machine(machineId) + if err != nil { + // This shouldn't happen, and if it does, the state worker will have + // found out before us, and already errored, or is likely to error out + // very shortly. All we do here is return the error. The state worker + // returns an error that will cause the agent to be terminated. + return false, errors.Trace(err) + } + isMaster, err := mongo.IsMaster(st.MongoSession(), machine) + if err != nil { + return false, errors.Trace(err) + } + return isMaster, nil +} + +var getUpgradeRetryStrategy = func() utils.AttemptStrategy { + return utils.AttemptStrategy{ + Delay: 2 * time.Minute, + Min: 5, + } +} + +// jobsToTargets determines the upgrade targets corresponding to the +// jobs assigned to a machine agent. This determines the upgrade steps +// which will run during an upgrade. +func jobsToTargets(jobs []multiwatcher.MachineJob, isMaster bool) (targets []upgrades.Target) { + for _, job := range jobs { + switch job { + case multiwatcher.JobManageModel: + targets = append(targets, upgrades.Controller) + if isMaster { + targets = append(targets, upgrades.DatabaseMaster) + } + case multiwatcher.JobHostUnits: + targets = append(targets, upgrades.HostMachine) + } + } + return +} === added file 'src/github.com/juju/juju/worker/upgradesteps/worker_test.go' --- src/github.com/juju/juju/worker/upgradesteps/worker_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/upgradesteps/worker_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,624 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
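// For illustration, jobsToTargets above resolves to:
//
//	JobHostUnits                   -> upgrades.HostMachine
//	JobManageModel                 -> upgrades.Controller
//	JobManageModel (on the master) -> upgrades.Controller, upgrades.DatabaseMaster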
+ +package upgradesteps + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/names" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + "github.com/juju/utils/arch" + "github.com/juju/utils/series" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/apiserver/params" + cmdutil "github.com/juju/juju/cmd/jujud/util" + "github.com/juju/juju/constraints" + "github.com/juju/juju/environs" + "github.com/juju/juju/instance" + "github.com/juju/juju/mongo" + "github.com/juju/juju/state" + "github.com/juju/juju/state/multiwatcher" + statetesting "github.com/juju/juju/state/testing" + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/testing/factory" + "github.com/juju/juju/upgrades" + "github.com/juju/juju/version" + "github.com/juju/juju/worker/gate" +) + +// TODO(mjs) - these tests are too tightly coupled to the +// implementation. They needn't be internal tests. + +type UpgradeSuite struct { + statetesting.StateSuite + + oldVersion version.Binary + logWriter loggo.TestWriter + connectionDead bool + machineIsMaster bool + preUpgradeError bool +} + +var _ = gc.Suite(&UpgradeSuite{}) + +const fails = true +const succeeds = false + +func (s *UpgradeSuite) SetUpTest(c *gc.C) { + s.StateSuite.SetUpTest(c) + + s.preUpgradeError = false + // Most of these tests normally finish sub-second on a fast machine. + // If any given test hits a minute, we have almost certainly become + // wedged, so dump the logs. + coretesting.DumpTestLogsAfter(time.Minute, c, s) + + s.oldVersion = version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + s.oldVersion.Major = 1 + s.oldVersion.Minor = 16 + + // Don't wait so long in tests. + s.PatchValue(&UpgradeStartTimeoutMaster, time.Duration(time.Millisecond*50)) + s.PatchValue(&UpgradeStartTimeoutSecondary, time.Duration(time.Millisecond*60)) + + // Allow tests to make the API connection appear to be dead. + s.connectionDead = false + s.PatchValue(&cmdutil.ConnectionIsDead, func(loggo.Logger, cmdutil.Pinger) bool { + return s.connectionDead + }) + + s.machineIsMaster = true + fakeIsMachineMaster := func(*state.State, string) (bool, error) { + return s.machineIsMaster, nil + } + s.PatchValue(&IsMachineMaster, fakeIsMachineMaster) + +} + +func (s *UpgradeSuite) captureLogs(c *gc.C) { + c.Assert(loggo.RegisterWriter("upgrade-tests", &s.logWriter, loggo.INFO), gc.IsNil) + s.AddCleanup(func(*gc.C) { + loggo.RemoveWriter("upgrade-tests") + s.logWriter.Clear() + }) +} + +func (s *UpgradeSuite) countUpgradeAttempts(upgradeErr error) *int { + count := 0 + s.PatchValue(&PerformUpgrade, func(version.Number, []upgrades.Target, upgrades.Context) error { + count++ + return upgradeErr + }) + return &count +} + +func (s *UpgradeSuite) TestNewChannelWhenNoUpgradeRequired(c *gc.C) { + // Set the agent's initial upgradedToVersion to almost the same as + // the current version. We want it to be different to + // version.Current (so that we can see it change) but not to + // trigger upgrade steps. + config := NewFakeConfigSetter(names.NewMachineTag("0"), makeBumpedCurrentVersion().Number) + agent := NewFakeAgent(config) + + lock, err := NewLock(agent) + c.Assert(err, jc.ErrorIsNil) + + c.Assert(lock.IsUnlocked(), jc.IsTrue) + // The agent's version should have been updated. 
+	c.Assert(config.Version, gc.Equals, version.Current)
+}
+
+func (s *UpgradeSuite) TestNewChannelWhenUpgradeRequired(c *gc.C) {
+	// Set the agent's upgradedToVersion so that upgrade steps are required.
+	initialVersion := version.MustParse("1.16.0")
+	config := NewFakeConfigSetter(names.NewMachineTag("0"), initialVersion)
+	agent := NewFakeAgent(config)
+
+	lock, err := NewLock(agent)
+	c.Assert(err, jc.ErrorIsNil)
+
+	c.Assert(lock.IsUnlocked(), jc.IsFalse)
+	// The agent's version should NOT have been updated.
+	c.Assert(config.Version, gc.Equals, initialVersion)
+}
+
+func (s *UpgradeSuite) TestRetryStrategy(c *gc.C) {
+	retries := getUpgradeRetryStrategy()
+	c.Assert(retries.Delay, gc.Equals, 2*time.Minute)
+	c.Assert(retries.Min, gc.Equals, 5)
+}
+
+func (s *UpgradeSuite) TestNoUpgradeNecessary(c *gc.C) {
+	attemptsP := s.countUpgradeAttempts(nil)
+	s.captureLogs(c)
+	s.oldVersion.Number = version.Current // nothing to do
+
+	workerErr, config, _, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
+
+	c.Check(workerErr, gc.IsNil)
+	c.Check(*attemptsP, gc.Equals, 0)
+	c.Check(config.Version, gc.Equals, version.Current)
+	c.Check(doneLock.IsUnlocked(), jc.IsTrue)
+}
+
+func (s *UpgradeSuite) TestUpgradeStepsFailure(c *gc.C) {
+	// This test checks what happens when every upgrade attempt fails.
+	// A number of retries should be observed and the agent should end
+	// up in a state where it is still running but is reporting an
+	// error and the upgrade is not flagged as having completed (which
+	// prevents most of the agent's workers from running and keeps the
+	// API in restricted mode).
+
+	attemptsP := s.countUpgradeAttempts(errors.New("boom"))
+	s.captureLogs(c)
+
+	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
+
+	// The worker shouldn't return an error so that the worker and
+	// agent keep running.
+	c.Check(workerErr, gc.IsNil)
+
+	c.Check(*attemptsP, gc.Equals, maxUpgradeRetries)
+	c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't finish
+	c.Assert(statusCalls, jc.DeepEquals,
+		s.makeExpectedStatusCalls(maxUpgradeRetries-1, fails, "boom"))
+	c.Assert(s.logWriter.Log(), jc.LogMatches,
+		s.makeExpectedUpgradeLogs(maxUpgradeRetries-1, "hostMachine", fails, "boom"))
+	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)
+}
+
+func (s *UpgradeSuite) TestUpgradeStepsRetries(c *gc.C) {
+	// This test checks what happens when the first upgrade attempt
+	// fails but the following one succeeds. The final state should be
+	// the same as a successful upgrade which worked first go.
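+	//
+	// PerformUpgrade is faked below so that only the first attempt
+	// fails and any later attempt succeeds.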
+	attempts := 0
+	fail := true
+	fakePerformUpgrade := func(version.Number, []upgrades.Target, upgrades.Context) error {
+		attempts++
+		if fail {
+			fail = false
+			return errors.New("boom")
+		}
+		return nil
+	}
+	s.PatchValue(&PerformUpgrade, fakePerformUpgrade)
+	s.captureLogs(c)
+
+	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
+
+	c.Check(workerErr, gc.IsNil)
+	c.Check(attempts, gc.Equals, 2)
+	c.Check(config.Version, gc.Equals, version.Current) // Upgrade finished
+	c.Assert(statusCalls, jc.DeepEquals, s.makeExpectedStatusCalls(1, succeeds, "boom"))
+	c.Assert(s.logWriter.Log(), jc.LogMatches, s.makeExpectedUpgradeLogs(1, "hostMachine", succeeds, "boom"))
+	c.Check(doneLock.IsUnlocked(), jc.IsTrue)
+}
+
+func (s *UpgradeSuite) TestOtherUpgradeRunFailure(c *gc.C) {
+	// This test checks what happens when something other than the
+	// upgrade steps themselves fails, ensuring the failure is logged
+	// and the agent status is updated.
+
+	fakePerformUpgrade := func(version.Number, []upgrades.Target, upgrades.Context) error {
+		// Delete UpgradeInfo for the upgrade so that finaliseUpgrade() will fail.
+		s.State.ClearUpgradeInfo()
+		return nil
+	}
+	s.PatchValue(&PerformUpgrade, fakePerformUpgrade)
+	s.Factory.MakeMachine(c, &factory.MachineParams{
+		Jobs: []state.MachineJob{state.JobManageModel},
+	})
+	s.captureLogs(c)
+
+	workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobManageModel)
+
+	c.Check(workerErr, gc.IsNil)
+	c.Check(config.Version, gc.Equals, version.Current) // Upgrade almost finished
+	failReason := `upgrade done but: cannot set upgrade status to "finishing": ` +
+		`Another status change may have occurred concurrently`
+	c.Assert(statusCalls, jc.DeepEquals,
+		s.makeExpectedStatusCalls(0, fails, failReason))
+	c.Assert(s.logWriter.Log(), jc.LogMatches,
+		s.makeExpectedUpgradeLogs(0, "databaseMaster", fails, failReason))
+	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)
+}
+
+func (s *UpgradeSuite) TestApiConnectionFailure(c *gc.C) {
+	// This test checks what happens when an upgrade fails because the
+	// connection to mongo has gone away. This will happen when the
+	// mongo master changes. In this case we want the upgrade worker
+	// to return immediately without further retries. The error should
+	// be returned by the worker so that the agent will restart.
+
+	attemptsP := s.countUpgradeAttempts(errors.New("boom"))
+	s.connectionDead = true // Make the connection to state appear to be dead
+	s.captureLogs(c)
+
+	workerErr, config, _, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits)
+
+	c.Check(workerErr, gc.ErrorMatches, "API connection lost during upgrade: boom")
+	c.Check(*attemptsP, gc.Equals, 1)
+	c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't finish
+	c.Assert(doneLock.IsUnlocked(), jc.IsFalse)
+}
+
+func (s *UpgradeSuite) TestAbortWhenOtherControllerDoesntStartUpgrade(c *gc.C) {
+	// This test checks what happens when a controller is upgrading and
+	// one of the other controllers doesn't signal it is ready in time.
+
+	err := s.State.SetModelAgentVersion(version.Current)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// The master controller in this scenario is functionally tested
+	// elsewhere.
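+	// Here the agent runs as a non-master controller, so it must wait
+	// for the other controllers to be ready before running its own
+	// upgrade steps.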
+ s.machineIsMaster = false + + s.create3Controllers(c) + s.captureLogs(c) + attemptsP := s.countUpgradeAttempts(nil) + + workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobManageModel) + + c.Check(workerErr, gc.IsNil) + c.Check(*attemptsP, gc.Equals, 0) + c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't happen + c.Assert(doneLock.IsUnlocked(), jc.IsFalse) + + // The environment agent-version should still be the new version. + // It's up to the master to trigger the rollback. + s.assertEnvironAgentVersion(c, version.Current) + + causeMsg := " timed out after 60ms" + c.Assert(s.logWriter.Log(), jc.LogMatches, []jc.SimpleMessage{ + {loggo.INFO, "waiting for other controllers to be ready for upgrade"}, + {loggo.ERROR, "aborted wait for other controllers: timed out after 60ms"}, + {loggo.ERROR, `upgrade from .+ to .+ for "machine-0" failed \(giving up\): ` + + "aborted wait for other controllers:" + causeMsg}, + }) + c.Assert(statusCalls, jc.DeepEquals, []StatusCall{{ + params.StatusError, + fmt.Sprintf( + "upgrade to %s failed (giving up): aborted wait for other controllers:"+causeMsg, + version.Current), + }}) +} + +func (s *UpgradeSuite) TestSuccessMaster(c *gc.C) { + // This test checks what happens when an upgrade works on the + // first attempt on a master controller. + s.machineIsMaster = true + info := s.checkSuccess(c, "databaseMaster", func(*state.UpgradeInfo) {}) + c.Assert(info.Status(), gc.Equals, state.UpgradeFinishing) +} + +func (s *UpgradeSuite) TestSuccessSecondary(c *gc.C) { + // This test checks what happens when an upgrade works on the + // first attempt on a secondary controller. + s.machineIsMaster = false + mungeInfo := func(info *state.UpgradeInfo) { + // Indicate that the master is done + err := info.SetStatus(state.UpgradeRunning) + c.Assert(err, jc.ErrorIsNil) + err = info.SetStatus(state.UpgradeFinishing) + c.Assert(err, jc.ErrorIsNil) + } + s.checkSuccess(c, "controller", mungeInfo) +} + +func (s *UpgradeSuite) checkSuccess(c *gc.C, target string, mungeInfo func(*state.UpgradeInfo)) *state.UpgradeInfo { + _, machineIdB, machineIdC := s.create3Controllers(c) + + // Indicate that machine B and C are ready to upgrade + vPrevious := s.oldVersion.Number + vNext := version.Current + info, err := s.State.EnsureUpgradeInfo(machineIdB, vPrevious, vNext) + c.Assert(err, jc.ErrorIsNil) + _, err = s.State.EnsureUpgradeInfo(machineIdC, vPrevious, vNext) + c.Assert(err, jc.ErrorIsNil) + + mungeInfo(info) + + attemptsP := s.countUpgradeAttempts(nil) + s.captureLogs(c) + + workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobManageModel) + + c.Check(workerErr, gc.IsNil) + c.Check(*attemptsP, gc.Equals, 1) + c.Check(config.Version, gc.Equals, version.Current) // Upgrade finished + c.Assert(statusCalls, jc.DeepEquals, s.makeExpectedStatusCalls(0, succeeds, "")) + c.Assert(s.logWriter.Log(), jc.LogMatches, s.makeExpectedUpgradeLogs(0, target, succeeds, "")) + c.Check(doneLock.IsUnlocked(), jc.IsTrue) + + err = info.Refresh() + c.Assert(err, jc.ErrorIsNil) + c.Assert(info.ControllersDone(), jc.DeepEquals, []string{"0"}) + return info +} + +func (s *UpgradeSuite) TestJobsToTargets(c *gc.C) { + check := func(jobs []multiwatcher.MachineJob, isMaster bool, expectedTargets ...upgrades.Target) { + c.Assert(jobsToTargets(jobs, isMaster), jc.SameContents, expectedTargets) + } + + check([]multiwatcher.MachineJob{multiwatcher.JobHostUnits}, false, upgrades.HostMachine) + 
check([]multiwatcher.MachineJob{multiwatcher.JobManageModel}, false, upgrades.Controller) + check([]multiwatcher.MachineJob{multiwatcher.JobManageModel}, true, + upgrades.Controller, upgrades.DatabaseMaster) + check([]multiwatcher.MachineJob{multiwatcher.JobManageModel, multiwatcher.JobHostUnits}, false, + upgrades.Controller, upgrades.HostMachine) + check([]multiwatcher.MachineJob{multiwatcher.JobManageModel, multiwatcher.JobHostUnits}, true, + upgrades.Controller, upgrades.DatabaseMaster, upgrades.HostMachine) +} + +func (s *UpgradeSuite) TestPreUpgradeFail(c *gc.C) { + s.preUpgradeError = true + s.captureLogs(c) + + workerErr, config, statusCalls, doneLock := s.runUpgradeWorker(c, multiwatcher.JobHostUnits) + + c.Check(workerErr, jc.ErrorIsNil) + c.Check(config.Version, gc.Equals, s.oldVersion.Number) // Upgrade didn't finish + c.Assert(doneLock.IsUnlocked(), jc.IsFalse) + + causeMessage := `machine 0 cannot be upgraded: preupgrade error` + failMessage := fmt.Sprintf( + `upgrade from %s to %s for "machine-0" failed \(giving up\): %s`, + s.oldVersion.Number, version.Current, causeMessage) + c.Assert(s.logWriter.Log(), jc.LogMatches, []jc.SimpleMessage{ + {loggo.INFO, "checking that upgrade can proceed"}, + {loggo.ERROR, failMessage}, + }) + + statusMessage := fmt.Sprintf( + `upgrade to %s failed (giving up): %s`, version.Current, causeMessage) + c.Assert(statusCalls, jc.DeepEquals, []StatusCall{{ + params.StatusError, statusMessage, + }}) +} + +// Run just the upgradesteps worker with a fake machine agent and +// fake agent config. +func (s *UpgradeSuite) runUpgradeWorker(c *gc.C, jobs ...multiwatcher.MachineJob) ( + error, *fakeConfigSetter, []StatusCall, gate.Lock, +) { + s.setInstantRetryStrategy(c) + config := s.makeFakeConfig() + agent := NewFakeAgent(config) + doneLock, err := NewLock(agent) + c.Assert(err, jc.ErrorIsNil) + machineStatus := &testStatusSetter{} + worker, err := NewWorker(doneLock, agent, nil, jobs, s.openStateForUpgrade, s.preUpgradeSteps, machineStatus) + c.Assert(err, jc.ErrorIsNil) + return worker.Wait(), config, machineStatus.Calls, doneLock +} + +func (s *UpgradeSuite) openStateForUpgrade() (*state.State, func(), error) { + mongoInfo := s.State.MongoConnectionInfo() + st, err := state.Open(s.State.ModelTag(), mongoInfo, mongo.DefaultDialOpts(), environs.NewStatePolicy()) + if err != nil { + return nil, nil, err + } + return st, func() { st.Close() }, nil +} + +func (s *UpgradeSuite) preUpgradeSteps(st *state.State, agentConf agent.Config, isController, isMasterController bool) error { + if s.preUpgradeError { + return errors.New("preupgrade error") + } + return nil +} + +func (s *UpgradeSuite) makeFakeConfig() *fakeConfigSetter { + return NewFakeConfigSetter(names.NewMachineTag("0"), s.oldVersion.Number) +} + +func (s *UpgradeSuite) create3Controllers(c *gc.C) (machineIdA, machineIdB, machineIdC string) { + machine0 := s.Factory.MakeMachine(c, &factory.MachineParams{ + Jobs: []state.MachineJob{state.JobManageModel}, + }) + machineIdA = machine0.Id() + s.setMachineAlive(c, machineIdA) + + changes, err := s.State.EnableHA(3, constraints.Value{}, "quantal", nil) + c.Assert(err, jc.ErrorIsNil) + c.Assert(len(changes.Added), gc.Equals, 2) + + machineIdB = changes.Added[0] + s.setMachineProvisioned(c, machineIdB) + s.setMachineAlive(c, machineIdB) + + machineIdC = changes.Added[1] + s.setMachineProvisioned(c, machineIdC) + s.setMachineAlive(c, machineIdC) + + return +} + +func (s *UpgradeSuite) setMachineProvisioned(c *gc.C, id string) { + machine, err := 
s.State.Machine(id) + c.Assert(err, jc.ErrorIsNil) + err = machine.SetProvisioned(instance.Id(id+"-inst"), "nonce", nil) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *UpgradeSuite) setMachineAlive(c *gc.C, id string) { + machine, err := s.State.Machine(id) + c.Assert(err, jc.ErrorIsNil) + pinger, err := machine.SetAgentPresence() + c.Assert(err, jc.ErrorIsNil) + s.AddCleanup(func(c *gc.C) { pinger.Stop() }) +} + +// Return a version the same as the current software version, but with +// the build number bumped. +// +// The version Tag is also cleared so that upgrades.PerformUpgrade +// doesn't think it needs to run upgrade steps unnecessarily. +func makeBumpedCurrentVersion() version.Binary { + v := version.Binary{ + Number: version.Current, + Arch: arch.HostArch(), + Series: series.HostSeries(), + } + v.Build++ + v.Tag = "" + return v +} + +const maxUpgradeRetries = 3 + +func (s *UpgradeSuite) setInstantRetryStrategy(c *gc.C) { + s.PatchValue(&getUpgradeRetryStrategy, func() utils.AttemptStrategy { + c.Logf("setting instant retry strategy for upgrade: retries=%d", maxUpgradeRetries) + return utils.AttemptStrategy{ + Delay: 0, + Min: maxUpgradeRetries, + } + }) +} + +func (s *UpgradeSuite) makeExpectedStatusCalls(retryCount int, expectFail bool, failReason string) []StatusCall { + calls := []StatusCall{{ + params.StatusStarted, + fmt.Sprintf("upgrading to %s", version.Current), + }} + for i := 0; i < retryCount; i++ { + calls = append(calls, StatusCall{ + params.StatusError, + fmt.Sprintf("upgrade to %s failed (will retry): %s", version.Current, failReason), + }) + } + if expectFail { + calls = append(calls, StatusCall{ + params.StatusError, + fmt.Sprintf("upgrade to %s failed (giving up): %s", version.Current, failReason), + }) + } else { + calls = append(calls, StatusCall{params.StatusStarted, ""}) + } + return calls +} + +func (s *UpgradeSuite) makeExpectedUpgradeLogs(retryCount int, target string, expectFail bool, failReason string) []jc.SimpleMessage { + outLogs := []jc.SimpleMessage{} + + if target == "databaseMaster" || target == "controller" { + outLogs = append(outLogs, jc.SimpleMessage{ + loggo.INFO, "waiting for other controllers to be ready for upgrade", + }) + var waitMsg string + switch target { + case "databaseMaster": + waitMsg = "all controllers are ready to run upgrade steps" + case "controller": + waitMsg = "the master has completed its upgrade steps" + } + outLogs = append(outLogs, jc.SimpleMessage{loggo.INFO, "finished waiting - " + waitMsg}) + } + + outLogs = append(outLogs, jc.SimpleMessage{ + loggo.INFO, fmt.Sprintf( + `starting upgrade from %s to %s for "machine-0"`, + s.oldVersion.Number, version.Current), + }) + + failMessage := fmt.Sprintf( + `upgrade from %s to %s for "machine-0" failed \(%%s\): %s`, + s.oldVersion.Number, version.Current, failReason) + + for i := 0; i < retryCount; i++ { + outLogs = append(outLogs, jc.SimpleMessage{loggo.ERROR, fmt.Sprintf(failMessage, "will retry")}) + } + if expectFail { + outLogs = append(outLogs, jc.SimpleMessage{loggo.ERROR, fmt.Sprintf(failMessage, "giving up")}) + } else { + outLogs = append(outLogs, jc.SimpleMessage{loggo.INFO, + fmt.Sprintf(`upgrade to %s completed successfully.`, version.Current)}) + } + return outLogs +} + +func (s *UpgradeSuite) assertEnvironAgentVersion(c *gc.C, expected version.Number) { + envConfig, err := s.State.ModelConfig() + c.Assert(err, jc.ErrorIsNil) + agentVersion, ok := envConfig.AgentVersion() + c.Assert(ok, jc.IsTrue) + c.Assert(agentVersion, gc.Equals, expected) +} + +// 
NewFakeConfigSetter returns a fakeConfigSetter which implements
+// just enough of the agent.ConfigSetter interface to keep the upgrade
+// steps worker happy.
+func NewFakeConfigSetter(agentTag names.Tag, initialVersion version.Number) *fakeConfigSetter {
+	return &fakeConfigSetter{
+		AgentTag: agentTag,
+		Version:  initialVersion,
+	}
+}
+
+type fakeConfigSetter struct {
+	agent.ConfigSetter
+	AgentTag names.Tag
+	Version  version.Number
+}
+
+func (s *fakeConfigSetter) Tag() names.Tag {
+	return s.AgentTag
+}
+
+func (s *fakeConfigSetter) UpgradedToVersion() version.Number {
+	return s.Version
+}
+
+func (s *fakeConfigSetter) SetUpgradedToVersion(newVersion version.Number) {
+	s.Version = newVersion
+}
+
+// NewFakeAgent returns a fakeAgent which implements the agent.Agent
+// interface. This provides enough MachineAgent functionality to
+// support upgrades.
+func NewFakeAgent(confSetter agent.ConfigSetter) *fakeAgent {
+	return &fakeAgent{
+		config: confSetter,
+	}
+}
+
+type fakeAgent struct {
+	config agent.ConfigSetter
+}
+
+func (a *fakeAgent) CurrentConfig() agent.Config {
+	return a.config
+}
+
+func (a *fakeAgent) ChangeConfig(mutate agent.ConfigMutator) error {
+	return mutate(a.config)
+}
+
+type StatusCall struct {
+	Status params.Status
+	Info   string
+}
+
+type testStatusSetter struct {
+	Calls []StatusCall
+}
+
+func (s *testStatusSetter) SetStatus(status params.Status, info string, _ map[string]interface{}) error {
+	s.Calls = append(s.Calls, StatusCall{status, info})
+	return nil
+}

=== added directory 'src/github.com/juju/juju/worker/upgradewaiter'
=== added file 'src/github.com/juju/juju/worker/upgradewaiter/manifold.go'
--- src/github.com/juju/juju/worker/upgradewaiter/manifold.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/upgradewaiter/manifold.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,133 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package upgradewaiter
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"launchpad.net/tomb"
+
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+	"github.com/juju/juju/worker/gate"
+)
+
+var logger = loggo.GetLogger("juju.worker.upgradewaiter")
+
+type ManifoldConfig struct {
+	// UpgradeStepsWaiterName is the name of a gate.Waiter which
+	// reports when upgrade steps have been run.
+	UpgradeStepsWaiterName string
+
+	// UpgradeCheckWaiterName is the name of a gate.Waiter which
+	// reports when the initial check for the need to upgrade has
+	// been done.
+	UpgradeCheckWaiterName string
+}
+
+// Manifold returns a dependency.Manifold which aggregates the
+// upgradesteps lock and the upgrader's "initial check" lock into a
+// single boolean output. The output is false until both locks are
+// unlocked. To make it easy to depend on this manifold, the
+// manifold's worker restarts when the output value changes, causing
+// dependent workers to be restarted.
+func Manifold(config ManifoldConfig) dependency.Manifold {
+
+	// This lock is unlocked when both the upgradesteps and upgrader
+	// locks are unlocked. It exists outside of the start func and
+	// worker code so that the state can be maintained beyond restart
+	// of the manifold's worker.
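+	//
+	// Dependent manifolds read the aggregated state via the Output
+	// func below. A minimal consumer sketch (the "upgradewaiter"
+	// resource name here is illustrative) looks like:
+	//
+	//	var upgradesDone bool
+	//	if err := getResource("upgradewaiter", &upgradesDone); err != nil {
+	//		return nil, err
+	//	}
+	//	if !upgradesDone {
+	//		return nil, dependency.ErrMissing
+	//	}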
+ done := gate.NewLock() + + return dependency.Manifold{ + Inputs: []string{ + config.UpgradeStepsWaiterName, + config.UpgradeCheckWaiterName, + }, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + var stepsWaiter gate.Waiter + if err := getResource(config.UpgradeStepsWaiterName, &stepsWaiter); err != nil { + return nil, err + } + var checkWaiter gate.Waiter + if err := getResource(config.UpgradeCheckWaiterName, &checkWaiter); err != nil { + return nil, err + } + + w := &upgradeWaiter{ + done: done, + stepsWaiter: stepsWaiter, + checkWaiter: checkWaiter, + } + go func() { + defer w.tomb.Done() + w.tomb.Kill(w.wait()) + }() + return w, nil + }, + Output: func(in worker.Worker, out interface{}) error { + inWorker, _ := in.(*upgradeWaiter) + if inWorker == nil { + return errors.Errorf("in should be a *upgradeWaiter; is %T", in) + } + switch outPointer := out.(type) { + case *bool: + *outPointer = done.IsUnlocked() + default: + return errors.Errorf("out should be a *bool; is %T", out) + } + return nil + }, + } +} + +type upgradeWaiter struct { + tomb tomb.Tomb + stepsWaiter gate.Waiter + checkWaiter gate.Waiter + done gate.Lock +} + +func (w *upgradeWaiter) wait() error { + stepsCh := getWaiterChannel(w.stepsWaiter) + checkCh := getWaiterChannel(w.checkWaiter) + + for { + // If both waiters have unlocked and the aggregate gate to + // signal upgrade completion hasn't been unlocked yet, unlock + // it and trigger an upgradeWaiter restart so that dependent + // manifolds notice. + if stepsCh == nil && checkCh == nil && !w.done.IsUnlocked() { + logger.Infof("startup upgrade operations complete") + w.done.Unlock() + return dependency.ErrBounce + } + select { + case <-w.tomb.Dying(): + return tomb.ErrDying + case <-stepsCh: + stepsCh = nil + case <-checkCh: + checkCh = nil + } + } +} + +func getWaiterChannel(waiter gate.Waiter) <-chan struct{} { + // If a gate is unlocked, don't select on it. + if waiter.IsUnlocked() { + return nil + } + return waiter.Unlocked() +} + +// Kill is part of the worker.Worker interface. +func (w *upgradeWaiter) Kill() { + w.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (w *upgradeWaiter) Wait() error { + return w.tomb.Wait() +} === added file 'src/github.com/juju/juju/worker/upgradewaiter/manifold_test.go' --- src/github.com/juju/juju/worker/upgradewaiter/manifold_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/upgradewaiter/manifold_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,158 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package upgradewaiter_test + +import ( + "time" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/gate" + "github.com/juju/juju/worker/upgradewaiter" +) + +type ManifoldSuite struct { + testing.IsolationSuite + manifold dependency.Manifold + worker worker.Worker +} + +var _ = gc.Suite(&ManifoldSuite{}) + +func (s *ManifoldSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.manifold = upgradewaiter.Manifold(upgradewaiter.ManifoldConfig{ + UpgradeStepsWaiterName: "steps-waiter", + UpgradeCheckWaiterName: "check-waiter", + }) +} + +func (s *ManifoldSuite) TestInputs(c *gc.C) { + c.Assert(s.manifold.Inputs, jc.SameContents, []string{"steps-waiter", "check-waiter"}) +} + +func (s *ManifoldSuite) TestStartNoStepsWaiter(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "steps-waiter": dt.StubResource{Error: dependency.ErrMissing}, + "check-waiter": dt.StubResource{Output: gate.NewLock()}, + }) + w, err := s.manifold.Start(getResource) + c.Assert(w, gc.IsNil) + c.Assert(err, gc.Equals, dependency.ErrMissing) +} + +func (s *ManifoldSuite) TestStartNoCheckWaiter(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "steps-waiter": dt.StubResource{Output: gate.NewLock()}, + "check-waiter": dt.StubResource{Error: dependency.ErrMissing}, + }) + w, err := s.manifold.Start(getResource) + c.Assert(w, gc.IsNil) + c.Assert(err, gc.Equals, dependency.ErrMissing) +} + +func (s *ManifoldSuite) TestStartSuccess(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "steps-waiter": dt.StubResource{Output: gate.NewLock()}, + "check-waiter": dt.StubResource{Output: gate.NewLock()}, + }) + w, err := s.manifold.Start(getResource) + c.Assert(err, jc.ErrorIsNil) + checkStop(c, w) +} + +func (s *ManifoldSuite) TestOutput(c *gc.C) { + stepsLock := gate.NewLock() + checkLock := gate.NewLock() + getResource := dt.StubGetResource(dt.StubResources{ + "steps-waiter": dt.StubResource{Output: stepsLock}, + "check-waiter": dt.StubResource{Output: checkLock}, + }) + w, err := s.manifold.Start(getResource) + c.Assert(err, jc.ErrorIsNil) + + // Upgrades not completed yet so output is false. + s.assertOutputFalse(c, w) + + // Unlock one of the upgrade gates, output should still be false. + stepsLock.Unlock() + s.assertOutputFalse(c, w) + + // Unlock the other gate, output should now be true. + checkLock.Unlock() + s.assertOutputTrue(c, w) + + // .. and the worker should exit with ErrBounce. + checkStopWithError(c, w, dependency.ErrBounce) + + // Restarting the worker should result in the output immediately + // being true. 
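+	// This works because the aggregate done lock is created outside
+	// the manifold's start func, so its unlocked state survives the
+	// restart.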
+	w2, err := s.manifold.Start(getResource)
+	c.Assert(err, jc.ErrorIsNil)
+	s.assertOutputTrue(c, w2)
+	checkStop(c, w2)
+}
+
+func (s *ManifoldSuite) TestOutputWithWrongWorker(c *gc.C) {
+	getResource := dt.StubGetResource(dt.StubResources{
+		"steps-waiter": dt.StubResource{Output: gate.NewLock()},
+		"check-waiter": dt.StubResource{Output: gate.NewLock()},
+	})
+	_, err := s.manifold.Start(getResource)
+	c.Assert(err, jc.ErrorIsNil)
+
+	type dummyWorker struct {
+		worker.Worker
+	}
+	var foo bool
+	err = s.manifold.Output(new(dummyWorker), &foo)
+	c.Assert(err, gc.ErrorMatches, `in should be a \*upgradeWaiter;.+`)
+}
+
+func (s *ManifoldSuite) TestOutputWithWrongType(c *gc.C) {
+	getResource := dt.StubGetResource(dt.StubResources{
+		"steps-waiter": dt.StubResource{Output: gate.NewLock()},
+		"check-waiter": dt.StubResource{Output: gate.NewLock()},
+	})
+	w, err := s.manifold.Start(getResource)
+	c.Assert(err, jc.ErrorIsNil)
+
+	var foo int
+	err = s.manifold.Output(w, &foo)
+	c.Assert(err, gc.ErrorMatches, `out should be a \*bool;.+`)
+}
+
+func (s *ManifoldSuite) assertOutputFalse(c *gc.C, w worker.Worker) {
+	time.Sleep(coretesting.ShortWait)
+	var done bool
+	s.manifold.Output(w, &done)
+	c.Assert(done, jc.IsFalse)
+}
+
+func (s *ManifoldSuite) assertOutputTrue(c *gc.C, w worker.Worker) {
+	for attempt := coretesting.LongAttempt.Start(); attempt.Next(); {
+		var done bool
+		s.manifold.Output(w, &done)
+		if done {
+			return
+		}
+	}
+	c.Fatalf("timed out waiting for output to become true")
+}
+
+func checkStop(c *gc.C, w worker.Worker) {
+	err := worker.Stop(w)
+	c.Check(err, jc.ErrorIsNil)
+}
+
+func checkStopWithError(c *gc.C, w worker.Worker, expectedErr error) {
+	err := worker.Stop(w)
+	c.Check(err, gc.Equals, expectedErr)
+}

=== added file 'src/github.com/juju/juju/worker/upgradewaiter/package_test.go'
--- src/github.com/juju/juju/worker/upgradewaiter/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/upgradewaiter/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package upgradewaiter_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/juju/worker/util/postupgrade.go'
--- src/github.com/juju/juju/worker/util/postupgrade.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/juju/worker/util/postupgrade.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,68 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package util
+
+import (
+	"github.com/juju/juju/agent"
+	"github.com/juju/juju/api/base"
+	"github.com/juju/juju/worker"
+	"github.com/juju/juju/worker/dependency"
+)
+
+// Many machine agent manifolds depend on just the agent, an API
+// connection and upgrades being complete; this type configures them.
+type PostUpgradeManifoldConfig struct {
+	AgentName         string
+	APICallerName     string
+	UpgradeWaiterName string
+}
+
+// UpgradeWaitNotRequired can be passed as the UpgradeWaiterName in
+// the config if the manifold shouldn't wait for upgrades to
+// complete. This is useful for manifolds that need to run in both the
+// unit agent and machine agent.
+const UpgradeWaitNotRequired = "-"
+
+// PostUpgradeStartFunc encapsulates the behaviour that varies among PostUpgradeManifolds.
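+//
+// A start func just constructs a worker from the agent and API
+// connection once they're available. A minimal sketch (newSomeWorker
+// is a hypothetical constructor) might look like:
+//
+//	func startSomeWorker(a agent.Agent, caller base.APICaller) (worker.Worker, error) {
+//		return newSomeWorker(a.CurrentConfig(), caller)
+//	}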
+type PostUpgradeStartFunc func(agent.Agent, base.APICaller) (worker.Worker, error) + +// PostUpgradeManifold returns a dependency.Manifold that calls the +// supplied start func with API and agent resources once machine agent +// upgrades have completed (and all required resources are present). +// +// The wait for upgrade completion can be skipped if +// UpgradeWaitNotRequired is passed as the UpgradeWaiterName. +func PostUpgradeManifold(config PostUpgradeManifoldConfig, start PostUpgradeStartFunc) dependency.Manifold { + inputs := []string{ + config.AgentName, + config.APICallerName, + } + if config.UpgradeWaiterName != UpgradeWaitNotRequired { + inputs = append(inputs, config.UpgradeWaiterName) + } + return dependency.Manifold{ + Inputs: inputs, + Start: func(getResource dependency.GetResourceFunc) (worker.Worker, error) { + if config.UpgradeWaiterName != UpgradeWaitNotRequired { + var upgradesDone bool + if err := getResource(config.UpgradeWaiterName, &upgradesDone); err != nil { + return nil, err + } + if !upgradesDone { + return nil, dependency.ErrMissing + } + } + + var agent agent.Agent + if err := getResource(config.AgentName, &agent); err != nil { + return nil, err + } + var apiCaller base.APICaller + if err := getResource(config.APICallerName, &apiCaller); err != nil { + return nil, err + } + return start(agent, apiCaller) + }, + } +} === added file 'src/github.com/juju/juju/worker/util/postupgrade_test.go' --- src/github.com/juju/juju/worker/util/postupgrade_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/util/postupgrade_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,161 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package util_test + +import ( + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/agent" + "github.com/juju/juju/api/base" + "github.com/juju/juju/worker" + "github.com/juju/juju/worker/dependency" + dt "github.com/juju/juju/worker/dependency/testing" + "github.com/juju/juju/worker/util" +) + +type PostUpgradeManifoldSuite struct { + testing.IsolationSuite + testing.Stub + manifold dependency.Manifold + worker worker.Worker +} + +var _ = gc.Suite(&PostUpgradeManifoldSuite{}) + +func (s *PostUpgradeManifoldSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.Stub = testing.Stub{} + s.worker = &dummyWorker{} + s.manifold = util.PostUpgradeManifold(util.PostUpgradeManifoldConfig{ + AgentName: "agent-name", + APICallerName: "api-caller-name", + UpgradeWaiterName: "upgradewaiter-name", + }, s.newWorker) +} + +func (s *PostUpgradeManifoldSuite) newWorker(a agent.Agent, apiCaller base.APICaller) (worker.Worker, error) { + s.AddCall("newWorker", a, apiCaller) + if err := s.NextErr(); err != nil { + return nil, err + } + return s.worker, nil +} + +func (s *PostUpgradeManifoldSuite) TestInputs(c *gc.C) { + c.Check(s.manifold.Inputs, jc.DeepEquals, []string{ + "agent-name", "api-caller-name", "upgradewaiter-name"}) +} + +func (s *PostUpgradeManifoldSuite) TestOutput(c *gc.C) { + c.Check(s.manifold.Output, gc.IsNil) +} + +func (s *PostUpgradeManifoldSuite) TestUpgradeWaiterMissing(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Error: dependency.ErrMissing}, + }) + + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.Equals, dependency.ErrMissing) +} + +func (s *PostUpgradeManifoldSuite) 
TestUpgradesNotComplete(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Output: false}, + "agent-name": dt.StubResource{Output: new(dummyAgent)}, + "api-caller-name": dt.StubResource{Output: new(dummyApiCaller)}, + }) + + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.Equals, dependency.ErrMissing) +} + +func (s *PostUpgradeManifoldSuite) TestStartAgentMissing(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Output: true}, + "agent-name": dt.StubResource{Error: dependency.ErrMissing}, + }) + + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.Equals, dependency.ErrMissing) +} + +func (s *PostUpgradeManifoldSuite) TestStartApiConnMissing(c *gc.C) { + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Output: true}, + "agent-name": dt.StubResource{Output: &dummyAgent{}}, + "api-caller-name": dt.StubResource{Error: dependency.ErrMissing}, + }) + + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.Equals, dependency.ErrMissing) +} + +func (s *PostUpgradeManifoldSuite) TestStartFailure(c *gc.C) { + expectAgent := &dummyAgent{} + expectApiCaller := &dummyApiCaller{} + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Output: true}, + "agent-name": dt.StubResource{Output: expectAgent}, + "api-caller-name": dt.StubResource{Output: expectApiCaller}, + }) + s.SetErrors(errors.New("some error")) + + worker, err := s.manifold.Start(getResource) + c.Check(worker, gc.IsNil) + c.Check(err, gc.ErrorMatches, "some error") + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "newWorker", + Args: []interface{}{expectAgent, expectApiCaller}, + }}) +} + +func (s *PostUpgradeManifoldSuite) TestStartSuccess(c *gc.C) { + expectAgent := &dummyAgent{} + expectApiCaller := &dummyApiCaller{} + getResource := dt.StubGetResource(dt.StubResources{ + "upgradewaiter-name": dt.StubResource{Output: true}, + "agent-name": dt.StubResource{Output: expectAgent}, + "api-caller-name": dt.StubResource{Output: expectApiCaller}, + }) + + worker, err := s.manifold.Start(getResource) + c.Check(err, jc.ErrorIsNil) + c.Check(worker, gc.Equals, s.worker) + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "newWorker", + Args: []interface{}{expectAgent, expectApiCaller}, + }}) +} + +func (s *PostUpgradeManifoldSuite) TestUpgradeWaitNotRequired(c *gc.C) { + manifold := util.PostUpgradeManifold(util.PostUpgradeManifoldConfig{ + AgentName: "agent-name", + APICallerName: "api-caller-name", + UpgradeWaiterName: util.UpgradeWaitNotRequired, + }, s.newWorker) + + c.Check(manifold.Inputs, jc.DeepEquals, []string{"agent-name", "api-caller-name"}) + + expectAgent := &dummyAgent{} + expectApiCaller := &dummyApiCaller{} + getResource := dt.StubGetResource(dt.StubResources{ + "agent-name": dt.StubResource{Output: expectAgent}, + "api-caller-name": dt.StubResource{Output: expectApiCaller}, + }) + worker, err := manifold.Start(getResource) + c.Check(err, jc.ErrorIsNil) + c.Check(worker, gc.Equals, s.worker) + s.CheckCalls(c, []testing.StubCall{{ + FuncName: "newWorker", + Args: []interface{}{expectAgent, expectApiCaller}, + }}) +} === modified file 'src/github.com/juju/juju/worker/util/valueworker.go' --- src/github.com/juju/juju/worker/util/valueworker.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/juju/worker/util/valueworker.go 
2016-03-22 15:18:22 +0000 @@ -12,24 +12,11 @@ "github.com/juju/juju/worker" ) -// valueWorker implements a degenerate worker wrapping a single value. -type valueWorker struct { - tomb tomb.Tomb - value interface{} -} - -// Kill is part of the worker.Worker interface. -func (v *valueWorker) Kill() { - v.tomb.Kill(nil) -} - -// Wait is part of the worker.Worker interface. -func (v *valueWorker) Wait() error { - return v.tomb.Wait() -} - // NewValueWorker returns a degenerate worker that exposes the supplied value -// when passed into ValueWorkerOutput. +// when passed into ValueWorkerOutput. Please do not supply values that have +// their own dependency or lifecycle considerations; such values will subvert +// the operation of any containing dependency.Engine by insulating it from the +// failures and dependency changes of the contained value. func NewValueWorker(value interface{}) (worker.Worker, error) { if value == nil { return nil, errors.New("NewValueWorker expects a value") @@ -65,3 +52,19 @@ outValV.Set(inValV.Convert(outValT)) return nil } + +// valueWorker implements a degenerate worker wrapping a single value. +type valueWorker struct { + tomb tomb.Tomb + value interface{} +} + +// Kill is part of the worker.Worker interface. +func (v *valueWorker) Kill() { + v.tomb.Kill(nil) +} + +// Wait is part of the worker.Worker interface. +func (v *valueWorker) Wait() error { + return v.tomb.Wait() +} === added directory 'src/github.com/juju/juju/worker/workertest' === added file 'src/github.com/juju/juju/worker/workertest/check.go' --- src/github.com/juju/juju/worker/workertest/check.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/workertest/check.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,99 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package workertest + +import ( + "time" + + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + coretesting "github.com/juju/juju/testing" + "github.com/juju/juju/worker" +) + +var ( + // AliveDelay is the minimum time an Alive helper will Wait for its worker + // to stop before returning successfully. + aliveDelay = coretesting.ShortWait + + // KillTimeout is the maximum time a Kill helper will Wait for its worker + // before failing the test. + killTimeout = coretesting.LongWait +) + +// CheckAlive Wait()s a short time for the supplied worker to return an error, +// and fails the test if it does. If it doesn't fail, it'll leave a goroutine +// running in the background, blocked on the worker's death; but that doesn't +// matter, because of *course* you correctly deferred a suitable Kill helper +// as soon as you created the worker in the first place. Right? Right. +// +// It doesn't Assert and is therefore suitable for use from any goroutine. +func CheckAlive(c *gc.C, w worker.Worker) { + wait := make(chan error, 1) + go func() { + wait <- w.Wait() + }() + select { + case <-time.After(aliveDelay): + case err := <-wait: + c.Errorf("expected alive worker; failed with %v", err) + } +} + +// CheckKilled Wait()s for the supplied worker's error, which it returns for +// further analysis, or fails the test after a timeout expires. It doesn't +// Assert and is therefore suitable for use from any goroutine. 
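+//
+// Typical use (mirroring this package's own tests):
+//
+//	w.Kill()
+//	err := workertest.CheckKilled(c, w)
+//	c.Check(err, gc.Equals, expectedError)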
+func CheckKilled(c *gc.C, w worker.Worker) error { + wait := make(chan error, 1) + go func() { + wait <- w.Wait() + }() + select { + case err := <-wait: + return err + case <-time.After(killTimeout): + c.Errorf("timed out waiting for worker to stop") + return errors.New("workertest: worker not stopping") + } +} + +// CheckKill Kill()s the supplied worker and Wait()s for its error, which it +// returns for further analysis, or fails the test after a timeout expires. +// It doesn't Assert and is therefore suitable for use from any goroutine. +func CheckKill(c *gc.C, w worker.Worker) error { + w.Kill() + return CheckKilled(c, w) +} + +// CleanKill calls CheckKill with the supplied arguments, and Checks that the +// returned error is nil. It's particularly suitable for deferring: +// +// someWorker, err := some.NewWorker() +// c.Assert(err, jc.ErrorIsNil) +// defer workertest.CleanKill(c, someWorker) +// +// ...in the large number (majority?) of situations where a worker is expected +// to run successfully; and it doesn't Assert, and is therefore suitable for use +// from any goroutine. +func CleanKill(c *gc.C, w worker.Worker) { + err := CheckKill(c, w) + c.Check(err, jc.ErrorIsNil) +} + +// DirtyKill calls CheckKill with the supplied arguments, and logs the returned +// error. It's particularly suitable for deferring: +// +// someWorker, err := some.NewWorker() +// c.Assert(err, jc.ErrorIsNil) +// defer workertest.DirtyKill(c, someWorker) +// +// ...in the cases where we expect a worker to fail, but aren't specifically +// testing that failure; and it doesn't Assert, and is therefore suitable for +// use from any goroutine. +func DirtyKill(c *gc.C, w worker.Worker) { + err := CheckKill(c, w) + c.Logf("ignoring error: %v", err) +} === added file 'src/github.com/juju/juju/worker/workertest/export_test.go' --- src/github.com/juju/juju/worker/workertest/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/workertest/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package workertest + +// KillTimeout exists because it's the least unpleasant of the many options that +// let us test the package tolerably. Consider: +// +// * we ought to actually write some tests for the claimed behaviour +// * waiting 10s for a test to pass is stupid, we can't test with the default +// * using a clock abstraction misses the point: it's all about wall clocks +// * nobody's going to bother writing explicit checker config in their tests +// +// ...and so we convince ourselves that this mutable global state is a tolerable +// price to pay given the limited locus of influence. +var KillTimeout = &killTimeout === added file 'src/github.com/juju/juju/worker/workertest/package_test.go' --- src/github.com/juju/juju/worker/workertest/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/workertest/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package workertest_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/juju/worker/workertest/workers.go' --- src/github.com/juju/juju/worker/workertest/workers.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/workertest/workers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,88 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package workertest + +import ( + "launchpad.net/tomb" + + "github.com/juju/juju/worker" +) + +// NewErrorWorker returns a Worker that runs until Kill()ed; at which point it +// fails with the supplied error. The caller takes responsibility for causing +// it to be Kill()ed, lest the goroutine be leaked, but the worker has no +// outside interactions or safety concerns so there's no particular need to +// Wait() for it. +func NewErrorWorker(err error) worker.Worker { + w := &errorWorker{err: err} + go func() { + defer w.tomb.Done() + <-w.tomb.Dying() + }() + return w +} + +type errorWorker struct { + tomb tomb.Tomb + err error +} + +// Kill is part of the worker.Worker interface. +func (w *errorWorker) Kill() { + w.tomb.Kill(w.err) +} + +// Wait is part of the worker.Worker interface. +func (w *errorWorker) Wait() error { + return w.tomb.Wait() +} + +// NewDeadWorker returns a Worker that's already dead, and always immediately +// returns the supplied error from Wait(). +func NewDeadWorker(err error) worker.Worker { + return &deadWorker{err: err} +} + +type deadWorker struct { + err error +} + +// Kill is part of the worker.Worker interface. +func (w *deadWorker) Kill() {} + +// Wait is part of the worker.Worker interface. +func (w *deadWorker) Wait() error { + return w.err +} + +// NewForeverWorker returns a Worker that ignores Kill() calls. You must be sure +// to call ReallyKill() to cause the worker to fail with the supplied error, +// lest any goroutines trying to manage it be leaked or blocked forever. +func NewForeverWorker(err error) *ForeverWorker { + w := &ForeverWorker{err: err} + go func() { + defer w.tomb.Done() + <-w.tomb.Dying() + }() + return w +} + +// ForeverWorker is a Worker that breaks its contract. Use with care. +type ForeverWorker struct { + tomb tomb.Tomb + err error +} + +// Kill is part of the worker.Worker interface. +func (w *ForeverWorker) Kill() {} + +// Wait is part of the worker.Worker interface. +func (w *ForeverWorker) Wait() error { + return w.tomb.Wait() +} + +// ReallyKill does what Kill should. +func (w *ForeverWorker) ReallyKill() { + w.tomb.Kill(w.err) +} === added file 'src/github.com/juju/juju/worker/workertest/workertest_test.go' --- src/github.com/juju/juju/worker/workertest/workertest_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/juju/worker/workertest/workertest_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,120 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package workertest_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "github.com/juju/juju/worker/workertest" +) + +type Suite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&Suite{}) + +func (s *Suite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.PatchValue(workertest.KillTimeout, time.Second) +} + +func (s *Suite) CheckFailed(c *gc.C) { + if c.Failed() { + c.Succeed() + } else { + c.Errorf("expected failure; none observed") + } + c.Logf("-------------------------------") +} + +func (s *Suite) TestCheckAliveSuccess(c *gc.C) { + w := workertest.NewErrorWorker(nil) + defer workertest.CleanKill(c, w) + + workertest.CheckAlive(c, w) +} + +func (s *Suite) TestCheckAliveFailure(c *gc.C) { + w := workertest.NewDeadWorker(nil) + + workertest.CheckAlive(c, w) + s.CheckFailed(c) +} + +func (s *Suite) TestCheckKilledSuccess(c *gc.C) { + expect := errors.New("snifplog") + w := workertest.NewErrorWorker(expect) + defer workertest.DirtyKill(c, w) + + w.Kill() + err := workertest.CheckKilled(c, w) + c.Check(err, gc.Equals, expect) +} + +func (s *Suite) TestCheckKilledTimeout(c *gc.C) { + w := workertest.NewErrorWorker(nil) + defer workertest.CleanKill(c, w) + + err := workertest.CheckKilled(c, w) + s.CheckFailed(c) + c.Check(err, gc.ErrorMatches, "workertest: worker not stopping") +} + +func (s *Suite) TestCheckKillSuccess(c *gc.C) { + expect := errors.New("fledbon") + w := workertest.NewErrorWorker(expect) + defer workertest.DirtyKill(c, w) + + err := workertest.CheckKill(c, w) + c.Check(err, gc.Equals, expect) +} + +func (s *Suite) TestCheckKillTimeout(c *gc.C) { + w := workertest.NewForeverWorker(nil) + defer w.ReallyKill() + + err := workertest.CheckKill(c, w) + s.CheckFailed(c) + c.Check(err, gc.ErrorMatches, "workertest: worker not stopping") +} + +func (s *Suite) TestCleanKillSuccess(c *gc.C) { + w := workertest.NewErrorWorker(nil) + + workertest.CleanKill(c, w) +} + +func (s *Suite) TestCleanKillFailure(c *gc.C) { + w := workertest.NewErrorWorker(errors.New("kebdrix")) + + workertest.CleanKill(c, w) + s.CheckFailed(c) +} + +func (s *Suite) TestCleanKillTimeout(c *gc.C) { + w := workertest.NewForeverWorker(nil) + defer w.ReallyKill() + + workertest.CleanKill(c, w) + s.CheckFailed(c) +} + +func (s *Suite) TestDirtyKillSuccess(c *gc.C) { + w := workertest.NewErrorWorker(errors.New("hifstit")) + + workertest.DirtyKill(c, w) +} + +func (s *Suite) TestDirtyKillTimeout(c *gc.C) { + w := workertest.NewForeverWorker(nil) + defer w.ReallyKill() + + workertest.DirtyKill(c, w) + s.CheckFailed(c) +} === modified file 'src/github.com/juju/juju/wrench/wrench.go' --- src/github.com/juju/juju/wrench/wrench.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/juju/wrench/wrench.go 2016-03-22 15:18:22 +0000 @@ -12,16 +12,16 @@ "sync" "github.com/juju/loggo" + "github.com/juju/utils/series" "github.com/juju/juju/juju/paths" - "github.com/juju/juju/version" ) var ( enabledMu sync.Mutex enabled = true - dataDir = paths.MustSucceed(paths.DataDir(version.Current.Series)) + dataDir = paths.MustSucceed(paths.DataDir(series.HostSeries())) wrenchDir = filepath.Join(dataDir, "wrench") jujuUid = os.Getuid() ) === removed directory 'src/github.com/juju/jujusvg' === removed file 'src/github.com/juju/jujusvg/.gitignore' --- src/github.com/juju/jujusvg/.gitignore 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/.gitignore 1970-01-01 00:00:00 +0000 @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o 
-*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test === removed file 'src/github.com/juju/jujusvg/LICENSE' --- src/github.com/juju/jujusvg/LICENSE 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/LICENSE 1970-01-01 00:00:00 +0000 @@ -1,191 +0,0 @@ -All files in this repository are licensed as follows. If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. - -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. 
- - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. 
- - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. === removed file 'src/github.com/juju/jujusvg/Makefile' --- src/github.com/juju/jujusvg/Makefile 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/Makefile 1970-01-01 00:00:00 +0000 @@ -1,55 +0,0 @@ -ifndef GOPATH - $(warning You need to set up a GOPATH.) -endif - -PROJECT := github.com/juju/jujusvg -PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) - -help: - @echo "Available targets:" - @echo " deps - fetch all dependencies" - @echo " build - build the project" - @echo " check - run tests" - @echo " install - install the library in your GOPATH" - @echo " clean - clean the project" - -# Start of GOPATH-dependent targets. Some targets only make sense - -# and will only work - when this tree is found on the GOPATH. -ifeq ($(CURDIR),$(PROJECT_DIR)) - -deps: $(GOPATH)/bin/godeps - go get -v $(PROJECT)/... - -build: - go build $(PROJECT)/... - -check: - go test $(PROJECT)/... 
- -install: - go install $(INSTALL_FLAGS) -v $(PROJECT)/... - -clean: - go clean $(PROJECT)/... - -else - -deps: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -build: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -check: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -install: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -clean: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -endif -# End of GOPATH-dependent targets. - -.PHONY: help deps build check install clean === removed file 'src/github.com/juju/jujusvg/README.md' --- src/github.com/juju/jujusvg/README.md 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/README.md 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -jujusvg -======= - -A library for generating SVGs from Juju bundles and environments. - -Installation ------------- - -To start using jujusvg, first ensure you have a valid Go environment, then run -the following: - - go get github.com/juju/jujusvg - -Dependencies ------------- - -The project uses godeps (https://launchpad.net/godeps) to manage Go -dependencies. To install this, run: - - - go get launchpad.net/godeps - -After installing it, you can update the dependencies to the revision specified -in the `dependencies.tsv` file with the following: - - make deps - -Use `make create-deps` to update the dependencies file. - -Usage ------ - -Given a Juju bundle, you can convert this to an SVG programmatically. This -generates a simple SVG representation of a bundle or bundles that can then be -included in a webpage as a visualization. - -For an example of how to use this library, please see `examples/generatesvg.go`. -You can run this example like this: - - go run generatesvg.go bundle.yaml > bundle.svg - -The examples directory also includes three sample bundles that you can play -around with, or you can use the [Juju GUI](https://demo.jujucharms.com) to -generate your own bundles. - -Design-related assets ---------------------- - -Some assets are specified based on assets provided by the design team. These -assets are specified in the defs section of the generated SVG, and can thus -be found in the Canvas.definition() method. Should these assets be updated, -the SVGo code will need to be updated to reflect these changes. Unfortunately, -this can only be done by hand, so care must be taken to match the SVGs provided -by design exactly. These original SVG assets live in the `assets` directory.
- -Current assets in use: - -* The service block -* The relation health indicator === removed directory 'src/github.com/juju/jujusvg/assets' === removed file 'src/github.com/juju/jujusvg/assets/relation-icon-healthy.svg' --- src/github.com/juju/jujusvg/assets/relation-icon-healthy.svg 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/assets/relation-icon-healthy.svg 1970-01-01 00:00:00 +0000 @@ -1,13 +0,0 @@ - - - - - - - - - - - === removed file 'src/github.com/juju/jujusvg/assets/service_module.svg' --- src/github.com/juju/jujusvg/assets/service_module.svg 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/assets/service_module.svg 1970-01-01 00:00:00 +0000 @@ -1,41 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - === removed file 'src/github.com/juju/jujusvg/canvas.go' --- src/github.com/juju/jujusvg/canvas.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/canvas.go 1970-01-01 00:00:00 +0000 @@ -1,312 +0,0 @@ -package jujusvg - -import ( - "fmt" - "image" - "io" - "math" - - svg "github.com/ajstarks/svgo" -) - -const ( - iconSize = 96 - serviceBlockSize = 189 - healthCircleRadius = 10 - relationLineWidth = 2 - maxInt = int(^uint(0) >> 1) - minInt = -(maxInt - 1) - maxHeight = 450 - maxWidth = 1000 - - fontColor = "#505050" - relationColor = "#38B44A" -) - -// Canvas holds the parsed form of a bundle or environment. -type Canvas struct { - services []*service - relations []*serviceRelation -} - -// service represents a service deployed to an environment and contains the -// point of the top-left corner of the icon, icon URL, and additional metadata. -type service struct { - name string - iconUrl string - point image.Point -} - -// serviceRelation represents a relation created between two services. -type serviceRelation struct { - serviceA *service - serviceB *service -} - -// line represents a line segment with two endpoints. -type line struct { - p0, p1 image.Point -} - -// definition creates any necessary defs that can be used later in the SVG. -func (s *service) definition(canvas *svg.SVG) { -} - -// usage creates any necessary tags for actually using the service in the SVG. -func (s *service) usage(canvas *svg.SVG) { - canvas.Use( - s.point.X, - s.point.Y, - "#serviceBlock", - fmt.Sprintf(`id="%s"`, s.name)) - canvas.Image( - s.point.X+serviceBlockSize/2-iconSize/2, - s.point.Y+serviceBlockSize/2-iconSize/2, - iconSize, - iconSize, - s.iconUrl) - canvas.Textlines( - s.point.X+serviceBlockSize/2, - s.point.Y+serviceBlockSize/6, - []string{s.name}, - serviceBlockSize/10, - 0, - "#505050", - "middle") -} - -// definition creates any necessary defs that can be used later in the SVG. -func (r *serviceRelation) definition(canvas *svg.SVG) { -} - -// usage creates any necessary tags for actually using the relation in the SVG. -func (r *serviceRelation) usage(canvas *svg.SVG) { - l := r.shortestRelation() - canvas.Line( - l.p0.X, - l.p0.Y, - l.p1.X, - l.p1.Y, - fmt.Sprintf(`stroke="%s"`, relationColor), - fmt.Sprintf(`stroke-width="%dpx"`, relationLineWidth), - fmt.Sprintf(`stroke-dasharray="%s"`, strokeDashArray(l))) - mid := l.p0.Add(l.p1).Div(2).Sub(point(healthCircleRadius, healthCircleRadius)) - canvas.Use(mid.X, mid.Y, "#healthCircle") -} - -// shortestRelation finds the shortest line between two services, assuming -// that each service can be connected on one of four cardinal points only. 
-func (r *serviceRelation) shortestRelation() line { - aConnectors, bConnectors := r.serviceA.cardinalPoints(), r.serviceB.cardinalPoints() - shortestDistance := float64(maxInt) - shortestPair := line{ - p0: r.serviceA.point, - p1: r.serviceB.point, - } - for _, pointA := range aConnectors { - for _, pointB := range bConnectors { - ab := line{p0: pointA, p1: pointB} - distance := ab.length() - if distance < shortestDistance { - shortestDistance = distance - shortestPair = ab - } - } - } - return shortestPair -} - -// cardinalPoints generates the points for each of the four cardinal points -// of each service. -func (s *service) cardinalPoints() []image.Point { - return []image.Point{ - point(s.point.X+serviceBlockSize/2, s.point.Y), - point(s.point.X, s.point.Y+serviceBlockSize/2), - point(s.point.X+serviceBlockSize/2, s.point.Y+serviceBlockSize), - point(s.point.X+serviceBlockSize, s.point.Y+serviceBlockSize/2), - } -} - -// strokeDashArray generates the stroke-dasharray attribute content so that -// the relation health indicator is placed in an empty space. -func strokeDashArray(l line) string { - return fmt.Sprintf("%.2f, %d", l.length()/2-healthCircleRadius, healthCircleRadius*2) -} - -// length calculates the length of a line. -func (l *line) length() float64 { - dp := l.p0.Sub(l.p1) - return math.Sqrt(square(float64(dp.X)) + square(float64(dp.Y))) -} - -// addService adds a new service to the canvas. -func (c *Canvas) addService(s *service) { - c.services = append(c.services, s) -} - -// addRelation adds a new relation to the canvas. -func (c *Canvas) addRelation(r *serviceRelation) { - c.relations = append(c.relations, r) -} - -// layout adjusts all items so that they are positioned appropriately, -// and returns the overall size of the canvas. -func (c *Canvas) layout() (int, int) { - minWidth := maxInt - minHeight := maxInt - maxWidth := minInt - maxHeight := minInt - - for _, service := range c.services { - if service.point.X < minWidth { - minWidth = service.point.X - } - if service.point.Y < minHeight { - minHeight = service.point.Y - } - if service.point.X > maxWidth { - maxWidth = service.point.X - } - if service.point.Y > maxHeight { - maxHeight = service.point.Y - } - } - for _, service := range c.services { - service.point = service.point.Sub(point(minWidth, minHeight)) - } - return abs(maxWidth-minWidth) + serviceBlockSize, - abs(maxHeight-minHeight) + serviceBlockSize -} - -func (c *Canvas) definition(canvas *svg.SVG) { - canvas.Def() - defer canvas.DefEnd() - - // Service block. - // Note: this is implemented based off the service block SVG provided by - // design; any changes to that will likely incur an entire rewrite of this - // bit of SVGo. See the README for more information. 
- canvas.Group(`id="serviceBlock"`, - `transform="translate(115.183,4.8),scale(0.8)"`) - canvas.Gtransform("translate(-399.571,-251.207)") - canvas.Path(`M410.565,479.165h-73.988c-38.324,0-57.56,0-68.272-10.713c-10.712-10.713-10.712-29.949-10.712-68.273 -v-73.986c-0.001-38.324-0.001-57.561,10.711-68.273c10.713-10.713,29.949-10.713,68.274-10.713h73.988 -c38.324,0,57.561,0,68.272,10.713c10.713,10.712,10.713,29.949,10.713,68.273v73.986c0,38.324,0,57.561-10.713,68.273 -C468.126,479.165,448.889,479.165,410.565,479.165z M336.577,257.207c-34.445,0-53.419,0-61.203,7.784 -s-7.783,26.757-7.782,61.202v73.986c0,34.444,0,53.419,7.784,61.202c7.784,7.784,26.757,7.784,61.201,7.784h73.988 -c34.444,0,53.418,0,61.202-7.784c7.783-7.783,7.783-26.758,7.783-61.202v-73.986c0-34.444,0-53.418-7.783-61.202 -c-7.784-7.784-26.758-7.784-61.202-7.784H336.577z`, - `fill="#BBBBBB"`) - canvas.Path(`M410.565,479.165h-73.988c-38.324,0-57.56,0-68.272-10.713c-10.712-10.713-10.712-29.949-10.712-68.273 -v-73.986c0-38.324,0-57.561,10.712-68.273c10.713-10.713,29.949-10.713,68.272-10.713h73.988c38.324,0,57.561,0,68.272,10.713 -c10.713,10.712,10.713,29.949,10.713,68.273v73.986c0,38.324,0,57.561-10.713,68.273 -C468.126,479.165,448.889,479.165,410.565,479.165z M336.577,257.207c-34.444,0-53.417,0-61.201,7.784 -s-7.784,26.758-7.784,61.202v73.986c0,34.444,0,53.419,7.784,61.202c7.784,7.784,26.757,7.784,61.201,7.784h73.988 -c34.444,0,53.418,0,61.201-7.784c7.784-7.783,7.784-26.758,7.784-61.202v-73.986c0-34.444,0-53.418-7.784-61.202 -c-7.783-7.784-26.757-7.784-61.201-7.784H336.577z`, - `fill="#BBBBBB"`) - canvas.Gend() // Gtransform - canvas.Path(`M-42,219.958h32c2.209,0,4,1.791,4,4v2c0,2.209-1.791,4-4,4h-32 -c-2.209,0-4-1.791-4-4v-2C-46,221.749-44.209,219.958-42,219.958z`, - `fill-rule="evenodd"`, - `clip-rule="evenodd"`, - `fill="#BBBBBB"`) - canvas.Path(`M-42-6h32c2.209,0,4,1.791,4,4v2c0,2.209-1.791,4-4,4h-32 -c-2.209,0-4-1.791-4-4v-2C-46-4.209-44.209-6-42-6z`, - `fill-rule="evenodd"`, - `clip-rule="evenodd"`, - `fill="#BBBBBB"`) - canvas.Path(`M81.979,127.979v-32c0-2.209,1.791-4,4-4h2c2.209,0,4,1.791,4,4 -v32c0,2.209-1.791,4-4,4h-2C83.771,131.979,81.979,130.188,81.979,127.979z`, - `fill-rule="evenodd"`, - `clip-rule="evenodd"`, - `fill="#BBBBBB"`) - canvas.Path(`M-143.979,127.979v-32c0-2.209,1.791-4,4-4h2c2.209,0,4,1.791,4,4 -v32c0,2.209-1.791,4-4,4h-2C-142.188,131.979-143.979,130.188-143.979,127.979z`, - `fill-rule="evenodd"`, - `clip-rule="evenodd"`, - `fill="#BBBBBB"`) - canvas.Path(`M10.994-1h-73.988c-73.987,0-73.987,0-73.985,73.986v73.986c0,73.986,0,73.986,73.985,73.986h73.988 -c73.985,0,73.985,0,73.985-73.986V72.986C84.979-1,84.979-1,10.994-1z`, - `fill="#FFFFFF"`) - canvas.Gend() // Gid - - // Relation health circle. - canvas.Gid("healthCircle") - canvas.Circle( - healthCircleRadius, - healthCircleRadius, - healthCircleRadius, - fmt.Sprintf("stroke:%s;fill:none;stroke-width:%dpx", relationColor, relationLineWidth)) - canvas.Circle( - healthCircleRadius, - healthCircleRadius, - healthCircleRadius/2, - fmt.Sprintf("fill:%s", relationColor)) - canvas.Gend() - - // Service and relation specific defs. 
- for _, relation := range c.relations { - relation.definition(canvas) - } - for _, service := range c.services { - service.definition(canvas) - } -} - -func (c *Canvas) relationsGroup(canvas *svg.SVG) { - canvas.Gid("relations") - defer canvas.Gend() - for _, relation := range c.relations { - relation.usage(canvas) - } -} - -func (c *Canvas) servicesGroup(canvas *svg.SVG) { - canvas.Gid("services") - defer canvas.Gend() - for _, service := range c.services { - service.usage(canvas) - } -} - -// Marshal renders the SVG to the given io.Writer. -func (c *Canvas) Marshal(w io.Writer) { - - // TODO check write errors and return an error from - // Marshal if the write fails. The svg package does not - // itself check or return write errors; a possible work-around - // is to wrap the writer in a custom writer that panics - // on error, and catch the panic here. - width, height := c.layout() - - canvas := svg.New(w) - canvas.Start( - width, - height, - fmt.Sprintf(`style="font-family:Ubuntu, sans-serif;" viewBox="0 0 %d %d"`, - width, height)) - defer canvas.End() - c.definition(canvas) - c.relationsGroup(canvas) - c.servicesGroup(canvas) -} - -// abs returns the absolute value of a number. -func abs(x int) int { - if x < 0 { - return -x - } else { - return x - } -} - -// square multiplies a number by itself. -func square(x float64) float64 { - return x * x -} - -// point generates an image.Point given its coordinates. -func point(x, y int) image.Point { - return image.Point{x, y} -} === removed file 'src/github.com/juju/jujusvg/canvas_test.go' --- src/github.com/juju/jujusvg/canvas_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/canvas_test.go 1970-01-01 00:00:00 +0000 @@ -1,225 +0,0 @@ -package jujusvg - -import ( - "bytes" - "encoding/xml" - "image" - "io" - - "github.com/ajstarks/svgo" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" -) - -type CanvasSuite struct{} - -var _ = gc.Suite(&CanvasSuite{}) - -func (s *CanvasSuite) TestServiceRender(c *gc.C) { - // Ensure that the Service's definition and usage methods output the - // proper SVG elements. - var buf bytes.Buffer - svg := svg.New(&buf) - service := service{ - name: "foo", - point: image.Point{ - X: 0, - Y: 0, - }, - iconUrl: "foo", - } - service.definition(svg) - service.usage(svg) - c.Assert(buf.String(), gc.Equals, - ` - - -foo - -`) -} - -func (s *CanvasSuite) TestRelationRender(c *gc.C) { - // Ensure that the Relation's definition and usage methods output the - // proper SVG elements. - var buf bytes.Buffer - svg := svg.New(&buf) - relation := serviceRelation{ - serviceA: &service{ - point: image.Point{ - X: 0, - Y: 0, - }, - }, - serviceB: &service{ - point: image.Point{ - X: 100, - Y: 100, - }, - }, - } - relation.definition(svg) - relation.usage(svg) - c.Assert(buf.String(), gc.Equals, - ` - -`) -} - -func (s *CanvasSuite) TestLayout(c *gc.C) { - // Ensure that the SVG is sized exactly around the positioned services. 
- canvas := Canvas{} - canvas.addService(&service{ - point: image.Point{ - X: 0, - Y: 0, - }, - }) - canvas.addService(&service{ - point: image.Point{ - X: 100, - Y: 100, - }, - }) - width, height := canvas.layout() - c.Assert(width, gc.Equals, 289) - c.Assert(height, gc.Equals, 289) - canvas.addService(&service{ - point: image.Point{ - X: -100, - Y: -100, - }, - }) - canvas.addService(&service{ - point: image.Point{ - X: -100, - Y: 100, - }, - }) - canvas.addService(&service{ - point: image.Point{ - X: 200, - Y: -100, - }, - }) - width, height = canvas.layout() - c.Assert(width, gc.Equals, 489) - c.Assert(height, gc.Equals, 389) -} - -func (s *CanvasSuite) TestMarshal(c *gc.C) { - // Ensure that the internal representation of the canvas can be marshalled - // to SVG. - var buf bytes.Buffer - canvas := Canvas{} - serviceA := &service{ - name: "service-a", - point: image.Point{ - X: 0, - Y: 0, - }, - } - serviceB := &service{ - name: "service-b", - point: image.Point{ - X: 100, - Y: 100, - }, - } - canvas.addService(serviceA) - canvas.addService(serviceB) - canvas.addRelation(&serviceRelation{ - serviceA: serviceA, - serviceB: serviceB, - }) - canvas.Marshal(&buf) - c.Logf("%s", buf) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - - - - - - - - - - - - - - - - - - - - - - - - - -service-a - - - - -service-b - - - -`)) -} - -func assertXMLEqual(c *gc.C, obtained, expected []byte) { - toksObtained := xmlTokens(c, obtained) - toksExpected := xmlTokens(c, expected) - c.Assert(toksObtained, jc.DeepEquals, toksExpected) -} - -func xmlTokens(c *gc.C, data []byte) []xml.Token { - dec := xml.NewDecoder(bytes.NewReader(data)) - var toks []xml.Token - for { - tok, err := dec.Token() - if err == io.EOF { - return toks - } - c.Assert(err, gc.IsNil) - - if cdata, ok := tok.(xml.CharData); ok { - // It's char data - trim all white space and ignore it - // if it's all blank. - cdata = bytes.TrimSpace(cdata) - if len(cdata) == 0 { - continue - } - tok = cdata - } - toks = append(toks, xml.CopyToken(tok)) - } -} === removed file 'src/github.com/juju/jujusvg/doc.go' --- src/github.com/juju/jujusvg/doc.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/doc.go 1970-01-01 00:00:00 +0000 @@ -1,8 +0,0 @@ -// Copyright 2014 Canonical, Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -// jujusvg generates SVG representations of various Juju artifacts, such as -// charm bundles or live environments. -// -// For more information, please refer to the README file in this directory. 
-package jujusvg === removed directory 'src/github.com/juju/jujusvg/examples' === removed file 'src/github.com/juju/jujusvg/examples/charmworld.yaml' --- src/github.com/juju/jujusvg/examples/charmworld.yaml 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/examples/charmworld.yaml 1970-01-01 00:00:00 +0000 @@ -1,32 +0,0 @@ -services: - mongodb: - charm: "cs:precise/mongodb-21" - num_units: 1 - annotations: - "gui-x": "940.5" - "gui-y": "388.7698359714502" - constraints: "mem=2G cpu-cores=1" - elasticsearch: - charm: "cs:~charming-devs/precise/elasticsearch-2" - num_units: 1 - annotations: - "gui-x": "490.5" - "gui-y": "369.7698359714502" - constraints: "mem=2G cpu-cores=1" - charmworld: - charm: "cs:~juju-jitsu/precise/charmworld-58" - num_units: 1 - expose: true - annotations: - "gui-x": "813.5" - "gui-y": "112.23016402854975" - options: - charm_import_limit: -1 - source: "lp:~bac/charmworld/ingest-local-charms" - revno: 511 -relations: - - - "charmworld:essearch" - - "elasticsearch:essearch" - - - "charmworld:database" - - "mongodb:database" -series: precise === removed file 'src/github.com/juju/jujusvg/examples/generatesvg.go' --- src/github.com/juju/jujusvg/examples/generatesvg.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/examples/generatesvg.go 1970-01-01 00:00:00 +0000 @@ -1,53 +0,0 @@ -package main - -// This is a demo application that uses the jujusvg library to build a bundle SVG -// from a given bundle.yaml file. - -import ( - "io/ioutil" - "log" - "os" - "strings" - - "gopkg.in/juju/charm.v5" - - // Import the jujusvg library and the juju charm library - "github.com/juju/jujusvg" -) - -// iconURL takes a reference to a charm and returns the URL for that charm's icon. - // In this case, we're using the api.jujucharms.com API to provide the icon's URL. -func iconURL(ref *charm.Reference) string { - return "https://api.jujucharms.com/v4/" + ref.Path() + "/archive/icon.svg" -} - -func main() { - if len(os.Args) != 2 { - log.Fatalf("Please provide the name of a bundle file as the first argument") - } - - // First, we need to read our bundle data into a []byte - bundle_data, err := ioutil.ReadFile(os.Args[1]) - if err != nil { - log.Fatalf("Error reading bundle: %s\n", err) - } - - // Next, generate a charm.BundleData from the byte array by passing it to charm.ReadBundleData. - // This gives us an in-memory object representation of the bundle that we can pass to jujusvg - bundle, err := charm.ReadBundleData(strings.NewReader(string(bundle_data))) - if err != nil { - log.Fatalf("Error parsing bundle: %s\n", err) - } - - // Next, build a canvas of the bundle. This is a simplified version of a charm.BundleData - // that contains just the position information and charm icon URLs necessary to build - // the SVG representation of the bundle - canvas, err := jujusvg.NewFromBundle(bundle, iconURL) - if err != nil { - log.Fatalf("Error generating canvas: %s\n", err) - } - - // Finally, marshal that canvas as SVG to os.Stdout; this will print the SVG data - // required to generate an image of the bundle.
- canvas.Marshal(os.Stdout) -} === removed file 'src/github.com/juju/jujusvg/examples/mediawiki-scalable.yaml' --- src/github.com/juju/jujusvg/examples/mediawiki-scalable.yaml 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/examples/mediawiki-scalable.yaml 1970-01-01 00:00:00 +0000 @@ -1,114 +0,0 @@ -services: - haproxy: - charm: cs:precise/haproxy-35 - num_units: 1 - options: - default_log: global - default_mode: http - default_options: httplog, dontlognull - default_retries: 3 - default_timeouts: queue 20000, client 50000, connect 5000, server 50000 - enable_monitoring: false - global_debug: false - global_group: haproxy - global_log: 127.0.0.1 local0, 127.0.0.1 local1 notice - global_maxconn: 4096 - global_quiet: false - global_spread_checks: 0 - global_user: haproxy - monitoring_allowed_cidr: 127.0.0.1/32 - monitoring_password: changeme - monitoring_port: 10000 - monitoring_stats_refresh: 3 - monitoring_username: haproxy - nagios_context: juju - package_status: install - services: "- service_name: haproxy_service\n service_host: \"0.0.0.0\"\n service_port: - 80\n service_options: [balance leastconn]\n server_options: maxconn 100\n" - sysctl: "" - annotations: - gui-x: "619" - gui-y: "-406" - mediawiki: - charm: cs:precise/mediawiki-10 - num_units: 1 - options: - debug: false - name: Please set name of wiki - skin: vector - annotations: - gui-x: "618" - gui-y: "-128" - memcached: - charm: cs:precise/memcached-7 - num_units: 1 - options: - connection-limit: 1024 - disable-auto-cleanup: "no" - disable-cas: "no" - disable-large-pages: "no" - extra-options: "" - factor: 1.25 - min-item-size: -1 - nagios_context: juju - request-limit: -1 - size: 768 - slab-page-size: -1 - tcp-port: 11211 - threads: -1 - udp-port: 0 - annotations: - gui-x: "926" - gui-y: "-125" - mysql: - charm: cs:precise/mysql-28 - num_units: 1 - options: - binlog-format: MIXED - block-size: 5 - dataset-size: 80% - flavor: distro - ha-bindiface: eth0 - ha-mcastport: 5411 - max-connections: -1 - preferred-storage-engine: InnoDB - query-cache-size: -1 - query-cache-type: "OFF" - rbd-name: mysql1 - tuning-level: safest - vip_cidr: 24 - vip_iface: eth0 - annotations: - gui-x: "926" - gui-y: "123" - mysql-slave: - charm: cs:precise/mysql-28 - num_units: 1 - options: - binlog-format: MIXED - block-size: 5 - dataset-size: 80% - flavor: distro - ha-bindiface: eth0 - ha-mcastport: 5411 - max-connections: -1 - preferred-storage-engine: InnoDB - query-cache-size: -1 - query-cache-type: "OFF" - rbd-name: mysql1 - tuning-level: safest - vip_cidr: 24 - vip_iface: eth0 - annotations: - gui-x: "619" - gui-y: "124" -series: precise -relations: -- - mediawiki:cache - - memcached:cache -- - haproxy:reverseproxy - - mediawiki:website -- - mysql-slave:slave - - mysql:master -- - mediawiki:slave - - mysql-slave:db === removed file 'src/github.com/juju/jujusvg/examples/openstack.yaml' --- src/github.com/juju/jujusvg/examples/openstack.yaml 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/examples/openstack.yaml 1970-01-01 00:00:00 +0000 @@ -1,268 +0,0 @@ -services: - mongodb: - charm: "cs:precise/mongodb-36" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "639.4860908103093" - "gui-y": "636.380460366218" - ceilometer: - charm: "cs:precise/ceilometer-22" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "350.1477364318532" - "gui-y": "922.7442622575415" - ceph: - charm: "cs:precise/ceph-27" - num_units: 3 - options: - "ephemeral-unmount": /mnt - fsid: "6547bd3e-1397-11e2-82e5-53567c8d32dc" - 
"monitor-secret": AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== - "osd-reformat": "yes" - constraints: mem=1G - annotations: - "gui-x": "-20.643093692706657" - "gui-y": "51.01664830551104" - cinder: - charm: "cs:precise/cinder-27" - num_units: 1 - options: - "block-device": None - annotations: - "gui-x": "194.05177718665982" - "gui-y": "266.145686795207" - keystone: - charm: "cs:precise/keystone-38" - num_units: 1 - options: - "admin-password": openstack - "admin-token": ubuntutesting - constraints: mem=1G - annotations: - "gui-x": "-4.963964429345083" - "gui-y": "468.638517006843" - heat: - charm: "cs:precise/heat-4" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "94.05177718665982" - "gui-y": "748.5507197770728" - "ceilometer-agent": - charm: "cs:precise/ceilometer-agent-19" - num_units: 0 - annotations: - "gui-x": "309.1965280494909" - "gui-y": "552.5094886865305" - mysql: - charm: "cs:precise/mysql-48" - num_units: 1 - options: - "dataset-size": "50%" - constraints: mem=1G - annotations: - "gui-x": "644.9534429914997" - "gui-y": "909.841064015354" - "nova-cloud-controller": - charm: "cs:precise/nova-cloud-controller-43" - num_units: 1 - options: - "network-manager": Neutron - "quantum-security-groups": "yes" - constraints: mem=1G - annotations: - "gui-x": "1534.2916447820653" - "gui-y": "336.380460366218" - "neutron-gateway": - charm: "cs:precise/quantum-gateway-21" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "1051.4050421125935" - "gui-y": "95.17794387528511" - "nova-compute": - charm: "cs:precise/nova-compute-35" - num_units: 3 - options: - "config-flags": auto_assign_floating_ip=False - constraints: mem=4G - annotations: - "gui-x": "407.14071845761987" - "gui-y": "-100.96660846127398" - ntp: - charm: "cs:precise/ntp-3" - num_units: 0 - annotations: - "gui-x": "1404.9864510314626" - "gui-y": "-79.0155986051837" - glance: - charm: "cs:precise/glance-34" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "829.8086616110905" - "gui-y": "-123.44393373014978" - "openstack-dashboard": - charm: "cs:precise/openstack-dashboard-20" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "998.0178961164997" - "gui-y": "850.0023188981665" - "rabbitmq-server": - charm: "cs:precise/rabbitmq-server-33" - num_units: 1 - constraints: mem=1G - annotations: - "gui-x": "1078.6630377180622" - "gui-y": "495.1636348161353" - "swift-storage-z1": - charm: "cs:precise/swift-storage-22" - num_units: 1 - options: - "block-device": vdb - overwrite: "true" - constraints: mem=1G - annotations: - "gui-x": "1285.1146978743122" - "gui-y": "808.0668330583228" - "swift-storage-z2": - charm: "cs:precise/swift-storage-22" - num_units: 1 - options: - "block-device": vdb - overwrite: "true" - zone: 2 - constraints: mem=1G - annotations: - "gui-x": "1620.5985845930622" - "gui-y": "688.7120356950415" - "swift-storage-z3": - charm: "cs:precise/swift-storage-22" - num_units: 1 - options: - "block-device": vdb - overwrite: "true" - zone: 3 - constraints: mem=1G - annotations: - "gui-x": "1791.5662359602497" - "gui-y": "337.0991511735572" - "swift-proxy": - charm: "cs:precise/swift-proxy-34" - num_units: 1 - options: - "swift-hash": "fdfef9d4-8b06-11e2-8ac0-531c923c8fae" - "use-https": "no" - constraints: mem=1G - annotations: - "gui-x": "1791.5663580305622" - "gui-y": "53.22814134689702" -relations: - - - "ntp:juju-info" - - "heat:juju-info" - - - "ntp:juju-info" - - "openstack-dashboard:juju-info" - - - "ntp:juju-info" - - "mysql:juju-info" - - - "ntp:juju-info" - - 
"rabbitmq-server:juju-info" - - - "ntp:juju-info" - - "mongodb:juju-info" - - - "ntp:juju-info" - - "ceilometer:juju-info" - - - "ntp:juju-info" - - "swift-storage-z3:juju-info" - - - "ntp:juju-info" - - "swift-storage-z2:juju-info" - - - "ntp:juju-info" - - "swift-storage-z1:juju-info" - - - "ntp:juju-info" - - "swift-proxy:juju-info" - - - "ntp:juju-info" - - "glance:juju-info" - - - "ntp:juju-info" - - "keystone:juju-info" - - - "keystone:shared-db" - - "mysql:shared-db" - - - "nova-cloud-controller:shared-db" - - "mysql:shared-db" - - - "nova-cloud-controller:amqp" - - "rabbitmq-server:amqp" - - - "nova-cloud-controller:image-service" - - "glance:image-service" - - - "nova-cloud-controller:identity-service" - - "keystone:identity-service" - - - "nova-cloud-controller:cloud-compute" - - "nova-compute:cloud-compute" - - - "nova-compute:shared-db" - - "mysql:shared-db" - - - "nova-compute:amqp" - - "rabbitmq-server:amqp" - - - "nova-compute:image-service" - - "glance:image-service" - - - "nova-compute:ceph" - - "ceph:client" - - - "glance:shared-db" - - "mysql:shared-db" - - - "glance:identity-service" - - "keystone:identity-service" - - - "glance:ceph" - - "ceph:client" - - - "cinder:image-service" - - "glance:image-service" - - - "cinder:shared-db" - - "mysql:shared-db" - - - "cinder:amqp" - - "rabbitmq-server:amqp" - - - "nova-cloud-controller:cinder-volume-service" - - "cinder:cinder-volume-service" - - - "cinder:identity-service" - - "keystone:identity-service" - - - "cinder:ceph" - - "ceph:client" - - - "neutron-gateway:shared-db" - - "mysql:shared-db" - - - "neutron-gateway:amqp" - - "rabbitmq-server:amqp" - - - "nova-cloud-controller:quantum-network-service" - - "neutron-gateway:quantum-network-service" - - - "openstack-dashboard:identity-service" - - "keystone:identity-service" - - - "swift-proxy:identity-service" - - "keystone:identity-service" - - - "swift-proxy:swift-storage" - - "swift-storage-z1:swift-storage" - - - "swift-proxy:swift-storage" - - "swift-storage-z2:swift-storage" - - - "swift-proxy:swift-storage" - - "swift-storage-z3:swift-storage" - - - "ceilometer:identity-service" - - "keystone:identity-service" - - - "ceilometer:amqp" - - "rabbitmq-server:amqp" - - - "ceilometer:shared-db" - - "mongodb:database" - - - "nova-compute:nova-ceilometer" - - "ceilometer-agent:nova-ceilometer" - - - "ceilometer-agent:ceilometer-service" - - "ceilometer:ceilometer-service" - - - "heat:identity-service" - - "keystone:identity-service" - - - "heat:shared-db" - - "mysql:shared-db" - - - "heat:amqp" - - "rabbitmq-server:amqp" - - - "ntp:juju-info" - - "nova-compute:juju-info" - - - "ntp:juju-info" - - "nova-cloud-controller:juju-info" - - - "ntp:juju-info" - - "neutron-gateway:juju-info" - - - "ntp:juju-info" - - "ceph:juju-info" - - - "ntp:juju-info" - - "cinder:juju-info" -series: precise === removed file 'src/github.com/juju/jujusvg/jujusvg.go' --- src/github.com/juju/jujusvg/jujusvg.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/jujusvg.go 1970-01-01 00:00:00 +0000 @@ -1,60 +0,0 @@ -package jujusvg - -import ( - "image" - "sort" - "strconv" - "strings" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" -) - -// NewFromBundle returns a new Canvas that can be used -// to generate a graphical representation of the given bundle -// data. The iconURL function is used to generate a URL -// that refers to an SVG for the supplied charm URL. 
-func NewFromBundle(b *charm.BundleData, iconURL func(*charm.Reference) string) (*Canvas, error) { - var canvas Canvas - - // Verify the bundle to make sure that all the invariants - // that we depend on below actually hold true. - if err := b.Verify(nil); err != nil { - return nil, errgo.Notef(err, "cannot verify bundle") - } - // Go through all services in alphabetical order so that - // we get consistent results. - serviceNames := make([]string, 0, len(b.Services)) - for name := range b.Services { - serviceNames = append(serviceNames, name) - } - sort.Strings(serviceNames) - services := make(map[string]*service) - for _, name := range serviceNames { - serviceData := b.Services[name] - x, xerr := strconv.ParseFloat(serviceData.Annotations["gui-x"], 64) - y, yerr := strconv.ParseFloat(serviceData.Annotations["gui-y"], 64) - if xerr != nil || yerr != nil { - return nil, errgo.Newf("service %q does not have a valid position", name) - } - charmId, err := charm.ParseReference(serviceData.Charm) - if err != nil { - // cannot actually happen, as we've verified it. - return nil, errgo.Notef(err, "cannot parse charm %q", serviceData.Charm) - } - svc := &service{ - name: name, - point: image.Point{int(x), int(y)}, - iconUrl: iconURL(charmId), - } - services[name] = svc - canvas.addService(svc) - } - for _, relation := range b.Relations { - canvas.addRelation(&serviceRelation{ - serviceA: services[strings.Split(relation[0], ":")[0]], - serviceB: services[strings.Split(relation[1], ":")[0]], - }) - } - return &canvas, nil -} === removed file 'src/github.com/juju/jujusvg/jujusvg_test.go' --- src/github.com/juju/jujusvg/jujusvg_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/jujusvg/jujusvg_test.go 1970-01-01 00:00:00 +0000 @@ -1,162 +0,0 @@ -package jujusvg - -import ( - "bytes" - "strings" - "testing" - - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" -) - -func Test(t *testing.T) { gc.TestingT(t) } - -type newSuite struct{} - -var _ = gc.Suite(&newSuite{}) - -var bundle = ` -services: - mongodb: - charm: "cs:precise/mongodb-21" - num_units: 1 - annotations: - "gui-x": "940.5" - "gui-y": "388.7698359714502" - constraints: "mem=2G cpu-cores=1" - elasticsearch: - charm: "cs:~charming-devs/precise/elasticsearch-2" - num_units: 1 - annotations: - "gui-x": "490.5" - "gui-y": "369.7698359714502" - constraints: "mem=2G cpu-cores=1" - charmworld: - charm: "cs:~juju-jitsu/precise/charmworld-58" - num_units: 1 - expose: true - annotations: - "gui-x": "813.5" - "gui-y": "112.23016402854975" - options: - charm_import_limit: -1 - source: "lp:~bac/charmworld/ingest-local-charms" - revno: 511 -relations: - - - "charmworld:essearch" - - "elasticsearch:essearch" - - - "charmworld:database" - - "mongodb:database" -series: precise -` - -func iconURL(ref *charm.Reference) string { - return "http://0.1.2.3/" + ref.Path() + ".svg" -} - -func (s *newSuite) TestNewFromBundle(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - err = b.Verify(nil) - c.Assert(err, gc.IsNil) - - cvs, err := NewFromBundle(b, iconURL) - c.Assert(err, gc.IsNil) - - var buf bytes.Buffer - cvs.Marshal(&buf) - c.Logf("%s", buf.String()) - assertXMLEqual(c, buf.Bytes(), []byte(` - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -charmworld - - - - -elasticsearch - - - - -mongodb - - - -`)) -} - -func (s *newSuite) TestWithBadBundle(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - b.Relations[0][0] = "evil-unknown-service" - cvs, err 
:= NewFromBundle(b, iconURL) - c.Assert(err, gc.ErrorMatches, "cannot verify bundle: .*") - c.Assert(cvs, gc.IsNil) -} - -func (s *newSuite) TestWithBadPosition(c *gc.C) { - b, err := charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - - b.Services["charmworld"].Annotations["gui-x"] = "bad" - cvs, err := NewFromBundle(b, iconURL) - c.Assert(err, gc.ErrorMatches, `service "charmworld" does not have a valid position`) - c.Assert(cvs, gc.IsNil) - - b, err = charm.ReadBundleData(strings.NewReader(bundle)) - c.Assert(err, gc.IsNil) - - b.Services["charmworld"].Annotations["gui-y"] = "bad" - cvs, err = NewFromBundle(b, iconURL) - c.Assert(err, gc.ErrorMatches, `service "charmworld" does not have a valid position`) - c.Assert(cvs, gc.IsNil) -} === modified file 'src/github.com/juju/names/environ.go' --- src/github.com/juju/names/environ.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/names/environ.go 2016-03-22 15:18:22 +0000 @@ -3,18 +3,13 @@ package names -import ( - "regexp" -) - +// EnvironTagKind is DEPRECATED: model tags are used instead. const EnvironTagKind = "environment" type EnvironTag struct { uuid string } -var validUUID = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`) - // NewEnvironTag returns the tag of an environment with the given environment UUID. func NewEnvironTag(uuid string) EnvironTag { return EnvironTag{uuid: uuid} === added file 'src/github.com/juju/names/model.go' --- src/github.com/juju/names/model.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/names/model.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,44 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package names + +import ( + "regexp" +) + +const ModelTagKind = "model" + +// ModelTag represents a tag used to describe a model. +type ModelTag struct { + uuid string +} + +var validUUID = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`) + +// NewModelTag returns the tag of a model with the given model UUID. +func NewModelTag(uuid string) ModelTag { + return ModelTag{uuid: uuid} +} + +// ParseModelTag parses a model tag string. +func ParseModelTag(modelTag string) (ModelTag, error) { + tag, err := ParseTag(modelTag) + if err != nil { + return ModelTag{}, err + } + et, ok := tag.(ModelTag) + if !ok { + return ModelTag{}, invalidTagError(modelTag, ModelTagKind) + } + return et, nil +} + +func (t ModelTag) String() string { return t.Kind() + "-" + t.Id() } +func (t ModelTag) Kind() string { return ModelTagKind } +func (t ModelTag) Id() string { return t.uuid } + +// IsValidModel returns whether id is a valid model UUID. +func IsValidModel(id string) bool { + return validUUID.MatchString(id) +} === added file 'src/github.com/juju/names/model_test.go' --- src/github.com/juju/names/model_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/names/model_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,49 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details.
+ +package names_test + +import ( + gc "gopkg.in/check.v1" + + "github.com/juju/names" +) + +type modelSuite struct{} + +var _ = gc.Suite(&modelSuite{}) + +var parseModelTagTests = []struct { + tag string + expected names.Tag + err error +}{{ + tag: "", + err: names.InvalidTagError("", ""), +}, { + tag: "model-f47ac10b-58cc-4372-a567-0e02b2c3d479", + expected: names.NewModelTag("f47ac10b-58cc-4372-a567-0e02b2c3d479"), +}, { + tag: "dave", + err: names.InvalidTagError("dave", ""), + //}, { + // TODO(dfc) passes, but should not + // tag: "model-", + // err: names.InvalidTagError("model", ""), +}, { + tag: "service-dave", + err: names.InvalidTagError("service-dave", names.ModelTagKind), +}} + +func (s *modelSuite) TestParseModelTag(c *gc.C) { + for i, t := range parseModelTagTests { + c.Logf("test %d: %s", i, t.tag) + got, err := names.ParseModelTag(t.tag) + if err != nil || t.err != nil { + c.Check(err, gc.DeepEquals, t.err) + continue + } + c.Check(got, gc.FitsTypeOf, t.expected) + c.Check(got, gc.Equals, t.expected) + } +} === modified file 'src/github.com/juju/names/tag.go' --- src/github.com/juju/names/tag.go 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/names/tag.go 2016-03-22 15:18:22 +0000 @@ -63,7 +63,7 @@ case UnitTagKind, MachineTagKind, ServiceTagKind, EnvironTagKind, UserTagKind, RelationTagKind, NetworkTagKind, ActionTagKind, VolumeTagKind, CharmTagKind, StorageTagKind, FilesystemTagKind, IPAddressTagKind, - SpaceTagKind, SubnetTagKind, PayloadTagKind: + SpaceTagKind, SubnetTagKind, PayloadTagKind, ModelTagKind: return true } return false @@ -111,6 +111,11 @@ return nil, invalidTagError(tag, kind) } return NewEnvironTag(id), nil + case ModelTagKind: + if !IsValidModel(id) { + return nil, invalidTagError(tag, kind) + } + return NewModelTag(id), nil case RelationTagKind: id = relationTagSuffixToKey(id) if !IsValidRelation(id) { === modified file 'src/github.com/juju/names/tag_test.go' --- src/github.com/juju/names/tag_test.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/names/tag_test.go 2016-03-22 15:18:22 +0000 @@ -22,6 +22,7 @@ {tag: "machine-42", kind: names.MachineTagKind}, {tag: "service-foo", kind: names.ServiceTagKind}, {tag: "environment-42", kind: names.EnvironTagKind}, + {tag: "model-42", kind: names.ModelTagKind}, {tag: "user-admin", kind: names.UserTagKind}, {tag: "relation-service1.rel1#other-svc.other-rel2", kind: names.RelationTagKind}, {tag: "relation-service.peerRelation", kind: names.RelationTagKind}, @@ -109,6 +110,11 @@ expectType: names.EnvironTag{}, resultId: "f47ac10b-58cc-4372-a567-0e02b2c3d479", }, { + tag: "model-f47ac10b-58cc-4372-a567-0e02b2c3d479", + expectKind: names.ModelTagKind, + expectType: names.ModelTag{}, + resultId: "f47ac10b-58cc-4372-a567-0e02b2c3d479", +}, { tag: "relation-my-svc1.myrel1#other-svc.other-rel2", expectKind: names.RelationTagKind, expectType: names.RelationTag{}, @@ -124,6 +130,11 @@ expectType: names.EnvironTag{}, resultErr: `"environment-/" is not a valid environment tag`, }, { + tag: "model-/", + expectKind: names.ModelTagKind, + expectType: names.ModelTag{}, + resultErr: `"model-/" is not a valid model tag`, +}, { tag: "user-foo", expectKind: names.UserTagKind, expectType: names.UserTag{}, @@ -218,6 +229,7 @@ names.ServiceTagKind: func(tag string) names.Tag { return names.NewServiceTag(tag) }, names.RelationTagKind: func(tag string) names.Tag { return names.NewRelationTag(tag) }, names.EnvironTagKind: func(tag string) names.Tag { return names.NewEnvironTag(tag) }, + names.ModelTagKind: func(tag string) names.Tag 
{ return names.NewModelTag(tag) }, names.UserTagKind: func(tag string) names.Tag { return names.NewUserTag(tag) }, names.NetworkTagKind: func(tag string) names.Tag { return names.NewNetworkTag(tag) }, names.ActionTagKind: func(tag string) names.Tag { return names.NewActionTag(tag) }, === added directory 'src/github.com/juju/retry' === added file 'src/github.com/juju/retry/.gitignore' --- src/github.com/juju/retry/.gitignore 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +*.test === added file 'src/github.com/juju/retry/LICENSE' --- src/github.com/juju/retry/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". 
+ + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. === added file 'src/github.com/juju/retry/Makefile' --- src/github.com/juju/retry/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +PROJECT := github.com/juju/retry + +default: check + +check-licence: + @(fgrep -rl "Licensed under the LGPLv3" .;\ + fgrep -rl "MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT" .;\ + find . -name "*.go") | sed -e 's,\./,,' | sort | uniq -u | \ + xargs -I {} echo FAIL: licence missed: {} + +check: check-licence + go test $(PROJECT)/... 
+ +docs: + godoc2md $(PROJECT) > README.md === added file 'src/github.com/juju/retry/README.md' --- src/github.com/juju/retry/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,264 @@ + +# retry + import "github.com/juju/retry" + +The retry package encapsulates the mechanism around retrying commands. + +The simple use is to call retry.Call with a function closure. + +```go + + + err := retry.Call(retry.CallArgs{ + Func: func() error { ... }, + Attempts: 5, + Delay: time.Minute, + Clock: clock.WallClock, + }) + +``` + +The bare minimum arguments that need to be specified are: +* Func - the function to call +* Attempts - the number of times to try Func before giving up, or a negative number for unlimited attempts (`retry.UnlimitedAttempts`) +* Delay - how long to wait between each try that returns an error +* Clock - either the wall clock, or some testing clock + +Any error that is returned from the `Func` is considered transient. +In order to identify some errors as fatal, pass in a function for the +`IsFatalError` CallArgs value. + +In order to have the `Delay` change for each iteration, a `BackoffFunc` +needs to be set on the CallArgs. A simple doubling delay function is +provided by `DoubleDelay`. + +An example of a more complex `BackoffFunc` could be a stepped function such +as: + +```go + + + func StepDelay(last time.Duration, attempt int) time.Duration { + switch attempt { + case 1: + return time.Second + case 2: + return 5 * time.Second + case 3: + return 20 * time.Second + case 4: + return time.Minute + case 5: + return 5 * time.Minute + default: + return 2 * last + } + } + +``` + +Consider some package `foo` that has a `TryAgainError`, which looks something +like this: +```go + + + type TryAgainError struct { + After time.Duration + } + +``` +and we create something that looks like this: + +```go + + + type TryAgainHelper struct { + next time.Duration + } + + func (h *TryAgainHelper) notify(lastError error, attempt int) { + if tryAgain, ok := lastError.(*foo.TryAgainError); ok { + h.next = tryAgain.After + } else { + h.next = 0 + } + } + + func (h *TryAgainHelper) backoff(last time.Duration) time.Duration { + if h.next != 0 { + return h.next + } + return last + } + +``` + +Then we could do this: +```go + + + helper := TryAgainHelper{} + retry.Call(retry.CallArgs{ + Func: func() error { + return foo.SomeFunc() + }, + NotifyFunc: helper.notify, + BackoffFunc: helper.backoff, + Attempts: 20, + Delay: 100 * time.Millisecond, + Clock: clock.WallClock, + }) + +``` + + + + +## Constants +``` go +const ( + // UnlimitedAttempts can be used as a value for `Attempts` to clearly + // show to the reader that there is no limit to the number of attempts. + UnlimitedAttempts = -1 +) +``` + + +## func Call +``` go +func Call(args CallArgs) error +``` +Call will repeatedly execute the Func until either the function returns no +error, the retry count is exceeded or the stop channel is closed. + + +## func DoubleDelay +``` go +func DoubleDelay(delay time.Duration, attempt int) time.Duration +``` +DoubleDelay provides a simple function that doubles the duration passed in. +This can then be easily used as the `BackoffFunc` in the `CallArgs` +structure. + + +## func IsAttemptsExceeded +``` go +func IsAttemptsExceeded(err error) bool +``` +IsAttemptsExceeded returns true if the error is the result of the `Call` +function finishing due to hitting the requested number of `Attempts`.
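+ +When `Call` finishes because the attempt count was reached, the error it returns wraps the last error produced by `Func`. The following is a minimal sketch of checking for that case; `doWork` and the `log` import it implies are illustrative assumptions rather than part of this package: + +```go + + + err := retry.Call(retry.CallArgs{ + Func: doWork, // hypothetical func() error + Attempts: 3, + Delay: time.Second, + Clock: clock.WallClock, + }) + if retry.IsAttemptsExceeded(err) { + // All attempts failed; report the last underlying error. + log.Printf("giving up: %v", retry.LastError(err)) + } + +```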
+ + +## func IsDurationExceeded +``` go +func IsDurationExceeded(err error) bool +``` +IsDurationExceeded returns true if the error is the result of the `Call` +function finishing due to the total duration exceeding the specified +`MaxDuration` value. + + +## func IsRetryStopped +``` go +func IsRetryStopped(err error) bool +``` +IsRetryStopped returns true if the error is the result of the `Call` +function finishing due to the stop channel being closed. + + +## func LastError +``` go +func LastError(err error) error +``` +LastError retrieves the last error returned from `Func` before iteration +was terminated due to the attempt count being exceeded, the maximum +duration being exceeded, or the stop channel being closed. + + + +## type CallArgs +``` go +type CallArgs struct { + // Func is the function that will be retried if it returns an error result. + Func func() error + + // IsFatalError is a function that, if set, will be called for every non- + // nil error result from `Func`. If `IsFatalError` returns true, the error + // is immediately returned breaking out from any further retries. + IsFatalError func(error) bool + + // NotifyFunc is a function that, if set, is called with the last error + // and the attempt number each time Func fails. The first time this + // function is called attempt is 1, the second time, attempt is 2 and so on. + NotifyFunc func(lastError error, attempt int) + + // Attempts specifies the number of times Func should be retried before + // giving up and returning the `AttemptsExceeded` error. If a negative + // value is specified, the `Call` will retry forever. + Attempts int + + // Delay specifies how long to wait between retries. + Delay time.Duration + + // MaxDelay specifies the longest time to wait between retries. If no + // value is specified there is no maximum delay. + MaxDelay time.Duration + + // MaxDuration specifies the maximum time the `Call` function should spend + // iterating over `Func`. The duration is calculated from the start of the + // `Call` function. If the next delay time would take the total duration + // of the call over MaxDuration, then a DurationExceeded error is + // returned. If no value is specified, Call will continue until the number + // of attempts is complete. + MaxDuration time.Duration + + // BackoffFunc allows the caller to provide a function that alters the + // delay each time through the loop. If this function is not provided the + // delay is the same each iteration. Alternatively a function such as + // `retry.DoubleDelay` can be used that will provide an exponential + // backoff. The first time this function is called attempt is 1, the + // second time, attempt is 2 and so on. + BackoffFunc func(delay time.Duration, attempt int) time.Duration + + // Clock provides the mechanism for waiting. Normal program execution is + // expected to use something like clock.WallClock, and tests can override + // this to not actually sleep in tests. + Clock clock.Clock + + // Stop is a channel that can be used to indicate that the waiting should + // be interrupted. If Stop is nil, then the Call function cannot be interrupted. + // If the channel is closed prior to the Call function being executed, the + // Func is still attempted once. + Stop <-chan struct{} +} +``` +CallArgs is a simple structure used to define the behaviour of the Call +function. + + + + + + + + + + + +### func (\*CallArgs) Validate +``` go +func (args *CallArgs) Validate() error +``` +Validate checks that the values are valid, ensuring that the Func, Delay, +Attempts and Clock have all been specified.
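+ +These options compose. The following sketch shows a fuller `CallArgs` using a fatal-error check, exponential backoff and a stop channel; the `fetch` function and `errNotFound` value are hypothetical stand-ins, not part of this package: + +```go + + + stop := make(chan struct{}) + err := retry.Call(retry.CallArgs{ + Func: fetch, // hypothetical func() error + IsFatalError: func(err error) bool { + // A hypothetical non-transient error stops the retries at once. + return err == errNotFound + }, + Attempts: retry.UnlimitedAttempts, + Delay: 100 * time.Millisecond, + MaxDelay: 5 * time.Second, + MaxDuration: time.Minute, + BackoffFunc: retry.DoubleDelay, + Clock: clock.WallClock, + Stop: stop, + }) + +```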
+ + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) \ No newline at end of file === added file 'src/github.com/juju/retry/doc.go' --- src/github.com/juju/retry/doc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// The retry package encapsulates the mechanism around retrying commands. +// +// The simple use is to call retry.Call with a function closure. +// +// ```go +// err := retry.Call(retry.CallArgs{ +// Func: func() error { ... }, +// Attempts: 5, +// Delay: time.Minute, +// Clock: clock.WallClock, +// }) +// ``` +// +// The bare minimum arguments that need to be specified are: +// * Func - the function to call +// * Attempts - the number of times to try Func before giving up, or a negative number for unlimited attempts (`retry.UnlimitedAttempts`) +// * Delay - how long to wait between each try that returns an error +// * Clock - either the wall clock, or some testing clock +// +// Any error that is returned from the `Func` is considered transient. +// In order to identify some errors as fatal, pass in a function for the +// `IsFatalError` CallArgs value. +// +// In order to have the `Delay` change for each iteration, a `BackoffFunc` +// needs to be set on the CallArgs. A simple doubling delay function is +// provided by `DoubleDelay`. +// +// An example of a more complex `BackoffFunc` could be a stepped function such +// as: +// +// ```go +// func StepDelay(last time.Duration, attempt int) time.Duration { +// switch attempt{ +// case 1: +// return time.Second +// case 2: +// return 5 * time.Second +// case 3: +// return 20 * time.Second +// case 4: +// return time.Minute +// case 5: +// return 5 * time.Minute +// default: +// return 2 * last +// } +// } +// ``` +// +// Consider some package `foo` that has a `TryAgainError`, which looks something +// like this: +// ```go +// type TryAgainError struct { +// After time.Duration +// } +// ``` +// and we create something that looks like this: +// +// ```go +// type TryAgainHelper struct { +// next time.Duration +// } +// +// func (h *TryAgainHelper) notify(lastError error, attempt int) { +// if tryAgain, ok := lastError.(*foo.TryAgainError); ok { +// h.next = tryAgain.After +// } else { +// h.next = 0 +// } +// } +// +// func (h *TryAgainHelper) next(last time.Duration) time.Duration { +// if h.next != 0 { +// return h.next +// } +// return last +// } +// ``` +// +// Then we could do this: +// ```go +// helper := TryAgainHelper{} +// retry.Call(retry.CallArgs{ +// Func: func() error { +// return foo.SomeFunc() +// }, +// NotifyFunc: helper.notify, +// BackoffFunc: helper.next, +// Attempts: 20, +// Delay: 100 * time.Millisecond, +// Clock: clock.WallClock, +// }) +// ``` +package retry === added file 'src/github.com/juju/retry/package_test.go' --- src/github.com/juju/retry/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +package retry_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/retry/retry.go' --- src/github.com/juju/retry/retry.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/retry.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,226 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package retry + +import ( + "fmt" + "time" + + "github.com/juju/errors" + "github.com/juju/utils/clock" +) + +const ( + // UnlimitedAttempts can be used as a value for `Attempts` to clearly + // show to the reader that there is no limit to the number of attempts. + UnlimitedAttempts = -1 +) + +// retryStopped is the error that is returned from the `Call` function +// when the stop channel has been closed. +type retryStopped struct { + lastError error +} + +// Error provides the implementation for the error interface method. +func (e *retryStopped) Error() string { + return fmt.Sprintf("retry stopped") +} + +// attemptsExceeded is the error that is returned when the retry count has +// been hit without the function returning a nil error result. The last error +// returned from the function being retried is available as the LastError +// attribute. +type attemptsExceeded struct { + lastError error +} + +// Error provides the implementation for the error interface method. +func (e *attemptsExceeded) Error() string { + return fmt.Sprintf("attempt count exceeded: %s", e.lastError) +} + +// durationExceeded is the error that is returned when the total time that the +// `Call` function would have executed exceeds the `MaxDuration` specified. +// The last error returned from the function being retried is available as the +// LastError attribute. +type durationExceeded struct { + lastError error +} + +// Error provides the implementation for the error interface method. +func (e *durationExceeded) Error() string { + return fmt.Sprintf("max duration exceeded: %s", e.lastError) +} + +// LastError retrieves the last error returned from `Func` before iteration +// was terminated due to the attempt count being exceeded, the maximum +// duration being exceeded, or the stop channel being closed. +func LastError(err error) error { + cause := errors.Cause(err) + switch err := cause.(type) { + case *attemptsExceeded: + return err.lastError + case *retryStopped: + return err.lastError + case *durationExceeded: + return err.lastError + } + return errors.Errorf("unexpected error type: %T, %s", cause, cause) +} + +// IsAttemptsExceeded returns true if the error is the result of the `Call` +// function finishing due to hitting the requested number of `Attempts`. +func IsAttemptsExceeded(err error) bool { + cause := errors.Cause(err) + _, ok := cause.(*attemptsExceeded) + return ok +} + +// IsDurationExceeded returns true if the error is the result of the `Call` +// function finishing due to the total duration exceeding the specified +// `MaxDuration` value. +func IsDurationExceeded(err error) bool { + cause := errors.Cause(err) + _, ok := cause.(*durationExceeded) + return ok +} + +// IsRetryStopped returns true if the error is the result of the `Call` +// function finishing due to the stop channel being closed. +func IsRetryStopped(err error) bool { + cause := errors.Cause(err) + _, ok := cause.(*retryStopped) + return ok +} + +// CallArgs is a simple structure used to define the behaviour of the Call +// function. 
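+//
+// Func, Delay and Clock must always be set; in addition, at least one of
+// Attempts or MaxDuration must be non-zero (see Validate).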
+type CallArgs struct {
+	// Func is the function that will be retried if it returns an error result.
+	Func func() error
+
+	// IsFatalError is a function that, if set, will be called for every non-
+	// nil error result from `Func`. If `IsFatalError` returns true, the error
+	// is immediately returned, breaking out from any further retries.
+	IsFatalError func(error) bool
+
+	// NotifyFunc is a function that is called with the last error and the
+	// attempt number each time Func fails. The first time it is called
+	// attempt is 1, the second time attempt is 2, and so on.
+	NotifyFunc func(lastError error, attempt int)
+
+	// Attempts specifies the number of times Func should be retried before
+	// giving up and returning the `AttemptsExceeded` error. If a negative
+	// value such as `UnlimitedAttempts` is specified, `Call` will retry
+	// forever.
+	Attempts int
+
+	// Delay specifies how long to wait between retries.
+	Delay time.Duration
+
+	// MaxDelay specifies the longest time to wait between retries. If no
+	// value is specified there is no maximum delay.
+	MaxDelay time.Duration
+
+	// MaxDuration specifies the maximum time the `Call` function should spend
+	// iterating over `Func`. The duration is calculated from the start of the
+	// `Call` function. If the next delay time would take the total duration
+	// of the call over MaxDuration, then a DurationExceeded error is
+	// returned. If no value is specified, Call will continue until the number
+	// of attempts is complete.
+	MaxDuration time.Duration
+
+	// BackoffFunc allows the caller to provide a function that alters the
+	// delay each time through the loop. If this function is not provided,
+	// the delay is the same for each iteration. Alternatively a function
+	// such as `retry.DoubleDelay` can be used to provide an exponential
+	// backoff. The first time this function is called attempt is 1, the
+	// second time attempt is 2, and so on.
+	BackoffFunc func(delay time.Duration, attempt int) time.Duration
+
+	// Clock provides the mechanism for waiting. Normal program execution is
+	// expected to use something like clock.WallClock, and tests can override
+	// it so they do not actually sleep.
+	Clock clock.Clock
+
+	// Stop is a channel that can be used to indicate that the waiting should
+	// be interrupted. If Stop is nil, then the Call function cannot be
+	// interrupted. If the channel is closed prior to the Call function being
+	// executed, the Func is still attempted once.
+	Stop <-chan struct{}
+}
+
+// Validate checks that the arguments are valid: Func, Delay and Clock must
+// be specified, along with at least one of Attempts or MaxDuration.
+func (args *CallArgs) Validate() error {
+	if args.Func == nil {
+		return errors.NotValidf("missing Func")
+	}
+	if args.Delay == 0 {
+		return errors.NotValidf("missing Delay")
+	}
+	if args.Clock == nil {
+		return errors.NotValidf("missing Clock")
+	}
+	// At least one of Attempts or MaxDuration needs to be specified.
+	if args.Attempts == 0 && args.MaxDuration == 0 {
+		return errors.NotValidf("missing Attempts or MaxDuration")
+	}
+	return nil
+}
+
+// Call will repeatedly execute the Func until either the function returns no
+// error, the retry count is exceeded or the stop channel is closed.
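+//
+// A minimal sketch of typical use, with doSomething standing in for the
+// caller's own function:
+//
+//	err := retry.Call(retry.CallArgs{
+//		Func:     doSomething,
+//		Attempts: 3,
+//		Delay:    time.Second,
+//		Clock:    clock.WallClock,
+//	})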
+func Call(args CallArgs) error { + err := args.Validate() + if err != nil { + return errors.Trace(err) + } + start := args.Clock.Now() + for i := 1; args.Attempts <= 0 || i <= args.Attempts; i++ { + err = args.Func() + if err == nil { + return nil + } + if args.IsFatalError != nil && args.IsFatalError(err) { + return errors.Trace(err) + } + if args.NotifyFunc != nil { + args.NotifyFunc(err, i) + } + if i == args.Attempts && args.Attempts > 0 { + break // don't wait before returning the error + } + + if args.BackoffFunc != nil { + delay := args.BackoffFunc(args.Delay, i) + if delay > args.MaxDelay && args.MaxDelay > 0 { + delay = args.MaxDelay + } + args.Delay = delay + } + elapsedTime := args.Clock.Now().Sub(start) + if args.MaxDuration > 0 && (elapsedTime+args.Delay) > args.MaxDuration { + return errors.Wrap(err, &durationExceeded{err}) + } + + // Wait for the delay, and retry + select { + case <-args.Clock.After(args.Delay): + case <-args.Stop: + return errors.Wrap(err, &retryStopped{err}) + } + } + return errors.Wrap(err, &attemptsExceeded{err}) +} + +// DoubleDelay provides a simple function that doubles the duration passed in. +// This can then be easily used as the `BackoffFunc` in the `CallArgs` +// structure. +func DoubleDelay(delay time.Duration, attempt int) time.Duration { + if attempt == 1 { + return delay + } + return delay * 2 +} === added file 'src/github.com/juju/retry/retry_test.go' --- src/github.com/juju/retry/retry_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/retry/retry_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,314 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package retry_test + +import ( + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/clock" + gc "gopkg.in/check.v1" + + "github.com/juju/retry" +) + +type retrySuite struct { + testing.LoggingSuite +} + +var _ = gc.Suite(&retrySuite{}) + +type mockClock struct { + now time.Time + delays []time.Duration +} + +func (mock *mockClock) Now() time.Time { + return mock.now +} + +func (mock *mockClock) After(wait time.Duration) <-chan time.Time { + mock.delays = append(mock.delays, wait) + mock.now = mock.now.Add(wait) + return time.After(time.Microsecond) +} + +func (*retrySuite) TestSuccessHasNoDelay(c *gc.C) { + clock := &mockClock{} + err := retry.Call(retry.CallArgs{ + Func: func() error { return nil }, + Attempts: 5, + Delay: time.Minute, + Clock: clock, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(clock.delays, gc.HasLen, 0) +} + +func (*retrySuite) TestCalledOnceEvenIfStopped(c *gc.C) { + stop := make(chan struct{}) + clock := &mockClock{} + called := false + close(stop) + err := retry.Call(retry.CallArgs{ + Func: func() error { + called = true + return nil + }, + Attempts: 5, + Delay: time.Minute, + Clock: clock, + Stop: stop, + }) + c.Assert(called, jc.IsTrue) + c.Assert(err, jc.ErrorIsNil) + c.Assert(clock.delays, gc.HasLen, 0) +} + +func (*retrySuite) TestAttempts(c *gc.C) { + clock := &mockClock{} + funcErr := errors.New("bah") + err := retry.Call(retry.CallArgs{ + Func: func() error { return funcErr }, + Attempts: 4, + Delay: time.Minute, + Clock: clock, + }) + c.Assert(err, jc.Satisfies, retry.IsAttemptsExceeded) + // We delay between attempts, and don't delay after the last one. 
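+	// With Attempts: 4 the mock clock therefore records exactly three delays.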
+ c.Assert(clock.delays, jc.DeepEquals, []time.Duration{ + time.Minute, + time.Minute, + time.Minute, + }) +} + +func (*retrySuite) TestAttemptsExceededError(c *gc.C) { + clock := &mockClock{} + funcErr := errors.New("bah") + err := retry.Call(retry.CallArgs{ + Func: func() error { return funcErr }, + Attempts: 5, + Delay: time.Minute, + Clock: clock, + }) + c.Assert(err, gc.ErrorMatches, `attempt count exceeded: bah`) + c.Assert(err, jc.Satisfies, retry.IsAttemptsExceeded) + c.Assert(retry.LastError(err), gc.Equals, funcErr) +} + +func (*retrySuite) TestFatalErrorsNotRetried(c *gc.C) { + clock := &mockClock{} + funcErr := errors.New("bah") + err := retry.Call(retry.CallArgs{ + Func: func() error { return funcErr }, + IsFatalError: func(error) bool { return true }, + Attempts: 5, + Delay: time.Minute, + Clock: clock, + }) + c.Assert(errors.Cause(err), gc.Equals, funcErr) + c.Assert(clock.delays, gc.HasLen, 0) +} + +func (*retrySuite) TestBackoffFactor(c *gc.C) { + clock := &mockClock{} + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Clock: clock, + Attempts: 5, + Delay: time.Minute, + BackoffFunc: retry.DoubleDelay, + }) + c.Assert(err, jc.Satisfies, retry.IsAttemptsExceeded) + c.Assert(clock.delays, jc.DeepEquals, []time.Duration{ + time.Minute, + time.Minute * 2, + time.Minute * 4, + time.Minute * 8, + }) +} + +func (*retrySuite) TestStopChannel(c *gc.C) { + clock := &mockClock{} + stop := make(chan struct{}) + count := 0 + err := retry.Call(retry.CallArgs{ + Func: func() error { + if count == 2 { + close(stop) + } + count++ + return errors.New("bah") + }, + Attempts: 5, + Delay: time.Minute, + Clock: clock, + Stop: stop, + }) + c.Assert(err, jc.Satisfies, retry.IsRetryStopped) + c.Assert(clock.delays, gc.HasLen, 3) +} + +func (*retrySuite) TestNotifyFunc(c *gc.C) { + var ( + clock = &mockClock{} + funcErr = errors.New("bah") + attempts []int + funcErrors []error + ) + err := retry.Call(retry.CallArgs{ + Func: func() error { + return funcErr + }, + NotifyFunc: func(lastError error, attempt int) { + funcErrors = append(funcErrors, lastError) + attempts = append(attempts, attempt) + }, + Attempts: 3, + Delay: time.Minute, + Clock: clock, + }) + c.Assert(err, jc.Satisfies, retry.IsAttemptsExceeded) + c.Assert(clock.delays, gc.HasLen, 2) + c.Assert(funcErrors, jc.DeepEquals, []error{funcErr, funcErr, funcErr}) + c.Assert(attempts, jc.DeepEquals, []int{1, 2, 3}) +} + +func (*retrySuite) TestInfiniteRetries(c *gc.C) { + // OK, we can't test infinite, but we'll go for lots. 
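+	// Closing the stop channel below is what finally ends the otherwise
+	// unlimited retry loop.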
+ clock := &mockClock{} + stop := make(chan struct{}) + count := 0 + err := retry.Call(retry.CallArgs{ + Func: func() error { + if count == 111 { + close(stop) + } + count++ + return errors.New("bah") + }, + Attempts: retry.UnlimitedAttempts, + Delay: time.Minute, + Clock: clock, + Stop: stop, + }) + c.Assert(err, jc.Satisfies, retry.IsRetryStopped) + c.Assert(clock.delays, gc.HasLen, count) +} + +func (*retrySuite) TestMaxDuration(c *gc.C) { + clock := &mockClock{} + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Delay: time.Minute, + MaxDuration: 5 * time.Minute, + Clock: clock, + }) + c.Assert(err, jc.Satisfies, retry.IsDurationExceeded) + c.Assert(clock.delays, jc.DeepEquals, []time.Duration{ + time.Minute, + time.Minute, + time.Minute, + time.Minute, + time.Minute, + }) +} + +func (*retrySuite) TestMaxDurationDoubling(c *gc.C) { + clock := &mockClock{} + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Delay: time.Minute, + MaxDuration: 10 * time.Minute, + BackoffFunc: retry.DoubleDelay, + Clock: clock, + }) + c.Assert(err, jc.Satisfies, retry.IsDurationExceeded) + // Stops after seven minutes, because the next wait time + // would take it to 15 minutes. + c.Assert(clock.delays, jc.DeepEquals, []time.Duration{ + time.Minute, + 2 * time.Minute, + 4 * time.Minute, + }) +} + +func (*retrySuite) TestMaxDelay(c *gc.C) { + clock := &mockClock{} + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Attempts: 7, + Delay: time.Minute, + MaxDelay: 10 * time.Minute, + BackoffFunc: retry.DoubleDelay, + Clock: clock, + }) + c.Assert(err, jc.Satisfies, retry.IsAttemptsExceeded) + c.Assert(clock.delays, jc.DeepEquals, []time.Duration{ + time.Minute, + 2 * time.Minute, + 4 * time.Minute, + 8 * time.Minute, + 10 * time.Minute, + 10 * time.Minute, + }) +} + +func (*retrySuite) TestWithWallClock(c *gc.C) { + var attempts []int + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + NotifyFunc: func(lastError error, attempt int) { + attempts = append(attempts, attempt) + }, + Attempts: 5, + Delay: time.Microsecond, + Clock: clock.WallClock, + }) + c.Assert(err, jc.Satisfies, retry.IsAttemptsExceeded) + c.Assert(attempts, jc.DeepEquals, []int{1, 2, 3, 4, 5}) +} + +func (*retrySuite) TestMissingFuncNotValid(c *gc.C) { + err := retry.Call(retry.CallArgs{ + Attempts: 5, + Delay: time.Minute, + Clock: clock.WallClock, + }) + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `missing Func not valid`) +} + +func (*retrySuite) TestMissingAttemptsNotValid(c *gc.C) { + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Delay: time.Minute, + Clock: clock.WallClock, + }) + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `missing Attempts or MaxDuration not valid`) +} + +func (*retrySuite) TestMissingDelayNotValid(c *gc.C) { + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Attempts: 5, + Clock: clock.WallClock, + }) + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `missing Delay not valid`) +} + +func (*retrySuite) TestMissingClockNotValid(c *gc.C) { + err := retry.Call(retry.CallArgs{ + Func: func() error { return errors.New("bah") }, + Attempts: 5, + Delay: time.Minute, + }) + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `missing Clock not valid`) +} === added directory 
'src/github.com/juju/romulus' === added file 'src/github.com/juju/romulus/LICENCE' --- src/github.com/juju/romulus/LICENCE 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/LICENCE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,667 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2016 Canonical Ltd. unless otherwise specified in the file. + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . 
+ +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. === added file 'src/github.com/juju/romulus/Makefile' --- src/github.com/juju/romulus/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ + +PROJECT := github.com/juju/romulus + +check-licence: + @(fgrep -rl "Licensed under the AGPL" .;\ + fgrep -rl "MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT" .;\ + find . -name "*.go") | sed -e 's,\./,,' | sort | uniq -u | \ + xargs -I {} echo FAIL: licence missed: {} + +check: check-licence + go test $(PROJECT)/... === added file 'src/github.com/juju/romulus/README.md' --- src/github.com/juju/romulus/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +# romulus === added directory 'src/github.com/juju/romulus/api' === added directory 'src/github.com/juju/romulus/api/budget' === added file 'src/github.com/juju/romulus/api/budget/api.go' --- src/github.com/juju/romulus/api/budget/api.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/budget/api.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,220 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package budget contains the budget service API client. +package budget + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/juju/errors" + + wireformat "github.com/juju/romulus/wireformat/budget" +) + +type httpClient interface { + DoWithBody(*http.Request, io.ReadSeeker) (*http.Response, error) +} + +// NewClient returns a new budget API client using the provided http client. +func NewClient(c httpClient) *client { + return &client{ + h: c, + } +} + +type client struct { + h httpClient +} + +// CreateBudget creates a new budget with the specified name and limit. +// The call returns the service's response message and an error if one occurred. +func (c *client) CreateBudget(name string, limit string) (string, error) { + create := wireformat.CreateBudgetRequest{ + Budget: name, + Limit: limit, + } + var response string + err := c.doRequest(create, &response) + return response, err +} + +// ListBudgets lists the budgets belonging to the current user. +func (c *client) ListBudgets() (*wireformat.ListBudgetsResponse, error) { + list := wireformat.ListBudgetsRequest{} + var response wireformat.ListBudgetsResponse + err := c.doRequest(list, &response) + if err != nil { + return nil, err + } + return &response, nil +} + +// SetBudget updates the budget limit. 
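+// Like CreateBudget, it returns the service's response message and an error
+// if one occurred.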
+func (c *client) SetBudget(budget, limit string) (string, error) {
+	set := wireformat.SetBudgetRequest{
+		Budget: budget,
+		Limit:  limit,
+	}
+	var response string
+	err := c.doRequest(set, &response)
+	return response, err
+}
+
+// GetBudget returns information about a particular budget.
+func (c *client) GetBudget(budget string) (*wireformat.BudgetWithAllocations, error) {
+	get := wireformat.GetBudgetRequest{
+		Budget: budget,
+	}
+	var response wireformat.BudgetWithAllocations
+	err := c.doRequest(get, &response)
+	if err != nil {
+		return nil, err
+	}
+	return &response, nil
+}
+
+// CreateAllocation creates a new allocation in a specific budget.
+func (c *client) CreateAllocation(budget, limit string, model string, services []string) (string, error) {
+	create := wireformat.CreateAllocationRequest{
+		Budget:   budget,
+		Limit:    limit,
+		Model:    model,
+		Services: services,
+	}
+	var response string
+	err := c.doRequest(create, &response)
+	return response, err
+}
+
+// UpdateAllocation updates the allocation associated with the specified
+// service with a new limit.
+func (c *client) UpdateAllocation(model, service, limit string) (string, error) {
+	create := wireformat.UpdateAllocationRequest{
+		Limit:   limit,
+		Model:   model,
+		Service: service,
+	}
+	var response string
+	err := c.doRequest(create, &response)
+	return response, err
+}
+
+// DeleteAllocation deletes the allocation associated with the specified service.
+func (c *client) DeleteAllocation(model, service string) (string, error) {
+	create := wireformat.DeleteAllocationRequest{
+		Model:   model,
+		Service: service,
+	}
+	var response string
+	err := c.doRequest(create, &response)
+	return response, err
+}
+
+// hasURL is an interface implemented by request structures that
+// modify the request URL.
+type hasURL interface {
+	// URL returns the request URL.
+	URL() string
+}
+
+// hasBody is an interface implemented by requests that send
+// data in the request body.
+type hasBody interface {
+	// Body returns the request body value.
+	Body() interface{}
+}
+
+// hasMethod is an interface implemented by requests to
+// specify the request method.
+type hasMethod interface {
+	// Method returns the request method.
+	Method() string
+}
+
+// doRequest executes a generic request, retrieving relevant information
+// from the req interface. If result is not nil, the response will be
+// decoded to it.
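+//
+// For illustration only, a hypothetical request type opts in to each aspect
+// by implementing the optional interfaces above:
+//
+//	type exampleRequest struct{ limit string }
+//
+//	func (exampleRequest) URL() string    { return "https://api.example.com/budget" }
+//	func (exampleRequest) Method() string { return "POST" }
+//	func (r exampleRequest) Body() interface{} {
+//		return map[string]string{"limit": r.limit}
+//	}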
+func (c *client) doRequest(req interface{}, result interface{}) error { + reqURL := "" + if urlP, ok := req.(hasURL); ok { + reqURL = urlP.URL() + } else { + return errors.Errorf("unknown request URL") + } + + u, err := url.Parse(reqURL) + if err != nil { + return errors.Trace(err) + } + + method := "GET" + if methodP, ok := req.(hasMethod); ok { + method = methodP.Method() + } + + var resp *http.Response + if bodyP, ok := req.(hasBody); ok { + reqBody := bodyP.Body() + + payload := &bytes.Buffer{} + err = json.NewEncoder(payload).Encode(reqBody) + if err != nil { + return errors.Annotate(err, "failed to encode request") + } + req, err := http.NewRequest(method, u.String(), nil) + if err != nil { + return errors.Annotate(err, "failed to create request") + } + resp, err = c.h.DoWithBody(req, bytes.NewReader(payload.Bytes())) + if err != nil { + if strings.HasSuffix(err.Error(), "Connection refused") { + return wireformat.NotAvailError{} + } + return errors.Annotate(err, "failed to execute request") + } + defer discardClose(resp) + } else { + req, err := http.NewRequest(method, u.String(), nil) + if err != nil { + return errors.Annotate(err, "failed to create request") + } + resp, err = c.h.DoWithBody(req, nil) + if err != nil { + return errors.Annotate(err, "failed to execute request") + } + defer discardClose(resp) + } + if resp.StatusCode == http.StatusServiceUnavailable { + return wireformat.NotAvailError{Resp: resp.StatusCode} + } else if resp.StatusCode != http.StatusOK { + response := "http request failed" + json.NewDecoder(resp.Body).Decode(&response) + return wireformat.HttpError{ + StatusCode: resp.StatusCode, + Message: response, + } + + } + if result != nil { + err = json.NewDecoder(resp.Body).Decode(result) + if err != nil { + return errors.Annotate(err, "failed to decode response") + } + } + return nil +} + +func discardClose(response *http.Response) { + if response == nil || response.Body == nil { + return + } + io.Copy(ioutil.Discard, response.Body) + response.Body.Close() +} === added file 'src/github.com/juju/romulus/api/budget/api_test.go' --- src/github.com/juju/romulus/api/budget/api_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/budget/api_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,618 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
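+
+// The tests in this file drive the budget client against the stubbed
+// mockClient defined at the end of the file, checking both the decoded
+// responses and the exact method, URL and JSON body recorded for each call.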
+ +package budget_test + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "testing" + + "github.com/juju/errors" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/api/budget" + wireformat "github.com/juju/romulus/wireformat/budget" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} + +type TSuite struct{} + +var _ = gc.Suite(&TSuite{}) + +func (t *TSuite) TestCreateBudget(c *gc.C) { + expected := "Budget created successfully" + respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.CreateBudget("personal", "200") + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.Equals, expected) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{ + "limit": "200", + "budget": "personal", + }, + }}}) +} + +func (t *TSuite) TestCreateBudgetServerError(c *gc.C) { + respBody, err := json.Marshal("budget already exists") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.CreateBudget("personal", "200") + c.Assert(err, gc.ErrorMatches, "400: budget already exists") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{ + "limit": "200", + "budget": "personal", + }, + }}}) +} + +func (t *TSuite) TestCreateBudgetRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.CreateBudget("personal", "200") + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{ + "limit": "200", + "budget": "personal", + }, + }}}) +} + +func (t *TSuite) TestCreateBudgetUnavail(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusServiceUnavailable, + } + client := budget.NewClient(httpClient) + response, err := client.CreateBudget("personal", "200") + c.Assert(wireformat.IsNotAvail(err), jc.IsTrue) + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{ + "limit": "200", + "budget": "personal", + }, + }}}) +} + +func (t *TSuite) TestCreateBudgetConnRefused(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusOK, + } + httpClient.SetErrors(errors.New("Connection refused")) + client := budget.NewClient(httpClient) + response, err := client.CreateBudget("personal", "200") + c.Assert(wireformat.IsNotAvail(err), jc.IsTrue) + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{ + "limit": "200", + "budget": "personal", + }, + }}}) +} + +func (t *TSuite) TestListBudgets(c *gc.C) { + expected := 
&wireformat.ListBudgetsResponse{ + Budgets: wireformat.BudgetSummaries{ + wireformat.BudgetSummary{ + Owner: "bob", + Budget: "personal", + Limit: "50", + Allocated: "30", + Unallocated: "20", + Available: "45", + Consumed: "5", + }, + wireformat.BudgetSummary{ + Owner: "bob", + Budget: "work", + Limit: "200", + Allocated: "100", + Unallocated: "100", + Available: "150", + Consumed: "50", + }, + wireformat.BudgetSummary{ + Owner: "bob", + Budget: "team", + Limit: "50", + Allocated: "10", + Unallocated: "40", + Available: "40", + Consumed: "10", + }, + }, + Total: wireformat.BudgetTotals{ + Limit: "300", + Allocated: "140", + Available: "235", + Unallocated: "160", + Consumed: "65", + }, + Credit: "400", + } + respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.ListBudgets() + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.DeepEquals, expected) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"GET", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestListBudgetsServerError(c *gc.C) { + respBody, err := json.Marshal("budget already exists") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.ListBudgets() + c.Assert(err, gc.ErrorMatches, "400: budget already exists") + c.Assert(response, gc.IsNil) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"GET", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestListBudgetsRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.ListBudgets() + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.IsNil) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"GET", + "https://api.jujucharms.com/omnibus/v2/budget", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestSetBudget(c *gc.C) { + expected := "Budget updated successfully" + respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.SetBudget("personal", "200") + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.Equals, expected) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"PUT", + "https://api.jujucharms.com/omnibus/v2/budget/personal", + map[string]interface{}{ + "limit": "200", + }, + }}}) +} + +func (t *TSuite) TestSetBudgetServerError(c *gc.C) { + respBody, err := json.Marshal("cannot update budget") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.SetBudget("personal", "200") + c.Assert(err, gc.ErrorMatches, "400: cannot update budget") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"PUT", + "https://api.jujucharms.com/omnibus/v2/budget/personal", + map[string]interface{}{ + 
"limit": "200", + }, + }}}) +} + +func (t *TSuite) TestSetBudgetRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.SetBudget("personal", "200") + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"PUT", + "https://api.jujucharms.com/omnibus/v2/budget/personal", + map[string]interface{}{ + "limit": "200", + }, + }}}) +} + +func (t *TSuite) TestGetBudget(c *gc.C) { + expected := &wireformat.BudgetWithAllocations{ + Limit: "4000.00", + Total: wireformat.BudgetTotals{ + Allocated: "2200.00", + Unallocated: "1800.00", + Available: "1100,00", + Consumed: "1100.0", + Usage: "50%", + }, + Allocations: []wireformat.Allocation{{ + Owner: "user.joe", + Limit: "1200.00", + Consumed: "500.00", + Usage: "42%", + Model: "model.joe", + Services: map[string]wireformat.ServiceAllocation{ + "wordpress": wireformat.ServiceAllocation{ + Consumed: "300.00", + }, + "mysql": wireformat.ServiceAllocation{ + Consumed: "200.00", + }, + }, + }, { + Owner: "user.jess", + Limit: "1000.00", + Consumed: "600.00", + Usage: "60%", + Model: "model.jess", + Services: map[string]wireformat.ServiceAllocation{ + "landscape": wireformat.ServiceAllocation{ + Consumed: "600.00", + }, + }, + }, + }, + } + respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.GetBudget("personal") + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.DeepEquals, expected) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"GET", + "https://api.jujucharms.com/omnibus/v2/budget/personal", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestGetBudgetServerError(c *gc.C) { + respBody, err := json.Marshal("budget not found") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusNotFound, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.GetBudget("personal") + c.Assert(err, gc.ErrorMatches, "404: budget not found") + c.Assert(response, gc.IsNil) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"GET", + "https://api.jujucharms.com/omnibus/v2/budget/personal", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestGetBudgetRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.GetBudget("personal") + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.IsNil) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"GET", + "https://api.jujucharms.com/omnibus/v2/budget/personal", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestCreateAllocation(c *gc.C) { + expected := "Allocation created successfully" + respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.CreateAllocation("personal", "200", "model", []string{"db"}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.Equals, expected) 
+ httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget/personal/allocation", + map[string]interface{}{ + "limit": "200", + "model": "model", + "services": []interface{}{"db"}, + }, + }}}) +} + +func (t *TSuite) TestCreateAllocationServerError(c *gc.C) { + respBody, err := json.Marshal("cannot create allocation") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.CreateAllocation("personal", "200", "model", []string{"db"}) + c.Assert(err, gc.ErrorMatches, "400: cannot create allocation") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget/personal/allocation", + map[string]interface{}{ + "limit": "200", + "model": "model", + "services": []interface{}{"db"}, + }, + }}}) +} + +func (t *TSuite) TestCreateAllocationRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.CreateAllocation("personal", "200", "model", []string{"db"}) + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"POST", + "https://api.jujucharms.com/omnibus/v2/budget/personal/allocation", + map[string]interface{}{ + "limit": "200", + "model": "model", + "services": []interface{}{"db"}, + }, + }}}) +} + +func (t *TSuite) TestUpdateAllocation(c *gc.C) { + expected := "Allocation updated." 
+ respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.UpdateAllocation("model", "db", "200") + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.Equals, expected) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"PUT", + "https://api.jujucharms.com/omnibus/v2/environment/model/service/db/allocation", + map[string]interface{}{ + "limit": "200", + }, + }}}) +} + +func (t *TSuite) TestUpdateAllocationServerError(c *gc.C) { + respBody, err := json.Marshal("cannot update allocation") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.UpdateAllocation("model", "db", "200") + c.Assert(err, gc.ErrorMatches, "400: cannot update allocation") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"PUT", + "https://api.jujucharms.com/omnibus/v2/environment/model/service/db/allocation", + map[string]interface{}{ + "limit": "200", + }, + }}}) +} + +func (t *TSuite) TestUpdateAllocationRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.UpdateAllocation("model", "db", "200") + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"PUT", + "https://api.jujucharms.com/omnibus/v2/environment/model/service/db/allocation", + map[string]interface{}{ + "limit": "200", + }, + }}}) +} + +func (t *TSuite) TestDeleteAllocation(c *gc.C) { + expected := "Allocation deleted." 
+ respBody, err := json.Marshal(expected) + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusOK, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.DeleteAllocation("model", "db") + c.Assert(err, jc.ErrorIsNil) + c.Assert(response, gc.Equals, expected) + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"DELETE", + "https://api.jujucharms.com/omnibus/v2/environment/model/service/db/allocation", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestDeleteAllocationServerError(c *gc.C) { + respBody, err := json.Marshal("cannot delete allocation") + c.Assert(err, jc.ErrorIsNil) + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + RespBody: respBody, + } + client := budget.NewClient(httpClient) + response, err := client.DeleteAllocation("model", "db") + c.Assert(err, gc.ErrorMatches, "400: cannot delete allocation") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"DELETE", + "https://api.jujucharms.com/omnibus/v2/environment/model/service/db/allocation", + map[string]interface{}{}, + }}}) +} + +func (t *TSuite) TestDeleteAllocationRequestError(c *gc.C) { + httpClient := &mockClient{ + RespCode: http.StatusBadRequest, + } + httpClient.SetErrors(errors.New("bogus error")) + client := budget.NewClient(httpClient) + response, err := client.DeleteAllocation("model", "db") + c.Assert(err, gc.ErrorMatches, ".*bogus error") + c.Assert(response, gc.Equals, "") + httpClient.CheckCalls(c, + []jujutesting.StubCall{{ + "DoWithBody", + []interface{}{"DELETE", + "https://api.jujucharms.com/omnibus/v2/environment/model/service/db/allocation", + map[string]interface{}{}, + }}}) +} + +type mockClient struct { + jujutesting.Stub + + RespCode int + RespBody []byte +} + +func (c *mockClient) DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + requestData := map[string]interface{}{} + if body != nil { + bodyBytes, err := ioutil.ReadAll(body) + if err != nil { + panic(err) + } + err = json.Unmarshal(bodyBytes, &requestData) + if err != nil { + panic(err) + } + } + c.Stub.MethodCall(c, "DoWithBody", req.Method, req.URL.String(), requestData) + + resp := &http.Response{ + StatusCode: c.RespCode, + Body: ioutil.NopCloser(bytes.NewReader(c.RespBody)), + } + return resp, c.Stub.NextErr() +} === added directory 'src/github.com/juju/romulus/api/plan' === added file 'src/github.com/juju/romulus/api/plan/api.go' --- src/github.com/juju/romulus/api/plan/api.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/plan/api.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package plan contains the plan service API client. +package plan + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/juju/errors" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + + wireformat "github.com/juju/romulus/wireformat/plan" +) + +var baseURL = "https://api.jujucharms.com/omnibus/v2" + +// Client defines the interface available to clients of the plan api. +type Client interface { + // GetAssociatedPlans returns the plans associated with the charm. + GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) +} + +// AuthorizationClient defines the interface available to clients of the public plan api. 
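+// A minimal sketch of obtaining an authorization macaroon (envUUID and
+// planURL are illustrative; a nil visitWebPage is acceptable when no user
+// interaction is expected, as in the package tests):
+//
+//    client, err := NewAuthorizationClient()
+//    if err != nil {
+//        // handle the error
+//    }
+//    m, err := client.Authorize(envUUID, "cs:trusty/test-charm-0", "test-charm", planURL, nil)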
+type AuthorizationClient interface { + // Authorize returns the authorization macaroon for the specified environment, charm url and service name. + Authorize(environmentUUID, charmURL, serviceName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) +} + +var _ Client = (*client)(nil) +var _ AuthorizationClient = (*client)(nil) + +type httpClient interface { + // Do sends the given HTTP request and returns its response. + Do(*http.Request) (*http.Response, error) + // DoWithBody is like Do except that the given body is used + // for the body of the HTTP request, and reset to its start + // by seeking if the request is retried. It is an error if + // req.Body is non-zero. + DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) +} + +// client is the implementation of the Client interface. +type client struct { + client httpClient + baseURL string +} + +// ClientOption defines a function which configures a Client. +type ClientOption func(h *client) error + +// HTTPClient returns a function that sets the http client used by the API +// (e.g. if we want to use TLS). +func HTTPClient(c httpClient) func(h *client) error { + return func(h *client) error { + h.client = c + return nil + } +} + +// BaseURL sets the base url for the api client. +func BaseURL(url string) func(h *client) error { + return func(h *client) error { + h.baseURL = url + return nil + } +} + +// NewAuthorizationClient returns a new public authorization client. +func NewAuthorizationClient(options ...ClientOption) (AuthorizationClient, error) { + return NewClient(options...) +} + +// NewClient returns a new client for plan management. +func NewClient(options ...ClientOption) (*client, error) { + c := &client{ + client: httpbakery.NewClient(), + baseURL: baseURL, + } + + for _, option := range options { + err := option(c) + if err != nil { + return nil, errors.Trace(err) + } + } + + return c, nil +} + +// GetAssociatedPlans returns the default plan for the specified charm. +func (c *client) GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) { + u, err := url.Parse(c.baseURL + "/charm") + if err != nil { + return nil, errors.Trace(err) + } + query := u.Query() + query.Set("charm-url", charmURL) + u.RawQuery = query.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, errors.Annotate(err, "failed to create GET request") + } + response, err := c.client.Do(req) + if err != nil { + return nil, errors.Annotate(err, "failed to retrieve associated plans") + } + defer discardClose(response) + + if response.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(response.Body) + if err == nil { + return nil, errors.Errorf("failed to retrieve associated plans: received http response: %v - code %q", string(body), http.StatusText(response.StatusCode)) + } + return nil, errors.Errorf("failed to retrieve associated plans: received http response: %q", http.StatusText(response.StatusCode)) + } + var plans []wireformat.Plan + dec := json.NewDecoder(response.Body) + err = dec.Decode(&plans) + if err != nil { + return nil, errors.Annotatef(err, "failed to unmarshal response") + } + return plans, nil +} + +// Authorize implements the AuthorizationClient.Authorize method. 
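+// It POSTs a JSON-encoded wireformat.AuthorizationRequest to the
+// /plan/authorize endpoint and decodes the macaroon returned in the
+// response body.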
+func (c *client) Authorize(environmentUUID, charmURL, serviceName, planURL string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) { + u, err := url.Parse(c.baseURL + "/plan/authorize") + if err != nil { + return nil, errors.Trace(err) + } + + auth := wireformat.AuthorizationRequest{ + EnvironmentUUID: environmentUUID, + CharmURL: charmURL, + ServiceName: serviceName, + PlanURL: planURL, + } + + buff := &bytes.Buffer{} + encoder := json.NewEncoder(buff) + err = encoder.Encode(auth) + if err != nil { + return nil, errors.Trace(err) + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, errors.Trace(err) + } + req.Header.Set("Content-Type", "application/json") + + response, err := c.client.DoWithBody(req, bytes.NewReader(buff.Bytes())) + if err != nil { + return nil, errors.Trace(err) + } + defer discardClose(response) + + if response.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(response.Body) + if err == nil { + return nil, errors.Errorf("failed to authorize plan: received http response: %v - code %q", string(body), http.StatusText(response.StatusCode)) + } + return nil, errors.Errorf("failed to authorize plan: http response is %q", http.StatusText(response.StatusCode)) + } + + var m *macaroon.Macaroon + decoder := json.NewDecoder(response.Body) + err = decoder.Decode(&m) + if err != nil { + return nil, errors.Annotatef(err, "failed to unmarshal the response") + } + + return m, nil +} + +// discardClose reads any remaining data from the response body and closes it. +func discardClose(response *http.Response) { + if response == nil || response.Body == nil { + return + } + io.Copy(ioutil.Discard, response.Body) + response.Body.Close() +} === added file 'src/github.com/juju/romulus/api/plan/api_test.go' --- src/github.com/juju/romulus/api/plan/api_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/plan/api_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,169 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
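+
+// The tests in this file exercise the plan client against a stubbed
+// mockHttpClient that records each request URL and replies with a canned
+// status code and body.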
+ +package plan_test + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon.v1" + + api "github.com/juju/romulus/api/plan" + wireformat "github.com/juju/romulus/wireformat/plan" +) + +const ( + testPlan = ` +metrics: + pings: + unit: + transform: max + period: hour + gaps: zero +` +) + +type clientSuite struct { + httpClient *mockHttpClient + + client api.Client +} + +var _ = gc.Suite(&clientSuite{}) + +func (s *clientSuite) SetUpTest(c *gc.C) { + s.httpClient = &mockHttpClient{} + + client, err := api.NewClient(api.HTTPClient(s.httpClient)) + c.Assert(err, jc.ErrorIsNil) + s.client = client + +} + +func (s *clientSuite) TestBaseURL(c *gc.C) { + client, err := api.NewClient(api.HTTPClient(s.httpClient), api.BaseURL("https://example.com")) + c.Assert(err, jc.ErrorIsNil) + + s.httpClient.status = http.StatusNotFound + _, err = client.GetAssociatedPlans("bob/uptime") + c.Assert(err, gc.ErrorMatches, `failed to retrieve associated plans: received http response: - code "Not Found"`) + s.httpClient.CheckCall(c, 0, "Do", "https://example.com/charm?charm-url=bob%2Fuptime") + s.httpClient.ResetCalls() + + m, err := macaroon.New(nil, "", "") + c.Assert(err, jc.ErrorIsNil) + data, err := json.Marshal(m) + c.Assert(err, jc.ErrorIsNil) + + s.httpClient.status = http.StatusOK + s.httpClient.body = data + _, err = client.Authorize(utils.MustNewUUID().String(), "cs:trusty/test-charm-0", "test-charm", utils.MustNewUUID().String(), nil) + c.Assert(err, jc.ErrorIsNil) + s.httpClient.CheckCall(c, 0, "DoWithBody", "https://example.com/plan/authorize") +} + +func (s *clientSuite) TestGet(c *gc.C) { + plans := []wireformat.Plan{{URL: "bob/uptime", Definition: testPlan}} + jsonPlans, err := json.Marshal(plans) + c.Assert(err, jc.ErrorIsNil) + + tests := []struct { + about string + planURL string + err string + status int + body []byte + }{{ + about: "not found", + planURL: "bob/uptime", + status: http.StatusNotFound, + err: `failed to retrieve associated plans: received http response: - code "Not Found"`, + }, { + about: "internal server error", + planURL: "bob/uptime", + status: http.StatusInternalServerError, + err: `failed to retrieve associated plans: received http response: - code "Internal Server Error"`, + }, { + about: "wrong response format", + planURL: "bob/uptime", + status: http.StatusOK, + body: []byte("wrong response format"), + err: `failed to unmarshal response: invalid character 'w' looking for beginning of value`, + }, { + about: "all is well", + planURL: "bob/uptime", + status: http.StatusOK, + body: jsonPlans, + }} + + for _, t := range tests { + s.httpClient.status = t.status + s.httpClient.body = t.body + plans, err := s.client.GetAssociatedPlans(t.planURL) + if t.err == "" { + c.Assert(err, jc.ErrorIsNil) + c.Assert(plans, jc.DeepEquals, plans) + } else { + c.Assert(err, gc.ErrorMatches, t.err) + } + } +} + +func (s *clientSuite) TestAuthorize(c *gc.C) { + envUUID := utils.MustNewUUID() + charmURL := "cs:trusty/test-charm-0" + service := "test-charm" + plan := utils.MustNewUUID() + + m, err := macaroon.New(nil, "", "") + c.Assert(err, jc.ErrorIsNil) + data, err := json.Marshal(m) + c.Assert(err, jc.ErrorIsNil) + + httpClient := &mockHttpClient{} + httpClient.status = http.StatusOK + httpClient.body = data + authClient, err := api.NewAuthorizationClient(api.HTTPClient(httpClient)) + c.Assert(err, jc.ErrorIsNil) + _, err = 
authClient.Authorize(envUUID.String(), charmURL, service, plan.String(), nil) + c.Assert(err, jc.ErrorIsNil) +} + +type mockHttpClient struct { + testing.Stub + + status int + body []byte +} + +func (m *mockHttpClient) Do(req *http.Request) (*http.Response, error) { + m.AddCall("Do", req.URL.String()) + return &http.Response{ + Status: http.StatusText(m.status), + StatusCode: m.status, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(bytes.NewReader(m.body)), + }, nil +} + +func (m *mockHttpClient) DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + m.AddCall("DoWithBody", req.URL.String()) + return &http.Response{ + Status: http.StatusText(m.status), + StatusCode: m.status, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(bytes.NewReader(m.body)), + }, nil +} === added file 'src/github.com/juju/romulus/api/plan/package_test.go' --- src/github.com/juju/romulus/api/plan/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/plan/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package plan_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added directory 'src/github.com/juju/romulus/api/terms' === added file 'src/github.com/juju/romulus/api/terms/api.go' --- src/github.com/juju/romulus/api/terms/api.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/terms/api.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,187 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package terms contains the terms service API client. +package terms + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/juju/errors" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var baseURL = "https://api.jujucharms.com/terms/v1" + +// CheckAgreementsRequest holds a slice of terms and the /v1/agreement +// endpoint will check if the user has agreed to the specified terms +// and return a slice of terms the user has not agreed to yet. +type CheckAgreementsRequest struct { + Terms []string +} + +// GetTermsResponse holds the response of the GetTerms call. +type GetTermsResponse struct { + Name string `json:"name"` + Revision int `json:"revision"` + CreatedOn time.Time `json:"created-on"` + Content string `json:"content"` +} + +// SaveAgreementResponses holds the response of the SaveAgreement +// call. +type SaveAgreementResponses struct { + Agreements []AgreementResponse `json:"agreements"` +} + +// AgreementResponse holds the a single agreement made by +// the user to a specific revision of terms and conditions +// document. +type AgreementResponse struct { + User string `json:"user"` + Term string `json:"term"` + Revision int `json:"revision"` + CreatedOn time.Time `json:"created-on"` +} + +// SaveAgreements holds the parameters for creating new +// user agreements to one or more specific revisions of terms. +type SaveAgreements struct { + Agreements []SaveAgreement `json:"agreements"` +} + +// SaveAgreement holds the parameters for creating a new +// user agreement to a specific revision of terms. +type SaveAgreement struct { + TermName string `json:"termname"` + TermRevision int `json:"termrevision"` +} + +// Client defines method needed for the Terms Service CLI +// commands. 
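+// A minimal sketch of checking and then saving agreements (errors elided):
+//
+//    c, _ := NewClient()
+//    unsigned, _ := c.GetUnsignedTerms(&CheckAgreementsRequest{
+//        Terms: []string{"hello-world-terms/1"},
+//    })
+//    _, _ = c.SaveAgreement(&SaveAgreements{
+//        Agreements: []SaveAgreement{{TermName: "hello-world-terms", TermRevision: 1}},
+//    })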
+type Client interface { + GetUnsignedTerms(p *CheckAgreementsRequest) ([]GetTermsResponse, error) + SaveAgreement(p *SaveAgreements) (*SaveAgreementResponses, error) +} + +var _ Client = (*client)(nil) + +type httpClient interface { + Do(*http.Request) (*http.Response, error) + DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) +} + +// client is the implementation of the Client interface. +type client struct { + client httpClient +} + +// ClientOption defines a function which configures a Client. +type ClientOption func(h *client) error + +// HTTPClient returns a function that sets the http client used by the API +// (e.g. if we want to use TLS). +func HTTPClient(c httpClient) func(h *client) error { + return func(h *client) error { + h.client = c + return nil + } +} + +// NewClient returns a new client for plan management. +func NewClient(options ...ClientOption) (Client, error) { + c := &client{ + client: httpbakery.NewClient(), + } + + for _, option := range options { + err := option(c) + if err != nil { + return nil, errors.Trace(err) + } + } + + return c, nil +} + +// GetUnsignedTerms returns the default plan for the specified charm. +func (c *client) GetUnsignedTerms(p *CheckAgreementsRequest) ([]GetTermsResponse, error) { + values := url.Values{} + for _, t := range p.Terms { + values.Add("Terms", t) + } + u := fmt.Sprintf("%s/agreement?%s", baseURL, values.Encode()) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, errors.Trace(err) + } + req.Header.Set("Content-Type", "application/json") + response, err := c.client.Do(req) + if err != nil { + return nil, errors.Trace(err) + } + if response.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, errors.Errorf("failed to get unsigned agreements: %v", response.Status) + } + return nil, errors.Errorf("failed to get unsigned agreements: %v: %s", response.Status, string(b)) + } + defer discardClose(response) + var results []GetTermsResponse + dec := json.NewDecoder(response.Body) + err = dec.Decode(&results) + if err != nil { + return nil, errors.Trace(err) + } + return results, nil +} + +// SaveAgreements saves a user agreement to the specificed terms document. +func (c *client) SaveAgreement(p *SaveAgreements) (*SaveAgreementResponses, error) { + u := fmt.Sprintf("%s/agreement", baseURL) + req, err := http.NewRequest("POST", u, nil) + if err != nil { + return nil, errors.Trace(err) + } + req.Header.Set("Content-Type", "application/json") + data, err := json.Marshal(p.Agreements) + if err != nil { + return nil, errors.Trace(err) + } + response, err := c.client.DoWithBody(req, bytes.NewReader(data)) + if err != nil { + return nil, errors.Trace(err) + } + if response.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, errors.Errorf("failed to get unsigned agreements: %v", response.Status) + } + return nil, errors.Errorf("failed to get unsigned agreements: %v: %s", response.Status, string(b)) + } + defer discardClose(response) + var results SaveAgreementResponses + dec := json.NewDecoder(response.Body) + err = dec.Decode(&results) + if err != nil { + return nil, errors.Trace(err) + } + return &results, nil +} + +// discardClose reads any remaining data from the response body and closes it. 
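+// Draining the body before closing it allows the underlying connection to be
+// reused for subsequent requests instead of being torn down.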
+func discardClose(response *http.Response) { + if response == nil || response.Body == nil { + return + } + io.Copy(ioutil.Discard, response.Body) + response.Body.Close() +} === added file 'src/github.com/juju/romulus/api/terms/api_test.go' --- src/github.com/juju/romulus/api/terms/api_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/api/terms/api_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,130 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package terms defines the terms service API. +package terms_test + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + stdtesting "testing" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/api/terms" +) + +type apiSuite struct { + client terms.Client + httpClient *mockHttpClient +} + +func Test(t *stdtesting.T) { + gc.TestingT(t) +} + +var _ = gc.Suite(&apiSuite{}) + +func (s *apiSuite) SetUpTest(c *gc.C) { + s.httpClient = &mockHttpClient{} + var err error + s.client, err = terms.NewClient(terms.HTTPClient(s.httpClient)) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *apiSuite) TestUnsignedTerms(c *gc.C) { + s.httpClient.status = http.StatusOK + s.httpClient.SetBody(c, []terms.GetTermsResponse{ + { + Name: "hello-world-terms", + Revision: 1, + Content: "terms doc content", + }, + { + Name: "hello-universe-terms", + Revision: 1, + Content: "universal terms doc content", + }, + }) + missingAgreements, err := s.client.GetUnsignedTerms(&terms.CheckAgreementsRequest{ + Terms: []string{ + "hello-world-terms/1", + "hello-universe-terms/1", + }, + }) + c.Assert(err, jc.ErrorIsNil) + c.Assert(missingAgreements, gc.HasLen, 2) + c.Assert(missingAgreements[0].Name, gc.Equals, "hello-world-terms") + c.Assert(missingAgreements[0].Revision, gc.Equals, 1) + c.Assert(missingAgreements[0].Content, gc.Equals, "terms doc content") + c.Assert(missingAgreements[1].Name, gc.Equals, "hello-universe-terms") + c.Assert(missingAgreements[1].Revision, gc.Equals, 1) + c.Assert(missingAgreements[1].Content, gc.Equals, "universal terms doc content") + s.httpClient.SetBody(c, terms.SaveAgreementResponses{ + Agreements: []terms.AgreementResponse{{ + User: "test-user", + Term: "hello-world-terms", + Revision: 1, + }}}) + + p1 := &terms.SaveAgreements{ + Agreements: []terms.SaveAgreement{{ + TermName: "hello-world-terms", + TermRevision: 1, + }}} + response, err := s.client.SaveAgreement(p1) + c.Assert(err, jc.ErrorIsNil) + c.Assert(response.Agreements, gc.HasLen, 1) + c.Assert(response.Agreements[0].User, gc.Equals, "test-user") + c.Assert(response.Agreements[0].Term, gc.Equals, "hello-world-terms") + c.Assert(response.Agreements[0].Revision, gc.Equals, 1) +} + +func (s *apiSuite) TestNoFoundReturnsError(c *gc.C) { + s.httpClient.status = http.StatusNotFound + s.httpClient.body = []byte("something failed") + _, err := s.client.GetUnsignedTerms(&terms.CheckAgreementsRequest{ + Terms: []string{ + "hello-world-terms/1", + "hello-universe-terms/1", + }, + }) + c.Assert(err, gc.ErrorMatches, "failed to get unsigned agreements: Not Found: something failed") +} + +type mockHttpClient struct { + status int + body []byte +} + +func (m *mockHttpClient) Do(req *http.Request) (*http.Response, error) { + return &http.Response{ + Status: http.StatusText(m.status), + StatusCode: m.status, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(bytes.NewReader(m.body)), + }, nil +} + +func (m *mockHttpClient) DoWithBody(req *http.Request, 
body io.ReadSeeker) (*http.Response, error) { + return &http.Response{ + Status: http.StatusText(m.status), + StatusCode: m.status, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(bytes.NewReader(m.body)), + }, nil +} + +func (m *mockHttpClient) SetBody(c *gc.C, v interface{}) { + b, err := json.Marshal(&v) + c.Assert(err, jc.ErrorIsNil) + m.body = b +} === added directory 'src/github.com/juju/romulus/cmd' === added directory 'src/github.com/juju/romulus/cmd/agree' === added file 'src/github.com/juju/romulus/cmd/agree/agree.go' --- src/github.com/juju/romulus/cmd/agree/agree.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/agree/agree.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,241 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agree + +import ( + "bufio" + "bytes" + "fmt" + "os" + "os/exec" + "strconv" + "strings" + + "github.com/juju/cmd" + "github.com/juju/errors" + "launchpad.net/gnuflag" + + "github.com/juju/romulus/api/terms" + rcmd "github.com/juju/romulus/cmd" +) + +var ( + clientNew = terms.NewClient +) + +const agreeDoc = ` +Agree to the terms required by a charm. + +When deploying a charm that requires agreement to terms, use 'juju agree' to +view the terms and agree to them. Then the charm may be deployed. + +Once you have agreed to terms, you will not be prompted to view them again. + +Examples: + + juju agree somePlan/1 + Displays terms for somePlan revision 1 and prompts for agreement. + juju agree somePlan/1 otherPlan/2 + Displays the terms for revision 1 of somePlan, revision 2 of otherPlan, + and prompts for agreement. + juju agree somePlan/1 otherPlan/2 --yes + Agrees to the terms without prompting. +` + +// NewAgreeCommand returns a new command that can be +// used to create user agreements. +func NewAgreeCommand() cmd.Command { + return &agreeCommand{} +} + +type term struct { + name string + revision int +} + +// agreeCommand creates a user agreement to the specified terms. +type agreeCommand struct { + rcmd.HttpCommand + out cmd.Output + + terms []term + termIds []string + SkipTermContent bool +} + +// SetFlags implements Command.SetFlags. +func (c *agreeCommand) SetFlags(f *gnuflag.FlagSet) { + f.BoolVar(&c.SkipTermContent, "yes", false, "agree to terms non interactively") + c.out.AddFlags(f, "json", cmd.DefaultFormatters) +} + +// Info implements Command.Info. +func (c *agreeCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "agree", + Args: "", + Purpose: "agree to terms", + Doc: agreeDoc, + } +} + +// Init read and verifies the arguments. +func (c *agreeCommand) Init(args []string) error { + if len(args) < 1 { + return errors.New("missing arguments") + } + + for _, t := range args { + name, rev, err := parseTermRevision(t) + if err != nil { + return errors.Annotate(err, "invalid term format") + } + if rev == 0 { + return errors.Errorf("must specify a valid term revision %q", t) + } + c.terms = append(c.terms, term{name, rev}) + c.termIds = append(c.termIds, t) + } + if len(c.terms) == 0 { + return errors.New("must specify a valid term revision") + } + return nil +} + +// Run implements Command.Run. 
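+// With --yes the named terms are agreed to immediately; otherwise the command
+// fetches the terms the user has not yet signed, pages their content through
+// "less", asks for confirmation, and saves the agreements when the answer is
+// "y", "Y" or empty.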
+func (c *agreeCommand) Run(ctx *cmd.Context) error { + client, err := c.NewClient() + if err != nil { + return errors.Trace(err) + } + defer c.Close() + + termsClient, err := clientNew(terms.HTTPClient(client)) + if err != nil { + return err + } + + if c.SkipTermContent { + err := saveAgreements(ctx, termsClient, c.terms) + if err != nil { + return errors.Trace(err) + } + return nil + } + + needAgreement := []terms.GetTermsResponse{} + terms, err := termsClient.GetUnsignedTerms(&terms.CheckAgreementsRequest{ + Terms: c.termIds, + }) + if err != nil { + return errors.Annotate(err, "failed to retrieve terms") + } + needAgreement = append(needAgreement, terms...) + + if len(needAgreement) == 0 { + fmt.Fprintf(ctx.Stdout, "Already agreed\n") + return nil + } + + err = printTerms(ctx, needAgreement) + if err != nil { + return errors.Trace(err) + } + fmt.Fprintf(ctx.Stdout, "Do you agree to the displayed terms? (Y/n): ") + answer, err := userAnswer() + if err != nil { + return errors.Trace(err) + } + + agreedTerms := make([]term, len(needAgreement)) + for i, t := range needAgreement { + agreedTerms[i] = term{name: t.Name, revision: t.Revision} + } + + answer = strings.TrimSpace(answer) + if userAgrees(answer) { + err = saveAgreements(ctx, termsClient, agreedTerms) + if err != nil { + return errors.Trace(err) + } + } else { + fmt.Fprintf(ctx.Stdout, "You didn't agree to the presented terms.\n") + return nil + } + + return nil +} + +func saveAgreements(ctx *cmd.Context, termsClient terms.Client, ts []term) error { + agreements := make([]terms.SaveAgreement, len(ts)) + for i, t := range ts { + agreements[i] = terms.SaveAgreement{ + TermName: t.name, + TermRevision: t.revision, + } + } + response, err := termsClient.SaveAgreement(&terms.SaveAgreements{Agreements: agreements}) + if err != nil { + return errors.Annotate(err, "failed to save user agreement") + } + for _, agreement := range response.Agreements { + _, err = fmt.Fprintf(ctx.Stdout, "Agreed to revision %v of %v for Juju users\n", agreement.Revision, agreement.Term) + if err != nil { + return errors.Trace(err) + } + } + return nil +} + +var userAnswer = func() (string, error) { + return bufio.NewReader(os.Stdin).ReadString('\n') +} + +func parseTermRevision(s string) (string, int, error) { + fail := func(err error) (string, int, error) { + return "", -1, err + } + tokens := strings.Split(s, "/") + if len(tokens) == 1 { + return tokens[0], 0, nil + } else if len(tokens) > 2 { + return fail(errors.New("unknown term revision format")) + } + + termName := tokens[0] + termRevisionString := tokens[1] + termRevision, err := strconv.Atoi(termRevisionString) + if err != nil { + return fail(errors.Trace(err)) + } + return termName, termRevision, nil +} + +func printTerms(ctx *cmd.Context, terms []terms.GetTermsResponse) error { + output := "" + for _, t := range terms { + output += fmt.Sprintf(` +=== %v/%v: %v === +%v +======== +`, t.Name, t.Revision, t.CreatedOn, t.Content) + } + buffer := bytes.NewReader([]byte(output)) + less := exec.Command("less") + less.Stdout = ctx.Stdout + less.Stdin = buffer + err := less.Run() + if err != nil { + fmt.Fprintf(ctx.Stdout, output) + return errors.Annotate(err, "failed to print plan") + } + return nil +} + +func userAgrees(input string) bool { + if input == "y" || input == "Y" || input == "" { + return true + } + return false +} === added file 'src/github.com/juju/romulus/cmd/agree/agree_test.go' --- src/github.com/juju/romulus/cmd/agree/agree_test.go 1970-01-01 00:00:00 +0000 +++ 
src/github.com/juju/romulus/cmd/agree/agree_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,218 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agree_test + +import ( + "sync" + "testing" + + "github.com/juju/cmd/cmdtesting" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/api/terms" + "github.com/juju/romulus/cmd/agree" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +var _ = gc.Suite(&agreeSuite{}) + +var testTerms = "Test Terms" + +type agreeSuite struct { + client *mockClient +} + +func (s *agreeSuite) SetUpTest(c *gc.C) { + s.client = &mockClient{} + + jujutesting.PatchValue(agree.ClientNew, func(...terms.ClientOption) (terms.Client, error) { + return s.client, nil + }) +} + +func (s *agreeSuite) TestAgreementNothingToSign(c *gc.C) { + jujutesting.PatchValue(agree.UserAnswer, func() (string, error) { + return "y", nil + }) + + s.client.user = "test-user" + s.client.setUnsignedTerms([]terms.GetTermsResponse{}) + + ctx, err := cmdtesting.RunCommand(c, agree.NewAgreeCommand(), "test-term/1") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, `Already agreed +`) +} +func (s *agreeSuite) TestAgreement(c *gc.C) { + var answer string + jujutesting.PatchValue(agree.UserAnswer, func() (string, error) { + return answer, nil + }) + + s.client.user = "test-user" + s.client.setUnsignedTerms([]terms.GetTermsResponse{{ + Name: "test-term", + Revision: 1, + Content: testTerms, + }}) + tests := []struct { + about string + args []string + err string + stdout string + answer string + apiCalls []jujutesting.StubCall + }{{ + about: "everything works", + args: []string{"test-term/1", "--yes"}, + stdout: "Agreed to revision 1 of test-term for Juju users\n", + apiCalls: []jujutesting.StubCall{{FuncName: "SaveAgreement", Args: []interface{}{&terms.SaveAgreements{Agreements: []terms.SaveAgreement{{TermName: "test-term", TermRevision: 1}}}}}}, + }, { + about: "cannot parse revision number", + args: []string{"test-term/abc"}, + err: "invalid term format: strconv.ParseInt: parsing \"abc\": invalid syntax", + }, { + about: "missing arguments", + args: []string{}, + err: "missing arguments", + }, { + about: "everything works - user accepts", + args: []string{"test-term/1"}, + answer: "y", + stdout: ` +=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the displayed terms? (Y/n): Agreed to revision 1 of test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{{ + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &terms.CheckAgreementsRequest{Terms: []string{"test-term/1"}}, + }, + }, { + FuncName: "SaveAgreement", Args: []interface{}{ + &terms.SaveAgreements{Agreements: []terms.SaveAgreement{{TermName: "test-term", TermRevision: 1}}}, + }, + }}, + }, { + about: "everything works - user refuses", + args: []string{"test-term/1"}, + answer: "n", + stdout: ` +=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the displayed terms? (Y/n): You didn't agree to the presented terms. 
+`, + apiCalls: []jujutesting.StubCall{{ + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &terms.CheckAgreementsRequest{Terms: []string{"test-term/1"}}, + }, + }}, + }, { + about: "must not accept 0 revision", + args: []string{"test-term/0", "--yes"}, + err: `must specify a valid term revision "test-term/0"`, + }, { + about: "user accepts, multiple terms", + args: []string{"test-term/1", "test-term/2"}, + answer: "y", + stdout: ` +=== test-term/1: 0001-01-01 00:00:00 +0000 UTC === +Test Terms +======== +Do you agree to the displayed terms? (Y/n): Agreed to revision 1 of test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{ + { + FuncName: "GetUnunsignedTerms", Args: []interface{}{ + &terms.CheckAgreementsRequest{Terms: []string{"test-term/1", "test-term/2"}}, + }, + }, { + FuncName: "SaveAgreement", Args: []interface{}{ + &terms.SaveAgreements{Agreements: []terms.SaveAgreement{ + {TermName: "test-term", TermRevision: 1}, + }}, + }, + }}, + }, { + about: "valid then unknown arguments", + args: []string{"test-term/1", "unknown", "arguments"}, + err: `must specify a valid term revision "unknown"`, + }, { + about: "user accepts all the terms", + args: []string{"test-term/1", "test-term/2", "--yes"}, + stdout: `Agreed to revision 1 of test-term for Juju users +Agreed to revision 2 of test-term for Juju users +`, + apiCalls: []jujutesting.StubCall{ + {FuncName: "SaveAgreement", Args: []interface{}{&terms.SaveAgreements{ + Agreements: []terms.SaveAgreement{ + {TermName: "test-term", TermRevision: 1}, + {TermName: "test-term", TermRevision: 2}, + }}}}}, + }, + } + for i, test := range tests { + s.client.ResetCalls() + c.Logf("running test %d: %s", i, test.about) + if test.answer != "" { + answer = test.answer + } + ctx, err := cmdtesting.RunCommand(c, agree.NewAgreeCommand(), test.args...) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, jc.ErrorIsNil) + } + if ctx != nil { + c.Assert(cmdtesting.Stdout(ctx), gc.Equals, test.stdout) + } + if len(test.apiCalls) > 0 { + s.client.CheckCalls(c, test.apiCalls) + } + } +} + +type mockClient struct { + jujutesting.Stub + + lock sync.Mutex + user string + terms []terms.GetTermsResponse + unsignedTerms []terms.GetTermsResponse +} + +func (c *mockClient) setUnsignedTerms(t []terms.GetTermsResponse) { + c.lock.Lock() + defer c.lock.Unlock() + c.unsignedTerms = t +} + +// SaveAgreement saves user's agreement to the specified +// revision of the terms documents +func (c *mockClient) SaveAgreement(p *terms.SaveAgreements) (*terms.SaveAgreementResponses, error) { + c.AddCall("SaveAgreement", p) + responses := make([]terms.AgreementResponse, len(p.Agreements)) + for i, agreement := range p.Agreements { + responses[i] = terms.AgreementResponse{ + User: c.user, + Term: agreement.TermName, + Revision: agreement.TermRevision, + } + } + return &terms.SaveAgreementResponses{responses}, nil +} + +func (c *mockClient) GetUnsignedTerms(p *terms.CheckAgreementsRequest) ([]terms.GetTermsResponse, error) { + c.MethodCall(c, "GetUnunsignedTerms", p) + r := make([]terms.GetTermsResponse, len(c.unsignedTerms)) + copy(r, c.unsignedTerms) + return r, nil +} === added file 'src/github.com/juju/romulus/cmd/agree/export.go' --- src/github.com/juju/romulus/cmd/agree/export.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/agree/export.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agree + +// These two var are exported becuase they are useful in tests outside of this +// package. Unless you are writing a test you shouldn't be using either of these +// values. +var ( + ClientNew = &clientNew + UserAnswer = &userAnswer +) === added directory 'src/github.com/juju/romulus/cmd/allocate' === added file 'src/github.com/juju/romulus/cmd/allocate/allocate.go' --- src/github.com/juju/romulus/cmd/allocate/allocate.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/allocate/allocate.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,137 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package allocate + +import ( + "fmt" + "regexp" + "strings" + + "github.com/juju/cmd" + "github.com/juju/errors" + "github.com/juju/juju/cmd/modelcmd" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "launchpad.net/gnuflag" + + api "github.com/juju/romulus/api/budget" + rcmd "github.com/juju/romulus/cmd" +) + +var budgetWithLimitRe = regexp.MustCompile(`^[a-zA-Z0-9\-]+:[1-9][0-9]*$`) + +type allocateCommand struct { + modelcmd.ModelCommandBase + rcmd.HttpCommand + api apiClient + Budget string + Model string + Services []string + Limit string +} + +// NewAllocateCommand returns a new allocateCommand +func NewAllocateCommand() cmd.Command { + return modelcmd.Wrap(&allocateCommand{}) +} + +const doc = ` +Allocate budget for the specified services, replacing any prior allocations +made for the specified services. + +Usage: + + juju allocate : [ ...] + +Example: + + juju allocate somebudget:42 db + Assigns service "db" to an allocation on budget "somebudget" with the limit "42". +` + +// Info implements cmd.Command.Info. +func (c *allocateCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "allocate", + Purpose: "allocate budget to services", + Doc: doc, + } +} + +// SetFlags implements cmd.Command. +func (c *allocateCommand) SetFlags(f *gnuflag.FlagSet) { + c.ModelCommandBase.SetFlags(f) +} + +// AllowInterspersedFlags implements cmd.Command. +func (c *allocateCommand) AllowInterspersedFlags() bool { return true } + +// IsSuperCommand implements cmd.Command. +func (c *allocateCommand) IsSuperCommand() bool { return false } + +// Init implements cmd.Command.Init. +func (c *allocateCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("budget and service name required") + } + budgetWithLimit := args[0] + var err error + c.Budget, c.Limit, err = parseBudgetWithLimit(budgetWithLimit) + if err != nil { + return err + } + c.Model, err = c.modelUUID() + if err != nil { + return err + } + + c.Services = args[1:] + return nil +} + +// Run implements cmd.Command.Run and has most of the logic for the run command. 
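+// It builds the HTTP and budget API clients and then delegates to
+// apiClient.CreateAllocation with the budget, limit, model UUID and service
+// names gathered by Init, printing the service's response on success.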
+func (c *allocateCommand) Run(ctx *cmd.Context) error { + defer c.Close() + client, err := c.NewClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := c.newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.CreateAllocation(c.Budget, c.Limit, c.Model, c.Services) + if err != nil { + return errors.Annotate(err, "failed to create allocation") + } + fmt.Fprintf(ctx.Stdout, resp) + return nil +} + +func (c *allocateCommand) modelUUID() (string, error) { + model, err := c.ClientStore().ModelByName(c.ControllerName(), c.AccountName(), c.ModelName()) + if err != nil { + return "", errors.Trace(err) + } + return model.ModelUUID, nil +} + +func parseBudgetWithLimit(bl string) (string, string, error) { + if !budgetWithLimitRe.MatchString(bl) { + return "", "", errors.New("invalid budget specification, expecting :") + } + parts := strings.Split(bl, ":") + return parts[0], parts[1], nil +} + +func (c *allocateCommand) newAPIClient(bakery *httpbakery.Client) (apiClient, error) { + if c.api != nil { + return c.api, nil + } + c.api = api.NewClient(bakery) + return c.api, nil +} + +type apiClient interface { + CreateAllocation(string, string, string, []string) (string, error) +} === added file 'src/github.com/juju/romulus/cmd/allocate/allocate_test.go' --- src/github.com/juju/romulus/cmd/allocate/allocate_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/allocate/allocate_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,120 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package allocate_test + +import ( + "github.com/juju/cmd" + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/testing" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/cmd/allocate" +) + +var _ = gc.Suite(&allocateSuite{}) + +type allocateSuite struct { + jujutesting.FakeHomeSuite + stub *testing.Stub + mockAPI *mockapi + store jujuclient.ClientStore +} + +func (s *allocateSuite) SetUpTest(c *gc.C) { + s.FakeHomeSuite.SetUpTest(c) + s.store = &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + Models: map[string]jujuclient.ControllerAccountModels{ + "controller": { + AccountModels: map[string]*jujuclient.AccountModels{ + "admin@local": { + Models: map[string]jujuclient.ModelDetails{ + "model": {"model-uuid"}, + }, + CurrentModel: "model", + }, + }, + }, + }, + Accounts: map[string]*jujuclient.ControllerAccounts{ + "controller": { + Accounts: map[string]jujuclient.AccountDetails{ + "admin@local": {}, + }, + CurrentAccount: "admin@local", + }, + }, + } + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) +} + +func (s *allocateSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { + alloc := allocate.NewAllocateCommandForTest(s.mockAPI, s.store) + a := []string{"-m", "controller:model"} + a = append(a, args...) + return cmdtesting.RunCommand(c, alloc, a...) 
+}
+
+func (s *allocateSuite) TestAllocate(c *gc.C) {
+	s.mockAPI.resp = "allocation updated"
+	ctx, err := s.run(c, "name:100", "db")
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "allocation updated")
+	s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "100", "model-uuid", []string{"db"})
+}
+
+func (s *allocateSuite) TestAllocateAPIError(c *gc.C) {
+	s.stub.SetErrors(errors.New("something failed"))
+	_, err := s.run(c, "name:100", "db")
+	c.Assert(err, gc.ErrorMatches, "failed to create allocation: something failed")
+	s.mockAPI.CheckCall(c, 0, "CreateAllocation", "name", "100", "model-uuid", []string{"db"})
+}
+
+func (s *allocateSuite) TestAllocateErrors(c *gc.C) {
+	tests := []struct {
+		about         string
+		args          []string
+		expectedError string
+	}{{
+		about:         "no args",
+		args:          []string{},
+		expectedError: "budget and service name required",
+	}, {
+		about:         "budget without allocation limit",
+		args:          []string{"name", "db"},
+		expectedError: "invalid budget specification, expecting <budget>:<limit>",
+	}, {
+		about:         "service not specified",
+		args:          []string{"name:100"},
+		expectedError: "budget and service name required",
+	}}
+	for i, test := range tests {
+		c.Logf("test %d: %s", i, test.about)
+		_, err := s.run(c, test.args...)
+		c.Check(err, gc.ErrorMatches, test.expectedError)
+		s.mockAPI.CheckNoCalls(c)
+	}
+}
+
+func newMockAPI(s *testing.Stub) *mockapi {
+	return &mockapi{Stub: s}
+}
+
+type mockapi struct {
+	*testing.Stub
+	resp string
+}
+
+func (api *mockapi) CreateAllocation(name, limit, modelUUID string, services []string) (string, error) {
+	api.MethodCall(api, "CreateAllocation", name, limit, modelUUID, services)
+	return api.resp, api.NextErr()
+}

=== added file 'src/github.com/juju/romulus/cmd/allocate/export_test.go'
--- src/github.com/juju/romulus/cmd/allocate/export_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/allocate/export_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,16 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package allocate
+
+import (
+	"github.com/juju/cmd"
+	"github.com/juju/juju/cmd/modelcmd"
+	"github.com/juju/juju/jujuclient"
+)
+
+func NewAllocateCommandForTest(api apiClient, store jujuclient.ClientStore) cmd.Command {
+	c := &allocateCommand{api: api}
+	c.SetClientStore(store)
+	return modelcmd.Wrap(c)
+}

=== added file 'src/github.com/juju/romulus/cmd/allocate/package_test.go'
--- src/github.com/juju/romulus/cmd/allocate/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/allocate/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package allocate_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestAll(t *stdtesting.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/romulus/cmd/cmd.go'
--- src/github.com/juju/romulus/cmd/cmd.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/cmd.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,44 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cmd
+
+import (
+	"github.com/juju/cmd"
+	"github.com/juju/errors"
+	"github.com/juju/persistent-cookiejar"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+)
+
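// HttpCommand, defined just below, hides a persistent cookie jar behind a
// NewClient/Close pair. A minimal sketch of the lifecycle its callers follow;
// the exampleCommand type here is hypothetical and exists only to host the
// embedded helper.
package main

import (
	"fmt"

	rcmd "github.com/juju/romulus/cmd"
)

type exampleCommand struct {
	rcmd.HttpCommand
}

func (c *exampleCommand) run() error {
	// Close flushes the cookie jar to disk if NewClient created one.
	defer c.Close()
	client, err := c.NewClient()
	if err != nil {
		return err
	}
	fmt.Printf("bakery client ready: %T\n", client)
	return nil
}

func main() {
	c := &exampleCommand{}
	if err := c.run(); err != nil {
		fmt.Println("error:", err)
	}
}

+// HttpCommand can instantiate http bakery clients using a common cookie jar.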
+type HttpCommand struct { + cmd.CommandBase + + cookiejar *cookiejar.Jar +} + +// NewClient returns a new http bakery client for commands. +func (s *HttpCommand) NewClient() (*httpbakery.Client, error) { + if s.cookiejar == nil { + cookieFile := cookiejar.DefaultCookieFile() + jar, err := cookiejar.New(&cookiejar.Options{ + Filename: cookieFile, + }) + if err != nil { + return nil, errors.Trace(err) + } + s.cookiejar = jar + } + client := httpbakery.NewClient() + client.Jar = s.cookiejar + client.VisitWebPage = httpbakery.OpenWebBrowser + return client, nil +} + +// Close saves the persistent cookie jar used by the specified httpbakery.Client. +func (s *HttpCommand) Close() error { + if s.cookiejar != nil { + return s.cookiejar.Save() + } + return nil +} === added file 'src/github.com/juju/romulus/cmd/cmd_test.go' --- src/github.com/juju/romulus/cmd/cmd_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/cmd_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,51 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd_test + +import ( + gc "gopkg.in/check.v1" + stdtesting "testing" + + jujucmd "github.com/juju/cmd" + "github.com/juju/cmd/cmdtesting" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + + "github.com/juju/romulus/cmd" +) + +func TestPackage(t *stdtesting.T) { + gc.TestingT(t) +} + +type httpSuite struct { + testing.CleanupSuite + caCert string +} + +var _ = gc.Suite(&httpSuite{}) + +type testCommand struct { + cmd.HttpCommand +} + +func (c *testCommand) Info() *jujucmd.Info { + return &jujucmd.Info{Name: "test"} +} + +func (c *testCommand) Run(ctx *jujucmd.Context) error { + return nil +} + +func (s *httpSuite) TestNewClient(c *gc.C) { + basecmd := &testCommand{} + defer basecmd.Close() + + _, err := cmdtesting.RunCommand(c, basecmd) + c.Assert(err, jc.ErrorIsNil) + + client, err := basecmd.NewClient() + c.Assert(err, jc.ErrorIsNil) + c.Assert(client, gc.NotNil) +} === added directory 'src/github.com/juju/romulus/cmd/commands' === added file 'src/github.com/juju/romulus/cmd/commands/commands.go' --- src/github.com/juju/romulus/cmd/commands/commands.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/commands/commands.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,37 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package commands provides functionality for registering all the romulus commands. +package commands + +import ( + "github.com/juju/cmd" + + "github.com/juju/romulus/cmd/agree" + "github.com/juju/romulus/cmd/allocate" + "github.com/juju/romulus/cmd/createbudget" + "github.com/juju/romulus/cmd/listbudgets" + "github.com/juju/romulus/cmd/listplans" + "github.com/juju/romulus/cmd/setbudget" + "github.com/juju/romulus/cmd/setplan" + "github.com/juju/romulus/cmd/showbudget" + "github.com/juju/romulus/cmd/updateallocation" +) + +type commandRegister interface { + Register(cmd.Command) +} + +// RegisterAll registers all romulus commands with the +// provided command registry. 
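// RegisterAll below is the integration point with the juju CLI: anything that
// satisfies the one-method commandRegister interface can receive the full
// romulus command set. A hypothetical standalone register, added here only to
// show the shape of that contract (the real registry is juju's supercommand):
package main

import (
	"fmt"

	"github.com/juju/cmd"

	"github.com/juju/romulus/cmd/commands"
)

// printRegister records registered command names; it stands in for the real
// juju supercommand.
type printRegister struct {
	names []string
}

func (r *printRegister) Register(c cmd.Command) {
	r.names = append(r.names, c.Info().Name)
}

func main() {
	r := &printRegister{}
	commands.RegisterAll(r)
	fmt.Println(r.names) // [agree allocate create-budget ...]
}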
+func RegisterAll(r commandRegister) { + r.Register(agree.NewAgreeCommand()) + r.Register(allocate.NewAllocateCommand()) + r.Register(createbudget.NewCreateBudgetCommand()) + r.Register(listbudgets.NewListBudgetsCommand()) + r.Register(listplans.NewListPlansCommand()) + r.Register(setbudget.NewSetBudgetCommand()) + r.Register(setplan.NewSetPlanCommand()) + r.Register(showbudget.NewShowBudgetCommand()) + r.Register(updateallocation.NewUpdateAllocationCommand()) +} === added file 'src/github.com/juju/romulus/cmd/commands/commands_test.go' --- src/github.com/juju/romulus/cmd/commands/commands_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/commands/commands_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package commands_test + +import ( + stdtesting "testing" + + "github.com/juju/cmd" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/cmd/commands" +) + +type commandSuite struct{} + +var _ = gc.Suite(&commandSuite{}) + +type mockRegister struct { + commands []string +} + +func (m *mockRegister) Register(command cmd.Command) { + m.commands = append(m.commands, command.Info().Name) +} + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} + +func (s *commandSuite) TestRegister(c *gc.C) { + m := &mockRegister{} + commands.RegisterAll(m) + c.Assert(m.commands, gc.DeepEquals, []string{ + "agree", + "allocate", + "create-budget", + "list-budgets", + "list-plans", + "set-budget", + "set-plan", + "show-budget", + "update-allocation", + }) +} === added directory 'src/github.com/juju/romulus/cmd/createbudget' === added file 'src/github.com/juju/romulus/cmd/createbudget/createbudget.go' --- src/github.com/juju/romulus/cmd/createbudget/createbudget.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/createbudget/createbudget.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,86 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package createbudget + +import ( + "fmt" + "strconv" + + "github.com/juju/cmd" + "github.com/juju/errors" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" + rcmd "github.com/juju/romulus/cmd" +) + +type createBudgetCommand struct { + rcmd.HttpCommand + Name string + Value string +} + +// NewCreateBudgetCommand returns a new createBudgetCommand +func NewCreateBudgetCommand() cmd.Command { + return &createBudgetCommand{} +} + +const doc = ` +Create a new budget with monthly limit. + +Example: + juju create-budget qa 42 + Creates a budget named 'qa' with a limit of 42. +` + +// Info implements cmd.Command.Info. +func (c *createBudgetCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "create-budget", + Purpose: "create a new budget", + Doc: doc, + } +} + +// Init implements cmd.Command.Init. +func (c *createBudgetCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("name and value required") + } + c.Name, c.Value = args[0], args[1] + if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { + return errors.New("budget value needs to be a whole number") + } + return cmd.CheckEmpty(args[2:]) +} + +// Run implements cmd.Command.Run and has most of the logic for the run command. 
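// A standalone sketch of the whole-number validation done by Init above:
// strconv.ParseInt with bitSize 32 accepts only integral values that fit in
// an int32, so "42" passes while "4.2" and "abc" are rejected. The helper and
// sample values are illustrative additions.
package main

import (
	"fmt"
	"strconv"
)

// validBudgetValue reports whether v is a whole number, mirroring the check
// in createBudgetCommand.Init.
func validBudgetValue(v string) bool {
	_, err := strconv.ParseInt(v, 10, 32)
	return err == nil
}

func main() {
	for _, v := range []string{"42", "4.2", "abc", "-1"} {
		fmt.Println(v, validBudgetValue(v))
	}
}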
+func (c *createBudgetCommand) Run(ctx *cmd.Context) error { + defer c.Close() + client, err := c.NewClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.CreateBudget(c.Name, c.Value) + if err != nil { + return errors.Annotate(err, "failed to create the budget") + } + fmt.Fprintf(ctx.Stdout, resp) + return nil +} + +var newAPIClient = newAPIClientImpl + +func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { + client := api.NewClient(c) + return client, nil +} + +type apiClient interface { + CreateBudget(name string, limit string) (string, error) +} === added file 'src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go' --- src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/createbudget/createbudget_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,93 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package createbudget_test + +import ( + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/cmd/createbudget" +) + +var _ = gc.Suite(&createBudgetSuite{}) + +type createBudgetSuite struct { + testing.CleanupSuite + stub *testing.Stub + mockAPI *mockapi +} + +func (s *createBudgetSuite) SetUpTest(c *gc.C) { + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) + s.PatchValue(createbudget.NewAPIClient, createbudget.APIClientFnc(s.mockAPI)) +} + +func (s *createBudgetSuite) TestCreateBudget(c *gc.C) { + s.mockAPI.resp = "name budget set to 5" + createCmd := createbudget.NewCreateBudgetCommand() + ctx, err := cmdtesting.RunCommand(c, createCmd, "name", "5") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5") + s.mockAPI.CheckCall(c, 0, "CreateBudget", "name", "5") +} + +func (s *createBudgetSuite) TestCreateBudgetAPIError(c *gc.C) { + s.mockAPI.SetErrors(errors.New("something failed")) + createCmd := createbudget.NewCreateBudgetCommand() + _, err := cmdtesting.RunCommand(c, createCmd, "name", "5") + c.Assert(err, gc.ErrorMatches, "failed to create the budget: something failed") + s.mockAPI.CheckCall(c, 0, "CreateBudget", "name", "5") +} + +func (s *createBudgetSuite) TestCreateBudgetErrors(c *gc.C) { + tests := []struct { + about string + args []string + expectedError string + }{ + { + about: "test value needs to be a number", + args: []string{"name", "badvalue"}, + expectedError: "budget value needs to be a whole number", + }, + { + about: "value is missing", + args: []string{"name"}, + expectedError: "name and value required", + }, + { + about: "no args", + args: []string{}, + expectedError: "name and value required", + }, + } + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + if test.expectedError != "" { + s.mockAPI.SetErrors(errors.New(test.expectedError)) + } + createCmd := createbudget.NewCreateBudgetCommand() + _, err := cmdtesting.RunCommand(c, createCmd, test.args...) 
+ c.Assert(err, gc.ErrorMatches, test.expectedError) + s.mockAPI.CheckNoCalls(c) + } +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +type mockapi struct { + *testing.Stub + resp string +} + +func (api *mockapi) CreateBudget(name, value string) (string, error) { + api.MethodCall(api, "CreateBudget", name, value) + return api.resp, api.NextErr() +} === added file 'src/github.com/juju/romulus/cmd/createbudget/export_test.go' --- src/github.com/juju/romulus/cmd/createbudget/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/createbudget/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package createbudget + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewAPIClient = &newAPIClient +) + +func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} === added file 'src/github.com/juju/romulus/cmd/createbudget/package_test.go' --- src/github.com/juju/romulus/cmd/createbudget/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/createbudget/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package createbudget_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added directory 'src/github.com/juju/romulus/cmd/listbudgets' === added file 'src/github.com/juju/romulus/cmd/listbudgets/export_test.go' --- src/github.com/juju/romulus/cmd/listbudgets/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listbudgets/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listbudgets + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewAPIClient = &newAPIClient +) + +// APIClientFnc returns a function that returns the provided apiClient +// and can be used to patch the NewAPIClient variable for tests. +func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} === added file 'src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go' --- src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listbudgets/list-budgets.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,111 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listbudgets + +import ( + "sort" + + "github.com/gosuri/uitable" + "github.com/juju/cmd" + "github.com/juju/errors" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "launchpad.net/gnuflag" + + api "github.com/juju/romulus/api/budget" + rcmd "github.com/juju/romulus/cmd" + wireformat "github.com/juju/romulus/wireformat/budget" +) + +// NewListBudgetsCommand returns a new command that is used +// to list budgets a user has access to. +func NewListBudgetsCommand() cmd.Command { + return &listBudgetsCommand{} +} + +type listBudgetsCommand struct { + rcmd.HttpCommand + + out cmd.Output +} + +const listBudgetsDoc = ` +List the available budgets. + +Example: + juju list-budgets +` + +// Info implements cmd.Command.Info. 
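// The export_test.go files above all use the same test seam: production code
// calls through a package-level function variable (newAPIClient), and tests
// swap it out via a pointer exported only to the test build. A distilled,
// self-contained sketch of that pattern (all names here are illustrative):
package main

import "fmt"

type apiClient interface{ Ping() string }

// Production seam: callers go through this variable, never the concrete type.
var newAPIClient = func() apiClient { return realClient{} }

type realClient struct{}

func (realClient) Ping() string { return "real" }

type fakeClient struct{}

func (fakeClient) Ping() string { return "fake" }

func main() {
	fmt.Println(newAPIClient().Ping()) // real
	// What PatchValue does, in essence: save, replace, restore.
	saved := newAPIClient
	newAPIClient = func() apiClient { return fakeClient{} }
	fmt.Println(newAPIClient().Ping()) // fake
	newAPIClient = saved
}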
+func (c *listBudgetsCommand) Info() *cmd.Info {
+	return &cmd.Info{
+		Name:    "list-budgets",
+		Purpose: "list budgets",
+		Doc:     listBudgetsDoc,
+	}
+}
+
+// SetFlags implements cmd.Command.SetFlags.
+func (c *listBudgetsCommand) SetFlags(f *gnuflag.FlagSet) {
+	c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{
+		"tabular": formatTabular,
+	})
+}
+
+func (c *listBudgetsCommand) Run(ctx *cmd.Context) error {
+	defer c.Close()
+	client, err := c.NewClient()
+	if err != nil {
+		return errors.Annotate(err, "failed to create an http client")
+	}
+	api, err := newAPIClient(client)
+	if err != nil {
+		return errors.Annotate(err, "failed to create an api client")
+	}
+	budgets, err := api.ListBudgets()
+	if err != nil {
+		return errors.Annotate(err, "failed to retrieve budgets")
+	}
+	if budgets == nil {
+		return errors.New("no budget information available")
+	}
+	err = c.out.Write(ctx, budgets)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// formatTabular returns a tabular view of available budgets.
+func formatTabular(value interface{}) ([]byte, error) {
+	b, ok := value.(*wireformat.ListBudgetsResponse)
+	if !ok {
+		return nil, errors.Errorf("expected value of type %T, got %T", b, value)
+	}
+	sort.Sort(b.Budgets)
+
+	table := uitable.New()
+	table.MaxColWidth = 50
+	table.Wrap = true
+
+	table.AddRow("BUDGET", "MONTHLY", "ALLOCATED", "AVAILABLE", "SPENT")
+	for _, budgetEntry := range b.Budgets {
+		table.AddRow(budgetEntry.Budget, budgetEntry.Limit, budgetEntry.Allocated, budgetEntry.Available, budgetEntry.Consumed)
+	}
+	table.AddRow("TOTAL", b.Total.Limit, b.Total.Allocated, b.Total.Available, b.Total.Consumed)
+	table.AddRow("", "", "", "", "")
+	table.AddRow("Credit limit:", b.Credit, "", "", "")
+	return []byte(table.String()), nil
+}
+
+var newAPIClient = newAPIClientImpl
+
+func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) {
+	client := api.NewClient(c)
+	return client, nil
+}
+
+type apiClient interface {
+	// ListBudgets returns a list of budgets a user has access to.
+	ListBudgets() (*wireformat.ListBudgetsResponse, error)
+}

=== added file 'src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go'
--- src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/listbudgets/list-budgets_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,148 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
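// formatTabular above relies on github.com/gosuri/uitable for column layout,
// using the same three knobs everywhere in this tree. A minimal sketch of
// that API with made-up budget rows (the values are illustrative, not from
// the real service):
package main

import (
	"fmt"

	"github.com/gosuri/uitable"
)

func main() {
	table := uitable.New()
	table.MaxColWidth = 50 // truncate or wrap long cells
	table.Wrap = true      // wrap rather than truncate

	table.AddRow("BUDGET", "MONTHLY", "ALLOCATED", "AVAILABLE", "SPENT")
	table.AddRow("personal", "50", "30", "45", "5")
	table.AddRow("TOTAL", "50", "30", "45", "5")
	fmt.Println(table.String())
}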
+ +package listbudgets_test + +import ( + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/cmd/listbudgets" + "github.com/juju/romulus/wireformat/budget" +) + +var _ = gc.Suite(&listBudgetsSuite{}) + +type listBudgetsSuite struct { + testing.CleanupSuite + stub *testing.Stub + mockAPI *mockapi +} + +func (s *listBudgetsSuite) SetUpTest(c *gc.C) { + s.stub = &testing.Stub{} + s.mockAPI = &mockapi{Stub: s.stub} + s.PatchValue(listbudgets.NewAPIClient, listbudgets.APIClientFnc(s.mockAPI)) +} + +func (s *listBudgetsSuite) TestUnexpectedParameters(c *gc.C) { + listBudgets := listbudgets.NewListBudgetsCommand() + _, err := cmdtesting.RunCommand(c, listBudgets, "unexpected") + c.Assert(err, gc.ErrorMatches, `unrecognized args: \["unexpected"\]`) +} + +func (s *listBudgetsSuite) TestAPIError(c *gc.C) { + s.mockAPI.SetErrors(errors.New("well, this is embarrassing")) + listBudgets := listbudgets.NewListBudgetsCommand() + _, err := cmdtesting.RunCommand(c, listBudgets) + c.Assert(err, gc.ErrorMatches, "failed to retrieve budgets: well, this is embarrassing") +} + +func (s *listBudgetsSuite) TestListBudgetsOutput(c *gc.C) { + s.mockAPI.result = &budget.ListBudgetsResponse{ + Budgets: budget.BudgetSummaries{ + budget.BudgetSummary{ + Owner: "bob", + Budget: "personal", + Limit: "50", + Allocated: "30", + Unallocated: "20", + Available: "45", + Consumed: "5", + }, + budget.BudgetSummary{ + Owner: "bob", + Budget: "work", + Limit: "200", + Allocated: "100", + Unallocated: "100", + Available: "150", + Consumed: "50", + }, + budget.BudgetSummary{ + Owner: "bob", + Budget: "team", + Limit: "50", + Allocated: "10", + Unallocated: "40", + Available: "40", + Consumed: "10", + }, + }, + Total: budget.BudgetTotals{ + Limit: "300", + Allocated: "140", + Available: "235", + Unallocated: "160", + Consumed: "65", + }, + Credit: "400", + } + // Expected command output. Make sure budgets are sorted alphabetically. 
+ expected := "" + + "BUDGET \tMONTHLY\tALLOCATED\tAVAILABLE\tSPENT\n" + + "personal \t50 \t30 \t45 \t5 \n" + + "team \t50 \t10 \t40 \t10 \n" + + "work \t200 \t100 \t150 \t50 \n" + + "TOTAL \t300 \t140 \t235 \t65 \n" + + " \t \t \t \t \n" + + "Credit limit:\t400 \t \t \t \n" + + listBudgets := listbudgets.NewListBudgetsCommand() + + ctx, err := cmdtesting.RunCommand(c, listBudgets) + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, expected) + s.mockAPI.CheckCallNames(c, "ListBudgets") +} + +func (s *listBudgetsSuite) TestListBudgetsOutputNoBudgets(c *gc.C) { + s.mockAPI.result = &budget.ListBudgetsResponse{ + Budgets: budget.BudgetSummaries{}, + Total: budget.BudgetTotals{ + Limit: "0", + Allocated: "0", + Available: "0", + Unallocated: "0", + Consumed: "0", + }, + Credit: "0", + } + expected := "" + + "BUDGET \tMONTHLY\tALLOCATED\tAVAILABLE\tSPENT\n" + + "TOTAL \t0 \t0 \t0 \t0 \n" + + " \t \t \t \t \n" + + "Credit limit:\t0 \t \t \t \n" + + listBudgets := listbudgets.NewListBudgetsCommand() + + ctx, err := cmdtesting.RunCommand(c, listBudgets) + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, expected) + s.mockAPI.CheckCallNames(c, "ListBudgets") +} + +func (s *listBudgetsSuite) TestListBudgetsNoOutput(c *gc.C) { + listBudgets := listbudgets.NewListBudgetsCommand() + + ctx, err := cmdtesting.RunCommand(c, listBudgets) + c.Assert(err, gc.ErrorMatches, `no budget information available`) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, ``) + s.mockAPI.CheckCallNames(c, "ListBudgets") +} + +type mockapi struct { + *testing.Stub + result *budget.ListBudgetsResponse +} + +func (api *mockapi) ListBudgets() (*budget.ListBudgetsResponse, error) { + api.AddCall("ListBudgets") + if err := api.NextErr(); err != nil { + return nil, err + } + return api.result, nil +} === added file 'src/github.com/juju/romulus/cmd/listbudgets/package_test.go' --- src/github.com/juju/romulus/cmd/listbudgets/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listbudgets/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listbudgets_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added directory 'src/github.com/juju/romulus/cmd/listplans' === added file 'src/github.com/juju/romulus/cmd/listplans/export_test.go' --- src/github.com/juju/romulus/cmd/listplans/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listplans/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listplans + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewClient = &newClient +) + +// APIClientFnc returns a function that returns the provided apiClient +// and can be used to patch the NewAPIClient variable for tests. +func APIClientFnc(api apiClient) func(client *httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} === added file 'src/github.com/juju/romulus/cmd/listplans/list_plans.go' --- src/github.com/juju/romulus/cmd/listplans/list_plans.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listplans/list_plans.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,221 @@ +// Copyright 2016 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// The listplans package contains the implementation of the command that
+// can be used to list plans that are available for a charm.
+package listplans
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"text/tabwriter"
+
+	"github.com/gosuri/uitable"
+	"github.com/juju/cmd"
+	"github.com/juju/errors"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/yaml.v2"
+	"launchpad.net/gnuflag"
+
+	api "github.com/juju/romulus/api/plan"
+	rcmd "github.com/juju/romulus/cmd"
+	wireformat "github.com/juju/romulus/wireformat/plan"
+)
+
+// apiClient defines the interface of the plan api client needed by this command.
+type apiClient interface {
+	// GetAssociatedPlans returns the plans associated with the charm.
+	GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error)
+}
+
+var newClient = func(client *httpbakery.Client) (apiClient, error) {
+	return api.NewClient(api.HTTPClient(client))
+}
+
+const listPlansDoc = `
+List plans available for the specified charm.
+
+Example:
+ juju list-plans cs:webapp
+`
+
+// ListPlansCommand retrieves plans that are available for the specified charm.
+type ListPlansCommand struct {
+	rcmd.HttpCommand
+
+	out      cmd.Output
+	CharmURL string
+
+	CharmResolver rcmd.CharmResolver
+}
+
+// NewListPlansCommand creates a new ListPlansCommand.
+func NewListPlansCommand() cmd.Command {
+	return &ListPlansCommand{
+		CharmResolver: rcmd.NewCharmStoreResolver(),
+	}
+}
+
+// Info implements Command.Info.
+func (c *ListPlansCommand) Info() *cmd.Info {
+	return &cmd.Info{
+		Name:    "list-plans",
+		Args:    "<charm url>",
+		Purpose: "list plans",
+		Doc:     listPlansDoc,
+	}
+}
+
+// Init reads and verifies the cli arguments for the ListPlansCommand.
+func (c *ListPlansCommand) Init(args []string) error {
+	if len(args) == 0 {
+		return errors.New("missing arguments")
+	}
+	charmURL, args := args[0], args[1:]
+	if err := cmd.CheckEmpty(args); err != nil {
+		return errors.Errorf("unknown command line arguments: %s", strings.Join(args, ","))
+	}
+	c.CharmURL = charmURL
+	return nil
+}
+
+// SetFlags implements Command.SetFlags.
+func (c *ListPlansCommand) SetFlags(f *gnuflag.FlagSet) {
+	c.HttpCommand.SetFlags(f)
+	defaultFormat := "yaml"
+	c.out.AddFlags(f, defaultFormat, map[string]cmd.Formatter{
+		"yaml":    cmd.FormatYaml,
+		"json":    cmd.FormatJson,
+		"smart":   cmd.FormatSmart,
+		"summary": formatSummary,
+		"tabular": formatTabular,
+	})
+}
+
+// Run implements Command.Run.
+// It retrieves the plans associated with the resolved charm URL from the
+// plan service.
+func (c *ListPlansCommand) Run(ctx *cmd.Context) (rErr error) { + defer c.Close() + client, err := c.NewClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + + resolvedUrl, err := c.CharmResolver.Resolve(client.Client, c.CharmURL) + if err != nil { + return errors.Annotatef(err, "failed to resolve charmURL %v", c.CharmURL) + } + c.CharmURL = resolvedUrl + + apiClient, err := newClient(client) + if err != nil { + return errors.Annotate(err, "failed to create a plan API client") + } + + plans, err := apiClient.GetAssociatedPlans(c.CharmURL) + if err != nil { + return errors.Annotate(err, "failed to retrieve plans") + } + + output := make([]plan, len(plans)) + for i, p := range plans { + outputPlan := plan{ + URL: p.URL, + } + def, err := readPlan(bytes.NewBufferString(p.Definition)) + if err != nil { + return errors.Annotate(err, "failed to parse plan definition") + } + if def.Description != nil { + outputPlan.Price = def.Description.Price + outputPlan.Description = def.Description.Text + } + output[i] = outputPlan + } + err = c.out.Write(ctx, output) + if err != nil { + return errors.Trace(err) + } + + return nil +} + +type plan struct { + URL string `json:"plan" yaml:"plan"` + Price string `json:"price" yaml:"price"` + Description string `json:"description" yaml:"description"` +} + +// formatSummary returns a summary of available plans. +func formatSummary(value interface{}) ([]byte, error) { + plans, ok := value.([]plan) + if !ok { + return nil, errors.Errorf("expected value of type %T, got %T", plans, value) + } + var out bytes.Buffer + tw := tabwriter.NewWriter(&out, 0, 1, 1, ' ', 0) + p := func(values ...interface{}) { + for _, v := range values { + fmt.Fprintf(tw, "%s\t", v) + } + fmt.Fprintln(tw) + } + p("PLAN", "PRICE") + for _, plan := range plans { + p(plan.URL, plan.Price) + } + err := tw.Flush() + if err != nil { + return nil, errors.Trace(err) + } + + return out.Bytes(), nil +} + +// formatTabular returns a tabular summary of available plans. +func formatTabular(value interface{}) ([]byte, error) { + plans, ok := value.([]plan) + if !ok { + return nil, errors.Errorf("expected value of type %T, got %T", plans, value) + } + + table := uitable.New() + table.MaxColWidth = 50 + table.Wrap = true + + table.AddRow("PLAN", "PRICE", "DESCRIPTION") + for _, plan := range plans { + table.AddRow(plan.URL, plan.Price, plan.Description) + } + + return []byte(table.String()), nil +} + +type planModel struct { + Description *descriptionModel `json:"description,omitempty"` +} + +// descriptionModel provides a human readable description of the plan. +type descriptionModel struct { + Price string `json:"price,omitempty"` + Text string `json:"text,omitempty"` +} + +// readPlan reads, parses and returns a planModel struct representation. +func readPlan(r io.Reader) (plan *planModel, err error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return + } + + var doc planModel + err = yaml.Unmarshal(data, &doc) + if err != nil { + return + } + return &doc, nil +} === added file 'src/github.com/juju/romulus/cmd/listplans/list_plans_test.go' --- src/github.com/juju/romulus/cmd/listplans/list_plans_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listplans/list_plans_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,173 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
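// readPlan above unmarshals a plan definition with gopkg.in/yaml.v2. A
// self-contained sketch of the same round trip, using a trimmed-down plan
// document; the struct shapes follow planModel/descriptionModel, while the
// YAML content and tag names here are illustrative:
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type description struct {
	Price string `yaml:"price,omitempty"`
	Text  string `yaml:"text,omitempty"`
}

type planModel struct {
	Description *description `yaml:"description,omitempty"`
}

const def = `
description:
  price: 10USD/month
  text: standard uptime monitoring
`

func main() {
	var p planModel
	if err := yaml.Unmarshal([]byte(def), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Description.Price, "-", p.Description.Text)
}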
+ +package listplans_test + +import ( + "net/http" + "time" + + "github.com/juju/cmd/cmdtesting" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + api "github.com/juju/romulus/api/plan" + "github.com/juju/romulus/cmd/listplans" + wireformat "github.com/juju/romulus/wireformat/plan" +) + +var ( + testPlan1 = ` + description: + text: >- + Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc pretium purus nec magna faucibus, sed eleifend dui fermentum. Nulla nec ornare lorem, sed imperdiet turpis. Nam auctor quis massa et commodo. Maecenas in magna erat. Duis non iaculis risus, a malesuada quam. Sed quis commodo sapien. Suspendisse laoreet diam eu interdum tristique. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. + Donec eu nunc quis eros fermentum porta non ut justo. Donec ut tempus sapien. Suspendisse bibendum fermentum eros, id feugiat justo elementum quis. Quisque vel volutpat risus. Aenean pellentesque ultrices consequat. Maecenas luctus, augue vitae ullamcorper vulputate, purus ligula accumsan diam, ut efficitur diam tellus ac nibh. Cras eros ligula, mattis in ex quis, porta efficitur quam. Donec porta, est ut interdum blandit, enim est elementum sapien, quis congue orci dui et nulla. Maecenas vehicula malesuada vehicula. Phasellus sapien ante, semper eu ornare sed, vulputate id nunc. Maecenas in orci mollis, sagittis lorem quis, ultrices metus. Integer molestie tempor augue, pulvinar blandit sapien ultricies eget. + Fusce sed tellus sit amet tortor mollis pellentesque. Nulla tempus sem tellus, vitae tempor ipsum scelerisque eu. Cras tempor, tellus nec pretium egestas, felis massa luctus velit, vitae feugiat nunc velit ac tellus. Maecenas quis nisi diam. Sed pulvinar suscipit nibh sit amet cursus. Ut sem orci, consequat id pretium id, lacinia id nisl. Maecenas id quam at nisi eleifend porta. Vestibulum at ligula arcu. Quisque tincidunt pulvinar egestas. Ut suscipit ornare ligula a fermentum. Morbi ante justo, condimentum ut risus vitae, molestie elementum elit. Curabitur malesuada commodo diam sed ultrices. Vestibulum tincidunt turpis at ultricies fermentum. Morbi ipsum felis, laoreet quis risus id, ornare elementum urna. Morbi ultrices porttitor pulvinar. Maecenas facilisis velit sit amet tellus feugiat iaculis. 
+ metrics: + pings: + unit: + transform: max + period: hour + gaps: zero +` + testPlan2 = ` + metrics: + pongs: + unit: + transform: max + period: hour + gaps: zero +` +) + +type ListPlansCommandSuite struct { + testing.CleanupSuite + mockAPI *mockapi + stub *testing.Stub +} + +var _ = gc.Suite(&ListPlansCommandSuite{}) + +func (s *ListPlansCommandSuite) SetUpTest(c *gc.C) { + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) + s.PatchValue(listplans.NewClient, listplans.APIClientFnc(s.mockAPI)) +} + +func (s *ListPlansCommandSuite) TestGetCommands(c *gc.C) { + tests := []struct { + about string + args []string + err string + resolvedCharmURL string + apiCall []interface{} + }{{ + about: "charm url is resolved", + args: []string{"some-charm-url"}, + resolvedCharmURL: "series/some-charm-url-1", + apiCall: []interface{}{"series/some-charm-url-1"}, + }, { + about: "everything works - default yaml format", + args: []string{"some-charm-url"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - yaml", + args: []string{"some-charm-url", "--format", "yaml"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - smart", + args: []string{"some-charm-url", "--format", "smart"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - json", + args: []string{"some-charm-url", "--format", "json"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - summary", + args: []string{"some-charm-url", "--format", "summary"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "everything works - tabular", + args: []string{"some-charm-url", "--format", "tabular"}, + apiCall: []interface{}{"some-charm-url"}, + }, { + about: "missing argument", + args: []string{}, + err: `missing arguments`, + apiCall: []interface{}{}, + }, { + about: "unknown arguments", + args: []string{"some-charm-url", "extra", "arguments"}, + err: `unknown command line arguments: extra,arguments`, + apiCall: []interface{}{}, + }, + } + + for i, t := range tests { + c.Logf("Running test %d %s", i, t.about) + s.mockAPI.reset() + + listPlans := &listplans.ListPlansCommand{ + CharmResolver: &mockCharmResolver{ + ResolvedURL: t.resolvedCharmURL, + Stub: s.stub, + }, + } + _, err := cmdtesting.RunCommand(c, listPlans, t.args...) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + } else { + c.Assert(err, jc.ErrorIsNil) + s.mockAPI.CheckCall(c, 0, "Resolve", t.args[0]) + s.mockAPI.CheckCall(c, 1, "GetAssociatedPlans", t.apiCall...) + } + listPlans.Close() + } +} + +// mockapi mocks the plan service api +type mockapi struct { + *testing.Stub + api.Client +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +// Get implements the Get function of the api.PlanClient interface. +// TODO (domas) : fix once querying by charm url is in place +func (m *mockapi) GetAssociatedPlans(charmURL string) ([]wireformat.Plan, error) { + m.AddCall("GetAssociatedPlans", charmURL) + p1 := wireformat.Plan{ + URL: "bob/test-plan-1", + Definition: testPlan1, + CreatedOn: time.Date(2015, 0, 0, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), + } + p2 := wireformat.Plan{ + URL: "carol/test-plan-2", + Definition: testPlan2, + CreatedOn: time.Date(2015, 0, 0, 0, 0, 0, 0, time.UTC).Format(time.RFC3339), + } + return []wireformat.Plan{p1, p2}, m.NextErr() +} + +func (m *mockapi) reset() { + m.ResetCalls() +} + +// mockCharmResolver is a mock implementation of cmd.CharmResolver. 
+type mockCharmResolver struct { + *testing.Stub + ResolvedURL string +} + +// Resolve implements cmd.CharmResolver. +func (r *mockCharmResolver) Resolve(_ *http.Client, charmURL string) (string, error) { + r.AddCall("Resolve", charmURL) + if r.ResolvedURL != "" { + return r.ResolvedURL, r.NextErr() + } + return charmURL, r.NextErr() +} === added file 'src/github.com/juju/romulus/cmd/listplans/package_test.go' --- src/github.com/juju/romulus/cmd/listplans/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/listplans/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package listplans_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/romulus/cmd/resolve.go' --- src/github.com/juju/romulus/cmd/resolve.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/resolve.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,55 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cmd + +import ( + "net/http" + + "github.com/juju/errors" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +// CharmResolver interface defines the functionality to resolve a charm URL. +type CharmResolver interface { + // Resolve resolves the charm URL. + Resolve(client *http.Client, charmURL string) (string, error) +} + +// CharmStoreResolver implements the CharmResolver interface. +type CharmStoreResolver struct { + csURL string +} + +// NewCharmStoreResolver creates a new charm store resolver. +func NewCharmStoreResolver() *CharmStoreResolver { + return &CharmStoreResolver{ + csURL: csclient.ServerURL, + } +} + +// Resolve implements the CharmResolver interface. +func (r *CharmStoreResolver) Resolve(client *http.Client, charmURL string) (string, error) { + repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ + URL: r.csURL, + HTTPClient: client, + VisitWebPage: httpbakery.OpenWebBrowser, + }) + + curl, err := charm.ParseURL(charmURL) + if err != nil { + return "", errors.Annotate(err, "could not parse charm url") + } + // ignore local charm urls + if curl.Schema == "local" { + return charmURL, nil + } + resolvedURL, _, err := repo.Resolve(curl) + if err != nil { + return "", errors.Trace(err) + } + return resolvedURL.String(), nil +} === added directory 'src/github.com/juju/romulus/cmd/setbudget' === added file 'src/github.com/juju/romulus/cmd/setbudget/export_test.go' --- src/github.com/juju/romulus/cmd/setbudget/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/setbudget/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
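// CharmStoreResolver.Resolve above short-circuits local charm URLs before
// consulting the charm store. A sketch of just that schema check, using the
// same charm.ParseURL call; the isLocal helper and sample URLs are
// illustrative additions:
package main

import (
	"fmt"

	"gopkg.in/juju/charm.v6-unstable"
)

// isLocal reports whether the charm URL uses the "local" schema, in which
// case Resolve returns it unchanged without a charm store round trip.
func isLocal(charmURL string) (bool, error) {
	curl, err := charm.ParseURL(charmURL)
	if err != nil {
		return false, err
	}
	return curl.Schema == "local", nil
}

func main() {
	for _, u := range []string{"local:quantal/dummy-1", "cs:webapp"} {
		local, err := isLocal(u)
		fmt.Println(u, local, err)
	}
}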
+ +package setbudget + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewAPIClient = &newAPIClient +) + +func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} === added file 'src/github.com/juju/romulus/cmd/setbudget/package_test.go' --- src/github.com/juju/romulus/cmd/setbudget/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/setbudget/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setbudget_test + +import ( + stdtesting "testing" + + gc "gopkg.in/check.v1" +) + +func TestAll(t *stdtesting.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/romulus/cmd/setbudget/setbudget.go' --- src/github.com/juju/romulus/cmd/setbudget/setbudget.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/setbudget/setbudget.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,86 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setbudget + +import ( + "fmt" + "strconv" + + "github.com/juju/cmd" + "github.com/juju/errors" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + api "github.com/juju/romulus/api/budget" + rcmd "github.com/juju/romulus/cmd" +) + +type setBudgetCommand struct { + rcmd.HttpCommand + Name string + Value string +} + +// NewSetBudgetCommand returns a new setBudgetCommand. +func NewSetBudgetCommand() cmd.Command { + return &setBudgetCommand{} +} + +const doc = ` +Set the monthly budget limit. + +Example: + juju set-budget personal 96 + Sets the monthly limit for budget named 'personal' to 96. +` + +// Info implements cmd.Command.Info. +func (c *setBudgetCommand) Info() *cmd.Info { + return &cmd.Info{ + Name: "set-budget", + Purpose: "set the budget limit", + Doc: doc, + } +} + +// Init implements cmd.Command.Init. +func (c *setBudgetCommand) Init(args []string) error { + if len(args) < 2 { + return errors.New("name and value required") + } + c.Name, c.Value = args[0], args[1] + if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil { + return errors.New("budget value needs to be a whole number") + } + return cmd.CheckEmpty(args[2:]) +} + +// Run implements cmd.Command.Run and contains most of the setbudget logic. +func (c *setBudgetCommand) Run(ctx *cmd.Context) error { + defer c.Close() + client, err := c.NewClient() + if err != nil { + return errors.Annotate(err, "failed to create an http client") + } + api, err := newAPIClient(client) + if err != nil { + return errors.Annotate(err, "failed to create an api client") + } + resp, err := api.SetBudget(c.Name, c.Value) + if err != nil { + return errors.Annotate(err, "failed to set the budget") + } + fmt.Fprintf(ctx.Stdout, resp) + return nil +} + +var newAPIClient = newAPIClientImpl + +func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) { + client := api.NewClient(c) + return client, nil +} + +type apiClient interface { + SetBudget(string, string) (string, error) +} === added file 'src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go' --- src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/setbudget/setbudget_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package setbudget_test + +import ( + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/cmd/setbudget" +) + +var _ = gc.Suite(&setBudgetSuite{}) + +type setBudgetSuite struct { + testing.CleanupSuite + stub *testing.Stub + mockAPI *mockapi +} + +func (s *setBudgetSuite) SetUpTest(c *gc.C) { + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) + s.PatchValue(setbudget.NewAPIClient, setbudget.APIClientFnc(s.mockAPI)) +} + +func (s *setBudgetSuite) TestSetBudget(c *gc.C) { + s.mockAPI.resp = "name budget set to 5" + set := setbudget.NewSetBudgetCommand() + ctx, err := cmdtesting.RunCommand(c, set, "name", "5") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5") + s.mockAPI.CheckCall(c, 0, "SetBudget", "name", "5") +} + +func (s *setBudgetSuite) TestSetBudgetAPIError(c *gc.C) { + s.stub.SetErrors(errors.New("something failed")) + set := setbudget.NewSetBudgetCommand() + _, err := cmdtesting.RunCommand(c, set, "name", "5") + c.Assert(err, gc.ErrorMatches, "failed to set the budget: something failed") + s.mockAPI.CheckCall(c, 0, "SetBudget", "name", "5") +} + +func (s *setBudgetSuite) TestSetBudgetErrors(c *gc.C) { + tests := []struct { + about string + args []string + expectedError string + }{ + { + about: "value needs to be a number", + args: []string{"name", "badvalue"}, + expectedError: "budget value needs to be a whole number", + }, + { + about: "value is missing", + args: []string{"name"}, + expectedError: "name and value required", + }, + { + about: "no args", + args: []string{}, + expectedError: "name and value required", + }, + } + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + s.stub.SetErrors(errors.New(test.expectedError)) + defer s.mockAPI.ResetCalls() + set := setbudget.NewSetBudgetCommand() + _, err := cmdtesting.RunCommand(c, set, test.args...) + c.Assert(err, gc.ErrorMatches, test.expectedError) + s.mockAPI.CheckNoCalls(c) + } +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +type mockapi struct { + *testing.Stub + resp string +} + +func (api *mockapi) SetBudget(name, value string) (string, error) { + api.MethodCall(api, "SetBudget", name, value) + return api.resp, api.NextErr() +} === added directory 'src/github.com/juju/romulus/cmd/setplan' === added file 'src/github.com/juju/romulus/cmd/setplan/export_test.go' --- src/github.com/juju/romulus/cmd/setplan/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/setplan/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setplan + +import ( + api "github.com/juju/romulus/api/plan" +) + +var ( + NewAuthorizationClient = &newAuthorizationClient +) + +// APIClientFnc returns a function that returns the provided apiClient +// and can be used to patch the NewAPIClient variable for tests. 
+func APIClientFnc(client authorizationClient) func(...api.ClientOption) (authorizationClient, error) {
+	return func(...api.ClientOption) (authorizationClient, error) {
+		return client, nil
+	}
+}

=== added file 'src/github.com/juju/romulus/cmd/setplan/set_plan.go'
--- src/github.com/juju/romulus/cmd/setplan/set_plan.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/setplan/set_plan.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,151 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// The setplan package contains the implementation of the juju set-plan
+// command.
+package setplan
+
+import (
+	"encoding/json"
+	"net/url"
+
+	"github.com/juju/cmd"
+	"github.com/juju/errors"
+	"github.com/juju/juju/api/service"
+	"github.com/juju/juju/cmd/modelcmd"
+	"github.com/juju/names"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/macaroon.v1"
+	"launchpad.net/gnuflag"
+
+	api "github.com/juju/romulus/api/plan"
+	rcmd "github.com/juju/romulus/cmd"
+)
+
+// authorizationClient defines the interface of an api client that
+// the command uses to create an authorization macaroon.
+type authorizationClient interface {
+	// Authorize returns the authorization macaroon for the specified environment,
+	// charm url, service name and plan.
+	Authorize(environmentUUID, charmURL, serviceName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error)
+}
+
+var newAuthorizationClient = func(options ...api.ClientOption) (authorizationClient, error) {
+	return api.NewAuthorizationClient(options...)
+}
+
+// NewSetPlanCommand returns a new command that is used to set metric credentials for a
+// deployed service.
+func NewSetPlanCommand() cmd.Command {
+	return modelcmd.Wrap(&setPlanCommand{})
+}
+
+// setPlanCommand is a command-line tool for setting
+// Service.MetricCredential for development & demonstration purposes.
+type setPlanCommand struct {
+	modelcmd.ModelCommandBase
+	rcmd.HttpCommand
+
+	Service string
+	Plan    string
+}
+
+// Info implements cmd.Command.
+func (c *setPlanCommand) Info() *cmd.Info {
+	return &cmd.Info{
+		Name:    "set-plan",
+		Args:    "<service name> <plan>",
+		Purpose: "set the plan for a service",
+		Doc: `
+Set the plan for the deployed service, effective immediately.

+The specified plan name must be a valid plan that is offered for this
+particular charm. Use "juju list-plans <charm url>" for more information.
+
+Usage:
+
+ juju set-plan [options] <service name> <plan>
+
+Example:
+
+ juju set-plan myapp example/uptime
+`,
+	}
+}
+
+// SetFlags implements cmd.Command.
+func (c *setPlanCommand) SetFlags(f *gnuflag.FlagSet) {
+	c.ModelCommandBase.SetFlags(f)
+}
+
+// Init implements cmd.Command.
+func (c *setPlanCommand) Init(args []string) error {
+	if len(args) < 2 {
+		return errors.New("need to specify plan uuid and service name")
+	}
+
+	serviceName := args[0]
+	if !names.IsValidService(serviceName) {
+		return errors.Errorf("invalid service name %q", serviceName)
+	}
+
+	c.Plan = args[1]
+	c.Service = serviceName
+
+	return c.ModelCommandBase.Init(args[2:])
+}
+
+// IsSuperCommand implements cmd.Command.
+// Defined here because of ambiguity between HttpCommand and ModelCommandBase.
+func (c *setPlanCommand) IsSuperCommand() bool { return false }
+
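// requestMetricCredentials below serializes the authorization macaroon as a
// JSON-encoded macaroon.Slice before handing it to SetMetricCredentials. A
// minimal sketch of just that encoding step; the macaroon here is freshly
// minted with throwaway values purely for illustration:
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/macaroon.v1"
)

func main() {
	m, err := macaroon.New([]byte("root-key"), "id", "location")
	if err != nil {
		panic(err)
	}
	// A macaroon.Slice marshals to a JSON array of macaroons, which is the
	// wire form stored as the service's metric credentials.
	data, err := json.Marshal(macaroon.Slice{m})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes of credentials: %s\n", len(data), data)
}

+// AllowInterspersedFlags implements cmd.Command.
+// Defined here because of ambiguity between HttpCommand and ModelCommandBase.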
+func (c *setPlanCommand) AllowInterspersedFlags() bool { return true } + +func (c *setPlanCommand) requestMetricCredentials() ([]byte, error) { + root, err := c.NewAPIRoot() + if err != nil { + return nil, errors.Trace(err) + } + jclient := service.NewClient(root) + envUUID := jclient.ModelUUID() + charmURL, err := jclient.GetCharmURL(c.Service) + if err != nil { + return nil, errors.Trace(err) + } + + hc, err := c.NewClient() + if err != nil { + return nil, errors.Trace(err) + } + client, err := newAuthorizationClient(api.HTTPClient(hc)) + if err != nil { + return nil, errors.Trace(err) + } + m, err := client.Authorize(envUUID, charmURL.String(), c.Service, c.Plan, httpbakery.OpenWebBrowser) + if err != nil { + return nil, errors.Trace(err) + } + ms := macaroon.Slice{m} + return json.Marshal(ms) +} + +// Run implements cmd.Command. +func (c *setPlanCommand) Run(ctx *cmd.Context) error { + credentials, err := c.requestMetricCredentials() + if err != nil { + return errors.Trace(err) + } + + root, err := c.NewAPIRoot() + if err != nil { + return errors.Trace(err) + } + api := service.NewClient(root) + + err = api.SetMetricCredentials(c.Service, credentials) + if err != nil { + return errors.Trace(err) + } + return nil +} === added file 'src/github.com/juju/romulus/cmd/setplan/set_plan_test.go' --- src/github.com/juju/romulus/cmd/setplan/set_plan_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/setplan/set_plan_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,168 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package setplan_test + +import ( + "encoding/json" + "fmt" + "net/url" + stdtesting "testing" + + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + jjjtesting "github.com/juju/juju/juju/testing" + "github.com/juju/juju/testcharms" + jjtesting "github.com/juju/juju/testing" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon.v1" + + "github.com/juju/romulus/cmd/setplan" +) + +func TestPackage(t *stdtesting.T) { + jjtesting.MgoTestPackage(t) +} + +var _ = gc.Suite(&setPlanCommandSuite{}) + +type setPlanCommandSuite struct { + jjjtesting.JujuConnSuite + + mockAPI *mockapi + charmURL string +} + +func (s *setPlanCommandSuite) SetUpTest(c *gc.C) { + s.JujuConnSuite.SetUpTest(c) + + ch := testcharms.Repo.CharmDir("dummy") + curl := charm.MustParseURL( + fmt.Sprintf("local:quantal/%s-%d", ch.Meta().Name, ch.Revision()), + ) + s.charmURL = curl.String() + dummyCharm, err := s.State.AddCharm(ch, curl, "dummy-path", "dummy-1") + c.Assert(err, jc.ErrorIsNil) + s.AddTestingService(c, "mysql", dummyCharm) + + mockAPI, err := newMockAPI() + c.Assert(err, jc.ErrorIsNil) + s.mockAPI = mockAPI + + s.PatchValue(setplan.NewAuthorizationClient, setplan.APIClientFnc(s.mockAPI)) +} + +func (s setPlanCommandSuite) TestSetPlanCommand(c *gc.C) { + tests := []struct { + about string + plan string + service string + err string + apiErr error + apiCalls []testing.StubCall + }{{ + about: "all is well", + plan: "bob/default", + service: "mysql", + apiCalls: []testing.StubCall{{ + FuncName: "Authorize", + Args: []interface{}{ + s.State.ModelUUID(), + s.charmURL, + "mysql", + }, + }}, + }, { + about: "invalid service name", + plan: "bob/default", + service: "mysql-0", + err: "invalid service name \"mysql-0\"", + }, { + about: "unknown service", + plan: 
"bob/default", + service: "wordpress", + err: "service \"wordpress\" not found.*", + }, { + about: "unknown service", + plan: "bob/default", + service: "mysql", + apiErr: errors.New("some strange error"), + err: "some strange error", + }, + } + for i, test := range tests { + c.Logf("running test %d: %v", i, test.about) + s.mockAPI.ResetCalls() + if test.apiErr != nil { + s.mockAPI.SetErrors(test.apiErr) + } + _, err := cmdtesting.RunCommand(c, setplan.NewSetPlanCommand(), test.service, test.plan) + if test.err == "" { + c.Assert(err, jc.ErrorIsNil) + c.Assert(s.mockAPI.Calls(), gc.HasLen, 1) + s.mockAPI.CheckCalls(c, test.apiCalls) + + svc, err := s.State.Service("mysql") + c.Assert(err, jc.ErrorIsNil) + svcMacaroon := svc.MetricCredentials() + data, err := json.Marshal(macaroon.Slice{s.mockAPI.macaroon}) + c.Assert(err, jc.ErrorIsNil) + c.Assert(svcMacaroon, gc.DeepEquals, data) + } else { + c.Assert(err, gc.ErrorMatches, test.err) + c.Assert(s.mockAPI.Calls(), gc.HasLen, 0) + } + } +} + +func newMockAPI() (*mockapi, error) { + kp, err := bakery.GenerateKey() + if err != nil { + return nil, errors.Trace(err) + } + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "omnibus", + Key: kp, + }) + if err != nil { + return nil, errors.Trace(err) + } + return &mockapi{ + service: svc, + }, nil +} + +type mockapi struct { + testing.Stub + + service *bakery.Service + macaroon *macaroon.Macaroon +} + +func (m *mockapi) Authorize(environmentUUID, charmURL, serviceName, plan string, visitWebPage func(*url.URL) error) (*macaroon.Macaroon, error) { + err := m.NextErr() + if err != nil { + return nil, errors.Trace(err) + } + m.AddCall("Authorize", environmentUUID, charmURL, serviceName) + macaroon, err := m.service.NewMacaroon( + "", + nil, + []checkers.Caveat{ + checkers.DeclaredCaveat("environment", environmentUUID), + checkers.DeclaredCaveat("charm", charmURL), + checkers.DeclaredCaveat("service", serviceName), + checkers.DeclaredCaveat("plan", plan), + }, + ) + if err != nil { + return nil, errors.Trace(err) + } + m.macaroon = macaroon + return m.macaroon, nil +} === added directory 'src/github.com/juju/romulus/cmd/showbudget' === added file 'src/github.com/juju/romulus/cmd/showbudget/export_test.go' --- src/github.com/juju/romulus/cmd/showbudget/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/showbudget/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package showbudget + +import ( + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + NewAPIClient = &newAPIClient +) + +// APIClientFnc returns a function that returns the provided apiClient +// and can be used to patch the NewAPIClient variable for tests. +func APIClientFnc(api apiClient) func(*httpbakery.Client) (apiClient, error) { + return func(*httpbakery.Client) (apiClient, error) { + return api, nil + } +} === added file 'src/github.com/juju/romulus/cmd/showbudget/package_test.go' --- src/github.com/juju/romulus/cmd/showbudget/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/cmd/showbudget/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package showbudget_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestAll(t *stdtesting.T) {
+	gc.TestingT(t)
+}

=== added file 'src/github.com/juju/romulus/cmd/showbudget/show_budget.go'
--- src/github.com/juju/romulus/cmd/showbudget/show_budget.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/showbudget/show_budget.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,135 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package showbudget
+
+import (
+	"sort"
+
+	"github.com/gosuri/uitable"
+	"github.com/juju/cmd"
+	"github.com/juju/errors"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"launchpad.net/gnuflag"
+
+	api "github.com/juju/romulus/api/budget"
+	rcmd "github.com/juju/romulus/cmd"
+	wireformat "github.com/juju/romulus/wireformat/budget"
+)
+
+// NewShowBudgetCommand returns a new command that is used
+// to show details of the specified budget.
+func NewShowBudgetCommand() cmd.Command {
+	return &showBudgetCommand{}
+}
+
+type showBudgetCommand struct {
+	rcmd.HttpCommand
+
+	out    cmd.Output
+	budget string
+}
+
+const showBudgetDoc = `
+Display budget usage information.
+
+Example:
+ juju show-budget personal
+`
+
+// Info implements cmd.Command.Info.
+func (c *showBudgetCommand) Info() *cmd.Info {
+	return &cmd.Info{
+		Name:    "show-budget",
+		Purpose: "show budget usage",
+		Doc:     showBudgetDoc,
+	}
+}
+
+// Init implements cmd.Command.Init.
+func (c *showBudgetCommand) Init(args []string) error {
+	if len(args) < 1 {
+		return errors.New("missing arguments")
+	}
+	c.budget, args = args[0], args[1:]
+
+	return cmd.CheckEmpty(args)
+}
+
+// SetFlags implements cmd.Command.SetFlags.
+func (c *showBudgetCommand) SetFlags(f *gnuflag.FlagSet) {
+	c.out.AddFlags(f, "tabular", map[string]cmd.Formatter{
+		"tabular": formatTabular,
+	})
+}
+
+func (c *showBudgetCommand) Run(ctx *cmd.Context) error {
+	defer c.Close()
+	client, err := c.NewClient()
+	if err != nil {
+		return errors.Annotate(err, "failed to create an http client")
+	}
+	api, err := newAPIClient(client)
+	if err != nil {
+		return errors.Annotate(err, "failed to create an api client")
+	}
+	budget, err := api.GetBudget(c.budget)
+	if err != nil {
+		return errors.Annotate(err, "failed to retrieve the budget")
+	}
+	err = c.out.Write(ctx, budget)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	return nil
+}
+
+// formatTabular returns a tabular view of available budgets.
+func formatTabular(value interface{}) ([]byte, error) {
+	b, ok := value.(*wireformat.BudgetWithAllocations)
+	if !ok {
+		return nil, errors.Errorf("expected value of type %T, got %T", b, value)
+	}
+
+	table := uitable.New()
+	table.MaxColWidth = 50
+	table.Wrap = true
+
+	table.AddRow("MODEL", "SERVICES", "SPENT", "ALLOCATED BY", "USAGE")
+	for _, allocation := range b.Allocations {
+		firstLine := true
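// The loop below walks a map in sorted key order. Go randomizes map iteration
// order on purpose, so any test asserting on the rendered table would flake
// without the sort. A distilled, standalone sketch of the pattern (sample
// data is illustrative):
package main

import (
	"fmt"
	"sort"
)

func main() {
	consumed := map[string]string{"wordpress": "300.00", "mysql": "200.00"}

	// Collect the keys, sort them, then index the map in that order.
	names := make([]string, 0, len(consumed))
	for name := range consumed {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Println(name, consumed[name]) // mysql first, then wordpress
	}
}

+		// We'll sort the service names to avoid nondeterministic
+		// command output.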
+		services := make([]string, 0, len(allocation.Services))
+		for serviceName := range allocation.Services {
+			services = append(services, serviceName)
+		}
+		sort.Strings(services)
+		for _, serviceName := range services {
+			service := allocation.Services[serviceName]
+			if firstLine {
+				table.AddRow(allocation.Model, serviceName, service.Consumed, allocation.Owner, allocation.Usage)
+				firstLine = false
+				continue
+			}
+			table.AddRow("", serviceName, service.Consumed, "", "")
+		}
+	}
+	table.AddRow("", "", "", "", "")
+	table.AddRow("TOTAL", "", b.Total.Consumed, b.Total.Allocated, b.Total.Usage)
+	table.AddRow("BUDGET", "", "", b.Limit, "")
+	table.AddRow("UNALLOCATED", "", "", b.Total.Unallocated, "")
+	return []byte(table.String()), nil
+}
+
+var newAPIClient = newAPIClientImpl
+
+func newAPIClientImpl(c *httpbakery.Client) (apiClient, error) {
+	client := api.NewClient(c)
+	return client, nil
+}
+
+type apiClient interface {
+	GetBudget(string) (*wireformat.BudgetWithAllocations, error)
+}
=== added file 'src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go'
--- src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/showbudget/show_budget_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,123 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package showbudget_test
+
+import (
+	"github.com/juju/cmd/cmdtesting"
+	"github.com/juju/errors"
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/romulus/cmd/showbudget"
+	"github.com/juju/romulus/wireformat/budget"
+)
+
+var _ = gc.Suite(&showBudgetSuite{})
+
+type showBudgetSuite struct {
+	testing.CleanupSuite
+	stub    *testing.Stub
+	mockAPI *mockapi
+}
+
+func (s *showBudgetSuite) SetUpTest(c *gc.C) {
+	s.stub = &testing.Stub{}
+	s.mockAPI = &mockapi{s.stub}
+	s.PatchValue(showbudget.NewAPIClient, showbudget.APIClientFnc(s.mockAPI))
+}
+
+func (s *showBudgetSuite) TestShowBudgetCommand(c *gc.C) {
+	tests := []struct {
+		about  string
+		args   []string
+		err    string
+		budget string
+		apierr string
+	}{{
+		about: "missing argument",
+		err:   `missing arguments`,
+	}, {
+		about: "unknown arguments",
+		args:  []string{"my-special-budget", "extra", "arguments"},
+		err:   `unrecognized args: \["extra" "arguments"\]`,
+	}, {
+		about:  "api error",
+		args:   []string{"personal"},
+		apierr: "well, this is embarrassing",
+		err:    "failed to retrieve the budget: well, this is embarrassing",
+	}, {
+		about:  "all ok",
+		args:   []string{"personal"},
+		budget: "personal",
+	}}
+
+	for i, test := range tests {
+		c.Logf("running test %d: %v", i, test.about)
+		s.mockAPI.ResetCalls()
+
+		if test.apierr != "" {
+			s.mockAPI.SetErrors(errors.New(test.apierr))
+		}
+
+		showBudget := showbudget.NewShowBudgetCommand()
+
+		_, err := cmdtesting.RunCommand(c, showBudget, test.args...)
+		if test.err == "" {
+			c.Assert(err, jc.ErrorIsNil)
+			s.stub.CheckCalls(c, []testing.StubCall{{"GetBudget", []interface{}{test.budget}}})
+		} else {
+			c.Assert(err, gc.ErrorMatches, test.err)
+		}
+	}
+}
+
+type mockapi struct {
+	*testing.Stub
+}
+
+func (api *mockapi) GetBudget(name string) (*budget.BudgetWithAllocations, error) {
+	api.AddCall("GetBudget", name)
+	if err := api.NextErr(); err != nil {
+		return nil, err
+	}
+	return &budget.BudgetWithAllocations{
+		Limit: "4000.00",
+		Total: budget.BudgetTotals{
+			Allocated:   "2200.00",
+			Unallocated: "1800.00",
+			Available:   "1100.00",
+			Consumed:    "1100.00",
+			Usage:       "50%",
+		},
+		Allocations: []budget.Allocation{{
+			Owner:    "user.joe",
+			Limit:    "1200.00",
+			Consumed: "500.00",
+			Usage:    "42%",
+			Model:    "model.joe",
+			Services: map[string]budget.ServiceAllocation{
+				"wordpress": budget.ServiceAllocation{
+					Consumed: "300.00",
+				},
+				"mysql": budget.ServiceAllocation{
+					Consumed: "200.00",
+				},
+			},
+		}, {
+			Owner:    "user.jess",
+			Limit:    "1000.00",
+			Consumed: "600.00",
+			Usage:    "60%",
+			Model:    "model.jess",
+			Services: map[string]budget.ServiceAllocation{
+				"landscape": budget.ServiceAllocation{
+					Consumed: "600.00",
+				},
+			},
+		}},
+	}, nil
+}
=== added directory 'src/github.com/juju/romulus/cmd/updateallocation'
=== added file 'src/github.com/juju/romulus/cmd/updateallocation/export_test.go'
--- src/github.com/juju/romulus/cmd/updateallocation/export_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/updateallocation/export_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,16 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package updateallocation
+
+import (
+	"github.com/juju/cmd"
+	"github.com/juju/juju/cmd/modelcmd"
+	"github.com/juju/juju/jujuclient"
+)
+
+func NewUpdateAllocateCommandForTest(api apiClient, store jujuclient.ClientStore) cmd.Command {
+	c := &updateAllocationCommand{api: api}
+	c.SetClientStore(store)
+	return modelcmd.Wrap(c)
+}
=== added file 'src/github.com/juju/romulus/cmd/updateallocation/package_test.go'
--- src/github.com/juju/romulus/cmd/updateallocation/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/updateallocation/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package updateallocation_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestAll(t *stdtesting.T) {
+	gc.TestingT(t)
+}
=== added file 'src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go'
--- src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/updateallocation/updateallocation.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,116 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package updateallocation defines the command used to update allocations.
+package updateallocation
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/juju/cmd"
+	"github.com/juju/errors"
+	"github.com/juju/juju/cmd/modelcmd"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"launchpad.net/gnuflag"
+
+	api "github.com/juju/romulus/api/budget"
+	rcmd "github.com/juju/romulus/cmd"
+)
+
+type updateAllocationCommand struct {
+	modelcmd.ModelCommandBase
+	api apiClient
+	rcmd.HttpCommand
+	Name  string
+	Value string
+}
+
+// NewUpdateAllocationCommand returns a new updateAllocationCommand.
+func NewUpdateAllocationCommand() cmd.Command {
+	return modelcmd.Wrap(&updateAllocationCommand{})
+}
+
+func (c *updateAllocationCommand) newAPIClient(bakery *httpbakery.Client) (apiClient, error) {
+	if c.api != nil {
+		return c.api, nil
+	}
+	c.api = api.NewClient(bakery)
+	return c.api, nil
+}
+
+type apiClient interface {
+	UpdateAllocation(string, string, string) (string, error)
+}
+
+const doc = `
+Updates an existing allocation on a service.

+Example:
+ juju update-allocation wordpress 10
+    Sets the allocation for the wordpress service to 10.
+`
+
+// Info implements cmd.Command.Info.
+func (c *updateAllocationCommand) Info() *cmd.Info {
+	return &cmd.Info{
+		Name:    "update-allocation",
+		Purpose: "update an allocation",
+		Doc:     doc,
+	}
+}
+
+// SetFlags implements cmd.Command.
+func (c *updateAllocationCommand) SetFlags(f *gnuflag.FlagSet) {
+	c.ModelCommandBase.SetFlags(f)
+}
+
+// AllowInterspersedFlags implements cmd.Command.
+func (c *updateAllocationCommand) AllowInterspersedFlags() bool { return true }
+
+// IsSuperCommand implements cmd.Command.
+// Defined here because of ambiguity between HttpCommand and ModelCommandBase.
+func (c *updateAllocationCommand) IsSuperCommand() bool { return false }
+
+// Init implements cmd.Command.Init.
+func (c *updateAllocationCommand) Init(args []string) error {
+	if len(args) < 2 {
+		return errors.New("service and value required")
+	}
+	c.Name, c.Value = args[0], args[1]
+	if _, err := strconv.ParseInt(c.Value, 10, 32); err != nil {
+		return errors.New("value needs to be a whole number")
+	}
+	return cmd.CheckEmpty(args[2:])
+}
+
+func (c *updateAllocationCommand) modelUUID() (string, error) {
+	model, err := c.ClientStore().ModelByName(c.ControllerName(), c.AccountName(), c.ModelName())
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	return model.ModelUUID, nil
+}
+
+// Run implements cmd.Command.Run and contains most of the update-allocation logic.
+func (c *updateAllocationCommand) Run(ctx *cmd.Context) error {
+	defer c.Close()
+	modelUUID, err := c.modelUUID()
+	if err != nil {
+		return errors.Annotate(err, "failed to get model uuid")
+	}
+	client, err := c.NewClient()
+	if err != nil {
+		return errors.Annotate(err, "failed to create an http client")
+	}
+	api, err := c.newAPIClient(client)
+	if err != nil {
+		return errors.Annotate(err, "failed to create an api client")
+	}
+	resp, err := api.UpdateAllocation(modelUUID, c.Name, c.Value)
+	if err != nil {
+		return errors.Annotate(err, "failed to update the allocation")
+	}
+	fmt.Fprint(ctx.Stdout, resp)
+	return nil
+}
=== added file 'src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go'
--- src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/cmd/updateallocation/updateallocation_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,125 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package updateallocation_test + +import ( + "github.com/juju/cmd" + "github.com/juju/cmd/cmdtesting" + "github.com/juju/errors" + "github.com/juju/juju/jujuclient" + "github.com/juju/juju/jujuclient/jujuclienttesting" + "github.com/juju/testing" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/romulus/cmd/updateallocation" +) + +var _ = gc.Suite(&updateAllocationSuite{}) + +type updateAllocationSuite struct { + jujutesting.FakeHomeSuite + stub *testing.Stub + mockAPI *mockapi + store jujuclient.ClientStore +} + +func (s *updateAllocationSuite) SetUpTest(c *gc.C) { + s.FakeHomeSuite.SetUpTest(c) + s.store = &jujuclienttesting.MemStore{ + Controllers: map[string]jujuclient.ControllerDetails{ + "controller": {}, + }, + Models: map[string]jujuclient.ControllerAccountModels{ + "controller": { + AccountModels: map[string]*jujuclient.AccountModels{ + "admin@local": { + Models: map[string]jujuclient.ModelDetails{ + "model": {"model-uuid"}, + }, + CurrentModel: "model", + }, + }, + }, + }, + Accounts: map[string]*jujuclient.ControllerAccounts{ + "controller": { + Accounts: map[string]jujuclient.AccountDetails{ + "admin@local": {}, + }, + CurrentAccount: "admin@local", + }, + }, + } + s.stub = &testing.Stub{} + s.mockAPI = newMockAPI(s.stub) +} + +func (s *updateAllocationSuite) run(c *gc.C, args ...string) (*cmd.Context, error) { + updateAlloc := updateallocation.NewUpdateAllocateCommandForTest(s.mockAPI, s.store) + a := []string{"-m", "controller:model"} + a = append(a, args...) + return cmdtesting.RunCommand(c, updateAlloc, a...) +} + +func (s *updateAllocationSuite) TestUpdateAllocation(c *gc.C) { + s.mockAPI.resp = "name budget set to 5" + ctx, err := s.run(c, "name", "5") + c.Assert(err, jc.ErrorIsNil) + c.Assert(cmdtesting.Stdout(ctx), jc.DeepEquals, "name budget set to 5") + s.mockAPI.CheckCall(c, 0, "UpdateAllocation", "model-uuid", "name", "5") +} + +func (s *updateAllocationSuite) TestUpdateAllocationAPIError(c *gc.C) { + s.stub.SetErrors(errors.New("something failed")) + _, err := s.run(c, "name", "5") + c.Assert(err, gc.ErrorMatches, "failed to update the allocation: something failed") + s.mockAPI.CheckCall(c, 0, "UpdateAllocation", "model-uuid", "name", "5") +} + +func (s *updateAllocationSuite) TestUpdateAllocationErrors(c *gc.C) { + tests := []struct { + about string + args []string + expectedError string + }{ + { + about: "value needs to be a number", + args: []string{"name", "badvalue"}, + expectedError: "value needs to be a whole number", + }, + { + about: "value is missing", + args: []string{"name"}, + expectedError: "service and value required", + }, + { + about: "no args", + args: []string{}, + expectedError: "service and value required", + }, + } + for i, test := range tests { + s.mockAPI.ResetCalls() + c.Logf("test %d: %s", i, test.about) + _, err := s.run(c, test.args...) 
+ c.Check(err, gc.ErrorMatches, test.expectedError) + s.mockAPI.CheckNoCalls(c) + } +} + +func newMockAPI(s *testing.Stub) *mockapi { + return &mockapi{Stub: s} +} + +type mockapi struct { + *testing.Stub + resp string +} + +func (api *mockapi) UpdateAllocation(modelUUID, name, value string) (string, error) { + api.MethodCall(api, "UpdateAllocation", modelUUID, name, value) + return api.resp, api.NextErr() +} === added file 'src/github.com/juju/romulus/dependencies.tsv' --- src/github.com/juju/romulus/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/romulus/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +github.com/bmizerany/pat git 48be7df2c27e1cec821a3284a683ce6ef90d9052 2014-04-29T04:34:05Z +github.com/coreos/go-systemd git 2d21675230a81a503f4363f4aa3490af06d52bb8 2015-01-26T19:09:17Z +github.com/dustin/go-humanize git 145fabdb1ab757076a70a886d092a3af27f66f4c 2014-12-28T07:11:48Z +github.com/godbus/dbus git 88765d85c0fdadcd98a54e30694fa4e4f5b51133 2015-01-22T18:02:51Z +github.com/gosuri/uitable git cacfc559e8712a81692496c5147c80aced020e51 2015-12-16T01:20:41Z +github.com/juju/bundlechanges git ad533f529b3b4a8bb2b76bf6213f1ef6b68df392 2016-01-06T16:13:59Z +github.com/juju/cmd git 33554d631f79b840d76673665b292215882ba90a 2015-10-28T03:05:29Z +github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/go4 git 40d72ab9641a2a8c36a9c46a51e28367115c8e59 2016-02-22T16:32:58Z +github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z +github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z +github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z +github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z +github.com/juju/httprequest git 1015665b66c26101695f2f51407b3b1e000176fd 2015-10-07T14:02:54Z +github.com/juju/juju git 9522d3b7996a35797790559153830951476c8261 2016-02-24T07:01:01Z +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/names git 0fa6e46604f432b3064b02b2160fc5e6056131b8 2016-01-22T05:59:08Z +github.com/juju/persistent-cookiejar git fa866b33e350f6357a566d1b4f1c09c5493cfbdb 2016-02-22T17:00:27Z +github.com/juju/replicaset git fb7294cf57a1e2f08a57691f1246d129a87ab7e8 2015-05-08T02:21:43Z +github.com/juju/retry git 62c62032529169c7ec02fa48f93349604c345e1f 2015-10-29T02:48:21Z +github.com/juju/schema git 47d9b102199f4c67e5bfb28eb07bbe39411754fe 2016-01-19T21:19:26Z +github.com/juju/testing git 321edad6b2d1ccac4af9ee05c25b8ad734d40546 2016-02-03T23:31:10Z +github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z +github.com/juju/utils git 93acdddf8455dcb95aa63f2e2f707e5f8c199754 2016-01-21T06:28:51Z +github.com/julienschmidt/httprouter git 109e267447e95ad1bb48b758e40dd7453eb7b039 2015-09-05T17:25:33Z +github.com/mattn/go-runewidth git d96d1bd051f2bd9e7e43d602782b37b93b1b5666 2015-11-18T07:21:59Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z +gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z +gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z +gopkg.in/juju/blobstore.v2 git 51fa6e26128d74e445c72d3a91af555151cc3654 2016-01-25T02:37:03Z +gopkg.in/juju/charm.v6-unstable git 
a53382ef6e1940abf20f1150bd203d0d57ac1a44 2016-02-09T19:49:50Z
gopkg.in/juju/charmrepo.v2-unstable git b17697d8bb60cdac7d8ffd61e1357c9977cc2096 2015-11-30T13:55:09Z
gopkg.in/juju/environschema.v1 git 7bea6a9a531586600a7741e9bdd5e3c978ffda15 2015-10-20T16:12:31Z
gopkg.in/macaroon-bakery.v1 git 7b63aca524cc3f7b1ad0171e54cb78b33ce1e747 2015-12-01T10:11:23Z
gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z
gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z
gopkg.in/natefinch/lumberjack.v2 git 588a21fb0fa0ebdfde42670fa214576b6f0f22df 2015-05-21T01:59:18Z
gopkg.in/natefinch/npipe.v2 git e562d4ae5c2f838f9e7e406f7d9890d5b02467a9 2014-08-11T16:19:00Z
gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z
gopkg.in/yaml.v2 git 53feefa2559fb8dfa8d81baad31be332c97d6c77 2015-09-24T14:23:14Z
launchpad.net/gnuflag bzr roger.peppe@canonical.com-20140716064605-pk32dnmfust02yab 13
launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18
=== added directory 'src/github.com/juju/romulus/wireformat'
=== added directory 'src/github.com/juju/romulus/wireformat/budget'
=== added file 'src/github.com/juju/romulus/wireformat/budget/entities.go'
--- src/github.com/juju/romulus/wireformat/budget/entities.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/wireformat/budget/entities.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package budget contains definitions of the wireformats used by
+// the budget service clients.
+package budget
+
+import (
+	"strings"
+)
+
+// BudgetWithAllocations represents the current state of the budget and its allocations.
+type BudgetWithAllocations struct {
+	Limit       string       `json:"limit,omitempty"`
+	Total       BudgetTotals `json:"total"`
+	Allocations []Allocation `json:"allocations,omitempty"`
+}
+
+// BudgetTotals holds the aggregated spending totals for a budget.
+type BudgetTotals struct {
+	Limit       string `json:"limit,omitempty"`
+	Allocated   string `json:"allocated"`
+	Available   string `json:"available"`
+	Unallocated string `json:"unallocated"`
+	Usage       string `json:"usage"`
+	Consumed    string `json:"consumed"`
+}
+
+// Allocation represents the amount the user has allocated to specific
+// services in a named model.
+type Allocation struct {
+	Owner    string                       `json:"owner"`
+	Limit    string                       `json:"limit"`
+	Consumed string                       `json:"consumed"`
+	Usage    string                       `json:"usage"`
+	Model    string                       `json:"model"`
+	Services map[string]ServiceAllocation `json:"services"`
+}
+
+// ServiceAllocation represents the amount the user
+// has allocated to a specific service.
+type ServiceAllocation struct {
+	Consumed string `json:"consumed"`
+}
+
+// ListBudgetsResponse is returned by the ListBudgets API call.
+type ListBudgetsResponse struct {
+	Budgets BudgetSummaries `json:"budgets,omitempty"`
+	Total   BudgetTotals    `json:"total,omitempty"`
+	Credit  string          `json:"credit,omitempty"`
+}
+
+// BudgetSummaries is an alphabetically sorted list of budget summaries.
+type BudgetSummaries []BudgetSummary
+
+// Implement sort.Interface.
+func (b BudgetSummaries) Len() int      { return len(b) }
+func (b BudgetSummaries) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b BudgetSummaries) Less(i, j int) bool {
+	return strings.ToLower(b[i].Budget) < strings.ToLower(b[j].Budget)
+}
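A brief illustrative sketch of the case-insensitive ordering that the sort.Interface implementation above provides; the budget names here are invented sample data:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/juju/romulus/wireformat/budget"
)

func main() {
	summaries := budget.BudgetSummaries{
		{Budget: "Staging"},
		{Budget: "personal"},
		{Budget: "Default"},
	}
	// Less compares lower-cased budget names, so case does not
	// affect the order.
	sort.Sort(summaries)
	for _, s := range summaries {
		fmt.Println(s.Budget) // Default, personal, Staging
	}
}
```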
+// BudgetSummary represents the summary information for a single budget in
+// the ListBudgetsResponse structure.
+type BudgetSummary struct {
+	Owner       string `json:"owner"`
+	Budget      string `json:"budget"`
+	Limit       string `json:"limit"`
+	Allocated   string `json:"allocated"`
+	Unallocated string `json:"unallocated"`
+	Available   string `json:"available"`
+	Consumed    string `json:"consumed"`
+}
=== added file 'src/github.com/juju/romulus/wireformat/budget/requests.go'
--- src/github.com/juju/romulus/wireformat/budget/requests.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/wireformat/budget/requests.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,156 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package budget
+
+import (
+	"fmt"
+	"net/http"
+)
+
+var baseURL = "https://api.jujucharms.com/omnibus/v2"
+
+// CreateBudgetRequest is used in requests to the budget service
+// to create the specified budget.
+type CreateBudgetRequest struct {
+	Budget string `json:"budget"`
+	Limit  string `json:"limit"`
+}
+
+// Method returns the http method used for this request.
+func (CreateBudgetRequest) Method() string { return "POST" }
+
+// Body returns the body of the request.
+func (c CreateBudgetRequest) Body() interface{} {
+	return c
+}
+
+// URL returns the URL of the request.
+func (CreateBudgetRequest) URL() string {
+	return fmt.Sprintf("%s/budget", baseURL)
+}
+
+// ListBudgetsRequest defines a request to the budget service
+// to list a user's budgets.
+type ListBudgetsRequest struct{}
+
+// Method returns the method of the request.
+func (ListBudgetsRequest) Method() string { return "GET" }
+
+// URL returns the URL of the request.
+func (ListBudgetsRequest) URL() string {
+	return fmt.Sprintf("%s/budget", baseURL)
+}
+
+// SetBudgetRequest defines a request that updates the limit of
+// a budget.
+type SetBudgetRequest struct {
+	Budget string `json:"-"`
+	Limit  string `json:"limit"`
+}
+
+// Method returns the method of the request.
+func (SetBudgetRequest) Method() string { return "PUT" }
+
+// Body returns the request body.
+func (r SetBudgetRequest) Body() interface{} { return r }
+
+// URL returns the URL for the request.
+func (r SetBudgetRequest) URL() string {
+	return fmt.Sprintf("%s/budget/%s", baseURL, r.Budget)
+}
+
+// GetBudgetRequest defines a request that retrieves a specific budget.
+type GetBudgetRequest struct {
+	Budget string
+}
+
+// URL returns the URL for the request.
+func (r GetBudgetRequest) URL() string {
+	return fmt.Sprintf("%s/budget/%s", baseURL, r.Budget)
+}
+
+// Method returns the method for the request.
+func (GetBudgetRequest) Method() string { return "GET" }
+
+// CreateAllocationRequest defines a request to create an allocation in the specified budget.
+type CreateAllocationRequest struct {
+	Model    string   `json:"model"`
+	Services []string `json:"services"`
+	Limit    string   `json:"limit"`
+	Budget   string   `json:"-"`
+}
+
+// URL returns the URL for the request.
+func (r CreateAllocationRequest) URL() string {
+	return fmt.Sprintf("%s/budget/%s/allocation", baseURL, r.Budget)
+}
+
+// Method returns the method for the request.
+func (CreateAllocationRequest) Method() string { return "POST" }
+
+// Body returns the request body.
+func (r CreateAllocationRequest) Body() interface{} { return r }
+
+// UpdateAllocationRequest defines a request to update an allocation
+// associated with a service.
+type UpdateAllocationRequest struct {
+	Model   string `json:"-"`
+	Service string `json:"-"`
+	Limit   string `json:"limit"`
+}
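The request types above follow a shared Method/URL/Body convention rather than a declared interface. A hedged sketch of a generic sender that could consume them; the `request` interface and `do` helper below are assumptions for illustration, not part of this package:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/juju/romulus/wireformat/budget"
)

// request captures the convention the wireformat types follow:
// each value knows its own HTTP method and URL.
type request interface {
	Method() string
	URL() string
}

// do issues a request, JSON-encoding an optional body.
func do(c *http.Client, r request, body interface{}) (*http.Response, error) {
	var buf bytes.Buffer
	if body != nil {
		if err := json.NewEncoder(&buf).Encode(body); err != nil {
			return nil, err
		}
	}
	req, err := http.NewRequest(r.Method(), r.URL(), &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	return c.Do(req)
}

func main() {
	r := budget.SetBudgetRequest{Budget: "personal", Limit: "100"}
	resp, err := do(http.DefaultClient, r, r.Body())
	fmt.Println(resp, err)
}
```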
+// URL returns the URL for the request.
+func (r UpdateAllocationRequest) URL() string {
+	return fmt.Sprintf("%s/environment/%s/service/%s/allocation", baseURL, r.Model, r.Service)
+}
+
+// Method returns the method for the request.
+func (UpdateAllocationRequest) Method() string { return "PUT" }
+
+// Body returns the request body.
+func (r UpdateAllocationRequest) Body() interface{} { return r }
+
+// DeleteAllocationRequest defines a request that removes an allocation associated
+// with a service.
+type DeleteAllocationRequest struct {
+	Model   string `json:"-"`
+	Service string `json:"-"`
+}
+
+// URL returns the URL for the request.
+func (r DeleteAllocationRequest) URL() string {
+	return fmt.Sprintf("%s/environment/%s/service/%s/allocation", baseURL, r.Model, r.Service)
+}
+
+// Method returns the method for the request.
+func (DeleteAllocationRequest) Method() string { return "DELETE" }
+
+// HttpError represents an error caused by a failed http request.
+type HttpError struct {
+	StatusCode int
+	Message    string
+}
+
+func (e HttpError) Error() string {
+	return fmt.Sprintf("%d: %s", e.StatusCode, e.Message)
+}
+
+// NotAvailError indicates that the service is either unreachable or unavailable.
+type NotAvailError struct {
+	Resp int
+}
+
+func (e NotAvailError) Error() string {
+	if e.Resp == http.StatusServiceUnavailable {
+		return "service unavailable"
+	}
+	return "service unreachable"
+}
+
+// IsNotAvail indicates whether the error is a NotAvailError.
+func IsNotAvail(err error) bool {
+	_, ok := err.(NotAvailError)
+	return ok
+}
=== added directory 'src/github.com/juju/romulus/wireformat/metrics'
=== added file 'src/github.com/juju/romulus/wireformat/metrics/metrics.go'
--- src/github.com/juju/romulus/wireformat/metrics/metrics.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/wireformat/metrics/metrics.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,77 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package metrics defines the format that will be used to send metric
+// batches to the collector and receive updates.
+package metrics
+
+import (
+	"time"
+)
+
+// MetricBatch is a batch of metrics that will be sent to
+// the metric collector.
+type MetricBatch struct {
+	UUID        string    `json:"uuid"`
+	ModelUUID   string    `json:"env-uuid"`
+	UnitName    string    `json:"unit-name"`
+	CharmUrl    string    `json:"charm-url"`
+	Created     time.Time `json:"created"`
+	Metrics     []Metric  `json:"metrics"`
+	Credentials []byte    `json:"credentials"`
+}
+
+// Metric represents a single metric measurement.
+type Metric struct {
+	Key   string    `json:"key"`
+	Value string    `json:"value"`
+	Time  time.Time `json:"time"`
+}
+
+// Response represents the response from the metrics collector.
+type Response struct {
+	UUID           string               `json:"uuid"`
+	EnvResponses   EnvironmentResponses `json:"env-responses"`
+	NewGracePeriod time.Duration        `json:"new-grace-period"`
+}
+
+// EnvironmentResponses maps model UUIDs to their individual response data.
+type EnvironmentResponses map[string]EnvResponse
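A brief illustrative sketch of the Ack and SetStatus helpers defined below; the UUIDs and unit name are placeholders mirroring the package tests:

```go
package main

import (
	"fmt"

	"github.com/juju/romulus/wireformat/metrics"
)

func main() {
	// Assemble a collector response incrementally.
	resp := metrics.EnvironmentResponses{}
	resp.Ack("model-uuid", "batch-uuid")                     // record an acknowledged batch
	resp.SetStatus("model-uuid", "some-unit/0", "GREEN", "") // record a unit status
	fmt.Println(resp["model-uuid"].AcknowledgedBatches)      // [batch-uuid]
	fmt.Println(resp["model-uuid"].UnitStatuses["some-unit/0"].Status) // GREEN
}
```

+// Ack adds the specified batch UUID to the list of acknowledged batches
+// for the specified environment.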
+func (e EnvironmentResponses) Ack(modelUUID, batchUUID string) {
+	env := e[modelUUID]
+
+	env.AcknowledgedBatches = append(env.AcknowledgedBatches, batchUUID)
+	e[modelUUID] = env
+}
+
+// SetStatus records the status of the given unit in the response data
+// for the specified environment.
+func (e EnvironmentResponses) SetStatus(modelUUID, unitName, status, info string) {
+	s := UnitStatus{
+		Status: status,
+		Info:   info,
+	}
+
+	env := e[modelUUID]
+
+	if env.UnitStatuses == nil {
+		env.UnitStatuses = map[string]UnitStatus{
+			unitName: s,
+		}
+	} else {
+		env.UnitStatuses[unitName] = s
+	}
+	e[modelUUID] = env
+}
+
+// EnvResponse contains the response data relevant to a concrete environment.
+type EnvResponse struct {
+	AcknowledgedBatches []string              `json:"acks,omitempty"`
+	UnitStatuses        map[string]UnitStatus `json:"unit-statuses,omitempty"`
+}
+
+// UnitStatus holds the status and additional information of a single unit.
+type UnitStatus struct {
+	Status string `json:"status"`
+	Info   string `json:"info"`
+}
=== added file 'src/github.com/juju/romulus/wireformat/metrics/metrics_test.go'
--- src/github.com/juju/romulus/wireformat/metrics/metrics_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/wireformat/metrics/metrics_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,66 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package metrics_test
+
+import (
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/romulus/wireformat/metrics"
+)
+
+type metricsSuite struct {
+}
+
+var _ = gc.Suite(&metricsSuite{})
+
+func (s *metricsSuite) TestAck(c *gc.C) {
+	resp := metrics.EnvironmentResponses{}
+	c.Assert(resp, gc.HasLen, 0)
+
+	modelUUID := "model-uuid"
+	modelUUID2 := "model-uuid2"
+	batchUUID := "batch-uuid"
+	batchUUID2 := "batch-uuid2"
+
+	resp.Ack(modelUUID, batchUUID)
+	resp.Ack(modelUUID, batchUUID2)
+	resp.Ack(modelUUID2, batchUUID)
+	c.Assert(resp, gc.HasLen, 2)
+
+	c.Assert(resp[modelUUID].AcknowledgedBatches, jc.SameContents, []string{batchUUID, batchUUID2})
+	c.Assert(resp[modelUUID2].AcknowledgedBatches, jc.SameContents, []string{batchUUID})
+}
+
+func (s *metricsSuite) TestSetStatus(c *gc.C) {
+	resp := metrics.EnvironmentResponses{}
+	c.Assert(resp, gc.HasLen, 0)
+
+	modelUUID := "model-uuid"
+	modelUUID2 := "model-uuid2"
+	unitName := "some-unit/0"
+	unitName2 := "some-unit/1"
+
+	resp.SetStatus(modelUUID, unitName, "GREEN", "")
+	c.Assert(resp, gc.HasLen, 1)
+	c.Assert(resp[modelUUID].UnitStatuses[unitName].Status, gc.Equals, "GREEN")
+	c.Assert(resp[modelUUID].UnitStatuses[unitName].Info, gc.Equals, "")
+
+	resp.SetStatus(modelUUID, unitName2, "RED", "Unit unresponsive.")
+	c.Assert(resp, gc.HasLen, 1)
+	c.Assert(resp[modelUUID].UnitStatuses[unitName].Status, gc.Equals, "GREEN")
+	c.Assert(resp[modelUUID].UnitStatuses[unitName].Info, gc.Equals, "")
+	c.Assert(resp[modelUUID].UnitStatuses[unitName2].Status, gc.Equals, "RED")
+	c.Assert(resp[modelUUID].UnitStatuses[unitName2].Info, gc.Equals, "Unit unresponsive.")
+
+	resp.SetStatus(modelUUID2, unitName, "UNKNOWN", "")
+	c.Assert(resp, gc.HasLen, 2)
+	c.Assert(resp[modelUUID2].UnitStatuses[unitName].Status, gc.Equals, "UNKNOWN")
+	c.Assert(resp[modelUUID2].UnitStatuses[unitName].Info, gc.Equals, "")
+
+	resp.SetStatus(modelUUID, unitName, "RED", "Invalid data received.")
+	c.Assert(resp, gc.HasLen, 2)
+	c.Assert(resp[modelUUID].UnitStatuses[unitName].Status, gc.Equals, "RED")
+	c.Assert(resp[modelUUID].UnitStatuses[unitName].Info, gc.Equals, "Invalid data received.")
+}
=== added file 'src/github.com/juju/romulus/wireformat/metrics/package_test.go'
--- src/github.com/juju/romulus/wireformat/metrics/package_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/wireformat/metrics/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package metrics_test
+
+import (
+	stdtesting "testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *stdtesting.T) {
+	gc.TestingT(t)
+}
=== added directory 'src/github.com/juju/romulus/wireformat/plan'
=== added file 'src/github.com/juju/romulus/wireformat/plan/entities.go'
--- src/github.com/juju/romulus/wireformat/plan/entities.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/romulus/wireformat/plan/entities.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,52 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package plan contains wireformat structs intended for the
+// subscription service plan management API.
+package plan
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/names"
+	"github.com/juju/utils"
+)
+
+// Plan is used as a wire format to store information on an ISV-created
+// rating plan and the charm URLs for which the plan is valid (a subscription
+// using this plan can be created).
+type Plan struct {
+	URL        string `json:"url"`        // Name of the rating plan
+	Definition string `json:"plan"`       // The rating plan
+	CreatedOn  string `json:"created-on"` // When the plan was created - RFC3339 encoded timestamp
+}
+
+// AuthorizationRequest defines the struct used to request a plan authorization.
+type AuthorizationRequest struct {
+	EnvironmentUUID string `json:"env-uuid"` // TODO(cmars): rename to EnvUUID
+	CharmURL        string `json:"charm-url"`
+	ServiceName     string `json:"service-name"`
+	PlanURL         string `json:"plan-url"`
+}
+
+// Validate checks the AuthorizationRequest for errors.
+func (s AuthorizationRequest) Validate() error {
+	if !utils.IsValidUUIDString(s.EnvironmentUUID) {
+		return errors.Errorf("invalid environment UUID: %q", s.EnvironmentUUID)
+	}
+	if s.ServiceName == "" {
+		return errors.New("undefined service name")
+	}
+	if !names.IsValidService(s.ServiceName) {
+		return errors.Errorf("invalid service name: %q", s.ServiceName)
+	}
+	if s.CharmURL == "" {
+		return errors.New("undefined charm url")
+	}
+	if !names.IsValidCharm(s.CharmURL) {
+		return errors.Errorf("invalid charm url: %q", s.CharmURL)
+	}
+	if s.PlanURL == "" {
+		return errors.New("undefined plan url")
+	}
+	return nil
+}
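To make the validation rules concrete, a short illustrative sketch of Validate in use; every field value below is an invented placeholder in a valid format:

```go
package main

import (
	"fmt"

	"github.com/juju/romulus/wireformat/plan"
)

func main() {
	req := plan.AuthorizationRequest{
		EnvironmentUUID: "01234567-89ab-cdef-0123-456789abcdef", // must be a valid UUID
		CharmURL:        "cs:trusty/wordpress-42",               // must parse as a charm URL
		ServiceName:     "wordpress",                            // must be a valid service name
		PlanURL:         "bob/default",
	}
	// Validate checks each field in turn; an empty or malformed
	// field yields a descriptive error.
	if err := req.Validate(); err != nil {
		fmt.Println("invalid request:", err)
		return
	}
	fmt.Println("request ok")
}
```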
=== modified file 'src/github.com/juju/schema/numeric.go'
--- src/github.com/juju/schema/numeric.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/schema/numeric.go 2016-03-22 15:18:22 +0000
@@ -61,6 +61,41 @@
 	return reflect.ValueOf(v).Int(), nil
 }
 
+// Uint returns a Checker that accepts any integer or unsigned value, and
+// returns the same value consistently typed as a uint64. If the integer
+// value is negative an error is raised.
+func Uint() Checker {
+	return uintC{}
+}
+
+type uintC struct{}
+
+func (c uintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"uint", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return reflect.ValueOf(v).Uint(), nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		val := reflect.ValueOf(v).Int()
+		if val < 0 {
+			return nil, error_{"uint", v, path}
+		}
+		// All positive int64 values fit into uint64.
+		return uint64(val), nil
+	case reflect.String:
+		val, err := strconv.ParseUint(reflect.ValueOf(v).String(), 0, 64)
+		if err != nil {
+			return nil, error_{"uint", v, path}
+		}
+		return val, nil
+	default:
+		return nil, error_{"uint", v, path}
+	}
+}
+
 // ForceInt returns a Checker that accepts any integer or float value, and
 // returns the same value consistently typed as an int. This is required
 // in order to handle the interface{}/float64 type conversion performed by
=== modified file 'src/github.com/juju/schema/schema_test.go'
--- src/github.com/juju/schema/schema_test.go 2015-10-23 18:28:45 +0000
+++ src/github.com/juju/schema/schema_test.go 2016-03-22 15:18:22 +0000
@@ -5,6 +5,7 @@
 
 import (
 	"math"
+	"time"
 
 	gc "launchpad.net/gocheck"
 
@@ -109,6 +110,42 @@
 	c.Assert(err, gc.ErrorMatches, ": expected int, got nothing")
 }
 
+func (s *S) TestUint(c *gc.C) {
+	s.sch = schema.Uint()
+
+	out, err := s.sch.Coerce(42, aPath)
+	c.Assert(err, gc.IsNil)
+	c.Assert(out, gc.Equals, uint64(42))
+
+	out, err = s.sch.Coerce(int8(42), aPath)
+	c.Assert(err, gc.IsNil)
+	c.Assert(out, gc.Equals, uint64(42))
+
+	out, err = s.sch.Coerce(uint8(42), aPath)
+	c.Assert(err, gc.IsNil)
+	c.Assert(out, gc.Equals, uint64(42))
+
+	out, err = s.sch.Coerce("42", aPath)
+	c.Assert(err, gc.IsNil)
+	c.Assert(out, gc.Equals, uint64(42))
+
+	out, err = s.sch.Coerce("-42", aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, `: expected uint, got string("-42")`)
+
+	out, err = s.sch.Coerce(-42, aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, ": expected uint, got int(-42)")
+
+	out, err = s.sch.Coerce(true, aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, ": expected uint, got bool(true)")
+
+	out, err = s.sch.Coerce(nil, aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, ": expected uint, got nothing")
+}
+
 func (s *S) TestForceInt(c *gc.C) {
 	s.sch = schema.ForceInt()
 
@@ -466,6 +503,33 @@
 	c.Assert(err, gc.ErrorMatches, ": expected uuid, got nothing")
 }
 
+func (s *S) TestTime(c *gc.C) {
+	s.sch = schema.Time()
+
+	var empty time.Time
+	value := time.Date(2016, 10, 9, 12, 34, 56, 0, time.UTC)
+
+	out, err := s.sch.Coerce("", aPath)
+	c.Assert(err, gc.IsNil)
+	c.Assert(out, gc.Equals, empty)
+
+	out, err = s.sch.Coerce(value.Format(time.RFC3339Nano), aPath)
+	c.Assert(err, gc.IsNil)
+	c.Assert(out, gc.Equals, value)
+
+	out, err = s.sch.Coerce("invalid", aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, `parsing time "invalid" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "invalid" as "2006"`)
+
+	out, err = s.sch.Coerce(42, aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, ": expected string or time.Time, got int(42)")
+
+	out, err = s.sch.Coerce(nil, aPath)
+	c.Assert(out, gc.IsNil)
+	c.Assert(err.Error(), gc.Equals, ": expected string or time.Time, got nothing")
+}
+
 func (s *S) TestStringified(c *gc.C) {
 	s.sch = schema.Stringified()
=== added file 'src/github.com/juju/schema/time.go'
--- src/github.com/juju/schema/time.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/schema/time.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,41 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
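A brief illustrative sketch of the new Uint and Time checkers in use; the inputs mirror the tests above:

```go
package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	// Uint coerces signed, unsigned, and string input to uint64 and
	// rejects negative values.
	u, err := schema.Uint().Coerce("42", nil)
	fmt.Println(u, err) // 42 <nil>

	// Time parses RFC3339 strings; an empty string coerces to the
	// zero time.Time.
	t, err := schema.Time().Coerce("2016-10-09T12:34:56Z", nil)
	fmt.Println(t, err) // 2016-10-09 12:34:56 +0000 UTC <nil>
}
```

+// Time returns a Checker that accepts a string value, and returns
+// the parsed time.Time value. Empty strings are considered empty times.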
+func Time() Checker { + return timeC{} +} + +type timeC struct{} + +// Coerce implements Checker Coerce method. +func (c timeC) Coerce(v interface{}, path []string) (interface{}, error) { + if v == nil { + return nil, error_{"string or time.Time", v, path} + } + var empty time.Time + switch reflect.TypeOf(v).Kind() { + case reflect.TypeOf(empty).Kind(): + return v, nil + case reflect.String: + vstr := reflect.ValueOf(v).String() + if vstr == "" { + return empty, nil + } + v, err := time.Parse(time.RFC3339Nano, vstr) + if err != nil { + return nil, err + } + return v, nil + default: + return nil, error_{"string or time.Time", v, path} + } +} === removed directory 'src/github.com/juju/syslog' === removed file 'src/github.com/juju/syslog/.gitignore' --- src/github.com/juju/syslog/.gitignore 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/.gitignore 1970-01-01 00:00:00 +0000 @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test === removed file 'src/github.com/juju/syslog/LICENSE' --- src/github.com/juju/syslog/LICENSE 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/LICENSE 1970-01-01 00:00:00 +0000 @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=== removed file 'src/github.com/juju/syslog/README.md' --- src/github.com/juju/syslog/README.md 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/README.md 1970-01-01 00:00:00 +0000 @@ -1,99 +0,0 @@ -syslog -====== - -fork of the standard go syslog package - -This adds the ability to write to syslog daemons using TLS, as well as implementing this for Windows - -```go - -package main - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - - "github.com/gabriel-samfira/syslog" -) - -const caPem = `-----BEGIN CERTIFICATE----- -MIICXzCCAcqgAwIBAgIBADALBgkqhkiG9w0BAQUwRTENMAsGA1UEChMEanVqdTE0 -MDIGA1UEAwwranVqdS1nZW5lcmF0ZWQgQ0EgZm9yIGVudmlyb25tZW50ICJyc3lz -bG9nIjAeFw0xNDA4MDUxMjEzNTBaFw0yNDA4MDUxMjE4NTBaMEUxDTALBgNVBAoT -BGp1anUxNDAyBgNVBAMMK2p1anUtZ2VuZXJhdGVkIENBIGZvciBlbnZpcm9ubWVu -dCAicnN5c2xvZyIwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALSz4DWGHrXW -xp6uwwJ3j6amUhQajtGetkrPWLXp85gpdnwDgXgCOm/RXWHV2F2FtiSXkAf9FOQR -AOz2UhElHRMsv4+dsLJL9HfG2VtD6p73qR4vpwMYfIYb9ofHoK9A9tSpUoZRwZRz -wgoiayjeXvXMh9WRiszjln9dpYsUmZQlAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIA -pDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRtRlWT4zNaljsAYuaJo4epOwaH -HTAfBgNVHSMEGDAWgBRtRlWT4zNaljsAYuaJo4epOwaHHTALBgkqhkiG9w0BAQUD -gYEAAwi3/RUlgxt5xEQW3V4kgZmyAMrGt6uM417htZw/7E9CkfCFPjYKIITQKjAO -2ytOpL9dkJcDPW488vWkTBBqBSJWX6Vjz+T1Z6sebw24+VvvTo7oaQGhlJD4stLY -byTiSrVQmhaH5QPCErgdeBn6AZkIZ1XuB5VMoYTYbBLObO0= ------END CERTIFICATE-----` - -const cert = `-----BEGIN CERTIFICATE----- -MIICOTCCAaSgAwIBAgIBADALBgkqhkiG9w0BAQUwRTENMAsGA1UEChMEanVqdTE0 -MDIGA1UEAwwranVqdS1nZW5lcmF0ZWQgQ0EgZm9yIGVudmlyb25tZW50ICJyc3lz -bG9nIjAeFw0xNDA4MDUxMjEzNTBaFw0yNDA4MDUxMjE4NTBaMBsxDTALBgNVBAoT -BGp1anUxCjAIBgNVBAMTASowgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOBc -CBEBj2K6dcV3xm1vqByyhki8dUl4AxmnrVDwr7pNKvgyf3t0qoY6/8P+/fphge8M -yFNS0cDmIL27PvUxFOdsPLFDEBeuY373L8EerYMq3Gp/M/UW4k/lwZEuRTKQ4oZ1 -mvjXySKEAqroQ8Fq7wOLRkBORLbBFJ47au9U4HKhAgMBAAGjZzBlMA4GA1UdDwEB -/wQEAwIAqDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQU8RsHN12K62sV -irTv3dPEFrVjV0swHwYDVR0jBBgwFoAUbUZVk+MzWpY7AGLmiaOHqTsGhx0wCwYJ -KoZIhvcNAQEFA4GBAKdb7/YA3u7SuGxXMEoFz6zqe51E+CfNhhToNXEHFX2JYRUk -aDvUNHDelSsclipo8LEBwvffcN9PH3ruWVlNusGyLjMFaKcuhjJHwv+AoOHpJgBd -AFWciBspXneItQs1wi5kwyFPphLJifEOS83Sc4jtqHj5lq8vjoYBzDLgrnHw ------END CERTIFICATE-----` - -const key = `-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQDgXAgRAY9iunXFd8Ztb6gcsoZIvHVJeAMZp61Q8K+6TSr4Mn97 -dKqGOv/D/v36YYHvDMhTUtHA5iC9uz71MRTnbDyxQxAXrmN+9y/BHq2DKtxqfzP1 -FuJP5cGRLkUykOKGdZr418kihAKq6EPBau8Di0ZATkS2wRSeO2rvVOByoQIDAQAB -AoGAD/hdFqDOzQ9KvNCmzjlpdQl8J4dKrf0d82CNJLrNN2ywx1QI4QfP75gZhqEL -ARyZvCNjyxKVHa8D252NgLSKsUBTGllB3Dn9M8MZ9i9w6AapSwTwy9hxCrgB6ILC -6BnWW+HpuWq6v1Ft+lNycwoDwlevlpX7jfpmQTaNxYFg2jECQQDs354qlZs/Boqz -RTdgkM31kglcXUo8W4ZxU35DiVWsGb24boo6HurTwyqJBOogxDnWIZw4kgCbdRUW -FMA/04TtAkEA8nm8+WghdSgRDxXD486zzhrRnt6++vcARiJs4Mc621H9yjNwLrHz -2eIdWeE/2/xXtETWtGTX9ByQ8ufg3+kCBQJADDlF+kCaMFhwE+xAfVU7q66LmR6f -VBoNCBAc9fNCXo09gyUBMRqjV6Y8rbF5O5OkwG4fl7PBIEScf/U2LpUFyQJBAIdt -rzquCmHhKwX95hdKz+qB2CqfxpNted2yRJWXMSxmMxXIfRPXmJdNT49v27cGzgWF -nVXMLUHO4raJBHSLM/ECQQCAAuxb/GLAPDH9cbHo1BglU2mSzT81hSqanXcAapeh -2Y4xinXaXKxrgDFmPQJJZ2P+iCQuZp522N1+uro1zDlL ------END RSA PRIVATE KEY-----` - -func main() { - caCert := x509.NewCertPool() - ok := caCert.AppendCertsFromPEM([]byte(caPem)) - if !ok { - fmt.Println("failed to parse root certificate") - return - } - keyPair, err := tls.X509KeyPair([]byte(cert), []byte(key)) - if err != nil { - fmt.Println("invalid keypair") - return - } - - tlsCfg := &tls.Config{ - ClientCAs: caCert, - Certificates: 
[]tls.Certificate{ - keyPair, - }, - InsecureSkipVerify: true, - } - sLog, err := syslog.Dial("tcp", "192.168.200.51:6514", syslog.LOG_CRIT, "juju-syslog_test", tlsCfg) - if err != nil { - fmt.Println(err) - return - } - defer sLog.Close() - err = sLog.Warning("hello") - if err != nil { - fmt.Println(err) - return - } -} -``` === removed file 'src/github.com/juju/syslog/syslog.go' --- src/github.com/juju/syslog/syslog.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog.go 1970-01-01 00:00:00 +0000 @@ -1,327 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package syslog provides a simple interface to the system log -// service. It can send messages to the syslog daemon using UNIX -// domain sockets, UDP or TCP. -// -// Only one call to Dial is necessary. On write failures, -// the syslog client will attempt to reconnect to the server -// and write again. -package syslog - -import ( - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "net" - "os" - "strings" - "sync" - "time" -) - -// The Priority is a combination of the syslog facility and -// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity -// message from the FTP facility. The default severity is LOG_EMERG; -// the default facility is LOG_KERN. -type Priority int - -const severityMask = 0x07 -const facilityMask = 0xf8 - -const ( - // Severity. - - // From /usr/include/sys/syslog.h. - // These are the same on Linux, BSD, and OS X. - LOG_EMERG Priority = iota - LOG_ALERT - LOG_CRIT - LOG_ERR - LOG_WARNING - LOG_NOTICE - LOG_INFO - LOG_DEBUG -) - -const ( - // Facility. - - // From /usr/include/sys/syslog.h. - // These are the same up to LOG_FTP on Linux, BSD, and OS X. - LOG_KERN Priority = iota << 3 - LOG_USER - LOG_MAIL - LOG_DAEMON - LOG_AUTH - LOG_SYSLOG - LOG_LPR - LOG_NEWS - LOG_UUCP - LOG_CRON - LOG_AUTHPRIV - LOG_FTP - _ // unused - _ // unused - _ // unused - _ // unused - LOG_LOCAL0 - LOG_LOCAL1 - LOG_LOCAL2 - LOG_LOCAL3 - LOG_LOCAL4 - LOG_LOCAL5 - LOG_LOCAL6 - LOG_LOCAL7 -) - -// A Writer is a connection to a syslog server. -type Writer struct { - priority Priority - tag string - hostname string - network string - raddr string - tlsCfg *tls.Config - - mu sync.Mutex // guards conn - conn serverConn -} - -var _ io.Writer = (*Writer)(nil) - -// This interface and the separate syslog_unix.go file exist for -// Solaris support as implemented by gccgo. On Solaris you can not -// simply open a TCP connection to the syslog daemon. The gccgo -// sources have a syslog_solaris.go file that implements localSyslog to -// return a type that satisfies this interface and simply calls the C -// library syslog function. -type serverConn interface { - writeString(p Priority, hostname, tag, s, nl string) error - close() error -} - -type netConn struct { - local bool - conn net.Conn -} - -// New establishes a new connection to the system log daemon. Each -// write to the returned writer sends a log message with the given -// priority and prefix. -func New(priority Priority, tag string) (w *Writer, err error) { - return Dial("", "", priority, tag, nil) -} - -// Dial establishes a connection to a log daemon by connecting to -// address raddr on the network net. Each write to the returned -// writer sends a log message with the given facility, severity and -// tag. 
-func Dial(network, raddr string, priority Priority, tag string, tlsCfg *tls.Config) (*Writer, error) { - if priority < 0 || priority > LOG_LOCAL7|LOG_DEBUG { - return nil, errors.New("log/syslog: invalid priority") - } - - if tag == "" { - tag = os.Args[0] - } - hostname, _ := os.Hostname() - - w := &Writer{ - priority: priority, - tag: tag, - hostname: hostname, - network: network, - raddr: raddr, - tlsCfg: tlsCfg, - } - - w.mu.Lock() - defer w.mu.Unlock() - - err := w.connect() - if err != nil { - return nil, err - } - return w, err -} - -// connect makes a connection to the syslog server. -// It must be called with w.mu held. -func (w *Writer) connect() (err error) { - if w.conn != nil { - // ignore err from close, it makes sense to continue anyway - w.conn.close() - w.conn = nil - } - - if w.network == "" { - w.conn, err = localSyslog() - if err != nil { - return err - } - if w.hostname == "" { - w.hostname = "localhost" - } - } else { - var c net.Conn - c, err = dial(w.network, w.raddr, w.tlsCfg) - if err == nil { - w.conn = &netConn{conn: c} - if w.hostname == "" { - w.hostname = c.LocalAddr().String() - } - } - } - return -} - -// Write sends a log message to the syslog daemon. -func (w *Writer) Write(b []byte) (int, error) { - return w.writeAndRetry(w.priority, string(b)) -} - -// Close closes a connection to the syslog daemon. -func (w *Writer) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.conn != nil { - err := w.conn.close() - w.conn = nil - return err - } - return nil -} - -// Emerg logs a message with severity LOG_EMERG, ignoring the severity -// passed to New. -func (w *Writer) Emerg(m string) (err error) { - _, err = w.writeAndRetry(LOG_EMERG, m) - return err -} - -// Alert logs a message with severity LOG_ALERT, ignoring the severity -// passed to New. -func (w *Writer) Alert(m string) (err error) { - _, err = w.writeAndRetry(LOG_ALERT, m) - return err -} - -// Crit logs a message with severity LOG_CRIT, ignoring the severity -// passed to New. -func (w *Writer) Crit(m string) (err error) { - _, err = w.writeAndRetry(LOG_CRIT, m) - return err -} - -// Err logs a message with severity LOG_ERR, ignoring the severity -// passed to New. -func (w *Writer) Err(m string) (err error) { - _, err = w.writeAndRetry(LOG_ERR, m) - return err -} - -// Warning logs a message with severity LOG_WARNING, ignoring the -// severity passed to New. -func (w *Writer) Warning(m string) (err error) { - _, err = w.writeAndRetry(LOG_WARNING, m) - return err -} - -// Notice logs a message with severity LOG_NOTICE, ignoring the -// severity passed to New. -func (w *Writer) Notice(m string) (err error) { - _, err = w.writeAndRetry(LOG_NOTICE, m) - return err -} - -// Info logs a message with severity LOG_INFO, ignoring the severity -// passed to New. -func (w *Writer) Info(m string) (err error) { - _, err = w.writeAndRetry(LOG_INFO, m) - return err -} - -// Debug logs a message with severity LOG_DEBUG, ignoring the severity -// passed to New. -func (w *Writer) Debug(m string) (err error) { - _, err = w.writeAndRetry(LOG_DEBUG, m) - return err -} - -func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { - pr := (w.priority & facilityMask) | (p & severityMask) - - w.mu.Lock() - defer w.mu.Unlock() - - if w.conn != nil { - if n, err := w.write(pr, s); err == nil { - return n, err - } - } - if err := w.connect(); err != nil { - return 0, err - } - return w.write(pr, s) -} - -// write generates and writes a syslog formatted string. 
The -// format is as follows: TIMESTAMP HOSTNAME TAG[PID]: MSG -func (w *Writer) write(p Priority, msg string) (int, error) { - // ensure it ends in a \n - nl := "" - if !strings.HasSuffix(msg, "\n") { - nl = "\n" - } - - err := w.conn.writeString(p, w.hostname, w.tag, msg, nl) - if err != nil { - return 0, err - } - // Note: return the length of the input, not the number of - // bytes printed by Fprintf, because this must behave like - // an io.Writer. - return len(msg), nil -} - -func (n *netConn) writeString(p Priority, hostname, tag, msg, nl string) error { - if n.local { - // Compared to the network form below, the changes are: - // 1. Use time.Stamp instead of time.RFC3339. - // 2. Drop the hostname field from the Fprintf. - timestamp := time.Now().Format(time.Stamp) - _, err := fmt.Fprintf(n.conn, "<%d>%s %s[%d]: %s%s", - p, timestamp, - tag, os.Getpid(), msg, nl) - return err - } - timestamp := time.Now().Format(time.RFC3339) - _, err := fmt.Fprintf(n.conn, "<%d>%s %s %s[%d]: %s%s", - p, timestamp, hostname, - tag, os.Getpid(), msg, nl) - return err -} - -func (n *netConn) close() error { - return n.conn.Close() -} - -// NewLogger creates a log.Logger whose output is written to -// the system log service with the specified priority. The logFlag -// argument is the flag set passed through to log.New to create -// the Logger. -func NewLogger(p Priority, logFlag int) (*log.Logger, error) { - s, err := New(p, "") - if err != nil { - return nil, err - } - return log.New(s, "", logFlag), nil -} === removed file 'src/github.com/juju/syslog/syslog_common_test.go' --- src/github.com/juju/syslog/syslog_common_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog_common_test.go 1970-01-01 00:00:00 +0000 @@ -1,417 +0,0 @@ -// +build !plan9 - -package syslog - -import ( - "bufio" - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "sync" - "testing" - "time" -) - -var crashy = false - -const caPem = `-----BEGIN CERTIFICATE----- -MIICXzCCAcqgAwIBAgIBADALBgkqhkiG9w0BAQUwRTENMAsGA1UEChMEanVqdTE0 -MDIGA1UEAwwranVqdS1nZW5lcmF0ZWQgQ0EgZm9yIGVudmlyb25tZW50ICJyc3lz -bG9nIjAeFw0xNDA4MDUxMjEzNTBaFw0yNDA4MDUxMjE4NTBaMEUxDTALBgNVBAoT -BGp1anUxNDAyBgNVBAMMK2p1anUtZ2VuZXJhdGVkIENBIGZvciBlbnZpcm9ubWVu -dCAicnN5c2xvZyIwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBALSz4DWGHrXW -xp6uwwJ3j6amUhQajtGetkrPWLXp85gpdnwDgXgCOm/RXWHV2F2FtiSXkAf9FOQR -AOz2UhElHRMsv4+dsLJL9HfG2VtD6p73qR4vpwMYfIYb9ofHoK9A9tSpUoZRwZRz -wgoiayjeXvXMh9WRiszjln9dpYsUmZQlAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIA -pDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRtRlWT4zNaljsAYuaJo4epOwaH -HTAfBgNVHSMEGDAWgBRtRlWT4zNaljsAYuaJo4epOwaHHTALBgkqhkiG9w0BAQUD -gYEAAwi3/RUlgxt5xEQW3V4kgZmyAMrGt6uM417htZw/7E9CkfCFPjYKIITQKjAO -2ytOpL9dkJcDPW488vWkTBBqBSJWX6Vjz+T1Z6sebw24+VvvTo7oaQGhlJD4stLY -byTiSrVQmhaH5QPCErgdeBn6AZkIZ1XuB5VMoYTYbBLObO0= ------END CERTIFICATE-----` - -const cert = `-----BEGIN CERTIFICATE----- -MIICOTCCAaSgAwIBAgIBADALBgkqhkiG9w0BAQUwRTENMAsGA1UEChMEanVqdTE0 -MDIGA1UEAwwranVqdS1nZW5lcmF0ZWQgQ0EgZm9yIGVudmlyb25tZW50ICJyc3lz -bG9nIjAeFw0xNDA4MDUxMjEzNTBaFw0yNDA4MDUxMjE4NTBaMBsxDTALBgNVBAoT -BGp1anUxCjAIBgNVBAMTASowgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOBc -CBEBj2K6dcV3xm1vqByyhki8dUl4AxmnrVDwr7pNKvgyf3t0qoY6/8P+/fphge8M -yFNS0cDmIL27PvUxFOdsPLFDEBeuY373L8EerYMq3Gp/M/UW4k/lwZEuRTKQ4oZ1 -mvjXySKEAqroQ8Fq7wOLRkBORLbBFJ47au9U4HKhAgMBAAGjZzBlMA4GA1UdDwEB -/wQEAwIAqDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQU8RsHN12K62sV -irTv3dPEFrVjV0swHwYDVR0jBBgwFoAUbUZVk+MzWpY7AGLmiaOHqTsGhx0wCwYJ 
-KoZIhvcNAQEFA4GBAKdb7/YA3u7SuGxXMEoFz6zqe51E+CfNhhToNXEHFX2JYRUk -aDvUNHDelSsclipo8LEBwvffcN9PH3ruWVlNusGyLjMFaKcuhjJHwv+AoOHpJgBd -AFWciBspXneItQs1wi5kwyFPphLJifEOS83Sc4jtqHj5lq8vjoYBzDLgrnHw ------END CERTIFICATE-----` - -const key = `-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQDgXAgRAY9iunXFd8Ztb6gcsoZIvHVJeAMZp61Q8K+6TSr4Mn97 -dKqGOv/D/v36YYHvDMhTUtHA5iC9uz71MRTnbDyxQxAXrmN+9y/BHq2DKtxqfzP1 -FuJP5cGRLkUykOKGdZr418kihAKq6EPBau8Di0ZATkS2wRSeO2rvVOByoQIDAQAB -AoGAD/hdFqDOzQ9KvNCmzjlpdQl8J4dKrf0d82CNJLrNN2ywx1QI4QfP75gZhqEL -ARyZvCNjyxKVHa8D252NgLSKsUBTGllB3Dn9M8MZ9i9w6AapSwTwy9hxCrgB6ILC -6BnWW+HpuWq6v1Ft+lNycwoDwlevlpX7jfpmQTaNxYFg2jECQQDs354qlZs/Boqz -RTdgkM31kglcXUo8W4ZxU35DiVWsGb24boo6HurTwyqJBOogxDnWIZw4kgCbdRUW -FMA/04TtAkEA8nm8+WghdSgRDxXD486zzhrRnt6++vcARiJs4Mc621H9yjNwLrHz -2eIdWeE/2/xXtETWtGTX9ByQ8ufg3+kCBQJADDlF+kCaMFhwE+xAfVU7q66LmR6f -VBoNCBAc9fNCXo09gyUBMRqjV6Y8rbF5O5OkwG4fl7PBIEScf/U2LpUFyQJBAIdt -rzquCmHhKwX95hdKz+qB2CqfxpNted2yRJWXMSxmMxXIfRPXmJdNT49v27cGzgWF -nVXMLUHO4raJBHSLM/ECQQCAAuxb/GLAPDH9cbHo1BglU2mSzT81hSqanXcAapeh -2Y4xinXaXKxrgDFmPQJJZ2P+iCQuZp522N1+uro1zDlL ------END RSA PRIVATE KEY-----` - -func runPktSyslog(c net.PacketConn, done chan<- string) { - var buf [4096]byte - var rcvd string - ct := 0 - for { - var n int - var err error - - c.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) - n, _, err = c.ReadFrom(buf[:]) - rcvd += string(buf[:n]) - if err != nil { - if oe, ok := err.(*net.OpError); ok { - if ct < 3 && oe.Temporary() { - ct++ - continue - } - } - break - } - } - c.Close() - done <- rcvd -} - -func startServer(n, la string, done chan<- string, tlsCfg *tls.Config) (addr string, sock io.Closer, wg *sync.WaitGroup) { - if n == "udp" || n == "tcp" { - la = "127.0.0.1:0" - } else { - // unix and unixgram: choose an address if none given - if la == "" { - // use ioutil.TempFile to get a name that is unique - f, err := ioutil.TempFile("", "syslogtest") - if err != nil { - log.Fatal("TempFile: ", err) - } - f.Close() - la = f.Name() - } - os.Remove(la) - } - - wg = new(sync.WaitGroup) - if n == "udp" || n == "unixgram" { - l, e := net.ListenPacket(n, la) - if e != nil { - log.Fatalf("startServer failed: %v", e) - } - addr = l.LocalAddr().String() - sock = l - wg.Add(1) - go func() { - defer wg.Done() - runPktSyslog(l, done) - }() - } else { - var l net.Listener - var e error - if tlsCfg != nil { - l, e = tls.Listen(n, la, tlsCfg) - } else { - l, e = net.Listen(n, la) - } - if e != nil { - log.Fatalf("startServer failed: %v", e) - } - addr = l.Addr().String() - sock = l - wg.Add(1) - go func() { - defer wg.Done() - runStreamSyslog(l, done, wg) - }() - } - return -} - -func runStreamSyslog(l net.Listener, done chan<- string, wg *sync.WaitGroup) { - for { - var c net.Conn - var err error - if c, err = l.Accept(); err != nil { - return - } - wg.Add(1) - go func(c net.Conn) { - defer wg.Done() - c.SetReadDeadline(time.Now().Add(5 * time.Second)) - b := bufio.NewReader(c) - for ct := 1; !crashy || ct&7 != 0; ct++ { - s, err := b.ReadString('\n') - if err != nil { - break - } - done <- s - } - c.Close() - }(c) - } -} - -func TestConcurrentWrite(t *testing.T) { - addr, sock, srvWG := startServer("udp", "", make(chan string, 1), nil) - defer srvWG.Wait() - defer sock.Close() - w, err := Dial("udp", addr, LOG_USER|LOG_ERR, "how's it going?", nil) - if err != nil { - t.Fatalf("syslog.Dial() failed: %v", err) - } - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - err := w.Info("test") - if err != nil { - 
t.Errorf("Info() failed: %v", err) - return - } - }() - } - wg.Wait() -} - -func TestWithSimulated(t *testing.T) { - msg := "Test 123" - - for _, tr := range transports { - done := make(chan string) - addr, sock, srvWG := startServer(tr, "", done, nil) - defer srvWG.Wait() - defer sock.Close() - if tr == "unix" || tr == "unixgram" { - defer os.Remove(addr) - } - s, err := Dial(tr, addr, LOG_INFO|LOG_USER, "syslog_test", nil) - if err != nil { - t.Fatalf("Dial() failed: %v", err) - } - err = s.Info(msg) - if err != nil { - t.Fatalf("log failed: %v", err) - } - check(t, msg, <-done) - s.Close() - } -} - -func check(t *testing.T, in, out string) { - tmpl := fmt.Sprintf("<%d>%%s %%s syslog_test[%%d]: %s\n", LOG_USER+LOG_INFO, in) - if hostname, err := os.Hostname(); err != nil { - t.Error("Error retrieving hostname") - } else { - var parsedHostname, timestamp string - var pid int - if n, err := fmt.Sscanf(out, tmpl, ×tamp, &parsedHostname, &pid); n != 3 || err != nil || hostname != parsedHostname { - t.Errorf("Got %q, does not match template %q (%d %s)", out, tmpl, n, err) - } - } -} - -func TestDial(t *testing.T) { - if testing.Short() { - t.Skip("skipping syslog test during -short") - } - done := make(chan string) - addr, sock, srvWG := startServer("tcp", "", done, nil) - defer srvWG.Wait() - defer sock.Close() - f, err := Dial("tcp", addr, (LOG_LOCAL7|LOG_DEBUG)+1, "syslog_test", nil) - if f != nil { - t.Fatalf("Should have trapped bad priority") - } - f, err = Dial("tcp", addr, -1, "syslog_test", nil) - if f != nil { - t.Fatalf("Should have trapped bad priority") - } - l, err := Dial("tcp", addr, LOG_USER|LOG_ERR, "syslog_test", nil) - if err != nil { - t.Fatalf("Dial() failed: %s", err) - } - l.Close() -} - -func TestTLSDial(t *testing.T) { - if testing.Short() { - t.Skip("skipping syslog test during -short") - } - certificate, err := tls.X509KeyPair([]byte(cert), []byte(key)) - caCert := x509.NewCertPool() - ok := caCert.AppendCertsFromPEM([]byte(caPem)) - if !ok { - t.Fatalf("failed to parse root certificate") - } - - srvCfg := &tls.Config{InsecureSkipVerify: true, Certificates: []tls.Certificate{certificate}, RootCAs: caCert} - clientCfg := &tls.Config{ClientCAs: caCert, InsecureSkipVerify: true} - - done := make(chan string) - addr, sock, srvWG := startServer("tcp", "", done, srvCfg) - defer srvWG.Wait() - defer sock.Close() - f, err := Dial("tcp", addr, (LOG_LOCAL7|LOG_DEBUG)+1, "syslog_test", clientCfg) - if f != nil { - t.Fatalf("Should have trapped bad priority") - } - f, err = Dial("tcp", addr, -1, "syslog_test", clientCfg) - if f != nil { - t.Fatalf("Should have trapped bad priority") - } - l, err := Dial("tcp", addr, LOG_USER|LOG_ERR, "syslog_test", clientCfg) - if err != nil { - t.Fatalf("Dial() failed: %s", err) - } - l.Close() -} - -func TestConcurrentReconnect(t *testing.T) { - crashy = true - defer func() { crashy = false }() - - const N = 10 - const M = 100 - net := "tcp" - done := make(chan string, N*M) - addr, sock, srvWG := startServer(net, "", done, nil) - defer os.Remove(addr) - - // count all the messages arriving - count := make(chan int) - go func() { - ct := 0 - for _ = range done { - ct++ - // we are looking for 500 out of 1000 events - // here because lots of log messages are lost - // in buffers (kernel and/or bufio) - if ct > N*M/2 { - break - } - } - count <- ct - }() - - var wg sync.WaitGroup - wg.Add(N) - for i := 0; i < N; i++ { - go func() { - defer wg.Done() - w, err := Dial(net, addr, LOG_USER|LOG_ERR, "tag", nil) - if err != nil { - 
t.Fatalf("syslog.Dial() failed: %v", err) - } - defer w.Close() - for i := 0; i < M; i++ { - err := w.Info("test") - if err != nil { - t.Errorf("Info() failed: %v", err) - return - } - } - }() - } - wg.Wait() - sock.Close() - srvWG.Wait() - close(done) - - select { - case <-count: - case <-time.After(100 * time.Millisecond): - t.Error("timeout in concurrent reconnect") - } -} - -func TestWrite(t *testing.T) { - tests := []struct { - pri Priority - pre string - msg string - exp string - }{ - {LOG_USER | LOG_ERR, "syslog_test", "", "%s %s syslog_test[%d]: \n"}, - {LOG_USER | LOG_ERR, "syslog_test", "write test", "%s %s syslog_test[%d]: write test\n"}, - // Write should not add \n if there already is one - {LOG_USER | LOG_ERR, "syslog_test", "write test 2\n", "%s %s syslog_test[%d]: write test 2\n"}, - } - - if hostname, err := os.Hostname(); err != nil { - t.Fatalf("Error retrieving hostname") - } else { - for _, test := range tests { - done := make(chan string) - addr, sock, srvWG := startServer("udp", "", done, nil) - defer srvWG.Wait() - defer sock.Close() - l, err := Dial("udp", addr, test.pri, test.pre, nil) - if err != nil { - t.Fatalf("syslog.Dial() failed: %v", err) - } - defer l.Close() - _, err = io.WriteString(l, test.msg) - if err != nil { - t.Fatalf("WriteString() failed: %v", err) - } - rcvd := <-done - test.exp = fmt.Sprintf("<%d>", test.pri) + test.exp - var parsedHostname, timestamp string - var pid int - if n, err := fmt.Sscanf(rcvd, test.exp, ×tamp, &parsedHostname, &pid); n != 3 || err != nil || hostname != parsedHostname { - t.Errorf("s.Info() = '%q', didn't match '%q' (%d %s)", rcvd, test.exp, n, err) - } - } - } -} - -func TestFlap(t *testing.T) { - net := "tcp" - done := make(chan string) - addr, sock, srvWG := startServer(net, "", done, nil) - defer srvWG.Wait() - defer sock.Close() - - s, err := Dial(net, addr, LOG_INFO|LOG_USER, "syslog_test", nil) - if err != nil { - t.Fatalf("Dial() failed: %v", err) - } - msg := "Moo 2" - err = s.Info(msg) - if err != nil { - t.Fatalf("log failed: %v", err) - } - check(t, msg, <-done) - - // restart the server - _, sock2, srvWG2 := startServer(net, addr, done, nil) - defer srvWG2.Wait() - defer sock2.Close() - - // and try retransmitting - msg = "Moo 3" - err = s.Info(msg) - if err != nil { - t.Fatalf("log failed: %v", err) - } - check(t, msg, <-done) - - s.Close() -} === removed file 'src/github.com/juju/syslog/syslog_plan9.go' --- src/github.com/juju/syslog/syslog_plan9.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog_plan9.go 1970-01-01 00:00:00 +0000 @@ -1,8 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package syslog provides a simple interface to the system log service. -package syslog - -// BUG(akumar): This package is not implemented on Plan 9 yet. === removed file 'src/github.com/juju/syslog/syslog_test.go' --- src/github.com/juju/syslog/syslog_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog_test.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build !windows,!plan9 - -package syslog - -import ( - "testing" -) - -var transports = []string{"unix", "unixgram", "udp", "tcp"} - -func TestNew(t *testing.T) { - if LOG_LOCAL7 != 23<<3 { - t.Fatalf("LOG_LOCAL7 has wrong value") - } - if testing.Short() { - // Depends on syslog daemon running, and sometimes it's not. - t.Skip("skipping syslog test during -short") - } - - s, err := New(LOG_INFO|LOG_USER, "the_tag") - if err != nil { - t.Fatalf("New() failed: %s", err) - } - // Don't send any messages. - s.Close() -} - -func TestNewLogger(t *testing.T) { - if testing.Short() { - t.Skip("skipping syslog test during -short") - } - f, err := NewLogger(LOG_USER|LOG_INFO, 0) - if f == nil { - t.Error(err) - } -} - -func TestLocalDial(t *testing.T) { - if testing.Short() { - t.Skip("skipping syslog test during -short") - } - f, err := Dial("", "", (LOG_LOCAL7|LOG_DEBUG)+1, "syslog_test", nil) - if f != nil { - t.Fatalf("Should have trapped bad priority") - } - f, err = Dial("", "", -1, "syslog_test", nil) - if f != nil { - t.Fatalf("Should have trapped bad priority") - } - l, err := Dial("", "", LOG_USER|LOG_ERR, "syslog_test", nil) - if err != nil { - t.Fatalf("Dial() failed: %s", err) - } - l.Close() -} === removed file 'src/github.com/juju/syslog/syslog_unix.go' --- src/github.com/juju/syslog/syslog_unix.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog_unix.go 1970-01-01 00:00:00 +0000 @@ -1,40 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !windows,!plan9 - -package syslog - -import ( - "crypto/tls" - "errors" - "net" -) - -// unixSyslog opens a connection to the syslog daemon running on the -// local machine using a Unix domain socket. - -func localSyslog() (conn serverConn, err error) { - logTypes := []string{"unixgram", "unix"} - logPaths := []string{"/dev/log", "/var/run/syslog"} - for _, network := range logTypes { - for _, path := range logPaths { - conn, err := net.Dial(network, path) - if err != nil { - continue - } else { - return &netConn{conn: conn, local: true}, nil - } - } - } - return nil, errors.New("Unix syslog delivery error") -} - -func dial(network, address string, tlsCfg *tls.Config) (net.Conn, error) { - if tlsCfg != nil && network == "tcp" { - return tls.Dial(network, address, tlsCfg) - } else { - return net.Dial(network, address) - } -} === removed file 'src/github.com/juju/syslog/syslog_windows.go' --- src/github.com/juju/syslog/syslog_windows.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog_windows.go 1970-01-01 00:00:00 +0000 @@ -1,29 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package syslog provides a simple interface to the system log service. -package syslog - -import ( - "crypto/tls" - "errors" - "net" -) - -// BUG(brainman): This package is not implemented on Windows yet. - -func localSyslog() (conn serverConn, err error) { - return nil, errors.New("Local syslog not implemented on windows") -} - -func dial(network, address string, tlsCfg *tls.Config) (net.Conn, error) { - if network != "tcp" && network != "udp" { - return nil, errors.New("Invalid protocol. 
Windows supports tcp and udp") - } - if tlsCfg != nil && network == "tcp" { - return tls.Dial(network, address, tlsCfg) - } else { - return net.Dial(network, address) - } -} === removed file 'src/github.com/juju/syslog/syslog_windows_test.go' --- src/github.com/juju/syslog/syslog_windows_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/syslog/syslog_windows_test.go 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -package syslog - -var transports = []string{"udp", "tcp"} === modified file 'src/github.com/juju/testing/checkers/bool_test.go' --- src/github.com/juju/testing/checkers/bool_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/checkers/bool_test.go 2016-03-22 15:18:22 +0000 @@ -34,7 +34,7 @@ result, msg = jc.IsTrue.Check([]interface{}{nil}, nil) c.Assert(result, gc.Equals, false) - c.Assert(msg, gc.Equals, `expected type bool, received `) + c.Assert(msg, gc.Matches, `expected type bool, received `) } func (s *BoolSuite) TestIsFalse(c *gc.C) { === modified file 'src/github.com/juju/testing/checkers/codec.go' --- src/github.com/juju/testing/checkers/codec.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/checkers/codec.go 2016-03-22 15:18:22 +0000 @@ -8,7 +8,7 @@ "fmt" gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" + "gopkg.in/yaml.v2" ) type codecEqualChecker struct { === modified file 'src/github.com/juju/testing/checkers/codec_test.go' --- src/github.com/juju/testing/checkers/codec_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/checkers/codec_test.go 2016-03-22 15:18:22 +0000 @@ -140,7 +140,7 @@ descr: "illegal obtained content", obtained: `{"NotThere": `, result: false, - msg: `cannot unmarshal obtained contents: YAML error: .*`, + msg: `cannot unmarshal obtained contents: yaml: line 1: .*`, }, } for i, test := range tests { === modified file 'src/github.com/juju/testing/cmd.go' --- src/github.com/juju/testing/cmd.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/cmd.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,8 @@ "strconv" "strings" + "github.com/juju/utils" + gc "gopkg.in/check.v1" ) @@ -50,7 +52,7 @@ exitcodesfile="$name.exitcodes" printf "%s" $name | tee -a $argfile for arg in "$@"; do - printf " \"%s\"" "$arg" | tee -a $argfile + printf " '%s'" "$arg" | tee -a $argfile done printf "\n" | tee -a $argfile if [ -f $exitcodesfile ] @@ -70,7 +72,7 @@ set /A argCount+=1 set "argVec[!argCount!]=%%~x" ) -for /L %%i in (1,1,%argCount%) do set list=!list! "!argVec[%%i]!" +for /L %%i in (1,1,%argCount%) do set list=!list! '!argVec[%%i]!' 
IF exist %0.exitcodes ( FOR /F "tokens=1* delims=;" %%i IN (%0.exitcodes) DO ( @@ -183,11 +185,12 @@ // Create expected output string expected := execName for _, arg := range args { - expected = fmt.Sprintf("%s %q", expected, arg) + expected = fmt.Sprintf("%s %s", expected, utils.ShQuote(arg)) } // Check that the expected and the first line of actual output are the same actual := strings.TrimSuffix(lines[0], "\r") + c.Assert(actual, gc.Equals, expected) // Write out the remaining lines for the next check === modified file 'src/github.com/juju/testing/cmd_test.go' --- src/github.com/juju/testing/cmd_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/cmd_test.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,7 @@ import ( "os/exec" - "runtime" + "strings" gc "gopkg.in/check.v1" @@ -39,18 +39,14 @@ }) } -const testFunc = "test-ouput" +const testFunc = "test-output" func (s *cmdSuite) TestPatchExecutableNoArgs(c *gc.C) { s.EnsureArgFileRemoved(testFunc) testing.PatchExecutableAsEchoArgs(c, s, testFunc) output := runCommand(c, testFunc) - switch runtime.GOOS { - case "windows": - c.Assert(output, gc.Equals, testFunc+"\r\n") - default: - c.Assert(output, gc.Equals, testFunc+"\n") - } + output = strings.TrimRight(output, "\r\n") + c.Assert(output, gc.Equals, testFunc) testing.AssertEchoArgs(c, testFunc) } @@ -58,12 +54,10 @@ s.EnsureArgFileRemoved(testFunc) testing.PatchExecutableAsEchoArgs(c, s, testFunc) output := runCommand(c, testFunc, "foo", "bar baz") - switch runtime.GOOS { - case "windows": - c.Assert(output, gc.Equals, testFunc+" \"foo\" \"bar baz\"\r\n") - default: - c.Assert(output, gc.Equals, testFunc+" \"foo\" \"bar baz\"\n") - } + output = strings.TrimRight(output, "\r\n") + + c.Assert(output, gc.DeepEquals, testFunc+" 'foo' 'bar baz'") + testing.AssertEchoArgs(c, testFunc, "foo", "bar baz") } @@ -72,12 +66,8 @@ cmd := exec.Command(testFunc) out, err := cmd.CombinedOutput() c.Assert(err, gc.ErrorMatches, "exit status 1") - switch runtime.GOOS { - case "windows": - c.Assert(string(out), gc.Equals, "failing\r\n") - default: - c.Assert(string(out), gc.Equals, "failing\n") - } + output := strings.TrimRight(string(out), "\r\n") + c.Assert(output, gc.Equals, "failing") } func runCommand(c *gc.C, command string, args ...string) string { === added file 'src/github.com/juju/testing/filetesting/stub.go' --- src/github.com/juju/testing/filetesting/stub.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/filetesting/stub.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,264 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
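The cmd.go and cmd_test.go hunks above replace %q double-quoting of echoed arguments with single quotes produced by utils.ShQuote, which is why the tests now expect 'foo' and 'bar baz'. ShQuote itself is not part of this diff; the sketch below is a hypothetical stand-in showing the usual POSIX single-quoting scheme:

    package main

    import (
        "fmt"
        "strings"
    )

    // shQuote is a hypothetical stand-in for utils.ShQuote: wrap the
    // string in single quotes, escaping any embedded single quote by
    // closing the quote, emitting a double-quoted quote, and reopening.
    func shQuote(s string) string {
        return "'" + strings.Replace(s, "'", `'"'"'`, -1) + "'"
    }

    func main() {
        fmt.Println(shQuote("bar baz"))   // 'bar baz'
        fmt.Println(shQuote("it's here")) // 'it'"'"'s here'
    }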
+ +package filetesting + +import ( + "bytes" + "hash" + "io" + "os" + "strings" + "time" + + "github.com/juju/errors" + "github.com/juju/testing" +) + +type StubReader struct { + Stub *testing.Stub + + ReturnRead io.Reader +} + +func NewStubReader(stub *testing.Stub, content string) io.Reader { + return &StubReader{ + Stub: stub, + ReturnRead: strings.NewReader(content), + } +} + +func (s *StubReader) Read(data []byte) (int, error) { + s.Stub.AddCall("Read", data) + if err := s.Stub.NextErr(); err != nil { + return 0, errors.Trace(err) + } + + if s.ReturnRead == nil { + return 0, nil + } + return s.ReturnRead.Read(data) +} + +type StubWriter struct { + Stub *testing.Stub + + ReturnWrite io.Writer +} + +func NewStubWriter(stub *testing.Stub) (io.Writer, *bytes.Buffer) { + buf := new(bytes.Buffer) + s := &StubWriter{ + Stub: stub, + ReturnWrite: buf, + } + return s, buf +} + +func (s *StubWriter) Write(data []byte) (int, error) { + s.Stub.AddCall("Write", data) + if err := s.Stub.NextErr(); err != nil { + return 0, errors.Trace(err) + } + + if s.ReturnWrite == nil { + return 0, nil + } + return s.ReturnWrite.Write(data) +} + +type StubSeeker struct { + Stub *testing.Stub + + ReturnSeek int64 +} + +func (s *StubSeeker) Seek(offset int64, whence int) (int64, error) { + s.Stub.AddCall("Seek", offset, whence) + if err := s.Stub.NextErr(); err != nil { + return 0, errors.Trace(err) + } + + return s.ReturnSeek, nil +} + +type StubCloser struct { + Stub *testing.Stub +} + +func (s *StubCloser) Close() error { + s.Stub.AddCall("Close") + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type StubFile struct { + io.Reader + io.Writer + io.Seeker + io.Closer + + Stub *testing.Stub + Info StubFileInfo +} + +func NewStubFile(stub *testing.Stub, raw io.ReadWriter) *StubFile { + return &StubFile{ + Reader: &StubReader{Stub: stub, ReturnRead: raw}, + Writer: &StubWriter{Stub: stub, ReturnWrite: raw}, + Seeker: &StubSeeker{Stub: stub}, + Closer: &StubCloser{Stub: stub}, + Stub: stub, + } +} + +func (s *StubFile) Name() string { + s.Stub.AddCall("Name") + s.Stub.NextErr() // Pop one off. + + return s.Info.Info.Name +} + +func (s *StubFile) Stat() (os.FileInfo, error) { + s.Stub.AddCall("Stat") + if err := s.Stub.NextErr(); err != nil { + return nil, errors.Trace(err) + } + + return &s.Info, nil +} + +func (s *StubFile) Sync() error { + s.Stub.AddCall("Sync") + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *StubFile) Truncate(size int64) error { + s.Stub.AddCall("Truncate", size) + if err := s.Stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +type FileInfo struct { + Name string + Size int64 + Mode os.FileMode + ModTime time.Time +} + +var _ os.FileInfo = (*StubFileInfo)(nil) + +type StubFileInfo struct { + Stub *testing.Stub + + Info FileInfo + ReturnSys interface{} +} + +func NewStubFileInfo(stub *testing.Stub, name, content string) *StubFileInfo { + return &StubFileInfo{ + Stub: stub, + Info: FileInfo{ + Name: name, + Size: int64(len(content)), + Mode: 0644, + ModTime: time.Now(), + }, + } +} + +func (s StubFileInfo) Name() string { + s.Stub.AddCall("Name") + s.Stub.NextErr() // Pop one off. + + return s.Info.Name +} + +func (s StubFileInfo) Size() int64 { + s.Stub.AddCall("Size") + s.Stub.NextErr() // Pop one off. + + return s.Info.Size +} + +func (s StubFileInfo) Mode() os.FileMode { + s.Stub.AddCall("Mode") + s.Stub.NextErr() // Pop one off. 
+ + return s.Info.Mode +} + +func (s StubFileInfo) ModTime() time.Time { + s.Stub.AddCall("ModTime") + s.Stub.NextErr() // Pop one off. + + return s.Info.ModTime +} + +func (s StubFileInfo) IsDir() bool { + s.Stub.AddCall("IsDir") + s.Stub.NextErr() // Pop one off. + + return s.Info.Mode.IsDir() +} + +func (s StubFileInfo) Sys() interface{} { + s.Stub.AddCall("Sys") + s.Stub.NextErr() // Pop one off. + + return s.ReturnSys +} + +var _ hash.Hash = (*StubHash)(nil) + +type StubHash struct { + io.Writer + + Stub *testing.Stub + ReturnSum []byte + ReturnSize int + ReturnBlockSize int +} + +func NewStubHash(stub *testing.Stub, raw io.Writer) *StubHash { + return &StubHash{ + Writer: &StubWriter{Stub: stub, ReturnWrite: raw}, + Stub: stub, + } +} + +func (s *StubHash) Sum(b []byte) []byte { + s.Stub.AddCall("Sum", b) + s.Stub.NextErr() // Pop one off. + + return s.ReturnSum +} + +func (s *StubHash) Reset() { + s.Stub.AddCall("Reset") + s.Stub.NextErr() // Pop one off. +} + +func (s *StubHash) Size() int { + s.Stub.AddCall("Size") + s.Stub.NextErr() // Pop one off. + + return s.ReturnSize +} + +func (s *StubHash) BlockSize() int { + s.Stub.AddCall("BlockSize") + s.Stub.NextErr() // Pop one off. + + return s.ReturnBlockSize +} === added file 'src/github.com/juju/testing/goversion12.go' --- src/github.com/juju/testing/goversion12.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/goversion12.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// +build go1.2 +// +build !go1.3,!go1.4,!go1.5 + +package testing + +const GOVERSION = 1.2 === added file 'src/github.com/juju/testing/goversion13.go' --- src/github.com/juju/testing/goversion13.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/goversion13.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// +build go1.3 +// +build !go1.4,!go1.5 + +package testing + +const GOVERSION = 1.3 === added file 'src/github.com/juju/testing/goversion14.go' --- src/github.com/juju/testing/goversion14.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/goversion14.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// +build go1.4 +// +build !go1.5 + +package testing + +const GOVERSION = 1.4 === added file 'src/github.com/juju/testing/goversion15.go' --- src/github.com/juju/testing/goversion15.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/goversion15.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// +build go1.5 + +package testing + +const GOVERSION = 1.5 === modified file 'src/github.com/juju/testing/home.go' --- src/github.com/juju/testing/home.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/home.go 2016-03-22 15:18:22 +0000 @@ -82,6 +82,15 @@ return filepath.Join(all...) } +// JujuXDGDataHomePath returns the test home path, it is just a convenience +// for tests, if extra path snippets are passed they will be +// joined to juju home. +// This tool assumes ~/.config/juju as the juju home. +func JujuXDGDataHomePath(names ...string) string { + all := append([]string{".local", "share", "juju"}, names...) + return HomePath(all...) +} + // FakeHomeSuite sets up a fake home directory before running tests. 
type FakeHomeSuite struct { CleanupSuite === modified file 'src/github.com/juju/testing/httptesting/http.go' --- src/github.com/juju/testing/httptesting/http.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/httptesting/http.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,8 @@ "io/ioutil" "net/http" "net/http/httptest" + "net/textproto" + "net/url" "strings" gc "gopkg.in/check.v1" @@ -36,16 +38,20 @@ // nil. ExpectError string - // Handler holds the handler to use to make the request. - Handler http.Handler - // Method holds the HTTP method to use for the call. // GET is assumed if this is empty. Method string // URL holds the URL to pass when making the request. + // If the URL does not contain a host, a temporary + // HTTP server is started running the Handler below + // which is used for the host. URL string + // Handler holds the handler to use to make the request. + // It is ignored if the above URL field has a host part. + Handler http.Handler + // JSONBody specifies a JSON value to marshal to use // as the body of the request. If this is specified, Body will // be ignored and the Content-Type header will @@ -81,6 +87,10 @@ // result. ExpectBody interface{} + // ExpectHeader holds any HTTP headers that must be present in the response. + // Note that the response may also contain headers not in this field. + ExpectHeader http.Header + // Cookies, if specified, are added to the request. Cookies []*http.Cookie } @@ -110,6 +120,10 @@ return } AssertJSONResponse(c, rec, p.ExpectStatus, p.ExpectBody) + + for k, v := range p.ExpectHeader { + c.Assert(rec.HeaderMap[textproto.CanonicalMIMEHeaderKey(k)], gc.DeepEquals, v, gc.Commentf("header %q", k)) + } } // AssertJSONResponse asserts that the given response recorder has @@ -125,6 +139,7 @@ return } c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json") + if assertBody, ok := expectBody.(BodyAsserter); ok { var data json.RawMessage err := json.Unmarshal(rec.Body.Bytes(), &data) @@ -150,16 +165,20 @@ // nil. ExpectError string - // Handler holds the handler to use to make the request. - Handler http.Handler - // Method holds the HTTP method to use for the call. // GET is assumed if this is empty. Method string // URL holds the URL to pass when making the request. + // If the URL does not contain a host, a temporary + // HTTP server is started running the Handler below + // which is used for the host. URL string + // Handler holds the handler to use to make the request. + // It is ignored if the above URL field has a host part. + Handler http.Handler + // JSONBody specifies a JSON value to marshal to use // as the body of the request. If this is specified, Body will // be ignored and the Content-Type header will @@ -189,18 +208,40 @@ Cookies []*http.Cookie } -// DoRequest invokes a request on the given handler with the given -// parameters. +// DoRequest is the same as Do except that it returns +// an httptest.ResponseRecorder instead of an http.Response. +// This function exists for backward compatibility reasons. func DoRequest(c *gc.C, p DoRequestParams) *httptest.ResponseRecorder { + resp := Do(c, p) + if p.ExpectError != "" { + return nil + } + defer resp.Body.Close() + var rec httptest.ResponseRecorder + rec.HeaderMap = resp.Header + rec.Code = resp.StatusCode + rec.Body = new(bytes.Buffer) + _, err := io.Copy(rec.Body, resp.Body) + c.Assert(err, jc.ErrorIsNil) + return &rec +} + +// Do invokes a request on the given handler with the given +// parameters and returns the resulting HTTP response. 
+// Note that, as with http.Client.Do, the response body +// must be closed. +func Do(c *gc.C, p DoRequestParams) *http.Response { if p.Method == "" { p.Method = "GET" } if p.Do == nil { p.Do = http.DefaultClient.Do } - srv := httptest.NewServer(p.Handler) - defer srv.Close() - + if reqURL, err := url.Parse(p.URL); err == nil && reqURL.Host == "" { + srv := httptest.NewServer(p.Handler) + defer srv.Close() + p.URL = srv.URL + p.URL + } if p.JSONBody != nil { data, err := json.Marshal(p.JSONBody) c.Assert(err, jc.ErrorIsNil) @@ -208,7 +249,7 @@ } // Note: we avoid NewRequest's odious reader wrapping by using // a custom nopCloser function. - req, err := http.NewRequest(p.Method, srv.URL+p.URL, nopCloser(p.Body)) + req, err := http.NewRequest(p.Method, p.URL, nopCloser(p.Body)) c.Assert(err, jc.ErrorIsNil) if p.JSONBody != nil { req.Header.Set("Content-Type", "application/json") @@ -233,17 +274,7 @@ return nil } c.Assert(err, jc.ErrorIsNil) - defer resp.Body.Close() - - // TODO(rog) don't return a ResponseRecorder because we're not actually - // using httptest.NewRecorder ? - var rec httptest.ResponseRecorder - rec.HeaderMap = resp.Header - rec.Code = resp.StatusCode - rec.Body = new(bytes.Buffer) - _, err = io.Copy(rec.Body, resp.Body) - c.Assert(err, jc.ErrorIsNil) - return &rec + return resp } // bodyContentLength returns the Content-Length @@ -290,3 +321,40 @@ func (readSeekNopCloser) Close() error { return nil } + +// URLRewritingTransport is an http.RoundTripper that can rewrite request +// URLs. If the request URL has the prefix specified in Match that part +// will be changed to the value specified in Replace. RoundTripper will +// then be used to perform the resulting request. If RoundTripper is nil +// http.DefaultTransport will be used. +// +// This can be used in tests that, for whatever reason, need to make a +// call to a URL that's not in our control but we want to control the +// results of HTTP requests to that URL. +type URLRewritingTransport struct { + MatchPrefix string + Replace string + RoundTripper http.RoundTripper +} + +// RoundTrip implements http.RoundTripper. 
+func (t URLRewritingTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + if !strings.HasPrefix(req.URL.String(), t.MatchPrefix) { + return rt.RoundTrip(req) + } + req1 := *req + var err error + req1.URL, err = url.Parse(t.Replace + strings.TrimPrefix(req.URL.String(), t.MatchPrefix)) + if err != nil { + panic(err) + } + resp, err := rt.RoundTrip(&req1) + if resp != nil { + resp.Request = req + } + return resp, err +} === modified file 'src/github.com/juju/testing/httptesting/http_test.go' --- src/github.com/juju/testing/httptesting/http_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/httptesting/http_test.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,7 @@ "io" "io/ioutil" "net/http" + "net/http/httptest" "strings" gc "gopkg.in/check.v1" @@ -104,6 +105,38 @@ ExpectStatus: http.StatusOK, }, }, { + about: "test for ExceptHeader in response", + params: httptesting.JSONCallParams{ + URL: "/", + Do: func(req *http.Request) (*http.Response, error) { + resp, err := http.DefaultClient.Do(req) + resp.StatusCode = http.StatusOK + resp.Header["Custom"] = []string{"value1", "value2"} + resp.Header["Ignored"] = []string{"value3", "value3"} + return resp, err + }, + ExpectStatus: http.StatusOK, + ExpectHeader: http.Header{ + "Custom": {"value1", "value2"}, + }, + }, +}, { + about: "test case insensitive for ExceptHeader in response", + params: httptesting.JSONCallParams{ + URL: "/", + Do: func(req *http.Request) (*http.Response, error) { + resp, err := http.DefaultClient.Do(req) + resp.StatusCode = http.StatusOK + resp.Header["Custom"] = []string{"value1", "value2"} + resp.Header["Ignored"] = []string{"value3", "value3"} + return resp, err + }, + ExpectStatus: http.StatusOK, + ExpectHeader: http.Header{ + "CUSTOM": {"value1", "value2"}, + }, + }, +}, { about: "error status", params: httptesting.JSONCallParams{ URL: "/", @@ -242,6 +275,18 @@ c.Assert(called, gc.Equals, true) } +func (*requestsSuite) TestAssertJSONCallWithHostedURL(c *gc.C) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf("%q", "ok "+req.URL.Path))) + })) + defer srv.Close() + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + URL: srv.URL + "/foo", + ExpectBody: "ok /foo", + }) +} + var bodyReaderFuncs = []func(string) io.Reader{ func(s string) io.Reader { return strings.NewReader(s) @@ -274,3 +319,41 @@ // calls. Failures are already massively tested in practice. DoRequest and // AssertJSONResponse are also indirectly tested as they are called by // AssertJSONCall. 
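The ExpectHeader assertion added to JSONCallParams above canonicalizes header names with textproto.CanonicalMIMEHeaderKey before comparing, which is what makes the case-insensitivity test pass. A quick illustration:

    package main

    import (
        "fmt"
        "net/textproto"
    )

    func main() {
        // Both spellings canonicalize to "Custom", so an ExpectHeader key
        // of "CUSTOM" matches a response header set as "Custom".
        fmt.Println(textproto.CanonicalMIMEHeaderKey("CUSTOM"))      // Custom
        fmt.Println(textproto.CanonicalMIMEHeaderKey("x-my-header")) // X-My-Header
    }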
+ +type urlRewritingTransportSuite struct { + server *httptest.Server +} + +var _ = gc.Suite(&urlRewritingTransportSuite{}) + +func (s *urlRewritingTransportSuite) SetUpTest(c *gc.C) { + s.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte(r.URL.String())) + })) +} + +func (s *urlRewritingTransportSuite) TestTransport(c *gc.C) { + t := httptesting.URLRewritingTransport{ + MatchPrefix: "http://example.com", + Replace: s.server.URL, + } + client := http.Client{ + Transport: &t, + } + resp, err := client.Get("http://example.com/path") + c.Assert(err, jc.ErrorIsNil) + body, err := ioutil.ReadAll(resp.Body) + c.Assert(err, jc.ErrorIsNil) + resp.Body.Close() + c.Assert(resp.Request.URL.String(), gc.Equals, "http://example.com/path") + c.Assert(string(body), gc.Equals, "/path") + + t.RoundTripper = &http.Transport{} + resp, err = client.Get(s.server.URL + "/otherpath") + c.Assert(err, jc.ErrorIsNil) + body, err = ioutil.ReadAll(resp.Body) + c.Assert(err, jc.ErrorIsNil) + resp.Body.Close() + c.Assert(resp.Request.URL.String(), gc.Equals, s.server.URL+"/otherpath") + c.Assert(string(body), gc.Equals, "/otherpath") +} === modified file 'src/github.com/juju/testing/mgo.go' --- src/github.com/juju/testing/mgo.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/mgo.go 2016-03-22 15:18:22 +0000 @@ -24,7 +24,9 @@ "testing" "time" + "github.com/juju/errors" "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" "github.com/juju/utils" gc "gopkg.in/check.v1" "gopkg.in/mgo.v2" @@ -295,7 +297,8 @@ } func getMongod() (string, error) { - paths := []string{"mongod", "/usr/lib/juju/bin/mongod"} + // The last path is needed in tests on CentOS where PATH is being completely removed + paths := []string{"mongod", "/usr/lib/juju/bin/mongod", "/usr/local/bin/mongod"} if path := os.Getenv("JUJU_MONGOD"); path != "" { paths = append([]string{path}, paths...) } @@ -408,16 +411,6 @@ utils.FastInsecureHash = false } -// MustDial returns a new connection to the MongoDB server, and panics on -// errors. -func (inst *MgoInstance) MustDial() *mgo.Session { - s, err := mgo.DialWithInfo(inst.DialInfo()) - if err != nil { - panic(err) - } - return s -} - // Dial returns a new connection to the MongoDB server. func (inst *MgoInstance) Dial() (*mgo.Session, error) { return mgo.DialWithInfo(inst.DialInfo()) @@ -482,22 +475,24 @@ func (s *MgoSuite) SetUpTest(c *gc.C) { mgo.ResetStats() - s.Session = MgoServer.MustDial() + var err error + s.Session, err = MgoServer.Dial() + c.Assert(err, jc.ErrorIsNil) dropAll(s.Session) } -// Reset deletes all content from the MongoDB server and panics if it encounters -// errors. -func (inst *MgoInstance) Reset() { +// Reset deletes all content from the MongoDB server. +func (inst *MgoInstance) Reset() error { // If the server has already been destroyed for testing purposes, // just start it again. if inst.Addr() == "" { - if err := inst.Start(inst.certs); err != nil { - panic(err) - } - return - } - session := inst.MustDial() + err := inst.Start(inst.certs) + return errors.Annotatef(err, "inst.Start(%v) failed", inst.certs) + } + session, err := inst.Dial() + if err != nil { + return errors.Annotate(err, "inst.Dial() failed") + } defer session.Close() dbnames, ok := resetAdminPasswordAndFetchDBNames(session) @@ -506,10 +501,8 @@ // happen when tests fail. 
logger.Infof("restarting MongoDB server after unauthorized access") inst.Destroy() - if err := inst.Start(inst.certs); err != nil { - panic(err) - } - return + err := inst.Start(inst.certs) + return errors.Annotatef(err, "inst.Start(%v) failed", inst.certs) } logger.Infof("reset successfully reset admin password") for _, name := range dbnames { @@ -519,9 +512,10 @@ continue } if err := session.DB(name).DropDatabase(); err != nil { - panic(fmt.Errorf("Cannot drop MongoDB database %v: %v", name, err)) + return errors.Annotatef(err, "cannot drop MongoDB database %v", name) } } + return nil } // dropAll drops all databases apart from admin, local and config. @@ -602,7 +596,8 @@ } func (s *MgoSuite) TearDownTest(c *gc.C) { - MgoServer.Reset() + err := MgoServer.Reset() + c.Assert(err, jc.ErrorIsNil) s.Session.Close() for i := 0; ; i++ { stats := mgo.GetStats() === modified file 'src/github.com/juju/testing/mgo_test.go' --- src/github.com/juju/testing/mgo_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/mgo_test.go 2016-03-22 15:18:22 +0000 @@ -41,9 +41,10 @@ } func (s *mgoSuite) TestResetWhenUnauthorized(c *gc.C) { - session := testing.MgoServer.MustDial() + session, err := testing.MgoServer.Dial() + c.Assert(err, gc.IsNil) defer session.Close() - err := session.DB("admin").AddUser("admin", "foo", false) + err = session.DB("admin").AddUser("admin", "foo", false) if err != nil && err.Error() != "need to login" { c.Assert(err, gc.IsNil) } @@ -53,10 +54,11 @@ func (s *mgoSuite) TestStartAndClean(c *gc.C) { c.Assert(testing.MgoServer.Addr(), gc.Not(gc.Equals), "") - session := testing.MgoServer.MustDial() + session, err := testing.MgoServer.Dial() + c.Assert(err, gc.IsNil) defer session.Close() menu := session.DB("food").C("menu") - err := menu.Insert( + err = menu.Insert( bson.D{{"spam", "lots"}}, bson.D{{"eggs", "fried"}}, ) === modified file 'src/github.com/juju/testing/mgo_unix.go' --- src/github.com/juju/testing/mgo_unix.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/mgo_unix.go 2016-03-22 15:18:22 +0000 @@ -13,4 +13,3 @@ func (inst *MgoInstance) DestroyWithLog() { inst.killAndCleanup(os.Interrupt) } - === modified file 'src/github.com/juju/testing/mgo_windows.go' --- src/github.com/juju/testing/mgo_windows.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/testing/mgo_windows.go 2016-03-22 15:18:22 +0000 @@ -16,4 +16,3 @@ func (inst *MgoInstance) DestroyWithLog() { inst.killAndCleanup(os.Kill) } - === added file 'src/github.com/juju/testing/norace.go' --- src/github.com/juju/testing/norace.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/norace.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// +build !race + +package testing + +const RaceEnabled = false === modified file 'src/github.com/juju/testing/osenv.go' --- src/github.com/juju/testing/osenv.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/osenv.go 2016-03-22 15:18:22 +0000 @@ -67,25 +67,25 @@ } func (s *OsEnvSuite) setEnviron() { - var isWhitelisted func (string) bool + var isWhitelisted func(string) bool switch runtime.GOOS { case "windows": // Lowercase variable names for comparison as they are case // insenstive on windows. Fancy folding not required for ascii. 
lowerEnv := make(map[string]struct{}, - len(windowsVariables) + len(testingVariables)) + len(windowsVariables)+len(testingVariables)) for _, envVar := range windowsVariables { lowerEnv[strings.ToLower(envVar)] = struct{}{} } for _, envVar := range testingVariables { lowerEnv[strings.ToLower(envVar)] = struct{}{} } - isWhitelisted = func (envVar string) bool { + isWhitelisted = func(envVar string) bool { _, ok := lowerEnv[strings.ToLower(envVar)] return ok } default: - isWhitelisted = func (envVar string) bool { + isWhitelisted = func(envVar string) bool { for _, testingVar := range testingVariables { if testingVar == envVar { return true === added file 'src/github.com/juju/testing/race.go' --- src/github.com/juju/testing/race.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/race.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// +build race + +package testing + +const RaceEnabled = true === modified file 'src/github.com/juju/testing/stub.go' --- src/github.com/juju/testing/stub.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/stub.go 2016-03-22 15:18:22 +0000 @@ -4,6 +4,7 @@ package testing import ( + "fmt" "sync" jc "github.com/juju/testing/checkers" @@ -122,6 +123,16 @@ return err } +// PopNoErr pops off the next error without returning it. If the error +// is not nil then PopNoErr will panic. +// +// PopNoErr is useful in stub methods that do not return an error. +func (f *Stub) PopNoErr() { + if err := f.NextErr(); err != nil { + panic(fmt.Sprintf("expected a nil error, got %v", err)) + } +} + func (f *Stub) addCall(rcvr interface{}, funcName string, args []interface{}) { f.mu.Lock() defer f.mu.Unlock() @@ -211,6 +222,11 @@ return c.Check(funcNames, jc.DeepEquals, expected) } +// CheckNoCalls verifies that none of the stub's methods have been called. +func (f *Stub) CheckNoCalls(c *gc.C) { + f.CheckCalls(c, nil) +} + // CheckErrors verifies that the list of errors is matches the expected list. 
func (f *Stub) CheckErrors(c *gc.C, expected ...error) bool { f.mu.Lock() === modified file 'src/github.com/juju/testing/stub_test.go' --- src/github.com/juju/testing/stub_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/testing/stub_test.go 2016-03-22 15:18:22 +0000 @@ -110,6 +110,36 @@ c.Check(err4, gc.Equals, exp2) } +func (s *stubSuite) TestPopNoErrOkay(c *gc.C) { + exp1 := errors.New("") + exp2 := errors.New("") + s.stub.SetErrors(exp1, nil, exp2) + + err1 := s.stub.NextErr() + s.stub.PopNoErr() + err2 := s.stub.NextErr() + + c.Check(err1, gc.Equals, exp1) + c.Check(err2, gc.Equals, exp2) +} + +func (s *stubSuite) TestPopNoErrEmpty(c *gc.C) { + s.stub.PopNoErr() + err := s.stub.NextErr() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *stubSuite) TestPopNoErrPanic(c *gc.C) { + failure := errors.New("") + s.stub.SetErrors(failure) + + f := func() { + s.stub.PopNoErr() + } + c.Check(f, gc.PanicMatches, `expected a nil error, got .*`) +} + func (s *stubSuite) TestAddCallRecorded(c *gc.C) { s.stub.AddCall("aFunc", 1, 2, 3) @@ -392,3 +422,11 @@ c.ExpectFailure(`the "standard" Stub.CheckCallNames call should fail here`) s.stub.CheckCallNames(c, "first", "second", "third") } + +func (s *stubSuite) TestCheckNoCalls(c *gc.C) { + s.stub.CheckNoCalls(c) + + s.stub.AddCall("method", "arg") + c.ExpectFailure(`the "standard" Stub.CheckNoCalls call should fail here`) + s.stub.CheckNoCalls(c) +} === added file 'src/github.com/juju/testing/tcpproxy.go' --- src/github.com/juju/testing/tcpproxy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/tcpproxy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,104 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package testing + +import ( + "io" + "net" + "sync" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +// TCPProxy is a simple TCP proxy that can be used +// to deliberately break TCP connections. +type TCPProxy struct { + listener net.Listener + // mu guards the fields below it. + mu sync.Mutex + // closed holds whether the proxy has been closed. + closed bool + // conns holds all connections that have been made. + conns []io.Closer +} + +// NewTCPProxy runs a proxy that copies to and from +// the given remote TCP address. When the proxy +// is closed, its listener and all connections will be closed. +func NewTCPProxy(c *gc.C, remoteAddr string) *TCPProxy { + listener, err := net.Listen("tcp", "127.0.0.1:0") + c.Assert(err, jc.ErrorIsNil) + p := &TCPProxy{ + listener: listener, + } + go func() { + for { + client, err := p.listener.Accept() + if err != nil { + if !p.isClosed() { + c.Error("cannot accept: %v", err) + } + return + } + p.addConn(client) + server, err := net.Dial("tcp", remoteAddr) + if err != nil { + if !p.isClosed() { + c.Error("cannot dial remote address: %v", err) + } + return + } + p.addConn(server) + go stream(client, server) + go stream(server, client) + } + }() + return p +} + +func (p *TCPProxy) addConn(c net.Conn) { + p.mu.Lock() + defer p.mu.Unlock() + if p.closed { + c.Close() + } else { + p.conns = append(p.conns, c) + } +} + +// Close closes the TCPProxy and any connections that +// are currently active. +func (p *TCPProxy) Close() error { + p.mu.Lock() + defer p.mu.Unlock() + p.closed = true + p.listener.Close() + for _, c := range p.conns { + c.Close() + } + return nil +} + +// Addr returns the TCP address of the proxy. 
Dialing +// this address will cause a connection to be made +// to the remote address; any data written will be +// written there, and any data read from the remote +// address will be available to read locally. +func (p *TCPProxy) Addr() string { + // Note: this only works because we explicitly listen on 127.0.0.1 rather + // than the wildcard address. + return p.listener.Addr().String() +} +func (p *TCPProxy) isClosed() bool { + p.mu.Lock() + defer p.mu.Unlock() + return p.closed +} + +func stream(dst io.WriteCloser, src io.ReadCloser) { + defer dst.Close() + defer src.Close() + io.Copy(dst, src) +} === added file 'src/github.com/juju/testing/tcpproxy_test.go' --- src/github.com/juju/testing/tcpproxy_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/testing/tcpproxy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,89 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package testing_test + +import ( + "fmt" + "io" + "net" + "sync" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" +) + +var _ = gc.Suite(&tcpProxySuite{}) + +type tcpProxySuite struct{} + +func (*tcpProxySuite) TestTCPProxy(c *gc.C) { + var wg sync.WaitGroup + + listener, err := net.Listen("tcp", "127.0.0.1:0") + c.Assert(err, gc.IsNil) + defer listener.Close() + wg.Add(1) + go tcpEcho(&wg, listener) + + p := testing.NewTCPProxy(c, listener.Addr().String()) + c.Assert(p.Addr(), gc.Not(gc.Equals), listener.Addr().String()) + + // Dial the proxy and check that we see the text echoed correctly. + conn, err := net.Dial("tcp", p.Addr()) + c.Assert(err, gc.IsNil) + defer conn.Close() + txt := "hello, world\n" + fmt.Fprint(conn, txt) + + buf := make([]byte, len(txt)) + n, err := io.ReadFull(conn, buf) + c.Assert(err, gc.IsNil) + c.Assert(string(buf[0:n]), gc.Equals, txt) + + // Close the connection and check that we see + // the connection closed for read. + conn.(*net.TCPConn).CloseWrite() + n, err = conn.Read(buf) + c.Assert(err, gc.Equals, io.EOF) + c.Assert(n, gc.Equals, 0) + + // Make another connection and close the proxy, + // which should close down the proxy and cause us + // to get an error. + conn, err = net.Dial("tcp", p.Addr()) + c.Assert(err, gc.IsNil) + defer conn.Close() + + p.Close() + _, err = conn.Read(buf) + c.Assert(err, gc.Equals, io.EOF) + + // Make sure that we cannot dial the proxy address either. + conn, err = net.Dial("tcp", p.Addr()) + c.Assert(err, gc.ErrorMatches, ".*connection refused") + + listener.Close() + // Make sure that all our connections have gone away too. + wg.Wait() +} + +// tcpEcho listens on the given listener for TCP connections, +// writes all traffic received back to the sender, and calls +// wg.Done when all its goroutines have completed. +func tcpEcho(wg *sync.WaitGroup, listener net.Listener) { + defer wg.Done() + for { + conn, err := listener.Accept() + if err != nil { + return + } + wg.Add(1) + go func() { + defer wg.Done() + defer conn.Close() + // Echo anything that was written. + io.Copy(conn, conn) + }() + } +} === modified file 'src/github.com/juju/utils/clock/clock.go' --- src/github.com/juju/utils/clock/clock.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/utils/clock/clock.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,10 @@ // After waits for the duration to elapse and then sends the // current time on the returned channel. After(time.Duration) <-chan time.Time + + // AfterFunc waits for the duration to elapse and then calls f in its own goroutine. 
+ // It returns a Timer that can be used to cancel the call using its Stop method. + AfterFunc(time.Duration, func()) Timer } // Alarm returns a channel that will have the time sent on it at some point @@ -23,3 +27,20 @@ func Alarm(c Clock, t time.Time) <-chan time.Time { return c.After(t.Sub(c.Now())) } + +// The Timer type represents a single event. +// A Timer must be created with AfterFunc. +// This interface follows time.Timer's methods but provides easier mocking. +type Timer interface { + + // Reset changes the timer to expire after duration d. + // It returns true if the timer had been active, false if + // the timer had expired or been stopped. + Reset(time.Duration) bool + + // Stop prevents the Timer from firing. It returns true if + // the call stops the timer, false if the timer has already expired or been stopped. + // Stop does not close the channel, to prevent a read + // from the channel succeeding incorrectly. + Stop() bool +} === modified file 'src/github.com/juju/utils/clock/wall.go' --- src/github.com/juju/utils/clock/wall.go 2015-10-23 18:28:45 +0000 +++ src/github.com/juju/utils/clock/wall.go 2016-03-22 15:18:22 +0000 @@ -23,3 +23,7 @@ func (wallClock) After(d time.Duration) <-chan time.Time { return time.After(d) } + +func (wallClock) AfterFunc(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) +} === added file 'src/github.com/juju/utils/debugstatus/handler.go' --- src/github.com/juju/utils/debugstatus/handler.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/debugstatus/handler.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package debugstatus + +import ( + "net/http" + + "gopkg.in/errgo.v1" + + pprof "github.com/juju/httpprof" + "github.com/juju/httprequest" +) + +// Version describes the current version of the code being run. +type Version struct { + GitCommit string + Version string +} + +// Handler implements a type that can be used with httprequest.Handlers +// to serve a standard set of /debug endpoints, including +// the version of the system, its current health status +// the runtime profiling information. +type Handler struct { + // Check will be called to obtain the current health of the + // system. It should return a map as returned from the + // Check function. If this is nil, an empty result will + // always be returned from /debug/status. + Check func() map[string]CheckResult + + // Version should hold the current version + // of the binary running the server, served + // from the /debug/info endpoint. + Version Version + + // CheckPprofAllowed will be used to check whether the + // given pprof request should be allowed. + // It should return an error if not, which will not be masked. + // If this is nil, no access will be allowed to any + // of the endpoints under /debug/pprof - the + // error returned will be ErrNoPprofConfigured. + CheckPprofAllowed func(req *http.Request) error +} + +// DebugStatusRequest describes the /debug/status endpoint. +type DebugStatusRequest struct { + httprequest.Route `httprequest:"GET /debug/status"` +} + +// DebugStatus returns the current status of the server. +func (h *Handler) DebugStatus(*DebugStatusRequest) (map[string]CheckResult, error) { + if h.Check == nil { + return map[string]CheckResult{}, nil + } + return h.Check(), nil +} + +// DebugInfoRequest describes the /debug/info endpoint. 
+type DebugInfoRequest struct { + httprequest.Route `httprequest:"GET /debug/info"` +} + +// DebugInfo returns version information on the current server. +func (h *Handler) DebugInfo(*DebugInfoRequest) (Version, error) { + return h.Version, nil +} + +// DebugPprofRequest describes the /debug/pprof/ endpoint. +type DebugPprofRequest struct { + httprequest.Route `httprequest:"GET /debug/pprof/"` +} + +// DebugPprof serves index information on the available pprof endpoints. +func (h *Handler) DebugPprof(p httprequest.Params, _ *DebugPprofRequest) error { + if err := h.checkPprofAllowed(p.Request); err != nil { + return err + } + pprof.Index(p.Response, p.Request) + return nil +} + +// DebugPprofEndpointsRequest describes the endpoints under /debug/prof. +type DebugPprofEndpointsRequest struct { + httprequest.Route `httprequest:"GET /debug/pprof/:name"` + Name string `httprequest:"name,path"` +} + +// DebugPprofEndpoints serves all the endpoints under DebugPprof. +func (h *Handler) DebugPprofEndpoints(p httprequest.Params, r *DebugPprofEndpointsRequest) error { + if err := h.checkPprofAllowed(p.Request); err != nil { + return err + } + switch r.Name { + case "cmdline": + pprof.Cmdline(p.Response, p.Request) + case "profile": + pprof.Profile(p.Response, p.Request) + case "symbol": + pprof.Symbol(p.Response, p.Request) + default: + pprof.Handler(r.Name).ServeHTTP(p.Response, p.Request) + } + return nil +} + +// ErrNoPprofConfigured is the error returned on access +// to endpoints when Handler.CheckPprofAllowed is nil. +var ErrNoPprofConfigured = errgo.New("no pprof access configured") + +// checkPprofAllowed is used instead of h.CheckPprofAllowed +// so that we don't panic if that is nil. +func (h *Handler) checkPprofAllowed(req *http.Request) error { + if h.CheckPprofAllowed == nil { + return ErrNoPprofConfigured + } + return h.CheckPprofAllowed(req) +} === added file 'src/github.com/juju/utils/debugstatus/handler_test.go' --- src/github.com/juju/utils/debugstatus/handler_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/debugstatus/handler_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,161 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
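The debugstatus endpoints above are declared through httprequest.Route tags of the form "METHOD /path", with path parameters bound via `httprequest:"name,path"` (see DebugPprofEndpointsRequest). A sketch of one more endpoint following the same convention; DebugGCRequest and DebugGC are illustrative names, not part of this diff:

    package debugstatus

    import "github.com/juju/httprequest"

    // DebugGCRequest is a hypothetical route: the tag carries the method
    // and path, and :kind binds to the Kind field via "kind,path".
    type DebugGCRequest struct {
        httprequest.Route `httprequest:"GET /debug/gc/:kind"`
        Kind              string `httprequest:"kind,path"`
    }

    // DebugGC would serve GET /debug/gc/:kind once registered through the
    // same ErrorMapper.Handlers wiring shown in the tests below.
    func (h *Handler) DebugGC(r *DebugGCRequest) (string, error) {
        return "gc kind: " + r.Kind, nil
    }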
+ +package debugstatus_test + +import ( + "encoding/json" + "net/http" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/juju/utils/debugstatus" + "github.com/julienschmidt/httprouter" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "github.com/juju/httprequest" +) + +var errorMapper httprequest.ErrorMapper = func(err error) (httpStatus int, errorBody interface{}) { + return http.StatusInternalServerError, httprequest.RemoteError{ + Message: err.Error(), + } +} + +type handlerSuite struct { +} + +var _ = gc.Suite(&handlerSuite{}) + +var errUnauthorized = errgo.New("you shall not pass!") + +func newHTTPHandler(h *debugstatus.Handler) http.Handler { + errMapper := httprequest.ErrorMapper(func(err error) (httpStatus int, errorBody interface{}) { + code, status := "", http.StatusInternalServerError + switch err { + case errUnauthorized: + code, status = "unauthorized", http.StatusUnauthorized + case debugstatus.ErrNoPprofConfigured: + code, status = "forbidden", http.StatusForbidden + } + return status, httprequest.RemoteError{ + Code: code, + Message: err.Error(), + } + }) + + handlers := errMapper.Handlers(func(httprequest.Params) (*debugstatus.Handler, error) { + return h, nil + }) + r := httprouter.New() + for _, h := range handlers { + r.Handle(h.Method, h.Path, h.Handle) + } + return r +} + +func (s *handlerSuite) TestServeDebugStatus(c *gc.C) { + httpHandler := newHTTPHandler(&debugstatus.Handler{ + Check: func() map[string]debugstatus.CheckResult { + return debugstatus.Check(debugstatus.ServerStartTime) + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: httpHandler, + URL: "/debug/status", + ExpectBody: httptesting.BodyAsserter(func(c *gc.C, body json.RawMessage) { + var result map[string]debugstatus.CheckResult + err := json.Unmarshal(body, &result) + c.Assert(err, gc.IsNil) + for k, v := range result { + v.Duration = 0 + result[k] = v + } + c.Assert(result, jc.DeepEquals, map[string]debugstatus.CheckResult{ + "server_started": { + Name: "Server started", + Value: debugstatus.StartTime.String(), + Passed: true, + }, + }) + }), + }) +} + +func (s *handlerSuite) TestServeDebugStatusWithNilCheck(c *gc.C) { + httpHandler := newHTTPHandler(&debugstatus.Handler{}) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: httpHandler, + URL: "/debug/status", + ExpectBody: map[string]debugstatus.CheckResult{}, + }) +} + +func (s *handlerSuite) TestServeDebugInfo(c *gc.C) { + version := debugstatus.Version{ + GitCommit: "some-git-status", + Version: "a-version", + } + httpHandler := newHTTPHandler(&debugstatus.Handler{ + Version: version, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: httpHandler, + URL: "/debug/info", + ExpectStatus: http.StatusOK, + ExpectBody: version, + }) +} + +var debugPprofPaths = []string{ + "/debug/pprof/", + "/debug/pprof/cmdline", + "/debug/pprof/profile?seconds=1", + "/debug/pprof/symbol", + "/debug/pprof/goroutine", +} + +func (s *handlerSuite) TestServeDebugPprof(c *gc.C) { + httpHandler := newHTTPHandler(&debugstatus.Handler{ + CheckPprofAllowed: func(req *http.Request) error { + if req.Header.Get("Authorization") == "" { + return errUnauthorized + } + return nil + }, + }) + authHeader := make(http.Header) + authHeader.Set("Authorization", "let me in") + for i, path := range debugPprofPaths { + c.Logf("%d. 
%s", i, path) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: httpHandler, + URL: path, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: httprequest.RemoteError{ + Code: "unauthorized", + Message: "you shall not pass!", + }, + }) + rr := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: httpHandler, + URL: path, + Header: authHeader, + }) + c.Assert(rr.Code, gc.Equals, http.StatusOK) + } +} + +func (s *handlerSuite) TestDebugPprofForbiddenWhenNotConfigured(c *gc.C) { + httpHandler := newHTTPHandler(&debugstatus.Handler{}) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: httpHandler, + URL: "/debug/pprof/", + ExpectStatus: http.StatusForbidden, + ExpectBody: httprequest.RemoteError{ + Code: "forbidden", + Message: "no pprof access configured", + }, + }) +} === modified file 'src/github.com/juju/utils/debugstatus/status.go' --- src/github.com/juju/utils/debugstatus/status.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/utils/debugstatus/status.go 2016-03-22 15:18:22 +0000 @@ -1,6 +1,8 @@ // Copyright 2014 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. +// Package debugstatus provides facilities for inspecting information +// about a running HTTP service. package debugstatus import ( === modified file 'src/github.com/juju/utils/dependencies.tsv' --- src/github.com/juju/utils/dependencies.tsv 2016-03-14 14:26:14 +0000 +++ src/github.com/juju/utils/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -11,7 +11,6 @@ gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z -gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z launchpad.net/gnuflag bzr roger.peppe@canonical.com-20140716064605-pk32dnmfust02yab 13 launchpad.net/tomb bzr gustavo@niemeyer.net-20130531003818-70ikdgklbxopn8x4 17 === added directory 'src/github.com/juju/utils/du' === added file 'src/github.com/juju/utils/du/LICENSE.ricochet2200' --- src/github.com/juju/utils/du/LICENSE.ricochet2200 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/du/LICENSE.ricochet2200 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to <http://unlicense.org/> === added file 'src/github.com/juju/utils/du/diskusage.go' --- src/github.com/juju/utils/du/diskusage.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/du/diskusage.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +// Copied from https://github.com/ricochet2200/go-disk-usage +// Copyright 2011 Rick Smith. +// Use of this source code is governed by a public domain +// license that can be found in the LICENSE.ricochet2200 file. +// +// +build !windows + +package du + +import "syscall" + +type DiskUsage struct { + stat *syscall.Statfs_t +} + +// Returns an object holding the disk usage of volumePath +// This function assumes volumePath is a valid path +func NewDiskUsage(volumePath string) *DiskUsage { + + var stat syscall.Statfs_t + syscall.Statfs(volumePath, &stat) + return &DiskUsage{&stat} +} + +// Total free bytes on file system +func (this *DiskUsage) Free() uint64 { + return this.stat.Bfree * uint64(this.stat.Bsize) +} + +// Total available bytes on file system to an unprivileged user +func (this *DiskUsage) Available() uint64 { + return this.stat.Bavail * uint64(this.stat.Bsize) +} + +// Total size of the file system +func (this *DiskUsage) Size() uint64 { + return this.stat.Blocks * uint64(this.stat.Bsize) +} + +// Total bytes used in file system +func (this *DiskUsage) Used() uint64 { + return this.Size() - this.Free() +} + +// Percentage of use on the file system +func (this *DiskUsage) Usage() float32 { + return float32(this.Used()) / float32(this.Size()) +} === added file 'src/github.com/juju/utils/du/diskusage_windows.go' --- src/github.com/juju/utils/du/diskusage_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/du/diskusage_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copied from https://github.com/ricochet2200/go-disk-usage +// Copyright 2011 Rick Smith. +// Use of this source code is governed by a public domain +// license that can be found in the LICENSE.ricochet2200 file.
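As a quick orientation to the API being vendored here, a minimal usage sketch follows; the mount point "/" is an arbitrary example, and note that the implementation above silently ignores syscall.Statfs errors.

package main

import (
	"fmt"

	"github.com/juju/utils/du"
)

func main() {
	// NewDiskUsage assumes the path exists; on the unix build it simply
	// fills a syscall.Statfs_t for the containing filesystem.
	usage := du.NewDiskUsage("/")
	fmt.Printf("size=%d free=%d available=%d used=%.1f%%\n",
		usage.Size(), usage.Free(), usage.Available(), usage.Usage()*100)
}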
+// + +package du + +import ( + "syscall" + "unsafe" +) + +type DiskUsage struct { + freeBytes int64 + totalBytes int64 + availBytes int64 +} + +// Returns an object holding the disk usage of volumePath +// This function assumes volumePath is a valid path +func NewDiskUsage(volumePath string) *DiskUsage { + + h := syscall.MustLoadDLL("kernel32.dll") + c := h.MustFindProc("GetDiskFreeSpaceExW") + + du := &DiskUsage{} + + c.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))), + uintptr(unsafe.Pointer(&du.freeBytes)), + uintptr(unsafe.Pointer(&du.totalBytes)), + uintptr(unsafe.Pointer(&du.availBytes))) + + return du +} + +// Total free bytes on file system +func (this *DiskUsage) Free() uint64 { + return uint64(this.freeBytes) +} + +// Total available bytes on file system to an unprivileged user +func (this *DiskUsage) Available() uint64 { + return uint64(this.availBytes) +} + +// Total size of the file system +func (this *DiskUsage) Size() uint64 { + return uint64(this.totalBytes) +} + +// Total bytes used in file system +func (this *DiskUsage) Used() uint64 { + return this.Size() - this.Free() +} + +// Percentage of use on the file system +func (this *DiskUsage) Usage() float32 { + return float32(this.Used()) / float32(this.Size()) +} === modified file 'src/github.com/juju/utils/export_test.go' --- src/github.com/juju/utils/export_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/utils/export_test.go 2016-03-22 15:18:22 +0000 @@ -3,6 +3,10 @@ package utils +import ( + "time" +) + var ( GOMAXPROCS = &gomaxprocs NumCPU = &numCPU @@ -10,3 +14,7 @@ NetDial = &netDial ResolveSudoByFunc = resolveSudo ) + +func ExposeBackoffTimerDuration(bot *BackoffTimer) time.Duration { + return bot.currentDuration +} === modified file 'src/github.com/juju/utils/file.go' --- src/github.com/juju/utils/file.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/file.go 2016-03-22 15:18:22 +0000 @@ -51,6 +51,17 @@ return filepath.Clean(dir), nil } +// EnsureBaseDir ensures that path is always prefixed by baseDir, +// allowing for the fact that path might have a Windows drive letter in +// it. +func EnsureBaseDir(baseDir, path string) string { + if baseDir == "" { + return path + } + volume := filepath.VolumeName(path) + return filepath.Join(baseDir, path[len(volume):]) +} + // JoinServerPath joins any number of path elements into a single path, adding // a path separator (based on the current juju server OS) if necessary. The // result is Cleaned; in particular, all empty strings are ignored. === added file 'src/github.com/juju/utils/file_unix_test.go' --- src/github.com/juju/utils/file_unix_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/file_unix_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details.
+ +// +build !windows + +package utils_test + +import ( + gc "gopkg.in/check.v1" + + "github.com/juju/utils" +) + +type unixFileSuite struct { +} + +var _ = gc.Suite(&unixFileSuite{}) + +func (s *unixFileSuite) TestEnsureBaseDir(c *gc.C) { + c.Assert(utils.EnsureBaseDir(`/a`, `/b/c`), gc.Equals, `/a/b/c`) + c.Assert(utils.EnsureBaseDir(`/`, `/b/c`), gc.Equals, `/b/c`) + c.Assert(utils.EnsureBaseDir(``, `/b/c`), gc.Equals, `/b/c`) +} === modified file 'src/github.com/juju/utils/file_windows_test.go' (properties changed: -x to +x) --- src/github.com/juju/utils/file_windows_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/file_windows_test.go 2016-03-22 15:18:22 +0000 @@ -40,3 +40,11 @@ c.Assert(utils.MakeFileURL(t.in), gc.Equals, t.expected) } } + +func (s *windowsFileSuite) TestEnsureBaseDir(c *gc.C) { + c.Assert(utils.EnsureBaseDir(`C:\r`, `C:\a\b`), gc.Equals, `C:\r\a\b`) + c.Assert(utils.EnsureBaseDir(`C:\r`, `D:\a\b`), gc.Equals, `C:\r\a\b`) + c.Assert(utils.EnsureBaseDir(`C:`, `D:\a\b`), gc.Equals, `C:\a\b`) + c.Assert(utils.EnsureBaseDir(`C:`, `\a\b`), gc.Equals, `C:\a\b`) + c.Assert(utils.EnsureBaseDir(``, `C:\a\b`), gc.Equals, `C:\a\b`) +} === added file 'src/github.com/juju/utils/fslock/export_test.go' --- src/github.com/juju/utils/fslock/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/fslock/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package fslock + +type OnDisk onDisk + +func IsAlive(lock *Lock, PID int) bool { + return lock.isAlive(PID) +} + +func DeclareDead(lock *Lock) { + lock.declareDead() +} + +func AliveFile(lock *Lock) string { + return lock.aliveFile(lock.PID) +} === modified file 'src/github.com/juju/utils/fslock/fslock.go' --- src/github.com/juju/utils/fslock/fslock.go 2014-08-20 15:00:12 +0000 +++ src/github.com/juju/utils/fslock/fslock.go 2016-03-22 15:18:22 +0000 @@ -1,7 +1,7 @@ // Copyright 2013 Canonical Ltd. // Licensed under the LGPLv3, see LICENCE file for details. -// On-disk mutex protecting a resource +// Package fslock provides an on-disk mutex protecting a resource // // A lock is represented on disk by a directory of a particular name, // containing an information file. Taking a lock is done by renaming a @@ -11,63 +11,119 @@ package fslock import ( - "bytes" - "errors" "fmt" "io/ioutil" "os" "path" "regexp" + "runtime" + "sync" "time" + "github.com/juju/errors" "github.com/juju/loggo" "github.com/juju/utils" + "github.com/juju/utils/clock" + goyaml "gopkg.in/yaml.v2" ) const ( // NameRegexp specifies the regular expression used to identify valid lock names. - NameRegexp = "^[a-z]+[a-z0-9.-]*$" - heldFilename = "held" - messageFilename = "message" + NameRegexp = "^[a-z]+[a-z0-9.-]*$" + heldFilename = "held" ) var ( - logger = loggo.GetLogger("juju.utils.fslock") + logger = loggo.GetLogger("juju.utils.fslock") + + // ErrLockNotHeld is returned by Unlock if the lock file is not held by this lock ErrLockNotHeld = errors.New("lock not held") - ErrTimeout = errors.New("lock timeout exceeded") + // ErrTimeout is returned by LockWithTimeout if the lock could not be obtained before the given deadline + ErrTimeout = errors.New("lock timeout exceeded") validName = regexp.MustCompile(NameRegexp) - - LockWaitDelay = 1 * time.Second ) +// LockConfig defines the configuration of the new lock. Sensible defaults can be +// obtained from Defaults(). 
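A minimal sketch of building a lock from this configuration; the lock directory and lock name below are hypothetical.

package lockexample

import (
	"time"

	"github.com/juju/utils/fslock"
)

func newAppLock() (*fslock.Lock, error) {
	cfg := fslock.Defaults()
	// Poll more aggressively than the default 1s between acquisition attempts.
	cfg.WaitDelay = 100 * time.Millisecond
	return fslock.NewLock("/var/lib/myapp/locks", "db-migration", cfg)
}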
+type LockConfig struct { + // Clock is used to generate delays + Clock clock.Clock + // WaitDelay is how long to wait after trying to acquire a lock before trying again + WaitDelay time.Duration + // LividityTimeout is how old a lock can be without us considering its + // parent process dead. + LividityTimeout time.Duration + // ReadRetryTimeout is how long to wait after trying to examine a lock + // and not finding it before trying again. + ReadRetryTimeout time.Duration +} + +// Defaults generates a LockConfig pre-filled with sensible defaults. +func Defaults() LockConfig { + return LockConfig{ + Clock: clock.WallClock, + WaitDelay: 1 * time.Second, + LividityTimeout: 30 * time.Second, + ReadRetryTimeout: time.Millisecond * 10, + } +} + +// Lock is a file system lock type Lock struct { - name string - parent string - nonce []byte + name string + parent string + clock clock.Clock + nonce string + PID int + stopWritingAliveFile chan struct{} + createAliveFileRunning sync.WaitGroup + waitDelay time.Duration + lividityTimeout time.Duration + readRetryTimeout time.Duration + sanityCheck chan struct{} +} + +type onDisk struct { + Nonce string + PID int + Message string } // NewLock returns a new lock with the given name within the given lock // directory, without acquiring it. The lock name must match the regular // expression defined by NameRegexp. -func NewLock(lockDir, name string) (*Lock, error) { +func NewLock(lockDir, name string, cfg LockConfig) (*Lock, error) { if !validName.MatchString(name) { return nil, fmt.Errorf("Invalid lock name %q. Names must match %q", name, NameRegexp) } - nonce, err := utils.NewUUID() + uuid, err := utils.NewUUID() if err != nil { return nil, err } lock := &Lock{ - name: name, - parent: lockDir, - nonce: nonce[:], + name: name, + parent: lockDir, + clock: cfg.Clock, + nonce: uuid.String(), + PID: os.Getpid(), + stopWritingAliveFile: make(chan struct{}, 1), + waitDelay: cfg.WaitDelay, + lividityTimeout: cfg.LividityTimeout, + readRetryTimeout: cfg.ReadRetryTimeout, + sanityCheck: make(chan struct{}), } // Ensure the parent exists. if err := os.MkdirAll(lock.parent, 0755); err != nil { return nil, err } + // Ensure that an old alive file doesn't exist. RemoveAll doesn't raise + // an error if the target doesn't exist, so we don't expect any errors. + if err := os.RemoveAll(lock.aliveFile(lock.PID)); err != nil { + return nil, err + } + return lock, nil } @@ -79,8 +135,81 @@ return path.Join(lock.lockDir(), "held") } -func (lock *Lock) messageFile() string { - return path.Join(lock.lockDir(), "message") +func (lock *Lock) aliveFile(PID int) string { + return path.Join(lock.lockDir(), fmt.Sprintf("alive.%d", PID)) +} + +// isAlive checks that the PID given is alive by looking to see if it is the +// current process's PID or, if it isn't, for a file named alive.<PID>, which +// has been updated within the lividity timeout (30 seconds by default). +func (lock *Lock) isAlive(PID int) bool { + if PID == lock.PID { + return true + } + for i := 0; i < 10; i++ { + aliveInfo, err := os.Lstat(lock.aliveFile(PID)) + if err == nil { + return time.Now().Before(aliveInfo.ModTime().Add(lock.lividityTimeout)) + } + time.Sleep(lock.readRetryTimeout) + } + return false } + +// createAliveFile kicks off a goroutine that creates a proof of life file +// and keeps its timestamp current.
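To make the liveness rule concrete, here is a small self-contained sketch of the staleness test used above (minus the read-retry loop); the path is hypothetical.

package main

import (
	"fmt"
	"os"
	"time"
)

// aliveWithin reports whether the proof-of-life file at path was touched
// within the given lividity window, mirroring the isAlive check above.
func aliveWithin(path string, lividity time.Duration) bool {
	info, err := os.Lstat(path)
	if err != nil {
		// No alive file at all: the holder is presumed dead.
		return false
	}
	return time.Now().Before(info.ModTime().Add(lividity))
}

func main() {
	fmt.Println(aliveWithin("/var/lib/myapp/locks/db-migration/alive.1234", 30*time.Second))
}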
+func (lock *Lock) createAliveFile() { + lock.createAliveFileRunning.Add(1) + close(lock.sanityCheck) + go func() { + defer lock.createAliveFileRunning.Done() + + aliveFile := lock.aliveFile(lock.PID) + if err := ioutil.WriteFile(aliveFile, []byte{}, 0644); err != nil { + return + } + + for { + select { + case <-time.After(5 * lock.waitDelay): + now := time.Now() + if err := os.Chtimes(aliveFile, now, now); err != nil { + return + } + case <-lock.stopWritingAliveFile: + return + } + } + }() +} + +func (lock *Lock) declareDead() { + select { + case lock.stopWritingAliveFile <- struct{}{}: + default: + } + lock.createAliveFileRunning.Wait() + lock.sanityCheck = make(chan struct{}) // refresh sanity check +} + +// clean reads the lock and checks that it is valid. If the lock points to a running +// juju process that is older than the lock file, the lock is left in place, else +// the lock is removed. +func (lock *Lock) clean() error { + // If a lock exists, see if it is stale + lockInfo, err := lock.readLock() + if err != nil { + return nil + } + + if lock.isAlive(lockInfo.PID) { + // lock is current. Do nothing. + logger.Debugf("Lock alive") + return nil + } + + logger.Debugf("Lock dead") + return lock.BreakLock() } // If message is set, it will write the message to the lock directory as the @@ -98,22 +227,26 @@ // the right name. Using the same directory to make sure the directories // are on the same filesystem. Use a directory name starting with "." as // it isn't a valid lock name. - tempLockName := fmt.Sprintf(".%x", lock.nonce) + tempLockName := fmt.Sprintf(".%s", lock.nonce) tempDirName, err := ioutil.TempDir(lock.parent, tempLockName) if err != nil { return false, err // this shouldn't really fail... } - // write nonce into the temp dir - err = ioutil.WriteFile(path.Join(tempDirName, heldFilename), lock.nonce, 0755) + + // write lock into the temp dir + l := onDisk{ + PID: lock.PID, + Nonce: lock.nonce, + Message: message, + } + lockInfo, err := goyaml.Marshal(&l) + if err != nil { + return false, err // this shouldn't fail either... + } + err = ioutil.WriteFile(path.Join(tempDirName, heldFilename), lockInfo, 0664) if err != nil { return false, err } - if message != "" { - err = ioutil.WriteFile(path.Join(tempDirName, messageFilename), []byte(message), 0755) - if err != nil { - return false, err - } - } // Now move the temp directory to the lock directory. err = utils.ReplaceFile(tempDirName, lock.lockDir()) if err != nil { @@ -123,6 +256,7 @@ return false, nil } // We now have the lock. + lock.createAliveFile() return true, nil } @@ -146,7 +280,7 @@ logger.Infof("attempted lock failed %q, %s, currently held: %s", lock.name, message, currMessage) heldMessage = currMessage } - time.Sleep(LockWaitDelay) + <-lock.clock.After(lock.waitDelay) } } @@ -156,6 +290,7 @@ // information, and can be queried by any other Lock dealing with the same // lock name and lock directory. func (lock *Lock) Lock(message string) error { + lock.clean() // The continueFunc is effectively a no-op, causing continual looping // until the lock is acquired. continueFunc := func() error { return nil } @@ -166,9 +301,9 @@ // within the given duration, it returns ErrTimeout. See `Lock` for // information about the message.
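A sketch of how a caller might handle the timeout path, assuming a lock created as above; the message text is arbitrary.

package lockexample

import (
	"log"
	"time"

	"github.com/juju/utils/fslock"
)

func withLock(lock *fslock.Lock, do func() error) error {
	if err := lock.LockWithTimeout(5*time.Second, "migrating data"); err != nil {
		if err == fslock.ErrTimeout {
			// Another process holds the lock; its message may say why.
			log.Printf("lock busy: %s", lock.Message())
		}
		return err
	}
	defer lock.Unlock()
	return do()
}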
func (lock *Lock) LockWithTimeout(duration time.Duration, message string) error { - deadline := time.Now().Add(duration) + deadline := lock.clock.Now().Add(duration) continueFunc := func() error { - if time.Now().After(deadline) { + if lock.clock.Now().After(deadline) { return ErrTimeout } return nil @@ -183,13 +318,23 @@ return lock.lockLoop(message, continueFunc) } +func (lock *Lock) readLock() (lockInfo onDisk, err error) { + lockFile, err := ioutil.ReadFile(lock.heldFile()) + if err != nil { + return lockInfo, err + } + + err = goyaml.Unmarshal(lockFile, &lockInfo) + return lockInfo, err +} + // IsLockHeld returns whether the lock is currently held by the receiver. func (lock *Lock) IsLockHeld() bool { - heldNonce, err := ioutil.ReadFile(lock.heldFile()) + lockInfo, err := lock.readLock() if err != nil { return false } - return bytes.Equal(heldNonce, lock.nonce) + return lockInfo.Nonce == lock.nonce } // Unlock releases a held lock. If the lock is not held ErrLockNotHeld is @@ -199,14 +344,28 @@ return ErrLockNotHeld } // To ensure reasonable unlocking, we should rename to a temp name, and delete that. - tempLockName := fmt.Sprintf(".%s.%x", lock.name, lock.nonce) + lock.declareDead() + tempLockName := fmt.Sprintf(".%s.%s", lock.name, lock.nonce) tempDirName := path.Join(lock.parent, tempLockName) // Now move the lock directory to the temp directory to release the lock. - if err := utils.ReplaceFile(lock.lockDir(), tempDirName); err != nil { + for i := 0; ; i++ { + err := utils.ReplaceFile(lock.lockDir(), tempDirName) + if err == nil { + break + } + if i == 100 { + logger.Debugf("Failed to replace lock, giving up: (%s)", err) + return err + } + logger.Debugf("Failed to replace lock, retrying: (%s)", err) + runtime.Gosched() + } + // And now cleanup. + if err := os.RemoveAll(tempDirName); err != nil { + logger.Debugf("Failed to remove lock: %s", err) return err } - // And now cleanup. - return os.RemoveAll(tempDirName) + return nil } // IsLocked returns true if the lock is currently held by anyone. @@ -215,17 +374,18 @@ return err == nil } -// BreakLock forcably breaks the lock that is currently being held. +// BreakLock forcibly breaks the lock that is currently being held. func (lock *Lock) BreakLock() error { + lock.declareDead() return os.RemoveAll(lock.lockDir()) } // Message returns the saved message, or the empty string if there is no // saved message. 
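Since the held file is plain YAML, it can be inspected out of band; a minimal sketch, assuming the field layout of the onDisk struct above and a hypothetical lock directory.

package main

import (
	"fmt"
	"io/ioutil"
	"path"

	goyaml "gopkg.in/yaml.v2"
)

// lockInfo mirrors the unexported onDisk struct written to the held file.
type lockInfo struct {
	Nonce   string
	PID     int
	Message string
}

func main() {
	data, err := ioutil.ReadFile(path.Join("/var/lib/myapp/locks", "db-migration", "held"))
	if err != nil {
		fmt.Println("lock not held:", err)
		return
	}
	var info lockInfo
	if err := goyaml.Unmarshal(data, &info); err != nil {
		fmt.Println("unreadable lock:", err)
		return
	}
	fmt.Printf("held by PID %d: %s\n", info.PID, info.Message)
}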
func (lock *Lock) Message() string { - message, err := ioutil.ReadFile(lock.messageFile()) + lockInfo, err := lock.readLock() if err != nil { return "" } - return string(message) + return lockInfo.Message } === modified file 'src/github.com/juju/utils/fslock/fslock_test.go' --- src/github.com/juju/utils/fslock/fslock_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/fslock/fslock_test.go 2016-03-22 15:18:22 +0000 @@ -13,9 +13,12 @@ "time" "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" gc "gopkg.in/check.v1" "launchpad.net/tomb" + "github.com/juju/utils/clock" "github.com/juju/utils/fslock" ) @@ -25,15 +28,34 @@ ) type fslockSuite struct { + testing.CleanupSuite testing.IsolationSuite - lockDelay time.Duration + lockDelay time.Duration + lockConfig fslock.LockConfig } var _ = gc.Suite(&fslockSuite{}) -func (s *fslockSuite) SetUpSuite(c *gc.C) { - s.IsolationSuite.SetUpSuite(c) - s.PatchValue(&fslock.LockWaitDelay, 1*time.Millisecond) +type fastclock struct { + c *gc.C +} + +func (*fastclock) Now() time.Time { + return time.Now() +} + +func (f *fastclock) After(duration time.Duration) <-chan time.Time { + f.c.Check(duration, gc.Equals, fslock.Defaults().WaitDelay) + return time.After(time.Millisecond) +} + +func (f *fastclock) AfterFunc(d time.Duration, af func()) clock.Timer { + return time.AfterFunc(d, af) +} + +func (s *fslockSuite) SetUpTest(c *gc.C) { + s.lockConfig = fslock.Defaults() + s.lockConfig.Clock = &fastclock{c} } // This test also happens to test that locks can get created when the parent @@ -46,7 +68,7 @@ "longer-with.special-characters", } { dir := c.MkDir() - _, err := fslock.NewLock(dir, name) + _, err := fslock.NewLock(dir, name, s.lockConfig) c.Assert(err, gc.IsNil) } } @@ -64,7 +86,7 @@ "no:colon", } { dir := c.MkDir() - _, err := fslock.NewLock(dir, name) + _, err := fslock.NewLock(dir, name, s.lockConfig) c.Assert(err, gc.ErrorMatches, "Invalid lock name .*") } } @@ -73,7 +95,7 @@ dir := c.MkDir() err := os.MkdirAll(dir, 0755) c.Assert(err, gc.IsNil) - _, err = fslock.NewLock(dir, "special") + _, err = fslock.NewLock(dir, "special", s.lockConfig) c.Assert(err, gc.IsNil) } @@ -84,14 +106,13 @@ path := path.Join(dir, "locks") err = ioutil.WriteFile(path, []byte("foo"), 0644) c.Assert(err, gc.IsNil) - - _, err = fslock.NewLock(path, "special") - c.Assert(err, gc.ErrorMatches, `.* not a directory`) + _, err = fslock.NewLock(path, "special", s.lockConfig) + c.Assert(err, gc.ErrorMatches, utils.MkdirFailErrRegexp) } func (s *fslockSuite) TestIsLockHeldBasics(c *gc.C) { dir := c.MkDir() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) c.Assert(lock.IsLockHeld(), gc.Equals, false) @@ -106,9 +127,9 @@ func (s *fslockSuite) TestIsLockHeldTwoLocks(c *gc.C) { dir := c.MkDir() - lock1, err := fslock.NewLock(dir, "testing") + lock1, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) - lock2, err := fslock.NewLock(dir, "testing") + lock2, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock1.Lock("") @@ -119,9 +140,9 @@ func (s *fslockSuite) TestLockBlocks(c *gc.C) { dir := c.MkDir() - lock1, err := fslock.NewLock(dir, "testing") + lock1, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) - lock2, err := fslock.NewLock(dir, "testing") + lock2, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) acquired := make(chan struct{}) @@ -157,7 
+178,7 @@ func (s *fslockSuite) TestLockWithTimeoutUnlocked(c *gc.C) { dir := c.MkDir() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock.LockWithTimeout(shortWait, "") @@ -166,9 +187,9 @@ func (s *fslockSuite) TestLockWithTimeoutLocked(c *gc.C) { dir := c.MkDir() - lock1, err := fslock.NewLock(dir, "testing") + lock1, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) - lock2, err := fslock.NewLock(dir, "testing") + lock2, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock1.Lock("") @@ -180,7 +201,7 @@ func (s *fslockSuite) TestUnlock(c *gc.C) { dir := c.MkDir() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock.Unlock() @@ -189,9 +210,9 @@ func (s *fslockSuite) TestIsLocked(c *gc.C) { dir := c.MkDir() - lock1, err := fslock.NewLock(dir, "testing") + lock1, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) - lock2, err := fslock.NewLock(dir, "testing") + lock2, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock1.Lock("") @@ -203,9 +224,9 @@ func (s *fslockSuite) TestBreakLock(c *gc.C) { dir := c.MkDir() - lock1, err := fslock.NewLock(dir, "testing") + lock1, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) - lock2, err := fslock.NewLock(dir, "testing") + lock2, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock1.Lock("") @@ -226,7 +247,7 @@ func (s *fslockSuite) TestMessage(c *gc.C) { dir := c.MkDir() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) c.Assert(lock.Message(), gc.Equals, "") @@ -242,9 +263,9 @@ func (s *fslockSuite) TestMessageAcrossLocks(c *gc.C) { dir := c.MkDir() - lock1, err := fslock.NewLock(dir, "testing") + lock1, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) - lock2, err := fslock.NewLock(dir, "testing") + lock2, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock1.Lock("very busy") @@ -254,7 +275,7 @@ func (s *fslockSuite) TestInitialMessageWhenLocking(c *gc.C) { dir := c.MkDir() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) err = lock.Lock("initial message") @@ -284,7 +305,7 @@ var stress = func(name string) { defer func() { done <- struct{}{} }() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) if err != nil { c.Errorf("Failed to create a new lock") return @@ -321,7 +342,7 @@ die := tomb.Tomb{} dir := c.MkDir() - lock, err := fslock.NewLock(dir, "testing") + lock, err := fslock.NewLock(dir, "testing", s.lockConfig) c.Assert(err, gc.IsNil) // Just use one lock, and try to lock it twice. err = lock.Lock("very busy") @@ -347,3 +368,51 @@ c.Assert(lock.Message(), gc.Equals, "very busy") } + +func (s *fslockSuite) TestCleanStaleLock(c *gc.C) { + lock, lockFile, dir := newLockedLock(c, s.lockConfig) + c.Assert(fslock.IsAlive(lock, lock.PID), gc.Equals, true) + c.Assert(fslock.IsAlive(lock, 1), gc.Equals, false) + + // Make a stale alive file, point the lock to it, then try to re-lock. 
+ PID := 1 + aliveFile := path.Join(dir, "testing", fmt.Sprintf("alive.%d", PID)) + ioutil.WriteFile(aliveFile, []byte{}, 0644) + oneHourAgo := time.Now().Add(-time.Hour) + os.Chtimes(aliveFile, oneHourAgo, oneHourAgo) + changeLockfilePID(c, lockFile, PID) + assertCanLock(c, lock) +} + +func (s *fslockSuite) TestCleanNoMatchingProcess(c *gc.C) { + lock, lockFile, _ := newLockedLock(c, s.lockConfig) + + // Change the PID to a process that doesn't exist. + changeLockfilePID(c, lockFile, 1) + assertCanLock(c, lock) +} + +// TestProofOfLife checks that the alive file doesn't get older than 500ms. Normally +// it can get older, but we crank up the refresh interval for testing. +func (s *fslockSuite) TestProofOfLife(c *gc.C) { + s.lockConfig.WaitDelay = 20 * time.Millisecond + lock, _, dir := newLockedLock(c, s.lockConfig) + aliveFile := path.Join(dir, "testing", fmt.Sprintf("alive.%d", lock.PID)) + + tests := 0 + for check := 0; check < 20; check++ { + aliveInfo, err := os.Lstat(aliveFile) + if err != nil { + // Typically this is file not existing. Whatever the reason, just retry + time.Sleep(50 * time.Millisecond) + continue + } + + c.Assert(time.Now().Sub(aliveInfo.ModTime()), jc.DurationLessThan, 500*time.Millisecond) + tests++ + time.Sleep(50 * time.Millisecond) + } + + // Make sure we actually spotted an alive file and checked its time. + c.Assert(tests > 1, gc.Equals, true) +} === added file 'src/github.com/juju/utils/fslock/util_test.go' --- src/github.com/juju/utils/fslock/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/fslock/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,42 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package fslock_test + +import ( + "io/ioutil" + "path" + + gc "gopkg.in/check.v1" + goyaml "gopkg.in/yaml.v2" + + "github.com/juju/utils/fslock" +) + +func changeLockfilePID(c *gc.C, lockFile string, PID int) { + var l fslock.OnDisk + heldLock, err := ioutil.ReadFile(lockFile) + c.Assert(err, gc.IsNil) + err = goyaml.Unmarshal(heldLock, &l) + c.Assert(err, gc.IsNil) + l.PID = PID + heldLock, err = goyaml.Marshal(l) + c.Assert(err, gc.IsNil) + err = ioutil.WriteFile(lockFile, heldLock, 0644) + c.Assert(err, gc.IsNil) +} + +func assertCanLock(c *gc.C, lock *fslock.Lock) { + err := lock.Lock("") + c.Assert(err, gc.IsNil) + c.Assert(lock.IsLocked(), gc.Equals, true) +} + +func newLockedLock(c *gc.C, cfg fslock.LockConfig) (lock *fslock.Lock, lockFile, dir string) { + dir = c.MkDir() + lock, err := fslock.NewLock(dir, "testing", cfg) + c.Assert(err, gc.IsNil) + assertCanLock(c, lock) + lockFile = path.Join(dir, "testing", "held") + return lock, lockFile, dir +} === added file 'src/github.com/juju/utils/hash/fingerprint.go' --- src/github.com/juju/utils/hash/fingerprint.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/hash/fingerprint.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,132 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package hash + +import ( + "encoding/base64" + "encoding/hex" + "hash" + "io" + + "github.com/juju/errors" +) + +// Fingerprint represents the checksum for some data. +type Fingerprint struct { + sum []byte +} + +// NewFingerprint wraps the provided raw hash sum. This function +// roundtrips with Fingerprint.Bytes().
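A short sketch of the round trip this comment describes, using the package's own SHA384 helpers (defined in hash.go further below); the payload string is arbitrary.

package main

import (
	"fmt"
	"strings"

	"github.com/juju/utils/hash"
)

func main() {
	newHash, validate := hash.SHA384()
	fp, err := hash.GenerateFingerprint(strings.NewReader("some payload"), newHash)
	if err != nil {
		panic(err)
	}
	// Bytes round-trips through NewFingerprint, as documented above.
	again, err := hash.NewFingerprint(fp.Bytes(), validate)
	if err != nil {
		panic(err)
	}
	fmt.Println(fp.Hex() == again.Hex()) // true
}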
+func NewFingerprint(sum []byte, validate func([]byte) error) (Fingerprint, error) { + if validate == nil { + return Fingerprint{}, errors.New("missing validate func") + } + + if err := validate(sum); err != nil { + return Fingerprint{}, errors.Trace(err) + } + return newFingerprint(sum), nil +} + +// NewValidFingerprint returns a Fingerprint corresponding +// to the current state of the provided hash. +func NewValidFingerprint(hash hash.Hash) Fingerprint { + sum := hash.Sum(nil) + return newFingerprint(sum) +} + +func newFingerprint(sum []byte) Fingerprint { + return Fingerprint{ + sum: append([]byte{}, sum...), // Use an isolated copy. + } +} + +// GenerateFingerprint returns the fingerprint for the provided data. +func GenerateFingerprint(reader io.Reader, newHash func() hash.Hash) (Fingerprint, error) { + var fp Fingerprint + + if reader == nil { + return fp, errors.New("missing reader") + } + if newHash == nil { + return fp, errors.New("missing new hash func") + } + + hash := newHash() + if _, err := io.Copy(hash, reader); err != nil { + return fp, errors.Trace(err) + } + fp.sum = hash.Sum(nil) + return fp, nil +} + +// ParseHexFingerprint wraps the provided raw fingerprint string. +// This function roundtrips with Fingerprint.Hex(). +func ParseHexFingerprint(hexSum string, validate func([]byte) error) (Fingerprint, error) { + if validate == nil { + return Fingerprint{}, errors.New("missing validate func") + } + + sum, err := hex.DecodeString(hexSum) + if err != nil { + return Fingerprint{}, errors.Trace(err) + } + fp, err := NewFingerprint(sum, validate) + if err != nil { + return Fingerprint{}, errors.Trace(err) + } + return fp, nil +} + +// ParseBase64Fingerprint wraps the provided raw fingerprint string. +// This function roundtrips with Fingerprint.Base64(). +func ParseBase64Fingerprint(b64Sum string, validate func([]byte) error) (Fingerprint, error) { + if validate == nil { + return Fingerprint{}, errors.New("missing validate func") + } + + sum, err := base64.StdEncoding.DecodeString(b64Sum) + if err != nil { + return Fingerprint{}, errors.Trace(err) + } + fp, err := NewFingerprint(sum, validate) + if err != nil { + return Fingerprint{}, errors.Trace(err) + } + return fp, nil +} + +// String implements fmt.Stringer. +func (fp Fingerprint) String() string { + return fp.Hex() +} + +// Hex returns the hex string representation of the fingerprint. +func (fp Fingerprint) Hex() string { + return hex.EncodeToString(fp.sum) +} + +// Base64 returns the base64 encoded fingerprint. +func (fp Fingerprint) Base64() string { + return base64.StdEncoding.EncodeToString(fp.sum) +} + +// Bytes returns the raw (sum) bytes of the fingerprint. +func (fp Fingerprint) Bytes() []byte { + return append([]byte{}, fp.sum...) +} + +// IsZero returns whether or not the fingerprint is the zero value. +func (fp Fingerprint) IsZero() bool { + return len(fp.sum) == 0 +} + +// Validate returns an error if the fingerprint is invalid. +func (fp Fingerprint) Validate() error { + if fp.IsZero() { + return errors.NotValidf("zero-value fingerprint") + } + return nil +} === added file 'src/github.com/juju/utils/hash/fingerprint_test.go' --- src/github.com/juju/utils/hash/fingerprint_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/hash/fingerprint_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,169 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details.
+ +package hash_test + +import ( + "crypto/sha512" + "encoding/hex" + stdhash "hash" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/hash" +) + +var _ = gc.Suite(&FingerprintSuite{}) + +type FingerprintSuite struct { + stub *testing.Stub + hash *filetesting.StubHash +} + +func (s *FingerprintSuite) SetUpTest(c *gc.C) { + s.stub = &testing.Stub{} + s.hash = filetesting.NewStubHash(s.stub, nil) +} + +func (s *FingerprintSuite) newHash() stdhash.Hash { + s.stub.AddCall("newHash") + s.stub.NextErr() // Pop one off. + + return s.hash +} + +func (s *FingerprintSuite) validate(sum []byte) error { + s.stub.AddCall("validate", sum) + if err := s.stub.NextErr(); err != nil { + return errors.Trace(err) + } + + return nil +} + +func (s *FingerprintSuite) TestNewFingerprintOkay(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + + fp, err := hash.NewFingerprint(expected, s.validate) + c.Assert(err, jc.ErrorIsNil) + sum := fp.Bytes() + + s.stub.CheckCallNames(c, "validate") + c.Check(sum, jc.DeepEquals, expected) +} + +func (s *FingerprintSuite) TestNewFingerprintInvalid(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + failure := errors.NewNotValid(nil, "bogus!!!") + s.stub.SetErrors(failure) + + _, err := hash.NewFingerprint(expected, s.validate) + + s.stub.CheckCallNames(c, "validate") + c.Check(errors.Cause(err), gc.Equals, failure) +} + +func (s *FingerprintSuite) TestNewValidFingerprint(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + s.hash.ReturnSum = expected + + fp := hash.NewValidFingerprint(s.hash) + sum := fp.Bytes() + + s.stub.CheckCallNames(c, "Sum") + c.Check(sum, jc.DeepEquals, expected) +} + +func (s *FingerprintSuite) TestGenerateFingerprintOkay(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + s.hash.ReturnSum = expected + s.hash.Writer, _ = filetesting.NewStubWriter(s.stub) + reader := filetesting.NewStubReader(s.stub, "spamspamspam") + + fp, err := hash.GenerateFingerprint(reader, s.newHash) + c.Assert(err, jc.ErrorIsNil) + sum := fp.Bytes() + + s.stub.CheckCallNames(c, "newHash", "Read", "Write", "Read", "Sum") + c.Check(sum, jc.DeepEquals, expected) +} + +func (s *FingerprintSuite) TestGenerateFingerprintNil(c *gc.C) { + _, err := hash.GenerateFingerprint(nil, s.newHash) + + s.stub.CheckNoCalls(c) + c.Check(err, gc.ErrorMatches, `missing reader`) +} + +func (s *FingerprintSuite) TestParseHexFingerprint(c *gc.C) { + expected, hexSum := newFingerprint(c, "spamspamspam") + + fp, err := hash.ParseHexFingerprint(hexSum, s.validate) + c.Assert(err, jc.ErrorIsNil) + sum := fp.Bytes() + + s.stub.CheckCallNames(c, "validate") + c.Check(sum, jc.DeepEquals, expected) +} + +func (s *FingerprintSuite) TestString(c *gc.C) { + sum, expected := newFingerprint(c, "spamspamspam") + fp, err := hash.NewFingerprint(sum, s.validate) + c.Assert(err, jc.ErrorIsNil) + + hex := fp.String() + + c.Check(hex, gc.Equals, expected) +} + +func (s *FingerprintSuite) TestHex(c *gc.C) { + sum, expected := newFingerprint(c, "spamspamspam") + fp, err := hash.NewFingerprint(sum, s.validate) + c.Assert(err, jc.ErrorIsNil) + + hex := fp.String() + + c.Check(hex, gc.Equals, expected) +} + +func (s *FingerprintSuite) TestBytes(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + fp, err := hash.NewFingerprint(expected, s.validate) + c.Assert(err, jc.ErrorIsNil) + + sum := fp.Bytes() + + c.Check(sum, jc.DeepEquals, 
expected) +} + +func (s *FingerprintSuite) TestValidateOkay(c *gc.C) { + sum, _ := newFingerprint(c, "spamspamspam") + fp, err := hash.NewFingerprint(sum, s.validate) + c.Assert(err, jc.ErrorIsNil) + + err = fp.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *FingerprintSuite) TestValidateZero(c *gc.C) { + var fp hash.Fingerprint + err := fp.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `zero-value fingerprint not valid`) +} + +func newFingerprint(c *gc.C, data string) ([]byte, string) { + hash := sha512.New384() + _, err := hash.Write([]byte(data)) + c.Assert(err, jc.ErrorIsNil) + sum := hash.Sum(nil) + + hexStr := hex.EncodeToString(sum) + return sum, hexStr +} === added file 'src/github.com/juju/utils/hash/hash.go' --- src/github.com/juju/utils/hash/hash.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/hash/hash.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,76 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// Package hash provides utilities that support use of the stdlib +// hash.Hash. Most notable is the Fingerprint type that wraps the +// checksum of a hash. +// +// Conversion between checksums and strings is facilitated through +// Fingerprint. +// +// Here are some hash-related recipes that bring it all together: +// +// * Extract the SHA384 hash while writing to elsewhere, then get the +// raw checksum: +// +// newHash, _ := hash.SHA384() +// h := newHash() +// hashingWriter := io.MultiWriter(writer, h) +// if err := writeAll(hashingWriter); err != nil { ... } +// fp := hash.NewValidFingerprint(h) +// checksum := fp.Bytes() +// +// * Extract the SHA384 hash while reading from elsewhere, then get the +// hex-encoded checksum to send over the wire: +// +// newHash, _ := hash.SHA384() +// h := newHash() +// hashingReader := io.TeeReader(reader, h) +// if err := processStream(hashingReader); err != nil { ... } +// fp := hash.NewValidFingerprint(h) +// hexSum := fp.Hex() +// req.Header.Set("Content-Sha384", hexSum) +// +// * Turn a checksum sent over the wire back into a fingerprint: +// +// _, validate := hash.SHA384() +// hexSum := req.Header.Get("Content-Sha384") +// var fp hash.Fingerprint +// if len(hexSum) != 0 { +// fp, err = hash.ParseHexFingerprint(hexSum, validate) +// ... +// } +// if fp.IsZero() { +// ... +// } +package hash + +import ( + "crypto/sha512" + "hash" + + "github.com/juju/errors" + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("utils.hash") + +// SHA384 returns the newHash and validate functions for use +// with SHA384 hashes. SHA384 is used in several key places in Juju. +func SHA384() (newHash func() hash.Hash, validate func([]byte) error) { + const digestLenBytes = 384 / 8 + validate = newSizeChecker(digestLenBytes) + return sha512.New384, validate +} + +func newSizeChecker(size int) func([]byte) error { + return func(sum []byte) error { + if len(sum) < size { + return errors.NewNotValid(nil, "invalid fingerprint (too small)") + } + if len(sum) > size { + return errors.NewNotValid(nil, "invalid fingerprint (too big)") + } + return nil + } +} === added file 'src/github.com/juju/utils/hash/hash_test.go' --- src/github.com/juju/utils/hash/hash_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/hash/hash_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,80 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details.
+ +package hash_test + +import ( + "bytes" + "io" + "io/ioutil" + "strings" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/hash" +) + +var _ = gc.Suite(&HashSuite{}) + +type HashSuite struct { + testing.IsolationSuite +} + +func (s *HashSuite) TestHashingWriter(c *gc.C) { + data := "some data" + newHash, _ := hash.SHA384() + expected, err := hash.GenerateFingerprint(strings.NewReader(data), newHash) + c.Assert(err, jc.ErrorIsNil) + var writer bytes.Buffer + + h := newHash() + hashingWriter := io.MultiWriter(&writer, h) + _, err = hashingWriter.Write([]byte(data)) + c.Assert(err, jc.ErrorIsNil) + fp := hash.NewValidFingerprint(h) + + c.Check(fp, jc.DeepEquals, expected) + c.Check(writer.String(), gc.Equals, data) +} + +func (s *HashSuite) TestHashingReader(c *gc.C) { + expected := "some data" + stub := &testing.Stub{} + reader := &filetesting.StubReader{ + Stub: stub, + ReturnRead: &fakeStream{ + data: expected, + }, + } + + newHash, validate := hash.SHA384() + h := newHash() + hashingReader := io.TeeReader(reader, h) + data, err := ioutil.ReadAll(hashingReader) + c.Assert(err, jc.ErrorIsNil) + fp := hash.NewValidFingerprint(h) + hexSum := fp.Hex() + fpAgain, err := hash.ParseHexFingerprint(hexSum, validate) + c.Assert(err, jc.ErrorIsNil) + + stub.CheckCallNames(c, "Read") // The EOF was mixed with the data. + c.Check(string(data), gc.Equals, expected) + c.Check(fpAgain, jc.DeepEquals, fp) +} + +type fakeStream struct { + data string + pos uint64 +} + +func (f *fakeStream) Read(data []byte) (int, error) { + n := copy(data, f.data[f.pos:]) + f.pos += uint64(n) + if f.pos >= uint64(len(f.data)) { + return n, io.EOF + } + return n, nil +} === modified file 'src/github.com/juju/utils/hash/writer.go' --- src/github.com/juju/utils/hash/writer.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/hash/writer.go 2016-03-22 15:18:22 +0000 @@ -5,17 +5,21 @@ import ( "encoding/base64" - "fmt" "hash" "io" ) +// TODO(ericsnow) Remove HashingWriter and NewHashingWriter(). + // HashingWriter wraps an io.Writer, providing the checksum of all data // written to it. A HashingWriter may be used in place of the writer it // wraps. +// +// Note: HashingWriter is deprecated. Please do not use it. We will +// remove it ASAP. type HashingWriter struct { + hash hash.Hash wrapped io.Writer - hasher hash.Hash } // NewHashingWriter returns a new HashingWriter that wraps the provided @@ -25,33 +29,24 @@ // hw := NewHashingWriter(w, sha1.New()) // io.Copy(hw, reader) // hash := hw.Base64Sum() +// +// Note: NewHashingWriter is deprecated. Please do not use it. We will +// remove it ASAP. func NewHashingWriter(writer io.Writer, hasher hash.Hash) *HashingWriter { - hashingWriter := HashingWriter{ - wrapped: writer, - hasher: hasher, + return &HashingWriter{ + hash: hasher, + wrapped: io.MultiWriter(writer, hasher), } - return &hashingWriter +} + +// Base64Sum returns the base64 encoded hash. +func (hw HashingWriter) Base64Sum() string { + sumBytes := hw.hash.Sum(nil) + return base64.StdEncoding.EncodeToString(sumBytes) } // Write writes to both the wrapped file and the hash. -func (h *HashingWriter) Write(data []byte) (int, error) { - h.hasher.Write(data) - return h.wrapped.Write(data) -} - -// Sum returns the raw checksum. -func (h *HashingWriter) Sum() []byte { - return h.hasher.Sum(nil) -} - -// Base64Sum returns the base64 encoded hash. 
-func (h *HashingWriter) Base64Sum() string { - raw := h.hasher.Sum(nil) - return base64.StdEncoding.EncodeToString(raw) -} - -// HexSum returns the hex-ified checksum. -func (h *HashingWriter) HexSum() string { - raw := h.hasher.Sum(nil) - return fmt.Sprintf("%x", raw) +func (hw *HashingWriter) Write(data []byte) (int, error) { + // No trace because some callers, like ioutil.ReadAll(), won't work. + return hw.wrapped.Write(data) } === modified file 'src/github.com/juju/utils/hash/writer_test.go' --- src/github.com/juju/utils/hash/writer_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/hash/writer_test.go 2016-03-22 15:18:22 +0000 @@ -5,9 +5,11 @@ import ( "bytes" - "errors" + "github.com/juju/errors" "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" gc "gopkg.in/check.v1" "github.com/juju/utils/hash" @@ -17,86 +19,65 @@ type WriterSuite struct { testing.IsolationSuite -} - -type errorWriter struct { - err error -} - -func (w *errorWriter) Write(data []byte) (int, error) { - return 0, w.err -} - -type fakeHasher struct { - bytes.Buffer - sum []byte -} - -func (h *fakeHasher) Sum(b []byte) []byte { - return h.sum -} - -// Not used: -func (h *fakeHasher) Reset() {} -func (h *fakeHasher) Size() int { return -1 } -func (h *fakeHasher) BlockSize() int { return -1 } + + stub *testing.Stub + wBuffer *bytes.Buffer + writer *filetesting.StubWriter + hBuffer *bytes.Buffer + hash *filetesting.StubHash +} + +func (s *WriterSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + + s.stub = &testing.Stub{} + s.wBuffer = new(bytes.Buffer) + s.writer = &filetesting.StubWriter{ + Stub: s.stub, + ReturnWrite: s.wBuffer, + } + s.hBuffer = new(bytes.Buffer) + s.hash = filetesting.NewStubHash(s.stub, s.hBuffer) +} func (s *WriterSuite) TestHashingWriterWriteEmpty(c *gc.C) { - var buf bytes.Buffer - hasher := fakeHasher{} - w := hash.NewHashingWriter(&buf, &hasher) + w := hash.NewHashingWriter(s.writer, s.hash) n, err := w.Write(nil) + c.Assert(err, jc.ErrorIsNil) - c.Check(err, gc.IsNil) + s.stub.CheckCallNames(c, "Write", "Write") c.Check(n, gc.Equals, 0) - c.Check(buf.String(), gc.Equals, "") - c.Check(hasher.String(), gc.Equals, "") + c.Check(s.wBuffer.String(), gc.Equals, "") + c.Check(s.hBuffer.String(), gc.Equals, "") } func (s *WriterSuite) TestHashingWriterWriteSmall(c *gc.C) { - var buf bytes.Buffer - hasher := fakeHasher{} - w := hash.NewHashingWriter(&buf, &hasher) + w := hash.NewHashingWriter(s.writer, s.hash) n, err := w.Write([]byte("spam")) + c.Assert(err, jc.ErrorIsNil) - c.Check(err, gc.IsNil) + s.stub.CheckCallNames(c, "Write", "Write") c.Check(n, gc.Equals, 4) - c.Check(buf.String(), gc.Equals, "spam") - c.Check(hasher.String(), gc.Equals, "spam") + c.Check(s.wBuffer.String(), gc.Equals, "spam") + c.Check(s.hBuffer.String(), gc.Equals, "spam") } func (s *WriterSuite) TestHashingWriterWriteFileError(c *gc.C) { - file := errorWriter{err: errors.New("failed!")} - hasher := fakeHasher{} - w := hash.NewHashingWriter(&file, &hasher) + w := hash.NewHashingWriter(s.writer, s.hash) + failure := errors.New("") + s.stub.SetErrors(failure) + _, err := w.Write([]byte("spam")) - c.Check(err, gc.ErrorMatches, "failed!") -} - -func (s *WriterSuite) TestHashingWriterSum(c *gc.C) { - var buf bytes.Buffer - hasher := fakeHasher{sum: []byte("spam")} - w := hash.NewHashingWriter(&buf, &hasher) - b64hash := string(w.Sum()) - - c.Check(b64hash, gc.Equals, "spam") + s.stub.CheckCallNames(c, "Write") + c.Check(errors.Cause(err), gc.Equals, 
failure) } func (s *WriterSuite) TestHashingWriterBase64Sum(c *gc.C) { - var buf bytes.Buffer - hasher := fakeHasher{sum: []byte("spam")} - w := hash.NewHashingWriter(&buf, &hasher) - b64hash := w.Base64Sum() - - c.Check(b64hash, gc.Equals, "c3BhbQ==") -} - -func (s *WriterSuite) TestHashingWriterHexSum(c *gc.C) { - var buf bytes.Buffer - hasher := fakeHasher{sum: []byte("spam")} - w := hash.NewHashingWriter(&buf, &hasher) - rawhash := w.HexSum() - - c.Check(rawhash, gc.Equals, "7370616d") + s.hash.ReturnSum = []byte("spam") + w := hash.NewHashingWriter(s.writer, s.hash) + b64sum := w.Base64Sum() + + s.stub.CheckCallNames(c, "Sum") + c.Check(b64sum, gc.Equals, "c3BhbQ==") } === modified file 'src/github.com/juju/utils/packaging/config/functions.go' --- src/github.com/juju/utils/packaging/config/functions.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/utils/packaging/config/functions.go 2016-03-22 15:18:22 +0000 @@ -27,3 +27,15 @@ return configureCloudArchiveSourceUbuntu(series) } } + +func RequiresBackports(series string, pkg string) bool { + backportPkgs := backportsBySeries[series] + + for _, backportPkg := range backportPkgs { + if pkg == backportPkg { + return true + } + } + + return false +} === modified file 'src/github.com/juju/utils/packaging/config/functions_test.go' --- src/github.com/juju/utils/packaging/config/functions_test.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/utils/packaging/config/functions_test.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ "github.com/juju/utils/packaging" "github.com/juju/utils/packaging/config" + jc "github.com/juju/testing/checkers" gc "gopkg.in/check.v1" ) @@ -64,3 +65,8 @@ c.Assert(src, gc.Equals, expectedSrc) c.Assert(prefs, gc.Equals, expectedPrefs) } + +func (s *FunctionsSuite) TestRequiresBackportsTrustyLXD(c *gc.C) { + requiresBackports := config.RequiresBackports("trusty", "lxd") + c.Assert(requiresBackports, jc.IsTrue) +} === modified file 'src/github.com/juju/utils/packaging/config/global_constants.go' --- src/github.com/juju/utils/packaging/config/global_constants.go 2015-09-22 15:27:01 +0000 +++ src/github.com/juju/utils/packaging/config/global_constants.go 2016-03-22 15:18:22 +0000 @@ -41,4 +41,8 @@ // for example: "curl", } + + backportsBySeries = map[string][]string{ + "trusty": []string{"lxd"}, + } ) === added file 'src/github.com/juju/utils/relativeurl.go' --- src/github.com/juju/utils/relativeurl.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/relativeurl.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,62 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package utils + +import ( + "strings" + + "github.com/juju/errors" +) + +// RelativeURLPath returns a relative URL path that is lexically +// equivalent to targpath when interpreted by url.URL.ResolveReference. +// On success, the returned path will always be non-empty and relative +// to basePath, even if basePath and targPath share no elements. +// +// It is assumed that both basePath and targPath are normalized +// (have no . or .. elements). +// +// An error is returned if basePath or targPath are not absolute paths. 
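A small usage sketch; the paths come from the test table below, and the second half demonstrates the ResolveReference equivalence the comment promises.

package main

import (
	"fmt"
	"net/url"

	"github.com/juju/utils"
)

func main() {
	rel, err := utils.RelativeURLPath("/foo/bar/baz", "/foo/targ")
	if err != nil {
		panic(err)
	}
	fmt.Println(rel) // ../targ

	// Resolving the relative path against the base recovers the target.
	base := &url.URL{Path: "/foo/bar/baz"}
	fmt.Println(base.ResolveReference(&url.URL{Path: rel}).Path) // /foo/targ
}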
+func RelativeURLPath(basePath, targPath string) (string, error) { + if !strings.HasPrefix(basePath, "/") { + return "", errors.New("non-absolute base URL") + } + if !strings.HasPrefix(targPath, "/") { + return "", errors.New("non-absolute target URL") + } + baseParts := strings.Split(basePath, "/") + targParts := strings.Split(targPath, "/") + + // For the purposes of dotdot, the last element of + // the paths are irrelevant. We save the last part + // of the target path for later. + lastElem := targParts[len(targParts)-1] + baseParts = baseParts[0 : len(baseParts)-1] + targParts = targParts[0 : len(targParts)-1] + + // Find the common prefix between the two paths: + var i int + for ; i < len(baseParts); i++ { + if i >= len(targParts) || baseParts[i] != targParts[i] { + break + } + } + dotdotCount := len(baseParts) - i + targOnly := targParts[i:] + result := make([]string, 0, dotdotCount+len(targOnly)+1) + for i := 0; i < dotdotCount; i++ { + result = append(result, "..") + } + result = append(result, targOnly...) + result = append(result, lastElem) + final := strings.Join(result, "/") + if final == "" { + // If the final result is empty, the last element must + // have been empty, so the target was slash terminated + // and there were no previous elements, so "." + // is appropriate. + final = "." + } + return final, nil +} === added file 'src/github.com/juju/utils/relativeurl_test.go' --- src/github.com/juju/utils/relativeurl_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/relativeurl_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,152 @@ +// Copyright 2016 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package utils_test + +import ( + "net/url" + + jujutesting "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "github.com/juju/utils" +) + +type relativeURLSuite struct { + jujutesting.LoggingSuite +} + +var _ = gc.Suite(&relativeURLSuite{}) + +var relativeURLTests = []struct { + base string + target string + expect string + expectError string +}{{ + expectError: "non-absolute base URL", +}, { + base: "/foo", + expectError: "non-absolute target URL", +}, { + base: "foo", + expectError: "non-absolute base URL", +}, { + base: "/foo", + target: "foo", + expectError: "non-absolute target URL", +}, { + base: "/foo", + target: "/bar", + expect: "bar", +}, { + base: "/foo/", + target: "/bar", + expect: "../bar", +}, { + base: "/bar", + target: "/foo/", + expect: "foo/", +}, { + base: "/foo/", + target: "/bar/", + expect: "../bar/", +}, { + base: "/foo/bar", + target: "/bar/", + expect: "../bar/", +}, { + base: "/foo/bar/", + target: "/bar/", + expect: "../../bar/", +}, { + base: "/foo/bar/baz", + target: "/foo/targ", + expect: "../targ", +}, { + base: "/foo/bar/baz/frob", + target: "/foo/bar/one/two/", + expect: "../one/two/", +}, { + base: "/foo/bar/baz/", + target: "/foo/targ", + expect: "../../targ", +}, { + base: "/foo/bar/baz/frob/", + target: "/foo/bar/one/two/", + expect: "../../one/two/", +}, { + base: "/foo/bar", + target: "/foot/bar", + expect: "../foot/bar", +}, { + base: "/foo/bar/baz/frob", + target: "/foo/bar", + expect: "../../bar", +}, { + base: "/foo/bar/baz/frob/", + target: "/foo/bar", + expect: "../../../bar", +}, { + base: "/foo/bar/baz/frob/", + target: "/foo/bar/", + expect: "../../", +}, { + base: "/foo/bar/baz", + target: "/foo/bar/other", + expect: "other", +}, { + base: "/foo/bar/", + target: "/foo/bar/", + expect: ".", +}, { + base: "/foo/bar", + target: "/foo/bar", + expect: "bar", +}, { + base: "/foo/bar/", + target: 
"/foo/bar/", + expect: ".", +}, { + base: "/foo/bar", + target: "/foo/", + expect: ".", +}, { + base: "/foo", + target: "/", + expect: ".", +}, { + base: "/foo/", + target: "/", + expect: "../", +}, { + base: "/foo/bar", + target: "/", + expect: "../", +}, { + base: "/foo/bar/", + target: "/", + expect: "../../", +}} + +func (*relativeURLSuite) TestRelativeURL(c *gc.C) { + for i, test := range relativeURLTests { + c.Logf("test %d: %q %q", i, test.base, test.target) + // Sanity check the test itself. + if test.expectError == "" { + baseURL := &url.URL{Path: test.base} + expectURL := &url.URL{Path: test.expect} + targetURL := baseURL.ResolveReference(expectURL) + c.Check(targetURL.Path, gc.Equals, test.target, gc.Commentf("resolve reference failure (%q + %q != %q)", test.base, test.expect, test.target)) + } + + result, err := utils.RelativeURLPath(test.base, test.target) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(result, gc.Equals, "") + } else { + c.Assert(err, gc.IsNil) + c.Check(result, gc.Equals, test.expect) + } + } +} === added directory 'src/github.com/juju/utils/series' === added file 'src/github.com/juju/utils/series/export_linux_test.go' --- src/github.com/juju/utils/series/export_linux_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/export_linux_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +var ( + DistroInfo = &distroInfo + ReadSeries = readSeries + OSReleaseFile = &osReleaseFile +) === added file 'src/github.com/juju/utils/series/export_test.go' --- src/github.com/juju/utils/series/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +var ( + KernelToMajor = kernelToMajor + MacOSXSeriesFromKernelVersion = macOSXSeriesFromKernelVersion + MacOSXSeriesFromMajorVersion = macOSXSeriesFromMajorVersion +) + +func SetSeriesVersions(value map[string]string) func() { + origVersions := seriesVersions + origUpdated := updatedseriesVersions + seriesVersions = value + updateVersionSeries() + updatedseriesVersions = len(value) != 0 + return func() { + seriesVersions = origVersions + updateVersionSeries() + updatedseriesVersions = origUpdated + } +} === added file 'src/github.com/juju/utils/series/export_windows_test.go' --- src/github.com/juju/utils/series/export_windows_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/export_windows_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2015 Canonical Ltd. +// Copyright 2015 Cloudbase Solutions SRL +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +var ( + CurrentVersionKey = ¤tVersionKey + IsNanoKey = &isNanoKey + ReadSeries = readSeries +) === added file 'src/github.com/juju/utils/series/package_test.go' --- src/github.com/juju/utils/series/package_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package series_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func Test(t *testing.T) { + gc.TestingT(t) +} === added file 'src/github.com/juju/utils/series/series.go' --- src/github.com/juju/utils/series/series.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,98 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// series provides helpers for determining the series of +// a host, and translating from os to series. +package series + +import ( + "strconv" + "strings" + "sync" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/utils/os" +) + +var logger = loggo.GetLogger("juju.juju.series") + +var HostSeries = hostSeries + +var ( + seriesOnce sync.Once + series string // filled in by the first call to hostSeries +) + +func hostSeries() string { + seriesOnce.Do(func() { + var err error + series, err = readSeries() + if err != nil { + panic("unable to determine host series: " + err.Error()) + } + }) + return series +} + +// MustOSFromSeries will panic if the series represents an "unknown" +// operating system +func MustOSFromSeries(series string) os.OSType { + operatingSystem, err := GetOSFromSeries(series) + if err != nil { + panic("osVersion reported an error: " + err.Error()) + } + return operatingSystem +} + +// kernelToMajor takes a dotted version and returns just the Major portion +func kernelToMajor(getKernelVersion func() (string, error)) (int, error) { + fullVersion, err := getKernelVersion() + if err != nil { + return 0, err + } + parts := strings.SplitN(fullVersion, ".", 2) + majorVersion, err := strconv.ParseInt(parts[0], 10, 32) + if err != nil { + return 0, err + } + return int(majorVersion), nil +} + +func macOSXSeriesFromKernelVersion(getKernelVersion func() (string, error)) (string, error) { + majorVersion, err := kernelToMajor(getKernelVersion) + if err != nil { + logger.Infof("unable to determine OS version: %v", err) + return "unknown", err + } + return macOSXSeriesFromMajorVersion(majorVersion) +} + +// TODO(jam): 2014-05-06 https://launchpad.net/bugs/1316593 +// we should have a system file that we can read so this can be updated without +// recompiling Juju. For now, this is a lot easier, and also solves the fact +// that we want to populate HostSeries during init() time, before +// we've potentially read that information from anywhere else +// macOSXSeries maps from the Darwin Kernel Major Version to the Mac OSX +// series. +var macOSXSeries = map[int]string{ + 15: "elcapitan", + 14: "yosemite", + 13: "mavericks", + 12: "mountainlion", + 11: "lion", + 10: "snowleopard", + 9: "leopard", + 8: "tiger", + 7: "panther", + 6: "jaguar", + 5: "puma", +} + +func macOSXSeriesFromMajorVersion(majorVersion int) (string, error) { + series, ok := macOSXSeries[majorVersion] + if !ok { + return "unknown", errors.Errorf("unknown series %q", series) + } + return series, nil +} === added file 'src/github.com/juju/utils/series/series_darwin.go' --- src/github.com/juju/utils/series/series_darwin.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_darwin.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,17 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +import ( + "syscall" +) + +func sysctlVersion() (string, error) { + return syscall.Sysctl("kern.osrelease") +} + +// readSeries returns the best approximation to what version this machine is. 
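For callers, the typical entry points are HostSeries and MustOSFromSeries; a minimal sketch follows (note that HostSeries panics if the series cannot be determined, per the implementation above).

package main

import (
	"fmt"

	"github.com/juju/utils/series"
)

func main() {
	s := series.HostSeries()
	fmt.Println("series:", s)
	// MustOSFromSeries panics for an unknown series.
	fmt.Println("os:", series.MustOSFromSeries(s))
}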
+func readSeries() (string, error) { + return macOSXSeriesFromKernelVersion(sysctlVersion) +} === added file 'src/github.com/juju/utils/series/series_darwin_test.go' --- src/github.com/juju/utils/series/series_darwin_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_darwin_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,31 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" +) + +type macOSXSeriesSuite struct{} + +var _ = gc.Suite(&macOSXSeriesSuite{}) + +func (*macOSXSeriesSuite) TestGetSysctlVersionPlatform(c *gc.C) { + // Test that sysctlVersion returns something that looks like a dotted revision number + releaseVersion, err := sysctlVersion() + c.Assert(err, jc.ErrorIsNil) + c.Check(releaseVersion, gc.Matches, `\d+\..*`) +} + +func (s *macOSXSeriesSuite) TestOSVersion(c *gc.C) { + knownSeries := make(set.Strings) + for _, series := range macOSXSeries { + knownSeries.Add(series) + } + version, err := readSeries() + c.Assert(err, jc.ErrorIsNil) + c.Check(version, jc.Satisfies, knownSeries.Contains) +} === added file 'src/github.com/juju/utils/series/series_linux.go' --- src/github.com/juju/utils/series/series_linux.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_linux.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,114 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strings" + + jujuos "github.com/juju/utils/os" +) + +var ( + // osReleaseFile is the name of the file that is read in order to determine + // the linux type release version. + osReleaseFile = "/etc/os-release" +) + +func readSeries() (string, error) { + values, err := jujuos.ReadOSRelease(osReleaseFile) + if err != nil { + return "unknown", err + } + updateSeriesVersionsOnce() + return seriesFromOSRelease(values) +} + +func seriesFromOSRelease(values map[string]string) (string, error) { + switch values["ID"] { + case strings.ToLower(jujuos.Ubuntu.String()): + return getValue(ubuntuSeries, values["VERSION_ID"]) + case strings.ToLower(jujuos.Arch.String()): + return getValue(archSeries, values["VERSION_ID"]) + case strings.ToLower(jujuos.CentOS.String()): + codename := fmt.Sprintf("%s%s", values["ID"], values["VERSION_ID"]) + return getValue(centosSeries, codename) + default: + return "unknown", nil + } +} + +func getValue(from map[string]string, val string) (string, error) { + for serie, ver := range from { + if ver == val { + return serie, nil + } + } + return "unknown", errors.New("Could not determine series") +} + +// ReleaseVersion looks for the value of VERSION_ID in the content of +// the os-release. If the value is not found, the file is not found, or +// an error occurs reading the file, an empty string is returned. +func ReleaseVersion() string { + release, err := jujuos.ReadOSRelease(osReleaseFile) + if err != nil { + return "" + } + return release["VERSION_ID"] +} + +func updateLocalSeriesVersions() error { + return updateDistroInfo() +} + +var distroInfo = "/usr/share/distro-info/ubuntu.csv" + +// updateDistroInfo updates seriesVersions from /usr/share/distro-info/ubuntu.csv if possible.. +func updateDistroInfo() error { + // We need to find the series version eg 12.04 from the series eg precise. 
Use the information found in + // /usr/share/distro-info/ubuntu.csv provided by distro-info-data package. + f, err := os.Open(distroInfo) + if err != nil { + // On non-Ubuntu systems this file won't exist but that's expected. + return nil + } + defer f.Close() + bufRdr := bufio.NewReader(f) + // Only find info for precise or later. + // TODO: only add in series that are supported (i.e. before end of life) + preciseOrLaterFound := false + for { + line, err := bufRdr.ReadString('\n') + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("reading distro info file: %v", err) + } + // Lines are of the form: "12.04 LTS,Precise Pangolin,precise,2011-10-13,2012-04-26,2017-04-26" + parts := strings.Split(line, ",") + // Ignore any malformed lines. + if len(parts) < 3 { + continue + } + series := parts[2] + if series == "precise" { + preciseOrLaterFound = true + } + if series != "precise" && !preciseOrLaterFound { + continue + } + // The numeric version may contain an LTS moniker, so strip that out. + seriesInfo := strings.Split(parts[0], " ") + seriesVersions[series] = seriesInfo[0] + ubuntuSeries[series] = seriesInfo[0] + } + updateVersionSeries() + return nil +} === added file 'src/github.com/juju/utils/series/series_linux_test.go' --- src/github.com/juju/utils/series/series_linux_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_linux_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,231 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series_test + +import ( + "io/ioutil" + "path/filepath" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/series" +) + +type linuxVersionSuite struct { + testing.CleanupSuite +} + +var futureReleaseFileContents = `NAME="Ubuntu" +VERSION="99.04 LTS, Star Trek" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu spock (99.04 LTS)" +VERSION_ID="99.04" +` + +var distroInfoContents = `version,codename,series,created,release,eol,eol-server +12.04 LTS,Precise Pangolin,precise,2011-10-13,2012-04-26,2017-04-26 +99.04,Star Trek,spock,2364-04-25,2364-10-17,2365-07-17 +` + +var _ = gc.Suite(&linuxVersionSuite{}) + +func (s *linuxVersionSuite) SetUpTest(c *gc.C) { + s.CleanupSuite.SetUpTest(c) + + cleanup := series.SetSeriesVersions(make(map[string]string)) + s.AddCleanup(func(*gc.C) { cleanup() }) +} + +func (s *linuxVersionSuite) TestOSVersion(c *gc.C) { + // Set up fake /etc/os-release file from the future. + d := c.MkDir() + release := filepath.Join(d, "future-release") + s.PatchValue(series.OSReleaseFile, release) + err := ioutil.WriteFile(release, []byte(futureReleaseFileContents), 0666) + c.Assert(err, jc.ErrorIsNil) + + // Set up fake /usr/share/distro-info/ubuntu.csv, also from the future. + distroInfo := filepath.Join(d, "ubuntu.csv") + err = ioutil.WriteFile(distroInfo, []byte(distroInfoContents), 0644) + c.Assert(err, jc.ErrorIsNil) + s.PatchValue(series.DistroInfo, distroInfo) + + // Ensure the future series can be read even though Juju doesn't + // know about it.
+ version, err := series.ReadSeries() + c.Assert(err, jc.ErrorIsNil) + c.Assert(version, gc.Equals, "spock") +} + +func (s *linuxVersionSuite) TestUseFastLXC(c *gc.C) { + for i, test := range []struct { + message string + releaseContent string + expected string + }{{ + message: "missing release file", + }, { + message: "OS release file is missing ID", + releaseContent: "some junk\nand more junk", + }, { + message: "precise release", + releaseContent: ` +NAME="Ubuntu" +VERSION="12.04 LTS, Precise" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 12.04.3 LTS" +VERSION_ID="12.04" +`, + expected: "12.04", + }, { + message: "trusty release", + releaseContent: ` +NAME="Ubuntu" +VERSION="14.04.1 LTS, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04.1 LTS" +VERSION_ID="14.04" +`, + expected: "14.04", + }, { + message: "minimal trusty release", + releaseContent: ` +ID=ubuntu +VERSION_ID="14.04" +`, + expected: "14.04", + }, { + message: "minimal unstable unicorn", + releaseContent: ` +ID=ubuntu +VERSION_ID="14.10" +`, + expected: "14.10", + }, { + message: "minimal jaunty", + releaseContent: ` +ID=ubuntu +VERSION_ID="9.10" +`, + expected: "9.10", + }} { + c.Logf("%v: %v", i, test.message) + filename := filepath.Join(c.MkDir(), "os-release") + s.PatchValue(series.OSReleaseFile, filename) + if test.releaseContent != "" { + err := ioutil.WriteFile(filename, []byte(test.releaseContent+"\n"), 0644) + c.Assert(err, jc.ErrorIsNil) + } + value := series.ReleaseVersion() + c.Assert(value, gc.Equals, test.expected) + } +} + +type readSeriesSuite struct { + testing.CleanupSuite +} + +var _ = gc.Suite(&readSeriesSuite{}) + +var readSeriesTests = []struct { + contents string + series string + err string +}{{ + `NAME="Ubuntu" +VERSION="12.04.5 LTS, Precise Pangolin" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu precise (12.04.5 LTS)" +VERSION_ID="12.04" +`, + "precise", + "", +}, { + `NAME="Ubuntu" +ID=ubuntu +VERSION_ID= "12.04" `, + "precise", + "", +}, { + `NAME='Ubuntu' +ID='ubuntu' +VERSION_ID='12.04' +`, + "precise", + "", +}, { + `NAME="CentOS Linux" +ID="centos" +VERSION_ID="7" +`, + "centos7", + "", +}, { + `NAME="Arch Linux" +ID=arch +PRETTY_NAME="Arch Linux" +ANSI_COLOR="0;36" +HOME_URL="https://www.archlinux.org/" +SUPPORT_URL="https://bbs.archlinux.org/" +BUG_REPORT_URL="https://bugs.archlinux.org/" +`, + "arch", + "", +}, { + `NAME="Ubuntu" +VERSION="14.04.1 LTS, Trusty Tahr" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 14.04.1 LTS" +VERSION_ID="14.04" +HOME_URL="http://www.ubuntu.com/" +SUPPORT_URL="http://help.ubuntu.com/" +BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/" +`, + "trusty", + "", +}, { + "", + "unknown", + "OS release file is missing ID", +}, { + `NAME="CentOS Linux" +ID="centos" +`, + "unknown", + "OS release file is missing VERSION_ID", +}, { + `NAME="SuSE Linux" +ID="SuSE" +VERSION_ID="12" +`, + "unknown", + "", +}, +} + +func (s *readSeriesSuite) TestReadSeries(c *gc.C) { + d := c.MkDir() + f := filepath.Join(d, "foo") + s.PatchValue(series.OSReleaseFile, f) + for i, t := range readSeriesTests { + c.Logf("test %d", i) + err := ioutil.WriteFile(f, []byte(t.contents), 0666) + c.Assert(err, jc.ErrorIsNil) + series, err := series.ReadSeries() + if t.err == "" { + c.Assert(err, jc.ErrorIsNil) + } else { + c.Assert(err, gc.ErrorMatches, t.err) + } + + c.Assert(series, gc.Equals, t.series) + } +} === added file 'src/github.com/juju/utils/series/series_nonlinux.go' --- src/github.com/juju/utils/series/series_nonlinux.go 1970-01-01 00:00:00 +0000 +++ 
src/github.com/juju/utils/series/series_nonlinux.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,17 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// +build !linux + +package series + +// TODO(ericsnow) Refactor dependents so we can remove this for non-linux. + +// ReleaseVersion is a function that has no meaning except on linux. +func ReleaseVersion() string { + return "" +} + +func updateLocalSeriesVersions() error { + return nil +} === added file 'src/github.com/juju/utils/series/series_test.go' --- src/github.com/juju/utils/series/series_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series_test + +import ( + "fmt" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/series" +) + +type kernelVersionSuite struct { + testing.LoggingSuite +} + +var _ = gc.Suite(&kernelVersionSuite{}) + +func sysctlMacOS10dot9dot2() (string, error) { + // My 10.9.2 Mac gives "13.1.0" as the kernel version + return "13.1.0", nil +} + +func sysctlError() (string, error) { + return "", fmt.Errorf("no such syscall") +} + +func (*kernelVersionSuite) TestKernelToMajorVersion(c *gc.C) { + majorVersion, err := series.KernelToMajor(sysctlMacOS10dot9dot2) + c.Assert(err, jc.ErrorIsNil) + c.Check(majorVersion, gc.Equals, 13) +} + +func (*kernelVersionSuite) TestKernelToMajorVersionError(c *gc.C) { + majorVersion, err := series.KernelToMajor(sysctlError) + c.Assert(err, gc.ErrorMatches, "no such syscall") + c.Check(majorVersion, gc.Equals, 0) +} + +func (*kernelVersionSuite) TestKernelToMajorVersionNoDots(c *gc.C) { + majorVersion, err := series.KernelToMajor(func() (string, error) { + return "1234", nil + }) + c.Assert(err, jc.ErrorIsNil) + c.Check(majorVersion, gc.Equals, 1234) +} + +func (*kernelVersionSuite) TestKernelToMajorVersionNotInt(c *gc.C) { + majorVersion, err := series.KernelToMajor(func() (string, error) { + return "a.b.c", nil + }) + c.Assert(err, gc.ErrorMatches, `strconv.ParseInt: parsing "a": invalid syntax`) + c.Check(majorVersion, gc.Equals, 0) +} + +func (*kernelVersionSuite) TestKernelToMajorVersionEmpty(c *gc.C) { + majorVersion, err := series.KernelToMajor(func() (string, error) { + return "", nil + }) + c.Assert(err, gc.ErrorMatches, `strconv.ParseInt: parsing "": invalid syntax`) + c.Check(majorVersion, gc.Equals, 0) +} + +func (*kernelVersionSuite) TestMacOSXSeriesFromKernelVersion(c *gc.C) { + series, err := series.MacOSXSeriesFromKernelVersion(sysctlMacOS10dot9dot2) + c.Assert(err, jc.ErrorIsNil) + c.Check(series, gc.Equals, "mavericks") +} + +func (*kernelVersionSuite) TestMacOSXSeriesFromKernelVersionError(c *gc.C) { + // We suppress the actual error in favor of returning "unknown", but we + // do log the error + series, err := series.MacOSXSeriesFromKernelVersion(sysctlError) + c.Assert(err, gc.ErrorMatches, "no such syscall") + c.Assert(series, gc.Equals, "unknown") + c.Check(c.GetTestLog(), gc.Matches, ".* juju.juju.series unable to determine OS version: no such syscall\n") +} + +func (*kernelVersionSuite) TestMacOSXSeries(c *gc.C) { + tests := []struct { + version int + series string + err string + }{ + {version: 13, series: "mavericks"}, + {version: 12, series: "mountainlion"}, + {version: 14, series: "yosemite"}, + {version: 15, series: "elcapitan"}, + {version: 16, series: 
"unknown", err: `unknown series ""`}, + {version: 4, series: "unknown", err: `unknown series ""`}, + {version: 0, series: "unknown", err: `unknown series ""`}, + } + for _, test := range tests { + series, err := series.MacOSXSeriesFromMajorVersion(test.version) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, jc.ErrorIsNil) + } + c.Check(series, gc.Equals, test.series) + } +} === added file 'src/github.com/juju/utils/series/series_windows.go' --- src/github.com/juju/utils/series/series_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +// Copyright 2015 Canonical Ltd. +// Copyright 2015 Cloudbase Solutions SRL +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +import ( + "os" + "strings" + + "github.com/gabriel-samfira/sys/windows/registry" + "github.com/juju/errors" +) + +var ( + // currentVersionKey is defined as a variable instead of a constant + // to allow overwriting during testing + currentVersionKey = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion" + + // isNanoKey determines the registry key that can be queried to determine whether + // a machine is a nano machine + isNanoKey = "Software\\Microsoft\\Windows NT\\CurrentVersion\\Server\\ServerLevels" +) + +func getVersionFromRegistry() (string, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, currentVersionKey, registry.QUERY_VALUE) + if err != nil { + return "", errors.Trace(err) + } + defer k.Close() + s, _, err := k.GetStringValue("ProductName") + if err != nil { + return "", errors.Trace(err) + } + + return s, nil +} + +func readSeries() (string, error) { + ver, err := getVersionFromRegistry() + if err != nil { + return "unknown", errors.Trace(err) + } + + var lookAt = windowsVersions + + isNano, err := isWindowsNano() + if err != nil && os.IsNotExist(err) { + return "unknown", errors.Trace(err) + } + if isNano { + lookAt = windowsNanoVersions + } + + for _, value := range windowsVersionMatchOrder { + if strings.HasPrefix(ver, value) { + if val, ok := lookAt[value]; ok { + return val, nil + } + } + } + return "unknown", errors.Errorf("unknown series %q", ver) +} + +func isWindowsNano() (bool, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, isNanoKey, registry.QUERY_VALUE) + if err != nil { + return false, errors.Trace(err) + } + defer k.Close() + + s, _, err := k.GetIntegerValue("NanoServer") + if err != nil { + return false, errors.Trace(err) + } + return s == 1, nil +} === added file 'src/github.com/juju/utils/series/series_windows_test.go' --- src/github.com/juju/utils/series/series_windows_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/series_windows_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,161 @@ +// Copyright 2015 Canonical Ltd. +// Copyright 2015 Cloudbase Solutions SRL +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package series_test + +import ( + "fmt" + + "github.com/gabriel-samfira/sys/windows/registry" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils" + "github.com/juju/utils/series" +) + +type windowsSeriesSuite struct { + testing.CleanupSuite +} + +var _ = gc.Suite(&windowsSeriesSuite{}) + +var versionTests = []struct { + version string + want string +}{ + { + "Hyper-V Server 2012 R2", + "win2012hvr2", + }, + { + "Hyper-V Server 2012", + "win2012hv", + }, + { + "Windows Server 2012 R2", + "win2012r2", + }, + { + "Windows Server 2012", + "win2012", + }, + { + "Windows Server 2012 R2 Datacenter", + "win2012r2", + }, + { + "Windows Server 2012 Standard", + "win2012", + }, + { + "Windows Storage Server 2012 R2", + "win2012r2", + }, + { + "Windows Storage Server 2012 Standard", + "win2012", + }, + { + "Windows Storage Server 2012 R2 Standard", + "win2012r2", + }, + { + "Windows 7 Home", + "win7", + }, + { + "Windows 8 Pro", + "win8", + }, + { + "Windows 8.1 Pro", + "win81", + }, +} + +func (s *windowsSeriesSuite) SetUpTest(c *gc.C) { + s.CleanupSuite.SetUpTest(c) + s.createRegKey(c, series.CurrentVersionKey) +} + +func (s *windowsSeriesSuite) createRegKey(c *gc.C, key *string) { + salt, err := utils.RandomPassword() + c.Assert(err, jc.ErrorIsNil) + regKey := fmt.Sprintf(`SOFTWARE\JUJU\%s`, salt) + s.PatchValue(key, regKey) + + k, _, err := registry.CreateKey(registry.LOCAL_MACHINE, *key, registry.ALL_ACCESS) + c.Assert(err, jc.ErrorIsNil) + + err = k.Close() + c.Assert(err, jc.ErrorIsNil) + + s.AddCleanup(func(*gc.C) { + registry.DeleteKey(registry.LOCAL_MACHINE, *series.CurrentVersionKey) + }) +} + +func (s *windowsSeriesSuite) TestReadSeries(c *gc.C) { + for _, value := range versionTests { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, *series.CurrentVersionKey, registry.ALL_ACCESS) + c.Assert(err, jc.ErrorIsNil) + + err = k.SetStringValue("ProductName", value.version) + c.Assert(err, jc.ErrorIsNil) + + err = k.Close() + c.Assert(err, jc.ErrorIsNil) + + ver, err := series.ReadSeries() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ver, gc.Equals, value.want) + } +} + +type windowsNanoSeriesSuite struct { + windowsSeriesSuite +} + +var _ = gc.Suite(&windowsNanoSeriesSuite{}) + +func (s *windowsNanoSeriesSuite) SetUpTest(c *gc.C) { + s.windowsSeriesSuite.SetUpTest(c) + s.createRegKey(c, series.IsNanoKey) + + k, err := registry.OpenKey(registry.LOCAL_MACHINE, *series.IsNanoKey, registry.ALL_ACCESS) + c.Assert(err, jc.ErrorIsNil) + + err = k.SetDWordValue("NanoServer", 1) + c.Assert(err, jc.ErrorIsNil) + + err = k.Close() + c.Assert(err, jc.ErrorIsNil) +} + +var nanoVersionTests = []struct { + version string + want string +}{{ + "Windows Server 2016", + "win2016nano", +}} + +func (s *windowsNanoSeriesSuite) TestReadSeries(c *gc.C) { + for _, value := range nanoVersionTests { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, *series.CurrentVersionKey, registry.ALL_ACCESS) + c.Assert(err, jc.ErrorIsNil) + + err = k.SetStringValue("ProductName", value.version) + c.Assert(err, jc.ErrorIsNil) + + err = k.Close() + c.Assert(err, jc.ErrorIsNil) + + ver, err := series.ReadSeries() + c.Assert(err, jc.ErrorIsNil) + c.Assert(ver, gc.Equals, value.want) + } +} === added file 'src/github.com/juju/utils/series/supportedseries.go' --- src/github.com/juju/utils/series/supportedseries.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/supportedseries.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,290 @@ +// Copyright 2014 Canonical 
Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series + +import ( + "sync" + + "github.com/juju/errors" + "github.com/juju/utils/os" +) + +type unknownOSForSeriesError string + +func (e unknownOSForSeriesError) Error() string { + return `unknown OS for series: "` + string(e) + `"` +} + +// IsUnknownOSForSeriesError returns true if err is of type unknownOSForSeriesError. +func IsUnknownOSForSeriesError(err error) bool { + _, ok := errors.Cause(err).(unknownOSForSeriesError) + return ok +} + +type unknownSeriesVersionError string + +func (e unknownSeriesVersionError) Error() string { + return `unknown version for series: "` + string(e) + `"` +} + +// IsUnknownSeriesVersionError returns true if err is of type unknownSeriesVersionError. +func IsUnknownSeriesVersionError(err error) bool { + _, ok := errors.Cause(err).(unknownSeriesVersionError) + return ok +} + +type unknownVersionSeriesError string + +func (e unknownVersionSeriesError) Error() string { + return `unknown series for version: "` + string(e) + `"` +} + +// IsUnknownVersionSeriesError returns true if err is of type unknownVersionSeriesError. +func IsUnknownVersionSeriesError(err error) bool { + _, ok := errors.Cause(err).(unknownVersionSeriesError) + return ok +} + +var defaultVersionIDs = map[string]string{ + "arch": "rolling", +} + +// seriesVersions provides a mapping between series names and versions. +// The values here are current as of the time of writing. On Ubuntu systems, we update +// these values from /usr/share/distro-info/ubuntu.csv to ensure we have the latest values. +// On non-Ubuntu systems, these values provide a nice fallback option. +// Tests can change the values (via SetSeriesVersions in export_test.go) to ensure the +// distro-info lookup works. +var seriesVersions = map[string]string{ + "precise": "12.04", + "quantal": "12.10", + "raring": "13.04", + "saucy": "13.10", + "trusty": "14.04", + "utopic": "14.10", + "vivid": "15.04", + "win2012hvr2": "win2012hvr2", + "win2012hv": "win2012hv", + "win2012r2": "win2012r2", + "win2012": "win2012", + "win2016": "win2016", + "win2016nano": "win2016nano", + "win7": "win7", + "win8": "win8", + "win81": "win81", + "win10": "win10", + "centos7": "centos7", + "arch": "rolling", +} + +// versionSeries provides a mapping between versions and series names. +var versionSeries = reverseSeriesVersion() + +var centosSeries = map[string]string{ + "centos7": "centos7", +} + +var archSeries = map[string]string{ + "arch": "rolling", +} + +var ubuntuSeries = map[string]string{ + "precise": "12.04", + "quantal": "12.10", + "raring": "13.04", + "saucy": "13.10", + "trusty": "14.04", + "utopic": "14.10", + "vivid": "15.04", +} + +// Windows versions come in various flavors: +// Standard, Datacenter, etc. We use string prefix matching to map them to one +// of the following. Specify the longest name in a particular series first. +// For example, if we have "Win 2012" and "Win 2012 R2", we specify "Win 2012 R2" first. +// We need to make sure we manually update this list with each new windows release.
+var windowsVersionMatchOrder = []string{ + "Hyper-V Server 2012 R2", + "Hyper-V Server 2012", + "Windows Server 2012 R2", + "Windows Server 2012", + "Windows Server 2016", + "Windows Storage Server 2012 R2", + "Windows Storage Server 2012", + "Windows 7", + "Windows 8.1", + "Windows 8", + "Windows 10", +} + +// windowsVersions is a mapping consisting of the output from +// the following WMI query: (gwmi Win32_OperatingSystem).Name +var windowsVersions = map[string]string{ + "Hyper-V Server 2012 R2": "win2012hvr2", + "Hyper-V Server 2012": "win2012hv", + "Windows Server 2012 R2": "win2012r2", + "Windows Server 2012": "win2012", + "Windows Server 2016": "win2016", + "Windows Storage Server 2012 R2": "win2012r2", + "Windows Storage Server 2012": "win2012", + "Windows 7": "win7", + "Windows 8.1": "win81", + "Windows 8": "win8", + "Windows 10": "win10", +} + +// windowsNanoVersions maps the product name stored in the registry +// to a Juju-defined nano series. So far the nano product name is +// identical to the corresponding main Windows version; the fact that +// an installation is nano is stored elsewhere in the registry. +var windowsNanoVersions = map[string]string{ + "Windows Server 2016": "win2016nano", +} + +// IsWindowsNano tells us whether the provided series is a +// nano series. It may seem futile at this point, but more +// nano series will come up with time. +// This is here and not in a Windows-specific package +// because we might want to make decisions dependent on +// whether we have a nano series or not in more general code. +func IsWindowsNano(series string) bool { + for _, val := range windowsNanoVersions { + if val == series { + return true + } + } + return false +} + +// GetOSFromSeries will return the operating system based +// on the series that is passed to it. +func GetOSFromSeries(series string) (os.OSType, error) { + if series == "" { + return os.Unknown, errors.NotValidf("series %q", series) + } + if _, ok := ubuntuSeries[series]; ok { + return os.Ubuntu, nil + } + if _, ok := centosSeries[series]; ok { + return os.CentOS, nil + } + if _, ok := archSeries[series]; ok { + return os.Arch, nil + } + for _, val := range windowsVersions { + if val == series { + return os.Windows, nil + } + } + for _, val := range windowsNanoVersions { + if val == series { + return os.Windows, nil + } + } + for _, val := range macOSXSeries { + if val == series { + return os.OSX, nil + } + } + return os.Unknown, errors.Trace(unknownOSForSeriesError(series)) +} + +var ( + seriesVersionsMutex sync.Mutex +) + +// SeriesVersion returns the version for the specified series. +func SeriesVersion(series string) (string, error) { + if series == "" { + return "", errors.Trace(unknownSeriesVersionError("")) + } + seriesVersionsMutex.Lock() + defer seriesVersionsMutex.Unlock() + if vers, ok := seriesVersions[series]; ok { + return vers, nil + } + updateSeriesVersionsOnce() + if vers, ok := seriesVersions[series]; ok { + return vers, nil + } + + return "", errors.Trace(unknownSeriesVersionError(series)) +} + +// VersionSeries returns the series (e.g. trusty) for the specified version (e.g. 14.04).
+func VersionSeries(version string) (string, error) { + if version == "" { + return "", errors.Trace(unknownVersionSeriesError("")) + } + seriesVersionsMutex.Lock() + defer seriesVersionsMutex.Unlock() + if series, ok := versionSeries[version]; ok { + return series, nil + } + updateSeriesVersionsOnce() + if series, ok := versionSeries[version]; ok { + return series, nil + } + return "", errors.Trace(unknownVersionSeriesError(version)) +} + +func updateVersionSeries() { + versionSeries = reverseSeriesVersion() +} + +// reverseSeriesVersion returns reverse of seriesVersion map, +// keyed on versions with series as values. +func reverseSeriesVersion() map[string]string { + reverse := make(map[string]string, len(seriesVersions)) + for k, v := range seriesVersions { + reverse[v] = k + } + return reverse +} + +// SupportedSeries returns the series on which we can run Juju workloads. +func SupportedSeries() []string { + seriesVersionsMutex.Lock() + defer seriesVersionsMutex.Unlock() + updateSeriesVersionsOnce() + var series []string + for s := range seriesVersions { + series = append(series, s) + } + return series +} + +// OSSupportedSeries returns the series of the specified OS on which we +// can run Juju workloads. +func OSSupportedSeries(os os.OSType) []string { + var osSeries []string + for _, series := range SupportedSeries() { + seriesOS, err := GetOSFromSeries(series) + if err != nil || seriesOS != os { + continue + } + osSeries = append(osSeries, series) + } + return osSeries +} + +// UpdateSeriesVersions forces an update of the series versions by querying +// distro-info if possible. +func UpdateSeriesVersions() error { + seriesVersionsMutex.Lock() + defer seriesVersionsMutex.Unlock() + return updateLocalSeriesVersions() +} + +var updatedseriesVersions bool + +func updateSeriesVersionsOnce() { + if !updatedseriesVersions { + if err := updateLocalSeriesVersions(); err != nil { + logger.Warningf("failed to update distro info: %v", err) + } + updatedseriesVersions = true + } +} === added file 'src/github.com/juju/utils/series/supportedseries_linux_test.go' --- src/github.com/juju/utils/series/supportedseries_linux_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/supportedseries_linux_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,95 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package series_test + +import ( + "io/ioutil" + "path/filepath" + "sort" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/os" + "github.com/juju/utils/series" +) + +func (s *supportedSeriesSuite) TestSeriesVersion(c *gc.C) { + // There is no distro-info on Windows or CentOS. 
+ if os.HostOS() != os.Ubuntu { + c.Skip("This test is only relevant on Ubuntu.") + } + vers, err := series.SeriesVersion("precise") + if err != nil && err.Error() == `invalid series "precise"` { + c.Fatalf(`Unable to lookup series "precise", you may need to: apt-get install distro-info`) + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(vers, gc.Equals, "12.04") +} + +func (s *supportedSeriesSuite) TestSupportedSeries(c *gc.C) { + d := c.MkDir() + filename := filepath.Join(d, "ubuntu.csv") + err := ioutil.WriteFile(filename, []byte(distInfoData), 0644) + c.Assert(err, jc.ErrorIsNil) + s.PatchValue(series.DistroInfo, filename) + + expectedSeries := []string{"precise", "quantal", "raring", "saucy"} + series := series.SupportedSeries() + sort.Strings(series) + c.Assert(series, gc.DeepEquals, expectedSeries) +} + +func (s *supportedSeriesSuite) TestUpdateSeriesVersions(c *gc.C) { + d := c.MkDir() + filename := filepath.Join(d, "ubuntu.csv") + err := ioutil.WriteFile(filename, []byte(distInfoData), 0644) + c.Assert(err, jc.ErrorIsNil) + s.PatchValue(series.DistroInfo, filename) + + expectedSeries := []string{"precise", "quantal", "raring", "saucy"} + checkSeries := func() { + series := series.SupportedSeries() + sort.Strings(series) + c.Assert(series, gc.DeepEquals, expectedSeries) + } + checkSeries() + + // Updating the file does not normally trigger an update; + // we only refresh automatically one time. After that, we + // must explicitly refresh. + err = ioutil.WriteFile(filename, []byte(distInfoData2), 0644) + c.Assert(err, jc.ErrorIsNil) + checkSeries() + + expectedSeries = append(expectedSeries, "trusty") + series.UpdateSeriesVersions() + checkSeries() +} + +const distInfoData = `version,codename,series,created,release,eol,eol-server +4.10,Warty Warthog,warty,2004-03-05,2004-10-20,2006-04-30 +5.04,Hoary Hedgehog,hoary,2004-10-20,2005-04-08,2006-10-31 +5.10,Breezy Badger,breezy,2005-04-08,2005-10-12,2007-04-13 +6.06 LTS,Dapper Drake,dapper,2005-10-12,2006-06-01,2009-07-14,2011-06-01 +6.10,Edgy Eft,edgy,2006-06-01,2006-10-26,2008-04-25 +7.04,Feisty Fawn,feisty,2006-10-26,2007-04-19,2008-10-19 +7.10,Gutsy Gibbon,gutsy,2007-04-19,2007-10-18,2009-04-18 +8.04 LTS,Hardy Heron,hardy,2007-10-18,2008-04-24,2011-05-12,2013-05-09 +8.10,Intrepid Ibex,intrepid,2008-04-24,2008-10-30,2010-04-30 +9.04,Jaunty Jackalope,jaunty,2008-10-30,2009-04-23,2010-10-23 +9.10,Karmic Koala,karmic,2009-04-23,2009-10-29,2011-04-29 +10.04 LTS,Lucid Lynx,lucid,2009-10-29,2010-04-29,2013-05-09,2015-04-29 +10.10,Maverick Meerkat,maverick,2010-04-29,2010-10-10,2012-04-10 +11.04,Natty Narwhal,natty,2010-10-10,2011-04-28,2012-10-28 +11.10,Oneiric Ocelot,oneiric,2011-04-28,2011-10-13,2013-05-09 +12.04 LTS,Precise Pangolin,precise,2011-10-13,2012-04-26,2017-04-26 +12.10,Quantal Quetzal,quantal,2012-04-26,2012-10-18,2014-04-18 +13.04,Raring Ringtail,raring,2012-10-18,2013-04-25,2014-01-27 +13.10,Saucy Salamander,saucy,2013-04-25,2013-10-17,2014-07-17 +` + +const distInfoData2 = distInfoData + ` +14.04 LTS,Trusty Tahr,trusty,2013-10-17,2014-04-17,2019-04-17 +` === added file 'src/github.com/juju/utils/series/supportedseries_test.go' --- src/github.com/juju/utils/series/supportedseries_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/supportedseries_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,136 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
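The refresh semantics exercised by TestUpdateSeriesVersions above boil down to a mutex-guarded table that reloads itself once on first use and thereafter only on explicit request. A minimal sketch of that pattern, using hypothetical names (versionTable, lookup, refresh) rather than the package's unexported state:

package main

import (
	"fmt"
	"sync"
)

type versionTable struct {
	mu      sync.Mutex
	data    map[string]string
	updated bool
	load    func() map[string]string
}

func (t *versionTable) lookup(series string) (string, bool) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.updated {
		t.data = t.load() // one-time automatic refresh on first use
		t.updated = true
	}
	v, ok := t.data[series]
	return v, ok
}

func (t *versionTable) refresh() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.data = t.load() // an explicit refresh always reloads
	t.updated = true
}

func main() {
	source := map[string]string{"trusty": "14.04"}
	load := func() map[string]string {
		copied := make(map[string]string, len(source))
		for k, v := range source {
			copied[k] = v
		}
		return copied
	}
	t := &versionTable{load: load}
	fmt.Println(t.lookup("trusty")) // 14.04 true
	source["xenial"] = "16.04"
	fmt.Println(t.lookup("xenial")) // "" false: no automatic re-read
	t.refresh()
	fmt.Println(t.lookup("xenial")) // 16.04 true
}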
+ +package series_test + +import ( + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/os" + "github.com/juju/utils/series" +) + +type supportedSeriesSuite struct { + testing.CleanupSuite +} + +var _ = gc.Suite(&supportedSeriesSuite{}) + +func (s *supportedSeriesSuite) SetUpTest(c *gc.C) { + s.CleanupSuite.SetUpTest(c) + cleanup := series.SetSeriesVersions(make(map[string]string)) + s.AddCleanup(func(*gc.C) { cleanup() }) +} + +var getOSFromSeriesTests = []struct { + series string + want os.OSType + err string +}{{ + series: "precise", + want: os.Ubuntu, +}, { + series: "win2012r2", + want: os.Windows, +}, { + series: "win2016nano", + want: os.Windows, +}, { + series: "mountainlion", + want: os.OSX, +}, { + series: "centos7", + want: os.CentOS, +}, { + series: "arch", + want: os.Arch, +}, { + series: "", + err: "series \"\" not valid", +}, +} + +func (s *supportedSeriesSuite) TestGetOSFromSeries(c *gc.C) { + for _, t := range getOSFromSeriesTests { + got, err := series.GetOSFromSeries(t.series) + if t.err != "" { + c.Assert(err, gc.ErrorMatches, t.err) + } else { + c.Check(err, jc.ErrorIsNil) + c.Assert(got, gc.Equals, t.want) + } + } +} + +func (s *supportedSeriesSuite) TestUnknownOSFromSeries(c *gc.C) { + _, err := series.GetOSFromSeries("Xuanhuaceratops") + c.Assert(err, jc.Satisfies, series.IsUnknownOSForSeriesError) + c.Assert(err, gc.ErrorMatches, `unknown OS for series: "Xuanhuaceratops"`) +} + +func setSeriesTestData() { + series.SetSeriesVersions(map[string]string{ + "trusty": "14.04", + "utopic": "14.10", + "win7": "win7", + "win81": "win81", + "win2016nano": "win2016nano", + "centos7": "centos7", + "arch": "rolling", + }) +} + +func (s *supportedSeriesSuite) TestOSSupportedSeries(c *gc.C) { + setSeriesTestData() + supported := series.OSSupportedSeries(os.Ubuntu) + c.Assert(supported, jc.SameContents, []string{"trusty", "utopic"}) + supported = series.OSSupportedSeries(os.Windows) + c.Assert(supported, jc.SameContents, []string{"win7", "win81", "win2016nano"}) + supported = series.OSSupportedSeries(os.CentOS) + c.Assert(supported, jc.SameContents, []string{"centos7"}) + supported = series.OSSupportedSeries(os.Arch) + c.Assert(supported, jc.SameContents, []string{"arch"}) +} + +func (s *supportedSeriesSuite) TestVersionSeriesValid(c *gc.C) { + setSeriesTestData() + seriesResult, err := series.VersionSeries("14.04") + c.Assert(err, jc.ErrorIsNil) + c.Assert("trusty", gc.DeepEquals, seriesResult) +} + +func (s *supportedSeriesSuite) TestVersionSeriesEmpty(c *gc.C) { + setSeriesTestData() + _, err := series.VersionSeries("") + c.Assert(err, gc.ErrorMatches, `.*unknown series for version: "".*`) +} + +func (s *supportedSeriesSuite) TestVersionSeriesInvalid(c *gc.C) { + setSeriesTestData() + _, err := series.VersionSeries("73655") + c.Assert(err, gc.ErrorMatches, `.*unknown series for version: "73655".*`) +} + +func (s *supportedSeriesSuite) TestSeriesVersionEmpty(c *gc.C) { + setSeriesTestData() + _, err := series.SeriesVersion("") + c.Assert(err, gc.ErrorMatches, `.*unknown version for series: "".*`) +} + +func (s *supportedSeriesSuite) TestIsWindowsNano(c *gc.C) { + var isWindowsNanoTests = []struct { + series string + expected bool + }{ + {"win2016nano", true}, + {"win2016", false}, + {"win2012r2", false}, + {"trusty", false}, + } + + for _, t := range isWindowsNanoTests { + c.Assert(series.IsWindowsNano(t.series), gc.Equals, t.expected) + } +} === added file 
'src/github.com/juju/utils/series/supportedseries_windows_test.go' --- src/github.com/juju/utils/series/supportedseries_windows_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/series/supportedseries_windows_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2014 Canonical Ltd. +// Copyright 2014 Cloudbase Solutions SRL +// Licensed under the AGPLv3, see LICENCE file for details. + +package series_test + +import ( + "sort" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/series" +) + +type supportedSeriesWindowsSuite struct { +} + +var _ = gc.Suite(&supportedSeriesWindowsSuite{}) + +func (s *supportedSeriesWindowsSuite) TestSeriesVersion(c *gc.C) { + vers, err := series.SeriesVersion("win8") + if err != nil { + c.Assert(err, gc.Not(gc.ErrorMatches), `invalid series "win8"`, gc.Commentf(`unable to lookup series "win8"`)) + } else { + c.Assert(err, jc.ErrorIsNil) + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(vers, gc.Equals, "win8") +} + +func (s *supportedSeriesWindowsSuite) TestSupportedSeries(c *gc.C) { + expectedSeries := []string{ + "arch", + "centos7", + "precise", + "quantal", + "raring", + "saucy", + "trusty", + "utopic", + "vivid", + "win10", + "win2012", + "win2012hv", + "win2012hvr2", + "win2012r2", + "win2016", + "win2016nano", + "win7", + "win8", + "win81", + } + series := series.SupportedSeries() + sort.Strings(series) + c.Assert(series, gc.DeepEquals, expectedSeries) +} === modified file 'src/github.com/juju/utils/size.go' --- src/github.com/juju/utils/size.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/size.go 2016-03-22 15:18:22 +0000 @@ -54,3 +54,25 @@ func sizeSuffixMultiplier(i int) int { return 1 << uint(i*10) } + +// SizeTracker tracks the number of bytes passing through +// its Write method (which is otherwise a no-op). +// +// Use SizeTracker with io.MultiWriter() to track number of bytes +// written. Use with io.TeeReader() to track number of bytes read. +type SizeTracker struct { + // size is the number of bytes written so far. + size int64 +} + +// Size returns the number of bytes written so far. +func (st SizeTracker) Size() int64 { + return st.size +} + +// Write implements io.Writer. 
+func (st *SizeTracker) Write(data []byte) (n int, err error) { + n = len(data) + st.size += int64(n) + return n, nil +} === modified file 'src/github.com/juju/utils/size_test.go' --- src/github.com/juju/utils/size_test.go 2015-03-26 15:54:39 +0000 +++ src/github.com/juju/utils/size_test.go 2016-03-22 15:18:22 +0000 @@ -4,7 +4,12 @@ package utils_test import ( + "io" + "io/ioutil" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" gc "gopkg.in/check.v1" "github.com/juju/utils" @@ -77,3 +82,68 @@ } } } + +func (*sizeSuite) TestSizingReaderOkay(c *gc.C) { + expected := "some data" + stub := &testing.Stub{} + reader := filetesting.NewStubReader(stub, expected) + + var st utils.SizeTracker + sizingReader := io.TeeReader(reader, &st) + data, err := ioutil.ReadAll(sizingReader) + c.Assert(err, jc.ErrorIsNil) + + stub.CheckCallNames(c, "Read", "Read") + c.Check(string(data), gc.Equals, expected) + c.Check(st.Size(), gc.Equals, int64(len(expected))) +} + +func (*sizeSuite) TestSizingReaderMixedEOF(c *gc.C) { + expected := "some data" + stub := &testing.Stub{} + reader := &filetesting.StubReader{ + Stub: stub, + ReturnRead: &fakeStream{ + data: expected, + }, + } + + var st utils.SizeTracker + sizingReader := io.TeeReader(reader, &st) + data, err := ioutil.ReadAll(sizingReader) + c.Assert(err, jc.ErrorIsNil) + + stub.CheckCallNames(c, "Read") // The EOF was mixed with the data. + c.Check(string(data), gc.Equals, expected) + c.Check(st.Size(), gc.Equals, int64(len(expected))) +} + +func (*sizeSuite) TestSizingWriter(c *gc.C) { + expected := "some data" + stub := &testing.Stub{} + writer, buffer := filetesting.NewStubWriter(stub) + + var st utils.SizeTracker + sizingWriter := io.MultiWriter(writer, &st) + n, err := sizingWriter.Write([]byte(expected)) + c.Assert(err, jc.ErrorIsNil) + + stub.CheckCallNames(c, "Write") + c.Check(n, gc.Equals, len(expected)) + c.Check(buffer.String(), gc.Equals, expected) + c.Check(st.Size(), gc.Equals, int64(len(expected))) +} + +type fakeStream struct { + data string + pos uint64 +} + +func (f *fakeStream) Read(data []byte) (int, error) { + n := copy(data, f.data[f.pos:]) + f.pos += uint64(n) + if f.pos >= uint64(len(f.data)) { + return n, io.EOF + } + return n, nil +} === added directory 'src/github.com/juju/utils/ssh' === added file 'src/github.com/juju/utils/ssh/authorisedkeys.go' --- src/github.com/juju/utils/ssh/authorisedkeys.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/authorisedkeys.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,334 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
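Before the ssh sources, a compact illustration of the SizeTracker pattern from size.go above: a writer whose Write is otherwise a no-op is composed with io.TeeReader to count bytes read, or with io.MultiWriter to count bytes written. This is a re-sketch of the idea, not the utils implementation itself:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// sizeTracker mirrors utils.SizeTracker: a no-op writer whose only
// job is to count the bytes flowing through it.
type sizeTracker struct{ size int64 }

func (st *sizeTracker) Write(p []byte) (int, error) {
	st.size += int64(len(p))
	return len(p), nil
}

func main() {
	var st sizeTracker
	// TeeReader copies everything read from the buffer into st,
	// so st.size ends up holding the number of bytes read.
	r := io.TeeReader(bytes.NewBufferString("some data"), &st)
	if _, err := ioutil.ReadAll(r); err != nil {
		panic(err)
	}
	fmt.Println(st.size) // 9
}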
+ +package ssh + +import ( + "fmt" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + + "github.com/juju/errors" + "github.com/juju/loggo" + "github.com/juju/utils" + "golang.org/x/crypto/ssh" +) + +var logger = loggo.GetLogger("juju.utils.ssh") + +type ListMode bool + +var ( + FullKeys ListMode = true + Fingerprints ListMode = false +) + +const ( + authKeysFile = "authorized_keys" +) + +type AuthorisedKey struct { + Type string + Key []byte + Comment string +} + +func authKeysDir(username string) (string, error) { + homeDir, err := utils.UserHomeDir(username) + if err != nil { + return "", err + } + homeDir, err = utils.NormalizePath(homeDir) + if err != nil { + return "", err + } + return filepath.Join(homeDir, ".ssh"), nil +} + +// ParseAuthorisedKey parses a non-comment line from an +// authorized_keys file and returns the constituent parts. +// Based on description in "man sshd". +func ParseAuthorisedKey(line string) (*AuthorisedKey, error) { + key, comment, _, _, err := ssh.ParseAuthorizedKey([]byte(line)) + if err != nil { + return nil, errors.Errorf("invalid authorized_key %q", line) + } + return &AuthorisedKey{ + Type: key.Type(), + Key: key.Marshal(), + Comment: comment, + }, nil +} + +// SplitAuthorisedKeys extracts a key slice from the specified key data, +// by splitting the key data into lines and ignoring comments and blank lines. +func SplitAuthorisedKeys(keyData string) []string { + var keys []string + for _, key := range strings.Split(string(keyData), "\n") { + key = strings.Trim(key, " \r") + if len(key) == 0 { + continue + } + if key[0] == '#' { + continue + } + keys = append(keys, key) + } + return keys +} + +func readAuthorisedKeys(username string) ([]string, error) { + keyDir, err := authKeysDir(username) + if err != nil { + return nil, err + } + sshKeyFile := filepath.Join(keyDir, authKeysFile) + logger.Debugf("reading authorised keys file %s", sshKeyFile) + keyData, err := ioutil.ReadFile(sshKeyFile) + if os.IsNotExist(err) { + return []string{}, nil + } + if err != nil { + return nil, errors.Annotate(err, "reading ssh authorised keys file") + } + var keys []string + for _, key := range strings.Split(string(keyData), "\n") { + if len(strings.Trim(key, " \r")) == 0 { + continue + } + keys = append(keys, key) + } + return keys, nil +} + +func writeAuthorisedKeys(username string, keys []string) error { + keyDir, err := authKeysDir(username) + if err != nil { + return err + } + err = os.MkdirAll(keyDir, os.FileMode(0755)) + if err != nil { + return errors.Annotate(err, "cannot create ssh key directory") + } + keyData := strings.Join(keys, "\n") + "\n" + + // Get perms to use on auth keys file + sshKeyFile := filepath.Join(keyDir, authKeysFile) + perms := os.FileMode(0644) + info, err := os.Stat(sshKeyFile) + if err == nil { + perms = info.Mode().Perm() + } + + logger.Debugf("writing authorised keys file %s", sshKeyFile) + err = utils.AtomicWriteFile(sshKeyFile, []byte(keyData), perms) + if err != nil { + return err + } + + // TODO (wallyworld) - what to do on windows (if anything) + // TODO(dimitern) - no need to use user.Current() if username + // is "" - it will use the current user anyway. + if runtime.GOOS != "windows" { + // Ensure the resulting authorised keys file has its ownership + // set to the specified username. 
+ var u *user.User + if username == "" { + u, err = user.Current() + } else { + u, err = user.Lookup(username) + } + if err != nil { + return err + } + // chown requires ints but user.User has strings for windows. + uid, err := strconv.Atoi(u.Uid) + if err != nil { + return err + } + gid, err := strconv.Atoi(u.Gid) + if err != nil { + return err + } + err = os.Chown(sshKeyFile, uid, gid) + if err != nil { + return err + } + } + return nil +} + +// We need a mutex because updates to the authorised keys file are done by +// reading the contents, updating, and writing back out. So only one caller +// at a time can use either Add, Delete, List. +var mutex sync.Mutex + +// AddKeys adds the specified ssh keys to the authorized_keys file for user. +// Returns an error if there is an issue with *any* of the supplied keys. +func AddKeys(user string, newKeys ...string) error { + mutex.Lock() + defer mutex.Unlock() + existingKeys, err := readAuthorisedKeys(user) + if err != nil { + return err + } + for _, newKey := range newKeys { + fingerprint, comment, err := KeyFingerprint(newKey) + if err != nil { + return err + } + if comment == "" { + return errors.Errorf("cannot add ssh key without comment") + } + for _, key := range existingKeys { + existingFingerprint, existingComment, err := KeyFingerprint(key) + if err != nil { + // Only log a warning if the unrecognised key line is not a comment. + if key[0] != '#' { + logger.Warningf("invalid existing ssh key %q: %v", key, err) + } + continue + } + if existingFingerprint == fingerprint { + return errors.Errorf("cannot add duplicate ssh key: %v", fingerprint) + } + if existingComment == comment { + return errors.Errorf("cannot add ssh key with duplicate comment: %v", comment) + } + } + } + sshKeys := append(existingKeys, newKeys...) + return writeAuthorisedKeys(user, sshKeys) +} + +// DeleteKeys removes the specified ssh keys from the authorized ssh keys file for user. +// keyIds may be either key comments or fingerprints. +// Returns an error if there is an issue with *any* of the keys to delete. +func DeleteKeys(user string, keyIds ...string) error { + mutex.Lock() + defer mutex.Unlock() + existingKeyData, err := readAuthorisedKeys(user) + if err != nil { + return err + } + // Build up a map of keys indexed by fingerprint, and fingerprints indexed by comment + // so we can easily get the key represented by each keyId, which may be either a fingerprint + // or comment. + var keysToWrite []string + var sshKeys = make(map[string]string) + var keyComments = make(map[string]string) + for _, key := range existingKeyData { + fingerprint, comment, err := KeyFingerprint(key) + if err != nil { + logger.Debugf("keeping unrecognised existing ssh key %q: %v", key, err) + keysToWrite = append(keysToWrite, key) + continue + } + sshKeys[fingerprint] = key + if comment != "" { + keyComments[comment] = fingerprint + } + } + for _, keyId := range keyIds { + // assume keyId may be a fingerprint + fingerprint := keyId + _, ok := sshKeys[keyId] + if !ok { + // keyId is a comment + fingerprint, ok = keyComments[keyId] + } + if !ok { + return errors.Errorf("cannot delete non existent key: %v", keyId) + } + delete(sshKeys, fingerprint) + } + for _, key := range sshKeys { + keysToWrite = append(keysToWrite, key) + } + if len(keysToWrite) == 0 { + return errors.Errorf("cannot delete all keys") + } + return writeAuthorisedKeys(user, keysToWrite) +} + +// ReplaceKeys writes the specified ssh keys to the authorized_keys file for user, +// replacing any that are already there. 
+// Returns an error if there is an issue with *any* of the supplied keys. +func ReplaceKeys(user string, newKeys ...string) error { + mutex.Lock() + defer mutex.Unlock() + + existingKeyData, err := readAuthorisedKeys(user) + if err != nil { + return err + } + var existingNonKeyLines []string + for _, line := range existingKeyData { + _, _, err := KeyFingerprint(line) + if err != nil { + existingNonKeyLines = append(existingNonKeyLines, line) + } + } + return writeAuthorisedKeys(user, append(existingNonKeyLines, newKeys...)) +} + +// ListKeys returns either the full keys or key comments from the authorized ssh keys file for user. +func ListKeys(user string, mode ListMode) ([]string, error) { + mutex.Lock() + defer mutex.Unlock() + keyData, err := readAuthorisedKeys(user) + if err != nil { + return nil, err + } + var keys []string + for _, key := range keyData { + fingerprint, comment, err := KeyFingerprint(key) + if err != nil { + // Only log a warning if the unrecognised key line is not a comment. + if key[0] != '#' { + logger.Warningf("ignoring invalid ssh key %q: %v", key, err) + } + continue + } + if mode == FullKeys { + keys = append(keys, key) + } else { + shortKey := fingerprint + if comment != "" { + shortKey += fmt.Sprintf(" (%s)", comment) + } + keys = append(keys, shortKey) + } + } + return keys, nil +} + +// Any ssh key added to the authorised keys list by Juju will have this prefix. +// This allows Juju to know which keys have been added externally and any such keys +// will always be retained by Juju when updating the authorised keys file. +const JujuCommentPrefix = "Juju:" + +func EnsureJujuComment(key string) string { + ak, err := ParseAuthorisedKey(key) + // Just return an invalid key as is. + if err != nil { + logger.Warningf("invalid Juju ssh key %s: %v", key, err) + return key + } + if ak.Comment == "" { + return key + " " + JujuCommentPrefix + "sshkey" + } else { + // Add the Juju prefix to the comment if necessary. + if !strings.HasPrefix(ak.Comment, JujuCommentPrefix) { + commentIndex := strings.LastIndex(key, ak.Comment) + return key[:commentIndex] + JujuCommentPrefix + ak.Comment + } + } + return key +} === added file 'src/github.com/juju/utils/ssh/authorisedkeys_test.go' --- src/github.com/juju/utils/ssh/authorisedkeys_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/authorisedkeys_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,275 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ssh_test + +import ( + "encoding/base64" + "strings" + + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/ssh" + sshtesting "github.com/juju/utils/ssh/testing" +) + +type AuthorisedKeysKeysSuite struct { + gitjujutesting.FakeHomeSuite +} + +const ( + // We'll use the current user for ssh tests. 
+ testSSHUser = "" +) + +var _ = gc.Suite(&AuthorisedKeysKeysSuite{}) + +func writeAuthKeysFile(c *gc.C, keys []string) { + err := ssh.WriteAuthorisedKeys(testSSHUser, keys) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *AuthorisedKeysKeysSuite) TestListKeys(c *gc.C) { + keys := []string{ + sshtesting.ValidKeyOne.Key + " user@host", + sshtesting.ValidKeyTwo.Key, + } + writeAuthKeysFile(c, keys) + keys, err := ssh.ListKeys(testSSHUser, ssh.Fingerprints) + c.Assert(err, jc.ErrorIsNil) + c.Assert( + keys, gc.DeepEquals, + []string{sshtesting.ValidKeyOne.Fingerprint + " (user@host)", sshtesting.ValidKeyTwo.Fingerprint}) +} + +func (s *AuthorisedKeysKeysSuite) TestListKeysFull(c *gc.C) { + keys := []string{ + sshtesting.ValidKeyOne.Key + " user@host", + sshtesting.ValidKeyTwo.Key + " anotheruser@host", + } + writeAuthKeysFile(c, keys) + actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, keys) +} + +func (s *AuthorisedKeysKeysSuite) TestAddNewKey(c *gc.C) { + key := sshtesting.ValidKeyOne.Key + " user@host" + err := ssh.AddKeys(testSSHUser, key) + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, []string{key}) +} + +func (s *AuthorisedKeysKeysSuite) TestAddMoreKeys(c *gc.C) { + firstKey := sshtesting.ValidKeyOne.Key + " user@host" + writeAuthKeysFile(c, []string{firstKey}) + moreKeys := []string{ + sshtesting.ValidKeyTwo.Key + " anotheruser@host", + sshtesting.ValidKeyThree.Key + " yetanotheruser@host", + } + err := ssh.AddKeys(testSSHUser, moreKeys...) + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, append([]string{firstKey}, moreKeys...)) +} + +func (s *AuthorisedKeysKeysSuite) TestAddDuplicateKey(c *gc.C) { + key := sshtesting.ValidKeyOne.Key + " user@host" + err := ssh.AddKeys(testSSHUser, key) + c.Assert(err, jc.ErrorIsNil) + moreKeys := []string{ + sshtesting.ValidKeyOne.Key + " user@host", + sshtesting.ValidKeyTwo.Key + " yetanotheruser@host", + } + err = ssh.AddKeys(testSSHUser, moreKeys...) + c.Assert(err, gc.ErrorMatches, "cannot add duplicate ssh key: "+sshtesting.ValidKeyOne.Fingerprint) +} + +func (s *AuthorisedKeysKeysSuite) TestAddDuplicateComment(c *gc.C) { + key := sshtesting.ValidKeyOne.Key + " user@host" + err := ssh.AddKeys(testSSHUser, key) + c.Assert(err, jc.ErrorIsNil) + moreKeys := []string{ + sshtesting.ValidKeyTwo.Key + " user@host", + sshtesting.ValidKeyThree.Key + " yetanotheruser@host", + } + err = ssh.AddKeys(testSSHUser, moreKeys...) + c.Assert(err, gc.ErrorMatches, "cannot add ssh key with duplicate comment: user@host") +} + +func (s *AuthorisedKeysKeysSuite) TestAddKeyWithoutComment(c *gc.C) { + keys := []string{ + sshtesting.ValidKeyOne.Key + " user@host", + sshtesting.ValidKeyTwo.Key, + } + err := ssh.AddKeys(testSSHUser, keys...) 
+ c.Assert(err, gc.ErrorMatches, "cannot add ssh key without comment") +} + +func (s *AuthorisedKeysKeysSuite) TestAddKeepsUnrecognised(c *gc.C) { + writeAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, "invalid-key"}) + anotherKey := sshtesting.ValidKeyTwo.Key + " anotheruser@host" + err := ssh.AddKeys(testSSHUser, anotherKey) + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ReadAuthorisedKeys(testSSHUser) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, []string{sshtesting.ValidKeyOne.Key, "invalid-key", anotherKey}) +} + +func (s *AuthorisedKeysKeysSuite) TestDeleteKeys(c *gc.C) { + firstKey := sshtesting.ValidKeyOne.Key + " user@host" + anotherKey := sshtesting.ValidKeyTwo.Key + thirdKey := sshtesting.ValidKeyThree.Key + " anotheruser@host" + writeAuthKeysFile(c, []string{firstKey, anotherKey, thirdKey}) + err := ssh.DeleteKeys(testSSHUser, "user@host", sshtesting.ValidKeyTwo.Fingerprint) + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, []string{thirdKey}) +} + +func (s *AuthorisedKeysKeysSuite) TestDeleteKeysKeepsUnrecognised(c *gc.C) { + firstKey := sshtesting.ValidKeyOne.Key + " user@host" + writeAuthKeysFile(c, []string{firstKey, sshtesting.ValidKeyTwo.Key, "invalid-key"}) + err := ssh.DeleteKeys(testSSHUser, "user@host") + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ReadAuthorisedKeys(testSSHUser) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, []string{"invalid-key", sshtesting.ValidKeyTwo.Key}) +} + +func (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentComment(c *gc.C) { + firstKey := sshtesting.ValidKeyOne.Key + " user@host" + writeAuthKeysFile(c, []string{firstKey}) + err := ssh.DeleteKeys(testSSHUser, "someone@host") + c.Assert(err, gc.ErrorMatches, "cannot delete non existent key: someone@host") +} + +func (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentFingerprint(c *gc.C) { + firstKey := sshtesting.ValidKeyOne.Key + " user@host" + writeAuthKeysFile(c, []string{firstKey}) + err := ssh.DeleteKeys(testSSHUser, sshtesting.ValidKeyTwo.Fingerprint) + c.Assert(err, gc.ErrorMatches, "cannot delete non existent key: "+sshtesting.ValidKeyTwo.Fingerprint) +} + +func (s *AuthorisedKeysKeysSuite) TestDeleteLastKeyForbidden(c *gc.C) { + keys := []string{ + sshtesting.ValidKeyOne.Key + " user@host", + sshtesting.ValidKeyTwo.Key + " yetanotheruser@host", + } + writeAuthKeysFile(c, keys) + err := ssh.DeleteKeys(testSSHUser, "user@host", sshtesting.ValidKeyTwo.Fingerprint) + c.Assert(err, gc.ErrorMatches, "cannot delete all keys") +} + +func (s *AuthorisedKeysKeysSuite) TestReplaceKeys(c *gc.C) { + firstKey := sshtesting.ValidKeyOne.Key + " user@host" + anotherKey := sshtesting.ValidKeyTwo.Key + writeAuthKeysFile(c, []string{firstKey, anotherKey}) + + // replaceKey is created without a comment so test that + // ReplaceKeys handles keys without comments. This is + // because existing keys may not have a comment and + // ReplaceKeys is used to rewrite the entire authorized_keys + // file when adding new keys. 
+ replaceKey := sshtesting.ValidKeyThree.Key + err := ssh.ReplaceKeys(testSSHUser, replaceKey) + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, []string{replaceKey}) +} + +func (s *AuthorisedKeysKeysSuite) TestReplaceKeepsUnrecognised(c *gc.C) { + writeAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, "invalid-key"}) + anotherKey := sshtesting.ValidKeyTwo.Key + " anotheruser@host" + err := ssh.ReplaceKeys(testSSHUser, anotherKey) + c.Assert(err, jc.ErrorIsNil) + actual, err := ssh.ReadAuthorisedKeys(testSSHUser) + c.Assert(err, jc.ErrorIsNil) + c.Assert(actual, gc.DeepEquals, []string{"invalid-key", anotherKey}) +} + +func (s *AuthorisedKeysKeysSuite) TestEnsureJujuComment(c *gc.C) { + sshKey := sshtesting.ValidKeyOne.Key + for _, test := range []struct { + key string + expected string + }{ + {"invalid-key", "invalid-key"}, + {sshKey, sshKey + " Juju:sshkey"}, + {sshKey + " user@host", sshKey + " Juju:user@host"}, + {sshKey + " Juju:user@host", sshKey + " Juju:user@host"}, + {sshKey + " " + sshKey[3:5], sshKey + " Juju:" + sshKey[3:5]}, + } { + actual := ssh.EnsureJujuComment(test.key) + c.Assert(actual, gc.Equals, test.expected) + } +} + +func (s *AuthorisedKeysKeysSuite) TestSplitAuthorisedKeys(c *gc.C) { + sshKey := sshtesting.ValidKeyOne.Key + for _, test := range []struct { + keyData string + expected []string + }{ + {"", nil}, + {sshKey, []string{sshKey}}, + {sshKey + "\n", []string{sshKey}}, + {sshKey + "\n\n", []string{sshKey}}, + {sshKey + "\n#comment\n", []string{sshKey}}, + {sshKey + "\n #comment\n", []string{sshKey}}, + {sshKey + "\ninvalid\n", []string{sshKey, "invalid"}}, + } { + actual := ssh.SplitAuthorisedKeys(test.keyData) + c.Assert(actual, gc.DeepEquals, test.expected) + } +} + +func b64decode(c *gc.C, s string) []byte { + b, err := base64.StdEncoding.DecodeString(s) + c.Assert(err, jc.ErrorIsNil) + return b +} + +func (s *AuthorisedKeysKeysSuite) TestParseAuthorisedKey(c *gc.C) { + for i, test := range []struct { + line string + key []byte + comment string + err string + }{{ + line: sshtesting.ValidKeyOne.Key, + key: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]), + }, { + line: sshtesting.ValidKeyOne.Key + " a b c", + key: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]), + comment: "a b c", + }, { + line: "ssh-xsa blah", + err: "invalid authorized_key \"ssh-xsa blah\"", + }, { + // options should be skipped + line: `no-pty,principals="\"",command="\!" ` + sshtesting.ValidKeyOne.Key, + key: b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]), + }, { + line: "ssh-rsa", + err: "invalid authorized_key \"ssh-rsa\"", + }} { + c.Logf("test %d: %s", i, test.line) + ak, err := ssh.ParseAuthorisedKey(test.line) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, jc.ErrorIsNil) + c.Assert(ak, gc.Not(gc.IsNil)) + c.Assert(ak.Key, gc.DeepEquals, test.key) + c.Assert(ak.Comment, gc.Equals, test.comment) + } + } +} === added file 'src/github.com/juju/utils/ssh/clientkeys.go' --- src/github.com/juju/utils/ssh/clientkeys.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/clientkeys.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,183 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
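The comment-normalisation rule that EnsureJujuComment implements (and TestEnsureJujuComment exercises above) can be stated compactly: keys without a comment gain "Juju:sshkey", existing comments gain the "Juju:" prefix unless it is already present, and unparseable keys pass through untouched. A sketch of the core rule, where ensureComment is a hypothetical helper that receives the already-parsed comment instead of re-parsing the key line:

package main

import (
	"fmt"
	"strings"
)

const jujuPrefix = "Juju:"

// ensureComment applies the same normalisation rule as EnsureJujuComment,
// assuming the comment has already been extracted from the key line.
func ensureComment(key, comment string) string {
	switch {
	case comment == "":
		return key + " " + jujuPrefix + "sshkey"
	case strings.HasPrefix(comment, jujuPrefix):
		return key
	default:
		i := strings.LastIndex(key, comment)
		return key[:i] + jujuPrefix + comment
	}
}

func main() {
	fmt.Println(ensureComment("ssh-rsa AAAA", ""))
	// ssh-rsa AAAA Juju:sshkey
	fmt.Println(ensureComment("ssh-rsa AAAA user@host", "user@host"))
	// ssh-rsa AAAA Juju:user@host
}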
+ +package ssh + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/juju/utils" + "github.com/juju/utils/set" + "golang.org/x/crypto/ssh" +) + +const clientKeyName = "juju_id_rsa" + +// PublicKeySuffix is the file extension for public key files. +const PublicKeySuffix = ".pub" + +var ( + clientKeysMutex sync.Mutex + + // clientKeys is a cached map of private key filenames + // to ssh.Signers. The private keys are those loaded + // from the client key directory, passed to LoadClientKeys. + clientKeys map[string]ssh.Signer +) + +// LoadClientKeys loads the client SSH keys from the +// specified directory, and caches them as a process-wide +// global. If the directory does not exist, it is created; +// if the directory did not exist, or contains no keys, it +// is populated with a new key pair. +// +// If the directory exists, then all pairs of files where one +// has the same name as the other + ".pub" will be loaded as +// private/public key pairs. +// +// Calls to LoadClientKeys will clear the previously loaded +// keys, and recompute the keys. +func LoadClientKeys(dir string) error { + clientKeysMutex.Lock() + defer clientKeysMutex.Unlock() + dir, err := utils.NormalizePath(dir) + if err != nil { + return err + } + if _, err := os.Stat(dir); err == nil { + keys, err := loadClientKeys(dir) + if err != nil { + return err + } else if len(keys) > 0 { + clientKeys = keys + return nil + } + // Directory exists but contains no keys; + // fall through and create one. + } + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + keyfile, key, err := generateClientKey(dir) + if err != nil { + os.RemoveAll(dir) + return err + } + clientKeys = map[string]ssh.Signer{keyfile: key} + return nil +} + +// ClearClientKeys clears the client keys cached in memory. +func ClearClientKeys() { + clientKeysMutex.Lock() + defer clientKeysMutex.Unlock() + clientKeys = nil +} + +func generateClientKey(dir string) (keyfile string, key ssh.Signer, err error) { + private, public, err := GenerateKey("juju-client-key") + if err != nil { + return "", nil, err + } + clientPrivateKey, err := ssh.ParsePrivateKey([]byte(private)) + if err != nil { + return "", nil, err + } + privkeyFilename := filepath.Join(dir, clientKeyName) + if err = ioutil.WriteFile(privkeyFilename, []byte(private), 0600); err != nil { + return "", nil, err + } + if err := ioutil.WriteFile(privkeyFilename+PublicKeySuffix, []byte(public), 0600); err != nil { + os.Remove(privkeyFilename) + return "", nil, err + } + return privkeyFilename, clientPrivateKey, nil +} + +func loadClientKeys(dir string) (map[string]ssh.Signer, error) { + publicKeyFiles, err := publicKeyFiles(dir) + if err != nil { + return nil, err + } + keys := make(map[string]ssh.Signer, len(publicKeyFiles)) + for _, filename := range publicKeyFiles { + filename = filename[:len(filename)-len(PublicKeySuffix)] + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + keys[filename], err = ssh.ParsePrivateKey(data) + if err != nil { + return nil, fmt.Errorf("parsing key file %q: %v", filename, err) + } + } + return keys, nil +} + +// privateKeys returns the private keys loaded by LoadClientKeys. +func privateKeys() (signers []ssh.Signer) { + clientKeysMutex.Lock() + defer clientKeysMutex.Unlock() + for _, key := range clientKeys { + signers = append(signers, key) + } + return signers +} + +// PrivateKeyFiles returns the filenames of private SSH keys loaded by +// LoadClientKeys. 
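+// The OpenSSH client passes each of these files to ssh via -i as an
+// implicit identity (see opensshOptions in ssh_openssh.go).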
+func PrivateKeyFiles() []string { + clientKeysMutex.Lock() + defer clientKeysMutex.Unlock() + keyfiles := make([]string, 0, len(clientKeys)) + for f := range clientKeys { + keyfiles = append(keyfiles, f) + } + return keyfiles +} + +// PublicKeyFiles returns the filenames of public SSH keys loaded by +// LoadClientKeys. +func PublicKeyFiles() []string { + privkeys := PrivateKeyFiles() + pubkeys := make([]string, len(privkeys)) + for i, priv := range privkeys { + pubkeys[i] = priv + PublicKeySuffix + } + return pubkeys +} + +// publicKeyFiles returns the filenames of public SSH keys +// in the specified directory (all the files ending with .pub). +func publicKeyFiles(clientKeysDir string) ([]string, error) { + if clientKeysDir == "" { + return nil, nil + } + var keys []string + dir, err := os.Open(clientKeysDir) + if err != nil { + return nil, err + } + names, err := dir.Readdirnames(-1) + dir.Close() + if err != nil { + return nil, err + } + candidates := set.NewStrings(names...) + for _, name := range names { + if !strings.HasSuffix(name, PublicKeySuffix) { + continue + } + // If the private key filename also exists, add the file. + priv := name[:len(name)-len(PublicKeySuffix)] + if candidates.Contains(priv) { + keys = append(keys, filepath.Join(dir.Name(), name)) + } + } + return keys, nil +} === added file 'src/github.com/juju/utils/ssh/clientkeys_test.go' --- src/github.com/juju/utils/ssh/clientkeys_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/clientkeys_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ssh_test + +import ( + "io/ioutil" + "os" + + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + "github.com/juju/utils/ssh" + gc "gopkg.in/check.v1" +) + +type ClientKeysSuite struct { + gitjujutesting.FakeHomeSuite +} + +var _ = gc.Suite(&ClientKeysSuite{}) + +func (s *ClientKeysSuite) SetUpTest(c *gc.C) { + s.FakeHomeSuite.SetUpTest(c) + s.AddCleanup(func(*gc.C) { ssh.ClearClientKeys() }) + generateKeyRestorer := overrideGenerateKey(c) + s.AddCleanup(func(*gc.C) { generateKeyRestorer.Restore() }) +} + +func checkFiles(c *gc.C, obtained, expected []string) { + var err error + for i, e := range expected { + expected[i], err = utils.NormalizePath(e) + c.Assert(err, jc.ErrorIsNil) + } + c.Assert(obtained, jc.SameContents, expected) +} + +func checkPublicKeyFiles(c *gc.C, expected ...string) { + keys := ssh.PublicKeyFiles() + checkFiles(c, keys, expected) +} + +func checkPrivateKeyFiles(c *gc.C, expected ...string) { + keys := ssh.PrivateKeyFiles() + checkFiles(c, keys, expected) +} + +func (s *ClientKeysSuite) TestPublicKeyFiles(c *gc.C) { + // LoadClientKeys will create the specified directory + // and populate it with a key pair. + err := ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub") + // All files ending with .pub in the client key dir get picked up. + priv, pub, err := ssh.GenerateKey("whatever") + c.Assert(err, jc.ErrorIsNil) + err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever.pub"), []byte(pub), 0600) + c.Assert(err, jc.ErrorIsNil) + err = ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + // The new public key won't be observed until the + // corresponding private key exists. 
+ checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub") + err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever"), []byte(priv), 0600) + c.Assert(err, jc.ErrorIsNil) + err = ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub", "~/.juju/ssh/whatever.pub") +} + +func (s *ClientKeysSuite) TestPrivateKeyFiles(c *gc.C) { + // Create/load client keys. They will be cached in memory: + // any files added to the directory will not be considered + // unless LoadClientKeys is called again. + err := ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") + priv, pub, err := ssh.GenerateKey("whatever") + c.Assert(err, jc.ErrorIsNil) + err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever"), []byte(priv), 0600) + c.Assert(err, jc.ErrorIsNil) + err = ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + // The new private key won't be observed until the + // corresponding public key exists. + checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") + err = ioutil.WriteFile(gitjujutesting.HomePath(".juju", "ssh", "whatever.pub"), []byte(pub), 0600) + c.Assert(err, jc.ErrorIsNil) + // new keys won't be reported until we call LoadClientKeys again + checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub") + checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") + err = ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + checkPublicKeyFiles(c, "~/.juju/ssh/juju_id_rsa.pub", "~/.juju/ssh/whatever.pub") + checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa", "~/.juju/ssh/whatever") +} + +func (s *ClientKeysSuite) TestLoadClientKeysDirExists(c *gc.C) { + err := os.MkdirAll(gitjujutesting.HomePath(".juju", "ssh"), 0755) + c.Assert(err, jc.ErrorIsNil) + err = ssh.LoadClientKeys("~/.juju/ssh") + c.Assert(err, jc.ErrorIsNil) + checkPrivateKeyFiles(c, "~/.juju/ssh/juju_id_rsa") +} === added file 'src/github.com/juju/utils/ssh/export_test.go' --- src/github.com/juju/utils/ssh/export_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ssh + +var ( + ReadAuthorisedKeys = readAuthorisedKeys + WriteAuthorisedKeys = writeAuthorisedKeys + InitDefaultClient = initDefaultClient + DefaultIdentities = &defaultIdentities + SSHDial = &sshDial + RSAGenerateKey = &rsaGenerateKey + TestCopyReader = copyReader + TestNewCmd = newCmd +) === added file 'src/github.com/juju/utils/ssh/fakes_test.go' --- src/github.com/juju/utils/ssh/fakes_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/fakes_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,125 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package ssh_test + +import ( + "bytes" + "io" + "io/ioutil" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/ssh" +) + +type fakeClient struct { + calls []string + hostArg string + commandArg []string + optionsArg *ssh.Options + copyArgs []string + + err error + cmd *ssh.Cmd + impl fakeCommandImpl +} + +func (cl *fakeClient) checkCalls(c *gc.C, host string, command []string, options *ssh.Options, copyArgs []string, calls ...string) { + c.Check(cl.hostArg, gc.Equals, host) + c.Check(cl.commandArg, jc.DeepEquals, command) + c.Check(cl.optionsArg, gc.Equals, options) + c.Check(cl.copyArgs, jc.DeepEquals, copyArgs) + c.Check(cl.calls, jc.DeepEquals, calls) +} + +func (cl *fakeClient) Command(host string, command []string, options *ssh.Options) *ssh.Cmd { + cl.calls = append(cl.calls, "Command") + cl.hostArg = host + cl.commandArg = command + cl.optionsArg = options + cmd := cl.cmd + if cmd == nil { + cmd = ssh.TestNewCmd(&cl.impl) + } + return cmd +} + +func (cl *fakeClient) Copy(args []string, options *ssh.Options) error { + cl.calls = append(cl.calls, "Copy") + cl.copyArgs = args + cl.optionsArg = options + return cl.err +} + +type bufferWriter struct { + bytes.Buffer +} + +func (*bufferWriter) Close() error { + return nil +} + +type fakeCommandImpl struct { + calls []string + stdinArg io.Reader + stdoutArg io.Writer + stderrArg io.Writer + stdinData bufferWriter + + err error + stdinRaw io.Reader + stdoutRaw io.Writer + stderrRaw io.Writer + stdoutData bytes.Buffer + stderrData bytes.Buffer +} + +func (ci *fakeCommandImpl) checkCalls(c *gc.C, stdin io.Reader, stdout, stderr io.Writer, calls ...string) { + c.Check(ci.stdinArg, gc.Equals, stdin) + c.Check(ci.stdoutArg, gc.Equals, stdout) + c.Check(ci.stderrArg, gc.Equals, stderr) + c.Check(ci.calls, jc.DeepEquals, calls) +} + +func (ci *fakeCommandImpl) checkStdin(c *gc.C, data string) { + c.Check(ci.stdinData.String(), gc.Equals, data) +} + +func (ci *fakeCommandImpl) Start() error { + ci.calls = append(ci.calls, "Start") + return ci.err +} + +func (ci *fakeCommandImpl) Wait() error { + ci.calls = append(ci.calls, "Wait") + return ci.err +} + +func (ci *fakeCommandImpl) Kill() error { + ci.calls = append(ci.calls, "Kill") + return ci.err +} + +func (ci *fakeCommandImpl) SetStdio(stdin io.Reader, stdout, stderr io.Writer) { + ci.calls = append(ci.calls, "SetStdio") + ci.stdinArg = stdin + ci.stdoutArg = stdout + ci.stderrArg = stderr +} + +func (ci *fakeCommandImpl) StdinPipe() (io.WriteCloser, io.Reader, error) { + ci.calls = append(ci.calls, "StdinPipe") + return &ci.stdinData, ci.stdinRaw, ci.err +} + +func (ci *fakeCommandImpl) StdoutPipe() (io.ReadCloser, io.Writer, error) { + ci.calls = append(ci.calls, "StdoutPipe") + return ioutil.NopCloser(&ci.stdoutData), ci.stdoutRaw, ci.err +} + +func (ci *fakeCommandImpl) StderrPipe() (io.ReadCloser, io.Writer, error) { + ci.calls = append(ci.calls, "StderrPipe") + return ioutil.NopCloser(&ci.stderrData), ci.stderrRaw, ci.err +} === added file 'src/github.com/juju/utils/ssh/fingerprint.go' --- src/github.com/juju/utils/ssh/fingerprint.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/fingerprint.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,33 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ssh + +import ( + "bytes" + "crypto/md5" + "fmt" + + "github.com/juju/errors" +) + +// KeyFingerprint returns the fingerprint and comment for the specified key +// in authorized_key format. 
Fingerprints are generated according to RFC4716.
+// See http://www.ietf.org/rfc/rfc4716.txt, section 4.
+func KeyFingerprint(key string) (fingerprint, comment string, err error) {
+	ak, err := ParseAuthorisedKey(key)
+	if err != nil {
+		return "", "", errors.Errorf("generating key fingerprint: %v", err)
+	}
+	hash := md5.New()
+	hash.Write(ak.Key)
+	sum := hash.Sum(nil)
+	var buf bytes.Buffer
+	for i := 0; i < hash.Size(); i++ {
+		if i > 0 {
+			buf.WriteByte(':')
+		}
+		buf.WriteString(fmt.Sprintf("%02x", sum[i]))
+	}
+	return buf.String(), ak.Comment, nil
+}
=== added file 'src/github.com/juju/utils/ssh/fingerprint_test.go'
--- src/github.com/juju/utils/ssh/fingerprint_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/fingerprint_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,37 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh_test
+
+import (
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/utils/ssh"
+	sshtesting "github.com/juju/utils/ssh/testing"
+)
+
+type FingerprintSuite struct {
+	testing.IsolationSuite
+}
+
+var _ = gc.Suite(&FingerprintSuite{})
+
+func (s *FingerprintSuite) TestKeyFingerprint(c *gc.C) {
+	keys := []sshtesting.SSHKey{
+		sshtesting.ValidKeyOne,
+		sshtesting.ValidKeyTwo,
+		sshtesting.ValidKeyThree,
+	}
+	for _, k := range keys {
+		fingerprint, _, err := ssh.KeyFingerprint(k.Key)
+		c.Assert(err, jc.ErrorIsNil)
+		c.Assert(fingerprint, gc.Equals, k.Fingerprint)
+	}
+}
+
+func (s *FingerprintSuite) TestKeyFingerprintError(c *gc.C) {
+	_, _, err := ssh.KeyFingerprint("invalid key")
+	c.Assert(err, gc.ErrorMatches, `generating key fingerprint: invalid authorized_key "invalid key"`)
+}
=== added file 'src/github.com/juju/utils/ssh/generate.go'
--- src/github.com/juju/utils/ssh/generate.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/generate.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,65 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"strings"
+
+	"github.com/juju/errors"
+	"golang.org/x/crypto/ssh"
+)
+
+// rsaGenerateKey allows for tests to patch out rsa key generation
+var rsaGenerateKey = rsa.GenerateKey
+
+// KeyBits is used to determine the number of bits to use for the RSA keys
+// created using the GenerateKey function.
+var KeyBits = 2048
+
+// GenerateKey makes a 2048 bit RSA no-passphrase SSH capable key. The bit
+// size is actually controlled by the KeyBits var. The private key returned is
+// encoded to ASCII using the PKCS1 encoding. The public key is suitable to
+// be added into an authorized_keys file, and has the comment passed in as the
+// comment part of the key.
+func GenerateKey(comment string) (private, public string, err error) {
+	key, err := rsaGenerateKey(rand.Reader, KeyBits)
+	if err != nil {
+		return "", "", errors.Trace(err)
+	}
+
+	identity := pem.EncodeToMemory(
+		&pem.Block{
+			Type:  "RSA PRIVATE KEY",
+			Bytes: x509.MarshalPKCS1PrivateKey(key),
+		})
+
+	public, err = PublicKey(identity, comment)
+	if err != nil {
+		return "", "", errors.Trace(err)
+	}
+
+	return string(identity), public, nil
+}
+
+// PublicKey returns the public key for any private key. The public key is
+// suitable to be added into an authorized_keys file, and has the comment
+// passed in as the comment part of the key.
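+//
+// A minimal usage sketch (error handling elided; the comment string is an
+// arbitrary example, not a value required by this package):
+//
+//	private, _, err := GenerateKey("example-comment")
+//	public, err := PublicKey([]byte(private), "example-comment")
+//	// public now holds "ssh-rsa AAAA... example-comment\n"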
+func PublicKey(privateKey []byte, comment string) (string, error) {
+	signer, err := ssh.ParsePrivateKey(privateKey)
+	if err != nil {
+		return "", errors.Annotate(err, "failed to load key")
+	}
+
+	authKey := string(ssh.MarshalAuthorizedKey(signer.PublicKey()))
+	// Strip off the trailing new line so we can add a comment.
+	authKey = strings.TrimSpace(authKey)
+	public := fmt.Sprintf("%s %s\n", authKey, comment)
+
+	return public, nil
+}
=== added file 'src/github.com/juju/utils/ssh/generate_test.go'
--- src/github.com/juju/utils/ssh/generate_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/generate_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,53 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh_test
+
+import (
+	"crypto/rsa"
+	"io"
+
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/utils/ssh"
+)
+
+type GenerateSuite struct {
+	testing.IsolationSuite
+}
+
+var _ = gc.Suite(&GenerateSuite{})
+
+var pregeneratedKey *rsa.PrivateKey
+
+// overrideGenerateKey patches out rsa.GenerateKey to create a single testing
+// key which is saved and used between tests to save computation time.
+func overrideGenerateKey(c *gc.C) testing.Restorer {
+	restorer := testing.PatchValue(ssh.RSAGenerateKey, func(random io.Reader, bits int) (*rsa.PrivateKey, error) {
+		if pregeneratedKey != nil {
+			return pregeneratedKey, nil
+		}
+		// Ignore requested bits and just use 512 bits for speed
+		key, err := rsa.GenerateKey(random, 512)
+		if err != nil {
+			return nil, err
+		}
+		key.Precompute()
+		pregeneratedKey = key
+		return key, nil
+	})
+	return restorer
+}
+
+func (s *GenerateSuite) TestGenerate(c *gc.C) {
+	defer overrideGenerateKey(c).Restore()
+	private, public, err := ssh.GenerateKey("some-comment")
+
+	c.Check(err, jc.ErrorIsNil)
+	c.Check(private, jc.HasPrefix, "-----BEGIN RSA PRIVATE KEY-----\n")
+	c.Check(private, jc.HasSuffix, "-----END RSA PRIVATE KEY-----\n")
+	c.Check(public, jc.HasPrefix, "ssh-rsa ")
+	c.Check(public, jc.HasSuffix, " some-comment\n")
+}
=== added file 'src/github.com/juju/utils/ssh/package_test.go'
--- src/github.com/juju/utils/ssh/package_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}
=== added file 'src/github.com/juju/utils/ssh/run.go'
--- src/github.com/juju/utils/ssh/run.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/run.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,165 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh
+
+import (
+	"bytes"
+	"os/exec"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils/clock"
+	utilexec "github.com/juju/utils/exec"
+)
+
+// ExecParams are used for the parameters for ExecuteCommandOnMachine.
+type ExecParams struct {
+	IdentityFile string
+	Host         string
+	Command      string
+	Timeout      time.Duration
+}
+
+// StartCommandOnMachine executes the command on the given host. The
+// command is run in a Bash shell over an SSH connection. All output
+// is captured. A RunningCmd is returned that may be used to wait
+// for the command to finish running.
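+//
+// A minimal usage sketch (the host and command are illustrative
+// assumptions, not values defined by this package):
+//
+//	running, err := StartCommandOnMachine(ExecParams{
+//		Host:    "ubuntu@10.0.0.1",
+//		Command: "uptime",
+//	})
+//	if err != nil { ... }
+//	result, err := running.Wait()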
+func StartCommandOnMachine(params ExecParams) (*RunningCmd, error) {
+	// execute bash accepting commands on stdin
+	if params.Host == "" {
+		return nil, errors.Errorf("missing host address")
+	}
+	logger.Debugf("execute on %s", params.Host)
+
+	var options Options
+	if params.IdentityFile != "" {
+		options.SetIdentities(params.IdentityFile)
+	}
+	command := Command(params.Host, []string{"/bin/bash", "-s"}, &options)
+
+	// Run the command.
+	running := &RunningCmd{
+		SSHCmd: command,
+	}
+	command.Stdout = &running.Stdout
+	command.Stderr = &running.Stderr
+	command.Stdin = strings.NewReader(params.Command + "\n")
+	if err := command.Start(); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return running, nil
+}
+
+// RunningCmd represents a command that has been started.
+type RunningCmd struct {
+	// SSHCmd is the command that was started.
+	SSHCmd *Cmd
+
+	// Stdout and Stderr are the output streams the command is using.
+	Stdout bytes.Buffer
+	Stderr bytes.Buffer
+}
+
+// Wait waits for the command to complete and returns the result.
+func (cmd *RunningCmd) Wait() (result utilexec.ExecResponse, _ error) {
+	defer func() {
+		// Gather as much as we have from stdout and stderr.
+		result.Stdout = cmd.Stdout.Bytes()
+		result.Stderr = cmd.Stderr.Bytes()
+	}()
+
+	err := cmd.SSHCmd.Wait()
+	logger.Debugf("command.Wait finished (err: %v)", err)
+	code, err := getExitCode(err)
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+
+	result.Code = code
+	return result, nil
+}
+
+// TODO(ericsnow) Add RunningCmd.WaitAbortable(abortChan <-chan error) ...
+// based on WaitWithTimeout and update WaitWithTimeout to use it. We
+// could make it WaitAbortable(abortChans ...<-chan error), which would
+// require using reflect.Select(). Then that could simply replace Wait().
+// It may make more sense, however, to have a helper function:
+// Wait(cmd T, abortChans ...<-chan error) ...
+
+// Cancelled is an error indicating that a command timed out.
+var Cancelled = errors.New("command timed out")
+
+// WaitWithCancel waits for the command to complete and returns the result.
+// If cancel is closed before the command completes, the command is killed
+// and Cancelled is returned.
+func (cmd *RunningCmd) WaitWithCancel(cancel <-chan struct{}) (utilexec.ExecResponse, error) {
+	var result utilexec.ExecResponse
+
+	done := make(chan error, 1)
+	go func() {
+		defer close(done)
+		waitResult, err := cmd.Wait()
+		result = waitResult
+		done <- err
+	}()
+
+	select {
+	case err := <-done:
+		return result, errors.Trace(err)
+	case <-cancel:
+		logger.Infof("killing the command due to cancellation")
+		cmd.SSHCmd.Kill()
+
+		<-done            // Ensure that the original cmd.Wait() call completed.
+		cmd.SSHCmd.Wait() // Finalize cmd.SSHCmd, if necessary.
+		return result, Cancelled
+	}
+}
+
+func getExitCode(err error) (int, error) {
+	if err == nil {
+		return 0, nil
+	}
+	err = errors.Cause(err)
+	if ee, ok := err.(*exec.ExitError); ok {
+		raw := ee.ProcessState.Sys()
+		status, ok := raw.(syscall.WaitStatus)
+		if !ok {
+			logger.Errorf("unexpected type %T from ProcessState.Sys()", raw)
+		} else if status.Exited() {
+			// A non-zero return code isn't considered an error here.
+			return status.ExitStatus(), nil
+		}
+	}
+	return -1, err
+}
+
+// ExecuteCommandOnMachine will execute the command passed through on
+// the host specified. This is done using ssh, and passing the commands
+// through /bin/bash. If the command is not finished within the timeout
+// specified, an error is returned.
Any output captured during that time
+// is also returned in the remote response.
+func ExecuteCommandOnMachine(args ExecParams) (utilexec.ExecResponse, error) {
+	var result utilexec.ExecResponse
+
+	cmd, err := StartCommandOnMachine(args)
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+
+	cancel := make(chan struct{})
+	go func() {
+		<-clock.WallClock.After(args.Timeout)
+		close(cancel)
+	}()
+	result, err = cmd.WaitWithCancel(cancel)
+	if err != nil {
+		return result, errors.Trace(err)
+	}
+
+	return result, nil
+}
=== added file 'src/github.com/juju/utils/ssh/run_test.go'
--- src/github.com/juju/utils/ssh/run_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/run_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,132 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh_test
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/utils/ssh"
+)
+
+const (
+	shortWait = 50 * time.Millisecond
+	longWait  = 10 * time.Second
+)
+
+type ExecuteSSHCommandSuite struct {
+	testing.IsolationSuite
+	originalPath string
+	testbin      string
+	fakessh      string
+}
+
+var _ = gc.Suite(&ExecuteSSHCommandSuite{})
+
+func (s *ExecuteSSHCommandSuite) SetUpSuite(c *gc.C) {
+	s.originalPath = os.Getenv("PATH")
+	s.IsolationSuite.SetUpSuite(c)
+}
+
+func (s *ExecuteSSHCommandSuite) SetUpTest(c *gc.C) {
+	if runtime.GOOS == "windows" {
+		c.Skip("issue 1403084: Tests use OpenSSH only")
+	}
+	s.IsolationSuite.SetUpTest(c)
+	err := os.Setenv("PATH", s.originalPath)
+	c.Assert(err, jc.ErrorIsNil)
+	s.testbin = c.MkDir()
+	s.fakessh = filepath.Join(s.testbin, "ssh")
+	s.PatchEnvPathPrepend(s.testbin)
+}
+
+func (s *ExecuteSSHCommandSuite) fakeSSH(c *gc.C, cmd string) {
+	err := ioutil.WriteFile(s.fakessh, []byte(cmd), 0755)
+	c.Assert(err, jc.ErrorIsNil)
+}
+
+func (s *ExecuteSSHCommandSuite) TestCaptureOutput(c *gc.C) {
+	s.fakeSSH(c, echoSSH)
+
+	response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{
+		Host:    "hostname",
+		Command: "sudo apt-get update\nsudo apt-get upgrade",
+		Timeout: longWait,
+	})
+
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(response.Code, gc.Equals, 0)
+	c.Assert(string(response.Stdout), gc.Equals, "sudo apt-get update\nsudo apt-get upgrade\n")
+	c.Assert(string(response.Stderr), gc.Equals,
+		"-o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 hostname /bin/bash -s\n")
+}
+
+func (s *ExecuteSSHCommandSuite) TestIdentityFile(c *gc.C) {
+	s.fakeSSH(c, echoSSH)
+
+	response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{
+		IdentityFile: "identity-file",
+		Host:         "hostname",
+		Timeout:      longWait,
+	})
+
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(string(response.Stderr), jc.Contains, " -i identity-file ")
+}
+
+func (s *ExecuteSSHCommandSuite) TestTimeoutCaptureOutput(c *gc.C) {
+	s.fakeSSH(c, slowSSH)
+
+	response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{
+		IdentityFile: "identity-file",
+		Host:         "hostname",
+		Command:      "ignored",
+		Timeout:      shortWait,
+	})
+
+	c.Check(err, gc.ErrorMatches, "command timed out")
+	c.Assert(response.Code, gc.Equals, 0)
+	c.Assert(string(response.Stdout), gc.Equals, "stdout\n")
+	c.Assert(string(response.Stderr), gc.Equals, "stderr\n")
+}
+
+func (s *ExecuteSSHCommandSuite) TestCapturesReturnCode(c *gc.C) {
+	s.fakeSSH(c, passthroughSSH)
+
+	response, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{
+		IdentityFile: "identity-file",
+		Host:         "hostname",
+		Command:      "echo stdout; exit 42",
+		Timeout:      longWait,
+	})
+
+	c.Check(err, jc.ErrorIsNil)
+	c.Assert(response.Code, gc.Equals, 42)
+	c.Assert(string(response.Stdout), gc.Equals, "stdout\n")
+	c.Assert(string(response.Stderr), gc.Equals, "")
+}
+
+// echoSSH outputs the command args to stderr, and copies stdin to stdout
+var echoSSH = `#!/bin/bash
+# Write the args to stderr
+echo "$*" >&2
+cat /dev/stdin
+`
+
+// slowSSH sleeps for a while after outputting some text to stdout and stderr
+var slowSSH = `#!/bin/bash
+echo "stderr" >&2
+echo "stdout"
+sleep 5s
+`
+
+// passthroughSSH creates an ssh that executes stdin.
+var passthroughSSH = `#!/bin/bash -s`
=== added file 'src/github.com/juju/utils/ssh/ssh.go'
--- src/github.com/juju/utils/ssh/ssh.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/ssh.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,269 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package ssh contains utilities for dealing with SSH connections,
+// key management, and so on. All SSH-based command executions in
+// Juju should use the Command/Copy functions in this package.
+//
+package ssh
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os/exec"
+	"syscall"
+
+	"github.com/juju/cmd"
+	je "github.com/juju/errors"
+)
+
+// Options is a client-implementation independent SSH options set.
+type Options struct {
+	// proxyCommand specifies the command to
+	// execute to proxy SSH traffic through.
+	proxyCommand []string
+	// ssh server port; zero means use the default (22)
+	port int
+	// no PTY forced by default
+	allocatePTY bool
+	// password authentication is disallowed by default
+	passwordAuthAllowed bool
+	// identities is a sequence of paths to private key/identity files
+	// to use when attempting to log in. A client implementation may attempt
+	// with additional identities, but must give preference to these
+	identities []string
+	// knownHostsFile is a path to a file in which to save the host's
+	// fingerprint.
+	knownHostsFile string
+}
+
+// SetProxyCommand sets a command to execute to proxy traffic through.
+func (o *Options) SetProxyCommand(command ...string) {
+	o.proxyCommand = append([]string{}, command...)
+}
+
+// SetPort sets the SSH server port to connect to.
+func (o *Options) SetPort(port int) {
+	o.port = port
+}
+
+// EnablePTY forces the allocation of a pseudo-TTY.
+//
+// Forcing a pseudo-TTY is required, for example, for sudo
+// prompts on the target host.
+func (o *Options) EnablePTY() {
+	o.allocatePTY = true
+}
+
+// SetKnownHostsFile sets the host's fingerprint to be saved in the given file.
+//
+// Host fingerprints are saved in ~/.ssh/known_hosts by default.
+func (o *Options) SetKnownHostsFile(file string) {
+	o.knownHostsFile = file
+}
+
+// AllowPasswordAuthentication allows the SSH
+// client to prompt the user for a password.
+//
+// Password authentication is disallowed by default.
+func (o *Options) AllowPasswordAuthentication() {
+	o.passwordAuthAllowed = true
+}
+
+// SetIdentities sets a sequence of paths to private key/identity files
+// to use when attempting login. Client implementations may attempt to
+// use additional identities, but must give preference to the ones
+// specified here.
+func (o *Options) SetIdentities(identityFiles ...string) {
+	o.identities = append([]string{}, identityFiles...)
+} + +// Client is an interface for SSH clients to implement +type Client interface { + // Command returns a Command for executing a command + // on the specified host. Each Command is executed + // within its own SSH session. + // + // Host is specified in the format [user@]host. + Command(host string, command []string, options *Options) *Cmd + + // Copy copies file(s) between local and remote host(s). + // Paths are specified in the scp format, [[user@]host:]path. If + // any extra arguments are specified in extraArgs, they are passed + // verbatim. + Copy(args []string, options *Options) error +} + +// Cmd represents a command to be (or being) executed +// on a remote host. +type Cmd struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + impl command +} + +func newCmd(impl command) *Cmd { + return &Cmd{impl: impl} +} + +// CombinedOutput runs the command, and returns the +// combined stdout/stderr output and result of +// executing the command. +func (c *Cmd) CombinedOutput() ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if c.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b bytes.Buffer + c.Stdout = &b + c.Stderr = &b + err := c.Run() + return b.Bytes(), err +} + +// Output runs the command, and returns the stdout +// output and result of executing the command. +func (c *Cmd) Output() ([]byte, error) { + if c.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + c.Stdout = &b + err := c.Run() + return b.Bytes(), err +} + +// Run runs the command, and returns the result as an error. +func (c *Cmd) Run() error { + if err := c.Start(); err != nil { + return err + } + err := c.Wait() + if exitError, ok := err.(*exec.ExitError); ok && exitError != nil { + status := exitError.ProcessState.Sys().(syscall.WaitStatus) + if status.Exited() { + return cmd.NewRcPassthroughError(status.ExitStatus()) + } + } + return err +} + +// Start starts the command running, but does not wait for +// it to complete. If the command could not be started, an +// error is returned. +func (c *Cmd) Start() error { + c.impl.SetStdio(c.Stdin, c.Stdout, c.Stderr) + return c.impl.Start() +} + +// Wait waits for the started command to complete, +// and returns the result as an error. +func (c *Cmd) Wait() error { + return c.impl.Wait() +} + +// Kill kills the started command. +func (c *Cmd) Kill() error { + return c.impl.Kill() +} + +// StdinPipe creates a pipe and connects it to +// the command's stdin. The read end of the pipe +// is assigned to c.Stdin. +func (c *Cmd) StdinPipe() (io.WriteCloser, error) { + wc, r, err := c.impl.StdinPipe() + if err != nil { + return nil, err + } + c.Stdin = r + return wc, nil +} + +// StdoutPipe creates a pipe and connects it to +// the command's stdout. The write end of the pipe +// is assigned to c.Stdout. +func (c *Cmd) StdoutPipe() (io.ReadCloser, error) { + rc, w, err := c.impl.StdoutPipe() + if err != nil { + return nil, err + } + c.Stdout = w + return rc, nil +} + +// StderrPipe creates a pipe and connects it to +// the command's stderr. The write end of the pipe +// is assigned to c.Stderr. +func (c *Cmd) StderrPipe() (io.ReadCloser, error) { + rc, w, err := c.impl.StderrPipe() + if err != nil { + return nil, err + } + c.Stderr = w + return rc, nil +} + +// command is an implementation-specific representation of a +// command prepared to execute against a specific host. 
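+//
+// Within this package it is implemented by goCryptoCommand
+// (ssh_gocrypto.go) and opensshCmd (ssh_openssh.go).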
+type command interface { + Start() error + Wait() error + Kill() error + SetStdio(stdin io.Reader, stdout, stderr io.Writer) + StdinPipe() (io.WriteCloser, io.Reader, error) + StdoutPipe() (io.ReadCloser, io.Writer, error) + StderrPipe() (io.ReadCloser, io.Writer, error) +} + +// DefaultClient is the default SSH client for the process. +// +// If the OpenSSH client is found in $PATH, then it will be +// used for DefaultClient; otherwise, DefaultClient will use +// an embedded client based on go.crypto/ssh. +var DefaultClient Client + +// chosenClient holds the type of SSH client created for +// DefaultClient, so that we can log it in Command or Copy. +var chosenClient string + +func init() { + initDefaultClient() +} + +func initDefaultClient() { + if client, err := NewOpenSSHClient(); err == nil { + DefaultClient = client + chosenClient = "OpenSSH" + } else if client, err := NewGoCryptoClient(); err == nil { + DefaultClient = client + chosenClient = "go.crypto (embedded)" + } +} + +// Command is a short-cut for DefaultClient.Command. +func Command(host string, command []string, options *Options) *Cmd { + logger.Debugf("using %s ssh client", chosenClient) + return DefaultClient.Command(host, command, options) +} + +// Copy is a short-cut for DefaultClient.Copy. +func Copy(args []string, options *Options) error { + logger.Debugf("using %s ssh client", chosenClient) + return DefaultClient.Copy(args, options) +} + +// CopyReader sends the reader's data to a file on the remote host over SSH. +func CopyReader(host, filename string, r io.Reader, options *Options) error { + logger.Debugf("using %s ssh client", chosenClient) + return copyReader(DefaultClient, host, filename, r, options) +} + +func copyReader(client Client, host, filename string, r io.Reader, options *Options) error { + cmd := client.Command(host, []string{"cat - > " + filename}, options) + cmd.Stdin = r + return je.Trace(cmd.Run()) +} === added file 'src/github.com/juju/utils/ssh/ssh_gocrypto.go' --- src/github.com/juju/utils/ssh/ssh_gocrypto.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/ssh_gocrypto.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,243 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package ssh + +import ( + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "os/user" + "strconv" + "strings" + + "github.com/juju/errors" + "github.com/juju/utils" + "golang.org/x/crypto/ssh" +) + +const sshDefaultPort = 22 + +// GoCryptoClient is an implementation of Client that +// uses the embedded go.crypto/ssh SSH client. +// +// GoCryptoClient is intentionally limited in the +// functionality that it enables, as it is currently +// intended to be used only for non-interactive command +// execution. +type GoCryptoClient struct { + signers []ssh.Signer +} + +// NewGoCryptoClient creates a new GoCryptoClient. +// +// If no signers are specified, NewGoCryptoClient will +// use the private key generated by LoadClientKeys. +func NewGoCryptoClient(signers ...ssh.Signer) (*GoCryptoClient, error) { + return &GoCryptoClient{signers: signers}, nil +} + +// Command implements Client.Command. +func (c *GoCryptoClient) Command(host string, command []string, options *Options) *Cmd { + shellCommand := utils.CommandString(command...) 
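+	// Prefer signers supplied when the client was created; otherwise
+	// fall back to the process-wide keys cached by LoadClientKeys.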
+ signers := c.signers + if len(signers) == 0 { + signers = privateKeys() + } + user, host := splitUserHost(host) + port := sshDefaultPort + var proxyCommand []string + if options != nil { + if options.port != 0 { + port = options.port + } + proxyCommand = options.proxyCommand + } + logger.Tracef(`running (equivalent of): ssh "%s@%s" -p %d '%s'`, user, host, port, shellCommand) + return &Cmd{impl: &goCryptoCommand{ + signers: signers, + user: user, + addr: net.JoinHostPort(host, strconv.Itoa(port)), + command: shellCommand, + proxyCommand: proxyCommand, + }} +} + +// Copy implements Client.Copy. +// +// Copy is currently unimplemented, and will always return an error. +func (c *GoCryptoClient) Copy(args []string, options *Options) error { + return errors.Errorf("scp command is not implemented (OpenSSH scp not available in PATH)") +} + +type goCryptoCommand struct { + signers []ssh.Signer + user string + addr string + command string + proxyCommand []string + stdin io.Reader + stdout io.Writer + stderr io.Writer + client *ssh.Client + sess *ssh.Session +} + +var sshDial = ssh.Dial + +var sshDialWithProxy = func(addr string, proxyCommand []string, config *ssh.ClientConfig) (*ssh.Client, error) { + if len(proxyCommand) == 0 { + return sshDial("tcp", addr, config) + } + // User has specified a proxy. Create a pipe and + // redirect the proxy command's stdin/stdout to it. + host, port, err := net.SplitHostPort(addr) + if err != nil { + host = addr + } + for i, arg := range proxyCommand { + arg = strings.Replace(arg, "%h", host, -1) + if port != "" { + arg = strings.Replace(arg, "%p", port, -1) + } + arg = strings.Replace(arg, "%r", config.User, -1) + proxyCommand[i] = arg + } + client, server := net.Pipe() + logger.Tracef(`executing proxy command %q`, proxyCommand) + cmd := exec.Command(proxyCommand[0], proxyCommand[1:]...) 
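+	// Give the proxy process the server end of the in-memory pipe;
+	// the SSH handshake below runs over the client end.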
+ cmd.Stdin = server + cmd.Stdout = server + cmd.Stderr = os.Stderr + if err := cmd.Start(); err != nil { + return nil, err + } + conn, chans, reqs, err := ssh.NewClientConn(client, addr, config) + if err != nil { + return nil, err + } + return ssh.NewClient(conn, chans, reqs), nil +} + +func (c *goCryptoCommand) ensureSession() (*ssh.Session, error) { + if c.sess != nil { + return c.sess, nil + } + if len(c.signers) == 0 { + return nil, errors.Errorf("no private keys available") + } + if c.user == "" { + currentUser, err := user.Current() + if err != nil { + return nil, errors.Errorf("getting current user: %v", err) + } + c.user = currentUser.Username + } + config := &ssh.ClientConfig{ + User: c.user, + Auth: []ssh.AuthMethod{ + ssh.PublicKeysCallback(func() ([]ssh.Signer, error) { + return c.signers, nil + }), + }, + } + client, err := sshDialWithProxy(c.addr, c.proxyCommand, config) + if err != nil { + return nil, err + } + sess, err := client.NewSession() + if err != nil { + client.Close() + return nil, err + } + c.client = client + c.sess = sess + c.sess.Stdin = c.stdin + c.sess.Stdout = c.stdout + c.sess.Stderr = c.stderr + return sess, nil +} + +func (c *goCryptoCommand) Start() error { + sess, err := c.ensureSession() + if err != nil { + return err + } + if c.command == "" { + return sess.Shell() + } + return sess.Start(c.command) +} + +func (c *goCryptoCommand) Close() error { + if c.sess == nil { + return nil + } + err0 := c.sess.Close() + err1 := c.client.Close() + if err0 == nil { + err0 = err1 + } + c.sess = nil + c.client = nil + return err0 +} + +func (c *goCryptoCommand) Wait() error { + if c.sess == nil { + return errors.Errorf("command has not been started") + } + err := c.sess.Wait() + c.Close() + return err +} + +func (c *goCryptoCommand) Kill() error { + if c.sess == nil { + return errors.Errorf("command has not been started") + } + return c.sess.Signal(ssh.SIGKILL) +} + +func (c *goCryptoCommand) SetStdio(stdin io.Reader, stdout, stderr io.Writer) { + c.stdin = stdin + c.stdout = stdout + c.stderr = stderr +} + +func (c *goCryptoCommand) StdinPipe() (io.WriteCloser, io.Reader, error) { + sess, err := c.ensureSession() + if err != nil { + return nil, nil, err + } + wc, err := sess.StdinPipe() + return wc, sess.Stdin, err +} + +func (c *goCryptoCommand) StdoutPipe() (io.ReadCloser, io.Writer, error) { + sess, err := c.ensureSession() + if err != nil { + return nil, nil, err + } + wc, err := sess.StdoutPipe() + return ioutil.NopCloser(wc), sess.Stdout, err +} + +func (c *goCryptoCommand) StderrPipe() (io.ReadCloser, io.Writer, error) { + sess, err := c.ensureSession() + if err != nil { + return nil, nil, err + } + wc, err := sess.StderrPipe() + return ioutil.NopCloser(wc), sess.Stderr, err +} + +func splitUserHost(s string) (user, host string) { + userHost := strings.SplitN(s, "@", 2) + if len(userHost) == 2 { + return userHost[0], userHost[1] + } + return "", userHost[0] +} === added file 'src/github.com/juju/utils/ssh/ssh_gocrypto_test.go' --- src/github.com/juju/utils/ssh/ssh_gocrypto_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/juju/utils/ssh/ssh_gocrypto_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,197 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package ssh_test + +import ( + "encoding/binary" + "errors" + "fmt" + "io/ioutil" + "net" + "os/exec" + "path/filepath" + "sync" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + cryptossh "golang.org/x/crypto/ssh" + gc "gopkg.in/check.v1" + + "github.com/juju/utils/ssh" +) + +var ( + testCommand = []string{"echo", "$abc"} + testCommandFlat = `echo "\$abc"` +) + +type sshServer struct { + cfg *cryptossh.ServerConfig + listener net.Listener + client *cryptossh.Client +} + +func newServer(c *gc.C) *sshServer { + private, _, err := ssh.GenerateKey("test-server") + c.Assert(err, jc.ErrorIsNil) + key, err := cryptossh.ParsePrivateKey([]byte(private)) + c.Assert(err, jc.ErrorIsNil) + server := &sshServer{ + cfg: &cryptossh.ServerConfig{}, + } + server.cfg.AddHostKey(key) + server.listener, err = net.Listen("tcp", "127.0.0.1:0") + c.Assert(err, jc.ErrorIsNil) + return server +} + +func (s *sshServer) run(c *gc.C) { + netconn, err := s.listener.Accept() + c.Assert(err, jc.ErrorIsNil) + defer netconn.Close() + conn, chans, reqs, err := cryptossh.NewServerConn(netconn, s.cfg) + c.Assert(err, jc.ErrorIsNil) + s.client = cryptossh.NewClient(conn, chans, reqs) + var wg sync.WaitGroup + defer wg.Wait() + sessionChannels := s.client.HandleChannelOpen("session") + c.Assert(sessionChannels, gc.NotNil) + for newChannel := range sessionChannels { + c.Assert(newChannel.ChannelType(), gc.Equals, "session") + channel, reqs, err := newChannel.Accept() + c.Assert(err, jc.ErrorIsNil) + wg.Add(1) + go func() { + defer wg.Done() + defer channel.Close() + for req := range reqs { + switch req.Type { + case "exec": + c.Assert(req.WantReply, jc.IsTrue) + n := binary.BigEndian.Uint32(req.Payload[:4]) + command := string(req.Payload[4 : n+4]) + c.Assert(command, gc.Equals, testCommandFlat) + req.Reply(true, nil) + channel.Write([]byte("abc value\n")) + _, err := channel.SendRequest("exit-status", false, cryptossh.Marshal(&struct{ n uint32 }{0})) + c.Check(err, jc.ErrorIsNil) + return + default: + c.Errorf("Unexpected request type: %v", req.Type) + return + } + } + }() + } +} + +type SSHGoCryptoCommandSuite struct { + testing.IsolationSuite + client ssh.Client +} + +var _ = gc.Suite(&SSHGoCryptoCommandSuite{}) + +func (s *SSHGoCryptoCommandSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + generateKeyRestorer := overrideGenerateKey(c) + s.AddCleanup(func(*gc.C) { generateKeyRestorer.Restore() }) + client, err := ssh.NewGoCryptoClient() + c.Assert(err, jc.ErrorIsNil) + s.client = client +} + +func (s *SSHGoCryptoCommandSuite) TestNewGoCryptoClient(c *gc.C) { + _, err := ssh.NewGoCryptoClient() + c.Assert(err, jc.ErrorIsNil) + private, _, err := ssh.GenerateKey("test-client") + c.Assert(err, jc.ErrorIsNil) + key, err := cryptossh.ParsePrivateKey([]byte(private)) + c.Assert(err, jc.ErrorIsNil) + _, err = ssh.NewGoCryptoClient(key) + c.Assert(err, jc.ErrorIsNil) +} + +func (s *SSHGoCryptoCommandSuite) TestClientNoKeys(c *gc.C) { + client, err := ssh.NewGoCryptoClient() + c.Assert(err, jc.ErrorIsNil) + cmd := client.Command("0.1.2.3", []string{"echo", "123"}, nil) + _, err = cmd.Output() + c.Assert(err, gc.ErrorMatches, "no private keys available") + defer ssh.ClearClientKeys() + err = ssh.LoadClientKeys(c.MkDir()) + c.Assert(err, jc.ErrorIsNil) + + s.PatchValue(ssh.SSHDial, func(network, address string, cfg *cryptossh.ClientConfig) (*cryptossh.Client, error) { + return nil, errors.New("ssh.Dial failed") + }) + cmd = client.Command("0.1.2.3", []string{"echo", "123"}, nil) + _, err = 
cmd.Output()
+	// error message differs based on whether using cgo or not
+	c.Assert(err, gc.ErrorMatches, "ssh.Dial failed")
+}
+
+func (s *SSHGoCryptoCommandSuite) TestCommand(c *gc.C) {
+	private, _, err := ssh.GenerateKey("test-server")
+	c.Assert(err, jc.ErrorIsNil)
+	key, err := cryptossh.ParsePrivateKey([]byte(private))
+	c.Assert(err, jc.ErrorIsNil)
+	client, err := ssh.NewGoCryptoClient(key)
+	c.Assert(err, jc.ErrorIsNil)
+	server := newServer(c)
+	var opts ssh.Options
+	opts.SetPort(server.listener.Addr().(*net.TCPAddr).Port)
+	cmd := client.Command("127.0.0.1", testCommand, &opts)
+	checkedKey := false
+	server.cfg.PublicKeyCallback = func(conn cryptossh.ConnMetadata, pubkey cryptossh.PublicKey) (*cryptossh.Permissions, error) {
+		c.Check(pubkey, gc.DeepEquals, key.PublicKey())
+		checkedKey = true
+		return nil, nil
+	}
+	go server.run(c)
+	out, err := cmd.Output()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(string(out), gc.Equals, "abc value\n")
+	c.Assert(checkedKey, jc.IsTrue)
+}
+
+func (s *SSHGoCryptoCommandSuite) TestCopy(c *gc.C) {
+	client, err := ssh.NewGoCryptoClient()
+	c.Assert(err, jc.ErrorIsNil)
+	err = client.Copy([]string{"0.1.2.3:b", c.MkDir()}, nil)
+	c.Assert(err, gc.ErrorMatches, `scp command is not implemented \(OpenSSH scp not available in PATH\)`)
+}
+
+func (s *SSHGoCryptoCommandSuite) TestProxyCommand(c *gc.C) {
+	realNetcat, err := exec.LookPath("nc")
+	if err != nil {
+		c.Skip(fmt.Sprintf("skipping test, couldn't find netcat: %v", err))
+		return
+	}
+	netcat := filepath.Join(c.MkDir(), "nc")
+	err = ioutil.WriteFile(netcat, []byte("#!/bin/sh\necho $0 \"$@\" > $0.args && exec "+realNetcat+" \"$@\""), 0755)
+	c.Assert(err, jc.ErrorIsNil)
+
+	private, _, err := ssh.GenerateKey("test-server")
+	c.Assert(err, jc.ErrorIsNil)
+	key, err := cryptossh.ParsePrivateKey([]byte(private))
+	c.Assert(err, jc.ErrorIsNil)
+	client, err := ssh.NewGoCryptoClient(key)
+	c.Assert(err, jc.ErrorIsNil)
+	server := newServer(c)
+	var opts ssh.Options
+	port := server.listener.Addr().(*net.TCPAddr).Port
+	opts.SetProxyCommand(netcat, "-q0", "%h", "%p")
+	opts.SetPort(port)
+	cmd := client.Command("127.0.0.1", testCommand, &opts)
+	server.cfg.PublicKeyCallback = func(_ cryptossh.ConnMetadata, pubkey cryptossh.PublicKey) (*cryptossh.Permissions, error) {
+		return nil, nil
+	}
+	go server.run(c)
+	out, err := cmd.Output()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(string(out), gc.Equals, "abc value\n")
+	// Ensure the proxy command was executed with the appropriate arguments.
+	data, err := ioutil.ReadFile(netcat + ".args")
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(string(data), gc.Equals, fmt.Sprintf("%s -q0 127.0.0.1 %v\n", netcat, port))
+}
=== added file 'src/github.com/juju/utils/ssh/ssh_openssh.go'
--- src/github.com/juju/utils/ssh/ssh_openssh.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/ssh_openssh.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,197 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package ssh
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+)
+
+var opensshCommonOptions = []string{"-o", "StrictHostKeyChecking no"}
+
+// default identities will not be attempted if
+// -i is specified and they are not explicitly
+// included.
+var defaultIdentities = []string{ + "~/.ssh/identity", + "~/.ssh/id_rsa", + "~/.ssh/id_dsa", + "~/.ssh/id_ecdsa", +} + +type opensshCommandKind int + +const ( + sshKind opensshCommandKind = iota + scpKind +) + +// sshpassWrap wraps the command/args with sshpass if it is found in $PATH +// and the SSHPASS environment variable is set. Otherwise, the original +// command/args are returned. +func sshpassWrap(cmd string, args []string) (string, []string) { + if os.Getenv("SSHPASS") != "" { + if path, err := exec.LookPath("sshpass"); err == nil { + return path, append([]string{"-e", cmd}, args...) + } + } + return cmd, args +} + +// OpenSSHClient is an implementation of Client that +// uses the ssh and scp executables found in $PATH. +type OpenSSHClient struct{} + +// NewOpenSSHClient creates a new OpenSSHClient. +// If the ssh and scp programs cannot be found +// in $PATH, then an error is returned. +func NewOpenSSHClient() (*OpenSSHClient, error) { + var c OpenSSHClient + if _, err := exec.LookPath("ssh"); err != nil { + return nil, err + } + if _, err := exec.LookPath("scp"); err != nil { + return nil, err + } + return &c, nil +} + +func opensshOptions(options *Options, commandKind opensshCommandKind) []string { + args := append([]string{}, opensshCommonOptions...) + if options == nil { + options = &Options{} + } + if len(options.proxyCommand) > 0 { + args = append(args, "-o", "ProxyCommand "+utils.CommandString(options.proxyCommand...)) + } + if !options.passwordAuthAllowed { + args = append(args, "-o", "PasswordAuthentication no") + } + + // We must set ServerAliveInterval or the server may + // think we've become unresponsive on long running + // command executions such as "apt-get upgrade". + args = append(args, "-o", "ServerAliveInterval 30") + + if options.allocatePTY { + args = append(args, "-t", "-t") // twice to force + } + if options.knownHostsFile != "" { + args = append(args, "-o", "UserKnownHostsFile "+utils.CommandString(options.knownHostsFile)) + } + identities := append([]string{}, options.identities...) + if pk := PrivateKeyFiles(); len(pk) > 0 { + // Add client keys as implicit identities + identities = append(identities, pk...) + } + // If any identities are specified, the + // default ones must be explicitly specified. + if len(identities) > 0 { + for _, identity := range defaultIdentities { + path, err := utils.NormalizePath(identity) + if err != nil { + logger.Warningf("failed to normalize path %q: %v", identity, err) + continue + } + if _, err := os.Stat(path); err == nil { + identities = append(identities, path) + } + } + } + for _, identity := range identities { + args = append(args, "-i", identity) + } + if options.port != 0 { + port := fmt.Sprint(options.port) + if commandKind == scpKind { + // scp uses -P instead of -p (-p means preserve). + args = append(args, "-P", port) + } else { + args = append(args, "-p", port) + } + } + return args +} + +// Command implements Client.Command. +func (c *OpenSSHClient) Command(host string, command []string, options *Options) *Cmd { + args := opensshOptions(options, sshKind) + args = append(args, host) + if len(command) > 0 { + args = append(args, command...) + } + bin, args := sshpassWrap("ssh", args) + logger.Tracef("running: %s %s", bin, utils.CommandString(args...)) + return &Cmd{impl: &opensshCmd{exec.Command(bin, args...)}} +} + +// Copy implements Client.Copy. 
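+//
+// A minimal usage sketch (the local and remote paths are illustrative
+// assumptions):
+//
+//	err := client.Copy([]string{"/tmp/file", "ubuntu@host:file"}, nil)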
+func (c *OpenSSHClient) Copy(args []string, userOptions *Options) error {
+	var options Options
+	if userOptions != nil {
+		options = *userOptions
+		options.allocatePTY = false // doesn't make sense for scp
+	}
+	allArgs := opensshOptions(&options, scpKind)
+	allArgs = append(allArgs, args...)
+	bin, allArgs := sshpassWrap("scp", allArgs)
+	cmd := exec.Command(bin, allArgs...)
+	var stderr bytes.Buffer
+	cmd.Stderr = &stderr
+	logger.Tracef("running: %s %s", bin, utils.CommandString(allArgs...))
+	if err := cmd.Run(); err != nil {
+		stderr := strings.TrimSpace(stderr.String())
+		if len(stderr) > 0 {
+			err = errors.Errorf("%v (%v)", err, stderr)
+		}
+		return err
+	}
+	return nil
+}
+
+type opensshCmd struct {
+	*exec.Cmd
+}
+
+func (c *opensshCmd) SetStdio(stdin io.Reader, stdout, stderr io.Writer) {
+	c.Stdin, c.Stdout, c.Stderr = stdin, stdout, stderr
+}
+
+func (c *opensshCmd) StdinPipe() (io.WriteCloser, io.Reader, error) {
+	wc, err := c.Cmd.StdinPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+	return wc, c.Stdin, nil
+}
+
+func (c *opensshCmd) StdoutPipe() (io.ReadCloser, io.Writer, error) {
+	rc, err := c.Cmd.StdoutPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+	return rc, c.Stdout, nil
+}
+
+func (c *opensshCmd) StderrPipe() (io.ReadCloser, io.Writer, error) {
+	rc, err := c.Cmd.StderrPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+	return rc, c.Stderr, nil
+}
+
+func (c *opensshCmd) Kill() error {
+	if c.Process == nil {
+		return errors.Errorf("process has not been started")
+	}
+	return c.Process.Kill()
+}
=== added file 'src/github.com/juju/utils/ssh/ssh_test.go'
--- src/github.com/juju/utils/ssh/ssh_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/ssh_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,235 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package ssh_test
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/juju/cmd"
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/utils/ssh"
+)
+
+const (
+	echoCommand = "/bin/echo"
+	echoScript  = "#!/bin/sh\n" + echoCommand + " $0 \"$@\" | /usr/bin/tee $0.args"
+)
+
+type SSHCommandSuite struct {
+	testing.IsolationSuite
+	originalPath string
+	testbin      string
+	fakessh      string
+	fakescp      string
+	client       ssh.Client
+}
+
+var _ = gc.Suite(&SSHCommandSuite{})
+
+func (s *SSHCommandSuite) SetUpTest(c *gc.C) {
+	s.IsolationSuite.SetUpTest(c)
+	s.testbin = c.MkDir()
+	s.fakessh = filepath.Join(s.testbin, "ssh")
+	s.fakescp = filepath.Join(s.testbin, "scp")
+	err := ioutil.WriteFile(s.fakessh, []byte(echoScript), 0755)
+	c.Assert(err, jc.ErrorIsNil)
+	err = ioutil.WriteFile(s.fakescp, []byte(echoScript), 0755)
+	c.Assert(err, jc.ErrorIsNil)
+	s.PatchEnvPathPrepend(s.testbin)
+	s.client, err = ssh.NewOpenSSHClient()
+	c.Assert(err, jc.ErrorIsNil)
+	s.PatchValue(ssh.DefaultIdentities, nil)
+}
+
+func (s *SSHCommandSuite) command(args ...string) *ssh.Cmd {
+	return s.commandOptions(args, nil)
+}
+
+func (s *SSHCommandSuite) commandOptions(args []string, opts *ssh.Options) *ssh.Cmd {
+	return s.client.Command("localhost", args, opts)
+}
+
+func (s *SSHCommandSuite) assertCommandArgs(c *gc.C, cmd *ssh.Cmd, expected string) {
+	out, err := cmd.Output()
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(strings.TrimSpace(string(out)), gc.Equals, expected)
+}
+
+func (s *SSHCommandSuite) TestDefaultClient(c *gc.C) {
+	ssh.InitDefaultClient()
+	c.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.OpenSSHClient{})
+	s.PatchEnvironment("PATH", "")
+	ssh.InitDefaultClient()
+	c.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.GoCryptoClient{})
+}
+
+func (s *SSHCommandSuite) TestCommandSSHPass(c *gc.C) {
+	// First create a fake sshpass, but don't set $SSHPASS
+	fakesshpass := filepath.Join(s.testbin, "sshpass")
+	err := ioutil.WriteFile(fakesshpass, []byte(echoScript), 0755)
+	c.Assert(err, jc.ErrorIsNil)
+	s.assertCommandArgs(c, s.command(echoCommand, "123"),
+		fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123",
+			s.fakessh, echoCommand),
+	)
+	// Now set $SSHPASS.
+	s.PatchEnvironment("SSHPASS", "anyoldthing")
+	s.assertCommandArgs(c, s.command(echoCommand, "123"),
+		fmt.Sprintf("%s -e ssh -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123",
+			fakesshpass, echoCommand),
+	)
+	// Finally, remove sshpass from $PATH.
+ err = os.Remove(fakesshpass) + c.Assert(err, jc.ErrorIsNil) + s.assertCommandArgs(c, s.command(echoCommand, "123"), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCommand(c *gc.C) { + s.assertCommandArgs(c, s.command(echoCommand, "123"), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCommandEnablePTY(c *gc.C) { + var opts ssh.Options + opts.EnablePTY() + s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -t -t localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCommandSetKnownHostsFile(c *gc.C) { + var opts ssh.Options + opts.SetKnownHostsFile("/tmp/known hosts") + s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -o UserKnownHostsFile \"/tmp/known hosts\" localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCommandAllowPasswordAuthentication(c *gc.C) { + var opts ssh.Options + opts.AllowPasswordAuthentication() + s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o ServerAliveInterval 30 localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCommandIdentities(c *gc.C) { + var opts ssh.Options + opts.SetIdentities("x", "y") + s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCommandPort(c *gc.C) { + var opts ssh.Options + opts.SetPort(2022) + s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts), + fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -p 2022 localhost %s 123", + s.fakessh, echoCommand), + ) +} + +func (s *SSHCommandSuite) TestCopy(c *gc.C) { + var opts ssh.Options + opts.EnablePTY() + opts.AllowPasswordAuthentication() + opts.SetIdentities("x", "y") + opts.SetPort(2022) + err := s.client.Copy([]string{"/tmp/blah", "foo@bar.com:baz"}, &opts) + c.Assert(err, jc.ErrorIsNil) + out, err := ioutil.ReadFile(s.fakescp + ".args") + c.Assert(err, jc.ErrorIsNil) + // EnablePTY has no effect for Copy + c.Assert(string(out), gc.Equals, s.fakescp+" -o StrictHostKeyChecking no -o ServerAliveInterval 30 -i x -i y -P 2022 /tmp/blah foo@bar.com:baz\n") + + // Try passing extra args + err = s.client.Copy([]string{"/tmp/blah", "foo@bar.com:baz", "-r", "-v"}, &opts) + c.Assert(err, jc.ErrorIsNil) + out, err = ioutil.ReadFile(s.fakescp + ".args") + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), gc.Equals, s.fakescp+" -o StrictHostKeyChecking no -o ServerAliveInterval 30 -i x -i y -P 2022 /tmp/blah foo@bar.com:baz -r -v\n") + + // Try interspersing extra args + err = s.client.Copy([]string{"-r", "/tmp/blah", "-v", "foo@bar.com:baz"}, &opts) + c.Assert(err, jc.ErrorIsNil) + out, err = ioutil.ReadFile(s.fakescp + ".args") + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), gc.Equals, s.fakescp+" -o 
StrictHostKeyChecking no -o ServerAliveInterval 30 -i x -i y -P 2022 -r /tmp/blah -v foo@bar.com:baz\n")
+}
+
+func (s *SSHCommandSuite) TestCommandClientKeys(c *gc.C) {
+	defer overrideGenerateKey(c).Restore()
+	clientKeysDir := c.MkDir()
+	defer ssh.ClearClientKeys()
+	err := ssh.LoadClientKeys(clientKeysDir)
+	c.Assert(err, jc.ErrorIsNil)
+	ck := filepath.Join(clientKeysDir, "juju_id_rsa")
+	var opts ssh.Options
+	opts.SetIdentities("x", "y")
+	s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts),
+		fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y -i %s localhost %s 123",
+			s.fakessh, ck, echoCommand),
+	)
+}
+
+func (s *SSHCommandSuite) TestCommandError(c *gc.C) {
+	var opts ssh.Options
+	err := ioutil.WriteFile(s.fakessh, []byte("#!/bin/sh\nexit 42"), 0755)
+	c.Assert(err, jc.ErrorIsNil)
+	command := s.client.Command("ignored", []string{echoCommand, "foo"}, &opts)
+	err = command.Run()
+	c.Assert(cmd.IsRcPassthroughError(err), jc.IsTrue)
+}
+
+func (s *SSHCommandSuite) TestCommandDefaultIdentities(c *gc.C) {
+	var opts ssh.Options
+	tempdir := c.MkDir()
+	def1 := filepath.Join(tempdir, "def1")
+	def2 := filepath.Join(tempdir, "def2")
+	s.PatchValue(ssh.DefaultIdentities, []string{def1, def2})
+	// If no identities are specified, then the defaults aren't added.
+	s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts),
+		fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123",
+			s.fakessh, echoCommand),
+	)
+	// If identities are specified, then the defaults are added as well.
+	// Only the defaults that exist on disk will be added.
+	err := ioutil.WriteFile(def2, nil, 0644)
+	c.Assert(err, jc.ErrorIsNil)
+	opts.SetIdentities("x", "y")
+	s.assertCommandArgs(c, s.commandOptions([]string{echoCommand, "123"}, &opts),
+		fmt.Sprintf("%s -o StrictHostKeyChecking no -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y -i %s localhost %s 123",
+			s.fakessh, def2, echoCommand),
+	)
+}
+
+func (s *SSHCommandSuite) TestCopyReader(c *gc.C) {
+	client := &fakeClient{}
+	r := bytes.NewBufferString("")
+
+	err := ssh.TestCopyReader(client, "foo@bar.com:baz", "/tmp/blah", r, nil)
+	c.Assert(err, jc.ErrorIsNil)
+
+	client.checkCalls(c, "foo@bar.com:baz", []string{"cat - > /tmp/blah"}, nil, nil, "Command")
+	client.impl.checkCalls(c, r, nil, nil, "SetStdio", "Start", "Wait")
+}
=== added directory 'src/github.com/juju/utils/ssh/testing'
=== added file 'src/github.com/juju/utils/ssh/testing/keys.go'
--- src/github.com/juju/utils/ssh/testing/keys.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/ssh/testing/keys.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,84 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package testing + +type SSHKey struct { + Key string + Fingerprint string +} + +var ( + ValidKeyOne = SSHKey{ + `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEX/dPu4PmtvgK3La9zioCEDrJ` + + `yUr6xEIK7Pr+rLgydcqWTU/kt7w7gKjOw4vvzgHfjKl09CWyvgb+y5dCiTk` + + `9MxI+erGNhs3pwaoS+EavAbawB7iEqYyTep3YaJK+4RJ4OX7ZlXMAIMrTL+` + + `UVrK89t56hCkFYaAgo3VY+z6rb/b3bDBYtE1Y2tS7C3au73aDgeb9psIrSV` + + `86ucKBTl5X62FnYiyGd++xCnLB6uLximM5OKXfLzJQNS/QyZyk12g3D8y69` + + `Xw1GzCSKX1u1+MQboyf0HJcG2ryUCLHdcDVppApyHx2OLq53hlkQ/yxdflD` + + `qCqAE4j+doagSsIfC1T2T`, + "86:ed:1b:cd:26:a0:a3:4c:27:35:49:60:95:b7:0f:68", + } + + ValidKeyTwo = SSHKey{ + `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNC6zK8UMazlVgp8en8N7m7H/Y6` + + `DoMWbmPFjXYRXu6iQJJ18hCtsfMe63E5/PBaOjDT8am0Sx3Eqn4ZzpWMj+z` + + `knTcSd8xnMHYYxH2HStRWC1akTe4tTno2u2mqzjKd8f62URPtIocYCNRBls` + + `9yjnq9SogI5EXgcx6taQcrIFcIK0SlthxxcMVSlLpnbReujW65JHtiMqoYA` + + `OIALyO+Rkmtvb/ObmViDnwCKCN1up/xWt6J10MrAUtpI5b4prqG7FOqVMM/` + + `zdgrVg6rUghnzdYeQ8QMyEv4mVSLzX0XIPcxorkl9q06s5mZmAzysEbKZCO` + + `aXcLeNlXx/nkmuWslYCJ`, + "2f:fb:b0:65:68:c8:4e:a6:1b:a6:4b:8d:14:0b:40:79", + } + + ValidKeyThree = SSHKey{ + `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpGj1JMjGjAFt5wjARbIORyjQ/c` + + `ZAiDyDHe/w8qmLKUG2KTs6586QqqM6DKPZiYesrzXqvZsWYV4B6OjLM1sxq` + + `WjeDIl56PSnJ0+KP8pUV9KTkkKtRXxAoNg/II4l69e05qGffj9AcQ/7JPxx` + + `eL14Ulvh/a69r3uVkw1UGVk9Bwm4eCOSCqKalYLA1k5da6crEAXn9hiXLGs` + + `S9dOn3Lsqj5tK31aaUncue+a3iKb7R5LRFflDizzNS+h8tPuANQflOjOhR0` + + `Vas0BsurgISseZZ0NIMISyWhZpr0eOBWA/YruN9r++kYPOnDy0eMaOVGLO7` + + `SQwJ/6QHvf73yksJTncz`, + "1d:cf:ab:66:8a:f6:77:fb:4c:b2:59:6f:12:cf:cb:2f", + } + + ValidKeyFour = SSHKey{ + `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCSEDMH5RyjGtEMIqM2RiPYYQgUK` + + `9wdHCo1/AXkuQ7m1iVjHhACp8Oawf2Grn7hO4e0JUn5FaEZOnDj/9HB2VPw` + + `EDGBwSN1caVC3yrTVkqQcsxBY9nTV+spQQMsePOdUZALcoEilvAcLRETbyn` + + `rybaS2bfzpqbA9MEEaKQKLKGdgqiMdNXAj5I/ik/BPp0ziOMlMl1A1zilnS` + + `UXubs1U49WWV0A70vAASvZVTXr3zrPAmstH+9Ik6FdpeE99um08FXxKYWqZ` + + `6rZF1M6L1/SqC7ediYdVgRCoti85kKhi7fZBzwrGcCnxer+D0GFz++KDSNS` + + `iAnVZxyXhmBrwnR6Q/v7`, + "37:99:ab:96:c4:e8:f8:0b:0d:04:3e:1e:ee:66:e8:9e", + } + + ValidKeyMulti = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDW+8zWO6qqXrHlcMK7obliuYp7D` + + `vZBsK6rHlnbeV5Hh38Qn0GUX4Ahm6XeQ/NSx53wqkBQDGOJFY3s4w1a/hbd` + + `PyLM2/yFXCYsj5FRf01JmUjAzWhuJMH9ViqzD//l4v8cR/pHC2B8PD6abKd` + + `mIH+yLI9Cl3C4ICMKteG54egsUyboBOVKCDIKmWRLAak6sE5DPpqKF53NvD` + + `cuDufWtaCfVAOrq6NW8wSQ7PAvfDh8gsG5uvZjY3gcWl9yI3EJVGFHcdxcv` + + `4LtQI8mKdeg3JoufnEmeBJTZMoo83Gru5Z7tjv8J4JTUeQpd9uCCED1JAMe` + + `cJSKgQ2gZMTbTshobpHr` + "\n" + + `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSgfrzyGpE5eLiXusvLcxEmoE6e` + + `SMUDvTW1dd2BZgfvUVwq+toQdZ6C0C1JmbC3X563n8fmKVUAQGo5JavzABG` + + `Kpy90L3cwoGCFtb+A28YsT+bfuP+LdnCbFXm9c3DPJQx6Dch8prnDtzRjRV` + + `CorbPvm35NY73liUXVF6g58Owlx5rWtb8OnoTh5KQps9JTSfyNckdV9bFxP` + + `7bZvMyRYW5X33KaA+CQGpTNAKDHruSuKdAdaS6rBIZRvzzzSCF28BWwFL7Z` + + `ghQo0ADlUMnqIeQ58nwRImZHpmvadsZi47aMKFeykk4JQUQlwjbM0xGi0uj` + + `+hlaqGYbNo0Evcjn23cj` + + PartValidKeyMulti = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZRvG2miYVkbWOr2I+9xHWXqALb` + + `eBcyxAlYtbjxBRwrq8oFOw9vtIIZSO0r1FM6+JHzKhLSiPCMR/PK78ZqPgZ` + + `fia8Y7cEZKaUWLtZUAl0RF9w8EtsA/2gpuLZErjcoIx6fzfEYFCJcLgcQSc` + + `RlKG8VZT6tWIjvoLj9ki6unkG5YGmapkT60afhf3/vd7pCJO/uyszkQ9qU8` + + `odUDTTlwftpJtUb8xGmzpEZJTgk1lbZKlZm5pVXwjNEodH7Je88RBzR7PBB` + + `Jct+vf8wVJ/UEFXCnamvHLanJTcJIi/I5qRlKns65Bwb8M0HszPYmvTfFRD` + + `ZLi3sPUmw6PJCJ0SgATd` + "\n" + + `ssh-rsa bad key` + + MultiInvalid = `ssh-rsa bad key` + "\n" + + `ssh-rsa also 
bad`
+
+	EmptyKeyMulti = ""
+)
=== modified file 'src/github.com/juju/utils/systemerrmessages_unix.go'
--- src/github.com/juju/utils/systemerrmessages_unix.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/utils/systemerrmessages_unix.go 2016-03-22 15:18:22 +0000
@@ -12,4 +12,5 @@
 const (
 	NoSuchUserErrRegexp = `user: unknown user [a-z0-9_-]*`
 	NoSuchFileErrRegexp = `no such file or directory`
+	MkdirFailErrRegexp  = `.* not a directory`
 )
=== modified file 'src/github.com/juju/utils/systemerrmessages_windows.go'
--- src/github.com/juju/utils/systemerrmessages_windows.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/utils/systemerrmessages_windows.go 2016-03-22 15:18:22 +0000
@@ -10,4 +10,5 @@
 const (
 	NoSuchUserErrRegexp = `No mapping between account names and security IDs was done\.`
 	NoSuchFileErrRegexp = `The system cannot find the (file|path) specified\.`
+	MkdirFailErrRegexp  = `mkdir .*` + NoSuchFileErrRegexp
 )
=== modified file 'src/github.com/juju/utils/tar/tar.go'
--- src/github.com/juju/utils/tar/tar.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/utils/tar/tar.go 2016-03-22 15:18:22 +0000
@@ -167,22 +167,20 @@
 			return fmt.Errorf("failed while reading tar header: %v", err)
 		}
 		fullPath := filepath.Join(outputFolder, hdr.Name)
-		if hdr.Typeflag == tar.TypeDir {
+		switch hdr.Typeflag {
+		case tar.TypeDir:
 			if err = os.MkdirAll(fullPath, os.FileMode(hdr.Mode)); err != nil {
 				return fmt.Errorf("cannot extract directory %q: %v", fullPath, err)
 			}
-			continue
-		}
-		if hdr.Typeflag == tar.TypeSymlink {
+		case tar.TypeSymlink:
 			if err = symlink.New(hdr.Linkname, fullPath); err != nil {
 				return fmt.Errorf("cannot extract symlink %q to %q: %v", hdr.Linkname, fullPath, err)
 			}
-			continue
-		}
-		if err = createAndFill(fullPath, hdr.Mode, tr); err != nil {
-			return fmt.Errorf("cannot extract file %q: %v", fullPath, err)
-		}
-
+		case tar.TypeReg, tar.TypeRegA:
+			if err = createAndFill(fullPath, hdr.Mode, tr); err != nil {
+				return fmt.Errorf("cannot extract file %q: %v", fullPath, err)
+			}
+		}
 	}
 	return nil
 }
=== modified file 'src/github.com/juju/utils/tar/tar_test.go'
--- src/github.com/juju/utils/tar/tar_test.go 2015-03-26 15:54:39 +0000
+++ src/github.com/juju/utils/tar/tar_test.go 2016-03-22 15:18:22 +0000
@@ -285,3 +285,25 @@
 	c.Check(err, gc.ErrorMatches, "does_not_exist not found")
 }
+
+func (t *TarSuite) TestUntarFilesHeadersIgnored(c *gc.C) {
+	var buf bytes.Buffer
+	w := tar.NewWriter(&buf)
+	err := w.WriteHeader(&tar.Header{
+		Name:     "pax_global_header",
+		Typeflag: tar.TypeXGlobalHeader,
+	})
+	c.Assert(err, gc.IsNil)
+	err = w.Flush()
+	c.Assert(err, gc.IsNil)
+
+	err = UntarFiles(&buf, t.cwd)
+	c.Assert(err, gc.IsNil)
+	err = filepath.Walk(t.cwd, func(path string, finfo os.FileInfo, err error) error {
+		if path != t.cwd {
+			return fmt.Errorf("unexpected file: %v", path)
+		}
+		return err
+	})
+	c.Assert(err, gc.IsNil)
+}
=== added file 'src/github.com/juju/utils/timer.go'
--- src/github.com/juju/utils/timer.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/timer.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,129 @@
+// Copyright 2015 Canonical Ltd.
+// Copyright 2015 Cloudbase Solutions SRL
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"time"
+
+	"github.com/juju/utils/clock"
+)
+
+// Countdown implements a timer that will call a provided function
+// after an internally stored duration. The steps as well as min and max
+// durations are declared upon initialization and depend on
+// the particular implementation.
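+//
+// A minimal usage sketch with the BackoffTimer implementation below
+// (clock.WallClock is an assumption here; any clock.Clock works, e.g.
+// a fake clock in tests):
+//
+//	t := NewBackoffTimer(BackoffTimerConfig{
+//		Min:    time.Second,
+//		Max:    time.Minute,
+//		Factor: 2,
+//		Func:   func() { fmt.Println("fired") },
+//		Clock:  clock.WallClock,
+//	})
+//	t.Start() // calls Func after ~1s, backing off towards Max on each Start.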
+type Countdown interface {
+	// Reset stops the timer and resets its duration to the minimum one.
+	// Start must be called to start the timer again.
+	Reset()
+
+	// Start starts the internal timer.
+	// At the end of the timer, if Reset hasn't been called in the meantime,
+	// Func will be called and the duration is increased for the next call.
+	Start()
+}
+
+// NewBackoffTimer creates and initializes a new BackoffTimer.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+func NewBackoffTimer(config BackoffTimerConfig) *BackoffTimer {
+	return &BackoffTimer{
+		config:          config,
+		currentDuration: config.Min,
+	}
+}
+
+// BackoffTimer implements Countdown.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+type BackoffTimer struct {
+	config BackoffTimerConfig
+
+	timer           clock.Timer
+	currentDuration time.Duration
+}
+
+// BackoffTimerConfig is a helper struct for backoff timer
+// that encapsulates config information.
+type BackoffTimerConfig struct {
+	// The minimum duration after which Func is called.
+	Min time.Duration
+
+	// The maximum duration after which Func is called.
+	Max time.Duration
+
+	// Determines whether a small randomization is applied to
+	// the duration.
+	Jitter bool
+
+	// The factor by which you want the duration to increase
+	// every time.
+	Factor int64
+
+	// Func is the function that will be called when the countdown reaches 0.
+	Func func()
+
+	// Clock provides the AfterFunc function used to call Func.
+	// It is exposed here so it's easier to mock it in tests.
+	Clock clock.Clock
+}
+
+// Start implements the Countdown interface.
+// Any existing timer execution is stopped before
+// a new one is created.
+func (t *BackoffTimer) Start() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	t.timer = t.config.Clock.AfterFunc(t.currentDuration, t.config.Func)
+
+	// Since it's a backoff timer we will increase
+	// the duration after each signal.
+	t.increaseDuration()
+}
+
+// Reset implements the Countdown interface.
+func (t *BackoffTimer) Reset() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	if t.currentDuration > t.config.Min {
+		t.currentDuration = t.config.Min
+	}
+}
+
+// increaseDuration will increase the duration based on
+// the current value and the factor. If jitter is true
+// it will add a random jitter of up to ±3% to the final value.
+func (t *BackoffTimer) increaseDuration() {
+	current := int64(t.currentDuration)
+	nextDuration := time.Duration(current * t.config.Factor)
+	if t.config.Jitter {
+		// Get a factor in [-1; 1].
+		randFactor := (rand.Float64() * 2) - 1
+		jitter := float64(nextDuration) * randFactor * 0.03
+		nextDuration = nextDuration + time.Duration(jitter)
+	}
+	if nextDuration > t.config.Max {
+		nextDuration = t.config.Max
+	}
+	t.currentDuration = nextDuration
+}
=== added file 'src/github.com/juju/utils/timer_test.go'
--- src/github.com/juju/utils/timer_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/timer_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,182 @@
+// Copyright 2015 Canonical Ltd.
+// Copyright 2015 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils_test
+
+import (
+	"math"
+	"time"
+
+	gc "gopkg.in/check.v1"
+
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/utils"
+	"github.com/juju/utils/clock"
+)
+
+type TestStdTimer struct {
+	stdStub *testing.Stub
+}
+
+func (t *TestStdTimer) Stop() bool {
+	t.stdStub.AddCall("Stop")
+	return true
+}
+
+func (t *TestStdTimer) Reset(d time.Duration) bool {
+	t.stdStub.AddCall("Reset", d)
+	return true
+}
+
+type timerSuite struct {
+	baseSuite        testing.CleanupSuite
+	timer            *utils.BackoffTimer
+	afterFuncCalls   int64
+	properFuncCalled bool
+	stub             *testing.Stub
+
+	min    time.Duration
+	max    time.Duration
+	factor int64
+}
+
+var _ = gc.Suite(&timerSuite{})
+
+type mockClock struct {
+	stub             *testing.Stub
+	c                *gc.C
+	afterFuncCalls   *int64
+	properFuncCalled *bool
+}
+
+// These two methods are not used here but are needed to satisfy the interface.
+func (c *mockClock) Now() time.Time                         { return time.Now() }
+func (c *mockClock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (c *mockClock) AfterFunc(d time.Duration, f func()) clock.Timer {
+	*c.afterFuncCalls++
+	f()
+	c.c.Assert(*c.properFuncCalled, jc.IsTrue)
+	*c.properFuncCalled = false
+	return &TestStdTimer{c.stub}
+}
+
+func (s *timerSuite) SetUpTest(c *gc.C) {
+	s.baseSuite.SetUpTest(c)
+	s.afterFuncCalls = 0
+	s.stub = &testing.Stub{}
+
+	// This, along with the assertion in mockClock.AfterFunc above,
+	// checks that mockFunc is indeed the function passed to
+	// AfterFunc for execution.
+	mockFunc := func() { s.properFuncCalled = true }
+	mockClock := &mockClock{
+		stub:             s.stub,
+		c:                c,
+		afterFuncCalls:   &s.afterFuncCalls,
+		properFuncCalled: &s.properFuncCalled,
+	}
+
+	s.min = 2 * time.Second
+	s.max = 16 * time.Second
+	s.factor = 2
+	s.timer = utils.NewBackoffTimer(
+		utils.BackoffTimerConfig{
+			Min:    s.min,
+			Max:    s.max,
+			Jitter: false,
+			Factor: s.factor,
+			Func:   mockFunc,
+			Clock:  mockClock,
+		},
+	)
+}
+
+func (s *timerSuite) TestStart(c *gc.C) {
+	s.timer.Start()
+	s.testStart(c, 1, 1)
+}
+
+func (s *timerSuite) TestMultipleStarts(c *gc.C) {
+	s.timer.Start()
+	s.testStart(c, 1, 1)
+
+	s.timer.Start()
+	s.checkStopCalls(c, 1)
+	s.testStart(c, 2, 2)
+
+	s.timer.Start()
+	s.checkStopCalls(c, 2)
+	s.testStart(c, 3, 3)
+}
+
+func (s *timerSuite) TestResetNoStart(c *gc.C) {
+	s.timer.Reset()
+	currentDuration := utils.ExposeBackoffTimerDuration(s.timer)
+	c.Assert(currentDuration, gc.Equals, s.min)
+}
+
+func (s *timerSuite) TestResetAndStart(c *gc.C) {
+	s.timer.Reset()
+	currentDuration := utils.ExposeBackoffTimerDuration(s.timer)
+	c.Assert(currentDuration, gc.Equals, s.min)
+
+	// These variables are used to track the number of
+	// afterFuncCalls (signalCallsNo) and the number of
+	// Stop calls (resetStopCallsNo + signalCallsNo).
+	resetStopCallsNo := 0
+	signalCallsNo := 0
+
+	signalCallsNo++
+	s.timer.Start()
+	s.testStart(c, 1, 1)
+
+	resetStopCallsNo++
+	s.timer.Reset()
+	s.checkStopCalls(c, resetStopCallsNo+signalCallsNo-1)
+	currentDuration = utils.ExposeBackoffTimerDuration(s.timer)
+	c.Assert(currentDuration, gc.Equals, s.min)
+
+	for i := 1; i < 200; i++ {
+		signalCallsNo++
+		s.timer.Start()
+		s.testStart(c, int64(signalCallsNo), int64(i))
+		s.checkStopCalls(c, resetStopCallsNo+signalCallsNo-1)
+	}
+
+	resetStopCallsNo++
+	s.timer.Reset()
+	s.checkStopCalls(c, signalCallsNo+resetStopCallsNo-1)
+
+	for i := 1; i < 100; i++ {
+		signalCallsNo++
+		s.timer.Start()
+		s.testStart(c, int64(signalCallsNo), int64(i))
+		s.checkStopCalls(c, 
resetStopCallsNo+signalCallsNo-1)
+	}
+
+	resetStopCallsNo++
+	s.timer.Reset()
+	s.checkStopCalls(c, signalCallsNo+resetStopCallsNo-1)
+}
+
+func (s *timerSuite) testStart(c *gc.C, afterFuncCalls int64, durationFactor int64) {
+	c.Assert(s.afterFuncCalls, gc.Equals, afterFuncCalls)
+	c.Logf("iteration %d", afterFuncCalls)
+	expectedDuration := time.Duration(math.Pow(float64(s.factor), float64(durationFactor))) * s.min
+	if expectedDuration > s.max || expectedDuration <= 0 {
+		expectedDuration = s.max
+	}
+	currentDuration := utils.ExposeBackoffTimerDuration(s.timer)
+	c.Assert(currentDuration, gc.Equals, expectedDuration)
+}
+
+func (s *timerSuite) checkStopCalls(c *gc.C, number int) {
+	calls := make([]testing.StubCall, number)
+	for i := 0; i < number; i++ {
+		calls[i] = testing.StubCall{FuncName: "Stop"}
+	}
+	s.stub.CheckCalls(c, calls)
+}
=== modified file 'src/github.com/juju/utils/trivial.go'
--- src/github.com/juju/utils/trivial.go 2015-09-22 15:27:01 +0000
+++ src/github.com/juju/utils/trivial.go 2016-03-22 15:18:22 +0000
@@ -13,43 +13,8 @@
 	"os"
 	"strings"
 	"unicode"
-
-	goyaml "gopkg.in/yaml.v1"
 )
-
-// WriteYaml marshals obj as yaml and then writes it to a file, atomically,
-// by first writing a sibling with the suffix ".preparing" and then moving
-// the sibling to the real path.
-func WriteYaml(path string, obj interface{}) error {
-	data, err := goyaml.Marshal(obj)
-	if err != nil {
-		return err
-	}
-	prep := path + ".preparing"
-	f, err := os.OpenFile(prep, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644)
-	if err != nil {
-		return err
-	}
-	_, err = f.Write(data)
-	// Explicitly close the file before moving it. This is needed on Windows
-	// where the OS will not allow us to move a file that still has an open file handle
-	f.Close()
-	if err != nil {
-		return err
-	}
-	return ReplaceFile(prep, path)
-}
-
-// ReadYaml unmarshals the yaml contained in the file at path into obj. See
-// goyaml.Unmarshal.
-func ReadYaml(path string, obj interface{}) error {
-	data, err := ioutil.ReadFile(path)
-	if err != nil {
-		return err
-	}
-	return goyaml.Unmarshal(data, obj)
-}
-
 // TODO(ericsnow) Move the quoting helpers into the shell package?
 
 // ShQuote quotes s so that when read by bash, no metacharacters
=== added file 'src/github.com/juju/utils/yaml.go'
--- src/github.com/juju/utils/yaml.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/yaml.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,62 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/errors"
+
+	"gopkg.in/yaml.v2"
+)
+
+// WriteYaml marshals obj as yaml to a temporary file in the same directory
+// as path, then atomically replaces path with the temporary file.
+func WriteYaml(path string, obj interface{}) error {
+	data, err := yaml.Marshal(obj)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	dir := filepath.Dir(path)
+	f, err := ioutil.TempFile(dir, "juju")
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tmp := f.Name()
+	if _, err := f.Write(data); err != nil {
+		f.Close()      // don't leak file handle
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+	// Explicitly close the file before moving it. This is needed on Windows
+	// where the OS will not allow us to move a file that still has an open
+	// file handle. Must check the error on close because filesystems can delay
+	// reporting errors until the file is closed.
+	if err := f.Close(); err != nil {
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+
+	// ioutil.TempFile creates files 0600, but this function has a contract
+	// that files will be world readable, 0644 after replacement.
+	if err := os.Chmod(tmp, 0644); err != nil {
+		os.Remove(tmp) // remove file with incorrect permissions.
+		return errors.Trace(err)
+	}
+
+	return ReplaceFile(tmp, path)
+}
+
+// ReadYaml unmarshals the yaml contained in the file at path into obj. See
+// yaml.Unmarshal. If path is not found, the error returned will be compatible
+// with os.IsNotExist.
+func ReadYaml(path string, obj interface{}) error {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err // cannot wrap here because callers check for NotFound.
+	}
+	return yaml.Unmarshal(data, obj)
+}
=== added file 'src/github.com/juju/utils/yaml_test.go'
--- src/github.com/juju/utils/yaml_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/juju/utils/yaml_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,85 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	gc "gopkg.in/check.v1"
+)
+
+type yamlSuite struct {
+}
+
+var _ = gc.Suite(&yamlSuite{})
+
+func (*yamlSuite) TestYamlRoundTrip(c *gc.C) {
+	// Test the happy path of round tripping an object via yaml.
+
+	type T struct {
+		A int    `yaml:"a"`
+		B bool   `yaml:"deleted"`
+		C string `yaml:"omitempty"`
+		D string
+	}
+
+	v := T{A: 1, B: true, C: "", D: ""}
+
+	f, err := ioutil.TempFile(c.MkDir(), "yaml")
+	c.Assert(err, gc.IsNil)
+	path := f.Name()
+	f.Close()
+
+	err = WriteYaml(path, v)
+	c.Assert(err, gc.IsNil)
+
+	var v2 T
+	err = ReadYaml(path, &v2)
+	c.Assert(err, gc.IsNil)
+
+	c.Assert(v, gc.Equals, v2)
+}
+
+func (*yamlSuite) TestReadYamlReturnsNotFound(c *gc.C) {
+	// The contract for ReadYaml requires it to return an error
+	// that can be inspected by os.IsNotExist. Notably, we cannot
+	// use juju/errors gift wrapping.
+	f, err := ioutil.TempFile(c.MkDir(), "yaml")
+	c.Assert(err, gc.IsNil)
+	path := f.Name()
+	err = os.Remove(path)
+	c.Assert(err, gc.IsNil)
+	err = ReadYaml(path, nil)
+
+	// Assert that the error is reported as NotExist.
+	c.Assert(os.IsNotExist(err), gc.Equals, true)
+}
+
+func (*yamlSuite) TestWriteYamlMissingDirectory(c *gc.C) {
+	// WriteYaml tries to create a temporary file in the same
+	// directory as the target. Test what happens if the path's
+	// directory is missing.
+
+	root := c.MkDir()
+	missing := filepath.Join(root, "missing", "filename")
+
+	v := struct{ A, B int }{1, 2}
+	err := WriteYaml(missing, v)
+	c.Assert(err, gc.NotNil)
+}
+
+func (*yamlSuite) TestWriteYamlWriteGarbage(c *gc.C) {
+	c.Skip("https://github.com/go-yaml/yaml/issues/144")
+	// Some things cannot be marshalled into yaml; check that
+	// WriteYaml detects this.
+ + root := c.MkDir() + path := filepath.Join(root, "f") + + v := struct{ A, B [10]bool }{} + err := WriteYaml(path, v) + c.Assert(err, gc.NotNil) +} === added directory 'src/github.com/julienschmidt' === added directory 'src/github.com/julienschmidt/httprouter' === added file 'src/github.com/julienschmidt/httprouter/.travis.yml' --- src/github.com/julienschmidt/httprouter/.travis.yml 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/.travis.yml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +sudo: false +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - tip === added file 'src/github.com/julienschmidt/httprouter/LICENSE' --- src/github.com/julienschmidt/httprouter/LICENSE 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +Copyright (c) 2013 Julien Schmidt. All rights reserved. + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * The names of the contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file === added file 'src/github.com/julienschmidt/httprouter/README.md' --- src/github.com/julienschmidt/httprouter/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,323 @@ +# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.png?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![Coverage](http://gocover.io/_badge/github.com/julienschmidt/httprouter?0)](http://gocover.io/github.com/julienschmidt/httprouter) [![GoDoc](http://godoc.org/github.com/julienschmidt/httprouter?status.png)](http://godoc.org/github.com/julienschmidt/httprouter) + +HttpRouter is a lightweight high performance HTTP request router +(also called *multiplexer* or just *mux* for short) for [Go](http://golang.org/). + +In contrast to the [default mux](http://golang.org/pkg/net/http/#ServeMux) of Go's net/http package, this router supports +variables in the routing pattern and matches against the request method. +It also scales better. + +The router is optimized for high performance and a small memory footprint. +It scales well even with very long paths and a large number of routes. 
+A compressing dynamic trie (radix tree) structure is used for efficient matching.
+
+## Features
+**Only explicit matches:** With other routers, like [http.ServeMux](http://golang.org/pkg/net/http/#ServeMux),
+a requested URL path could match multiple patterns. Therefore they have some
+awkward pattern priority rules, like *longest match* or *first registered,
+first matched*. By design of this router, a request can only match exactly one
+or no route. As a result, there are also no unintended matches, which makes it
+great for SEO and improves the user experience.
+
+**Stop caring about trailing slashes:** Choose the URL style you like, the
+router automatically redirects the client if a trailing slash is missing or if
+there is one extra. Of course it only does so if the new path has a handler.
+If you don't like it, you can [turn off this behavior](http://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash).
+
+**Path auto-correction:** Besides detecting the missing or additional trailing
+slash at no extra cost, the router can also fix wrong cases and remove
+superfluous path elements (like `../` or `//`).
+Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users?
+HttpRouter can help him by making a case-insensitive look-up and redirecting him
+to the correct URL.
+
+**Parameters in your routing pattern:** Stop parsing the requested URL path,
+just give the path segment a name and the router delivers the dynamic value to
+you. Because of the design of the router, path parameters are very cheap.
+
+**Zero Garbage:** The matching and dispatching process generates zero bytes of
+garbage. In fact, the only heap allocation made is building the slice of
+key-value pairs for path parameters. If the request path contains no
+parameters, not a single heap allocation is necessary.
+
+**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark).
+See below for technical details of the implementation.
+
+**No more server crashes:** You can set a [Panic handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.PanicHandler) to deal with panics
+that occur while handling an HTTP request. The router then recovers and lets the
+PanicHandler log what happened and deliver a nice error page.
+
+Of course you can also set **custom [NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) and [MethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.MethodNotAllowed) handlers** and [**serve static files**](http://godoc.org/github.com/julienschmidt/httprouter#Router.ServeFiles).
+
+## Usage
+This is just a quick introduction; view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details.
+
+Let's start with a trivial example:
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/julienschmidt/httprouter"
+    "net/http"
+    "log"
+)
+
+func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+    fmt.Fprint(w, "Welcome!\n")
+}
+
+func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+    fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
+}
+
+func main() {
+    router := httprouter.New()
+    router.GET("/", Index)
+    router.GET("/hello/:name", Hello)
+
+    log.Fatal(http.ListenAndServe(":8080", router))
+}
+```
+
+### Named parameters
+As you can see, `:name` is a *named parameter*.
+The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s.
+You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method:
+`:name` can be retrieved by `ByName("name")`.
+
+Named parameters only match a single path segment:
+```
+Pattern: /user/:user
+
+ /user/gordon              match
+ /user/you                 match
+ /user/gordon/profile      no match
+ /user/                    no match
+```
+
+**Note:** Since this router has only explicit matches, you cannot register static routes and parameters for the same path segment. For example you cannot register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other.
+
+### Catch-All parameters
+The second type are *catch-all* parameters, which have the form `*name`.
+As the name suggests, they match everything.
+Therefore they must always be at the **end** of the pattern:
+```
+Pattern: /src/*filepath
+
+ /src/                     match
+ /src/somefile.go          match
+ /src/subdir/somefile.go   match
+```
+
+## How does it work?
+The router relies on a tree structure which makes heavy use of *common prefixes*;
+it is basically a *compact* [*prefix tree*](http://en.wikipedia.org/wiki/Trie)
+(or just a [*Radix tree*](http://en.wikipedia.org/wiki/Radix_tree)).
+Nodes with a common prefix also share a common parent. Here is a short example
+of what the routing tree for the `GET` request method could look like:
+
+```
+Priority   Path             Handle
+9          \                *<1>
+3          ├s               nil
+2          |├earch\         *<2>
+1          |└upport\        *<3>
+2          ├blog\           *<4>
+1          |    └:post      nil
+1          |         └\     *<5>
+2          ├about-us\       *<6>
+1          |        └team\  *<7>
+1          └contact\        *<8>
+```
+Every `*` represents the memory address of a handler function (a pointer).
+If you follow a path through the tree from the root to the leaf, you get the
+complete route path, e.g. `\blog\:post\`, where `:post` is just a placeholder
+([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a
+tree structure also allows us to use dynamic parts like the `:post` parameter,
+since we actually match against the routing patterns instead of just comparing
+hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark),
+this works very well and efficiently.
+
+Since URL paths have a hierarchical structure and make use only of a limited set
+of characters (byte values), it is very likely that there are a lot of common
+prefixes. This allows us to easily reduce the routing into ever smaller problems.
+Moreover the router manages a separate tree for every request method.
+For one thing it is more space efficient than holding a method->handle map in
+every single node; for another, it also allows us to greatly reduce the routing
+problem before even starting the look-up in the prefix-tree.
+
+For even better scalability, the child nodes on each tree level are ordered by
+priority, where the priority is just the number of handles registered in sub
+nodes (children, grandchildren, and so on..).
+This helps in two ways:
+
+1. Nodes which are part of the most routing paths are evaluated first. This
+helps to make as many routes as possible reachable as fast as possible.
+2. It is some sort of cost compensation. The longest reachable path (highest
+cost) can always be evaluated first. The following scheme visualizes the tree
+structure. Nodes are evaluated from top to bottom and from left to right.
+
+```
+├------------
+├---------
+├-----
+├----
+├--
+├--
+└-
+```
+
+
+## Why doesn't this work with http.Handler?
+**It does!** The router itself implements the http.Handler interface.
+Moreover the router provides convenient [adapters for http.Handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [http.HandlerFunc](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s
+which allow them to be used as a [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route.
+The only disadvantage is that no parameter values can be retrieved when a
+http.Handler or http.HandlerFunc is used, since there is no efficient way to
+pass the values with the existing function parameters.
+Therefore [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) has a third function parameter.
+
+Just try it out for yourself; the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up.
+
+
+## Where can I find Middleware *X*?
+This package just provides a very efficient request router with a few extra
+features. The router is just a [http.Handler](http://golang.org/pkg/net/http/#Handler);
+you can chain any http.Handler compatible middleware before the router,
+for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers).
+Or you could [just write your own](http://justinas.org/writing-http-middleware-in-go/),
+it's very easy!
+
+Alternatively, you could try [a web framework based on HttpRouter](#web-frameworks-based-on-httprouter).
+
+### Multi-domain / Sub-domains
+Here is a quick example: Does your server serve multiple domains / hosts?
+You want to use sub-domains?
+Define a router per host!
+```go
+// We need an object that implements the http.Handler interface.
+// Therefore we need a type for which we implement the ServeHTTP method.
+// We just use a map here, in which we map host names (with port) to http.Handlers.
+type HostSwitch map[string]http.Handler
+
+// Implement the ServeHTTP method on our new type.
+func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// Check if a http.Handler is registered for the given host.
+	// If yes, use it to handle the request.
+	if handler := hs[r.Host]; handler != nil {
+		handler.ServeHTTP(w, r)
+	} else {
+		// Handle host names for which no handler is registered
+		http.Error(w, "Forbidden", 403) // Or Redirect?
+	}
+}
+
+func main() {
+	// Initialize a router as usual
+	router := httprouter.New()
+	router.GET("/", Index)
+	router.GET("/hello/:name", Hello)
+
+	// Make a new HostSwitch and insert the router (our http handler)
+	// for example.com and port 12345
+	hs := make(HostSwitch)
+	hs["example.com:12345"] = router
+
+	// Use the HostSwitch to listen and serve on port 12345
+	log.Fatal(http.ListenAndServe(":12345", hs))
+}
+```
+
+### Basic Authentication
+Another quick example: Basic Authentication (RFC 2617) for handles:
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"github.com/julienschmidt/httprouter"
+	"net/http"
+	"log"
+	"strings"
+)
+
+func BasicAuth(h httprouter.Handle, user, pass []byte) httprouter.Handle {
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		const basicAuthPrefix string = "Basic "
+
+		// Get the Basic Authentication credentials
+		auth := r.Header.Get("Authorization")
+		if strings.HasPrefix(auth, basicAuthPrefix) {
+			// Check credentials
+			payload, err := base64.StdEncoding.DecodeString(auth[len(basicAuthPrefix):])
+			if err == nil {
+				pair := bytes.SplitN(payload, []byte(":"), 2)
+				if len(pair) == 2 &&
+					bytes.Equal(pair[0], user) &&
+					bytes.Equal(pair[1], pass) {
+
+					// Delegate request to the given handle
+					h(w, r, ps)
+					return
+				}
+			}
+		}
+
+		// Request Basic Authentication otherwise
+		w.Header().Set("WWW-Authenticate", "Basic realm=Restricted")
+		http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+	}
+}
+
+func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	fmt.Fprint(w, "Not protected!\n")
+}
+
+func Protected(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+	fmt.Fprint(w, "Protected!\n")
+}
+
+func main() {
+	user := []byte("gordon")
+	pass := []byte("secret!")
+
+	router := httprouter.New()
+	router.GET("/", Index)
+	router.GET("/protected/", BasicAuth(Protected, user, pass))
+
+	log.Fatal(http.ListenAndServe(":8080", router))
+}
+```
+
+## Chaining with the NotFound handler
+
+**NOTE: It might be required to set [Router.HandleMethodNotAllowed](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandleMethodNotAllowed) to `false` to avoid problems.**
+
+You can use another [http.Handler](http://golang.org/pkg/net/http/#Handler), for example another router, to handle requests which could not be matched by this router by using the [Router.NotFound](http://godoc.org/github.com/julienschmidt/httprouter#Router.NotFound) handler. This allows chaining.
+
+### Static files
+The `NotFound` handler can for example be used to serve static files from the root path `/` (like an index.html file along with other assets):
+```go
+// Serve static files from the ./public directory
+router.NotFound = http.FileServer(http.Dir("public")).ServeHTTP
+```
+
+But this approach sidesteps the strict core rules of this router to avoid routing problems. A cleaner approach is to use a distinct sub-path for serving files, like `/static/*filepath` or `/files/*filepath`.
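+
+For illustration, a minimal sketch of that cleaner sub-path approach (the
+`public` directory name is an assumption here):
+```go
+// Serve the local ./public directory under the dedicated /static/ prefix.
+router.ServeFiles("/static/*filepath", http.Dir("public"))
+```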
+ +## Web Frameworks based on HttpRouter +If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package: +* [Ace](https://github.com/plimble/ace): Blazing fast Go Web Framework +* [api2go](https://github.com/univedo/api2go): A JSON API Implementation for Go +* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance +* [Goat](https://github.com/bahlo/goat): A minimalistic REST API server in Go +* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine +* [Hitch](https://github.com/nbio/hitch): Hitch ties httprouter, [httpcontext](https://github.com/nbio/httpcontext), and middleware up in a bow +* [kami](https://github.com/guregu/kami): A tiny web framework using x/net/context +* [Medeina](https://github.com/imdario/medeina): Inspired by Ruby's Roda and Cuba +* [Neko](https://github.com/rocwong/neko): A lightweight web application framework for Golang +* [Roxanna](https://github.com/iamthemuffinman/Roxanna): An amalgamation of httprouter, better logging, and hot reload +* [siesta](https://github.com/VividCortex/siesta): Composable HTTP handlers with contexts === added file 'src/github.com/julienschmidt/httprouter/path.go' --- src/github.com/julienschmidt/httprouter/path.go 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/path.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,123 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +// CleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned +func CleanPath(p string) string { + // Turn empty string into "/" + if p == "" { + return "/" + } + + n := len(p) + var buf []byte + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + buf = make([]byte, n+1) + buf[0] = '/' + } + + trailing := n > 2 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp). So in contrast to the path package this + // loop has no expensive function calls (except 1x make) + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r++ + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. 
element: remove to last / + r += 2 + + if w > 1 { + // can backtrack + w-- + + if buf == nil { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // real path element. + // add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + if buf == nil { + return p[:w] + } + return string(buf[:w]) +} + +// internal helper to lazily create a buffer if necessary +func bufApp(buf *[]byte, s string, w int, c byte) { + if *buf == nil { + if s[w] == c { + return + } + + *buf = make([]byte, len(s)) + copy(*buf, s[:w]) + } + (*buf)[w] = c +} === added file 'src/github.com/julienschmidt/httprouter/path_test.go' --- src/github.com/julienschmidt/httprouter/path_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/path_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +import ( + "runtime" + "testing" +) + +var cleanTests = []struct { + path, result string +}{ + // Already clean + {"/", "/"}, + {"/abc", "/abc"}, + {"/a/b/c", "/a/b/c"}, + {"/abc/", "/abc/"}, + {"/a/b/c/", "/a/b/c/"}, + + // missing root + {"", "/"}, + {"abc", "/abc"}, + {"abc/def", "/abc/def"}, + {"a/b/c", "/a/b/c"}, + + // Remove doubled slash + {"//", "/"}, + {"/abc//", "/abc/"}, + {"/abc/def//", "/abc/def/"}, + {"/a/b/c//", "/a/b/c/"}, + {"/abc//def//ghi", "/abc/def/ghi"}, + {"//abc", "/abc"}, + {"///abc", "/abc"}, + {"//abc//", "/abc/"}, + + // Remove . elements + {".", "/"}, + {"./", "/"}, + {"/abc/./def", "/abc/def"}, + {"/./abc/def", "/abc/def"}, + {"/abc/.", "/abc/"}, + + // Remove .. elements + {"..", "/"}, + {"../", "/"}, + {"../../", "/"}, + {"../..", "/"}, + {"../../abc", "/abc"}, + {"/abc/def/ghi/../jkl", "/abc/def/jkl"}, + {"/abc/def/../ghi/../jkl", "/abc/jkl"}, + {"/abc/def/..", "/abc"}, + {"/abc/def/../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../../ghi/jkl/../../../mno", "/mno"}, + + // Combinations + {"abc/./../def", "/def"}, + {"abc//./../def", "/def"}, + {"abc/../../././../def", "/def"}, +} + +func TestPathClean(t *testing.T) { + for _, test := range cleanTests { + if s := CleanPath(test.path); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.path, s, test.result) + } + if s := CleanPath(test.result); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.result, s, test.result) + } + } +} + +func TestPathCleanMallocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") + return + } + + for _, test := range cleanTests { + allocs := testing.AllocsPerRun(100, func() { CleanPath(test.result) }) + if allocs > 0 { + t.Errorf("CleanPath(%q): %v allocs, want zero", test.result, allocs) + } + } +} === added file 'src/github.com/julienschmidt/httprouter/router.go' --- src/github.com/julienschmidt/httprouter/router.go 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/router.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,363 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package httprouter is a trie based high performance HTTP request router.
+//
+// A trivial example is:
+//
+//  package main
+//
+//  import (
+//      "fmt"
+//      "github.com/julienschmidt/httprouter"
+//      "net/http"
+//      "log"
+//  )
+//
+//  func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+//      fmt.Fprint(w, "Welcome!\n")
+//  }
+//
+//  func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+//      fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
+//  }
+//
+//  func main() {
+//      router := httprouter.New()
+//      router.GET("/", Index)
+//      router.GET("/hello/:name", Hello)
+//
+//      log.Fatal(http.ListenAndServe(":8080", router))
+//  }
+//
+// The router matches incoming requests by the request method and the path.
+// If a handle is registered for this path and method, the router delegates the
+// request to that function.
+// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to
+// register handles; for all other methods router.Handle can be used.
+//
+// The registered path, against which the router matches incoming requests, can
+// contain two types of parameters:
+//  Syntax    Type
+//  :name     named parameter
+//  *name     catch-all parameter
+//
+// Named parameters are dynamic path segments. They match anything until the
+// next '/' or the path end:
+//  Path: /blog/:category/:post
+//
+//  Requests:
+//   /blog/go/request-routers            match: category="go", post="request-routers"
+//   /blog/go/request-routers/           no match, but the router would redirect
+//   /blog/go/                           no match
+//   /blog/go/request-routers/comments   no match
+//
+// Catch-all parameters match anything until the path end, including the
+// directory index (the '/' before the catch-all). Since they match anything
+// until the end, catch-all parameters must always be the final path element.
+//  Path: /files/*filepath
+//
+//  Requests:
+//   /files/                             match: filepath="/"
+//   /files/LICENSE                      match: filepath="/LICENSE"
+//   /files/templates/article.html       match: filepath="/templates/article.html"
+//   /files                              no match, but the router would redirect
+//
+// The value of parameters is saved as a slice of the Param struct, each
+// consisting of a key and a value. The slice is passed to the Handle func as a
+// third parameter.
+// There are two ways to retrieve the value of a parameter:
+//  // by the name of the parameter
+//  user := ps.ByName("user") // defined by :user or *user
+//
+//  // by the index of the parameter. This way you can also get the name (key)
+//  thirdKey := ps[2].Key   // the name of the 3rd parameter
+//  thirdValue := ps[2].Value // the value of the 3rd parameter
+package httprouter
+
+import (
+	"net/http"
+)
+
+// Handle is a function that can be registered to a route to handle HTTP
+// requests. Like http.HandlerFunc, but has a third parameter for the values of
+// wildcards (variables).
+type Handle func(http.ResponseWriter, *http.Request, Params)
+
+// Param is a single URL parameter, consisting of a key and a value.
+type Param struct {
+	Key   string
+	Value string
+}
+
+// Params is a Param-slice, as returned by the router.
+// The slice is ordered, the first URL parameter is also the first slice value.
+// It is therefore safe to read values by the index.
+type Params []Param
+
+// ByName returns the value of the first Param whose key matches the given name.
+// If no matching Param is found, an empty string is returned.
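+//
+// For example, with hypothetical parameters:
+//
+//	ps := Params{Param{"name", "gopher"}}
+//	ps.ByName("name") // returns "gopher"
+//	ps.ByName("age")  // returns ""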
+func (ps Params) ByName(name string) string {
+	for i := range ps {
+		if ps[i].Key == name {
+			return ps[i].Value
+		}
+	}
+	return ""
+}
+
+// Router is an http.Handler which can be used to dispatch requests to
+// different handler functions via configurable routes.
+type Router struct {
+	trees map[string]*node
+
+	// Enables automatic redirection if the current route can't be matched but a
+	// handler for the path with (without) the trailing slash exists.
+	// For example if /foo/ is requested but a route only exists for /foo, the
+	// client is redirected to /foo with http status code 301 for GET requests
+	// and 307 for all other request methods.
+	RedirectTrailingSlash bool
+
+	// If enabled, the router tries to fix the current request path, if no
+	// handle is registered for it.
+	// First superfluous path elements like ../ or // are removed.
+	// Afterwards the router does a case-insensitive lookup of the cleaned path.
+	// If a handle can be found for this route, the router makes a redirection
+	// to the corrected path with status code 301 for GET requests and 307 for
+	// all other request methods.
+	// For example /FOO and /..//Foo could be redirected to /foo.
+	// RedirectTrailingSlash is independent of this option.
+	RedirectFixedPath bool
+
+	// If enabled, the router checks if another method is allowed for the
+	// current route, if the current request cannot be routed.
+	// If this is the case, the request is answered with 'Method Not Allowed'
+	// and HTTP status code 405.
+	// If no other Method is allowed, the request is delegated to the NotFound
+	// handler.
+	HandleMethodNotAllowed bool
+
+	// Configurable http.Handler which is called when no matching route is
+	// found. If it is not set, http.NotFound is used.
+	NotFound http.Handler
+
+	// Configurable http.Handler which is called when a request
+	// cannot be routed and HandleMethodNotAllowed is true.
+	// If it is not set, http.Error with http.StatusMethodNotAllowed is used.
+	MethodNotAllowed http.Handler
+
+	// Function to handle panics recovered from http handlers.
+	// It should be used to generate an error page and return the http error
+	// code 500 (Internal Server Error).
+	// The handler can be used to keep your server from crashing because of
+	// unrecovered panics.
+	PanicHandler func(http.ResponseWriter, *http.Request, interface{})
+}
+
+// Make sure the Router conforms with the http.Handler interface.
+var _ http.Handler = New()
+
+// New returns a new initialized Router.
+// Path auto-correction, including trailing slashes, is enabled by default.
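+//
+// The behaviour can be tuned after construction, for example (a sketch):
+//
+//	router := New()
+//	router.RedirectFixedPath = false // leave requested paths untouched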
+func New() *Router { + return &Router{ + RedirectTrailingSlash: true, + RedirectFixedPath: true, + HandleMethodNotAllowed: true, + } +} + +// GET is a shortcut for router.Handle("GET", path, handle) +func (r *Router) GET(path string, handle Handle) { + r.Handle("GET", path, handle) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle) +func (r *Router) HEAD(path string, handle Handle) { + r.Handle("HEAD", path, handle) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) +func (r *Router) OPTIONS(path string, handle Handle) { + r.Handle("OPTIONS", path, handle) +} + +// POST is a shortcut for router.Handle("POST", path, handle) +func (r *Router) POST(path string, handle Handle) { + r.Handle("POST", path, handle) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle) +func (r *Router) PUT(path string, handle Handle) { + r.Handle("PUT", path, handle) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle) +func (r *Router) PATCH(path string, handle Handle) { + r.Handle("PATCH", path, handle) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle) +func (r *Router) DELETE(path string, handle Handle) { + r.Handle("DELETE", path, handle) +} + +// Handle registers a new request handle with the given path and method. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). +func (r *Router) Handle(method, path string, handle Handle) { + if path[0] != '/' { + panic("path must begin with '/' in path '" + path + "'") + } + + if r.trees == nil { + r.trees = make(map[string]*node) + } + + root := r.trees[method] + if root == nil { + root = new(node) + r.trees[method] = root + } + + root.addRoute(path, handle) +} + +// Handler is an adapter which allows the usage of an http.Handler as a +// request handle. +func (r *Router) Handler(method, path string, handler http.Handler) { + r.Handle(method, path, + func(w http.ResponseWriter, req *http.Request, _ Params) { + handler.ServeHTTP(w, req) + }, + ) +} + +// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a +// request handle. +func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { + r.Handler(method, path, handler) +} + +// ServeFiles serves files from the given file system root. +// The path must end with "/*filepath", files are then served from the local +// path /defined/root/dir/*filepath. +// For example if root is "/etc" and *filepath is "passwd", the local file +// "/etc/passwd" would be served. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. 
+// To use the operating system's file system implementation, +// use http.Dir: +// router.ServeFiles("/src/*filepath", http.Dir("/var/www")) +func (r *Router) ServeFiles(path string, root http.FileSystem) { + if len(path) < 10 || path[len(path)-10:] != "/*filepath" { + panic("path must end with /*filepath in path '" + path + "'") + } + + fileServer := http.FileServer(root) + + r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) { + req.URL.Path = ps.ByName("filepath") + fileServer.ServeHTTP(w, req) + }) +} + +func (r *Router) recv(w http.ResponseWriter, req *http.Request) { + if rcv := recover(); rcv != nil { + r.PanicHandler(w, req, rcv) + } +} + +// Lookup allows the manual lookup of a method + path combo. +// This is e.g. useful to build a framework around this router. +// If the path was found, it returns the handle function and the path parameter +// values. Otherwise the third return value indicates whether a redirection to +// the same path with an extra / without the trailing slash should be performed. +func (r *Router) Lookup(method, path string) (Handle, Params, bool) { + if root := r.trees[method]; root != nil { + return root.getValue(path) + } + return nil, nil, false +} + +// ServeHTTP makes the router implement the http.Handler interface. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if r.PanicHandler != nil { + defer r.recv(w, req) + } + + if root := r.trees[req.Method]; root != nil { + path := req.URL.Path + + if handle, ps, tsr := root.getValue(path); handle != nil { + handle(w, req, ps) + return + } else if req.Method != "CONNECT" && path != "/" { + code := 301 // Permanent redirect, request with GET method + if req.Method != "GET" { + // Temporary redirect, request with same method + // As of Go 1.3, Go does not support status code 308. + code = 307 + } + + if tsr && r.RedirectTrailingSlash { + if len(path) > 1 && path[len(path)-1] == '/' { + req.URL.Path = path[:len(path)-1] + } else { + req.URL.Path = path + "/" + } + http.Redirect(w, req, req.URL.String(), code) + return + } + + // Try to fix the request path + if r.RedirectFixedPath { + fixedPath, found := root.findCaseInsensitivePath( + CleanPath(path), + r.RedirectTrailingSlash, + ) + if found { + req.URL.Path = string(fixedPath) + http.Redirect(w, req, req.URL.String(), code) + return + } + } + } + } + + // Handle 405 + if r.HandleMethodNotAllowed { + for method := range r.trees { + // Skip the requested method - we already tried this one + if method == req.Method { + continue + } + + handle, _, _ := r.trees[method].getValue(req.URL.Path) + if handle != nil { + if r.MethodNotAllowed != nil { + r.MethodNotAllowed.ServeHTTP(w, req) + } else { + http.Error(w, + http.StatusText(http.StatusMethodNotAllowed), + http.StatusMethodNotAllowed, + ) + } + return + } + } + } + + // Handle 404 + if r.NotFound != nil { + r.NotFound.ServeHTTP(w, req) + } else { + http.NotFound(w, req) + } +} === added file 'src/github.com/julienschmidt/httprouter/router_test.go' --- src/github.com/julienschmidt/httprouter/router_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/router_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,420 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
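+
+// The tests below exercise the public Router API end to end: parameter
+// lookup, the per-method shortcuts, handler adapters, router chaining,
+// trailing-slash and fixed-path redirects, 405/404 handling, panic recovery
+// and file serving.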
+ +package httprouter + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" +) + +type mockResponseWriter struct{} + +func (m *mockResponseWriter) Header() (h http.Header) { + return http.Header{} +} + +func (m *mockResponseWriter) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (m *mockResponseWriter) WriteString(s string) (n int, err error) { + return len(s), nil +} + +func (m *mockResponseWriter) WriteHeader(int) {} + +func TestParams(t *testing.T) { + ps := Params{ + Param{"param1", "value1"}, + Param{"param2", "value2"}, + Param{"param3", "value3"}, + } + for i := range ps { + if val := ps.ByName(ps[i].Key); val != ps[i].Value { + t.Errorf("Wrong value for %s: Got %s; Want %s", ps[i].Key, val, ps[i].Value) + } + } + if val := ps.ByName("noKey"); val != "" { + t.Errorf("Expected empty string for not found key; got: %s", val) + } +} + +func TestRouter(t *testing.T) { + router := New() + + routed := false + router.Handle("GET", "/user/:name", func(w http.ResponseWriter, r *http.Request, ps Params) { + routed = true + want := Params{Param{"name", "gopher"}} + if !reflect.DeepEqual(ps, want) { + t.Fatalf("wrong wildcard values: want %v, got %v", want, ps) + } + }) + + w := new(mockResponseWriter) + + req, _ := http.NewRequest("GET", "/user/gopher", nil) + router.ServeHTTP(w, req) + + if !routed { + t.Fatal("routing failed") + } +} + +type handlerStruct struct { + handeled *bool +} + +func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) { + *h.handeled = true +} + +func TestRouterAPI(t *testing.T) { + var get, head, options, post, put, patch, delete, handler, handlerFunc bool + + httpHandler := handlerStruct{&handler} + + router := New() + router.GET("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + get = true + }) + router.HEAD("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + head = true + }) + router.OPTIONS("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + options = true + }) + router.POST("/POST", func(w http.ResponseWriter, r *http.Request, _ Params) { + post = true + }) + router.PUT("/PUT", func(w http.ResponseWriter, r *http.Request, _ Params) { + put = true + }) + router.PATCH("/PATCH", func(w http.ResponseWriter, r *http.Request, _ Params) { + patch = true + }) + router.DELETE("/DELETE", func(w http.ResponseWriter, r *http.Request, _ Params) { + delete = true + }) + router.Handler("GET", "/Handler", httpHandler) + router.HandlerFunc("GET", "/HandlerFunc", func(w http.ResponseWriter, r *http.Request) { + handlerFunc = true + }) + + w := new(mockResponseWriter) + + r, _ := http.NewRequest("GET", "/GET", nil) + router.ServeHTTP(w, r) + if !get { + t.Error("routing GET failed") + } + + r, _ = http.NewRequest("HEAD", "/GET", nil) + router.ServeHTTP(w, r) + if !head { + t.Error("routing HEAD failed") + } + + r, _ = http.NewRequest("OPTIONS", "/GET", nil) + router.ServeHTTP(w, r) + if !options { + t.Error("routing OPTIONS failed") + } + + r, _ = http.NewRequest("POST", "/POST", nil) + router.ServeHTTP(w, r) + if !post { + t.Error("routing POST failed") + } + + r, _ = http.NewRequest("PUT", "/PUT", nil) + router.ServeHTTP(w, r) + if !put { + t.Error("routing PUT failed") + } + + r, _ = http.NewRequest("PATCH", "/PATCH", nil) + router.ServeHTTP(w, r) + if !patch { + t.Error("routing PATCH failed") + } + + r, _ = http.NewRequest("DELETE", "/DELETE", nil) + router.ServeHTTP(w, r) + if !delete { + t.Error("routing DELETE failed") + } + + r, _ = http.NewRequest("GET", 
"/Handler", nil) + router.ServeHTTP(w, r) + if !handler { + t.Error("routing Handler failed") + } + + r, _ = http.NewRequest("GET", "/HandlerFunc", nil) + router.ServeHTTP(w, r) + if !handlerFunc { + t.Error("routing HandlerFunc failed") + } +} + +func TestRouterRoot(t *testing.T) { + router := New() + recv := catchPanic(func() { + router.GET("noSlashRoot", nil) + }) + if recv == nil { + t.Fatal("registering path not beginning with '/' did not panic") + } +} + +func TestRouterChaining(t *testing.T) { + router1 := New() + router2 := New() + router1.NotFound = router2 + + fooHit := false + router1.POST("/foo", func(w http.ResponseWriter, req *http.Request, _ Params) { + fooHit = true + w.WriteHeader(http.StatusOK) + }) + + barHit := false + router2.POST("/bar", func(w http.ResponseWriter, req *http.Request, _ Params) { + barHit = true + w.WriteHeader(http.StatusOK) + }) + + r, _ := http.NewRequest("POST", "/foo", nil) + w := httptest.NewRecorder() + router1.ServeHTTP(w, r) + if !(w.Code == http.StatusOK && fooHit) { + t.Errorf("Regular routing failed with router chaining.") + t.FailNow() + } + + r, _ = http.NewRequest("POST", "/bar", nil) + w = httptest.NewRecorder() + router1.ServeHTTP(w, r) + if !(w.Code == http.StatusOK && barHit) { + t.Errorf("Chained routing failed with router chaining.") + t.FailNow() + } + + r, _ = http.NewRequest("POST", "/qax", nil) + w = httptest.NewRecorder() + router1.ServeHTTP(w, r) + if !(w.Code == http.StatusNotFound) { + t.Errorf("NotFound behavior failed with router chaining.") + t.FailNow() + } +} + +func TestRouterNotAllowed(t *testing.T) { + handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} + + router := New() + router.POST("/path", handlerFunc) + + // Test not allowed + r, _ := http.NewRequest("GET", "/path", nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == http.StatusMethodNotAllowed) { + t.Errorf("NotAllowed handling failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + w = httptest.NewRecorder() + responseText := "custom method" + router.MethodNotAllowed = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(responseText)) + }) + router.ServeHTTP(w, r) + if got := w.Body.String(); !(got == responseText) { + t.Errorf("unexpected response got %q want %q", got, responseText) + } + if w.Code != http.StatusTeapot { + t.Errorf("unexpected response code %d want %d", w.Code, http.StatusTeapot) + } +} + +func TestRouterNotFound(t *testing.T) { + handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} + + router := New() + router.GET("/path", handlerFunc) + router.GET("/dir/", handlerFunc) + router.GET("/", handlerFunc) + + testRoutes := []struct { + route string + code int + header string + }{ + {"/path/", 301, "map[Location:[/path]]"}, // TSR -/ + {"/dir", 301, "map[Location:[/dir/]]"}, // TSR +/ + {"", 301, "map[Location:[/]]"}, // TSR +/ + {"/PATH", 301, "map[Location:[/path]]"}, // Fixed Case + {"/DIR/", 301, "map[Location:[/dir/]]"}, // Fixed Case + {"/PATH/", 301, "map[Location:[/path]]"}, // Fixed Case -/ + {"/DIR", 301, "map[Location:[/dir/]]"}, // Fixed Case +/ + {"/../path", 301, "map[Location:[/path]]"}, // CleanPath + {"/nope", 404, ""}, // NotFound + } + for _, tr := range testRoutes { + r, _ := http.NewRequest("GET", tr.route, nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == tr.code && (w.Code == 404 || fmt.Sprint(w.Header()) == tr.header)) { + t.Errorf("NotFound handling route %s 
failed: Code=%d, Header=%v", tr.route, w.Code, w.Header()) + } + } + + // Test custom not found handler + var notFound bool + router.NotFound = http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(404) + notFound = true + }) + r, _ := http.NewRequest("GET", "/nope", nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 404 && notFound == true) { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + // Test other method than GET (want 307 instead of 301) + router.PATCH("/path", handlerFunc) + r, _ = http.NewRequest("PATCH", "/path/", nil) + w = httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 307 && fmt.Sprint(w.Header()) == "map[Location:[/path]]") { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + // Test special case where no node for the prefix "/" exists + router = New() + router.GET("/a", handlerFunc) + r, _ = http.NewRequest("GET", "/", nil) + w = httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 404) { + t.Errorf("NotFound handling route / failed: Code=%d", w.Code) + } +} + +func TestRouterPanicHandler(t *testing.T) { + router := New() + panicHandled := false + + router.PanicHandler = func(rw http.ResponseWriter, r *http.Request, p interface{}) { + panicHandled = true + } + + router.Handle("PUT", "/user/:name", func(_ http.ResponseWriter, _ *http.Request, _ Params) { + panic("oops!") + }) + + w := new(mockResponseWriter) + req, _ := http.NewRequest("PUT", "/user/gopher", nil) + + defer func() { + if rcv := recover(); rcv != nil { + t.Fatal("handling panic failed") + } + }() + + router.ServeHTTP(w, req) + + if !panicHandled { + t.Fatal("simulating failed") + } +} + +func TestRouterLookup(t *testing.T) { + routed := false + wantHandle := func(_ http.ResponseWriter, _ *http.Request, _ Params) { + routed = true + } + wantParams := Params{Param{"name", "gopher"}} + + router := New() + + // try empty router first + handle, _, tsr := router.Lookup("GET", "/nope") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } + + // insert route and try again + router.GET("/user/:name", wantHandle) + + handle, params, tsr := router.Lookup("GET", "/user/gopher") + if handle == nil { + t.Fatal("Got no handle!") + } else { + handle(nil, nil, nil) + if !routed { + t.Fatal("Routing failed!") + } + } + + if !reflect.DeepEqual(params, wantParams) { + t.Fatalf("Wrong parameter values: want %v, got %v", wantParams, params) + } + + handle, _, tsr = router.Lookup("GET", "/user/gopher/") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if !tsr { + t.Error("Got no TSR recommendation!") + } + + handle, _, tsr = router.Lookup("GET", "/nope") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } +} + +type mockFileSystem struct { + opened bool +} + +func (mfs *mockFileSystem) Open(name string) (http.File, error) { + mfs.opened = true + return nil, errors.New("this is just a mock") +} + +func TestRouterServeFiles(t *testing.T) { + router := New() + mfs := &mockFileSystem{} + + recv := catchPanic(func() { + router.ServeFiles("/noFilepath", mfs) + }) + if recv == nil { + t.Fatal("registering path not ending with '*filepath' did not panic") + } + + router.ServeFiles("/*filepath", mfs) + w := new(mockResponseWriter) + r, _ := 
http.NewRequest("GET", "/favicon.ico", nil) + router.ServeHTTP(w, r) + if !mfs.opened { + t.Error("serving file failed") + } +} === added file 'src/github.com/julienschmidt/httprouter/tree.go' --- src/github.com/julienschmidt/httprouter/tree.go 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/tree.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,555 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +import ( + "strings" + "unicode" +) + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func countParams(path string) uint8 { + var n uint + for i := 0; i < len(path); i++ { + if path[i] != ':' && path[i] != '*' { + continue + } + n++ + } + if n >= 255 { + return 255 + } + return uint8(n) +} + +type nodeType uint8 + +const ( + static nodeType = 0 + param nodeType = 1 + catchAll nodeType = 2 +) + +type node struct { + path string + wildChild bool + nType nodeType + maxParams uint8 + indices string + children []*node + handle Handle + priority uint32 +} + +// increments priority of the given child and reorders if necessary +func (n *node) incrementChildPrio(pos int) int { + n.children[pos].priority++ + prio := n.children[pos].priority + + // adjust position (move to front) + newPos := pos + for newPos > 0 && n.children[newPos-1].priority < prio { + // swap node positions + tmpN := n.children[newPos-1] + n.children[newPos-1] = n.children[newPos] + n.children[newPos] = tmpN + + newPos-- + } + + // build new index char string + if newPos != pos { + n.indices = n.indices[:newPos] + // unchanged prefix, might be empty + n.indices[pos:pos+1] + // the index char we move + n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' + } + + return newPos +} + +// addRoute adds a node with the given handle to the path. +// Not concurrency-safe! +func (n *node) addRoute(path string, handle Handle) { + fullPath := path + n.priority++ + numParams := countParams(path) + + // non-empty tree + if len(n.path) > 0 || len(n.children) > 0 { + walk: + for { + // Update maxParams of the current node + if numParams > n.maxParams { + n.maxParams = numParams + } + + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := 0 + max := min(len(path), len(n.path)) + for i < max && path[i] == n.path[i] { + i++ + } + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handle: n.handle, + priority: n.priority - 1, + } + + // Update maxParams (max of all children) + for i := range child.children { + if child.children[i].maxParams > child.maxParams { + child.maxParams = child.children[i].maxParams + } + } + + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = string([]byte{n.path[i]}) + n.path = path[:i] + n.handle = nil + n.wildChild = false + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + + if n.wildChild { + n = n.children[0] + n.priority++ + + // Update maxParams of the child node + if numParams > n.maxParams { + n.maxParams = numParams + } + numParams-- + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] { + // check for longer wildcard, e.g. 
:name and :names + if len(n.path) >= len(path) || path[len(n.path)] == '/' { + continue walk + } + } + + panic("path segment '" + path + + "' conflicts with existing wildcard '" + n.path + + "' in path '" + fullPath + "'") + } + + c := path[0] + + // slash after param + if n.nType == param && c == '/' && len(n.children) == 1 { + n = n.children[0] + n.priority++ + continue walk + } + + // Check if a child with the next path byte exists + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + i = n.incrementChildPrio(i) + n = n.children[i] + continue walk + } + } + + // Otherwise insert it + if c != ':' && c != '*' { + // []byte for proper unicode char conversion, see #65 + n.indices += string([]byte{c}) + child := &node{ + maxParams: numParams, + } + n.children = append(n.children, child) + n.incrementChildPrio(len(n.indices) - 1) + n = child + } + n.insertChild(numParams, path, fullPath, handle) + return + + } else if i == len(path) { // Make node a (in-path) leaf + if n.handle != nil { + panic("a handle is already registered for path ''" + fullPath + "'") + } + n.handle = handle + } + return + } + } else { // Empty tree + n.insertChild(numParams, path, fullPath, handle) + } +} + +func (n *node) insertChild(numParams uint8, path, fullPath string, handle Handle) { + var offset int // already handled bytes of the path + + // find prefix until first wildcard (beginning with ':'' or '*'') + for i, max := 0, len(path); numParams > 0; i++ { + c := path[i] + if c != ':' && c != '*' { + continue + } + + // find wildcard end (either '/' or path end) + end := i + 1 + for end < max && path[end] != '/' { + switch path[end] { + // the wildcard name must not contain ':' and '*' + case ':', '*': + panic("only one wildcard per path segment is allowed, has: '" + + path[i:] + "' in path '" + fullPath + "'") + default: + end++ + } + } + + // check if this Node existing children which would be + // unreachable if we insert the wildcard here + if len(n.children) > 0 { + panic("wildcard route '" + path[i:end] + + "' conflicts with existing children in path '" + fullPath + "'") + } + + // check if the wildcard has a name + if end-i < 2 { + panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") + } + + if c == ':' { // param + // split path at the beginning of the wildcard + if i > 0 { + n.path = path[offset:i] + offset = i + } + + child := &node{ + nType: param, + maxParams: numParams, + } + n.children = []*node{child} + n.wildChild = true + n = child + n.priority++ + numParams-- + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if end < max { + n.path = path[offset:end] + offset = end + + child := &node{ + maxParams: numParams, + priority: 1, + } + n.children = []*node{child} + n = child + } + + } else { // catchAll + if end != max || numParams > 1 { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } + + n.path = path[offset:i] + + // first node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + maxParams: 1, + } + n.children = []*node{child} + n.indices = string(path[i]) + n = child + n.priority++ + + // second node: node 
holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + maxParams: 1, + handle: handle, + priority: 1, + } + n.children = []*node{child} + + return + } + } + + // insert remaining path part and handle to the leaf + n.path = path[offset:] + n.handle = handle +} + +// Returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. +func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { +walk: // Outer loop for walking the tree + for { + if len(path) > len(n.path) { + if path[:len(n.path)] == n.path { + path = path[len(n.path):] + // If this node does not have a wildcard (param or catchAll) + // child, we can just look up the next child node and continue + // to walk down the tree + if !n.wildChild { + c := path[0] + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + n = n.children[i] + continue walk + } + } + + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + tsr = (path == "/" && n.handle != nil) + return + + } + + // handle wildcard child + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // save param value + if p == nil { + // lazy allocation + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[1:] + p[i].Value = path[:end] + + // we need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + tsr = (len(path) == end+1) + return + } + + if handle = n.handle; handle != nil { + return + } else if len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + tsr = (n.path == "/" && n.handle != nil) + } + + return + + case catchAll: + // save param value + if p == nil { + // lazy allocation + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[2:] + p[i].Value = path + + handle = n.handle + return + + default: + panic("invalid node type") + } + } + } else if path == n.path { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if handle = n.handle; handle != nil { + return + } + + // No handle found. Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + tsr = (len(n.path) == 1 && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + tsr = (path == "/") || + (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && + path == n.path[:len(n.path)-1] && n.handle != nil) + return + } +} + +// Makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. +// It returns the case-corrected path and a bool indicating whether the lookup +// was successful. 
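+//
+// For example, with a route registered for "/hi", a lookup of "/HI" yields
+// ("/hi", true); a lookup of "/HI/" succeeds only if fixTrailingSlash is
+// enabled, since the extra trailing slash must be removed as well.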
+func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { + ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory + + // Outer loop for walking the tree + for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { + path = path[len(n.path):] + ciPath = append(ciPath, n.path...) + + if len(path) > 0 { + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + r := unicode.ToLower(rune(path[0])) + for i, index := range n.indices { + // must use recursive approach since both index and + // ToLower(index) could exist. We must check both. + if r == unicode.ToLower(index) { + out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) + if found { + return append(ciPath, out...), true + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + found = (fixTrailingSlash && path == "/" && n.handle != nil) + return + } + + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + k := 0 + for k < len(path) && path[k] != '/' { + k++ + } + + // add param value to case insensitive path + ciPath = append(ciPath, path[:k]...) + + // we need to go deeper! + if k < len(path) { + if len(n.children) > 0 { + path = path[k:] + n = n.children[0] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == k+1 { + return ciPath, true + } + return + } + + if n.handle != nil { + return ciPath, true + } else if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handle != nil { + return append(ciPath, '/'), true + } + } + return + + case catchAll: + return append(ciPath, path...), true + + default: + panic("invalid node type") + } + } else { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handle != nil { + return ciPath, true + } + + // No handle found. + // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + if (len(n.path) == 1 && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) { + return append(ciPath, '/'), true + } + return + } + } + } + return + } + } + + // Nothing found. + // Try to fix the path by adding / removing a trailing slash + if fixTrailingSlash { + if path == "/" { + return ciPath, true + } + if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && + strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && + n.handle != nil { + return append(ciPath, n.path...), true + } + } + return +} === added file 'src/github.com/julienschmidt/httprouter/tree_test.go' --- src/github.com/julienschmidt/httprouter/tree_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/julienschmidt/httprouter/tree_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,611 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
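+
+// The tests below construct radix trees directly via addRoute and verify
+// route matching, node priorities, maxParams bookkeeping, wildcard conflict
+// detection, trailing-slash recommendations and case-insensitive lookups.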
+ +package httprouter + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "testing" +) + +func printChildren(n *node, prefix string) { + fmt.Printf(" %02d:%02d %s%s[%d] %v %t %d \r\n", n.priority, n.maxParams, prefix, n.path, len(n.children), n.handle, n.wildChild, n.nType) + for l := len(n.path); l > 0; l-- { + prefix += " " + } + for _, child := range n.children { + printChildren(child, prefix) + } +} + +// Used as a workaround since we can't compare functions or their adresses +var fakeHandlerValue string + +func fakeHandler(val string) Handle { + return func(http.ResponseWriter, *http.Request, Params) { + fakeHandlerValue = val + } +} + +type testRequests []struct { + path string + nilHandler bool + route string + ps Params +} + +func checkRequests(t *testing.T, tree *node, requests testRequests) { + for _, request := range requests { + handler, ps, _ := tree.getValue(request.path) + + if handler == nil { + if !request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path) + } + } else if request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path) + } else { + handler(nil, nil, nil) + if fakeHandlerValue != request.route { + t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route) + } + } + + if !reflect.DeepEqual(ps, request.ps) { + t.Errorf("Params mismatch for route '%s'", request.path) + } + } +} + +func checkPriorities(t *testing.T, n *node) uint32 { + var prio uint32 + for i := range n.children { + prio += checkPriorities(t, n.children[i]) + } + + if n.handle != nil { + prio++ + } + + if n.priority != prio { + t.Errorf( + "priority mismatch for node '%s': is %d, should be %d", + n.path, n.priority, prio, + ) + } + + return prio +} + +func checkMaxParams(t *testing.T, n *node) uint8 { + var maxParams uint8 + for i := range n.children { + params := checkMaxParams(t, n.children[i]) + if params > maxParams { + maxParams = params + } + } + if n.nType != static && !n.wildChild { + maxParams++ + } + + if n.maxParams != maxParams { + t.Errorf( + "maxParams mismatch for node '%s': is %d, should be %d", + n.path, n.maxParams, maxParams, + ) + } + + return maxParams +} + +func TestCountParams(t *testing.T) { + if countParams("/path/:param1/static/*catch-all") != 2 { + t.Fail() + } + if countParams(strings.Repeat("/:param", 256)) != 255 { + t.Fail() + } +} + +func TestTreeAddAndGet(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/contact", + "/co", + "/c", + "/a", + "/ab", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/α", + "/β", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/a", false, "/a", nil}, + {"/", true, "", nil}, + {"/hi", false, "/hi", nil}, + {"/contact", false, "/contact", nil}, + {"/co", false, "/co", nil}, + {"/con", true, "", nil}, // key mismatch + {"/cona", true, "", nil}, // key mismatch + {"/no", true, "", nil}, // no matching child + {"/ab", false, "/ab", nil}, + {"/α", false, "/α", nil}, + {"/β", false, "/β", nil}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func TestTreeWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/cmd/:tool/:sub", + "/cmd/:tool/", + "/src/*filepath", + "/search/", + "/search/:query", + "/user_:name", + "/user_:name/about", + "/files/:dir/*filepath", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + 
"/info/:user/public", + "/info/:user/project/:project", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}}, + {"/cmd/test", true, "", Params{Param{"tool", "test"}}}, + {"/cmd/test/3", false, "/cmd/:tool/:sub", Params{Param{"tool", "test"}, Param{"sub", "3"}}}, + {"/src/", false, "/src/*filepath", Params{Param{"filepath", "/"}}}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/", false, "/search/", nil}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/search/someth!ng+in+ünìcodé/", true, "", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + {"/user_gopher/about", false, "/user_:name/about", Params{Param{"name", "gopher"}}}, + {"/files/js/inc/framework.js", false, "/files/:dir/*filepath", Params{Param{"dir", "js"}, Param{"filepath", "/inc/framework.js"}}}, + {"/info/gordon/public", false, "/info/:user/public", Params{Param{"user", "gordon"}}}, + {"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{"user", "gordon"}, Param{"project", "go"}}}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func catchPanic(testFunc func()) (recv interface{}) { + defer func() { + recv = recover() + }() + + testFunc() + return +} + +type testRoute struct { + path string + conflict bool +} + +func testRoutes(t *testing.T, routes []testRoute) { + tree := &node{} + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route.path, nil) + }) + + if route.conflict { + if recv == nil { + t.Errorf("no panic for conflicting route '%s'", route.path) + } + } else if recv != nil { + t.Errorf("unexpected panic for route '%s': %v", route.path, recv) + } + } + + //printChildren(tree, "") +} + +func TestTreeWildcardConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/:tool/:sub", false}, + {"/cmd/vet", true}, + {"/src/*filepath", false}, + {"/src/*filepathx", true}, + {"/src/", true}, + {"/src1/", false}, + {"/src1/*filepath", true}, + {"/src2*filepath", true}, + {"/search/:query", false}, + {"/search/invalid", true}, + {"/user_:name", false}, + {"/user_x", true}, + {"/user_:name", false}, + {"/id:id", false}, + {"/id/:id", true}, + } + testRoutes(t, routes) +} + +func TestTreeChildConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/vet", false}, + {"/cmd/:tool/:sub", true}, + {"/src/AUTHORS", false}, + {"/src/*filepath", true}, + {"/user_x", false}, + {"/user_:name", true}, + {"/id/:id", false}, + {"/id:id", true}, + {"/:id", true}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDupliatePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/doc/", + "/src/*filepath", + "/search/:query", + "/user_:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + + // Add again + recv = catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting duplicate route '%s", route) + } + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/doc/", false, "/doc/", nil}, + 
{"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + }) +} + +func TestEmptyWildcardName(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/user:", + "/user:/", + "/cmd/:/", + "/src/*", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting route with empty wildcard name '%s", route) + } + } +} + +func TestTreeCatchAllConflict(t *testing.T) { + routes := []testRoute{ + {"/src/*filepath/x", true}, + {"/src2/", false}, + {"/src2/*filepath/x", true}, + } + testRoutes(t, routes) +} + +func TestTreeCatchAllConflictRoot(t *testing.T) { + routes := []testRoute{ + {"/", false}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDoubleWildcard(t *testing.T) { + const panicMsg = "only one wildcard per path segment is allowed" + + routes := [...]string{ + "/:foo:bar", + "/:foo:bar/", + "/:foo*bar", + } + + for _, route := range routes { + tree := &node{} + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + + if rs, ok := recv.(string); !ok || !strings.HasPrefix(rs, panicMsg) { + t.Fatalf(`"Expected panic "%s" for route '%s', got "%v"`, panicMsg, route, recv) + } + } +} + +/*func TestTreeDuplicateWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/:id/:name/:id", + } + for _, route := range routes { + ... + } +}*/ + +func TestTreeTrailingSlashRedirect(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/no/a", + "/no/b", + "/api/hello/:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + //printChildren(tree, "") + + tsrRoutes := [...]string{ + "/hi/", + "/b", + "/search/gopher/", + "/cmd/vet", + "/src", + "/x/", + "/y", + "/0/go/", + "/1/go", + "/a", + "/doc/", + } + for _, route := range tsrRoutes { + handler, _, tsr := tree.getValue(route) + if handler != nil { + t.Fatalf("non-nil handler for TSR route '%s", route) + } else if !tsr { + t.Errorf("expected TSR recommendation for route '%s'", route) + } + } + + noTsrRoutes := [...]string{ + "/", + "/no", + "/no/", + "/_", + "/_/", + "/api/world/abc", + } + for _, route := range noTsrRoutes { + handler, _, tsr := tree.getValue(route) + if handler != nil { + t.Fatalf("non-nil handler for No-TSR route '%s", route) + } else if tsr { + t.Errorf("expected no TSR recommendation for route '%s'", route) + } + } +} + +func TestTreeFindCaseInsensitivePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/ABC/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/doc/go/away", + "/no/a", + "/no/b", + } + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", 
route, recv) + } + } + + // Check out == in for all registered routes + // With fixTrailingSlash = true + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, true) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + // With fixTrailingSlash = false + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, false) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + + tests := []struct { + in string + out string + found bool + slash bool + }{ + {"/HI", "/hi", true, false}, + {"/HI/", "/hi", true, true}, + {"/B", "/b/", true, true}, + {"/B/", "/b/", true, false}, + {"/abc", "/ABC/", true, true}, + {"/abc/", "/ABC/", true, false}, + {"/aBc", "/ABC/", true, true}, + {"/aBc/", "/ABC/", true, false}, + {"/abC", "/ABC/", true, true}, + {"/abC/", "/ABC/", true, false}, + {"/SEARCH/QUERY", "/search/QUERY", true, false}, + {"/SEARCH/QUERY/", "/search/QUERY", true, true}, + {"/CMD/TOOL/", "/cmd/TOOL/", true, false}, + {"/CMD/TOOL", "/cmd/TOOL/", true, true}, + {"/SRC/FILE/PATH", "/src/FILE/PATH", true, false}, + {"/x/Y", "/x/y", true, false}, + {"/x/Y/", "/x/y", true, true}, + {"/X/y", "/x/y", true, false}, + {"/X/y/", "/x/y", true, true}, + {"/X/Y", "/x/y", true, false}, + {"/X/Y/", "/x/y", true, true}, + {"/Y/", "/y/", true, false}, + {"/Y", "/y/", true, true}, + {"/Y/z", "/y/z", true, false}, + {"/Y/z/", "/y/z", true, true}, + {"/Y/Z", "/y/z", true, false}, + {"/Y/Z/", "/y/z", true, true}, + {"/y/Z", "/y/z", true, false}, + {"/y/Z/", "/y/z", true, true}, + {"/Aa", "/aa", true, false}, + {"/Aa/", "/aa", true, true}, + {"/AA", "/aa", true, false}, + {"/AA/", "/aa", true, true}, + {"/aA", "/aa", true, false}, + {"/aA/", "/aa", true, true}, + {"/A/", "/a/", true, false}, + {"/A", "/a/", true, true}, + {"/DOC", "/doc", true, false}, + {"/DOC/", "/doc", true, true}, + {"/NO", "", false, true}, + {"/DOC/GO", "", false, true}, + } + // With fixTrailingSlash = true + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, true) + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + // With fixTrailingSlash = false + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, false) + if test.slash { + if found { // test needs a trailingSlash fix. It must not be found! 
+ t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out)) + } + } else { + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + } +} + +func TestTreeInvalidNodeType(t *testing.T) { + const panicMsg = "invalid node type" + + tree := &node{} + tree.addRoute("/", fakeHandler("/")) + tree.addRoute("/:page", fakeHandler("/:page")) + + // set invalid node type + tree.children[0].nType = 42 + + // normal lookup + recv := catchPanic(func() { + tree.getValue("/test") + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } + + // case-insensitive lookup + recv = catchPanic(func() { + tree.findCaseInsensitivePath("/test", true) + }) + if rs, ok := recv.(string); !ok || rs != panicMsg { + t.Fatalf("Expected panic '"+panicMsg+"', got '%v'", recv) + } +} === added directory 'src/github.com/lxc' === added directory 'src/github.com/lxc/lxd' === added directory 'src/github.com/lxc/lxd/.github' === added file 'src/github.com/lxc/lxd/.github/ISSUE_TEMPLATE.md' --- src/github.com/lxc/lxd/.github/ISSUE_TEMPLATE.md 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/.github/ISSUE_TEMPLATE.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +The template below is mostly useful for bug reports and support questions. +Feel free to remove anything which doesn't apply to you and add more information where it makes sense. + +# Required information + + * Distribution: + * Distribution version: + * The output of "lxc info" or if that fails: + * Kernel version: + * LXC version: + * LXD version: + * Storage backend in use: + +# Issue description + +A brief description of what failed or what could be improved. + +# Steps to reproduce + + 1. Step one + 2. Step two + 3. Step three + +# Information to attach + + - [ ] any relevant kernel output (dmesg) + - [ ] container log (lxc info NAME --show-log) + - [ ] main daemon log (/var/log/lxd.log) + - [ ] output of the client with --debug + - [ ] output of the daemon with --debug === added file 'src/github.com/lxc/lxd/.gitignore' --- src/github.com/lxc/lxd/.gitignore 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +*.swp +po/*.mo +po/*.po~ +lxd-*.tar.gz +.vagrant +test/deps/devlxd-client +*~ +tags + +# For Atom ctags +.tags +.tags1 === added file 'src/github.com/lxc/lxd/.travis.yml' --- src/github.com/lxc/lxd/.travis.yml 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/.travis.yml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +language: go + +os: + - osx + +go: + - 1.5 + - 1.6 + - tip + +matrix: + fast_finish: true + allow_failures: + - go: tip + +install: + - "mkdir -p $GOPATH/github.com/lxc" + - "rsync -az ${TRAVIS_BUILD_DIR}/ $HOME/gopath/src/github.com/lxc/lxd/" + +script: + - "make client" + +notifications: + webhooks: https://linuxcontainers.org/webhook-lxcbot/ === added file 'src/github.com/lxc/lxd/AUTHORS' --- src/github.com/lxc/lxd/AUTHORS 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/AUTHORS 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +Unless mentioned otherwise in a specific file's header, all code in this +project is released under the Apache 2.0 license. + +The list of authors and contributors can be retrieved from the git +commit history and in some cases, the file headers. 
=== added file 'src/github.com/lxc/lxd/CONTRIBUTING.md' --- src/github.com/lxc/lxd/CONTRIBUTING.md 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/CONTRIBUTING.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +# Pull requests: + +Changes to this project should be proposed as pull requests on Github +at: https://github.com/lxc/lxd + +Proposed changes will then go through code review there and once acked, +be merged in the main branch. + + +# License and copyright: + +By default, any contribution to this project is made under the Apache +2.0 license. + +The author of a change remains the copyright holder of their code +(no copyright assignment). + + +# Developer Certificate of Origin: + +To improve tracking of contributions to this project we use the DCO 1.1 +and use a "sign-off" procedure for all changes going into the branch. + +The sign-off is a simple line at the end of the explanation for the +commit which certifies that you wrote it or otherwise have the right +to pass it on as an open-source contribution. + +> Developer Certificate of Origin +> Version 1.1 +> +> Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +> 660 York Street, Suite 102, +> San Francisco, CA 94110 USA +> +> Everyone is permitted to copy and distribute verbatim copies of this +> license document, but changing it is not allowed. +> +> Developer's Certificate of Origin 1.1 +> +> By making a contribution to this project, I certify that: +> +> (a) The contribution was created in whole or in part by me and I +> have the right to submit it under the open source license +> indicated in the file; or +> +> (b) The contribution is based upon previous work that, to the best +> of my knowledge, is covered under an appropriate open source +> license and I have the right under that license to submit that +> work with modifications, whether created in whole or in part +> by me, under the same open source license (unless I am +> permitted to submit under a different license), as indicated +> in the file; or +> +> (c) The contribution was provided directly to me by some other +> person who certified (a), (b) or (c) and I have not modified +> it. +> +> (d) I understand and agree that this project and the contribution +> are public and that a record of the contribution (including all +> personal information I submit with it, including my sign-off) is +> maintained indefinitely and may be redistributed consistent with +> this project or the open source license(s) involved. + +An example of a valid sign-off line is: + + Signed-off-by: Random J Developer + +Use your real name and a valid e-mail address. +Sorry, no pseudonyms or anonymous contributions are allowed. + +We also require each commit be individually signed-off by their author, +even when part of a larger set. You may find `git commit -s` useful. === added file 'src/github.com/lxc/lxd/COPYING' --- src/github.com/lxc/lxd/COPYING 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/COPYING 2016-03-22 15:18:22 +0000 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
=== added file 'src/github.com/lxc/lxd/Makefile' --- src/github.com/lxc/lxd/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +DOMAIN=lxd +POFILES=$(wildcard po/*.po) +MOFILES=$(patsubst %.po,%.mo,$(POFILES)) +LINGUAS=$(basename $(POFILES)) +POTFILE=po/$(DOMAIN).pot + +# dist is primarily for use when packaging; for development we still manage +# dependencies via `go get` explicitly. +# TODO: use git describe for versioning +VERSION=$(shell grep "var Version" shared/flex.go | cut -d'"' -f2) +ARCHIVE=lxd-$(VERSION).tar + +.PHONY: default +default: + # Must run twice due to go get race + -go get -t -v -d ./... + -go get -t -v -d ./... + go install -v $(DEBUG) ./... + @echo "LXD built successfully" + +.PHONY: client +client: + # Must run twice due to go get race + -go get -t -v -d ./... + -go get -t -v -d ./... + go install -v $(DEBUG) ./lxc + @echo "LXD client built successfully" + +.PHONY: update +update: + # Must run twice due to go get race + -go get -t -v -d -u ./... + go get -t -v -d -u ./... + @echo "Dependencies updated" + +# This only needs to be done when migrate.proto is actually changed; since we +# commit the .pb.go in the tree and it's not expected to change very often, +# it's not a default build step. +.PHONY: protobuf +protobuf: + protoc --go_out=. ./lxd/migrate.proto + +.PHONY: check +check: default + go get -v -x github.com/remyoudompheng/go-misc/deadcode + @# go vet can (and does on go < 1.5) fail + go get -v -x golang.org/x/tools/cmd/vet || true + go test -v ./... + cd test && ./main.sh + +gccgo: + go build -compiler gccgo ./... + @echo "LXD built successfully with gccgo" + +.PHONY: dist +dist: + rm -Rf lxd-$(VERSION) $(ARCHIVE) $(ARCHIVE).gz + mkdir -p lxd-$(VERSION)/dist + -GOPATH=$(shell pwd)/lxd-$(VERSION)/dist go get -t -v -d ./... + GOPATH=$(shell pwd)/lxd-$(VERSION)/dist go get -t -v -d ./... + rm -rf $(shell pwd)/lxd-$(VERSION)/dist/src/github.com/lxc/lxd + ln -s ../../../.. ./lxd-$(VERSION)/dist/src/github.com/lxc/lxd + git archive --prefix=lxd-$(VERSION)/ --output=$(ARCHIVE) HEAD + tar -uf $(ARCHIVE) --exclude-vcs lxd-$(VERSION)/ + gzip -9 $(ARCHIVE) + rm -Rf dist lxd-$(VERSION) $(ARCHIVE) + +.PHONY: i18n update-po update-pot build-mo static-analysis +i18n: update-pot + +po/%.mo: po/%.po + msgfmt --statistics -o $@ $< + +po/%.po: po/$(DOMAIN).pot + msgmerge -U po/$*.po po/$(DOMAIN).pot + +update-po: + -for lang in $(LINGUAS); do\ + msgmerge -U $$lang.po po/$(DOMAIN).pot; \ + rm -f $$lang.po~; \ + done + +update-pot: + go get -v -x github.com/ubuntu-core/snappy/i18n/xgettext-go/ + xgettext-go -o po/$(DOMAIN).pot --add-comments-tag=TRANSLATORS: --sort-output --package-name=$(DOMAIN) --msgid-bugs-address=lxc-devel@lists.linuxcontainers.org --keyword=i18n.G --keyword-plural=i18n.NG *.go shared/*.go lxc/*.go lxd/*.go + + +build-mo: $(MOFILES) + +static-analysis: + /bin/bash -x -c ". test/static_analysis.sh; static_analysis" + +tags: *.go lxd/*.go shared/*.go lxc/*.go + find . | grep \.go | grep -v git | grep -v .swp | grep -v vagrant | xargs gotags > tags === added file 'src/github.com/lxc/lxd/README.md' --- src/github.com/lxc/lxd/README.md 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,279 @@ +# LXD + +REST API, command line tool and OpenStack integration plugin for LXC. + +LXD is pronounced lex-dee. + +To easily see what LXD is about, you can [try it online](https://linuxcontainers.org/lxd/try-it). 
+ +## CI status + + * Travis: [![Build Status](https://travis-ci.org/lxc/lxd.svg?branch=master)](https://travis-ci.org/lxc/lxd) + * Jenkins: [![Build Status](https://jenkins.linuxcontainers.org/job/lxd-github-commit/badge/icon)](https://jenkins.linuxcontainers.org/job/lxd-github-commit/) + +## Getting started with LXD + +Since LXD development is happening at such a rapid pace, we only provide daily +builds right now. They're available via: + + sudo add-apt-repository ppa:ubuntu-lxc/lxd-git-master && sudo apt-get update + sudo apt-get install lxd + +Because group membership is only applied at login, you then either need to +close and re-open your user session or use the "newgrp lxd" command in the +shell you're going to interact with lxd from. + + newgrp lxd + +After you've got LXD installed and a session with the right permissions, you +can take your [first steps](#first-steps). + +## Building from source + +We recommend having the latest versions of liblxc (>= 1.1 required) and CRIU +(>= 1.7 recommended) available for LXD development. Additionally, LXD requires +Golang 1.5 or later to work. All the right versions of these dependencies are +available via the LXD PPA: + + sudo apt-get install software-properties-common + sudo add-apt-repository ppa:ubuntu-lxc/lxd-git-master + sudo apt-get update + sudo apt-get install golang lxc lxc-dev mercurial git pkg-config protobuf-compiler golang-goprotobuf-dev xz-utils tar acl make + +There are a few storage backends for LXD besides the default "directory" +backend. Installing these tools adds a bit to initramfs and may slow down your +host boot, but they are needed if you'd like to use a particular backend: + + sudo apt-get install lvm2 thin-provisioning-tools + sudo apt-get install btrfs-tools + +To run the testsuite, you'll also need: + + sudo apt-get install curl gettext jq sqlite3 uuid-runtime pyflakes pep8 shellcheck bzr + +### Building the tools + +LXD consists of two binaries, a client called `lxc` and a server called `lxd`. +These live in the source tree in the `lxc/` and `lxd/` dirs, respectively. To +get the code, set up your Go environment: + + mkdir -p ~/go + export GOPATH=~/go + +And then download it as usual: + + go get github.com/lxc/lxd + cd $GOPATH/src/github.com/lxc/lxd + make + +...which will give you two binaries in $GOPATH/bin: `lxd`, the daemon binary, +and `lxc`, a command-line client to that daemon. + +### Machine Setup + +You'll need sub{u,g}ids for root, so that LXD can create the unprivileged +containers: + + echo "root:1000000:65536" | sudo tee -a /etc/subuid /etc/subgid + +Now you can run the daemon (the --group sudo bit allows everyone in the sudo +group to talk to LXD; you can create your own group if you want): + + sudo -E $GOPATH/bin/lxd --group sudo + +## First steps + +LXD has two parts, the daemon (the `lxd` binary), and the client (the `lxc` +binary). Now that the daemon is configured and running (either via the +packaging or via the from-source instructions above), you can create a container: + + $GOPATH/bin/lxc launch ubuntu:14.04 + +Alternatively, you can also use a remote LXD host as a source of images. +One comes pre-configured in LXD, called "images" (images.linuxcontainers.org): + + $GOPATH/bin/lxc launch images:centos/7/amd64 centos + +## Bug reports + +Bug reports can be filed at https://github.com/lxc/lxd/issues/new + +## Contributing + +Fixes and new features are greatly appreciated, but please read our +[contributing guidelines](CONTRIBUTING.md) first.
+ +Contributions to this project should be sent as pull requests on github. + +## Hacking + +Sometimes it is useful to view the raw response that LXD sends; you can do +this by: + + lxc config set core.trust_password foo + lxc remote add local 127.0.0.1:8443 + wget --no-check-certificate https://127.0.0.1:8443/1.0 --certificate=$HOME/.config/lxc/client.crt --private-key=$HOME/.config/lxc/client.key -O - -q + +## Upgrading + +The `lxd` and `lxc` (`lxd-client`) binaries should be upgraded at the same time with: + + apt-get update + apt-get install lxd lxd-client + +## Support and discussions + +We use the LXC mailing-lists for developer and user discussions; you can +find and subscribe to those at: https://lists.linuxcontainers.org + +If you prefer live discussions, some of us also hang out in +[#lxcontainers](http://webchat.freenode.net/?channels=#lxcontainers) on irc.freenode.net. + + +## FAQ + +#### How do I enable the LXD server for remote access? + +By default, the LXD server is not accessible from the network, as it only listens +on a local unix socket. You can make LXD available from the network by specifying +additional addresses to listen to. This is done with the `core.https_address` +config variable. + +To see the current server configuration, run: + + lxc config show + +To set the address to listen to, find out what addresses are available and use +the `config set` command on the server: + + ip addr + lxc config set core.https_address 192.168.1.15 + +#### Why does `lxc remote add` over https ask for a password? + +By default, LXD has no password for security reasons, so you can't do a remote +add this way. In order to set a password, do: + + lxc config set core.trust_password SECRET + +on the host LXD is running on. This will set the remote password that you can +then use to do `lxc remote add`. + +You can also access the server without setting a password by copying the client +certificate from `.config/lxc/client.crt` to the server and adding it with: + + lxc config trust add client.crt + + +#### How do I configure alternative storage backends for LXD? + +LXD supports various storage backends; below are instructions on how to +configure some of them. By default, we use a simple directory-backed storage +mechanism, but we recommend using ZFS for best results. + +###### ZFS + +First, you need to install the ZFS tooling. On Wily and above this is just: + + sudo apt-get install zfsutils-linux + +ZFS has many different ways to procure a zpool, which is what you need to feed +LXD. For example, if you have an extra block device lying around, you can +just: + + sudo zpool create lxd /dev/sdc6 -m none + +However, if you want to test things out on a laptop or don't have an extra disk +lying around, ZFS has its own loopback driver and can be used directly on a +(sparse) file. To do this, first create the sparse file: + + sudo truncate -s 100G /var/lib/lxd.img + +then, + + sudo zpool create lxd /var/lib/lxd.img -m none + +Finally, whichever method you used to create your zpool, you need to tell LXD +to use it: + + lxc config set storage.zfs_pool_name lxd + +###### BTRFS + +The setup for btrfs is fairly simple: just mount /var/lib/lxd (or whatever your +chosen `LXD_DIR` is) as a btrfs filesystem before you start LXD, and you're +good to go. First install the btrfs userspace tools: + + sudo apt-get install btrfs-tools + +Now, you need to create a btrfs filesystem.
If you don't have an extra disk +lying around, you'll have to create your own loopback device manually: + + sudo truncate -s 100G /var/lib/lxd.img + sudo losetup /dev/loop0 /var/lib/lxd.img + +Once you've got a loopback device (or an actual device), you can create the +btrfs filesystem and mount it: + + sudo mkfs.btrfs /dev/loop0 # or your real device + sudo mount /dev/loop0 /var/lib/lxd + +###### LVM + +To set up LVM, the instructions are similar to the above. First, install the +userspace tools: + + sudo apt-get install lvm2 thin-provisioning-tools + +Then, if you have a block device lying around: + + sudo pvcreate /dev/sdc6 + sudo vgcreate lxd /dev/sdc6 + lxc config set storage.lvm_vg_name lxd + +Alternatively, if you want to try it via a loopback device, there is a script +provided in +[/scripts/lxd-setup-lvm-storage](https://raw.githubusercontent.com/lxc/lxd/master/scripts/lxd-setup-lvm-storage) +which will do it for you. It can be run via: + + sudo apt-get install lvm2 + ./scripts/lxd-setup-lvm-storage -s 10G + +And it has a --destroy argument to clean up the bits as well: + + ./scripts/lxd-setup-lvm-storage --destroy + + + +#### How can I live migrate a container using LXD? + +Live migration requires a tool installed on both hosts called +[CRIU](http://criu.org), which is available in Ubuntu via: + + sudo apt-get install criu + +Then, launch your container with the following: + + lxc launch ubuntu $somename + sleep 5s # let the container get to an interesting state + lxc move host1:$somename host2:$somename + +And with luck you'll have migrated the container :). Migration is still +experimental and may not work for all workloads. Please report bugs on +lxc-devel, and we can escalate to CRIU lists as necessary. + +#### Can I bind-mount my home directory in a container? + +Yes. The easiest way to do that is using a privileged container: + + lxc launch ubuntu priv -c security.privileged=true + lxc config device add priv homedir disk source=/home/$USER path=/home/ubuntu + +#### How can I run docker inside an LXD container? + +Create a container with the default and docker profiles: + + lxc launch ubuntu:xenial my-docker-host -p default -p docker + +Then run a version of docker with the needed patches, for instance the +v1.10.0.serge.2 branch of github.com/hallyn/docker.
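+ +#### How can I use the Go client library? + +This source tree is also the Go client package (github.com/lxc/lxd) that the `lxc` tool itself is built on; see client.go. Below is a minimal sketch, assuming a daemon listening on the standard unix socket, with error handling reduced to panics for brevity: + + package main + + import ( + "fmt" + + "github.com/lxc/lxd" + ) + + func main() { + // Connect over the local unix socket (see ConnectInfo in client.go). + c, err := lxd.NewClientFromInfo(lxd.ConnectInfo{ + Name: "local", + Addr: "unix:/var/lib/lxd/unix.socket", + }) + if err != nil { + panic(err) + } + + // List the containers this daemon knows about. + containers, err := c.ListContainers() + if err != nil { + panic(err) + } + for _, ct := range containers { + fmt.Printf("%+v\n", ct) + } + }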
=== added file 'src/github.com/lxc/lxd/Vagrantfile' --- src/github.com/lxc/lxd/Vagrantfile 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/Vagrantfile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +Vagrant.configure('2') do |config| + # grab Ubuntu 14.04 boxcutter image: https://atlas.hashicorp.com/boxcutter + config.vm.box = "boxcutter/ubuntu1404" # Ubuntu 14.04 + + # fix issues with slow dns https://www.virtualbox.org/ticket/13002 + config.vm.provider :virtualbox do |vb, override| + vb.customize ["modifyvm", :id, "--natdnsproxy1", "off"] + end + + config.vm.network "forwarded_port", guest: 443, host: 8443 + config.vm.provision :shell, :privileged => false, :path => "scripts/vagrant/install-go.sh" + config.vm.provision :shell, :privileged => false, :path => "scripts/vagrant/install-lxd.sh" + +end === added file 'src/github.com/lxc/lxd/client.go' --- src/github.com/lxc/lxd/client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2080 @@ +package lxd + +import ( + "bytes" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd/shared" +) + +// Client can talk to a LXD daemon. +type Client struct { + BaseURL string + BaseWSURL string + Config Config + Name string + Remote *RemoteConfig + Transport string + Certificate string + + Http http.Client + websocketDialer websocket.Dialer + simplestreams *shared.SimpleStreams +} + +type ResponseType string + +const ( + Sync ResponseType = "sync" + Async ResponseType = "async" + Error ResponseType = "error" +) + +var ( + // LXDErrors are special errors; the client library hoists error codes + // to these errors internally so that user code can compare against + // them. We probably shouldn't hoist BadRequest or InternalError, since + // LXD passes an error string along which is more informative than + // whatever static error message we would put here. + LXDErrors = map[int]error{ + http.StatusNotFound: fmt.Errorf("not found"), + } +) + +type Response struct { + Type ResponseType `json:"type"` + + /* Valid only for Sync responses */ + Status string `json:"status"` + StatusCode int `json:"status_code"` + + /* Valid only for Async responses */ + Operation string `json:"operation"` + + /* Valid only for Error responses */ + Code int `json:"error_code"` + Error string `json:"error"` + + /* Valid for Sync and Error responses */ + Metadata json.RawMessage `json:"metadata"` +} + +func (r *Response) MetadataAsMap() (*shared.Jmap, error) { + ret := shared.Jmap{} + if err := json.Unmarshal(r.Metadata, &ret); err != nil { + return nil, err + } + return &ret, nil +} + +func (r *Response) MetadataAsOperation() (*shared.Operation, error) { + op := shared.Operation{} + if err := json.Unmarshal(r.Metadata, &op); err != nil { + return nil, err + } + + return &op, nil +} + +// ParseResponse parses a lxd style response out of an http.Response. Note that +// this does _not_ automatically convert error responses to golang errors. To +// do that, use ParseError. Internal client library uses should probably use +// HoistResponse, unless they are interested in accessing the underlying Error +// response (e.g. to inspect the error code). 
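+// +// For illustration, a hypothetical usage sketch (raw is assumed to be an +// *http.Response obtained from c.Http.Do): +// +//	resp, err := HoistResponse(raw, Sync) +//	if err != nil { +//		// Error responses are hoisted to go errors; well-known codes +//		// map to entries in LXDErrors (e.g. http.StatusNotFound). +//		return err +//	}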
+func ParseResponse(r *http.Response) (*Response, error) { + if r == nil { + return nil, fmt.Errorf("no response!") + } + defer r.Body.Close() + ret := Response{} + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + shared.Debugf("Raw response: %s", string(s)) + + if err := json.Unmarshal(s, &ret); err != nil { + return nil, err + } + + return &ret, nil +} + +// HoistResponse hoists a regular http response into a response of type rtype +// or returns a golang error. +func HoistResponse(r *http.Response, rtype ResponseType) (*Response, error) { + resp, err := ParseResponse(r) + if err != nil { + return nil, err + } + + if resp.Type == Error { + // Try and use a known error if we have one for this code. + err, ok := LXDErrors[resp.Code] + if !ok { + return nil, fmt.Errorf(resp.Error) + } + return nil, err + } + + if resp.Type != rtype { + return nil, fmt.Errorf("got bad response type, expected %s got %s", rtype, resp.Type) + } + + return resp, nil +} + +func ensureMyCert(configDir string) (string, string, error) { + certf := path.Join(configDir, "client.crt") + keyf := path.Join(configDir, "client.key") + + err := shared.FindOrGenCert(certf, keyf) + + return certf, keyf, err +} + +// NewClient returns a new LXD client. +func NewClient(config *Config, remote string) (*Client, error) { + if remote == "" { + return nil, fmt.Errorf("A remote name must be provided.") + } + + r, ok := config.Remotes[remote] + if !ok { + return nil, fmt.Errorf("unknown remote name: %q", remote) + } + info := ConnectInfo{ + Name: remote, + Addr: r.Addr, + } + + if strings.HasPrefix(r.Addr, "unix:") { + // replace "unix://" with the official "unix:/var/lib/lxd/unix.socket" + if info.Addr == "unix://" { + info.Addr = fmt.Sprintf("unix:%s", shared.VarPath("unix.socket")) + } + } else { + certf, keyf, err := ensureMyCert(config.ConfigDir) + if err != nil { + return nil, err + } + certBytes, err := ioutil.ReadFile(certf) + if err != nil { + return nil, err + } + keyBytes, err := ioutil.ReadFile(keyf) + if err != nil { + return nil, err + } + info.ClientPEMCert = string(certBytes) + info.ClientPEMKey = string(keyBytes) + serverCertPath := config.ServerCertPath(remote) + if shared.PathExists(serverCertPath) { + cert, err := shared.ReadCert(serverCertPath) + if err != nil { + return nil, err + } + + info.ServerPEMCert = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})) + } + } + c, err := NewClientFromInfo(info) + if err != nil { + return nil, err + } + c.Config = *config + c.Remote = &r + + if c.Remote.Protocol == "simplestreams" { + ss, err := shared.SimpleStreamsClient(c.Remote.Addr) + if err != nil { + return nil, err + } + + c.simplestreams = ss + } + + return c, nil +} + +// ConnectInfo contains the information we need to connect to a specific LXD server +type ConnectInfo struct { + // Name is a simple identifier for the remote server. In 'lxc' it is + // the name used to lookup the address and other information in the + // config.yml file. + Name string + // Addr is the host address to connect to. It can be + // unix:/path/to/socket to indicate we should connect over a unix + // socket, or it can be an IP Address or + // Hostname, or an https:// URL. + // The standard unix socket is located at $LXD_DIR/unix.socket + // See also github.com/lxc/lxd/shared.VarPath("unix.socket") + Addr string + // ClientPEMCert is the PEM encoded bytes of the client's certificate. + // If Addr indicates a Unix socket, the certificate and key bytes will + // not be used. 
+ ClientPEMCert string + // ClientPEMKey is the PEM encoded private bytes of the client's key associated with its certificate + ClientPEMKey string + // ServerPEMCert is the PEM encoded server certificate that we are + // connecting to. It can be the empty string if we do not know the + // server's certificate yet. + ServerPEMCert string +} + +func connectViaUnix(c *Client, addr string) error { + c.BaseURL = "http://unix.socket" + c.BaseWSURL = "ws://unix.socket" + c.Transport = "unix" + r := &RemoteConfig{Addr: addr} + uDial := func(network, addr string) (net.Conn, error) { + // The arguments 'network' and 'addr' are ignored because + // they are the wrong information. + // addr is generated from BaseURL which becomes + // 'unix.socket:80' which is certainly not what we want. + // handle: + // unix:///path/to/socket + // unix:/path/to/socket + // unix:path/to/socket + path := strings.TrimPrefix(r.Addr, "unix:") + if strings.HasPrefix(path, "///") { + // translate unix:///path/to, to just "/path/to" + path = path[2:] + } + raddr, err := net.ResolveUnixAddr("unix", path) + if err != nil { + return nil, err + } + return net.DialUnix("unix", nil, raddr) + } + c.Http.Transport = &http.Transport{Dial: uDial} + c.websocketDialer.NetDial = uDial + c.Remote = r + + st, err := c.ServerStatus() + if err != nil { + return err + } + c.Certificate = st.Environment.Certificate + return nil +} + +func connectViaHttp(c *Client, addr, clientCert, clientKey, serverCert string) error { + tlsconfig, err := shared.GetTLSConfigMem(clientCert, clientKey, serverCert) + if err != nil { + return err + } + + tr := &http.Transport{ + TLSClientConfig: tlsconfig, + Dial: shared.RFC3493Dialer, + Proxy: http.ProxyFromEnvironment, + } + + c.websocketDialer.NetDial = shared.RFC3493Dialer + c.websocketDialer.TLSClientConfig = tlsconfig + + justAddr := strings.TrimPrefix(addr, "https://") + c.BaseURL = "https://" + justAddr + c.BaseWSURL = "wss://" + justAddr + c.Transport = "https" + c.Http.Transport = tr + c.Remote = &RemoteConfig{Addr: addr} + c.Certificate = serverCert + // We don't actually need to connect yet, defer that until someone + // needs something from the server. + + return nil +} + +// NewClientFromInfo returns a new LXD client. 
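+// +// A hypothetical sketch of connecting to a remote over https; certPEM and +// keyPEM are assumed to hold the client's PEM-encoded certificate pair +// (cf. ensureMyCert): +// +//	c, err := NewClientFromInfo(ConnectInfo{ +//		Name:          "myserver", +//		Addr:          "https://192.168.1.15:8443", +//		ClientPEMCert: string(certPEM), +//		ClientPEMKey:  string(keyPEM), +//	})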
+func NewClientFromInfo(info ConnectInfo) (*Client, error) { + c := &Client{ + // Config: *config, + Http: http.Client{}, + Config: Config{ + Remotes: DefaultRemotes, + Aliases: map[string]string{}, + }, + } + c.Name = info.Name + var err error + if info.Addr[0:5] == "unix:" { + err = connectViaUnix(c, info.Addr) + } else { + err = connectViaHttp(c, info.Addr, info.ClientPEMCert, info.ClientPEMKey, info.ServerPEMCert) + } + if err != nil { + return nil, err + } + + return c, nil +} + +func (c *Client) Addresses() ([]string, error) { + addresses := make([]string, 0) + + if c.Transport == "unix" { + serverStatus, err := c.ServerStatus() + if err != nil { + return nil, err + } + addresses = serverStatus.Environment.Addresses + } else if c.Transport == "https" { + addresses = append(addresses, c.BaseURL[8:]) + } else { + return nil, fmt.Errorf("unknown transport type: %s", c.Transport) + } + + if len(addresses) == 0 { + return nil, fmt.Errorf("The source remote isn't available over the network") + } + + return addresses, nil +} + +func (c *Client) get(base string) (*Response, error) { + uri := c.url(shared.APIVersion, base) + + return c.baseGet(uri) +} + +func (c *Client) baseGet(getUrl string) (*Response, error) { + req, err := http.NewRequest("GET", getUrl, nil) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", shared.UserAgent) + + resp, err := c.Http.Do(req) + if err != nil { + return nil, err + } + + return HoistResponse(resp, Sync) +} + +func (c *Client) put(base string, args interface{}, rtype ResponseType) (*Response, error) { + uri := c.url(shared.APIVersion, base) + + buf := bytes.Buffer{} + err := json.NewEncoder(&buf).Encode(args) + if err != nil { + return nil, err + } + + shared.Debugf("Putting %s to %s", buf.String(), uri) + + req, err := http.NewRequest("PUT", uri, &buf) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", shared.UserAgent) + req.Header.Set("Content-Type", "application/json") + + resp, err := c.Http.Do(req) + if err != nil { + return nil, err + } + + return HoistResponse(resp, rtype) +} + +func (c *Client) post(base string, args interface{}, rtype ResponseType) (*Response, error) { + uri := c.url(shared.APIVersion, base) + + buf := bytes.Buffer{} + err := json.NewEncoder(&buf).Encode(args) + if err != nil { + return nil, err + } + + shared.Debugf("Posting %s to %s", buf.String(), uri) + + req, err := http.NewRequest("POST", uri, &buf) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", shared.UserAgent) + req.Header.Set("Content-Type", "application/json") + + resp, err := c.Http.Do(req) + if err != nil { + return nil, err + } + + return HoistResponse(resp, rtype) +} + +func (c *Client) getRaw(uri string) (*http.Response, error) { + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", shared.UserAgent) + + raw, err := c.Http.Do(req) + if err != nil { + return nil, err + } + + // because it is raw data, we need to check for http status + if raw.StatusCode != 200 { + resp, err := HoistResponse(raw, Sync) + if err != nil { + return nil, err + } + return nil, fmt.Errorf("expected error, got %v", *resp) + } + + return raw, nil +} + +func (c *Client) delete(base string, args interface{}, rtype ResponseType) (*Response, error) { + uri := c.url(shared.APIVersion, base) + + buf := bytes.Buffer{} + err := json.NewEncoder(&buf).Encode(args) + if err != nil { + return nil, err + } + + shared.Debugf("Deleting %s to %s", buf.String(), uri) + + req, err := 
http.NewRequest("DELETE", uri, &buf) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", shared.UserAgent) + req.Header.Set("Content-Type", "application/json") + + resp, err := c.Http.Do(req) + if err != nil { + return nil, err + } + + return HoistResponse(resp, rtype) +} + +func (c *Client) websocket(operation string, secret string) (*websocket.Conn, error) { + query := url.Values{"secret": []string{secret}} + url := c.BaseWSURL + path.Join(operation, "websocket") + "?" + query.Encode() + return WebsocketDial(c.websocketDialer, url) +} + +func (c *Client) url(elem ...string) string { + path := strings.Join(elem, "/") + uri := c.BaseURL + "/" + path + + if strings.HasPrefix(path, "1.0/images/aliases") { + return uri + } + + if strings.Contains(path, "?") { + return uri + } + + return strings.TrimSuffix(uri, "/") +} + +func (c *Client) GetServerConfig() (*Response, error) { + return c.baseGet(c.url(shared.APIVersion)) +} + +func (c *Client) Finger() error { + shared.Debugf("Fingering the daemon") + _, err := c.GetServerConfig() + if err != nil { + return err + } + + shared.Debugf("Pong received") + return nil +} + +func (c *Client) AmTrusted() bool { + resp, err := c.GetServerConfig() + if err != nil { + return false + } + + shared.Debugf("%s", resp) + + jmap, err := resp.MetadataAsMap() + if err != nil { + return false + } + + auth, err := jmap.GetString("auth") + if err != nil { + return false + } + + return auth == "trusted" +} + +func (c *Client) IsPublic() bool { + resp, err := c.GetServerConfig() + if err != nil { + return false + } + + shared.Debugf("%s", resp) + + jmap, err := resp.MetadataAsMap() + if err != nil { + return false + } + + public, err := jmap.GetBool("public") + if err != nil { + return false + } + + return public +} + +func (c *Client) ListContainers() ([]shared.ContainerInfo, error) { + resp, err := c.get("containers?recursion=1") + if err != nil { + return nil, err + } + + var result []shared.ContainerInfo + + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) CopyImage(image string, dest *Client, copy_aliases bool, aliases []string, public bool, autoUpdate bool, progressHandler func(progress string)) error { + source := shared.Jmap{ + "type": "image", + "mode": "pull", + "server": c.BaseURL, + "protocol": c.Remote.Protocol, + "certificate": c.Certificate, + "fingerprint": image} + + target := c.GetAlias(image) + if target != "" { + image = target + } + + info, err := c.GetImageInfo(image) + if err != nil { + return err + } + + if c.Remote.Protocol != "simplestreams" { + if !info.Public { + var secret string + + resp, err := c.post("images/"+image+"/secret", nil, Async) + if err != nil { + return err + } + + op, err := resp.MetadataAsOperation() + if err != nil { + return err + } + + secret, err = op.Metadata.GetString("secret") + if err != nil { + return err + } + + source["secret"] = secret + } + source["fingerprint"] = image + } + + addresses, err := c.Addresses() + if err != nil { + return err + } + + operation := "" + handler := func(msg interface{}) { + if msg == nil { + return + } + + event := msg.(map[string]interface{}) + if event["type"].(string) != "operation" { + return + } + + if event["metadata"] == nil { + return + } + + md := event["metadata"].(map[string]interface{}) + if !strings.HasSuffix(operation, md["id"].(string)) { + return + } + + if md["metadata"] == nil { + return + } + + opMd := md["metadata"].(map[string]interface{}) + _, ok := 
opMd["download_progress"] + if ok { + progressHandler(opMd["download_progress"].(string)) + } + } + + if progressHandler != nil { + go dest.Monitor([]string{"operation"}, handler) + } + + for _, addr := range addresses { + sourceUrl := "https://" + addr + + source["server"] = sourceUrl + body := shared.Jmap{"public": public, "auto_update": autoUpdate, "source": source} + + resp, err := dest.post("images", body, Async) + if err != nil { + continue + } + + operation = resp.Operation + + err = dest.WaitForSuccess(resp.Operation) + if err != nil { + return err + } + + break + } + + if err != nil { + return err + } + + /* copy aliases from source image */ + if copy_aliases { + for _, alias := range info.Aliases { + dest.DeleteAlias(alias.Name) + err = dest.PostAlias(alias.Name, alias.Description, info.Fingerprint) + if err != nil { + return fmt.Errorf("Error adding alias %s: %s", alias.Name, err) + } + } + } + + /* add new aliases */ + for _, alias := range aliases { + dest.DeleteAlias(alias) + err = dest.PostAlias(alias, alias, info.Fingerprint) + if err != nil { + return fmt.Errorf("Error adding alias %s: %s\n", alias, err) + } + } + + return err +} + +func (c *Client) ExportImage(image string, target string) (string, error) { + if c.Remote.Protocol == "simplestreams" && c.simplestreams != nil { + return c.simplestreams.ExportImage(image, target) + } + + uri := c.url(shared.APIVersion, "images", image, "export") + raw, err := c.getRaw(uri) + if err != nil { + return "", err + } + + ctype, ctypeParams, err := mime.ParseMediaType(raw.Header.Get("Content-Type")) + if err != nil { + ctype = "application/octet-stream" + } + + // Deal with split images + if ctype == "multipart/form-data" { + if !shared.IsDir(target) { + return "", fmt.Errorf("Split images can only be written to a directory.") + } + + // Parse the POST data + mr := multipart.NewReader(raw.Body, ctypeParams["boundary"]) + + // Get the metadata tarball + part, err := mr.NextPart() + if err != nil { + return "", err + } + + if part.FormName() != "metadata" { + return "", fmt.Errorf("Invalid multipart image") + } + + imageTarf, err := os.OpenFile(filepath.Join(target, part.FileName()), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return "", err + } + + _, err = io.Copy(imageTarf, part) + + imageTarf.Close() + if err != nil { + return "", err + } + + // Get the rootfs tarball + part, err = mr.NextPart() + if err != nil { + return "", err + } + + if part.FormName() != "rootfs" { + return "", fmt.Errorf("Invalid multipart image") + } + + // Write the rootfs tarball next to the metadata tarball inside the + // target directory (not to the bare filename in the working dir). + rootfsTarf, err := os.OpenFile(filepath.Join(target, part.FileName()), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return "", err + } + + _, err = io.Copy(rootfsTarf, part) + + rootfsTarf.Close() + if err != nil { + return "", err + } + + return target, nil + } + + // Deal with unified images + var wr io.Writer + var destpath string + if target == "-" { + wr = os.Stdout + destpath = "stdout" + } else if fi, err := os.Stat(target); err == nil { + // file exists, so check if folder + switch mode := fi.Mode(); { + case mode.IsDir(): + // save in directory, header content-disposition can not be null + // and will have a filename + cd := strings.Split(raw.Header["Content-Disposition"][0], "=") + + // write filename from header + destpath = filepath.Join(target, cd[1]) + f, err := os.Create(destpath) + defer f.Close() + + if err != nil { + return "", err + } + + wr = f + + default: + // overwrite file + destpath = target + f, err := os.OpenFile(destpath,
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + defer f.Close() + + if err != nil { + return "", err + } + + wr = f + } + } else { + // write as simple file + destpath = target + f, err := os.Create(destpath) + defer f.Close() + + wr = f + if err != nil { + return "", err + } + } + + _, err = io.Copy(wr, raw.Body) + + if err != nil { + return "", err + } + + // it streams to stdout or file, so no response returned + return destpath, nil +} + +func (c *Client) PostImageURL(imageFile string, public bool, aliases []string) (string, error) { + source := shared.Jmap{ + "type": "url", + "mode": "pull", + "url": imageFile} + body := shared.Jmap{"public": public, "source": source} + + resp, err := c.post("images", body, Async) + if err != nil { + return "", err + } + + op, err := c.WaitFor(resp.Operation) + if err != nil { + return "", err + } + + if op.Metadata == nil { + return "", fmt.Errorf("Missing operation metadata") + } + + fingerprint, err := op.Metadata.GetString("fingerprint") + if err != nil { + return "", err + } + + /* add new aliases */ + for _, alias := range aliases { + c.DeleteAlias(alias) + err = c.PostAlias(alias, alias, fingerprint) + if err != nil { + return "", fmt.Errorf("Error adding alias %s: %s", alias, err) + } + } + + return fingerprint, nil +} + +func (c *Client) PostImage(imageFile string, rootfsFile string, properties []string, public bool, aliases []string, progressHandler func(percent int)) (string, error) { + uri := c.url(shared.APIVersion, "images") + + var err error + var fImage *os.File + var fRootfs *os.File + var req *http.Request + + if rootfsFile != "" { + fImage, err = os.Open(imageFile) + if err != nil { + return "", err + } + defer fImage.Close() + + fRootfs, err = os.Open(rootfsFile) + if err != nil { + return "", err + } + defer fRootfs.Close() + + body, err := ioutil.TempFile("", "lxc_image_") + if err != nil { + return "", err + } + defer os.Remove(body.Name()) + + w := multipart.NewWriter(body) + + // Metadata file + fw, err := w.CreateFormFile("metadata", path.Base(imageFile)) + if err != nil { + return "", err + } + + _, err = io.Copy(fw, fImage) + if err != nil { + return "", err + } + + // Rootfs file + fw, err = w.CreateFormFile("rootfs", path.Base(rootfsFile)) + if err != nil { + return "", err + } + + _, err = io.Copy(fw, fRootfs) + if err != nil { + return "", err + } + + w.Close() + + size, err := body.Seek(0, 2) + if err != nil { + return "", err + } + + _, err = body.Seek(0, 0) + if err != nil { + return "", err + } + + progress := &shared.TransferProgress{Reader: body, Length: size, Handler: progressHandler} + + req, err = http.NewRequest("POST", uri, progress) + req.Header.Set("Content-Type", w.FormDataContentType()) + } else { + fImage, err = os.Open(imageFile) + if err != nil { + return "", err + } + defer fImage.Close() + + stat, err := fImage.Stat() + if err != nil { + return "", err + } + + progress := &shared.TransferProgress{Reader: fImage, Length: stat.Size(), Handler: progressHandler} + + req, err = http.NewRequest("POST", uri, progress) + req.Header.Set("X-LXD-filename", filepath.Base(imageFile)) + req.Header.Set("Content-Type", "application/octet-stream") + } + + if err != nil { + return "", err + } + req.Header.Set("User-Agent", shared.UserAgent) + + if public { + req.Header.Set("X-LXD-public", "1") + } else { + req.Header.Set("X-LXD-public", "0") + } + + if len(properties) != 0 { + imgProps := url.Values{} + for _, value := range properties { + eqIndex := strings.Index(value, "=") + + // props must be in key=value format + // 
if not, request will not be accepted + if eqIndex > -1 { + imgProps.Set(value[:eqIndex], value[eqIndex+1:]) + } else { + return "", fmt.Errorf("Bad image property: %s", value) + } + + } + + req.Header.Set("X-LXD-properties", imgProps.Encode()) + } + + raw, err := c.Http.Do(req) + if err != nil { + return "", err + } + + resp, err := HoistResponse(raw, Async) + if err != nil { + return "", err + } + + jmap, err := c.AsyncWaitMeta(resp) + if err != nil { + return "", err + } + + fingerprint, err := jmap.GetString("fingerprint") + if err != nil { + return "", err + } + + /* add new aliases */ + for _, alias := range aliases { + c.DeleteAlias(alias) + err = c.PostAlias(alias, alias, fingerprint) + if err != nil { + return "", fmt.Errorf("Error adding alias %s: %s", alias, err) + } + } + + return fingerprint, nil +} + +func (c *Client) GetImageInfo(image string) (*shared.ImageInfo, error) { + if c.Remote.Protocol == "simplestreams" && c.simplestreams != nil { + return c.simplestreams.GetImageInfo(image) + } + + resp, err := c.get(fmt.Sprintf("images/%s", image)) + if err != nil { + return nil, err + } + + info := shared.ImageInfo{} + if err := json.Unmarshal(resp.Metadata, &info); err != nil { + return nil, err + } + + return &info, nil +} + +func (c *Client) PutImageInfo(name string, p shared.BriefImageInfo) error { + _, err := c.put(fmt.Sprintf("images/%s", name), p, Sync) + return err +} + +func (c *Client) ListImages() ([]shared.ImageInfo, error) { + if c.Remote.Protocol == "simplestreams" && c.simplestreams != nil { + return c.simplestreams.ListImages() + } + + resp, err := c.get("images?recursion=1") + if err != nil { + return nil, err + } + + var result []shared.ImageInfo + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) DeleteImage(image string) error { + _, err := c.delete(fmt.Sprintf("images/%s", image), nil, Sync) + return err +} + +func (c *Client) PostAlias(alias string, desc string, target string) error { + body := shared.Jmap{"description": desc, "target": target, "name": alias} + + _, err := c.post("images/aliases", body, Sync) + return err +} + +func (c *Client) DeleteAlias(alias string) error { + _, err := c.delete(fmt.Sprintf("images/aliases/%s", alias), nil, Sync) + return err +} + +func (c *Client) ListAliases() (shared.ImageAliases, error) { + if c.Remote.Protocol == "simplestreams" && c.simplestreams != nil { + return c.simplestreams.ListAliases() + } + + resp, err := c.get("images/aliases?recursion=1") + if err != nil { + return nil, err + } + + var result shared.ImageAliases + + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) CertificateList() ([]shared.CertInfo, error) { + resp, err := c.get("certificates?recursion=1") + if err != nil { + return nil, err + } + + var result []shared.CertInfo + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) AddMyCertToServer(pwd string) error { + body := shared.Jmap{"type": "client", "password": pwd} + + _, err := c.post("certificates", body, Sync) + return err +} + +func (c *Client) CertificateAdd(cert *x509.Certificate, name string) error { + b64 := base64.StdEncoding.EncodeToString(cert.Raw) + _, err := c.post("certificates", shared.Jmap{"type": "client", "certificate": b64, "name": name}, Sync) + return err +} + +func (c *Client) CertificateRemove(fingerprint string) error { + _, err := 
c.delete(fmt.Sprintf("certificates/%s", fingerprint), nil, Sync) + return err +} + +func (c *Client) IsAlias(alias string) (bool, error) { + _, err := c.get(fmt.Sprintf("images/aliases/%s", alias)) + if err != nil { + if err == LXDErrors[http.StatusNotFound] { + return false, nil + } + return false, err + } + + return true, nil +} + +func (c *Client) GetAlias(alias string) string { + if c.Remote.Protocol == "simplestreams" && c.simplestreams != nil { + return c.simplestreams.GetAlias(alias) + } + + resp, err := c.get(fmt.Sprintf("images/aliases/%s", alias)) + if err != nil { + return "" + } + + if resp.Type == Error { + return "" + } + + var result shared.ImageAliasesEntry + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return "" + } + return result.Target +} + +// Init creates a container from either a fingerprint or an alias; you must +// provide at least one. +func (c *Client) Init(name string, imgremote string, image string, profiles *[]string, config map[string]string, ephem bool) (*Response, error) { + var tmpremote *Client + var err error + + serverStatus, err := c.ServerStatus() + if err != nil { + return nil, err + } + architectures := serverStatus.Environment.Architectures + + source := shared.Jmap{"type": "image"} + + if image == "" { + image = "default" + } + + if imgremote != c.Name { + source["type"] = "image" + source["mode"] = "pull" + tmpremote, err = NewClient(&c.Config, imgremote) + if err != nil { + return nil, err + } + + if tmpremote.Remote.Protocol != "simplestreams" { + target := tmpremote.GetAlias(image) + if target != "" { + image = target + } + + imageinfo, err := tmpremote.GetImageInfo(image) + if err != nil { + return nil, err + } + + if len(architectures) != 0 && !shared.StringInSlice(imageinfo.Architecture, architectures) { + return nil, fmt.Errorf("The image architecture is incompatible with the target server") + } + + if !imageinfo.Public { + var secret string + + resp, err := tmpremote.post("images/"+image+"/secret", nil, Async) + if err != nil { + return nil, err + } + + op, err := resp.MetadataAsOperation() + if err != nil { + return nil, err + } + + secret, err = op.Metadata.GetString("secret") + if err != nil { + return nil, err + } + + source["secret"] = secret + } + } + + source["server"] = tmpremote.BaseURL + source["protocol"] = tmpremote.Remote.Protocol + source["certificate"] = tmpremote.Certificate + source["fingerprint"] = image + } else { + fingerprint := c.GetAlias(image) + if fingerprint == "" { + fingerprint = image + } + + imageinfo, err := c.GetImageInfo(fingerprint) + if err != nil { + return nil, fmt.Errorf("can't get info for image '%s': %s", image, err) + } + + if len(architectures) != 0 && !shared.StringInSlice(imageinfo.Architecture, architectures) { + return nil, fmt.Errorf("The image architecture is incompatible with the target server") + } + source["fingerprint"] = fingerprint + } + + body := shared.Jmap{"source": source} + + if name != "" { + body["name"] = name + } + + if profiles != nil { + body["profiles"] = *profiles + } + + if config != nil { + body["config"] = config + } + + if ephem { + body["ephemeral"] = ephem + } + + var resp *Response + + if imgremote != c.Name { + var addresses []string + addresses, err = tmpremote.Addresses() + if err != nil { + return nil, err + } + + for _, addr := range addresses { + body["source"].(shared.Jmap)["server"] = "https://" + addr + + resp, err = c.post("containers", body, Async) + if err != nil { + continue + } + + break + } + } else { + resp, err = 
c.post("containers", body, Async) + } + + if err != nil { + if LXDErrors[http.StatusNotFound] == err { + return nil, fmt.Errorf("image doesn't exist") + } + return nil, err + } + + return resp, nil +} + +func (c *Client) LocalCopy(source string, name string, config map[string]string, profiles []string, ephemeral bool) (*Response, error) { + body := shared.Jmap{ + "source": shared.Jmap{ + "type": "copy", + "source": source, + }, + "name": name, + "config": config, + "profiles": profiles, + "ephemeral": ephemeral, + } + + return c.post("containers", body, Async) +} + +func (c *Client) Monitor(types []string, handler func(interface{})) error { + url := c.BaseWSURL + path.Join("/", "1.0", "events") + if len(types) != 0 { + url += "?type=" + strings.Join(types, ",") + } + + conn, err := WebsocketDial(c.websocketDialer, url) + if err != nil { + return err + } + defer conn.Close() + + for { + message := make(map[string]interface{}) + + _, data, err := conn.ReadMessage() + if err != nil { + return err + } + + err = json.Unmarshal(data, &message) + if err != nil { + return err + } + + handler(message) + } +} + +// Exec runs a command inside the LXD container. For "interactive" use such as +// `lxc exec ...`, one should pass a controlHandler that talks over the control +// socket and handles things like SIGWINCH. If running non-interactive, passing +// a nil controlHandler will cause Exec to return when all of the command +// output is sent to the output buffers. +func (c *Client) Exec(name string, cmd []string, env map[string]string, + stdin io.ReadCloser, stdout io.WriteCloser, + stderr io.WriteCloser, controlHandler func(*Client, *websocket.Conn)) (int, error) { + + body := shared.Jmap{ + "command": cmd, + "wait-for-websocket": true, + "interactive": controlHandler != nil, + "environment": env, + } + + resp, err := c.post(fmt.Sprintf("containers/%s/exec", name), body, Async) + if err != nil { + return -1, err + } + + var fds shared.Jmap + + op, err := resp.MetadataAsOperation() + if err != nil { + return -1, err + } + + fds, err = op.Metadata.GetMap("fds") + if err != nil { + return -1, err + } + + if controlHandler != nil { + var control *websocket.Conn + if wsControl, ok := fds["control"]; ok { + control, err = c.websocket(resp.Operation, wsControl.(string)) + if err != nil { + return -1, err + } + defer control.Close() + + go controlHandler(c, control) + } + + conn, err := c.websocket(resp.Operation, fds["0"].(string)) + if err != nil { + return -1, err + } + + shared.WebsocketSendStream(conn, stdin) + <-shared.WebsocketRecvStream(stdout, conn) + conn.Close() + + } else { + conns := make([]*websocket.Conn, 3) + dones := make([]chan bool, 3) + + conns[0], err = c.websocket(resp.Operation, fds[strconv.Itoa(0)].(string)) + if err != nil { + return -1, err + } + defer conns[0].Close() + + dones[0] = shared.WebsocketSendStream(conns[0], stdin) + + outputs := []io.WriteCloser{stdout, stderr} + for i := 1; i < 3; i++ { + conns[i], err = c.websocket(resp.Operation, fds[strconv.Itoa(i)].(string)) + if err != nil { + return -1, err + } + defer conns[i].Close() + + dones[i] = shared.WebsocketRecvStream(outputs[i-1], conns[i]) + } + + /* + * We'll get a read signal from each of stdout, stderr when they've + * both died. We need to wait for these in addition to the operation, + * because the server may indicate that the operation is done before we + * can actually read the last bits of data off these sockets and print + * it to the screen. 
+ * + * We don't wait for stdin here, because if we're interactive, the user + * may not have closed it (e.g. if the command exits but the user + * didn't ^D). + */ + for i := 1; i < 3; i++ { + <-dones[i] + } + + // Once we're done, we explicitly close stdin, to signal the websockets + // we're done. + stdin.Close() + } + + // Now, get the operation's status too. + op, err = c.WaitFor(resp.Operation) + if err != nil { + return -1, err + } + + if op.StatusCode == shared.Failure { + return -1, fmt.Errorf(op.Err) + } + + if op.StatusCode != shared.Success { + return -1, fmt.Errorf("got bad op status %s", op.Status) + } + + if op.Metadata == nil { + return -1, fmt.Errorf("no metadata received") + } + + return op.Metadata.GetInt("return") +} + +func (c *Client) Action(name string, action shared.ContainerAction, timeout int, force bool, stateful bool) (*Response, error) { + body := shared.Jmap{ + "action": action, + "timeout": timeout, + "force": force} + + if shared.StringInSlice(string(action), []string{"start", "stop"}) { + body["stateful"] = stateful + } + + return c.put(fmt.Sprintf("containers/%s/state", name), body, Async) +} + +func (c *Client) Delete(name string) (*Response, error) { + var url string + s := strings.SplitN(name, "/", 2) + if len(s) == 2 { + url = fmt.Sprintf("containers/%s/snapshots/%s", s[0], s[1]) + } else { + url = fmt.Sprintf("containers/%s", name) + } + + return c.delete(url, nil, Async) +} + +func (c *Client) ServerStatus() (*shared.ServerState, error) { + ss := shared.ServerState{} + + resp, err := c.GetServerConfig() + if err != nil { + return nil, err + } + + if err := json.Unmarshal(resp.Metadata, &ss); err != nil { + return nil, err + } + + return &ss, nil +} + +func (c *Client) ContainerInfo(name string) (*shared.ContainerInfo, error) { + ct := shared.ContainerInfo{} + + resp, err := c.get(fmt.Sprintf("containers/%s", name)) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(resp.Metadata, &ct); err != nil { + return nil, err + } + + return &ct, nil +} + +func (c *Client) ContainerState(name string) (*shared.ContainerState, error) { + ct := shared.ContainerState{} + + resp, err := c.get(fmt.Sprintf("containers/%s/state", name)) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(resp.Metadata, &ct); err != nil { + return nil, err + } + + return &ct, nil +} + +func (c *Client) GetLog(container string, log string) (io.Reader, error) { + uri := c.url(shared.APIVersion, "containers", container, "logs", log) + resp, err := c.getRaw(uri) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +func (c *Client) ProfileConfig(name string) (*shared.ProfileConfig, error) { + ct := shared.ProfileConfig{} + + resp, err := c.get(fmt.Sprintf("profiles/%s", name)) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(resp.Metadata, &ct); err != nil { + return nil, err + } + + return &ct, nil +} + +func (c *Client) PushFile(container string, p string, gid int, uid int, mode os.FileMode, buf io.ReadSeeker) error { + query := url.Values{"path": []string{p}} + uri := c.url(shared.APIVersion, "containers", container, "files") + "?" 
+ query.Encode() + + req, err := http.NewRequest("POST", uri, buf) + if err != nil { + return err + } + req.Header.Set("User-Agent", shared.UserAgent) + + req.Header.Set("X-LXD-mode", fmt.Sprintf("%04o", mode.Perm())) + req.Header.Set("X-LXD-uid", strconv.FormatUint(uint64(uid), 10)) + req.Header.Set("X-LXD-gid", strconv.FormatUint(uint64(gid), 10)) + + raw, err := c.Http.Do(req) + if err != nil { + return err + } + + _, err = HoistResponse(raw, Sync) + return err +} + +func (c *Client) PullFile(container string, p string) (int, int, os.FileMode, io.ReadCloser, error) { + uri := c.url(shared.APIVersion, "containers", container, "files") + query := url.Values{"path": []string{p}} + + r, err := c.getRaw(uri + "?" + query.Encode()) + if err != nil { + return 0, 0, 0, nil, err + } + + uid, gid, mode := shared.ParseLXDFileHeaders(r.Header) + + return uid, gid, mode, r.Body, nil +} + +func (c *Client) GetMigrationSourceWS(container string) (*Response, error) { + body := shared.Jmap{"migration": true} + url := fmt.Sprintf("containers/%s", container) + if shared.IsSnapshot(container) { + pieces := strings.SplitN(container, shared.SnapshotDelimiter, 2) + if len(pieces) != 2 { + return nil, fmt.Errorf("invalid snapshot name %s", container) + } + + url = fmt.Sprintf("containers/%s/snapshots/%s", pieces[0], pieces[1]) + } + + return c.post(url, body, Async) +} + +func (c *Client) MigrateFrom(name string, operation string, certificate string, secrets map[string]string, architecture string, config map[string]string, devices shared.Devices, profiles []string, baseImage string, ephemeral bool) (*Response, error) { + source := shared.Jmap{ + "type": "migration", + "mode": "pull", + "operation": operation, + "certificate": certificate, + "secrets": secrets, + "base-image": baseImage, + } + body := shared.Jmap{ + "architecture": architecture, + "config": config, + "devices": devices, + "ephemeral": ephemeral, + "name": name, + "profiles": profiles, + "source": source, + } + + return c.post("containers", body, Async) +} + +func (c *Client) Rename(name string, newName string) (*Response, error) { + oldNameParts := strings.SplitN(name, "/", 2) + newNameParts := strings.SplitN(newName, "/", 2) + if len(oldNameParts) != len(newNameParts) { + return nil, fmt.Errorf("Attempting to rename container to snapshot or vice versa.") + } + if len(oldNameParts) == 1 { + body := shared.Jmap{"name": newName} + return c.post(fmt.Sprintf("containers/%s", name), body, Async) + } + if oldNameParts[0] != newNameParts[0] { + return nil, fmt.Errorf("Attempting to rename snapshot of one container into a snapshot of another container.") + } + body := shared.Jmap{"name": newNameParts[1]} + return c.post(fmt.Sprintf("containers/%s/snapshots/%s", oldNameParts[0], oldNameParts[1]), body, Async) +} + +/* Wait for an operation */ +func (c *Client) WaitFor(waitURL string) (*shared.Operation, error) { + if len(waitURL) < 1 { + return nil, fmt.Errorf("invalid wait url %s", waitURL) + } + + /* For convenience, waitURL is expected to be in the form of a + * Response.Operation string, i.e. it already has + * "/<version>/operations/" in it; we chop off the leading / and pass + * it to url directly.
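+ * (For example, an Async Response typically carries something like + * "/1.0/operations/<uuid>" in its Operation field.)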
+ */ + shared.Debugf(path.Join(waitURL[1:], "wait")) + resp, err := c.baseGet(c.url(waitURL, "wait")) + if err != nil { + return nil, err + } + + return resp.MetadataAsOperation() +} + +func (c *Client) WaitForSuccess(waitURL string) error { + op, err := c.WaitFor(waitURL) + if err != nil { + return err + } + + if op.StatusCode == shared.Success { + return nil + } + + return fmt.Errorf(op.Err) +} + +func (c *Client) RestoreSnapshot(container string, snapshotName string, stateful bool) (*Response, error) { + body := shared.Jmap{"restore": snapshotName, "stateful": stateful} + return c.put(fmt.Sprintf("containers/%s", container), body, Async) +} + +func (c *Client) Snapshot(container string, snapshotName string, stateful bool) (*Response, error) { + body := shared.Jmap{"name": snapshotName, "stateful": stateful} + return c.post(fmt.Sprintf("containers/%s/snapshots", container), body, Async) +} + +func (c *Client) ListSnapshots(container string) ([]shared.SnapshotInfo, error) { + qUrl := fmt.Sprintf("containers/%s/snapshots?recursion=1", container) + resp, err := c.get(qUrl) + if err != nil { + return nil, err + } + + var result []shared.SnapshotInfo + + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + return result, nil +} + +func (c *Client) GetServerConfigString() ([]string, error) { + var resp []string + + ss, err := c.ServerStatus() + if err != nil { + return resp, err + } + + if ss.Auth == "untrusted" { + return resp, nil + } + + if len(ss.Config) == 0 { + resp = append(resp, "No config variables set.") + } + + for k, v := range ss.Config { + resp = append(resp, fmt.Sprintf("%s = %v", k, v)) + } + + return resp, nil +} + +func (c *Client) SetServerConfig(key string, value string) (*Response, error) { + ss, err := c.ServerStatus() + if err != nil { + return nil, err + } + + ss.Config[key] = value + + return c.put("", ss, Sync) +} + +func (c *Client) UpdateServerConfig(ss shared.BriefServerState) (*Response, error) { + return c.put("", ss, Sync) +} + +/* + * return string array representing a container's full configuration + */ +func (c *Client) GetContainerConfig(container string) ([]string, error) { + var resp []string + + st, err := c.ContainerInfo(container) + if err != nil { + return resp, err + } + + profiles := strings.Join(st.Profiles, ",") + pstr := fmt.Sprintf("Profiles: %s", profiles) + + resp = append(resp, pstr) + for k, v := range st.Config { + str := fmt.Sprintf("%s = %s", k, v) + resp = append(resp, str) + } + + return resp, nil +} + +func (c *Client) SetContainerConfig(container, key, value string) error { + st, err := c.ContainerInfo(container) + if err != nil { + return err + } + + if value == "" { + delete(st.Config, key) + } else { + st.Config[key] = value + } + + /* + * Although container config is an async operation (we PUT to restore a + * snapshot), we expect config to be a sync operation, so let's just + * handle it here. 
+ */ + resp, err := c.put(fmt.Sprintf("containers/%s", container), st, Async) + if err != nil { + return err + } + + return c.WaitForSuccess(resp.Operation) +} + +func (c *Client) UpdateContainerConfig(container string, st shared.BriefContainerInfo) error { + resp, err := c.put(fmt.Sprintf("containers/%s", container), st, Async) + if err != nil { + return err + } + + return c.WaitForSuccess(resp.Operation) +} + +func (c *Client) ProfileCreate(p string) error { + body := shared.Jmap{"name": p} + + _, err := c.post("profiles", body, Sync) + return err +} + +func (c *Client) ProfileDelete(p string) error { + _, err := c.delete(fmt.Sprintf("profiles/%s", p), nil, Sync) + return err +} + +func (c *Client) GetProfileConfig(profile string) (map[string]string, error) { + st, err := c.ProfileConfig(profile) + if err != nil { + return nil, err + } + + return st.Config, nil +} + +func (c *Client) SetProfileConfigItem(profile, key, value string) error { + st, err := c.ProfileConfig(profile) + if err != nil { + shared.Debugf("Error getting profile %s to update", profile) + return err + } + + if value == "" { + delete(st.Config, key) + } else { + st.Config[key] = value + } + + _, err = c.put(fmt.Sprintf("profiles/%s", profile), st, Sync) + return err +} + +func (c *Client) PutProfile(name string, profile shared.ProfileConfig) error { + if profile.Name != name { + return fmt.Errorf("Cannot change profile name") + } + + _, err := c.put(fmt.Sprintf("profiles/%s", name), profile, Sync) + return err +} + +func (c *Client) ListProfiles() ([]string, error) { + resp, err := c.get("profiles") + if err != nil { + return nil, err + } + + var result []string + + if err := json.Unmarshal(resp.Metadata, &result); err != nil { + return nil, err + } + + names := []string{} + + for _, url := range result { + toScan := strings.Replace(url, "/", " ", -1) + version := "" + name := "" + count, err := fmt.Sscanf(toScan, " %s profiles %s", &version, &name) + if err != nil { + return nil, err + } + + if count != 2 { + return nil, fmt.Errorf("bad profile url %s", url) + } + + if version != shared.APIVersion { + return nil, fmt.Errorf("bad version in profile url") + } + + names = append(names, name) + } + + return names, nil +} + +func (c *Client) ApplyProfile(container, profile string) (*Response, error) { + st, err := c.ContainerInfo(container) + if err != nil { + return nil, err + } + + st.Profiles = strings.Split(profile, ",") + + return c.put(fmt.Sprintf("containers/%s", container), st, Async) +} + +func (c *Client) ContainerDeviceDelete(container, devname string) (*Response, error) { + st, err := c.ContainerInfo(container) + if err != nil { + return nil, err + } + + delete(st.Devices, devname) + + return c.put(fmt.Sprintf("containers/%s", container), st, Async) +} + +func (c *Client) ContainerDeviceAdd(container, devname, devtype string, props []string) (*Response, error) { + st, err := c.ContainerInfo(container) + if err != nil { + return nil, err + } + + newdev := shared.Device{} + for _, p := range props { + results := strings.SplitN(p, "=", 2) + if len(results) != 2 { + return nil, fmt.Errorf("no value found in %q", p) + } + k := results[0] + v := results[1] + newdev[k] = v + } + + if st.Devices != nil && st.Devices.ContainsName(devname) { + return nil, fmt.Errorf("device already exists") + } + + newdev["type"] = devtype + if st.Devices == nil { + st.Devices = shared.Devices{} + } + + st.Devices[devname] = newdev + + return c.put(fmt.Sprintf("containers/%s", container), st, Async) +} + +func (c *Client) 
ContainerListDevices(container string) ([]string, error) { + st, err := c.ContainerInfo(container) + if err != nil { + return nil, err + } + devs := []string{} + for n, d := range st.Devices { + devs = append(devs, fmt.Sprintf("%s: %s", n, d["type"])) + } + return devs, nil +} + +func (c *Client) ProfileDeviceDelete(profile, devname string) (*Response, error) { + st, err := c.ProfileConfig(profile) + if err != nil { + return nil, err + } + + for n, _ := range st.Devices { + if n == devname { + delete(st.Devices, n) + } + } + + return c.put(fmt.Sprintf("profiles/%s", profile), st, Sync) +} + +func (c *Client) ProfileDeviceAdd(profile, devname, devtype string, props []string) (*Response, error) { + st, err := c.ProfileConfig(profile) + if err != nil { + return nil, err + } + + newdev := shared.Device{} + for _, p := range props { + results := strings.SplitN(p, "=", 2) + if len(results) != 2 { + return nil, fmt.Errorf("no value found in %q", p) + } + k := results[0] + v := results[1] + newdev[k] = v + } + if st.Devices != nil && st.Devices.ContainsName(devname) { + return nil, fmt.Errorf("device already exists") + } + newdev["type"] = devtype + if st.Devices == nil { + st.Devices = shared.Devices{} + } + st.Devices[devname] = newdev + + return c.put(fmt.Sprintf("profiles/%s", profile), st, Sync) +} + +func (c *Client) ProfileListDevices(profile string) ([]string, error) { + st, err := c.ProfileConfig(profile) + if err != nil { + return nil, err + } + devs := []string{} + for n, d := range st.Devices { + devs = append(devs, fmt.Sprintf("%s: %s", n, d["type"])) + } + return devs, nil + +} + +// WebsocketDial attempts to dial a websocket to a LXD instance, parsing +// LXD-style errors and returning them as go errors. +func WebsocketDial(dialer websocket.Dialer, url string) (*websocket.Conn, error) { + conn, raw, err := dialer.Dial(url, http.Header{}) + if err != nil { + _, err2 := HoistResponse(raw, Error) + if err2 != nil { + /* The response isn't one we understand, so return + * whatever the original error was. 
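+ *
+ * Minimal caller sketch (the endpoint path is an assumption, not taken
+ * from this file):
+ *
+ *     var d websocket.Dialer
+ *     conn, err := WebsocketDial(d, "wss://127.0.0.1:8443/1.0/events")
+ *     if err == nil {
+ *         defer conn.Close()
+ *     }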
*/ + return nil, err + } + + return nil, err + } + return conn, err +} + +func (c *Client) ProfileCopy(name, newname string, dest *Client) error { + st, err := c.ProfileConfig(name) + if err != nil { + return err + } + + body := shared.Jmap{"config": st.Config, "name": newname, "devices": st.Devices} + _, err = dest.post("profiles", body, Sync) + return err +} + +func (c *Client) AsyncWaitMeta(resp *Response) (*shared.Jmap, error) { + op, err := c.WaitFor(resp.Operation) + if err != nil { + return nil, err + } + + if op.StatusCode == shared.Failure { + return nil, fmt.Errorf(op.Err) + } + + if op.StatusCode != shared.Success { + return nil, fmt.Errorf("got bad op status %s", op.Status) + } + + return op.Metadata, nil +} + +func (c *Client) ImageFromContainer(cname string, public bool, aliases []string, properties map[string]string) (string, error) { + source := shared.Jmap{"type": "container", "name": cname} + if shared.IsSnapshot(cname) { + source["type"] = "snapshot" + } + body := shared.Jmap{"public": public, "source": source, "properties": properties} + + resp, err := c.post("images", body, Async) + if err != nil { + return "", err + } + + jmap, err := c.AsyncWaitMeta(resp) + if err != nil { + return "", err + } + + fingerprint, err := jmap.GetString("fingerprint") + if err != nil { + return "", err + } + + /* add new aliases */ + for _, alias := range aliases { + c.DeleteAlias(alias) + err = c.PostAlias(alias, alias, fingerprint) + if err != nil { + return "", fmt.Errorf("Error adding alias %s: %s", alias, err) + } + } + + return fingerprint, nil +} === added directory 'src/github.com/lxc/lxd/config' === added file 'src/github.com/lxc/lxd/config.go' --- src/github.com/lxc/lxd/config.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,168 @@ +package lxd + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd/shared" +) + +// Config holds settings to be used by a client or daemon. +type Config struct { + // DefaultRemote holds the remote daemon name from the Remotes map + // that the client should communicate with by default. + // If empty it defaults to "local". + DefaultRemote string `yaml:"default-remote"` + + // Remotes defines a map of remote daemon names to the details for + // communication with the named daemon. + // The implicit "local" remote is always available and communicates + // with the local daemon over a unix socket. + Remotes map[string]RemoteConfig `yaml:"remotes"` + + // Command line aliases for `lxc` + Aliases map[string]string `yaml:"aliases"` + + // This is the path to the config directory, so the client can find + // previously stored server certs, give good error messages, and save + // new server certs, etc. + // + // We don't need to store it, because of course once we've loaded this + // structure we already know where it is :) + ConfigDir string `yaml:"-"` +} + +// RemoteConfig holds details for communication with a remote daemon. 
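+//
+// As an illustration, the "images" remote defined below serializes to
+// the following YAML in a client config file:
+//
+//	remotes:
+//	  images:
+//	    addr: https://images.linuxcontainers.org
+//	    public: true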
+type RemoteConfig struct { + Addr string `yaml:"addr"` + Public bool `yaml:"public"` + Protocol string `yaml:"protocol,omitempty"` + Static bool `yaml:"-"` +} + +var LocalRemote = RemoteConfig{ + Addr: "unix://", + Static: true, + Public: false} + +var ImagesRemote = RemoteConfig{ + Addr: "https://images.linuxcontainers.org", + Public: true} + +var UbuntuRemote = RemoteConfig{ + Addr: "https://cloud-images.ubuntu.com/releases", + Static: true, + Public: true, + Protocol: "simplestreams"} + +var UbuntuDailyRemote = RemoteConfig{ + Addr: "https://cloud-images.ubuntu.com/daily", + Static: true, + Public: true, + Protocol: "simplestreams"} + +var StaticRemotes = map[string]RemoteConfig{ + "local": LocalRemote, + "ubuntu": UbuntuRemote, + "ubuntu-daily": UbuntuDailyRemote} + +var DefaultRemotes = map[string]RemoteConfig{ + "images": ImagesRemote, + "local": LocalRemote, + "ubuntu": UbuntuRemote, + "ubuntu-daily": UbuntuDailyRemote} + +var DefaultConfig = Config{ + Remotes: DefaultRemotes, + DefaultRemote: "local", + Aliases: map[string]string{}, +} + +// LoadConfig reads the configuration from the config path; if the path does +// not exist, it returns a default configuration. +func LoadConfig(path string) (*Config, error) { + data, err := ioutil.ReadFile(path) + if os.IsNotExist(err) { + // A missing file is equivalent to the default configuration. + withPath := DefaultConfig + withPath.ConfigDir = filepath.Dir(path) + return &withPath, nil + } + if err != nil { + return nil, fmt.Errorf("cannot read config file: %v", err) + } + + var c Config + err = yaml.Unmarshal(data, &c) + if err != nil { + return nil, fmt.Errorf("cannot parse configuration: %v", err) + } + if c.Remotes == nil { + c.Remotes = make(map[string]RemoteConfig) + } + c.ConfigDir = filepath.Dir(path) + + for k, v := range StaticRemotes { + c.Remotes[k] = v + } + + return &c, nil +} + +// SaveConfig writes the provided configuration to the config file. +func SaveConfig(c *Config, fname string) error { + for k, _ := range StaticRemotes { + delete(c.Remotes, k) + } + + // Ignore errors on these two calls. Create will report any problems. + os.Remove(fname + ".new") + os.Mkdir(filepath.Dir(fname), 0700) + f, err := os.Create(fname + ".new") + if err != nil { + return fmt.Errorf("cannot create config file: %v", err) + } + + // If there are any errors, do not leave it around. 
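+	// (On the success path the deferred Remove becomes a no-op: once
+	// FileMove below has renamed fname+".new" onto fname, nothing is
+	// left at the temporary path.)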
+ defer f.Close() + defer os.Remove(fname + ".new") + + data, err := yaml.Marshal(c) + _, err = f.Write(data) + if err != nil { + return fmt.Errorf("cannot write configuration: %v", err) + } + + f.Close() + err = shared.FileMove(fname+".new", fname) + if err != nil { + return fmt.Errorf("cannot rename temporary config file: %v", err) + } + return nil +} + +func (c *Config) ParseRemoteAndContainer(raw string) (string, string) { + result := strings.SplitN(raw, ":", 2) + if len(result) == 1 { + return c.DefaultRemote, result[0] + } + return result[0], result[1] +} + +func (c *Config) ParseRemote(raw string) string { + return strings.SplitN(raw, ":", 2)[0] +} + +func (c *Config) ConfigPath(file string) string { + return path.Join(c.ConfigDir, file) +} + +func (c *Config) ServerCertPath(name string) string { + return path.Join(c.ConfigDir, "servercerts", fmt.Sprintf("%s.crt", name)) +} === added directory 'src/github.com/lxc/lxd/config/bash' === added file 'src/github.com/lxc/lxd/config/bash/lxd-client' --- src/github.com/lxc/lxd/config/bash/lxd-client 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/config/bash/lxd-client 2016-03-22 15:18:22 +0000 @@ -0,0 +1,95 @@ +_have lxc && { + _lxd_complete() + { + _lxd_names() + { + COMPREPLY=( $( compgen -W \ + "$( lxc list | tail -n +4 | awk '{print $2}' | egrep -v '^(\||^$)' )" "$cur" ) + ) + } + + _lxd_images() + { + COMPREPLY=( $( compgen -W \ + "$( lxc image list | tail -n +4 | awk '{print $2}' | egrep -v '^(\||^$)' )" "$cur" ) + ) + } + + local cur prev + + COMPREPLY=() + cur=${COMP_WORDS[COMP_CWORD]} + prev=${COMP_WORDS[COMP_CWORD-1]} + lxc_cmds="config copy delete exec file help image info init launch \ + list move profile publish remote restart restore snapshot start stop \ + version" + + if [ $COMP_CWORD -eq 1 ]; then + COMPREPLY=( $(compgen -W "$lxc_cmds" -- $cur) ) + elif [ $COMP_CWORD -eq 2 ]; then + case "$prev" in + "config") + COMPREPLY=( $(compgen -W "device edit get set show trust" -- $cur) ) + ;; + "copy") + _lxd_names + ;; + "delete") + _lxd_names + ;; + "exec") + _lxd_names + ;; + "exec") + COMPREPLY=( $(compgen -W "pull push" -- $cur) ) + ;; + "help") + COMPREPLY=( $(compgen -W "$lxc_cmds" -- $cur) ) + ;; + "image") + COMPREPLY=( $(compgen -W "copy delete edit export info list" -- $cur) ) + ;; + "info") + _lxd_names + ;; + "launch") + _lxd_images + ;; + "profile") + COMPREPLY=( $(compgen -W \ + "list show create edit copy set delete apply" -- $cur) ) + ;; + "publish") + _lxd_names + ;; + "remote") + COMPREPLY=( $(compgen -W \ + "add remove list rename set-url set-default get-default" -- $cur) ) + ;; + "restart") + _lxd_names + ;; + "restore") + _lxd_names + ;; + "snapshot") + _lxd_names + ;; + "start") + # should check if containers are stopped + _lxd_names + ;; + "stop") + # should check if containers are started + _lxd_names + ;; + *) + ;; + esac + fi + + return 0 + } + + complete -F _lxd_complete lxc +} === added directory 'src/github.com/lxc/lxd/fuidshift' === added file 'src/github.com/lxc/lxd/fuidshift/Makefile' --- src/github.com/lxc/lxd/fuidshift/Makefile 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/fuidshift/Makefile 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +# we let go build figure out dependency changes +.PHONY: fuidmap +lxc: + go build + +clean: + -rm -f fuidshift === added file 'src/github.com/lxc/lxd/fuidshift/main.go' --- src/github.com/lxc/lxd/fuidshift/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/fuidshift/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +package main + +import ( + 
"fmt" + "os" + + "github.com/lxc/lxd/shared" +) + +func help(me string, status int) { + fmt.Printf("Usage: %s directory [-t] [-r] [ ...]\n", me) + fmt.Printf(" -t implies test mode. No file ownerships will be changed.\n") + fmt.Printf(" -r means reverse, that is shift the uids out of the container.\n") + fmt.Printf("\n") + fmt.Printf(" A range is [u|b|g]:.\n") + fmt.Printf(" where u means shift uids, g means shift gids, b means shift both.\n") + fmt.Printf(" For example: %s directory b:0:100000:65536 u:10000:1000:1\n", me) + os.Exit(status) +} + +func main() { + if err := run(); err != nil { + fmt.Printf("Error: %q\n", err) + help(os.Args[0], 1) + } +} + +func run() error { + if len(os.Args) < 3 { + if len(os.Args) > 1 && (os.Args[1] == "-h" || os.Args[1] == "--help" || os.Args[1] == "help") { + help(os.Args[0], 0) + } else { + help(os.Args[0], 1) + } + } + + directory := os.Args[1] + idmap := shared.IdmapSet{} + testmode := false + reverse := false + + for pos := 2; pos < len(os.Args); pos++ { + + switch os.Args[pos] { + case "-r", "--reverse": + reverse = true + case "t", "-t", "--test", "test": + testmode = true + default: + var err error + idmap, err = idmap.Append(os.Args[pos]) + if err != nil { + return err + } + } + } + + if idmap.Len() == 0 { + fmt.Printf("No idmaps given\n") + help(os.Args[0], 1) + } + + if !testmode && os.Geteuid() != 0 { + fmt.Printf("This must be run as root\n") + os.Exit(1) + } + + if reverse { + return idmap.UidshiftFromContainer(directory, testmode) + } + return idmap.UidshiftIntoContainer(directory, testmode) +} === added directory 'src/github.com/lxc/lxd/lxc' === added file 'src/github.com/lxc/lxd/lxc/action.go' --- src/github.com/lxc/lxd/lxc/action.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/action.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type actionCmd struct { + action shared.ContainerAction + hasTimeout bool + visible bool + name string + timeout int + force bool + stateful bool + stateless bool +} + +func (c *actionCmd) showByDefault() bool { + return c.visible +} + +func (c *actionCmd) usage() string { + return fmt.Sprintf(i18n.G( + `Changes state of one or more containers to %s. 
+
+lxc %s <name> [<name>...]`), c.name, c.name)
+}
+
+func (c *actionCmd) flags() {
+	if c.hasTimeout {
+		gnuflag.IntVar(&c.timeout, "timeout", -1, i18n.G("Time to wait for the container before killing it."))
+		gnuflag.BoolVar(&c.force, "force", false, i18n.G("Force the container to shutdown."))
+		gnuflag.BoolVar(&c.stateful, "stateful", false, i18n.G("Store the container state (only for stop)."))
+		gnuflag.BoolVar(&c.stateless, "stateless", false, i18n.G("Ignore the container state (only for start)."))
+	}
+}
+
+func (c *actionCmd) run(config *lxd.Config, args []string) error {
+	if len(args) == 0 {
+		return errArgs
+	}
+
+	state := false
+
+	// Only store state if asked to
+	if c.action == "stop" && c.stateful {
+		state = true
+	}
+
+	for _, nameArg := range args {
+		remote, name := config.ParseRemoteAndContainer(nameArg)
+		d, err := lxd.NewClient(config, remote)
+		if err != nil {
+			return err
+		}
+
+		if name == "" {
+			return fmt.Errorf(i18n.G("Must supply container name for: ")+"\"%s\"", nameArg)
+		}
+
+		if c.action == shared.Start || c.action == shared.Stop {
+			current, err := d.ContainerInfo(name)
+			if err != nil {
+				return err
+			}
+
+			// "start" for a frozen container means "unfreeze"
+			if current.StatusCode == shared.Frozen {
+				c.action = shared.Unfreeze
+			}
+
+			// Always restore state (if present) unless asked not to
+			if c.action == shared.Start && current.Stateful && !c.stateless {
+				state = true
+			}
+		}
+
+		resp, err := d.Action(name, c.action, c.timeout, c.force, state)
+		if err != nil {
+			return err
+		}
+
+		if resp.Type != lxd.Async {
+			return fmt.Errorf(i18n.G("bad result type from action"))
+		}
+
+		if err := d.WaitForSuccess(resp.Operation); err != nil {
+			return fmt.Errorf("%s\n"+i18n.G("Try `lxc info --show-log %s` for more info"), err, name)
+		}
+	}
+	return nil
+} === added file 'src/github.com/lxc/lxd/lxc/config.go' --- src/github.com/lxc/lxd/lxc/config.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,702 @@ +package main
+
+import (
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strings"
+	"syscall"
+
+	"github.com/olekukonko/tablewriter"
+	"gopkg.in/yaml.v2"
+
+	"github.com/lxc/lxd"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/gnuflag"
+	"github.com/lxc/lxd/shared/i18n"
+	"github.com/lxc/lxd/shared/termios"
+)
+
+type configCmd struct {
+	httpAddr string
+	expanded bool
+}
+
+func (c *configCmd) showByDefault() bool {
+	return true
+}
+
+func (c *configCmd) flags() {
+	gnuflag.BoolVar(&c.expanded, "expanded", false, i18n.G("Whether to show the expanded configuration"))
+}
+
+func (c *configCmd) configEditHelp() string {
+	return i18n.G(
+		`### This is a yaml representation of the configuration.
+### Any line starting with a '#' will be ignored.
+###
+### A sample configuration looks like:
+### name: container1
+### profiles:
+### - default
+### config:
+###   volatile.eth0.hwaddr: 00:16:3e:e9:f8:7f
+### devices:
+###   homedir:
+###     path: /extra
+###     source: /home/user
+###     type: disk
+### ephemeral: false
+###
+### Note that the name is shown but cannot be changed`)
+}
+
+func (c *configCmd) usage() string {
+	return i18n.G(
+		`Manage configuration.
+
+lxc config device add <[remote:]container> <name> <type> [key=value]...  Add a device to a container.
+lxc config device list [remote:]<container>                              List devices for container.
+lxc config device show [remote:]<container>                              Show full device details for container.
+lxc config device remove [remote:]<container> <name>                     Remove device from container.
+
+lxc config get [remote:]<container> key                  Get configuration key.
+lxc config set [remote:]<container> key value            Set container configuration key.
+lxc config unset [remote:]<container> key                Unset container configuration key.
+lxc config set key value                                 Set server configuration key.
+lxc config unset key                                     Unset server configuration key.
+lxc config show [--expanded] [remote:]<container>        Show container configuration.
+lxc config edit [remote:][container]                     Edit container configuration in external editor.
+    Edit configuration, either by launching external editor or reading STDIN.
+    Example: lxc config edit <container>            # launch editor
+             cat config.yml | lxc config edit <container>  # read from config.yml
+
+lxc config trust list [remote:]                          List all trusted certs.
+lxc config trust add [remote:] <certfile.crt>            Add certfile.crt to trusted hosts.
+lxc config trust remove [remote:] [hostname|fingerprint] Remove the cert from trusted hosts.
+
+Examples:
+To mount host's /share/c1 onto /opt in the container:
+	lxc config device add [remote:]container1 <device-name> disk source=/share/c1 path=opt
+
+To set an lxc config value:
+	lxc config set [remote:]<container> raw.lxc 'lxc.aa_allow_incomplete = 1'
+
+To listen on IPv4 and IPv6 port 8443 (you can omit the 8443, it's the default):
+	lxc config set core.https_address [::]:8443
+
+To set the server trust password:
+	lxc config set core.trust_password blah`)
+}
+
+func (c *configCmd) doSet(config *lxd.Config, args []string, unset bool) error {
+	if len(args) != 4 {
+		return errArgs
+	}
+
+	// [[lxc config]] set dakara:c1 limits.memory 200000
+	remote, container := config.ParseRemoteAndContainer(args[1])
+	d, err := lxd.NewClient(config, remote)
+	if err != nil {
+		return err
+	}
+
+	key := args[2]
+	value := args[3]
+
+	if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" {
+		buf, err := ioutil.ReadAll(os.Stdin)
+		if err != nil {
+			return fmt.Errorf(i18n.G("Can't read from stdin: %s"), err)
+		}
+		value = string(buf[:])
+	}
+
+	if unset {
+		st, err := d.ContainerInfo(container)
+		if err != nil {
+			return err
+		}
+
+		_, ok := st.Config[key]
+		if !ok {
+			return fmt.Errorf(i18n.G("Can't unset key '%s', it's not currently set."), key)
+		}
+	}
+
+	return d.SetContainerConfig(container, key, value)
+}
+
+func (c *configCmd) run(config *lxd.Config, args []string) error {
+	if len(args) < 1 {
+		return errArgs
+	}
+
+	switch args[0] {
+
+	case "unset":
+		if len(args) < 2 {
+			return errArgs
+		}
+
+		// Deal with local server
+		if len(args) == 2 {
+			c, err := lxd.NewClient(config, config.DefaultRemote)
+			if err != nil {
+				return err
+			}
+
+			ss, err := c.ServerStatus()
+			if err != nil {
+				return err
+			}
+
+			_, ok := ss.Config[args[1]]
+			if !ok {
+				return fmt.Errorf(i18n.G("Can't unset key '%s', it's not currently set."), args[1])
+			}
+
+			_, err = c.SetServerConfig(args[1], "")
+			return err
+		}
+
+		// Deal with remote server
+		remote, container := config.ParseRemoteAndContainer(args[1])
+		if container == "" {
+			c, err := lxd.NewClient(config, remote)
+			if err != nil {
+				return err
+			}
+
+			ss, err := c.ServerStatus()
+			if err != nil {
+				return err
+			}
+
+			// The key is args[2] here; args[1] is the remote name.
+			_, ok := ss.Config[args[2]]
+			if !ok {
+				return fmt.Errorf(i18n.G("Can't unset key '%s', it's not currently set."), args[2])
+			}
+
+			_, err = c.SetServerConfig(args[2], "")
+			return err
+		}
+
+		// Deal with container
+		args = append(args, "")
+		return c.doSet(config, args, true)
+
+	case "set":
+		if len(args) < 3 {
+			return errArgs
+		}
+
+		// Deal with local server
+		if len(args) == 3 {
+			c, err := lxd.NewClient(config, config.DefaultRemote)
+			if err != nil {
+				return err
+			}
+
+			_, err = c.SetServerConfig(args[1], args[2])
+
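+			// (Illustrative: `lxc config set core.trust_password blah`
+			// from the usage text above reaches this branch with
+			// args[1] = "core.trust_password" and args[2] = "blah".)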
return err + } + + // Deal with remote server + remote, container := config.ParseRemoteAndContainer(args[1]) + if container == "" { + c, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + _, err = c.SetServerConfig(args[2], args[3]) + return err + } + + // Deal with container + return c.doSet(config, args, false) + + case "trust": + if len(args) < 2 { + return errArgs + } + + switch args[1] { + case "list": + var remote string + if len(args) == 3 { + remote = config.ParseRemote(args[2]) + } else { + remote = config.DefaultRemote + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + trust, err := d.CertificateList() + if err != nil { + return err + } + + data := [][]string{} + for _, cert := range trust { + fp := cert.Fingerprint[0:12] + + certBlock, _ := pem.Decode([]byte(cert.Certificate)) + cert, err := x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return err + } + + const layout = "Jan 2, 2006 at 3:04pm (MST)" + issue := cert.NotBefore.Format(layout) + expiry := cert.NotAfter.Format(layout) + data = append(data, []string{fp, cert.Subject.CommonName, issue, expiry}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("FINGERPRINT"), + i18n.G("COMMON NAME"), + i18n.G("ISSUE DATE"), + i18n.G("EXPIRY DATE")}) + sort.Sort(SortImage(data)) + table.AppendBulk(data) + table.Render() + + return nil + case "add": + var remote string + if len(args) < 3 { + return fmt.Errorf(i18n.G("No certificate provided to add")) + } else if len(args) == 4 { + remote = config.ParseRemote(args[2]) + } else { + remote = config.DefaultRemote + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + fname := args[len(args)-1] + cert, err := shared.ReadCert(fname) + if err != nil { + return err + } + + name, _ := shared.SplitExt(fname) + return d.CertificateAdd(cert, name) + case "remove": + var remote string + if len(args) < 3 { + return fmt.Errorf(i18n.G("No fingerprint specified.")) + } else if len(args) == 4 { + remote = config.ParseRemote(args[2]) + } else { + remote = config.DefaultRemote + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + return d.CertificateRemove(args[len(args)-1]) + default: + return errArgs + } + + case "show": + remote := config.DefaultRemote + container := "" + if len(args) > 1 { + remote, container = config.ParseRemoteAndContainer(args[1]) + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + var data []byte + + if len(args) == 1 || container == "" { + config, err := d.ServerStatus() + if err != nil { + return err + } + + brief := config.Brief() + data, err = yaml.Marshal(&brief) + } else { + config, err := d.ContainerInfo(container) + if err != nil { + return err + } + + brief := config.Brief() + if c.expanded { + brief = config.BriefExpanded() + } + data, err = yaml.Marshal(&brief) + } + + fmt.Printf("%s", data) + + return nil + + case "get": + if len(args) > 3 || len(args) < 2 { + return errArgs + } + + remote := config.DefaultRemote + container := "" + key := args[1] + if len(args) > 2 { + remote, container = config.ParseRemoteAndContainer(args[1]) + key = args[2] + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + if container != "" { + resp, err := d.ContainerInfo(container) + if err != nil { + return err + } + fmt.Printf("%s: %s\n", key, 
resp.Config[key]) + } else { + resp, err := d.ServerStatus() + if err != nil { + return err + } + + value := resp.Config[key] + if value == nil { + value = "" + } else if value == true { + value = "true" + } else if value == false { + value = "false" + } + + fmt.Printf("%s: %s\n", key, value) + } + return nil + + case "profile": + case "device": + if len(args) < 2 { + return errArgs + } + switch args[1] { + case "list": + return c.deviceList(config, "container", args) + case "add": + return c.deviceAdd(config, "container", args) + case "remove": + return c.deviceRm(config, "container", args) + case "show": + return c.deviceShow(config, "container", args) + default: + return errArgs + } + + case "edit": + if len(args) < 1 { + return errArgs + } + + remote := config.DefaultRemote + container := "" + if len(args) > 1 { + remote, container = config.ParseRemoteAndContainer(args[1]) + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + if len(args) == 1 || container == "" { + return c.doDaemonConfigEdit(d) + } + + return c.doContainerConfigEdit(d, container) + + default: + return errArgs + } + + return errArgs +} + +func (c *configCmd) doContainerConfigEdit(client *lxd.Client, cont string) error { + // If stdin isn't a terminal, read text from it + if !termios.IsTerminal(int(syscall.Stdin)) { + contents, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + + newdata := shared.BriefContainerInfo{} + err = yaml.Unmarshal(contents, &newdata) + if err != nil { + return err + } + return client.UpdateContainerConfig(cont, newdata) + } + + // Extract the current value + config, err := client.ContainerInfo(cont) + if err != nil { + return err + } + + brief := config.Brief() + data, err := yaml.Marshal(&brief) + if err != nil { + return err + } + + // Spawn the editor + content, err := shared.TextEditor("", []byte(c.configEditHelp()+"\n\n"+string(data))) + if err != nil { + return err + } + + for { + // Parse the text received from the editor + newdata := shared.BriefContainerInfo{} + err = yaml.Unmarshal(content, &newdata) + if err == nil { + err = client.UpdateContainerConfig(cont, newdata) + } + + // Respawn the editor + if err != nil { + fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) + fmt.Println(i18n.G("Press enter to start the editor again")) + + _, err := os.Stdin.Read(make([]byte, 1)) + if err != nil { + return err + } + + content, err = shared.TextEditor("", content) + if err != nil { + return err + } + continue + } + break + } + return nil +} + +func (c *configCmd) doDaemonConfigEdit(client *lxd.Client) error { + // If stdin isn't a terminal, read text from it + if !termios.IsTerminal(int(syscall.Stdin)) { + contents, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + + newdata := shared.BriefServerState{} + err = yaml.Unmarshal(contents, &newdata) + if err != nil { + return err + } + + _, err = client.UpdateServerConfig(newdata) + return err + } + + // Extract the current value + config, err := client.ServerStatus() + if err != nil { + return err + } + + brief := config.Brief() + data, err := yaml.Marshal(&brief) + if err != nil { + return err + } + + // Spawn the editor + content, err := shared.TextEditor("", []byte(c.configEditHelp()+"\n\n"+string(data))) + if err != nil { + return err + } + + for { + // Parse the text received from the editor + newdata := shared.BriefServerState{} + err = yaml.Unmarshal(content, &newdata) + if err == nil { + _, err = client.UpdateServerConfig(newdata) + } + + // 
Respawn the editor + if err != nil { + fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) + fmt.Println(i18n.G("Press enter to start the editor again")) + + _, err := os.Stdin.Read(make([]byte, 1)) + if err != nil { + return err + } + + content, err = shared.TextEditor("", content) + if err != nil { + return err + } + continue + } + break + } + return nil +} + +func (c *configCmd) deviceAdd(config *lxd.Config, which string, args []string) error { + if len(args) < 5 { + return errArgs + } + remote, name := config.ParseRemoteAndContainer(args[2]) + + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + devname := args[3] + devtype := args[4] + var props []string + if len(args) > 5 { + props = args[5:] + } else { + props = []string{} + } + + var resp *lxd.Response + if which == "profile" { + resp, err = client.ProfileDeviceAdd(name, devname, devtype, props) + } else { + resp, err = client.ContainerDeviceAdd(name, devname, devtype, props) + } + if err != nil { + return err + } + fmt.Printf(i18n.G("Device %s added to %s")+"\n", devname, name) + if which == "profile" { + return nil + } + return client.WaitForSuccess(resp.Operation) +} + +func (c *configCmd) deviceRm(config *lxd.Config, which string, args []string) error { + if len(args) < 4 { + return errArgs + } + remote, name := config.ParseRemoteAndContainer(args[2]) + + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + devname := args[3] + var resp *lxd.Response + if which == "profile" { + resp, err = client.ProfileDeviceDelete(name, devname) + } else { + resp, err = client.ContainerDeviceDelete(name, devname) + } + if err != nil { + return err + } + fmt.Printf(i18n.G("Device %s removed from %s")+"\n", devname, name) + if which == "profile" { + return nil + } + return client.WaitForSuccess(resp.Operation) +} + +func (c *configCmd) deviceList(config *lxd.Config, which string, args []string) error { + if len(args) < 3 { + return errArgs + } + remote, name := config.ParseRemoteAndContainer(args[2]) + + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + var resp []string + if which == "profile" { + resp, err = client.ProfileListDevices(name) + } else { + resp, err = client.ContainerListDevices(name) + } + if err != nil { + return err + } + fmt.Printf("%s\n", strings.Join(resp, "\n")) + + return nil +} + +func (c *configCmd) deviceShow(config *lxd.Config, which string, args []string) error { + if len(args) < 3 { + return errArgs + } + remote, name := config.ParseRemoteAndContainer(args[2]) + + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + var devices map[string]shared.Device + if which == "profile" { + resp, err := client.ProfileConfig(name) + if err != nil { + return err + } + + devices = resp.Devices + } else { + resp, err := client.ContainerInfo(name) + if err != nil { + return err + } + + devices = resp.Devices + } + + for n, d := range devices { + fmt.Printf("%s\n", n) + for attr, val := range d { + fmt.Printf(" %s: %s\n", attr, val) + } + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxc/copy.go' --- src/github.com/lxc/lxd/lxc/copy.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/copy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,176 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type copyCmd struct { + ephem bool +} + +func (c *copyCmd) 
showByDefault() bool { + return true +} + +func (c *copyCmd) usage() string { + return i18n.G( + `Copy containers within or in between lxd instances. + +lxc copy [remote:] [remote:] [--ephemeral|e]`) +} + +func (c *copyCmd) flags() { + gnuflag.BoolVar(&c.ephem, "ephemeral", false, i18n.G("Ephemeral container")) + gnuflag.BoolVar(&c.ephem, "e", false, i18n.G("Ephemeral container")) +} + +func (c *copyCmd) copyContainer(config *lxd.Config, sourceResource string, destResource string, keepVolatile bool, ephemeral int) error { + sourceRemote, sourceName := config.ParseRemoteAndContainer(sourceResource) + destRemote, destName := config.ParseRemoteAndContainer(destResource) + + if sourceName == "" { + return fmt.Errorf(i18n.G("you must specify a source container name")) + } + + if destName == "" { + destName = sourceName + } + + source, err := lxd.NewClient(config, sourceRemote) + if err != nil { + return err + } + + status := &shared.ContainerInfo{} + + // TODO: presumably we want to do this for copying snapshots too? We + // need to think a bit more about how we track the baseImage in the + // face of LVM and snapshots in general; this will probably make more + // sense once that work is done. + baseImage := "" + + if !shared.IsSnapshot(sourceName) { + status, err = source.ContainerInfo(sourceName) + if err != nil { + return err + } + + baseImage = status.Config["volatile.base_image"] + + if !keepVolatile { + for k := range status.Config { + if strings.HasPrefix(k, "volatile") { + delete(status.Config, k) + } + } + } + } + + // Do a local copy if the remotes are the same, otherwise do a migration + if sourceRemote == destRemote { + if sourceName == destName { + return fmt.Errorf(i18n.G("can't copy to the same container name")) + } + + cp, err := source.LocalCopy(sourceName, destName, status.Config, status.Profiles, ephemeral == 1) + if err != nil { + return err + } + + return source.WaitForSuccess(cp.Operation) + } + + dest, err := lxd.NewClient(config, destRemote) + if err != nil { + return err + } + + sourceProfs := shared.NewStringSet(status.Profiles) + destProfs, err := dest.ListProfiles() + if err != nil { + return err + } + + if !sourceProfs.IsSubset(shared.NewStringSet(destProfs)) { + return fmt.Errorf(i18n.G("not all the profiles from the source exist on the target")) + } + + if ephemeral == -1 { + ct, err := source.ContainerInfo(sourceName) + if err != nil { + return err + } + + if ct.Ephemeral { + ephemeral = 1 + } else { + ephemeral = 0 + } + } + + sourceWSResponse, err := source.GetMigrationSourceWS(sourceName) + if err != nil { + return err + } + + secrets := map[string]string{} + + op, err := sourceWSResponse.MetadataAsOperation() + if err != nil { + return err + } + + for k, v := range *op.Metadata { + secrets[k] = v.(string) + } + + addresses, err := source.Addresses() + if err != nil { + return err + } + + /* Since we're trying a bunch of different network ports that + * may be invalid, we can get "bad handshake" errors when the + * websocket code tries to connect. If the first error is a + * real error, but the subsequent errors are only network + * errors, we should try to report the first real error. Of + * course, if all the errors are websocket errors, let's just + * report that. 
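+ *
+ * Shape of the loop below, with a hypothetical helper name for clarity:
+ *
+ *     for _, addr := range addresses {
+ *         migration, err = tryMigrate(addr) // i.e. dest.MigrateFrom(...)
+ *         if err == nil {
+ *             return dest.WaitForSuccess(migration.Operation)
+ *         }
+ *     }
+ *     return err // the last dial error wins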
+ */ + for _, addr := range addresses { + var migration *lxd.Response + + sourceWSUrl := "https://" + addr + sourceWSResponse.Operation + migration, err = dest.MigrateFrom(destName, sourceWSUrl, source.Certificate, secrets, status.Architecture, status.Config, status.Devices, status.Profiles, baseImage, ephemeral == 1) + if err != nil { + continue + } + + if err = dest.WaitForSuccess(migration.Operation); err != nil { + return err + } + + return nil + } + + return err +} + +func (c *copyCmd) run(config *lxd.Config, args []string) error { + if len(args) != 2 { + return errArgs + } + + ephem := 0 + if c.ephem { + ephem = 1 + } + + return c.copyContainer(config, args[0], args[1], false, ephem) +} === added file 'src/github.com/lxc/lxd/lxc/delete.go' --- src/github.com/lxc/lxd/lxc/delete.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/delete.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +package main + +import ( + "bufio" + "fmt" + "os" + "strings" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type deleteCmd struct { + force bool + interactive bool +} + +func (c *deleteCmd) showByDefault() bool { + return true +} + +func (c *deleteCmd) usage() string { + return i18n.G( + `Delete containers or container snapshots. + +lxc delete [remote:][/] [remote:][[/]...] + +Destroy containers or snapshots with any attached data (configuration, snapshots, ...).`) +} + +func (c *deleteCmd) flags() { + gnuflag.BoolVar(&c.force, "f", false, i18n.G("Force the removal of stopped containers.")) + gnuflag.BoolVar(&c.force, "force", false, i18n.G("Force the removal of stopped containers.")) + gnuflag.BoolVar(&c.interactive, "i", false, i18n.G("Require user confirmation.")) + gnuflag.BoolVar(&c.interactive, "interactive", false, i18n.G("Require user confirmation.")) +} + +func (c *deleteCmd) promptDelete(name string) error { + reader := bufio.NewReader(os.Stdin) + fmt.Printf(i18n.G("Remove %s (yes/no): "), name) + input, _ := reader.ReadString('\n') + input = strings.TrimSuffix(input, "\n") + if !shared.StringInSlice(strings.ToLower(input), []string{i18n.G("yes")}) { + return fmt.Errorf(i18n.G("User aborted delete operation.")) + } + + return nil +} + +func (c *deleteCmd) doDelete(d *lxd.Client, name string) error { + resp, err := d.Delete(name) + if err != nil { + return err + } + + return d.WaitForSuccess(resp.Operation) +} + +func (c *deleteCmd) run(config *lxd.Config, args []string) error { + if len(args) == 0 { + return errArgs + } + + for _, nameArg := range args { + remote, name := config.ParseRemoteAndContainer(nameArg) + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + if c.interactive { + err := c.promptDelete(name) + if err != nil { + return err + } + } + + if shared.IsSnapshot(name) { + return c.doDelete(d, name) + } + + ct, err := d.ContainerInfo(name) + if err != nil { + return err + } + + if ct.StatusCode != 0 && ct.StatusCode != shared.Stopped { + if !c.force { + return fmt.Errorf(i18n.G("The container is currently running, stop it first or pass --force.")) + } + + resp, err := d.Action(name, shared.Stop, -1, true, false) + if err != nil { + return err + } + + op, err := d.WaitFor(resp.Operation) + if err != nil { + return err + } + + if op.StatusCode == shared.Failure { + return fmt.Errorf(i18n.G("Stopping container failed!")) + } + + if ct.Ephemeral == true { + return nil + } + } + + if err := c.doDelete(d, name); err != nil { + return err + } + } + return nil +} === 
added file 'src/github.com/lxc/lxd/lxc/exec.go' --- src/github.com/lxc/lxd/lxc/exec.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/exec.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,160 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" + "github.com/lxc/lxd/shared/termios" +) + +type envFlag []string + +func (f *envFlag) String() string { + return fmt.Sprint(*f) +} + +func (f *envFlag) Set(value string) error { + if f == nil { + *f = make(envFlag, 1) + } else { + *f = append(*f, value) + } + return nil +} + +type execCmd struct { + modeFlag string + envArgs envFlag +} + +func (c *execCmd) showByDefault() bool { + return true +} + +func (c *execCmd) usage() string { + return i18n.G( + `Execute the specified command in a container. + +lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env EDITOR=/usr/bin/vim]... + +Mode defaults to non-interactive, interactive mode is selected if both stdin AND stdout are terminals (stderr is ignored).`) +} + +func (c *execCmd) flags() { + gnuflag.Var(&c.envArgs, "env", i18n.G("An environment variable of the form HOME=/home/foo")) + gnuflag.StringVar(&c.modeFlag, "mode", "auto", i18n.G("Override the terminal mode (auto, interactive or non-interactive)")) +} + +func (c *execCmd) sendTermSize(control *websocket.Conn) error { + width, height, err := termios.GetSize(int(syscall.Stdout)) + if err != nil { + return err + } + + shared.Debugf("Window size is now: %dx%d", width, height) + + w, err := control.NextWriter(websocket.TextMessage) + if err != nil { + return err + } + + msg := shared.ContainerExecControl{} + msg.Command = "window-resize" + msg.Args = make(map[string]string) + msg.Args["width"] = strconv.Itoa(width) + msg.Args["height"] = strconv.Itoa(height) + + buf, err := json.Marshal(msg) + if err != nil { + return err + } + _, err = w.Write(buf) + + w.Close() + return err +} + +func (c *execCmd) run(config *lxd.Config, args []string) error { + if len(args) < 2 { + return errArgs + } + + remote, name := config.ParseRemoteAndContainer(args[0]) + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + env := map[string]string{"HOME": "/root", "USER": "root"} + myEnv := os.Environ() + for _, ent := range myEnv { + if strings.HasPrefix(ent, "TERM=") { + env["TERM"] = ent[len("TERM="):] + } + } + + for _, arg := range c.envArgs { + pieces := strings.SplitN(arg, "=", 2) + value := "" + if len(pieces) > 1 { + value = pieces[1] + } + env[pieces[0]] = value + } + + cfd := int(syscall.Stdin) + + var interactive bool + if c.modeFlag == "interactive" { + interactive = true + } else if c.modeFlag == "non-interactive" { + interactive = false + } else { + interactive = termios.IsTerminal(cfd) && termios.IsTerminal(int(syscall.Stdout)) + } + + var oldttystate *termios.State + if interactive { + oldttystate, err = termios.MakeRaw(cfd) + if err != nil { + return err + } + defer termios.Restore(cfd, oldttystate) + } + + handler := c.controlSocketHandler + if !interactive { + handler = nil + } + + stdout := c.getStdout() + ret, err := d.Exec(name, args[1:], env, os.Stdin, stdout, os.Stderr, handler) + if err != nil { + return err + } + + if oldttystate != nil { + /* A bit of a special case here: we want to exit with the same code as + * the process inside the container, so we explicitly exit here + * instead of 
returning an error. + * + * Additionally, since os.Exit() exits without running deferred + * functions, we restore the terminal explicitly. + */ + termios.Restore(cfd, oldttystate) + } + + /* we get the result of waitpid() here so we need to transform it */ + os.Exit(ret >> 8) + return fmt.Errorf(i18n.G("unreachable return reached")) +} === added file 'src/github.com/lxc/lxd/lxc/exec_unix.go' --- src/github.com/lxc/lxd/lxc/exec_unix.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/exec_unix.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// +build !windows + +package main + +import ( + "io" + "os" + "os/signal" + "syscall" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" +) + +func (c *execCmd) getStdout() io.WriteCloser { + return os.Stdout +} + +func (c *execCmd) controlSocketHandler(d *lxd.Client, control *websocket.Conn) { + ch := make(chan os.Signal) + signal.Notify(ch, syscall.SIGWINCH) + + for { + err := c.sendTermSize(control) + if err != nil { + shared.Debugf("error setting term size %s", err) + break + } + + sig := <-ch + + shared.Debugf("Received '%s signal', updating window geometry.", sig) + } + + closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + control.WriteMessage(websocket.CloseMessage, closeMsg) +} === added file 'src/github.com/lxc/lxd/lxc/exec_windows.go' --- src/github.com/lxc/lxd/lxc/exec_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/exec_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// +build windows + +package main + +import ( + "io" + "os" + + "github.com/gorilla/websocket" + "github.com/mattn/go-colorable" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" +) + +// Windows doesn't process ANSI sequences natively, so we wrap +// os.Stdout for improved user experience for Windows client +type WrappedWriteCloser struct { + io.Closer + wrapper io.Writer +} + +func (wwc *WrappedWriteCloser) Write(p []byte) (int, error) { + return wwc.wrapper.Write(p) +} + +func (c *execCmd) getStdout() io.WriteCloser { + return &WrappedWriteCloser{os.Stdout, colorable.NewColorableStdout()} +} + +func (c *execCmd) controlSocketHandler(d *lxd.Client, control *websocket.Conn) { + // TODO: figure out what the equivalent of signal.SIGWINCH is on + // windows and use that; for now if you resize your terminal it just + // won't work quite correctly. + err := c.sendTermSize(control) + if err != nil { + shared.Debugf("error setting term size %s", err) + } +} === added file 'src/github.com/lxc/lxd/lxc/file.go' --- src/github.com/lxc/lxd/lxc/file.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/file.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,278 @@ +package main + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" + "github.com/lxc/lxd/shared/termios" +) + +type fileCmd struct { + uid int + gid int + mode string +} + +func (c *fileCmd) showByDefault() bool { + return true +} + +func (c *fileCmd) usage() string { + return i18n.G( + `Manage files on a container. + +lxc file pull [...] +lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] 
+lxc file edit + + in the case of pull, in the case of push and in the case of edit are /`) +} + +func (c *fileCmd) flags() { + gnuflag.IntVar(&c.uid, "uid", -1, i18n.G("Set the file's uid on push")) + gnuflag.IntVar(&c.gid, "gid", -1, i18n.G("Set the file's gid on push")) + gnuflag.StringVar(&c.mode, "mode", "", i18n.G("Set the file's perms on push")) +} + +func (c *fileCmd) push(config *lxd.Config, args []string) error { + if len(args) < 2 { + return errArgs + } + + target := args[len(args)-1] + pathSpec := strings.SplitN(target, "/", 2) + + if len(pathSpec) != 2 { + return fmt.Errorf(i18n.G("Invalid target %s"), target) + } + + targetPath := pathSpec[1] + remote, container := config.ParseRemoteAndContainer(pathSpec[0]) + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + mode := os.FileMode(0755) + if c.mode != "" { + m, err := strconv.ParseInt(c.mode, 0, 0) + if err != nil { + return err + } + mode = os.FileMode(m) + } + + uid := 0 + if c.uid >= 0 { + uid = c.uid + } + + gid := 0 + if c.gid >= 0 { + gid = c.gid + } + + _, targetfilename := filepath.Split(targetPath) + + var sourcefilenames []string + for _, fname := range args[:len(args)-1] { + if !strings.HasPrefix(fname, "--") { + sourcefilenames = append(sourcefilenames, fname) + } + } + + if (targetfilename != "") && (len(sourcefilenames) > 1) { + return errArgs + } + + /* Make sure all of the files are accessible by us before trying to + * push any of them. */ + var files []*os.File + for _, f := range sourcefilenames { + var file *os.File + if f == "-" { + file = os.Stdin + } else { + file, err = os.Open(f) + if err != nil { + return err + } + } + + defer file.Close() + files = append(files, file) + } + + for _, f := range files { + fpath := targetPath + if targetfilename == "" { + fpath = path.Join(fpath, path.Base(f.Name())) + } + + if c.mode == "" || c.uid == -1 || c.gid == -1 { + fMode, fUid, fGid, err := c.getOwner(f) + if err != nil { + return err + } + + if c.mode == "" { + mode = fMode + } + + if c.uid == -1 { + uid = fUid + } + + if c.gid == -1 { + gid = fGid + } + } + + err = d.PushFile(container, fpath, gid, uid, mode, f) + if err != nil { + return err + } + } + + return nil +} + +func (c *fileCmd) pull(config *lxd.Config, args []string) error { + if len(args) < 2 { + return errArgs + } + + target := args[len(args)-1] + targetIsDir := false + sb, err := os.Stat(target) + if err != nil && !os.IsNotExist(err) { + return err + } + + /* + * If the path exists, just use it. If it doesn't exist, it might be a + * directory in one of two cases: + * 1. Someone explicitly put "/" at the end + * 2. Someone provided more than one source. In this case the target + * should be a directory so we can save all the files into it. 
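+ *
+ * Illustrative outcomes (hypothetical container "c1"):
+ *     lxc file pull c1/etc/hostname /tmp/    -> /tmp/hostname
+ *     lxc file pull c1/a c1/b somedir        -> somedir/a, somedir/b
+ *     lxc file pull c1/etc/hostname -        -> contents on stdout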
+ */ + if err == nil { + targetIsDir = sb.IsDir() + if !targetIsDir && len(args)-1 > 1 { + return fmt.Errorf(i18n.G("More than one file to download, but target is not a directory")) + } + } else if strings.HasSuffix(target, string(os.PathSeparator)) || len(args)-1 > 1 { + if err := os.MkdirAll(target, 0755); err != nil { + return err + } + targetIsDir = true + } + + for _, f := range args[:len(args)-1] { + pathSpec := strings.SplitN(f, "/", 2) + if len(pathSpec) != 2 { + return fmt.Errorf(i18n.G("Invalid source %s"), f) + } + + remote, container := config.ParseRemoteAndContainer(pathSpec[0]) + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + _, _, _, buf, err := d.PullFile(container, pathSpec[1]) + if err != nil { + return err + } + + var targetPath string + if targetIsDir { + targetPath = path.Join(target, path.Base(pathSpec[1])) + } else { + targetPath = target + } + + var f *os.File + if targetPath == "-" { + f = os.Stdout + } else { + f, err = os.Create(targetPath) + if err != nil { + return err + } + defer f.Close() + } + + _, err = io.Copy(f, buf) + if err != nil { + return err + } + } + + return nil +} + +func (c *fileCmd) edit(config *lxd.Config, args []string) error { + if len(args) != 1 { + return errArgs + } + + // If stdin isn't a terminal, read text from it + if !termios.IsTerminal(int(syscall.Stdin)) { + return c.push(config, append([]string{os.Stdin.Name()}, args[0])) + } + + // Create temp file + f, err := ioutil.TempFile("", "lxd_file_edit_") + fname := f.Name() + f.Close() + os.Remove(fname) + defer os.Remove(fname) + + // Extract current value + err = c.pull(config, append([]string{args[0]}, fname)) + if err != nil { + return err + } + + _, err = shared.TextEditor(fname, []byte{}) + if err != nil { + return err + } + + err = c.push(config, append([]string{fname}, args[0])) + if err != nil { + return err + } + + return nil +} + +func (c *fileCmd) run(config *lxd.Config, args []string) error { + if len(args) < 1 { + return errArgs + } + + switch args[0] { + case "push": + return c.push(config, args[1:]) + case "pull": + return c.pull(config, args[1:]) + case "edit": + return c.edit(config, args[1:]) + default: + return errArgs + } +} === added file 'src/github.com/lxc/lxd/lxc/file_unix.go' --- src/github.com/lxc/lxd/lxc/file_unix.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/file_unix.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,21 @@ +// +build !windows + +package main + +import ( + "os" + "syscall" +) + +func (c *fileCmd) getOwner(f *os.File) (os.FileMode, int, int, error) { + fInfo, err := f.Stat() + if err != nil { + return os.FileMode(0), -1, -1, err + } + + mode := fInfo.Mode() + uid := int(fInfo.Sys().(*syscall.Stat_t).Uid) + gid := int(fInfo.Sys().(*syscall.Stat_t).Gid) + + return mode, uid, gid, nil +} === added file 'src/github.com/lxc/lxd/lxc/file_windows.go' --- src/github.com/lxc/lxd/lxc/file_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/file_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// +build windows + +package main + +import ( + "os" +) + +func (c *fileCmd) getOwner(f *os.File) (os.FileMode, int, int, error) { + return os.FileMode(0), -1, -1, nil +} === added file 'src/github.com/lxc/lxd/lxc/finger.go' --- src/github.com/lxc/lxd/lxc/finger.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/finger.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +package main + +import ( + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/i18n" +) + +type fingerCmd struct { + httpAddr string +} + 
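+// `lxc finger` amounts to a liveness probe: it succeeds once a client
+// can be constructed and ServerStatus gets an answer. Illustrative
+// shell check (not from this file):
+//
+//	lxc finger local: && echo "LXD is up"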
+func (c *fingerCmd) showByDefault() bool { + return false +} + +func (c *fingerCmd) usage() string { + return i18n.G( + `Fingers the LXD instance to check if it is up and working. + +lxc finger `) +} + +func (c *fingerCmd) flags() {} + +func (c *fingerCmd) run(config *lxd.Config, args []string) error { + if len(args) > 1 { + return errArgs + } + + var remote string + if len(args) == 1 { + remote = config.ParseRemote(args[0]) + } else { + remote = config.DefaultRemote + } + + // New client may or may not need to connect to the remote host, but + // client.ServerStatus will at least request the basic information from + // the server. + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + _, err = client.ServerStatus() + return err +} === added file 'src/github.com/lxc/lxd/lxc/help.go' --- src/github.com/lxc/lxd/lxc/help.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/help.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,87 @@ +package main + +import ( + "bufio" + "bytes" + "fmt" + "os" + "sort" + "strings" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type helpCmd struct { + showAll bool +} + +func (c *helpCmd) showByDefault() bool { + return true +} + +func (c *helpCmd) usage() string { + return i18n.G( + `Presents details on how to use LXD. + +lxd help [--all]`) +} + +func (c *helpCmd) flags() { + gnuflag.BoolVar(&c.showAll, "all", false, i18n.G("Show all commands (not just interesting ones)")) +} + +func (c *helpCmd) run(_ *lxd.Config, args []string) error { + if len(args) > 0 { + for _, name := range args { + cmd, ok := commands[name] + if !ok { + fmt.Fprintf(os.Stderr, i18n.G("error: unknown command: %s")+"\n", name) + } else { + fmt.Fprintf(os.Stderr, cmd.usage()+"\n") + } + } + return nil + } + + fmt.Println(i18n.G("Usage: lxc [subcommand] [options]")) + fmt.Println(i18n.G("Available commands:")) + var names []string + for name := range commands { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + cmd := commands[name] + if c.showAll || cmd.showByDefault() { + fmt.Printf("\t%-10s - %s\n", name, c.summaryLine(cmd.usage())) + } + } + if !c.showAll { + fmt.Println() + fmt.Println(i18n.G("Options:")) + fmt.Println(" --all " + i18n.G("Print less common commands.")) + fmt.Println(" --debug " + i18n.G("Print debug information.")) + fmt.Println(" --verbose " + i18n.G("Print verbose information.")) + fmt.Println() + fmt.Println(i18n.G("Environment:")) + fmt.Println(" LXD_CONF " + i18n.G("Path to an alternate client configuration directory.")) + fmt.Println(" LXD_DIR " + i18n.G("Path to an alternate server directory.")) + } + return nil +} + +// summaryLine returns the first line of the help text. Conventionally, this +// should be a one-line command summary, potentially followed by a longer +// explanation. 
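+// For example (illustrative), a usage string beginning
+// "Manage files on a container." followed by a blank line and detail
+// lines yields exactly "Manage files on a container.".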
+func (c *helpCmd) summaryLine(usage string) string { + usage = strings.TrimSpace(usage) + s := bufio.NewScanner(bytes.NewBufferString(usage)) + if s.Scan() { + if len(s.Text()) > 1 { + return s.Text() + } + } + return i18n.G("Missing summary.") +} === added file 'src/github.com/lxc/lxd/lxc/image.go' --- src/github.com/lxc/lxd/lxc/image.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/image.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,745 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "regexp" + "sort" + "strings" + "syscall" + + "github.com/olekukonko/tablewriter" + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" + "github.com/lxc/lxd/shared/termios" +) + +type SortImage [][]string + +func (a SortImage) Len() int { + return len(a) +} + +func (a SortImage) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a SortImage) Less(i, j int) bool { + if a[i][0] == a[j][0] { + if a[i][3] == "" { + return false + } + + if a[j][3] == "" { + return true + } + + return a[i][3] < a[j][3] + } + + if a[i][0] == "" { + return false + } + + if a[j][0] == "" { + return true + } + + return a[i][0] < a[j][0] +} + +type aliasList []string + +func (f *aliasList) String() string { + return fmt.Sprint(*f) +} + +func (f *aliasList) Set(value string) error { + if f == nil { + *f = make(aliasList, 1) + } else { + *f = append(*f, value) + } + return nil +} + +type imageCmd struct { + addAliases aliasList + publicImage bool + copyAliases bool + autoUpdate bool +} + +func (c *imageCmd) showByDefault() bool { + return true +} + +func (c *imageCmd) imageEditHelp() string { + return i18n.G( + `### This is a yaml representation of the image properties. +### Any line starting with a '# will be ignored. +### +### Each property is represented by a single line: +### An example would be: +### description: My custom image`) +} + +func (c *imageCmd) usage() string { + return i18n.G( + `Manipulate container images. + +In LXD containers are created from images. Those images were themselves +either generated from an existing container or downloaded from an image +server. + +When using remote images, LXD will automatically cache images for you +and remove them upon expiration. + +The image unique identifier is the hash (sha-256) of its representation +as a compressed tarball (or for split images, the concatenation of the +metadata and rootfs tarballs). + +Images can be referenced by their full hash, shortest unique partial +hash or alias name (if one is set). + + +lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [prop=value] + Import an image tarball (or tarballs) into the LXD image store. + +lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] [--public] [--auto-update] + Copy an image from one LXD daemon to another over the network. + + The auto-update flag instructs the server to keep this image up to + date. It requires the source to be an alias and for it to be public. + +lxc image delete [remote:] + Delete an image from the LXD image store. + +lxc image export [remote:] + Export an image from the LXD image store into a distributable tarball. + +lxc image info [remote:] + Print everything LXD knows about a given image. + +lxc image list [remote:] [filter] + List images in the LXD image store. Filters may be of the + = form for property based filtering, or part of the image + hash or part of the image alias name. 
+ +lxc image show [remote:]<image> + Yaml output of the user modifiable properties of an image. + +lxc image edit [remote:]<image> + Edit image, either by launching external editor or reading STDIN. + Example: lxc image edit <image> # launch editor + cat image.yml | lxc image edit <image> # read from image.yml + +lxc image alias create [remote:]<alias> <target> + Create a new alias for an existing image. + +lxc image alias delete [remote:]<alias> + Delete an alias. + +lxc image alias list [remote:] + List the aliases. +`) +} + +func (c *imageCmd) flags() { + gnuflag.BoolVar(&c.publicImage, "public", false, i18n.G("Make image public")) + gnuflag.BoolVar(&c.copyAliases, "copy-aliases", false, i18n.G("Copy aliases from source")) + gnuflag.BoolVar(&c.autoUpdate, "auto-update", false, i18n.G("Keep the image up to date after initial copy")) + gnuflag.Var(&c.addAliases, "alias", i18n.G("New alias to define at target")) +} + +func (c *imageCmd) doImageAlias(config *lxd.Config, args []string) error { + var remote string + switch args[1] { + case "list": + /* alias list [<remote>:] */ + if len(args) > 2 { + remote, _ = config.ParseRemoteAndContainer(args[2]) + } else { + remote, _ = config.ParseRemoteAndContainer("") + } + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + resp, err := d.ListAliases() + if err != nil { + return err + } + + c.showAliases(resp) + + return nil + case "create": + /* alias create [<remote>:]<alias> <target> */ + if len(args) < 4 { + return errArgs + } + remote, alias := config.ParseRemoteAndContainer(args[2]) + target := args[3] + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + /* TODO - what about description? */ + err = d.PostAlias(alias, alias, target) + return err + case "delete": + /* alias delete [<remote>:]<alias> */ + if len(args) < 3 { + return errArgs + } + remote, alias := config.ParseRemoteAndContainer(args[2]) + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + err = d.DeleteAlias(alias) + return err + } + return errArgs +} + +func (c *imageCmd) run(config *lxd.Config, args []string) error { + var remote string + + if len(args) < 1 { + return errArgs + } + + switch args[0] { + case "alias": + if len(args) < 2 { + return errArgs + } + return c.doImageAlias(config, args) + + case "copy": + /* copy [<remote>:]<image> [<remote>:]<image> */ + if len(args) != 3 { + return errArgs + } + + remote, inName := config.ParseRemoteAndContainer(args[1]) + if inName == "" { + inName = "default" + } + + destRemote, outName := config.ParseRemoteAndContainer(args[2]) + if outName != "" { + return errArgs + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + dest, err := lxd.NewClient(config, destRemote) + if err != nil { + return err + } + + progressHandler := func(progress string) { + fmt.Printf(i18n.G("Copying the image: %s")+"\r", progress) + } + + err = d.CopyImage(inName, dest, c.copyAliases, c.addAliases, c.publicImage, c.autoUpdate, progressHandler) + if err == nil { + fmt.Println(i18n.G("Image copied successfully!")) + } + return err + + case "delete": + /* delete [<remote>:]<image> */ + if len(args) < 2 { + return errArgs + } + + remote, inName := config.ParseRemoteAndContainer(args[1]) + if inName == "" { + inName = "default" + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + image := c.dereferenceAlias(d, inName) + err = d.DeleteImage(image) + return err + + case "info": + if len(args) < 2 { + return errArgs + } + + remote, inName := config.ParseRemoteAndContainer(args[1]) + if inName == "" { + inName = "default" + } + + d, err := lxd.NewClient(config, 
remote) + if err != nil { + return err + } + + image := c.dereferenceAlias(d, inName) + info, err := d.GetImageInfo(image) + if err != nil { + return err + } + + public := i18n.G("no") + if info.Public { + public = i18n.G("yes") + } + + autoUpdate := i18n.G("disabled") + if info.AutoUpdate { + autoUpdate = i18n.G("enabled") + } + + fmt.Printf(i18n.G("Fingerprint: %s")+"\n", info.Fingerprint) + fmt.Printf(i18n.G("Size: %.2fMB")+"\n", float64(info.Size)/1024.0/1024.0) + fmt.Printf(i18n.G("Architecture: %s")+"\n", info.Architecture) + fmt.Printf(i18n.G("Public: %s")+"\n", public) + fmt.Printf(i18n.G("Timestamps:") + "\n") + const layout = "2006/01/02 15:04 UTC" + if info.CreationDate.UTC().Unix() != 0 { + fmt.Printf(" "+i18n.G("Created: %s")+"\n", info.CreationDate.UTC().Format(layout)) + } + fmt.Printf(" "+i18n.G("Uploaded: %s")+"\n", info.UploadDate.UTC().Format(layout)) + if info.ExpiryDate.UTC().Unix() != 0 { + fmt.Printf(" "+i18n.G("Expires: %s")+"\n", info.ExpiryDate.UTC().Format(layout)) + } else { + fmt.Printf(" " + i18n.G("Expires: never") + "\n") + } + fmt.Println(i18n.G("Properties:")) + for key, value := range info.Properties { + fmt.Printf(" %s: %s\n", key, value) + } + fmt.Println(i18n.G("Aliases:")) + for _, alias := range info.Aliases { + fmt.Printf(" - %s\n", alias.Name) + } + fmt.Printf(i18n.G("Auto update: %s")+"\n", autoUpdate) + if info.Source != nil { + fmt.Println(i18n.G("Source:")) + fmt.Printf(" Server: %s\n", info.Source.Server) + fmt.Printf(" Protocol: %s\n", info.Source.Protocol) + fmt.Printf(" Alias: %s\n", info.Source.Alias) + } + return nil + + case "import": + if len(args) < 2 { + return errArgs + } + + var fingerprint string + var imageFile string + var rootfsFile string + var properties []string + var remote string + + for _, arg := range args[1:] { + split := strings.Split(arg, "=") + if len(split) == 1 || shared.PathExists(arg) { + if strings.HasSuffix(arg, ":") { + remote = config.ParseRemote(arg) + } else { + if imageFile == "" { + imageFile = arg + } else { + rootfsFile = arg + } + } + } else { + properties = append(properties, arg) + } + } + + if remote == "" { + remote = config.DefaultRemote + } + + if imageFile == "" { + return errArgs + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + handler := func(percent int) { + fmt.Printf(i18n.G("Transferring image: %d%%")+"\r", percent) + if percent == 100 { + fmt.Printf("\n") + } + } + + if strings.HasPrefix(imageFile, "https://") { + fingerprint, err = d.PostImageURL(imageFile, c.publicImage, c.addAliases) + } else if strings.HasPrefix(imageFile, "http://") { + return fmt.Errorf(i18n.G("Only https:// is supported for remote image import.")) + } else { + fingerprint, err = d.PostImage(imageFile, rootfsFile, properties, c.publicImage, c.addAliases, handler) + } + + if err != nil { + return err + } + fmt.Printf(i18n.G("Image imported with fingerprint: %s")+"\n", fingerprint) + + return nil + + case "list": + filters := []string{} + + if len(args) > 1 { + result := strings.SplitN(args[1], ":", 2) + if len(result) == 1 { + filters = append(filters, args[1]) + remote, _ = config.ParseRemoteAndContainer("") + } else { + remote, _ = config.ParseRemoteAndContainer(args[1]) + } + } else { + remote, _ = config.ParseRemoteAndContainer("") + } + + if len(args) > 2 { + for _, filter := range args[2:] { + filters = append(filters, filter) + } + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + images, err := d.ListImages() + if err != nil { + return
err + } + + return c.showImages(images, filters) + + case "edit": + if len(args) < 2 { + return errArgs + } + + remote, inName := config.ParseRemoteAndContainer(args[1]) + if inName == "" { + inName = "default" + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + image := c.dereferenceAlias(d, inName) + if image == "" { + image = inName + } + + return c.doImageEdit(d, image) + + case "export": + if len(args) < 2 { + return errArgs + } + + remote, inName := config.ParseRemoteAndContainer(args[1]) + if inName == "" { + inName = "default" + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + image := c.dereferenceAlias(d, inName) + + target := "." + if len(args) > 2 { + target = args[2] + } + + outfile, err := d.ExportImage(image, target) + if err != nil { + return err + } + + if target != "-" { + fmt.Printf(i18n.G("Output is in %s")+"\n", outfile) + } + return nil + + case "show": + if len(args) < 2 { + return errArgs + } + + remote, inName := config.ParseRemoteAndContainer(args[1]) + if inName == "" { + inName = "default" + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + image := c.dereferenceAlias(d, inName) + info, err := d.GetImageInfo(image) + if err != nil { + return err + } + + properties := info.Brief() + + data, err := yaml.Marshal(&properties) + fmt.Printf("%s", data) + return err + + default: + return errArgs + } +} + +func (c *imageCmd) dereferenceAlias(d *lxd.Client, inName string) string { + result := d.GetAlias(inName) + if result == "" { + return inName + } + return result +} + +func (c *imageCmd) shortestAlias(list []shared.ImageAlias) string { + shortest := "" + for _, l := range list { + if shortest == "" { + shortest = l.Name + continue + } + if len(l.Name) != 0 && len(l.Name) < len(shortest) { + shortest = l.Name + } + } + + return shortest +} + +func (c *imageCmd) findDescription(props map[string]string) string { + for k, v := range props { + if k == "description" { + return v + } + } + return "" +} + +func (c *imageCmd) showImages(images []shared.ImageInfo, filters []string) error { + data := [][]string{} + for _, image := range images { + if !c.imageShouldShow(filters, &image) { + continue + } + + shortest := c.shortestAlias(image.Aliases) + if len(image.Aliases) > 1 { + shortest = fmt.Sprintf(i18n.G("%s (%d more)"), shortest, len(image.Aliases)-1) + } + fp := image.Fingerprint[0:12] + public := i18n.G("no") + description := c.findDescription(image.Properties) + + if image.Public { + public = i18n.G("yes") + } + + const layout = "Jan 2, 2006 at 3:04pm (MST)" + uploaded := image.UploadDate.UTC().Format(layout) + size := fmt.Sprintf("%.2fMB", float64(image.Size)/1024.0/1024.0) + data = append(data, []string{shortest, fp, public, description, image.Architecture, size, uploaded}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("ALIAS"), + i18n.G("FINGERPRINT"), + i18n.G("PUBLIC"), + i18n.G("DESCRIPTION"), + i18n.G("ARCH"), + i18n.G("SIZE"), + i18n.G("UPLOAD DATE")}) + sort.Sort(SortImage(data)) + table.AppendBulk(data) + table.Render() + + return nil +} + +func (c *imageCmd) showAliases(aliases shared.ImageAliases) error { + data := [][]string{} + for _, alias := range aliases { + data = append(data, []string{alias.Name, alias.Target[0:12], alias.Description}) + } + + table := tablewriter.NewWriter(os.Stdout) + 
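+ // Note: this reuses SortImage, whose Less also compares column 3 when two +// rows share the value in column 0; the alias rows here only have three +// columns, so the sort relies on alias names (column 0) being unique. +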
table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("ALIAS"), + i18n.G("FINGERPRINT"), + i18n.G("DESCRIPTION")}) + sort.Sort(SortImage(data)) + table.AppendBulk(data) + table.Render() + + return nil +} + +func (c *imageCmd) doImageEdit(client *lxd.Client, image string) error { + // If stdin isn't a terminal, read text from it + if !termios.IsTerminal(int(syscall.Stdin)) { + contents, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + + newdata := shared.BriefImageInfo{} + err = yaml.Unmarshal(contents, &newdata) + if err != nil { + return err + } + return client.PutImageInfo(image, newdata) + } + + // Extract the current value + config, err := client.GetImageInfo(image) + if err != nil { + return err + } + + brief := config.Brief() + data, err := yaml.Marshal(&brief) + if err != nil { + return err + } + + // Spawn the editor + content, err := shared.TextEditor("", []byte(c.imageEditHelp()+"\n\n"+string(data))) + if err != nil { + return err + } + + for { + // Parse the text received from the editor + newdata := shared.BriefImageInfo{} + err = yaml.Unmarshal(content, &newdata) + if err == nil { + err = client.PutImageInfo(image, newdata) + } + + // Respawn the editor + if err != nil { + fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) + fmt.Println(i18n.G("Press enter to start the editor again")) + + _, err := os.Stdin.Read(make([]byte, 1)) + if err != nil { + return err + } + + content, err = shared.TextEditor("", content) + if err != nil { + return err + } + continue + } + break + } + return nil +} + +func (c *imageCmd) imageShouldShow(filters []string, state *shared.ImageInfo) bool { + if len(filters) == 0 { + return true + } + + for _, filter := range filters { + found := false + if strings.Contains(filter, "=") { + membs := strings.SplitN(filter, "=", 2) + + key := membs[0] + var value string + if len(membs) < 2 { + value = "" + } else { + value = membs[1] + } + + for configKey, configValue := range state.Properties { + list := listCmd{} + if list.dotPrefixMatch(key, configKey) { + //try to test filter value as a regexp + regexpValue := value + if !(strings.Contains(value, "^") || strings.Contains(value, "$")) { + regexpValue = "^" + regexpValue + "$" + } + r, err := regexp.Compile(regexpValue) + //if not regexp compatible use original value + if err != nil { + if value == configValue { + found = true + break + } + } else if r.MatchString(configValue) == true { + found = true + break + } + } + } + } else { + for _, alias := range state.Aliases { + if strings.Contains(alias.Name, filter) { + found = true + break + } + } + if strings.Contains(state.Fingerprint, filter) { + found = true + } + } + + if !found { + return false + } + } + + return true +} === added file 'src/github.com/lxc/lxd/lxc/info.go' --- src/github.com/lxc/lxd/lxc/info.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/info.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,163 @@ +package main + +import ( + "fmt" + "io/ioutil" + "strings" + + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type infoCmd struct { + showLog bool +} + +func (c *infoCmd) showByDefault() bool { + return true +} + +func (c *infoCmd) usage() string { + return i18n.G( + `List information on containers. + +This will support remotes and images as well, but only containers for now. 
+ +lxc info [<remote>:]container [--show-log]`) +} + +func (c *infoCmd) flags() { + gnuflag.BoolVar(&c.showLog, "show-log", false, i18n.G("Show the container's last 100 log lines?")) +} + +func (c *infoCmd) run(config *lxd.Config, args []string) error { + var remote string + var cName string + if len(args) == 1 { + remote, cName = config.ParseRemoteAndContainer(args[0]) + } else { + remote, cName = config.ParseRemoteAndContainer("") + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + if cName == "" { + return c.remoteInfo(d) + } else { + return c.containerInfo(d, cName, c.showLog) + } +} + +func (c *infoCmd) remoteInfo(d *lxd.Client) error { + serverStatus, err := d.ServerStatus() + if err != nil { + return err + } + + data, err := yaml.Marshal(&serverStatus) + if err != nil { + return err + } + + fmt.Printf("%s", data) + + return nil +} + +func (c *infoCmd) containerInfo(d *lxd.Client, name string, showLog bool) error { + ct, err := d.ContainerInfo(name) + if err != nil { + return err + } + + cs, err := d.ContainerState(name) + if err != nil { + return err + } + + const layout = "2006/01/02 15:04 UTC" + + fmt.Printf(i18n.G("Name: %s")+"\n", ct.Name) + fmt.Printf(i18n.G("Architecture: %s")+"\n", ct.Architecture) + if ct.CreationDate.UTC().Unix() != 0 { + fmt.Printf(i18n.G("Created: %s")+"\n", ct.CreationDate.UTC().Format(layout)) + } + + fmt.Printf(i18n.G("Status: %s")+"\n", ct.Status) + if ct.Ephemeral { + fmt.Printf(i18n.G("Type: ephemeral") + "\n") + } else { + fmt.Printf(i18n.G("Type: persistent") + "\n") + } + fmt.Printf(i18n.G("Profiles: %s")+"\n", strings.Join(ct.Profiles, ", ")) + if cs.Pid != 0 { + fmt.Printf(i18n.G("Pid: %d")+"\n", cs.Pid) + fmt.Printf(i18n.G("Processes: %d")+"\n", cs.Processes) + + ipInfo := "" + for netName, net := range cs.Network { + vethStr := "" + if net.HostName != "" { + vethStr = fmt.Sprintf("\t%s", net.HostName) + } + + for _, addr := range net.Addresses { + ipInfo += fmt.Sprintf(" %s:\t%s\t%s%s\n", netName, addr.Family, addr.Address, vethStr) + } + } + + if ipInfo != "" { + fmt.Printf(i18n.G("IPs:") + "\n") + fmt.Print(ipInfo) + } + } + + // List snapshots + first_snapshot := true + snaps, err := d.ListSnapshots(name) + if err != nil { + return nil + } + + for _, snap := range snaps { + if first_snapshot { + fmt.Println(i18n.G("Snapshots:")) + } + fmt.Printf(" %s", snap.Name) + + if snap.CreationDate.UTC().Unix() != 0 { + fmt.Printf(" ("+i18n.G("taken at %s")+")", snap.CreationDate.UTC().Format(layout)) + } + + if snap.Stateful { + fmt.Printf(" (" + i18n.G("stateful") + ")") + } else { + fmt.Printf(" (" + i18n.G("stateless") + ")") + } + fmt.Printf("\n") + + first_snapshot = false + } + + if showLog { + log, err := d.GetLog(name, "lxc.log") + if err != nil { + return err + } + + stuff, err := ioutil.ReadAll(log) + if err != nil { + return err + } + + fmt.Printf("\n"+i18n.G("Log:")+"\n\n%s\n", string(stuff)) + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxc/init.go' --- src/github.com/lxc/lxd/lxc/init.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/init.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,254 @@ +package main + +import ( + "fmt" + "os" + "strings" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type profileList []string + +var configMap map[string]string + +func (f *profileList) String() string { + return fmt.Sprint(*f) +} + +type configList []string + +func (f *configList) String() string { + return
fmt.Sprint(configMap) +} + +func (f *configList) Set(value string) error { + if value == "" { + return fmt.Errorf(i18n.G("Invalid configuration key")) + } + + items := strings.SplitN(value, "=", 2) + if len(items) < 2 { + return fmt.Errorf(i18n.G("Invalid configuration key")) + } + + if configMap == nil { + configMap = map[string]string{} + } + + configMap[items[0]] = items[1] + + return nil +} + +func (f *profileList) Set(value string) error { + if value == "" { + initRequestedEmptyProfiles = true + return nil + } + if f == nil { + *f = make(profileList, 1) + } else { + *f = append(*f, value) + } + return nil +} + +var initRequestedEmptyProfiles bool + +type initCmd struct { + profArgs profileList + confArgs configList + ephem bool +} + +func (c *initCmd) showByDefault() bool { + return false +} + +func (c *initCmd) usage() string { + return i18n.G( + `Initialize a container from a particular image. + +lxc init [remote:]<image> [remote:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] + +Initializes a container using the specified image and name. + +Not specifying -p will result in the default profile. +Specifying "-p" with no argument will result in no profile. + +Example: +lxc init ubuntu u1`) +} + +func (c *initCmd) is_ephem(s string) bool { + switch s { + case "-e": + return true + case "--ephemeral": + return true + } + return false +} + +func (c *initCmd) is_profile(s string) bool { + switch s { + case "-p": + return true + case "--profile": + return true + } + return false +} + +func (c *initCmd) massage_args() { + l := len(os.Args) + if l < 2 { + return + } + + if c.is_profile(os.Args[l-1]) { + initRequestedEmptyProfiles = true + os.Args = os.Args[0 : l-1] + return + } + + if l < 3 { + return + } + + /* catch "lxc init ubuntu -p -e" */ + if c.is_ephem(os.Args[l-1]) && c.is_profile(os.Args[l-2]) { + initRequestedEmptyProfiles = true + newargs := os.Args[0 : l-2] + newargs = append(newargs, os.Args[l-1]) + os.Args = newargs + return + } +} + +func (c *initCmd) flags() { + c.massage_args() + gnuflag.Var(&c.confArgs, "config", i18n.G("Config key/value to apply to the new container")) + gnuflag.Var(&c.confArgs, "c", i18n.G("Config key/value to apply to the new container")) + gnuflag.Var(&c.profArgs, "profile", i18n.G("Profile to apply to the new container")) + gnuflag.Var(&c.profArgs, "p", i18n.G("Profile to apply to the new container")) + gnuflag.BoolVar(&c.ephem, "ephemeral", false, i18n.G("Ephemeral container")) + gnuflag.BoolVar(&c.ephem, "e", false, i18n.G("Ephemeral container")) +} + +func (c *initCmd) run(config *lxd.Config, args []string) error { + if len(args) > 2 || len(args) < 1 { + return errArgs + } + + iremote, image := config.ParseRemoteAndContainer(args[0]) + + var name string + var remote string + if len(args) == 2 { + remote, name = config.ParseRemoteAndContainer(args[1]) + } else { + remote, name = config.ParseRemoteAndContainer("") + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + // TODO: implement the syntax for supporting other image types/remotes + + /* + * initRequestedEmptyProfiles means user requested empty + * !initRequestedEmptyProfiles but len(profArgs) == 0 means use profile default + */ + profiles := []string{} + for _, p := range c.profArgs { + profiles = append(profiles, p) + } + + var resp *lxd.Response + if name == "" { + fmt.Printf(i18n.G("Creating the container") + "\n") + } else { + fmt.Printf(i18n.G("Creating %s")+"\n", name) + } + + if !initRequestedEmptyProfiles && len(profiles) == 0 { + resp, err = d.Init(name, iremote, image, 
nil, configMap, c.ephem) + } else { + resp, err = d.Init(name, iremote, image, &profiles, configMap, c.ephem) + } + + if err != nil { + return err + } + + c.initProgressTracker(d, resp.Operation) + + err = d.WaitForSuccess(resp.Operation) + + if err != nil { + return err + } else { + op, err := resp.MetadataAsOperation() + if err != nil { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + containers, ok := op.Resources["containers"] + if !ok || len(containers) == 0 { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + if len(containers) == 1 && name == "" { + fields := strings.Split(containers[0], "/") + fmt.Printf(i18n.G("Container name is: %s")+"\n", fields[len(fields)-1]) + } + } + return nil +} + +func (c *initCmd) initProgressTracker(d *lxd.Client, operation string) { + handler := func(msg interface{}) { + if msg == nil { + return + } + + event := msg.(map[string]interface{}) + if event["type"].(string) != "operation" { + return + } + + if event["metadata"] == nil { + return + } + + md := event["metadata"].(map[string]interface{}) + if !strings.HasSuffix(operation, md["id"].(string)) { + return + } + + if md["metadata"] == nil { + return + } + + if shared.StatusCode(md["status_code"].(float64)).IsFinal() { + return + } + + opMd := md["metadata"].(map[string]interface{}) + progress, ok := opMd["download_progress"] + if ok { + fmt.Printf(i18n.G("Retrieving image: %s")+"\r", progress.(string)) + if progress.(string) == "100%" { + fmt.Printf("\n") + } + } + } + go d.Monitor([]string{"operation"}, handler) +} === added file 'src/github.com/lxc/lxd/lxc/launch.go' --- src/github.com/lxc/lxd/lxc/launch.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/launch.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,134 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type launchCmd struct { + init initCmd +} + +func (c *launchCmd) showByDefault() bool { + return true +} + +func (c *launchCmd) usage() string { + return i18n.G( + `Launch a container from a particular image. + +lxc launch [remote:]<image> [remote:][<name>] [--ephemeral|-e] [--profile|-p <profile>...] [--config|-c <key=value>...] + +Launches a container using the specified image and name. + +Not specifying -p will result in the default profile. +Specifying "-p" with no argument will result in no profile. 
+ +Example: +lxc launch ubuntu u1`) +} + +func (c *launchCmd) flags() { + c.init = initCmd{} + + c.init.massage_args() + gnuflag.Var(&c.init.confArgs, "config", i18n.G("Config key/value to apply to the new container")) + gnuflag.Var(&c.init.confArgs, "c", i18n.G("Config key/value to apply to the new container")) + gnuflag.Var(&c.init.profArgs, "profile", i18n.G("Profile to apply to the new container")) + gnuflag.Var(&c.init.profArgs, "p", i18n.G("Profile to apply to the new container")) + gnuflag.BoolVar(&c.init.ephem, "ephemeral", false, i18n.G("Ephemeral container")) + gnuflag.BoolVar(&c.init.ephem, "e", false, i18n.G("Ephemeral container")) +} + +func (c *launchCmd) run(config *lxd.Config, args []string) error { + if len(args) > 2 || len(args) < 1 { + return errArgs + } + + iremote, image := config.ParseRemoteAndContainer(args[0]) + + var name string + var remote string + if len(args) == 2 { + remote, name = config.ParseRemoteAndContainer(args[1]) + } else { + remote, name = config.ParseRemoteAndContainer("") + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + /* + * initRequestedEmptyProfiles means user requested empty + * !initRequestedEmptyProfiles but len(profArgs) == 0 means use profile default + */ + var resp *lxd.Response + profiles := []string{} + for _, p := range c.init.profArgs { + profiles = append(profiles, p) + } + + if !initRequestedEmptyProfiles && len(profiles) == 0 { + resp, err = d.Init(name, iremote, image, nil, configMap, c.init.ephem) + } else { + resp, err = d.Init(name, iremote, image, &profiles, configMap, c.init.ephem) + } + + if err != nil { + return err + } + + c.init.initProgressTracker(d, resp.Operation) + + if name == "" { + op, err := resp.MetadataAsOperation() + if err != nil { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + containers, ok := op.Resources["containers"] + if !ok || len(containers) == 0 { + return fmt.Errorf(i18n.G("didn't get any affected image, container or snapshot from server")) + } + + var version string + toScan := strings.Replace(containers[0], "/", " ", -1) + count, err := fmt.Sscanf(toScan, " %s containers %s", &version, &name) + if err != nil { + return err + } + + if count != 2 { + return fmt.Errorf(i18n.G("bad number of things scanned from image, container or snapshot")) + } + + if version != shared.APIVersion { + return fmt.Errorf(i18n.G("got bad version")) + } + } + fmt.Printf(i18n.G("Creating %s")+"\n", name) + + if err = d.WaitForSuccess(resp.Operation); err != nil { + return err + } + + fmt.Printf(i18n.G("Starting %s")+"\n", name) + resp, err = d.Action(name, shared.Start, -1, false, false) + if err != nil { + return err + } + + err = d.WaitForSuccess(resp.Operation) + if err != nil { + return fmt.Errorf("%s\n"+i18n.G("Try `lxc info --show-log %s` for more info"), err, name) + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxc/list.go' --- src/github.com/lxc/lxd/lxc/list.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/list.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,454 @@ +package main + +import ( + "fmt" + "os" + "regexp" + "sort" + "strings" + "sync" + + "github.com/olekukonko/tablewriter" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type column struct { + Name string + Data columnData + NeedsState bool + NeedsSnapshots bool +} + +type columnData func(shared.ContainerInfo, *shared.ContainerState, []shared.SnapshotInfo) 
string + +type byName [][]string + +func (a byName) Len() int { + return len(a) +} + +func (a byName) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a byName) Less(i, j int) bool { + if a[i][0] == "" { + return false + } + + if a[j][0] == "" { + return true + } + + return a[i][0] < a[j][0] +} + +type listCmd struct { + chosenColumnRunes string + fast bool +} + +func (c *listCmd) showByDefault() bool { + return true +} + +func (c *listCmd) usage() string { + return i18n.G( + `Lists the available resources. + +lxc list [resource] [filters] [-c columns] [--fast] + +The filters are: +* A single keyword like "web" which will list any container with "web" in its name. +* A key/value pair referring to a configuration item. For those, the namespace can be abbreviated to the smallest unambiguous identifier: +* "user.blah=abc" will list all containers with the "blah" user property set to "abc" +* "u.blah=abc" will do the same +* "security.privileged=1" will list all privileged containers +* "s.privileged=1" will do the same + +The columns are: +* 4 - IPv4 address +* 6 - IPv6 address +* a - architecture +* c - creation date +* n - name +* p - pid of container init process +* P - profiles +* s - state +* S - number of snapshots +* t - type (persistent or ephemeral) + +Default column layout: ns46tS +Fast column layout: nsacPt`) +} + +func (c *listCmd) flags() { + gnuflag.StringVar(&c.chosenColumnRunes, "c", "ns46tS", i18n.G("Columns")) + gnuflag.StringVar(&c.chosenColumnRunes, "columns", "ns46tS", i18n.G("Columns")) + gnuflag.BoolVar(&c.fast, "fast", false, i18n.G("Fast mode (same as --columns=nsacPt)")) +} + +// This seems a little excessive. +func (c *listCmd) dotPrefixMatch(short string, full string) bool { + fullMembs := strings.Split(full, ".") + shortMembs := strings.Split(short, ".") + + if len(fullMembs) != len(shortMembs) { + return false + } + + for i, _ := range fullMembs { + if !strings.HasPrefix(fullMembs[i], shortMembs[i]) { + return false + } + } + + return true +} + +func (c *listCmd) shouldShow(filters []string, state *shared.ContainerInfo) bool { + for _, filter := range filters { + if strings.Contains(filter, "=") { + membs := strings.SplitN(filter, "=", 2) + + key := membs[0] + var value string + if len(membs) < 2 { + value = "" + } else { + value = membs[1] + } + + found := false + for configKey, configValue := range state.Config { + if c.dotPrefixMatch(key, configKey) { + //try to test filter value as a regexp + regexpValue := value + if !(strings.Contains(value, "^") || strings.Contains(value, "$")) { + regexpValue = "^" + regexpValue + "$" + } + r, err := regexp.Compile(regexpValue) + //if not regexp compatible use original value + if err != nil { + if value == configValue { + found = true + break + } else { + // the property was found but didn't match + return false + } + } else if r.MatchString(configValue) == true { + found = true + break + } + } + } + + if !found { + return false + } + } else { + if !strings.Contains(state.Name, filter) { + return false + } + } + } + + return true +} + +func (c *listCmd) listContainers(d *lxd.Client, cinfos []shared.ContainerInfo, filters []string, columns []column) error { + headers := []string{} + for _, column := range columns { + headers = append(headers, column.Name) + } + + threads := 10 + if len(cinfos) < threads { + threads = len(cinfos) + } + + cStates := map[string]*shared.ContainerState{} + cStatesLock := sync.Mutex{} + cStatesQueue := make(chan string, threads) + cStatesWg := sync.WaitGroup{} + + cSnapshots := 
map[string][]shared.SnapshotInfo{} + cSnapshotsLock := sync.Mutex{} + cSnapshotsQueue := make(chan string, threads) + cSnapshotsWg := sync.WaitGroup{} + + for i := 0; i < threads; i++ { + cStatesWg.Add(1) + go func() { + for { + cName, more := <-cStatesQueue + if !more { + break + } + + state, err := d.ContainerState(cName) + if err != nil { + continue + } + + cStatesLock.Lock() + cStates[cName] = state + cStatesLock.Unlock() + } + cStatesWg.Done() + }() + + cSnapshotsWg.Add(1) + go func() { + for { + cName, more := <-cSnapshotsQueue + if !more { + break + } + + snaps, err := d.ListSnapshots(cName) + if err != nil { + continue + } + + cSnapshotsLock.Lock() + cSnapshots[cName] = snaps + cSnapshotsLock.Unlock() + } + cSnapshotsWg.Done() + }() + } + + for _, cInfo := range cinfos { + if !c.shouldShow(filters, &cInfo) { + continue + } + + for _, column := range columns { + if column.NeedsState && cInfo.StatusCode != shared.Stopped { + _, ok := cStates[cInfo.Name] + if ok { + continue + } + + cStatesLock.Lock() + cStates[cInfo.Name] = nil + cStatesLock.Unlock() + + cStatesQueue <- cInfo.Name + } + + if column.NeedsSnapshots { + _, ok := cSnapshots[cInfo.Name] + if ok { + continue + } + + cSnapshotsLock.Lock() + cSnapshots[cInfo.Name] = nil + cSnapshotsLock.Unlock() + + cSnapshotsQueue <- cInfo.Name + } + } + } + + close(cStatesQueue) + close(cSnapshotsQueue) + cStatesWg.Wait() + cSnapshotsWg.Wait() + + data := [][]string{} + for _, cInfo := range cinfos { + if !c.shouldShow(filters, &cInfo) { + continue + } + + col := []string{} + for _, column := range columns { + col = append(col, column.Data(cInfo, cStates[cInfo.Name], cSnapshots[cInfo.Name])) + } + data = append(data, col) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader(headers) + sort.Sort(byName(data)) + table.AppendBulk(data) + table.Render() + + return nil +} + +func (c *listCmd) run(config *lxd.Config, args []string) error { + var remote string + name := "" + + filters := []string{} + + if len(args) != 0 { + filters = args + if strings.Contains(args[0], ":") { + remote, name = config.ParseRemoteAndContainer(args[0]) + filters = args[1:] + } else if !strings.Contains(args[0], "=") { + remote = config.DefaultRemote + name = args[0] + } + } + + if remote == "" { + remote = config.DefaultRemote + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + var cts []shared.ContainerInfo + ctslist, err := d.ListContainers() + if err != nil { + return err + } + + if name == "" { + cts = ctslist + } else { + for _, cinfo := range ctslist { + if len(cinfo.Name) >= len(name) && cinfo.Name[0:len(name)] == name { + cts = append(cts, cinfo) + } + } + } + + columns_map := map[rune]column{ + '4': column{i18n.G("IPV4"), c.IP4ColumnData, true, false}, + '6': column{i18n.G("IPV6"), c.IP6ColumnData, true, false}, + 'a': column{i18n.G("ARCHITECTURE"), c.ArchitectureColumnData, false, false}, + 'c': column{i18n.G("CREATED AT"), c.CreatedColumnData, false, false}, + 'n': column{i18n.G("NAME"), c.nameColumnData, false, false}, + 'p': column{i18n.G("PID"), c.PIDColumnData, true, false}, + 'P': column{i18n.G("PROFILES"), c.ProfilesColumnData, false, false}, + 'S': column{i18n.G("SNAPSHOTS"), c.numberSnapshotsColumnData, false, true}, + 's': column{i18n.G("STATE"), c.statusColumnData, false, false}, + 't': column{i18n.G("TYPE"), c.typeColumnData, false, false}, + } + + if c.fast { + c.chosenColumnRunes = "nsacPt" + } 
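+ // Each rune in chosenColumnRunes picks one entry from columns_map above; + // e.g. the default "ns46tS" selects NAME, STATE, IPV4, IPV6, TYPE and + // SNAPSHOTS, in that order.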
+ + columns := []column{} + for _, columnRune := range c.chosenColumnRunes { + if column, ok := columns_map[columnRune]; ok { + columns = append(columns, column) + } else { + return fmt.Errorf(i18n.G("%s contains invalid column characters"), c.chosenColumnRunes) + } + } + + return c.listContainers(d, cts, filters, columns) +} + +func (c *listCmd) nameColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + return cInfo.Name +} + +func (c *listCmd) statusColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + return strings.ToUpper(cInfo.Status) +} + +func (c *listCmd) IP4ColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + if cInfo.StatusCode != shared.Stopped { + ipv4s := []string{} + for netName, net := range cState.Network { + if net.Type == "loopback" { + continue + } + + for _, addr := range net.Addresses { + if shared.StringInSlice(addr.Scope, []string{"link", "local"}) { + continue + } + + if addr.Family == "inet" { + ipv4s = append(ipv4s, fmt.Sprintf("%s (%s)", addr.Address, netName)) + } + } + } + return strings.Join(ipv4s, "\n") + } else { + return "" + } +} + +func (c *listCmd) IP6ColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + if cInfo.StatusCode != shared.Stopped { + ipv6s := []string{} + for netName, net := range cState.Network { + if net.Type == "loopback" { + continue + } + + for _, addr := range net.Addresses { + if shared.StringInSlice(addr.Scope, []string{"link", "local"}) { + continue + } + + if addr.Family == "inet6" { + ipv6s = append(ipv6s, fmt.Sprintf("%s (%s)", addr.Address, netName)) + } + } + } + return strings.Join(ipv6s, "\n") + } else { + return "" + } +} + +func (c *listCmd) typeColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + if cInfo.Ephemeral { + return i18n.G("EPHEMERAL") + } else { + return i18n.G("PERSISTENT") + } +} + +func (c *listCmd) numberSnapshotsColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + return fmt.Sprintf("%d", len(cSnaps)) +} + +func (c *listCmd) PIDColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + if cInfo.StatusCode != shared.Stopped { + return fmt.Sprintf("%d", cState.Pid) + } + + return "" +} + +func (c *listCmd) ArchitectureColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + return cInfo.Architecture +} + +func (c *listCmd) ProfilesColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + return strings.Join(cInfo.Profiles, "\n") +} + +func (c *listCmd) CreatedColumnData(cInfo shared.ContainerInfo, cState *shared.ContainerState, cSnaps []shared.SnapshotInfo) string { + layout := "2006/01/02 15:04 UTC" + + if cInfo.CreationDate.UTC().Unix() != 0 { + return cInfo.CreationDate.UTC().Format(layout) + } + + return "" +} === added file 'src/github.com/lxc/lxd/lxc/list_test.go' --- src/github.com/lxc/lxd/lxc/list_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/list_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,47 @@ +package main + +import ( + "testing" + + "github.com/lxc/lxd/shared" +) + +func TestDotPrefixMatch(t *testing.T) { + list := listCmd{} + + pass := true + pass = pass && 
list.dotPrefixMatch("s.privileged", "security.privileged") + pass = pass && list.dotPrefixMatch("u.blah", "user.blah") + + if !pass { + t.Error("failed prefix matching") + } +} + +func TestShouldShow(t *testing.T) { + list := listCmd{} + + state := &shared.ContainerInfo{ + Name: "foo", + Config: map[string]string{ + "security.privileged": "1", + "user.blah": "abc", + }, + } + + if !list.shouldShow([]string{"u.blah=abc"}, state) { + t.Error("u.blah=abc didn't match") + } + + if !list.shouldShow([]string{"user.blah=abc"}, state) { + t.Error("user.blah=abc didn't match") + } + + if list.shouldShow([]string{"bar", "u.blah=abc"}, state) { + t.Errorf("name filter didn't work") + } + + if list.shouldShow([]string{"bar", "u.blah=other"}, state) { + t.Errorf("value filter didn't work") + } +} === added file 'src/github.com/lxc/lxd/lxc/main.go' --- src/github.com/lxc/lxd/lxc/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,267 @@ +package main + +import ( + "fmt" + "net" + "net/url" + "os" + "os/exec" + "path" + "strings" + "syscall" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" + "github.com/lxc/lxd/shared/logging" +) + +var configPath string + +func main() { + if err := run(); err != nil { + // The action we take depends on the error we get. + msg := fmt.Sprintf(i18n.G("error: %v"), err) + switch t := err.(type) { + case *url.Error: + switch u := t.Err.(type) { + case *net.OpError: + if u.Op == "dial" && u.Net == "unix" { + switch errno := u.Err.(type) { + case syscall.Errno: + switch errno { + case syscall.ENOENT: + msg = i18n.G("LXD socket not found; is LXD running?") + case syscall.ECONNREFUSED: + msg = i18n.G("Connection refused; is LXD running?") + case syscall.EACCES: + msg = i18n.G("Permission denied; are you in the lxd group?") + default: + msg = fmt.Sprintf("%d %s", uintptr(errno), errno.Error()) + } + } + } + } + } + + fmt.Fprintln(os.Stderr, msg) + os.Exit(1) + } +} + +func run() error { + verbose := gnuflag.Bool("verbose", false, i18n.G("Enables verbose mode.")) + debug := gnuflag.Bool("debug", false, i18n.G("Enables debug mode.")) + forceLocal := gnuflag.Bool("force-local", false, i18n.G("Force using the local unix socket.")) + noAlias := gnuflag.Bool("no-alias", false, i18n.G("Ignore aliases when determining what command to run.")) + + configDir := "$HOME/.config/lxc" + if os.Getenv("LXD_CONF") != "" { + configDir = os.Getenv("LXD_CONF") + } + configPath = os.ExpandEnv(path.Join(configDir, "config.yml")) + + if len(os.Args) >= 3 && os.Args[1] == "config" && os.Args[2] == "profile" { + fmt.Fprintf(os.Stderr, i18n.G("`lxc config profile` is deprecated, please use `lxc profile`")+"\n") + os.Args = append(os.Args[:1], os.Args[2:]...) + } + + if len(os.Args) >= 2 && (os.Args[1] == "-h" || os.Args[1] == "--help") { + os.Args[1] = "help" + } + + if len(os.Args) >= 2 && (os.Args[1] == "--all") { + os.Args[1] = "help" + os.Args = append(os.Args, "--all") + } + + if len(os.Args) == 2 && os.Args[1] == "--version" { + os.Args[1] = "version" + } + + if len(os.Args) < 2 { + commands["help"].run(nil, nil) + os.Exit(1) + } + + var config *lxd.Config + var err error + + if *forceLocal { + config = &lxd.DefaultConfig + } else { + config, err = lxd.LoadConfig(configPath) + if err != nil { + return err + } + } + + // This is quite impolite, but it seems gnuflag needs us to shift our + // own exename out of the arguments before parsing them. 
However, this + // is useful for execIfAlias, which wants to know exactly the command + // line we received, and in some cases is called before this shift, and + // in others after. So, let's save the original args. + origArgs := os.Args + name := os.Args[1] + + /* at this point we haven't parsed the args, so we have to look for + * --no-alias by hand. + */ + if !shared.StringInSlice("--no-alias", origArgs) { + execIfAliases(config, origArgs) + } + cmd, ok := commands[name] + if !ok { + commands["help"].run(nil, nil) + fmt.Fprintf(os.Stderr, "\n"+i18n.G("error: unknown command: %s")+"\n", name) + os.Exit(1) + } + cmd.flags() + gnuflag.Usage = func() { + fmt.Fprintf(os.Stderr, i18n.G("Usage: %s")+"\n\n"+i18n.G("Options:")+"\n\n", strings.TrimSpace(cmd.usage())) + gnuflag.PrintDefaults() + } + + os.Args = os.Args[1:] + gnuflag.Parse(true) + + shared.Log, err = logging.GetLogger("", "", *verbose, *debug, nil) + if err != nil { + return err + } + + certf := config.ConfigPath("client.crt") + keyf := config.ConfigPath("client.key") + + if !*forceLocal && os.Args[0] != "help" && os.Args[0] != "version" && (!shared.PathExists(certf) || !shared.PathExists(keyf)) { + fmt.Fprintf(os.Stderr, i18n.G("Generating a client certificate. This may take a minute...")+"\n") + + err = shared.FindOrGenCert(certf, keyf) + if err != nil { + return err + } + } + + err = cmd.run(config, gnuflag.Args()) + if err == errArgs { + /* If we got an error about invalid arguments, let's try to + * expand this as an alias + */ + if !*noAlias { + execIfAliases(config, origArgs) + } + fmt.Fprintf(os.Stderr, "%s\n\n"+i18n.G("error: %v")+"\n", cmd.usage(), err) + os.Exit(1) + } + return err +} + +type command interface { + usage() string + flags() + showByDefault() bool + run(config *lxd.Config, args []string) error +} + +var commands = map[string]command{ + "config": &configCmd{}, + "copy": ©Cmd{}, + "delete": &deleteCmd{}, + "exec": &execCmd{}, + "file": &fileCmd{}, + "finger": &fingerCmd{}, + "help": &helpCmd{}, + "image": &imageCmd{}, + "info": &infoCmd{}, + "init": &initCmd{}, + "launch": &launchCmd{}, + "list": &listCmd{}, + "monitor": &monitorCmd{}, + "move": &moveCmd{}, + "pause": &actionCmd{shared.Freeze, false, false, "pause", -1, false, false, false}, + "profile": &profileCmd{}, + "publish": &publishCmd{}, + "remote": &remoteCmd{}, + "restart": &actionCmd{shared.Restart, true, true, "restart", -1, false, false, false}, + "restore": &restoreCmd{}, + "snapshot": &snapshotCmd{}, + "start": &actionCmd{shared.Start, false, true, "start", -1, false, false, false}, + "stop": &actionCmd{shared.Stop, true, true, "stop", -1, false, false, false}, + "version": &versionCmd{}, +} + +var errArgs = fmt.Errorf(i18n.G("wrong number of subcommand arguments")) + +func expandAlias(config *lxd.Config, origArgs []string) ([]string, bool) { + foundAlias := false + aliasKey := []string{} + aliasValue := []string{} + + for k, v := range config.Aliases { + matches := false + for i, key := range strings.Split(k, " ") { + if len(origArgs) <= i+1 { + break + } + + if origArgs[i+1] == key { + matches = true + aliasKey = strings.Split(k, " ") + aliasValue = strings.Split(v, " ") + break + } + } + + if !matches { + continue + } + + foundAlias = true + break + } + + if !foundAlias { + return []string{}, false + } + + newArgs := []string{origArgs[0]} + hasReplacedArgsVar := false + + for i, aliasArg := range aliasValue { + if aliasArg == "@ARGS@" && len(origArgs) > i { + newArgs = append(newArgs, origArgs[i+1:]...) 
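+ // e.g. with the alias "foo" => "list @ARGS@ -c n" (see main_test.go), + // `lxc foo asdf` hits this branch at i == 1 and the loop yields + // ["lxc", "list", "asdf", "-c", "n"], before "--no-alias" is spliced in below.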
+ hasReplacedArgsVar = true + } else { + newArgs = append(newArgs, aliasArg) + } + } + + if !hasReplacedArgsVar { + /* add the rest of the arguments */ + newArgs = append(newArgs, origArgs[len(aliasKey)+1:]...) + } + + /* don't re-do aliases the next time; this allows us to have recursive + * aliases, e.g. `lxc list` to `lxc list -c n` + */ + newArgs = append(newArgs[:2], append([]string{"--no-alias"}, newArgs[2:]...)...) + + return newArgs, true +} + +func execIfAliases(config *lxd.Config, origArgs []string) { + newArgs, expanded := expandAlias(config, origArgs) + if !expanded { + return + } + + path, err := exec.LookPath(origArgs[0]) + if err != nil { + fmt.Fprintf(os.Stderr, i18n.G("processing aliases failed %s\n"), err) + os.Exit(5) + } + ret := syscall.Exec(path, newArgs, syscall.Environ()) + fmt.Fprintf(os.Stderr, i18n.G("processing aliases failed %s\n"), ret) + os.Exit(5) +} === added file 'src/github.com/lxc/lxd/lxc/main_test.go' --- src/github.com/lxc/lxd/lxc/main_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/main_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +package main + +import ( + "testing" + + "github.com/lxc/lxd" +) + +type aliasTestcase struct { + input []string + expected []string +} + +func slicesEqual(a, b []string) bool { + if a == nil && b == nil { + return true + } + + if a == nil || b == nil { + return false + } + + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +func TestExpandAliases(t *testing.T) { + aliases := map[string]string{ + "tester 12": "list", + "foo": "list @ARGS@ -c n", + } + + testcases := []aliasTestcase{ + aliasTestcase{ + input: []string{"lxc", "list"}, + expected: []string{"lxc", "list"}, + }, + aliasTestcase{ + input: []string{"lxc", "tester", "12"}, + expected: []string{"lxc", "list", "--no-alias"}, + }, + aliasTestcase{ + input: []string{"lxc", "foo", "asdf"}, + expected: []string{"lxc", "list", "--no-alias", "asdf", "-c", "n"}, + }, + } + + conf := &lxd.Config{Aliases: aliases} + + for _, tc := range testcases { + result, expanded := expandAlias(conf, tc.input) + if !expanded { + if !slicesEqual(tc.input, tc.expected) { + t.Errorf("didn't expand when expected to: %s", tc.input) + } + continue + } + + if !slicesEqual(result, tc.expected) { + t.Errorf("%s didn't match %s", result, tc.expected) + } + } +} === added file 'src/github.com/lxc/lxd/lxc/monitor.go' --- src/github.com/lxc/lxd/lxc/monitor.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/monitor.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,88 @@ +package main + +import ( + "fmt" + + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" +) + +type typeList []string + +func (f *typeList) String() string { + return fmt.Sprint(*f) +} + +func (f *typeList) Set(value string) error { + if value == "" { + return fmt.Errorf("Invalid type: %s", value) + } + + if f == nil { + *f = make(typeList, 1) + } else { + *f = append(*f, value) + } + return nil +} + +type monitorCmd struct { + typeArgs typeList +} + +func (c *monitorCmd) showByDefault() bool { + return false +} + +func (c *monitorCmd) usage() string { + return i18n.G( + `Monitor activity on the LXD server. + +lxc monitor [remote:] [--type=TYPE...] + +Connects to the monitoring interface of the specified LXD server. + +By default it will listen to all message types. +Specific types to listen to can be specified with --type. 
+ +Example: +lxc monitor --type=logging`) +} + +func (c *monitorCmd) flags() { + gnuflag.Var(&c.typeArgs, "type", i18n.G("Event type to listen for")) +} + +func (c *monitorCmd) run(config *lxd.Config, args []string) error { + var remote string + + if len(args) > 1 { + return errArgs + } + + if len(args) == 0 { + remote, _ = config.ParseRemoteAndContainer("") + } else { + remote, _ = config.ParseRemoteAndContainer(args[0]) + } + + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + handler := func(message interface{}) { + render, err := yaml.Marshal(&message) + if err != nil { + fmt.Printf("error: %s\n", err) + return + } + + fmt.Printf("%s\n\n", render) + } + + return d.Monitor(c.typeArgs, handler) +} === added file 'src/github.com/lxc/lxd/lxc/move.go' --- src/github.com/lxc/lxd/lxc/move.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/move.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,66 @@ +package main + +import ( + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/i18n" +) + +type moveCmd struct { + httpAddr string +} + +func (c *moveCmd) showByDefault() bool { + return true +} + +func (c *moveCmd) usage() string { + return i18n.G( + `Move containers within or in between LXD instances. + +lxc move [remote:]<source container> [remote:]<destination container> + Move a container between two hosts, renaming it if destination name differs. + +lxc move <old name> <new name> + Rename a local container. +`) +} + +func (c *moveCmd) flags() {} + +func (c *moveCmd) run(config *lxd.Config, args []string) error { + if len(args) != 2 { + return errArgs + } + + sourceRemote, sourceName := config.ParseRemoteAndContainer(args[0]) + destRemote, destName := config.ParseRemoteAndContainer(args[1]) + + // As an optimization, if the source and destination are the same, do + // this via a simple rename. This only works for containers that aren't + // running; containers that are running should be live migrated (of + // course, changing the hostname of a running container isn't supported + // right now, so this simply won't work). + if sourceRemote == destRemote { + source, err := lxd.NewClient(config, sourceRemote) + if err != nil { + return err + } + + rename, err := source.Rename(sourceName, destName) + if err != nil { + return err + } + + return source.WaitForSuccess(rename.Operation) + } + + cpy := copyCmd{} + + // A move is just a copy followed by a delete; however, we want to + // keep the volatile entries around since we are moving the container. + if err := cpy.copyContainer(config, args[0], args[1], true, -1); err != nil { + return err + } + + return commands["delete"].run(config, args[:1]) +} === added file 'src/github.com/lxc/lxd/lxc/profile.go' --- src/github.com/lxc/lxd/lxc/profile.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/profile.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,351 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "syscall" + + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/i18n" + "github.com/lxc/lxd/shared/termios" +) + +type profileCmd struct { + httpAddr string +} + +func (c *profileCmd) showByDefault() bool { + return true +} + +func (c *profileCmd) profileEditHelp() string { + return i18n.G( + `### This is a yaml representation of the profile. +### Any line starting with a '#' will be ignored. +### +### A profile consists of a set of configuration items followed by a set of +### devices. 
+### +### An example would look like: +### name: onenic +### config: +### raw.lxc: lxc.aa_profile=unconfined +### devices: +### eth0: +### nictype: bridged +### parent: lxcbr0 +### type: nic +### +### Note that the name is shown but cannot be changed`) +} + +func (c *profileCmd) usage() string { + return i18n.G( + `Manage configuration profiles. + +lxc profile list [filters] List available profiles. +lxc profile show <profile> Show details of a profile. +lxc profile create <profile> Create a profile. +lxc profile copy <profile> <remote> Copy the profile to the specified remote. +lxc profile get <profile> <key> Get profile configuration. +lxc profile set <profile> <key> <value> Set profile configuration. +lxc profile delete <profile> Delete a profile. +lxc profile edit <profile> + Edit profile, either by launching external editor or reading STDIN. + Example: lxc profile edit <profile> # launch editor + cat profile.yml | lxc profile edit <profile> # read from profile.yml +lxc profile apply <container> <profiles> + Apply a comma-separated list of profiles to a container, in order. + All profiles passed in this call (and only those) will be applied + to the specified container. + Example: lxc profile apply foo default,bar # Apply default and bar + lxc profile apply foo default # Only default is active + lxc profile apply '' # no profiles are applied anymore + lxc profile apply bar,default # Apply default second now + +Devices: +lxc profile device list <profile> List devices in the given profile. +lxc profile device show <profile> Show full device details in the given profile. +lxc profile device remove <profile> <name> Remove a device from a profile. +lxc profile device add <profile name> <device name> <device type> [key=value]... + Add a profile device, such as a disk or a nic, to the containers + using the specified profile.`) +} + +func (c *profileCmd) flags() {} + +func (c *profileCmd) run(config *lxd.Config, args []string) error { + if len(args) < 1 { + return errArgs + } + + if args[0] == "list" { + return c.doProfileList(config, args) + } + + if len(args) < 2 { + return errArgs + } + + remote, profile := config.ParseRemoteAndContainer(args[1]) + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + switch args[0] { + case "create": + return c.doProfileCreate(client, profile) + case "delete": + return c.doProfileDelete(client, profile) + case "device": + return c.doProfileDevice(config, args) + case "edit": + return c.doProfileEdit(client, profile) + case "apply": + container := profile + switch len(args) { + case 2: + profile = "" + case 3: + profile = args[2] + default: + return errArgs + } + return c.doProfileApply(client, container, profile) + case "get": + return c.doProfileGet(client, profile, args[2:]) + case "set": + return c.doProfileSet(client, profile, args[2:]) + case "unset": + return c.doProfileSet(client, profile, args[2:]) + case "copy": + return c.doProfileCopy(config, client, profile, args[2:]) + case "show": + return c.doProfileShow(client, profile) + default: + return errArgs + } +} + +func (c *profileCmd) doProfileCreate(client *lxd.Client, p string) error { + err := client.ProfileCreate(p) + if err == nil { + fmt.Printf(i18n.G("Profile %s created")+"\n", p) + } + return err +} + +func (c *profileCmd) doProfileEdit(client *lxd.Client, p string) error { + // If stdin isn't a terminal, read text from it + if !termios.IsTerminal(int(syscall.Stdin)) { + contents, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + + newdata := shared.ProfileConfig{} + err = yaml.Unmarshal(contents, &newdata) + if err != nil { + return err + } + return client.PutProfile(p, newdata) + } + + // Extract the current value + profile, err := client.ProfileConfig(p)
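+ // (Same edit/retry pattern as doImageEdit in image.go: marshal the current + // profile to YAML, open it in an external editor, and re-open the editor + // if the edited buffer fails to parse or the server rejects it.)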
+ if err != nil { + return err + } + + data, err := yaml.Marshal(&profile) + if err != nil { + return err + } + + // Spawn the editor + content, err := shared.TextEditor("", []byte(c.profileEditHelp()+"\n\n"+string(data))) + if err != nil { + return err + } + + for { + // Parse the text received from the editor + newdata := shared.ProfileConfig{} + err = yaml.Unmarshal(content, &newdata) + if err == nil { + err = client.PutProfile(p, newdata) + } + + // Respawn the editor + if err != nil { + fmt.Fprintf(os.Stderr, i18n.G("Config parsing error: %s")+"\n", err) + fmt.Println(i18n.G("Press enter to open the editor again")) + + _, err := os.Stdin.Read(make([]byte, 1)) + if err != nil { + return err + } + + content, err = shared.TextEditor("", content) + if err != nil { + return err + } + continue + } + break + } + return nil +} + +func (c *profileCmd) doProfileDelete(client *lxd.Client, p string) error { + err := client.ProfileDelete(p) + if err == nil { + fmt.Printf(i18n.G("Profile %s deleted")+"\n", p) + } + return err +} + +func (c *profileCmd) doProfileApply(client *lxd.Client, d string, p string) error { + resp, err := client.ApplyProfile(d, p) + if err != nil { + return err + } + + err = client.WaitForSuccess(resp.Operation) + if err == nil { + if p == "" { + p = i18n.G("(none)") + } + fmt.Printf(i18n.G("Profile %s applied to %s")+"\n", p, d) + } + + return err +} + +func (c *profileCmd) doProfileShow(client *lxd.Client, p string) error { + profile, err := client.ProfileConfig(p) + if err != nil { + return err + } + + data, err := yaml.Marshal(&profile) + if err != nil { + return err + } + fmt.Printf("%s", data) + + return nil +} + +func (c *profileCmd) doProfileCopy(config *lxd.Config, client *lxd.Client, p string, args []string) error { + if len(args) != 1 { + return errArgs + } + remote, newname := config.ParseRemoteAndContainer(args[0]) + if newname == "" { + newname = p + } + + dest, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + return client.ProfileCopy(p, newname, dest) +} + +func (c *profileCmd) doProfileDevice(config *lxd.Config, args []string) error { + // device add b1 eth0 nic type=bridged + // device list b1 + // device remove b1 eth0 + if len(args) < 3 { + return errArgs + } + + cfg := configCmd{} + + switch args[1] { + case "add": + return cfg.deviceAdd(config, "profile", args) + case "remove": + return cfg.deviceRm(config, "profile", args) + case "list": + return cfg.deviceList(config, "profile", args) + case "show": + return cfg.deviceShow(config, "profile", args) + default: + return errArgs + } +} + +func (c *profileCmd) doProfileGet(client *lxd.Client, p string, args []string) error { + // we shifted @args, so it should read "<key>" + if len(args) != 1 { + return errArgs + } + + resp, err := client.GetProfileConfig(p) + if err != nil { + return err + } + for k, v := range resp { + if k == args[0] { + fmt.Printf("%s\n", v) + } + } + return nil +} + +func (c *profileCmd) doProfileSet(client *lxd.Client, p string, args []string) error { + // we shifted @args, so it should read "<key> [<value>]" + if len(args) < 1 { + return errArgs + } + + key := args[0] + var value string + if len(args) < 2 { + value = "" + } else { + value = args[1] + } + + if !termios.IsTerminal(int(syscall.Stdin)) && value == "-" { + buf, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return fmt.Errorf("Can't read from stdin: %s", err) + } + value = string(buf[:]) + } + + err := client.SetProfileConfigItem(p, key, value) + return err +} + +func (c *profileCmd) doProfileList(config *lxd.Config, args []string) error 
{ + var remote string + if len(args) > 1 { + var name string + remote, name = config.ParseRemoteAndContainer(args[1]) + if name != "" { + return fmt.Errorf(i18n.G("Cannot provide container name to list")) + } + } else { + remote = config.DefaultRemote + } + + client, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + profiles, err := client.ListProfiles() + if err != nil { + return err + } + fmt.Printf("%s\n", strings.Join(profiles, "\n")) + return nil +} === added file 'src/github.com/lxc/lxd/lxc/publish.go' --- src/github.com/lxc/lxd/lxc/publish.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/publish.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,159 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/i18n" + + "github.com/lxc/lxd/shared" +) + +type publishCmd struct { + pAliases aliasList // aliasList defined in lxc/image.go + makePublic bool + Force bool +} + +func (c *publishCmd) showByDefault() bool { + return true +} + +func (c *publishCmd) usage() string { + return i18n.G( + `Publish containers as images. + +lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-value]...`) +} + +func (c *publishCmd) flags() { + gnuflag.BoolVar(&c.makePublic, "public", false, i18n.G("Make the image public")) + gnuflag.Var(&c.pAliases, "alias", i18n.G("New alias to define at target")) + gnuflag.BoolVar(&c.Force, "force", false, i18n.G("Stop the container if currently running")) + gnuflag.BoolVar(&c.Force, "f", false, i18n.G("Stop the container if currently running")) +} + +func (c *publishCmd) run(config *lxd.Config, args []string) error { + var cRemote string + var cName string + iName := "" + iRemote := "" + properties := map[string]string{} + firstprop := 1 // first property is arg[2] if arg[1] is image remote, else arg[1] + + if len(args) < 1 { + return errArgs + } + + cRemote, cName = config.ParseRemoteAndContainer(args[0]) + if len(args) >= 2 && !strings.Contains(args[1], "=") { + firstprop = 2 + iRemote, iName = config.ParseRemoteAndContainer(args[1]) + } else { + iRemote, iName = config.ParseRemoteAndContainer("") + } + + if cName == "" { + return fmt.Errorf(i18n.G("Container name is mandatory")) + } + if iName != "" { + return fmt.Errorf(i18n.G("There is no \"image name\". Did you want an alias?")) + } + + d, err := lxd.NewClient(config, iRemote) + if err != nil { + return err + } + + s := d + if cRemote != iRemote { + s, err = lxd.NewClient(config, cRemote) + if err != nil { + return err + } + } + + if !shared.IsSnapshot(cName) { + ct, err := s.ContainerInfo(cName) + if err != nil { + return err + } + + wasRunning := ct.StatusCode != 0 && ct.StatusCode != shared.Stopped + wasEphemeral := ct.Ephemeral + + if wasRunning { + if !c.Force { + return fmt.Errorf(i18n.G("The container is currently running. 
Use --force to have it stopped and restarted."))
+		}
+
+		if ct.Ephemeral {
+			ct.Ephemeral = false
+			err := s.UpdateContainerConfig(cName, ct.Brief())
+			if err != nil {
+				return err
+			}
+		}
+
+		resp, err := s.Action(cName, shared.Stop, -1, true, false)
+		if err != nil {
+			return err
+		}
+
+		op, err := s.WaitFor(resp.Operation)
+		if err != nil {
+			return err
+		}
+
+		if op.StatusCode == shared.Failure {
+			return fmt.Errorf(i18n.G("Stopping container failed!"))
+		}
+		defer s.Action(cName, shared.Start, -1, true, false)
+
+		if wasEphemeral {
+			ct.Ephemeral = true
+			err := s.UpdateContainerConfig(cName, ct.Brief())
+			if err != nil {
+				return err
+			}
+		}
+	}
+	}
+
+	for i := firstprop; i < len(args); i++ {
+		entry := strings.SplitN(args[i], "=", 2)
+		if len(entry) < 2 {
+			return errArgs
+		}
+		properties[entry[0]] = entry[1]
+	}
+
+	var fp string
+
+	// Optimized local publish
+	if cRemote == iRemote {
+		fp, err = d.ImageFromContainer(cName, c.makePublic, c.pAliases, properties)
+		if err != nil {
+			return err
+		}
+		fmt.Printf(i18n.G("Container published with fingerprint: %s")+"\n", fp)
+		return nil
+	}
+
+	fp, err = s.ImageFromContainer(cName, false, nil, properties)
+	if err != nil {
+		return err
+	}
+	defer s.DeleteImage(fp)
+
+	err = s.CopyImage(fp, d, false, c.pAliases, c.makePublic, false, nil)
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf(i18n.G("Container published with fingerprint: %s")+"\n", fp)
+
+	return nil
+}
=== added file 'src/github.com/lxc/lxd/lxc/remote.go'
--- src/github.com/lxc/lxd/lxc/remote.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxc/remote.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,407 @@
+package main
+
+import (
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/olekukonko/tablewriter"
+
+	"golang.org/x/crypto/ssh/terminal"
+
+	"github.com/lxc/lxd"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/gnuflag"
+	"github.com/lxc/lxd/shared/i18n"
+)
+
+type remoteCmd struct {
+	httpAddr   string
+	acceptCert bool
+	password   string
+	public     bool
+	protocol   string
+}
+
+func (c *remoteCmd) showByDefault() bool {
+	return true
+}
+
+func (c *remoteCmd) usage() string {
+	return i18n.G(
+		`Manage remote LXD servers.
+
+lxc remote add <name> <url> [--accept-certificate] [--password=PASSWORD]
+               [--public] [--protocol=PROTOCOL] Add the remote <name> at <url>.
+lxc remote remove <name> Remove the remote <name>.
+lxc remote list List all remotes.
+lxc remote rename <old name> <new name> Rename remote <old name> to <new name>.
+lxc remote set-url <name> <url> Update <name>'s url to <url>.
+lxc remote set-default <name> Set the default remote.
+lxc remote get-default Print the default remote.`) +} + +func (c *remoteCmd) flags() { + gnuflag.BoolVar(&c.acceptCert, "accept-certificate", false, i18n.G("Accept certificate")) + gnuflag.StringVar(&c.password, "password", "", i18n.G("Remote admin password")) + gnuflag.StringVar(&c.protocol, "protocol", "", i18n.G("Server protocol (lxd or simplestreams)")) + gnuflag.BoolVar(&c.public, "public", false, i18n.G("Public image server")) +} + +func (c *remoteCmd) addServer(config *lxd.Config, server string, addr string, acceptCert bool, password string, public bool, protocol string) error { + var rScheme string + var rHost string + var rPort string + + // Setup the remotes list + if config.Remotes == nil { + config.Remotes = make(map[string]lxd.RemoteConfig) + } + + // Fast track simplestreams + if protocol == "simplestreams" { + config.Remotes[server] = lxd.RemoteConfig{Addr: addr, Public: true, Protocol: protocol} + return nil + } + + /* Complex remote URL parsing */ + remoteURL, err := url.Parse(addr) + if err != nil { + return err + } + + if remoteURL.Scheme != "" { + if remoteURL.Scheme != "unix" && remoteURL.Scheme != "https" { + rScheme = "https" + } else { + rScheme = remoteURL.Scheme + } + } else if addr[0] == '/' { + rScheme = "unix" + } else { + if !shared.PathExists(addr) { + rScheme = "https" + } else { + rScheme = "unix" + } + } + + if remoteURL.Host != "" { + rHost = remoteURL.Host + } else { + rHost = addr + } + + host, port, err := net.SplitHostPort(rHost) + if err == nil { + rHost = host + rPort = port + } else { + rPort = shared.DefaultPort + } + + if rScheme == "unix" { + if addr[0:5] == "unix:" { + if addr[0:7] == "unix://" { + if len(addr) > 8 { + rHost = addr[8:] + } else { + rHost = "" + } + } else { + rHost = addr[6:] + } + } + rPort = "" + } + + if strings.Contains(rHost, ":") && !strings.HasPrefix(rHost, "[") { + rHost = fmt.Sprintf("[%s]", rHost) + } + + if rPort != "" { + addr = rScheme + "://" + rHost + ":" + rPort + } else { + addr = rScheme + "://" + rHost + } + + /* Actually add the remote */ + config.Remotes[server] = lxd.RemoteConfig{Addr: addr, Protocol: protocol} + + remote := config.ParseRemote(server) + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + if len(addr) > 5 && addr[0:5] == "unix:" { + // NewClient succeeded so there was a lxd there (we fingered + // it) so just accept it + return nil + } + + var certificate *x509.Certificate + + /* Attempt to connect using the system root CA */ + err = d.Finger() + if err != nil { + // Failed to connect using the system CA, so retrieve the remote certificate + certificate, err = shared.GetRemoteCertificate(addr) + if err != nil { + return err + } + } + + if certificate != nil { + if !acceptCert { + digest := sha256.Sum256(certificate.Raw) + + fmt.Printf(i18n.G("Certificate fingerprint: %x")+"\n", digest) + fmt.Printf(i18n.G("ok (y/n)?") + " ") + line, err := shared.ReadStdin() + if err != nil { + return err + } + + if len(line) < 1 || line[0] != 'y' && line[0] != 'Y' { + return fmt.Errorf(i18n.G("Server certificate NACKed by user")) + } + } + + dnam := d.Config.ConfigPath("servercerts") + err := os.MkdirAll(dnam, 0750) + if err != nil { + return fmt.Errorf(i18n.G("Could not create server cert dir")) + } + + certf := fmt.Sprintf("%s/%s.crt", dnam, d.Name) + certOut, err := os.Create(certf) + if err != nil { + return err + } + + pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: certificate.Raw}) + certOut.Close() + + // Setup a new connection, this time with the remote 
certificate + d, err = lxd.NewClient(config, remote) + if err != nil { + return err + } + } + + if d.IsPublic() || public { + config.Remotes[server] = lxd.RemoteConfig{Addr: addr, Public: true} + + if err := d.Finger(); err != nil { + return err + } + + return nil + } + + if d.AmTrusted() { + // server already has our cert, so we're done + return nil + } + + if password == "" { + fmt.Printf(i18n.G("Admin password for %s: "), server) + pwd, err := terminal.ReadPassword(0) + if err != nil { + /* We got an error, maybe this isn't a terminal, let's try to + * read it as a file */ + pwd, err = shared.ReadStdin() + if err != nil { + return err + } + } + fmt.Println("") + password = string(pwd) + } + + err = d.AddMyCertToServer(password) + if err != nil { + return err + } + + if !d.AmTrusted() { + return fmt.Errorf(i18n.G("Server doesn't trust us after adding our cert")) + } + + fmt.Println(i18n.G("Client certificate stored at server: "), server) + return nil +} + +func (c *remoteCmd) removeCertificate(config *lxd.Config, remote string) { + certf := config.ServerCertPath(remote) + shared.Debugf("Trying to remove %s", certf) + + os.Remove(certf) +} + +func (c *remoteCmd) run(config *lxd.Config, args []string) error { + if len(args) < 1 { + return errArgs + } + + switch args[0] { + case "add": + if len(args) < 3 { + return errArgs + } + + if rc, ok := config.Remotes[args[1]]; ok { + return fmt.Errorf(i18n.G("remote %s exists as <%s>"), args[1], rc.Addr) + } + + err := c.addServer(config, args[1], args[2], c.acceptCert, c.password, c.public, c.protocol) + if err != nil { + delete(config.Remotes, args[1]) + c.removeCertificate(config, args[1]) + return err + } + + case "remove": + if len(args) != 2 { + return errArgs + } + + rc, ok := config.Remotes[args[1]] + if !ok { + return fmt.Errorf(i18n.G("remote %s doesn't exist"), args[1]) + } + + if rc.Static { + return fmt.Errorf(i18n.G("remote %s is static and cannot be modified"), args[1]) + } + + if config.DefaultRemote == args[1] { + return fmt.Errorf(i18n.G("can't remove the default remote")) + } + + delete(config.Remotes, args[1]) + + c.removeCertificate(config, args[1]) + + case "list": + data := [][]string{} + for name, rc := range config.Remotes { + strPublic := i18n.G("NO") + if rc.Public { + strPublic = i18n.G("YES") + } + + strStatic := i18n.G("NO") + if rc.Static { + strStatic = i18n.G("YES") + } + + if rc.Protocol == "" { + rc.Protocol = "lxd" + } + + strName := name + if name == config.DefaultRemote { + strName = fmt.Sprintf("%s (%s)", name, i18n.G("default")) + } + data = append(data, []string{strName, rc.Addr, rc.Protocol, strPublic, strStatic}) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetAutoWrapText(false) + table.SetAlignment(tablewriter.ALIGN_LEFT) + table.SetRowLine(true) + table.SetHeader([]string{ + i18n.G("NAME"), + i18n.G("URL"), + i18n.G("PROTOCOL"), + i18n.G("PUBLIC"), + i18n.G("STATIC")}) + sort.Sort(byName(data)) + table.AppendBulk(data) + table.Render() + + return nil + + case "rename": + if len(args) != 3 { + return errArgs + } + + rc, ok := config.Remotes[args[1]] + if !ok { + return fmt.Errorf(i18n.G("remote %s doesn't exist"), args[1]) + } + + if rc.Static { + return fmt.Errorf(i18n.G("remote %s is static and cannot be modified"), args[1]) + } + + if _, ok := config.Remotes[args[2]]; ok { + return fmt.Errorf(i18n.G("remote %s already exists"), args[2]) + } + + // Rename the certificate file + oldPath := filepath.Join(config.ConfigPath("servercerts"), fmt.Sprintf("%s.crt", args[1])) + newPath := 
filepath.Join(config.ConfigPath("servercerts"), fmt.Sprintf("%s.crt", args[2]))
+		if shared.PathExists(oldPath) {
+			err := os.Rename(oldPath, newPath)
+			if err != nil {
+				return err
+			}
+		}
+
+		config.Remotes[args[2]] = rc
+		delete(config.Remotes, args[1])
+
+		if config.DefaultRemote == args[1] {
+			config.DefaultRemote = args[2]
+		}
+
+	case "set-url":
+		if len(args) != 3 {
+			return errArgs
+		}
+
+		rc, ok := config.Remotes[args[1]]
+		if !ok {
+			return fmt.Errorf(i18n.G("remote %s doesn't exist"), args[1])
+		}
+
+		if rc.Static {
+			return fmt.Errorf(i18n.G("remote %s is static and cannot be modified"), args[1])
+		}
+
+		config.Remotes[args[1]] = lxd.RemoteConfig{Addr: args[2]}
+
+	case "set-default":
+		if len(args) != 2 {
+			return errArgs
+		}
+
+		_, ok := config.Remotes[args[1]]
+		if !ok {
+			return fmt.Errorf(i18n.G("remote %s doesn't exist"), args[1])
+		}
+		config.DefaultRemote = args[1]
+
+	case "get-default":
+		if len(args) != 1 {
+			return errArgs
+		}
+		fmt.Println(config.DefaultRemote)
+		return nil
+	default:
+		return errArgs
+	}
+
+	return lxd.SaveConfig(config, configPath)
+}
=== added file 'src/github.com/lxc/lxd/lxc/restore.go'
--- src/github.com/lxc/lxd/lxc/restore.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxc/restore.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,61 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/gnuflag"
+	"github.com/lxc/lxd/shared/i18n"
+)
+
+type restoreCmd struct {
+	stateful bool
+}
+
+func (c *restoreCmd) showByDefault() bool {
+	return true
+}
+
+func (c *restoreCmd) usage() string {
+	return i18n.G(
+		`Set the current state of a resource back to a snapshot.
+
+lxc restore [remote:]<source> <snapshot name> [--stateful]
+
+Restores a container from a snapshot (optionally with running state, see
+snapshot help for details).
+
+For example:
+lxc snapshot u1 snap0 # create the snapshot
+lxc restore u1 snap0 # restore the snapshot`)
+}
+
+func (c *restoreCmd) flags() {
+	gnuflag.BoolVar(&c.stateful, "stateful", false, i18n.G("Whether or not to restore the container's running state from snapshot (if available)"))
+}
+
+func (c *restoreCmd) run(config *lxd.Config, args []string) error {
+	if len(args) < 2 {
+		return errArgs
+	}
+
+	var snapname = args[1]
+
+	remote, name := config.ParseRemoteAndContainer(args[0])
+	d, err := lxd.NewClient(config, remote)
+	if err != nil {
+		return err
+	}
+
+	if !shared.IsSnapshot(snapname) {
+		snapname = fmt.Sprintf("%s/%s", name, snapname)
+	}
+
+	resp, err := d.RestoreSnapshot(name, snapname, c.stateful)
+	if err != nil {
+		return err
+	}
+
+	return d.WaitForSuccess(resp.Operation)
+}
=== added file 'src/github.com/lxc/lxd/lxc/snapshot.go'
--- src/github.com/lxc/lxd/lxc/snapshot.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxc/snapshot.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,70 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/lxc/lxd"
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/gnuflag"
+	"github.com/lxc/lxd/shared/i18n"
+)
+
+type snapshotCmd struct {
+	stateful bool
+}
+
+func (c *snapshotCmd) showByDefault() bool {
+	return true
+}
+
+func (c *snapshotCmd) usage() string {
+	return i18n.G(
+		`Create a read-only snapshot of a container.
+
+lxc snapshot [remote:]<source> <snapshot name> [--stateful]
+
+Creates a snapshot of the container (optionally with the container's memory
+state). When --stateful is used, LXD attempts to checkpoint the container's
+running state, including process memory state, TCP connections, etc.
so that it +can be restored (via lxc restore) at a later time (although some things, e.g. +TCP connections after the TCP timeout window has expired, may not be restored +successfully). + +Example: +lxc snapshot u1 snap0`) +} + +func (c *snapshotCmd) flags() { + gnuflag.BoolVar(&c.stateful, "stateful", false, i18n.G("Whether or not to snapshot the container's running state")) +} + +func (c *snapshotCmd) run(config *lxd.Config, args []string) error { + if len(args) < 1 { + return errArgs + } + + var snapname string + if len(args) < 2 { + snapname = "" + } else { + snapname = args[1] + } + + remote, name := config.ParseRemoteAndContainer(args[0]) + d, err := lxd.NewClient(config, remote) + if err != nil { + return err + } + + // we don't allow '/' in snapshot names + if shared.IsSnapshot(snapname) { + return fmt.Errorf(i18n.G("'/' not allowed in snapshot name")) + } + + resp, err := d.Snapshot(name, snapname, c.stateful) + if err != nil { + return err + } + + return d.WaitForSuccess(resp.Operation) +} === added file 'src/github.com/lxc/lxd/lxc/version.go' --- src/github.com/lxc/lxd/lxc/version.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxc/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,33 @@ +package main + +import ( + "fmt" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/i18n" +) + +type versionCmd struct{} + +func (c *versionCmd) showByDefault() bool { + return true +} + +func (c *versionCmd) usage() string { + return i18n.G( + `Prints the version number of LXD. + +lxc version`) +} + +func (c *versionCmd) flags() { +} + +func (c *versionCmd) run(_ *lxd.Config, args []string) error { + if len(args) > 0 { + return errArgs + } + fmt.Println(shared.Version) + return nil +} === added directory 'src/github.com/lxc/lxd/lxd' === added directory 'src/github.com/lxc/lxd/lxd-bridge' === added file 'src/github.com/lxc/lxd/lxd-bridge/lxd-bridge' --- src/github.com/lxc/lxd/lxd-bridge/lxd-bridge 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd-bridge/lxd-bridge 2016-03-22 15:18:22 +0000 @@ -0,0 +1,212 @@ +#!/bin/sh - + +distrosysconfdir="/etc/default/" +varrun="/run/lxd/" +varlib="/var/lib/lxd/lxd-bridge/" + +# lxdbr0 defaults to only setting up the standard IPv6 link-local network +# to enable routable IPv4 and/or IPv6, please edit /etc/default/lxd + +# The values below are defaults +USE_LXD_BRIDGE="true" +LXD_BRIDGE="lxdbr0" +LXD_CONFILE="" +LXD_DOMAIN="" + +# IPv4 +LXD_IPV4_ADDR="" +LXD_IPV4_NETMASK="" +LXD_IPV4_NETWORK="" +LXD_IPV4_DHCP_RANGE="" +LXD_IPV4_DHCP_MAX="" +LXD_IPV4_NAT="false" + +# IPv6 +LXD_IPV6_ADDR="" +LXD_IPV6_MASK="" +LXD_IPV6_NETWORK="" +LXD_IPV6_NAT="false" +LXD_IPV6_PROXY="true" + +[ ! -f "${distrosysconfdir}/lxd" ] || . "${distrosysconfdir}/lxd" + +use_iptables_lock="-w" +iptables -w -L -n > /dev/null 2>&1 || use_iptables_lock="" + +_netmask2cidr () +{ + # Assumes there's no "255." after a non-255 byte in the mask + local x=${1##*255.} + set -- "0^^^128^192^224^240^248^252^254^" "$(( (${#1} - ${#x})*2 ))" "${x%%.*}" + x=${1%%${3}*} + echo $(( ${2} + (${#x}/4) )) +} + +ifdown() { + ip addr flush dev "${1}" + ip link set dev "${1}" down +} + +ifup() { + ip addr add fe80::1/64 dev "${1}" + if [ -n "${LXD_IPV4_NETMASK}" ] && [ -n "${LXD_IPV4_ADDR}" ]; then + MASK=$(_netmask2cidr ${LXD_IPV4_NETMASK}) + CIDR_ADDR="${LXD_IPV4_ADDR}/${MASK}" + ip addr add "${CIDR_ADDR}" dev "${1}" + fi + ip link set dev "${1}" up +} + +start() { + [ "x${USE_LXD_BRIDGE}" = "xtrue" ] || { exit 0; } + + [ ! 
-f "${varrun}/network_up" ] || { echo "lxd-bridge is already running"; exit 1; } + + if [ -d /sys/class/net/${LXD_BRIDGE} ]; then + stop force || true + fi + + FAILED=1 + + cleanup() { + set +e + if [ "${FAILED}" = "1" ]; then + echo "Failed to setup lxd-bridge." >&2 + stop force + fi + } + + trap cleanup EXIT HUP INT TERM + set -e + + # set up the lxd network + [ ! -d "/sys/class/net/${LXD_BRIDGE}" ] && ip link add dev "${LXD_BRIDGE}" type bridge + + # if we are run from systemd on a system with selinux enabled, + # the mkdir will create /run/lxd as init_var_run_t which dnsmasq + # can't write its pid into, so we restorecon it (to var_run_t) + if [ ! -d "${varrun}" ]; then + mkdir -p "${varrun}" + if which restorecon >/dev/null 2>&1; then + restorecon "${varrun}" + fi + fi + + ifup "${LXD_BRIDGE}" "${LXD_IPV4_ADDR}" "${LXD_IPV4_NETMASK}" + + LXD_IPV4_ARG="" + if [ -n "${LXD_IPV4_ADDR}" ] && [ -n "${LXD_IPV4_NETMASK}" ] && [ -n "${LXD_IPV4_NETWORK}" ]; then + echo 1 > /proc/sys/net/ipv4/ip_forward + if [ "${LXD_IPV4_NAT}" = "true" ]; then + iptables "${use_iptables_lock}" -t nat -A POSTROUTING -s "${LXD_IPV4_NETWORK}" ! -d "${LXD_IPV4_NETWORK}" -j MASQUERADE + fi + LXD_IPV4_ARG="--listen-address ${LXD_IPV4_ADDR} --dhcp-range ${LXD_IPV4_DHCP_RANGE} --dhcp-lease-max=${LXD_IPV4_DHCP_MAX}" + fi + + LXD_IPV6_ARG="" + if [ -n "${LXD_IPV6_ADDR}" ] && [ -n "${LXD_IPV6_MASK}" ] && [ -n "${LXD_IPV6_NETWORK}" ]; then + echo 1 > /proc/sys/net/ipv6/conf/all/forwarding + echo 0 > "/proc/sys/net/ipv6/conf/${LXD_BRIDGE}/autoconf" + echo 0 > "/proc/sys/net/ipv6/conf/${LXD_BRIDGE}/accept_dad" || true + ip -6 addr add dev "${LXD_BRIDGE}" "${LXD_IPV6_ADDR}/${LXD_IPV6_MASK}" + if [ "${LXD_IPV6_NAT}" = "true" ]; then + ip6tables "${use_iptables_lock}" -t nat -A POSTROUTING -s "${LXD_IPV6_NETWORK}" ! -d "${LXD_IPV6_NETWORK}" -j MASQUERADE + fi + LXD_IPV6_ARG="--dhcp-range=${LXD_IPV6_ADDR},ra-only --listen-address ${LXD_IPV6_ADDR}" + fi + + iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p udp --dport 67 -j ACCEPT + iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p tcp --dport 67 -j ACCEPT + iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p udp --dport 53 -j ACCEPT + iptables "${use_iptables_lock}" -I INPUT -i "${LXD_BRIDGE}" -p tcp --dport 53 -j ACCEPT + iptables "${use_iptables_lock}" -I FORWARD -i "${LXD_BRIDGE}" -j ACCEPT + iptables "${use_iptables_lock}" -I FORWARD -o "${LXD_BRIDGE}" -j ACCEPT + iptables "${use_iptables_lock}" -t mangle -A POSTROUTING -o "${LXD_BRIDGE}" -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill + + LXD_DOMAIN_ARG="" + if [ -n "${LXD_DOMAIN}" ]; then + LXD_DOMAIN_ARG="-s ${LXD_DOMAIN} -S /${LXD_DOMAIN}/" + fi + + LXD_CONFILE_ARG="" + if [ -n "${LXD_CONFILE}" ]; then + LXD_CONFILE_ARG="--conf-file=${LXD_CONFILE}" + fi + + # https://lists.linuxcontainers.org/pipermail/lxc-devel/2014-October/010561.html + for DNSMASQ_USER in lxd-dnsmasq dnsmasq nobody + do + if getent passwd "${DNSMASQ_USER}" >/dev/null; then + break + fi + done + + # shellcheck disable=SC2086 + dnsmasq ${LXD_CONFILE_ARG} ${LXD_DOMAIN_ARG} -u "${DNSMASQ_USER}" --strict-order --bind-interfaces --pid-file="${varrun}/dnsmasq.pid" --dhcp-no-override --except-interface=lo --interface="${LXD_BRIDGE}" --dhcp-leasefile="${varlib}/misc/dnsmasq.${LXD_BRIDGE}.leases" --dhcp-authoritative ${LXD_IPV4_ARG} ${LXD_IPV6_ARG} || cleanup + + if [ "${LXD_IPV6_PROXY}" = "true" ]; then + lxd-bridge-proxy -addr="[fe80::1%${LXD_BRIDGE}]:3128" & + PID=$! 
+ echo "${PID}" > "${varrun}/proxy.pid" + fi + + touch "${varrun}/network_up" + FAILED=0 +} + +stop() { + [ "x${USE_LXD_BRIDGE}" = "xtrue" ] || { exit 0; } + + [ -f "${varrun}/network_up" ] || [ "${1}" = "force" ] || { echo "lxd-bridge isn't running"; exit 1; } + + if [ -d /sys/class/net/${LXD_BRIDGE} ]; then + ifdown ${LXD_BRIDGE} + iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p udp --dport 67 -j ACCEPT + iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p tcp --dport 67 -j ACCEPT + iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p udp --dport 53 -j ACCEPT + iptables ${use_iptables_lock} -D INPUT -i ${LXD_BRIDGE} -p tcp --dport 53 -j ACCEPT + iptables ${use_iptables_lock} -D FORWARD -i ${LXD_BRIDGE} -j ACCEPT + iptables ${use_iptables_lock} -D FORWARD -o ${LXD_BRIDGE} -j ACCEPT + iptables ${use_iptables_lock} -t mangle -D POSTROUTING -o ${LXD_BRIDGE} -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill + + if [ -n "${LXD_IPV4_NETWORK}" ] && [ "${LXD_IPV4_NAT}" = "true" ]; then + iptables ${use_iptables_lock} -t nat -D POSTROUTING -s ${LXD_IPV4_NETWORK} ! -d ${LXD_IPV4_NETWORK} -j MASQUERADE + fi + + if [ -n "${LXD_IPV6_NETWORK}" ] && [ "${LXD_IPV6_NAT}" = "true" ]; then + ip6tables ${use_iptables_lock} -t nat -D POSTROUTING -s ${LXD_IPV6_NETWORK} ! -d ${LXD_IPV6_NETWORK} -j MASQUERADE + fi + + pid=$(cat "${varrun}/dnsmasq.pid" 2>/dev/null) && kill -9 "${pid}" + rm -f "${varrun}/dnsmasq.pid" + + pid=$(cat "${varrun}/proxy.pid" 2>/dev/null) && kill -9 "${pid}" + rm -f "${varrun}/proxy.pid" + # if ${LXD_BRIDGE} has attached interfaces, don't destroy the bridge + ls /sys/class/net/${LXD_BRIDGE}/brif/* > /dev/null 2>&1 || ip link delete "${LXD_BRIDGE}" + fi + + rm -f "${varrun}/network_up" +} + +# See how we were called. +case "${1}" in + start) + start + ;; + + stop) + stop + ;; + + restart|reload|force-reload) + ${0} stop + ${0} start + ;; + + *) + echo "Usage: ${0} {start|stop|restart|reload|force-reload}" + exit 2 +esac + +exit $? 
=== added directory 'src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy' === added file 'src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go' --- src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd-bridge/lxd-bridge-proxy/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,25 @@ +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "net/http/httputil" +) + +func NewProxy() *httputil.ReverseProxy { + director := func(req *http.Request) { + if req.Method == "CONNECT" { + fmt.Printf("CONNECT: %s\n", req.Host) + } + } + return &httputil.ReverseProxy{Director: director} +} + +func main() { + addr := flag.String("addr", "[fe80::1%lxdbr0]:3128", "proxy listen address") + flag.Parse() + + log.Fatal(http.ListenAndServe(*addr, NewProxy())) +} === added file 'src/github.com/lxc/lxd/lxd/api_1.0.go' --- src/github.com/lxc/lxd/lxd/api_1.0.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/api_1.0.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,256 @@ +package main + +import ( + "encoding/pem" + "fmt" + "net/http" + "os" + "syscall" + + "gopkg.in/lxc/go-lxc.v2" + + "github.com/lxc/lxd/shared" +) + +var api10 = []Command{ + containersCmd, + containerCmd, + containerStateCmd, + containerFileCmd, + containerLogsCmd, + containerLogCmd, + containerSnapshotsCmd, + containerSnapshotCmd, + containerExecCmd, + aliasCmd, + aliasesCmd, + eventsCmd, + imageCmd, + imagesCmd, + imagesExportCmd, + imagesSecretCmd, + operationsCmd, + operationCmd, + operationWait, + operationWebsocket, + networksCmd, + networkCmd, + api10Cmd, + certificatesCmd, + certificateFingerprintCmd, + profilesCmd, + profileCmd, +} + +func api10Get(d *Daemon, r *http.Request) Response { + body := shared.Jmap{ + "api_extensions": []string{}, + "api_status": "development", + "api_version": shared.APIVersion, + } + + if d.isTrustedClient(r) { + body["auth"] = "trusted" + + /* + * Based on: https://groups.google.com/forum/#!topic/golang-nuts/Jel8Bb-YwX8 + * there is really no better way to do this, which is + * unfortunate. Also, we ditch the more accepted CharsToString + * version in that thread, since it doesn't seem as portable, + * viz. github issue #206. 
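+		 * (Each Utsname field is a fixed-size integer array; the loops
+		 * below copy bytes up to the first NUL to build the matching
+		 * Go strings.)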
+ */ + uname := syscall.Utsname{} + if err := syscall.Uname(&uname); err != nil { + return InternalError(err) + } + + kernel := "" + for _, c := range uname.Sysname { + if c == 0 { + break + } + kernel += string(byte(c)) + } + + kernelVersion := "" + for _, c := range uname.Release { + if c == 0 { + break + } + kernelVersion += string(byte(c)) + } + + kernelArchitecture := "" + for _, c := range uname.Machine { + if c == 0 { + break + } + kernelArchitecture += string(byte(c)) + } + + addresses, err := d.ListenAddresses() + if err != nil { + return InternalError(err) + } + + var certificate string + if len(d.tlsConfig.Certificates) != 0 { + certificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: d.tlsConfig.Certificates[0].Certificate[0]})) + } + + architectures := []string{} + + for _, architecture := range d.architectures { + architectureName, err := shared.ArchitectureName(architecture) + if err != nil { + return InternalError(err) + } + architectures = append(architectures, architectureName) + } + + env := shared.Jmap{ + "addresses": addresses, + "architectures": architectures, + "certificate": certificate, + "driver": "lxc", + "driver_version": lxc.Version(), + "kernel": kernel, + "kernel_architecture": kernelArchitecture, + "kernel_version": kernelVersion, + "storage": d.Storage.GetStorageTypeName(), + "storage_version": d.Storage.GetStorageTypeVersion(), + "server": "lxd", + "server_pid": os.Getpid(), + "server_version": shared.Version} + + body["environment"] = env + body["public"] = false + + serverConfig, err := d.ConfigValuesGet() + if err != nil { + return InternalError(err) + } + + config := shared.Jmap{} + + for key, value := range serverConfig { + if key == "core.trust_password" { + config[key] = true + } else { + config[key] = value + } + } + + body["config"] = config + } else { + body["auth"] = "untrusted" + body["public"] = false + } + + return SyncResponse(true, body) +} + +type apiPut struct { + Config shared.Jmap `json:"config"` +} + +func api10Put(d *Daemon, r *http.Request) Response { + oldConfig, err := dbConfigValuesGet(d.db) + if err != nil { + return InternalError(err) + } + + req := apiPut{} + + if err := shared.ReadToJSON(r.Body, &req); err != nil { + return BadRequest(err) + } + + // Diff the configs + changedConfig := map[string]interface{}{} + for key, value := range oldConfig { + if req.Config[key] != value { + changedConfig[key] = req.Config[key] + } + } + + for key, value := range req.Config { + if oldConfig[key] != value { + changedConfig[key] = req.Config[key] + } + } + + for key, value := range changedConfig { + if value == nil { + value = "" + } + + if !d.ConfigKeyIsValid(key) { + return BadRequest(fmt.Errorf("Bad server config key: '%s'", key)) + } + + if key == "core.trust_password" { + if value == true { + continue + } + + err := d.PasswordSet(value.(string)) + if err != nil { + return InternalError(err) + } + } else if key == "storage.lvm_vg_name" { + err := storageLVMSetVolumeGroupNameConfig(d, value.(string)) + if err != nil { + return InternalError(err) + } + if err = d.SetupStorageDriver(); err != nil { + return InternalError(err) + } + } else if key == "storage.lvm_thinpool_name" { + err := storageLVMSetThinPoolNameConfig(d, value.(string)) + if err != nil { + return InternalError(err) + } + } else if key == "storage.lvm_fstype" { + err := storageLVMSetFsTypeConfig(d, value.(string)) + if err != nil { + return InternalError(err) + } + } else if key == "storage.zfs_pool_name" { + err := storageZFSSetPoolNameConfig(d, 
value.(string)) + if err != nil { + return InternalError(err) + } + if err = d.SetupStorageDriver(); err != nil { + return InternalError(err) + } + } else if key == "core.https_address" { + old_address, err := d.ConfigValueGet("core.https_address") + if err != nil { + return InternalError(err) + } + + err = d.UpdateHTTPsPort(old_address, value.(string)) + if err != nil { + return InternalError(err) + } + + err = d.ConfigValueSet(key, value.(string)) + if err != nil { + return InternalError(err) + } + } else { + err := d.ConfigValueSet(key, value.(string)) + if err != nil { + return InternalError(err) + } + if key == "images.remote_cache_expiry" { + d.pruneChan <- true + } + } + } + + return EmptySyncResponse +} + +var api10Cmd = Command{name: "", untrustedGet: true, get: api10Get, put: api10Put} === added file 'src/github.com/lxc/lxd/lxd/api_internal.go' --- src/github.com/lxc/lxd/lxd/api_internal.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/api_internal.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,67 @@ +package main + +import ( + "net/http" + "strconv" + + "github.com/gorilla/mux" +) + +var apiInternal = []Command{ + internalShutdownCmd, + internalContainerOnStartCmd, + internalContainerOnStopCmd, +} + +func internalShutdown(d *Daemon, r *http.Request) Response { + d.shutdownChan <- true + + return EmptySyncResponse +} + +func internalContainerOnStart(d *Daemon, r *http.Request) Response { + id, err := strconv.Atoi(mux.Vars(r)["id"]) + if err != nil { + return SmartError(err) + } + + c, err := containerLoadById(d, id) + if err != nil { + return SmartError(err) + } + + err = c.OnStart() + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +func internalContainerOnStop(d *Daemon, r *http.Request) Response { + id, err := strconv.Atoi(mux.Vars(r)["id"]) + if err != nil { + return SmartError(err) + } + + target := r.FormValue("target") + if target == "" { + target = "unknown" + } + + c, err := containerLoadById(d, id) + if err != nil { + return SmartError(err) + } + + err = c.OnStop(target) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +var internalShutdownCmd = Command{name: "shutdown", put: internalShutdown} +var internalContainerOnStartCmd = Command{name: "containers/{id}/onstart", get: internalContainerOnStart} +var internalContainerOnStopCmd = Command{name: "containers/{id}/onstop", get: internalContainerOnStop} === added file 'src/github.com/lxc/lxd/lxd/apparmor.go' --- src/github.com/lxc/lxd/lxd/apparmor.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/apparmor.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,206 @@ +package main + +import ( + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "strings" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +const ( + APPARMOR_CMD_LOAD = "r" + APPARMOR_CMD_UNLOAD = "R" + APPARMOR_CMD_PARSE = "Q" +) + +var aaPath = shared.VarPath("security", "apparmor") + +const NESTING_AA_PROFILE = ` + pivot_root, + mount /var/lib/lxd/shmounts/ -> /var/lib/lxd/shmounts/, + mount none -> /var/lib/lxd/shmounts/, + mount fstype=proc -> /usr/lib/*/lxc/**, + mount fstype=sysfs -> /usr/lib/*/lxc/**, + mount options=(rw,bind), + mount options=(rw,rbind), + deny /dev/.lxd/proc/** rw, + deny /dev/.lxd/sys/** rw, + mount options=(rw,make-rshared), + + # there doesn't seem to be a way to ask for: + # mount options=(ro,nosuid,nodev,noexec,remount,bind), + # as we always get mount to $cdir/proc/sys with those flags denied + # So allow all 
mounts until that is straightened out:
+  mount,
+  mount options=bind /var/lib/lxd/shmounts/** -> /var/lib/lxd/**,
+  # lxc-container-default-with-nesting also inherited these
+  # from start-container, and seems to need them.
+  ptrace,
+  signal,
+`
+
+const DEFAULT_AA_PROFILE = `
+#include <tunables/global>
+profile "%s" flags=(attach_disconnected,mediate_deleted) {
+  #include <abstractions/lxc/container-base>
+
+  # Special exception for cgroup namespaces
+  %s
+
+  # user input raw.apparmor below here
+  %s
+
+  # nesting support goes here if needed
+  %s
+  change_profile -> "%s",
+}`
+
+func AAProfileFull(c container) string {
+	lxddir := shared.VarPath("")
+	if len(c.Name())+len(lxddir)+7 >= 253 {
+		hash := sha256.New()
+		io.WriteString(hash, lxddir)
+		lxddir = fmt.Sprintf("%x", hash.Sum(nil))
+	}
+
+	return fmt.Sprintf("lxd-%s_<%s>", c.Name(), lxddir)
+}
+
+func AAProfileShort(c container) string {
+	return fmt.Sprintf("lxd-%s", c.Name())
+}
+
+func AAProfileCgns() string {
+	if shared.PathExists("/proc/self/ns/cgroup") {
+		return "  mount fstype=cgroup -> /sys/fs/cgroup/**,"
+	}
+	return ""
+}
+
+// getAAProfileContent generates the apparmor profile template from the given
+// container. This includes the stock lxc includes as well as stuff from
+// raw.apparmor.
+func getAAProfileContent(c container) string {
+	rawApparmor, ok := c.ExpandedConfig()["raw.apparmor"]
+	if !ok {
+		rawApparmor = ""
+	}
+
+	nesting := ""
+	if c.IsNesting() {
+		nesting = NESTING_AA_PROFILE
+	}
+
+	return fmt.Sprintf(DEFAULT_AA_PROFILE, AAProfileFull(c), AAProfileCgns(), rawApparmor, nesting, AAProfileFull(c))
+}
+
+func runApparmor(command string, c container) error {
+	if !aaAvailable {
+		return nil
+	}
+
+	cmd := exec.Command("apparmor_parser", []string{
+		fmt.Sprintf("-%sWL", command),
+		path.Join(aaPath, "cache"),
+		path.Join(aaPath, "profiles", AAProfileShort(c)),
+	}...)
+
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		shared.Log.Error("Running apparmor",
+			log.Ctx{"action": command, "output": string(output), "err": err})
+	}
+
+	return err
+}
+
+// Ensure that the container's policy is loaded into the kernel so the
+// container can boot.
+func AALoadProfile(c container) error {
+	if !aaAdmin {
+		return nil
+	}
+
+	/* In order to avoid forcing a profile parse (potentially slow) on
+	 * every container start, let's use apparmor's binary policy cache,
+	 * which checks mtime of the files to figure out if the policy needs to
+	 * be regenerated.
+	 *
+	 * Since it uses mtimes, we shouldn't just always write out our local
+	 * apparmor template; instead we should check to see whether the
+	 * template is the same as ours. If it isn't we should write our
+	 * version out so that the new changes are reflected and we definitely
+	 * force a recompile.
+	 */
+	profile := path.Join(aaPath, "profiles", AAProfileShort(c))
+	content, err := ioutil.ReadFile(profile)
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+
+	updated := getAAProfileContent(c)
+
+	if string(content) != string(updated) {
+		if err := os.MkdirAll(path.Join(aaPath, "cache"), 0700); err != nil {
+			return err
+		}
+
+		if err := os.MkdirAll(path.Join(aaPath, "profiles"), 0700); err != nil {
+			return err
+		}
+
+		if err := ioutil.WriteFile(profile, []byte(updated), 0600); err != nil {
+			return err
+		}
+	}
+
+	return runApparmor(APPARMOR_CMD_LOAD, c)
+}
+
+// Ensure that the container's policy is unloaded to free kernel memory. This
+// does not delete the policy from disk or cache.
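+// (Concretely it runs "apparmor_parser -RWL" on the container's profile,
+// via runApparmor with APPARMOR_CMD_UNLOAD.)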
+func AAUnloadProfile(c container) error {
+	if !aaAdmin {
+		return nil
+	}
+
+	return runApparmor(APPARMOR_CMD_UNLOAD, c)
+}
+
+// Parse the profile without loading it into the kernel.
+func AAParseProfile(c container) error {
+	if !aaAvailable {
+		return nil
+	}
+
+	return runApparmor(APPARMOR_CMD_PARSE, c)
+}
+
+// Delete the policy from cache/disk.
+func AADeleteProfile(c container) {
+	if !aaAdmin {
+		return
+	}
+
+	/* It's ok if these deletes fail: if the container was never started,
+	 * we'll have never written a profile or cached it.
+	 */
+	os.Remove(path.Join(aaPath, "cache", AAProfileShort(c)))
+	os.Remove(path.Join(aaPath, "profiles", AAProfileShort(c)))
+}
+
+// What's the current apparmor profile
+func aaProfile() string {
+	contents, err := ioutil.ReadFile("/proc/self/attr/current")
+	if err == nil {
+		return strings.TrimSpace(string(contents))
+	}
+	return ""
+}
=== added file 'src/github.com/lxc/lxd/lxd/certificates.go'
--- src/github.com/lxc/lxd/lxd/certificates.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/certificates.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,222 @@
+package main
+
+import (
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"net"
+	"net/http"
+
+	"github.com/gorilla/mux"
+
+	"github.com/lxc/lxd/shared"
+)
+
+func certGenerateFingerprint(cert *x509.Certificate) string {
+	return fmt.Sprintf("%x", sha256.Sum256(cert.Raw))
+}
+
+func certificatesGet(d *Daemon, r *http.Request) Response {
+	recursion := d.isRecursionRequest(r)
+
+	if recursion {
+		certResponses := []shared.CertInfo{}
+
+		baseCerts, err := dbCertsGet(d.db)
+		if err != nil {
+			return SmartError(err)
+		}
+		for _, baseCert := range baseCerts {
+			resp := shared.CertInfo{}
+			resp.Fingerprint = baseCert.Fingerprint
+			resp.Certificate = baseCert.Certificate
+			if baseCert.Type == 1 {
+				resp.Type = "client"
+			} else {
+				resp.Type = "unknown"
+			}
+			certResponses = append(certResponses, resp)
+		}
+		return SyncResponse(true, certResponses)
+	}
+
+	body := []string{}
+	for _, cert := range d.clientCerts {
+		fingerprint := fmt.Sprintf("/%s/certificates/%s", shared.APIVersion, certGenerateFingerprint(&cert))
+		body = append(body, fingerprint)
+	}
+
+	return SyncResponse(true, body)
+}
+
+type certificatesPostBody struct {
+	Type        string `json:"type"`
+	Certificate string `json:"certificate"`
+	Name        string `json:"name"`
+	Password    string `json:"password"`
+}
+
+func readSavedClientCAList(d *Daemon) {
+	d.clientCerts = []x509.Certificate{}
+
+	dbCerts, err := dbCertsGet(d.db)
+	if err != nil {
+		shared.Logf("Error reading certificates from database: %s", err)
+		return
+	}
+
+	for _, dbCert := range dbCerts {
+		certBlock, _ := pem.Decode([]byte(dbCert.Certificate))
+		cert, err := x509.ParseCertificate(certBlock.Bytes)
+		if err != nil {
+			shared.Logf("Error reading certificate for %s: %s", dbCert.Name, err)
+			continue
+		}
+		d.clientCerts = append(d.clientCerts, *cert)
+	}
+}
+
+func saveCert(d *Daemon, host string, cert *x509.Certificate) error {
+	baseCert := new(dbCertInfo)
+	baseCert.Fingerprint = certGenerateFingerprint(cert)
+	baseCert.Type = 1
+	baseCert.Name = host
+	baseCert.Certificate = string(
+		pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}),
+	)
+
+	return dbCertSave(d.db, baseCert)
+}
+
+func certificatesPost(d *Daemon, r *http.Request) Response {
+	req := certificatesPostBody{}
+
+	if err := shared.ReadToJSON(r.Body, &req); err != nil {
+		return BadRequest(err)
+	}
+
+	if req.Type != "client" {
+		return BadRequest(fmt.Errorf("Unknown request type %s",
req.Type)) + } + + var cert *x509.Certificate + var name string + if req.Certificate != "" { + + data, err := base64.StdEncoding.DecodeString(req.Certificate) + if err != nil { + return BadRequest(err) + } + + cert, err = x509.ParseCertificate(data) + if err != nil { + return BadRequest(err) + } + name = req.Name + + } else if r.TLS != nil { + + if len(r.TLS.PeerCertificates) < 1 { + return BadRequest(fmt.Errorf("No client certificate provided")) + } + cert = r.TLS.PeerCertificates[len(r.TLS.PeerCertificates)-1] + + remoteHost, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return InternalError(err) + } + + name = remoteHost + } else { + return BadRequest(fmt.Errorf("Can't use TLS data on non-TLS link")) + } + + fingerprint := certGenerateFingerprint(cert) + for _, existingCert := range d.clientCerts { + if fingerprint == certGenerateFingerprint(&existingCert) { + return EmptySyncResponse + } + } + + if !d.isTrustedClient(r) && !d.PasswordCheck(req.Password) { + return Forbidden + } + + err := saveCert(d, name, cert) + if err != nil { + return SmartError(err) + } + + d.clientCerts = append(d.clientCerts, *cert) + + return EmptySyncResponse +} + +var certificatesCmd = Command{ + "certificates", + false, + true, + certificatesGet, + nil, + certificatesPost, + nil, +} + +func certificateFingerprintGet(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + + cert, err := doCertificateGet(d, fingerprint) + if err != nil { + return SmartError(err) + } + + return SyncResponse(true, cert) +} + +func doCertificateGet(d *Daemon, fingerprint string) (shared.CertInfo, error) { + resp := shared.CertInfo{} + + dbCertInfo, err := dbCertGet(d.db, fingerprint) + if err != nil { + return resp, err + } + + resp.Fingerprint = dbCertInfo.Fingerprint + resp.Certificate = dbCertInfo.Certificate + if dbCertInfo.Type == 1 { + resp.Type = "client" + } else { + resp.Type = "unknown" + } + + return resp, nil +} + +func certificateFingerprintDelete(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + + certInfo, err := dbCertGet(d.db, fingerprint) + if err != nil { + return NotFound + } + + err = dbCertDelete(d.db, certInfo.Fingerprint) + if err != nil { + return SmartError(err) + } + readSavedClientCAList(d) + + return EmptySyncResponse +} + +var certificateFingerprintCmd = Command{ + "certificates/{fingerprint}", + false, + false, + certificateFingerprintGet, + nil, + nil, + certificateFingerprintDelete, +} === added file 'src/github.com/lxc/lxd/lxd/container.go' --- src/github.com/lxc/lxd/lxd/container.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,661 @@ +package main + +import ( + "fmt" + "io" + "os" + "strings" + "time" + + "gopkg.in/lxc/go-lxc.v2" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +// Helper functions +func containerPath(name string, isSnapshot bool) string { + if isSnapshot { + return shared.VarPath("snapshots", name) + } + + return shared.VarPath("containers", name) +} + +func containerValidName(name string) error { + if strings.Contains(name, shared.SnapshotDelimiter) { + return fmt.Errorf( + "The character '%s' is reserved for snapshots.", + shared.SnapshotDelimiter) + } + + if !shared.ValidHostname(name) { + return fmt.Errorf("Container name isn't a valid hostname.") + } + + return nil +} + +func containerValidConfigKey(k string) bool { + switch k { + case "boot.autostart": + return true + case "boot.autostart.delay": + 
return true + case "boot.autostart.priority": + return true + case "limits.cpu": + return true + case "limits.cpu.allowance": + return true + case "limits.cpu.priority": + return true + case "limits.disk.priority": + return true + case "limits.memory": + return true + case "limits.memory.enforce": + return true + case "limits.memory.swap": + return true + case "limits.memory.swap.priority": + return true + case "limits.network.priority": + return true + case "limits.processes": + return true + case "linux.kernel_modules": + return true + case "security.privileged": + return true + case "security.nesting": + return true + case "raw.apparmor": + return true + case "raw.lxc": + return true + case "volatile.base_image": + return true + case "volatile.last_state.idmap": + return true + case "volatile.last_state.power": + return true + } + + if strings.HasPrefix(k, "volatile.") { + if strings.HasSuffix(k, ".hwaddr") { + return true + } + + if strings.HasSuffix(k, ".name") { + return true + } + } + + if strings.HasPrefix(k, "environment.") { + return true + } + + if strings.HasPrefix(k, "user.") { + return true + } + + return false +} + +func containerValidDeviceConfigKey(t, k string) bool { + if k == "type" { + return true + } + + switch t { + case "unix-char": + switch k { + case "gid": + return true + case "major": + return true + case "minor": + return true + case "mode": + return true + case "path": + return true + case "uid": + return true + default: + return false + } + case "unix-block": + switch k { + case "gid": + return true + case "major": + return true + case "minor": + return true + case "mode": + return true + case "path": + return true + case "uid": + return true + default: + return false + } + case "nic": + switch k { + case "limits.max": + return true + case "limits.ingress": + return true + case "limits.egress": + return true + case "host_name": + return true + case "hwaddr": + return true + case "mtu": + return true + case "name": + return true + case "nictype": + return true + case "parent": + return true + default: + return false + } + case "disk": + switch k { + case "limits.max": + return true + case "limits.read": + return true + case "limits.write": + return true + case "optional": + return true + case "path": + return true + case "readonly": + return true + case "size": + return true + case "source": + return true + default: + return false + } + case "none": + return false + default: + return false + } +} + +func containerValidConfig(config map[string]string, profile bool, expanded bool) error { + if config == nil { + return nil + } + + for k, _ := range config { + if profile && strings.HasPrefix(k, "volatile.") { + return fmt.Errorf("Volatile keys can only be set on containers.") + } + + if k == "raw.lxc" { + err := lxcValidConfig(config["raw.lxc"]) + if err != nil { + return err + } + } + + if !containerValidConfigKey(k) { + return fmt.Errorf("Bad key: %s", k) + } + } + + return nil +} + +func containerValidDevices(devices shared.Devices, profile bool, expanded bool) error { + // Empty device list + if devices == nil { + return nil + } + + // Check each device individually + for _, m := range devices { + for k, _ := range m { + if !containerValidDeviceConfigKey(m["type"], k) { + return fmt.Errorf("Invalid device configuration key for %s: %s", m["type"], k) + } + } + + if m["type"] == "nic" { + if m["nictype"] == "" { + return fmt.Errorf("Missing nic type") + } + + if !shared.StringInSlice(m["nictype"], []string{"bridged", "physical", "p2p", "macvlan"}) { + return 
fmt.Errorf("Bad nic type: %s", m["nictype"]) + } + + if shared.StringInSlice(m["nictype"], []string{"bridged", "physical", "macvlan"}) && m["parent"] == "" { + return fmt.Errorf("Missing parent for %s type nic.", m["nictype"]) + } + } else if m["type"] == "disk" { + if m["path"] == "" { + return fmt.Errorf("Disk entry is missing the required \"path\" property.") + } + + if m["source"] == "" && m["path"] != "/" { + return fmt.Errorf("Disk entry is missing the required \"source\" property.") + } + + if m["path"] == "/" && m["source"] != "" { + return fmt.Errorf("Root disk entry may not have a \"source\" property set.") + } + + if m["size"] != "" && m["path"] != "/" { + return fmt.Errorf("Only the root disk may have a size quota.") + } + } else if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { + if m["path"] == "" { + return fmt.Errorf("Unix device entry is missing the required \"path\" property.") + } + } else if m["type"] == "none" { + continue + } else { + return fmt.Errorf("Invalid device type: %s", m["type"]) + } + } + + // Checks on the expanded config + if expanded { + foundRootfs := false + for _, m := range devices { + if m["type"] == "disk" && m["path"] == "/" { + foundRootfs = true + } + } + + if !foundRootfs { + return fmt.Errorf("Container is lacking rootfs entry") + } + } + + return nil +} + +// The container arguments +type containerArgs struct { + // Don't set manually + Id int + + Architecture int + BaseImage string + Config map[string]string + CreationDate time.Time + Ctype containerType + Devices shared.Devices + Ephemeral bool + Name string + Profiles []string + Stateful bool +} + +// The container interface +type container interface { + // Container actions + Freeze() error + Shutdown(timeout time.Duration) error + Start(stateful bool) error + Stop(stateful bool) error + Unfreeze() error + + // Snapshots & migration + Restore(sourceContainer container) error + Checkpoint(opts lxc.CheckpointOptions) error + StartFromMigration(imagesDir string) error + Snapshots() ([]container, error) + + // Config handling + Rename(newName string) error + Update(newConfig containerArgs, userRequested bool) error + + Delete() error + Export(w io.Writer) error + + // Live configuration + CGroupGet(key string) (string, error) + CGroupSet(key string, value string) error + ConfigKeySet(key string, value string) error + + // File handling + FilePull(srcpath string, dstpath string) error + FilePush(srcpath string, dstpath string, uid int, gid int, mode os.FileMode) error + + // Status + Render() (*shared.ContainerInfo, error) + RenderState() (*shared.ContainerState, error) + IsPrivileged() bool + IsRunning() bool + IsFrozen() bool + IsEphemeral() bool + IsSnapshot() bool + IsStateful() bool + IsNesting() bool + + // Hooks + OnStart() error + OnStop(target string) error + + // Properties + Id() int + Name() string + Architecture() int + CreationDate() time.Time + ExpandedConfig() map[string]string + ExpandedDevices() shared.Devices + LocalConfig() map[string]string + LocalDevices() shared.Devices + Profiles() []string + InitPID() int + State() string + + // Paths + Path() string + RootfsPath() string + TemplatesPath() string + StatePath() string + LogFilePath() string + LogPath() string + + // FIXME: Those should be internal functions + LXContainerGet() *lxc.Container + StorageStart() error + StorageStop() error + Storage() storage + IdmapSet() *shared.IdmapSet + LastIdmapSet() (*shared.IdmapSet, error) + TemplateApply(trigger string) error + Daemon() *Daemon +} + +// 
Loader functions
+func containerCreateAsEmpty(d *Daemon, args containerArgs) (container, error) {
+	// Create the container
+	c, err := containerCreateInternal(d, args)
+	if err != nil {
+		return nil, err
+	}
+
+	// Now create the empty storage
+	if err := c.Storage().ContainerCreate(c); err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	// Apply any post-storage configuration
+	err = containerConfigureInternal(c)
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func containerCreateEmptySnapshot(d *Daemon, args containerArgs) (container, error) {
+	// Create the snapshot
+	c, err := containerCreateInternal(d, args)
+	if err != nil {
+		return nil, err
+	}
+
+	// Now create the empty snapshot
+	if err := c.Storage().ContainerSnapshotCreateEmpty(c); err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func containerCreateFromImage(d *Daemon, args containerArgs, hash string) (container, error) {
+	// Create the container
+	c, err := containerCreateInternal(d, args)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := dbImageLastAccessUpdate(d.db, hash, time.Now().UTC()); err != nil {
+		return nil, fmt.Errorf("Error updating image last use date: %s", err)
+	}
+
+	// Now create the storage from an image
+	if err := c.Storage().ContainerCreateFromImage(c, hash); err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	// Apply any post-storage configuration
+	err = containerConfigureInternal(c)
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func containerCreateAsCopy(d *Daemon, args containerArgs, sourceContainer container) (container, error) {
+	// Create the container
+	c, err := containerCreateInternal(d, args)
+	if err != nil {
+		return nil, err
+	}
+
+	// Now clone the storage
+	if err := c.Storage().ContainerCopy(c, sourceContainer); err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	// Apply any post-storage configuration
+	err = containerConfigureInternal(c)
+	if err != nil {
+		c.Delete()
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func containerCreateAsSnapshot(d *Daemon, args containerArgs, sourceContainer container) (container, error) {
+	// Create the snapshot
+	c, err := containerCreateInternal(d, args)
+	if err != nil {
+		return nil, err
+	}
+
+	// Deal with state
+	if args.Stateful {
+		stateDir := sourceContainer.StatePath()
+		err = os.MkdirAll(stateDir, 0700)
+		if err != nil {
+			c.Delete()
+			return nil, err
+		}
+
+		if !sourceContainer.IsRunning() {
+			c.Delete()
+			return nil, fmt.Errorf("Container not running, cannot do stateful snapshot")
+		}
+
+		/* TODO: ideally we would freeze here and unfreeze below after
+		 * we've copied the filesystem, to make sure there are no
+		 * changes by the container while snapshotting. Unfortunately
+		 * there is a bug in CRIU where it doesn't leave the container
+		 * in the same state it found it w.r.t. freezing, i.e. CRIU
+		 * freezes too, and then /always/ thaws, even if the container
+		 * was frozen. Until that's fixed, all calls to Unfreeze()
+		 * after snapshotting will fail.
+ */ + + opts := lxc.CheckpointOptions{Directory: stateDir, Stop: false, Verbose: true} + err = sourceContainer.Checkpoint(opts) + err2 := CollectCRIULogFile(sourceContainer, stateDir, "snapshot", "dump") + if err2 != nil { + shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2}) + } + + if err != nil { + return nil, err + } + } + + // Clone the container + if err := sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer); err != nil { + c.Delete() + return nil, err + } + + // Once we're done, remove the state directory + if args.Stateful { + os.RemoveAll(sourceContainer.StatePath()) + } + + return c, nil +} + +func containerCreateInternal(d *Daemon, args containerArgs) (container, error) { + // Set default values + if args.Profiles == nil { + args.Profiles = []string{"default"} + } + + if args.Config == nil { + args.Config = map[string]string{} + } + + if args.BaseImage != "" { + args.Config["volatile.base_image"] = args.BaseImage + } + + if args.Devices == nil { + args.Devices = shared.Devices{} + } + + if args.Architecture == 0 { + args.Architecture = d.architectures[0] + } + + // Validate container name + if args.Ctype == cTypeRegular { + err := containerValidName(args.Name) + if err != nil { + return nil, err + } + } + + // Validate container config + err := containerValidConfig(args.Config, false, false) + if err != nil { + return nil, err + } + + // Validate container devices + err = containerValidDevices(args.Devices, false, false) + if err != nil { + return nil, err + } + + // Validate architecture + _, err = shared.ArchitectureName(args.Architecture) + if err != nil { + return nil, err + } + + // Validate profiles + profiles, err := dbProfiles(d.db) + if err != nil { + return nil, err + } + + for _, profile := range args.Profiles { + if !shared.StringInSlice(profile, profiles) { + return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile) + } + } + + path := containerPath(args.Name, args.Ctype == cTypeSnapshot) + if shared.PathExists(path) { + return nil, fmt.Errorf("The container already exists") + } + + // Wipe any existing log for this container name + os.RemoveAll(shared.LogPath(args.Name)) + + // Create the container entry + id, err := dbContainerCreate(d.db, args) + if err != nil { + return nil, err + } + args.Id = id + + // Read the timestamp from the database + dbArgs, err := dbContainerGet(d.db, args.Name) + if err != nil { + return nil, err + } + args.CreationDate = dbArgs.CreationDate + + return containerLXCCreate(d, args) +} + +func containerConfigureInternal(c container) error { + // Find the root device + for _, m := range c.ExpandedDevices() { + if m["type"] != "disk" || m["path"] != "/" || m["size"] == "" { + continue + } + + size, err := shared.ParseByteSizeString(m["size"]) + if err != nil { + return err + } + + err = c.Storage().ContainerSetQuota(c, size) + if err != nil { + return err + } + + break + } + + return nil +} + +func containerLoadById(d *Daemon, id int) (container, error) { + // Get the DB record + name, err := dbContainerName(d.db, id) + if err != nil { + return nil, err + } + + return containerLoadByName(d, name) +} + +func containerLoadByName(d *Daemon, name string) (container, error) { + // Get the DB record + args, err := dbContainerGet(d.db, name) + if err != nil { + return nil, err + } + + return containerLXCLoad(d, args) +} === added file 'src/github.com/lxc/lxd/lxd/container_delete.go' --- src/github.com/lxc/lxd/lxd/container_delete.go 1970-01-01 00:00:00 +0000 +++ 
src/github.com/lxc/lxd/lxd/container_delete.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +package main + +import ( + "fmt" + "net/http" + + "github.com/gorilla/mux" +) + +func containerDelete(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + if c.IsRunning() { + return BadRequest(fmt.Errorf("container is running")) + } + + rmct := func(op *operation) error { + return c.Delete() + } + + resources := map[string][]string{} + resources["containers"] = []string{name} + + op, err := operationCreate(operationClassTask, resources, nil, rmct, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} === added file 'src/github.com/lxc/lxd/lxd/container_exec.go' --- src/github.com/lxc/lxd/lxd/container_exec.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_exec.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,357 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" + "sync" + + "github.com/gorilla/mux" + "github.com/gorilla/websocket" + "gopkg.in/lxc/go-lxc.v2" + + "github.com/lxc/lxd/shared" +) + +type commandPostContent struct { + Command []string `json:"command"` + WaitForWS bool `json:"wait-for-websocket"` + Interactive bool `json:"interactive"` + Environment map[string]string `json:"environment"` +} + +func runCommand(container *lxc.Container, command []string, options lxc.AttachOptions) (int, error) { + status, err := container.RunCommandStatus(command, options) + if err != nil { + shared.Debugf("Failed running command: %q", err.Error()) + return 0, err + } + + return status, nil +} + +type execWs struct { + command []string + container *lxc.Container + rootUid int + rootGid int + options lxc.AttachOptions + conns map[int]*websocket.Conn + connsLock sync.Mutex + allConnected chan bool + controlConnected chan bool + interactive bool + fds map[int]string +} + +func (s *execWs) Metadata() interface{} { + fds := shared.Jmap{} + for fd, secret := range s.fds { + if fd == -1 { + fds["control"] = secret + } else { + fds[strconv.Itoa(fd)] = secret + } + } + + return shared.Jmap{"fds": fds} +} + +func (s *execWs) Connect(op *operation, r *http.Request, w http.ResponseWriter) error { + secret := r.FormValue("secret") + if secret == "" { + return fmt.Errorf("missing secret") + } + + for fd, fdSecret := range s.fds { + if secret == fdSecret { + conn, err := shared.WebsocketUpgrader.Upgrade(w, r, nil) + if err != nil { + return err + } + + s.connsLock.Lock() + s.conns[fd] = conn + s.connsLock.Unlock() + + if fd == -1 { + s.controlConnected <- true + return nil + } + + for i, c := range s.conns { + if i != -1 && c == nil { + return nil + } + } + s.allConnected <- true + return nil + } + } + + /* If we didn't find the right secret, the user provided a bad one, + * which 403, not 404, since this operation actually exists */ + return os.ErrPermission +} + +func (s *execWs) Do(op *operation) error { + <-s.allConnected + + var err error + var ttys []*os.File + var ptys []*os.File + + if s.interactive { + ttys = make([]*os.File, 1) + ptys = make([]*os.File, 1) + ptys[0], ttys[0], err = shared.OpenPty(s.rootUid, s.rootGid) + s.options.StdinFd = ttys[0].Fd() + s.options.StdoutFd = ttys[0].Fd() + s.options.StderrFd = ttys[0].Fd() + } else { + ttys = make([]*os.File, 3) + ptys = make([]*os.File, 3) + for i := 0; i < len(ttys); i++ { + ptys[i], ttys[i], err = shared.Pipe() + if err != nil { + return 
err + } + } + s.options.StdinFd = ptys[0].Fd() + s.options.StdoutFd = ttys[1].Fd() + s.options.StderrFd = ttys[2].Fd() + } + + controlExit := make(chan bool) + var wgEOF sync.WaitGroup + + if s.interactive { + wgEOF.Add(1) + go func() { + select { + case <-s.controlConnected: + break + + case <-controlExit: + return + } + + for { + mt, r, err := s.conns[-1].NextReader() + if mt == websocket.CloseMessage { + break + } + + if err != nil { + shared.Debugf("Got error getting next reader %s", err) + break + } + + buf, err := ioutil.ReadAll(r) + if err != nil { + shared.Debugf("Failed to read message %s", err) + break + } + + command := shared.ContainerExecControl{} + + if err := json.Unmarshal(buf, &command); err != nil { + shared.Debugf("Failed to unmarshal control socket command: %s", err) + continue + } + + if command.Command == "window-resize" { + winchWidth, err := strconv.Atoi(command.Args["width"]) + if err != nil { + shared.Debugf("Unable to extract window width: %s", err) + continue + } + + winchHeight, err := strconv.Atoi(command.Args["height"]) + if err != nil { + shared.Debugf("Unable to extract window height: %s", err) + continue + } + + err = shared.SetSize(int(ptys[0].Fd()), winchWidth, winchHeight) + if err != nil { + shared.Debugf("Failed to set window size to: %dx%d", winchWidth, winchHeight) + continue + } + } + + if err != nil { + shared.Debugf("Got error writing to writer %s", err) + break + } + } + }() + go func() { + readDone, writeDone := shared.WebsocketMirror(s.conns[0], ptys[0], ptys[0]) + <-readDone + <-writeDone + s.conns[0].Close() + wgEOF.Done() + }() + } else { + wgEOF.Add(len(ttys) - 1) + for i := 0; i < len(ttys); i++ { + go func(i int) { + if i == 0 { + <-shared.WebsocketRecvStream(ttys[i], s.conns[i]) + ttys[i].Close() + } else { + <-shared.WebsocketSendStream(s.conns[i], ptys[i]) + ptys[i].Close() + wgEOF.Done() + } + }(i) + } + } + + cmdResult, cmdErr := runCommand( + s.container, + s.command, + s.options, + ) + + for _, tty := range ttys { + tty.Close() + } + + if s.conns[-1] == nil { + if s.interactive { + controlExit <- true + } + } else { + s.conns[-1].Close() + } + + wgEOF.Wait() + + for _, pty := range ptys { + pty.Close() + } + + metadata := shared.Jmap{"return": cmdResult} + err = op.UpdateMetadata(metadata) + if err != nil { + return err + } + + return cmdErr +} + +func containerExecPost(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + if !c.IsRunning() { + return BadRequest(fmt.Errorf("Container is not running.")) + } + + if c.IsFrozen() { + return BadRequest(fmt.Errorf("Container is frozen.")) + } + + post := commandPostContent{} + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + return BadRequest(err) + } + + if err := json.Unmarshal(buf, &post); err != nil { + return BadRequest(err) + } + + opts := lxc.DefaultAttachOptions + opts.ClearEnv = true + opts.Env = []string{} + + for k, v := range c.ExpandedConfig() { + if strings.HasPrefix(k, "environment.") { + opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v)) + } + } + + if post.Environment != nil { + for k, v := range post.Environment { + if k == "HOME" { + opts.Cwd = v + } + opts.Env = append(opts.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + + if post.WaitForWS { + ws := &execWs{} + ws.fds = map[int]string{} + idmapset := c.IdmapSet() + if idmapset != nil { + ws.rootUid, ws.rootGid = idmapset.ShiftIntoNs(0, 0) + } + ws.conns = 
map[int]*websocket.Conn{} + ws.conns[-1] = nil + ws.conns[0] = nil + if !post.Interactive { + ws.conns[1] = nil + ws.conns[2] = nil + } + ws.allConnected = make(chan bool, 1) + ws.controlConnected = make(chan bool, 1) + ws.interactive = post.Interactive + ws.options = opts + for i := -1; i < len(ws.conns)-1; i++ { + ws.fds[i], err = shared.RandomCryptoString() + if err != nil { + return InternalError(err) + } + } + + ws.command = post.Command + ws.container = c.LXContainerGet() + + resources := map[string][]string{} + resources["containers"] = []string{ws.container.Name()} + + op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) + } + + run := func(op *operation) error { + nullDev, err := os.OpenFile(os.DevNull, os.O_RDWR, 0666) + if err != nil { + return err + } + defer nullDev.Close() + nullfd := nullDev.Fd() + + opts.StdinFd = nullfd + opts.StdoutFd = nullfd + opts.StderrFd = nullfd + + _, cmdErr := runCommand(c.LXContainerGet(), post.Command, opts) + return cmdErr + } + + resources := map[string][]string{} + resources["containers"] = []string{name} + + op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} === added file 'src/github.com/lxc/lxd/lxd/container_file.go' --- src/github.com/lxc/lxd/lxd/container_file.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_file.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,113 @@ +package main + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strconv" + "syscall" + + "github.com/gorilla/mux" + + "github.com/lxc/lxd/shared" +) + +func containerFileHandler(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + path := r.FormValue("path") + if path == "" { + return BadRequest(fmt.Errorf("missing path argument")) + } + + switch r.Method { + case "GET": + return containerFileGet(c, path, r) + case "POST": + return containerFilePut(c, path, r) + default: + return NotFound + } +} + +func containerFileGet(c container, path string, r *http.Request) Response { + /* + * Copy out of the ns to a temporary file, and then use that to serve + * the request from. This prevents us from having to worry about stuff + * like people breaking out of the container root by symlinks or + * ../../../s etc. in the path, since we can just rely on the kernel + * for correctness. 
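+ * + * As an editorial illustration (assuming the usual + * /1.0/containers/<name>/files route), a request with + * path=/etc/../../../etc/passwd is resolved by FilePull inside the + * container's own root, so ".." components can never climb above the + * container rootfs; no string-based path sanitising is needed here. 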
+ */ + temp, err := ioutil.TempFile("", "lxd_forkgetfile_") + if err != nil { + return InternalError(err) + } + defer temp.Close() + + // Pull the file from the container + err = c.FilePull(path, temp.Name()) + if err != nil { + return InternalError(err) + } + + // Get file attributes + fi, err := temp.Stat() + if err != nil { + return SmartError(err) + } + + /* + * Unfortunately, there's no portable way to do this: + * https://groups.google.com/forum/#!topic/golang-nuts/tGYjYyrwsGM + * https://groups.google.com/forum/#!topic/golang-nuts/ywS7xQYJkHY + */ + sb := fi.Sys().(*syscall.Stat_t) + headers := map[string]string{ + "X-LXD-uid": strconv.FormatUint(uint64(sb.Uid), 10), + "X-LXD-gid": strconv.FormatUint(uint64(sb.Gid), 10), + "X-LXD-mode": fmt.Sprintf("%04o", fi.Mode()&os.ModePerm), + } + + // Make a file response struct + files := make([]fileResponseEntry, 1) + files[0].identifier = filepath.Base(path) + files[0].path = temp.Name() + files[0].filename = filepath.Base(path) + + return FileResponse(r, files, headers, true) +} + +func containerFilePut(c container, path string, r *http.Request) Response { + // Extract file ownership and mode from headers + uid, gid, mode := shared.ParseLXDFileHeaders(r.Header) + + // Write file content to a tempfile + temp, err := ioutil.TempFile("", "lxd_forkputfile_") + if err != nil { + return InternalError(err) + } + defer func() { + temp.Close() + os.Remove(temp.Name()) + }() + + _, err = io.Copy(temp, r.Body) + if err != nil { + return InternalError(err) + } + + // Transfer the file into the container + err = c.FilePush(temp.Name(), path, uid, gid, mode) + if err != nil { + return InternalError(err) + } + + return EmptySyncResponse +} === added file 'src/github.com/lxc/lxd/lxd/container_get.go' --- src/github.com/lxc/lxd/lxd/container_get.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_get.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +package main + +import ( + "net/http" + + "github.com/gorilla/mux" +) + +func containerGet(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + state, err := c.Render() + if err != nil { + return InternalError(err) + } + + return SyncResponse(true, state) +} === added file 'src/github.com/lxc/lxd/lxd/container_logs.go' --- src/github.com/lxc/lxd/lxd/container_logs.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_logs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/gorilla/mux" + + "github.com/lxc/lxd/shared" +) + +func containerLogsGet(d *Daemon, r *http.Request) Response { + /* Let's explicitly *not* try to do a containerLoadByName here. In some + * cases (e.g. when container creation failed), the container won't + * exist in the DB but it does have some log files on disk. + * + * However, we should check this name and ensure it's a valid container + * name just so that people can't list arbitrary directories. 
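+ * + * (Editorial note: assuming shared.LogPath simply joins its arguments + * onto the daemon's log directory, an unvalidated name such as + * "../../etc" would let this handler list files outside that tree; + * the containerValidName() check below is what rules such names out.) 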
+ */ + name := mux.Vars(r)["name"] + + if err := containerValidName(name); err != nil { + return BadRequest(err) + } + + result := []string{} + + dents, err := ioutil.ReadDir(shared.LogPath(name)) + if err != nil { + return SmartError(err) + } + + for _, f := range dents { + result = append(result, fmt.Sprintf("/%s/containers/%s/logs/%s", shared.APIVersion, name, f.Name())) + } + + return SyncResponse(true, result) +} + +var containerLogsCmd = Command{ + name: "containers/{name}/logs", + get: containerLogsGet, +} + +func validLogFileName(fname string) bool { + /* Let's just require that the paths be relative, so that we don't have + * to deal with any escaping or whatever. + */ + return fname == "lxc.log" || + fname == "lxc.conf" || + strings.HasPrefix(fname, "migration_") || + strings.HasPrefix(fname, "snapshot_") +} + +func containerLogGet(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + file := mux.Vars(r)["file"] + + if err := containerValidName(name); err != nil { + return BadRequest(err) + } + + if !validLogFileName(file) { + return BadRequest(fmt.Errorf("log file name %s not valid", file)) + } + + ent := fileResponseEntry{ + path: shared.LogPath(name, file), + filename: file, + } + + return FileResponse(r, []fileResponseEntry{ent}, nil, false) +} + +func containerLogDelete(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + file := mux.Vars(r)["file"] + + if err := containerValidName(name); err != nil { + return BadRequest(err) + } + + if !validLogFileName(file) { + return BadRequest(fmt.Errorf("log file name %s not valid", file)) + } + + return SmartError(os.Remove(shared.LogPath(name, file))) +} + +var containerLogCmd = Command{ + name: "containers/{name}/logs/{file}", + get: containerLogGet, + delete: containerLogDelete, +} === added file 'src/github.com/lxc/lxd/lxd/container_lxc.go' --- src/github.com/lxc/lxd/lxd/container_lxc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_lxc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,4237 @@ +package main + +import ( + "archive/tar" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "gopkg.in/flosch/pongo2.v3" + "gopkg.in/lxc/go-lxc.v2" + "gopkg.in/yaml.v2" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +// Global variables +var lxcStoppingContainersLock sync.Mutex +var lxcStoppingContainers map[int]*sync.WaitGroup = make(map[int]*sync.WaitGroup) + +// Helper functions +func lxcSetConfigItem(c *lxc.Container, key string, value string) error { + if c == nil { + return fmt.Errorf("Uninitialized go-lxc struct") + } + + err := c.SetConfigItem(key, value) + if err != nil { + return fmt.Errorf("Failed to set LXC config: %s=%s", key, value) + } + + return nil +} + +func lxcValidConfig(rawLxc string) error { + for _, line := range strings.Split(rawLxc, "\n") { + // Ignore empty lines + if len(line) == 0 { + continue + } + + // Ignore comments + if strings.HasPrefix(line, "#") { + continue + } + + // Ensure the format is valid + membs := strings.SplitN(line, "=", 2) + if len(membs) != 2 { + return fmt.Errorf("Invalid raw.lxc line: %s", line) + } + + key := strings.ToLower(strings.Trim(membs[0], " \t")) + + // Blacklist some keys + if key == "lxc.logfile" { + return fmt.Errorf("Setting lxc.logfile is not allowed") + } + + if strings.HasPrefix(key, "lxc.network.") { + fields := strings.Split(key, ".") + if len(fields) == 4 && 
shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) { + continue + } + + if len(fields) == 5 && shared.StringInSlice(fields[3], []string{"ipv4", "ipv6"}) && fields[4] == "gateway" { + continue + } + + return fmt.Errorf("Only interface-specific ipv4/ipv6 lxc.network keys are allowed") + } + } + + return nil +} + +// Loader functions +func containerLXCCreate(d *Daemon, args containerArgs) (container, error) { + // Create the container struct + c := &containerLXC{ + daemon: d, + id: args.Id, + name: args.Name, + ephemeral: args.Ephemeral, + architecture: args.Architecture, + cType: args.Ctype, + stateful: args.Stateful, + creationDate: args.CreationDate, + profiles: args.Profiles, + localConfig: args.Config, + localDevices: args.Devices, + } + + // No need to detect storage here, it's a new container. + c.storage = d.Storage + + // Load the config + err := c.init() + if err != nil { + c.Delete() + return nil, err + } + + // Look for a rootfs entry + rootfs := false + for _, m := range c.expandedDevices { + if m["type"] == "disk" && m["path"] == "/" { + rootfs = true + break + } + } + + if !rootfs { + deviceName := "root" + for { + if c.expandedDevices[deviceName] == nil { + break + } + + deviceName += "_" + } + + c.localDevices[deviceName] = shared.Device{"type": "disk", "path": "/"} + + updateArgs := containerArgs{ + Architecture: c.architecture, + Config: c.localConfig, + Devices: c.localDevices, + Ephemeral: c.ephemeral, + Profiles: c.profiles, + } + + err = c.Update(updateArgs, false) + if err != nil { + c.Delete() + return nil, err + } + } + + // Validate expanded config + err = containerValidConfig(c.expandedConfig, false, true) + if err != nil { + c.Delete() + return nil, err + } + + err = containerValidDevices(c.expandedDevices, false, true) + if err != nil { + c.Delete() + return nil, err + } + + // Setup initial idmap config + idmap := c.IdmapSet() + var jsonIdmap string + if idmap != nil { + idmapBytes, err := json.Marshal(idmap.Idmap) + if err != nil { + c.Delete() + return nil, err + } + jsonIdmap = string(idmapBytes) + } else { + jsonIdmap = "[]" + } + + err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) + if err != nil { + c.Delete() + return nil, err + } + + return c, nil +} + +func containerLXCLoad(d *Daemon, args containerArgs) (container, error) { + // Create the container struct + c := &containerLXC{ + daemon: d, + id: args.Id, + name: args.Name, + ephemeral: args.Ephemeral, + architecture: args.Architecture, + cType: args.Ctype, + creationDate: args.CreationDate, + profiles: args.Profiles, + localConfig: args.Config, + localDevices: args.Devices, + stateful: args.Stateful} + + // Detect the storage backend + s, err := storageForFilename(d, shared.VarPath("containers", strings.Split(c.name, "/")[0])) + if err != nil { + return nil, err + } + c.storage = s + + // Load the config + err = c.init() + if err != nil { + return nil, err + } + + return c, nil +} + +// The LXC container driver +type containerLXC struct { + // Properties + architecture int + cType containerType + creationDate time.Time + ephemeral bool + id int + name string + stateful bool + + // Config + expandedConfig map[string]string + expandedDevices shared.Devices + fromHook bool + localConfig map[string]string + localDevices shared.Devices + profiles []string + + // Cache + c *lxc.Container + daemon *Daemon + idmapset *shared.IdmapSet + storage storage +} + +func (c *containerLXC) init() error { + // Compute the expanded config and device list + err := c.expandConfig() + if err != nil { + 
return err + } + + err = c.expandDevices() + if err != nil { + return err + } + + // Setup the Idmap + if !c.IsPrivileged() { + if c.daemon.IdmapSet == nil { + return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported.") + } + c.idmapset = c.daemon.IdmapSet + } + + return nil +} + +func (c *containerLXC) initLXC() error { + // Check if being called from a hook + if c.fromHook { + return fmt.Errorf("You can't use go-lxc from inside a LXC hook.") + } + + // Check if already initialized + if c.c != nil { + return nil + } + + // Load the go-lxc struct + cc, err := lxc.NewContainer(c.Name(), c.daemon.lxcpath) + if err != nil { + return err + } + + // Base config + err = lxcSetConfigItem(cc, "lxc.cap.drop", "mac_admin mac_override sys_time sys_module") + if err != nil { + return err + } + + // Set an appropriate /proc, /sys/ and /sys/fs/cgroup + mounts := []string{} + if c.IsPrivileged() && !runningInUserns { + mounts = append(mounts, "proc:mixed") + mounts = append(mounts, "sys:mixed") + } else { + mounts = append(mounts, "proc:rw") + mounts = append(mounts, "sys:rw") + } + + if !shared.PathExists("/proc/self/ns/cgroup") { + mounts = append(mounts, "cgroup:mixed") + } + + err = lxcSetConfigItem(cc, "lxc.mount.auto", strings.Join(mounts, " ")) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.autodev", "1") + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.pts", "1024") + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.mount.entry", "mqueue dev/mqueue mqueue rw,relatime,create=dir,optional") + if err != nil { + return err + } + + for _, mnt := range []string{"/proc/sys/fs/binfmt_misc", "/sys/firmware/efi/efivars", "/sys/fs/fuse/connections", "/sys/fs/pstore", "/sys/kernel/debug", "/sys/kernel/security"} { + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,optional", mnt, strings.TrimPrefix(mnt, "/"))) + if err != nil { + return err + } + } + + // For lxcfs + templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG") + if templateConfDir == "" { + templateConfDir = "/usr/share/lxc/config" + } + + if shared.PathExists(fmt.Sprintf("%s/common.conf.d/", templateConfDir)) { + err = lxcSetConfigItem(cc, "lxc.include", fmt.Sprintf("%s/common.conf.d/", templateConfDir)) + if err != nil { + return err + } + } + + // Configure devices cgroup + if c.IsPrivileged() && !runningInUserns && cgDevicesController { + err = lxcSetConfigItem(cc, "lxc.cgroup.devices.deny", "a") + if err != nil { + return err + } + + for _, dev := range []string{"c *:* m", "b *:* m", "c 5:0 rwm", "c 5:1 rwm", "c 1:5 rwm", "c 1:7 rwm", "c 1:3 rwm", "c 1:8 rwm", "c 1:9 rwm", "c 5:2 rwm", "c 136:* rwm"} { + err = lxcSetConfigItem(cc, "lxc.cgroup.devices.allow", dev) + if err != nil { + return err + } + } + } + + if c.IsNesting() { + /* + * mount extra /proc and /sys to work around kernel + * restrictions on remounting them when covered + */ + err = lxcSetConfigItem(cc, "lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional") + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional") + if err != nil { + return err + } + } + + // Setup logging + logfile := c.LogFilePath() + + err = cc.SetLogFile(logfile) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.loglevel", "0") + if err != nil { + return err + } + + // Setup architecture + personality, err := shared.ArchitecturePersonality(c.architecture) + if 
err != nil { + personality, err = shared.ArchitecturePersonality(c.daemon.architectures[0]) + if err != nil { + return err + } + } + + err = lxcSetConfigItem(cc, "lxc.arch", personality) + if err != nil { + return err + } + + // Setup the hooks + err = lxcSetConfigItem(cc, "lxc.hook.pre-start", fmt.Sprintf("%s callhook %s %d start", c.daemon.execPath, shared.VarPath(""), c.id)) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.hook.post-stop", fmt.Sprintf("%s callhook %s %d stop", c.daemon.execPath, shared.VarPath(""), c.id)) + if err != nil { + return err + } + + // Setup the console + err = lxcSetConfigItem(cc, "lxc.tty", "0") + if err != nil { + return err + } + + // Setup the hostname + err = lxcSetConfigItem(cc, "lxc.utsname", c.Name()) + if err != nil { + return err + } + + // Setup devlxd + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/lxd none bind,create=dir 0 0", shared.VarPath("devlxd"))) + if err != nil { + return err + } + + // Setup AppArmor + if aaAvailable { + if aaConfined || !aaAdmin { + // If confined but otherwise able to use AppArmor, use our own profile + curProfile := aaProfile() + curProfile = strings.TrimSuffix(curProfile, " (enforce)") + err = lxcSetConfigItem(cc, "lxc.aa_profile", curProfile) + if err != nil { + return err + } + } else { + // If not currently confined, use the container's profile + err := lxcSetConfigItem(cc, "lxc.aa_profile", AAProfileFull(c)) + if err != nil { + return err + } + } + } + + // Setup Seccomp + err = lxcSetConfigItem(cc, "lxc.seccomp", SeccompProfilePath(c)) + if err != nil { + return err + } + + // Setup idmap + if c.idmapset != nil { + lines := c.idmapset.ToLxcString() + for _, line := range lines { + err := lxcSetConfigItem(cc, "lxc.id_map", strings.TrimSuffix(line, "\n")) + if err != nil { + return err + } + } + } + + // Setup environment + for k, v := range c.expandedConfig { + if strings.HasPrefix(k, "environment.") { + err = lxcSetConfigItem(cc, "lxc.environment", fmt.Sprintf("%s=%s", strings.TrimPrefix(k, "environment."), v)) + if err != nil { + return err + } + } + } + + // Memory limits + if cgMemoryController { + memory := c.expandedConfig["limits.memory"] + memoryEnforce := c.expandedConfig["limits.memory.enforce"] + memorySwap := c.expandedConfig["limits.memory.swap"] + memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"] + + // Configure the memory limits + if memory != "" { + var valueInt int64 + if strings.HasSuffix(memory, "%") { + percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64) + if err != nil { + return err + } + + memoryTotal, err := deviceTotalMemory() + if err != nil { + return err + } + + valueInt = int64((memoryTotal / 100) * percent) + } else { + valueInt, err = shared.ParseByteSizeString(memory) + if err != nil { + return err + } + } + + if memoryEnforce == "soft" { + err = lxcSetConfigItem(cc, "lxc.cgroup.memory.soft_limit_in_bytes", fmt.Sprintf("%d", valueInt)) + if err != nil { + return err + } + } else { + if memorySwap != "false" && cgSwapAccounting { + err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt)) + if err != nil { + return err + } + err = lxcSetConfigItem(cc, "lxc.cgroup.memory.memsw.limit_in_bytes", fmt.Sprintf("%d", valueInt)) + if err != nil { + return err + } + } else { + err = lxcSetConfigItem(cc, "lxc.cgroup.memory.limit_in_bytes", fmt.Sprintf("%d", valueInt)) + if err != nil { + return err + } + } + } + } + + // Configure the swappiness + if memorySwap == "false" { 
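+ // (Editorial note: with swap disabled the swappiness is pinned to 0 + // below; otherwise the else-if branch maps the configured priority + // onto the kernel scale as 60-10+priority, so a priority of 0 yields + // 50 and a priority of 10 restores the kernel default of 60.) 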
+ err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", "0") + if err != nil { + return err + } + } else if memorySwapPriority != "" { + priority, err := strconv.Atoi(memorySwapPriority) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.cgroup.memory.swappiness", fmt.Sprintf("%d", 60-10+priority)) + if err != nil { + return err + } + } + } + + // CPU limits + cpuPriority := c.expandedConfig["limits.cpu.priority"] + cpuAllowance := c.expandedConfig["limits.cpu.allowance"] + + if (cpuPriority != "" || cpuAllowance != "") && cgCpuController { + cpuShares, cpuCfsQuota, cpuCfsPeriod, err := deviceParseCPU(cpuAllowance, cpuPriority) + if err != nil { + return err + } + + if cpuShares != "1024" { + err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.shares", cpuShares) + if err != nil { + return err + } + } + + if cpuCfsPeriod != "-1" { + err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.cfs_period_us", cpuCfsPeriod) + if err != nil { + return err + } + } + + if cpuCfsQuota != "-1" { + err = lxcSetConfigItem(cc, "lxc.cgroup.cpu.cfs_quota_us", cpuCfsQuota) + if err != nil { + return err + } + } + } + + // Disk limits + if cgBlkioController { + diskPriority := c.expandedConfig["limits.disk.priority"] + if diskPriority != "" { + priorityInt, err := strconv.Atoi(diskPriority) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.weight", fmt.Sprintf("%d", priorityInt*100)) + if err != nil { + return err + } + } + + hasDiskLimits := false + for _, m := range c.expandedDevices { + if m["type"] != "disk" { + continue + } + + if m["limits.read"] != "" || m["limits.write"] != "" || m["limits.max"] != "" { + hasDiskLimits = true + break + } + } + + if hasDiskLimits { + diskLimits, err := c.getDiskLimits() + if err != nil { + return err + } + + for block, limit := range diskLimits { + if limit.readBps > 0 { + err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.throttle.read_bps_device", fmt.Sprintf("%s %d", block, limit.readBps)) + if err != nil { + return err + } + } + + if limit.readIops > 0 { + err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.throttle.read_iops_device", fmt.Sprintf("%s %d", block, limit.readIops)) + if err != nil { + return err + } + } + + if limit.writeBps > 0 { + err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.throttle.write_bps_device", fmt.Sprintf("%s %d", block, limit.writeBps)) + if err != nil { + return err + } + } + + if limit.writeIops > 0 { + err = lxcSetConfigItem(cc, "lxc.cgroup.blkio.throttle.write_iops_device", fmt.Sprintf("%s %d", block, limit.writeIops)) + if err != nil { + return err + } + } + } + } + } + + // Processes + if cgPidsController { + processes := c.expandedConfig["limits.processes"] + if processes != "" { + valueInt, err := strconv.ParseInt(processes, 10, 64) + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.cgroup.pids.max", fmt.Sprintf("%d", valueInt)) + if err != nil { + return err + } + } + } + + // Setup devices + for k, m := range c.expandedDevices { + if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { + // Prepare all the paths + srcPath := m["path"] + tgtPath := strings.TrimPrefix(srcPath, "/") + devName := fmt.Sprintf("unix.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + + // Set the bind-mount entry + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,create=file", devPath, tgtPath)) + if err != nil { + return err + } + } else if m["type"] == "nic" { + // Fill in some fields from volatile + m, err = 
c.fillNetworkDevice(k, m) + if err != nil { + return err + } + + // Interface type specific configuration + if shared.StringInSlice(m["nictype"], []string{"bridged", "p2p"}) { + err = lxcSetConfigItem(cc, "lxc.network.type", "veth") + if err != nil { + return err + } + } else if m["nictype"] == "physical" { + err = lxcSetConfigItem(cc, "lxc.network.type", "phys") + if err != nil { + return err + } + } else if m["nictype"] == "macvlan" { + err = lxcSetConfigItem(cc, "lxc.network.type", "macvlan") + if err != nil { + return err + } + + err = lxcSetConfigItem(cc, "lxc.network.macvlan.mode", "bridge") + if err != nil { + return err + } + } + + err = lxcSetConfigItem(cc, "lxc.network.flags", "up") + if err != nil { + return err + } + + if shared.StringInSlice(m["nictype"], []string{"bridged", "physical", "macvlan"}) { + err = lxcSetConfigItem(cc, "lxc.network.link", m["parent"]) + if err != nil { + return err + } + } + + // Host Virtual NIC name + if m["host_name"] != "" { + err = lxcSetConfigItem(cc, "lxc.network.veth.pair", m["host_name"]) + if err != nil { + return err + } + } + + // MAC address + if m["hwaddr"] != "" { + err = lxcSetConfigItem(cc, "lxc.network.hwaddr", m["hwaddr"]) + if err != nil { + return err + } + } + + // MTU + if m["mtu"] != "" { + err = lxcSetConfigItem(cc, "lxc.network.mtu", m["mtu"]) + if err != nil { + return err + } + } + + // Name + if m["name"] != "" { + err = lxcSetConfigItem(cc, "lxc.network.name", m["name"]) + if err != nil { + return err + } + } + } else if m["type"] == "disk" { + // Prepare all the paths + srcPath := m["source"] + tgtPath := strings.TrimPrefix(m["path"], "/") + devName := fmt.Sprintf("disk.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + + // Various option checks + isOptional := m["optional"] == "1" || m["optional"] == "true" + isReadOnly := m["readonly"] == "1" || m["readonly"] == "true" + isFile := !shared.IsDir(srcPath) && !deviceIsDevice(srcPath) + + // Deal with a rootfs + if tgtPath == "" { + // Set the rootfs path + err = lxcSetConfigItem(cc, "lxc.rootfs", c.RootfsPath()) + if err != nil { + return err + } + + // Read-only rootfs (unlikely to work very well) + if isReadOnly { + err = lxcSetConfigItem(cc, "lxc.rootfs.options", "ro") + if err != nil { + return err + } + } + } else { + options := []string{} + if isReadOnly { + options = append(options, "ro") + } + + if isOptional { + options = append(options, "optional") + } + + if isFile { + options = append(options, "create=file") + } else { + options = append(options, "create=dir") + } + + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s %s none bind,%s", devPath, tgtPath, strings.Join(options, ","))) + if err != nil { + return err + } + } + } + } + + // Setup shmounts + err = lxcSetConfigItem(cc, "lxc.mount.entry", fmt.Sprintf("%s dev/.lxd-mounts none bind,create=dir 0 0", shared.VarPath("shmounts", c.Name()))) + if err != nil { + return err + } + + // Apply raw.lxc + if lxcConfig, ok := c.expandedConfig["raw.lxc"]; ok { + f, err := ioutil.TempFile("", "lxd_config_") + if err != nil { + return err + } + + err = shared.WriteAll(f, []byte(lxcConfig)) + f.Close() + defer os.Remove(f.Name()) + if err != nil { + return err + } + + if err := cc.LoadConfigFile(f.Name()); err != nil { + return fmt.Errorf("Failed to load raw.lxc") + } + } + + c.c = cc + + return nil +} + +// Config handling +func (c *containerLXC) expandConfig() error { + config := map[string]string{} + + // Apply all the profiles + for _, name := range 
c.profiles { + profileConfig, err := dbProfileConfig(c.daemon.db, name) + if err != nil { + return err + } + + for k, v := range profileConfig { + config[k] = v + } + } + + // Stick the local config on top + for k, v := range c.localConfig { + config[k] = v + } + + c.expandedConfig = config + return nil +} + +func (c *containerLXC) expandDevices() error { + devices := shared.Devices{} + + // Apply all the profiles + for _, p := range c.profiles { + profileDevices, err := dbDevices(c.daemon.db, p, true) + if err != nil { + return err + } + + for k, v := range profileDevices { + devices[k] = v + } + } + + // Stick local devices on top + for k, v := range c.localDevices { + devices[k] = v + } + + c.expandedDevices = devices + return nil +} + +// Start functions +func (c *containerLXC) startCommon() (string, error) { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return "", err + } + + // Check that we're not already running + if c.IsRunning() { + return "", fmt.Errorf("The container is already running") + } + + // Load any required kernel modules + kernelModules := c.expandedConfig["linux.kernel_modules"] + if kernelModules != "" { + for _, module := range strings.Split(kernelModules, ",") { + module = strings.TrimPrefix(module, " ") + out, err := exec.Command("modprobe", module).CombinedOutput() + if err != nil { + return "", fmt.Errorf("Failed to load kernel module '%s': %s", module, out) + } + } + } + + /* Deal with idmap changes */ + idmap := c.IdmapSet() + + lastIdmap, err := c.LastIdmapSet() + if err != nil { + return "", err + } + + var jsonIdmap string + if idmap != nil { + idmapBytes, err := json.Marshal(idmap.Idmap) + if err != nil { + return "", err + } + jsonIdmap = string(idmapBytes) + } else { + jsonIdmap = "[]" + } + + if !reflect.DeepEqual(idmap, lastIdmap) { + shared.Debugf("Container idmap changed, remapping") + + err := c.StorageStart() + if err != nil { + return "", err + } + + if lastIdmap != nil { + err = lastIdmap.UnshiftRootfs(c.RootfsPath()) + if err != nil { + c.StorageStop() + return "", err + } + } + + if idmap != nil { + err = idmap.ShiftRootfs(c.RootfsPath()) + if err != nil { + c.StorageStop() + return "", err + } + } + } + + err = c.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) + if err != nil { + return "", err + } + + // Generate the Seccomp profile + if err := SeccompCreateProfile(c); err != nil { + return "", err + } + + // Cleanup any existing leftover devices + c.removeUnixDevices() + c.removeDiskDevices() + + // Create the devices + for k, m := range c.expandedDevices { + if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { + // Unix device + devPath, err := c.createUnixDevice(k, m) + if err != nil { + return "", err + } + + // Add the new device cgroup rule + dType, dMajor, dMinor, err := deviceGetAttributes(devPath) + if err != nil { + return "", err + } + + err = lxcSetConfigItem(c.c, "lxc.cgroup.devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)) + if err != nil { + return "", fmt.Errorf("Failed to add cgroup rule for device") + } + } else if m["type"] == "disk" { + // Disk device + if m["path"] != "/" { + _, err := c.createDiskDevice(k, m) + if err != nil { + return "", err + } + } + } + } + + // Create any missing directory + err = os.MkdirAll(c.LogPath(), 0700) + if err != nil { + return "", err + } + + err = os.MkdirAll(shared.VarPath("devices", c.Name()), 0711) + if err != nil { + return "", err + } + + err = os.MkdirAll(shared.VarPath("shmounts", c.Name()), 0711) + if err != nil 
{ + return "", err + } + + // Cleanup any leftover volatile entries + netNames := []string{} + for k, v := range c.expandedDevices { + if v["type"] == "nic" { + netNames = append(netNames, k) + } + } + + for k, _ := range c.localConfig { + // We only care about volatile + if !strings.HasPrefix(k, "volatile.") { + continue + } + + // Confirm it's a key of format volatile.. + fields := strings.SplitN(k, ".", 3) + if len(fields) != 3 { + continue + } + + // The only device keys we care about are name and hwaddr + if !shared.StringInSlice(fields[2], []string{"name", "hwaddr"}) { + continue + } + + // Check if the device still exists + if shared.StringInSlice(fields[1], netNames) { + // Don't remove the volatile entry if the device doesn't have the matching field set + if c.expandedDevices[fields[1]][fields[2]] == "" { + continue + } + } + + // Remove the volatile key from the DB + err := dbContainerConfigRemove(c.daemon.db, c.id, k) + if err != nil { + return "", err + } + + // Remove the volatile key from the in-memory configs + delete(c.localConfig, k) + delete(c.expandedConfig, k) + } + + // Generate the LXC config + f, err := ioutil.TempFile("", "lxd_lxc_startconfig_") + if err != nil { + return "", err + } + + configPath := f.Name() + if err = f.Chmod(0600); err != nil { + f.Close() + os.Remove(configPath) + return "", err + } + f.Close() + + err = c.c.SaveConfigFile(configPath) + if err != nil { + os.Remove(configPath) + return "", err + } + + return configPath, nil +} + +func (c *containerLXC) Start(stateful bool) error { + // Wait for container tear down to finish + wgStopping, stopping := lxcStoppingContainers[c.id] + if stopping { + wgStopping.Wait() + } + + // Run the shared start code + configPath, err := c.startCommon() + if err != nil { + return err + } + + // If stateful, restore now + if stateful { + if !c.stateful { + return fmt.Errorf("Container has no existing state to restore.") + } + + err := c.c.Restore(lxc.RestoreOptions{ + Directory: c.StatePath(), + Verbose: true, + }) + + err2 := os.RemoveAll(c.StatePath()) + if err2 != nil { + return err2 + } + + if err != nil { + return err + } + + c.stateful = false + err = dbContainerSetStateful(c.daemon.db, c.id, false) + if err != nil { + return err + } + + return nil + } + + // Start the LXC container + out, err := exec.Command( + c.daemon.execPath, + "forkstart", + c.name, + c.daemon.lxcpath, + configPath).CombinedOutput() + + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.Debugf("forkstart: %s", line) + } + } + + if err != nil { + return fmt.Errorf( + "Error calling 'lxd forkstart %s %s %s': err='%v'", + c.name, + c.daemon.lxcpath, + filepath.Join(c.LogPath(), "lxc.conf"), + err) + } + + return nil +} + +func (c *containerLXC) StartFromMigration(imagesDir string) error { + // Run the shared start code + configPath, err := c.startCommon() + if err != nil { + return err + } + + // Start the LXC container + out, err := exec.Command( + c.daemon.execPath, + "forkmigrate", + c.name, + c.daemon.lxcpath, + configPath, + imagesDir).CombinedOutput() + + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.Debugf("forkmigrate: %s", line) + } + } + + if err != nil { + return fmt.Errorf( + "Error calling 'lxd forkmigrate %s %s %s %s': err='%v'", + c.name, + c.daemon.lxcpath, + filepath.Join(c.LogPath(), "lxc.conf"), + imagesDir, + err) + } + + return nil +} + +func (c *containerLXC) OnStart() error { + // Make 
sure we can't call go-lxc functions by mistake + c.fromHook = true + + // Start the storage for this container + err := c.StorageStart() + if err != nil { + return err + } + + // Load the container AppArmor profile + err = AALoadProfile(c) + if err != nil { + c.StorageStop() + return err + } + + // Template anything that needs templating + err = c.TemplateApply("start") + if err != nil { + c.StorageStop() + return err + } + + // Trigger a rebalance + deviceTaskSchedulerTrigger("container", c.name, "started") + + // Apply network priority + if c.expandedConfig["limits.network.priority"] != "" { + go func(c *containerLXC) { + c.fromHook = false + err := c.setNetworkPriority() + if err != nil { + shared.Log.Error("Failed to apply network priority", log.Ctx{"container": c.name, "err": err}) + } + }(c) + } + + // Apply network limits + for name, m := range c.expandedDevices { + if m["type"] != "nic" { + continue + } + + if m["limits.max"] == "" && m["limits.ingress"] == "" && m["limits.egress"] == "" { + continue + } + + go func(c *containerLXC, name string, m shared.Device) { + c.fromHook = false + err = c.setNetworkLimits(name, m) + if err != nil { + shared.Log.Error("Failed to apply network limits", log.Ctx{"container": c.name, "err": err}) + } + }(c, name, m) + } + + return nil +} + +// Container shutdown locking +func (c *containerLXC) setupStopping() *sync.WaitGroup { + // Handle locking + lxcStoppingContainersLock.Lock() + defer lxcStoppingContainersLock.Unlock() + + // Existing entry + wg, stopping := lxcStoppingContainers[c.id] + if stopping { + return wg + } + + // Setup new entry + lxcStoppingContainers[c.id] = &sync.WaitGroup{} + + go func(wg *sync.WaitGroup, id int) { + wg.Wait() + + lxcStoppingContainersLock.Lock() + defer lxcStoppingContainersLock.Unlock() + + delete(lxcStoppingContainers, id) + }(lxcStoppingContainers[c.id], c.id) + + return lxcStoppingContainers[c.id] +} + +// Stop functions +func (c *containerLXC) Stop(stateful bool) error { + // Handle stateful stop + if stateful { + // Cleanup any existing state + stateDir := c.StatePath() + os.RemoveAll(stateDir) + + err := os.MkdirAll(stateDir, 0700) + if err != nil { + return err + } + + // Checkpoint + opts := lxc.CheckpointOptions{Directory: stateDir, Stop: true, Verbose: true} + err = c.Checkpoint(opts) + err2 := CollectCRIULogFile(c, stateDir, "snapshot", "dump") + if err2 != nil { + shared.Log.Warn("failed to collect criu log file", log.Ctx{"error": err2}) + } + + if err != nil { + return err + } + + c.stateful = true + err = dbContainerSetStateful(c.daemon.db, c.id, true) + if err != nil { + return err + } + + return nil + } + + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + // Attempt to freeze the container first, helps massively with fork bombs + c.Freeze() + + // Handle locking + wg := c.setupStopping() + + // Stop the container + wg.Add(1) + if err := c.c.Stop(); err != nil { + wg.Done() + return err + } + + // Mark ourselves as done + wg.Done() + + // Wait for any other teardown routines to finish + wg.Wait() + + return nil +} + +func (c *containerLXC) Shutdown(timeout time.Duration) error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + // Handle locking + wg := c.setupStopping() + + // Shutdown the container + wg.Add(1) + if err := c.c.Shutdown(timeout); err != nil { + wg.Done() + return err + } + + // Mark ourselves as done + wg.Done() + + // Wait for any other teardown routines to finish + wg.Wait() + + return nil +} + +func (c 
*containerLXC) OnStop(target string) error { + // Get locking + wg, stopping := lxcStoppingContainers[c.id] + if wg != nil { + wg.Add(1) + } + + // Make sure we can't call go-lxc functions by mistake + c.fromHook = true + + // Stop the storage for this container + err := c.StorageStop() + if err != nil { + return err + } + + // Unlock the apparmor profile + err = AAUnloadProfile(c) + if err != nil { + return err + } + + // FIXME: The goroutine can go away once we can rely on LXC_TARGET + go func(c *containerLXC, target string, wg *sync.WaitGroup) { + c.fromHook = false + + // Unlock on return + if wg != nil { + defer wg.Done() + } + + if target == "unknown" && stopping { + target = "stop" + } + + if target == "unknown" { + time.Sleep(5 * time.Second) + + newContainer, err := containerLoadByName(c.daemon, c.Name()) + if err != nil { + return + } + + if newContainer.Id() != c.id { + return + } + + if newContainer.IsRunning() { + return + } + } + + // Clean all the unix devices + err = c.removeUnixDevices() + if err != nil { + shared.Log.Error("Unable to remove unix devices") + } + + // Clean all the disk devices + err = c.removeDiskDevices() + if err != nil { + shared.Log.Error("Unable to remove disk devices") + } + + // Reboot the container + if target == "reboot" { + + /* This part is a hack to work around an LXC bug where a + failure from a post-stop script doesn't prevent the container from restarting. */ + ephemeral := c.ephemeral + args := containerArgs{ + Architecture: c.Architecture(), + Config: c.LocalConfig(), + Devices: c.LocalDevices(), + Ephemeral: false, + Profiles: c.Profiles(), + } + c.Update(args, false) + c.Stop(false) + args.Ephemeral = ephemeral + c.Update(args, true) + + // Start the container again + c.Start(false) + return + } + + // Trigger a rebalance + deviceTaskSchedulerTrigger("container", c.name, "stopped") + + // Destroy ephemeral containers + if c.ephemeral { + c.Delete() + } + }(c, target, wg) + + return nil +} + +// Freezer functions +func (c *containerLXC) Freeze() error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + return c.c.Freeze() +} + +func (c *containerLXC) Unfreeze() error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + return c.c.Unfreeze() +} + +func (c *containerLXC) Render() (*shared.ContainerInfo, error) { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return nil, err + } + + // FIXME: Render shouldn't directly access the go-lxc struct + statusCode := shared.FromLXCState(int(c.c.State())) + + // Ignore err as the arch string on error is correct (unknown) + architectureName, _ := shared.ArchitectureName(c.architecture) + + return &shared.ContainerInfo{ + Architecture: architectureName, + Config: c.localConfig, + CreationDate: c.creationDate, + Devices: c.localDevices, + Ephemeral: c.ephemeral, + ExpandedConfig: c.expandedConfig, + ExpandedDevices: c.expandedDevices, + Name: c.name, + Profiles: c.profiles, + Status: statusCode.String(), + StatusCode: statusCode, + Stateful: c.stateful, + }, nil +} + +func (c *containerLXC) RenderState() (*shared.ContainerState, error) { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return nil, err + } + + // FIXME: RenderState shouldn't directly access the go-lxc struct + statusCode := shared.FromLXCState(int(c.c.State())) + status := shared.ContainerState{ + Status: statusCode.String(), + StatusCode: statusCode, + } + + if c.IsRunning() { + pid := c.InitPID() + status.Disk = c.diskState() + 
status.Memory = c.memoryState() + status.Network = c.networkState() + status.Pid = int64(pid) + status.Processes = c.processesState() + } + + return &status, nil +} + +func (c *containerLXC) Snapshots() ([]container, error) { + // Get all the snapshots + snaps, err := dbContainerGetSnapshots(c.daemon.db, c.name) + if err != nil { + return nil, err + } + + // Build the snapshot list + containers := []container{} + for _, snapName := range snaps { + snap, err := containerLoadByName(c.daemon, snapName) + if err != nil { + return nil, err + } + + containers = append(containers, snap) + } + + return containers, nil +} + +func (c *containerLXC) Restore(sourceContainer container) error { + // Check if we can restore the container + err := c.storage.ContainerCanRestore(c, sourceContainer) + if err != nil { + return err + } + + // Stop the container + wasRunning := false + if c.IsRunning() { + wasRunning = true + if err := c.Stop(false); err != nil { + shared.Log.Error( + "Could not stop container", + log.Ctx{ + "container": c.Name(), + "err": err}) + return err + } + } + + // Restore the rootfs + err = c.storage.ContainerRestore(c, sourceContainer) + if err != nil { + shared.Log.Error("Restoring the filesystem failed", + log.Ctx{ + "source": sourceContainer.Name(), + "destination": c.Name()}) + return err + } + + // Restore the configuration + args := containerArgs{ + Architecture: sourceContainer.Architecture(), + Config: sourceContainer.LocalConfig(), + Devices: sourceContainer.LocalDevices(), + Ephemeral: sourceContainer.IsEphemeral(), + Profiles: sourceContainer.Profiles(), + } + + err = c.Update(args, false) + if err != nil { + shared.Log.Error("Restoring the configuration failed", + log.Ctx{ + "source": sourceContainer.Name(), + "destination": c.Name()}) + + return err + } + + // If the container wasn't running but was stateful, should we restore + // it as running? + if shared.PathExists(c.StatePath()) { + err := c.c.Restore(lxc.RestoreOptions{ + Directory: c.StatePath(), + Verbose: true, + }) + + // Remove the state from the parent container; we only keep + // this in snapshots. 
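+ // (Editorial note: the state directory is removed whether or not the + // restore above succeeded, so a failed restore cannot leave stale + // CRIU images behind to be replayed on a later start; only after the + // cleanup is the restore error itself reported.) 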
+ err2 := os.RemoveAll(c.StatePath()) + if err2 != nil { + shared.Log.Error("failed to delete snapshot state", "path", c.StatePath(), "err", err2) + } + + if err != nil { + return err + } + + return nil + } + + // Restart the container + if wasRunning { + return c.Start(false) + } + + return nil +} + +func (c *containerLXC) cleanup() { + // Unmount any leftovers + c.removeUnixDevices() + c.removeDiskDevices() + + // Remove the security profiles + AADeleteProfile(c) + SeccompDeleteProfile(c) + + // Remove the devices path + os.RemoveAll(c.DevicesPath()) + + // Remove the shmounts path + os.RemoveAll(shared.VarPath("shmounts", c.Name())) +} + +func (c *containerLXC) Delete() error { + if c.IsSnapshot() { + // Remove the snapshot + if err := c.storage.ContainerSnapshotDelete(c); err != nil { + shared.Log.Warn("failed to delete snapshot", "name", c.Name(), "err", err) + } + } else { + // Remove all snapshots + if err := containerDeleteSnapshots(c.daemon, c.Name()); err != nil { + shared.Log.Warn("failed to delete snapshots", "name", c.Name(), "err", err) + } + + // Clean things up + c.cleanup() + + // Delete the container from disk + if shared.PathExists(c.Path()) { + if err := c.storage.ContainerDelete(c); err != nil { + return err + } + } + } + + // Remove the database record + if err := dbContainerRemove(c.daemon.db, c.Name()); err != nil { + return err + } + + return nil +} + +func (c *containerLXC) Rename(newName string) error { + oldName := c.Name() + + // Sanity checks + if !c.IsSnapshot() && !shared.ValidHostname(newName) { + return fmt.Errorf("Invalid container name") + } + + if c.IsRunning() { + return fmt.Errorf("renaming of running container not allowed") + } + + // Clean things up + c.cleanup() + + // Rename the logging path + os.RemoveAll(shared.LogPath(newName)) + err := os.Rename(c.LogPath(), shared.LogPath(newName)) + if err != nil { + return err + } + + // Rename the storage entry + if c.IsSnapshot() { + if err := c.storage.ContainerSnapshotRename(c, newName); err != nil { + return err + } + } else { + if err := c.storage.ContainerRename(c, newName); err != nil { + return err + } + } + + // Rename the database entry + if err := dbContainerRename(c.daemon.db, oldName, newName); err != nil { + return err + } + + if !c.IsSnapshot() { + // Rename all the snapshots + results, err := dbContainerGetSnapshots(c.daemon.db, oldName) + if err != nil { + return err + } + + for _, sname := range results { + // Rename the snapshot + baseSnapName := filepath.Base(sname) + newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName + if err := dbContainerRename(c.daemon.db, sname, newSnapshotName); err != nil { + return err + } + } + } + + // Set the new name in the struct + c.name = newName + + // Invalidate the go-lxc cache + c.c = nil + + return nil +} + +func (c *containerLXC) CGroupGet(key string) (string, error) { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return "", err + } + + // Make sure the container is running + if !c.IsRunning() { + return "", fmt.Errorf("Can't get cgroups on a stopped container") + } + + value := c.c.CgroupItem(key) + return strings.Join(value, "\n"), nil +} + +func (c *containerLXC) CGroupSet(key string, value string) error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + // Make sure the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't set cgroups on a stopped container") + } + + err = c.c.SetCgroupItem(key, value) + if err != nil { + return fmt.Errorf("Failed to 
set cgroup %s=\"%s\": %s", key, value, err) + } + + return nil +} + +func (c *containerLXC) ConfigKeySet(key string, value string) error { + c.localConfig[key] = value + + args := containerArgs{ + Architecture: c.architecture, + Config: c.localConfig, + Devices: c.localDevices, + Ephemeral: c.ephemeral, + Profiles: c.profiles, + } + + return c.Update(args, false) +} + +func (c *containerLXC) Update(args containerArgs, userRequested bool) error { + // Set sane defaults for unset keys + if args.Architecture == 0 { + args.Architecture = c.architecture + } + + if args.Config == nil { + args.Config = map[string]string{} + } + + if args.Devices == nil { + args.Devices = shared.Devices{} + } + + if args.Profiles == nil { + args.Profiles = []string{} + } + + // Validate the new config + err := containerValidConfig(args.Config, false, false) + if err != nil { + return err + } + + // Validate the new devices + err = containerValidDevices(args.Devices, false, false) + if err != nil { + return err + } + + // Validate the new profiles + profiles, err := dbProfiles(c.daemon.db) + if err != nil { + return err + } + + for _, name := range args.Profiles { + if !shared.StringInSlice(name, profiles) { + return fmt.Errorf("Profile doesn't exist: %s", name) + } + } + + // Validate the new architecture + if args.Architecture != 0 { + _, err = shared.ArchitectureName(args.Architecture) + if err != nil { + return fmt.Errorf("Invalid architecture id: %s", err) + } + } + + // Check that volatile wasn't modified + if userRequested { + for k, v := range args.Config { + if strings.HasPrefix(k, "volatile.") && c.localConfig[k] != v { + return fmt.Errorf("Volatile keys are read-only.") + } + } + + for k, v := range c.localConfig { + if strings.HasPrefix(k, "volatile.") && args.Config[k] != v { + return fmt.Errorf("Volatile keys are read-only.") + } + } + } + + // Get a copy of the old configuration + oldArchitecture := 0 + err = shared.DeepCopy(&c.architecture, &oldArchitecture) + if err != nil { + return err + } + + oldEphemeral := false + err = shared.DeepCopy(&c.ephemeral, &oldEphemeral) + if err != nil { + return err + } + + oldExpandedDevices := shared.Devices{} + err = shared.DeepCopy(&c.expandedDevices, &oldExpandedDevices) + if err != nil { + return err + } + + oldExpandedConfig := map[string]string{} + err = shared.DeepCopy(&c.expandedConfig, &oldExpandedConfig) + if err != nil { + return err + } + + oldLocalDevices := shared.Devices{} + err = shared.DeepCopy(&c.localDevices, &oldLocalDevices) + if err != nil { + return err + } + + oldLocalConfig := map[string]string{} + err = shared.DeepCopy(&c.localConfig, &oldLocalConfig) + if err != nil { + return err + } + + oldProfiles := []string{} + err = shared.DeepCopy(&c.profiles, &oldProfiles) + if err != nil { + return err + } + + // Define a function which reverts everything + undoChanges := func() { + c.architecture = oldArchitecture + c.ephemeral = oldEphemeral + c.expandedConfig = oldExpandedConfig + c.expandedDevices = oldExpandedDevices + c.localConfig = oldLocalConfig + c.localDevices = oldLocalDevices + c.profiles = oldProfiles + c.initLXC() + deviceTaskSchedulerTrigger("container", c.name, "changed") + } + + // Apply the various changes + c.architecture = args.Architecture + c.ephemeral = args.Ephemeral + c.localConfig = args.Config + c.localDevices = args.Devices + c.profiles = args.Profiles + + // Expand the config and refresh the LXC config + err = c.expandConfig() + if err != nil { + undoChanges() + return err + } + + err = c.expandDevices() + if err 
!= nil { + undoChanges() + return err + } + + err = c.initLXC() + if err != nil { + undoChanges() + return err + } + + // Diff the configurations + changedConfig := []string{} + for key, _ := range oldExpandedConfig { + if oldExpandedConfig[key] != c.expandedConfig[key] { + if !shared.StringInSlice(key, changedConfig) { + changedConfig = append(changedConfig, key) + } + } + } + + for key, _ := range c.expandedConfig { + if oldExpandedConfig[key] != c.expandedConfig[key] { + if !shared.StringInSlice(key, changedConfig) { + changedConfig = append(changedConfig, key) + } + } + } + + // Diff the devices + removeDevices, addDevices, updateDevices := oldExpandedDevices.Update(c.expandedDevices) + + // Do some validation of the config diff + err = containerValidConfig(c.expandedConfig, false, true) + if err != nil { + undoChanges() + return err + } + + // Do some validation of the devices diff + err = containerValidDevices(c.expandedDevices, false, true) + if err != nil { + undoChanges() + return err + } + + // If apparmor changed, re-validate the apparmor profile + for _, key := range changedConfig { + if key == "raw.apparmor" || key == "security.nesting" { + err = AAParseProfile(c) + if err != nil { + undoChanges() + return err + } + } + } + + // Apply disk quota changes + for _, m := range addDevices { + var oldRootfsSize string + for _, m := range oldExpandedDevices { + if m["type"] == "disk" && m["path"] == "/" { + oldRootfsSize = m["size"] + break + } + } + + if m["size"] != oldRootfsSize { + size, err := shared.ParseByteSizeString(m["size"]) + if err != nil { + undoChanges() + return err + } + + err = c.storage.ContainerSetQuota(c, size) + if err != nil { + undoChanges() + return err + } + } + } + + // Apply the live changes + if c.IsRunning() { + // Confirm that the rootfs source didn't change + var oldRootfs shared.Device + for _, m := range oldExpandedDevices { + if m["type"] == "disk" && m["path"] == "/" { + oldRootfs = m + break + } + } + + var newRootfs shared.Device + for _, m := range c.expandedDevices { + if m["type"] == "disk" && m["path"] == "/" { + newRootfs = m + break + } + } + + if oldRootfs["source"] != newRootfs["source"] { + undoChanges() + return fmt.Errorf("Cannot change the rootfs path of a running container") + } + + // Live update the container config + for _, key := range changedConfig { + value := c.expandedConfig[key] + + if key == "raw.apparmor" || key == "security.nesting" { + // Update the AppArmor profile + err = AALoadProfile(c) + if err != nil { + undoChanges() + return err + } + } else if key == "linux.kernel_modules" && value != "" { + for _, module := range strings.Split(value, ",") { + module = strings.TrimPrefix(module, " ") + out, err := exec.Command("modprobe", module).CombinedOutput() + if err != nil { + undoChanges() + return fmt.Errorf("Failed to load kernel module '%s': %s", module, out) + } + } + } else if key == "limits.disk.priority" { + if !cgBlkioController { + continue + } + + priorityInt := 5 + diskPriority := c.expandedConfig["limits.disk.priority"] + if diskPriority != "" { + priorityInt, err = strconv.Atoi(diskPriority) + if err != nil { + return err + } + } + + err = c.CGroupSet("blkio.weight", fmt.Sprintf("%d", priorityInt*100)) + if err != nil { + return err + } + } else if key == "limits.memory" || strings.HasPrefix(key, "limits.memory.") { + // Skip if no memory CGroup + if !cgMemoryController { + continue + } + + // Set the new memory limit + memory := c.expandedConfig["limits.memory"] + memoryEnforce := 
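// ---- editor's note: the two loops above compute a symmetric diff of the
// old and new expanded config. The same result in one reusable helper, as a
// sketch (changedKeys is a hypothetical name):
package main

import "fmt"

// changedKeys returns every key whose value differs between a and b,
// including keys that exist in only one of the maps.
func changedKeys(a, b map[string]string) []string {
	changed := []string{}
	seen := map[string]bool{}
	for _, m := range []map[string]string{a, b} {
		for k := range m {
			if a[k] != b[k] && !seen[k] {
				seen[k] = true
				changed = append(changed, k)
			}
		}
	}
	return changed
}

func main() {
	fmt.Println(changedKeys(
		map[string]string{"limits.memory": "1GB"},
		map[string]string{"limits.memory": "2GB", "limits.cpu": "2"}))
	// Output (order may vary): [limits.memory limits.cpu]
}
// ---- end note ----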
c.expandedConfig["limits.memory.enforce"] + memorySwap := c.expandedConfig["limits.memory.swap"] + + // Parse memory + if memory == "" { + memory = "-1" + } else if strings.HasSuffix(memory, "%") { + percent, err := strconv.ParseInt(strings.TrimSuffix(memory, "%"), 10, 64) + if err != nil { + return err + } + + memoryTotal, err := deviceTotalMemory() + if err != nil { + return err + } + + memory = fmt.Sprintf("%d", int64((memoryTotal/100)*percent)) + } else { + valueInt, err := shared.ParseByteSizeString(memory) + if err != nil { + undoChanges() + return err + } + memory = fmt.Sprintf("%d", valueInt) + } + + // Reset everything + if cgSwapAccounting { + err = c.CGroupSet("memory.memsw.limit_in_bytes", "-1") + if err != nil { + undoChanges() + return err + } + } + + err = c.CGroupSet("memory.limit_in_bytes", "-1") + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("memory.soft_limit_in_bytes", "-1") + if err != nil { + undoChanges() + return err + } + + // Set the new values + if memoryEnforce == "soft" { + // Set new limit + err = c.CGroupSet("memory.soft_limit_in_bytes", memory) + if err != nil { + undoChanges() + return err + } + } else { + if memorySwap != "false" && cgSwapAccounting { + err = c.CGroupSet("memory.limit_in_bytes", memory) + if err != nil { + undoChanges() + return err + } + err = c.CGroupSet("memory.memsw.limit_in_bytes", memory) + if err != nil { + undoChanges() + return err + } + } else { + err = c.CGroupSet("memory.limit_in_bytes", memory) + if err != nil { + undoChanges() + return err + } + } + } + + // Configure the swappiness + if key == "limits.memory.swap" || key == "limits.memory.swap.priority" { + memorySwap := c.expandedConfig["limits.memory.swap"] + memorySwapPriority := c.expandedConfig["limits.memory.swap.priority"] + if memorySwap == "false" { + err = c.CGroupSet("memory.swappiness", "0") + if err != nil { + undoChanges() + return err + } + } else { + priority := 0 + if memorySwapPriority != "" { + priority, err = strconv.Atoi(memorySwapPriority) + if err != nil { + undoChanges() + return err + } + } + + err = c.CGroupSet("memory.swappiness", fmt.Sprintf("%d", 60-10+priority)) + if err != nil { + undoChanges() + return err + } + } + } + } else if key == "limits.network.priority" { + err := c.setNetworkPriority() + if err != nil { + return err + } + } else if key == "limits.cpu" { + // Trigger a scheduler re-run + deviceTaskSchedulerTrigger("container", c.name, "changed") + } else if key == "limits.cpu.priority" || key == "limits.cpu.allowance" { + // Skip if no cpu CGroup + if !cgCpuController { + continue + } + + // Apply new CPU limits + cpuShares, cpuCfsQuota, cpuCfsPeriod, err := deviceParseCPU(c.expandedConfig["limits.cpu.allowance"], c.expandedConfig["limits.cpu.priority"]) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("cpu.shares", cpuShares) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("cpu.cfs_period_us", cpuCfsPeriod) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("cpu.cfs_quota_us", cpuCfsQuota) + if err != nil { + undoChanges() + return err + } + } else if key == "limits.processes" { + if !cgPidsController { + continue + } + + if value == "" { + err = c.CGroupSet("pids.max", "max") + if err != nil { + undoChanges() + return err + } + } else { + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("pids.max", fmt.Sprintf("%d", valueInt)) + if err != nil { + undoChanges() + 
return err + } + } + } + } + + // Live update the devices + for k, m := range removeDevices { + if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { + err = c.removeUnixDevice(k, m) + if err != nil { + undoChanges() + return err + } + } else if m["type"] == "disk" && m["path"] != "/" { + err = c.removeDiskDevice(k, m) + if err != nil { + undoChanges() + return err + } + } else if m["type"] == "nic" { + err = c.removeNetworkDevice(k, m) + if err != nil { + undoChanges() + return err + } + } + } + + for k, m := range addDevices { + if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { + err = c.insertUnixDevice(k, m) + if err != nil { + undoChanges() + return err + } + } else if m["type"] == "disk" && m["path"] != "/" { + err = c.insertDiskDevice(k, m) + if err != nil { + undoChanges() + return err + } + } else if m["type"] == "nic" { + err = c.insertNetworkDevice(k, m) + if err != nil { + undoChanges() + return err + } + } + } + + updateDiskLimit := false + for k, m := range updateDevices { + if m["type"] == "disk" { + updateDiskLimit = true + } else if m["type"] == "nic" { + err = c.setNetworkLimits(k, m) + if err != nil { + undoChanges() + return err + } + } + } + + // Disk limits parse all devices, so just apply them once + if updateDiskLimit && cgBlkioController { + diskLimits, err := c.getDiskLimits() + if err != nil { + undoChanges() + return err + } + + for block, limit := range diskLimits { + err = c.CGroupSet("blkio.throttle.read_bps_device", fmt.Sprintf("%s %d", block, limit.readBps)) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("blkio.throttle.read_iops_device", fmt.Sprintf("%s %d", block, limit.readIops)) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("blkio.throttle.write_bps_device", fmt.Sprintf("%s %d", block, limit.writeBps)) + if err != nil { + undoChanges() + return err + } + + err = c.CGroupSet("blkio.throttle.write_iops_device", fmt.Sprintf("%s %d", block, limit.writeIops)) + if err != nil { + undoChanges() + return err + } + } + } + } + + // Finally, apply the changes to the database + tx, err := dbBegin(c.daemon.db) + if err != nil { + undoChanges() + return err + } + + err = dbContainerConfigClear(tx, c.id) + if err != nil { + tx.Rollback() + undoChanges() + return err + } + + err = dbContainerConfigInsert(tx, c.id, args.Config) + if err != nil { + tx.Rollback() + undoChanges() + return err + } + + err = dbContainerProfilesInsert(tx, c.id, args.Profiles) + if err != nil { + tx.Rollback() + undoChanges() + return err + } + + err = dbDevicesAdd(tx, "container", int64(c.id), args.Devices) + if err != nil { + tx.Rollback() + undoChanges() + return err + } + + err = dbContainerUpdate(tx, c.id, c.architecture, c.ephemeral) + if err != nil { + tx.Rollback() + undoChanges() + return err + } + + if err := txCommit(tx); err != nil { + undoChanges() + return err + } + + return nil +} + +func (c *containerLXC) Export(w io.Writer) error { + if c.IsRunning() { + return fmt.Errorf("Cannot export a running container as an image") + } + + // Start the storage + err := c.StorageStart() + if err != nil { + return err + } + defer c.StorageStop() + + // Unshift the container + idmap, err := c.LastIdmapSet() + if err != nil { + return err + } + + if idmap != nil { + if err := idmap.UnshiftRootfs(c.RootfsPath()); err != nil { + return err + } + + defer idmap.ShiftRootfs(c.RootfsPath()) + } + + // Create the tarball + tw := tar.NewWriter(w) + + // Keep track of the first path we saw for each path 
with nlink>1 + linkmap := map[uint64]string{} + cDir := c.Path() + + // Path inside the tar image is the pathname starting after cDir + offset := len(cDir) + 1 + + writeToTar := func(path string, fi os.FileInfo, err error) error { + if err := c.tarStoreFile(linkmap, offset, tw, path, fi); err != nil { + shared.Debugf("Error tarring up %s: %s", path, err) + return err + } + return nil + } + + // Look for metadata.yaml + fnam := filepath.Join(cDir, "metadata.yaml") + if !shared.PathExists(fnam) { + // Generate a new metadata.yaml + f, err := ioutil.TempFile("", "lxd_lxd_metadata_") + if err != nil { + tw.Close() + return err + } + defer os.Remove(f.Name()) + + // Get the container's architecture + var arch string + if c.IsSnapshot() { + parentName := strings.SplitN(c.name, shared.SnapshotDelimiter, 2)[0] + parent, err := containerLoadByName(c.daemon, parentName) + if err != nil { + tw.Close() + return err + } + + arch, _ = shared.ArchitectureName(parent.Architecture()) + } else { + arch, _ = shared.ArchitectureName(c.architecture) + } + + if arch == "" { + arch, err = shared.ArchitectureName(c.daemon.architectures[0]) + if err != nil { + return err + } + } + + // Fill in the metadata + meta := imageMetadata{} + meta.Architecture = arch + meta.CreationDate = time.Now().UTC().Unix() + + data, err := yaml.Marshal(&meta) + if err != nil { + tw.Close() + return err + } + + // Write the actual file + f.Write(data) + f.Close() + + fi, err := os.Lstat(f.Name()) + if err != nil { + tw.Close() + return err + } + + tmpOffset := len(path.Dir(f.Name())) + 1 + if err := c.tarStoreFile(linkmap, tmpOffset, tw, f.Name(), fi); err != nil { + shared.Debugf("Error writing to tarfile: %s", err) + tw.Close() + return err + } + + fnam = f.Name() + } else { + // Include metadata.yaml in the tarball + fi, err := os.Lstat(fnam) + if err != nil { + shared.Debugf("Error statting %s during export", fnam) + tw.Close() + return err + } + + if err := c.tarStoreFile(linkmap, offset, tw, fnam, fi); err != nil { + shared.Debugf("Error writing to tarfile: %s", err) + tw.Close() + return err + } + } + + // Include all the rootfs files + fnam = c.RootfsPath() + filepath.Walk(fnam, writeToTar) + + // Include all the templates + fnam = c.TemplatesPath() + if shared.PathExists(fnam) { + filepath.Walk(fnam, writeToTar) + } + + return tw.Close() +} + +func (c *containerLXC) Checkpoint(opts lxc.CheckpointOptions) error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + return c.c.Checkpoint(opts) +} + +func (c *containerLXC) TemplateApply(trigger string) error { + // If there's no metadata, just return + fname := filepath.Join(c.Path(), "metadata.yaml") + if !shared.PathExists(fname) { + return nil + } + + // Parse the metadata + content, err := ioutil.ReadFile(fname) + if err != nil { + return err + } + + metadata := new(imageMetadata) + err = yaml.Unmarshal(content, &metadata) + + if err != nil { + return fmt.Errorf("Could not parse %s: %v", fname, err) + } + + // Go through the templates + for templatePath, template := range metadata.Templates { + var w *os.File + + // Check if the template should be applied now + found := false + for _, tplTrigger := range template.When { + if tplTrigger == trigger { + found = true + break + } + } + + if !found { + continue + } + + // Open the file to template, create if needed + fullpath := filepath.Join(c.RootfsPath(), strings.TrimLeft(templatePath, "/")) + if shared.PathExists(fullpath) { + // Open the existing file + w, err = os.Create(fullpath) + if err 
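// ---- editor's note: when a container has no metadata.yaml, Export() above
// fabricates one holding just the architecture and creation date. A sketch
// of that document, assuming yaml field names matching the code's use of
// the struct (the exact imageMetadata definition lives elsewhere in this
// tree); the yaml package is gopkg.in/yaml.v2:
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

type imageMetadata struct {
	Architecture string `yaml:"architecture"`
	CreationDate int64  `yaml:"creation_date"`
}

func main() {
	meta := imageMetadata{
		Architecture: "x86_64",
		CreationDate: time.Now().UTC().Unix(),
	}
	data, err := yaml.Marshal(&meta)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(data)) // architecture: x86_64 / creation_date: <unix time>
}
// ---- end note ----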
!= nil { + return err + } + } else { + // Create a new one + uid := 0 + gid := 0 + + // Get the right uid and gid for the container + if !c.IsPrivileged() { + uid, gid = c.idmapset.ShiftIntoNs(0, 0) + } + + // Create the directories leading to the file + shared.MkdirAllOwner(path.Dir(fullpath), 0755, uid, gid) + + // Create the file itself + w, err = os.Create(fullpath) + if err != nil { + return err + } + + // Fix ownership and mode + if !c.IsPrivileged() { + w.Chown(uid, gid) + } + w.Chmod(0644) + } + defer w.Close() + + // Read the template + tplString, err := ioutil.ReadFile(filepath.Join(c.TemplatesPath(), template.Template)) + if err != nil { + return err + } + + tpl, err := pongo2.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}") + if err != nil { + return err + } + + // Figure out the architecture + arch, err := shared.ArchitectureName(c.architecture) + if err != nil { + arch, err = shared.ArchitectureName(c.daemon.architectures[0]) + if err != nil { + return err + } + } + + // Generate the metadata + containerMeta := make(map[string]string) + containerMeta["name"] = c.name + containerMeta["architecture"] = arch + + if c.ephemeral { + containerMeta["ephemeral"] = "true" + } else { + containerMeta["ephemeral"] = "false" + } + + if c.IsPrivileged() { + containerMeta["privileged"] = "true" + } else { + containerMeta["privileged"] = "false" + } + + configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value { + val, ok := c.expandedConfig[confKey.String()] + if !ok { + return confDefault + } + + return pongo2.AsValue(strings.TrimRight(val, "\r\n")) + } + + // Render the template + tpl.ExecuteWriter(pongo2.Context{"trigger": trigger, + "path": templatePath, + "container": containerMeta, + "config": c.expandedConfig, + "devices": c.expandedDevices, + "properties": template.Properties, + "config_get": configGet}, w) + } + + return nil +} + +func (c *containerLXC) FilePull(srcpath string, dstpath string) error { + // Setup container storage if needed + if !c.IsRunning() { + err := c.StorageStart() + if err != nil { + return err + } + } + + // Get the file from the container + out, err := exec.Command( + c.daemon.execPath, + "forkgetfile", + c.RootfsPath(), + fmt.Sprintf("%d", c.InitPID()), + dstpath, + srcpath, + ).CombinedOutput() + + // Tear down container storage if needed + if !c.IsRunning() { + err := c.StorageStop() + if err != nil { + return err + } + } + + // Process forkgetfile response + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.Debugf("forkgetfile: %s", line) + } + } + + if err != nil { + return fmt.Errorf( + "Error calling 'lxd forkgetfile %s %d %s': err='%v'", + dstpath, + c.InitPID(), + srcpath, + err) + } + + return nil +} + +func (c *containerLXC) FilePush(srcpath string, dstpath string, uid int, gid int, mode os.FileMode) error { + // Map uid and gid if needed + idmapset, err := c.LastIdmapSet() + if err != nil { + return err + } + + if idmapset != nil { + uid, gid = idmapset.ShiftIntoNs(uid, gid) + } + + // Setup container storage if needed + if !c.IsRunning() { + err := c.StorageStart() + if err != nil { + return err + } + } + + // Push the file to the container + out, err := exec.Command( + c.daemon.execPath, + "forkputfile", + c.RootfsPath(), + fmt.Sprintf("%d", c.InitPID()), + srcpath, + dstpath, + fmt.Sprintf("%d", uid), + fmt.Sprintf("%d", gid), + fmt.Sprintf("%d", mode&os.ModePerm), + ).CombinedOutput() + + // Tear down container storage if needed + if 
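// ---- editor's note: TemplateApply() above renders container templates via
// pongo2 (github.com/flosch/pongo2), wrapping them in autoescape-off tags.
// Minimal standalone usage mirroring that call pattern:
package main

import (
	"fmt"

	"github.com/flosch/pongo2"
)

func main() {
	tpl, err := pongo2.FromString(
		"{% autoescape off %}hostname: {{ container.name }}{% endautoescape %}")
	if err != nil {
		panic(err)
	}

	out, err := tpl.Execute(pongo2.Context{
		"container": map[string]string{"name": "c1"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // hostname: c1
}
// ---- end note ----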
!c.IsRunning() { + err := c.StorageStop() + if err != nil { + return err + } + } + + // Process forkputfile response + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.Debugf("forkputfile: %s", line) + } + } + + if err != nil { + return fmt.Errorf( + "Error calling 'lxd forkputfile %s %d %s %d %d %d': err='%v'", + srcpath, + c.InitPID(), + dstpath, + uid, + gid, + mode, + err) + } + + return nil +} + +func (c *containerLXC) diskState() map[string]shared.ContainerStateDisk { + disk := map[string]shared.ContainerStateDisk{} + + for name, d := range c.expandedDevices { + if d["type"] != "disk" { + continue + } + + if d["path"] != "/" { + continue + } + + usage, err := c.storage.ContainerGetUsage(c) + if err != nil { + continue + } + + disk[name] = shared.ContainerStateDisk{Usage: usage} + } + + return disk +} + +func (c *containerLXC) memoryState() shared.ContainerStateMemory { + memory := shared.ContainerStateMemory{} + + if !cgMemoryController { + return memory + } + + // Memory in bytes + value, err := c.CGroupGet("memory.usage_in_bytes") + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + valueInt = -1 + } + memory.Usage = valueInt + + // Memory peak in bytes + value, err = c.CGroupGet("memory.max_usage_in_bytes") + valueInt, err = strconv.ParseInt(value, 10, 64) + if err != nil { + valueInt = -1 + } + + memory.UsagePeak = valueInt + + if cgSwapAccounting { + // Swap in bytes + value, err := c.CGroupGet("memory.memsw.usage_in_bytes") + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + valueInt = -1 + } + + memory.SwapUsage = valueInt - memory.Usage + + // Swap peak in bytes + value, err = c.CGroupGet("memory.memsw.max_usage_in_bytes") + valueInt, err = strconv.ParseInt(value, 10, 64) + if err != nil { + valueInt = -1 + } + + memory.SwapUsagePeak = valueInt - memory.UsagePeak + } + + return memory +} + +func (c *containerLXC) networkState() map[string]shared.ContainerStateNetwork { + result := map[string]shared.ContainerStateNetwork{} + + pid := c.InitPID() + if pid < 1 { + return result + } + + // Get the network state from the container + out, err := exec.Command( + c.daemon.execPath, + "forkgetnet", + fmt.Sprintf("%d", pid)).CombinedOutput() + + // Process forkgetnet response + if err != nil { + shared.Log.Error("Error calling 'lxd forkgetnet'", log.Ctx{"container": c.name, "output": string(out), "pid": pid}) + return result + } + + networks := map[string]shared.ContainerStateNetwork{} + + err = json.Unmarshal(out, &networks) + if err != nil { + shared.Log.Error("Failed to parse forkgetnet JSON", log.Ctx{"container": c.name, "err": err}) + return result + } + + // Add HostName field + for netName, net := range networks { + net.HostName = c.getHostInterface(netName) + result[netName] = net + } + + return result +} + +func (c *containerLXC) processesState() int64 { + // Return 0 if not running + pid := c.InitPID() + if pid == -1 { + return 0 + } + + if cgPidsController { + value, err := c.CGroupGet("pids.current") + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return -1 + } + + return valueInt + } + + pids := []int64{int64(pid)} + + // Go through the pid list, adding new pids at the end so we go through them all + for i := 0; i < len(pids); i++ { + fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i]) + fcont, err := ioutil.ReadFile(fname) + if err != nil { + // The process terminated during execution of this loop + continue + } + + content := 
strings.Split(string(fcont), " ") + for j := 0; j < len(content); j++ { + pid, err := strconv.ParseInt(content[j], 10, 64) + if err == nil { + pids = append(pids, pid) + } + } + } + + return int64(len(pids)) +} + +func (c *containerLXC) tarStoreFile(linkmap map[uint64]string, offset int, tw *tar.Writer, path string, fi os.FileInfo) error { + var err error + var major, minor, nlink int + var ino uint64 + + link := "" + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + link, err = os.Readlink(path) + if err != nil { + return err + } + } + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return err + } + hdr.Name = path[offset:] + if fi.IsDir() || fi.Mode()&os.ModeSymlink == os.ModeSymlink { + hdr.Size = 0 + } else { + hdr.Size = fi.Size() + } + + hdr.Uid, hdr.Gid, major, minor, ino, nlink, err = shared.GetFileStat(path) + if err != nil { + return fmt.Errorf("error getting file info: %s", err) + } + + // Unshift the id under /rootfs/ for unpriv containers + if !c.IsPrivileged() && strings.HasPrefix(hdr.Name, "/rootfs") { + hdr.Uid, hdr.Gid = c.idmapset.ShiftFromNs(hdr.Uid, hdr.Gid) + if hdr.Uid == -1 || hdr.Gid == -1 { + return nil + } + } + if major != -1 { + hdr.Devmajor = int64(major) + hdr.Devminor = int64(minor) + } + + // If it's a hardlink we've already seen use the old name + if fi.Mode().IsRegular() && nlink > 1 { + if firstpath, found := linkmap[ino]; found { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = firstpath + hdr.Size = 0 + } else { + linkmap[ino] = hdr.Name + } + } + + // TODO: handle xattrs + if err := tw.WriteHeader(hdr); err != nil { + return fmt.Errorf("error writing header: %s", err) + } + + if hdr.Typeflag == tar.TypeReg { + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("tarStoreFile: error opening file: %s", err) + } + defer f.Close() + if _, err := io.Copy(tw, f); err != nil { + return fmt.Errorf("error copying file %s", err) + } + } + return nil +} + +// Storage functions +func (c *containerLXC) Storage() storage { + return c.storage +} + +func (c *containerLXC) StorageStart() error { + if c.IsSnapshot() { + return c.storage.ContainerSnapshotStart(c) + } + + return c.storage.ContainerStart(c) +} + +func (c *containerLXC) StorageStop() error { + if c.IsSnapshot() { + return c.storage.ContainerSnapshotStop(c) + } + + return c.storage.ContainerStop(c) +} + +// Mount handling +func (c *containerLXC) insertMount(source, target, fstype string, flags int) error { + var err error + + // Get the init PID + pid := c.InitPID() + if pid == -1 { + // Container isn't running + return fmt.Errorf("Can't insert mount into stopped container") + } + + // Create the temporary mount target + var tmpMount string + if shared.IsDir(source) { + tmpMount, err = ioutil.TempDir(shared.VarPath("shmounts", c.name), "lxdmount_") + if err != nil { + return fmt.Errorf("Failed to create shmounts path: %s", err) + } + } else { + f, err := ioutil.TempFile(shared.VarPath("shmounts", c.name), "lxdmount_") + if err != nil { + return fmt.Errorf("Failed to create shmounts path: %s", err) + } + + tmpMount = f.Name() + f.Close() + } + defer os.Remove(tmpMount) + + // Mount the filesystem + err = syscall.Mount(source, tmpMount, fstype, uintptr(flags), "") + if err != nil { + return fmt.Errorf("Failed to setup temporary mount: %s", err) + } + defer syscall.Unmount(tmpMount, syscall.MNT_DETACH) + + // Move the mount inside the container + mntsrc := filepath.Join("/dev/.lxd-mounts", filepath.Base(tmpMount)) + pidStr := fmt.Sprintf("%d", pid) + + out, err := 
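// ---- editor's note: a standalone version of the PID walk in
// processesState() above, used when the pids cgroup controller is absent:
// breadth-first over /proc/<pid>/task/<pid>/children starting at init.
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

func countProcesses(initPID int64) int64 {
	pids := []int64{initPID}
	for i := 0; i < len(pids); i++ {
		fname := fmt.Sprintf("/proc/%d/task/%d/children", pids[i], pids[i])
		fcont, err := ioutil.ReadFile(fname)
		if err != nil {
			continue // the process exited while we were walking
		}
		for _, field := range strings.Fields(string(fcont)) {
			if pid, err := strconv.ParseInt(field, 10, 64); err == nil {
				pids = append(pids, pid)
			}
		}
	}
	return int64(len(pids))
}

func main() {
	fmt.Println(countProcesses(1)) // counts every descendant of PID 1
}
// ---- end note ----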
exec.Command(c.daemon.execPath, "forkmount", pidStr, mntsrc, target).CombinedOutput() + + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.Debugf("forkmount: %s", line) + } + } + + if err != nil { + return fmt.Errorf( + "Error calling 'lxd forkmount %s %s %s': err='%v'", + pidStr, + mntsrc, + target, + err) + } + + return nil +} + +func (c *containerLXC) removeMount(mount string) error { + // Get the init PID + pid := c.InitPID() + if pid == -1 { + // Container isn't running + return fmt.Errorf("Can't insert mount into stopped container") + } + + // Remove the mount from the container + pidStr := fmt.Sprintf("%d", pid) + out, err := exec.Command(c.daemon.execPath, "forkumount", pidStr, mount).CombinedOutput() + + if string(out) != "" { + for _, line := range strings.Split(strings.TrimRight(string(out), "\n"), "\n") { + shared.Debugf("forkumount: %s", line) + } + } + + if err != nil { + return fmt.Errorf( + "Error calling 'lxd forkumount %s %s': err='%v'", + pidStr, + mount, + err) + } + + return nil +} + +// Unix devices handling +func (c *containerLXC) createUnixDevice(name string, m shared.Device) (string, error) { + var err error + var major, minor int + + // Our device paths + srcPath := m["path"] + tgtPath := strings.TrimPrefix(srcPath, "/") + devName := fmt.Sprintf("unix.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + + // Get the major/minor of the device we want to create + if m["major"] == "" && m["minor"] == "" { + // If no major and minor are set, use those from the device on the host + _, major, minor, err = deviceGetAttributes(srcPath) + if err != nil { + return "", fmt.Errorf("Failed to get device attributes: %s", err) + } + } else if m["major"] == "" || m["minor"] == "" { + return "", fmt.Errorf("Both major and minor must be supplied for devices") + } else { + major, err = strconv.Atoi(m["major"]) + if err != nil { + return "", fmt.Errorf("Bad major %s in device %s", m["major"], m["path"]) + } + + minor, err = strconv.Atoi(m["minor"]) + if err != nil { + return "", fmt.Errorf("Bad minor %s in device %s", m["minor"], m["path"]) + } + } + + // Get the device mode + mode := os.FileMode(0660) + if m["mode"] != "" { + tmp, err := deviceModeOct(m["mode"]) + if err != nil { + return "", fmt.Errorf("Bad mode %s in device %s", m["mode"], m["path"]) + } + mode = os.FileMode(tmp) + } + + if m["type"] == "unix-block" { + mode |= syscall.S_IFBLK + } else { + mode |= syscall.S_IFCHR + } + + // Get the device owner + uid := 0 + gid := 0 + + if m["uid"] != "" { + uid, err = strconv.Atoi(m["uid"]) + if err != nil { + return "", fmt.Errorf("Invalid uid %s in device %s", m["uid"], m["path"]) + } + } + + if m["gid"] != "" { + gid, err = strconv.Atoi(m["gid"]) + if err != nil { + return "", fmt.Errorf("Invalid gid %s in device %s", m["gid"], m["path"]) + } + } + + // Create the devices directory if missing + if !shared.PathExists(c.DevicesPath()) { + os.Mkdir(c.DevicesPath(), 0711) + if err != nil { + return "", fmt.Errorf("Failed to create devices path: %s", err) + } + } + + // Clean any existing entry + if shared.PathExists(devPath) { + err = os.Remove(devPath) + if err != nil { + return "", fmt.Errorf("Failed to remove existing entry: %s", err) + } + } + + // Create the new entry + if err := syscall.Mknod(devPath, uint32(mode), minor|(major<<8)); err != nil { + return "", fmt.Errorf("Failed to create device %s for %s: %s", devPath, m["path"], err) + } + + if err := 
os.Chown(devPath, uid, gid); err != nil { + return "", fmt.Errorf("Failed to chown device %s: %s", devPath, err) + } + + if c.idmapset != nil { + if err := c.idmapset.ShiftFile(devPath); err != nil { + // uidshift failing is weird, but not a big problem. Log and proceed + shared.Debugf("Failed to uidshift device %s: %s\n", m["path"], err) + } + } + + return devPath, nil +} + +func (c *containerLXC) insertUnixDevice(name string, m shared.Device) error { + // Check that the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't insert device into stopped container") + } + + // Create the device on the host + devPath, err := c.createUnixDevice(name, m) + if err != nil { + return fmt.Errorf("Failed to setup device: %s", err) + } + + // Bind-mount it into the container + tgtPath := strings.TrimSuffix(m["path"], "/") + err = c.insertMount(devPath, tgtPath, "none", syscall.MS_BIND) + if err != nil { + return fmt.Errorf("Failed to add mount for device: %s", err) + } + + // Add the new device cgroup rule + dType, dMajor, dMinor, err := deviceGetAttributes(devPath) + if err != nil { + return fmt.Errorf("Failed to get device attributes: %s", err) + } + + if err := c.CGroupSet("devices.allow", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)); err != nil { + return fmt.Errorf("Failed to add cgroup rule for device") + } + + return nil +} + +func (c *containerLXC) removeUnixDevice(name string, m shared.Device) error { + // Check that the container is running + pid := c.InitPID() + if pid == -1 { + return fmt.Errorf("Can't remove device from stopped container") + } + + // Figure out the paths + srcPath := m["path"] + tgtPath := strings.TrimPrefix(srcPath, "/") + devName := fmt.Sprintf("unix.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + + // Remove the device cgroup rule + dType, dMajor, dMinor, err := deviceGetAttributes(devPath) + if err != nil { + return err + } + + err = c.CGroupSet("devices.deny", fmt.Sprintf("%s %d:%d rwm", dType, dMajor, dMinor)) + if err != nil { + return err + } + + // Remove the bind-mount from the container + ctnPath := fmt.Sprintf("/proc/%d/root/%s", pid, tgtPath) + + if shared.PathExists(ctnPath) { + err = c.removeMount(m["path"]) + if err != nil { + return fmt.Errorf("Error unmounting the device: %s", err) + } + + err = os.Remove(ctnPath) + if err != nil { + return fmt.Errorf("Error removing the device: %s", err) + } + } + + // Remove the host side + err = os.Remove(devPath) + if err != nil { + return err + } + + return nil +} + +func (c *containerLXC) removeUnixDevices() error { + // Check that we indeed have devices to remove + if !shared.PathExists(c.DevicesPath()) { + return nil + } + + // Load the directory listing + dents, err := ioutil.ReadDir(c.DevicesPath()) + if err != nil { + return err + } + + // Go through all the unix devices + for _, f := range dents { + // Skip non-Unix devices + if !strings.HasPrefix(f.Name(), "unix.") { + continue + } + + // Remove the entry + err := os.Remove(filepath.Join(c.DevicesPath(), f.Name())) + if err != nil { + return err + } + } + + return nil +} + +// Network device handling +func (c *containerLXC) createNetworkDevice(name string, m shared.Device) (string, error) { + var dev, n1 string + + if shared.StringInSlice(m["nictype"], []string{"bridged", "p2p", "macvlan"}) { + // Host Virtual NIC name + if m["host_name"] != "" { + n1 = m["host_name"] + } else { + n1 = deviceNextVeth() + } + } + + // Handle bridged and p2p + if shared.StringInSlice(m["nictype"], 
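// ---- editor's note: the syscall.Mknod call above encodes the device
// number as minor|(major<<8), which matches the kernel's encoding only
// while the minor number fits in 8 bits. For comparison, a sketch of the
// glibc-style makedev() packing (mkdev is a hypothetical helper):
package main

import "fmt"

func mkdev(major, minor uint32) uint64 {
	return (uint64(minor) & 0xff) |
		((uint64(major) & 0xfff) << 8) |
		((uint64(minor) &^ 0xff) << 12) |
		((uint64(major) &^ 0xfff) << 32)
}

func main() {
	fmt.Printf("0x%x\n", mkdev(8, 0))   // 0x800, same as 0|(8<<8)
	fmt.Printf("0x%x\n", mkdev(8, 300)) // 0x10082c, differs once minor > 255
}
// ---- end note ----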
[]string{"bridged", "p2p"}) { + n2 := deviceNextVeth() + + err := exec.Command("ip", "link", "add", n1, "type", "veth", "peer", "name", n2).Run() + if err != nil { + return "", fmt.Errorf("Failed to create the veth interface: %s", err) + } + + err = exec.Command("ip", "link", "set", n1, "up").Run() + if err != nil { + return "", fmt.Errorf("Failed to bring up the veth interface %s: %s", n1, err) + } + + if m["nictype"] == "bridged" { + err = exec.Command("ip", "link", "set", n1, "master", m["parent"]).Run() + if err != nil { + deviceRemoveInterface(n2) + return "", fmt.Errorf("Failed to add interface to bridge: %s", err) + } + } + + dev = n2 + } + + // Handle physical + if m["nictype"] == "physical" { + dev = m["parent"] + } + + // Handle macvlan + if m["nictype"] == "macvlan" { + + err := exec.Command("ip", "link", "add", n1, "link", m["parent"], "type", "macvlan", "mode", "bridge").Run() + if err != nil { + return "", fmt.Errorf("Failed to create the new macvlan interface: %s", err) + } + + dev = n1 + } + + // Set the MAC address + if m["hwaddr"] != "" { + err := exec.Command("ip", "link", "set", "dev", dev, "address", m["hwaddr"]).Run() + if err != nil { + deviceRemoveInterface(dev) + return "", fmt.Errorf("Failed to set the MAC address: %s", err) + } + } + + // Bring the interface up + err := exec.Command("ip", "link", "set", "dev", dev, "up").Run() + if err != nil { + deviceRemoveInterface(dev) + return "", fmt.Errorf("Failed to bring up the interface: %s", err) + } + + return dev, nil +} + +func (c *containerLXC) fillNetworkDevice(name string, m shared.Device) (shared.Device, error) { + newDevice := shared.Device{} + err := shared.DeepCopy(&m, &newDevice) + if err != nil { + return nil, err + } + + // Function to try and guess an available name + nextInterfaceName := func() (string, error) { + devNames := []string{} + + // Include all static interface names + for _, v := range c.expandedDevices { + if v["name"] != "" && !shared.StringInSlice(v["name"], devNames) { + devNames = append(devNames, v["name"]) + } + } + + // Include all currently allocated interface names + for k, v := range c.expandedConfig { + if !strings.HasPrefix(k, "volatile.") { + continue + } + + fields := strings.SplitN(k, ".", 3) + if len(fields) != 3 { + continue + } + + if fields[2] != "name" || shared.StringInSlice(v, devNames) { + continue + } + + devNames = append(devNames, v) + } + + // Attempt to include all existing interfaces + cc, err := lxc.NewContainer(c.Name(), c.daemon.lxcpath) + if err == nil { + interfaces, err := cc.Interfaces() + if err == nil { + for _, name := range interfaces { + if shared.StringInSlice(name, devNames) { + continue + } + + devNames = append(devNames, name) + } + } + } + + // Find a free ethX device + i := 0 + for { + name := fmt.Sprintf("eth%d", i) + if !shared.StringInSlice(name, devNames) { + return name, nil + } + + i += 1 + } + } + + // Fill in the MAC address + if m["nictype"] != "physical" && m["hwaddr"] == "" { + configKey := fmt.Sprintf("volatile.%s.hwaddr", name) + volatileHwaddr := c.localConfig[configKey] + if volatileHwaddr == "" { + // Generate a new MAC address + volatileHwaddr, err = deviceNextInterfaceHWAddr() + if err != nil { + return nil, err + } + + c.localConfig[configKey] = volatileHwaddr + c.expandedConfig[configKey] = volatileHwaddr + + // Update the database + tx, err := dbBegin(c.daemon.db) + if err != nil { + return nil, err + } + + err = dbContainerConfigInsert(tx, c.id, map[string]string{configKey: volatileHwaddr}) + if err != nil { + tx.Rollback() 
+ return nil, err + } + + err = txCommit(tx) + if err != nil { + return nil, err + } + } + newDevice["hwaddr"] = volatileHwaddr + } + + // Fill in the name + if m["name"] == "" { + configKey := fmt.Sprintf("volatile.%s.name", name) + volatileName := c.localConfig[configKey] + if volatileName == "" { + // Generate a new interface name + volatileName, err = nextInterfaceName() + if err != nil { + return nil, err + } + + c.localConfig[configKey] = volatileName + c.expandedConfig[configKey] = volatileName + + // Update the database + tx, err := dbBegin(c.daemon.db) + if err != nil { + return nil, err + } + + err = dbContainerConfigInsert(tx, c.id, map[string]string{configKey: volatileName}) + if err != nil { + tx.Rollback() + return nil, err + } + + err = txCommit(tx) + if err != nil { + return nil, err + } + } + newDevice["name"] = volatileName + } + + return newDevice, nil +} + +func (c *containerLXC) insertNetworkDevice(name string, m shared.Device) error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + // Fill in some fields from volatile + m, err = c.fillNetworkDevice(name, m) + if err != nil { + return err + } + + if m["hwaddr"] == "" || m["name"] == "" { + return fmt.Errorf("Missing hwaddr or name for device: hwaddr=%s name=%s", m["hwaddr"], m["name"]) + } + + // Make sure the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't insert device into stopped container") + } + + // Create the interface + devName, err := c.createNetworkDevice(name, m) + if err != nil { + return err + } + + // Add the interface to the container + err = c.c.AttachInterface(devName, m["name"]) + if err != nil { + return fmt.Errorf("Failed to attach interface: %s: %s", devName, err) + } + + return nil +} + +func (c *containerLXC) removeNetworkDevice(name string, m shared.Device) error { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + // Fill in some fields from volatile + m, err = c.fillNetworkDevice(name, m) + if err != nil { + return err + } + + // Make sure the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't remove device from stopped container") + } + + // Get a temporary device name + var hostName string + if m["nictype"] == "physical" { + hostName = m["parent"] + } else { + hostName = deviceNextVeth() + } + + // For some reason, having network config confuses detach, so get our own go-lxc struct + cc, err := lxc.NewContainer(c.Name(), c.daemon.lxcpath) + if err != nil { + return err + } + + // Remove the interface from the container + err = cc.DetachInterfaceRename(m["name"], hostName) + if err != nil { + return fmt.Errorf("Failed to detach interface: %s: %s", m["name"], err) + } + + // If a veth, destroy it + if m["nictype"] != "physical" { + deviceRemoveInterface(hostName) + } + + return nil +} + +// Disk device handling +func (c *containerLXC) createDiskDevice(name string, m shared.Device) (string, error) { + // Prepare all the paths + srcPath := m["source"] + tgtPath := strings.TrimPrefix(m["path"], "/") + devName := fmt.Sprintf("disk.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + + // Parse the device options + isOptional := m["optional"] == "1" || m["optional"] == "true" + isReadOnly := m["readonly"] == "1" || m["readonly"] == "true" + isFile := !shared.IsDir(srcPath) && !deviceIsDevice(srcPath) + + // Check if the source exists + if !shared.PathExists(srcPath) { + if isOptional { + return "", nil + } + return "", fmt.Errorf("Source path doesn't exist") + } + + // Create the devices directory if missing + if !shared.PathExists(c.DevicesPath()) { + err := os.Mkdir(c.DevicesPath(), 0711) + if err != nil { + return "", err + } + } + + // Clean any existing entry + if shared.PathExists(devPath) { + err := os.Remove(devPath) + if err != nil { + return "", err + } + } + + // Create the mount point + if isFile { + f, err := os.Create(devPath) + if err != nil { + return "", err + } + + f.Close() + } else { + err := os.Mkdir(devPath, 0700) + if err != nil { + return "", err + } + } + + // Mount the fs + err := deviceMountDisk(srcPath, devPath, isReadOnly) + if err != nil { + return "", err + } + + return devPath, nil +} + +func (c *containerLXC) insertDiskDevice(name string, m shared.Device) error { + // Check that the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't insert device into stopped container") + } + + // Create the device on the host + devPath, err := c.createDiskDevice(name, m) + if err != nil { + return fmt.Errorf("Failed to setup device: %s", err) + } + + // Bind-mount it into the container + tgtPath := strings.TrimSuffix(m["path"], "/") + err = c.insertMount(devPath, tgtPath, "none", syscall.MS_BIND) + if err != nil { + return fmt.Errorf("Failed to add mount for device: %s", err) + } + + return nil +} + +func (c *containerLXC) removeDiskDevice(name string, m shared.Device) error { + // Check that the container is running + pid := c.InitPID() + if pid == -1 { + return fmt.Errorf("Can't remove device from stopped container") + } + + // Figure out the paths + tgtPath := strings.TrimPrefix(m["path"], "/") + devName := fmt.Sprintf("disk.%s", strings.Replace(tgtPath, "/", "-", -1)) + devPath := filepath.Join(c.DevicesPath(), devName) + + // Remove the bind-mount from the container + ctnPath := fmt.Sprintf("/proc/%d/root/%s", pid, tgtPath) + + if shared.PathExists(ctnPath) { + err := c.removeMount(m["path"]) + if err != nil { + return fmt.Errorf("Error unmounting the device: %s", err) + } + } + + // Unmount the host side + err := syscall.Unmount(devPath, syscall.MNT_DETACH) + if err != nil { + return err + } + + // Remove the host side + err = os.Remove(devPath) + if err != nil { + return err + } + + return nil +} + +func (c *containerLXC) removeDiskDevices() error { + // Check that we indeed have devices to remove + if !shared.PathExists(c.DevicesPath()) { + return nil + } + + // Load the directory listing + dents, err := ioutil.ReadDir(c.DevicesPath()) + if err != nil { + return err + } + + // Go through all the disk devices + for _, f := range dents { + // Skip non-disk entries + if !strings.HasPrefix(f.Name(), "disk.") { + continue + } + + // Always try to unmount the host side + _ = syscall.Unmount(filepath.Join(c.DevicesPath(), f.Name()), syscall.MNT_DETACH) + + // Remove the entry + err := os.Remove(filepath.Join(c.DevicesPath(), f.Name())) + if err != nil { + return err + } + } + + return nil +} + +// Block I/O limits +func (c *containerLXC) getDiskLimits() (map[string]deviceBlockLimit, error) { + result := map[string]deviceBlockLimit{} + + // Build a list of all valid block devices + validBlocks := []string{} + + dents, err := ioutil.ReadDir("/sys/class/block/") + if err != nil { + return nil, err + } + + for _, f := range dents { + fPath := filepath.Join("/sys/class/block/", f.Name()) + if shared.PathExists(fmt.Sprintf("%s/partition", fPath)) { + continue + } + + if !shared.PathExists(fmt.Sprintf("%s/dev", fPath)) { + continue + } + + block, err := ioutil.ReadFile(fmt.Sprintf("%s/dev", fPath)) + if err != nil { + 
return nil, err + } + + validBlocks = append(validBlocks, strings.TrimSuffix(string(block), "\n")) + } + + // Process all the limits + blockLimits := map[string][]deviceBlockLimit{} + for _, m := range c.expandedDevices { + if m["type"] != "disk" { + continue + } + + // Apply max limit + if m["limits.max"] != "" { + m["limits.read"] = m["limits.max"] + m["limits.write"] = m["limits.max"] + } + + // Parse the user input + readBps, readIops, writeBps, writeIops, err := deviceParseDiskLimit(m["limits.read"], m["limits.write"]) + if err != nil { + return nil, err + } + + // Set the source path + source := m["source"] + if source == "" { + source = c.RootfsPath() + } + + // Get the backing block devices (major:minor) + blocks, err := deviceGetParentBlocks(source) + if err != nil { + if readBps == 0 && readIops == 0 && writeBps == 0 && writeIops == 0 { + // If the device doesn't exist, there is no limit to clear so ignore the failure + continue + } else { + return nil, err + } + } + + device := deviceBlockLimit{readBps: readBps, readIops: readIops, writeBps: writeBps, writeIops: writeIops} + for _, block := range blocks { + blockStr := "" + + if shared.StringInSlice(block, validBlocks) { + // Straightforward entry (full block device) + blockStr = block + } else { + // Attempt to deal with a partition (guess its parent) + fields := strings.SplitN(block, ":", 2) + fields[1] = "0" + if shared.StringInSlice(fmt.Sprintf("%s:%s", fields[0], fields[1]), validBlocks) { + blockStr = fmt.Sprintf("%s:%s", fields[0], fields[1]) + } + } + + if blockStr == "" { + return nil, fmt.Errorf("Block device doesn't support quotas: %s", block) + } + + if blockLimits[blockStr] == nil { + blockLimits[blockStr] = []deviceBlockLimit{} + } + blockLimits[blockStr] = append(blockLimits[blockStr], device) + } + } + + // Average duplicate limits + for block, limits := range blockLimits { + var readBpsCount, readBpsTotal, readIopsCount, readIopsTotal, writeBpsCount, writeBpsTotal, writeIopsCount, writeIopsTotal int64 + + for _, limit := range limits { + if limit.readBps > 0 { + readBpsCount += 1 + readBpsTotal += limit.readBps + } + + if limit.readIops > 0 { + readIopsCount += 1 + readIopsTotal += limit.readIops + } + + if limit.writeBps > 0 { + writeBpsCount += 1 + writeBpsTotal += limit.writeBps + } + + if limit.writeIops > 0 { + writeIopsCount += 1 + writeIopsTotal += limit.writeIops + } + } + + device := deviceBlockLimit{} + + if readBpsCount > 0 { + device.readBps = readBpsTotal / readBpsCount + } + + if readIopsCount > 0 { + device.readIops = readIopsTotal / readIopsCount + } + + if writeBpsCount > 0 { + device.writeBps = writeBpsTotal / writeBpsCount + } + + if writeIopsCount > 0 { + device.writeIops = writeIopsTotal / writeIopsCount + } + + result[block] = device + } + + return result, nil +} + +// Network I/O limits +func (c *containerLXC) setNetworkPriority() error { + // Check that the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't set network priority on stopped container") + } + + // Don't bother if the cgroup controller doesn't exist + if !cgNetPrioController { + return nil + } + + // Extract the current priority + networkPriority := c.expandedConfig["limits.network.priority"] + if networkPriority == "" { + networkPriority = "0" + } + + networkInt, err := strconv.Atoi(networkPriority) + if err != nil { + return err + } + + // Get all the interfaces + netifs, err := net.Interfaces() + if err != nil { + return err + } + + // Check that we at least succeeded to set an entry + success := false + 
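// ---- editor's note: the "Average duplicate limits" loop above reduces to
// one rule: when several disk entries target the same backing device, each
// non-zero limit class is averaged independently. The core of that, as a
// sketch:
package main

import "fmt"

// averageNonZero averages only the entries that actually set a limit;
// zero means "unset" and must not drag the average down.
func averageNonZero(limits []int64) int64 {
	var count, total int64
	for _, l := range limits {
		if l > 0 {
			count++
			total += l
		}
	}
	if count == 0 {
		return 0
	}
	return total / count
}

func main() {
	fmt.Println(averageNonZero([]int64{0, 100 << 20, 200 << 20})) // 157286400
}
// ---- end note ----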
var lastError error + for _, netif := range netifs { + err = c.CGroupSet("net_prio.ifpriomap", fmt.Sprintf("%s %d", netif.Name, networkInt)) + if err == nil { + success = true + } else { + lastError = err + } + } + + if !success { + return fmt.Errorf("Failed to set network device priority: %s", lastError) + } + + return nil +} + +func (c *containerLXC) getHostInterface(name string) string { + if c.IsRunning() { + for i := 0; i < len(c.c.ConfigItem("lxc.network")); i++ { + nicName := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.name", i))[0] + if nicName != name { + continue + } + + veth := c.c.RunningConfigItem(fmt.Sprintf("lxc.network.%d.veth.pair", i))[0] + if veth != "" { + return veth + } + } + } + + for k, dev := range c.expandedDevices { + if dev["type"] != "nic" { + continue + } + + m, err := c.fillNetworkDevice(k, dev) + if err != nil { + m = dev + } + + if m["name"] != name { + continue + } + + return m["host_name"] + } + + return "" +} + +func (c *containerLXC) setNetworkLimits(name string, m shared.Device) error { + // We can only do limits on some network types + if m["nictype"] != "bridged" && m["nictype"] != "p2p" { + return fmt.Errorf("Network limits are only supported on bridged and p2p interfaces") + } + + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return err + } + + // Check that the container is running + if !c.IsRunning() { + return fmt.Errorf("Can't set network limits on stopped container") + } + + // Fill in some fields from volatile + m, err = c.fillNetworkDevice(name, m) + if err != nil { + return err + } + + // Look for the host side interface name + veth := c.getHostInterface(m["name"]) + + if veth == "" { + return fmt.Errorf("LXC doesn't know about this device and the host_name property isn't set, can't find the host-side veth name") + } + + // Apply max limit + if m["limits.max"] != "" { + m["limits.ingress"] = m["limits.max"] + m["limits.egress"] = m["limits.max"] + } + + // Parse the values + var ingressInt int64 + if m["limits.ingress"] != "" { + ingressInt, err = shared.ParseBitSizeString(m["limits.ingress"]) + if err != nil { + return err + } + } + + var egressInt int64 + if m["limits.egress"] != "" { + egressInt, err = shared.ParseBitSizeString(m["limits.egress"]) + if err != nil { + return err + } + } + + // Clean any existing entry + _ = exec.Command("tc", "qdisc", "del", "dev", veth, "root").Run() + _ = exec.Command("tc", "qdisc", "del", "dev", veth, "ingress").Run() + + // Apply new limits + if m["limits.ingress"] != "" { + out, err := exec.Command("tc", "qdisc", "add", "dev", veth, "root", "handle", "1:0", "htb", "default", "10").CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to create root tc qdisc: %s", out) + } + + out, err = exec.Command("tc", "class", "add", "dev", veth, "parent", "1:0", "classid", "1:10", "htb", "rate", fmt.Sprintf("%dbit", ingressInt)).CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to create limit tc class: %s", out) + } + + out, err = exec.Command("tc", "filter", "add", "dev", veth, "parent", "1:0", "protocol", "all", "u32", "match", "u32", "0", "0", "flowid", "1:1").CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to create tc filter: %s", out) + } + } + + if m["limits.egress"] != "" { + out, err := exec.Command("tc", "qdisc", "add", "dev", veth, "handle", "ffff:0", "ingress").CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to create ingress tc qdisc: %s", out) + } + + out, err = exec.Command("tc", "filter", "add", "dev", veth, "parent", 
"ffff:0", "protocol", "all", "u32", "match", "u32", "0", "0", "police", "rate", fmt.Sprintf("%dbit", egressInt), "burst", "1024k", "mtu", "64kb", "drop", "flowid", ":1").CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to create ingress tc qdisc: %s", out) + } + } + + return nil +} + +// Various state query functions +func (c *containerLXC) IsStateful() bool { + return c.stateful +} + +func (c *containerLXC) IsEphemeral() bool { + return c.ephemeral +} + +func (c *containerLXC) IsFrozen() bool { + return c.State() == "FROZEN" +} + +func (c *containerLXC) IsNesting() bool { + switch strings.ToLower(c.expandedConfig["security.nesting"]) { + case "1": + return true + case "true": + return true + } + return false +} + +func (c *containerLXC) IsPrivileged() bool { + switch strings.ToLower(c.expandedConfig["security.privileged"]) { + case "1": + return true + case "true": + return true + } + return false +} + +func (c *containerLXC) IsRunning() bool { + state := c.State() + return state != "BROKEN" && state != "STOPPED" +} + +func (c *containerLXC) IsSnapshot() bool { + return c.cType == cTypeSnapshot +} + +// Various property query functions +func (c *containerLXC) Architecture() int { + return c.architecture +} + +func (c *containerLXC) CreationDate() time.Time { + return c.creationDate +} +func (c *containerLXC) ExpandedConfig() map[string]string { + return c.expandedConfig +} + +func (c *containerLXC) ExpandedDevices() shared.Devices { + return c.expandedDevices +} + +func (c *containerLXC) Id() int { + return c.id +} + +func (c *containerLXC) IdmapSet() *shared.IdmapSet { + return c.idmapset +} + +func (c *containerLXC) InitPID() int { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return -1 + } + + return c.c.InitPid() +} + +func (c *containerLXC) LocalConfig() map[string]string { + return c.localConfig +} + +func (c *containerLXC) LocalDevices() shared.Devices { + return c.localDevices +} + +func (c *containerLXC) LastIdmapSet() (*shared.IdmapSet, error) { + lastJsonIdmap := c.LocalConfig()["volatile.last_state.idmap"] + + if lastJsonIdmap == "" { + return c.IdmapSet(), nil + } + + lastIdmap := new(shared.IdmapSet) + err := json.Unmarshal([]byte(lastJsonIdmap), &lastIdmap.Idmap) + if err != nil { + return nil, err + } + + if len(lastIdmap.Idmap) == 0 { + return nil, nil + } + + return lastIdmap, nil +} + +func (c *containerLXC) LXContainerGet() *lxc.Container { + // FIXME: This function should go away + + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return nil + } + + return c.c +} + +func (c *containerLXC) Daemon() *Daemon { + // FIXME: This function should go away + return c.daemon +} + +func (c *containerLXC) Name() string { + return c.name +} + +func (c *containerLXC) Profiles() []string { + return c.profiles +} + +func (c *containerLXC) State() string { + // Load the go-lxc struct + err := c.initLXC() + if err != nil { + return "BROKEN" + } + + return c.c.State().String() +} + +// Various container paths +func (c *containerLXC) Path() string { + return containerPath(c.Name(), c.IsSnapshot()) +} + +func (c *containerLXC) DevicesPath() string { + return shared.VarPath("devices", c.Name()) +} + +func (c *containerLXC) LogPath() string { + return shared.LogPath(c.Name()) +} + +func (c *containerLXC) LogFilePath() string { + return filepath.Join(c.LogPath(), "lxc.log") +} + +func (c *containerLXC) RootfsPath() string { + return filepath.Join(c.Path(), "rootfs") +} + +func (c *containerLXC) TemplatesPath() string { + return 
filepath.Join(c.Path(), "templates") +} + +func (c *containerLXC) StatePath() string { + return filepath.Join(c.RootfsPath(), "state") +} === added file 'src/github.com/lxc/lxd/lxd/container_post.go' --- src/github.com/lxc/lxd/lxd/container_post.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_post.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,63 @@ +package main + +import ( + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/gorilla/mux" +) + +type containerPostBody struct { + Migration bool `json:"migration"` + Name string `json:"name"` +} + +func containerPost(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + return InternalError(err) + } + + body := containerPostBody{} + if err := json.Unmarshal(buf, &body); err != nil { + return BadRequest(err) + } + + if body.Migration { + ws, err := NewMigrationSource(c) + if err != nil { + return InternalError(err) + } + + resources := map[string][]string{} + resources["containers"] = []string{name} + + op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) + } + + run := func(*operation) error { + return c.Rename(body.Name) + } + + resources := map[string][]string{} + resources["containers"] = []string{name} + + op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} === added file 'src/github.com/lxc/lxd/lxd/container_put.go' --- src/github.com/lxc/lxd/lxd/container_put.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_put.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,120 @@ +package main + +import ( + "database/sql" + "encoding/json" + "fmt" + "net/http" + + "github.com/gorilla/mux" + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +type containerPutReq struct { + Architecture string `json:"architecture"` + Config map[string]string `json:"config"` + Devices shared.Devices `json:"devices"` + Ephemeral bool `json:"ephemeral"` + Profiles []string `json:"profiles"` + Restore string `json:"restore"` +} + +/* + * Update configuration, or, if 'restore:snapshot-name' is present, restore + * the named snapshot + */ +func containerPut(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return NotFound + } + + configRaw := containerPutReq{} + if err := json.NewDecoder(r.Body).Decode(&configRaw); err != nil { + return BadRequest(err) + } + + architecture, err := shared.ArchitectureId(configRaw.Architecture) + if err != nil { + architecture = 0 + } + + var do = func(*operation) error { return nil } + + if configRaw.Restore == "" { + // Update container configuration + do = func(op *operation) error { + args := containerArgs{ + Architecture: architecture, + Config: configRaw.Config, + Devices: configRaw.Devices, + Ephemeral: configRaw.Ephemeral, + Profiles: configRaw.Profiles} + + // FIXME: should set to true when not migrating + err = c.Update(args, false) + if err != nil { + return err + } + + return nil + } + } else { + // Snapshot Restore + do = func(op *operation) error { + return containerSnapRestore(d, name, configRaw.Restore) + } + } + + resources := map[string][]string{} + resources["containers"] = 
[]string{name} + + op, err := operationCreate(operationClassTask, resources, nil, do, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func containerSnapRestore(d *Daemon, name string, snap string) error { + // normalize snapshot name + if !shared.IsSnapshot(snap) { + snap = name + shared.SnapshotDelimiter + snap + } + + shared.Log.Info( + "RESTORE => Restoring snapshot", + log.Ctx{ + "snapshot": snap, + "container": name}) + + c, err := containerLoadByName(d, name) + if err != nil { + shared.Log.Error( + "RESTORE => loadcontainerLXD() failed", + log.Ctx{ + "container": name, + "err": err}) + return err + } + + source, err := containerLoadByName(d, snap) + if err != nil { + switch err { + case sql.ErrNoRows: + return fmt.Errorf("snapshot %s does not exist", snap) + default: + return err + } + } + + if err := c.Restore(source); err != nil { + return err + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxd/container_snapshot.go' --- src/github.com/lxc/lxd/lxd/container_snapshot.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_snapshot.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,250 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/gorilla/mux" + + "github.com/lxc/lxd/shared" +) + +type containerSnapshotPostReq struct { + Name string `json:"name"` + Stateful bool `json:"stateful"` +} + +func containerSnapshotsGet(d *Daemon, r *http.Request) Response { + recursionStr := r.FormValue("recursion") + recursion, err := strconv.Atoi(recursionStr) + if err != nil { + recursion = 0 + } + + cname := mux.Vars(r)["name"] + c, err := containerLoadByName(d, cname) + if err != nil { + return SmartError(err) + } + + snaps, err := c.Snapshots() + if err != nil { + return SmartError(err) + } + + resultString := []string{} + resultMap := []shared.Jmap{} + + for _, snap := range snaps { + snapName := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2)[1] + if recursion == 0 { + url := fmt.Sprintf("/%s/containers/%s/snapshots/%s", shared.APIVersion, cname, snapName) + resultString = append(resultString, url) + } else { + body := shared.Jmap{ + "name": snapName, + "created_at": snap.CreationDate(), + "stateful": snap.IsStateful()} + resultMap = append(resultMap, body) + } + } + + if recursion == 0 { + return SyncResponse(true, resultString) + } + + return SyncResponse(true, resultMap) +} + +/* + * Note, the code below doesn't deal with snapshots of snapshots. + * To do that, we'll need to weed out based on # slashes in names + */ +func nextSnapshot(d *Daemon, name string) int { + base := name + shared.SnapshotDelimiter + "snap" + length := len(base) + q := fmt.Sprintf("SELECT MAX(name) FROM containers WHERE type=? AND SUBSTR(name,1,?)=?") + var numstr string + inargs := []interface{}{cTypeSnapshot, length, base} + outfmt := []interface{}{numstr} + results, err := dbQueryScan(d.db, q, inargs, outfmt) + if err != nil { + return 0 + } + max := 0 + + for _, r := range results { + numstr = r[0].(string) + if len(numstr) <= length { + continue + } + substr := numstr[length:] + var num int + count, err := fmt.Sscanf(substr, "%d", &num) + if err != nil || count != 1 { + continue + } + if num >= max { + max = num + 1 + } + } + + return max +} + +func containerSnapshotsPost(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + /* + * snapshot is a three step operation: + * 1. choose a new name + * 2. copy the database info over + * 3. 
copy over the rootfs + */ + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + req := containerSnapshotPostReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + if req.Name == "" { + // come up with a name + i := nextSnapshot(d, name) + req.Name = fmt.Sprintf("snap%d", i) + } + + fullName := name + + shared.SnapshotDelimiter + + req.Name + + snapshot := func(op *operation) error { + args := containerArgs{ + Name: fullName, + Ctype: cTypeSnapshot, + Config: c.LocalConfig(), + Profiles: c.Profiles(), + Ephemeral: c.IsEphemeral(), + BaseImage: c.ExpandedConfig()["volatile.base_image"], + Architecture: c.Architecture(), + Devices: c.LocalDevices(), + Stateful: req.Stateful, + } + + _, err := containerCreateAsSnapshot(d, args, c) + if err != nil { + return err + } + + return nil + } + + resources := map[string][]string{} + resources["containers"] = []string{name} + + op, err := operationCreate(operationClassTask, resources, nil, snapshot, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func snapshotHandler(d *Daemon, r *http.Request) Response { + containerName := mux.Vars(r)["name"] + snapshotName := mux.Vars(r)["snapshotName"] + + sc, err := containerLoadByName( + d, + containerName+ + shared.SnapshotDelimiter+ + snapshotName) + if err != nil { + return SmartError(err) + } + + switch r.Method { + case "GET": + return snapshotGet(sc, snapshotName) + case "POST": + return snapshotPost(r, sc, containerName) + case "DELETE": + return snapshotDelete(sc, snapshotName) + default: + return NotFound + } +} + +func snapshotGet(sc container, name string) Response { + body := shared.Jmap{ + "name": name, + "created_at": sc.CreationDate(), + "stateful": shared.PathExists(sc.StatePath())} + return SyncResponse(true, body) +} + +func snapshotPost(r *http.Request, sc container, containerName string) Response { + raw := shared.Jmap{} + if err := json.NewDecoder(r.Body).Decode(&raw); err != nil { + return BadRequest(err) + } + + migration, err := raw.GetBool("migration") + if err == nil && migration { + ws, err := NewMigrationSource(sc) + if err != nil { + return SmartError(err) + } + + resources := map[string][]string{} + resources["containers"] = []string{containerName} + + op, err := operationCreate(operationClassWebsocket, resources, ws.Metadata(), ws.Do, nil, ws.Connect) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) + } + + newName, err := raw.GetString("name") + if err != nil { + return BadRequest(err) + } + + rename := func(op *operation) error { + return sc.Rename(containerName + shared.SnapshotDelimiter + newName) + } + + resources := map[string][]string{} + resources["containers"] = []string{containerName} + + op, err := operationCreate(operationClassTask, resources, nil, rename, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func snapshotDelete(sc container, name string) Response { + remove := func(op *operation) error { + return sc.Delete() + } + + resources := map[string][]string{} + resources["containers"] = []string{sc.Name()} + + op, err := operationCreate(operationClassTask, resources, nil, remove, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} === added file 'src/github.com/lxc/lxd/lxd/container_state.go' --- src/github.com/lxc/lxd/lxd/container_state.go 1970-01-01 00:00:00 +0000 +++ 
src/github.com/lxc/lxd/lxd/container_state.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,141 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/gorilla/mux" + + "github.com/lxc/lxd/shared" +) + +type containerStatePutReq struct { + Action string `json:"action"` + Timeout int `json:"timeout"` + Force bool `json:"force"` + Stateful bool `json:"stateful"` +} + +func containerState(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + state, err := c.RenderState() + if err != nil { + return InternalError(err) + } + + return SyncResponse(true, state) +} + +func containerStatePut(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + raw := containerStatePutReq{} + + // We default to -1 (i.e. no timeout) here instead of 0 (instant + // timeout). + raw.Timeout = -1 + + if err := json.NewDecoder(r.Body).Decode(&raw); err != nil { + return BadRequest(err) + } + + c, err := containerLoadByName(d, name) + if err != nil { + return SmartError(err) + } + + var do func(*operation) error + switch shared.ContainerAction(raw.Action) { + case shared.Start: + do = func(op *operation) error { + if err = c.Start(raw.Stateful); err != nil { + return err + } + return nil + } + case shared.Stop: + if raw.Stateful { + do = func(op *operation) error { + err := c.Stop(raw.Stateful) + if err != nil { + return err + } + + return nil + } + } else if raw.Timeout == 0 || raw.Force { + do = func(op *operation) error { + err = c.Stop(false) + if err != nil { + return err + } + + if c.IsEphemeral() { + c.Delete() + } + + return nil + } + } else { + do = func(op *operation) error { + err = c.Shutdown(time.Duration(raw.Timeout) * time.Second) + if err != nil { + return err + } + + if c.IsEphemeral() { + c.Delete() + } + + return nil + } + } + case shared.Restart: + do = func(op *operation) error { + if raw.Timeout == 0 || raw.Force { + err = c.Stop(false) + if err != nil { + return err + } + } else { + err = c.Shutdown(time.Duration(raw.Timeout) * time.Second) + if err != nil { + return err + } + } + err = c.Start(false) + if err != nil { + return err + } + + return nil + } + case shared.Freeze: + do = func(op *operation) error { + return c.Freeze() + } + case shared.Unfreeze: + do = func(op *operation) error { + return c.Unfreeze() + } + default: + return BadRequest(fmt.Errorf("unknown action %s", raw.Action)) + } + + resources := map[string][]string{} + resources["containers"] = []string{name} + + op, err := operationCreate(operationClassTask, resources, nil, do, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} === added file 'src/github.com/lxc/lxd/lxd/container_test.go' --- src/github.com/lxc/lxd/lxd/container_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/container_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,220 @@ +package main + +import ( + "github.com/lxc/lxd/shared" +) + +func (suite *lxdTestSuite) TestContainer_ProfilesDefault() { + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + profiles := c.Profiles() + suite.Len( + profiles, + 1, + "No default profile created on containerCreateInternal.") + + suite.Equal( + "default", + profiles[0], + "First profile should be the default profile.") +} + +func (suite *lxdTestSuite) TestContainer_ProfilesMulti() { + // 
Create an unprivileged profile + _, err := dbProfileCreate( + suite.d.db, + "unprivileged", + "unprivileged", + map[string]string{"security.privileged": "true"}, + shared.Devices{}) + + suite.Req.Nil(err, "Failed to create the unprivileged profile.") + defer func() { + dbProfileDelete(suite.d.db, "unprivileged") + }() + + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Profiles: []string{"default", "unprivileged"}, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + profiles := c.Profiles() + suite.Len( + profiles, + 2, + "Didn't get both profiles in containerCreateInternal.") + + suite.True( + c.IsPrivileged(), + "The container is not privileged (didn't apply the unprivileged profile?).") +} + +func (suite *lxdTestSuite) TestContainer_ProfilesOverwriteDefaultNic() { + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Config: map[string]string{"security.privileged": "true"}, + Devices: shared.Devices{ + "eth0": shared.Device{ + "type": "nic", + "nictype": "bridged", + "parent": "unknownbr0"}}, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + + suite.True(c.IsPrivileged(), "This container should be privileged.") + + state, err := c.Render() + suite.Req.Nil(err) + defer c.Delete() + + suite.Equal( + "unknownbr0", + state.Devices["eth0"]["parent"], + "Container config doesn't overwrite profile config.") +} + +func (suite *lxdTestSuite) TestContainer_LoadFromDB() { + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Config: map[string]string{"security.privileged": "true"}, + Devices: shared.Devices{ + "eth0": shared.Device{ + "type": "nic", + "nictype": "bridged", + "parent": "unknownbr0"}}, + Name: "testFoo", + } + + // Create the container + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + // Load the container and trigger initLXC() + c2, err := containerLoadByName(suite.d, "testFoo") + c2.IsRunning() + suite.Req.Nil(err) + + suite.Exactly( + c, + c2, + "The loaded container isn't exactly the same as the created one.") +} + +func (suite *lxdTestSuite) TestContainer_Path_Regular() { + // Regular + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + suite.Req.False(c.IsSnapshot(), "Shouldn't be a snapshot.") + suite.Req.Equal(shared.VarPath("containers", "testFoo"), c.Path()) + suite.Req.Equal(shared.VarPath("containers", "testFoo2"), containerPath("testFoo2", false)) +} + +func (suite *lxdTestSuite) TestContainer_Path_Snapshot() { + // Snapshot + args := containerArgs{ + Ctype: cTypeSnapshot, + Ephemeral: false, + Name: "test/snap0", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + suite.Req.True(c.IsSnapshot(), "Should be a snapshot.") + suite.Req.Equal( + shared.VarPath("snapshots", "test", "snap0"), + c.Path()) + suite.Req.Equal( + shared.VarPath("snapshots", "test", "snap1"), + containerPath("test/snap1", true)) +} + +func (suite *lxdTestSuite) TestContainer_LogPath() { + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + suite.Req.Equal(shared.VarPath("logs", "testFoo"), c.LogPath()) +} + +func (suite *lxdTestSuite) TestContainer_IsPrivileged_Privileged() { 
+ args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Config: map[string]string{"security.privileged": "true"}, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + suite.Req.True(c.IsPrivileged(), "This container should be privileged.") + suite.Req.Nil(c.Delete(), "Failed to delete the container.") +} + +func (suite *lxdTestSuite) TestContainer_IsPrivileged_Unprivileged() { + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Config: map[string]string{"security.privileged": "false"}, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + suite.Req.False(c.IsPrivileged(), "This container should be unprivileged.") + suite.Req.Nil(c.Delete(), "Failed to delete the container.") +} + +func (suite *lxdTestSuite) TestContainer_Rename() { + args := containerArgs{ + Ctype: cTypeRegular, + Ephemeral: false, + Name: "testFoo", + } + + c, err := containerCreateInternal(suite.d, args) + suite.Req.Nil(err) + defer c.Delete() + + suite.Req.Nil(c.Rename("testFoo2"), "Failed to rename the container.") + suite.Req.Equal(shared.VarPath("containers", "testFoo2"), c.Path()) +} === added file 'src/github.com/lxc/lxd/lxd/containers.go' --- src/github.com/lxc/lxd/lxd/containers.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/containers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,248 @@ +package main + +import ( + "fmt" + "os" + "sort" + "strconv" + "sync" + "syscall" + "time" + + "gopkg.in/lxc/go-lxc.v2" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +var containersCmd = Command{ + name: "containers", + get: containersGet, + post: containersPost, +} + +var containerCmd = Command{ + name: "containers/{name}", + get: containerGet, + put: containerPut, + delete: containerDelete, + post: containerPost, +} + +var containerStateCmd = Command{ + name: "containers/{name}/state", + get: containerState, + put: containerStatePut, +} + +var containerFileCmd = Command{ + name: "containers/{name}/files", + get: containerFileHandler, + post: containerFileHandler, +} + +var containerSnapshotsCmd = Command{ + name: "containers/{name}/snapshots", + get: containerSnapshotsGet, + post: containerSnapshotsPost, +} + +var containerSnapshotCmd = Command{ + name: "containers/{name}/snapshots/{snapshotName}", + get: snapshotHandler, + post: snapshotHandler, + delete: snapshotHandler, +} + +var containerExecCmd = Command{ + name: "containers/{name}/exec", + post: containerExecPost, +} + +type containerAutostartList []container + +func (slice containerAutostartList) Len() int { + return len(slice) +} + +func (slice containerAutostartList) Less(i, j int) bool { + iOrder := slice[i].ExpandedConfig()["boot.autostart.priority"] + jOrder := slice[j].ExpandedConfig()["boot.autostart.priority"] + + if iOrder != jOrder { + iOrderInt, _ := strconv.Atoi(iOrder) + jOrderInt, _ := strconv.Atoi(jOrder) + return iOrderInt > jOrderInt + } + + return slice[i].Name() < slice[j].Name() +} + +func (slice containerAutostartList) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func containersRestart(d *Daemon) error { + result, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err + } + + containers := []container{} + + for _, name := range result { + c, err := containerLoadByName(d, name) + if err != nil { + return err + } + + containers = append(containers, c) + } + + sort.Sort(containerAutostartList(containers)) + + 
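+ /* The sort.Sort call above works because containerAutostartList
+ * implements sort.Interface: highest boot.autostart.priority first,
+ * with the container name as the tie-breaker. A minimal standalone
+ * sketch of the same pattern (type and values hypothetical, not part
+ * of this file):
+ *
+ * type byPriority []struct {
+ * name string
+ * prio int
+ * }
+ * func (s byPriority) Len() int { return len(s) }
+ * func (s byPriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+ * func (s byPriority) Less(i, j int) bool {
+ * if s[i].prio != s[j].prio { return s[i].prio > s[j].prio }
+ * return s[i].name < s[j].name
+ * }
+ * sort.Sort(byPriority{{"db", 10}, {"web", 10}, {"cache", 5}})
+ * // order after sorting: db, web, cache
+ */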
for _, c := range containers { + config := c.ExpandedConfig() + lastState := config["volatile.last_state.power"] + + autoStart := config["boot.autostart"] + autoStartDelay := config["boot.autostart.delay"] + + if lastState == "RUNNING" || autoStart == "true" { + if c.IsRunning() { + continue + } + + c.Start(false) + + autoStartDelayInt, err := strconv.Atoi(autoStartDelay) + if err == nil { + time.Sleep(time.Duration(autoStartDelayInt) * time.Second) + } + } + } + + _, err = dbExec(d.db, "DELETE FROM containers_config WHERE key='volatile.last_state.power'") + if err != nil { + return err + } + + return nil +} + +func containersShutdown(d *Daemon) error { + results, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err + } + + var wg sync.WaitGroup + + for _, r := range results { + c, err := containerLoadByName(d, r) + if err != nil { + return err + } + + err = c.ConfigKeySet("volatile.last_state.power", c.State()) + + if err != nil { + return err + } + + if c.IsRunning() { + wg.Add(1) + go func() { + c.Shutdown(time.Second * 30) + c.Stop(false) + wg.Done() + }() + } + wg.Wait() + } + + return nil +} + +func containerDeleteSnapshots(d *Daemon, cname string) error { + shared.Log.Debug("containerDeleteSnapshots", + log.Ctx{"container": cname}) + + results, err := dbContainerGetSnapshots(d.db, cname) + if err != nil { + return err + } + + for _, sname := range results { + sc, err := containerLoadByName(d, sname) + if err != nil { + shared.Log.Error( + "containerDeleteSnapshots: Failed to load the snapshotcontainer", + log.Ctx{"container": cname, "snapshot": sname}) + + continue + } + + if err := sc.Delete(); err != nil { + shared.Log.Error( + "containerDeleteSnapshots: Failed to delete a snapshotcontainer", + log.Ctx{"container": cname, "snapshot": sname, "err": err}) + } + } + + return nil +} + +/* + * This is called by lxd when called as "lxd forkstart " + * 'forkstart' is used instead of just 'start' in the hopes that people + * do not accidentally type 'lxd start' instead of 'lxc start' + * + * We expect to read the lxcconfig over fd 3. + */ +func startContainer(args []string) error { + if len(args) != 4 { + return fmt.Errorf("Bad arguments: %q", args) + } + + name := args[1] + lxcpath := args[2] + configPath := args[3] + + c, err := lxc.NewContainer(name, lxcpath) + if err != nil { + return fmt.Errorf("Error initializing container for start: %q", err) + } + + err = c.LoadConfigFile(configPath) + if err != nil { + return fmt.Errorf("Error opening startup config file: %q", err) + } + + /* due to https://github.com/golang/go/issues/13155 and the + * CollectOutput call we make for the forkstart process, we need to + * close our stdin/stdout/stderr here. Collecting some of the logs is + * better than collecting no logs, though. 
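+ *
+ * For reference, the invocation being handled here is the internal
+ * one described above, e.g. (paths hypothetical):
+ *
+ * lxd forkstart c1 /var/lib/lxd/containers /var/lib/lxd/logs/c1/lxc.conf
+ *
+ * where args[1] is the container name, args[2] the lxcpath and
+ * args[3] the LXC config file to load.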
+ */ + os.Stdin.Close() + os.Stderr.Close() + os.Stdout.Close() + + // Redirect stdout and stderr to a log file + logPath := shared.LogPath(name, "forkstart.log") + if shared.PathExists(logPath) { + os.Remove(logPath) + } + + logFile, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_SYNC, 0644) + if err == nil { + syscall.Dup3(int(logFile.Fd()), 1, 0) + syscall.Dup3(int(logFile.Fd()), 2, 0) + } + + // Move the config so we can inspect it on failure + shared.FileMove(configPath, shared.LogPath(name, "lxc.conf")) + + return c.Start() +} === added file 'src/github.com/lxc/lxd/lxd/containers_get.go' --- src/github.com/lxc/lxd/lxd/containers_get.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/containers_get.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,75 @@ +package main + +import ( + "fmt" + "net/http" + "time" + + "github.com/lxc/lxd/shared" +) + +func containersGet(d *Daemon, r *http.Request) Response { + for i := 0; i < 100; i++ { + result, err := doContainersGet(d, d.isRecursionRequest(r)) + if err == nil { + return SyncResponse(true, result) + } + if !isDbLockedError(err) { + shared.Debugf("DBERR: containersGet: error %q", err) + return InternalError(err) + } + // 1 s may seem drastic, but we really don't want to thrash + // perhaps we should use a random amount + time.Sleep(100 * time.Millisecond) + } + + shared.Debugf("DBERR: containersGet, db is locked") + shared.PrintStack() + return InternalError(fmt.Errorf("DB is locked")) +} + +func doContainersGet(d *Daemon, recursion bool) (interface{}, error) { + result, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return nil, err + } + + resultString := []string{} + resultList := []*shared.ContainerInfo{} + if err != nil { + return []string{}, err + } + + for _, container := range result { + if !recursion { + url := fmt.Sprintf("/%s/containers/%s", shared.APIVersion, container) + resultString = append(resultString, url) + } else { + container, response := doContainerGet(d, container) + if response != nil { + continue + } + resultList = append(resultList, container) + } + } + + if !recursion { + return resultString, nil + } + + return resultList, nil +} + +func doContainerGet(d *Daemon, cname string) (*shared.ContainerInfo, Response) { + c, err := containerLoadByName(d, cname) + if err != nil { + return nil, SmartError(err) + } + + cts, err := c.Render() + if err != nil { + return nil, SmartError(err) + } + + return cts, nil +} === added file 'src/github.com/lxc/lxd/lxd/containers_post.go' --- src/github.com/lxc/lxd/lxd/containers_post.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/containers_post.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,378 @@ +package main + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "net/http" + "strings" + + "github.com/dustinkirkland/golang-petname" + "github.com/gorilla/websocket" + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +type containerImageSource struct { + Type string `json:"type"` + Certificate string `json:"certificate"` + + /* for "image" type */ + Alias string `json:"alias"` + Fingerprint string `json:"fingerprint"` + Server string `json:"server"` + Secret string `json:"secret"` + Protocol string `json:"protocol"` + + /* + * for "migration" and "copy" types, as an optimization users can + * provide an image hash to extract before the filesystem is rsync'd, + * potentially cutting down filesystem transfer time. 
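+ *
+ * As an illustration (field names taken from the json tags below,
+ * all values hypothetical), a pull-mode migration request might look
+ * like:
+ *
+ * {
+ * "name": "c1",
+ * "source": {
+ * "type": "migration",
+ * "mode": "pull",
+ * "operation": "https://10.0.0.1:8443/1.0/operations/<uuid>",
+ * "secrets": {"control": "...", "fs": "..."},
+ * "base-image": "<fingerprint>"
+ * }
+ * }
+ *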
LXD will not go + * and fetch this image, it will simply use it if it exists in the + * image store. + */ + BaseImage string `json:"base-image"` + + /* for "migration" type */ + Mode string `json:"mode"` + Operation string `json:"operation"` + Websockets map[string]string `json:"secrets"` + + /* for "copy" type */ + Source string `json:"source"` +} + +type containerPostReq struct { + Architecture string `json:"architecture"` + Config map[string]string `json:"config"` + Devices shared.Devices `json:"devices"` + Ephemeral bool `json:"ephemeral"` + Name string `json:"name"` + Profiles []string `json:"profiles"` + Source containerImageSource `json:"source"` +} + +func createFromImage(d *Daemon, req *containerPostReq) Response { + var hash string + var err error + + if req.Source.Fingerprint != "" { + hash = req.Source.Fingerprint + } else if req.Source.Alias != "" { + if req.Source.Server != "" { + hash = req.Source.Alias + } else { + _, alias, err := dbImageAliasGet(d.db, req.Source.Alias, true) + if err != nil { + return InternalError(err) + } + + hash = alias.Target + } + } else if req.Source.Fingerprint != "" { + hash = req.Source.Fingerprint + } else { + return BadRequest(fmt.Errorf("must specify one of alias or fingerprint for init from image")) + } + + run := func(op *operation) error { + if req.Source.Server != "" { + updateCached, _ := d.ConfigValueGet("images.auto_update_cached") + hash, err = d.ImageDownload(op, req.Source.Server, req.Source.Protocol, req.Source.Certificate, req.Source.Secret, hash, true, updateCached != "false") + if err != nil { + return err + } + } + + _, imgInfo, err := dbImageGet(d.db, hash, false, false) + if err != nil { + return err + } + + hash = imgInfo.Fingerprint + + architecture, err := shared.ArchitectureId(imgInfo.Architecture) + if err != nil { + architecture = 0 + } + + args := containerArgs{ + Architecture: architecture, + BaseImage: hash, + Config: req.Config, + Ctype: cTypeRegular, + Devices: req.Devices, + Ephemeral: req.Ephemeral, + Name: req.Name, + Profiles: req.Profiles, + } + + _, err = containerCreateFromImage(d, args, hash) + return err + } + + resources := map[string][]string{} + resources["containers"] = []string{req.Name} + + op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func createFromNone(d *Daemon, req *containerPostReq) Response { + architecture, err := shared.ArchitectureId(req.Architecture) + if err != nil { + architecture = 0 + } + + args := containerArgs{ + Architecture: architecture, + Config: req.Config, + Ctype: cTypeRegular, + Devices: req.Devices, + Ephemeral: req.Ephemeral, + Name: req.Name, + Profiles: req.Profiles, + } + + run := func(op *operation) error { + _, err := containerCreateAsEmpty(d, args) + return err + } + + resources := map[string][]string{} + resources["containers"] = []string{req.Name} + + op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func createFromMigration(d *Daemon, req *containerPostReq) Response { + if req.Source.Mode != "pull" { + return NotImplemented + } + + architecture, err := shared.ArchitectureId(req.Architecture) + if err != nil { + architecture = 0 + } + + run := func(op *operation) error { + args := containerArgs{ + Architecture: architecture, + BaseImage: req.Source.BaseImage, + Config: req.Config, + Ctype: cTypeRegular, + Devices: req.Devices, + 
Ephemeral: req.Ephemeral, + Name: req.Name, + Profiles: req.Profiles, + } + + var c container + _, _, err := dbImageGet(d.db, req.Source.BaseImage, false, true) + + /* Only create a container from an image if we're going to + * rsync over the top of it. In the case of a better file + * transfer mechanism, let's just use that. + * + * TODO: we could invent some negotiation here, where if the + * source and sink both have the same image, we can clone from + * it, but we have to know before sending the snapshot that + * we're sending the whole thing or just a delta from the + * image, so one extra negotiation round trip is needed. An + * alternative is to move the actual container object to a later + * point and just negotiate it over the migration control + * socket. Anyway, it'll happen later :) + */ + if err == nil && d.Storage.MigrationType() == MigrationFSType_RSYNC { + c, err = containerCreateFromImage(d, args, req.Source.BaseImage) + if err != nil { + return err + } + } else { + c, err = containerCreateAsEmpty(d, args) + if err != nil { + return err + } + } + + var cert *x509.Certificate + if req.Source.Certificate != "" { + certBlock, _ := pem.Decode([]byte(req.Source.Certificate)) + + cert, err = x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return err + } + } + + config, err := shared.GetTLSConfig("", "", cert) + if err != nil { + c.Delete() + return err + } + + migrationArgs := MigrationSinkArgs{ + Url: req.Source.Operation, + Dialer: websocket.Dialer{ + TLSClientConfig: config, + NetDial: shared.RFC3493Dialer}, + Container: c, + Secrets: req.Source.Websockets, + } + + sink, err := NewMigrationSink(&migrationArgs) + if err != nil { + c.Delete() + return err + } + + // Start the storage for this container (LVM mount/umount) + c.StorageStart() + + // And finally run the migration. 
+ err = sink() + if err != nil { + c.StorageStop() + shared.Log.Error("Error during migration sink", "err", err) + c.Delete() + return fmt.Errorf("Error transferring container data: %s", err) + } + + defer c.StorageStop() + + err = c.TemplateApply("copy") + if err != nil { + return err + } + + return nil + } + + resources := map[string][]string{} + resources["containers"] = []string{req.Name} + + op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func createFromCopy(d *Daemon, req *containerPostReq) Response { + if req.Source.Source == "" { + return BadRequest(fmt.Errorf("must specify a source container")) + } + + source, err := containerLoadByName(d, req.Source.Source) + if err != nil { + return SmartError(err) + } + + // Config override + sourceConfig := source.LocalConfig() + + if req.Config == nil { + req.Config = make(map[string]string) + } + + for key, value := range sourceConfig { + if len(key) > 8 && key[0:8] == "volatile" && key[9:] != "base_image" { + shared.Log.Debug("Skipping volatile key from copy source", + log.Ctx{"key": key}) + continue + } + + _, exists := req.Config[key] + if exists { + continue + } + + req.Config[key] = value + } + + // Profiles override + if req.Profiles == nil { + req.Profiles = source.Profiles() + } + + args := containerArgs{ + Architecture: source.Architecture(), + BaseImage: req.Source.BaseImage, + Config: req.Config, + Ctype: cTypeRegular, + Devices: source.LocalDevices(), + Ephemeral: req.Ephemeral, + Name: req.Name, + Profiles: req.Profiles, + } + + run := func(op *operation) error { + _, err := containerCreateAsCopy(d, args, source) + if err != nil { + return err + } + + return nil + } + + resources := map[string][]string{} + resources["containers"] = []string{req.Name, req.Source.Source} + + op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func containersPost(d *Daemon, r *http.Request) Response { + shared.Debugf("Responding to container create") + + req := containerPostReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + if req.Name == "" { + req.Name = strings.ToLower(petname.Generate(2, "-")) + shared.Debugf("No name provided, creating %s", req.Name) + } + + if req.Devices == nil { + req.Devices = shared.Devices{} + } + + if req.Config == nil { + req.Config = map[string]string{} + } + + if strings.Contains(req.Name, shared.SnapshotDelimiter) { + return BadRequest(fmt.Errorf("Invalid container name: '%s' is reserved for snapshots", shared.SnapshotDelimiter)) + } + + switch req.Source.Type { + case "image": + return createFromImage(d, &req) + case "none": + return createFromNone(d, &req) + case "migration": + return createFromMigration(d, &req) + case "copy": + return createFromCopy(d, &req) + default: + return BadRequest(fmt.Errorf("unknown source type %s", req.Source.Type)) + } +} === added file 'src/github.com/lxc/lxd/lxd/daemon.go' --- src/github.com/lxc/lxd/lxd/daemon.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/daemon.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1311 @@ +package main + +import ( + "bytes" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "database/sql" + "encoding/hex" + "encoding/pem" + "fmt" + "io" + "net" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + 
"golang.org/x/crypto/scrypt" + + "github.com/coreos/go-systemd/activation" + "github.com/gorilla/mux" + _ "github.com/mattn/go-sqlite3" + "github.com/syndtr/gocapability/capability" + "gopkg.in/tomb.v2" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/logging" + + log "gopkg.in/inconshreveable/log15.v2" +) + +// AppArmor +var aaAdmin = true +var aaAvailable = true +var aaConfined = false + +// CGroup +var cgBlkioController = false +var cgCpuController = false +var cgCpusetController = false +var cgDevicesController = false +var cgMemoryController = false +var cgNetPrioController = false +var cgPidsController = false +var cgSwapAccounting = false + +// UserNS +var runningInUserns = false + +const ( + pwSaltBytes = 32 + pwHashBytes = 64 +) + +type Socket struct { + Socket net.Listener + CloseOnExit bool +} + +// A Daemon can respond to requests from a shared client. +type Daemon struct { + architectures []int + BackingFs string + clientCerts []x509.Certificate + db *sql.DB + group string + IdmapSet *shared.IdmapSet + lxcpath string + mux *mux.Router + tomb tomb.Tomb + pruneChan chan bool + shutdownChan chan bool + resetAutoUpdateChan chan bool + execPath string + + Storage storage + + Sockets []Socket + + devlxd *net.UnixListener + + configValues map[string]string + + IsMock bool + + imagesDownloading map[string]chan bool + imagesDownloadingLock sync.RWMutex + + tlsConfig *tls.Config +} + +// Command is the basic structure for every API call. +type Command struct { + name string + untrustedGet bool + untrustedPost bool + get func(d *Daemon, r *http.Request) Response + put func(d *Daemon, r *http.Request) Response + post func(d *Daemon, r *http.Request) Response + delete func(d *Daemon, r *http.Request) Response +} + +func (d *Daemon) httpGetSync(url string, certificate string) (*lxd.Response, error) { + var err error + + var cert *x509.Certificate + if certificate != "" { + certBlock, _ := pem.Decode([]byte(certificate)) + + cert, err = x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return nil, err + } + } + + tlsConfig, err := shared.GetTLSConfig("", "", cert) + if err != nil { + return nil, err + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + Dial: shared.RFC3493Dialer, + Proxy: http.ProxyFromEnvironment, + } + + myhttp := http.Client{ + Transport: tr, + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", shared.UserAgent) + + r, err := myhttp.Do(req) + if err != nil { + return nil, err + } + + resp, err := lxd.ParseResponse(r) + if err != nil { + return nil, err + } + + if resp.Type != lxd.Sync { + return nil, fmt.Errorf("unexpected non-sync response") + } + + return resp, nil +} + +func (d *Daemon) httpGetFile(url string, certificate string) (*http.Response, error) { + var err error + + var cert *x509.Certificate + if certificate != "" { + certBlock, _ := pem.Decode([]byte(certificate)) + + cert, err = x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return nil, err + } + } + + tlsConfig, err := shared.GetTLSConfig("", "", cert) + if err != nil { + return nil, err + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + Dial: shared.RFC3493Dialer, + Proxy: http.ProxyFromEnvironment, + } + myhttp := http.Client{ + Transport: tr, + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + + req.Header.Set("User-Agent", shared.UserAgent) + + raw, err := myhttp.Do(req) + if err != nil { + return nil, err + } + 
+ if raw.StatusCode != 200 { + _, err := lxd.HoistResponse(raw, lxd.Error) + if err != nil { + return nil, err + } + + return nil, fmt.Errorf("non-200 status with no error response?") + } + + return raw, nil +} + +func readMyCert() (string, string, error) { + certf := shared.VarPath("server.crt") + keyf := shared.VarPath("server.key") + shared.Log.Info("Looking for existing certificates", log.Ctx{"cert": certf, "key": keyf}) + + err := shared.FindOrGenCert(certf, keyf) + + return certf, keyf, err +} + +func (d *Daemon) isTrustedClient(r *http.Request) bool { + if r.RemoteAddr == "@" { + // Unix socket + return true + } + if r.TLS == nil { + return false + } + for i := range r.TLS.PeerCertificates { + if d.CheckTrustState(*r.TLS.PeerCertificates[i]) { + return true + } + } + return false +} + +func isJSONRequest(r *http.Request) bool { + for k, vs := range r.Header { + if strings.ToLower(k) == "content-type" && + len(vs) == 1 && strings.ToLower(vs[0]) == "application/json" { + return true + } + } + + return false +} + +func (d *Daemon) isRecursionRequest(r *http.Request) bool { + recursionStr := r.FormValue("recursion") + recursion, err := strconv.Atoi(recursionStr) + if err != nil { + return false + } + + return recursion == 1 +} + +func (d *Daemon) createCmd(version string, c Command) { + var uri string + if c.name == "" { + uri = fmt.Sprintf("/%s", version) + } else { + uri = fmt.Sprintf("/%s/%s", version, c.name) + } + + d.mux.HandleFunc(uri, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + + if d.isTrustedClient(r) { + shared.Log.Info( + "handling", + log.Ctx{"method": r.Method, "url": r.URL.RequestURI(), "ip": r.RemoteAddr}) + } else if r.Method == "GET" && c.untrustedGet { + shared.Log.Info( + "allowing untrusted GET", + log.Ctx{"url": r.URL.RequestURI(), "ip": r.RemoteAddr}) + } else if r.Method == "POST" && c.untrustedPost { + shared.Log.Info( + "allowing untrusted POST", + log.Ctx{"url": r.URL.RequestURI(), "ip": r.RemoteAddr}) + } else { + shared.Log.Warn( + "rejecting request from untrusted client", + log.Ctx{"ip": r.RemoteAddr}) + Forbidden.Render(w) + return + } + + if debug && r.Method != "GET" && isJSONRequest(r) { + newBody := &bytes.Buffer{} + captured := &bytes.Buffer{} + multiW := io.MultiWriter(newBody, captured) + if _, err := io.Copy(multiW, r.Body); err != nil { + InternalError(err).Render(w) + return + } + + r.Body = shared.BytesReadCloser{Buf: newBody} + shared.DebugJson(captured) + } + + var resp Response + resp = NotImplemented + + switch r.Method { + case "GET": + if c.get != nil { + resp = c.get(d, r) + } + case "PUT": + if c.put != nil { + resp = c.put(d, r) + } + case "POST": + if c.post != nil { + resp = c.post(d, r) + } + case "DELETE": + if c.delete != nil { + resp = c.delete(d, r) + } + default: + resp = NotFound + } + + if err := resp.Render(w); err != nil { + err := InternalError(err).Render(w) + if err != nil { + shared.Log.Error("Failed writing error for error, giving up") + } + } + + /* + * When we create a new lxc.Container, it adds a finalizer (via + * SetFinalizer) that frees the struct. However, it sometimes + * takes the go GC a while to actually free the struct, + * presumably since it is a small amount of memory. + * Unfortunately, the struct also keeps the log fd open, so if + * we leave too many of these around, we end up running out of + * fds. So, let's explicitly do a GC to collect these at the + * end of each request. 
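+ *
+ * (Illustratively, the finalizer in question is registered roughly
+ * as runtime.SetFinalizer(c, releaseFunc), so the struct, and the
+ * log fd it holds, stays alive until the GC actually runs and the
+ * finalizer releases it.)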
+ */ + runtime.GC() + }) +} + +func (d *Daemon) SetupStorageDriver() error { + lvmVgName, err := d.ConfigValueGet("storage.lvm_vg_name") + if err != nil { + return fmt.Errorf("Couldn't read config: %s", err) + } + + zfsPoolName, err := d.ConfigValueGet("storage.zfs_pool_name") + if err != nil { + return fmt.Errorf("Couldn't read config: %s", err) + } + + if lvmVgName != "" { + d.Storage, err = newStorage(d, storageTypeLvm) + if err != nil { + shared.Logf("Could not initialize storage type LVM: %s - falling back to dir", err) + } else { + return nil + } + } else if zfsPoolName != "" { + d.Storage, err = newStorage(d, storageTypeZfs) + if err != nil { + shared.Logf("Could not initialize storage type ZFS: %s - falling back to dir", err) + } else { + return nil + } + } else if d.BackingFs == "btrfs" { + d.Storage, err = newStorage(d, storageTypeBtrfs) + if err != nil { + shared.Logf("Could not initialize storage type btrfs: %s - falling back to dir", err) + } else { + return nil + } + } + + d.Storage, err = newStorage(d, storageTypeDir) + + return err +} + +func setupSharedMounts() error { + path := shared.VarPath("shmounts") + + isShared, err := shared.IsOnSharedMount(path) + if err != nil { + return err + } + + if isShared { + // / may already be ms-shared, or shmounts may have + // been mounted by a previous lxd run + return nil + } + + if err := syscall.Mount(path, path, "none", syscall.MS_BIND, ""); err != nil { + return err + } + + var flags uintptr = syscall.MS_SHARED | syscall.MS_REC + if err := syscall.Mount(path, path, "none", flags, ""); err != nil { + return err + } + + return nil +} + +func (d *Daemon) ListenAddresses() ([]string, error) { + addresses := make([]string, 0) + + value, err := d.ConfigValueGet("core.https_address") + if err != nil || value == "" { + return addresses, err + } + + localHost, localPort, err := net.SplitHostPort(value) + if err != nil { + localHost = value + localPort = shared.DefaultPort + } + + if localHost == "0.0.0.0" || localHost == "::" || localHost == "[::]" { + ifaces, err := net.Interfaces() + if err != nil { + return addresses, err + } + + for _, i := range ifaces { + addrs, err := i.Addrs() + if err != nil { + continue + } + + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + + if !ip.IsGlobalUnicast() { + continue + } + + if ip.To4() == nil { + if localHost == "0.0.0.0" { + continue + } + addresses = append(addresses, fmt.Sprintf("[%s]:%s", ip, localPort)) + } else { + addresses = append(addresses, fmt.Sprintf("%s:%s", ip, localPort)) + } + } + } + } else { + if strings.Contains(localHost, ":") { + addresses = append(addresses, fmt.Sprintf("[%s]:%s", localHost, localPort)) + } else { + addresses = append(addresses, fmt.Sprintf("%s:%s", localHost, localPort)) + } + } + + return addresses, nil +} + +func bytesZero(x []byte) bool { + for _, b := range x { + if b != 0 { + return false + } + } + return true +} + +func bytesEqual(x, y []byte) bool { + if len(x) != len(y) { + return false + } + for i, b := range x { + if y[i] != b { + return false + } + } + return true +} + +func isZeroIP(x []byte) bool { + if x == nil { + return false + } + + if bytesZero(x) { + return true + } + + if len(x) != net.IPv6len { + return false + } + + var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff} + return bytesEqual(x[0:12], v4InV6Prefix) && bytesZero(x[12:]) +} + +func IpsEqual(ip1 net.IP, ip2 net.IP) bool { + if ip1.Equal(ip2) { + return true + } + + /* 
the go std library Equal doesn't recognize [::] == 0.0.0.0, since it + * tests for the ipv4 prefix, which isn't present in [::]. However, + * they are in fact equal. Let's test for this case too. + */ + return isZeroIP(ip1) && isZeroIP(ip2) +} + +func (d *Daemon) UpdateHTTPsPort(oldAddress string, newAddress string) error { + var sockets []Socket + + if oldAddress == newAddress { + return nil + } + + if oldAddress != "" { + oldHost, oldPort, err := net.SplitHostPort(oldAddress) + if err != nil { + oldHost = oldAddress + oldPort = shared.DefaultPort + } + + // Strip brackets around IPv6 once we've gotten rid of the port + oldHost = strings.TrimLeft(oldHost, "[") + oldHost = strings.TrimRight(oldHost, "]") + + for _, socket := range d.Sockets { + host, port, err := net.SplitHostPort(socket.Socket.Addr().String()) + if err != nil { + host = socket.Socket.Addr().String() + port = shared.DefaultPort + } + + // Strip brackets around IPv6 once we've gotten rid of the port + host = strings.TrimLeft(host, "[") + host = strings.TrimRight(host, "]") + + if !shared.PathExists(host) && IpsEqual(net.ParseIP(host), net.ParseIP(oldHost)) && port == oldPort { + socket.Socket.Close() + } else { + sockets = append(sockets, socket) + } + } + } else { + sockets = d.Sockets + } + + if newAddress != "" { + _, _, err := net.SplitHostPort(newAddress) + if err != nil { + ip := net.ParseIP(newAddress) + if ip != nil && ip.To4() == nil { + newAddress = fmt.Sprintf("[%s]:%s", newAddress, shared.DefaultPort) + } else { + newAddress = fmt.Sprintf("%s:%s", newAddress, shared.DefaultPort) + } + } + + tcpl, err := tls.Listen("tcp", newAddress, d.tlsConfig) + if err != nil { + return fmt.Errorf("cannot listen on https socket: %v", err) + } + + d.tomb.Go(func() error { return http.Serve(tcpl, d.mux) }) + sockets = append(sockets, Socket{Socket: tcpl, CloseOnExit: true}) + } + + d.Sockets = sockets + return nil +} + +// StartDaemon starts the shared daemon with the provided configuration. 
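+ /* A minimal sketch of the call site (group name hypothetical):
+ *
+ * d, err := startDaemon("lxd")
+ * if err != nil {
+ * shared.Log.Error("failed to start", log.Ctx{"err": err})
+ * return
+ * }
+ * defer d.Stop()
+ */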
+func startDaemon(group string) (*Daemon, error) { + d := &Daemon{ + group: group, + IsMock: false, + imagesDownloading: map[string]chan bool{}, + imagesDownloadingLock: sync.RWMutex{}, + } + + if err := d.Init(); err != nil { + return nil, err + } + + return d, nil +} + +func haveMacAdmin() bool { + c, err := capability.NewPid(0) + if err != nil { + return false + } + if c.Get(capability.EFFECTIVE, capability.CAP_MAC_ADMIN) { + return true + } + return false +} + +func (d *Daemon) Init() error { + d.shutdownChan = make(chan bool) + + /* Set the executable path */ + absPath, err := os.Readlink("/proc/self/exe") + if err != nil { + return err + } + d.execPath = absPath + + /* Set the LVM environment */ + err = os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1") + if err != nil { + return err + } + + /* Setup logging if that wasn't done before */ + if shared.Log == nil { + shared.Log, err = logging.GetLogger("", "", true, true, nil) + if err != nil { + return err + } + } + + if !d.IsMock { + shared.Log.Info("LXD is starting", + log.Ctx{"path": shared.VarPath("")}) + } else { + shared.Log.Info("Mock LXD is starting", + log.Ctx{"path": shared.VarPath("")}) + } + + /* Detect user namespaces */ + runningInUserns = shared.RunningInUserNS() + + /* Detect AppArmor support */ + if aaAvailable && os.Getenv("LXD_SECURITY_APPARMOR") == "false" { + aaAvailable = false + aaAdmin = false + shared.Log.Warn("AppArmor support has been manually disabled") + } + + if aaAvailable && !shared.IsDir("/sys/kernel/security/apparmor") { + aaAvailable = false + aaAdmin = false + shared.Log.Warn("AppArmor support has been disabled because of lack of kernel support") + } + + _, err = exec.LookPath("apparmor_parser") + if aaAvailable && err != nil { + aaAvailable = false + aaAdmin = false + shared.Log.Warn("AppArmor support has been disabled because 'apparmor_parser' couldn't be found") + } + + /* Detect AppArmor admin support */ + if aaAdmin && !haveMacAdmin() { + aaAdmin = false + shared.Log.Warn("Per-container AppArmor profiles are disabled because the mac_admin capability is missing.") + } + + if aaAdmin && runningInUserns { + aaAdmin = false + shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is running in an unprivileged container.") + } + + /* Detect AppArmor confinement */ + if !aaConfined { + profile := aaProfile() + if profile != "unconfined" && profile != "" { + aaConfined = true + shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is already protected by AppArmor.") + } + } + + /* Detect CGroup support */ + cgBlkioController = shared.PathExists("/sys/fs/cgroup/blkio/") + if !cgBlkioController { + shared.Log.Warn("Couldn't find the CGroup blkio controller, I/O limits will be ignored.") + } + + cgCpuController = shared.PathExists("/sys/fs/cgroup/cpu/") + if !cgCpuController { + shared.Log.Warn("Couldn't find the CGroup CPU controller, CPU time limits will be ignored.") + } + + cgCpusetController = shared.PathExists("/sys/fs/cgroup/cpuset/") + if !cgCpusetController { + shared.Log.Warn("Couldn't find the CGroup CPUset controller, CPU pinning will be ignored.") + } + + cgDevicesController = shared.PathExists("/sys/fs/cgroup/devices/") + if !cgDevicesController { + shared.Log.Warn("Couldn't find the CGroup devices controller, device access control won't work.") + } + + cgMemoryController = shared.PathExists("/sys/fs/cgroup/memory/") + if !cgMemoryController { + shared.Log.Warn("Couldn't find the CGroup memory controller, memory limits will be ignored.") + } + + 
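+ /* Every probe above and below follows the same shape: feature
+ * availability is just "is the controller's cgroupfs directory
+ * mounted". A generic helper would look like (hypothetical, not
+ * used by this file):
+ *
+ * func hasController(name string) bool {
+ * return shared.PathExists("/sys/fs/cgroup/" + name + "/")
+ * }
+ */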
cgNetPrioController = shared.PathExists("/sys/fs/cgroup/net_prio/") + if !cgNetPrioController { + shared.Log.Warn("Couldn't find the CGroup network class controller, network limits will be ignored.") + } + + cgPidsController = shared.PathExists("/sys/fs/cgroup/pids/") + if !cgPidsController { + shared.Log.Warn("Couldn't find the CGroup pids controller, process limits will be ignored.") + } + + cgSwapAccounting = shared.PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes") + if !cgSwapAccounting { + shared.Log.Warn("CGroup memory swap accounting is disabled, swap limits will be ignored.") + } + + /* Get the list of supported architectures */ + var architectures = []int{} + + architectureName, err := shared.ArchitectureGetLocal() + if err != nil { + return err + } + + architecture, err := shared.ArchitectureId(architectureName) + if err != nil { + return err + } + architectures = append(architectures, architecture) + + personalities, err := shared.ArchitecturePersonalities(architecture) + if err != nil { + return err + } + for _, personality := range personalities { + architectures = append(architectures, personality) + } + d.architectures = architectures + + /* Set container path */ + d.lxcpath = shared.VarPath("containers") + + /* Make sure all our directories are available */ + if err := os.MkdirAll(shared.VarPath("containers"), 0711); err != nil { + return err + } + if err := os.MkdirAll(shared.VarPath("devices"), 0711); err != nil { + return err + } + if err := os.MkdirAll(shared.VarPath("devlxd"), 0755); err != nil { + return err + } + if err := os.MkdirAll(shared.VarPath("images"), 0700); err != nil { + return err + } + if err := os.MkdirAll(shared.LogPath(), 0700); err != nil { + return err + } + if err := os.MkdirAll(shared.VarPath("security"), 0700); err != nil { + return err + } + if err := os.MkdirAll(shared.VarPath("shmounts"), 0711); err != nil { + return err + } + if err := os.MkdirAll(shared.VarPath("snapshots"), 0700); err != nil { + return err + } + + /* Detect the filesystem */ + d.BackingFs, err = filesystemDetect(d.lxcpath) + if err != nil { + shared.Log.Error("Error detecting backing fs", log.Ctx{"err": err}) + } + + /* Read the uid/gid allocation */ + d.IdmapSet, err = shared.DefaultIdmapSet() + if err != nil { + shared.Log.Warn("Error reading idmap", log.Ctx{"err": err.Error()}) + shared.Log.Warn("Only privileged containers will be able to run") + } else { + shared.Log.Info("Default uid/gid map:") + for _, lxcmap := range d.IdmapSet.ToLxcString() { + shared.Log.Info(strings.TrimRight(" - "+lxcmap, "\n")) + } + } + + /* Initialize the database */ + err = initializeDbObject(d, shared.VarPath("lxd.db")) + if err != nil { + return err + } + + /* Setup the storage driver */ + if !d.IsMock { + err = d.SetupStorageDriver() + if err != nil { + return fmt.Errorf("Failed to setup storage: %s", err) + } + } + + /* Prune images */ + d.pruneChan = make(chan bool) + go func() { + pruneExpiredImages(d) + for { + timer := time.NewTimer(24 * time.Hour) + timeChan := timer.C + select { + case <-timeChan: + /* run once per day */ + pruneExpiredImages(d) + case <-d.pruneChan: + /* run when image.remote_cache_expiry is changed */ + pruneExpiredImages(d) + timer.Stop() + } + } + }() + + /* Auto-update images */ + d.resetAutoUpdateChan = make(chan bool) + go func() { + autoUpdateImages(d) + + for { + interval, _ := d.ConfigValueGet("images.auto_update_interval") + if interval == "" { + interval = "6" + } + + intervalInt, err := strconv.Atoi(interval) + if err != nil { + 
intervalInt = 0 + } + + if intervalInt > 0 { + timer := time.NewTimer(time.Duration(intervalInt) * time.Hour) + timeChan := timer.C + + select { + case <-timeChan: + autoUpdateImages(d) + case <-d.resetAutoUpdateChan: + timer.Stop() + } + } else { + select { + case <-d.resetAutoUpdateChan: + continue + } + } + } + }() + + /* Setup /dev/lxd */ + d.devlxd, err = createAndBindDevLxd() + if err != nil { + return err + } + + if err := setupSharedMounts(); err != nil { + return err + } + + if !d.IsMock { + /* Start the scheduler */ + go deviceEventListener(d) + + /* Setup the TLS authentication */ + certf, keyf, err := readMyCert() + if err != nil { + return err + } + + cert, err := tls.LoadX509KeyPair(certf, keyf) + if err != nil { + return err + } + + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + ClientAuth: tls.RequestClientCert, + Certificates: []tls.Certificate{cert}, + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS12, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + PreferServerCipherSuites: true, + } + tlsConfig.BuildNameToCertificate() + + d.tlsConfig = tlsConfig + + readSavedClientCAList(d) + } + + /* Setup the web server */ + d.mux = mux.NewRouter() + d.mux.StrictSlash(false) + + d.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + SyncResponse(true, []string{"/1.0"}).Render(w) + }) + + for _, c := range api10 { + d.createCmd("1.0", c) + } + + for _, c := range apiInternal { + d.createCmd("internal", c) + } + + d.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + shared.Log.Debug("Sending top level 404", log.Ctx{"url": r.URL}) + w.Header().Set("Content-Type", "application/json") + NotFound.Render(w) + }) + + listeners, err := activation.Listeners(false) + if err != nil { + return err + } + + var sockets []Socket + + if len(listeners) > 0 { + shared.Log.Info("LXD is socket activated") + + for _, listener := range listeners { + if shared.PathExists(listener.Addr().String()) { + sockets = append(sockets, Socket{Socket: listener, CloseOnExit: false}) + } else { + tlsListener := tls.NewListener(listener, d.tlsConfig) + sockets = append(sockets, Socket{Socket: tlsListener, CloseOnExit: false}) + } + } + } else { + shared.Log.Info("LXD isn't socket activated") + + localSocketPath := shared.VarPath("unix.socket") + + // If the socket exists, let's try to connect to it and see if there's + // a lxd running. + if shared.PathExists(localSocketPath) { + _, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + shared.Log.Debug("Detected stale unix socket, deleting") + // Connecting failed, so let's delete the socket and + // listen on it ourselves. 
+ err = os.Remove(localSocketPath) + if err != nil { + return err + } + } else { + return fmt.Errorf("LXD is already running.") + } + } + + unixAddr, err := net.ResolveUnixAddr("unix", localSocketPath) + if err != nil { + return fmt.Errorf("cannot resolve unix socket address: %v", err) + } + + unixl, err := net.ListenUnix("unix", unixAddr) + if err != nil { + return fmt.Errorf("cannot listen on unix socket: %v", err) + } + + if err := os.Chmod(localSocketPath, 0660); err != nil { + return err + } + + var gid int + if d.group != "" { + gid, err = shared.GroupId(d.group) + if err != nil { + return err + } + } else { + gid = os.Getgid() + } + + if err := os.Chown(localSocketPath, os.Getuid(), gid); err != nil { + return err + } + + sockets = append(sockets, Socket{Socket: unixl, CloseOnExit: true}) + } + + listenAddr, err := d.ConfigValueGet("core.https_address") + if err != nil { + return err + } + + if listenAddr != "" { + _, _, err := net.SplitHostPort(listenAddr) + if err != nil { + listenAddr = fmt.Sprintf("%s:%s", listenAddr, shared.DefaultPort) + } + + tcpl, err := tls.Listen("tcp", listenAddr, d.tlsConfig) + if err != nil { + shared.Log.Error("cannot listen on https socket, skipping...", log.Ctx{"err": err}) + } else { + sockets = append(sockets, Socket{Socket: tcpl, CloseOnExit: true}) + } + } + + if !d.IsMock { + d.Sockets = sockets + } else { + d.Sockets = []Socket{} + } + + d.tomb.Go(func() error { + shared.Log.Info("REST API daemon:") + for _, socket := range d.Sockets { + shared.Log.Info(" - binding socket", log.Ctx{"socket": socket.Socket.Addr()}) + current_socket := socket + d.tomb.Go(func() error { return http.Serve(current_socket.Socket, &lxdHttpServer{d.mux, d}) }) + } + + d.tomb.Go(func() error { + server := devLxdServer(d) + return server.Serve(d.devlxd) + }) + return nil + }) + + // Restore containers + if !d.IsMock { + /* Restart containers */ + go containersRestart(d) + + /* Re-balance in case things changed while LXD was down */ + deviceTaskBalance(d) + } + + return nil +} + +// CheckTrustState returns True if the client is trusted else false. +func (d *Daemon) CheckTrustState(cert x509.Certificate) bool { + for k, v := range d.clientCerts { + if bytes.Compare(cert.Raw, v.Raw) == 0 { + shared.Log.Debug("Found cert", log.Ctx{"k": k}) + return true + } + shared.Log.Debug("Client cert != key", log.Ctx{"k": k}) + } + return false +} + +func (d *Daemon) numRunningContainers() (int, error) { + results, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return 0, err + } + + count := 0 + for _, r := range results { + container, err := containerLoadByName(d, r) + if err != nil { + continue + } + + if container.IsRunning() { + count = count + 1 + } + } + + return count, nil +} + +var errStop = fmt.Errorf("requested stop") + +// Stop stops the shared daemon. 
+func (d *Daemon) Stop() error { + forceStop := false + + d.tomb.Kill(errStop) + shared.Log.Info("Stopping REST API handler:") + for _, socket := range d.Sockets { + if socket.CloseOnExit { + shared.Log.Info(" - closing socket", log.Ctx{"socket": socket.Socket.Addr()}) + socket.Socket.Close() + } else { + shared.Log.Info(" - skipping socket-activated socket", log.Ctx{"socket": socket.Socket.Addr()}) + forceStop = true + } + } + + if n, err := d.numRunningContainers(); err != nil || n == 0 { + shared.Log.Debug("Unmounting shmounts") + + syscall.Unmount(shared.VarPath("shmounts"), syscall.MNT_DETACH) + } else { + shared.Debugf("Not unmounting shmounts (containers are still running)") + } + + shared.Log.Debug("Closing the database") + d.db.Close() + + shared.Log.Debug("Stopping /dev/lxd handler") + d.devlxd.Close() + + if d.IsMock || forceStop { + return nil + } + + err := d.tomb.Wait() + if err == errStop { + return nil + } + + return err +} + +// ConfigKeyIsValid returns if the given key is a known config value. +func (d *Daemon) ConfigKeyIsValid(key string) bool { + switch key { + case "core.https_address": + return true + case "core.https_allowed_origin": + return true + case "core.https_allowed_methods": + return true + case "core.https_allowed_headers": + return true + case "core.trust_password": + return true + case "storage.lvm_vg_name": + return true + case "storage.lvm_thinpool_name": + return true + case "storage.lvm_fstype": + return true + case "storage.zfs_pool_name": + return true + case "images.remote_cache_expiry": + return true + case "images.compression_algorithm": + return true + case "images.auto_update_interval": + return true + case "images.auto_update_cached": + return true + } + + return false +} + +// ConfigValueGet returns a config value from the memory, +// calls ConfigValuesGet if required. +// It returns a empty result if the config key isn't given. +func (d *Daemon) ConfigValueGet(key string) (string, error) { + if d.configValues == nil { + if _, err := d.ConfigValuesGet(); err != nil { + return "", err + } + } + + if val, ok := d.configValues[key]; ok { + return val, nil + } + + return "", nil +} + +// ConfigValuesGet fetches all config values and stores them in memory. +func (d *Daemon) ConfigValuesGet() (map[string]string, error) { + if d.configValues == nil { + var err error + d.configValues, err = dbConfigValuesGet(d.db) + if err != nil { + return d.configValues, err + } + } + + return d.configValues, nil +} + +// ConfigValueSet sets a new or updates a config value, +// it updates the value in the DB and in memory. +func (d *Daemon) ConfigValueSet(key string, value string) error { + if err := dbConfigValueSet(d.db, key, value); err != nil { + return err + } + + if d.configValues == nil { + if _, err := d.ConfigValuesGet(); err != nil { + return err + } + } + + if value == "" { + delete(d.configValues, key) + } else { + d.configValues[key] = value + } + + return nil +} + +// PasswordSet sets the password to the new value. +func (d *Daemon) PasswordSet(password string) error { + shared.Log.Info("Setting new https password") + var value = password + if password != "" { + buf := make([]byte, pwSaltBytes) + _, err := io.ReadFull(rand.Reader, buf) + if err != nil { + return err + } + + hash, err := scrypt.Key([]byte(password), buf, 1<<14, 8, 1, pwHashBytes) + if err != nil { + return err + } + + buf = append(buf, hash...) 
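+ /* At this point buf holds salt||hash: pwSaltBytes (32) random bytes
+ * followed by pwHashBytes (64) bytes of scrypt output, so the hex
+ * string stored as core.trust_password is 192 characters long.
+ * PasswordCheck below splits it at the same offset:
+ *
+ * salt, stored := buff[:pwSaltBytes], buff[pwSaltBytes:]
+ * hash, _ := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, pwHashBytes)
+ * ok := bytes.Equal(hash, stored)
+ */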
+		value = hex.EncodeToString(buf)
+	}
+
+	err := d.ConfigValueSet("core.trust_password", value)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// PasswordCheck verifies that the given password matches the salted hash
+// stored in the DB.
+func (d *Daemon) PasswordCheck(password string) bool {
+	value, err := d.ConfigValueGet("core.trust_password")
+	if err != nil {
+		shared.Log.Error("verifyAdminPwd", log.Ctx{"err": err})
+		return false
+	}
+
+	// No password set
+	if value == "" {
+		return false
+	}
+
+	buff, err := hex.DecodeString(value)
+	if err != nil {
+		shared.Log.Error("hex decode failed", log.Ctx{"err": err})
+		return false
+	}
+
+	salt := buff[0:pwSaltBytes]
+	hash, err := scrypt.Key([]byte(password), salt, 1<<14, 8, 1, pwHashBytes)
+	if err != nil {
+		shared.Log.Error("Failed to create hash to check", log.Ctx{"err": err})
+		return false
+	}
+	if !bytes.Equal(hash, buff[pwSaltBytes:]) {
+		shared.Log.Error("Bad password received", log.Ctx{"err": err})
+		return false
+	}
+	shared.Log.Debug("Verified the admin password")
+	return true
+}
+
+type lxdHttpServer struct {
+	r *mux.Router
+	d *Daemon
+}
+
+func (s *lxdHttpServer) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+	allowedOrigin, _ := s.d.ConfigValueGet("core.https_allowed_origin")
+	origin := req.Header.Get("Origin")
+	if allowedOrigin != "" && origin != "" {
+		rw.Header().Set("Access-Control-Allow-Origin", allowedOrigin)
+	}
+
+	allowedMethods, _ := s.d.ConfigValueGet("core.https_allowed_methods")
+	if allowedMethods != "" && origin != "" {
+		rw.Header().Set("Access-Control-Allow-Methods", allowedMethods)
+	}
+
+	allowedHeaders, _ := s.d.ConfigValueGet("core.https_allowed_headers")
+	if allowedHeaders != "" && origin != "" {
+		rw.Header().Set("Access-Control-Allow-Headers", allowedHeaders)
+	}
+
+	// OPTIONS requests don't need any further processing
+	if req.Method == "OPTIONS" {
+		return
+	}
+
+	// Call the original server
+	s.r.ServeHTTP(rw, req)
+}
=== added file 'src/github.com/lxc/lxd/lxd/daemon_images.go'
--- src/github.com/lxc/lxd/lxd/daemon_images.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/daemon_images.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,388 @@
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"mime"
+	"mime/multipart"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/lxc/lxd/shared"
+
+	log "gopkg.in/inconshreveable/log15.v2"
+)
+
+// ImageDownload returns the image fingerprint if the image is already in the
+// DB; otherwise it first downloads the image from the given remote server.
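+//
+// A typical call from the container creation path looks like this (a sketch;
+// the server URL and alias are illustrative only):
+//
+//	fp, err := d.ImageDownload(op, "https://images.linuxcontainers.org",
+//		"simplestreams", "", "", "ubuntu/trusty/amd64", true, false)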
+func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool) (string, error) {
+	var err error
+	var ss *shared.SimpleStreams
+
+	if protocol == "" {
+		protocol = "lxd"
+	}
+
+	fp := alias
+
+	// Expand aliases
+	if protocol == "simplestreams" {
+		ss, err = shared.SimpleStreamsClient(server)
+		if err != nil {
+			return "", err
+		}
+
+		target := ss.GetAlias(fp)
+		if target != "" {
+			fp = target
+		}
+
+		image, err := ss.GetImageInfo(fp)
+		if err != nil {
+			return "", err
+		}
+
+		if fp == alias {
+			alias = image.Fingerprint
+		}
+		fp = image.Fingerprint
+	} else if protocol == "lxd" {
+		target, err := remoteGetImageFingerprint(d, server, certificate, fp)
+		if err == nil && target != "" {
+			fp = target
+		}
+	}
+
+	if _, _, err := dbImageGet(d.db, fp, false, false); err == nil {
+		shared.Log.Debug("Image already exists in the db", log.Ctx{"image": fp})
+		// already have it
+		return fp, nil
+	}
+
+	shared.Log.Info(
+		"Image not in the db, downloading it",
+		log.Ctx{"image": fp, "server": server})
+
+	// Now check if we are already downloading the image
+	d.imagesDownloadingLock.RLock()
+	if waitChannel, ok := d.imagesDownloading[fp]; ok {
+		// Another goroutine is already downloading the image
+		d.imagesDownloadingLock.RUnlock()
+
+		shared.Log.Info(
+			"Already downloading the image, waiting for it to succeed",
+			log.Ctx{"image": fp})
+
+		// Wait until the download finishes (channel closes)
+		if _, ok := <-waitChannel; ok {
+			shared.Log.Warn("Value transmitted over image lock semaphore?")
+		}
+
+		if _, _, err := dbImageGet(d.db, fp, false, true); err != nil {
+			shared.Log.Error(
+				"Previous download didn't succeed",
+				log.Ctx{"image": fp})
+
+			return "", fmt.Errorf("Previous download didn't succeed")
+		}
+
+		shared.Log.Info(
+			"Previous download succeeded",
+			log.Ctx{"image": fp})
+
+		return fp, nil
+	}
+
+	d.imagesDownloadingLock.RUnlock()
+
+	shared.Log.Info(
+		"Downloading the image",
+		log.Ctx{"image": fp})
+
+	// Add the download to the queue
+	d.imagesDownloadingLock.Lock()
+	d.imagesDownloading[fp] = make(chan bool)
+	d.imagesDownloadingLock.Unlock()
+
+	// Unlock once this func ends.
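+	// The channel stored in d.imagesDownloading doubles as a broadcast
+	// semaphore: concurrent callers block on a receive above and the
+	// deferred close() below wakes all of them at once. The same pattern in
+	// miniature (a sketch, with hypothetical names):
+	//
+	//	mu.Lock()
+	//	ch, busy := inflight[key]
+	//	mu.Unlock()
+	//	if busy {
+	//		<-ch // returns once the owner close()s ch
+	//	}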
+ defer func() { + d.imagesDownloadingLock.Lock() + if waitChannel, ok := d.imagesDownloading[fp]; ok { + close(waitChannel) + delete(d.imagesDownloading, fp) + } + d.imagesDownloadingLock.Unlock() + }() + + exporturl := server + + var info shared.ImageInfo + info.Fingerprint = fp + + destDir := shared.VarPath("images") + destName := filepath.Join(destDir, fp) + if shared.PathExists(destName) { + d.Storage.ImageDelete(fp) + } + + progress := func(progressInt int) { + if op == nil { + return + } + + meta := op.metadata + if meta == nil { + meta = make(map[string]interface{}) + } + + progress := fmt.Sprintf("%d%%", progressInt) + + if meta["download_progress"] != progress { + meta["download_progress"] = progress + op.UpdateMetadata(meta) + } + } + + if protocol == "lxd" { + /* grab the metadata from /1.0/images/%s */ + var url string + if secret != "" { + url = fmt.Sprintf( + "%s/%s/images/%s?secret=%s", + server, shared.APIVersion, fp, secret) + } else { + url = fmt.Sprintf("%s/%s/images/%s", server, shared.APIVersion, fp) + } + + resp, err := d.httpGetSync(url, certificate) + if err != nil { + shared.Log.Error( + "Failed to download image metadata", + log.Ctx{"image": fp, "err": err}) + + return "", err + } + + if err := json.Unmarshal(resp.Metadata, &info); err != nil { + return "", err + } + + /* now grab the actual file from /1.0/images/%s/export */ + if secret != "" { + exporturl = fmt.Sprintf( + "%s/%s/images/%s/export?secret=%s", + server, shared.APIVersion, fp, secret) + + } else { + exporturl = fmt.Sprintf( + "%s/%s/images/%s/export", + server, shared.APIVersion, fp) + } + } else if protocol == "simplestreams" { + err := ss.Download(fp, "meta", destName, nil) + if err != nil { + return "", err + } + + err = ss.Download(fp, "root", destName+".rootfs", progress) + if err != nil { + return "", err + } + + info, err := ss.GetImageInfo(fp) + if err != nil { + return "", err + } + + info.Public = false + info.AutoUpdate = autoUpdate + + _, err = imageBuildFromInfo(d, *info) + if err != nil { + return "", err + } + + if alias != fp { + id, _, err := dbImageGet(d.db, fp, false, true) + if err != nil { + return "", err + } + + err = dbImageSourceInsert(d.db, id, server, protocol, "", alias) + if err != nil { + return "", err + } + } + + if forContainer { + return fp, dbImageLastAccessInit(d.db, fp) + } + + return fp, nil + } + + raw, err := d.httpGetFile(exporturl, certificate) + if err != nil { + shared.Log.Error( + "Failed to download image", + log.Ctx{"image": fp, "err": err}) + return "", err + } + info.Size = raw.ContentLength + + ctype, ctypeParams, err := mime.ParseMediaType(raw.Header.Get("Content-Type")) + if err != nil { + ctype = "application/octet-stream" + } + + body := &shared.TransferProgress{Reader: raw.Body, Length: raw.ContentLength, Handler: progress} + + if ctype == "multipart/form-data" { + // Parse the POST data + mr := multipart.NewReader(body, ctypeParams["boundary"]) + + // Get the metadata tarball + part, err := mr.NextPart() + if err != nil { + shared.Log.Error( + "Invalid multipart image", + log.Ctx{"image": fp, "err": err}) + + return "", err + } + + if part.FormName() != "metadata" { + shared.Log.Error( + "Invalid multipart image", + log.Ctx{"image": fp, "err": err}) + + return "", fmt.Errorf("Invalid multipart image") + } + + destName = filepath.Join(destDir, info.Fingerprint) + f, err := os.Create(destName) + if err != nil { + shared.Log.Error( + "Failed to save image", + log.Ctx{"image": fp, "err": err}) + + return "", err + } + + _, err = io.Copy(f, part) + 
f.Close()
+
+		if err != nil {
+			shared.Log.Error(
+				"Failed to save image",
+				log.Ctx{"image": fp, "err": err})
+
+			return "", err
+		}
+
+		// Get the rootfs tarball
+		part, err = mr.NextPart()
+		if err != nil {
+			shared.Log.Error(
+				"Invalid multipart image",
+				log.Ctx{"image": fp, "err": err})
+
+			return "", err
+		}
+
+		if part.FormName() != "rootfs" {
+			shared.Log.Error(
+				"Invalid multipart image",
+				log.Ctx{"image": fp})
+			return "", fmt.Errorf("Invalid multipart image")
+		}
+
+		destName = filepath.Join(destDir, info.Fingerprint+".rootfs")
+		f, err = os.Create(destName)
+		if err != nil {
+			shared.Log.Error(
+				"Failed to save image",
+				log.Ctx{"image": fp, "err": err})
+			return "", err
+		}
+
+		_, err = io.Copy(f, part)
+		f.Close()
+
+		if err != nil {
+			shared.Log.Error(
+				"Failed to save image",
+				log.Ctx{"image": fp, "err": err})
+			return "", err
+		}
+	} else {
+		destName = filepath.Join(destDir, info.Fingerprint)
+
+		f, err := os.Create(destName)
+		if err != nil {
+			shared.Log.Error(
+				"Failed to save image",
+				log.Ctx{"image": fp, "err": err})
+
+			return "", err
+		}
+
+		_, err = io.Copy(f, body)
+		f.Close()
+
+		if err != nil {
+			shared.Log.Error(
+				"Failed to save image",
+				log.Ctx{"image": fp, "err": err})
+			return "", err
+		}
+	}
+
+	if protocol == "direct" {
+		imageMeta, err := getImageMetadata(destName)
+		if err != nil {
+			return "", err
+		}
+
+		info.Architecture = imageMeta.Architecture
+		info.CreationDate = time.Unix(imageMeta.CreationDate, 0)
+		info.ExpiryDate = time.Unix(imageMeta.ExpiryDate, 0)
+		info.Properties = imageMeta.Properties
+	}
+
+	// By default, make all downloaded images private
+	info.Public = false
+
+	_, err = imageBuildFromInfo(d, info)
+	if err != nil {
+		shared.Log.Error(
+			"Failed to create image",
+			log.Ctx{"image": fp, "err": err})
+
+		return "", err
+	}
+
+	if alias != fp {
+		id, _, err := dbImageGet(d.db, fp, false, true)
+		if err != nil {
+			return "", err
+		}
+
+		err = dbImageSourceInsert(d.db, id, server, protocol, "", alias)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	shared.Log.Info(
+		"Download succeeded",
+		log.Ctx{"image": fp})
+
+	if forContainer {
+		return fp, dbImageLastAccessInit(d.db, fp)
+	}
+
+	return fp, nil
+}
=== added file 'src/github.com/lxc/lxd/lxd/daemon_test.go'
--- src/github.com/lxc/lxd/lxd/daemon_test.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/daemon_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,26 @@
+package main
+
+func (suite *lxdTestSuite) Test_config_value_set_empty_removes_val() {
+	var err error
+	d := suite.d
+
+	err = d.ConfigValueSet("storage.lvm_vg_name", "foo")
+	suite.Req.Nil(err)
+
+	val, err := d.ConfigValueGet("storage.lvm_vg_name")
+	suite.Req.Nil(err)
+	suite.Req.Equal(val, "foo")
+
+	err = d.ConfigValueSet("storage.lvm_vg_name", "")
+	suite.Req.Nil(err)
+
+	val, err = d.ConfigValueGet("storage.lvm_vg_name")
+	suite.Req.Nil(err)
+	suite.Req.Equal(val, "")
+
+	valMap, err := d.ConfigValuesGet()
+	suite.Req.Nil(err)
+
+	_, present := valMap["storage.lvm_vg_name"]
+	suite.Req.False(present)
+}
=== added file 'src/github.com/lxc/lxd/lxd/db.go'
--- src/github.com/lxc/lxd/lxd/db.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/db.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,439 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+	"time"
+
+	"github.com/mattn/go-sqlite3"
+
+	"github.com/lxc/lxd/shared"
+)
+
+var (
+	// DbErrAlreadyDefined happens when the given entry already exists,
+	// for example a container.
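+	// Since it is a fixed sentinel value, callers can test for it with a
+	// plain comparison (err == DbErrAlreadyDefined).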
+ DbErrAlreadyDefined = fmt.Errorf("The container/snapshot already exists") + + /* NoSuchObjectError is in the case of joins (and probably other) queries, + * we don't get back sql.ErrNoRows when no rows are returned, even though we do + * on selects without joins. Instead, you can use this error to + * propagate up and generate proper 404s to the client when something + * isn't found so we don't abuse sql.ErrNoRows any more than we + * already do. + */ + NoSuchObjectError = fmt.Errorf("No such object") +) + +// Profile is here to order Profiles. +type Profile struct { + name string + order int +} + +// Profiles will contain a list of all Profiles. +type Profiles []Profile + +const DB_CURRENT_VERSION int = 28 + +// CURRENT_SCHEMA contains the current SQLite SQL Schema. +const CURRENT_SCHEMA string = ` +CREATE TABLE IF NOT EXISTS certificates ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + fingerprint VARCHAR(255) NOT NULL, + type INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + certificate TEXT NOT NULL, + UNIQUE (fingerprint) +); +CREATE TABLE IF NOT EXISTS config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (key) +); +CREATE TABLE IF NOT EXISTS containers ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + architecture INTEGER NOT NULL, + type INTEGER NOT NULL, + ephemeral INTEGER NOT NULL DEFAULT 0, + stateful INTEGER NOT NULL DEFAULT 0, + creation_date DATETIME, + UNIQUE (name) +); +CREATE TABLE IF NOT EXISTS containers_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE, + UNIQUE (container_id, key) +); +CREATE TABLE IF NOT EXISTS containers_devices ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type INTEGER NOT NULL default 0, + FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE, + UNIQUE (container_id, name) +); +CREATE TABLE IF NOT EXISTS containers_devices_config ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_device_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE, + UNIQUE (container_device_id, key) +); +CREATE TABLE IF NOT EXISTS containers_profiles ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + apply_order INTEGER NOT NULL default 0, + UNIQUE (container_id, profile_id), + FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE, + FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS images ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + cached INTEGER NOT NULL DEFAULT 0, + fingerprint VARCHAR(255) NOT NULL, + filename VARCHAR(255) NOT NULL, + size INTEGER NOT NULL, + public INTEGER NOT NULL DEFAULT 0, + auto_update INTEGER NOT NULL DEFAULT 0, + architecture INTEGER NOT NULL, + creation_date DATETIME, + expiry_date DATETIME, + upload_date DATETIME NOT NULL, + last_use_date DATETIME, + UNIQUE (fingerprint) +); +CREATE TABLE IF NOT EXISTS images_aliases ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + image_id INTEGER NOT NULL, + description VARCHAR(255), + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE, + UNIQUE (name) +); +CREATE TABLE IF NOT 
EXISTS images_properties ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + image_id INTEGER NOT NULL, + type INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS images_source ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + image_id INTEGER NOT NULL, + server TEXT NOT NULL, + protocol INTEGER NOT NULL, + certificate TEXT NOT NULL, + alias VARCHAR(255) NOT NULL, + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS profiles ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + description TEXT, + UNIQUE (name) +); +CREATE TABLE IF NOT EXISTS profiles_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value VARCHAR(255), + UNIQUE (profile_id, key), + FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS profiles_devices ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type INTEGER NOT NULL default 0, + UNIQUE (profile_id, name), + FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS profiles_devices_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_device_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (profile_device_id, key), + FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS schema ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + version INTEGER NOT NULL, + updated_at DATETIME NOT NULL, + UNIQUE (version) +);` + +// Create the initial (current) schema for a given SQLite DB connection. +func createDb(db *sql.DB) (err error) { + latestVersion := dbGetSchema(db) + + if latestVersion != 0 { + return nil + } + + _, err = db.Exec(CURRENT_SCHEMA) + if err != nil { + return err + } + + // There isn't an entry for schema version, let's put it in. + insertStmt := `INSERT INTO schema (version, updated_at) values (?, strftime("%s"));` + _, err = db.Exec(insertStmt, DB_CURRENT_VERSION) + if err != nil { + return err + } + + err = dbProfileCreateDefault(db) + if err != nil { + return err + } + + return dbProfileCreateDocker(db) +} + +func dbGetSchema(db *sql.DB) (v int) { + arg1 := []interface{}{} + arg2 := []interface{}{&v} + q := "SELECT max(version) FROM schema" + err := dbQueryRowScan(db, q, arg1, arg2) + if err != nil { + return 0 + } + return v +} + +// Create a database connection object and return it. +func initializeDbObject(d *Daemon, path string) (err error) { + var openPath string + + timeout := 5 // TODO - make this command-line configurable? + + // These are used to tune the transaction BEGIN behavior instead of using the + // similar "locking_mode" pragma (locking for the whole database connection). + openPath = fmt.Sprintf("%s?_busy_timeout=%d&_txlock=exclusive", path, timeout*1000) + + // Open the database. If the file doesn't exist it is created. + d.db, err = sql.Open("sqlite3", openPath) + if err != nil { + return err + } + + // Create the DB if it doesn't exist. + err = createDb(d.db) + if err != nil { + return fmt.Errorf("Error creating database: %s", err) + } + + // Run PRAGMA statements now since they are *per-connection*. 
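+	// Note that database/sql pools connections, so if the pool ever opens a
+	// second connection the pragma would have to be applied there too.
+	// Whether it took effect can be checked manually (a sketch):
+	//
+	//	var on int
+	//	_ = d.db.QueryRow("PRAGMA foreign_keys").Scan(&on) // on == 1 when enabled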
+ d.db.Exec("PRAGMA foreign_keys=ON;") // This allows us to use ON DELETE CASCADE + + v := dbGetSchema(d.db) + + if v != DB_CURRENT_VERSION { + err = dbUpdate(d, v) + if err != nil { + return err + } + } + + return nil +} + +func isDbLockedError(err error) bool { + if err == nil { + return false + } + if err == sqlite3.ErrLocked || err == sqlite3.ErrBusy { + return true + } + if err.Error() == "database is locked" { + return true + } + return false +} + +func isNoMatchError(err error) bool { + if err == nil { + return false + } + if err.Error() == "sql: no rows in result set" { + return true + } + return false +} + +func dbBegin(db *sql.DB) (*sql.Tx, error) { + for i := 0; i < 100; i++ { + tx, err := db.Begin() + if err == nil { + return tx, nil + } + if !isDbLockedError(err) { + shared.Debugf("DbBegin: error %q", err) + return nil, err + } + time.Sleep(100 * time.Millisecond) + } + + shared.Debugf("DbBegin: DB still locked") + shared.PrintStack() + return nil, fmt.Errorf("DB is locked") +} + +func txCommit(tx *sql.Tx) error { + for i := 0; i < 100; i++ { + err := tx.Commit() + if err == nil { + return nil + } + if !isDbLockedError(err) { + shared.Debugf("Txcommit: error %q", err) + return err + } + time.Sleep(100 * time.Millisecond) + } + + shared.Debugf("Txcommit: db still locked") + shared.PrintStack() + return fmt.Errorf("DB is locked") +} + +func dbQueryRowScan(db *sql.DB, q string, args []interface{}, outargs []interface{}) error { + for i := 0; i < 100; i++ { + err := db.QueryRow(q, args...).Scan(outargs...) + if err == nil { + return nil + } + if isNoMatchError(err) { + return err + } + if !isDbLockedError(err) { + return err + } + time.Sleep(100 * time.Millisecond) + } + + shared.Debugf("DbQueryRowScan: query %q args %q, DB still locked", q, args) + shared.PrintStack() + return fmt.Errorf("DB is locked") +} + +func dbQuery(db *sql.DB, q string, args ...interface{}) (*sql.Rows, error) { + for i := 0; i < 100; i++ { + result, err := db.Query(q, args...) + if err == nil { + return result, nil + } + if !isDbLockedError(err) { + shared.Debugf("DbQuery: query %q error %q", q, err) + return nil, err + } + time.Sleep(100 * time.Millisecond) + } + + shared.Debugf("DbQuery: query %q args %q, DB still locked", q, args) + shared.PrintStack() + return nil, fmt.Errorf("DB is locked") +} + +func doDbQueryScan(db *sql.DB, q string, args []interface{}, outargs []interface{}) ([][]interface{}, error) { + rows, err := db.Query(q, args...) + if err != nil { + return [][]interface{}{}, err + } + defer rows.Close() + result := [][]interface{}{} + for rows.Next() { + ptrargs := make([]interface{}, len(outargs)) + for i := range outargs { + switch t := outargs[i].(type) { + case string: + str := "" + ptrargs[i] = &str + case int: + integer := 0 + ptrargs[i] = &integer + default: + return [][]interface{}{}, fmt.Errorf("Bad interface type: %s", t) + } + } + err = rows.Scan(ptrargs...) + if err != nil { + return [][]interface{}{}, err + } + newargs := make([]interface{}, len(outargs)) + for i := range ptrargs { + switch t := outargs[i].(type) { + case string: + newargs[i] = *ptrargs[i].(*string) + case int: + newargs[i] = *ptrargs[i].(*int) + default: + return [][]interface{}{}, fmt.Errorf("Bad interface type: %s", t) + } + } + result = append(result, newargs) + } + err = rows.Err() + if err != nil { + return [][]interface{}{}, err + } + return result, nil +} + +/* + * . q is the database query + * . inargs is an array of interfaces containing the query arguments + * . 
outfmt is an array of interfaces containing the right types of output
+ *   arguments, i.e.
+ *     var arg1 string
+ *     var arg2 int
+ *     outfmt := []interface{}{arg1, arg2}
+ *
+ * The result will be an array (one per output row) of arrays (one per output argument)
+ * of interfaces, containing the actual output values.
+ */
+func dbQueryScan(db *sql.DB, q string, inargs []interface{}, outfmt []interface{}) ([][]interface{}, error) {
+	for i := 0; i < 100; i++ {
+		result, err := doDbQueryScan(db, q, inargs, outfmt)
+		if err == nil {
+			return result, nil
+		}
+		if !isDbLockedError(err) {
+			shared.Debugf("DbQuery: query %q error %q", q, err)
+			return nil, err
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	shared.Debugf("DbQueryscan: query %q inargs %q, DB still locked", q, inargs)
+	shared.PrintStack()
+	return nil, fmt.Errorf("DB is locked")
+}
+
+func dbExec(db *sql.DB, q string, args ...interface{}) (sql.Result, error) {
+	for i := 0; i < 100; i++ {
+		result, err := db.Exec(q, args...)
+		if err == nil {
+			return result, nil
+		}
+		if !isDbLockedError(err) {
+			shared.Debugf("DbExec: query %q error %q", q, err)
+			return nil, err
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	shared.Debugf("DbExec: query %q args %q, DB still locked", q, args)
+	shared.PrintStack()
+	return nil, fmt.Errorf("DB is locked")
+}
=== added file 'src/github.com/lxc/lxd/lxd/db_certificates.go'
--- src/github.com/lxc/lxd/lxd/db_certificates.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/db_certificates.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,120 @@
+package main
+
+import (
+	"database/sql"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// dbCertInfo carries certificate content to and from the database.
+type dbCertInfo struct {
+	ID          int
+	Fingerprint string
+	Type        int
+	Name        string
+	Certificate string
+}
+
+// dbCertsGet returns all certificates from the DB as CertBaseInfo objects.
+func dbCertsGet(db *sql.DB) (certs []*dbCertInfo, err error) {
+	rows, err := dbQuery(
+		db,
+		"SELECT id, fingerprint, type, name, certificate FROM certificates",
+	)
+	if err != nil {
+		return certs, err
+	}
+
+	defer rows.Close()
+
+	for rows.Next() {
+		cert := new(dbCertInfo)
+		rows.Scan(
+			&cert.ID,
+			&cert.Fingerprint,
+			&cert.Type,
+			&cert.Name,
+			&cert.Certificate,
+		)
+		certs = append(certs, cert)
+	}
+
+	return certs, nil
+}
+
+// dbCertGet gets a CertBaseInfo object from the database.
+// The argument fingerprint is matched with a LIKE query, so a short prefix
+// can be passed and the full fingerprint will be returned.
+// There can never be more than one certificate with a given fingerprint, as
+// it is enforced by a UNIQUE constraint in the schema.
+func dbCertGet(db *sql.DB, fingerprint string) (cert *dbCertInfo, err error) {
+	cert = new(dbCertInfo)
+
+	inargs := []interface{}{fingerprint + "%"}
+	outfmt := []interface{}{
+		&cert.ID,
+		&cert.Fingerprint,
+		&cert.Type,
+		&cert.Name,
+		&cert.Certificate,
+	}
+
+	query := `
+		SELECT
+			id, fingerprint, type, name, certificate
+		FROM
+			certificates
+		WHERE fingerprint LIKE ?`
+
+	if err = dbQueryRowScan(db, query, inargs, outfmt); err != nil {
+		return nil, err
+	}
+
+	return cert, err
+}
+
+// dbCertSave stores a CertBaseInfo object in the db; the ID field of the
+// dbCertInfo is ignored.
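+//
+// A typical call (a sketch; the field values are illustrative):
+//
+//	err := dbCertSave(db, &dbCertInfo{
+//		Fingerprint: fingerprint, // e.g. the hex SHA-256 of the DER cert
+//		Type:        1,
+//		Name:        "client-laptop",
+//		Certificate: certPEM,
+//	})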
+func dbCertSave(db *sql.DB, cert *dbCertInfo) error { + tx, err := dbBegin(db) + if err != nil { + return err + } + stmt, err := tx.Prepare(` + INSERT INTO certificates ( + fingerprint, + type, + name, + certificate + ) VALUES (?, ?, ?, ?)`, + ) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + _, err = stmt.Exec( + cert.Fingerprint, + cert.Type, + cert.Name, + cert.Certificate, + ) + if err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} + +// dbCertDelete deletes a certificate from the db. +func dbCertDelete(db *sql.DB, fingerprint string) error { + _, err := dbExec( + db, + "DELETE FROM certificates WHERE fingerprint=?", + fingerprint, + ) + + return err +} === added file 'src/github.com/lxc/lxd/lxd/db_config.go' --- src/github.com/lxc/lxd/lxd/db_config.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/db_config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +package main + +import ( + "database/sql" + + _ "github.com/mattn/go-sqlite3" +) + +func dbConfigValuesGet(db *sql.DB) (map[string]string, error) { + q := "SELECT key, value FROM config" + rows, err := dbQuery(db, q) + if err != nil { + return map[string]string{}, err + } + defer rows.Close() + + results := map[string]string{} + + for rows.Next() { + var key, value string + rows.Scan(&key, &value) + results[key] = value + } + + return results, nil +} + +func dbConfigValueSet(db *sql.DB, key string, value string) error { + tx, err := dbBegin(db) + if err != nil { + return err + } + + _, err = tx.Exec("DELETE FROM config WHERE key=?", key) + if err != nil { + tx.Rollback() + return err + } + + if value != "" { + str := `INSERT INTO config (key, value) VALUES (?, ?);` + stmt, err := tx.Prepare(str) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + _, err = stmt.Exec(key, value) + if err != nil { + tx.Rollback() + return err + } + } + + err = txCommit(tx) + if err != nil { + return err + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxd/db_containers.go' --- src/github.com/lxc/lxd/lxd/db_containers.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/db_containers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,392 @@ +package main + +import ( + "database/sql" + "fmt" + "time" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +type containerType int + +const ( + cTypeRegular containerType = 0 + cTypeSnapshot containerType = 1 +) + +func dbContainerRemove(db *sql.DB, name string) error { + id, err := dbContainerId(db, name) + if err != nil { + return err + } + + tx, err := dbBegin(db) + if err != nil { + return err + } + + err = dbContainerConfigClear(tx, id) + if err != nil { + tx.Rollback() + return err + } + + _, err = tx.Exec("DELETE FROM containers WHERE id=?", id) + if err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} + +func dbContainerName(db *sql.DB, id int) (string, error) { + q := "SELECT name FROM containers WHERE id=?" + name := "" + arg1 := []interface{}{id} + arg2 := []interface{}{&name} + err := dbQueryRowScan(db, q, arg1, arg2) + return name, err +} + +func dbContainerId(db *sql.DB, name string) (int, error) { + q := "SELECT id FROM containers WHERE name=?" 
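+	// -1 is a sentinel: dbQueryRowScan leaves id untouched when no row
+	// matches, so callers (e.g. dbContainerGet) can treat -1 as "unknown
+	// container".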
+ id := -1 + arg1 := []interface{}{name} + arg2 := []interface{}{&id} + err := dbQueryRowScan(db, q, arg1, arg2) + return id, err +} + +func dbContainerGet(db *sql.DB, name string) (containerArgs, error) { + args := containerArgs{} + args.Name = name + + ephemInt := -1 + statefulInt := -1 + q := "SELECT id, architecture, type, ephemeral, stateful, creation_date FROM containers WHERE name=?" + arg1 := []interface{}{name} + arg2 := []interface{}{&args.Id, &args.Architecture, &args.Ctype, &ephemInt, &statefulInt, &args.CreationDate} + err := dbQueryRowScan(db, q, arg1, arg2) + if err != nil { + return args, err + } + + if args.Id == -1 { + return args, fmt.Errorf("Unknown container") + } + + if ephemInt == 1 { + args.Ephemeral = true + } + + if statefulInt == 1 { + args.Stateful = true + } + + config, err := dbContainerConfig(db, args.Id) + if err != nil { + return args, err + } + args.Config = config + + profiles, err := dbContainerProfiles(db, args.Id) + if err != nil { + return args, err + } + args.Profiles = profiles + + /* get container_devices */ + args.Devices = shared.Devices{} + newdevs, err := dbDevices(db, name, false) + if err != nil { + return args, err + } + + for k, v := range newdevs { + args.Devices[k] = v + } + + return args, nil +} + +func dbContainerCreate(db *sql.DB, args containerArgs) (int, error) { + id, err := dbContainerId(db, args.Name) + if err == nil { + return 0, DbErrAlreadyDefined + } + + tx, err := dbBegin(db) + if err != nil { + return 0, err + } + + ephemInt := 0 + if args.Ephemeral == true { + ephemInt = 1 + } + + statefulInt := 0 + if args.Stateful == true { + statefulInt = 1 + } + + args.CreationDate = time.Now().UTC() + + str := fmt.Sprintf("INSERT INTO containers (name, architecture, type, ephemeral, creation_date, stateful) VALUES (?, ?, ?, ?, ?, ?)") + stmt, err := tx.Prepare(str) + if err != nil { + tx.Rollback() + return 0, err + } + defer stmt.Close() + result, err := stmt.Exec(args.Name, args.Architecture, args.Ctype, ephemInt, args.CreationDate.Unix(), statefulInt) + if err != nil { + tx.Rollback() + return 0, err + } + + id64, err := result.LastInsertId() + if err != nil { + tx.Rollback() + return 0, fmt.Errorf("Error inserting %s into database", args.Name) + } + // TODO: is this really int64? 
we should fix it everywhere if so + id = int(id64) + if err := dbContainerConfigInsert(tx, id, args.Config); err != nil { + tx.Rollback() + return 0, err + } + + if err := dbContainerProfilesInsert(tx, id, args.Profiles); err != nil { + tx.Rollback() + return 0, err + } + + if err := dbDevicesAdd(tx, "container", int64(id), args.Devices); err != nil { + tx.Rollback() + return 0, err + } + + return id, txCommit(tx) +} + +func dbContainerConfigClear(tx *sql.Tx, id int) error { + _, err := tx.Exec("DELETE FROM containers_config WHERE container_id=?", id) + if err != nil { + return err + } + _, err = tx.Exec("DELETE FROM containers_profiles WHERE container_id=?", id) + if err != nil { + return err + } + _, err = tx.Exec(`DELETE FROM containers_devices_config WHERE id IN + (SELECT containers_devices_config.id + FROM containers_devices_config JOIN containers_devices + ON containers_devices_config.container_device_id=containers_devices.id + WHERE containers_devices.container_id=?)`, id) + if err != nil { + return err + } + _, err = tx.Exec("DELETE FROM containers_devices WHERE container_id=?", id) + return err +} + +func dbContainerConfigInsert(tx *sql.Tx, id int, config map[string]string) error { + str := "INSERT INTO containers_config (container_id, key, value) values (?, ?, ?)" + stmt, err := tx.Prepare(str) + if err != nil { + return err + } + defer stmt.Close() + + for k, v := range config { + _, err := stmt.Exec(id, k, v) + if err != nil { + shared.Debugf("Error adding configuration item %s = %s to container %d", + k, v, id) + return err + } + } + + return nil +} + +func dbContainerConfigRemove(db *sql.DB, id int, name string) error { + _, err := dbExec(db, "DELETE FROM containers_config WHERE key=? AND container_id=?", name, id) + return err +} + +func dbContainerSetStateful(db *sql.DB, id int, stateful bool) error { + statefulInt := 0 + if stateful { + statefulInt = 1 + } + + _, err := dbExec(db, "UPDATE containers SET stateful=? WHERE id=?", statefulInt, id) + return err +} + +func dbContainerProfilesInsert(tx *sql.Tx, id int, profiles []string) error { + applyOrder := 1 + str := `INSERT INTO containers_profiles (container_id, profile_id, apply_order) VALUES + (?, (SELECT id FROM profiles WHERE name=?), ?);` + stmt, err := tx.Prepare(str) + if err != nil { + return err + } + defer stmt.Close() + for _, p := range profiles { + _, err = stmt.Exec(id, p, applyOrder) + if err != nil { + shared.Debugf("Error adding profile %s to container: %s", + p, err) + return err + } + applyOrder = applyOrder + 1 + } + + return nil +} + +// Get a list of profiles for a given container id. +func dbContainerProfiles(db *sql.DB, containerId int) ([]string, error) { + var name string + var profiles []string + + query := ` + SELECT name FROM containers_profiles + JOIN profiles ON containers_profiles.profile_id=profiles.id + WHERE container_id=? 
+ ORDER BY containers_profiles.apply_order` + inargs := []interface{}{containerId} + outfmt := []interface{}{name} + + results, err := dbQueryScan(db, query, inargs, outfmt) + if err != nil { + return nil, err + } + + for _, r := range results { + name = r[0].(string) + + profiles = append(profiles, name) + } + + return profiles, nil +} + +// dbContainerConfig gets the container configuration map from the DB +func dbContainerConfig(db *sql.DB, containerId int) (map[string]string, error) { + var key, value string + q := `SELECT key, value FROM containers_config WHERE container_id=?` + + inargs := []interface{}{containerId} + outfmt := []interface{}{key, value} + + // Results is already a slice here, not db Rows anymore. + results, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return nil, err //SmartError will wrap this and make "not found" errors pretty + } + + config := map[string]string{} + + for _, r := range results { + key = r[0].(string) + value = r[1].(string) + + config[key] = value + } + + return config, nil +} + +func dbContainersList(db *sql.DB, cType containerType) ([]string, error) { + q := fmt.Sprintf("SELECT name FROM containers WHERE type=? ORDER BY name") + inargs := []interface{}{cType} + var container string + outfmt := []interface{}{container} + result, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return nil, err + } + + var ret []string + for _, container := range result { + ret = append(ret, container[0].(string)) + } + + return ret, nil +} + +func dbContainerRename(db *sql.DB, oldName string, newName string) error { + tx, err := dbBegin(db) + if err != nil { + return err + } + + str := fmt.Sprintf("UPDATE containers SET name = ? WHERE name = ?") + stmt, err := tx.Prepare(str) + if err != nil { + tx.Rollback() + return err + } + defer stmt.Close() + + shared.Log.Debug( + "Calling SQL Query", + log.Ctx{ + "query": "UPDATE containers SET name = ? WHERE name = ?", + "oldName": oldName, + "newName": newName}) + if _, err := stmt.Exec(newName, oldName); err != nil { + tx.Rollback() + return err + } + + return txCommit(tx) +} + +func dbContainerUpdate(tx *sql.Tx, id int, architecture int, ephemeral bool) error { + str := fmt.Sprintf("UPDATE containers SET architecture=?, ephemeral=? WHERE id=?") + stmt, err := tx.Prepare(str) + if err != nil { + return err + } + defer stmt.Close() + + ephemeralInt := 0 + if ephemeral { + ephemeralInt = 1 + } + + if _, err := stmt.Exec(architecture, ephemeralInt, id); err != nil { + return err + } + + return nil +} + +func dbContainerGetSnapshots(db *sql.DB, name string) ([]string, error) { + result := []string{} + + regexp := name + shared.SnapshotDelimiter + length := len(regexp) + q := "SELECT name FROM containers WHERE type=? AND SUBSTR(name,1,?)=?" 
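+	// SUBSTR(name,1,?)=? is a fixed-length prefix match on "<name><delimiter>",
+	// so with the usual "/" delimiter a container "c1" matches "c1/snap0"
+	// but not "c10/snap0".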
+ inargs := []interface{}{cTypeSnapshot, length, regexp} + outfmt := []interface{}{name} + dbResults, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return result, err + } + + for _, r := range dbResults { + result = append(result, r[0].(string)) + } + + return result, nil +} === added file 'src/github.com/lxc/lxd/lxd/db_devices.go' --- src/github.com/lxc/lxd/lxd/db_devices.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/db_devices.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,164 @@ +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/mattn/go-sqlite3" + + "github.com/lxc/lxd/shared" +) + +func dbDeviceTypeToString(t int) (string, error) { + switch t { + case 0: + return "none", nil + case 1: + return "nic", nil + case 2: + return "disk", nil + case 3: + return "unix-char", nil + case 4: + return "unix-block", nil + default: + return "", fmt.Errorf("Invalid device type %d", t) + } +} + +func dbDeviceTypeToInt(t string) (int, error) { + switch t { + case "none": + return 0, nil + case "nic": + return 1, nil + case "disk": + return 2, nil + case "unix-char": + return 3, nil + case "unix-block": + return 4, nil + default: + return -1, fmt.Errorf("Invalid device type %s", t) + } +} + +func dbDevicesAdd(tx *sql.Tx, w string, cID int64, devices shared.Devices) error { + // Prepare the devices entry SQL + str1 := fmt.Sprintf("INSERT INTO %ss_devices (%s_id, name, type) VALUES (?, ?, ?)", w, w) + stmt1, err := tx.Prepare(str1) + if err != nil { + return err + } + defer stmt1.Close() + + // Prepare the devices config entry SQL + str2 := fmt.Sprintf("INSERT INTO %ss_devices_config (%s_device_id, key, value) VALUES (?, ?, ?)", w, w) + stmt2, err := tx.Prepare(str2) + if err != nil { + return err + } + defer stmt2.Close() + + // Insert all the devices + for k, v := range devices { + t, err := dbDeviceTypeToInt(v["type"]) + if err != nil { + return err + } + + result, err := stmt1.Exec(cID, k, t) + if err != nil { + return err + } + + id64, err := result.LastInsertId() + if err != nil { + return fmt.Errorf("Error inserting device %s into database", k) + } + id := int(id64) + + for ck, cv := range v { + // The type is stored as int in the parent entry + if ck == "type" { + continue + } + + _, err = stmt2.Exec(id, ck, cv) + if err != nil { + return err + } + } + } + + return nil +} + +func dbDeviceConfig(db *sql.DB, id int, isprofile bool) (shared.Device, error) { + var query string + var key, value string + newdev := shared.Device{} // That's a map[string]string + inargs := []interface{}{id} + outfmt := []interface{}{key, value} + + if isprofile { + query = `SELECT key, value FROM profiles_devices_config WHERE profile_device_id=?` + } else { + query = `SELECT key, value FROM containers_devices_config WHERE container_device_id=?` + } + + results, err := dbQueryScan(db, query, inargs, outfmt) + + if err != nil { + return newdev, err + } + + for _, r := range results { + key = r[0].(string) + value = r[1].(string) + newdev[key] = value + } + + return newdev, nil +} + +func dbDevices(db *sql.DB, qName string, isprofile bool) (shared.Devices, error) { + var q string + if isprofile { + q = `SELECT profiles_devices.id, profiles_devices.name, profiles_devices.type + FROM profiles_devices JOIN profiles + ON profiles_devices.profile_id = profiles.id + WHERE profiles.name=?` + } else { + q = `SELECT containers_devices.id, containers_devices.name, containers_devices.type + FROM containers_devices JOIN containers + ON containers_devices.container_id = containers.id + WHERE 
containers.name=?` + } + var id, dtype int + var name, stype string + inargs := []interface{}{qName} + outfmt := []interface{}{id, name, dtype} + results, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return nil, err + } + + devices := shared.Devices{} + for _, r := range results { + id = r[0].(int) + name = r[1].(string) + stype, err = dbDeviceTypeToString(r[2].(int)) + if err != nil { + return nil, err + } + newdev, err := dbDeviceConfig(db, id, isprofile) + if err != nil { + return nil, err + } + newdev["type"] = stype + devices[name] = newdev + } + + return devices, nil +} === added file 'src/github.com/lxc/lxd/lxd/db_images.go' --- src/github.com/lxc/lxd/lxd/db_images.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/db_images.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,438 @@ +package main + +import ( + "database/sql" + "fmt" + "time" + + _ "github.com/mattn/go-sqlite3" + + "github.com/lxc/lxd/shared" +) + +var dbImageSourceProtocol = map[int]string{ + 0: "lxd", + 1: "direct", + 2: "simplestreams", +} + +func dbImagesGet(db *sql.DB, public bool) ([]string, error) { + q := "SELECT fingerprint FROM images" + if public == true { + q = "SELECT fingerprint FROM images WHERE public=1" + } + + var fp string + inargs := []interface{}{} + outfmt := []interface{}{fp} + dbResults, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return []string{}, err + } + + results := []string{} + for _, r := range dbResults { + results = append(results, r[0].(string)) + } + + return results, nil +} + +func dbImagesGetExpired(db *sql.DB, expiry int) ([]string, error) { + q := `SELECT fingerprint FROM images WHERE cached=1 AND creation_date<=strftime('%s', date('now', '-` + fmt.Sprintf("%d", expiry) + ` day'))` + + var fp string + inargs := []interface{}{} + outfmt := []interface{}{fp} + dbResults, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return []string{}, err + } + + results := []string{} + for _, r := range dbResults { + results = append(results, r[0].(string)) + } + + return results, nil +} + +func dbImageSourceInsert(db *sql.DB, imageId int, server string, protocol string, certificate string, alias string) error { + stmt := `INSERT INTO images_source (image_id, server, protocol, certificate, alias) values (?, ?, ?, ?, ?)` + + protocolInt := -1 + for protoInt, protoString := range dbImageSourceProtocol { + if protoString == protocol { + protocolInt = protoInt + } + } + + if protocolInt == -1 { + return fmt.Errorf("Invalid protocol: %s", protocol) + } + + _, err := dbExec(db, stmt, imageId, server, protocolInt, certificate, alias) + return err +} + +func dbImageSourceGet(db *sql.DB, imageId int) (int, shared.ImageSource, error) { + q := `SELECT id, server, protocol, certificate, alias FROM images_source WHERE image_id=?` + + id := 0 + protocolInt := -1 + result := shared.ImageSource{} + + arg1 := []interface{}{imageId} + arg2 := []interface{}{&id, &result.Server, &protocolInt, &result.Certificate, &result.Alias} + err := dbQueryRowScan(db, q, arg1, arg2) + if err != nil { + if err == sql.ErrNoRows { + return -1, shared.ImageSource{}, NoSuchObjectError + } + + return -1, shared.ImageSource{}, err + } + + protocol, found := dbImageSourceProtocol[protocolInt] + if !found { + return -1, shared.ImageSource{}, fmt.Errorf("Invalid protocol: %d", protocolInt) + } + + result.Protocol = protocol + + return id, result, nil + +} + +// dbImageGet gets an ImageBaseInfo object from the database. 
+// The argument fingerprint will be queried with a LIKE query, means you can +// pass a shortform and will get the full fingerprint. +// There can never be more than one image with a given fingerprint, as it is +// enforced by a UNIQUE constraint in the schema. +func dbImageGet(db *sql.DB, fingerprint string, public bool, strictMatching bool) (int, *shared.ImageInfo, error) { + var err error + var create, expire, used, upload *time.Time // These hold the db-returned times + + // The object we'll actually return + image := shared.ImageInfo{} + id := -1 + arch := -1 + + // These two humongous things will be filled by the call to DbQueryRowScan + outfmt := []interface{}{&id, &image.Fingerprint, &image.Filename, + &image.Size, &image.Cached, &image.Public, &image.AutoUpdate, &arch, + &create, &expire, &used, &upload} + + var query string + + var inargs []interface{} + if strictMatching { + inargs = []interface{}{fingerprint} + query = ` + SELECT + id, fingerprint, filename, size, cached, public, auto_update, architecture, + creation_date, expiry_date, last_use_date, upload_date + FROM + images + WHERE fingerprint = ?` + } else { + inargs = []interface{}{fingerprint + "%"} + query = ` + SELECT + id, fingerprint, filename, size, cached, public, auto_update, architecture, + creation_date, expiry_date, last_use_date, upload_date + FROM + images + WHERE fingerprint LIKE ?` + } + + if public { + query = query + " AND public=1" + } + + err = dbQueryRowScan(db, query, inargs, outfmt) + + if err != nil { + return -1, nil, err // Likely: there are no rows for this fingerprint + } + + // Some of the dates can be nil in the DB, let's process them. + if create != nil { + image.CreationDate = *create + } else { + image.CreationDate = time.Time{} + } + + if expire != nil { + image.ExpiryDate = *expire + } else { + image.ExpiryDate = time.Time{} + } + + if used != nil { + image.LastUsedDate = *used + } else { + image.LastUsedDate = time.Time{} + } + + image.Architecture, _ = shared.ArchitectureName(arch) + + // The upload date is enforced by NOT NULL in the schema, so it can never be nil. + image.UploadDate = *upload + + // Get the properties + q := "SELECT key, value FROM images_properties where image_id=?" + var key, value, name, desc string + inargs = []interface{}{id} + outfmt = []interface{}{key, value} + results, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return -1, nil, err + } + + properties := map[string]string{} + for _, r := range results { + key = r[0].(string) + value = r[1].(string) + properties[key] = value + } + + image.Properties = properties + + // Get the aliases + q = "SELECT name, description FROM images_aliases WHERE image_id=?" 
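+	// outfmt reuses the string variables declared above; dbQueryScan only
+	// looks at their types in order to allocate matching scan targets.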
+	inargs = []interface{}{id}
+	outfmt = []interface{}{name, desc}
+	results, err = dbQueryScan(db, q, inargs, outfmt)
+	if err != nil {
+		return -1, nil, err
+	}
+
+	aliases := []shared.ImageAlias{}
+	for _, r := range results {
+		name = r[0].(string)
+		desc = r[1].(string)
+		a := shared.ImageAlias{Name: name, Description: desc}
+		aliases = append(aliases, a)
+	}
+
+	image.Aliases = aliases
+
+	_, source, err := dbImageSourceGet(db, id)
+	if err == nil {
+		image.Source = &source
+	}
+
+	return id, &image, nil
+}
+
+func dbImageDelete(db *sql.DB, id int) error {
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	_, _ = tx.Exec("DELETE FROM images_aliases WHERE image_id=?", id)
+	_, _ = tx.Exec("DELETE FROM images_properties WHERE image_id=?", id)
+	_, _ = tx.Exec("DELETE FROM images_source WHERE image_id=?", id)
+	_, _ = tx.Exec("DELETE FROM images WHERE id=?", id)
+
+	if err := txCommit(tx); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func dbImageAliasGet(db *sql.DB, name string, isTrustedClient bool) (int, shared.ImageAliasesEntry, error) {
+	q := `SELECT images_aliases.id, images.fingerprint, images_aliases.description
+			FROM images_aliases
+			INNER JOIN images
+			ON images_aliases.image_id=images.id
+			WHERE images_aliases.name=?`
+	if !isTrustedClient {
+		q = q + ` AND images.public=1`
+	}
+
+	var fingerprint, description string
+	id := -1
+
+	arg1 := []interface{}{name}
+	arg2 := []interface{}{&id, &fingerprint, &description}
+	err := dbQueryRowScan(db, q, arg1, arg2)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return -1, shared.ImageAliasesEntry{}, NoSuchObjectError
+		}
+
+		return -1, shared.ImageAliasesEntry{}, err
+	}
+
+	return id, shared.ImageAliasesEntry{Name: name, Target: fingerprint, Description: description}, nil
+}
+
+func dbImageAliasRename(db *sql.DB, id int, name string) error {
+	_, err := dbExec(db, "UPDATE images_aliases SET name=? WHERE id=?", name, id)
+	return err
+}
+
+func dbImageAliasDelete(db *sql.DB, name string) error {
+	_, err := dbExec(db, "DELETE FROM images_aliases WHERE name=?", name)
+	return err
+}
+
+func dbImageAliasesMove(db *sql.DB, source int, destination int) error {
+	_, err := dbExec(db, "UPDATE images_aliases SET image_id=? WHERE image_id=?", destination, source)
+	return err
+}
+
+// Insert an alias into the database.
+func dbImageAliasAdd(db *sql.DB, name string, imageID int, desc string) error {
+	stmt := `INSERT INTO images_aliases (name, image_id, description) values (?, ?, ?)`
+	_, err := dbExec(db, stmt, name, imageID, desc)
+	return err
+}
+
+func dbImageAliasUpdate(db *sql.DB, id int, imageID int, desc string) error {
+	stmt := `UPDATE images_aliases SET image_id=?, description=? WHERE id=?`
+	_, err := dbExec(db, stmt, imageID, desc, id)
+	return err
+}
+
+func dbImageLastAccessUpdate(db *sql.DB, fingerprint string, date time.Time) error {
+	stmt := `UPDATE images SET last_use_date=? WHERE fingerprint=?`
+	_, err := dbExec(db, stmt, date, fingerprint)
+	return err
+}
+
+func dbImageLastAccessInit(db *sql.DB, fingerprint string) error {
+	stmt := `UPDATE images SET cached=1, last_use_date=strftime("%s") WHERE fingerprint=?`
+	_, err := dbExec(db, stmt, fingerprint)
+	return err
+}
+
+func dbImageUpdate(db *sql.DB, id int, fname string, sz int64, public bool, autoUpdate bool, architecture string, creationDate time.Time, expiryDate time.Time, properties map[string]string) error {
+	arch, err := shared.ArchitectureId(architecture)
+	if err != nil {
+		arch = 0
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	publicInt := 0
+	if public {
+		publicInt = 1
+	}
+
+	autoUpdateInt := 0
+	if autoUpdate {
+		autoUpdateInt = 1
+	}
+
+	stmt, err := tx.Prepare(`UPDATE images SET filename=?, size=?, public=?, auto_update=?, architecture=?, creation_date=?, expiry_date=? WHERE id=?`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt.Close()
+
+	_, err = stmt.Exec(fname, sz, publicInt, autoUpdateInt, arch, creationDate, expiryDate, id)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	_, err = tx.Exec(`DELETE FROM images_properties WHERE image_id=?`, id)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	stmt, err = tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, ?, ?, ?)`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	for key, value := range properties {
+		_, err = stmt.Exec(id, 0, key, value)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+
+	if err := txCommit(tx); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func dbImageInsert(db *sql.DB, fp string, fname string, sz int64, public bool, autoUpdate bool, architecture string, creationDate time.Time, expiryDate time.Time, properties map[string]string) error {
+	arch, err := shared.ArchitectureId(architecture)
+	if err != nil {
+		arch = 0
+	}
+
+	tx, err := dbBegin(db)
+	if err != nil {
+		return err
+	}
+
+	publicInt := 0
+	if public {
+		publicInt = 1
+	}
+
+	autoUpdateInt := 0
+	if autoUpdate {
+		autoUpdateInt = 1
+	}
+
+	stmt, err := tx.Prepare(`INSERT INTO images (fingerprint, filename, size, public, auto_update, architecture, creation_date, expiry_date, upload_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, strftime("%s"))`)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+	defer stmt.Close()
+
+	result, err := stmt.Exec(fp, fname, sz, publicInt, autoUpdateInt, arch, creationDate, expiryDate)
+	if err != nil {
+		tx.Rollback()
+		return err
+	}
+
+	if len(properties) > 0 {
+		id64, err := result.LastInsertId()
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+		id := int(id64)
+
+		pstmt, err := tx.Prepare(`INSERT INTO images_properties (image_id, type, key, value) VALUES (?, 0, ?, ?)`)
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+		defer pstmt.Close()
+
+		for k, v := range properties {
+
+			// We can assume that there is just one
+			// value per key.
+			_, err = pstmt.Exec(id, k, v)
+			if err != nil {
+				tx.Rollback()
+				return err
+			}
+		}
+
+	}
+
+	if err := txCommit(tx); err != nil {
+		return err
+	}
+
+	return nil
+}
=== added file 'src/github.com/lxc/lxd/lxd/db_profiles.go'
--- src/github.com/lxc/lxd/lxd/db_profiles.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/db_profiles.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,283 @@
+package main
+
+import (
+	"database/sql"
+	"fmt"
+
+	_ "github.com/mattn/go-sqlite3"
+)
+
+// dbProfiles returns a string list of profiles.
+func dbProfiles(db *sql.DB) ([]string, error) { + q := fmt.Sprintf("SELECT name FROM profiles") + inargs := []interface{}{} + var name string + outfmt := []interface{}{name} + result, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return []string{}, err + } + + response := []string{} + for _, r := range result { + response = append(response, r[0].(string)) + } + + return response, nil +} + +func dbProfileGet(db *sql.DB, profile string) (int64, *shared.ProfileConfig, error) { + id := int64(-1) + description := sql.NullString{} + + q := "SELECT id, description FROM profiles WHERE name=?" + arg1 := []interface{}{profile} + arg2 := []interface{}{&id, &description} + err := dbQueryRowScan(db, q, arg1, arg2) + if err != nil { + return -1, nil, fmt.Errorf("here: %s", err) + } + + config, err := dbProfileConfig(db, profile) + if err != nil { + return -1, nil, err + } + + devices, err := dbDevices(db, profile, true) + if err != nil { + return -1, nil, err + } + + return id, &shared.ProfileConfig{ + Name: profile, + Config: config, + Description: description.String, + Devices: devices, + }, nil +} + +func dbProfileCreate(db *sql.DB, profile string, description string, config map[string]string, + devices shared.Devices) (int64, error) { + + tx, err := dbBegin(db) + if err != nil { + return -1, err + } + result, err := tx.Exec("INSERT INTO profiles (name, description) VALUES (?, ?)", profile, description) + if err != nil { + tx.Rollback() + return -1, err + } + id, err := result.LastInsertId() + if err != nil { + tx.Rollback() + return -1, err + } + + err = dbProfileConfigAdd(tx, id, config) + if err != nil { + tx.Rollback() + return -1, err + } + + err = dbDevicesAdd(tx, "profile", id, devices) + if err != nil { + tx.Rollback() + return -1, err + } + + err = txCommit(tx) + if err != nil { + return -1, err + } + + return id, nil +} + +func dbProfileCreateDefault(db *sql.DB) error { + id, _, _ := dbProfileGet(db, "default") + + if id != -1 { + // default profile already exists + return nil + } + + // TODO: We should scan for bridges and use the best available as default. 
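+	// shared.Devices maps device names to string maps; expressed as profile
+	// YAML, the default device below would read (a sketch):
+	//
+	//	devices:
+	//	  eth0:
+	//	    name: eth0
+	//	    nictype: bridged
+	//	    parent: lxcbr0
+	//	    type: nic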
+ devices := shared.Devices{ + "eth0": shared.Device{ + "name": "eth0", + "type": "nic", + "nictype": "bridged", + "parent": "lxcbr0"}} + id, err := dbProfileCreate(db, "default", "Default LXD profile", map[string]string{}, devices) + if err != nil { + return err + } + + return nil +} + +func dbProfileCreateDocker(db *sql.DB) error { + id, _, err := dbProfileGet(db, "docker") + + if id != -1 { + // docker profile already exists + return nil + } + + config := map[string]string{ + "security.nesting": "true", + "linux.kernel_modules": "overlay, nf_nat"} + fusedev := map[string]string{ + "path": "/dev/fuse", + "type": "unix-char", + } + devices := map[string]shared.Device{"fuse": fusedev} + + _, err = dbProfileCreate(db, "docker", "Profile supporting docker in containers", config, devices) + return err +} + +// Get the profile configuration map from the DB +func dbProfileConfig(db *sql.DB, name string) (map[string]string, error) { + var key, value string + query := ` + SELECT + key, value + FROM profiles_config + JOIN profiles ON profiles_config.profile_id=profiles.id + WHERE name=?` + inargs := []interface{}{name} + outfmt := []interface{}{key, value} + results, err := dbQueryScan(db, query, inargs, outfmt) + if err != nil { + return nil, fmt.Errorf("Failed to get profile '%s'", name) + } + + if len(results) == 0 { + /* + * If we didn't get any rows here, let's check to make sure the + * profile really exists; if it doesn't, let's send back a 404. + */ + query := "SELECT id FROM profiles WHERE name=?" + var id int + results, err := dbQueryScan(db, query, []interface{}{name}, []interface{}{id}) + if err != nil { + return nil, err + } + + if len(results) == 0 { + return nil, NoSuchObjectError + } + } + + config := map[string]string{} + + for _, r := range results { + key = r[0].(string) + value = r[1].(string) + + config[key] = value + } + + return config, nil +} + +func dbProfileDelete(db *sql.DB, name string) error { + tx, err := dbBegin(db) + if err != nil { + return err + } + _, err = tx.Exec("DELETE FROM profiles WHERE name=?", name) + if err != nil { + tx.Rollback() + return err + } + + err = txCommit(tx) + + return err +} + +func dbProfileUpdate(db *sql.DB, name string, newName string) error { + tx, err := dbBegin(db) + if err != nil { + return err + } + + _, err = tx.Exec("UPDATE profiles SET name=? WHERE name=?", newName, name) + if err != nil { + tx.Rollback() + return err + } + + err = txCommit(tx) + + return err +} + +func dbProfileDescriptionUpdate(tx *sql.Tx, id int64, description string) error { + _, err := tx.Exec("UPDATE profiles SET description=? 
WHERE id=?", description, id) + return err +} + +func dbProfileConfigClear(tx *sql.Tx, id int64) error { + _, err := tx.Exec("DELETE FROM profiles_config WHERE profile_id=?", id) + if err != nil { + return err + } + + _, err = tx.Exec(`DELETE FROM profiles_devices_config WHERE id IN + (SELECT profiles_devices_config.id + FROM profiles_devices_config JOIN profiles_devices + ON profiles_devices_config.profile_device_id=profiles_devices.id + WHERE profiles_devices.profile_id=?)`, id) + if err != nil { + return err + } + _, err = tx.Exec("DELETE FROM profiles_devices WHERE profile_id=?", id) + if err != nil { + return err + } + return nil +} + +func dbProfileConfigAdd(tx *sql.Tx, id int64, config map[string]string) error { + str := fmt.Sprintf("INSERT INTO profiles_config (profile_id, key, value) VALUES(?, ?, ?)") + stmt, err := tx.Prepare(str) + defer stmt.Close() + + for k, v := range config { + _, err = stmt.Exec(id, k, v) + if err != nil { + return err + } + } + + return nil +} + +func dbProfileContainersGet(db *sql.DB, profile string) ([]string, error) { + q := `SELECT containers.name FROM containers JOIN containers_profiles + ON containers.id == containers_profiles.container_id + JOIN profiles ON containers_profiles.profile_id == profiles.id + WHERE profiles.name == ?` + + results := []string{} + inargs := []interface{}{profile} + var name string + outfmt := []interface{}{name} + + output, err := dbQueryScan(db, q, inargs, outfmt) + if err != nil { + return results, err + } + + for _, r := range output { + results = append(results, r[0].(string)) + } + + return results, nil +} === added file 'src/github.com/lxc/lxd/lxd/db_test.go' --- src/github.com/lxc/lxd/lxd/db_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/db_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,629 @@ +package main + +import ( + "database/sql" + "fmt" + "testing" + "time" + + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/logging" +) + +const DB_FIXTURES string = ` + INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1); + INSERT INTO profiles (name) VALUES ('theprofile'); + INSERT INTO containers_profiles (container_id, profile_id) VALUES (1, 3); + INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue'); + INSERT INTO containers_devices (container_id, name, type) VALUES (1, 'somename', 1); + INSERT INTO containers_devices_config (key, value, container_device_id) VALUES ('configkey', 'configvalue', 1); + INSERT INTO images (fingerprint, filename, size, architecture, creation_date, expiry_date, upload_date) VALUES ('fingerprint', 'filename', 1024, 0, 1431547174, 1431547175, 1431547176); + INSERT INTO images_aliases (name, image_id, description) VALUES ('somealias', 1, 'some description'); + INSERT INTO images_properties (image_id, type, key, value) VALUES (1, 0, 'thekey', 'some value'); + INSERT INTO profiles_config (profile_id, key, value) VALUES (3, 'thekey', 'thevalue'); + INSERT INTO profiles_devices (profile_id, name, type) VALUES (3, 'devicename', 1); + INSERT INTO profiles_devices_config (profile_device_id, key, value) VALUES (3, 'devicekey', 'devicevalue'); + ` + +// This Helper will initialize a test in-memory DB. 
+func createTestDb(t *testing.T) (db *sql.DB) { + // Setup logging if main() hasn't been called/when testing + if shared.Log == nil { + var err error + shared.Log, err = logging.GetLogger("", "", true, true, nil) + if err != nil { + t.Fatal(err) + } + } + + var err error + d := &Daemon{IsMock: true} + err = initializeDbObject(d, ":memory:") + db = d.db + + if err != nil { + t.Fatal(err) + } + + _, err = db.Exec(DB_FIXTURES) + if err != nil { + t.Fatal(err) + } + return // db is a named output param +} + +func Test_deleting_a_container_cascades_on_related_tables(t *testing.T) { + var db *sql.DB + var err error + var count int + var statements string + + // Insert a container and a related profile. + db = createTestDb(t) + defer db.Close() + + // Drop the container we just created. + statements = `DELETE FROM containers WHERE name = 'thename';` + + _, err = db.Exec(statements) + if err != nil { + t.Errorf("Error deleting container! %s", err) + } + + // Make sure there are 0 container_profiles entries left. + statements = `SELECT count(*) FROM containers_profiles;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a container didn't delete the profile association! There are %d left", count) + } + + // Make sure there are 0 containers_config entries left. + statements = `SELECT count(*) FROM containers_config;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a container didn't delete the associated container_config! There are %d left", count) + } + + // Make sure there are 0 containers_devices entries left. + statements = `SELECT count(*) FROM containers_devices;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a container didn't delete the associated container_devices! There are %d left", count) + } + + // Make sure there are 0 containers_devices_config entries left. + statements = `SELECT count(*) FROM containers_devices_config;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a container didn't delete the associated container_devices_config! There are %d left", count) + } + +} + +func Test_deleting_a_profile_cascades_on_related_tables(t *testing.T) { + var db *sql.DB + var err error + var count int + var statements string + + // Insert a container and a related profile. + db = createTestDb(t) + defer db.Close() + + // Drop the profile we just created. + statements = `DELETE FROM profiles WHERE name = 'theprofile';` + + _, err = db.Exec(statements) + if err != nil { + t.Errorf("Error deleting profile! %s", err) + } + + // Make sure there are 0 container_profiles entries left. + statements = `SELECT count(*) FROM containers_profiles WHERE profile_id = 3;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a profile didn't delete the container association! There are %d left", count) + } + + // Make sure there are 0 profiles_devices entries left. + statements = `SELECT count(*) FROM profiles_devices WHERE profile_id == 3;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a profile didn't delete the related profiles_devices! There are %d left", count) + } + + // Make sure there are 0 profiles_config entries left. + statements = `SELECT count(*) FROM profiles_config WHERE profile_id == 3;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a profile didn't delete the related profiles_config! 
There are %d left", count) + } + + // Make sure there are 0 profiles_devices_config entries left. + statements = `SELECT count(*) FROM profiles_devices_config WHERE profile_device_id == 3;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a profile didn't delete the related profiles_devices_config! There are %d left", count) + } + +} + +func Test_deleting_an_image_cascades_on_related_tables(t *testing.T) { + var db *sql.DB + var err error + var count int + var statements string + + db = createTestDb(t) + defer db.Close() + + // Drop the image we just created. + statements = `DELETE FROM images;` + + _, err = db.Exec(statements) + if err != nil { + t.Errorf("Error deleting image! %s", err) + } + + // Make sure there are 0 images_aliases entries left. + statements = `SELECT count(*) FROM images_aliases;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting an image didn't delete the image alias association! There are %d left", count) + } + + // Make sure there are 0 images_properties entries left. + statements = `SELECT count(*) FROM images_properties;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting an image didn't delete the related images_properties! There are %d left", count) + } +} + +func Test_initializing_db_is_indempotent(t *testing.T) { + var db *sql.DB + var err error + + // This calls "createDb" once already. + d := &Daemon{IsMock: true} + err = initializeDbObject(d, ":memory:") + db = d.db + + defer db.Close() + + // Let's call it a second time. + err = createDb(db) + if err != nil { + t.Errorf("The database schema is not indempotent, err='%s'", err) + } +} + +func Test_get_schema_returns_0_on_uninitialized_db(t *testing.T) { + var db *sql.DB + var err error + + db, err = sql.Open("sqlite3", ":memory:") + if err != nil { + t.Error(err) + } + result := dbGetSchema(db) + + if result != 0 { + t.Error("getSchema should return 0 on uninitialized db!") + } +} + +func Test_running_dbUpdateFromV6_adds_on_delete_cascade(t *testing.T) { + // Upgrading the database schema with updateFromV6 adds ON DELETE CASCADE + // to sqlite tables that require it, and conserve the data. + + var err error + var count int + + d := &Daemon{IsMock: true} + err = initializeDbObject(d, ":memory:") + defer d.db.Close() + + statements := ` +CREATE TABLE IF NOT EXISTS containers ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + architecture INTEGER NOT NULL, + type INTEGER NOT NULL, + power_state INTEGER NOT NULL DEFAULT 0, + ephemeral INTEGER NOT NULL DEFAULT 0, + UNIQUE (name) +); +CREATE TABLE IF NOT EXISTS containers_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_id) REFERENCES containers (id), + UNIQUE (container_id, key) +); + +INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1); +INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');` + + _, err = d.db.Exec(statements) + if err != nil { + t.Error(err) + } + + // Run the upgrade from V6 code + err = dbUpdateFromV6(d.db) + + // Make sure the inserted data is still there. + statements = `SELECT count(*) FROM containers_config;` + err = d.db.QueryRow(statements).Scan(&count) + + if count != 1 { + t.Fatalf("There should be exactly one entry in containers_config! There are %d.", count) + } + + // Drop the container. 
+ statements = `DELETE FROM containers WHERE name = 'thename';` + + _, err = d.db.Exec(statements) + if err != nil { + t.Errorf("Error deleting container! %s", err) + } + + // Make sure there are 0 container_profiles entries left. + statements = `SELECT count(*) FROM containers_profiles;` + err = d.db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Errorf("Deleting a container didn't delete the profile association! There are %d left", count) + } +} + +func Test_run_database_upgrades_with_some_foreign_keys_inconsistencies(t *testing.T) { + var db *sql.DB + var err error + var count int + var statements string + + db, err = sql.Open("sqlite3", ":memory:") + defer db.Close() + + if err != nil { + t.Fatal(err) + } + + // This schema is a part of schema rev 1. + statements = ` +CREATE TABLE containers ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + architecture INTEGER NOT NULL, + type INTEGER NOT NULL, + UNIQUE (name) +); +CREATE TABLE containers_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_id) REFERENCES containers (id), + UNIQUE (container_id, key) +); +CREATE TABLE schema ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + version INTEGER NOT NULL, + updated_at DATETIME NOT NULL, + UNIQUE (version) +); +CREATE TABLE images ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + fingerprint VARCHAR(255) NOT NULL, + filename VARCHAR(255) NOT NULL, + size INTEGER NOT NULL, + public INTEGER NOT NULL DEFAULT 0, + architecture INTEGER NOT NULL, + creation_date DATETIME, + expiry_date DATETIME, + upload_date DATETIME NOT NULL, + UNIQUE (fingerprint) +); +CREATE TABLE images_properties ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + image_id INTEGER NOT NULL, + type INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (image_id) REFERENCES images (id) +); +CREATE TABLE certificates ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + fingerprint VARCHAR(255) NOT NULL, + type INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + certificate TEXT NOT NULL, + UNIQUE (fingerprint) +); +INSERT INTO schema (version, updated_at) values (1, "now"); +INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1); +INSERT INTO containers_config (container_id, key, value) VALUES (1, 'thekey', 'thevalue');` + + _, err = db.Exec(statements) + if err != nil { + t.Fatal("Error creating schema!") + } + + // Now that we have a consistent schema, let's remove the container entry + // *without* the ON DELETE CASCADE in place. + statements = `DELETE FROM containers;` + _, err = db.Exec(statements) + if err != nil { + t.Fatal("Error truncating the container table!") + } + + // The "foreign key" on containers_config now points to nothing. + // Let's run the schema upgrades. + d := &Daemon{IsMock: true} + d.db = db + err = dbUpdate(d, 1) + + if err != nil { + t.Error("Error upgrading database schema!") + t.Fatal(err) + } + + result := dbGetSchema(db) + if result != DB_CURRENT_VERSION { + t.Fatal(fmt.Sprintf("The schema is not at the latest version after update! Found: %d, should be: %d", result, DB_CURRENT_VERSION)) + } + + // Make sure there are 0 containers_config entries left. 
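+	// dbUpdate runs dbUpdateFromV6 along the way, which finishes with
+	// "PRAGMA foreign_key_check;" and deletes each row it reports,
+	// roughly (sketch):
+	//
+	//	DELETE FROM containers_config WHERE rowid = <reported rowid>;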
+ statements = `SELECT count(*) FROM containers_config;` + err = db.QueryRow(statements).Scan(&count) + + if count != 0 { + t.Fatal("updateDb did not delete orphaned child entries after adding ON DELETE CASCADE!") + } + +} + +func Test_dbImageGet_finds_image_for_fingerprint(t *testing.T) { + var db *sql.DB + var err error + var result *shared.ImageInfo + + db = createTestDb(t) + defer db.Close() + + _, result, err = dbImageGet(db, "fingerprint", false, false) + + if err != nil { + t.Fatal(err) + } + + if result == nil { + t.Fatal("No image returned!") + } + + if result.Filename != "filename" { + t.Fatal("Filename should be set.") + } + + if result.CreationDate != time.Unix(1431547174, 0).UTC() { + t.Fatal(fmt.Sprintf("%s != %s", result.CreationDate, time.Unix(1431547174, 0))) + } + + if result.ExpiryDate != time.Unix(1431547175, 0).UTC() { // It was short lived + t.Fatal(fmt.Sprintf("%s != %s", result.ExpiryDate, time.Unix(1431547175, 0))) + } + + if result.UploadDate != time.Unix(1431547176, 0).UTC() { + t.Fatal(fmt.Sprintf("%s != %s", result.UploadDate, time.Unix(1431547176, 0))) + } +} + +func Test_dbImageGet_for_missing_fingerprint(t *testing.T) { + var db *sql.DB + var err error + + db = createTestDb(t) + defer db.Close() + + _, _, err = dbImageGet(db, "unknown", false, false) + + if err != sql.ErrNoRows { + t.Fatal("Wrong err type returned") + } +} + +func Test_dbImageAliasGet_alias_exists(t *testing.T) { + var db *sql.DB + var err error + var result string + + db = createTestDb(t) + defer db.Close() + + _, alias, err := dbImageAliasGet(db, "somealias", true) + result = alias.Target + + if err != nil { + t.Fatal(err) + } + + if result != "fingerprint" { + t.Fatal("Fingerprint is not the expected fingerprint!") + } + +} + +func Test_dbImageAliasGet_alias_does_not_exists(t *testing.T) { + var db *sql.DB + var err error + + db = createTestDb(t) + defer db.Close() + + _, _, err = dbImageAliasGet(db, "whatever", true) + + if err != NoSuchObjectError { + t.Fatal("Error should be NoSuchObjectError") + } +} + +func Test_dbImageAliasAdd(t *testing.T) { + var db *sql.DB + var err error + var result string + + db = createTestDb(t) + defer db.Close() + + err = dbImageAliasAdd(db, "Chaosphere", 1, "Someone will like the name") + if err != nil { + t.Fatal("Error inserting Image alias.") + } + + _, alias, err := dbImageAliasGet(db, "Chaosphere", true) + if err != nil { + t.Fatal(err) + } + result = alias.Target + + if result != "fingerprint" { + t.Fatal("Couldn't retrieve newly created alias.") + } +} + +func Test_dbContainerConfig(t *testing.T) { + var db *sql.DB + var err error + var result map[string]string + var expected map[string]string + + db = createTestDb(t) + defer db.Close() + + _, err = db.Exec("INSERT INTO containers_config (container_id, key, value) VALUES (1, 'something', 'something else');") + + result, err = dbContainerConfig(db, 1) + if err != nil { + t.Fatal(err) + } + + expected = map[string]string{"thekey": "thevalue", "something": "something else"} + + for key, value := range expected { + if result[key] != value { + t.Errorf("Mismatching value for key %s: %s != %s", key, result[key], value) + } + } +} + +func Test_dbProfileConfig(t *testing.T) { + var db *sql.DB + var err error + var result map[string]string + var expected map[string]string + + db = createTestDb(t) + defer db.Close() + + _, err = db.Exec("INSERT INTO profiles_config (profile_id, key, value) VALUES (3, 'something', 'something else');") + + result, err = dbProfileConfig(db, "theprofile") + if err != nil { + 
t.Fatal(err) + } + + expected = map[string]string{"thekey": "thevalue", "something": "something else"} + + for key, value := range expected { + if result[key] != value { + t.Errorf("Mismatching value for key %s: %s != %s", key, result[key], value) + } + } +} + +func Test_dbContainerProfiles(t *testing.T) { + var db *sql.DB + var err error + var result []string + var expected []string + + db = createTestDb(t) + defer db.Close() + + expected = []string{"theprofile"} + result, err = dbContainerProfiles(db, 1) + if err != nil { + t.Fatal(err) + } + + for i := range expected { + if expected[i] != result[i] { + t.Fatal(fmt.Sprintf("Mismatching contents for profile list: %s != %s", result[i], expected[i])) + } + } +} + +func Test_dbDevices_profiles(t *testing.T) { + var db *sql.DB + var err error + var result shared.Devices + var subresult shared.Device + var expected shared.Device + + db = createTestDb(t) + defer db.Close() + + result, err = dbDevices(db, "theprofile", true) + if err != nil { + t.Fatal(err) + } + + expected = shared.Device{"type": "nic", "devicekey": "devicevalue"} + subresult = result["devicename"] + + for key, value := range expected { + if subresult[key] != value { + t.Errorf("Mismatching value for key %s: %v != %v", key, subresult[key], value) + } + } + +} + +func Test_dbDevices_containers(t *testing.T) { + var db *sql.DB + var err error + var result shared.Devices + var subresult shared.Device + var expected shared.Device + + db = createTestDb(t) + defer db.Close() + + result, err = dbDevices(db, "thename", false) + if err != nil { + t.Fatal(err) + } + + expected = shared.Device{"type": "nic", "configkey": "configvalue"} + subresult = result["somename"] + + for key, value := range expected { + if subresult[key] != value { + t.Errorf("Mismatching value for key %s: %s != %s", key, subresult[key], value) + } + } + +} === added file 'src/github.com/lxc/lxd/lxd/db_update.go' --- src/github.com/lxc/lxd/lxd/db_update.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/db_update.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,984 @@ +package main + +import ( + "database/sql" + "encoding/hex" + "fmt" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +func dbUpdateFromV27(db *sql.DB) error { + stmt := ` +UPDATE profiles_devices SET type=3 WHERE type='unix-char'; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 28) + return err +} + +func dbUpdateFromV26(db *sql.DB) error { + stmt := ` +ALTER TABLE images ADD COLUMN auto_update INTEGER NOT NULL DEFAULT 0; +CREATE TABLE IF NOT EXISTS images_source ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + image_id INTEGER NOT NULL, + server TEXT NOT NULL, + protocol INTEGER NOT NULL, + certificate TEXT NOT NULL, + alias VARCHAR(255) NOT NULL, + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE +); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 27) + return err +} + +func dbUpdateFromV25(db *sql.DB) error { + stmt := ` +INSERT INTO profiles (name, description) VALUES ("docker", "Profile supporting docker in containers"); +INSERT INTO profiles_config (profile_id, key, value) SELECT id, "security.nesting", "true" FROM profiles WHERE name="docker"; +INSERT INTO profiles_config (profile_id, key, value) SELECT id, "linux.kernel_modules", "overlay, nf_nat" FROM profiles WHERE name="docker"; +INSERT INTO profiles_devices (profile_id, name, type) 
SELECT id, "fuse", "unix-char" FROM profiles WHERE name="docker"; +INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "path", "/dev/fuse" FROM profiles_devices LEFT JOIN profiles WHERE profiles_devices.profile_id = profiles.id AND profiles.name = "docker";` + db.Exec(stmt) + + stmt = `INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 26) + return err +} + +func dbUpdateFromV24(db *sql.DB) error { + stmt := ` +ALTER TABLE containers ADD COLUMN stateful INTEGER NOT NULL DEFAULT 0; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 25) + return err +} + +func dbUpdateFromV23(db *sql.DB) error { + stmt := ` +ALTER TABLE profiles ADD COLUMN description TEXT; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 24) + return err +} + +func dbUpdateFromV22(db *sql.DB) error { + stmt := ` +DELETE FROM containers_devices_config WHERE key='type'; +DELETE FROM profiles_devices_config WHERE key='type'; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 23) + return err +} + +func dbUpdateFromV21(db *sql.DB) error { + stmt := ` +ALTER TABLE containers ADD COLUMN creation_date DATETIME NOT NULL DEFAULT 0; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 22) + return err +} + +func dbUpdateFromV20(db *sql.DB) error { + stmt := ` +UPDATE containers_devices SET name='__lxd_upgrade_root' WHERE name='root'; +UPDATE profiles_devices SET name='__lxd_upgrade_root' WHERE name='root'; + +INSERT INTO containers_devices (container_id, name, type) SELECT id, "root", 2 FROM containers; +INSERT INTO containers_devices_config (container_device_id, key, value) SELECT id, "path", "/" FROM containers_devices WHERE name='root'; + +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 21) + + return err +} + +func dbUpdateFromV19(db *sql.DB) error { + stmt := ` +DELETE FROM containers_config WHERE container_id NOT IN (SELECT id FROM containers); +DELETE FROM containers_devices_config WHERE container_device_id NOT IN (SELECT id FROM containers_devices WHERE container_id IN (SELECT id FROM containers)); +DELETE FROM containers_devices WHERE container_id NOT IN (SELECT id FROM containers); +DELETE FROM containers_profiles WHERE container_id NOT IN (SELECT id FROM containers); +DELETE FROM images_aliases WHERE image_id NOT IN (SELECT id FROM images); +DELETE FROM images_properties WHERE image_id NOT IN (SELECT id FROM images); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 20) + return err +} + +func dbUpdateFromV18(db *sql.DB) error { + var id int + var value string + + // Update container config + rows, err := dbQueryScan(db, "SELECT id, value FROM containers_config WHERE key='limits.memory'", nil, []interface{}{id, value}) + if err != nil { + return err + } + + for _, row := range rows { + id = row[0].(int) + value = row[1].(string) + + // If already an integer, don't touch + _, err := strconv.Atoi(value) + if err == nil { + continue + } + + // Generate the new value + value = strings.ToUpper(value) + value += "B" + + // Deal with completely broken values + _, err = shared.ParseByteSizeString(value) + if err != nil { + shared.Debugf("Invalid container memory limit, id=%d value=%s, removing.", id, value) + _, err = db.Exec("DELETE FROM containers_config WHERE id=?;", 
id) + if err != nil { + return err + } + } + + // Set the new value + _, err = db.Exec("UPDATE containers_config SET value=? WHERE id=?", value, id) + if err != nil { + return err + } + } + + // Update profiles config + rows, err = dbQueryScan(db, "SELECT id, value FROM profiles_config WHERE key='limits.memory'", nil, []interface{}{id, value}) + if err != nil { + return err + } + + for _, row := range rows { + id = row[0].(int) + value = row[1].(string) + + // If already an integer, don't touch + _, err := strconv.Atoi(value) + if err == nil { + continue + } + + // Generate the new value + value = strings.ToUpper(value) + value += "B" + + // Deal with completely broken values + _, err = shared.ParseByteSizeString(value) + if err != nil { + shared.Debugf("Invalid profile memory limit, id=%d value=%s, removing.", id, value) + _, err = db.Exec("DELETE FROM profiles_config WHERE id=?;", id) + if err != nil { + return err + } + } + + // Set the new value + _, err = db.Exec("UPDATE profiles_config SET value=? WHERE id=?", value, id) + if err != nil { + return err + } + } + + _, err = db.Exec("INSERT INTO schema (version, updated_at) VALUES (?, strftime(\"%s\"));", 19) + return err +} + +func dbUpdateFromV17(db *sql.DB) error { + stmt := ` +DELETE FROM profiles_config WHERE key LIKE 'volatile.%'; +UPDATE containers_config SET key='limits.cpu' WHERE key='limits.cpus'; +UPDATE profiles_config SET key='limits.cpu' WHERE key='limits.cpus'; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 18) + return err +} + +func dbUpdateFromV16(db *sql.DB) error { + stmt := ` +UPDATE config SET key='storage.lvm_vg_name' WHERE key = 'core.lvm_vg_name'; +UPDATE config SET key='storage.lvm_thinpool_name' WHERE key = 'core.lvm_thinpool_name'; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 17) + return err +} + +func dbUpdateFromV15(d *Daemon) error { + // munge all LVM-backed containers' LV names to match what is + // required for snapshot support + + cNames, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err + } + + vgName, err := d.ConfigValueGet("storage.lvm_vg_name") + if err != nil { + return fmt.Errorf("Error checking server config: %v", err) + } + + for _, cName := range cNames { + var lvLinkPath string + if strings.Contains(cName, shared.SnapshotDelimiter) { + lvLinkPath = shared.VarPath("snapshots", fmt.Sprintf("%s.lv", cName)) + } else { + lvLinkPath = shared.VarPath("containers", fmt.Sprintf("%s.lv", cName)) + } + + if !shared.PathExists(lvLinkPath) { + continue + } + + newLVName := strings.Replace(cName, "-", "--", -1) + newLVName = strings.Replace(newLVName, shared.SnapshotDelimiter, "-", -1) + + if cName == newLVName { + shared.Log.Debug("No need to rename, skipping", log.Ctx{"cName": cName, "newLVName": newLVName}) + continue + } + + shared.Log.Debug("About to rename cName in lv upgrade", log.Ctx{"lvLinkPath": lvLinkPath, "cName": cName, "newLVName": newLVName}) + + output, err := exec.Command("lvrename", vgName, cName, newLVName).CombinedOutput() + if err != nil { + return fmt.Errorf("Could not rename LV '%s' to '%s': %v\noutput:%s", cName, newLVName, err, string(output)) + } + + if err := os.Remove(lvLinkPath); err != nil { + return fmt.Errorf("Couldn't remove lvLinkPath '%s'", lvLinkPath) + } + newLinkDest := fmt.Sprintf("/dev/%s/%s", vgName, newLVName) + if err := os.Symlink(newLinkDest, lvLinkPath); err != nil { + return fmt.Errorf("Couldn't recreate symlink '%s'->'%s'", 
lvLinkPath, newLinkDest)
+		}
+	}
+	stmt := `
+INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));`
+	_, err = d.db.Exec(stmt, 16)
+	return err
+}
+
+func dbUpdateFromV14(db *sql.DB) error {
+	stmt := `
+PRAGMA foreign_keys=OFF; -- So that integrity doesn't get in the way for now
+
+DELETE FROM containers_config WHERE key="volatile.last_state.power";
+
+INSERT INTO containers_config (container_id, key, value)
+    SELECT id, "volatile.last_state.power", "RUNNING"
+    FROM containers WHERE power_state=1;
+
+INSERT INTO containers_config (container_id, key, value)
+    SELECT id, "volatile.last_state.power", "STOPPED"
+    FROM containers WHERE power_state != 1;
+
+CREATE TABLE tmp (
+    id INTEGER primary key AUTOINCREMENT NOT NULL,
+    name VARCHAR(255) NOT NULL,
+    architecture INTEGER NOT NULL,
+    type INTEGER NOT NULL,
+    ephemeral INTEGER NOT NULL DEFAULT 0,
+    UNIQUE (name)
+);
+
+INSERT INTO tmp SELECT id, name, architecture, type, ephemeral FROM containers;
+
+DROP TABLE containers;
+ALTER TABLE tmp RENAME TO containers;
+
+PRAGMA foreign_keys=ON; -- Make sure we turn integrity checks back on.
+INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));`
+	_, err := db.Exec(stmt, 15)
+	return err
+}
+
+func dbUpdateFromV13(db *sql.DB) error {
+	stmt := `
+UPDATE containers_config SET key='volatile.base_image' WHERE key = 'volatile.baseImage';
+INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));`
+	_, err := db.Exec(stmt, 14)
+	return err
+}
+
+func dbUpdateFromV12(db *sql.DB) error {
+	stmt := `
+ALTER TABLE images ADD COLUMN cached INTEGER NOT NULL DEFAULT 0;
+ALTER TABLE images ADD COLUMN last_use_date DATETIME;
+INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));`
+	_, err := db.Exec(stmt, 13)
+	return err
+}
+
+func dbUpdateFromV11(d *Daemon) error {
+	if d.IsMock {
+		// No need to move snapshots in mock runs;
+		// dbUpdateFromV12 will then set the db version to 13
+		return nil
+	}
+
+	cNames, err := dbContainersList(d.db, cTypeSnapshot)
+	if err != nil {
+		return err
+	}
+
+	errors := 0
+
+	for _, cName := range cNames {
+		snappieces := strings.SplitN(cName, shared.SnapshotDelimiter, 2)
+		oldPath := shared.VarPath("containers", snappieces[0], "snapshots", snappieces[1])
+		newPath := shared.VarPath("snapshots", snappieces[0], snappieces[1])
+		if shared.PathExists(oldPath) && !shared.PathExists(newPath) {
+			shared.Log.Info(
+				"Moving snapshot",
+				log.Ctx{
+					"snapshot": cName,
+					"oldPath":  oldPath,
+					"newPath":  newPath})
+
+			// Rsync
+			// containers/<container>/snapshots/<snapshot>
+			// to
+			// snapshots/<container>/<snapshot>
+			output, err := storageRsyncCopy(oldPath, newPath)
+			if err != nil {
+				shared.Log.Error(
+					"Failed rsync snapshot",
+					log.Ctx{
+						"snapshot": cName,
+						"output":   string(output),
+						"err":      err})
+				errors++
+				continue
+			}
+
+			// Remove containers/<container>/snapshots/<snapshot>
+			if err := os.RemoveAll(oldPath); err != nil {
+				shared.Log.Error(
+					"Failed to remove the old snapshot path",
+					log.Ctx{
+						"snapshot": cName,
+						"oldPath":  oldPath,
+						"err":      err})
+
+				// Ignore this error.
+				// errors++
+				// continue
+			}
+
+			// Remove /var/lib/lxd/containers/<container>/snapshots
+			// if it's empty.
+			cPathParent := filepath.Dir(oldPath)
+			if ok, _ := shared.PathIsEmpty(cPathParent); ok {
+				os.Remove(cPathParent)
+			}
+
+		} // if shared.PathExists(oldPath) && !shared.PathExists(newPath) {
+	} // for _, cName := range cNames {
+
+	// Refuse to start lxd if a rsync failed.
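+	// (For an illustrative container "c1" with snapshot "snap0", and the
+	// default shared.VarPath prefix of /var/lib/lxd, the loop above moved:
+	//
+	//	/var/lib/lxd/containers/c1/snapshots/snap0
+	//	    -> /var/lib/lxd/snapshots/c1/snap0
+	//
+	// before removing the old location.)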
+ if errors > 0 { + return fmt.Errorf("Got errors while moving snapshots, see the log output.") + } + + stmt := ` +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err = d.db.Exec(stmt, 12) + + return err +} + +func dbUpdateFromV10(d *Daemon) error { + if d.IsMock { + // No need to move lxc to containers in mock runs, + // dbUpdateFromV12 will then set the db version to 13 + return nil + } + + if shared.PathExists(shared.VarPath("lxc")) { + err := os.Rename(shared.VarPath("lxc"), shared.VarPath("containers")) + if err != nil { + return err + } + + shared.Debugf("Restarting all the containers following directory rename") + containersShutdown(d) + containersRestart(d) + } + + stmt := ` +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := d.db.Exec(stmt, 11) + return err +} + +func dbUpdateFromV9(db *sql.DB) error { + stmt := ` +CREATE TABLE tmp ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(255) NOT NULL default "none", + FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE, + UNIQUE (container_id, name) +); + +INSERT INTO tmp SELECT * FROM containers_devices; + +UPDATE containers_devices SET type=0 WHERE id IN (SELECT id FROM tmp WHERE type="none"); +UPDATE containers_devices SET type=1 WHERE id IN (SELECT id FROM tmp WHERE type="nic"); +UPDATE containers_devices SET type=2 WHERE id IN (SELECT id FROM tmp WHERE type="disk"); +UPDATE containers_devices SET type=3 WHERE id IN (SELECT id FROM tmp WHERE type="unix-char"); +UPDATE containers_devices SET type=4 WHERE id IN (SELECT id FROM tmp WHERE type="unix-block"); + +DROP TABLE tmp; + +CREATE TABLE tmp ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type VARCHAR(255) NOT NULL default "none", + FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE, + UNIQUE (profile_id, name) +); + +INSERT INTO tmp SELECT * FROM profiles_devices; + +UPDATE profiles_devices SET type=0 WHERE id IN (SELECT id FROM tmp WHERE type="none"); +UPDATE profiles_devices SET type=1 WHERE id IN (SELECT id FROM tmp WHERE type="nic"); +UPDATE profiles_devices SET type=2 WHERE id IN (SELECT id FROM tmp WHERE type="disk"); +UPDATE profiles_devices SET type=3 WHERE id IN (SELECT id FROM tmp WHERE type="unix-char"); +UPDATE profiles_devices SET type=4 WHERE id IN (SELECT id FROM tmp WHERE type="unix-block"); + +DROP TABLE tmp; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 10) + return err +} + +func dbUpdateFromV8(db *sql.DB) error { + stmt := ` +UPDATE certificates SET fingerprint = replace(fingerprint, " ", ""); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 9) + return err +} + +func dbUpdateFromV7(db *sql.DB) error { + stmt := ` +UPDATE config SET key='core.trust_password' WHERE key IN ('password', 'trust_password', 'trust-password', 'core.trust-password'); +DELETE FROM config WHERE key != 'core.trust_password'; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 8) + return err +} + +func dbUpdateFromV6(db *sql.DB) error { + // This update recreates the schemas that need an ON DELETE CASCADE foreign + // key. 
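+	//
+	// SQLite's ALTER TABLE cannot add a constraint to an existing table,
+	// so every affected table goes through the same rebuild pattern
+	// (sketched here with a <table> placeholder):
+	//
+	//	CREATE TEMP TABLE tmp AS SELECT * FROM <table>;
+	//	DROP TABLE <table>;
+	//	CREATE TABLE <table> (... ON DELETE CASCADE ...);
+	//	INSERT INTO <table> SELECT * FROM tmp;
+	//	DROP TABLE tmp;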
+ stmt := ` +PRAGMA foreign_keys=OFF; -- So that integrity doesn't get in the way for now + +CREATE TEMP TABLE tmp AS SELECT * FROM containers_config; +DROP TABLE containers_config; +CREATE TABLE IF NOT EXISTS containers_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE, + UNIQUE (container_id, key) +); +INSERT INTO containers_config SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM containers_devices; +DROP TABLE containers_devices; +CREATE TABLE IF NOT EXISTS containers_devices ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type INTEGER NOT NULL default 0, + FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE, + UNIQUE (container_id, name) +); +INSERT INTO containers_devices SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM containers_devices_config; +DROP TABLE containers_devices_config; +CREATE TABLE IF NOT EXISTS containers_devices_config ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_device_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_device_id) REFERENCES containers_devices (id) ON DELETE CASCADE, + UNIQUE (container_device_id, key) +); +INSERT INTO containers_devices_config SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM containers_profiles; +DROP TABLE containers_profiles; +CREATE TABLE IF NOT EXISTS containers_profiles ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + apply_order INTEGER NOT NULL default 0, + UNIQUE (container_id, profile_id), + FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE, + FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE +); +INSERT INTO containers_profiles SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM images_aliases; +DROP TABLE images_aliases; +CREATE TABLE IF NOT EXISTS images_aliases ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + image_id INTEGER NOT NULL, + description VARCHAR(255), + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE, + UNIQUE (name) +); +INSERT INTO images_aliases SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM images_properties; +DROP TABLE images_properties; +CREATE TABLE IF NOT EXISTS images_properties ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + image_id INTEGER NOT NULL, + type INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE +); +INSERT INTO images_properties SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM profiles_config; +DROP TABLE profiles_config; +CREATE TABLE IF NOT EXISTS profiles_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value VARCHAR(255), + UNIQUE (profile_id, key), + FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE +); +INSERT INTO profiles_config SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM profiles_devices; +DROP TABLE profiles_devices; +CREATE TABLE IF NOT EXISTS profiles_devices ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + name 
VARCHAR(255) NOT NULL, + type INTEGER NOT NULL default 0, + UNIQUE (profile_id, name), + FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE +); +INSERT INTO profiles_devices SELECT * FROM tmp; +DROP TABLE tmp; + +CREATE TEMP TABLE tmp AS SELECT * FROM profiles_devices_config; +DROP TABLE profiles_devices_config; +CREATE TABLE IF NOT EXISTS profiles_devices_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_device_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (profile_device_id, key), + FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) ON DELETE CASCADE +); +INSERT INTO profiles_devices_config SELECT * FROM tmp; +DROP TABLE tmp; + +PRAGMA foreign_keys=ON; -- Make sure we turn integrity checks back on. +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 7) + if err != nil { + return err + } + + // Get the rows with broken foreign keys an nuke them + rows, err := db.Query("PRAGMA foreign_key_check;") + if err != nil { + return err + } + + var tablestodelete []string + var rowidtodelete []int + + defer rows.Close() + for rows.Next() { + var tablename string + var rowid int + var targetname string + var keynumber int + + rows.Scan(&tablename, &rowid, &targetname, &keynumber) + tablestodelete = append(tablestodelete, tablename) + rowidtodelete = append(rowidtodelete, rowid) + } + rows.Close() + + for i := range tablestodelete { + _, err = db.Exec(fmt.Sprintf("DELETE FROM %s WHERE rowid = %d;", tablestodelete[i], rowidtodelete[i])) + if err != nil { + return err + } + } + + return err +} + +func dbUpdateFromV5(db *sql.DB) error { + stmt := ` +ALTER TABLE containers ADD COLUMN power_state INTEGER NOT NULL DEFAULT 0; +ALTER TABLE containers ADD COLUMN ephemeral INTEGER NOT NULL DEFAULT 0; +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 6) + return err +} + +func dbUpdateFromV4(db *sql.DB) error { + stmt := ` +CREATE TABLE IF NOT EXISTS config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (key) +); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 5) + if err != nil { + return err + } + + passfname := shared.VarPath("adminpwd") + passOut, err := os.Open(passfname) + oldPassword := "" + if err == nil { + defer passOut.Close() + buff := make([]byte, 96) + _, err = passOut.Read(buff) + if err != nil { + return err + } + + oldPassword = hex.EncodeToString(buff) + stmt := `INSERT INTO config (key, value) VALUES ("core.trust_password", ?);` + + _, err := db.Exec(stmt, oldPassword) + if err != nil { + return err + } + + return os.Remove(passfname) + } + + return nil +} + +func dbUpdateFromV3(db *sql.DB) error { + // Attempt to create a default profile (but don't fail if already there) + stmt := `INSERT INTO profiles (name) VALUES ("default"); +INSERT INTO profiles_devices (profile_id, name, type) SELECT id, "eth0", "nic" FROM profiles WHERE profiles.name="default"; +INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "nictype", "bridged" FROM profiles_devices LEFT JOIN profiles ON profiles.id=profiles_devices.profile_id WHERE profiles.name == "default"; +INSERT INTO profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, 'name', "eth0" FROM profiles_devices LEFT JOIN profiles ON profiles.id=profiles_devices.profile_id WHERE profiles.name == "default"; +INSERT INTO 
profiles_devices_config (profile_device_id, key, value) SELECT profiles_devices.id, "parent", "lxcbr0" FROM profiles_devices LEFT JOIN profiles ON profiles.id=profiles_devices.profile_id WHERE profiles.name == "default";` + db.Exec(stmt) + + stmt = `INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 4) + return err +} + +func dbUpdateFromV2(db *sql.DB) error { + stmt := ` +CREATE TABLE IF NOT EXISTS containers_devices ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type INTEGER NOT NULL default 0, + FOREIGN KEY (container_id) REFERENCES containers (id) ON DELETE CASCADE, + UNIQUE (container_id, name) +); +CREATE TABLE IF NOT EXISTS containers_devices_config ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_device_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_device_id) REFERENCES containers_devices (id), + UNIQUE (container_device_id, key) +); +CREATE TABLE IF NOT EXISTS containers_profiles ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + profile_id INTEGER NOT NULL, + apply_order INTEGER NOT NULL default 0, + UNIQUE (container_id, profile_id), + FOREIGN KEY (container_id) REFERENCES containers(id) ON DELETE CASCADE, + FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS profiles ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + UNIQUE (name) +); +CREATE TABLE IF NOT EXISTS profiles_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value VARCHAR(255), + UNIQUE (profile_id, key), + FOREIGN KEY (profile_id) REFERENCES profiles(id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS profiles_devices ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_id INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + type INTEGER NOT NULL default 0, + UNIQUE (profile_id, name), + FOREIGN KEY (profile_id) REFERENCES profiles (id) ON DELETE CASCADE +); +CREATE TABLE IF NOT EXISTS profiles_devices_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + profile_device_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + UNIQUE (profile_device_id, key), + FOREIGN KEY (profile_device_id) REFERENCES profiles_devices (id) +); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 3) + return err +} + +/* Yeah we can do htis in a more clever way */ +func dbUpdateFromV1(db *sql.DB) error { + // v1..v2 adds images aliases + stmt := ` +CREATE TABLE IF NOT EXISTS images_aliases ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + image_id INTEGER NOT NULL, + description VARCHAR(255), + FOREIGN KEY (image_id) REFERENCES images (id) ON DELETE CASCADE, + UNIQUE (name) +); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 2) + return err +} + +func dbUpdateFromV0(db *sql.DB) error { + // v0..v1 adds schema table + stmt := ` +CREATE TABLE IF NOT EXISTS schema ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + version INTEGER NOT NULL, + updated_at DATETIME NOT NULL, + UNIQUE (version) +); +INSERT INTO schema (version, updated_at) VALUES (?, strftime("%s"));` + _, err := db.Exec(stmt, 1) + return err +} + +func dbUpdate(d *Daemon, prevVersion int) error { + db := d.db + + if prevVersion < 0 || prevVersion > DB_CURRENT_VERSION { + 
return fmt.Errorf("Bad database version: %d", prevVersion) + } + if prevVersion == DB_CURRENT_VERSION { + return nil + } + var err error + if prevVersion < 1 { + err = dbUpdateFromV0(db) + if err != nil { + return err + } + } + if prevVersion < 2 { + err = dbUpdateFromV1(db) + if err != nil { + return err + } + } + if prevVersion < 3 { + err = dbUpdateFromV2(db) + if err != nil { + return err + } + } + if prevVersion < 4 { + err = dbUpdateFromV3(db) + if err != nil { + return err + } + } + if prevVersion < 5 { + err = dbUpdateFromV4(db) + if err != nil { + return err + } + } + if prevVersion < 6 { + err = dbUpdateFromV5(db) + if err != nil { + return err + } + } + if prevVersion < 7 { + err = dbUpdateFromV6(db) + if err != nil { + return err + } + } + if prevVersion < 8 { + err = dbUpdateFromV7(db) + if err != nil { + return err + } + } + if prevVersion < 9 { + err = dbUpdateFromV8(db) + if err != nil { + return err + } + } + if prevVersion < 10 { + err = dbUpdateFromV9(db) + if err != nil { + return err + } + } + if prevVersion < 11 { + err = dbUpdateFromV10(d) + if err != nil { + return err + } + } + if prevVersion < 12 { + err = dbUpdateFromV11(d) + if err != nil { + return err + } + } + if prevVersion < 13 { + err = dbUpdateFromV12(db) + if err != nil { + return err + } + } + if prevVersion < 14 { + err = dbUpdateFromV13(db) + if err != nil { + return err + } + } + if prevVersion < 15 { + err = dbUpdateFromV14(db) + if err != nil { + return err + } + } + if prevVersion < 16 { + err = dbUpdateFromV15(d) + if err != nil { + return err + } + } + if prevVersion < 17 { + err = dbUpdateFromV16(db) + if err != nil { + return err + } + } + if prevVersion < 18 { + err = dbUpdateFromV17(db) + if err != nil { + return err + } + } + if prevVersion < 19 { + err = dbUpdateFromV18(db) + if err != nil { + return err + } + } + if prevVersion < 20 { + err = dbUpdateFromV19(db) + if err != nil { + return err + } + } + if prevVersion < 21 { + err = dbUpdateFromV20(db) + if err != nil { + return err + } + } + if prevVersion < 22 { + err = dbUpdateFromV21(db) + if err != nil { + return err + } + } + if prevVersion < 23 { + err = dbUpdateFromV22(db) + if err != nil { + return err + } + } + if prevVersion < 24 { + err = dbUpdateFromV23(db) + if err != nil { + return err + } + } + if prevVersion < 25 { + err = dbUpdateFromV24(db) + if err != nil { + return err + } + } + if prevVersion < 26 { + err = dbUpdateFromV25(db) + if err != nil { + return err + } + } + if prevVersion < 27 { + err = dbUpdateFromV26(db) + if err != nil { + return err + } + } + if prevVersion < 28 { + err = dbUpdateFromV27(db) + if err != nil { + return err + } + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxd/debug.go' --- src/github.com/lxc/lxd/lxd/debug.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/debug.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +package main + +import ( + "os" + "os/signal" + "runtime/pprof" + "syscall" + + "github.com/lxc/lxd/shared" +) + +func doMemDump(memProfile string) { + f, err := os.Create(memProfile) + if err != nil { + shared.Debugf("Error opening memory profile file '%s': %s", err) + return + } + pprof.WriteHeapProfile(f) + f.Close() +} + +func memProfiler(memProfile string) { + ch := make(chan os.Signal) + signal.Notify(ch, syscall.SIGUSR1) + for { + sig := <-ch + shared.Debugf("Received '%s signal', dumping memory.", sig) + doMemDump(memProfile) + } +} === added file 'src/github.com/lxc/lxd/lxd/devices.go' --- src/github.com/lxc/lxd/lxd/devices.go 1970-01-01 00:00:00 +0000 
+++ src/github.com/lxc/lxd/lxd/devices.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,831 @@ +package main + +import ( + "bufio" + "bytes" + "crypto/rand" + "encoding/hex" + "fmt" + "io/ioutil" + "math/big" + "os" + "os/exec" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "syscall" + + _ "github.com/mattn/go-sqlite3" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +var deviceSchedRebalance = make(chan []string, 0) + +type deviceBlockLimit struct { + readBps int64 + readIops int64 + writeBps int64 + writeIops int64 +} + +type deviceTaskCPU struct { + id int + strId string + count *int +} +type deviceTaskCPUs []deviceTaskCPU + +func (c deviceTaskCPUs) Len() int { return len(c) } +func (c deviceTaskCPUs) Less(i, j int) bool { return *c[i].count < *c[j].count } +func (c deviceTaskCPUs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +func deviceNetlinkListener() (chan []string, error) { + NETLINK_KOBJECT_UEVENT := 15 + UEVENT_BUFFER_SIZE := 2048 + + fd, err := syscall.Socket( + syscall.AF_NETLINK, syscall.SOCK_RAW, + NETLINK_KOBJECT_UEVENT, + ) + + if err != nil { + return nil, err + } + + nl := syscall.SockaddrNetlink{ + Family: syscall.AF_NETLINK, + Pid: uint32(os.Getpid()), + Groups: 1, + } + + err = syscall.Bind(fd, &nl) + if err != nil { + return nil, err + } + + ch := make(chan []string, 0) + + go func(ch chan []string) { + b := make([]byte, UEVENT_BUFFER_SIZE*2) + for { + _, err := syscall.Read(fd, b) + if err != nil { + continue + } + + props := map[string]string{} + last := 0 + for i, e := range b { + if i == len(b) || e == 0 { + msg := string(b[last+1 : i]) + last = i + if len(msg) == 0 || msg == "\x00" { + continue + } + + fields := strings.SplitN(msg, "=", 2) + if len(fields) != 2 { + continue + } + + props[fields[0]] = fields[1] + } + } + + if props["SUBSYSTEM"] == "cpu" { + if props["DRIVER"] != "processor" { + continue + } + + if props["ACTION"] != "offline" && props["ACTION"] != "online" { + continue + } + + ch <- []string{"cpu", path.Base(props["DEVPATH"]), props["ACTION"]} + } + + if props["SUBSYSTEM"] == "net" { + if props["ACTION"] != "add" && props["ACTION"] != "removed" { + continue + } + + ch <- []string{"net", props["INTERFACE"], props["ACTION"]} + } + } + }(ch) + + return ch, nil +} + +func deviceTaskBalance(d *Daemon) { + min := func(x, y int) int { + if x < y { + return x + } + return y + } + + // Don't bother running when CGroup support isn't there + if !cgCpusetController { + return + } + + // Count CPUs + cpus := []int{} + dents, err := ioutil.ReadDir("/sys/bus/cpu/devices/") + if err != nil { + shared.Log.Error("balance: Unable to list CPUs", log.Ctx{"err": err}) + return + } + + for _, f := range dents { + id := -1 + count, err := fmt.Sscanf(f.Name(), "cpu%d", &id) + if count != 1 || id == -1 { + shared.Log.Error("balance: Bad CPU", log.Ctx{"path": f.Name()}) + continue + } + + onlinePath := fmt.Sprintf("/sys/bus/cpu/devices/%s/online", f.Name()) + if !shared.PathExists(onlinePath) { + // CPUs without an online file are non-hotplug so are always online + cpus = append(cpus, id) + continue + } + + online, err := ioutil.ReadFile(onlinePath) + if err != nil { + shared.Log.Error("balance: Bad CPU", log.Ctx{"path": f.Name(), "err": err}) + continue + } + + if online[0] == byte('0') { + continue + } + + cpus = append(cpus, id) + } + + // Iterate through the containers + containers, err := dbContainersList(d.db, cTypeRegular) + fixedContainers := map[int][]container{} + balancedContainers := map[container]int{} + for _, name := 
range containers { + c, err := containerLoadByName(d, name) + if err != nil { + continue + } + + conf := c.ExpandedConfig() + cpu, ok := conf["limits.cpu"] + if !ok || cpu == "" { + currentCPUs, err := deviceGetCurrentCPUs() + if err != nil { + shared.Debugf("Couldn't get current CPU list: %s", err) + cpu = fmt.Sprintf("%d", len(cpus)) + } else { + cpu = currentCPUs + } + } + + if !c.IsRunning() { + continue + } + + count, err := strconv.Atoi(cpu) + if err == nil { + // Load-balance + count = min(count, len(cpus)) + balancedContainers[c] = count + } else { + // Pinned + chunks := strings.Split(cpu, ",") + for _, chunk := range chunks { + if strings.Contains(chunk, "-") { + // Range + fields := strings.SplitN(chunk, "-", 2) + if len(fields) != 2 { + shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) + continue + } + + low, err := strconv.Atoi(fields[0]) + if err != nil { + shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) + continue + } + + high, err := strconv.Atoi(fields[1]) + if err != nil { + shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) + continue + } + + for i := low; i <= high; i++ { + if !shared.IntInSlice(i, cpus) { + continue + } + + _, ok := fixedContainers[i] + if ok { + fixedContainers[i] = append(fixedContainers[i], c) + } else { + fixedContainers[i] = []container{c} + } + } + } else { + // Simple entry + nr, err := strconv.Atoi(chunk) + if err != nil { + shared.Log.Error("Invalid limits.cpu value.", log.Ctx{"container": c.Name(), "value": cpu}) + continue + } + + if !shared.IntInSlice(nr, cpus) { + continue + } + + _, ok := fixedContainers[nr] + if ok { + fixedContainers[nr] = append(fixedContainers[nr], c) + } else { + fixedContainers[nr] = []container{c} + } + } + } + } + } + + // Balance things + pinning := map[container][]string{} + usage := make(deviceTaskCPUs, 0) + + for _, id := range cpus { + cpu := deviceTaskCPU{} + cpu.id = id + cpu.strId = fmt.Sprintf("%d", id) + count := 0 + cpu.count = &count + + usage = append(usage, cpu) + } + + for cpu, ctns := range fixedContainers { + id := usage[cpu].strId + for _, ctn := range ctns { + _, ok := pinning[ctn] + if ok { + pinning[ctn] = append(pinning[ctn], id) + } else { + pinning[ctn] = []string{id} + } + *usage[cpu].count += 1 + } + } + + for ctn, count := range balancedContainers { + sort.Sort(usage) + for _, cpu := range usage { + if count == 0 { + break + } + count -= 1 + + id := cpu.strId + _, ok := pinning[ctn] + if ok { + pinning[ctn] = append(pinning[ctn], id) + } else { + pinning[ctn] = []string{id} + } + *cpu.count += 1 + } + } + + // Set the new pinning + for ctn, set := range pinning { + // Confirm the container didn't just stop + if !ctn.IsRunning() { + continue + } + + sort.Strings(set) + err := ctn.CGroupSet("cpuset.cpus", strings.Join(set, ",")) + if err != nil { + shared.Log.Error("balance: Unable to set cpuset", log.Ctx{"name": ctn.Name(), "err": err, "value": strings.Join(set, ",")}) + } + } +} + +func deviceGetCurrentCPUs() (string, error) { + // Open /proc/self/status + f, err := os.Open("/proc/self/status") + if err != nil { + return "", err + } + defer f.Close() + + // Read it line by line + scan := bufio.NewScanner(f) + for scan.Scan() { + line := scan.Text() + + // We only care about MemTotal + if !strings.HasPrefix(line, "Cpus_allowed_list:") { + continue + } + + // Extract the before last (value) and last (unit) fields + fields := strings.Split(line, "\t") + value := 
fields[len(fields)-1] + + return value, nil + } + + return "", fmt.Errorf("Couldn't find cpus_allowed_list") +} + +func deviceNetworkPriority(d *Daemon, netif string) { + // Don't bother running when CGroup support isn't there + if !cgNetPrioController { + return + } + + containers, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return + } + + for _, name := range containers { + // Get the container struct + c, err := containerLoadByName(d, name) + if err != nil { + continue + } + + // Extract the current priority + networkPriority := c.ExpandedConfig()["limits.network.priority"] + if networkPriority == "" { + continue + } + + networkInt, err := strconv.Atoi(networkPriority) + if err != nil { + continue + } + + // Set the value for the new interface + c.CGroupSet("net_prio.ifpriomap", fmt.Sprintf("%s %d", netif, networkInt)) + } + + return +} + +func deviceEventListener(d *Daemon) { + chNetlink, err := deviceNetlinkListener() + if err != nil { + shared.Log.Error("scheduler: couldn't setup netlink listener") + return + } + + for { + select { + case e := <-chNetlink: + if len(e) != 3 { + shared.Log.Error("Scheduler: received an invalid hotplug event") + continue + } + + if e[0] == "cpu" && cgCpusetController { + shared.Debugf("Scheduler: %s: %s is now %s: re-balancing", e[0], e[1], e[2]) + deviceTaskBalance(d) + } + + if e[0] == "net" && e[2] == "add" && cgNetPrioController && shared.PathExists(fmt.Sprintf("/sys/class/net/%s", e[1])) { + shared.Debugf("Scheduler: %s: %s has been added: updating network priorities", e[0], e[1]) + deviceNetworkPriority(d, e[1]) + } + case e := <-deviceSchedRebalance: + if len(e) != 3 { + shared.Log.Error("Scheduler: received an invalid rebalance event") + continue + } + + if cgCpusetController { + shared.Debugf("Scheduler: %s %s %s: re-balancing", e[0], e[1], e[2]) + deviceTaskBalance(d) + } + } + } +} + +func deviceTaskSchedulerTrigger(srcType string, srcName string, srcStatus string) { + // Spawn a go routine which then triggers the scheduler + go func() { + deviceSchedRebalance <- []string{srcType, srcName, srcStatus} + }() +} + +func deviceIsDevice(path string) bool { + // Get a stat struct from the provided path + stat := syscall.Stat_t{} + err := syscall.Stat(path, &stat) + if err != nil { + return false + } + + // Check if it's a character device + if stat.Mode&syscall.S_IFMT == syscall.S_IFCHR { + return true + } + + // Check if it's a block device + if stat.Mode&syscall.S_IFMT == syscall.S_IFBLK { + return true + } + + // Not a device + return false +} + +func deviceModeOct(strmode string) (int, error) { + // Default mode + if strmode == "" { + return 0600, nil + } + + // Converted mode + i, err := strconv.ParseInt(strmode, 8, 32) + if err != nil { + return 0, fmt.Errorf("Bad device mode: %s", strmode) + } + + return int(i), nil +} + +func deviceGetAttributes(path string) (string, int, int, error) { + // Get a stat struct from the provided path + stat := syscall.Stat_t{} + err := syscall.Stat(path, &stat) + if err != nil { + return "", 0, 0, err + } + + // Check what kind of file it is + dType := "" + if stat.Mode&syscall.S_IFMT == syscall.S_IFBLK { + dType = "b" + } else if stat.Mode&syscall.S_IFMT == syscall.S_IFCHR { + dType = "c" + } else { + return "", 0, 0, fmt.Errorf("Not a device") + } + + // Return the device information + major := int(stat.Rdev / 256) + minor := int(stat.Rdev % 256) + return dType, major, minor, nil +} + +func deviceNextInterfaceHWAddr() (string, error) { + // Generate a new random MAC address using the 
usual prefix + ret := bytes.Buffer{} + for _, c := range "00:16:3e:xx:xx:xx" { + if c == 'x' { + c, err := rand.Int(rand.Reader, big.NewInt(16)) + if err != nil { + return "", err + } + ret.WriteString(fmt.Sprintf("%x", c.Int64())) + } else { + ret.WriteString(string(c)) + } + } + + return ret.String(), nil +} + +func deviceNextVeth() string { + // Return a new random veth device name + randBytes := make([]byte, 4) + rand.Read(randBytes) + return "veth" + hex.EncodeToString(randBytes) +} + +func deviceRemoveInterface(nic string) error { + return exec.Command("ip", "link", "del", nic).Run() +} + +func deviceMountDisk(srcPath string, dstPath string, readonly bool) error { + var err error + + // Prepare the mount flags + flags := 0 + if readonly { + flags |= syscall.MS_RDONLY + } + + // Detect the filesystem + fstype := "none" + if deviceIsDevice(srcPath) { + fstype, err = shared.BlockFsDetect(srcPath) + if err != nil { + return err + } + } else { + flags |= syscall.MS_BIND + } + + // Mount the filesystem + if err = syscall.Mount(srcPath, dstPath, fstype, uintptr(flags), ""); err != nil { + return fmt.Errorf("Unable to mount %s at %s: %s", srcPath, dstPath, err) + } + + return nil +} + +func deviceParseCPU(cpuAllowance string, cpuPriority string) (string, string, string, error) { + var err error + + // Parse priority + cpuShares := 0 + cpuPriorityInt := 10 + if cpuPriority != "" { + cpuPriorityInt, err = strconv.Atoi(cpuPriority) + if err != nil { + return "", "", "", err + } + } + cpuShares -= 10 - cpuPriorityInt + + // Parse allowance + cpuCfsQuota := "-1" + cpuCfsPeriod := "100000" + + if cpuAllowance != "" { + if strings.HasSuffix(cpuAllowance, "%") { + // Percentage based allocation + percent, err := strconv.Atoi(strings.TrimSuffix(cpuAllowance, "%")) + if err != nil { + return "", "", "", err + } + + cpuShares += (10 * percent) + 24 + } else { + // Time based allocation + fields := strings.SplitN(cpuAllowance, "/", 2) + if len(fields) != 2 { + return "", "", "", fmt.Errorf("Invalid allowance: %s", cpuAllowance) + } + + quota, err := strconv.Atoi(strings.TrimSuffix(fields[0], "ms")) + if err != nil { + return "", "", "", err + } + + period, err := strconv.Atoi(strings.TrimSuffix(fields[1], "ms")) + if err != nil { + return "", "", "", err + } + + // Set limit in ms + cpuCfsQuota = fmt.Sprintf("%d", quota*1000) + cpuCfsPeriod = fmt.Sprintf("%d", period*1000) + cpuShares += 1024 + } + } else { + // Default is 100% + cpuShares += 1024 + } + + // Deal with a potential negative score + if cpuShares < 0 { + cpuShares = 0 + } + + return fmt.Sprintf("%d", cpuShares), cpuCfsQuota, cpuCfsPeriod, nil +} + +func deviceTotalMemory() (int64, error) { + // Open /proc/meminfo + f, err := os.Open("/proc/meminfo") + if err != nil { + return -1, err + } + defer f.Close() + + // Read it line by line + scan := bufio.NewScanner(f) + for scan.Scan() { + line := scan.Text() + + // We only care about MemTotal + if !strings.HasPrefix(line, "MemTotal:") { + continue + } + + // Extract the before last (value) and last (unit) fields + fields := strings.Split(line, " ") + value := fields[len(fields)-2] + fields[len(fields)-1] + + // Feed the result to shared.ParseByteSizeString to get an int value + valueBytes, err := shared.ParseByteSizeString(value) + if err != nil { + return -1, err + } + + return valueBytes, nil + } + + return -1, fmt.Errorf("Couldn't find MemTotal") +} + +func deviceGetParentBlocks(path string) ([]string, error) { + var devices []string + var device []string + + // Expand the mount path + 
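+	// (resolving symlinks as well, so the comparison against the
+	// mountinfo entries below is done on the canonical path)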
absPath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + + expPath, err := filepath.EvalSymlinks(absPath) + if err != nil { + expPath = absPath + } + + // Find the source mount of the path + file, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + match := "" + for scanner.Scan() { + line := scanner.Text() + rows := strings.Fields(line) + + if len(rows[4]) <= len(match) { + continue + } + + if expPath != rows[4] && !strings.HasPrefix(expPath, rows[4]) { + continue + } + + match = rows[4] + + // Go backward to avoid problems with optional fields + device = []string{rows[2], rows[len(rows)-2]} + } + + if device == nil { + return nil, fmt.Errorf("Couldn't find a match /proc/self/mountinfo entry") + } + + // Handle the most simple case + if !strings.HasPrefix(device[0], "0:") { + return []string{device[0]}, nil + } + + // Deal with per-filesystem oddities. We don't care about failures here + // because any non-special filesystem => directory backend. + fs, _ := filesystemDetect(expPath) + + if fs == "zfs" && shared.PathExists("/dev/zfs") { + // Accessible zfs filesystems + poolName := strings.Split(device[1], "/")[0] + + output, err := exec.Command("zpool", "status", poolName).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("Failed to query zfs filesystem information for %s: %s", device[1], output) + } + + for _, line := range strings.Split(string(output), "\n") { + fields := strings.Fields(line) + if len(fields) < 5 { + continue + } + + if fields[1] != "ONLINE" { + continue + } + + var path string + if shared.PathExists(fields[0]) { + if shared.IsBlockdevPath(fields[0]) { + path = fields[0] + } else { + subDevices, err := deviceGetParentBlocks(fields[0]) + if err != nil { + return nil, err + } + + for _, dev := range subDevices { + devices = append(devices, dev) + } + } + } else if shared.PathExists(fmt.Sprintf("/dev/%s", fields[0])) { + path = fmt.Sprintf("/dev/%s", fields[0]) + } else if shared.PathExists(fmt.Sprintf("/dev/disk/by-id/%s", fields[0])) { + path = fmt.Sprintf("/dev/disk/by-id/%s", fields[0]) + } else if shared.PathExists(fmt.Sprintf("/dev/mapper/%s", fields[0])) { + path = fmt.Sprintf("/dev/mapper/%s", fields[0]) + } else { + continue + } + + if path != "" { + _, major, minor, err := deviceGetAttributes(path) + if err != nil { + continue + } + + devices = append(devices, fmt.Sprintf("%d:%d", major, minor)) + } + } + + if len(devices) == 0 { + return nil, fmt.Errorf("Unable to find backing block for zfs pool: %s", poolName) + } + } else if fs == "btrfs" && shared.PathExists(device[1]) { + // Accessible btrfs filesystems + output, err := exec.Command("btrfs", "filesystem", "show", device[1]).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("Failed to query btrfs filesystem information for %s: %s", device[1], output) + } + + for _, line := range strings.Split(string(output), "\n") { + fields := strings.Fields(line) + if len(fields) == 0 || fields[0] != "devid" { + continue + } + + _, major, minor, err := deviceGetAttributes(fields[len(fields)-1]) + if err != nil { + return nil, err + } + + devices = append(devices, fmt.Sprintf("%d:%d", major, minor)) + } + } else if shared.PathExists(device[1]) { + // Anything else with a valid path + _, major, minor, err := deviceGetAttributes(device[1]) + if err != nil { + return nil, err + } + + devices = append(devices, fmt.Sprintf("%d:%d", major, minor)) + } else { + return nil, fmt.Errorf("Invalid block device: 
%s", device[1]) + } + + return devices, nil +} + +func deviceParseDiskLimit(readSpeed string, writeSpeed string) (int64, int64, int64, int64, error) { + parseValue := func(value string) (int64, int64, error) { + var err error + + bps := int64(0) + iops := int64(0) + + if readSpeed == "" { + return bps, iops, nil + } + + if strings.HasSuffix(value, "iops") { + iops, err = strconv.ParseInt(strings.TrimSuffix(value, "iops"), 10, 64) + if err != nil { + return -1, -1, err + } + } else { + bps, err = shared.ParseByteSizeString(value) + if err != nil { + return -1, -1, err + } + } + + return bps, iops, nil + } + + readBps, readIops, err := parseValue(readSpeed) + if err != nil { + return -1, -1, -1, -1, err + } + + writeBps, writeIops, err := parseValue(writeSpeed) + if err != nil { + return -1, -1, -1, -1, err + } + + return readBps, readIops, writeBps, writeIops, nil +} === added file 'src/github.com/lxc/lxd/lxd/devlxd.go' --- src/github.com/lxc/lxd/lxd/devlxd.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/devlxd.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,353 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path" + "reflect" + "regexp" + "strconv" + "strings" + "unsafe" + + "github.com/gorilla/mux" + + "github.com/lxc/lxd/shared" +) + +type devLxdResponse struct { + content interface{} + code int + ctype string +} + +func okResponse(ct interface{}, ctype string) *devLxdResponse { + return &devLxdResponse{ct, http.StatusOK, ctype} +} + +type devLxdHandler struct { + path string + + /* + * This API will have to be changed slightly when we decide to support + * websocket events upgrading, but since we don't have events on the + * server side right now either, I went the simple route to avoid + * needless noise. + */ + f func(c container, r *http.Request) *devLxdResponse +} + +var configGet = devLxdHandler{"/1.0/config", func(c container, r *http.Request) *devLxdResponse { + filtered := []string{} + for k, _ := range c.ExpandedConfig() { + if strings.HasPrefix(k, "user.") { + filtered = append(filtered, fmt.Sprintf("/1.0/config/%s", k)) + } + } + return okResponse(filtered, "json") +}} + +var configKeyGet = devLxdHandler{"/1.0/config/{key}", func(c container, r *http.Request) *devLxdResponse { + key := mux.Vars(r)["key"] + if !strings.HasPrefix(key, "user.") { + return &devLxdResponse{"not authorized", http.StatusForbidden, "raw"} + } + + value, ok := c.ExpandedConfig()[key] + if !ok { + return &devLxdResponse{"not found", http.StatusNotFound, "raw"} + } + + return okResponse(value, "raw") +}} + +var metadataGet = devLxdHandler{"/1.0/meta-data", func(c container, r *http.Request) *devLxdResponse { + value := c.ExpandedConfig()["user.meta-data"] + return okResponse(fmt.Sprintf("#cloud-config\ninstance-id: %s\nlocal-hostname: %s\n%s", c.Name(), c.Name(), value), "raw") +}} + +var handlers = []devLxdHandler{ + devLxdHandler{"/", func(c container, r *http.Request) *devLxdResponse { + return okResponse([]string{"/1.0"}, "json") + }}, + devLxdHandler{"/1.0", func(c container, r *http.Request) *devLxdResponse { + return okResponse(shared.Jmap{"api_version": shared.APIVersion}, "json") + }}, + configGet, + configKeyGet, + metadataGet, + /* TODO: events */ +} + +func hoistReq(f func(container, *http.Request) *devLxdResponse, d *Daemon) func(http.ResponseWriter, *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + conn := extractUnderlyingConn(w) + pid, ok := pidMapper.m[conn] + if !ok { + http.Error(w, pidNotInContainerErr.Error(), 500) + 
return
+		}
+
+		c, err := findContainerForPid(pid, d)
+		if err != nil {
+			http.Error(w, err.Error(), 500)
+			return
+		}
+
+		resp := f(c, r)
+		if resp.code != http.StatusOK {
+			http.Error(w, fmt.Sprintf("%s", resp.content), resp.code)
+		} else if resp.ctype == "json" {
+			w.Header().Set("Content-Type", "application/json")
+			WriteJSON(w, resp.content)
+		} else {
+			w.Header().Set("Content-Type", "application/octet-stream")
+			fmt.Fprintf(w, resp.content.(string))
+		}
+	}
+}
+
+func createAndBindDevLxd() (*net.UnixListener, error) {
+	sockFile := path.Join(shared.VarPath("devlxd"), "sock")
+
+	/*
+	 * If this socket exists, that means a previous lxd died and didn't
+	 * clean up after itself. We assume that the LXD is actually dead if we
+	 * get this far, since StartDaemon() tries to connect to the actual lxd
+	 * socket to make sure that it is actually dead. So, it is safe to
+	 * remove it here without any checks.
+	 *
+	 * Also, it would be nice to SO_REUSEADDR here so we don't have to
+	 * delete the socket, but we can't:
+	 * http://stackoverflow.com/questions/15716302/so-reuseaddr-and-af-unix
+	 *
+	 * Note that this will force clients to reconnect when LXD is restarted.
+	 */
+	if err := os.Remove(sockFile); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+
+	unixAddr, err := net.ResolveUnixAddr("unix", sockFile)
+	if err != nil {
+		return nil, err
+	}
+
+	unixl, err := net.ListenUnix("unix", unixAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := os.Chmod(sockFile, 0666); err != nil {
+		return nil, err
+	}
+
+	return unixl, nil
+}
+
+func devLxdServer(d *Daemon) *http.Server {
+	m := mux.NewRouter()
+
+	for _, handler := range handlers {
+		m.HandleFunc(handler.path, hoistReq(handler.f, d))
+	}
+
+	return &http.Server{
+		Handler:   m,
+		ConnState: pidMapper.ConnStateHandler,
+	}
+}
+
+/*
+ * Everything below here is the guts of the unix socket bits. Unfortunately,
+ * golang's API does not make this easy. What happens is:
+ *
+ * 1. We install a ConnState listener on the http.Server, which does the
+ * initial unix socket credential exchange. When we get a connection started
+ * event, we use SO_PEERCRED to extract the creds for the socket.
+ *
+ * 2. We store a map from the connection pointer to the pid for that
+ * connection, so that once the HTTP negotiation occurs and we get a
+ * ResponseWriter, we know (because we negotiated on the first byte) which
+ * pid the connection belongs to.
+ *
+ * 3. Regular HTTP negotiation and dispatch occurs via net/http.
+ *
+ * 4. When rendering the response via ResponseWriter, we match its underlying
+ * connection against what we stored in step (2) to figure out which container
+ * it came from.
+ */
+
+/*
+ * We keep this in a global so that we can reference it from the server and
+ * from our http handlers, since there appears to be no way to pass information
+ * around here.
+ */
+var pidMapper = ConnPidMapper{m: map[*net.UnixConn]int32{}}
+
+type ConnPidMapper struct {
+	m map[*net.UnixConn]int32
+}
+
+func (m *ConnPidMapper) ConnStateHandler(conn net.Conn, state http.ConnState) {
+	unixConn := conn.(*net.UnixConn)
+	switch state {
+	case http.StateNew:
+		pid, err := getPid(unixConn)
+		if err != nil {
+			shared.Debugf("Error getting pid for conn %s", err)
+		} else {
+			m.m[unixConn] = pid
+		}
+	case http.StateActive:
+		return
+	case http.StateIdle:
+		return
+	case http.StateHijacked:
+		/*
+		 * The "Hijacked" state indicates that the connection has been
+		 * taken over from net/http.
This is useful for things like
+		 * developing websocket libraries, which want to upgrade the
+		 * connection to a websocket one, and not use net/http any
+		 * more. Whatever the case, we want to forget about it since we
+		 * won't see it either.
+		 */
+		delete(m.m, unixConn)
+	case http.StateClosed:
+		delete(m.m, unixConn)
+	default:
+		shared.Debugf("Unknown state for connection %s", state)
+	}
+}
+
+/*
+ * I also don't see that golang exports an API to get at the underlying FD, but
+ * we need it to get at SO_PEERCRED, so let's grab it.
+ */
+func extractUnderlyingFd(unixConnPtr *net.UnixConn) int {
+	conn := reflect.Indirect(reflect.ValueOf(unixConnPtr))
+	netFdPtr := conn.FieldByName("fd")
+	netFd := reflect.Indirect(netFdPtr)
+	fd := netFd.FieldByName("sysfd")
+	return int(fd.Int())
+}
+
+func getPid(conn *net.UnixConn) (int32, error) {
+	fd := extractUnderlyingFd(conn)
+
+	_, _, pid, err := getUcred(fd)
+	if err != nil {
+		return 0, err
+	}
+
+	return pid, nil
+}
+
+/*
+ * As near as I can tell, there is no nice way of extracting an underlying
+ * net.Conn (or in our case, net.UnixConn) from an http.Request or
+ * ResponseWriter without hijacking it [1]. Since we want to send and receive
+ * unix creds to figure out which container this request came from, we need to
+ * do this.
+ *
+ * [1]: https://groups.google.com/forum/#!topic/golang-nuts/_FWdFXJa6QA
+ */
+func extractUnderlyingConn(w http.ResponseWriter) *net.UnixConn {
+	v := reflect.Indirect(reflect.ValueOf(w))
+	connPtr := v.FieldByName("conn")
+	conn := reflect.Indirect(connPtr)
+	rwc := conn.FieldByName("rwc")
+
+	netConnPtr := (*net.Conn)(unsafe.Pointer(rwc.UnsafeAddr()))
+	unixConnPtr := (*netConnPtr).(*net.UnixConn)
+
+	return unixConnPtr
+}
+
+var pidNotInContainerErr = fmt.Errorf("pid not in container?")
+
+func findContainerForPid(pid int32, d *Daemon) (container, error) {
+	/*
+	 * Try and figure out which container a pid is in. There is probably a
+	 * better way to do this. Based on rharper's initial performance
+	 * metrics, looping over every container and calling newLxdContainer is
+	 * expensive, so I wanted to avoid that if possible, so this happens in
+	 * a two step process:
+	 *
+	 * 1. Walk up the process tree until you see something that looks like
+	 *    an lxc monitor process and extract its name from there.
+	 *
+	 * 2. If this fails, it may be that someone did an `lxc exec foo bash`,
+	 *    so the process isn't actually a descendant of the container's
+	 *    init. In this case we just look through all the containers until
+	 *    we find an init with a matching pid namespace. This is probably
+	 *    uncommon, so hopefully the slowness won't hurt us.
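+	 *
+	 * For example, step (1) might walk the PPid entries in
+	 * /proc/PID/status up to a process whose cmdline reads
+	 * "[lxc monitor] LXCPATH NAME", at which point the container name
+	 * is taken from the last space-separated field.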
+ */ + + origpid := pid + + for pid > 1 { + cmdline, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", pid)) + if err != nil { + return nil, err + } + + if strings.HasPrefix(string(cmdline), "[lxc monitor]") { + // container names can't have spaces + parts := strings.Split(string(cmdline), " ") + name := parts[len(parts)-1] + + return containerLoadByName(d, name) + } + + status, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid)) + if err != nil { + return nil, err + } + + re := regexp.MustCompile("PPid:\\s*([0-9]*)") + for _, line := range strings.Split(string(status), "\n") { + m := re.FindStringSubmatch(line) + if m != nil && len(m) > 1 { + result, err := strconv.Atoi(m[1]) + if err != nil { + return nil, err + } + + pid = int32(result) + break + } + } + } + + origPidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", origpid)) + if err != nil { + return nil, err + } + + containers, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return nil, err + } + + for _, container := range containers { + c, err := containerLoadByName(d, container) + if err != nil { + return nil, err + } + + initpid := c.InitPID() + pidNs, err := os.Readlink(fmt.Sprintf("/proc/%d/ns/pid", initpid)) + if err != nil { + return nil, err + } + + if origPidNs == pidNs { + return c, nil + } + } + + return nil, pidNotInContainerErr +} === added file 'src/github.com/lxc/lxd/lxd/devlxd_gc.go' --- src/github.com/lxc/lxd/lxd/devlxd_gc.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/devlxd_gc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +// +build gc + +package main + +import ( + "syscall" +) + +func getUcred(fd int) (uint32, uint32, int32, error) { + cred, err := syscall.GetsockoptUcred(fd, syscall.SOL_SOCKET, syscall.SO_PEERCRED) + if err != nil { + return 0, 0, -1, err + } + + return cred.Uid, cred.Gid, cred.Pid, nil +} === added file 'src/github.com/lxc/lxd/lxd/devlxd_gccgo.go' --- src/github.com/lxc/lxd/lxd/devlxd_gccgo.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/devlxd_gccgo.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// +build gccgo +// +build cgo + +package main + +import ( + "errors" +) + +/* +#define _GNU_SOURCE +#include +#include +#include +#include +#include + +void getucred(int sock, uint *uid, uint *gid, int *pid) { + struct ucred peercred; + socklen_t len; + + len = sizeof(struct ucred); + + if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &peercred, &len) != 0 || len != sizeof(peercred)) { + fprintf(stderr, "getsockopt failed: %s\n", strerror(errno)); + return; + } + + *uid = peercred.uid; + *gid = peercred.gid; + *pid = peercred.pid; + + return; +} +*/ +import "C" + +func getUcred(fd int) (uint32, uint32, int32, error) { + uid := C.uint(0) + gid := C.uint(0) + pid := C.int(-1) + + C.getucred(C.int(fd), &uid, &gid, &pid) + + if uid == 0 || gid == 0 || pid == -1 { + return 0, 0, -1, errors.New("Failed to get the ucred") + } + + return uint32(uid), uint32(gid), int32(pid), nil +} === added file 'src/github.com/lxc/lxd/lxd/devlxd_test.go' --- src/github.com/lxc/lxd/lxd/devlxd_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/devlxd_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,148 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "strings" + "testing" +) + +var testDir string + +type DevLxdDialer struct { + Path string +} + +func (d DevLxdDialer) DevLxdDial(network, path string) (net.Conn, error) { + addr, err := net.ResolveUnixAddr("unix", d.Path) + if err != nil { + return nil, err + } + + conn, err := 
net.DialUnix("unix", nil, addr) + if err != nil { + return nil, err + } + + return conn, err +} + +func setupDir() error { + var err error + + testDir, err = ioutil.TempDir("", "lxd_test_devlxd_") + if err != nil { + return err + } + + err = os.Chmod(testDir, 0700) + if err != nil { + return err + } + + os.MkdirAll(fmt.Sprintf("%s/devlxd", testDir), 0755) + + return os.Setenv("LXD_DIR", testDir) +} + +func setupSocket() (*net.UnixListener, error) { + setupDir() + + return createAndBindDevLxd() +} + +func connect(path string) (*net.UnixConn, error) { + addr, err := net.ResolveUnixAddr("unix", path) + if err != nil { + return nil, err + } + + conn, err := net.DialUnix("unix", nil, addr) + if err != nil { + return nil, err + } + + return conn, nil +} + +func TestCredsSendRecv(t *testing.T) { + result := make(chan int32, 1) + + listener, err := setupSocket() + if err != nil { + t.Fatal(err) + } + defer listener.Close() + defer os.RemoveAll(testDir) + + go func() { + conn, err := listener.AcceptUnix() + if err != nil { + t.Log(err) + result <- -1 + return + } + defer conn.Close() + + pid, err := getPid(conn) + if err != nil { + t.Log(err) + result <- -1 + return + } + result <- pid + }() + + conn, err := connect(fmt.Sprintf("%s/devlxd/sock", testDir)) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + pid := <-result + if pid != int32(os.Getpid()) { + t.Fatal("pid mismatch: ", pid, os.Getpid()) + } +} + +/* + * Here we're not really testing the API functionality (we can't, since it + * expects us to be inside a container to work), but it is useful to test that + * all the grotty connection extracting stuff works (that is, it gets to the + * point where it realizes the pid isn't in a container without crashing). + */ +func TestHttpRequest(t *testing.T) { + if err := setupDir(); err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + + d, err := startDaemon("") + if err != nil { + t.Fatal(err) + } + defer d.Stop() + + c := http.Client{Transport: &http.Transport{Dial: DevLxdDialer{Path: fmt.Sprintf("%s/devlxd/sock", testDir)}.DevLxdDial}} + + raw, err := c.Get("http://1.0") + if err != nil { + t.Fatal(err) + } + + if raw.StatusCode != 500 { + t.Fatal(err) + } + + resp, err := ioutil.ReadAll(raw.Body) + if err != nil { + t.Fatal(err) + } + + if !strings.Contains(string(resp), pidNotInContainerErr.Error()) { + t.Fatal("resp error not expected: ", string(resp)) + } +} === added file 'src/github.com/lxc/lxd/lxd/events.go' --- src/github.com/lxc/lxd/lxd/events.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/events.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,143 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/gorilla/websocket" + "github.com/pborman/uuid" + log "gopkg.in/inconshreveable/log15.v2" + + "github.com/lxc/lxd/shared" +) + +type eventsHandler struct { +} + +func logContextMap(ctx []interface{}) map[string]string { + var key string + ctxMap := map[string]string{} + + for _, entry := range ctx { + if key == "" { + key = entry.(string) + } else { + ctxMap[key] = fmt.Sprintf("%s", entry) + key = "" + } + } + + return ctxMap +} + +func (h eventsHandler) Log(r *log.Record) error { + eventSend("logging", shared.Jmap{ + "message": r.Msg, + "level": r.Lvl.String(), + "context": logContextMap(r.Ctx)}) + return nil +} + +var eventsLock sync.Mutex +var eventListeners map[string]*eventListener = make(map[string]*eventListener) + +type eventListener struct { + connection *websocket.Conn + messageTypes []string + 
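+	// active is written to when the listener's connection goes away,
+	// unblocking the events handler in eventsSocket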
active       chan bool
+	id           string
+	msgLock      sync.Mutex
+	wgUsed       sync.WaitGroup
+}
+
+type eventsServe struct {
+	req *http.Request
+}
+
+func (r *eventsServe) Render(w http.ResponseWriter) error {
+	return eventsSocket(r.req, w)
+}
+
+func eventsSocket(r *http.Request, w http.ResponseWriter) error {
+	listener := eventListener{}
+
+	typeStr := r.FormValue("type")
+	if typeStr == "" {
+		typeStr = "logging,operation"
+	}
+
+	c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil)
+	if err != nil {
+		return err
+	}
+
+	listener.active = make(chan bool, 1)
+	listener.connection = c
+	listener.id = uuid.NewRandom().String()
+	listener.messageTypes = strings.Split(typeStr, ",")
+
+	eventsLock.Lock()
+	eventListeners[listener.id] = &listener
+	eventsLock.Unlock()
+
+	shared.Debugf("New events listener: %s", listener.id)
+
+	<-listener.active
+
+	return nil
+}
+
+func eventsGet(d *Daemon, r *http.Request) Response {
+	return &eventsServe{r}
+}
+
+var eventsCmd = Command{name: "events", get: eventsGet}
+
+func eventSend(eventType string, eventMessage interface{}) error {
+	event := shared.Jmap{}
+	event["type"] = eventType
+	event["timestamp"] = time.Now()
+	event["metadata"] = eventMessage
+
+	body, err := json.Marshal(event)
+	if err != nil {
+		return err
+	}
+
+	eventsLock.Lock()
+	listeners := eventListeners
+	for _, listener := range listeners {
+		if !shared.StringInSlice(eventType, listener.messageTypes) {
+			continue
+		}
+
+		listener.wgUsed.Add(1)
+		go func(listener *eventListener, body []byte) {
+			listener.msgLock.Lock()
+			err = listener.connection.WriteMessage(websocket.TextMessage, body)
+			listener.msgLock.Unlock()
+			listener.wgUsed.Done()
+
+			if err != nil {
+				listener.wgUsed.Wait()
+				listener.connection.Close()
+				listener.active <- false
+
+				eventsLock.Lock()
+				delete(eventListeners, listener.id)
+				eventsLock.Unlock()
+
+				shared.Debugf("Disconnected events listener: %s", listener.id)
+				return
+			}
+
+		}(listener, body)
+	}
+	eventsLock.Unlock()
+
+	return nil
+}
=== added file 'src/github.com/lxc/lxd/lxd/images.go'
--- src/github.com/lxc/lxd/lxd/images.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/images.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1255 @@
+package main
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/gorilla/mux"
+	"gopkg.in/yaml.v2"
+
+	"github.com/lxc/lxd/shared"
+	"github.com/lxc/lxd/shared/logging"
+
+	log "gopkg.in/inconshreveable/log15.v2"
+)
+
+/* We only want a single publish running at any one time.
+   The CPU and I/O load of publish is such that running multiple ones in
+   parallel takes longer than running them serially.
+
+   Additionally, publishing the same container or container snapshot
+   twice would lead to storage problems, not to mention a conflict at the
+   end for whichever finishes last.
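+
+   The imagePublishLock mutex below is what enforces this serialisation:
+   the publish path simply blocks on it until any previous publish is done.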
*/
+var imagePublishLock sync.Mutex
+
+func detectCompression(fname string) ([]string, string, error) {
+	f, err := os.Open(fname)
+	if err != nil {
+		return []string{""}, "", err
+	}
+	defer f.Close()
+
+	// read header parts to detect compression method
+	// bz2 - 2 bytes, 'BZ' signature/magic number
+	// gz - 2 bytes, 0x1f 0x8b
+	// lzma - 6 bytes, { [0x000, 0xE0], '7', 'z', 'X', 'Z', 0x00 }
+	// xz - 6 bytes, header format { 0xFD, '7', 'z', 'X', 'Z', 0x00 }
+	// tar - 263 bytes, trying to get ustar from 257 - 262
+	header := make([]byte, 263)
+	_, err = f.Read(header)
+	if err != nil {
+		return []string{""}, "", err
+	}
+
+	switch {
+	case bytes.Equal(header[0:2], []byte{'B', 'Z'}):
+		return []string{"-jxf"}, ".tar.bz2", nil
+	case bytes.Equal(header[0:2], []byte{0x1f, 0x8b}):
+		return []string{"-zxf"}, ".tar.gz", nil
+	case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] == 0xFD):
+		return []string{"-Jxf"}, ".tar.xz", nil
+	case (bytes.Equal(header[1:5], []byte{'7', 'z', 'X', 'Z'}) && header[0] != 0xFD):
+		return []string{"--lzma", "-xf"}, ".tar.lzma", nil
+	case bytes.Equal(header[257:262], []byte{'u', 's', 't', 'a', 'r'}):
+		return []string{"-xf"}, ".tar", nil
+	default:
+		return []string{""}, "", fmt.Errorf("Unsupported compression.")
+	}
+}
+
+func untar(tarball string, path string) error {
+	extractArgs, _, err := detectCompression(tarball)
+	if err != nil {
+		return err
+	}
+
+	command := "tar"
+	args := []string{}
+	if runningInUserns {
+		args = append(args, "--wildcards")
+		args = append(args, "--exclude=dev/*")
+		args = append(args, "--exclude=./dev/*")
+		args = append(args, "--exclude=rootfs/dev/*")
+		args = append(args, "--exclude=rootfs/./dev/*")
+	}
+	args = append(args, "-C", path, "--numeric-owner")
+	args = append(args, extractArgs...)
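+	// The tarball path itself goes last, so the final command looks
+	// something like "tar -C DIR --numeric-owner -Jxf FILE.tar.xz"
+	// (plus the --exclude arguments above when running in a userns).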
+ args = append(args, tarball) + + output, err := exec.Command(command, args...).CombinedOutput() + if err != nil { + shared.Debugf("Unpacking failed") + shared.Debugf(string(output)) + return err + } + + return nil +} + +func untarImage(imagefname string, destpath string) error { + err := untar(imagefname, destpath) + if err != nil { + return err + } + + if shared.PathExists(imagefname + ".rootfs") { + rootfsPath := fmt.Sprintf("%s/rootfs", destpath) + err = os.MkdirAll(rootfsPath, 0755) + if err != nil { + return fmt.Errorf("Error creating rootfs directory") + } + + err = untar(imagefname+".rootfs", rootfsPath) + if err != nil { + return err + } + } + + return nil +} + +func compressFile(path string, compress string) (string, error) { + cmd := exec.Command(compress, path, "-c", "-n") + + outfile, err := os.Create(path + ".compressed") + if err != nil { + return "", err + } + + defer outfile.Close() + cmd.Stdout = outfile + + err = cmd.Run() + if err != nil { + os.Remove(outfile.Name()) + return "", err + } + + return outfile.Name(), nil +} + +type templateEntry struct { + When []string + Template string + Properties map[string]string +} + +type imagePostReq struct { + Filename string `json:"filename"` + Public bool `json:"public"` + Source map[string]string `json:"source"` + Properties map[string]string `json:"properties"` + AutoUpdate bool `json:"auto_update"` +} + +type imageMetadata struct { + Architecture string `yaml:"architecture"` + CreationDate int64 `yaml:"creation_date"` + ExpiryDate int64 `yaml:"expiry_date"` + Properties map[string]string `yaml:"properties"` + Templates map[string]*templateEntry `yaml:"templates"` +} + +/* + * This function takes a container or snapshot from the local image server and + * exports it as an image. + */ +func imgPostContInfo(d *Daemon, r *http.Request, req imagePostReq, + builddir string) (info shared.ImageInfo, err error) { + + info.Properties = map[string]string{} + name := req.Source["name"] + ctype := req.Source["type"] + if ctype == "" || name == "" { + return info, fmt.Errorf("No source provided") + } + + switch ctype { + case "snapshot": + if !shared.IsSnapshot(name) { + return info, fmt.Errorf("Not a snapshot") + } + case "container": + if shared.IsSnapshot(name) { + return info, fmt.Errorf("This is a snapshot") + } + default: + return info, fmt.Errorf("Bad type") + } + + info.Filename = req.Filename + switch req.Public { + case true: + info.Public = true + case false: + info.Public = false + } + + c, err := containerLoadByName(d, name) + if err != nil { + return info, err + } + + // Build the actual image file + tarfile, err := ioutil.TempFile(builddir, "lxd_build_tar_") + if err != nil { + return info, err + } + defer os.Remove(tarfile.Name()) + + if err := c.Export(tarfile); err != nil { + tarfile.Close() + return info, err + } + tarfile.Close() + + compress, err := d.ConfigValueGet("images.compression_algorithm") + if err != nil { + return info, err + } + + // Default to gzip for this + if compress == "" { + compress = "gzip" + } + + var compressedPath string + if compress != "none" { + compressedPath, err = compressFile(tarfile.Name(), compress) + if err != nil { + return info, err + } + } else { + compressedPath = tarfile.Name() + } + defer os.Remove(compressedPath) + + sha256 := sha256.New() + tarf, err := os.Open(compressedPath) + if err != nil { + return info, err + } + info.Size, err = io.Copy(sha256, tarf) + tarf.Close() + if err != nil { + return info, err + } + info.Fingerprint = fmt.Sprintf("%x", sha256.Sum(nil)) + + _, _, 
err = dbImageGet(d.db, info.Fingerprint, false, true) + if err == nil { + return info, fmt.Errorf("The image already exists: %s", info.Fingerprint) + } + + /* rename the the file to the expected name so our caller can use it */ + finalName := shared.VarPath("images", info.Fingerprint) + err = shared.FileMove(compressedPath, finalName) + if err != nil { + return info, err + } + + info.Architecture, _ = shared.ArchitectureName(c.Architecture()) + info.Properties = req.Properties + + return info, nil +} + +func imgPostRemoteInfo(d *Daemon, req imagePostReq, op *operation) error { + var err error + var hash string + + if req.Source["fingerprint"] != "" { + hash = req.Source["fingerprint"] + } else if req.Source["alias"] != "" { + hash = req.Source["alias"] + } else { + return fmt.Errorf("must specify one of alias or fingerprint for init from image") + } + + hash, err = d.ImageDownload(op, req.Source["server"], req.Source["protocol"], req.Source["certificate"], req.Source["secret"], hash, false, req.AutoUpdate) + if err != nil { + return err + } + + id, info, err := dbImageGet(d.db, hash, false, false) + if err != nil { + return err + } + + // Allow overriding or adding properties + for k, v := range req.Properties { + info.Properties[k] = v + } + + // Update the DB record if needed + if req.Public || req.AutoUpdate || req.Filename != "" || len(req.Properties) > 0 { + err = dbImageUpdate(d.db, id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreationDate, info.ExpiryDate, info.Properties) + if err != nil { + return err + } + } + + metadata := make(map[string]string) + metadata["fingerprint"] = info.Fingerprint + metadata["size"] = strconv.FormatInt(info.Size, 10) + op.UpdateMetadata(metadata) + + return nil +} + +func imgPostURLInfo(d *Daemon, req imagePostReq, op *operation) error { + var err error + + if req.Source["url"] == "" { + return fmt.Errorf("Missing URL") + } + + // Resolve the image URL + tlsConfig, err := shared.GetTLSConfig("", "", nil) + if err != nil { + return err + } + + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + Dial: shared.RFC3493Dialer, + Proxy: http.ProxyFromEnvironment, + } + + myhttp := http.Client{ + Transport: tr, + } + + head, err := http.NewRequest("HEAD", req.Source["url"], nil) + if err != nil { + return err + } + + architecturesStr := []string{} + for _, arch := range d.architectures { + architecturesStr = append(architecturesStr, fmt.Sprintf("%d", arch)) + } + + head.Header.Set("User-Agent", shared.UserAgent) + head.Header.Set("LXD-Server-Architectures", strings.Join(architecturesStr, ", ")) + head.Header.Set("LXD-Server-Version", shared.Version) + + raw, err := myhttp.Do(head) + if err != nil { + return err + } + + hash := raw.Header.Get("LXD-Image-Hash") + if hash == "" { + return fmt.Errorf("Missing LXD-Image-Hash header") + } + + url := raw.Header.Get("LXD-Image-URL") + if url == "" { + return fmt.Errorf("Missing LXD-Image-URL header") + } + + // Import the image + hash, err = d.ImageDownload(op, url, "direct", "", "", hash, false, req.AutoUpdate) + if err != nil { + return err + } + + id, info, err := dbImageGet(d.db, hash, false, false) + if err != nil { + return err + } + + // Allow overriding or adding properties + for k, v := range req.Properties { + info.Properties[k] = v + } + + if req.Public || req.AutoUpdate || req.Filename != "" || len(req.Properties) > 0 { + err = dbImageUpdate(d.db, id, req.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreationDate, info.ExpiryDate, 
info.Properties) + if err != nil { + return err + } + } + + metadata := make(map[string]string) + metadata["fingerprint"] = info.Fingerprint + metadata["size"] = strconv.FormatInt(info.Size, 10) + op.UpdateMetadata(metadata) + + return nil +} + +func getImgPostInfo(d *Daemon, r *http.Request, + builddir string, post *os.File) (info shared.ImageInfo, err error) { + + var imageMeta *imageMetadata + logger := logging.AddContext(shared.Log, log.Ctx{"function": "getImgPostInfo"}) + + public, _ := strconv.Atoi(r.Header.Get("X-LXD-public")) + info.Public = public == 1 + propHeaders := r.Header[http.CanonicalHeaderKey("X-LXD-properties")] + ctype, ctypeParams, err := mime.ParseMediaType(r.Header.Get("Content-Type")) + if err != nil { + ctype = "application/octet-stream" + } + + sha256 := sha256.New() + var size int64 + + // Create a temporary file for the image tarball + imageTarf, err := ioutil.TempFile(builddir, "lxd_tar_") + if err != nil { + return info, err + } + defer os.Remove(imageTarf.Name()) + + if ctype == "multipart/form-data" { + // Parse the POST data + post.Seek(0, 0) + mr := multipart.NewReader(post, ctypeParams["boundary"]) + + // Get the metadata tarball + part, err := mr.NextPart() + if err != nil { + return info, err + } + + if part.FormName() != "metadata" { + return info, fmt.Errorf("Invalid multipart image") + } + + size, err = io.Copy(io.MultiWriter(imageTarf, sha256), part) + info.Size += size + + imageTarf.Close() + if err != nil { + logger.Error( + "Failed to copy the image tarfile", + log.Ctx{"err": err}) + return info, err + } + + // Get the rootfs tarball + part, err = mr.NextPart() + if err != nil { + logger.Error( + "Failed to get the next part", + log.Ctx{"err": err}) + return info, err + } + + if part.FormName() != "rootfs" { + logger.Error( + "Invalid multipart image") + + return info, fmt.Errorf("Invalid multipart image") + } + + // Create a temporary file for the rootfs tarball + rootfsTarf, err := ioutil.TempFile(builddir, "lxd_tar_") + if err != nil { + return info, err + } + defer os.Remove(rootfsTarf.Name()) + + size, err = io.Copy(io.MultiWriter(rootfsTarf, sha256), part) + info.Size += size + + rootfsTarf.Close() + if err != nil { + logger.Error( + "Failed to copy the rootfs tarfile", + log.Ctx{"err": err}) + return info, err + } + + info.Filename = part.FileName() + info.Fingerprint = fmt.Sprintf("%x", sha256.Sum(nil)) + + expectedFingerprint := r.Header.Get("X-LXD-fingerprint") + if expectedFingerprint != "" && info.Fingerprint != expectedFingerprint { + err = fmt.Errorf("fingerprints don't match, got %s expected %s", info.Fingerprint, expectedFingerprint) + return info, err + } + + imageMeta, err = getImageMetadata(imageTarf.Name()) + if err != nil { + logger.Error( + "Failed to get image metadata", + log.Ctx{"err": err}) + return info, err + } + + imgfname := shared.VarPath("images", info.Fingerprint) + err = shared.FileMove(imageTarf.Name(), imgfname) + if err != nil { + logger.Error( + "Failed to move the image tarfile", + log.Ctx{ + "err": err, + "source": imageTarf.Name(), + "dest": imgfname}) + return info, err + } + + rootfsfname := shared.VarPath("images", info.Fingerprint+".rootfs") + err = shared.FileMove(rootfsTarf.Name(), rootfsfname) + if err != nil { + logger.Error( + "Failed to move the rootfs tarfile", + log.Ctx{ + "err": err, + "source": rootfsTarf.Name(), + "dest": imgfname}) + return info, err + } + } else { + post.Seek(0, 0) + size, err = io.Copy(io.MultiWriter(imageTarf, sha256), post) + info.Size = size + imageTarf.Close() + 
logger.Debug("Tar size", log.Ctx{"size": size}) + if err != nil { + logger.Error( + "Failed to copy the tarfile", + log.Ctx{"err": err}) + return info, err + } + + info.Filename = r.Header.Get("X-LXD-filename") + info.Fingerprint = fmt.Sprintf("%x", sha256.Sum(nil)) + + expectedFingerprint := r.Header.Get("X-LXD-fingerprint") + if expectedFingerprint != "" && info.Fingerprint != expectedFingerprint { + logger.Error( + "Fingerprints don't match", + log.Ctx{ + "got": info.Fingerprint, + "expected": expectedFingerprint}) + err = fmt.Errorf( + "fingerprints don't match, got %s expected %s", + info.Fingerprint, + expectedFingerprint) + return info, err + } + + imageMeta, err = getImageMetadata(imageTarf.Name()) + if err != nil { + logger.Error( + "Failed to get image metadata", + log.Ctx{"err": err}) + return info, err + } + + imgfname := shared.VarPath("images", info.Fingerprint) + err = shared.FileMove(imageTarf.Name(), imgfname) + if err != nil { + logger.Error( + "Failed to move the tarfile", + log.Ctx{ + "err": err, + "source": imageTarf.Name(), + "dest": imgfname}) + return info, err + } + } + + info.Architecture = imageMeta.Architecture + info.CreationDate = time.Unix(imageMeta.CreationDate, 0) + info.ExpiryDate = time.Unix(imageMeta.ExpiryDate, 0) + + info.Properties = imageMeta.Properties + if len(propHeaders) > 0 { + for _, ph := range propHeaders { + p, _ := url.ParseQuery(ph) + for pkey, pval := range p { + info.Properties[pkey] = pval[0] + } + } + } + + return info, nil +} + +func imageBuildFromInfo(d *Daemon, info shared.ImageInfo) (metadata map[string]string, err error) { + err = d.Storage.ImageCreate(info.Fingerprint) + if err != nil { + return metadata, err + } + + err = dbImageInsert( + d.db, + info.Fingerprint, + info.Filename, + info.Size, + info.Public, + info.AutoUpdate, + info.Architecture, + info.CreationDate, + info.ExpiryDate, + info.Properties) + if err != nil { + return metadata, err + } + + metadata = make(map[string]string) + metadata["fingerprint"] = info.Fingerprint + metadata["size"] = strconv.FormatInt(info.Size, 10) + + return metadata, nil +} + +func imagesPost(d *Daemon, r *http.Request) Response { + var err error + + // create a directory under which we keep everything while building + builddir, err := ioutil.TempDir(shared.VarPath("images"), "lxd_build_") + if err != nil { + return InternalError(err) + } + + cleanup := func(path string, fd *os.File) { + if fd != nil { + fd.Close() + } + + if err := os.RemoveAll(path); err != nil { + shared.Debugf("Error deleting temporary directory \"%s\": %s", path, err) + } + } + + // Store the post data to disk + post, err := ioutil.TempFile(builddir, "lxd_post_") + if err != nil { + cleanup(builddir, nil) + return InternalError(err) + } + + _, err = io.Copy(post, r.Body) + if err != nil { + cleanup(builddir, post) + return InternalError(err) + } + + // Is this a container request? 
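+	// If the body decodes as an image POST request, this is a creation
+	// request (from a container, snapshot, image or url source); if
+	// decoding fails, the body is assumed to be a raw image upload.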
+ post.Seek(0, 0) + decoder := json.NewDecoder(post) + req := imagePostReq{} + err = decoder.Decode(&req) + imageUpload := err != nil + + if !imageUpload && !shared.StringInSlice(req.Source["type"], []string{"container", "snapshot", "image", "url"}) { + cleanup(builddir, post) + return InternalError(fmt.Errorf("Invalid images JSON")) + } + + // Begin background operation + run := func(op *operation) error { + var info shared.ImageInfo + + // Setup the cleanup function + defer cleanup(builddir, post) + + /* Processing image copy from remote */ + if !imageUpload && req.Source["type"] == "image" { + err := imgPostRemoteInfo(d, req, op) + if err != nil { + return err + } + return nil + } + + /* Processing image copy from URL */ + if !imageUpload && req.Source["type"] == "url" { + err := imgPostURLInfo(d, req, op) + if err != nil { + return err + } + return nil + } + + if imageUpload { + /* Processing image upload */ + info, err = getImgPostInfo(d, r, builddir, post) + if err != nil { + return err + } + } else { + /* Processing image creation from container */ + imagePublishLock.Lock() + info, err = imgPostContInfo(d, r, req, builddir) + if err != nil { + imagePublishLock.Unlock() + return err + } + imagePublishLock.Unlock() + } + + metadata, err := imageBuildFromInfo(d, info) + if err != nil { + return err + } + + op.UpdateMetadata(metadata) + return nil + } + + op, err := operationCreate(operationClassTask, nil, nil, run, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +func getImageMetadata(fname string) (*imageMetadata, error) { + metadataName := "metadata.yaml" + + compressionArgs, _, err := detectCompression(fname) + + if err != nil { + return nil, fmt.Errorf( + "detectCompression failed, err='%v', tarfile='%s'", + err, + fname) + } + + args := []string{"-O"} + args = append(args, compressionArgs...) 
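+	// i.e. run something like "tar -O -Jxf IMAGE metadata.yaml",
+	// streaming just the metadata file to stdout for parsing below.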
+ args = append(args, fname, metadataName) + + // read the metadata.yaml + output, err := exec.Command("tar", args...).CombinedOutput() + + if err != nil { + outputLines := strings.Split(string(output), "\n") + return nil, fmt.Errorf("Could not extract image metadata %s from tar: %v (%s)", metadataName, err, outputLines[0]) + } + + metadata := imageMetadata{} + err = yaml.Unmarshal(output, &metadata) + + if err != nil { + return nil, fmt.Errorf("Could not parse %s: %v", metadataName, err) + } + + return &metadata, nil +} + +func doImagesGet(d *Daemon, recursion bool, public bool) (interface{}, error) { + results, err := dbImagesGet(d.db, public) + if err != nil { + return []string{}, err + } + + resultString := make([]string, len(results)) + resultMap := make([]*shared.ImageInfo, len(results)) + i := 0 + for _, name := range results { + if !recursion { + url := fmt.Sprintf("/%s/images/%s", shared.APIVersion, name) + resultString[i] = url + } else { + image, response := doImageGet(d, name, public) + if response != nil { + continue + } + resultMap[i] = image + } + + i++ + } + + if !recursion { + return resultString, nil + } + + return resultMap, nil +} + +func imagesGet(d *Daemon, r *http.Request) Response { + public := !d.isTrustedClient(r) + + result, err := doImagesGet(d, d.isRecursionRequest(r), public) + if err != nil { + return SmartError(err) + } + return SyncResponse(true, result) +} + +var imagesCmd = Command{name: "images", post: imagesPost, untrustedGet: true, get: imagesGet} + +func autoUpdateImages(d *Daemon) { + shared.Debugf("Updating images") + + images, err := dbImagesGet(d.db, false) + if err != nil { + shared.Log.Error("Unable to retrieve the list of images", log.Ctx{"err": err}) + return + } + + for _, fp := range images { + id, info, err := dbImageGet(d.db, fp, false, true) + if err != nil { + shared.Log.Error("Error loading image", log.Ctx{"err": err, "fp": fp}) + continue + } + + if !info.AutoUpdate { + continue + } + + _, source, err := dbImageSourceGet(d.db, id) + if err != nil { + continue + } + + shared.Log.Debug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias}) + + hash, err := d.ImageDownload(nil, source.Server, source.Protocol, "", "", source.Alias, false, true) + if hash == fp { + shared.Log.Debug("Already up to date", log.Ctx{"fp": fp}) + continue + } + + newId, _, err := dbImageGet(d.db, hash, false, true) + if err != nil { + shared.Log.Error("Error loading image", log.Ctx{"err": err, "fp": hash}) + continue + } + + err = dbImageLastAccessUpdate(d.db, hash, info.LastUsedDate) + if err != nil { + shared.Log.Error("Error setting last use date", log.Ctx{"err": err, "fp": hash}) + continue + } + + err = dbImageAliasesMove(d.db, id, newId) + if err != nil { + shared.Log.Error("Error moving aliases", log.Ctx{"err": err, "fp": hash}) + continue + } + + err = doDeleteImage(d, fp) + if err != nil { + shared.Log.Error("Error deleting image", log.Ctx{"err": err, "fp": fp}) + } + } +} + +func pruneExpiredImages(d *Daemon) { + shared.Debugf("Pruning expired images") + expiry, err := d.ConfigValueGet("images.remote_cache_expiry") + if err != nil { + shared.Log.Error("Unable to read the images.remote_cache_expiry key") + return + } + + if expiry == "" { + expiry = "10" + } + + expiryInt, err := strconv.Atoi(expiry) + if err != nil { + shared.Log.Error("Invalid value for images.remote_cache_expiry", log.Ctx{"err": err}) + return + } + + images, err := dbImagesGetExpired(d.db, expiryInt) + if err != nil { + 
shared.Log.Error("Unable to retrieve the list of expired images", log.Ctx{"err": err}) + return + } + + for _, fp := range images { + if err := doDeleteImage(d, fp); err != nil { + shared.Log.Error("Error deleting image", log.Ctx{"err": err, "fp": fp}) + } + } + shared.Debugf("Done pruning expired images") +} + +func doDeleteImage(d *Daemon, fingerprint string) error { + id, imgInfo, err := dbImageGet(d.db, fingerprint, false, false) + if err != nil { + return err + } + + // get storage before deleting images/$fp because we need to + // look at the path + s, err := storageForImage(d, imgInfo) + if err != nil { + return err + } + + // Remove the image from storage backend + if err = s.ImageDelete(imgInfo.Fingerprint); err != nil { + return err + } + + // Remove main image file + fname := shared.VarPath("images", imgInfo.Fingerprint) + if shared.PathExists(fname) { + err = os.Remove(fname) + if err != nil { + shared.Debugf("Error deleting image file %s: %s", fname, err) + } + } + + // Remote the rootfs file + fname = shared.VarPath("images", imgInfo.Fingerprint) + ".rootfs" + if shared.PathExists(fname) { + err = os.Remove(fname) + if err != nil { + shared.Debugf("Error deleting image file %s: %s", fname, err) + } + } + + // Remove the DB entry + if err = dbImageDelete(d.db, id); err != nil { + return err + } + + return nil +} + +func imageDelete(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + + if err := doDeleteImage(d, fingerprint); err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +func doImageGet(d *Daemon, fingerprint string, public bool) (*shared.ImageInfo, Response) { + _, imgInfo, err := dbImageGet(d.db, fingerprint, public, false) + if err != nil { + return nil, SmartError(err) + } + + return imgInfo, nil +} + +func imageValidSecret(fingerprint string, secret string) bool { + for _, op := range operations { + if op.resources == nil { + continue + } + + opImages, ok := op.resources["images"] + if !ok { + continue + } + + if !shared.StringInSlice(fingerprint, opImages) { + continue + } + + opSecret, ok := op.metadata["secret"] + if !ok { + continue + } + + if opSecret == secret { + // Token is single-use, so cancel it now + op.Cancel() + return true + } + } + + return false +} + +func imageGet(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + public := !d.isTrustedClient(r) + secret := r.FormValue("secret") + + if public == true && imageValidSecret(fingerprint, secret) == true { + public = false + } + + info, response := doImageGet(d, fingerprint, public) + if response != nil { + return response + } + + return SyncResponse(true, info) +} + +type imagePutReq struct { + Properties map[string]string `json:"properties"` + Public bool `json:"public"` + AutoUpdate bool `json:"auto_update"` +} + +func imagePut(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + + req := imagePutReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + id, info, err := dbImageGet(d.db, fingerprint, false, false) + if err != nil { + return SmartError(err) + } + + err = dbImageUpdate(d.db, id, info.Filename, info.Size, req.Public, req.AutoUpdate, info.Architecture, info.CreationDate, info.ExpiryDate, req.Properties) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +var imageCmd = Command{name: "images/{fingerprint}", untrustedGet: true, get: imageGet, put: imagePut, delete: imageDelete} + +type aliasPostReq 
struct { + Name string `json:"name"` + Description string `json:"description"` + Target string `json:"target"` +} + +type aliasPutReq struct { + Description string `json:"description"` + Target string `json:"target"` +} + +func aliasesPost(d *Daemon, r *http.Request) Response { + req := aliasPostReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + if req.Name == "" || req.Target == "" { + return BadRequest(fmt.Errorf("name and target are required")) + } + + // This is just to see if the alias name already exists. + _, _, err := dbImageAliasGet(d.db, req.Name, true) + if err == nil { + return Conflict + } + + id, _, err := dbImageGet(d.db, req.Target, false, false) + if err != nil { + return SmartError(err) + } + + err = dbImageAliasAdd(d.db, req.Name, id, req.Description) + if err != nil { + return InternalError(err) + } + + return EmptySyncResponse +} + +func aliasesGet(d *Daemon, r *http.Request) Response { + recursion := d.isRecursionRequest(r) + + q := "SELECT name FROM images_aliases" + var name string + inargs := []interface{}{} + outfmt := []interface{}{name} + results, err := dbQueryScan(d.db, q, inargs, outfmt) + if err != nil { + return BadRequest(err) + } + responseStr := []string{} + responseMap := shared.ImageAliases{} + for _, res := range results { + name = res[0].(string) + if !recursion { + url := fmt.Sprintf("/%s/images/aliases/%s", shared.APIVersion, name) + responseStr = append(responseStr, url) + + } else { + _, alias, err := dbImageAliasGet(d.db, name, d.isTrustedClient(r)) + if err != nil { + continue + } + responseMap = append(responseMap, alias) + } + } + + if !recursion { + return SyncResponse(true, responseStr) + } + + return SyncResponse(true, responseMap) +} + +func aliasGet(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + _, alias, err := dbImageAliasGet(d.db, name, d.isTrustedClient(r)) + if err != nil { + return SmartError(err) + } + + return SyncResponse(true, alias) +} + +func aliasDelete(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + _, _, err := dbImageAliasGet(d.db, name, true) + if err != nil { + return SmartError(err) + } + + err = dbImageAliasDelete(d.db, name) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +func aliasPut(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + req := aliasPutReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + id, _, err := dbImageAliasGet(d.db, name, true) + if err != nil { + return SmartError(err) + } + + imageId, _, err := dbImageGet(d.db, req.Target, false, false) + if err != nil { + return SmartError(err) + } + + err = dbImageAliasUpdate(d.db, id, imageId, req.Description) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +func aliasPost(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + req := aliasPostReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + id, _, err := dbImageAliasGet(d.db, name, true) + if err != nil { + return SmartError(err) + } + + err = dbImageAliasRename(d.db, id, req.Name) + if err != nil { + return SmartError(err) + } + + return EmptySyncResponse +} + +func imageExport(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + + public := !d.isTrustedClient(r) + secret := r.FormValue("secret") + + if public == true && imageValidSecret(fingerprint, secret) == true { + 
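+		// A valid single-use secret grants this request access to the
+		// otherwise private image.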
public = false + } + + _, imgInfo, err := dbImageGet(d.db, fingerprint, public, false) + if err != nil { + return SmartError(err) + } + + filename := imgInfo.Filename + imagePath := shared.VarPath("images", imgInfo.Fingerprint) + rootfsPath := imagePath + ".rootfs" + if filename == "" { + _, ext, err := detectCompression(imagePath) + if err != nil { + ext = "" + } + filename = fmt.Sprintf("%s%s", fingerprint, ext) + } + + if shared.PathExists(rootfsPath) { + files := make([]fileResponseEntry, 2) + + files[0].identifier = "metadata" + files[0].path = imagePath + files[0].filename = "meta-" + filename + + files[1].identifier = "rootfs" + files[1].path = rootfsPath + files[1].filename = filename + + return FileResponse(r, files, nil, false) + } + + files := make([]fileResponseEntry, 1) + files[0].identifier = filename + files[0].path = imagePath + files[0].filename = filename + + return FileResponse(r, files, nil, false) +} + +func imageSecret(d *Daemon, r *http.Request) Response { + fingerprint := mux.Vars(r)["fingerprint"] + _, _, err := dbImageGet(d.db, fingerprint, false, false) + if err != nil { + return SmartError(err) + } + + secret, err := shared.RandomCryptoString() + + if err != nil { + return InternalError(err) + } + + meta := shared.Jmap{} + meta["secret"] = secret + + resources := map[string][]string{} + resources["images"] = []string{fingerprint} + + op, err := operationCreate(operationClassToken, resources, meta, nil, nil, nil) + if err != nil { + return InternalError(err) + } + + return OperationResponse(op) +} + +var imagesExportCmd = Command{name: "images/{fingerprint}/export", untrustedGet: true, get: imageExport} +var imagesSecretCmd = Command{name: "images/{fingerprint}/secret", post: imageSecret} + +var aliasesCmd = Command{name: "images/aliases", post: aliasesPost, get: aliasesGet} + +var aliasCmd = Command{name: "images/aliases/{name:.*}", untrustedGet: true, get: aliasGet, delete: aliasDelete, put: aliasPut, post: aliasPost} === added file 'src/github.com/lxc/lxd/lxd/main.go' --- src/github.com/lxc/lxd/lxd/main.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,925 @@ +package main + +import ( + "bufio" + "encoding/json" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "os" + "os/exec" + "os/signal" + "runtime/pprof" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "golang.org/x/crypto/ssh/terminal" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/gnuflag" + "github.com/lxc/lxd/shared/logging" +) + +// Global arguments +var argAuto = gnuflag.Bool("auto", false, "") +var argCPUProfile = gnuflag.String("cpuprofile", "", "") +var argDebug = gnuflag.Bool("debug", false, "") +var argGroup = gnuflag.String("group", "", "") +var argHelp = gnuflag.Bool("help", false, "") +var argLogfile = gnuflag.String("logfile", "", "") +var argMemProfile = gnuflag.String("memprofile", "", "") +var argNetworkAddress = gnuflag.String("network-address", "", "") +var argNetworkPort = gnuflag.Int("network-port", -1, "") +var argPrintGoroutinesEvery = gnuflag.Int("print-goroutines-every", -1, "") +var argStorageBackend = gnuflag.String("storage-backend", "dir", "") +var argStorageCreateDevice = gnuflag.String("storage-create-device", "", "") +var argStorageCreateLoop = gnuflag.Int("storage-create-loop", -1, "") +var argStoragePool = gnuflag.String("storage-pool", "", "") +var argSyslog = gnuflag.Bool("syslog", false, "") +var argTimeout = gnuflag.Int("timeout", -1, "") +var 
argTrustPassword = gnuflag.String("trust-password", "", "")
+var argVerbose = gnuflag.Bool("verbose", false, "")
+var argVersion = gnuflag.Bool("version", false, "")
+
+// Global variables
+var debug bool
+var verbose bool
+
+func init() {
+ rand.Seed(time.Now().UTC().UnixNano())
+}
+
+func main() {
+ if err := run(); err != nil {
+ fmt.Fprintf(os.Stderr, "error: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+func run() error {
+ // Our massive custom usage
+ gnuflag.Usage = func() {
+ fmt.Printf("Usage: lxd [command] [options]\n")
+
+ fmt.Printf("\nCommands:\n")
+ fmt.Printf(" activateifneeded\n")
+ fmt.Printf(" Check if LXD should be started (at boot) and if so, spawn it through socket activation\n")
+ fmt.Printf(" daemon [--group=lxd] (default command)\n")
+ fmt.Printf(" Start the main LXD daemon\n")
+ fmt.Printf(" init [--auto] [--network-address=IP] [--network-port=8443] [--storage-backend=dir]\n")
+ fmt.Printf(" [--storage-create-device=DEVICE] [--storage-create-loop=SIZE] [--storage-pool=POOL]\n")
+ fmt.Printf(" [--trust-password=]\n")
+ fmt.Printf(" Set up storage and networking\n")
+ fmt.Printf(" shutdown [--timeout=60]\n")
+ fmt.Printf(" Perform a clean shutdown of LXD and all running containers\n")
+ fmt.Printf(" waitready [--timeout=15]\n")
+ fmt.Printf(" Wait until LXD is ready to handle requests\n")
+
+ fmt.Printf("\n\nCommon options:\n")
+ fmt.Printf(" --debug\n")
+ fmt.Printf(" Enable debug mode\n")
+ fmt.Printf(" --help\n")
+ fmt.Printf(" Print this help message\n")
+ fmt.Printf(" --logfile FILE\n")
+ fmt.Printf(" Logfile to log to (e.g., /var/log/lxd/lxd.log)\n")
+ fmt.Printf(" --syslog\n")
+ fmt.Printf(" Enable syslog logging\n")
+ fmt.Printf(" --verbose\n")
+ fmt.Printf(" Enable verbose mode\n")
+ fmt.Printf(" --version\n")
+ fmt.Printf(" Print LXD's version number and exit\n")
+
+ fmt.Printf("\nDaemon options:\n")
+ fmt.Printf(" --group GROUP\n")
+ fmt.Printf(" Group which owns the shared socket\n")
+
+ fmt.Printf("\nDaemon debug options:\n")
+ fmt.Printf(" --cpuprofile FILE\n")
+ fmt.Printf(" Enable CPU profiling into the specified file\n")
+ fmt.Printf(" --memprofile FILE\n")
+ fmt.Printf(" Enable memory profiling into the specified file\n")
+ fmt.Printf(" --print-goroutines-every SECONDS\n")
+ fmt.Printf(" For debugging, print a complete stack trace every N seconds\n")
+
+ fmt.Printf("\nInit options:\n")
+ fmt.Printf(" --auto\n")
+ fmt.Printf(" Automatic (non-interactive) mode\n")
+ fmt.Printf(" --network-address ADDRESS\n")
+ fmt.Printf(" Address to bind LXD to (default: none)\n")
+ fmt.Printf(" --network-port PORT\n")
+ fmt.Printf(" Port to bind LXD to (default: 8443)\n")
+ fmt.Printf(" --storage-backend NAME\n")
+ fmt.Printf(" Storage backend to use (zfs or dir, default: dir)\n")
+ fmt.Printf(" --storage-create-device DEVICE\n")
+ fmt.Printf(" Set up device-based storage using DEVICE\n")
+ fmt.Printf(" --storage-create-loop SIZE\n")
+ fmt.Printf(" Set up loop-based storage with SIZE in GB\n")
+ fmt.Printf(" --storage-pool NAME\n")
+ fmt.Printf(" Storage pool to use or create\n")
+ fmt.Printf(" --trust-password PASSWORD\n")
+ fmt.Printf(" Password required to add new clients\n")
+
+ fmt.Printf("\nShutdown options:\n")
+ fmt.Printf(" --timeout SECONDS\n")
+ fmt.Printf(" How long to wait before failing\n")
+
+ fmt.Printf("\nWaitready options:\n")
+ fmt.Printf(" --timeout SECONDS\n")
+ fmt.Printf(" How long to wait before failing\n")
+
+ fmt.Printf("\n\nInternal commands (don't call these directly):\n")
+ fmt.Printf(" forkgetnet\n")
+ fmt.Printf(" Get container network 
information\n") + fmt.Printf(" forkgetfile\n") + fmt.Printf(" Grab a file from a running container\n") + fmt.Printf(" forkmigrate\n") + fmt.Printf(" Restore a container after migration\n") + fmt.Printf(" forkputfile\n") + fmt.Printf(" Push a file to a running container\n") + fmt.Printf(" forkstart\n") + fmt.Printf(" Start a container\n") + fmt.Printf(" callhook\n") + fmt.Printf(" Call a container hook\n") + } + + // Parse the arguments + gnuflag.Parse(true) + + // Set the global variables + debug = *argDebug + verbose = *argVerbose + + if *argHelp { + // The user asked for help via --help, so we shouldn't print to + // stderr. + gnuflag.SetOut(os.Stdout) + gnuflag.Usage() + return nil + } + + // Deal with --version right here + if *argVersion { + fmt.Println(shared.Version) + return nil + } + + if len(shared.VarPath("unix.sock")) > 107 { + return fmt.Errorf("LXD_DIR is too long, must be < %d", 107-len("unix.sock")) + } + + // Configure logging + syslog := "" + if *argSyslog { + syslog = "lxd" + } + + handler := eventsHandler{} + var err error + shared.Log, err = logging.GetLogger(syslog, *argLogfile, *argVerbose, *argDebug, handler) + if err != nil { + fmt.Printf("%s", err) + return nil + } + + // Process sub-commands + if len(os.Args) > 1 { + // "forkputfile", "forkgetfile", "forkmount" and "forkumount" are handled specially in nsexec.go + // "forkgetnet" is partially handled in nsexec.go (setns) + switch os.Args[1] { + case "activateifneeded": + return activateIfNeeded() + case "daemon": + return daemon() + case "forkgetnet": + return printnet() + case "forkmigrate": + return MigrateContainer(os.Args[1:]) + case "forkstart": + return startContainer(os.Args[1:]) + case "callhook": + return callHook(os.Args[1:]) + case "init": + return setupLXD() + case "shutdown": + return cleanShutdown() + case "waitready": + return waitReady() + } + } + + // Fail if some other command is passed + if gnuflag.NArg() > 0 { + gnuflag.Usage() + return fmt.Errorf("Unknown arguments") + } + + return daemon() +} + +func callHook(args []string) error { + if len(args) < 4 { + return fmt.Errorf("Invalid arguments") + } + + path := args[1] + id := args[2] + state := args[3] + target := "" + + err := os.Setenv("LXD_DIR", path) + if err != nil { + return err + } + + c, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + return err + } + + url := fmt.Sprintf("%s/internal/containers/%s/on%s", c.BaseURL, id, state) + + if state == "stop" { + target = os.Getenv("LXC_TARGET") + if target == "" { + target = "unknown" + } + url = fmt.Sprintf("%s?target=%s", url, target) + } + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + hook := make(chan error, 1) + go func() { + raw, err := c.Http.Do(req) + if err != nil { + hook <- err + return + } + + _, err = lxd.HoistResponse(raw, lxd.Sync) + if err != nil { + hook <- err + return + } + + hook <- nil + }() + + select { + case err := <-hook: + if err != nil { + return err + } + break + case <-time.After(30 * time.Second): + return fmt.Errorf("Hook didn't finish within 30s") + } + + if target == "reboot" { + return fmt.Errorf("Reboot must be handled by LXD.") + } + + return nil +} + +func daemon() error { + if *argCPUProfile != "" { + f, err := os.Create(*argCPUProfile) + if err != nil { + fmt.Printf("Error opening cpu profile file: %s\n", err) + return nil + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + if *argMemProfile != "" { + go memProfiler(*argMemProfile) + } + + neededPrograms := []string{"setfacl", 
"rsync", "tar", "xz"} + for _, p := range neededPrograms { + _, err := exec.LookPath(p) + if err != nil { + return err + } + } + + if *argPrintGoroutinesEvery > 0 { + go func() { + for { + time.Sleep(time.Duration(*argPrintGoroutinesEvery) * time.Second) + shared.PrintStack() + } + }() + } + + d, err := startDaemon(*argGroup) + + if err != nil { + if d != nil && d.db != nil { + d.db.Close() + } + return err + } + + var ret error + var wg sync.WaitGroup + wg.Add(1) + + go func() { + ch := make(chan os.Signal) + signal.Notify(ch, syscall.SIGPWR) + sig := <-ch + + shared.Log.Info( + fmt.Sprintf("Received '%s signal', shutting down containers.", sig)) + + containersShutdown(d) + + ret = d.Stop() + wg.Done() + }() + + go func() { + <-d.shutdownChan + + shared.Log.Info( + fmt.Sprintf("Asked to shutdown by API, shutting down containers.")) + + containersShutdown(d) + + ret = d.Stop() + wg.Done() + }() + + go func() { + ch := make(chan os.Signal) + signal.Notify(ch, syscall.SIGINT) + signal.Notify(ch, syscall.SIGQUIT) + signal.Notify(ch, syscall.SIGTERM) + sig := <-ch + + shared.Log.Info(fmt.Sprintf("Received '%s signal', exiting.", sig)) + ret = d.Stop() + wg.Done() + }() + + wg.Wait() + return ret +} + +func cleanShutdown() error { + var timeout int + + if *argTimeout == -1 { + timeout = 60 + } else { + timeout = *argTimeout + } + + c, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + return err + } + + req, err := http.NewRequest("PUT", c.BaseURL+"/internal/shutdown", nil) + if err != nil { + return err + } + + _, err = c.Http.Do(req) + if err != nil { + return err + } + + monitor := make(chan error, 1) + go func() { + monitor <- c.Monitor(nil, func(m interface{}) {}) + }() + + select { + case <-monitor: + break + case <-time.After(time.Second * time.Duration(timeout)): + return fmt.Errorf("LXD still running after %ds timeout.", timeout) + } + + return nil +} + +func activateIfNeeded() error { + // Don't start a full daemon, we just need DB access + d := &Daemon{ + IsMock: false, + imagesDownloading: map[string]chan bool{}, + imagesDownloadingLock: sync.RWMutex{}, + } + + err := initializeDbObject(d, shared.VarPath("lxd.db")) + if err != nil { + return err + } + + // Look for network socket + value, err := d.ConfigValueGet("core.https_address") + if err != nil { + return err + } + + if value != "" { + shared.Debugf("Daemon has core.https_address set, activating...") + _, err := lxd.NewClient(&lxd.DefaultConfig, "local") + return err + } + + // Look for auto-started or previously started containers + d.IdmapSet, err = shared.DefaultIdmapSet() + if err != nil { + return err + } + + result, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return err + } + + for _, name := range result { + c, err := containerLoadByName(d, name) + if err != nil { + return err + } + + config := c.ExpandedConfig() + lastState := config["volatile.last_state.power"] + autoStart := config["boot.autostart"] + + if lastState == "RUNNING" || lastState == "Running" || autoStart == "true" { + shared.Debugf("Daemon has auto-started containers, activating...") + _, err := lxd.NewClient(&lxd.DefaultConfig, "local") + return err + } + } + + shared.Debugf("No need to start the daemon now.") + return nil +} + +func waitReady() error { + var timeout int + + if *argTimeout == -1 { + timeout = 15 + } else { + timeout = *argTimeout + } + + finger := make(chan error, 1) + go func() { + for { + _, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + time.Sleep(500 * time.Millisecond) + 
continue + } + + finger <- nil + return + } + }() + + select { + case <-finger: + break + case <-time.After(time.Second * time.Duration(timeout)): + return fmt.Errorf("LXD still not running after %ds timeout.", timeout) + } + + return nil +} + +func setupLXD() error { + var storageBackend string // dir or zfs + var storageMode string // existing, loop or device + var storageLoopSize int // Size in GB + var storageDevice string // Path + var storagePool string // pool name + var networkAddress string // Address + var networkPort int // Port + var trustPassword string // Trust password + + // Only root should run this + if os.Geteuid() != 0 { + return fmt.Errorf("This must be run as root") + } + + backendsAvailable := []string{"dir"} + backendsSupported := []string{"dir", "zfs"} + + // Detect zfs + out, err := exec.LookPath("zfs") + if err == nil && len(out) != 0 { + backendsAvailable = append(backendsAvailable, "zfs") + } + + reader := bufio.NewReader(os.Stdin) + + askBool := func(question string) bool { + for { + fmt.Printf(question) + input, _ := reader.ReadString('\n') + input = strings.TrimSuffix(input, "\n") + if shared.StringInSlice(strings.ToLower(input), []string{"yes", "y", "true"}) { + return true + } else if shared.StringInSlice(strings.ToLower(input), []string{"no", "n", "false"}) { + return false + } + + fmt.Printf("Invalid input, try again.\n\n") + } + } + + askChoice := func(question string, choices []string) string { + for { + fmt.Printf(question) + input, _ := reader.ReadString('\n') + input = strings.TrimSuffix(input, "\n") + if shared.StringInSlice(input, choices) { + return input + } + + fmt.Printf("Invalid input, try again.\n\n") + } + } + + askInt := func(question string, min int, max int) int { + for { + fmt.Printf(question) + input, _ := reader.ReadString('\n') + input = strings.TrimSuffix(input, "\n") + intInput, err := strconv.Atoi(input) + + if err == nil && (min == -1 || intInput >= min) && (max == -1 || intInput <= max) { + return intInput + } + + fmt.Printf("Invalid input, try again.\n\n") + } + } + + askString := func(question string) string { + for { + fmt.Printf(question) + input, _ := reader.ReadString('\n') + input = strings.TrimSuffix(input, "\n") + if len(input) != 0 { + return input + } + + fmt.Printf("Invalid input, try again.\n\n") + } + } + + askPassword := func(question string) string { + for { + fmt.Printf(question) + pwd, _ := terminal.ReadPassword(0) + fmt.Printf("\n") + inFirst := string(pwd) + inFirst = strings.TrimSuffix(inFirst, "\n") + + fmt.Printf("Again: ") + pwd, _ = terminal.ReadPassword(0) + fmt.Printf("\n") + inSecond := string(pwd) + inSecond = strings.TrimSuffix(inSecond, "\n") + + if len(inFirst) != 0 && inFirst == inSecond { + return inFirst + } + + fmt.Printf("Invalid input, try again.\n\n") + } + } + + // Confirm that LXD is online + c, err := lxd.NewClient(&lxd.DefaultConfig, "local") + if err != nil { + return fmt.Errorf("Unable to talk to LXD: %s", err) + } + + // Check that we have no containers or images in the store + containers, err := c.ListContainers() + if err != nil { + return fmt.Errorf("Unable to list the LXD containers: %s", err) + } + + images, err := c.ListImages() + if err != nil { + return fmt.Errorf("Unable to list the LXD images: %s", err) + } + + if len(containers) > 0 || len(images) > 0 { + return fmt.Errorf("You have existing containers or images. 
lxd init requires an empty LXD.")
+ }
+
+ if *argAuto {
+ // Do a bunch of sanity checks
+ if !shared.StringInSlice(*argStorageBackend, backendsSupported) {
+ return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", *argStorageBackend)
+ }
+
+ if !shared.StringInSlice(*argStorageBackend, backendsAvailable) {
+ return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", *argStorageBackend)
+ }
+
+ if *argStorageBackend == "dir" {
+ if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStoragePool != "" {
+ return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.")
+ }
+ }
+
+ if *argStorageBackend == "zfs" {
+ if *argStorageCreateLoop != -1 && *argStorageCreateDevice != "" {
+ return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified with the 'zfs' backend.")
+ }
+
+ if *argStoragePool == "" {
+ return fmt.Errorf("--storage-pool must be specified with the 'zfs' backend.")
+ }
+ }
+
+ if *argNetworkAddress == "" {
+ if *argNetworkPort != -1 {
+ return fmt.Errorf("--network-port cannot be used without --network-address.")
+ }
+ if *argTrustPassword != "" {
+ return fmt.Errorf("--trust-password cannot be used without --network-address.")
+ }
+ }
+
+ // Set the local variables
+ if *argStorageCreateDevice != "" {
+ storageMode = "device"
+ } else if *argStorageCreateLoop != -1 {
+ storageMode = "loop"
+ } else {
+ storageMode = "existing"
+ }
+
+ storageBackend = *argStorageBackend
+ storageLoopSize = *argStorageCreateLoop
+ storageDevice = *argStorageCreateDevice
+ storagePool = *argStoragePool
+ networkAddress = *argNetworkAddress
+ networkPort = *argNetworkPort
+ trustPassword = *argTrustPassword
+ } else {
+ storageBackend = askChoice("Name of the storage backend to use (dir or zfs): ", backendsSupported)
+
+ if !shared.StringInSlice(storageBackend, backendsSupported) {
+ return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storageBackend)
+ }
+
+ if !shared.StringInSlice(storageBackend, backendsAvailable) {
+ return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storageBackend)
+ }
+
+ if storageBackend == "zfs" {
+ if askBool("Create a new ZFS pool (yes/no)? ") {
+ storagePool = askString("Name of the new ZFS pool: ")
+ if askBool("Would you like to use an existing block device (yes/no)? ") {
+ storageDevice = askString("Path to the existing block device: ")
+ storageMode = "device"
+ } else {
+ storageLoopSize = askInt("Size in GB of the new loop device (1GB minimum): ", 1, -1)
+ storageMode = "loop"
+ }
+ } else {
+ storagePool = askString("Name of the existing ZFS pool or dataset: ")
+ storageMode = "existing"
+ }
+ }
+
+ if askBool("Would you like LXD to be available over the network (yes/no)? 
") { + networkAddress = askString("Address to bind LXD to: ") + networkPort = askInt("Port to bind LXD to: ", 1, 65535) + trustPassword = askPassword("Trust password for new clients: ") + } + } + + if !shared.StringInSlice(storageBackend, []string{"dir", "zfs"}) { + return fmt.Errorf("Invalid storage backend: %s", storageBackend) + } + + // Unset all storage keys, core.https_address and core.trust_password + for _, key := range []string{"core.https_address", "core.trust_password"} { + _, err = c.SetServerConfig(key, "") + if err != nil { + return err + } + } + + // Destroy any existing loop device + for _, file := range []string{"zfs.img"} { + os.Remove(shared.VarPath(file)) + } + + if storageBackend == "zfs" { + _ = exec.Command("modprobe", "zfs").Run() + + if storageMode == "loop" { + storageDevice = shared.VarPath("zfs.img") + f, err := os.Create(storageDevice) + if err != nil { + return fmt.Errorf("Failed to open %s: %s", storageDevice, err) + } + + err = f.Truncate(int64(storageLoopSize * 1024 * 1024 * 1024)) + if err != nil { + return fmt.Errorf("Failed to create sparse file %s: %s", storageDevice, err) + } + + err = f.Close() + if err != nil { + return fmt.Errorf("Failed to close %s: %s", storageDevice, err) + } + } + + if shared.StringInSlice(storageMode, []string{"loop", "device"}) { + output, err := exec.Command( + "zpool", + "create", storagePool, storageDevice, + "-f", "-m", "none").CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to create the ZFS pool: %s", output) + } + } + + // Configure LXD to use the pool + _, err = c.SetServerConfig("storage.zfs_pool_name", storagePool) + if err != nil { + return err + } + } + + if networkAddress != "" { + _, err = c.SetServerConfig("core.https_address", fmt.Sprintf("%s:%d", networkAddress, networkPort)) + if err != nil { + return err + } + + if trustPassword != "" { + _, err = c.SetServerConfig("core.trust_password", trustPassword) + if err != nil { + return err + } + } + } + + fmt.Printf("LXD has been successfully configured.\n") + return nil +} + +func printnet() error { + networks := map[string]shared.ContainerStateNetwork{} + + interfaces, err := net.Interfaces() + if err != nil { + return err + } + + stats := map[string][]int64{} + + content, err := ioutil.ReadFile("/proc/net/dev") + if err == nil { + for _, line := range strings.Split(string(content), "\n") { + fields := strings.Fields(line) + + if len(fields) != 17 { + continue + } + + rxBytes, err := strconv.ParseInt(fields[1], 10, 64) + if err != nil { + continue + } + + rxPackets, err := strconv.ParseInt(fields[2], 10, 64) + if err != nil { + continue + } + + txBytes, err := strconv.ParseInt(fields[9], 10, 64) + if err != nil { + continue + } + + txPackets, err := strconv.ParseInt(fields[10], 10, 64) + if err != nil { + continue + } + + intName := strings.TrimSuffix(fields[0], ":") + stats[intName] = []int64{rxBytes, rxPackets, txBytes, txPackets} + } + } + + for _, netIf := range interfaces { + netState := "down" + netType := "unknown" + + if netIf.Flags&net.FlagBroadcast > 0 { + netType = "broadcast" + } + + if netIf.Flags&net.FlagPointToPoint > 0 { + netType = "point-to-point" + } + + if netIf.Flags&net.FlagLoopback > 0 { + netType = "loopback" + } + + if netIf.Flags&net.FlagUp > 0 { + netState = "up" + } + + network := shared.ContainerStateNetwork{ + Addresses: []shared.ContainerStateNetworkAddress{}, + Counters: shared.ContainerStateNetworkCounters{}, + Hwaddr: netIf.HardwareAddr.String(), + Mtu: netIf.MTU, + State: netState, + Type: netType, + } + + addrs, 
err := netIf.Addrs() + if err == nil { + for _, addr := range addrs { + fields := strings.SplitN(addr.String(), "/", 2) + if len(fields) != 2 { + continue + } + + family := "inet" + if strings.Contains(fields[0], ":") { + family = "inet6" + } + + scope := "global" + if strings.HasPrefix(fields[0], "127") { + scope = "local" + } + + if fields[0] == "::1" { + scope = "local" + } + + if strings.HasPrefix(fields[0], "169.254") { + scope = "link" + } + + if strings.HasPrefix(fields[0], "fe80:") { + scope = "link" + } + + address := shared.ContainerStateNetworkAddress{} + address.Family = family + address.Address = fields[0] + address.Netmask = fields[1] + address.Scope = scope + + network.Addresses = append(network.Addresses, address) + } + } + + counters, ok := stats[netIf.Name] + if ok { + network.Counters.BytesReceived = counters[0] + network.Counters.PacketsReceived = counters[1] + network.Counters.BytesSent = counters[2] + network.Counters.PacketsSent = counters[3] + } + + networks[netIf.Name] = network + } + + buf, err := json.Marshal(networks) + if err != nil { + return err + } + + fmt.Printf("%s\n", buf) + + return nil +} === added file 'src/github.com/lxc/lxd/lxd/main_test.go' --- src/github.com/lxc/lxd/lxd/main_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/main_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,73 @@ +package main + +import ( + "io/ioutil" + "os" + "sync" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func mockStartDaemon() (*Daemon, error) { + d := &Daemon{ + IsMock: true, + imagesDownloading: map[string]chan bool{}, + imagesDownloadingLock: sync.RWMutex{}, + } + + if err := d.Init(); err != nil { + return nil, err + } + + // Call this after Init so we have a log object. + storageConfig := make(map[string]interface{}) + d.Storage = &storageLogWrapper{w: &storageMock{d: d}} + if _, err := d.Storage.Init(storageConfig); err != nil { + return nil, err + } + + return d, nil +} + +type lxdTestSuite struct { + suite.Suite + d *Daemon + Req *require.Assertions + tmpdir string +} + +func (suite *lxdTestSuite) SetupSuite() { + tmpdir, err := ioutil.TempDir("", "lxd_testrun_") + if err != nil { + os.Exit(1) + } + suite.tmpdir = tmpdir + + if err := os.Setenv("LXD_DIR", suite.tmpdir); err != nil { + os.Exit(1) + } + + suite.d, err = mockStartDaemon() + if err != nil { + os.Exit(1) + } +} + +func (suite *lxdTestSuite) TearDownSuite() { + suite.d.Stop() + + err := os.RemoveAll(suite.tmpdir) + if err != nil { + os.Exit(1) + } +} + +func (suite *lxdTestSuite) SetupTest() { + suite.Req = require.New(suite.T()) +} + +func TestLxdTestSuite(t *testing.T) { + suite.Run(t, new(lxdTestSuite)) +} === added file 'src/github.com/lxc/lxd/lxd/migrate.go' --- src/github.com/lxc/lxd/lxd/migrate.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/migrate.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,725 @@ +// Package migration provides the primitives for migration in LXD. +// +// See https://github.com/lxc/lxd/blob/master/specs/migration.md for a complete +// description. 
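+//
+// For orientation, the handshake implemented below goes roughly like this (a
+// sketch, not a normative description): the source allocates one-time secrets
+// for a control websocket, an fs websocket and, for live migration, a criu
+// websocket; the sink dials each one by appending the secret to the operation
+// URL, mirroring connectWithSecret:
+//
+//	query := url.Values{"secret": []string{secret}}
+//	wsUrl := fmt.Sprintf("wss://%s/websocket?%s",
+//		strings.TrimPrefix(opUrl, "https://"), query.Encode())
+//	conn, err := lxd.WebsocketDial(dialer, wsUrl)
+//
+// (opUrl here is an illustrative name for the https URL of the migration
+// operation.) Both sides then exchange a MigrationHeader to agree on a
+// filesystem transfer type, falling back to rsync when the storage backends
+// differ, and finish with a MigrationControl message carrying success or an
+// error string.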
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/gorilla/websocket"
+ "gopkg.in/lxc/go-lxc.v2"
+
+ "github.com/lxc/lxd"
+ "github.com/lxc/lxd/shared"
+)
+
+type migrationFields struct {
+ live bool
+
+ controlSecret string
+ controlConn *websocket.Conn
+ controlLock sync.Mutex
+
+ criuSecret string
+ criuConn *websocket.Conn
+
+ fsSecret string
+ fsConn *websocket.Conn
+
+ container container
+}
+
+func (c *migrationFields) send(m proto.Message) error {
+ /* gorilla websocket doesn't allow concurrent writes, and
+ * panic()s if it sees them (which is reasonable). If e.g. we
+ * happen to fail, get scheduled, start our write, then get
+ * unscheduled before the write finishes while a new thread is
+ * receiving an error from the other side (due to our previous
+ * close), we can engage in concurrent writes, which
+ * causes the whole daemon to panic.
+ *
+ * Instead, let's lock sends to the controlConn so that we only ever
+ * write one message at a time.
+ */
+ c.controlLock.Lock()
+ defer c.controlLock.Unlock()
+ w, err := c.controlConn.NextWriter(websocket.BinaryMessage)
+ if err != nil {
+ return err
+ }
+ defer w.Close()
+
+ data, err := proto.Marshal(m)
+ if err != nil {
+ return err
+ }
+
+ return shared.WriteAll(w, data)
+}
+
+func findCriu(host string) error {
+ _, err := exec.LookPath("criu")
+ if err != nil {
+ return fmt.Errorf("CRIU is required for live migration but its binary couldn't be found on the %s server. Is it installed in LXD's path?", host)
+ }
+
+ return nil
+}
+
+func (c *migrationFields) recv(m proto.Message) error {
+ mt, r, err := c.controlConn.NextReader()
+ if err != nil {
+ return err
+ }
+
+ if mt != websocket.BinaryMessage {
+ return fmt.Errorf("Only binary messages allowed")
+ }
+
+ buf, err := ioutil.ReadAll(r)
+ if err != nil {
+ return err
+ }
+
+ return proto.Unmarshal(buf, m)
+}
+
+func (c *migrationFields) disconnect() {
+ closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
+
+ c.controlLock.Lock()
+ if c.controlConn != nil {
+ c.controlConn.WriteMessage(websocket.CloseMessage, closeMsg)
+ c.controlConn = nil /* don't close twice */
+ }
+ c.controlLock.Unlock()
+
+ /* Below we just Close(), which doesn't actually write to the
+ * websocket, it just closes the underlying connection. If e.g. there
+ * is still a filesystem transfer going on, but the other side has run
+ * out of disk space, writing an actual CloseMessage here will cause
+ * gorilla websocket to panic. Instead, we just force close this
+ * connection, since we report the error over the control channel
+ * anyway.
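+ *
+ * (The CloseMessage write above is itself serialized via controlLock for
+ * the same reason send() takes the lock: gorilla/websocket panics on
+ * concurrent writers to the same connection.)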
+ */ + if c.fsConn != nil { + c.fsConn.Close() + } + + if c.criuConn != nil { + c.criuConn.Close() + } +} + +func (c *migrationFields) sendControl(err error) { + message := "" + if err != nil { + message = err.Error() + } + + msg := MigrationControl{ + Success: proto.Bool(err == nil), + Message: proto.String(message), + } + c.send(&msg) + + if err != nil { + c.disconnect() + } +} + +func (c *migrationFields) controlChannel() <-chan MigrationControl { + ch := make(chan MigrationControl) + go func() { + msg := MigrationControl{} + err := c.recv(&msg) + if err != nil { + shared.Debugf("Got error reading migration control socket %s", err) + close(ch) + return + } + ch <- msg + }() + + return ch +} + +func CollectCRIULogFile(c container, imagesDir string, function string, method string) error { + t := time.Now().Format(time.RFC3339) + newPath := shared.LogPath(c.Name(), fmt.Sprintf("%s_%s_%s.log", function, method, t)) + return shared.FileCopy(filepath.Join(imagesDir, fmt.Sprintf("%s.log", method)), newPath) +} + +func GetCRIULogErrors(imagesDir string, method string) (string, error) { + f, err := os.Open(path.Join(imagesDir, fmt.Sprintf("%s.log", method))) + if err != nil { + return "", err + } + + defer f.Close() + + scanner := bufio.NewScanner(f) + ret := []string{} + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "Error") { + ret = append(ret, scanner.Text()) + } + } + + return strings.Join(ret, "\n"), nil +} + +type migrationSourceWs struct { + migrationFields + + allConnected chan bool +} + +func NewMigrationSource(c container) (*migrationSourceWs, error) { + ret := migrationSourceWs{migrationFields{container: c}, make(chan bool, 1)} + + var err error + ret.controlSecret, err = shared.RandomCryptoString() + if err != nil { + return nil, err + } + + ret.fsSecret, err = shared.RandomCryptoString() + if err != nil { + return nil, err + } + + if c.IsRunning() { + if err := findCriu("source"); err != nil { + return nil, err + } + + ret.live = true + ret.criuSecret, err = shared.RandomCryptoString() + if err != nil { + return nil, err + } + } + + return &ret, nil +} + +func (s *migrationSourceWs) Metadata() interface{} { + secrets := shared.Jmap{ + "control": s.controlSecret, + "fs": s.fsSecret, + } + + if s.criuSecret != "" { + secrets["criu"] = s.criuSecret + } + + return secrets +} + +func (s *migrationSourceWs) Connect(op *operation, r *http.Request, w http.ResponseWriter) error { + secret := r.FormValue("secret") + if secret == "" { + return fmt.Errorf("missing secret") + } + + var conn **websocket.Conn + + switch secret { + case s.controlSecret: + conn = &s.controlConn + case s.criuSecret: + conn = &s.criuConn + case s.fsSecret: + conn = &s.fsConn + default: + /* If we didn't find the right secret, the user provided a bad one, + * which 403, not 404, since this operation actually exists */ + return os.ErrPermission + } + + c, err := shared.WebsocketUpgrader.Upgrade(w, r, nil) + if err != nil { + return err + } + + *conn = c + + if s.controlConn != nil && (!s.live || s.criuConn != nil) && s.fsConn != nil { + s.allConnected <- true + } + + return nil +} + +func (s *migrationSourceWs) Do(op *operation) error { + <-s.allConnected + + criuType := CRIUType_CRIU_RSYNC.Enum() + if !s.live { + criuType = nil + + err := s.container.StorageStart() + if err != nil { + return err + } + + defer s.container.StorageStop() + } + + idmaps := make([]*IDMapType, 0) + + idmapset := s.container.IdmapSet() + if idmapset != nil { + for _, ctnIdmap := range idmapset.Idmap { + idmap := 
IDMapType{
+ Isuid: proto.Bool(ctnIdmap.Isuid),
+ Isgid: proto.Bool(ctnIdmap.Isgid),
+ Hostid: proto.Int(ctnIdmap.Hostid),
+ Nsid: proto.Int(ctnIdmap.Nsid),
+ Maprange: proto.Int(ctnIdmap.Maprange),
+ }
+
+ idmaps = append(idmaps, &idmap)
+ }
+ }
+
+ sources, fsErr := s.container.Storage().MigrationSource(s.container)
+ /* the protocol says we have to send a header no matter what, so let's
+ * do that, but then immediately send an error.
+ */
+ snapshots := []string{}
+ if fsErr == nil {
+ /* A bit of a special case here: doing lxc launch
+ * host2:c1/snap1 host1:container we're sending a snapshot, but
+ * it ends up as the container on the other end. So, we want to
+ * send it as the main container (i.e. ignore its IsSnapshot()).
+ */
+ if len(sources) > 1 {
+ for _, snap := range sources {
+ if !snap.IsSnapshot() {
+ continue
+ }
+ name := shared.ExtractSnapshotName(snap.Name())
+ snapshots = append(snapshots, name)
+ }
+ }
+ }
+
+ myType := s.container.Storage().MigrationType()
+ header := MigrationHeader{
+ Fs: &myType,
+ Criu: criuType,
+ Idmap: idmaps,
+ Snapshots: snapshots,
+ }
+
+ if err := s.send(&header); err != nil {
+ s.sendControl(err)
+ return err
+ }
+
+ if fsErr != nil {
+ s.sendControl(fsErr)
+ return fsErr
+ }
+
+ if err := s.recv(&header); err != nil {
+ s.sendControl(err)
+ return err
+ }
+
+ if *header.Fs != myType {
+ myType = MigrationFSType_RSYNC
+ header.Fs = &myType
+
+ sources, _ = rsyncMigrationSource(s.container)
+ }
+
+ if s.live {
+ if header.Criu == nil {
+ err := fmt.Errorf("Got no CRIU socket type for live migration")
+ s.sendControl(err)
+ return err
+ } else if *header.Criu != CRIUType_CRIU_RSYNC {
+ err := fmt.Errorf("Formats other than criu rsync not understood")
+ s.sendControl(err)
+ return err
+ }
+
+ checkpointDir, err := ioutil.TempDir("", "lxd_checkpoint_")
+ if err != nil {
+ s.sendControl(err)
+ return err
+ }
+ defer os.RemoveAll(checkpointDir)
+
+ opts := lxc.CheckpointOptions{Stop: true, Directory: checkpointDir, Verbose: true}
+ err = s.container.Checkpoint(opts)
+
+ if err2 := CollectCRIULogFile(s.container, checkpointDir, "migration", "dump"); err2 != nil {
+ shared.Debugf("Error collecting checkpoint log file %s", err2)
+ }
+
+ if err != nil {
+ log, err2 := GetCRIULogErrors(checkpointDir, "dump")
+
+ /* couldn't find the CRIU log file which means we
+ * didn't even get that far; give back the liblxc
+ * error. */
+ if err2 != nil {
+ log = err.Error()
+ }
+
+ err = fmt.Errorf("checkpoint failed:\n%s", log)
+ s.sendControl(err)
+ return err
+ }
+
+ /*
+ * We do this serially right now, but there's really no reason for us
+ * to; since we have separate websockets, we can do it in parallel if
+ * we wanted to. However, assuming we're network bound, there's really
+ * no reason to do these in parallel. In the future when we're using
+ * p.haul's protocol, it will make sense to do these in parallel.
+ */
+ if err := RsyncSend(shared.AddSlash(checkpointDir), s.criuConn); err != nil {
+ s.sendControl(err)
+ return err
+ }
+ }
+
+ for _, source := range sources {
+ shared.Debugf("sending fs object %s", source.Name())
+ if err := source.Send(s.fsConn); err != nil {
+ s.sendControl(err)
+ return err
+ }
+ }
+
+ msg := MigrationControl{}
+ if err := s.recv(&msg); err != nil {
+ s.disconnect()
+ return err
+ }
+
+ // TODO: should we add some config here about automatically restarting
+ // the container on migrate failure? What about the failures above?
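+ // (Note: the generated accessors in migrate.pb.go, msg.GetSuccess() and
+ // msg.GetMessage(), are nil-safe alternatives to the explicit
+ // dereferences below.)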
+ if !*msg.Success {
+ return fmt.Errorf(*msg.Message)
+ }
+
+ return nil
+}
+
+type migrationSink struct {
+ migrationFields
+
+ url string
+ dialer websocket.Dialer
+}
+
+type MigrationSinkArgs struct {
+ Url string
+ Dialer websocket.Dialer
+ Container container
+ Secrets map[string]string
+}
+
+func NewMigrationSink(args *MigrationSinkArgs) (func() error, error) {
+ sink := migrationSink{
+ migrationFields{container: args.Container},
+ args.Url,
+ args.Dialer,
+ }
+
+ var ok bool
+ sink.controlSecret, ok = args.Secrets["control"]
+ if !ok {
+ return nil, fmt.Errorf("Missing control secret")
+ }
+
+ sink.fsSecret, ok = args.Secrets["fs"]
+ if !ok {
+ return nil, fmt.Errorf("Missing fs secret")
+ }
+
+ sink.criuSecret, ok = args.Secrets["criu"]
+ sink.live = ok
+
+ if err := findCriu("destination"); sink.live && err != nil {
+ return nil, err
+ }
+
+ return sink.do, nil
+}
+
+func (c *migrationSink) connectWithSecret(secret string) (*websocket.Conn, error) {
+ query := url.Values{"secret": []string{secret}}
+
+ // The URL is a https URL to the operation, mangle to be a wss URL to the secret
+ wsUrl := fmt.Sprintf("wss://%s/websocket?%s", strings.TrimPrefix(c.url, "https://"), query.Encode())
+
+ return lxd.WebsocketDial(c.dialer, wsUrl)
+}
+
+func (c *migrationSink) do() error {
+ var err error
+ c.controlConn, err = c.connectWithSecret(c.controlSecret)
+ if err != nil {
+ return err
+ }
+ defer c.disconnect()
+
+ c.fsConn, err = c.connectWithSecret(c.fsSecret)
+ if err != nil {
+ c.sendControl(err)
+ return err
+ }
+
+ if c.live {
+ c.criuConn, err = c.connectWithSecret(c.criuSecret)
+ if err != nil {
+ c.sendControl(err)
+ return err
+ }
+ }
+
+ header := MigrationHeader{}
+ if err := c.recv(&header); err != nil {
+ c.sendControl(err)
+ return err
+ }
+
+ criuType := CRIUType_CRIU_RSYNC.Enum()
+ if !c.live {
+ criuType = nil
+ }
+
+ mySink := c.container.Storage().MigrationSink
+ myType := c.container.Storage().MigrationType()
+ resp := MigrationHeader{
+ Fs: &myType,
+ Criu: criuType,
+ }
+
+ // If the storage type the source has doesn't match what we have, then
+ // we have to use rsync.
+ if *header.Fs != *resp.Fs {
+ mySink = rsyncMigrationSink
+ myType = MigrationFSType_RSYNC
+ resp.Fs = &myType
+ }
+
+ if err := c.send(&resp); err != nil {
+ c.sendControl(err)
+ return err
+ }
+
+ restore := make(chan error)
+ go func(c *migrationSink) {
+ imagesDir := ""
+ srcIdmap := new(shared.IdmapSet)
+
+ if c.live {
+ var err error
+ imagesDir, err = ioutil.TempDir("", "lxd_restore_")
+ if err != nil {
+ os.RemoveAll(imagesDir)
+ c.sendControl(err)
+ return
+ }
+
+ defer func() {
+ err := CollectCRIULogFile(c.container, imagesDir, "migration", "restore")
+ /*
+ * If the restore fails before CRIU is invoked, we won't have
+ * any log to collect, so don't warn about that.
+ */
+ if err != nil && !os.IsNotExist(err) {
+ shared.Debugf("Error collecting migration log file %s", err)
+ }
+
+ os.RemoveAll(imagesDir)
+ }()
+
+ if err := RsyncRecv(shared.AddSlash(imagesDir), c.criuConn); err != nil {
+ restore <- err
+ os.RemoveAll(imagesDir)
+ c.sendControl(err)
+ return
+ }
+
+ /*
+ * For unprivileged containers we need to shift the
+ * perms on the images so that they can be
+ * opened by the process after it is in its user
+ * namespace.
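+ *
+ * (For example, with an idmap that maps container root to host uid
+ * 100000, files restored on the host as uid 0 must be shifted to
+ * 100000 before the container's user namespace can read them;
+ * ShiftRootfs below walks the tree applying exactly that mapping.)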
+ */ + if !c.container.IsPrivileged() { + if err := c.container.IdmapSet().ShiftRootfs(imagesDir); err != nil { + restore <- err + os.RemoveAll(imagesDir) + c.sendControl(err) + return + } + } + } + + snapshots := []container{} + for _, snap := range header.Snapshots { + // TODO: we need to propagate snapshot configurations + // as well. Right now the container configuration is + // done through the initial migration post. Should we + // post the snapshots and their configs as well, or do + // it some other way? + name := c.container.Name() + shared.SnapshotDelimiter + snap + args := containerArgs{ + Ctype: cTypeSnapshot, + Config: c.container.LocalConfig(), + Profiles: c.container.Profiles(), + Ephemeral: c.container.IsEphemeral(), + Architecture: c.container.Architecture(), + Devices: c.container.LocalDevices(), + Name: name, + } + + ct, err := containerCreateEmptySnapshot(c.container.Daemon(), args) + if err != nil { + restore <- err + c.sendControl(err) + return + } + snapshots = append(snapshots, ct) + } + + for _, idmap := range header.Idmap { + e := shared.IdmapEntry{ + Isuid: *idmap.Isuid, + Isgid: *idmap.Isgid, + Nsid: int(*idmap.Nsid), + Hostid: int(*idmap.Hostid), + Maprange: int(*idmap.Maprange)} + srcIdmap.Idmap = shared.Extend(srcIdmap.Idmap, e) + } + + if err := mySink(c.container, snapshots, c.fsConn); err != nil { + restore <- err + c.sendControl(err) + return + } + + if err := ShiftIfNecessary(c.container, srcIdmap); err != nil { + restore <- err + c.sendControl(err) + return + } + + for _, snap := range snapshots { + if err := ShiftIfNecessary(snap, srcIdmap); err != nil { + restore <- err + c.sendControl(err) + return + } + } + + if c.live { + err := c.container.StartFromMigration(imagesDir) + if err != nil { + log, err2 := GetCRIULogErrors(imagesDir, "restore") + /* restore failed before CRIU was invoked, give + * back the liblxc error */ + if err2 != nil { + log = err.Error() + } + err = fmt.Errorf("restore failed:\n%s", log) + } + + restore <- err + } else { + restore <- nil + } + }(c) + + source := c.controlChannel() + + for { + select { + case err = <-restore: + c.sendControl(err) + return err + case msg, ok := <-source: + if !ok { + c.disconnect() + return fmt.Errorf("Got error reading source") + } + if !*msg.Success { + c.disconnect() + return fmt.Errorf(*msg.Message) + } else { + // The source can only tell us it failed (e.g. if + // checkpointing failed). We have to tell the source + // whether or not the restore was successful. + shared.Debugf("Unknown message %v from source", msg) + } + } + } +} + +/* + * Similar to forkstart, this is called when lxd is invoked as: + * + * lxd forkmigrate + * + * liblxc's restore() sets up the processes in such a way that the monitor ends + * up being a child of the process that calls it, in our case lxd. However, we + * really want the monitor to be daemonized, so we fork again. Additionally, we + * want to fork for the same reasons we do forkstart (i.e. reduced memory + * footprint when we fork tasks that will never free golang's memory, etc.) 
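+ *
+ * The daemon invokes it as (argument order per MigrateContainer below;
+ * the values themselves are illustrative):
+ *
+ * lxd forkmigrate <name> <lxcpath> <configpath> <imagesdir>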
+ */ +func MigrateContainer(args []string) error { + if len(args) != 5 { + return fmt.Errorf("Bad arguments %q", args) + } + + name := args[1] + lxcpath := args[2] + configPath := args[3] + imagesDir := args[4] + + defer os.Remove(configPath) + + c, err := lxc.NewContainer(name, lxcpath) + if err != nil { + return err + } + + if err := c.LoadConfigFile(configPath); err != nil { + return err + } + + /* see https://github.com/golang/go/issues/13155, startContainer, and dc3a229 */ + os.Stdin.Close() + os.Stdout.Close() + os.Stderr.Close() + + return c.Restore(lxc.RestoreOptions{ + Directory: imagesDir, + Verbose: true, + }) +} === added file 'src/github.com/lxc/lxd/lxd/migrate.pb.go' --- src/github.com/lxc/lxd/lxd/migrate.pb.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/migrate.pb.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,210 @@ +// Code generated by protoc-gen-go. +// source: lxd/migrate.proto +// DO NOT EDIT! + +/* +Package main is a generated protocol buffer package. + +It is generated from these files: + lxd/migrate.proto + +It has these top-level messages: + IDMapType + MigrationHeader + MigrationControl +*/ +package main + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MigrationFSType int32 + +const ( + MigrationFSType_RSYNC MigrationFSType = 0 + MigrationFSType_BTRFS MigrationFSType = 1 + MigrationFSType_ZFS MigrationFSType = 2 +) + +var MigrationFSType_name = map[int32]string{ + 0: "RSYNC", + 1: "BTRFS", + 2: "ZFS", +} +var MigrationFSType_value = map[string]int32{ + "RSYNC": 0, + "BTRFS": 1, + "ZFS": 2, +} + +func (x MigrationFSType) Enum() *MigrationFSType { + p := new(MigrationFSType) + *p = x + return p +} +func (x MigrationFSType) String() string { + return proto.EnumName(MigrationFSType_name, int32(x)) +} +func (x *MigrationFSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MigrationFSType_value, data, "MigrationFSType") + if err != nil { + return err + } + *x = MigrationFSType(value) + return nil +} + +type CRIUType int32 + +const ( + CRIUType_CRIU_RSYNC CRIUType = 0 + CRIUType_PHAUL CRIUType = 1 +) + +var CRIUType_name = map[int32]string{ + 0: "CRIU_RSYNC", + 1: "PHAUL", +} +var CRIUType_value = map[string]int32{ + "CRIU_RSYNC": 0, + "PHAUL": 1, +} + +func (x CRIUType) Enum() *CRIUType { + p := new(CRIUType) + *p = x + return p +} +func (x CRIUType) String() string { + return proto.EnumName(CRIUType_name, int32(x)) +} +func (x *CRIUType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CRIUType_value, data, "CRIUType") + if err != nil { + return err + } + *x = CRIUType(value) + return nil +} + +type IDMapType struct { + Isuid *bool `protobuf:"varint,1,req,name=isuid" json:"isuid,omitempty"` + Isgid *bool `protobuf:"varint,2,req,name=isgid" json:"isgid,omitempty"` + Hostid *int32 `protobuf:"varint,3,req,name=hostid" json:"hostid,omitempty"` + Nsid *int32 `protobuf:"varint,4,req,name=nsid" json:"nsid,omitempty"` + Maprange *int32 `protobuf:"varint,5,req,name=maprange" json:"maprange,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IDMapType) Reset() { *m = IDMapType{} } +func (m *IDMapType) String() string { return proto.CompactTextString(m) } +func (*IDMapType) ProtoMessage() {} + +func (m *IDMapType) GetIsuid() bool { + if m != nil && m.Isuid != nil { + return *m.Isuid + } + return false +} + +func (m *IDMapType) GetIsgid() bool { + if m != nil && 
m.Isgid != nil { + return *m.Isgid + } + return false +} + +func (m *IDMapType) GetHostid() int32 { + if m != nil && m.Hostid != nil { + return *m.Hostid + } + return 0 +} + +func (m *IDMapType) GetNsid() int32 { + if m != nil && m.Nsid != nil { + return *m.Nsid + } + return 0 +} + +func (m *IDMapType) GetMaprange() int32 { + if m != nil && m.Maprange != nil { + return *m.Maprange + } + return 0 +} + +type MigrationHeader struct { + Fs *MigrationFSType `protobuf:"varint,1,req,name=fs,enum=main.MigrationFSType" json:"fs,omitempty"` + Criu *CRIUType `protobuf:"varint,2,opt,name=criu,enum=main.CRIUType" json:"criu,omitempty"` + Idmap []*IDMapType `protobuf:"bytes,3,rep,name=idmap" json:"idmap,omitempty"` + Snapshots []string `protobuf:"bytes,4,rep,name=snapshots" json:"snapshots,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MigrationHeader) Reset() { *m = MigrationHeader{} } +func (m *MigrationHeader) String() string { return proto.CompactTextString(m) } +func (*MigrationHeader) ProtoMessage() {} + +func (m *MigrationHeader) GetFs() MigrationFSType { + if m != nil && m.Fs != nil { + return *m.Fs + } + return MigrationFSType_RSYNC +} + +func (m *MigrationHeader) GetCriu() CRIUType { + if m != nil && m.Criu != nil { + return *m.Criu + } + return CRIUType_CRIU_RSYNC +} + +func (m *MigrationHeader) GetIdmap() []*IDMapType { + if m != nil { + return m.Idmap + } + return nil +} + +func (m *MigrationHeader) GetSnapshots() []string { + if m != nil { + return m.Snapshots + } + return nil +} + +type MigrationControl struct { + Success *bool `protobuf:"varint,1,req,name=success" json:"success,omitempty"` + // optional failure message if sending a failure + Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MigrationControl) Reset() { *m = MigrationControl{} } +func (m *MigrationControl) String() string { return proto.CompactTextString(m) } +func (*MigrationControl) ProtoMessage() {} + +func (m *MigrationControl) GetSuccess() bool { + if m != nil && m.Success != nil { + return *m.Success + } + return false +} + +func (m *MigrationControl) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +func init() { + proto.RegisterEnum("main.MigrationFSType", MigrationFSType_name, MigrationFSType_value) + proto.RegisterEnum("main.CRIUType", CRIUType_name, CRIUType_value) +} === added file 'src/github.com/lxc/lxd/lxd/migrate.proto' --- src/github.com/lxc/lxd/lxd/migrate.proto 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/migrate.proto 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +package main; + +enum MigrationFSType { + RSYNC = 0; + BTRFS = 1; + ZFS = 2; +} + +enum CRIUType { + CRIU_RSYNC = 0; + PHAUL = 1; +} + +message IDMapType { + required bool isuid = 1; + required bool isgid = 2; + required int32 hostid = 3; + required int32 nsid = 4; + required int32 maprange = 5; +} + +message MigrationHeader { + required MigrationFSType fs = 1; + optional CRIUType criu = 2; + repeated IDMapType idmap = 3; + + repeated string snapshots = 4; +} + +message MigrationControl { + required bool success = 1; + + /* optional failure message if sending a failure */ + optional string message = 2; +} === added file 'src/github.com/lxc/lxd/lxd/networks.go' --- src/github.com/lxc/lxd/lxd/networks.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/networks.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,131 @@ +package main + +import ( + "fmt" + "net" + "net/http" + "strconv" + + 
"github.com/gorilla/mux" + + "github.com/lxc/lxd/shared" +) + +func networksGet(d *Daemon, r *http.Request) Response { + recursionStr := r.FormValue("recursion") + recursion, err := strconv.Atoi(recursionStr) + if err != nil { + recursion = 0 + } + + ifs, err := net.Interfaces() + if err != nil { + return InternalError(err) + } + + resultString := []string{} + resultMap := []network{} + for _, iface := range ifs { + if recursion == 0 { + resultString = append(resultString, fmt.Sprintf("/%s/networks/%s", shared.APIVersion, iface.Name)) + } else { + net, err := doNetworkGet(d, iface.Name) + if err != nil { + continue + } + resultMap = append(resultMap, net) + + } + } + + if recursion == 0 { + return SyncResponse(true, resultString) + } + + return SyncResponse(true, resultMap) +} + +var networksCmd = Command{name: "networks", get: networksGet} + +type network struct { + Name string `json:"name"` + Type string `json:"type"` + UsedBy []string `json:"used_by"` +} + +func isOnBridge(c container, bridge string) bool { + for _, device := range c.ExpandedDevices() { + if device["type"] != "nic" { + continue + } + + if !shared.StringInSlice(device["nictype"], []string{"bridged", "macvlan"}) { + continue + } + + if device["parent"] == "" { + continue + } + + if device["parent"] == bridge { + return true + } + } + + return false +} + +func networkGet(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + n, err := doNetworkGet(d, name) + if err != nil { + return InternalError(err) + } + + return SyncResponse(true, &n) +} + +func doNetworkGet(d *Daemon, name string) (network, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return network{}, err + } + + // Prepare the response + n := network{} + n.Name = iface.Name + n.UsedBy = []string{} + + // Look for containers using the interface + cts, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return network{}, err + } + + for _, ct := range cts { + c, err := containerLoadByName(d, ct) + if err != nil { + return network{}, err + } + + if isOnBridge(c, n.Name) { + n.UsedBy = append(n.UsedBy, fmt.Sprintf("/%s/containers/%s", shared.APIVersion, ct)) + } + } + + // Set the device type as needed + if shared.IsLoopback(iface) { + n.Type = "loopback" + } else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/bridge", n.Name)) { + n.Type = "bridge" + } else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/device", n.Name)) { + n.Type = "physical" + } else { + n.Type = "unknown" + } + + return n, nil +} + +var networkCmd = Command{name: "networks/{name}", get: networkGet} === added file 'src/github.com/lxc/lxd/lxd/nsexec.go' --- src/github.com/lxc/lxd/lxd/nsexec.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/nsexec.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,402 @@ +/** + * This file is a bit funny. The goal here is to use setns() to manipulate + * files inside the container, so we don't have to reason about the paths to + * make sure they don't escape (we can simply rely on the kernel for + * correctness). Unfortunately, you can't setns() to a mount namespace with a + * multi-threaded program, which every golang binary is. However, by declaring + * our init as an initializer, we can capture process control before it is + * transferred to the golang runtime, so we can then setns() as we'd like + * before golang has a chance to set up any threads. So, we implement two new + * lxd fork* commands which are captured here, and take a file on the host fs + * and copy it into the container ns. 
+ *
+ * An alternative to this would be to move this code into a separate binary,
+ * which of course has problems of its own when it comes to packaging (how do
+ * we find the binary, what do we do if someone does file push and it is
+ * missing, etc.). After some discussion, even though the embedded method is
+ * somewhat convoluted, it was preferred.
+ */
+package main
+
+/*
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <libgen.h>
+#include <limits.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// This expects:
+// ./lxd forkputfile /source/path /target/path
+// or
+// ./lxd forkgetfile /target/path /source/path
+// i.e. 8 arguments, each of which has a max length of PATH_MAX.
+// Unfortunately, lseek() and fstat() both fail (EINVAL and 0 size) for
+// procfs. Also, we can't mmap, because procfs doesn't support that, either.
+//
+#define CMDLINE_SIZE (8 * PATH_MAX)
+
+int mkdir_p(const char *dir, mode_t mode)
+{
+ const char *tmp = dir;
+ const char *orig = dir;
+ char *makeme;
+
+ do {
+ dir = tmp + strspn(tmp, "/");
+ tmp = dir + strcspn(dir, "/");
+ makeme = strndup(orig, dir - orig);
+ if (*makeme) {
+ if (mkdir(makeme, mode) && errno != EEXIST) {
+ fprintf(stderr, "failed to create directory '%s'", makeme);
+ free(makeme);
+ return -1;
+ }
+ }
+ free(makeme);
+ } while(tmp != dir);
+
+ return 0;
+}
+
+int copy(int target, int source)
+{
+ ssize_t n;
+ char buf[1024];
+
+ if (ftruncate(target, 0) < 0) {
+ perror("truncate");
+ return -1;
+ }
+
+ while ((n = read(source, buf, 1024)) > 0) {
+ if (write(target, buf, n) != n) {
+ perror("write");
+ return -1;
+ }
+ }
+
+ if (n < 0) {
+ perror("read");
+ return -1;
+ }
+
+ return 0;
+}
+
+int dosetns(int pid, char *nstype) {
+ int mntns;
+ char buf[PATH_MAX];
+
+ sprintf(buf, "/proc/%d/ns/%s", pid, nstype);
+ mntns = open(buf, O_RDONLY);
+ if (mntns < 0) {
+ perror("open mntns");
+ return -1;
+ }
+
+ if (setns(mntns, 0) < 0) {
+ perror("setns");
+ close(mntns);
+ return -1;
+ }
+ close(mntns);
+
+ return 0;
+}
+
+int manip_file_in_ns(char *rootfs, int pid, char *host, char *container, bool is_put, uid_t uid, gid_t gid, mode_t mode) {
+ int host_fd, container_fd;
+ int ret = -1;
+ int container_open_flags;
+
+ host_fd = open(host, O_RDWR);
+ if (host_fd < 0) {
+ perror("open host");
+ return -1;
+ }
+
+ container_open_flags = O_RDWR;
+ if (is_put)
+ container_open_flags |= O_CREAT;
+
+ if (pid > 0) {
+ if (dosetns(pid, "mnt") < 0)
+ goto close_host;
+ } else {
+ if (chroot(rootfs) < 0)
+ goto close_host;
+
+ if (chdir("/") < 0)
+ goto close_host;
+ }
+
+ container_fd = open(container, container_open_flags, mode);
+ if (container_fd < 0) {
+ fprintf(stderr, "%s\n", strerror(errno));
+ goto close_host;
+ }
+
+ if (is_put) {
+ if (copy(container_fd, host_fd) < 0)
+ goto close_container;
+
+ if (fchown(container_fd, uid, gid) < 0) {
+ perror("fchown");
+ goto close_container;
+ }
+
+ ret = 0;
+ } else
+ ret = copy(host_fd, container_fd);
+
+close_container:
+ close(container_fd);
+close_host:
+ close(host_fd);
+ return ret;
+}
+
+#define ADVANCE_ARG_REQUIRED() \
+ do { \
+ while (*cur != 0) \
+ cur++; \
+ cur++; \
+ if (size <= cur - buf) { \
+ fprintf(stderr, "not enough arguments\n"); \
+ _exit(1); \
+ } \
+ } while(0)
+
+void ensure_dir(char *dest) {
+ struct stat sb;
+ if (stat(dest, &sb) == 0) {
+ if ((sb.st_mode & S_IFMT) == S_IFDIR)
+ return;
+ if (unlink(dest) < 0) {
+ fprintf(stderr, "Failed to remove old %s: %s\n", dest, strerror(errno));
+ _exit(1);
+ }
+ }
+ if
(mkdir(dest, 0755) < 0) { + fprintf(stderr, "Failed to mkdir %s: %s\n", dest, strerror(errno)); + _exit(1); + } +} + +void ensure_file(char *dest) { + struct stat sb; + int fd; + + if (stat(dest, &sb) == 0) { + if ((sb.st_mode & S_IFMT) != S_IFDIR) + return; + if (rmdir(dest) < 0) { + fprintf(stderr, "Failed to remove old %s: %s\n", dest, strerror(errno)); + _exit(1); + } + } + + fd = creat(dest, 0755); + if (fd < 0) { + fprintf(stderr, "Failed to mkdir %s: %s\n", dest, strerror(errno)); + _exit(1); + } + close(fd); +} + +void create(char *src, char *dest) { + char *destdirname; + struct stat sb; + if (stat(src, &sb) < 0) { + fprintf(stderr, "source %s does not exist\n", src); + _exit(1); + } + + destdirname = strdup(dest); + destdirname = dirname(destdirname); + + if (mkdir_p(destdirname, 0755) < 0) { + fprintf(stderr, "failed to create path: %s\n", destdirname); + free(destdirname); + _exit(1); + } + + switch (sb.st_mode & S_IFMT) { + case S_IFDIR: + ensure_dir(dest); + return; + default: + ensure_file(dest); + return; + } + + free(destdirname); +} + +void forkmount(char *buf, char *cur, ssize_t size) { + char *src, *dest, *opts; + + ADVANCE_ARG_REQUIRED(); + int pid = atoi(cur); + + if (dosetns(pid, "mnt") < 0) { + fprintf(stderr, "Failed setns to container mount namespace: %s\n", strerror(errno)); + _exit(1); + } + + ADVANCE_ARG_REQUIRED(); + src = cur; + + ADVANCE_ARG_REQUIRED(); + dest = cur; + + create(src, dest); + + if (access(src, F_OK) < 0) { + fprintf(stderr, "Mount source doesn't exist: %s\n", strerror(errno)); + _exit(1); + } + + if (access(dest, F_OK) < 0) { + fprintf(stderr, "Mount destination doesn't exist: %s\n", strerror(errno)); + _exit(1); + } + + if (mount(src, dest, "none", MS_MOVE, NULL) < 0) { + fprintf(stderr, "Failed mounting %s onto %s: %s\n", src, dest, strerror(errno)); + _exit(1); + } + + _exit(0); +} + +void forkumount(char *buf, char *cur, ssize_t size) { + ADVANCE_ARG_REQUIRED(); + int pid = atoi(cur); + + if (dosetns(pid, "mnt") < 0) { + fprintf(stderr, "Failed setns to container mount namespace: %s\n", strerror(errno)); + _exit(1); + } + + ADVANCE_ARG_REQUIRED(); + if (access(cur, F_OK) < 0) { + fprintf(stderr, "Mount path doesn't exist: %s\n", strerror(errno)); + _exit(1); + } + + if (umount2(cur, MNT_DETACH) < 0) { + fprintf(stderr, "Error unmounting %s: %s\n", cur, strerror(errno)); + _exit(1); + } + _exit(0); +} + +void forkdofile(char *buf, char *cur, bool is_put, ssize_t size) { + uid_t uid = 0; + gid_t gid = 0; + mode_t mode = 0; + char *command = cur, *rootfs = NULL, *source = NULL, *target = NULL; + pid_t pid; + + ADVANCE_ARG_REQUIRED(); + rootfs = cur; + + ADVANCE_ARG_REQUIRED(); + pid = atoi(cur); + + ADVANCE_ARG_REQUIRED(); + source = cur; + + ADVANCE_ARG_REQUIRED(); + target = cur; + + if (is_put) { + ADVANCE_ARG_REQUIRED(); + uid = atoi(cur); + + ADVANCE_ARG_REQUIRED(); + gid = atoi(cur); + + ADVANCE_ARG_REQUIRED(); + mode = atoi(cur); + } + + printf("command: %s\n", command); + printf("source: %s\n", source); + printf("pid: %d\n", pid); + printf("target: %s\n", target); + printf("uid: %d\n", uid); + printf("gid: %d\n", gid); + printf("mode: %d\n", mode); + + _exit(manip_file_in_ns(rootfs, pid, source, target, is_put, uid, gid, mode)); +} + +void forkgetnet(char *buf, char *cur, ssize_t size) { + ADVANCE_ARG_REQUIRED(); + int pid = atoi(cur); + + if (dosetns(pid, "net") < 0) { + fprintf(stderr, "Failed setns to container network namespace: %s\n", strerror(errno)); + _exit(1); + } + + // The rest happens in Go +} + 
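+/*
+ * The constructor below is the common entry point for the fork* helpers: it
+ * runs before the Go runtime has started any threads, reads argv back out
+ * of /proc/self/cmdline (NUL-separated, hence the ADVANCE_ARG_REQUIRED
+ * macro above) and dispatches on argv[1].
+ */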
+__attribute__((constructor)) void init(void) {
+	int cmdline;
+	char buf[CMDLINE_SIZE];
+	ssize_t size;
+	char *cur;
+
+	cmdline = open("/proc/self/cmdline", O_RDONLY);
+	if (cmdline < 0) {
+		perror("open");
+		_exit(232);
+	}
+
+	memset(buf, 0, sizeof(buf));
+	if ((size = read(cmdline, buf, sizeof(buf)-1)) < 0) {
+		close(cmdline);
+		perror("read");
+		_exit(232);
+	}
+	close(cmdline);
+
+	cur = buf;
+	// skip argv[0]
+	while (*cur != 0)
+		cur++;
+	cur++;
+	if (size <= cur - buf)
+		return;
+
+	if (strcmp(cur, "forkputfile") == 0) {
+		forkdofile(buf, cur, true, size);
+	} else if (strcmp(cur, "forkgetfile") == 0) {
+		forkdofile(buf, cur, false, size);
+	} else if (strcmp(cur, "forkmount") == 0) {
+		forkmount(buf, cur, size);
+	} else if (strcmp(cur, "forkumount") == 0) {
+		forkumount(buf, cur, size);
+	} else if (strcmp(cur, "forkgetnet") == 0) {
+		forkgetnet(buf, cur, size);
+	}
+}
+*/
+import "C"
=== added file 'src/github.com/lxc/lxd/lxd/operations.go'
--- src/github.com/lxc/lxd/lxd/operations.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/operations.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,554 @@
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/gorilla/mux"
+	"github.com/pborman/uuid"
+
+	"github.com/lxc/lxd/shared"
+)
+
+var operationsLock sync.Mutex
+var operations map[string]*operation = make(map[string]*operation)
+
+type operationClass int
+
+const (
+	operationClassTask      operationClass = 1
+	operationClassWebsocket operationClass = 2
+	operationClassToken     operationClass = 3
+)
+
+func (t operationClass) String() string {
+	return map[operationClass]string{
+		operationClassTask:      "task",
+		operationClassWebsocket: "websocket",
+		operationClassToken:     "token",
+	}[t]
+}
+
+type operation struct {
+	id        string
+	class     operationClass
+	createdAt time.Time
+	updatedAt time.Time
+	status    shared.StatusCode
+	url       string
+	resources map[string][]string
+	metadata  map[string]interface{}
+	err       string
+	readonly  bool
+
+	// Those functions are called at various points in the operation lifecycle
+	onRun     func(*operation) error
+	onCancel  func(*operation) error
+	onConnect func(*operation, *http.Request, http.ResponseWriter) error
+
+	// Channels used for error reporting and state tracking of background actions
+	chanDone chan error
+
+	// Locking for concurrent access to the operation
+	lock sync.Mutex
+}
+
+func (op *operation) done() {
+	if op.readonly {
+		return
+	}
+
+	op.lock.Lock()
+	op.readonly = true
+	op.onRun = nil
+	op.onCancel = nil
+	op.onConnect = nil
+	close(op.chanDone)
+	op.lock.Unlock()
+
+	time.AfterFunc(time.Second*5, func() {
+		operationsLock.Lock()
+		_, ok := operations[op.id]
+		if !ok {
+			operationsLock.Unlock()
+			return
+		}
+
+		delete(operations, op.id)
+		operationsLock.Unlock()
+
+		/*
+		 * When we create a new lxc.Container, it adds a finalizer (via
+		 * SetFinalizer) that frees the struct. However, it sometimes
+		 * takes the go GC a while to actually free the struct,
+		 * presumably since it is a small amount of memory.
+		 * Unfortunately, the struct also keeps the log fd open, so if
+		 * we leave too many of these around, we end up running out of
+		 * fds. So, let's explicitly do a GC to collect these at the
+		 * end of each request.
+		 */
+		runtime.GC()
+	})
+}
+
+func (op *operation) Run() (chan error, error) {
+	if op.status != shared.Pending {
+		return nil, fmt.Errorf("Only pending operations can be started")
+	}
+
+	chanRun := make(chan error, 1)
+
+	op.lock.Lock()
+	op.status = shared.Running
+
+	if op.onRun != nil {
+		go func(op *operation, chanRun chan error) {
+			err := op.onRun(op)
+			if err != nil {
+				op.lock.Lock()
+				op.status = shared.Failure
+				op.err = err.Error()
+				op.lock.Unlock()
+				op.done()
+				chanRun <- err
+
+				shared.Debugf("Failure for %s operation: %s: %s", op.class.String(), op.id, err)
+
+				_, md, _ := op.Render()
+				eventSend("operation", md)
+				return
+			}
+
+			op.lock.Lock()
+			op.status = shared.Success
+			op.lock.Unlock()
+			op.done()
+			chanRun <- nil
+
+			shared.Debugf("Success for %s operation: %s", op.class.String(), op.id)
+			_, md, _ := op.Render()
+			eventSend("operation", md)
+		}(op, chanRun)
+	}
+	op.lock.Unlock()
+
+	shared.Debugf("Started %s operation: %s", op.class.String(), op.id)
+	_, md, _ := op.Render()
+	eventSend("operation", md)
+
+	return chanRun, nil
+}
+
+func (op *operation) Cancel() (chan error, error) {
+	if op.status != shared.Running {
+		return nil, fmt.Errorf("Only running operations can be cancelled")
+	}
+
+	if !op.mayCancel() {
+		return nil, fmt.Errorf("This operation can't be cancelled")
+	}
+
+	chanCancel := make(chan error, 1)
+
+	op.lock.Lock()
+	oldStatus := op.status
+	op.status = shared.Cancelling
+	op.lock.Unlock()
+
+	if op.onCancel != nil {
+		go func(op *operation, oldStatus shared.StatusCode, chanCancel chan error) {
+			err := op.onCancel(op)
+			if err != nil {
+				op.lock.Lock()
+				op.status = oldStatus
+				op.lock.Unlock()
+				chanCancel <- err
+
+				shared.Debugf("Failed to cancel %s operation: %s: %s", op.class.String(), op.id, err)
+				_, md, _ := op.Render()
+				eventSend("operation", md)
+				return
+			}
+
+			op.lock.Lock()
+			op.status = shared.Cancelled
+			op.lock.Unlock()
+			op.done()
+			chanCancel <- nil
+
+			shared.Debugf("Cancelled %s operation: %s", op.class.String(), op.id)
+			_, md, _ := op.Render()
+			eventSend("operation", md)
+		}(op, oldStatus, chanCancel)
+	}
+
+	shared.Debugf("Cancelling %s operation: %s", op.class.String(), op.id)
+	_, md, _ := op.Render()
+	eventSend("operation", md)
+
+	if op.onCancel == nil {
+		op.lock.Lock()
+		op.status = shared.Cancelled
+		op.lock.Unlock()
+		op.done()
+		chanCancel <- nil
+
+		shared.Debugf("Cancelled %s operation: %s", op.class.String(), op.id)
+		_, md, _ = op.Render()
+		eventSend("operation", md)
+	}
+
+	return chanCancel, nil
+}
+
+func (op *operation) Connect(r *http.Request, w http.ResponseWriter) (chan error, error) {
+	if op.class != operationClassWebsocket {
+		return nil, fmt.Errorf("Only websocket operations can be connected")
+	}
+
+	if op.status != shared.Running {
+		return nil, fmt.Errorf("Only running operations can be connected")
+	}
+
+	chanConnect := make(chan error, 1)
+
+	op.lock.Lock()
+
+	go func(op *operation, chanConnect chan error) {
+		err := op.onConnect(op, r, w)
+		if err != nil {
+			chanConnect <- err
+
+			shared.Debugf("Failed to handle %s operation: %s: %s", op.class.String(), op.id, err)
+			_, md, _ := op.Render()
+			eventSend("operation", md)
+			return
+		}
+
+		chanConnect <- nil
+
+		shared.Debugf("Handled %s operation: %s", op.class.String(), op.id)
+		_, md, _ := op.Render()
+		eventSend("operation", md)
+	}(op, chanConnect)
+	op.lock.Unlock()
+
+	shared.Debugf("Connected %s operation: %s", op.class.String(), op.id)
+	_, md, _ := op.Render()
+	eventSend("operation", md)
+
+	return chanConnect, nil
+}
+
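+// Illustrative lifecycle sketch (not part of the original file; doSomeWork is
+// a hypothetical stand-in for real work): a typical task operation sets only
+// an onRun hook, is started with Run, and is then waited on:
+//
+//	op, err := operationCreate(operationClassTask, nil, nil,
+//		func(op *operation) error { return doSomeWork() },
+//		nil, nil)
+//	if err == nil {
+//		op.Run()         // fires onRun in a goroutine
+//		op.WaitFinal(30) // block up to 30 seconds for a final status
+//	}
+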
+func (op *operation) mayCancel() bool {
+	return op.onCancel != nil || op.class == operationClassToken
+}
+
+func (op *operation) Render() (string, *shared.Operation, error) {
+	// Setup the resource URLs
+	resources := op.resources
+	if resources != nil {
+		tmpResources := make(map[string][]string)
+		for key, value := range resources {
+			var values []string
+			for _, c := range value {
+				values = append(values, fmt.Sprintf("/%s/%s/%s", shared.APIVersion, key, c))
+			}
+			tmpResources[key] = values
+		}
+		resources = tmpResources
+	}
+
+	md := shared.Jmap(op.metadata)
+
+	return op.url, &shared.Operation{
+		Id:         op.id,
+		Class:      op.class.String(),
+		CreatedAt:  op.createdAt,
+		UpdatedAt:  op.updatedAt,
+		Status:     op.status.String(),
+		StatusCode: op.status,
+		Resources:  resources,
+		Metadata:   &md,
+		MayCancel:  op.mayCancel(),
+		Err:        op.err,
+	}, nil
+}
+
+func (op *operation) WaitFinal(timeout int) (bool, error) {
+	// Check current state
+	if op.status.IsFinal() {
+		return true, nil
+	}
+
+	// Wait indefinitely
+	if timeout == -1 {
+		for {
+			<-op.chanDone
+			return true, nil
+		}
+	}
+
+	// Wait until timeout
+	if timeout > 0 {
+		timer := time.NewTimer(time.Duration(timeout) * time.Second)
+		for {
+			select {
+			case <-op.chanDone:
+				return true, nil
+
+			case <-timer.C:
+				return false, nil
+			}
+		}
+	}
+
+	return false, nil
+}
+
+func (op *operation) UpdateResources(opResources map[string][]string) error {
+	if op.status != shared.Pending && op.status != shared.Running {
+		return fmt.Errorf("Only pending or running operations can be updated")
+	}
+
+	if op.readonly {
+		return fmt.Errorf("Read-only operations can't be updated")
+	}
+
+	op.lock.Lock()
+	op.updatedAt = time.Now()
+	op.resources = opResources
+	op.lock.Unlock()
+
+	shared.Debugf("Updated resources for %s operation: %s", op.class.String(), op.id)
+	_, md, _ := op.Render()
+	eventSend("operation", md)
+
+	return nil
+}
+
+func (op *operation) UpdateMetadata(opMetadata interface{}) error {
+	if op.status != shared.Pending && op.status != shared.Running {
+		return fmt.Errorf("Only pending or running operations can be updated")
+	}
+
+	if op.readonly {
+		return fmt.Errorf("Read-only operations can't be updated")
+	}
+
+	newMetadata, err := shared.ParseMetadata(opMetadata)
+	if err != nil {
+		return err
+	}
+
+	op.lock.Lock()
+	op.updatedAt = time.Now()
+	op.metadata = newMetadata
+	op.lock.Unlock()
+
+	shared.Debugf("Updated metadata for %s operation: %s", op.class.String(), op.id)
+	_, md, _ := op.Render()
+	eventSend("operation", md)
+
+	return nil
+}
+
+func operationCreate(opClass operationClass, opResources map[string][]string, opMetadata interface{},
+	onRun func(*operation) error,
+	onCancel func(*operation) error,
+	onConnect func(*operation, *http.Request, http.ResponseWriter) error) (*operation, error) {
+
+	// Main attributes
+	op := operation{}
+	op.id = uuid.NewRandom().String()
+	op.class = opClass
+	op.createdAt = time.Now()
+	op.updatedAt = op.createdAt
+	op.status = shared.Pending
+	op.url = fmt.Sprintf("/%s/operations/%s", shared.APIVersion, op.id)
+	op.resources = opResources
+	op.chanDone = make(chan error)
+
+	newMetadata, err := shared.ParseMetadata(opMetadata)
+	if err != nil {
+		return nil, err
+	}
+	op.metadata = newMetadata
+
+	// Callback functions
+	op.onRun = onRun
+	op.onCancel = onCancel
+	op.onConnect = onConnect
+
+	// Sanity check
+	if op.class != operationClassWebsocket && op.onConnect != nil {
+		return nil, fmt.Errorf("Only websocket operations can have a Connect hook")
+	}
+
+	if op.class ==
operationClassWebsocket && op.onConnect == nil { + return nil, fmt.Errorf("Websocket operations must have a Connect hook") + } + + if op.class == operationClassToken && op.onRun != nil { + return nil, fmt.Errorf("Token operations can't have a Run hook") + } + + if op.class == operationClassToken && op.onCancel != nil { + return nil, fmt.Errorf("Token operations can't have a Cancel hook") + } + + operationsLock.Lock() + operations[op.id] = &op + operationsLock.Unlock() + + shared.Debugf("New %s operation: %s", op.class.String(), op.id) + _, md, _ := op.Render() + eventSend("operation", md) + + return &op, nil +} + +func operationGet(id string) (*operation, error) { + operationsLock.Lock() + op, ok := operations[id] + operationsLock.Unlock() + + if !ok { + return nil, fmt.Errorf("Operation '%s' doesn't exist", id) + } + + return op, nil +} + +// API functions +func operationAPIGet(d *Daemon, r *http.Request) Response { + id := mux.Vars(r)["id"] + + op, err := operationGet(id) + if err != nil { + return NotFound + } + + _, body, err := op.Render() + if err != nil { + return InternalError(err) + } + + return SyncResponse(true, body) +} + +func operationAPIDelete(d *Daemon, r *http.Request) Response { + id := mux.Vars(r)["id"] + + op, err := operationGet(id) + if err != nil { + return NotFound + } + + _, err = op.Cancel() + if err != nil { + return BadRequest(err) + } + + return EmptySyncResponse +} + +var operationCmd = Command{name: "operations/{id}", get: operationAPIGet, delete: operationAPIDelete} + +func operationsAPIGet(d *Daemon, r *http.Request) Response { + var md shared.Jmap + + recursion := d.isRecursionRequest(r) + + md = shared.Jmap{} + + operationsLock.Lock() + ops := operations + operationsLock.Unlock() + + for _, v := range ops { + status := strings.ToLower(v.status.String()) + _, ok := md[status] + if !ok { + if recursion { + md[status] = make([]*shared.Operation, 0) + } else { + md[status] = make([]string, 0) + } + } + + if !recursion { + md[status] = append(md[status].([]string), v.url) + continue + } + + _, body, err := v.Render() + if err != nil { + continue + } + + md[status] = append(md[status].([]*shared.Operation), body) + } + + return SyncResponse(true, md) +} + +var operationsCmd = Command{name: "operations", get: operationsAPIGet} + +func operationAPIWaitGet(d *Daemon, r *http.Request) Response { + timeout, err := shared.AtoiEmptyDefault(r.FormValue("timeout"), -1) + if err != nil { + return InternalError(err) + } + + id := mux.Vars(r)["id"] + op, err := operationGet(id) + if err != nil { + return NotFound + } + + _, err = op.WaitFinal(timeout) + if err != nil { + return InternalError(err) + } + + _, body, err := op.Render() + if err != nil { + return InternalError(err) + } + + return SyncResponse(true, body) +} + +var operationWait = Command{name: "operations/{id}/wait", get: operationAPIWaitGet} + +type operationWebSocket struct { + req *http.Request + op *operation +} + +func (r *operationWebSocket) Render(w http.ResponseWriter) error { + chanErr, err := r.op.Connect(r.req, w) + if err != nil { + return err + } + + err = <-chanErr + return err +} + +func operationAPIWebsocketGet(d *Daemon, r *http.Request) Response { + id := mux.Vars(r)["id"] + op, err := operationGet(id) + if err != nil { + return NotFound + } + + return &operationWebSocket{r, op} +} + +var operationWebsocket = Command{name: "operations/{id}/websocket", untrustedGet: true, get: operationAPIWebsocketGet} === added file 'src/github.com/lxc/lxd/lxd/profiles.go' --- 
src/github.com/lxc/lxd/lxd/profiles.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/profiles.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,271 @@ +package main + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + + "github.com/gorilla/mux" + _ "github.com/mattn/go-sqlite3" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +/* This is used for both profiles post and profile put */ +type profilesPostReq struct { + Name string `json:"name"` + Config map[string]string `json:"config"` + Description string `json:"description"` + Devices shared.Devices `json:"devices"` +} + +func profilesGet(d *Daemon, r *http.Request) Response { + results, err := dbProfiles(d.db) + if err != nil { + return SmartError(err) + } + + recursion := d.isRecursionRequest(r) + + resultString := make([]string, len(results)) + resultMap := make([]*shared.ProfileConfig, len(results)) + i := 0 + for _, name := range results { + if !recursion { + url := fmt.Sprintf("/%s/profiles/%s", shared.APIVersion, name) + resultString[i] = url + } else { + profile, err := doProfileGet(d, name) + if err != nil { + shared.Log.Error("Failed to get profile", log.Ctx{"profile": name}) + continue + } + resultMap[i] = profile + } + i++ + } + + if !recursion { + return SyncResponse(true, resultString) + } + + return SyncResponse(true, resultMap) +} + +func profilesPost(d *Daemon, r *http.Request) Response { + req := profilesPostReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + // Sanity checks + if req.Name == "" { + return BadRequest(fmt.Errorf("No name provided")) + } + + err := containerValidConfig(req.Config, true, false) + if err != nil { + return BadRequest(err) + } + + err = containerValidDevices(req.Devices, true, false) + if err != nil { + return BadRequest(err) + } + + // Update DB entry + _, err = dbProfileCreate(d.db, req.Name, req.Description, req.Config, req.Devices) + if err != nil { + return InternalError( + fmt.Errorf("Error inserting %s into database: %s", req.Name, err)) + } + + return EmptySyncResponse +} + +var profilesCmd = Command{ + name: "profiles", + get: profilesGet, + post: profilesPost} + +func doProfileGet(d *Daemon, name string) (*shared.ProfileConfig, error) { + _, profile, err := dbProfileGet(d.db, name) + return profile, err +} + +func profileGet(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + resp, err := doProfileGet(d, name) + if err != nil { + return SmartError(err) + } + + return SyncResponse(true, resp) +} + +func getRunningContainersWithProfile(d *Daemon, profile string) []container { + results := []container{} + + output, err := dbProfileContainersGet(d.db, profile) + if err != nil { + return results + } + + for _, name := range output { + c, err := containerLoadByName(d, name) + if err != nil { + shared.Log.Error("Failed opening container", log.Ctx{"container": name}) + continue + } + results = append(results, c) + } + return results +} + +func profilePut(d *Daemon, r *http.Request) Response { + name := mux.Vars(r)["name"] + + req := profilesPostReq{} + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return BadRequest(err) + } + + // Sanity checks + err := containerValidConfig(req.Config, true, false) + if err != nil { + return BadRequest(err) + } + + err = containerValidDevices(req.Devices, true, false) + if err != nil { + return BadRequest(err) + } + + // Get the running container list + clist := getRunningContainersWithProfile(d, name) + var containers 
[]container
+	for _, c := range clist {
+		if !c.IsRunning() {
+			continue
+		}
+
+		containers = append(containers, c)
+	}
+
+	// Update the database
+	id, profile, err := dbProfileGet(d.db, name)
+	if err != nil {
+		return InternalError(fmt.Errorf("Failed to retrieve profile='%s'", name))
+	}
+
+	tx, err := dbBegin(d.db)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	if profile.Description != req.Description {
+		err = dbProfileDescriptionUpdate(tx, id, req.Description)
+		if err != nil {
+			tx.Rollback()
+			return InternalError(err)
+		}
+	}
+
+	// Optimize for description-only changes
+	if reflect.DeepEqual(profile.Config, req.Config) && reflect.DeepEqual(profile.Devices, req.Devices) {
+		err = txCommit(tx)
+		if err != nil {
+			return InternalError(err)
+		}
+
+		return EmptySyncResponse
+	}
+
+	err = dbProfileConfigClear(tx, id)
+	if err != nil {
+		tx.Rollback()
+		return InternalError(err)
+	}
+
+	err = dbProfileConfigAdd(tx, id, req.Config)
+	if err != nil {
+		tx.Rollback()
+		return SmartError(err)
+	}
+
+	err = dbDevicesAdd(tx, "profile", id, req.Devices)
+	if err != nil {
+		tx.Rollback()
+		return SmartError(err)
+	}
+
+	err = txCommit(tx)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	// Update all the containers using the profile. Must be done after txCommit due to DB lock.
+	failures := map[string]error{}
+	for _, c := range containers {
+		err = c.Update(containerArgs{
+			Architecture: c.Architecture(),
+			Ephemeral:    c.IsEphemeral(),
+			Config:       c.LocalConfig(),
+			Devices:      c.LocalDevices(),
+			Profiles:     c.Profiles()}, true)
+
+		if err != nil {
+			failures[c.Name()] = err
+		}
+	}
+
+	if len(failures) != 0 {
+		msg := "The following containers failed to update (profile change still saved):\n"
+		for cname, err := range failures {
+			msg += fmt.Sprintf(" - %s: %s\n", cname, err)
+		}
+		return InternalError(fmt.Errorf("%s", msg))
+	}
+
+	return EmptySyncResponse
+}
+
+// The handler for the post operation.
+func profilePost(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+
+	req := profilesPostReq{}
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		return BadRequest(err)
+	}
+
+	// Sanity checks
+	if req.Name == "" {
+		return BadRequest(fmt.Errorf("No name provided"))
+	}
+
+	err := dbProfileUpdate(d.db, name, req.Name)
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+// The handler for the delete operation.
+func profileDelete(d *Daemon, r *http.Request) Response {
+	name := mux.Vars(r)["name"]
+	err := dbProfileDelete(d.db, name)
+
+	if err != nil {
+		return InternalError(err)
+	}
+
+	return EmptySyncResponse
+}
+
+var profileCmd = Command{name: "profiles/{name}", get: profileGet, put: profilePut, delete: profileDelete, post: profilePost}
=== added file 'src/github.com/lxc/lxd/lxd/profiles_test.go'
--- src/github.com/lxc/lxd/lxd/profiles_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/profiles_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,55 @@
+package main
+
+import (
+	"database/sql"
+	"testing"
+)
+
+func Test_removing_a_profile_deletes_associated_configuration_entries(t *testing.T) {
+	var db *sql.DB
+	var err error
+
+	d := &Daemon{}
+	err = initializeDbObject(d, ":memory:")
+	if err != nil {
+		t.Fatal(err)
+	}
+	db = d.db
+
+	// Insert a container and a related profile. Don't forget that the profile
+	// we insert is profile ID 2 (there is a default profile already).
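+	// (Reading of the fixtures below: the profiles_devices_config row
+	// references profile_device_id 1, presumably the 'somename' device
+	// inserted just above it, since the default profile ships without
+	// devices.)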
+ statements := ` + INSERT INTO containers (name, architecture, type) VALUES ('thename', 1, 1); + INSERT INTO profiles (name) VALUES ('theprofile'); + INSERT INTO containers_profiles (container_id, profile_id) VALUES (1, 2); + INSERT INTO profiles_devices (name, profile_id) VALUES ('somename', 2); + INSERT INTO profiles_config (key, value, profile_id) VALUES ('thekey', 'thevalue', 2); + INSERT INTO profiles_devices_config (profile_device_id, key, value) VALUES (1, 'something', 'boring');` + + _, err = db.Exec(statements) + if err != nil { + t.Fatal(err) + } + + // Delete the profile we just created with dbProfileDelete + err = dbProfileDelete(db, "theprofile") + if err != nil { + t.Fatal(err) + } + + // Make sure there are 0 profiles_devices entries left. + devices, err := dbDevices(d.db, "theprofile", true) + if err != nil { + t.Fatal(err) + } + if len(devices) != 0 { + t.Errorf("Deleting a profile didn't delete the related profiles_devices! There are %d left", len(devices)) + } + + // Make sure there are 0 profiles_config entries left. + config, err := dbProfileConfig(d.db, "theprofile") + if err == nil { + t.Fatal("found the profile!") + } + + if len(config) != 0 { + t.Errorf("Deleting a profile didn't delete the related profiles_config! There are %d left", len(config)) + } +} === added file 'src/github.com/lxc/lxd/lxd/remote.go' --- src/github.com/lxc/lxd/lxd/remote.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/remote.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +package main + +import ( + "encoding/json" + "fmt" + + "github.com/lxc/lxd/shared" +) + +func remoteGetImageFingerprint(d *Daemon, server string, certificate string, alias string) (string, error) { + url := fmt.Sprintf( + "%s/%s/images/aliases/%s", + server, shared.APIVersion, alias) + + resp, err := d.httpGetSync(url, certificate) + if err != nil { + return "", err + } + + var result shared.ImageAliasesEntry + if err = json.Unmarshal(resp.Metadata, &result); err != nil { + return "", fmt.Errorf("Error reading alias") + } + + return result.Target, nil +} === added file 'src/github.com/lxc/lxd/lxd/response.go' --- src/github.com/lxc/lxd/lxd/response.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/response.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,252 @@ +package main + +import ( + "bytes" + "database/sql" + "encoding/json" + "fmt" + "io" + "mime/multipart" + "net/http" + "os" + + "github.com/mattn/go-sqlite3" + + "github.com/lxc/lxd" + "github.com/lxc/lxd/shared" +) + +type syncResp struct { + Type lxd.ResponseType `json:"type"` + Status string `json:"status"` + StatusCode shared.StatusCode `json:"status_code"` + Metadata interface{} `json:"metadata"` +} + +type asyncResp struct { + Type lxd.ResponseType `json:"type"` + Status string `json:"status"` + StatusCode shared.StatusCode `json:"status_code"` + Metadata interface{} `json:"metadata"` + Operation string `json:"operation"` +} + +type Response interface { + Render(w http.ResponseWriter) error +} + +// Sync response +type syncResponse struct { + success bool + metadata interface{} +} + +func (r *syncResponse) Render(w http.ResponseWriter) error { + status := shared.Success + if !r.success { + status = shared.Failure + } + + resp := syncResp{Type: lxd.Sync, Status: status.String(), StatusCode: status, Metadata: r.metadata} + return WriteJSON(w, resp) +} + +func SyncResponse(success bool, metadata interface{}) Response { + return &syncResponse{success, metadata} +} + +var EmptySyncResponse = &syncResponse{true, make(map[string]interface{})} + +// File 
transfer response +type fileResponseEntry struct { + identifier string + path string + filename string +} + +type fileResponse struct { + req *http.Request + files []fileResponseEntry + headers map[string]string + removeAfterServe bool +} + +func (r *fileResponse) Render(w http.ResponseWriter) error { + if r.headers != nil { + for k, v := range r.headers { + w.Header().Set(k, v) + } + } + + // No file, well, it's easy then + if len(r.files) == 0 { + return nil + } + + // For a single file, return it inline + if len(r.files) == 1 { + f, err := os.Open(r.files[0].path) + if err != nil { + return err + } + defer f.Close() + + fi, err := f.Stat() + if err != nil { + return err + } + + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", fmt.Sprintf("%d", fi.Size())) + w.Header().Set("Content-Disposition", fmt.Sprintf("inline;filename=%s", r.files[0].filename)) + + http.ServeContent(w, r.req, r.files[0].filename, fi.ModTime(), f) + if r.removeAfterServe { + err = os.Remove(r.files[0].path) + if err != nil { + return err + } + } + + return nil + } + + // Now the complex multipart answer + body := &bytes.Buffer{} + mw := multipart.NewWriter(body) + + for _, entry := range r.files { + fd, err := os.Open(entry.path) + if err != nil { + return err + } + defer fd.Close() + + fw, err := mw.CreateFormFile(entry.identifier, entry.filename) + if err != nil { + return err + } + + _, err = io.Copy(fw, fd) + if err != nil { + return err + } + } + mw.Close() + + w.Header().Set("Content-Type", mw.FormDataContentType()) + w.Header().Set("Content-Length", fmt.Sprintf("%d", body.Len())) + + _, err := io.Copy(w, body) + return err +} + +func FileResponse(r *http.Request, files []fileResponseEntry, headers map[string]string, removeAfterServe bool) Response { + return &fileResponse{r, files, headers, removeAfterServe} +} + +// Operation response +type operationResponse struct { + op *operation +} + +func (r *operationResponse) Render(w http.ResponseWriter) error { + _, err := r.op.Run() + if err != nil { + return err + } + + url, md, err := r.op.Render() + if err != nil { + return err + } + + body := asyncResp{ + Type: lxd.Async, + Status: shared.OperationCreated.String(), + StatusCode: shared.OperationCreated, + Operation: url, + Metadata: md} + + w.Header().Set("Location", url) + w.WriteHeader(202) + + return WriteJSON(w, body) +} + +func OperationResponse(op *operation) Response { + return &operationResponse{op} +} + +// Error response +type errorResponse struct { + code int + msg string +} + +func (r *errorResponse) Render(w http.ResponseWriter) error { + var output io.Writer + + buf := &bytes.Buffer{} + output = buf + var captured *bytes.Buffer + if debug { + captured = &bytes.Buffer{} + output = io.MultiWriter(buf, captured) + } + + err := json.NewEncoder(output).Encode(shared.Jmap{"type": lxd.Error, "error": r.msg, "error_code": r.code}) + + if err != nil { + return err + } + + if debug { + shared.DebugJson(captured) + } + + w.Header().Set("Content-Type", "application/json") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(r.code) + fmt.Fprintln(w, buf.String()) + + return nil +} + +/* Some standard responses */ +var NotImplemented = &errorResponse{http.StatusNotImplemented, "not implemented"} +var NotFound = &errorResponse{http.StatusNotFound, "not found"} +var Forbidden = &errorResponse{http.StatusForbidden, "not authorized"} +var Conflict = &errorResponse{http.StatusConflict, "already exists"} + +func BadRequest(err error) Response { + return 
&errorResponse{http.StatusBadRequest, err.Error()} +} + +func InternalError(err error) Response { + return &errorResponse{http.StatusInternalServerError, err.Error()} +} + +/* + * SmartError returns the right error message based on err. + */ +func SmartError(err error) Response { + switch err { + case nil: + return EmptySyncResponse + case os.ErrNotExist: + return NotFound + case sql.ErrNoRows: + return NotFound + case NoSuchObjectError: + return NotFound + case os.ErrPermission: + return Forbidden + case DbErrAlreadyDefined: + return Conflict + case sqlite3.ErrConstraintUnique: + return Conflict + default: + return InternalError(err) + } +} === added file 'src/github.com/lxc/lxd/lxd/rsync.go' --- src/github.com/lxc/lxd/lxd/rsync.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/rsync.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,170 @@ +package main + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd/shared" +) + +func rsyncWebsocket(path string, cmd *exec.Cmd, conn *websocket.Conn) error { + stdin, err := cmd.StdinPipe() + if err != nil { + return err + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return err + } + + if err := cmd.Start(); err != nil { + return err + } + + readDone, writeDone := shared.WebsocketMirror(conn, stdin, stdout) + data, err2 := ioutil.ReadAll(stderr) + if err2 != nil { + shared.Debugf("error reading rsync stderr: %s", err2) + return err2 + } + + err = cmd.Wait() + if err != nil { + shared.Debugf("rsync recv error for path %s: %s: %s", path, err, string(data)) + } + + <-readDone + <-writeDone + + return err +} + +func rsyncSendSetup(path string) (*exec.Cmd, net.Conn, io.ReadCloser, error) { + /* + * It's sort of unfortunate, but there's no library call to get a + * temporary name, so we get the file and close it and use its name. + */ + f, err := ioutil.TempFile("", "lxd_rsync_") + if err != nil { + return nil, nil, nil, err + } + f.Close() + os.Remove(f.Name()) + + /* + * The way rsync works, it invokes a subprocess that does the actual + * talking (given to it by a -E argument). Since there isn't an easy + * way for us to capture this process' stdin/stdout, we just use netcat + * and write to/from a unix socket. + * + * In principle we don't need this socket. It seems to me that some + * clever invocation of rsync --server --sender and usage of that + * process' stdin/stdout could work around the need for this socket, + * but I couldn't get it to work. Another option would be to look at + * the spawned process' first child and read/write from its + * stdin/stdout, but that also seemed messy. In any case, this seems to + * work just fine. + */ + l, err := net.Listen("unix", f.Name()) + if err != nil { + return nil, nil, nil, err + } + + /* + * Here, the path /tmp/foo is ignored. Since we specify localhost, + * rsync thinks we are syncing to a remote host (in this case, the + * other end of the lxd websocket), and so the path specified on the + * --server instance of rsync takes precedence. + * + * Additionally, we use sh -c instead of just calling nc directly + * because rsync passes a whole bunch of arguments to the wrapper + * command (i.e. the command to run on --server). However, we're + * hardcoding that at the other end, so we can just ignore it. 
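+	 *
+	 * For illustration (the socket name is random in practice), the
+	 * assembled command comes out roughly as:
+	 *
+	 *   rsync -arvP --devices --numeric-ids --partial <path> \
+	 *       localhost:/tmp/foo -e 'sh -c "nc -U /tmp/lxd_rsync_XXXXXX"'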
+	 */
+	rsyncCmd := fmt.Sprintf("sh -c \"nc -U %s\"", f.Name())
+	cmd := exec.Command(
+		"rsync",
+		"-arvP",
+		"--devices",
+		"--numeric-ids",
+		"--partial",
+		path,
+		"localhost:/tmp/foo",
+		"-e",
+		rsyncCmd)
+
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, nil, nil, err
+	}
+
+	conn, err := l.Accept()
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	l.Close()
+
+	return cmd, conn, stderr, nil
+}
+
+// RsyncSend sets up the sending half of an rsync, to recursively send the
+// directory pointed to by path over the websocket.
+func RsyncSend(path string, conn *websocket.Conn) error {
+	cmd, dataSocket, stderr, err := rsyncSendSetup(path)
+	if dataSocket != nil {
+		defer dataSocket.Close()
+	}
+	if err != nil {
+		return err
+	}
+
+	readDone, writeDone := shared.WebsocketMirror(conn, dataSocket, dataSocket)
+
+	output, err := ioutil.ReadAll(stderr)
+	if err != nil {
+		shared.Debugf("problem reading rsync stderr %s", err)
+	}
+
+	err = cmd.Wait()
+	if err != nil {
+		shared.Debugf("problem with rsync send of %s: %s: %s", path, err, string(output))
+	}
+
+	<-readDone
+	<-writeDone
+
+	return err
+}
+
+func rsyncRecvCmd(path string) *exec.Cmd {
+	return exec.Command("rsync",
+		"--server",
+		"-vlogDtpre.iLsfx",
+		"--numeric-ids",
+		"--devices",
+		"--partial",
+		".",
+		path)
+}
+
+// RsyncRecv sets up the receiving half of the websocket to rsync (the other
+// half set up by RsyncSend), putting the contents in the directory specified
+// by path.
+func RsyncRecv(path string, conn *websocket.Conn) error {
+	return rsyncWebsocket(path, rsyncRecvCmd(path), conn)
+}
=== added file 'src/github.com/lxc/lxd/lxd/rsync_test.go'
--- src/github.com/lxc/lxd/lxd/rsync_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/rsync_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,106 @@
+package main
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+
+	"github.com/lxc/lxd/shared"
+)
+
+const helloWorld = "hello world\n"
+
+func TestRsyncSendRecv(t *testing.T) {
+	source, err := ioutil.TempDir("", "lxd_test_source_")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	defer os.RemoveAll(source)
+
+	sink, err := ioutil.TempDir("", "lxd_test_sink_")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	defer os.RemoveAll(sink)
+
+	/* now, write something to rsync over */
+	f, err := os.Create(path.Join(source, "foo"))
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	f.Write([]byte(helloWorld))
+	f.Close()
+
+	send, sendConn, _, err := rsyncSendSetup(shared.AddSlash(source))
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	recv := rsyncRecvCmd(sink)
+
+	recvOut, err := recv.StdoutPipe()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	recvIn, err := recv.StdinPipe()
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err := recv.Start(); err != nil {
+		t.Error(err)
+		return
+	}
+
+	go func() {
+		defer sendConn.Close()
+		if _, err := io.Copy(sendConn, recvOut); err != nil {
+			t.Error(err)
+		}
+
+		if err := recv.Wait(); err != nil {
+			t.Error(err)
+		}
+
+	}()
+
+	/*
+	 * We close the socket in the above gofunc, but go tells us
+	 * https://github.com/golang/go/issues/4373 that this is an error
+	 * because we were reading from a socket that was closed.
Thus, we + * ignore it + */ + io.Copy(recvIn, sendConn) + + if err := send.Wait(); err != nil { + t.Error(err) + return + } + + f, err = os.Open(path.Join(sink, "foo")) + if err != nil { + t.Error(err) + return + } + defer f.Close() + + buf, err := ioutil.ReadAll(f) + if err != nil { + t.Error(err) + return + } + + if string(buf) != helloWorld { + t.Errorf("expected %s got %s", helloWorld, buf) + return + } +} === added file 'src/github.com/lxc/lxd/lxd/seccomp.go' --- src/github.com/lxc/lxd/lxd/seccomp.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/seccomp.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,54 @@ +package main + +import ( + "io/ioutil" + "os" + "path" + + "github.com/lxc/lxd/shared" +) + +const DEFAULT_SECCOMP_POLICY = ` +2 +blacklist +reject_force_umount # comment this to allow umount -f; not recommended +[all] +kexec_load errno 1 +open_by_handle_at errno 1 +init_module errno 1 +finit_module errno 1 +delete_module errno 1 +` + +var seccompPath = shared.VarPath("security", "seccomp") + +func SeccompProfilePath(c container) string { + return path.Join(seccompPath, c.Name()) +} + +func getSeccompProfileContent(c container) string { + /* for now there are no seccomp knobs. */ + return DEFAULT_SECCOMP_POLICY +} + +func SeccompCreateProfile(c container) error { + /* Unlike apparmor, there is no way to "cache" profiles, and profiles + * are automatically unloaded when a task dies. Thus, we don't need to + * unload them when a container stops, and we don't have to worry about + * the mtime on the file for any compiler purpose, so let's just write + * out the profile. + */ + profile := getSeccompProfileContent(c) + if err := os.MkdirAll(seccompPath, 0700); err != nil { + return err + } + + return ioutil.WriteFile(SeccompProfilePath(c), []byte(profile), 0600) +} + +func SeccompDeleteProfile(c container) { + /* similar to AppArmor, if we've never started this container, the + * delete can fail and that's ok. + */ + os.Remove(SeccompProfilePath(c)) +} === added file 'src/github.com/lxc/lxd/lxd/storage.go' --- src/github.com/lxc/lxd/lxd/storage.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,624 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "syscall" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd/shared" + "github.com/lxc/lxd/shared/logging" + + log "gopkg.in/inconshreveable/log15.v2" +) + +/* Some interesting filesystems */ +const ( + filesystemSuperMagicTmpfs = 0x01021994 + filesystemSuperMagicExt4 = 0xEF53 + filesystemSuperMagicXfs = 0x58465342 + filesystemSuperMagicNfs = 0x6969 + filesystemSuperMagicZfs = 0x2fc12fc1 +) + +/* + * filesystemDetect returns the filesystem on which + * the passed-in path sits + */ +func filesystemDetect(path string) (string, error) { + fs := syscall.Statfs_t{} + + err := syscall.Statfs(path, &fs) + if err != nil { + return "", err + } + + switch fs.Type { + case filesystemSuperMagicBtrfs: + return "btrfs", nil + case filesystemSuperMagicZfs: + return "zfs", nil + case filesystemSuperMagicTmpfs: + return "tmpfs", nil + case filesystemSuperMagicExt4: + return "ext4", nil + case filesystemSuperMagicXfs: + return "xfs", nil + case filesystemSuperMagicNfs: + return "nfs", nil + default: + shared.Debugf("Unknown backing filesystem type: 0x%x", fs.Type) + return string(fs.Type), nil + } +} + +// storageRsyncCopy copies a directory using rsync (with the --devices option). 
+func storageRsyncCopy(source string, dest string) (string, error) { + if err := os.MkdirAll(dest, 0755); err != nil { + return "", err + } + + rsyncVerbosity := "-q" + if debug { + rsyncVerbosity = "-vi" + } + + output, err := exec.Command( + "rsync", + "-a", + "-HAX", + "--devices", + "--delete", + "--checksum", + "--numeric-ids", + rsyncVerbosity, + shared.AddSlash(source), + dest).CombinedOutput() + + return string(output), err +} + +// storageType defines the type of a storage +type storageType int + +const ( + storageTypeBtrfs storageType = iota + storageTypeZfs + storageTypeLvm + storageTypeDir + storageTypeMock +) + +func storageTypeToString(sType storageType) string { + switch sType { + case storageTypeBtrfs: + return "btrfs" + case storageTypeZfs: + return "zfs" + case storageTypeLvm: + return "lvm" + case storageTypeMock: + return "mock" + } + + return "dir" +} + +type MigrationStorageSource interface { + Name() string + IsSnapshot() bool + Send(conn *websocket.Conn) error +} + +type storage interface { + Init(config map[string]interface{}) (storage, error) + + GetStorageType() storageType + GetStorageTypeName() string + GetStorageTypeVersion() string + + // ContainerCreate creates an empty container (no rootfs/metadata.yaml) + ContainerCreate(container container) error + + // ContainerCreateFromImage creates a container from an image. + ContainerCreateFromImage(container container, imageFingerprint string) error + + ContainerCanRestore(container container, sourceContainer container) error + ContainerDelete(container container) error + ContainerCopy(container container, sourceContainer container) error + ContainerStart(container container) error + ContainerStop(container container) error + ContainerRename(container container, newName string) error + ContainerRestore(container container, sourceContainer container) error + ContainerSetQuota(container container, size int64) error + ContainerGetUsage(container container) (int64, error) + + ContainerSnapshotCreate( + snapshotContainer container, sourceContainer container) error + ContainerSnapshotDelete(snapshotContainer container) error + ContainerSnapshotRename(snapshotContainer container, newName string) error + ContainerSnapshotStart(container container) error + ContainerSnapshotStop(container container) error + + /* for use in migrating snapshots */ + ContainerSnapshotCreateEmpty(snapshotContainer container) error + + ImageCreate(fingerprint string) error + ImageDelete(fingerprint string) error + + MigrationType() MigrationFSType + + // Get the pieces required to migrate the source. This contains a list + // of the "object" (i.e. container or snapshot, depending on whether or + // not it is a snapshot name) to be migrated in order, and a channel + // for arguments of the specific migration command. We use a channel + // here so we don't have to invoke `zfs send` or `rsync` or whatever + // and keep its stdin/stdout open for each snapshot during the course + // of migration, we can do it lazily. + // + // N.B. that the order here is important: e.g. in btrfs/zfs, snapshots + // which are parents of other snapshots should be sent first, to save + // as much transfer as possible. However, the base container is always + // sent as the first object, since that is the grandparent of every + // snapshot. + // + // We leave sending containers which are snapshots of other containers + // already present on the target instance as an exercise for the + // enterprising developer. 
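+	// For example (illustrative): for a container "c1" with snapshots
+	// "snap0" and then "snap1", a source implementation is expected to
+	// yield objects in the order c1, c1/snap0, c1/snap1 (see
+	// rsyncMigrationSource further down for the plain rsync case).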
+ MigrationSource(container container) ([]MigrationStorageSource, error) + MigrationSink(container container, objects []container, conn *websocket.Conn) error +} + +func newStorage(d *Daemon, sType storageType) (storage, error) { + var nilmap map[string]interface{} + return newStorageWithConfig(d, sType, nilmap) +} + +func newStorageWithConfig(d *Daemon, sType storageType, config map[string]interface{}) (storage, error) { + if d.IsMock { + return d.Storage, nil + } + + var s storage + + switch sType { + case storageTypeBtrfs: + if d.Storage != nil && d.Storage.GetStorageType() == storageTypeBtrfs { + return d.Storage, nil + } + + s = &storageLogWrapper{w: &storageBtrfs{d: d}} + case storageTypeZfs: + if d.Storage != nil && d.Storage.GetStorageType() == storageTypeZfs { + return d.Storage, nil + } + + s = &storageLogWrapper{w: &storageZfs{d: d}} + case storageTypeLvm: + if d.Storage != nil && d.Storage.GetStorageType() == storageTypeLvm { + return d.Storage, nil + } + + s = &storageLogWrapper{w: &storageLvm{d: d}} + default: + if d.Storage != nil && d.Storage.GetStorageType() == storageTypeDir { + return d.Storage, nil + } + + s = &storageLogWrapper{w: &storageDir{d: d}} + } + + return s.Init(config) +} + +func storageForFilename(d *Daemon, filename string) (storage, error) { + config := make(map[string]interface{}) + storageType := storageTypeDir + + if d.IsMock { + return newStorageWithConfig(d, storageTypeMock, config) + } + + filesystem, err := filesystemDetect(filename) + if err != nil { + return nil, fmt.Errorf("couldn't detect filesystem for '%s': %v", filename, err) + } + + if shared.PathExists(filename + ".lv") { + storageType = storageTypeLvm + lvPath, err := os.Readlink(filename + ".lv") + if err != nil { + return nil, fmt.Errorf("couldn't read link dest for '%s': %v", filename+".lv", err) + } + vgname := filepath.Base(filepath.Dir(lvPath)) + config["vgName"] = vgname + } else if shared.PathExists(filename + ".zfs") { + storageType = storageTypeZfs + } else if shared.PathExists(filename+".btrfs") || filesystem == "btrfs" { + storageType = storageTypeBtrfs + } + + return newStorageWithConfig(d, storageType, config) +} + +func storageForImage(d *Daemon, imgInfo *shared.ImageInfo) (storage, error) { + imageFilename := shared.VarPath("images", imgInfo.Fingerprint) + return storageForFilename(d, imageFilename) +} + +type storageShared struct { + sType storageType + sTypeName string + sTypeVersion string + + log shared.Logger +} + +func (ss *storageShared) initShared() error { + ss.log = logging.AddContext( + shared.Log, + log.Ctx{"driver": fmt.Sprintf("storage/%s", ss.sTypeName)}, + ) + return nil +} + +func (ss *storageShared) GetStorageType() storageType { + return ss.sType +} + +func (ss *storageShared) GetStorageTypeName() string { + return ss.sTypeName +} + +func (ss *storageShared) GetStorageTypeVersion() string { + return ss.sTypeVersion +} + +func (ss *storageShared) shiftRootfs(c container) error { + dpath := c.Path() + rpath := c.RootfsPath() + + shared.Log.Debug("Shifting root filesystem", + log.Ctx{"container": c.Name(), "rootfs": rpath}) + + idmapset := c.IdmapSet() + + if idmapset == nil { + return fmt.Errorf("IdmapSet of container '%s' is nil", c.Name()) + } + + err := idmapset.ShiftRootfs(rpath) + if err != nil { + shared.Debugf("Shift of rootfs %s failed: %s", rpath, err) + return err + } + + /* Set an acl so the container root can descend the container dir */ + // TODO: i changed this so it calls ss.setUnprivUserAcl, which does + // the acl change only if the 
container is not privileged, which I think is right. + return ss.setUnprivUserAcl(c, dpath) +} + +func (ss *storageShared) setUnprivUserAcl(c container, destPath string) error { + idmapset := c.IdmapSet() + + // Skip for privileged containers + if idmapset == nil { + return nil + } + + // Make sure the map is valid. Skip if container uid 0 == host uid 0 + uid, _ := idmapset.ShiftIntoNs(0, 0) + switch uid { + case -1: + return fmt.Errorf("Container doesn't have a uid 0 in its map") + case 0: + return nil + } + + // Attempt to set a POSIX ACL first. Fallback to chmod if the fs doesn't support it. + acl := fmt.Sprintf("%d:rx", uid) + _, err := exec.Command("setfacl", "-m", acl, destPath).CombinedOutput() + if err != nil { + _, err := exec.Command("chmod", "+x", destPath).CombinedOutput() + if err != nil { + return fmt.Errorf("Failed to chmod the container path.") + } + } + + return nil +} + +type storageLogWrapper struct { + w storage + log shared.Logger +} + +func (lw *storageLogWrapper) Init(config map[string]interface{}) (storage, error) { + _, err := lw.w.Init(config) + lw.log = logging.AddContext( + shared.Log, + log.Ctx{"driver": fmt.Sprintf("storage/%s", lw.w.GetStorageTypeName())}, + ) + + lw.log.Info("Init") + return lw, err +} + +func (lw *storageLogWrapper) GetStorageType() storageType { + return lw.w.GetStorageType() +} + +func (lw *storageLogWrapper) GetStorageTypeName() string { + return lw.w.GetStorageTypeName() +} + +func (lw *storageLogWrapper) GetStorageTypeVersion() string { + return lw.w.GetStorageTypeVersion() +} + +func (lw *storageLogWrapper) ContainerCreate(container container) error { + lw.log.Debug( + "ContainerCreate", + log.Ctx{ + "name": container.Name(), + "isPrivileged": container.IsPrivileged()}) + return lw.w.ContainerCreate(container) +} + +func (lw *storageLogWrapper) ContainerCreateFromImage( + container container, imageFingerprint string) error { + + lw.log.Debug( + "ContainerCreateFromImage", + log.Ctx{ + "imageFingerprint": imageFingerprint, + "name": container.Name(), + "isPrivileged": container.IsPrivileged()}) + return lw.w.ContainerCreateFromImage(container, imageFingerprint) +} + +func (lw *storageLogWrapper) ContainerCanRestore(container container, sourceContainer container) error { + lw.log.Debug("ContainerCanRestore", log.Ctx{"container": container.Name()}) + return lw.w.ContainerCanRestore(container, sourceContainer) +} + +func (lw *storageLogWrapper) ContainerDelete(container container) error { + lw.log.Debug("ContainerDelete", log.Ctx{"container": container.Name()}) + return lw.w.ContainerDelete(container) +} + +func (lw *storageLogWrapper) ContainerCopy( + container container, sourceContainer container) error { + + lw.log.Debug( + "ContainerCopy", + log.Ctx{ + "container": container.Name(), + "source": sourceContainer.Name()}) + return lw.w.ContainerCopy(container, sourceContainer) +} + +func (lw *storageLogWrapper) ContainerStart(container container) error { + lw.log.Debug("ContainerStart", log.Ctx{"container": container.Name()}) + return lw.w.ContainerStart(container) +} + +func (lw *storageLogWrapper) ContainerStop(container container) error { + lw.log.Debug("ContainerStop", log.Ctx{"container": container.Name()}) + return lw.w.ContainerStop(container) +} + +func (lw *storageLogWrapper) ContainerRename( + container container, newName string) error { + + lw.log.Debug( + "ContainerRename", + log.Ctx{ + "container": container.Name(), + "newName": newName}) + return lw.w.ContainerRename(container, newName) +} + +func (lw *storageLogWrapper) 
ContainerRestore( + container container, sourceContainer container) error { + + lw.log.Debug( + "ContainerRestore", + log.Ctx{ + "container": container.Name(), + "source": sourceContainer.Name()}) + return lw.w.ContainerRestore(container, sourceContainer) +} + +func (lw *storageLogWrapper) ContainerSetQuota( + container container, size int64) error { + + lw.log.Debug( + "ContainerSetQuota", + log.Ctx{ + "container": container.Name(), + "size": size}) + return lw.w.ContainerSetQuota(container, size) +} + +func (lw *storageLogWrapper) ContainerGetUsage( + container container) (int64, error) { + + lw.log.Debug( + "ContainerGetUsage", + log.Ctx{ + "container": container.Name()}) + return lw.w.ContainerGetUsage(container) +} + +func (lw *storageLogWrapper) ContainerSnapshotCreate( + snapshotContainer container, sourceContainer container) error { + + lw.log.Debug("ContainerSnapshotCreate", + log.Ctx{ + "snapshotContainer": snapshotContainer.Name(), + "sourceContainer": sourceContainer.Name()}) + + return lw.w.ContainerSnapshotCreate(snapshotContainer, sourceContainer) +} + +func (lw *storageLogWrapper) ContainerSnapshotCreateEmpty(snapshotContainer container) error { + lw.log.Debug("ContainerSnapshotCreateEmpty", + log.Ctx{ + "snapshotContainer": snapshotContainer.Name()}) + + return lw.w.ContainerSnapshotCreateEmpty(snapshotContainer) +} + +func (lw *storageLogWrapper) ContainerSnapshotDelete( + snapshotContainer container) error { + + lw.log.Debug("ContainerSnapshotDelete", + log.Ctx{"snapshotContainer": snapshotContainer.Name()}) + return lw.w.ContainerSnapshotDelete(snapshotContainer) +} + +func (lw *storageLogWrapper) ContainerSnapshotRename( + snapshotContainer container, newName string) error { + + lw.log.Debug("ContainerSnapshotRename", + log.Ctx{ + "snapshotContainer": snapshotContainer.Name(), + "newName": newName}) + return lw.w.ContainerSnapshotRename(snapshotContainer, newName) +} + +func (lw *storageLogWrapper) ContainerSnapshotStart(container container) error { + lw.log.Debug("ContainerStart", log.Ctx{"container": container.Name()}) + return lw.w.ContainerSnapshotStart(container) +} + +func (lw *storageLogWrapper) ContainerSnapshotStop(container container) error { + lw.log.Debug("ContainerStop", log.Ctx{"container": container.Name()}) + return lw.w.ContainerSnapshotStop(container) +} + +func (lw *storageLogWrapper) ImageCreate(fingerprint string) error { + lw.log.Debug( + "ImageCreate", + log.Ctx{"fingerprint": fingerprint}) + return lw.w.ImageCreate(fingerprint) +} + +func (lw *storageLogWrapper) ImageDelete(fingerprint string) error { + lw.log.Debug("ImageDelete", log.Ctx{"fingerprint": fingerprint}) + return lw.w.ImageDelete(fingerprint) + +} + +func (lw *storageLogWrapper) MigrationType() MigrationFSType { + return lw.w.MigrationType() +} + +func (lw *storageLogWrapper) MigrationSource(container container) ([]MigrationStorageSource, error) { + lw.log.Debug("MigrationSource", log.Ctx{"container": container.Name()}) + return lw.w.MigrationSource(container) +} + +func (lw *storageLogWrapper) MigrationSink(container container, objects []container, conn *websocket.Conn) error { + objNames := []string{} + for _, obj := range objects { + objNames = append(objNames, obj.Name()) + } + + lw.log.Debug("MigrationSink", log.Ctx{ + "container": container.Name(), + "objects": objNames, + }) + + return lw.w.MigrationSink(container, objects, conn) +} + +func ShiftIfNecessary(container container, srcIdmap *shared.IdmapSet) error { + dstIdmap := container.IdmapSet() + if dstIdmap == nil { + 
dstIdmap = new(shared.IdmapSet) + } + + if !reflect.DeepEqual(srcIdmap, dstIdmap) { + var jsonIdmap string + if srcIdmap != nil { + idmapBytes, err := json.Marshal(srcIdmap.Idmap) + if err != nil { + return err + } + jsonIdmap = string(idmapBytes) + } else { + jsonIdmap = "[]" + } + + err := container.ConfigKeySet("volatile.last_state.idmap", jsonIdmap) + if err != nil { + return err + } + } + + return nil +} + +type rsyncStorageSource struct { + container container +} + +func (s *rsyncStorageSource) Name() string { + return s.container.Name() +} + +func (s *rsyncStorageSource) IsSnapshot() bool { + return s.container.IsSnapshot() +} + +func (s *rsyncStorageSource) Send(conn *websocket.Conn) error { + path := s.container.Path() + return RsyncSend(shared.AddSlash(path), conn) +} + +func rsyncMigrationSource(container container) ([]MigrationStorageSource, error) { + sources := []MigrationStorageSource{} + + /* transfer the container, and then all the snapshots */ + sources = append(sources, &rsyncStorageSource{container}) + snaps, err := container.Snapshots() + if err != nil { + return nil, err + } + + for _, snap := range snaps { + sources = append(sources, &rsyncStorageSource{snap}) + } + + return sources, nil +} + +func rsyncMigrationSink(container container, snapshots []container, conn *websocket.Conn) error { + /* the first object is the actual container */ + if err := RsyncRecv(shared.AddSlash(container.Path()), conn); err != nil { + return err + } + + if len(snapshots) > 0 { + err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700) + if err != nil { + return err + } + } + + for _, snap := range snapshots { + if err := RsyncRecv(shared.AddSlash(snap.Path()), conn); err != nil { + return err + } + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxd/storage_32bit.go' --- src/github.com/lxc/lxd/lxd/storage_32bit.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_32bit.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +// +build 386 arm ppc s390 + +package main + +const ( + /* This is really 0x9123683E, go wants us to give it in signed form + * since we use it as a signed constant. 
*/ + filesystemSuperMagicBtrfs = -1859950530 +) === added file 'src/github.com/lxc/lxd/lxd/storage_64bit.go' --- src/github.com/lxc/lxd/lxd/storage_64bit.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_64bit.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +// +build amd64 ppc64 ppc64le arm64 s390x + +package main + +const ( + filesystemSuperMagicBtrfs = 0x9123683E +) === added file 'src/github.com/lxc/lxd/lxd/storage_btrfs.go' --- src/github.com/lxc/lxd/lxd/storage_btrfs.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_btrfs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1053 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/gorilla/websocket" + "github.com/pborman/uuid" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +type storageBtrfs struct { + d *Daemon + + storageShared +} + +func (s *storageBtrfs) Init(config map[string]interface{}) (storage, error) { + s.sType = storageTypeBtrfs + s.sTypeName = storageTypeToString(s.sType) + if err := s.initShared(); err != nil { + return s, err + } + + out, err := exec.LookPath("btrfs") + if err != nil || len(out) == 0 { + return s, fmt.Errorf("The 'btrfs' tool isn't available") + } + + output, err := exec.Command("btrfs", "version").CombinedOutput() + if err != nil { + return s, fmt.Errorf("The 'btrfs' tool isn't working properly") + } + + count, err := fmt.Sscanf(strings.SplitN(string(output), " ", 2)[1], "v%s\n", &s.sTypeVersion) + if err != nil || count != 1 { + return s, fmt.Errorf("The 'btrfs' tool isn't working properly") + } + + return s, nil +} + +func (s *storageBtrfs) ContainerCreate(container container) error { + cPath := container.Path() + + // MkdirAll the parent directory of the BTRFS subvolume. + if err := os.MkdirAll(filepath.Dir(cPath), 0755); err != nil { + return err + } + + // Create the BTRFS Subvolume + err := s.subvolCreate(cPath) + if err != nil { + return err + } + + if container.IsPrivileged() { + if err := os.Chmod(cPath, 0700); err != nil { + return err + } + } + + return container.TemplateApply("create") +} + +func (s *storageBtrfs) ContainerCreateFromImage( + container container, imageFingerprint string) error { + + imageSubvol := fmt.Sprintf( + "%s.btrfs", + shared.VarPath("images", imageFingerprint)) + + // Create the btrfs subvol of the image first if it doesn't exist. + if !shared.PathExists(imageSubvol) { + if err := s.ImageCreate(imageFingerprint); err != nil { + return err + } + } + + // Now make a snapshot of the image subvol + err := s.subvolsSnapshot(imageSubvol, container.Path(), false) + if err != nil { + return err + } + + if !container.IsPrivileged() { + if err = s.shiftRootfs(container); err != nil { + s.ContainerDelete(container) + return err + } + } else { + if err := os.Chmod(container.Path(), 0700); err != nil { + return err + } + } + + return container.TemplateApply("create") +} + +func (s *storageBtrfs) ContainerCanRestore(container container, sourceContainer container) error { + return nil +} + +func (s *storageBtrfs) ContainerDelete(container container) error { + cPath := container.Path() + + // First remove the subvol (if it was one). + if s.isSubvolume(cPath) { + if err := s.subvolsDelete(cPath); err != nil { + return err + } + } + + // Then the directory (if it still exists). 
+ err := os.RemoveAll(cPath) + if err != nil { + s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "err": err}) + return fmt.Errorf("Error cleaning up %s: %s", cPath, err) + } + + return nil +} + +func (s *storageBtrfs) ContainerCopy(container container, sourceContainer container) error { + subvol := sourceContainer.Path() + dpath := container.Path() + + if s.isSubvolume(subvol) { + // Snapshot the sourcecontainer + err := s.subvolsSnapshot(subvol, dpath, false) + if err != nil { + return err + } + } else { + // Create the BTRFS Container. + if err := s.ContainerCreate(container); err != nil { + return err + } + + /* + * Copy by using rsync + */ + output, err := storageRsyncCopy( + sourceContainer.Path(), + container.Path()) + if err != nil { + s.ContainerDelete(container) + + s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("rsync failed: %s", string(output)) + } + } + + if err := s.setUnprivUserAcl(sourceContainer, dpath); err != nil { + s.ContainerDelete(container) + return err + } + + return container.TemplateApply("copy") +} + +func (s *storageBtrfs) ContainerStart(container container) error { + return nil +} + +func (s *storageBtrfs) ContainerStop(container container) error { + return nil +} + +func (s *storageBtrfs) ContainerRename(container container, newName string) error { + oldName := container.Name() + oldPath := container.Path() + newPath := containerPath(newName, false) + + if err := os.Rename(oldPath, newPath); err != nil { + return err + } + + if shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName))) { + err := os.Rename(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName)), shared.VarPath(fmt.Sprintf("snapshots/%s", newName))) + if err != nil { + return err + } + } + + // TODO: No TemplateApply here? + return nil +} + +func (s *storageBtrfs) ContainerRestore( + container container, sourceContainer container) error { + + targetSubVol := container.Path() + sourceSubVol := sourceContainer.Path() + sourceBackupPath := container.Path() + ".back" + + // Create a backup of the container + err := os.Rename(container.Path(), sourceBackupPath) + if err != nil { + return err + } + + var failure error + if s.isSubvolume(sourceSubVol) { + // Restore using btrfs snapshots. + err := s.subvolsSnapshot(sourceSubVol, targetSubVol, false) + if err != nil { + failure = err + } + } else { + // Restore using rsync but create a btrfs subvol. + if err := s.subvolCreate(targetSubVol); err == nil { + output, err := storageRsyncCopy( + sourceSubVol, + targetSubVol) + + if err != nil { + s.log.Error( + "ContainerRestore: rsync failed", + log.Ctx{"output": string(output)}) + + failure = err + } + } else { + failure = err + } + } + + // Now allow unprivileged users to access its data. 
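+	// (setUnprivUserAcl presumably adds a POSIX ACL for the container's
+	// mapped root uid; without it an unprivileged container may be unable
+	// to traverse the restored rootfs.)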
+ if err := s.setUnprivUserAcl(sourceContainer, targetSubVol); err != nil { + failure = err + } + + if failure != nil { + // Restore original container + s.ContainerDelete(container) + os.Rename(sourceBackupPath, container.Path()) + } else { + // Remove the backup, we made + if s.isSubvolume(sourceBackupPath) { + return s.subvolDelete(sourceBackupPath) + } + os.RemoveAll(sourceBackupPath) + } + + return failure +} + +func (s *storageBtrfs) ContainerSetQuota(container container, size int64) error { + subvol := container.Path() + + _, err := s.subvolQGroup(subvol) + if err != nil { + return err + } + + output, err := exec.Command( + "btrfs", + "qgroup", + "limit", + "-e", fmt.Sprintf("%d", size), + subvol).CombinedOutput() + + if err != nil { + return fmt.Errorf("Failed to set btrfs quota: %s", output) + } + + return nil +} + +func (s *storageBtrfs) ContainerGetUsage(container container) (int64, error) { + return s.subvolQGroupUsage(container.Path()) +} + +func (s *storageBtrfs) ContainerSnapshotCreate( + snapshotContainer container, sourceContainer container) error { + + subvol := sourceContainer.Path() + dpath := snapshotContainer.Path() + + if s.isSubvolume(subvol) { + // Create a readonly snapshot of the source. + err := s.subvolsSnapshot(subvol, dpath, true) + if err != nil { + s.ContainerSnapshotDelete(snapshotContainer) + return err + } + } else { + /* + * Copy by using rsync + */ + output, err := storageRsyncCopy( + subvol, + dpath) + if err != nil { + s.ContainerSnapshotDelete(snapshotContainer) + + s.log.Error( + "ContainerSnapshotCreate: rsync failed", + log.Ctx{"output": string(output)}) + return fmt.Errorf("rsync failed: %s", string(output)) + } + } + + return nil +} +func (s *storageBtrfs) ContainerSnapshotDelete( + snapshotContainer container) error { + + err := s.ContainerDelete(snapshotContainer) + if err != nil { + return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err) + } + + oldPathParent := filepath.Dir(snapshotContainer.Path()) + if ok, _ := shared.PathIsEmpty(oldPathParent); ok { + os.Remove(oldPathParent) + } + return nil +} + +func (s *storageBtrfs) ContainerSnapshotStart(container container) error { + if shared.PathExists(container.Path() + ".ro") { + return fmt.Errorf("The snapshot is already mounted read-write.") + } + + err := os.Rename(container.Path(), container.Path()+".ro") + if err != nil { + return err + } + + err = s.subvolsSnapshot(container.Path()+".ro", container.Path(), false) + if err != nil { + return err + } + + return nil +} + +func (s *storageBtrfs) ContainerSnapshotStop(container container) error { + if !shared.PathExists(container.Path() + ".ro") { + return fmt.Errorf("The snapshot isn't currently mounted read-write.") + } + + err := s.subvolsDelete(container.Path()) + if err != nil { + return err + } + + err = os.Rename(container.Path()+".ro", container.Path()) + if err != nil { + return err + } + + return nil +} + +// ContainerSnapshotRename renames a snapshot of a container. +func (s *storageBtrfs) ContainerSnapshotRename( + snapshotContainer container, newName string) error { + + oldPath := snapshotContainer.Path() + newPath := containerPath(newName, true) + + // Create the new parent. + if !shared.PathExists(filepath.Dir(newPath)) { + os.MkdirAll(filepath.Dir(newPath), 0700) + } + + // Now rename the snapshot. 
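+	// (Plain directories can simply be renamed, but for subvolumes the
+	// code takes the snapshot-then-delete route below, presumably so the
+	// read-only flag is re-applied under the new name.)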
+ if !s.isSubvolume(oldPath) { + if err := os.Rename(oldPath, newPath); err != nil { + return err + } + } else { + if err := s.subvolsSnapshot(oldPath, newPath, true); err != nil { + return err + } + if err := s.subvolsDelete(oldPath); err != nil { + return err + } + } + + // Remove the old parent (on container rename) if its empty. + if ok, _ := shared.PathIsEmpty(filepath.Dir(oldPath)); ok { + os.Remove(filepath.Dir(oldPath)) + } + + return nil +} + +func (s *storageBtrfs) ContainerSnapshotCreateEmpty(snapshotContainer container) error { + dpath := snapshotContainer.Path() + return s.subvolCreate(dpath) +} + +func (s *storageBtrfs) ImageCreate(fingerprint string) error { + imagePath := shared.VarPath("images", fingerprint) + subvol := fmt.Sprintf("%s.btrfs", imagePath) + + if err := s.subvolCreate(subvol); err != nil { + return err + } + + if err := untarImage(imagePath, subvol); err != nil { + return err + } + + return nil +} + +func (s *storageBtrfs) ImageDelete(fingerprint string) error { + imagePath := shared.VarPath("images", fingerprint) + subvol := fmt.Sprintf("%s.btrfs", imagePath) + + return s.subvolDelete(subvol) +} + +func (s *storageBtrfs) subvolCreate(subvol string) error { + parentDestPath := filepath.Dir(subvol) + if !shared.PathExists(parentDestPath) { + if err := os.MkdirAll(parentDestPath, 0700); err != nil { + return err + } + } + + output, err := exec.Command( + "btrfs", + "subvolume", + "create", + subvol).CombinedOutput() + if err != nil { + s.log.Debug( + "subvolume create failed", + log.Ctx{"subvol": subvol, "output": string(output)}, + ) + return fmt.Errorf( + "btrfs subvolume create failed, subvol=%s, output%s", + subvol, + string(output), + ) + } + + return nil +} + +func (s *storageBtrfs) subvolQGroup(subvol string) (string, error) { + output, err := exec.Command( + "btrfs", + "qgroup", + "show", + subvol, + "-e", + "-f").CombinedOutput() + + if err != nil { + return "", fmt.Errorf("btrfs quotas not supported. Try enabling them with 'btrfs quota enable'.") + } + + var qgroup string + for _, line := range strings.Split(string(output), "\n") { + if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") { + continue + } + + fields := strings.Fields(line) + if len(fields) != 4 { + continue + } + + qgroup = fields[0] + } + + if qgroup == "" { + return "", fmt.Errorf("Unable to find quota group") + } + + return qgroup, nil +} + +func (s *storageBtrfs) subvolQGroupUsage(subvol string) (int64, error) { + output, err := exec.Command( + "btrfs", + "qgroup", + "show", + subvol, + "-e", + "-f").CombinedOutput() + + if err != nil { + return -1, fmt.Errorf("btrfs quotas not supported. 
Try enabling them with 'btrfs quota enable'.") + } + + for _, line := range strings.Split(string(output), "\n") { + if line == "" || strings.HasPrefix(line, "qgroupid") || strings.HasPrefix(line, "---") { + continue + } + + fields := strings.Fields(line) + if len(fields) != 4 { + continue + } + + usage, err := strconv.ParseInt(fields[2], 10, 64) + if err != nil { + continue + } + + return usage, nil + } + + return -1, fmt.Errorf("Unable to find current qgroup usage") +} + +func (s *storageBtrfs) subvolDelete(subvol string) error { + // Attempt (but don't fail on) to delete any qgroup on the subvolume + qgroup, err := s.subvolQGroup(subvol) + if err == nil { + output, err := exec.Command( + "btrfs", + "qgroup", + "destroy", + qgroup, + subvol).CombinedOutput() + + if err != nil { + s.log.Warn( + "subvolume qgroup delete failed", + log.Ctx{"subvol": subvol, "output": string(output)}, + ) + } + } + + // Delete the subvolume itself + output, err := exec.Command( + "btrfs", + "subvolume", + "delete", + subvol, + ).CombinedOutput() + + if err != nil { + s.log.Warn( + "subvolume delete failed", + log.Ctx{"subvol": subvol, "output": string(output)}, + ) + } + return nil +} + +// subvolsDelete is the recursive variant on subvolDelete, +// it first deletes subvolumes of the subvolume and then the +// subvolume itself. +func (s *storageBtrfs) subvolsDelete(subvol string) error { + // Delete subsubvols. + subsubvols, err := s.getSubVolumes(subvol) + if err != nil { + return err + } + + for _, subsubvol := range subsubvols { + s.log.Debug( + "Deleting subsubvol", + log.Ctx{ + "subvol": subvol, + "subsubvol": subsubvol}) + + if err := s.subvolDelete(path.Join(subvol, subsubvol)); err != nil { + return err + } + } + + // Delete the subvol itself + if err := s.subvolDelete(subvol); err != nil { + return err + } + + return nil +} + +/* + * subvolSnapshot creates a snapshot of "source" to "dest" + * the result will be readonly if "readonly" is True. + */ +func (s *storageBtrfs) subvolSnapshot( + source string, dest string, readonly bool) error { + + parentDestPath := filepath.Dir(dest) + if !shared.PathExists(parentDestPath) { + if err := os.MkdirAll(parentDestPath, 0700); err != nil { + return err + } + } + + if shared.PathExists(dest) { + if err := os.Remove(dest); err != nil { + return err + } + } + + var output []byte + var err error + if readonly { + output, err = exec.Command( + "btrfs", + "subvolume", + "snapshot", + "-r", + source, + dest).CombinedOutput() + } else { + output, err = exec.Command( + "btrfs", + "subvolume", + "snapshot", + source, + dest).CombinedOutput() + } + if err != nil { + s.log.Error( + "subvolume snapshot failed", + log.Ctx{"source": source, "dest": dest, "output": string(output)}, + ) + return fmt.Errorf( + "subvolume snapshot failed, source=%s, dest=%s, output=%s", + source, + dest, + string(output), + ) + } + + return err +} + +func (s *storageBtrfs) subvolsSnapshot( + source string, dest string, readonly bool) error { + + // Get a list of subvolumes of the root + subsubvols, err := s.getSubVolumes(source) + if err != nil { + return err + } + + if len(subsubvols) > 0 && readonly { + // A root with subvolumes can never be readonly, + // also don't make subvolumes readonly. + readonly = false + + s.log.Warn( + "Subvolumes detected, ignoring ro flag", + log.Ctx{"source": source, "dest": dest}) + } + + // First snapshot the root + if err := s.subvolSnapshot(source, dest, readonly); err != nil { + return err + } + + // Now snapshot all subvolumes of the root. 
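+	// (btrfs snapshots are not recursive: nested subvolumes appear as
+	// empty directories inside the parent snapshot, so each one is
+	// snapshotted into place individually.)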
+ for _, subsubvol := range subsubvols { + if err := s.subvolSnapshot( + path.Join(source, subsubvol), + path.Join(dest, subsubvol), + readonly); err != nil { + + return err + } + } + + return nil +} + +/* + * isSubvolume returns true if the given Path is a btrfs subvolume + * else false. + */ +func (s *storageBtrfs) isSubvolume(subvolPath string) bool { + if runningInUserns { + // subvolume show is restricted to real root, use a workaround + + fs := syscall.Statfs_t{} + err := syscall.Statfs(subvolPath, &fs) + if err != nil { + return false + } + + if fs.Type != filesystemSuperMagicBtrfs { + return false + } + + parentFs := syscall.Statfs_t{} + err = syscall.Statfs(path.Dir(subvolPath), &parentFs) + if err != nil { + return false + } + + if fs.Fsid == parentFs.Fsid { + return false + } + + return true + } + + output, err := exec.Command( + "btrfs", + "subvolume", + "show", + subvolPath).CombinedOutput() + if err != nil || strings.HasPrefix(string(output), "ERROR: ") { + return false + } + + return true +} + +// getSubVolumes returns a list of relative subvolume paths of "path". +func (s *storageBtrfs) getSubVolumes(path string) ([]string, error) { + result := []string{} + + if runningInUserns { + if !strings.HasSuffix(path, "/") { + path = path + "/" + } + + // Unprivileged users can't get to fs internals + filepath.Walk(path, func(fpath string, fi os.FileInfo, err error) error { + if strings.TrimRight(fpath, "/") == strings.TrimRight(path, "/") { + return nil + } + + if err != nil { + return nil + } + + if !fi.IsDir() { + return nil + } + + if s.isSubvolume(fpath) { + result = append(result, strings.TrimPrefix(fpath, path)) + } + return nil + }) + + return result, nil + } + + out, err := exec.Command( + "btrfs", + "inspect-internal", + "rootid", + path).CombinedOutput() + if err != nil { + return result, fmt.Errorf( + "Unable to get btrfs rootid, path='%s', err='%s'", + path, + err) + } + rootid := strings.TrimRight(string(out), "\n") + + out, err = exec.Command( + "btrfs", + "inspect-internal", + "subvolid-resolve", + rootid, path).CombinedOutput() + if err != nil { + return result, fmt.Errorf( + "Unable to resolve btrfs rootid, path='%s', err='%s'", + path, + err) + } + basePath := strings.TrimRight(string(out), "\n") + + out, err = exec.Command( + "btrfs", + "subvolume", + "list", + "-o", + path).CombinedOutput() + if err != nil { + return result, fmt.Errorf( + "Unable to list subvolumes, path='%s', err='%s'", + path, + err) + } + + lines := strings.Split(string(out), "\n") + for _, line := range lines { + if line == "" { + continue + } + + cols := strings.Fields(line) + result = append(result, cols[8][len(basePath):]) + } + + return result, nil +} + +type btrfsMigrationSource struct { + lxdName string + deleteAfterSending bool + btrfsPath string + btrfsParent string + + btrfs *storageBtrfs +} + +func (s btrfsMigrationSource) Name() string { + return s.lxdName +} + +func (s btrfsMigrationSource) IsSnapshot() bool { + return !s.deleteAfterSending +} + +func (s btrfsMigrationSource) Send(conn *websocket.Conn) error { + args := []string{"send", s.btrfsPath} + if s.btrfsParent != "" { + args = append(args, "-p", s.btrfsParent) + } + + cmd := exec.Command("btrfs", args...) 
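+	// (With "-p <parent>", btrfs emits an incremental stream holding only
+	// the delta against the parent snapshot, e.g.:
+	//   btrfs send -p /snaps/a /snaps/b | btrfs receive /dst
+	// The receiver must already have the parent snapshot for the stream
+	// to apply.)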
+ + deleteAfterSending := func(path string) { + s.btrfs.subvolsDelete(path) + os.Remove(filepath.Dir(path)) + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + if s.deleteAfterSending { + deleteAfterSending(s.btrfsPath) + } + return err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + if s.deleteAfterSending { + deleteAfterSending(s.btrfsPath) + } + return err + } + + if err := cmd.Start(); err != nil { + if s.deleteAfterSending { + deleteAfterSending(s.btrfsPath) + } + return err + } + + <-shared.WebsocketSendStream(conn, stdout) + + output, err := ioutil.ReadAll(stderr) + if err != nil { + shared.Log.Error("problem reading btrfs send stderr", "err", err) + } + + err = cmd.Wait() + if err != nil { + shared.Log.Error("problem with btrfs send", "output", string(output)) + } + if s.deleteAfterSending { + deleteAfterSending(s.btrfsPath) + } + return err +} + +func (s *storageBtrfs) MigrationType() MigrationFSType { + if runningInUserns { + return MigrationFSType_RSYNC + } else { + return MigrationFSType_BTRFS + } +} + +func (s *storageBtrfs) MigrationSource(c container) ([]MigrationStorageSource, error) { + if runningInUserns { + return rsyncMigrationSource(c) + } + + sources := []MigrationStorageSource{} + + /* If the container is a snapshot, let's just send that; we don't need + * to send anything else, because that's all the user asked for. + */ + if c.IsSnapshot() { + tmpPath := containerPath(fmt.Sprintf("%s/.migration-send-%s", c.Name(), uuid.NewRandom().String()), true) + err := os.MkdirAll(tmpPath, 0700) + if err != nil { + return nil, err + } + + btrfsPath := fmt.Sprintf("%s/.root", tmpPath) + if err := s.subvolSnapshot(c.Path(), btrfsPath, true); err != nil { + return nil, err + } + + sources = append(sources, btrfsMigrationSource{c.Name(), true, btrfsPath, "", s}) + return sources, nil + } + + /* List all the snapshots in order of reverse creation. The idea here + * is that we send the oldest to newest snapshot, hopefully saving on + * xfer costs. Then, after all that, we send the container itself. + */ + snapshots, err := c.Snapshots() + if err != nil { + return nil, err + } + + for i, snap := range snapshots { + var prev container + if i > 0 { + prev = snapshots[i-1] + } + + btrfsPath := snap.Path() + parentName := "" + if prev != nil { + parentName = prev.Path() + } + + sources = append(sources, btrfsMigrationSource{snap.Name(), false, btrfsPath, parentName, s}) + } + + /* We can't send running fses, so let's snapshot the fs and send + * the snapshot. 
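+	 * The scratch snapshot is created under a throw-away
+	 * .migration-send-<uuid> directory and is removed again by the
+	 * deleteAfterSending helper in Send() once the stream has been sent.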
+ */ + tmpPath := containerPath(fmt.Sprintf("%s/.migration-send-%s", c.Name(), uuid.NewRandom().String()), true) + err = os.MkdirAll(tmpPath, 0700) + if err != nil { + return nil, err + } + + btrfsPath := fmt.Sprintf("%s/.root", tmpPath) + if err := s.subvolSnapshot(c.Path(), btrfsPath, true); err != nil { + return nil, err + } + + btrfsParent := "" + if len(sources) > 0 { + btrfsParent = sources[len(sources)-1].(btrfsMigrationSource).btrfsPath + } + + sources = append(sources, btrfsMigrationSource{c.Name(), true, btrfsPath, btrfsParent, s}) + + return sources, nil +} + +func (s *storageBtrfs) MigrationSink(container container, snapshots []container, conn *websocket.Conn) error { + if runningInUserns { + return rsyncMigrationSink(container, snapshots, conn) + } + + cName := container.Name() + + snapshotsPath := shared.VarPath(fmt.Sprintf("snapshots/%s", cName)) + if !shared.PathExists(snapshotsPath) { + err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", cName)), 0700) + if err != nil { + return err + } + } + + btrfsRecv := func(btrfsPath string, targetPath string, isSnapshot bool) error { + args := []string{"receive", "-e", btrfsPath} + cmd := exec.Command("btrfs", args...) + + // Remove the existing pre-created subvolume + err := s.subvolsDelete(targetPath) + if err != nil { + return err + } + + stdin, err := cmd.StdinPipe() + if err != nil { + return err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return err + } + + if err := cmd.Start(); err != nil { + return err + } + + <-shared.WebsocketRecvStream(stdin, conn) + + output, err := ioutil.ReadAll(stderr) + if err != nil { + shared.Debugf("problem reading btrfs receive stderr %s", err) + } + + err = cmd.Wait() + if err != nil { + shared.Log.Error("problem with btrfs receive", log.Ctx{"output": string(output)}) + return err + } + + if !isSnapshot { + cPath := containerPath(fmt.Sprintf("%s/.root", cName), true) + + err := s.subvolSnapshot(cPath, targetPath, false) + if err != nil { + shared.Log.Error("problem with btrfs snapshot", log.Ctx{"err": err}) + return err + } + + err = s.subvolsDelete(cPath) + if err != nil { + shared.Log.Error("problem with btrfs delete", log.Ctx{"err": err}) + return err + } + } + + return nil + } + + for _, snap := range snapshots { + if err := btrfsRecv(containerPath(cName, true), snap.Path(), true); err != nil { + return err + } + } + + /* finally, do the real container */ + if err := btrfsRecv(containerPath(cName, true), container.Path(), false); err != nil { + return err + } + + // Cleanup + if ok, _ := shared.PathIsEmpty(snapshotsPath); ok { + err := os.Remove(snapshotsPath) + if err != nil { + return err + } + } + + return nil +} === added file 'src/github.com/lxc/lxd/lxd/storage_dir.go' --- src/github.com/lxc/lxd/lxd/storage_dir.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_dir.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,275 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +type storageDir struct { + d *Daemon + + storageShared +} + +func (s *storageDir) Init(config map[string]interface{}) (storage, error) { + s.sType = storageTypeDir + s.sTypeName = storageTypeToString(s.sType) + if err := s.initShared(); err != nil { + return s, err + } + + return s, nil +} + +func (s *storageDir) ContainerCreate(container container) error { + cPath := container.Path() + if err := os.MkdirAll(cPath, 0755); err != nil { + return 
fmt.Errorf("Error creating containers directory") + } + + if container.IsPrivileged() { + if err := os.Chmod(cPath, 0700); err != nil { + return err + } + } + + return container.TemplateApply("create") +} + +func (s *storageDir) ContainerCreateFromImage( + container container, imageFingerprint string) error { + + rootfsPath := container.RootfsPath() + if err := os.MkdirAll(rootfsPath, 0755); err != nil { + return fmt.Errorf("Error creating rootfs directory") + } + + if container.IsPrivileged() { + if err := os.Chmod(container.Path(), 0700); err != nil { + return err + } + } + + imagePath := shared.VarPath("images", imageFingerprint) + if err := untarImage(imagePath, container.Path()); err != nil { + os.RemoveAll(rootfsPath) + return err + } + + if !container.IsPrivileged() { + if err := s.shiftRootfs(container); err != nil { + s.ContainerDelete(container) + return err + } + } + + return container.TemplateApply("create") +} + +func (s *storageDir) ContainerCanRestore(container container, sourceContainer container) error { + return nil +} + +func (s *storageDir) ContainerDelete(container container) error { + cPath := container.Path() + + err := os.RemoveAll(cPath) + if err != nil { + s.log.Error("ContainerDelete: failed", log.Ctx{"cPath": cPath, "err": err}) + return fmt.Errorf("Error cleaning up %s: %s", cPath, err) + } + + return nil +} + +func (s *storageDir) ContainerCopy( + container container, sourceContainer container) error { + + oldPath := sourceContainer.RootfsPath() + newPath := container.RootfsPath() + + /* + * Copy by using rsync + */ + output, err := storageRsyncCopy(oldPath, newPath) + if err != nil { + s.ContainerDelete(container) + s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("rsync failed: %s", string(output)) + } + + err = s.setUnprivUserAcl(sourceContainer, container.Path()) + if err != nil { + return err + } + + return container.TemplateApply("copy") +} + +func (s *storageDir) ContainerStart(container container) error { + return nil +} + +func (s *storageDir) ContainerStop(container container) error { + return nil +} + +func (s *storageDir) ContainerRename(container container, newName string) error { + oldName := container.Name() + + oldPath := container.Path() + newPath := containerPath(newName, false) + + if err := os.Rename(oldPath, newPath); err != nil { + return err + } + + if shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName))) { + err := os.Rename(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName)), shared.VarPath(fmt.Sprintf("snapshots/%s", newName))) + if err != nil { + return err + } + } + + // TODO: No TemplateApply here? + return nil +} + +func (s *storageDir) ContainerRestore( + container container, sourceContainer container) error { + + targetPath := container.Path() + sourcePath := sourceContainer.Path() + + // Restore using rsync + output, err := storageRsyncCopy( + sourcePath, + targetPath) + + if err != nil { + s.log.Error( + "ContainerRestore: rsync failed", + log.Ctx{"output": string(output)}) + + return err + } + + // Now allow unprivileged users to access its data. 
+ if err := s.setUnprivUserAcl(sourceContainer, targetPath); err != nil { + return err + } + + return nil +} + +func (s *storageDir) ContainerSetQuota(container container, size int64) error { + return fmt.Errorf("The directory container backend doesn't support quotas.") +} + +func (s *storageDir) ContainerGetUsage(container container) (int64, error) { + return -1, fmt.Errorf("The directory container backend doesn't support quotas.") +} + +func (s *storageDir) ContainerSnapshotCreate( + snapshotContainer container, sourceContainer container) error { + + oldPath := sourceContainer.Path() + newPath := snapshotContainer.Path() + + /* + * Copy by using rsync + */ + output, err := storageRsyncCopy(oldPath, newPath) + if err != nil { + s.ContainerDelete(snapshotContainer) + s.log.Error("ContainerSnapshotCreate: rsync failed", + log.Ctx{"output": string(output)}) + + return fmt.Errorf("rsync failed: %s", string(output)) + } + + return nil +} + +func (s *storageDir) ContainerSnapshotCreateEmpty(snapshotContainer container) error { + return os.MkdirAll(snapshotContainer.Path(), 0700) +} + +func (s *storageDir) ContainerSnapshotDelete( + snapshotContainer container) error { + err := s.ContainerDelete(snapshotContainer) + if err != nil { + return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err) + } + + oldPathParent := filepath.Dir(snapshotContainer.Path()) + if ok, _ := shared.PathIsEmpty(oldPathParent); ok { + os.Remove(oldPathParent) + } + + return nil +} + +func (s *storageDir) ContainerSnapshotRename( + snapshotContainer container, newName string) error { + + oldPath := snapshotContainer.Path() + newPath := containerPath(newName, true) + + // Create the new parent. + if strings.Contains(snapshotContainer.Name(), "/") { + if !shared.PathExists(filepath.Dir(newPath)) { + os.MkdirAll(filepath.Dir(newPath), 0700) + } + } + + // Now rename the snapshot. + if err := os.Rename(oldPath, newPath); err != nil { + return err + } + + // Remove the old parent (on container rename) if its empty. 
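+	// (Snapshot names have the form "container/snapshot", so a "/" here
+	// means the snapshot lived in a per-container parent directory that
+	// may now be empty and can be pruned.)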
+ if strings.Contains(snapshotContainer.Name(), "/") { + if ok, _ := shared.PathIsEmpty(filepath.Dir(oldPath)); ok { + os.Remove(filepath.Dir(oldPath)) + } + } + + return nil +} + +func (s *storageDir) ContainerSnapshotStart(container container) error { + return nil +} + +func (s *storageDir) ContainerSnapshotStop(container container) error { + return nil +} + +func (s *storageDir) ImageCreate(fingerprint string) error { + return nil +} + +func (s *storageDir) ImageDelete(fingerprint string) error { + return nil +} + +func (s *storageDir) MigrationType() MigrationFSType { + return MigrationFSType_RSYNC +} + +func (s *storageDir) MigrationSource(container container) ([]MigrationStorageSource, error) { + return rsyncMigrationSource(container) +} + +func (s *storageDir) MigrationSink(container container, snapshots []container, conn *websocket.Conn) error { + return rsyncMigrationSink(container, snapshots, conn) +} === added file 'src/github.com/lxc/lxd/lxd/storage_lvm.go' --- src/github.com/lxc/lxd/lxd/storage_lvm.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_lvm.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1065 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/gorilla/websocket" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +var storageLvmDefaultThinLVSize = "10GiB" +var storageLvmDefaultThinPoolName = "LXDPool" + +func storageLVMCheckVolumeGroup(vgName string) error { + output, err := exec.Command("vgdisplay", "-s", vgName).CombinedOutput() + if err != nil { + shared.Log.Debug("vgdisplay failed to find vg", log.Ctx{"output": string(output)}) + return fmt.Errorf("LVM volume group '%s' not found", vgName) + } + + return nil +} + +func storageLVMThinpoolExists(vgName string, poolName string) (bool, error) { + output, err := exec.Command("vgs", "--noheadings", "-o", "lv_attr", fmt.Sprintf("%s/%s", vgName, poolName)).Output() + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + waitStatus := exitError.Sys().(syscall.WaitStatus) + if waitStatus.ExitStatus() == 5 { + // pool LV was not found + return false, nil + } + } + return false, fmt.Errorf("Error checking for pool '%s'", poolName) + } + // Found LV named poolname, check type: + attrs := strings.TrimSpace(string(output[:])) + if strings.HasPrefix(attrs, "t") { + return true, nil + } + + return false, fmt.Errorf("Pool named '%s' exists but is not a thin pool.", poolName) +} + +func storageLVMGetThinPoolUsers(d *Daemon) ([]string, error) { + results := []string{} + vgname, err := d.ConfigValueGet("storage.lvm_vg_name") + if err != nil { + return results, fmt.Errorf("Error getting lvm_vg_name config") + } + if vgname == "" { + return results, nil + } + poolname, err := d.ConfigValueGet("storage.lvm_thinpool_name") + if err != nil { + return results, fmt.Errorf("Error getting lvm_thinpool_name config") + } + if poolname == "" { + return results, nil + } + + cNames, err := dbContainersList(d.db, cTypeRegular) + if err != nil { + return results, err + } + for _, cName := range cNames { + var lvLinkPath string + if strings.Contains(cName, shared.SnapshotDelimiter) { + lvLinkPath = shared.VarPath("snapshots", fmt.Sprintf("%s.lv", cName)) + } else { + lvLinkPath = shared.VarPath("containers", fmt.Sprintf("%s.lv", cName)) + } + + if shared.PathExists(lvLinkPath) { + results = append(results, cName) + } + } + + imageNames, err := dbImagesGet(d.db, false) + if err != nil { + return 
results, err + } + + for _, imageName := range imageNames { + imageLinkPath := shared.VarPath("images", fmt.Sprintf("%s.lv", imageName)) + if shared.PathExists(imageLinkPath) { + results = append(results, imageName) + } + } + + return results, nil +} + +func storageLVMSetThinPoolNameConfig(d *Daemon, poolname string) error { + users, err := storageLVMGetThinPoolUsers(d) + if err != nil { + return fmt.Errorf("Error checking if a pool is already in use: %v", err) + } + if len(users) > 0 { + return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users) + } + + vgname, err := d.ConfigValueGet("storage.lvm_vg_name") + if err != nil { + return fmt.Errorf("Error getting lvm_vg_name config: %v", err) + } + + if poolname != "" { + if vgname == "" { + return fmt.Errorf("Can not set lvm_thinpool_name without lvm_vg_name set.") + } + + poolExists, err := storageLVMThinpoolExists(vgname, poolname) + if err != nil { + return fmt.Errorf("Error checking for thin pool '%s' in '%s': %v", poolname, vgname, err) + } + if !poolExists { + return fmt.Errorf("Pool '%s' does not exist in Volume Group '%s'", poolname, vgname) + } + } + + err = d.ConfigValueSet("storage.lvm_thinpool_name", poolname) + if err != nil { + return err + } + + return nil +} + +func storageLVMSetVolumeGroupNameConfig(d *Daemon, vgname string) error { + users, err := storageLVMGetThinPoolUsers(d) + if err != nil { + return fmt.Errorf("Error checking if a pool is already in use: %v", err) + } + if len(users) > 0 { + return fmt.Errorf("Can not change LVM config. Images or containers are still using LVs: %v", users) + } + + if vgname != "" { + err = storageLVMCheckVolumeGroup(vgname) + if err != nil { + return err + } + } + + err = d.ConfigValueSet("storage.lvm_vg_name", vgname) + if err != nil { + return err + } + + return nil +} + +func storageLVMSetFsTypeConfig(d *Daemon, fstype string) error { + err := d.ConfigValueSet("storage.lvm_fstype", fstype) + if err != nil { + return err + } + + return nil +} + +func xfsGenerateNewUUID(lvpath string) error { + output, err := exec.Command( + "xfs_admin", + "-U", "generate", + lvpath).CombinedOutput() + if err != nil { + return fmt.Errorf("Error generating new UUID: %v\noutput:'%s'", err, string(output)) + } + + return nil +} + +func containerNameToLVName(containerName string) string { + lvName := strings.Replace(containerName, "-", "--", -1) + return strings.Replace(lvName, shared.SnapshotDelimiter, "-", -1) +} + +type storageLvm struct { + d *Daemon + vgName string + + storageShared +} + +func (s *storageLvm) Init(config map[string]interface{}) (storage, error) { + s.sType = storageTypeLvm + s.sTypeName = storageTypeToString(s.sType) + if err := s.initShared(); err != nil { + return s, err + } + + output, err := exec.Command("lvm", "version").CombinedOutput() + if err != nil { + return nil, fmt.Errorf("Error getting LVM version: %v\noutput:'%s'", err, string(output)) + } + lines := strings.Split(string(output), "\n") + + s.sTypeVersion = "" + for idx, line := range lines { + fields := strings.SplitAfterN(line, ":", 2) + if len(fields) < 2 { + continue + } + if idx > 0 { + s.sTypeVersion += " / " + } + s.sTypeVersion += strings.TrimSpace(fields[1]) + } + + if config["vgName"] == nil { + vgName, err := s.d.ConfigValueGet("storage.lvm_vg_name") + if err != nil { + return s, fmt.Errorf("Error checking server config: %v", err) + } + if vgName == "" { + return s, fmt.Errorf("LVM isn't enabled") + } + + if err := storageLVMCheckVolumeGroup(vgName); err != nil { + 
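+			// (The VG named in the server config was not found by
+			// vgdisplay; fail Init here so the daemon surfaces the
+			// misconfiguration instead of limping on.)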
return s, err + } + s.vgName = vgName + } else { + s.vgName = config["vgName"].(string) + } + + return s, nil +} + +func versionSplit(versionString string) (int, int, int, error) { + fs := strings.Split(versionString, ".") + majs, mins, incs := fs[0], fs[1], fs[2] + + maj, err := strconv.Atoi(majs) + if err != nil { + return 0, 0, 0, err + } + min, err := strconv.Atoi(mins) + if err != nil { + return 0, 0, 0, err + } + incs = strings.Split(incs, "(")[0] + inc, err := strconv.Atoi(incs) + if err != nil { + return 0, 0, 0, err + } + + return maj, min, inc, nil +} + +func (s *storageLvm) lvmVersionIsAtLeast(versionString string) (bool, error) { + lvmVersion := strings.Split(s.sTypeVersion, "/")[0] + + lvmMaj, lvmMin, lvmInc, err := versionSplit(lvmVersion) + if err != nil { + return false, err + } + + inMaj, inMin, inInc, err := versionSplit(versionString) + if err != nil { + return false, err + } + + if lvmMaj < inMaj || lvmMin < inMin || lvmInc < inInc { + return false, nil + } else { + return true, nil + } + +} + +func (s *storageLvm) ContainerCreate(container container) error { + containerName := containerNameToLVName(container.Name()) + lvpath, err := s.createThinLV(containerName) + if err != nil { + return err + } + + if err := os.MkdirAll(container.Path(), 0755); err != nil { + return err + } + + var mode os.FileMode + if container.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(container.Path(), mode) + if err != nil { + return err + } + + dst := fmt.Sprintf("%s.lv", container.Path()) + err = os.Symlink(lvpath, dst) + if err != nil { + return err + } + + return nil +} + +func (s *storageLvm) ContainerCreateFromImage( + container container, imageFingerprint string) error { + + imageLVFilename := shared.VarPath( + "images", fmt.Sprintf("%s.lv", imageFingerprint)) + + if !shared.PathExists(imageLVFilename) { + if err := s.ImageCreate(imageFingerprint); err != nil { + return err + } + } + + containerName := containerNameToLVName(container.Name()) + + lvpath, err := s.createSnapshotLV(containerName, imageFingerprint, false) + if err != nil { + return err + } + + destPath := container.Path() + if err := os.MkdirAll(destPath, 0755); err != nil { + return fmt.Errorf("Error creating container directory: %v", err) + } + + var mode os.FileMode + if container.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(destPath, mode) + if err != nil { + return err + } + + dst := shared.VarPath("containers", fmt.Sprintf("%s.lv", container.Name())) + err = os.Symlink(lvpath, dst) + if err != nil { + return err + } + + var fstype string + fstype, err = s.d.ConfigValueGet("storage.lvm_fstype") + if err != nil { + return fmt.Errorf("Error checking server config, err=%v", err) + } + + if fstype == "" { + fstype = "ext4" + } + + // Generate a new xfs's UUID + if fstype == "xfs" { + err := xfsGenerateNewUUID(lvpath) + if err != nil { + s.ContainerDelete(container) + return err + } + } + + err = s.tryMount(lvpath, destPath, fstype, 0, "discard") + if err != nil { + s.ContainerDelete(container) + return fmt.Errorf("Error mounting snapshot LV: %v", err) + } + + if !container.IsPrivileged() { + if err = s.shiftRootfs(container); err != nil { + err2 := s.tryUnmount(destPath, 0) + if err2 != nil { + return fmt.Errorf("Error in umount: '%s' while cleaning up after error in shiftRootfs: '%s'", err2, err) + } + s.ContainerDelete(container) + return fmt.Errorf("Error in shiftRootfs: %v", err) + } + } + + err = container.TemplateApply("create") + if err != nil { + 
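+		// (A template failure is only logged at this point; the LV is
+		// still mounted and must be unmounted below before the error is
+		// finally returned.)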
s.log.Error("Error in create template during ContainerCreateFromImage, continuing to unmount", + log.Ctx{"err": err}) + } + + umounterr := s.tryUnmount(destPath, 0) + if umounterr != nil { + return fmt.Errorf("Error unmounting '%s' after shiftRootfs: %v", destPath, umounterr) + } + + return err +} + +func (s *storageLvm) ContainerCanRestore(container container, sourceContainer container) error { + return nil +} + +func (s *storageLvm) ContainerDelete(container container) error { + lvName := containerNameToLVName(container.Name()) + if err := s.removeLV(lvName); err != nil { + return err + } + + lvLinkPath := fmt.Sprintf("%s.lv", container.Path()) + if err := os.Remove(lvLinkPath); err != nil { + return err + } + + cPath := container.Path() + if err := os.RemoveAll(cPath); err != nil { + s.log.Error("ContainerDelete: failed to remove path", log.Ctx{"cPath": cPath, "err": err}) + return fmt.Errorf("Cleaning up %s: %s", cPath, err) + } + + return nil +} + +func (s *storageLvm) ContainerCopy(container container, sourceContainer container) error { + if s.isLVMContainer(sourceContainer) { + if err := s.createSnapshotContainer(container, sourceContainer, false); err != nil { + s.log.Error("Error creating snapshot LV for copy", log.Ctx{"err": err}) + return err + } + } else { + s.log.Info("Copy from Non-LVM container", log.Ctx{"container": container.Name(), + "sourceContainer": sourceContainer.Name()}) + if err := s.ContainerCreate(container); err != nil { + s.log.Error("Error creating empty container", log.Ctx{"err": err}) + return err + } + + if err := s.ContainerStart(container); err != nil { + s.log.Error("Error starting/mounting container", log.Ctx{"err": err, "container": container.Name()}) + s.ContainerDelete(container) + return err + } + + output, err := storageRsyncCopy( + sourceContainer.Path(), + container.Path()) + if err != nil { + s.log.Error("ContainerCopy: rsync failed", log.Ctx{"output": string(output)}) + s.ContainerDelete(container) + return fmt.Errorf("rsync failed: %s", string(output)) + } + + if err := s.ContainerStop(container); err != nil { + return err + } + } + return container.TemplateApply("copy") +} + +func (s *storageLvm) ContainerStart(container container) error { + lvName := containerNameToLVName(container.Name()) + lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvName) + fstype, err := s.d.ConfigValueGet("storage.lvm_fstype") + if err != nil { + return fmt.Errorf("Error checking server config, err=%v", err) + } + + if fstype == "" { + fstype = "ext4" + } + + err = s.tryMount(lvpath, container.Path(), fstype, 0, "discard") + if err != nil { + return fmt.Errorf( + "Error mounting snapshot LV path='%s': %v", + container.Path(), + err) + } + + return nil +} + +func (s *storageLvm) ContainerStop(container container) error { + err := s.tryUnmount(container.Path(), 0) + if err != nil { + return fmt.Errorf( + "failed to unmount container path '%s'.\nError: %v", + container.Path(), + err) + } + + return nil +} + +func (s *storageLvm) tryExec(name string, arg ...string) ([]byte, error) { + var err error + var output []byte + + for i := 0; i < 20; i++ { + output, err = exec.Command(name, arg...).CombinedOutput() + if err == nil { + break + } + + time.Sleep(500 * time.Millisecond) + } + + return output, err +} + +func (s *storageLvm) tryMount(src string, dst string, fs string, flags uintptr, options string) error { + var err error + + for i := 0; i < 20; i++ { + err = syscall.Mount(src, dst, fs, flags, options) + if err == nil { + break + } + + time.Sleep(500 * 
time.Millisecond) + } + + if err != nil { + return err + } + + return nil +} + +func (s *storageLvm) tryUnmount(path string, flags int) error { + var err error + + for i := 0; i < 20; i++ { + err = syscall.Unmount(path, flags) + if err == nil { + break + } + + time.Sleep(500 * time.Millisecond) + } + + if err != nil { + return err + } + + return nil +} + +func (s *storageLvm) ContainerRename( + container container, newContainerName string) error { + + oldName := containerNameToLVName(container.Name()) + newName := containerNameToLVName(newContainerName) + output, err := s.renameLV(oldName, newName) + if err != nil { + s.log.Error("Failed to rename a container LV", + log.Ctx{"oldName": oldName, + "newName": newName, + "err": err, + "output": string(output)}) + + return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldName, newName, err) + } + + // Rename the snapshots + if !container.IsSnapshot() { + snaps, err := container.Snapshots() + if err != nil { + return err + } + + for _, snap := range snaps { + baseSnapName := filepath.Base(snap.Name()) + newSnapshotName := newName + shared.SnapshotDelimiter + baseSnapName + err := s.ContainerRename(snap, newSnapshotName) + if err != nil { + return err + } + + oldPathParent := filepath.Dir(snap.Path()) + if ok, _ := shared.PathIsEmpty(oldPathParent); ok { + os.Remove(oldPathParent) + } + } + } + + // Create a new symlink + newSymPath := fmt.Sprintf("%s.lv", containerPath(newContainerName, container.IsSnapshot())) + + err = os.MkdirAll(filepath.Dir(containerPath(newContainerName, container.IsSnapshot())), 0700) + if err != nil { + return err + } + + err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.vgName, newName), newSymPath) + if err != nil { + return err + } + + // Remove the old symlink + oldSymPath := fmt.Sprintf("%s.lv", container.Path()) + err = os.Remove(oldSymPath) + if err != nil { + return err + } + + // Rename the directory + err = os.Rename(container.Path(), containerPath(newContainerName, container.IsSnapshot())) + if err != nil { + return err + } + + return nil + +} + +func (s *storageLvm) ContainerRestore( + container container, sourceContainer container) error { + srcName := containerNameToLVName(sourceContainer.Name()) + destName := containerNameToLVName(container.Name()) + + err := s.removeLV(destName) + if err != nil { + return fmt.Errorf("Error removing LV about to be restored over: %v", err) + } + + _, err = s.createSnapshotLV(destName, srcName, false) + if err != nil { + return fmt.Errorf("Error creating snapshot LV: %v", err) + } + + return nil +} + +func (s *storageLvm) ContainerSetQuota(container container, size int64) error { + return fmt.Errorf("The LVM container backend doesn't support quotas.") +} + +func (s *storageLvm) ContainerGetUsage(container container) (int64, error) { + return -1, fmt.Errorf("The LVM container backend doesn't support quotas.") +} + +func (s *storageLvm) ContainerSnapshotCreate( + snapshotContainer container, sourceContainer container) error { + return s.createSnapshotContainer(snapshotContainer, sourceContainer, true) +} + +func (s *storageLvm) createSnapshotContainer( + snapshotContainer container, sourceContainer container, readonly bool) error { + + srcName := containerNameToLVName(sourceContainer.Name()) + destName := containerNameToLVName(snapshotContainer.Name()) + shared.Log.Debug( + "Creating snapshot", + log.Ctx{"srcName": srcName, "destName": destName}) + + lvpath, err := s.createSnapshotLV(destName, srcName, readonly) + if err != nil { + return 
fmt.Errorf("Error creating snapshot LV: %v", err) + } + + destPath := snapshotContainer.Path() + if err := os.MkdirAll(destPath, 0755); err != nil { + return fmt.Errorf("Error creating container directory: %v", err) + } + + var mode os.FileMode + if snapshotContainer.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(destPath, mode) + if err != nil { + return err + } + + dest := fmt.Sprintf("%s.lv", snapshotContainer.Path()) + err = os.Symlink(lvpath, dest) + if err != nil { + return err + } + + return nil +} + +func (s *storageLvm) ContainerSnapshotDelete( + snapshotContainer container) error { + + err := s.ContainerDelete(snapshotContainer) + if err != nil { + return fmt.Errorf("Error deleting snapshot %s: %s", snapshotContainer.Name(), err) + } + + oldPathParent := filepath.Dir(snapshotContainer.Path()) + if ok, _ := shared.PathIsEmpty(oldPathParent); ok { + os.Remove(oldPathParent) + } + return nil +} + +func (s *storageLvm) ContainerSnapshotRename( + snapshotContainer container, newContainerName string) error { + oldName := containerNameToLVName(snapshotContainer.Name()) + newName := containerNameToLVName(newContainerName) + oldPath := snapshotContainer.Path() + oldSymPath := fmt.Sprintf("%s.lv", oldPath) + newPath := containerPath(newContainerName, true) + newSymPath := fmt.Sprintf("%s.lv", newPath) + + // Rename the LV + output, err := s.renameLV(oldName, newName) + if err != nil { + s.log.Error("Failed to rename a snapshot LV", + log.Ctx{"oldName": oldName, "newName": newName, "err": err, "output": string(output)}) + return fmt.Errorf("Failed to rename a container LV, oldName='%s', newName='%s', err='%s'", oldName, newName, err) + } + + // Delete the symlink + err = os.Remove(oldSymPath) + if err != nil { + return fmt.Errorf("Failed to remove old symlink: %s", err) + } + + // Create the symlink + err = os.Symlink(fmt.Sprintf("/dev/%s/%s", s.vgName, newName), newSymPath) + if err != nil { + return fmt.Errorf("Failed to create symlink: %s", err) + } + + // Rename the mount point + err = os.Rename(oldPath, newPath) + if err != nil { + return fmt.Errorf("Failed to rename mountpoint: %s", err) + } + + return nil +} + +func (s *storageLvm) ContainerSnapshotStart(container container) error { + srcName := containerNameToLVName(container.Name()) + destName := containerNameToLVName(container.Name() + "/rw") + + shared.Log.Debug( + "Creating snapshot", + log.Ctx{"srcName": srcName, "destName": destName}) + + lvpath, err := s.createSnapshotLV(destName, srcName, false) + if err != nil { + return fmt.Errorf("Error creating snapshot LV: %v", err) + } + + destPath := container.Path() + if !shared.PathExists(destPath) { + if err := os.MkdirAll(destPath, 0755); err != nil { + return fmt.Errorf("Error creating container directory: %v", err) + } + } + + var fstype string + fstype, err = s.d.ConfigValueGet("storage.lvm_fstype") + if err != nil { + return fmt.Errorf("Error checking server config, err=%v", err) + } + + if fstype == "" { + fstype = "ext4" + } + + // Generate a new xfs's UUID + if fstype == "xfs" { + err := xfsGenerateNewUUID(lvpath) + if err != nil { + s.ContainerDelete(container) + return err + } + } + + err = s.tryMount(lvpath, container.Path(), fstype, 0, "discard") + if err != nil { + return fmt.Errorf( + "Error mounting snapshot LV path='%s': %v", + container.Path(), + err) + } + + return nil +} + +func (s *storageLvm) ContainerSnapshotStop(container container) error { + err := s.ContainerStop(container) + if err != nil { + return err + } + + lvName := 
containerNameToLVName(container.Name() + "/rw") + if err := s.removeLV(lvName); err != nil { + return err + } + + return nil +} + +func (s *storageLvm) ContainerSnapshotCreateEmpty(snapshotContainer container) error { + return s.ContainerCreate(snapshotContainer) +} + +func (s *storageLvm) ImageCreate(fingerprint string) error { + finalName := shared.VarPath("images", fingerprint) + + lvpath, err := s.createThinLV(fingerprint) + if err != nil { + s.log.Error("LVMCreateThinLV", log.Ctx{"err": err}) + return fmt.Errorf("Error Creating LVM LV for new image: %v", err) + } + + dst := shared.VarPath("images", fmt.Sprintf("%s.lv", fingerprint)) + err = os.Symlink(lvpath, dst) + if err != nil { + return err + } + + tempLVMountPoint, err := ioutil.TempDir(shared.VarPath("images"), "tmp_lv_mnt") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(tempLVMountPoint); err != nil { + s.log.Error("Deleting temporary LVM mount point", log.Ctx{"err": err}) + } + }() + + var fstype string + fstype, err = s.d.ConfigValueGet("storage.lvm_fstype") + if err != nil { + return fmt.Errorf("Error checking server config, err=%v", err) + } + + if fstype == "" { + fstype = "ext4" + } + + err = s.tryMount(lvpath, tempLVMountPoint, fstype, 0, "discard") + if err != nil { + shared.Logf("Error mounting image LV for untarring: %v", err) + return fmt.Errorf("Error mounting image LV: %v", err) + + } + + untarErr := untarImage(finalName, tempLVMountPoint) + + err = s.tryUnmount(tempLVMountPoint, 0) + if err != nil { + s.log.Warn("could not unmount LV. Will not remove", + log.Ctx{"lvpath": lvpath, "mountpoint": tempLVMountPoint, "err": err}) + if untarErr == nil { + return err + } + + return fmt.Errorf( + "Error unmounting '%s' during cleanup of error %v", + tempLVMountPoint, untarErr) + } + + return untarErr +} + +func (s *storageLvm) ImageDelete(fingerprint string) error { + err := s.removeLV(fingerprint) + if err != nil { + return err + } + + lvsymlink := fmt.Sprintf( + "%s.lv", shared.VarPath("images", fingerprint)) + err = os.Remove(lvsymlink) + if err != nil { + return fmt.Errorf( + "Failed to remove symlink to deleted image LV: '%s': %v", lvsymlink, err) + } + + return nil +} + +func (s *storageLvm) createDefaultThinPool() (string, error) { + // Create a tiny 1G thinpool + output, err := s.tryExec( + "lvcreate", + "--poolmetadatasize", "1G", + "-L", "1G", + "--thinpool", + fmt.Sprintf("%s/%s", s.vgName, storageLvmDefaultThinPoolName)) + + if err != nil { + s.log.Debug( + "Could not create thin pool", + log.Ctx{ + "name": storageLvmDefaultThinPoolName, + "err": err, + "output": string(output)}) + + return "", fmt.Errorf( + "Could not create LVM thin pool named %s", storageLvmDefaultThinPoolName) + } + + // Grow it to the maximum VG size (two step process required by old LVM) + output, err = s.tryExec( + "lvextend", + "--alloc", "anywhere", + "-l", "100%FREE", + fmt.Sprintf("%s/%s", s.vgName, storageLvmDefaultThinPoolName)) + + if err != nil { + s.log.Debug( + "Could not grow thin pool", + log.Ctx{ + "name": storageLvmDefaultThinPoolName, + "err": err, + "output": string(output)}) + + return "", fmt.Errorf( + "Could not grow LVM thin pool named %s", storageLvmDefaultThinPoolName) + } + + return storageLvmDefaultThinPoolName, nil +} + +func (s *storageLvm) createThinLV(lvname string) (string, error) { + poolname, err := s.d.ConfigValueGet("storage.lvm_thinpool_name") + if err != nil { + return "", fmt.Errorf("Error checking server config, err=%v", err) + } + + if poolname == "" { + poolname, err = 
s.createDefaultThinPool() + if err != nil { + return "", fmt.Errorf("Error creating LVM thin pool: %v", err) + } + err = storageLVMSetThinPoolNameConfig(s.d, poolname) + if err != nil { + s.log.Error("Setting thin pool name", log.Ctx{"err": err}) + return "", fmt.Errorf("Error setting LVM thin pool config: %v", err) + } + } + + output, err := s.tryExec( + "lvcreate", + "--thin", + "-n", lvname, + "--virtualsize", storageLvmDefaultThinLVSize, + fmt.Sprintf("%s/%s", s.vgName, poolname)) + + if err != nil { + s.log.Debug("Could not create LV", log.Ctx{"lvname": lvname, "output": string(output)}) + return "", fmt.Errorf("Could not create thin LV named %s", lvname) + } + + lvpath := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname) + + fstype, err := s.d.ConfigValueGet("storage.lvm_fstype") + + switch fstype { + case "xfs": + output, err = s.tryExec( + "mkfs.xfs", + lvpath) + default: + // default = ext4 + output, err = s.tryExec( + "mkfs.ext4", + "-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0", + lvpath) + } + + if err != nil { + s.log.Error("mkfs.ext4", log.Ctx{"output": string(output)}) + return "", fmt.Errorf("Error making filesystem on image LV: %v", err) + } + + return lvpath, nil +} + +func (s *storageLvm) removeLV(lvname string) error { + var err error + var output []byte + + output, err = s.tryExec( + "lvremove", "-f", fmt.Sprintf("%s/%s", s.vgName, lvname)) + + if err != nil { + s.log.Debug("Could not remove LV", log.Ctx{"lvname": lvname, "output": string(output)}) + return fmt.Errorf("Could not remove LV named %s", lvname) + } + + return nil +} + +func (s *storageLvm) createSnapshotLV(lvname string, origlvname string, readonly bool) (string, error) { + s.log.Debug("in createSnapshotLV:", log.Ctx{"lvname": lvname, "dev string": fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname)}) + isRecent, err := s.lvmVersionIsAtLeast("2.02.99") + if err != nil { + return "", fmt.Errorf("Error checking LVM version: %v", err) + } + var output []byte + if isRecent { + output, err = s.tryExec( + "lvcreate", + "-kn", + "-n", lvname, + "-s", fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname)) + } else { + output, err = s.tryExec( + "lvcreate", + "-n", lvname, + "-s", fmt.Sprintf("/dev/%s/%s", s.vgName, origlvname)) + } + if err != nil { + s.log.Debug("Could not create LV snapshot", log.Ctx{"lvname": lvname, "origlvname": origlvname, "output": string(output)}) + return "", fmt.Errorf("Could not create snapshot LV named %s", lvname) + } + + snapshotFullName := fmt.Sprintf("/dev/%s/%s", s.vgName, lvname) + + if readonly { + output, err = s.tryExec("lvchange", "-ay", "-pr", snapshotFullName) + } else { + output, err = s.tryExec("lvchange", "-ay", snapshotFullName) + } + + if err != nil { + return "", fmt.Errorf("Could not activate new snapshot '%s': %v\noutput:%s", lvname, err, string(output)) + } + + return snapshotFullName, nil +} + +func (s *storageLvm) isLVMContainer(container container) bool { + return shared.PathExists(fmt.Sprintf("%s.lv", container.Path())) +} + +func (s *storageLvm) renameLV(oldName string, newName string) (string, error) { + output, err := s.tryExec("lvrename", s.vgName, oldName, newName) + return string(output), err +} + +func (s *storageLvm) MigrationType() MigrationFSType { + return MigrationFSType_RSYNC +} + +func (s *storageLvm) MigrationSource(container container) ([]MigrationStorageSource, error) { + return rsyncMigrationSource(container) +} + +func (s *storageLvm) MigrationSink(container container, snapshots []container, conn *websocket.Conn) error { + return 
rsyncMigrationSink(container, snapshots, conn) +} === added file 'src/github.com/lxc/lxd/lxd/storage_test.go' --- src/github.com/lxc/lxd/lxd/storage_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,139 @@ +package main + +import ( + "fmt" + + "github.com/gorilla/websocket" + + log "gopkg.in/inconshreveable/log15.v2" +) + +type storageMock struct { + d *Daemon + sType storageType + log log.Logger + + storageShared +} + +func (s *storageMock) Init(config map[string]interface{}) (storage, error) { + s.sType = storageTypeMock + s.sTypeName = storageTypeToString(storageTypeMock) + + if err := s.initShared(); err != nil { + return s, err + } + + return s, nil +} + +func (s *storageMock) GetStorageType() storageType { + return s.sType +} + +func (s *storageMock) GetStorageTypeName() string { + return s.sTypeName +} + +func (s *storageMock) ContainerCreate(container container) error { + return nil +} + +func (s *storageMock) ContainerCreateFromImage( + container container, imageFingerprint string) error { + + return nil +} + +func (s *storageMock) ContainerCanRestore(container container, sourceContainer container) error { + return nil +} + +func (s *storageMock) ContainerDelete(container container) error { + return nil +} + +func (s *storageMock) ContainerCopy( + container container, sourceContainer container) error { + + return nil +} + +func (s *storageMock) ContainerStart(container container) error { + return nil +} + +func (s *storageMock) ContainerStop(container container) error { + return nil +} + +func (s *storageMock) ContainerRename( + container container, newName string) error { + + return nil +} + +func (s *storageMock) ContainerRestore( + container container, sourceContainer container) error { + + return nil +} + +func (s *storageMock) ContainerSetQuota( + container container, size int64) error { + + return nil +} + +func (s *storageMock) ContainerGetUsage( + container container) (int64, error) { + + return 0, nil +} +func (s *storageMock) ContainerSnapshotCreate( + snapshotContainer container, sourceContainer container) error { + + return nil +} +func (s *storageMock) ContainerSnapshotDelete( + snapshotContainer container) error { + + return nil +} + +func (s *storageMock) ContainerSnapshotRename( + snapshotContainer container, newName string) error { + + return nil +} + +func (s *storageMock) ContainerSnapshotStart(container container) error { + return nil +} + +func (s *storageMock) ContainerSnapshotStop(container container) error { + return nil +} + +func (s *storageMock) ContainerSnapshotCreateEmpty(snapshotContainer container) error { + return nil +} + +func (s *storageMock) ImageCreate(fingerprint string) error { + return nil +} + +func (s *storageMock) ImageDelete(fingerprint string) error { + return nil +} + +func (s *storageMock) MigrationType() MigrationFSType { + return MigrationFSType_RSYNC +} + +func (s *storageMock) MigrationSource(container container) ([]MigrationStorageSource, error) { + return nil, fmt.Errorf("not implemented") +} +func (s *storageMock) MigrationSink(container container, snapshots []container, conn *websocket.Conn) error { + return nil +} === added file 'src/github.com/lxc/lxd/lxd/storage_zfs.go' --- src/github.com/lxc/lxd/lxd/storage_zfs.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/lxd/storage_zfs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1399 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "time" + + 
"github.com/gorilla/websocket" + + "github.com/lxc/lxd/shared" + + "github.com/pborman/uuid" + log "gopkg.in/inconshreveable/log15.v2" +) + +type storageZfs struct { + d *Daemon + zfsPool string + + storageShared +} + +func (s *storageZfs) Init(config map[string]interface{}) (storage, error) { + s.sType = storageTypeZfs + s.sTypeName = storageTypeToString(s.sType) + + err := s.initShared() + if err != nil { + return s, err + } + + if config["zfsPool"] == nil { + zfsPool, err := s.d.ConfigValueGet("storage.zfs_pool_name") + if err != nil { + return s, fmt.Errorf("Error checking server config: %v", err) + } + + if zfsPool == "" { + return s, fmt.Errorf("ZFS isn't enabled") + } + + s.zfsPool = zfsPool + } else { + s.zfsPool = config["zfsPool"].(string) + } + + out, err := exec.LookPath("zfs") + if err != nil || len(out) == 0 { + return s, fmt.Errorf("The 'zfs' tool isn't available") + } + + err = s.zfsCheckPool(s.zfsPool) + if err != nil { + if shared.PathExists(shared.VarPath("zfs.img")) { + _ = exec.Command("modprobe", "zfs").Run() + + output, err := exec.Command("zpool", "import", + "-d", shared.VarPath(), s.zfsPool).CombinedOutput() + if err != nil { + return s, fmt.Errorf("Unable to import the ZFS pool: %s", output) + } + } else { + return s, err + } + } + + output, err := exec.Command("zfs", "get", "version", "-H", "-o", "value", s.zfsPool).CombinedOutput() + if err != nil { + return s, fmt.Errorf("The 'zfs' tool isn't working properly") + } + + count, err := fmt.Sscanf(string(output), "%s\n", &s.sTypeVersion) + if err != nil || count != 1 { + return s, fmt.Errorf("The 'zfs' tool isn't working properly") + } + + return s, nil +} + +// Things we don't need to care about +func (s *storageZfs) ContainerStart(container container) error { + return nil +} + +func (s *storageZfs) ContainerStop(container container) error { + return nil +} + +// Things we do have to care about +func (s *storageZfs) ContainerCreate(container container) error { + cPath := container.Path() + fs := fmt.Sprintf("containers/%s", container.Name()) + + err := s.zfsCreate(fs) + if err != nil { + return err + } + + err = os.Symlink(cPath+".zfs", cPath) + if err != nil { + return err + } + + var mode os.FileMode + if container.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(cPath, mode) + if err != nil { + return err + } + + return container.TemplateApply("create") +} + +func (s *storageZfs) ContainerCreateFromImage(container container, fingerprint string) error { + cPath := container.Path() + imagePath := shared.VarPath("images", fingerprint) + subvol := fmt.Sprintf("%s.zfs", imagePath) + fs := fmt.Sprintf("containers/%s", container.Name()) + fsImage := fmt.Sprintf("images/%s", fingerprint) + + if !shared.PathExists(subvol) { + err := s.ImageCreate(fingerprint) + if err != nil { + return err + } + } + + err := s.zfsClone(fsImage, "readonly", fs, true) + if err != nil { + return err + } + + err = os.Symlink(cPath+".zfs", cPath) + if err != nil { + return err + } + + var mode os.FileMode + if container.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(cPath, mode) + if err != nil { + return err + } + + if !container.IsPrivileged() { + err = s.shiftRootfs(container) + if err != nil { + return err + } + } + + return container.TemplateApply("create") +} + +func (s *storageZfs) ContainerCanRestore(container container, sourceContainer container) error { + fields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2) + cName := fields[0] + snapName := 
fmt.Sprintf("snapshot-%s", fields[1]) + + snapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", cName)) + if err != nil { + return err + } + + if snapshots[len(snapshots)-1] != snapName { + return fmt.Errorf("ZFS only supports restoring state to the latest snapshot.") + } + + return nil +} + +func (s *storageZfs) ContainerDelete(container container) error { + fs := fmt.Sprintf("containers/%s", container.Name()) + + removable := true + snaps, err := s.zfsListSnapshots(fs) + if err != nil { + return err + } + + for _, snap := range snaps { + var err error + removable, err = s.zfsSnapshotRemovable(fs, snap) + if err != nil { + return err + } + + if !removable { + break + } + } + + if removable { + origin, err := s.zfsGet(fs, "origin") + if err != nil { + return err + } + origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool)) + + err = s.zfsDestroy(fs) + if err != nil { + return err + } + + err = s.zfsCleanup(origin) + if err != nil { + return err + } + } else { + err := s.zfsSet(fs, "mountpoint", "none") + if err != nil { + return err + } + + err = s.zfsRename(fs, fmt.Sprintf("deleted/containers/%s", uuid.NewRandom().String())) + if err != nil { + return err + } + } + + if shared.PathExists(shared.VarPath(fs)) { + os.Remove(shared.VarPath(fs)) + if err != nil { + return err + } + } + + if shared.PathExists(shared.VarPath(fs) + ".zfs") { + os.Remove(shared.VarPath(fs) + ".zfs") + if err != nil { + return err + } + } + + s.zfsDestroy(fmt.Sprintf("snapshots/%s", container.Name())) + + return nil +} + +func (s *storageZfs) ContainerCopy(container container, sourceContainer container) error { + var sourceFs string + var sourceSnap string + + sourceFields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2) + sourceName := sourceFields[0] + + destName := container.Name() + destFs := fmt.Sprintf("containers/%s", destName) + + if len(sourceFields) == 2 { + sourceSnap = sourceFields[1] + } + + if sourceSnap == "" { + if s.zfsExists(fmt.Sprintf("containers/%s", sourceName)) { + sourceSnap = fmt.Sprintf("copy-%s", uuid.NewRandom().String()) + sourceFs = fmt.Sprintf("containers/%s", sourceName) + err := s.zfsSnapshotCreate(fmt.Sprintf("containers/%s", sourceName), sourceSnap) + if err != nil { + return err + } + } + } else { + if s.zfsExists(fmt.Sprintf("containers/%s@snapshot-%s", sourceName, sourceSnap)) { + sourceFs = fmt.Sprintf("containers/%s", sourceName) + sourceSnap = fmt.Sprintf("snapshot-%s", sourceSnap) + } + } + + if sourceFs != "" { + err := s.zfsClone(sourceFs, sourceSnap, destFs, true) + if err != nil { + return err + } + + cPath := container.Path() + err = os.Symlink(cPath+".zfs", cPath) + if err != nil { + return err + } + + var mode os.FileMode + if container.IsPrivileged() { + mode = 0700 + } else { + mode = 0755 + } + + err = os.Chmod(cPath, mode) + if err != nil { + return err + } + } else { + err := s.ContainerCreate(container) + if err != nil { + return err + } + + output, err := storageRsyncCopy(sourceContainer.Path(), container.Path()) + if err != nil { + return fmt.Errorf("rsync failed: %s", string(output)) + } + } + + return container.TemplateApply("copy") +} + +func (s *storageZfs) zfsMounted(path string) bool { + output, err := exec.Command("zfs", "mount").CombinedOutput() + if err != nil { + shared.Log.Error("error listing zfs mounts", "err", output) + return false + } + + for _, line := range strings.Split(string(output), "\n") { + zfsName := strings.Split(line, " ")[0] + if zfsName == fmt.Sprintf("%s/%s", s.zfsPool, path) { + 
return true + } + } + + return false +} + +func (s *storageZfs) ContainerRename(container container, newName string) error { + oldName := container.Name() + + // Unmount the filesystem + err := s.zfsUnmount(fmt.Sprintf("containers/%s", oldName)) + if err != nil { + return err + } + + // Rename the filesystem + err = s.zfsRename(fmt.Sprintf("containers/%s", oldName), fmt.Sprintf("containers/%s", newName)) + if err != nil { + return err + } + + // Update to the new mountpoint + err = s.zfsSet(fmt.Sprintf("containers/%s", newName), "mountpoint", shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName))) + if err != nil { + return err + } + + // In case ZFS didn't mount the filesystem, do it ourselves + if !shared.PathExists(shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName))) { + for i := 0; i < 20; i++ { + err = s.zfsMount(fmt.Sprintf("containers/%s", newName)) + if err == nil { + break + } + time.Sleep(500 * time.Millisecond) + } + if err != nil { + return err + } + } + + // In case the change of mountpoint didn't remove the old path, do it ourselves + if shared.PathExists(shared.VarPath(fmt.Sprintf("containers/%s.zfs", oldName))) { + err = os.Remove(shared.VarPath(fmt.Sprintf("containers/%s.zfs", oldName))) + if err != nil { + return err + } + } + + // Remove the old symlink + err = os.Remove(shared.VarPath(fmt.Sprintf("containers/%s", oldName))) + if err != nil { + return err + } + + // Create a new symlink + err = os.Symlink(shared.VarPath(fmt.Sprintf("containers/%s.zfs", newName)), shared.VarPath(fmt.Sprintf("containers/%s", newName))) + if err != nil { + return err + } + + // Rename the snapshot path + if shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName))) { + err = os.Rename(shared.VarPath(fmt.Sprintf("snapshots/%s", oldName)), shared.VarPath(fmt.Sprintf("snapshots/%s", newName))) + if err != nil { + return err + } + } + + return nil +} + +func (s *storageZfs) ContainerRestore(container container, sourceContainer container) error { + fields := strings.SplitN(sourceContainer.Name(), shared.SnapshotDelimiter, 2) + cName := fields[0] + snapName := fmt.Sprintf("snapshot-%s", fields[1]) + + err := s.zfsSnapshotRestore(fmt.Sprintf("containers/%s", cName), snapName) + if err != nil { + return err + } + + return nil +} + +func (s *storageZfs) ContainerSetQuota(container container, size int64) error { + var err error + + fs := fmt.Sprintf("containers/%s", container.Name()) + + if size > 0 { + err = s.zfsSet(fs, "quota", fmt.Sprintf("%d", size)) + } else { + err = s.zfsSet(fs, "quota", "none") + } + + if err != nil { + return err + } + + return nil +} + +func (s *storageZfs) ContainerGetUsage(container container) (int64, error) { + var err error + + fs := fmt.Sprintf("containers/%s", container.Name()) + + value, err := s.zfsGet(fs, "used") + if err != nil { + return -1, err + } + + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return -1, err + } + + return valueInt, nil +} + +func (s *storageZfs) ContainerSnapshotCreate(snapshotContainer container, sourceContainer container) error { + fields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2) + cName := fields[0] + snapName := fmt.Sprintf("snapshot-%s", fields[1]) + + err := s.zfsSnapshotCreate(fmt.Sprintf("containers/%s", cName), snapName) + if err != nil { + return err + } + + if !shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", cName))) { + err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", cName)), 0700) + if err != nil { + return err + } + } + + err 
= os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", cName, fields[1]))) + if err != nil { + return err + } + + return nil +} + +func (s *storageZfs) ContainerSnapshotDelete(snapshotContainer container) error { + fields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2) + cName := fields[0] + snapName := fmt.Sprintf("snapshot-%s", fields[1]) + + removable, err := s.zfsSnapshotRemovable(fmt.Sprintf("containers/%s", cName), snapName) + if removable { + err = s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", cName), snapName) + if err != nil { + return err + } + } else { + err = s.zfsSnapshotRename(fmt.Sprintf("containers/%s", cName), snapName, fmt.Sprintf("copy-%s", uuid.NewRandom().String())) + if err != nil { + return err + } + } + + err = os.Remove(shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", cName, fields[1]))) + if err != nil { + return err + } + + parent := shared.VarPath(fmt.Sprintf("snapshots/%s", cName)) + if ok, _ := shared.PathIsEmpty(parent); ok { + err = os.Remove(parent) + if err != nil { + return err + } + } + + return nil +} + +func (s *storageZfs) ContainerSnapshotRename(snapshotContainer container, newName string) error { + oldFields := strings.SplitN(snapshotContainer.Name(), shared.SnapshotDelimiter, 2) + oldcName := oldFields[0] + oldName := fmt.Sprintf("snapshot-%s", oldFields[1]) + + newFields := strings.SplitN(newName, shared.SnapshotDelimiter, 2) + newcName := newFields[0] + newName = fmt.Sprintf("snapshot-%s", newFields[1]) + + if oldName != newName { + err := s.zfsSnapshotRename(fmt.Sprintf("containers/%s", oldcName), oldName, newName) + if err != nil { + return err + } + } + + err := os.Remove(shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", oldcName, oldFields[1]))) + if err != nil { + return err + } + + if !shared.PathExists(shared.VarPath(fmt.Sprintf("snapshots/%s", newcName))) { + err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", newcName)), 0700) + if err != nil { + return err + } + } + + err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", newcName, newFields[1]))) + if err != nil { + return err + } + + parent := shared.VarPath(fmt.Sprintf("snapshots/%s", oldcName)) + if ok, _ := shared.PathIsEmpty(parent); ok { + err = os.Remove(parent) + if err != nil { + return err + } + } + + return nil +} + +func (s *storageZfs) ContainerSnapshotStart(container container) error { + fields := strings.SplitN(container.Name(), shared.SnapshotDelimiter, 2) + if len(fields) < 2 { + return fmt.Errorf("Invalid snapshot name: %s", container.Name()) + } + cName := fields[0] + sName := fields[1] + sourceFs := fmt.Sprintf("containers/%s", cName) + sourceSnap := fmt.Sprintf("snapshot-%s", sName) + destFs := fmt.Sprintf("snapshots/%s/%s", cName, sName) + + err := s.zfsClone(sourceFs, sourceSnap, destFs, false) + if err != nil { + return err + } + + return nil +} + +func (s *storageZfs) ContainerSnapshotStop(container container) error { + fields := strings.SplitN(container.Name(), shared.SnapshotDelimiter, 2) + if len(fields) < 2 { + return fmt.Errorf("Invalid snapshot name: %s", container.Name()) + } + cName := fields[0] + sName := fields[1] + destFs := fmt.Sprintf("snapshots/%s/%s", cName, sName) + + err := s.zfsDestroy(destFs) + if err != nil { + return err + } + + /* zfs creates this directory on clone (start), so we need to clean it + * up on stop */ + return os.RemoveAll(container.Path()) +} + +func (s *storageZfs) ContainerSnapshotCreateEmpty(snapshotContainer container) error { + /* don't touch 
the fs yet, as migration will do that for us */
+	return nil
+}
+
+func (s *storageZfs) ImageCreate(fingerprint string) error {
+	imagePath := shared.VarPath("images", fingerprint)
+	subvol := fmt.Sprintf("%s.zfs", imagePath)
+	fs := fmt.Sprintf("images/%s", fingerprint)
+
+	if s.zfsExists(fmt.Sprintf("deleted/%s", fs)) {
+		// The image was deleted earlier but couldn't be destroyed
+		// (it still had clones); resurrect it instead of re-importing.
+		err := s.zfsRename(fmt.Sprintf("deleted/%s", fs), fs)
+		if err != nil {
+			return err
+		}
+
+		err = s.zfsSet(fs, "mountpoint", subvol)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	err := s.zfsCreate(fs)
+	if err != nil {
+		return err
+	}
+
+	err = untarImage(imagePath, subvol)
+	if err != nil {
+		return err
+	}
+
+	err = s.zfsSet(fs, "readonly", "on")
+	if err != nil {
+		return err
+	}
+
+	err = s.zfsSnapshotCreate(fs, "readonly")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *storageZfs) ImageDelete(fingerprint string) error {
+	fs := fmt.Sprintf("images/%s", fingerprint)
+
+	removable, err := s.zfsSnapshotRemovable(fs, "readonly")
+	if err != nil {
+		return err
+	}
+
+	if removable {
+		err := s.zfsDestroy(fs)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := s.zfsSet(fs, "mountpoint", "none")
+		if err != nil {
+			return err
+		}
+
+		err = s.zfsRename(fs, fmt.Sprintf("deleted/%s", fs))
+		if err != nil {
+			return err
+		}
+	}
+
+	if shared.PathExists(shared.VarPath(fs + ".zfs")) {
+		os.Remove(shared.VarPath(fs + ".zfs"))
+	}
+	return nil
+}
+
+// Helper functions
+func (s *storageZfs) zfsCheckPool(pool string) error {
+	output, err := exec.Command(
+		"zfs", "get", "type", "-H", "-o", "value", pool).CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("%s", strings.Split(string(output), "\n")[0])
+	}
+
+	poolType := strings.Split(string(output), "\n")[0]
+	if poolType != "filesystem" {
+		return fmt.Errorf("Unsupported pool type: %s", poolType)
+	}
+
+	return nil
+}
+
+func (s *storageZfs) zfsClone(source string, name string, dest string, dotZfs bool) error {
+	mountpoint := shared.VarPath(dest)
+	if dotZfs {
+		mountpoint += ".zfs"
+	}
+
+	output, err := exec.Command(
+		"zfs",
+		"clone",
+		"-p",
+		"-o", fmt.Sprintf("mountpoint=%s", mountpoint),
+		fmt.Sprintf("%s/%s@%s", s.zfsPool, source, name),
+		fmt.Sprintf("%s/%s", s.zfsPool, dest)).CombinedOutput()
+	if err != nil {
+		s.log.Error("zfs clone failed", log.Ctx{"output": string(output)})
+		return fmt.Errorf("Failed to clone the filesystem: %s", output)
+	}
+
+	subvols, err := s.zfsListSubvolumes(source)
+	if err != nil {
+		return err
+	}
+
+	for _, sub := range subvols {
+		snaps, err := s.zfsListSnapshots(sub)
+		if err != nil {
+			return err
+		}
+
+		if !shared.StringInSlice(name, snaps) {
+			continue
+		}
+
+		destSubvol := dest + strings.TrimPrefix(sub, source)
+		mountpoint = shared.VarPath(destSubvol)
+		if dotZfs {
+			mountpoint += ".zfs"
+		}
+
+		output, err := exec.Command(
+			"zfs",
+			"clone",
+			"-p",
+			"-o", fmt.Sprintf("mountpoint=%s", mountpoint),
+			fmt.Sprintf("%s/%s@%s", s.zfsPool, sub, name),
+			fmt.Sprintf("%s/%s", s.zfsPool, destSubvol)).CombinedOutput()
+		if err != nil {
+			s.log.Error("zfs clone failed", log.Ctx{"output": string(output)})
+			return fmt.Errorf("Failed to clone the sub-volume: %s", output)
+		}
+	}
+
+	return nil
+}
+
+func (s *storageZfs) zfsCreate(path string) error {
+	output, err := exec.Command(
+		"zfs",
+		"create",
+		"-p",
+		"-o", fmt.Sprintf("mountpoint=%s.zfs", shared.VarPath(path)),
+		fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput()
+	if err != nil {
+		s.log.Error("zfs create failed", log.Ctx{"output": string(output)})
+		return
fmt.Errorf("Failed to create ZFS filesystem: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsDestroy(path string) error { + mountpoint, err := s.zfsGet(path, "mountpoint") + if err != nil { + return err + } + + if mountpoint != "none" && shared.IsMountPoint(mountpoint) { + err := syscall.Unmount(mountpoint, syscall.MNT_DETACH) + if err != nil { + s.log.Error("umount failed", log.Ctx{"err": err}) + return err + } + } + + // Due to open fds or kernel refs, this may fail for a bit, give it 10s + var output []byte + for i := 0; i < 20; i++ { + output, err = exec.Command( + "zfs", + "destroy", + "-r", + fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() + + if err == nil { + break + } + time.Sleep(500 * time.Millisecond) + } + + if err != nil { + s.log.Error("zfs destroy failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to destroy ZFS filesystem: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsCleanup(path string) error { + if strings.HasPrefix(path, "deleted/") { + removablePath, err := s.zfsSnapshotRemovable(path, "") + if err != nil { + return err + } + + if removablePath { + subPath := strings.SplitN(path, "@", 2)[0] + + origin, err := s.zfsGet(subPath, "origin") + if err != nil { + return err + } + origin = strings.TrimPrefix(origin, fmt.Sprintf("%s/", s.zfsPool)) + + err = s.zfsDestroy(subPath) + if err != nil { + return err + } + + s.zfsCleanup(origin) + + return nil + } + } + + return nil +} + +func (s *storageZfs) zfsExists(path string) bool { + output, _ := s.zfsGet(path, "name") + + if output == fmt.Sprintf("%s/%s", s.zfsPool, path) { + return true + } + + return false +} + +func (s *storageZfs) zfsGet(path string, key string) (string, error) { + output, err := exec.Command( + "zfs", + "get", + "-H", + "-p", + "-o", "value", + key, + fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() + if err != nil { + return string(output), fmt.Errorf("Failed to get ZFS config: %s", output) + } + + return strings.TrimRight(string(output), "\n"), nil +} + +func (s *storageZfs) zfsMount(path string) error { + output, err := exec.Command( + "zfs", + "mount", + fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() + if err != nil { + s.log.Error("zfs mount failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to mount ZFS filesystem: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsRename(source string, dest string) error { + output, err := exec.Command( + "zfs", + "rename", + "-p", + fmt.Sprintf("%s/%s", s.zfsPool, source), + fmt.Sprintf("%s/%s", s.zfsPool, dest)).CombinedOutput() + if err != nil { + if s.zfsExists(source) || !s.zfsExists(dest) { + s.log.Error("zfs rename failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to rename ZFS filesystem: %s", output) + } + } + + return nil +} + +func (s *storageZfs) zfsSet(path string, key string, value string) error { + output, err := exec.Command( + "zfs", + "set", + fmt.Sprintf("%s=%s", key, value), + fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() + if err != nil { + s.log.Error("zfs set failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to set ZFS config: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsSnapshotCreate(path string, name string) error { + output, err := exec.Command( + "zfs", + "snapshot", + "-r", + fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)).CombinedOutput() + if err != nil { + s.log.Error("zfs snapshot failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to 
create ZFS snapshot: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsSnapshotDestroy(path string, name string) error { + output, err := exec.Command( + "zfs", + "destroy", + "-r", + fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)).CombinedOutput() + if err != nil { + s.log.Error("zfs destroy failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to destroy ZFS snapshot: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsSnapshotRestore(path string, name string) error { + output, err := exec.Command( + "zfs", + "rollback", + fmt.Sprintf("%s/%s@%s", s.zfsPool, path, name)).CombinedOutput() + if err != nil { + s.log.Error("zfs rollback failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to restore ZFS snapshot: %s", output) + } + + subvols, err := s.zfsListSubvolumes(path) + if err != nil { + return err + } + + for _, sub := range subvols { + snaps, err := s.zfsListSnapshots(sub) + if err != nil { + return err + } + + if !shared.StringInSlice(name, snaps) { + continue + } + + output, err := exec.Command( + "zfs", + "rollback", + fmt.Sprintf("%s/%s@%s", s.zfsPool, sub, name)).CombinedOutput() + if err != nil { + s.log.Error("zfs rollback failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to restore ZFS sub-volume snapshot: %s", output) + } + } + + return nil +} + +func (s *storageZfs) zfsSnapshotRename(path string, oldName string, newName string) error { + output, err := exec.Command( + "zfs", + "rename", + "-r", + fmt.Sprintf("%s/%s@%s", s.zfsPool, path, oldName), + fmt.Sprintf("%s/%s@%s", s.zfsPool, path, newName)).CombinedOutput() + if err != nil { + s.log.Error("zfs snapshot rename failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to rename ZFS snapshot: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsUnmount(path string) error { + output, err := exec.Command( + "zfs", + "unmount", + fmt.Sprintf("%s/%s", s.zfsPool, path)).CombinedOutput() + if err != nil { + s.log.Error("zfs unmount failed", log.Ctx{"output": string(output)}) + return fmt.Errorf("Failed to unmount ZFS filesystem: %s", output) + } + + return nil +} + +func (s *storageZfs) zfsListSubvolumes(path string) ([]string, error) { + path = strings.TrimRight(path, "/") + fullPath := s.zfsPool + if path != "" { + fullPath = fmt.Sprintf("%s/%s", s.zfsPool, path) + } + + output, err := exec.Command( + "zfs", + "list", + "-t", "filesystem", + "-o", "name", + "-H", + "-r", fullPath).CombinedOutput() + if err != nil { + s.log.Error("zfs list failed", log.Ctx{"output": string(output)}) + return []string{}, fmt.Errorf("Failed to list ZFS filesystems: %s", output) + } + + children := []string{} + for _, entry := range strings.Split(string(output), "\n") { + if entry == "" { + continue + } + + if entry == fullPath { + continue + } + + children = append(children, strings.TrimPrefix(entry, fmt.Sprintf("%s/", s.zfsPool))) + } + + return children, nil +} + +func (s *storageZfs) zfsListSnapshots(path string) ([]string, error) { + path = strings.TrimRight(path, "/") + fullPath := s.zfsPool + if path != "" { + fullPath = fmt.Sprintf("%s/%s", s.zfsPool, path) + } + + output, err := exec.Command( + "zfs", + "list", + "-t", "snapshot", + "-o", "name", + "-H", + "-d", "1", + "-s", "creation", + "-r", fullPath).CombinedOutput() + if err != nil { + s.log.Error("zfs list failed", log.Ctx{"output": string(output)}) + return []string{}, fmt.Errorf("Failed to list ZFS snapshots: %s", output) + } + + children := []string{} + for _, entry := 
range strings.Split(string(output), "\n") {
+		if entry == "" {
+			continue
+		}
+
+		if entry == fullPath {
+			continue
+		}
+
+		children = append(children, strings.SplitN(entry, "@", 2)[1])
+	}
+
+	return children, nil
+}
+
+func (s *storageZfs) zfsSnapshotRemovable(path string, name string) (bool, error) {
+	var snap string
+	if name == "" {
+		snap = path
+	} else {
+		snap = fmt.Sprintf("%s@%s", path, name)
+	}
+
+	clones, err := s.zfsGet(snap, "clones")
+	if err != nil {
+		return false, err
+	}
+
+	// A snapshot is only removable once nothing is cloned from it.
+	if clones == "-" || clones == "" {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (s *storageZfs) zfsGetPoolUsers() ([]string, error) {
+	subvols, err := s.zfsListSubvolumes("")
+	if err != nil {
+		return []string{}, err
+	}
+
+	exceptions := []string{
+		"containers",
+		"images",
+		"snapshots",
+		"deleted",
+		"deleted/containers",
+		"deleted/images"}
+
+	users := []string{}
+	for _, subvol := range subvols {
+		if shared.StringInSlice(subvol, exceptions) {
+			continue
+		}
+
+		users = append(users, subvol)
+	}
+
+	return users, nil
+}
+
+// Global functions
+func storageZFSSetPoolNameConfig(d *Daemon, poolname string) error {
+	s := storageZfs{}
+
+	// Confirm the backend is working
+	err := s.initShared()
+	if err != nil {
+		return fmt.Errorf("Unable to initialize the ZFS backend: %v", err)
+	}
+
+	// Confirm the new pool exists and is compatible
+	if poolname != "" {
+		err = s.zfsCheckPool(poolname)
+		if err != nil {
+			return fmt.Errorf("Invalid ZFS pool: %v", err)
+		}
+	}
+
+	// Check if we're switching pools
+	oldPoolname, err := d.ConfigValueGet("storage.zfs_pool_name")
+	if err != nil {
+		return err
+	}
+
+	// Confirm the old pool isn't in use anymore
+	if oldPoolname != "" {
+		s.zfsPool = oldPoolname
+
+		users, err := s.zfsGetPoolUsers()
+		if err != nil {
+			return fmt.Errorf("Error checking if a pool is already in use: %v", err)
+		}
+
+		if len(users) > 0 {
+			return fmt.Errorf("Cannot change ZFS config. Images or containers are still using the ZFS pool: %v", users)
+		}
+	}
+	s.zfsPool = poolname
+
+	// All good, set the new pool name
+	err = d.ConfigValueSet("storage.zfs_pool_name", poolname)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type zfsMigrationSource struct {
+	lxdName            string
+	deleteAfterSending bool
+	zfsName            string
+	zfsParent          string
+
+	zfs *storageZfs
+}
+
+func (s zfsMigrationSource) Name() string {
+	return s.lxdName
+}
+
+func (s zfsMigrationSource) IsSnapshot() bool {
+	return !s.deleteAfterSending
+}
+
+func (s zfsMigrationSource) Send(conn *websocket.Conn) error {
+	args := []string{"send", fmt.Sprintf("%s/%s", s.zfs.zfsPool, s.zfsName)}
+	if s.zfsParent != "" {
+		args = append(args, "-i", fmt.Sprintf("%s/%s", s.zfs.zfsPool, s.zfsParent))
+	}
+
+	cmd := exec.Command("zfs", args...)
+
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		/* If this is not an LXD snapshot, it is the root container.
+		 * The way we zfs send a root container is by taking a temporary zfs
+		 * snapshot and sending that, then deleting that snapshot. Here's where
+		 * we delete it.
+		 *
+		 * Note that we can't use a defer here, because zfsDestroy
+		 * takes some time, and defer doesn't block the current
+		 * goroutine. Due to our retry mechanism for network failures
+		 * (and because zfsDestroy takes a while), we might retry
+		 * moving (and thus creating a temporary snapshot) before the
+		 * last one is deleted, resulting in either a snapshot name
+		 * collision if it was fast enough, or an extra snapshot with
+		 * an odd name on the destination side.
Instead, we don't use
+		 * defer so we always block until the snapshot is dead. */
+		if s.deleteAfterSending {
+			s.zfs.zfsDestroy(s.zfsName)
+		}
+		return err
+	}
+
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		if s.deleteAfterSending {
+			s.zfs.zfsDestroy(s.zfsName)
+		}
+		return err
+	}
+
+	if err := cmd.Start(); err != nil {
+		if s.deleteAfterSending {
+			s.zfs.zfsDestroy(s.zfsName)
+		}
+		return err
+	}
+
+	<-shared.WebsocketSendStream(conn, stdout)
+
+	output, err := ioutil.ReadAll(stderr)
+	if err != nil {
+		shared.Log.Error("problem reading zfs send stderr", "err", err)
+	}
+
+	err = cmd.Wait()
+	if err != nil {
+		shared.Log.Error("problem with zfs send", "output", string(output))
+	}
+	if s.deleteAfterSending {
+		s.zfs.zfsDestroy(s.zfsName)
+	}
+	return err
+}
+
+func (s *storageZfs) MigrationType() MigrationFSType {
+	return MigrationFSType_ZFS
+}
+
+func (s *storageZfs) MigrationSource(container container) ([]MigrationStorageSource, error) {
+	sources := []MigrationStorageSource{}
+
+	/* If the container is a snapshot, let's just send that; we don't need
+	 * to send anything else, because that's all the user asked for.
+	 */
+	if container.IsSnapshot() {
+		fields := strings.SplitN(container.Name(), shared.SnapshotDelimiter, 2)
+		snapshotName := fmt.Sprintf("containers/%s@snapshot-%s", fields[0], fields[1])
+		sources = append(sources, zfsMigrationSource{container.Name(), false, snapshotName, "", s})
+		return sources, nil
+	}
+
+	/* List all the snapshots in order of creation, oldest first. The idea
+	 * is to send incremental streams from the oldest to the newest
+	 * snapshot, hopefully saving on xfer costs. Then, after all that, we
+	 * send the container itself.
+	 */
+	snapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
+	if err != nil {
+		return nil, err
+	}
+
+	for i, snap := range snapshots {
+		/* In the case of e.g. multiple copies running at the same
+		 * time, we will have potentially multiple migration-send
+		 * snapshots. (Or in the case of the test suite, sometimes one
+		 * will take too long to delete.)
+		 */
+		if !strings.HasPrefix(snap, "snapshot-") {
+			continue
+		}
+
+		prev := ""
+		if i > 0 {
+			prev = snapshots[i-1]
+		}
+
+		lxdName := fmt.Sprintf("%s%s%s", container.Name(), shared.SnapshotDelimiter, snap[len("snapshot-"):])
+		zfsName := fmt.Sprintf("containers/%s@%s", container.Name(), snap)
+		parentName := ""
+		if prev != "" {
+			parentName = fmt.Sprintf("containers/%s@%s", container.Name(), prev)
+		}
+
+		sources = append(sources, zfsMigrationSource{lxdName, false, zfsName, parentName, s})
+	}
+
+	/* We can't send running fses, so let's snapshot the fs and send
+	 * the snapshot.
+	 */
+	snapshotName := fmt.Sprintf("migration-send-%s", uuid.NewRandom().String())
+	if err := s.zfsSnapshotCreate(fmt.Sprintf("containers/%s", container.Name()), snapshotName); err != nil {
+		return nil, err
+	}
+
+	zfsName := fmt.Sprintf("containers/%s@%s", container.Name(), snapshotName)
+	zfsParent := ""
+	if len(sources) > 0 {
+		zfsParent = sources[len(sources)-1].(zfsMigrationSource).zfsName
+	}
+
+	sources = append(sources, zfsMigrationSource{container.Name(), true, zfsName, zfsParent, s})
+
+	return sources, nil
+}
+
+func (s *storageZfs) MigrationSink(container container, snapshots []container, conn *websocket.Conn) error {
+	zfsRecv := func(zfsName string) error {
+		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
+		args := []string{"receive", "-F", "-u", zfsFsName}
+		cmd := exec.Command("zfs", args...)
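+		// Sketch of the overall transfer this implements (illustrative
+		// shell equivalent only, not actually run by LXD):
+		//   zfs send [-i pool/fs@parent] pool/fs@snap | zfs receive -F -u pool/fs
+		// The websocket stands in for the pipe: stdin below is fed from
+		// the migration connection and stderr is collected for logging.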
+
+		stdin, err := cmd.StdinPipe()
+		if err != nil {
+			return err
+		}
+
+		stderr, err := cmd.StderrPipe()
+		if err != nil {
+			return err
+		}
+
+		if err := cmd.Start(); err != nil {
+			return err
+		}
+
+		<-shared.WebsocketRecvStream(stdin, conn)
+
+		output, err := ioutil.ReadAll(stderr)
+		if err != nil {
+			shared.Debugf("problem reading zfs recv stderr: %s", err)
+		}
+
+		err = cmd.Wait()
+		if err != nil {
+			shared.Log.Error("problem with zfs recv", "output", string(output))
+		}
+		return err
+	}
+
+	/* In some versions of zfs we can write `zfs recv -F` to mounted
+	 * filesystems, and in some versions we can't. So, let's always unmount
+	 * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv`
+	 * of a snapshot also needs the actual fs that it has snapshotted
+	 * unmounted, so we do this before receiving anything.
+	 *
+	 * Further, `zfs unmount` doesn't actually unmount things right away,
+	 * so we ask /proc/self/mountinfo whether or not this path is mounted
+	 * before continuing so that we're sure the fs is actually unmounted
+	 * before doing a recv.
+	 */
+	zfsName := fmt.Sprintf("containers/%s", container.Name())
+	fsPath := shared.VarPath(fmt.Sprintf("containers/%s.zfs", container.Name()))
+	for i := 0; i < 20; i++ {
+		if shared.IsMountPoint(fsPath) || s.zfsMounted(zfsName) {
+			if err := s.zfsUnmount(zfsName); err != nil {
+				shared.Log.Error("zfs umount error", "path", zfsName, "err", err)
+			}
+		} else {
+			break
+		}
+
+		time.Sleep(500 * time.Millisecond)
+	}
+
+	for _, snap := range snapshots {
+		fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2)
+		name := fmt.Sprintf("containers/%s@snapshot-%s", fields[0], fields[1])
+		if err := zfsRecv(name); err != nil {
+			return err
+		}
+
+		err := os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", fields[0])), 0700)
+		if err != nil {
+			return err
+		}
+
+		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", fields[0], fields[1])))
+		if err != nil {
+			return err
+		}
+	}
+
+	/* finally, do the real container */
+	if err := zfsRecv(zfsName); err != nil {
+		return err
+	}
+
+	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
+	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
+	 * but sometimes it doesn't. Let's try to mount, but not complain about
+	 * failure.
+	 */
+	s.zfsMount(zfsName)
+	return nil
+}

=== added file 'src/github.com/lxc/lxd/lxd/util.go'
--- src/github.com/lxc/lxd/lxd/util.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/lxd/util.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,29 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"net/http"
+
+	"github.com/lxc/lxd/shared"
+)
+
+// WriteJSON encodes body to w; in debug mode the encoded JSON is also
+// captured and logged for inspection.
+func WriteJSON(w http.ResponseWriter, body interface{}) error {
+	var output io.Writer
+	var captured *bytes.Buffer
+
+	output = w
+	if debug {
+		captured = &bytes.Buffer{}
+		output = io.MultiWriter(w, captured)
+	}
+
+	err := json.NewEncoder(output).Encode(body)
+
+	if captured != nil {
+		shared.DebugJson(captured)
+	}
+
+	return err
+}

=== added directory 'src/github.com/lxc/lxd/po'
=== added file 'src/github.com/lxc/lxd/po/de.po'
--- src/github.com/lxc/lxd/po/de.po 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/po/de.po 2016-03-22 15:18:22 +0000
@@ -0,0 +1,1592 @@
+# German translation for LXD
+# Copyright (C) 2015 - LXD contributors
+# This file is distributed under the same license as LXD.
+# Felix Engelmann , 2015.
+# +msgid "" +msgstr "" +"Project-Id-Version: LXD\n" +"Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n" +"POT-Creation-Date: 2016-02-10 22:15-0500\n" +"PO-Revision-Date: 2015-06-13 06:10+0200\n" +"Last-Translator: Felix Engelmann \n" +"Language-Team: \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: lxc/config.go:36 +#, fuzzy +msgid "" +"### This is a yaml representation of the configuration.\n" +"### Any line starting with a '# will be ignored.\n" +"###\n" +"### A sample configuration looks like:\n" +"### name: container1\n" +"### profiles:\n" +"### - default\n" +"### config:\n" +"### volatile.eth0.hwaddr: 00:16:3e:e9:f8:7f\n" +"### devices:\n" +"### homedir:\n" +"### path: /extra\n" +"### source: /home/user\n" +"### type: disk\n" +"### ephemeral: false\n" +"###\n" +"### Note that the name is shown but cannot be changed" +msgstr "" +"### Dies ist eine Darstellung der Konfiguration in yaml.\n" +"### Jede Zeile die mit '# beginnt wird ignoriert.\n" +"###\n" +"### Beispiel einer Konfiguration:\n" +"### name: container1\n" +"### profiles:\n" +"### - default\n" +"### config:\n" +"### volatile.eth0.hwaddr: 00:16:3e:e9:f8:7f\n" +"### devices:\n" +"### homedir:\n" +"### path: /extra\n" +"### source: /home/user\n" +"### type: disk\n" +"### ephemeral: false\n" +"###\n" +"### Der Name wird zwar angezeigt, lässt sich jedoch nicht ändern.\n" + +#: lxc/image.go:29 +#, fuzzy +msgid "" +"### This is a yaml representation of the image properties.\n" +"### Any line starting with a '# will be ignored.\n" +"###\n" +"### Each property is represented by a single line:\n" +"### An example would be:\n" +"### description: My custom image" +msgstr "" +"### Dies ist eine Darstellung der Eigenschaften eines Abbildes in yaml.\n" +"### Jede Zeile die mit '# beginnt wird ignoriert.\n" +"###\n" +"### Pro Eigenschaft wird eine Zeile verwendet:\n" +"### Zum Beispiel:\n" +"### description: Mein eigenes Abbild\n" + +#: lxc/profile.go:26 +#, fuzzy +msgid "" +"### This is a yaml representation of the profile.\n" +"### Any line starting with a '# will be ignored.\n" +"###\n" +"### A profile consists of a set of configuration items followed by a set of\n" +"### devices.\n" +"###\n" +"### An example would look like:\n" +"### name: onenic\n" +"### config:\n" +"### raw.lxc: lxc.aa_profile=unconfined\n" +"### devices:\n" +"### eth0:\n" +"### nictype: bridged\n" +"### parent: lxcbr0\n" +"### type: nic\n" +"###\n" +"### Note that the name is shown but cannot be changed" +msgstr "" +"### Dies ist eine Darstellung eines Profils in yaml.\n" +"### Jede Zeile die mit '# beginnt wird ignoriert.\n" +"###\n" +"### Ein Profil besteht aus mehreren Konfigurationselementen gefolgt von\n" +"### mehrere Geräten.\n" +"###\n" +"### Zum Beispiel:\n" +"### name: onenic\n" +"### config:\n" +"### raw.lxc: lxc.aa_profile=unconfined\n" +"### devices:\n" +"### eth0:\n" +"### nictype: bridged\n" +"### parent: lxcbr0\n" +"### type: nic\n" +"###\n" +"### Der Name wird zwar angezeigt, lässt sich jedoch nicht ändern.\n" + +#: lxc/image.go:500 +#, c-format +msgid "%s (%d more)" +msgstr "" + +#: lxc/snapshot.go:61 +#, fuzzy +msgid "'/' not allowed in snapshot name" +msgstr "'/' ist kein gültiges Zeichen im Namen eines Sicherungspunktes\n" + +#: lxc/info.go:109 lxc/profile.go:221 +msgid "(none)" +msgstr "" + +#: lxc/image.go:520 lxc/image.go:542 +msgid "ALIAS" +msgstr "" + +#: lxc/image.go:524 +msgid "ARCH" +msgstr "" + +#: lxc/remote.go:46 +msgid "Accept certificate" 
+msgstr "Akzeptiere Zertifikat" + +#: lxc/remote.go:181 +#, c-format +msgid "Admin password for %s: " +msgstr "Administrator Passwort für %s: " + +#: lxc/image.go:281 +#, fuzzy +msgid "Aliases:" +msgstr "Aliasse:\n" + +#: lxc/exec.go:53 +msgid "An environment variable of the form HOME=/home/foo" +msgstr "" + +#: lxc/image.go:264 +#, fuzzy, c-format +msgid "Architecture: %s" +msgstr "Architektur: %s\n" + +#: lxc/help.go:49 +msgid "Available commands:" +msgstr "" + +#: lxc/config.go:264 +msgid "COMMON NAME" +msgstr "" + +#: lxc/config.go:111 +#, c-format +msgid "Can't read from stdin: %s" +msgstr "" + +#: lxc/config.go:124 lxc/config.go:157 lxc/config.go:179 +#, c-format +msgid "Can't unset key '%s', it's not currently set." +msgstr "" + +#: lxc/profile.go:329 +msgid "Cannot provide container name to list" +msgstr "" + +#: lxc/remote.go:147 +#, fuzzy, c-format +msgid "Certificate fingerprint: %x" +msgstr "Fingerabdruck des Zertifikats: % x\n" + +#: lxc/action.go:27 +#, fuzzy, c-format +msgid "" +"Changes state of one or more containers to %s.\n" +"\n" +"lxc %s [...]" +msgstr "" +"Ändert den Laufzustand eines Containers in %s.\n" +"\n" +"lxd %s \n" + +#: lxc/remote.go:204 +msgid "Client certificate stored at server: " +msgstr "Gespeichertes Nutzerzertifikat auf dem Server: " + +#: lxc/list.go:80 +msgid "Columns" +msgstr "" + +#: lxc/init.go:132 lxc/init.go:133 lxc/launch.go:36 lxc/launch.go:37 +#, fuzzy +msgid "Config key/value to apply to the new container" +msgstr "kann nicht zum selben Container Namen kopieren" + +#: lxc/config.go:490 lxc/config.go:555 lxc/image.go:597 lxc/profile.go:185 +#, fuzzy, c-format +msgid "Config parsing error: %s" +msgstr "YAML Analyse Fehler %v\n" + +#: lxc/main.go:37 +msgid "Connection refused; is LXD running?" +msgstr "" + +#: lxc/publish.go:54 +msgid "Container name is mandatory" +msgstr "" + +#: lxc/init.go:206 +#, c-format +msgid "Container name is: %s" +msgstr "" + +#: lxc/publish.go:81 lxc/publish.go:101 +#, fuzzy, c-format +msgid "Container published with fingerprint: %s" +msgstr "Abbild mit Fingerabdruck %s importiert\n" + +#: lxc/image.go:116 +msgid "Copy aliases from source" +msgstr "Kopiere Aliasse von der Quelle" + +#: lxc/copy.go:22 +#, fuzzy +msgid "" +"Copy containers within or in between lxd instances.\n" +"\n" +"lxc copy [remote:] [remote:] [--" +"ephemeral|e]" +msgstr "" +"Kopiert Container innerhalb einer oder zwischen lxd Instanzen\n" +"\n" +"lxc copy \n" + +#: lxc/image.go:211 +#, c-format +msgid "Copying the image: %s" +msgstr "" + +#: lxc/snapshot.go:21 +msgid "" +"Create a read-only snapshot of a container.\n" +"\n" +"lxc snapshot [remote:] [--stateful]\n" +"\n" +"Creates a snapshot of the container (optionally with the container's memory\n" +"state). When --stateful is used, LXD attempts to checkpoint the container's\n" +"running state, including process memory state, TCP connections, etc. so that " +"it\n" +"can be restored (via lxc restore) at a later time (although some things, e." 
+"g.\n" +"TCP connections after the TCP timeout window has expired, may not be " +"restored\n" +"successfully).\n" +"\n" +"Example:\n" +"lxc snapshot u1 snap0" +msgstr "" + +#: lxc/image.go:269 lxc/info.go:84 +#, c-format +msgid "Created: %s" +msgstr "" + +#: lxc/init.go:175 lxc/launch.go:112 +#, c-format +msgid "Creating %s" +msgstr "" + +#: lxc/init.go:173 +#, fuzzy +msgid "Creating the container" +msgstr "kann nicht zum selben Container Namen kopieren" + +#: lxc/image.go:523 +msgid "DESCRIPTION" +msgstr "" + +#: lxc/delete.go:25 +#, fuzzy +msgid "" +"Delete containers or container snapshots.\n" +"\n" +"lxc delete [remote:][/] [remote:][[/" +"]...]\n" +"\n" +"Destroy containers or snapshots with any attached data (configuration, " +"snapshots, ...)." +msgstr "" +"Löscht einen Container oder Container Sicherungspunkt.\n" +"\n" +"Entfernt einen Container (oder Sicherungspunkt) und alle dazugehörigen\n" +"Daten (Konfiguration, Sicherungspunkte, ...).\n" + +#: lxc/config.go:603 +#, fuzzy, c-format +msgid "Device %s added to %s" +msgstr "Gerät %s wurde zu %s hinzugefügt\n" + +#: lxc/config.go:631 +#, fuzzy, c-format +msgid "Device %s removed from %s" +msgstr "Gerät %s wurde von %s entfernt\n" + +#: lxc/list.go:228 +msgid "EPHEMERAL" +msgstr "" + +#: lxc/config.go:266 +msgid "EXPIRY DATE" +msgstr "" + +#: lxc/main.go:55 +msgid "Enables debug mode." +msgstr "Aktiviert Debug Modus" + +#: lxc/main.go:54 +msgid "Enables verbose mode." +msgstr "Aktiviert ausführliche Ausgabe" + +#: lxc/help.go:68 +msgid "Environment:" +msgstr "" + +#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:136 lxc/init.go:137 +#: lxc/launch.go:40 lxc/launch.go:41 +msgid "Ephemeral container" +msgstr "Flüchtiger Container" + +#: lxc/monitor.go:56 +msgid "Event type to listen for" +msgstr "" + +#: lxc/exec.go:27 +#, fuzzy +msgid "" +"Execute the specified command in a container.\n" +"\n" +"lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " +"EDITOR=/usr/bin/vim]... " +msgstr "" +"Führt den angegebenen Befehl in einem Container aus.\n" +"\n" +"lxc exec [--env EDITOR=/usr/bin/vim]... \n" + +#: lxc/image.go:273 +#, c-format +msgid "Expires: %s" +msgstr "" + +#: lxc/image.go:275 +msgid "Expires: never" +msgstr "" + +#: lxc/config.go:263 lxc/image.go:521 lxc/image.go:543 +msgid "FINGERPRINT" +msgstr "" + +#: lxc/image.go:255 +#, fuzzy, c-format +msgid "Fingerprint: %s" +msgstr "Fingerabdruck: %s\n" + +#: lxc/finger.go:17 +#, fuzzy +msgid "" +"Fingers the LXD instance to check if it is up and working.\n" +"\n" +"lxc finger " +msgstr "" +"Fingert die LXD Instanz zum überprüfen ob diese funktionsfähig ist.\n" +"\n" +"lxc finger \n" + +#: lxc/main.go:156 +#, fuzzy +msgid "For example: 'lxd-images import ubuntu --alias ubuntu'." +msgstr "Zum Beispiel: 'lxd-images import ubuntu --alias ubuntu'.\n" + +#: lxc/action.go:36 +msgid "Force the container to shutdown." +msgstr "Herunterfahren des Containers erzwingen." + +#: lxc/delete.go:34 lxc/delete.go:35 +msgid "Force the removal of stopped containers." +msgstr "" + +#: lxc/main.go:56 +msgid "Force using the local unix socket." +msgstr "" + +#: lxc/main.go:148 +#, fuzzy +msgid "Generating a client certificate. This may take a minute..." +msgstr "Generiere Nutzerzertifikat. 
Dies kann wenige Minuten dauern...\n" + +#: lxc/list.go:226 +msgid "IPV4" +msgstr "" + +#: lxc/list.go:227 +msgid "IPV6" +msgstr "" + +#: lxc/config.go:265 +msgid "ISSUE DATE" +msgstr "" + +#: lxc/main.go:155 +#, fuzzy +msgid "" +"If this is your first run, you will need to import images using the 'lxd-" +"images' script." +msgstr "" +"Falls dies der erste Start ist, sollten Sie mit dem 'lxd-images' Script " +"Abbilder importieren.\n" + +#: lxc/main.go:57 +msgid "Ignore aliases when determining what command to run." +msgstr "" + +#: lxc/image.go:216 +msgid "Image copied successfully!" +msgstr "" + +#: lxc/image.go:339 +#, fuzzy, c-format +msgid "Image imported with fingerprint: %s" +msgstr "Abbild mit Fingerabdruck %s importiert\n" + +#: lxc/info.go:95 +#, c-format +msgid "Init: %d" +msgstr "" + +#: lxc/init.go:21 +#, fuzzy +msgid "" +"Initialize a container from a particular image.\n" +"\n" +"lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " +"...] [--config|-c ...]\n" +"\n" +"Initializes a container using the specified image and name.\n" +"\n" +"Not specifying -p will result in the default profile.\n" +"Specifying \"-p\" with no argument will result in no profile.\n" +"\n" +"Example:\n" +"lxc init ubuntu u1" +msgstr "" +"Starte Container von gegebenem Abbild.\n" +"\n" +"lxc launch [] [—ephemeral|-e] [—profile|-p …]\n" +"\n" +"Startet einen Container von gegebenem Abbild und mit Namen\n" +"\n" +"Ohne den -p Parameter wird das default Profil benutzt.\n" +"Wird -p ohne Argument angegeben, wird kein Profil verwendet\n" +"\n" +"Beispiel:\n" +"lxc launch ubuntu u1\n" + +#: lxc/init.go:63 lxc/init.go:68 +msgid "Invalid configuration key" +msgstr "" + +#: lxc/file.go:181 +#, c-format +msgid "Invalid source %s" +msgstr "Ungültige Quelle %s" + +#: lxc/file.go:58 +#, c-format +msgid "Invalid target %s" +msgstr "Ungültiges Ziel %s" + +#: lxc/info.go:97 +msgid "Ips:" +msgstr "" + +#: lxc/main.go:35 +msgid "LXD socket not found; is LXD running?" +msgstr "" + +#: lxc/launch.go:20 +#, fuzzy +msgid "" +"Launch a container from a particular image.\n" +"\n" +"lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " +"...] [--config|-c ...]\n" +"\n" +"Launches a container using the specified image and name.\n" +"\n" +"Not specifying -p will result in the default profile.\n" +"Specifying \"-p\" with no argument will result in no profile.\n" +"\n" +"Example:\n" +"lxc launch ubuntu u1" +msgstr "" +"Starte Container von gegebenem Abbild.\n" +"\n" +"lxc launch [] [—ephemeral|-e] [—profile|-p …]\n" +"\n" +"Startet einen Container von gegebenem Abbild und mit Namen\n" +"\n" +"Ohne den -p Parameter wird das default Profil benutzt.\n" +"Wird -p ohne Argument angegeben, wird kein Profil verwendet\n" +"\n" +"Beispiel:\n" +"lxc launch ubuntu u1\n" + +#: lxc/info.go:25 +#, fuzzy +msgid "" +"List information on containers.\n" +"\n" +"This will support remotes and images as well, but only containers for now.\n" +"\n" +"lxc info [:]container [--show-log]" +msgstr "" +"Listet Informationen über Container.\n" +"\n" +"Dies wird entfernte Instanzen und Abbilder unterstützen, \n" +"zur Zeit jedoch nur Container.\n" +"\n" +"lxc info [:]Container [--show-log]\n" + +#: lxc/list.go:54 +#, fuzzy +msgid "" +"Lists the available resources.\n" +"\n" +"lxc list [resource] [filters] -c [columns]\n" +"\n" +"The filters are:\n" +"* A single keyword like \"web\" which will list any container with \"web\" " +"in its name.\n" +"* A key/value pair referring to a configuration item. 
For those, the " +"namespace can be abreviated to the smallest unambiguous identifier:\n" +"* \"user.blah=abc\" will list all containers with the \"blah\" user property " +"set to \"abc\"\n" +"* \"u.blah=abc\" will do the same\n" +"* \"security.privileged=1\" will list all privileged containers\n" +"* \"s.privileged=1\" will do the same\n" +"\n" +"The columns are:\n" +"* n - name\n" +"* s - state\n" +"* 4 - IP4\n" +"* 6 - IP6\n" +"* e - ephemeral\n" +"* S - snapshots\n" +"* p - pid of container init process" +msgstr "" +"Listet vorhandene Ressourcen.\n" +"\n" +"lxc list [Resource] [Filter]\n" +"\n" +"Filter sind:\n" +"* Ein einzelnes Schlüsselwort wie \"web“, was alle Container mit \"web\" im " +"Namen listet.\n" +"* Ein key/value Paar bezüglich eines Konfigurationsparameters. Dafür kann " +"der Namensraum, solange eindeutig, abgekürzt werden:\n" +"* \"user.blah=abc\" listet alle Container mit der \"blah\" Benutzer " +"Eigenschaft \"abc\"\n" +"* \"u.blah=abc\" ebenfalls\n" +"* \"security.privileged=1\" listet alle privilegierten Container\n" +"* \"s.privileged=1\" ebenfalls\n" + +#: lxc/info.go:151 +msgid "Log:" +msgstr "" + +#: lxc/image.go:115 +msgid "Make image public" +msgstr "Veröffentliche Abbild" + +#: lxc/publish.go:29 +#, fuzzy +msgid "Make the image public" +msgstr "Veröffentliche Abbild" + +#: lxc/profile.go:46 +#, fuzzy +msgid "" +"Manage configuration profiles.\n" +"\n" +"lxc profile list [filters] List available profiles.\n" +"lxc profile show Show details of a profile.\n" +"lxc profile create Create a profile.\n" +"lxc profile copy Copy the profile to the " +"specified remote.\n" +"lxc profile get Get profile configuration.\n" +"lxc profile set Set profile configuration.\n" +"lxc profile delete Delete a profile.\n" +"lxc profile edit \n" +" Edit profile, either by launching external editor or reading STDIN.\n" +" Example: lxc profile edit # launch editor\n" +" cat profile.yml | lxc profile edit # read from " +"profile.yml\n" +"lxc profile apply \n" +" Apply a comma-separated list of profiles to a container, in order.\n" +" All profiles passed in this call (and only those) will be applied\n" +" to the specified container.\n" +" Example: lxc profile apply foo default,bar # Apply default and bar\n" +" lxc profile apply foo default # Only default is active\n" +" lxc profile apply '' # no profiles are applied anymore\n" +" lxc profile apply bar,default # Apply default second now\n" +"\n" +"Devices:\n" +"lxc profile device list List devices in the given " +"profile.\n" +"lxc profile device show Show full device details in " +"the given profile.\n" +"lxc profile device remove Remove a device from a " +"profile.\n" +"lxc profile device add " +"[key=value]...\n" +" Add a profile device, such as a disk or a nic, to the containers\n" +" using the specified profile." 
+msgstr "" +"Verwalte Konfigurationsprofile.\n" +"\n" +"lxc profile list [Filter] Listet verfügbare Profile\n" +"lxc profile show Zeigt Details zu einem Profil\n" +"lxc profile create Erstellt ein Profil\n" +"lxc profile edit Bearbeitet das Profil in " +"externem Editor\n" +"lxc profile copy Kopiert das Profil zur " +"entfernten Instanz\n" +"lxc profile set Setzt eine " +"Profilkonfiguration\n" +"lxc profile delete Löscht ein Profil\n" +"lxc profile apply \n" +" Wendet eine durch Kommata getrennte Liste von Profilen,\n" +" in gegeben Reihenfolge, auf einen Container an.\n" +" Alle angegeben, und nur diese, werden auf den Container angewandt.\n" +" Beispiel: lxc profile apply foo default,bar # Wende default und bar an\n" +" lxc profile apply foo default # Nur default ist aktiv\n" +" lxc profile apply '' # keine Profile werden angewandt\n" +" lxc profile apply bar,default # Wende nun default als zweites " +"an\n" +"\n" +"Geräte:\n" +"lxc profile device list Listet Geräte im Profil\n" +"lxc profile device show Zeigt alle Geräte Details im " +"gegebenen Profil.\n" +"lxc profile device remove Entfernt ein Gerät von dem " +"Profil.\n" +"lxc profile device add " +"[key=value]...\n" +" Fügt ein Gerät, wie zum Beispiel eine Festplatte oder Netzwerkkarte, den " +"Containern hinzu,\n" +" die dieses Profil verwenden.\n" + +#: lxc/config.go:56 +#, fuzzy +msgid "" +"Manage configuration.\n" +"\n" +"lxc config device add <[remote:]container> [key=value]... " +"Add a device to a container.\n" +"lxc config device list [remote:] " +"List devices for container.\n" +"lxc config device show [remote:] " +"Show full device details for container.\n" +"lxc config device remove [remote:] " +"Remove device from container.\n" +"\n" +"lxc config get [remote:] key " +"Get configuration key.\n" +"lxc config set [remote:] key value " +"Set container configuration key.\n" +"lxc config unset [remote:] key " +"Unset container configuration key.\n" +"lxc config set key value " +"Set server configuration key.\n" +"lxc config unset key " +"Unset server configuration key.\n" +"lxc config show [--expanded] [remote:] " +"Show container configuration.\n" +"lxc config edit [remote:][container] " +"Edit container configuration in external editor.\n" +" Edit configuration, either by launching external editor or reading " +"STDIN.\n" +" Example: lxc config edit # launch editor\n" +" cat config.yml | lxc config edit # read from config." 
+"yml\n" +"\n" +"lxc config trust list [remote] " +"List all trusted certs.\n" +"lxc config trust add [remote] " +"Add certfile.crt to trusted hosts.\n" +"lxc config trust remove [remote] [hostname|fingerprint] " +"Remove the cert from trusted hosts.\n" +"\n" +"Examples:\n" +"To mount host's /share/c1 onto /opt in the container:\n" +" lxc config device add [remote:]container1 disk source=/" +"share/c1 path=opt\n" +"\n" +"To set an lxc config value:\n" +" lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = " +"1'\n" +"\n" +"To listen on IPv4 and IPv6 port 8443 (you can omit the 8443 its the " +"default):\n" +" lxc config set core.https_address [::]:8443\n" +"\n" +"To set the server trust password:\n" +" lxc config set core.trust_password blah" +msgstr "" +"Verwalte Konfiguration.\n" +"\n" +"lxc config device add [key=value]...\n" +" Füge ein Gerät zu einem Container hinzu\n" +"lxc config device list Listet die Geräte des " +"Containers\n" +"lxc config device show Zeigt alle Geräte " +"Details für den Container\n" +"lxc config device remove Entfernt Gerät vom " +"Container\n" +"lxc config edit Bearbeite Container " +"Konfiguration in externem Editor\n" +"lxc config get key Holt " +"Konfigurationsschlüssel\n" +"lxc config set key [value] Setzt Container " +"Konfigurationsschlüssel\n" +"lxc config show Zeigt Konfiguration " +"des Containers\n" +"lxc config trust list [remote] Liste alle " +"vertrauenswürdigen Zertifikate.\n" +"lxc config trust add [remote] [certfile.crt] Füge certfile.crt zu " +"vertrauenden Instanzen hinzu.\n" +"lxc config trust remove [remote] [hostname|fingerprint]\n" +" Löscht das Zertifikat aus der Liste der vertrauenswürdigen.\n" +"\n" +"Beispiele:\n" +"Zum mounten von /share/c1 des Hosts nach /opt im Container:\n" +"\tlxc config device add container1 mntdir disk source=/share/c1 path=opt\n" +"Um einen lxc config Wert zu setzen:\n" +"\tlxc config set raw.lxc 'lxc.aa_allow_incomplete = 1'\n" +"Um das Server Passwort zur authentifizierung zu setzen:\n" +"\tlxc config set core.trust_password blah\n" + +#: lxc/file.go:33 +#, fuzzy +msgid "" +"Manage files on a container.\n" +"\n" +"lxc file pull [...] \n" +"lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] " +"\n" +"lxc file edit \n" +"\n" +" in the case of pull, in the case of push and in the " +"case of edit are /" +msgstr "" +"Verwaltet Dateien in einem Container.\n" +"\n" +"lxc file pull [...] \n" +"lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] " +"\n" +"\n" +" bei pull und bei push sind jeweils von der Form /\n" + +#: lxc/remote.go:33 +#, fuzzy +msgid "" +"Manage remote LXD servers.\n" +"\n" +"lxc remote add [--accept-certificate] [--password=PASSWORD] [--" +"public] Add the remote at .\n" +"lxc remote remove " +" Remove " +"the remote .\n" +"lxc remote " +"list " +"List all remotes.\n" +"lxc remote rename " +" Rename remote " +" to .\n" +"lxc remote set-url " +" Update 's " +"url to .\n" +"lxc remote set-default " +" Set the " +"default remote.\n" +"lxc remote get-" +"default " +"Print the default remote." 
+msgstr "" +"Verwalte entfernte LXD Server.\n" +"\n" +"lxc remote add [--accept-certificate] [--password=PASSWORT] " +"Fügt die Instanz auf hinzu.\n" +"lxc remote remove " +"Entfernt die Instanz .\n" +"lxc remote list " +"Listet alle entfernte Instanzen.\n" +"lxc remote rename " +"Benennt Instanz von nach um.\n" +"lxc remote set-url " +"Setzt die URL von auf .\n" +"lxc remote set-default " +"Setzt die Standard Instanz.\n" +"lxc remote get-default " +"Gibt die Standard Instanz aus.\n" + +#: lxc/image.go:38 +msgid "" +"Manipulate container images.\n" +"\n" +"In LXD containers are created from images. Those images were themselves\n" +"either generated from an existing container or downloaded from an image\n" +"server.\n" +"\n" +"When using remote images, LXD will automatically cache images for you\n" +"and remove them upon expiration.\n" +"\n" +"The image unique identifier is the hash (sha-256) of its representation\n" +"as a compressed tarball (or for split images, the concatenation of the\n" +"metadata and rootfs tarballs).\n" +"\n" +"Images can be referenced by their full hash, shortest unique partial\n" +"hash or alias name (if one is set).\n" +"\n" +"\n" +"lxc image import [rootfs tarball|URL] [remote:] [--public] [--" +"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " +"[prop=value]\n" +" Import an image tarball (or tarballs) into the LXD image store.\n" +"\n" +"lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] " +"[--public]\n" +" Copy an image from one LXD daemon to another over the network.\n" +"\n" +"lxc image delete [remote:]\n" +" Delete an image from the LXD image store.\n" +"\n" +"lxc image export [remote:]\n" +" Export an image from the LXD image store into a distributable tarball.\n" +"\n" +"lxc image info [remote:]\n" +" Print everything LXD knows about a given image.\n" +"\n" +"lxc image list [remote:] [filter]\n" +" List images in the LXD image store. Filters may be of the\n" +" = form for property based filtering, or part of the image\n" +" hash or part of the image alias name.\n" +"\n" +"lxc image show [remote:]\n" +" Yaml output of the user modifiable properties of an image.\n" +"\n" +"lxc image edit [remote:]\n" +" Edit image, either by launching external editor or reading STDIN.\n" +" Example: lxc image edit # launch editor\n" +" cat image.yml | lxc image edit # read from image.yml\n" +"\n" +"lxc image alias create [remote:] \n" +" Create a new alias for an existing image.\n" +"\n" +"lxc image alias delete [remote:]\n" +" Delete an alias.\n" +"\n" +"lxc image alias list [remote:]\n" +" List the aliases.\n" +msgstr "" + +#: lxc/help.go:86 +msgid "Missing summary." +msgstr "Fehlende Zusammenfassung." 
+
+#: lxc/monitor.go:20
+msgid ""
+"Monitor activity on the LXD server.\n"
+"\n"
+"lxc monitor [remote:] [--type=TYPE...]\n"
+"\n"
+"Connects to the monitoring interface of the specified LXD server.\n"
+"\n"
+"By default will listen to all message types.\n"
+"Specific types to listen to can be specified with --type.\n"
+"\n"
+"Example:\n"
+"lxc monitor --type=logging"
+msgstr ""
+
+#: lxc/file.go:169
+msgid "More than one file to download, but target is not a directory"
+msgstr ""
+"Mehr als eine Datei herunterzuladen, aber das Ziel ist kein Verzeichnis"
+
+#: lxc/move.go:17
+#, fuzzy
+msgid ""
+"Move containers within or in between lxd instances.\n"
+"\n"
+"lxc move [remote:] [remote:]\n"
+" Move a container between two hosts, renaming it if destination name " "differs.\n"
+"\n"
+"lxc move \n"
+" Rename a local container.\n"
+msgstr ""
+"Verschiebt Container innerhalb einer oder zwischen lxd Instanzen\n"
+"\n"
+"lxc move \n"
+
+#: lxc/list.go:224 lxc/remote.go:271
+msgid "NAME"
+msgstr ""
+
+#: lxc/list.go:293 lxc/remote.go:257
+msgid "NO"
+msgstr ""
+
+#: lxc/info.go:82
+#, c-format
+msgid "Name: %s"
+msgstr ""
+
+#: lxc/image.go:117 lxc/publish.go:30
+msgid "New alias to define at target"
+msgstr ""
+
+#: lxc/config.go:277
+#, fuzzy
+msgid "No certificate provided to add"
+msgstr "Kein Zertifikat zum Hinzufügen bereitgestellt"
+
+#: lxc/config.go:300
+msgid "No fingerprint specified."
+msgstr "Kein Fingerabdruck angegeben."
+
+#: lxc/image.go:331
+msgid "Only https:// is supported for remote image import."
+msgstr ""
+
+#: lxc/help.go:63 lxc/main.go:132
+msgid "Options:"
+msgstr ""
+
+#: lxc/image.go:425
+#, c-format
+msgid "Output is in %s"
+msgstr ""
+
+#: lxc/exec.go:54
+msgid "Override the terminal mode (auto, interactive or non-interactive)"
+msgstr ""
+
+#: lxc/list.go:230
+msgid "PID"
+msgstr ""
+
+#: lxc/image.go:522 lxc/remote.go:273
+msgid "PUBLIC"
+msgstr ""
+
+#: lxc/help.go:69
+#, fuzzy
+msgid "Path to an alternate client configuration directory."
+msgstr "Alternatives config Verzeichnis."
+
+#: lxc/help.go:70
+#, fuzzy
+msgid "Path to an alternate server directory."
+msgstr "Alternatives config Verzeichnis."
+
+#: lxc/main.go:39
+msgid "Permisson denied, are you in the lxd group?"
+msgstr ""
+
+#: lxc/help.go:23
+#, fuzzy
+msgid ""
+"Presents details on how to use LXD.\n"
+"\n"
+"lxd help [--all]"
+msgstr ""
+"Zeigt Details über die Benutzung von LXD an.\n"
+"\n"
+"lxd help [--all]\n"
+
+#: lxc/profile.go:186
+msgid "Press enter to open the editor again"
+msgstr ""
+
+#: lxc/config.go:491 lxc/config.go:556 lxc/image.go:598
+msgid "Press enter to start the editor again"
+msgstr ""
+
+#: lxc/help.go:65
+msgid "Print debug information."
+msgstr ""
+
+#: lxc/help.go:64
+msgid "Print less common commands."
+msgstr ""
+
+#: lxc/help.go:66
+msgid "Print verbose information."
+msgstr "" + +#: lxc/version.go:18 +#, fuzzy +msgid "" +"Prints the version number of LXD.\n" +"\n" +"lxc version" +msgstr "" +"Zeigt die Versionsnummer von LXD.\n" +"\n" +"lxc version\n" + +#: lxc/info.go:96 +#, c-format +msgid "Processcount: %d" +msgstr "" + +#: lxc/profile.go:223 +#, fuzzy, c-format +msgid "Profile %s applied to %s" +msgstr "Profil %s wurde auf %s angewandt\n" + +#: lxc/profile.go:137 +#, fuzzy, c-format +msgid "Profile %s created" +msgstr "Profil %s erstellt\n" + +#: lxc/profile.go:207 +#, fuzzy, c-format +msgid "Profile %s deleted" +msgstr "Profil %s gelöscht\n" + +#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:38 lxc/launch.go:39 +#, fuzzy +msgid "Profile to apply to the new container" +msgstr "kann nicht zum selben Container Namen kopieren" + +#: lxc/info.go:93 +#, fuzzy, c-format +msgid "Profiles: %s" +msgstr "Profil %s erstellt\n" + +#: lxc/image.go:277 +#, fuzzy +msgid "Properties:" +msgstr "Eigenschaften:\n" + +#: lxc/remote.go:48 +msgid "Public image server" +msgstr "" + +#: lxc/image.go:265 +#, fuzzy, c-format +msgid "Public: %s" +msgstr "Öffentlich: %s\n" + +#: lxc/publish.go:19 +msgid "" +"Publish containers as images.\n" +"\n" +"lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-" +"value]..." +msgstr "" + +#: lxc/remote.go:47 +msgid "Remote admin password" +msgstr "Entferntes Administrator Passwort" + +#: lxc/delete.go:43 +#, c-format +msgid "Remove %s (yes/no): " +msgstr "" + +#: lxc/delete.go:36 lxc/delete.go:37 +msgid "Require user confirmation." +msgstr "" + +#: lxc/init.go:244 +#, c-format +msgid "Retrieving image: %s" +msgstr "" + +#: lxc/image.go:525 +msgid "SIZE" +msgstr "" + +#: lxc/list.go:229 +msgid "SNAPSHOTS" +msgstr "" + +#: lxc/list.go:225 +msgid "STATE" +msgstr "" + +#: lxc/remote.go:155 +msgid "Server certificate NACKed by user" +msgstr "Server Zertifikat vom Benutzer nicht akzeptiert" + +#: lxc/remote.go:201 +msgid "Server doesn't trust us after adding our cert" +msgstr "" +"Der Server vertraut uns nicht nachdem er unser Zertifikat hinzugefügt hat" + +#: lxc/restore.go:21 +msgid "" +"Set the current state of a resource back to a snapshot.\n" +"\n" +"lxc restore [remote:] [--stateful]\n" +"\n" +"Restores a container from a snapshot (optionally with running state, see\n" +"snapshot help for details).\n" +"\n" +"For example:\n" +"lxc snapshot u1 snap0 # create the snapshot\n" +"lxc restore u1 snap0 # restore the snapshot" +msgstr "" + +#: lxc/file.go:45 +msgid "Set the file's gid on push" +msgstr "Setzt die gid der Datei beim Ãœbertragen" + +#: lxc/file.go:46 +msgid "Set the file's perms on push" +msgstr "Setzt die Dateiberechtigungen beim Ãœbertragen" + +#: lxc/file.go:44 +msgid "Set the file's uid on push" +msgstr "Setzt die uid der Datei beim Ãœbertragen" + +#: lxc/help.go:32 +msgid "Show all commands (not just interesting ones)" +msgstr "Zeigt alle Befehle (nicht nur die interessanten)" + +#: lxc/info.go:34 +msgid "Show the container's last 100 log lines?" +msgstr "Zeige die letzten 100 Zeilen Protokoll des Containers?" + +#: lxc/image.go:262 +#, fuzzy, c-format +msgid "Size: %.2fMB" +msgstr "Größe: %.2vMB\n" + +#: lxc/info.go:122 +msgid "Snapshots:" +msgstr "" + +#: lxc/launch.go:118 +#, c-format +msgid "Starting %s" +msgstr "" + +#: lxc/info.go:87 +#, c-format +msgid "Status: %s" +msgstr "" + +#: lxc/delete.go:97 +msgid "Stopping container failed!" +msgstr "Anhalten des Containers fehlgeschlagen!" + +#: lxc/delete.go:83 +msgid "The container is currently running, stop it first or pass --force." 
+msgstr "" + +#: lxc/publish.go:57 +msgid "There is no \"image name\". Did you want an alias?" +msgstr "" + +#: lxc/action.go:35 +msgid "Time to wait for the container before killing it." +msgstr "Wartezeit bevor der Container gestoppt wird." + +#: lxc/image.go:266 +#, fuzzy +msgid "Timestamps:" +msgstr "Zeitstempel:\n" + +#: lxc/action.go:62 lxc/launch.go:126 +#, c-format +msgid "Try `lxc info --show-log %s` for more info" +msgstr "" + +#: lxc/info.go:89 +msgid "Type: ephemeral" +msgstr "" + +#: lxc/info.go:91 +msgid "Type: persistent" +msgstr "" + +#: lxc/image.go:526 +msgid "UPLOAD DATE" +msgstr "" + +#: lxc/remote.go:272 +msgid "URL" +msgstr "" + +#: lxc/image.go:271 +#, c-format +msgid "Uploaded: %s" +msgstr "" + +#: lxc/main.go:132 +#, fuzzy, c-format +msgid "Usage: %s" +msgstr "" +"Benutzung: %s\n" +"\n" +"Optionen:\n" +"\n" + +#: lxc/help.go:48 +#, fuzzy +msgid "Usage: lxc [subcommand] [options]" +msgstr "" +"Benutzung: lxc [Unterbefehl] [Optionen]\n" +"Verfügbare Befehle:\n" + +#: lxc/delete.go:47 +msgid "User aborted delete operation." +msgstr "" + +#: lxc/restore.go:35 +msgid "" +"Whether or not to restore the container's running state from snapshot (if " +"available)" +msgstr "" +"Laufenden Zustand des Containers aus dem Sicherungspunkt (falls vorhanden) " +"wiederherstellen oder nicht" + +#: lxc/snapshot.go:38 +msgid "Whether or not to snapshot the container's running state" +msgstr "Zustand des laufenden Containers sichern oder nicht" + +#: lxc/config.go:33 +msgid "Whether to show the expanded configuration" +msgstr "" + +#: lxc/list.go:291 lxc/remote.go:259 +msgid "YES" +msgstr "" + +#: lxc/main.go:66 +msgid "`lxc config profile` is deprecated, please use `lxc profile`" +msgstr "" + +#: lxc/launch.go:105 +msgid "bad number of things scanned from image, container or snapshot" +msgstr "" +"Falsche Anzahl an Objekten im Abbild, Container oder Sicherungspunkt gelesen." + +#: lxc/action.go:58 +msgid "bad result type from action" +msgstr "" + +#: lxc/copy.go:78 +msgid "can't copy to the same container name" +msgstr "kann nicht zum selben Container Namen kopieren" + +#: lxc/remote.go:247 +msgid "can't remove the default remote" +msgstr "" + +#: lxc/remote.go:264 +msgid "default" +msgstr "" + +#: lxc/init.go:197 lxc/init.go:202 lxc/launch.go:89 lxc/launch.go:94 +msgid "didn't get any affected image, container or snapshot from server" +msgstr "" + +#: lxc/main.go:25 lxc/main.go:167 +#, fuzzy, c-format +msgid "error: %v" +msgstr "Fehler: %v\n" + +#: lxc/help.go:40 lxc/main.go:127 +#, fuzzy, c-format +msgid "error: unknown command: %s" +msgstr "Fehler: unbekannter Befehl: %s\n" + +#: lxc/launch.go:109 +msgid "got bad version" +msgstr "Versionskonflikt" + +#: lxc/image.go:256 lxc/image.go:503 +msgid "no" +msgstr "" + +#: lxc/copy.go:100 +msgid "not all the profiles from the source exist on the target" +msgstr "nicht alle Profile der Quelle sind am Ziel vorhanden." + +#: lxc/remote.go:148 +#, fuzzy +msgid "ok (y/n)?" +msgstr "OK (y/n)? 
" + +#: lxc/main.go:274 lxc/main.go:278 +#, c-format +msgid "processing aliases failed %s\n" +msgstr "" + +#: lxc/remote.go:291 +#, c-format +msgid "remote %s already exists" +msgstr "entfernte Instanz %s existiert bereits" + +#: lxc/remote.go:243 lxc/remote.go:287 lxc/remote.go:317 lxc/remote.go:328 +#, c-format +msgid "remote %s doesn't exist" +msgstr "entfernte Instanz %s existiert nicht" + +#: lxc/remote.go:227 +#, c-format +msgid "remote %s exists as <%s>" +msgstr "entfernte Instanz %s existiert als <%s>" + +#: lxc/info.go:131 +msgid "stateful" +msgstr "" + +#: lxc/info.go:133 +msgid "stateless" +msgstr "" + +#: lxc/info.go:127 +#, c-format +msgid "taken at %s" +msgstr "" + +#: lxc/exec.go:158 +msgid "unreachable return reached" +msgstr "" + +#: lxc/main.go:207 +msgid "wrong number of subcommand arguments" +msgstr "falsche Anzahl an Parametern für Unterbefehl" + +#: lxc/delete.go:46 lxc/image.go:259 lxc/image.go:507 +msgid "yes" +msgstr "" + +#: lxc/copy.go:38 +msgid "you must specify a source container name" +msgstr "der Name des Ursprung Containers muss angegeben werden" + +#, fuzzy +#~ msgid "Bad image property: %s" +#~ msgstr "Ungültige Abbild Eigenschaft: %s\n" + +#~ msgid "Cannot change profile name" +#~ msgstr "Profilname kann nicht geändert werden" + +#~ msgid "Could not create server cert dir" +#~ msgstr "Kann Verzeichnis für Zertifikate auf dem Server nicht erstellen" + +#, fuzzy +#~ msgid "" +#~ "Create a read-only snapshot of a container.\n" +#~ "\n" +#~ "lxc snapshot [remote:] [--stateful]" +#~ msgstr "" +#~ "Erstellt einen schreibgeschützten Sicherungspunkt des Containers.\n" +#~ "\n" +#~ "lxc snapshot [--stateful]\n" + +#, fuzzy +#~ msgid "Error adding alias %s" +#~ msgstr "Fehler beim hinzufügen des Alias %s\n" + +#, fuzzy +#~ msgid "" +#~ "Manipulate container images.\n" +#~ "\n" +#~ "lxc image import [rootfs tarball|URL] [target] [--public] [--" +#~ "created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " +#~ "[prop=value]\n" +#~ "\n" +#~ "lxc image copy [remote:] : [--alias=ALIAS].. [--copy-" +#~ "aliases] [--public]\n" +#~ "lxc image delete [remote:]\n" +#~ "lxc image export [remote:]\n" +#~ "lxc image info [remote:]\n" +#~ "lxc image list [remote:] [filter]\n" +#~ "lxc image show [remote:]\n" +#~ "lxc image edit [remote:]\n" +#~ " Edit image, either by launching external editor or reading STDIN.\n" +#~ " Example: lxc image edit # launch editor\n" +#~ " cat image.yml | lxc image edit # read from image." +#~ "yml\n" +#~ "\n" +#~ "Lists the images at specified remote, or local images.\n" +#~ "Filters are not yet supported.\n" +#~ "\n" +#~ "lxc image alias create \n" +#~ "lxc image alias delete \n" +#~ "lxc image alias list [remote:]\n" +#~ "\n" +#~ "Create, delete, list image aliases. Example:\n" +#~ "lxc remote add store2 images.linuxcontainers.org\n" +#~ "lxc image alias list store2:" +#~ msgstr "" +#~ "Ändere Container Abbilder\n" +#~ "\n" +#~ "lxc image import [Ziel] [--public] [--created-at=ISO-8601] [--" +#~ "expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [prop=value]\n" +#~ "\n" +#~ "lxc image copy [remote:] : [--alias=ALIAS].. 
[--copy-" +#~ "alias]\n" +#~ "lxc image delete [remote:]\n" +#~ "lxc image edit [remote:]\n" +#~ "lxc image export [remote:]\n" +#~ "lxc image info [remote:]\n" +#~ "lxc image list [remote:] [Filter]\n" +#~ "lxc image show [remote:]\n" +#~ "\n" +#~ "Listet die Abbilder auf der entfernten oder lokalen Instanz.\n" +#~ "Filter werden noch nicht unterstützt.\n" +#~ "\n" +#~ "lxc image alias create \n" +#~ "lxc image alias delete \n" +#~ "lxc remote add images images.linuxcontainers.org\n" +#~ "lxc image alias list images:\n" +#~ "erstelle, lösche und liste Abbild Aliasse\n" + +#~ msgid "No certificate on this connection" +#~ msgstr "Kein Zertifikat für diese Verbindung" + +#~ msgid "" +#~ "Server certificate for host %s has changed. Add correct certificate or " +#~ "remove certificate in %s" +#~ msgstr "" +#~ "Server Zertifikat für Rechner %s hat sich geändert. Fürgen Sie das " +#~ "richtige Zertifikat hinzu oder löschen Sie das Zertifikat unter %s" + +#, fuzzy +#~ msgid "" +#~ "Set the current state of a resource back to its state at the time the " +#~ "snapshot was created.\n" +#~ "\n" +#~ "lxc restore [remote:] [--stateful]" +#~ msgstr "" +#~ "Setzt eine Ressource auf einen Sicherungspunkt zurück.\n" +#~ "\n" +#~ "lxc restore [—stateful]\n" + +#~ msgid "api version mismatch: mine: %q, daemon: %q" +#~ msgstr "API Versionskonflikt: meine: %q, Hintergrund Dienst: %q" + +#~ msgid "bad profile url %s" +#~ msgstr "Fehlerhafte Profil URL %s" + +#~ msgid "bad version in profile url" +#~ msgstr "Falsche Version in Profil URL" + +#, fuzzy +#~ msgid "device already exists" +#~ msgstr "entfernte Instanz %s existiert bereits" + +#, fuzzy +#~ msgid "error." +#~ msgstr "Fehler: %v\n" + +#~ msgid "no response!" +#~ msgstr "keine Antwort!" + +#, fuzzy +#~ msgid "no value found in %q" +#~ msgstr "kein Wert in %q gefunden\n" + +#~ msgid "unknown remote name: %q" +#~ msgstr "unbekannter entfernter Instanz Name: %q" + +#, fuzzy +#~ msgid "unknown transport type: %s" +#~ msgstr "unbekannter entfernter Instanz Name: %q" + +#~ msgid "cannot resolve unix socket address: %v" +#~ msgstr "kann unix Socket Adresse %v nicht auflösen" + +#, fuzzy +#~ msgid "unknown group %s" +#~ msgstr "unbekannter entfernter Instanz Name: %q" + +#, fuzzy +#~ msgid "Information about remotes not yet supported" +#~ msgstr "" +#~ "Informationen über entfernte Instanzen wird noch nicht unterstützt\n" + +#~ msgid "Unknown image command %s" +#~ msgstr "Unbekannter Befehl %s für Abbild" + +#~ msgid "Unknown remote subcommand %s" +#~ msgstr "Unbekannter Unterbefehl %s für entfernte Instanz" + +#~ msgid "Unkonwn config trust command %s" +#~ msgstr "Unbekannter config Befehl %s" + +#, fuzzy +#~ msgid "YAML parse error %v" +#~ msgstr "YAML Analyse Fehler %v\n" + +#~ msgid "invalid argument %s" +#~ msgstr "ungültiges Argument %s" + +#, fuzzy +#~ msgid "unknown profile cmd %s" +#~ msgstr "Unbekannter Befehl %s für Abbild" + +#, fuzzy +#~ msgid "Publish to remote server is not supported yet" +#~ msgstr "" +#~ "Anzeigen von Informationen über entfernte Instanzen wird noch nicht " +#~ "unterstützt\n" + +#, fuzzy +#~ msgid "Use an alternative config path." +#~ msgstr "Alternatives config Verzeichnis." 
+
+#, fuzzy
+#~ msgid ""
+#~ "error: %v\n"
+#~ "%s\n"
+#~ msgstr ""
+#~ "Fehler: %v\n"
+#~ "%s"
+
+#, fuzzy
+#~ msgid ""
+#~ "lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-"
+#~ "p ...]\n"
+#~ "\n"
+#~ "Initializes a container using the specified image and name.\n"
+#~ "\n"
+#~ "Not specifying -p will result in the default profile.\n"
+#~ "Specifying \"-p\" with no argument will result in no profile.\n"
+#~ "\n"
+#~ "Example:\n"
+#~ "lxc init ubuntu u1\n"
+#~ msgstr ""
+#~ "lxc init [] [--ephemeral|-e] [--profile|-p ...]\n"
+#~ "\n"
+#~ "Initialisiert einen Container mit Namen von dem angegebenen Abbild .\n"
+#~ "\n"
+#~ "Ohne den -p Parameter wird das default Profil benutzt.\n"
+#~ "Wird -p ohne Argument angegeben, wird kein Profil verwendet\n"
+#~ "\n"
+#~ "Beispiel:\n"
+#~ "lxc init ubuntu u1\n"
+
+#~ msgid ""
+#~ "Changing the name of a running container during copy isn't supported."
+#~ msgstr ""
+#~ "Den Namen eines laufenden Containers beim Kopieren zu ändern wird nicht "
+#~ "unterstützt."
+
+#~ msgid "non-http remotes are not supported for migration right now"
+#~ msgstr ""
+#~ "die Migration an entfernte Instanzen wird zur Zeit nur über http "
+#~ "unterstützt."
+
+#~ msgid "Cannot connect to unix socket at %s Is the server running?\n"
+#~ msgstr ""
+#~ "Keine Verbindung zum unix Socket unter %s möglich. Läuft der Server?\n"
+
+#~ msgid "Show for remotes is not yet supported\n"
+#~ msgstr ""
+#~ "Anzeigen von Informationen über entfernte Instanzen wird noch nicht "
+#~ "unterstützt\n"
=== added file 'src/github.com/lxc/lxd/po/fr.po'
--- src/github.com/lxc/lxd/po/fr.po 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/po/fr.po 2016-03-22 15:18:22 +0000
@@ -0,0 +1,1358 @@
+# French translation for LXD
+# Copyright (C) 2015 - LXD contributors
+# This file is distributed under the same license as LXD.
+# Stéphane Graber, 2015.
+#
+msgid ""
+msgstr ""
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: lxc/config.go:36
+msgid ""
+"### This is a yaml representation of the configuration.\n"
+"### Any line starting with a '# will be ignored.\n"
+"###\n"
+"### A sample configuration looks like:\n"
+"### name: container1\n"
+"### profiles:\n"
+"### - default\n"
+"### config:\n"
+"### volatile.eth0.hwaddr: 00:16:3e:e9:f8:7f\n"
+"### devices:\n"
+"### homedir:\n"
+"### path: /extra\n"
+"### source: /home/user\n"
+"### type: disk\n"
+"### ephemeral: false\n"
+"###\n"
+"### Note that the name is shown but cannot be changed"
+msgstr ""
+
+#: lxc/image.go:29
+msgid ""
+"### This is a yaml representation of the image properties.\n"
+"### Any line starting with a '# will be ignored.\n"
+"###\n"
+"### Each property is represented by a single line:\n"
+"### An example would be:\n"
+"### description: My custom image"
+msgstr ""
+
+#: lxc/profile.go:26
+msgid ""
+"### This is a yaml representation of the profile.\n"
+"### Any line starting with a '# will be ignored.\n"
+"###\n"
+"### A profile consists of a set of configuration items followed by a set of\n"
+"### devices.\n"
+"###\n"
+"### An example would look like:\n"
+"### name: onenic\n"
+"### config:\n"
+"### raw.lxc: lxc.aa_profile=unconfined\n"
+"### devices:\n"
+"### eth0:\n"
+"### nictype: bridged\n"
+"### parent: lxcbr0\n"
+"### type: nic\n"
+"###\n"
+"### Note that the name is shown but cannot be changed"
+msgstr ""
+
+#: lxc/image.go:500
+#, c-format
+msgid "%s (%d more)"
+msgstr ""
+
+#: lxc/snapshot.go:61
+#, fuzzy
+msgid "'/' not allowed in snapshot name"
+msgstr "'/' n'est pas autorisé dans le nom d'un instantané (snapshot)\n"
+
+#: lxc/info.go:109 lxc/profile.go:221
+msgid "(none)"
+msgstr ""
+
+#: lxc/image.go:520 lxc/image.go:542
+msgid "ALIAS"
+msgstr ""
+
+#: lxc/image.go:524
+msgid "ARCH"
+msgstr ""
+
+#: lxc/remote.go:46
+msgid "Accept certificate"
+msgstr ""
+
+#: lxc/remote.go:181
+#, c-format
+msgid "Admin password for %s: "
+msgstr "Mot de passe administrateur pour %s: "
+
+#: lxc/image.go:281
+msgid "Aliases:"
+msgstr ""
+
+#: lxc/exec.go:53
+msgid "An environment variable of the form HOME=/home/foo"
+msgstr ""
+
+#: lxc/image.go:264
+#, c-format
+msgid "Architecture: %s"
+msgstr ""
+
+#: lxc/help.go:49
+msgid "Available commands:"
+msgstr ""
+
+#: lxc/config.go:264
+msgid "COMMON NAME"
+msgstr ""
+
+#: lxc/config.go:111
+#, c-format
+msgid "Can't read from stdin: %s"
+msgstr ""
+
+#: lxc/config.go:124 lxc/config.go:157 lxc/config.go:179
+#, c-format
+msgid "Can't unset key '%s', it's not currently set."
+msgstr "" + +#: lxc/profile.go:329 +msgid "Cannot provide container name to list" +msgstr "" + +#: lxc/remote.go:147 +#, fuzzy, c-format +msgid "Certificate fingerprint: %x" +msgstr "Empreinte du certificat: % x\n" + +#: lxc/action.go:27 +#, fuzzy, c-format +msgid "" +"Changes state of one or more containers to %s.\n" +"\n" +"lxc %s [...]" +msgstr "Change l'état du conteneur à %s.\n" + +#: lxc/remote.go:204 +msgid "Client certificate stored at server: " +msgstr "Certificat client enregistré avec le serveur: " + +#: lxc/list.go:80 +msgid "Columns" +msgstr "" + +#: lxc/init.go:132 lxc/init.go:133 lxc/launch.go:36 lxc/launch.go:37 +msgid "Config key/value to apply to the new container" +msgstr "" + +#: lxc/config.go:490 lxc/config.go:555 lxc/image.go:597 lxc/profile.go:185 +#, fuzzy, c-format +msgid "Config parsing error: %s" +msgstr "erreur: %v\n" + +#: lxc/main.go:37 +msgid "Connection refused; is LXD running?" +msgstr "" + +#: lxc/publish.go:54 +msgid "Container name is mandatory" +msgstr "" + +#: lxc/init.go:206 +#, c-format +msgid "Container name is: %s" +msgstr "" + +#: lxc/publish.go:81 lxc/publish.go:101 +#, fuzzy, c-format +msgid "Container published with fingerprint: %s" +msgstr "Empreinte du certificat: % x\n" + +#: lxc/image.go:116 +msgid "Copy aliases from source" +msgstr "" + +#: lxc/copy.go:22 +msgid "" +"Copy containers within or in between lxd instances.\n" +"\n" +"lxc copy [remote:] [remote:] [--" +"ephemeral|e]" +msgstr "" + +#: lxc/image.go:211 +#, c-format +msgid "Copying the image: %s" +msgstr "" + +#: lxc/snapshot.go:21 +msgid "" +"Create a read-only snapshot of a container.\n" +"\n" +"lxc snapshot [remote:] [--stateful]\n" +"\n" +"Creates a snapshot of the container (optionally with the container's memory\n" +"state). When --stateful is used, LXD attempts to checkpoint the container's\n" +"running state, including process memory state, TCP connections, etc. so that " +"it\n" +"can be restored (via lxc restore) at a later time (although some things, e." +"g.\n" +"TCP connections after the TCP timeout window has expired, may not be " +"restored\n" +"successfully).\n" +"\n" +"Example:\n" +"lxc snapshot u1 snap0" +msgstr "" + +#: lxc/image.go:269 lxc/info.go:84 +#, c-format +msgid "Created: %s" +msgstr "" + +#: lxc/init.go:175 lxc/launch.go:112 +#, c-format +msgid "Creating %s" +msgstr "" + +#: lxc/init.go:173 +msgid "Creating the container" +msgstr "" + +#: lxc/image.go:523 +msgid "DESCRIPTION" +msgstr "" + +#: lxc/delete.go:25 +msgid "" +"Delete containers or container snapshots.\n" +"\n" +"lxc delete [remote:][/] [remote:][[/" +"]...]\n" +"\n" +"Destroy containers or snapshots with any attached data (configuration, " +"snapshots, ...)." +msgstr "" + +#: lxc/config.go:603 +#, c-format +msgid "Device %s added to %s" +msgstr "" + +#: lxc/config.go:631 +#, c-format +msgid "Device %s removed from %s" +msgstr "" + +#: lxc/list.go:228 +msgid "EPHEMERAL" +msgstr "" + +#: lxc/config.go:266 +msgid "EXPIRY DATE" +msgstr "" + +#: lxc/main.go:55 +msgid "Enables debug mode." +msgstr "Active le mode de déboguage." + +#: lxc/main.go:54 +msgid "Enables verbose mode." +msgstr "Active le mode verbeux." 
+
+#: lxc/help.go:68
+msgid "Environment:"
+msgstr ""
+
+#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:136 lxc/init.go:137
+#: lxc/launch.go:40 lxc/launch.go:41
+msgid "Ephemeral container"
+msgstr ""
+
+#: lxc/monitor.go:56
+msgid "Event type to listen for"
+msgstr ""
+
+#: lxc/exec.go:27
+#, fuzzy
+msgid ""
+"Execute the specified command in a container.\n"
+"\n"
+"lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " "EDITOR=/usr/bin/vim]... "
+msgstr "Exécute la commande spécifiée dans un conteneur.\n"
+
+#: lxc/image.go:273
+#, c-format
+msgid "Expires: %s"
+msgstr ""
+
+#: lxc/image.go:275
+msgid "Expires: never"
+msgstr ""
+
+#: lxc/config.go:263 lxc/image.go:521 lxc/image.go:543
+msgid "FINGERPRINT"
+msgstr ""
+
+#: lxc/image.go:255
+#, fuzzy, c-format
+msgid "Fingerprint: %s"
+msgstr "Empreinte du certificat: % x\n"
+
+#: lxc/finger.go:17
+#, fuzzy
+msgid ""
+"Fingers the LXD instance to check if it is up and working.\n"
+"\n"
+"lxc finger "
+msgstr "Contacte LXD pour voir s'il est fonctionnel.\n"
+
+#: lxc/main.go:156
+msgid "For example: 'lxd-images import ubuntu --alias ubuntu'."
+msgstr ""
+
+#: lxc/action.go:36
+msgid "Force the container to shutdown."
+msgstr "Force l'arrêt du conteneur."
+
+#: lxc/delete.go:34 lxc/delete.go:35
+msgid "Force the removal of stopped containers."
+msgstr ""
+
+#: lxc/main.go:56
+msgid "Force using the local unix socket."
+msgstr ""
+
+#: lxc/main.go:148
+#, fuzzy
+msgid "Generating a client certificate. This may take a minute..."
+msgstr "Génération d'un certificat client. Ceci peut prendre une minute...\n"
+
+#: lxc/list.go:226
+msgid "IPV4"
+msgstr ""
+
+#: lxc/list.go:227
+msgid "IPV6"
+msgstr ""
+
+#: lxc/config.go:265
+msgid "ISSUE DATE"
+msgstr ""
+
+#: lxc/main.go:155
+msgid ""
+"If this is your first run, you will need to import images using the 'lxd-" "images' script."
+msgstr ""
+
+#: lxc/main.go:57
+msgid "Ignore aliases when determining what command to run."
+msgstr ""
+
+#: lxc/image.go:216
+msgid "Image copied successfully!"
+msgstr ""
+
+#: lxc/image.go:339
+#, fuzzy, c-format
+msgid "Image imported with fingerprint: %s"
+msgstr "Empreinte du certificat: % x\n"
+
+#: lxc/info.go:95
+#, c-format
+msgid "Init: %d"
+msgstr ""
+
+#: lxc/init.go:21
+msgid ""
+"Initialize a container from a particular image.\n"
+"\n"
+"lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " "...] [--config|-c ...]\n"
+"\n"
+"Initializes a container using the specified image and name.\n"
+"\n"
+"Not specifying -p will result in the default profile.\n"
+"Specifying \"-p\" with no argument will result in no profile.\n"
+"\n"
+"Example:\n"
+"lxc init ubuntu u1"
+msgstr ""
+
+#: lxc/init.go:63 lxc/init.go:68
+#, fuzzy
+msgid "Invalid configuration key"
+msgstr "Gérer la configuration.\n"
+
+#: lxc/file.go:181
+#, c-format
+msgid "Invalid source %s"
+msgstr "Source invalide %s"
+
+#: lxc/file.go:58
+#, c-format
+msgid "Invalid target %s"
+msgstr "Destination invalide %s"
+
+#: lxc/info.go:97
+msgid "Ips:"
+msgstr ""
+
+#: lxc/main.go:35
+msgid "LXD socket not found; is LXD running?"
+msgstr ""
+
+#: lxc/launch.go:20
+msgid ""
+"Launch a container from a particular image.\n"
+"\n"
+"lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " "...] 
[--config|-c ...]\n" +"\n" +"Launches a container using the specified image and name.\n" +"\n" +"Not specifying -p will result in the default profile.\n" +"Specifying \"-p\" with no argument will result in no profile.\n" +"\n" +"Example:\n" +"lxc launch ubuntu u1" +msgstr "" + +#: lxc/info.go:25 +msgid "" +"List information on containers.\n" +"\n" +"This will support remotes and images as well, but only containers for now.\n" +"\n" +"lxc info [:]container [--show-log]" +msgstr "" + +#: lxc/list.go:54 +msgid "" +"Lists the available resources.\n" +"\n" +"lxc list [resource] [filters] -c [columns]\n" +"\n" +"The filters are:\n" +"* A single keyword like \"web\" which will list any container with \"web\" " +"in its name.\n" +"* A key/value pair referring to a configuration item. For those, the " +"namespace can be abreviated to the smallest unambiguous identifier:\n" +"* \"user.blah=abc\" will list all containers with the \"blah\" user property " +"set to \"abc\"\n" +"* \"u.blah=abc\" will do the same\n" +"* \"security.privileged=1\" will list all privileged containers\n" +"* \"s.privileged=1\" will do the same\n" +"\n" +"The columns are:\n" +"* n - name\n" +"* s - state\n" +"* 4 - IP4\n" +"* 6 - IP6\n" +"* e - ephemeral\n" +"* S - snapshots\n" +"* p - pid of container init process" +msgstr "" + +#: lxc/info.go:151 +msgid "Log:" +msgstr "" + +#: lxc/image.go:115 +msgid "Make image public" +msgstr "" + +#: lxc/publish.go:29 +msgid "Make the image public" +msgstr "" + +#: lxc/profile.go:46 +msgid "" +"Manage configuration profiles.\n" +"\n" +"lxc profile list [filters] List available profiles.\n" +"lxc profile show Show details of a profile.\n" +"lxc profile create Create a profile.\n" +"lxc profile copy Copy the profile to the " +"specified remote.\n" +"lxc profile get Get profile configuration.\n" +"lxc profile set Set profile configuration.\n" +"lxc profile delete Delete a profile.\n" +"lxc profile edit \n" +" Edit profile, either by launching external editor or reading STDIN.\n" +" Example: lxc profile edit # launch editor\n" +" cat profile.yml | lxc profile edit # read from " +"profile.yml\n" +"lxc profile apply \n" +" Apply a comma-separated list of profiles to a container, in order.\n" +" All profiles passed in this call (and only those) will be applied\n" +" to the specified container.\n" +" Example: lxc profile apply foo default,bar # Apply default and bar\n" +" lxc profile apply foo default # Only default is active\n" +" lxc profile apply '' # no profiles are applied anymore\n" +" lxc profile apply bar,default # Apply default second now\n" +"\n" +"Devices:\n" +"lxc profile device list List devices in the given " +"profile.\n" +"lxc profile device show Show full device details in " +"the given profile.\n" +"lxc profile device remove Remove a device from a " +"profile.\n" +"lxc profile device add " +"[key=value]...\n" +" Add a profile device, such as a disk or a nic, to the containers\n" +" using the specified profile." +msgstr "" + +#: lxc/config.go:56 +msgid "" +"Manage configuration.\n" +"\n" +"lxc config device add <[remote:]container> [key=value]... 
" +"Add a device to a container.\n" +"lxc config device list [remote:] " +"List devices for container.\n" +"lxc config device show [remote:] " +"Show full device details for container.\n" +"lxc config device remove [remote:] " +"Remove device from container.\n" +"\n" +"lxc config get [remote:] key " +"Get configuration key.\n" +"lxc config set [remote:] key value " +"Set container configuration key.\n" +"lxc config unset [remote:] key " +"Unset container configuration key.\n" +"lxc config set key value " +"Set server configuration key.\n" +"lxc config unset key " +"Unset server configuration key.\n" +"lxc config show [--expanded] [remote:] " +"Show container configuration.\n" +"lxc config edit [remote:][container] " +"Edit container configuration in external editor.\n" +" Edit configuration, either by launching external editor or reading " +"STDIN.\n" +" Example: lxc config edit # launch editor\n" +" cat config.yml | lxc config edit # read from config." +"yml\n" +"\n" +"lxc config trust list [remote] " +"List all trusted certs.\n" +"lxc config trust add [remote] " +"Add certfile.crt to trusted hosts.\n" +"lxc config trust remove [remote] [hostname|fingerprint] " +"Remove the cert from trusted hosts.\n" +"\n" +"Examples:\n" +"To mount host's /share/c1 onto /opt in the container:\n" +" lxc config device add [remote:]container1 disk source=/" +"share/c1 path=opt\n" +"\n" +"To set an lxc config value:\n" +" lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = " +"1'\n" +"\n" +"To listen on IPv4 and IPv6 port 8443 (you can omit the 8443 its the " +"default):\n" +" lxc config set core.https_address [::]:8443\n" +"\n" +"To set the server trust password:\n" +" lxc config set core.trust_password blah" +msgstr "" + +#: lxc/file.go:33 +msgid "" +"Manage files on a container.\n" +"\n" +"lxc file pull [...] \n" +"lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] " +"\n" +"lxc file edit \n" +"\n" +" in the case of pull, in the case of push and in the " +"case of edit are /" +msgstr "" + +#: lxc/remote.go:33 +msgid "" +"Manage remote LXD servers.\n" +"\n" +"lxc remote add [--accept-certificate] [--password=PASSWORD] [--" +"public] Add the remote at .\n" +"lxc remote remove " +" Remove " +"the remote .\n" +"lxc remote " +"list " +"List all remotes.\n" +"lxc remote rename " +" Rename remote " +" to .\n" +"lxc remote set-url " +" Update 's " +"url to .\n" +"lxc remote set-default " +" Set the " +"default remote.\n" +"lxc remote get-" +"default " +"Print the default remote." +msgstr "" + +#: lxc/image.go:38 +msgid "" +"Manipulate container images.\n" +"\n" +"In LXD containers are created from images. Those images were themselves\n" +"either generated from an existing container or downloaded from an image\n" +"server.\n" +"\n" +"When using remote images, LXD will automatically cache images for you\n" +"and remove them upon expiration.\n" +"\n" +"The image unique identifier is the hash (sha-256) of its representation\n" +"as a compressed tarball (or for split images, the concatenation of the\n" +"metadata and rootfs tarballs).\n" +"\n" +"Images can be referenced by their full hash, shortest unique partial\n" +"hash or alias name (if one is set).\n" +"\n" +"\n" +"lxc image import [rootfs tarball|URL] [remote:] [--public] [--" +"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " +"[prop=value]\n" +" Import an image tarball (or tarballs) into the LXD image store.\n" +"\n" +"lxc image copy [remote:] : [--alias=ALIAS].. 
[--copy-aliases] "
+"[--public]\n"
+" Copy an image from one LXD daemon to another over the network.\n"
+"\n"
+"lxc image delete [remote:]\n"
+" Delete an image from the LXD image store.\n"
+"\n"
+"lxc image export [remote:]\n"
+" Export an image from the LXD image store into a distributable tarball.\n"
+"\n"
+"lxc image info [remote:]\n"
+" Print everything LXD knows about a given image.\n"
+"\n"
+"lxc image list [remote:] [filter]\n"
+" List images in the LXD image store. Filters may be of the\n"
+" = form for property based filtering, or part of the image\n"
+" hash or part of the image alias name.\n"
+"\n"
+"lxc image show [remote:]\n"
+" Yaml output of the user modifiable properties of an image.\n"
+"\n"
+"lxc image edit [remote:]\n"
+" Edit image, either by launching external editor or reading STDIN.\n"
+" Example: lxc image edit # launch editor\n"
+" cat image.yml | lxc image edit # read from image.yml\n"
+"\n"
+"lxc image alias create [remote:] \n"
+" Create a new alias for an existing image.\n"
+"\n"
+"lxc image alias delete [remote:]\n"
+" Delete an alias.\n"
+"\n"
+"lxc image alias list [remote:]\n"
+" List the aliases.\n"
+msgstr ""
+
+#: lxc/help.go:86
+msgid "Missing summary."
+msgstr "Sommaire manquant."
+
+#: lxc/monitor.go:20
+msgid ""
+"Monitor activity on the LXD server.\n"
+"\n"
+"lxc monitor [remote:] [--type=TYPE...]\n"
+"\n"
+"Connects to the monitoring interface of the specified LXD server.\n"
+"\n"
+"By default will listen to all message types.\n"
+"Specific types to listen to can be specified with --type.\n"
+"\n"
+"Example:\n"
+"lxc monitor --type=logging"
+msgstr ""
+
+#: lxc/file.go:169
+msgid "More than one file to download, but target is not a directory"
+msgstr ""
+"Plusieurs fichiers à télécharger mais la destination n'est pas un dossier"
+
+#: lxc/move.go:17
+msgid ""
+"Move containers within or in between lxd instances.\n"
+"\n"
+"lxc move [remote:] [remote:]\n"
+" Move a container between two hosts, renaming it if destination name " "differs.\n"
+"\n"
+"lxc move \n"
+" Rename a local container.\n"
+msgstr ""
+
+#: lxc/list.go:224 lxc/remote.go:271
+msgid "NAME"
+msgstr ""
+
+#: lxc/list.go:293 lxc/remote.go:257
+msgid "NO"
+msgstr ""
+
+#: lxc/info.go:82
+#, c-format
+msgid "Name: %s"
+msgstr ""
+
+#: lxc/image.go:117 lxc/publish.go:30
+msgid "New alias to define at target"
+msgstr ""
+
+#: lxc/config.go:277
+#, fuzzy
+msgid "No certificate provided to add"
+msgstr "Un certificat n'a pas été fourni"
+
+#: lxc/config.go:300
+msgid "No fingerprint specified."
+msgstr "Aucune empreinte n'a été spécifiée."
+
+#: lxc/image.go:331
+msgid "Only https:// is supported for remote image import."
+msgstr ""
+
+#: lxc/help.go:63 lxc/main.go:132
+#, fuzzy
+msgid "Options:"
+msgstr "Opération %s"
+
+#: lxc/image.go:425
+#, c-format
+msgid "Output is in %s"
+msgstr ""
+
+#: lxc/exec.go:54
+msgid "Override the terminal mode (auto, interactive or non-interactive)"
+msgstr ""
+
+#: lxc/list.go:230
+msgid "PID"
+msgstr ""
+
+#: lxc/image.go:522 lxc/remote.go:273
+msgid "PUBLIC"
+msgstr ""
+
+#: lxc/help.go:69
+#, fuzzy
+msgid "Path to an alternate client configuration directory."
+msgstr "Dossier de configuration alternatif."
+
+#: lxc/help.go:70
+#, fuzzy
+msgid "Path to an alternate server directory."
+msgstr "Dossier de configuration alternatif."
+
+#: lxc/main.go:39
+msgid "Permisson denied, are you in the lxd group?"
+msgstr "" + +#: lxc/help.go:23 +#, fuzzy +msgid "" +"Presents details on how to use LXD.\n" +"\n" +"lxd help [--all]" +msgstr "Explique comment utiliser LXD.\n" + +#: lxc/profile.go:186 +msgid "Press enter to open the editor again" +msgstr "" + +#: lxc/config.go:491 lxc/config.go:556 lxc/image.go:598 +msgid "Press enter to start the editor again" +msgstr "" + +#: lxc/help.go:65 +msgid "Print debug information." +msgstr "" + +#: lxc/help.go:64 +msgid "Print less common commands." +msgstr "" + +#: lxc/help.go:66 +msgid "Print verbose information." +msgstr "" + +#: lxc/version.go:18 +#, fuzzy +msgid "" +"Prints the version number of LXD.\n" +"\n" +"lxc version" +msgstr "Montre le numéro de version de LXD.\n" + +#: lxc/info.go:96 +#, c-format +msgid "Processcount: %d" +msgstr "" + +#: lxc/profile.go:223 +#, c-format +msgid "Profile %s applied to %s" +msgstr "" + +#: lxc/profile.go:137 +#, c-format +msgid "Profile %s created" +msgstr "" + +#: lxc/profile.go:207 +#, c-format +msgid "Profile %s deleted" +msgstr "" + +#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:38 lxc/launch.go:39 +msgid "Profile to apply to the new container" +msgstr "" + +#: lxc/info.go:93 +#, fuzzy, c-format +msgid "Profiles: %s" +msgstr "Mauvaise URL pour le conteneur %s" + +#: lxc/image.go:277 +msgid "Properties:" +msgstr "" + +#: lxc/remote.go:48 +msgid "Public image server" +msgstr "" + +#: lxc/image.go:265 +#, c-format +msgid "Public: %s" +msgstr "" + +#: lxc/publish.go:19 +msgid "" +"Publish containers as images.\n" +"\n" +"lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-" +"value]..." +msgstr "" + +#: lxc/remote.go:47 +msgid "Remote admin password" +msgstr "" + +#: lxc/delete.go:43 +#, c-format +msgid "Remove %s (yes/no): " +msgstr "" + +#: lxc/delete.go:36 lxc/delete.go:37 +msgid "Require user confirmation." +msgstr "" + +#: lxc/init.go:244 +#, c-format +msgid "Retrieving image: %s" +msgstr "" + +#: lxc/image.go:525 +msgid "SIZE" +msgstr "" + +#: lxc/list.go:229 +msgid "SNAPSHOTS" +msgstr "" + +#: lxc/list.go:225 +msgid "STATE" +msgstr "" + +#: lxc/remote.go:155 +msgid "Server certificate NACKed by user" +msgstr "Le certificat serveur a été rejeté par l'utilisateur" + +#: lxc/remote.go:201 +msgid "Server doesn't trust us after adding our cert" +msgstr "Identification refuse après l'ajout du certificat client" + +#: lxc/restore.go:21 +msgid "" +"Set the current state of a resource back to a snapshot.\n" +"\n" +"lxc restore [remote:] [--stateful]\n" +"\n" +"Restores a container from a snapshot (optionally with running state, see\n" +"snapshot help for details).\n" +"\n" +"For example:\n" +"lxc snapshot u1 snap0 # create the snapshot\n" +"lxc restore u1 snap0 # restore the snapshot" +msgstr "" + +#: lxc/file.go:45 +msgid "Set the file's gid on push" +msgstr "Définit le gid lors de l'envoi" + +#: lxc/file.go:46 +msgid "Set the file's perms on push" +msgstr "Définit les permissions lors de l'envoi" + +#: lxc/file.go:44 +msgid "Set the file's uid on push" +msgstr "Définit le uid lors de l'envoi" + +#: lxc/help.go:32 +msgid "Show all commands (not just interesting ones)" +msgstr "Affiche toutes les comandes (pas seulement les intéresantes)" + +#: lxc/info.go:34 +msgid "Show the container's last 100 log lines?" 
+msgstr "" + +#: lxc/image.go:262 +#, c-format +msgid "Size: %.2fMB" +msgstr "" + +#: lxc/info.go:122 +msgid "Snapshots:" +msgstr "" + +#: lxc/launch.go:118 +#, c-format +msgid "Starting %s" +msgstr "" + +#: lxc/info.go:87 +#, c-format +msgid "Status: %s" +msgstr "" + +#: lxc/delete.go:97 +msgid "Stopping container failed!" +msgstr "L'arrêt du conteneur a échoué!" + +#: lxc/delete.go:83 +msgid "The container is currently running, stop it first or pass --force." +msgstr "" + +#: lxc/publish.go:57 +msgid "There is no \"image name\". Did you want an alias?" +msgstr "" + +#: lxc/action.go:35 +msgid "Time to wait for the container before killing it." +msgstr "Temps d'attente avant de tuer le conteneur." + +#: lxc/image.go:266 +msgid "Timestamps:" +msgstr "" + +#: lxc/action.go:62 lxc/launch.go:126 +#, c-format +msgid "Try `lxc info --show-log %s` for more info" +msgstr "" + +#: lxc/info.go:89 +msgid "Type: ephemeral" +msgstr "" + +#: lxc/info.go:91 +msgid "Type: persistent" +msgstr "" + +#: lxc/image.go:526 +msgid "UPLOAD DATE" +msgstr "" + +#: lxc/remote.go:272 +msgid "URL" +msgstr "" + +#: lxc/image.go:271 +#, c-format +msgid "Uploaded: %s" +msgstr "" + +#: lxc/main.go:132 +#, fuzzy, c-format +msgid "Usage: %s" +msgstr "" +"Utilisation: %s\n" +"\n" +"Options:\n" +"\n" + +#: lxc/help.go:48 +#, fuzzy +msgid "Usage: lxc [subcommand] [options]" +msgstr "" +"Utilisation: lxc [sous commande] [options]\n" +"Comande disponibles:\n" + +#: lxc/delete.go:47 +msgid "User aborted delete operation." +msgstr "" + +#: lxc/restore.go:35 +#, fuzzy +msgid "" +"Whether or not to restore the container's running state from snapshot (if " +"available)" +msgstr "" +"Est-ce que l'état de fonctionement du conteneur doit être inclus dans " +"l'instantané (snapshot)" + +#: lxc/snapshot.go:38 +msgid "Whether or not to snapshot the container's running state" +msgstr "" +"Est-ce que l'état de fonctionement du conteneur doit être inclus dans " +"l'instantané (snapshot)" + +#: lxc/config.go:33 +msgid "Whether to show the expanded configuration" +msgstr "" + +#: lxc/list.go:291 lxc/remote.go:259 +msgid "YES" +msgstr "" + +#: lxc/main.go:66 +msgid "`lxc config profile` is deprecated, please use `lxc profile`" +msgstr "" + +#: lxc/launch.go:105 +#, fuzzy +msgid "bad number of things scanned from image, container or snapshot" +msgstr "nombre de propriété invalide pour la ressource" + +#: lxc/action.go:58 +msgid "bad result type from action" +msgstr "mauvais type de réponse pour l'action!" + +#: lxc/copy.go:78 +msgid "can't copy to the same container name" +msgstr "" + +#: lxc/remote.go:247 +msgid "can't remove the default remote" +msgstr "" + +#: lxc/remote.go:264 +msgid "default" +msgstr "" + +#: lxc/init.go:197 lxc/init.go:202 lxc/launch.go:89 lxc/launch.go:94 +#, fuzzy +msgid "didn't get any affected image, container or snapshot from server" +msgstr "N'a pas pû obtenir de resource du serveur" + +#: lxc/main.go:25 lxc/main.go:167 +#, fuzzy, c-format +msgid "error: %v" +msgstr "erreur: %v\n" + +#: lxc/help.go:40 lxc/main.go:127 +#, fuzzy, c-format +msgid "error: unknown command: %s" +msgstr "erreur: comande inconnue: %s\n" + +#: lxc/launch.go:109 +msgid "got bad version" +msgstr "reçu une version invalide" + +#: lxc/image.go:256 lxc/image.go:503 +msgid "no" +msgstr "" + +#: lxc/copy.go:100 +msgid "not all the profiles from the source exist on the target" +msgstr "" + +#: lxc/remote.go:148 +#, fuzzy +msgid "ok (y/n)?" +msgstr "ok (y/n)?" 
+
+#: lxc/main.go:274 lxc/main.go:278
+#, c-format
+msgid "processing aliases failed %s\n"
+msgstr ""
+
+#: lxc/remote.go:291
+#, c-format
+msgid "remote %s already exists"
+msgstr "le serveur distant %s existe déjà"
+
+#: lxc/remote.go:243 lxc/remote.go:287 lxc/remote.go:317 lxc/remote.go:328
+#, c-format
+msgid "remote %s doesn't exist"
+msgstr "le serveur distant %s n'existe pas"
+
+#: lxc/remote.go:227
+#, c-format
+msgid "remote %s exists as <%s>"
+msgstr "le serveur distant %s existe en tant que <%s>"
+
+#: lxc/info.go:131
+msgid "stateful"
+msgstr ""
+
+#: lxc/info.go:133
+msgid "stateless"
+msgstr ""
+
+#: lxc/info.go:127
+#, c-format
+msgid "taken at %s"
+msgstr ""
+
+#: lxc/exec.go:158
+msgid "unreachable return reached"
+msgstr "Un retour inaccessible a été atteint"
+
+#: lxc/main.go:207
+msgid "wrong number of subcommand arguments"
+msgstr "nombre d'argument incorrect pour la sous-commande"
+
+#: lxc/delete.go:46 lxc/image.go:259 lxc/image.go:507
+msgid "yes"
+msgstr ""
+
+#: lxc/copy.go:38
+msgid "you must specify a source container name"
+msgstr ""
+
+#, fuzzy
+#~ msgid "Bad image property: %s"
+#~ msgstr "(Image invalide: %s\n"
+
+#~ msgid "Could not create server cert dir"
+#~ msgstr ""
+#~ "Le dossier de stockage des certificats serveurs n'a pas pu être créé"
+
+#, fuzzy
+#~ msgid ""
+#~ "Create a read-only snapshot of a container.\n"
+#~ "\n"
+#~ "lxc snapshot [remote:] [--stateful]"
+#~ msgstr "Prend un instantané (snapshot) en lecture seule d'un conteneur.\n"
+
+#~ msgid "No certificate on this connection"
+#~ msgstr "Aucun certificat pour cette connexion"
+
+#, fuzzy
+#~ msgid ""
+#~ "Set the current state of a resource back to its state at the time the "
+#~ "snapshot was created.\n"
+#~ "\n"
+#~ "lxc restore [remote:] [--stateful]"
+#~ msgstr "Prend un instantané (snapshot) en lecture seule d'un conteneur.\n"
+
+#~ msgid "api version mismatch: mine: %q, daemon: %q"
+#~ msgstr "Version de l'API incompatible: local: %q, distant: %q"
+
+#, fuzzy
+#~ msgid "bad version in profile url"
+#~ msgstr "version invalide dans l'URL du conteneur"
+
+#, fuzzy
+#~ msgid "device already exists"
+#~ msgstr "le serveur distant %s existe déjà"
+
+#, fuzzy
+#~ msgid "error."
+#~ msgstr "erreur: %v\n"
+
+#~ msgid "got bad op status %s"
+#~ msgstr "reçu un statut d'opération invalide %s"
+
+#, fuzzy
+#~ msgid "got bad response type, expected %s got %s"
+#~ msgstr "reçu une mauvaise réponse pour \"exec\""
+
+#~ msgid "invalid wait url %s"
+#~ msgstr "URL d'attente invalide %s"
+
+#~ msgid "no response!"
+#~ msgstr "pas de réponse!"
+
+#~ msgid "unknown remote name: %q"
+#~ msgstr "serveur distant inconnu: %q"
+
+#, fuzzy
+#~ msgid "unknown transport type: %s"
+#~ msgstr "serveur distant inconnu: %q"
+
+#~ msgid "cannot resolve unix socket address: %v"
+#~ msgstr "Ne peut pas résoudre l'adresse du unix socket: %v"
+
+#, fuzzy
+#~ msgid "unknown group %s"
+#~ msgstr "serveur distant inconnu: %q"
+
+#, fuzzy
+#~ msgid "Information about remotes not yet supported"
+#~ msgstr ""
+#~ "Il n'est pas encore possible d'obtenir de l'information sur un serveur "
+#~ "distant\n"
+
+#~ msgid "Unknown image command %s"
+#~ msgstr "Commande d'image inconnue %s"
+
+#~ msgid "Unknown remote subcommand %s"
+#~ msgstr "Commande de serveur distant inconnue %s"
+
+#~ msgid "Unkonwn config trust command %s"
+#~ msgstr "Commande de configuration de confiance inconnue %s"
+
+#, fuzzy
+#~ msgid "YAML parse error %v"
+#~ msgstr "erreur: %v\n"
+
+#~ msgid "invalid argument %s"
+#~ msgstr "Arguments invalides %s"
+
+#, fuzzy
+#~ msgid "unknown profile cmd %s"
+#~ msgstr "Commande de configuration inconnue %s"
+
+#, fuzzy
+#~ msgid "Publish to remote server is not supported yet"
+#~ msgstr ""
+#~ "Il n'est pas encore possible d'obtenir de l'information sur un serveur "
+#~ "distant\n"
+
+#, fuzzy
+#~ msgid "Use an alternative config path."
+#~ msgstr "Dossier de configuration alternatif."
+
+#, fuzzy
+#~ msgid ""
+#~ "error: %v\n"
+#~ "%s\n"
+#~ msgstr ""
+#~ "erreur: %v\n"
+#~ "%s"
+
+#, fuzzy
+#~ msgid "Show for remotes is not yet supported\n"
+#~ msgstr ""
+#~ "Il n'est pas encore possible d'obtenir de l'information sur un serveur "
+#~ "distant\n"
+
+#~ msgid "(Bad alias entry: %s\n"
+#~ msgstr "(Alias invalide: %s\n"
+
+#~ msgid "bad container url %s"
+#~ msgstr "Mauvaise URL pour le conteneur %s"
+
+#~ msgid "bad version in container url"
+#~ msgstr "version invalide dans l'URL du conteneur"
+
+#, fuzzy
+#~ msgid "Ephemeral containers not yet supported\n"
+#~ msgstr ""
+#~ "Il n'est pas encore possible d'obtenir de l'information sur un serveur "
+#~ "distant\n"
+
+#~ msgid "(Bad image entry: %s\n"
+#~ msgstr "(Image invalide: %s\n"
+
+#~ msgid "Certificate already stored.\n"
+#~ msgstr "Le certificat a déjà été enregistré.\n"
+
+#, fuzzy
+#~ msgid "Non-async response from delete!"
+#~ msgstr "Réponse invalide (non-async) durant la suppression!"
+
+#, fuzzy
+#~ msgid "Non-async response from init!"
+#~ msgstr ""
+#~ "Réponse invalide (non-async) durant la création d'un instantané (snapshot)!"
+
+#~ msgid "Non-async response from snapshot!"
+#~ msgstr ""
+#~ "Réponse invalide (non-async) durant la création d'un instantané (snapshot)!"
+
+#~ msgid "Server certificate has changed"
+#~ msgstr "Le certificat serveur a changé"
+
+#, fuzzy
+#~ msgid "Unexpected non-async response"
+#~ msgstr "Réponse invalide (non-async) durant la suppression!"
+
+#~ msgid "bad response type from image list!"
+#~ msgstr "mauvais type de réponse pour la liste d'image!"
+
+#~ msgid "bad response type from list!"
+#~ msgstr "mauvais type de réponse pour la liste!"
+
+#, fuzzy
+#~ msgid "got non-async response!"
+#~ msgstr "Réponse invalide (non-async) durant la suppression!"
+
+#~ msgid "got non-sync response from containers get!"
+#~ msgstr "Réponse invalide (non-async) durant le chargement!"
+
+#~ msgid "Delete a container or container snapshot.\n"
+#~ msgstr "Supprime un conteneur ou l'instantané (snapshot) d'un conteneur.\n"
+
+#~ msgid "List information on containers.\n"
+#~ msgstr "Liste de l'information sur les conteneurs.\n"
+
+#~ msgid "Lists the available resources.\n"
+#~ msgstr "Liste des ressources disponibles.\n"
+
+#~ msgid "Manage files on a container.\n"
+#~ msgstr "Gérer les fichiers du conteneur.\n"
+
+#~ msgid "Manage remote lxc servers.\n"
+#~ msgstr "Gérer les serveurs distants.\n"
+
+#~ msgid "Non-async response from create!"
+#~ msgstr "Réponse invalide (non-async) durant la création!"
+
+#~ msgid "Only 'password' can be set currently"
+#~ msgstr "Seul 'password' peut être configuré en ce moment"
+
+#~ msgid ""
+#~ "lxc image import [target] [--created-at=ISO-8601] [--expires-" "at=ISO-8601] [--fingerprint=HASH] [prop=value]\n"
+#~ msgstr ""
+#~ "lxc image import [destination] [--created-at=ISO-8601] [--" "expires-at=ISO-8601] [--fingerprint=HASH] [propriété=valeur]\n"
+
+#~ msgid "lxc init ubuntu []\n"
+#~ msgstr "lxc init ubuntu []\n"
+
+#~ msgid "lxc launch ubuntu []\n"
+#~ msgstr "lxc launch ubuntu []\n"
=== added file 'src/github.com/lxc/lxd/po/ja.po'
--- src/github.com/lxc/lxd/po/ja.po 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/po/ja.po 2016-03-22 15:18:22 +0000
@@ -0,0 +1,1353 @@
+# Japanese translation for LXD
+# Copyright (C) 2015 - LXD contributors
+# This file is distributed under the same license as LXD.
+# Hiroaki Nakamura , 2015.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: LXD\n"
+"Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n"
+"POT-Creation-Date: 2016-02-10 22:15-0500\n"
+"PO-Revision-Date: 2015-03-13 23:44+0900\n"
+"Last-Translator: KATOH Yasufumi \n"
+"Language-Team: Japanese \n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: lxc/config.go:36
+msgid ""
+"### This is a yaml representation of the configuration.\n"
+"### Any line starting with a '# will be ignored.\n"
+"###\n"
+"### A sample configuration looks like:\n"
+"### name: container1\n"
+"### profiles:\n"
+"### - default\n"
+"### config:\n"
+"### volatile.eth0.hwaddr: 00:16:3e:e9:f8:7f\n"
+"### devices:\n"
+"### homedir:\n"
+"### path: /extra\n"
+"### source: /home/user\n"
+"### type: disk\n"
+"### ephemeral: false\n"
+"###\n"
+"### Note that the name is shown but cannot be changed"
+msgstr ""
+
+#: lxc/image.go:29
+msgid ""
+"### This is a yaml representation of the image properties.\n"
+"### Any line starting with a '# will be ignored.\n"
+"###\n"
+"### Each property is represented by a single line:\n"
+"### An example would be:\n"
+"### description: My custom image"
+msgstr ""
+
+#: lxc/profile.go:26
+msgid ""
+"### This is a yaml representation of the profile.\n"
+"### Any line starting with a '# will be ignored.\n"
+"###\n"
+"### A profile consists of a set of configuration items followed by a set of\n"
+"### devices.\n"
+"###\n"
+"### An example would look like:\n"
+"### name: onenic\n"
+"### config:\n"
+"### raw.lxc: lxc.aa_profile=unconfined\n"
+"### devices:\n"
+"### eth0:\n"
+"### nictype: bridged\n"
+"### parent: lxcbr0\n"
+"### type: nic\n"
+"###\n"
+"### Note that the name is shown but cannot be changed"
+msgstr ""
+
+#: lxc/image.go:500
+#, c-format
+msgid "%s (%d more)"
+msgstr ""
+
+#: lxc/snapshot.go:61
+#, fuzzy
+msgid "'/' not allowed in snapshot name"
+msgstr "'/' はスナップショットの名前には使用できません。\n"
+
+#: lxc/info.go:109 lxc/profile.go:221
+msgid "(none)" +msgstr "" + +#: lxc/image.go:520 lxc/image.go:542 +msgid "ALIAS" +msgstr "" + +#: lxc/image.go:524 +msgid "ARCH" +msgstr "" + +#: lxc/remote.go:46 +msgid "Accept certificate" +msgstr "" + +#: lxc/remote.go:181 +#, c-format +msgid "Admin password for %s: " +msgstr "%s ã®ç®¡ç†è€…パスワード: " + +#: lxc/image.go:281 +#, fuzzy +msgid "Aliases:" +msgstr "エイリアス:\n" + +#: lxc/exec.go:53 +msgid "An environment variable of the form HOME=/home/foo" +msgstr "" + +#: lxc/image.go:264 +#, c-format +msgid "Architecture: %s" +msgstr "" + +#: lxc/help.go:49 +msgid "Available commands:" +msgstr "" + +#: lxc/config.go:264 +msgid "COMMON NAME" +msgstr "" + +#: lxc/config.go:111 +#, c-format +msgid "Can't read from stdin: %s" +msgstr "" + +#: lxc/config.go:124 lxc/config.go:157 lxc/config.go:179 +#, c-format +msgid "Can't unset key '%s', it's not currently set." +msgstr "" + +#: lxc/profile.go:329 +msgid "Cannot provide container name to list" +msgstr "" + +#: lxc/remote.go:147 +#, fuzzy, c-format +msgid "Certificate fingerprint: %x" +msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" + +#: lxc/action.go:27 +#, fuzzy, c-format +msgid "" +"Changes state of one or more containers to %s.\n" +"\n" +"lxc %s [...]" +msgstr "コンテナã®çŠ¶æ…‹ã‚’ %s ã«å¤‰æ›´ã—ã¾ã™ã€‚\n" + +#: lxc/remote.go:204 +msgid "Client certificate stored at server: " +msgstr "クライアント証明書ãŒã‚µãƒ¼ãƒã«æ ¼ç´ã•ã‚Œã¾ã—ãŸ: " + +#: lxc/list.go:80 +msgid "Columns" +msgstr "" + +#: lxc/init.go:132 lxc/init.go:133 lxc/launch.go:36 lxc/launch.go:37 +msgid "Config key/value to apply to the new container" +msgstr "" + +#: lxc/config.go:490 lxc/config.go:555 lxc/image.go:597 lxc/profile.go:185 +#, fuzzy, c-format +msgid "Config parsing error: %s" +msgstr "エラー: %v\n" + +#: lxc/main.go:37 +msgid "Connection refused; is LXD running?" +msgstr "" + +#: lxc/publish.go:54 +#, fuzzy +msgid "Container name is mandatory" +msgstr "コンテナãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" + +#: lxc/init.go:206 +#, fuzzy, c-format +msgid "Container name is: %s" +msgstr "コンテナãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" + +#: lxc/publish.go:81 lxc/publish.go:101 +#, fuzzy, c-format +msgid "Container published with fingerprint: %s" +msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" + +#: lxc/image.go:116 +msgid "Copy aliases from source" +msgstr "" + +#: lxc/copy.go:22 +msgid "" +"Copy containers within or in between lxd instances.\n" +"\n" +"lxc copy [remote:] [remote:] [--" +"ephemeral|e]" +msgstr "" + +#: lxc/image.go:211 +#, c-format +msgid "Copying the image: %s" +msgstr "" + +#: lxc/snapshot.go:21 +msgid "" +"Create a read-only snapshot of a container.\n" +"\n" +"lxc snapshot [remote:] [--stateful]\n" +"\n" +"Creates a snapshot of the container (optionally with the container's memory\n" +"state). When --stateful is used, LXD attempts to checkpoint the container's\n" +"running state, including process memory state, TCP connections, etc. so that " +"it\n" +"can be restored (via lxc restore) at a later time (although some things, e." 
+"g.\n" +"TCP connections after the TCP timeout window has expired, may not be " +"restored\n" +"successfully).\n" +"\n" +"Example:\n" +"lxc snapshot u1 snap0" +msgstr "" + +#: lxc/image.go:269 lxc/info.go:84 +#, c-format +msgid "Created: %s" +msgstr "" + +#: lxc/init.go:175 lxc/launch.go:112 +#, c-format +msgid "Creating %s" +msgstr "" + +#: lxc/init.go:173 +msgid "Creating the container" +msgstr "" + +#: lxc/image.go:523 +msgid "DESCRIPTION" +msgstr "" + +#: lxc/delete.go:25 +msgid "" +"Delete containers or container snapshots.\n" +"\n" +"lxc delete [remote:][/] [remote:][[/" +"]...]\n" +"\n" +"Destroy containers or snapshots with any attached data (configuration, " +"snapshots, ...)." +msgstr "" + +#: lxc/config.go:603 +#, fuzzy, c-format +msgid "Device %s added to %s" +msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ\n" + +#: lxc/config.go:631 +#, fuzzy, c-format +msgid "Device %s removed from %s" +msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã‹ã‚‰å‰Šé™¤ã•ã‚Œã¾ã—ãŸ\n" + +#: lxc/list.go:228 +msgid "EPHEMERAL" +msgstr "" + +#: lxc/config.go:266 +msgid "EXPIRY DATE" +msgstr "" + +#: lxc/main.go:55 +msgid "Enables debug mode." +msgstr "デãƒãƒƒã‚°ãƒ¢ãƒ¼ãƒ‰ã‚’有効ã«ã—ã¾ã™ã€‚" + +#: lxc/main.go:54 +msgid "Enables verbose mode." +msgstr "詳細モードを有効ã«ã—ã¾ã™ã€‚" + +#: lxc/help.go:68 +msgid "Environment:" +msgstr "" + +#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:136 lxc/init.go:137 +#: lxc/launch.go:40 lxc/launch.go:41 +msgid "Ephemeral container" +msgstr "" + +#: lxc/monitor.go:56 +msgid "Event type to listen for" +msgstr "" + +#: lxc/exec.go:27 +#, fuzzy +msgid "" +"Execute the specified command in a container.\n" +"\n" +"lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env " +"EDITOR=/usr/bin/vim]... " +msgstr "コンテナã§æŒ‡å®šã—ãŸã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã—ã¾ã™ã€‚\n" + +#: lxc/image.go:273 +#, c-format +msgid "Expires: %s" +msgstr "" + +#: lxc/image.go:275 +msgid "Expires: never" +msgstr "" + +#: lxc/config.go:263 lxc/image.go:521 lxc/image.go:543 +msgid "FINGERPRINT" +msgstr "" + +#: lxc/image.go:255 +#, fuzzy, c-format +msgid "Fingerprint: %s" +msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" + +#: lxc/finger.go:17 +#, fuzzy +msgid "" +"Fingers the LXD instance to check if it is up and working.\n" +"\n" +"lxc finger " +msgstr "LXDインスタンスãŒç¨¼åƒä¸­ã‹ã‚’確èªã—ã¾ã™ã€‚\n" + +#: lxc/main.go:156 +msgid "For example: 'lxd-images import ubuntu --alias ubuntu'." +msgstr "" + +#: lxc/action.go:36 +msgid "Force the container to shutdown." +msgstr "コンテナを強制シャットダウンã—ã¾ã™ã€‚" + +#: lxc/delete.go:34 lxc/delete.go:35 +msgid "Force the removal of stopped containers." +msgstr "" + +#: lxc/main.go:56 +msgid "Force using the local unix socket." +msgstr "" + +#: lxc/main.go:148 +#, fuzzy +msgid "Generating a client certificate. This may take a minute..." +msgstr "クライアント証明書を生æˆã—ã¾ã™ã€‚1分ãらã„ã‹ã‹ã‚Šã¾ã™â€¦\n" + +#: lxc/list.go:226 +msgid "IPV4" +msgstr "" + +#: lxc/list.go:227 +msgid "IPV6" +msgstr "" + +#: lxc/config.go:265 +msgid "ISSUE DATE" +msgstr "" + +#: lxc/main.go:155 +msgid "" +"If this is your first run, you will need to import images using the 'lxd-" +"images' script." +msgstr "" + +#: lxc/main.go:57 +msgid "Ignore aliases when determining what command to run." +msgstr "" + +#: lxc/image.go:216 +msgid "Image copied successfully!" 
+msgstr "" + +#: lxc/image.go:339 +#, fuzzy, c-format +msgid "Image imported with fingerprint: %s" +msgstr "証明書ã®ãƒ•ã‚£ãƒ³ã‚¬ãƒ¼ãƒ—リント: % x\n" + +#: lxc/info.go:95 +#, c-format +msgid "Init: %d" +msgstr "" + +#: lxc/init.go:21 +msgid "" +"Initialize a container from a particular image.\n" +"\n" +"lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " +"...] [--config|-c ...]\n" +"\n" +"Initializes a container using the specified image and name.\n" +"\n" +"Not specifying -p will result in the default profile.\n" +"Specifying \"-p\" with no argument will result in no profile.\n" +"\n" +"Example:\n" +"lxc init ubuntu u1" +msgstr "" + +#: lxc/init.go:63 lxc/init.go:68 +#, fuzzy +msgid "Invalid configuration key" +msgstr "設定を管ç†ã—ã¾ã™ã€‚\n" + +#: lxc/file.go:181 +#, c-format +msgid "Invalid source %s" +msgstr "ä¸æ­£ãªã‚½ãƒ¼ã‚¹ %s" + +#: lxc/file.go:58 +#, c-format +msgid "Invalid target %s" +msgstr "ä¸æ­£ãªé€ã‚Šå…ˆ %s" + +#: lxc/info.go:97 +msgid "Ips:" +msgstr "" + +#: lxc/main.go:35 +msgid "LXD socket not found; is LXD running?" +msgstr "" + +#: lxc/launch.go:20 +msgid "" +"Launch a container from a particular image.\n" +"\n" +"lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p " +"...] [--config|-c ...]\n" +"\n" +"Launches a container using the specified image and name.\n" +"\n" +"Not specifying -p will result in the default profile.\n" +"Specifying \"-p\" with no argument will result in no profile.\n" +"\n" +"Example:\n" +"lxc launch ubuntu u1" +msgstr "" + +#: lxc/info.go:25 +msgid "" +"List information on containers.\n" +"\n" +"This will support remotes and images as well, but only containers for now.\n" +"\n" +"lxc info [:]container [--show-log]" +msgstr "" + +#: lxc/list.go:54 +msgid "" +"Lists the available resources.\n" +"\n" +"lxc list [resource] [filters] -c [columns]\n" +"\n" +"The filters are:\n" +"* A single keyword like \"web\" which will list any container with \"web\" " +"in its name.\n" +"* A key/value pair referring to a configuration item. 
For those, the " +"namespace can be abreviated to the smallest unambiguous identifier:\n" +"* \"user.blah=abc\" will list all containers with the \"blah\" user property " +"set to \"abc\"\n" +"* \"u.blah=abc\" will do the same\n" +"* \"security.privileged=1\" will list all privileged containers\n" +"* \"s.privileged=1\" will do the same\n" +"\n" +"The columns are:\n" +"* n - name\n" +"* s - state\n" +"* 4 - IP4\n" +"* 6 - IP6\n" +"* e - ephemeral\n" +"* S - snapshots\n" +"* p - pid of container init process" +msgstr "" + +#: lxc/info.go:151 +msgid "Log:" +msgstr "" + +#: lxc/image.go:115 +msgid "Make image public" +msgstr "" + +#: lxc/publish.go:29 +msgid "Make the image public" +msgstr "" + +#: lxc/profile.go:46 +msgid "" +"Manage configuration profiles.\n" +"\n" +"lxc profile list [filters] List available profiles.\n" +"lxc profile show Show details of a profile.\n" +"lxc profile create Create a profile.\n" +"lxc profile copy Copy the profile to the " +"specified remote.\n" +"lxc profile get Get profile configuration.\n" +"lxc profile set Set profile configuration.\n" +"lxc profile delete Delete a profile.\n" +"lxc profile edit \n" +" Edit profile, either by launching external editor or reading STDIN.\n" +" Example: lxc profile edit # launch editor\n" +" cat profile.yml | lxc profile edit # read from " +"profile.yml\n" +"lxc profile apply \n" +" Apply a comma-separated list of profiles to a container, in order.\n" +" All profiles passed in this call (and only those) will be applied\n" +" to the specified container.\n" +" Example: lxc profile apply foo default,bar # Apply default and bar\n" +" lxc profile apply foo default # Only default is active\n" +" lxc profile apply '' # no profiles are applied anymore\n" +" lxc profile apply bar,default # Apply default second now\n" +"\n" +"Devices:\n" +"lxc profile device list List devices in the given " +"profile.\n" +"lxc profile device show Show full device details in " +"the given profile.\n" +"lxc profile device remove Remove a device from a " +"profile.\n" +"lxc profile device add " +"[key=value]...\n" +" Add a profile device, such as a disk or a nic, to the containers\n" +" using the specified profile." +msgstr "" + +#: lxc/config.go:56 +msgid "" +"Manage configuration.\n" +"\n" +"lxc config device add <[remote:]container> [key=value]... " +"Add a device to a container.\n" +"lxc config device list [remote:] " +"List devices for container.\n" +"lxc config device show [remote:] " +"Show full device details for container.\n" +"lxc config device remove [remote:] " +"Remove device from container.\n" +"\n" +"lxc config get [remote:] key " +"Get configuration key.\n" +"lxc config set [remote:] key value " +"Set container configuration key.\n" +"lxc config unset [remote:] key " +"Unset container configuration key.\n" +"lxc config set key value " +"Set server configuration key.\n" +"lxc config unset key " +"Unset server configuration key.\n" +"lxc config show [--expanded] [remote:] " +"Show container configuration.\n" +"lxc config edit [remote:][container] " +"Edit container configuration in external editor.\n" +" Edit configuration, either by launching external editor or reading " +"STDIN.\n" +" Example: lxc config edit # launch editor\n" +" cat config.yml | lxc config edit # read from config." 
+"yml\n" +"\n" +"lxc config trust list [remote] " +"List all trusted certs.\n" +"lxc config trust add [remote] " +"Add certfile.crt to trusted hosts.\n" +"lxc config trust remove [remote] [hostname|fingerprint] " +"Remove the cert from trusted hosts.\n" +"\n" +"Examples:\n" +"To mount host's /share/c1 onto /opt in the container:\n" +" lxc config device add [remote:]container1 disk source=/" +"share/c1 path=opt\n" +"\n" +"To set an lxc config value:\n" +" lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = " +"1'\n" +"\n" +"To listen on IPv4 and IPv6 port 8443 (you can omit the 8443 its the " +"default):\n" +" lxc config set core.https_address [::]:8443\n" +"\n" +"To set the server trust password:\n" +" lxc config set core.trust_password blah" +msgstr "" + +#: lxc/file.go:33 +msgid "" +"Manage files on a container.\n" +"\n" +"lxc file pull [...] \n" +"lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] " +"\n" +"lxc file edit \n" +"\n" +" in the case of pull, in the case of push and in the " +"case of edit are /" +msgstr "" + +#: lxc/remote.go:33 +msgid "" +"Manage remote LXD servers.\n" +"\n" +"lxc remote add [--accept-certificate] [--password=PASSWORD] [--" +"public] Add the remote at .\n" +"lxc remote remove " +" Remove " +"the remote .\n" +"lxc remote " +"list " +"List all remotes.\n" +"lxc remote rename " +" Rename remote " +" to .\n" +"lxc remote set-url " +" Update 's " +"url to .\n" +"lxc remote set-default " +" Set the " +"default remote.\n" +"lxc remote get-" +"default " +"Print the default remote." +msgstr "" + +#: lxc/image.go:38 +msgid "" +"Manipulate container images.\n" +"\n" +"In LXD containers are created from images. Those images were themselves\n" +"either generated from an existing container or downloaded from an image\n" +"server.\n" +"\n" +"When using remote images, LXD will automatically cache images for you\n" +"and remove them upon expiration.\n" +"\n" +"The image unique identifier is the hash (sha-256) of its representation\n" +"as a compressed tarball (or for split images, the concatenation of the\n" +"metadata and rootfs tarballs).\n" +"\n" +"Images can be referenced by their full hash, shortest unique partial\n" +"hash or alias name (if one is set).\n" +"\n" +"\n" +"lxc image import [rootfs tarball|URL] [remote:] [--public] [--" +"created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] " +"[prop=value]\n" +" Import an image tarball (or tarballs) into the LXD image store.\n" +"\n" +"lxc image copy [remote:] : [--alias=ALIAS].. [--copy-aliases] " +"[--public]\n" +" Copy an image from one LXD daemon to another over the network.\n" +"\n" +"lxc image delete [remote:]\n" +" Delete an image from the LXD image store.\n" +"\n" +"lxc image export [remote:]\n" +" Export an image from the LXD image store into a distributable tarball.\n" +"\n" +"lxc image info [remote:]\n" +" Print everything LXD knows about a given image.\n" +"\n" +"lxc image list [remote:] [filter]\n" +" List images in the LXD image store. 
Filters may be of the\n" +" = form for property based filtering, or part of the image\n" +" hash or part of the image alias name.\n" +"\n" +"lxc image show [remote:]\n" +" Yaml output of the user modifiable properties of an image.\n" +"\n" +"lxc image edit [remote:]\n" +" Edit image, either by launching external editor or reading STDIN.\n" +" Example: lxc image edit # launch editor\n" +" cat image.yml | lxc image edit # read from image.yml\n" +"\n" +"lxc image alias create [remote:] \n" +" Create a new alias for an existing image.\n" +"\n" +"lxc image alias delete [remote:]\n" +" Delete an alias.\n" +"\n" +"lxc image alias list [remote:]\n" +" List the aliases.\n" +msgstr "" + +#: lxc/help.go:86 +msgid "Missing summary." +msgstr "サマリーã¯ã‚ã‚Šã¾ã›ã‚“。" + +#: lxc/monitor.go:20 +msgid "" +"Monitor activity on the LXD server.\n" +"\n" +"lxc monitor [remote:] [--type=TYPE...]\n" +"\n" +"Connects to the monitoring interface of the specified LXD server.\n" +"\n" +"By default will listen to all message types.\n" +"Specific types to listen to can be specified with --type.\n" +"\n" +"Example:\n" +"lxc monitor --type=logging" +msgstr "" + +#: lxc/file.go:169 +msgid "More than one file to download, but target is not a directory" +msgstr "" +"ダウンロード対象ã®ãƒ•ã‚¡ã‚¤ãƒ«ãŒè¤‡æ•°ã‚ã‚Šã¾ã™ãŒã€ã‚³ãƒ”ー先ãŒãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã§ã¯ã‚ã‚Šã¾" +"ã›ã‚“。" + +#: lxc/move.go:17 +msgid "" +"Move containers within or in between lxd instances.\n" +"\n" +"lxc move [remote:] [remote:]\n" +" Move a container between two hosts, renaming it if destination name " +"differs.\n" +"\n" +"lxc move \n" +" Rename a local container.\n" +msgstr "" + +#: lxc/list.go:224 lxc/remote.go:271 +msgid "NAME" +msgstr "" + +#: lxc/list.go:293 lxc/remote.go:257 +msgid "NO" +msgstr "" + +#: lxc/info.go:82 +#, c-format +msgid "Name: %s" +msgstr "" + +#: lxc/image.go:117 lxc/publish.go:30 +msgid "New alias to define at target" +msgstr "" + +#: lxc/config.go:277 +#, fuzzy +msgid "No certificate provided to add" +msgstr "追加ã™ã¹ã証明書ãŒæä¾›ã•ã‚Œã¦ã„ã¾ã›ã‚“" + +#: lxc/config.go:300 +msgid "No fingerprint specified." +msgstr "フィンガープリントãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。" + +#: lxc/image.go:331 +msgid "Only https:// is supported for remote image import." +msgstr "" + +#: lxc/help.go:63 lxc/main.go:132 +#, fuzzy +msgid "Options:" +msgstr "æ“作 %s" + +#: lxc/image.go:425 +#, c-format +msgid "Output is in %s" +msgstr "" + +#: lxc/exec.go:54 +msgid "Override the terminal mode (auto, interactive or non-interactive)" +msgstr "" + +#: lxc/list.go:230 +msgid "PID" +msgstr "" + +#: lxc/image.go:522 lxc/remote.go:273 +msgid "PUBLIC" +msgstr "" + +#: lxc/help.go:69 +#, fuzzy +msgid "Path to an alternate client configuration directory." +msgstr "別ã®è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" + +#: lxc/help.go:70 +#, fuzzy +msgid "Path to an alternate server directory." +msgstr "別ã®è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" + +#: lxc/main.go:39 +msgid "Permisson denied, are you in the lxd group?" +msgstr "" + +#: lxc/help.go:23 +#, fuzzy +msgid "" +"Presents details on how to use LXD.\n" +"\n" +"lxd help [--all]" +msgstr "LXDã®ä½¿ã„æ–¹ã®è©³ç´°ã‚’表示ã—ã¾ã™ã€‚\n" + +#: lxc/profile.go:186 +msgid "Press enter to open the editor again" +msgstr "" + +#: lxc/config.go:491 lxc/config.go:556 lxc/image.go:598 +msgid "Press enter to start the editor again" +msgstr "" + +#: lxc/help.go:65 +msgid "Print debug information." +msgstr "" + +#: lxc/help.go:64 +msgid "Print less common commands." +msgstr "" + +#: lxc/help.go:66 +msgid "Print verbose information." 
+msgstr "" + +#: lxc/version.go:18 +#, fuzzy +msgid "" +"Prints the version number of LXD.\n" +"\n" +"lxc version" +msgstr "LXDã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ç•ªå·ã‚’表示ã—ã¾ã™ã€‚\n" + +#: lxc/info.go:96 +#, c-format +msgid "Processcount: %d" +msgstr "" + +#: lxc/profile.go:223 +#, fuzzy, c-format +msgid "Profile %s applied to %s" +msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ\n" + +#: lxc/profile.go:137 +#, c-format +msgid "Profile %s created" +msgstr "" + +#: lxc/profile.go:207 +#, c-format +msgid "Profile %s deleted" +msgstr "" + +#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:38 lxc/launch.go:39 +msgid "Profile to apply to the new container" +msgstr "" + +#: lxc/info.go:93 +#, fuzzy, c-format +msgid "Profiles: %s" +msgstr "デãƒã‚¤ã‚¹ %s ㌠%s ã«è¿½åŠ ã•ã‚Œã¾ã—ãŸ\n" + +#: lxc/image.go:277 +msgid "Properties:" +msgstr "" + +#: lxc/remote.go:48 +msgid "Public image server" +msgstr "" + +#: lxc/image.go:265 +#, c-format +msgid "Public: %s" +msgstr "" + +#: lxc/publish.go:19 +msgid "" +"Publish containers as images.\n" +"\n" +"lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-" +"value]..." +msgstr "" + +#: lxc/remote.go:47 +msgid "Remote admin password" +msgstr "" + +#: lxc/delete.go:43 +#, c-format +msgid "Remove %s (yes/no): " +msgstr "" + +#: lxc/delete.go:36 lxc/delete.go:37 +msgid "Require user confirmation." +msgstr "" + +#: lxc/init.go:244 +#, c-format +msgid "Retrieving image: %s" +msgstr "" + +#: lxc/image.go:525 +msgid "SIZE" +msgstr "" + +#: lxc/list.go:229 +msgid "SNAPSHOTS" +msgstr "" + +#: lxc/list.go:225 +msgid "STATE" +msgstr "" + +#: lxc/remote.go:155 +msgid "Server certificate NACKed by user" +msgstr "ユーザã«ã‚ˆã‚Šã‚µãƒ¼ãƒè¨¼æ˜Žæ›¸ãŒæ‹’å¦ã•ã‚Œã¾ã—ãŸ" + +#: lxc/remote.go:201 +msgid "Server doesn't trust us after adding our cert" +msgstr "サーãƒãŒæˆ‘々ã®è¨¼æ˜Žæ›¸ã‚’追加ã—ãŸå¾Œæˆ‘々を信頼ã—ã¦ã„ã¾ã›ã‚“" + +#: lxc/restore.go:21 +msgid "" +"Set the current state of a resource back to a snapshot.\n" +"\n" +"lxc restore [remote:] [--stateful]\n" +"\n" +"Restores a container from a snapshot (optionally with running state, see\n" +"snapshot help for details).\n" +"\n" +"For example:\n" +"lxc snapshot u1 snap0 # create the snapshot\n" +"lxc restore u1 snap0 # restore the snapshot" +msgstr "" + +#: lxc/file.go:45 +msgid "Set the file's gid on push" +msgstr "プッシュ時ã«ãƒ•ã‚¡ã‚¤ãƒ«ã®gidを設定ã—ã¾ã™" + +#: lxc/file.go:46 +msgid "Set the file's perms on push" +msgstr "プッシュ時ã«ãƒ•ã‚¡ã‚¤ãƒ«ã®ãƒ‘ーミションを設定ã—ã¾ã™" + +#: lxc/file.go:44 +msgid "Set the file's uid on push" +msgstr "プッシュ時ã«ãƒ•ã‚¡ã‚¤ãƒ«ã®uidを設定ã—ã¾ã™" + +#: lxc/help.go:32 +msgid "Show all commands (not just interesting ones)" +msgstr "å…¨ã¦ã‚³ãƒžãƒ³ãƒ‰ã‚’表示ã—ã¾ã™ (主ãªã‚³ãƒžãƒ³ãƒ‰ã ã‘ã§ã¯ãªã)" + +#: lxc/info.go:34 +msgid "Show the container's last 100 log lines?" +msgstr "" + +#: lxc/image.go:262 +#, c-format +msgid "Size: %.2fMB" +msgstr "" + +#: lxc/info.go:122 +msgid "Snapshots:" +msgstr "" + +#: lxc/launch.go:118 +#, c-format +msgid "Starting %s" +msgstr "" + +#: lxc/info.go:87 +#, c-format +msgid "Status: %s" +msgstr "" + +#: lxc/delete.go:97 +msgid "Stopping container failed!" +msgstr "コンテナã®åœæ­¢ã«å¤±æ•—ã—ã¾ã—ãŸï¼" + +#: lxc/delete.go:83 +msgid "The container is currently running, stop it first or pass --force." +msgstr "" + +#: lxc/publish.go:57 +msgid "There is no \"image name\". Did you want an alias?" +msgstr "" + +#: lxc/action.go:35 +msgid "Time to wait for the container before killing it." 
+msgstr "コンテナを強制åœæ­¢ã™ã‚‹ã¾ã§ã®æ™‚é–“" + +#: lxc/image.go:266 +msgid "Timestamps:" +msgstr "" + +#: lxc/action.go:62 lxc/launch.go:126 +#, c-format +msgid "Try `lxc info --show-log %s` for more info" +msgstr "" + +#: lxc/info.go:89 +msgid "Type: ephemeral" +msgstr "" + +#: lxc/info.go:91 +msgid "Type: persistent" +msgstr "" + +#: lxc/image.go:526 +msgid "UPLOAD DATE" +msgstr "" + +#: lxc/remote.go:272 +msgid "URL" +msgstr "" + +#: lxc/image.go:271 +#, c-format +msgid "Uploaded: %s" +msgstr "" + +#: lxc/main.go:132 +#, fuzzy, c-format +msgid "Usage: %s" +msgstr "" +"Utilisation: %s\n" +"\n" +"Options:\n" +"\n" + +#: lxc/help.go:48 +#, fuzzy +msgid "Usage: lxc [subcommand] [options]" +msgstr "" +"使ã„æ–¹: lxc [サブコマンド] [オプション]\n" +"利用å¯èƒ½ãªã‚³ãƒžãƒ³ãƒ‰:\n" + +#: lxc/delete.go:47 +msgid "User aborted delete operation." +msgstr "" + +#: lxc/restore.go:35 +#, fuzzy +msgid "" +"Whether or not to restore the container's running state from snapshot (if " +"available)" +msgstr "コンテナã®ç¨¼å‹•çŠ¶æ…‹ã®ã‚¹ãƒŠãƒƒãƒ—ショットをå–å¾—ã™ã‚‹ã‹ã©ã†ã‹" + +#: lxc/snapshot.go:38 +msgid "Whether or not to snapshot the container's running state" +msgstr "コンテナã®ç¨¼å‹•çŠ¶æ…‹ã®ã‚¹ãƒŠãƒƒãƒ—ショットをå–å¾—ã™ã‚‹ã‹ã©ã†ã‹" + +#: lxc/config.go:33 +msgid "Whether to show the expanded configuration" +msgstr "" + +#: lxc/list.go:291 lxc/remote.go:259 +msgid "YES" +msgstr "" + +#: lxc/main.go:66 +msgid "`lxc config profile` is deprecated, please use `lxc profile`" +msgstr "" + +#: lxc/launch.go:105 +#, fuzzy +msgid "bad number of things scanned from image, container or snapshot" +msgstr "リソースã‹ã‚‰ã‚¹ã‚­ãƒ£ãƒ³ã•ã‚ŒãŸæ•°ãŒä¸æ­£" + +#: lxc/action.go:58 +msgid "bad result type from action" +msgstr "アクションã‹ã‚‰ã®çµæžœã‚¿ã‚¤ãƒ—ãŒä¸æ­£ï¼" + +#: lxc/copy.go:78 +msgid "can't copy to the same container name" +msgstr "" + +#: lxc/remote.go:247 +msgid "can't remove the default remote" +msgstr "" + +#: lxc/remote.go:264 +msgid "default" +msgstr "" + +#: lxc/init.go:197 lxc/init.go:202 lxc/launch.go:89 lxc/launch.go:94 +#, fuzzy +msgid "didn't get any affected image, container or snapshot from server" +msgstr "サーãƒã‹ã‚‰å¤‰æ›´ã•ã‚ŒãŸãƒªã‚½ãƒ¼ã‚¹ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" + +#: lxc/main.go:25 lxc/main.go:167 +#, fuzzy, c-format +msgid "error: %v" +msgstr "エラー: %v\n" + +#: lxc/help.go:40 lxc/main.go:127 +#, fuzzy, c-format +msgid "error: unknown command: %s" +msgstr "エラー: 未知ã®ã‚³ãƒžãƒ³ãƒ‰: %s\n" + +#: lxc/launch.go:109 +msgid "got bad version" +msgstr "ä¸æ­£ãªãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚’å¾—ã¾ã—ãŸ" + +#: lxc/image.go:256 lxc/image.go:503 +msgid "no" +msgstr "" + +#: lxc/copy.go:100 +msgid "not all the profiles from the source exist on the target" +msgstr "" + +#: lxc/remote.go:148 +#, fuzzy +msgid "ok (y/n)?" +msgstr "ok (y/n)?" 
+
+#: lxc/main.go:274 lxc/main.go:278
+#, c-format
+msgid "processing aliases failed %s\n"
+msgstr ""
+
+#: lxc/remote.go:291
+#, c-format
+msgid "remote %s already exists"
+msgstr "リモート %s は既に存在します"
+
+#: lxc/remote.go:243 lxc/remote.go:287 lxc/remote.go:317 lxc/remote.go:328
+#, c-format
+msgid "remote %s doesn't exist"
+msgstr "リモート %s は存在しません"
+
+#: lxc/remote.go:227
+#, c-format
+msgid "remote %s exists as <%s>"
+msgstr "リモート %s は <%s> として存在します"
+
+#: lxc/info.go:131
+msgid "stateful"
+msgstr ""
+
+#: lxc/info.go:133
+msgid "stateless"
+msgstr ""
+
+#: lxc/info.go:127
+#, c-format
+msgid "taken at %s"
+msgstr ""
+
+#: lxc/exec.go:158
+msgid "unreachable return reached"
+msgstr "到達しないはずのreturnに到達しました"
+
+#: lxc/main.go:207
+msgid "wrong number of subcommand arguments"
+msgstr "サブコマンドの引数の数が正しくありません"
+
+#: lxc/delete.go:46 lxc/image.go:259 lxc/image.go:507
+msgid "yes"
+msgstr ""
+
+#: lxc/copy.go:38
+msgid "you must specify a source container name"
+msgstr ""
+
+#, fuzzy
+#~ msgid "Bad image property: %s"
+#~ msgstr "(不正なイメージプロパティ形式: %s\n"
+
+#~ msgid "Cannot change profile name"
+#~ msgstr "プロファイル名を変更できません"
+
+#~ msgid "Could not create server cert dir"
+#~ msgstr "サーバ証明書格納用のディレクトリを作成できません。"
+
+#, fuzzy
+#~ msgid ""
+#~ "Create a read-only snapshot of a container.\n"
+#~ "\n"
+#~ "lxc snapshot [remote:] [--stateful]"
+#~ msgstr "コンテナの読み取り専用スナップショットを作成します。\n"
+
+#~ msgid "No certificate on this connection"
+#~ msgstr "この接続に使用する証明書がありません"
+
+#, fuzzy
+#~ msgid ""
+#~ "Set the current state of a resource back to its state at the time the "
+#~ "snapshot was created.\n"
+#~ "\n"
+#~ "lxc restore [remote:] [--stateful]"
+#~ msgstr "コンテナの読み取り専用スナップショットを作成します。\n"
+
+#~ msgid "api version mismatch: mine: %q, daemon: %q"
+#~ msgstr "APIのバージョン不一致: クライアント: %q, サーバ: %q"
+
+#, fuzzy
+#~ msgid "bad profile url %s"
+#~ msgstr "プロファイルURLが不正 %s"
+
+#, fuzzy
+#~ msgid "bad version in profile url"
+#~ msgstr "プロファイルURL内のバージョンが不正"
+
+#, fuzzy
+#~ msgid "device already exists"
+#~ msgstr "リモート %s は既に存在します"
+
+#, fuzzy
+#~ msgid "error."
+#~ msgstr "エラー: %v\n"
+
+#~ msgid "got bad op status %s"
+#~ msgstr "不正な操作ステータスを得ました %s"
+
+#, fuzzy
+#~ msgid "got bad response type, expected %s got %s"
+#~ msgstr "\"exec\"から不正な応答タイプを得ました"
+
+#~ msgid "invalid wait url %s"
+#~ msgstr "待つURLが不正 %s"
+
+#~ msgid "no response!"
+#~ msgstr "応答ãŒã‚ã‚Šã¾ã›ã‚“ï¼" + +#~ msgid "unknown remote name: %q" +#~ msgstr "未知ã®ãƒªãƒ¢ãƒ¼ãƒˆå: %q" + +#, fuzzy +#~ msgid "unknown transport type: %s" +#~ msgstr "未知ã®ãƒªãƒ¢ãƒ¼ãƒˆå: %q" + +#~ msgid "cannot resolve unix socket address: %v" +#~ msgstr "UNIXソケットã®ã‚¢ãƒ‰ãƒ¬ã‚¹ã‚’解決ã§ãã¾ã›ã‚“: %v" + +#, fuzzy +#~ msgid "unknown group %s" +#~ msgstr "未知ã®ãƒªãƒ¢ãƒ¼ãƒˆå: %q" + +#, fuzzy +#~ msgid "Information about remotes not yet supported" +#~ msgstr "リモートã®æƒ…報表示ã¯ã¾ã ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“。\n" + +#~ msgid "Unknown image command %s" +#~ msgstr "未知ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã‚³ãƒžãƒ³ãƒ‰ %s" + +#~ msgid "Unknown remote subcommand %s" +#~ msgstr "未知ã®ãƒªãƒ¢ãƒ¼ãƒˆã‚µãƒ–コマンド %s" + +#~ msgid "Unkonwn config trust command %s" +#~ msgstr "未知ã®è¨­å®šä¿¡é ¼ã‚³ãƒžãƒ³ãƒ‰ %s" + +#, fuzzy +#~ msgid "YAML parse error %v" +#~ msgstr "エラー: %v\n" + +#~ msgid "invalid argument %s" +#~ msgstr "ä¸æ­£ãªå¼•æ•° %s" + +#, fuzzy +#~ msgid "unknown profile cmd %s" +#~ msgstr "未知ã®è¨­å®šã‚³ãƒžãƒ³ãƒ‰ %s" + +#, fuzzy +#~ msgid "Publish to remote server is not supported yet" +#~ msgstr "リモートã®æƒ…報表示ã¯ã¾ã ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“。\n" + +#, fuzzy +#~ msgid "Use an alternative config path." +#~ msgstr "別ã®è¨­å®šãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒª" + +#, fuzzy +#~ msgid "" +#~ "error: %v\n" +#~ "%s\n" +#~ msgstr "" +#~ "エラー: %v\n" +#~ "%s" + +#, fuzzy +#~ msgid "Show for remotes is not yet supported\n" +#~ msgstr "リモートã®æƒ…報表示ã¯ã¾ã ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“。\n" + +#~ msgid "(Bad alias entry: %s\n" +#~ msgstr "(ä¸æ­£ãªã‚¨ã‚¤ãƒªã‚¢ã‚¹: %s\n" + +#~ msgid "bad container url %s" +#~ msgstr "コンテナã®ä¸æ­£ãªURL %s" + +#~ msgid "bad version in container url" +#~ msgstr "コンテナURL内ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ãŒä¸æ­£" + +#, fuzzy +#~ msgid "Ephemeral containers not yet supported\n" +#~ msgstr "リモートã®æƒ…報表示ã¯ã¾ã ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“。\n" + +#~ msgid "(Bad image entry: %s\n" +#~ msgstr "(ä¸æ­£ãªã‚¤ãƒ¡ãƒ¼ã‚¸: %s\n" + +#~ msgid "Certificate already stored.\n" +#~ msgstr "証明書ã¯æ—¢ã«æ ¼ç´ã•ã‚Œã¦ã„ã¾ã™ã€‚\n" + +#, fuzzy +#~ msgid "Non-async response from delete!" +#~ msgstr "削除コマンドã‹ã‚‰ã®å¿œç­”ãŒä¸æ­£(éžåŒæœŸã§ãªã„)ã§ã™ï¼" + +#, fuzzy +#~ msgid "Non-async response from init!" +#~ msgstr "åˆæœŸåŒ–コマンドã‹ã‚‰ã®å¿œç­”ãŒä¸æ­£(éžåŒæœŸã§ãªã„)ã§ã™ï¼" + +#~ msgid "Non-async response from snapshot!" +#~ msgstr "スナップショットã‹ã‚‰ã®å¿œç­”ãŒä¸æ­£(éžåŒæœŸã§ãªã„)ã§ã™ï¼" + +#~ msgid "Server certificate has changed" +#~ msgstr "サーãƒã®è¨¼æ˜Žæ›¸ãŒå¤‰æ›´ã•ã‚Œã¦ã„ã¾ã—ãŸ" + +#, fuzzy +#~ msgid "Unexpected non-async response" +#~ msgstr "ä¸æ­£ãªãƒ¬ã‚¹ãƒãƒ³ã‚¹ (éžåŒæœŸã§ãªã„)" + +#~ msgid "bad response type from image list!" +#~ msgstr "イメージリストã‹ã‚‰ã®ãƒ¬ã‚¹ãƒãƒ³ã‚¹ã‚¿ã‚¤ãƒ—ãŒä¸æ­£ï¼" + +#~ msgid "bad response type from list!" +#~ msgstr "一覧ã‹ã‚‰ã®ãƒ¬ã‚¹ãƒãƒ³ã‚¹ã‚¿ã‚¤ãƒ—ãŒä¸æ­£ï¼" + +#, fuzzy +#~ msgid "got non-async response!" +#~ msgstr "ä¸æ­£ãª(éžåŒæœŸã§ãªã„)応答を得ã¾ã—ãŸï¼" + +#~ msgid "got non-sync response from containers get!" +#~ msgstr "コンテナã‹ã‚‰ä¸æ­£ãª(éžåŒæœŸã§ãªã„)応答を得ã¾ã—ãŸï¼" + +#~ msgid "Delete a container or container snapshot.\n" +#~ msgstr "コンテナã¾ãŸã¯ã‚³ãƒ³ãƒ†ãƒŠã®ã‚¹ãƒŠãƒƒãƒ—ショットを削除ã—ã¾ã™ã€‚\n" + +#~ msgid "List information on containers.\n" +#~ msgstr "コンテナã®æƒ…報を一覧表示ã—ã¾ã™ã€‚\n" + +#~ msgid "Lists the available resources.\n" +#~ msgstr "利用å¯èƒ½ãªãƒªã‚½ãƒ¼ã‚¹ã‚’一覧表示ã—ã¾ã™ã€‚\n" + +#~ msgid "Manage files on a container.\n" +#~ msgstr "コンテナ上ã®ãƒ•ã‚¡ã‚¤ãƒ«ã‚’管ç†ã—ã¾ã™ã€‚\n" + +#~ msgid "Manage remote lxc servers.\n" +#~ msgstr "リモートã®lxcサーãƒã‚’管ç†ã—ã¾ã™ã€‚\n" + +#~ msgid "Non-async response from create!" 
+#~ msgstr "作æˆã‚³ãƒžãƒ³ãƒ‰ã‹ã‚‰ã®ãƒ¬ã‚¹ãƒãƒ³ã‚¹ãŒä¸æ­£(éžåŒæœŸã§ãªã„)ï¼" + +#~ msgid "Only 'password' can be set currently" +#~ msgstr "ç¾æ™‚点ã§ã¯ 'password' ã®ã¿ãŒè¨­å®šå¯èƒ½ã§ã™" + +#~ msgid "" +#~ "lxc image import [target] [--created-at=ISO-8601] [--expires-" +#~ "at=ISO-8601] [--fingerprint=HASH] [prop=value]\n" +#~ msgstr "" +#~ "lxc image import [destination] [--created-at=ISO-8601] [--" +#~ "expires-at=ISO-8601] [--fingerprint=HASH] [proprit=valeur]\n" + +#~ msgid "lxc init ubuntu []\n" +#~ msgstr "lxc init ubuntu []\n" + +#~ msgid "lxc launch ubuntu []\n" +#~ msgstr "lxc launch ubuntu []\n" === added file 'src/github.com/lxc/lxd/po/lxd.pot' --- src/github.com/lxc/lxd/po/lxd.pot 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/po/lxd.pot 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1135 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. +# +#, fuzzy +msgid "" +msgstr "Project-Id-Version: lxd\n" + "Report-Msgid-Bugs-To: lxc-devel@lists.linuxcontainers.org\n" + "POT-Creation-Date: 2016-03-02 19:53-0500\n" + "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" + "Last-Translator: FULL NAME \n" + "Language-Team: LANGUAGE \n" + "Language: \n" + "MIME-Version: 1.0\n" + "Content-Type: text/plain; charset=CHARSET\n" + "Content-Transfer-Encoding: 8bit\n" + +#: lxc/config.go:37 +msgid "### This is a yaml representation of the configuration.\n" + "### Any line starting with a '# will be ignored.\n" + "###\n" + "### A sample configuration looks like:\n" + "### name: container1\n" + "### profiles:\n" + "### - default\n" + "### config:\n" + "### volatile.eth0.hwaddr: 00:16:3e:e9:f8:7f\n" + "### devices:\n" + "### homedir:\n" + "### path: /extra\n" + "### source: /home/user\n" + "### type: disk\n" + "### ephemeral: false\n" + "###\n" + "### Note that the name is shown but cannot be changed" +msgstr "" + +#: lxc/image.go:83 +msgid "### This is a yaml representation of the image properties.\n" + "### Any line starting with a '# will be ignored.\n" + "###\n" + "### Each property is represented by a single line:\n" + "### An example would be:\n" + "### description: My custom image" +msgstr "" + +#: lxc/profile.go:27 +msgid "### This is a yaml representation of the profile.\n" + "### Any line starting with a '# will be ignored.\n" + "###\n" + "### A profile consists of a set of configuration items followed by a set of\n" + "### devices.\n" + "###\n" + "### An example would look like:\n" + "### name: onenic\n" + "### config:\n" + "### raw.lxc: lxc.aa_profile=unconfined\n" + "### devices:\n" + "### eth0:\n" + "### nictype: bridged\n" + "### parent: lxcbr0\n" + "### type: nic\n" + "###\n" + "### Note that the name is shown but cannot be changed" +msgstr "" + +#: lxc/image.go:569 +#, c-format +msgid "%s (%d more)" +msgstr "" + +#: lxc/snapshot.go:61 +msgid "'/' not allowed in snapshot name" +msgstr "" + +#: lxc/profile.go:223 +msgid "(none)" +msgstr "" + +#: lxc/image.go:590 lxc/image.go:615 +msgid "ALIAS" +msgstr "" + +#: lxc/image.go:594 +msgid "ARCH" +msgstr "" + +#: lxc/list.go:336 +msgid "ARCHITECTURE" +msgstr "" + +#: lxc/remote.go:52 +msgid "Accept certificate" +msgstr "" + +#: lxc/remote.go:216 +#, c-format +msgid "Admin password for %s: " +msgstr "" + +#: lxc/image.go:333 +msgid "Aliases:" +msgstr "" + +#: lxc/exec.go:54 +msgid "An environment variable of the form HOME=/home/foo" +msgstr "" + +#: lxc/image.go:316 lxc/info.go:87 +#, c-format +msgid "Architecture: %s" +msgstr "" + 
+#: lxc/image.go:337 +#, c-format +msgid "Auto update: %s" +msgstr "" + +#: lxc/help.go:49 +msgid "Available commands:" +msgstr "" + +#: lxc/config.go:269 +msgid "COMMON NAME" +msgstr "" + +#: lxc/list.go:337 +msgid "CREATED AT" +msgstr "" + +#: lxc/config.go:113 +#, c-format +msgid "Can't read from stdin: %s" +msgstr "" + +#: lxc/config.go:126 lxc/config.go:159 lxc/config.go:181 +#, c-format +msgid "Can't unset key '%s', it's not currently set." +msgstr "" + +#: lxc/profile.go:334 +msgid "Cannot provide container name to list" +msgstr "" + +#: lxc/remote.go:166 +#, c-format +msgid "Certificate fingerprint: %x" +msgstr "" + +#: lxc/action.go:28 +#, c-format +msgid "Changes state of one or more containers to %s.\n" + "\n" + "lxc %s [...]" +msgstr "" + +#: lxc/remote.go:239 +msgid "Client certificate stored at server: " +msgstr "" + +#: lxc/list.go:90 lxc/list.go:91 +msgid "Columns" +msgstr "" + +#: lxc/init.go:134 lxc/init.go:135 lxc/launch.go:40 lxc/launch.go:41 +msgid "Config key/value to apply to the new container" +msgstr "" + +#: lxc/config.go:493 lxc/config.go:558 lxc/image.go:669 lxc/profile.go:187 +#, c-format +msgid "Config parsing error: %s" +msgstr "" + +#: lxc/main.go:37 +msgid "Connection refused; is LXD running?" +msgstr "" + +#: lxc/publish.go:59 +msgid "Container name is mandatory" +msgstr "" + +#: lxc/init.go:209 +#, c-format +msgid "Container name is: %s" +msgstr "" + +#: lxc/publish.go:141 lxc/publish.go:156 +#, c-format +msgid "Container published with fingerprint: %s" +msgstr "" + +#: lxc/image.go:155 +msgid "Copy aliases from source" +msgstr "" + +#: lxc/copy.go:22 +msgid "Copy containers within or in between lxd instances.\n" + "\n" + "lxc copy [remote:] [remote:] [--ephemeral|e]" +msgstr "" + +#: lxc/image.go:254 +#, c-format +msgid "Copying the image: %s" +msgstr "" + +#: lxc/remote.go:181 +msgid "Could not create server cert dir" +msgstr "" + +#: lxc/snapshot.go:21 +msgid "Create a read-only snapshot of a container.\n" + "\n" + "lxc snapshot [remote:] [--stateful]\n" + "\n" + "Creates a snapshot of the container (optionally with the container's memory\n" + "state). When --stateful is used, LXD attempts to checkpoint the container's\n" + "running state, including process memory state, TCP connections, etc. so that it\n" + "can be restored (via lxc restore) at a later time (although some things, e.g.\n" + "TCP connections after the TCP timeout window has expired, may not be restored\n" + "successfully).\n" + "\n" + "Example:\n" + "lxc snapshot u1 snap0" +msgstr "" + +#: lxc/image.go:321 lxc/info.go:89 +#, c-format +msgid "Created: %s" +msgstr "" + +#: lxc/init.go:177 lxc/launch.go:116 +#, c-format +msgid "Creating %s" +msgstr "" + +#: lxc/init.go:175 +msgid "Creating the container" +msgstr "" + +#: lxc/image.go:593 lxc/image.go:617 +msgid "DESCRIPTION" +msgstr "" + +#: lxc/delete.go:25 +msgid "Delete containers or container snapshots.\n" + "\n" + "lxc delete [remote:][/] [remote:][[/]...]\n" + "\n" + "Destroy containers or snapshots with any attached data (configuration, snapshots, ...)." +msgstr "" + +#: lxc/config.go:606 +#, c-format +msgid "Device %s added to %s" +msgstr "" + +#: lxc/config.go:634 +#, c-format +msgid "Device %s removed from %s" +msgstr "" + +#: lxc/list.go:420 +msgid "EPHEMERAL" +msgstr "" + +#: lxc/config.go:271 +msgid "EXPIRY DATE" +msgstr "" + +#: lxc/main.go:55 +msgid "Enables debug mode." +msgstr "" + +#: lxc/main.go:54 +msgid "Enables verbose mode." 
+msgstr "" + +#: lxc/help.go:68 +msgid "Environment:" +msgstr "" + +#: lxc/copy.go:29 lxc/copy.go:30 lxc/init.go:138 lxc/init.go:139 lxc/launch.go:44 lxc/launch.go:45 +msgid "Ephemeral container" +msgstr "" + +#: lxc/monitor.go:56 +msgid "Event type to listen for" +msgstr "" + +#: lxc/exec.go:45 +msgid "Execute the specified command in a container.\n" + "\n" + "lxc exec [remote:]container [--mode=auto|interactive|non-interactive] [--env EDITOR=/usr/bin/vim]... \n" + "\n" + "Mode defaults to non-interactive, interactive mode is selected if both stdin AND stdout are terminals (stderr is ignored)." +msgstr "" + +#: lxc/image.go:325 +#, c-format +msgid "Expires: %s" +msgstr "" + +#: lxc/image.go:327 +msgid "Expires: never" +msgstr "" + +#: lxc/config.go:268 lxc/image.go:591 lxc/image.go:616 +msgid "FINGERPRINT" +msgstr "" + +#: lxc/list.go:92 +msgid "Fast mode (same as --columns=nsacPt" +msgstr "" + +#: lxc/image.go:314 +#, c-format +msgid "Fingerprint: %s" +msgstr "" + +#: lxc/finger.go:17 +msgid "Fingers the LXD instance to check if it is up and working.\n" + "\n" + "lxc finger " +msgstr "" + +#: lxc/action.go:37 +msgid "Force the container to shutdown." +msgstr "" + +#: lxc/delete.go:34 lxc/delete.go:35 +msgid "Force the removal of stopped containers." +msgstr "" + +#: lxc/main.go:56 +msgid "Force using the local unix socket." +msgstr "" + +#: lxc/main.go:138 +msgid "Generating a client certificate. This may take a minute..." +msgstr "" + +#: lxc/list.go:334 +msgid "IPV4" +msgstr "" + +#: lxc/list.go:335 +msgid "IPV6" +msgstr "" + +#: lxc/config.go:270 +msgid "ISSUE DATE" +msgstr "" + +#: lxc/main.go:57 +msgid "Ignore aliases when determining what command to run." +msgstr "" + +#: lxc/action.go:39 +msgid "Ignore the container state (only forstart)." +msgstr "" + +#: lxc/image.go:259 +msgid "Image copied successfully!" +msgstr "" + +#: lxc/image.go:405 +#, c-format +msgid "Image imported with fingerprint: %s" +msgstr "" + +#: lxc/init.go:73 +msgid "Initialize a container from a particular image.\n" + "\n" + "lxc init [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] [--config|-c ...]\n" + "\n" + "Initializes a container using the specified image and name.\n" + "\n" + "Not specifying -p will result in the default profile.\n" + "Specifying \"-p\" with no argument will result in no profile.\n" + "\n" + "Example:\n" + "lxc init ubuntu u1" +msgstr "" + +#: lxc/init.go:30 lxc/init.go:35 +msgid "Invalid configuration key" +msgstr "" + +#: lxc/file.go:186 +#, c-format +msgid "Invalid source %s" +msgstr "" + +#: lxc/file.go:57 +#, c-format +msgid "Invalid target %s" +msgstr "" + +#: lxc/info.go:116 +msgid "Ips:" +msgstr "" + +#: lxc/image.go:156 +msgid "Keep the image up to date after initial copy" +msgstr "" + +#: lxc/main.go:35 +msgid "LXD socket not found; is LXD running?" +msgstr "" + +#: lxc/launch.go:22 +msgid "Launch a container from a particular image.\n" + "\n" + "lxc launch [remote:] [remote:][] [--ephemeral|-e] [--profile|-p ...] 
[--config|-c ...]\n" + "\n" + "Launches a container using the specified image and name.\n" + "\n" + "Not specifying -p will result in the default profile.\n" + "Specifying \"-p\" with no argument will result in no profile.\n" + "\n" + "Example:\n" + "lxc launch ubuntu u1" +msgstr "" + +#: lxc/info.go:24 +msgid "List information on containers.\n" + "\n" + "This will support remotes and images as well, but only containers for now.\n" + "\n" + "lxc info [:]container [--show-log]" +msgstr "" + +#: lxc/list.go:60 +msgid "Lists the available resources.\n" + "\n" + "lxc list [resource] [filters] [-c columns] [--fast]\n" + "\n" + "The filters are:\n" + "* A single keyword like \"web\" which will list any container with \"web\" in its name.\n" + "* A key/value pair referring to a configuration item. For those, the namespace can be abreviated to the smallest unambiguous identifier:\n" + "* \"user.blah=abc\" will list all containers with the \"blah\" user property set to \"abc\"\n" + "* \"u.blah=abc\" will do the same\n" + "* \"security.privileged=1\" will list all privileged containers\n" + "* \"s.privileged=1\" will do the same\n" + "\n" + "The columns are:\n" + "* 4 - IPv4 address\n" + "* 6 - IPv6 address\n" + "* a - architecture\n" + "* c - creation date\n" + "* n - name\n" + "* p - pid of container init process\n" + "* P - profiles\n" + "* s - state\n" + "* S - number of snapshots\n" + "* t - type (persistent or ephemeral)\n" + "\n" + "Default column layout: ns46tS\n" + "Fast column layout: nsacPt" +msgstr "" + +#: lxc/info.go:159 +msgid "Log:" +msgstr "" + +#: lxc/image.go:154 +msgid "Make image public" +msgstr "" + +#: lxc/publish.go:32 +msgid "Make the image public" +msgstr "" + +#: lxc/profile.go:48 +msgid "Manage configuration profiles.\n" + "\n" + "lxc profile list [filters] List available profiles.\n" + "lxc profile show Show details of a profile.\n" + "lxc profile create Create a profile.\n" + "lxc profile copy Copy the profile to the specified remote.\n" + "lxc profile get Get profile configuration.\n" + "lxc profile set Set profile configuration.\n" + "lxc profile delete Delete a profile.\n" + "lxc profile edit \n" + " Edit profile, either by launching external editor or reading STDIN.\n" + " Example: lxc profile edit # launch editor\n" + " cat profile.yml | lxc profile edit # read from profile.yml\n" + "lxc profile apply \n" + " Apply a comma-separated list of profiles to a container, in order.\n" + " All profiles passed in this call (and only those) will be applied\n" + " to the specified container.\n" + " Example: lxc profile apply foo default,bar # Apply default and bar\n" + " lxc profile apply foo default # Only default is active\n" + " lxc profile apply '' # no profiles are applied anymore\n" + " lxc profile apply bar,default # Apply default second now\n" + "\n" + "Devices:\n" + "lxc profile device list List devices in the given profile.\n" + "lxc profile device show Show full device details in the given profile.\n" + "lxc profile device remove Remove a device from a profile.\n" + "lxc profile device add [key=value]...\n" + " Add a profile device, such as a disk or a nic, to the containers\n" + " using the specified profile." +msgstr "" + +#: lxc/config.go:58 +msgid "Manage configuration.\n" + "\n" + "lxc config device add <[remote:]container> [key=value]... 
Add a device to a container.\n" + "lxc config device list [remote:] List devices for container.\n" + "lxc config device show [remote:] Show full device details for container.\n" + "lxc config device remove [remote:] Remove device from container.\n" + "\n" + "lxc config get [remote:] key Get configuration key.\n" + "lxc config set [remote:] key value Set container configuration key.\n" + "lxc config unset [remote:] key Unset container configuration key.\n" + "lxc config set key value Set server configuration key.\n" + "lxc config unset key Unset server configuration key.\n" + "lxc config show [--expanded] [remote:] Show container configuration.\n" + "lxc config edit [remote:][container] Edit container configuration in external editor.\n" + " Edit configuration, either by launching external editor or reading STDIN.\n" + " Example: lxc config edit # launch editor\n" + " cat config.yml | lxc config edit # read from config.yml\n" + "\n" + "lxc config trust list [remote] List all trusted certs.\n" + "lxc config trust add [remote] Add certfile.crt to trusted hosts.\n" + "lxc config trust remove [remote] [hostname|fingerprint] Remove the cert from trusted hosts.\n" + "\n" + "Examples:\n" + "To mount host's /share/c1 onto /opt in the container:\n" + " lxc config device add [remote:]container1 disk source=/share/c1 path=opt\n" + "\n" + "To set an lxc config value:\n" + " lxc config set [remote:] raw.lxc 'lxc.aa_allow_incomplete = 1'\n" + "\n" + "To listen on IPv4 and IPv6 port 8443 (you can omit the 8443 its the default):\n" + " lxc config set core.https_address [::]:8443\n" + "\n" + "To set the server trust password:\n" + " lxc config set core.trust_password blah" +msgstr "" + +#: lxc/file.go:32 +msgid "Manage files on a container.\n" + "\n" + "lxc file pull [...] \n" + "lxc file push [--uid=UID] [--gid=GID] [--mode=MODE] [...] \n" + "lxc file edit \n" + "\n" + " in the case of pull, in the case of push and in the case of edit are /" +msgstr "" + +#: lxc/remote.go:38 +msgid "Manage remote LXD servers.\n" + "\n" + "lxc remote add [--accept-certificate] [--password=PASSWORD]\n" + " [--public] [--protocol=PROTOCOL] Add the remote at .\n" + "lxc remote remove Remove the remote .\n" + "lxc remote list List all remotes.\n" + "lxc remote rename Rename remote to .\n" + "lxc remote set-url Update 's url to .\n" + "lxc remote set-default Set the default remote.\n" + "lxc remote get-default Print the default remote." +msgstr "" + +#: lxc/image.go:93 +msgid "Manipulate container images.\n" + "\n" + "In LXD containers are created from images. Those images were themselves\n" + "either generated from an existing container or downloaded from an image\n" + "server.\n" + "\n" + "When using remote images, LXD will automatically cache images for you\n" + "and remove them upon expiration.\n" + "\n" + "The image unique identifier is the hash (sha-256) of its representation\n" + "as a compressed tarball (or for split images, the concatenation of the\n" + "metadata and rootfs tarballs).\n" + "\n" + "Images can be referenced by their full hash, shortest unique partial\n" + "hash or alias name (if one is set).\n" + "\n" + "\n" + "lxc image import [rootfs tarball|URL] [remote:] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [prop=value]\n" + " Import an image tarball (or tarballs) into the LXD image store.\n" + "\n" + "lxc image copy [remote:] : [--alias=ALIAS].. 
[--copy-aliases] [--public] [--auto-update]\n" + " Copy an image from one LXD daemon to another over the network.\n" + "\n" + " The auto-update flag instructs the server to keep this image up to\n" + " date. It requires the source to be an alias and for it to be public.\n" + "\n" + "lxc image delete [remote:]\n" + " Delete an image from the LXD image store.\n" + "\n" + "lxc image export [remote:]\n" + " Export an image from the LXD image store into a distributable tarball.\n" + "\n" + "lxc image info [remote:]\n" + " Print everything LXD knows about a given image.\n" + "\n" + "lxc image list [remote:] [filter]\n" + " List images in the LXD image store. Filters may be of the\n" + " = form for property based filtering, or part of the image\n" + " hash or part of the image alias name.\n" + "\n" + "lxc image show [remote:]\n" + " Yaml output of the user modifiable properties of an image.\n" + "\n" + "lxc image edit [remote:]\n" + " Edit image, either by launching external editor or reading STDIN.\n" + " Example: lxc image edit # launch editor\n" + " cat image.yml | lxc image edit # read from image.yml\n" + "\n" + "lxc image alias create [remote:] \n" + " Create a new alias for an existing image.\n" + "\n" + "lxc image alias delete [remote:]\n" + " Delete an alias.\n" + "\n" + "lxc image alias list [remote:]\n" + " List the aliases.\n" +msgstr "" + +#: lxc/help.go:86 +msgid "Missing summary." +msgstr "" + +#: lxc/monitor.go:41 +msgid "Monitor activity on the LXD server.\n" + "\n" + "lxc monitor [remote:] [--type=TYPE...]\n" + "\n" + "Connects to the monitoring interface of the specified LXD server.\n" + "\n" + "By default will listen to all message types.\n" + "Specific types to listen to can be specified with --type.\n" + "\n" + "Example:\n" + "lxc monitor --type=logging" +msgstr "" + +#: lxc/file.go:174 +msgid "More than one file to download, but target is not a directory" +msgstr "" + +#: lxc/move.go:17 +msgid "Move containers within or in between lxd instances.\n" + "\n" + "lxc move [remote:] [remote:]\n" + " Move a container between two hosts, renaming it if destination name differs.\n" + "\n" + "lxc move \n" + " Rename a local container.\n" +msgstr "" + +#: lxc/action.go:63 +msgid "Must supply container name for: " +msgstr "" + +#: lxc/list.go:338 lxc/remote.go:323 +msgid "NAME" +msgstr "" + +#: lxc/remote.go:297 lxc/remote.go:302 +msgid "NO" +msgstr "" + +#: lxc/info.go:86 +#, c-format +msgid "Name: %s" +msgstr "" + +#: lxc/image.go:157 lxc/publish.go:33 +msgid "New alias to define at target" +msgstr "" + +#: lxc/config.go:280 +msgid "No certificate provided to add" +msgstr "" + +#: lxc/config.go:303 +msgid "No fingerprint specified." +msgstr "" + +#: lxc/image.go:397 +msgid "Only https:// is supported for remote image import." +msgstr "" + +#: lxc/help.go:63 lxc/main.go:122 +msgid "Options:" +msgstr "" + +#: lxc/image.go:492 +#, c-format +msgid "Output is in %s" +msgstr "" + +#: lxc/exec.go:55 +msgid "Override the terminal mode (auto, interactive or non-interactive)" +msgstr "" + +#: lxc/list.go:422 +msgid "PERSISTENT" +msgstr "" + +#: lxc/list.go:339 +msgid "PID" +msgstr "" + +#: lxc/list.go:340 +msgid "PROFILES" +msgstr "" + +#: lxc/remote.go:325 +msgid "PROTOCOL" +msgstr "" + +#: lxc/image.go:592 lxc/remote.go:326 +msgid "PUBLIC" +msgstr "" + +#: lxc/help.go:69 +msgid "Path to an alternate client configuration directory." +msgstr "" + +#: lxc/help.go:70 +msgid "Path to an alternate server directory." +msgstr "" + +#: lxc/main.go:39 +msgid "Permisson denied, are you in the lxd group?" 
+msgstr "" + +#: lxc/info.go:100 +#, c-format +msgid "Pid: %d" +msgstr "" + +#: lxc/help.go:25 +msgid "Presents details on how to use LXD.\n" + "\n" + "lxd help [--all]" +msgstr "" + +#: lxc/profile.go:188 +msgid "Press enter to open the editor again" +msgstr "" + +#: lxc/config.go:494 lxc/config.go:559 lxc/image.go:670 +msgid "Press enter to start the editor again" +msgstr "" + +#: lxc/help.go:65 +msgid "Print debug information." +msgstr "" + +#: lxc/help.go:64 +msgid "Print less common commands." +msgstr "" + +#: lxc/help.go:66 +msgid "Print verbose information." +msgstr "" + +#: lxc/version.go:18 +msgid "Prints the version number of LXD.\n" + "\n" + "lxc version" +msgstr "" + +#: lxc/info.go:101 +#, c-format +msgid "Processes: %d" +msgstr "" + +#: lxc/profile.go:225 +#, c-format +msgid "Profile %s applied to %s" +msgstr "" + +#: lxc/profile.go:139 +#, c-format +msgid "Profile %s created" +msgstr "" + +#: lxc/profile.go:209 +#, c-format +msgid "Profile %s deleted" +msgstr "" + +#: lxc/init.go:136 lxc/init.go:137 lxc/launch.go:42 lxc/launch.go:43 +msgid "Profile to apply to the new container" +msgstr "" + +#: lxc/info.go:98 +#, c-format +msgid "Profiles: %s" +msgstr "" + +#: lxc/image.go:329 +msgid "Properties:" +msgstr "" + +#: lxc/remote.go:55 +msgid "Public image server" +msgstr "" + +#: lxc/image.go:317 +#, c-format +msgid "Public: %s" +msgstr "" + +#: lxc/publish.go:25 +msgid "Publish containers as images.\n" + "\n" + "lxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-value]..." +msgstr "" + +#: lxc/remote.go:53 +msgid "Remote admin password" +msgstr "" + +#: lxc/delete.go:42 +#, c-format +msgid "Remove %s (yes/no): " +msgstr "" + +#: lxc/delete.go:36 lxc/delete.go:37 +msgid "Require user confirmation." +msgstr "" + +#: lxc/init.go:246 +#, c-format +msgid "Retrieving image: %s" +msgstr "" + +#: lxc/image.go:595 +msgid "SIZE" +msgstr "" + +#: lxc/list.go:341 +msgid "SNAPSHOTS" +msgstr "" + +#: lxc/list.go:342 +msgid "STATE" +msgstr "" + +#: lxc/remote.go:327 +msgid "STATIC" +msgstr "" + +#: lxc/remote.go:174 +msgid "Server certificate NACKed by user" +msgstr "" + +#: lxc/remote.go:236 +msgid "Server doesn't trust us after adding our cert" +msgstr "" + +#: lxc/remote.go:54 +msgid "Server protocol (lxd or simplestreams)" +msgstr "" + +#: lxc/restore.go:21 +msgid "Set the current state of a resource back to a snapshot.\n" + "\n" + "lxc restore [remote:] [--stateful]\n" + "\n" + "Restores a container from a snapshot (optionally with running state, see\n" + "snapshot help for details).\n" + "\n" + "For example:\n" + "lxc snapshot u1 snap0 # create the snapshot\n" + "lxc restore u1 snap0 # restore the snapshot" +msgstr "" + +#: lxc/file.go:44 +msgid "Set the file's gid on push" +msgstr "" + +#: lxc/file.go:45 +msgid "Set the file's perms on push" +msgstr "" + +#: lxc/file.go:43 +msgid "Set the file's uid on push" +msgstr "" + +#: lxc/help.go:32 +msgid "Show all commands (not just interesting ones)" +msgstr "" + +#: lxc/info.go:33 +msgid "Show the container's last 100 log lines?" 
+msgstr "" + +#: lxc/image.go:315 +#, c-format +msgid "Size: %.2fMB" +msgstr "" + +#: lxc/info.go:130 +msgid "Snapshots:" +msgstr "" + +#: lxc/image.go:339 +msgid "Source:" +msgstr "" + +#: lxc/launch.go:122 +#, c-format +msgid "Starting %s" +msgstr "" + +#: lxc/info.go:92 +#, c-format +msgid "Status: %s" +msgstr "" + +#: lxc/publish.go:34 lxc/publish.go:35 +msgid "Stop the container if currently running" +msgstr "" + +#: lxc/delete.go:106 lxc/publish.go:111 +msgid "Stopping container failed!" +msgstr "" + +#: lxc/action.go:38 +msgid "Store the container state (only for stop)." +msgstr "" + +#: lxc/list.go:343 +msgid "TYPE" +msgstr "" + +#: lxc/delete.go:92 +msgid "The container is currently running, stop it first or pass --force." +msgstr "" + +#: lxc/publish.go:89 +msgid "The container is currently running. Use --force to have it stopped and restarted." +msgstr "" + +#: lxc/publish.go:62 +msgid "There is no \"image name\". Did you want an alias?" +msgstr "" + +#: lxc/action.go:36 +msgid "Time to wait for the container before killing it." +msgstr "" + +#: lxc/image.go:318 +msgid "Timestamps:" +msgstr "" + +#: lxc/image.go:388 +#, c-format +msgid "Transferring image: %d%%" +msgstr "" + +#: lxc/action.go:93 lxc/launch.go:130 +#, c-format +msgid "Try `lxc info --show-log %s` for more info" +msgstr "" + +#: lxc/info.go:94 +msgid "Type: ephemeral" +msgstr "" + +#: lxc/info.go:96 +msgid "Type: persistent" +msgstr "" + +#: lxc/image.go:596 +msgid "UPLOAD DATE" +msgstr "" + +#: lxc/remote.go:324 +msgid "URL" +msgstr "" + +#: lxc/image.go:323 +#, c-format +msgid "Uploaded: %s" +msgstr "" + +#: lxc/main.go:122 +#, c-format +msgid "Usage: %s" +msgstr "" + +#: lxc/help.go:48 +msgid "Usage: lxc [subcommand] [options]" +msgstr "" + +#: lxc/delete.go:46 +msgid "User aborted delete operation." +msgstr "" + +#: lxc/restore.go:35 +msgid "Whether or not to restore the container's running state from snapshot (if available)" +msgstr "" + +#: lxc/snapshot.go:38 +msgid "Whether or not to snapshot the container's running state" +msgstr "" + +#: lxc/config.go:33 +msgid "Whether to show the expanded configuration" +msgstr "" + +#: lxc/remote.go:299 lxc/remote.go:304 +msgid "YES" +msgstr "" + +#: lxc/main.go:66 +msgid "`lxc config profile` is deprecated, please use `lxc profile`" +msgstr "" + +#: lxc/launch.go:109 +msgid "bad number of things scanned from image, container or snapshot" +msgstr "" + +#: lxc/action.go:89 +msgid "bad result type from action" +msgstr "" + +#: lxc/copy.go:78 +msgid "can't copy to the same container name" +msgstr "" + +#: lxc/remote.go:287 +msgid "can't remove the default remote" +msgstr "" + +#: lxc/remote.go:313 +msgid "default" +msgstr "" + +#: lxc/init.go:199 lxc/init.go:204 lxc/launch.go:93 lxc/launch.go:98 +msgid "didn't get any affected image, container or snapshot from server" +msgstr "" + +#: lxc/image.go:309 +msgid "disabled" +msgstr "" + +#: lxc/image.go:311 +msgid "enabled" +msgstr "" + +#: lxc/main.go:25 lxc/main.go:154 +#, c-format +msgid "error: %v" +msgstr "" + +#: lxc/help.go:40 lxc/main.go:117 +#, c-format +msgid "error: unknown command: %s" +msgstr "" + +#: lxc/launch.go:113 +msgid "got bad version" +msgstr "" + +#: lxc/image.go:304 lxc/image.go:572 +msgid "no" +msgstr "" + +#: lxc/copy.go:101 +msgid "not all the profiles from the source exist on the target" +msgstr "" + +#: lxc/remote.go:167 +msgid "ok (y/n)?" 
+msgstr "" + +#: lxc/main.go:261 lxc/main.go:265 +#, c-format +msgid "processing aliases failed %s\n" +msgstr "" + +#: lxc/remote.go:349 +#, c-format +msgid "remote %s already exists" +msgstr "" + +#: lxc/remote.go:279 lxc/remote.go:341 lxc/remote.go:376 lxc/remote.go:392 +#, c-format +msgid "remote %s doesn't exist" +msgstr "" + +#: lxc/remote.go:262 +#, c-format +msgid "remote %s exists as <%s>" +msgstr "" + +#: lxc/remote.go:283 lxc/remote.go:345 lxc/remote.go:380 +#, c-format +msgid "remote %s is static and cannot be modified" +msgstr "" + +#: lxc/info.go:139 +msgid "stateful" +msgstr "" + +#: lxc/info.go:141 +msgid "stateless" +msgstr "" + +#: lxc/info.go:135 +#, c-format +msgid "taken at %s" +msgstr "" + +#: lxc/exec.go:159 +msgid "unreachable return reached" +msgstr "" + +#: lxc/main.go:194 +msgid "wrong number of subcommand arguments" +msgstr "" + +#: lxc/delete.go:45 lxc/image.go:306 lxc/image.go:576 +msgid "yes" +msgstr "" + +#: lxc/copy.go:38 +msgid "you must specify a source container name" +msgstr "" + === added directory 'src/github.com/lxc/lxd/scripts' === added file 'src/github.com/lxc/lxd/scripts/lxc-to-lxd' --- src/github.com/lxc/lxd/scripts/lxc-to-lxd 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/scripts/lxc-to-lxd 2016-03-22 15:18:22 +0000 @@ -0,0 +1,419 @@ +#!/usr/bin/python3 + +import argparse +import json +import lxc +import os +import subprocess +import time +from pylxd import api as api + + +# Fetch a config key as a list +def config_get(config, key, default=None): + result = [] + for line in config: + fields = line.split("=", 1) + if fields[0].strip() == key: + result.append(fields[-1].strip()) + + if len(result) == 0: + return default + else: + return result + + +# Parse a LXC configuration file, called recursively for includes +def config_parse(path): + config = [] + with open(path, "r") as fd: + for line in fd: + line = line.strip() + key = line.split("=", 1)[0].strip() + value = line.split("=", 1)[-1].strip() + + # Parse user-added includes + if key == "lxc.include": + # Ignore our own default configs + if value.startswith("/usr/share/lxc/config/"): + continue + + if os.path.isfile(value): + config += config_parse(value) + continue + elif os.path.isdir(value): + for entry in os.listdir(value): + if not entry.endswith(".conf"): + continue + + config += config_parse(os.path.join(value, entry)) + continue + else: + print("Invalid include: %s", line) + + # Expand any fstab + if key == "lxc.mount": + if not os.path.exists(value): + print("Container fstab file doesn't exist, skipping...") + return False + + with open(value, "r") as fd: + for line in fd: + line = line.strip() + if line and not line.startswith("#"): + config.append("lxc.mount.entry = %s" % line) + continue + + # Proces normal configuration keys + if line and not line.startswith("#"): + config.append(line) + + return config + + +# Convert a LXC container to a LXD one +def convert_container(container_name, args): + # Connect to LXD + if args.lxdpath: + os.environ['LXD_DIR'] = args.lxdpath + lxd = api.API() + + print("==> Processing container: %s" % container_name) + + # Load the container + try: + container = lxc.Container(container_name, args.lxcpath) + except: + print("Invalid container configuration, skipping...") + return False + + if container.running: + print("Only stopped containers can be migrated, skipping...") + return False + + # As some keys can't be queried over the API, parse the config ourselves + print("Parsing LXC configuration") + lxc_config = 
+
+    if args.debug:
+        print("Container configuration:")
+        print(" ", end="")
+        print("\n ".join(lxc_config))
+        print("")
+
+    if config_get(lxc_config, "lxd.migrated"):
+        print("Container has already been migrated, skipping...")
+        return False
+
+    # Make sure we don't have a conflict
+    print("Checking for existing containers")
+    if lxd.container_defined(container_name):
+        print("Container already exists, skipping...")
+        return False
+
+    # Validate lxc.utsname
+    print("Validating container name")
+    value = config_get(lxc_config, "lxc.utsname")
+    if value and value[0] != container_name:
+        print("Container name doesn't match lxc.utsname, skipping...")
+        return False
+
+    # Detect unprivileged containers
+    print("Validating container mode")
+    if config_get(lxc_config, "lxc.id_map"):
+        print("Unprivileged containers aren't supported, skipping...")
+        return False
+
+    # Detect hooks in config
+    for line in lxc_config:
+        if line.startswith("lxc.hook."):
+            print("Hooks aren't supported, skipping...")
+            return False
+
+    # Extract and validate the rootfs key
+    print("Validating container rootfs")
+    value = config_get(lxc_config, "lxc.rootfs")
+    if not value:
+        print("Invalid container, missing lxc.rootfs key, skipping...")
+        return False
+
+    rootfs = value[0]
+
+    if not os.path.exists(rootfs):
+        print("Couldn't find the container rootfs '%s', skipping..." % rootfs)
+        return False
+
+    # Base config
+    config = {}
+    config['security.privileged'] = "true"
+    devices = {}
+    devices['eth0'] = {'type': "none"}
+
+    # Convert network configuration
+    print("Processing network configuration")
+    try:
+        count = len(container.get_config_item("lxc.network"))
+    except:
+        count = 0
+
+    for i in range(count):
+        device = {"type": "nic"}
+
+        # Get the device type
+        device["nictype"] = container.get_config_item("lxc.network")[i]
+
+        # Get everything else
+        dev = container.network[i]
+
+        # Validate configuration
+        if dev.ipv4 or dev.ipv4_gateway:
+            print("IPv4 network configuration isn't supported, skipping...")
+            return False
+
+        if dev.ipv6 or dev.ipv6_gateway:
+            print("IPv6 network configuration isn't supported, skipping...")
+            return False
+
+        if dev.script_up or dev.script_down:
+            print("Network config scripts aren't supported, skipping...")
+            return False
+
+        if device["nictype"] == "none":
+            print("\"none\" network mode isn't supported, skipping...")
+            return False
+
+        if device["nictype"] == "vlan":
+            print("\"vlan\" network mode isn't supported, skipping...")
+            return False
+
+        # Convert the configuration
+        if dev.hwaddr:
+            device['hwaddr'] = dev.hwaddr
+
+        if dev.link:
+            device['parent'] = dev.link
+
+        if dev.mtu:
+            device['mtu'] = int(dev.mtu)
+
+        if dev.name:
+            device['name'] = dev.name
+
+        if dev.veth_pair:
+            device['host_name'] = dev.veth_pair
+
+        if device["nictype"] == "veth":
+            if "parent" in device:
+                device["nictype"] = "bridged"
+            else:
+                device["nictype"] = "p2p"
+
+        if device["nictype"] == "phys":
+            device["nictype"] = "physical"
+
+        if device["nictype"] == "empty":
+            continue
+
+        devices['convert_net%d' % i] = device
+
+    # Convert storage configuration
+    value = config_get(lxc_config, "lxc.mount.entry", [])
+    i = 0
+    for entry in value:
+        mount = entry.split(" ")
+        if len(mount) < 4:
+            print("Invalid mount configuration, skipping...")
+            return False
+
+        device = {'type': "disk"}
+
+        # Deal with read-only mounts
+        if "ro" in mount[3].split(","):
+            device['readonly'] = "true"
+
+        # Deal with optional mounts
+        if "optional" in mount[3].split(","):
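+            # An "optional" fstab entry maps to LXD's best-effort disk
+            # behaviour: the device is skipped rather than failing the
+            # container start when its source is missing.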
device['optional'] = "true" + + # Set the source + device['source'] = mount[0] + + # Figure out the target + if mount[1][0] != "/": + device['path'] = "/%s" % mount[1] + else: + device['path'] = mount[1].split(rootfs, 1)[-1] + + devices['convert_mount%d' % i] = device + i += 1 + + # Convert environment + print("Processing environment configuration") + value = config_get(lxc_config, "lxc.environment", []) + for env in value: + entry = env.split("=", 1) + config['environment.%s' % entry[0].strip()] = entry[-1].strip() + + # Convert auto-start + print("Processing container boot configuration") + value = config_get(lxc_config, "lxc.start.auto") + if value and int(value[0]) > 0: + config['boot.autostart'] = "true" + + value = config_get(lxc_config, "lxc.start.delay") + if value and int(value[0]) > 0: + config['boot.autostart.delay'] = value[0] + + value = config_get(lxc_config, "lxc.start.order") + if value and int(value[0]) > 0: + config['boot.autostart.priority'] = value[0] + + # Convert apparmor + print("Processing container apparmor configuration") + value = config_get(lxc_config, "lxc.aa_profile") + if value: + if value[0] == "lxc-container-default-with-nesting": + config['security.nesting'] = "true" + elif value[0] != "lxc-container-default": + print("Unsupported custom apparmor profile, skipping...") + return False + + # Convert seccomp + print("Processing container seccomp configuration") + value = config_get(lxc_config, "lxc.seccomp") + if value: + print("Custom seccomp profiles aren't supported, skipping...") + return False + + # Convert SELinux + print("Processing container SELinux configuration") + value = config_get(lxc_config, "lxc.se_context") + if value: + print("Custom SELinux policies aren't supported, skipping...") + return False + + # Convert capabilities + print("Processing container capabilities configuration") + value = config_get(lxc_config, "lxc.cap.drop") + if value: + print("Custom capabilities aren't supported, skipping...") + return False + + value = config_get(lxc_config, "lxc.cap.keep") + if value: + print("Custom capabilities aren't supported, skipping...") + return False + + # Setup the container creation request + new = {'name': container_name, + 'source': {'type': 'none'}, + 'config': config, + 'devices': devices, + 'profiles': ["default"]} + + # Set the container architecture if set in LXC + print("Converting container architecture configuration") + arches = {'i686': 1, + 'x86_64': 2, + 'armhf': 3, + 'arm64': 4, + 'powerpc': 5, + 'powerpc64': 6, + 'ppc64el': 7, + 's390x': 8} + + arch = None + try: + arch = config_get(lxc_config, "lxc.arch", None) + + if arch and arch[0] in arches: + new['architecture'] = arches[arch[0]] + else: + print("Unknown architecture, assuming native.") + except: + print("Couldn't find container architecture, assuming native.") + + # Define the container in LXD + if args.debug: + print("LXD container config:") + print(json.dumps(new, indent=True, sort_keys=True)) + + if args.dry_run: + return True + + try: + print("Creating the container") + lxd.container_init(new) + except Exception as e: + print("Failed to create the container: %s" % e) + return False + time.sleep(1) + + # Transfer the filesystem + lxd_rootfs = os.path.join("/var/lib/lxd/", "containers", + container_name, "rootfs") + + if args.copy_rootfs: + print("Copying container rootfs") + if not os.path.exists(lxd_rootfs): + os.mkdir(lxd_rootfs) + + if subprocess.call(["rsync", "-Aa", "--sparse", + "--acls", "--numeric-ids", "--hard-links", + "%s/" % rootfs, "%s/" % lxd_rootfs]) 
!= 0: + print("Failed to transfer the container rootfs, skipping...") + return False + else: + if os.path.exists(lxd_rootfs): + os.rmdir(lxd_rootfs) + + if subprocess.call(["mv", rootfs, lxd_rootfs]) != 0: + print("Failed to move the container rootfs, skipping...") + return False + + os.mkdir(rootfs) + + # Delete the source + if args.delete: + print("Deleting source container") + container.delete() + + # Mark the container as migrated + with open(container.config_file_name, "a") as fd: + fd.write("lxd.migrated=true\n") + print("Container is ready to use") + + +# Argument parsing +parser = argparse.ArgumentParser() +parser.add_argument("--dry-run", action="store_true", default=False, + help="Dry run mode") +parser.add_argument("--debug", action="store_true", default=False, + help="Print debugging output") +parser.add_argument("--all", action="store_true", default=False, + help="Import all containers") +parser.add_argument("--delete", action="store_true", default=False, + help="Delete the source container") +parser.add_argument("--copy-rootfs", action="store_true", default=False, + help="Copy the container rootfs rather than moving it") +parser.add_argument("--lxcpath", type=str, default=False, + help="Alternate LXC path") +parser.add_argument("--lxdpath", type=str, default=False, + help="Alternate LXD path") +parser.add_argument(dest='containers', metavar="CONTAINER", type=str, + help="Container to import", nargs="*") +args = parser.parse_args() + +# Sanity checks +if not os.geteuid() == 0: + parser.error("You must be root to run this tool") + +if (not args.containers and not args.all) or (args.containers and args.all): + parser.error("You must either pass container names or --all") + +for container_name in lxc.list_containers(config_path=args.lxcpath): + if args.containers and container_name not in args.containers: + continue + + convert_container(container_name, args) === added file 'src/github.com/lxc/lxd/scripts/lxd-images' --- src/github.com/lxc/lxd/scripts/lxd-images 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/scripts/lxd-images 2016-03-22 15:18:22 +0000 @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +import argparse +import os +import sys + + +def import_ubuntu(parser, args): + remote = "ubuntu" + + if args.stream == "daily": + remote = "ubuntu-daily" + + parts = [] + if args.release: + parts.append(args.release) + + if args.architecture: + parts.append(args.architecture) + + if args.version: + parts.append(args.version) + + image = "/".join(parts) + + cmd = ["lxc", "image", "copy", "%s:%s" % (remote, image), "local:"] + + for alias in args.alias: + cmd += ["--alias", alias] + + if args.public: + cmd += ["--public"] + + if args.sync: + cmd += ["--auto-update"] + + print("Redirecting to: %s" % " ".join(cmd), file=sys.stderr) + os.execvp("lxc", cmd) + + +def import_busybox(parser, args): + print("Redirecting to: test/deps/import-busybox %s" % + " ".join(sys.argv[2:]), file=sys.stderr) + os.execvp("test/deps/import-busybox", + ["import-busybox"] + sys.argv[3:]) + + +def sync(parser, args): + print("Sync is now done by LXD itself.", file=sys.stderr) + pass + +parser = argparse.ArgumentParser("Compatibility wrapper") +parser.add_argument("--quiet", action="store_true") + +parser_subparsers = parser.add_subparsers(dest="action") +parser_subparsers.required = True + +# Image import +parser_import = parser_subparsers.add_parser("import") +parser_import_subparsers = parser_import.add_subparsers(dest="source") +parser_import_subparsers.required = True + +# # Busybox +parser_import_busybox = 
+parser_import_busybox.add_argument("--alias", action="append", default=[])
+parser_import_busybox.add_argument("--public", action="store_true",
+                                   default=False)
+parser_import_busybox.add_argument("--split", action="store_true",
+                                   default=False)
+parser_import_busybox.set_defaults(func=import_busybox)
+
+# # Ubuntu
+parser_import_ubuntu = parser_import_subparsers.add_parser("ubuntu")
+parser_import_ubuntu.add_argument("release", default=None, nargs="?")
+parser_import_ubuntu.add_argument("architecture", default=None, nargs="?")
+parser_import_ubuntu.add_argument("version", default=None, nargs="?")
+parser_import_ubuntu.add_argument("--stream", default="auto")
+parser_import_ubuntu.add_argument("--alias", action="append", default=[])
+parser_import_ubuntu.add_argument("--public", action="store_true",
+                                  default=False)
+parser_import_ubuntu.add_argument("--sync", action="store_true", default=False)
+parser_import_ubuntu.set_defaults(func=import_ubuntu)
+
+# Image sync
+parser_import = parser_subparsers.add_parser("sync")
+parser_import.set_defaults(func=sync)
+
+# Call the function
+args = parser.parse_args()
+
+try:
+    args.func(parser, args)
+except Exception as e:
+    parser.error(e)
=== added file 'src/github.com/lxc/lxd/scripts/lxd-setup-lvm-storage'
--- src/github.com/lxc/lxd/scripts/lxd-setup-lvm-storage 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/scripts/lxd-setup-lvm-storage 2016-03-22 15:18:22 +0000
@@ -0,0 +1,230 @@
+#!/usr/bin/env python3
+# Let's stick to core python3 modules
+import argparse
+import gettext
+import http.client
+import json
+import os
+import socket
+from subprocess import check_output
+import sys
+
+
+DEFAULT_VGNAME = "LXDStorage"
+
+_ = gettext.gettext
+gettext.textdomain("lxd")
+
+
+class FriendlyParser(argparse.ArgumentParser):
+    def error(self, message):
+        sys.stderr.write('error: %s\n' % message)
+        self.print_help()
+        sys.exit(2)
+
+
+class UnixHTTPConnection(http.client.HTTPConnection):
+    def __init__(self, path):
+        http.client.HTTPConnection.__init__(self, 'localhost')
+        self.path = path
+
+    def connect(self):
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(self.path)
+        self.sock = sock
+
+
+class LXD(object):
+    def __init__(self, path):
+        self.lxd = UnixHTTPConnection(path)
+
+    def rest_call(self, path, data=None, method="GET", headers={}):
+        if method == "GET" and data:
+            self.lxd.request(
+                method,
+                "%s?%s" % (path,
+                           "&".join(["%s=%s" % (key, value)
+                                     for key, value in data.items()])),
+                headers=headers)
+        else:
+            self.lxd.request(method, path, data, headers)
+
+        r = self.lxd.getresponse()
+        d = json.loads(r.read().decode("utf-8"))
+        return r.status, d
+
+    def set_lvm_vgname(self, vgname):
+        self._set_lvm_config("storage.lvm_vg_name", vgname)
+
+    def set_lvm_poolname(self, poolname):
+        self._set_lvm_config("storage.lvm_thinpool_name", poolname)
+
+    def _set_lvm_config(self, key, val):
+        data = json.dumps({"config": {key: val}})
+
+        status, data = self.rest_call("/1.0", data, "PUT")
+
+        if status != 200:
+            sys.stderr.write("Error in setting vgname:{}\n{}\n".format(status,
+                                                                       data))
+            raise Exception("Failed to set vgname: %s" % val)
+
+    def get_server_config(self):
+        status, config = self.rest_call("/1.0", "", "GET")
+        if status != 200:
+            sys.stderr.write("Error in getting vgname\n")
+            raise Exception("Failed to get vgname")
+
+        return config["metadata"]["config"]
+
+
+def lxd_dir():
+    if "LXD_DIR" in os.environ:
+        return os.environ["LXD_DIR"]
+    else:
+        return "/var/lib/lxd"
+
+
+def connect_to_socket():
+    lxd_socket = os.path.join(lxd_dir(), "unix.socket")
+
+    if not os.path.exists(lxd_socket):
+        print(_("LXD isn't running."))
+        sys.exit(1)
+
+    return LXD(lxd_socket)
+
+
+def create_image(args):
+    imgfname = os.path.join(lxd_dir(), "{}.img".format(args.size))
+    rollbacks = []
+    try:
+        print("Creating sparse backing file {}".format(imgfname), flush=True)
+        check_output("truncate -s {} {}".format(args.size, imgfname),
+                     shell=True)
+        rollbacks.append("rm {}".format(imgfname))
+
+        print("Setting up loop device", flush=True)
+        pvloopdev = check_output("losetup -f", shell=True).decode().strip()
+        check_output("losetup {} {}".format(pvloopdev, imgfname), shell=True)
+        rollbacks.append("losetup -d " + pvloopdev)
+
+        print("Creating LVM PV {}".format(pvloopdev), flush=True)
+        check_output("pvcreate {}".format(pvloopdev), shell=True)
+        rollbacks.append("pvremove " + pvloopdev)
+
+        print("Creating LVM VG {}".format(DEFAULT_VGNAME), flush=True)
+        check_output("vgcreate {} {}".format(DEFAULT_VGNAME, pvloopdev),
+                     shell=True)
+        rollbacks.append("vgremove {}".format(DEFAULT_VGNAME))
+
+    except Exception as e:
+        sys.stderr.write("Error: {}. Cleaning up:\n".format(e))
+        for rbcmd in reversed(rollbacks):
+            sys.stderr.write("+ {}\n".format(rbcmd))
+            check_output(rbcmd, shell=True)
+        raise e
+
+
+def destroy_image(args, lxd):
+    print("Checking current LXD configuration", flush=True)
+    cfg = lxd.get_server_config()
+    vgname = cfg.get("storage.lvm_vg_name", None)
+    if vgname is None:
+        sys.stderr.write("LXD is not configured for LVM. "
+                         "No changes will be made.\n")
+        return
+
+    lvnames = check_output("lvs {} -o name,lv_attr --noheadings"
+                           .format(vgname), shell=True).decode().strip()
+    used_lvs = []
+    for lvline in lvnames.split("\n"):
+        if lvline == '':
+            continue
+        name, attrs = lvline.split()
+        if attrs.strip().startswith("V"):
+            used_lvs.append(name)
+    if len(used_lvs) > 0:
+        print("LVM storage is still in use by the following volumes: {}"
+              .format(used_lvs))
+        print("Please delete the corresponding images and/or "
+              "containers before destroying storage.")
+        sys.exit()
+
+    pvname = check_output("vgs {} --noheadings -o pv_name"
+                          .format(vgname), shell=True).decode().strip()
+    print("Removing volume group {}".format(vgname))
+    check_output("vgremove -f {}".format(vgname), shell=True)
+    print("Removing physical volume {}".format(pvname))
+    check_output("pvremove -y {}".format(pvname), shell=True)
+
+    lostr = check_output("losetup -a | grep {}".format(pvname),
+                         shell=True).decode().strip()
+    imgfname = lostr.split('(')[-1].replace(')', '')
+    print("Detaching loop device {}".format(pvname))
+    check_output("losetup -d {}".format(pvname), shell=True)
+    print("Deleting backing file {}".format(imgfname))
+    if os.path.exists(imgfname):
+        check_output("rm '{}'".format(imgfname), shell=True)
+
+
+def do_main():
+    parser = FriendlyParser(
+        description=_("LXD: LVM storage helper"),
+        formatter_class=argparse.RawTextHelpFormatter,
+        epilog=_("""Examples:
+ To create a 10G sparse loopback file and register it with LVM and LXD:
+    %s -s 10G
+ To de-configure LXD and destroy the LVM volumes and backing file:
+    %s --destroy
+""" % (sys.argv[0], sys.argv[0])))
+    parser.add_argument("-s", "--size", default="10G",
+                        help=_("Size of backing file to register as LVM PV"))
+    parser.add_argument("--destroy", action="store_true", default=False,
+                        help=_("Un-configure LXD and delete image file"))
+
+    args = parser.parse_args()
+    if os.geteuid() != 0:
+        sys.exit("Configuring LVM requires root privileges.")
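+    # Sanity-check the LVM tooling before touching any state: 'type' is a
+    # shell builtin, so a non-zero exit status from the check_output() calls
+    # below means the required tool isn't available on PATH.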
+ + try: + check_output("type vgcreate", shell=True) + except: + sys.exit("lvm2 tools not found. try 'apt-get install lvm2 " + "thin-provisioning-tools'") + try: + check_output("type thin_check", shell=True) + except: + sys.exit("lvm thin provisioning tools are required. " + "try 'apt-get install thin-provisioning-tools'") + + lxd = connect_to_socket() + + if args.destroy: + try: + destroy_image(args, lxd) + print("Clearing LXD storage configuration") + lxd.set_lvm_vgname("") + lxd.set_lvm_poolname("") + except Exception as e: + sys.stderr.write("Error destroying image:") + sys.stderr.write(str(e)) + sys.stderr.write("\n") + + else: + try: + create_image(args) + except: + sys.stderr.write("Stopping.\n") + else: + try: + print("Configuring LXD") + lxd.set_lvm_vgname(DEFAULT_VGNAME) + except: + sys.stderr.write("Error configuring LXD, " + "removing backing file\n") + destroy_image(args, lxd) + + print("Done.") + + +if __name__ == "__main__": + do_main() === added directory 'src/github.com/lxc/lxd/scripts/vagrant' === added file 'src/github.com/lxc/lxd/scripts/vagrant/install-go.sh' --- src/github.com/lxc/lxd/scripts/vagrant/install-go.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/scripts/vagrant/install-go.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,17 @@ +#!/bin/bash + +set -xe +export DEBIAN_FRONTEND=noninteractive + +which add-apt-repository || (sudo apt-get update ; sudo apt-get install -y software-properties-common) +sudo add-apt-repository ppa:ubuntu-lxc/lxd-git-master +sudo apt-get update +which go || sudo apt-get install -y golang + +[ -e $HOME/go ] || mkdir -p $HOME/go + +cat << 'EOF' | sudo tee /etc/profile.d/S99go.sh +export GOPATH=$HOME/go +export PATH=$PATH:$GOPATH/bin +EOF + === added file 'src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh' --- src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/scripts/vagrant/install-lxd.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,41 @@ +#!/bin/bash + +set -xe +export DEBIAN_FRONTEND=noninteractive + +# install runtime dependencies +sudo apt-get -y install xz-utils tar acl curl gettext \ + jq sqlite3 + +# install build dependencies +sudo apt-get -y install lxc lxc-dev mercurial git pkg-config \ + protobuf-compiler golang-goprotobuf-dev + +# setup env +[ -e uid_gid_setup ] || \ + echo "root:1000000:65536" | sudo tee -a /etc/subuid /etc/subgid && \ + touch uid_gid_setup + + +go get github.com/lxc/lxd +cd $GOPATH/src/github.com/lxc/lxd +go get -v -d ./... 
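+# 'make' builds LXD and is expected to install the binaries under
+# $GOPATH/bin, which is where the upstart job below looks for the lxd daemon.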
+make + + +cat << 'EOF' | sudo tee /etc/init/lxd.conf +description "LXD daemon" +author "John Brooker" + +start on filesystem or runlevel [2345] +stop on shutdown + +script + + exec /home/vagrant/go/bin/lxd --group vagrant + +end script + +EOF + +sudo service lxd start === added directory 'src/github.com/lxc/lxd/shared' === added file 'src/github.com/lxc/lxd/shared/architectures.go' --- src/github.com/lxc/lxd/shared/architectures.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/architectures.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,107 @@ +package shared + +import ( + "fmt" +) + +const ( + ARCH_UNKNOWN = 0 + ARCH_32BIT_INTEL_X86 = 1 + ARCH_64BIT_INTEL_X86 = 2 + ARCH_32BIT_ARMV7_LITTLE_ENDIAN = 3 + ARCH_64BIT_ARMV8_LITTLE_ENDIAN = 4 + ARCH_32BIT_POWERPC_BIG_ENDIAN = 5 + ARCH_64BIT_POWERPC_BIG_ENDIAN = 6 + ARCH_64BIT_POWERPC_LITTLE_ENDIAN = 7 + ARCH_64BIT_S390_BIG_ENDIAN = 8 +) + +var architectureNames = map[int]string{ + ARCH_32BIT_INTEL_X86: "i686", + ARCH_64BIT_INTEL_X86: "x86_64", + ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "armv7l", + ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "aarch64", + ARCH_32BIT_POWERPC_BIG_ENDIAN: "ppc", + ARCH_64BIT_POWERPC_BIG_ENDIAN: "ppc64", + ARCH_64BIT_POWERPC_LITTLE_ENDIAN: "ppc64le", + ARCH_64BIT_S390_BIG_ENDIAN: "s390x", +} + +var architectureAliases = map[int][]string{ + ARCH_32BIT_INTEL_X86: []string{"i386"}, + ARCH_64BIT_INTEL_X86: []string{"amd64"}, + ARCH_32BIT_ARMV7_LITTLE_ENDIAN: []string{"armel", "armhf"}, + ARCH_64BIT_ARMV8_LITTLE_ENDIAN: []string{"arm64"}, + ARCH_32BIT_POWERPC_BIG_ENDIAN: []string{"powerpc"}, + ARCH_64BIT_POWERPC_BIG_ENDIAN: []string{"powerpc64"}, + ARCH_64BIT_POWERPC_LITTLE_ENDIAN: []string{"ppc64el"}, +} + +var architecturePersonalities = map[int]string{ + ARCH_32BIT_INTEL_X86: "linux32", + ARCH_64BIT_INTEL_X86: "linux64", + ARCH_32BIT_ARMV7_LITTLE_ENDIAN: "linux32", + ARCH_64BIT_ARMV8_LITTLE_ENDIAN: "linux64", + ARCH_32BIT_POWERPC_BIG_ENDIAN: "linux32", + ARCH_64BIT_POWERPC_BIG_ENDIAN: "linux64", + ARCH_64BIT_POWERPC_LITTLE_ENDIAN: "linux64", + ARCH_64BIT_S390_BIG_ENDIAN: "linux64", +} + +var architectureSupportedPersonalities = map[int][]int{ + ARCH_32BIT_INTEL_X86: []int{}, + ARCH_64BIT_INTEL_X86: []int{ARCH_32BIT_INTEL_X86}, + ARCH_32BIT_ARMV7_LITTLE_ENDIAN: []int{}, + ARCH_64BIT_ARMV8_LITTLE_ENDIAN: []int{ARCH_32BIT_ARMV7_LITTLE_ENDIAN}, + ARCH_32BIT_POWERPC_BIG_ENDIAN: []int{}, + ARCH_64BIT_POWERPC_BIG_ENDIAN: []int{ARCH_32BIT_POWERPC_BIG_ENDIAN}, + ARCH_64BIT_POWERPC_LITTLE_ENDIAN: []int{}, + ARCH_64BIT_S390_BIG_ENDIAN: []int{}, +} + +const ArchitectureDefault = "x86_64" + +func ArchitectureName(arch int) (string, error) { + arch_name, exists := architectureNames[arch] + if exists { + return arch_name, nil + } + + return "unknown", fmt.Errorf("Architecture isn't supported: %d", arch) +} + +func ArchitectureId(arch string) (int, error) { + for arch_id, arch_name := range architectureNames { + if arch_name == arch { + return arch_id, nil + } + } + + for arch_id, arch_aliases := range architectureAliases { + for _, arch_name := range arch_aliases { + if arch_name == arch { + return arch_id, nil + } + } + } + + return 0, fmt.Errorf("Architecture isn't supported: %s", arch) +} + +func ArchitecturePersonality(arch int) (string, error) { + arch_personality, exists := architecturePersonalities[arch] + if exists { + return arch_personality, nil + } + + return "", fmt.Errorf("Architecture isn't supported: %d", arch) +} + +func ArchitecturePersonalities(arch int) ([]int, error) { + personalities, exists := 
architectureSupportedPersonalities[arch] + if exists { + return personalities, nil + } + + return []int{}, fmt.Errorf("Architecture isn't supported: %d", arch) +} === added file 'src/github.com/lxc/lxd/shared/architectures_linux.go' --- src/github.com/lxc/lxd/shared/architectures_linux.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/architectures_linux.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +// +build linux + +package shared + +import ( + "syscall" +) + +func ArchitectureGetLocal() (string, error) { + uname := syscall.Utsname{} + if err := syscall.Uname(&uname); err != nil { + return ArchitectureDefault, err + } + + architectureName := "" + for _, c := range uname.Machine { + if c == 0 { + break + } + architectureName += string(byte(c)) + } + + return architectureName, nil +} === added file 'src/github.com/lxc/lxd/shared/architectures_others.go' --- src/github.com/lxc/lxd/shared/architectures_others.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/architectures_others.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +// +build !linux + +package shared + +func ArchitectureGetLocal() (string, error) { + return ArchitectureDefault, nil +} === added file 'src/github.com/lxc/lxd/shared/cert.go' --- src/github.com/lxc/lxd/shared/cert.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/cert.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,201 @@ +// http://golang.org/src/pkg/crypto/tls/generate_cert.go +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package shared + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "log" + "math/big" + "net" + "os" + "os/user" + "path" + "time" +) + +// CertInfo is the representation of a Certificate in the API. +type CertInfo struct { + Certificate string `json:"certificate"` + Fingerprint string `json:"fingerprint"` + Type string `json:"type"` +} + +/* + * Generate a list of names for which the certificate will be valid. 
+ * This will include the hostname and IP addresses
+ */
+func mynames() ([]string, error) {
+    h, err := os.Hostname()
+    if err != nil {
+        return nil, err
+    }
+
+    ret := []string{h}
+
+    ifs, err := net.Interfaces()
+    if err != nil {
+        return nil, err
+    }
+
+    for _, iface := range ifs {
+        if IsLoopback(&iface) {
+            continue
+        }
+
+        addrs, err := iface.Addrs()
+        if err != nil {
+            return nil, err
+        }
+
+        for _, addr := range addrs {
+            ret = append(ret, addr.String())
+        }
+    }
+
+    return ret, nil
+}
+
+func FindOrGenCert(certf string, keyf string) error {
+    if PathExists(certf) && PathExists(keyf) {
+        return nil
+    }
+
+    /* If neither stat succeeded, then this is our first run and we
+     * need to generate cert and privkey */
+    err := GenCert(certf, keyf)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
+
+// GenCert will create and populate a certificate file and a key file
+func GenCert(certf string, keyf string) error {
+    /* Create the basenames if needed */
+    dir := path.Dir(certf)
+    err := os.MkdirAll(dir, 0750)
+    if err != nil {
+        return err
+    }
+    dir = path.Dir(keyf)
+    err = os.MkdirAll(dir, 0750)
+    if err != nil {
+        return err
+    }
+
+    certBytes, keyBytes, err := GenerateMemCert()
+    if err != nil {
+        return err
+    }
+
+    certOut, err := os.Create(certf)
+    if err != nil {
+        log.Printf("failed to open %s for writing: %s", certf, err)
+        return err
+    }
+    certOut.Write(certBytes)
+    certOut.Close()
+
+    keyOut, err := os.OpenFile(keyf, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+    if err != nil {
+        log.Printf("failed to open %s for writing: %s", keyf, err)
+        return err
+    }
+    keyOut.Write(keyBytes)
+    keyOut.Close()
+    return nil
+}
+
+// GenerateMemCert creates a certificate and key pair, returning them as byte
+// arrays in memory.
+func GenerateMemCert() ([]byte, []byte, error) {
+    privk, err := rsa.GenerateKey(rand.Reader, 4096)
+    if err != nil {
+        log.Printf("failed to generate key")
+        return nil, nil, err
+    }
+
+    hosts, err := mynames()
+    if err != nil {
+        log.Printf("Failed to get my hostname")
+        return nil, nil, err
+    }
+
+    validFrom := time.Now()
+    validTo := validFrom.Add(10 * 365 * 24 * time.Hour)
+
+    serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+    serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+    if err != nil {
+        log.Printf("failed to generate serial number: %s", err)
+        return nil, nil, err
+    }
+
+    userEntry, err := user.Current()
+    var username string
+    if err == nil {
+        username = userEntry.Username
+        if username == "" {
+            username = "UNKNOWN"
+        }
+    } else {
+        username = "UNKNOWN"
+    }
+
+    hostname, err := os.Hostname()
+    if err != nil {
+        hostname = "UNKNOWN"
+    }
+
+    template := x509.Certificate{
+        SerialNumber: serialNumber,
+        Subject: pkix.Name{
+            Organization: []string{"linuxcontainers.org"},
+            CommonName:   fmt.Sprintf("%s@%s", username, hostname),
+        },
+        NotBefore: validFrom,
+        NotAfter:  validTo,
+
+        KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+        ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+        BasicConstraintsValid: true,
+    }
+
+    for _, h := range hosts {
+        if ip := net.ParseIP(h); ip != nil {
+            template.IPAddresses = append(template.IPAddresses, ip)
+        } else {
+            template.DNSNames = append(template.DNSNames, h)
+        }
+    }
+
+    derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privk.PublicKey, privk)
+    if err != nil {
+        log.Printf("Failed to create certificate: %s", err)
+        return nil, nil, err
+    }
+
+    cert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+    key := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privk)})
+    return cert, key, nil
+}
+
+func ReadCert(fpath string) (*x509.Certificate, error) {
+    cf, err := ioutil.ReadFile(fpath)
+    if err != nil {
+        return nil, err
+    }
+
+    certBlock, _ := pem.Decode(cf)
+    if certBlock == nil {
+        return nil, fmt.Errorf("Invalid certificate file: %s", fpath)
+    }
+
+    return x509.ParseCertificate(certBlock.Bytes)
+}
=== added file 'src/github.com/lxc/lxd/shared/cert_test.go'
--- src/github.com/lxc/lxd/shared/cert_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/cert_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,39 @@
+package shared
+
+import (
+    "encoding/pem"
+    "testing"
+)
+
+func TestGenerateMemCert(t *testing.T) {
+    if testing.Short() {
+        t.Skip("skipping cert generation in short mode")
+    }
+    cert, key, err := GenerateMemCert()
+    if err != nil {
+        t.Error(err)
+        return
+    }
+    if cert == nil {
+        t.Error("GenerateMemCert returned a nil cert")
+        return
+    }
+    if key == nil {
+        t.Error("GenerateMemCert returned a nil key")
+        return
+    }
+    block, rest := pem.Decode(cert)
+    if len(rest) != 0 {
+        t.Errorf("GenerateMemCert returned a cert with trailing content: %q", string(rest))
+    }
+    if block.Type != "CERTIFICATE" {
+        t.Errorf("GenerateMemCert returned a cert with Type %q not \"CERTIFICATE\"", block.Type)
+    }
+    block, rest = pem.Decode(key)
+    if len(rest) != 0 {
+        t.Errorf("GenerateMemCert returned a key with trailing content: %q", string(rest))
+    }
+    if block.Type != "RSA PRIVATE KEY" {
+        t.Errorf("GenerateMemCert returned a key with Type %q not \"RSA PRIVATE KEY\"", block.Type)
+    }
+}
=== added file 'src/github.com/lxc/lxd/shared/container.go'
--- src/github.com/lxc/lxd/shared/container.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/container.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,123 @@
+package shared
+
+import (
+    "time"
+)
+
+type ContainerState struct {
+    Status     string                           `json:"status"`
+    StatusCode StatusCode                       `json:"status_code"`
+    Disk       map[string]ContainerStateDisk    `json:"disk"`
+    Memory     ContainerStateMemory             `json:"memory"`
+    Network    map[string]ContainerStateNetwork `json:"network"`
+    Pid        int64                            `json:"pid"`
+    Processes  int64                            `json:"processes"`
+}
+
+type ContainerStateDisk struct {
+    Usage int64 `json:"usage"`
+}
+
+type ContainerStateMemory struct {
+    Usage         int64 `json:"usage"`
+    UsagePeak     int64 `json:"usage_peak"`
+    SwapUsage     int64 `json:"swap_usage"`
+    SwapUsagePeak int64 `json:"swap_usage_peak"`
+}
+
+type ContainerStateNetwork struct {
+    Addresses []ContainerStateNetworkAddress `json:"addresses"`
+    Counters  ContainerStateNetworkCounters  `json:"counters"`
+    Hwaddr    string                         `json:"hwaddr"`
+    HostName  string                         `json:"host_name"`
+    Mtu       int                            `json:"mtu"`
+    State     string                         `json:"state"`
+    Type      string                         `json:"type"`
+}
+
+type ContainerStateNetworkAddress struct {
+    Family  string `json:"family"`
+    Address string `json:"address"`
+    Netmask string `json:"netmask"`
+    Scope   string `json:"scope"`
+}
+
+type ContainerStateNetworkCounters struct {
+    BytesReceived   int64 `json:"bytes_received"`
+    BytesSent       int64 `json:"bytes_sent"`
+    PacketsReceived int64 `json:"packets_received"`
+    PacketsSent     int64 `json:"packets_sent"`
+}
+
+type ContainerExecControl struct {
+    Command string            `json:"command"`
+    Args    map[string]string `json:"args"`
+}
+
+type SnapshotInfo struct {
+    CreationDate time.Time `json:"created_at"`
+    Name         string    `json:"name"`
+    Stateful     bool      `json:"stateful"`
+}
+
+type ContainerInfo struct {
+    Architecture string            `json:"architecture"`
+    Config       map[string]string `json:"config"`
+    CreationDate time.Time         `json:"created_at"`
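+    // Devices holds only the container's locally-defined devices; the
+    // Expanded variants below also fold in profile-provided entries.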
+    Devices         Devices           `json:"devices"`
+    Ephemeral       bool              `json:"ephemeral"`
+    ExpandedConfig  map[string]string `json:"expanded_config"`
+    ExpandedDevices Devices           `json:"expanded_devices"`
+    Name            string            `json:"name"`
+    Profiles        []string          `json:"profiles"`
+    Stateful        bool              `json:"stateful"`
+    Status          string            `json:"status"`
+    StatusCode      StatusCode        `json:"status_code"`
+}
+
+/*
+ * BriefContainerInfo contains a subset of the fields in
+ * ContainerInfo, namely those which a user may update
+ */
+type BriefContainerInfo struct {
+    Name      string            `json:"name"`
+    Profiles  []string          `json:"profiles"`
+    Config    map[string]string `json:"config"`
+    Devices   Devices           `json:"devices"`
+    Ephemeral bool              `json:"ephemeral"`
+}
+
+func (c *ContainerInfo) Brief() BriefContainerInfo {
+    retstate := BriefContainerInfo{Name: c.Name,
+        Profiles:  c.Profiles,
+        Config:    c.Config,
+        Devices:   c.Devices,
+        Ephemeral: c.Ephemeral}
+    return retstate
+}
+
+func (c *ContainerInfo) BriefExpanded() BriefContainerInfo {
+    retstate := BriefContainerInfo{Name: c.Name,
+        Profiles:  c.Profiles,
+        Config:    c.ExpandedConfig,
+        Devices:   c.ExpandedDevices,
+        Ephemeral: c.Ephemeral}
+    return retstate
+}
+
+type ContainerAction string
+
+const (
+    Stop     ContainerAction = "stop"
+    Start    ContainerAction = "start"
+    Restart  ContainerAction = "restart"
+    Freeze   ContainerAction = "freeze"
+    Unfreeze ContainerAction = "unfreeze"
+)
+
+type ProfileConfig struct {
+    Name        string            `json:"name"`
+    Config      map[string]string `json:"config"`
+    Description string            `json:"description"`
+    Devices     Devices           `json:"devices"`
+}
=== added file 'src/github.com/lxc/lxd/shared/devices.go'
--- src/github.com/lxc/lxd/shared/devices.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/devices.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,104 @@
+package shared
+
+type Device map[string]string
+type Devices map[string]Device
+
+func (list Devices) ContainsName(k string) bool {
+    return list[k] != nil
+}
+
+func (d Device) get(key string) string {
+    return d[key]
+}
+
+func (list Devices) Contains(k string, d Device) bool {
+    // If it didn't exist, it's different
+    if list[k] == nil {
+        return false
+    }
+
+    old := list[k]
+
+    return deviceEquals(old, d)
+}
+
+func deviceEquals(old Device, d Device) bool {
+    // Check for any difference and addition/removal of properties
+    for k := range d {
+        if d[k] != old[k] {
+            return false
+        }
+    }
+
+    for k := range old {
+        if d[k] != old[k] {
+            return false
+        }
+    }
+
+    return true
+}
+
+func (old Devices) Update(newlist Devices) (map[string]Device, map[string]Device, map[string]Device) {
+    rmlist := map[string]Device{}
+    addlist := map[string]Device{}
+    updatelist := map[string]Device{}
+
+    for key, d := range old {
+        if !newlist.Contains(key, d) {
+            rmlist[key] = d
+        }
+    }
+
+    for key, d := range newlist {
+        if !old.Contains(key, d) {
+            addlist[key] = d
+        }
+    }
+
+    for key, d := range addlist {
+        srcOldDevice := rmlist[key]
+        var oldDevice Device
+        err := DeepCopy(&srcOldDevice, &oldDevice)
+        if err != nil {
+            continue
+        }
+
+        srcNewDevice := newlist[key]
+        var newDevice Device
+        err = DeepCopy(&srcNewDevice, &newDevice)
+        if err != nil {
+            continue
+        }
+
+        for _, k := range []string{"limits.max", "limits.read", "limits.write", "limits.egress", "limits.ingress"} {
+            delete(oldDevice, k)
+            delete(newDevice, k)
+        }
+
+        if deviceEquals(oldDevice, newDevice) {
+            delete(rmlist, key)
+            delete(addlist, key)
+            updatelist[key] = d
+        }
+    }
+
+    return rmlist, addlist, updatelist
+}
+
+func (newBaseDevices Devices) ExtendFromProfile(currentFullDevices Devices, newDevicesFromProfile Devices) error {
+    // For any entry which exists in a profile but not in the container config, add it
+
+    for name, newDev := range newDevicesFromProfile {
+        if curDev, ok := currentFullDevices[name]; ok {
+            newBaseDevices[name] = curDev
+        } else {
+            newBaseDevices[name] = newDev
+        }
+    }
+
+    return nil
+}
=== added file 'src/github.com/lxc/lxd/shared/flex.go'
--- src/github.com/lxc/lxd/shared/flex.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/flex.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+/* This is a FLEXible file which can be used by both client and daemon.
+ * Teehee.
+ */
+package shared
+
+var Version = "2.0.0.rc1"
+var UserAgent = "LXD " + Version
+
+/*
+ * Please increment the api compat number every time you change the API.
+ *
+ * Version 1.0: ping
+ */
+var APIVersion = "1.0"
=== added directory 'src/github.com/lxc/lxd/shared/gnuflag'
=== added file 'src/github.com/lxc/lxd/shared/gnuflag/export_test.go'
--- src/github.com/lxc/lxd/shared/gnuflag/export_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/gnuflag/export_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,24 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gnuflag
+
+import (
+    "os"
+)
+
+// Additional routines compiled into the package only during testing.
+
+// ResetForTesting clears all flag state and sets the usage function as directed.
+// After calling ResetForTesting, parse errors in flag handling will not
+// exit the program.
+func ResetForTesting(usage func()) {
+    commandLine = NewFlagSet(os.Args[0], ContinueOnError)
+    Usage = usage
+}
+
+// CommandLine returns the default FlagSet.
+func CommandLine() *FlagSet {
+    return commandLine
+}
=== added file 'src/github.com/lxc/lxd/shared/gnuflag/flag.go'
--- src/github.com/lxc/lxd/shared/gnuflag/flag.go 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/gnuflag/flag.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,895 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+    Package flag implements command-line flag parsing in the GNU style.
+    It is almost exactly the same as the standard flag package,
+    the only difference being the extra argument to Parse.
+
+    Command line flag syntax:
+        -f        // single letter flag
+        -fg       // two single letter flags together
+        --flag    // multiple letter flag
+        --flag x  // non-boolean flags only
+        -f x      // non-boolean flags only
+        -fx       // if f is a non-boolean flag, x is its argument.
+
+    The last three forms are not permitted for boolean flags because the
+    meaning of the command
+        cmd -f *
+    will change if there is a file called 0, false, etc. There is currently
+    no way to turn off a boolean flag.
+
+    Flag parsing stops after the terminator "--", or just before the first
+    non-flag argument ("-" is a non-flag argument) if the interspersed
+    argument to Parse is false.
+*/
+package gnuflag
+
+import (
+    "bytes"
+    "errors"
+    "fmt"
+    "io"
+    "os"
+    "sort"
+    "strconv"
+    "strings"
+    "time"
+    "unicode/utf8"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
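+// parseGnuFlagArg returns it when --help or -h is seen without a matching definition.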
+var ErrHelp = errors.New("flag: help requested") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +type Value interface { + String() string + Set(string) error +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. 
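+    // If Usage is nil for a FlagSet other than the default command line one,
+    // defaultUsage is called instead (see the usage method below).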
+ Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + procArgs []string // arguments being processed (gnu only) + procFlag string // flag being processed (gnu only) + allowIntersperse bool // (gnu only) + exitOnError bool // does the program exit if there's an error? + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. +type Flag struct { + Name string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for _, f := range flags { + list[i] = f.Name + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + commandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + commandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.formal[name] +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return commandLine.formal[name] +} + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return commandLine.Set(name, value) +} + +// flagsByLength is a slice of flags implementing sort.Interface, +// sorting primarily by the length of the flag, and secondarily +// alphabetically. 
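+// PrintDefaults sorts each group of aliases with it, so the shortest name
+// (breaking ties alphabetically) is the one whose usage text gets printed.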
+type flagsByLength []*Flag + +func (f flagsByLength) Less(i, j int) bool { + s1, s2 := f[i].Name, f[j].Name + if len(s1) != len(s2) { + return len(s1) < len(s2) + } + return s1 < s2 +} +func (f flagsByLength) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} +func (f flagsByLength) Len() int { + return len(f) +} + +// flagsByName is a slice of slices of flags implementing sort.Interface, +// alphabetically sorting by the name of the first flag in each slice. +type flagsByName [][]*Flag + +func (f flagsByName) Less(i, j int) bool { + return f[i][0].Name < f[j][0].Name +} +func (f flagsByName) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} +func (f flagsByName) Len() int { + return len(f) +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +// If there is more than one name for a given flag, the usage information and +// default value from the shortest will be printed (or the least alphabetically +// if there are several equally short flag names). +func (f *FlagSet) PrintDefaults() { + // group together all flags for a given value + flags := make(map[interface{}]flagsByLength) + f.VisitAll(func(f *Flag) { + flags[f.Value] = append(flags[f.Value], f) + }) + + // sort the output flags by shortest name for each group. + var byName flagsByName + for _, f := range flags { + sort.Sort(f) + byName = append(byName, f) + } + sort.Sort(byName) + + var line bytes.Buffer + for _, fs := range byName { + line.Reset() + for i, f := range fs { + if i > 0 { + line.WriteString(", ") + } + line.WriteString(flagWithMinus(f.Name)) + } + format := " %s (= %s)\n %s\n" + if _, ok := fs[0].Value.(*stringValue); ok { + // put quotes on the value + format = " %s (= %q)\n %s\n" + } + fmt.Fprintf(f.out(), format, line.Bytes(), fs[0].DefValue, fs[0].Usage) + } +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + commandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(commandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(commandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return commandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. 
+func NArg() int { return len(commandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return commandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.Var(newBoolValue(value, p), name, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + commandLine.Var(newBoolValue(value, p), name, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, name, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return commandLine.Bool(name, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.Var(newIntValue(value, p), name, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + commandLine.Var(newIntValue(value, p), name, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVar(p, name, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return commandLine.Int(name, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.Var(newInt64Value(value, p), name, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + commandLine.Var(newInt64Value(value, p), name, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. 
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, name, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return commandLine.Int64(name, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.Var(newUintValue(value, p), name, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + commandLine.Var(newUintValue(value, p), name, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, name, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return commandLine.Uint(name, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.Var(newUint64Value(value, p), name, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + commandLine.Var(newUint64Value(value, p), name, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, name, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return commandLine.Uint64(name, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.Var(newStringValue(value, p), name, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. 
+func StringVar(p *string, name string, value string, usage string) { + commandLine.Var(newStringValue(value, p), name, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVar(p, name, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return commandLine.String(name, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.Var(newFloat64Value(value, p), name, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + commandLine.Var(newFloat64Value(value, p), name, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, name, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return commandLine.Float64(name, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), name, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + commandLine.Var(newDurationValue(value, p), name, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, name, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return commandLine.Duration(name, value, usage) +} + +// Var defines a flag with the specified name and usage string. 
The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{name, usage, value, value.String()} + _, alreadythere := f.formal[name] + if alreadythere { + fmt.Fprintf(f.out(), "%s flag redefined: %s\n", f.name, name) + panic("flag redefinition") // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name string, usage string) { + commandLine.Var(value, name, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintf(f.out(), "error: %v\n", err) + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is commandLine. +func (f *FlagSet) usage() { + if f == commandLine { + Usage() + } else if f.Usage == nil { + defaultUsage(f) + } else { + f.Usage() + } +} + +func (f *FlagSet) parseOneGnu() (flagName string, long, finished bool, err error) { + if len(f.procArgs) == 0 { + finished = true + return + } + + // processing previously encountered single-rune flag + if flag := f.procFlag; len(flag) > 0 { + _, n := utf8.DecodeRuneInString(flag) + f.procFlag = flag[n:] + flagName = flag[0:n] + return + } + + a := f.procArgs[0] + + // one non-flag argument + if a == "-" || a == "" || a[0] != '-' { + if f.allowIntersperse { + f.args = append(f.args, a) + f.procArgs = f.procArgs[1:] + return + } + f.args = append(f.args, f.procArgs...) + f.procArgs = nil + finished = true + return + } + + // end of flags + if f.procArgs[0] == "--" { + f.args = append(f.args, f.procArgs[1:]...) 
+ f.procArgs = nil + finished = true + return + } + + // long flag signified with "--" prefix + if a[1] == '-' { + long = true + i := strings.Index(a, "=") + if i < 0 { + f.procArgs = f.procArgs[1:] + flagName = a[2:] + return + } + flagName = a[2:i] + if flagName == "" { + err = fmt.Errorf("empty flag in argument %q", a) + return + } + f.procArgs = f.procArgs[1:] + f.procFlag = a[i:] + return + } + + // some number of single-rune flags + a = a[1:] + _, n := utf8.DecodeRuneInString(a) + flagName = a[0:n] + f.procFlag = a[n:] + f.procArgs = f.procArgs[1:] + return +} + +func flagWithMinus(name string) string { + if len(name) > 1 { + return "--" + name + } + return "-" + name +} + +func (f *FlagSet) parseGnuFlagArg(name string, long bool) (finished bool, err error) { + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, ErrHelp + } + // TODO print --xxx when flag is more than one rune. + return false, f.failf("flag provided but not defined: %s", flagWithMinus(name)) + } + if fv, ok := flag.Value.(*boolValue); ok && !strings.HasPrefix(f.procFlag, "=") { + // special case: doesn't need an arg, and an arg hasn't + // been provided explicitly. + fv.Set("true") + } else { + // It must have a value, which might be the next argument. + var hasValue bool + var value string + if f.procFlag != "" { + // value directly follows flag + value = f.procFlag + if long { + if value[0] != '=' { + panic("no leading '=' in long flag") + } + value = value[1:] + } + hasValue = true + f.procFlag = "" + } + if !hasValue && len(f.procArgs) > 0 { + // value is the next arg + hasValue = true + value, f.procArgs = f.procArgs[0], f.procArgs[1:] + } + if !hasValue { + return false, f.failf("flag needs an argument: %s", flagWithMinus(name)) + } + if err := flag.Value.Set(value); err != nil { + return false, f.failf("invalid value %q for flag %s: %v", value, flagWithMinus(name), err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if --help was set but not defined. +// If allowIntersperse is set, arguments and flags can be interspersed, that +// is flags can follow positional arguments. +func (f *FlagSet) Parse(allowIntersperse bool, arguments []string) error { + f.parsed = true + f.procArgs = arguments + f.procFlag = "" + f.args = nil + f.allowIntersperse = allowIntersperse + for { + name, long, finished, err := f.parseOneGnu() + if !finished { + if name != "" { + finished, err = f.parseGnuFlagArg(name, long) + } + } + if err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + if !finished { + continue + } + if err == nil { + break + } + } + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +// If allowIntersperse is set, arguments and flags can be interspersed, that +// is flags can follow positional arguments. 
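+//
+// A minimal sketch (the flag name is illustrative):
+//
+//	verbose := Bool("verbose", false, "verbose output")
+//	Parse(true)
+//
+// Given the command line "prog one --verbose two", *verbose is set to true
+// and Args() returns [one two], since interspersed flags are allowed.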
+func Parse(allowIntersperse bool) { + // Ignore errors; commandLine is set for ExitOnError. + commandLine.Parse(allowIntersperse, os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return commandLine.Parsed() +} + +// The default set of command-line flags, parsed from os.Args. +var commandLine = NewFlagSet(os.Args[0], ExitOnError) + +// SetOut sets the output writer for the default FlagSet. +func SetOut(w io.Writer) { + commandLine.output = w +} + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.errorHandling = errorHandling +} === added file 'src/github.com/lxc/lxd/shared/gnuflag/flag_test.go' --- src/github.com/lxc/lxd/shared/gnuflag/flag_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/gnuflag/flag_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,550 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gnuflag_test + +import ( + "bytes" + "fmt" + "os" + "reflect" + "sort" + "strings" + "testing" + "time" + + . "github.com/lxc/lxd/shared/gnuflag" +) + +var ( + test_bool = Bool("test_bool", false, "bool value") + test_int = Int("test_int", 0, "int value") + test_int64 = Int64("test_int64", 0, "int64 value") + test_uint = Uint("test_uint", 0, "uint value") + test_uint64 = Uint64("test_uint64", 0, "uint64 value") + test_string = String("test_string", "0", "string value") + test_float64 = Float64("test_float64", 0, "float64 value") + test_duration = Duration("test_duration", 0, "time.Duration value") +) + +func boolString(s string) string { + if s == "0" { + return "false" + } + return "true" +} + +func TestEverything(t *testing.T) { + m := make(map[string]*Flag) + desired := "0" + visitor := func(f *Flag) { + if len(f.Name) > 5 && f.Name[0:5] == "test_" { + m[f.Name] = f + ok := false + switch { + case f.Value.String() == desired: + ok = true + case f.Name == "test_bool" && f.Value.String() == boolString(desired): + ok = true + case f.Name == "test_duration" && f.Value.String() == desired+"s": + ok = true + } + if !ok { + t.Error("Visit: bad value", f.Value.String(), "for", f.Name) + } + } + } + VisitAll(visitor) + if len(m) != 8 { + t.Error("VisitAll misses some flags") + for k, v := range m { + t.Log(k, *v) + } + } + m = make(map[string]*Flag) + Visit(visitor) + if len(m) != 0 { + t.Errorf("Visit sees unset flags") + for k, v := range m { + t.Log(k, *v) + } + } + // Now set all flags + Set("test_bool", "true") + Set("test_int", "1") + Set("test_int64", "1") + Set("test_uint", "1") + Set("test_uint64", "1") + Set("test_string", "1") + Set("test_float64", "1") + Set("test_duration", "1s") + desired = "1" + Visit(visitor) + if len(m) != 8 { + t.Error("Visit fails after set") + for k, v := range m { + t.Log(k, *v) + } + } + // Now test they're visited in sort order. 
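+	// (Visit is expected to present the flags in lexicographical order of
+	// their names, as the standard library flag package does.)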
+ var flagNames []string + Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) + if !sort.StringsAreSorted(flagNames) { + t.Errorf("flag names not sorted: %v", flagNames) + } +} + +func TestUsage(t *testing.T) { + called := false + ResetForTesting(func() { called = true }) + f := CommandLine() + f.SetOutput(nullWriter{}) + if f.Parse(true, []string{"-x"}) == nil { + t.Error("parse did not fail for unknown flag") + } + if called { + t.Error("called Usage for unknown flag") + } +} + +var parseTests = []struct { + about string + intersperse bool + args []string + vals map[string]interface{} + remaining []string + error string +}{{ + about: "regular args", + intersperse: true, + args: []string{ + "--bool2", + "--int", "22", + "--int64", "0x23", + "--uint", "24", + "--uint64", "25", + "--string", "hello", + "--float64", "2718e28", + "--duration", "2m", + "one - extra - argument", + }, + vals: map[string]interface{}{ + "bool": false, + "bool2": true, + "int": 22, + "int64": int64(0x23), + "uint": uint(24), + "uint64": uint64(25), + "string": "hello", + "float64": 2718e28, + "duration": 2 * 60 * time.Second, + }, + remaining: []string{ + "one - extra - argument", + }, +}, { + about: "playing with -", + intersperse: true, + args: []string{ + "-a", + "-", + "-bc", + "2", + "-de1s", + "-f2s", + "-g", "3s", + "--h", + "--long", + "--long2", "-4s", + "3", + "4", + "--", "-5", + }, + vals: map[string]interface{}{ + "a": true, + "b": true, + "c": true, + "d": true, + "e": "1s", + "f": "2s", + "g": "3s", + "h": true, + "long": true, + "long2": "-4s", + "z": "default", + "www": 99, + }, + remaining: []string{ + "-", + "2", + "3", + "4", + "-5", + }, +}, { + about: "flag after explicit --", + intersperse: true, + args: []string{ + "-a", + "--", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + "b": false, + }, + remaining: []string{ + "-b", + }, +}, { + about: "flag after end", + args: []string{ + "-a", + "foo", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + "b": false, + }, + remaining: []string{ + "foo", + "-b", + }, +}, { + about: "arg and flag after explicit end", + args: []string{ + "-a", + "--", + "foo", + "-b", + }, + vals: map[string]interface{}{ + "a": true, + "b": false, + }, + remaining: []string{ + "foo", + "-b", + }, +}, { + about: "boolean args, explicitly and non-explicitly given", + args: []string{ + "--a=false", + "--b=true", + "--c", + }, + vals: map[string]interface{}{ + "a": false, + "b": true, + "c": true, + }, +}, { + about: "using =", + args: []string{ + "--arble=bar", + "--bletch=", + "--a=something", + "-b=other", + "-cdand more", + "--curdle=--milk", + "--sandwich", "=", + "--darn=", + "=arg", + }, + vals: map[string]interface{}{ + "arble": "bar", + "bletch": "", + "a": "something", + "b": "=other", + "c": true, + "d": "and more", + "curdle": "--milk", + "sandwich": "=", + "darn": "", + }, + remaining: []string{"=arg"}, +}, { + about: "empty flag #1", + args: []string{ + "--=bar", + }, + error: `empty flag in argument "--=bar"`, +}, { + about: "single-letter equals", + args: []string{ + "-=bar", + }, + error: `flag provided but not defined: -=`, +}, { + about: "empty flag #2", + args: []string{ + "--=", + }, + error: `empty flag in argument "--="`, +}, { + about: "no equals", + args: []string{ + "-=", + }, + error: `flag provided but not defined: -=`, +}, { + args: []string{ + "-a=true", + }, + vals: map[string]interface{}{ + "a": true, + }, + error: `invalid value "=true" for flag -a: strconv.ParseBool: parsing "=true": invalid syntax`, +}, { + 
intersperse: true,
+	args: []string{
+		"-a",
+		"-b",
+	},
+	vals: map[string]interface{}{
+		"a": true,
+	},
+	error: "flag provided but not defined: -b",
+}, {
+	intersperse: true,
+	args: []string{
+		"-a",
+	},
+	vals: map[string]interface{}{
+		"a": "default",
+	},
+	error: "flag needs an argument: -a",
+}, {
+	intersperse: true,
+	args: []string{
+		"-a", "b",
+	},
+	vals: map[string]interface{}{
+		"a": 0,
+	},
+	error: `invalid value "b" for flag -a: strconv.ParseInt: parsing "b": invalid syntax`,
+},
+}
+
+func testParse(newFlagSet func() *FlagSet, t *testing.T) {
+	for i, g := range parseTests {
+		t.Logf("test %d. %s", i, g.about)
+		f := newFlagSet()
+		flags := make(map[string]interface{})
+		for name, val := range g.vals {
+			switch val.(type) {
+			case bool:
+				flags[name] = f.Bool(name, false, "bool value "+name)
+			case string:
+				flags[name] = f.String(name, "default", "string value "+name)
+			case int:
+				flags[name] = f.Int(name, 99, "int value "+name)
+			case uint:
+				flags[name] = f.Uint(name, 0, "uint value")
+			case uint64:
+				flags[name] = f.Uint64(name, 0, "uint64 value")
+			case int64:
+				flags[name] = f.Int64(name, 0, "int64 value")
+			case float64:
+				flags[name] = f.Float64(name, 0, "float64 value")
+			case time.Duration:
+				flags[name] = f.Duration(name, 5*time.Second, "duration value")
+			default:
+				t.Fatalf("unhandled type %T", val)
+			}
+		}
+		err := f.Parse(g.intersperse, g.args)
+		if g.error != "" {
+			if err == nil {
+				t.Errorf("expected error %q got nil", g.error)
+			} else if err.Error() != g.error {
+				t.Errorf("expected error %q got %q", g.error, err.Error())
+			}
+			continue
+		}
+		for name, val := range g.vals {
+			actual := reflect.ValueOf(flags[name]).Elem().Interface()
+			if val != actual {
+				t.Errorf("flag %q, expected %v got %v", name, val, actual)
+			}
+		}
+		if len(f.Args()) != len(g.remaining) {
+			t.Fatalf("remaining args, expected %q got %q", g.remaining, f.Args())
+		}
+		for j, a := range f.Args() {
+			if a != g.remaining[j] {
+				t.Errorf("arg %d, expected %q got %q", j, g.remaining[j], a)
+			}
+		}
+	}
+}
+
+func TestParse(t *testing.T) {
+	testParse(func() *FlagSet {
+		ResetForTesting(func() {})
+		f := CommandLine()
+		f.SetOutput(nullWriter{})
+		return f
+	}, t)
+}
+
+func TestFlagSetParse(t *testing.T) {
+	testParse(func() *FlagSet {
+		f := NewFlagSet("test", ContinueOnError)
+		f.SetOutput(nullWriter{})
+		return f
+	}, t)
+}
+
+// Declare a user-defined flag type.
+type flagVar []string
+
+func (f *flagVar) String() string {
+	return fmt.Sprint([]string(*f))
+}
+
+func (f *flagVar) Set(value string) error {
+	*f = append(*f, value)
+	return nil
+}
+
+func TestUserDefined(t *testing.T) {
+	var flags FlagSet
+	flags.Init("test", ContinueOnError)
+	var v flagVar
+	flags.Var(&v, "v", "usage")
+	if err := flags.Parse(true, []string{"-v", "1", "-v", "2", "-v3"}); err != nil {
+		t.Error(err)
+	}
+	if len(v) != 3 {
+		t.Fatal("expected 3 args; got ", len(v))
+	}
+	expect := "[1 2 3]"
+	if v.String() != expect {
+		t.Errorf("expected value %q got %q", expect, v.String())
+	}
+}
+
+func TestSetOutput(t *testing.T) {
+	var flags FlagSet
+	var buf bytes.Buffer
+	flags.SetOutput(&buf)
+	flags.Init("test", ContinueOnError)
+	flags.Parse(true, []string{"-unknown"})
+	if out := buf.String(); !strings.Contains(out, "-unknown") {
+		t.Logf("expected output mentioning unknown; got %q", out)
+	}
+}
+
+// This tests that one can reset the flags. This still works but not well, and is
+// superseded by FlagSet.
+func TestChangingArgs(t *testing.T) { + ResetForTesting(func() { t.Fatal("bad parse") }) + oldArgs := os.Args + defer func() { os.Args = oldArgs }() + os.Args = []string{"cmd", "--before", "subcmd", "--after", "args"} + before := Bool("before", false, "") + if err := CommandLine().Parse(false, os.Args[1:]); err != nil { + t.Fatal(err) + } + cmd := Arg(0) + os.Args = Args() + after := Bool("after", false, "") + Parse(false) + args := Args() + + if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { + t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) + } +} + +// Test that -help invokes the usage message and returns ErrHelp. +func TestHelp(t *testing.T) { + var helpCalled = false + fs := NewFlagSet("help test", ContinueOnError) + fs.SetOutput(nullWriter{}) + fs.Usage = func() { helpCalled = true } + var flag bool + fs.BoolVar(&flag, "flag", false, "regular flag") + // Regular flag invocation should work + err := fs.Parse(true, []string{"--flag"}) + if err != nil { + t.Fatal("expected no error; got ", err) + } + if !flag { + t.Error("flag was not set by --flag") + } + if helpCalled { + t.Error("help called for regular flag") + helpCalled = false // reset for next test + } + // Help flag should work as expected. + err = fs.Parse(true, []string{"--help"}) + if err == nil { + t.Fatal("error expected") + } + if err != ErrHelp { + t.Fatal("expected ErrHelp; got ", err) + } + if !helpCalled { + t.Fatal("help was not called") + } + // If we define a help flag, that should override. + var help bool + fs.BoolVar(&help, "help", false, "help flag") + helpCalled = false + err = fs.Parse(true, []string{"--help"}) + if err != nil { + t.Fatal("expected no error for defined --help; got ", err) + } + if helpCalled { + t.Fatal("help was called; should not have been for defined help flag") + } +} + +type nullWriter struct{} + +func (nullWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +func TestPrintDefaults(t *testing.T) { + f := NewFlagSet("print test", ContinueOnError) + f.SetOutput(nullWriter{}) + var b bool + var c int + var d string + var e float64 + f.IntVar(&c, "trapclap", 99, "usage not shown") + f.IntVar(&c, "c", 99, "c usage") + + f.BoolVar(&b, "bal", false, "usage not shown") + f.BoolVar(&b, "x", false, "usage not shown") + f.BoolVar(&b, "b", false, "b usage") + f.BoolVar(&b, "balalaika", false, "usage not shown") + + f.StringVar(&d, "d", "d default", "d usage") + + f.Float64Var(&e, "elephant", 3.14, "elephant usage") + + var buf bytes.Buffer + f.SetOutput(&buf) + f.PrintDefaults() + f.SetOutput(nullWriter{}) + + expect := + ` -b, -x, --bal, --balalaika (= false) + b usage + -c, --trapclap (= 99) + c usage + -d (= "d default") + d usage + --elephant (= 3.14) + elephant usage +` + if buf.String() != expect { + t.Errorf("expect %q got %q", expect, buf.String()) + } +} === added directory 'src/github.com/lxc/lxd/shared/i18n' === added file 'src/github.com/lxc/lxd/shared/i18n/i18n.go' --- src/github.com/lxc/lxd/shared/i18n/i18n.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/i18n/i18n.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// +build !linux + +package i18n + +func G(msgid string) string { + return msgid +} + +func NG(msgid string, msgidPlural string, n uint64) string { + return msgid +} === added file 'src/github.com/lxc/lxd/shared/i18n/i18n_linux.go' --- src/github.com/lxc/lxd/shared/i18n/i18n_linux.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/i18n/i18n_linux.go 2016-03-22 
15:18:22 +0000
@@ -0,0 +1,21 @@
+// +build linux
+
+package i18n
+
+import (
+	"github.com/gosexy/gettext"
+)
+
+var TEXTDOMAIN = "lxd"
+
+func G(msgid string) string {
+	return gettext.DGettext(TEXTDOMAIN, msgid)
+}
+
+func NG(msgid string, msgidPlural string, n uint64) string {
+	return gettext.DNGettext(TEXTDOMAIN, msgid, msgidPlural, n)
+}
+
+func init() {
+	gettext.SetLocale(gettext.LC_ALL, "")
+}
=== added file 'src/github.com/lxc/lxd/shared/idmapset_linux.go'
--- src/github.com/lxc/lxd/shared/idmapset_linux.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/idmapset_linux.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,389 @@
+package shared
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"os/exec"
+	"os/user"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+/*
+ * One entry in id mapping set - a single range of either
+ * uid or gid mappings.
+ */
+type IdmapEntry struct {
+	Isuid    bool
+	Isgid    bool
+	Hostid   int // id as seen on the host - i.e. 100000
+	Nsid     int // id as seen in the ns - i.e. 0
+	Maprange int
+}
+
+func (e *IdmapEntry) ToLxcString() string {
+	if e.Isuid {
+		return fmt.Sprintf("u %d %d %d", e.Nsid, e.Hostid, e.Maprange)
+	}
+	return fmt.Sprintf("g %d %d %d", e.Nsid, e.Hostid, e.Maprange)
+}
+
+func is_between(x, low, high int) bool {
+	return x >= low && x < high
+}
+
+// Intersects reports whether two entries of the same kind (uid or gid)
+// overlap on either the host side or the namespace side of their ranges.
+func (e *IdmapEntry) Intersects(i IdmapEntry) bool {
+	if (e.Isuid && i.Isuid) || (e.Isgid && i.Isgid) {
+		switch {
+		case is_between(e.Hostid, i.Hostid, i.Hostid+i.Maprange):
+			return true
+		case is_between(i.Hostid, e.Hostid, e.Hostid+e.Maprange):
+			return true
+		case is_between(e.Hostid+e.Maprange, i.Hostid, i.Hostid+i.Maprange):
+			return true
+		case is_between(i.Hostid+i.Maprange, e.Hostid, e.Hostid+e.Maprange):
+			return true
+		case is_between(e.Nsid, i.Nsid, i.Nsid+i.Maprange):
+			return true
+		case is_between(i.Nsid, e.Nsid, e.Nsid+e.Maprange):
+			return true
+		case is_between(e.Nsid+e.Maprange, i.Nsid, i.Nsid+i.Maprange):
+			return true
+		case is_between(i.Nsid+i.Maprange, e.Nsid, e.Nsid+e.Maprange):
+			return true
+		}
+	}
+	return false
+}
+
+func (e *IdmapEntry) parse(s string) error {
+	split := strings.Split(s, ":")
+	var err error
+	if len(split) != 4 {
+		return fmt.Errorf("Bad idmap: %q", s)
+	}
+	switch split[0] {
+	case "u":
+		e.Isuid = true
+	case "g":
+		e.Isgid = true
+	case "b":
+		e.Isuid = true
+		e.Isgid = true
+	default:
+		return fmt.Errorf("Bad idmap type in %q", s)
+	}
+	e.Nsid, err = strconv.Atoi(split[1])
+	if err != nil {
+		return err
+	}
+	e.Hostid, err = strconv.Atoi(split[2])
+	if err != nil {
+		return err
+	}
+	e.Maprange, err = strconv.Atoi(split[3])
+	if err != nil {
+		return err
+	}
+
+	// wraparound
+	if e.Hostid+e.Maprange < e.Hostid || e.Nsid+e.Maprange < e.Nsid {
+		return fmt.Errorf("Bad mapping: id wraparound")
+	}
+
+	return nil
+}
+
+/*
+ * Shift a uid from the host into the container
+ * I.e. 0 -> 1000 -> 101000
+ */
+func (e *IdmapEntry) shift_into_ns(id int) (int, error) {
+	if id < e.Nsid || id >= e.Nsid+e.Maprange {
+		// this mapping doesn't apply
+		return 0, fmt.Errorf("N/A")
+	}
+
+	return id - e.Nsid + e.Hostid, nil
+}
+
+/*
+ * Shift a uid from the container back to the host
+ * I.e.
101000 -> 1000 + */ +func (e *IdmapEntry) shift_from_ns(id int) (int, error) { + if id < e.Hostid || id >= e.Hostid+e.Maprange { + // this mapping doesn't apply + return 0, fmt.Errorf("N/A") + } + + return id - e.Hostid + e.Nsid, nil +} + +/* taken from http://blog.golang.org/slices (which is under BSD licence) */ +func Extend(slice []IdmapEntry, element IdmapEntry) []IdmapEntry { + n := len(slice) + if n == cap(slice) { + // Slice is full; must grow. + // We double its size and add 1, so if the size is zero we still grow. + newSlice := make([]IdmapEntry, len(slice), 2*len(slice)+1) + copy(newSlice, slice) + slice = newSlice + } + slice = slice[0 : n+1] + slice[n] = element + return slice +} + +type IdmapSet struct { + Idmap []IdmapEntry +} + +func (m IdmapSet) Len() int { + return len(m.Idmap) +} + +func (m IdmapSet) Intersects(i IdmapEntry) bool { + for _, e := range m.Idmap { + if i.Intersects(e) { + return true + } + } + return false +} + +func (m IdmapSet) ToLxcString() []string { + var lines []string + for _, e := range m.Idmap { + lines = append(lines, e.ToLxcString()+"\n") + } + return lines +} + +func (m IdmapSet) Append(s string) (IdmapSet, error) { + e := IdmapEntry{} + err := e.parse(s) + if err != nil { + return m, err + } + if m.Intersects(e) { + return m, fmt.Errorf("Conflicting id mapping") + } + m.Idmap = Extend(m.Idmap, e) + return m, nil +} + +func (m IdmapSet) doShiftIntoNs(uid int, gid int, how string) (int, int) { + u := -1 + g := -1 + for _, e := range m.Idmap { + var err error + var tmpu, tmpg int + if e.Isuid && u == -1 { + switch how { + case "in": + tmpu, err = e.shift_into_ns(uid) + case "out": + tmpu, err = e.shift_from_ns(uid) + } + if err == nil { + u = tmpu + } + } + if e.Isgid && g == -1 { + switch how { + case "in": + tmpg, err = e.shift_into_ns(gid) + case "out": + tmpg, err = e.shift_from_ns(gid) + } + if err == nil { + g = tmpg + } + } + } + + return u, g +} + +func (m IdmapSet) ShiftIntoNs(uid int, gid int) (int, int) { + return m.doShiftIntoNs(uid, gid, "in") +} + +func (m IdmapSet) ShiftFromNs(uid int, gid int) (int, int) { + return m.doShiftIntoNs(uid, gid, "out") +} + +func GetOwner(path string) (int, int, error) { + uid, gid, _, _, _, _, err := GetFileStat(path) + return uid, gid, err +} + +func (set *IdmapSet) doUidshiftIntoContainer(dir string, testmode bool, how string) error { + // Expand any symlink in dir and cleanup resulting path + dir, err := filepath.EvalSymlinks(dir) + if err != nil { + return err + } + dir = strings.TrimRight(dir, "/") + + convert := func(path string, fi os.FileInfo, err error) (e error) { + uid, gid, err := GetOwner(path) + if err != nil { + return err + } + var newuid, newgid int + switch how { + case "in": + newuid, newgid = set.ShiftIntoNs(uid, gid) + case "out": + newuid, newgid = set.ShiftFromNs(uid, gid) + } + if testmode { + fmt.Printf("I would shift %q to %d %d\n", path, newuid, newgid) + } else { + err = ShiftOwner(dir, path, int(newuid), int(newgid)) + if err != nil { + return err + } + } + return nil + } + + if !PathExists(dir) { + return fmt.Errorf("No such file or directory: %q", dir) + } + return filepath.Walk(dir, convert) +} + +func (set *IdmapSet) UidshiftIntoContainer(dir string, testmode bool) error { + return set.doUidshiftIntoContainer(dir, testmode, "in") +} + +func (set *IdmapSet) UidshiftFromContainer(dir string, testmode bool) error { + return set.doUidshiftIntoContainer(dir, testmode, "out") +} + +func (set *IdmapSet) ShiftRootfs(p string) error { + return set.doUidshiftIntoContainer(p, 
false, "in") +} + +func (set *IdmapSet) UnshiftRootfs(p string) error { + return set.doUidshiftIntoContainer(p, false, "out") +} + +func (set *IdmapSet) ShiftFile(p string) error { + return set.ShiftRootfs(p) +} + +const ( + minIDRange = 65536 +) + +/* + * get a uid or gid mapping from /etc/subxid + */ +func getFromMap(fname string, username string) (int, int, error) { + f, err := os.Open(fname) + var min int + var idrange int + if err != nil { + return 0, 0, err + } + defer f.Close() + scanner := bufio.NewScanner(f) + min = 0 + idrange = 0 + for scanner.Scan() { + /* + * /etc/sub{gu}id allow comments in the files, so ignore + * everything after a '#' + */ + s := strings.Split(scanner.Text(), "#") + if len(s[0]) == 0 { + continue + } + + s = strings.Split(s[0], ":") + if len(s) < 3 { + return 0, 0, fmt.Errorf("unexpected values in %q: %q", fname, s) + } + if strings.EqualFold(s[0], username) { + bigmin, err := strconv.ParseUint(s[1], 10, 32) + if err != nil { + continue + } + bigIdrange, err := strconv.ParseUint(s[2], 10, 32) + if err != nil { + continue + } + min = int(bigmin) + idrange = int(bigIdrange) + if idrange < minIDRange { + continue + } + return min, idrange, nil + } + } + + return 0, 0, fmt.Errorf("User %q has no %ss.", username, path.Base(fname)) +} + +/* + * Get current username + */ +func getUsername() (string, error) { + me, err := user.Current() + if err == nil { + return me.Username, nil + } else { + /* user.Current() requires cgo */ + username := os.Getenv("USER") + if username == "" { + return "", err + } + return username, nil + } +} + +/* + * Create a new default idmap + */ +func DefaultIdmapSet() (*IdmapSet, error) { + myname, err := getUsername() + if err != nil { + return nil, err + } + + umin := 1000000 + urange := 100000 + gmin := 1000000 + grange := 100000 + + newuidmap, _ := exec.LookPath("newuidmap") + newgidmap, _ := exec.LookPath("newgidmap") + + if newuidmap != "" && newgidmap != "" && PathExists("/etc/subuid") && PathExists("/etc/subgid") { + umin, urange, err = getFromMap("/etc/subuid", myname) + if err != nil { + return nil, err + } + + gmin, grange, err = getFromMap("/etc/subgid", myname) + if err != nil { + return nil, err + } + } + + m := new(IdmapSet) + + e := IdmapEntry{Isuid: true, Nsid: 0, Hostid: umin, Maprange: urange} + m.Idmap = Extend(m.Idmap, e) + e = IdmapEntry{Isgid: true, Nsid: 0, Hostid: gmin, Maprange: grange} + m.Idmap = Extend(m.Idmap, e) + + return m, nil +} === added file 'src/github.com/lxc/lxd/shared/image.go' --- src/github.com/lxc/lxd/shared/image.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/image.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,64 @@ +package shared + +import ( + "time" +) + +type ImageProperties map[string]string + +type ImageAliasesEntry struct { + Name string `json:"name"` + Description string `json:"description"` + Target string `json:"target"` +} + +type ImageAliases []ImageAliasesEntry + +type ImageAlias struct { + Name string `json:"name"` + Description string `json:"description"` +} + +type ImageSource struct { + Server string `json:"server"` + Protocol string `json:"protocol"` + Certificate string `json:"certificate"` + Alias string `json:"alias"` +} + +type ImageInfo struct { + Aliases []ImageAlias `json:"aliases"` + Architecture string `json:"architecture"` + Cached bool `json:"cached"` + Filename string `json:"filename"` + Fingerprint string `json:"fingerprint"` + Properties map[string]string `json:"properties"` + Public bool `json:"public"` + Size int64 `json:"size"` + + AutoUpdate bool 
`json:"auto_update"`
+	Source     *ImageSource `json:"update_source,omitempty"`
+
+	CreationDate time.Time `json:"created_at"`
+	ExpiryDate   time.Time `json:"expires_at"`
+	LastUsedDate time.Time `json:"last_used_at"`
+	UploadDate   time.Time `json:"uploaded_at"`
+}
+
+/*
+ * BriefImageInfo contains a subset of the fields in
+ * ImageInfo, namely those which a user may update
+ */
+type BriefImageInfo struct {
+	AutoUpdate bool              `json:"auto_update"`
+	Properties map[string]string `json:"properties"`
+	Public     bool              `json:"public"`
+}
+
+func (i *ImageInfo) Brief() BriefImageInfo {
+	retstate := BriefImageInfo{
+		AutoUpdate: i.AutoUpdate,
+		Properties: i.Properties,
+		Public:     i.Public}
+	return retstate
+}
=== added file 'src/github.com/lxc/lxd/shared/json.go'
--- src/github.com/lxc/lxd/shared/json.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/json.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,61 @@
+package shared
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+type Jmap map[string]interface{}
+
+func (m Jmap) GetString(key string) (string, error) {
+	if val, ok := m[key]; !ok {
+		return "", fmt.Errorf("Response was missing `%s`", key)
+	} else if val, ok := val.(string); !ok {
+		return "", fmt.Errorf("`%s` was not a string", key)
+	} else {
+		return val, nil
+	}
+}
+
+func (m Jmap) GetMap(key string) (Jmap, error) {
+	if val, ok := m[key]; !ok {
+		return nil, fmt.Errorf("Response was missing `%s`", key)
+	} else if val, ok := val.(map[string]interface{}); !ok {
+		return nil, fmt.Errorf("`%s` was not a map, got %T", key, m[key])
+	} else {
+		return val, nil
+	}
+}
+
+func (m Jmap) GetInt(key string) (int, error) {
+	if val, ok := m[key]; !ok {
+		return -1, fmt.Errorf("Response was missing `%s`", key)
+	} else if val, ok := val.(float64); !ok {
+		return -1, fmt.Errorf("`%s` was not an int", key)
+	} else {
+		return int(val), nil
+	}
+}
+
+func (m Jmap) GetBool(key string) (bool, error) {
+	if val, ok := m[key]; !ok {
+		return false, fmt.Errorf("Response was missing `%s`", key)
+	} else if val, ok := val.(bool); !ok {
+		return false, fmt.Errorf("`%s` was not a bool", key)
+	} else {
+		return val, nil
+	}
+}
+
+func DebugJson(r *bytes.Buffer) {
+	pretty := &bytes.Buffer{}
+	if err := json.Indent(pretty, r.Bytes(), "\t", "\t"); err != nil {
+		Debugf("error indenting json: %s", err)
+		return
+	}
+
+	// Print the JSON without the last "\n"
+	str := pretty.String()
+	Debugf("\n\t%s", str[0:len(str)-1])
+}
=== added file 'src/github.com/lxc/lxd/shared/log.go'
--- src/github.com/lxc/lxd/shared/log.go	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/shared/log.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,51 @@
+package shared
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type Logger interface {
+	Debug(msg string, ctx ...interface{})
+	Info(msg string, ctx ...interface{})
+	Warn(msg string, ctx ...interface{})
+	Error(msg string, ctx ...interface{})
+	Crit(msg string, ctx ...interface{})
+}
+
+var Log Logger
+
+type nullLogger struct{}
+
+func (nl nullLogger) Debug(msg string, ctx ...interface{}) {}
+func (nl nullLogger) Info(msg string, ctx ...interface{})  {}
+func (nl nullLogger) Warn(msg string, ctx ...interface{})  {}
+func (nl nullLogger) Error(msg string, ctx ...interface{}) {}
+func (nl nullLogger) Crit(msg string, ctx ...interface{})  {}
+
+func init() {
+	Log = nullLogger{}
+}
+
+// Logf sends to the logger registered via SetLogger the string resulting
+// from running format and args through Sprintf.
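+//
+// A minimal sketch (the message and argument are illustrative):
+//
+//	Logf("starting %s", name)
+//
+// which forwards the formatted string to Log.Info.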
+func Logf(format string, args ...interface{}) { + if Log != nil { + Log.Info(fmt.Sprintf(format, args...)) + } +} + +// Debugf sends to the logger registered via SetLogger the string resulting +// from running format and args through Sprintf, but only if debugging was +// enabled via SetDebug. +func Debugf(format string, args ...interface{}) { + if Log != nil { + Log.Debug(fmt.Sprintf(format, args...)) + } +} + +func PrintStack() { + buf := make([]byte, 1<<16) + runtime.Stack(buf, true) + Debugf("%s", buf) +} === added directory 'src/github.com/lxc/lxd/shared/logging' === added file 'src/github.com/lxc/lxd/shared/logging/log.go' --- src/github.com/lxc/lxd/shared/logging/log.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/logging/log.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +package logging + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/lxc/lxd/shared" + + log "gopkg.in/inconshreveable/log15.v2" +) + +// GetLogger returns a logger suitable for using as shared.Log. +func GetLogger(syslog string, logfile string, verbose bool, debug bool, customHandler log.Handler) (shared.Logger, error) { + Log := log.New() + + var handlers []log.Handler + + var syshandler log.Handler + + // System specific handler + syshandler = getSystemHandler(syslog, debug) + if syshandler != nil { + handlers = append(handlers, syshandler) + } + + // FileHandler + if logfile != "" { + if !pathExists(filepath.Dir(logfile)) { + return nil, fmt.Errorf("Log file path doesn't exist: %s", filepath.Dir(logfile)) + } + + if !debug { + handlers = append( + handlers, + log.LvlFilterHandler( + log.LvlInfo, + log.Must.FileHandler(logfile, log.LogfmtFormat()), + ), + ) + } else { + handlers = append(handlers, log.Must.FileHandler(logfile, log.LogfmtFormat())) + } + } + + // StderrHandler + if verbose || debug { + if !debug { + handlers = append( + handlers, + log.LvlFilterHandler( + log.LvlInfo, + log.StderrHandler, + ), + ) + } else { + handlers = append(handlers, log.StderrHandler) + } + } else { + handlers = append( + handlers, + log.LvlFilterHandler( + log.LvlWarn, + log.StderrHandler, + ), + ) + } + + if customHandler != nil { + handlers = append(handlers, customHandler) + } + + Log.SetHandler(log.MultiHandler(handlers...)) + + return Log, nil +} + +func AddContext(logger shared.Logger, ctx log.Ctx) shared.Logger { + log15logger, ok := logger.(log.Logger) + if !ok { + logger.Error("couldn't downcast logger to add context", log.Ctx{"logger": log15logger, "ctx": ctx}) + return logger + } + + return log15logger.New(ctx) +} + +func pathExists(name string) bool { + _, err := os.Lstat(name) + if err != nil && os.IsNotExist(err) { + return false + } + return true +} === added file 'src/github.com/lxc/lxd/shared/logging/log_posix.go' --- src/github.com/lxc/lxd/shared/logging/log_posix.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/logging/log_posix.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +// +build linux darwin + +package logging + +import ( + log "gopkg.in/inconshreveable/log15.v2" +) + +// getSystemHandler on Linux writes messages to syslog. 
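+// If the syslog tag is empty it returns nil, in which case GetLogger simply
+// skips the system handler.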
+func getSystemHandler(syslog string, debug bool) log.Handler { + // SyslogHandler + if syslog != "" { + if !debug { + return log.LvlFilterHandler( + log.LvlInfo, + log.Must.SyslogHandler(syslog, log.LogfmtFormat()), + ) + } else { + return log.Must.SyslogHandler(syslog, log.LogfmtFormat()) + } + } + + return nil +} === added file 'src/github.com/lxc/lxd/shared/logging/log_windows.go' --- src/github.com/lxc/lxd/shared/logging/log_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/logging/log_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +// +build windows + +package logging + +import ( + log "gopkg.in/inconshreveable/log15.v2" +) + +// getSystemHandler on Windows does nothing. +func getSystemHandler(syslog string, debug bool) log.Handler { + return nil +} === added file 'src/github.com/lxc/lxd/shared/network.go' --- src/github.com/lxc/lxd/shared/network.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/network.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,307 @@ +package shared + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "time" + + "github.com/gorilla/websocket" +) + +func RFC3493Dialer(network, address string) (net.Conn, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return nil, err + } + + addrs, err := net.LookupHost(host) + if err != nil { + return nil, err + } + for _, a := range addrs { + c, err := net.DialTimeout(network, net.JoinHostPort(a, port), 10*time.Second) + if err != nil { + continue + } + return c, err + } + return nil, fmt.Errorf("Unable to connect to: " + address) +} + +func GetRemoteCertificate(address string) (*x509.Certificate, error) { + // Setup a permissive TLS config + tlsConfig, err := GetTLSConfig("", "", nil) + if err != nil { + return nil, err + } + + tlsConfig.InsecureSkipVerify = true + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + Dial: RFC3493Dialer, + Proxy: http.ProxyFromEnvironment, + } + + // Connect + client := &http.Client{Transport: tr} + resp, err := client.Get(address) + if err != nil { + return nil, err + } + + // Retrieve the certificate + if resp.TLS == nil || len(resp.TLS.PeerCertificates) == 0 { + return nil, fmt.Errorf("Unable to read remote TLS certificate") + } + + return resp.TLS.PeerCertificates[0], nil +} + +func initTLSConfig() *tls.Config { + return &tls.Config{ + MinVersion: tls.VersionTLS12, + MaxVersion: tls.VersionTLS12, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256}, + PreferServerCipherSuites: true, + } +} + +func finalizeTLSConfig(tlsConfig *tls.Config, tlsRemoteCert *x509.Certificate) { + // Trusted certificates + if tlsRemoteCert != nil { + caCertPool := x509.NewCertPool() + + // Make it a valid RootCA + tlsRemoteCert.IsCA = true + tlsRemoteCert.KeyUsage = x509.KeyUsageCertSign + + // Setup the pool + caCertPool.AddCert(tlsRemoteCert) + tlsConfig.RootCAs = caCertPool + + // Set the ServerName + if tlsRemoteCert.DNSNames != nil { + tlsConfig.ServerName = tlsRemoteCert.DNSNames[0] + } + } + + tlsConfig.BuildNameToCertificate() +} + +func GetTLSConfig(tlsClientCertFile string, tlsClientKeyFile string, tlsRemoteCert *x509.Certificate) (*tls.Config, error) { + tlsConfig := initTLSConfig() + + // Client authentication + if tlsClientCertFile != "" && tlsClientKeyFile != "" { + cert, err := tls.LoadX509KeyPair(tlsClientCertFile, tlsClientKeyFile) + if err != nil { + return nil, err + } + + tlsConfig.Certificates = 
[]tls.Certificate{cert} + } + + finalizeTLSConfig(tlsConfig, tlsRemoteCert) + return tlsConfig, nil +} + +func GetTLSConfigMem(tlsClientCert string, tlsClientKey string, tlsRemoteCertPEM string) (*tls.Config, error) { + tlsConfig := initTLSConfig() + + // Client authentication + if tlsClientCert != "" && tlsClientKey != "" { + cert, err := tls.X509KeyPair([]byte(tlsClientCert), []byte(tlsClientKey)) + if err != nil { + return nil, err + } + + tlsConfig.Certificates = []tls.Certificate{cert} + } + + var tlsRemoteCert *x509.Certificate + if tlsRemoteCertPEM != "" { + // Ignore any content outside of the PEM bytes we care about + certBlock, _ := pem.Decode([]byte(tlsRemoteCertPEM)) + var err error + tlsRemoteCert, err = x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return nil, err + } + } + finalizeTLSConfig(tlsConfig, tlsRemoteCert) + + return tlsConfig, nil +} + +func IsLoopback(iface *net.Interface) bool { + return int(iface.Flags&net.FlagLoopback) > 0 +} + +func WebsocketSendStream(conn *websocket.Conn, r io.Reader) chan bool { + ch := make(chan bool) + + if r == nil { + close(ch) + return ch + } + + go func(conn *websocket.Conn, r io.Reader) { + in := ReaderToChannel(r) + for { + buf, ok := <-in + if !ok { + break + } + + w, err := conn.NextWriter(websocket.BinaryMessage) + if err != nil { + Debugf("Got error getting next writer %s", err) + break + } + + _, err = w.Write(buf) + w.Close() + if err != nil { + Debugf("Got err writing %s", err) + break + } + } + conn.WriteMessage(websocket.TextMessage, []byte{}) + ch <- true + }(conn, r) + + return ch +} + +func WebsocketRecvStream(w io.WriteCloser, conn *websocket.Conn) chan bool { + ch := make(chan bool) + + go func(w io.WriteCloser, conn *websocket.Conn) { + for { + mt, r, err := conn.NextReader() + if mt == websocket.CloseMessage { + Debugf("Got close message for reader") + break + } + + if mt == websocket.TextMessage { + Debugf("got message barrier") + break + } + + if err != nil { + Debugf("Got error getting next reader %s, %s", err, w) + break + } + + buf, err := ioutil.ReadAll(r) + if err != nil { + Debugf("Got error writing to writer %s", err) + break + } + + if w == nil { + continue + } + + i, err := w.Write(buf) + if i != len(buf) { + Debugf("Didn't write all of buf") + break + } + if err != nil { + Debugf("Error writing buf %s", err) + break + } + } + ch <- true + }(w, conn) + + return ch +} + +// WebsocketMirror allows mirroring a reader to a websocket and taking the +// result and writing it to a writer. This function allows for multiple +// mirrorings and correctly negotiates stream endings. However, it means any +// websocket.Conns passed to it are live when it returns, and must be closed +// explicitly. 
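+//
+// A minimal usage sketch (conn and rwc are assumed to be established
+// elsewhere, e.g. an upgraded websocket and a process's stdio pipe):
+//
+//	readDone, writeDone := WebsocketMirror(conn, rwc, rwc)
+//	<-readDone
+//	<-writeDone
+//	conn.Close() // still live after the mirror finishes; close explicitly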
+func WebsocketMirror(conn *websocket.Conn, w io.WriteCloser, r io.ReadCloser) (chan bool, chan bool) { + readDone := make(chan bool, 1) + writeDone := make(chan bool, 1) + go func(conn *websocket.Conn, w io.WriteCloser) { + for { + mt, r, err := conn.NextReader() + if err != nil { + Debugf("Got error getting next reader %s, %s", err, w) + break + } + + if mt == websocket.CloseMessage { + Debugf("Got close message for reader") + break + } + + if mt == websocket.TextMessage { + Debugf("Got message barrier, resetting stream") + break + } + + buf, err := ioutil.ReadAll(r) + if err != nil { + Debugf("Got error writing to writer %s", err) + break + } + i, err := w.Write(buf) + if i != len(buf) { + Debugf("Didn't write all of buf") + break + } + if err != nil { + Debugf("Error writing buf %s", err) + break + } + } + writeDone <- true + w.Close() + }(conn, w) + + go func(conn *websocket.Conn, r io.ReadCloser) { + in := ReaderToChannel(r) + for { + buf, ok := <-in + if !ok { + readDone <- true + r.Close() + Debugf("sending write barrier") + conn.WriteMessage(websocket.TextMessage, []byte{}) + return + } + w, err := conn.NextWriter(websocket.BinaryMessage) + if err != nil { + Debugf("Got error getting next writer %s", err) + break + } + + _, err = w.Write(buf) + w.Close() + if err != nil { + Debugf("Got err writing %s", err) + break + } + } + closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + conn.WriteMessage(websocket.CloseMessage, closeMsg) + readDone <- true + r.Close() + }(conn, r) + + return readDone, writeDone +} === added file 'src/github.com/lxc/lxd/shared/operation.go' --- src/github.com/lxc/lxd/shared/operation.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/operation.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +package shared + +import ( + "net/http" + "time" + + "github.com/gorilla/websocket" +) + +var WebsocketUpgrader = websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + CheckOrigin: func(r *http.Request) bool { return true }, +} + +type Operation struct { + Id string `json:"id"` + Class string `json:"class"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Status string `json:"status"` + StatusCode StatusCode `json:"status_code"` + Resources map[string][]string `json:"resources"` + Metadata *Jmap `json:"metadata"` + MayCancel bool `json:"may_cancel"` + Err string `json:"err"` +} === added file 'src/github.com/lxc/lxd/shared/server.go' --- src/github.com/lxc/lxd/shared/server.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +package shared + +type ServerStateEnvironment struct { + Addresses []string `json:"addresses"` + Architectures []string `json:"architectures"` + Certificate string `json:"certificate"` + Driver string `json:"driver"` + DriverVersion string `json:"driver_version"` + Kernel string `json:"kernel"` + KernelArchitecture string `json:"kernel_architecture"` + KernelVersion string `json:"kernel_version"` + Server string `json:"server"` + ServerPid int `json:"server_pid"` + ServerVersion string `json:"server_version"` + Storage string `json:"storage"` + StorageVersion string `json:"storage_version"` +} + +type ServerState struct { + APICompat int `json:"api_compat"` + Auth string `json:"auth"` + Environment ServerStateEnvironment `json:"environment"` + Config map[string]interface{} `json:"config"` + Public bool `json:"public"` +} + +type BriefServerState struct { + Config map[string]interface{} 
`json:"config"` +} + +func (c *ServerState) Brief() BriefServerState { + retstate := BriefServerState{Config: c.Config} + return retstate +} === added file 'src/github.com/lxc/lxd/shared/simplestreams.go' --- src/github.com/lxc/lxd/shared/simplestreams.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/simplestreams.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,676 @@ +package shared + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +type ssSortImage []ImageInfo + +func (a ssSortImage) Len() int { + return len(a) +} + +func (a ssSortImage) Swap(i, j int) { + a[i], a[j] = a[j], a[i] +} + +func (a ssSortImage) Less(i, j int) bool { + if a[i].Properties["os"] == a[j].Properties["os"] { + if a[i].Properties["release"] == a[j].Properties["release"] { + if a[i].CreationDate.UTC().Unix() == 0 { + return true + } + + if a[j].CreationDate.UTC().Unix() == 0 { + return false + } + + return a[i].CreationDate.UTC().Unix() > a[j].CreationDate.UTC().Unix() + } + + if a[i].Properties["release"] == "" { + return false + } + + if a[j].Properties["release"] == "" { + return true + } + + return a[i].Properties["release"] < a[j].Properties["release"] + } + + if a[i].Properties["os"] == "" { + return false + } + + if a[j].Properties["os"] == "" { + return true + } + + return a[i].Properties["os"] < a[j].Properties["os"] +} + +var ssDefaultOS = map[string]string{ + "https://cloud-images.ubuntu.com": "ubuntu", +} + +type SimpleStreamsManifest struct { + Updated string `json:"updated"` + DataType string `json:"datatype"` + Format string `json:"format"` + License string `json:"license"` + Products map[string]SimpleStreamsManifestProduct `json:"products"` +} + +func (s *SimpleStreamsManifest) ToLXD() ([]ImageInfo, map[string][][]string) { + downloads := map[string][][]string{} + + images := []ImageInfo{} + nameLayout := "20060102" + eolLayout := "2006-01-02" + + for _, product := range s.Products { + // Skip unsupported architectures + architecture, err := ArchitectureId(product.Architecture) + if err != nil { + continue + } + + architectureName, err := ArchitectureName(architecture) + if err != nil { + continue + } + + for name, version := range product.Versions { + // Short of anything better, use the name as date + creationDate, err := time.Parse(nameLayout, name) + if err != nil { + continue + } + + size := int64(0) + filename := "" + fingerprint := "" + + metaPath := "" + metaHash := "" + rootfsPath := "" + rootfsHash := "" + + found := 0 + for _, item := range version.Items { + // Skip the files we don't care about + if !StringInSlice(item.FileType, []string{"root.tar.xz", "lxd.tar.xz"}) { + continue + } + found += 1 + + size += item.Size + if item.LXDHashSha256 != "" { + fingerprint = item.LXDHashSha256 + } + + if item.FileType == "lxd.tar.xz" { + fields := strings.Split(item.Path, "/") + filename = fields[len(fields)-1] + metaPath = item.Path + metaHash = item.HashSha256 + } + + if item.FileType == "root.tar.xz" { + rootfsPath = item.Path + rootfsHash = item.HashSha256 + } + } + + if found != 2 || size == 0 || filename == "" || fingerprint == "" { + // Invalid image + continue + } + + // Generate the actual image entry + image := ImageInfo{} + image.Architecture = architectureName + image.Public = true + image.Size = size + image.CreationDate = creationDate + image.UploadDate = creationDate + image.Filename = filename + image.Fingerprint = fingerprint + image.Properties = map[string]string{ + "aliases": 
product.Aliases,
+				"os":           product.OperatingSystem,
+				"release":      product.Release,
+				"version":      product.Version,
+				"architecture": product.Architecture,
+				"label":        version.Label,
+				"serial":       name,
+				"description":  fmt.Sprintf("%s %s %s (%s) (%s)", product.OperatingSystem, product.ReleaseTitle, product.Architecture, version.Label, name),
+			}
+
+			// Attempt to parse the EOL
+			if product.SupportedEOL != "" {
+				eolDate, err := time.Parse(eolLayout, product.SupportedEOL)
+				if err == nil {
+					image.ExpiryDate = eolDate
+				}
+			}
+
+			downloads[fingerprint] = [][]string{[]string{metaPath, metaHash, "meta"}, []string{rootfsPath, rootfsHash, "root"}}
+			images = append(images, image)
+		}
+	}
+
+	return images, downloads
+}
+
+type SimpleStreamsManifestProduct struct {
+	Aliases         string                                         `json:"aliases"`
+	Architecture    string                                         `json:"arch"`
+	OperatingSystem string                                         `json:"os"`
+	Release         string                                         `json:"release"`
+	ReleaseCodename string                                         `json:"release_codename"`
+	ReleaseTitle    string                                         `json:"release_title"`
+	Supported       bool                                           `json:"supported"`
+	SupportedEOL    string                                         `json:"support_eol"`
+	Version         string                                         `json:"version"`
+	Versions        map[string]SimpleStreamsManifestProductVersion `json:"versions"`
+}
+
+type SimpleStreamsManifestProductVersion struct {
+	PublicName string                                             `json:"pubname"`
+	Label      string                                             `json:"label"`
+	Items      map[string]SimpleStreamsManifestProductVersionItem `json:"items"`
+}
+
+type SimpleStreamsManifestProductVersionItem struct {
+	Path          string `json:"path"`
+	FileType      string `json:"ftype"`
+	HashMd5       string `json:"md5"`
+	HashSha256    string `json:"sha256"`
+	LXDHashSha256 string `json:"combined_sha256"`
+	Size          int64  `json:"size"`
+}
+
+type SimpleStreamsIndex struct {
+	Format  string                              `json:"format"`
+	Index   map[string]SimpleStreamsIndexStream `json:"index"`
+	Updated string                              `json:"updated"`
+}
+
+type SimpleStreamsIndexStream struct {
+	Updated  string   `json:"updated"`
+	DataType string   `json:"datatype"`
+	Path     string   `json:"path"`
+	Products []string `json:"products"`
+}
+
+func SimpleStreamsClient(url string) (*SimpleStreams, error) {
+	// Setup a http client
+	tlsConfig, err := GetTLSConfig("", "", nil)
+	if err != nil {
+		return nil, err
+	}
+
+	tr := &http.Transport{
+		TLSClientConfig: tlsConfig,
+		Dial:            RFC3493Dialer,
+		Proxy:           http.ProxyFromEnvironment,
+	}
+
+	myHttp := http.Client{
+		Transport: tr,
+	}
+
+	return &SimpleStreams{
+		http:           &myHttp,
+		url:            url,
+		cachedManifest: map[string]*SimpleStreamsManifest{}}, nil
+}
+
+type SimpleStreams struct {
+	http *http.Client
+	url  string
+
+	cachedIndex    *SimpleStreamsIndex
+	cachedManifest map[string]*SimpleStreamsManifest
+	cachedImages   []ImageInfo
+	cachedAliases  map[string]*ImageAliasesEntry
+}
+
+func (s *SimpleStreams) parseIndex() (*SimpleStreamsIndex, error) {
+	if s.cachedIndex != nil {
+		return s.cachedIndex, nil
+	}
+
+	req, err := http.NewRequest("GET", fmt.Sprintf("%s/streams/v1/index.json", s.url), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", UserAgent)
+
+	r, err := s.http.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the index
+	ssIndex := SimpleStreamsIndex{}
+	err = json.Unmarshal(body, &ssIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	s.cachedIndex = &ssIndex
+
+	return &ssIndex, nil
+}
+
+func (s *SimpleStreams) parseManifest(path string) (*SimpleStreamsManifest, error) {
+	if s.cachedManifest[path] != nil {
+		return s.cachedManifest[path], nil
+	}
+
+	req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", s.url, path), nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("User-Agent", UserAgent)
+
+	r, err := s.http.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the index
+	ssManifest := SimpleStreamsManifest{}
+	err = json.Unmarshal(body, &ssManifest)
+	if err != nil {
+		return nil, err
+	}
+
+	s.cachedManifest[path] = &ssManifest
+
+	return &ssManifest, nil
+}
+
+func (s *SimpleStreams) applyAliases(images []ImageInfo) ([]ImageInfo, map[string]*ImageAliasesEntry, error) {
+	aliases := map[string]*ImageAliasesEntry{}
+
+	sort.Sort(ssSortImage(images))
+
+	defaultOS := ""
+	for k, v := range ssDefaultOS {
+		if strings.HasPrefix(s.url, k) {
+			defaultOS = v
+			break
+		}
+	}
+
+	addAlias := func(name string, fingerprint string) *ImageAlias {
+		if defaultOS != "" {
+			name = strings.TrimPrefix(name, fmt.Sprintf("%s/", defaultOS))
+		}
+
+		if aliases[name] != nil {
+			return nil
+		}
+
+		alias := ImageAliasesEntry{}
+		alias.Name = name
+		alias.Target = fingerprint
+		aliases[name] = &alias
+
+		return &ImageAlias{Name: name}
+	}
+
+	architectureName, _ := ArchitectureGetLocal()
+
+	newImages := []ImageInfo{}
+	for _, image := range images {
+		if image.Properties["aliases"] != "" {
+			aliases := strings.Split(image.Properties["aliases"], ",")
+			for _, entry := range aliases {
+				// Short
+				if image.Architecture == architectureName {
+					alias := addAlias(fmt.Sprintf("%s", entry), image.Fingerprint)
+					if alias != nil {
+						image.Aliases = append(image.Aliases, *alias)
+					}
+
+					alias = addAlias(fmt.Sprintf("%s/%s", entry, image.Properties["serial"]), image.Fingerprint)
+					if alias != nil {
+						image.Aliases = append(image.Aliases, *alias)
+					}
+				}
+
+				// Medium
+				alias := addAlias(fmt.Sprintf("%s/%s", entry, image.Properties["architecture"]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+
+				// Long
+				alias = addAlias(fmt.Sprintf("%s/%s/%s", entry, image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+			}
+		} else {
+			// FIXME: This is backward compatibility needed until cloud-images.ubuntu.com supports the aliases field
+			// Short
+			if image.Architecture == architectureName {
+				alias := addAlias(fmt.Sprintf("%s/%s", image.Properties["os"], image.Properties["release"]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+
+				alias = addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["release"], image.Properties["serial"]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+
+				alias = addAlias(fmt.Sprintf("%s/%c", image.Properties["os"], image.Properties["release"][0]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+
+				alias = addAlias(fmt.Sprintf("%s/%c/%s", image.Properties["os"], image.Properties["release"][0], image.Properties["serial"]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+
+				alias = addAlias(fmt.Sprintf("%s/%s", image.Properties["os"], image.Properties["version"]), image.Fingerprint)
+				if alias != nil {
+					image.Aliases = append(image.Aliases, *alias)
+				}
+
+				alias = addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["version"], image.Properties["serial"]), image.Fingerprint)
+				if
alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + } + + // Medium + alias := addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["release"], image.Properties["architecture"]), image.Fingerprint) + if alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + + alias = addAlias(fmt.Sprintf("%s/%c/%s", image.Properties["os"], image.Properties["release"][0], image.Properties["architecture"]), image.Fingerprint) + if alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + + alias = addAlias(fmt.Sprintf("%s/%s/%s", image.Properties["os"], image.Properties["version"], image.Properties["architecture"]), image.Fingerprint) + if alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + + // Long + alias = addAlias(fmt.Sprintf("%s/%s/%s/%s", image.Properties["os"], image.Properties["release"], image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) + if alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + + alias = addAlias(fmt.Sprintf("%s/%c/%s/%s", image.Properties["os"], image.Properties["release"][0], image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) + if alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + + alias = addAlias(fmt.Sprintf("%s/%s/%s/%s", image.Properties["os"], image.Properties["version"], image.Properties["architecture"], image.Properties["serial"]), image.Fingerprint) + if alias != nil { + image.Aliases = append(image.Aliases, *alias) + } + } + + newImages = append(newImages, image) + } + + return newImages, aliases, nil +} + +func (s *SimpleStreams) getImages() ([]ImageInfo, map[string]*ImageAliasesEntry, error) { + if s.cachedImages != nil && s.cachedAliases != nil { + return s.cachedImages, s.cachedAliases, nil + } + + images := []ImageInfo{} + + // Load the main index + ssIndex, err := s.parseIndex() + if err != nil { + return nil, nil, err + } + + // Iterate through the various image manifests + for _, entry := range ssIndex.Index { + // We only care about images + if entry.DataType != "image-downloads" { + continue + } + + // No point downloading an empty image list + if len(entry.Products) == 0 { + continue + } + + manifest, err := s.parseManifest(entry.Path) + if err != nil { + return nil, nil, err + } + + manifestImages, _ := manifest.ToLXD() + + for _, image := range manifestImages { + images = append(images, image) + } + } + + // Setup the aliases + images, aliases, err := s.applyAliases(images) + if err != nil { + return nil, nil, err + } + + s.cachedImages = images + s.cachedAliases = aliases + + return images, aliases, nil +} + +func (s *SimpleStreams) getPaths(fingerprint string) ([][]string, error) { + // Load the main index + ssIndex, err := s.parseIndex() + if err != nil { + return nil, err + } + + // Iterate through the various image manifests + for _, entry := range ssIndex.Index { + // We only care about images + if entry.DataType != "image-downloads" { + continue + } + + // No point downloading an empty image list + if len(entry.Products) == 0 { + continue + } + + manifest, err := s.parseManifest(entry.Path) + if err != nil { + return nil, err + } + + manifestImages, downloads := manifest.ToLXD() + + for _, image := range manifestImages { + if strings.HasPrefix(image.Fingerprint, fingerprint) { + urls := [][]string{} + for _, path := range downloads[image.Fingerprint] { + urls = append(urls, []string{path[0], path[1], path[2]}) + } + return urls, nil + } + } + } + + return nil, 
fmt.Errorf("Couldn't find the requested image") +} + +func (s *SimpleStreams) downloadFile(path string, hash string, target string, progress func(int)) error { + download := func(url string, hash string, target string) error { + out, err := os.Create(target) + if err != nil { + return err + } + defer out.Close() + + resp, err := s.http.Get(url) + if err != nil { + } + defer resp.Body.Close() + + body := &TransferProgress{Reader: resp.Body, Length: resp.ContentLength, Handler: progress} + + sha256 := sha256.New() + _, err = io.Copy(io.MultiWriter(out, sha256), body) + if err != nil { + return err + } + + if fmt.Sprintf("%x", sha256.Sum(nil)) != hash { + os.Remove(target) + return fmt.Errorf("Hash mismatch") + } + + return nil + } + + // Try http first + if strings.HasPrefix(s.url, "https://") { + err := download(fmt.Sprintf("http://%s/%s", strings.TrimPrefix(s.url, "https://"), path), hash, target) + if err == nil { + return nil + } + } + + err := download(fmt.Sprintf("%s/%s", s.url, path), hash, target) + if err != nil { + return err + } + + return nil +} + +func (s *SimpleStreams) ListAliases() (ImageAliases, error) { + _, aliasesMap, err := s.getImages() + if err != nil { + return nil, err + } + + aliases := ImageAliases{} + + for _, alias := range aliasesMap { + aliases = append(aliases, *alias) + } + + return aliases, nil +} + +func (s *SimpleStreams) ListImages() ([]ImageInfo, error) { + images, _, err := s.getImages() + return images, err +} + +func (s *SimpleStreams) GetAlias(name string) string { + _, aliasesMap, err := s.getImages() + if err != nil { + return "" + } + + alias, ok := aliasesMap[name] + if !ok { + return "" + } + + return alias.Target +} + +func (s *SimpleStreams) GetImageInfo(fingerprint string) (*ImageInfo, error) { + images, _, err := s.getImages() + if err != nil { + return nil, err + } + + for _, image := range images { + if strings.HasPrefix(image.Fingerprint, fingerprint) { + return &image, nil + } + } + + return nil, fmt.Errorf("The requested image couldn't be found.") +} + +func (s *SimpleStreams) ExportImage(image string, target string) (string, error) { + if !IsDir(target) { + return "", fmt.Errorf("Split images can only be written to a directory.") + } + + paths, err := s.getPaths(image) + if err != nil { + return "", err + } + + for _, path := range paths { + fields := strings.Split(path[0], "/") + targetFile := filepath.Join(target, fields[len(fields)-1]) + + err := s.downloadFile(path[0], path[1], targetFile, nil) + if err != nil { + return "", err + } + } + + return target, nil +} + +func (s *SimpleStreams) Download(image string, file string, target string, progress func(int)) error { + paths, err := s.getPaths(image) + if err != nil { + return err + } + + for _, path := range paths { + if file != path[2] { + continue + } + + return s.downloadFile(path[0], path[1], target, progress) + } + + return fmt.Errorf("The file couldn't be found.") +} === added file 'src/github.com/lxc/lxd/shared/status.go' --- src/github.com/lxc/lxd/shared/status.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/status.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,65 @@ +package shared + +type StatusCode int + +const ( + OperationCreated StatusCode = 100 + Started StatusCode = 101 + Stopped StatusCode = 102 + Running StatusCode = 103 + Cancelling StatusCode = 104 + Pending StatusCode = 105 + Starting StatusCode = 106 + Stopping StatusCode = 107 + Aborting StatusCode = 108 + Freezing StatusCode = 109 + Frozen StatusCode = 110 + Thawed StatusCode = 111 + + Success 
StatusCode = 200 + + Failure StatusCode = 400 + Cancelled StatusCode = 401 +) + +func (o StatusCode) String() string { + return map[StatusCode]string{ + OperationCreated: "Operation created", + Started: "Started", + Stopped: "Stopped", + Running: "Running", + Cancelling: "Cancelling", + Pending: "Pending", + Success: "Success", + Failure: "Failure", + Cancelled: "Cancelled", + Starting: "Starting", + Stopping: "Stopping", + Aborting: "Aborting", + Freezing: "Freezing", + Frozen: "Frozen", + Thawed: "Thawed", + }[o] +} + +func (o StatusCode) IsFinal() bool { + return int(o) >= 200 +} + +/* + * Create a StatusCode from an lxc.State code. N.B.: we accept an int instead + * of a lxc.State so that the shared code doesn't depend on lxc, which depends + * on liblxc, etc. + */ +func FromLXCState(state int) StatusCode { + return map[int]StatusCode{ + 1: Stopped, + 2: Starting, + 3: Running, + 4: Stopping, + 5: Aborting, + 6: Freezing, + 7: Frozen, + 8: Thawed, + }[state] +} === added file 'src/github.com/lxc/lxd/shared/stringset.go' --- src/github.com/lxc/lxd/shared/stringset.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/stringset.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +// That this code needs to exist is kind of dumb, but I'm not sure how else to +// do it. +package shared + +type StringSet map[string]bool + +func (ss StringSet) IsSubset(oss StringSet) bool { + for k := range map[string]bool(ss) { + if _, ok := map[string]bool(oss)[k]; !ok { + return false + } + } + + return true +} + +func NewStringSet(strings []string) StringSet { + ret := map[string]bool{} + for _, s := range strings { + ret[s] = true + } + + return StringSet(ret) +} === added file 'src/github.com/lxc/lxd/shared/stringset_test.go' --- src/github.com/lxc/lxd/shared/stringset_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/stringset_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +package shared + +import ( + "testing" +) + +func TestStringSetSubset(t *testing.T) { + ss := NewStringSet([]string{"one", "two"}) + + if !ss.IsSubset(ss) { + t.Error("subsets wrong") + return + } + + if !ss.IsSubset(NewStringSet([]string{"one", "two", "three"})) { + t.Error("subsets wrong") + return + } + + if ss.IsSubset(NewStringSet([]string{"four"})) { + t.Error("subsets wrong") + return + } +} === added directory 'src/github.com/lxc/lxd/shared/termios' === added file 'src/github.com/lxc/lxd/shared/termios/termios.go' --- src/github.com/lxc/lxd/shared/termios/termios.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/termios/termios.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// +build !windows + +package termios + +import ( + "syscall" + "unsafe" + + "github.com/lxc/lxd/shared" +) + +// #include <termios.h> +import "C" + +type State struct { + Termios syscall.Termios +} + +func IsTerminal(fd int) bool { + _, err := GetState(fd) + return err == nil +} + +func GetState(fd int) (*State, error) { + termios := syscall.Termios{} + + ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(&termios))) + if ret != 0 { + return nil, err.(syscall.Errno) + } + + state := State{} + state.Termios = termios + + return &state, nil +} + +func GetSize(fd int) (int, int, error) { + var dimensions [4]uint16 + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return -1, -1, err + } + + return int(dimensions[1]), int(dimensions[0]), nil +} + +func MakeRaw(fd int) (*State, error) { + var err error + 
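// The current state is deep-copied below so the caller can later revert + // to it with Restore(); cfmakeraw() is applied only to the copy before + // it is set on the terminal. + 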
var oldState, newState *State + + oldState, err = GetState(fd) + if err != nil { + return nil, err + } + + err = shared.DeepCopy(&oldState, &newState) + if err != nil { + return nil, err + } + + C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState.Termios))) + + err = Restore(fd, newState) + if err != nil { + return nil, err + } + + return oldState, nil +} + +func Restore(fd int, state *State) error { + ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(&state.Termios))) + if ret != 0 { + return err.(syscall.Errno) + } + + return nil +} === added file 'src/github.com/lxc/lxd/shared/termios/termios_windows.go' --- src/github.com/lxc/lxd/shared/termios/termios_windows.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/termios/termios_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +// +build windows + +package termios + +import ( + "golang.org/x/crypto/ssh/terminal" +) + +type State terminal.State + +func IsTerminal(fd int) bool { + return terminal.IsTerminal(fd) +} + +func GetState(fd int) (*State, error) { + state, err := terminal.GetState(fd) + if err != nil { + return nil, err + } + + currentState := State(*state) + return &currentState, nil +} + +func GetSize(fd int) (int, int, error) { + return terminal.GetSize(fd) +} + +func MakeRaw(fd int) (*State, error) { + state, err := terminal.MakeRaw(fd) + if err != nil { + return nil, err + } + + oldState := State(*state) + return &oldState, nil +} + +func Restore(fd int, state *State) error { + newState := terminal.State(*state) + + return terminal.Restore(fd, &newState) +} === added file 'src/github.com/lxc/lxd/shared/util.go' --- src/github.com/lxc/lxd/shared/util.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,693 @@ +package shared + +import ( + "bufio" + "bytes" + "crypto/rand" + "encoding/gob" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" +) + +const SnapshotDelimiter = "/" +const DefaultPort = "8443" + +// AddSlash adds a slash to the end of paths if they don't already have one. +// This can be useful for rsyncing things, since rsync behaves differently +// depending on the presence or absence of a trailing slash. +func AddSlash(path string) string { + if path[len(path)-1] != '/' { + return path + "/" + } + + return path +} + +func PathExists(name string) bool { + _, err := os.Lstat(name) + if err != nil && os.IsNotExist(err) { + return false + } + return true +} + +// PathIsEmpty checks if the given path is empty. +func PathIsEmpty(path string) (bool, error) { + f, err := os.Open(path) + if err != nil { + return false, err + } + defer f.Close() + + // read in ONLY one file + _, err = f.Readdir(1) + + // and if the file is EOF... well, the dir is empty. + if err == io.EOF { + return true, nil + } + return false, err +} + +// IsDir returns true if the given path is a directory. +func IsDir(name string) bool { + stat, err := os.Lstat(name) + if err != nil { + return false + } + return stat.IsDir() +} + +// VarPath returns the provided path elements joined by a slash and +// appended to the end of $LXD_DIR, which defaults to /var/lib/lxd. +func VarPath(path ...string) string { + varDir := os.Getenv("LXD_DIR") + if varDir == "" { + varDir = "/var/lib/lxd" + } + + items := []string{varDir} + items = append(items, path...) + return filepath.Join(items...) +} + +// LogPath returns the directory that LXD should put logs under. 
If LXD_DIR is +// set, this path is $LXD_DIR/logs, otherwise it is /var/log/lxd. +func LogPath(path ...string) string { + varDir := os.Getenv("LXD_DIR") + logDir := "/var/log/lxd" + if varDir != "" { + logDir = filepath.Join(varDir, "logs") + } + items := []string{logDir} + items = append(items, path...) + return filepath.Join(items...) +} + +func ParseLXDFileHeaders(headers http.Header) (uid int, gid int, mode os.FileMode) { + uid, err := strconv.Atoi(headers.Get("X-LXD-uid")) + if err != nil { + uid = 0 + } + + gid, err = strconv.Atoi(headers.Get("X-LXD-gid")) + if err != nil { + gid = 0 + } + + /* Allow people to send stuff with a leading 0 for octal or a regular + * int that represents the perms when rendered in octal. */ + rawMode, err := strconv.ParseInt(headers.Get("X-LXD-mode"), 0, 0) + if err != nil { + rawMode = 0644 + } + mode = os.FileMode(rawMode) + + return uid, gid, mode +} + +func ReadToJSON(r io.Reader, req interface{}) error { + buf, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + return json.Unmarshal(buf, req) +} + +func ReaderToChannel(r io.Reader) <-chan []byte { + ch := make(chan []byte) + + go func() { + for { + /* io.Copy uses a 32KB buffer, so we might as well too. */ + buf := make([]byte, 32*1024) + nr, err := r.Read(buf) + if nr > 0 { + ch <- buf[0:nr] + } + + if err != nil { + close(ch) + break + } + } + }() + + return ch +} + +// Returns a random hex encoded string from crypto/rand. +func RandomCryptoString() (string, error) { + buf := make([]byte, 32) + n, err := rand.Read(buf) + if err != nil { + return "", err + } + + if n != len(buf) { + return "", fmt.Errorf("not enough random bytes read") + } + + return hex.EncodeToString(buf), nil +} + +func SplitExt(fpath string) (string, string) { + b := path.Base(fpath) + ext := path.Ext(fpath) + return b[:len(b)-len(ext)], ext +} + +func AtoiEmptyDefault(s string, def int) (int, error) { + if s == "" { + return def, nil + } + + return strconv.Atoi(s) +} + +func ReadStdin() ([]byte, error) { + buf := bufio.NewReader(os.Stdin) + line, _, err := buf.ReadLine() + if err != nil { + return nil, err + } + return line, nil +} + +func WriteAll(w io.Writer, buf []byte) error { + return WriteAllBuf(w, bytes.NewBuffer(buf)) +} + +func WriteAllBuf(w io.Writer, buf *bytes.Buffer) error { + toWrite := int64(buf.Len()) + for { + n, err := io.Copy(w, buf) + if err != nil { + return err + } + + toWrite -= n + if toWrite <= 0 { + return nil + } + } +} + +// FileMove tries to move a file by using os.Rename, +// if that fails it tries to copy the file and remove the source. +func FileMove(oldPath string, newPath string) error { + if err := os.Rename(oldPath, newPath); err == nil { + return nil + } + + if err := FileCopy(oldPath, newPath); err != nil { + return err + } + + os.Remove(oldPath) + + return nil +} + +// FileCopy copies a file, overwriting the target if it exists. 
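+// A short usage sketch (illustrative only; the paths are hypothetical): +// +// if err := FileCopy("/tmp/src.conf", "/tmp/dst.conf"); err != nil { +// // the copy failed; the destination may not have been created +// }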
+func FileCopy(source string, dest string) error { + s, err := os.Open(source) + if err != nil { + return err + } + defer s.Close() + + d, err := os.Create(dest) + if err != nil { + if os.IsExist(err) { + d, err = os.OpenFile(dest, os.O_WRONLY, 0700) + if err != nil { + return err + } + } else { + return err + } + } + defer d.Close() + + _, err = io.Copy(d, s) + return err +} + +type BytesReadCloser struct { + Buf *bytes.Buffer +} + +func (r BytesReadCloser) Read(b []byte) (n int, err error) { + return r.Buf.Read(b) +} + +func (r BytesReadCloser) Close() error { + /* no-op since we're in memory */ + return nil +} + +func IsSnapshot(name string) bool { + return strings.Contains(name, SnapshotDelimiter) +} + +func ExtractSnapshotName(name string) string { + return strings.SplitN(name, SnapshotDelimiter, 2)[1] +} + +func ReadDir(p string) ([]string, error) { + ents, err := ioutil.ReadDir(p) + if err != nil { + return []string{}, err + } + + var ret []string + for _, ent := range ents { + ret = append(ret, ent.Name()) + } + return ret, nil +} + +func MkdirAllOwner(path string, perm os.FileMode, uid int, gid int) error { + // This function is a slightly modified version of MkdirAll from the Go standard library. + // https://golang.org/src/os/path.go?s=488:535#L9 + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return fmt.Errorf("path exists but isn't a directory") + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = MkdirAllOwner(path[0:j-1], perm, uid, gid) + if err != nil { + return err + } + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + + err_chown := os.Chown(path, uid, gid) + if err_chown != nil { + return err_chown + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. 
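+ // (This also makes the function tolerant of a concurrent mkdir of the + // same path: if the directory turns out to exist on this re-check, the + // earlier Mkdir error is treated as success.)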
+ dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +func StringInSlice(key string, list []string) bool { + for _, entry := range list { + if entry == key { + return true + } + } + return false +} + +func IntInSlice(key int, list []int) bool { + for _, entry := range list { + if entry == key { + return true + } + } + return false +} + +func IsOnSharedMount(pathName string) (bool, error) { + file, err := os.Open("/proc/self/mountinfo") + if err != nil { + return false, err + } + defer file.Close() + + absPath, err := filepath.Abs(pathName) + if err != nil { + return false, err + } + + expPath, err := os.Readlink(absPath) + if err != nil { + expPath = absPath + } + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + rows := strings.Fields(line) + + if rows[4] != expPath { + continue + } + + if strings.HasPrefix(rows[6], "shared:") { + return true, nil + } else { + return false, nil + } + } + + return false, nil +} + +func IsBlockdev(fm os.FileMode) bool { + return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0)) +} + +func IsBlockdevPath(pathName string) bool { + sb, err := os.Stat(pathName) + if err != nil { + return false + } + + fm := sb.Mode() + return ((fm&os.ModeDevice != 0) && (fm&os.ModeCharDevice == 0)) +} + +func BlockFsDetect(dev string) (string, error) { + out, err := exec.Command("blkid", "-s", "TYPE", "-o", "value", dev).Output() + if err != nil { + return "", fmt.Errorf("Failed to run blkid on: %s", dev) + } + + return strings.TrimSpace(string(out)), nil +} + +// DeepCopy copies src to dest by using encoding/gob so it's not that fast. +func DeepCopy(src, dest interface{}) error { + buff := new(bytes.Buffer) + enc := gob.NewEncoder(buff) + dec := gob.NewDecoder(buff) + if err := enc.Encode(src); err != nil { + return err + } + + if err := dec.Decode(dest); err != nil { + return err + } + + return nil +} + +func RunningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} + +func ValidHostname(name string) bool { + // Validate length + if len(name) < 1 || len(name) > 63 { + return false + } + + // Validate first character + if strings.HasPrefix(name, "-") { + return false + } + + if _, err := strconv.Atoi(string(name[0])); err == nil { + return false + } + + // Validate last character + if strings.HasSuffix(name, "-") { + return false + } + + // Validate the character set + match, _ := regexp.MatchString("^[-a-zA-Z0-9]*$", name) + if !match { + return false + } + + return true +} + +func TextEditor(inPath string, inContent []byte) ([]byte, error) { + var f *os.File + var err error + var path string + + // Detect the text editor to use + editor := os.Getenv("VISUAL") + if editor == "" { + editor = os.Getenv("EDITOR") + if editor == "" { + for _, p := range []string{"editor", "vi", "emacs", "nano"} { + _, err := exec.LookPath(p) + if err == nil { + editor = p + break + } + } + if editor == "" { + return []byte{}, fmt.Errorf("No text editor found, please set the EDITOR environment variable.") + } + } + } + + if inPath == "" { + // No file path given; stage the provided content in a temporary file + f, err = ioutil.TempFile("", "lxd_editor_") + if err != nil { + return []byte{}, err + 
} + + if err = f.Chmod(0600); err != nil { + f.Close() + os.Remove(f.Name()) + return []byte{}, err + } + + f.Write(inContent) + f.Close() + + path = f.Name() + defer os.Remove(path) + } else { + path = inPath + } + + cmdParts := strings.Fields(editor) + cmd := exec.Command(cmdParts[0], append(cmdParts[1:], path)...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + return []byte{}, err + } + + content, err := ioutil.ReadFile(path) + if err != nil { + return []byte{}, err + } + + return content, nil +} + +func ParseMetadata(metadata interface{}) (map[string]interface{}, error) { + newMetadata := make(map[string]interface{}) + s := reflect.ValueOf(metadata) + if !s.IsValid() { + return nil, nil + } + + if s.Kind() == reflect.Map { + for _, k := range s.MapKeys() { + if k.Kind() != reflect.String { + return nil, fmt.Errorf("Invalid metadata provided (key isn't a string).") + } + newMetadata[k.String()] = s.MapIndex(k).Interface() + } + } else if s.Kind() == reflect.Ptr && !s.Elem().IsValid() { + return nil, nil + } else { + return nil, fmt.Errorf("Invalid metadata provided (type isn't a map).") + } + + return newMetadata, nil +} + +// Parse a size string in bytes (e.g. 200kB or 5GB) into the number of bytes it +// represents. Supports suffixes up to EB. "" == 0. +func ParseByteSizeString(input string) (int64, error) { + if input == "" { + return 0, nil + } + + if len(input) < 3 { + return -1, fmt.Errorf("Invalid value: %s", input) + } + + // Extract the suffix + suffix := input[len(input)-2:] + + // Extract the value + value := input[0 : len(input)-2] + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return -1, fmt.Errorf("Invalid integer: %s", input) + } + + if valueInt < 0 { + return -1, fmt.Errorf("Invalid value: %d", valueInt) + } + + // Figure out the multiplicator + multiplicator := int64(0) + switch suffix { + case "kB": + multiplicator = 1024 + case "MB": + multiplicator = 1024 * 1024 + case "GB": + multiplicator = 1024 * 1024 * 1024 + case "TB": + multiplicator = 1024 * 1024 * 1024 * 1024 + case "PB": + multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 + case "EB": + multiplicator = 1024 * 1024 * 1024 * 1024 * 1024 * 1024 + default: + return -1, fmt.Errorf("Unsupported suffix: %s", suffix) + } + + return valueInt * multiplicator, nil +} + +// Parse a size string in bits (e.g. 200kbit or 5Gbit) into the number of bits +// it represents. Supports suffixes up to Ebit. "" == 0. 
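+// For example (an illustrative call, not part of the original file): +// +// bits, err := ParseBitSizeString("10Gbit") // bits == 10000000000, err == nil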
+func ParseBitSizeString(input string) (int64, error) { + if input == "" { + return 0, nil + } + + if len(input) < 5 { + return -1, fmt.Errorf("Invalid value: %s", input) + } + + // Extract the suffix + suffix := input[len(input)-4:] + + // Extract the value + value := input[0 : len(input)-4] + valueInt, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return -1, fmt.Errorf("Invalid integer: %s", input) + } + + if valueInt < 0 { + return -1, fmt.Errorf("Invalid value: %d", valueInt) + } + + // Figure out the multiplicator + multiplicator := int64(0) + switch suffix { + case "kbit": + multiplicator = 1000 + case "Mbit": + multiplicator = 1000 * 1000 + case "Gbit": + multiplicator = 1000 * 1000 * 1000 + case "Tbit": + multiplicator = 1000 * 1000 * 1000 * 1000 + case "Pbit": + multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 + case "Ebit": + multiplicator = 1000 * 1000 * 1000 * 1000 * 1000 * 1000 + default: + return -1, fmt.Errorf("Unsupported suffix: %s", suffix) + } + + return valueInt * multiplicator, nil +} + +type TransferProgress struct { + io.Reader + percentage float64 + total int64 + + Length int64 + Handler func(int) +} + +func (pt *TransferProgress) Read(p []byte) (int, error) { + n, err := pt.Reader.Read(p) + + if pt.Handler == nil { + return n, err + } + + if n > 0 { + pt.total += int64(n) + percentage := float64(pt.total) / float64(pt.Length) * float64(100) + + if percentage-pt.percentage > 0.9 { + pt.percentage = percentage + + progressInt := 1 - (int(percentage) % 1) + int(percentage) + if progressInt > 100 { + progressInt = 100 + } + + pt.Handler(progressInt) + } + } + + return n, err +} === added file 'src/github.com/lxc/lxd/shared/util_linux.go' --- src/github.com/lxc/lxd/shared/util_linux.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/util_linux.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,386 @@ +// +build linux +// +build cgo + +package shared + +import ( + "errors" + "fmt" + "os" + "os/exec" + "syscall" + "unsafe" +) + +// #cgo LDFLAGS: -lutil -lpthread +/* +#define _GNU_SOURCE +#include <errno.h> +#include <fcntl.h> +#include <grp.h> +#include <limits.h> +#include <pty.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> +#include <sys/stat.h> +#include <sys/types.h> +#include <termios.h> +#include <unistd.h> + +#ifndef AT_SYMLINK_FOLLOW +#define AT_SYMLINK_FOLLOW 0x400 +#endif + +#ifndef AT_EMPTY_PATH +#define AT_EMPTY_PATH 0x1000 +#endif + +// This is an adaptation from https://codereview.appspot.com/4589049, to be +// included in the stdlib with the stdlib's license. 
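+// +// Note: the pty/pipe helpers below report failures on stderr and leave their +// output parameters untouched; the Go callers pre-initialize the returned +// file descriptors to -1 and treat an unchanged value as failure.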
+ +static int mygetgrgid_r(int gid, struct group *grp, + char *buf, size_t buflen, struct group **result) { + return getgrgid_r(gid, grp, buf, buflen, result); +} + +void configure_pty(int fd) { + struct termios term_settings; + struct winsize win; + + if (tcgetattr(fd, &term_settings) < 0) { + fprintf(stderr, "Failed to get settings: %s\n", strerror(errno)); + return; + } + + term_settings.c_iflag |= IMAXBEL; + term_settings.c_iflag |= IUTF8; + term_settings.c_iflag |= BRKINT; + term_settings.c_iflag |= IXANY; + + term_settings.c_cflag |= HUPCL; + + if (tcsetattr(fd, TCSANOW, &term_settings) < 0) { + fprintf(stderr, "Failed to set settings: %s\n", strerror(errno)); + return; + } + + if (ioctl(fd, TIOCGWINSZ, &win) < 0) { + fprintf(stderr, "Failed to get the terminal size: %s\n", strerror(errno)); + return; + } + + win.ws_col = 80; + win.ws_row = 25; + + if (ioctl(fd, TIOCSWINSZ, &win) < 0) { + fprintf(stderr, "Failed to set the terminal size: %s\n", strerror(errno)); + return; + } + + if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) { + fprintf(stderr, "Failed to set FD_CLOEXEC: %s\n", strerror(errno)); + return; + } + + return; +} + +void create_pty(int *master, int *slave, int uid, int gid) { + if (openpty(master, slave, NULL, NULL, NULL) < 0) { + fprintf(stderr, "Failed to openpty: %s\n", strerror(errno)); + return; + } + + configure_pty(*master); + configure_pty(*slave); + + if (fchown(*slave, uid, gid) < 0) { + fprintf(stderr, "Warning: error chowning pty to container root\n"); + fprintf(stderr, "Continuing...\n"); + } + if (fchown(*master, uid, gid) < 0) { + fprintf(stderr, "Warning: error chowning pty to container root\n"); + fprintf(stderr, "Continuing...\n"); + } +} + +void create_pipe(int *master, int *slave) { + int pipefd[2]; + + if (pipe2(pipefd, O_CLOEXEC) < 0) { + fprintf(stderr, "Failed to create a pipe: %s\n", strerror(errno)); + return; + } + + *master = pipefd[0]; + *slave = pipefd[1]; +} + +int shiftowner(char *basepath, char *path, int uid, int gid) { + struct stat sb; + int fd, r; + char fdpath[PATH_MAX]; + char realpath[PATH_MAX]; + + fd = open(path, O_PATH|O_NOFOLLOW); + if (fd < 0) { + perror("Failed open"); + return 1; + } + + r = sprintf(fdpath, "/proc/self/fd/%d", fd); + if (r < 0) { + perror("Failed sprintf"); + close(fd); + return 1; + } + + r = readlink(fdpath, realpath, PATH_MAX - 1); + if (r < 0) { + perror("Failed readlink"); + close(fd); + return 1; + } + realpath[r] = '\0'; + + if (strlen(realpath) < strlen(basepath)) { + printf("Invalid path, source (%s) is outside of basepath (%s).\n", realpath, basepath); + close(fd); + return 1; + } + + if (strncmp(realpath, basepath, strlen(basepath))) { + printf("Invalid path, source (%s) is outside of basepath (%s).\n", realpath, basepath); + close(fd); + return 1; + } + + r = fstat(fd, &sb); + if (r < 0) { + perror("Failed fstat"); + close(fd); + return 1; + } + + r = fchownat(fd, "", uid, gid, AT_EMPTY_PATH|AT_SYMLINK_NOFOLLOW); + if (r < 0) { + perror("Failed chown"); + close(fd); + return 1; + } + + if (!S_ISLNK(sb.st_mode)) { + r = chmod(fdpath, sb.st_mode); + if (r < 0) { + perror("Failed chmod"); + close(fd); + return 1; + } + } + + close(fd); + return 0; +} +*/ +import "C" + +func ShiftOwner(basepath string, path string, uid int, gid int) error { + cbasepath := C.CString(basepath) + defer C.free(unsafe.Pointer(cbasepath)) + + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + + r := C.shiftowner(cbasepath, cpath, C.int(uid), C.int(gid)) + if r != 0 { + return fmt.Errorf("Failed to change ownership of: %s", path) + } + return 
nil +} + +func OpenPty(uid, gid int) (master *os.File, slave *os.File, err error) { + fd_master := C.int(-1) + fd_slave := C.int(-1) + rootUid := C.int(uid) + rootGid := C.int(gid) + + C.create_pty(&fd_master, &fd_slave, rootUid, rootGid) + + if fd_master == -1 || fd_slave == -1 { + return nil, nil, errors.New("Failed to create a new pts pair") + } + + master = os.NewFile(uintptr(fd_master), "master") + slave = os.NewFile(uintptr(fd_slave), "slave") + + return master, slave, nil +} + +func Pipe() (master *os.File, slave *os.File, err error) { + fd_master := C.int(-1) + fd_slave := C.int(-1) + + C.create_pipe(&fd_master, &fd_slave) + + if fd_master == -1 || fd_slave == -1 { + return nil, nil, errors.New("Failed to create a new pipe") + } + + master = os.NewFile(uintptr(fd_master), "master") + slave = os.NewFile(uintptr(fd_slave), "slave") + + return master, slave, nil +} + +// GroupName is an adaptation from https://codereview.appspot.com/4589049. +func GroupName(gid int) (string, error) { + var grp C.struct_group + var result *C.struct_group + + bufSize := C.size_t(C.sysconf(C._SC_GETGR_R_SIZE_MAX)) + buf := C.malloc(bufSize) + if buf == nil { + return "", fmt.Errorf("allocation failed") + } + defer C.free(buf) + + // mygetgrgid_r is a wrapper around getgrgid_r used + // to avoid gid_t, because C.gid_t(gid) for + // unknown reasons doesn't work on linux. + rv := C.mygetgrgid_r(C.int(gid), + &grp, + (*C.char)(buf), + bufSize, + &result) + + if rv != 0 { + return "", fmt.Errorf("failed group lookup: %s", syscall.Errno(rv)) + } + + if result == nil { + return "", fmt.Errorf("unknown group %d", gid) + } + + return C.GoString(result.gr_name), nil +} + +// GroupId is an adaptation from https://codereview.appspot.com/4589049. +func GroupId(name string) (int, error) { + var grp C.struct_group + var result *C.struct_group + + bufSize := C.size_t(C.sysconf(C._SC_GETGR_R_SIZE_MAX)) + buf := C.malloc(bufSize) + if buf == nil { + return -1, fmt.Errorf("allocation failed") + } + defer C.free(buf) + + // The lookup here is by name, so getgrnam_r can be + // called directly without a gid_t wrapper. + cname := C.CString(name) + defer C.free(unsafe.Pointer(cname)) + + rv := C.getgrnam_r(cname, + &grp, + (*C.char)(buf), + bufSize, + &result) + + if rv != 0 { + return -1, fmt.Errorf("failed group lookup: %s", syscall.Errno(rv)) + } + + if result == nil { + return -1, fmt.Errorf("unknown group %s", name) + } + + return int(C.int(result.gr_gid)), nil +} + +// --- pure Go functions --- + +func GetFileStat(p string) (uid int, gid int, major int, minor int, + inode uint64, nlink int, err error) { + var stat syscall.Stat_t + err = syscall.Lstat(p, &stat) + if err != nil { + return + } + uid = int(stat.Uid) + gid = int(stat.Gid) + inode = uint64(stat.Ino) + nlink = int(stat.Nlink) + major = -1 + minor = -1 + if stat.Mode&syscall.S_IFBLK != 0 || stat.Mode&syscall.S_IFCHR != 0 { + major = int(stat.Rdev / 256) + minor = int(stat.Rdev % 256) + } + + return +} + +func IsMountPoint(name string) bool { + _, err := exec.LookPath("mountpoint") + if err == nil { + err = exec.Command("mountpoint", "-q", name).Run() + if err != nil { + return false + } + + return true + } + + stat, err := os.Stat(name) + if err != nil { + return false + } + + rootStat, err := os.Lstat(name + "/..") + if err != nil { + return false + } + // If the directory has the same device as parent, then it's not a mountpoint. 
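+ // (Caveat: this fallback only catches mounts that cross filesystems; a + // bind mount from the same filesystem keeps the parent's device ID and + // is therefore not detected.)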
+ return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev +} + +func ReadLastNLines(f *os.File, lines int) (string, error) { + if lines <= 0 { + return "", fmt.Errorf("invalid line count") + } + + stat, err := f.Stat() + if err != nil { + return "", err + } + + data, err := syscall.Mmap(int(f.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return "", err + } + defer syscall.Munmap(data) + + for i := len(data) - 1; i >= 0; i-- { + if data[i] == '\n' { + lines-- + } + + if lines < 0 { + return string(data[i+1:]), nil + } + } + + return string(data), nil +} + +func SetSize(fd int, width int, height int) (err error) { + var dimensions [4]uint16 + dimensions[0] = uint16(height) + dimensions[1] = uint16(width) + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { + return err + } + return nil +} === added file 'src/github.com/lxc/lxd/shared/util_test.go' --- src/github.com/lxc/lxd/shared/util_test.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/shared/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,102 @@ +package shared + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + "testing" +) + +func TestFileCopy(t *testing.T) { + helloWorld := []byte("hello world\n") + source, err := ioutil.TempFile("", "") + if err != nil { + t.Error(err) + return + } + defer os.Remove(source.Name()) + + if err := WriteAll(source, helloWorld); err != nil { + source.Close() + t.Error(err) + return + } + source.Close() + + dest, err := ioutil.TempFile("", "") + if err != nil { + t.Error(err) + return + } + defer os.Remove(dest.Name()) + dest.Close() + + if err := FileCopy(source.Name(), dest.Name()); err != nil { + t.Error(err) + return + } + + dest2, err := os.Open(dest.Name()) + if err != nil { + t.Error(err) + return + } + + content, err := ioutil.ReadAll(dest2) + if err != nil { + t.Error(err) + return + } + + if string(content) != string(helloWorld) { + t.Error("content mismatch: ", string(content), "!=", string(helloWorld)) + return + } +} + +func TestReadLastNLines(t *testing.T) { + source, err := ioutil.TempFile("", "") + if err != nil { + t.Error(err) + return + } + defer os.Remove(source.Name()) + + for i := 0; i < 50; i++ { + fmt.Fprintf(source, "%d\n", i) + } + + lines, err := ReadLastNLines(source, 100) + if err != nil { + t.Error(err) + return + } + + split := strings.Split(lines, "\n") + for i := 0; i < 50; i++ { + if fmt.Sprintf("%d", i) != split[i] { + t.Error(fmt.Sprintf("got %s expected %d", split[i], i)) + return + } + } + + source.Seek(0, 0) + for i := 0; i < 150; i++ { + fmt.Fprintf(source, "%d\n", i) + } + + lines, err = ReadLastNLines(source, 100) + if err != nil { + t.Error(err) + return + } + + split = strings.Split(lines, "\n") + for i := 0; i < 100; i++ { + if fmt.Sprintf("%d", i+50) != split[i] { + t.Error(fmt.Sprintf("got %s expected %d", split[i], i+50)) + return + } + } +} === added directory 'src/github.com/lxc/lxd/specs' === added file 'src/github.com/lxc/lxd/specs/architectures.md' --- src/github.com/lxc/lxd/specs/architectures.md 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/specs/architectures.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,35 @@ +# Introduction +LXD, just like LXC, can run on just about any architecture that's +supported by the Linux kernel and by Go. + +Some objects in LXD are tied to an architecture, like containers, +container snapshots and images. 
+ +This document lists all the supported architectures, their unique +identifier (used in the database), how they should be named and some +notes. + + +Please note that what LXD cares about is the kernel architecture, not +the particular userspace flavor as determined by the toolchain. + +That means that LXD considers armv7 hard-float to be the same as armv7 +soft-float and refers to both as "armv7". If useful to the user, the +exact userspace ABI may be set as an image and container property, +allowing easy querying. + +# Architectures + +ID | Name | Notes | Personalities +:--- | :--- | :---- | :------------ +1 | i686 | 32bit Intel x86 | +2 | x86\_64 | 64bit Intel x86 | x86 +3 | armv7l | 32bit ARMv7 little-endian | +4 | aarch64 | 64bit ARMv8 little-endian | armv7 (optional) +5 | ppc | 32bit PowerPC big-endian | +6 | ppc64 | 64bit PowerPC big-endian | powerpc +7 | ppc64le | 64bit PowerPC little-endian | +8 | s390x | 64bit ESA/390 big-endian | + +The architecture names above are typically aligned with the Linux kernel +architecture names. === added file 'src/github.com/lxc/lxd/specs/command-line-user-experience.md' --- src/github.com/lxc/lxd/specs/command-line-user-experience.md 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/specs/command-line-user-experience.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,680 @@ +# Introduction + +The "lxc" command is the main tool used by users to interact with LXD when +running it outside of OpenStack. The command is available to all users and can +be used to manage any local or remote resources provided they have the +credentials to do so. + +# Remote operations + +The lxc command line tool is designed to manage LXD hosts as well as connect +to a variety of remote resources. + +The list of remote servers and their credentials is only stored in the client; +the servers don't know about each other, nor do they have to. The client is the +one initiating any cross-server communication by instructing the two servers to +start talking to each other. Should that fail, for example because of a +firewall between the two servers, the client will then act as a relay +forwarding the data stream between the two servers. + +* * * + +# Resources +The lxc command interacts with resources. Currently supported resources are: + * containers + * container snapshots + * images + * container hosts + +lxc defaults to interacting with the local LXD daemon; remote operations +must be prefixed with the remote server's name followed by a colon. + +Some examples with the "info" command: + +Command | Result +:------ | :----- +lxc info | Show some information on the local LXD server +lxc info dakara: | Same but against the remote "dakara" server +lxc info c1 | Show information about the "c1" container +lxc image info ubuntu/trusty/amd64 | Show information about the "ubuntu/trusty/amd64" image (alias) +lxc info dakara:c2/yesterday | Show information about the "yesterday" snapshot of container "c2" on remote host "dakara" + + +This URI scheme is designed to be very specific (no ambiguity) and as short as +possible. + +* * * + +# Commands +## Overview + +Command | Description +:------ | :---------- +config | Change container settings (quotas, notes, OS metadata, ...) +copy | Copy an existing container or container snapshot as a new container +delete | Delete a resource (container, snapshot, image, ...) 
+exec | Spawn a command within the container +file | Transfer files in and out of the container +image | Image handling +info | Show information about a container, container snapshot or remote server +init | Create a container without starting it +launch | Create and start a new container from an image +list | List all the containers +move | Move a container either to rename it or to migrate it +profile | Manage container configuration profiles +publish | Make an image out of an existing container or container snapshot +remote | Remote server handling +restart | Restart a container +restore | Restore a snapshot of a container +snapshot | Make a snapshot (stateful or not) of a container +start | Start a container +stop | Stop a container + +* * * + +## config + +**Arguments** + + edit [resource] + get [resource] <key> + set [resource] <key> <value> + show [resource] + unset [resource] <key> + device add <resource> <device name> <type> [key=value]... + device remove <resource> <device name> + device list <resource> + device show <resource> + trust add [remote] <certificate> + trust remove [remote] <fingerprint> + trust list [remote] + +**Description** + +Probably one of the most complex commands, it allows querying and +setting all the configuration options available for containers and LXD hosts. + +The trust sub-command is there to manage the server's trust store. It +can list the certificates which the server currently trusts, delete +entries (based on their fingerprint) and add new entries using a +provided certificate. + +The edit commands are there to offer a more convenient user interface by +opening a text editor in which the current configuration is displayed +alongside a set of useful examples. The user can then edit things in +place and when saved, all changes will be committed. + +**Examples** + +Command | Result +:------ | :----- +lxc config show | Show the local server's configuration +lxc config show dakara: | Show "dakara"'s server configuration +lxc config set core.trust\_password new-trust-password | Set the local server's trust password to "new-trust-password" +lxc config set c1 limits.memory 2GB | Set a memory limit of 2GB for container "c1" +lxc config show c1 | Show the configuration of the "c1" container, starting with the list of profiles it's based on, then the container specific settings and finally the resulting overall configuration. +lxc config trust add new-client-cert.crt | Add new-client-cert.crt to the default remote's trust store (typically local LXD) +lxc config trust add dakara: new-client-cert.crt | Add new-client-cert.crt to "dakara"'s trust store +lxc config trust list | List all the trusted certificates on the default remote +lxc config trust list dakara: | List all the trusted certificates on "dakara" +lxc config trust remove [name|\<fingerprint\>] | Remove a certificate from the default remote +lxc config trust remove dakara: \<fingerprint\> | Remove a certificate from "dakara"'s trust store + +* * * + +## copy + +**Arguments** + + <source container/snapshot> [container name] + +**Description** + +Creates a copy of an existing container or container snapshot as a new +container. If the new container's name isn't specified, a random one +will be generated. 
+ +**Examples** + +Command | Result +:------ | :----- +lxc copy c1 c2 | Create a container called "c2" which is a copy of container "c1" with its hostname changed and a fresh MAC address +lxc copy c1 dakara: | Copy container "c1" to remote host "dakara" still keeping the name "c1" on the target +lxc copy c1 dakara:c2 | Same as above but also rename the container and change its hostname + + +* * * + +## delete + +**Arguments** + + <resource> + +**Description** + +Destroy a container or container snapshot and any attached data +(configuration, snapshots, ...). + +This will destroy the resource (container) even if it is currently in use. + +**Examples** + +Command | Result +:------ | :----- +lxc delete c1 | Remove the c1 container, its configuration and any snapshot it may have +lxc delete c1/yesterday | Remove the "yesterday" snapshot of "c1" +lxc delete dakara:c2/yesterday | Remove the "yesterday" snapshot for "c2" on remote host "dakara" + +* * * + +## exec + +**Arguments** + + <container> command... + +**Description** + +Execute a command inside the remote container. + +**Examples** + +Command | Result +:------ | :----- +lxc exec c1 -- /bin/bash | Spawn /bin/bash in local container c1 +tar cf - /opt/myapp \| lxc exec dakara:c2 -- tar xvf - | Make a tarball of /opt/myapp with the stream going out to stdout, then have that piped into lxc exec connecting to a receiving tar command in a container running on remote host "dakara" + +* * * + +## file + +**Arguments** + + file push [--uid=UID] [--gid=GID] [--mode=MODE] <source> [<source>...] <target> + file pull <source> [<source>...] <target> + +**Description** + +Copies files to or from the container. Supports rewriting the uid/gid/mode. + +**Examples** + +Command | Result +:------ | :----- +lxc file push --uid=0 --gid=0 test.sh dakara:c2/root/ | Push test.sh as /root/test.sh inside container "c2" on host "dakara", rewrite the uid/gid to 0/0 +lxc file pull dakara:c2/etc/hosts /tmp/ | Grab /etc/hosts from container "c2" on "dakara" and write it as /tmp/hosts on the client + +* * * + +## image + +**Arguments** + + image alias create <alias> <target> + image alias list [<remote>:] + image alias delete <alias> + image copy [<remote>:]<image> <remote>: [--alias=ALIAS].. [--copy-aliases] [--public] + image delete <image> + image edit <image> + image export <image> [target] + image import <tarball> [rootfs tarball] [target] [--public] [--created-at=ISO-8601] [--expires-at=ISO-8601] [--fingerprint=FINGERPRINT] [--alias=ALIAS].. [prop=value] + image info <image> + image list [filter] + image move <source image> <destination image> + image set <image> <key> <value> + image show <image> + image unset <image> <key> + +**Description** + +Manage the LXD image store. + +Images can either be fed from an external tool using the API or manually +imported into LXD using the import command. Attributes can then be set +on them and images can be copied/moved to other LXD hosts. + +Images may also be copied or moved between hosts. + +The unique identifier of an image is its sha256; as a result, it's only +possible to have one copy of any given image on a given LXD host. + + +The "description" property is special in that if it's set, it'll appear in "lxc image list". + +Aliases are mappings between a user-friendly name and an image. +Aliases may contain any character except for colons. + +Images are typically referenced by their full or partial fingerprint, in most +cases aliases may also be used and for listings, property filters can +also be used. 
+ + +**Examples** + +Command | Result +:------ | :----- +lxc image import centos-7-x86\_64.tar.gz --created-at=2014-12-10 --expires-at=2015-01-10 os=fedora release=7 arch=amd64 | Import a centos LXD image in the local LXD image store +lxc image import debian-jessie\_amd64.tar.gz dakara: | Import a debian LXD image in the lxc image store of remote host "dakara" +lxc image import debian-jessie\_amd64.meta.tar.gz debian-jessie\_amd64.tar.gz dakara: | Import a debian LXD image in split format in the lxc image store of remote host "dakara" +lxc image alias create centos/7 \<fingerprint\> | Create an alias for centos/7 pointing to our centos 7 image + +**Example output (lxc image list)** + + ALIAS FINGERPRINT PUBLIC DESCRIPTION UPLOAD DATE + ------------------------------------------------------------------------------------------- + busybox-amd64 146246146827... yes - Mar 12, 2015 at 10:41pm (CDT) + ubuntu/devel (3 more) 95830b5e4e04... yes Ubuntu 15.04 (devel) x86 64bit Mar 8, 2015 at 1:27am (CST) + - a1420943168a... no Test image Mar 4, 2015 at 3:41pm (CST) + +**Example output (lxc image info)** + + Fingerprint: 146246146827e213eff5c9b5243c8c28cf461184a507588d6c7abac192e600dd + Filename: ubuntu-vivid-amd64-default-20150308.tar.xz + Size: 65MB + Architecture: x86_64 + Public: yes + Timestamps: + Created: 2015/03/08 10:50 UTC + Uploaded: 2015/03/09 16:00 UTC + Expires: never + Properties: + arch: x86_64 + build: 20150308 + description: Ubuntu 15.04 (devel) x86 64bit + os: Ubuntu + release: vivid, 15.04 + variant: default + Aliases: + - ubuntu/devel + - ubuntu/vivid + - ubuntu/vivid/amd64 + +* * * + +## info + +**Arguments** + + [resource] + +**Description** + +Prints information about a container, snapshot or LXD host. + +**Examples** + +Command | Result +:------ | :----- +lxc info | Displays local host status +lxc info dakara: | Displays the host status of remote host "dakara" +lxc info c1 | Displays the status of local container "c1" +lxc info dakara:c2 | Displays the status of remote container "c2" on "dakara" +lxc info dakara:c2/yesterday | Displays the status of snapshot "yesterday" for remote container "c2" on "dakara" + +* * * + +## init + +**Arguments** + + <image> [container name] [--ephemeral|-e] [--profile|-p <profile> ...] + +**Description** + +init is used to create a new container from an image, but not start it. + +If the container name isn't specified, a random one will be used. + +Passing --ephemeral will make LXD create a temporary container which +will be destroyed on shutdown. + +--profile is used to apply a configuration profile (or multiple ones if passed +multiple times) to the newly created container; when passed with an existing +container, it will only append the configuration profile for that run. + +**Examples** + +Command | Result +:------ | :----- +lxc init ubuntu/trusty/amd64 | Create a new local container based on the Ubuntu 14.04 amd64 image and with a random name +lxc init ubuntu/precise/i386 dakara: | Create a new remote container on "dakara" based on the local Ubuntu 14.04 i386 image and with a random name +lxc init ubuntu c1 -p micro | Create a new local container called "c1" based on the Ubuntu image and run it with a "micro" profile + +* * * + +## launch + +**Arguments** + + <image> [container name] [--ephemeral|-e] [--profile|-p <profile> ...] + +**Description** + +launch is used to create and start a new container from an image. + +If the container name isn't specified, a random one will be used. + +Passing --ephemeral will make LXD create a temporary container which +will be destroyed on shutdown. 
+ +--profile is used to apply a configuration profile (or multiple ones if passed +multiple times) to the newly created container; when passed with an existing +container, it will only append the configuration profile for that run. + +**Examples** + +Command | Result +:------ | :----- +lxc launch ubuntu/trusty/amd64 | Create a new local container using a random name and based on the Ubuntu 14.04 amd64 image +lxc launch ubuntu/precise/i386 dakara: | Create a new remote container on "dakara" using a random name and based on the local Ubuntu 14.04 i386 image +lxc launch ubuntu c1 -p with-nesting | Create a new local container called "c1" based on the Ubuntu image and run it with a profile allowing container nesting + +## list + +**Arguments** + + [resource] [filters] [format] + +**Description** + +Lists all the available containers. If a container is specified, then +it'll list all the available snapshots for the container. + +Each entry comes with some minimal status information (status, addresses, ...), +configurable if needed by passing a list of fields to display. + +For containers, a reasonable default would be to show the name, state, ipv4 +addresses, ipv6 addresses, memory and disk consumption. +Snapshots would be displayed below their parent containers and would re-use the +name, state and disk consumption field; the others wouldn't be relevant and +will be displayed as "-". + +The filters are: + * A single keyword like "web" which will list any container with "web" in its name. + * A key/value pair referring to a configuration item. For those, the namespace can be abbreviated to the smallest unambiguous identifier: + * "user.blah=abc" will list all containers with the "blah" user property set to "abc" + * "u.blah=abc" will do the same + * "security.privileged=1" will list all privileged containers + * "s.privileged=1" will do the same + +Multiple filters may be passed; a container will then have to match them all to be listed. + +**Examples** + +Command | Result +:------ | :----- +lxc list | Show the list of local containers, snapshots and images +lxc list dakara: | Show the list of remote containers, snapshots and images on "dakara" +lxc list c1 | Show the entry for the local container "c1" as well as any snapshot it may have + +**Example output** + + NAME STATE IPV4 IPV6 MEMORY DISK + ------------------------------------------------------------------------------------------- + precise STOPPED - - - UNKNOWN + precise-gui RUNNING 10.0.3.59 2607:f2c0:f00f:2761:216:3eff:fe51:234f 4435.89MB UNKNOWN + vivid STOPPED - - - UNKNOWN + +* * * + +## move + +**Arguments** + + <source resource> <destination resource> + +**Description** + +Moves a resource either locally (rename) or remotely (migration). If the +container is running, this will do a live migration, otherwise it will simply +move the on-disk container data. + +**Examples** + +Command | Result +:------ | :----- +lxc move c1 c2 | Rename container c1 to c2 +lxc move c1 dakara: | Move c1 to "dakara". If the container is stopped, this simply moves the container and its configuration to "dakara". If it is running, this live migrates container c1 to "dakara". This will first stream the filesystem content over to "dakara", then dump the container state to disk, sync the state and the delta of the filesystem, restore the container on the remote host and then wipe it from the source host +lxc move c1 dakara:c2 | Move c1 to "dakara" as "c2" + +* * * + +## profile + +**Arguments** + + device add <profile> <device name> <type> [key=value]... + device remove <profile> <device name> + device list <profile> + apply <container> [<profile>,<profile>, ...] 
+ create <profile> + copy <source profile> <target profile> + delete <profile> + edit <profile> + list [remote] [filters] + get <profile> <key> + move <profile> <new profile name> + set <profile> <key> <value> + show <profile> + unset <profile> <key> + +**Description** + +This command supports profiles which are used to group configuration settings +(configuration keys and devices) and then apply the resulting set to a given +container. + +It's possible to create, delete, list and set up profiles on a remote +host. The one limitation is that a container can only reference local +profiles, so profiles need to be copied across hosts or be moved around +alongside the containers. + +Also note that removing a profile or moving it off the host will fail if any +local container still references it. + +**Examples** + +Command | Result +:------ | :----- +lxc profile create micro | Create a new "micro" profile +lxc profile set micro limits.memory 256MB | Restrict memory usage to 256MB +lxc profile set micro limits.cpu 1 | Restrict CPU usage to a single core +lxc profile copy micro dakara: | Copy the resulting profile over to "dakara" +lxc profile show micro | Show all the options associated with the "micro" profile and all the containers using it +lxc profile unset dakara:nano limits.memory | Unset "limits.memory" for the "nano" profile on "dakara" +lxc profile apply c1 micro,nesting | Set the profiles for container "c1" to be "micro" followed by "nesting" +lxc profile apply c1 "" | Unset any assigned profile for container "c1" + + +* * * + +## publish + +**Arguments** + + <resource> [target] [--public] [--expires-at=ISO-8601] [--alias=ALIAS].. [prop-key=prop-value]... + +**Description** + +Takes an existing container or container snapshot and makes a +compressed image out of it. By default the image will be private, that is, +it'll only be accessible locally or remotely by authenticated clients. If +--public is passed, then anyone can pull the image so long as LXD is running. + +It will also be possible for some image stores to allow users to push new +images to them using that command, though the two image stores that will come +pre-configured will be read-only. + +**Examples** + +Command | Result +:------ | :----- +lxc publish c1/yesterday | Turn c1/yesterday into a private image for consumption by trusted LXD servers +lxc publish c2 dakara: --public | Turn c2 into a public image on remote host "dakara" + +* * * + +## remote + +**Arguments** + + add <name> <URI or hostname> [--always-relay] [--password=PASSWORD] [--accept-certificate] [--public] + remove <name> + list + rename <old name> <new name> + set-url <name> <new URI> + set-default <name> + get-default + +**Description** + +Manages remote LXD servers. + +Scheme | Description +:----- | :---------- +unix:// | Unix socket (or abstract if leading @) access to LXD +https:// | Communication with LXD over the network (https) + +By default lxc will have the following remotes defined: + +Name | URI | Description +:--- | :-- | :---------- +local | unix:///var/lib/lxd/sock | Communication to the local LXD daemon (hidden if not present) + +The default remote is "local"; this allows simple operations with local +resources without having to specify local: in front of all of their names. This +behavior can be changed by using the set-default argument. On a system without +a local LXD, the first manually added remote should be automatically set as +default. + +Protocol auto-detection will happen so that adding a source solely based on its +name will work too, assuming it doesn't support multiple protocols. 
+
+The "--always-relay" flag of "remote add" can mean one of two things:
+ * If it's an image server, that this server is only reachable by the
+   client and that the client needs to act as a relay and transfer the
+   image over to the server.
+ * If it's an LXD server, that this server has limited connectivity which
+   prevents it from accessing the image servers and that the client needs
+   to act as a relay for it.
+
+The "--accept-certificate" flag of "remote add" will automatically accept
+the remote's certificate without prompting the user to verify the certificate
+fingerprint.
+
+**Examples**
+
+Command | Result
+:------ | :-----
+lxc remote add dakara dakara.local | Add a new remote called "dakara" using its avahi DNS record and protocol auto-detection
+lxc remote add dakara dakara.local --password=BLAH | Add a new remote called "dakara" using its avahi DNS record and protocol auto-detection, providing the password in advance
+lxc remote add dakara dakara.local --password=BLAH --accept-certificate | Add a new remote called "dakara" using its avahi DNS record and protocol auto-detection, providing the password in advance and accepting the certificate without fingerprint verification
+lxc remote add vorash https://vorash.srv.dcmtl.stgraber.net | Add remote "vorash" pointing to a remote LXD instance using the full URI
+lxc remote set-default vorash | Mark it as the default remote
+lxc start c1 | Start container "c1" on it
+
+* * *
+
+## restart
+
+**Arguments**
+
+    <resource> [--kill|-k] [--timeout|-t]
+
+**Description**
+
+Restarts the container. The flags have the same behavior as the 'stop' command.
+Restart will fail on ephemeral containers, as they cannot be booted after they
+are stopped.
+
+**Examples**
+
+Command | Result
+:------ | :-----
+lxc restart c1 | Do a clean restart of local container "c1"
+lxc restart dakara:c1 -t 10 | Do a clean restart of remote container "c1" on "dakara" with a reduced timeout of 10s
+lxc restart dakara:c1 -k | Kill and restart the remote container "c1" on "dakara"
+
+* * *
+
+## restore
+
+**Arguments**
+
+    <resource> <snapshot name> [--stateful]
+
+**Description**
+
+Sets the current state of a resource back to what it was when it
+was snapshotted. All snapshots are kept as they are, only the current state is
+discarded and replaced by that from the snapshot. This requires the container
+be stopped unless --stateful is passed and the snapshot contained a running
+container state, in which case the container will be killed, reset to the
+snapshot and the container's state restored.
+
+**Examples**
+
+Command | Result
+:------ | :-----
+lxc restore c1 it-works | Restore the c1 container back to its "it-works" snapshot state
+lxc restore dakara:c1 pre-upgrade --stateful | Restore a pre-dist-upgrade snapshot of container "c1" running on "dakara". Allows for a very fast recovery time in case of problem
+
+* * *
+
+## snapshot
+
+**Arguments**
+
+    <resource> [snapshot name] [--stateful] [--expire=ISO-8601]
+
+**Description**
+
+Makes a read-only snapshot of a resource (typically a container).
+For a container this will be a snapshot of the container's filesystem,
+configuration and, if --stateful is passed, its current running state.
+
+If the snapshot name isn't specified, a timestamp will be used.
+
+**Examples**
+
+Command | Result
+:------ | :-----
+lxc snapshot c1 it-works | Create "it-works" snapshot of container c1
+lxc snapshot dakara:c1 pre-upgrade --stateful | Make a pre-dist-upgrade snapshot of container "c1" running on "dakara". Allows for a very fast recovery time in case of problem
+
+* * *
+
+## start
+
+**Arguments**
+
+    <resource>
+
+**Description**
+
+Starts an existing container.
+
+
+**Examples**
+
+Command | Result
+:------ | :-----
+lxc start c2 | Start local container "c2"
+lxc start dakara:c3 | Start the "c3" container on remote host "dakara"
+
+* * *
+
+## stop
+
+**Arguments**
+
+    <resource> [--kill|-k] [--timeout|-t]
+
+**Description**
+
+Stops the container. By default, does a clean shutdown by sending
+SIGPWR to the container's init process; if the container is still running
+after 30s, an error is displayed. The 30s timeout can be overridden using
+--timeout. Alternatively, --kill can be passed, which will cause the container
+to be immediately killed (the timeout is meaningless in this case).
+
+**Examples**
+
+Command | Result
+:------ | :-----
+lxc stop c1 | Do a clean shutdown of local container "c1"
+lxc stop dakara:c1 -t 10 | Do a clean shutdown of remote container "c1" on "dakara" with a reduced timeout of 10s
+lxc stop dakara:c1 -k | Kill the remote container "c1" on "dakara"

=== added file 'src/github.com/lxc/lxd/specs/configuration.md'
--- src/github.com/lxc/lxd/specs/configuration.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/configuration.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,298 @@
+# Introduction
+LXD currently stores the following kinds of configuration:
+ - Server configuration (the LXD daemon itself)
+ - Container configuration
+
+The server configuration is a simple set of keys and values.
+
+The container configuration is a bit more complex as it uses both
+key/value configuration and some more complex configuration structures
+for devices, network interfaces and storage volumes.
+
+# Server configuration
+## Key/value configuration
+The key/value configuration is namespaced with the following namespaces
+currently supported:
+ - core (core daemon configuration)
+ - images (image configuration)
+ - storage (storage configuration)
+
+Key | Type | Default | Description
+:-- | :--- | :------ | :----------
+core.https\_address | string | - | Address to bind for the remote API
+core.https\_allowed\_origin | string | - | Access-Control-Allow-Origin http header value
+core.https\_allowed\_methods | string | - | Access-Control-Allow-Methods http header value
+core.https\_allowed\_headers | string | - | Access-Control-Allow-Headers http header value
+core.trust\_password | string | - | Password to be provided by clients to set up a trust
+storage.lvm\_vg\_name | string | - | LVM Volume Group name to be used for container and image storage. A default Thin Pool is created using 100% of the free space in the Volume Group, unless `storage.lvm_thinpool_name` is set.
+storage.lvm\_thinpool\_name | string | "LXDPool" | LVM Thin Pool to use within the Volume Group specified in `storage.lvm_vg_name`, if the default pool parameters are undesirable.
+storage.lvm\_fstype | string | ext4 | Filesystem to format new LVs with; for now its value can only be ext4 (default) or xfs.
+storage.zfs\_pool\_name | string | - | ZFS pool name
+images.compression\_algorithm | string | gzip | Compression algorithm to use for new images (bzip2, gzip, lzma, xz or none)
+images.remote\_cache\_expiry | integer | 10 | Number of days after which an unused cached remote image will be flushed
+images.auto\_update\_interval | integer | 6 | Interval in hours at which to look for updates to cached images (0 disables it)
+images.auto\_update\_cached | boolean | true | Whether to automatically update any image that LXD caches
+
+Those keys can be set using the lxc tool with:
+
+    lxc config set <key> <value>
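+
+For instance (a minimal sketch; the password value is hypothetical):
+
+    lxc config set core.trust_password my-server-password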
+
+
+# Container configuration
+## Properties
+The following are direct container properties and can't be part of a profile:
+ - name
+ - architecture
+
+Name is the container name and can only be changed by renaming the container.
+
+## Key/value configuration
+The key/value configuration is namespaced with the following namespaces
+currently supported:
+ - boot (boot related options, timing, dependencies, ...)
+ - environment (environment variables)
+ - limits (resource limits)
+ - raw (raw container configuration overrides)
+ - security (security policies)
+ - user (storage for user properties, searchable)
+ - volatile (used internally by LXD to store settings specific to a given container instance)
+
+The currently supported keys are:
+
+Key | Type | Default | Live update | Description
+:-- | :--- | :------ | :---------- | :----------
+boot.autostart | boolean | false | n/a | Always start the container when LXD starts
+boot.autostart.delay | integer | 0 | n/a | Number of seconds to wait after the container started before starting the next one
+boot.autostart.priority | integer | 0 | n/a | What order to start the containers in (starting with highest)
+environment.\* | string | - | yes (exec) | key/value environment variables to export to the container and set on exec
+limits.cpu | string | - (all) | yes | Number or range of CPUs to expose to the container
+limits.cpu.allowance | string | 100% | yes | How much of the CPU can be used. Can be a percentage (e.g. 50%) for a soft limit or a hard chunk of time (25ms/100ms)
+limits.cpu.priority | integer | 10 (maximum) | yes | CPU scheduling priority compared to other containers sharing the same CPUs (overcommit)
+limits.disk.priority | integer | 5 (medium) | yes | When under load, how much priority to give to the container's I/O requests
+limits.memory | string | - (all) | yes | Percentage of the host's memory or fixed value in bytes (supports kB, MB, GB, TB, PB and EB suffixes)
+limits.memory.enforce | string | hard | yes | If hard, container can't exceed its memory limit. If soft, the container can exceed its memory limit when extra host memory is available.
+limits.memory.swap | boolean | true | yes | Whether to allow some of the container's memory to be swapped out to disk
+limits.memory.swap.priority | integer | 10 (maximum) | yes | The higher this is set, the less likely the container is to be swapped to disk
+limits.network.priority | integer | 0 (minimum) | yes | When under load, how much priority to give to the container's network requests
+limits.processes | integer | - (max) | yes | Maximum number of processes that can run in the container
+linux.kernel\_modules | string | - | yes | Comma separated list of kernel modules to load before starting the container
+raw.apparmor | blob | - | yes | Apparmor profile entries to be appended to the generated profile
+raw.lxc | blob | - | no | Raw LXC configuration to be appended to the generated one
+security.nesting | boolean | false | yes | Support running lxd (nested) inside the container
+security.privileged | boolean | false | no | Runs the container in privileged mode
+user.\* | string | - | n/a | Free form user key/value storage (can be used in search)
+
+The following volatile keys are currently internally used by LXD:
+
+Key | Type | Default | Description
+:-- | :--- | :------ | :----------
+volatile.\<name\>.hwaddr | string | - | Network device MAC address (when no hwaddr property is set on the device itself)
+volatile.\<name\>.name | string | - | Network device name (when no name property is set on the device itself)
+volatile.base\_image | string | - | The hash of the image the container was created from, if any.
+volatile.last\_state.idmap | string | - | Serialized container uid/gid map
+volatile.last\_state.power | string | - | Container state as of last host shutdown
+
+
+Additionally, those user keys have become common with images (support isn't guaranteed):
+
+Key | Type | Default | Description
+:-- | :--- | :------ | :----------
+user.network\_mode | string | dhcp | One of "dhcp" or "link-local". Used to configure network in supported images.
+user.meta-data | string | - | Cloud-init meta-data, content is appended to seed value.
+user.user-data | string | #!cloud-config | Cloud-init user-data, content is used as seed value.
+user.vendor-data | string | #!cloud-config | Cloud-init vendor-data, content is used as seed value.
+
+Note that while a type is defined above as a convenience, all values are
+stored as strings and should be exported over the REST API as strings
+(which makes it possible to support any extra values without breaking
+backward compatibility).
+
+Those keys can be set using the lxc tool with:
+
+    lxc config set <container> <key> <value>
+
+Volatile keys can't be set by the user and can only be set directly against a container.
+
+The raw keys allow direct interaction with the backend features that LXD
+itself uses. Setting those may very well break LXD in non-obvious ways
+and should be avoided whenever possible.
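+
+For instance, to expose two CPUs to a container (a minimal sketch; the
+container name is hypothetical):
+
+    lxc config set c1 limits.cpu 2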
+
+
+## Devices configuration
+LXD will always provide the container with the basic devices which are
+required for a standard POSIX system to work. These aren't visible in
+container or profile configuration and may not be overridden.
+
+Those include:
+ - /dev/null (character device)
+ - /dev/zero (character device)
+ - /dev/full (character device)
+ - /dev/console (character device)
+ - /dev/tty (character device)
+ - /dev/random (character device)
+ - /dev/urandom (character device)
+ - lo (network interface)
+
+Anything else has to be defined in the container configuration or in one
+of its profiles. The default profile will typically contain a network
+interface to become eth0 in the container.
+
+To add extra devices to a container, device entries can be added
+directly to a container, or to a profile.
+
+Devices may be added or removed while the container is running.
+
+Every device entry is identified by a unique name. If the same name is
+used in a subsequent profile or in the container's own configuration,
+the whole entry is overridden by the new definition.
+
+Device entries are added through:
+    lxc config device add <container> <name> <type> [key=value]...
+    lxc profile device add <profile> <name> <type> [key=value]...
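+
+For example, bind-mounting a host directory into a container might look like
+this (a sketch; the names and paths are hypothetical):
+
+    lxc config device add c1 shared disk source=/srv/data path=/mnt/data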
+
+### Device types
+LXD supports the following device types:
+
+ID (database) | Name | Description
+:-- | :-- | :--
+0 | none | Inheritance blocker
+1 | nic | Network interface
+2 | disk | Mountpoint inside the container
+3 | unix-char | Unix character device
+4 | unix-block | Unix block device
+
+### Type: none
+A none type device doesn't have any property and doesn't create anything inside the container.
+
+Its only purpose is to stop inheritance of devices coming from profiles.
+
+To do so, just add a none type device with the same name as the one you wish to skip inheriting.
+It can be added in a profile being applied after the profile it originated from or directly on the container.
+
+### Type: nic
+LXD supports different kinds of network devices:
+ - physical: Straight physical device passthrough from the host. The targeted device will vanish from the host and appear in the container.
+ - bridged: Uses an existing bridge on the host and creates a virtual device pair to connect the host bridge to the container.
+ - macvlan: Sets up a new network device based on an existing one but using a different MAC address.
+ - p2p: Creates a virtual device pair, putting one side in the container and leaving the other side on the host.
+
+Different network interface types have different additional properties, the current list is:
+
+Key | Type | Default | Required | Used by | Description
+:-- | :-- | :-- | :-- | :-- | :--
+nictype | string | - | yes | all | The device type, one of "physical", "bridged", "macvlan" or "p2p"
+limits.ingress | string | - | no | bridged, p2p | I/O limit in bit/s (supports kbit, Mbit, Gbit suffixes)
+limits.egress | string | - | no | bridged, p2p | I/O limit in bit/s (supports kbit, Mbit, Gbit suffixes)
+limits.max | string | - | no | bridged, p2p | Same as modifying both limits.ingress and limits.egress
+name | string | kernel assigned | no | all | The name of the interface inside the container
+host\_name | string | randomly assigned | no | bridged, p2p, macvlan | The name of the interface on the host
+hwaddr | string | randomly assigned | no | all | The MAC address of the new interface
+mtu | integer | parent MTU | no | all | The MTU of the new interface
+parent | string | - | yes | physical, bridged, macvlan | The name of the host device or bridge
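+
+A bridged interface added to a profile might look like this (a sketch; the
+profile and bridge names are hypothetical):
+
+    lxc profile device add default eth0 nic nictype=bridged parent=lxcbr0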
+
+### Type: disk
+Disk entries are essentially mountpoints inside the container. They can
+either be a bind-mount of an existing file or directory on the host, or
+if the source is a block device, a regular mount.
+
+The following properties exist:
+
+Key | Type | Default | Required | Description
+:-- | :-- | :-- | :-- | :--
+limits.read | string | - | no | I/O limit in byte/s (supports kB, MB, GB, TB, PB and EB suffixes) or in iops (must be suffixed with "iops")
+limits.write | string | - | no | I/O limit in byte/s (supports kB, MB, GB, TB, PB and EB suffixes) or in iops (must be suffixed with "iops")
+limits.max | string | - | no | Same as modifying both limits.read and limits.write
+path | string | - | yes | Path inside the container where the disk will be mounted
+source | string | - | yes | Path on the host, either to a file/directory or to a block device
+optional | boolean | false | no | Controls whether to fail if the source doesn't exist
+readonly | boolean | false | no | Controls whether to make the mount read-only
+size | string | - | no | Disk size in bytes (supports kB, MB, GB, TB, PB and EB suffixes). This is only supported for the rootfs (/).
+
+If multiple disks, backed by the same block device, have I/O limits set,
+the average of the limits will be used.
+
+### Type: unix-char
+Unix character device entries simply make the requested character device
+appear in the container's /dev and allow read/write operations to it.
+
+The following properties exist:
+
+Key | Type | Default | Required | Description
+:-- | :-- | :-- | :-- | :--
+path | string | - | yes | Path inside the container
+major | int | device on host | no | Device major number
+minor | int | device on host | no | Device minor number
+uid | int | 0 | no | UID of the device owner in the container
+gid | int | 0 | no | GID of the device owner in the container
+mode | int | 0660 | no | Mode of the device in the container
+
+### Type: unix-block
+Unix block device entries simply make the requested block device
+appear in the container's /dev and allow read/write operations to it.
+
+The following properties exist:
+
+Key | Type | Default | Required | Description
+:-- | :-- | :-- | :-- | :--
+path | string | - | yes | Path inside the container
+major | int | device on host | no | Device major number
+minor | int | device on host | no | Device minor number
+uid | int | 0 | no | UID of the device owner in the container
+gid | int | 0 | no | GID of the device owner in the container
+mode | int | 0660 | no | Mode of the device in the container
+
+## Profiles
+Profiles can store any configuration that a container can (key/value or devices)
+and any number of profiles can be applied to a container.
+
+Profiles are applied in the order they are specified, so the last profile
+to specify a specific key wins.
+
+In any case, resource-specific configuration always overrides that
+coming from the profiles.
+
+
+If not present, LXD will create a "default" profile which comes with a
+network interface connected to LXD's default bridge (lxcbr0).
+
+The "default" profile is set for any new container created which doesn't
+specify a different profiles list.
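+
+As an illustrative sketch of those ordering rules (the profile names and
+values are hypothetical; the commands are the ones described in the command
+line specification):
+
+    lxc profile set micro limits.memory 256MB
+    lxc profile set large limits.memory 2GB
+    # "large" is applied last, so c1 gets limits.memory=2GB
+    lxc launch ubuntu c1 -p micro -p large
+    # container-specific configuration overrides both profiles: 512MB
+    lxc config set c1 limits.memory 512MB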
+
+## JSON representation
+A representation of a container using all the different types of
+configurations would look like:
+
+    {
+        "name": "my-container",
+        "profiles": ["default"],
+        "architecture": "x86_64",
+        "config": {
+            "limits.cpu": "3",
+            "security.privileged": "true"
+        },
+        "devices": {
+            "nic-lxcbr0": {
+                "type": "none"
+            },
+            "nic-mybr0": {
+                "type": "nic",
+                "mtu": "9000",
+                "parent": "mybr0"
+            },
+            "rootfs": {
+                "type": "disk",
+                "path": "/",
+                "source": "UUID=8f7fdf5e-dc60-4524-b9fe-634f82ac2fb6"
+            }
+        },
+        "status": {
+            "status": "Running",
+            "status_code": 103,
+            "ips": [{"interface": "eth0",
+                     "protocol": "INET6",
+                     "address": "2001:470:b368:1020:1::2"},
+                    {"interface": "eth0",
+                     "protocol": "INET",
+                     "address": "172.16.15.30"}]}
+    }
+

=== added file 'src/github.com/lxc/lxd/specs/daemon-behavior.md'
--- src/github.com/lxc/lxd/specs/daemon-behavior.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/daemon-behavior.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,34 @@
+# Introduction
+
+This specification covers some of the daemon's behavior, such as reaction to given signals, crashes, ...
+
+# Startup
+On every start, LXD checks that its directory structure exists. If it
+doesn't, it'll create the required directories, generate a keypair and
+initialize the database.
+
+Once the daemon is ready for work, LXD will scan the containers table
+for any container for which the stored power\_state differs from the
+current one. If a container's power\_state in the database is 1 and the
+container isn't running, LXD will start it.
+
+# Signal handling
+## SIGINT, SIGQUIT, SIGTERM
+For those signals, LXD assumes that it's being temporarily stopped and
+will be restarted at a later time to continue handling the containers.
+
+The containers will keep running and LXD will close all connections and
+exit cleanly.
+
+## SIGPWR
+Indicates to LXD that the host is going down.
+
+LXD will attempt a clean shutdown of all the containers. After 30s, it
+will kill any remaining container.
+
+The container power\_state in the containers table is kept as it was, so
+that after the host is done rebooting, LXD can restore the containers as
+they were.
+
+## SIGUSR1
+Write a memory profile dump to the file specified with \-\-memprofile.
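+
+For example (a sketch; the flag spelling is the one referenced above and a
+single LXD process is assumed):
+
+    lxd --memprofile=/tmp/lxd.memprofile &
+    kill -USR1 $(pidof lxd)    # writes the profile to /tmp/lxd.memprofile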
=== added file 'src/github.com/lxc/lxd/specs/database.md'
--- src/github.com/lxc/lxd/specs/database.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/database.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,300 @@
+# Introduction
+So first of all, why a database?
+
+Rather than keeping the configuration and state within each container's
+directory as is traditionally done by LXC, LXD has an internal database
+which stores all of that information. This allows very quick queries
+against all containers' configuration.
+
+
+An example is the rather obvious question "what containers are using br0?".
+To answer that question without a database, LXD would have to iterate
+through every single container, load and parse its configuration and
+then look at what network devices are defined in there.
+
+While that may be quick with a few containers, imagine how many
+filesystem accesses would be required for 2000 containers. Instead, with
+a database, it's only a matter of accessing the already cached database
+with a pretty simple query.
+
+
+# Database engine
+As this is a purely internal database with a single client and very
+little data, we'll be using sqlite3.
+
+We have no interest in replication or other HA features offered by the
+bigger database engines as LXD runs on each compute node, and having the
+database accessible when the compute node itself isn't wouldn't be
+terribly useful.
+
+
+# Design
+The design of the database is made to be as close as possible to the REST API.
+
+The main table and field names are an exact match for the REST API.
+
+However, this database isn't an exact match of the API, mostly because
+any runtime or external piece of information will not be stored in the
+database (as this would require constant polling and wouldn't gain us
+anything).
+
+We make no guarantee of stability for the database schema. This is a
+purely internal database which only LXD should ever use. Updating LXD
+may cause a schema update and data being shuffled. In those cases, LXD
+will make a copy of the old database as ".old" to allow for a revert.
+
+
+# Tables
+The list of tables is:
+
+ * certificates
+ * config
+ * containers
+ * containers\_config
+ * containers\_devices
+ * containers\_devices\_config
+ * containers\_profiles
+ * images
+ * images\_properties
+ * images\_aliases
+ * images\_source
+ * profiles
+ * profiles\_config
+ * profiles\_devices
+ * profiles\_devices\_config
+ * schema
+
+You'll notice that compared to the REST API, there are four main differences:
+
+ 1. The extra "\*\_config" tables which are there for key/value config storage.
+ 2. The extra "images\_properties" table which is there for key/value property storage.
+ 3. The extra "schema" table which is used for database schema version tracking.
+ 4. There is no "snapshots" table. That's because snapshots are a copy
+    of a container at a given point in time, including its configuration and
+    on-disk state. So having snapshots in a separate table would only be needless duplication.
+
+# Notes on sqlite3
+sqlite3 only supports 5 storage classes: NULL, INTEGER, REAL, TEXT and BLOB.
+There is then a set of aliases for each of those storage classes, which is what we use below.
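+
+As a purely illustrative sketch (the database path assumes the default
+LXD\_DIR of /var/lib/lxd; remember that the schema is internal and may
+change at any time), the "schema" table described below can be inspected
+with the sqlite3 command line tool:
+
+    sqlite3 /var/lib/lxd/lxd.db "SELECT version, updated_at FROM schema;"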
+
+# Schema
+## certificates
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+fingerprint | VARCHAR(255) | - | NOT NULL | HEX encoded certificate fingerprint
+type | INTEGER | - | NOT NULL | Certificate type (0 = client)
+name | VARCHAR(255) | - | NOT NULL | Certificate name (defaults to CN)
+certificate | TEXT | - | NOT NULL | PEM encoded certificate
+
+Index: UNIQUE ON id AND fingerprint
+
+
+## config (server configuration)
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+key | VARCHAR(255) | - | NOT NULL | Configuration key
+value | TEXT | - | | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND key
+
+
+## containers
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+name | VARCHAR(255) | - | NOT NULL | Container name
+architecture | INTEGER | - | NOT NULL | Container architecture
+type | INTEGER | 0 | NOT NULL | Container type (0 = container, 1 = container snapshot)
+ephemeral | INTEGER | 0 | NOT NULL | Whether the container is ephemeral (0 = persistent, 1 = ephemeral)
+stateful | INTEGER | 0 | NOT NULL | Whether the snapshot contains state (snapshot only)
+creation\_date | DATETIME | - | | Container creation date (user supplied, 0 = unknown)
+
+Index: UNIQUE ON id AND name
+
+
+## containers\_config
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+container\_id | INTEGER | - | NOT NULL | containers.id FK
+key | VARCHAR(255) | - | NOT NULL | Configuration key
+value | TEXT | - | | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND container\_id + key
+
+Foreign keys: container\_id REFERENCES containers(id)
+
+
+## containers\_devices
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+container\_id | INTEGER | - | NOT NULL | containers.id FK
+name | VARCHAR(255) | - | NOT NULL | Device name
+type | INTEGER | 0 | NOT NULL | Device type (see configuration.md)
+
+Index: UNIQUE ON id AND container\_id + name
+
+Foreign keys: container\_id REFERENCES containers(id)
+
+
+## containers\_devices\_config
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+container\_device\_id | INTEGER | - | NOT NULL | containers\_devices.id FK
+key | VARCHAR(255) | - | NOT NULL | Configuration key
+value | TEXT | - | | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND container\_device\_id + key
+
+Foreign keys: container\_device\_id REFERENCES containers\_devices(id)
+
+
+## containers\_profiles
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+container\_id | INTEGER | - | NOT NULL | containers.id FK
+profile\_id | INTEGER | - | NOT NULL | profiles.id FK
+apply\_order | INTEGER | 0 | NOT NULL | Profile ordering
+
+Index: UNIQUE ON id AND container\_id + profile\_id
+
+Foreign keys: container\_id REFERENCES containers(id) and profile\_id REFERENCES profiles(id)
+
+
+## images
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+cached | INTEGER | 0 | NOT NULL | Whether this is a cached image
+fingerprint | VARCHAR(255) | - | NOT NULL | Tarball fingerprint
+filename | VARCHAR(255) | - | NOT NULL | Tarball filename
+size | INTEGER | - | NOT NULL | Tarball size
+public | INTEGER | 0 | NOT NULL | Whether the image is public or not
+auto\_update | INTEGER | 0 | NOT NULL | Whether to update from the source of this image
+architecture | INTEGER | - | NOT NULL | Image architecture
+creation\_date | DATETIME | - | | Image creation date (user supplied, 0 = unknown)
+expiry\_date | DATETIME | - | | Image expiry (user supplied, 0 = never)
+upload\_date | DATETIME | - | NOT NULL | Image entry creation date
+last\_use\_date | DATETIME | - | | Last time the image was used to spawn a container
+
+Index: UNIQUE ON id AND fingerprint
+
+
+## images\_aliases
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+name | VARCHAR(255) | - | NOT NULL | Alias name
+image\_id | INTEGER | - | NOT NULL | images.id FK
+description | VARCHAR(255) | - | | Description of the alias
+
+Index: UNIQUE ON id AND name
+
+Foreign keys: image\_id REFERENCES images(id)
+
+
+## images\_properties
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+image\_id | INTEGER | - | NOT NULL | images.id FK
+type | INTEGER | 0 | NOT NULL | Property type (0 = string, 1 = text)
+key | VARCHAR(255) | - | NOT NULL | Property name
+value | TEXT | - | | Property value (NULL for unset)
+
+Index: UNIQUE ON id
+
+Foreign keys: image\_id REFERENCES images(id)
+
+## images\_source
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+image\_id | INTEGER | - | NOT NULL | images.id FK
+server | TEXT | - | NOT NULL | Server URL
+protocol | INTEGER | 0 | NOT NULL | Protocol to access the remote (0 = lxd, 1 = direct, 2 = simplestreams)
+alias | VARCHAR(255) | - | NOT NULL | What remote alias to use as the source
+certificate | TEXT | - | | PEM encoded certificate of the server
+
+Index: UNIQUE ON id
+
+Foreign keys: image\_id REFERENCES images(id)
+
+## profiles
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+name | VARCHAR(255) | - | NOT NULL | Profile name
+description | TEXT | - | | Description of the profile
+
+Index: UNIQUE ON id AND name
+
+
+## profiles\_config
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+profile\_id | INTEGER | - | NOT NULL | profiles.id FK
+key | VARCHAR(255) | - | NOT NULL | Configuration key
+value | VARCHAR(255) | - | | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND profile\_id + key
+
+Foreign keys: profile\_id REFERENCES profiles(id)
+
+
+## profiles\_devices
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+profile\_id | INTEGER | - | NOT NULL | profiles.id FK
+name | VARCHAR(255) | - | NOT NULL | Device name
+type | INTEGER | 0 | NOT NULL | Device type (see configuration.md)
+
+Index: UNIQUE ON id AND profile\_id + name
+
+Foreign keys: profile\_id REFERENCES profiles(id)
+
+
+## profiles\_devices\_config
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+profile\_device\_id | INTEGER | - | NOT NULL | profiles\_devices.id FK
+key | VARCHAR(255) | - | NOT NULL | Configuration key
+value | TEXT | - | | Configuration value (NULL for unset)
+
+Index: UNIQUE ON id AND profile\_device\_id + key
+
+Foreign keys: profile\_device\_id REFERENCES profiles\_devices(id)
+
+
+## schema
+
+Column | Type | Default | Constraint | Description
+:----- | :--- | :------ | :--------- | :----------
+id | INTEGER | SERIAL | NOT NULL | SERIAL
+version | INTEGER | - | NOT NULL | Schema version
+updated\_at | DATETIME | - | NOT NULL | When the schema update was done
+
+Index: UNIQUE ON id AND version

=== added file 'src/github.com/lxc/lxd/specs/database_schema.dia'
Binary files src/github.com/lxc/lxd/specs/database_schema.dia	1970-01-01 00:00:00 +0000 and src/github.com/lxc/lxd/specs/database_schema.dia	2016-03-22 15:18:22 +0000 differ
=== added file 'src/github.com/lxc/lxd/specs/dev-lxd.md'
--- src/github.com/lxc/lxd/specs/dev-lxd.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/dev-lxd.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,99 @@
+# Introduction
+Communication between the hosted workload (container) and its host, while
+not strictly needed, is a pretty useful feature. It allows querying for
+configuration options, reporting errors back to the host as well as
+adding support for a range of new features by allowing events to be sent
+in either direction.
+
+In LXD, this feature is implemented through a /dev/lxd/sock node which is
+created and set up for all LXD containers.
+
+This file is a Unix socket which processes inside the container can
+connect to. It's multi-threaded so multiple clients can be connected at the
+same time.
+
+# Implementation details
+LXD on the host binds /var/lib/lxd/devlxd and starts listening for new
+connections on it.
+
+This socket is then bind-mounted into every single container started by
+LXD at /dev/lxd/sock.
+
+The bind-mount is required so we can exceed 4096 containers, otherwise,
+LXD would have to bind a different socket for every container, quickly
+reaching the FD limit.
+
+# Authentication
+Queries on /dev/lxd/sock will only return information related to the
+requesting container. To figure out where a request comes from, LXD will
+extract the initial socket ucred and compare that to the list of
+containers it manages.
+
+# Protocol
+The protocol on /dev/lxd/sock is plain-text HTTP with JSON messaging, so very
+similar to the local version of the LXD protocol.
+
+Unlike the main LXD API, there is no background operation and no
+authentication support in the /dev/lxd/sock API.
+
+# REST-API
+## API structure
+ * /
+ * /1.0
+ * /1.0/config
+ * /1.0/config/{key}
+ * /1.0/events
+ * /1.0/meta-data
+
+## API details
+### /
+#### GET
+ * Description: List of supported APIs
+ * Return: list of supported API endpoint URLs (by default ['/1.0'])
+
+### /1.0
+#### GET
+ * Description: Information about the 1.0 API
+ * Return: dict
+
+Return value:
+
+    {
+        "api_compat": 0 # Used to determine API functionality
+    }
+
+### /1.0/config
+#### GET
+ * Description: List of configuration keys
+ * Return: list of configuration key URLs
+
+Note that the configuration key names match those in the container
+config; however, not all configuration namespaces will be exported to
+/dev/lxd/sock.
+We'll initially only support the user namespace (user.\* keys).
+
+At this time, there also aren't any container-writable namespaces.
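+
+For example, listing the keys from inside a container might look like this
+(a sketch; the user.foo key is hypothetical):
+
+    GET /1.0/config
+
+    [
+        "/1.0/config/user.foo"
+    ]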
+
+### /1.0/config/\<key\>
+#### GET
+ * Description: Value of that key
+ * Return: Plain-text value
+
+### /1.0/events
+#### GET
+ * Description: event interface
+ * Return: websocket upgrade (similar to /1.0/events on main API)
+
+#### POST
+ * Description: post a new event
+
+### /1.0/meta-data
+#### GET
+ * Description: Container meta-data compatible with cloud-init
+ * Return: cloud-init meta-data
+
+Return value:
+
+    #cloud-config
+    instance-id: abc
+    local-hostname: abc

=== added file 'src/github.com/lxc/lxd/specs/environment.md'
--- src/github.com/lxc/lxd/specs/environment.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/environment.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,19 @@
+# Introduction
+The LXD client and daemon respect some environment variables to adapt to
+the user's environment and to turn some advanced features on and off.
+
+# Common
+Name | Description
+:--- | :----
+LXD\_DIR | The LXD data directory
+PATH | List of paths to look into when resolving binaries
+
+# Client environment variables
+Name | Description
+:--- | :----
+EDITOR | What text editor to use
+VISUAL | What text editor to use (if EDITOR isn't set)
+
+# Server environment variables
+Name | Description
+:--- | :----
+LXD\_SECURITY\_APPARMOR | If set to "false", forces AppArmor off
+LXD\_LXC\_TEMPLATE\_CONFIG | Path to the LXC template configuration directory

=== added file 'src/github.com/lxc/lxd/specs/image-handling.md'
--- src/github.com/lxc/lxd/specs/image-handling.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/image-handling.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,101 @@
+# Introduction
+LXD uses an image based workflow. It comes with a built-in image store
+where the user or external tools can import images.
+
+Containers are then started from those images.
+
+It's possible to spawn remote containers using local images or local
+containers using remote images. In such cases, the image may be cached
+on the target LXD.
+
+# Caching
+When spawning a container from a remote image, the remote image is
+downloaded into the local image store with the cached bit set. The image
+will be kept locally as a private image until either it's been unused
+(no new container spawned) for the number of days set in
+images.remote\_cache\_expiry or until the image's expiry is reached,
+whichever comes first.
+
+LXD keeps track of image usage by updating the last\_use\_date image
+property every time a new container is spawned from the image.
+
+# Image format
+LXD currently supports two LXD-specific image formats.
+
+The first is a unified tarball, where a single tarball
+contains both the container rootfs and the needed metadata.
+
+The second is a split model, using two tarballs instead, one containing
+the rootfs, the other containing the metadata.
+
+The former is what's produced by LXD itself and what people should be
+using for LXD-specific images.
+
+The latter is designed to allow for easy image building from existing
+non-LXD rootfs tarballs already available today.
+
+## Unified tarball
+A tarball, which can be compressed, containing:
+ - rootfs/
+ - metadata.yaml
+ - templates/ (optional)
+
+## Split tarballs
+Two (possibly compressed) tarballs. One for metadata, one for the rootfs.
+
+metadata.tar contains:
+ - metadata.yaml
+ - templates/ (optional)
+
+rootfs.tar contains a Linux root filesystem at its root.
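+
+Packing a split image from an existing rootfs tree might look like this (a
+sketch; the directory names are hypothetical):
+
+    tar -C metadata-tree -cf metadata.tar metadata.yaml templates/
+    tar -C rootfs-tree -cf rootfs.tar .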
+
+## Content
+The rootfs directory (or tarball) contains a full file system tree of what will become the container's /.
+
+The templates directory contains pongo2-formatted templates of files inside the container.
+
+metadata.yaml contains information relevant to running the image under
+LXD. At the moment, this contains:
+
+    architecture: x86_64
+    creation_date: 1424284563
+    properties:
+      description: Ubuntu 14.04 LTS Intel 64bit
+      os: Ubuntu
+      release:
+        - trusty
+        - 14.04
+    templates:
+      /etc/hosts:
+        when:
+          - create
+          - rename
+        template: hosts.tpl
+        properties:
+          foo: bar
+      /etc/hostname:
+        when:
+          - start
+        template: hostname.tpl
+
+The architecture and creation\_date fields are mandatory; the properties
+are just a set of default properties for the image. The os, release,
+name and description fields, while not mandatory in any way, should be
+pretty common.
+
+For templates, the "when" key can be one or more of:
+ - create (run at the time a new container is created from the image)
+ - copy (run when a container is created from an existing one)
+ - start (run every time the container is started)
+
+The templates will always receive the following context:
+ - trigger: name of the event which triggered the template (string)
+ - path: path of the file being templated (string)
+ - container: key/value map of container properties (name, architecture, privileged and ephemeral) (map[string]string)
+ - config: key/value map of the container's configuration (map[string]string)
+ - devices: key/value map of the devices assigned to this container (map[string]map[string]string)
+ - properties: key/value map of the template properties specified in metadata.yaml (map[string]string)
+
+As a general rule, you should never template a file which is owned by a
+package or is otherwise expected to be overwritten by normal operation
+of the container.

=== added file 'src/github.com/lxc/lxd/specs/lxd-ssl-authentication.md'
--- src/github.com/lxc/lxd/specs/lxd-ssl-authentication.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/lxd-ssl-authentication.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,118 @@
+# Introduction
+Local communications over the UNIX socket happen over a cleartext HTTP
+socket and access is restricted by socket ownership and mode.
+
+Remote communications with the LXD daemon happen using JSON over HTTPS.
+The supported protocol must be TLS1.2 or better.
+All communications must use perfect forward secrecy and ciphers must be
+limited to strong elliptic curve ones (such as ECDHE-RSA or
+ECDHE-ECDSA).
+
+Any generated key should be at least 4096-bit RSA and when using
+signatures, only SHA-2 signatures should be trusted.
+
+Since we control both client and server, there is no reason to support
+any backward compatibility with broken protocols or ciphers.
+
+Both the client and the server will generate a keypair the first time
+they're launched. The server will use that for all https connections to
+the LXD socket and the client will use its certificate as a client
+certificate for any client-server communication.
+
+# Adding a remote with a default setup
+In the default setup, when the user adds a new server with "lxc remote
+add", the server will be contacted over HTTPS, its certificate
+downloaded and its fingerprint shown to the user.
+
+The user will then be asked to confirm that this is indeed the server's
+fingerprint, which they can manually check by connecting to the server
+or by asking someone with access to it to run the status command and
+compare the fingerprints.
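+
+A rough way of checking the fingerprint from a machine that can reach the
+server (a sketch using stock OpenSSL; the host name and the 8443 port are
+taken from the examples in this document):
+
+    openssl s_client -connect dakara.local:8443 </dev/null 2>/dev/null \
+        | openssl x509 -noout -fingerprint -sha256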
+
+After that, the user must enter the trust password for that server; if
+it matches, the client certificate is added to the server's trust store
+and the client can now connect to the server without having to provide
+any additional credentials.
+
+This is a workflow that's very similar to that of ssh where an initial
+connection to an unknown server triggers a prompt.
+
+A possible extension to that is to support something similar to ssh's
+fingerprint in DNS feature where the certificate fingerprint is added as
+a TXT record, then if the domain is signed by DNSSEC, the client will
+automatically accept the fingerprint if it matches that in the DNS
+record.
+
+# Adding a remote with a PKI based setup
+In the PKI setup, a system administrator is managing a central PKI; that
+PKI then issues client certificates for all the lxc clients and server
+certificates for all the LXD daemons.
+
+Those certificates and keys are manually put in place on the various
+machines, replacing the automatically generated ones.
+
+The CA certificate is also added to all lxc clients and LXD daemons.
+A CRL may also accompany the CA certificate.
+
+In that mode, any connection to an LXD daemon will be done using the
+preseeded CA certificate. If the server certificate isn't signed by the
+CA, or if it has been revoked, the connection will simply fail with no
+obvious way for the user to bypass this.
+
+If the server certificate is valid and signed by the CA, then the
+connection continues without prompting the user for the certificate.
+
+After that, the user must enter the trust password for that server; if
+it matches, the client certificate is added to the server's trust store
+and the client can now connect to the server without having to provide
+any additional credentials.
+
+# Password prompt
+To establish a new trust relationship, a password must be set on the
+server and sent by the client when adding itself.
+
+A remote add operation should therefore go like this:
+ 1. Call GET /1.0
+ 2. If we're not in a PKI setup with a ca.crt, ask the user to confirm the fingerprint.
+ 3. Look at the dict we received back from the server. If "auth" is
+    "untrusted", ask the user for the server's password and do a POST to
+    /1.0/certificates, then call /1.0 again to check that we're indeed
+    trusted.
+ 4. Remote is now ready
+
+# Failure scenarios
+## Server certificate changes
+This will typically happen in two cases:
+
+ * The server was fully reinstalled and so changed certificate
+ * The connection is being intercepted (MITM)
+
+In such cases the client will refuse to connect to the server since the
+certificate fingerprint will not match that in the config for this
+remote.
+
+This is a fatal error and so the client shouldn't attempt to recover
+from it. Instead it must print a message to the console saying that the
+server certificate changed and that this may either be due to the server
+having been reinstalled or because the communication is being
+intercepted.
+
+That message can also tell the user that if this is expected, they can
+resolve the situation by removing the remote and adding it again (the
+message should include the two commands required to achieve that in a
+copy/pastable manner).
+
+
+## Server trust relationship revoked
+In this case, the server still uses the same certificate but all API
+calls return a 403 with an error indicating that the client isn't
+trusted.
+
+This happens if another trusted client or the local server administrator
+removed the trust entry on the server.
+
+As with the other failure scenario, this is a fatal error. A message
+must be displayed to the user explaining that this client isn't trusted
+by the server and that to re-establish the trust relationship, the user
+must remove the remote and add it again (and as above, provide the
+commands to do so).

=== added file 'src/github.com/lxc/lxd/specs/migration.md'
--- src/github.com/lxc/lxd/specs/migration.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/migration.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,40 @@
+# Live Migration in LXD
+
+## Overview
+
+Migration has two pieces, a "source", that is, the host that already has the
+container, and a "sink", the host that's getting the container. Currently,
+in the 'pull' mode, the source sets up an operation, and the sink connects
+to the source and pulls the container.
+
+There are three websockets (channels) used in migration: 1. the control stream,
+2. the criu images stream, and 3. the filesystem stream. When a migration is
+initiated, information about the container, its configuration, etc. are sent
+over the control channel (a full description of this process is below), the
+criu images and container filesystem are synced over their respective channels,
+and the result of the restore operation is sent from the sink to the source
+over the control channel.
+
+In particular, the protocol that is spoken over the criu channel and filesystem
+channel can vary, depending on what is negotiated over the control socket. For
+example, if both the source and the sink's LXD directories are on btrfs, the
+filesystem socket can speak btrfs-send/receive. Additionally, although we do a
+"stop the world" type migration right now, support for criu's p.haul protocol
+will happen over the criu socket.
+
+## Control Socket
+
+Once all three websockets are connected between the two endpoints, the source
+sends a MigrationHeader (protobuf description found in
+`/lxd/migration/migrate.proto`). This header contains the container
+configuration which will be added to the new container (TODO: profiles?). There
+are also two fields indicating the filesystem and criu protocol to speak. For
+example, if a server is hosted on a btrfs filesystem, it can indicate that it
+wants to do a `btrfs send` instead of a simple rsync (similarly, it could
+indicate that it wants to speak the p.haul protocol, instead of just rsyncing
+the images over slowly). The sink then examines this message and responds with
+whatever it supports. Continuing our example, if the sink is not on a btrfs
+filesystem, it responds with the lowest common denominator (rsync, in this
+case), and the source is to send the root filesystem using rsync. Similarly
+with the criu connection; if the sink doesn't have support for the p.haul
+protocol (or whatever), we fall back to rsync.

=== added file 'src/github.com/lxc/lxd/specs/requirements.md'
--- src/github.com/lxc/lxd/specs/requirements.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/requirements.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,28 @@
+# Requirements
+## Go
+
+LXD requires Go 1.5 or higher.
+Both the golang and gccgo compilers are supported.
+
+## Kernel requirements
+The minimum supported kernel version is 3.13.
+
+LXD requires a kernel with support for:
+ * Namespaces (pid, net, uts, ipc, mount and user)
+ * Control Groups (cpuset, cpuacct, devices, memory and net\_cls)
+ * Seccomp
+
+The following optional features also require extra kernel options:
+ * AppArmor (including Ubuntu patch for mount mediation)
+ * CRIU (exact details to be found with CRIU upstream)
+
+LXD also requires any other kernel feature needed by the LXC version in use.
+
+## LXC
+LXD requires LXC 1.1.2 or higher with the following build options:
+ * apparmor (if using LXD's apparmor support)
+ * cgmanager
+ * seccomp
+
+To run recent versions of various distributions, including Ubuntu, LXCFS
+0.8 or higher should also be installed.

=== added file 'src/github.com/lxc/lxd/specs/rest-api.md'
--- src/github.com/lxc/lxd/specs/rest-api.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/rest-api.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1505 @@
+# Introduction
+All the communications between LXD and its clients happen using a
+RESTful API over HTTP which is then encapsulated over either SSL for
+remote operations or a unix socket for local operations.
+
+Not all of the REST interface requires authentication:
+
+ * GET to / is allowed for everyone (lists the API endpoints)
+ * GET to /1.0 is allowed for everyone (but result varies)
+ * POST to /1.0/certificates is allowed for everyone with a client certificate
+ * GET to /1.0/images/\* is allowed for everyone but only returns public images for unauthenticated users
+
+Unauthenticated endpoints are clearly identified as such below.
+
+# API versioning
+The list of supported major API versions can be retrieved using GET /.
+
+The major API version is only bumped when the API breaks backward
+compatibility.
+
+Feature additions done without breaking backward compatibility only
+result in additions to api\_extensions which can be used by the client
+to check if a given feature is supported by the server.
+
+# Return values
+There are three standard return types:
+ * Standard return value
+ * Background operation
+ * Error
+
+### Standard return value
+For a standard synchronous operation, the following dict is returned:
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "metadata": {}                          # Extra resource/action specific metadata
+    }
+
+HTTP code must be 200.
+
+### Background operation
+When a request results in a background operation, the HTTP code is set to 202 (Accepted)
+and the Location HTTP header is set to the operation URL.
+
+The body is a dict with the following structure:
+
+    {
+        "type": "async",
+        "status": "OK",
+        "status_code": 100,
+        "operation": "/1.0/containers/<id>",                    # URL to the background operation
+        "metadata": {}                                          # Operation metadata (see below)
+    }
+
+The operation metadata structure looks like:
+
+    {
+        "id": "a40f5541-5e98-454f-b3b6-8a51ef5dbd3c",           # UUID of the operation
+        "class": "websocket",                                   # Class of the operation (task, websocket or token)
+        "created_at": "2015-11-17T22:32:02.226176091-05:00",    # When the operation was created
+        "updated_at": "2015-11-17T22:32:02.226176091-05:00",    # Last time the operation was updated
+        "status": "Running",                                    # String version of the operation's status
+        "status_code": 103,                                     # Integer version of the operation's status (use this rather than status)
+        "resources": {                                          # Dictionary of resource types (container, snapshots, images) and affected resources
+          "containers": [
+            "/1.0/containers/test"
+          ]
+        },
+        "metadata": {                                           # Metadata specific to the operation in question (in this case, exec)
+          "fds": {
+            "0": "2a4a97af81529f6608dca31f03a7b7e47acc0b8dc6514496eb25e325f9e4fa6a",
+            "control": "5b64c661ef313b423b5317ba9cb6410e40b705806c28255f601c0ef603f079a7"
+          }
+        },
+        "may_cancel": false,                                    # Whether the operation can be canceled (DELETE over REST)
+        "err": ""                                               # The error string should the operation have failed
+    }
+
+The body is mostly provided as a user-friendly way of seeing what's
+going on without having to pull the target operation, all information in
+the body can also be retrieved from the background operation URL.
+
+### Error
+There are various situations in which something may immediately go
+wrong; in those cases, the following return value is used:
+
+    {
+        "type": "error",
+        "error": "Failure",
+        "error_code": 400,
+        "metadata": {}                          # More details about the error
+    }
+
+HTTP code must be one of 400, 401, 403, 404, 409, 412 or 500.
+
+# Status codes
+The LXD REST API often has to return status information, be that the
+reason for an error, the current state of an operation or the state of
+the various resources it exports.
+
+To make it simple to debug, all of those are always doubled. There is a
+numeric representation of the state which is guaranteed never to change
+and can be relied on by API clients. Then there is a text version meant
+to make it easier for people manually using the API to figure out what's
+happening.
+
+In most cases, those will be called status and status\_code, the former
+being the user-friendly string representation and the latter the fixed
+numeric value.
+
+The codes are always 3 digits, with the following ranges:
+ * 100 to 199: resource state (started, stopped, ready, ...)
+ * 200 to 399: positive action result
+ * 400 to 599: negative action result
+ * 600 to 999: future use
+
+## List of current status codes
+
+Code | Meaning
+:--- | :------
+100 | Operation created
+101 | Started
+102 | Stopped
+103 | Running
+104 | Cancelling
+105 | Pending
+106 | Starting
+107 | Stopping
+108 | Aborting
+109 | Freezing
+110 | Frozen
+111 | Thawed
+200 | Success
+400 | Failure
+401 | Cancelled
+
+
+# Recursion
+To optimize queries of large lists, recursion is implemented for collections.
+A "recursion" argument can be passed to a GET query against a collection.
+
+The default value is 0 which means that collection member URLs are
+returned. Setting it to 1 will have those URLs be replaced by the object
+they point to (typically a dict).
+
+Recursion is implemented by simply replacing any pointer to a job (URL)
+by the object itself.
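+
+For example (a sketch; the container names are made up), a client wanting the
+full container objects rather than their URLs would query:
+
+    GET /1.0/containers?recursion=1
+
+and get back a list of container dicts instead of a list of
+"/1.0/containers/<name>" URLs.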
+
+# Async operations
+Any operation which may take more than a second to complete must be done
+in the background, returning a background operation ID to the client.
+
+The client will then be able to either poll for a status update or wait
+for a notification using the long-poll API.
+
+# Notifications
+A websocket based API is available for notifications, different notification
+types exist to limit the traffic going to the client.
+
+It's recommended that the client always subscribes to the operations
+notification type before triggering remote operations so that it doesn't
+have to then poll for their status.
+
+# API structure
+ * /
+ * /1.0
+ * /1.0/certificates
+ * /1.0/certificates/\<fingerprint\>
+ * /1.0/containers
+ * /1.0/containers/\<name\>
+ * /1.0/containers/\<name\>/exec
+ * /1.0/containers/\<name\>/files
+ * /1.0/containers/\<name\>/snapshots
+ * /1.0/containers/\<name\>/snapshots/\<name\>
+ * /1.0/containers/\<name\>/state
+ * /1.0/containers/\<name\>/logs
+ * /1.0/containers/\<name\>/logs/\<logfile\>
+ * /1.0/events
+ * /1.0/images
+ * /1.0/images/\<fingerprint\>
+ * /1.0/images/\<fingerprint\>/export
+ * /1.0/images/aliases
+ * /1.0/images/aliases/\<name\>
+ * /1.0/networks
+ * /1.0/networks/\<name\>
+ * /1.0/operations
+ * /1.0/operations/\<uuid\>
+ * /1.0/operations/\<uuid\>/wait
+ * /1.0/operations/\<uuid\>/websocket
+ * /1.0/profiles
+ * /1.0/profiles/\<name\>
+
+# API details
+## /
+### GET
+ * Description: List of supported APIs
+ * Authentication: guest
+ * Operation: sync
+ * Return: list of supported API endpoint URLs
+
+Return value:
+
+    [
+        "/1.0"
+    ]
+
+## /1.0/
+### GET
+ * Description: Server configuration and environment information
+ * Authentication: guest, untrusted or trusted
+ * Operation: sync
+ * Return: Dict representing server state
+
+Return value (if trusted):
+
+    {
+        "api_extensions": [],                   # List of API extensions added after the API was marked stable
+        "api_status": "development",            # API implementation status (one of development, stable or deprecated)
+        "api_version": "1.0",                   # The API version as a string
+        "auth": "trusted",                      # Authentication state, one of "guest", "untrusted" or "trusted"
+        "config": {                             # Host configuration
+            "core.trust_password": true,
+            "core.https_address": "[::]:8443"
+        },
+        "environment": {                        # Various information about the host (OS, kernel, ...)
+            "addresses": [
+                "1.2.3.4:8443",
+                "[1234::1234]:8443"
+            ],
+            "architectures": [
+                "x86_64",
+                "i686"
+            ],
+            "certificate": "PEM certificate",
+            "driver": "lxc",
+            "driver_version": "1.0.6",
+            "kernel": "Linux",
+            "kernel_architecture": "x86_64",
+            "kernel_version": "3.16",
+            "server": "lxd",
+            "server_pid": 10224,
+            "server_version": "0.8.1",
+            "storage": "btrfs",
+            "storage_version": "3.19"
+        },
+        "public": false                         # Whether the server should be treated as a public (read-only) remote by the client
+    }
+
+Return value (if guest or untrusted):
+
+    {
+        "api_extensions": [],                   # List of API extensions added after the API was marked stable
+        "api_status": "development",            # API implementation status (one of development, stable or deprecated)
+        "api_version": "1.0",                   # The API version as a string
+        "auth": "guest",                        # Authentication state, one of "guest", "untrusted" or "trusted"
+        "public": false                         # Whether the server should be treated as a public (read-only) remote by the client
+    }
+
+### PUT
+ * Description: Updates the server configuration or other properties
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (replaces any existing config with the provided one):
+
+    {
+        "config": {
+            "core.trust_password": "my-new-password",
+            "storage.zfs_pool_name": "lxd"
+        }
+    }
+
+## /1.0/certificates
+### GET
+ * Description: list of trusted certificates
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of URLs for trusted certificates
+
+Return:
+
+    [
+        "/1.0/certificates/3ee64be3c3c7d617a7470e14f2d847081ad467c8c26e1caad841c8f67f7c7b09"
+    ]
+
+### POST
+ * Description: add a new trusted certificate
+ * Authentication: trusted or untrusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "type": "client",                       # Certificate type (keyring), currently only client
+        "certificate": "PEM certificate",       # If provided, a valid x509 certificate. If not, the client certificate of the connection will be used
+        "name": "foo",                          # An optional name for the certificate. If nothing is provided, the host in the TLS header for the request is used.
+        "password": "server-trust-password"     # The trust password for that server (only required if untrusted)
+    }
+
+## /1.0/certificates/\<fingerprint\>
+### GET
+ * Description: trusted certificate information
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing a trusted certificate
+
+Output:
+
+    {
+        "type": "client",
+        "certificate": "PEM certificate",
+        "fingerprint": "SHA256 Hash of the raw certificate"
+    }
+
+### DELETE
+ * Description: Remove a trusted certificate
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+HTTP code for this should be 202 (Accepted).
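+
+As a rough Go sketch of the certificate POST above (reusing the assumed
+client and baseURL from the first sketch, plus the bytes, encoding/json,
+fmt and net/http imports; the function name is ours):
+
+    // addTrust asks the server to trust the connection's client
+    // certificate, authenticating with the server's trust password.
+    func addTrust(client *http.Client, baseURL, password string) error {
+        body, err := json.Marshal(map[string]string{
+            "type":     "client",   // currently the only certificate type
+            "password": password,   // required when the caller is untrusted
+        })
+        if err != nil {
+            return err
+        }
+
+        resp, err := client.Post(baseURL+"/1.0/certificates",
+            "application/json", bytes.NewReader(body))
+        if err != nil {
+            return err
+        }
+        defer resp.Body.Close()
+
+        // Sync operations report success with HTTP 200.
+        if resp.StatusCode != http.StatusOK {
+            return fmt.Errorf("unexpected status: %s", resp.Status)
+        }
+        return nil
+    }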
+ +## /1.0/containers +### GET + * Description: List of containers + * Authentication: trusted + * Operation: sync + * Return: list of URLs for containers this server publishes + +Return value: + + [ + "/1.0/containers/blah", + "/1.0/containers/blah1" + ] + +### POST + * Description: Create a new container + * Authentication: trusted + * Operation: async + * Return: background operation or standard error + +Input (container based on a local image with the "ubuntu/devel" alias): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. + "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" + "alias": "ubuntu/devel"}, # Name of the alias + } + +Input (container based on a local image identified by its fingerprint): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. + "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" + "fingerprint": "SHA-256"}, # Fingerprint + } + +Input (container based on most recent match based on image properties): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. + "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" + "properties": { # Properties + "os": "ubuntu", + "release": "14.04", + "architecture": "x86_64" + }}, + } + +Input (container without a pre-populated rootfs, useful when attaching to an existing one): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. + "source": {"type": "none"}, # Can be: "image", "migration", "copy" or "none" + } + +Input (using a public remote image): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. + "source": {"type": "image", # Can be: "image", "migration", "copy" or "none" + "mode": "pull", # One of "local" (default) or "pull" + "server": "https://10.0.2.3:8443", # Remote server (pull mode only) + "protocol": "lxd", # Protocol (one of lxd or simplestreams, defaults to lxd) + "certificate": "PEM certificate", # Optional PEM certificate. If not mentioned, system CA is used. + "alias": "ubuntu/devel"}, # Name of the alias + } + + +Input (using a private remote image after having obtained a secret for that image): + + { + "name": "my-new-container", # 64 chars max, ASCII, no slash, no colon and no comma + "architecture": "x86_64", + "profiles": ["default"], # List of profiles + "ephemeral": true, # Whether to destroy the container on shutdown + "config": {"limits.cpu": "2"}, # Config override. 
+        "source": {"type": "image",                                         # Can be: "image", "migration", "copy" or "none"
+                   "mode": "pull",                                          # One of "local" (default) or "pull"
+                   "server": "https://10.0.2.3:8443",                       # Remote server (pull mode only)
+                   "secret": "my-secret-string",                            # Secret to use to retrieve the image (pull mode only)
+                   "certificate": "PEM certificate",                        # Optional PEM certificate. If not mentioned, system CA is used.
+                   "alias": "ubuntu/devel"},                                # Name of the alias
+    }
+
+Input (using a remote container, sent over the migration websocket):
+
+    {
+        "name": "my-new-container",                                         # 64 chars max, ASCII, no slash, no colon and no comma
+        "architecture": "x86_64",
+        "profiles": ["default"],                                            # List of profiles
+        "ephemeral": true,                                                  # Whether to destroy the container on shutdown
+        "config": {"limits.cpu": "2"},                                      # Config override.
+        "source": {"type": "migration",                                     # Can be: "image", "migration", "copy" or "none"
+                   "mode": "pull",                                          # Only "pull" is supported for now
+                   "operation": "https://10.0.2.3:8443/1.0/operations/",    # Full URL to the remote operation (pull mode only)
+                   "certificate": "PEM certificate",                        # Optional PEM certificate. If not mentioned, system CA is used.
+                   "base-image": "",                                        # Optional, the base image the container was created from
+                   "secrets": {"control": "my-secret-string",               # Secrets to use when talking to the migration source
+                               "criu": "my-other-secret",
+                               "fs": "my third secret"}},
+    }
+
+Input (using a local container):
+
+    {
+        "name": "my-new-container",                                         # 64 chars max, ASCII, no slash, no colon and no comma
+        "architecture": "x86_64",
+        "profiles": ["default"],                                            # List of profiles
+        "ephemeral": true,                                                  # Whether to destroy the container on shutdown
+        "config": {"limits.cpu": "2"},                                      # Config override.
+        "source": {"type": "copy",                                          # Can be: "image", "migration", "copy" or "none"
+                   "source": "my-old-container"}                            # Name of the source container
+    }
+
+
+## /1.0/containers/\<name\>
+### GET
+ * Description: Container information
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict of the container configuration and current state.
+
+Output:
+
+    {
+        "architecture": "x86_64",
+        "config": {
+            "limits.cpu": "3",
+            "volatile.base_image": "97d97a3d1d053840ca19c86cdd0596cf1be060c5157d31407f2a4f9f350c78cc",
+            "volatile.eth0.hwaddr": "00:16:3e:1c:94:38"
+        },
+        "created_at": "2016-02-16T01:05:05Z",
+        "devices": {
+            "rootfs": {
+                "path": "/",
+                "type": "disk"
+            }
+        },
+        "ephemeral": false,
+        "expanded_config": {    # the result of expanding profiles and adding the container's local config
+            "limits.cpu": "3",
+            "volatile.base_image": "97d97a3d1d053840ca19c86cdd0596cf1be060c5157d31407f2a4f9f350c78cc",
+            "volatile.eth0.hwaddr": "00:16:3e:1c:94:38"
+        },
+        "expanded_devices": {   # the result of expanding profiles and adding the container's local devices
+            "eth0": {
+                "name": "eth0",
+                "nictype": "bridged",
+                "parent": "lxcbr0",
+                "type": "nic"
+            },
+            "root": {
+                "path": "/",
+                "type": "disk"
+            }
+        },
+        "name": "my-container",
+        "profiles": [
+            "default"
+        ],
+        "stateful": false,      # If true, indicates that the container has some stored state that can be restored on startup
+        "status": "Running",
+        "status_code": 103
+    }
+
+
+### PUT
+ * Description: update container configuration or restore snapshot
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (update container configuration):
+
+    {
+        "architecture": "x86_64",
+        "config": {
+            "limits.cpu": "4",
+            "volatile.base_image": "97d97a3d1d053840ca19c86cdd0596cf1be060c5157d31407f2a4f9f350c78cc",
+            "volatile.eth0.hwaddr": "00:16:3e:1c:94:38"
+        },
+        "devices": {
+            "rootfs": {
+                "path": "/",
+                "type": "disk"
+            }
+        },
+        "ephemeral": true,
+        "profiles": [
+            "default"
+        ]
+    }
+
+
+Takes the same structure as that returned by GET but doesn't allow name
+changes (see POST below) or changes to the status sub-dict (since that's
+read-only).
+
+Input (restore snapshot):
+
+    {
+        "restore": "snapshot-name"
+    }
+
+### POST
+ * Description: used to rename/migrate the container
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Renaming to an existing name must return the 409 (Conflict) HTTP code.
+
+Input (simple rename):
+
+    {
+        "name": "new-name"
+    }
+
+Input (migration across lxd instances):
+
+    {
+        "migration": true
+    }
+
+The migration does not actually start until someone (i.e. another lxd instance)
+connects to all the websockets and begins negotiation with the source.
+
+Output in metadata section (for migration):
+
+    {
+        "control": "secret1",   # Migration control socket
+        "criu": "secret2",      # State transfer socket (only if live migrating)
+        "fs": "secret3"         # Filesystem transfer socket
+    }
+
+These are the secrets that should be passed to the create call.
+
+### DELETE
+ * Description: remove the container
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (none at present):
+
+    {
+    }
+
+HTTP code for this should be 202 (Accepted).
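+
+Tying the async pieces together, here is a hedged Go sketch (same
+assumed client, baseURL and imports as the earlier examples; the
+function name is ours) that creates a container from the local
+"ubuntu/devel" alias and then blocks on the operation's /wait URL:
+
+    // createAndWait POSTs a container definition, then waits for the
+    // returned background operation to reach a final state.
+    func createAndWait(client *http.Client, baseURL string) error {
+        input := map[string]interface{}{
+            "name":   "my-new-container",
+            "source": map[string]string{"type": "image", "alias": "ubuntu/devel"},
+        }
+        body, err := json.Marshal(input)
+        if err != nil {
+            return err
+        }
+
+        resp, err := client.Post(baseURL+"/1.0/containers",
+            "application/json", bytes.NewReader(body))
+        if err != nil {
+            return err
+        }
+        defer resp.Body.Close()
+
+        // The async wrapper carries the background operation URL.
+        var op struct {
+            Operation string `json:"operation"`
+        }
+        if err := json.NewDecoder(resp.Body).Decode(&op); err != nil {
+            return err
+        }
+
+        // Block until the operation finishes (30s timeout).
+        wait, err := client.Get(baseURL + op.Operation + "/wait?timeout=30")
+        if err != nil {
+            return err
+        }
+        defer wait.Body.Close()
+        return nil
+    }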
+
+## /1.0/containers/\<name\>/state
+### GET
+ * Description: current state
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing current state
+
+    {
+        "type": "sync",
+        "status": "Success",
+        "status_code": 200,
+        "metadata": {
+            "status": "Running",
+            "status_code": 103,
+            "disk": {
+                "root": {
+                    "usage": 422330368
+                }
+            },
+            "memory": {
+                "usage": 51126272,
+                "usage_peak": 70246400,
+                "swap_usage": 0,
+                "swap_usage_peak": 0
+            },
+            "network": {
+                "eth0": {
+                    "addresses": [
+                        {
+                            "family": "inet",
+                            "address": "10.0.3.27",
+                            "netmask": "24",
+                            "scope": "global"
+                        },
+                        {
+                            "family": "inet6",
+                            "address": "fe80::216:3eff:feec:65a8",
+                            "netmask": "64",
+                            "scope": "link"
+                        }
+                    ],
+                    "counters": {
+                        "bytes_received": 33942,
+                        "bytes_sent": 30810,
+                        "packets_received": 402,
+                        "packets_sent": 178
+                    },
+                    "hwaddr": "00:16:3e:ec:65:a8",
+                    "host_name": "vethBWTSU5",
+                    "mtu": 1500,
+                    "state": "up",
+                    "type": "broadcast"
+                },
+                "lo": {
+                    "addresses": [
+                        {
+                            "family": "inet",
+                            "address": "127.0.0.1",
+                            "netmask": "8",
+                            "scope": "local"
+                        },
+                        {
+                            "family": "inet6",
+                            "address": "::1",
+                            "netmask": "128",
+                            "scope": "local"
+                        }
+                    ],
+                    "counters": {
+                        "bytes_received": 86816,
+                        "bytes_sent": 86816,
+                        "packets_received": 1226,
+                        "packets_sent": 1226
+                    },
+                    "hwaddr": "",
+                    "host_name": "",
+                    "mtu": 65536,
+                    "state": "up",
+                    "type": "loopback"
+                },
+                "lxcbr0": {
+                    "addresses": [
+                        {
+                            "family": "inet",
+                            "address": "10.0.3.1",
+                            "netmask": "24",
+                            "scope": "global"
+                        },
+                        {
+                            "family": "inet6",
+                            "address": "fe80::68d4:87ff:fe40:7769",
+                            "netmask": "64",
+                            "scope": "link"
+                        }
+                    ],
+                    "counters": {
+                        "bytes_received": 0,
+                        "bytes_sent": 570,
+                        "packets_received": 0,
+                        "packets_sent": 7
+                    },
+                    "hwaddr": "6a:d4:87:40:77:69",
+                    "host_name": "",
+                    "mtu": 1500,
+                    "state": "up",
+                    "type": "broadcast"
+                },
+                "zt0": {
+                    "addresses": [
+                        {
+                            "family": "inet",
+                            "address": "29.17.181.59",
+                            "netmask": "7",
+                            "scope": "global"
+                        },
+                        {
+                            "family": "inet6",
+                            "address": "fd80:56c2:e21c:0:199:9379:e711:b3e1",
+                            "netmask": "88",
+                            "scope": "global"
+                        },
+                        {
+                            "family": "inet6",
+                            "address": "fe80::79:e7ff:fe0d:5123",
+                            "netmask": "64",
+                            "scope": "link"
+                        }
+                    ],
+                    "counters": {
+                        "bytes_received": 0,
+                        "bytes_sent": 806,
+                        "packets_received": 0,
+                        "packets_sent": 9
+                    },
+                    "hwaddr": "02:79:e7:0d:51:23",
+                    "host_name": "",
+                    "mtu": 2800,
+                    "state": "up",
+                    "type": "broadcast"
+                }
+            },
+            "pid": 13663,
+            "processes": 32
+        }
+    }
+
+
+### PUT
+ * Description: change the container state
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input:
+
+    {
+        "action": "stop",       # State change action (stop, start, restart, freeze or unfreeze)
+        "timeout": 30,          # A timeout after which the state change is considered as failed
+        "force": true,          # Force the state change (currently only valid for stop and restart where it means killing the container)
+        "stateful": true        # Whether to store or restore runtime state before stopping or starting (only valid for stop and start, defaults to false)
+    }
+
+## /1.0/containers/\<name\>/files
+### GET (?path=/path/inside/the/container)
+ * Description: download a file from the container
+ * Authentication: trusted
+ * Operation: sync
+ * Return: Raw file or standard error
+
+The following headers will be set (on top of standard size and mimetype headers):
+ * X-LXD-uid: 0
+ * X-LXD-gid: 0
+ * X-LXD-mode: 0700
+
+This is designed to be easily usable from the command line or even a web
+browser.
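+
+Going back to the state-change PUT above, a hedged Go sketch of a
+forced stop might look like this (same assumed client, baseURL and
+imports as before; the function name is ours):
+
+    // stopContainer asks LXD to stop a container, forcefully if needed,
+    // and returns the background operation URL.
+    func stopContainer(client *http.Client, baseURL, name string) (string, error) {
+        body, err := json.Marshal(map[string]interface{}{
+            "action":  "stop",
+            "timeout": 30,   // seconds before the change is considered failed
+            "force":   true, // kill the container if a clean stop fails
+        })
+        if err != nil {
+            return "", err
+        }
+
+        req, err := http.NewRequest("PUT",
+            baseURL+"/1.0/containers/"+name+"/state", bytes.NewReader(body))
+        if err != nil {
+            return "", err
+        }
+        req.Header.Set("Content-Type", "application/json")
+
+        resp, err := client.Do(req)
+        if err != nil {
+            return "", err
+        }
+        defer resp.Body.Close()
+
+        var op struct {
+            Operation string `json:"operation"`
+        }
+        if err := json.NewDecoder(resp.Body).Decode(&op); err != nil {
+            return "", err
+        }
+        return op.Operation, nil
+    }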
+
+### POST (?path=/path/inside/the/container)
+ * Description: upload a file to the container
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+ * Standard http file upload
+
+The following headers may be set by the client:
+ * X-LXD-uid: 0
+ * X-LXD-gid: 0
+ * X-LXD-mode: 0700
+
+This is designed to be easily usable from the command line or even a web
+browser.
+
+## /1.0/containers/\<name\>/snapshots
+### GET
+ * Description: List of snapshots
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of URLs for snapshots for this container
+
+Return value:
+
+    [
+        "/1.0/containers/blah/snapshots/snap0"
+    ]
+
+### POST
+ * Description: create a new snapshot
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input:
+
+    {
+        "name": "my-snapshot",          # Name of the snapshot
+        "stateful": true                # Whether to include state too
+    }
+
+## /1.0/containers/\<name\>/snapshots/\<name\>
+### GET
+ * Description: Snapshot information
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing the snapshot
+
+Return:
+
+    {
+        "created_at": "2016-02-16T01:05:05Z",
+        "name": "my-snapshot",
+        "stateful": true
+    }
+
+### POST
+ * Description: used to rename/migrate the snapshot
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (rename the snapshot):
+
+    {
+        "name": "new-name"
+    }
+
+Input (set up the migration source):
+
+    {
+        "migration": true
+    }
+
+Return (with migration=true):
+
+    {
+        "control": "secret1",           # Migration control socket
+        "fs": "secret3"                 # Filesystem transfer socket
+    }
+
+Renaming to an existing name must return the 409 (Conflict) HTTP code.
+
+### DELETE
+ * Description: remove the snapshot
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (none at present):
+
+    {
+    }
+
+HTTP code for this should be 202 (Accepted).
+
+## /1.0/containers/\<name\>/exec
+### POST
+ * Description: run a remote command
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation + optional websocket information or standard error
+
+Input (run bash):
+
+    {
+        "command": ["/bin/bash"],       # Command and arguments
+        "environment": {},              # Optional extra environment variables to set
+        "wait-for-websocket": false,    # Whether to wait for a connection before starting the process
+        "interactive": true             # Whether to allocate a pts device instead of PIPEs
+    }
+
+`wait-for-websocket` indicates whether the operation should block and wait for
+a websocket connection to start (so that users can pass stdin and read
+stdout), or simply run to completion with /dev/null as stdin and stdout.
+
+If interactive is set to true, a single websocket is returned and is mapped to a
+pts device for stdin, stdout and stderr of the exec'd process.
+
+If interactive is set to false (default), three pipes will be set up, one
+for each of stdin, stdout and stderr.
+
+Depending on the state of the interactive flag, one or three different
+websocket/secret pairs will be returned, which are valid for connecting to this
+operation's /websocket endpoint.
+
+Return (with wait-for-websocket=true and interactive=false):
+
+    {
+        "fds": {
+            "0": "f5b6c760c0aa37a6430dd2a00c456430282d89f6e1661a077a926ed1bf3d1c21",
+            "1": "464dcf9f8fdce29d0d6478284523a9f26f4a31ae365d94cd38bac41558b797cf",
+            "2": "25b70415b686360e3b03131e33d6d94ee85a7f19b0f8d141d6dca5a1fc7b00eb",
+            "control": "20c479d9532ab6d6c3060f6cdca07c1f177647c9d96f0c143ab61874160bd8a5"
+        }
+    }
+
+Return (with wait-for-websocket=true and interactive=true):
+
+    {
+        "fds": {
+            "0": "f5b6c760c0aa37a6430dd2a00c456430282d89f6e1661a077a926ed1bf3d1c21",
+            "control": "20c479d9532ab6d6c3060f6cdca07c1f177647c9d96f0c143ab61874160bd8a5"
+        }
+    }
+
+
+When the exec command finishes, its exit status is available from the
+operation's metadata:
+
+    {
+        "return": 0
+    }
+
+## /1.0/containers/\<name\>/logs
+### GET
+* Description: Returns a list of the log files available for this container.
+  Note that this works on containers that have been deleted (or were never
+  created) to enable people to get logs for failed creations.
+* Authentication: trusted
+* Operation: Sync
+* Return: a list of the available log files
+
+Return:
+
+    [
+        "/1.0/containers/blah/logs/forkstart.log",
+        "/1.0/containers/blah/logs/lxc.conf",
+        "/1.0/containers/blah/logs/lxc.log"
+    ]
+
+## /1.0/containers/\<name\>/logs/\<logfile\>
+### GET
+* Description: returns the contents of a particular log file.
+* Authentication: trusted
+* Operation: N/A
+* Return: the contents of the log file
+
+### DELETE
+* Description: delete a particular log file.
+* Authentication: trusted
+* Operation: Sync
+* Return: empty response or standard error
+
+## /1.0/events
+This URL isn't a real REST API endpoint; instead, doing a GET query on it
+will upgrade the connection to a websocket on which notifications will
+be sent.
+
+### GET (?type=operation,logging)
+ * Description: websocket upgrade
+ * Authentication: trusted
+ * Operation: sync
+ * Return: none (never ending flow of events)
+
+Supported arguments are:
+ * type: comma separated list of notifications to subscribe to (defaults to all)
+
+The notification types are:
+ * operation (notification about creation, updates and termination of all background operations)
+ * logging (every log entry from the server)
+
+This never returns.
+
+Each notification is sent as a separate JSON dict:
+
+    {
+        "timestamp": "2015-06-09T19:07:24.379615253-06:00",     # Current timestamp
+        "type": "operation",                                    # Notification type
+        "metadata": {}                                          # Extra resource or type specific metadata
+    }
+
+    {
+        "timestamp": "2016-02-17T11:44:28.572721913-05:00",
+        "type": "logging",
+        "metadata": {
+            "context": {
+                "ip": "@",
+                "method": "GET",
+                "url": "/1.0/containers/xen/snapshots"
+            },
+            "level": "info",
+            "message": "handling"
+        }
+    }
+
+
+## /1.0/images
+### GET
+ * Description: list of images (public or private)
+ * Authentication: guest or trusted
+ * Operation: sync
+ * Return: list of URLs for images this server publishes
+
+Return:
+
+    [
+        "/1.0/images/54c8caac1f61901ed86c68f24af5f5d3672bdc62c71d04f06df3a59e95684473",
+        "/1.0/images/97d97a3d1d053840ca19c86cdd0596cf1be060c5157d31407f2a4f9f350c78cc",
+        "/1.0/images/a49d26ce5808075f5175bf31f5cb90561f5023dcd408da8ac5e834096d46b2d8",
+        "/1.0/images/c9b6e738fae75286d52f497415463a8ecc61bbcb046536f220d797b0e500a41f"
+    ]
+
+### POST
+ * Description: create and publish a new image
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (one of):
+ * Standard http file upload
+ * Source image dictionary (transfers a remote image)
+ * Source container dictionary (makes an image out of a local container)
+ * Remote image URL dictionary (downloads a remote image)
+
+In the http file upload case, the following headers may be set by the client:
+ * X-LXD-fingerprint: SHA-256 (if set, uploaded file must match)
+ * X-LXD-filename: FILENAME (used for export)
+ * X-LXD-public: true/false (defaults to false)
+ * X-LXD-properties: URL-encoded key value pairs without duplicate keys (optional properties)
+
+In the source image case, the following dict must be used:
+
+    {
+        "filename": filename,                   # Used for export (optional)
+        "public": true,                         # Whether the image can be downloaded by untrusted users (defaults to false)
+        "properties": {                         # Image properties (optional, applied on top of source properties)
+            "os": "Ubuntu"
+        },
+        "source": {
+            "type": "image",
+            "mode": "pull",                     # Only pull is supported for now
+            "server": "https://10.0.2.3:8443",  # Remote server (pull mode only)
+            "protocol": "lxd",                  # Protocol (one of lxd or simplestreams, defaults to lxd)
+            "secret": "my-secret-string",       # Secret (pull mode only, private images only)
+            "certificate": "PEM certificate",   # Optional PEM certificate. If not mentioned, system CA is used.
+            "fingerprint": "SHA256",            # Fingerprint of the image (must be set if alias isn't)
+            "alias": "ubuntu/devel"             # Name of the alias (must be set if fingerprint isn't)
+        }
+    }
+
+In the source container case, the following dict must be used:
+
+    {
+        "filename": filename,                   # Used for export (optional)
+        "public": true,                         # Whether the image can be downloaded by untrusted users (defaults to false)
+        "properties": {                         # Image properties (optional)
+            "os": "Ubuntu"
+        },
+        "source": {
+            "type": "container",                # One of "container" or "snapshot"
+            "name": "abc"
+        }
+    }
+
+In the remote image URL case, the following dict must be used:
+
+    {
+        "filename": filename,                   # Used for export (optional)
+        "public": true,                         # Whether the image can be downloaded by untrusted users (defaults to false)
+        "properties": {                         # Image properties (optional)
+            "os": "Ubuntu"
+        },
+        "source": {
+            "type": "url",
+            "url": "https://www.some-server.com/image"      # URL for the image
+        }
+    }
+
+
+After the input is received by LXD, a background operation is started
+which will add the image to the store and possibly do some backend
+filesystem-specific optimizations.
+
+## /1.0/images/\<fingerprint\>
+### GET (optional ?secret=SECRET)
+ * Description: Image description and metadata
+ * Authentication: guest or trusted
+ * Operation: sync
+ * Return: dict representing an image's properties
+
+Output:
+
+    {
+        "aliases": [
+            {
+                "name": "trusty",
+                "description": ""
+            }
+        ],
+        "architecture": "x86_64",
+        "auto_update": true,
+        "cached": false,
+        "fingerprint": "54c8caac1f61901ed86c68f24af5f5d3672bdc62c71d04f06df3a59e95684473",
+        "filename": "ubuntu-trusty-14.04-amd64-server-20160201.tar.xz",
+        "properties": {
+            "architecture": "x86_64",
+            "description": "Ubuntu 14.04 LTS server (20160201)",
+            "os": "ubuntu",
+            "release": "trusty"
+        },
+        "update_source": {
+            "server": "https://10.1.2.4:8443",
+            "protocol": "lxd",
+            "certificate": "PEM certificate",
+            "alias": "ubuntu/trusty/amd64"
+        },
+        "public": false,
+        "size": 123792592,
+        "created_at": "2016-02-01T21:07:41Z",
+        "expires_at": "1970-01-01T00:00:00Z",
+        "last_used_at": "1970-01-01T00:00:00Z",
+        "uploaded_at": "2016-02-16T00:44:47Z"
+    }
+
+### DELETE
+ * Description: Remove an image
+ * Authentication: trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input (none at present):
+
+    {
+    }
+
+HTTP code for this should be 202 (Accepted).
+
+### PUT
+ * Description: Updates the image properties
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "auto_update": true,
+        "properties": {
+            "architecture": "x86_64",
+            "description": "Ubuntu 14.04 LTS server (20160201)",
+            "os": "ubuntu",
+            "release": "trusty"
+        },
+        "public": true
+    }
+
+## /1.0/images/\<fingerprint\>/export
+### GET (optional ?secret=SECRET)
+ * Description: Download the image tarball
+ * Authentication: guest or trusted
+ * Operation: sync
+ * Return: Raw file or standard error
+
+The secret string is required when an untrusted LXD is spawning a new
+container from a private image stored on a different LXD.
+
+Rather than require a trust relationship between the two LXDs, the
+client will POST to /1.0/images/\<fingerprint\>/secret to get a secret
+token which it'll then pass to the target LXD. That target LXD will then
+GET the image as a guest, passing the secret token.
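+
+A hedged Go sketch of the guest-side download just described (same
+assumed client, baseURL and imports as before, plus io/ioutil and
+net/url; the function name is ours and the secret is assumed to have
+been obtained via the /secret endpoint documented below):
+
+    // downloadImage fetches a private image tarball as a guest,
+    // passing the previously obtained secret token.
+    func downloadImage(client *http.Client, baseURL, fingerprint, secret string) ([]byte, error) {
+        resp, err := client.Get(fmt.Sprintf("%s/1.0/images/%s/export?secret=%s",
+            baseURL, fingerprint, url.QueryEscape(secret)))
+        if err != nil {
+            return nil, err
+        }
+        defer resp.Body.Close()
+
+        if resp.StatusCode != http.StatusOK {
+            return nil, fmt.Errorf("export failed: %s", resp.Status)
+        }
+
+        // On success the body is the raw tarball rather than the usual
+        // JSON wrapper.
+        return ioutil.ReadAll(resp.Body)
+    }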
+
+
+## /1.0/images/\<fingerprint\>/secret
+### POST
+ * Description: Generate a random token and tell LXD to expect it to be used by a guest
+ * Authentication: guest or trusted
+ * Operation: async
+ * Return: background operation or standard error
+
+Input:
+
+    {
+    }
+
+Return:
+
+    {
+        "secret": "52e9ec5885562aa24d05d7b4846ebb8b5f1f7bf5cd6e285639b569d9eaf54c9b"
+    }
+
+Standard background operation with "secret" set to the generated secret
+string in metadata.
+
+The secret is automatically invalidated 5s after an image URL using it
+has been accessed. This allows both retrieving the image information and
+then hitting /export with the same secret.
+
+## /1.0/images/aliases
+### GET
+ * Description: list of aliases (public or private based on image visibility)
+ * Authentication: guest or trusted
+ * Operation: sync
+ * Return: list of URLs for aliases this server knows about
+
+Return:
+
+    [
+        "/1.0/images/aliases/sl6",
+        "/1.0/images/aliases/trusty",
+        "/1.0/images/aliases/xenial"
+    ]
+
+### POST
+ * Description: create a new alias
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "description": "The alias description",
+        "target": "SHA-256",
+        "name": "alias-name"
+    }
+
+## /1.0/images/aliases/\<name\>
+### GET
+ * Description: Alias description and target
+ * Authentication: guest or trusted
+ * Operation: sync
+ * Return: dict representing an alias description and target
+
+Output:
+
+    {
+        "name": "test",
+        "description": "my description",
+        "target": "c9b6e738fae75286d52f497415463a8ecc61bbcb046536f220d797b0e500a41f"
+    }
+
+### PUT
+ * Description: Updates the alias target or description
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "description": "New description",
+        "target": "54c8caac1f61901ed86c68f24af5f5d3672bdc62c71d04f06df3a59e95684473"
+    }
+
+### POST
+ * Description: rename an alias
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "name": "new-name"
+    }
+
+Renaming to an existing name must return the 409 (Conflict) HTTP code.
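+
+For completeness, a hedged Go sketch of the alias POST above (same
+assumed client, baseURL and imports as before; the function name and
+description string are ours):
+
+    // createAlias points a new alias name at an image fingerprint.
+    func createAlias(client *http.Client, baseURL, name, target string) error {
+        body, err := json.Marshal(map[string]string{
+            "name":        name,
+            "target":      target, // SHA-256 fingerprint of the image
+            "description": "created from Go",
+        })
+        if err != nil {
+            return err
+        }
+
+        resp, err := client.Post(baseURL+"/1.0/images/aliases",
+            "application/json", bytes.NewReader(body))
+        if err != nil {
+            return err
+        }
+        defer resp.Body.Close()
+
+        if resp.StatusCode != http.StatusOK {
+            return fmt.Errorf("alias creation failed: %s", resp.Status)
+        }
+        return nil
+    }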
+
+### DELETE
+ * Description: Remove an alias
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+## /1.0/networks
+### GET
+ * Description: list of networks
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of URLs for networks that are currently defined on the host
+
+    [
+        "/1.0/networks/eth0",
+        "/1.0/networks/lxcbr0"
+    ]
+
+## /1.0/networks/\<name\>
+### GET
+ * Description: information about a network
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing a network
+
+    {
+        "name": "lxcbr0",
+        "type": "bridge",
+        "used_by": [
+            "/1.0/containers/blah"
+        ]
+    }
+
+## /1.0/operations
+### GET
+ * Description: list of operations
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of URLs for operations that are currently going on/queued
+
+    [
+        "/1.0/operations/c0fc0d0d-a997-462b-842b-f8bd0df82507",
+        "/1.0/operations/092a8755-fd90-4ce4-bf91-9f87d03fd5bc"
+    ]
+
+## /1.0/operations/\<uuid\>
+### GET
+ * Description: background operation
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing a background operation
+
+Return:
+
+    {
+        "id": "b8d84888-1dc2-44fd-b386-7f679e171ba5",
+        "class": "token",                                       # One of "task" (background task), "websocket" (set of websockets and credentials) or "token" (temporary credentials)
+        "created_at": "2016-02-17T16:59:27.237628195-05:00",    # Creation timestamp
+        "updated_at": "2016-02-17T16:59:27.237628195-05:00",    # Last update timestamp
+        "status": "Running",
+        "status_code": 103,
+        "resources": {                                          # List of affected resources
+            "images": [
+                "/1.0/images/54c8caac1f61901ed86c68f24af5f5d3672bdc62c71d04f06df3a59e95684473"
+            ]
+        },
+        "metadata": {                                           # Extra information about the operation (action, target, ...)
+            "secret": "c9209bee6df99315be1660dd215acde4aec89b8e5336039712fc11008d918b0d"
+        },
+        "may_cancel": true,                                     # Whether it's possible to cancel the operation (DELETE)
+        "err": ""
+    }
+
+### DELETE
+ * Description: cancel an operation. Calling this will change the state to "cancelling" rather than actually removing the entry.
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+HTTP code for this should be 202 (Accepted).
+
+## /1.0/operations/\<uuid\>/wait
+### GET (optional ?timeout=30)
+ * Description: Wait for an operation to finish
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict of the operation after it's reached its final state
+
+Input (wait indefinitely for a final state): no argument
+
+Input (similar but times out after 30s): ?timeout=30
+
+## /1.0/operations/\<uuid\>/websocket
+### GET (?secret=SECRET)
+ * Description: This connection is upgraded into a websocket connection
+   speaking the protocol defined by the operation type. For example, in the
+   case of an exec operation, the websocket is the bidirectional pipe for
+   stdin/stdout/stderr to flow to and from the process inside the container.
+   In the case of migration, it will be the primary interface over which the
+   migration information is communicated. The secret here is the one that was
+   provided when the operation was created. Guests are allowed to connect
+   provided they have the right secret.
+ * Authentication: guest or trusted
+ * Operation: sync
+ * Return: websocket stream or standard error
+
+## /1.0/profiles
+### GET
+ * Description: List of configuration profiles
+ * Authentication: trusted
+ * Operation: sync
+ * Return: list of URLs to defined profiles
+
+Return:
+
+    [
+        "/1.0/profiles/default"
+    ]
+
+
+### POST
+ * Description: define a new profile
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "name": "my-profilename",
+        "description": "Some description string",
+        "config": {
+            "limits.memory": "2GB"
+        },
+        "devices": {
+            "kvm": {
+                "type": "unix-char",
+                "path": "/dev/kvm"
+            }
+        }
+    }
+
+## /1.0/profiles/\<name\>
+### GET
+ * Description: profile configuration
+ * Authentication: trusted
+ * Operation: sync
+ * Return: dict representing the profile content
+
+Output:
+
+    {
+        "name": "test",
+        "description": "Some description string",
+        "config": {
+            "limits.memory": "2GB"
+        },
+        "devices": {
+            "kvm": {
+                "path": "/dev/kvm",
+                "type": "unix-char"
+            }
+        }
+    }
+
+### PUT
+ * Description: update the profile
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input:
+
+    {
+        "config": {
+            "limits.memory": "4GB"
+        },
+        "description": "Some description string",
+        "devices": {
+            "kvm": {
+                "path": "/dev/kvm",
+                "type": "unix-char"
+            }
+        }
+    }
+
+Same dict as used for initial creation and coming from GET. The name
+property can't be changed (see POST for that).
+
+
+### POST
+ * Description: rename a profile
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (rename a profile):
+
+    {
+        "name": "new-name"
+    }
+
+
+HTTP return value must be 204 (No content) and Location must point to
+the renamed resource.
+
+Renaming to an existing name must return the 409 (Conflict) HTTP code.
+
+
+### DELETE
+ * Description: remove a profile
+ * Authentication: trusted
+ * Operation: sync
+ * Return: standard return value or standard error
+
+Input (none at present):
+
+    {
+    }
+
+HTTP code for this should be 202 (Accepted).
=== added file 'src/github.com/lxc/lxd/specs/storage-backends.md'
--- src/github.com/lxc/lxd/specs/storage-backends.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/storage-backends.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,66 @@
+# Storage Backends and supported functions
+## Feature comparison
+
+LXD supports using plain dirs, Btrfs, LVM, and ZFS for storage of images and containers.
+Where possible, LXD tries to use the advanced features of each system to optimize operations.
+
+Feature | Directory | Btrfs | LVM | ZFS
+:--- | :--- | :--- | :--- | :---
+Optimized image storage | no | yes | yes | yes
+Optimized container creation | no | yes | yes | yes
+Optimized snapshot creation | no | yes | yes | yes
+Optimized image transfer | no | yes | no | yes
+Optimized container transfer | no | yes | no | yes
+Copy on write | no | yes | yes | yes
+Block based | no | no | yes | no
+Instant cloning | no | yes | yes | yes
+Nesting support | yes | yes | no | no
+Restore from older snapshots (not latest) | yes | yes | yes | no
+Storage quotas | no | yes | no | yes
+
+## Mixed storage
+When switching storage backends after some containers or images already exist, LXD will create any new container
+using the new backend and convert older images to the new backend as needed.
+
+## Non-optimized container transfer
+When the filesystem on the source and target hosts differs or when there is no faster way,
+rsync is used to transfer the container content across.
+
+## Notes
+### Directory
+
+ - The directory backend is the fallback backend when nothing else is configured or detected.
+ - While this backend is fully functional, it's also much slower than
+   all the others due to it having to unpack images or do full copies of
+   containers, snapshots and images.
+
+### Btrfs
+
+ - The btrfs backend is automatically used if /var/lib/lxd is on a btrfs filesystem.
+ - Uses a subvolume per container, image and snapshot, creating btrfs snapshots when creating a new object.
+
+### LVM
+
+ - LXD uses LVM with thinpool support to offer fast, scalable container and image storage.
+ - An LVM VG must be created and then storage.lvm\_vg\_name set to point to it.
+ - If a thinpool doesn't already exist, one will be created; the name of the thinpool can be set with storage.lvm\_thinpool\_name.
+ - Uses LVs for images, then LV snapshots for containers and container snapshots.
+ - The filesystem used for the LVs is ext4.
+ - LVs are created with a default size of 100GiB.
+
+### ZFS
+
+ - LXD can use any zpool or part of a zpool. storage.zfs\_pool\_name must be set to the path to be used.
+ - ZFS doesn't have to (and shouldn't) be mounted on /var/lib/lxd.
+ - Uses ZFS filesystems for images, then snapshots and clones to create containers and snapshots.
+ - Due to the way copy-on-write works in ZFS, parent filesystems can't
+   be removed until all children are gone. As a result, LXD will
+   automatically rename any removed but still referenced object to a random
+   deleted/ path and keep it until such time as the references are gone and it
+   can safely be removed.
+ - ZFS as it is today doesn't support delegating part of a pool to a
+   container user. Upstream is actively working on this.
+ - ZFS doesn't support restoring from snapshots other than the latest
+   one. You can however create new containers from older snapshots, which
+   makes it possible to confirm the snapshot is indeed what you want to
+   restore before you remove the newer snapshots.
=== added file 'src/github.com/lxc/lxd/specs/userns-idmap.md'
--- src/github.com/lxc/lxd/specs/userns-idmap.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/specs/userns-idmap.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,99 @@
+# Introduction
+
+LXD runs safe containers. This is achieved mostly through the use of
+user namespaces which make it possible to run containers unprivileged,
+greatly limiting the attack surface.
+
+User namespaces work by mapping a set of uids and gids on the host to a
+set of uids and gids in the container.
+
+
+For example, we can define that the host uids and gids from 100000 to
+165535 may be used by LXD and should be mapped to uid/gid 0 through
+65535 in the container.
+
+As a result a process running as uid 0 in the container will actually be
+running as uid 100000.
+
+Allocations should always be of at least 65536 uids and gids to cover
+the POSIX range including root (0) and nobody (65534).
+
+
+To simplify things, at this point, we will only deal with identical
+allocations for uids and gids and only support a single contiguous range
+per container.
+
+# Kernel support
+User namespaces require a kernel >= 3.12. LXD will start even on older
+kernels but will refuse to start containers.
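+
+To make the arithmetic of the introduction's example concrete, here is a
+tiny illustrative Go helper (the function name and error handling are
+ours, not LXD's):
+
+    // hostUID translates a container uid into the corresponding host
+    // uid under a single contiguous mapping such as the one above
+    // (base 100000, size 65536).
+    func hostUID(base, size, containerUID int) (int, error) {
+        if containerUID < 0 || containerUID >= size {
+            return -1, fmt.Errorf("uid %d is outside the mapped range", containerUID)
+        }
+        return base + containerUID, nil
+    }
+
+With those values, hostUID(100000, 65536, 0) returns 100000: uid 0 in
+the container really is uid 100000 on the host.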
+
+# Allowed ranges
+On most hosts, LXD will check /etc/subuid and /etc/subgid for
+allocations for the "lxd" user and on first start, set the default
+profile to use the first 65536 uids and gids from that range.
+
+If the range is shorter than 65536 (which includes no range at all),
+then LXD will fail to create or start any container until this is corrected.
+
+If some but not all of /etc/subuid, /etc/subgid, newuidmap (path lookup)
+and newgidmap (path lookup) can't be found on the system, LXD will fail
+the startup of any container until this is corrected as this shows a
+broken shadow setup.
+
+If none of those 4 files can be found, then LXD will assume it's running
+on a host using an old version of shadow. In this mode, LXD will assume
+it can use any uids and gids above 65535 and will take the first 65536
+as its default map.
+
+# Multiple allocations
+There are a few reasons why you wouldn't want a single allocation for
+all of your containers:
+ * ulimits (a container couldn't exhaust a ulimit and affect all others)
+ * in the unlikely event where someone breaks out of a container, they
+   can then enter any of the others running with the same map.
+
+And there's at least one reason why you'd want a shared allocation:
+ * shared filesystems
+
+As a result, the plan is for the default profile to come with a 65536
+uid/gid allocation which will be used by all containers running with the
+default profile.
+
+If you need to completely separate users, you can then create one
+profile per user and assign it a different allocation.
+
+# Changing the allocation of a used profile
+When changing the allocation of a profile which is in use, LXD will
+check whether the new allocation is smaller or larger than the previous
+one.
+
+If it's smaller and containers currently use the profile, the change
+will simply be rejected.
+You'll have to move all the existing containers to different profiles
+before you can do the change.
+
+If the new allocation is of the same size or larger than the current one
+and some of the containers are currently running, the change will be
+rejected and you'll have to first stop all the affected containers
+before re-trying.
+
+Then, if no containers are affected, the change will simply be committed;
+otherwise, a uid/gid remap of all affected containers will occur, and then
+the change will be saved.
+
+# Varying ranges between hosts
+When migrating a container between hosts, the size of its current uid/gid
+allocation with its current profile will be checked against what it'll
+get on the remote server.
+
+If the allocation is smaller, the transfer will be aborted and the user
+will need to update the remote profile or switch the container to a
+different profile before attempting the transfer again.
+
+In the other cases, the source host uid and gid range will be compared
+to that of the destination host. If it happens to be identical, then the
+filesystem will be transferred as-is.
+
+Otherwise, the filesystem will be transferred and a uid/gid remap
+operation will then happen to convert all the uids and gids to the right
+range.
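+
+To illustrate the /etc/subuid and /etc/subgid lookup described under
+"Allowed ranges" above, here is a rough Go sketch; it is simplified
+(first matching entry only) and assumes the bufio, fmt, os, strconv and
+strings imports:
+
+    // firstRange returns the first subordinate id range allocated to
+    // "user" in a subuid/subgid style file (lines of "name:start:count").
+    func firstRange(path, user string) (start, count int, err error) {
+        f, err := os.Open(path)
+        if err != nil {
+            return 0, 0, err
+        }
+        defer f.Close()
+
+        scanner := bufio.NewScanner(f)
+        for scanner.Scan() {
+            fields := strings.Split(scanner.Text(), ":")
+            if len(fields) != 3 || fields[0] != user {
+                continue
+            }
+            s, err1 := strconv.Atoi(fields[1])
+            c, err2 := strconv.Atoi(fields[2])
+            if err1 != nil || err2 != nil {
+                continue
+            }
+            return s, c, nil
+        }
+        return 0, 0, fmt.Errorf("no range for %q in %s", user, path)
+    }
+
+A range shorter than 65536 returned by firstRange("/etc/subuid", "lxd")
+would correspond to the failure case described above.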
=== added directory 'src/github.com/lxc/lxd/test'
=== added file 'src/github.com/lxc/lxd/test/README.md'
--- src/github.com/lxc/lxd/test/README.md	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,22 @@
+# How to run
+
+To run all tests, including the Go tests, run from repository root:
+
+    sudo -E make check
+
+To run only the integration tests, run from the test directory:
+
+    sudo -E ./main.sh
+
+# Environment variables
+
+Name | Default | Description
+:-- | :--- | :----------
+LXD\_BACKEND | dir | What backend to test against (btrfs, dir, lvm or zfs)
+LXD\_CONCURRENT | 0 | Run concurrency tests, very CPU intensive
+LXD\_DEBUG | 0 | Run lxd, lxc and the shell in debug mode (very verbose)
+LXD\_INSPECT | 0 | Don't tear down the test environment on failure
+LXD\_LOGS | "" | Path to a directory to copy all the LXD logs to
+LXD\_OFFLINE | 0 | Skip anything that requires network access
+LXD\_TEST\_IMAGE | "" (busybox test image) | Path to an image tarball to use instead of the default busybox image
+LXD\_TMPFS | 0 | Sets up a tmpfs for the whole testsuite to run on (fast but needs memory)
=== added directory 'src/github.com/lxc/lxd/test/backends'
=== added file 'src/github.com/lxc/lxd/test/backends/btrfs.sh'
--- src/github.com/lxc/lxd/test/backends/btrfs.sh	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/backends/btrfs.sh	2016-03-22 15:18:22 +0000
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+btrfs_setup() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Setting up btrfs backend in ${LXD_DIR}"
+
+  if ! which btrfs >/dev/null 2>&1; then
+    echo "Couldn't find the btrfs binary"; false
+  fi
+
+  truncate -s 100G "${TEST_DIR}/$(basename "${LXD_DIR}").btrfs"
+  mkfs.btrfs "${TEST_DIR}/$(basename "${LXD_DIR}").btrfs"
+
+  mount -o loop "${TEST_DIR}/$(basename "${LXD_DIR}").btrfs" "${LXD_DIR}"
+}
+
+btrfs_configure() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Configuring btrfs backend in ${LXD_DIR}"
+}
+
+btrfs_teardown() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Tearing down btrfs backend in ${LXD_DIR}"
+
+  umount -l "${LXD_DIR}"
+  rm -f "${TEST_DIR}/$(basename "${LXD_DIR}").btrfs"
+}
=== added file 'src/github.com/lxc/lxd/test/backends/dir.sh'
--- src/github.com/lxc/lxd/test/backends/dir.sh	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/backends/dir.sh	2016-03-22 15:18:22 +0000
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Nothing need be done for the dir backend, but we still need some functions.
+# This file can also serve as a skel file for what needs to be done to
+# implement a new backend.
+
+# Any necessary backend-specific setup
+dir_setup() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Setting up directory backend in ${LXD_DIR}"
+}
+
+# Do the API voodoo necessary to configure LXD to use this backend
+dir_configure() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Configuring directory backend in ${LXD_DIR}"
+}
+
+dir_teardown() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Tearing down directory backend in ${LXD_DIR}"
+}
=== added file 'src/github.com/lxc/lxd/test/backends/lvm.sh'
--- src/github.com/lxc/lxd/test/backends/lvm.sh	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/backends/lvm.sh	2016-03-22 15:18:22 +0000
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+lvm_setup() {
+  local LXD_DIR
+  LXD_DIR=$1
+
+  echo "==> Setting up lvm backend in ${LXD_DIR}"
+
+  if !
which lvm >/dev/null 2>&1; then + echo "Couldn't find the lvm binary"; false + fi + + truncate -s 100G "${TEST_DIR}/$(basename "${LXD_DIR}").lvm" + pvloopdev=$(losetup --show -f "${TEST_DIR}/$(basename "${LXD_DIR}").lvm") + if [ ! -e "${pvloopdev}" ]; then + echo "failed to setup loop" + false + fi + echo "${pvloopdev}" > "${TEST_DIR}/$(basename "${LXD_DIR}").lvm.vg" + + pvcreate "${pvloopdev}" + vgcreate "lxdtest-$(basename "${LXD_DIR}")" "${pvloopdev}" +} + +lvm_configure() { + local LXD_DIR + LXD_DIR=$1 + + echo "==> Configuring lvm backend in ${LXD_DIR}" + + lxc config set storage.lvm_vg_name "lxdtest-$(basename "${LXD_DIR}")" +} + +lvm_teardown() { + local LXD_DIR + LXD_DIR=$1 + + echo "==> Tearing down lvm backend in ${LXD_DIR}" + + SUCCESS=0 + # shellcheck disable=SC2034 + for i in $(seq 10); do + vgremove -f "lxdtest-$(basename "${LXD_DIR}")" >/dev/null 2>&1 || true + pvremove -f "$(cat "${TEST_DIR}/$(basename "${LXD_DIR}").lvm.vg")" >/dev/null 2>&1 || true + if losetup -d "$(cat "${TEST_DIR}/$(basename "${LXD_DIR}").lvm.vg")"; then + SUCCESS=1 + break + fi + + sleep 0.5 + done + + if [ "${SUCCESS}" = "0" ]; then + echo "Failed to tear down LVM" + false + fi + + rm -f "${TEST_DIR}/$(basename "${LXD_DIR}").lvm" + rm -f "${TEST_DIR}/$(basename "${LXD_DIR}").lvm.vg" +} === added file 'src/github.com/lxc/lxd/test/backends/zfs.sh' --- src/github.com/lxc/lxd/test/backends/zfs.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/backends/zfs.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +#!/bin/sh + +zfs_setup() { + local LXD_DIR + LXD_DIR=$1 + + echo "==> Setting up ZFS backend in ${LXD_DIR}" + + if ! which zfs >/dev/null 2>&1; then + echo "Couldn't find zfs binary"; false + fi + + truncate -s 100G "${LXD_DIR}/zfspool" + # prefix lxdtest- here, as zfs pools must start with a letter, but tempdir + # won't necessarily generate one that does. + zpool create "lxdtest-$(basename "${LXD_DIR}")" "${LXD_DIR}/zfspool" -m none +} + +zfs_configure() { + local LXD_DIR + LXD_DIR=$1 + + echo "==> Configuring ZFS backend in ${LXD_DIR}" + + lxc config set storage.zfs_pool_name "lxdtest-$(basename "${LXD_DIR}")" +} + +zfs_teardown() { + local LXD_DIR + LXD_DIR=$1 + + echo "==> Tearing down ZFS backend in ${LXD_DIR}" + + # Wait up to 5s for zpool destroy to succeed + SUCCESS=0 + + # shellcheck disable=SC2034 + for i in $(seq 10); do + zpool destroy -f "lxdtest-$(basename "${LXD_DIR}")" >/dev/null 2>&1 || true + if ! zpool list -o name -H | grep -q "^lxdtest-$(basename "${LXD_DIR}")"; then + SUCCESS=1 + break + fi + + sleep 0.5 + done + + if [ "${SUCCESS}" = "0" ]; then + echo "Failed to destroy the zpool" + false + fi +} === added directory 'src/github.com/lxc/lxd/test/deps' === added file 'src/github.com/lxc/lxd/test/deps/devlxd-client.go' --- src/github.com/lxc/lxd/test/deps/devlxd-client.go 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/deps/devlxd-client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,83 @@ +/* + * An example of how to use lxd's golang /dev/lxd client. This is intended to + * be run from inside a container. 
+ */
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+)
+
+type DevLxdDialer struct {
+	Path string
+}
+
+func (d DevLxdDialer) DevLxdDial(network, path string) (net.Conn, error) {
+	addr, err := net.ResolveUnixAddr("unix", d.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := net.DialUnix("unix", nil, addr)
+	if err != nil {
+		return nil, err
+	}
+
+	return conn, err
+}
+
+var DevLxdTransport = &http.Transport{
+	Dial: DevLxdDialer{"/dev/lxd/sock"}.DevLxdDial,
+}
+
+func main() {
+	c := http.Client{Transport: DevLxdTransport}
+	raw, err := c.Get("http://meshuggah-rocks/")
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	if raw.StatusCode != http.StatusOK {
+		fmt.Println("http error", raw.StatusCode)
+		result, err := ioutil.ReadAll(raw.Body)
+		if err == nil {
+			fmt.Println(string(result))
+		}
+		os.Exit(1)
+	}
+
+	result := []string{}
+	if err := json.NewDecoder(raw.Body).Decode(&result); err != nil {
+		fmt.Println("err decoding response", err)
+		os.Exit(1)
+	}
+
+	if result[0] != "/1.0" {
+		fmt.Println("unknown response", result)
+		os.Exit(1)
+	}
+
+	if len(os.Args) > 1 {
+		raw, err := c.Get(fmt.Sprintf("http://meshuggah-rocks/1.0/config/%s", os.Args[1]))
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+
+		value, err := ioutil.ReadAll(raw.Body)
+		if err != nil {
+			fmt.Println(err)
+			os.Exit(1)
+		}
+
+		fmt.Println(string(value))
+	} else {
+		fmt.Println("/dev/lxd ok")
+	}
+}
=== added file 'src/github.com/lxc/lxd/test/deps/import-busybox'
--- src/github.com/lxc/lxd/test/deps/import-busybox	1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/deps/import-busybox	2016-03-22 15:18:22 +0000
@@ -0,0 +1,349 @@
+#!/usr/bin/env python3
+import argparse
+import atexit
+import hashlib
+import http.client
+import io
+import json
+import os
+import shutil
+import socket
+import subprocess
+import sys
+import tarfile
+import tempfile
+import uuid
+
+
+class FriendlyParser(argparse.ArgumentParser):
+    def error(self, message):
+        sys.stderr.write('\nerror: %s\n' % message)
+        self.print_help()
+        sys.exit(2)
+
+
+def find_on_path(command):
+    """Is command on the executable search path?"""
+
+    if 'PATH' not in os.environ:
+        return False
+
+    path = os.environ['PATH']
+    for element in path.split(os.pathsep):
+        if not element:
+            continue
+        filename = os.path.join(element, command)
+        if os.path.isfile(filename) and os.access(filename, os.X_OK):
+            return True
+
+    return False
+
+
+class UnixHTTPConnection(http.client.HTTPConnection):
+    def __init__(self, path):
+        http.client.HTTPConnection.__init__(self, 'localhost')
+        self.path = path
+
+    def connect(self):
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(self.path)
+        self.sock = sock
+
+
+class LXD(object):
+    workdir = None
+
+    def __init__(self, path):
+        self.lxd = UnixHTTPConnection(path)
+
+        # Create our workdir
+        self.workdir = tempfile.mkdtemp()
+        atexit.register(self.cleanup)
+
+    def cleanup(self):
+        if self.workdir:
+            shutil.rmtree(self.workdir)
+
+    def rest_call(self, path, data=None, method="GET", headers={}):
+        if method == "GET" and data:
+            # Encode the data as a query string appended to the path.
+            self.lxd.request(
+                method,
+                "%s?%s" % (path, "&".join(["%s=%s" % (key, value)
+                                           for key, value in data.items()])),
+                headers=headers)
+        else:
+            self.lxd.request(method, path, data, headers)
+
+        r = self.lxd.getresponse()
+        d = json.loads(r.read().decode("utf-8"))
+        return r.status, d
+
+    def aliases_create(self, name, target):
+        data = json.dumps({"target": target,
+                           "name": name})
+
+        status, data =
self.rest_call("/1.0/images/aliases", data, "POST") + + if status != 200: + raise Exception("Failed to create alias: %s" % name) + + def aliases_remove(self, name): + status, data = self.rest_call("/1.0/images/aliases/%s" % name, + method="DELETE") + + if status != 200: + raise Exception("Failed to remove alias: %s" % name) + + def aliases_list(self): + status, data = self.rest_call("/1.0/images/aliases") + + return [alias.split("/1.0/images/aliases/")[-1] + for alias in data['metadata']] + + def images_list(self, recursive=False): + if recursive: + status, data = self.rest_call("/1.0/images?recursion=1") + return data['metadata'] + else: + status, data = self.rest_call("/1.0/images") + return [image.split("/1.0/images/")[-1] + for image in data['metadata']] + + def images_upload(self, path, filename, public): + headers = {} + if public: + headers['X-LXD-public'] = "1" + + if isinstance(path, str): + headers['Content-Type'] = "application/octet-stream" + + status, data = self.rest_call("/1.0/images", open(path, "rb"), + "POST", headers) + else: + meta_path, rootfs_path = path + boundary = str(uuid.uuid1()) + + upload_path = os.path.join(self.workdir, "upload") + body = open(upload_path, "wb+") + for name, path in [("metadata", meta_path), + ("rootfs", rootfs_path)]: + filename = os.path.basename(path) + body.write(bytes("--%s\r\n" % boundary, "utf-8")) + body.write(bytes("Content-Disposition: form-data; " + "name=%s; filename=%s\r\n" % + (name, filename), "utf-8")) + body.write(b"Content-Type: application/octet-stream\r\n") + body.write(b"\r\n") + with open(path, "rb") as fd: + shutil.copyfileobj(fd, body) + body.write(b"\r\n") + + body.write(bytes("--%s--\r\n" % boundary, "utf-8")) + body.write(b"\r\n") + body.close() + + headers['Content-Type'] = "multipart/form-data; boundary=%s" \ + % boundary + + status, data = self.rest_call("/1.0/images", + open(upload_path, "rb"), + "POST", headers) + + if status != 202: + raise Exception("Failed to upload the image: %s" % status) + + status, data = self.rest_call(data['operation'] + "/wait", + "", "GET", {}) + if status != 200: + raise Exception("Failed to query the operation: %s" % status) + + if data['status_code'] != 200: + raise Exception("Failed to import the image: %s" % + data['metadata']) + + return data['metadata']['metadata'] + + +class Busybox(object): + workdir = None + + def __init__(self): + # Create our workdir + self.workdir = tempfile.mkdtemp() + atexit.register(self.cleanup) + + def cleanup(self): + if self.workdir: + shutil.rmtree(self.workdir) + + def create_tarball(self, split=False): + xz = "pxz" if find_on_path("pxz") else "xz" + + destination_tar = os.path.join(self.workdir, "busybox.tar") + target_tarball = tarfile.open(destination_tar, "w:") + + if split: + destination_tar_rootfs = os.path.join(self.workdir, + "busybox.rootfs.tar") + target_tarball_rootfs = tarfile.open(destination_tar_rootfs, "w:") + + metadata = {'architecture': os.uname()[4], + 'creation_date': int(os.stat("/bin/busybox").st_ctime), + 'properties': { + 'os': "Busybox", + 'architecture': os.uname()[4], + 'description': "Busybox %s" % os.uname()[4], + 'name': "busybox-%s" % os.uname()[4] + }, + } + + # Add busybox + with open("/bin/busybox", "rb") as fd: + busybox_file = tarfile.TarInfo() + busybox_file.size = os.stat("/bin/busybox").st_size + busybox_file.mode = 0o755 + if split: + busybox_file.name = "bin/busybox" + target_tarball_rootfs.addfile(busybox_file, fd) + else: + busybox_file.name = "rootfs/bin/busybox" + target_tarball.addfile(busybox_file, 
fd) + + # Add symlinks + busybox = subprocess.Popen(["/bin/busybox", "--list-full"], + stdout=subprocess.PIPE, + universal_newlines=True) + busybox.wait() + + for path in busybox.stdout.read().split("\n"): + if not path.strip(): + continue + + symlink_file = tarfile.TarInfo() + symlink_file.type = tarfile.SYMTYPE + symlink_file.linkname = "/bin/busybox" + if split: + symlink_file.name = "%s" % path.strip() + target_tarball_rootfs.addfile(symlink_file) + else: + symlink_file.name = "rootfs/%s" % path.strip() + target_tarball.addfile(symlink_file) + + # Add directories + for path in ("dev", "mnt", "proc", "root", "sys", "tmp"): + directory_file = tarfile.TarInfo() + directory_file.type = tarfile.DIRTYPE + if split: + directory_file.name = "%s" % path + target_tarball_rootfs.addfile(directory_file) + else: + directory_file.name = "rootfs/%s" % path + target_tarball.addfile(directory_file) + + # Add the metadata file + metadata_yaml = json.dumps(metadata, sort_keys=True, + indent=4, separators=(',', ': '), + ensure_ascii=False).encode('utf-8') + b"\n" + + metadata_file = tarfile.TarInfo() + metadata_file.size = len(metadata_yaml) + metadata_file.name = "metadata.yaml" + target_tarball.addfile(metadata_file, + io.BytesIO(metadata_yaml)) + + # Add an /etc/inittab; this is to work around: + # http://lists.busybox.net/pipermail/busybox/2015-November/083618.html + # Basically, since there are some hardcoded defaults that misbehave, we + # just pass an empty inittab so those aren't applied, and then busybox + # doesn't spin forever. + inittab = tarfile.TarInfo() + inittab.size = 1 + inittab.name = "/rootfs/etc/inittab" + target_tarball.addfile(inittab, io.BytesIO(b"\n")) + + target_tarball.close() + if split: + target_tarball_rootfs.close() + + # Compress the tarball + r = subprocess.call([xz, "-9", destination_tar]) + if r: + raise Exception("Failed to compress: %s" % destination_tar) + + if split: + r = subprocess.call([xz, "-9", destination_tar_rootfs]) + if r: + raise Exception("Failed to compress: %s" % + destination_tar_rootfs) + return destination_tar + ".xz", destination_tar_rootfs + ".xz" + else: + return destination_tar + ".xz" + + +if __name__ == "__main__": + if "LXD_DIR" in os.environ: + lxd_socket = os.path.join(os.environ['LXD_DIR'], "unix.socket") + else: + lxd_socket = "/var/lib/lxd/unix.socket" + + if not os.path.exists(lxd_socket): + print("LXD isn't running.") + sys.exit(1) + + lxd = LXD(lxd_socket) + + def setup_alias(aliases, fingerprint): + existing = lxd.aliases_list() + + for alias in aliases: + if alias in existing: + lxd.aliases_remove(alias) + lxd.aliases_create(alias, fingerprint) + print("Setup alias: %s" % alias) + + def import_busybox(parser, args): + busybox = Busybox() + + if args.split: + meta_path, rootfs_path = busybox.create_tarball(split=True) + + with open(meta_path, "rb") as meta_fd: + with open(rootfs_path, "rb") as rootfs_fd: + fingerprint = hashlib.sha256(meta_fd.read() + + rootfs_fd.read()).hexdigest() + + if fingerprint in lxd.images_list(): + parser.exit(1, "This image is already in the store.\n") + + r = lxd.images_upload((meta_path, rootfs_path), + meta_path.split("/")[-1], args.public) + print("Image imported as: %s" % r['fingerprint']) + else: + path = busybox.create_tarball() + + with open(path, "rb") as fd: + fingerprint = hashlib.sha256(fd.read()).hexdigest() + + if fingerprint in lxd.images_list(): + parser.exit(1, "This image is already in the store.\n") + + r = lxd.images_upload(path, path.split("/")[-1], args.public) + print("Image imported 
as: %s" % r['fingerprint']) + + setup_alias(args.alias, fingerprint) + + parser = FriendlyParser(description="Import a busybox image") + parser.add_argument("--alias", action="append", + default=[], help="Aliases for the image") + parser.add_argument("--public", action="store_true", + default=False, help="Make the image public") + parser.add_argument("--split", action="store_true", + default=False, help="Whether to create a split image") + parser.set_defaults(func=import_busybox) + + # Call the function + args = parser.parse_args() + + try: + args.func(parser, args) + except Exception as e: + parser.error(e) === added file 'src/github.com/lxc/lxd/test/deps/schema1.sql' --- src/github.com/lxc/lxd/test/deps/schema1.sql 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/deps/schema1.sql 2016-03-22 15:18:22 +0000 @@ -0,0 +1,51 @@ +-- Database schema version 1 as taken from febb96e8164fbd189698da77383c26ce68b9762a +CREATE TABLE certificates ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + fingerprint VARCHAR(255) NOT NULL, + type INTEGER NOT NULL, + name VARCHAR(255) NOT NULL, + certificate TEXT NOT NULL, + UNIQUE (fingerprint) +); +CREATE TABLE containers ( + id INTEGER primary key AUTOINCREMENT NOT NULL, + name VARCHAR(255) NOT NULL, + architecture INTEGER NOT NULL, + type INTEGER NOT NULL, + UNIQUE (name) +); +CREATE TABLE containers_config ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + container_id INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (container_id) REFERENCES containers (id), + UNIQUE (container_id, key) +); +CREATE TABLE images ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + fingerprint VARCHAR(255) NOT NULL, + filename VARCHAR(255) NOT NULL, + size INTEGER NOT NULL, + public INTEGER NOT NULL DEFAULT 0, + architecture INTEGER NOT NULL, + creation_date DATETIME, + expiry_date DATETIME, + upload_date DATETIME NOT NULL, + UNIQUE (fingerprint) +); +CREATE TABLE images_properties ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + image_id INTEGER NOT NULL, + type INTEGER NOT NULL, + key VARCHAR(255) NOT NULL, + value TEXT, + FOREIGN KEY (image_id) REFERENCES images (id) +); +CREATE TABLE schema ( + id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, + version INTEGER NOT NULL, + updated_at DATETIME NOT NULL, + UNIQUE (version) +); +INSERT INTO schema (version, updated_at) values (1, "now"); === added file 'src/github.com/lxc/lxd/test/deps/server.crt' --- src/github.com/lxc/lxd/test/deps/server.crt 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/deps/server.crt 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIFzjCCA7igAwIBAgIRAKnCQRdpkZ86oXYOd9hGrPgwCwYJKoZIhvcNAQELMB4x +HDAaBgNVBAoTE2xpbnV4Y29udGFpbmVycy5vcmcwHhcNMTUwNzE1MDQ1NjQ0WhcN +MjUwNzEyMDQ1NjQ0WjAeMRwwGgYDVQQKExNsaW51eGNvbnRhaW5lcnMub3JnMIIC +IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAyViJkCzoxa1NYilXqGJog6xz +lSm4xt8KIzayc0JdB9VxEdIVdJqUzBAUtyCS4KZ9MbPmMEOX9NbBASL0tRK58/7K +Scq99Kj4XbVMLU1P/y5aW0ymnF0OpKbG6unmgAI2k/duRlbYHvGRdhlswpKl0Yst +l8i2kXOK0Rxcz90FewcEXGSnIYW21sz8YpBLfIZqOx6XEV36mOdi3MLrhUSAhXDw +Pay33Y7NonCQUBtiO7BT938cqI14FJrWdKon1UnODtzONcVBLTWtoe7D41+mx7EE +Taq5OPxBSe0DD6KQcPOZ7ZSJEhIqVKMvzLyiOJpyShmhm4OuGNoAG6jAuSij/9Kc +aLU4IitcrvFOuAo8M9OpiY9ZCR7Gb/qaPAXPAxE7Ci3f9DDNKXtPXDjhj3YG01+h +fNXMW3kCkMImn0A/+mZUMdCL87GWN2AN3Do5qaIc5XVEt1gp+LVqJeMoZ/lAeZWT +IbzcnkneOzE25m+bjw3r3WlR26amhyrWNwjGzRkgfEpw336kniX/GmwaCNgdNk+g +5aIbVxIHO0DbgkDBtdljR3VOic4djW/LtUIYIQ2egnPPyRR3fcFI+x5EQdVQYUXf 
+jpGIwovUDyG0Lkam2tpdeEXvLMZr8+Lhzu+H6vUFSj3cz6gcw/Xepw40FOkYdAI9 +LYB6nwpZLTVaOqZCJ2ECAwEAAaOCAQkwggEFMA4GA1UdDwEB/wQEAwIAoDATBgNV +HSUEDDAKBggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMIHPBgNVHREEgccwgcSCCVVi +dW50dVByb4IRMTAuMTY3LjE2MC4xODMvMjSCHzIwMDE6MTVjMDo2NzM1OmVlMDA6 +OmU6ZTMxMy8xMjiCKWZkNTc6Yzg3ZDpmMWVlOmVlMDA6MjFkOjdkZmY6ZmUwOToz +NzUzLzY0gikyMDAxOjE1YzA6NjczNTplZTAwOjIxZDo3ZGZmOmZlMDk6Mzc1My82 +NIIbZmU4MDo6MjFkOjdkZmY6ZmUwOTozNzUzLzY0ghAxOTIuMTY4LjEyMi4xLzI0 +MAsGCSqGSIb3DQEBCwOCAgEAmcJUSBH7cLw3auEEV1KewtdqY1ARVB/pafAtbe9F +7ZKBbxUcS7cP3P1hRs5FH1bH44bIJKHxckctNUPqvC+MpXSryKinQ5KvGPNjGdlW +6EPlQr23btizC6hRdQ6RjEkCnQxhyTLmQ9n78nt47hjA96rFAhCUyfPdv9dI4Zux +bBTJekhCx5taamQKoxr7tql4Y2TchVlwASZvOfar8I0GxBRFT8w9IjckOSLoT9/s +OhlvXpeoxxFT7OHwqXEXdRUvw/8MGBo6JDnw+J/NGDBw3Z0goebG4FMT//xGSHia +czl3A0M0flk4/45L7N6vctwSqi+NxVaJRKeiYPZyzOO9K/d+No+WVBPwKmyP8icQ +b7FGTelPJOUolC6kmoyM+vyaNUoU4nz6lgOSHAtuqGNDWZWuX/gqzZw77hzDIgkN +qisOHZWPVlG/iUh1JBkbglBaPeaa3zf0XwSdgwwf4v8Z+YtEiRqkuFgQY70eQKI/ +CIkj1p0iW5IBEsEAGUGklz4ZwqJwH3lQIqDBzIgHe3EP4cXaYsx6oYhPSDdHLPv4 +HMZhl05DP75CEkEWRD0AIaL7SHdyuYUmCZ2zdrMI7TEDrAqcUuPbYpHcdJ2wnYmi +2G8XHJibfu4PCpIm1J8kPL8rqpdgW3moKR8Mp0HJQOH4tSBr1Ep7xNLP1wg6PIe+ +p7U= +-----END CERTIFICATE----- === added file 'src/github.com/lxc/lxd/test/deps/server.key' --- src/github.com/lxc/lxd/test/deps/server.key 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/deps/server.key 2016-03-22 15:18:22 +0000 @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAyViJkCzoxa1NYilXqGJog6xzlSm4xt8KIzayc0JdB9VxEdIV +dJqUzBAUtyCS4KZ9MbPmMEOX9NbBASL0tRK58/7KScq99Kj4XbVMLU1P/y5aW0ym +nF0OpKbG6unmgAI2k/duRlbYHvGRdhlswpKl0Ystl8i2kXOK0Rxcz90FewcEXGSn +IYW21sz8YpBLfIZqOx6XEV36mOdi3MLrhUSAhXDwPay33Y7NonCQUBtiO7BT938c +qI14FJrWdKon1UnODtzONcVBLTWtoe7D41+mx7EETaq5OPxBSe0DD6KQcPOZ7ZSJ +EhIqVKMvzLyiOJpyShmhm4OuGNoAG6jAuSij/9KcaLU4IitcrvFOuAo8M9OpiY9Z +CR7Gb/qaPAXPAxE7Ci3f9DDNKXtPXDjhj3YG01+hfNXMW3kCkMImn0A/+mZUMdCL +87GWN2AN3Do5qaIc5XVEt1gp+LVqJeMoZ/lAeZWTIbzcnkneOzE25m+bjw3r3WlR +26amhyrWNwjGzRkgfEpw336kniX/GmwaCNgdNk+g5aIbVxIHO0DbgkDBtdljR3VO +ic4djW/LtUIYIQ2egnPPyRR3fcFI+x5EQdVQYUXfjpGIwovUDyG0Lkam2tpdeEXv +LMZr8+Lhzu+H6vUFSj3cz6gcw/Xepw40FOkYdAI9LYB6nwpZLTVaOqZCJ2ECAwEA +AQKCAgBCe8GwoaOa4kaTCyOurg/kqqTftA8XW751MjJqbJdbZtcXE0+SWRiY6RZu +AYt+MntUVhrEBQ3AAsloHqq+v5g3QQJ6qz9d8g1Qo/SrYMPxdtTPINhC+VdEdu1n +1CQQUKrE4QbAoxxp20o0vOB0vweR0WsUm2ntTUGhGsRqvoh4vzBpcbLeFtDwzG7p +/MtwKtIZA1jOm0GMC5tRWet67cuiRFCPjOCJgAXWhWShjuk43FhdeNN1tIDaDOaT +Tzwn6V7o+W/9wUxsKTVUKwrzoTno5kKNgrn2XxUP2/sOxpb7NPS2xj0cgnMHz3qR +GBhYqGbkoOID/88U1acDew1oFktQL24yd8/cvooh7KLN3k5oSKjpKmGAKaMMwsSv +ccRSM9EkTtgTANLpSFiVF738drZw7UXUsvVTCF8WHhMtGD50XOahR02D1kZnpqpe +SdxJ9qFNEeozk6w56cTerJNz4od18/gQtNADcPI6WE+8NBrqYjN/X4CBNS76IEtp +5ddGbi6+4HgO5B0pU87f2bZH4BwR8XJ07wdMRyXXhmnKcnirkyqUtgHmLF3LZnGX ++Fph5KmhBGs/ZovBvnBI2nREsMfNvzffK7x3hyFXv6J+XxILk4i3LkgKLJFC+RY0 +sjWNQB5tHuA1dbq3AtsbfJcTK764kSaUsq0JoqPQgiSuiNoCIQKCAQEA1Fk4SR5I +H1QHlXeQ/k1sg6B5H0uosPAnAQxjuI8SvYkty+b4diP+CJIS4IphgLIItROORUFE +bOi6pj2D2oK04J55fhlJaE8LQs7i90nFXT4B09Ut4oBYGCz5aE/wAUxUanaq1dxj +K17y+ejlqh7yKTwupHOvIm4ddDwU1U5H9J/Cyywvp5fznVIGMJynVk7zriXYM6aC +tioNCbOTHwQxjYEaG3AwymXaI6sNwdNiAzgq6M7v43GF3IOj8SYK2VhVdLqLJPnL +6G5OqMRxxQtxOcSctFOuicu+Jq/KVWJGDaERQZJloHcBJCtO34ONswGJqC/PGoU+ +Ny/BOaZdLQDIpwKCAQEA8rxOKaLuOWEi4MDJuAgQYqpO9JxY0h3yN1YrspBuGezR +4Lzdh0vUh9Jr4npV723gGwA7r8AcqIPZvSk8MmcYVuwoxz9VWYeNP8P6cRc3bDO8 +shnSvFxV32gKTEH8fOH3/BlJOnbn62tebSFHnGxyh2WPsRbzAMOKj9Q3Yq6ad3DD +6rJhtopIedC3AWc3aVeO2FHPC+Lza0PhUVsHf5X7Bg+zQlHaaEXB0lysruXkDlU9 
+WdW+Ajvo0enhOROgEa7QBC74NsKZF4KJGMGTaglydRtVYbqfx4QbfgDU5h2zaUnB
+lRINZvKNYGRXDN944ymynE9bo4xfOERbWc68GFaItwKCAQBCY+qvIaKW+OSuHIXe
+nEJTHPcBi9wgBdWMBF2hNEo9rAf/eiUweqxP7autPFajsAX85zJSAMft7Q1+MDlr
+NfZrS+DcRfenfx8cMibP/eaQ8nQL0NjZuhrQ5C7OKD/3h+/UoWlkF9WBl9wLun8j
+oy0/KyvCCtE0yIy47Jfu4NyqZNC4SQZVNbLa+uwogrHm0CRrzDU+YM75OUh+QgC7
+b8o2XajV70ux3ApJoI9ajEZWj1cLFrf1umaJvTaijKxTq8R8DF64nsjb0LETHugb
+HSq3TvtXfdpSBrtayRdPfrw8QqFsiOLxOoPG1SuBwlWpI8/wH5J2zjXXdzzIU3VK
+PrZ9AoIBAQDazTjbuT1pxZCN7donJEW42nHPdvttc4b5sJg1HpHQlrNdFIHPyl/q
+iperD8FU0MM5M42Zz99FW4yzQW88s8ex2rCrYgCKcnC1cO/YbygLRduq4zIdjlHt
+zrexo6132K0TtqtWowZNJHx6fIwziWH3gGn1JI2pO5o0KgQ+1MryLVi8v0zrIV1R
+SP0dq6+8Kivd/GhY+5uWLhr1nct1i3k6Ln7Uojnw0ihzegxCn4FiFh32U4AyPVSR
+m3PkYjdgmSZzDu+5VNJw6b6w7RT3eUqOGzRsorASRZgOjatbPpyRpOV1fU9NZAhi
+QjBhrzMl+VlCIxqkowzWCHAb1QmiGqajAoIBAGYKD5h7jTgPFKFlMViTg8LoMcQl
+9vbpmWkB+WdY5xXOwO0hO99rFDmLx6elsmYjdpq8zJkOFTnSB2o3IpenxZltNMsI
++aDlZWxDxokTxr6gbQPPrjePT1oON0/6sLEYkDOln8H1P9jmLPqTrET0DxCMgE5D
+NE9TAEuUKVhRTWy6FSdP58hUimyVnlbnvbGOh2tviNO+TK/H7k0WjRg57Sz9XTHO
+q36ob5TEsQngkTATEoksE9xhXFxtmTm/nu/26wN2Py49LSwu2aAYTfX/KhQKklNX
+P/tP5//z+hGeba8/xv8YhEr7vhbnlBdwp0wHJj5g7nHAbYfo9ELbXSON8wc=
+-----END RSA PRIVATE KEY-----
=== added directory 'src/github.com/lxc/lxd/test/extras'
=== added file 'src/github.com/lxc/lxd/test/extras/speedtest_create.sh'
--- src/github.com/lxc/lxd/test/extras/speedtest_create.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/extras/speedtest_create.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+MYDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+CIMAGE="testimage"
+CNAME="speedtest"
+
+count=${1}
+if [ "x${count}" == "x" ]; then
+  echo "USAGE: ${0} 10"
+  echo "This creates 10 busybox containers"
+  exit 1
+fi
+
+if [ "x${2}" != "xnotime" ]; then
+  time ${0} ${count} notime
+  exit 0
+fi
+
+${MYDIR}/deps/import-busybox --alias busybox
+
+PIDS=""
+for c in $(seq 1 $count); do
+  lxc init busybox "${CNAME}${c}" 2>&1 &
+  PIDS="$PIDS $!"
+done
+
+for pid in $PIDS; do
+  wait $pid
+done
+
+echo -e "\nlxc list: All shutdown"
+time lxc list 1>/dev/null
+
+PIDS=""
+for c in $(seq 1 $count); do
+  lxc start "${CNAME}${c}" 2>&1 &
+  PIDS="$PIDS $!"
+done
+
+for pid in $PIDS; do
+  wait $pid
+done
+
+echo -e "\nlxc list: All started"
+time lxc list 1>/dev/null
+
+echo -e "\nRun completed"
=== added file 'src/github.com/lxc/lxd/test/extras/speedtest_delete.sh'
--- src/github.com/lxc/lxd/test/extras/speedtest_delete.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/extras/speedtest_delete.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+MYDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+CIMAGE="testimage"
+CNAME="speedtest"
+
+count=${1}
+if [ "x${count}" == "x" ]; then
+  echo "USAGE: ${0} 10"
+  echo "This deletes 10 busybox containers"
+  exit 1
+fi
+
+if [ "x${2}" != "xnotime" ]; then
+  time ${0} ${count} notime
+  exit 0
+fi
+
+PIDS=""
+for c in $(seq 1 $count); do
+  lxc delete "${CNAME}${c}" 2>&1 &
+  PIDS="$PIDS $!"
+done
+
+for pid in $PIDS; do
+  wait $pid
+done
+
+echo -e "\nRun completed"
=== added file 'src/github.com/lxc/lxd/test/extras/stresstest.sh'
--- src/github.com/lxc/lxd/test/extras/stresstest.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/extras/stresstest.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,214 @@
+#!/bin/bash
+export PATH=$GOPATH/bin:$PATH
+
+# /tmp isn't mounted exec on most systems, so we can't actually start
+# containers that are created there.
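+# (A quick way to check this on a given host, assuming findmnt from
+# util-linux is available:
+#   findmnt -no OPTIONS /tmp | tr ',' '\n' | grep -qx noexec && echo "/tmp is noexec")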
+export SRC_DIR=$(pwd) +export LXD_DIR=$(mktemp -d -p $(pwd)) +chmod 777 "${LXD_DIR}" +export LXD_CONF=$(mktemp -d) +export LXD_FUIDMAP_DIR=${LXD_DIR}/fuidmap +mkdir -p ${LXD_FUIDMAP_DIR} +BASEURL=https://127.0.0.1:18443 +RESULT=failure + +set -e +if [ -n "$LXD_DEBUG" ]; then + set -x + debug=--debug +fi + +echo "==> Running the LXD testsuite" + +BASEURL=https://127.0.0.1:18443 +my_curl() { + curl -k -s --cert "${LXD_CONF}/client.crt" --key "${LXD_CONF}/client.key" $@ +} + +wait_for() { + op=$($@ | jq -r .operation) + my_curl $BASEURL$op/wait +} + +lxc() { + INJECTED=0 + CMD="$(which lxc)" + for arg in $@; do + if [ "$arg" = "--" ]; then + INJECTED=1 + CMD="$CMD $debug" + CMD="$CMD --" + else + CMD="$CMD \"$arg\"" + fi + done + + if [ "$INJECTED" = "0" ]; then + CMD="$CMD $debug" + fi + + eval "$CMD" +} + +cleanup() { + read -p "Tests Completed ($RESULT): hit enter to continue" x + echo "==> Cleaning up" + + # Try to stop all the containers + my_curl "$BASEURL/1.0/containers" | jq -r .metadata[] 2>/dev/null | while read -r line; do + wait_for my_curl -X PUT "$BASEURL$line/state" -d "{\"action\":\"stop\",\"force\":true}" + done + + # kill the lxds which share our pgrp as parent + mygrp=`awk '{ print $5 }' /proc/self/stat` + for p in `pidof lxd`; do + pgrp=`awk '{ print $5 }' /proc/$p/stat` + if [ "$pgrp" = "$mygrp" ]; then + do_kill_lxd $p + fi + done + + # Apparently we need to wait a while for everything to die + sleep 3 + rm -Rf ${LXD_DIR} + rm -Rf ${LXD_CONF} + + echo "" + echo "" + echo "==> Test result: $RESULT" +} + +trap cleanup EXIT HUP INT TERM + +if [ -z "`which lxc`" ]; then + echo "==> Couldn't find lxc" && false +fi + +spawn_lxd() { + # LXD_DIR is local here because since `lxc` is actually a function, it + # overwrites the environment and we would lose LXD_DIR's value otherwise. + local LXD_DIR + + addr=$1 + lxddir=$2 + shift + shift + echo "==> Spawning lxd on $addr in $lxddir" + LXD_DIR=$lxddir lxd ${DEBUG} $extraargs $* 2>&1 > $lxddir/lxd.log & + + echo "==> Confirming lxd on $addr is responsive" + alive=0 + while [ $alive -eq 0 ]; do + [ -e "${lxddir}/unix.socket" ] && LXD_DIR=$lxddir lxc finger && alive=1 + sleep 1s + done + + echo "==> Binding to network" + LXD_DIR=$lxddir lxc config set core.https_address $addr + + echo "==> Setting trust password" + LXD_DIR=$lxddir lxc config set core.trust_password foo +} + +spawn_lxd 127.0.0.1:18443 $LXD_DIR + +## tests go here +if [ ! -e "$LXD_TEST_IMAGE" ]; then + echo "Please define LXD_TEST_IMAGE" + false +fi +lxc image import $LXD_TEST_IMAGE --alias busybox + +lxc image list +lxc list + +NUMCREATES=5 +createthread() { + echo "createthread: I am $$" + for i in `seq 1 $NUMCREATES`; do + echo "createthread: starting loop $i out of $NUMCREATES" + declare -a pids + for j in `seq 1 20`; do + lxc launch busybox b.$i.$j & + pids[$j]=$! + done + for j in `seq 1 20`; do + # ignore errors if the task has already exited + wait ${pids[$j]} 2>/dev/null || true + done + echo "createthread: deleting..." + for j in `seq 1 20`; do + lxc delete b.$i.$j & + pids[$j]=$! 
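+ # remember each backgrounded delete's PID ($!) so the wait loop below can reap it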
+ done + for j in `seq 1 20`; do + # ignore errors if the task has already exited + wait ${pids[$j]} 2>/dev/null || true + done + done + exit 0 +} + +listthread() { + echo "listthread: I am $$" + while [ 1 ]; do + lxc list + sleep 2s + done + exit 0 +} + +configthread() { + echo "configthread: I am $$" + for i in `seq 1 20`; do + lxc profile create p$i + lxc profile set p$i limits.memory 100MB + lxc profile delete p$i + done + exit 0 +} + +disturbthread() { + echo "disturbthread: I am $$" + while [ 1 ]; do + lxc profile create empty + lxc init busybox disturb1 + lxc profile apply disturb1 empty + lxc start disturb1 + lxc exec disturb1 -- ps -ef + lxc stop disturb1 --force + lxc delete disturb1 + lxc profile delete empty + done + exit 0 +} + +echo "Starting create thread" +createthread 2>&1 | tee $LXD_DIR/createthread.out & +p1=$! + +echo "starting the disturb thread" +disturbthread 2>&1 | tee $LXD_DIR/disturbthread.out & +pdisturb=$! + +echo "Starting list thread" +listthread 2>&1 | tee $LXD_DIR/listthread.out & +p2=$! +echo "Starting config thread" +configthread 2>&1 | tee $LXD_DIR/configthread.out & +p3=$! + +# wait for listthread to finish +wait $p1 +# and configthread, it should be quick +wait $p3 + +echo "The creation loop is done, killing the list and disturb threads" + +kill $p2 +wait $p2 || true + +kill $pdisturb +wait $pdisturb || true + +RESULT=success === added file 'src/github.com/lxc/lxd/test/main.sh' --- src/github.com/lxc/lxd/test/main.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/main.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,451 @@ +#!/bin/sh -eu +[ -n "${GOPATH:-}" ] && export "PATH=${GOPATH}/bin:${PATH}" + +if [ -n "${LXD_DEBUG:-}" ]; then + set -x + DEBUG="--debug" +fi + +echo "==> Checking for dependencies" +for dep in lxd lxc curl jq git xgettext sqlite3 msgmerge msgfmt shuf setfacl uuidgen pyflakes3 pep8 shellcheck; do + which "${dep}" >/dev/null 2>&1 || (echo "Missing dependency: ${dep}" >&2 && exit 1) +done + +if [ "${USER:-'root'}" != "root" ]; then + echo "The testsuite must be run as root." >&2 + exit 1 +fi + +if [ -n "${LXD_LOGS:-}" ] && [ ! -d "${LXD_LOGS}" ]; then + echo "Your LXD_LOGS path doesn't exist: ${LXD_LOGS}" + exit 1 +fi + +# Helper functions +local_tcp_port() { + while :; do + port=$(shuf -i 10000-32768 -n 1) + nc -l 127.0.0.1 "${port}" >/dev/null 2>&1 & + pid=$! + kill "${pid}" >/dev/null 2>&1 || continue + wait "${pid}" || true + echo "${port}" + return + done +} + +# import all the backends +for backend in backends/*.sh; do + . "${backend}" +done + +if [ -z "${LXD_BACKEND:-}" ]; then + LXD_BACKEND=dir +fi + +spawn_lxd() { + set +x + # LXD_DIR is local here because since $(lxc) is actually a function, it + # overwrites the environment and we would lose LXD_DIR's value otherwise. + local LXD_DIR + + lxddir=${1} + shift + + # Copy pre generated Certs + cp deps/server.crt "${lxddir}" + cp deps/server.key "${lxddir}" + + # setup storage + "$LXD_BACKEND"_setup "${lxddir}" + + echo "==> Spawning lxd in ${lxddir}" + # shellcheck disable=SC2086 + LXD_DIR="${lxddir}" lxd --logfile "${lxddir}/lxd.log" ${DEBUG-} "$@" 2>&1 & + LXD_PID=$! 
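+ # Record the daemon's PID and directory so cleanup() and kill_lxd() can
+ # find and tear down this instance later.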
+ echo "${LXD_PID}" > "${lxddir}/lxd.pid" + echo "${lxddir}" >> "${TEST_DIR}/daemons" + echo "==> Spawned LXD (PID is ${LXD_PID})" + + echo "==> Confirming lxd is responsive" + LXD_DIR="${lxddir}" lxd waitready --timeout=300 + + echo "==> Binding to network" + # shellcheck disable=SC2034 + for i in $(seq 10); do + addr="127.0.0.1:$(local_tcp_port)" + LXD_DIR="${lxddir}" lxc config set core.https_address "${addr}" || continue + echo "${addr}" > "${lxddir}/lxd.addr" + echo "==> Bound to ${addr}" + break + done + + echo "==> Setting trust password" + LXD_DIR="${lxddir}" lxc config set core.trust_password foo + if [ -n "${LXD_DEBUG:-}" ]; then + set -x + fi + + echo "==> Configuring storage backend" + "$LXD_BACKEND"_configure "${lxddir}" +} + +lxc() { + LXC_LOCAL=1 + lxc_remote "$@" + RET=$? + unset LXC_LOCAL + return ${RET} +} + +lxc_remote() { + set +x + injected=0 + cmd=$(which lxc) + + # shellcheck disable=SC2048,SC2068 + for arg in $@; do + if [ "${arg}" = "--" ]; then + injected=1 + cmd="${cmd} ${DEBUG:-}" + [ -n "${LXC_LOCAL}" ] && cmd="${cmd} --force-local" + cmd="${cmd} --" + elif [ "${arg}" = "--force-local" ]; then + continue + else + cmd="${cmd} \"${arg}\"" + fi + done + + if [ "${injected}" = "0" ]; then + cmd="${cmd} ${DEBUG-}" + fi + if [ -n "${LXD_DEBUG:-}" ]; then + set -x + fi + eval "${cmd}" +} + +my_curl() { + curl -k -s --cert "${LXD_CONF}/client.crt" --key "${LXD_CONF}/client.key" "$@" +} + +wait_for() { + addr=${1} + shift + op=$("$@" | jq -r .operation) + my_curl "https://${addr}${op}/wait" +} + +ensure_has_localhost_remote() { + addr=${1} + if ! lxc remote list | grep -q "localhost"; then + lxc remote add localhost "https://${addr}" --accept-certificate --password foo + fi +} + +ensure_import_testimage() { + if ! lxc image alias list | grep -q "^| testimage\s*|.*$"; then + if [ -e "${LXD_TEST_IMAGE:-}" ]; then + lxc image import "${LXD_TEST_IMAGE}" --alias testimage + else + deps/import-busybox --alias testimage + fi + fi +} + +check_empty() { + if [ "$(find "${1}" 2> /dev/null | wc -l)" -gt "1" ]; then + echo "${1} is not empty, content:" + find "${1}" + false + fi +} + +check_empty_table() { + if [ -n "$(sqlite3 "${1}" "SELECT * FROM ${2};")" ]; then + echo "DB table ${2} is not empty, content:" + sqlite3 "${1}" "SELECT * FROM ${2};" + false + fi +} + +kill_lxd() { + # LXD_DIR is local here because since $(lxc) is actually a function, it + # overwrites the environment and we would lose LXD_DIR's value otherwise. 
+ local LXD_DIR + daemon_dir=${1} + LXD_DIR=${daemon_dir} + daemon_pid=$(cat "${daemon_dir}/lxd.pid") + echo "==> Killing LXD at ${daemon_dir}" + + if [ -e "${daemon_dir}/unix.socket" ]; then + # Delete all containers + echo "==> Deleting all containers" + for container in $(lxc list --force-local | tail -n+3 | grep "^| " | cut -d' ' -f2); do + lxc delete "${container}" --force-local -f || true + done + + # Delete all images + echo "==> Deleting all images" + for image in $(lxc image list --force-local | tail -n+3 | grep "^| " | cut -d'|' -f3 | sed "s/^ //g"); do + lxc image delete "${image}" --force-local || true + done + + echo "==> Checking for locked DB tables" + for table in $(echo .tables | sqlite3 "${daemon_dir}/lxd.db"); do + echo "SELECT * FROM ${table};" | sqlite3 "${daemon_dir}/lxd.db" >/dev/null + done + + # Kill the daemon + lxd shutdown || kill -9 "${daemon_pid}" 2>/dev/null || true + + # Cleanup shmounts (needed due to the forceful kill) + find "${daemon_dir}" -name shmounts -exec "umount" "-l" "{}" \; >/dev/null 2>&1 || true + fi + + if [ -n "${LXD_LOGS:-}" ]; then + echo "==> Copying the logs" + mkdir -p "${LXD_LOGS}/${daemon_pid}" + cp -R "${daemon_dir}/logs/" "${LXD_LOGS}/${daemon_pid}/" + cp "${daemon_dir}/lxd.log" "${LXD_LOGS}/${daemon_pid}/" + fi + + echo "==> Checking for leftover files" + rm -f "${daemon_dir}/containers/lxc-monitord.log" + rm -f "${daemon_dir}/security/apparmor/cache/.features" + check_empty "${daemon_dir}/containers/" + check_empty "${daemon_dir}/devices/" + check_empty "${daemon_dir}/images/" + # FIXME: Once container logging rework is done, uncomment + # check_empty "${daemon_dir}/logs/" + check_empty "${daemon_dir}/security/apparmor/cache/" + check_empty "${daemon_dir}/security/apparmor/profiles/" + check_empty "${daemon_dir}/security/seccomp/" + check_empty "${daemon_dir}/shmounts/" + check_empty "${daemon_dir}/snapshots/" + + echo "==> Checking for leftover DB entries" + check_empty_table "${daemon_dir}/lxd.db" "containers" + check_empty_table "${daemon_dir}/lxd.db" "containers_config" + check_empty_table "${daemon_dir}/lxd.db" "containers_devices" + check_empty_table "${daemon_dir}/lxd.db" "containers_devices_config" + check_empty_table "${daemon_dir}/lxd.db" "containers_profiles" + check_empty_table "${daemon_dir}/lxd.db" "images" + check_empty_table "${daemon_dir}/lxd.db" "images_aliases" + check_empty_table "${daemon_dir}/lxd.db" "images_properties" + + # teardown storage + "$LXD_BACKEND"_teardown "${daemon_dir}" + + # Wipe the daemon directory + wipe "${daemon_dir}" + + # Remove the daemon from the list + sed "\|^${daemon_dir}|d" -i "${TEST_DIR}/daemons" +} + +cleanup() { + set +e + + # Allow for inspection + if [ -n "${LXD_INSPECT:-}" ]; then + echo "==> Test result: ${TEST_RESULT}" + if [ "${TEST_RESULT}" != "success" ]; then + echo "failed test: ${TEST_CURRENT}" + fi + + # shellcheck disable=SC2086 + printf "To poke around, use:\n LXD_DIR=%s LXD_CONF=%s sudo -E %s/bin/lxc COMMAND\n" "${LXD_DIR}" "${LXD_CONF}" ${GOPATH:-} + echo "Tests Completed (${TEST_RESULT}): hit enter to continue" + + # shellcheck disable=SC2034 + read nothing + fi + + echo "==> Cleaning up" + + # Kill all the LXD instances + while read daemon_dir; do + kill_lxd "${daemon_dir}" + done < "${TEST_DIR}/daemons" + + # Wipe the test environment + wipe "${TEST_DIR}" + + echo "" + echo "" + echo "==> Test result: ${TEST_RESULT}" + if [ "${TEST_RESULT}" != "success" ]; then + echo "failed test: ${TEST_CURRENT}" + fi +} + +wipe() { + if which btrfs >/dev/null 2>&1; then + 
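+    # On btrfs, container directories may be subvolumes, which plain rm
+    # can't remove; after the rm below, delete any remaining subvolumes
+    # children-first (hence the tac).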
+    rm -Rf "${1}" 2>/dev/null || true
+    if [ -d "${1}" ]; then
+      find "${1}" | tac | xargs btrfs subvolume delete >/dev/null 2>&1 || true
+    fi
+  fi
+
+  # shellcheck disable=SC2009
+  ps aux | grep lxc-monitord | grep "${1}" | awk '{print $2}' | while read pid; do
+    kill -9 "${pid}"
+  done
+
+  if [ -f "${TEST_DIR}/loops" ]; then
+    while read line; do
+      losetup -d "${line}" || true
+    done < "${TEST_DIR}/loops"
+  fi
+  if mountpoint -q "${1}"; then
+    umount "${1}"
+  fi
+
+  rm -Rf "${1}"
+}
+
+# Must be set before cleanup()
+TEST_CURRENT=setup
+TEST_RESULT=failure
+
+trap cleanup EXIT HUP INT TERM
+
+# Import all the testsuites
+for suite in suites/*.sh; do
+  . "${suite}"
+done
+
+# Setup test directory
+TEST_DIR=$(mktemp -d -p "$(pwd)" tmp.XXX)
+chmod +x "${TEST_DIR}"
+
+if [ -n "${LXD_TMPFS:-}" ]; then
+  mount -t tmpfs tmpfs "${TEST_DIR}" -o mode=0751
+fi
+
+LXD_CONF=$(mktemp -d -p "${TEST_DIR}" XXX)
+export LXD_CONF
+
+# Setup the first LXD
+LXD_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+export LXD_DIR
+chmod +x "${LXD_DIR}"
+spawn_lxd "${LXD_DIR}"
+LXD_ADDR=$(cat "${LXD_DIR}/lxd.addr")
+export LXD_ADDR
+
+# Setup the second LXD
+LXD2_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+chmod +x "${LXD2_DIR}"
+spawn_lxd "${LXD2_DIR}"
+LXD2_ADDR=$(cat "${LXD2_DIR}/lxd.addr")
+export LXD2_ADDR
+
+# allow for running a specific set of tests
+if [ "$#" -gt 0 ]; then
+  "test_${1}"
+  TEST_RESULT=success
+  exit
+fi
+
+echo "==> TEST: doing static analysis of commits"
+TEST_CURRENT=test_static_analysis
+test_static_analysis
+
+echo "==> TEST: checking dependencies"
+TEST_CURRENT=test_check_deps
+test_check_deps
+
+echo "==> TEST: Database schema update"
+TEST_CURRENT=test_database_update
+test_database_update
+
+echo "==> TEST: lxc remote url"
+TEST_CURRENT=test_remote_url
+test_remote_url
+
+echo "==> TEST: lxc remote administration"
+TEST_CURRENT=test_remote_admin
+test_remote_admin
+
+echo "==> TEST: basic usage"
+TEST_CURRENT=test_basic_usage
+test_basic_usage
+
+echo "==> TEST: images (and cached image expiry)"
+TEST_CURRENT=test_image_expiry
+test_image_expiry
+
+if [ -n "${LXD_CONCURRENT:-}" ]; then
+  echo "==> TEST: concurrent exec"
+  TEST_CURRENT=test_concurrent_exec
+  test_concurrent_exec
+
+  echo "==> TEST: concurrent startup"
+  TEST_CURRENT=test_concurrent
+  test_concurrent
+fi
+
+echo "==> TEST: lxc remote usage"
+TEST_CURRENT=test_remote_usage
+test_remote_usage
+
+echo "==> TEST: snapshots"
+TEST_CURRENT=test_snapshots
+test_snapshots
+
+echo "==> TEST: snapshot restore"
+TEST_CURRENT=test_snap_restore
+test_snap_restore
+
+echo "==> TEST: profiles, devices and configuration"
+TEST_CURRENT=test_config_profiles
+test_config_profiles
+
+echo "==> TEST: server config"
+TEST_CURRENT=test_server_config
+test_server_config
+
+echo "==> TEST: filemanip"
+TEST_CURRENT=test_filemanip
+test_filemanip
+
+echo "==> TEST: devlxd"
+TEST_CURRENT=test_devlxd
+test_devlxd
+
+if which fuidshift >/dev/null 2>&1; then
+  echo "==> TEST: uidshift"
+  TEST_CURRENT=test_fuidshift
+  test_fuidshift
+else
+  echo "==> SKIP: fuidshift (binary missing)"
+fi
+
+echo "==> TEST: migration"
+TEST_CURRENT=test_migration
+test_migration
+
+curversion=$(dpkg -s lxc | awk '/^Version/ { print $2 }')
+if dpkg --compare-versions "${curversion}" gt 1.1.2-0ubuntu3; then
+  echo "==> TEST: fdleak"
+  TEST_CURRENT=test_fdleak
+  test_fdleak
+else
+  # We temporarily skip the fdleak test because a bug in lxc is
+  # known to make it fail without lxc commit
+  # 858377e: logs: introduce a thread-local 'current' lxc_config (v2)
+  echo "==> SKIPPING TEST:
fdleak" +fi + +echo "==> TEST: cpu profiling" +TEST_CURRENT=test_cpu_profiling +test_cpu_profiling + +echo "==> TEST: memory profiling" +TEST_CURRENT=test_mem_profiling +test_mem_profiling + +TEST_RESULT=success === added directory 'src/github.com/lxc/lxd/test/suites' === added file 'src/github.com/lxc/lxd/test/suites/basic.sh' --- src/github.com/lxc/lxd/test/suites/basic.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/suites/basic.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,269 @@ +#!/bin/sh + +gen_third_cert() { + [ -f "${LXD_CONF}/client3.crt" ] && return + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client.crt.bak" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/client.key.bak" + lxc_remote list > /dev/null 2>&1 + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client3.crt" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/client3.key" + mv "${LXD_CONF}/client.crt.bak" "${LXD_CONF}/client.crt" + mv "${LXD_CONF}/client.key.bak" "${LXD_CONF}/client.key" +} + +test_basic_usage() { + ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" + + # Test image export + sum=$(lxc image info testimage | grep ^Fingerprint | cut -d' ' -f2) + lxc image export testimage "${LXD_DIR}/" + if [ -e "${LXD_TEST_IMAGE:-}" ]; then + name=$(basename "${LXD_TEST_IMAGE}") + else + name=${sum}.tar.xz + fi + [ "${sum}" = "$(sha256sum "${LXD_DIR}/${name}" | cut -d' ' -f1)" ] + + # Test an alias with slashes + lxc image show "${sum}" + lxc image alias create a/b/ "${sum}" + lxc image alias delete a/b/ + + # Test image delete + lxc image delete testimage + + # test GET /1.0, since the client always puts to /1.0/ + my_curl -f -X GET "https://${LXD_ADDR}/1.0" + my_curl -f -X GET "https://${LXD_ADDR}/1.0/containers" + + # Re-import the image + mv "${LXD_DIR}/${name}" "${LXD_DIR}/testimage.tar.xz" + lxc image import "${LXD_DIR}/testimage.tar.xz" --alias testimage + rm "${LXD_DIR}/testimage.tar.xz" + + # Test filename for image export (should be "out") + lxc image export testimage "${LXD_DIR}/" + [ "${sum}" = "$(sha256sum "${LXD_DIR}/testimage.tar.xz" | cut -d' ' -f1)" ] + rm "${LXD_DIR}/testimage.tar.xz" + + # Test container creation + lxc init testimage foo + lxc list | grep foo | grep STOPPED + lxc list fo | grep foo | grep STOPPED + + # Test container rename + lxc move foo bar + lxc list | grep -v foo + lxc list | grep bar + + # Test container copy + lxc copy bar foo + lxc delete foo + + # gen untrusted cert + gen_third_cert + + # don't allow requests without a cert to get trusted data + curl -k -s -X GET "https://${LXD_ADDR}/1.0/containers/foo" | grep 403 + + # Test unprivileged container publish + lxc publish bar --alias=foo-image prop1=val1 + lxc image show foo-image | grep val1 + curl -k -s --cert "${LXD_CONF}/client3.crt" --key "${LXD_CONF}/client3.key" -X GET "https://${LXD_ADDR}/1.0/images" | grep "/1.0/images/" && false + lxc image delete foo-image + + # Test privileged container publish + lxc profile create priv + lxc profile set priv security.privileged true + lxc init testimage barpriv -p default -p priv + lxc publish barpriv --alias=foo-image prop1=val1 + lxc image show foo-image | grep val1 + curl -k -s --cert "${LXD_CONF}/client3.crt" --key "${LXD_CONF}/client3.key" -X GET "https://${LXD_ADDR}/1.0/images" | grep "/1.0/images/" && false + lxc image delete foo-image + lxc delete barpriv + lxc profile delete priv + + # Test that containers without metadata.yaml are published successfully. + # Note that this quick hack won't work for LVM, since it doesn't always mount + # the container's filesystem. 
That's ok though: the logic we're trying to + # test here is independent of storage backend, so running it for just one + # backend (or all non-lvm backends) is enough. + if [ "${LXD_BACKEND}" != "lvm" ]; then + lxc init testimage nometadata + rm "${LXD_DIR}/containers/nometadata/metadata.yaml" + lxc publish nometadata --alias=nometadata-image + lxc image delete nometadata-image + lxc delete nometadata + fi + + # Test public images + lxc publish --public bar --alias=foo-image2 + curl -k -s --cert "${LXD_CONF}/client3.crt" --key "${LXD_CONF}/client3.key" -X GET "https://${LXD_ADDR}/1.0/images" | grep "/1.0/images/" + lxc image delete foo-image2 + + # Test invalid container names + ! lxc init testimage -abc + ! lxc init testimage abc- + ! lxc init testimage 1234 + ! lxc init testimage 12test + ! lxc init testimage a_b_c + ! lxc init testimage aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + + # Test snapshot publish + lxc snapshot bar + lxc publish bar/snap0 --alias foo + lxc init foo bar2 + lxc list | grep bar2 + lxc delete bar2 + lxc image delete foo + + # test basic alias support + printf "aliases:\n ls: list" >> "${LXD_CONF}/config.yml" + lxc ls + + # Delete the bar container we've used for several tests + lxc delete bar + + # lxc delete should also delete all snapshots of bar + [ ! -d "${LXD_DIR}/snapshots/bar" ] + + # Test randomly named container creation + lxc init testimage + RDNAME=$(lxc list | tail -n2 | grep ^\| | awk '{print $2}') + lxc delete "${RDNAME}" + + # Test "nonetype" container creation + wait_for "${LXD_ADDR}" my_curl -X POST "https://${LXD_ADDR}/1.0/containers" \ + -d "{\"name\":\"nonetype\",\"source\":{\"type\":\"none\"}}" + lxc delete nonetype + + # Test "nonetype" container creation with an LXC config + wait_for "${LXD_ADDR}" my_curl -X POST "https://${LXD_ADDR}/1.0/containers" \ + -d "{\"name\":\"configtest\",\"config\":{\"raw.lxc\":\"lxc.hook.clone=/bin/true\"},\"source\":{\"type\":\"none\"}}" + [ "$(my_curl "https://${LXD_ADDR}/1.0/containers/configtest" | jq -r .metadata.config[\"raw.lxc\"])" = "lxc.hook.clone=/bin/true" ] + lxc delete configtest + + # Test socket activation + LXD_ACTIVATION_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + spawn_lxd "${LXD_ACTIVATION_DIR}" + ( + set -e + # shellcheck disable=SC2030 + LXD_DIR=${LXD_ACTIVATION_DIR} + ensure_import_testimage + lxd activateifneeded --debug 2>&1 | grep -q "Daemon has core.https_address set, activating..." + lxc config unset core.https_address --force-local + lxd activateifneeded --debug 2>&1 | grep -q -v "activating..." + lxc init testimage autostart --force-local + lxd activateifneeded --debug 2>&1 | grep -q -v "activating..." + lxc config set autostart boot.autostart true --force-local + lxd activateifneeded --debug 2>&1 | grep -q "Daemon has auto-started containers, activating..." 
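+ # The greps above key on activateifneeded's --debug output; it should
+ # only mention "activating..." when core.https_address is set or an
+ # auto-started container exists.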
+ lxc delete autostart --force-local + ) + # shellcheck disable=SC2031 + LXD_DIR=${LXD_DIR} + kill_lxd "${LXD_ACTIVATION_DIR}" + + # Create and start a container + lxc launch testimage foo + lxc list | grep foo | grep RUNNING + lxc stop foo --force # stop is hanging + + # cycle it a few times + lxc start foo + mac1=$(lxc exec foo cat /sys/class/net/eth0/address) + lxc stop foo --force # stop is hanging + lxc start foo + mac2=$(lxc exec foo cat /sys/class/net/eth0/address) + + if [ -n "${mac1}" ] && [ -n "${mac2}" ] && [ "${mac1}" != "${mac2}" ]; then + echo "==> MAC addresses didn't match across restarts (${mac1} vs ${mac2})" + false + fi + + # check that we can set the environment + lxc exec foo pwd | grep /root + lxc exec --env BEST_BAND=meshuggah foo env | grep meshuggah + lxc exec foo ip link show | grep eth0 + + # test file transfer + echo abc > "${LXD_DIR}/in" + + lxc file push "${LXD_DIR}/in" foo/root/ + lxc exec foo /bin/cat /root/in | grep abc + lxc exec foo -- /bin/rm -f root/in + + lxc file push "${LXD_DIR}/in" foo/root/in1 + lxc exec foo /bin/cat /root/in1 | grep abc + lxc exec foo -- /bin/rm -f root/in1 + + # make sure stdin is chowned to our container root uid (Issue #590) + [ -t 0 ] && [ -t 1 ] && lxc exec foo -- chown 1000:1000 /proc/self/fd/0 + + echo foo | lxc exec foo tee /tmp/foo + + # Detect regressions/hangs in exec + sum=$(ps aux | tee "${LXD_DIR}/out" | lxc exec foo md5sum | cut -d' ' -f1) + [ "${sum}" = "$(md5sum "${LXD_DIR}/out" | cut -d' ' -f1)" ] + rm "${LXD_DIR}/out" + + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + content=$(cat "${LXD_DIR}/containers/foo/rootfs/tmp/foo") + [ "${content}" = "foo" ] + fi + + lxc launch testimage deleterunning + my_curl -X DELETE "https://${LXD_ADDR}/1.0/containers/deleterunning" | grep "container is running" + lxc delete deleterunning -f + + # cleanup + lxc delete foo -f + + # check that an apparmor profile is created for this container, that it is + # unloaded on stop, and that it is deleted when the container is deleted + lxc launch testimage lxd-apparmor-test + aa-status | grep "lxd-lxd-apparmor-test_<${LXD_DIR}>" + lxc stop lxd-apparmor-test --force + ! aa-status | grep -q "lxd-lxd-apparmor-test_<${LXD_DIR}>" + lxc delete lxd-apparmor-test + [ ! -f "${LXD_DIR}/security/apparmor/profiles/lxd-lxd-apparmor-test" ] + + # make sure that privileged containers are not world-readable + lxc profile create unconfined + lxc profile set unconfined security.privileged true + lxc init testimage foo2 -p unconfined + [ "$(stat -L -c "%a" "${LXD_DIR}/containers/foo2")" = "700" ] + lxc delete foo2 + lxc profile delete unconfined + + # Ephemeral + lxc launch testimage foo -e + + OLD_INIT=$(lxc info foo | grep ^Pid) + lxc exec foo reboot + + REBOOTED="false" + + # shellcheck disable=SC2034 + for i in $(seq 10); do + NEW_INIT=$(lxc info foo | grep ^Pid || true) + + if [ -n "${NEW_INIT}" ] && [ "${OLD_INIT}" != "${NEW_INIT}" ]; then + REBOOTED="true" + break + fi + + sleep 0.5 + done + + [ "${REBOOTED}" = "true" ] + + # Workaround for LXC bug which causes LXD to double-start containers + # on reboot + sleep 2 + + lxc stop foo --force || true + ! 
lxc list | grep -q foo
+}
=== added file 'src/github.com/lxc/lxd/test/suites/concurrent.sh'
--- src/github.com/lxc/lxd/test/suites/concurrent.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/concurrent.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+test_concurrent() {
+  ensure_import_testimage
+
+  spawn_container() {
+    set -e
+
+    name=concurrent-${1}
+
+    lxc launch testimage "${name}"
+    lxc info "${name}" | grep Running
+    echo abc | lxc exec "${name}" -- cat | grep abc
+    lxc stop "${name}" --force
+    lxc delete "${name}"
+  }
+
+  PIDS=""
+
+  for id in $(seq $(($(find /sys/bus/cpu/devices/ -type l | wc -l)*8))); do
+    spawn_container "${id}" 2>&1 | tee "${LXD_DIR}/lxc-${id}.out" &
+    PIDS="${PIDS} $!"
+  done
+
+  for pid in ${PIDS}; do
+    wait "${pid}"
+  done
+
+  ! lxc list | grep -q concurrent
+}
=== added file 'src/github.com/lxc/lxd/test/suites/config.sh'
--- src/github.com/lxc/lxd/test/suites/config.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/config.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,184 @@
+#!/bin/sh
+ensure_removed() {
+  bad=0
+  lxc exec foo -- stat /dev/ttyS0 && bad=1
+  if [ "${bad}" -eq 1 ]; then
+    echo "device should have been removed; $*"
+    false
+  fi
+}
+
+dounixdevtest() {
+  lxc start foo
+  lxc config device add foo tty unix-char "$@"
+  lxc exec foo -- stat /dev/ttyS0
+  lxc exec foo reboot
+  lxc exec foo -- stat /dev/ttyS0
+  lxc restart foo --force
+  lxc exec foo -- stat /dev/ttyS0
+  lxc config device remove foo tty
+  ensure_removed "was not hot-removed"
+  lxc exec foo reboot
+  ensure_removed "removed device re-appeared after container reboot"
+  lxc restart foo --force
+  ensure_removed "removed device re-appeared after lxc reboot"
+  lxc stop foo --force
+}
+
+testunixdevs() {
+  echo "Testing passing char device /dev/ttyS0"
+  dounixdevtest path=/dev/ttyS0
+
+  echo "Testing passing char device 4 64"
+  dounixdevtest path=/dev/ttyS0 major=4 minor=64
+}
+
+ensure_fs_unmounted() {
+  bad=0
+  lxc exec foo -- stat /mnt/hello && bad=1
+  if [ "${bad}" -eq 1 ]; then
+    echo "device should have been removed; $*"
+    false
+  fi
+}
+
+testloopmounts() {
+  loopfile=$(mktemp -p "${TEST_DIR}" loop_XXX)
+  dd if=/dev/zero of="${loopfile}" bs=1M seek=200 count=1
+  mkfs.ext4 -F "${loopfile}"
+
+  lpath=$(losetup --show -f "${loopfile}")
+  if [ ! -e "${lpath}" ]; then
+    echo "failed to setup loop"
+    false
+  fi
+  echo "${lpath}" >> "${TEST_DIR}/loops"
+
+  mkdir -p "${TEST_DIR}/mnt"
+  mount "${lpath}" "${TEST_DIR}/mnt" || { echo "loop mount failed"; return; }
+  touch "${TEST_DIR}/mnt/hello"
+  umount -l "${TEST_DIR}/mnt"
+  lxc start foo
+  lxc config device add foo mnt disk source="${lpath}" path=/mnt
+  lxc exec foo stat /mnt/hello
+  # Note - we need to add a set_running_config_item to lxc
+  # or work around its absence somehow. Once that's done, we
+  # can run the following two lines:
+  #lxc exec foo reboot
+  #lxc exec foo stat /mnt/hello
+  lxc restart foo --force
+  lxc exec foo stat /mnt/hello
+  lxc config device remove foo mnt
+  ensure_fs_unmounted "fs should have been hot-unmounted"
+  lxc exec foo reboot
+  ensure_fs_unmounted "removed fs re-appeared after reboot"
+  lxc restart foo --force
+  ensure_fs_unmounted "removed fs re-appeared after restart"
+  lxc stop foo --force
+  losetup -d "${lpath}"
+  sed -i "\|^${lpath}|d" "${TEST_DIR}/loops"
+}
+
+test_config_profiles() {
+  ensure_import_testimage
+
+  lxc init testimage foo
+  lxc profile list | grep default
+
+  # let's check that 'lxc config profile' still works while it's deprecated
+  lxc config profile list | grep default
+
+  # setting an invalid config item should error out when setting it, not get
+  # into the database and never let the user edit the container again.
+  ! lxc config set foo raw.lxc "lxc.notaconfigkey = invalid"
+
+  lxc profile create stdintest
+  echo "BADCONF" | lxc profile set stdintest user.user_data -
+  lxc profile show stdintest | grep BADCONF
+  lxc profile delete stdintest
+
+  echo "BADCONF" | lxc config set foo user.user_data -
+  lxc config show foo | grep BADCONF
+  lxc config unset foo user.user_data
+
+  mkdir -p "${TEST_DIR}/mnt1"
+  lxc config device add foo mnt1 disk source="${TEST_DIR}/mnt1" path=/mnt1 readonly=true
+  lxc profile create onenic
+  lxc profile device add onenic eth0 nic nictype=bridged parent=lxcbr0
+  lxc profile apply foo onenic
+  lxc profile create unconfined
+  lxc profile set unconfined raw.lxc "lxc.aa_profile=unconfined"
+  lxc profile apply foo onenic,unconfined
+
+  lxc config device list foo | grep mnt1
+  lxc config device show foo | grep "/mnt1"
+  lxc config show foo | grep "onenic" -A1 | grep "unconfined"
+  lxc profile list | grep onenic
+  lxc profile device list onenic | grep eth0
+  lxc profile device show onenic | grep lxcbr0
+
+  # test live-adding a nic
+  lxc start foo
+  ! lxc config show foo | grep -q "raw.lxc"
+  lxc config show foo --expanded | grep -q "raw.lxc"
+  ! lxc config show foo | grep -v "volatile.eth0" | grep -q "eth0"
+  lxc config show foo --expanded | grep -v "volatile.eth0" | grep -q "eth0"
+  lxc config device add foo eth2 nic nictype=bridged parent=lxcbr0 name=eth10
+  lxc exec foo -- /sbin/ifconfig -a | grep eth0
+  lxc exec foo -- /sbin/ifconfig -a | grep eth10
+  lxc config device list foo | grep eth2
+  lxc config device remove foo eth2
+
+  # test live-adding a disk
+  mkdir "${TEST_DIR}/mnt2"
+  touch "${TEST_DIR}/mnt2/hosts"
+  lxc config device add foo mnt2 disk source="${TEST_DIR}/mnt2" path=/mnt2 readonly=true
+  lxc exec foo -- ls /mnt2/hosts
+  lxc stop foo --force
+  lxc start foo
+  lxc exec foo -- ls /mnt2/hosts
+  lxc config device remove foo mnt2
+  ! lxc exec foo -- ls /mnt2/hosts
+  lxc stop foo --force
+  lxc start foo
+  ! lxc exec foo -- ls /mnt2/hosts
+  lxc stop foo --force
+
+  lxc config set foo user.prop value
+  lxc list user.prop=value | grep foo
+  lxc config unset foo user.prop
+
+  # Test for invalid raw.lxc
+  ! lxc config set foo raw.lxc a
+  ! lxc profile set default raw.lxc a
+
+  bad=0
+  lxc list user.prop=value | grep foo && bad=1
+  if [ "${bad}" -eq 1 ]; then
+    echo "property unset failed"
+    false
+  fi
+
+  bad=0
+  lxc config set foo user.prop 2>/dev/null && bad=1
+  if [ "${bad}" -eq 1 ]; then
+    echo "property set succeeded when it shouldn't have"
+    false
+  fi
+
+  testunixdevs
+
+  testloopmounts
+
+  lxc delete foo
+
+  lxc init testimage foo
+  lxc profile apply foo onenic,unconfined
+  lxc start foo
+
+  lxc exec foo -- cat /proc/self/attr/current | grep unconfined
+  lxc exec foo -- ls /sys/class/net | grep eth0
+
+  lxc stop foo --force
+  lxc delete foo
+}
=== added file 'src/github.com/lxc/lxd/test/suites/database_update.sh'
--- src/github.com/lxc/lxd/test/suites/database_update.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/database_update.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+test_database_update(){
+  LXD_MIGRATE_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  MIGRATE_DB=${LXD_MIGRATE_DIR}/lxd.db
+
+  # Create the version 1 schema as the database
+  sqlite3 "${MIGRATE_DB}" > /dev/null < deps/schema1.sql
+
+  # Start an LXD daemon in the tmp directory. This should start the updates.
+  spawn_lxd "${LXD_MIGRATE_DIR}"
+
+  # Assert there are enough tables.
+  expected_tables=16
+  tables=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "CREATE TABLE")
+  [ "${tables}" -eq "${expected_tables}" ] || { echo "FAIL: Wrong number of tables after database migration. Found: ${tables}, expected ${expected_tables}"; false; }
+
+  # There should be 11 "ON DELETE CASCADE" occurrences
+  expected_cascades=11
+  cascades=$(sqlite3 "${MIGRATE_DB}" ".dump" | grep -c "ON DELETE CASCADE")
+  [ "${cascades}" -eq "${expected_cascades}" ] || { echo "FAIL: Wrong number of ON DELETE CASCADE foreign keys. Found: ${cascades}, expected: ${expected_cascades}"; false; }
+}
=== added file 'src/github.com/lxc/lxd/test/suites/deps.sh'
--- src/github.com/lxc/lxd/test/suites/deps.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/deps.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+test_check_deps() {
+  ! ldd "$(which lxc)" | grep -q liblxc
+}
=== added file 'src/github.com/lxc/lxd/test/suites/devlxd.sh'
--- src/github.com/lxc/lxd/test/suites/devlxd.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/devlxd.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+test_devlxd() {
+  ensure_import_testimage
+
+  cd "${TEST_DIR}"
+  go build -tags netgo -a -installsuffix devlxd ../deps/devlxd-client.go
+  cd -
+
+  lxc launch testimage devlxd
+
+  lxc file push "${TEST_DIR}/devlxd-client" devlxd/bin/
+
+  lxc exec devlxd chmod +x /bin/devlxd-client
+
+  lxc config set devlxd user.foo bar
+  lxc exec devlxd devlxd-client user.foo | grep bar
+
+  lxc stop devlxd --force
+}
=== added file 'src/github.com/lxc/lxd/test/suites/exec.sh'
--- src/github.com/lxc/lxd/test/suites/exec.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/exec.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+test_concurrent_exec() {
+  ensure_import_testimage
+
+  name=x1
+  lxc launch testimage x1
+  lxc list ${name} | grep RUNNING
+
+  exec_container() {
+    echo "abc${1}" | lxc exec "${name}" -- cat | grep abc
+  }
+
+  PIDS=""
+  for i in $(seq 1 50); do
+    exec_container "${i}" > "${LXD_DIR}/exec-${i}.out" 2>&1 &
+    PIDS="${PIDS} $!"
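+    # collect the PIDs of the backgrounded execs so the wait below
+    # surfaces any failure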
+  done
+
+  for pid in ${PIDS}; do
+    wait "${pid}"
+  done
+
+  lxc stop "${name}" --force
+  lxc delete "${name}"
+}
=== added file 'src/github.com/lxc/lxd/test/suites/fdleak.sh'
--- src/github.com/lxc/lxd/test/suites/fdleak.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/fdleak.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+test_fdleak() {
+  LXD_FDLEAK_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+  chmod +x "${LXD_FDLEAK_DIR}"
+  spawn_lxd "${LXD_FDLEAK_DIR}"
+  pid=$(cat "${LXD_FDLEAK_DIR}/lxd.pid")
+
+  beforefds=$(/bin/ls "/proc/${pid}/fd" | wc -l)
+  (
+    set -e
+    # shellcheck disable=SC2034
+    LXD_DIR=${LXD_FDLEAK_DIR}
+
+    ensure_import_testimage
+
+    for i in $(seq 5); do
+      lxc init testimage "leaktest${i}"
+      lxc info "leaktest${i}"
+      lxc start "leaktest${i}"
+      lxc exec "leaktest${i}" -- ps -ef
+      lxc stop "leaktest${i}" --force
+      lxc delete "leaktest${i}"
+    done
+
+    sleep 5
+
+    exit 0
+  )
+  afterfds=$(/bin/ls "/proc/${pid}/fd" | wc -l)
+  leakedfds=$((afterfds - beforefds))
+
+  bad=0
+  # shellcheck disable=SC2015
+  [ ${leakedfds} -gt 5 ] && bad=1 || true
+  if [ ${bad} -eq 1 ]; then
+    echo "${leakedfds} FDS leaked"
+    ls "/proc/${pid}/fd" -al
+    netstat -anp 2>&1 | grep "${pid}/"
+    false
+  fi
+
+  kill_lxd "${LXD_FDLEAK_DIR}"
+}
=== added file 'src/github.com/lxc/lxd/test/suites/filemanip.sh'
--- src/github.com/lxc/lxd/test/suites/filemanip.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/filemanip.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+test_filemanip() {
+  ensure_import_testimage
+
+  lxc launch testimage filemanip
+  lxc exec filemanip -- ln -s /tmp/ /tmp/outside
+  lxc file push main.sh filemanip/tmp/outside/
+
+  [ ! -f /tmp/main.sh ]
+  [ -f "${LXD_DIR}/containers/filemanip/rootfs/tmp/main.sh" ]
+
+  lxc delete filemanip -f
+}
=== added file 'src/github.com/lxc/lxd/test/suites/fuidshift.sh'
--- src/github.com/lxc/lxd/test/suites/fuidshift.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/fuidshift.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,56 @@
+#!/bin/sh
+
+test_common_fuidshift() {
+  # test some bad arguments
+  fail=0
+  fuidshift > /dev/null 2>&1 && fail=1
+  fuidshift -t > /dev/null 2>&1 && fail=1
+  fuidshift /tmp -t b:0 > /dev/null 2>&1 && fail=1
+  fuidshift /tmp -t x:0:0:0 > /dev/null 2>&1 && fail=1
+  [ "${fail}" -ne 1 ]
+}
+
+test_nonroot_fuidshift() {
+  test_common_fuidshift
+
+  LXD_FUIDMAP_DIR=$(mktemp -d -p "${TEST_DIR}" XXX)
+
+  u=$(id -u)
+  g=$(id -g)
+  u1=$((u+1))
+  g1=$((g+1))
+
+  touch "${LXD_FUIDMAP_DIR}/x1"
+  fuidshift "${LXD_FUIDMAP_DIR}/x1" -t "u:${u}:100000:1" "g:${g}:100000:1" | tee /dev/stderr | grep "to 100000 100000" > /dev/null || fail=1
+  if [ "${fail}" -eq 1 ]; then
+    echo "==> Failed to shift own uid to container root"
+    false
+  fi
+  fuidshift "${LXD_FUIDMAP_DIR}/x1" -t "u:${u1}:10000:1" "g:${g1}:100000:1" | tee /dev/stderr | grep "to -1 -1" > /dev/null || fail=1
+  if [ "${fail}" -eq 1 ]; then
+    echo "==> Wrongly shifted invalid uid to container root"
+    false
+  fi
+
+  # unshift it
+  chown 100000:100000 "${LXD_FUIDMAP_DIR}/x1"
+  fuidshift "${LXD_FUIDMAP_DIR}/x1" -r -t "u:${u}:100000:1" "g:${g}:100000:1" | tee /dev/stderr | grep "to 0 0" > /dev/null || fail=1
+  if [ "${fail}" -eq 1 ]; then
+    echo "==> Failed to unshift container root back to own uid"
+    false
+  fi
+}
+
+test_root_fuidshift() {
+  test_nonroot_fuidshift
+
+  # Todo - test ranges
+}
+
+test_fuidshift() {
+  if [ "$(id -u)" -ne 0 ]; then
+    test_nonroot_fuidshift
+  else
+    test_root_fuidshift
+  fi
+}
=== added file 'src/github.com/lxc/lxd/test/suites/image.sh'
--- src/github.com/lxc/lxd/test/suites/image.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/image.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+test_image_expiry() {
+  ensure_import_testimage
+
+  if ! lxc_remote remote list | grep -q l1; then
+    lxc_remote remote add l1 "${LXD_ADDR}" --accept-certificate --password foo
+  fi
+  if ! lxc_remote remote list | grep -q l2; then
+    lxc_remote remote add l2 "${LXD2_ADDR}" --accept-certificate --password foo
+  fi
+  lxc_remote init l1:testimage l2:c1
+  fp=$(lxc_remote image info testimage | awk -F: '/^Fingerprint/ { print $2 }' | awk '{ print $1 }')
+  [ ! -z "${fp}" ]
+  fpbrief=$(echo "${fp}" | cut -c 1-10)
+
+  lxc_remote image list l2: | grep -q "${fpbrief}"
+
+  lxc_remote remote set-default l2
+  lxc_remote config set images.remote_cache_expiry 0
+  lxc_remote remote set-default local
+
+  ! lxc_remote image list l2: | grep -q "${fpbrief}"
+
+  lxc_remote delete l2:c1
+
+  # reset the default expiry
+  lxc_remote remote set-default l2
+  lxc_remote config set images.remote_cache_expiry 10
+  lxc_remote remote set-default local
+}
=== added file 'src/github.com/lxc/lxd/test/suites/migration.sh'
--- src/github.com/lxc/lxd/test/suites/migration.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/migration.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,71 @@
+#!/bin/sh
+
+test_migration() {
+  ensure_import_testimage
+
+  if ! lxc_remote remote list | grep -q l1; then
+    lxc_remote remote add l1 "${LXD_ADDR}" --accept-certificate --password foo
+  fi
+  if ! lxc_remote remote list | grep -q l2; then
+    lxc_remote remote add l2 "${LXD2_ADDR}" --accept-certificate --password foo
+  fi
+
+  lxc_remote init testimage nonlive
+  # test moving snapshots
+  lxc_remote snapshot l1:nonlive
+  lxc_remote move l1:nonlive l2:
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" != "lvm" ]; then
+    [ -d "${LXD2_DIR}/containers/nonlive/rootfs" ]
+  fi
+  [ ! -d "${LXD_DIR}/containers/nonlive" ]
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" = "dir" ]; then
+    [ -d "${LXD2_DIR}/snapshots/nonlive/snap0/rootfs/bin" ]
+  fi
+
+  lxc_remote copy l2:nonlive l1:nonlive2
+  [ -d "${LXD_DIR}/containers/nonlive2" ]
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" != "lvm" ]; then
+    [ -d "${LXD2_DIR}/containers/nonlive/rootfs/bin" ]
+  fi
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" = "dir" ]; then
+    [ -d "${LXD_DIR}/snapshots/nonlive2/snap0/rootfs/bin" ]
+  fi
+
+  lxc_remote copy l1:nonlive2/snap0 l2:nonlive3
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" != "lvm" ]; then
+    [ -d "${LXD2_DIR}/containers/nonlive3/rootfs/bin" ]
+  fi
+
+  lxc_remote copy l2:nonlive l2:nonlive2
+  # should have the same base image tag
+  [ "$(lxc_remote config get l2:nonlive volatile.base_image)" = "$(lxc_remote config get l2:nonlive2 volatile.base_image)" ]
+  # check that nonlive2 has a new addr in volatile
+  [ "$(lxc_remote config get l2:nonlive volatile.eth0.hwaddr)" != "$(lxc_remote config get l2:nonlive2 volatile.eth0.hwaddr)" ]
+
+  lxc_remote config unset l2:nonlive volatile.base_image
+  lxc_remote copy l2:nonlive l1:nobase
+  lxc_remote delete l1:nobase
+
+  lxc_remote start l1:nonlive2
+  lxc_remote list l1: | grep RUNNING | grep nonlive2
+  lxc_remote stop l1:nonlive2 --force
+
+  lxc_remote start l2:nonlive
+  lxc_remote list l2: | grep RUNNING | grep nonlive
+  lxc_remote stop l2:nonlive --force
+
+  if !
which criu >/dev/null 2>&1; then + echo "==> SKIP: live migration with CRIU (missing binary)" + return + fi + + lxc_remote launch testimage migratee + + lxc_remote move l1:migratee l2:migratee + lxc_remote stop l2:migratee --force +} === added file 'src/github.com/lxc/lxd/test/suites/profiling.sh' --- src/github.com/lxc/lxd/test/suites/profiling.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/suites/profiling.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,41 @@ +#!/bin/sh + +test_cpu_profiling() { + LXD3_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD3_DIR}" + spawn_lxd "${LXD3_DIR}" --cpuprofile "${LXD3_DIR}/cpu.out" + lxdpid=$(cat "${LXD3_DIR}/lxd.pid") + kill -TERM "${lxdpid}" + wait "${lxdpid}" || true + export PPROF_TMPDIR="${TEST_DIR}/pprof" + echo top5 | go tool pprof "$(which lxd)" "${LXD3_DIR}/cpu.out" + echo "" + + kill_lxd "${LXD3_DIR}" +} + +test_mem_profiling() { + LXD4_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + chmod +x "${LXD4_DIR}" + spawn_lxd "${LXD4_DIR}" --memprofile "${LXD4_DIR}/mem" + lxdpid=$(cat "${LXD4_DIR}/lxd.pid") + + if [ -e "${LXD4_DIR}/mem" ]; then + false + fi + + kill -USR1 "${lxdpid}" + + timeout=50 + while [ "${timeout}" != "0" ]; do + [ -e "${LXD4_DIR}/mem" ] && break + sleep 0.1 + timeout=$((timeout-1)) + done + + export PPROF_TMPDIR="${TEST_DIR}/pprof" + echo top5 | go tool pprof "$(which lxd)" "${LXD4_DIR}/mem" + echo "" + + kill_lxd "${LXD4_DIR}" +} === added file 'src/github.com/lxc/lxd/test/suites/remote.sh' --- src/github.com/lxc/lxd/test/suites/remote.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/suites/remote.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,141 @@ +#!/bin/sh + +gen_second_cert() { + [ -f "${LXD_CONF}/client2.crt" ] && return + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client.crt.bak" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/client.key.bak" + lxc_remote list > /dev/null 2>&1 + mv "${LXD_CONF}/client.crt" "${LXD_CONF}/client2.crt" + mv "${LXD_CONF}/client.key" "${LXD_CONF}/client2.key" + mv "${LXD_CONF}/client.crt.bak" "${LXD_CONF}/client.crt" + mv "${LXD_CONF}/client.key.bak" "${LXD_CONF}/client.key" +} + +test_remote_url() { + for url in "${LXD_ADDR}" "https://${LXD_ADDR}"; do + lxc_remote remote add test "${url}" --accept-certificate --password foo + lxc_remote finger test: + lxc_remote config trust list | grep @ | awk '{print $2}' | while read line ; do + lxc_remote config trust remove "\"${line}\"" + done + lxc_remote remote remove test + done + + urls="${LXD_DIR}/unix.socket unix:${LXD_DIR}/unix.socket unix://${LXD_DIR}/unix.socket" + if [ -z "${LXD_OFFLINE:-}" ]; then + urls="images.linuxcontainers.org https://images.linuxcontainers.org ${urls}" + fi + + for url in ${urls}; do + lxc_remote remote add test "${url}" + lxc_remote finger test: + lxc_remote remote remove test + done +} + +test_remote_admin() { + lxc_remote remote add badpass "${LXD_ADDR}" --accept-certificate --password bad || true + ! lxc_remote list badpass: + + lxc_remote remote add localhost "${LXD_ADDR}" --accept-certificate --password foo + lxc_remote remote list | grep 'localhost' + + lxc_remote remote set-default localhost + [ "$(lxc_remote remote get-default)" = "localhost" ] + + lxc_remote remote rename localhost foo + lxc_remote remote list | grep 'foo' + lxc_remote remote list | grep -v 'localhost' + [ "$(lxc_remote remote get-default)" = "foo" ] + + ! 
lxc_remote remote remove foo + lxc_remote remote set-default local + lxc_remote remote remove foo + + # This is a test for #91, we expect this to hang asking for a password if we + # tried to re-add our cert. + echo y | lxc_remote remote add localhost "${LXD_ADDR}" + + # we just re-add our cert under a different name to test the cert + # manipulation mechanism. + gen_second_cert + + # Test for #623 + lxc_remote remote add test-623 "${LXD_ADDR}" --accept-certificate --password foo + + # now re-add under a different alias + lxc_remote config trust add "${LXD_CONF}/client2.crt" + if [ "$(lxc_remote config trust list | wc -l)" -ne 7 ]; then + echo "wrong number of certs" + false + fi + + # Check that we can add domains with valid certs without confirmation: + + # avoid default high port behind some proxies: + if [ -z "${LXD_OFFLINE:-}" ]; then + lxc_remote remote add images1 images.linuxcontainers.org + lxc_remote remote add images2 images.linuxcontainers.org:443 + fi +} + +test_remote_usage() { + lxc_remote remote add lxd2 "${LXD2_ADDR}" --accept-certificate --password foo + + # we need a public image on localhost + lxc_remote image export localhost:testimage "${LXD_DIR}/foo.img" + lxc_remote image delete localhost:testimage + sum=$(sha256sum "${LXD_DIR}/foo.img" | cut -d' ' -f1) + lxc_remote image import "${LXD_DIR}/foo.img" localhost: --public + lxc_remote image alias create localhost:testimage "${sum}" + + lxc_remote image delete "lxd2:${sum}" || true + + lxc_remote image copy localhost:testimage lxd2: --copy-aliases --public + lxc_remote image delete "localhost:${sum}" + lxc_remote image copy "lxd2:${sum}" local: --copy-aliases --public + lxc_remote image info localhost:testimage + lxc_remote image delete "lxd2:${sum}" + + lxc_remote image copy "localhost:${sum}" lxd2: + lxc_remote image delete "lxd2:${sum}" + + lxc_remote image copy "localhost:$(echo "${sum}" | colrm 3)" lxd2: + lxc_remote image delete "lxd2:${sum}" + + # test a private image + lxc_remote image copy "localhost:${sum}" lxd2: + lxc_remote image delete "localhost:${sum}" + lxc_remote init "lxd2:${sum}" localhost:c1 + lxc_remote delete localhost:c1 + + lxc_remote image alias create localhost:testimage "${sum}" + + # test remote publish + lxc_remote init testimage pub + lxc_remote publish pub lxd2: --alias bar --public a=b + lxc_remote image show lxd2:bar | grep -q "a: b" + lxc_remote image show lxd2:bar | grep -q "public: true" + ! lxc_remote image show bar + lxc_remote delete pub + lxc_remote image delete lxd2:bar + + # Double launch to test if the image downloads only once. + lxc_remote init localhost:testimage lxd2:c1 & + C1PID=$! 
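+ # While c1's image transfer is still in flight, start a second init; if
+ # the download is shared, c2 should reuse that transfer instead of
+ # fetching the image again.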
+ + lxc_remote init localhost:testimage lxd2:c2 + lxc_remote delete lxd2:c2 + + wait "${C1PID}" + lxc_remote delete lxd2:c1 + + # launch testimage stored on localhost as container c1 on lxd2 + lxc_remote launch localhost:testimage lxd2:c1 + + # make sure it is running + lxc_remote list lxd2: | grep c1 | grep RUNNING + lxc_remote info lxd2:c1 + lxc_remote stop lxd2:c1 --force + lxc_remote delete lxd2:c1 +} === added file 'src/github.com/lxc/lxd/test/suites/serverconfig.sh' --- src/github.com/lxc/lxd/test/suites/serverconfig.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/suites/serverconfig.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +#!/bin/sh + +test_server_config() { + LXD_SERVERCONFIG_DIR=$(mktemp -d -p "${TEST_DIR}" XXX) + spawn_lxd "${LXD_SERVERCONFIG_DIR}" + + lxc config set core.trust_password 123456 + + config=$(lxc config show) + echo "${config}" | grep -q "trust_password" + echo "${config}" | grep -q -v "123456" + + lxc config unset core.trust_password + lxc config show | grep -q -v "trust_password" + + # test untrusted server GET + my_curl -X GET "https://$(cat "${LXD_SERVERCONFIG_DIR}/lxd.addr")/1.0" | grep -v -q environment +} === added file 'src/github.com/lxc/lxd/test/suites/snapshots.sh' --- src/github.com/lxc/lxd/test/suites/snapshots.sh 1970-01-01 00:00:00 +0000 +++ src/github.com/lxc/lxd/test/suites/snapshots.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,177 @@ +#!/bin/sh + +test_snapshots() { + ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" + + lxc init testimage foo + + lxc snapshot foo + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ -d "${LXD_DIR}/snapshots/foo/snap0" ] + fi + + lxc snapshot foo + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ -d "${LXD_DIR}/snapshots/foo/snap1" ] + fi + + lxc snapshot foo tester + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ -d "${LXD_DIR}/snapshots/foo/tester" ] + fi + + lxc copy foo/tester foosnap1 + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" != "lvm" ]; then + [ -d "${LXD_DIR}/containers/foosnap1/rootfs" ] + fi + + lxc delete foo/snap0 + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ ! -d "${LXD_DIR}/snapshots/foo/snap0" ] + fi + + # no CLI for this, so we use the API directly (rename a snapshot) + wait_for "${LXD_ADDR}" my_curl -X POST "https://${LXD_ADDR}/1.0/containers/foo/snapshots/tester" -d "{\"name\":\"tester2\"}" + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ ! -d "${LXD_DIR}/snapshots/foo/tester" ] + fi + + lxc move foo/tester2 foo/tester-two + lxc delete foo/tester-two + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ ! -d "${LXD_DIR}/snapshots/foo/tester-two" ] + fi + + lxc snapshot foo namechange + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ -d "${LXD_DIR}/snapshots/foo/namechange" ] + fi + lxc move foo foople + [ ! -d "${LXD_DIR}/containers/foo" ] + [ -d "${LXD_DIR}/containers/foople" ] + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + [ -d "${LXD_DIR}/snapshots/foople/namechange" ] + [ -d "${LXD_DIR}/snapshots/foople/namechange" ] + fi + + lxc delete foople + lxc delete foosnap1 + [ ! -d "${LXD_DIR}/containers/foople" ] + [ ! 
-d "${LXD_DIR}/containers/foosnap1" ] +} + +test_snap_restore() { + ensure_import_testimage + ensure_has_localhost_remote "${LXD_ADDR}" + + ########################################################## + # PREPARATION + ########################################################## + + ## create some state we will check for when snapshot is restored + + ## prepare snap0 + lxc launch testimage bar + echo snap0 > state + lxc file push state bar/root/state + lxc file push state bar/root/file_only_in_snap0 + + mkdir "${LXD_DIR}/containers/bar/rootfs/root/dir_only_in_snap0" + cd "${LXD_DIR}/containers/bar/rootfs/root/" + ln -s ./file_only_in_snap0 statelink + cd - + lxc stop bar --force + + lxc snapshot bar snap0 + + ## prepare snap1 + lxc start bar + echo snap1 > state + lxc file push state bar/root/state + lxc file push state bar/root/file_only_in_snap1 + + cd "${LXD_DIR}/containers/bar/rootfs/root/" + rmdir dir_only_in_snap0 + rm file_only_in_snap0 + rm statelink + ln -s ./file_only_in_snap1 statelink + mkdir dir_only_in_snap1 + cd - + lxc stop bar --force + + # Delete the state file we created to prevent leaking. + rm state + + lxc config set bar limits.cpu 1 + + lxc snapshot bar snap1 + + ########################################################## + + # FIXME: make this backend agnostic + if [ "${LXD_BACKEND}" = "dir" ]; then + # The problem here is that you can't `zfs rollback` to a snapshot with a + # parent, which snap0 has (snap1). + restore_and_compare_fs snap0 + + # Check container config has been restored (limits.cpu is unset) + cpus=$(lxc config get bar limits.cpu) + if [ "${cpus}" != "limits.cpu: " ]; then + echo "==> config didn't match expected value after restore (${cpus})" + false + fi + fi + + ########################################################## + + # test restore using full snapshot name + restore_and_compare_fs snap1 + + # Check config value in snapshot has been restored + cpus=$(lxc config get bar limits.cpu) + if [ "${cpus}" != "limits.cpu: 1" ]; then + echo "==> config didn't match expected value after restore (${cpus})" + false + fi + + ########################################################## + + # Start container and then restore snapshot to verify the running state after restore. 
+  lxc start bar
+
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" = "dir" ]; then
+    # see comment above about snap0
+    restore_and_compare_fs snap0
+
+    # check container is running after restore
+    lxc list | grep bar | grep RUNNING
+  fi
+
+  lxc stop --force bar
+
+  lxc delete bar
+}
+
+restore_and_compare_fs() {
+  snap=${1}
+  echo "==> Restoring ${snap}"
+
+  lxc restore bar "${snap}"
+
+  # FIXME: make this backend agnostic
+  if [ "${LXD_BACKEND}" = "dir" ]; then
+    # Recursive diff of container FS
+    diff -r "${LXD_DIR}/containers/bar/rootfs" "${LXD_DIR}/snapshots/bar/${snap}/rootfs"
+  fi
+}

=== added file 'src/github.com/lxc/lxd/test/suites/static_analysis.sh'
--- src/github.com/lxc/lxd/test/suites/static_analysis.sh 1970-01-01 00:00:00 +0000
+++ src/github.com/lxc/lxd/test/suites/static_analysis.sh 2016-03-22 15:18:22 +0000
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+safe_pot_hash() {
+  sed -e "/Project-Id-Version/,/Content-Transfer-Encoding/d" -e "/^#/d" "po/lxd.pot" | tee /tmp/foo | md5sum | cut -f1 -d" "
+}
+
+test_static_analysis() {
+  (
+    set -e
+
+    cd ../
+    # Python3 static analysis
+    pep8 test/deps/import-busybox scripts/lxd-setup-lvm-storage
+    pyflakes3 test/deps/import-busybox scripts/lxd-setup-lvm-storage
+
+    # Shell static analysis
+    shellcheck lxd-bridge/lxd-bridge test/main.sh test/suites/* test/backends/*
+
+    # Go static analysis
+    ## Functions starting with an empty line
+    OUT=$(grep -r "^$" -B1 . | grep "func " | grep -v "}$" || true)
+    if [ -n "${OUT}" ]; then
+      echo "${OUT}"
+      false
+    fi
+
+    ## go vet, if it exists
+    have_go_vet=1
+    go help vet > /dev/null 2>&1 || have_go_vet=0
+    if [ "${have_go_vet}" -eq 1 ]; then
+      go vet ./...
+    fi
+
+    ## vet
+    if which vet >/dev/null 2>&1; then
+      vet --all .
+    fi
+
+    ## deadcode
+    if which deadcode >/dev/null 2>&1; then
+      for path in . lxc/ lxd/ shared/ shared/i18n shared/termios fuidshift/ lxd-bridge/lxd-bridge-proxy/; do
+        OUT=$(deadcode ${path} 2>&1 | grep -v lxd/migrate.pb.go || true)
+        if [ -n "${OUT}" ]; then
+          echo "${OUT}" >&2
+          false
+        fi
+      done
+    fi
+
+    # Skip the tests which require git
+    if ! git status; then
+      return
+    fi
+
+    # go fmt
+    git add -u :/
+    go fmt ./...
+    git diff --exit-code
+
+    # make sure the .pot is updated
+    cp --preserve "po/lxd.pot" "po/lxd.pot.bak"
+    hash1=$(safe_pot_hash)
+    make i18n -s
+    hash2=$(safe_pot_hash)
+    mv "po/lxd.pot.bak" "po/lxd.pot"
+    if [ "${hash1}" != "${hash2}" ]; then
+      echo "==> Please update the .pot file in your commit (make i18n)" && false
+    fi
+  )
+}

=== added directory 'src/github.com/mattn'
=== added directory 'src/github.com/mattn/go-colorable'
=== added file 'src/github.com/mattn/go-colorable/README.md'
--- src/github.com/mattn/go-colorable/README.md 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-colorable/README.md 2016-03-22 15:18:22 +0000
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for Windows.
+
+Most logger packages don't show colors on Windows, for example. (I know ansicon can do it, but I'd rather not depend on it.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+You can compile the above code on non-Windows OSes as well.
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)

=== added directory 'src/github.com/mattn/go-colorable/_example'
=== added file 'src/github.com/mattn/go-colorable/_example/main.go'
--- src/github.com/mattn/go-colorable/_example/main.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-colorable/_example/main.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,16 @@
+package main
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/mattn/go-colorable"
+)
+
+func main() {
+	logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+	logrus.SetOutput(colorable.NewColorableStdout())
+
+	logrus.Info("succeeded")
+	logrus.Warn("not correct")
+	logrus.Error("something error")
+	logrus.Fatal("panic")
+}

=== added file 'src/github.com/mattn/go-colorable/colorable_others.go'
--- src/github.com/mattn/go-colorable/colorable_others.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-colorable/colorable_others.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,16 @@
+// +build !windows
+
+package colorable
+
+import (
+	"io"
+	"os"
+)
+
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}

=== added file 'src/github.com/mattn/go-colorable/colorable_windows.go'
--- src/github.com/mattn/go-colorable/colorable_windows.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-colorable/colorable_windows.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,594 @@
+package colorable
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/mattn/go-isatty"
+)
+
+const (
+	foregroundBlue      = 0x1
+	foregroundGreen     = 0x2
+	foregroundRed       = 0x4
+	foregroundIntensity = 0x8
+	foregroundMask      = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+	backgroundBlue      = 0x10
+	backgroundGreen     = 0x20
+	backgroundRed       = 0x40
+	backgroundIntensity = 0x80
+	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+	x short
+	y short
+}
+
+type smallRect struct {
+	left   short
+	top    short
+	right  short
+	bottom short
+}
+
+type consoleScreenBufferInfo struct {
+	size              coord
+	cursorPosition    coord
+	attributes        word
+	window            smallRect
+	maximumWindowSize coord
+}
+
+var (
+	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
+)
+
+type Writer struct {
+	out     io.Writer
+	handle  syscall.Handle
+	lastbuf bytes.Buffer
+	oldattr word
+}
+
+func NewColorableStdout() io.Writer {
+	var csbi consoleScreenBufferInfo
+	out := os.Stdout
+	if !isatty.IsTerminal(out.Fd()) {
+		return out
+	}
+	handle := syscall.Handle(out.Fd())
+	procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+	return &Writer{out: out, handle: handle, oldattr: csbi.attributes}
+}
+
+func NewColorableStderr() io.Writer {
+	var csbi consoleScreenBufferInfo
+	out := os.Stderr
+	if 
!isatty.IsTerminal(out.Fd()) { + return out + } + handle := syscall.Handle(out.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: out, handle: handle, oldattr: csbi.attributes} +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 
0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + switch m { + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i, ns := range token { + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. 
+						if i < len(token)-2 && token[i+1] == "5" {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256backAttr == nil {
+									n256setup()
+								}
+								attr &= foregroundMask
+								attr |= n256backAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & foregroundMask)
+						}
+					case n == 49: // reset background color.
+						attr &= foregroundMask
+						attr |= w.oldattr & backgroundMask
+					}
+					procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+				}
+			}
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+	red       bool
+	green     bool
+	blue      bool
+	intensity bool
+}
+
+func minmax3(a, b, c int) (min, max int) {
+	if a < b {
+		if b < c {
+			return a, c
+		} else if a < c {
+			return a, b
+		} else {
+			return c, b
+		}
+	} else {
+		if a < c {
+			return b, c
+		} else if b < c {
+			return b, a
+		} else {
+			return c, a
+		}
+	}
+}
+
+func toConsoleColor(rgb int) (c consoleColor) {
+	r, g, b := (rgb&0xFF0000)>>16, (rgb&0x00FF00)>>8, rgb&0x0000FF
+	min, max := minmax3(r, g, b)
+	a := (min + max) / 2
+	if r < 128 && g < 128 && b < 128 {
+		if r >= a {
+			c.red = true
+		}
+		if g >= a {
+			c.green = true
+		}
+		if b >= a {
+			c.blue = true
+		}
+		// non-intense white is lighter than intense black, so swap those.
+		if c.red && c.green && c.blue {
+			c.red, c.green, c.blue = false, false, false
+			c.intensity = true
+		}
+	} else {
+		if min < 128 {
+			min = 128
+			a = (min + max) / 2
+		}
+		if r >= a {
+			c.red = true
+		}
+		if g >= a {
+			c.green = true
+		}
+		if b >= a {
+			c.blue = true
+		}
+		c.intensity = true
+		// intense black is darker than non-intense white, so swap those.
+		if !c.red && !c.green && !c.blue {
+			c.red, c.green, c.blue = true, true, true
+			c.intensity = false
+		}
+	}
+	return c
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+	if c.red {
+		attr |= foregroundRed
+	}
+	if c.green {
+		attr |= foregroundGreen
+	}
+	if c.blue {
+		attr |= foregroundBlue
+	}
+	if c.intensity {
+		attr |= foregroundIntensity
+	}
+	return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+	if c.red {
+		attr |= backgroundRed
+	}
+	if c.green {
+		attr |= backgroundGreen
+	}
+	if c.blue {
+		attr |= backgroundBlue
+	}
+	if c.intensity {
+		attr |= backgroundIntensity
+	}
+	return
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+	n256foreAttr = make([]word, 256)
+	n256backAttr = make([]word, 256)
+	for i, rgb := range color256 {
+		c := toConsoleColor(rgb)
+		n256foreAttr[i] = c.foregroundAttr()
+		n256backAttr[i] = c.backgroundAttr()
+	}
+}

=== added directory 'src/github.com/mattn/go-isatty'
=== added file 'src/github.com/mattn/go-isatty/LICENSE'
--- src/github.com/mattn/go-isatty/LICENSE 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/LICENSE 2016-03-22 15:18:22 +0000
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

=== added file 'src/github.com/mattn/go-isatty/README.md'
--- src/github.com/mattn/go-isatty/README.md 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/README.md 2016-03-22 15:18:22 +0000
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)

=== added file 'src/github.com/mattn/go-isatty/doc.go'
--- src/github.com/mattn/go-isatty/doc.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/doc.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty
+package isatty

=== added file 'src/github.com/mattn/go-isatty/isatty_bsd.go'
--- src/github.com/mattn/go-isatty/isatty_bsd.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/isatty_bsd.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,17 @@
+// +build darwin freebsd openbsd netbsd
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}

=== added file 'src/github.com/mattn/go-isatty/isatty_linux.go'
--- src/github.com/mattn/go-isatty/isatty_linux.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/isatty_linux.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,17 @@
+// +build linux
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}

=== added file 'src/github.com/mattn/go-isatty/isatty_solaris.go'
--- src/github.com/mattn/go-isatty/isatty_solaris.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/isatty_solaris.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,15 @@
+// +build solaris
+
+package isatty
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}

=== added file 'src/github.com/mattn/go-isatty/isatty_windows.go'
--- src/github.com/mattn/go-isatty/isatty_windows.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-isatty/isatty_windows.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,18 @@
+// +build windows
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}

=== added directory 'src/github.com/mattn/go-runewidth'
=== added file 'src/github.com/mattn/go-runewidth/.travis.yml'
--- src/github.com/mattn/go-runewidth/.travis.yml 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/.travis.yml 2016-03-22 15:18:22 +0000
@@ -0,0 +1,9 @@
+language: go
+go:
+  - tip
+before_install:
+  - go get github.com/axw/gocov/gocov
+  - go get github.com/mattn/goveralls
+  - go get golang.org/x/tools/cmd/cover
+script:
+  - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL

=== added file 'src/github.com/mattn/go-runewidth/README.mkd'
--- src/github.com/mattn/go-runewidth/README.mkd 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/README.mkd 2016-03-22 15:18:22 +0000
@@ -0,0 +1,25 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
+
+Provides functions to get the fixed width of a character or string.
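+
+For example, a small self-contained program (an illustrative sketch, assuming only the Condition API defined in runewidth.go below) that pins the ambiguous-width policy explicitly rather than deriving it from the locale:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mattn/go-runewidth"
+)
+
+func main() {
+	// Fix the ambiguous-width policy explicitly instead of deriving it
+	// from LC_CTYPE/LANG, so the results are deterministic.
+	c := runewidth.NewCondition()
+	c.EastAsianWidth = true
+
+	fmt.Println(c.RuneWidth('ｱ'))  // 1: half-width katakana occupies one cell
+	fmt.Println(c.RuneWidth('あ')) // 2: hiragana occupies two cells
+	fmt.Println(c.RuneWidth('☆')) // 2: ambiguous-width runes count as wide here
+
+	fmt.Println(c.StringWidth("つのだ☆HIRO"))        // 12
+	fmt.Println(c.Truncate("こんにちは世界", 8, "...")) // "こん..." (fits 8 cells, tail included)
+}
+```
+
+With `EastAsianWidth` left at its locale-derived default, the same calls would report width 1 for ambiguous runes such as `☆` under a non-East-Asian locale.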
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013

=== added file 'src/github.com/mattn/go-runewidth/runewidth.go'
--- src/github.com/mattn/go-runewidth/runewidth.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/runewidth.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,464 @@
+package runewidth
+
+var EastAsianWidth = IsEastAsian()
+var DefaultCondition = &Condition{EastAsianWidth}
+
+type interval struct {
+	first rune
+	last  rune
+}
+
+var combining = []interval{
+	{0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489},
+	{0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2},
+	{0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603},
+	{0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670},
+	{0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED},
+	{0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A},
+	{0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902},
+	{0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D},
+	{0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981},
+	{0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD},
+	{0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C},
+	{0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+	{0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC},
+	{0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD},
+	{0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C},
+	{0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D},
+	{0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0},
+	{0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48},
+	{0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC},
+	{0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD},
+	{0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D},
+	{0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6},
+	{0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
+	{0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+	{0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
+	{0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F71, 0x0F7E},
+	{0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97},
+	{0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030},
+	{0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039},
+	{0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F},
+	{0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
+	{0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD},
+	{0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD},
+	{0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922},
+	{0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B},
+	{0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34},
+	{0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42},
+	{0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF},
+	{0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063},
+	{0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F},
+	{0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B},
+	{0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F},
+	{0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB},
+	{0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F},
+	{0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169},
+	{0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+	{0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+	{0xE0100, 0xE01EF},
+}
+
+type ctype int
+
+const (
+	narrow ctype = iota
+	ambiguous
+	wide
+	halfwidth
+	fullwidth
+	neutral
+)
+
+type intervalType struct {
+	first rune
+	last  rune
+	ctype ctype
+}
+
+var ctypes = []intervalType{
+	{0x0020, 0x007E, narrow},
{0x00A1, 0x00A1, ambiguous}, + {0x00A2, 0x00A3, narrow}, + {0x00A4, 0x00A4, ambiguous}, + {0x00A5, 0x00A6, narrow}, + {0x00A7, 0x00A8, ambiguous}, + {0x00AA, 0x00AA, ambiguous}, + {0x00AC, 0x00AC, narrow}, + {0x00AD, 0x00AE, ambiguous}, + {0x00AF, 0x00AF, narrow}, + {0x00B0, 0x00B4, ambiguous}, + {0x00B6, 0x00BA, ambiguous}, + {0x00BC, 0x00BF, ambiguous}, + {0x00C6, 0x00C6, ambiguous}, + {0x00D0, 0x00D0, ambiguous}, + {0x00D7, 0x00D8, ambiguous}, + {0x00DE, 0x00E1, ambiguous}, + {0x00E6, 0x00E6, ambiguous}, + {0x00E8, 0x00EA, ambiguous}, + {0x00EC, 0x00ED, ambiguous}, + {0x00F0, 0x00F0, ambiguous}, + {0x00F2, 0x00F3, ambiguous}, + {0x00F7, 0x00FA, ambiguous}, + {0x00FC, 0x00FC, ambiguous}, + {0x00FE, 0x00FE, ambiguous}, + {0x0101, 0x0101, ambiguous}, + {0x0111, 0x0111, ambiguous}, + {0x0113, 0x0113, ambiguous}, + {0x011B, 0x011B, ambiguous}, + {0x0126, 0x0127, ambiguous}, + {0x012B, 0x012B, ambiguous}, + {0x0131, 0x0133, ambiguous}, + {0x0138, 0x0138, ambiguous}, + {0x013F, 0x0142, ambiguous}, + {0x0144, 0x0144, ambiguous}, + {0x0148, 0x014B, ambiguous}, + {0x014D, 0x014D, ambiguous}, + {0x0152, 0x0153, ambiguous}, + {0x0166, 0x0167, ambiguous}, + {0x016B, 0x016B, ambiguous}, + {0x01CE, 0x01CE, ambiguous}, + {0x01D0, 0x01D0, ambiguous}, + {0x01D2, 0x01D2, ambiguous}, + {0x01D4, 0x01D4, ambiguous}, + {0x01D6, 0x01D6, ambiguous}, + {0x01D8, 0x01D8, ambiguous}, + {0x01DA, 0x01DA, ambiguous}, + {0x01DC, 0x01DC, ambiguous}, + {0x0251, 0x0251, ambiguous}, + {0x0261, 0x0261, ambiguous}, + {0x02C4, 0x02C4, ambiguous}, + {0x02C7, 0x02C7, ambiguous}, + {0x02C9, 0x02CB, ambiguous}, + {0x02CD, 0x02CD, ambiguous}, + {0x02D0, 0x02D0, ambiguous}, + {0x02D8, 0x02DB, ambiguous}, + {0x02DD, 0x02DD, ambiguous}, + {0x02DF, 0x02DF, ambiguous}, + {0x0300, 0x036F, ambiguous}, + {0x0391, 0x03A2, ambiguous}, + {0x03A3, 0x03A9, ambiguous}, + {0x03B1, 0x03C1, ambiguous}, + {0x03C3, 0x03C9, ambiguous}, + {0x0401, 0x0401, ambiguous}, + {0x0410, 0x044F, ambiguous}, + {0x0451, 0x0451, ambiguous}, + {0x1100, 0x115F, wide}, + {0x2010, 0x2010, ambiguous}, + {0x2013, 0x2016, ambiguous}, + {0x2018, 0x2019, ambiguous}, + {0x201C, 0x201D, ambiguous}, + {0x2020, 0x2022, ambiguous}, + {0x2024, 0x2027, ambiguous}, + {0x2030, 0x2030, ambiguous}, + {0x2032, 0x2033, ambiguous}, + {0x2035, 0x2035, ambiguous}, + {0x203B, 0x203B, ambiguous}, + {0x203E, 0x203E, ambiguous}, + {0x2074, 0x2074, ambiguous}, + {0x207F, 0x207F, ambiguous}, + {0x2081, 0x2084, ambiguous}, + {0x20A9, 0x20A9, halfwidth}, + {0x20AC, 0x20AC, ambiguous}, + {0x2103, 0x2103, ambiguous}, + {0x2105, 0x2105, ambiguous}, + {0x2109, 0x2109, ambiguous}, + {0x2113, 0x2113, ambiguous}, + {0x2116, 0x2116, ambiguous}, + {0x2121, 0x2122, ambiguous}, + {0x2126, 0x2126, ambiguous}, + {0x212B, 0x212B, ambiguous}, + {0x2153, 0x2154, ambiguous}, + {0x215B, 0x215E, ambiguous}, + {0x2160, 0x216B, ambiguous}, + {0x2170, 0x2179, ambiguous}, + {0x2189, 0x218A, ambiguous}, + {0x2190, 0x2199, ambiguous}, + {0x21B8, 0x21B9, ambiguous}, + {0x21D2, 0x21D2, ambiguous}, + {0x21D4, 0x21D4, ambiguous}, + {0x21E7, 0x21E7, ambiguous}, + {0x2200, 0x2200, ambiguous}, + {0x2202, 0x2203, ambiguous}, + {0x2207, 0x2208, ambiguous}, + {0x220B, 0x220B, ambiguous}, + {0x220F, 0x220F, ambiguous}, + {0x2211, 0x2211, ambiguous}, + {0x2215, 0x2215, ambiguous}, + {0x221A, 0x221A, ambiguous}, + {0x221D, 0x2220, ambiguous}, + {0x2223, 0x2223, ambiguous}, + {0x2225, 0x2225, ambiguous}, + {0x2227, 0x222C, ambiguous}, + {0x222E, 0x222E, ambiguous}, + {0x2234, 0x2237, ambiguous}, + {0x223C, 0x223D, ambiguous}, + 
{0x2248, 0x2248, ambiguous}, + {0x224C, 0x224C, ambiguous}, + {0x2252, 0x2252, ambiguous}, + {0x2260, 0x2261, ambiguous}, + {0x2264, 0x2267, ambiguous}, + {0x226A, 0x226B, ambiguous}, + {0x226E, 0x226F, ambiguous}, + {0x2282, 0x2283, ambiguous}, + {0x2286, 0x2287, ambiguous}, + {0x2295, 0x2295, ambiguous}, + {0x2299, 0x2299, ambiguous}, + {0x22A5, 0x22A5, ambiguous}, + {0x22BF, 0x22BF, ambiguous}, + {0x2312, 0x2312, ambiguous}, + {0x2329, 0x232A, wide}, + {0x2460, 0x24E9, ambiguous}, + {0x24EB, 0x254B, ambiguous}, + {0x2550, 0x2573, ambiguous}, + {0x2580, 0x258F, ambiguous}, + {0x2592, 0x2595, ambiguous}, + {0x25A0, 0x25A1, ambiguous}, + {0x25A3, 0x25A9, ambiguous}, + {0x25B2, 0x25B3, ambiguous}, + {0x25B6, 0x25B7, ambiguous}, + {0x25BC, 0x25BD, ambiguous}, + {0x25C0, 0x25C1, ambiguous}, + {0x25C6, 0x25C8, ambiguous}, + {0x25CB, 0x25CB, ambiguous}, + {0x25CE, 0x25D1, ambiguous}, + {0x25E2, 0x25E5, ambiguous}, + {0x25EF, 0x25EF, ambiguous}, + {0x2605, 0x2606, ambiguous}, + {0x2609, 0x2609, ambiguous}, + {0x260E, 0x260F, ambiguous}, + {0x2614, 0x2615, ambiguous}, + {0x261C, 0x261C, ambiguous}, + {0x261E, 0x261E, ambiguous}, + {0x2640, 0x2640, ambiguous}, + {0x2642, 0x2642, ambiguous}, + {0x2660, 0x2661, ambiguous}, + {0x2663, 0x2665, ambiguous}, + {0x2667, 0x266A, ambiguous}, + {0x266C, 0x266D, ambiguous}, + {0x266F, 0x266F, ambiguous}, + {0x269E, 0x269F, ambiguous}, + {0x26BE, 0x26BF, ambiguous}, + {0x26C4, 0x26CD, ambiguous}, + {0x26CF, 0x26E1, ambiguous}, + {0x26E3, 0x26E3, ambiguous}, + {0x26E8, 0x26FF, ambiguous}, + {0x273D, 0x273D, ambiguous}, + {0x2757, 0x2757, ambiguous}, + {0x2776, 0x277F, ambiguous}, + {0x27E6, 0x27ED, narrow}, + {0x2985, 0x2986, narrow}, + {0x2B55, 0x2B59, ambiguous}, + {0x2E80, 0x2E9A, wide}, + {0x2E9B, 0x2EF4, wide}, + {0x2F00, 0x2FD6, wide}, + {0x2FF0, 0x2FFC, wide}, + {0x3000, 0x3000, fullwidth}, + {0x3001, 0x303E, wide}, + {0x3041, 0x3097, wide}, + {0x3099, 0x3100, wide}, + {0x3105, 0x312E, wide}, + {0x3131, 0x318F, wide}, + {0x3190, 0x31BB, wide}, + {0x31C0, 0x31E4, wide}, + {0x31F0, 0x321F, wide}, + {0x3220, 0x3247, wide}, + {0x3248, 0x324F, ambiguous}, + {0x3250, 0x32FF, wide}, + {0x3300, 0x4DBF, wide}, + {0x4E00, 0xA48D, wide}, + {0xA490, 0xA4C7, wide}, + {0xA960, 0xA97D, wide}, + {0xAC00, 0xD7A4, wide}, + {0xE000, 0xF8FF, ambiguous}, + {0xF900, 0xFAFF, wide}, + {0xFE00, 0xFE0F, ambiguous}, + {0xFE10, 0xFE1A, wide}, + {0xFE30, 0xFE53, wide}, + {0xFE54, 0xFE67, wide}, + {0xFE68, 0xFE6C, wide}, + {0xFF01, 0xFF60, fullwidth}, + {0xFF61, 0xFFBF, halfwidth}, + {0xFFC2, 0xFFC8, halfwidth}, + {0xFFCA, 0xFFD0, halfwidth}, + {0xFFD2, 0xFFD8, halfwidth}, + {0xFFDA, 0xFFDD, halfwidth}, + {0xFFE0, 0xFFE7, fullwidth}, + {0xFFE8, 0xFFEF, halfwidth}, + {0xFFFD, 0xFFFE, ambiguous}, + {0x1B000, 0x1B002, wide}, + {0x1F100, 0x1F10A, ambiguous}, + {0x1F110, 0x1F12D, ambiguous}, + {0x1F130, 0x1F169, ambiguous}, + {0x1F170, 0x1F19B, ambiguous}, + {0x1F200, 0x1F203, wide}, + {0x1F210, 0x1F23B, wide}, + {0x1F240, 0x1F249, wide}, + {0x1F250, 0x1F252, wide}, + {0x20000, 0x2FFFE, wide}, + {0x30000, 0x3FFFE, wide}, + {0xE0100, 0xE01F0, ambiguous}, + {0xF0000, 0xFFFFD, ambiguous}, + {0x100000, 0x10FFFE, ambiguous}, +} + +type Condition struct { + EastAsianWidth bool +} + +func NewCondition() *Condition { + return &Condition{EastAsianWidth} +} + +// RuneWidth returns the number of cells in r. 
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+	if r == 0 {
+		return 0
+	}
+	if r < 32 || (r >= 0x7f && r < 0xa0) {
+		return 1
+	}
+	for _, iv := range combining {
+		if iv.first <= r && r <= iv.last {
+			return 0
+		}
+	}
+
+	if c.EastAsianWidth && IsAmbiguousWidth(r) {
+		return 2
+	}
+
+	if r >= 0x1100 &&
+		(r <= 0x115f || r == 0x2329 || r == 0x232a ||
+			(r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) ||
+			(r >= 0xac00 && r <= 0xd7a3) ||
+			(r >= 0xf900 && r <= 0xfaff) ||
+			(r >= 0xfe30 && r <= 0xfe6f) ||
+			(r >= 0xff00 && r <= 0xff60) ||
+			(r >= 0xffe0 && r <= 0xffe6) ||
+			(r >= 0x20000 && r <= 0x2fffd) ||
+			(r >= 0x30000 && r <= 0x3fffd)) {
+		return 2
+	}
+	return 1
+}
+
+func (c *Condition) StringWidth(s string) (width int) {
+	for _, r := range []rune(s) {
+		width += c.RuneWidth(r)
+	}
+	return width
+}
+
+func (c *Condition) Truncate(s string, w int, tail string) string {
+	r := []rune(s)
+	tw := c.StringWidth(tail)
+	w -= tw
+	width := 0
+	i := 0
+	for ; i < len(r); i++ {
+		cw := c.RuneWidth(r[i])
+		if width+cw > w {
+			break
+		}
+		width += cw
+	}
+	if i == len(r) {
+		return string(r[0:i])
+	}
+	return string(r[0:i]) + tail
+}
+
+func (c *Condition) Wrap(s string, w int) string {
+	width := 0
+	out := ""
+	for _, r := range []rune(s) {
+		cw := c.RuneWidth(r)
+		if r == '\n' {
+			out += string(r)
+			width = 0
+			continue
+		} else if width+cw > w {
+			out += "\n"
+			width = 0
+			out += string(r)
+			width += cw
+			continue
+		}
+		out += string(r)
+		width += cw
+	}
+	return out
+}
+
+func (c *Condition) FillLeft(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return string(b) + s
+	}
+	return s
+}
+
+func (c *Condition) FillRight(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return s + string(b)
+	}
+	return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+	return DefaultCondition.RuneWidth(r)
+}
+
+func ct(r rune) ctype {
+	for _, iv := range ctypes {
+		if iv.first <= r && r <= iv.last {
+			return iv.ctype
+		}
+	}
+	return neutral
+}
+
+// IsAmbiguousWidth returns whether the rune has ambiguous width or not.
+func IsAmbiguousWidth(r rune) bool {
+	return ct(r) == ambiguous
+}
+
+// IsNeutralWidth returns whether the rune has neutral width or not.
+func IsNeutralWidth(r rune) bool {
+	return ct(r) == neutral
+}
+
+func StringWidth(s string) (width int) {
+	return DefaultCondition.StringWidth(s)
+}
+
+func Truncate(s string, w int, tail string) string {
+	return DefaultCondition.Truncate(s, w, tail)
+}
+
+func Wrap(s string, w int) string {
+	return DefaultCondition.Wrap(s, w)
+}
+
+func FillLeft(s string, w int) string {
+	return DefaultCondition.FillLeft(s, w)
+}
+
+func FillRight(s string, w int) string {
+	return DefaultCondition.FillRight(s, w)
+}

=== added file 'src/github.com/mattn/go-runewidth/runewidth_js.go'
--- src/github.com/mattn/go-runewidth/runewidth_js.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/runewidth_js.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,8 @@
+// +build js
+
+package runewidth
+
+func IsEastAsian() bool {
+	// TODO: Implement this for the web. Detect East Asian in a compatible way, and return true.
+	return false
+}

=== added file 'src/github.com/mattn/go-runewidth/runewidth_posix.go'
--- src/github.com/mattn/go-runewidth/runewidth_posix.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/runewidth_posix.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,69 @@
+// +build !windows,!js
+
+package runewidth
+
+import (
+	"os"
+	"regexp"
+	"strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+func IsEastAsian() bool {
+	locale := os.Getenv("LC_CTYPE")
+	if locale == "" {
+		locale = os.Getenv("LANG")
+	}
+
+	// ignore C locale
+	if locale == "POSIX" || locale == "C" {
+		return false
+	}
+	if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+		return false
+	}
+
+	charset := strings.ToLower(locale)
+	r := reLoc.FindStringSubmatch(locale)
+	if len(r) == 2 {
+		charset = strings.ToLower(r[1])
+	}
+
+	if strings.HasSuffix(charset, "@cjk_narrow") {
+		return false
+	}
+
+	for pos, b := range []byte(charset) {
+		if b == '@' {
+			charset = charset[:pos]
+			break
+		}
+	}
+
+	mbc_max := 1
+	switch charset {
+	case "utf-8", "utf8":
+		mbc_max = 6
+	case "jis":
+		mbc_max = 8
+	case "eucjp":
+		mbc_max = 3
+	case "euckr", "euccn":
+		mbc_max = 2
+	case "sjis", "cp932", "cp51932", "cp936", "cp949", "cp950":
+		mbc_max = 2
+	case "big5":
+		mbc_max = 2
+	case "gbk", "gb2312":
+		mbc_max = 2
+	}
+
+	if mbc_max > 1 && (charset[0] != 'u' ||
+		strings.HasPrefix(locale, "ja") ||
+		strings.HasPrefix(locale, "ko") ||
+		strings.HasPrefix(locale, "zh")) {
+		return true
+	}
+	return false
+}

=== added file 'src/github.com/mattn/go-runewidth/runewidth_test.go'
--- src/github.com/mattn/go-runewidth/runewidth_test.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/runewidth_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,169 @@
+package runewidth
+
+import (
+	"testing"
+)
+
+var runewidthtests = []struct {
+	in  rune
+	out int
+}{
+	{'世', 2},
+	{'界', 2},
+	{'ｾ', 1},
+	{'ｶ', 1},
+	{'ｲ', 1},
+	{'☆', 2}, // double width in ambiguous
+	{'\x00', 0},
+	{'\x01', 1},
+	{'\u0300', 0},
+}
+
+func TestRuneWidth(t *testing.T) {
+	c := NewCondition()
+	c.EastAsianWidth = true
+	for _, tt := range runewidthtests {
+		if out := c.RuneWidth(tt.in); out != tt.out {
+			t.Errorf("Width(%q) = %q, want %q", tt.in, out, tt.out)
+		}
+	}
+}
+
+var isambiguouswidthtests = []struct {
+	in  rune
+	out bool
+}{
+	{'世', false},
+	{'■', true},
+	{'界', false},
+	{'○', true},
+	{'㈱', false},
+	{'①', true},
+	{'②', true},
+	{'③', true},
+	{'④', true},
+	{'⑤', true},
+	{'⑥', true},
+	{'⑦', true},
+	{'⑧', true},
+	{'⑨', true},
+	{'⑩', true},
+	{'⑪', true},
+	{'⑫', true},
+	{'⑬', true},
+	{'⑭', true},
+	{'⑮', true},
+	{'⑯', true},
+	{'⑰', true},
+	{'⑱', true},
+	{'⑲', true},
+	{'⑳', true},
+	{'☆', true},
+}
+
+func TestIsAmbiguousWidth(t *testing.T) {
+	for _, tt := range isambiguouswidthtests {
+		if out := IsAmbiguousWidth(tt.in); out != tt.out {
+			t.Errorf("IsAmbiguousWidth(%q) = %q, want %q", tt.in, out, tt.out)
+		}
+	}
+}
+
+var stringwidthtests = []struct {
+	in  string
+	out int
+}{
+	{"■㈱の世界①", 12},
+	{"スター☆", 8},
+}
+
+func TestStringWidth(t *testing.T) {
+	c := NewCondition()
+	c.EastAsianWidth = true
+	for _, tt := range stringwidthtests {
+		if out := c.StringWidth(tt.in); out != tt.out {
+			t.Errorf("StringWidth(%q) = %q, want %q", tt.in, out, tt.out)
+		}
+	}
+}
+
+func TestStringWidthInvalid(t *testing.T) {
+	s := "こんにちわ\x00世界"
+	if out := StringWidth(s); out != 14 {
+		t.Errorf("StringWidth(%q) = %q, want %q", s, out, 14)
+	}
+}
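+
+// Note: TestStringWidthInvalid above relies on RuneWidth returning 0 for the
+// embedded NUL, so the seven double-width runes around it add up to 14 cells.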
+
+func TestTruncate(t *testing.T) {
+	s := "あいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおおおおお"
+	expected := "あいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおお..."
+
+	if out := Truncate(s, 80, "..."); out != expected {
+		t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+	}
+}
+
+func TestWrap(t *testing.T) {
+	s := `東京特許許可局局長はよく柿喰う客だ/東京特許許可局局長はよく柿喰う客だ
+123456789012345678901234567890
+
+END`
+	expected := `東京特許許可局局長はよく柿喰う
+客だ/東京特許許可局局長はよく
+柿喰う客だ
+123456789012345678901234567890
+
+END`
+
+	if out := Wrap(s, 30); out != expected {
+		t.Errorf("Wrap(%q) = %q, want %q", s, out, expected)
+	}
+}
+
+func TestTruncateNoNeeded(t *testing.T) {
+	s := "あいうえおあい"
+	expected := "あいうえおあい"
+
+	if out := Truncate(s, 80, "..."); out != expected {
+		t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+	}
+}
+
+var isneutralwidthtests = []struct {
+	in  rune
+	out bool
+}{
+	{'→', false},
+	{'┊', false},
+	{'┈', false},
+	{'~', false},
+	{'└', false},
+	{'⣀', true},
+	{'⣀', true},
+}
+
+func TestIsNeutralWidth(t *testing.T) {
+	for _, tt := range isneutralwidthtests {
+		if out := IsNeutralWidth(tt.in); out != tt.out {
+			t.Errorf("IsNeutralWidth(%q) = %q, want %q", tt.in, out, tt.out)
+		}
+	}
+}
+
+func TestFillLeft(t *testing.T) {
+	s := "あxいうえお"
+	expected := "    あxいうえお"
+
+	if out := FillLeft(s, 15); out != expected {
+		t.Errorf("FillLeft(%q) = %q, want %q", s, out, expected)
+	}
+}
+
+func TestFillRight(t *testing.T) {
+	s := "あxいうえお"
+	expected := "あxいうえお    "
+
+	if out := FillRight(s, 15); out != expected {
+		t.Errorf("FillRight(%q) = %q, want %q", s, out, expected)
+	}
+}

=== added file 'src/github.com/mattn/go-runewidth/runewidth_windows.go'
--- src/github.com/mattn/go-runewidth/runewidth_windows.go 1970-01-01 00:00:00 +0000
+++ src/github.com/mattn/go-runewidth/runewidth_windows.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,24 @@
+package runewidth
+
+import (
+	"syscall"
+)
+
+var (
+	kernel32               = syscall.NewLazyDLL("kernel32")
+	procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+func IsEastAsian() bool {
+	r1, _, _ := procGetConsoleOutputCP.Call()
+	if r1 == 0 {
+		return false
+	}
+
+	switch int(r1) {
+	case 932, 51932, 936, 949, 950:
+		return true
+	}
+
+	return false
+}

=== modified file 'src/golang.org/x/crypto/ocsp/ocsp.go'
--- src/golang.org/x/crypto/ocsp/ocsp.go 2015-09-22 15:27:01 +0000
+++ src/golang.org/x/crypto/ocsp/ocsp.go 2016-03-22 15:18:22 +0000
@@ -49,7 +49,7 @@
 }
 
 type tbsRequest struct {
-	Version       int              `asn1:"explicit,tag:0,default:0"`
+	Version       int              `asn1:"explicit,tag:0,default:0,optional"`
 	RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
 	RequestList   []request
 }
@@ -76,26 +76,26 @@
 }
 
 type responseData struct {
-	Raw           asn1.RawContent
-	Version       int              `asn1:"optional,default:1,explicit,tag:0"`
-	ResponderName pkix.RDNSequence `asn1:"optional,explicit,tag:1"`
-	KeyHash       []byte           `asn1:"optional,explicit,tag:2"`
-	ProducedAt    time.Time
-	Responses     []singleResponse
+	Raw              asn1.RawContent
+	Version          int           `asn1:"optional,default:1,explicit,tag:0"`
+	RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"`
+	KeyHash          []byte        `asn1:"optional,explicit,tag:2"`
+	ProducedAt       time.Time     `asn1:"generalized"`
+	Responses        []singleResponse
 }
 
 type singleResponse struct {
 	CertID certID
-	Good       asn1.Flag   `asn1:"explicit,tag:0,optional"`
-	Revoked    revokedInfo `asn1:"explicit,tag:1,optional"`
- 
Unknown asn1.Flag `asn1:"explicit,tag:2,optional"` - ThisUpdate time.Time - NextUpdate time.Time `asn1:"explicit,tag:0,optional"` + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` } type revokedInfo struct { - RevocationTime time.Time - Reason int `asn1:"explicit,tag:0,optional"` + RevocationTime time.Time `asn1:"generalized"` + Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` } var ( @@ -230,6 +230,7 @@ // This is the exposed reflection of the internal OCSP structures. +// The status values that can be expressed in OCSP. See RFC 6960. const ( // Good means that the certificate is valid. Good = iota @@ -241,6 +242,21 @@ ServerFailed = iota ) +// The enumerated reasons for revoking a certificate. See RFC 5280. +const ( + Unspecified = iota + KeyCompromise = iota + CACompromise = iota + AffiliationChanged = iota + Superseded = iota + CessationOfOperation = iota + CertificateHold = iota + _ = iota + RemoveFromCRL = iota + PrivilegeWithdrawn = iota + AACompromise = iota +) + // Request represents an OCSP request. See RFC 2560. type Request struct { HashAlgorithm crypto.Hash @@ -264,6 +280,18 @@ SignatureAlgorithm x509.SignatureAlgorithm } +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. +var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + // CheckSignatureFrom checks that the signature in resp is a valid signature // from issuer. This should only be used if resp.Certificate is nil. 
Otherwise, // the OCSP response contained an intermediate certificate that created the @@ -387,7 +415,7 @@ default: ret.Status = Revoked ret.RevokedAt = r.Revoked.RevocationTime - ret.RevocationReason = r.Revoked.Reason + ret.RevocationReason = int(r.Revoked.Reason) } ret.ProducedAt = basicResp.TBSResponseData.ProducedAt @@ -517,15 +545,22 @@ innerResponse.Unknown = true case Revoked: innerResponse.Revoked = revokedInfo{ - RevocationTime: template.RevokedAt, - Reason: template.RevocationReason, + RevocationTime: template.RevokedAt.UTC(), + Reason: asn1.Enumerated(template.RevocationReason), } } + responderName := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // explicit tag + IsCompound: true, + Bytes: responderCert.RawSubject, + } tbsResponseData := responseData{ - ResponderName: responderCert.Subject.ToRDNSequence(), - ProducedAt: time.Now().Truncate(time.Minute), - Responses: []singleResponse{innerResponse}, + Version: 0, + RawResponderName: responderName, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, } tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) === modified file 'src/golang.org/x/crypto/ocsp/ocsp_test.go' --- src/golang.org/x/crypto/ocsp/ocsp_test.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/ocsp/ocsp_test.go 2016-03-22 15:18:22 +0000 @@ -26,9 +26,9 @@ } expected := Response{ - Status: 0, + Status: Good, SerialNumber: big.NewInt(0x1d0fa), - RevocationReason: 0, + RevocationReason: Unspecified, ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC), NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC), } @@ -171,7 +171,7 @@ ThisUpdate: thisUpdate, NextUpdate: nextUpdate, RevokedAt: thisUpdate, - RevocationReason: 1, // keyCompromise + RevocationReason: KeyCompromise, Certificate: responder, } @@ -194,11 +194,11 @@ } if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) { - t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate) + t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt) } - if !reflect.DeepEqual(resp.ProducedAt, producedAt) { - t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate) + if !resp.ProducedAt.Equal(producedAt) { + t.Errorf("resp.ProducedAt: got %d, want %d", resp.ProducedAt, producedAt) } if resp.Status != template.Status { @@ -333,9 +333,9 @@ "20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" + "5a35fca2e054dfa8" -const ocspRequestHex = "30563054a003020100304d304b3049300906052b0e03021a05000414c0fe0278fc991888" + - "91b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210" + - "017f77deb3bcbb235d44ccc7dba62e72" +const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" + + "c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" + + "bcbb235d44ccc7dba62e72" const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" + "864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" + === modified file 'src/golang.org/x/crypto/openpgp/clearsign/clearsign.go' --- src/golang.org/x/crypto/openpgp/clearsign/clearsign.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/clearsign/clearsign.go 2016-03-22 15:18:22 +0000 @@ -35,7 +35,7 @@ // start is the marker which denotes the beginning of a clearsigned message. 
var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") -// dashEscape is prefixed to any lines that begin with a hypen so that they +// dashEscape is prefixed to any lines that begin with a hyphen so that they // can't be confused with endText. var dashEscape = []byte("- ") @@ -197,7 +197,17 @@ d.h.Write(crlf) } d.isFirstLine = false - + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { // At the beginning of a line, hyphens have to be escaped. if b == '-' { // The signature isn't calculated over the dash-escaped text so @@ -208,7 +218,7 @@ d.h.Write(d.byteBuf) d.atBeginningOfLine = false } else if b == '\n' { - // Nothing to do because we dely writing CRLF to the hash. + // Nothing to do because we delay writing CRLF to the hash. } else { d.h.Write(d.byteBuf) d.atBeginningOfLine = false @@ -217,15 +227,11 @@ return } } else { - // Any whitespace at the end of the line has to be removed so we - // buffer it until we find out whether there's more on this line. - if b == ' ' || b == '\t' || b == '\r' { - d.whitespace = append(d.whitespace, b) - } else if b == '\n' { + if b == '\n' { // We got a raw \n. Drop any trailing whitespace and write a // CRLF. d.whitespace = d.whitespace[:0] - // We dely writing CRLF to the hash until the start of the + // We delay writing CRLF to the hash until the start of the // next line. if err = d.buffered.WriteByte(b); err != nil { return === modified file 'src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go' --- src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go 2016-03-22 15:18:22 +0000 @@ -64,6 +64,16 @@ {"a\n", "a", "a\n"}, {"-a\n", "-a", "-a\n"}, {"--a\nb", "--a\r\nb", "--a\nb\n"}, + // leading whitespace + {" a\n", " a", " a\n"}, + {" a\n", " a", " a\n"}, + // trailing whitespace (should be stripped) + {"a \n", "a", "a\n"}, + {"a ", "a", "a\n"}, + // whitespace-only lines (should be stripped) + {" \n", "", "\n"}, + {" ", "", "\n"}, + {"a\n \n \nb\n", "a\r\n\r\n\r\nb", "a\n\n\nb\n"}, } func TestSigning(t *testing.T) { === modified file 'src/golang.org/x/crypto/openpgp/keys.go' --- src/golang.org/x/crypto/openpgp/keys.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/keys.go 2016-03-22 15:18:22 +0000 @@ -6,11 +6,12 @@ import ( "crypto/rsa" + "io" + "time" + "golang.org/x/crypto/openpgp/armor" "golang.org/x/crypto/openpgp/errors" "golang.org/x/crypto/openpgp/packet" - "io" - "time" ) // PublicKeyType is the armor type for a PGP public key. 
@@ -90,13 +91,16 @@ func (e *Entity) encryptionKey(now time.Time) (Key, bool) { candidateSubkey := -1 + // Iterate the keys to find the newest key + var maxTime time.Time for i, subkey := range e.Subkeys { if subkey.Sig.FlagsValid && subkey.Sig.FlagEncryptCommunications && subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) { + !subkey.Sig.KeyExpired(now) && + (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { candidateSubkey = i - break + maxTime = subkey.Sig.CreationTime } } @@ -460,15 +464,20 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { currentTime := config.Now() + bits := defaultRSAKeyBits + if config != nil && config.RSABits != 0 { + bits = config.RSABits + } + uid := packet.NewUserId(name, comment, email) if uid == nil { return nil, errors.InvalidArgumentError("user id field contained invalid characters") } - signingPriv, err := rsa.GenerateKey(config.Random(), defaultRSAKeyBits) + signingPriv, err := rsa.GenerateKey(config.Random(), bits) if err != nil { return nil, err } - encryptingPriv, err := rsa.GenerateKey(config.Random(), defaultRSAKeyBits) + encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) if err != nil { return nil, err } === modified file 'src/golang.org/x/crypto/openpgp/keys_test.go' --- src/golang.org/x/crypto/openpgp/keys_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/keys_test.go 2016-03-22 15:18:22 +0000 @@ -1,9 +1,12 @@ package openpgp import ( + "bytes" + "strings" "testing" "time" + "golang.org/x/crypto/openpgp/errors" "golang.org/x/crypto/openpgp/packet" ) @@ -13,15 +16,16 @@ const timeFormat = "2006-01-02" time1, _ := time.Parse(timeFormat, "2013-07-01") + // The expiringKeyHex key is structured as: // - // pub 1024R/5E237D8C created: 2013-07-01 expires: 2013-07-31 usage: SC - // sub 1024R/1ABB25A0 created: 2013-07-01 expires: 2013-07-08 usage: E - // sub 1024R/96A672F5 created: 2013-07-01 expires: 2013-07-31 usage: E + // pub 1024R/5E237D8C created: 2013-07-01 expires: 2013-07-31 usage: SC + // sub 1024R/1ABB25A0 created: 2013-07-01 23:11:07 +0200 CEST expires: 2013-07-08 usage: E + // sub 1024R/96A672F5 created: 2013-07-01 23:11:23 +0200 CEST expires: 2013-07-31 usage: E // - // So this should select the first, non-expired encryption key. + // So this should select the newest, non-expired encryption key. key, _ := entity.encryptionKey(time1) - if id := key.PublicKey.KeyIdShortString(); id != "1ABB25A0" { + if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" { t.Errorf("Expected key 1ABB25A0 at time %s, but got key %s", time1.Format(timeFormat), id) } @@ -40,6 +44,63 @@ } } +func TestMissingCrossSignature(t *testing.T) { + // This public key has a signing subkey, but the subkey does not + // contain a cross-signature. + keys, err := ReadArmoredKeyRing(bytes.NewBufferString(missingCrossSignatureKey)) + if len(keys) != 0 { + t.Errorf("Accepted key with missing cross signature") + } + if err == nil { + t.Fatal("Failed to detect error in keyring with missing cross signature") + } + structural, ok := err.(errors.StructuralError) + if !ok { + t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err) + } + const expectedMsg = "signing subkey is missing cross-signature" + if !strings.Contains(string(structural), expectedMsg) { + t.Fatalf("Unexpected error: %q. 
Expected it to contain %q", err, expectedMsg) + } +} + +func TestInvalidCrossSignature(t *testing.T) { + // This public key has a signing subkey, and the subkey has an + // embedded cross-signature. However, the cross-signature does + // not correctly validate over the primary and subkey. + keys, err := ReadArmoredKeyRing(bytes.NewBufferString(invalidCrossSignatureKey)) + if len(keys) != 0 { + t.Errorf("Accepted key with invalid cross signature") + } + if err == nil { + t.Fatal("Failed to detect error in keyring with an invalid cross signature") + } + structural, ok := err.(errors.StructuralError) + if !ok { + t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err) + } + const expectedMsg = "subkey signature invalid" + if !strings.Contains(string(structural), expectedMsg) { + t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg) + } +} + +func TestGoodCrossSignature(t *testing.T) { + // This public key has a signing subkey, and the subkey has an + // embedded cross-signature which correctly validates over the + // primary and subkey. + keys, err := ReadArmoredKeyRing(bytes.NewBufferString(goodCrossSignatureKey)) + if err != nil { + t.Fatal(err) + } + if len(keys) != 1 { + t.Errorf("Failed to accept key with good cross signature, %d", len(keys)) + } + if len(keys[0].Subkeys) != 1 { + t.Errorf("Failed to accept good subkey, %d", len(keys[0].Subkeys)) + } +} + // TestExternallyRevokableKey attempts to load and parse a key with a third party revocation permission. func TestExternallyRevocableKey(t *testing.T) { kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex)) @@ -214,3 +275,96 @@ const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295
d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf64
28e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade7
90a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N 
+1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl 
+BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` === modified file 'src/golang.org/x/crypto/openpgp/packet/config.go' --- src/golang.org/x/crypto/openpgp/packet/config.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/config.go 2016-03-22 15:18:22 +0000 @@ -43,6 +43,9 @@ // use a value that is at least 65536. See RFC 4880 Section // 3.7.1.3. S2KCount int + // RSABits is the number of bits in new RSA keys made with NewEntity. + // If zero, then 2048 bit keys are created. + RSABits int } func (c *Config) Random() io.Reader { === modified file 'src/golang.org/x/crypto/openpgp/packet/encrypted_key.go' --- src/golang.org/x/crypto/openpgp/packet/encrypted_key.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/encrypted_key.go 2016-03-22 15:18:22 +0000 @@ -7,11 +7,12 @@ import ( "crypto/rsa" "encoding/binary" - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" "io" "math/big" "strconv" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" ) const encryptedKeyVersion = 3 @@ -24,7 +25,7 @@ CipherFunc CipherFunction // only valid after a successful Decrypt Key []byte // only valid after a successful Decrypt - encryptedMPI1, encryptedMPI2 []byte + encryptedMPI1, encryptedMPI2 parsedMPI } func (e *EncryptedKey) parse(r io.Reader) (err error) { @@ -40,13 +41,13 @@ e.Algo = PublicKeyAlgorithm(buf[9]) switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1, _, err = readMPI(r) + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) case PubKeyAlgoElGamal: - e.encryptedMPI1, _, err = readMPI(r) + e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) if err != nil { return } - e.encryptedMPI2, _, err = readMPI(r) + e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) } _, err = consumeAll(r) return @@ -71,10 +72,10 @@ // padding oracle attacks. switch priv.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1) + b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes) case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1) - c2 := new(big.Int).SetBytes(e.encryptedMPI2) + c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) default: err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) @@ -95,6 +96,36 @@ return nil } +// Serialize writes the encrypted key packet, e, to w. 
+func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + case PubKeyAlgoElGamal: + mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + writeMPIs(w, e.encryptedMPI1) + case PubKeyAlgoElGamal: + writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) + default: + panic("internal error") + } + + return nil +} + // SerializeEncryptedKey serializes an encrypted key packet to w that contains // key, encrypted to pub. // If config is nil, sensible defaults will be used. === modified file 'src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go' --- src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go 2016-03-22 15:18:22 +0000 @@ -7,6 +7,7 @@ import ( "bytes" "crypto/rsa" + "encoding/hex" "fmt" "math/big" "testing" @@ -123,3 +124,23 @@ t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex) } } + +func TestSerializingEncryptedKey(t *testing.T) { + const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8" + + p, err := Read(readerFromHex(encryptedKeyHex)) + if err != nil { + t.Fatalf("error from Read: %s", err) + } + ek, ok := p.(*EncryptedKey) + if !ok { + t.Fatalf("didn't parse an EncryptedKey, got %#v", p) + } + + var buf bytes.Buffer + ek.Serialize(&buf) + + if bufHex := hex.EncodeToString(buf.Bytes()); bufHex != encryptedKeyHex { + t.Fatalf("serialization of encrypted key differed from original. Original was %s, but reserialized as %s", encryptedKeyHex, bufHex) + } +} === modified file 'src/golang.org/x/crypto/openpgp/packet/opaque.go' --- src/golang.org/x/crypto/openpgp/packet/opaque.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/opaque.go 2016-03-22 15:18:22 +0000 @@ -6,9 +6,10 @@ import ( "bytes" - "golang.org/x/crypto/openpgp/errors" "io" "io/ioutil" + + "golang.org/x/crypto/openpgp/errors" ) // OpaquePacket represents an OpenPGP packet as raw, unparsed data. 
This is @@ -138,7 +139,7 @@ uint32(contents[4]) contents = contents[5:] } - if subLen > uint32(len(contents)) { + if subLen > uint32(len(contents)) || subLen == 0 { goto Truncated } subPacket.SubType = contents[0] === modified file 'src/golang.org/x/crypto/openpgp/packet/packet.go' --- src/golang.org/x/crypto/openpgp/packet/packet.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/openpgp/packet/packet.go 2016-03-22 15:18:22 +0000 @@ -385,16 +385,17 @@ type SignatureType uint8 const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 + SigTypeBinary SignatureType = 0 + SigTypeText = 1 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 ) // PublicKeyAlgorithm represents the different public key system specified for === modified file 'src/golang.org/x/crypto/openpgp/packet/private_key.go' --- src/golang.org/x/crypto/openpgp/packet/private_key.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/private_key.go 2016-03-22 15:18:22 +0000 @@ -263,6 +263,9 @@ rsaPriv.Primes = make([]*big.Int, 2) rsaPriv.Primes[0] = new(big.Int).SetBytes(p) rsaPriv.Primes[1] = new(big.Int).SetBytes(q) + if err := rsaPriv.Validate(); err != nil { + return err + } rsaPriv.Precompute() pk.PrivateKey = rsaPriv pk.Encrypted = false === modified file 'src/golang.org/x/crypto/openpgp/packet/private_key_test.go' --- src/golang.org/x/crypto/openpgp/packet/private_key_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/private_key_test.go 2016-03-22 15:18:22 +0000 @@ -56,6 +56,11 @@ } } +func TestIssue11505(t *testing.T) { + // parsing a rsa private key with p or q == 1 used to panic due to a divide by zero + _, _ = Read(readerFromHex("9c3004303030300100000011303030000000000000010130303030303030303030303030303030303030303030303030303030303030303030303030303030303030")) +} + // Generated with `gpg --export-secret-keys "Test Key 2"` const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" === modified file 'src/golang.org/x/crypto/openpgp/packet/public_key.go' --- src/golang.org/x/crypto/openpgp/packet/public_key.go 2015-03-26 15:54:39 +0000 +++ 
src/golang.org/x/crypto/openpgp/packet/public_key.go 2016-03-22 15:18:22 +0000 @@ -16,13 +16,14 @@ _ "crypto/sha512" "encoding/binary" "fmt" - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" "hash" "io" "math/big" "strconv" "time" + + "golang.org/x/crypto/openpgp/elgamal" + "golang.org/x/crypto/openpgp/errors" ) var ( @@ -192,7 +193,7 @@ return pk } -// NewDSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { pk := &PublicKey{ CreationTime: creationTime, @@ -564,12 +565,33 @@ // VerifyKeySignature returns nil iff sig is a valid signature, made by this // public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) (err error) { +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { h, err := keySignatureHash(pk, signed, sig.Hash) if err != nil { return err } - return pk.VerifySignature(h, sig) + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. + if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil } func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { === modified file 'src/golang.org/x/crypto/openpgp/packet/public_key_v3.go' --- src/golang.org/x/crypto/openpgp/packet/public_key_v3.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/public_key_v3.go 2016-03-22 15:18:22 +0000 @@ -95,6 +95,11 @@ return } + // RFC 4880 Section 12.2 requires the low 8 bytes of the + // modulus to form the key id. + if len(pk.n.bytes) < 8 { + return errors.StructuralError("v3 public key modulus is too short") + } if len(pk.e.bytes) > 3 { err = errors.UnsupportedError("large public exponent") return === modified file 'src/golang.org/x/crypto/openpgp/packet/reader.go' --- src/golang.org/x/crypto/openpgp/packet/reader.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/packet/reader.go 2016-03-22 15:18:22 +0000 @@ -16,6 +16,14 @@ readers []io.Reader } +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + // Next returns the most recently unread Packet, or reads another packet from // the top-most io.Reader. Unknown packet types are skipped. 
func (r *Reader) Next() (p Packet, err error) { @@ -44,9 +52,15 @@ // Push causes the Reader to start reading from a new io.Reader. When an EOF // error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. -func (r *Reader) Push(reader io.Reader) { +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } r.readers = append(r.readers, reader) + return nil } // Unread causes the given Packet to be returned from the next call to Next. === modified file 'src/golang.org/x/crypto/openpgp/packet/signature.go' --- src/golang.org/x/crypto/openpgp/packet/signature.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/openpgp/packet/signature.go 2016-03-22 15:18:22 +0000 @@ -5,6 +5,7 @@ package packet import ( + "bytes" "crypto" "crypto/dsa" "crypto/rsa" @@ -68,6 +69,11 @@ // support for MDC subpackets. MDC bool + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + outSubpackets []outputSubpacket } @@ -196,6 +202,7 @@ keyFlagsSubpacket signatureSubpacketType = 27 reasonForRevocationSubpacket signatureSubpacketType = 29 featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 ) // parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. @@ -355,6 +362,24 @@ // features. In practice, the subpacket is used exclusively to // indicate support for MDC-protected encryption. sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. + if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } default: if isCritical { err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) === modified file 'src/golang.org/x/crypto/openpgp/read.go' --- src/golang.org/x/crypto/openpgp/read.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/read.go 2016-03-22 15:18:22 +0000 @@ -8,12 +8,13 @@ import ( "crypto" _ "crypto/sha256" + "hash" + "io" + "strconv" + "golang.org/x/crypto/openpgp/armor" "golang.org/x/crypto/openpgp/errors" "golang.org/x/crypto/openpgp/packet" - "hash" - "io" - "strconv" ) // SignatureType is the armor type for a PGP signature. 
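[Illustrative aside, not part of the vendored patch.] The Push change above turns unbounded nesting into a hard error: each compressed or encrypted layer pushes a reader, and once maxReaders (32) layers are stacked the next Push fails with a StructuralError instead of looping forever on a crafted quine. A small sketch of the new contract:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	r := packet.NewReader(bytes.NewReader(nil))
	for i := 0; ; i++ {
		// Each Push models one layer of nested compressed/encrypted data.
		// Pushes 0 through 31 succeed; the 33rd attempt is rejected.
		if err := r.Push(strings.NewReader("")); err != nil {
			fmt.Printf("push %d rejected: %v\n", i, err)
			return
		}
	}
}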
@@ -211,7 +212,9 @@ } md.decrypted = decrypted - packets.Push(decrypted) + if err := packets.Push(decrypted); err != nil { + return nil, err + } return readSignedMessage(packets, md, keyring) } @@ -235,7 +238,9 @@ } switch p := p.(type) { case *packet.Compressed: - packets.Push(p.Body) + if err := packets.Push(p.Body); err != nil { + return nil, err + } case *packet.OnePassSignature: if !p.IsLast { return nil, errors.UnsupportedError("nested signatures") @@ -353,44 +358,55 @@ // returns the signer if the signature is valid. If the signer isn't known, // ErrUnknownIssuer is returned. func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - p, err := packet.Read(signature) - if err != nil { - return - } - var issuerKeyId uint64 var hashFunc crypto.Hash var sigType packet.SignatureType - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") + var keys []Key + var p packet.Packet + + packets := packet.NewReader(signature) + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + switch sig := p.(type) { + case *packet.Signature: + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + case *packet.SignatureV3: + issuerKeyId = sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + default: + return nil, errors.StructuralError("non signature packet found") + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") } h, wrappedHash, err := hashForSignature(hashFunc, sigType) if err != nil { - return - } - - _, err = io.Copy(wrappedHash, signed) - if err != nil && err != io.EOF { - return - } - - keys := keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) == 0 { - return nil, errors.ErrUnknownIssuer + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err } for _, key := range keys { @@ -399,15 +415,15 @@ err = key.PublicKey.VerifySignature(h, sig) case *packet.SignatureV3: err = key.PublicKey.VerifySignatureV3(h, sig) + default: + panic("unreachable") } + if err == nil { return key.Entity, nil } } - if err == nil { - err = errors.ErrUnknownIssuer - } return nil, err } === modified file 'src/golang.org/x/crypto/openpgp/read_test.go' --- src/golang.org/x/crypto/openpgp/read_test.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/openpgp/read_test.go 2016-03-22 15:18:22 +0000 @@ -8,11 +8,12 @@ "bytes" _ "crypto/sha512" "encoding/hex" - "golang.org/x/crypto/openpgp/errors" "io" "io/ioutil" "strings" "testing" + + "golang.org/x/crypto/openpgp/errors" ) func readerFromHex(s string) io.Reader { @@ -135,6 +136,25 @@ checkSignedMessage(t, signedTextMessageHex, signedTextInput) } +// The reader should detect "compressed quines", which are compressed +// packets that expand into themselves and cause an infinite recursive +// parsing loop. +// The packet in this test case comes from Taylor R. 
Campbell at +// http://mumble.net/~campbell/misc/pgp-quine/ +func TestCampbellQuine(t *testing.T) { + md, err := ReadMessage(readerFromHex(campbellQuine), nil, nil, nil) + if md != nil { + t.Errorf("Reading a compressed quine should not return any data: %#v", md) + } + structural, ok := err.(errors.StructuralError) + if !ok { + t.Fatalf("Unexpected class of error: %T", err) + } + if !strings.Contains(string(structural), "too many layers of packets") { + t.Fatalf("Unexpected error: %s", err) + } +} + var signedEncryptedMessageTests = []struct { keyRingHex string messageHex string @@ -295,6 +315,11 @@ testDetachedSignature(t, kring, readerFromHex(detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) } +func TestMultipleSignaturePacketsDSA(t *testing.T) { + kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex)) + testDetachedSignature(t, kring, readerFromHex(missingHashFunctionHex+detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId) +} + func testHashFunctionError(t *testing.T, signatureHex string) { kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) _, err := CheckDetachedSignature(kring, nil, readerFromHex(signatureHex)) @@ -318,8 +343,16 @@ func TestMissingHashFunction(t *testing.T) { // missingHashFunctionHex contains a signature packet that uses - // RIPEMD160, which isn't compiled in. - testHashFunctionError(t, missingHashFunctionHex) + // RIPEMD160, which isn't compiled in. Since that's the only signature + // packet we don't find any suitable packets and end up with ErrUnknownIssuer + kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex)) + _, err := CheckDetachedSignature(kring, nil, readerFromHex(missingHashFunctionHex)) + if err == nil { + t.Fatal("Packet with missing hash type was correctly parsed") + } + if err != errors.ErrUnknownIssuer { + t.Fatalf("Unexpected class of error: %s", err) + } } func TestReadingArmoredPrivateKey(t *testing.T) { @@ -349,6 +382,35 @@ } } +func testReadMessageError(t *testing.T, messageHex string) { + buf, err := hex.DecodeString(messageHex) + if err != nil { + t.Errorf("hex.DecodeString(): %v", err) + } + + kr, err := ReadKeyRing(new(bytes.Buffer)) + if err != nil { + t.Errorf("ReadKeyring(): %v", err) + } + + _, err = ReadMessage(bytes.NewBuffer(buf), kr, + func([]Key, bool) ([]byte, error) { + return []byte("insecure"), nil + }, nil) + + if err == nil { + t.Errorf("ReadMessage(): Unexpected nil error") + } +} + +func TestIssue11503(t *testing.T) { + testReadMessageError(t, "8c040402000aa430aa8228b9248b01fc899a91197130303030") +} + +func TestIssue11504(t *testing.T) { + testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130") +} + const testKey1KeyId = 0xA34D7E18C20C31BB const testKey3KeyId = 0x338934250CCC0360 @@ -439,4 +501,6 @@ const unknownHashFunctionHex = 
`8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` -const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` === modified file 'src/golang.org/x/crypto/openpgp/write.go' --- src/golang.org/x/crypto/openpgp/write.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/write.go 2016-03-22 15:18:22 +0000 @@ -6,14 +6,15 @@ import ( "crypto" + "hash" + "io" + "strconv" + "time" + "golang.org/x/crypto/openpgp/armor" "golang.org/x/crypto/openpgp/errors" "golang.org/x/crypto/openpgp/packet" "golang.org/x/crypto/openpgp/s2k" - "hash" - "io" - "strconv" - "time" ) // DetachSign signs message with the private key from signer (which must @@ -176,6 +177,9 @@ return nil, errors.InvalidArgumentError("no valid signing keys") } signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } if signer.Encrypted { return nil, errors.InvalidArgumentError("signing key must be decrypted") 
} === modified file 'src/golang.org/x/crypto/openpgp/write_test.go' --- src/golang.org/x/crypto/openpgp/write_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/openpgp/write_test.go 2016-03-22 15:18:22 +0000 @@ -10,6 +10,8 @@ "io/ioutil" "testing" "time" + + "golang.org/x/crypto/openpgp/packet" ) func TestSignDetached(t *testing.T) { @@ -53,11 +55,34 @@ return } + // Check bit-length with no config. e, err := NewEntity("Test User", "test", "test@example.com", nil) if err != nil { t.Errorf("failed to create entity: %s", err) return } + bl, err := e.PrimaryKey.BitLength() + if err != nil { + t.Errorf("failed to find bit length: %s", err) + } + if int(bl) != defaultRSAKeyBits { + t.Errorf("BitLength %v, expected %v", bl, defaultRSAKeyBits) + } + + // Check bit-length with a config. + cfg := &packet.Config{RSABits: 1024} + e, err = NewEntity("Test User", "test", "test@example.com", cfg) + if err != nil { + t.Errorf("failed to create entity: %s", err) + return + } + bl, err = e.PrimaryKey.BitLength() + if err != nil { + t.Errorf("failed to find bit length: %s", err) + } + if int(bl) != cfg.RSABits { + t.Errorf("BitLength %v, expected %v", bl, cfg.RSABits) + } w := bytes.NewBuffer(nil) if err := e.SerializePrivate(w, nil); err != nil { === modified file 'src/golang.org/x/crypto/otr/otr.go' --- src/golang.org/x/crypto/otr/otr.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/otr/otr.go 2016-03-22 15:18:22 +0000 @@ -848,7 +848,6 @@ slot := &c.keySlots[i] if slot.used && slot.myKeyId == c.myKeyId-1 { slot.used = false - c.oldMACs = append(c.oldMACs, slot.sendMACKey...) c.oldMACs = append(c.oldMACs, slot.recvMACKey...) } } @@ -924,7 +923,6 @@ slot := &c.keySlots[i] if slot.used && slot.theirKeyId == theirKeyId-1 { slot.used = false - c.oldMACs = append(c.oldMACs, slot.sendMACKey...) c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
} } @@ -1096,6 +1094,10 @@ h.Write(slot.recvAESKey) slot.recvMACKey = h.Sum(slot.recvMACKey[:0]) + slot.theirKeyId = theirKeyId + slot.myKeyId = myKeyId + slot.used = true + zero(slot.theirLastCtr[:]) return } === modified file 'src/golang.org/x/crypto/otr/otr_test.go' --- src/golang.org/x/crypto/otr/otr_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/otr/otr_test.go 2016-03-22 15:18:22 +0000 @@ -194,23 +194,75 @@ t.Error("Bob doesn't believe that the conversation is secure") } - var testMessage = []byte("hello Bob") - alicesMessage, err = alice.Send(testMessage) - for i, msg := range alicesMessage { - out, encrypted, _, _, err := bob.Receive(msg) - if err != nil { - t.Errorf("Error generated while processing test message: %s", err.Error()) - } - if len(out) > 0 { - if i != len(alicesMessage)-1 { - t.Fatal("Bob produced a message while processing a fragment of Alice's") - } - if !encrypted { - t.Errorf("Message was not marked as encrypted") - } - if !bytes.Equal(out, testMessage) { - t.Errorf("Message corrupted: got %x, want %x", out, testMessage) - } + var testMessages = [][]byte{ + []byte("hello"), []byte("bye"), + } + + for j, testMessage := range testMessages { + alicesMessage, err = alice.Send(testMessage) + + if len(alice.oldMACs) != 0 { + t.Errorf("Alice has not revealed all MAC keys") + } + + for i, msg := range alicesMessage { + out, encrypted, _, _, err := bob.Receive(msg) + + if err != nil { + t.Errorf("Error generated while processing test message: %s", err.Error()) + } + if len(out) > 0 { + if i != len(alicesMessage)-1 { + t.Fatal("Bob produced a message while processing a fragment of Alice's") + } + if !encrypted { + t.Errorf("Message was not marked as encrypted") + } + if !bytes.Equal(out, testMessage) { + t.Errorf("Message corrupted: got %x, want %x", out, testMessage) + } + } + } + + if j == 0 { + if len(bob.oldMACs) != 0 { + t.Errorf("Bob should not have MAC keys to reveal") + } + } else if len(bob.oldMACs) != 40 { + t.Errorf("Bob does not have MAC keys to reveal") + } + + bobsMessage, err = bob.Send(testMessage) + + if len(bob.oldMACs) != 0 { + t.Errorf("Bob has not revealed all MAC keys") + } + + for i, msg := range bobsMessage { + out, encrypted, _, _, err := alice.Receive(msg) + + if err != nil { + t.Errorf("Error generated while processing test message: %s", err.Error()) + } + if len(out) > 0 { + if i != len(bobsMessage)-1 { + t.Fatal("Alice produced a message while processing a fragment of Bob's") + } + if !encrypted { + t.Errorf("Message was not marked as encrypted") + } + if !bytes.Equal(out, testMessage) { + t.Errorf("Message corrupted: got %x, want %x", out, testMessage) + } + } + } + + if j == 0 { + if len(alice.oldMACs) != 20 { + t.Errorf("Alice does not have MAC keys to reveal") + } + } else if len(alice.oldMACs) != 40 { + t.Errorf("Alice does not have MAC keys to reveal") } } } === added file 'src/golang.org/x/crypto/poly1305/poly1305_arm.s' --- src/golang.org/x/crypto/poly1305/poly1305_arm.s 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/crypto/poly1305/poly1305_arm.s 2016-03-22 15:18:22 +0000 @@ -0,0 +1,379 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 5a from the public +// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. 
+ +// +build arm,!gccgo,!appengine + +DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff +DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 +DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff +DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff +DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff +GLOBL poly1305_init_constants_armv6<>(SB), 8, $20 + +// Warning: the linker may use R11 to synthesize certain instructions. Please +// take care and verify that no synthetic instructions use it. + +TEXT poly1305_init_ext_armv6<>(SB),4,$-4 + MOVM.DB.W [R4-R11], (R13) + MOVM.IA.W (R1), [R2-R5] + MOVW $poly1305_init_constants_armv6<>(SB), R7 + MOVW R2, R8 + MOVW R2>>26, R9 + MOVW R3>>20, g + MOVW R4>>14, R11 + MOVW R5>>8, R12 + ORR R3<<6, R9, R9 + ORR R4<<12, g, g + ORR R5<<18, R11, R11 + MOVM.IA (R7), [R2-R6] + AND R8, R2, R2 + AND R9, R3, R3 + AND g, R4, R4 + AND R11, R5, R5 + AND R12, R6, R6 + MOVM.IA.W [R2-R6], (R0) + EOR R2, R2, R2 + EOR R3, R3, R3 + EOR R4, R4, R4 + EOR R5, R5, R5 + EOR R6, R6, R6 + MOVM.IA.W [R2-R6], (R0) + MOVM.IA.W (R1), [R2-R5] + MOVM.IA [R2-R6], (R0) + MOVM.IA.W (R13), [R4-R11] + RET + +#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ + MOVBU (offset+0)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+0)(Rdst); \ + MOVBU (offset+1)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+1)(Rdst); \ + MOVBU (offset+2)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+2)(Rdst); \ + MOVBU (offset+3)(Rsrc), Rtmp; \ + MOVBU Rtmp, (offset+3)(Rdst) + +TEXT poly1305_blocks_armv6<>(SB),4,$-4 + MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13) + SUB $128, R13 + MOVW R0, 36(R13) + MOVW R1, 40(R13) + MOVW R2, 44(R13) + MOVW R1, R14 + MOVW R2, R12 + MOVW 56(R0), R8 + WORD $0xe1180008 // TST R8, R8 not working see issue 5921 + EOR R6, R6, R6 + MOVW.EQ $(1<<24), R6 + MOVW R6, 32(R13) + ADD $64, R13, g + MOVM.IA (R0), [R0-R9] + MOVM.IA [R0-R4], (g) + CMP $16, R12 + BLO poly1305_blocks_armv6_done +poly1305_blocks_armv6_mainloop: + WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 + BEQ poly1305_blocks_armv6_mainloop_aligned + ADD $48, R13, g + MOVW_UNALIGNED(R14, g, R0, 0) + MOVW_UNALIGNED(R14, g, R0, 4) + MOVW_UNALIGNED(R14, g, R0, 8) + MOVW_UNALIGNED(R14, g, R0, 12) + MOVM.IA (g), [R0-R3] + ADD $16, R14 + B poly1305_blocks_armv6_mainloop_loaded +poly1305_blocks_armv6_mainloop_aligned: + MOVM.IA.W (R14), [R0-R3] +poly1305_blocks_armv6_mainloop_loaded: + MOVW R0>>26, g + MOVW R1>>20, R11 + MOVW R2>>14, R12 + MOVW R14, 40(R13) + MOVW R3>>8, R4 + ORR R1<<6, g, g + ORR R2<<12, R11, R11 + ORR R3<<18, R12, R12 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, g, g + MOVW 32(R13), R3 + BIC $0xfc000000, R11, R11 + BIC $0xfc000000, R12, R12 + ADD R0, R5, R5 + ADD g, R6, R6 + ORR R3, R4, R4 + ADD R11, R7, R7 + ADD $64, R13, R14 + ADD R12, R8, R8 + ADD R4, R9, R9 + MOVM.IA (R14), [R0-R4] + MULLU R4, R5, (R11, g) + MULLU R3, R5, (R14, R12) + MULALU R3, R6, (R11, g) + MULALU R2, R6, (R14, R12) + MULALU R2, R7, (R11, g) + MULALU R1, R7, (R14, R12) + ADD R4<<2, R4, R4 + ADD R3<<2, R3, R3 + MULALU R1, R8, (R11, g) + MULALU R0, R8, (R14, R12) + MULALU R0, R9, (R11, g) + MULALU R4, R9, (R14, R12) + MOVW g, 24(R13) + MOVW R11, 28(R13) + MOVW R12, 16(R13) + MOVW R14, 20(R13) + MULLU R2, R5, (R11, g) + MULLU R1, R5, (R14, R12) + MULALU R1, R6, (R11, g) + MULALU R0, R6, (R14, R12) + MULALU R0, R7, (R11, g) + MULALU R4, R7, (R14, R12) + ADD R2<<2, R2, R2 + ADD R1<<2, R1, R1 + MULALU R4, R8, (R11, g) + MULALU R3, R8, (R14, R12) + MULALU R3, R9, (R11, g) + MULALU R2, R9, (R14, R12) + MOVW g, 8(R13) + 
MOVW R11, 12(R13) + MOVW R12, 0(R13) + MOVW R14, w+4(SP) + MULLU R0, R5, (R11, g) + MULALU R4, R6, (R11, g) + MULALU R3, R7, (R11, g) + MULALU R2, R8, (R11, g) + MULALU R1, R9, (R11, g) + MOVM.IA (R13), [R0-R7] + MOVW g>>26, R12 + MOVW R4>>26, R14 + ORR R11<<6, R12, R12 + ORR R5<<6, R14, R14 + BIC $0xfc000000, g, g + BIC $0xfc000000, R4, R4 + ADD.S R12, R0, R0 + ADC $0, R1, R1 + ADD.S R14, R6, R6 + ADC $0, R7, R7 + MOVW R0>>26, R12 + MOVW R6>>26, R14 + ORR R1<<6, R12, R12 + ORR R7<<6, R14, R14 + BIC $0xfc000000, R0, R0 + BIC $0xfc000000, R6, R6 + ADD R14<<2, R14, R14 + ADD.S R12, R2, R2 + ADC $0, R3, R3 + ADD R14, g, g + MOVW R2>>26, R12 + MOVW g>>26, R14 + ORR R3<<6, R12, R12 + BIC $0xfc000000, g, R5 + BIC $0xfc000000, R2, R7 + ADD R12, R4, R4 + ADD R14, R0, R0 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R8 + ADD R12, R6, R9 + MOVW w+44(SP), R12 + MOVW w+40(SP), R14 + MOVW R0, R6 + CMP $32, R12 + SUB $16, R12, R12 + MOVW R12, 44(R13) + BHS poly1305_blocks_armv6_mainloop +poly1305_blocks_armv6_done: + MOVW 36(R13), R12 + MOVW R5, 20(R12) + MOVW R6, 24(R12) + MOVW R7, 28(R12) + MOVW R8, 32(R12) + MOVW R9, 36(R12) + ADD $128, R13, R13 + MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14] + RET + +#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst); \ + MOVBU.P 1(Rsrc), Rtmp; \ + MOVBU.P Rtmp, 1(Rdst) + +#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ + MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) + +TEXT poly1305_finish_ext_armv6<>(SB),4,$-4 + MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13) + SUB $16, R13, R13 + MOVW R0, R5 + MOVW R1, R6 + MOVW R2, R7 + MOVW R3, R8 + AND.S R2, R2, R2 + BEQ poly1305_finish_ext_armv6_noremaining + EOR R0, R0 + MOVW R13, R9 + MOVW R0, 0(R13) + MOVW R0, 4(R13) + MOVW R0, 8(R13) + MOVW R0, 12(R13) + WORD $0xe3110003 // TST R1, #3 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_aligned + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8 + MOVWP_UNALIGNED(R1, R9, g) + MOVWP_UNALIGNED(R1, R9, g) +poly1305_finish_ext_armv6_skip8: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4 + MOVWP_UNALIGNED(R1, R9, g) +poly1305_finish_ext_armv6_skip4: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHUP_UNALIGNED(R1, R9, g) + B poly1305_finish_ext_armv6_skip2 +poly1305_finish_ext_armv6_aligned: + WORD $0xe3120008 // TST R2, #8 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip8_aligned + MOVM.IA.W (R1), [g-R11] + MOVM.IA.W [g-R11], (R9) +poly1305_finish_ext_armv6_skip8_aligned: + WORD $0xe3120004 // TST $4, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip4_aligned + MOVW.P 4(R1), g + MOVW.P g, 4(R9) +poly1305_finish_ext_armv6_skip4_aligned: + WORD $0xe3120002 // TST $2, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip2 + MOVHU.P 2(R1), g + MOVH.P g, 2(R9) +poly1305_finish_ext_armv6_skip2: + WORD $0xe3120001 // TST $1, R2 not working see issue 5921 + BEQ poly1305_finish_ext_armv6_skip1 + MOVBU.P 1(R1), g + MOVBU.P g, 1(R9) +poly1305_finish_ext_armv6_skip1: + MOVW $1, R11 + MOVBU R11, 0(R9) + MOVW R11, 56(R5) + MOVW R5, R0 + MOVW R13, R1 + MOVW $16, R2 + BL poly1305_blocks_armv6<>(SB) +poly1305_finish_ext_armv6_noremaining: + MOVW 20(R5), R0 + MOVW 24(R5), R1 + MOVW 28(R5), R2 + MOVW 32(R5), R3 + MOVW 36(R5), R4 + MOVW R4>>26, R12 + BIC $0xfc000000, R4, R4 + ADD R12<<2, R12, R12 + ADD R12, R0, R0 + 
MOVW R0>>26, R12
+	BIC $0xfc000000, R0, R0
+	ADD R12, R1, R1
+	MOVW R1>>26, R12
+	BIC $0xfc000000, R1, R1
+	ADD R12, R2, R2
+	MOVW R2>>26, R12
+	BIC $0xfc000000, R2, R2
+	ADD R12, R3, R3
+	MOVW R3>>26, R12
+	BIC $0xfc000000, R3, R3
+	ADD R12, R4, R4
+	ADD $5, R0, R6
+	MOVW R6>>26, R12
+	BIC $0xfc000000, R6, R6
+	ADD R12, R1, R7
+	MOVW R7>>26, R12
+	BIC $0xfc000000, R7, R7
+	ADD R12, R2, g
+	MOVW g>>26, R12
+	BIC $0xfc000000, g, g
+	ADD R12, R3, R11
+	MOVW $-(1<<26), R12
+	ADD R11>>26, R12, R12
+	BIC $0xfc000000, R11, R11
+	ADD R12, R4, R14
+	MOVW R14>>31, R12
+	SUB $1, R12
+	AND R12, R6, R6
+	AND R12, R7, R7
+	AND R12, g, g
+	AND R12, R11, R11
+	AND R12, R14, R14
+	MVN R12, R12
+	AND R12, R0, R0
+	AND R12, R1, R1
+	AND R12, R2, R2
+	AND R12, R3, R3
+	AND R12, R4, R4
+	ORR R6, R0, R0
+	ORR R7, R1, R1
+	ORR g, R2, R2
+	ORR R11, R3, R3
+	ORR R14, R4, R4
+	ORR R1<<26, R0, R0
+	MOVW R1>>6, R1
+	ORR R2<<20, R1, R1
+	MOVW R2>>12, R2
+	ORR R3<<14, R2, R2
+	MOVW R3>>18, R3
+	ORR R4<<8, R3, R3
+	MOVW 40(R5), R6
+	MOVW 44(R5), R7
+	MOVW 48(R5), g
+	MOVW 52(R5), R11
+	ADD.S R6, R0, R0
+	ADC.S R7, R1, R1
+	ADC.S g, R2, R2
+	ADC.S R11, R3, R3
+	MOVM.IA [R0-R3], (R8)
+	MOVW R5, R12
+	EOR R0, R0, R0
+	EOR R1, R1, R1
+	EOR R2, R2, R2
+	EOR R3, R3, R3
+	EOR R4, R4, R4
+	EOR R5, R5, R5
+	EOR R6, R6, R6
+	EOR R7, R7, R7
+	MOVM.IA.W [R0-R7], (R12)
+	MOVM.IA [R0-R7], (R12)
+	ADD $16, R13, R13
+	MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
+	RET
+
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+TEXT ·poly1305_auth_armv6(SB),0,$280-16
+	MOVW out+0(FP), R4
+	MOVW m+4(FP), R5
+	MOVW mlen+8(FP), R6
+	MOVW key+12(FP), R7
+
+	MOVW R13, R8
+	BIC $63, R13
+	SUB $64, R13, R13
+	MOVW R13, R0
+	MOVW R7, R1
+	BL poly1305_init_ext_armv6<>(SB)
+	BIC.S $15, R6, R2
+	BEQ poly1305_auth_armv6_noblocks
+	MOVW R13, R0
+	MOVW R5, R1
+	ADD R2, R5, R5
+	SUB R2, R6, R6
+	BL poly1305_blocks_armv6<>(SB)
+poly1305_auth_armv6_noblocks:
+	MOVW R13, R0
+	MOVW R5, R1
+	MOVW R6, R2
+	MOVW R4, R3
+	BL poly1305_finish_ext_armv6<>(SB)
+	MOVW R8, R13
+	RET

=== modified file 'src/golang.org/x/crypto/poly1305/poly1305_test.go'
--- src/golang.org/x/crypto/poly1305/poly1305_test.go	2015-03-26 15:54:39 +0000
+++ src/golang.org/x/crypto/poly1305/poly1305_test.go	2016-03-22 15:18:22 +0000
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"testing"
+	"unsafe"
 )
 
 var testData = []struct {
@@ -34,41 +35,52 @@
 	},
 }
 
-func TestSum(t *testing.T) {
+func testSum(t *testing.T, unaligned bool) {
 	var out [16]byte
 	var key [32]byte
 
 	for i, v := range testData {
+		in := v.in
+		if unaligned {
+			in = unalignBytes(in)
+		}
 		copy(key[:], v.k)
-		Sum(&out, v.in, &key)
+		Sum(&out, in, &key)
 		if !bytes.Equal(out[:], v.correct) {
 			t.Errorf("%d: expected %x, got %x", i, v.correct, out[:])
 		}
 	}
 }
 
-func Benchmark1K(b *testing.B) {
-	b.StopTimer()
+func TestSum(t *testing.T)          { testSum(t, false) }
+func TestSumUnaligned(t *testing.T) { testSum(t, true) }
+
+func benchmark(b *testing.B, size int, unaligned bool) {
 	var out [16]byte
 	var key [32]byte
-	in := make([]byte, 1024)
+	in := make([]byte, size)
+	if unaligned {
+		in = unalignBytes(in)
+	}
 	b.SetBytes(int64(len(in)))
-	b.StartTimer()
-
+	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
 		Sum(&out, in, &key)
 	}
 }
 
-func Benchmark64(b *testing.B) {
-	b.StopTimer()
-	var out [16]byte
-	var key [32]byte
-	in := make([]byte, 64)
-	b.SetBytes(int64(len(in)))
-	b.StartTimer()
+func Benchmark64(b *testing.B) { benchmark(b, 64, false) }
+func Benchmark1K(b *testing.B) { benchmark(b, 1024, false) }
+func Benchmark64Unaligned(b
*testing.B) { benchmark(b, 64, true) } +func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) } - for i := 0; i < b.N; i++ { - Sum(&out, in, &key) +func unalignBytes(in []byte) []byte { + out := make([]byte, len(in)+1) + if uintptr(unsafe.Pointer(&out[0]))&(unsafe.Alignof(uint32(0))-1) == 0 { + out = out[1:] + } else { + out = out[:len(in)] } + copy(out, in) + return out } === added file 'src/golang.org/x/crypto/poly1305/sum_arm.go' --- src/golang.org/x/crypto/poly1305/sum_arm.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/crypto/poly1305/sum_arm.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine + +package poly1305 + +// This function is implemented in poly1305_arm.s + +//go:noescape + +func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) +} === modified file 'src/golang.org/x/crypto/poly1305/sum_ref.go' --- src/golang.org/x/crypto/poly1305/sum_ref.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/poly1305/sum_ref.go 2016-03-22 15:18:22 +0000 @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 gccgo appengine +// +build !amd64,!arm gccgo appengine package poly1305 === modified file 'src/golang.org/x/crypto/sha3/sha3_test.go' --- src/golang.org/x/crypto/sha3/sha3_test.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/sha3/sha3_test.go 2016-03-22 15:18:22 +0000 @@ -159,7 +159,7 @@ testUnalignedAndGeneric(t, func(impl string) { d := New224() - for capacity := 2; capacity < 64; capacity += 64 { + for capacity := 2; capacity <= 66; capacity += 64 { // The first time around the loop, Sum will have to reallocate. // The second time, it will not. buf := make([]byte, 2, capacity) === modified file 'src/golang.org/x/crypto/ssh/agent/client.go' --- src/golang.org/x/crypto/ssh/agent/client.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/agent/client.go 2016-03-22 15:18:22 +0000 @@ -6,7 +6,7 @@ Package agent implements a client to an ssh-agent daemon. References: - [PROTOCOL.agent]: http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent + [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD */ package agent // import "golang.org/x/crypto/ssh/agent" @@ -36,9 +36,8 @@ // in [PROTOCOL.agent] section 2.6.2. Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) - // Insert adds a private key to the agent. If a certificate - // is given, that certificate is added as public key. - Add(s interface{}, cert *ssh.Certificate, comment string) error + // Add adds a private key to the agent. + Add(key AddedKey) error // Remove removes all identities with the given public key. Remove(key ssh.PublicKey) error @@ -56,6 +55,24 @@ Signers() ([]ssh.Signer, error) } +// AddedKey describes an SSH key to be added to an Agent. 
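The reworked agent API replaces the old three-argument Add with Add(AddedKey); the AddedKey struct is defined next. A minimal usage sketch under stated assumptions: the Unix socket path is hypothetical (real code would read $SSH_AUTH_SOCK), and error handling is abbreviated. It exercises only the fields this change introduces:

	package main

	import (
		"crypto/rand"
		"crypto/rsa"
		"log"
		"net"

		"golang.org/x/crypto/ssh/agent"
	)

	func main() {
		// Hypothetical agent socket; real code would use os.Getenv("SSH_AUTH_SOCK").
		conn, err := net.Dial("unix", "/tmp/ssh-agent.sock")
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		priv, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			log.Fatal(err)
		}

		ag := agent.NewClient(conn)
		// LifetimeSecs and ConfirmBeforeUse are sent to the agent as key
		// constraints; leaving them zero-valued adds the key unconstrained.
		if err := ag.Add(agent.AddedKey{
			PrivateKey:       priv,
			Comment:          "example key",
			LifetimeSecs:     3600,
			ConfirmBeforeUse: true,
		}); err != nil {
			log.Fatal(err)
		}
	}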
+type AddedKey struct { + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or + // *ecdsa.PrivateKey, which will be inserted into the agent. + PrivateKey interface{} + // Certificate, if not nil, is communicated to the agent and will be + // stored with the key. + Certificate *ssh.Certificate + // Comment is an optional, free-form string. + Comment string + // LifetimeSecs, if not zero, is the number of seconds that the + // agent will store the key for. + LifetimeSecs uint32 + // ConfirmBeforeUse, if true, requests that the agent confirm with the + // user before each use of this key. + ConfirmBeforeUse bool +} + // See [PROTOCOL.agent], section 3. const ( agentRequestV1Identities = 1 @@ -368,36 +385,39 @@ } type rsaKeyMsg struct { - Type string `sshtype:"17"` - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string + Type string `sshtype:"17"` + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` } type dsaKeyMsg struct { - Type string `sshtype:"17"` - P *big.Int - Q *big.Int - G *big.Int - Y *big.Int - X *big.Int - Comments string + Type string `sshtype:"17"` + P *big.Int + Q *big.Int + G *big.Int + Y *big.Int + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` } type ecdsaKeyMsg struct { - Type string `sshtype:"17"` - Curve string - KeyBytes []byte - D *big.Int - Comments string + Type string `sshtype:"17"` + Curve string + KeyBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` } // Insert adds a private key to the agent. -func (c *client) insertKey(s interface{}, comment string) error { +func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { var req []byte switch k := s.(type) { case *rsa.PrivateKey: @@ -406,37 +426,46 @@ } k.Precompute() req = ssh.Marshal(rsaKeyMsg{ - Type: ssh.KeyAlgoRSA, - N: k.N, - E: big.NewInt(int64(k.E)), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, + Type: ssh.KeyAlgoRSA, + N: k.N, + E: big.NewInt(int64(k.E)), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, }) case *dsa.PrivateKey: req = ssh.Marshal(dsaKeyMsg{ - Type: ssh.KeyAlgoDSA, - P: k.P, - Q: k.Q, - G: k.G, - Y: k.Y, - X: k.X, - Comments: comment, + Type: ssh.KeyAlgoDSA, + P: k.P, + Q: k.Q, + G: k.G, + Y: k.Y, + X: k.X, + Comments: comment, + Constraints: constraints, }) case *ecdsa.PrivateKey: nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) req = ssh.Marshal(ecdsaKeyMsg{ - Type: "ecdsa-sha2-" + nistID, - Curve: nistID, - KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), - D: k.D, - Comments: comment, + Type: "ecdsa-sha2-" + nistID, + Curve: nistID, + KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), + D: k.D, + Comments: comment, + Constraints: constraints, }) default: return fmt.Errorf("agent: unsupported key type %T", s) } + + // if constraints are present then the message type needs to be changed. 
+ if len(constraints) != 0 { + req[0] = agentAddIdConstrained + } + resp, err := c.call(req) if err != nil { return err @@ -448,40 +477,57 @@ } type rsaCertMsg struct { - Type string `sshtype:"17"` - CertBytes []byte - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string + Type string `sshtype:"17"` + CertBytes []byte + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` } type dsaCertMsg struct { - Type string `sshtype:"17"` - CertBytes []byte - X *big.Int - Comments string + Type string `sshtype:"17"` + CertBytes []byte + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` } type ecdsaCertMsg struct { - Type string `sshtype:"17"` - CertBytes []byte - D *big.Int - Comments string + Type string `sshtype:"17"` + CertBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` } // Insert adds a private key to the agent. If a certificate is given, // that certificate is added instead as public key. -func (c *client) Add(s interface{}, cert *ssh.Certificate, comment string) error { - if cert == nil { - return c.insertKey(s, comment) +func (c *client) Add(key AddedKey) error { + var constraints []byte + + if secs := key.LifetimeSecs; secs != 0 { + constraints = append(constraints, agentConstrainLifetime) + + var secsBytes [4]byte + binary.BigEndian.PutUint32(secsBytes[:], secs) + constraints = append(constraints, secsBytes[:]...) + } + + if key.ConfirmBeforeUse { + constraints = append(constraints, agentConstrainConfirm) + } + + if cert := key.Certificate; cert == nil { + return c.insertKey(key.PrivateKey, key.Comment, constraints) } else { - return c.insertCert(s, cert, comment) + return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) } } -func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string) error { +func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { var req []byte switch k := s.(type) { case *rsa.PrivateKey: @@ -490,13 +536,14 @@ } k.Precompute() req = ssh.Marshal(rsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, }) case *dsa.PrivateKey: req = ssh.Marshal(dsaCertMsg{ @@ -516,6 +563,11 @@ return fmt.Errorf("agent: unsupported key type %T", s) } + // if constraints are present then the message type needs to be changed. 
+ if len(constraints) != 0 { + req[0] = agentAddIdConstrained + } + signer, err := ssh.NewSignerFromKey(s) if err != nil { return err === modified file 'src/golang.org/x/crypto/ssh/agent/client_test.go' --- src/golang.org/x/crypto/ssh/agent/client_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/agent/client_test.go 2016-03-22 15:18:22 +0000 @@ -78,14 +78,14 @@ } } -func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate) { +func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { agent, _, cleanup := startAgent(t) defer cleanup() - testAgentInterface(t, agent, key, cert) + testAgentInterface(t, agent, key, cert, lifetimeSecs) } -func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate) { +func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) { signer, err := ssh.NewSignerFromKey(key) if err != nil { t.Fatalf("NewSignerFromKey(%T): %v", key, err) @@ -100,10 +100,15 @@ // Attempt to insert the key, with certificate if specified. var pubKey ssh.PublicKey if cert != nil { - err = agent.Add(key, cert, "comment") + err = agent.Add(AddedKey{ + PrivateKey: key, + Certificate: cert, + Comment: "comment", + LifetimeSecs: lifetimeSecs, + }) pubKey = cert } else { - err = agent.Add(key, nil, "comment") + err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs}) pubKey = signer.PublicKey() } if err != nil { @@ -135,7 +140,7 @@ func TestAgent(t *testing.T) { for _, keyType := range []string{"rsa", "dsa", "ecdsa"} { - testAgent(t, testPrivateKeys[keyType], nil) + testAgent(t, testPrivateKeys[keyType], nil, 0) } } @@ -147,7 +152,11 @@ } cert.SignCert(rand.Reader, testSigners["ecdsa"]) - testAgent(t, testPrivateKeys["rsa"], cert) + testAgent(t, testPrivateKeys["rsa"], cert, 0) +} + +func TestConstraints(t *testing.T) { + testAgent(t, testPrivateKeys["rsa"], nil, 3600 /* lifetime in seconds */) } // netPipe is analogous to net.Pipe, but it uses a real net.Conn, and @@ -185,7 +194,7 @@ agent, _, cleanup := startAgent(t) defer cleanup() - if err := agent.Add(testPrivateKeys["rsa"], nil, "comment"); err != nil { + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil { t.Errorf("Add: %v", err) } @@ -223,10 +232,10 @@ } func testLockAgent(agent Agent, t *testing.T) { - if err := agent.Add(testPrivateKeys["rsa"], nil, "comment 1"); err != nil { + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil { t.Errorf("Add: %v", err) } - if err := agent.Add(testPrivateKeys["dsa"], nil, "comment dsa"); err != nil { + if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil { t.Errorf("Add: %v", err) } if keys, err := agent.List(); err != nil { === modified file 'src/golang.org/x/crypto/ssh/agent/keyring.go' --- src/golang.org/x/crypto/ssh/agent/keyring.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/ssh/agent/keyring.go 2016-03-22 15:18:22 +0000 @@ -125,27 +125,28 @@ } // Insert adds a private key to the keyring. If a certificate -// is given, that certificate is added as public key. -func (r *keyring) Add(priv interface{}, cert *ssh.Certificate, comment string) error { +// is given, that certificate is added as public key. Note that +// any constraints given are ignored. 
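To make the contrast concrete before the keyring implementation that follows: the in-memory keyring accepts the same AddedKey but, as its doc comment notes, drops the constraint fields. A hedged fragment, where priv stands for any supported private key such as an *rsa.PrivateKey:

	kr := agent.NewKeyring()
	// Accepted without error, but LifetimeSecs has no effect here: the key
	// stays until it is removed or the keyring is locked.
	if err := kr.Add(agent.AddedKey{PrivateKey: priv, LifetimeSecs: 60}); err != nil {
		log.Fatal(err)
	}
	keys, err := kr.List()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("keyring holds %d key(s)", len(keys))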
+func (r *keyring) Add(key AddedKey) error { r.mu.Lock() defer r.mu.Unlock() if r.locked { return errLocked } - signer, err := ssh.NewSignerFromKey(priv) + signer, err := ssh.NewSignerFromKey(key.PrivateKey) if err != nil { return err } - if cert != nil { + if cert := key.Certificate; cert != nil { signer, err = ssh.NewCertSigner(cert, signer) if err != nil { return err } } - r.keys = append(r.keys, privKey{signer, comment}) + r.keys = append(r.keys, privKey{signer, key.Comment}) return nil } === modified file 'src/golang.org/x/crypto/ssh/agent/server.go' --- src/golang.org/x/crypto/ssh/agent/server.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/agent/server.go 2016-03-22 15:18:22 +0000 @@ -167,7 +167,7 @@ } priv.Precompute() - return s.agent.Add(&priv, nil, k.Comments) + return s.agent.Add(AddedKey{PrivateKey: &priv, Comment: k.Comments}) } return fmt.Errorf("not implemented: %s", record.Type) } === modified file 'src/golang.org/x/crypto/ssh/agent/server_test.go' --- src/golang.org/x/crypto/ssh/agent/server_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/agent/server_test.go 2016-03-22 15:18:22 +0000 @@ -21,7 +21,7 @@ go ServeAgent(NewKeyring(), c2) - testAgentInterface(t, client, testPrivateKeys["rsa"], nil) + testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0) } func TestLockServer(t *testing.T) { @@ -72,6 +72,6 @@ go ssh.DiscardRequests(reqs) agentClient := NewClient(ch) - testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil) + testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0) conn.Close() } === modified file 'src/golang.org/x/crypto/ssh/certs.go' --- src/golang.org/x/crypto/ssh/certs.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/certs.go 2016-03-22 15:18:22 +0000 @@ -85,46 +85,73 @@ return to } +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value func marshalTuples(tups map[string]string) []byte { keys := make([]string, 0, len(tups)) - for k := range tups { - keys = append(keys, k) + for key := range tups { + keys = append(keys, key) } sort.Strings(keys) - var r []byte - for _, k := range keys { - s := struct{ K, V string }{k, tups[k]} - r = append(r, Marshal(&s)...) + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) } - return r + return ret } +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value func parseTuples(in []byte) (map[string]string, error) { tups := map[string]string{} var lastKey string var haveLastKey bool for len(in) > 0 { - nameBytes, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - data, rest, ok := parseString(rest) - if !ok { - return nil, errShortRead - } - name := string(nameBytes) + var key, val, extra []byte + var ok bool + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) // according to [PROTOCOL.certkeys], the names must be in // lexical order. 
- if haveLastKey && name <= lastKey { + if haveLastKey && keyStr <= lastKey { return nil, fmt.Errorf("ssh: certificate options are not in lexical order") } - lastKey, haveLastKey = name, true - - tups[name] = string(data) - in = rest + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } } return tups, nil } @@ -341,7 +368,7 @@ if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { return fmt.Errorf("ssh: cert is not yet valid") } - if before := int64(cert.ValidBefore); cert.ValidBefore != CertTimeInfinity && (unixNow >= before || before < 0) { + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { return fmt.Errorf("ssh: cert has expired") } if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { === modified file 'src/golang.org/x/crypto/ssh/certs_test.go' --- src/golang.org/x/crypto/ssh/certs_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/certs_test.go 2016-03-22 15:18:22 +0000 @@ -7,13 +7,14 @@ import ( "bytes" "crypto/rand" + "reflect" "testing" "time" ) // Cert generated by ssh-keygen 6.0p1 Debian-4. // % ssh-keygen -s ca-key -I test user-key -var exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=` +const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=` func TestParseCert(t *testing.T) { authKeyBytes := []byte(exampleSSHCert) @@ -27,9 +28,63 @@ } if _, ok := key.(*Certificate); !ok { - t.Fatalf("got %#v, want *Certificate", key) - } - + t.Fatalf("got %v (%T), want 
*Certificate", key, key) + } + + marshaled := MarshalAuthorizedKey(key) + // Before comparison, remove the trailing newline that + // MarshalAuthorizedKey adds. + marshaled = marshaled[:len(marshaled)-1] + if !bytes.Equal(authKeyBytes, marshaled) { + t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes) + } +} + +// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3 +// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub +// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN +// Critical Options: +// force-command /bin/sleep +// source-address 192.168.1.0/24 +// Extensions: +// permit-X11-forwarding +// permit-agent-forwarding +// permit-port-forwarding +// permit-pty +// permit-user-rc +const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ` + +func TestParseCertWithOptions(t *testing.T) { + opts := map[string]string{ + "source-address": "192.168.1.0/24", + "force-command": "/bin/sleep", + } + exts := map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + } + authKeyBytes := []byte(exampleSSHCertWithOptions) + + key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes) + if err != nil { + t.Fatalf("ParseAuthorizedKey: %v", err) + } + if len(rest) > 0 { + t.Errorf("rest: got %q, want empty", rest) + } + cert, ok := key.(*Certificate) + if !ok { + t.Fatalf("got %v (%T), want *Certificate", key, key) + } + if !reflect.DeepEqual(cert.CriticalOptions, opts) { + t.Errorf("unexpected critical options - got %v, want %v", 
cert.CriticalOptions, opts) + } + if !reflect.DeepEqual(cert.Extensions, exts) { + t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts) + } marshaled := MarshalAuthorizedKey(key) // Before comparison, remove the trailing newline that // MarshalAuthorizedKey adds. @@ -131,15 +186,15 @@ defer c1.Close() defer c2.Close() + errc := make(chan error) + go func() { conf := ServerConfig{ NoClientAuth: true, } conf.AddHostKey(certSigner) _, _, _, err := NewServerConn(c1, &conf) - if err != nil { - t.Fatalf("NewServerConn: %v", err) - } + errc <- err }() config := &ClientConfig{ @@ -152,5 +207,10 @@ if (err == nil) != succeed { t.Fatalf("NewClientConn(%q): %v", name, err) } + + err = <-errc + if (err == nil) != succeed { + t.Fatalf("NewServerConn(%q): %v", name, err) + } } } === modified file 'src/golang.org/x/crypto/ssh/cipher.go' --- src/golang.org/x/crypto/ssh/cipher.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/cipher.go 2016-03-22 15:18:22 +0000 @@ -14,6 +14,7 @@ "fmt" "hash" "io" + "io/ioutil" ) const ( @@ -113,6 +114,10 @@ // special case. If we add any more non-stream ciphers, we // should invest a cleaner way to do this. gcmCipherID: {16, 12, 0, nil}, + + // insecure cipher, see http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf + // uncomment below to enable it. + // aes128cbcID: {16, aes.BlockSize, 0, nil}, } // prefixLen is the length of the packet prefix that contains the packet length @@ -342,3 +347,203 @@ plain = plain[1 : length-uint32(padding)] return plain, nil } + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. + seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. + oracleCamouflage uint32 +} + +func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. 
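+	// Illustrative arithmetic (assuming aes128-cbc with hmac-sha1, i.e.
+	// blockSize 16 and macSize 20): firstBlockLength below is 16, so
+	// oracleCamouflage starts at maxPacket+4+20-16. Every byte read later
+	// is subtracted from it, so on any cbcError the total consumed is
+	// firstBlockLength + bytesRead + oracleCamouflage = maxPacket+4+macSize,
+	// a constant regardless of which verification step failed.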
+ firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. + if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. + if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { + return nil, err + } else { + c.oracleCamouflage -= uint32(n) + } + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. 
+	p = p[len(packet):]
+	if _, err := io.ReadFull(rand, p); err != nil {
+		return err
+	}
+
+	if c.mac != nil {
+		c.mac.Reset()
+		binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
+		c.mac.Write(c.seqNumBytes[:])
+		c.mac.Write(c.packetData)
+		// The MAC is now appended into the capacity reserved for it earlier.
+		c.packetData = c.mac.Sum(c.packetData)
+	}
+
+	c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
+
+	if _, err := w.Write(c.packetData); err != nil {
+		return err
+	}
+
+	return nil
+}

=== modified file 'src/golang.org/x/crypto/ssh/cipher_test.go'
--- src/golang.org/x/crypto/ssh/cipher_test.go	2015-03-26 15:54:39 +0000
+++ src/golang.org/x/crypto/ssh/cipher_test.go	2016-03-22 15:18:22 +0000
@@ -7,6 +7,7 @@
 import (
 	"bytes"
 	"crypto"
+	"crypto/aes"
 	"crypto/rand"
 	"testing"
 )
@@ -20,6 +21,10 @@
 }
 
 func TestPacketCiphers(t *testing.T) {
+	// Still test the aes128-cbc cipher although it's commented out in cipher.go.
+	cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
+	defer delete(cipherModes, aes128cbcID)
+
 	for cipher := range cipherModes {
 		kr := &kexResult{Hash: crypto.SHA1}
 		algs := directionAlgorithms{
@@ -57,3 +62,66 @@
 		}
 	}
 }
+
+func TestCBCOracleCounterMeasure(t *testing.T) {
+	cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
+	defer delete(cipherModes, aes128cbcID)
+
+	kr := &kexResult{Hash: crypto.SHA1}
+	algs := directionAlgorithms{
+		Cipher:      aes128cbcID,
+		MAC:         "hmac-sha1",
+		Compression: "none",
+	}
+	client, err := newPacketCipher(clientKeys, algs, kr)
+	if err != nil {
+		t.Fatalf("newPacketCipher(client): %v", err)
+	}
+
+	want := "bla bla"
+	input := []byte(want)
+	buf := &bytes.Buffer{}
+	if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
+		t.Errorf("writePacket: %v", err)
+	}
+
+	packetSize := buf.Len()
+	buf.Write(make([]byte, 2*maxPacket))
+
+	// We corrupt each byte, but this usually will only test the
+	// 'packet too large' or 'MAC failure' cases.
+	lastRead := -1
+	for i := 0; i < packetSize; i++ {
+		server, err := newPacketCipher(clientKeys, algs, kr)
+		if err != nil {
+			t.Fatalf("newPacketCipher(client): %v", err)
+		}
+
+		fresh := &bytes.Buffer{}
+		fresh.Write(buf.Bytes())
+		fresh.Bytes()[i] ^= 0x01
+
+		before := fresh.Len()
+		_, err = server.readPacket(0, fresh)
+		if err == nil {
+			t.Errorf("corrupt byte %d: readPacket succeeded", i)
+			continue
+		}
+		if _, ok := err.(cbcError); !ok {
+			t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err)
+			continue
+		}
+
+		after := fresh.Len()
+		bytesRead := before - after
+		if bytesRead < maxPacket {
+			t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket)
+			continue
+		}
+
+		if i > 0 && bytesRead != lastRead {
+			t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead)
+		}
+		lastRead = bytesRead
+	}
+}

=== modified file 'src/golang.org/x/crypto/ssh/client.go'
--- src/golang.org/x/crypto/ssh/client.go	2015-09-22 15:27:01 +0000
+++ src/golang.org/x/crypto/ssh/client.go	2016-03-22 15:18:22 +0000
@@ -203,4 +203,11 @@
 	// ClientVersion contains the version identification string that will
 	// be used for the connection. If empty, a reasonable default is used.
 	ClientVersion string
+
+	// HostKeyAlgorithms lists the key types that the client will
+	// accept from the server as host key, in order of
+	// preference. If empty, a reasonable default is used. Any
+	// string returned from the PublicKey.Type method may be used, or
+	// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
+ HostKeyAlgorithms []string } === modified file 'src/golang.org/x/crypto/ssh/client_auth_test.go' --- src/golang.org/x/crypto/ssh/client_auth_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/client_auth_test.go 2016-03-22 15:18:22 +0000 @@ -252,8 +252,8 @@ KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported }, } - if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "no common algorithms") { - t.Errorf("got %v, expected 'no common algorithms'", err) + if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") { + t.Errorf("got %v, expected 'common algorithm'", err) } } === modified file 'src/golang.org/x/crypto/ssh/common.go' --- src/golang.org/x/crypto/ssh/common.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/common.go 2016-03-22 15:18:22 +0000 @@ -33,6 +33,7 @@ // supportedKexAlgos specifies the supported key-exchange algorithms in // preference order. var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, // P384 and P521 are not constant-time yet, but since we don't // reuse ephemeral keys, using them for ECDH should be OK. kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, @@ -53,7 +54,7 @@ // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed // because they have reached the end of their useful life. var supportedMACs = []string{ - "hmac-sha1", "hmac-sha1-96", + "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", } var supportedCompressions = []string{compressionNone} @@ -84,27 +85,15 @@ return fmt.Errorf("ssh: parse error in message type %d", tag) } -func findCommonAlgorithm(clientAlgos []string, serverAlgos []string) (commonAlgo string, ok bool) { - for _, clientAlgo := range clientAlgos { - for _, serverAlgo := range serverAlgos { - if clientAlgo == serverAlgo { - return clientAlgo, true - } - } - } - return -} - -func findCommonCipher(clientCiphers []string, serverCiphers []string) (commonCipher string, ok bool) { - for _, clientCipher := range clientCiphers { - for _, serverCipher := range serverCiphers { - // reject the cipher if we have no cipherModes definition - if clientCipher == serverCipher && cipherModes[clientCipher] != nil { - return clientCipher, true - } - } - } - return +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) } type directionAlgorithms struct { @@ -120,50 +109,50 @@ r directionAlgorithms } -func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms) { - var ok bool +func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { result := &algorithms{} - result.kex, ok = findCommonAlgorithm(clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if !ok { - return - } - - result.hostKey, ok = findCommonAlgorithm(clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if !ok { - return - } - - result.w.Cipher, ok = findCommonCipher(clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if !ok { - return - } - - result.r.Cipher, ok = findCommonCipher(clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if !ok { - return - } - - result.w.MAC, ok = findCommonAlgorithm(clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if !ok { - return 
- } - - result.r.MAC, ok = findCommonAlgorithm(clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if !ok { - return - } - - result.w.Compression, ok = findCommonAlgorithm(clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if !ok { - return - } - - result.r.Compression, ok = findCommonAlgorithm(clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if !ok { - return - } - - return result + + result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + if err != nil { + return + } + + result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + if err != nil { + return + } + + result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + if err != nil { + return + } + + result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + if err != nil { + return + } + + result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } + + result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } + + result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + if err != nil { + return + } + + result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + if err != nil { + return + } + + return result, nil } // If rekeythreshold is too small, we can't make any progress sending @@ -206,6 +195,14 @@ if c.Ciphers == nil { c.Ciphers = supportedCiphers } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // reject the cipher if we have no cipherModes definition + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers if c.KeyExchanges == nil { c.KeyExchanges = supportedKexAlgos === modified file 'src/golang.org/x/crypto/ssh/connection.go' --- src/golang.org/x/crypto/ssh/connection.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/connection.go 2016-03-22 15:18:22 +0000 @@ -33,7 +33,7 @@ // into the session ID. ClientVersion() []byte - // ServerVersion returns the client's version string as hashed + // ServerVersion returns the server's version string as hashed // into the session ID. ServerVersion() []byte === modified file 'src/golang.org/x/crypto/ssh/doc.go' --- src/golang.org/x/crypto/ssh/doc.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/doc.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,7 @@ others. References: - [PROTOCOL.certkeys]: http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 */ package ssh // import "golang.org/x/crypto/ssh" === modified file 'src/golang.org/x/crypto/ssh/handshake.go' --- src/golang.org/x/crypto/ssh/handshake.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/handshake.go 2016-03-22 15:18:22 +0000 @@ -59,7 +59,14 @@ serverVersion []byte clientVersion []byte - hostKeys []Signer // If hostKeys are given, we are the server. 
+ // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string // On read error, incoming is closed, and readError is set. incoming chan []byte @@ -98,6 +105,11 @@ t.dialAddress = dialAddr t.remoteAddr = addr t.hostKeyCallback = config.HostKeyCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } go t.readLoop() return t } @@ -141,6 +153,14 @@ } t.incoming <- p } + + // If we can't read, declare the writing part dead too. + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil { + t.writeError = t.readError + } + t.cond.Broadcast() } func (t *handshakeTransport) readOnePacket() ([]byte, error) { @@ -234,7 +254,7 @@ msg.ServerHostKeyAlgos, k.PublicKey().Type()) } } else { - msg.ServerHostKeyAlgos = supportedHostKeyAlgos + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms } packet := Marshal(msg) @@ -253,10 +273,12 @@ func (t *handshakeTransport) writePacket(p []byte) error { t.mu.Lock() + defer t.mu.Unlock() + if t.writtenSinceKex > t.config.RekeyThreshold { t.sendKexInitLocked() } - for t.sentInitMsg != nil { + for t.sentInitMsg != nil && t.writeError == nil { t.cond.Wait() } if t.writeError != nil { @@ -264,17 +286,14 @@ } t.writtenSinceKex += uint64(len(p)) - var err error switch p[0] { case msgKexInit: - err = errors.New("ssh: only handshakeTransport can send kexInit") + return errors.New("ssh: only handshakeTransport can send kexInit") case msgNewKeys: - err = errors.New("ssh: only handshakeTransport can send newKeys") + return errors.New("ssh: only handshakeTransport can send newKeys") default: - err = t.conn.writePacket(p) + return t.conn.writePacket(p) } - t.mu.Unlock() - return err } func (t *handshakeTransport) Close() error { @@ -313,9 +332,9 @@ magics.serverKexInit = otherInitPacket } - algs := findAgreedAlgorithms(clientInit, serverInit) - if algs == nil { - return errors.New("ssh: no common algorithms") + algs, err := findAgreedAlgorithms(clientInit, serverInit) + if err != nil { + return err } // We don't send FirstKexFollows, but we handle receiving it. === modified file 'src/golang.org/x/crypto/ssh/handshake_test.go' --- src/golang.org/x/crypto/ssh/handshake_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/handshake_test.go 2016-03-22 15:18:22 +0000 @@ -7,8 +7,12 @@ import ( "bytes" "crypto/rand" + "errors" "fmt" "net" + "runtime" + "strings" + "sync" "testing" ) @@ -68,6 +72,7 @@ serverConf := &ServerConfig{} serverConf.AddHostKey(testSigners["ecdsa"]) + serverConf.AddHostKey(testSigners["rsa"]) serverConf.SetDefaults() server = newServerTransport(trS, v, v, serverConf) @@ -75,6 +80,9 @@ } func TestHandshakeBasic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/7237") + } checker := &testChecker{} trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr") if err != nil { @@ -309,3 +317,99 @@ <-sync.called } + +// errorKeyingTransport generates errors after a given number of +// read/write operations. 
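The HostKeyAlgorithms field added to ClientConfig is consumed by the handshake code above. A minimal client-side sketch of its use; the host, user, and accept-all callback are hypothetical, and a real client must verify the returned host key:

	config := &ssh.ClientConfig{
		User: "demo",
		// Only accept RSA host keys from the server.
		HostKeyAlgorithms: []string{ssh.KeyAlgoRSA},
		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return nil // placeholder; check against known_hosts in real code
		},
	}
	client, err := ssh.Dial("tcp", "example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()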
+type errorKeyingTransport struct { + packetConn + readLeft, writeLeft int +} + +func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error { + return nil +} +func (n *errorKeyingTransport) getSessionID() []byte { + return nil +} + +func (n *errorKeyingTransport) writePacket(packet []byte) error { + if n.writeLeft == 0 { + n.Close() + return errors.New("barf") + } + + n.writeLeft-- + return n.packetConn.writePacket(packet) +} + +func (n *errorKeyingTransport) readPacket() ([]byte, error) { + if n.readLeft == 0 { + n.Close() + return nil, errors.New("barf") + } + + n.readLeft-- + return n.packetConn.readPacket() +} + +func TestHandshakeErrorHandlingRead(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, i, -1) + } +} + +func TestHandshakeErrorHandlingWrite(t *testing.T) { + for i := 0; i < 20; i++ { + testHandshakeErrorHandlingN(t, -1, i) + } +} + +// testHandshakeErrorHandlingN runs handshakes, injecting errors. If +// handshakeTransport deadlocks, the go runtime will detect it and +// panic. +func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int) { + msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)}) + + a, b := memPipe() + defer a.Close() + defer b.Close() + + key := testSigners["ecdsa"] + serverConf := Config{RekeyThreshold: minRekeyThreshold} + serverConf.SetDefaults() + serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'}) + serverConn.hostKeys = []Signer{key} + go serverConn.readLoop() + + clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold} + clientConf.SetDefaults() + clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'}) + clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()} + go clientConn.readLoop() + + var wg sync.WaitGroup + wg.Add(4) + + for _, hs := range []packetConn{serverConn, clientConn} { + go func(c packetConn) { + for { + err := c.writePacket(msg) + if err != nil { + break + } + } + wg.Done() + }(hs) + go func(c packetConn) { + for { + _, err := c.readPacket() + if err != nil { + break + } + } + wg.Done() + }(hs) + } + + wg.Wait() +} === modified file 'src/golang.org/x/crypto/ssh/kex.go' --- src/golang.org/x/crypto/ssh/kex.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/kex.go 2016-03-22 15:18:22 +0000 @@ -8,18 +8,22 @@ "crypto" "crypto/ecdsa" "crypto/elliptic" + "crypto/subtle" "crypto/rand" "errors" "io" "math/big" + + "golang.org/x/crypto/curve25519" ) const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" ) // kexResult captures the outcome of a key exchange. 
@@ -383,4 +387,140 @@ kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. +var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: 
hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil } === modified file 'src/golang.org/x/crypto/ssh/kex_test.go' --- src/golang.org/x/crypto/ssh/kex_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/kex_test.go 2016-03-22 15:18:22 +0000 @@ -26,10 +26,12 @@ var magics handshakeMagics go func() { r, e := kex.Client(a, rand.Reader, &magics) + a.Close() c <- kexResultErr{r, e} }() go func() { r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"]) + b.Close() s <- kexResultErr{r, e} }() === modified file 'src/golang.org/x/crypto/ssh/keys.go' --- src/golang.org/x/crypto/ssh/keys.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/keys.go 2016-03-22 15:18:22 +0000 @@ -422,14 +422,19 @@ // parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - identifier, in, ok := parseString(in) - if !ok { - return nil, nil, errShortRead + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err } key := new(ecdsa.PublicKey) - switch string(identifier) { + switch w.Curve { case "nistp256": key.Curve = elliptic.P256() case "nistp384": @@ -440,16 +445,11 @@ return nil, nil, errors.New("ssh: unsupported curve") } - var keyBytes []byte - if keyBytes, in, ok = parseString(in); !ok { - return nil, nil, errShortRead - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, keyBytes) + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) if key.X == nil || key.Y == nil { return nil, nil, errors.New("ssh: invalid curve point") } - return (*ecdsaPublicKey)(key), in, nil + return (*ecdsaPublicKey)(key), w.Rest, nil } func (key *ecdsaPublicKey) Marshal() []byte { === modified file 'src/golang.org/x/crypto/ssh/mac.go' --- src/golang.org/x/crypto/ssh/mac.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/mac.go 2016-03-22 15:18:22 +0000 @@ -9,6 +9,7 @@ import ( "crypto/hmac" "crypto/sha1" + "crypto/sha256" "hash" ) @@ -44,6 +45,9 @@ func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } var macModes = map[string]*macMode{ + "hmac-sha2-256": {32, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, "hmac-sha1": {20, func(key []byte) hash.Hash { return hmac.New(sha1.New, key) }}, === modified file 'src/golang.org/x/crypto/ssh/mempipe_test.go' --- src/golang.org/x/crypto/ssh/mempipe_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/mempipe_test.go 2016-03-22 15:18:22 +0000 @@ -76,7 +76,7 @@ return &t1, &t2 } -func TestmemPipe(t *testing.T) { +func TestMemPipe(t *testing.T) { a, b := memPipe() if err := a.writePacket([]byte{42}); err != nil { t.Fatalf("writePacket: %v", err) === modified file 'src/golang.org/x/crypto/ssh/messages.go' --- src/golang.org/x/crypto/ssh/messages.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/messages.go 2016-03-22 15:18:22 +0000 @@ -484,11 +484,12 @@ return } length := binary.BigEndian.Uint32(in) - if uint32(len(in)) < 4+length { + in = in[4:] + if uint32(len(in)) < length { return } - out = in[4 : 4+length] - rest = in[4+length:] + out = in[:length] + rest = in[length:] ok = true return } === modified file 'src/golang.org/x/crypto/ssh/messages_test.go' --- src/golang.org/x/crypto/ssh/messages_test.go 2015-03-26 15:54:39 +0000 +++ 
src/golang.org/x/crypto/ssh/messages_test.go 2016-03-22 15:18:22 +0000 @@ -162,6 +162,16 @@ } } +func TestUnmarshalShortKexInitPacket(t *testing.T) { + // This used to panic. + // Issue 11348 + packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff} + kim := &kexInitMsg{} + if err := Unmarshal(packet, kim); err == nil { + t.Error("truncated packet unmarshaled without error") + } +} + func randomBytes(out []byte, rand *rand.Rand) { for i := 0; i < len(out); i++ { out[i] = byte(rand.Int31()) === modified file 'src/golang.org/x/crypto/ssh/server.go' --- src/golang.org/x/crypto/ssh/server.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/ssh/server.go 2016-03-22 15:18:22 +0000 @@ -168,6 +168,10 @@ return nil, errors.New("ssh: server has no host keys") } + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + if config.ServerVersion != "" { s.serverVersion = []byte(config.ServerVersion) } else { === modified file 'src/golang.org/x/crypto/ssh/session_test.go' --- src/golang.org/x/crypto/ssh/session_test.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/ssh/session_test.go 2016-03-22 15:18:22 +0000 @@ -9,9 +9,11 @@ import ( "bytes" crypto_rand "crypto/rand" + "errors" "io" "io/ioutil" "math/rand" + "net" "testing" "golang.org/x/crypto/ssh/terminal" @@ -678,3 +680,95 @@ t.Errorf("client and server SessionID were empty.") } } + +type noReadConn struct { + readSeen bool + net.Conn +} + +func (c *noReadConn) Close() error { + return nil +} + +func (c *noReadConn) Read(b []byte) (int, error) { + c.readSeen = true + return 0, errors.New("noReadConn error") +} + +func TestInvalidServerConfiguration(t *testing.T) { + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + serveConn := noReadConn{Conn: c1} + serverConf := &ServerConfig{} + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key") + } + + serverConf.AddHostKey(testSigners["ecdsa"]) + + NewServerConn(&serveConn, serverConf) + if serveConn.readSeen { + t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method") + } +} + +func TestHostKeyAlgorithms(t *testing.T) { + serverConf := &ServerConfig{ + NoClientAuth: true, + } + serverConf.AddHostKey(testSigners["rsa"]) + serverConf.AddHostKey(testSigners["ecdsa"]) + + connect := func(clientConf *ClientConfig, want string) { + var alg string + clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error { + alg = key.Type() + return nil + } + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + _, _, _, err = NewClientConn(c2, "", clientConf) + if err != nil { + t.Fatalf("NewClientConn: %v", err) + } + if alg != want { + t.Errorf("selected key algorithm %s, want %s", alg, want) + } + } + + // By default, we get the preferred algorithm, which is ECDSA 256. + + clientConf := &ClientConfig{} + connect(clientConf, KeyAlgoECDSA256) + + // Client asks for RSA explicitly. 
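+ // HostKeyAlgorithms lists, in preference order, the host key
+ // algorithms the client will accept from the server.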
+ clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA} + connect(clientConf, KeyAlgoRSA) + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, serverConf) + clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"} + _, _, _, err = NewClientConn(c2, "", clientConf) + if err == nil { + t.Fatal("succeeded connecting with unknown hostkey algorithm") + } +} === modified file 'src/golang.org/x/crypto/ssh/tcpip.go' --- src/golang.org/x/crypto/ssh/tcpip.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/tcpip.go 2016-03-22 15:18:22 +0000 @@ -355,6 +355,9 @@ lport: uint32(lport), } ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } go DiscardRequests(in) return ch, err } === modified file 'src/golang.org/x/crypto/ssh/test/agent_unix_test.go' --- src/golang.org/x/crypto/ssh/test/agent_unix_test.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/test/agent_unix_test.go 2016-03-22 15:18:22 +0000 @@ -21,7 +21,16 @@ defer conn.Close() keyring := agent.NewKeyring() - keyring.Add(testPrivateKeys["dsa"], nil, "") + if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil { + t.Fatalf("Error adding key: %s", err) + } + if err := keyring.Add(agent.AddedKey{ + PrivateKey: testPrivateKeys["dsa"], + ConfirmBeforeUse: true, + LifetimeSecs: 3600, + }); err != nil { + t.Fatalf("Error adding key with constraints: %s", err) + } pub := testPublicKeys["dsa"] sess, err := conn.NewSession() === modified file 'src/golang.org/x/crypto/ssh/test/doc.go' --- src/golang.org/x/crypto/ssh/test/doc.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/test/doc.go 2016-03-22 15:18:22 +0000 @@ -3,5 +3,5 @@ // license that can be found in the LICENSE file. // This package contains integration tests for the -// code.google.com/p/go.crypto/ssh package. +// golang.org/x/crypto/ssh package. package test // import "golang.org/x/crypto/ssh/test" === modified file 'src/golang.org/x/crypto/ssh/test/session_test.go' --- src/golang.org/x/crypto/ssh/test/session_test.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/ssh/test/session_test.go 2016-03-22 15:18:22 +0000 @@ -280,6 +280,9 @@ var config ssh.Config config.SetDefaults() cipherOrder := config.Ciphers + // aes128-cbc is commented out by default in cipher.go; when it is not + // available, cipher negotiation falls back to the next entry in the list. + cipherOrder = append(cipherOrder, "aes128-cbc") for _, ciph := range cipherOrder { server := newServer(t) @@ -316,3 +319,22 @@ } } } + +func TestKeyExchanges(t *testing.T) { + var config ssh.Config + config.SetDefaults() + kexOrder := config.KeyExchanges + for _, kex := range kexOrder { + server := newServer(t) + defer server.Shutdown() + conf := clientConfig() + // Don't fail if sshd doesn't have the kex. + conf.KeyExchanges = append([]string{kex}, kexOrder...) + conn, err := server.TryDial(conf) + if err == nil { + conn.Close() + } else { + t.Errorf("failed for kex %q", kex) + } + } +} === modified file 'src/golang.org/x/crypto/ssh/testdata/doc.go' --- src/golang.org/x/crypto/ssh/testdata/doc.go 2015-03-26 15:54:39 +0000 +++ src/golang.org/x/crypto/ssh/testdata/doc.go 2016-03-22 15:18:22 +0000 @@ -3,6 +3,6 @@ // license that can be found in the LICENSE file. // This package contains test data shared between the various subpackages of -// the code.google.com/p/go.crypto/ssh package. Under no circumstance should +// the golang.org/x/crypto/ssh package. 
Under no circumstance should // this data be used for production code. package testdata // import "golang.org/x/crypto/ssh/testdata" === modified file 'src/golang.org/x/crypto/ssh/transport.go' --- src/golang.org/x/crypto/ssh/transport.go 2015-09-22 15:27:01 +0000 +++ src/golang.org/x/crypto/ssh/transport.go 2016-03-22 15:18:22 +0000 @@ -12,6 +12,7 @@ const ( gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" ) // packetConn represents a transport that implements packet based @@ -218,6 +219,10 @@ return newGCMCipher(iv, key, macKey) } + if algs.Cipher == aes128cbcID { + return newAESCBCCipher(iv, key, macKey, algs) + } + c := &streamPacketCipher{ mac: macModes[algs.MAC].new(macKey), } === added directory 'src/golang.org/x/crypto/tea' === added file 'src/golang.org/x/crypto/tea/cipher.go' --- src/golang.org/x/crypto/tea/cipher.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/crypto/tea/cipher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,109 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tea implements the TEA algorithm, as defined in Needham and +// Wheeler's 1994 technical report, “TEA, a Tiny Encryption Algorithm”. See +// http://www.cix.co.uk/~klockstone/tea.pdf for details. + +package tea + +import ( + "crypto/cipher" + "encoding/binary" + "errors" +) + +const ( + // BlockSize is the size of a TEA block, in bytes. + BlockSize = 8 + + // KeySize is the size of a TEA key, in bytes. + KeySize = 16 + + // delta is the TEA key schedule constant. + delta = 0x9e3779b9 + + // numRounds is the standard number of rounds in TEA. + numRounds = 64 +) + +// tea is an instance of the TEA cipher with a particular key. +type tea struct { + key [16]byte + rounds int +} + +// NewCipher returns an instance of the TEA cipher with the standard number of +// rounds. The key argument must be 16 bytes long. +func NewCipher(key []byte) (cipher.Block, error) { + return NewCipherWithRounds(key, numRounds) +} + +// NewCipherWithRounds returns an instance of the TEA cipher with a given +// number of rounds, which must be even. The key argument must be 16 bytes +// long. +func NewCipherWithRounds(key []byte, rounds int) (cipher.Block, error) { + if len(key) != 16 { + return nil, errors.New("tea: incorrect key size") + } + + if rounds&1 != 0 { + return nil, errors.New("tea: odd number of rounds specified") + } + + c := &tea{ + rounds: rounds, + } + copy(c.key[:], key) + + return c, nil +} + +// BlockSize returns the TEA block size, which is eight bytes. It is necessary +// to satisfy the Block interface in the package "crypto/cipher". +func (*tea) BlockSize() int { + return BlockSize +} + +// Encrypt encrypts the 8 byte buffer src using the key in t and stores the +// result in dst. Note that for amounts of data larger than a block, it is not +// safe to just call Encrypt on successive blocks; instead, use an encryption +// mode like CBC (see crypto/cipher/cbc.go). 
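+// A minimal CBC usage sketch (editor's illustration, not part of the patch;
+// the zeroed key and IV are placeholders, and the plaintext length must be a
+// multiple of BlockSize):
+//
+//	block, _ := tea.NewCipher(make([]byte, tea.KeySize))
+//	iv := make([]byte, tea.BlockSize)
+//	pt := []byte("16-byte  message") // two blocks
+//	ct := make([]byte, len(pt))
+//	cipher.NewCBCEncrypter(block, iv).CryptBlocks(ct, pt)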
+func (t *tea) Encrypt(dst, src []byte) { + e := binary.BigEndian + v0, v1 := e.Uint32(src), e.Uint32(src[4:]) + k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:]) + + sum := uint32(0) + delta := uint32(delta) + + for i := 0; i < t.rounds/2; i++ { + sum += delta + v0 += ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1) + v1 += ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3) + } + + e.PutUint32(dst, v0) + e.PutUint32(dst[4:], v1) +} + +// Decrypt decrypts the 8 byte buffer src using the key in t and stores the +// result in dst. +func (t *tea) Decrypt(dst, src []byte) { + e := binary.BigEndian + v0, v1 := e.Uint32(src), e.Uint32(src[4:]) + k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:]) + + delta := uint32(delta) + sum := delta * uint32(t.rounds/2) // in general, sum = delta * n + + for i := 0; i < t.rounds/2; i++ { + v1 -= ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3) + v0 -= ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1) + sum -= delta + } + + e.PutUint32(dst, v0) + e.PutUint32(dst[4:], v1) +} === added file 'src/golang.org/x/crypto/tea/tea_test.go' --- src/golang.org/x/crypto/tea/tea_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/crypto/tea/tea_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,93 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tea + +import ( + "bytes" + "testing" +) + +// A sample test key for when we just want to initialize a cipher +var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF} + +// Test that the block size for tea is correct +func TestBlocksize(t *testing.T) { + c, err := NewCipher(testKey) + if err != nil { + t.Fatalf("NewCipher returned error: %s", err) + } + + if result := c.BlockSize(); result != BlockSize { + t.Errorf("cipher.BlockSize returned %d, but expected %d", result, BlockSize) + } +} + +// Test that invalid key sizes return an error +func TestInvalidKeySize(t *testing.T) { + var key [KeySize + 1]byte + + if _, err := NewCipher(key[:]); err == nil { + t.Errorf("invalid key size %d didn't result in an error.", len(key)) + } + + if _, err := NewCipher(key[:KeySize-1]); err == nil { + t.Errorf("invalid key size %d didn't result in an error.", KeySize-1) + } +} + +// Test Vectors +type teaTest struct { + rounds int + key []byte + plaintext []byte + ciphertext []byte +} + +var teaTests = []teaTest{ + // These were sourced from https://github.com/froydnj/ironclad/blob/master/testing/test-vectors/tea.testvec + { + numRounds, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x41, 0xea, 0x3a, 0x0a, 0x94, 0xba, 0xa9, 0x40}, + }, + { + numRounds, + []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + []byte{0x31, 0x9b, 0xbe, 0xfb, 0x01, 0x6a, 0xbd, 0xb2}, + }, + { + 16, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + []byte{0xed, 0x28, 0x5d, 0xa1, 0x45, 0x5b, 0x33, 0xc1}, + }, +} + +// Test encryption +func TestCipherEncrypt(t *testing.T) { + // Test encryption with standard 64 rounds + for i, test := range 
teaTests { + c, err := NewCipherWithRounds(test.key, test.rounds) + if err != nil { + t.Fatalf("#%d: NewCipher returned error: %s", i, err) + } + + var ciphertext [BlockSize]byte + c.Encrypt(ciphertext[:], test.plaintext) + + if !bytes.Equal(ciphertext[:], test.ciphertext) { + t.Errorf("#%d: incorrect ciphertext. Got %x, wanted %x", i, ciphertext, test.ciphertext) + } + + var plaintext2 [BlockSize]byte + c.Decrypt(plaintext2[:], ciphertext[:]) + + if !bytes.Equal(plaintext2[:], test.plaintext) { + t.Errorf("#%d: incorrect plaintext. Got %x, wanted %x", i, plaintext2, test.plaintext) + } + } +} === modified file 'src/golang.org/x/net/context/context_test.go' --- src/golang.org/x/net/context/context_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/context/context_test.go 2016-03-22 15:18:22 +0000 @@ -375,7 +375,7 @@ <-c.Done() }, limit: 8, - gccgoLimit: 13, + gccgoLimit: 15, }, { desc: "WithCancel(bg)", === added directory 'src/golang.org/x/net/context/ctxhttp' === added file 'src/golang.org/x/net/context/ctxhttp/cancelreq.go' --- src/golang.org/x/net/context/ctxhttp/cancelreq.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/context/ctxhttp/cancelreq.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package ctxhttp + +import "net/http" + +func canceler(client *http.Client, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} === added file 'src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go' --- src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.5 + +package ctxhttp + +import "net/http" + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +func canceler(client *http.Client, req *http.Request) func() { + rc, ok := client.Transport.(requestCanceler) + if !ok { + return func() {} + } + return func() { + rc.CancelRequest(req) + } +} === added file 'src/golang.org/x/net/context/ctxhttp/ctxhttp.go' --- src/golang.org/x/net/context/ctxhttp/ctxhttp.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/context/ctxhttp/ctxhttp.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,79 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go. 
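+ // canceler returns a func that aborts the in-flight request. The request
+ // itself runs in a goroutine below; if the context finishes first, the
+ // select calls cancel and Do returns ctx.Err().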
+ cancel := canceler(client, req) + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + go func() { + resp, err := client.Do(req) + result <- responseAndError{resp, err} + }() + + select { + case <-ctx.Done(): + cancel() + return nil, ctx.Err() + case r := <-result: + return r.resp, r.err + } +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} === added file 'src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go' --- src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/context/ctxhttp/ctxhttp_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ctxhttp + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func TestNoTimeout(t *testing.T) { + ctx := context.Background() + resp, err := doRequest(ctx) + + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } +} +func TestCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(requestDuration / 2) + cancel() + }() + + resp, err := doRequest(ctx) + + if resp != nil || err == nil { + t.Fatalf("expected error, didn't get one. resp: %v", resp) + } + if err != ctx.Err() { + t.Fatalf("expected error from context but got: %v", err) + } +} + +func TestCancelAfterRequest(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + resp, err := doRequest(ctx) + + // Cancel before reading the body. + // Request.Body should still be readable after the context is canceled. 
+ cancel() + + b, err := ioutil.ReadAll(resp.Body) + if err != nil || string(b) != requestBody { + t.Fatalf("could not read body: %q %v", b, err) + } +} + +func doRequest(ctx context.Context) (*http.Response, error) { + var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + w.Write([]byte(requestBody)) + }) + + serv := httptest.NewServer(okHandler) + defer serv.Close() + + return Get(ctx, nil, serv.URL) +} === modified file 'src/golang.org/x/net/html/render.go' --- src/golang.org/x/net/html/render.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/html/render.go 2016-03-22 15:18:22 +0000 @@ -14,7 +14,7 @@ type writer interface { io.Writer - WriteByte(c byte) error // in Go 1.1, use io.ByteWriter + io.ByteWriter WriteString(string) (int, error) } === modified file 'src/golang.org/x/net/icmp/example_test.go' --- src/golang.org/x/net/icmp/example_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/icmp/example_test.go 2016-03-22 15:18:22 +0000 @@ -8,13 +8,22 @@ "log" "net" "os" + "runtime" "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" "golang.org/x/net/ipv6" ) func ExamplePacketConn_nonPrivilegedPing() { + switch runtime.GOOS { + case "darwin": + case "linux": + log.Println("you may need to adjust the net.ipv4.ping_group_range kernel state") + default: + log.Println("not supported on", runtime.GOOS) + return + } + c, err := icmp.ListenPacket("udp6", "fe80::1%en0") if err != nil { log.Fatal(err) @@ -41,7 +50,7 @@ if err != nil { log.Fatal(err) } - rm, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]) + rm, err := icmp.ParseMessage(58, rb[:n]) if err != nil { log.Fatal(err) } === modified file 'src/golang.org/x/net/icmp/extension_test.go' --- src/golang.org/x/net/icmp/extension_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/icmp/extension_test.go 2016-03-22 15:18:22 +0000 @@ -238,3 +238,22 @@ } } } + +var parseInterfaceNameTests = []struct { + b []byte + error +}{ + {[]byte{0, 'e', 'n', '0'}, errInvalidExtension}, + {[]byte{4, 'e', 'n', '0'}, nil}, + {[]byte{7, 'e', 'n', '0', 0xff, 0xff, 0xff, 0xff}, errInvalidExtension}, + {[]byte{8, 'e', 'n', '0', 0xff, 0xff, 0xff}, errMessageTooShort}, +} + +func TestParseInterfaceName(t *testing.T) { + ifi := InterfaceInfo{Interface: &net.Interface{}} + for i, tt := range parseInterfaceNameTests { + if _, err := ifi.parseName(tt.b); err != tt.error { + t.Errorf("#%d: got %v; want %v", i, err, tt.error) + } + } +} === modified file 'src/golang.org/x/net/icmp/interface.go' --- src/golang.org/x/net/icmp/interface.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/icmp/interface.go 2016-03-22 15:18:22 +0000 @@ -174,6 +174,9 @@ return nil, errMessageTooShort } l := int(b[0]) + if l%4 != 0 || 4 > l || l > 64 { + return nil, errInvalidExtension + } var name [63]byte copy(name[:], b[1:l]) ifi.Interface.Name = strings.Trim(string(name[:]), "\000") === modified file 'src/golang.org/x/net/icmp/listen_posix.go' --- src/golang.org/x/net/icmp/listen_posix.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/icmp/listen_posix.go 2016-03-22 15:18:22 +0000 @@ -57,7 +57,7 @@ proto = iana.ProtocolIPv6ICMP } } - var err error + var cerr error var c net.PacketConn switch family { case syscall.AF_INET, syscall.AF_INET6: @@ -80,12 +80,12 @@ } f := os.NewFile(uintptr(s), "datagram-oriented icmp") defer f.Close() - c, err = net.FilePacketConn(f) + c, cerr = net.FilePacketConn(f) default: - c, err = net.ListenPacket(network, address) + c, cerr = net.ListenPacket(network, address) } 
- if err != nil { - return nil, err + if cerr != nil { + return nil, cerr } switch proto { case iana.ProtocolICMP: === modified file 'src/golang.org/x/net/icmp/message.go' --- src/golang.org/x/net/icmp/message.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/icmp/message.go 2016-03-22 15:18:22 +0000 @@ -24,11 +24,12 @@ ) var ( - errMessageTooShort = errors.New("message too short") - errHeaderTooShort = errors.New("header too short") - errBufferTooShort = errors.New("buffer too short") - errOpNoSupport = errors.New("operation not supported") - errNoExtension = errors.New("no extension") + errMessageTooShort = errors.New("message too short") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errOpNoSupport = errors.New("operation not supported") + errNoExtension = errors.New("no extension") + errInvalidExtension = errors.New("invalid extension") ) func checksum(b []byte) uint16 { === modified file 'src/golang.org/x/net/icmp/ping_test.go' --- src/golang.org/x/net/icmp/ping_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/icmp/ping_test.go 2016-03-22 15:18:22 +0000 @@ -6,10 +6,12 @@ import ( "errors" + "fmt" "net" "os" "runtime" "testing" + "time" "golang.org/x/net/icmp" "golang.org/x/net/internal/iana" @@ -49,21 +51,21 @@ return nil, errors.New("no A or AAAA record") } -var pingGoogleTests = []struct { +type pingTest struct { network, address string protocol int mtype icmp.Type -}{ +} + +var nonPrivilegedPingTests = []pingTest{ {"udp4", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, - {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, {"udp6", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, - {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, } -func TestPingGoogle(t *testing.T) { +func TestNonPrivilegedPing(t *testing.T) { if testing.Short() { - t.Skip("to avoid external network") + t.Skip("avoid external network") } switch runtime.GOOS { case "darwin": @@ -73,61 +75,92 @@ t.Skipf("not supported on %s", runtime.GOOS) } - m, ok := nettest.SupportsRawIPSocket() - for i, tt := range pingGoogleTests { - if tt.network[:2] == "ip" && !ok { - t.Log(m) - continue - } - c, err := icmp.ListenPacket(tt.network, tt.address) - if err != nil { - t.Error(err) - continue - } - defer c.Close() - - dst, err := googleAddr(c, tt.protocol) - if err != nil { - t.Error(err) - continue - } - - wm := icmp.Message{ - Type: tt.mtype, Code: 0, - Body: &icmp.Echo{ - ID: os.Getpid() & 0xffff, Seq: 1 << uint(i), - Data: []byte("HELLO-R-U-THERE"), - }, - } - wb, err := wm.Marshal(nil) - if err != nil { - t.Error(err) - continue - } - if n, err := c.WriteTo(wb, dst); err != nil { - t.Error(err, dst) - continue - } else if n != len(wb) { - t.Errorf("got %v; want %v", n, len(wb)) - continue - } - - rb := make([]byte, 1500) - n, peer, err := c.ReadFrom(rb) - if err != nil { - t.Error(err) - continue - } - rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) - if err != nil { - t.Error(err) - continue - } - switch rm.Type { - case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: - t.Logf("got reflection from %v", peer) - default: - t.Errorf("got %+v; want echo reply", rm) - } + for i, tt := range nonPrivilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +var privilegedPingTests = []pingTest{ + {"ip4:icmp", "0.0.0.0", iana.ProtocolICMP, ipv4.ICMPTypeEcho}, + + {"ip6:ipv6-icmp", "::", iana.ProtocolIPv6ICMP, ipv6.ICMPTypeEchoRequest}, +} + +func TestPrivilegedPing(t *testing.T) { + if 
testing.Short() { + t.Skip("avoid external network") + } + if m, ok := nettest.SupportsRawIPSocket(); !ok { + t.Skip(m) + } + + for i, tt := range privilegedPingTests { + if err := doPing(tt, i); err != nil { + t.Error(err) + } + } +} + +func doPing(tt pingTest, seq int) error { + c, err := icmp.ListenPacket(tt.network, tt.address) + if err != nil { + return err + } + defer c.Close() + + dst, err := googleAddr(c, tt.protocol) + if err != nil { + return err + } + + if tt.network != "udp6" && tt.protocol == iana.ProtocolIPv6ICMP { + var f ipv6.ICMPFilter + f.SetAll(true) + f.Accept(ipv6.ICMPTypeDestinationUnreachable) + f.Accept(ipv6.ICMPTypePacketTooBig) + f.Accept(ipv6.ICMPTypeTimeExceeded) + f.Accept(ipv6.ICMPTypeParameterProblem) + f.Accept(ipv6.ICMPTypeEchoReply) + if err := c.IPv6PacketConn().SetICMPFilter(&f); err != nil { + return err + } + } + + wm := icmp.Message{ + Type: tt.mtype, Code: 0, + Body: &icmp.Echo{ + ID: os.Getpid() & 0xffff, Seq: 1 << uint(seq), + Data: []byte("HELLO-R-U-THERE"), + }, + } + wb, err := wm.Marshal(nil) + if err != nil { + return err + } + if n, err := c.WriteTo(wb, dst); err != nil { + return err + } else if n != len(wb) { + return fmt.Errorf("got %v; want %v", n, len(wb)) + } + + rb := make([]byte, 1500) + if err := c.SetReadDeadline(time.Now().Add(3 * time.Second)); err != nil { + return err + } + n, peer, err := c.ReadFrom(rb) + if err != nil { + return err + } + rm, err := icmp.ParseMessage(tt.protocol, rb[:n]) + if err != nil { + return err + } + switch rm.Type { + case ipv4.ICMPTypeEchoReply, ipv6.ICMPTypeEchoReply: + return nil + default: + return fmt.Errorf("got %+v from %v; want echo reply", rm, peer) } } === modified file 'src/golang.org/x/net/internal/iana/const.go' --- src/golang.org/x/net/internal/iana/const.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/internal/iana/const.go 2016-03-22 15:18:22 +0000 @@ -38,7 +38,7 @@ CongestionExperienced = 0x3 // CE (Congestion Experienced) ) -// Protocol Numbers, Updated: 2015-01-06 +// Protocol Numbers, Updated: 2015-06-23 const ( ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option @@ -157,7 +157,6 @@ ProtocolSRP = 119 // SpectraLink Radio Protocol ProtocolUTI = 120 // UTI ProtocolSMP = 121 // Simple Message Protocol - ProtocolSM = 122 // Simple Multicast Protocol ProtocolPTP = 123 // Performance Transparency Protocol ProtocolISIS = 124 // ISIS over IPv4 ProtocolFIRE = 125 // FIRE === added file 'src/golang.org/x/net/internal/nettest/rlimit.go' --- src/golang.org/x/net/internal/nettest/rlimit.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/internal/nettest/rlimit.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +const defaultMaxOpenFiles = 256 + +// MaxOpenFiles returns the maximum number of open files for the +// caller's process. +func MaxOpenFiles() int { return maxOpenFiles() } === added file 'src/golang.org/x/net/internal/nettest/rlimit_stub.go' --- src/golang.org/x/net/internal/nettest/rlimit_stub.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/internal/nettest/rlimit_stub.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build nacl plan9 + +package nettest + +func maxOpenFiles() int { return defaultMaxOpenFiles } === added file 'src/golang.org/x/net/internal/nettest/rlimit_unix.go' --- src/golang.org/x/net/internal/nettest/rlimit_unix.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/internal/nettest/rlimit_unix.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,17 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package nettest + +import "syscall" + +func maxOpenFiles() int { + var rlim syscall.Rlimit + if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlim); err != nil { + return defaultMaxOpenFiles + } + return int(rlim.Cur) +} === added file 'src/golang.org/x/net/internal/nettest/rlimit_windows.go' --- src/golang.org/x/net/internal/nettest/rlimit_windows.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/internal/nettest/rlimit_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +func maxOpenFiles() int { return 4 * defaultMaxOpenFiles /* actually it's 16581375 */ } === added directory 'src/golang.org/x/net/internal/timeseries' === added file 'src/golang.org/x/net/internal/timeseries/timeseries.go' --- src/golang.org/x/net/internal/timeseries/timeseries.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/internal/timeseries/timeseries.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. 
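+// Any type implementing the four Observable methods above can be aggregated;
+// for example, an int64 counter (editor's sketch, not part of this package):
+//
+//	type Count int64
+//
+//	func (c *Count) Multiply(ratio float64)    { *c = Count(float64(*c) * ratio) }
+//	func (c *Count) Add(other Observable)      { *c += *other.(*Count) }
+//	func (c *Count) Clear()                    { *c = 0 }
+//	func (c *Count) CopyFrom(other Observable) { *c = *other.(*Count) }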
+func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
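+// A usage sketch (editor's illustration, assuming a ts built by
+// NewTimeSeries(NewFloat)):
+//
+//	v := Float(3)
+//	ts.Add(&v)                    // bucketed at the clock's current time
+//	sum := ts.Recent(time.Minute) // aggregate over the last minute
+//	fmt.Println(sum.(*Float).Value())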
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
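+// The result is ordered from the newest bucket to the oldest.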
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level >= len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index-- + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
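+ // If the requested range begins after the oldest source bucket, skip
+ // the whole source buckets that end on or before dstStart.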
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as shown below. + // interval = (finish-start)/num + // i'th value = sum of observations in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} === added file 'src/golang.org/x/net/internal/timeseries/timeseries_test.go' --- src/golang.org/x/net/internal/timeseries/timeseries_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/internal/timeseries/timeseries_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package timeseries + +import ( + "math" + "testing" + "time" +) + +func isNear(x *Float, y float64, tolerance float64) bool { + return math.Abs(x.Value()-y) < tolerance +} + +func isApproximate(x *Float, y float64) bool { + return isNear(x, y, 1e-2) +} + +func checkApproximate(t *testing.T, o Observable, y float64) { + x := o.(*Float) + if !isApproximate(x, y) { + t.Errorf("Wanted %g, got %g", y, x.Value()) + } +} + +func checkNear(t *testing.T, o Observable, y, tolerance float64) { + x := o.(*Float) + if !isNear(x, y, tolerance) { + t.Errorf("Wanted %g +- %g, got %g", y, tolerance, x.Value()) + } +} + +var baseTime = time.Date(2013, 1, 1, 0, 0, 0, 0, time.UTC) + +func tu(s int64) time.Time { + return baseTime.Add(time.Duration(s) * time.Second) +} + +func tu2(s int64, ns int64) time.Time { + return baseTime.Add(time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond) +} + +func TestBasicTimeSeries(t *testing.T) { + ts := NewTimeSeries(NewFloat) + fo := new(Float) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(1)), 40) + checkApproximate(t, ts.Total(), 40) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + ts.AddWithTime(fo, tu(3)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 40) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 70) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(2)), 60) + checkApproximate(t, ts.Range(tu(2), tu(4)), 30) + checkApproximate(t, ts.Total(), 90) + *fo = Float(100) + ts.AddWithTime(fo, tu(100)) + checkApproximate(t, ts.Range(tu(99), tu(100)), 100) + checkApproximate(t, ts.Range(tu(0), tu(4)), 36) + checkApproximate(t, ts.Total(), 190) + *fo = Float(10) + ts.AddWithTime(fo, tu(1)) + ts.AddWithTime(fo, tu(1)) + checkApproximate(t, ts.Range(tu(0), tu(4)), 44) + checkApproximate(t, ts.Range(tu(37), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(50), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Range(tu(99), tu2(100, 100e6)), 100) + checkApproximate(t, ts.Total(), 210) + + for i, l := range ts.ComputeRange(tu(36), tu(100), 64) { + if i == 63 { + checkApproximate(t, l, 100) + } else { + checkApproximate(t, l, 0) + } + } + + checkApproximate(t, ts.Range(tu(0), tu(100)), 210) + checkApproximate(t, ts.Range(tu(10), tu(100)), 100) + + for i, l := range ts.ComputeRange(tu(0), tu(100), 100) { + if i < 10 { + checkApproximate(t, 
l, 11) + } else if i >= 90 { + checkApproximate(t, l, 10) + } else { + checkApproximate(t, l, 0) + } + } +} + +func TestFloat(t *testing.T) { + f := Float(1) + if g, w := f.String(), "1"; g != w { + t.Errorf("Float(1).String = %q; want %q", g, w) + } + f2 := Float(2) + var o Observable = &f2 + f.Add(o) + if g, w := f.Value(), 3.0; g != w { + t.Errorf("Float post-add = %v; want %v", g, w) + } + f.Multiply(2) + if g, w := f.Value(), 6.0; g != w { + t.Errorf("Float post-multiply = %v; want %v", g, w) + } + f.Clear() + if g, w := f.Value(), 0.0; g != w { + t.Errorf("Float post-clear = %v; want %v", g, w) + } + f.CopyFrom(&f2) + if g, w := f.Value(), 2.0; g != w { + t.Errorf("Float post-CopyFrom = %v; want %v", g, w) + } +} + +type mockClock struct { + time time.Time +} + +func (m *mockClock) Time() time.Time { return m.time } +func (m *mockClock) Set(t time.Time) { m.time = t } + +const buckets = 6 + +var testResolutions = []time.Duration{ + 10 * time.Second, // level holds one minute of observations + 100 * time.Second, // level holds ten minutes of observations + 10 * time.Minute, // level holds one hour of observations +} + +// TestTimeSeries uses a small number of buckets to force a higher +// error rate on approximations from the timeseries. +type TestTimeSeries struct { + timeSeries +} + +func TestExpectedErrorRate(t *testing.T) { + ts := new(TestTimeSeries) + fake := new(mockClock) + fake.Set(time.Now()) + ts.timeSeries.init(testResolutions, NewFloat, buckets, fake) + for i := 1; i <= 61*61; i++ { + fake.Set(fake.Time().Add(1 * time.Second)) + ob := Float(1) + ts.AddWithTime(&ob, fake.Time()) + + // The results should be accurate within one missing bucket (1/6) of the observations recorded. + checkNear(t, ts.Latest(0, buckets), min(float64(i), 60), 10) + checkNear(t, ts.Latest(1, buckets), min(float64(i), 600), 100) + checkNear(t, ts.Latest(2, buckets), min(float64(i), 3600), 600) + } +} + +func min(a, b float64) float64 { + if a < b { + return a + } + return b +} === modified file 'src/golang.org/x/net/ipv4/doc.go' --- src/golang.org/x/net/ipv4/doc.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv4/doc.go 2016-03-22 15:18:22 +0000 @@ -114,7 +114,7 @@ // // error handling // } // if cm.Dst.IsMulticast() { -// if cm.Dst.Equal(group) +// if cm.Dst.Equal(group) { // // joined group, do something // } else { // // unknown group, discard === modified file 'src/golang.org/x/net/ipv4/example_test.go' --- src/golang.org/x/net/ipv4/example_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv4/example_test.go 2016-03-22 15:18:22 +0000 @@ -13,7 +13,6 @@ "time" "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" "golang.org/x/net/ipv4" ) @@ -32,7 +31,7 @@ go func(c net.Conn) { defer c.Close() p := ipv4.NewConn(c) - if err := p.SetTOS(iana.DiffServAF11); err != nil { + if err := p.SetTOS(0x28); err != nil { // DSCP AF11 log.Fatal(err) } if err := p.SetTTL(128); err != nil { @@ -102,7 +101,7 @@ log.Fatal("no A record found") } - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolICMP), "0.0.0.0") // ICMP for IPv4 + c, err := net.ListenPacket("ip4:1", "0.0.0.0") // ICMP for IPv4 if err != nil { log.Fatal(err) } @@ -149,7 +148,7 @@ } log.Fatal(err) } - rm, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n]) + rm, err := icmp.ParseMessage(1, rb[:n]) if err != nil { log.Fatal(err) } @@ -173,7 +172,7 @@ } func ExampleRawConn_advertisingOSPFHello() { - c, err := net.ListenPacket(fmt.Sprintf("ip4:%d", iana.ProtocolOSPFIGP), "0.0.0.0") // OSPF for IPv4 + c, err := 
net.ListenPacket("ip4:89", "0.0.0.0") // OSPF for IPv4 if err != nil { log.Fatal(err) } @@ -201,10 +200,10 @@ iph := &ipv4.Header{ Version: ipv4.Version, Len: ipv4.HeaderLen, - TOS: iana.DiffServCS6, + TOS: 0xc0, // DSCP CS6 TotalLen: ipv4.HeaderLen + len(ospf), TTL: 1, - Protocol: iana.ProtocolOSPFIGP, + Protocol: 89, Dst: allSPFRouters.IP.To4(), } === modified file 'src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go' --- src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go 2016-03-22 15:18:22 +0000 @@ -14,13 +14,28 @@ "golang.org/x/net/internal/iana" ) +var freebsd32o64 bool + func setsockoptGroupReq(fd, name int, ifi *net.Interface, grp net.IP) error { var gr sysGroupReq if ifi != nil { gr.Interface = uint32(ifi.Index) } gr.setGroup(grp) - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&gr), sysSizeofGroupReq)) + var p unsafe.Pointer + var l sysSockoptLen + if freebsd32o64 { + var d [sysSizeofGroupReq + 4]byte + s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupReq + 4 + } else { + p = unsafe.Pointer(&gr) + l = sysSizeofGroupReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) } func setsockoptGroupSourceReq(fd, name int, ifi *net.Interface, grp, src net.IP) error { @@ -29,5 +44,18 @@ gsr.Interface = uint32(ifi.Index) } gsr.setSourceGroup(grp, src) - return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, unsafe.Pointer(&gsr), sysSizeofGroupSourceReq)) + var p unsafe.Pointer + var l sysSockoptLen + if freebsd32o64 { + var d [sysSizeofGroupSourceReq + 4]byte + s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupSourceReq + 4 + } else { + p = unsafe.Pointer(&gsr) + l = sysSizeofGroupSourceReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, iana.ProtocolIP, name, p, l)) } === modified file 'src/golang.org/x/net/ipv4/sys_freebsd.go' --- src/golang.org/x/net/ipv4/sys_freebsd.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv4/sys_freebsd.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,8 @@ import ( "net" + "runtime" + "strings" "syscall" "unsafe" ) @@ -43,6 +45,15 @@ if freebsdVersion >= 1000000 { sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } } func (gr *sysGroupReq) setGroup(grp net.IP) { === modified file 'src/golang.org/x/net/ipv4/zsys_freebsd_arm.go' --- src/golang.org/x/net/ipv4/zsys_freebsd_arm.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv4/zsys_freebsd_arm.go 2016-03-22 15:18:22 +0000 @@ -44,8 +44,8 @@ sysSizeofIPMreq = 0x8 sysSizeofIPMreqn = 0xc sysSizeofIPMreqSource = 0xc - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 ) type sysSockaddrStorage struct { @@ -83,11 +83,13 @@ type sysGroupReq struct { Interface uint32 + Pad_cgo_0 [4]byte Group sysSockaddrStorage } type sysGroupSourceReq struct { Interface uint32 + Pad_cgo_0 [4]byte Group sysSockaddrStorage Source sysSockaddrStorage } === modified file 'src/golang.org/x/net/ipv6/doc.go' --- src/golang.org/x/net/ipv6/doc.go 2015-06-05 17:40:37 +0000 
+++ src/golang.org/x/net/ipv6/doc.go 2016-03-22 15:18:22 +0000 @@ -114,7 +114,7 @@ // // error handling // } // if rcm.Dst.IsMulticast() { -// if rcm.Dst.Equal(group) +// if rcm.Dst.Equal(group) { // // joined group, do something // } else { // // unknown group, discard === modified file 'src/golang.org/x/net/ipv6/example_test.go' --- src/golang.org/x/net/ipv6/example_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv6/example_test.go 2016-03-22 15:18:22 +0000 @@ -12,7 +12,6 @@ "time" "golang.org/x/net/icmp" - "golang.org/x/net/internal/iana" "golang.org/x/net/ipv6" ) @@ -31,7 +30,7 @@ go func(c net.Conn) { defer c.Close() p := ipv6.NewConn(c) - if err := p.SetTrafficClass(iana.DiffServAF11); err != nil { + if err := p.SetTrafficClass(0x28); err != nil { // DSCP AF11 log.Fatal(err) } if err := p.SetHopLimit(128); err != nil { @@ -103,7 +102,7 @@ log.Fatal("no AAAA record found") } - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolIPv6ICMP), "::") // ICMP for IPv6 + c, err := net.ListenPacket("ip6:58", "::") // ICMP for IPv6 if err != nil { log.Fatal(err) } @@ -156,7 +155,7 @@ } log.Fatal(err) } - rm, err := icmp.ParseMessage(iana.ProtocolIPv6ICMP, rb[:n]) + rm, err := icmp.ParseMessage(58, rb[:n]) if err != nil { log.Fatal(err) } @@ -178,7 +177,7 @@ } func ExamplePacketConn_advertisingOSPFHello() { - c, err := net.ListenPacket(fmt.Sprintf("ip6:%d", iana.ProtocolOSPFIGP), "::") // OSPF for IPv6 + c, err := net.ListenPacket("ip6:89", "::") // OSPF for IPv6 if err != nil { log.Fatal(err) } @@ -205,7 +204,7 @@ } cm := ipv6.ControlMessage{ - TrafficClass: iana.DiffServCS6, + TrafficClass: 0xc0, // DSCP CS6 HopLimit: 1, IfIndex: en0.Index, } === modified file 'src/golang.org/x/net/ipv6/iana.go' --- src/golang.org/x/net/ipv6/iana.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv6/iana.go 2016-03-22 15:18:22 +0000 @@ -3,7 +3,7 @@ package ipv6 -// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2014-09-22 +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 const ( ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big @@ -39,9 +39,10 @@ ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message ) -// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2014-09-22 +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07 var icmpTypes = map[ICMPType]string{ 1: "destination unreachable", 2: "packet too big", @@ -77,4 +78,5 @@ 156: "ilnpv6 locator update message", 157: "duplicate address request", 158: "duplicate address confirmation", + 159: "mpl control message", } === modified file 'src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go' --- src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go 2016-03-22 15:18:22 +0000 @@ -12,13 +12,28 @@ "unsafe" ) +var freebsd32o64 bool + func setsockoptGroupReq(fd int, opt *sockOpt, ifi *net.Interface, grp net.IP) error { var gr sysGroupReq if ifi != nil { gr.Interface = uint32(ifi.Index) } gr.setGroup(grp) - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&gr), sysSizeofGroupReq)) + var p unsafe.Pointer + 
var l sysSockoptLen + if freebsd32o64 { + var d [sysSizeofGroupReq + 4]byte + s := (*[sysSizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupReq + 4 + } else { + p = unsafe.Pointer(&gr) + l = sysSizeofGroupReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) } func setsockoptGroupSourceReq(fd int, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error { @@ -27,5 +42,18 @@ gsr.Interface = uint32(ifi.Index) } gsr.setSourceGroup(grp, src) - return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, unsafe.Pointer(&gsr), sysSizeofGroupSourceReq)) + var p unsafe.Pointer + var l sysSockoptLen + if freebsd32o64 { + var d [sysSizeofGroupSourceReq + 4]byte + s := (*[sysSizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + p = unsafe.Pointer(&d[0]) + l = sysSizeofGroupSourceReq + 4 + } else { + p = unsafe.Pointer(&gsr) + l = sysSizeofGroupSourceReq + } + return os.NewSyscallError("setsockopt", setsockopt(fd, opt.level, opt.name, p, l)) } === modified file 'src/golang.org/x/net/ipv6/sys_freebsd.go' --- src/golang.org/x/net/ipv6/sys_freebsd.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv6/sys_freebsd.go 2016-03-22 15:18:22 +0000 @@ -6,6 +6,8 @@ import ( "net" + "runtime" + "strings" "syscall" "unsafe" @@ -45,6 +47,18 @@ } ) +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + func (sa *sysSockaddrInet6) setSockaddr(ip net.IP, i int) { sa.Len = sysSizeofSockaddrInet6 sa.Family = syscall.AF_INET6 === modified file 'src/golang.org/x/net/ipv6/zsys_freebsd_arm.go' --- src/golang.org/x/net/ipv6/zsys_freebsd_arm.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/ipv6/zsys_freebsd_arm.go 2016-03-22 15:18:22 +0000 @@ -68,8 +68,8 @@ sysSizeofIPv6Mtuinfo = 0x20 sysSizeofIPv6Mreq = 0x14 - sysSizeofGroupReq = 0x84 - sysSizeofGroupSourceReq = 0x104 + sysSizeofGroupReq = 0x88 + sysSizeofGroupSourceReq = 0x108 sysSizeofICMPv6Filter = 0x20 ) @@ -108,11 +108,13 @@ type sysGroupReq struct { Interface uint32 + Pad_cgo_0 [4]byte Group sysSockaddrStorage } type sysGroupSourceReq struct { Interface uint32 + Pad_cgo_0 [4]byte Group sysSockaddrStorage Source sysSockaddrStorage } === modified file 'src/golang.org/x/net/netutil/listen_test.go' --- src/golang.org/x/net/netutil/listen_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/netutil/listen_test.go 2016-03-22 15:18:22 +0000 @@ -20,17 +20,20 @@ "sync/atomic" "testing" "time" + + "golang.org/x/net/internal/nettest" ) func TestLimitListener(t *testing.T) { - const ( - max = 5 - num = 200 - ) + const max = 5 + attempts := (nettest.MaxOpenFiles() - max) / 2 + if attempts > 256 { // maximum length of accept queue is 128 by default + attempts = 256 + } l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { - t.Fatalf("Listen: %v", err) + t.Fatal(err) } defer l.Close() l = LimitListener(l, max) @@ -47,14 +50,14 @@ var wg sync.WaitGroup var failed int32 - for i := 0; i < num; i++ { + for i := 0; i < attempts; i++ { wg.Add(1) go func() { defer wg.Done() c := http.Client{Timeout: 3 * time.Second} r, err := c.Get("http://" + l.Addr().String()) if err != nil { - t.Logf("Get: %v", err) + t.Log(err) atomic.AddInt32(&failed, 1) return } @@ -66,8 +69,8 @@ // We expect some Gets to fail as the kernel's 
accept queue is filled, // but most should succeed. - if failed >= num/2 { - t.Errorf("too many Gets failed: %v", failed) + if int(failed) >= attempts/2 { + t.Errorf("%d requests failed within %d attempts", failed, attempts) } } === modified file 'src/golang.org/x/net/publicsuffix/gen.go' --- src/golang.org/x/net/publicsuffix/gen.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/publicsuffix/gen.go 2016-03-22 15:18:22 +0000 @@ -13,10 +13,10 @@ // go run gen.go -version "xxx" -test >table_test.go // // The version is derived from information found at -// https://hg.mozilla.org/mozilla-central/log/tip/netwerk/dns/effective_tld_names.dat +// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat // -// To fetch a particular hg revision, such as 05b11a8d1ace, pass -// -url "https://hg.mozilla.org/mozilla-central/raw-file/05b11a8d1ace/netwerk/dns/effective_tld_names.dat" +// To fetch a particular git revision, such as 5c70ccd250, pass +// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat" import ( "bufio" === modified file 'src/golang.org/x/net/publicsuffix/list_test.go' --- src/golang.org/x/net/publicsuffix/list_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/publicsuffix/list_test.go 2016-03-22 15:18:22 +0000 @@ -329,7 +329,7 @@ } // eTLDPlusOneTestCases come from -// http://mxr.mozilla.org/mozilla-central/source/netwerk/test/unit/data/test_psl.txt +// https://github.com/publicsuffix/list/blob/master/tests/test_psl.txt var eTLDPlusOneTestCases = []struct { domain, want string }{ @@ -356,10 +356,10 @@ {"a.b.example.uk.com", "example.uk.com"}, {"test.ac", "test.ac"}, // TLD with only 1 (wildcard) rule. - {"cy", ""}, - {"c.cy", ""}, - {"b.c.cy", "b.c.cy"}, - {"a.b.c.cy", "b.c.cy"}, + {"il", ""}, + {"c.il", ""}, + {"b.c.il", "b.c.il"}, + {"a.b.c.il", "b.c.il"}, // More complex TLD. {"jp", ""}, {"test.jp", "test.jp"}, === modified file 'src/golang.org/x/net/publicsuffix/table.go' --- src/golang.org/x/net/publicsuffix/table.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/publicsuffix/table.go 2016-03-22 15:18:22 +0000 @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's effective_tld_names.dat, hg revision 063babcbcbcc (2014-12-29)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision 0de6f8e (2015-07-15)" const ( nodesBitsChildren = 9 @@ -23,400 +23,417 @@ ) // numTLD is the number of top level domains. -const numTLD = 1039 +const numTLD = 1337 // Text is the combined text of all labels. 
-const text = "bielawashingtondclkasukabeerhcloudcontrolappalacebinorilskasumig" + - "aurawa-mazowszexhibitionavigationavuotnakatsugawabiellaakesvuemi" + - "eleccebizenakaniikawatanagurabieszczadygeyachiyodatsunanjoetsuru" + - "taharabievatmpalanakayamatsuurabifukagawassamukawataricohdavvenj" + - "argamvikasuyameiwamarylhurstjordalshalsenavyatkanazawabihorology" + - "ukuhashimoichinosekigaharabikedavvesiidazaifuchukotkakegawatch-a" + - "nd-clockaszubyurihonjournalistjohnayoroceanographiquehimejinfini" + - "tires3-website-us-east-1bilbaogakijoburgliwicembroideryusuharabi" + - "llustrationfshostre-totenkawabiomutashinainfoggiabirdartdecoalop" + - "padovald-aostathelleitungsenhs3-website-us-gov-west-1birkenesodd" + - "tangenovarabirthplacemergencyberlevagangaviikarugausdalorenskogl" + - "obalatinabeauxartsandcrafts3-website-us-west-1bjarkoyusuisserveb" + - "bs3-website-us-west-2bjerkreimperiabjugniizabloombergbauernikkoe" + - "benhavnikolaeventsakuhokkaidonnakamagayahabaghdadyndns-homeftpac" + - "cessakuragawabluenoharabmsakurainsureiseninohelsinkitakatakamori" + - "okamchatkameokameyamashinatsukigatakanabedzin-addrammenuernbergl" + - "obodoes-itatarstaninomiyakonojosoyrovnoslombardyndns-at-homednsa" + - "kyotanabellevuedatinglogowegrowestfalenirasakinuyamanouchikuhoku" + - "ryugasakitaurayasudabmwfarmequipmentateyamabnpparibaselburgloppe" + - "nzaokinawashirosatobishimagnitkafjordyndns-ip6bolzanordreisa-gee" + - "katowicemrbomloabathsbcargodoesntexistanbullensakerbondyndns-mai" + - "losangelesalangenishiazainvestmentsalatrobellunordre-landyndns-o" + - "ffice-on-the-webcambridgestonewportlligatewayuulminamiechizenish" + - "igovtatsunostrodabonnishiharabostonakijinsekikogenishiizunazukis" + - "-a-candidatepilepsykkylvenetogoldpointelligencepsongdalenishikat" + - "akatoris-a-catererbotanicalgardenishikatsuragivingmailoteneis-a-" + - "celticsfanishikawazukaneyamaxunjargabotanicgardenishimerabotanyc" + - "arrierboutiquebecartierbozentsujiieu-central-1bradescorporationi" + - "shinomiyashironostrolekaniepceverbankatsushikabeiarnrtattoolszty" + - "nsettlersalondonetskatsuyamasfjordenishinoomotegostrowiecartoona" + - "rteducationalchikugojomediabrandywinevalleyuzawabrasiljan-mayeni" + - "shinoshimatsuzakis-a-chefarmerseinewyorkshireggiocalabriabremang" + - "erbresciabrindisiciliabristolgaulardalottevje-og-hornnesaltdalot" + - "tokigawabritishcolumbialowiezachpomorskienishiokoppegardyndns-pi" + - "csalvadordalikes-piedmontblancashirepair-traffic-controlleyuzhno" + - "-sakhalinskaufenishitosashimizunaminamiawajikis-a-conservativefs" + - "nillfjordyndns-remotegildeskalmykiabroadcastleasinglesalzburgmin" + - "akamichigangwonishiwakis-a-cpaderbornissandvikcoromantovaldaosta" + - "tionissedalouvrepbodyndns-at-workinggroupowiataxindianmarketingm" + - "odalenisshinguernseybroke-itgorybrokerbronnoysundyndns-serverban" + - "iabrumunddalowiczest-a-la-masioniyodogawabrunelblagdenesnaaseral" + - "ingenkainanaejrietiendaburyatiabrusselsamegawabruxellesjaguarchi" + - "tecturecipesaro-urbino-pesarourbinopesaromaintenancexpressexyzgo" + - "rabryanskleppaleostrowwlkpalermomasvuotnakatombetsupportjeldsund" + - "yndns-webhoppdalucaniabryneuesamnangerbuyshousesamsungmxboxeroxj" + - "aworznobuzenrwhalingqldyndns-wikirovogradoybuzzgorzeleccollectio" + - "nbwhoswhokksundyndns-workshopalmspringsakerbzhitomirkutskodjeffe" + - "rsoncloudcontrolledekaluganskypescaravantaacloudfrontariocntoyoo" + - "karasjohkaminokawanishiaizubangecollegersundcolognewhampshireggi" + - 
"o-calabriacolonialwilliamsburgrossetouchijiwadell-ogliastrakhana" + - "migawacoloradoplateaudnedalncolumbusantiquesantabarbaracommunity" + - "chyllestadcomobaracompanynysabaerobaticasertaipeigersundyroyrvik" + - "nakamurataitogitsuliernewspapercompute-1computerhistoryofscience" + - "-fictioncondoshichikashukujitawaraconferenceconstructionconsulad" + - "oharuovattorneyagawalesundconsultanthropologyconsultingvolluroyc" + - "ontemporaryartgalleryazanconagawakkanaibetsubamericanartanddesig" + - "nieznodawarahkkeravjudygarlandcontractorskenconventureshinodesas" + - "hibetsuikinkobayashikaoizumizakiracookingroundhandlingroznycoolb" + - "ia-tempio-olbiatempioolbialystokkecoopocznorfolkebiblebtimnetzgr" + - "adcopenhagencyclopedicasinore-og-uvdaluccapetowncorsicadaquesant" + - "acruziparachutingrparaglidingruecorvettemasekhabarovskhakassiaco" + - "senzagannakadomari-elasticbeanstalkharkivalleaostavropolicecostu" + - "medio-campidano-mediocampidanomediocouncilustercoursesantafedera" + - "tioncqponcranbrookuwanalyticsanukis-a-designercreditcardcremonas" + - "horokanaiecrewindmillutskharkovalled-aostakazakis-a-doctoraycric" + - "ketrzyncrimearthruhereggio-emilia-romagnagatorokunohealthcareers" + - "aotomelbournecrotonewjerseycrowncrsapodhaleluxembourguidelloglia" + - "stradercruisesapporocuisinellahppiacenzakopaneraircraftoyosatosh" + - "imaculturalcentertainmentoyotaris-a-financialadvisor-aurdaluxury" + - "cuneocupcakecxn--0trq7p7nnferraraferreroticahcesuoloanswedenfets" + - "undfguitarsaratovalledaostaketomisatomaritimekeepingujolsterfhva" + - "lerfiguerestaurantoyotomiyazakis-a-geekhersonfilateliafilminamif" + - "uranofinaluzernfinancefineartsardegnaklodzkodairafinlandfinnoyfi" + - "rebaseappspotenzamamicrolightingulenfirenzefirestonexusdecorativ" + - "eartsardiniafirmdaleclercateringebuildersvpanamafishingolfarmste" + - "adfitjarfitnessettlementoyotsukaidovre-eikerfjalerdalvivano-fran" + - "kivskhmelnitskiyamashikeflesbergunmarcheapartmentsarlflightsarps" + - "borganicfdflogisticsarufutsunomiyawakasaikaitakoelnfloraflorence" + - "floridafloristanohataiwanairguardfloromskoguchikuzenflowersasaya" + - "maflsmidthachinohekinannestadflynnhubalsanagochihayaakasakawagoe" + - "164fndfolldalfootballooninguovdageaidnulsanfranciscotlandfor-bet" + - "ter-thanawafor-ourfor-somedizinhistorischesaseboknowsitallfor-th" + - "edmarketsaskatchewanggouvicenzaforexeterforgotdnsassaris-a-green" + - "forli-cesena-forlicesenaforliguriaforsalegalsaceforsandasuolocal" + - "historyfortmissoulan-udefensejnyfortworthachiojiyaitakaharulvikh" + - "melnytskyivallee-aosteroyforuminamiiserniafosnesatxn--1ctwolomin" + - "amatakinouefotherokuapparisor-fronfredrikstadtoyourafreiburgushi" + - "kamifuranoshiroomurafreightoystre-slidrettozawafribourgwangjuifm" + - "inamiizukamitondabayashiogamagoriziafriuli-v-giuliafriuli-ve-giu" + - "liafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriul" + - "i-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezi" + - "a-giuliafriuliveneziagiuliafriulivgiuliafrlfrogansaudafrognfrola" + - "ndfrom-akunemurorankoshigayabukidsauheradfrom-alfrom-arfrom-azjc" + - "bnlfrom-camerakershuscountryestateshinanomachippubetsubetsugaruh" + - "rfrom-coldwarszawafrom-ctozsdevenesavannahgafrom-dchernigovernme" + - "ntjomelhusgardenfrom-degreefrom-flandersaves-the-whalessandria-t" + - "rani-barletta-andriatranibarlettaandriafrom-gafrom-higashiagatsu" + - "magois-a-gurunsaitokuyamafrom-iafrom-idfrom-ilfrom-in-the-bandai" + - "wafunefrom-ksavonamsosnowiechernihivguccircleborkautokeinofrom-k" + - 
"yotobetsumidatlantichernivtsicilyngenfrom-langevagrarboretumbria" + - "from-mangonohejis-a-hard-workerfrom-mdfrom-meeresaxofrom-microso" + - "ftwaremarkerfrom-mnfrom-mochizukiryuohkurafrom-msayokkaichirurgi" + - "ens-dentistesbschoenbrunnfrom-mtnfrom-nchernovtsydneyfrom-ndfrom" + - "-nefrom-nhkhvalleeaosteigenfrom-njeonnamerikawauefrom-nminamimak" + - "is-a-hunterfrom-nvanylvenicefrom-nyfrom-ohtawaramotoineppugliafr" + - "om-oketogurafrom-orfrom-pacifichiryukyuragifudaigokaseekazimierz" + - "-dolnyfrom-praxis-a-anarchistoireggioemiliaromagnakanojohanamaki" + - "noharafrom-rittohmaniwakuratenris-a-knightrainingxn--1lqs03nfrom" + - "-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--1lqs71dfrom-utazunzenf" + - "rom-vadsogndalfrom-vtranapleschokoladenfrom-wafrom-wielunnerfrom" + - "-wvaomoriguchiharamcoacharterfrom-wyfrosinonefrostalowa-wolawafr" + - "oyahikobearalvahkikuchikumagayagawallonieruchomoscienceandindust" + - "rynfstcgrouparliamentranbyfujiiderafujikawaguchikonefujiminohach" + - "irogatakahashimamakitahatakahatakaishimogosenfujinomiyadafujioka" + - "yamannosegawafujisatoshonairlinebraskaunbieidsvollfujisawafujish" + - "iroishidakabiratorideliverybnikahokutogakushimotoganewmexicoffee" + - "dbacklabusinessebydgoszczecincinnativeamericanantiquescholarship" + - "schoolkuszlgzminamiminowafujiyoshidafukayabeardudinkakamigaharav" + - "ennagasakikugawalterfukuchiyamadafukudominichitachinakagawawildl" + - "ifestylefukuis-a-landscaperugiafukumitsukefukuokazakisarazure-mo" + - "bilegnicampobassociateschulevangerfukuroishigakishiwadafukusakis" + - "ofukushimansionschwarzparmafukuyamagatajimidoris-a-lawyerfunabas" + - "hiriuchinadafunagatajiris-a-liberalfunahashikamiamakusatsumasend" + - "aisennangooglecodespotrani-andria-barletta-trani-andriafundaciof" + - "uoiskujukuriyamanxn--1qqw23afuosskoczowindowschweizwithgoogleapi" + - "sa-hockeynutraniandriabarlettatraniandriafurniturehabikinokawair" + - "portland-4-salernogatagajobsciencecentersciencehistoryfurubiraqu" + - "arelleangaviikagaminogiessenvironmentalconservationfurudonostiaf" + - "urukawaharafusognefussafetysfjordfutabayamaguchinomigawafutboldl" + - "ygoingnowhere-for-moregontrailroadfuttsurugashimaoris-a-libertar" + - "ianfvgfylkesbiblackfridayfyresdalhannanmokuizumodenakanotoddenha" + - "nnovhadanotairescientistor-elvdalhanyuzenhapmirumarnardalhappous" + - "livinghistoryhareidsbergenharstadharvestcelebrationhasamarahasam" + - "inami-alpsienakasatsunairteledatabaseballangenoamishirasatobamag" + - "azinedre-eikerhasudahasvikmscrapper-sitehatogayaizuwakamatsubush" + - "ikusakadogawahatoyamazakitakamiizumisanofieldhatsukaichiharahatt" + - "fjelldalhawaiijimarriottranoyhayashimamotobunkyonanaoshimabariak" + - "ehazuminobuseljordhemnescrappinghemsedalherokusslattuminamisanri" + - "kubetsupplyheroyhigashichichibusheyhigashihiroshimanehigashiizum" + - "ozakitakyushuaiahigashikagawahigashikagurasoedahigashikawakitaai" + - "kitamidsundhigashikurumeetnedalhigashimatsushimarugame-hostinghi" + - "gashimatsuyamakitaakitadaitoigawahigashimurayamalatvuopmifunehig" + - "ashinarusells-for-lesserveftparservegame-servercellikescandynath" + - "omebuiltransportrapaniiminamiashigarahigashinehigashiomihachiman" + - "agementravellinohigashiosakasayamamotorcycleservicesettsurgeonsh" + - "alloffameldalhigashishirakawamatakanezawahigashisumiyoshikawamin" + - "amiaikitamotosumitakaginowanidhigashitsunotteroyhigashiurausukit" + - "anakagusukumodernhigashiyamatokoriyamanakakogawahigashiyodogawah" + - "igashiyoshinogaris-a-musicianhiraizumisatohnoshoooshikamaishimod" + - 
"atextileirvikokonoehirakatashinagawahiranairtraffichitosetoeihei" + - "jis-a-cubicle-slaveroykenhirarahiratsukagawahirayakagehistoricho" + - "usesevastopolewismillerhitachiomiyaginozawaonsenhitachiotagopart" + - "is-a-nascarfanhitoyoshimihamadahitradinghjartdalhjelmelandholeck" + - "obierzyceholidayhomelinuxn--2m4a15ehomessinashikitashiobarahomeu" + - "nixn--30rr7yhondahonefossewloclawekolobrzegyptianpachigasakieven" + - "assisibenikihokumakogeniwaizumiotsukumiyamazonawshakotankomagane" + - "hongorgets-itrdhonjyoichiropractichloehornindalhorsells-for-ustk" + - "arasuyamazoehortendofinternetreehotelefonicapebretonamiastarostw" + - "odzislawritesthisblogspotrentino-a-adigehotmailhoyangerhoylandet" + - "roitrentino-aadigehurdalhurumajis-a-nursells-itrentino-alto-adig" + - "ehyogoris-a-painteractivegarsheis-a-patsfanhyugawaraiwchocolatel" + - "evisionjewishartrentino-sudtiroljfkomitamamurajgorajlchofunatori" + - "kuzentakatairajoyoitakaokamikitayamatotakadajpnjprshimonosekikaw" + - "ajurkozagawakozakis-a-teacherkassymantechnologykragerotikamisuna" + - "gawakrakowroclawtcmwtfarsundkrasnoyarskomonokredstonekristiansan" + - "defjordkristiansundkrodsheradkrokstadelval-daostavalleykryminami" + - "tanekumatorinokumejimasudakumenanyokaichibaikaliszczytnordkappgk" + - "unisakis-a-techietis-a-photographermesaverdelmenhorstalbansharis" + - "-a-personaltrainerkunitachiaraisaijoshkar-olanshimotsumakunitomi" + - "gusukukis-a-therapistoiakunneppulawykunstsammlungkunstunddesignk" + - "ureitrentino-sued-tirolkurgankurobelaudiokurogimimatakasagotemba" + - "ixadakuroisohuissier-justicekuromatsunaishobaraholtalengerdalask" + - "anittedalvdalkurotakikawasakis-an-accountantsharpartnershellasko" + - "yabenorddalkurskomorotsukamishihoronobeokamiminershimosuwalkis-a" + - "-socialistmeincheonkushirogawakustanais-an-actorkusupplieshinich" + - "inankutchannelkutnokuzbassnoasagamiharakuzumakis-an-actressherbr" + - "ookegawakvafjordkvalsundkvamlidlugolekadenagahamaroygardenglandk" + - "vanangenkvinesdalkvinnheradkviteseidskogkvitsoykwwwkyowariasahik" + - "awamishimatsumaebashikshacknetrentino-suedtirolmissileksvikongsb" + - "ergmisugitokonamegatakashimatsumotofukemitakeharamitourismolensk" + - "ongsvingermitoyoakemiuramiyazurewebsiteshikagamiishikarikaturind" + - "almiyotamanomjondalenmonmouthagamonticellombardiamondshinjukuman" + - "omontrealestateofdelawarendalenvikingatlantakasugais-an-entertai" + - "nermonza-brianzaporizhzhiamonza-e-della-brianzaramonzabrianzamon" + - "zaebrianzamonzaedellabrianzamordoviajessheiminamiuonumateramolis" + - "ellsyourhomeipartshinjournalismolapyatigorskomvuxn--32vp30haebar" + - "uminamiogunionmoriyamatsunomoriyoshiokamitsuemormoneymoroyamatsu" + - "sakahogithubusercontentrentinoa-adigemortgagemoscowmoseushistory" + - "mosjoenmoskeneshinkamigotoyohashimotokyotangomosshinshinotsurger" + - "ymosvikoninjamisonmovistargardmtpchonanbuildingretajimakanegasak" + - "itagawamuenstermugivestbyklebesbyglandmuikamogawamukochikushinon" + - "senergymulhousembokumamotoyamassa-carrara-massacarraramassabuske" + - "rudineustarnbergmunakatanemuncieszynmuosattemurmanskonskowolaqui" + - "larvikommunalforbundmurotorcraftrentinoaadigemusashimurayamatsus" + - "higemusashinoharamuseetrentinoalto-adigemuseumverenigingmutsuzaw" + - "amyphotoshibajddarchaeologyeongnamegawakuyachimatainaikawababia-" + - "goracleaningmytis-a-bookkeeperminamiyamashirokawanabelgorodeopas" + - "senger-associationpaviapharmacienshinshiropharmacymrussiaphilade" + - "lphiaareadmyblogsitephilatelyphilipshintokushimaphoenixn--3bst00" + - 
"minanophotographysiopiagetmyipartysvardoomdnsaliascolipicenord-f" + - "ronpictetrentinoaltoadigepictureshintomikasaharapiemontepilotshi" + - "nyoshitomiokanmakitchenpippuwajimapiszpittsburghofashionpizzapko" + - "nyvelodingenplanetariumincommbankonantanangerplantationplantshio" + - "jirishirifujiedaplazaplchoseikakudamatsueplomzaporizhzhelpasaden" + - "akhodkanagawaplumbingopmnpodlasiellakasamatsudoosandnessjoenpodz" + - "onepohlpokerpolkowicepoltavalle-aostatoilpomorzeszowpordenonepor" + - "nporsangerporsangugeporsgrunnanposts-and-telecommunicationshioya" + - "nagawapoznanprdpreservationpresidioprincipeprivneprochowiceprodu" + - "ctionshirahamatonbetsurgutsiracusaikis-bytomakomaibaraprofastlyp" + - "rojectrentinos-tirolpromombetsurfauskedsmokorsetagayaseljejuegos" + - "hikiminokamoenairforcertificationpropertieshirakoenigpropertyume" + - "npruszkowprzeworskogptzpvtrentinostirolpzqsldsolundsolutionshira" + - "ois-certifieducatorahimeshimageandsoundandvisionsomasomnamsskoga" + - "neis-into-carshimokawasoosopotrentinosudtirolsor-odalsor-varange" + - "rsorfoldsorreisahayakawakamiichikaiseiyokoshibahikariwanumatakay" + - "amasortlandsorumisakis-an-artisteinkjerusalembetsukuis-a-republi" + - "cancerresearchaeologicaliforniasouthcarolinazawasouthwesterniika" + - "ppusowaspace-to-rentalshiraokanonjis-foundationspbaltimore-og-ro" + - "msdalimoldepotaruibmdnepropetrovskaruizawaustrheimatunduhrenneso" + - "yokote-burggfamilyokoze12spiegelspjelkavikoryolasitespreadbettin" + - "gspydebergsrvareseminestordalstorenburgstorfjordstpetersburgstud" + - "yndns-blogdnshiratakahagis-gonestuff-4-salezajskosaigawastuttgar" + - "trentinosued-tirolsurreysusakis-into-cartoonshimokitayamasusonos" + - "uzakanumazurysuzukanzakiwakunigamihoboleslawiechoshibukawasuzuki" + - "s-into-gameshimonitayanagis-a-rockstarachowicesvalbardurbannefra" + - "nkfurtrentinosuedtirolsveiosvelvikosakaerodromedecinemailsvizzer" + - "aswidnicapitalswiebodzindianapolis-a-bloggerswinoujscienceandhis" + - "toryswisshikis-leetrentino-altoadigesxn--3ds443gtuis-savedtulava" + - "gisketurystykarasjoksneshishikuis-into-animegurovigorlicetuscany" + - "tushumanitieshisokanoyakutiatuvalle-d-aostavangertverdalvaroyvba" + - "mbleasecn-north-1vchoyodobashibuyachtsangovdonskoshimizumakiyose" + - "mitevegaskvollvenneslaskerveronarashinoverranversailleshisuifuel" + - "veruminnesotaketakasakis-an-anarchistoricalsocietysneshimojis-a-" + - "playerversicherungvestfoldvestneshitaramavestre-slidreamhostersh" + - "izukuishimofusaintlouis-a-bruinsfanshiranukannamiharuvestre-tote" + - "nnishiawakuravestvagoyvevelstadvibo-valentiavibovalentiavideovil" + - "lasmatartcenterprisesakikonaioirasebastopologyeonggiehtavuoatnad" + - "exchangeiseiroumuenchendoftheinternetcimmobilienebakkeshibechamb" + - "agriculturennebudapest-a-la-maisondriodejaneirochestervinnicarbo" + - "nia-iglesias-carboniaiglesiascarboniavinnytsiavirginiavirtualvir" + - "tuelvistaprinternationalfirearmshizuokanraviterboltroandinosaure" + - "portrentottoris-lostfoldvladikavkazanvladimirvladivostokaizukara" + - "tevlogvoldavolgogradvolkenkunderseaportrogstadvologdanskoshunant" + - "okashikiyosumypetshimotsukevolyngdalvoronezhytomyrvossevangenvot" + - "evotingvotoyakokamisatohobby-sitevrnxn--4it168dxn--4it797kostrom" + - "ahabororoskoleirfjordxn--4pvxshowaxn--54b7fta0cchtraeumtgeradeat" + - "nurembergrimstadxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49chungb" + - "ukazoxn--5rtq34kosugexn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3" + - "xlxn--7t0a264chungnamdalseidfjordxn--80adxhkshriramsterdamberkel" + - 
"eyxn--80ao21axn--80asehdbarcelonagareyamalopolskanlandnipropetro" + - "vskarumaifareastcoastaldefencebetsukubahcavuotnagaivuotnagaokaky" + - "otambadajozorakkestadultargiheyakumoduminamidaitomandalindaskimi" + - "tsubatamiasakuchinotsuchiurakawalbrzychattanooganord-odalindesne" + - "s3-eu-west-1xn--80aswgxn--80augustowadaejeonbukotohiradomainsura" + - "ncexn--8ltr62kotouraxn--8pvr4uxn--90a3academykolaivanovosibirski" + - "ervaapsteiermarkhangelskiptveterinairealtorlandxn--90azhagebosta" + - "dxn--9dbhblg6diethnologyxn--9et52uxn--andy-iraxn--aroport-byanai" + - "zuxn--asky-iraxn--aurskog-hland-jnbarclaycards3-fips-us-gov-west" + - "-1xn--avery-yuasakakinokis-slickomakiyokawaraxn--b-5gaxn--b4w605" + - "ferdxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuot" + - "na-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qaxn-" + - "-bjarky-fyandexposedogawarabikomaezakirunortonsbergxn--bjddar-pt" + - "akkofuefukihabmerxn--blt-elaborxn--bmlo-grajeworldxn--bod-2narit" + - "akurashikis-uberleetrentino-s-tirollagrigentomologyeongbukomatsu" + - "shimashikiyosatokamachildrensgardenxn--brnny-wuaccident-investig" + - "ationjukudoyamaceratabuseat-band-campaniamallamadridvagsoyerimo-" + - "i-ranaamesjevuemielnoboribetsuitachikawakayamagadancechirebungoo" + - "nomichinomiyakeisenbahnxn--brnnysund-m8accident-preventionlineat" + - "-urlxn--brum-voagatromsaitamatsukuris-not-certifiedunetworkanger" + - "xn--btsfjord-9zaxn--c1avgxn--c3s14misasaguris-an-engineeringerik" + - "exn--cg4bkis-very-badaddjamalborkdalxn--ciqpnxn--clchc0ea0b2g2a9" + - "gcdxn--comunicaes-v6a2oxn--correios-e-telecomunicaes-ghc29axn--c" + - "zr694barclays3-sa-east-1xn--czrs0tromsolarssonxn--czru2dxn--czrw" + - "28bargainstitutelekommunikationaturalhistorymuseumcentereviews3-" + - "us-gov-west-1xn--d1acj3barreauctionaturalsciencesnaturelles3-us-" + - "west-1xn--d1atrusteexn--d5qv7z876churcheltenham-radio-operaunite" + - "lemarkazunoxn--davvenjrga-y4axn--djrs72d6uyxn--djty4kouhokutamak" + - "izunokunimilitaryxn--dnna-grandrapidsigdalxn--drbak-wuaxn--dyry-" + - "iraxn--eckvdtc9dxn--efvn9simbirskooris-a-soxfanxn--efvy88haibara" + - "kitahiroshimaritimodellingxn--ehqz56nxn--elqq16hakatanotogawaxn-" + - "-eveni-0qa01gaxn--f6qx53axn--finny-yuaxn--fiq228c5hsimple-urlxn-" + - "-fiq64barrel-of-knowledgeometre-experts-comptablest-mon-blogueur" + - "ovisionaturbruksgymnaturhistorisches3-us-west-2xn--fiqs8sirdalxn" + - "--fiqz9slgbtrentinosud-tirolxn--fjord-lraxn--fjq720axn--fl-ziaxn" + - "--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-granexn--frna-woarais" + - "-very-evillagexn--frya-hraxn--fzc2c9e2chuvashiaxn--gecrj9circusc" + - "ulturecreationxn--ggaviika-8ya47hakodatexasiaxn--gildeskl-g0axn-" + - "-givuotna-8yaotsurnadalxn--gjvik-wuaxn--gls-elacaixaxn--gmq050is" + - "-very-gooddaxn--gmqw5axn--h-2failxn--h1aeghakonexn--h2brj9citica" + - "sadelamonedaxn--hbmer-xqaxn--hcesuolo-7ya35barrell-of-knowledgeo" + - "rgiauthordalandroidiscountydalillesandiegotsukisosakitagatakaham" + - "arburgjemnes3-ap-southeast-1xn--hery-iraxn--hgebostad-g3axn--hmm" + - "rfeasta-s4acityeatsanjotoyonezawaxn--hnefoss-q1axn--hobl-iraxn--" + - "holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-5" + - "4axn--i1b6b1a6a2exn--imr513nxn--indery-fyaroslavlaanderenxn--io0" + - "a7is-very-nicexn--j1amhakubanxn--j6w193gxn--jlster-byasakaiminat" + - "oyokawaxn--jrpeland-54axn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcr" + - "x77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9" + - "axn--klty5xn--3e0b707exn--koluokta-7ya57hakuis-a-linux-useranish" + - 
"iaritabashijonawatexn--kprw13dxn--kpry57dxn--kput3is-very-sweetr" + - "entino-stirolxn--krager-gyasugis-with-thebandontexisteingeekomfo" + - "rbalsfjordivttasvuotnakaiwamizawaustraliaisondre-landebudejjuedi" + - "schesapeakebayerndirectoryggeelvinckarpaczeladz-2xn--kranghke-b0" + - "axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-" + - "sud-tirolxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasuokaratsugin" + - "amikatagamilanoxn--kvnangen-k0axn--l-1fairwindslupskopervikommun" + - "exn--l1accentureklamurskjaknoluoktaikicks-assedicivilaviationxn-" + - "-laheadju-7yatominamibosojavaksdalxn--langevg-jxaxn--lcvr32dxn--" + - "ldingen-q1axn--leagaviika-52bashkiriautomotivelandiscoveryokamik" + - "awanehonbetsuruokamakurazakirkenes3-ap-southeast-2xn--lesund-hua" + - "xn--lgbbat1ad8jevnakerxn--lgrd-poacivilisationxn--lhppi-xqaxn--l" + - "inds-pratoyonakagyokutomskounosunndalxn--lns-qlavangenxn--loabt-" + - "0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacivilizationxn--lten-g" + - "ranvindafjordxn--lury-iraxn--mely-iraxn--merker-kuaxn--mgb2ddesn" + - "zxn--mgb9awbfcastresistancexn--mgba3a3ejtrysilkoseis-a-studental" + - "xn--mgba3a4f16axn--mgba3a4franamizuholdingsmileikangerxn--mgbaam" + - "7a8hakusandoyxn--mgbab2bdxn--mgbayh7gpaduaxn--mgbb9fbpobanazawax" + +const text = "biomutashinainfoggiabirdartdecodynaliascoli-picenord-frontierbir" + + "kenesoddtangenovaravennagatorockartuzyunsakakinokiabirthplacevje" + + "-og-hornnesangostrodawarabjarkoyurihonjournalistjordalshalsenfsh" + + "ostre-totenkawabjerkreimmobilienhsanjotatsunostrolekaniepcexpose" + + "dogawarabikomaezakirunortonsbergminakamichigangwonikonantananger" + + "bjugninohelpaleostrowiecasertairabloombergbauernuorokunohealthca" + + "reerschmidtre-gauldalottokashikinuyamanouchikuhokuryugasakitaura" + + "yasudabluedatsunanjogaszkolahppiacenzachpomorskieninomiyakonojos" + + "oyrovnostrowwlkpmgmodalenirasakinvestmentsannanishiazais-a-candi" + + "datexasiabmsannohelsinkitakamiizumisanofieldyndns-freemasonryusu" + + "harabmweirbnpparibaselburgmxboxeroxjaworznobomloanswatch-and-clo" + + "ckerbondyndns-homednsanokasumigaurawa-mazowszexeterbonnishigotvs" + + "antabarbarabootsantacruzsantafedjelenia-goraboschlesischesanukis" + + "-a-catererbostikasuyakutiabostonakijinsekikogentingretajimakaneg" + + "asakitagawabotanicalgardenishiharabotanicgardenishiizunazukis-a-" + + "celticsfanishikatakayamatta-varjjatattoolsztynsettlersaotomelbou" + + "rnextraspace-to-rentalsapodhalebotanynysadoesntexistanbullensake" + + "rboutiquebecasinore-og-uvdalouvrepair-traffic-controlleyusuisser" + + "vegame-serverdalovenneslaskerrylogisticsapporobozentsujiiebrades" + + "corporationishikatsuragivingrimstadyndns-ip6brandywinevalleyuulm" + + "inamibosogndalowiczest-le-patrondheimperiabrasiljan-mayenishikaw" + + "azukaneyamaxunjargabresciabrindisiciliabristolgalsacebritishcolu" + + "mbialowiezagannakadomari-elasticbeanstalkaszubyuzawabroadcastleb" + + "timnetzgorabroadwayuzhno-sakhalinskatowicebroke-itaxihuanishimer" + + "abrokerrypropertiesaratovalleaostavropolicebronnoysundyndns-mail" + + "ubindalucaniabrothermesaverdealstahaugesundyndns-office-on-the-w" + + "ebcambridgestonewportlligatewaybrumunddaluccapetownishinomiyashi" + + "ronobrunelblagdenesnaaseralingenkainanaejrietiendaburyatiabrusse" + + "lsardegnamsosnowiecastresistancebruxellesardiniabryanskleppalerm" + + "omasvuotnakatsugawabryneuesarlucernebuyshousesarpsborgripebuzeni" + + "shinoomotegovtgorybuzzgorzeleccollegersundyndns-picsarufutsunomi" + + 
"yawakasaikaitakoelnishinoshimabwfarmequipmentjeldsundyndns-remot" + + "egildeskalmykiabzhitomirkutskodjeffersonishiokoppegardyndns-serv" + + "erbaniacntoyonezawacolognewjerseycolonialwilliamsburgrpanamacolo" + + "radoplateaudiocolumbusantiquest-a-la-masioncommunitydaluxembourg" + + "ruecomobaracompanycompute-1computerhistoryofscience-fictioncomse" + + "curitysfjordcondoshichikashukujitawaraconferenceconstructioncons" + + "uladollsatxn--0trq7p7nnconsultanthropologyconsultingvolluxurycon" + + "tactoyonocontemporaryartgallerybnikahokutogitsuldaluzerncontract" + + "orskenconventureshinodesashibetsuikindleikangercookingchannelver" + + "uminamiechizencoolbia-tempio-olbiatempioolbialystokkecoopocznori" + + "lskypescaravantaarparachutinguidellogliastradercopenhagencyclope" + + "dicdn77-sslattuminamidaitomangotsukisosakitagatakamoriokamikitay" + + "amatotakadacorsicamerakershus-east-1corvettelemarkazunocosenzako" + + "panerairguardcostumedio-campidano-mediocampidanomediocouncilviva" + + "no-frankivskchristiansburguitarsaudacouponsauheradcoursesavannah" + + "gacqhachinoheguris-a-democratoyookarasjohkamiokaminokawanishiaiz" + + "ubangecranbrookuwanalyticsaves-the-whalessandria-trani-barletta-" + + "andriatranibarlettaandriacreditcardcreditunioncremonashorokanaie" + + "crewiiheyaizuwakamatsubushikusakadogawacricketrzyncrimeacrotonew" + + "mexicoldwarmiamiastarnbergujolstercrowncrsavonamsskoganeis-a-des" + + "ignercruisesaxocuisinellancasterculturalcentertainmentoyosatomsk" + + "ddielddanuorrikuzentakatajirissafetysnesayokkaichirurgiens-denti" + + "stesbscholarshipschoolcuneocupcakecxn--11b4c3dcyouthdfcbankfhskh" + + "abarovskhakassiafinlandfinnoyfirebaseappspotenzamamicrolightingu" + + "nmaritimodellinguovdageaidnulsanfranciscotlandfirenzefirestonewy" + + "orkshireggio-calabriafirmdaleirfjordfishingoldpointelligencefitj" + + "arfitnessettlementoyotsukaidovre-eikerfjalerflesbergushikamifura" + + "noshiroomuraflickragerotikamchatkameokameyamashinatsukigatakahar" + + "ussiaflightschweizippodlasiellakasamatsudoosandnessjoenfloguchik" + + "uzenfloraflorencefloridafloristanohatakahashimamakirkenesciencec" + + "entersciencehistoryfloromskogxn--1ck2e1balestrandabergamoarekepn" + + "ordlandivtasvuodnakaiwamizawaugustowadaejeonbukarlsoyokote12flow" + + "erscientistor-elvdalflsmidthruhereggio-emilia-romagnakanotoddenf" + + "lynnhubalsanagochihayaakasakawagoebinagisoccertificationaturalsc" + + "iencesnaturelles3-sa-east-1fndfolldalfoodnetworkangerfor-better-" + + "thandafor-ourfor-somedizinhistorischescrapper-sitefor-theatreefo" + + "rexrothachiojiyahikobeatscrappingzjcbnlforgotdnservicesettsuppor" + + "toyouraforli-cesena-forlicesenaforliguriaforsaleirvikharkivalled" + + "-aostakinoueforsandasuolodingenfortmissoulan-udefenseljordfortwo" + + "rthachirogatakanabeauxartsandcraftsevastopoleforuminamiizukamito" + + "ndabayashiogamagoriziafosnesewildlifestylefotoystre-slidrettozaw" + + "afredrikstadtveronakamuratakahamaniwakuratelevisionfreiburgfreig" + + "htcmwilliamhillfribourgfriuli-v-giuliafriuli-ve-giuliafriuli-veg" + + "iuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriu" + + "liv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriul" + + "iveneziagiuliafriulivgiuliafrlfroganshakotankharkovalledaostakko" + + "fuelfrognfrolandfrom-akrehamnfrom-alfrom-arfrom-azlgfrom-capebre" + + "tonamiasakuchinotsuchiurakawassamukawataricohdavvesiidazaifudaig" + + "odoharuhrfrom-collectionfrom-ctozsdeltajimibungotakadavvenjargam" + + "vikhersonfrom-dcheltenham-radio-operaunitelefonicagliaridagawars" + + 
"zawashingtondclkatsushikabelaudiblebesbyglandyndns-weberlincolni" + + "shitosashimizunaminamiashigarafrom-degreefrom-flandersharis-a-ge" + + "ekhmelnitskiyamashikiyosatohoboleslawiechelyabinskydivingriwatar" + + "ailwayfrom-gausdalfrom-higashiagatsumagoizumizakirovogradoyfrom-" + + "iafrom-idfrom-ilfrom-in-the-bandaiwafunexusdecorativeartsharpara" + + "glidingfrom-kshawaiijimarylandfrom-kyknetnedalfrom-langevagrarbo" + + "retumbriafrom-mannosegawafrom-mdfrom-meereshellaspeziafrom-micro" + + "softbankhmelnytskyivallee-aosteroyfrom-mnfrom-mochizukiryuohkura" + + "from-msherbrookegawafrom-mtnfrom-nchernigovernmentjomeloyalistoc" + + "kholmestrandyndns-wikirafrom-ndfrom-nefrom-nhktranaritakurashiki" + + "s-a-greenfrom-njcparisor-fronfrom-nminamimakis-a-gurulvikhvallee" + + "aosteigenfrom-nvanylvenicefrom-nyfrom-ohtawaramotoineppugliafrom" + + "-oketogolfashionfrom-orfrom-paderbornfrom-praxis-a-anarchistoire" + + "ggiocalabriafrom-rittogurafrom-schokoladenfrom-sdnipropetrovskla" + + "businessebykleclerchernihivgucciprianiigataishinomakikugawatchan" + + "dclockatsuyamashikefrom-tnfrom-txn--1ctwolominamatamayukis-a-har" + + "d-workerfrom-utazuerichardlikes-piedmontblancashireggioemiliarom" + + "agnakasatsunairlinebraskaunbieidsvollfrom-vadsochildrensgardenfr" + + "om-vtranbyfrom-wafrom-wielunnerfrom-wvaofrom-wyfrosinonefrostalo" + + "wa-wolawafroyahabaghdadfstcgrouparliamentrani-andria-barletta-tr" + + "ani-andriafujiiderafujikawaguchikonefujiminohadanotaireshimojis-" + + "a-hunterfujinomiyadafujiokayamansionshimokawafujisatoshonairport" + + "land-4-salernogiessengerdalaskanittedallasalleaseeklogeshimokita" + + "yamafujisawafujishiroishidakabiratorideliveryggeelvinckmpspbalsf" + + "jordivttasvuotnakamagayachts3-us-gov-west-1fujiyoshidafukayabear" + + "dubaiduckdnsdojoetsurutaharaumakeupowiatmallorcafederationfukuch" + + "iyamadafukudominichernivtsicilyfukuis-a-knightraniandriabarletta" + + "traniandriafukumitsukefukuokazakisarazure-mobileitungsenfukurois" + + "higakishiwadafukusakisofukushimanxn--1lqs03nfukuyamagatakahataka" + + "ishimogosenfunabashiriuchinadafunagatakamatsukawafunahashikamiam" + + "akusatsumasendaisennangonohejis-a-landscaperugiafundaciofuoiskuj" + + "ukuriyamaoris-a-lawyerfuosskoczowinbaltimore-og-romsdalillesandi" + + "egotembaixadaukraanghkemerovodkagoshimagnitkakamigaharaholtaleni" + + "waizumiotsukumiyamazonawsaarlandgcampobassociates3-eu-west-1furn" + + "iturehabikinokawairtelecityeatshimonitayanagis-a-liberalfurubira" + + "quarelleasingleshimonosekikawafurudonostiafurukawaharafusodegaur" + + "afussagaeroclubmedecincinnativeamericanantiquesquarendalenugfuta" + + "bayamaguchinomigawafutboldlygoingnowhere-for-moregontrailroadfut" + + "tsurugashimarburgfvgfyis-a-libertarianfylkesbiblackfridayfyresda" + + "lhannanmokuizumodenakatombetsulikescandynathomebuiltranoyhannova" + + "reservebbshimotsukehanyuzenhapmirumarnardalhappounzenhareidsberg" + + "enharstadharvestcelebrationhasamarahasaminami-alpssells-for-ustk" + + "arasuyamazoehasudahasvikokonoehatogayahoooshikamaishimodatenris-" + + "a-nurseminehatoyamazakitahiroshimarriottransportrapaniimimatakas" + + "ugais-a-painteractivegaskimitsubatamicadaqueshimotsumahatsukaich" + + "iharahattfjelldalhayashimamotobunkyonanaoshimabariakehazuminobus" + + "ells-itravelchannelhembygdsforbundhemneshinichinanhemsedalheroku" + + "ssldheroyhgtvarggatravelersinsurancehigashichichibusheyhigashihi" + + "roshimanehigashiizumozakitakatakaokamikoaniikappulawyhigashikaga" + + "wahigashikagurasoedahigashikawakitaaikitakyushuaiahigashikurumee" + + 
"trdhigashimatsushimarugame-hostinghigashimatsuyamakitaakitadaito" + + "igawahigashimurayamalatvuopmidoris-a-patsfanhigashinarusellsyour" + + "homeftpaccesshinjournalismolanshinjukumanohigashinehigashiomihac" + + "himanchesterhigashiosakasayamamotorcycleshinkamigotoyohashimotok" + + "uyamahigashishirakawamatakarazukamiminershinshinotsurfastlyhigas" + + "hisumiyoshikawaminamiaikitamidsundhigashitsunotteroyhigashiuraus" + + "ukitamotosumidatlantichiryukyuragifuefukihabmerhigashiyamatokori" + + "yamanakakogawahigashiyodogawahigashiyoshinogaris-a-personaltrain" + + "erhiraizumisatohmarumorimachidahirakatashinagawahiranairtrafficb" + + "cghirarahiratsukagawahirayaitakasagooglecodespotrentino-a-adigeh" + + "istorichouseshinshirohitachiomiyaginowaniihamatamakawajimarcheap" + + "artmentshintokushimahitachiotagoparocherkasyzrankoshigayaltaikis" + + "-a-photographerokuapparshintomikasaharahitoyoshimifunehitradingh" + + "jartdalhjelmelandholeckobierzyceholidayhomeipartis-a-playerhomel" + + "inuxn--1lqs71dhomessinashikitanakagusukumodernhomeunixn--1qqw23a" + + "hondahonefosshinyoshitomiokanmakiwakunigamihamadahongorgehonjyoi" + + "chiropractichitachinakagawatchesasayamahornindalhorsembokukitash" + + "iobarahortendofinternetrentino-aadigehoteleshiojirishirifujiedah" + + "otmailhoyangerhoylandetroitskolobrzegyptianquanconagawakeisenbah" + + "nhurdalhurumajis-a-republicancerresearchaeologicaliforniahyogori" + + "s-a-rockstarachowicehyugawarajfkomakiyosemitejgorajlchloejlljmpa" + + "rtnershiraokanonjis-a-studentaljnjeonnamerikawauejoyoitakasakitc" + + "henjpmorganichocolatelekommunikationishiwakis-a-conservativegars" + + "heis-a-cpadoval-daostavalleyjpnchofunatorientexpressexyzgradyndn" + + "s-workshoppdalukowhalingroks-thisayamanashichinohedmarketsasebok" + + "nowsitallyngenissandvikcoromantovalle-aostatoiluroyjprshiratakah" + + "agis-a-teacherkassymantechnologyjurkredstonekristiansandefjordkr" + + "istiansundkrodsheradkrokstadelvaldaostathellewismillerkryminamio" + + "guniversitykumatorinokumejimateramoldelmenhorstalbanshisuifuette" + + "rtdasnetzwindmillkumenanyokaichibahccavuotnagareyamaintenancekun" + + "isakis-an-actresshirahamatonbetsurgeonshalloffamelhuslivinghisto" + + "rykunitachiaraisaijoshkar-olayangroupartykunitomigusukumamotoyam" + + "asudakunneppupasadenaklodzkodairakunstsammlungkunstunddesignkuok" + + "groupassagenshitaramakureitrentino-sudtirolkurgankurobellevuelos" + + "angelesjaguarchitecturebungoonomichinomiyakembuchikujobservercel" + + "liernemurorangemologicallfinanzkurogimilitarykuroisoftwaremarker" + + "ryhotelshizukuishimofusaikis-an-anarchistoricalsocietyumenkuroma" + + "tsunais-an-artistjohnkurotakikawasakis-an-engineeringerikekursko" + + "mmunalforbundkushirogawakustanais-an-entertainerkusunndalkutchan" + + "elkutnokuzbassnillfjordkuzumakis-bykvafjordkvalsundkvamlidlugole" + + "kagaminord-aurdalvdalipayufuchukotkafjordkvanangenkvinesdalkvinn" + + "heradkviteseidskogkvitsoykwpspjelkavikommunekyotobetsupersportre" + + "ntino-sued-tirolkyowariasahikawamissilexusgardenmisugitokigawami" + + "takeharamitourismolenskomorotsukamisunagawamitoyoakemiuramiyazur" + + "ewebsiteshikagamiishikarikaturindalmiyotamanomjondalenmlbarclays" + + "3-us-west-2monmouthaebaruminamiminowamonticellombardiamondshouji" + + "s-into-animegurovigorlicemontrealestatebankomvuxn--2m4a15emonza-" + + "brianzaporizhzhekinannestadmonza-e-della-brianzaporizhzhiamonzab" + + "rianzapposts-and-telecommunicationshowamonzaebrianzaramonzaedell" + + "abrianzamordoviajessheiminamisanrikubetsupplieshizuokanoyakagemo" + + 
"riyamatsusakahogithubusercontentrentino-suedtirolmoriyoshiokamit" + + "suemormoneymoroyamatsushigemortgagemoscowindowshriramsterdamberk" + + "eleymoseushistorymosjoenmoskenesienaplesigdalmossimbirskongsberg" + + "mosvikongsvingermoviemovistargardmtpccwioslombardyndns-at-homede" + + "potaruis-into-carshirakoenigmtrainingmuenstermugivestbytomakomai" + + "baramuikamogawamukochikushinonsenergymulhouseoullensvangmulticho" + + "icemunakatanemuncieszynmuosattemupassenger-associationmurmanskon" + + "injamisonymurotorcraftrentinoa-adigemusashimurayamatsuuramusashi" + + "noharamuseetrentinoaadigemuseumverenigingmutsuzawamutuellezajsko" + + "nskowolapyatigorskomatsushimashikokuchuomyphotoshibaikaliszczytn" + + "ordkappgmytis-a-bookkeeperminamitanephilatelyphilipsyphoenixn--3" + + "0rr7yphotographysiopiagetmyipaviancapitalpictetrentinoaltoadigep" + + "icturesirdalpiemontepilotslupskonsulatrobelgorodeopinkonyvelolku" + + "szpartshishikuis-a-techietis-a-socialistmeincheonpippupiszpittsb" + + "urghofauskedsmokorsetagayasells-for-lesschulepiwatepizzapkooris-" + + "a-therapistoiaplanetariuminamiuonumatsumotofukeplantationplantsn" + + "oasaintlouis-a-bruinsfansnzplatforminamiyamashirokawanabellunord" + + "re-landplaystationplazaplchonanbuildingrondarplomzansimagichosei" + + "kakegawawhoswhokksundyroyrvikingrongaplumbingoplusterpmnpodzonep" + + "ohlpokerpokrovskopervikomforbambleborkarpaczeladz-2polkowicepolt" + + "avalle-d-aostavangerpomorzeszowitdkoryolasitepordenonepornporsan" + + "gerporsangugeporsgrunnanpoznanprdpreservationpresidioprimeiwamat" + + "sumaebashikshacknetrentinos-tirolprincipeprivneprochowiceproduct" + + "ionsokanraprofermobilyprojectrentinostirolpromombetsupplypropert" + + "yprotectionpruszkowithgoogleapisa-hockeynutrentinosud-tirolprzew" + + "orskogptzpvtrentinosudtirolpzqldqponqslgbtrentinosued-tirolsrlsr" + + "varoystoragestordalstorenburgstorfjordstpetersburgstudiostudyndn" + + "s-blogdnsolarssonstuff-4-salestuttgartrentoshimasurgutsiracusait" + + "amatsukuris-into-cartoonshiranukannamiharusurnadalsurreysusakis-" + + "into-gameshiraois-a-soxfansusonosuzakanumazurysuzukanzakiwiensuz" + + "ukis-leetrentino-alto-adigesvalbardudinkamakurazakinkobayashikao" + + "irmincommbankomonosveiosvelvikosaigawasvizzeraswedenswidnicarbon" + + "ia-iglesias-carboniaiglesiascarboniaswiebodzindianapolis-a-blogg" + + "erswinoujscienceandhistoryswisshikis-lostfoldsxn--32vp30hagakhan" + + "amigawatuis-not-certifiedunetflixiltulaquilarvikoseis-an-account" + + "antshioyameldaltunesologneturystykarasjoksnesolundtuscanytushuma" + + "nitiesolutionsokndaltuvalle-daostavernversicherungvestfoldvestne" + + "somnarashinovestre-slidreamhostersoovestre-totennishiawakuravest" + + "vagoyvevelstadvibo-valentiavibovalentiavideovillaskvolloabathsbc" + + "ngvinnicargojomediavinnytsiavipsinaappharmacymruovatrentinoalto-" + + "adigevirginiavirtualvirtuelvistaprinternationalfirearmsopotrenti" + + "nosuedtirolviterboltrevisohuissier-justicevladikavkazanvladimirv" + + "ladivostokaizukaratevlogvoldavolgogradvolkenkunderseaportroandin" + + "osaurepbodyndns-at-workinggroupharmaciensimple-urlvolkswagentsor" + + "-odalvologdanskoshunantokamachippubetsubetsugaruvolyngdalvoronez" + + "hytomyrvossevangenvotevotingvotottoris-savedvrnwroclawloclawekos" + + "tromahabororoskolelwtchoyodobashibuyachiyodawtferrarawuozuwwwrit" + + "esthisblogspotrogstadwzmiuwajimaxn--3pxu8kosugexn--42c2d9axn--45" + + "brj9christmasakimobetsuwanouchikuseihichisobetsuitaitogakushimot" + + "oganewhampshirecreationissedalutskaufenisshinguernseyxn--45q11ch" + + 
"romedicaltanissettaiwanairforceoxn--4gbriminingxn--4it168dxn--4i" + + "t797kotohiradomainsureisenxn--4pvxsor-varangerxn--54b7fta0cchtra" + + "eumtgeradeatnurembergrossetouchijiwadell-ogliastrakhanawaxn--55q" + + "w42gxn--55qx5dxn--5js045dxn--5rtp49chungbukautokeinoxn--5rtq34ko" + + "touraxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--" + + "7t0a264chungnamdalseidfjordxn--80adxhksorfoldxn--80ao21axn--80as" + + "ehdbargainstituteledatabaseballooningjerdrumemorialimitedownload" + + "rangedalimoliserniaustraliaisondre-landebudejjuedischesapeakebay" + + "erndigitalillehammerfest-mon-blogueurovisionasushiobaragrinetban" + + "kz-1kappleangaviikadenaamesjevuemielnoboribetsucks3-ap-northeast" + + "-1xn--80aswgxn--80audnedalnxn--8ltr62kouhokutamakizunokunimilano" + + "xn--8pvr4uxn--8y0a063axn--90a3academykolaivanovosibirskiervaapst" + + "eiermarkhangelskinderoyerimo-i-ranadexchangeiseiroumuenchendofth" + + "einternetcimdbarreauctionaturbruksgymnaturhistorischesakuraibiga" + + "waustrheimatunduhrennesoyokoze164xn--90aishobaraxn--90azhagebost" + + "adxn--9dbhblg6diethnologyxn--9dbq2axn--9et52uxn--9krt00axn--andy" + + "-iraxn--aroport-byanagawaxn--asky-iraxn--aurskog-hland-jnbarrel-" + + "of-knowledgeorgiauthordalandroidiscountysvardonnaharimamurogawag" + + "roks-theaternidds3-ap-southeast-1xn--avery-yuasakatakazakis-slic" + + "komaganexn--b-5gaxn--b4w605ferdxn--bck1b9a5dre4churchaseljejuego" + + "shikiminokamoenaircraftmpamperedchefarmsteadxn--bdddj-mrabdxn--b" + + "earalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7" + + "axn--bidr-5nachikatsuuraxn--bievt-0qaxn--bjarky-fyanaizuxn--bjdd" + + "ar-ptambovdonskoshimizumakiyosumitakaginozawaonsenxn--blt-elabor" + + "xn--bmlo-graingerxn--bod-2naroyxn--brnny-wuaccident-investigatio" + + "njukudoyamaceratabuseat-band-campaniamallamadridvagsoygardenebak" + + "keshibechambagricaaarborteaches-yogasawaracingxn--brnnysund-m8ac" + + "cident-preventionlineat-urlxn--brum-voagatromsaitokorozawaxn--bt" + + "sfjord-9zaxn--c1avgxn--c2br7gxn--c3s14misakis-foundationxn--cck2" + + "b3barrell-of-knowledgets-itarumizusawautomotivelandiscoveryokami" + + "kawanehonbetsuruokaluganskarmoyomitanobihirosakikamijimakunecn-n" + + "orth-1xn--cg4bkis-uberleetrentino-altoadigexn--ciqpnxn--clchc0ea" + + "0b2g2a9gcdn77-securecipesaro-urbino-pesarourbinopesaromalvikouno" + + "sumypetshisognexn--comunicaes-v6a2oxn--correios-e-telecomunicaes" + + "-ghc29axn--czr694bashkiriautoscanadagestangeologyonabarudmurtiam" + + "usementargibestadevenes3-ap-southeast-2xn--czrs0tromsojavald-aos" + + "tarostwodzislawiwatsukiyonowtvbarefootballangenoamishirasatobish" + + "imaizurubtsovskjervoyageometre-experts-comptablesakuragawaustinn" + + "aspers3-external-1xn--czru2dxn--czrw28basilicataniaveroykenviron" + + "mentalconservationaturalhistorymuseumcentereportarnobrzegjemnes3" + + "-external-2xn--d1acj3batochigiftsakyotanabeneventochiokinoshimal" + + "opolskanlandrivefsncfagebizenakaniikawatanaguravocatanzarowebhop" + + "agespeedmobilizerobirasnesoddenmarketplace-burggfamilyokosukariy" + + "akumodumemerck-uralsk12xn--d1alferreroticanonoichikawamisatodayx" + + "n--d1atrusteexn--d5qv7z876chuvashiaxn--davvenjrga-y4axn--djrs72d" + + "6uyxn--djty4kouyamasoyxn--dnna-grajeworldxn--drbak-wuaxn--dyry-i" + + "raxn--eckvdtc9dxn--efvn9sorreisahayakawakamiichikaiseiyokoshibah" + + "ikariwanumataketomisatokyotangoxn--efvy88haibarakitahatakanezawa" + + "xn--ehqz56nxn--elqq16hakatanotogawaxn--estv75gxn--eveni-0qa01gax" + + "n--f6qx53axn--fct429kouzushimassa-carrara-massacarraramassabuske" + + 
"rudineustarhubarclaycards3-us-west-1xn--fhbeiarnxn--finny-yuaxn-" + + "-fiq228c5hsortlandxn--fiq64batsfjordrobaknoluoktainaibetsubameri" + + "canartanddesignieznogatagajobojibmdunloppacificarriereviewskrako" + + "weddingjerstadotsurugimetlifeinsurancehimejiinetatamotorsalangen" + + "atuurwetenschappenaumburgjesdalindaskoyabenorddalindesnesalatata" + + "rstanaustdalinkaruizawavoues3-fips-us-gov-west-1xn--fiqs8sorumin" + + "anoxn--fiqz9southcarolinazawaxn--fjord-lraxn--fjq720axn--fl-ziax" + + "n--flor-jraxn--flw351exn--fpcrj9c3dxn--frde-grandrapidsouthwestf" + + "alenxn--frna-woarais-very-badaddjamalborkdalxn--frya-hraxn--fzc2" + + "c9e2circlegnicahcesuolocalhistoryazannefrankfurtoyokawaxn--fzys8" + + "d69uvgmailxn--g2xx48circusculturedumbrellajollanbibaidarq-axn--g" + + "ckr3f0fetsundxn--gecrj9citicateringebuildersvpalmspringsakerxn--" + + "ggaviika-8ya47hakodatemasekmshimosuwalkis-a-linux-useranishiarit" + + "abashijonawatextilevangerxn--gildeskl-g0axn--givuotna-8yandexhib" + + "itionxn--gjvik-wuaxn--gls-elacaixaxn--gmq050is-very-evillagexn--" + + "gmqw5axn--h-2failxn--h1aeghakonexn--h2brj9civilaviationiyodogawa" + + "xn--hbmer-xqaxn--hcesuolo-7ya35bauhaustevollinzaiitatebayashiiba" + + "hcavuotnagarakkestadupontariobninskarumaifareastcoastaldefencemb" + + "roideryonagoyaxastronomyokohamamatsudaegubs3-eu-central-1xn--her" + + "y-iraxn--hgebostad-g3axn--hmmrfeasta-s4acoachampionshiphopenair-" + + "surveillancebetsukubabia-goracleaningatlantachikawakayamagadance" + + "chirealtorlandxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmi" + + "r-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn" + + "--imr513nxn--indery-fyaotsurgeryxn--io0a7is-very-goodyearthadsel" + + "fipirangaxn--j1aefgulenxn--j1amhakubanxn--j6w193gxn--jlq61u9w7bb" + + "cartierhcloudcontrolappalanakhodkanagawaxn--jlster-byaroslavlaan" + + "derenxn--jrpeland-54axn--jvr189misasaguris-gonexn--k7yn95exn--ka" + + "rmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--kl" + + "t787dxn--kltp7dxn--kltx9axn--klty5xn--3bst00minnesotaketakatoris" + + "-certifieducatorahimeshimageandsoundandvisionxn--koluokta-7ya57h" + + "akuis-a-llamarylhursteinkjerusalembetsukuis-a-musicianxn--kprw13" + + "dxn--kpry57dxn--kpu716figuerestaurantoyotaris-a-doctorayxn--kput" + + "3is-very-nicexn--krager-gyasakaiminatoursowaxn--kranghke-b0axn--" + + "krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jetztrentino-stiro" + + "lxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasugis-very-sweetrenti" + + "no-s-tirollagriculturennebudapest-a-la-maisondriodejaneirocheste" + + "rxn--kvnangen-k0axn--l-1fairwindspreadbettingxn--l1accentureklam" + + "borghinikkoebenhavnxn--laheadju-7yasuokaratsuginamikatagamihobby" + + "-sitevaksdalxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagavi" + + "ika-52bbvacationsnasabaerobaticketsalondonetskasaokamisatohnosho" + + "oceanographicsaltdalipetskashibatakashimasfjordenaval-d-aosta-va" + + "lleyonagunikolaeventsalvadordalibabajddarchaeologyeongnamegawakk" + + "anaikawababydgoszczecinemailivornoceanographiquemergencyberlevag" + + "angaviikarugaulardalorenskogjovikashiharaxn--lesund-huaxn--lgbba" + + "t1ad8jevnakerxn--lgrd-poacivilisationrwifarsundxn--lhppi-xqaxn--" + + "linds-pratoyakokamishihoronobeokaminoyamatsuris-with-thebandoomd" + + "nsaliascolipicenord-odalxn--lns-qlavagiskexn--loabt-0qaxn--lrdal" + + "-sraxn--lrenskog-54axn--lt-liacivilizationxn--lten-granexn--lury" + + "-iraxn--mely-iraxn--merker-kuaxn--mgb2ddespydebergxn--mgb9awbfil" + + "ateliaxn--mgba3a3ejtrvchoshibukawaxn--mgba3a4f16axn--mgba3a4fran" + + 
"amizuholdingsmileksvikozagawaxn--mgba7c0bbn0axn--mgbaam7a8hakusa" + + "ndoyxn--mgbab2bdxn--mgbai9a5eva00beppubolognagasukesennumalselve" + + "ndrelloteneiiyamanobedzin-addrammenuernberglassassinationalherit" + + "agematsubarakawachinaganoharaogashimadachicagoboatsalzburgliwice" + + "mrxn--mgbai9azgqp6jewelryxn--mgbayh7gpaduaxn--mgbb9fbpobanazawax" + "n--mgbbh1a71exn--mgbc0a9azcgxn--mgberp4a5d4a87gxn--mgberp4a5d4ar" + - "xn--mgbqly7c0a67fbcivilwarmiamibungotakadaxn--mgbqly7cvafranzisk" + - "anerimarinexn--mgbt3dhdxn--mgbtf8flatangerxn--mgbx4cd0abogadocsc" + - "bgxn--mjndalen-64axn--mk0axisleofmanchesterxn--mkru45issmarterth" + - "anyouthadselfipirangaxn--mlatvuopmi-s4axn--mli-tlazioxn--mlselv-" + - "iuaxn--moreke-juaxn--mori-qsakatakatsukiwatarailwayxn--mosjen-ey" + - "atsukareliaxn--mot-tlaxn--mre-og-romsdal-qqbasilicataniautoscana" + - "dagestangeologyomitanobihirosakikamijimatta-varjjatarantomobenev" + - "entochiokinoshimalselvendrellimanowarudastronomyokohamamatsudaeg" + - "ubs3-ap-northeast-1xn--msy-ula0haldenxn--mtta-vrjjat-k7aflakstad" + - "aokagakibichuozuerichampionshiphopenair-surveillanceoxn--muost-0" + - "qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--nit225kouyamashikok" + - "uchuoxn--nmesjevuemie-tcbalestrandabergamoarekepnordlandxn--nnx3" + - "88axn--nodessakegawaxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn" + - "--ntsq17gxn--nttery-byaeseoullensvangxn--nvuotna-hwaxn--nyqy26ax" + - "n--o1achelyabinskydivingxn--o3cw4halsagaeroclubindallaspeziaxn--" + - "od0algxn--od0aq3batochigifts3-website-ap-northeast-1xn--ogbpf8fl" + - "ekkefjordxn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--p" + - "1acfagentsolognexn--p1aiwatexn--pgbs0dhammarfeastafricanonoichik" + - "awamisatodayxn--porsgu-sta26fedjelenia-goraxn--pssu33lxn--q9jyb4" + - "claimsannanxn--qcka1pmclickchristiansburgripexn--qqqt11misconfus" + - "edxn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabariwatsukiyonow" + - "ruzhgorodoyxn--rennesy-v1axn--rhkkervju-01afermobilyxn--rholt-mr" + - "agoworse-thandaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-" + - "5naroyxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawara" + - "xn--rny31hamurakamigoris-a-llamarumorimachidaxn--rros-gratangenx" + - "n--rskog-uuaxn--rst-0narusawaxn--rsta-francaiseharaxn--ryken-vua" + - "xn--ryrvik-byawatahamaxn--s-1faithandsoniigatakamatsukawaxn--s9b" + - "rj9clinicodynaliascoli-picenord-aurdalukowilliamhillupinkddieldd" + - "anuorrissadollsannohembygdsforbundxn--sandnessjen-ogbizhevskouzu" + - "shimasoyxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gratis-a-b" + - "ulls-fanxn--skierv-utazasnesoddenmarketplacexn--skjervy-v1axn--s" + - "kjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5narutokorozawaxn--s" + - "lt-elabourxn--smla-hraxn--smna-grazxn--snase-nraxn--sndre-land-0" + - "cbatsfjordnpagespeedmobilizerobiravocatanzaroweddingjerdrumemori" + - "alimitedivtasvuodnaharimamurogawaukraanghkemerovodkagoshimaizuru" + - "btsovskjervoyagemologicallfinanz-1xn--snes-poaxn--snsa-roaxn--sr" + - "-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbauha" + - "ustevollinkasaokaminoyamatsuriinetarnobrzegjerstadotsuruginankok" + - "ubunjiitatebayashiibahccavuotnagaraumallorcabbottarumizusawavoue" + - "squarezzoologyonabarudmurtiaurskog-holandroverhalla-speziamuseme" + - "ntambovalle-daostavernarvikarlsoyekaterinburgdyniaeroportalahead" + - "judaicaarborteaches-yogasawaracing12000xn--srfold-byaxn--srreisa" + - "-q1axn--srum-graxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-" + - "sqbbcngjesdalinzaiiyamanobeeldengeluidownloadrangedalipetskashib" + - 
"atakarazukamiokamikoaniihamatamakawajimarylandrobakrehamnatuurwe" + - "tenschappenaumburgjovikashiharaxaustinnasushiobaraquariumeloyali" + - "stockholmestrandigitalillehammerfest-le-patrondheimemerckarmoyok" + - "osukariyaltaijibigawagroks-theaternopilawakembuchikujobojibestad" + - "gcagliaridagawatchandclock-uralsk12xn--stre-toten-zcbbvacationsn" + - "asaarlandurhamburglassassinationalheritagematsubarakawachinagano" + - "haraogashimadachicagoboats3-website-ap-southeast-1xn--tjme-hraxn" + - "--tn0agrinetbankzxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgs" + - "tad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvedestrand" + - "xn--uc0ay4axn--uist22hangglidingxn--uisz3gxn--unjrga-rtamayufuet" + - "tertdasnetzxn--unup4yxn--uuwu58axn--vads-jraxn--vard-jraxn--vegr" + - "shei-c0axn--vermgensberater-ctbeppubolognagasukesennumalvikashiw" + - "araxn--vermgensberatung-pwberlincolnaustdalivornoceanographics3-" + - "website-ap-southeast-2xn--vestvgy-ixa6oxn--vg-yiabruzzoologicala" + - "bamagasakishimabaragusartsaritsynxn--vgan-qoaxn--vgsy-qoa0jewelr" + - "yxn--vgu402clintonoshoesanokfhskgroks-thisayamanashichinoheguris" + - "-a-democratoyonoxn--vhquvarggatrevisokndalxn--vler-qoaxn--vre-ei" + - "ker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bernuorockartuzyonag" + - "oyaxn--wcvs22dxn--wgbh1clothingrongaxn--wgbl6axn--xhq521beskidyn" + - "-o-saurlandes3-website-eu-west-1xn--xkc2al3hye2axn--xkc2dl3a5ee0" + - "hangoutsystemscloudapparocherkasyzranzanxn--yer-znarviikananporo" + - "xn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--45brj9christmasakimo" + - "betsuwanouchikuseihichisobetsuldalucernexn--ystre-slidre-ujbetai" + - "naboxfordealstahaugesundvrdnsdojogaszkolajollanbibaidarqhachijor" + - "pelandyndns-freemasonryonaguniversityoriikashiwazakiwienaval-d-a" + - "osta-valleyoutubentleyukindustriesteambulancebinagisodegauraxn--" + - "zbx025dxn--zf0ao64axn--zf0avxn--45q11chromedicaltanissettaishino" + - "makinderoyxn--zfr164bhartipschlesisches3-website-sa-east-1xxxn--" + - "4gbriminingxz" + "xn--mgbpl2fhvalerxn--mgbqly7c0a67fbcivilwarmanagementoyonakagyok" + + "utomobentleyxn--mgbqly7cvafranziskanerimarinextdirectoryxn--mgbt" + + "3dhdxn--mgbtf8flatangerxn--mgbtx2bernrtateshinanomachintaijindus" + + "triesteambulancepilepsykkylvenetogliattipschoenbrunnavigationavu" + + "otnakayamatsuzakinfinitiresamegawaxn--mgbx4cd0abogadocscbgxn--mi" + + "x082filminamifuranoxn--mix891finalxn--mjndalen-64axn--mk0axindia" + + "nmarketingxn--mk1bu44claimsaskatchewanggouvicenzaxn--mkru45isleo" + + "fmandalxn--mlatvuopmi-s4axn--mli-tlavangenxn--mlselv-iuaxn--more" + + "ke-juaxn--mori-qsakegawaxn--mosjen-eyatominamiawajikissmartertha" + + "nyoutubeeldengeluidxn--mot-tlazioxn--mre-og-romsdal-qqbeskidyn-o" + + "-saurlandesamnangerxn--msy-ula0haldenxn--mtta-vrjjat-k7aflakstad" + + "aokagakibichuoxn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e" + + "0axn--nit225kozakis-an-actorxn--nmesjevuemie-tcbalatinabearalvah" + + "kikonaioirasebastopologyeonggiehtavuoatnagaivuotnagaokakyotambad" + + "ajozorahkkeravjudygarlandxn--nnx388axn--nodessakuhokkaidontexist" + + "eingeekpnxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn" + + "--nttery-byaeserveftphiladelphiaareadmyblogsitexn--nvuotna-hwaxn" + + "--nyqy26axn--o1achattanooganorfolkebiblegallocuscountryestateofd" + + "elawarezzoologyxn--o3cw4halsagamiharaxn--od0algxn--od0aq3betaina" + + "boxfordealerdalottepsongdalenviknakanojohanamakinoharaxn--ogbpf8" + + "flekkefjordxn--oppegrd-ixaxn--ostery-fyatsukareliancexn--osyro-w" + + 
"uaxn--p1acfdxn--p1aiwchitosetoeiheijis-a-chefarmerseinewspaperxn" + + "--pbt977clickazimierz-dolnyxn--pgbs0dhammarfeastafricamagicherno" + + "vtsydneyxn--porsgu-sta26financexn--pssu33lxn--pssy2uxn--q9jyb4cl" + + "inicoffeedbackazoxn--qcka1pmclintonoshoesassaris-a-cubicle-slave" + + "llinowruzhgorodoyxn--qqqt11misconfusedxn--qxamurskjakdnepropetro" + + "vskiptveterinairealtychyllestadultrysilkosakaerodromegallupinbar" + + "celonagasakikuchikumagayagawalbrzycharternopilawalesundnpalacebi" + + "northwesternmutualimanowarudaurskog-holandroverhalla-speziaetnag" + + "ahamaroyekaterinburgdyniaeroportalaheadjudaicabbottarantomaritim" + + "ekeeping12000xn--rady-iraxn--rdal-poaxn--rde-ulaxn--rdy-0nabarix" + + "n--rennesy-v1axn--rhkkervju-01afineartschwarzgwangjuifminamiisel" + + "ectoyotomiyazakis-a-financialadvisor-aurdalxn--rholt-mragoworse-" + + "thandsoniizaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5na" + + "rusawaxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byatsus" + + "hiroxn--rny31hamurakamigoriginankokubunjis-a-nascarfanxn--rovu88" + + "bhartiffanycartoonarteducationalchikugokasejnyoriikashiwaraxn--r" + + "ros-granvindafjordxn--rskog-uuaxn--rst-0narutokonamegatakatsukix" + + "n--rsta-francaiseharaxn--ryken-vuaxn--ryrvik-byawaraxn--s-1faith" + + "eguardianxn--s9brj9clothingroundhandlingroznyxn--sandnessjen-ogb" + + "izhevskppspiegelxn--sandy-yuaxn--seral-lraxn--ses554gxn--sgne-gr" + + "atangenxn--skierv-utazasmatartcenterprisesakievenassisibenikihok" + + "umakogenglandxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland" + + "-fxaxn--slat-5narviikananporoxn--slt-elabourxn--smla-hraxn--smna" + + "-gratis-a-bulls-fanxn--snase-nraxn--sndre-land-0cbremangerxn--sn" + + "es-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1" + + "axn--sr-varanger-ggbielawallonieruchomoscienceandindustrynavyatk" + + "anazawaxn--srfold-byawatahamaxn--srreisa-q1axn--srum-grazxn--stf" + + "old-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbiellaakesvuemielecce" + + "verbankashiwazakiyokawaraxn--stre-toten-zcbieszczadygeyachimatai" + + "peigersundurbanpachigasakicks-assedicasadelamonedaxn--t60b56axn-" + + "-tckweatherchannelxn--tjme-hraxn--tn0agrigentomologyeongbukrasno" + + "darxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trgstad-r1axn--trn" + + "a-woaxn--troms-zuaxn--tysvr-vraxn--uc0atverranzanxn--uc0ay4axn--" + + "uist22hangglidingxn--uisz3gxn--unjrga-rtaobaomoriguchiharamcoalx" + + "n--unup4yxn--uuwu58axn--vads-jraxn--vard-jraxn--vegrshei-c0axn--" + + "vermgensberater-ctbievattorneyagawakuyabukijoburglobalashovhachi" + + "jorpelandurhamburglobodoes-itateyamaxn--vermgensberatung-pwbifuk" + + "agawalterxn--vestvgy-ixa6oxn--vg-yiabruzzoologicalabamagasakishi" + + "mabaragusartsaritsynxn--vgan-qoaxn--vgsy-qoa0jewishartrentino-su" + + "d-tirolxn--vgu402cloudcontrolledekakudamatsuenoharaxn--vhquversa" + + "illesomaxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5g" + + "xn--vuq861bihorologyukuhashimoichinosekigaharaxn--w4r85el8fhu5dn" + + "raxn--wcvs22dxn--wgbh1cloudfrontdoorxn--wgbl6axn--xhq521bikedati" + + "nglogowegroweibolzanordreisa-geekasukabeerxn--xkc2al3hye2axn--xk" + + "c2dl3a5ee0hangoutsystemscloudapparmaxn--y9a3aquariumishimatsunox" + + "n--yer-znarvikrasnoyarskomitamamuraxn--yfro4i67oxn--ygarden-p1ax" + + "n--ygbi2ammxn--3ds443gxn--ystre-slidre-ujbilbaogakidstvedestrand" + + "vrdnsamsungloppenzaokinawashirosatobamagazinedre-eikerxn--zbx025" + + "dxn--zf0ao64axn--zf0avxn--3e0b707exn--zfr164billustrationayorodd" + + "axperiaxxxn--3oq18vl8pn36axz" // nodes is the list of nodes. 
Each node is represented as a uint32, which
 // encodes the node's children, wildcard bit and node type (as an index into
@@ -435,7112 +452,7619 @@
 //	[15 bits] text index
 //	[ 6 bits] text length
 var nodes = [...]uint32{
- 0x00364ac3, // n0x0000 c0x0000 (---------------) + I abb
- 0x00364ac6, // n0x0001 c0x0000 (---------------) + I abbott
- 0x0033fd87, // n0x0002 c0x0000 (---------------) + I abogado
- 0x01a00b82, // n0x0003 c0x0006 (n0x040f-n0x0415) + I ac
- 0x002fe987, // n0x0004 c0x0000 (---------------) + I academy
- 0x003321c9, // n0x0005 c0x0000 (---------------) + I accenture
- 0x002ac88a, // n0x0006 c0x0000 (---------------) + I accountant
- 0x002ac88b, // n0x0007 c0x0000 (---------------) + I accountants
- 0x002a04c6, // n0x0008 c0x0000 (---------------) + I active
- 0x002388c5, // n0x0009 c0x0000 (---------------) + I actor
- 0x01e02902, // n0x000a c0x0007 (n0x0415-n0x0416) + I ad
- 0x0026d143, // n0x000b c0x0000 (---------------) + I ads
- 0x002fb4c5, // n0x000c c0x0000 (---------------) + I adult
- 0x0220d2c2, // n0x000d c0x0008 (n0x0416-n0x041e) + I ae
- 0x02633784, // n0x000e c0x0009 (n0x041e-n0x0477) + I aero
- 0x02a0b5c2, // n0x000f c0x000a (n0x0477-n0x047c) + I af
- 0x0024ddc3, // n0x0010 c0x0000 (---------------) + I afl
- 0x0034fc46, // n0x0011 c0x0000 (---------------) + I africa
- 0x02e02602, // n0x0012 c0x000b (n0x047c-n0x0481) + I ag
- 0x0023b206, // n0x0013 c0x0000 (---------------) + I agency
- 0x03205a42, // n0x0014 c0x000c (n0x0481-n0x0485) + I ai
- 0x00269b83, // n0x0015 c0x0000 (---------------) + I aig
- 0x002d2388, // n0x0016 c0x0000 (---------------) + I airforce
- 0x00285586, // n0x0017 c0x0000 (---------------) + I airtel
- 0x03600b02, // n0x0018 c0x000d (n0x0485-n0x048b) + I al
- 0x003615c9, // n0x0019 c0x0000 (---------------) + I allfinanz
- 0x00253a86, // n0x001a c0x0000 (---------------) + I alsace
- 0x00203582, // n0x001b c0x0000 (---------------) + I am
- 0x002f8c49, // n0x001c c0x0000 (---------------) + I amsterdam
- 0x03a02342, // n0x001d c0x000e (n0x048b-n0x048f) + I an
- 0x0023f509, // n0x001e c0x0000 (---------------) + I analytics
- 0x00322307, // n0x001f c0x0000 (---------------) + I android
- 0x03e07702, // n0x0020 c0x000f (n0x048f-n0x0495) + I ao
- 0x0024ca0a, // n0x0021 c0x0000 (---------------) + I apartments
- 0x0023be02, // n0x0022 c0x0000 (---------------) + I aq
- 0x0027ef49, // n0x0023 c0x0000 (---------------) + I aquarelle
- 0x042030c2, // n0x0024 c0x0010 (n0x0495-n0x049e) + I ar
- 0x0026e3c6, // n0x0025 c0x0000 (---------------) + I aramco
- 0x00229ec5, // n0x0026 c0x0000 (---------------) + I archi
- 0x003289c4, // n0x0027 c0x0000 (---------------) + I army
- 0x04aacb84, // n0x0028 c0x0012 (n0x049f-n0x04a5) + I arpa
- 0x0021dfc4, // n0x0029 c0x0000 (---------------) + I arte
- 0x04e00182, // n0x002a c0x0013 (n0x04a5-n0x04a6) + I as
- 0x0031ea44, // n0x002b c0x0000 (---------------) + I asia
- 0x0027834a, // n0x002c c0x0000 (---------------) + I associates
- 0x05201702, // n0x002d c0x0014 (n0x04a6-n0x04ad) + I at
- 0x002361c8, // n0x002e c0x0000 (---------------) + I attorney
- 0x05a01002, // n0x002f c0x0016 (n0x04ae-n0x04c0) + I au
- 0x00312707, // n0x0030 c0x0000 (---------------) + I auction
- 0x002aa905, // n0x0031 c0x0000 (---------------) + I audio
- 0x003220c6, // n0x0032 c0x0000 (---------------) + I author
- 0x00262c44, // n0x0033 c0x0000 (---------------) + I auto
- 0x00343dc5, // n0x0034 c0x0000 (---------------) + I autos
- 0x06a00102, // n0x0035 c0x001a (n0x04ce-n0x04cf) + I aw
- 0x0021a6c2, // n0x0036 c0x0000
(---------------) + I ax - 0x0036b783, // n0x0037 c0x0000 (---------------) + I axa - 0x06e01202, // n0x0038 c0x001b (n0x04cf-n0x04db) + I az - 0x00277e05, // n0x0039 c0x0000 (---------------) + I azure - 0x072076c2, // n0x003a c0x001c (n0x04db-n0x04e5) + I ba - 0x00261e84, // n0x003b c0x0000 (---------------) + I band - 0x0021c844, // n0x003c c0x0000 (---------------) + I bank - 0x00210ac3, // n0x003d c0x0000 (---------------) + I bar - 0x002f9609, // n0x003e c0x0000 (---------------) + I barcelona - 0x0030188b, // n0x003f c0x0000 (---------------) + I barclaycard - 0x00310148, // n0x0040 c0x0000 (---------------) + I barclays - 0x00311088, // n0x0041 c0x0000 (---------------) + I bargains - 0x00362ec7, // n0x0042 c0x0000 (---------------) + I bauhaus - 0x0032e2c6, // n0x0043 c0x0000 (---------------) + I bayern - 0x0760bfc2, // n0x0044 c0x001d (n0x04e5-n0x04ef) + I bb - 0x00369083, // n0x0045 c0x0000 (---------------) + I bbc - 0x0036ed04, // n0x0046 c0x0000 (---------------) + I bbva - 0x003690c3, // n0x0047 c0x0000 (---------------) + I bcn - 0x01703202, // n0x0048 c0x0005 (---------------)* o I bd - 0x07a00602, // n0x0049 c0x001e (n0x04ef-n0x04f1) + I be - 0x00200604, // n0x004a c0x0000 (---------------) + I beer - 0x003833c7, // n0x004b c0x0000 (---------------) + I bentley - 0x00376686, // n0x004c c0x0000 (---------------) + I berlin - 0x0036de84, // n0x004d c0x0000 (---------------) + I best - 0x07f3a2c2, // n0x004e c0x001f (n0x04f1-n0x04f2) + I bf - 0x08340002, // n0x004f c0x0020 (n0x04f2-n0x0516) + I bg - 0x0862c202, // n0x0050 c0x0021 (n0x0516-n0x051b) + I bh - 0x00385506, // n0x0051 c0x0000 (---------------) + I bharti - 0x08a00002, // n0x0052 c0x0022 (n0x051b-n0x0520) + I bi - 0x0023ac45, // n0x0053 c0x0000 (---------------) + I bible - 0x00304743, // n0x0054 c0x0000 (---------------) + I bid - 0x00205684, // n0x0055 c0x0000 (---------------) + I bike - 0x002cc444, // n0x0056 c0x0000 (---------------) + I bing - 0x002cc445, // n0x0057 c0x0000 (---------------) + I bingo - 0x00208743, // n0x0058 c0x0000 (---------------) + I bio - 0x08e02183, // n0x0059 c0x0023 (n0x0520-n0x0527) + I biz - 0x0920bb82, // n0x005a c0x0024 (n0x0527-n0x052b) + I bj - 0x00282385, // n0x005b c0x0000 (---------------) + I black - 0x0028238b, // n0x005c c0x0000 (---------------) + I blackfriday - 0x0020cb09, // n0x005d c0x0000 (---------------) + I bloomberg - 0x0020e344, // n0x005e c0x0000 (---------------) + I blue - 0x0960e5c2, // n0x005f c0x0025 (n0x052b-n0x0530) + I bm - 0x0020e5c3, // n0x0060 c0x0000 (---------------) + I bms - 0x00212503, // n0x0061 c0x0000 (---------------) + I bmw - 0x01612ac2, // n0x0062 c0x0005 (---------------)* o I bn - 0x0025d003, // n0x0063 c0x0000 (---------------) + I bnl - 0x00212aca, // n0x0064 c0x0000 (---------------) + I bnpparibas - 0x09a10042, // n0x0065 c0x0026 (n0x0530-n0x0539) + I bo - 0x00370545, // n0x0066 c0x0000 (---------------) + I boats - 0x00214283, // n0x0067 c0x0000 (---------------) + I bom - 0x00214cc4, // n0x0068 c0x0000 (---------------) + I bond - 0x002c4503, // n0x0069 c0x0000 (---------------) + I boo - 0x00219483, // n0x006a c0x0000 (---------------) + I bot - 0x0021b188, // n0x006b c0x0000 (---------------) + I boutique - 0x09e07bc2, // n0x006c c0x0027 (n0x0539-n0x057f) + I br - 0x0021bb88, // n0x006d c0x0000 (---------------) + I bradesco - 0x002164cb, // n0x006e c0x0000 (---------------) + I bridgestone - 0x00227806, // n0x006f c0x0000 (---------------) + I broker - 0x002297c8, // n0x0070 c0x0000 (---------------) + I brussels - 
0x0a607242, // n0x0071 c0x0029 (n0x0580-n0x0585) + I bs - 0x0aa3ad82, // n0x0072 c0x002a (n0x0585-n0x058a) + I bt - 0x002ee388, // n0x0073 c0x0000 (---------------) + I budapest - 0x0024a845, // n0x0074 c0x0000 (---------------) + I build - 0x0024a848, // n0x0075 c0x0000 (---------------) + I builders - 0x002741c8, // n0x0076 c0x0000 (---------------) + I business - 0x0022c983, // n0x0077 c0x0000 (---------------) + I buy - 0x0022dac4, // n0x0078 c0x0000 (---------------) + I buzz - 0x0036ed42, // n0x0079 c0x0000 (---------------) + I bv - 0x0ae2e042, // n0x007a c0x002b (n0x058a-n0x058c) + I bw - 0x0b206302, // n0x007b c0x002c (n0x058c-n0x0590) + I by - 0x0b62eac2, // n0x007c c0x002d (n0x0590-n0x0596) + I bz - 0x0022eac3, // n0x007d c0x0000 (---------------) + I bzh - 0x0ba14582, // n0x007e c0x002e (n0x0596-n0x05a7) + I ca - 0x00364a83, // n0x007f c0x0000 (---------------) + I cab - 0x00219603, // n0x0080 c0x0000 (---------------) + I cal - 0x00361584, // n0x0081 c0x0000 (---------------) + I call - 0x0025d206, // n0x0082 c0x0000 (---------------) + I camera - 0x0023e184, // n0x0083 c0x0000 (---------------) + I camp - 0x002d800e, // n0x0084 c0x0000 (---------------) + I cancerresearch - 0x0034fd45, // n0x0085 c0x0000 (---------------) + I canon - 0x0023ba08, // n0x0086 c0x0000 (---------------) + I capetown - 0x002e21c7, // n0x0087 c0x0000 (---------------) + I capital - 0x0022f847, // n0x0088 c0x0000 (---------------) + I caravan - 0x00301a45, // n0x0089 c0x0000 (---------------) + I cards - 0x00241e44, // n0x008a c0x0000 (---------------) + I care - 0x00241e46, // n0x008b c0x0000 (---------------) + I career - 0x00241e47, // n0x008c c0x0000 (---------------) + I careers - 0x002d5284, // n0x008d c0x0000 (---------------) + I cars - 0x0021b407, // n0x008e c0x0000 (---------------) + I cartier - 0x00320f84, // n0x008f c0x0000 (---------------) + I casa - 0x00222744, // n0x0090 c0x0000 (---------------) + I cash - 0x0023b546, // n0x0091 c0x0000 (---------------) + I casino - 0x002192c3, // n0x0092 c0x0000 (---------------) + I cat - 0x0024a608, // n0x0093 c0x0000 (---------------) + I catering - 0x003495c3, // n0x0094 c0x0000 (---------------) + I cba - 0x0025cfc3, // n0x0095 c0x0000 (---------------) + I cbn - 0x0be020c2, // n0x0096 c0x002f (n0x05a7-n0x05ab) + I cc - 0x0c30f042, // n0x0097 c0x0030 (n0x05ab-n0x05ac) + I cd - 0x002442c6, // n0x0098 c0x0000 (---------------) + I center - 0x00347c83, // n0x0099 c0x0000 (---------------) + I ceo - 0x003808c4, // n0x009a c0x0000 (---------------) + I cern - 0x0c64d1c2, // n0x009b c0x0031 (n0x05ac-n0x05ad) + I cf - 0x0034f083, // n0x009c c0x0000 (---------------) + I cfa - 0x0024d1c3, // n0x009d c0x0000 (---------------) + I cfd - 0x0021aa82, // n0x009e c0x0000 (---------------) + I cg - 0x0ca02ac2, // n0x009f c0x0032 (n0x05ad-n0x05ae) + I ch - 0x002af1c7, // n0x00a0 c0x0000 (---------------) + I channel - 0x0020f104, // n0x00a1 c0x0000 (---------------) + I chat - 0x0024c945, // n0x00a2 c0x0000 (---------------) + I cheap - 0x0029c845, // n0x00a3 c0x0000 (---------------) + I chloe - 0x0037fc89, // n0x00a4 c0x0000 (---------------) + I christmas - 0x00384946, // n0x00a5 c0x0000 (---------------) + I chrome - 0x00313846, // n0x00a6 c0x0000 (---------------) + I church - 0x0ce20182, // n0x00a7 c0x0033 (n0x05ae-n0x05bd) + I ci - 0x002629c6, // n0x00a8 c0x0000 (---------------) + I circle - 0x00320e85, // n0x00a9 c0x0000 (---------------) + I citic - 0x003242c4, // n0x00aa c0x0000 (---------------) + I city - 0x003242c8, // n0x00ab c0x0000 
(---------------) + I cityeats - 0x0d206182, // n0x00ac c0x0034 (n0x05bd-n0x05be)* o I ck - 0x0d600402, // n0x00ad c0x0035 (n0x05be-n0x05c2) + I cl - 0x00351006, // n0x00ae c0x0000 (---------------) + I claims - 0x002c4108, // n0x00af c0x0000 (---------------) + I cleaning - 0x00351585, // n0x00b0 c0x0000 (---------------) + I click - 0x003590c6, // n0x00b1 c0x0000 (---------------) + I clinic - 0x0037c5c8, // n0x00b2 c0x0000 (---------------) + I clothing - 0x0034cac4, // n0x00b3 c0x0000 (---------------) + I club - 0x0daa4442, // n0x00b4 c0x0036 (n0x05c2-n0x05c6) + I cm - 0x0de2fe42, // n0x00b5 c0x0037 (n0x05c6-n0x05f3) + I cn - 0x0ea00882, // n0x00b6 c0x003a (n0x05f5-n0x0602) + I co - 0x0026e4c5, // n0x00b7 c0x0000 (---------------) + I coach - 0x0027b385, // n0x00b8 c0x0000 (---------------) + I codes - 0x00273e86, // n0x00b9 c0x0000 (---------------) + I coffee - 0x00230807, // n0x00ba c0x0000 (---------------) + I college - 0x00230b07, // n0x00bb c0x0000 (---------------) + I cologne - 0x0ee32dc3, // n0x00bc c0x003b (n0x0602-n0x06c7) + I com - 0x002ca648, // n0x00bd c0x0000 (---------------) + I commbank - 0x00232dc9, // n0x00be c0x0000 (---------------) + I community - 0x00233487, // n0x00bf c0x0000 (---------------) + I company - 0x00234a08, // n0x00c0 c0x0000 (---------------) + I computer - 0x00235206, // n0x00c1 c0x0000 (---------------) + I condos - 0x00235b0c, // n0x00c2 c0x0000 (---------------) + I construction - 0x00236b4a, // n0x00c3 c0x0000 (---------------) + I consulting - 0x0023878b, // n0x00c4 c0x0000 (---------------) + I contractors - 0x00239887, // n0x00c5 c0x0000 (---------------) + I cooking - 0x00239ec4, // n0x00c6 c0x0000 (---------------) + I cool - 0x0023a884, // n0x00c7 c0x0000 (---------------) + I coop - 0x0023bc07, // n0x00c8 c0x0000 (---------------) + I corsica - 0x0025d547, // n0x00c9 c0x0000 (---------------) + I country - 0x0023ebc7, // n0x00ca c0x0000 (---------------) + I courses - 0x0fe0b542, // n0x00cb c0x003f (n0x06e9-n0x06f0) + I cr - 0x0023fb86, // n0x00cc c0x0000 (---------------) + I credit - 0x0023fb8a, // n0x00cd c0x0000 (---------------) + I creditcard - 0x00240f07, // n0x00ce c0x0000 (---------------) + I cricket - 0x002426c5, // n0x00cf c0x0000 (---------------) + I crown - 0x00242803, // n0x00d0 c0x0000 (---------------) + I crs - 0x002431c7, // n0x00d1 c0x0000 (---------------) + I cruises - 0x0033ff43, // n0x00d2 c0x0000 (---------------) + I csc - 0x10243502, // n0x00d3 c0x0040 (n0x06f0-n0x06f6) + I cu - 0x0024350a, // n0x00d4 c0x0000 (---------------) + I cuisinella - 0x10733d82, // n0x00d5 c0x0041 (n0x06f6-n0x06f7) + I cv - 0x10b4c782, // n0x00d6 c0x0042 (n0x06f7-n0x06fb) + I cw - 0x10e45302, // n0x00d7 c0x0043 (n0x06fb-n0x06fd) + I cx - 0x0160a6c2, // n0x00d8 c0x0005 (---------------)* o I cy - 0x002c5c05, // n0x00d9 c0x0000 (---------------) + I cymru - 0x11202882, // n0x00da c0x0044 (n0x06fd-n0x06fe) + I cz - 0x00229545, // n0x00db c0x0000 (---------------) + I dabur - 0x0020db83, // n0x00dc c0x0000 (---------------) + I dad - 0x0030ac05, // n0x00dd c0x0000 (---------------) + I dance - 0x00218204, // n0x00de c0x0000 (---------------) + I date - 0x002113c6, // n0x00df c0x0000 (---------------) + I dating - 0x00202c06, // n0x00e0 c0x0000 (---------------) + I datsun - 0x00282583, // n0x00e1 c0x0000 (---------------) + I day - 0x002003c4, // n0x00e2 c0x0000 (---------------) + I dclk - 0x11607cc2, // n0x00e3 c0x0045 (n0x06fe-n0x0706) + I de - 0x00381205, // n0x00e4 c0x0000 (---------------) + I deals - 0x0025f546, // n0x00e5 
c0x0000 (---------------) + I degree - 0x002734c8, // n0x00e6 c0x0000 (---------------) + I delivery - 0x00231b44, // n0x00e7 c0x0000 (---------------) + I dell - 0x0037a0c8, // n0x00e8 c0x0000 (---------------) + I democrat - 0x0033ae86, // n0x00e9 c0x0000 (---------------) + I dental - 0x00266107, // n0x00ea c0x0000 (---------------) + I dentist - 0x00237ec4, // n0x00eb c0x0000 (---------------) + I desi - 0x00237ec6, // n0x00ec c0x0000 (---------------) + I design - 0x0025e703, // n0x00ed c0x0000 (---------------) + I dev - 0x002b5b88, // n0x00ee c0x0000 (---------------) + I diamonds - 0x00300344, // n0x00ef c0x0000 (---------------) + I diet - 0x0036c3c7, // n0x00f0 c0x0000 (---------------) + I digital - 0x0032e446, // n0x00f1 c0x0000 (---------------) + I direct - 0x0032e449, // n0x00f2 c0x0000 (---------------) + I directory - 0x00322488, // n0x00f3 c0x0000 (---------------) + I discount - 0x0022ee82, // n0x00f4 c0x0000 (---------------) + I dj - 0x11aa46c2, // n0x00f5 c0x0046 (n0x0706-n0x0707) + I dk - 0x11e22502, // n0x00f6 c0x0047 (n0x0707-n0x070c) + I dm - 0x0035f243, // n0x00f7 c0x0000 (---------------) + I dnp - 0x12209082, // n0x00f8 c0x0048 (n0x070c-n0x0716) + I do - 0x0033fec4, // n0x00f9 c0x0000 (---------------) + I docs - 0x002288c3, // n0x00fa c0x0000 (---------------) + I dog - 0x00235fc4, // n0x00fb c0x0000 (---------------) + I doha - 0x002fdd87, // n0x00fc c0x0000 (---------------) + I domains - 0x002ccb46, // n0x00fd c0x0000 (---------------) + I doosan - 0x00369948, // n0x00fe c0x0000 (---------------) + I download - 0x002e0d46, // n0x00ff c0x0000 (---------------) + I durban - 0x00309cc4, // n0x0100 c0x0000 (---------------) + I dvag - 0x1260fa42, // n0x0101 c0x0049 (n0x0716-n0x071e) + I dz - 0x002412c5, // n0x0102 c0x0000 (---------------) + I earth - 0x002f5f43, // n0x0103 c0x0000 (---------------) + I eat - 0x12a02082, // n0x0104 c0x004a (n0x071e-n0x072a) + I ec - 0x0022f445, // n0x0105 c0x0000 (---------------) + I edeka - 0x0021e083, // n0x0106 c0x0000 (---------------) + I edu - 0x0021e089, // n0x0107 c0x0000 (---------------) + I education - 0x12e00642, // n0x0108 c0x004b (n0x072a-n0x0734) + I ee - 0x13205d82, // n0x0109 c0x004c (n0x0734-n0x073d) + I eg - 0x002e1d05, // n0x010a c0x0000 (---------------) + I email - 0x0036ccc6, // n0x010b c0x0000 (---------------) + I emerck - 0x002bf046, // n0x010c c0x0000 (---------------) + I energy - 0x0030dc48, // n0x010d c0x0000 (---------------) + I engineer - 0x0030dc4b, // n0x010e c0x0000 (---------------) + I engineering - 0x002ec28b, // n0x010f c0x0000 (---------------) + I enterprises - 0x00218b85, // n0x0110 c0x0000 (---------------) + I epson - 0x002126c9, // n0x0111 c0x0000 (---------------) + I equipment - 0x01600682, // n0x0112 c0x0005 (---------------)* o I er - 0x0020ce04, // n0x0113 c0x0000 (---------------) + I erni - 0x13601e42, // n0x0114 c0x004d (n0x073d-n0x0742) + I es - 0x00364fc3, // n0x0115 c0x0000 (---------------) + I esq - 0x0025d706, // n0x0116 c0x0000 (---------------) + I estate - 0x13e02e82, // n0x0117 c0x004f (n0x0743-n0x074a) + I et - 0x0021b882, // n0x0118 c0x0000 (---------------) + I eu - 0x00319f4a, // n0x0119 c0x0000 (---------------) + I eurovision - 0x002bbd83, // n0x011a c0x0000 (---------------) + I eus - 0x0020d306, // n0x011b c0x0000 (---------------) + I events - 0x0021c748, // n0x011c c0x0000 (---------------) + I everbank - 0x002ed008, // n0x011d c0x0000 (---------------) + I exchange - 0x00319806, // n0x011e c0x0000 (---------------) + I expert - 0x00305347, // n0x011f 
c0x0000 (---------------) + I exposed - 0x0034f0c4, // n0x0120 c0x0000 (---------------) + I fage - 0x00320744, // n0x0121 c0x0000 (---------------) + I fail - 0x00331989, // n0x0122 c0x0000 (---------------) + I fairwinds - 0x00358745, // n0x0123 c0x0000 (---------------) + I faith - 0x0021a1c3, // n0x0124 c0x0000 (---------------) + I fan - 0x002ea804, // n0x0125 c0x0000 (---------------) + I fans - 0x002125c4, // n0x0126 c0x0000 (---------------) + I farm - 0x002c9c47, // n0x0127 c0x0000 (---------------) + I fashion - 0x002d0e44, // n0x0128 c0x0000 (---------------) + I fast - 0x00273f48, // n0x0129 c0x0000 (---------------) + I feedback - 0x00245847, // n0x012a c0x0000 (---------------) + I ferrero - 0x14206f02, // n0x012b c0x0050 (n0x074a-n0x074d) + I fi - 0x00248145, // n0x012c c0x0000 (---------------) + I final - 0x002483c7, // n0x012d c0x0000 (---------------) + I finance - 0x00244909, // n0x012e c0x0000 (---------------) + I financial - 0x00249ac9, // n0x012f c0x0000 (---------------) + I firestone - 0x0024a308, // n0x0130 c0x0000 (---------------) + I firmdale - 0x0024ac04, // n0x0131 c0x0000 (---------------) + I fish - 0x0024ac07, // n0x0132 c0x0000 (---------------) + I fishing - 0x0024b083, // n0x0133 c0x0000 (---------------) + I fit - 0x0024b207, // n0x0134 c0x0000 (---------------) + I fitness - 0x01613802, // n0x0135 c0x0005 (---------------)* o I fj - 0x016a1842, // n0x0136 c0x0005 (---------------)* o I fk - 0x0024cd47, // n0x0137 c0x0000 (---------------) + I flights - 0x0024e1c7, // n0x0138 c0x0000 (---------------) + I florist - 0x0024ed07, // n0x0139 c0x0000 (---------------) + I flowers - 0x0024f088, // n0x013a c0x0000 (---------------) + I flsmidth - 0x0024f743, // n0x013b c0x0000 (---------------) + I fly - 0x00257f82, // n0x013c c0x0000 (---------------) + I fm - 0x00208ac2, // n0x013d c0x0000 (---------------) + I fo - 0x00250343, // n0x013e c0x0000 (---------------) + I foo - 0x00250348, // n0x013f c0x0000 (---------------) + I football - 0x00381144, // n0x0140 c0x0000 (---------------) + I ford - 0x002527c5, // n0x0141 c0x0000 (---------------) + I forex - 0x00253887, // n0x0142 c0x0000 (---------------) + I forsale - 0x002d994a, // n0x0143 c0x0000 (---------------) + I foundation - 0x1462fc02, // n0x0144 c0x0051 (n0x074d-n0x0765) + I fr - 0x0025ba03, // n0x0145 c0x0000 (---------------) + I frl - 0x0025bac7, // n0x0146 c0x0000 (---------------) + I frogans - 0x0027bdc4, // n0x0147 c0x0000 (---------------) + I fund - 0x0027da49, // n0x0148 c0x0000 (---------------) + I furniture - 0x00280e06, // n0x0149 c0x0000 (---------------) + I futbol - 0x00200fc2, // n0x014a c0x0000 (---------------) + I ga - 0x00237383, // n0x014b c0x0000 (---------------) + I gal - 0x00237387, // n0x014c c0x0000 (---------------) + I gallery - 0x002196c6, // n0x014d c0x0000 (---------------) + I garden - 0x0020cd02, // n0x014e c0x0000 (---------------) + I gb - 0x0035acc4, // n0x014f c0x0000 (---------------) + I gbiz - 0x00218cc2, // n0x0150 c0x0000 (---------------) + I gd - 0x002dd243, // n0x0151 c0x0000 (---------------) + I gdn - 0x14a029c2, // n0x0152 c0x0052 (n0x0765-n0x076c) + I ge - 0x00250803, // n0x0153 c0x0000 (---------------) + I gea - 0x00307a04, // n0x0154 c0x0000 (---------------) + I gent - 0x00282142, // n0x0155 c0x0000 (---------------) + I gf - 0x14e08b42, // n0x0156 c0x0053 (n0x076c-n0x076f) + I gg - 0x0032e684, // n0x0157 c0x0000 (---------------) + I ggee - 0x1520db02, // n0x0158 c0x0054 (n0x076f-n0x0774) + I gh - 0x15608b82, // n0x0159 c0x0055 
(n0x0774-n0x077a) + I gi - 0x0034d5c4, // n0x015a c0x0000 (---------------) + I gift - 0x0034d5c5, // n0x015b c0x0000 (---------------) + I gifts - 0x002be445, // n0x015c c0x0000 (---------------) + I gives - 0x00219b06, // n0x015d c0x0000 (---------------) + I giving - 0x002079c2, // n0x015e c0x0000 (---------------) + I gl - 0x0036f445, // n0x015f c0x0000 (---------------) + I glass - 0x00224c83, // n0x0160 c0x0000 (---------------) + I gle - 0x0020af86, // n0x0161 c0x0000 (---------------) + I global - 0x0020ff85, // n0x0162 c0x0000 (---------------) + I globo - 0x00219c42, // n0x0163 c0x0000 (---------------) + I gm - 0x00219c45, // n0x0164 c0x0000 (---------------) + I gmail - 0x00226f83, // n0x0165 c0x0000 (---------------) + I gmo - 0x0022cd43, // n0x0166 c0x0000 (---------------) + I gmx - 0x15a0c982, // n0x0167 c0x0056 (n0x077a-n0x0780) + I gn - 0x00218749, // n0x0168 c0x0000 (---------------) + I goldpoint - 0x0024ad84, // n0x0169 c0x0000 (---------------) + I golf - 0x0027b203, // n0x016a c0x0000 (---------------) + I goo - 0x0027b204, // n0x016b c0x0000 (---------------) + I goog - 0x0027b206, // n0x016c c0x0000 (---------------) + I google - 0x00297e83, // n0x016d c0x0000 (---------------) + I gop - 0x00252ac3, // n0x016e c0x0000 (---------------) + I got - 0x00209ac3, // n0x016f c0x0000 (---------------) + I gov - 0x15ed2d02, // n0x0170 c0x0057 (n0x0780-n0x0786) + I gp - 0x0022d4c2, // n0x0171 c0x0000 (---------------) + I gq - 0x16206b02, // n0x0172 c0x0058 (n0x0786-n0x078c) + I gr - 0x00376d88, // n0x0173 c0x0000 (---------------) + I graphics - 0x0035bd86, // n0x0174 c0x0000 (---------------) + I gratis - 0x00252ec5, // n0x0175 c0x0000 (---------------) + I green - 0x00351a05, // n0x0176 c0x0000 (---------------) + I gripe - 0x00226905, // n0x0177 c0x0000 (---------------) + I group - 0x00209602, // n0x0178 c0x0000 (---------------) + I gs - 0x166002c2, // n0x0179 c0x0059 (n0x078c-n0x0793) + I gt - 0x01602642, // n0x017a c0x0005 (---------------)* o I gu - 0x00262905, // n0x017b c0x0000 (---------------) + I gucci - 0x002ce404, // n0x017c c0x0000 (---------------) + I guge - 0x00242d05, // n0x017d c0x0000 (---------------) + I guide - 0x00246107, // n0x017e c0x0000 (---------------) + I guitars - 0x00261244, // n0x017f c0x0000 (---------------) + I guru - 0x002252c2, // n0x0180 c0x0000 (---------------) + I gw - 0x16a04f82, // n0x0181 c0x005a (n0x0793-n0x0796) + I gy - 0x0036f2c7, // n0x0182 c0x0000 (---------------) + I hamburg - 0x0037e007, // n0x0183 c0x0000 (---------------) + I hangout - 0x00362f84, // n0x0184 c0x0000 (---------------) + I haus - 0x00241cca, // n0x0185 c0x0000 (---------------) + I healthcare - 0x002cbd84, // n0x0186 c0x0000 (---------------) + I help - 0x00241484, // n0x0187 c0x0000 (---------------) + I here - 0x002a7806, // n0x0188 c0x0000 (---------------) + I hermes - 0x00347706, // n0x0189 c0x0000 (---------------) + I hiphop - 0x00276b07, // n0x018a c0x0000 (---------------) + I hitachi - 0x00262843, // n0x018b c0x0000 (---------------) + I hiv - 0x16e301c2, // n0x018c c0x005b (n0x0796-n0x07ae) + I hk - 0x0024c142, // n0x018d c0x0000 (---------------) + I hm - 0x17206802, // n0x018e c0x005c (n0x07ae-n0x07b4) + I hn - 0x0033b8c8, // n0x018f c0x0000 (---------------) + I holdings - 0x00299247, // n0x0190 c0x0000 (---------------) + I holiday - 0x002998c5, // n0x0191 c0x0000 (---------------) + I homes - 0x0029a305, // n0x0192 c0x0000 (---------------) + I honda - 0x0029cbc5, // n0x0193 c0x0000 (---------------) + I horse - 0x00208344, // n0x0194 
c0x0000 (---------------) + I host - 0x0028cdc7, // n0x0195 c0x0000 (---------------) + I hosting - 0x0029ea07, // n0x0196 c0x0000 (---------------) + I hotmail - 0x0022ca85, // n0x0197 c0x0000 (---------------) + I house - 0x002cfd43, // n0x0198 c0x0000 (---------------) + I how - 0x17636902, // n0x0199 c0x005d (n0x07b4-n0x07b8) + I hr - 0x002144c4, // n0x019a c0x0000 (---------------) + I hsbc - 0x17a496c2, // n0x019b c0x005e (n0x07b8-n0x07c9) + I ht - 0x17e045c2, // n0x019c c0x005f (n0x07c9-n0x07e9) + I hu - 0x002da443, // n0x019d c0x0000 (---------------) + I ibm - 0x00207ac3, // n0x019e c0x0000 (---------------) + I ice - 0x18205942, // n0x019f c0x0060 (n0x07e9-n0x07f4) + I id - 0x18600042, // n0x01a0 c0x0061 (n0x07f4-n0x07f6) + I ie - 0x00257f43, // n0x01a1 c0x0000 (---------------) + I ifm - 0x00363745, // n0x01a2 c0x0000 (---------------) + I iinet - 0x18a00d82, // n0x01a3 c0x0062 (n0x07f6-n0x07f7)* o I il - 0x192051c2, // n0x01a4 c0x0064 (n0x07f8-n0x07ff) + I im - 0x002ed984, // n0x01a5 c0x0000 (---------------) + I immo - 0x002ed98a, // n0x01a6 c0x0000 (---------------) + I immobilien - 0x19a00242, // n0x01a7 c0x0066 (n0x0801-n0x080e) + I in - 0x0038360a, // n0x01a8 c0x0000 (---------------) + I industries - 0x00206e88, // n0x01a9 c0x0000 (---------------) + I infiniti - 0x19e08a44, // n0x01aa c0x0067 (n0x080e-n0x0818) + I info - 0x00200243, // n0x01ab c0x0000 (---------------) + I ing - 0x0020ec03, // n0x01ac c0x0000 (---------------) + I ink - 0x003111c9, // n0x01ad c0x0000 (---------------) + I institute - 0x0020e7c6, // n0x01ae c0x0000 (---------------) + I insure - 0x1a2188c3, // n0x01af c0x0068 (n0x0818-n0x0819) + I int - 0x002f02cd, // n0x01b0 c0x0000 (---------------) + I international - 0x0021560b, // n0x01b1 c0x0000 (---------------) + I investments - 0x1a601542, // n0x01b2 c0x0069 (n0x0819-n0x081c) + I io - 0x00341348, // n0x01b3 c0x0000 (---------------) + I ipiranga - 0x1aa06c42, // n0x01b4 c0x006a (n0x081c-n0x0822) + I iq - 0x1ae07042, // n0x01b5 c0x006b (n0x0822-n0x082b) + I ir - 0x002cb045, // n0x01b6 c0x0000 (---------------) + I irish - 0x1b2066c2, // n0x01b7 c0x006c (n0x082b-n0x0832) + I is - 0x002066c3, // n0x01b8 c0x0000 (---------------) + I ist - 0x002148c8, // n0x01b9 c0x0000 (---------------) + I istanbul - 0x1b6014c2, // n0x01ba c0x006d (n0x0832-n0x09a3) + I it - 0x00212204, // n0x01bb c0x0000 (---------------) + I itau - 0x002a0d03, // n0x01bc c0x0000 (---------------) + I iwc - 0x00229dc6, // n0x01bd c0x0000 (---------------) + I jaguar - 0x00333644, // n0x01be c0x0000 (---------------) + I java - 0x0025cf83, // n0x01bf c0x0000 (---------------) + I jcb - 0x1ba0c582, // n0x01c0 c0x006e (n0x09a3-n0x09a6) + I je - 0x0032fcc5, // n0x01c1 c0x0000 (---------------) + I jetzt - 0x002a1cc3, // n0x01c2 c0x0000 (---------------) + I jlc - 0x01770dc2, // n0x01c3 c0x0005 (---------------)* o I jm - 0x1be02e02, // n0x01c4 c0x006f (n0x09a6-n0x09ae) + I jo - 0x0027e604, // n0x01c5 c0x0000 (---------------) + I jobs - 0x00207886, // n0x01c6 c0x0000 (---------------) + I joburg - 0x00324543, // n0x01c7 c0x0000 (---------------) + I jot - 0x002a2383, // n0x01c8 c0x0000 (---------------) + I joy - 0x1c2a2b02, // n0x01c9 c0x0070 (n0x09ae-n0x0a1d) + I jp - 0x002a2bc4, // n0x01ca c0x0000 (---------------) + I jprs - 0x002d1e86, // n0x01cb c0x0000 (---------------) + I juegos - 0x00223286, // n0x01cc c0x0000 (---------------) + I kaufen - 0x00359e04, // n0x01cd c0x0000 (---------------) + I kddi - 0x01601e02, // n0x01ce c0x0005 (---------------)* o I ke - 0x003796c3, // 
n0x01cf c0x0000 (---------------) + I kfh - 0x29f797c2, // n0x01d0 c0x00a7 (n0x10b1-n0x10b7) + I kg - 0x016230c2, // n0x01d1 c0x0005 (---------------)* o I kh - 0x2a205482, // n0x01d2 c0x00a8 (n0x10b7-n0x10be) + I ki - 0x002d2083, // n0x01d3 c0x0000 (---------------) + I kim - 0x00385086, // n0x01d4 c0x0000 (---------------) + I kinder - 0x002c9407, // n0x01d5 c0x0000 (---------------) + I kitchen - 0x00382c84, // n0x01d6 c0x0000 (---------------) + I kiwi - 0x2a686642, // n0x01d7 c0x00a9 (n0x10be-n0x10cf) + I km - 0x2aa33fc2, // n0x01d8 c0x00aa (n0x10cf-n0x10d3) + I kn - 0x0024db85, // n0x01d9 c0x0000 (---------------) + I koeln - 0x2ae2b602, // n0x01da c0x00ab (n0x10d3-n0x10d9) + I kp - 0x2b20c642, // n0x01db c0x00ac (n0x10d9-n0x10f7) + I kr - 0x0032f143, // n0x01dc c0x0000 (---------------) + I krd - 0x002a4b04, // n0x01dd c0x0000 (---------------) + I kred - 0x016b1bc2, // n0x01de c0x0005 (---------------)* o I kw - 0x2b611002, // n0x01df c0x00ad (n0x10f7-n0x10fc) + I ky - 0x00262fc5, // n0x01e0 c0x0000 (---------------) + I kyoto - 0x2bb713c2, // n0x01e1 c0x00ae (n0x10fc-n0x1102) + I kz - 0x2be000c2, // n0x01e2 c0x00af (n0x1102-n0x110b) + I la - 0x0031fb47, // n0x01e3 c0x0000 (---------------) + I lacaixa - 0x00215d44, // n0x01e4 c0x0000 (---------------) + I land - 0x00365949, // n0x01e5 c0x0000 (---------------) + I landrover - 0x0020b0c3, // n0x01e6 c0x0000 (---------------) + I lat - 0x00215907, // n0x01e7 c0x0000 (---------------) + I latrobe - 0x00279cc6, // n0x01e8 c0x0000 (---------------) + I lawyer - 0x2c207682, // n0x01e9 c0x00b0 (n0x110b-n0x1110) + I lb - 0x2c61e302, // n0x01ea c0x00b1 (n0x1110-n0x1116) + I lc - 0x0022be83, // n0x01eb c0x0000 (---------------) + I lds - 0x002e6085, // n0x01ec c0x0000 (---------------) + I lease - 0x0024a487, // n0x01ed c0x0000 (---------------) + I leclerc - 0x002539c5, // n0x01ee c0x0000 (---------------) + I legal - 0x0031b204, // n0x01ef c0x0000 (---------------) + I lgbt - 0x00206682, // n0x01f0 c0x0000 (---------------) + I li - 0x0032d987, // n0x01f1 c0x0000 (---------------) + I liaison - 0x002b0704, // n0x01f2 c0x0000 (---------------) + I lidl - 0x00276fc4, // n0x01f3 c0x0000 (---------------) + I life - 0x00276fc9, // n0x01f4 c0x0000 (---------------) + I lifestyle - 0x00249608, // n0x01f5 c0x0000 (---------------) + I lighting - 0x002222c4, // n0x01f6 c0x0000 (---------------) + I like - 0x00360047, // n0x01f7 c0x0000 (---------------) + I limited - 0x002da104, // n0x01f8 c0x0000 (---------------) + I limo - 0x00376747, // n0x01f9 c0x0000 (---------------) + I lincoln - 0x002fce05, // n0x01fa c0x0000 (---------------) + I linde - 0x003631c4, // n0x01fb c0x0000 (---------------) + I link - 0x0025a6c4, // n0x01fc c0x0000 (---------------) + I live - 0x2ca00442, // n0x01fd c0x00b2 (n0x1116-n0x1124) + I lk - 0x00245c84, // n0x01fe c0x0000 (---------------) + I loan - 0x00245c85, // n0x01ff c0x0000 (---------------) + I loans - 0x0021d286, // n0x0200 c0x0000 (---------------) + I london - 0x00220945, // n0x0201 c0x0000 (---------------) + I lotte - 0x00220f45, // n0x0202 c0x0000 (---------------) + I lotto - 0x2ce81742, // n0x0203 c0x00b3 (n0x1124-n0x1129) + I lr - 0x2d200dc2, // n0x0204 c0x00b4 (n0x1129-n0x112b) + I ls - 0x2d61a082, // n0x0205 c0x00b5 (n0x112b-n0x112c) + I lt - 0x00220e43, // n0x0206 c0x0000 (---------------) + I ltd - 0x00220e44, // n0x0207 c0x0000 (---------------) + I ltda - 0x00208042, // n0x0208 c0x0000 (---------------) + I lu - 0x00359cc5, // n0x0209 c0x0000 (---------------) + I lupin - 0x00242ac4, // 
n0x020a c0x0000 (---------------) + I luxe - 0x00244e86, // n0x020b c0x0000 (---------------) + I luxury - 0x2da18582, // n0x020c c0x00b6 (n0x112c-n0x1135) + I lv - 0x2de3a682, // n0x020d c0x00b7 (n0x1135-n0x113e) + I ly - 0x2e2011c2, // n0x020e c0x00b8 (n0x113e-n0x1144) + I ma - 0x00309b86, // n0x020f c0x0000 (---------------) + I madrid - 0x002fa184, // n0x0210 c0x0000 (---------------) + I maif - 0x002ee706, // n0x0211 c0x0000 (---------------) + I maison - 0x00211c83, // n0x0212 c0x0000 (---------------) + I man - 0x0028ff4a, // n0x0213 c0x0000 (---------------) + I management - 0x00264145, // n0x0214 c0x0000 (---------------) + I mango - 0x00226d86, // n0x0215 c0x0000 (---------------) + I market - 0x00226d89, // n0x0216 c0x0000 (---------------) + I marketing - 0x00252087, // n0x0217 c0x0000 (---------------) + I markets - 0x00288408, // n0x0218 c0x0000 (---------------) + I marriott - 0x2e60f0c2, // n0x0219 c0x00b9 (n0x1144-n0x1146) + I mc - 0x0024a3c2, // n0x021a c0x0000 (---------------) + I md - 0x2ea04342, // n0x021b c0x00ba (n0x1146-n0x114e) + I me - 0x0021e585, // n0x021c c0x0000 (---------------) + I media - 0x0028c584, // n0x021d c0x0000 (---------------) + I meet - 0x00242109, // n0x021e c0x0000 (---------------) + I melbourne - 0x0036cc84, // n0x021f c0x0000 (---------------) + I meme - 0x0035fe88, // n0x0220 c0x0000 (---------------) + I memorial - 0x0020fd04, // n0x0221 c0x0000 (---------------) + I menu - 0x0020f283, // n0x0222 c0x0000 (---------------) + I meo - 0x2ef39dc2, // n0x0223 c0x00bb (n0x114e-n0x1156) + I mg - 0x002e9dc2, // n0x0224 c0x0000 (---------------) + I mh - 0x0033e685, // n0x0225 c0x0000 (---------------) + I miami - 0x00264e49, // n0x0226 c0x0000 (---------------) + I microsoft - 0x00240443, // n0x0227 c0x0000 (---------------) + I mil - 0x002769c4, // n0x0228 c0x0000 (---------------) + I mini - 0x2f340582, // n0x0229 c0x00bc (n0x1156-n0x115d) + I mk - 0x2f614302, // n0x022a c0x00bd (n0x115d-n0x1164) + I ml - 0x0160fcc2, // n0x022b c0x0005 (---------------)* o I mm - 0x0034fa03, // n0x022c c0x0000 (---------------) + I mma - 0x2fa2c7c2, // n0x022d c0x00be (n0x1164-n0x1168) + I mn - 0x2fe05202, // n0x022e c0x00bf (n0x1168-n0x116d) + I mo - 0x00277f84, // n0x022f c0x0000 (---------------) + I mobi - 0x00353b86, // n0x0230 c0x0000 (---------------) + I mobily - 0x00226fc4, // n0x0231 c0x0000 (---------------) + I moda - 0x002d2283, // n0x0232 c0x0000 (---------------) + I moe - 0x00205203, // n0x0233 c0x0000 (---------------) + I moi - 0x0023fec6, // n0x0234 c0x0000 (---------------) + I monash - 0x002bac05, // n0x0235 c0x0000 (---------------) + I money - 0x00222549, // n0x0236 c0x0000 (---------------) + I montblanc - 0x002bab46, // n0x0237 c0x0000 (---------------) + I mormon - 0x002bb948, // n0x0238 c0x0000 (---------------) + I mortgage - 0x002bbb46, // n0x0239 c0x0000 (---------------) + I moscow - 0x0029088b, // n0x023a c0x0000 (---------------) + I motorcycles - 0x002bd483, // n0x023b c0x0000 (---------------) + I mov - 0x002bd488, // n0x023c c0x0000 (---------------) + I movistar - 0x00203302, // n0x023d c0x0000 (---------------) + I mp - 0x0031fe42, // n0x023e c0x0000 (---------------) + I mq - 0x30214202, // n0x023f c0x00c0 (n0x116d-n0x116f) + I mr - 0x3060e602, // n0x0240 c0x00c1 (n0x116f-n0x1174) + I ms - 0x30a66782, // n0x0241 c0x00c2 (n0x1174-n0x1178) + I mt - 0x00266783, // n0x0242 c0x0000 (---------------) + I mtn - 0x002bd784, // n0x0243 c0x0000 (---------------) + I mtpc - 0x30e03b02, // n0x0244 c0x00c3 (n0x1178-n0x117f) + I mu - 
0x312c2a06, // n0x0245 c0x00c4 (n0x117f-n0x13a3) + I museum - 0x31604102, // n0x0246 c0x00c5 (n0x13a3-n0x13b1) + I mv - 0x31a12542, // n0x0247 c0x00c6 (n0x13b1-n0x13bc) + I mw - 0x31e2cd82, // n0x0248 c0x00c7 (n0x13bc-n0x13c2) + I mx - 0x32224782, // n0x0249 c0x00c8 (n0x13c2-n0x13c9) + I my - 0x326cbb02, // n0x024a c0x00c9 (n0x13c9-n0x13ca)* o I mz - 0x32a015c2, // n0x024b c0x00ca (n0x13ca-n0x13db) + I na - 0x002ecf45, // n0x024c c0x0000 (---------------) + I nadex - 0x0037bf46, // n0x024d c0x0000 (---------------) + I nagoya - 0x32e67944, // n0x024e c0x00cb (n0x13db-n0x13dd) + I name - 0x00204a04, // n0x024f c0x0000 (---------------) + I navy - 0x33a0a682, // n0x0250 c0x00ce (n0x13df-n0x13e0) + I nc - 0x00209e82, // n0x0251 c0x0000 (---------------) + I ne - 0x33e18643, // n0x0252 c0x00cf (n0x13e0-n0x140f) + I net - 0x00371247, // n0x0253 c0x0000 (---------------) + I netbank - 0x0030cd07, // n0x0254 c0x0000 (---------------) + I network - 0x002c0107, // n0x0255 c0x0000 (---------------) + I neustar - 0x00216703, // n0x0256 c0x0000 (---------------) + I new - 0x00234584, // n0x0257 c0x0000 (---------------) + I news - 0x00249c85, // n0x0258 c0x0000 (---------------) + I nexus - 0x34e06ec2, // n0x0259 c0x00d3 (n0x1416-n0x1420) + I nf - 0x35200282, // n0x025a c0x00d4 (n0x1420-n0x1429) + I ng - 0x0024ad43, // n0x025b c0x0000 (---------------) + I ngo - 0x00267203, // n0x025c c0x0000 (---------------) + I nhk - 0x01602382, // n0x025d c0x0005 (---------------)* o I ni - 0x00359184, // n0x025e c0x0000 (---------------) + I nico - 0x002bd205, // n0x025f c0x0000 (---------------) + I ninja - 0x00225906, // n0x0260 c0x0000 (---------------) + I nissan - 0x35648cc2, // n0x0261 c0x00d5 (n0x1429-n0x142c) + I nl - 0x35a00cc2, // n0x0262 c0x00d6 (n0x142c-n0x1702) + I no - 0x00305a06, // n0x0263 c0x0000 (---------------) + I norton - 0x00352f46, // n0x0264 c0x0000 (---------------) + I nowruz - 0x01612b02, // n0x0265 c0x0005 (---------------)* o I np - 0x3de1ccc2, // n0x0266 c0x00f7 (n0x172a-n0x1731) + I nr - 0x002f09c3, // n0x0267 c0x0000 (---------------) + I nra - 0x0022d2c3, // n0x0268 c0x0000 (---------------) + I nrw - 0x0034b343, // n0x0269 c0x0000 (---------------) + I ntt - 0x3e20fd82, // n0x026a c0x00f8 (n0x1731-n0x1734) + I nu - 0x0021af43, // n0x026b c0x0000 (---------------) + I nyc - 0x3e613002, // n0x026c c0x00f9 (n0x1734-n0x1744) + I nz - 0x00213483, // n0x026d c0x0000 (---------------) + I obi - 0x002130c7, // n0x026e c0x0000 (---------------) + I okinawa - 0x3ee087c2, // n0x026f c0x00fb (n0x1745-n0x174e) + I om - 0x002166c3, // n0x0270 c0x0000 (---------------) + I one - 0x00218c43, // n0x0271 c0x0000 (---------------) + I ong - 0x0030be03, // n0x0272 c0x0000 (---------------) + I onl - 0x00294bc3, // n0x0273 c0x0000 (---------------) + I ooo - 0x002c4046, // n0x0274 c0x0000 (---------------) + I oracle - 0x3f24d043, // n0x0275 c0x00fc (n0x174e-n0x1784) + I org - 0x0024d047, // n0x0276 c0x0000 (---------------) + I organic - 0x002905c5, // n0x0277 c0x0000 (---------------) + I osaka - 0x0024b686, // n0x0278 c0x0000 (---------------) + I otsuka - 0x00283083, // n0x0279 c0x0000 (---------------) + I ovh - 0x3fa00ac2, // n0x027a c0x00fe (n0x1786-n0x1791) + I pa - 0x0035f2c4, // n0x027b c0x0000 (---------------) + I page - 0x00243a87, // n0x027c c0x0000 (---------------) + I panerai - 0x00256585, // n0x027d c0x0000 (---------------) + I paris - 0x0028e704, // n0x027e c0x0000 (---------------) + I pars - 0x002acc08, // n0x027f c0x0000 (---------------) + I partners - 0x002b9345, // n0x0280 
c0x0000 (---------------) + I parts - 0x002c7745, // n0x0281 c0x0000 (---------------) + I party - 0x3fe0c782, // n0x0282 c0x00ff (n0x1791-n0x1798) + I pe - 0x4034dec2, // n0x0283 c0x0100 (n0x1798-n0x179b) + I pf - 0x016a6f42, // n0x0284 c0x0005 (---------------)* o I pg - 0x40606bc2, // n0x0285 c0x0101 (n0x179b-n0x17a3) + I ph - 0x002c5a88, // n0x0286 c0x0000 (---------------) + I pharmacy - 0x002c6747, // n0x0287 c0x0000 (---------------) + I philips - 0x002a75c5, // n0x0288 c0x0000 (---------------) + I photo - 0x002c718b, // n0x0289 c0x0000 (---------------) + I photography - 0x002c30c6, // n0x028a c0x0000 (---------------) + I photos - 0x002c7386, // n0x028b c0x0000 (---------------) + I physio - 0x002c7506, // n0x028c c0x0000 (---------------) + I piaget - 0x00221f84, // n0x028d c0x0000 (---------------) + I pics - 0x002c80c6, // n0x028e c0x0000 (---------------) + I pictet - 0x002c8648, // n0x028f c0x0000 (---------------) + I pictures - 0x00246c03, // n0x0290 c0x0000 (---------------) + I pin - 0x00359d44, // n0x0291 c0x0000 (---------------) + I pink - 0x002c9e05, // n0x0292 c0x0000 (---------------) + I pizza - 0x40ac9f42, // n0x0293 c0x0102 (n0x17a3-n0x17b1) + I pk - 0x40e0a402, // n0x0294 c0x0103 (n0x17b1-n0x1856) + I pl - 0x0020a405, // n0x0295 c0x0000 (---------------) + I place - 0x002cc348, // n0x0296 c0x0000 (---------------) + I plumbing - 0x002127c2, // n0x0297 c0x0000 (---------------) + I pm - 0x416a2b42, // n0x0298 c0x0105 (n0x185f-n0x1864) + I pn - 0x002cd0c4, // n0x0299 c0x0000 (---------------) + I pohl - 0x002cd1c5, // n0x029a c0x0000 (---------------) + I poker - 0x002cdf44, // n0x029b c0x0000 (---------------) + I porn - 0x002ce7c4, // n0x029c c0x0000 (---------------) + I post - 0x41a2ad42, // n0x029d c0x0106 (n0x1864-n0x1871) + I pr - 0x0026a2c5, // n0x029e c0x0000 (---------------) + I praxi - 0x0022ad45, // n0x029f c0x0000 (---------------) + I press - 0x41ecfc43, // n0x02a0 c0x0107 (n0x1871-n0x1878) + I pro - 0x002cfec4, // n0x02a1 c0x0000 (---------------) + I prod - 0x002cfecb, // n0x02a2 c0x0000 (---------------) + I productions - 0x002d0d84, // n0x02a3 c0x0000 (---------------) + I prof - 0x002d1505, // n0x02a4 c0x0000 (---------------) + I promo - 0x002d284a, // n0x02a5 c0x0000 (---------------) + I properties - 0x002d2d48, // n0x02a6 c0x0000 (---------------) + I property - 0x42218402, // n0x02a7 c0x0108 (n0x1878-n0x187f) + I ps - 0x4269ab02, // n0x02a8 c0x0109 (n0x187f-n0x1888) + I pt - 0x0025dbc3, // n0x02a9 c0x0000 (---------------) + I pub - 0x42b76602, // n0x02aa c0x010a (n0x1888-n0x188e) + I pw - 0x42eb9882, // n0x02ab c0x010b (n0x188e-n0x1895) + I py - 0x43304ec2, // n0x02ac c0x010c (n0x1895-n0x189d) + I qa - 0x0023f144, // n0x02ad c0x0000 (---------------) + I qpon - 0x0021b2c6, // n0x02ae c0x0000 (---------------) + I quebec - 0x00367706, // n0x02af c0x0000 (---------------) + I racing - 0x43607082, // n0x02b0 c0x010d (n0x189d-n0x18a1) + I re - 0x002c6184, // n0x02b1 c0x0000 (---------------) + I read - 0x002ff907, // n0x02b2 c0x0000 (---------------) + I realtor - 0x0022a147, // n0x02b3 c0x0000 (---------------) + I recipes - 0x0023fbc3, // n0x02b4 c0x0000 (---------------) + I red - 0x002a4b48, // n0x02b5 c0x0000 (---------------) + I redstone - 0x0027dc05, // n0x02b6 c0x0000 (---------------) + I rehab - 0x0020e8c5, // n0x02b7 c0x0000 (---------------) + I reise - 0x0020e8c6, // n0x02b8 c0x0000 (---------------) + I reisen - 0x002aa044, // n0x02b9 c0x0000 (---------------) + I reit - 0x0020ae03, // n0x02ba c0x0000 (---------------) + I ren 
- 0x0029e644, // n0x02bb c0x0000 (---------------) + I rent - 0x002d9407, // n0x02bc c0x0000 (---------------) + I rentals - 0x00222886, // n0x02bd c0x0000 (---------------) + I repair - 0x002f0f86, // n0x02be c0x0000 (---------------) + I report - 0x002d7e4a, // n0x02bf c0x0000 (---------------) + I republican - 0x00247204, // n0x02c0 c0x0000 (---------------) + I rest - 0x0024720a, // n0x02c1 c0x0000 (---------------) + I restaurant - 0x00311dc6, // n0x02c2 c0x0000 (---------------) + I review - 0x00311dc7, // n0x02c3 c0x0000 (---------------) + I reviews - 0x00296ec4, // n0x02c4 c0x0000 (---------------) + I rich - 0x00203d05, // n0x02c5 c0x0000 (---------------) + I ricoh - 0x0020ef83, // n0x02c6 c0x0000 (---------------) + I rio - 0x00351a43, // n0x02c7 c0x0000 (---------------) + I rip - 0x43a00982, // n0x02c8 c0x010e (n0x18a1-n0x18ad) + I ro - 0x0037e5c6, // n0x02c9 c0x0000 (---------------) + I rocher - 0x002e0785, // n0x02ca c0x0000 (---------------) + I rocks - 0x002c4e05, // n0x02cb c0x0000 (---------------) + I rodeo - 0x00257344, // n0x02cc c0x0000 (---------------) + I room - 0x43e04642, // n0x02cd c0x010f (n0x18ad-n0x18b3) + I rs - 0x0024a9c4, // n0x02ce c0x0000 (---------------) + I rsvp - 0x44202f82, // n0x02cf c0x0110 (n0x18b3-n0x1937) + I ru - 0x0025df44, // n0x02d0 c0x0000 (---------------) + I ruhr - 0x4462d302, // n0x02d1 c0x0111 (n0x1937-n0x1940) + I rw - 0x00269846, // n0x02d2 c0x0000 (---------------) + I ryukyu - 0x44a03a82, // n0x02d3 c0x0112 (n0x1940-n0x1948) + I sa - 0x0036f048, // n0x02d4 c0x0000 (---------------) + I saarland - 0x00280544, // n0x02d5 c0x0000 (---------------) + I safe - 0x0020e0c6, // n0x02d6 c0x0000 (---------------) + I sakura - 0x00253944, // n0x02d7 c0x0000 (---------------) + I sale - 0x0021d205, // n0x02d8 c0x0000 (---------------) + I salon - 0x0022cbc7, // n0x02d9 c0x0000 (---------------) + I samsung - 0x002259c7, // n0x02da c0x0000 (---------------) + I sandvik - 0x002259cf, // n0x02db c0x0000 (---------------) + I sandvikcoromant - 0x00287906, // n0x02dc c0x0000 (---------------) + I sanofi - 0x00242883, // n0x02dd c0x0000 (---------------) + I sap - 0x00242884, // n0x02de c0x0000 (---------------) + I sapo - 0x0024cc44, // n0x02df c0x0000 (---------------) + I sarl - 0x00264c04, // n0x02e0 c0x0000 (---------------) + I saxo - 0x44e14502, // n0x02e1 c0x0113 (n0x1948-n0x194d) + I sb - 0x00266303, // n0x02e2 c0x0000 (---------------) + I sbs - 0x4521bcc2, // n0x02e3 c0x0114 (n0x194d-n0x1952) + I sc - 0x0022f803, // n0x02e4 c0x0000 (---------------) + I sca - 0x0033ff83, // n0x02e5 c0x0000 (---------------) + I scb - 0x0026c047, // n0x02e6 c0x0000 (---------------) + I schmidt - 0x00274d4c, // n0x02e7 c0x0000 (---------------) + I scholarships - 0x00275006, // n0x02e8 c0x0000 (---------------) + I school - 0x00278586, // n0x02e9 c0x0000 (---------------) + I schule - 0x002793c7, // n0x02ea c0x0000 (---------------) + I schwarz - 0x00234e47, // n0x02eb c0x0000 (---------------) + I science - 0x0021bcc4, // n0x02ec c0x0000 (---------------) + I scor - 0x00250c44, // n0x02ed c0x0000 (---------------) + I scot - 0x4560acc2, // n0x02ee c0x0115 (n0x1952-n0x195a) + I sd - 0x45a04982, // n0x02ef c0x0116 (n0x195a-n0x1983) + I se - 0x003095c4, // n0x02f0 c0x0000 (---------------) + I seat - 0x00269d04, // n0x02f1 c0x0000 (---------------) + I seek - 0x002bf005, // n0x02f2 c0x0000 (---------------) + I sener - 0x00290b08, // n0x02f3 c0x0000 (---------------) + I services - 0x0029a603, // n0x02f4 c0x0000 (---------------) + I sew - 0x0022ae43, // 
n0x02f5 c0x0000 (---------------) + I sex - 0x0022ae44, // n0x02f6 c0x0000 (---------------) + I sexy - 0x45e5f242, // n0x02f7 c0x0117 (n0x1983-n0x198a) + I sg - 0x462001c2, // n0x02f8 c0x0118 (n0x198a-n0x198f) + I sh - 0x002acb05, // n0x02f9 c0x0000 (---------------) + I sharp - 0x00215484, // n0x02fa c0x0000 (---------------) + I shia - 0x002b2487, // n0x02fb c0x0000 (---------------) + I shiksha - 0x003794c5, // n0x02fc c0x0000 (---------------) + I shoes - 0x002f8b07, // n0x02fd c0x0000 (---------------) + I shriram - 0x002058c2, // n0x02fe c0x0000 (---------------) + I si - 0x00224bc7, // n0x02ff c0x0000 (---------------) + I singles - 0x00229d82, // n0x0300 c0x0000 (---------------) + I sj - 0x46600e02, // n0x0301 c0x0119 (n0x198f-n0x1990) + I sk - 0x0022f6c3, // n0x0302 c0x0000 (---------------) + I sky - 0x0022f6c5, // n0x0303 c0x0000 (---------------) + I skype - 0x46a109c2, // n0x0304 c0x011a (n0x1990-n0x1995) + I sl - 0x0024f102, // n0x0305 c0x0000 (---------------) + I sm - 0x0033ba85, // n0x0306 c0x0000 (---------------) + I smile - 0x46e14782, // n0x0307 c0x011b (n0x1995-n0x199c) + I sn - 0x47209f02, // n0x0308 c0x011c (n0x199c-n0x199f) + I so - 0x002ae046, // n0x0309 c0x0000 (---------------) + I social - 0x00264f88, // n0x030a c0x0000 (---------------) + I software - 0x002ab284, // n0x030b c0x0000 (---------------) + I sohu - 0x00310945, // n0x030c c0x0000 (---------------) + I solar - 0x002d3cc9, // n0x030d c0x0000 (---------------) + I solutions - 0x002107c3, // n0x030e c0x0000 (---------------) + I soy - 0x002d91c5, // n0x030f c0x0000 (---------------) + I space - 0x002db647, // n0x0310 c0x0000 (---------------) + I spiegel - 0x002dbd0d, // n0x0311 c0x0000 (---------------) + I spreadbetting - 0x002dc282, // n0x0312 c0x0000 (---------------) + I sr - 0x47604682, // n0x0313 c0x011d (n0x199f-n0x19ab) + I st - 0x00346f05, // n0x0314 c0x0000 (---------------) + I stada - 0x002cd887, // n0x0315 c0x0000 (---------------) + I statoil - 0x00270103, // n0x0316 c0x0000 (---------------) + I stc - 0x00270108, // n0x0317 c0x0000 (---------------) + I stcgroup - 0x0036c009, // n0x0318 c0x0000 (---------------) + I stockholm - 0x002dcf05, // n0x0319 c0x0000 (---------------) + I study - 0x002770c5, // n0x031a c0x0000 (---------------) + I style - 0x00200502, // n0x031b c0x0000 (---------------) + I su - 0x002aec88, // n0x031c c0x0000 (---------------) + I supplies - 0x0028a146, // n0x031d c0x0000 (---------------) + I supply - 0x0022bc47, // n0x031e c0x0000 (---------------) + I support - 0x002d1744, // n0x031f c0x0000 (---------------) + I surf - 0x002bce87, // n0x0320 c0x0000 (---------------) + I surgery - 0x002dfe86, // n0x0321 c0x0000 (---------------) + I suzuki - 0x47a01e82, // n0x0322 c0x011e (n0x19ab-n0x19b0) + I sv - 0x002e3105, // n0x0323 c0x0000 (---------------) + I swiss - 0x47ee3902, // n0x0324 c0x011f (n0x19b0-n0x19b1) + I sx - 0x48218442, // n0x0325 c0x0120 (n0x19b1-n0x19b7) + I sy - 0x00266bc6, // n0x0326 c0x0000 (---------------) + I sydney - 0x002a3808, // n0x0327 c0x0000 (---------------) + I symantec - 0x0037e1c7, // n0x0328 c0x0000 (---------------) + I systems - 0x48601302, // n0x0329 c0x0121 (n0x19b7-n0x19ba) + I sz - 0x00232b83, // n0x032a c0x0000 (---------------) + I tab - 0x00233ac6, // n0x032b c0x0000 (---------------) + I taipei - 0x00210245, // n0x032c c0x0000 (---------------) + I tatar - 0x0021cd46, // n0x032d c0x0000 (---------------) + I tattoo - 0x00226b43, // n0x032e c0x0000 (---------------) + I tax - 0x00205ec2, // n0x032f c0x0000 
+	// [generated public-suffix node table: entries n0x0330 through n0x0774
+	// (labels "tci" .. "com", covering gTLDs, xn-- IDN TLDs, ccTLDs, and
+	// per-country second-level groups such as com/edu/gov/mil/net/org,
+	// plus private-section entries like blogspot and the dyndns-* and
+	// is-a-* families). The original one-entry-per-line layout was lost
+	// in extraction; a few entries, restored to the generator's format,
+	// illustrate the encoding:]
+	0x002ed903, // n0x0330 c0x0000 (---------------) + I tci
+	0x48a08dc2, // n0x0331 c0x0122 (n0x19ba-n0x19bb) + I td
+	0x002a394a, // n0x0332 c0x0000 (---------------) + I technology
+	0x0161ef02, // n0x0401 c0x0005 (---------------)* o I ye
+	0x0009e448, // n0x0417 c0x0000 (---------------) + blogspot
+	0x006b1c03, // n0x05bd c0x0001 (---------------) ! I www
+	// [... remaining ~1,090 entries of this span elided ...]
// n0x0775 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x0776 c0x0000 (---------------) + I gov - 0x00220e43, // n0x0777 c0x0000 (---------------) + I ltd - 0x00226fc3, // n0x0778 c0x0000 (---------------) + I mod - 0x0024d043, // n0x0779 c0x0000 (---------------) + I org - 0x00200b82, // n0x077a c0x0000 (---------------) + I ac - 0x00232dc3, // n0x077b c0x0000 (---------------) + I com - 0x0021e083, // n0x077c c0x0000 (---------------) + I edu - 0x00209ac3, // n0x077d c0x0000 (---------------) + I gov - 0x00218643, // n0x077e c0x0000 (---------------) + I net - 0x0024d043, // n0x077f c0x0000 (---------------) + I org - 0x00278344, // n0x0780 c0x0000 (---------------) + I asso - 0x00232dc3, // n0x0781 c0x0000 (---------------) + I com - 0x0021e083, // n0x0782 c0x0000 (---------------) + I edu - 0x00277f84, // n0x0783 c0x0000 (---------------) + I mobi - 0x00218643, // n0x0784 c0x0000 (---------------) + I net - 0x0024d043, // n0x0785 c0x0000 (---------------) + I org - 0x0009e448, // n0x0786 c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x0787 c0x0000 (---------------) + I com - 0x0021e083, // n0x0788 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x0789 c0x0000 (---------------) + I gov - 0x00218643, // n0x078a c0x0000 (---------------) + I net - 0x0024d043, // n0x078b c0x0000 (---------------) + I org - 0x00232dc3, // n0x078c c0x0000 (---------------) + I com - 0x0021e083, // n0x078d c0x0000 (---------------) + I edu - 0x003704c3, // n0x078e c0x0000 (---------------) + I gob - 0x002202c3, // n0x078f c0x0000 (---------------) + I ind - 0x00240443, // n0x0790 c0x0000 (---------------) + I mil - 0x00218643, // n0x0791 c0x0000 (---------------) + I net - 0x0024d043, // n0x0792 c0x0000 (---------------) + I org - 0x00200882, // n0x0793 c0x0000 (---------------) + I co - 0x00232dc3, // n0x0794 c0x0000 (---------------) + I com - 0x00218643, // n0x0795 c0x0000 (---------------) + I net - 0x0009e448, // n0x0796 c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x0797 c0x0000 (---------------) + I com - 0x0021e083, // n0x0798 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x0799 c0x0000 (---------------) + I gov - 0x00309c83, // n0x079a c0x0000 (---------------) + I idv - 0x000746c3, // n0x079b c0x0000 (---------------) + inc - 0x00020e43, // n0x079c c0x0000 (---------------) + ltd - 0x00218643, // n0x079d c0x0000 (---------------) + I net - 0x0024d043, // n0x079e c0x0000 (---------------) + I org - 0x002f66ca, // n0x079f c0x0000 (---------------) + I xn--55qx5d - 0x0030e909, // n0x07a0 c0x0000 (---------------) + I xn--ciqpn - 0x0031fd0b, // n0x07a1 c0x0000 (---------------) + I xn--gmq050i - 0x0032030a, // n0x07a2 c0x0000 (---------------) + I xn--gmqw5a - 0x00326e4a, // n0x07a3 c0x0000 (---------------) + I xn--io0a7i - 0x00333c4b, // n0x07a4 c0x0000 (---------------) + I xn--lcvr32d - 0x0034048a, // n0x07a5 c0x0000 (---------------) + I xn--mk0axi - 0x0034808a, // n0x07a6 c0x0000 (---------------) + I xn--mxtq1m - 0x0034cf0a, // n0x07a7 c0x0000 (---------------) + I xn--od0alg - 0x0034d18b, // n0x07a8 c0x0000 (---------------) + I xn--od0aq3b - 0x00370f89, // n0x07a9 c0x0000 (---------------) + I xn--tn0ag - 0x00372b4a, // n0x07aa c0x0000 (---------------) + I xn--uc0atv - 0x0037300b, // n0x07ab c0x0000 (---------------) + I xn--uc0ay4a - 0x0037c0cb, // n0x07ac c0x0000 (---------------) + I xn--wcvs22d - 0x003844ca, // n0x07ad c0x0000 (---------------) + I xn--zf0avx - 0x00232dc3, // n0x07ae c0x0000 (---------------) + I com - 0x0021e083, // n0x07af c0x0000 
(---------------) + I edu - 0x003704c3, // n0x07b0 c0x0000 (---------------) + I gob - 0x00240443, // n0x07b1 c0x0000 (---------------) + I mil - 0x00218643, // n0x07b2 c0x0000 (---------------) + I net - 0x0024d043, // n0x07b3 c0x0000 (---------------) + I org - 0x00232dc3, // n0x07b4 c0x0000 (---------------) + I com - 0x0025c084, // n0x07b5 c0x0000 (---------------) + I from - 0x002021c2, // n0x07b6 c0x0000 (---------------) + I iz - 0x00267944, // n0x07b7 c0x0000 (---------------) + I name - 0x002fb4c5, // n0x07b8 c0x0000 (---------------) + I adult - 0x00208d43, // n0x07b9 c0x0000 (---------------) + I art - 0x00278344, // n0x07ba c0x0000 (---------------) + I asso - 0x00232dc3, // n0x07bb c0x0000 (---------------) + I com - 0x0023a884, // n0x07bc c0x0000 (---------------) + I coop - 0x0021e083, // n0x07bd c0x0000 (---------------) + I edu - 0x0024a304, // n0x07be c0x0000 (---------------) + I firm - 0x00252544, // n0x07bf c0x0000 (---------------) + I gouv - 0x00208a44, // n0x07c0 c0x0000 (---------------) + I info - 0x00210e83, // n0x07c1 c0x0000 (---------------) + I med - 0x00218643, // n0x07c2 c0x0000 (---------------) + I net - 0x0024d043, // n0x07c3 c0x0000 (---------------) + I org - 0x002a80c5, // n0x07c4 c0x0000 (---------------) + I perso - 0x002369c3, // n0x07c5 c0x0000 (---------------) + I pol - 0x002cfc43, // n0x07c6 c0x0000 (---------------) + I pro - 0x0027f043, // n0x07c7 c0x0000 (---------------) + I rel - 0x0022e644, // n0x07c8 c0x0000 (---------------) + I shop - 0x003678c4, // n0x07c9 c0x0000 (---------------) + I 2000 - 0x00263c05, // n0x07ca c0x0000 (---------------) + I agrar - 0x0009e448, // n0x07cb c0x0000 (---------------) + blogspot - 0x002f0bc4, // n0x07cc c0x0000 (---------------) + I bolt - 0x0023b546, // n0x07cd c0x0000 (---------------) + I casino - 0x003242c4, // n0x07ce c0x0000 (---------------) + I city - 0x00200882, // n0x07cf c0x0000 (---------------) + I co - 0x00245947, // n0x07d0 c0x0000 (---------------) + I erotica - 0x002a3cc7, // n0x07d1 c0x0000 (---------------) + I erotika - 0x00247d84, // n0x07d2 c0x0000 (---------------) + I film - 0x00255645, // n0x07d3 c0x0000 (---------------) + I forum - 0x002e01c5, // n0x07d4 c0x0000 (---------------) + I games - 0x0029d805, // n0x07d5 c0x0000 (---------------) + I hotel - 0x00208a44, // n0x07d6 c0x0000 (---------------) + I info - 0x002b6888, // n0x07d7 c0x0000 (---------------) + I ingatlan - 0x003817c6, // n0x07d8 c0x0000 (---------------) + I jogasz - 0x002c9f88, // n0x07d9 c0x0000 (---------------) + I konyvelo - 0x002cc885, // n0x07da c0x0000 (---------------) + I lakas - 0x0021e585, // n0x07db c0x0000 (---------------) + I media - 0x00234584, // n0x07dc c0x0000 (---------------) + I news - 0x0024d043, // n0x07dd c0x0000 (---------------) + I org - 0x002cfac4, // n0x07de c0x0000 (---------------) + I priv - 0x00332386, // n0x07df c0x0000 (---------------) + I reklam - 0x0022ae43, // n0x07e0 c0x0000 (---------------) + I sex - 0x0022e644, // n0x07e1 c0x0000 (---------------) + I shop - 0x0028f2c5, // n0x07e2 c0x0000 (---------------) + I sport - 0x00234404, // n0x07e3 c0x0000 (---------------) + I suli - 0x00201304, // n0x07e4 c0x0000 (---------------) + I szex - 0x002032c2, // n0x07e5 c0x0000 (---------------) + I tm - 0x0025e606, // n0x07e6 c0x0000 (---------------) + I tozsde - 0x0035c4c6, // n0x07e7 c0x0000 (---------------) + I utazas - 0x002ebe05, // n0x07e8 c0x0000 (---------------) + I video - 0x00200b82, // n0x07e9 c0x0000 (---------------) + I ac - 0x00202183, // n0x07ea c0x0000 
(---------------) + I biz - 0x00200882, // n0x07eb c0x0000 (---------------) + I co - 0x00238ec4, // n0x07ec c0x0000 (---------------) + I desa - 0x00209ac2, // n0x07ed c0x0000 (---------------) + I go - 0x00240443, // n0x07ee c0x0000 (---------------) + I mil - 0x00224782, // n0x07ef c0x0000 (---------------) + I my - 0x00218643, // n0x07f0 c0x0000 (---------------) + I net - 0x00200d02, // n0x07f1 c0x0000 (---------------) + I or - 0x00251983, // n0x07f2 c0x0000 (---------------) + I sch - 0x002071c3, // n0x07f3 c0x0000 (---------------) + I web - 0x0009e448, // n0x07f4 c0x0000 (---------------) + blogspot - 0x00209ac3, // n0x07f5 c0x0000 (---------------) + I gov - 0x18e00882, // n0x07f6 c0x0063 (n0x07f7-n0x07f8) o I co - 0x0009e448, // n0x07f7 c0x0000 (---------------) + blogspot - 0x00200b82, // n0x07f8 c0x0000 (---------------) + I ac - 0x19600882, // n0x07f9 c0x0065 (n0x07ff-n0x0801) + I co - 0x00232dc3, // n0x07fa c0x0000 (---------------) + I com - 0x00218643, // n0x07fb c0x0000 (---------------) + I net - 0x0024d043, // n0x07fc c0x0000 (---------------) + I org - 0x0021cdc2, // n0x07fd c0x0000 (---------------) + I tt - 0x0028dc82, // n0x07fe c0x0000 (---------------) + I tv - 0x00220e43, // n0x07ff c0x0000 (---------------) + I ltd - 0x002cb543, // n0x0800 c0x0000 (---------------) + I plc - 0x00200b82, // n0x0801 c0x0000 (---------------) + I ac - 0x0009e448, // n0x0802 c0x0000 (---------------) + blogspot - 0x00200882, // n0x0803 c0x0000 (---------------) + I co - 0x0021e083, // n0x0804 c0x0000 (---------------) + I edu - 0x0024a304, // n0x0805 c0x0000 (---------------) + I firm - 0x0020a0c3, // n0x0806 c0x0000 (---------------) + I gen - 0x00209ac3, // n0x0807 c0x0000 (---------------) + I gov - 0x002202c3, // n0x0808 c0x0000 (---------------) + I ind - 0x00240443, // n0x0809 c0x0000 (---------------) + I mil - 0x00218643, // n0x080a c0x0000 (---------------) + I net - 0x00219583, // n0x080b c0x0000 (---------------) + I nic - 0x0024d043, // n0x080c c0x0000 (---------------) + I org - 0x00207083, // n0x080d c0x0000 (---------------) + I res - 0x00119193, // n0x080e c0x0000 (---------------) + barrel-of-knowledge - 0x00121ad4, // n0x080f c0x0000 (---------------) + barrell-of-knowledge - 0x0000dc06, // n0x0810 c0x0000 (---------------) + dyndns - 0x000512c7, // n0x0811 c0x0000 (---------------) + for-our - 0x0016d549, // n0x0812 c0x0000 (---------------) + groks-the - 0x0017980a, // n0x0813 c0x0000 (---------------) + groks-this - 0x0008124d, // n0x0814 c0x0000 (---------------) + here-for-more - 0x00051c0a, // n0x0815 c0x0000 (---------------) + knowsitall - 0x00141246, // n0x0816 c0x0000 (---------------) + selfip - 0x0002c186, // n0x0817 c0x0000 (---------------) + webhop - 0x0021b882, // n0x0818 c0x0000 (---------------) + I eu - 0x00232dc3, // n0x0819 c0x0000 (---------------) + I com - 0x000bb186, // n0x081a c0x0000 (---------------) + github - 0x000926c3, // n0x081b c0x0000 (---------------) + nid - 0x00232dc3, // n0x081c c0x0000 (---------------) + I com - 0x0021e083, // n0x081d c0x0000 (---------------) + I edu - 0x00209ac3, // n0x081e c0x0000 (---------------) + I gov - 0x00240443, // n0x081f c0x0000 (---------------) + I mil - 0x00218643, // n0x0820 c0x0000 (---------------) + I net - 0x0024d043, // n0x0821 c0x0000 (---------------) + I org - 0x00200b82, // n0x0822 c0x0000 (---------------) + I ac - 0x00200882, // n0x0823 c0x0000 (---------------) + I co - 0x00209ac3, // n0x0824 c0x0000 (---------------) + I gov - 0x00205942, // n0x0825 c0x0000 (---------------) + I 
id - 0x00218643, // n0x0826 c0x0000 (---------------) + I net - 0x0024d043, // n0x0827 c0x0000 (---------------) + I org - 0x00251983, // n0x0828 c0x0000 (---------------) + I sch - 0x0033b00f, // n0x0829 c0x0000 (---------------) + I xn--mgba3a4f16a - 0x0033b3ce, // n0x082a c0x0000 (---------------) + I xn--mgba3a4fra - 0x00232dc3, // n0x082b c0x0000 (---------------) + I com - 0x00045147, // n0x082c c0x0000 (---------------) + cupcake - 0x0021e083, // n0x082d c0x0000 (---------------) + I edu - 0x00209ac3, // n0x082e c0x0000 (---------------) + I gov - 0x002188c3, // n0x082f c0x0000 (---------------) + I int - 0x00218643, // n0x0830 c0x0000 (---------------) + I net - 0x0024d043, // n0x0831 c0x0000 (---------------) + I org - 0x0021e683, // n0x0832 c0x0000 (---------------) + I abr - 0x00377c07, // n0x0833 c0x0000 (---------------) + I abruzzo - 0x00202602, // n0x0834 c0x0000 (---------------) + I ag - 0x00307909, // n0x0835 c0x0000 (---------------) + I agrigento - 0x00200b02, // n0x0836 c0x0000 (---------------) + I al - 0x0025fccb, // n0x0837 c0x0000 (---------------) + I alessandria - 0x0029fdca, // n0x0838 c0x0000 (---------------) + I alto-adige - 0x002c8409, // n0x0839 c0x0000 (---------------) + I altoadige - 0x00202342, // n0x083a c0x0000 (---------------) + I an - 0x002375c6, // n0x083b c0x0000 (---------------) + I ancona - 0x0027b6d5, // n0x083c c0x0000 (---------------) + I andria-barletta-trani - 0x0025fe15, // n0x083d c0x0000 (---------------) + I andria-trani-barletta - 0x0027d413, // n0x083e c0x0000 (---------------) + I andriabarlettatrani - 0x00260393, // n0x083f c0x0000 (---------------) + I andriatranibarletta - 0x00207702, // n0x0840 c0x0000 (---------------) + I ao - 0x00209245, // n0x0841 c0x0000 (---------------) + I aosta - 0x00382fcc, // n0x0842 c0x0000 (---------------) + I aosta-valley - 0x002a5b0b, // n0x0843 c0x0000 (---------------) + I aostavalley - 0x00255445, // n0x0844 c0x0000 (---------------) + I aoste - 0x00200a42, // n0x0845 c0x0000 (---------------) + I ap - 0x0023be02, // n0x0846 c0x0000 (---------------) + I aq - 0x002c0f06, // n0x0847 c0x0000 (---------------) + I aquila - 0x002030c2, // n0x0848 c0x0000 (---------------) + I ar - 0x003650c6, // n0x0849 c0x0000 (---------------) + I arezzo - 0x0035940d, // n0x084a c0x0000 (---------------) + I ascoli-piceno - 0x002c7c0c, // n0x084b c0x0000 (---------------) + I ascolipiceno - 0x0023d544, // n0x084c c0x0000 (---------------) + I asti - 0x00201702, // n0x084d c0x0000 (---------------) + I at - 0x00201602, // n0x084e c0x0000 (---------------) + I av - 0x00290208, // n0x084f c0x0000 (---------------) + I avellino - 0x002076c2, // n0x0850 c0x0000 (---------------) + I ba - 0x0024f906, // n0x0851 c0x0000 (---------------) + I balsan - 0x00288e84, // n0x0852 c0x0000 (---------------) + I bari - 0x0027b895, // n0x0853 c0x0000 (---------------) + I barletta-trani-andria - 0x0027d593, // n0x0854 c0x0000 (---------------) + I barlettatraniandria - 0x00212c83, // n0x0855 c0x0000 (---------------) + I bas - 0x00343aca, // n0x0856 c0x0000 (---------------) + I basilicata - 0x00215a47, // n0x0857 c0x0000 (---------------) + I belluno - 0x00344ec9, // n0x0858 c0x0000 (---------------) + I benevento - 0x003498c7, // n0x0859 c0x0000 (---------------) + I bergamo - 0x00340002, // n0x085a c0x0000 (---------------) + I bg - 0x00200002, // n0x085b c0x0000 (---------------) + I bi - 0x00201c46, // n0x085c c0x0000 (---------------) + I biella - 0x0020cb02, // n0x085d c0x0000 (---------------) + I bl - 0x0009e448, // 
n0x085e c0x0000 (---------------) + blogspot - 0x00212ac2, // n0x085f c0x0000 (---------------) + I bn - 0x00210042, // n0x0860 c0x0000 (---------------) + I bo - 0x003758c7, // n0x0861 c0x0000 (---------------) + I bologna - 0x00213b87, // n0x0862 c0x0000 (---------------) + I bolzano - 0x0021b5c5, // n0x0863 c0x0000 (---------------) + I bozen - 0x00207bc2, // n0x0864 c0x0000 (---------------) + I br - 0x00220087, // n0x0865 c0x0000 (---------------) + I brescia - 0x00220248, // n0x0866 c0x0000 (---------------) + I brindisi - 0x00207242, // n0x0867 c0x0000 (---------------) + I bs - 0x0023ad82, // n0x0868 c0x0000 (---------------) + I bt - 0x0022eac2, // n0x0869 c0x0000 (---------------) + I bz - 0x00214582, // n0x086a c0x0000 (---------------) + I ca - 0x0036e048, // n0x086b c0x0000 (---------------) + I cagliari - 0x00219603, // n0x086c c0x0000 (---------------) + I cal - 0x0021fc48, // n0x086d c0x0000 (---------------) + I calabria - 0x00384b4d, // n0x086e c0x0000 (---------------) + I caltanissetta - 0x00216403, // n0x086f c0x0000 (---------------) + I cam - 0x00309848, // n0x0870 c0x0000 (---------------) + I campania - 0x0023e18f, // n0x0871 c0x0000 (---------------) + I campidano-medio - 0x0023e54e, // n0x0872 c0x0000 (---------------) + I campidanomedio - 0x002781ca, // n0x0873 c0x0000 (---------------) + I campobasso - 0x002eeed1, // n0x0874 c0x0000 (---------------) + I carbonia-iglesias - 0x002ef350, // n0x0875 c0x0000 (---------------) + I carboniaiglesias - 0x002bf88d, // n0x0876 c0x0000 (---------------) + I carrara-massa - 0x002bfbcc, // n0x0877 c0x0000 (---------------) + I carraramassa - 0x00233987, // n0x0878 c0x0000 (---------------) + I caserta - 0x00343c47, // n0x0879 c0x0000 (---------------) + I catania - 0x0035f909, // n0x087a c0x0000 (---------------) + I catanzaro - 0x0023d642, // n0x087b c0x0000 (---------------) + I cb - 0x00200bc2, // n0x087c c0x0000 (---------------) + I ce - 0x0025318c, // n0x087d c0x0000 (---------------) + I cesena-forli - 0x0025348b, // n0x087e c0x0000 (---------------) + I cesenaforli - 0x00202ac2, // n0x087f c0x0000 (---------------) + I ch - 0x002a7346, // n0x0880 c0x0000 (---------------) + I chieti - 0x00220182, // n0x0881 c0x0000 (---------------) + I ci - 0x00200402, // n0x0882 c0x0000 (---------------) + I cl - 0x0022fe42, // n0x0883 c0x0000 (---------------) + I cn - 0x00200882, // n0x0884 c0x0000 (---------------) + I co - 0x00233284, // n0x0885 c0x0000 (---------------) + I como - 0x0023cf87, // n0x0886 c0x0000 (---------------) + I cosenza - 0x0020b542, // n0x0887 c0x0000 (---------------) + I cr - 0x0023fe07, // n0x0888 c0x0000 (---------------) + I cremona - 0x00242347, // n0x0889 c0x0000 (---------------) + I crotone - 0x0021a142, // n0x088a c0x0000 (---------------) + I cs - 0x0022a082, // n0x088b c0x0000 (---------------) + I ct - 0x00245005, // n0x088c c0x0000 (---------------) + I cuneo - 0x00202882, // n0x088d c0x0000 (---------------) + I cz - 0x00231b4e, // n0x088e c0x0000 (---------------) + I dell-ogliastra - 0x00242dcd, // n0x088f c0x0000 (---------------) + I dellogliastra - 0x0021e083, // n0x0890 c0x0000 (---------------) + I edu - 0x002416ce, // n0x0891 c0x0000 (---------------) + I emilia-romagna - 0x0026a94d, // n0x0892 c0x0000 (---------------) + I emiliaromagna - 0x002141c3, // n0x0893 c0x0000 (---------------) + I emr - 0x00202242, // n0x0894 c0x0000 (---------------) + I en - 0x00276004, // n0x0895 c0x0000 (---------------) + I enna - 0x0033a302, // n0x0896 c0x0000 (---------------) + I fc - 0x00223342, 
// n0x0897 c0x0000 (---------------) + I fe - 0x00353ac5, // n0x0898 c0x0000 (---------------) + I fermo - 0x00245687, // n0x0899 c0x0000 (---------------) + I ferrara - 0x002460c2, // n0x089a c0x0000 (---------------) + I fg - 0x00206f02, // n0x089b c0x0000 (---------------) + I fi - 0x00249907, // n0x089c c0x0000 (---------------) + I firenze - 0x0024de08, // n0x089d c0x0000 (---------------) + I florence - 0x00257f82, // n0x089e c0x0000 (---------------) + I fm - 0x00208ac6, // n0x089f c0x0000 (---------------) + I foggia - 0x0025300c, // n0x08a0 c0x0000 (---------------) + I forli-cesena - 0x0025334b, // n0x08a1 c0x0000 (---------------) + I forlicesena - 0x0022fc02, // n0x08a2 c0x0000 (---------------) + I fr - 0x0025890f, // n0x08a3 c0x0000 (---------------) + I friuli-v-giulia - 0x00258cd0, // n0x08a4 c0x0000 (---------------) + I friuli-ve-giulia - 0x002590cf, // n0x08a5 c0x0000 (---------------) + I friuli-vegiulia - 0x00259495, // n0x08a6 c0x0000 (---------------) + I friuli-venezia-giulia - 0x002599d4, // n0x08a7 c0x0000 (---------------) + I friuli-veneziagiulia - 0x00259ece, // n0x08a8 c0x0000 (---------------) + I friuli-vgiulia - 0x0025a24e, // n0x08a9 c0x0000 (---------------) + I friuliv-giulia - 0x0025a5cf, // n0x08aa c0x0000 (---------------) + I friulive-giulia - 0x0025a98e, // n0x08ab c0x0000 (---------------) + I friulivegiulia - 0x0025ad14, // n0x08ac c0x0000 (---------------) + I friulivenezia-giulia - 0x0025b213, // n0x08ad c0x0000 (---------------) + I friuliveneziagiulia - 0x0025b6cd, // n0x08ae c0x0000 (---------------) + I friulivgiulia - 0x0026e909, // n0x08af c0x0000 (---------------) + I frosinone - 0x002820c3, // n0x08b0 c0x0000 (---------------) + I fvg - 0x002029c2, // n0x08b1 c0x0000 (---------------) + I ge - 0x00285ac5, // n0x08b2 c0x0000 (---------------) + I genoa - 0x0020a0c6, // n0x08b3 c0x0000 (---------------) + I genova - 0x00209ac2, // n0x08b4 c0x0000 (---------------) + I go - 0x00258747, // n0x08b5 c0x0000 (---------------) + I gorizia - 0x00209ac3, // n0x08b6 c0x0000 (---------------) + I gov - 0x00206b02, // n0x08b7 c0x0000 (---------------) + I gr - 0x00231748, // n0x08b8 c0x0000 (---------------) + I grosseto - 0x002ef111, // n0x08b9 c0x0000 (---------------) + I iglesias-carbonia - 0x002ef550, // n0x08ba c0x0000 (---------------) + I iglesiascarbonia - 0x002051c2, // n0x08bb c0x0000 (---------------) + I im - 0x0020c707, // n0x08bc c0x0000 (---------------) + I imperia - 0x002066c2, // n0x08bd c0x0000 (---------------) + I is - 0x002558c7, // n0x08be c0x0000 (---------------) + I isernia - 0x0020c642, // n0x08bf c0x0000 (---------------) + I kr - 0x00365c49, // n0x08c0 c0x0000 (---------------) + I la-spezia - 0x002c0ec7, // n0x08c1 c0x0000 (---------------) + I laquila - 0x0034cd08, // n0x08c2 c0x0000 (---------------) + I laspezia - 0x0020b0c6, // n0x08c3 c0x0000 (---------------) + I latina - 0x002cb443, // n0x08c4 c0x0000 (---------------) + I laz - 0x00341c05, // n0x08c5 c0x0000 (---------------) + I lazio - 0x0021e302, // n0x08c6 c0x0000 (---------------) + I lc - 0x00202042, // n0x08c7 c0x0000 (---------------) + I le - 0x00202045, // n0x08c8 c0x0000 (---------------) + I lecce - 0x0022dd05, // n0x08c9 c0x0000 (---------------) + I lecco - 0x00206682, // n0x08ca c0x0000 (---------------) + I li - 0x00216903, // n0x08cb c0x0000 (---------------) + I lig - 0x002536c7, // n0x08cc c0x0000 (---------------) + I liguria - 0x00376a87, // n0x08cd c0x0000 (---------------) + I livorno - 0x00200782, // n0x08ce c0x0000 (---------------) + I 
lo - 0x002ca104, // n0x08cf c0x0000 (---------------) + I lodi - 0x00210a03, // n0x08d0 c0x0000 (---------------) + I lom - 0x002b5a09, // n0x08d1 c0x0000 (---------------) + I lombardia - 0x00210a08, // n0x08d2 c0x0000 (---------------) + I lombardy - 0x0021a082, // n0x08d3 c0x0000 (---------------) + I lt - 0x00208042, // n0x08d4 c0x0000 (---------------) + I lu - 0x0022c3c7, // n0x08d5 c0x0000 (---------------) + I lucania - 0x0023b945, // n0x08d6 c0x0000 (---------------) + I lucca - 0x00309348, // n0x08d7 c0x0000 (---------------) + I macerata - 0x00225c87, // n0x08d8 c0x0000 (---------------) + I mantova - 0x00204483, // n0x08d9 c0x0000 (---------------) + I mar - 0x0024c886, // n0x08da c0x0000 (---------------) + I marche - 0x002bf70d, // n0x08db c0x0000 (---------------) + I massa-carrara - 0x002bfa8c, // n0x08dc c0x0000 (---------------) + I massacarrara - 0x002b8d46, // n0x08dd c0x0000 (---------------) + I matera - 0x00207b82, // n0x08de c0x0000 (---------------) + I mb - 0x0020f0c2, // n0x08df c0x0000 (---------------) + I mc - 0x00204342, // n0x08e0 c0x0000 (---------------) + I me - 0x0023e00f, // n0x08e1 c0x0000 (---------------) + I medio-campidano - 0x0023e40e, // n0x08e2 c0x0000 (---------------) + I mediocampidano - 0x00299947, // n0x08e3 c0x0000 (---------------) + I messina - 0x00200f42, // n0x08e4 c0x0000 (---------------) + I mi - 0x00331245, // n0x08e5 c0x0000 (---------------) + I milan - 0x00331246, // n0x08e6 c0x0000 (---------------) + I milano - 0x0022c7c2, // n0x08e7 c0x0000 (---------------) + I mn - 0x00205202, // n0x08e8 c0x0000 (---------------) + I mo - 0x00282b86, // n0x08e9 c0x0000 (---------------) + I modena - 0x002b3e43, // n0x08ea c0x0000 (---------------) + I mol - 0x002b8ec6, // n0x08eb c0x0000 (---------------) + I molise - 0x002b70c5, // n0x08ec c0x0000 (---------------) + I monza - 0x002b70cd, // n0x08ed c0x0000 (---------------) + I monza-brianza - 0x002b7695, // n0x08ee c0x0000 (---------------) + I monza-e-della-brianza - 0x002b7c4c, // n0x08ef c0x0000 (---------------) + I monzabrianza - 0x002b7f4d, // n0x08f0 c0x0000 (---------------) + I monzaebrianza - 0x002b8292, // n0x08f1 c0x0000 (---------------) + I monzaedellabrianza - 0x0020e602, // n0x08f2 c0x0000 (---------------) + I ms - 0x00266782, // n0x08f3 c0x0000 (---------------) + I mt - 0x002015c2, // n0x08f4 c0x0000 (---------------) + I na - 0x0026d5c6, // n0x08f5 c0x0000 (---------------) + I naples - 0x002e26c6, // n0x08f6 c0x0000 (---------------) + I napoli - 0x00200cc2, // n0x08f7 c0x0000 (---------------) + I no - 0x0020a146, // n0x08f8 c0x0000 (---------------) + I novara - 0x0020fd82, // n0x08f9 c0x0000 (---------------) + I nu - 0x0037bbc5, // n0x08fa c0x0000 (---------------) + I nuoro - 0x00204f42, // n0x08fb c0x0000 (---------------) + I og - 0x00231c89, // n0x08fc c0x0000 (---------------) + I ogliastra - 0x00239f4c, // n0x08fd c0x0000 (---------------) + I olbia-tempio - 0x0023a28b, // n0x08fe c0x0000 (---------------) + I olbiatempio - 0x00200d02, // n0x08ff c0x0000 (---------------) + I or - 0x0024e248, // n0x0900 c0x0000 (---------------) + I oristano - 0x00201902, // n0x0901 c0x0000 (---------------) + I ot - 0x00200ac2, // n0x0902 c0x0000 (---------------) + I pa - 0x00209006, // n0x0903 c0x0000 (---------------) + I padova - 0x0033c945, // n0x0904 c0x0000 (---------------) + I padua - 0x0022b647, // n0x0905 c0x0000 (---------------) + I palermo - 0x00279585, // n0x0906 c0x0000 (---------------) + I parma - 0x002c5485, // n0x0907 c0x0000 (---------------) + I pavia 
- 0x0021c6c2, // n0x0908 c0x0000 (---------------) + I pc - 0x0022c302, // n0x0909 c0x0000 (---------------) + I pd - 0x0020c782, // n0x090a c0x0000 (---------------) + I pe - 0x00277607, // n0x090b c0x0000 (---------------) + I perugia - 0x0022a24d, // n0x090c c0x0000 (---------------) + I pesaro-urbino - 0x0022a5cc, // n0x090d c0x0000 (---------------) + I pesarourbino - 0x0022f787, // n0x090e c0x0000 (---------------) + I pescara - 0x002a6f42, // n0x090f c0x0000 (---------------) + I pg - 0x00218302, // n0x0910 c0x0000 (---------------) + I pi - 0x00243808, // n0x0911 c0x0000 (---------------) + I piacenza - 0x00222448, // n0x0912 c0x0000 (---------------) + I piedmont - 0x002c8c08, // n0x0913 c0x0000 (---------------) + I piemonte - 0x0027cf84, // n0x0914 c0x0000 (---------------) + I pisa - 0x002a9447, // n0x0915 c0x0000 (---------------) + I pistoia - 0x002cc583, // n0x0916 c0x0000 (---------------) + I pmn - 0x002a2b42, // n0x0917 c0x0000 (---------------) + I pn - 0x002167c2, // n0x0918 c0x0000 (---------------) + I po - 0x002cdd09, // n0x0919 c0x0000 (---------------) + I pordenone - 0x00249287, // n0x091a c0x0000 (---------------) + I potenza - 0x0022ad42, // n0x091b c0x0000 (---------------) + I pr - 0x00337145, // n0x091c c0x0000 (---------------) + I prato - 0x0029ab02, // n0x091d c0x0000 (---------------) + I pt - 0x00234882, // n0x091e c0x0000 (---------------) + I pu - 0x00268e03, // n0x091f c0x0000 (---------------) + I pug - 0x00268e06, // n0x0920 c0x0000 (---------------) + I puglia - 0x002d35c2, // n0x0921 c0x0000 (---------------) + I pv - 0x002d39c2, // n0x0922 c0x0000 (---------------) + I pz - 0x00201082, // n0x0923 c0x0000 (---------------) + I ra - 0x00378446, // n0x0924 c0x0000 (---------------) + I ragusa - 0x00275f47, // n0x0925 c0x0000 (---------------) + I ravenna - 0x00229f02, // n0x0926 c0x0000 (---------------) + I rc - 0x00207082, // n0x0927 c0x0000 (---------------) + I re - 0x00230ecf, // n0x0928 c0x0000 (---------------) + I reggio-calabria - 0x0024150d, // n0x0929 c0x0000 (---------------) + I reggio-emilia - 0x0021face, // n0x092a c0x0000 (---------------) + I reggiocalabria - 0x0026a7cc, // n0x092b c0x0000 (---------------) + I reggioemilia - 0x00204042, // n0x092c c0x0000 (---------------) + I rg - 0x00200d42, // n0x092d c0x0000 (---------------) + I ri - 0x00229385, // n0x092e c0x0000 (---------------) + I rieti - 0x003860c6, // n0x092f c0x0000 (---------------) + I rimini - 0x00212642, // n0x0930 c0x0000 (---------------) + I rm - 0x002065c2, // n0x0931 c0x0000 (---------------) + I rn - 0x00200982, // n0x0932 c0x0000 (---------------) + I ro - 0x00225c04, // n0x0933 c0x0000 (---------------) + I roma - 0x002e1ac4, // n0x0934 c0x0000 (---------------) + I rome - 0x002e4b86, // n0x0935 c0x0000 (---------------) + I rovigo - 0x00203a82, // n0x0936 c0x0000 (---------------) + I sa - 0x0027e2c7, // n0x0937 c0x0000 (---------------) + I salerno - 0x0022a2c3, // n0x0938 c0x0000 (---------------) + I sar - 0x00248748, // n0x0939 c0x0000 (---------------) + I sardegna - 0x0024a108, // n0x093a c0x0000 (---------------) + I sardinia - 0x00252c07, // n0x093b c0x0000 (---------------) + I sassari - 0x00262306, // n0x093c c0x0000 (---------------) + I savona - 0x002058c2, // n0x093d c0x0000 (---------------) + I si - 0x002203c3, // n0x093e c0x0000 (---------------) + I sic - 0x002203c7, // n0x093f c0x0000 (---------------) + I sicilia - 0x002636c6, // n0x0940 c0x0000 (---------------) + I sicily - 0x00285245, // n0x0941 c0x0000 (---------------) + I siena - 
0x002d0688, // n0x0942 c0x0000 (---------------) + I siracusa - 0x00209f02, // n0x0943 c0x0000 (---------------) + I so - 0x002ee7c7, // n0x0944 c0x0000 (---------------) + I sondrio - 0x0022e802, // n0x0945 c0x0000 (---------------) + I sp - 0x002dc282, // n0x0946 c0x0000 (---------------) + I sr - 0x00203a42, // n0x0947 c0x0000 (---------------) + I ss - 0x002b2989, // n0x0948 c0x0000 (---------------) + I suedtirol - 0x00201e82, // n0x0949 c0x0000 (---------------) + I sv - 0x00202542, // n0x094a c0x0000 (---------------) + I ta - 0x0022fa03, // n0x094b c0x0000 (---------------) + I taa - 0x00344c87, // n0x094c c0x0000 (---------------) + I taranto - 0x00207302, // n0x094d c0x0000 (---------------) + I te - 0x0023a0cc, // n0x094e c0x0000 (---------------) + I tempio-olbia - 0x0023a3cb, // n0x094f c0x0000 (---------------) + I tempioolbia - 0x002b8dc6, // n0x0950 c0x0000 (---------------) + I teramo - 0x002d8e05, // n0x0951 c0x0000 (---------------) + I terni - 0x00201942, // n0x0952 c0x0000 (---------------) + I tn - 0x00200302, // n0x0953 c0x0000 (---------------) + I to - 0x002a6206, // n0x0954 c0x0000 (---------------) + I torino - 0x00223503, // n0x0955 c0x0000 (---------------) + I tos - 0x00343e47, // n0x0956 c0x0000 (---------------) + I toscana - 0x0020df02, // n0x0957 c0x0000 (---------------) + I tp - 0x00200942, // n0x0958 c0x0000 (---------------) + I tr - 0x0027b555, // n0x0959 c0x0000 (---------------) + I trani-andria-barletta - 0x0025ffd5, // n0x095a c0x0000 (---------------) + I trani-barletta-andria - 0x0027d2d3, // n0x095b c0x0000 (---------------) + I traniandriabarletta - 0x00260513, // n0x095c c0x0000 (---------------) + I tranibarlettaandria - 0x0028f3c7, // n0x095d c0x0000 (---------------) + I trapani - 0x0029e608, // n0x095e c0x0000 (---------------) + I trentino - 0x0029e610, // n0x095f c0x0000 (---------------) + I trentino-a-adige - 0x0029f0cf, // n0x0960 c0x0000 (---------------) + I trentino-aadige - 0x0029fb93, // n0x0961 c0x0000 (---------------) + I trentino-alto-adige - 0x002e3492, // n0x0962 c0x0000 (---------------) + I trentino-altoadige - 0x003074d0, // n0x0963 c0x0000 (---------------) + I trentino-s-tirol - 0x0032bf8f, // n0x0964 c0x0000 (---------------) + I trentino-stirol - 0x0032fdd2, // n0x0965 c0x0000 (---------------) + I trentino-sud-tirol - 0x002a13d1, // n0x0966 c0x0000 (---------------) + I trentino-sudtirol - 0x002aa113, // n0x0967 c0x0000 (---------------) + I trentino-sued-tirol - 0x002b2752, // n0x0968 c0x0000 (---------------) + I trentino-suedtirol - 0x002bb58f, // n0x0969 c0x0000 (---------------) + I trentinoa-adige - 0x002c17ce, // n0x096a c0x0000 (---------------) + I trentinoaadige - 0x002c2592, // n0x096b c0x0000 (---------------) + I trentinoalto-adige - 0x002c8211, // n0x096c c0x0000 (---------------) + I trentinoaltoadige - 0x002d114f, // n0x096d c0x0000 (---------------) + I trentinos-tirol - 0x002d364e, // n0x096e c0x0000 (---------------) + I trentinostirol - 0x0031b2d1, // n0x096f c0x0000 (---------------) + I trentinosud-tirol - 0x002d5750, // n0x0970 c0x0000 (---------------) + I trentinosudtirol - 0x002de012, // n0x0971 c0x0000 (---------------) + I trentinosued-tirol - 0x002e1151, // n0x0972 c0x0000 (---------------) + I trentinosuedtirol - 0x002f10c6, // n0x0973 c0x0000 (---------------) + I trento - 0x0037a787, // n0x0974 c0x0000 (---------------) + I treviso - 0x00383747, // n0x0975 c0x0000 (---------------) + I trieste - 0x00201a82, // n0x0976 c0x0000 (---------------) + I ts - 0x002b4e85, // n0x0977 c0x0000 
(---------------) + I turin - 0x002e4e47, // n0x0978 c0x0000 (---------------) + I tuscany - 0x0028dc82, // n0x0979 c0x0000 (---------------) + I tv - 0x00200802, // n0x097a c0x0000 (---------------) + I ud - 0x002c0045, // n0x097b c0x0000 (---------------) + I udine - 0x00221483, // n0x097c c0x0000 (---------------) + I umb - 0x00263e86, // n0x097d c0x0000 (---------------) + I umbria - 0x0022a40d, // n0x097e c0x0000 (---------------) + I urbino-pesaro - 0x0022a74c, // n0x097f c0x0000 (---------------) + I urbinopesaro - 0x00203242, // n0x0980 c0x0000 (---------------) + I va - 0x00382e4b, // n0x0981 c0x0000 (---------------) + I val-d-aosta - 0x002a59ca, // n0x0982 c0x0000 (---------------) + I val-daosta - 0x0020910a, // n0x0983 c0x0000 (---------------) + I vald-aosta - 0x00225dc9, // n0x0984 c0x0000 (---------------) + I valdaosta - 0x002cd68b, // n0x0985 c0x0000 (---------------) + I valle-aosta - 0x002e578d, // n0x0986 c0x0000 (---------------) + I valle-d-aosta - 0x0036618c, // n0x0987 c0x0000 (---------------) + I valle-daosta - 0x0023da0a, // n0x0988 c0x0000 (---------------) + I valleaosta - 0x0024078c, // n0x0989 c0x0000 (---------------) + I valled-aosta - 0x0024640b, // n0x098a c0x0000 (---------------) + I valledaosta - 0x0025528c, // n0x098b c0x0000 (---------------) + I vallee-aoste - 0x0026730b, // n0x098c c0x0000 (---------------) + I valleeaoste - 0x0026e083, // n0x098d c0x0000 (---------------) + I vao - 0x002dc306, // n0x098e c0x0000 (---------------) + I varese - 0x002e5f42, // n0x098f c0x0000 (---------------) + I vb - 0x002e6442, // n0x0990 c0x0000 (---------------) + I vc - 0x0023b883, // n0x0991 c0x0000 (---------------) + I vda - 0x00203f02, // n0x0992 c0x0000 (---------------) + I ve - 0x00203f03, // n0x0993 c0x0000 (---------------) + I ven - 0x002185c6, // n0x0994 c0x0000 (---------------) + I veneto - 0x00259647, // n0x0995 c0x0000 (---------------) + I venezia - 0x00268586, // n0x0996 c0x0000 (---------------) + I venice - 0x00227e88, // n0x0997 c0x0000 (---------------) + I verbania - 0x0028eb08, // n0x0998 c0x0000 (---------------) + I vercelli - 0x002e7686, // n0x0999 c0x0000 (---------------) + I verona - 0x00201642, // n0x099a c0x0000 (---------------) + I vi - 0x002eb7cd, // n0x099b c0x0000 (---------------) + I vibo-valentia - 0x002ebb0c, // n0x099c c0x0000 (---------------) + I vibovalentia - 0x00252607, // n0x099d c0x0000 (---------------) + I vicenza - 0x002f0a87, // n0x099e c0x0000 (---------------) + I viterbo - 0x00226342, // n0x099f c0x0000 (---------------) + I vr - 0x0023ccc2, // n0x09a0 c0x0000 (---------------) + I vs - 0x002170c2, // n0x09a1 c0x0000 (---------------) + I vt - 0x00203ec2, // n0x09a2 c0x0000 (---------------) + I vv - 0x00200882, // n0x09a3 c0x0000 (---------------) + I co - 0x00218643, // n0x09a4 c0x0000 (---------------) + I net - 0x0024d043, // n0x09a5 c0x0000 (---------------) + I org - 0x00232dc3, // n0x09a6 c0x0000 (---------------) + I com - 0x0021e083, // n0x09a7 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x09a8 c0x0000 (---------------) + I gov - 0x00240443, // n0x09a9 c0x0000 (---------------) + I mil - 0x00267944, // n0x09aa c0x0000 (---------------) + I name - 0x00218643, // n0x09ab c0x0000 (---------------) + I net - 0x0024d043, // n0x09ac c0x0000 (---------------) + I org - 0x00251983, // n0x09ad c0x0000 (---------------) + I sch - 0x00200b82, // n0x09ae c0x0000 (---------------) + I ac - 0x00202902, // n0x09af c0x0000 (---------------) + I ad - 0x1c665d85, // n0x09b0 c0x0071 (n0x0a1d-n0x0a51) + I 
aichi - 0x1ca12185, // n0x09b1 c0x0072 (n0x0a51-n0x0a6d) + I akita - 0x1ce6e0c6, // n0x09b2 c0x0073 (n0x0a6d-n0x0a83) + I aomori - 0x0009e448, // n0x09b3 c0x0000 (---------------) + blogspot - 0x1d2a6985, // n0x09b4 c0x0074 (n0x0a83-n0x0abd) + I chiba - 0x00200882, // n0x09b5 c0x0000 (---------------) + I co - 0x00205742, // n0x09b6 c0x0000 (---------------) + I ed - 0x1d606d05, // n0x09b7 c0x0075 (n0x0abd-n0x0ad3) + I ehime - 0x1da77205, // n0x09b8 c0x0076 (n0x0ad3-n0x0ae2) + I fukui - 0x1de77a87, // n0x09b9 c0x0077 (n0x0ae2-n0x0b21) + I fukuoka - 0x1e279049, // n0x09ba c0x0078 (n0x0b21-n0x0b54) + I fukushima - 0x1e669a44, // n0x09bb c0x0079 (n0x0b54-n0x0b7a) + I gifu - 0x00209ac2, // n0x09bc c0x0000 (---------------) + I go - 0x00206b02, // n0x09bd c0x0000 (---------------) + I gr - 0x1ea4c7c5, // n0x09be c0x007a (n0x0b7a-n0x0b9e) + I gunma - 0x1ee8aa89, // n0x09bf c0x007b (n0x0b9e-n0x0bb7) + I hiroshima - 0x1f20d548, // n0x09c0 c0x007c (n0x0bb7-n0x0c45) + I hokkaido - 0x1f6a0045, // n0x09c1 c0x007d (n0x0c45-n0x0c73) + I hyogo - 0x1fb16ec7, // n0x09c2 c0x007e (n0x0c73-n0x0ca6) + I ibaraki - 0x1fe1a288, // n0x09c3 c0x007f (n0x0ca6-n0x0cb9) + I ishikawa - 0x2034f5c5, // n0x09c4 c0x0080 (n0x0cb9-n0x0cdb) + I iwate - 0x206038c6, // n0x09c5 c0x0081 (n0x0cdb-n0x0cea) + I kagawa - 0x20b60c89, // n0x09c6 c0x0082 (n0x0cea-n0x0cfe) + I kagoshima - 0x20ecc148, // n0x09c7 c0x0083 (n0x0cfe-n0x0d1c) + I kanagawa - 0x212ac548, // n0x09c8 c0x0084 (n0x0d1c-n0x0d1d)* o I kawasaki - 0x2168b0ca, // n0x09c9 c0x0085 (n0x0d1d-n0x0d1e)* o I kitakyushu - 0x21a6f144, // n0x09ca c0x0086 (n0x0d1e-n0x0d1f)* o I kobe - 0x21ebecc5, // n0x09cb c0x0087 (n0x0d1f-n0x0d3e) + I kochi - 0x222bf488, // n0x09cc c0x0088 (n0x0d3e-n0x0d58) + I kumamoto - 0x22662fc5, // n0x09cd c0x0089 (n0x0d58-n0x0d77) + I kyoto - 0x00219682, // n0x09ce c0x0000 (---------------) + I lg - 0x22a01f83, // n0x09cf c0x008a (n0x0d77-n0x0d95) + I mie - 0x22e977c6, // n0x09d0 c0x008b (n0x0d95-n0x0db6) + I miyagi - 0x232475c8, // n0x09d1 c0x008c (n0x0db6-n0x0dd1) + I miyazaki - 0x2376fe86, // n0x09d2 c0x008d (n0x0dd1-n0x0e1c) + I nagano - 0x23a76088, // n0x09d3 c0x008e (n0x0e1c-n0x0e32) + I nagasaki - 0x23f7bf46, // n0x09d4 c0x008f (n0x0e32-n0x0e33)* o I nagoya - 0x242e7784, // n0x09d5 c0x0090 (n0x0e33-n0x0e59) + I nara - 0x00209e82, // n0x09d6 c0x0000 (---------------) + I ne - 0x247589c7, // n0x09d7 c0x0091 (n0x0e59-n0x0e7b) + I niigata - 0x24aa2444, // n0x09d8 c0x0092 (n0x0e7b-n0x0e8e) + I oita - 0x24e71f47, // n0x09d9 c0x0093 (n0x0e8e-n0x0ea8) + I okayama - 0x252130c7, // n0x09da c0x0094 (n0x0ea8-n0x0ed2) + I okinawa - 0x00200d02, // n0x09db c0x0000 (---------------) + I or - 0x256905c5, // n0x09dc c0x0095 (n0x0ed2-n0x0f04) + I osaka - 0x25aaf744, // n0x09dd c0x0096 (n0x0f04-n0x0f1e) + I saga - 0x25f0c587, // n0x09de c0x0097 (n0x0f1e-n0x0f63) + I saitama - 0x26243347, // n0x09df c0x0098 (n0x0f63-n0x0f64)* o I sapporo - 0x2667af06, // n0x09e0 c0x0099 (n0x0f64-n0x0f65)* o I sendai - 0x26a5c545, // n0x09e1 c0x009a (n0x0f65-n0x0f7c) + I shiga - 0x26e8ab87, // n0x09e2 c0x009b (n0x0f7c-n0x0f93) + I shimane - 0x272f07c8, // n0x09e3 c0x009c (n0x0f93-n0x0fb7) + I shizuoka - 0x2774d487, // n0x09e4 c0x009d (n0x0fb7-n0x0fd6) + I tochigi - 0x27ac69c9, // n0x09e5 c0x009e (n0x0fd6-n0x0fe7) + I tokushima - 0x27ebc8c5, // n0x09e6 c0x009f (n0x0fe7-n0x1020) + I tokyo - 0x282f11c7, // n0x09e7 c0x00a0 (n0x1020-n0x102d) + I tottori - 0x286873c6, // n0x09e8 c0x00a1 (n0x102d-n0x1045) + I toyama - 0x28b0a988, // n0x09e9 c0x00a2 (n0x1045-n0x1062) + I wakayama - 0x0024534d, // 
n0x09ea c0x0000 (---------------) + I xn--0trq7p7nn - 0x00255c89, // n0x09eb c0x0000 (---------------) + I xn--1ctwo - 0x0026bc4b, // n0x09ec c0x0000 (---------------) + I xn--1lqs03n - 0x0026c98b, // n0x09ed c0x0000 (---------------) + I xn--1lqs71d - 0x0029960b, // n0x09ee c0x0000 (---------------) + I xn--2m4a15e - 0x002b9c0b, // n0x09ef c0x0000 (---------------) + I xn--32vp30h - 0x002f490b, // n0x09f0 c0x0000 (---------------) + I xn--4it168d - 0x002f4bcb, // n0x09f1 c0x0000 (---------------) + I xn--4it797k - 0x002f5549, // n0x09f2 c0x0000 (---------------) + I xn--4pvxs - 0x002f694b, // n0x09f3 c0x0000 (---------------) + I xn--5js045d - 0x002f6c0b, // n0x09f4 c0x0000 (---------------) + I xn--5rtp49c - 0x002f714b, // n0x09f5 c0x0000 (---------------) + I xn--5rtq34k - 0x002f754a, // n0x09f6 c0x0000 (---------------) + I xn--6btw5a - 0x002f7a8a, // n0x09f7 c0x0000 (---------------) + I xn--6orx2r - 0x002f808c, // n0x09f8 c0x0000 (---------------) + I xn--7t0a264c - 0x002fe0cb, // n0x09f9 c0x0000 (---------------) + I xn--8ltr62k - 0x002fe50a, // n0x09fa c0x0000 (---------------) + I xn--8pvr4u - 0x0030d64a, // n0x09fb c0x0000 (---------------) + I xn--c3s14m - 0x0031350e, // n0x09fc c0x0000 (---------------) + I xn--d5qv7z876c - 0x0031474e, // n0x09fd c0x0000 (---------------) + I xn--djrs72d6uy - 0x00314aca, // n0x09fe c0x0000 (---------------) + I xn--djty4k - 0x0031640a, // n0x09ff c0x0000 (---------------) + I xn--efvn9s - 0x0031768b, // n0x0a00 c0x0000 (---------------) + I xn--ehqz56n - 0x0031794b, // n0x0a01 c0x0000 (---------------) + I xn--elqq16h - 0x0031838b, // n0x0a02 c0x0000 (---------------) + I xn--f6qx53a - 0x003285cb, // n0x0a03 c0x0000 (---------------) + I xn--k7yn95e - 0x00328bca, // n0x0a04 c0x0000 (---------------) + I xn--kbrq7o - 0x0032988b, // n0x0a05 c0x0000 (---------------) + I xn--klt787d - 0x00329b4a, // n0x0a06 c0x0000 (---------------) + I xn--kltp7d - 0x00329dca, // n0x0a07 c0x0000 (---------------) + I xn--kltx9a - 0x0032a04a, // n0x0a08 c0x0000 (---------------) + I xn--klty5x - 0x00340acb, // n0x0a09 c0x0000 (---------------) + I xn--mkru45i - 0x00348a4b, // n0x0a0a c0x0000 (---------------) + I xn--nit225k - 0x0034ac0e, // n0x0a0b c0x0000 (---------------) + I xn--ntso0iqx3a - 0x0034af8b, // n0x0a0c c0x0000 (---------------) + I xn--ntsq17g - 0x00350acb, // n0x0a0d c0x0000 (---------------) + I xn--pssu33l - 0x00351b4b, // n0x0a0e c0x0000 (---------------) + I xn--qqqt11m - 0x0035468a, // n0x0a0f c0x0000 (---------------) + I xn--rht27z - 0x00354909, // n0x0a10 c0x0000 (---------------) + I xn--rht3d - 0x00354b4a, // n0x0a11 c0x0000 (---------------) + I xn--rht61e - 0x0035600a, // n0x0a12 c0x0000 (---------------) + I xn--rny31h - 0x0037180b, // n0x0a13 c0x0000 (---------------) + I xn--tor131o - 0x003732cb, // n0x0a14 c0x0000 (---------------) + I xn--uist22h - 0x0037380a, // n0x0a15 c0x0000 (---------------) + I xn--uisz3g - 0x0037454b, // n0x0a16 c0x0000 (---------------) + I xn--uuwu58a - 0x0037904b, // n0x0a17 c0x0000 (---------------) + I xn--vgu402c - 0x00383f0b, // n0x0a18 c0x0000 (---------------) + I xn--zbx025d - 0x28e797c8, // n0x0a19 c0x00a3 (n0x1062-n0x1084) + I yamagata - 0x292809c9, // n0x0a1a c0x00a4 (n0x1084-n0x1094) + I yamaguchi - 0x29779ac9, // n0x0a1b c0x00a5 (n0x1094-n0x10b0) + I yamanashi - 0x29b45bc8, // n0x0a1c c0x00a6 (n0x10b0-n0x10b1)* o I yokohama - 0x002a8745, // n0x0a1d c0x0000 (---------------) + I aisai - 0x00203583, // n0x0a1e c0x0000 (---------------) + I ama - 0x00202d84, // n0x0a1f c0x0000 (---------------) 
+ I anjo - 0x00375ac5, // n0x0a20 c0x0000 (---------------) + I asuke - 0x00269786, // n0x0a21 c0x0000 (---------------) + I chiryu - 0x00276ac5, // n0x0a22 c0x0000 (---------------) + I chita - 0x002802c4, // n0x0a23 c0x0000 (---------------) + I fuso - 0x00258648, // n0x0a24 c0x0000 (---------------) + I gamagori - 0x00354285, // n0x0a25 c0x0000 (---------------) + I handa - 0x00289044, // n0x0a26 c0x0000 (---------------) + I hazu - 0x0024f407, // n0x0a27 c0x0000 (---------------) + I hekinan - 0x00292c0a, // n0x0a28 c0x0000 (---------------) + I higashiura - 0x0030b0ca, // n0x0a29 c0x0000 (---------------) + I ichinomiya - 0x002d8a47, // n0x0a2a c0x0000 (---------------) + I inazawa - 0x00211b47, // n0x0a2b c0x0000 (---------------) + I inuyama - 0x002e3187, // n0x0a2c c0x0000 (---------------) + I isshiki - 0x0026b507, // n0x0a2d c0x0000 (---------------) + I iwakura - 0x0021c585, // n0x0a2e c0x0000 (---------------) + I kanie - 0x0036d0c6, // n0x0a2f c0x0000 (---------------) + I kariya - 0x002b6b07, // n0x0a30 c0x0000 (---------------) + I kasugai - 0x00239784, // n0x0a31 c0x0000 (---------------) + I kira - 0x002f3186, // n0x0a32 c0x0000 (---------------) + I kiyosu - 0x003027c6, // n0x0a33 c0x0000 (---------------) + I komaki - 0x002ca805, // n0x0a34 c0x0000 (---------------) + I konan - 0x0029bcc4, // n0x0a35 c0x0000 (---------------) + I kota - 0x002985c6, // n0x0a36 c0x0000 (---------------) + I mihama - 0x00291c87, // n0x0a37 c0x0000 (---------------) + I miyoshi - 0x00221a46, // n0x0a38 c0x0000 (---------------) + I nishio - 0x00227147, // n0x0a39 c0x0000 (---------------) + I nisshin - 0x002078c3, // n0x0a3a c0x0000 (---------------) + I obu - 0x0024ea46, // n0x0a3b c0x0000 (---------------) + I oguchi - 0x00236005, // n0x0a3c c0x0000 (---------------) + I oharu - 0x00277b87, // n0x0a3d c0x0000 (---------------) + I okazaki - 0x002b1d4a, // n0x0a3e c0x0000 (---------------) + I owariasahi - 0x00231844, // n0x0a3f c0x0000 (---------------) + I seto - 0x00219888, // n0x0a40 c0x0000 (---------------) + I shikatsu - 0x002c5849, // n0x0a41 c0x0000 (---------------) + I shinshiro - 0x002e9807, // n0x0a42 c0x0000 (---------------) + I shitara - 0x00203006, // n0x0a43 c0x0000 (---------------) + I tahara - 0x00322e48, // n0x0a44 c0x0000 (---------------) + I takahama - 0x00213449, // n0x0a45 c0x0000 (---------------) + I tobishima - 0x00295e44, // n0x0a46 c0x0000 (---------------) + I toei - 0x002186c4, // n0x0a47 c0x0000 (---------------) + I togo - 0x002f1d45, // n0x0a48 c0x0000 (---------------) + I tokai - 0x002b3248, // n0x0a49 c0x0000 (---------------) + I tokoname - 0x002b4307, // n0x0a4a c0x0000 (---------------) + I toyoake - 0x002bc609, // n0x0a4b c0x0000 (---------------) + I toyohashi - 0x00327fc8, // n0x0a4c c0x0000 (---------------) + I toyokawa - 0x003245c6, // n0x0a4d c0x0000 (---------------) + I toyone - 0x00244606, // n0x0a4e c0x0000 (---------------) + I toyota - 0x0028ca08, // n0x0a4f c0x0000 (---------------) + I tsushima - 0x003332c6, // n0x0a50 c0x0000 (---------------) + I yatomi - 0x00212185, // n0x0a51 c0x0000 (---------------) + I akita - 0x0027afc6, // n0x0a52 c0x0000 (---------------) + I daisen - 0x00272348, // n0x0a53 c0x0000 (---------------) + I fujisato - 0x0021e486, // n0x0a54 c0x0000 (---------------) + I gojome - 0x00270f0b, // n0x0a55 c0x0000 (---------------) + I hachirogata - 0x00283e46, // n0x0a56 c0x0000 (---------------) + I happou - 0x0028df4d, // n0x0a57 c0x0000 (---------------) + I higashinaruse - 0x00206445, // n0x0a58 c0x0000 
(---------------) + I honjo - 0x0029c3c6, // n0x0a59 c0x0000 (---------------) + I honjyo - 0x00202405, // n0x0a5a c0x0000 (---------------) + I ikawa - 0x0036a389, // n0x0a5b c0x0000 (---------------) + I kamikoani - 0x0036a247, // n0x0a5c c0x0000 (---------------) + I kamioka - 0x003310c8, // n0x0a5d c0x0000 (---------------) + I katagami - 0x00314146, // n0x0a5e c0x0000 (---------------) + I kazuno - 0x0028d389, // n0x0a5f c0x0000 (---------------) + I kitaakita - 0x002e1846, // n0x0a60 c0x0000 (---------------) + I kosaka - 0x002b1cc5, // n0x0a61 c0x0000 (---------------) + I kyowa - 0x002467c6, // n0x0a62 c0x0000 (---------------) + I misato - 0x002a5f86, // n0x0a63 c0x0000 (---------------) + I mitane - 0x002ba6c9, // n0x0a64 c0x0000 (---------------) + I moriyoshi - 0x00273706, // n0x0a65 c0x0000 (---------------) + I nikaho - 0x00257207, // n0x0a66 c0x0000 (---------------) + I noshiro - 0x00294f85, // n0x0a67 c0x0000 (---------------) + I odate - 0x00207743, // n0x0a68 c0x0000 (---------------) + I oga - 0x00271085, // n0x0a69 c0x0000 (---------------) + I ogata - 0x002bf347, // n0x0a6a c0x0000 (---------------) + I semboku - 0x002db006, // n0x0a6b c0x0000 (---------------) + I yokote - 0x00206349, // n0x0a6c c0x0000 (---------------) + I yurihonjo - 0x0026e0c6, // n0x0a6d c0x0000 (---------------) + I aomori - 0x00264206, // n0x0a6e c0x0000 (---------------) + I gonohe - 0x0024f249, // n0x0a6f c0x0000 (---------------) + I hachinohe - 0x0027a9c9, // n0x0a70 c0x0000 (---------------) + I hashikami - 0x00295907, // n0x0a71 c0x0000 (---------------) + I hiranai - 0x00344608, // n0x0a72 c0x0000 (---------------) + I hirosaki - 0x002e0449, // n0x0a73 c0x0000 (---------------) + I itayanagi - 0x00278908, // n0x0a74 c0x0000 (---------------) + I kuroishi - 0x003482c6, // n0x0a75 c0x0000 (---------------) + I misawa - 0x002c2e05, // n0x0a76 c0x0000 (---------------) + I mutsu - 0x0023d20a, // n0x0a77 c0x0000 (---------------) + I nakadomari - 0x00264286, // n0x0a78 c0x0000 (---------------) + I noheji - 0x002ec746, // n0x0a79 c0x0000 (---------------) + I oirase - 0x00292605, // n0x0a7a c0x0000 (---------------) + I owani - 0x00241b48, // n0x0a7b c0x0000 (---------------) + I rokunohe - 0x0035a387, // n0x0a7c c0x0000 (---------------) + I sannohe - 0x00379c4a, // n0x0a7d c0x0000 (---------------) + I shichinohe - 0x0024ac86, // n0x0a7e c0x0000 (---------------) + I shingo - 0x00305fc5, // n0x0a7f c0x0000 (---------------) + I takko - 0x002fd846, // n0x0a80 c0x0000 (---------------) + I towada - 0x0025de07, // n0x0a81 c0x0000 (---------------) + I tsugaru - 0x00202ec7, // n0x0a82 c0x0000 (---------------) + I tsuruta - 0x00305685, // n0x0a83 c0x0000 (---------------) + I abiko - 0x002b1e85, // n0x0a84 c0x0000 (---------------) + I asahi - 0x002bd846, // n0x0a85 c0x0000 (---------------) + I chonan - 0x002cb5c6, // n0x0a86 c0x0000 (---------------) + I chosei - 0x002dfb86, // n0x0a87 c0x0000 (---------------) + I choshi - 0x00347284, // n0x0a88 c0x0000 (---------------) + I chuo - 0x00279e49, // n0x0a89 c0x0000 (---------------) + I funabashi - 0x00281886, // n0x0a8a c0x0000 (---------------) + I futtsu - 0x00231f0a, // n0x0a8b c0x0000 (---------------) + I hanamigawa - 0x00287d08, // n0x0a8c c0x0000 (---------------) + I ichihara - 0x0034fec8, // n0x0a8d c0x0000 (---------------) + I ichikawa - 0x0030b0ca, // n0x0a8e c0x0000 (---------------) + I ichinomiya - 0x00369345, // n0x0a8f c0x0000 (---------------) + I inzai - 0x00291bc5, // n0x0a90 c0x0000 (---------------) + I isumi - 
0x0020d808, // n0x0a91 c0x0000 (---------------) + I kamagaya - 0x002bea48, // n0x0a92 c0x0000 (---------------) + I kamogawa - 0x00375e87, // n0x0a93 c0x0000 (---------------) + I kashiwa - 0x00219046, // n0x0a94 c0x0000 (---------------) + I katori - 0x00304a08, // n0x0a95 c0x0000 (---------------) + I katsuura - 0x002fbf07, // n0x0a96 c0x0000 (---------------) + I kimitsu - 0x00277cc8, // n0x0a97 c0x0000 (---------------) + I kisarazu - 0x002a3306, // n0x0a98 c0x0000 (---------------) + I kozaki - 0x0027c108, // n0x0a99 c0x0000 (---------------) + I kujukuri - 0x00288b46, // n0x0a9a c0x0000 (---------------) + I kyonan - 0x002cca07, // n0x0a9b c0x0000 (---------------) + I matsudo - 0x00279a46, // n0x0a9c c0x0000 (---------------) + I midori - 0x002985c6, // n0x0a9d c0x0000 (---------------) + I mihama - 0x003333ca, // n0x0a9e c0x0000 (---------------) + I minamiboso - 0x00233306, // n0x0a9f c0x0000 (---------------) + I mobara - 0x002c2e09, // n0x0aa0 c0x0000 (---------------) + I mutsuzawa - 0x00364746, // n0x0aa1 c0x0000 (---------------) + I nagara - 0x002f97ca, // n0x0aa2 c0x0000 (---------------) + I nagareyama - 0x002e7789, // n0x0aa3 c0x0000 (---------------) + I narashino - 0x00306ec6, // n0x0aa4 c0x0000 (---------------) + I narita - 0x00238104, // n0x0aa5 c0x0000 (---------------) + I noda - 0x00285b8d, // n0x0aa6 c0x0000 (---------------) + I oamishirasato - 0x00280c47, // n0x0aa7 c0x0000 (---------------) + I omigawa - 0x003090c6, // n0x0aa8 c0x0000 (---------------) + I onjuku - 0x002ac405, // n0x0aa9 c0x0000 (---------------) + I otaki - 0x002e18c5, // n0x0aaa c0x0000 (---------------) + I sakae - 0x0020e0c6, // n0x0aab c0x0000 (---------------) + I sakura - 0x002ea189, // n0x0aac c0x0000 (---------------) + I shimofusa - 0x002d2a87, // n0x0aad c0x0000 (---------------) + I shirako - 0x00272f86, // n0x0aae c0x0000 (---------------) + I shiroi - 0x002e7d86, // n0x0aaf c0x0000 (---------------) + I shisui - 0x00383cc9, // n0x0ab0 c0x0000 (---------------) + I sodegaura - 0x00322bc4, // n0x0ab1 c0x0000 (---------------) + I sosa - 0x0024db04, // n0x0ab2 c0x0000 (---------------) + I tako - 0x002128c8, // n0x0ab3 c0x0000 (---------------) + I tateyama - 0x00273bc6, // n0x0ab4 c0x0000 (---------------) + I togane - 0x00294a08, // n0x0ab5 c0x0000 (---------------) + I tohnosho - 0x00246748, // n0x0ab6 c0x0000 (---------------) + I tomisato - 0x002122c7, // n0x0ab7 c0x0000 (---------------) + I urayasu - 0x002c3a49, // n0x0ab8 c0x0000 (---------------) + I yachimata - 0x00202a47, // n0x0ab9 c0x0000 (---------------) + I yachiyo - 0x002a684a, // n0x0aba c0x0000 (---------------) + I yokaichiba - 0x002d698f, // n0x0abb c0x0000 (---------------) + I yokoshibahikari - 0x0024b64a, // n0x0abc c0x0000 (---------------) + I yotsukaido - 0x00229185, // n0x0abd c0x0000 (---------------) + I ainan - 0x00272585, // n0x0abe c0x0000 (---------------) + I honai - 0x00218f05, // n0x0abf c0x0000 (---------------) + I ikata - 0x00288dc7, // n0x0ac0 c0x0000 (---------------) + I imabari - 0x00202b43, // n0x0ac1 c0x0000 (---------------) + I iyo - 0x00344808, // n0x0ac2 c0x0000 (---------------) + I kamijima - 0x0029b286, // n0x0ac3 c0x0000 (---------------) + I kihoku - 0x0029b389, // n0x0ac4 c0x0000 (---------------) + I kumakogen - 0x0037fe06, // n0x0ac5 c0x0000 (---------------) + I masaki - 0x002ba507, // n0x0ac6 c0x0000 (---------------) + I matsuno - 0x0028d149, // n0x0ac7 c0x0000 (---------------) + I matsuyama - 0x00330fc8, // n0x0ac8 c0x0000 (---------------) + I namikata - 0x0036a547, // 
n0x0ac9 c0x0000 (---------------) + I niihama - 0x00347343, // n0x0aca c0x0000 (---------------) + I ozu - 0x002a87c5, // n0x0acb c0x0000 (---------------) + I saijo - 0x002d68c5, // n0x0acc c0x0000 (---------------) + I seiyo - 0x00348e8b, // n0x0acd c0x0000 (---------------) + I shikokuchuo - 0x00263084, // n0x0ace c0x0000 (---------------) + I tobe - 0x0021dec4, // n0x0acf c0x0000 (---------------) + I toon - 0x00270b06, // n0x0ad0 c0x0000 (---------------) + I uchiko - 0x002c96c7, // n0x0ad1 c0x0000 (---------------) + I uwajima - 0x0035830a, // n0x0ad2 c0x0000 (---------------) + I yawatahama - 0x00216d87, // n0x0ad3 c0x0000 (---------------) + I echizen - 0x00295ec7, // n0x0ad4 c0x0000 (---------------) + I eiheiji - 0x00277205, // n0x0ad5 c0x0000 (---------------) + I fukui - 0x002056c5, // n0x0ad6 c0x0000 (---------------) + I ikeda - 0x0021d4c9, // n0x0ad7 c0x0000 (---------------) + I katsuyama - 0x002985c6, // n0x0ad8 c0x0000 (---------------) + I mihama - 0x00216c0d, // n0x0ad9 c0x0000 (---------------) + I minamiechizen - 0x00285e85, // n0x0ada c0x0000 (---------------) + I obama - 0x002903c3, // n0x0adb c0x0000 (---------------) + I ohi - 0x00210683, // n0x0adc c0x0000 (---------------) + I ono - 0x002336c5, // n0x0add c0x0000 (---------------) + I sabae - 0x00327d85, // n0x0ade c0x0000 (---------------) + I sakai - 0x00322e48, // n0x0adf c0x0000 (---------------) + I takahama - 0x00281947, // n0x0ae0 c0x0000 (---------------) + I tsuruga - 0x0024d886, // n0x0ae1 c0x0000 (---------------) + I wakasa - 0x00293506, // n0x0ae2 c0x0000 (---------------) + I ashiya - 0x0022d1c5, // n0x0ae3 c0x0000 (---------------) + I buzen - 0x0021e347, // n0x0ae4 c0x0000 (---------------) + I chikugo - 0x00211dc7, // n0x0ae5 c0x0000 (---------------) + I chikuho - 0x0036dbc7, // n0x0ae6 c0x0000 (---------------) + I chikujo - 0x002bed4a, // n0x0ae7 c0x0000 (---------------) + I chikushino - 0x0024eb08, // n0x0ae8 c0x0000 (---------------) + I chikuzen - 0x00347284, // n0x0ae9 c0x0000 (---------------) + I chuo - 0x00205987, // n0x0aea c0x0000 (---------------) + I dazaifu - 0x00276507, // n0x0aeb c0x0000 (---------------) + I fukuchi - 0x00317bc6, // n0x0aec c0x0000 (---------------) + I hakata - 0x00260cc7, // n0x0aed c0x0000 (---------------) + I higashi - 0x002c4a48, // n0x0aee c0x0000 (---------------) + I hirokawa - 0x003799c8, // n0x0aef c0x0000 (---------------) + I hisayama - 0x00258106, // n0x0af0 c0x0000 (---------------) + I iizuka - 0x0020f5c8, // n0x0af1 c0x0000 (---------------) + I inatsuki - 0x00273784, // n0x0af2 c0x0000 (---------------) + I kaho - 0x002b6b06, // n0x0af3 c0x0000 (---------------) + I kasuga - 0x002041c6, // n0x0af4 c0x0000 (---------------) + I kasuya - 0x003029c6, // n0x0af5 c0x0000 (---------------) + I kawara - 0x0030b346, // n0x0af6 c0x0000 (---------------) + I keisen - 0x00293a84, // n0x0af7 c0x0000 (---------------) + I koga - 0x0026b5c6, // n0x0af8 c0x0000 (---------------) + I kurate - 0x002aaa46, // n0x0af9 c0x0000 (---------------) + I kurogi - 0x0028c486, // n0x0afa c0x0000 (---------------) + I kurume - 0x00216c06, // n0x0afb c0x0000 (---------------) + I minami - 0x00210546, // n0x0afc c0x0000 (---------------) + I miyako - 0x0029b946, // n0x0afd c0x0000 (---------------) + I miyama - 0x0024d788, // n0x0afe c0x0000 (---------------) + I miyawaka - 0x002e6d08, // n0x0aff c0x0000 (---------------) + I mizumaki - 0x002c0408, // n0x0b00 c0x0000 (---------------) + I munakata - 0x00276cc8, // n0x0b01 c0x0000 (---------------) + I nakagawa - 
0x0020d786, // n0x0b02 c0x0000 (---------------) + I nakama - 0x00215405, // n0x0b03 c0x0000 (---------------) + I nishi - 0x0027e406, // n0x0b04 c0x0000 (---------------) + I nogata - 0x002a00c5, // n0x0b05 c0x0000 (---------------) + I ogori - 0x00347047, // n0x0b06 c0x0000 (---------------) + I okagaki - 0x00230345, // n0x0b07 c0x0000 (---------------) + I okawa - 0x002130c3, // n0x0b08 c0x0000 (---------------) + I oki - 0x002087c5, // n0x0b09 c0x0000 (---------------) + I omuta - 0x0037c804, // n0x0b0a c0x0000 (---------------) + I onga - 0x00210685, // n0x0b0b c0x0000 (---------------) + I onojo - 0x00242043, // n0x0b0c c0x0000 (---------------) + I oto - 0x002ddc47, // n0x0b0d c0x0000 (---------------) + I saigawa - 0x0030d908, // n0x0b0e c0x0000 (---------------) + I sasaguri - 0x00227206, // n0x0b0f c0x0000 (---------------) + I shingu - 0x002c8f4d, // n0x0b10 c0x0000 (---------------) + I shinyoshitomi - 0x00272546, // n0x0b11 c0x0000 (---------------) + I shonai - 0x0028ba85, // n0x0b12 c0x0000 (---------------) + I soeda - 0x002aa343, // n0x0b13 c0x0000 (---------------) + I sue - 0x002a8589, // n0x0b14 c0x0000 (---------------) + I tachiarai - 0x002be046, // n0x0b15 c0x0000 (---------------) + I tagawa - 0x0020ed06, // n0x0b16 c0x0000 (---------------) + I takata - 0x002f4544, // n0x0b17 c0x0000 (---------------) + I toho - 0x0024b5c7, // n0x0b18 c0x0000 (---------------) + I toyotsu - 0x00239106, // n0x0b19 c0x0000 (---------------) + I tsuiki - 0x00306205, // n0x0b1a c0x0000 (---------------) + I ukiha - 0x00200f03, // n0x0b1b c0x0000 (---------------) + I umi - 0x0020bd44, // n0x0b1c c0x0000 (---------------) + I usui - 0x002766c6, // n0x0b1d c0x0000 (---------------) + I yamada - 0x002042c4, // n0x0b1e c0x0000 (---------------) + I yame - 0x002cef88, // n0x0b1f c0x0000 (---------------) + I yanagawa - 0x00204fc9, // n0x0b20 c0x0000 (---------------) + I yukuhashi - 0x002305c9, // n0x0b21 c0x0000 (---------------) + I aizubange - 0x0029480a, // n0x0b22 c0x0000 (---------------) + I aizumisato - 0x00286bcd, // n0x0b23 c0x0000 (---------------) + I aizuwakamatsu - 0x0024fd87, // n0x0b24 c0x0000 (---------------) + I asakawa - 0x00261e86, // n0x0b25 c0x0000 (---------------) + I bandai - 0x00218204, // n0x0b26 c0x0000 (---------------) + I date - 0x00279049, // n0x0b27 c0x0000 (---------------) + I fukushima - 0x0027fcc8, // n0x0b28 c0x0000 (---------------) + I furudono - 0x00280846, // n0x0b29 c0x0000 (---------------) + I futaba - 0x00251146, // n0x0b2a c0x0000 (---------------) + I hanawa - 0x00260cc7, // n0x0b2b c0x0000 (---------------) + I higashi - 0x002dd346, // n0x0b2c c0x0000 (---------------) + I hirata - 0x0021c286, // n0x0b2d c0x0000 (---------------) + I hirono - 0x00364146, // n0x0b2e c0x0000 (---------------) + I iitate - 0x0021314a, // n0x0b2f c0x0000 (---------------) + I inawashiro - 0x0021a288, // n0x0b30 c0x0000 (---------------) + I ishikawa - 0x00225485, // n0x0b31 c0x0000 (---------------) + I iwaki - 0x002395c9, // n0x0b32 c0x0000 (---------------) + I izumizaki - 0x002b4a8a, // n0x0b33 c0x0000 (---------------) + I kagamiishi - 0x0021a508, // n0x0b34 c0x0000 (---------------) + I kaneyama - 0x00291648, // n0x0b35 c0x0000 (---------------) + I kawamata - 0x0020ec88, // n0x0b36 c0x0000 (---------------) + I kitakata - 0x00299bcc, // n0x0b37 c0x0000 (---------------) + I kitashiobara - 0x00316805, // n0x0b38 c0x0000 (---------------) + I koori - 0x00293788, // n0x0b39 c0x0000 (---------------) + I koriyama - 0x00315146, // n0x0b3a c0x0000 
(---------------) + I kunimi - 0x002eabc6, // n0x0b3b c0x0000 (---------------) + I miharu - 0x002b20c7, // n0x0b3c c0x0000 (---------------) + I mishima - 0x00216c85, // n0x0b3d c0x0000 (---------------) + I namie - 0x0027b145, // n0x0b3e c0x0000 (---------------) + I nango - 0x00230489, // n0x0b3f c0x0000 (---------------) + I nishiaizu - 0x00216f07, // n0x0b40 c0x0000 (---------------) + I nishigo - 0x0029b345, // n0x0b41 c0x0000 (---------------) + I okuma - 0x0021da87, // n0x0b42 c0x0000 (---------------) + I omotego - 0x00210683, // n0x0b43 c0x0000 (---------------) + I ono - 0x002b5145, // n0x0b44 c0x0000 (---------------) + I otama - 0x00229988, // n0x0b45 c0x0000 (---------------) + I samegawa - 0x002718c7, // n0x0b46 c0x0000 (---------------) + I shimogo - 0x00291509, // n0x0b47 c0x0000 (---------------) + I shirakawa - 0x002f5745, // n0x0b48 c0x0000 (---------------) + I showa - 0x002d4c04, // n0x0b49 c0x0000 (---------------) + I soma - 0x00296908, // n0x0b4a c0x0000 (---------------) + I sukagawa - 0x00384e07, // n0x0b4b c0x0000 (---------------) + I taishin - 0x0036a708, // n0x0b4c c0x0000 (---------------) + I tamakawa - 0x00202548, // n0x0b4d c0x0000 (---------------) + I tanagura - 0x00219dc5, // n0x0b4e c0x0000 (---------------) + I tenei - 0x0025c686, // n0x0b4f c0x0000 (---------------) + I yabuki - 0x00293606, // n0x0b50 c0x0000 (---------------) + I yamato - 0x00363549, // n0x0b51 c0x0000 (---------------) + I yamatsuri - 0x00300ec7, // n0x0b52 c0x0000 (---------------) + I yanaizu - 0x002a0b06, // n0x0b53 c0x0000 (---------------) + I yugawa - 0x0029abc7, // n0x0b54 c0x0000 (---------------) + I anpachi - 0x00202243, // n0x0b55 c0x0000 (---------------) + I ena - 0x00269a44, // n0x0b56 c0x0000 (---------------) + I gifu - 0x00363e05, // n0x0b57 c0x0000 (---------------) + I ginan - 0x00214644, // n0x0b58 c0x0000 (---------------) + I godo - 0x00246cc4, // n0x0b59 c0x0000 (---------------) + I gujo - 0x00271247, // n0x0b5a c0x0000 (---------------) + I hashima - 0x00380487, // n0x0b5b c0x0000 (---------------) + I hichiso - 0x00273144, // n0x0b5c c0x0000 (---------------) + I hida - 0x00291350, // n0x0b5d c0x0000 (---------------) + I higashishirakawa - 0x0036d387, // n0x0b5e c0x0000 (---------------) + I ibigawa - 0x002056c5, // n0x0b5f c0x0000 (---------------) + I ikeda - 0x00275ccc, // n0x0b60 c0x0000 (---------------) + I kakamigahara - 0x00202304, // n0x0b61 c0x0000 (---------------) + I kani - 0x002c8a08, // n0x0b62 c0x0000 (---------------) + I kasahara - 0x002cc909, // n0x0b63 c0x0000 (---------------) + I kasamatsu - 0x00267ac6, // n0x0b64 c0x0000 (---------------) + I kawaue - 0x00322cc8, // n0x0b65 c0x0000 (---------------) + I kitagata - 0x00230284, // n0x0b66 c0x0000 (---------------) + I mino - 0x002d2108, // n0x0b67 c0x0000 (---------------) + I minokamo - 0x002b39c6, // n0x0b68 c0x0000 (---------------) + I mitake - 0x002236c8, // n0x0b69 c0x0000 (---------------) + I mizunami - 0x00292246, // n0x0b6a c0x0000 (---------------) + I motosu - 0x0020198b, // n0x0b6b c0x0000 (---------------) + I nakatsugawa - 0x00207745, // n0x0b6c c0x0000 (---------------) + I ogaki - 0x002bb008, // n0x0b6d c0x0000 (---------------) + I sakahogi - 0x00205404, // n0x0b6e c0x0000 (---------------) + I seki - 0x0020540a, // n0x0b6f c0x0000 (---------------) + I sekigahara - 0x00291509, // n0x0b70 c0x0000 (---------------) + I shirakawa - 0x00279946, // n0x0b71 c0x0000 (---------------) + I tajimi - 0x002d6ec8, // n0x0b72 c0x0000 (---------------) + I takayama - 0x002da345, 
// n0x0b73 c0x0000 (---------------) + I tarui - 0x00221004, // n0x0b74 c0x0000 (---------------) + I toki - 0x002c8906, // n0x0b75 c0x0000 (---------------) + I tomika - 0x00380148, // n0x0b76 c0x0000 (---------------) + I wanouchi - 0x002797c8, // n0x0b77 c0x0000 (---------------) + I yamagata - 0x0031f2c6, // n0x0b78 c0x0000 (---------------) + I yaotsu - 0x002068c4, // n0x0b79 c0x0000 (---------------) + I yoro - 0x0023d186, // n0x0b7a c0x0000 (---------------) + I annaka - 0x00202ac7, // n0x0b7b c0x0000 (---------------) + I chiyoda - 0x00271e47, // n0x0b7c c0x0000 (---------------) + I fujioka - 0x00260ccf, // n0x0b7d c0x0000 (---------------) + I higashiagatsuma - 0x002ec447, // n0x0b7e c0x0000 (---------------) + I isesaki - 0x00306f87, // n0x0b7f c0x0000 (---------------) + I itakura - 0x002eaa85, // n0x0b80 c0x0000 (---------------) + I kanna - 0x002f0945, // n0x0b81 c0x0000 (---------------) + I kanra - 0x002955c9, // n0x0b82 c0x0000 (---------------) + I katashina - 0x002c3d86, // n0x0b83 c0x0000 (---------------) + I kawaba - 0x002657c5, // n0x0b84 c0x0000 (---------------) + I kiryu - 0x0027acc7, // n0x0b85 c0x0000 (---------------) + I kusatsu - 0x002b2348, // n0x0b86 c0x0000 (---------------) + I maebashi - 0x00204345, // n0x0b87 c0x0000 (---------------) + I meiwa - 0x00279a46, // n0x0b88 c0x0000 (---------------) + I midori - 0x00224f48, // n0x0b89 c0x0000 (---------------) + I minakami - 0x0036fe8a, // n0x0b8a c0x0000 (---------------) + I naganohara - 0x0026ac08, // n0x0b8b c0x0000 (---------------) + I nakanojo - 0x00282907, // n0x0b8c c0x0000 (---------------) + I nanmoku - 0x002d6dc6, // n0x0b8d c0x0000 (---------------) + I numata - 0x00239586, // n0x0b8e c0x0000 (---------------) + I oizumi - 0x0021be03, // n0x0b8f c0x0000 (---------------) + I ora - 0x00211083, // n0x0b90 c0x0000 (---------------) + I ota - 0x002dfc49, // n0x0b91 c0x0000 (---------------) + I shibukawa - 0x002e02c9, // n0x0b92 c0x0000 (---------------) + I shimonita - 0x002c68c6, // n0x0b93 c0x0000 (---------------) + I shinto - 0x002f5745, // n0x0b94 c0x0000 (---------------) + I showa - 0x002e83c8, // n0x0b95 c0x0000 (---------------) + I takasaki - 0x002d6ec8, // n0x0b96 c0x0000 (---------------) + I takayama - 0x002a1988, // n0x0b97 c0x0000 (---------------) + I tamamura - 0x003641cb, // n0x0b98 c0x0000 (---------------) + I tatebayashi - 0x002c9187, // n0x0b99 c0x0000 (---------------) + I tomioka - 0x00352d89, // n0x0b9a c0x0000 (---------------) + I tsukiyono - 0x00260f48, // n0x0b9b c0x0000 (---------------) + I tsumagoi - 0x0020e3c4, // n0x0b9c c0x0000 (---------------) + I ueno - 0x002ba7c8, // n0x0b9d c0x0000 (---------------) + I yoshioka - 0x00284f09, // n0x0b9e c0x0000 (---------------) + I asaminami - 0x00261f45, // n0x0b9f c0x0000 (---------------) + I daiwa - 0x002bdc07, // n0x0ba0 c0x0000 (---------------) + I etajima - 0x00205ac5, // n0x0ba1 c0x0000 (---------------) + I fuchu - 0x002796c8, // n0x0ba2 c0x0000 (---------------) + I fukuyama - 0x00287b4b, // n0x0ba3 c0x0000 (---------------) + I hatsukaichi - 0x0028a8d0, // n0x0ba4 c0x0000 (---------------) + I higashihiroshima - 0x0029c005, // n0x0ba5 c0x0000 (---------------) + I hongo - 0x0021798c, // n0x0ba6 c0x0000 (---------------) + I jinsekikogen - 0x0024da45, // n0x0ba7 c0x0000 (---------------) + I kaita - 0x00277283, // n0x0ba8 c0x0000 (---------------) + I kui - 0x002b5ec6, // n0x0ba9 c0x0000 (---------------) + I kumano - 0x002a9fc4, // n0x0baa c0x0000 (---------------) + I kure - 0x002af846, // n0x0bab c0x0000 
(---------------) + I mihara - 0x00291c87, // n0x0bac c0x0000 (---------------) + I miyoshi - 0x00201984, // n0x0bad c0x0000 (---------------) + I naka - 0x0030afc8, // n0x0bae c0x0000 (---------------) + I onomichi - 0x003446cd, // n0x0baf c0x0000 (---------------) + I osakikamijima - 0x002e8285, // n0x0bb0 c0x0000 (---------------) + I otake - 0x0024fdc4, // n0x0bb1 c0x0000 (---------------) + I saka - 0x00228ec4, // n0x0bb2 c0x0000 (---------------) + I sera - 0x0032ae09, // n0x0bb3 c0x0000 (---------------) + I seranishi - 0x002aee48, // n0x0bb4 c0x0000 (---------------) + I shinichi - 0x002aba07, // n0x0bb5 c0x0000 (---------------) + I shobara - 0x002b3a48, // n0x0bb6 c0x0000 (---------------) + I takehara - 0x00279f08, // n0x0bb7 c0x0000 (---------------) + I abashiri - 0x00273285, // n0x0bb8 c0x0000 (---------------) + I abira - 0x00237947, // n0x0bb9 c0x0000 (---------------) + I aibetsu - 0x00273207, // n0x0bba c0x0000 (---------------) + I akabira - 0x002edc87, // n0x0bbb c0x0000 (---------------) + I akkeshi - 0x002b1e89, // n0x0bbc c0x0000 (---------------) + I asahikawa - 0x00238f89, // n0x0bbd c0x0000 (---------------) + I ashibetsu - 0x0023ff86, // n0x0bbe c0x0000 (---------------) + I ashoro - 0x002bfdc6, // n0x0bbf c0x0000 (---------------) + I assabu - 0x00260f06, // n0x0bc0 c0x0000 (---------------) + I atsuma - 0x00381bc5, // n0x0bc1 c0x0000 (---------------) + I bibai - 0x00272a04, // n0x0bc2 c0x0000 (---------------) + I biei - 0x002037c6, // n0x0bc3 c0x0000 (---------------) + I bifuka - 0x00204d86, // n0x0bc4 c0x0000 (---------------) + I bihoro - 0x002732c8, // n0x0bc5 c0x0000 (---------------) + I biratori - 0x0025dacb, // n0x0bc6 c0x0000 (---------------) + I chippubetsu - 0x00295c87, // n0x0bc7 c0x0000 (---------------) + I chitose - 0x00218204, // n0x0bc8 c0x0000 (---------------) + I date - 0x002fa746, // n0x0bc9 c0x0000 (---------------) + I ebetsu - 0x002d7ac7, // n0x0bca c0x0000 (---------------) + I embetsu - 0x0029b545, // n0x0bcb c0x0000 (---------------) + I eniwa - 0x00309e85, // n0x0bcc c0x0000 (---------------) + I erimo - 0x00232a84, // n0x0bcd c0x0000 (---------------) + I esan - 0x00238f06, // n0x0bce c0x0000 (---------------) + I esashi - 0x00203848, // n0x0bcf c0x0000 (---------------) + I fukagawa - 0x00279049, // n0x0bd0 c0x0000 (---------------) + I fukushima - 0x00247fc6, // n0x0bd1 c0x0000 (---------------) + I furano - 0x0027ed88, // n0x0bd2 c0x0000 (---------------) + I furubira - 0x002f5046, // n0x0bd3 c0x0000 (---------------) + I haboro - 0x0031e808, // n0x0bd4 c0x0000 (---------------) + I hakodate - 0x002d028c, // n0x0bd5 c0x0000 (---------------) + I hamatonbetsu - 0x00273146, // n0x0bd6 c0x0000 (---------------) + I hidaka - 0x0028b74d, // n0x0bd7 c0x0000 (---------------) + I higashikagura - 0x0028bbcb, // n0x0bd8 c0x0000 (---------------) + I higashikawa - 0x002572c5, // n0x0bd9 c0x0000 (---------------) + I hiroo - 0x00211f07, // n0x0bda c0x0000 (---------------) + I hokuryu - 0x00273806, // n0x0bdb c0x0000 (---------------) + I hokuto - 0x00335148, // n0x0bdc c0x0000 (---------------) + I honbetsu - 0x00240009, // n0x0bdd c0x0000 (---------------) + I horokanai - 0x002ad7c8, // n0x0bde c0x0000 (---------------) + I horonobe - 0x002056c5, // n0x0bdf c0x0000 (---------------) + I ikeda - 0x002bdd07, // n0x0be0 c0x0000 (---------------) + I imakane - 0x002b4c08, // n0x0be1 c0x0000 (---------------) + I ishikari - 0x0032d609, // n0x0be2 c0x0000 (---------------) + I iwamizawa - 0x0024e546, // n0x0be3 c0x0000 (---------------) + I 
iwanai - 0x0025700a, // n0x0be4 c0x0000 (---------------) + I kamifurano - 0x00334ec8, // n0x0be5 c0x0000 (---------------) + I kamikawa - 0x002ad60b, // n0x0be6 c0x0000 (---------------) + I kamishihoro - 0x002a3e0c, // n0x0be7 c0x0000 (---------------) + I kamisunagawa - 0x002d2208, // n0x0be8 c0x0000 (---------------) + I kamoenai - 0x00275986, // n0x0be9 c0x0000 (---------------) + I kayabe - 0x0036da88, // n0x0bea c0x0000 (---------------) + I kembuchi - 0x002ec587, // n0x0beb c0x0000 (---------------) + I kikonai - 0x0037ff09, // n0x0bec c0x0000 (---------------) + I kimobetsu - 0x0031700d, // n0x0bed c0x0000 (---------------) + I kitahiroshima - 0x0028c006, // n0x0bee c0x0000 (---------------) + I kitami - 0x00308208, // n0x0bef c0x0000 (---------------) + I kiyosato - 0x002e6bc9, // n0x0bf0 c0x0000 (---------------) + I koshimizu - 0x002a9608, // n0x0bf1 c0x0000 (---------------) + I kunneppu - 0x0027c208, // n0x0bf2 c0x0000 (---------------) + I kuriyama - 0x002ab70c, // n0x0bf3 c0x0000 (---------------) + I kuromatsunai - 0x002ae4c7, // n0x0bf4 c0x0000 (---------------) + I kushiro - 0x002af107, // n0x0bf5 c0x0000 (---------------) + I kutchan - 0x002b1cc5, // n0x0bf6 c0x0000 (---------------) + I kyowa - 0x0024c447, // n0x0bf7 c0x0000 (---------------) + I mashike - 0x002b2208, // n0x0bf8 c0x0000 (---------------) + I matsumae - 0x002c8986, // n0x0bf9 c0x0000 (---------------) + I mikasa - 0x00247e4c, // n0x0bfa c0x0000 (---------------) + I minamifurano - 0x002d15c8, // n0x0bfb c0x0000 (---------------) + I mombetsu - 0x002bbcc8, // n0x0bfc c0x0000 (---------------) + I moseushi - 0x00203b06, // n0x0bfd c0x0000 (---------------) + I mukawa - 0x0025c307, // n0x0bfe c0x0000 (---------------) + I muroran - 0x00240184, // n0x0bff c0x0000 (---------------) + I naie - 0x00276cc8, // n0x0c00 c0x0000 (---------------) + I nakagawa - 0x0028530c, // n0x0c01 c0x0000 (---------------) + I nakasatsunai - 0x0022b9cc, // n0x0c02 c0x0000 (---------------) + I nakatombetsu - 0x00229205, // n0x0c03 c0x0000 (---------------) + I nanae - 0x0037ee47, // n0x0c04 c0x0000 (---------------) + I nanporo - 0x00206846, // n0x0c05 c0x0000 (---------------) + I nayoro - 0x0025c286, // n0x0c06 c0x0000 (---------------) + I nemuro - 0x002d8ec8, // n0x0c07 c0x0000 (---------------) + I niikappu - 0x0029b204, // n0x0c08 c0x0000 (---------------) + I niki - 0x00221a4b, // n0x0c09 c0x0000 (---------------) + I nishiokoppe - 0x0030a4cb, // n0x0c0a c0x0000 (---------------) + I noboribetsu - 0x002d6dc6, // n0x0c0b c0x0000 (---------------) + I numata - 0x00344547, // n0x0c0c c0x0000 (---------------) + I obihiro - 0x0035f745, // n0x0c0d c0x0000 (---------------) + I obira - 0x002690c5, // n0x0c0e c0x0000 (---------------) + I oketo - 0x00221b86, // n0x0c0f c0x0000 (---------------) + I okoppe - 0x002da305, // n0x0c10 c0x0000 (---------------) + I otaru - 0x00263045, // n0x0c11 c0x0000 (---------------) + I otobe - 0x002b3807, // n0x0c12 c0x0000 (---------------) + I otofuke - 0x00268c49, // n0x0c13 c0x0000 (---------------) + I otoineppu - 0x002ed344, // n0x0c14 c0x0000 (---------------) + I oumu - 0x002fb245, // n0x0c15 c0x0000 (---------------) + I ozora - 0x002c95c5, // n0x0c16 c0x0000 (---------------) + I pippu - 0x0025c408, // n0x0c17 c0x0000 (---------------) + I rankoshi - 0x0030ae05, // n0x0c18 c0x0000 (---------------) + I rebun - 0x00289f89, // n0x0c19 c0x0000 (---------------) + I rikubetsu - 0x002cb087, // n0x0c1a c0x0000 (---------------) + I rishiri - 0x002cb08b, // n0x0c1b c0x0000 (---------------) 
+ I rishirifuji - 0x0022a946, // n0x0c1c c0x0000 (---------------) + I saroma - 0x0024d4c9, // n0x0c1d c0x0000 (---------------) + I sarufutsu - 0x0029bc08, // n0x0c1e c0x0000 (---------------) + I shakotan - 0x002a7e85, // n0x0c1f c0x0000 (---------------) + I shari - 0x002edd88, // n0x0c20 c0x0000 (---------------) + I shibecha - 0x00238fc8, // n0x0c21 c0x0000 (---------------) + I shibetsu - 0x0021ca47, // n0x0c22 c0x0000 (---------------) + I shikabe - 0x00239447, // n0x0c23 c0x0000 (---------------) + I shikaoi - 0x002712c9, // n0x0c24 c0x0000 (---------------) + I shimamaki - 0x00223607, // n0x0c25 c0x0000 (---------------) + I shimizu - 0x002d5349, // n0x0c26 c0x0000 (---------------) + I shimokawa - 0x002bcc0c, // n0x0c27 c0x0000 (---------------) + I shinshinotsu - 0x002c68c8, // n0x0c28 c0x0000 (---------------) + I shintoku - 0x002ea8c9, // n0x0c29 c0x0000 (---------------) + I shiranuka - 0x002d3ec7, // n0x0c2a c0x0000 (---------------) + I shiraoi - 0x00279fc9, // n0x0c2b c0x0000 (---------------) + I shiriuchi - 0x003805c7, // n0x0c2c c0x0000 (---------------) + I sobetsu - 0x002a3f08, // n0x0c2d c0x0000 (---------------) + I sunagawa - 0x00332845, // n0x0c2e c0x0000 (---------------) + I taiki - 0x002b6a86, // n0x0c2f c0x0000 (---------------) + I takasu - 0x002ac448, // n0x0c30 c0x0000 (---------------) + I takikawa - 0x002560c8, // n0x0c31 c0x0000 (---------------) + I takinoue - 0x002b4949, // n0x0c32 c0x0000 (---------------) + I teshikaga - 0x00263087, // n0x0c33 c0x0000 (---------------) + I tobetsu - 0x0026b385, // n0x0c34 c0x0000 (---------------) + I tohma - 0x002d0a49, // n0x0c35 c0x0000 (---------------) + I tomakomai - 0x002468c6, // n0x0c36 c0x0000 (---------------) + I tomari - 0x002873c4, // n0x0c37 c0x0000 (---------------) + I toya - 0x002f4246, // n0x0c38 c0x0000 (---------------) + I toyako - 0x00247448, // n0x0c39 c0x0000 (---------------) + I toyotomi - 0x00256b47, // n0x0c3a c0x0000 (---------------) + I toyoura - 0x0025dcc8, // n0x0c3b c0x0000 (---------------) + I tsubetsu - 0x0020f689, // n0x0c3c c0x0000 (---------------) + I tsukigata - 0x002fc647, // n0x0c3d c0x0000 (---------------) + I urakawa - 0x00292dc6, // n0x0c3e c0x0000 (---------------) + I urausu - 0x00211fc4, // n0x0c3f c0x0000 (---------------) + I uryu - 0x00208849, // n0x0c40 c0x0000 (---------------) + I utashinai - 0x002377c8, // n0x0c41 c0x0000 (---------------) + I wakkanai - 0x002039c7, // n0x0c42 c0x0000 (---------------) + I wassamu - 0x002fb786, // n0x0c43 c0x0000 (---------------) + I yakumo - 0x0029c4c6, // n0x0c44 c0x0000 (---------------) + I yoichi - 0x002ec6c4, // n0x0c45 c0x0000 (---------------) + I aioi - 0x002b3506, // n0x0c46 c0x0000 (---------------) + I akashi - 0x00210603, // n0x0c47 c0x0000 (---------------) + I ako - 0x00378049, // n0x0c48 c0x0000 (---------------) + I amagasaki - 0x00207706, // n0x0c49 c0x0000 (---------------) + I aogaki - 0x002aad85, // n0x0c4a c0x0000 (---------------) + I asago - 0x00293506, // n0x0c4b c0x0000 (---------------) + I ashiya - 0x002239c5, // n0x0c4c c0x0000 (---------------) + I awaji - 0x00278dc8, // n0x0c4d c0x0000 (---------------) + I fukusaki - 0x002d1f47, // n0x0c4e c0x0000 (---------------) + I goshiki - 0x003604c6, // n0x0c4f c0x0000 (---------------) + I harima - 0x00206d46, // n0x0c50 c0x0000 (---------------) + I himeji - 0x0034fec8, // n0x0c51 c0x0000 (---------------) + I ichikawa - 0x00295747, // n0x0c52 c0x0000 (---------------) + I inagawa - 0x0028c045, // n0x0c53 c0x0000 (---------------) + I itami - 
0x00293a08, // n0x0c54 c0x0000 (---------------) + I kakogawa - 0x003563c8, // n0x0c55 c0x0000 (---------------) + I kamigori - 0x00334ec8, // n0x0c56 c0x0000 (---------------) + I kamikawa - 0x0024d905, // n0x0c57 c0x0000 (---------------) + I kasai - 0x002b6b06, // n0x0c58 c0x0000 (---------------) + I kasuga - 0x00230389, // n0x0c59 c0x0000 (---------------) + I kawanishi - 0x002a2684, // n0x0c5a c0x0000 (---------------) + I miki - 0x0022384b, // n0x0c5b c0x0000 (---------------) + I minamiawaji - 0x0021bf8b, // n0x0c5c c0x0000 (---------------) + I nishinomiya - 0x00225389, // n0x0c5d c0x0000 (---------------) + I nishiwaki - 0x00210683, // n0x0c5e c0x0000 (---------------) + I ono - 0x00253cc5, // n0x0c5f c0x0000 (---------------) + I sanda - 0x00351146, // n0x0c60 c0x0000 (---------------) + I sannan - 0x0024ee88, // n0x0c61 c0x0000 (---------------) + I sasayama - 0x00265c04, // n0x0c62 c0x0000 (---------------) + I sayo - 0x00227206, // n0x0c63 c0x0000 (---------------) + I shingu - 0x002bee89, // n0x0c64 c0x0000 (---------------) + I shinonsen - 0x002e5305, // n0x0c65 c0x0000 (---------------) + I shiso - 0x002b3746, // n0x0c66 c0x0000 (---------------) + I sumoto - 0x00384e06, // n0x0c67 c0x0000 (---------------) + I taishi - 0x0020ed04, // n0x0c68 c0x0000 (---------------) + I taka - 0x0036a04a, // n0x0c69 c0x0000 (---------------) + I takarazuka - 0x002aacc8, // n0x0c6a c0x0000 (---------------) + I takasago - 0x002560c6, // n0x0c6b c0x0000 (---------------) + I takino - 0x002fb045, // n0x0c6c c0x0000 (---------------) + I tamba - 0x00217107, // n0x0c6d c0x0000 (---------------) + I tatsuno - 0x0022fec7, // n0x0c6e c0x0000 (---------------) + I toyooka - 0x0025c684, // n0x0c6f c0x0000 (---------------) + I yabu - 0x0021c1c7, // n0x0c70 c0x0000 (---------------) + I yashiro - 0x002a6844, // n0x0c71 c0x0000 (---------------) + I yoka - 0x00302946, // n0x0c72 c0x0000 (---------------) + I yokawa - 0x00216cc3, // n0x0c73 c0x0000 (---------------) + I ami - 0x002b1e85, // n0x0c74 c0x0000 (---------------) + I asahi - 0x0032ca45, // n0x0c75 c0x0000 (---------------) + I bando - 0x00380288, // n0x0c76 c0x0000 (---------------) + I chikusei - 0x00269b45, // n0x0c77 c0x0000 (---------------) + I daigo - 0x00272e89, // n0x0c78 c0x0000 (---------------) + I fujishiro - 0x00276b07, // n0x0c79 c0x0000 (---------------) + I hitachi - 0x00276b0b, // n0x0c7a c0x0000 (---------------) + I hitachinaka - 0x002975cc, // n0x0c7b c0x0000 (---------------) + I hitachiomiya - 0x00297c0a, // n0x0c7c c0x0000 (---------------) + I hitachiota - 0x00316ec7, // n0x0c7d c0x0000 (---------------) + I ibaraki - 0x00208983, // n0x0c7e c0x0000 (---------------) + I ina - 0x00299a48, // n0x0c7f c0x0000 (---------------) + I inashiki - 0x0024dac5, // n0x0c80 c0x0000 (---------------) + I itako - 0x002043c5, // n0x0c81 c0x0000 (---------------) + I iwama - 0x00210744, // n0x0c82 c0x0000 (---------------) + I joso - 0x002a3e06, // n0x0c83 c0x0000 (---------------) + I kamisu - 0x002cc906, // n0x0c84 c0x0000 (---------------) + I kasama - 0x002b3547, // n0x0c85 c0x0000 (---------------) + I kashima - 0x00200e4b, // n0x0c86 c0x0000 (---------------) + I kasumigaura - 0x00293a84, // n0x0c87 c0x0000 (---------------) + I koga - 0x002df804, // n0x0c88 c0x0000 (---------------) + I miho - 0x00258284, // n0x0c89 c0x0000 (---------------) + I mito - 0x002ba386, // n0x0c8a c0x0000 (---------------) + I moriya - 0x00201984, // n0x0c8b c0x0000 (---------------) + I naka - 0x002b3348, // n0x0c8c c0x0000 (---------------) + I 
namegata - 0x0031ce85, // n0x0c8d c0x0000 (---------------) + I oarai - 0x00228905, // n0x0c8e c0x0000 (---------------) + I ogawa - 0x002a18c7, // n0x0c8f c0x0000 (---------------) + I omitama - 0x00212009, // n0x0c90 c0x0000 (---------------) + I ryugasaki - 0x00327d85, // n0x0c91 c0x0000 (---------------) + I sakai - 0x0020e0ca, // n0x0c92 c0x0000 (---------------) + I sakuragawa - 0x00294e89, // n0x0c93 c0x0000 (---------------) + I shimodate - 0x002a8b8a, // n0x0c94 c0x0000 (---------------) + I shimotsuma - 0x00213289, // n0x0c95 c0x0000 (---------------) + I shirosato - 0x002d90c4, // n0x0c96 c0x0000 (---------------) + I sowa - 0x002e7e45, // n0x0c97 c0x0000 (---------------) + I suifu - 0x002dd448, // n0x0c98 c0x0000 (---------------) + I takahagi - 0x0030c64b, // n0x0c99 c0x0000 (---------------) + I tamatsukuri - 0x002f1d45, // n0x0c9a c0x0000 (---------------) + I tokai - 0x00344dc6, // n0x0c9b c0x0000 (---------------) + I tomobe - 0x00216684, // n0x0c9c c0x0000 (---------------) + I tone - 0x002733c6, // n0x0c9d c0x0000 (---------------) + I toride - 0x002fc4c9, // n0x0c9e c0x0000 (---------------) + I tsuchiura - 0x002fa807, // n0x0c9f c0x0000 (---------------) + I tsukuba - 0x0026e288, // n0x0ca0 c0x0000 (---------------) + I uchihara - 0x00286f46, // n0x0ca1 c0x0000 (---------------) + I ushiku - 0x00202a47, // n0x0ca2 c0x0000 (---------------) + I yachiyo - 0x002797c8, // n0x0ca3 c0x0000 (---------------) + I yamagata - 0x00355e86, // n0x0ca4 c0x0000 (---------------) + I yawara - 0x00383544, // n0x0ca5 c0x0000 (---------------) + I yuki - 0x0033b707, // n0x0ca6 c0x0000 (---------------) + I anamizu - 0x0032aa05, // n0x0ca7 c0x0000 (---------------) + I hakui - 0x0033c0c7, // n0x0ca8 c0x0000 (---------------) + I hakusan - 0x002038c4, // n0x0ca9 c0x0000 (---------------) + I kaga - 0x00273786, // n0x0caa c0x0000 (---------------) + I kahoku - 0x00204b88, // n0x0cab c0x0000 (---------------) + I kanazawa - 0x0028bd88, // n0x0cac c0x0000 (---------------) + I kawakita - 0x00307e47, // n0x0cad c0x0000 (---------------) + I komatsu - 0x00282c88, // n0x0cae c0x0000 (---------------) + I nakanoto - 0x00288c05, // n0x0caf c0x0000 (---------------) + I nanao - 0x002104c4, // n0x0cb0 c0x0000 (---------------) + I nomi - 0x0034fdc8, // n0x0cb1 c0x0000 (---------------) + I nonoichi - 0x00282d84, // n0x0cb2 c0x0000 (---------------) + I noto - 0x00218e85, // n0x0cb3 c0x0000 (---------------) + I shika - 0x002df344, // n0x0cb4 c0x0000 (---------------) + I suzu - 0x002fc007, // n0x0cb5 c0x0000 (---------------) + I tsubata - 0x00363cc7, // n0x0cb6 c0x0000 (---------------) + I tsurugi - 0x0027a108, // n0x0cb7 c0x0000 (---------------) + I uchinada - 0x002c9706, // n0x0cb8 c0x0000 (---------------) + I wajima - 0x00269ac5, // n0x0cb9 c0x0000 (---------------) + I fudai - 0x00272c88, // n0x0cba c0x0000 (---------------) + I fujisawa - 0x0026ae08, // n0x0cbb c0x0000 (---------------) + I hanamaki - 0x00294749, // n0x0cbc c0x0000 (---------------) + I hiraizumi - 0x0021c286, // n0x0cbd c0x0000 (---------------) + I hirono - 0x00379cc8, // n0x0cbe c0x0000 (---------------) + I ichinohe - 0x0020528a, // n0x0cbf c0x0000 (---------------) + I ichinoseki - 0x0029b5c8, // n0x0cc0 c0x0000 (---------------) + I iwaizumi - 0x0034f5c5, // n0x0cc1 c0x0000 (---------------) + I iwate - 0x0036dd06, // n0x0cc2 c0x0000 (---------------) + I joboji - 0x00294d48, // n0x0cc3 c0x0000 (---------------) + I kamaishi - 0x002bddca, // n0x0cc4 c0x0000 (---------------) + I kanegasaki - 0x002fa087, // n0x0cc5 
c0x0000 (---------------) + I karumai - 0x0027de85, // n0x0cc6 c0x0000 (---------------) + I kawai - 0x002875c8, // n0x0cc7 c0x0000 (---------------) + I kitakami - 0x00235604, // n0x0cc8 c0x0000 (---------------) + I kuji - 0x00241bc6, // n0x0cc9 c0x0000 (---------------) + I kunohe - 0x002af9c8, // n0x0cca c0x0000 (---------------) + I kuzumaki - 0x00210546, // n0x0ccb c0x0000 (---------------) + I miyako - 0x00364d08, // n0x0ccc c0x0000 (---------------) + I mizusawa - 0x0020ef07, // n0x0ccd c0x0000 (---------------) + I morioka - 0x0020ea06, // n0x0cce c0x0000 (---------------) + I ninohe - 0x00238104, // n0x0ccf c0x0000 (---------------) + I noda - 0x002a1dc7, // n0x0cd0 c0x0000 (---------------) + I ofunato - 0x002f2e04, // n0x0cd1 c0x0000 (---------------) + I oshu - 0x002fc487, // n0x0cd2 c0x0000 (---------------) + I otsuchi - 0x002a1f8d, // n0x0cd3 c0x0000 (---------------) + I rikuzentakata - 0x00225405, // n0x0cd4 c0x0000 (---------------) + I shiwa - 0x002e9f8b, // n0x0cd5 c0x0000 (---------------) + I shizukuishi - 0x00292346, // n0x0cd6 c0x0000 (---------------) + I sumita - 0x0024e348, // n0x0cd7 c0x0000 (---------------) + I tanohata - 0x003793c4, // n0x0cd8 c0x0000 (---------------) + I tono - 0x0020d986, // n0x0cd9 c0x0000 (---------------) + I yahaba - 0x002766c6, // n0x0cda c0x0000 (---------------) + I yamada - 0x0026f707, // n0x0cdb c0x0000 (---------------) + I ayagawa - 0x0028b40d, // n0x0cdc c0x0000 (---------------) + I higashikagawa - 0x002d9707, // n0x0cdd c0x0000 (---------------) + I kanonji - 0x002fdb88, // n0x0cde c0x0000 (---------------) + I kotohira - 0x00272085, // n0x0cdf c0x0000 (---------------) + I manno - 0x0028cb88, // n0x0ce0 c0x0000 (---------------) + I marugame - 0x002b4286, // n0x0ce1 c0x0000 (---------------) + I mitoyo - 0x00288c88, // n0x0ce2 c0x0000 (---------------) + I naoshima - 0x0023f706, // n0x0ce3 c0x0000 (---------------) + I sanuki - 0x00363bc7, // n0x0ce4 c0x0000 (---------------) + I tadotsu - 0x00358b09, // n0x0ce5 c0x0000 (---------------) + I takamatsu - 0x003793c7, // n0x0ce6 c0x0000 (---------------) + I tonosho - 0x00280b08, // n0x0ce7 c0x0000 (---------------) + I uchinomi - 0x0026cd85, // n0x0ce8 c0x0000 (---------------) + I utazu - 0x0021b648, // n0x0ce9 c0x0000 (---------------) + I zentsuji - 0x0025c1c5, // n0x0cea c0x0000 (---------------) + I akune - 0x00249405, // n0x0ceb c0x0000 (---------------) + I amami - 0x00345145, // n0x0cec c0x0000 (---------------) + I hioki - 0x00213e43, // n0x0ced c0x0000 (---------------) + I isa - 0x0020e944, // n0x0cee c0x0000 (---------------) + I isen - 0x002395c5, // n0x0cef c0x0000 (---------------) + I izumi - 0x00360c89, // n0x0cf0 c0x0000 (---------------) + I kagoshima - 0x002e5446, // n0x0cf1 c0x0000 (---------------) + I kanoya - 0x002c4b48, // n0x0cf2 c0x0000 (---------------) + I kawanabe - 0x00239205, // n0x0cf3 c0x0000 (---------------) + I kinko - 0x00348cc7, // n0x0cf4 c0x0000 (---------------) + I kouyama - 0x0033548a, // n0x0cf5 c0x0000 (---------------) + I makurazaki - 0x002b3689, // n0x0cf6 c0x0000 (---------------) + I matsumoto - 0x002a5e8a, // n0x0cf7 c0x0000 (---------------) + I minamitane - 0x002c0488, // n0x0cf8 c0x0000 (---------------) + I nakatane - 0x0021d8cc, // n0x0cf9 c0x0000 (---------------) + I nishinoomote - 0x0027ad4d, // n0x0cfa c0x0000 (---------------) + I satsumasendai - 0x002d5583, // n0x0cfb c0x0000 (---------------) + I soo - 0x00364c08, // n0x0cfc c0x0000 (---------------) + I tarumizu - 0x0020bd05, // n0x0cfd c0x0000 (---------------) 
+ I yusui - 0x002c3d06, // n0x0cfe c0x0000 (---------------) + I aikawa - 0x00330e46, // n0x0cff c0x0000 (---------------) + I atsugi - 0x002d1c85, // n0x0d00 c0x0000 (---------------) + I ayase - 0x0029acc9, // n0x0d01 c0x0000 (---------------) + I chigasaki - 0x00383b05, // n0x0d02 c0x0000 (---------------) + I ebina - 0x00272c88, // n0x0d03 c0x0000 (---------------) + I fujisawa - 0x00283106, // n0x0d04 c0x0000 (---------------) + I hadano - 0x00320a86, // n0x0d05 c0x0000 (---------------) + I hakone - 0x002967c9, // n0x0d06 c0x0000 (---------------) + I hiratsuka - 0x00357b07, // n0x0d07 c0x0000 (---------------) + I isehara - 0x002d6806, // n0x0d08 c0x0000 (---------------) + I kaisei - 0x00335408, // n0x0d09 c0x0000 (---------------) + I kamakura - 0x003028c8, // n0x0d0a c0x0000 (---------------) + I kiyokawa - 0x00345dc7, // n0x0d0b c0x0000 (---------------) + I matsuda - 0x0028f5ce, // n0x0d0c c0x0000 (---------------) + I minamiashigara - 0x002b44c5, // n0x0d0d c0x0000 (---------------) + I miura - 0x0032d505, // n0x0d0e c0x0000 (---------------) + I nakai - 0x00210448, // n0x0d0f c0x0000 (---------------) + I ninomiya - 0x00238147, // n0x0d10 c0x0000 (---------------) + I odawara - 0x00205242, // n0x0d11 c0x0000 (---------------) + I oi - 0x002ab204, // n0x0d12 c0x0000 (---------------) + I oiso - 0x002af74a, // n0x0d13 c0x0000 (---------------) + I sagamihara - 0x00203a88, // n0x0d14 c0x0000 (---------------) + I samukawa - 0x002d7bc6, // n0x0d15 c0x0000 (---------------) + I tsukui - 0x0028d288, // n0x0d16 c0x0000 (---------------) + I yamakita - 0x00293606, // n0x0d17 c0x0000 (---------------) + I yamato - 0x0036cf48, // n0x0d18 c0x0000 (---------------) + I yokosuka - 0x002a0b08, // n0x0d19 c0x0000 (---------------) + I yugawara - 0x002493c4, // n0x0d1a c0x0000 (---------------) + I zama - 0x0035af85, // n0x0d1b c0x0000 (---------------) + I zushi - 0x007242c4, // n0x0d1c c0x0001 (---------------) ! I city - 0x007242c4, // n0x0d1d c0x0001 (---------------) ! I city - 0x007242c4, // n0x0d1e c0x0001 (---------------) ! 
I city - 0x002077c3, // n0x0d1f c0x0000 (---------------) + I aki - 0x002ed186, // n0x0d20 c0x0000 (---------------) + I geisei - 0x00273146, // n0x0d21 c0x0000 (---------------) + I hidaka - 0x0029278c, // n0x0d22 c0x0000 (---------------) + I higashitsuno - 0x00200c83, // n0x0d23 c0x0000 (---------------) + I ino - 0x0027f346, // n0x0d24 c0x0000 (---------------) + I kagami - 0x00225044, // n0x0d25 c0x0000 (---------------) + I kami - 0x002bdfc8, // n0x0d26 c0x0000 (---------------) + I kitagawa - 0x002becc5, // n0x0d27 c0x0000 (---------------) + I kochi - 0x002af846, // n0x0d28 c0x0000 (---------------) + I mihara - 0x002bf588, // n0x0d29 c0x0000 (---------------) + I motoyama - 0x002c1506, // n0x0d2a c0x0000 (---------------) + I muroto - 0x00360446, // n0x0d2b c0x0000 (---------------) + I nahari - 0x00234008, // n0x0d2c c0x0000 (---------------) + I nakamura - 0x00363e87, // n0x0d2d c0x0000 (---------------) + I nankoku - 0x002233c9, // n0x0d2e c0x0000 (---------------) + I nishitosa - 0x002287ca, // n0x0d2f c0x0000 (---------------) + I niyodogawa - 0x0024fb04, // n0x0d30 c0x0000 (---------------) + I ochi - 0x00230345, // n0x0d31 c0x0000 (---------------) + I okawa - 0x002bc5c5, // n0x0d32 c0x0000 (---------------) + I otoyo - 0x00322a46, // n0x0d33 c0x0000 (---------------) + I otsuki - 0x0024fdc6, // n0x0d34 c0x0000 (---------------) + I sakawa - 0x002931c6, // n0x0d35 c0x0000 (---------------) + I sukumo - 0x002de606, // n0x0d36 c0x0000 (---------------) + I susaki - 0x00223504, // n0x0d37 c0x0000 (---------------) + I tosa - 0x0022350b, // n0x0d38 c0x0000 (---------------) + I tosashimizu - 0x0022fec4, // n0x0d39 c0x0000 (---------------) + I toyo - 0x00217185, // n0x0d3a c0x0000 (---------------) + I tsuno - 0x0029f6c5, // n0x0d3b c0x0000 (---------------) + I umaji - 0x00212386, // n0x0d3c c0x0000 (---------------) + I yasuda - 0x00207d88, // n0x0d3d c0x0000 (---------------) + I yusuhara - 0x0027ac07, // n0x0d3e c0x0000 (---------------) + I amakusa - 0x00370044, // n0x0d3f c0x0000 (---------------) + I arao - 0x0028ba43, // n0x0d40 c0x0000 (---------------) + I aso - 0x002e6485, // n0x0d41 c0x0000 (---------------) + I choyo - 0x00337407, // n0x0d42 c0x0000 (---------------) + I gyokuto - 0x00298389, // n0x0d43 c0x0000 (---------------) + I hitoyoshi - 0x0027ab0b, // n0x0d44 c0x0000 (---------------) + I kamiamakusa - 0x002b3547, // n0x0d45 c0x0000 (---------------) + I kashima - 0x0026f407, // n0x0d46 c0x0000 (---------------) + I kikuchi - 0x002ddbc4, // n0x0d47 c0x0000 (---------------) + I kosa - 0x002bf488, // n0x0d48 c0x0000 (---------------) + I kumamoto - 0x003080c7, // n0x0d49 c0x0000 (---------------) + I mashiki - 0x0028ddc6, // n0x0d4a c0x0000 (---------------) + I mifune - 0x00255f48, // n0x0d4b c0x0000 (---------------) + I minamata - 0x002ba04b, // n0x0d4c c0x0000 (---------------) + I minamioguni - 0x00375a06, // n0x0d4d c0x0000 (---------------) + I nagasu - 0x00217509, // n0x0d4e c0x0000 (---------------) + I nishihara - 0x002ba1c5, // n0x0d4f c0x0000 (---------------) + I oguni - 0x00347343, // n0x0d50 c0x0000 (---------------) + I ozu - 0x002b3746, // n0x0d51 c0x0000 (---------------) + I sumoto - 0x0020ee08, // n0x0d52 c0x0000 (---------------) + I takamori - 0x0020f703, // n0x0d53 c0x0000 (---------------) + I uki - 0x00262c83, // n0x0d54 c0x0000 (---------------) + I uto - 0x002797c6, // n0x0d55 c0x0000 (---------------) + I yamaga - 0x00293606, // n0x0d56 c0x0000 (---------------) + I yamato - 0x0034e90a, // n0x0d57 c0x0000 (---------------) + I 
yatsushiro - 0x002759c5, // n0x0d58 c0x0000 (---------------) + I ayabe - 0x0027650b, // n0x0d59 c0x0000 (---------------) + I fukuchiyama - 0x0029344b, // n0x0d5a c0x0000 (---------------) + I higashiyama - 0x00207c83, // n0x0d5b c0x0000 (---------------) + I ide - 0x0021e883, // n0x0d5c c0x0000 (---------------) + I ine - 0x002a2384, // n0x0d5d c0x0000 (---------------) + I joyo - 0x0020f207, // n0x0d5e c0x0000 (---------------) + I kameoka - 0x0020ee84, // n0x0d5f c0x0000 (---------------) + I kamo - 0x0020ec84, // n0x0d60 c0x0000 (---------------) + I kita - 0x00314fc4, // n0x0d61 c0x0000 (---------------) + I kizu - 0x0029b8c8, // n0x0d62 c0x0000 (---------------) + I kumiyama - 0x002faf88, // n0x0d63 c0x0000 (---------------) + I kyotamba - 0x00211009, // n0x0d64 c0x0000 (---------------) + I kyotanabe - 0x002bc948, // n0x0d65 c0x0000 (---------------) + I kyotango - 0x00360e47, // n0x0d66 c0x0000 (---------------) + I maizuru - 0x00216c06, // n0x0d67 c0x0000 (---------------) + I minami - 0x002c478f, // n0x0d68 c0x0000 (---------------) + I minamiyamashiro - 0x002b4606, // n0x0d69 c0x0000 (---------------) + I miyazu - 0x002bec44, // n0x0d6a c0x0000 (---------------) + I muko - 0x002fadca, // n0x0d6b c0x0000 (---------------) + I nagaokakyo - 0x00337307, // n0x0d6c c0x0000 (---------------) + I nakagyo - 0x002ca886, // n0x0d6d c0x0000 (---------------) + I nantan - 0x00287409, // n0x0d6e c0x0000 (---------------) + I oyamazaki - 0x00210f85, // n0x0d6f c0x0000 (---------------) + I sakyo - 0x002cb685, // n0x0d70 c0x0000 (---------------) + I seika - 0x002110c6, // n0x0d71 c0x0000 (---------------) + I tanabe - 0x0021b783, // n0x0d72 c0x0000 (---------------) + I uji - 0x00235649, // n0x0d73 c0x0000 (---------------) + I ujitawara - 0x0021a406, // n0x0d74 c0x0000 (---------------) + I wazuka - 0x0020f449, // n0x0d75 c0x0000 (---------------) + I yamashina - 0x00358306, // n0x0d76 c0x0000 (---------------) + I yawata - 0x002b1e85, // n0x0d77 c0x0000 (---------------) + I asahi - 0x0020b185, // n0x0d78 c0x0000 (---------------) + I inabe - 0x0020e943, // n0x0d79 c0x0000 (---------------) + I ise - 0x0020f348, // n0x0d7a c0x0000 (---------------) + I kameyama - 0x0024fe47, // n0x0d7b c0x0000 (---------------) + I kawagoe - 0x0029b284, // n0x0d7c c0x0000 (---------------) + I kiho - 0x00322b48, // n0x0d7d c0x0000 (---------------) + I kisosaki - 0x002df584, // n0x0d7e c0x0000 (---------------) + I kiwa - 0x002a4986, // n0x0d7f c0x0000 (---------------) + I komono - 0x002b5ec6, // n0x0d80 c0x0000 (---------------) + I kumano - 0x0023f446, // n0x0d81 c0x0000 (---------------) + I kuwana - 0x002baec9, // n0x0d82 c0x0000 (---------------) + I matsusaka - 0x00204345, // n0x0d83 c0x0000 (---------------) + I meiwa - 0x002985c6, // n0x0d84 c0x0000 (---------------) + I mihama - 0x00255749, // n0x0d85 c0x0000 (---------------) + I minamiise - 0x002b30c6, // n0x0d86 c0x0000 (---------------) + I misugi - 0x0029b946, // n0x0d87 c0x0000 (---------------) + I miyama - 0x00352b86, // n0x0d88 c0x0000 (---------------) + I nabari - 0x00213545, // n0x0d89 c0x0000 (---------------) + I shima - 0x002df346, // n0x0d8a c0x0000 (---------------) + I suzuka - 0x00363bc4, // n0x0d8b c0x0000 (---------------) + I tado - 0x00332845, // n0x0d8c c0x0000 (---------------) + I taiki - 0x002560c4, // n0x0d8d c0x0000 (---------------) + I taki - 0x00314ec6, // n0x0d8e c0x0000 (---------------) + I tamaki - 0x00285e44, // n0x0d8f c0x0000 (---------------) + I toba - 0x00201a83, // n0x0d90 c0x0000 (---------------) + I 
tsu - 0x0027fd85, // n0x0d91 c0x0000 (---------------) + I udono - 0x00238cc8, // n0x0d92 c0x0000 (---------------) + I ureshino - 0x00342a07, // n0x0d93 c0x0000 (---------------) + I watarai - 0x00265c89, // n0x0d94 c0x0000 (---------------) + I yokkaichi - 0x0027ffc8, // n0x0d95 c0x0000 (---------------) + I furukawa - 0x0028c7d1, // n0x0d96 c0x0000 (---------------) + I higashimatsushima - 0x00384e8a, // n0x0d97 c0x0000 (---------------) + I ishinomaki - 0x002d6d07, // n0x0d98 c0x0000 (---------------) + I iwanuma - 0x002cb746, // n0x0d99 c0x0000 (---------------) + I kakuda - 0x00225044, // n0x0d9a c0x0000 (---------------) + I kami - 0x002ac548, // n0x0d9b c0x0000 (---------------) + I kawasaki - 0x00375b89, // n0x0d9c c0x0000 (---------------) + I kesennuma - 0x00356788, // n0x0d9d c0x0000 (---------------) + I marumori - 0x0028c98a, // n0x0d9e c0x0000 (---------------) + I matsushima - 0x00289d4d, // n0x0d9f c0x0000 (---------------) + I minamisanriku - 0x002467c6, // n0x0da0 c0x0000 (---------------) + I misato - 0x00234106, // n0x0da1 c0x0000 (---------------) + I murata - 0x002a1e86, // n0x0da2 c0x0000 (---------------) + I natori - 0x00305507, // n0x0da3 c0x0000 (---------------) + I ogawara - 0x002fdc45, // n0x0da4 c0x0000 (---------------) + I ohira - 0x00237687, // n0x0da5 c0x0000 (---------------) + I onagawa - 0x00322c05, // n0x0da6 c0x0000 (---------------) + I osaki - 0x002cb1c4, // n0x0da7 c0x0000 (---------------) + I rifu - 0x002dc406, // n0x0da8 c0x0000 (---------------) + I semine - 0x00369f07, // n0x0da9 c0x0000 (---------------) + I shibata - 0x0023534d, // n0x0daa c0x0000 (---------------) + I shichikashuku - 0x00294c87, // n0x0dab c0x0000 (---------------) + I shikama - 0x00258548, // n0x0dac c0x0000 (---------------) + I shiogama - 0x00272f89, // n0x0dad c0x0000 (---------------) + I shiroishi - 0x0027e506, // n0x0dae c0x0000 (---------------) + I tagajo - 0x0024e4c5, // n0x0daf c0x0000 (---------------) + I taiwa - 0x00242084, // n0x0db0 c0x0000 (---------------) + I tome - 0x00247546, // n0x0db1 c0x0000 (---------------) + I tomiya - 0x002c3946, // n0x0db2 c0x0000 (---------------) + I wakuya - 0x00203c06, // n0x0db3 c0x0000 (---------------) + I watari - 0x00290788, // n0x0db4 c0x0000 (---------------) + I yamamoto - 0x00213043, // n0x0db5 c0x0000 (---------------) + I zao - 0x00203503, // n0x0db6 c0x0000 (---------------) + I aya - 0x00200c05, // n0x0db7 c0x0000 (---------------) + I ebino - 0x00269c06, // n0x0db8 c0x0000 (---------------) + I gokase - 0x002a0ac5, // n0x0db9 c0x0000 (---------------) + I hyuga - 0x00287148, // n0x0dba c0x0000 (---------------) + I kadogawa - 0x00291e4a, // n0x0dbb c0x0000 (---------------) + I kawaminami - 0x00207804, // n0x0dbc c0x0000 (---------------) + I kijo - 0x002bdfc8, // n0x0dbd c0x0000 (---------------) + I kitagawa - 0x0020ec88, // n0x0dbe c0x0000 (---------------) + I kitakata - 0x002121c7, // n0x0dbf c0x0000 (---------------) + I kitaura - 0x002392c9, // n0x0dc0 c0x0000 (---------------) + I kobayashi - 0x002a8e08, // n0x0dc1 c0x0000 (---------------) + I kunitomi - 0x002790c7, // n0x0dc2 c0x0000 (---------------) + I kushima - 0x002aabc6, // n0x0dc3 c0x0000 (---------------) + I mimata - 0x0021054a, // n0x0dc4 c0x0000 (---------------) + I miyakonojo - 0x002475c8, // n0x0dc5 c0x0000 (---------------) + I miyazaki - 0x002ad449, // n0x0dc6 c0x0000 (---------------) + I morotsuka - 0x002aef08, // n0x0dc7 c0x0000 (---------------) + I nichinan - 0x0021ac09, // n0x0dc8 c0x0000 (---------------) + I nishimera - 
0x002ad8c7, // n0x0dc9 c0x0000 (---------------) + I nobeoka - 0x00261385, // n0x0dca c0x0000 (---------------) + I saito - 0x003643c6, // n0x0dcb c0x0000 (---------------) + I shiiba - 0x002c8808, // n0x0dcc c0x0000 (---------------) + I shintomi - 0x00254cc8, // n0x0dcd c0x0000 (---------------) + I takaharu - 0x0020f848, // n0x0dce c0x0000 (---------------) + I takanabe - 0x00240a08, // n0x0dcf c0x0000 (---------------) + I takazaki - 0x00217185, // n0x0dd0 c0x0000 (---------------) + I tsuno - 0x00202a84, // n0x0dd1 c0x0000 (---------------) + I achi - 0x0036f9c8, // n0x0dd2 c0x0000 (---------------) + I agematsu - 0x00274b04, // n0x0dd3 c0x0000 (---------------) + I anan - 0x00213084, // n0x0dd4 c0x0000 (---------------) + I aoki - 0x002b1e85, // n0x0dd5 c0x0000 (---------------) + I asahi - 0x00289087, // n0x0dd6 c0x0000 (---------------) + I azumino - 0x00211dc9, // n0x0dd7 c0x0000 (---------------) + I chikuhoku - 0x0026f507, // n0x0dd8 c0x0000 (---------------) + I chikuma - 0x002052c5, // n0x0dd9 c0x0000 (---------------) + I chino - 0x00270d06, // n0x0dda c0x0000 (---------------) + I fujimi - 0x00327586, // n0x0ddb c0x0000 (---------------) + I hakuba - 0x00203084, // n0x0ddc c0x0000 (---------------) + I hara - 0x00296b06, // n0x0ddd c0x0000 (---------------) + I hiraya - 0x00205904, // n0x0dde c0x0000 (---------------) + I iida - 0x00288306, // n0x0ddf c0x0000 (---------------) + I iijima - 0x00369446, // n0x0de0 c0x0000 (---------------) + I iiyama - 0x00217d46, // n0x0de1 c0x0000 (---------------) + I iizuna - 0x002056c5, // n0x0de2 c0x0000 (---------------) + I ikeda - 0x00287007, // n0x0de3 c0x0000 (---------------) + I ikusaka - 0x00208983, // n0x0de4 c0x0000 (---------------) + I ina - 0x002da849, // n0x0de5 c0x0000 (---------------) + I karuizawa - 0x002d6508, // n0x0de6 c0x0000 (---------------) + I kawakami - 0x00278f44, // n0x0de7 c0x0000 (---------------) + I kiso - 0x00278f4d, // n0x0de8 c0x0000 (---------------) + I kisofukushima - 0x0028be88, // n0x0de9 c0x0000 (---------------) + I kitaaiki - 0x0029be08, // n0x0dea c0x0000 (---------------) + I komagane - 0x002ad3c6, // n0x0deb c0x0000 (---------------) + I komoro - 0x00358c09, // n0x0dec c0x0000 (---------------) + I matsukawa - 0x002b3689, // n0x0ded c0x0000 (---------------) + I matsumoto - 0x002fc1c5, // n0x0dee c0x0000 (---------------) + I miasa - 0x00291f4a, // n0x0def c0x0000 (---------------) + I minamiaiki - 0x00267dca, // n0x0df0 c0x0000 (---------------) + I minamimaki - 0x0027534c, // n0x0df1 c0x0000 (---------------) + I minamiminowa - 0x002754c6, // n0x0df2 c0x0000 (---------------) + I minowa - 0x00271cc6, // n0x0df3 c0x0000 (---------------) + I miyada - 0x002b5086, // n0x0df4 c0x0000 (---------------) + I miyota - 0x00265609, // n0x0df5 c0x0000 (---------------) + I mochizuki - 0x0036fe86, // n0x0df6 c0x0000 (---------------) + I nagano - 0x002376c6, // n0x0df7 c0x0000 (---------------) + I nagawa - 0x00383bc6, // n0x0df8 c0x0000 (---------------) + I nagiso - 0x00276cc8, // n0x0df9 c0x0000 (---------------) + I nakagawa - 0x0026ac06, // n0x0dfa c0x0000 (---------------) + I nakano - 0x0029794b, // n0x0dfb c0x0000 (---------------) + I nozawaonsen - 0x00289205, // n0x0dfc c0x0000 (---------------) + I obuse - 0x00228905, // n0x0dfd c0x0000 (---------------) + I ogawa - 0x00271f45, // n0x0dfe c0x0000 (---------------) + I okaya - 0x0025da06, // n0x0dff c0x0000 (---------------) + I omachi - 0x00210503, // n0x0e00 c0x0000 (---------------) + I omi - 0x0023f3c6, // n0x0e01 c0x0000 
(---------------) + I ookuwa - 0x00294c07, // n0x0e02 c0x0000 (---------------) + I ooshika - 0x002ac405, // n0x0e03 c0x0000 (---------------) + I otaki - 0x002446c5, // n0x0e04 c0x0000 (---------------) + I otari - 0x002e18c5, // n0x0e05 c0x0000 (---------------) + I sakae - 0x003023c6, // n0x0e06 c0x0000 (---------------) + I sakaki - 0x0020d444, // n0x0e07 c0x0000 (---------------) + I saku - 0x0020d446, // n0x0e08 c0x0000 (---------------) + I sakuho - 0x002adc49, // n0x0e09 c0x0000 (---------------) + I shimosuwa - 0x0025d88c, // n0x0e0a c0x0000 (---------------) + I shinanomachi - 0x002caf08, // n0x0e0b c0x0000 (---------------) + I shiojiri - 0x002add84, // n0x0e0c c0x0000 (---------------) + I suwa - 0x002defc6, // n0x0e0d c0x0000 (---------------) + I suzaka - 0x00292446, // n0x0e0e c0x0000 (---------------) + I takagi - 0x0020ee08, // n0x0e0f c0x0000 (---------------) + I takamori - 0x002d6ec8, // n0x0e10 c0x0000 (---------------) + I takayama - 0x0025d789, // n0x0e11 c0x0000 (---------------) + I tateshina - 0x00217107, // n0x0e12 c0x0000 (---------------) + I tatsuno - 0x00273909, // n0x0e13 c0x0000 (---------------) + I togakushi - 0x00269186, // n0x0e14 c0x0000 (---------------) + I togura - 0x0022ebc4, // n0x0e15 c0x0000 (---------------) + I tomi - 0x00211344, // n0x0e16 c0x0000 (---------------) + I ueda - 0x00278cc4, // n0x0e17 c0x0000 (---------------) + I wada - 0x002797c8, // n0x0e18 c0x0000 (---------------) + I yamagata - 0x00211c0a, // n0x0e19 c0x0000 (---------------) + I yamanouchi - 0x00327d06, // n0x0e1a c0x0000 (---------------) + I yasaka - 0x00330c47, // n0x0e1b c0x0000 (---------------) + I yasuoka - 0x00231987, // n0x0e1c c0x0000 (---------------) + I chijiwa - 0x0024d5c5, // n0x0e1d c0x0000 (---------------) + I futsu - 0x002bc584, // n0x0e1e c0x0000 (---------------) + I goto - 0x00284ec6, // n0x0e1f c0x0000 (---------------) + I hasami - 0x002fdc86, // n0x0e20 c0x0000 (---------------) + I hirado - 0x00223ac3, // n0x0e21 c0x0000 (---------------) + I iki - 0x002d6347, // n0x0e22 c0x0000 (---------------) + I isahaya - 0x00202448, // n0x0e23 c0x0000 (---------------) + I kawatana - 0x002fc30a, // n0x0e24 c0x0000 (---------------) + I kuchinotsu - 0x002035c8, // n0x0e25 c0x0000 (---------------) + I matsuura - 0x00276088, // n0x0e26 c0x0000 (---------------) + I nagasaki - 0x00285e85, // n0x0e27 c0x0000 (---------------) + I obama - 0x002573c5, // n0x0e28 c0x0000 (---------------) + I omura - 0x00295d85, // n0x0e29 c0x0000 (---------------) + I oseto - 0x0024d986, // n0x0e2a c0x0000 (---------------) + I saikai - 0x00251a86, // n0x0e2b c0x0000 (---------------) + I sasebo - 0x003803c5, // n0x0e2c c0x0000 (---------------) + I seihi - 0x00378289, // n0x0e2d c0x0000 (---------------) + I shimabara - 0x002bc38c, // n0x0e2e c0x0000 (---------------) + I shinkamigoto - 0x002342c7, // n0x0e2f c0x0000 (---------------) + I togitsu - 0x0028ca08, // n0x0e30 c0x0000 (---------------) + I tsushima - 0x0026ce85, // n0x0e31 c0x0000 (---------------) + I unzen - 0x007242c4, // n0x0e32 c0x0001 (---------------) ! 
I city
0x0032ca84, // n0x0e33 c0x0000 (---------------) + I ando
0x00271a04, // n0x0e34 c0x0000 (---------------) + I gose
0x00379e46, // n0x0e35 c0x0000 (---------------) + I heguri
0x00293fce, // n0x0e36 c0x0000 (---------------) + I higashiyoshino
0x0020aac7, // n0x0e37 c0x0000 (---------------) + I ikaruga
0x00305705, // n0x0e38 c0x0000 (---------------) + I ikoma
0x002a260c, // n0x0e39 c0x0000 (---------------) + I kamikitayama
0x002c92c7, // n0x0e3a c0x0000 (---------------) + I kanmaki
0x00369e87, // n0x0e3b c0x0000 (---------------) + I kashiba
0x0036b589, // n0x0e3c c0x0000 (---------------) + I kashihara
0x00219949, // n0x0e3d c0x0000 (---------------) + I katsuragi
0x0027de85, // n0x0e3e c0x0000 (---------------) + I kawai
0x002d6508, // n0x0e3f c0x0000 (---------------) + I kawakami
0x00230389, // n0x0e40 c0x0000 (---------------) + I kawanishi
0x002dba45, // n0x0e41 c0x0000 (---------------) + I koryo
0x002ac348, // n0x0e42 c0x0000 (---------------) + I kurotaki
0x002ba9c6, // n0x0e43 c0x0000 (---------------) + I mitsue
0x0030b246, // n0x0e44 c0x0000 (---------------) + I miyake
0x002e7784, // n0x0e45 c0x0000 (---------------) + I nara
0x00272148, // n0x0e46 c0x0000 (---------------) + I nosegawa
0x00254b43, // n0x0e47 c0x0000 (---------------) + I oji
0x0037e404, // n0x0e48 c0x0000 (---------------) + I ouda
0x002e6505, // n0x0e49 c0x0000 (---------------) + I oyodo
0x0020e647, // n0x0e4a c0x0000 (---------------) + I sakurai
0x002e6945, // n0x0e4b c0x0000 (---------------) + I sango
0x00205149, // n0x0e4c c0x0000 (---------------) + I shimoichi
0x002deb0d, // n0x0e4d c0x0000 (---------------) + I shimokitayama
0x002b9446, // n0x0e4e c0x0000 (---------------) + I shinjo
0x00358944, // n0x0e4f c0x0000 (---------------) + I soni
0x00218fc8, // n0x0e50 c0x0000 (---------------) + I takatori
0x00268a8a, // n0x0e51 c0x0000 (---------------) + I tawaramoto
0x00208587, // n0x0e52 c0x0000 (---------------) + I tenkawa
0x0026b6c5, // n0x0e53 c0x0000 (---------------) + I tenri
0x00212443, // n0x0e54 c0x0000 (---------------) + I uda
0x0029360e, // n0x0e55 c0x0000 (---------------) + I yamatokoriyama
0x002a280c, // n0x0e56 c0x0000 (---------------) + I yamatotakada
0x0029d147, // n0x0e57 c0x0000 (---------------) + I yamazoe
0x00294187, // n0x0e58 c0x0000 (---------------) + I yoshino
0x00203903, // n0x0e59 c0x0000 (---------------) + I aga
0x0036fec5, // n0x0e5a c0x0000 (---------------) + I agano
0x00271a05, // n0x0e5b c0x0000 (---------------) + I gosen
0x0028d648, // n0x0e5c c0x0000 (---------------) + I itoigawa
0x0028af09, // n0x0e5d c0x0000 (---------------) + I izumozaki
0x00202e06, // n0x0e5e c0x0000 (---------------) + I joetsu
0x0020ee84, // n0x0e5f c0x0000 (---------------) + I kamo
0x002d6c46, // n0x0e60 c0x0000 (---------------) + I kariwa
0x00382a4b, // n0x0e61 c0x0000 (---------------) + I kashiwazaki
0x002b8acc, // n0x0e62 c0x0000 (---------------) + I minamiuonuma
0x002778c7, // n0x0e63 c0x0000 (---------------) + I mitsuke
0x002be985, // n0x0e64 c0x0000 (---------------) + I muika
0x003562c8, // n0x0e65 c0x0000 (---------------) + I murakami
0x00345b85, // n0x0e66 c0x0000 (---------------) + I myoko
0x002fadc7, // n0x0e67 c0x0000 (---------------) + I nagaoka
0x003589c7, // n0x0e68 c0x0000 (---------------) + I niigata
0x00254b45, // n0x0e69 c0x0000 (---------------) + I ojiya
0x00210503, // n0x0e6a c0x0000 (---------------) + I omi
0x0035a204, // n0x0e6b c0x0000 (---------------) + I sado
0x00324485, // n0x0e6c c0x0000 (---------------) + I sanjo
0x002ed245, // n0x0e6d c0x0000 (---------------) + I seiro
0x002ed246, // n0x0e6e c0x0000 (---------------) + I seirou
0x002a2e48, // n0x0e6f c0x0000 (---------------) + I sekikawa
0x00369f07, // n0x0e70 c0x0000 (---------------) + I shibata
0x00331146, // n0x0e71 c0x0000 (---------------) + I tagami
0x002c3c06, // n0x0e72 c0x0000 (---------------) + I tainai
0x00345086, // n0x0e73 c0x0000 (---------------) + I tochio
0x00308389, // n0x0e74 c0x0000 (---------------) + I tokamachi
0x00237a47, // n0x0e75 c0x0000 (---------------) + I tsubame
0x00202c86, // n0x0e76 c0x0000 (---------------) + I tsunan
0x002b8c46, // n0x0e77 c0x0000 (---------------) + I uonuma
0x0026f046, // n0x0e78 c0x0000 (---------------) + I yahiko
0x002a2405, // n0x0e79 c0x0000 (---------------) + I yoita
0x0021ea86, // n0x0e7a c0x0000 (---------------) + I yuzawa
0x00375785, // n0x0e7b c0x0000 (---------------) + I beppu
0x0030ae88, // n0x0e7c c0x0000 (---------------) + I bungoono
0x0033e7cb, // n0x0e7d c0x0000 (---------------) + I bungotakada
0x00284cc6, // n0x0e7e c0x0000 (---------------) + I hasama
0x002319c4, // n0x0e7f c0x0000 (---------------) + I hiji
0x002d4509, // n0x0e80 c0x0000 (---------------) + I himeshima
0x00276b04, // n0x0e81 c0x0000 (---------------) + I hita
0x002ba948, // n0x0e82 c0x0000 (---------------) + I kamitsue
0x00295307, // n0x0e83 c0x0000 (---------------) + I kokonoe
0x0027c104, // n0x0e84 c0x0000 (---------------) + I kuju
0x002a6fc8, // n0x0e85 c0x0000 (---------------) + I kunisaki
0x002aec04, // n0x0e86 c0x0000 (---------------) + I kusu
0x002a2444, // n0x0e87 c0x0000 (---------------) + I oita
0x002d0805, // n0x0e88 c0x0000 (---------------) + I saiki
0x002e82c6, // n0x0e89 c0x0000 (---------------) + I taketa
0x0029b807, // n0x0e8a c0x0000 (---------------) + I tsukumi
0x00232883, // n0x0e8b c0x0000 (---------------) + I usa
0x00292e85, // n0x0e8c c0x0000 (---------------) + I usuki
0x00373e84, // n0x0e8d c0x0000 (---------------) + I yufu
0x0032d546, // n0x0e8e c0x0000 (---------------) + I akaiwa
0x002fc248, // n0x0e8f c0x0000 (---------------) + I asakuchi
0x00202185, // n0x0e90 c0x0000 (---------------) + I bizen
0x00288749, // n0x0e91 c0x0000 (---------------) + I hayashima
0x002d0c45, // n0x0e92 c0x0000 (---------------) + I ibara
0x0027f348, // n0x0e93 c0x0000 (---------------) + I kagamino
0x00363287, // n0x0e94 c0x0000 (---------------) + I kasaoka
0x00347188, // n0x0e95 c0x0000 (---------------) + I kibichuo
0x002a6687, // n0x0e96 c0x0000 (---------------) + I kumenan
0x00307049, // n0x0e97 c0x0000 (---------------) + I kurashiki
0x0026b446, // n0x0e98 c0x0000 (---------------) + I maniwa
0x002d73c6, // n0x0e99 c0x0000 (---------------) + I misaki
0x002e0584, // n0x0e9a c0x0000 (---------------) + I nagi
0x0028f505, // n0x0e9b c0x0000 (---------------) + I niimi
0x002eb04c, // n0x0e9c c0x0000 (---------------) + I nishiawakura
0x00271f47, // n0x0e9d c0x0000 (---------------) + I okayama
0x00272447, // n0x0e9e c0x0000 (---------------) + I satosho
0x00231848, // n0x0e9f c0x0000 (---------------) + I setouchi
0x002b9446, // n0x0ea0 c0x0000 (---------------) + I shinjo
0x00294b44, // n0x0ea1 c0x0000 (---------------) + I shoo
0x003335c4, // n0x0ea2 c0x0000 (---------------) + I soja
0x00271149, // n0x0ea3 c0x0000 (---------------) + I takahashi
0x002b5186, // n0x0ea4 c0x0000 (---------------) + I tamano
0x0021d547, // n0x0ea5 c0x0000 (---------------) + I tsuyama
0x0036da04, // n0x0ea6 c0x0000 (---------------) + I wake
0x00296c06, // n0x0ea7 c0x0000 (---------------) + I yakage
0x00382645, // n0x0ea8 c0x0000 (---------------) + I aguni
0x00292547, // n0x0ea9 c0x0000 (---------------) + I ginowan
0x002978c6, // n0x0eaa c0x0000 (---------------) + I ginoza
0x00256ec9, // n0x0eab c0x0000 (---------------) + I gushikami
0x002b9e87, // n0x0eac c0x0000 (---------------) + I haebaru
0x00260cc7, // n0x0ead c0x0000 (---------------) + I higashi
0x00296646, // n0x0eae c0x0000 (---------------) + I hirara
0x002fb6c5, // n0x0eaf c0x0000 (---------------) + I iheya
0x00278a08, // n0x0eb0 c0x0000 (---------------) + I ishigaki
0x0021a288, // n0x0eb1 c0x0000 (---------------) + I ishikawa
0x002fbb86, // n0x0eb2 c0x0000 (---------------) + I itoman
0x002021c5, // n0x0eb3 c0x0000 (---------------) + I izena
0x002b0946, // n0x0eb4 c0x0000 (---------------) + I kadena
0x00211b03, // n0x0eb5 c0x0000 (---------------) + I kin
0x0028d4c9, // n0x0eb6 c0x0000 (---------------) + I kitadaito
0x00292f4e, // n0x0eb7 c0x0000 (---------------) + I kitanakagusuku
0x002a6388, // n0x0eb8 c0x0000 (---------------) + I kumejima
0x002df688, // n0x0eb9 c0x0000 (---------------) + I kunigami
0x002fb98b, // n0x0eba c0x0000 (---------------) + I minamidaito
0x00288986, // n0x0ebb c0x0000 (---------------) + I motobu
0x0024fa44, // n0x0ebc c0x0000 (---------------) + I nago
0x0027a944, // n0x0ebd c0x0000 (---------------) + I naha
0x0029304a, // n0x0ebe c0x0000 (---------------) + I nakagusuku
0x00217887, // n0x0ebf c0x0000 (---------------) + I nakijin
0x00202d45, // n0x0ec0 c0x0000 (---------------) + I nanjo
0x00217509, // n0x0ec1 c0x0000 (---------------) + I nishihara
0x002aab05, // n0x0ec2 c0x0000 (---------------) + I ogimi
0x002130c7, // n0x0ec3 c0x0000 (---------------) + I okinawa
0x0020d704, // n0x0ec4 c0x0000 (---------------) + I onna
0x002e8d47, // n0x0ec5 c0x0000 (---------------) + I shimoji
0x00246648, // n0x0ec6 c0x0000 (---------------) + I taketomi
0x002e98c6, // n0x0ec7 c0x0000 (---------------) + I tarama
0x002f2fc9, // n0x0ec8 c0x0000 (---------------) + I tokashiki
0x002a8f0a, // n0x0ec9 c0x0000 (---------------) + I tomigusuku
0x00217806, // n0x0eca c0x0000 (---------------) + I tonaki
0x0028b9c6, // n0x0ecb c0x0000 (---------------) + I urasoe
0x0029f645, // n0x0ecc c0x0000 (---------------) + I uruma
0x0034b545, // n0x0ecd c0x0000 (---------------) + I yaese
0x00344387, // n0x0ece c0x0000 (---------------) + I yomitan
0x00365348, // n0x0ecf c0x0000 (---------------) + I yonabaru
0x00382588, // n0x0ed0 c0x0000 (---------------) + I yonaguni
0x002493c6, // n0x0ed1 c0x0000 (---------------) + I zamami
0x002ad045, // n0x0ed2 c0x0000 (---------------) + I abeno
0x0024fb4e, // n0x0ed3 c0x0000 (---------------) + I chihayaakasaka
0x00347284, // n0x0ed4 c0x0000 (---------------) + I chuo
0x0028d5c5, // n0x0ed5 c0x0000 (---------------) + I daito
0x00270689, // n0x0ed6 c0x0000 (---------------) + I fujiidera
0x0027dc88, // n0x0ed7 c0x0000 (---------------) + I habikino
0x00282846, // n0x0ed8 c0x0000 (---------------) + I hannan
0x0029040c, // n0x0ed9 c0x0000 (---------------) + I higashiosaka
0x00291a50, // n0x0eda c0x0000 (---------------) + I higashisumiyoshi
0x00293c0f, // n0x0edb c0x0000 (---------------) + I higashiyodogawa
0x002954c8, // n0x0edc c0x0000 (---------------) + I hirakata
0x00316ec7, // n0x0edd c0x0000 (---------------) + I ibaraki
0x002056c5, // n0x0ede c0x0000 (---------------) + I ikeda
0x002395c5, // n0x0edf c0x0000 (---------------) + I izumi
0x0029b689, // n0x0ee0 c0x0000 (---------------) + I izumiotsu
0x002877c9, // n0x0ee1 c0x0000 (---------------) + I izumisano
0x0023d286, // n0x0ee2 c0x0000 (---------------) + I kadoma
0x002f1dc7, // n0x0ee3 c0x0000 (---------------) + I kaizuka
0x0037edc5, // n0x0ee4 c0x0000 (---------------) + I kanan
0x00375e89, // n0x0ee5 c0x0000 (---------------) + I kashiwara
0x00317c46, // n0x0ee6 c0x0000 (---------------) + I katano
0x0036fccd, // n0x0ee7 c0x0000 (---------------) + I kawachinagano
0x00278b89, // n0x0ee8 c0x0000 (---------------) + I kishiwada
0x0020ec84, // n0x0ee9 c0x0000 (---------------) + I kita
0x002a6108, // n0x0eea c0x0000 (---------------) + I kumatori
0x0036fa89, // n0x0eeb c0x0000 (---------------) + I matsubara
0x00327ec6, // n0x0eec c0x0000 (---------------) + I minato
0x00270e05, // n0x0eed c0x0000 (---------------) + I minoh
0x002d73c6, // n0x0eee c0x0000 (---------------) + I misaki
0x0026e149, // n0x0eef c0x0000 (---------------) + I moriguchi
0x00236308, // n0x0ef0 c0x0000 (---------------) + I neyagawa
0x00215405, // n0x0ef1 c0x0000 (---------------) + I nishi
0x00205384, // n0x0ef2 c0x0000 (---------------) + I nose
0x002905cb, // n0x0ef3 c0x0000 (---------------) + I osakasayama
0x00327d85, // n0x0ef4 c0x0000 (---------------) + I sakai
0x0024ef06, // n0x0ef5 c0x0000 (---------------) + I sayama
0x0027b086, // n0x0ef6 c0x0000 (---------------) + I sennan
0x00290cc6, // n0x0ef7 c0x0000 (---------------) + I settsu
0x0032b20b, // n0x0ef8 c0x0000 (---------------) + I shijonawate
0x00288849, // n0x0ef9 c0x0000 (---------------) + I shimamoto
0x0030a705, // n0x0efa c0x0000 (---------------) + I suita
0x00346f47, // n0x0efb c0x0000 (---------------) + I tadaoka
0x00384e06, // n0x0efc c0x0000 (---------------) + I taishi
0x0027a486, // n0x0efd c0x0000 (---------------) + I tajiri
0x00271788, // n0x0efe c0x0000 (---------------) + I takaishi
0x003427c9, // n0x0eff c0x0000 (---------------) + I takatsuki
0x0025830c, // n0x0f00 c0x0000 (---------------) + I tondabayashi
0x00337208, // n0x0f01 c0x0000 (---------------) + I toyonaka
0x0037a286, // n0x0f02 c0x0000 (---------------) + I toyono
0x0031f2c3, // n0x0f03 c0x0000 (---------------) + I yao
0x00288ec6, // n0x0f04 c0x0000 (---------------) + I ariake
0x00306f05, // n0x0f05 c0x0000 (---------------) + I arita
0x00276848, // n0x0f06 c0x0000 (---------------) + I fukudomi
0x00229086, // n0x0f07 c0x0000 (---------------) + I genkai
0x0036a608, // n0x0f08 c0x0000 (---------------) + I hamatama
0x00216e05, // n0x0f09 c0x0000 (---------------) + I hizen
0x00317285, // n0x0f0a c0x0000 (---------------) + I imari
0x002ada08, // n0x0f0b c0x0000 (---------------) + I kamimine
0x002df447, // n0x0f0c c0x0000 (---------------) + I kanzaki
0x00330d87, // n0x0f0d c0x0000 (---------------) + I karatsu
0x002b3547, // n0x0f0e c0x0000 (---------------) + I kashima
0x00322cc8, // n0x0f0f c0x0000 (---------------) + I kitagata
0x00271488, // n0x0f10 c0x0000 (---------------) + I kitahata
0x0024c346, // n0x0f11 c0x0000 (---------------) + I kiyama
0x00314d07, // n0x0f12 c0x0000 (---------------) + I kouhoku
0x00269907, // n0x0f13 c0x0000 (---------------) + I kyuragi
0x0032af0a, // n0x0f14 c0x0000 (---------------) + I nishiarita
0x00234303, // n0x0f15 c0x0000 (---------------) + I ogi
0x0025da06, // n0x0f16 c0x0000 (---------------) + I omachi
0x00211d45, // n0x0f17 c0x0000 (---------------) + I ouchi
0x002af744, // n0x0f18 c0x0000 (---------------) + I saga
0x00272f89, // n0x0f19 c0x0000 (---------------) + I shiroishi
0x00306fc4, // n0x0f1a c0x0000 (---------------) + I taku
0x002e08c4, // n0x0f1b c0x0000 (---------------) + I tara
0x002922c4, // n0x0f1c c0x0000 (---------------) + I tosu
0x0029418b, // n0x0f1d c0x0000 (---------------) + I yoshinogari
0x0036fc07, // n0x0f1e c0x0000 (---------------) + I arakawa
0x0024fd85, // n0x0f1f c0x0000 (---------------) + I asaka
0x0028a5c8, // n0x0f20 c0x0000 (---------------) + I chichibu
0x00270d06, // n0x0f21 c0x0000 (---------------) + I fujimi
0x00270d08, // n0x0f22 c0x0000 (---------------) + I fujimino
0x00275906, // n0x0f23 c0x0000 (---------------) + I fukaya
0x00282f85, // n0x0f24 c0x0000 (---------------) + I hanno
0x00283845, // n0x0f25 c0x0000 (---------------) + I hanyu
0x00286386, // n0x0f26 c0x0000 (---------------) + I hasuda
0x00286a08, // n0x0f27 c0x0000 (---------------) + I hatogaya
0x00287348, // n0x0f28 c0x0000 (---------------) + I hatoyama
0x00273146, // n0x0f29 c0x0000 (---------------) + I hidaka
0x0028a40f, // n0x0f2a c0x0000 (---------------) + I higashichichibu
0x0028cf90, // n0x0f2b c0x0000 (---------------) + I higashimatsuyama
0x00206445, // n0x0f2c c0x0000 (---------------) + I honjo
0x00208983, // n0x0f2d c0x0000 (---------------) + I ina
0x00283b45, // n0x0f2e c0x0000 (---------------) + I iruma
0x00352cc8, // n0x0f2f c0x0000 (---------------) + I iwatsuki
0x002876c9, // n0x0f30 c0x0000 (---------------) + I kamiizumi
0x00334ec8, // n0x0f31 c0x0000 (---------------) + I kamikawa
0x002f43c8, // n0x0f32 c0x0000 (---------------) + I kamisato
0x00200488, // n0x0f33 c0x0000 (---------------) + I kasukabe
0x0024fe47, // n0x0f34 c0x0000 (---------------) + I kawagoe
0x002709c9, // n0x0f35 c0x0000 (---------------) + I kawaguchi
0x0036a808, // n0x0f36 c0x0000 (---------------) + I kawajima
0x002f7044, // n0x0f37 c0x0000 (---------------) + I kazo
0x00292148, // n0x0f38 c0x0000 (---------------) + I kitamoto
0x0025c4c9, // n0x0f39 c0x0000 (---------------) + I koshigaya
0x00337647, // n0x0f3a c0x0000 (---------------) + I kounosu
0x002a9104, // n0x0f3b c0x0000 (---------------) + I kuki
0x0026f5c8, // n0x0f3c c0x0000 (---------------) + I kumagaya
0x00286dca, // n0x0f3d c0x0000 (---------------) + I matsubushi
0x002c7006, // n0x0f3e c0x0000 (---------------) + I minano
0x002467c6, // n0x0f3f c0x0000 (---------------) + I misato
0x0021c149, // n0x0f40 c0x0000 (---------------) + I miyashiro
0x00291c87, // n0x0f41 c0x0000 (---------------) + I miyoshi
0x002bad48, // n0x0f42 c0x0000 (---------------) + I moroyama
0x002419c8, // n0x0f43 c0x0000 (---------------) + I nagatoro
0x002c37c8, // n0x0f44 c0x0000 (---------------) + I namegawa
0x0020c9c5, // n0x0f45 c0x0000 (---------------) + I niiza
0x002fcb45, // n0x0f46 c0x0000 (---------------) + I ogano
0x00228905, // n0x0f47 c0x0000 (---------------) + I ogawa
0x002719c5, // n0x0f48 c0x0000 (---------------) + I ogose
0x002b0047, // n0x0f49 c0x0000 (---------------) + I okegawa
0x00210505, // n0x0f4a c0x0000 (---------------) + I omiya
0x002ac405, // n0x0f4b c0x0000 (---------------) + I otaki
0x0037e886, // n0x0f4c c0x0000 (---------------) + I ranzan
0x00334e07, // n0x0f4d c0x0000 (---------------) + I ryokami
0x0030c587, // n0x0f4e c0x0000 (---------------) + I saitama
0x002870c6, // n0x0f4f c0x0000 (---------------) + I sakado
0x002c09c5, // n0x0f50 c0x0000 (---------------) + I satte
0x0024ef06, // n0x0f51 c0x0000 (---------------) + I sayama
0x00299b05, // n0x0f52 c0x0000 (---------------) + I shiki
0x002d9588, // n0x0f53 c0x0000 (---------------) + I shiraoka
0x002e53c4, // n0x0f54 c0x0000 (---------------) + I soka
0x002b3146, // n0x0f55 c0x0000 (---------------) + I sugito
0x003501c4, // n0x0f56 c0x0000 (---------------) + I toda
0x00221008, // n0x0f57 c0x0000 (---------------) + I tokigawa
0x0035dc4a, // n0x0f58 c0x0000 (---------------) + I tokorozawa
0x0028194c, // n0x0f59 c0x0000 (---------------) + I tsurugashima
0x00201045, // n0x0f5a c0x0000 (---------------) + I urawa
0x003055c6, // n0x0f5b c0x0000 (---------------) + I warabi
0x002584c6, // n0x0f5c c0x0000 (---------------) + I yashio
0x002db446, // n0x0f5d c0x0000 (---------------) + I yokoze
0x00352ec4, // n0x0f5e c0x0000 (---------------) + I yono
0x00382905, // n0x0f5f c0x0000 (---------------) + I yorii
0x00275747, // n0x0f60 c0x0000 (---------------) + I yoshida
0x00291d09, // n0x0f61 c0x0000 (---------------) + I yoshikawa
0x00298487, // n0x0f62 c0x0000 (---------------) + I yoshimi
0x007242c4, // n0x0f63 c0x0001 (---------------) ! I city
0x007242c4, // n0x0f64 c0x0001 (---------------) ! I city
0x002ab985, // n0x0f65 c0x0000 (---------------) + I aisho
0x002b5784, // n0x0f66 c0x0000 (---------------) + I gamo
0x0028fb8a, // n0x0f67 c0x0000 (---------------) + I higashiomi
0x00270b86, // n0x0f68 c0x0000 (---------------) + I hikone
0x002f4344, // n0x0f69 c0x0000 (---------------) + I koka
0x002ca805, // n0x0f6a c0x0000 (---------------) + I konan
0x0033ab85, // n0x0f6b c0x0000 (---------------) + I kosei
0x002fdb84, // n0x0f6c c0x0000 (---------------) + I koto
0x0027acc7, // n0x0f6d c0x0000 (---------------) + I kusatsu
0x002d0bc7, // n0x0f6e c0x0000 (---------------) + I maibara
0x002ba388, // n0x0f6f c0x0000 (---------------) + I moriyama
0x002b0a48, // n0x0f70 c0x0000 (---------------) + I nagahama
0x00215409, // n0x0f71 c0x0000 (---------------) + I nishiazai
0x00317d48, // n0x0f72 c0x0000 (---------------) + I notogawa
0x0028fd4b, // n0x0f73 c0x0000 (---------------) + I omihachiman
0x0024b684, // n0x0f74 c0x0000 (---------------) + I otsu
0x0026b2c5, // n0x0f75 c0x0000 (---------------) + I ritto
0x00265845, // n0x0f76 c0x0000 (---------------) + I ryuoh
0x002b34c9, // n0x0f77 c0x0000 (---------------) + I takashima
0x003427c9, // n0x0f78 c0x0000 (---------------) + I takatsuki
0x002d4408, // n0x0f79 c0x0000 (---------------) + I torahime
0x00243d88, // n0x0f7a c0x0000 (---------------) + I toyosato
0x00212384, // n0x0f7b c0x0000 (---------------) + I yasu
0x00292485, // n0x0f7c c0x0000 (---------------) + I akagi
0x00203583, // n0x0f7d c0x0000 (---------------) + I ama
0x00322a05, // n0x0f7e c0x0000 (---------------) + I gotsu
0x00298646, // n0x0f7f c0x0000 (---------------) + I hamada
0x0028ad4c, // n0x0f80 c0x0000 (---------------) + I higashiizumo
0x0021a306, // n0x0f81 c0x0000 (---------------) + I hikawa
0x002d2006, // n0x0f82 c0x0000 (---------------) + I hikimi
0x00282ac5, // n0x0f83 c0x0000 (---------------) + I izumo
0x00302448, // n0x0f84 c0x0000 (---------------) + I kakinoki
0x002a6506, // n0x0f85 c0x0000 (---------------) + I masuda
0x002cb8c6, // n0x0f86 c0x0000 (---------------) + I matsue
0x002467c6, // n0x0f87 c0x0000 (---------------) + I misato
0x0021ef8c, // n0x0f88 c0x0000 (---------------) + I nishinoshima
0x00203dc4, // n0x0f89 c0x0000 (---------------) + I ohda
0x003451ca, // n0x0f8a c0x0000 (---------------) + I okinoshima
0x00282a08, // n0x0f8b c0x0000 (---------------) + I okuizumo
0x0028ab87, // n0x0f8c c0x0000 (---------------) + I shimane
0x00373d86, // n0x0f8d c0x0000 (---------------) + I tamayu
0x00380087, // n0x0f8e c0x0000 (---------------) + I tsuwano
0x002ce685, // n0x0f8f c0x0000 (---------------) + I unnan
0x002fb786, // n0x0f90 c0x0000 (---------------) + I yakumo
0x0032c646, // n0x0f91 c0x0000 (---------------) + I yasugi
0x00342fc7, // n0x0f92 c0x0000 (---------------) + I yatsuka
0x002a0c44, // n0x0f93 c0x0000 (---------------) + I arai
0x002fc105, // n0x0f94 c0x0000 (---------------) + I atami
0x00270684, // n0x0f95 c0x0000 (---------------) + I fuji
0x002cb247, // n0x0f96 c0x0000 (---------------) + I fujieda
0x002708c8, // n0x0f97 c0x0000 (---------------) + I fujikawa
0x00271b4a, // n0x0f98 c0x0000 (---------------) + I fujinomiya
0x00278887, // n0x0f99 c0x0000 (---------------) + I fukuroi
0x002aae47, // n0x0f9a c0x0000 (---------------) + I gotemba
0x00316e47, // n0x0f9b c0x0000 (---------------) + I haibara
0x00345cc9, // n0x0f9c c0x0000 (---------------) + I hamamatsu
0x0028ad4a, // n0x0f9d c0x0000 (---------------) + I higashiizu
0x002234c3, // n0x0f9e c0x0000 (---------------) + I ito
0x003429c5, // n0x0f9f c0x0000 (---------------) + I iwata
0x00217d83, // n0x0fa0 c0x0000 (---------------) + I izu
0x00315009, // n0x0fa1 c0x0000 (---------------) + I izunokuni
0x00205cc8, // n0x0fa2 c0x0000 (---------------) + I kakegawa
0x002eaa87, // n0x0fa3 c0x0000 (---------------) + I kannami
0x00334fc9, // n0x0fa4 c0x0000 (---------------) + I kawanehon
0x0021a386, // n0x0fa5 c0x0000 (---------------) + I kawazu
0x00276208, // n0x0fa6 c0x0000 (---------------) + I kikugawa
0x002ddbc5, // n0x0fa7 c0x0000 (---------------) + I kosai
0x0026af0a, // n0x0fa8 c0x0000 (---------------) + I makinohara
0x0021f209, // n0x0fa9 c0x0000 (---------------) + I matsuzaki
0x00257fc9, // n0x0faa c0x0000 (---------------) + I minamiizu
0x002b20c7, // n0x0fab c0x0000 (---------------) + I mishima
0x00356889, // n0x0fac c0x0000 (---------------) + I morimachi
0x00217c48, // n0x0fad c0x0000 (---------------) + I nishiizu
0x002df146, // n0x0fae c0x0000 (---------------) + I numazu
0x00305788, // n0x0faf c0x0000 (---------------) + I omaezaki
0x003701c7, // n0x0fb0 c0x0000 (---------------) + I shimada
0x00223607, // n0x0fb1 c0x0000 (---------------) + I shimizu
0x00294e87, // n0x0fb2 c0x0000 (---------------) + I shimoda
0x002f07c8, // n0x0fb3 c0x0000 (---------------) + I shizuoka
0x002dee46, // n0x0fb4 c0x0000 (---------------) + I susono
0x00286b85, // n0x0fb5 c0x0000 (---------------) + I yaizu
0x00275747, // n0x0fb6 c0x0000 (---------------) + I yoshida
0x0028b4c8, // n0x0fb7 c0x0000 (---------------) + I ashikaga
0x0034d404, // n0x0fb8 c0x0000 (---------------) + I bato
0x002b5704, // n0x0fb9 c0x0000 (---------------) + I haga
0x002d6707, // n0x0fba c0x0000 (---------------) + I ichikai
0x00261fc7, // n0x0fbb c0x0000 (---------------) + I iwafune
0x0023020a, // n0x0fbc c0x0000 (---------------) + I kaminokawa
0x002df0c6, // n0x0fbd c0x0000 (---------------) + I kanuma
0x0029cfca, // n0x0fbe c0x0000 (---------------) + I karasuyama
0x002ab147, // n0x0fbf c0x0000 (---------------) + I kuroiso
0x00348e07, // n0x0fc0 c0x0000 (---------------) + I mashiko
0x0033e744, // n0x0fc1 c0x0000 (---------------) + I mibu
0x002d5404, // n0x0fc2 c0x0000 (---------------) + I moka
0x00224446, // n0x0fc3 c0x0000 (---------------) + I motegi
0x0036b984, // n0x0fc4 c0x0000 (---------------) + I nasu
0x0036b98c, // n0x0fc5 c0x0000 (---------------) + I nasushiobara
0x0020ce85, // n0x0fc6 c0x0000 (---------------) + I nikko
0x00218e09, // n0x0fc7 c0x0000 (---------------) + I nishikata
0x0027f4c4, // n0x0fc8 c0x0000 (---------------) + I nogi
0x002fdc45, // n0x0fc9 c0x0000 (---------------) + I ohira
0x00268a08, // n0x0fca c0x0000 (---------------) + I ohtawara
0x00287405, // n0x0fcb c0x0000 (---------------) + I oyama
0x0020e0c6, // n0x0fcc c0x0000 (---------------) + I sakura
0x00287904, // n0x0fcd c0x0000 (---------------) + I sano
0x002f344a, // n0x0fce c0x0000 (---------------) + I shimotsuke
0x002cee86, // n0x0fcf c0x0000 (---------------) + I shioya
0x002917ca, // n0x0fd0 c0x0000 (---------------) + I takanezawa
0x0034d487, // n0x0fd1 c0x0000 (---------------) + I tochigi
0x00201a85, // n0x0fd2 c0x0000 (---------------) + I tsuga
0x0021b785, // n0x0fd3 c0x0000 (---------------) + I ujiie
0x0024d60a, // n0x0fd4 c0x0000 (---------------) + I utsunomiya
0x00254c05, // n0x0fd5 c0x0000 (---------------) + I yaita
0x00294806, // n0x0fd6 c0x0000 (---------------) + I aizumi
0x00274b04, // n0x0fd7 c0x0000 (---------------) + I anan
0x002a6946, // n0x0fd8 c0x0000 (---------------) + I ichiba
0x00344445, // n0x0fd9 c0x0000 (---------------) + I itano
0x00229146, // n0x0fda c0x0000 (---------------) + I kainan
0x00307e4c, // n0x0fdb c0x0000 (---------------) + I komatsushima
0x002c1e8a, // n0x0fdc c0x0000 (---------------) + I matsushige
0x00267ec4, // n0x0fdd c0x0000 (---------------) + I mima
0x00216c06, // n0x0fde c0x0000 (---------------) + I minami
0x00291c87, // n0x0fdf c0x0000 (---------------) + I miyoshi
0x002be3c4, // n0x0fe0 c0x0000 (---------------) + I mugi
0x00276cc8, // n0x0fe1 c0x0000 (---------------) + I nakagawa
0x0035db46, // n0x0fe2 c0x0000 (---------------) + I naruto
0x0024f9c9, // n0x0fe3 c0x0000 (---------------) + I sanagochi
0x002e45c9, // n0x0fe4 c0x0000 (---------------) + I shishikui
0x002c69c9, // n0x0fe5 c0x0000 (---------------) + I tokushima
0x00223a06, // n0x0fe6 c0x0000 (---------------) + I wajiki
0x003702c6, // n0x0fe7 c0x0000 (---------------) + I adachi
0x003058c7, // n0x0fe8 c0x0000 (---------------) + I akiruno
0x003781c8, // n0x0fe9 c0x0000 (---------------) + I akishima
0x003700c9, // n0x0fea c0x0000 (---------------) + I aogashima
0x0036fc07, // n0x0feb c0x0000 (---------------) + I arakawa
0x00288a86, // n0x0fec c0x0000 (---------------) + I bunkyo
0x00202ac7, // n0x0fed c0x0000 (---------------) + I chiyoda
0x002a1d45, // n0x0fee c0x0000 (---------------) + I chofu
0x00347284, // n0x0fef c0x0000 (---------------) + I chuo
0x00305487, // n0x0ff0 c0x0000 (---------------) + I edogawa
0x00205ac5, // n0x0ff1 c0x0000 (---------------) + I fuchu
0x00280485, // n0x0ff2 c0x0000 (---------------) + I fussa
0x00381e07, // n0x0ff3 c0x0000 (---------------) + I hachijo
0x00254a08, // n0x0ff4 c0x0000 (---------------) + I hachioji
0x00356246, // n0x0ff5 c0x0000 (---------------) + I hamura
0x0028c2cd, // n0x0ff6 c0x0000 (---------------) + I higashikurume
0x0028d84f, // n0x0ff7 c0x0000 (---------------) + I higashimurayama
0x0029344d, // n0x0ff8 c0x0000 (---------------) + I higashiyamato
0x00205304, // n0x0ff9 c0x0000 (---------------) + I hino
0x00238dc6, // n0x0ffa c0x0000 (---------------) + I hinode
0x002c2248, // n0x0ffb c0x0000 (---------------) + I hinohara
0x00383b85, // n0x0ffc c0x0000 (---------------) + I inagi
0x0032b0c8, // n0x0ffd c0x0000 (---------------) + I itabashi
0x0021c90a, // n0x0ffe c0x0000 (---------------) + I katsushika
0x0020ec84, // n0x0fff c0x0000 (---------------) + I kita
0x002e6e86, // n0x1000 c0x0000 (---------------) + I kiyose
0x00248a87, // n0x1001 c0x0000 (---------------) + I kodaira
0x002d4f07, // n0x1002 c0x0000 (---------------) + I koganei
0x00363f49, // n0x1003 c0x0000 (---------------) + I kokubunji
0x00305745, // n0x1004 c0x0000 (---------------) + I komae
0x002fdb84, // n0x1005 c0x0000 (---------------) + I koto
0x0035aeca, // n0x1006 c0x0000 (---------------) + I kouzushima
0x002a8489, // n0x1007 c0x0000 (---------------) + I kunitachi
0x00356987, // n0x1008 c0x0000 (---------------) + I machida
0x002e4a86, // n0x1009 c0x0000 (---------------) + I meguro
0x00327ec6, // n0x100a c0x0000 (---------------) + I minato
0x002923c6, // n0x100b c0x0000 (---------------) + I mitaka
0x0033b7c6, // n0x100c c0x0000 (---------------) + I mizuho
0x002c1b4f, // n0x100d c0x0000 (---------------) + I musashimurayama
0x002c2109, // n0x100e c0x0000 (---------------) + I musashino
0x0026ac06, // n0x100f c0x0000 (---------------) + I nakano
0x0033f046, // n0x1010 c0x0000 (---------------) + I nerima
0x00367549, // n0x1011 c0x0000 (---------------) + I ogasawara
0x00314e07, // n0x1012 c0x0000 (---------------) + I okutama
0x0020de03, // n0x1013 c0x0000 (---------------) + I ome
0x0021f106, // n0x1014 c0x0000 (---------------) + I oshima
0x00211083, // n0x1015 c0x0000 (---------------) + I ota
0x002d1b48, // n0x1016 c0x0000 (---------------) + I setagaya
0x002e66c7, // n0x1017 c0x0000 (---------------) + I shibuya
0x002956c9, // n0x1018 c0x0000 (---------------) + I shinagawa
0x002b5d48, // n0x1019 c0x0000 (---------------) + I shinjuku
0x00330ec8, // n0x101a c0x0000 (---------------) + I suginami
0x002631c6, // n0x101b c0x0000 (---------------) + I sumida
0x0030a7c9, // n0x101c c0x0000 (---------------) + I tachikawa
0x00234205, // n0x101d c0x0000 (---------------) + I taito
0x002a1984, // n0x101e c0x0000 (---------------) + I tama
0x00243f07, // n0x101f c0x0000 (---------------) + I toshima
0x00265685, // n0x1020 c0x0000 (---------------) + I chizu
0x00205304, // n0x1021 c0x0000 (---------------) + I hino
0x002800c8, // n0x1022 c0x0000 (---------------) + I kawahara
0x00217b44, // n0x1023 c0x0000 (---------------) + I koge
0x002fe347, // n0x1024 c0x0000 (---------------) + I kotoura
0x0030d886, // n0x1025 c0x0000 (---------------) + I misasa
0x002bd905, // n0x1026 c0x0000 (---------------) + I nanbu
0x002aef08, // n0x1027 c0x0000 (---------------) + I nichinan
0x00327d8b, // n0x1028 c0x0000 (---------------) + I sakaiminato
0x002f11c7, // n0x1029 c0x0000 (---------------) + I tottori
0x0024d886, // n0x102a c0x0000 (---------------) + I wakasa
0x002b4684, // n0x102b c0x0000 (---------------) + I yazu
0x0037bec6, // n0x102c c0x0000 (---------------) + I yonago
0x002b1e85, // n0x102d c0x0000 (---------------) + I asahi
0x00205ac5, // n0x102e c0x0000 (---------------) + I fuchu
0x002777c9, // n0x102f c0x0000 (---------------) + I fukumitsu
0x0027a8c9, // n0x1030 c0x0000 (---------------) + I funahashi
0x00223644, // n0x1031 c0x0000 (---------------) + I himi
0x00223685, // n0x1032 c0x0000 (---------------) + I imizu
0x00216c45, // n0x1033 c0x0000 (---------------) + I inami
0x0026ad86, // n0x1034 c0x0000 (---------------) + I johana
0x002d6608, // n0x1035 c0x0000 (---------------) + I kamiichi
0x002aa746, // n0x1036 c0x0000 (---------------) + I kurobe
0x0020228b, // n0x1037 c0x0000 (---------------) + I nakaniikawa
0x0026794a, // n0x1038 c0x0000 (---------------) + I namerikawa
0x002f2f05, // n0x1039 c0x0000 (---------------) + I nanto
0x002838c6, // n0x103a c0x0000 (---------------) + I nyuzen
0x002acfc5, // n0x103b c0x0000 (---------------) + I oyabe
0x002a2245, // n0x103c c0x0000 (---------------) + I taira
0x002a24c7, // n0x103d c0x0000 (---------------) + I takaoka
0x002128c8, // n0x103e c0x0000 (---------------) + I tateyama
0x00273904, // n0x103f c0x0000 (---------------) + I toga
0x0029dc46, // n0x1040 c0x0000 (---------------) + I tonami
0x002873c6, // n0x1041 c0x0000 (---------------) + I toyama
0x00217e07, // n0x1042 c0x0000 (---------------) + I unazuki
0x00347304, // n0x1043 c0x0000 (---------------) + I uozu
0x002766c6, // n0x1044 c0x0000 (---------------) + I yamada
0x0036e185, // n0x1045 c0x0000 (---------------) + I arida
0x0036e189, // n0x1046 c0x0000 (---------------) + I aridagawa
0x003704c4, // n0x1047 c0x0000 (---------------) + I gobo
0x002bc709, // n0x1048 c0x0000 (---------------) + I hashimoto
0x00273146, // n0x1049 c0x0000 (---------------) + I hidaka
0x002ae588, // n0x104a c0x0000 (---------------) + I hirogawa
0x00216c45, // n0x104b c0x0000 (---------------) + I inami
0x00231a85, // n0x104c c0x0000 (---------------) + I iwade
0x00229146, // n0x104d c0x0000 (---------------) + I kainan
0x00258209, // n0x104e c0x0000 (---------------) + I kamitonda
0x00219949, // n0x104f c0x0000 (---------------) + I katsuragi
0x002d2086, // n0x1050 c0x0000 (---------------) + I kimino
0x0027dd88, // n0x1051 c0x0000 (---------------) + I kinokawa
0x002a2708, // n0x1052 c0x0000 (---------------) + I kitayama
0x002acf84, // n0x1053 c0x0000 (---------------) + I koya
0x002a3104, // n0x1054 c0x0000 (---------------) + I koza
0x002a3108, // n0x1055 c0x0000 (---------------) + I kozagawa
0x003091c8, // n0x1056 c0x0000 (---------------) + I kudoyama
0x00273a09, // n0x1057 c0x0000 (---------------) + I kushimoto
0x002985c6, // n0x1058 c0x0000 (---------------) + I mihama
0x002467c6, // n0x1059 c0x0000 (---------------) + I misato
0x003048cd, // n0x105a c0x0000 (---------------) + I nachikatsuura
0x00227206, // n0x105b c0x0000 (---------------) + I shingu
0x002d0149, // n0x105c c0x0000 (---------------) + I shirahama
0x0036d285, // n0x105d c0x0000 (---------------) + I taiji
0x002110c6, // n0x105e c0x0000 (---------------) + I tanabe
0x0030a988, // n0x105f c0x0000 (---------------) + I wakayama
0x00302305, // n0x1060 c0x0000 (---------------) + I yuasa
0x00269944, // n0x1061 c0x0000 (---------------) + I yura
0x002b1e85, // n0x1062 c0x0000 (---------------) + I asahi
0x0027a308, // n0x1063 c0x0000 (---------------) + I funagata
0x0028f949, // n0x1064 c0x0000 (---------------) + I higashine
0x00270744, // n0x1065 c0x0000 (---------------) + I iide
0x00273786, // n0x1066 c0x0000 (---------------) + I kahoku
0x003633ca, // n0x1067 c0x0000 (---------------) + I kaminoyama
0x0021a508, // n0x1068 c0x0000 (---------------) + I kaneyama
0x00230389, // n0x1069 c0x0000 (---------------) + I kawanishi
0x003605ca, // n0x106a c0x0000 (---------------) + I mamurogawa
0x00334f46, // n0x106b c0x0000 (---------------) + I mikawa
0x0028da08, // n0x106c c0x0000 (---------------) + I murayama
0x002fab85, // n0x106d c0x0000 (---------------) + I nagai
0x00203448, // n0x106e c0x0000 (---------------) + I nakayama
0x002a6785, // n0x106f c0x0000 (---------------) + I nanyo
0x0021a249, // n0x1070 c0x0000 (---------------) + I nishikawa
0x0033cd89, // n0x1071 c0x0000 (---------------) + I obanazawa
0x00202e42, // n0x1072 c0x0000 (---------------) + I oe
0x002ba1c5, // n0x1073 c0x0000 (---------------) + I oguni
0x00265906, // n0x1074 c0x0000 (---------------) + I ohkura
0x00273087, // n0x1075 c0x0000 (---------------) + I oishida
0x0034c905, // n0x1076 c0x0000 (---------------) + I sagae
0x003426c6, // n0x1077 c0x0000 (---------------) + I sakata
0x0034a308, // n0x1078 c0x0000 (---------------) + I sakegawa
0x002b9446, // n0x1079 c0x0000 (---------------) + I shinjo
0x002dd309, // n0x107a c0x0000 (---------------) + I shirataka
0x00272546, // n0x107b c0x0000 (---------------) + I shonai
0x00271608, // n0x107c c0x0000 (---------------) + I takahata
0x0029d3c5, // n0x107d c0x0000 (---------------) + I tendo
0x00257a46, // n0x107e c0x0000 (---------------) + I tozawa
0x00335288, // n0x107f c0x0000 (---------------) + I tsuruoka
0x002797c8, // n0x1080 c0x0000 (---------------) + I yamagata
0x003694c8, // n0x1081 c0x0000 (---------------) + I yamanobe
0x00324648, // n0x1082 c0x0000 (---------------) + I yonezawa
0x0021ea84, // n0x1083 c0x0000 (---------------) + I yuza
0x00229583, // n0x1084 c0x0000 (---------------) + I abu
0x002dd544, // n0x1085 c0x0000 (---------------) + I hagi
0x002b4c86, // n0x1086 c0x0000 (---------------) + I hikari
0x002a1d84, // n0x1087 c0x0000 (---------------) + I hofu
0x002df5c7, // n0x1088 c0x0000 (---------------) + I iwakuni
0x002cb7c9, // n0x1089 c0x0000 (---------------) + I kudamatsu
0x002b3c45, // n0x108a c0x0000 (---------------) + I mitou
0x002419c6, // n0x108b c0x0000 (---------------) + I nagato
0x0021f106, // n0x108c c0x0000 (---------------) + I oshima
0x002a2c8b, // n0x108d c0x0000 (---------------) + I shimonoseki
0x002f2e46, // n0x108e c0x0000 (---------------) + I shunan
0x003094c6, // n0x108f c0x0000 (---------------) + I tabuse
0x00261448, // n0x1090 c0x0000 (---------------) + I tokuyama
0x00244606, // n0x1091 c0x0000 (---------------) + I toyota
0x0025dc03, // n0x1092 c0x0000 (---------------) + I ube
0x00216b03, // n0x1093 c0x0000 (---------------) + I yuu
0x00347284, // n0x1094 c0x0000 (---------------) + I chuo
0x002352c5, // n0x1095 c0x0000 (---------------) + I doshi
0x00306107, // n0x1096 c0x0000 (---------------) + I fuefuki
0x002708c8, // n0x1097 c0x0000 (---------------) + I fujikawa
0x002708cf, // n0x1098 c0x0000 (---------------) + I fujikawaguchiko
0x0027564b, // n0x1099 c0x0000 (---------------) + I fujiyoshida
0x002d6408, // n0x109a c0x0000 (---------------) + I hayakawa
0x00273806, // n0x109b c0x0000 (---------------) + I hokuto
0x0034fece, // n0x109c c0x0000 (---------------) + I ichikawamisato
0x0020d603, // n0x109d c0x0000 (---------------) + I kai
0x00306084, // n0x109e c0x0000 (---------------) + I kofu
0x002f2dc5, // n0x109f c0x0000 (---------------) + I koshu
0x002f73c6, // n0x10a0 c0x0000 (---------------) + I kosuge
0x00284fcb, // n0x10a1 c0x0000 (---------------) + I minami-alps
0x00289146, // n0x10a2 c0x0000 (---------------) + I minobu
0x00224fc9, // n0x10a3 c0x0000 (---------------) + I nakamichi
0x002bd905, // n0x10a4 c0x0000 (---------------) + I nanbu
0x00357548, // n0x10a5 c0x0000 (---------------) + I narusawa
0x00211988, // n0x10a6 c0x0000 (---------------) + I nirasaki
0x0021980c, // n0x10a7 c0x0000 (---------------) + I nishikatsura
0x002941c6, // n0x10a8 c0x0000 (---------------) + I oshino
0x00322a46, // n0x10a9 c0x0000 (---------------) + I otsuki
0x002f5745, // n0x10aa c0x0000 (---------------) + I showa
0x002808c8, // n0x10ab c0x0000 (---------------) + I tabayama
0x00202ec5, // n0x10ac c0x0000 (---------------) + I tsuru
0x0020e3c8, // n0x10ad c0x0000 (---------------) + I uenohara
0x0029388a, // n0x10ae c0x0000 (---------------) + I yamanakako
0x00379ac9, // n0x10af c0x0000 (---------------) + I yamanashi
0x007242c4, // n0x10b0 c0x0001 (---------------) ! I city
0x00232dc3, // n0x10b1 c0x0000 (---------------) + I com
0x0021e083, // n0x10b2 c0x0000 (---------------) + I edu
0x00209ac3, // n0x10b3 c0x0000 (---------------) + I gov
0x00240443, // n0x10b4 c0x0000 (---------------) + I mil
0x00218643, // n0x10b5 c0x0000 (---------------) + I net
0x0024d043, // n0x10b6 c0x0000 (---------------) + I org
0x00202183, // n0x10b7 c0x0000 (---------------) + I biz
0x00232dc3, // n0x10b8 c0x0000 (---------------) + I com
0x0021e083, // n0x10b9 c0x0000 (---------------) + I edu
0x00209ac3, // n0x10ba c0x0000 (---------------) + I gov
0x00208a44, // n0x10bb c0x0000 (---------------) + I info
0x00218643, // n0x10bc c0x0000 (---------------) + I net
0x0024d043, // n0x10bd c0x0000 (---------------) + I org
0x00203a03, // n0x10be c0x0000 (---------------) + I ass
0x00278344, // n0x10bf c0x0000 (---------------) + I asso
0x00232dc3, // n0x10c0 c0x0000 (---------------) + I com
0x0023a884, // n0x10c1 c0x0000 (---------------) + I coop
0x0021e083, // n0x10c2 c0x0000 (---------------) + I edu
0x00252544, // n0x10c3 c0x0000 (---------------) + I gouv
0x00209ac3, // n0x10c4 c0x0000 (---------------) + I gov
0x002e1b47, // n0x10c5 c0x0000 (---------------) + I medecin
0x00240443, // n0x10c6 c0x0000 (---------------) + I mil
0x002104c3, // n0x10c7 c0x0000 (---------------) + I nom
0x00283208, // n0x10c8 c0x0000 (---------------) + I notaires
0x0024d043, // n0x10c9 c0x0000 (---------------) + I org
0x002c55cb, // n0x10ca c0x0000 (---------------) + I pharmaciens
0x002cf303, // n0x10cb c0x0000 (---------------) + I prd
0x0022ad46, // n0x10cc c0x0000 (---------------) + I presse
0x002032c2, // n0x10cd c0x0000 (---------------) + I tm
0x002ff6cb, // n0x10ce c0x0000 (---------------) + I veterinaire
0x0021e083, // n0x10cf c0x0000 (---------------) + I edu
0x00209ac3, // n0x10d0 c0x0000 (---------------) + I gov
0x00218643, // n0x10d1 c0x0000 (---------------) + I net
0x0024d043, // n0x10d2 c0x0000 (---------------) + I org
0x00232dc3, // n0x10d3 c0x0000 (---------------) + I com
0x0021e083, // n0x10d4 c0x0000 (---------------) + I edu
0x00209ac3, // n0x10d5 c0x0000 (---------------) + I gov
0x0024d043, // n0x10d6 c0x0000 (---------------) + I org
0x00222883, // n0x10d7 c0x0000 (---------------) + I rep
0x00208103, // n0x10d8 c0x0000 (---------------) + I tra
0x00200b82, // n0x10d9 c0x0000 (---------------) + I ac
0x0009e448, // n0x10da c0x0000 (---------------) + blogspot
0x00232845, // n0x10db c0x0000 (---------------) + I busan
0x002f6e88, // n0x10dc c0x0000 (---------------) + I chungbuk
0x002f8348, // n0x10dd c0x0000 (---------------) + I chungnam
0x00200882, // n0x10de c0x0000 (---------------) + I co
0x00345f05, // n0x10df c0x0000 (---------------) + I daegu
0x002fd947, // n0x10e0 c0x0000 (---------------) + I daejeon
0x00201e42, // n0x10e1 c0x0000 (---------------) + I es
0x00225207, // n0x10e2 c0x0000 (---------------) + I gangwon
0x00209ac2, // n0x10e3 c0x0000 (---------------) + I go
0x00257d87, // n0x10e4 c0x0000 (---------------) + I gwangju
0x00307c49, // n0x10e5 c0x0000 (---------------) + I gyeongbuk
0x002ecb08, // n0x10e6 c0x0000 (---------------) + I gyeonggi
0x002c3649, // n0x10e7 c0x0000 (---------------) + I gyeongnam
0x00209702, // n0x10e8 c0x0000 (---------------) + I hs
0x002ae307, // n0x10e9 c0x0000 (---------------) + I incheon
0x002d1e04, // n0x10ea c0x0000 (---------------) + I jeju
0x002fda07, // n0x10eb c0x0000 (---------------) + I jeonbuk
0x00267847, // n0x10ec c0x0000 (---------------) + I jeonnam
0x003797c2, // n0x10ed c0x0000 (---------------) + I kg
0x00240443, // n0x10ee c0x0000 (---------------) + I mil
0x0020e602, // n0x10ef c0x0000 (---------------) + I ms
0x00209e82, // n0x10f0 c0x0000 (---------------) + I ne
0x00200d02, // n0x10f1 c0x0000 (---------------) + I or
0x0020c782, // n0x10f2 c0x0000 (---------------) + I pe
0x00207082, // n0x10f3 c0x0000 (---------------) + I re
0x0021bcc2, // n0x10f4 c0x0000 (---------------) + I sc
0x0034b605, // n0x10f5 c0x0000 (---------------) + I seoul
0x00250985, // n0x10f6 c0x0000 (---------------) + I ulsan
0x00232dc3, // n0x10f7 c0x0000 (---------------) + I com
0x0021e083, // n0x10f8 c0x0000 (---------------) + I edu
0x00209ac3, // n0x10f9 c0x0000 (---------------) + I gov
0x00218643, // n0x10fa c0x0000 (---------------) + I net
0x0024d043, // n0x10fb c0x0000 (---------------) + I org
0x00232dc3, // n0x10fc c0x0000 (---------------) + I com
0x0021e083, // n0x10fd c0x0000 (---------------) + I edu
0x00209ac3, // n0x10fe c0x0000 (---------------) + I gov
0x00240443, // n0x10ff c0x0000 (---------------) + I mil
0x00218643, // n0x1100 c0x0000 (---------------) + I net
0x0024d043, // n0x1101 c0x0000 (---------------) + I org
0x00000401, // n0x1102 c0x0000 (---------------) + c
0x00232dc3, // n0x1103 c0x0000 (---------------) + I com
0x0021e083, // n0x1104 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1105 c0x0000 (---------------) + I gov
0x00208a44, // n0x1106 c0x0000 (---------------) + I info
0x002188c3, // n0x1107 c0x0000 (---------------) + I int
0x00218643, // n0x1108 c0x0000 (---------------) + I net
0x0024d043, // n0x1109 c0x0000 (---------------) + I org
0x0020c783, // n0x110a c0x0000 (---------------) + I per
0x00232dc3, // n0x110b c0x0000 (---------------) + I com
0x0021e083, // n0x110c c0x0000 (---------------) + I edu
0x00209ac3, // n0x110d c0x0000 (---------------) + I gov
0x00218643, // n0x110e c0x0000 (---------------) + I net
0x0024d043, // n0x110f c0x0000 (---------------) + I org
0x00200882, // n0x1110 c0x0000 (---------------) + I co
0x00232dc3, // n0x1111 c0x0000 (---------------) + I com
0x0021e083, // n0x1112 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1113 c0x0000 (---------------) + I gov
0x00218643, // n0x1114 c0x0000 (---------------) + I net
0x0024d043, // n0x1115 c0x0000 (---------------) + I org
0x002af5c4, // n0x1116 c0x0000 (---------------) + I assn
0x00232dc3, // n0x1117 c0x0000 (---------------) + I com
0x0021e083, // n0x1118 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1119 c0x0000 (---------------) + I gov
0x0023c403, // n0x111a c0x0000 (---------------) + I grp
0x0029d805, // n0x111b c0x0000 (---------------) + I hotel
0x002188c3, // n0x111c c0x0000 (---------------) + I int
0x00220e43, // n0x111d c0x0000 (---------------) + I ltd
0x00218643, // n0x111e c0x0000 (---------------) + I net
0x0024ad43, // n0x111f c0x0000 (---------------) + I ngo
0x0024d043, // n0x1120 c0x0000 (---------------) + I org
0x00251983, // n0x1121 c0x0000 (---------------) + I sch
0x002783c3, // n0x1122 c0x0000 (---------------) + I soc
0x002071c3, // n0x1123 c0x0000 (---------------) + I web
0x00232dc3, // n0x1124 c0x0000 (---------------) + I com
0x0021e083, // n0x1125 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1126 c0x0000 (---------------) + I gov
0x00218643, // n0x1127 c0x0000 (---------------) + I net
0x0024d043, // n0x1128 c0x0000 (---------------) + I org
0x00200882, // n0x1129 c0x0000 (---------------) + I co
0x0024d043, // n0x112a c0x0000 (---------------) + I org
0x00209ac3, // n0x112b c0x0000 (---------------) + I gov
0x002a4783, // n0x112c c0x0000 (---------------) + I asn
0x00232dc3, // n0x112d c0x0000 (---------------) + I com
0x00235884, // n0x112e c0x0000 (---------------) + I conf
0x0021e083, // n0x112f c0x0000 (---------------) + I edu
0x00209ac3, // n0x1130 c0x0000 (---------------) + I gov
0x00205942, // n0x1131 c0x0000 (---------------) + I id
0x00240443, // n0x1132 c0x0000 (---------------) + I mil
0x00218643, // n0x1133 c0x0000 (---------------) + I net
0x0024d043, // n0x1134 c0x0000 (---------------) + I org
0x00232dc3, // n0x1135 c0x0000 (---------------) + I com
0x0021e083, // n0x1136 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1137 c0x0000 (---------------) + I gov
0x00205942, // n0x1138 c0x0000 (---------------) + I id
0x00210e83, // n0x1139 c0x0000 (---------------) + I med
0x00218643, // n0x113a c0x0000 (---------------) + I net
0x0024d043, // n0x113b c0x0000 (---------------) + I org
0x002cb543, // n0x113c c0x0000 (---------------) + I plc
0x00251983, // n0x113d c0x0000 (---------------) + I sch
0x00200b82, // n0x113e c0x0000 (---------------) + I ac
0x00200882, // n0x113f c0x0000 (---------------) + I co
0x00209ac3, // n0x1140 c0x0000 (---------------) + I gov
0x00218643, // n0x1141 c0x0000 (---------------) + I net
0x0024d043, // n0x1142 c0x0000 (---------------) + I org
0x0022ad45, // n0x1143 c0x0000 (---------------) + I press
0x00278344, // n0x1144 c0x0000 (---------------) + I asso
0x002032c2, // n0x1145 c0x0000 (---------------) + I tm
0x00200b82, // n0x1146 c0x0000 (---------------) + I ac
0x00200882, // n0x1147 c0x0000 (---------------) + I co
0x0021e083, // n0x1148 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1149 c0x0000 (---------------) + I gov
0x00234383, // n0x114a c0x0000 (---------------) + I its
0x00218643, // n0x114b c0x0000 (---------------) + I net
0x0024d043, // n0x114c c0x0000 (---------------) + I org
0x002cfac4, // n0x114d c0x0000 (---------------) + I priv
0x00232dc3, // n0x114e c0x0000 (---------------) + I com
0x0021e083, // n0x114f c0x0000 (---------------) + I edu
0x00209ac3, // n0x1150 c0x0000 (---------------) + I gov
0x00240443, // n0x1151 c0x0000 (---------------) + I mil
0x002104c3, // n0x1152 c0x0000 (---------------) + I nom
0x0024d043, // n0x1153 c0x0000 (---------------) + I org
0x002cf303, // n0x1154 c0x0000 (---------------) + I prd
0x002032c2, // n0x1155 c0x0000 (---------------) + I tm
0x00232dc3, // n0x1156 c0x0000 (---------------) + I com
0x0021e083, // n0x1157 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1158 c0x0000 (---------------) + I gov
0x00206e83, // n0x1159 c0x0000 (---------------) + I inf
0x00267944, // n0x115a c0x0000 (---------------) + I name
0x00218643, // n0x115b c0x0000 (---------------) + I net
0x0024d043, // n0x115c c0x0000 (---------------) + I org
0x00232dc3, // n0x115d c0x0000 (---------------) + I com
0x0021e083, // n0x115e c0x0000 (---------------) + I edu
0x00252544, // n0x115f c0x0000 (---------------) + I gouv
0x00209ac3, // n0x1160 c0x0000 (---------------) + I gov
0x00218643, // n0x1161 c0x0000 (---------------) + I net
0x0024d043, // n0x1162 c0x0000 (---------------) + I org
0x0022ad46, // n0x1163 c0x0000 (---------------) + I presse
0x0021e083, // n0x1164 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1165 c0x0000 (---------------) + I gov
0x0001af43, // n0x1166 c0x0000 (---------------) + nyc
0x0024d043, // n0x1167 c0x0000 (---------------) + I org
0x00232dc3, // n0x1168 c0x0000 (---------------) + I com
0x0021e083, // n0x1169 c0x0000 (---------------) + I edu
0x00209ac3, // n0x116a c0x0000 (---------------) + I gov
0x00218643, // n0x116b c0x0000 (---------------) + I net
0x0024d043, // n0x116c c0x0000 (---------------) + I org
0x0009e448, // n0x116d c0x0000 (---------------) + blogspot
0x00209ac3, // n0x116e c0x0000 (---------------) + I gov
0x00232dc3, // n0x116f c0x0000 (---------------) + I com
0x0021e083, // n0x1170 c0x0000 (---------------) + I edu
0x00209ac3, // n0x1171 c0x0000 (---------------) + I gov
0x00218643, // n0x1172 c0x0000 (---------------) + I net
0x0024d043, // n0x1173 c0x0000 (---------------) + I org
0x00232dc3, // n0x1174 c0x0000 (---------------) + I com
0x0021e083, // n0x1175 c0x0000 (---------------) + I edu
0x00218643, // n0x1176 c0x0000 (---------------) + I net
0x0024d043, // n0x1177 c0x0000 (---------------) + I org
0x00200b82, // n0x1178 c0x0000 (---------------) + I ac
0x00200882, // n0x1179 c0x0000 (---------------) + I co
0x00232dc3, // n0x117a c0x0000 (---------------) + I com
0x00209ac3, // n0x117b c0x0000 (---------------) + I gov
0x00218643, // n0x117c c0x0000 (---------------) + I net
0x00200d02, // n0x117d c0x0000 (---------------) + I or
0x0024d043, // n0x117e c0x0000 (---------------) + I org
0x002fe987, // n0x117f c0x0000 (---------------) + I academy
0x002ee00b, // n0x1180 c0x0000 (---------------) + I agriculture
0x00222943, // n0x1181 c0x0000 (---------------) + I air
0x0024e648, // n0x1182 c0x0000 (---------------) + I airguard
0x00377f47, // n0x1183 c0x0000 (---------------) + I alabama
0x002abec6, // n0x1184 c0x0000 (---------------) + I alaska
0x002f8e05, // n0x1185 c0x0000 (---------------) + I amber
0x00383909, // n0x1186 c0x0000 (---------------) + I ambulance
0x00237b48, // n0x1187 c0x0000 (---------------) + I american
0x00237b49, // n0x1188 c0x0000 (---------------) + I americana
0x00274990, // n0x1189 c0x0000 (---------------) + I americanantiques
0x00237b4b, // n0x118a c0x0000 (---------------) + I americanart
0x002f8c49, // n0x118b c0x0000 (---------------) + I amsterdam
0x00205fc3, // n0x118c c0x0000 (---------------) + I and
0x002e0e49, // n0x118d c0x0000 (---------------) + I annefrank
0x00236846, // n0x118e c0x0000 (---------------) + I anthro
0x0023684c, // n0x118f c0x0000 (---------------) + I anthropology
0x00232908, // n0x1190 c0x0000 (---------------) + I antiques
0x0036bc48, // n0x1191 c0x0000 (---------------) + I aquarium
0x00263cc9, // n0x1192 c0x0000 (---------------) + I arboretum
0x002d828e, // n0x1193 c0x0000 (---------------) + I archaeological
0x002c340b, // n0x1194 c0x0000 (---------------) + I archaeology
0x00229ecc, // n0x1195 c0x0000 (---------------) + I architecture
0x00208d43, // n0x1196 c0x0000 (---------------) + I art
0x00237d4c, // n0x1197 c0x0000 (---------------) + I artanddesign
0x002ec189, // n0x1198 c0x0000 (---------------) + I artcenter
0x00208d47, // n0x1199 c0x0000 (---------------) + I artdeco
0x0021dfcc, // n0x119a c0x0000 (---------------) + I arteducation
0x002372ca, // n0x119b c0x0000 (---------------) + I artgallery
0x0020b384, // n0x119c c0x0000 (---------------) + I arts
0x0020b38d, // n0x119d c0x0000 (---------------) + I artsandcrafts
0x002ec048, // n0x119e c0x0000 (---------------) + I asmatart
0x0036f4cd, // n0x119f c0x0000 (---------------) + I assassination
0x0029b006, // n0x11a0 c0x0000 (---------------) + I assisi
0x002c51cb, // n0x11a1 c0x0000 (---------------) + I association
0x003459c9, // n0x11a2 c0x0000 (---------------) + I astronomy
0x002b6947, // n0x11a3 c0x0000 (---------------) + I atlanta
0x0036b806, // n0x11a4 c0x0000 (---------------) + I austin
0x0032d809, // n0x11a5 c0x0000 (---------------) + I australia
0x0033490a, // n0x11a6 c0x0000 (---------------) + I automotive
0x00332d48, // n0x11a7 c0x0000 (---------------) + I aviation
0x0026a344, // n0x11a8 c0x0000 (---------------) + I axis
0x002fb107, // n0x11a9 c0x0000 (---------------) + I badajoz
0x0020da87, // n0x11aa c0x0000 (---------------) + I baghdad
0x0030b4c4, // n0x11ab c0x0000 (---------------) + I bahn
0x00349604, // n0x11ac c0x0000 (---------------) + I bale
0x002d9c49, // n0x11ad c0x0000 (---------------) + I baltimore
0x002f9609, // n0x11ae c0x0000 (---------------) + I barcelona
0x00285848, // n0x11af c0x0000 (---------------) + I baseball
0x00212c85, // n0x11b0 c0x0000 (---------------) + I basel
0x00214405, // n0x11b1 c0x0000 (---------------) + I baths
0x0020cd46, // n0x11b2 c0x0000 (---------------) + I bauern
0x0020b249, // n0x11b3 c0x0000 (---------------) + I beauxarts
0x0036964d, // n0x11b4 c0x0000 (---------------) + I beeldengeluid
0x002111c8, // n0x11b5 c0x0000 (---------------) + I bellevue
0x0020cc47, // n0x11b6 c0x0000 (---------------) + I bergbau
0x002f8e88, // n0x11b7 c0x0000 (---------------) + I berkeley
0x00376686, // n0x11b8 c0x0000 (---------------) + I berlin
0x0037bb04, // n0x11b9 c0x0000 (---------------) + I bern
0x0023ac45, // n0x11ba c0x0000 (---------------) + I bible
0x00207606, // n0x11bb c0x0000 (---------------) + I bilbao
0x00207f84, // n0x11bc c0x0000 (---------------) + I bill
0x00208c47, // n0x11bd c0x0000 (---------------) + I birdart
0x0020a2ca, // n0x11be c0x0000 (---------------) + I birthplace
0x00217444, // n0x11bf c0x0000 (---------------) + I bonn
0x00217746, // n0x11c0 c0x0000 (---------------) + I boston
0x00219489, // n0x11c1 c0x0000 (---------------) + I botanical
0x0021948f, // n0x11c2 c0x0000 (---------------) + I botanicalgarden
0x0021a90d, // n0x11c3 c0x0000 (---------------) + I botanicgarden
0x0021ae46, // n0x11c4 c0x0000 (---------------) + I botany
0x0021e6d0, // n0x11c5 c0x0000 (---------------) + I brandywinevalley
0x0021ec06, // n0x11c6 c0x0000 (---------------) + I brasil
0x00220587, // n0x11c7 c0x0000 (---------------) + I bristol
0x00221207, // n0x11c8 c0x0000 (---------------) + I british
0x0022120f, // n0x11c9 c0x0000 (---------------) + I britishcolumbia
0x002248c9, // n0x11ca c0x0000 (---------------) + I broadcast
0x00228a46, // n0x11cb c0x0000 (---------------) + I brunel
0x002297c7, // n0x11cc c0x0000 (---------------) + I brussel
0x002297c8, // n0x11cd c0x0000 (---------------) + I brussels
0x00229b89, // n0x11ce c0x0000 (---------------) + I bruxelles
0x002bd9c8, // n0x11cf c0x0000 (---------------) + I building
0x002c9ac7, // n0x11d0 c0x0000 (---------------) + I burghof
0x00232843, // n0x11d1 c0x0000 (---------------) + I bus
0x0028a746, // n0x11d2 c0x0000 (---------------) + I bushey
0x0023bd48, // n0x11d3 c0x0000 (---------------) + I cadaques
0x002d854a, // n0x11d4 c0x0000 (---------------) + I california
0x00216409, // n0x11d5 c0x0000 (---------------) + I cambridge
0x002180c3, // n0x11d6 c0x0000 (---------------) + I can
0x00343f06, // n0x11d7 c0x0000 (---------------) + I canada
0x0029da8a, // n0x11d8 c0x0000 (---------------) + I capebreton
0x0021afc7, // n0x11d9 c0x0000 (---------------) + I carrier
0x0021de0a, // n0x11da c0x0000 (---------------) + I cartoonart
0x00320f8e, // n0x11db c0x0000 (---------------) + I casadelamoneda
0x00224a06, // n0x11dc c0x0000 (---------------) + I castle
0x0033a347, // n0x11dd c0x0000 (---------------) + I castres
0x0021a006, // n0x11de c0x0000 (---------------) + I celtic
0x002442c6, // n0x11df c0x0000 (---------------) + I center
0x002fc94b, // n0x11e0 c0x0000 (---------------) + I chattanooga
0x0031394a, // n0x11e1 c0x0000 (---------------) + I cheltenham
0x0032e04d, // n0x11e2 c0x0000 (---------------) + I chesapeakebay
0x00370387, // n0x11e3 c0x0000 (---------------) + I chicago
0x00308508, // n0x11e4 c0x0000 (---------------) + I children
0x00308509, // n0x11e5 c0x0000 (---------------) + I childrens
0x0030850f, // n0x11e6 c0x0000 (---------------) + I childrensgarden
0x0029c58c, // n0x11e7 c0x0000 (---------------) + I chiropractic
0x002a0d89, // n0x11e8 c0x0000 (---------------) + I chocolate
0x003516ce, // n0x11e9 c0x0000 (---------------) + I christiansburg
0x0027468a, // n0x11ea c0x0000 (---------------) + I cincinnati
0x002e1c46, // n0x11eb c0x0000 (---------------) + I cinema
0x0031de46, // n0x11ec c0x0000 (---------------) + I circus
0x0033688c, // n0x11ed c0x0000 (---------------) + I civilisation
0x00338a8c, // n0x11ee c0x0000 (---------------) + I civilization
0x0033e488, // n0x11ef c0x0000 (---------------) + I civilwar
0x003792c7, // n0x11f0 c0x0000 (---------------) + I clinton
0x002060c5, // n0x11f1 c0x0000 (---------------) + I clock
0x00208e84, // n0x11f2 c0x0000 (---------------) + I coal
0x002fa40e, // n0x11f3 c0x0000 (---------------) + I coastaldefence
0x00359204, // n0x11f4 c0x0000 (---------------) + I cody
0x0025e187, // n0x11f5 c0x0000 (---------------) + I coldwar
0x0022ddca, // n0x11f6 c0x0000 (---------------) + I collection
0x00231294, // n0x11f7 c0x0000 (---------------) + I colonialwilliamsburg
0x0023218f, // n0x11f8 c0x0000 (---------------) + I coloradoplateau
0x002213c8, // n0x11f9 c0x0000 (---------------) + I columbia
0x00232708, // n0x11fa c0x0000 (---------------) + I columbus
0x002ceb4d, // n0x11fb c0x0000 (---------------) + I communication
0x002ceb4e, // n0x11fc c0x0000 (---------------) + I communications
0x00232dc9, // n0x11fd c0x0000 (---------------) + I community
0x00234a08, // n0x11fe c0x0000 (---------------) + I computer
0x00234a0f, // n0x11ff c0x0000 (---------------) + I computerhistory
0x00236fcc, // n0x1200 c0x0000 (---------------) + I contemporary
0x00236fcf, // n0x1201 c0x0000 (---------------) + I contemporaryart
0x00238b07, // n0x1202 c0x0000 (---------------) + I convent
0x0023b08a, // n0x1203 c0x0000 (---------------) + I copenhagen
0x0021bd0b, // n0x1204 c0x0000 (---------------) + I corporation
0x0023c808, // n0x1205 c0x0000 (---------------) + I corvette
0x0023dec7, // n0x1206 c0x0000 (---------------) + I costume
0x0025d54d, // n0x1207 c0x0000 (---------------) + I countryestate
0x00322546, // n0x1208 c0x0000 (---------------) + I county
0x0020b546, // n0x1209 c0x0000 (---------------) + I crafts
0x0023f249, // n0x120a c0x0000 (---------------) + I cranbrook
0x0031e188, // n0x120b c0x0000 (---------------) + I creation
0x002440c8, // n0x120c c0x0000 (---------------) + I cultural
0x002440ce, // n0x120d c0x0000 (---------------) + I culturalcenter
0x002ee107, // n0x120e c0x0000 (---------------) + I culture
0x0020a6c5, // n0x120f c0x0000 (---------------) + I cyber
0x002c5c05, // n0x1210 c0x0000 (---------------) + I cymru
0x00222244, // n0x1211 c0x0000 (---------------) + I dali
0x0034cc46, // n0x1212 c0x0000 (---------------) + I dallas
0x00285748, // n0x1213 c0x0000 (---------------) + I database
0x0020fbc3, // n0x1214 c0x0000 (---------------) + I ddr
0x00249dce, // n0x1215 c0x0000 (---------------) + I decorativearts
0x002b6448, // n0x1216 c0x0000 (---------------) + I delaware
0x002a7a8b, // n0x1217 c0x0000 (---------------) + I delmenhorst
0x0035c787, // n0x1218 c0x0000 (---------------) + I denmark
0x002da245, // n0x1219 c0x0000 (---------------) + I depot
0x00237ec6, // n0x121a c0x0000 (---------------) + I design
0x0029ef47, // n0x121b c0x0000 (---------------) + I detroit
0x002f0dc8, // n0x121c c0x0000 (---------------) + I dinosaur
0x00334c49, // n0x121d c0x0000 (---------------) + I discovery
0x0035a285, // n0x121e c0x0000 (---------------) + I dolls
0x0027fdc8, // n0x121f c0x0000 (---------------) + I donostia
0x0036f206, // n0x1220 c0x0000 (---------------) + I durham
0x0034fb4a, // n0x1221 c0x0000 (---------------) + I eastafrica
0x002fa309, // n0x1222 c0x0000 (---------------) + I eastcoast
0x0021e089, // n0x1223 c0x0000 (---------------) + I education
0x0021e08b, // n0x1224 c0x0000 (---------------) + I educational
0x0029aa48, // n0x1225 c0x0000 (---------------) + I egyptian
0x0030b389, // n0x1226 c0x0000 (---------------) + I eisenbahn
0x00212d46, // n0x1227 c0x0000 (---------------) + I elburg
0x003454ca, // n0x1228 c0x0000 (---------------) + I elvendrell
0x00207b4a, // n0x1229 c0x0000 (---------------) + I embroidery
0x0023b28c, // n0x122a c0x0000 (---------------) + I encyclopedic
0x002b0e07, // n0x122b c0x0000 (---------------) + I england
0x00307a4a, // n0x122c c0x0000 (---------------) + I entomology
0x0027f68b, // n0x122d c0x0000 (---------------) + I environment
0x0027f699, // n0x122e c0x0000 (---------------) + I environmentalconservation
0x002182c8, // n0x122f c0x0000 (---------------) + I epilepsy
0x0022adc5, // n0x1230 c0x0000 (---------------) + I essex
0x0025d706, // n0x1231 c0x0000 (---------------) + I estate
0x003003c9, // n0x1232 c0x0000 (---------------) + I ethnology
0x00252886, // n0x1233 c0x0000 (---------------) + I exeter
0x0020138a, // n0x1234 c0x0000 (---------------) + I exhibition
0x002db306, // n0x1235 c0x0000 (---------------) + I family
0x002125c4, // n0x1236 c0x0000 (---------------) + I farm
0x002125cd, // n0x1237 c0x0000 (---------------) + I farmequipment
0x0021f607, // n0x1238 c0x0000 (---------------) + I farmers
0x0024ae49, // n0x1239 c0x0000 (---------------) + I farmstead
0x00287a05, // n0x123a c0x0000 (---------------) + I field
0x002470c8, // n0x123b c0x0000 (---------------) + I figueres
0x00247b49, // n0x123c c0x0000 (---------------) + I filatelia
0x00247d84, // n0x123d c0x0000 (---------------) + I film
0x00248587, // n0x123e c0x0000 (---------------) + I fineart
0x00248588, // n0x123f c0x0000 (---------------) + I finearts
0x00248c47, // n0x1240 c0x0000 (---------------) + I finland
0x0025f808, // n0x1241 c0x0000 (---------------) + I flanders
0x0024e007, // n0x1242 c0x0000 (---------------) + I florida
0x002d2445, // n0x1243 c0x0000 (---------------) + I force
0x002541cc, // n0x1244 c0x0000 (---------------) + I fortmissoula
0x00254809, // n0x1245 c0x0000 (---------------) + I fortworth
0x002d994a, // n0x1246 c0x0000 (---------------) + I foundation
0x00357989, // n0x1247 c0x0000 (---------------) + I francaise
0x002e0f49, // n0x1248 c0x0000 (---------------) + I frankfurt
0x0033ee0c, // n0x1249 c0x0000 (---------------) + I franziskaner
0x0038230b, // n0x124a c0x0000 (---------------) + I freemasonry
0x00256d08, // n0x124b c0x0000 (---------------) + I freiburg
0x00257bc8, // n0x124c c0x0000 (---------------) + I fribourg
0x0025bac4, // n0x124d c0x0000 (---------------) + I frog
0x0027bdc8, // n0x124e c0x0000 (---------------) + I fundacio
0x0027da49, // n0x124f c0x0000 (---------------) + I furniture
0x00237387, // n0x1250 c0x0000 (---------------) + I gallery
0x002196c6, // n0x1251 c0x0000 (---------------) + I garden
0x00216987, // n0x1252 c0x0000 (---------------) + I gateway
0x0032e6c9, // n0x1253 c0x0000 (---------------) + I geelvinck
0x0036138b, // n0x1254 c0x0000 (---------------) + I gemological
0x00344207, // n0x1255 c0x0000 (---------------) + I geology
0x00321f47, // n0x1256 c0x0000 (---------------) + I georgia
0x0027f547, // n0x1257 c0x0000 (---------------) + I giessen
0x0036f444, // n0x1258 c0x0000 (---------------) + I glas
0x0036f445, // n0x1259 c0x0000 (---------------) + I glass
0x0029c0c5, // n0x125a c0x0000 (---------------) + I gorge
0x0031568b, // n0x125b c0x0000 (---------------) + I grandrapids
0x0035e7c4, // n0x125c c0x0000 (---------------) + I graz
0x00227308, // n0x125d c0x0000 (---------------) + I guernsey
0x00290fca, // n0x125e c0x0000 (---------------) + I halloffame
0x0036f2c7, // n0x125f c0x0000 (---------------) + I hamburg
0x00358847, // n0x1260 c0x0000 (---------------) + I handson
0x00284852, // n0x1261 c0x0000 (---------------) + I harvestcelebration
0x00288206, // n0x1262 c0x0000 (---------------) + I hawaii
0x00241cc6, // n0x1263 c0x0000 (---------------) + I health
0x002dab8e, // n0x1264 c0x0000 (---------------) + I heimatunduhren
0x002ace06, // n0x1265 c0x0000 (---------------) + I hellas
0x0020eb08, // n0x1266 c0x0000 (---------------) + I helsinki
0x0035a4cf, // n0x1267 c0x0000 (---------------) + I hembygdsforbund
0x0036f888, // n0x1268 c0x0000 (---------------) + I heritage
0x0026a648, // n0x1269 c0x0000 (---------------) + I histoire
0x002e884a, // n0x126a c0x0000 (---------------) + I historical
0x002e8851, // n0x126b c0x0000 (---------------) + I historicalsociety
0x00296d8e, // n0x126c c0x0000 (---------------) + I historichouses
0x002517ca, // n0x126d c0x0000 (---------------) + I historisch
0x002517cc, // n0x126e c0x0000 (---------------) + I historisches
0x00234c07, // n0x126f c0x0000 (---------------) + I history
0x00234c10, // n0x1270 c0x0000 (---------------) + I historyofscience
0x00204e08, // n0x1271 c0x0000 (---------------) + I horology
0x0022ca85, // n0x1272 c0x0000 (---------------) + I house
0x002e50ca, // n0x1273 c0x0000 (---------------) + I humanities
0x00207fcc, // n0x1274 c0x0000 (---------------) + I illustration
0x002d468d, // n0x1275 c0x0000 (---------------) + I imageandsound
0x00226c06, // n0x1276 c0x0000 (---------------) + I indian
0x002e2587, // n0x1277 c0x0000 (---------------) + I indiana
0x002e258c, // n0x1278 c0x0000 (---------------) + I indianapolis
0x00226c0c, // n0x1279 c0x0000 (---------------) + I indianmarket
0x002188cc, // n0x127a c0x0000 (---------------) + I intelligence
0x002a038b, // n0x127b c0x0000 (---------------) + I interactive
0x0027eec4, // n0x127c c0x0000 (---------------) + I iraq
0x0021c2c4, // n0x127d c0x0000 (---------------) + I iron
0x003406c9, // n0x127e c0x0000 (---------------) + I isleofman
0x002bd2c7, // n0x127f c0x0000 (---------------) + I jamison
0x0022eec9, // n0x1280 c0x0000 (---------------) + I jefferson
0x002d7909, // n0x1281 c0x0000 (---------------) + I jerusalem
0x00378e87, // n0x1282 c0x0000 (---------------) + I jewelry
0x002a11c6, // n0x1283 c0x0000 (---------------) + I jewish
0x002a11c9, // n0x1284 c0x0000 (---------------) + I jewishart
0x002a1803, // n0x1285 c0x0000 (---------------) + I jfk
0x002b954a, // n0x1286 c0x0000 (---------------) + I journalism
0x00367007, // n0x1287 c0x0000 (---------------) + I judaica
0x002384cb, // n0x1288 c0x0000 (---------------) + I judygarland
0x0032deca, // n0x1289 c0x0000 (---------------) + I juedisches
0x00257ec4, // n0x128a c0x0000 (---------------) + I juif
0x002f1f06, // n0x128b c0x0000 (---------------) + I karate
0x002b4d09, // n0x128c c0x0000 (---------------) + I karikatur
0x0025c784, // n0x128d c0x0000 (---------------) + I kids
0x0020cf4a, // n0x128e c0x0000 (---------------) + I koebenhavn
0x0024db85, // n0x128f c0x0000 (---------------) + I koeln
0x002a9905, // n0x1290 c0x0000 (---------------) + I kunst
0x002a990d, // n0x1291 c0x0000 (---------------) + I kunstsammlung
0x002a9c4e, // n0x1292 c0x0000 (---------------) + I kunstunddesign
0x00306685, // n0x1293 c0x0000 (---------------) + I labor
0x0035e106, // n0x1294 c0x0000 (---------------) + I labour
0x003819c7, // n0x1295 c0x0000 (---------------) + I lajolla
0x0022268a, // n0x1296 c0x0000 (---------------) + I lancashire
0x0037d1c6, // n0x1297 c0x0000 (---------------) + I landes
0x002a8ac4, // n0x1298 c0x0000 (---------------) + I lans
0x003109c7, // n0x1299 c0x0000 (---------------) + I larsson
0x0029730b, // n0x129a c0x0000 (---------------) + I lewismiller
0x00376747, // n0x129b c0x0000 (---------------) + I lincoln
0x00369304, // n0x129c c0x0000 (---------------) + I linz
0x00284006, // n0x129d c0x0000 (---------------) + I living
0x0028400d, // n0x129e c0x0000 (---------------) + I livinghistory
0x00253ecc, // n0x129f c0x0000 (---------------) + I localhistory
0x0021d286, // n0x12a0 c0x0000
(---------------) + I london - 0x0021500a, // n0x12a1 c0x0000 (---------------) + I losangeles - 0x00226286, // n0x12a2 c0x0000 (---------------) + I louvre - 0x0036be88, // n0x12a3 c0x0000 (---------------) + I loyalist - 0x00380847, // n0x12a4 c0x0000 (---------------) + I lucerne - 0x00242aca, // n0x12a5 c0x0000 (---------------) + I luxembourg - 0x00248246, // n0x12a6 c0x0000 (---------------) + I luzern - 0x00276743, // n0x12a7 c0x0000 (---------------) + I mad - 0x00309b86, // n0x12a8 c0x0000 (---------------) + I madrid - 0x00364908, // n0x12a9 c0x0000 (---------------) + I mallorca - 0x0034084a, // n0x12aa c0x0000 (---------------) + I manchester - 0x00279207, // n0x12ab c0x0000 (---------------) + I mansion - 0x00279208, // n0x12ac c0x0000 (---------------) + I mansions - 0x0027c384, // n0x12ad c0x0000 (---------------) + I manx - 0x00322fc7, // n0x12ae c0x0000 (---------------) + I marburg - 0x00246948, // n0x12af c0x0000 (---------------) + I maritime - 0x003172c8, // n0x12b0 c0x0000 (---------------) + I maritimo - 0x0036a988, // n0x12b1 c0x0000 (---------------) + I maryland - 0x0020448a, // n0x12b2 c0x0000 (---------------) + I marylhurst - 0x0021e585, // n0x12b3 c0x0000 (---------------) + I media - 0x00384a47, // n0x12b4 c0x0000 (---------------) + I medical - 0x00251613, // n0x12b5 c0x0000 (---------------) + I medizinhistorisches - 0x00264ac6, // n0x12b6 c0x0000 (---------------) + I meeres - 0x0035fe88, // n0x12b7 c0x0000 (---------------) + I memorial - 0x002a78c9, // n0x12b8 c0x0000 (---------------) + I mesaverde - 0x002250c8, // n0x12b9 c0x0000 (---------------) + I michigan - 0x0026324b, // n0x12ba c0x0000 (---------------) + I midatlantic - 0x00315248, // n0x12bb c0x0000 (---------------) + I military - 0x00240444, // n0x12bc c0x0000 (---------------) + I mill - 0x002adb06, // n0x12bd c0x0000 (---------------) + I miners - 0x00386146, // n0x12be c0x0000 (---------------) + I mining - 0x002e8109, // n0x12bf c0x0000 (---------------) + I minnesota - 0x002b2bc7, // n0x12c0 c0x0000 (---------------) + I missile - 0x002542c8, // n0x12c1 c0x0000 (---------------) + I missoula - 0x002932c6, // n0x12c2 c0x0000 (---------------) + I modern - 0x0022b784, // n0x12c3 c0x0000 (---------------) + I moma - 0x002bac05, // n0x12c4 c0x0000 (---------------) + I money - 0x002b5548, // n0x12c5 c0x0000 (---------------) + I monmouth - 0x002b580a, // n0x12c6 c0x0000 (---------------) + I monticello - 0x002b6048, // n0x12c7 c0x0000 (---------------) + I montreal - 0x002bbb46, // n0x12c8 c0x0000 (---------------) + I moscow - 0x0029088a, // n0x12c9 c0x0000 (---------------) + I motorcycle - 0x002ed3c8, // n0x12ca c0x0000 (---------------) + I muenchen - 0x002be1c8, // n0x12cb c0x0000 (---------------) + I muenster - 0x002bf1c8, // n0x12cc c0x0000 (---------------) + I mulhouse - 0x002c0686, // n0x12cd c0x0000 (---------------) + I muncie - 0x002c2446, // n0x12ce c0x0000 (---------------) + I museet - 0x00311b0c, // n0x12cf c0x0000 (---------------) + I museumcenter - 0x002c2a10, // n0x12d0 c0x0000 (---------------) + I museumvereniging - 0x00294545, // n0x12d1 c0x0000 (---------------) + I music - 0x002f0408, // n0x12d2 c0x0000 (---------------) + I national - 0x002f0410, // n0x12d3 c0x0000 (---------------) + I nationalfirearms - 0x0036f690, // n0x12d4 c0x0000 (---------------) + I nationalheritage - 0x0027480e, // n0x12d5 c0x0000 (---------------) + I nativeamerican - 0x0031178e, // n0x12d6 c0x0000 (---------------) + I naturalhistory - 0x00311794, // n0x12d7 c0x0000 (---------------) + 
I naturalhistorymuseum - 0x0031288f, // n0x12d8 c0x0000 (---------------) + I naturalsciences - 0x00312c46, // n0x12d9 c0x0000 (---------------) + I nature - 0x0031a4d1, // n0x12da c0x0000 (---------------) + I naturhistorisches - 0x0036ae13, // n0x12db c0x0000 (---------------) + I natuurwetenschappen - 0x0036b288, // n0x12dc c0x0000 (---------------) + I naumburg - 0x00382dc5, // n0x12dd c0x0000 (---------------) + I naval - 0x00272788, // n0x12de c0x0000 (---------------) + I nebraska - 0x0022c645, // n0x12df c0x0000 (---------------) + I neues - 0x00230c4c, // n0x12e0 c0x0000 (---------------) + I newhampshire - 0x00242489, // n0x12e1 c0x0000 (---------------) + I newjersey - 0x00273cc9, // n0x12e2 c0x0000 (---------------) + I newmexico - 0x00216707, // n0x12e3 c0x0000 (---------------) + I newport - 0x00234589, // n0x12e4 c0x0000 (---------------) + I newspaper - 0x0021f847, // n0x12e5 c0x0000 (---------------) + I newyork - 0x0021c606, // n0x12e6 c0x0000 (---------------) + I niepce - 0x0023aa47, // n0x12e7 c0x0000 (---------------) + I norfolk - 0x002e6285, // n0x12e8 c0x0000 (---------------) + I north - 0x0022d2c3, // n0x12e9 c0x0000 (---------------) + I nrw - 0x0020fd89, // n0x12ea c0x0000 (---------------) + I nuernberg - 0x002f6009, // n0x12eb c0x0000 (---------------) + I nuremberg - 0x0021af43, // n0x12ec c0x0000 (---------------) + I nyc - 0x002335c4, // n0x12ed c0x0000 (---------------) + I nyny - 0x00376c0d, // n0x12ee c0x0000 (---------------) + I oceanographic - 0x0020698f, // n0x12ef c0x0000 (---------------) + I oceanographique - 0x002f4f85, // n0x12f0 c0x0000 (---------------) + I omaha - 0x0030be06, // n0x12f1 c0x0000 (---------------) + I online - 0x0022fc87, // n0x12f2 c0x0000 (---------------) + I ontario - 0x00347807, // n0x12f3 c0x0000 (---------------) + I openair - 0x002814c6, // n0x12f4 c0x0000 (---------------) + I oregon - 0x002814cb, // n0x12f5 c0x0000 (---------------) + I oregontrail - 0x00297dc5, // n0x12f6 c0x0000 (---------------) + I otago - 0x003810c6, // n0x12f7 c0x0000 (---------------) + I oxford - 0x00269607, // n0x12f8 c0x0000 (---------------) + I pacific - 0x00225709, // n0x12f9 c0x0000 (---------------) + I paderborn - 0x00200ac6, // n0x12fa c0x0000 (---------------) + I palace - 0x0022b305, // n0x12fb c0x0000 (---------------) + I paleo - 0x0022e70b, // n0x12fc c0x0000 (---------------) + I palmsprings - 0x0024aa86, // n0x12fd c0x0000 (---------------) + I panama - 0x00256585, // n0x12fe c0x0000 (---------------) + I paris - 0x002cbe48, // n0x12ff c0x0000 (---------------) + I pasadena - 0x002c5a88, // n0x1300 c0x0000 (---------------) + I pharmacy - 0x002c5e4c, // n0x1301 c0x0000 (---------------) + I philadelphia - 0x002c5e50, // n0x1302 c0x0000 (---------------) + I philadelphiaarea - 0x002c6509, // n0x1303 c0x0000 (---------------) + I philately - 0x002c6c07, // n0x1304 c0x0000 (---------------) + I phoenix - 0x002c718b, // n0x1305 c0x0000 (---------------) + I photography - 0x002c8e06, // n0x1306 c0x0000 (---------------) + I pilots - 0x002c998a, // n0x1307 c0x0000 (---------------) + I pittsburgh - 0x002ca30b, // n0x1308 c0x0000 (---------------) + I planetarium - 0x002cab4a, // n0x1309 c0x0000 (---------------) + I plantation - 0x002cadc6, // n0x130a c0x0000 (---------------) + I plants - 0x002cb405, // n0x130b c0x0000 (---------------) + I plaza - 0x00366d46, // n0x130c c0x0000 (---------------) + I portal - 0x0027e008, // n0x130d c0x0000 (---------------) + I portland - 0x002167ca, // n0x130e c0x0000 (---------------) + I 
portlligat - 0x002ce7dc, // n0x130f c0x0000 (---------------) + I posts-and-telecommunications - 0x002cf3cc, // n0x1310 c0x0000 (---------------) + I preservation - 0x002cf6c8, // n0x1311 c0x0000 (---------------) + I presidio - 0x0022ad45, // n0x1312 c0x0000 (---------------) + I press - 0x002d0fc7, // n0x1313 c0x0000 (---------------) + I project - 0x002d7ec6, // n0x1314 c0x0000 (---------------) + I public - 0x00375845, // n0x1315 c0x0000 (---------------) + I pubol - 0x0021b2c6, // n0x1316 c0x0000 (---------------) + I quebec - 0x00281688, // n0x1317 c0x0000 (---------------) + I railroad - 0x00342b07, // n0x1318 c0x0000 (---------------) + I railway - 0x002d8188, // n0x1319 c0x0000 (---------------) + I research - 0x0033a44a, // n0x131a c0x0000 (---------------) + I resistance - 0x002ee8cc, // n0x131b c0x0000 (---------------) + I riodejaneiro - 0x002eeb49, // n0x131c c0x0000 (---------------) + I rochester - 0x0037bc87, // n0x131d c0x0000 (---------------) + I rockart - 0x00225c04, // n0x131e c0x0000 (---------------) + I roma - 0x002c5cc6, // n0x131f c0x0000 (---------------) + I russia - 0x002ea34a, // n0x1320 c0x0000 (---------------) + I saintlouis - 0x002d7a05, // n0x1321 c0x0000 (---------------) + I salem - 0x0022204c, // n0x1322 c0x0000 (---------------) + I salvadordali - 0x00224d48, // n0x1323 c0x0000 (---------------) + I salzburg - 0x00322888, // n0x1324 c0x0000 (---------------) + I sandiego - 0x00250a0c, // n0x1325 c0x0000 (---------------) + I sanfrancisco - 0x00232acc, // n0x1326 c0x0000 (---------------) + I santabarbara - 0x0023bf09, // n0x1327 c0x0000 (---------------) + I santacruz - 0x0023ed47, // n0x1328 c0x0000 (---------------) + I santafe - 0x0025220c, // n0x1329 c0x0000 (---------------) + I saskatchewan - 0x00255bc4, // n0x132a c0x0000 (---------------) + I satx - 0x0025e88a, // n0x132b c0x0000 (---------------) + I savannahga - 0x003856cc, // n0x132c c0x0000 (---------------) + I schlesisches - 0x0026638b, // n0x132d c0x0000 (---------------) + I schoenbrunn - 0x0026d70b, // n0x132e c0x0000 (---------------) + I schokoladen - 0x00275006, // n0x132f c0x0000 (---------------) + I school - 0x0027cb07, // n0x1330 c0x0000 (---------------) + I schweiz - 0x00234e47, // n0x1331 c0x0000 (---------------) + I science - 0x00234e4f, // n0x1332 c0x0000 (---------------) + I science-fiction - 0x002e2cd1, // n0x1333 c0x0000 (---------------) + I scienceandhistory - 0x0026fc12, // n0x1334 c0x0000 (---------------) + I scienceandindustry - 0x0027e6cd, // n0x1335 c0x0000 (---------------) + I sciencecenter - 0x0027e6ce, // n0x1336 c0x0000 (---------------) + I sciencecenters - 0x0027ea0e, // n0x1337 c0x0000 (---------------) + I sciencehistory - 0x00312a48, // n0x1338 c0x0000 (---------------) + I sciences - 0x00312a52, // n0x1339 c0x0000 (---------------) + I sciencesnaturelles - 0x00250c48, // n0x133a c0x0000 (---------------) + I scotland - 0x002f2807, // n0x133b c0x0000 (---------------) + I seaport - 0x0024b38a, // n0x133c c0x0000 (---------------) + I settlement - 0x0021d048, // n0x133d c0x0000 (---------------) + I settlers - 0x002acdc5, // n0x133e c0x0000 (---------------) + I shell - 0x002afe8a, // n0x133f c0x0000 (---------------) + I sherbrooke - 0x0029b107, // n0x1340 c0x0000 (---------------) + I sibenik - 0x0033aac4, // n0x1341 c0x0000 (---------------) + I silk - 0x00221943, // n0x1342 c0x0000 (---------------) + I ski - 0x002f5245, // n0x1343 c0x0000 (---------------) + I skole - 0x002e8ac7, // n0x1344 c0x0000 (---------------) + I society - 0x0034f247, // 
n0x1345 c0x0000 (---------------) + I sologne - 0x002d488e, // n0x1346 c0x0000 (---------------) + I soundandvision - 0x002d87cd, // n0x1347 c0x0000 (---------------) + I southcarolina - 0x002d8c09, // n0x1348 c0x0000 (---------------) + I southwest - 0x002d91c5, // n0x1349 c0x0000 (---------------) + I space - 0x002dc043, // n0x134a c0x0000 (---------------) + I spy - 0x00365006, // n0x134b c0x0000 (---------------) + I square - 0x00256a45, // n0x134c c0x0000 (---------------) + I stadt - 0x002a7cc8, // n0x134d c0x0000 (---------------) + I stalbans - 0x002c01c9, // n0x134e c0x0000 (---------------) + I starnberg - 0x0025d745, // n0x134f c0x0000 (---------------) + I state - 0x002b628f, // n0x1350 c0x0000 (---------------) + I stateofdelaware - 0x00225f47, // n0x1351 c0x0000 (---------------) + I station - 0x00383845, // n0x1352 c0x0000 (---------------) + I steam - 0x002ff18a, // n0x1353 c0x0000 (---------------) + I steiermark - 0x00206706, // n0x1354 c0x0000 (---------------) + I stjohn - 0x0036c009, // n0x1355 c0x0000 (---------------) + I stockholm - 0x002dcc0c, // n0x1356 c0x0000 (---------------) + I stpetersburg - 0x002dde09, // n0x1357 c0x0000 (---------------) + I stuttgart - 0x0020bd86, // n0x1358 c0x0000 (---------------) + I suisse - 0x00290dcc, // n0x1359 c0x0000 (---------------) + I surgeonshall - 0x002de486, // n0x135a c0x0000 (---------------) + I surrey - 0x002e1e48, // n0x135b c0x0000 (---------------) + I svizzera - 0x00245d86, // n0x135c c0x0000 (---------------) + I sweden - 0x00266bc6, // n0x135d c0x0000 (---------------) + I sydney - 0x0029bd44, // n0x135e c0x0000 (---------------) + I tank - 0x002a4403, // n0x135f c0x0000 (---------------) + I tcm - 0x002a394a, // n0x1360 c0x0000 (---------------) + I technology - 0x00311391, // n0x1361 c0x0000 (---------------) + I telekommunikation - 0x002a0f4a, // n0x1362 c0x0000 (---------------) + I television - 0x0031e985, // n0x1363 c0x0000 (---------------) + I texas - 0x00295047, // n0x1364 c0x0000 (---------------) + I textile - 0x0036d6c7, // n0x1365 c0x0000 (---------------) + I theater - 0x00246a44, // n0x1366 c0x0000 (---------------) + I time - 0x00246a4b, // n0x1367 c0x0000 (---------------) + I timekeeping - 0x002ec988, // n0x1368 c0x0000 (---------------) + I topology - 0x002a6206, // n0x1369 c0x0000 (---------------) + I torino - 0x002318c5, // n0x136a c0x0000 (---------------) + I touch - 0x0023bb04, // n0x136b c0x0000 (---------------) + I town - 0x0028f1c9, // n0x136c c0x0000 (---------------) + I transport - 0x0029d704, // n0x136d c0x0000 (---------------) + I tree - 0x00222d07, // n0x136e c0x0000 (---------------) + I trolley - 0x00313345, // n0x136f c0x0000 (---------------) + I trust - 0x00313347, // n0x1370 c0x0000 (---------------) + I trustee - 0x002dadc5, // n0x1371 c0x0000 (---------------) + I uhren - 0x00216b83, // n0x1372 c0x0000 (---------------) + I ulm - 0x002f26c8, // n0x1373 c0x0000 (---------------) + I undersea - 0x003826ca, // n0x1374 c0x0000 (---------------) + I university - 0x00232883, // n0x1375 c0x0000 (---------------) + I usa - 0x0023288a, // n0x1376 c0x0000 (---------------) + I usantiques - 0x00378506, // n0x1377 c0x0000 (---------------) + I usarts - 0x0025d4cf, // n0x1378 c0x0000 (---------------) + I uscountryestate - 0x0031df49, // n0x1379 c0x0000 (---------------) + I usculture - 0x00249d50, // n0x137a c0x0000 (---------------) + I usdecorativearts - 0x0025f208, // n0x137b c0x0000 (---------------) + I usgarden - 0x002bbdc9, // n0x137c c0x0000 (---------------) + I ushistory 
- 0x0028b247, // n0x137d c0x0000 (---------------) + I ushuaia - 0x00283f8f, // n0x137e c0x0000 (---------------) + I uslivinghistory - 0x00202fc4, // n0x137f c0x0000 (---------------) + I utah - 0x002525c4, // n0x1380 c0x0000 (---------------) + I uvic - 0x0021e946, // n0x1381 c0x0000 (---------------) + I valley - 0x0022f946, // n0x1382 c0x0000 (---------------) + I vantaa - 0x002e7b4a, // n0x1383 c0x0000 (---------------) + I versailles - 0x002b67c6, // n0x1384 c0x0000 (---------------) + I viking - 0x0031d1c7, // n0x1385 c0x0000 (---------------) + I village - 0x002efb88, // n0x1386 c0x0000 (---------------) + I virginia - 0x002efd87, // n0x1387 c0x0000 (---------------) + I virtual - 0x002eff47, // n0x1388 c0x0000 (---------------) + I virtuel - 0x00326bca, // n0x1389 c0x0000 (---------------) + I vlaanderen - 0x002f250b, // n0x138a c0x0000 (---------------) + I volkenkunde - 0x00236485, // n0x138b c0x0000 (---------------) + I wales - 0x0026f848, // n0x138c c0x0000 (---------------) + I wallonie - 0x00235783, // n0x138d c0x0000 (---------------) + I war - 0x0020014c, // n0x138e c0x0000 (---------------) + I washingtondc - 0x00205e4f, // n0x138f c0x0000 (---------------) + I watch-and-clock - 0x0036e34d, // n0x1390 c0x0000 (---------------) + I watchandclock - 0x002d8d47, // n0x1391 c0x0000 (---------------) + I western - 0x00211789, // n0x1392 c0x0000 (---------------) + I westfalen - 0x0022d347, // n0x1393 c0x0000 (---------------) + I whaling - 0x00276ec8, // n0x1394 c0x0000 (---------------) + I wildlife - 0x0023148c, // n0x1395 c0x0000 (---------------) + I williamsburg - 0x00240348, // n0x1396 c0x0000 (---------------) + I windmill - 0x0022e548, // n0x1397 c0x0000 (---------------) + I workshop - 0x0030004e, // n0x1398 c0x0000 (---------------) + I xn--9dbhblg6di - 0x0030f0d4, // n0x1399 c0x0000 (---------------) + I xn--comunicaes-v6a2o - 0x0030f5e4, // n0x139a c0x0000 (---------------) + I xn--correios-e-telecomunicaes-ghc29a - 0x0032084a, // n0x139b c0x0000 (---------------) + I xn--h1aegh - 0x0033794b, // n0x139c c0x0000 (---------------) + I xn--lns-qla - 0x0021f904, // n0x139d c0x0000 (---------------) + I york - 0x0021f909, // n0x139e c0x0000 (---------------) + I yorkshire - 0x002e6f08, // n0x139f c0x0000 (---------------) + I yosemite - 0x00341085, // n0x13a0 c0x0000 (---------------) + I youth - 0x00377d4a, // n0x13a1 c0x0000 (---------------) + I zoological - 0x003651c7, // n0x13a2 c0x0000 (---------------) + I zoology - 0x00233784, // n0x13a3 c0x0000 (---------------) + I aero - 0x00202183, // n0x13a4 c0x0000 (---------------) + I biz - 0x00232dc3, // n0x13a5 c0x0000 (---------------) + I com - 0x0023a884, // n0x13a6 c0x0000 (---------------) + I coop - 0x0021e083, // n0x13a7 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x13a8 c0x0000 (---------------) + I gov - 0x00208a44, // n0x13a9 c0x0000 (---------------) + I info - 0x002188c3, // n0x13aa c0x0000 (---------------) + I int - 0x00240443, // n0x13ab c0x0000 (---------------) + I mil - 0x002c2a06, // n0x13ac c0x0000 (---------------) + I museum - 0x00267944, // n0x13ad c0x0000 (---------------) + I name - 0x00218643, // n0x13ae c0x0000 (---------------) + I net - 0x0024d043, // n0x13af c0x0000 (---------------) + I org - 0x002cfc43, // n0x13b0 c0x0000 (---------------) + I pro - 0x00200b82, // n0x13b1 c0x0000 (---------------) + I ac - 0x00202183, // n0x13b2 c0x0000 (---------------) + I biz - 0x00200882, // n0x13b3 c0x0000 (---------------) + I co - 0x00232dc3, // n0x13b4 c0x0000 (---------------) + I com - 
0x0023a884, // n0x13b5 c0x0000 (---------------) + I coop - 0x0021e083, // n0x13b6 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x13b7 c0x0000 (---------------) + I gov - 0x002188c3, // n0x13b8 c0x0000 (---------------) + I int - 0x002c2a06, // n0x13b9 c0x0000 (---------------) + I museum - 0x00218643, // n0x13ba c0x0000 (---------------) + I net - 0x0024d043, // n0x13bb c0x0000 (---------------) + I org - 0x0009e448, // n0x13bc c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x13bd c0x0000 (---------------) + I com - 0x0021e083, // n0x13be c0x0000 (---------------) + I edu - 0x003704c3, // n0x13bf c0x0000 (---------------) + I gob - 0x00218643, // n0x13c0 c0x0000 (---------------) + I net - 0x0024d043, // n0x13c1 c0x0000 (---------------) + I org - 0x00232dc3, // n0x13c2 c0x0000 (---------------) + I com - 0x0021e083, // n0x13c3 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x13c4 c0x0000 (---------------) + I gov - 0x00240443, // n0x13c5 c0x0000 (---------------) + I mil - 0x00267944, // n0x13c6 c0x0000 (---------------) + I name - 0x00218643, // n0x13c7 c0x0000 (---------------) + I net - 0x0024d043, // n0x13c8 c0x0000 (---------------) + I org - 0x00685648, // n0x13c9 c0x0001 (---------------) ! I teledata - 0x00214582, // n0x13ca c0x0000 (---------------) + I ca - 0x002020c2, // n0x13cb c0x0000 (---------------) + I cc - 0x00200882, // n0x13cc c0x0000 (---------------) + I co - 0x00232dc3, // n0x13cd c0x0000 (---------------) + I com - 0x0020fc02, // n0x13ce c0x0000 (---------------) + I dr - 0x00200242, // n0x13cf c0x0000 (---------------) + I in - 0x00208a44, // n0x13d0 c0x0000 (---------------) + I info - 0x00277f84, // n0x13d1 c0x0000 (---------------) + I mobi - 0x0022cd82, // n0x13d2 c0x0000 (---------------) + I mx - 0x00267944, // n0x13d3 c0x0000 (---------------) + I name - 0x00200d02, // n0x13d4 c0x0000 (---------------) + I or - 0x0024d043, // n0x13d5 c0x0000 (---------------) + I org - 0x002cfc43, // n0x13d6 c0x0000 (---------------) + I pro - 0x00275006, // n0x13d7 c0x0000 (---------------) + I school - 0x0028dc82, // n0x13d8 c0x0000 (---------------) + I tv - 0x002073c2, // n0x13d9 c0x0000 (---------------) + I us - 0x002012c2, // n0x13da c0x0000 (---------------) + I ws - 0x33241483, // n0x13db c0x00cc (n0x13dd-n0x13de) o I her - 0x33634c03, // n0x13dc c0x00cd (n0x13de-n0x13df) o I his - 0x00052a06, // n0x13dd c0x0000 (---------------) + forgot - 0x00052a06, // n0x13de c0x0000 (---------------) + forgot - 0x00278344, // n0x13df c0x0000 (---------------) + I asso - 0x0010964c, // n0x13e0 c0x0000 (---------------) + at-band-camp - 0x00077e0c, // n0x13e1 c0x0000 (---------------) + azure-mobile - 0x000b46cd, // n0x13e2 c0x0000 (---------------) + azurewebsites - 0x000dd187, // n0x13e3 c0x0000 (---------------) + blogdns - 0x00027508, // n0x13e4 c0x0000 (---------------) + broke-it - 0x0002c98a, // n0x13e5 c0x0000 (---------------) + buyshouses - 0x0017e388, // n0x13e6 c0x0000 (---------------) + cloudapp - 0x0002faca, // n0x13e7 c0x0000 (---------------) + cloudfront - 0x000c7a88, // n0x13e8 c0x0000 (---------------) + dnsalias - 0x00181687, // n0x13e9 c0x0000 (---------------) + dnsdojo - 0x000100c7, // n0x13ea c0x0000 (---------------) + does-it - 0x0012cb09, // n0x13eb c0x0000 (---------------) + dontexist - 0x00159288, // n0x13ec c0x0000 (---------------) + dynalias - 0x0008ee89, // n0x13ed c0x0000 (---------------) + dynathome - 0x0009d40d, // n0x13ee c0x0000 (---------------) + endofinternet - 0x342d0e46, // n0x13ef c0x00d0 (n0x140f-n0x1411) o I 
fastly - 0x0005cdc7, // n0x13f0 c0x0000 (---------------) + from-az - 0x0005e047, // n0x13f1 c0x0000 (---------------) + from-co - 0x00063947, // n0x13f2 c0x0000 (---------------) + from-la - 0x00068707, // n0x13f3 c0x0000 (---------------) + from-ny - 0x0000cd02, // n0x13f4 c0x0000 (---------------) + gb - 0x0009c187, // n0x13f5 c0x0000 (---------------) + gets-it - 0x00113b0c, // n0x13f6 c0x0000 (---------------) + ham-radio-op - 0x0000ddc7, // n0x13f7 c0x0000 (---------------) + homeftp - 0x000b9206, // n0x13f8 c0x0000 (---------------) + homeip - 0x00099409, // n0x13f9 c0x0000 (---------------) + homelinux - 0x00099ec8, // n0x13fa c0x0000 (---------------) + homeunix - 0x000045c2, // n0x13fb c0x0000 (---------------) + hu - 0x00000242, // n0x13fc c0x0000 (---------------) + in - 0x00061ccb, // n0x13fd c0x0000 (---------------) + in-the-band - 0x0001f409, // n0x13fe c0x0000 (---------------) + is-a-chef - 0x00047789, // n0x13ff c0x0000 (---------------) + is-a-geek - 0x00013e48, // n0x1400 c0x0000 (---------------) + isa-geek - 0x000a2b02, // n0x1401 c0x0000 (---------------) + jp - 0x00132909, // n0x1402 c0x0000 (---------------) + kicks-ass - 0x00015fcd, // n0x1403 c0x0000 (---------------) + office-on-the - 0x000ccf07, // n0x1404 c0x0000 (---------------) + podzone - 0x000866cd, // n0x1405 c0x0000 (---------------) + scrapper-site - 0x00004982, // n0x1406 c0x0000 (---------------) + se - 0x00141246, // n0x1407 c0x0000 (---------------) + selfip - 0x0009f9c8, // n0x1408 c0x0000 (---------------) + sells-it - 0x0000be88, // n0x1409 c0x0000 (---------------) + servebbs - 0x0008e548, // n0x140a c0x0000 (---------------) + serveftp - 0x00041388, // n0x140b c0x0000 (---------------) + thruhere - 0x00000542, // n0x140c c0x0000 (---------------) + uk - 0x0002c186, // n0x140d c0x0000 (---------------) + webhop - 0x000028c2, // n0x140e c0x0000 (---------------) + za - 0x346cfec4, // n0x140f c0x00d1 (n0x1411-n0x1413) o I prod - 0x34a89b83, // n0x1410 c0x00d2 (n0x1413-n0x1416) o I ssl - 0x00000101, // n0x1411 c0x0000 (---------------) + a - 0x0000af86, // n0x1412 c0x0000 (---------------) + global - 0x00000101, // n0x1413 c0x0000 (---------------) + a - 0x00000001, // n0x1414 c0x0000 (---------------) + b - 0x0000af86, // n0x1415 c0x0000 (---------------) + global - 0x0020b384, // n0x1416 c0x0000 (---------------) + I arts - 0x00232dc3, // n0x1417 c0x0000 (---------------) + I com - 0x0024a304, // n0x1418 c0x0000 (---------------) + I firm - 0x00208a44, // n0x1419 c0x0000 (---------------) + I info - 0x00218643, // n0x141a c0x0000 (---------------) + I net - 0x00256305, // n0x141b c0x0000 (---------------) + I other - 0x0020c783, // n0x141c c0x0000 (---------------) + I per - 0x0022a143, // n0x141d c0x0000 (---------------) + I rec - 0x002dc745, // n0x141e c0x0000 (---------------) + I store - 0x002071c3, // n0x141f c0x0000 (---------------) + I web - 0x00232dc3, // n0x1420 c0x0000 (---------------) + I com - 0x0021e083, // n0x1421 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1422 c0x0000 (---------------) + I gov - 0x00240443, // n0x1423 c0x0000 (---------------) + I mil - 0x00277f84, // n0x1424 c0x0000 (---------------) + I mobi - 0x00267944, // n0x1425 c0x0000 (---------------) + I name - 0x00218643, // n0x1426 c0x0000 (---------------) + I net - 0x0024d043, // n0x1427 c0x0000 (---------------) + I org - 0x00251983, // n0x1428 c0x0000 (---------------) + I sch - 0x0009e448, // n0x1429 c0x0000 (---------------) + blogspot - 0x0036ed42, // n0x142a c0x0000 (---------------) + I bv - 
0x00000882, // n0x142b c0x0000 (---------------) + co - 0x35e01d82, // n0x142c c0x00d7 (n0x1702-n0x1703) + I aa - 0x00367188, // n0x142d c0x0000 (---------------) + I aarborte - 0x002292c6, // n0x142e c0x0000 (---------------) + I aejrie - 0x002137c6, // n0x142f c0x0000 (---------------) + I afjord - 0x00228c47, // n0x1430 c0x0000 (---------------) + I agdenes - 0x36203042, // n0x1431 c0x00d8 (n0x1703-n0x1704) + I ah - 0x3665d348, // n0x1432 c0x00d9 (n0x1704-n0x1705) o I akershus - 0x0033264a, // n0x1433 c0x0000 (---------------) + I aknoluokta - 0x0036ac48, // n0x1434 c0x0000 (---------------) + I akrehamn - 0x00200b02, // n0x1435 c0x0000 (---------------) + I al - 0x00366e49, // n0x1436 c0x0000 (---------------) + I alaheadju - 0x002364c7, // n0x1437 c0x0000 (---------------) + I alesund - 0x00219646, // n0x1438 c0x0000 (---------------) + I algard - 0x00381289, // n0x1439 c0x0000 (---------------) + I alstahaug - 0x0036d204, // n0x143a c0x0000 (---------------) + I alta - 0x002ac1c6, // n0x143b c0x0000 (---------------) + I alvdal - 0x002b0684, // n0x143c c0x0000 (---------------) + I amli - 0x00268bc4, // n0x143d c0x0000 (---------------) + I amot - 0x00253d09, // n0x143e c0x0000 (---------------) + I andasuolo - 0x0032dc86, // n0x143f c0x0000 (---------------) + I andebu - 0x0033c205, // n0x1440 c0x0000 (---------------) + I andoy - 0x00220845, // n0x1441 c0x0000 (---------------) + I ardal - 0x002650c7, // n0x1442 c0x0000 (---------------) + I aremark - 0x002b6587, // n0x1443 c0x0000 (---------------) + I arendal - 0x00283c44, // n0x1444 c0x0000 (---------------) + I arna - 0x00228e86, // n0x1445 c0x0000 (---------------) + I aseral - 0x002e7545, // n0x1446 c0x0000 (---------------) + I asker - 0x002fbe85, // n0x1447 c0x0000 (---------------) + I askim - 0x002acf05, // n0x1448 c0x0000 (---------------) + I askoy - 0x002e71c7, // n0x1449 c0x0000 (---------------) + I askvoll - 0x0035c5c5, // n0x144a c0x0000 (---------------) + I asnes - 0x002324c9, // n0x144b c0x0000 (---------------) + I audnedaln - 0x00360805, // n0x144c c0x0000 (---------------) + I aukra - 0x002f0f04, // n0x144d c0x0000 (---------------) + I aure - 0x0037d107, // n0x144e c0x0000 (---------------) + I aurland - 0x003656ce, // n0x144f c0x0000 (---------------) + I aurskog-holand - 0x00362fc9, // n0x1450 c0x0000 (---------------) + I austevoll - 0x002daa49, // n0x1451 c0x0000 (---------------) + I austrheim - 0x00296406, // n0x1452 c0x0000 (---------------) + I averoy - 0x0030e488, // n0x1453 c0x0000 (---------------) + I badaddja - 0x002fa94b, // n0x1454 c0x0000 (---------------) + I bahcavuotna - 0x003644cc, // n0x1455 c0x0000 (---------------) + I bahccavuotna - 0x00381c46, // n0x1456 c0x0000 (---------------) + I baidar - 0x002c32c7, // n0x1457 c0x0000 (---------------) + I bajddar - 0x0020b045, // n0x1458 c0x0000 (---------------) + I balat - 0x0034960a, // n0x1459 c0x0000 (---------------) + I balestrand - 0x00285949, // n0x145a c0x0000 (---------------) + I ballangen - 0x0032d049, // n0x145b c0x0000 (---------------) + I balsfjord - 0x002e5f86, // n0x145c c0x0000 (---------------) + I bamble - 0x002e0c85, // n0x145d c0x0000 (---------------) + I bardu - 0x002b9f45, // n0x145e c0x0000 (---------------) + I barum - 0x0035f049, // n0x145f c0x0000 (---------------) + I batsfjord - 0x0026f1cb, // n0x1460 c0x0000 (---------------) + I bearalvahki - 0x00275a86, // n0x1461 c0x0000 (---------------) + I beardu - 0x0021cb86, // n0x1462 c0x0000 (---------------) + I beiarn - 0x0020cc44, // n0x1463 c0x0000 
(---------------) + I berg - 0x00284506, // n0x1464 c0x0000 (---------------) + I bergen - 0x0020a748, // n0x1465 c0x0000 (---------------) + I berlevag - 0x00203186, // n0x1466 c0x0000 (---------------) + I bievat - 0x0034cb86, // n0x1467 c0x0000 (---------------) + I bindal - 0x00209d48, // n0x1468 c0x0000 (---------------) + I birkenes - 0x0020bb87, // n0x1469 c0x0000 (---------------) + I bjarkoy - 0x0020c549, // n0x146a c0x0000 (---------------) + I bjerkreim - 0x0020c8c5, // n0x146b c0x0000 (---------------) + I bjugn - 0x0009e448, // n0x146c c0x0000 (---------------) + blogspot - 0x00210044, // n0x146d c0x0000 (---------------) + I bodo - 0x00251b84, // n0x146e c0x0000 (---------------) + I bokn - 0x00214285, // n0x146f c0x0000 (---------------) + I bomlo - 0x0021fe49, // n0x1470 c0x0000 (---------------) + I bremanger - 0x00227987, // n0x1471 c0x0000 (---------------) + I bronnoy - 0x0022798b, // n0x1472 c0x0000 (---------------) + I bronnoysund - 0x0022808a, // n0x1473 c0x0000 (---------------) + I brumunddal - 0x0022c585, // n0x1474 c0x0000 (---------------) + I bryne - 0x36a07902, // n0x1475 c0x00da (n0x1705-n0x1706) + I bu - 0x0032dd87, // n0x1476 c0x0000 (---------------) + I budejju - 0x36ebfec8, // n0x1477 c0x00db (n0x1706-n0x1707) o I buskerud - 0x002be7c7, // n0x1478 c0x0000 (---------------) + I bygland - 0x002be5c5, // n0x1479 c0x0000 (---------------) + I bykle - 0x00245a8a, // n0x147a c0x0000 (---------------) + I cahcesuolo - 0x00000882, // n0x147b c0x0000 (---------------) + co - 0x00203e4b, // n0x147c c0x0000 (---------------) + I davvenjarga - 0x0020578a, // n0x147d c0x0000 (---------------) + I davvesiida - 0x002f5f06, // n0x147e c0x0000 (---------------) + I deatnu - 0x002da243, // n0x147f c0x0000 (---------------) + I dep - 0x00359e8d, // n0x1480 c0x0000 (---------------) + I dielddanuorri - 0x003601cc, // n0x1481 c0x0000 (---------------) + I divtasvuodna - 0x0032d24d, // n0x1482 c0x0000 (---------------) + I divttasvuotna - 0x0020d6c5, // n0x1483 c0x0000 (---------------) + I donna - 0x0024b845, // n0x1484 c0x0000 (---------------) + I dovre - 0x0020fc07, // n0x1485 c0x0000 (---------------) + I drammen - 0x00369b09, // n0x1486 c0x0000 (---------------) + I drangedal - 0x0036ab46, // n0x1487 c0x0000 (---------------) + I drobak - 0x00233dc5, // n0x1488 c0x0000 (---------------) + I dyroy - 0x00230908, // n0x1489 c0x0000 (---------------) + I egersund - 0x00272a83, // n0x148a c0x0000 (---------------) + I eid - 0x002f8648, // n0x148b c0x0000 (---------------) + I eidfjord - 0x00284408, // n0x148c c0x0000 (---------------) + I eidsberg - 0x002b1847, // n0x148d c0x0000 (---------------) + I eidskog - 0x00272a88, // n0x148e c0x0000 (---------------) + I eidsvoll - 0x00233bc9, // n0x148f c0x0000 (---------------) + I eigersund - 0x002e7f87, // n0x1490 c0x0000 (---------------) + I elverum - 0x002edb87, // n0x1491 c0x0000 (---------------) + I enebakk - 0x002abd48, // n0x1492 c0x0000 (---------------) + I engerdal - 0x0028c604, // n0x1493 c0x0000 (---------------) + I etne - 0x0028c607, // n0x1494 c0x0000 (---------------) + I etnedal - 0x0029af08, // n0x1495 c0x0000 (---------------) + I evenassi - 0x0025e746, // n0x1496 c0x0000 (---------------) + I evenes - 0x00220a4f, // n0x1497 c0x0000 (---------------) + I evje-og-hornnes - 0x002a4547, // n0x1498 c0x0000 (---------------) + I farsund - 0x002d1806, // n0x1499 c0x0000 (---------------) + I fauske - 0x00350705, // n0x149a c0x0000 (---------------) + I fedje - 0x00245f03, // n0x149b c0x0000 (---------------) + I 
fet - 0x00245f07, // n0x149c c0x0000 (---------------) + I fetsund - 0x00379703, // n0x149d c0x0000 (---------------) + I fhs - 0x00248e06, // n0x149e c0x0000 (---------------) + I finnoy - 0x0024b086, // n0x149f c0x0000 (---------------) + I fitjar - 0x0024bb06, // n0x14a0 c0x0000 (---------------) + I fjaler - 0x00288005, // n0x14a1 c0x0000 (---------------) + I fjell - 0x0025f803, // n0x14a2 c0x0000 (---------------) + I fla - 0x00346e08, // n0x14a3 c0x0000 (---------------) + I flakstad - 0x0033f849, // n0x14a4 c0x0000 (---------------) + I flatanger - 0x0034df8b, // n0x14a5 c0x0000 (---------------) + I flekkefjord - 0x0024c608, // n0x14a6 c0x0000 (---------------) + I flesberg - 0x0024dcc5, // n0x14a7 c0x0000 (---------------) + I flora - 0x0024e845, // n0x14a8 c0x0000 (---------------) + I floro - 0x37257f82, // n0x14a9 c0x00dc (n0x1707-n0x1708) + I fm - 0x0023ab09, // n0x14aa c0x0000 (---------------) + I folkebibl - 0x00250187, // n0x14ab c0x0000 (---------------) + I folldal - 0x00381145, // n0x14ac c0x0000 (---------------) + I forde - 0x00253c07, // n0x14ad c0x0000 (---------------) + I forsand - 0x00255a86, // n0x14ae c0x0000 (---------------) + I fosnes - 0x0033b685, // n0x14af c0x0000 (---------------) + I frana - 0x0025688b, // n0x14b0 c0x0000 (---------------) + I fredrikstad - 0x00256d04, // n0x14b1 c0x0000 (---------------) + I frei - 0x0025bd85, // n0x14b2 c0x0000 (---------------) + I frogn - 0x0025bec7, // n0x14b3 c0x0000 (---------------) + I froland - 0x0026eb46, // n0x14b4 c0x0000 (---------------) + I frosta - 0x0026ef85, // n0x14b5 c0x0000 (---------------) + I froya - 0x0027bfc7, // n0x14b6 c0x0000 (---------------) + I fuoisku - 0x0027c707, // n0x14b7 c0x0000 (---------------) + I fuossko - 0x002ea2c4, // n0x14b8 c0x0000 (---------------) + I fusa - 0x0028218a, // n0x14b9 c0x0000 (---------------) + I fylkesbibl - 0x00282648, // n0x14ba c0x0000 (---------------) + I fyresdal - 0x002fac09, // n0x14bb c0x0000 (---------------) + I gaivuotna - 0x00253a45, // n0x14bc c0x0000 (---------------) + I galsa - 0x00204086, // n0x14bd c0x0000 (---------------) + I gamvik - 0x0020a90a, // n0x14be c0x0000 (---------------) + I gangaviika - 0x00220746, // n0x14bf c0x0000 (---------------) + I gaular - 0x0020ac07, // n0x14c0 c0x0000 (---------------) + I gausdal - 0x002ecc8d, // n0x14c1 c0x0000 (---------------) + I giehtavuoatna - 0x00224549, // n0x14c2 c0x0000 (---------------) + I gildeskal - 0x002e4005, // n0x14c3 c0x0000 (---------------) + I giske - 0x00323147, // n0x14c4 c0x0000 (---------------) + I gjemnes - 0x0035fcc8, // n0x14c5 c0x0000 (---------------) + I gjerdrum - 0x00363a88, // n0x14c6 c0x0000 (---------------) + I gjerstad - 0x00369187, // n0x14c7 c0x0000 (---------------) + I gjesdal - 0x0036b446, // n0x14c8 c0x0000 (---------------) + I gjovik - 0x00212e87, // n0x14c9 c0x0000 (---------------) + I gloppen - 0x00218743, // n0x14ca c0x0000 (---------------) + I gol - 0x00315684, // n0x14cb c0x0000 (---------------) + I gran - 0x0031cac5, // n0x14cc c0x0000 (---------------) + I grane - 0x00338fc7, // n0x14cd c0x0000 (---------------) + I granvin - 0x00356d89, // n0x14ce c0x0000 (---------------) + I gratangen - 0x002f6208, // n0x14cf c0x0000 (---------------) + I grimstad - 0x0037c785, // n0x14d0 c0x0000 (---------------) + I grong - 0x0023c704, // n0x14d1 c0x0000 (---------------) + I grue - 0x002497c5, // n0x14d2 c0x0000 (---------------) + I gulen - 0x0025068d, // n0x14d3 c0x0000 (---------------) + I guovdageaidnu - 0x00203082, // n0x14d4 c0x0000 
(---------------) + I ha - 0x003062c6, // n0x14d5 c0x0000 (---------------) + I habmer - 0x00341186, // n0x14d6 c0x0000 (---------------) + I hadsel - 0x002ffdca, // n0x14d7 c0x0000 (---------------) + I hagebostad - 0x003467c6, // n0x14d8 c0x0000 (---------------) + I halden - 0x0034c845, // n0x14d9 c0x0000 (---------------) + I halsa - 0x002b0b45, // n0x14da c0x0000 (---------------) + I hamar - 0x002b0b47, // n0x14db c0x0000 (---------------) + I hamaroy - 0x0034f98c, // n0x14dc c0x0000 (---------------) + I hammarfeasta - 0x0036c68a, // n0x14dd c0x0000 (---------------) + I hammerfest - 0x00283a46, // n0x14de c0x0000 (---------------) + I hapmir - 0x0026e385, // n0x14df c0x0000 (---------------) + I haram - 0x00284346, // n0x14e0 c0x0000 (---------------) + I hareid - 0x00284687, // n0x14e1 c0x0000 (---------------) + I harstad - 0x00286506, // n0x14e2 c0x0000 (---------------) + I hasvik - 0x00287f0c, // n0x14e3 c0x0000 (---------------) + I hattfjelldal - 0x003813c9, // n0x14e4 c0x0000 (---------------) + I haugesund - 0x37651fc7, // n0x14e5 c0x00dd (n0x1708-n0x170b) o I hedmark - 0x00289485, // n0x14e6 c0x0000 (---------------) + I hemne - 0x00289486, // n0x14e7 c0x0000 (---------------) + I hemnes - 0x00289808, // n0x14e8 c0x0000 (---------------) + I hemsedal - 0x0025c905, // n0x14e9 c0x0000 (---------------) + I herad - 0x002987c5, // n0x14ea c0x0000 (---------------) + I hitra - 0x00298a08, // n0x14eb c0x0000 (---------------) + I hjartdal - 0x00298c0a, // n0x14ec c0x0000 (---------------) + I hjelmeland - 0x37a9c882, // n0x14ed c0x00de (n0x170b-n0x170c) + I hl - 0x37e4c142, // n0x14ee c0x00df (n0x170c-n0x170d) + I hm - 0x002df885, // n0x14ef c0x0000 (---------------) + I hobol - 0x002a1d83, // n0x14f0 c0x0000 (---------------) + I hof - 0x0022e1c8, // n0x14f1 c0x0000 (---------------) + I hokksund - 0x00274dc3, // n0x14f2 c0x0000 (---------------) + I hol - 0x00298e84, // n0x14f3 c0x0000 (---------------) + I hole - 0x0036c14b, // n0x14f4 c0x0000 (---------------) + I holmestrand - 0x002abbc8, // n0x14f5 c0x0000 (---------------) + I holtalen - 0x0029a448, // n0x14f6 c0x0000 (---------------) + I honefoss - 0x38322189, // n0x14f7 c0x00e0 (n0x170d-n0x170e) o I hordaland - 0x0029c989, // n0x14f8 c0x0000 (---------------) + I hornindal - 0x0029d306, // n0x14f9 c0x0000 (---------------) + I horten - 0x0029ebc8, // n0x14fa c0x0000 (---------------) + I hoyanger - 0x0029edc9, // n0x14fb c0x0000 (---------------) + I hoylandet - 0x0029f486, // n0x14fc c0x0000 (---------------) + I hurdal - 0x0029f605, // n0x14fd c0x0000 (---------------) + I hurum - 0x00246f46, // n0x14fe c0x0000 (---------------) + I hvaler - 0x00233049, // n0x14ff c0x0000 (---------------) + I hyllestad - 0x0036de47, // n0x1500 c0x0000 (---------------) + I ibestad - 0x00257906, // n0x1501 c0x0000 (---------------) + I idrett - 0x003850c7, // n0x1502 c0x0000 (---------------) + I inderoy - 0x00334ac7, // n0x1503 c0x0000 (---------------) + I iveland - 0x00262884, // n0x1504 c0x0000 (---------------) + I ivgu - 0x3861ed89, // n0x1505 c0x00e1 (n0x170e-n0x170f) + I jan-mayen - 0x002b8908, // n0x1506 c0x0000 (---------------) + I jessheim - 0x00336388, // n0x1507 c0x0000 (---------------) + I jevnaker - 0x00246d47, // n0x1508 c0x0000 (---------------) + I jolster - 0x002b5346, // n0x1509 c0x0000 (---------------) + I jondal - 0x00381f49, // n0x150a c0x0000 (---------------) + I jorpeland - 0x00213787, // n0x150b c0x0000 (---------------) + I kafjord - 0x0023000a, // n0x150c c0x0000 (---------------) + I karasjohka - 
0x002e4308, // n0x150d c0x0000 (---------------) + I karasjok - 0x00366687, // n0x150e c0x0000 (---------------) + I karlsoy - 0x0036ce06, // n0x150f c0x0000 (---------------) + I karmoy - 0x00262c0a, // n0x1510 c0x0000 (---------------) + I kautokeino - 0x00335688, // n0x1511 c0x0000 (---------------) + I kirkenes - 0x00274105, // n0x1512 c0x0000 (---------------) + I klabu - 0x0022b205, // n0x1513 c0x0000 (---------------) + I klepp - 0x00331e87, // n0x1514 c0x0000 (---------------) + I kommune - 0x002b2e89, // n0x1515 c0x0000 (---------------) + I kongsberg - 0x002b3fcb, // n0x1516 c0x0000 (---------------) + I kongsvinger - 0x00331cc8, // n0x1517 c0x0000 (---------------) + I kopervik - 0x00360889, // n0x1518 c0x0000 (---------------) + I kraanghke - 0x002a3bc7, // n0x1519 c0x0000 (---------------) + I kragero - 0x002a4d4c, // n0x151a c0x0000 (---------------) + I kristiansand - 0x002a51cc, // n0x151b c0x0000 (---------------) + I kristiansund - 0x002a54ca, // n0x151c c0x0000 (---------------) + I krodsherad - 0x002a574c, // n0x151d c0x0000 (---------------) + I krokstadelva - 0x002b0208, // n0x151e c0x0000 (---------------) + I kvafjord - 0x002b0408, // n0x151f c0x0000 (---------------) + I kvalsund - 0x002b0604, // n0x1520 c0x0000 (---------------) + I kvam - 0x002b0fc9, // n0x1521 c0x0000 (---------------) + I kvanangen - 0x002b1209, // n0x1522 c0x0000 (---------------) + I kvinesdal - 0x002b144a, // n0x1523 c0x0000 (---------------) + I kvinnherad - 0x002b16c9, // n0x1524 c0x0000 (---------------) + I kviteseid - 0x002b1a07, // n0x1525 c0x0000 (---------------) + I kvitsoy - 0x00201d4c, // n0x1526 c0x0000 (---------------) + I laakesvuemie - 0x00243706, // n0x1527 c0x0000 (---------------) + I lahppi - 0x00263a88, // n0x1528 c0x0000 (---------------) + I langevag - 0x00220806, // n0x1529 c0x0000 (---------------) + I lardal - 0x002c1006, // n0x152a c0x0000 (---------------) + I larvik - 0x002e3f07, // n0x152b c0x0000 (---------------) + I lavagis - 0x00337b88, // n0x152c c0x0000 (---------------) + I lavangen - 0x0027f10b, // n0x152d c0x0000 (---------------) + I leangaviika - 0x002be687, // n0x152e c0x0000 (---------------) + I lebesby - 0x0033bb49, // n0x152f c0x0000 (---------------) + I leikanger - 0x002f5309, // n0x1530 c0x0000 (---------------) + I leirfjord - 0x00295187, // n0x1531 c0x0000 (---------------) + I leirvik - 0x0021c504, // n0x1532 c0x0000 (---------------) + I leka - 0x002b2d07, // n0x1533 c0x0000 (---------------) + I leksvik - 0x002b6706, // n0x1534 c0x0000 (---------------) + I lenvik - 0x0024bbc6, // n0x1535 c0x0000 (---------------) + I lerdal - 0x00229d05, // n0x1536 c0x0000 (---------------) + I lesja - 0x00278688, // n0x1537 c0x0000 (---------------) + I levanger - 0x00234484, // n0x1538 c0x0000 (---------------) + I lier - 0x00234486, // n0x1539 c0x0000 (---------------) + I lierne - 0x0036c54b, // n0x153a c0x0000 (---------------) + I lillehammer - 0x00322749, // n0x153b c0x0000 (---------------) + I lillesand - 0x002fbd86, // n0x153c c0x0000 (---------------) + I lindas - 0x002fce09, // n0x153d c0x0000 (---------------) + I lindesnes - 0x00214346, // n0x153e c0x0000 (---------------) + I loabat - 0x002ca108, // n0x153f c0x0000 (---------------) + I lodingen - 0x00210a03, // n0x1540 c0x0000 (---------------) + I lom - 0x00208f45, // n0x1541 c0x0000 (---------------) + I loppa - 0x0020ad89, // n0x1542 c0x0000 (---------------) + I lorenskog - 0x00219d45, // n0x1543 c0x0000 (---------------) + I loten - 0x002d3bc4, // n0x1544 c0x0000 (---------------) + 
I lund - 0x0026dd86, // n0x1545 c0x0000 (---------------) + I lunner - 0x00236e85, // n0x1546 c0x0000 (---------------) + I luroy - 0x0023ea46, // n0x1547 c0x0000 (---------------) + I luster - 0x002f3747, // n0x1548 c0x0000 (---------------) + I lyngdal - 0x002637c6, // n0x1549 c0x0000 (---------------) + I lyngen - 0x0028db8b, // n0x154a c0x0000 (---------------) + I malatvuopmi - 0x003453c7, // n0x154b c0x0000 (---------------) + I malselv - 0x00375d46, // n0x154c c0x0000 (---------------) + I malvik - 0x002fbc46, // n0x154d c0x0000 (---------------) + I mandal - 0x00265186, // n0x154e c0x0000 (---------------) + I marker - 0x00283c09, // n0x154f c0x0000 (---------------) + I marnardal - 0x0021d68a, // n0x1550 c0x0000 (---------------) + I masfjorden - 0x0035b0c5, // n0x1551 c0x0000 (---------------) + I masoy - 0x0034498d, // n0x1552 c0x0000 (---------------) + I matta-varjjat - 0x00298d06, // n0x1553 c0x0000 (---------------) + I meland - 0x002911c6, // n0x1554 c0x0000 (---------------) + I meldal - 0x0025f106, // n0x1555 c0x0000 (---------------) + I melhus - 0x0036be05, // n0x1556 c0x0000 (---------------) + I meloy - 0x0025d287, // n0x1557 c0x0000 (---------------) + I meraker - 0x0028c107, // n0x1558 c0x0000 (---------------) + I midsund - 0x0026c10e, // n0x1559 c0x0000 (---------------) + I midtre-gauldal - 0x00240443, // n0x155a c0x0000 (---------------) + I mil - 0x002b5309, // n0x155b c0x0000 (---------------) + I mjondalen - 0x00309f49, // n0x155c c0x0000 (---------------) + I mo-i-rana - 0x00349a07, // n0x155d c0x0000 (---------------) + I moareke - 0x00226fc7, // n0x155e c0x0000 (---------------) + I modalen - 0x002fb885, // n0x155f c0x0000 (---------------) + I modum - 0x002da185, // n0x1560 c0x0000 (---------------) + I molde - 0x38ad9d8f, // n0x1561 c0x00e2 (n0x170f-n0x1711) o I more-og-romsdal - 0x002bc007, // n0x1562 c0x0000 (---------------) + I mosjoen - 0x002bc1c8, // n0x1563 c0x0000 (---------------) + I moskenes - 0x002bcb44, // n0x1564 c0x0000 (---------------) + I moss - 0x002bd046, // n0x1565 c0x0000 (---------------) + I mosvik - 0x38e14202, // n0x1566 c0x00e3 (n0x1711-n0x1712) + I mr - 0x002c0906, // n0x1567 c0x0000 (---------------) + I muosat - 0x002c2a06, // n0x1568 c0x0000 (---------------) + I museum - 0x0030a10e, // n0x1569 c0x0000 (---------------) + I naamesjevuemie - 0x002f848a, // n0x156a c0x0000 (---------------) + I namdalseid - 0x00262406, // n0x156b c0x0000 (---------------) + I namsos - 0x002d4dca, // n0x156c c0x0000 (---------------) + I namsskogan - 0x0024f509, // n0x156d c0x0000 (---------------) + I nannestad - 0x00355045, // n0x156e c0x0000 (---------------) + I naroy - 0x0037ec48, // n0x156f c0x0000 (---------------) + I narviika - 0x00366546, // n0x1570 c0x0000 (---------------) + I narvik - 0x003768c8, // n0x1571 c0x0000 (---------------) + I naustdal - 0x00201808, // n0x1572 c0x0000 (---------------) + I navuotna - 0x002860cb, // n0x1573 c0x0000 (---------------) + I nedre-eiker - 0x00228d45, // n0x1574 c0x0000 (---------------) + I nesna - 0x0035c648, // n0x1575 c0x0000 (---------------) + I nesodden - 0x00209e8c, // n0x1576 c0x0000 (---------------) + I nesoddtangen - 0x002742c7, // n0x1577 c0x0000 (---------------) + I nesseby - 0x0024b2c6, // n0x1578 c0x0000 (---------------) + I nesset - 0x002260c8, // n0x1579 c0x0000 (---------------) + I nissedal - 0x002ac048, // n0x157a c0x0000 (---------------) + I nittedal - 0x39248cc2, // n0x157b c0x00e4 (n0x1712-n0x1713) + I nl - 0x003596cb, // n0x157c c0x0000 (---------------) + I 
nord-aurdal - 0x002c7e89, // n0x157d c0x0000 (---------------) + I nord-fron - 0x002fcc09, // n0x157e c0x0000 (---------------) + I nord-odal - 0x002ad107, // n0x157f c0x0000 (---------------) + I norddal - 0x002a6d88, // n0x1580 c0x0000 (---------------) + I nordkapp - 0x39749c08, // n0x1581 c0x00e5 (n0x1713-n0x1717) o I nordland - 0x00215b8b, // n0x1582 c0x0000 (---------------) + I nordre-land - 0x00213cc9, // n0x1583 c0x0000 (---------------) + I nordreisa - 0x0023b64d, // n0x1584 c0x0000 (---------------) + I nore-og-uvdal - 0x00282d88, // n0x1585 c0x0000 (---------------) + I notodden - 0x00292a08, // n0x1586 c0x0000 (---------------) + I notteroy - 0x39a00902, // n0x1587 c0x00e6 (n0x1717-n0x1718) + I nt - 0x00320204, // n0x1588 c0x0000 (---------------) + I odda - 0x39e15fc2, // n0x1589 c0x00e7 (n0x1718-n0x1719) + I of - 0x002e4486, // n0x158a c0x0000 (---------------) + I oksnes - 0x3a2009c2, // n0x158b c0x00e8 (n0x1719-n0x171a) + I ol - 0x0022b7ca, // n0x158c c0x0000 (---------------) + I omasvuotna - 0x0022c286, // n0x158d c0x0000 (---------------) + I oppdal - 0x00221c08, // n0x158e c0x0000 (---------------) + I oppegard - 0x0030ce08, // n0x158f c0x0000 (---------------) + I orkanger - 0x0030e786, // n0x1590 c0x0000 (---------------) + I orkdal - 0x002ffa46, // n0x1591 c0x0000 (---------------) + I orland - 0x002d3386, // n0x1592 c0x0000 (---------------) + I orskog - 0x002a7c45, // n0x1593 c0x0000 (---------------) + I orsta - 0x0023cfc4, // n0x1594 c0x0000 (---------------) + I osen - 0x3a610984, // n0x1595 c0x00e9 (n0x171a-n0x171b) + I oslo - 0x00210786, // n0x1596 c0x0000 (---------------) + I osoyro - 0x00255487, // n0x1597 c0x0000 (---------------) + I osteroy - 0x3aaf1447, // n0x1598 c0x00ea (n0x171b-n0x171c) o I ostfold - 0x0020838b, // n0x1599 c0x0000 (---------------) + I ostre-toten - 0x00365a89, // n0x159a c0x0000 (---------------) + I overhalla - 0x0024b88a, // n0x159b c0x0000 (---------------) + I ovre-eiker - 0x00309e04, // n0x159c c0x0000 (---------------) + I oyer - 0x002b0c88, // n0x159d c0x0000 (---------------) + I oygarden - 0x002576cd, // n0x159e c0x0000 (---------------) + I oystre-slidre - 0x002ce049, // n0x159f c0x0000 (---------------) + I porsanger - 0x002ce288, // n0x15a0 c0x0000 (---------------) + I porsangu - 0x002ce509, // n0x15a1 c0x0000 (---------------) + I porsgrunn - 0x002cfac4, // n0x15a2 c0x0000 (---------------) + I priv - 0x0021bbc4, // n0x15a3 c0x0000 (---------------) + I rade - 0x0022d985, // n0x15a4 c0x0000 (---------------) + I radoy - 0x0023828b, // n0x15a5 c0x0000 (---------------) + I rahkkeravju - 0x002abb46, // n0x15a6 c0x0000 (---------------) + I raholt - 0x002a8705, // n0x15a7 c0x0000 (---------------) + I raisa - 0x002fb309, // n0x15a8 c0x0000 (---------------) + I rakkestad - 0x00228f48, // n0x15a9 c0x0000 (---------------) + I ralingen - 0x0026d544, // n0x15aa c0x0000 (---------------) + I rana - 0x00349789, // n0x15ab c0x0000 (---------------) + I randaberg - 0x00364845, // n0x15ac c0x0000 (---------------) + I rauma - 0x002b65c8, // n0x15ad c0x0000 (---------------) + I rendalen - 0x002ee247, // n0x15ae c0x0000 (---------------) + I rennebu - 0x002dae48, // n0x15af c0x0000 (---------------) + I rennesoy - 0x002b4f06, // n0x15b0 c0x0000 (---------------) + I rindal - 0x0024a707, // n0x15b1 c0x0000 (---------------) + I ringebu - 0x0030de09, // n0x15b2 c0x0000 (---------------) + I ringerike - 0x0022e889, // n0x15b3 c0x0000 (---------------) + I ringsaker - 0x00256605, // n0x15b4 c0x0000 (---------------) + I risor - 
0x0035a145, // n0x15b5 c0x0000 (---------------) + I rissa - 0x3ae0a7c2, // n0x15b6 c0x00eb (n0x171c-n0x171d) + I rl - 0x002f0cc4, // n0x15b7 c0x0000 (---------------) + I roan - 0x00353185, // n0x15b8 c0x0000 (---------------) + I rodoy - 0x00307806, // n0x15b9 c0x0000 (---------------) + I rollag - 0x0030c4c5, // n0x15ba c0x0000 (---------------) + I romsa - 0x0024e907, // n0x15bb c0x0000 (---------------) + I romskog - 0x002f5145, // n0x15bc c0x0000 (---------------) + I roros - 0x0026eb84, // n0x15bd c0x0000 (---------------) + I rost - 0x002964c6, // n0x15be c0x0000 (---------------) + I royken - 0x00233e47, // n0x15bf c0x0000 (---------------) + I royrvik - 0x002360c6, // n0x15c0 c0x0000 (---------------) + I ruovat - 0x0032e605, // n0x15c1 c0x0000 (---------------) + I rygge - 0x00215248, // n0x15c2 c0x0000 (---------------) + I salangen - 0x00215885, // n0x15c3 c0x0000 (---------------) + I salat - 0x00220dc7, // n0x15c4 c0x0000 (---------------) + I saltdal - 0x0022c749, // n0x15c5 c0x0000 (---------------) + I samnanger - 0x002a4f4a, // n0x15c6 c0x0000 (---------------) + I sandefjord - 0x002ccc07, // n0x15c7 c0x0000 (---------------) + I sandnes - 0x002ccc0c, // n0x15c8 c0x0000 (---------------) + I sandnessjoen - 0x0033c1c6, // n0x15c9 c0x0000 (---------------) + I sandoy - 0x0024cec9, // n0x15ca c0x0000 (---------------) + I sarpsborg - 0x0025bc45, // n0x15cb c0x0000 (---------------) + I sauda - 0x0025c848, // n0x15cc c0x0000 (---------------) + I sauherad - 0x00212d03, // n0x15cd c0x0000 (---------------) + I sel - 0x00212d05, // n0x15ce c0x0000 (---------------) + I selbu - 0x002d1d45, // n0x15cf c0x0000 (---------------) + I selje - 0x002892c7, // n0x15d0 c0x0000 (---------------) + I seljord - 0x3b21a182, // n0x15d1 c0x00ec (n0x171d-n0x171e) + I sf - 0x002cc787, // n0x15d2 c0x0000 (---------------) + I siellak - 0x00315906, // n0x15d3 c0x0000 (---------------) + I sigdal - 0x0021ecc6, // n0x15d4 c0x0000 (---------------) + I siljan - 0x0031ae06, // n0x15d5 c0x0000 (---------------) + I sirdal - 0x002abf86, // n0x15d6 c0x0000 (---------------) + I skanit - 0x002f9b88, // n0x15d7 c0x0000 (---------------) + I skanland - 0x002728c5, // n0x15d8 c0x0000 (---------------) + I skaun - 0x002d18c7, // n0x15d9 c0x0000 (---------------) + I skedsmo - 0x002d18cd, // n0x15da c0x0000 (---------------) + I skedsmokorset - 0x00221943, // n0x15db c0x0000 (---------------) + I ski - 0x00221945, // n0x15dc c0x0000 (---------------) + I skien - 0x002fef47, // n0x15dd c0x0000 (---------------) + I skierva - 0x002ff588, // n0x15de c0x0000 (---------------) + I skiptvet - 0x00332585, // n0x15df c0x0000 (---------------) + I skjak - 0x00361148, // n0x15e0 c0x0000 (---------------) + I skjervoy - 0x0022edc6, // n0x15e1 c0x0000 (---------------) + I skodje - 0x00289bc7, // n0x15e2 c0x0000 (---------------) + I slattum - 0x002b9745, // n0x15e3 c0x0000 (---------------) + I smola - 0x00228dc6, // n0x15e4 c0x0000 (---------------) + I snaase - 0x0036ef85, // n0x15e5 c0x0000 (---------------) + I snasa - 0x00223fca, // n0x15e6 c0x0000 (---------------) + I snillfjord - 0x002af646, // n0x15e7 c0x0000 (---------------) + I snoasa - 0x0026d1c7, // n0x15e8 c0x0000 (---------------) + I sogndal - 0x00280345, // n0x15e9 c0x0000 (---------------) + I sogne - 0x0037a8c7, // n0x15ea c0x0000 (---------------) + I sokndal - 0x00310944, // n0x15eb c0x0000 (---------------) + I sola - 0x002d3b46, // n0x15ec c0x0000 (---------------) + I solund - 0x002d4d05, // n0x15ed c0x0000 (---------------) + I somna - 
0x0032da8b, // n0x15ee c0x0000 (---------------) + I sondre-land - 0x00218c09, // n0x15ef c0x0000 (---------------) + I songdalen - 0x00244c4a, // n0x15f0 c0x0000 (---------------) + I sor-aurdal - 0x00256688, // n0x15f1 c0x0000 (---------------) + I sor-fron - 0x002d5b48, // n0x15f2 c0x0000 (---------------) + I sor-odal - 0x002d5d4c, // n0x15f3 c0x0000 (---------------) + I sor-varanger - 0x002d6047, // n0x15f4 c0x0000 (---------------) + I sorfold - 0x002d6208, // n0x15f5 c0x0000 (---------------) + I sorreisa - 0x002d70c8, // n0x15f6 c0x0000 (---------------) + I sortland - 0x002d72c5, // n0x15f7 c0x0000 (---------------) + I sorum - 0x002db80a, // n0x15f8 c0x0000 (---------------) + I spjelkavik - 0x002dc049, // n0x15f9 c0x0000 (---------------) + I spydeberg - 0x3b604682, // n0x15fa c0x00ed (n0x171e-n0x171f) + I st - 0x00344106, // n0x15fb c0x0000 (---------------) + I stange - 0x002092c4, // n0x15fc c0x0000 (---------------) + I stat - 0x002092c9, // n0x15fd c0x0000 (---------------) + I stathelle - 0x002e5a09, // n0x15fe c0x0000 (---------------) + I stavanger - 0x003663c7, // n0x15ff c0x0000 (---------------) + I stavern - 0x00267507, // n0x1600 c0x0000 (---------------) + I steigen - 0x002d7789, // n0x1601 c0x0000 (---------------) + I steinkjer - 0x00204688, // n0x1602 c0x0000 (---------------) + I stjordal - 0x0020468f, // n0x1603 c0x0000 (---------------) + I stjordalshalsen - 0x0023a706, // n0x1604 c0x0000 (---------------) + I stokke - 0x0028358b, // n0x1605 c0x0000 (---------------) + I stor-elvdal - 0x002dc585, // n0x1606 c0x0000 (---------------) + I stord - 0x002dc587, // n0x1607 c0x0000 (---------------) + I stordal - 0x002dc9c9, // n0x1608 c0x0000 (---------------) + I storfjord - 0x00349706, // n0x1609 c0x0000 (---------------) + I strand - 0x00349707, // n0x160a c0x0000 (---------------) + I stranda - 0x0026ff85, // n0x160b c0x0000 (---------------) + I stryn - 0x00235ec4, // n0x160c c0x0000 (---------------) + I sula - 0x00380706, // n0x160d c0x0000 (---------------) + I suldal - 0x00227b44, // n0x160e c0x0000 (---------------) + I sund - 0x00337787, // n0x160f c0x0000 (---------------) + I sunndal - 0x0031f3c8, // n0x1610 c0x0000 (---------------) + I surnadal - 0x3bae0b88, // n0x1611 c0x00ee (n0x171f-n0x1720) + I svalbard - 0x002e1585, // n0x1612 c0x0000 (---------------) + I sveio - 0x002e16c7, // n0x1613 c0x0000 (---------------) + I svelvik - 0x00218449, // n0x1614 c0x0000 (---------------) + I sykkylven - 0x00202544, // n0x1615 c0x0000 (---------------) + I tana - 0x002ca948, // n0x1616 c0x0000 (---------------) + I tananger - 0x3bf13f88, // n0x1617 c0x00ef (n0x1720-n0x1722) o I telemark - 0x00246a44, // n0x1618 c0x0000 (---------------) + I time - 0x00236cc8, // n0x1619 c0x0000 (---------------) + I tingvoll - 0x0036b8c4, // n0x161a c0x0000 (---------------) + I tinn - 0x0022bdc9, // n0x161b c0x0000 (---------------) + I tjeldsund - 0x0025f045, // n0x161c c0x0000 (---------------) + I tjome - 0x3c2032c2, // n0x161d c0x00f0 (n0x1722-n0x1723) + I tm - 0x0023a745, // n0x161e c0x0000 (---------------) + I tokke - 0x00220685, // n0x161f c0x0000 (---------------) + I tolga - 0x00305ac8, // n0x1620 c0x0000 (---------------) + I tonsberg - 0x00238947, // n0x1621 c0x0000 (---------------) + I torsken - 0x3c600942, // n0x1622 c0x00f1 (n0x1723-n0x1724) + I tr - 0x0026d505, // n0x1623 c0x0000 (---------------) + I trana - 0x00270506, // n0x1624 c0x0000 (---------------) + I tranby - 0x002885c6, // n0x1625 c0x0000 (---------------) + I tranoy - 0x002f0c88, // n0x1626 
c0x0000 (---------------) + I troandin - 0x002f2988, // n0x1627 c0x0000 (---------------) + I trogstad - 0x0030c486, // n0x1628 c0x0000 (---------------) + I tromsa - 0x00310846, // n0x1629 c0x0000 (---------------) + I tromso - 0x0036ca89, // n0x162a c0x0000 (---------------) + I trondheim - 0x0033aa06, // n0x162b c0x0000 (---------------) + I trysil - 0x00372d4b, // n0x162c c0x0000 (---------------) + I tvedestrand - 0x00322645, // n0x162d c0x0000 (---------------) + I tydal - 0x0021cf86, // n0x162e c0x0000 (---------------) + I tynset - 0x00280648, // n0x162f c0x0000 (---------------) + I tysfjord - 0x002e8c06, // n0x1630 c0x0000 (---------------) + I tysnes - 0x002c7806, // n0x1631 c0x0000 (---------------) + I tysvar - 0x00214a4a, // n0x1632 c0x0000 (---------------) + I ullensaker - 0x0034b6ca, // n0x1633 c0x0000 (---------------) + I ullensvang - 0x00254e85, // n0x1634 c0x0000 (---------------) + I ulvik - 0x0021a747, // n0x1635 c0x0000 (---------------) + I unjarga - 0x002d0606, // n0x1636 c0x0000 (---------------) + I utsira - 0x3ca03242, // n0x1637 c0x00f2 (n0x1724-n0x1725) + I va - 0x002ff087, // n0x1638 c0x0000 (---------------) + I vaapste - 0x0026d105, // n0x1639 c0x0000 (---------------) + I vadso - 0x0020a884, // n0x163a c0x0000 (---------------) + I vaga - 0x0020a885, // n0x163b c0x0000 (---------------) + I vagan - 0x00309d06, // n0x163c c0x0000 (---------------) + I vagsoy - 0x003336c7, // n0x163d c0x0000 (---------------) + I vaksdal - 0x0021e945, // n0x163e c0x0000 (---------------) + I valle - 0x00278704, // n0x163f c0x0000 (---------------) + I vang - 0x00268448, // n0x1640 c0x0000 (---------------) + I vanylven - 0x002c78c5, // n0x1641 c0x0000 (---------------) + I vardo - 0x0037a607, // n0x1642 c0x0000 (---------------) + I varggat - 0x002e5e05, // n0x1643 c0x0000 (---------------) + I varoy - 0x00223f05, // n0x1644 c0x0000 (---------------) + I vefsn - 0x0028e884, // n0x1645 c0x0000 (---------------) + I vega - 0x002a05c9, // n0x1646 c0x0000 (---------------) + I vegarshei - 0x002e7388, // n0x1647 c0x0000 (---------------) + I vennesla - 0x002e5c86, // n0x1648 c0x0000 (---------------) + I verdal - 0x002e79c6, // n0x1649 c0x0000 (---------------) + I verran - 0x002be4c6, // n0x164a c0x0000 (---------------) + I vestby - 0x3cee9488, // n0x164b c0x00f3 (n0x1725-n0x1726) o I vestfold - 0x002e9687, // n0x164c c0x0000 (---------------) + I vestnes - 0x002e9a4d, // n0x164d c0x0000 (---------------) + I vestre-slidre - 0x002ead4c, // n0x164e c0x0000 (---------------) + I vestre-toten - 0x002eb349, // n0x164f c0x0000 (---------------) + I vestvagoy - 0x002eb589, // n0x1650 c0x0000 (---------------) + I vevelstad - 0x3d3306c2, // n0x1651 c0x00f4 (n0x1726-n0x1727) + I vf - 0x00378c43, // n0x1652 c0x0000 (---------------) + I vgs - 0x00204143, // n0x1653 c0x0000 (---------------) + I vik - 0x00233f45, // n0x1654 c0x0000 (---------------) + I vikna - 0x003390ca, // n0x1655 c0x0000 (---------------) + I vindafjord - 0x0030c346, // n0x1656 c0x0000 (---------------) + I voagat - 0x002f2185, // n0x1657 c0x0000 (---------------) + I volda - 0x002f3c84, // n0x1658 c0x0000 (---------------) + I voss - 0x002f3c8b, // n0x1659 c0x0000 (---------------) + I vossevangen - 0x0030088c, // n0x165a c0x0000 (---------------) + I xn--andy-ira - 0x0030108c, // n0x165b c0x0000 (---------------) + I xn--asky-ira - 0x00301395, // n0x165c c0x0000 (---------------) + I xn--aurskog-hland-jnb - 0x0030208d, // n0x165d c0x0000 (---------------) + I xn--avery-yua - 0x0030310f, // n0x165e c0x0000 
(---------------) + I xn--bdddj-mrabd - 0x003034d2, // n0x165f c0x0000 (---------------) + I xn--bearalvhki-y4a - 0x0030394f, // n0x1660 c0x0000 (---------------) + I xn--berlevg-jxa - 0x00303d12, // n0x1661 c0x0000 (---------------) + I xn--bhcavuotna-s4a - 0x00304193, // n0x1662 c0x0000 (---------------) + I xn--bhccavuotna-k7a - 0x0030464d, // n0x1663 c0x0000 (---------------) + I xn--bidr-5nac - 0x00304c0d, // n0x1664 c0x0000 (---------------) + I xn--bievt-0qa - 0x00304f4e, // n0x1665 c0x0000 (---------------) + I xn--bjarky-fya - 0x00305cce, // n0x1666 c0x0000 (---------------) + I xn--bjddar-pta - 0x0030644c, // n0x1667 c0x0000 (---------------) + I xn--blt-elab - 0x003067cc, // n0x1668 c0x0000 (---------------) + I xn--bmlo-gra - 0x00306c8b, // n0x1669 c0x0000 (---------------) + I xn--bod-2na - 0x003088ce, // n0x166a c0x0000 (---------------) + I xn--brnny-wuac - 0x0030b5d2, // n0x166b c0x0000 (---------------) + I xn--brnnysund-m8ac - 0x0030c10c, // n0x166c c0x0000 (---------------) + I xn--brum-voa - 0x0030d010, // n0x166d c0x0000 (---------------) + I xn--btsfjord-9za - 0x003142d2, // n0x166e c0x0000 (---------------) + I xn--davvenjrga-y4a - 0x0031544c, // n0x166f c0x0000 (---------------) + I xn--dnna-gra - 0x00315a8d, // n0x1670 c0x0000 (---------------) + I xn--drbak-wua - 0x00315dcc, // n0x1671 c0x0000 (---------------) + I xn--dyry-ira - 0x00317f51, // n0x1672 c0x0000 (---------------) + I xn--eveni-0qa01ga - 0x0031864d, // n0x1673 c0x0000 (---------------) + I xn--finny-yua - 0x0031b70d, // n0x1674 c0x0000 (---------------) + I xn--fjord-lra - 0x0031bd0a, // n0x1675 c0x0000 (---------------) + I xn--fl-zia - 0x0031bf8c, // n0x1676 c0x0000 (---------------) + I xn--flor-jra - 0x0031c88c, // n0x1677 c0x0000 (---------------) + I xn--frde-gra - 0x0031cc0c, // n0x1678 c0x0000 (---------------) + I xn--frna-woa - 0x0031d38c, // n0x1679 c0x0000 (---------------) + I xn--frya-hra - 0x0031e393, // n0x167a c0x0000 (---------------) + I xn--ggaviika-8ya47h - 0x0031eb50, // n0x167b c0x0000 (---------------) + I xn--gildeskl-g0a - 0x0031ef50, // n0x167c c0x0000 (---------------) + I xn--givuotna-8ya - 0x0031f5cd, // n0x167d c0x0000 (---------------) + I xn--gjvik-wua - 0x0031f90c, // n0x167e c0x0000 (---------------) + I xn--gls-elac - 0x00320589, // n0x167f c0x0000 (---------------) + I xn--h-2fa - 0x0032130d, // n0x1680 c0x0000 (---------------) + I xn--hbmer-xqa - 0x00321653, // n0x1681 c0x0000 (---------------) + I xn--hcesuolo-7ya35b - 0x00323a11, // n0x1682 c0x0000 (---------------) + I xn--hgebostad-g3a - 0x00323e53, // n0x1683 c0x0000 (---------------) + I xn--hmmrfeasta-s4ac - 0x0032484f, // n0x1684 c0x0000 (---------------) + I xn--hnefoss-q1a - 0x00324c0c, // n0x1685 c0x0000 (---------------) + I xn--hobl-ira - 0x00324f0f, // n0x1686 c0x0000 (---------------) + I xn--holtlen-hxa - 0x003252cd, // n0x1687 c0x0000 (---------------) + I xn--hpmir-xqa - 0x003258cf, // n0x1688 c0x0000 (---------------) + I xn--hyanger-q1a - 0x00325c90, // n0x1689 c0x0000 (---------------) + I xn--hylandet-54a - 0x0032670e, // n0x168a c0x0000 (---------------) + I xn--indery-fya - 0x00327a0e, // n0x168b c0x0000 (---------------) + I xn--jlster-bya - 0x003281d0, // n0x168c c0x0000 (---------------) + I xn--jrpeland-54a - 0x0032888d, // n0x168d c0x0000 (---------------) + I xn--karmy-yua - 0x0032920e, // n0x168e c0x0000 (---------------) + I xn--kfjord-iua - 0x0032958c, // n0x168f c0x0000 (---------------) + I xn--klbu-woa - 0x0032a593, // n0x1690 c0x0000 (---------------) + I xn--koluokta-7ya57h 
- 0x0032c34e, // n0x1691 c0x0000 (---------------) + I xn--krager-gya - 0x0032ec50, // n0x1692 c0x0000 (---------------) + I xn--kranghke-b0a - 0x0032f051, // n0x1693 c0x0000 (---------------) + I xn--krdsherad-m8a - 0x0032f48f, // n0x1694 c0x0000 (---------------) + I xn--krehamn-dxa - 0x0032f853, // n0x1695 c0x0000 (---------------) + I xn--krjohka-hwab49j - 0x0033024d, // n0x1696 c0x0000 (---------------) + I xn--ksnes-uua - 0x0033058f, // n0x1697 c0x0000 (---------------) + I xn--kvfjord-nxa - 0x0033094e, // n0x1698 c0x0000 (---------------) + I xn--kvitsy-fya - 0x003313d0, // n0x1699 c0x0000 (---------------) + I xn--kvnangen-k0a - 0x003317c9, // n0x169a c0x0000 (---------------) + I xn--l-1fa - 0x00332f50, // n0x169b c0x0000 (---------------) + I xn--laheadju-7ya - 0x0033388f, // n0x169c c0x0000 (---------------) + I xn--langevg-jxa - 0x00333f0f, // n0x169d c0x0000 (---------------) + I xn--ldingen-q1a - 0x003342d2, // n0x169e c0x0000 (---------------) + I xn--leagaviika-52b - 0x00335c8e, // n0x169f c0x0000 (---------------) + I xn--lesund-hua - 0x0033658d, // n0x16a0 c0x0000 (---------------) + I xn--lgrd-poac - 0x00336b8d, // n0x16a1 c0x0000 (---------------) + I xn--lhppi-xqa - 0x00336ecd, // n0x16a2 c0x0000 (---------------) + I xn--linds-pra - 0x00337d8d, // n0x16a3 c0x0000 (---------------) + I xn--loabt-0qa - 0x003380cd, // n0x16a4 c0x0000 (---------------) + I xn--lrdal-sra - 0x00338410, // n0x16a5 c0x0000 (---------------) + I xn--lrenskog-54a - 0x0033880b, // n0x16a6 c0x0000 (---------------) + I xn--lt-liac - 0x00338d8c, // n0x16a7 c0x0000 (---------------) + I xn--lten-gra - 0x0033934c, // n0x16a8 c0x0000 (---------------) + I xn--lury-ira - 0x0033964c, // n0x16a9 c0x0000 (---------------) + I xn--mely-ira - 0x0033994e, // n0x16aa c0x0000 (---------------) + I xn--merker-kua - 0x00340090, // n0x16ab c0x0000 (---------------) + I xn--mjndalen-64a - 0x00341552, // n0x16ac c0x0000 (---------------) + I xn--mlatvuopmi-s4a - 0x003419cb, // n0x16ad c0x0000 (---------------) + I xn--mli-tla - 0x00341d4e, // n0x16ae c0x0000 (---------------) + I xn--mlselv-iua - 0x003420ce, // n0x16af c0x0000 (---------------) + I xn--moreke-jua - 0x00342cce, // n0x16b0 c0x0000 (---------------) + I xn--mosjen-eya - 0x003432cb, // n0x16b1 c0x0000 (---------------) + I xn--mot-tla - 0x3d743596, // n0x16b2 c0x00f5 (n0x1727-n0x1729) o I xn--mre-og-romsdal-qqb - 0x003464cd, // n0x16b3 c0x0000 (---------------) + I xn--msy-ula0h - 0x00346954, // n0x16b4 c0x0000 (---------------) + I xn--mtta-vrjjat-k7af - 0x00347d4d, // n0x16b5 c0x0000 (---------------) + I xn--muost-0qa - 0x00349155, // n0x16b6 c0x0000 (---------------) + I xn--nmesjevuemie-tcba - 0x0034a8cd, // n0x16b7 c0x0000 (---------------) + I xn--nry-yla5g - 0x0034b24f, // n0x16b8 c0x0000 (---------------) + I xn--nttery-byae - 0x0034b94f, // n0x16b9 c0x0000 (---------------) + I xn--nvuotna-hwa - 0x0034e24f, // n0x16ba c0x0000 (---------------) + I xn--oppegrd-ixa - 0x0034e60e, // n0x16bb c0x0000 (---------------) + I xn--ostery-fya - 0x0034eb8d, // n0x16bc c0x0000 (---------------) + I xn--osyro-wua - 0x00350311, // n0x16bd c0x0000 (---------------) + I xn--porsgu-sta26f - 0x0035208c, // n0x16be c0x0000 (---------------) + I xn--rady-ira - 0x0035238c, // n0x16bf c0x0000 (---------------) + I xn--rdal-poa - 0x0035268b, // n0x16c0 c0x0000 (---------------) + I xn--rde-ula - 0x0035294c, // n0x16c1 c0x0000 (---------------) + I xn--rdy-0nab - 0x003532cf, // n0x16c2 c0x0000 (---------------) + I xn--rennesy-v1a - 0x00353692, // n0x16c3 c0x0000 
(---------------) + I xn--rhkkervju-01af - 0x00353d0d, // n0x16c4 c0x0000 (---------------) + I xn--rholt-mra - 0x00354dcc, // n0x16c5 c0x0000 (---------------) + I xn--risa-5na - 0x0035518c, // n0x16c6 c0x0000 (---------------) + I xn--risr-ira - 0x0035548d, // n0x16c7 c0x0000 (---------------) + I xn--rland-uua - 0x003557cf, // n0x16c8 c0x0000 (---------------) + I xn--rlingen-mxa - 0x00355b8e, // n0x16c9 c0x0000 (---------------) + I xn--rmskog-bya - 0x00356b4c, // n0x16ca c0x0000 (---------------) + I xn--rros-gra - 0x00356fcd, // n0x16cb c0x0000 (---------------) + I xn--rskog-uua - 0x0035730b, // n0x16cc c0x0000 (---------------) + I xn--rst-0na - 0x0035774c, // n0x16cd c0x0000 (---------------) + I xn--rsta-fra - 0x00357ccd, // n0x16ce c0x0000 (---------------) + I xn--ryken-vua - 0x0035800e, // n0x16cf c0x0000 (---------------) + I xn--ryrvik-bya - 0x00358589, // n0x16d0 c0x0000 (---------------) + I xn--s-1fa - 0x0035a893, // n0x16d1 c0x0000 (---------------) + I xn--sandnessjen-ogb - 0x0035b20d, // n0x16d2 c0x0000 (---------------) + I xn--sandy-yua - 0x0035b54d, // n0x16d3 c0x0000 (---------------) + I xn--seral-lra - 0x0035bb4c, // n0x16d4 c0x0000 (---------------) + I xn--sgne-gra - 0x0035c20e, // n0x16d5 c0x0000 (---------------) + I xn--skierv-uta - 0x0035cb0f, // n0x16d6 c0x0000 (---------------) + I xn--skjervy-v1a - 0x0035cecc, // n0x16d7 c0x0000 (---------------) + I xn--skjk-soa - 0x0035d1cd, // n0x16d8 c0x0000 (---------------) + I xn--sknit-yqa - 0x0035d50f, // n0x16d9 c0x0000 (---------------) + I xn--sknland-fxa - 0x0035d8cc, // n0x16da c0x0000 (---------------) + I xn--slat-5na - 0x0035decc, // n0x16db c0x0000 (---------------) + I xn--slt-elab - 0x0035e28c, // n0x16dc c0x0000 (---------------) + I xn--smla-hra - 0x0035e58c, // n0x16dd c0x0000 (---------------) + I xn--smna-gra - 0x0035e8cd, // n0x16de c0x0000 (---------------) + I xn--snase-nra - 0x0035ec12, // n0x16df c0x0000 (---------------) + I xn--sndre-land-0cb - 0x0036188c, // n0x16e0 c0x0000 (---------------) + I xn--snes-poa - 0x00361b8c, // n0x16e1 c0x0000 (---------------) + I xn--snsa-roa - 0x00361e91, // n0x16e2 c0x0000 (---------------) + I xn--sr-aurdal-l8a - 0x003622cf, // n0x16e3 c0x0000 (---------------) + I xn--sr-fron-q1a - 0x0036268f, // n0x16e4 c0x0000 (---------------) + I xn--sr-odal-q1a - 0x00362a53, // n0x16e5 c0x0000 (---------------) + I xn--sr-varanger-ggb - 0x003679ce, // n0x16e6 c0x0000 (---------------) + I xn--srfold-bya - 0x00367d4f, // n0x16e7 c0x0000 (---------------) + I xn--srreisa-q1a - 0x0036810c, // n0x16e8 c0x0000 (---------------) + I xn--srum-gra - 0x3db6840e, // n0x16e9 c0x00f6 (n0x1729-n0x172a) o I xn--stfold-9xa - 0x0036878f, // n0x16ea c0x0000 (---------------) + I xn--stjrdal-s1a - 0x00368b56, // n0x16eb c0x0000 (---------------) + I xn--stjrdalshalsen-sqb - 0x0036e8d2, // n0x16ec c0x0000 (---------------) + I xn--stre-toten-zcb - 0x00370c8c, // n0x16ed c0x0000 (---------------) + I xn--tjme-hra - 0x0037144f, // n0x16ee c0x0000 (---------------) + I xn--tnsberg-q1a - 0x00371acd, // n0x16ef c0x0000 (---------------) + I xn--trany-yua - 0x00371e0f, // n0x16f0 c0x0000 (---------------) + I xn--trgstad-r1a - 0x003721cc, // n0x16f1 c0x0000 (---------------) + I xn--trna-woa - 0x003724cd, // n0x16f2 c0x0000 (---------------) + I xn--troms-zua - 0x0037280d, // n0x16f3 c0x0000 (---------------) + I xn--tysvr-vra - 0x00373a8e, // n0x16f4 c0x0000 (---------------) + I xn--unjrga-rta - 0x0037480c, // n0x16f5 c0x0000 (---------------) + I xn--vads-jra - 0x00374b0c, // n0x16f6 
c0x0000 (---------------) + I xn--vard-jra - 0x00374e10, // n0x16f7 c0x0000 (---------------) + I xn--vegrshei-c0a - 0x00377591, // n0x16f8 c0x0000 (---------------) + I xn--vestvgy-ixa6o - 0x003779cb, // n0x16f9 c0x0000 (---------------) + I xn--vg-yiab - 0x0037884c, // n0x16fa c0x0000 (---------------) + I xn--vgan-qoa - 0x00378b4e, // n0x16fb c0x0000 (---------------) + I xn--vgsy-qoa0j - 0x0037ad91, // n0x16fc c0x0000 (---------------) + I xn--vre-eiker-k8a - 0x0037b1ce, // n0x16fd c0x0000 (---------------) + I xn--vrggt-xqad - 0x0037b54d, // n0x16fe c0x0000 (---------------) + I xn--vry-yla5g - 0x0037ea0b, // n0x16ff c0x0000 (---------------) + I xn--yer-zna - 0x0037f34f, // n0x1700 c0x0000 (---------------) + I xn--ygarden-p1a - 0x00380a14, // n0x1701 c0x0000 (---------------) + I xn--ystre-slidre-ujb - 0x00209602, // n0x1702 c0x0000 (---------------) + I gs - 0x00209602, // n0x1703 c0x0000 (---------------) + I gs - 0x00209e83, // n0x1704 c0x0000 (---------------) + I nes - 0x00209602, // n0x1705 c0x0000 (---------------) + I gs - 0x00209e83, // n0x1706 c0x0000 (---------------) + I nes - 0x00209602, // n0x1707 c0x0000 (---------------) + I gs - 0x002053c2, // n0x1708 c0x0000 (---------------) + I os - 0x00246f85, // n0x1709 c0x0000 (---------------) + I valer - 0x0037aa8c, // n0x170a c0x0000 (---------------) + I xn--vler-qoa - 0x00209602, // n0x170b c0x0000 (---------------) + I gs - 0x00209602, // n0x170c c0x0000 (---------------) + I gs - 0x002053c2, // n0x170d c0x0000 (---------------) + I os - 0x00209602, // n0x170e c0x0000 (---------------) + I gs - 0x0028a2c5, // n0x170f c0x0000 (---------------) + I heroy - 0x002a4f45, // n0x1710 c0x0000 (---------------) + I sande - 0x00209602, // n0x1711 c0x0000 (---------------) + I gs - 0x00209602, // n0x1712 c0x0000 (---------------) + I gs - 0x00210042, // n0x1713 c0x0000 (---------------) + I bo - 0x0028a2c5, // n0x1714 c0x0000 (---------------) + I heroy - 0x00302b49, // n0x1715 c0x0000 (---------------) + I xn--b-5ga - 0x0032370c, // n0x1716 c0x0000 (---------------) + I xn--hery-ira - 0x00209602, // n0x1717 c0x0000 (---------------) + I gs - 0x00209602, // n0x1718 c0x0000 (---------------) + I gs - 0x00209602, // n0x1719 c0x0000 (---------------) + I gs - 0x00209602, // n0x171a c0x0000 (---------------) + I gs - 0x00246f85, // n0x171b c0x0000 (---------------) + I valer - 0x00209602, // n0x171c c0x0000 (---------------) + I gs - 0x00209602, // n0x171d c0x0000 (---------------) + I gs - 0x00209602, // n0x171e c0x0000 (---------------) + I gs - 0x00209602, // n0x171f c0x0000 (---------------) + I gs - 0x00210042, // n0x1720 c0x0000 (---------------) + I bo - 0x00302b49, // n0x1721 c0x0000 (---------------) + I xn--b-5ga - 0x00209602, // n0x1722 c0x0000 (---------------) + I gs - 0x00209602, // n0x1723 c0x0000 (---------------) + I gs - 0x00209602, // n0x1724 c0x0000 (---------------) + I gs - 0x002a4f45, // n0x1725 c0x0000 (---------------) + I sande - 0x00209602, // n0x1726 c0x0000 (---------------) + I gs - 0x002a4f45, // n0x1727 c0x0000 (---------------) + I sande - 0x0032370c, // n0x1728 c0x0000 (---------------) + I xn--hery-ira - 0x0037aa8c, // n0x1729 c0x0000 (---------------) + I xn--vler-qoa - 0x00202183, // n0x172a c0x0000 (---------------) + I biz - 0x00232dc3, // n0x172b c0x0000 (---------------) + I com - 0x0021e083, // n0x172c c0x0000 (---------------) + I edu - 0x00209ac3, // n0x172d c0x0000 (---------------) + I gov - 0x00208a44, // n0x172e c0x0000 (---------------) + I info - 0x00218643, // n0x172f c0x0000 
(---------------) + I net - 0x0024d043, // n0x1730 c0x0000 (---------------) + I org - 0x0001f6c8, // n0x1731 c0x0000 (---------------) + merseine - 0x000adb04, // n0x1732 c0x0000 (---------------) + mine - 0x000b2588, // n0x1733 c0x0000 (---------------) + shacknet - 0x00200b82, // n0x1734 c0x0000 (---------------) + I ac - 0x3ea00882, // n0x1735 c0x00fa (n0x1744-n0x1745) + I co - 0x00240f03, // n0x1736 c0x0000 (---------------) + I cri - 0x00213f44, // n0x1737 c0x0000 (---------------) + I geek - 0x0020a0c3, // n0x1738 c0x0000 (---------------) + I gen - 0x00217044, // n0x1739 c0x0000 (---------------) + I govt - 0x00241cc6, // n0x173a c0x0000 (---------------) + I health - 0x00207a43, // n0x173b c0x0000 (---------------) + I iwi - 0x00382c84, // n0x173c c0x0000 (---------------) + I kiwi - 0x00281bc5, // n0x173d c0x0000 (---------------) + I maori - 0x00240443, // n0x173e c0x0000 (---------------) + I mil - 0x00218643, // n0x173f c0x0000 (---------------) + I net - 0x0024d043, // n0x1740 c0x0000 (---------------) + I org - 0x002702ca, // n0x1741 c0x0000 (---------------) + I parliament - 0x00275006, // n0x1742 c0x0000 (---------------) + I school - 0x0034244c, // n0x1743 c0x0000 (---------------) + I xn--mori-qsa - 0x0009e448, // n0x1744 c0x0000 (---------------) + blogspot - 0x00200882, // n0x1745 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1746 c0x0000 (---------------) + I com - 0x0021e083, // n0x1747 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1748 c0x0000 (---------------) + I gov - 0x00210e83, // n0x1749 c0x0000 (---------------) + I med - 0x002c2a06, // n0x174a c0x0000 (---------------) + I museum - 0x00218643, // n0x174b c0x0000 (---------------) + I net - 0x0024d043, // n0x174c c0x0000 (---------------) + I org - 0x002cfc43, // n0x174d c0x0000 (---------------) + I pro - 0x0000d2c2, // n0x174e c0x0000 (---------------) + ae - 0x000dd187, // n0x174f c0x0000 (---------------) + blogdns - 0x000c6308, // n0x1750 c0x0000 (---------------) + blogsite - 0x00080ed2, // n0x1751 c0x0000 (---------------) + boldlygoingnowhere - 0x000c7a88, // n0x1752 c0x0000 (---------------) + dnsalias - 0x00181687, // n0x1753 c0x0000 (---------------) + dnsdojo - 0x000146cb, // n0x1754 c0x0000 (---------------) + doesntexist - 0x0012cb09, // n0x1755 c0x0000 (---------------) + dontexist - 0x000c7987, // n0x1756 c0x0000 (---------------) + doomdns - 0x001815c6, // n0x1757 c0x0000 (---------------) + dvrdns - 0x00159288, // n0x1758 c0x0000 (---------------) + dynalias - 0x3f40dc06, // n0x1759 c0x00fd (n0x1784-n0x1786) + dyndns - 0x0009d40d, // n0x175a c0x0000 (---------------) + endofinternet - 0x000ed550, // n0x175b c0x0000 (---------------) + endoftheinternet - 0x00064987, // n0x175c c0x0000 (---------------) + from-me - 0x0008cc89, // n0x175d c0x0000 (---------------) + game-host - 0x00052ac6, // n0x175e c0x0000 (---------------) + gotdns - 0x000301c2, // n0x175f c0x0000 (---------------) + hk - 0x000f45ca, // n0x1760 c0x0000 (---------------) + hobby-site - 0x00010e07, // n0x1761 c0x0000 (---------------) + homedns - 0x0000ddc7, // n0x1762 c0x0000 (---------------) + homeftp - 0x00099409, // n0x1763 c0x0000 (---------------) + homelinux - 0x00099ec8, // n0x1764 c0x0000 (---------------) + homeunix - 0x000ea54e, // n0x1765 c0x0000 (---------------) + is-a-bruinsfan - 0x00017f8e, // n0x1766 c0x0000 (---------------) + is-a-candidate - 0x00019ecf, // n0x1767 c0x0000 (---------------) + is-a-celticsfan - 0x0001f409, // n0x1768 c0x0000 (---------------) + is-a-chef - 0x00047789, // 
n0x1769 c0x0000 (---------------) + is-a-geek - 0x0006b7cb, // n0x176a c0x0000 (---------------) + is-a-knight - 0x0012ab0f, // n0x176b c0x0000 (---------------) + is-a-linux-user - 0x000a07cc, // n0x176c c0x0000 (---------------) + is-a-patsfan - 0x0011690b, // n0x176d c0x0000 (---------------) + is-a-soxfan - 0x000d9888, // n0x176e c0x0000 (---------------) + is-found - 0x000f1347, // n0x176f c0x0000 (---------------) + is-lost - 0x000e3c88, // n0x1770 c0x0000 (---------------) + is-saved - 0x0010e28b, // n0x1771 c0x0000 (---------------) + is-very-bad - 0x0011cf8c, // n0x1772 c0x0000 (---------------) + is-very-evil - 0x0011ff8c, // n0x1773 c0x0000 (---------------) + is-very-good - 0x0012708c, // n0x1774 c0x0000 (---------------) + is-very-nice - 0x0012bc8d, // n0x1775 c0x0000 (---------------) + is-very-sweet - 0x00013e48, // n0x1776 c0x0000 (---------------) + isa-geek - 0x00132909, // n0x1777 c0x0000 (---------------) + kicks-ass - 0x00151dcb, // n0x1778 c0x0000 (---------------) + misconfused - 0x000ccf07, // n0x1779 c0x0000 (---------------) + podzone - 0x000c618a, // n0x177a c0x0000 (---------------) + readmyblog - 0x00141246, // n0x177b c0x0000 (---------------) + selfip - 0x000b8fcd, // n0x177c c0x0000 (---------------) + sellsyourhome - 0x0000be88, // n0x177d c0x0000 (---------------) + servebbs - 0x0008e548, // n0x177e c0x0000 (---------------) + serveftp - 0x0008e7c9, // n0x177f c0x0000 (---------------) + servegame - 0x000dd7cc, // n0x1780 c0x0000 (---------------) + stuff-4-sale - 0x000073c2, // n0x1781 c0x0000 (---------------) + us - 0x0002c186, // n0x1782 c0x0000 (---------------) + webhop - 0x000028c2, // n0x1783 c0x0000 (---------------) + za - 0x00009ac2, // n0x1784 c0x0000 (---------------) + go - 0x0000ddc4, // n0x1785 c0x0000 (---------------) + home - 0x00217403, // n0x1786 c0x0000 (---------------) + I abo - 0x00200b82, // n0x1787 c0x0000 (---------------) + I ac - 0x00232dc3, // n0x1788 c0x0000 (---------------) + I com - 0x0021e083, // n0x1789 c0x0000 (---------------) + I edu - 0x003704c3, // n0x178a c0x0000 (---------------) + I gob - 0x00200243, // n0x178b c0x0000 (---------------) + I ing - 0x00210e83, // n0x178c c0x0000 (---------------) + I med - 0x00218643, // n0x178d c0x0000 (---------------) + I net - 0x002104c3, // n0x178e c0x0000 (---------------) + I nom - 0x0024d043, // n0x178f c0x0000 (---------------) + I org - 0x002d3a83, // n0x1790 c0x0000 (---------------) + I sld - 0x00232dc3, // n0x1791 c0x0000 (---------------) + I com - 0x0021e083, // n0x1792 c0x0000 (---------------) + I edu - 0x003704c3, // n0x1793 c0x0000 (---------------) + I gob - 0x00240443, // n0x1794 c0x0000 (---------------) + I mil - 0x00218643, // n0x1795 c0x0000 (---------------) + I net - 0x002104c3, // n0x1796 c0x0000 (---------------) + I nom - 0x0024d043, // n0x1797 c0x0000 (---------------) + I org - 0x00232dc3, // n0x1798 c0x0000 (---------------) + I com - 0x0021e083, // n0x1799 c0x0000 (---------------) + I edu - 0x0024d043, // n0x179a c0x0000 (---------------) + I org - 0x00232dc3, // n0x179b c0x0000 (---------------) + I com - 0x0021e083, // n0x179c c0x0000 (---------------) + I edu - 0x00209ac3, // n0x179d c0x0000 (---------------) + I gov - 0x00200041, // n0x179e c0x0000 (---------------) + I i - 0x00240443, // n0x179f c0x0000 (---------------) + I mil - 0x00218643, // n0x17a0 c0x0000 (---------------) + I net - 0x0024ad43, // n0x17a1 c0x0000 (---------------) + I ngo - 0x0024d043, // n0x17a2 c0x0000 (---------------) + I org - 0x00202183, // n0x17a3 c0x0000 
(---------------) + I biz - 0x00232dc3, // n0x17a4 c0x0000 (---------------) + I com - 0x0021e083, // n0x17a5 c0x0000 (---------------) + I edu - 0x00291143, // n0x17a6 c0x0000 (---------------) + I fam - 0x003704c3, // n0x17a7 c0x0000 (---------------) + I gob - 0x00269c03, // n0x17a8 c0x0000 (---------------) + I gok - 0x00264203, // n0x17a9 c0x0000 (---------------) + I gon - 0x00297e83, // n0x17aa c0x0000 (---------------) + I gop - 0x0021dbc3, // n0x17ab c0x0000 (---------------) + I gos - 0x00209ac3, // n0x17ac c0x0000 (---------------) + I gov - 0x00208a44, // n0x17ad c0x0000 (---------------) + I info - 0x00218643, // n0x17ae c0x0000 (---------------) + I net - 0x0024d043, // n0x17af c0x0000 (---------------) + I org - 0x002071c3, // n0x17b0 c0x0000 (---------------) + I web - 0x0036d504, // n0x17b1 c0x0000 (---------------) + I agro - 0x0020d643, // n0x17b2 c0x0000 (---------------) + I aid - 0x00008d43, // n0x17b3 c0x0000 (---------------) + art - 0x00203283, // n0x17b4 c0x0000 (---------------) + I atm - 0x002fd708, // n0x17b5 c0x0000 (---------------) + I augustow - 0x00262c44, // n0x17b6 c0x0000 (---------------) + I auto - 0x002c3e8a, // n0x17b7 c0x0000 (---------------) + I babia-gora - 0x0020f9c6, // n0x17b8 c0x0000 (---------------) + I bedzin - 0x0037ce07, // n0x17b9 c0x0000 (---------------) + I beskidy - 0x0022150a, // n0x17ba c0x0000 (---------------) + I bialowieza - 0x0023a5c9, // n0x17bb c0x0000 (---------------) + I bialystok - 0x00200007, // n0x17bc c0x0000 (---------------) + I bielawa - 0x0020274a, // n0x17bd c0x0000 (---------------) + I bieszczady - 0x00202183, // n0x17be c0x0000 (---------------) + I biz - 0x002df90b, // n0x17bf c0x0000 (---------------) + I boleslawiec - 0x00274409, // n0x17c0 c0x0000 (---------------) + I bydgoszcz - 0x002d09c5, // n0x17c1 c0x0000 (---------------) + I bytom - 0x002c0747, // n0x17c2 c0x0000 (---------------) + I cieszyn - 0x00000882, // n0x17c3 c0x0000 (---------------) + co - 0x00232dc3, // n0x17c4 c0x0000 (---------------) + I com - 0x0032ea07, // n0x17c5 c0x0000 (---------------) + I czeladz - 0x002283c5, // n0x17c6 c0x0000 (---------------) + I czest - 0x002b0789, // n0x17c7 c0x0000 (---------------) + I dlugoleka - 0x0021e083, // n0x17c8 c0x0000 (---------------) + I edu - 0x00228b46, // n0x17c9 c0x0000 (---------------) + I elblag - 0x002af303, // n0x17ca c0x0000 (---------------) + I elk - 0x00018cc3, // n0x17cb c0x0000 (---------------) + gda - 0x000f2c86, // n0x17cc c0x0000 (---------------) + gdansk - 0x00166b06, // n0x17cd c0x0000 (---------------) + gdynia - 0x000079c7, // n0x17ce c0x0000 (---------------) + gliwice - 0x00211506, // n0x17cf c0x0000 (---------------) + I glogow - 0x00224f05, // n0x17d0 c0x0000 (---------------) + I gmina - 0x00237fc7, // n0x17d1 c0x0000 (---------------) + I gniezno - 0x002e4c87, // n0x17d2 c0x0000 (---------------) + I gorlice - 0x41209ac3, // n0x17d3 c0x0104 (n0x1856-n0x185f) + I gov - 0x00306a07, // n0x17d4 c0x0000 (---------------) + I grajewo - 0x0033ba43, // n0x17d5 c0x0000 (---------------) + I gsm - 0x0036d945, // n0x17d6 c0x0000 (---------------) + I ilawa - 0x00208a44, // n0x17d7 c0x0000 (---------------) + I info - 0x0022cfc8, // n0x17d8 c0x0000 (---------------) + I jaworzno - 0x003507cc, // n0x17d9 c0x0000 (---------------) + I jelenia-gora - 0x002a1b85, // n0x17da c0x0000 (---------------) + I jgora - 0x002a6b06, // n0x17db c0x0000 (---------------) + I kalisz - 0x0032e8c7, // n0x17dc c0x0000 (---------------) + I karpacz - 0x0037bd47, // n0x17dd c0x0000 
(---------------) + I kartuzy - 0x002061c7, // n0x17de c0x0000 (---------------) + I kaszuby - 0x00214008, // n0x17df c0x0000 (---------------) + I katowice - 0x00269dcf, // n0x17e0 c0x0000 (---------------) + I kazimierz-dolny - 0x00349b45, // n0x17e1 c0x0000 (---------------) + I kepno - 0x00241007, // n0x17e2 c0x0000 (---------------) + I ketrzyn - 0x00248947, // n0x17e3 c0x0000 (---------------) + I klodzko - 0x00298fca, // n0x17e4 c0x0000 (---------------) + I kobierzyce - 0x0029a889, // n0x17e5 c0x0000 (---------------) + I kolobrzeg - 0x002bd185, // n0x17e6 c0x0000 (---------------) + I konin - 0x002c0cca, // n0x17e7 c0x0000 (---------------) + I konskowola - 0x000a4106, // n0x17e8 c0x0000 (---------------) + krakow - 0x002af385, // n0x17e9 c0x0000 (---------------) + I kutno - 0x002b9804, // n0x17ea c0x0000 (---------------) + I lapy - 0x00262ac6, // n0x17eb c0x0000 (---------------) + I lebork - 0x00278087, // n0x17ec c0x0000 (---------------) + I legnica - 0x002dda47, // n0x17ed c0x0000 (---------------) + I lezajsk - 0x00345708, // n0x17ee c0x0000 (---------------) + I limanowa - 0x002cba85, // n0x17ef c0x0000 (---------------) + I lomza - 0x002282c6, // n0x17f0 c0x0000 (---------------) + I lowicz - 0x0034cb05, // n0x17f1 c0x0000 (---------------) + I lubin - 0x00359945, // n0x17f2 c0x0000 (---------------) + I lukow - 0x00214f44, // n0x17f3 c0x0000 (---------------) + I mail - 0x0030e687, // n0x17f4 c0x0000 (---------------) + I malbork - 0x002f99ca, // n0x17f5 c0x0000 (---------------) + I malopolska - 0x002011c8, // n0x17f6 c0x0000 (---------------) + I mazowsze - 0x002df1c6, // n0x17f7 c0x0000 (---------------) + I mazury - 0x00010e83, // n0x17f8 c0x0000 (---------------) + med - 0x0021e585, // n0x17f9 c0x0000 (---------------) + I media - 0x0029dd46, // n0x17fa c0x0000 (---------------) + I miasta - 0x00201f86, // n0x17fb c0x0000 (---------------) + I mielec - 0x0030a3c6, // n0x17fc c0x0000 (---------------) + I mielno - 0x00240443, // n0x17fd c0x0000 (---------------) + I mil - 0x00353f87, // n0x17fe c0x0000 (---------------) + I mragowo - 0x002488c5, // n0x17ff c0x0000 (---------------) + I naklo - 0x00218643, // n0x1800 c0x0000 (---------------) + I net - 0x0026f98d, // n0x1801 c0x0000 (---------------) + I nieruchomosci - 0x002104c3, // n0x1802 c0x0000 (---------------) + I nom - 0x00345808, // n0x1803 c0x0000 (---------------) + I nowaruda - 0x00233644, // n0x1804 c0x0000 (---------------) + I nysa - 0x0026ee45, // n0x1805 c0x0000 (---------------) + I olawa - 0x00298ec6, // n0x1806 c0x0000 (---------------) + I olecko - 0x00275106, // n0x1807 c0x0000 (---------------) + I olkusz - 0x0021ce87, // n0x1808 c0x0000 (---------------) + I olsztyn - 0x0023a907, // n0x1809 c0x0000 (---------------) + I opoczno - 0x00297245, // n0x180a c0x0000 (---------------) + I opole - 0x0024d043, // n0x180b c0x0000 (---------------) + I org - 0x00217287, // n0x180c c0x0000 (---------------) + I ostroda - 0x0021c3c9, // n0x180d c0x0000 (---------------) + I ostroleka - 0x0021dc09, // n0x180e c0x0000 (---------------) + I ostrowiec - 0x0022b40a, // n0x180f c0x0000 (---------------) + I ostrowwlkp - 0x0021c6c2, // n0x1810 c0x0000 (---------------) + I pc - 0x0036d904, // n0x1811 c0x0000 (---------------) + I pila - 0x002c9884, // n0x1812 c0x0000 (---------------) + I pisz - 0x00242907, // n0x1813 c0x0000 (---------------) + I podhale - 0x002cc648, // n0x1814 c0x0000 (---------------) + I podlasie - 0x002cd309, // n0x1815 c0x0000 (---------------) + I polkowice - 0x00221809, // n0x1816 
c0x0000 (---------------) + I pomorskie - 0x002cda47, // n0x1817 c0x0000 (---------------) + I pomorze - 0x00226a06, // n0x1818 c0x0000 (---------------) + I powiat - 0x000cf186, // n0x1819 c0x0000 (---------------) + poznan - 0x002cfac4, // n0x181a c0x0000 (---------------) + I priv - 0x002cfc4a, // n0x181b c0x0000 (---------------) + I prochowice - 0x002d3048, // n0x181c c0x0000 (---------------) + I pruszkow - 0x002d3249, // n0x181d c0x0000 (---------------) + I przeworsk - 0x002a9786, // n0x181e c0x0000 (---------------) + I pulawy - 0x002fdd05, // n0x181f c0x0000 (---------------) + I radom - 0x00201088, // n0x1820 c0x0000 (---------------) + I rawa-maz - 0x002b614a, // n0x1821 c0x0000 (---------------) + I realestate - 0x0027f043, // n0x1822 c0x0000 (---------------) + I rel - 0x00273646, // n0x1823 c0x0000 (---------------) + I rybnik - 0x002cdb47, // n0x1824 c0x0000 (---------------) + I rzeszow - 0x003795c5, // n0x1825 c0x0000 (---------------) + I sanok - 0x002546c5, // n0x1826 c0x0000 (---------------) + I sejny - 0x0022ae43, // n0x1827 c0x0000 (---------------) + I sex - 0x0022e644, // n0x1828 c0x0000 (---------------) + I shop - 0x0022b1c5, // n0x1829 c0x0000 (---------------) + I sklep - 0x0027c807, // n0x182a c0x0000 (---------------) + I skoczow - 0x002e74c5, // n0x182b c0x0000 (---------------) + I slask - 0x00331b86, // n0x182c c0x0000 (---------------) + I slupsk - 0x000d5645, // n0x182d c0x0000 (---------------) + sopot - 0x002624c3, // n0x182e c0x0000 (---------------) + I sos - 0x002624c9, // n0x182f c0x0000 (---------------) + I sosnowiec - 0x0026ec0c, // n0x1830 c0x0000 (---------------) + I stalowa-wola - 0x002e088c, // n0x1831 c0x0000 (---------------) + I starachowice - 0x002bd588, // n0x1832 c0x0000 (---------------) + I stargard - 0x002add87, // n0x1833 c0x0000 (---------------) + I suwalki - 0x002e2048, // n0x1834 c0x0000 (---------------) + I swidnica - 0x002e238a, // n0x1835 c0x0000 (---------------) + I swiebodzin - 0x002e2b0b, // n0x1836 c0x0000 (---------------) + I swinoujscie - 0x00274548, // n0x1837 c0x0000 (---------------) + I szczecin - 0x002a6c08, // n0x1838 c0x0000 (---------------) + I szczytno - 0x003818c6, // n0x1839 c0x0000 (---------------) + I szkola - 0x002fb5c5, // n0x183a c0x0000 (---------------) + I targi - 0x0036384a, // n0x183b c0x0000 (---------------) + I tarnobrzeg - 0x002276c5, // n0x183c c0x0000 (---------------) + I tgory - 0x002032c2, // n0x183d c0x0000 (---------------) + I tm - 0x002b3cc7, // n0x183e c0x0000 (---------------) + I tourism - 0x00290186, // n0x183f c0x0000 (---------------) + I travel - 0x00332305, // n0x1840 c0x0000 (---------------) + I turek - 0x002e4149, // n0x1841 c0x0000 (---------------) + I turystyka - 0x00232f85, // n0x1842 c0x0000 (---------------) + I tychy - 0x0029cf05, // n0x1843 c0x0000 (---------------) + I ustka - 0x002fc789, // n0x1844 c0x0000 (---------------) + I walbrzych - 0x0033e5c6, // n0x1845 c0x0000 (---------------) + I warmia - 0x0025e288, // n0x1846 c0x0000 (---------------) + I warszawa - 0x00276e43, // n0x1847 c0x0000 (---------------) + I waw - 0x00211646, // n0x1848 c0x0000 (---------------) + I wegrow - 0x0026dcc6, // n0x1849 c0x0000 (---------------) + I wielun - 0x0029a685, // n0x184a c0x0000 (---------------) + I wlocl - 0x0029a689, // n0x184b c0x0000 (---------------) + I wloclawek - 0x0029dfc9, // n0x184c c0x0000 (---------------) + I wodzislaw - 0x00255e47, // n0x184d c0x0000 (---------------) + I wolomin - 0x000a4244, // n0x184e c0x0000 (---------------) + wroc - 
0x002a4247, // n0x184f c0x0000 (---------------) + I wroclaw - 0x00221709, // n0x1850 c0x0000 (---------------) + I zachpomor - 0x0023d0c5, // n0x1851 c0x0000 (---------------) + I zagan - 0x00043988, // n0x1852 c0x0000 (---------------) + zakopane - 0x0035fa45, // n0x1853 c0x0000 (---------------) + I zarow - 0x0022af45, // n0x1854 c0x0000 (---------------) + I zgora - 0x0022db89, // n0x1855 c0x0000 (---------------) + I zgorzelec - 0x00200ac2, // n0x1856 c0x0000 (---------------) + I pa - 0x002167c2, // n0x1857 c0x0000 (---------------) + I po - 0x00209f02, // n0x1858 c0x0000 (---------------) + I so - 0x002dc282, // n0x1859 c0x0000 (---------------) + I sr - 0x0029de09, // n0x185a c0x0000 (---------------) + I starostwo - 0x00201b02, // n0x185b c0x0000 (---------------) + I ug - 0x00200f02, // n0x185c c0x0000 (---------------) + I um - 0x002269c4, // n0x185d c0x0000 (---------------) + I upow - 0x0023f482, // n0x185e c0x0000 (---------------) + I uw - 0x00200882, // n0x185f c0x0000 (---------------) + I co - 0x0021e083, // n0x1860 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1861 c0x0000 (---------------) + I gov - 0x00218643, // n0x1862 c0x0000 (---------------) + I net - 0x0024d043, // n0x1863 c0x0000 (---------------) + I org - 0x00200b82, // n0x1864 c0x0000 (---------------) + I ac - 0x00202183, // n0x1865 c0x0000 (---------------) + I biz - 0x00232dc3, // n0x1866 c0x0000 (---------------) + I com - 0x0021e083, // n0x1867 c0x0000 (---------------) + I edu - 0x00209c03, // n0x1868 c0x0000 (---------------) + I est - 0x00209ac3, // n0x1869 c0x0000 (---------------) + I gov - 0x00208a44, // n0x186a c0x0000 (---------------) + I info - 0x0029e0c4, // n0x186b c0x0000 (---------------) + I isla - 0x00267944, // n0x186c c0x0000 (---------------) + I name - 0x00218643, // n0x186d c0x0000 (---------------) + I net - 0x0024d043, // n0x186e c0x0000 (---------------) + I org - 0x002cfc43, // n0x186f c0x0000 (---------------) + I pro - 0x002d0d84, // n0x1870 c0x0000 (---------------) + I prof - 0x002bfb83, // n0x1871 c0x0000 (---------------) + I aca - 0x00210ac3, // n0x1872 c0x0000 (---------------) + I bar - 0x002256c3, // n0x1873 c0x0000 (---------------) + I cpa - 0x002abd43, // n0x1874 c0x0000 (---------------) + I eng - 0x002a3043, // n0x1875 c0x0000 (---------------) + I jur - 0x002000c3, // n0x1876 c0x0000 (---------------) + I law - 0x00210e83, // n0x1877 c0x0000 (---------------) + I med - 0x00232dc3, // n0x1878 c0x0000 (---------------) + I com - 0x0021e083, // n0x1879 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x187a c0x0000 (---------------) + I gov - 0x00218643, // n0x187b c0x0000 (---------------) + I net - 0x0024d043, // n0x187c c0x0000 (---------------) + I org - 0x002cba43, // n0x187d c0x0000 (---------------) + I plo - 0x002e6143, // n0x187e c0x0000 (---------------) + I sec - 0x0009e448, // n0x187f c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x1880 c0x0000 (---------------) + I com - 0x0021e083, // n0x1881 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1882 c0x0000 (---------------) + I gov - 0x002188c3, // n0x1883 c0x0000 (---------------) + I int - 0x00218643, // n0x1884 c0x0000 (---------------) + I net - 0x0023e704, // n0x1885 c0x0000 (---------------) + I nome - 0x0024d043, // n0x1886 c0x0000 (---------------) + I org - 0x002d7ec4, // n0x1887 c0x0000 (---------------) + I publ - 0x002aa845, // n0x1888 c0x0000 (---------------) + I belau - 0x00200882, // n0x1889 c0x0000 (---------------) + I co - 0x00205742, // n0x188a c0x0000 
(---------------) + I ed - 0x00209ac2, // n0x188b c0x0000 (---------------) + I go - 0x00209e82, // n0x188c c0x0000 (---------------) + I ne - 0x00200d02, // n0x188d c0x0000 (---------------) + I or - 0x00232dc3, // n0x188e c0x0000 (---------------) + I com - 0x0023a884, // n0x188f c0x0000 (---------------) + I coop - 0x0021e083, // n0x1890 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1891 c0x0000 (---------------) + I gov - 0x00240443, // n0x1892 c0x0000 (---------------) + I mil - 0x00218643, // n0x1893 c0x0000 (---------------) + I net - 0x0024d043, // n0x1894 c0x0000 (---------------) + I org - 0x00232dc3, // n0x1895 c0x0000 (---------------) + I com - 0x0021e083, // n0x1896 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1897 c0x0000 (---------------) + I gov - 0x00240443, // n0x1898 c0x0000 (---------------) + I mil - 0x00267944, // n0x1899 c0x0000 (---------------) + I name - 0x00218643, // n0x189a c0x0000 (---------------) + I net - 0x0024d043, // n0x189b c0x0000 (---------------) + I org - 0x00251983, // n0x189c c0x0000 (---------------) + I sch - 0x00278344, // n0x189d c0x0000 (---------------) + I asso - 0x0009e448, // n0x189e c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x189f c0x0000 (---------------) + I com - 0x002104c3, // n0x18a0 c0x0000 (---------------) + I nom - 0x0020b384, // n0x18a1 c0x0000 (---------------) + I arts - 0x0009e448, // n0x18a2 c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x18a3 c0x0000 (---------------) + I com - 0x0024a304, // n0x18a4 c0x0000 (---------------) + I firm - 0x00208a44, // n0x18a5 c0x0000 (---------------) + I info - 0x002104c3, // n0x18a6 c0x0000 (---------------) + I nom - 0x00200902, // n0x18a7 c0x0000 (---------------) + I nt - 0x0024d043, // n0x18a8 c0x0000 (---------------) + I org - 0x0022a143, // n0x18a9 c0x0000 (---------------) + I rec - 0x002dc745, // n0x18aa c0x0000 (---------------) + I store - 0x002032c2, // n0x18ab c0x0000 (---------------) + I tm - 0x002b1c03, // n0x18ac c0x0000 (---------------) + I www - 0x00200b82, // n0x18ad c0x0000 (---------------) + I ac - 0x00200882, // n0x18ae c0x0000 (---------------) + I co - 0x0021e083, // n0x18af c0x0000 (---------------) + I edu - 0x00209ac3, // n0x18b0 c0x0000 (---------------) + I gov - 0x00200242, // n0x18b1 c0x0000 (---------------) + I in - 0x0024d043, // n0x18b2 c0x0000 (---------------) + I org - 0x00200b82, // n0x18b3 c0x0000 (---------------) + I ac - 0x00202907, // n0x18b4 c0x0000 (---------------) + I adygeya - 0x0036d205, // n0x18b5 c0x0000 (---------------) + I altai - 0x002340c4, // n0x18b6 c0x0000 (---------------) + I amur - 0x00332486, // n0x18b7 c0x0000 (---------------) + I amursk - 0x002ff34b, // n0x18b8 c0x0000 (---------------) + I arkhangelsk - 0x00231d89, // n0x18b9 c0x0000 (---------------) + I astrakhan - 0x002a6a46, // n0x18ba c0x0000 (---------------) + I baikal - 0x00334709, // n0x18bb c0x0000 (---------------) + I bashkiria - 0x002c4cc8, // n0x18bc c0x0000 (---------------) + I belgorod - 0x00208c43, // n0x18bd c0x0000 (---------------) + I bir - 0x0009e448, // n0x18be c0x0000 (---------------) + blogspot - 0x0022b087, // n0x18bf c0x0000 (---------------) + I bryansk - 0x002295c8, // n0x18c0 c0x0000 (---------------) + I buryatia - 0x0033ffc3, // n0x18c1 c0x0000 (---------------) + I cbg - 0x00313944, // n0x18c2 c0x0000 (---------------) + I chel - 0x0034c18b, // n0x18c3 c0x0000 (---------------) + I chelyabinsk - 0x00276ac5, // n0x18c4 c0x0000 (---------------) + I chita - 0x00205b48, // n0x18c5 c0x0000 
(---------------) + I chukotka - 0x0031d989, // n0x18c6 c0x0000 (---------------) + I chuvashia - 0x002a4443, // n0x18c7 c0x0000 (---------------) + I cmw - 0x00232dc3, // n0x18c8 c0x0000 (---------------) + I com - 0x00344008, // n0x18c9 c0x0000 (---------------) + I dagestan - 0x00275b87, // n0x18ca c0x0000 (---------------) + I dudinka - 0x002db146, // n0x18cb c0x0000 (---------------) + I e-burg - 0x0021e083, // n0x18cc c0x0000 (---------------) + I edu - 0x002fa247, // n0x18cd c0x0000 (---------------) + I fareast - 0x00209ac3, // n0x18ce c0x0000 (---------------) + I gov - 0x00239d46, // n0x18cf c0x0000 (---------------) + I grozny - 0x002188c3, // n0x18d0 c0x0000 (---------------) + I int - 0x0022ec87, // n0x18d1 c0x0000 (---------------) + I irkutsk - 0x002fec47, // n0x18d2 c0x0000 (---------------) + I ivanovo - 0x0035ad47, // n0x18d3 c0x0000 (---------------) + I izhevsk - 0x0030e605, // n0x18d4 c0x0000 (---------------) + I jamal - 0x00203fc3, // n0x18d5 c0x0000 (---------------) + I jar - 0x002a888b, // n0x18d6 c0x0000 (---------------) + I joshkar-ola - 0x0036e648, // n0x18d7 c0x0000 (---------------) + I k-uralsk - 0x002246c8, // n0x18d8 c0x0000 (---------------) + I kalmykia - 0x0022f506, // n0x18d9 c0x0000 (---------------) + I kaluga - 0x0020f049, // n0x18da c0x0000 (---------------) + I kamchatka - 0x00343107, // n0x18db c0x0000 (---------------) + I karelia - 0x002f1805, // n0x18dc c0x0000 (---------------) + I kazan - 0x00351684, // n0x18dd c0x0000 (---------------) + I kchr - 0x00360a48, // n0x18de c0x0000 (---------------) + I kemerovo - 0x0023cb0a, // n0x18df c0x0000 (---------------) + I khabarovsk - 0x0023cd49, // n0x18e0 c0x0000 (---------------) + I khakassia - 0x00267283, // n0x18e1 c0x0000 (---------------) + I khv - 0x0022d7c5, // n0x18e2 c0x0000 (---------------) + I kirov - 0x00286643, // n0x18e3 c0x0000 (---------------) + I kms - 0x002d2bc6, // n0x18e4 c0x0000 (---------------) + I koenig - 0x002a1884, // n0x18e5 c0x0000 (---------------) + I komi - 0x002f4e48, // n0x18e6 c0x0000 (---------------) + I kostroma - 0x002a470b, // n0x18e7 c0x0000 (---------------) + I krasnoyarsk - 0x00327605, // n0x18e8 c0x0000 (---------------) + I kuban - 0x002aa5c6, // n0x18e9 c0x0000 (---------------) + I kurgan - 0x002ad2c5, // n0x18ea c0x0000 (---------------) + I kursk - 0x002ae788, // n0x18eb c0x0000 (---------------) + I kustanai - 0x002af4c7, // n0x18ec c0x0000 (---------------) + I kuzbass - 0x00369d07, // n0x18ed c0x0000 (---------------) + I lipetsk - 0x0030ab07, // n0x18ee c0x0000 (---------------) + I magadan - 0x00213608, // n0x18ef c0x0000 (---------------) + I magnitka - 0x0023d384, // n0x18f0 c0x0000 (---------------) + I mari - 0x0023d387, // n0x18f1 c0x0000 (---------------) + I mari-el - 0x0033f146, // n0x18f2 c0x0000 (---------------) + I marine - 0x00240443, // n0x18f3 c0x0000 (---------------) + I mil - 0x002b8708, // n0x18f4 c0x0000 (---------------) + I mordovia - 0x0024e983, // n0x18f5 c0x0000 (---------------) + I msk - 0x002c0b08, // n0x18f6 c0x0000 (---------------) + I murmansk - 0x002c4305, // n0x18f7 c0x0000 (---------------) + I mytis - 0x002cbfc8, // n0x18f8 c0x0000 (---------------) + I nakhodka - 0x0021e287, // n0x18f9 c0x0000 (---------------) + I nalchik - 0x00218643, // n0x18fa c0x0000 (---------------) + I net - 0x00371383, // n0x18fb c0x0000 (---------------) + I nkz - 0x00283004, // n0x18fc c0x0000 (---------------) + I nnov - 0x00200cc7, // n0x18fd c0x0000 (---------------) + I norilsk - 0x0020a143, // n0x18fe c0x0000 
(---------------) + I nov - 0x002fed0b, // n0x18ff c0x0000 (---------------) + I novosibirsk - 0x0020ae83, // n0x1900 c0x0000 (---------------) + I nsk - 0x0024e944, // n0x1901 c0x0000 (---------------) + I omsk - 0x002dc7c8, // n0x1902 c0x0000 (---------------) + I orenburg - 0x0024d043, // n0x1903 c0x0000 (---------------) + I org - 0x002dba85, // n0x1904 c0x0000 (---------------) + I oryol - 0x002f5205, // n0x1905 c0x0000 (---------------) + I oskol - 0x00203346, // n0x1906 c0x0000 (---------------) + I palana - 0x00212f85, // n0x1907 c0x0000 (---------------) + I penza - 0x002c46c4, // n0x1908 c0x0000 (---------------) + I perm - 0x00200a82, // n0x1909 c0x0000 (---------------) + I pp - 0x002d3503, // n0x190a c0x0000 (---------------) + I ptz - 0x002b988a, // n0x190b c0x0000 (---------------) + I pyatigorsk - 0x0032e3c3, // n0x190c c0x0000 (---------------) + I rnd - 0x00360f89, // n0x190d c0x0000 (---------------) + I rubtsovsk - 0x002374c6, // n0x190e c0x0000 (---------------) + I ryazan - 0x00223048, // n0x190f c0x0000 (---------------) + I sakhalin - 0x00284d46, // n0x1910 c0x0000 (---------------) + I samara - 0x00246287, // n0x1911 c0x0000 (---------------) + I saratov - 0x00316648, // n0x1912 c0x0000 (---------------) + I simbirsk - 0x002b3e08, // n0x1913 c0x0000 (---------------) + I smolensk - 0x00339f83, // n0x1914 c0x0000 (---------------) + I snz - 0x002d9bc3, // n0x1915 c0x0000 (---------------) + I spb - 0x0023dbc9, // n0x1916 c0x0000 (---------------) + I stavropol - 0x002eb3c3, // n0x1917 c0x0000 (---------------) + I stv - 0x002d0506, // n0x1918 c0x0000 (---------------) + I surgut - 0x0037e7c6, // n0x1919 c0x0000 (---------------) + I syzran - 0x00366046, // n0x191a c0x0000 (---------------) + I tambov - 0x00210249, // n0x191b c0x0000 (---------------) + I tatarstan - 0x0029e284, // n0x191c c0x0000 (---------------) + I test - 0x0022bac3, // n0x191d c0x0000 (---------------) + I tom - 0x00337545, // n0x191e c0x0000 (---------------) + I tomsk - 0x00378609, // n0x191f c0x0000 (---------------) + I tsaritsyn - 0x0021d443, // n0x1920 c0x0000 (---------------) + I tsk - 0x002e3e84, // n0x1921 c0x0000 (---------------) + I tula - 0x002e5704, // n0x1922 c0x0000 (---------------) + I tuva - 0x002e5c44, // n0x1923 c0x0000 (---------------) + I tver - 0x002d2ec6, // n0x1924 c0x0000 (---------------) + I tyumen - 0x00365503, // n0x1925 c0x0000 (---------------) + I udm - 0x00365508, // n0x1926 c0x0000 (---------------) + I udmurtia - 0x00254408, // n0x1927 c0x0000 (---------------) + I ulan-ude - 0x002e6a86, // n0x1928 c0x0000 (---------------) + I vdonsk - 0x002f160b, // n0x1929 c0x0000 (---------------) + I vladikavkaz - 0x002f1948, // n0x192a c0x0000 (---------------) + I vladimir - 0x002f1b4b, // n0x192b c0x0000 (---------------) + I vladivostok - 0x002f22c9, // n0x192c c0x0000 (---------------) + I volgograd - 0x002f2b87, // n0x192d c0x0000 (---------------) + I vologda - 0x002f3908, // n0x192e c0x0000 (---------------) + I voronezh - 0x002f4843, // n0x192f c0x0000 (---------------) + I vrn - 0x00204a86, // n0x1930 c0x0000 (---------------) + I vyatka - 0x002e5547, // n0x1931 c0x0000 (---------------) + I yakutia - 0x0028db05, // n0x1932 c0x0000 (---------------) + I yamal - 0x00326a09, // n0x1933 c0x0000 (---------------) + I yaroslavl - 0x0036680d, // n0x1934 c0x0000 (---------------) + I yekaterinburg - 0x00222e91, // n0x1935 c0x0000 (---------------) + I yuzhno-sakhalinsk - 0x0023af45, // n0x1936 c0x0000 (---------------) + I zgrad - 0x00200b82, // n0x1937 c0x0000 
(---------------) + I ac - 0x00200882, // n0x1938 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1939 c0x0000 (---------------) + I com - 0x0021e083, // n0x193a c0x0000 (---------------) + I edu - 0x00252544, // n0x193b c0x0000 (---------------) + I gouv - 0x00209ac3, // n0x193c c0x0000 (---------------) + I gov - 0x002188c3, // n0x193d c0x0000 (---------------) + I int - 0x00240443, // n0x193e c0x0000 (---------------) + I mil - 0x00218643, // n0x193f c0x0000 (---------------) + I net - 0x00232dc3, // n0x1940 c0x0000 (---------------) + I com - 0x0021e083, // n0x1941 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1942 c0x0000 (---------------) + I gov - 0x00210e83, // n0x1943 c0x0000 (---------------) + I med - 0x00218643, // n0x1944 c0x0000 (---------------) + I net - 0x0024d043, // n0x1945 c0x0000 (---------------) + I org - 0x0025dbc3, // n0x1946 c0x0000 (---------------) + I pub - 0x00251983, // n0x1947 c0x0000 (---------------) + I sch - 0x00232dc3, // n0x1948 c0x0000 (---------------) + I com - 0x0021e083, // n0x1949 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x194a c0x0000 (---------------) + I gov - 0x00218643, // n0x194b c0x0000 (---------------) + I net - 0x0024d043, // n0x194c c0x0000 (---------------) + I org - 0x00232dc3, // n0x194d c0x0000 (---------------) + I com - 0x0021e083, // n0x194e c0x0000 (---------------) + I edu - 0x00209ac3, // n0x194f c0x0000 (---------------) + I gov - 0x00218643, // n0x1950 c0x0000 (---------------) + I net - 0x0024d043, // n0x1951 c0x0000 (---------------) + I org - 0x00232dc3, // n0x1952 c0x0000 (---------------) + I com - 0x0021e083, // n0x1953 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1954 c0x0000 (---------------) + I gov - 0x00208a44, // n0x1955 c0x0000 (---------------) + I info - 0x00210e83, // n0x1956 c0x0000 (---------------) + I med - 0x00218643, // n0x1957 c0x0000 (---------------) + I net - 0x0024d043, // n0x1958 c0x0000 (---------------) + I org - 0x0028dc82, // n0x1959 c0x0000 (---------------) + I tv - 0x00200101, // n0x195a c0x0000 (---------------) + I a - 0x00200b82, // n0x195b c0x0000 (---------------) + I ac - 0x00200001, // n0x195c c0x0000 (---------------) + I b - 0x00303202, // n0x195d c0x0000 (---------------) + I bd - 0x0009e448, // n0x195e c0x0000 (---------------) + blogspot - 0x0021e6c5, // n0x195f c0x0000 (---------------) + I brand - 0x00200401, // n0x1960 c0x0000 (---------------) + I c - 0x00032dc3, // n0x1961 c0x0000 (---------------) + com - 0x002003c1, // n0x1962 c0x0000 (---------------) + I d - 0x00200081, // n0x1963 c0x0000 (---------------) + I e - 0x00203841, // n0x1964 c0x0000 (---------------) + I f - 0x00246f02, // n0x1965 c0x0000 (---------------) + I fh - 0x00379704, // n0x1966 c0x0000 (---------------) + I fhsk - 0x00246f03, // n0x1967 c0x0000 (---------------) + I fhv - 0x002002c1, // n0x1968 c0x0000 (---------------) + I g - 0x00200201, // n0x1969 c0x0000 (---------------) + I h - 0x00200041, // n0x196a c0x0000 (---------------) + I i - 0x00200481, // n0x196b c0x0000 (---------------) + I k - 0x0032cec7, // n0x196c c0x0000 (---------------) + I komforb - 0x002c114f, // n0x196d c0x0000 (---------------) + I kommunalforbund - 0x002b9ac6, // n0x196e c0x0000 (---------------) + I komvux - 0x002000c1, // n0x196f c0x0000 (---------------) + I l - 0x00381b06, // n0x1970 c0x0000 (---------------) + I lanbib - 0x00200f41, // n0x1971 c0x0000 (---------------) + I m - 0x00200281, // n0x1972 c0x0000 (---------------) + I n - 0x0031a18e, // n0x1973 c0x0000 (---------------) 
+ I naturbruksgymn - 0x00200341, // n0x1974 c0x0000 (---------------) + I o - 0x0024d043, // n0x1975 c0x0000 (---------------) + I org - 0x00200a81, // n0x1976 c0x0000 (---------------) + I p - 0x00297f05, // n0x1977 c0x0000 (---------------) + I parti - 0x00200a82, // n0x1978 c0x0000 (---------------) + I pp - 0x0022ad45, // n0x1979 c0x0000 (---------------) + I press - 0x002006c1, // n0x197a c0x0000 (---------------) + I r - 0x002001c1, // n0x197b c0x0000 (---------------) + I s - 0x00200301, // n0x197c c0x0000 (---------------) + I t - 0x002032c2, // n0x197d c0x0000 (---------------) + I tm - 0x00200541, // n0x197e c0x0000 (---------------) + I u - 0x00200141, // n0x197f c0x0000 (---------------) + I w - 0x002013c1, // n0x1980 c0x0000 (---------------) + I x - 0x00202981, // n0x1981 c0x0000 (---------------) + I y - 0x00201241, // n0x1982 c0x0000 (---------------) + I z - 0x0009e448, // n0x1983 c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x1984 c0x0000 (---------------) + I com - 0x0021e083, // n0x1985 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1986 c0x0000 (---------------) + I gov - 0x00218643, // n0x1987 c0x0000 (---------------) + I net - 0x0024d043, // n0x1988 c0x0000 (---------------) + I org - 0x0020c783, // n0x1989 c0x0000 (---------------) + I per - 0x00232dc3, // n0x198a c0x0000 (---------------) + I com - 0x00209ac3, // n0x198b c0x0000 (---------------) + I gov - 0x00240443, // n0x198c c0x0000 (---------------) + I mil - 0x00218643, // n0x198d c0x0000 (---------------) + I net - 0x0024d043, // n0x198e c0x0000 (---------------) + I org - 0x0009e448, // n0x198f c0x0000 (---------------) + blogspot - 0x00232dc3, // n0x1990 c0x0000 (---------------) + I com - 0x0021e083, // n0x1991 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1992 c0x0000 (---------------) + I gov - 0x00218643, // n0x1993 c0x0000 (---------------) + I net - 0x0024d043, // n0x1994 c0x0000 (---------------) + I org - 0x00208d43, // n0x1995 c0x0000 (---------------) + I art - 0x00232dc3, // n0x1996 c0x0000 (---------------) + I com - 0x0021e083, // n0x1997 c0x0000 (---------------) + I edu - 0x00252544, // n0x1998 c0x0000 (---------------) + I gouv - 0x0024d043, // n0x1999 c0x0000 (---------------) + I org - 0x002a80c5, // n0x199a c0x0000 (---------------) + I perso - 0x003826c4, // n0x199b c0x0000 (---------------) + I univ - 0x00232dc3, // n0x199c c0x0000 (---------------) + I com - 0x00218643, // n0x199d c0x0000 (---------------) + I net - 0x0024d043, // n0x199e c0x0000 (---------------) + I org - 0x00200882, // n0x199f c0x0000 (---------------) + I co - 0x00232dc3, // n0x19a0 c0x0000 (---------------) + I com - 0x00235e09, // n0x19a1 c0x0000 (---------------) + I consulado - 0x0021e083, // n0x19a2 c0x0000 (---------------) + I edu - 0x002aaf09, // n0x19a3 c0x0000 (---------------) + I embaixada - 0x00209ac3, // n0x19a4 c0x0000 (---------------) + I gov - 0x00240443, // n0x19a5 c0x0000 (---------------) + I mil - 0x00218643, // n0x19a6 c0x0000 (---------------) + I net - 0x0024d043, // n0x19a7 c0x0000 (---------------) + I org - 0x002cf8c8, // n0x19a8 c0x0000 (---------------) + I principe - 0x00241fc7, // n0x19a9 c0x0000 (---------------) + I saotome - 0x002dc745, // n0x19aa c0x0000 (---------------) + I store - 0x00232dc3, // n0x19ab c0x0000 (---------------) + I com - 0x0021e083, // n0x19ac c0x0000 (---------------) + I edu - 0x003704c3, // n0x19ad c0x0000 (---------------) + I gob - 0x0024d043, // n0x19ae c0x0000 (---------------) + I org - 0x0023fbc3, // n0x19af c0x0000 
(---------------) + I red - 0x00209ac3, // n0x19b0 c0x0000 (---------------) + I gov - 0x00232dc3, // n0x19b1 c0x0000 (---------------) + I com - 0x0021e083, // n0x19b2 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x19b3 c0x0000 (---------------) + I gov - 0x00240443, // n0x19b4 c0x0000 (---------------) + I mil - 0x00218643, // n0x19b5 c0x0000 (---------------) + I net - 0x0024d043, // n0x19b6 c0x0000 (---------------) + I org - 0x00200b82, // n0x19b7 c0x0000 (---------------) + I ac - 0x00200882, // n0x19b8 c0x0000 (---------------) + I co - 0x0024d043, // n0x19b9 c0x0000 (---------------) + I org - 0x0009e448, // n0x19ba c0x0000 (---------------) + blogspot - 0x00200b82, // n0x19bb c0x0000 (---------------) + I ac - 0x00200882, // n0x19bc c0x0000 (---------------) + I co - 0x00209ac2, // n0x19bd c0x0000 (---------------) + I go - 0x00200242, // n0x19be c0x0000 (---------------) + I in - 0x00200f42, // n0x19bf c0x0000 (---------------) + I mi - 0x00218643, // n0x19c0 c0x0000 (---------------) + I net - 0x00200d02, // n0x19c1 c0x0000 (---------------) + I or - 0x00200b82, // n0x19c2 c0x0000 (---------------) + I ac - 0x00202183, // n0x19c3 c0x0000 (---------------) + I biz - 0x00200882, // n0x19c4 c0x0000 (---------------) + I co - 0x00232dc3, // n0x19c5 c0x0000 (---------------) + I com - 0x0021e083, // n0x19c6 c0x0000 (---------------) + I edu - 0x00209ac2, // n0x19c7 c0x0000 (---------------) + I go - 0x00209ac3, // n0x19c8 c0x0000 (---------------) + I gov - 0x002188c3, // n0x19c9 c0x0000 (---------------) + I int - 0x00240443, // n0x19ca c0x0000 (---------------) + I mil - 0x00267944, // n0x19cb c0x0000 (---------------) + I name - 0x00218643, // n0x19cc c0x0000 (---------------) + I net - 0x00219583, // n0x19cd c0x0000 (---------------) + I nic - 0x0024d043, // n0x19ce c0x0000 (---------------) + I org - 0x0029e284, // n0x19cf c0x0000 (---------------) + I test - 0x002071c3, // n0x19d0 c0x0000 (---------------) + I web - 0x00209ac3, // n0x19d1 c0x0000 (---------------) + I gov - 0x00200882, // n0x19d2 c0x0000 (---------------) + I co - 0x00232dc3, // n0x19d3 c0x0000 (---------------) + I com - 0x0021e083, // n0x19d4 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x19d5 c0x0000 (---------------) + I gov - 0x00240443, // n0x19d6 c0x0000 (---------------) + I mil - 0x00218643, // n0x19d7 c0x0000 (---------------) + I net - 0x002104c3, // n0x19d8 c0x0000 (---------------) + I nom - 0x0024d043, // n0x19d9 c0x0000 (---------------) + I org - 0x00371147, // n0x19da c0x0000 (---------------) + I agrinet - 0x00232dc3, // n0x19db c0x0000 (---------------) + I com - 0x00254587, // n0x19dc c0x0000 (---------------) + I defense - 0x0030cc46, // n0x19dd c0x0000 (---------------) + I edunet - 0x0020ae43, // n0x19de c0x0000 (---------------) + I ens - 0x00206f03, // n0x19df c0x0000 (---------------) + I fin - 0x00209ac3, // n0x19e0 c0x0000 (---------------) + I gov - 0x002202c3, // n0x19e1 c0x0000 (---------------) + I ind - 0x00208a44, // n0x19e2 c0x0000 (---------------) + I info - 0x002ea3c4, // n0x19e3 c0x0000 (---------------) + I intl - 0x002ca586, // n0x19e4 c0x0000 (---------------) + I mincom - 0x0020f603, // n0x19e5 c0x0000 (---------------) + I nat - 0x00218643, // n0x19e6 c0x0000 (---------------) + I net - 0x0024d043, // n0x19e7 c0x0000 (---------------) + I org - 0x002a80c5, // n0x19e8 c0x0000 (---------------) + I perso - 0x0021cc84, // n0x19e9 c0x0000 (---------------) + I rnrt - 0x002273c3, // n0x19ea c0x0000 (---------------) + I rns - 0x0037bb83, // n0x19eb c0x0000 
(---------------) + I rnu - 0x002b3cc7, // n0x19ec c0x0000 (---------------) + I tourism - 0x002ee1c5, // n0x19ed c0x0000 (---------------) + I turen - 0x00232dc3, // n0x19ee c0x0000 (---------------) + I com - 0x0021e083, // n0x19ef c0x0000 (---------------) + I edu - 0x00209ac3, // n0x19f0 c0x0000 (---------------) + I gov - 0x00240443, // n0x19f1 c0x0000 (---------------) + I mil - 0x00218643, // n0x19f2 c0x0000 (---------------) + I net - 0x0024d043, // n0x19f3 c0x0000 (---------------) + I org - 0x00201602, // n0x19f4 c0x0000 (---------------) + I av - 0x0020bfc3, // n0x19f5 c0x0000 (---------------) + I bbs - 0x002111c3, // n0x19f6 c0x0000 (---------------) + I bel - 0x00202183, // n0x19f7 c0x0000 (---------------) + I biz - 0x4aa32dc3, // n0x19f8 c0x012a (n0x1a09-n0x1a0a) + I com - 0x0020fc02, // n0x19f9 c0x0000 (---------------) + I dr - 0x0021e083, // n0x19fa c0x0000 (---------------) + I edu - 0x0020a0c3, // n0x19fb c0x0000 (---------------) + I gen - 0x00209ac3, // n0x19fc c0x0000 (---------------) + I gov - 0x00208a44, // n0x19fd c0x0000 (---------------) + I info - 0x0036e803, // n0x19fe c0x0000 (---------------) + I k12 - 0x00349b43, // n0x19ff c0x0000 (---------------) + I kep - 0x00240443, // n0x1a00 c0x0000 (---------------) + I mil - 0x00267944, // n0x1a01 c0x0000 (---------------) + I name - 0x4ae0a682, // n0x1a02 c0x012b (n0x1a0a-n0x1a0b) + I nc - 0x00218643, // n0x1a03 c0x0000 (---------------) + I net - 0x0024d043, // n0x1a04 c0x0000 (---------------) + I org - 0x002369c3, // n0x1a05 c0x0000 (---------------) + I pol - 0x00218943, // n0x1a06 c0x0000 (---------------) + I tel - 0x0028dc82, // n0x1a07 c0x0000 (---------------) + I tv - 0x002071c3, // n0x1a08 c0x0000 (---------------) + I web - 0x0009e448, // n0x1a09 c0x0000 (---------------) + blogspot - 0x00209ac3, // n0x1a0a c0x0000 (---------------) + I gov - 0x00233784, // n0x1a0b c0x0000 (---------------) + I aero - 0x00202183, // n0x1a0c c0x0000 (---------------) + I biz - 0x00200882, // n0x1a0d c0x0000 (---------------) + I co - 0x00232dc3, // n0x1a0e c0x0000 (---------------) + I com - 0x0023a884, // n0x1a0f c0x0000 (---------------) + I coop - 0x0021e083, // n0x1a10 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1a11 c0x0000 (---------------) + I gov - 0x00208a44, // n0x1a12 c0x0000 (---------------) + I info - 0x002188c3, // n0x1a13 c0x0000 (---------------) + I int - 0x0027e604, // n0x1a14 c0x0000 (---------------) + I jobs - 0x00277f84, // n0x1a15 c0x0000 (---------------) + I mobi - 0x002c2a06, // n0x1a16 c0x0000 (---------------) + I museum - 0x00267944, // n0x1a17 c0x0000 (---------------) + I name - 0x00218643, // n0x1a18 c0x0000 (---------------) + I net - 0x0024d043, // n0x1a19 c0x0000 (---------------) + I org - 0x002cfc43, // n0x1a1a c0x0000 (---------------) + I pro - 0x00290186, // n0x1a1b c0x0000 (---------------) + I travel - 0x00050f4b, // n0x1a1c c0x0000 (---------------) + better-than - 0x0000dc06, // n0x1a1d c0x0000 (---------------) + dyndns - 0x0001618a, // n0x1a1e c0x0000 (---------------) + on-the-web - 0x001540ca, // n0x1a1f c0x0000 (---------------) + worse-than - 0x0009e448, // n0x1a20 c0x0000 (---------------) + blogspot - 0x0034cac4, // n0x1a21 c0x0000 (---------------) + I club - 0x00232dc3, // n0x1a22 c0x0000 (---------------) + I com - 0x00202144, // n0x1a23 c0x0000 (---------------) + I ebiz - 0x0021e083, // n0x1a24 c0x0000 (---------------) + I edu - 0x0028cc84, // n0x1a25 c0x0000 (---------------) + I game - 0x00209ac3, // n0x1a26 c0x0000 (---------------) + I gov - 
0x00309c83, // n0x1a27 c0x0000 (---------------) + I idv - 0x00240443, // n0x1a28 c0x0000 (---------------) + I mil - 0x00218643, // n0x1a29 c0x0000 (---------------) + I net - 0x0024d043, // n0x1a2a c0x0000 (---------------) + I org - 0x00310e0b, // n0x1a2b c0x0000 (---------------) + I xn--czrw28b - 0x00372b4a, // n0x1a2c c0x0000 (---------------) + I xn--uc0atv - 0x003841cc, // n0x1a2d c0x0000 (---------------) + I xn--zf0ao64a - 0x00200b82, // n0x1a2e c0x0000 (---------------) + I ac - 0x00200882, // n0x1a2f c0x0000 (---------------) + I co - 0x00209ac2, // n0x1a30 c0x0000 (---------------) + I go - 0x0029d805, // n0x1a31 c0x0000 (---------------) + I hotel - 0x00208a44, // n0x1a32 c0x0000 (---------------) + I info - 0x00204342, // n0x1a33 c0x0000 (---------------) + I me - 0x00240443, // n0x1a34 c0x0000 (---------------) + I mil - 0x00277f84, // n0x1a35 c0x0000 (---------------) + I mobi - 0x00209e82, // n0x1a36 c0x0000 (---------------) + I ne - 0x00200d02, // n0x1a37 c0x0000 (---------------) + I or - 0x0021bcc2, // n0x1a38 c0x0000 (---------------) + I sc - 0x0028dc82, // n0x1a39 c0x0000 (---------------) + I tv - 0x002a3649, // n0x1a3a c0x0000 (---------------) + I cherkassy - 0x0037e648, // n0x1a3b c0x0000 (---------------) + I cherkasy - 0x0025ec89, // n0x1a3c c0x0000 (---------------) + I chernigov - 0x002626c9, // n0x1a3d c0x0000 (---------------) + I chernihiv - 0x002634ca, // n0x1a3e c0x0000 (---------------) + I chernivtsi - 0x002669ca, // n0x1a3f c0x0000 (---------------) + I chernovtsy - 0x00206182, // n0x1a40 c0x0000 (---------------) + I ck - 0x0022fe42, // n0x1a41 c0x0000 (---------------) + I cn - 0x00200882, // n0x1a42 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1a43 c0x0000 (---------------) + I com - 0x0020b542, // n0x1a44 c0x0000 (---------------) + I cr - 0x002411c6, // n0x1a45 c0x0000 (---------------) + I crimea - 0x00333d82, // n0x1a46 c0x0000 (---------------) + I cv - 0x0020dcc2, // n0x1a47 c0x0000 (---------------) + I dn - 0x002da50e, // n0x1a48 c0x0000 (---------------) + I dnepropetrovsk - 0x002f9d4e, // n0x1a49 c0x0000 (---------------) + I dnipropetrovsk - 0x00276947, // n0x1a4a c0x0000 (---------------) + I dominic - 0x0021d347, // n0x1a4b c0x0000 (---------------) + I donetsk - 0x00218802, // n0x1a4c c0x0000 (---------------) + I dp - 0x0021e083, // n0x1a4d c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1a4e c0x0000 (---------------) + I gov - 0x00203802, // n0x1a4f c0x0000 (---------------) + I if - 0x00200242, // n0x1a50 c0x0000 (---------------) + I in - 0x0024bd8f, // n0x1a51 c0x0000 (---------------) + I ivano-frankivsk - 0x002230c2, // n0x1a52 c0x0000 (---------------) + I kh - 0x0023d887, // n0x1a53 c0x0000 (---------------) + I kharkiv - 0x00240607, // n0x1a54 c0x0000 (---------------) + I kharkov - 0x00247987, // n0x1a55 c0x0000 (---------------) + I kherson - 0x0024c10c, // n0x1a56 c0x0000 (---------------) + I khmelnitskiy - 0x00254f8c, // n0x1a57 c0x0000 (---------------) + I khmelnytskyi - 0x0029ae84, // n0x1a58 c0x0000 (---------------) + I kiev - 0x0022d7ca, // n0x1a59 c0x0000 (---------------) + I kirovograd - 0x00286642, // n0x1a5a c0x0000 (---------------) + I km - 0x0020c642, // n0x1a5b c0x0000 (---------------) + I kr - 0x002a5dc4, // n0x1a5c c0x0000 (---------------) + I krym - 0x0021f9c2, // n0x1a5d c0x0000 (---------------) + I ks - 0x002b0202, // n0x1a5e c0x0000 (---------------) + I kv - 0x002551c4, // n0x1a5f c0x0000 (---------------) + I kyiv - 0x00219682, // n0x1a60 c0x0000 (---------------) + I lg - 
0x0021a082, // n0x1a61 c0x0000 (---------------) + I lt - 0x0022f587, // n0x1a62 c0x0000 (---------------) + I lugansk - 0x00240505, // n0x1a63 c0x0000 (---------------) + I lutsk - 0x00218582, // n0x1a64 c0x0000 (---------------) + I lv - 0x0024bd04, // n0x1a65 c0x0000 (---------------) + I lviv - 0x00340582, // n0x1a66 c0x0000 (---------------) + I mk - 0x002feac8, // n0x1a67 c0x0000 (---------------) + I mykolaiv - 0x00218643, // n0x1a68 c0x0000 (---------------) + I net - 0x0020d188, // n0x1a69 c0x0000 (---------------) + I nikolaev - 0x00202bc2, // n0x1a6a c0x0000 (---------------) + I od - 0x00238e85, // n0x1a6b c0x0000 (---------------) + I odesa - 0x0034a206, // n0x1a6c c0x0000 (---------------) + I odessa - 0x0024d043, // n0x1a6d c0x0000 (---------------) + I org - 0x0020a402, // n0x1a6e c0x0000 (---------------) + I pl - 0x002cd547, // n0x1a6f c0x0000 (---------------) + I poltava - 0x00200a82, // n0x1a70 c0x0000 (---------------) + I pp - 0x002cfb05, // n0x1a71 c0x0000 (---------------) + I rivne - 0x00210885, // n0x1a72 c0x0000 (---------------) + I rovno - 0x0020bf02, // n0x1a73 c0x0000 (---------------) + I rv - 0x00214502, // n0x1a74 c0x0000 (---------------) + I sb - 0x002ec84a, // n0x1a75 c0x0000 (---------------) + I sebastopol - 0x002970ca, // n0x1a76 c0x0000 (---------------) + I sevastopol - 0x0024f102, // n0x1a77 c0x0000 (---------------) + I sm - 0x002f3284, // n0x1a78 c0x0000 (---------------) + I sumy - 0x00207302, // n0x1a79 c0x0000 (---------------) + I te - 0x0036d7c8, // n0x1a7a c0x0000 (---------------) + I ternopil - 0x0021eac2, // n0x1a7b c0x0000 (---------------) + I uz - 0x00353048, // n0x1a7c c0x0000 (---------------) + I uzhgorod - 0x002eed87, // n0x1a7d c0x0000 (---------------) + I vinnica - 0x002ef949, // n0x1a7e c0x0000 (---------------) + I vinnytsia - 0x0020d142, // n0x1a7f c0x0000 (---------------) + I vn - 0x002f36c5, // n0x1a80 c0x0000 (---------------) + I volyn - 0x0036d1c5, // n0x1a81 c0x0000 (---------------) + I yalta - 0x002cbb4b, // n0x1a82 c0x0000 (---------------) + I zaporizhzhe - 0x002b738c, // n0x1a83 c0x0000 (---------------) + I zaporizhzhia - 0x0022eb08, // n0x1a84 c0x0000 (---------------) + I zhitomir - 0x002f3a88, // n0x1a85 c0x0000 (---------------) + I zhytomyr - 0x00279542, // n0x1a86 c0x0000 (---------------) + I zp - 0x0021cf42, // n0x1a87 c0x0000 (---------------) + I zt - 0x00200b82, // n0x1a88 c0x0000 (---------------) + I ac - 0x00200882, // n0x1a89 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1a8a c0x0000 (---------------) + I com - 0x00209ac2, // n0x1a8b c0x0000 (---------------) + I go - 0x00209e82, // n0x1a8c c0x0000 (---------------) + I ne - 0x00200d02, // n0x1a8d c0x0000 (---------------) + I or - 0x0024d043, // n0x1a8e c0x0000 (---------------) + I org - 0x0021bcc2, // n0x1a8f c0x0000 (---------------) + I sc - 0x00200b82, // n0x1a90 c0x0000 (---------------) + I ac - 0x4ce00882, // n0x1a91 c0x0133 (n0x1a9b-n0x1a9c) + I co - 0x4d209ac3, // n0x1a92 c0x0134 (n0x1a9c-n0x1a9d) + I gov - 0x00220e43, // n0x1a93 c0x0000 (---------------) + I ltd - 0x00204342, // n0x1a94 c0x0000 (---------------) + I me - 0x00218643, // n0x1a95 c0x0000 (---------------) + I net - 0x002096c3, // n0x1a96 c0x0000 (---------------) + I nhs - 0x0024d043, // n0x1a97 c0x0000 (---------------) + I org - 0x002cb543, // n0x1a98 c0x0000 (---------------) + I plc - 0x0023dd46, // n0x1a99 c0x0000 (---------------) + I police - 0x01651983, // n0x1a9a c0x0005 (---------------)* o I sch - 0x0009e448, // n0x1a9b c0x0000 (---------------) + 
blogspot - 0x00090b07, // n0x1a9c c0x0000 (---------------) + service - 0x4da019c2, // n0x1a9d c0x0136 (n0x1adc-n0x1adf) + I ak - 0x4de00b02, // n0x1a9e c0x0137 (n0x1adf-n0x1ae2) + I al - 0x4e2030c2, // n0x1a9f c0x0138 (n0x1ae2-n0x1ae5) + I ar - 0x4e600182, // n0x1aa0 c0x0139 (n0x1ae5-n0x1ae8) + I as - 0x4ea01202, // n0x1aa1 c0x013a (n0x1ae8-n0x1aeb) + I az - 0x4ee14582, // n0x1aa2 c0x013b (n0x1aeb-n0x1aee) + I ca - 0x4f200882, // n0x1aa3 c0x013c (n0x1aee-n0x1af1) + I co - 0x4f62a082, // n0x1aa4 c0x013d (n0x1af1-n0x1af4) + I ct - 0x4fa003c2, // n0x1aa5 c0x013e (n0x1af4-n0x1af7) + I dc - 0x4fe07cc2, // n0x1aa6 c0x013f (n0x1af7-n0x1afa) + I de - 0x002e2103, // n0x1aa7 c0x0000 (---------------) + I dni - 0x0023ee83, // n0x1aa8 c0x0000 (---------------) + I fed - 0x5024c602, // n0x1aa9 c0x0140 (n0x1afa-n0x1afd) + I fl - 0x50600fc2, // n0x1aaa c0x0141 (n0x1afd-n0x1b00) + I ga - 0x50a02642, // n0x1aab c0x0142 (n0x1b00-n0x1b03) + I gu - 0x50e00202, // n0x1aac c0x0143 (n0x1b03-n0x1b05) + I hi - 0x51208bc2, // n0x1aad c0x0144 (n0x1b05-n0x1b08) + I ia - 0x51605942, // n0x1aae c0x0145 (n0x1b08-n0x1b0b) + I id - 0x51a00d82, // n0x1aaf c0x0146 (n0x1b0b-n0x1b0e) + I il - 0x51e00242, // n0x1ab0 c0x0147 (n0x1b0e-n0x1b11) + I in - 0x000d0905, // n0x1ab1 c0x0000 (---------------) + is-by - 0x00213e43, // n0x1ab2 c0x0000 (---------------) + I isa - 0x0025c784, // n0x1ab3 c0x0000 (---------------) + I kids - 0x5221f9c2, // n0x1ab4 c0x0148 (n0x1b11-n0x1b14) + I ks - 0x52611002, // n0x1ab5 c0x0149 (n0x1b14-n0x1b17) + I ky - 0x52a000c2, // n0x1ab6 c0x014a (n0x1b17-n0x1b1a) + I la - 0x0007e10b, // n0x1ab7 c0x0000 (---------------) + land-4-sale - 0x52e011c2, // n0x1ab8 c0x014b (n0x1b1a-n0x1b1d) + I ma - 0x5364a3c2, // n0x1ab9 c0x014d (n0x1b20-n0x1b23) + I md - 0x53a04342, // n0x1aba c0x014e (n0x1b23-n0x1b26) + I me - 0x53e00f42, // n0x1abb c0x014f (n0x1b26-n0x1b29) + I mi - 0x5422c7c2, // n0x1abc c0x0150 (n0x1b29-n0x1b2c) + I mn - 0x54605202, // n0x1abd c0x0151 (n0x1b2c-n0x1b2f) + I mo - 0x54a0e602, // n0x1abe c0x0152 (n0x1b2f-n0x1b32) + I ms - 0x54e66782, // n0x1abf c0x0153 (n0x1b32-n0x1b35) + I mt - 0x5520a682, // n0x1ac0 c0x0154 (n0x1b35-n0x1b38) + I nc - 0x55600382, // n0x1ac1 c0x0155 (n0x1b38-n0x1b3a) + I nd - 0x55a09e82, // n0x1ac2 c0x0156 (n0x1b3a-n0x1b3d) + I ne - 0x55e096c2, // n0x1ac3 c0x0157 (n0x1b3d-n0x1b40) + I nh - 0x56202dc2, // n0x1ac4 c0x0158 (n0x1b40-n0x1b43) + I nj - 0x56626d42, // n0x1ac5 c0x0159 (n0x1b43-n0x1b46) + I nm - 0x0036ef43, // n0x1ac6 c0x0000 (---------------) + I nsn - 0x56a15642, // n0x1ac7 c0x015a (n0x1b46-n0x1b49) + I nv - 0x56e1af42, // n0x1ac8 c0x015b (n0x1b49-n0x1b4c) + I ny - 0x57203dc2, // n0x1ac9 c0x015c (n0x1b4c-n0x1b4f) + I oh - 0x5760d582, // n0x1aca c0x015d (n0x1b4f-n0x1b52) + I ok - 0x57a00d02, // n0x1acb c0x015e (n0x1b52-n0x1b55) + I or - 0x57e00ac2, // n0x1acc c0x015f (n0x1b55-n0x1b58) + I pa - 0x5822ad42, // n0x1acd c0x0160 (n0x1b58-n0x1b5b) + I pr - 0x58600d42, // n0x1ace c0x0161 (n0x1b5b-n0x1b5e) + I ri - 0x58a1bcc2, // n0x1acf c0x0162 (n0x1b5e-n0x1b61) + I sc - 0x58e0acc2, // n0x1ad0 c0x0163 (n0x1b61-n0x1b63) + I sd - 0x000dd7cc, // n0x1ad1 c0x0000 (---------------) + stuff-4-sale - 0x59201942, // n0x1ad2 c0x0164 (n0x1b63-n0x1b66) + I tn - 0x59655c42, // n0x1ad3 c0x0165 (n0x1b66-n0x1b69) + I tx - 0x59a02fc2, // n0x1ad4 c0x0166 (n0x1b69-n0x1b6c) + I ut - 0x59e03242, // n0x1ad5 c0x0167 (n0x1b6c-n0x1b6f) + I va - 0x5a201642, // n0x1ad6 c0x0168 (n0x1b6f-n0x1b72) + I vi - 0x5a6170c2, // n0x1ad7 c0x0169 (n0x1b72-n0x1b75) + I vt - 0x5aa00142, // n0x1ad8 c0x016a 
(n0x1b75-n0x1b78) + I wa - 0x5ae07a82, // n0x1ad9 c0x016b (n0x1b78-n0x1b7b) + I wi - 0x5b26e042, // n0x1ada c0x016c (n0x1b7b-n0x1b7c) + I wv - 0x5b61f8c2, // n0x1adb c0x016d (n0x1b7c-n0x1b7f) + I wy - 0x002020c2, // n0x1adc c0x0000 (---------------) + I cc - 0x0036e803, // n0x1add c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1ade c0x0000 (---------------) + I lib - 0x002020c2, // n0x1adf c0x0000 (---------------) + I cc - 0x0036e803, // n0x1ae0 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1ae1 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1ae2 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1ae3 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1ae4 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1ae5 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1ae6 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1ae7 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1ae8 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1ae9 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1aea c0x0000 (---------------) + I lib - 0x002020c2, // n0x1aeb c0x0000 (---------------) + I cc - 0x0036e803, // n0x1aec c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1aed c0x0000 (---------------) + I lib - 0x002020c2, // n0x1aee c0x0000 (---------------) + I cc - 0x0036e803, // n0x1aef c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1af0 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1af1 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1af2 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1af3 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1af4 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1af5 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1af6 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1af7 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1af8 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1af9 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1afa c0x0000 (---------------) + I cc - 0x0036e803, // n0x1afb c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1afc c0x0000 (---------------) + I lib - 0x002020c2, // n0x1afd c0x0000 (---------------) + I cc - 0x0036e803, // n0x1afe c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1aff c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b00 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b01 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b02 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b03 c0x0000 (---------------) + I cc - 0x0027a703, // n0x1b04 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b05 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b06 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b07 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b08 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b09 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b0a c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b0b c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b0c c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b0d c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b0e c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b0f c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b10 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b11 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b12 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b13 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b14 c0x0000 (---------------) + I cc - 
0x0036e803, // n0x1b15 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b16 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b17 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b18 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b19 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b1a c0x0000 (---------------) + I cc - 0x5336e803, // n0x1b1b c0x014c (n0x1b1d-n0x1b20) + I k12 - 0x0027a703, // n0x1b1c c0x0000 (---------------) + I lib - 0x002f5bc4, // n0x1b1d c0x0000 (---------------) + I chtr - 0x0037e546, // n0x1b1e c0x0000 (---------------) + I paroch - 0x002d35c3, // n0x1b1f c0x0000 (---------------) + I pvt - 0x002020c2, // n0x1b20 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b21 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b22 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b23 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b24 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b25 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b26 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b27 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b28 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b29 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b2a c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b2b c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b2c c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b2d c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b2e c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b2f c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b30 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b31 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b32 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b33 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b34 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b35 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b36 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b37 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b38 c0x0000 (---------------) + I cc - 0x0027a703, // n0x1b39 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b3a c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b3b c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b3c c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b3d c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b3e c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b3f c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b40 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b41 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b42 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b43 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b44 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b45 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b46 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b47 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b48 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b49 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b4a c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b4b c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b4c c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b4d c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b4e c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b4f c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b50 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b51 
c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b52 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b53 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b54 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b55 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b56 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b57 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b58 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b59 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b5a c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b5b c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b5c c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b5d c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b5e c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b5f c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b60 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b61 c0x0000 (---------------) + I cc - 0x0027a703, // n0x1b62 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b63 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b64 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b65 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b66 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b67 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b68 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b69 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b6a c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b6b c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b6c c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b6d c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b6e c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b6f c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b70 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b71 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b72 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b73 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b74 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b75 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b76 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b77 c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b78 c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b79 c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b7a c0x0000 (---------------) + I lib - 0x002020c2, // n0x1b7b c0x0000 (---------------) + I cc - 0x002020c2, // n0x1b7c c0x0000 (---------------) + I cc - 0x0036e803, // n0x1b7d c0x0000 (---------------) + I k12 - 0x0027a703, // n0x1b7e c0x0000 (---------------) + I lib - 0x00232dc3, // n0x1b7f c0x0000 (---------------) + I com - 0x0021e083, // n0x1b80 c0x0000 (---------------) + I edu - 0x00345fc3, // n0x1b81 c0x0000 (---------------) + I gub - 0x00240443, // n0x1b82 c0x0000 (---------------) + I mil - 0x00218643, // n0x1b83 c0x0000 (---------------) + I net - 0x0024d043, // n0x1b84 c0x0000 (---------------) + I org - 0x00200882, // n0x1b85 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1b86 c0x0000 (---------------) + I com - 0x00218643, // n0x1b87 c0x0000 (---------------) + I net - 0x0024d043, // n0x1b88 c0x0000 (---------------) + I org - 0x00232dc3, // n0x1b89 c0x0000 (---------------) + I com - 0x0021e083, // n0x1b8a c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1b8b c0x0000 (---------------) + I gov - 0x00240443, // n0x1b8c c0x0000 (---------------) + I mil - 0x00218643, // n0x1b8d c0x0000 (---------------) + I 
net - 0x0024d043, // n0x1b8e c0x0000 (---------------) + I org - 0x0020b384, // n0x1b8f c0x0000 (---------------) + I arts - 0x00200882, // n0x1b90 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1b91 c0x0000 (---------------) + I com - 0x002db583, // n0x1b92 c0x0000 (---------------) + I e12 - 0x0021e083, // n0x1b93 c0x0000 (---------------) + I edu - 0x0024a304, // n0x1b94 c0x0000 (---------------) + I firm - 0x003704c3, // n0x1b95 c0x0000 (---------------) + I gob - 0x00209ac3, // n0x1b96 c0x0000 (---------------) + I gov - 0x00208a44, // n0x1b97 c0x0000 (---------------) + I info - 0x002188c3, // n0x1b98 c0x0000 (---------------) + I int - 0x00240443, // n0x1b99 c0x0000 (---------------) + I mil - 0x00218643, // n0x1b9a c0x0000 (---------------) + I net - 0x0024d043, // n0x1b9b c0x0000 (---------------) + I org - 0x0022a143, // n0x1b9c c0x0000 (---------------) + I rec - 0x002dc745, // n0x1b9d c0x0000 (---------------) + I store - 0x0022a003, // n0x1b9e c0x0000 (---------------) + I tec - 0x002071c3, // n0x1b9f c0x0000 (---------------) + I web - 0x00200882, // n0x1ba0 c0x0000 (---------------) + I co - 0x00232dc3, // n0x1ba1 c0x0000 (---------------) + I com - 0x0036e803, // n0x1ba2 c0x0000 (---------------) + I k12 - 0x00218643, // n0x1ba3 c0x0000 (---------------) + I net - 0x0024d043, // n0x1ba4 c0x0000 (---------------) + I org - 0x00200b82, // n0x1ba5 c0x0000 (---------------) + I ac - 0x00202183, // n0x1ba6 c0x0000 (---------------) + I biz - 0x00232dc3, // n0x1ba7 c0x0000 (---------------) + I com - 0x0021e083, // n0x1ba8 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1ba9 c0x0000 (---------------) + I gov - 0x00241cc6, // n0x1baa c0x0000 (---------------) + I health - 0x00208a44, // n0x1bab c0x0000 (---------------) + I info - 0x002188c3, // n0x1bac c0x0000 (---------------) + I int - 0x00267944, // n0x1bad c0x0000 (---------------) + I name - 0x00218643, // n0x1bae c0x0000 (---------------) + I net - 0x0024d043, // n0x1baf c0x0000 (---------------) + I org - 0x002cfc43, // n0x1bb0 c0x0000 (---------------) + I pro - 0x00232dc3, // n0x1bb1 c0x0000 (---------------) + I com - 0x0021e083, // n0x1bb2 c0x0000 (---------------) + I edu - 0x00218643, // n0x1bb3 c0x0000 (---------------) + I net - 0x0024d043, // n0x1bb4 c0x0000 (---------------) + I org - 0x00232dc3, // n0x1bb5 c0x0000 (---------------) + I com - 0x0000dc06, // n0x1bb6 c0x0000 (---------------) + dyndns - 0x0021e083, // n0x1bb7 c0x0000 (---------------) + I edu - 0x00209ac3, // n0x1bb8 c0x0000 (---------------) + I gov - 0x000f3306, // n0x1bb9 c0x0000 (---------------) + mypets - 0x00218643, // n0x1bba c0x0000 (---------------) + I net - 0x0024d043, // n0x1bbb c0x0000 (---------------) + I org - 0x002fd588, // n0x1bbc c0x0000 (---------------) + I xn--80au - 0x002ffbc9, // n0x1bbd c0x0000 (---------------) + I xn--90azh - 0x0030d409, // n0x1bbe c0x0000 (---------------) + I xn--c1avg - 0x00313188, // n0x1bbf c0x0000 (---------------) + I xn--d1at - 0x0034bfc8, // n0x1bc0 c0x0000 (---------------) + I xn--o1ac - 0x0034bfc9, // n0x1bc1 c0x0000 (---------------) + I xn--o1ach + 0x00301443, // n0x0000 c0x0000 (---------------) + I aaa + 0x002293c4, // n0x0001 c0x0000 (---------------) + I aarp + 0x0036eb43, // n0x0002 c0x0000 (---------------) + I abb + 0x0036eb46, // n0x0003 c0x0000 (---------------) + I abbott + 0x0030cb04, // n0x0004 c0x0000 (---------------) + I able + 0x00355b87, // n0x0005 c0x0000 (---------------) + I abogado + 0x01a01e82, // n0x0006 c0x0006 (n0x0539-n0x053f) + I ac + 0x002f2787, // 
n0x0007 c0x0000 (---------------) + I academy + 0x0033dcc9, // n0x0008 c0x0000 (---------------) + I accenture + 0x002d7e4a, // n0x0009 c0x0000 (---------------) + I accountant + 0x002d7e4b, // n0x000a c0x0000 (---------------) + I accountants + 0x00221483, // n0x000b c0x0000 (---------------) + I aco + 0x0027d3c6, // n0x000c c0x0000 (---------------) + I active + 0x00226f45, // n0x000d c0x0000 (---------------) + I actor + 0x01e10a02, // n0x000e c0x0007 (n0x053f-n0x0540) + I ad + 0x00262ac3, // n0x000f c0x0000 (---------------) + I ads + 0x0036b745, // n0x0010 c0x0000 (---------------) + I adult + 0x02204342, // n0x0011 c0x0008 (n0x0540-n0x0548) + I ae + 0x003285c3, // n0x0012 c0x0000 (---------------) + I aeg + 0x026751c4, // n0x0013 c0x0009 (n0x0548-n0x05a1) + I aero + 0x0036de85, // n0x0014 c0x0000 (---------------) + I aetna + 0x02a0c702, // n0x0015 c0x000a (n0x05a1-n0x05a6) + I af + 0x0023a2c3, // n0x0016 c0x0000 (---------------) + I afl + 0x00367c06, // n0x0017 c0x0000 (---------------) + I africa + 0x00367c0b, // n0x0018 c0x0000 (---------------) + I africamagic + 0x02e015c2, // n0x0019 c0x000b (n0x05a6-n0x05ab) + I ag + 0x002d6e47, // n0x001a c0x0000 (---------------) + I agakhan + 0x00229d46, // n0x001b c0x0000 (---------------) + I agency + 0x032002c2, // n0x001c c0x000c (n0x05ab-n0x05af) + I ai + 0x0024ef43, // n0x001d c0x0000 (---------------) + I aig + 0x002e75c8, // n0x001e c0x0000 (---------------) + I airforce + 0x00273406, // n0x001f c0x0000 (---------------) + I airtel + 0x0036acc4, // n0x0020 c0x0000 (---------------) + I akdn + 0x03600882, // n0x0021 c0x000d (n0x05af-n0x05b6) + I al + 0x00342747, // n0x0022 c0x0000 (---------------) + I alibaba + 0x002ab4c6, // n0x0023 c0x0000 (---------------) + I alipay + 0x002a6589, // n0x0024 c0x0000 (---------------) + I allfinanz + 0x0029c184, // n0x0025 c0x0000 (---------------) + I ally + 0x00215bc6, // n0x0026 c0x0000 (---------------) + I alsace + 0x03a04942, // n0x0027 c0x000e (n0x05b6-n0x05b7) + I am + 0x0027d885, // n0x0028 c0x0000 (---------------) + I amica + 0x002b6cc9, // n0x0029 c0x0000 (---------------) + I amsterdam + 0x03e01242, // n0x002a c0x000f (n0x05b7-n0x05bb) + I an + 0x0022f449, // n0x002b c0x0000 (---------------) + I analytics + 0x002f8647, // n0x002c c0x0000 (---------------) + I android + 0x00295a46, // n0x002d c0x0000 (---------------) + I anquan + 0x0420fd82, // n0x002e c0x0010 (n0x05bb-n0x05c1) + I ao + 0x0028df8a, // n0x002f c0x0000 (---------------) + I apartments + 0x00212a03, // n0x0030 c0x0000 (---------------) + I app + 0x002f0145, // n0x0031 c0x0000 (---------------) + I apple + 0x00273fc2, // n0x0032 c0x0000 (---------------) + I aq + 0x00273fc9, // n0x0033 c0x0000 (---------------) + I aquarelle + 0x04600602, // n0x0034 c0x0011 (n0x05c1-n0x05ca) + I ar + 0x00387dc6, // n0x0035 c0x0000 (---------------) + I aramco + 0x0025dd45, // n0x0036 c0x0000 (---------------) + I archi + 0x00333fc4, // n0x0037 c0x0000 (---------------) + I army + 0x04e29404, // n0x0038 c0x0013 (n0x05cb-n0x05d1) + I arpa + 0x00359e04, // n0x0039 c0x0000 (---------------) + I arte + 0x05200182, // n0x003a c0x0014 (n0x05d1-n0x05d2) + I as + 0x00209144, // n0x003b c0x0000 (---------------) + I asia + 0x002729ca, // n0x003c c0x0000 (---------------) + I associates + 0x05601642, // n0x003d c0x0015 (n0x05d2-n0x05d9) + I at + 0x00389588, // n0x003e c0x0000 (---------------) + I attorney + 0x05e05ac2, // n0x003f c0x0017 (n0x05da-n0x05ec) + I au + 0x002f4487, // n0x0040 c0x0000 (---------------) + I auction + 0x00222244, // 
n0x0041 c0x0000 (---------------) + I audi + 0x00251707, // n0x0042 c0x0000 (---------------) + I audible + 0x00222245, // n0x0043 c0x0000 (---------------) + I audio + 0x002f8406, // n0x0044 c0x0000 (---------------) + I author + 0x002eaac4, // n0x0045 c0x0000 (---------------) + I auto + 0x00309645, // n0x0046 c0x0000 (---------------) + I autos + 0x002c0747, // n0x0047 c0x0000 (---------------) + I avianca + 0x06e02502, // n0x0048 c0x001b (n0x05fa-n0x05fb) + I aw + 0x00272583, // n0x0049 c0x0000 (---------------) + I aws + 0x00215282, // n0x004a c0x0000 (---------------) + I ax + 0x0032b343, // n0x004b c0x0000 (---------------) + I axa + 0x07208cc2, // n0x004c c0x001c (n0x05fb-n0x0607) + I az + 0x0026c905, // n0x004d c0x0000 (---------------) + I azure + 0x07605a82, // n0x004e c0x001d (n0x0607-n0x0612) + I ba + 0x00343204, // n0x004f c0x0000 (---------------) + I baby + 0x0026a085, // n0x0050 c0x0000 (---------------) + I baidu + 0x00255704, // n0x0051 c0x0000 (---------------) + I band + 0x00235dc4, // n0x0052 c0x0000 (---------------) + I bank + 0x0020c103, // n0x0053 c0x0000 (---------------) + I bar + 0x0036bf49, // n0x0054 c0x0000 (---------------) + I barcelona + 0x0031934b, // n0x0055 c0x0000 (---------------) + I barclaycard + 0x002afe08, // n0x0056 c0x0000 (---------------) + I barclays + 0x0030b788, // n0x0057 c0x0000 (---------------) + I barefoot + 0x002ed0c8, // n0x0058 c0x0000 (---------------) + I bargains + 0x003297c7, // n0x0059 c0x0000 (---------------) + I bauhaus + 0x002eef46, // n0x005a c0x0000 (---------------) + I bayern + 0x07a791c2, // n0x005b c0x001e (n0x0612-n0x061c) + I bb + 0x00331f83, // n0x005c c0x0000 (---------------) + I bbc + 0x00340184, // n0x005d c0x0000 (---------------) + I bbva + 0x0028bfc3, // n0x005e c0x0000 (---------------) + I bcg + 0x002dbf83, // n0x005f c0x0000 (---------------) + I bcn + 0x016fbc02, // n0x0060 c0x0005 (---------------)* o I bd + 0x07e04702, // n0x0061 c0x001f (n0x061c-n0x061e) + I be + 0x00243505, // n0x0062 c0x0000 (---------------) + I beats + 0x00391984, // n0x0063 c0x0000 (---------------) + I beer + 0x00352147, // n0x0064 c0x0000 (---------------) + I bentley + 0x00251d46, // n0x0065 c0x0000 (---------------) + I berlin + 0x0030a2c4, // n0x0066 c0x0000 (---------------) + I best + 0x00227703, // n0x0067 c0x0000 (---------------) + I bet + 0x08349f02, // n0x0068 c0x0020 (n0x061e-n0x061f) + I bf + 0x08755e02, // n0x0069 c0x0021 (n0x061f-n0x0644) + I bg + 0x08af6202, // n0x006a c0x0022 (n0x0644-n0x0649) + I bh + 0x00375006, // n0x006b c0x0000 (---------------) + I bharti + 0x08e00002, // n0x006c c0x0023 (n0x0649-n0x064e) + I bi + 0x003628c5, // n0x006d c0x0000 (---------------) + I bible + 0x002fd143, // n0x006e c0x0000 (---------------) + I bid + 0x00390e04, // n0x006f c0x0000 (---------------) + I bike + 0x002c7a44, // n0x0070 c0x0000 (---------------) + I bing + 0x002c7a45, // n0x0071 c0x0000 (---------------) + I bingo + 0x00200003, // n0x0072 c0x0000 (---------------) + I bio + 0x09310603, // n0x0073 c0x0024 (n0x064e-n0x0655) + I biz + 0x09602642, // n0x0074 c0x0025 (n0x0655-n0x0659) + I bj + 0x00277b85, // n0x0075 c0x0000 (---------------) + I black + 0x00277b8b, // n0x0076 c0x0000 (---------------) + I blackfriday + 0x002d0084, // n0x0077 c0x0000 (---------------) + I blog + 0x00205849, // n0x0078 c0x0000 (---------------) + I bloomberg + 0x00207184, // n0x0079 c0x0000 (---------------) + I blue + 0x09a09242, // n0x007a c0x0026 (n0x0659-n0x065e) + I bm + 0x00209243, // n0x007b c0x0000 (---------------) + I bms + 
0x0020a103, // n0x007c c0x0000 (---------------) + I bmw + 0x0160a282, // n0x007d c0x0005 (---------------)* o I bn + 0x00243903, // n0x007e c0x0000 (---------------) + I bnl + 0x0020a28a, // n0x007f c0x0000 (---------------) + I bnpparibas + 0x09e0a702, // n0x0080 c0x0027 (n0x065e-n0x0667) + I bo + 0x0034eb85, // n0x0081 c0x0000 (---------------) + I boats + 0x0020aac3, // n0x0082 c0x0000 (---------------) + I bom + 0x0020b104, // n0x0083 c0x0000 (---------------) + I bond + 0x0020c2c3, // n0x0084 c0x0000 (---------------) + I boo + 0x0020c2c5, // n0x0085 c0x0000 (---------------) + I boots + 0x0020cb05, // n0x0086 c0x0000 (---------------) + I bosch + 0x0020d286, // n0x0087 c0x0000 (---------------) + I bostik + 0x0020e2c3, // n0x0088 c0x0000 (---------------) + I bot + 0x00211048, // n0x0089 c0x0000 (---------------) + I boutique + 0x0a212e82, // n0x008a c0x0028 (n0x0667-n0x06ad) + I br + 0x00212e88, // n0x008b c0x0000 (---------------) + I bradesco + 0x0021a14b, // n0x008c c0x0000 (---------------) + I bridgestone + 0x002172c8, // n0x008d c0x0000 (---------------) + I broadway + 0x00218046, // n0x008e c0x0000 (---------------) + I broker + 0x00219347, // n0x008f c0x0000 (---------------) + I brother + 0x0021be88, // n0x0090 c0x0000 (---------------) + I brussels + 0x0aa35102, // n0x0091 c0x002a (n0x06ae-n0x06b3) + I bs + 0x0ae16fc2, // n0x0092 c0x002b (n0x06b3-n0x06b8) + I bt + 0x0033c648, // n0x0093 c0x0000 (---------------) + I budapest + 0x002c67c5, // n0x0094 c0x0000 (---------------) + I build + 0x00324908, // n0x0095 c0x0000 (---------------) + I builders + 0x0025f008, // n0x0096 c0x0000 (---------------) + I business + 0x0021d903, // n0x0097 c0x0000 (---------------) + I buy + 0x0021e484, // n0x0098 c0x0000 (---------------) + I buzz + 0x003401c2, // n0x0099 c0x0000 (---------------) + I bv + 0x0b21f782, // n0x009a c0x002c (n0x06b8-n0x06ba) + I bw + 0x0b616b42, // n0x009b c0x002d (n0x06ba-n0x06be) + I by + 0x0be203c2, // n0x009c c0x002f (n0x06bf-n0x06c5) + I bz + 0x002203c3, // n0x009d c0x0000 (---------------) + I bzh + 0x0c2055c2, // n0x009e c0x0030 (n0x06c5-n0x06d6) + I ca + 0x0036eb03, // n0x009f c0x0000 (---------------) + I cab + 0x0026ab84, // n0x00a0 c0x0000 (---------------) + I cafe + 0x0020e443, // n0x00a1 c0x0000 (---------------) + I cal + 0x002a6544, // n0x00a2 c0x0000 (---------------) + I call + 0x0022b406, // n0x00a3 c0x0000 (---------------) + I camera + 0x0022c604, // n0x00a4 c0x0000 (---------------) + I camp + 0x0029668e, // n0x00a5 c0x0000 (---------------) + I cancerresearch + 0x00312a05, // n0x00a6 c0x0000 (---------------) + I canon + 0x0021aac8, // n0x00a7 c0x0000 (---------------) + I capetown + 0x002c0887, // n0x00a8 c0x0000 (---------------) + I capital + 0x00205f83, // n0x00a9 c0x0000 (---------------) + I car + 0x002291c7, // n0x00aa c0x0000 (---------------) + I caravan + 0x00319505, // n0x00ab c0x0000 (---------------) + I cards + 0x00205f84, // n0x00ac c0x0000 (---------------) + I care + 0x00205f86, // n0x00ad c0x0000 (---------------) + I career + 0x00205f87, // n0x00ae c0x0000 (---------------) + I careers + 0x002b9384, // n0x00af c0x0000 (---------------) + I cars + 0x00332007, // n0x00b0 c0x0000 (---------------) + I cartier + 0x00383904, // n0x00b1 c0x0000 (---------------) + I casa + 0x00261b04, // n0x00b2 c0x0000 (---------------) + I cash + 0x002112c6, // n0x00b3 c0x0000 (---------------) + I casino + 0x0020d0c3, // n0x00b4 c0x0000 (---------------) + I cat + 0x003246c8, // n0x00b5 c0x0000 (---------------) + I catering + 0x00235d83, // 
n0x00b6 c0x0000 (---------------) + I cba + 0x002438c3, // n0x00b7 c0x0000 (---------------) + I cbn + 0x0037dc04, // n0x00b8 c0x0000 (---------------) + I cbre + 0x0c61aa82, // n0x00b9 c0x0031 (n0x06d6-n0x06da) + I cc + 0x0ca2a082, // n0x00ba c0x0032 (n0x06da-n0x06db) + I cd + 0x00215cc3, // n0x00bb c0x0000 (---------------) + I ceb + 0x00233a06, // n0x00bc c0x0000 (---------------) + I center + 0x002e7743, // n0x00bd c0x0000 (---------------) + I ceo + 0x0021d7c4, // n0x00be c0x0000 (---------------) + I cern + 0x0cf104c2, // n0x00bf c0x0033 (n0x06db-n0x06dc) + I cf + 0x003104c3, // n0x00c0 c0x0000 (---------------) + I cfa + 0x00366243, // n0x00c1 c0x0000 (---------------) + I cfd + 0x0020ea02, // n0x00c2 c0x0000 (---------------) + I cg + 0x0d204a02, // n0x00c3 c0x0034 (n0x06dc-n0x06dd) + I ch + 0x002a9f06, // n0x00c4 c0x0000 (---------------) + I chanel + 0x00227d87, // n0x00c5 c0x0000 (---------------) + I channel + 0x002facc5, // n0x00c6 c0x0000 (---------------) + I chase + 0x0023a704, // n0x00c7 c0x0000 (---------------) + I chat + 0x0028dec5, // n0x00c8 c0x0000 (---------------) + I cheap + 0x00353cc7, // n0x00c9 c0x0000 (---------------) + I chintai + 0x00297d05, // n0x00ca c0x0000 (---------------) + I chloe + 0x002e5109, // n0x00cb c0x0000 (---------------) + I christmas + 0x002e6f86, // n0x00cc c0x0000 (---------------) + I chrome + 0x002fabc6, // n0x00cd c0x0000 (---------------) + I church + 0x0d6155c2, // n0x00ce c0x0035 (n0x06dd-n0x06ec) + I ci + 0x0025f788, // n0x00cf c0x0000 (---------------) + I cipriani + 0x00322106, // n0x00d0 c0x0000 (---------------) + I circle + 0x00237905, // n0x00d1 c0x0000 (---------------) + I cisco + 0x003245c5, // n0x00d2 c0x0000 (---------------) + I citic + 0x002735c4, // n0x00d3 c0x0000 (---------------) + I city + 0x002735c8, // n0x00d4 c0x0000 (---------------) + I cityeats + 0x0da01782, // n0x00d5 c0x0036 (n0x06ec-n0x06ed)* o I ck + 0x0de0af42, // n0x00d6 c0x0037 (n0x06ed-n0x06f2) + I cl + 0x00357546, // n0x00d7 c0x0000 (---------------) + I claims + 0x0032d748, // n0x00d8 c0x0000 (---------------) + I cleaning + 0x00367205, // n0x00d9 c0x0000 (---------------) + I click + 0x00368f86, // n0x00da c0x0000 (---------------) + I clinic + 0x003784c8, // n0x00db c0x0000 (---------------) + I clothing + 0x00332205, // n0x00dc c0x0000 (---------------) + I cloud + 0x002752c4, // n0x00dd c0x0000 (---------------) + I club + 0x002752c7, // n0x00de c0x0000 (---------------) + I clubmed + 0x0e249082, // n0x00df c0x0038 (n0x06f2-n0x06f6) + I cm + 0x0e6211c2, // n0x00e0 c0x0039 (n0x06f6-n0x0723) + I cn + 0x0fe00742, // n0x00e1 c0x003f (n0x0728-n0x0735) + I co + 0x0032ca05, // n0x00e2 c0x0000 (---------------) + I coach + 0x0028ca05, // n0x00e3 c0x0000 (---------------) + I codes + 0x003690c6, // n0x00e4 c0x0000 (---------------) + I coffee + 0x0021e787, // n0x00e5 c0x0000 (---------------) + I college + 0x002214c7, // n0x00e6 c0x0000 (---------------) + I cologne + 0x10622ac3, // n0x00e7 c0x0041 (n0x0736-n0x07ff) + I com + 0x002d4148, // n0x00e8 c0x0000 (---------------) + I commbank + 0x00222ac9, // n0x00e9 c0x0000 (---------------) + I community + 0x002232c7, // n0x00ea c0x0000 (---------------) + I company + 0x002236c8, // n0x00eb c0x0000 (---------------) + I computer + 0x00223ec6, // n0x00ec c0x0000 (---------------) + I comsec + 0x00224306, // n0x00ed c0x0000 (---------------) + I condos + 0x00224c0c, // n0x00ee c0x0000 (---------------) + I construction + 0x00225a8a, // n0x00ef c0x0000 (---------------) + I consulting + 0x00225f47, // n0x00f0 
c0x0000 (---------------) + I contact + 0x00226e0b, // n0x00f1 c0x0000 (---------------) + I contractors + 0x00227bc7, // n0x00f2 c0x0000 (---------------) + I cooking + 0x00227bce, // n0x00f3 c0x0000 (---------------) + I cookingchannel + 0x00228384, // n0x00f4 c0x0000 (---------------) + I cool + 0x00228d44, // n0x00f5 c0x0000 (---------------) + I coop + 0x0022b2c7, // n0x00f6 c0x0000 (---------------) + I corsica + 0x00362c07, // n0x00f7 c0x0000 (---------------) + I country + 0x0022d906, // n0x00f8 c0x0000 (---------------) + I coupon + 0x0022d907, // n0x00f9 c0x0000 (---------------) + I coupons + 0x0022dc87, // n0x00fa c0x0000 (---------------) + I courses + 0x11a0c502, // n0x00fb c0x0046 (n0x081d-n0x0824) + I cr + 0x00230646, // n0x00fc c0x0000 (---------------) + I credit + 0x0023064a, // n0x00fd c0x0000 (---------------) + I creditcard + 0x002308cb, // n0x00fe c0x0000 (---------------) + I creditunion + 0x002319c7, // n0x00ff c0x0000 (---------------) + I cricket + 0x00232885, // n0x0100 c0x0000 (---------------) + I crown + 0x002329c3, // n0x0101 c0x0000 (---------------) + I crs + 0x00233147, // n0x0102 c0x0000 (---------------) + I cruises + 0x00355d43, // n0x0103 c0x0000 (---------------) + I csc + 0x11e24002, // n0x0104 c0x0047 (n0x0824-n0x082a) + I cu + 0x002333ca, // n0x0105 c0x0000 (---------------) + I cuisinella + 0x1233f802, // n0x0106 c0x0048 (n0x082a-n0x082b) + I cv + 0x126b8942, // n0x0107 c0x0049 (n0x082b-n0x082f) + I cw + 0x12a35882, // n0x0108 c0x004a (n0x082f-n0x0831) + I cx + 0x12e29e42, // n0x0109 c0x004b (n0x0831-n0x083e) o I cy + 0x002dcac5, // n0x010a c0x0000 (---------------) + I cymru + 0x00235b84, // n0x010b c0x0000 (---------------) + I cyou + 0x13614442, // n0x010c c0x004d (n0x083f-n0x0840) + I cz + 0x0021bc05, // n0x010d c0x0000 (---------------) + I dabur + 0x00264503, // n0x010e c0x0000 (---------------) + I dad + 0x0032dec5, // n0x010f c0x0000 (---------------) + I dance + 0x00209004, // n0x0110 c0x0000 (---------------) + I date + 0x00390f06, // n0x0111 c0x0000 (---------------) + I dating + 0x00207286, // n0x0112 c0x0000 (---------------) + I datsun + 0x00277d83, // n0x0113 c0x0000 (---------------) + I day + 0x00251304, // n0x0114 c0x0000 (---------------) + I dclk + 0x002f9383, // n0x0115 c0x0000 (---------------) + I dds + 0x13a006c2, // n0x0116 c0x004e (n0x0840-n0x0848) + I de + 0x002196c4, // n0x0117 c0x0000 (---------------) + I deal + 0x00364186, // n0x0118 c0x0000 (---------------) + I dealer + 0x002196c5, // n0x0119 c0x0000 (---------------) + I deals + 0x002528c6, // n0x011a c0x0000 (---------------) + I degree + 0x00268948, // n0x011b c0x0000 (---------------) + I delivery + 0x002297c4, // n0x011c c0x0000 (---------------) + I dell + 0x0024f885, // n0x011d c0x0000 (---------------) + I delta + 0x0022e548, // n0x011e c0x0000 (---------------) + I democrat + 0x00298646, // n0x011f c0x0000 (---------------) + I dental + 0x00234ec7, // n0x0120 c0x0000 (---------------) + I dentist + 0x00232f44, // n0x0121 c0x0000 (---------------) + I desi + 0x00232f46, // n0x0122 c0x0000 (---------------) + I design + 0x0030a403, // n0x0123 c0x0000 (---------------) + I dev + 0x002b0cc8, // n0x0124 c0x0000 (---------------) + I diamonds + 0x002f6384, // n0x0125 c0x0000 (---------------) + I diet + 0x002ef0c7, // n0x0126 c0x0000 (---------------) + I digital + 0x00352bc6, // n0x0127 c0x0000 (---------------) + I direct + 0x00352bc9, // n0x0128 c0x0000 (---------------) + I directory + 0x002f87c8, // n0x0129 c0x0000 (---------------) + I discount + 
0x0020c7c2, // n0x012a c0x0000 (---------------) + I dj + 0x13e71742, // n0x012b c0x004f (n0x0848-n0x0849) + I dk + 0x142618c2, // n0x012c c0x0050 (n0x0849-n0x084e) + I dm + 0x0036cd83, // n0x012d c0x0000 (---------------) + I dnp + 0x14604002, // n0x012e c0x0051 (n0x084e-n0x0858) + I do + 0x00355cc4, // n0x012f c0x0000 (---------------) + I docs + 0x00204003, // n0x0130 c0x0000 (---------------) + I dog + 0x0024f044, // n0x0131 c0x0000 (---------------) + I doha + 0x002e8307, // n0x0132 c0x0000 (---------------) + I domains + 0x0023ba06, // n0x0133 c0x0000 (---------------) + I doosan + 0x0031c343, // n0x0134 c0x0000 (---------------) + I dot + 0x002ede08, // n0x0135 c0x0000 (---------------) + I download + 0x003102c5, // n0x0136 c0x0000 (---------------) + I drive + 0x00394cc4, // n0x0137 c0x0000 (---------------) + I dstv + 0x002482c3, // n0x0138 c0x0000 (---------------) + I dtv + 0x0026a005, // n0x0139 c0x0000 (---------------) + I dubai + 0x0031b886, // n0x013a c0x0000 (---------------) + I dunlop + 0x0032a4c6, // n0x013b c0x0000 (---------------) + I dupont + 0x00383246, // n0x013c c0x0000 (---------------) + I durban + 0x00300bc4, // n0x013d c0x0000 (---------------) + I dvag + 0x14aa3602, // n0x013e c0x0052 (n0x0858-n0x0860) + I dz + 0x00330d85, // n0x013f c0x0000 (---------------) + I earth + 0x00242e03, // n0x0140 c0x0000 (---------------) + I eat + 0x14e00702, // n0x0141 c0x0053 (n0x0860-n0x086c) + I ec + 0x0038d785, // n0x0142 c0x0000 (---------------) + I edeka + 0x002d75c3, // n0x0143 c0x0000 (---------------) + I edu + 0x00375549, // n0x0144 c0x0000 (---------------) + I education + 0x15206042, // n0x0145 c0x0054 (n0x086c-n0x0876) + I ee + 0x15a0df82, // n0x0146 c0x0056 (n0x0877-n0x0880) + I eg + 0x003435c5, // n0x0147 c0x0000 (---------------) + I email + 0x00312206, // n0x0148 c0x0000 (---------------) + I emerck + 0x002ba7c6, // n0x0149 c0x0000 (---------------) + I energy + 0x002a8a88, // n0x014a c0x0000 (---------------) + I engineer + 0x002a8a8b, // n0x014b c0x0000 (---------------) + I engineering + 0x0037a7cb, // n0x014c c0x0000 (---------------) + I enterprises + 0x00364485, // n0x014d c0x0000 (---------------) + I epson + 0x0021f909, // n0x014e c0x0000 (---------------) + I equipment + 0x01600ec2, // n0x014f c0x0005 (---------------)* o I er + 0x00259904, // n0x0150 c0x0000 (---------------) + I erni + 0x162010c2, // n0x0151 c0x0058 (n0x0881-n0x0886) + I es + 0x00275b43, // n0x0152 c0x0000 (---------------) + I esq + 0x002b1846, // n0x0153 c0x0000 (---------------) + I estate + 0x16a0bbc2, // n0x0154 c0x005a (n0x0887-n0x088f) + I et + 0x0021d5c2, // n0x0155 c0x0000 (---------------) + I eu + 0x002ef88a, // n0x0156 c0x0000 (---------------) + I eurovision + 0x002b71c3, // n0x0157 c0x0000 (---------------) + I eus + 0x003423c6, // n0x0158 c0x0000 (---------------) + I events + 0x00381fc8, // n0x0159 c0x0000 (---------------) + I everbank + 0x002f3908, // n0x015a c0x0000 (---------------) + I exchange + 0x0030c7c6, // n0x015b c0x0000 (---------------) + I expert + 0x00203e87, // n0x015c c0x0000 (---------------) + I exposed + 0x0029ab47, // n0x015d c0x0000 (---------------) + I express + 0x0021008a, // n0x015e c0x0000 (---------------) + I extraspace + 0x00310504, // n0x015f c0x0000 (---------------) + I fage + 0x00328344, // n0x0160 c0x0000 (---------------) + I fail + 0x0033d609, // n0x0161 c0x0000 (---------------) + I fairwinds + 0x00377ec5, // n0x0162 c0x0000 (---------------) + I faith + 0x00311c06, // n0x0163 c0x0000 (---------------) + I family + 0x0020f1c3, 
// n0x0164 c0x0000 (---------------) + I fan + 0x002c5544, // n0x0165 c0x0000 (---------------) + I fans + 0x0021f804, // n0x0166 c0x0000 (---------------) + I farm + 0x0025d247, // n0x0167 c0x0000 (---------------) + I fashion + 0x00287d44, // n0x0168 c0x0000 (---------------) + I fast + 0x00369188, // n0x0169 c0x0000 (---------------) + I feedback + 0x003127c7, // n0x016a c0x0000 (---------------) + I ferrero + 0x16e099c2, // n0x016b c0x005b (n0x088f-n0x0892) + I fi + 0x00356104, // n0x016c c0x0000 (---------------) + I film + 0x00356745, // n0x016d c0x0000 (---------------) + I final + 0x00368607, // n0x016e c0x0000 (---------------) + I finance + 0x00371509, // n0x016f c0x0000 (---------------) + I financial + 0x00236744, // n0x0170 c0x0000 (---------------) + I fire + 0x00237d49, // n0x0171 c0x0000 (---------------) + I firestone + 0x00238548, // n0x0172 c0x0000 (---------------) + I firmdale + 0x00238904, // n0x0173 c0x0000 (---------------) + I fish + 0x00238907, // n0x0174 c0x0000 (---------------) + I fishing + 0x00238f03, // n0x0175 c0x0000 (---------------) + I fit + 0x00239087, // n0x0176 c0x0000 (---------------) + I fitness + 0x016241c2, // n0x0177 c0x0005 (---------------)* o I fj + 0x01697782, // n0x0178 c0x0005 (---------------)* o I fk + 0x0023a306, // n0x0179 c0x0000 (---------------) + I flickr + 0x0023b147, // n0x017a c0x0000 (---------------) + I flights + 0x0023c607, // n0x017b c0x0000 (---------------) + I florist + 0x0023ef07, // n0x017c c0x0000 (---------------) + I flowers + 0x0023f508, // n0x017d c0x0000 (---------------) + I flsmidth + 0x0023ffc3, // n0x017e c0x0000 (---------------) + I fly + 0x00358002, // n0x017f c0x0000 (---------------) + I fm + 0x00200382, // n0x0180 c0x0000 (---------------) + I fo + 0x00241943, // n0x0181 c0x0000 (---------------) + I foo + 0x0024194b, // n0x0182 c0x0000 (---------------) + I foodnetwork + 0x0030b888, // n0x0183 c0x0000 (---------------) + I football + 0x003640c4, // n0x0184 c0x0000 (---------------) + I ford + 0x00242f85, // n0x0185 c0x0000 (---------------) + I forex + 0x00244a47, // n0x0186 c0x0000 (---------------) + I forsale + 0x00246b45, // n0x0187 c0x0000 (---------------) + I forum + 0x00303b8a, // n0x0188 c0x0000 (---------------) + I foundation + 0x17200d42, // n0x0189 c0x005c (n0x0892-n0x08aa) + I fr + 0x0024c6c3, // n0x018a c0x0000 (---------------) + I frl + 0x0024c787, // n0x018b c0x0000 (---------------) + I frogans + 0x003906c9, // n0x018c c0x0000 (---------------) + I frontdoor + 0x00200d48, // n0x018d c0x0000 (---------------) + I frontier + 0x0026fc04, // n0x018e c0x0000 (---------------) + I fund + 0x00272f09, // n0x018f c0x0000 (---------------) + I furniture + 0x002764c6, // n0x0190 c0x0000 (---------------) + I futbol + 0x00277503, // n0x0191 c0x0000 (---------------) + I fyi + 0x00201602, // n0x0192 c0x0000 (---------------) + I ga + 0x00215b83, // n0x0193 c0x0000 (---------------) + I gal + 0x00226607, // n0x0194 c0x0000 (---------------) + I gallery + 0x00362a05, // n0x0195 c0x0000 (---------------) + I gallo + 0x0036bd46, // n0x0196 c0x0000 (---------------) + I gallup + 0x00212084, // n0x0197 c0x0000 (---------------) + I game + 0x002d2145, // n0x0198 c0x0000 (---------------) + I games + 0x0020e506, // n0x0199 c0x0000 (---------------) + I garden + 0x00205a42, // n0x019a c0x0000 (---------------) + I gb + 0x00378f84, // n0x019b c0x0000 (---------------) + I gbiz + 0x0021b342, // n0x019c c0x0000 (---------------) + I gd + 0x002d0143, // n0x019d c0x0000 (---------------) + I gdn + 0x176012c2, 
// n0x019e c0x005d (n0x08aa-n0x08b1) + I ge + 0x00237543, // n0x019f c0x0000 (---------------) + I gea + 0x0020db04, // n0x01a0 c0x0000 (---------------) + I gent + 0x0020db07, // n0x01a1 c0x0000 (---------------) + I genting + 0x00248e82, // n0x01a2 c0x0000 (---------------) + I gf + 0x17a00402, // n0x01a3 c0x005e (n0x08b1-n0x08b4) + I gg + 0x00268b44, // n0x01a4 c0x0000 (---------------) + I ggee + 0x17e36e42, // n0x01a5 c0x005f (n0x08b4-n0x08b9) + I gh + 0x18200442, // n0x01a6 c0x0060 (n0x08b9-n0x08bf) + I gi + 0x0030f704, // n0x01a7 c0x0000 (---------------) + I gift + 0x0030f705, // n0x01a8 c0x0000 (---------------) + I gifts + 0x002b9bc5, // n0x01a9 c0x0000 (---------------) + I gives + 0x00213586, // n0x01aa c0x0000 (---------------) + I giving + 0x18629902, // n0x01ab c0x0061 (n0x08bf-n0x08c4) + I gl + 0x0034da85, // n0x01ac c0x0000 (---------------) + I glass + 0x00274303, // n0x01ad c0x0000 (---------------) + I gle + 0x00389c06, // n0x01ae c0x0000 (---------------) + I global + 0x0038a445, // n0x01af c0x0000 (---------------) + I globo + 0x002047c2, // n0x01b0 c0x0000 (---------------) + I gm + 0x00323145, // n0x01b1 c0x0000 (---------------) + I gmail + 0x00208443, // n0x01b2 c0x0000 (---------------) + I gmo + 0x0020a643, // n0x01b3 c0x0000 (---------------) + I gmx + 0x18a050c2, // n0x01b4 c0x0062 (n0x08c4-n0x08ca) + I gn + 0x00238a84, // n0x01b5 c0x0000 (---------------) + I gold + 0x00238a89, // n0x01b6 c0x0000 (---------------) + I goldpoint + 0x0025d184, // n0x01b7 c0x0000 (---------------) + I golf + 0x0028c883, // n0x01b8 c0x0000 (---------------) + I goo + 0x00330c48, // n0x01b9 c0x0000 (---------------) + I goodyear + 0x0028c884, // n0x01ba c0x0000 (---------------) + I goog + 0x0028c886, // n0x01bb c0x0000 (---------------) + I google + 0x0028e783, // n0x01bc c0x0000 (---------------) + I gop + 0x0020bec3, // n0x01bd c0x0000 (---------------) + I got + 0x0020bec4, // n0x01be c0x0000 (---------------) + I gotv + 0x0021e283, // n0x01bf c0x0000 (---------------) + I gov + 0x18ece142, // n0x01c0 c0x0063 (n0x08ca-n0x08d0) + I gp + 0x0034f382, // n0x01c1 c0x0000 (---------------) + I gq + 0x1920dc82, // n0x01c2 c0x0064 (n0x08d0-n0x08d6) + I gr + 0x002ff248, // n0x01c3 c0x0000 (---------------) + I grainger + 0x00341188, // n0x01c4 c0x0000 (---------------) + I graphics + 0x0037d046, // n0x01c5 c0x0000 (---------------) + I gratis + 0x0025b105, // n0x01c6 c0x0000 (---------------) + I green + 0x0021dd45, // n0x01c7 c0x0000 (---------------) + I gripe + 0x002646c5, // n0x01c8 c0x0000 (---------------) + I group + 0x0026cd02, // n0x01c9 c0x0000 (---------------) + I gs + 0x19651202, // n0x01ca c0x0065 (n0x08d6-n0x08dd) + I gt + 0x01629702, // n0x01cb c0x0005 (---------------)* o I gu + 0x0025f6c5, // n0x01cc c0x0000 (---------------) + I gucci + 0x002ca244, // n0x01cd c0x0000 (---------------) + I guge + 0x00229705, // n0x01ce c0x0000 (---------------) + I guide + 0x0022d647, // n0x01cf c0x0000 (---------------) + I guitars + 0x0025bc44, // n0x01d0 c0x0000 (---------------) + I guru + 0x00204b82, // n0x01d1 c0x0000 (---------------) + I gw + 0x19a25a02, // n0x01d2 c0x0066 (n0x08dd-n0x08e0) + I gy + 0x0038a2c7, // n0x01d3 c0x0000 (---------------) + I hamburg + 0x00392287, // n0x01d4 c0x0000 (---------------) + I hangout + 0x00329884, // n0x01d5 c0x0000 (---------------) + I haus + 0x00235cc8, // n0x01d6 c0x0000 (---------------) + I hdfcbank + 0x00205e06, // n0x01d7 c0x0000 (---------------) + I health + 0x00205e0a, // n0x01d8 c0x0000 (---------------) + I healthcare + 
0x00205204, // n0x01d9 c0x0000 (---------------) + I help + 0x00209408, // n0x01da c0x0000 (---------------) + I helsinki + 0x0023f784, // n0x01db c0x0000 (---------------) + I here + 0x00219446, // n0x01dc c0x0000 (---------------) + I hermes + 0x00280244, // n0x01dd c0x0000 (---------------) + I hgtv + 0x0032cd06, // n0x01de c0x0000 (---------------) + I hiphop + 0x0028d547, // n0x01df c0x0000 (---------------) + I hitachi + 0x0025f603, // n0x01e0 c0x0000 (---------------) + I hiv + 0x19e2ea02, // n0x01e1 c0x0067 (n0x08e0-n0x08f8) + I hk + 0x0025ab03, // n0x01e2 c0x0000 (---------------) + I hkt + 0x00206182, // n0x01e3 c0x0000 (---------------) + I hm + 0x1a217542, // n0x01e4 c0x0068 (n0x08f8-n0x08fe) + I hn + 0x002cd886, // n0x01e5 c0x0000 (---------------) + I hockey + 0x0034b148, // n0x01e6 c0x0000 (---------------) + I holdings + 0x00290807, // n0x01e7 c0x0000 (---------------) + I holiday + 0x002b8e89, // n0x01e8 c0x0000 (---------------) + I homedepot + 0x00291385, // n0x01e9 c0x0000 (---------------) + I homes + 0x00292005, // n0x01ea c0x0000 (---------------) + I honda + 0x00293c05, // n0x01eb c0x0000 (---------------) + I horse + 0x00202fc4, // n0x01ec c0x0000 (---------------) + I host + 0x002836c7, // n0x01ed c0x0000 (---------------) + I hosting + 0x00294947, // n0x01ee c0x0000 (---------------) + I hoteles + 0x00294fc7, // n0x01ef c0x0000 (---------------) + I hotmail + 0x0021da05, // n0x01f0 c0x0000 (---------------) + I house + 0x00297383, // n0x01f1 c0x0000 (---------------) + I how + 0x1a625842, // n0x01f2 c0x0069 (n0x08fe-n0x0903) + I hr + 0x002dbf04, // n0x01f3 c0x0000 (---------------) + I hsbc + 0x1aa36e82, // n0x01f4 c0x006a (n0x0903-n0x0914) + I ht + 0x00249003, // n0x01f5 c0x0000 (---------------) + I htc + 0x1ae17d42, // n0x01f6 c0x006b (n0x0914-n0x0934) + I hu + 0x0031b7c3, // n0x01f7 c0x0000 (---------------) + I ibm + 0x0028bf44, // n0x01f8 c0x0000 (---------------) + I icbc + 0x00200b43, // n0x01f9 c0x0000 (---------------) + I ice + 0x0033c383, // n0x01fa c0x0000 (---------------) + I icu + 0x1b206202, // n0x01fb c0x006c (n0x0934-n0x093f) + I id + 0x1ba00e82, // n0x01fc c0x006e (n0x0940-n0x0942) + I ie + 0x00370d03, // n0x01fd c0x0000 (---------------) + I ifm + 0x0031cac5, // n0x01fe c0x0000 (---------------) + I iinet + 0x1be036c2, // n0x01ff c0x006f (n0x0942-n0x0943)* o I il + 0x1c603582, // n0x0200 c0x0071 (n0x0944-n0x094b) + I im + 0x002f4284, // n0x0201 c0x0000 (---------------) + I imdb + 0x00203584, // n0x0202 c0x0000 (---------------) + I immo + 0x0020358a, // n0x0203 c0x0000 (---------------) + I immobilien + 0x1ce00242, // n0x0204 c0x0073 (n0x094d-n0x095a) + I in + 0x00353eca, // n0x0205 c0x0000 (---------------) + I industries + 0x00355408, // n0x0206 c0x0000 (---------------) + I infiniti + 0x1d200304, // n0x0207 c0x0074 (n0x095a-n0x0964) + I info + 0x0020dc03, // n0x0208 c0x0000 (---------------) + I ing + 0x00209503, // n0x0209 c0x0000 (---------------) + I ink + 0x002ed209, // n0x020a c0x0000 (---------------) + I institute + 0x002806c9, // n0x020b c0x0000 (---------------) + I insurance + 0x002e8406, // n0x020c c0x0000 (---------------) + I insure + 0x1d638c03, // n0x020d c0x0075 (n0x0964-n0x0965) + I int + 0x002dd88d, // n0x020e c0x0000 (---------------) + I international + 0x002087cb, // n0x020f c0x0000 (---------------) + I investments + 0x1da00042, // n0x0210 c0x0076 (n0x0965-n0x0968) + I io + 0x00331048, // n0x0211 c0x0000 (---------------) + I ipiranga + 0x1de11142, // n0x0212 c0x0077 (n0x0968-n0x096e) + I iq + 0x1e200542, // n0x0213 
c0x0078 (n0x096e-n0x0977) + I ir + 0x00294c05, // n0x0214 c0x0000 (---------------) + I irish + 0x1e602b42, // n0x0215 c0x0079 (n0x0977-n0x097f) + I is + 0x00370f07, // n0x0216 c0x0000 (---------------) + I iselect + 0x00202b43, // n0x0217 c0x0000 (---------------) + I ist + 0x00210c48, // n0x0218 c0x0000 (---------------) + I istanbul + 0x1ea06e82, // n0x0219 c0x007a (n0x097f-n0x0af0) + I it + 0x00206e84, // n0x021a c0x0000 (---------------) + I itau + 0x003664c3, // n0x021b c0x0000 (---------------) + I iwc + 0x002a51c6, // n0x021c c0x0000 (---------------) + I jaguar + 0x0030ad84, // n0x021d c0x0000 (---------------) + I java + 0x00243883, // n0x021e c0x0000 (---------------) + I jcb + 0x0025b3c3, // n0x021f c0x0000 (---------------) + I jcp + 0x1ee01f82, // n0x0220 c0x007b (n0x0af0-n0x0af3) + I je + 0x0033ab85, // n0x0221 c0x0000 (---------------) + I jetzt + 0x0034f487, // n0x0222 c0x0000 (---------------) + I jewelry + 0x00266583, // n0x0223 c0x0000 (---------------) + I jio + 0x00297c83, // n0x0224 c0x0000 (---------------) + I jlc + 0x00297e43, // n0x0225 c0x0000 (---------------) + I jll + 0x01697f02, // n0x0226 c0x0005 (---------------)* o I jm + 0x00297f03, // n0x0227 c0x0000 (---------------) + I jmp + 0x002987c3, // n0x0228 c0x0000 (---------------) + I jnj + 0x1f202982, // n0x0229 c0x007c (n0x0af3-n0x0afb) + I jo + 0x002a5d04, // n0x022a c0x0000 (---------------) + I jobs + 0x00389ac6, // n0x022b c0x0000 (---------------) + I joburg + 0x00203903, // n0x022c c0x0000 (---------------) + I jot + 0x00298c43, // n0x022d c0x0000 (---------------) + I joy + 0x1f6990c2, // n0x022e c0x007d (n0x0afb-n0x0b6a) + I jp + 0x002990c8, // n0x022f c0x0000 (---------------) + I jpmorgan + 0x0029ccc4, // n0x0230 c0x0000 (---------------) + I jprs + 0x002faec6, // n0x0231 c0x0000 (---------------) + I juegos + 0x002e6846, // n0x0232 c0x0000 (---------------) + I kaufen + 0x00233fc4, // n0x0233 c0x0000 (---------------) + I kddi + 0x2d201002, // n0x0234 c0x00b4 (n0x11fe-n0x11ff)* o I ke + 0x002a6f4b, // n0x0235 c0x0000 (---------------) + I kerryhotels + 0x0021268e, // n0x0236 c0x0000 (---------------) + I kerrylogistics + 0x0021810f, // n0x0237 c0x0000 (---------------) + I kerryproperties + 0x00235e83, // n0x0238 c0x0000 (---------------) + I kfh + 0x2daa3fc2, // n0x0239 c0x00b6 (n0x1200-n0x1206) + I kg + 0x016176c2, // n0x023a c0x0005 (---------------)* o I kh + 0x2de01b02, // n0x023b c0x00b7 (n0x1206-n0x120d) + I ki + 0x0027d603, // n0x023c c0x0000 (---------------) + I kim + 0x002f33c6, // n0x023d c0x0000 (---------------) + I kinder + 0x00227886, // n0x023e c0x0000 (---------------) + I kindle + 0x00298f07, // n0x023f c0x0000 (---------------) + I kitchen + 0x002d2dc4, // n0x0240 c0x0000 (---------------) + I kiwi + 0x2e268d82, // n0x0241 c0x00b8 (n0x120d-n0x121e) + I km + 0x2e656942, // n0x0242 c0x00b9 (n0x121e-n0x1222) + I kn + 0x0021f385, // n0x0243 c0x0000 (---------------) + I koeln + 0x002be447, // n0x0244 c0x0000 (---------------) + I komatsu + 0x2ea08382, // n0x0245 c0x00ba (n0x1222-n0x1228) + I kp + 0x00208384, // n0x0246 c0x0000 (---------------) + I kpmg + 0x00360183, // n0x0247 c0x0000 (---------------) + I kpn + 0x2ee034c2, // n0x0248 c0x00bb (n0x1228-n0x1246) + I kr + 0x0033a003, // n0x0249 c0x0000 (---------------) + I krd + 0x0029d8c4, // n0x024a c0x0000 (---------------) + I kred + 0x002a3f09, // n0x024b c0x0000 (---------------) + I kuokgroup + 0x016ac642, // n0x024c c0x0005 (---------------)* o I kw + 0x2f229082, // n0x024d c0x00bc (n0x1246-n0x124b) + I ky + 0x002568c6, // 
n0x024e c0x0000 (---------------) + I kyknet + 0x002acb05, // n0x024f c0x0000 (---------------) + I kyoto + 0x2f6f0002, // n0x0250 c0x00bd (n0x124b-n0x1251) + I kz + 0x2fa01e42, // n0x0251 c0x00be (n0x1251-n0x125a) + I la + 0x003276c7, // n0x0252 c0x0000 (---------------) + I lacaixa + 0x0033df4b, // n0x0253 c0x0000 (---------------) + I lamborghini + 0x002335c9, // n0x0254 c0x0000 (---------------) + I lancaster + 0x002364c4, // n0x0255 c0x0000 (---------------) + I land + 0x0036d989, // n0x0256 c0x0000 (---------------) + I landrover + 0x002679c7, // n0x0257 c0x0000 (---------------) + I lasalle + 0x00222143, // n0x0258 c0x0000 (---------------) + I lat + 0x002c18c7, // n0x0259 c0x0000 (---------------) + I latrobe + 0x00253883, // n0x025a c0x0000 (---------------) + I law + 0x00270406, // n0x025b c0x0000 (---------------) + I lawyer + 0x2fe0a542, // n0x025c c0x00bf (n0x125a-n0x125f) + I lb + 0x302339c2, // n0x025d c0x00c0 (n0x125f-n0x1265) + I lc + 0x0021fbc3, // n0x025e c0x0000 (---------------) + I lds + 0x00267b05, // n0x025f c0x0000 (---------------) + I lease + 0x0025f307, // n0x0260 c0x0000 (---------------) + I leclerc + 0x00362985, // n0x0261 c0x0000 (---------------) + I legal + 0x002ad945, // n0x0262 c0x0000 (---------------) + I lexus + 0x002ce984, // n0x0263 c0x0000 (---------------) + I lgbt + 0x306008c2, // n0x0264 c0x00c1 (n0x1265-n0x1266) + I li + 0x002ee607, // n0x0265 c0x0000 (---------------) + I liaison + 0x002aadc4, // n0x0266 c0x0000 (---------------) + I lidl + 0x00247844, // n0x0267 c0x0000 (---------------) + I life + 0x0031c64d, // n0x0268 c0x0000 (---------------) + I lifeinsurance + 0x00247849, // n0x0269 c0x0000 (---------------) + I lifestyle + 0x00236dc8, // n0x026a c0x0000 (---------------) + I lighting + 0x00261684, // n0x026b c0x0000 (---------------) + I like + 0x002edc87, // n0x026c c0x0000 (---------------) + I limited + 0x002ee1c4, // n0x026d c0x0000 (---------------) + I limo + 0x00251e07, // n0x026e c0x0000 (---------------) + I lincoln + 0x0031dc05, // n0x026f c0x0000 (---------------) + I linde + 0x0031e2c4, // n0x0270 c0x0000 (---------------) + I link + 0x002bfc05, // n0x0271 c0x0000 (---------------) + I lipsy + 0x0024b384, // n0x0272 c0x0000 (---------------) + I live + 0x002d7785, // n0x0273 c0x0000 (---------------) + I lixil + 0x30a08342, // n0x0274 c0x00c2 (n0x1266-n0x1275) + I lk + 0x0020ab84, // n0x0275 c0x0000 (---------------) + I loan + 0x0020ab85, // n0x0276 c0x0000 (---------------) + I loans + 0x0020af86, // n0x0277 c0x0000 (---------------) + I locker + 0x00362ac5, // n0x0278 c0x0000 (---------------) + I locus + 0x002c1ec3, // n0x0279 c0x0000 (---------------) + I lol + 0x00340906, // n0x027a c0x0000 (---------------) + I london + 0x00364385, // n0x027b c0x0000 (---------------) + I lotte + 0x00206505, // n0x027c c0x0000 (---------------) + I lotto + 0x002123c4, // n0x027d c0x0000 (---------------) + I love + 0x30e76e02, // n0x027e c0x00c3 (n0x1275-n0x127a) + I lr + 0x31202d42, // n0x027f c0x00c4 (n0x127a-n0x127c) + I ls + 0x31605ec2, // n0x0280 c0x00c5 (n0x127c-n0x127e) + I lt + 0x003413c3, // n0x0281 c0x0000 (---------------) + I ltd + 0x003413c4, // n0x0282 c0x0000 (---------------) + I ltda + 0x31a071c2, // n0x0283 c0x00c6 (n0x127e-n0x127f) + I lu + 0x0036be05, // n0x0284 c0x0000 (---------------) + I lupin + 0x00222d84, // n0x0285 c0x0000 (---------------) + I luxe + 0x00225dc6, // n0x0286 c0x0000 (---------------) + I luxury + 0x31e27f02, // n0x0287 c0x00c7 (n0x127f-n0x1288) + I lv + 0x32228b42, // n0x0288 c0x00c8 
(n0x1288-n0x1291) + I ly + 0x32604302, // n0x0289 c0x00c9 (n0x1291-n0x1297) + I ma + 0x00300a86, // n0x028a c0x0000 (---------------) + I madrid + 0x0032a984, // n0x028b c0x0000 (---------------) + I maif + 0x0033c9c6, // n0x028c c0x0000 (---------------) + I maison + 0x0026a746, // n0x028d c0x0000 (---------------) + I makeup + 0x00206903, // n0x028e c0x0000 (---------------) + I man + 0x00351aca, // n0x028f c0x0000 (---------------) + I management + 0x0022a685, // n0x0290 c0x0000 (---------------) + I mango + 0x0029bcc6, // n0x0291 c0x0000 (---------------) + I market + 0x00357049, // n0x0292 c0x0000 (---------------) + I marketing + 0x0029bcc7, // n0x0293 c0x0000 (---------------) + I markets + 0x0027c808, // n0x0294 c0x0000 (---------------) + I marriott + 0x00271143, // n0x0295 c0x0000 (---------------) + I mba + 0x32a3a6c2, // n0x0296 c0x00ca (n0x1297-n0x1299) + I mc + 0x32e38602, // n0x0297 c0x00cb (n0x1299-n0x129a) + I md + 0x33208942, // n0x0298 c0x00cc (n0x129a-n0x12a2) + I me + 0x002dc385, // n0x0299 c0x0000 (---------------) + I media + 0x00282f44, // n0x029a c0x0000 (---------------) + I meet + 0x0020fe89, // n0x029b c0x0000 (---------------) + I melbourne + 0x003121c4, // n0x029c c0x0000 (---------------) + I meme + 0x002edac8, // n0x029d c0x0000 (---------------) + I memorial + 0x00208943, // n0x029e c0x0000 (---------------) + I men + 0x0034d804, // n0x029f c0x0000 (---------------) + I menu + 0x0023a883, // n0x02a0 c0x0000 (---------------) + I meo + 0x0031c587, // n0x02a1 c0x0000 (---------------) + I metlife + 0x33608402, // n0x02a2 c0x00cd (n0x12a2-n0x12aa) + I mg + 0x00249282, // n0x02a3 c0x0000 (---------------) + I mh + 0x002322c5, // n0x02a4 c0x0000 (---------------) + I miami + 0x00257ec9, // n0x02a5 c0x0000 (---------------) + I microsoft + 0x0023fa03, // n0x02a6 c0x0000 (---------------) + I mil + 0x0026b344, // n0x02a7 c0x0000 (---------------) + I mini + 0x00246f03, // n0x02a8 c0x0000 (---------------) + I mit + 0x33b56d82, // n0x02a9 c0x00ce (n0x12aa-n0x12b2) + I mk + 0x33e0ab42, // n0x02aa c0x00cf (n0x12b2-n0x12b9) + I ml + 0x002afd83, // n0x02ab c0x0000 (---------------) + I mlb + 0x00358b83, // n0x02ac c0x0000 (---------------) + I mls + 0x016035c2, // n0x02ad c0x0005 (---------------)* o I mm + 0x003679c3, // n0x02ae c0x0000 (---------------) + I mma + 0x34217082, // n0x02af c0x00d0 (n0x12b9-n0x12bd) + I mn + 0x00217084, // n0x02b0 c0x0000 (---------------) + I mnet + 0x34603602, // n0x02b1 c0x00d1 (n0x12bd-n0x12c2) + I mo + 0x00203604, // n0x02b2 c0x0000 (---------------) + I mobi + 0x002cc406, // n0x02b3 c0x0000 (---------------) + I mobily + 0x00208484, // n0x02b4 c0x0000 (---------------) + I moda + 0x002fb2c3, // n0x02b5 c0x0000 (---------------) + I moe + 0x0038f703, // n0x02b6 c0x0000 (---------------) + I moi + 0x0021cfc3, // n0x02b7 c0x0000 (---------------) + I mom + 0x00230c46, // n0x02b8 c0x0000 (---------------) + I monash + 0x002b6185, // n0x02b9 c0x0000 (---------------) + I money + 0x00261909, // n0x02ba c0x0000 (---------------) + I montblanc + 0x002b60c6, // n0x02bb c0x0000 (---------------) + I mormon + 0x002b66c8, // n0x02bc c0x0000 (---------------) + I mortgage + 0x002b68c6, // n0x02bd c0x0000 (---------------) + I moscow + 0x0025cb84, // n0x02be c0x0000 (---------------) + I moto + 0x0028678b, // n0x02bf c0x0000 (---------------) + I motorcycles + 0x002b8403, // n0x02c0 c0x0000 (---------------) + I mov + 0x002b8405, // n0x02c1 c0x0000 (---------------) + I movie + 0x002b8548, // n0x02c2 c0x0000 (---------------) + I movistar + 
0x00214902, // n0x02c3 c0x0000 (---------------) + I mp + 0x003279c2, // n0x02c4 c0x0000 (---------------) + I mq + 0x34adcb42, // n0x02c5 c0x00d2 (n0x12c2-n0x12c4) + I mr + 0x34e09282, // n0x02c6 c0x00d3 (n0x12c4-n0x12c9) + I ms + 0x35259642, // n0x02c7 c0x00d4 (n0x12c9-n0x12cd) + I mt + 0x00259643, // n0x02c8 c0x0000 (---------------) + I mtn + 0x002b8844, // n0x02c9 c0x0000 (---------------) + I mtpc + 0x002b9703, // n0x02ca c0x0000 (---------------) + I mtr + 0x35a000c2, // n0x02cb c0x00d6 (n0x12ce-n0x12d5) + I mu + 0x002bae0b, // n0x02cc c0x0000 (---------------) + I multichoice + 0x35ebd646, // n0x02cd c0x00d7 (n0x12d5-n0x14f9) + I museum + 0x0036d306, // n0x02ce c0x0000 (---------------) + I mutual + 0x002bdc88, // n0x02cf c0x0000 (---------------) + I mutuelle + 0x3624ffc2, // n0x02d0 c0x00d8 (n0x14f9-n0x1507) + I mv + 0x3660a142, // n0x02d1 c0x00d9 (n0x1507-n0x1512) + I mw + 0x36a0a682, // n0x02d2 c0x00da (n0x1512-n0x1518) + I mx + 0x36e20282, // n0x02d3 c0x00db (n0x1518-n0x1520) + I my + 0x372c6c02, // n0x02d4 c0x00dc (n0x1520-n0x1521)* o I mz + 0x002c6c0b, // n0x02d5 c0x0000 (---------------) + I mzansimagic + 0x37600282, // n0x02d6 c0x00dd (n0x1521-n0x1532) + I na + 0x002f3845, // n0x02d7 c0x0000 (---------------) + I nadex + 0x0032b206, // n0x02d8 c0x0000 (---------------) + I nagoya + 0x37a98944, // n0x02d9 c0x00de (n0x1532-n0x1534) + I name + 0x0030cfc7, // n0x02da c0x0000 (---------------) + I naspers + 0x00240dc6, // n0x02db c0x0000 (---------------) + I natura + 0x0037fe44, // n0x02dc c0x0000 (---------------) + I navy + 0x3861c742, // n0x02dd c0x00e1 (n0x1536-n0x1537) + I nc + 0x00201082, // n0x02de c0x0000 (---------------) + I ne + 0x00305e83, // n0x02df c0x0000 (---------------) + I nec + 0x38a170c3, // n0x02e0 c0x00e2 (n0x1537-n0x1568) + I net + 0x002efe87, // n0x02e1 c0x0000 (---------------) + I netbank + 0x002d7687, // n0x02e2 c0x0000 (---------------) + I netflix + 0x00241a47, // n0x02e3 c0x0000 (---------------) + I network + 0x00319107, // n0x02e4 c0x0000 (---------------) + I neustar + 0x0021a383, // n0x02e5 c0x0000 (---------------) + I new + 0x00366d44, // n0x02e6 c0x0000 (---------------) + I news + 0x00210044, // n0x02e7 c0x0000 (---------------) + I next + 0x00352aca, // n0x02e8 c0x0000 (---------------) + I nextdirect + 0x00255985, // n0x02e9 c0x0000 (---------------) + I nexus + 0x39e00342, // n0x02ea c0x00e7 (n0x1570-n0x157a) + I nf + 0x3a201282, // n0x02eb c0x00e8 (n0x157a-n0x1583) + I ng + 0x00202303, // n0x02ec c0x0000 (---------------) + I ngo + 0x0025aac3, // n0x02ed c0x0000 (---------------) + I nhk + 0x01603d42, // n0x02ee c0x0005 (---------------)* o I ni + 0x00369044, // n0x02ef c0x0000 (---------------) + I nico + 0x00204c45, // n0x02f0 c0x0000 (---------------) + I nikon + 0x002bbfc5, // n0x02f1 c0x0000 (---------------) + I ninja + 0x0029c346, // n0x02f2 c0x0000 (---------------) + I nissan + 0x3aa36482, // n0x02f3 c0x00ea (n0x1584-n0x1587) + I nl + 0x3ae00c02, // n0x02f4 c0x00eb (n0x1587-n0x185d) + I no + 0x00201b85, // n0x02f5 c0x0000 (---------------) + I nokia + 0x0036d012, // n0x02f6 c0x0000 (---------------) + I northwesternmutual + 0x00204546, // n0x02f7 c0x0000 (---------------) + I norton + 0x0021c343, // n0x02f8 c0x0000 (---------------) + I now + 0x0036a0c6, // n0x02f9 c0x0000 (---------------) + I nowruz + 0x0030b645, // n0x02fa c0x0000 (---------------) + I nowtv + 0x0160a2c2, // n0x02fb c0x0005 (---------------)* o I np + 0x43209e82, // n0x02fc c0x010c (n0x1885-n0x188c) + I nr + 0x002cc1c3, // n0x02fd c0x0000 (---------------) 
+ I nra + 0x00345903, // n0x02fe c0x0000 (---------------) + I nrw + 0x00361083, // n0x02ff c0x0000 (---------------) + I ntt + 0x43605bc2, // n0x0300 c0x010d (n0x188c-n0x188f) + I nu + 0x00223403, // n0x0301 c0x0000 (---------------) + I nyc + 0x43a078c2, // n0x0302 c0x010e (n0x188f-n0x189f) + I nz + 0x00203643, // n0x0303 c0x0000 (---------------) + I obi + 0x002a5d48, // n0x0304 c0x0000 (---------------) + I observer + 0x00219c46, // n0x0305 c0x0000 (---------------) + I office + 0x003954c7, // n0x0306 c0x0000 (---------------) + I okinawa + 0x002a2686, // n0x0307 c0x0000 (---------------) + I olayan + 0x002a268b, // n0x0308 c0x0000 (---------------) + I olayangroup + 0x002dbd04, // n0x0309 c0x0000 (---------------) + I ollo + 0x44200082, // n0x030a c0x0110 (n0x18a0-n0x18a9) + I om + 0x0036bc85, // n0x030b c0x0000 (---------------) + I omega + 0x0021a343, // n0x030c c0x0000 (---------------) + I one + 0x00292c83, // n0x030d c0x0000 (---------------) + I ong + 0x003023c3, // n0x030e c0x0000 (---------------) + I onl + 0x003023c6, // n0x030f c0x0000 (---------------) + I online + 0x0027b9c3, // n0x0310 c0x0000 (---------------) + I ooo + 0x0032d686, // n0x0311 c0x0000 (---------------) + I oracle + 0x002a6246, // n0x0312 c0x0000 (---------------) + I orange + 0x4461dcc3, // n0x0313 c0x0111 (n0x18a9-n0x18e3) + I org + 0x00299187, // n0x0314 c0x0000 (---------------) + I organic + 0x0029a9cd, // n0x0315 c0x0000 (---------------) + I orientexpress + 0x002864c5, // n0x0316 c0x0000 (---------------) + I osaka + 0x00239506, // n0x0317 c0x0000 (---------------) + I otsuka + 0x00206543, // n0x0318 c0x0000 (---------------) + I ott + 0x00389e43, // n0x0319 c0x0000 (---------------) + I ovh + 0x45e052c2, // n0x031a c0x0117 (n0x1920-n0x192b) + I pa + 0x00310fc4, // n0x031b c0x0000 (---------------) + I page + 0x002fb60c, // n0x031c c0x0000 (---------------) + I pamperedchef + 0x0022c007, // n0x031d c0x0000 (---------------) + I panerai + 0x0025b445, // n0x031e c0x0000 (---------------) + I paris + 0x0028f504, // n0x031f c0x0000 (---------------) + I pars + 0x00297f88, // n0x0320 c0x0000 (---------------) + I partners + 0x002c2085, // n0x0321 c0x0000 (---------------) + I parts + 0x002a2905, // n0x0322 c0x0000 (---------------) + I party + 0x002a4109, // n0x0323 c0x0000 (---------------) + I passagens + 0x002ab584, // n0x0324 c0x0000 (---------------) + I payu + 0x002b88c4, // n0x0325 c0x0000 (---------------) + I pccw + 0x46214942, // n0x0326 c0x0118 (n0x192b-n0x1933) + I pe + 0x0021ab43, // n0x0327 c0x0000 (---------------) + I pet + 0x46764f42, // n0x0328 c0x0119 (n0x1933-n0x1936) + I pf + 0x016bf182, // n0x0329 c0x0005 (---------------)* o I pg + 0x46a8f0c2, // n0x032a c0x011a (n0x1936-n0x193e) + I ph + 0x002dc948, // n0x032b c0x0000 (---------------) + I pharmacy + 0x002bfb47, // n0x032c c0x0000 (---------------) + I philips + 0x0028f0c5, // n0x032d c0x0000 (---------------) + I photo + 0x002c014b, // n0x032e c0x0000 (---------------) + I photography + 0x002bea86, // n0x032f c0x0000 (---------------) + I photos + 0x002c0346, // n0x0330 c0x0000 (---------------) + I physio + 0x002c04c6, // n0x0331 c0x0000 (---------------) + I piaget + 0x0021ec04, // n0x0332 c0x0000 (---------------) + I pics + 0x002c0a46, // n0x0333 c0x0000 (---------------) + I pictet + 0x002c0fc8, // n0x0334 c0x0000 (---------------) + I pictures + 0x0022c6c3, // n0x0335 c0x0000 (---------------) + I pid + 0x00243743, // n0x0336 c0x0000 (---------------) + I pin + 0x00243744, // n0x0337 c0x0000 (---------------) + I ping + 
0x002c1c84, // n0x0338 c0x0000 (---------------) + I pink + 0x002c3cc5, // n0x0339 c0x0000 (---------------) + I pizza + 0x46ec3e02, // n0x033a c0x011b (n0x193e-n0x194c) + I pk + 0x47201e02, // n0x033b c0x011c (n0x194c-n0x19f1) + I pl + 0x00201e05, // n0x033c c0x0000 (---------------) + I place + 0x00290d44, // n0x033d c0x0000 (---------------) + I play + 0x002c61cb, // n0x033e c0x0000 (---------------) + I playstation + 0x002c7948, // n0x033f c0x0000 (---------------) + I plumbing + 0x002c7b84, // n0x0340 c0x0000 (---------------) + I plus + 0x002083c2, // n0x0341 c0x0000 (---------------) + I pm + 0x47a3df82, // n0x0342 c0x011e (n0x1a20-n0x1a25) + I pn + 0x0029a743, // n0x0343 c0x0000 (---------------) + I pnc + 0x002c7fc4, // n0x0344 c0x0000 (---------------) + I pohl + 0x002c80c5, // n0x0345 c0x0000 (---------------) + I poker + 0x002c9d84, // n0x0346 c0x0000 (---------------) + I porn + 0x002b31c4, // n0x0347 c0x0000 (---------------) + I post + 0x47e18242, // n0x0348 c0x011f (n0x1a25-n0x1a32) + I pr + 0x0025da85, // n0x0349 c0x0000 (---------------) + I praxi + 0x0029abc5, // n0x034a c0x0000 (---------------) + I press + 0x002cad45, // n0x034b c0x0000 (---------------) + I prime + 0x48218243, // n0x034c c0x0120 (n0x1a32-n0x1a39) + I pro + 0x002cbe44, // n0x034d c0x0000 (---------------) + I prod + 0x002cbe4b, // n0x034e c0x0000 (---------------) + I productions + 0x002cc284, // n0x034f c0x0000 (---------------) + I prof + 0x002cca85, // n0x0350 c0x0000 (---------------) + I promo + 0x0021824a, // n0x0351 c0x0000 (---------------) + I properties + 0x002cce48, // n0x0352 c0x0000 (---------------) + I property + 0x002cd04a, // n0x0353 c0x0000 (---------------) + I protection + 0x4861dc02, // n0x0354 c0x0121 (n0x1a39-n0x1a40) + I ps + 0x48a95982, // n0x0355 c0x0122 (n0x1a40-n0x1a49) + I pt + 0x00296543, // n0x0356 c0x0000 (---------------) + I pub + 0x48f8ae42, // n0x0357 c0x0123 (n0x1a49-n0x1a4f) + I pw + 0x492be202, // n0x0358 c0x0124 (n0x1a4f-n0x1a56) + I py + 0x496fd8c2, // n0x0359 c0x0125 (n0x1a56-n0x1a5f) + I qa + 0x002ce804, // n0x035a c0x0000 (---------------) + I qpon + 0x00211186, // n0x035b c0x0000 (---------------) + I quebec + 0x00222685, // n0x035c c0x0000 (---------------) + I quest + 0x00301a06, // n0x035d c0x0000 (---------------) + I racing + 0x49a030c2, // n0x035e c0x0126 (n0x1a5f-n0x1a63) + I re + 0x0033d884, // n0x035f c0x0000 (---------------) + I read + 0x0032e0c7, // n0x0360 c0x0000 (---------------) + I realtor + 0x0036b3c6, // n0x0361 c0x0000 (---------------) + I realty + 0x00307487, // n0x0362 c0x0000 (---------------) + I recipes + 0x00230683, // n0x0363 c0x0000 (---------------) + I red + 0x0029d908, // n0x0364 c0x0000 (---------------) + I redstone + 0x003237cb, // n0x0365 c0x0000 (---------------) + I redumbrella + 0x002730c5, // n0x0366 c0x0000 (---------------) + I rehab + 0x002e8505, // n0x0367 c0x0000 (---------------) + I reise + 0x002e8506, // n0x0368 c0x0000 (---------------) + I reisen + 0x002a45c4, // n0x0369 c0x0000 (---------------) + I reit + 0x00365b48, // n0x036a c0x0000 (---------------) + I reliance + 0x00210403, // n0x036b c0x0000 (---------------) + I ren + 0x00210404, // n0x036c c0x0000 (---------------) + I rent + 0x00210407, // n0x036d c0x0000 (---------------) + I rentals + 0x002117c6, // n0x036e c0x0000 (---------------) + I repair + 0x0030ea86, // n0x036f c0x0000 (---------------) + I report + 0x002964ca, // n0x0370 c0x0000 (---------------) + I republican + 0x00237dc4, // n0x0371 c0x0000 (---------------) + I rest + 0x003386ca, // 
n0x0372 c0x0000 (---------------) + I restaurant + 0x0031bd06, // n0x0373 c0x0000 (---------------) + I review + 0x0031bd07, // n0x0374 c0x0000 (---------------) + I reviews + 0x00243007, // n0x0375 c0x0000 (---------------) + I rexroth + 0x002614c4, // n0x0376 c0x0000 (---------------) + I rich + 0x002614c9, // n0x0377 c0x0000 (---------------) + I richardli + 0x0024ea05, // n0x0378 c0x0000 (---------------) + I ricoh + 0x00228f83, // n0x0379 c0x0000 (---------------) + I ril + 0x0022ad03, // n0x037a c0x0000 (---------------) + I rio + 0x0021dd83, // n0x037b c0x0000 (---------------) + I rip + 0x49e00d82, // n0x037c c0x0127 (n0x1a63-n0x1a6f) + I ro + 0x0028e886, // n0x037d c0x0000 (---------------) + I rocher + 0x00297105, // n0x037e c0x0000 (---------------) + I rocks + 0x002c1b45, // n0x037f c0x0000 (---------------) + I rodeo + 0x0023a144, // n0x0380 c0x0000 (---------------) + I room + 0x4a2060c2, // n0x0381 c0x0128 (n0x1a6f-n0x1a76) + I rs + 0x00324a84, // n0x0382 c0x0000 (---------------) + I rsvp + 0x4a6044c2, // n0x0383 c0x0129 (n0x1a76-n0x1afa) + I ru + 0x0024f144, // n0x0384 c0x0000 (---------------) + I ruhr + 0x002044c3, // n0x0385 c0x0000 (---------------) + I run + 0x4ab0d882, // n0x0386 c0x012a (n0x1afa-n0x1b03) + I rw + 0x0031d103, // n0x0387 c0x0000 (---------------) + I rwe + 0x00289606, // n0x0388 c0x0000 (---------------) + I ryukyu + 0x4ae01a02, // n0x0389 c0x012b (n0x1b03-n0x1b0b) + I sa + 0x00272608, // n0x038a c0x0000 (---------------) + I saarland + 0x00234784, // n0x038b c0x0000 (---------------) + I safe + 0x00234786, // n0x038c c0x0000 (---------------) + I safety + 0x002f4d46, // n0x038d c0x0000 (---------------) + I sakura + 0x00244b04, // n0x038e c0x0000 (---------------) + I sale + 0x00340885, // n0x038f c0x0000 (---------------) + I salon + 0x00395107, // n0x0390 c0x0000 (---------------) + I samsung + 0x0029c407, // n0x0391 c0x0000 (---------------) + I sandvik + 0x0029c40f, // n0x0392 c0x0000 (---------------) + I sandvikcoromant + 0x002098c6, // n0x0393 c0x0000 (---------------) + I sanofi + 0x00210583, // n0x0394 c0x0000 (---------------) + I sap + 0x00210584, // n0x0395 c0x0000 (---------------) + I sapo + 0x0021d684, // n0x0396 c0x0000 (---------------) + I sarl + 0x002275c3, // n0x0397 c0x0000 (---------------) + I sas + 0x00219584, // n0x0398 c0x0000 (---------------) + I save + 0x002332c4, // n0x0399 c0x0000 (---------------) + I saxo + 0x4b2046c2, // n0x039a c0x012c (n0x1b0b-n0x1b10) + I sb + 0x00277ac3, // n0x039b c0x0000 (---------------) + I sbi + 0x002350c3, // n0x039c c0x0000 (---------------) + I sbs + 0x4b600982, // n0x039d c0x012d (n0x1b10-n0x1b15) + I sc + 0x00229183, // n0x039e c0x0000 (---------------) + I sca + 0x00355d83, // n0x039f c0x0000 (---------------) + I scb + 0x00206107, // n0x03a0 c0x0000 (---------------) + I schmidt + 0x0023514c, // n0x03a1 c0x0000 (---------------) + I scholarships + 0x00235406, // n0x03a2 c0x0000 (---------------) + I school + 0x002c39c6, // n0x03a3 c0x0000 (---------------) + I schule + 0x00370987, // n0x03a4 c0x0000 (---------------) + I schwarz + 0x00223b07, // n0x03a5 c0x0000 (---------------) + I science + 0x00212fc4, // n0x03a6 c0x0000 (---------------) + I scor + 0x00237984, // n0x03a7 c0x0000 (---------------) + I scot + 0x4ba4f842, // n0x03a8 c0x012e (n0x1b15-n0x1b1d) + I sd + 0x4be02e82, // n0x03a9 c0x012f (n0x1b1d-n0x1b46) + I se + 0x003004c4, // n0x03aa c0x0000 (---------------) + I seat + 0x00223f88, // n0x03ab c0x0000 (---------------) + I security + 0x00267bc4, // n0x03ac c0x0000 
(---------------) + I seek + 0x002ba785, // n0x03ad c0x0000 (---------------) + I sener + 0x00243bc8, // n0x03ae c0x0000 (---------------) + I services + 0x002476c3, // n0x03af c0x0000 (---------------) + I sew + 0x0029acc3, // n0x03b0 c0x0000 (---------------) + I sex + 0x0029acc4, // n0x03b1 c0x0000 (---------------) + I sexy + 0x4c262dc2, // n0x03b2 c0x0130 (n0x1b46-n0x1b4d) + I sg + 0x4c6001c2, // n0x03b3 c0x0131 (n0x1b4d-n0x1b53) + I sh + 0x00255e05, // n0x03b4 c0x0000 (---------------) + I sharp + 0x00256344, // n0x03b5 c0x0000 (---------------) + I shaw + 0x00208c04, // n0x03b6 c0x0000 (---------------) + I shia + 0x002cb1c7, // n0x03b7 c0x0000 (---------------) + I shiksha + 0x00369905, // n0x03b8 c0x0000 (---------------) + I shoes + 0x002b0e86, // n0x03b9 c0x0000 (---------------) + I shouji + 0x002b3884, // n0x03ba c0x0000 (---------------) + I show + 0x002b6b87, // n0x03bb c0x0000 (---------------) + I shriram + 0x4ca09182, // n0x03bc c0x0132 (n0x1b53-n0x1b54) + I si + 0x0036b904, // n0x03bd c0x0000 (---------------) + I silk + 0x002914c4, // n0x03be c0x0000 (---------------) + I sina + 0x00274247, // n0x03bf c0x0000 (---------------) + I singles + 0x00242b84, // n0x03c0 c0x0000 (---------------) + I site + 0x0022e942, // n0x03c1 c0x0000 (---------------) + I sj + 0x4ce07b42, // n0x03c2 c0x0133 (n0x1b54-n0x1b55) + I sk + 0x00207b43, // n0x03c3 c0x0000 (---------------) + I ski + 0x002f3384, // n0x03c4 c0x0000 (---------------) + I skin + 0x00229043, // n0x03c5 c0x0000 (---------------) + I sky + 0x00229045, // n0x03c6 c0x0000 (---------------) + I skype + 0x4d212582, // n0x03c7 c0x0134 (n0x1b55-n0x1b5a) + I sl + 0x0023f582, // n0x03c8 c0x0000 (---------------) + I sm + 0x0034b305, // n0x03c9 c0x0000 (---------------) + I smile + 0x4d610b02, // n0x03ca c0x0135 (n0x1b5a-n0x1b62) + I sn + 0x00310444, // n0x03cb c0x0000 (---------------) + I sncf + 0x4da01102, // n0x03cc c0x0136 (n0x1b62-n0x1b65) + I so + 0x00240a06, // n0x03cd c0x0000 (---------------) + I soccer + 0x002c27c6, // n0x03ce c0x0000 (---------------) + I social + 0x00258008, // n0x03cf c0x0000 (---------------) + I softbank + 0x002a6c88, // n0x03d0 c0x0000 (---------------) + I software + 0x002de604, // n0x03d1 c0x0000 (---------------) + I sohu + 0x002d0205, // n0x03d2 c0x0000 (---------------) + I solar + 0x002d9149, // n0x03d3 c0x0000 (---------------) + I solutions + 0x00364504, // n0x03d4 c0x0000 (---------------) + I song + 0x002bc184, // n0x03d5 c0x0000 (---------------) + I sony + 0x00207fc3, // n0x03d6 c0x0000 (---------------) + I soy + 0x002101c5, // n0x03d7 c0x0000 (---------------) + I space + 0x00379247, // n0x03d8 c0x0000 (---------------) + I spiegel + 0x00236a04, // n0x03d9 c0x0000 (---------------) + I spot + 0x0033d80d, // n0x03da c0x0000 (---------------) + I spreadbetting + 0x002ceec2, // n0x03db c0x0000 (---------------) + I sr + 0x002ceec3, // n0x03dc c0x0000 (---------------) + I srl + 0x4de023c2, // n0x03dd c0x0137 (n0x1b65-n0x1b71) + I st + 0x0035bf05, // n0x03de c0x0000 (---------------) + I stada + 0x00232444, // n0x03df c0x0000 (---------------) + I star + 0x003191c7, // n0x03e0 c0x0000 (---------------) + I starhub + 0x002b1889, // n0x03e1 c0x0000 (---------------) + I statebank + 0x0029ca07, // n0x03e2 c0x0000 (---------------) + I statoil + 0x00264603, // n0x03e3 c0x0000 (---------------) + I stc + 0x00264608, // n0x03e4 c0x0000 (---------------) + I stcgroup + 0x00259f09, // n0x03e5 c0x0000 (---------------) + I stockholm + 0x002cf147, // n0x03e6 c0x0000 (---------------) + I storage + 
0x002cf4c5, // n0x03e7 c0x0000 (---------------) + I store + 0x002cfc86, // n0x03e8 c0x0000 (---------------) + I studio + 0x002cfe05, // n0x03e9 c0x0000 (---------------) + I study + 0x00247945, // n0x03ea c0x0000 (---------------) + I style + 0x4e203a42, // n0x03eb c0x0138 (n0x1b71-n0x1b91) + I su + 0x002f0b45, // n0x03ec c0x0000 (---------------) + I sucks + 0x002acd0a, // n0x03ed c0x0000 (---------------) + I supersport + 0x002b49c8, // n0x03ee c0x0000 (---------------) + I supplies + 0x002cccc6, // n0x03ef c0x0000 (---------------) + I supply + 0x00243e87, // n0x03f0 c0x0000 (---------------) + I support + 0x00287c84, // n0x03f1 c0x0000 (---------------) + I surf + 0x00330647, // n0x03f2 c0x0000 (---------------) + I surgery + 0x002d2f46, // n0x03f3 c0x0000 (---------------) + I suzuki + 0x4e61d0c2, // n0x03f4 c0x0139 (n0x1b91-n0x1b96) + I sv + 0x0020ac86, // n0x03f5 c0x0000 (---------------) + I swatch + 0x002d6685, // n0x03f6 c0x0000 (---------------) + I swiss + 0x4ead6b42, // n0x03f7 c0x013a (n0x1b96-n0x1b97) + I sx + 0x4ee84ec2, // n0x03f8 c0x013b (n0x1b97-n0x1b9d) + I sy + 0x00368086, // n0x03f9 c0x0000 (---------------) + I sydney + 0x0029d448, // n0x03fa c0x0000 (---------------) + I symantec + 0x00392447, // n0x03fb c0x0000 (---------------) + I systems + 0x4f207582, // n0x03fc c0x013c (n0x1b9d-n0x1ba0) + I sz + 0x0020c083, // n0x03fd c0x0000 (---------------) + I tab + 0x00382f46, // n0x03fe c0x0000 (---------------) + I taipei + 0x00216944, // n0x03ff c0x0000 (---------------) + I talk + 0x003879c6, // n0x0400 c0x0000 (---------------) + I taobao + 0x0031cbca, // n0x0401 c0x0000 (---------------) + I tatamotors + 0x0031df05, // n0x0402 c0x0000 (---------------) + I tatar + 0x0020f886, // n0x0403 c0x0000 (---------------) + I tattoo + 0x00217c43, // n0x0404 c0x0000 (---------------) + I tax + 0x00217c44, // n0x0405 c0x0000 (---------------) + I taxi + 0x0020ad42, // n0x0406 c0x0000 (---------------) + I tc + 0x002f4203, // n0x0407 c0x0000 (---------------) + I tci + 0x4f600682, // n0x0408 c0x013d (n0x1ba0-n0x1ba1) + I td + 0x002c9803, // n0x0409 c0x0000 (---------------) + I tdk + 0x00354144, // n0x040a c0x0000 (---------------) + I team + 0x0029d584, // n0x040b c0x0000 (---------------) + I tech + 0x0029d58a, // n0x040c c0x0000 (---------------) + I technology + 0x0022ba83, // n0x040d c0x0000 (---------------) + I tel + 0x002734c8, // n0x040e c0x0000 (---------------) + I telecity + 0x00250a0a, // n0x040f c0x0000 (---------------) + I telefonica + 0x00325507, // n0x0410 c0x0000 (---------------) + I temasek + 0x002dab46, // n0x0411 c0x0000 (---------------) + I tennis + 0x0033f0c4, // n0x0412 c0x0000 (---------------) + I teva + 0x0027e202, // n0x0413 c0x0000 (---------------) + I tf + 0x0021e342, // n0x0414 c0x0000 (---------------) + I tg + 0x4fa01d82, // n0x0415 c0x013e (n0x1ba1-n0x1ba8) + I th + 0x00235c83, // n0x0416 c0x0000 (---------------) + I thd + 0x002f9147, // n0x0417 c0x0000 (---------------) + I theater + 0x00242d87, // n0x0418 c0x0000 (---------------) + I theatre + 0x00377f8b, // n0x0419 c0x0000 (---------------) + I theguardian + 0x00340707, // n0x041a c0x0000 (---------------) + I tickets + 0x0021bb06, // n0x041b c0x0000 (---------------) + I tienda + 0x00375107, // n0x041c c0x0000 (---------------) + I tiffany + 0x00354984, // n0x041d c0x0000 (---------------) + I tips + 0x00355585, // n0x041e c0x0000 (---------------) + I tires + 0x002a4985, // n0x041f c0x0000 (---------------) + I tirol + 0x4fe02bc2, // n0x0420 c0x013f (n0x1ba8-n0x1bb7) + I tj + 
0x0023a7c2, // n0x0421 c0x0000 (---------------) + I tk + 0x5020fc42, // n0x0422 c0x0140 (n0x1bb7-n0x1bb8) + I tl + 0x50608902, // n0x0423 c0x0141 (n0x1bb8-n0x1bc0) + I tm + 0x0026a9c5, // n0x0424 c0x0000 (---------------) + I tmall + 0x50a1d1c2, // n0x0425 c0x0142 (n0x1bc0-n0x1bd4) + I tn + 0x50e01682, // n0x0426 c0x0143 (n0x1bd4-n0x1bda) + I to + 0x00312e85, // n0x0427 c0x0000 (---------------) + I today + 0x00316545, // n0x0428 c0x0000 (---------------) + I tokyo + 0x0020f945, // n0x0429 c0x0000 (---------------) + I tools + 0x002469c3, // n0x042a c0x0000 (---------------) + I top + 0x00338cc5, // n0x042b c0x0000 (---------------) + I toray + 0x002beb47, // n0x042c c0x0000 (---------------) + I toshiba + 0x00339905, // n0x042d c0x0000 (---------------) + I tours + 0x0021abc4, // n0x042e c0x0000 (---------------) + I town + 0x00338906, // n0x042f c0x0000 (---------------) + I toyota + 0x00247b04, // n0x0430 c0x0000 (---------------) + I toys + 0x00285142, // n0x0431 c0x0000 (---------------) + I tp + 0x51202402, // n0x0432 c0x0144 (n0x1bda-n0x1bef) + I tr + 0x00229a45, // n0x0433 c0x0000 (---------------) + I trade + 0x0028fe07, // n0x0434 c0x0000 (---------------) + I trading + 0x002b9748, // n0x0435 c0x0000 (---------------) + I training + 0x0027f186, // n0x0436 c0x0000 (---------------) + I travel + 0x0027f18d, // n0x0437 c0x0000 (---------------) + I travelchannel + 0x00280489, // n0x0438 c0x0000 (---------------) + I travelers + 0x00280492, // n0x0439 c0x0000 (---------------) + I travelersinsurance + 0x00313185, // n0x043a c0x0000 (---------------) + I trust + 0x0034a4c3, // n0x043b c0x0000 (---------------) + I trv + 0x51e06582, // n0x043c c0x0147 (n0x1bf1-n0x1c02) + I tt + 0x0035a104, // n0x043d c0x0000 (---------------) + I tube + 0x002d71c3, // n0x043e c0x0000 (---------------) + I tui + 0x002d83c5, // n0x043f c0x0000 (---------------) + I tunes + 0x002d8e45, // n0x0440 c0x0000 (---------------) + I tushu + 0x5220bf42, // n0x0441 c0x0148 (n0x1c02-n0x1c06) + I tv + 0x0020bf43, // n0x0442 c0x0000 (---------------) + I tvs + 0x52641ac2, // n0x0443 c0x0149 (n0x1c06-n0x1c14) + I tw + 0x52a17142, // n0x0444 c0x014a (n0x1c14-n0x1c20) + I tz + 0x52e17d82, // n0x0445 c0x014b (n0x1c20-n0x1c6e) + I ua + 0x0032ba03, // n0x0446 c0x0000 (---------------) + I ubs + 0x53205082, // n0x0447 c0x014c (n0x1c6e-n0x1c77) + I ug + 0x5360cf02, // n0x0448 c0x014d (n0x1c77-n0x1c82) + I uk + 0x0029f04a, // n0x0449 c0x0000 (---------------) + I university + 0x00203a83, // n0x044a c0x0000 (---------------) + I uno + 0x00245543, // n0x044b c0x0000 (---------------) + I uol + 0x002c16c3, // n0x044c c0x0000 (---------------) + I ups + 0x54209f42, // n0x044d c0x0150 (n0x1c84-n0x1cc3) + I us + 0x62606842, // n0x044e c0x0189 (n0x1d66-n0x1d6c) + I uy + 0x62e018c2, // n0x044f c0x018b (n0x1d6d-n0x1d71) + I uz + 0x002013c2, // n0x0450 c0x0000 (---------------) + I va + 0x00340209, // n0x0451 c0x0000 (---------------) + I vacations + 0x002aba84, // n0x0452 c0x0000 (---------------) + I vana + 0x6334a542, // n0x0453 c0x018c (n0x1d71-n0x1d77) + I vc + 0x636014c2, // n0x0454 c0x018d (n0x1d77-n0x1d88) + I ve + 0x0027d4c5, // n0x0455 c0x0000 (---------------) + I vegas + 0x00227248, // n0x0456 c0x0000 (---------------) + I ventures + 0x002d998c, // n0x0457 c0x0000 (---------------) + I versicherung + 0x0022b9c3, // n0x0458 c0x0000 (---------------) + I vet + 0x0024ad42, // n0x0459 c0x0000 (---------------) + I vg + 0x63a13602, // n0x045a c0x018e (n0x1d88-n0x1d8d) + I vi + 0x002b4346, // n0x045b c0x0000 (---------------) + 
I viajes + 0x002db9c5, // n0x045c c0x0000 (---------------) + I video + 0x002b1403, // n0x045d c0x0000 (---------------) + I vig + 0x002c7686, // n0x045e c0x0000 (---------------) + I viking + 0x002dbb06, // n0x045f c0x0000 (---------------) + I villas + 0x00213603, // n0x0460 c0x0000 (---------------) + I vin + 0x002dc703, // n0x0461 c0x0000 (---------------) + I vip + 0x002dd146, // n0x0462 c0x0000 (---------------) + I virgin + 0x00248b46, // n0x0463 c0x0000 (---------------) + I vision + 0x002b85c5, // n0x0464 c0x0000 (---------------) + I vista + 0x002dd6ca, // n0x0465 c0x0000 (---------------) + I vistaprint + 0x0022cf04, // n0x0466 c0x0000 (---------------) + I viva + 0x00332eca, // n0x0467 c0x0000 (---------------) + I vlaanderen + 0x63e08102, // n0x0468 c0x018f (n0x1d8d-n0x1d9a) + I vn + 0x002716c5, // n0x0469 c0x0000 (---------------) + I vodka + 0x002e0c8a, // n0x046a c0x0000 (---------------) + I volkswagen + 0x002e2484, // n0x046b c0x0000 (---------------) + I vote + 0x002e2586, // n0x046c c0x0000 (---------------) + I voting + 0x002e2704, // n0x046d c0x0000 (---------------) + I voto + 0x0030c486, // n0x046e c0x0000 (---------------) + I voyage + 0x6421d102, // n0x046f c0x0190 (n0x1d9a-n0x1d9e) + I vu + 0x002a4e86, // n0x0470 c0x0000 (---------------) + I vuelos + 0x0036cbc5, // n0x0471 c0x0000 (---------------) + I wales + 0x0038b0c6, // n0x0472 c0x0000 (---------------) + I walter + 0x003578c4, // n0x0473 c0x0000 (---------------) + I wang + 0x003578c7, // n0x0474 c0x0000 (---------------) + I wanggou + 0x00351a06, // n0x0475 c0x0000 (---------------) + I warman + 0x0020acc5, // n0x0476 c0x0000 (---------------) + I watch + 0x00293647, // n0x0477 c0x0000 (---------------) + I watches + 0x00384107, // n0x0478 c0x0000 (---------------) + I weather + 0x0038410e, // n0x0479 c0x0000 (---------------) + I weatherchannel + 0x00219fc6, // n0x047a c0x0000 (---------------) + I webcam + 0x00251cc5, // n0x047b c0x0000 (---------------) + I weber + 0x002af047, // n0x047c c0x0000 (---------------) + I website + 0x002d4bc3, // n0x047d c0x0000 (---------------) + I wed + 0x0031c007, // n0x047e c0x0000 (---------------) + I wedding + 0x003912c5, // n0x047f c0x0000 (---------------) + I weibo + 0x0020a184, // n0x0480 c0x0000 (---------------) + I weir + 0x0021f7c2, // n0x0481 c0x0000 (---------------) + I wf + 0x002c7207, // n0x0482 c0x0000 (---------------) + I whoswho + 0x002d2e44, // n0x0483 c0x0000 (---------------) + I wien + 0x0025a484, // n0x0484 c0x0000 (---------------) + I wiki + 0x0024910b, // n0x0485 c0x0000 (---------------) + I williamhill + 0x00213c83, // n0x0486 c0x0000 (---------------) + I win + 0x002b6a07, // n0x0487 c0x0000 (---------------) + I windows + 0x00213c84, // n0x0488 c0x0000 (---------------) + I wine + 0x00231fc3, // n0x0489 c0x0000 (---------------) + I wme + 0x00241b04, // n0x048a c0x0000 (---------------) + I work + 0x0029b085, // n0x048b c0x0000 (---------------) + I works + 0x00314905, // n0x048c c0x0000 (---------------) + I world + 0x6460ba82, // n0x048d c0x0191 (n0x1d9e-n0x1da5) + I ws + 0x002e34c3, // n0x048e c0x0000 (---------------) + I wtc + 0x002e3b03, // n0x048f c0x0000 (---------------) + I wtf + 0x0020a6c4, // n0x0490 c0x0000 (---------------) + I xbox + 0x0020a785, // n0x0491 c0x0000 (---------------) + I xerox + 0x00217cc6, // n0x0492 c0x0000 (---------------) + I xihuan + 0x00356e83, // n0x0493 c0x0000 (---------------) + I xin + 0x002358cb, // n0x0494 c0x0000 (---------------) + I xn--11b4c3d + 0x0023d74b, // n0x0495 c0x0000 
(---------------) + I xn--1ck2e1b + 0x00291d4b, // n0x0496 c0x0000 (---------------) + I xn--1qqw23a + 0x002bfeca, // n0x0497 c0x0000 (---------------) + I xn--30rr7y + 0x0033588b, // n0x0498 c0x0000 (---------------) + I xn--3bst00m + 0x003942cb, // n0x0499 c0x0000 (---------------) + I xn--3ds443g + 0x0039658c, // n0x049a c0x0000 (---------------) + I xn--3e0b707e + 0x00397251, // n0x049b c0x0000 (---------------) + I xn--3oq18vl8pn36a + 0x002e480a, // n0x049c c0x0000 (---------------) + I xn--3pxu8k + 0x002e4bcb, // n0x049d c0x0000 (---------------) + I xn--42c2d9a + 0x002e4e8b, // n0x049e c0x0000 (---------------) + I xn--45brj9c + 0x002e6d4a, // n0x049f c0x0000 (---------------) + I xn--45q11c + 0x002e780a, // n0x04a0 c0x0000 (---------------) + I xn--4gbrim + 0x002e8b8e, // n0x04a1 c0x0000 (---------------) + I xn--54b7fta0cc + 0x002e9e4b, // n0x04a2 c0x0000 (---------------) + I xn--55qw42g + 0x002ea10a, // n0x04a3 c0x0000 (---------------) + I xn--55qx5d + 0x002eb14a, // n0x04a4 c0x0000 (---------------) + I xn--5tzm5g + 0x002eb64b, // n0x04a5 c0x0000 (---------------) + I xn--6frz82g + 0x002ebb8e, // n0x04a6 c0x0000 (---------------) + I xn--6qq986b3xl + 0x002ec6cc, // n0x04a7 c0x0000 (---------------) + I xn--80adxhks + 0x002ecb4b, // n0x04a8 c0x0000 (---------------) + I xn--80ao21a + 0x002ece0c, // n0x04a9 c0x0000 (---------------) + I xn--80asehdb + 0x002f108a, // n0x04aa c0x0000 (---------------) + I xn--80aswg + 0x002f228c, // n0x04ab c0x0000 (---------------) + I xn--8y0a063a + 0x64af258a, // n0x04ac c0x0192 (n0x1da5-n0x1dab) + I xn--90a3ac + 0x002f5849, // n0x04ad c0x0000 (---------------) + I xn--90ais + 0x002f664a, // n0x04ae c0x0000 (---------------) + I xn--9dbq2a + 0x002f68ca, // n0x04af c0x0000 (---------------) + I xn--9et52u + 0x002f6b4b, // n0x04b0 c0x0000 (---------------) + I xn--9krt00a + 0x002fa44e, // n0x04b1 c0x0000 (---------------) + I xn--b4w605ferd + 0x002fa7d1, // n0x04b2 c0x0000 (---------------) + I xn--bck1b9a5dre4c + 0x00303289, // n0x04b3 c0x0000 (---------------) + I xn--c1avg + 0x003034ca, // n0x04b4 c0x0000 (---------------) + I xn--c2br7g + 0x00303e0b, // n0x04b5 c0x0000 (---------------) + I xn--cck2b3b + 0x0030618a, // n0x04b6 c0x0000 (---------------) + I xn--cg4bki + 0x00306d16, // n0x04b7 c0x0000 (---------------) + I xn--clchc0ea0b2g2a9gcd + 0x003091cb, // n0x04b8 c0x0000 (---------------) + I xn--czr694b + 0x0030a9ca, // n0x04b9 c0x0000 (---------------) + I xn--czrs0t + 0x0030d48a, // n0x04ba c0x0000 (---------------) + I xn--czru2d + 0x0030f2cb, // n0x04bb c0x0000 (---------------) + I xn--d1acj3b + 0x003125c9, // n0x04bc c0x0000 (---------------) + I xn--d1alf + 0x0031508d, // n0x04bd c0x0000 (---------------) + I xn--eckvdtc9d + 0x003167cb, // n0x04be c0x0000 (---------------) + I xn--efvy88h + 0x003178cb, // n0x04bf c0x0000 (---------------) + I xn--estv75g + 0x0031828b, // n0x04c0 c0x0000 (---------------) + I xn--fct429k + 0x00319909, // n0x04c1 c0x0000 (---------------) + I xn--fhbei + 0x00319f4e, // n0x04c2 c0x0000 (---------------) + I xn--fiq228c5hs + 0x0031a48a, // n0x04c3 c0x0000 (---------------) + I xn--fiq64b + 0x0031ec0a, // n0x04c4 c0x0000 (---------------) + I xn--fiqs8s + 0x0031f0ca, // n0x04c5 c0x0000 (---------------) + I xn--fiqz9s + 0x0031fa8b, // n0x04c6 c0x0000 (---------------) + I xn--fjq720a + 0x003202cb, // n0x04c7 c0x0000 (---------------) + I xn--flw351e + 0x0032058d, // n0x04c8 c0x0000 (---------------) + I xn--fpcrj9c3d + 0x00321e0d, // n0x04c9 c0x0000 (---------------) + I xn--fzc2c9e2c + 0x00322dd0, // 
n0x04ca c0x0000 (---------------) + I xn--fzys8d69uvgm + 0x0032328b, // n0x04cb c0x0000 (---------------) + I xn--g2xx48c + 0x00323ecc, // n0x04cc c0x0000 (---------------) + I xn--gckr3f0f + 0x0032434b, // n0x04cd c0x0000 (---------------) + I xn--gecrj9c + 0x0032880b, // n0x04ce c0x0000 (---------------) + I xn--h2brj9c + 0x0032f14b, // n0x04cf c0x0000 (---------------) + I xn--hxt814e + 0x0032fbcf, // n0x04d0 c0x0000 (---------------) + I xn--i1b6b1a6a2e + 0x0032ff8b, // n0x04d1 c0x0000 (---------------) + I xn--imr513n + 0x0033080a, // n0x04d2 c0x0000 (---------------) + I xn--io0a7i + 0x00331249, // n0x04d3 c0x0000 (---------------) + I xn--j1aef + 0x003315c9, // n0x04d4 c0x0000 (---------------) + I xn--j1amh + 0x0033198b, // n0x04d5 c0x0000 (---------------) + I xn--j6w193g + 0x00331c4e, // n0x04d6 c0x0000 (---------------) + I xn--jlq61u9w7b + 0x0033354b, // n0x04d7 c0x0000 (---------------) + I xn--jvr189m + 0x0033444f, // n0x04d8 c0x0000 (---------------) + I xn--kcrx77d1x4a + 0x00337d8b, // n0x04d9 c0x0000 (---------------) + I xn--kprw13d + 0x0033804b, // n0x04da c0x0000 (---------------) + I xn--kpry57d + 0x0033830b, // n0x04db c0x0000 (---------------) + I xn--kpu716f + 0x00338e0a, // n0x04dc c0x0000 (---------------) + I xn--kput3i + 0x0033db49, // n0x04dd c0x0000 (---------------) + I xn--l1acc + 0x00344dcf, // n0x04de c0x0000 (---------------) + I xn--lgbbat1ad8j + 0x0034978c, // n0x04df c0x0000 (---------------) + I xn--mgb2ddes + 0x00349c8c, // n0x04e0 c0x0000 (---------------) + I xn--mgb9awbf + 0x0034a18e, // n0x04e1 c0x0000 (---------------) + I xn--mgba3a3ejt + 0x0034a88f, // n0x04e2 c0x0000 (---------------) + I xn--mgba3a4f16a + 0x0034ac4e, // n0x04e3 c0x0000 (---------------) + I xn--mgba3a4fra + 0x0034b750, // n0x04e4 c0x0000 (---------------) + I xn--mgba7c0bbn0a + 0x0034bb4e, // n0x04e5 c0x0000 (---------------) + I xn--mgbaam7a8h + 0x0034c10c, // n0x04e6 c0x0000 (---------------) + I xn--mgbab2bd + 0x0034c412, // n0x04e7 c0x0000 (---------------) + I xn--mgbai9a5eva00b + 0x0034f091, // n0x04e8 c0x0000 (---------------) + I xn--mgbai9azgqp6j + 0x0034f64e, // n0x04e9 c0x0000 (---------------) + I xn--mgbayh7gpa + 0x0034fa8e, // n0x04ea c0x0000 (---------------) + I xn--mgbb9fbpob + 0x0034ffce, // n0x04eb c0x0000 (---------------) + I xn--mgbbh1a71e + 0x0035034f, // n0x04ec c0x0000 (---------------) + I xn--mgbc0a9azcg + 0x00350713, // n0x04ed c0x0000 (---------------) + I xn--mgberp4a5d4a87g + 0x00350bd1, // n0x04ee c0x0000 (---------------) + I xn--mgberp4a5d4ar + 0x0035100c, // n0x04ef c0x0000 (---------------) + I xn--mgbpl2fh + 0x00351453, // n0x04f0 c0x0000 (---------------) + I xn--mgbqly7c0a67fbc + 0x00352310, // n0x04f1 c0x0000 (---------------) + I xn--mgbqly7cvafr + 0x00352e0c, // n0x04f2 c0x0000 (---------------) + I xn--mgbt3dhd + 0x0035310c, // n0x04f3 c0x0000 (---------------) + I xn--mgbtf8fl + 0x003535cb, // n0x04f4 c0x0000 (---------------) + I xn--mgbtx2b + 0x0035588e, // n0x04f5 c0x0000 (---------------) + I xn--mgbx4cd0ab + 0x00355e8b, // n0x04f6 c0x0000 (---------------) + I xn--mix082f + 0x003564cb, // n0x04f7 c0x0000 (---------------) + I xn--mix891f + 0x0035728c, // n0x04f8 c0x0000 (---------------) + I xn--mk1bu44c + 0x0035c6ca, // n0x04f9 c0x0000 (---------------) + I xn--mxtq1m + 0x0035ca8c, // n0x04fa c0x0000 (---------------) + I xn--ngbc5azd + 0x0035cd8c, // n0x04fb c0x0000 (---------------) + I xn--ngbe9e0a + 0x0035f64b, // n0x04fc c0x0000 (---------------) + I xn--nnx388a + 0x0035f908, // n0x04fd c0x0000 (---------------) + I 
xn--node + 0x00360249, // n0x04fe c0x0000 (---------------) + I xn--nqv7f + 0x0036024f, // n0x04ff c0x0000 (---------------) + I xn--nqv7fs00ema + 0x00361f8b, // n0x0500 c0x0000 (---------------) + I xn--nyqy26a + 0x003633ca, // n0x0501 c0x0000 (---------------) + I xn--o3cw4h + 0x00364d8c, // n0x0502 c0x0000 (---------------) + I xn--ogbpf8fl + 0x00366089, // n0x0503 c0x0000 (---------------) + I xn--p1acf + 0x00366308, // n0x0504 c0x0000 (---------------) + I xn--p1ai + 0x00366f8b, // n0x0505 c0x0000 (---------------) + I xn--pbt977c + 0x003676cb, // n0x0506 c0x0000 (---------------) + I xn--pgbs0dh + 0x00368a8a, // n0x0507 c0x0000 (---------------) + I xn--pssy2u + 0x00368d0b, // n0x0508 c0x0000 (---------------) + I xn--q9jyb4c + 0x0036944c, // n0x0509 c0x0000 (---------------) + I xn--qcka1pmc + 0x0036a988, // n0x050a c0x0000 (---------------) + I xn--qxam + 0x0037230b, // n0x050b c0x0000 (---------------) + I xn--rhqv96g + 0x00374d8b, // n0x050c c0x0000 (---------------) + I xn--rovu88b + 0x0037824b, // n0x050d c0x0000 (---------------) + I xn--s9brj9c + 0x00379a8b, // n0x050e c0x0000 (---------------) + I xn--ses554g + 0x00383c8b, // n0x050f c0x0000 (---------------) + I xn--t60b56a + 0x00383f49, // n0x0510 c0x0000 (---------------) + I xn--tckwe + 0x00387fca, // n0x0511 c0x0000 (---------------) + I xn--unup4y + 0x00388f17, // n0x0512 c0x0000 (---------------) + I xn--vermgensberater-ctb + 0x0038a918, // n0x0513 c0x0000 (---------------) + I xn--vermgensberatung-pwb + 0x0038dcc9, // n0x0514 c0x0000 (---------------) + I xn--vhquv + 0x0038f00b, // n0x0515 c0x0000 (---------------) + I xn--vuq861b + 0x0038fb94, // n0x0516 c0x0000 (---------------) + I xn--w4r85el8fhu5dnra + 0x0039034a, // n0x0517 c0x0000 (---------------) + I xn--wgbh1c + 0x0039090a, // n0x0518 c0x0000 (---------------) + I xn--wgbl6a + 0x00390b8b, // n0x0519 c0x0000 (---------------) + I xn--xhq521b + 0x00391a90, // n0x051a c0x0000 (---------------) + I xn--xkc2al3hye2a + 0x00391e91, // n0x051b c0x0000 (---------------) + I xn--xkc2dl3a5ee0h + 0x0039290a, // n0x051c c0x0000 (---------------) + I xn--y9a3aq + 0x003938cd, // n0x051d c0x0000 (---------------) + I xn--yfro4i67o + 0x00393fcd, // n0x051e c0x0000 (---------------) + I xn--ygbi2ammx + 0x0039688b, // n0x051f c0x0000 (---------------) + I xn--zfr164b + 0x00397046, // n0x0520 c0x0000 (---------------) + I xperia + 0x003971c3, // n0x0521 c0x0000 (---------------) + I xxx + 0x0029ad43, // n0x0522 c0x0000 (---------------) + I xyz + 0x00269586, // n0x0523 c0x0000 (---------------) + I yachts + 0x0027b905, // n0x0524 c0x0000 (---------------) + I yahoo + 0x002151c7, // n0x0525 c0x0000 (---------------) + I yamaxun + 0x00326dc6, // n0x0526 c0x0000 (---------------) + I yandex + 0x01614d82, // n0x0527 c0x0005 (---------------)* o I ye + 0x002e3609, // n0x0528 c0x0000 (---------------) + I yodobashi + 0x00301804, // n0x0529 c0x0000 (---------------) + I yoga + 0x0032b5c8, // n0x052a c0x0000 (---------------) + I yokohama + 0x00235bc3, // n0x052b c0x0000 (---------------) + I you + 0x0035a047, // n0x052c c0x0000 (---------------) + I youtube + 0x0022f542, // n0x052d c0x0000 (---------------) + I yt + 0x00201943, // n0x052e c0x0000 (---------------) + I yun + 0x64e043c2, // n0x052f c0x0193 (n0x1dab-n0x1dbc) o I za + 0x002b3106, // n0x0530 c0x0000 (---------------) + I zappos + 0x002b3c84, // n0x0531 c0x0000 (---------------) + I zara + 0x00311384, // n0x0532 c0x0000 (---------------) + I zero + 0x0023b443, // n0x0533 c0x0000 (---------------) + I zip + 0x0023b445, // 
n0x0534 c0x0000 (---------------) + I zippo + 0x016e4582, // n0x0535 c0x0005 (---------------)* o I zm + 0x002c7ec4, // n0x0536 c0x0000 (---------------) + I zone + 0x00261407, // n0x0537 c0x0000 (---------------) + I zuerich + 0x016a0202, // n0x0538 c0x0005 (---------------)* o I zw + 0x00222ac3, // n0x0539 c0x0000 (---------------) + I com + 0x002d75c3, // n0x053a c0x0000 (---------------) + I edu + 0x0021e283, // n0x053b c0x0000 (---------------) + I gov + 0x0023fa03, // n0x053c c0x0000 (---------------) + I mil + 0x002170c3, // n0x053d c0x0000 (---------------) + I net + 0x0021dcc3, // n0x053e c0x0000 (---------------) + I org + 0x00207cc3, // n0x053f c0x0000 (---------------) + I nom + 0x00201e82, // n0x0540 c0x0000 (---------------) + I ac + 0x000e4188, // n0x0541 c0x0000 (---------------) + blogspot + 0x00200742, // n0x0542 c0x0000 (---------------) + I co + 0x0021e283, // n0x0543 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x0544 c0x0000 (---------------) + I mil + 0x002170c3, // n0x0545 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x0546 c0x0000 (---------------) + I org + 0x00206103, // n0x0547 c0x0000 (---------------) + I sch + 0x002ffad6, // n0x0548 c0x0000 (---------------) + I accident-investigation + 0x00301f93, // n0x0549 c0x0000 (---------------) + I accident-prevention + 0x00340589, // n0x054a c0x0000 (---------------) + I aerobatic + 0x002751c8, // n0x054b c0x0000 (---------------) + I aeroclub + 0x0036bb09, // n0x054c c0x0000 (---------------) + I aerodrome + 0x002e0e06, // n0x054d c0x0000 (---------------) + I agents + 0x0032cf10, // n0x054e c0x0000 (---------------) + I air-surveillance + 0x00211893, // n0x054f c0x0000 (---------------) + I air-traffic-control + 0x002fb3c8, // n0x0550 c0x0000 (---------------) + I aircraft + 0x00262307, // n0x0551 c0x0000 (---------------) + I airline + 0x00266e47, // n0x0552 c0x0000 (---------------) + I airport + 0x0028bd4a, // n0x0553 c0x0000 (---------------) + I airtraffic + 0x003541c9, // n0x0554 c0x0000 (---------------) + I ambulance + 0x00309f89, // n0x0555 c0x0000 (---------------) + I amusement + 0x002bbacb, // n0x0556 c0x0000 (---------------) + I association + 0x002f8406, // n0x0557 c0x0000 (---------------) + I author + 0x002ed6ca, // n0x0558 c0x0000 (---------------) + I ballooning + 0x00218046, // n0x0559 c0x0000 (---------------) + I broker + 0x00301403, // n0x055a c0x0000 (---------------) + I caa + 0x002dc1c5, // n0x055b c0x0000 (---------------) + I cargo + 0x003246c8, // n0x055c c0x0000 (---------------) + I catering + 0x00240acd, // n0x055d c0x0000 (---------------) + I certification + 0x0032cacc, // n0x055e c0x0000 (---------------) + I championship + 0x0036c887, // n0x055f c0x0000 (---------------) + I charter + 0x00328a8d, // n0x0560 c0x0000 (---------------) + I civilaviation + 0x002752c4, // n0x0561 c0x0000 (---------------) + I club + 0x0022498a, // n0x0562 c0x0000 (---------------) + I conference + 0x002255ca, // n0x0563 c0x0000 (---------------) + I consultant + 0x00225a8a, // n0x0564 c0x0000 (---------------) + I consulting + 0x00211b87, // n0x0565 c0x0000 (---------------) + I control + 0x0022cd47, // n0x0566 c0x0000 (---------------) + I council + 0x00231004, // n0x0567 c0x0000 (---------------) + I crew + 0x00232f46, // n0x0568 c0x0000 (---------------) + I design + 0x002727c4, // n0x0569 c0x0000 (---------------) + I dgca + 0x00336208, // n0x056a c0x0000 (---------------) + I educator + 0x00343bc9, // n0x056b c0x0000 (---------------) + I emergency + 0x002a8a86, // n0x056c c0x0000 
[generated table data: this hunk continues a machine-generated DNS public-suffix node table vendored by this merge. It adds packed uint32 entries n0x056d through n0x0a30, one per line in the source diff, each followed by a generated comment recording the node index (n0x....), the children index (c0x....), an optional children node range, a marker (+, !, or o), an ICANN flag (I), and the label itself, e.g. "engineer", "blogspot", "amazonaws", the recurring com/edu/gov/mil/net/org second-level groups, and Italian place-name labels ending at "messina".]
+ 0x00204802, // n0x0a31 c0x0000 (---------------) + I mi + 0x002f1e85, // n0x0a32 c0x0000 (---------------) + I milan + 0x002f1e86, // n0x0a33 c0x0000 (---------------) + I milano + 0x00217082, // n0x0a34 c0x0000 (---------------) + I mn + 0x00203602, // n0x0a35 c0x0000 (---------------) + I mo + 0x00278386, // n0x0a36 c0x0000 (---------------) + I modena + 0x00285643, // n0x0a37 c0x0000 (---------------) + I mol + 0x002ee246, // n0x0a38 c0x0000 (---------------) + I molise + 0x002b1e85, // n0x0a39 c0x0000 (---------------) + I monza + 0x002b1e8d, // n0x0a3a c0x0000 (---------------) + I monza-brianza + 0x002b26d5, // n0x0a3b c0x0000 (---------------) + I monza-e-della-brianza + 0x002b2e8c, // n0x0a3c c0x0000 (---------------) + I monzabrianza + 0x002b39cd, // n0x0a3d c0x0000 (---------------) + I monzaebrianza + 0x002b3d92, // n0x0a3e c0x0000 (---------------) + I monzaedellabrianza + 0x00209282, // n0x0a3f c0x0000 (---------------) + I ms + 0x00259642, // n0x0a40 c0x0000 (---------------) + I mt + 0x00200282, // n0x0a41 c0x0000 (---------------) + I na + 0x002b7886, // n0x0a42 c0x0000 (---------------) + I naples + 0x002d5c46, // n0x0a43 c0x0000 (---------------) + I napoli + 0x00200c02, // n0x0a44 c0x0000 (---------------) + I no + 0x00201346, // n0x0a45 c0x0000 (---------------) + I novara + 0x00205bc2, // n0x0a46 c0x0000 (---------------) + I nu + 0x00205bc5, // n0x0a47 c0x0000 (---------------) + I nuoro + 0x002003c2, // n0x0a48 c0x0000 (---------------) + I og + 0x002298c9, // n0x0a49 c0x0000 (---------------) + I ogliastra + 0x0022840c, // n0x0a4a c0x0000 (---------------) + I olbia-tempio + 0x0022874b, // n0x0a4b c0x0000 (---------------) + I olbiatempio + 0x00200c42, // n0x0a4c c0x0000 (---------------) + I or + 0x0023c688, // n0x0a4d c0x0000 (---------------) + I oristano + 0x002031c2, // n0x0a4e c0x0000 (---------------) + I ot + 0x002052c2, // n0x0a4f c0x0000 (---------------) + I pa + 0x0029a206, // n0x0a50 c0x0000 (---------------) + I padova + 0x0034f945, // n0x0a51 c0x0000 (---------------) + I padua + 0x0021ce87, // n0x0a52 c0x0000 (---------------) + I palermo + 0x003927c5, // n0x0a53 c0x0000 (---------------) + I parma + 0x002c0705, // n0x0a54 c0x0000 (---------------) + I pavia + 0x00203e02, // n0x0a55 c0x0000 (---------------) + I pc + 0x0029b282, // n0x0a56 c0x0000 (---------------) + I pd + 0x00214942, // n0x0a57 c0x0000 (---------------) + I pe + 0x0026fa47, // n0x0a58 c0x0000 (---------------) + I perugia + 0x0030758d, // n0x0a59 c0x0000 (---------------) + I pesaro-urbino + 0x0030790c, // n0x0a5a c0x0000 (---------------) + I pesarourbino + 0x00229107, // n0x0a5b c0x0000 (---------------) + I pescara + 0x002bf182, // n0x0a5c c0x0000 (---------------) + I pg + 0x00200b02, // n0x0a5d c0x0000 (---------------) + I pi + 0x00207788, // n0x0a5e c0x0000 (---------------) + I piacenza + 0x00261808, // n0x0a5f c0x0000 (---------------) + I piedmont + 0x002c1308, // n0x0a60 c0x0000 (---------------) + I piemonte + 0x002cd744, // n0x0a61 c0x0000 (---------------) + I pisa + 0x002c41c7, // n0x0a62 c0x0000 (---------------) + I pistoia + 0x002c7d43, // n0x0a63 c0x0000 (---------------) + I pmn + 0x0023df82, // n0x0a64 c0x0000 (---------------) + I pn + 0x00203f02, // n0x0a65 c0x0000 (---------------) + I po + 0x002c9b49, // n0x0a66 c0x0000 (---------------) + I pordenone + 0x00236a47, // n0x0a67 c0x0000 (---------------) + I potenza + 0x00218242, // n0x0a68 c0x0000 (---------------) + I pr + 0x00346185, // n0x0a69 c0x0000 (---------------) + I prato + 0x00295982, // n0x0a6a 
c0x0000 (---------------) + I pt + 0x00223542, // n0x0a6b c0x0000 (---------------) + I pu + 0x0025cd83, // n0x0a6c c0x0000 (---------------) + I pug + 0x0025cd86, // n0x0a6d c0x0000 (---------------) + I puglia + 0x002ce242, // n0x0a6e c0x0000 (---------------) + I pv + 0x002ce6c2, // n0x0a6f c0x0000 (---------------) + I pz + 0x00201442, // n0x0a70 c0x0000 (---------------) + I ra + 0x0038c106, // n0x0a71 c0x0000 (---------------) + I ragusa + 0x00201447, // n0x0a72 c0x0000 (---------------) + I ravenna + 0x00227b82, // n0x0a73 c0x0000 (---------------) + I rc + 0x002030c2, // n0x0a74 c0x0000 (---------------) + I re + 0x0023818f, // n0x0a75 c0x0000 (---------------) + I reggio-calabria + 0x0023f80d, // n0x0a76 c0x0000 (---------------) + I reggio-emilia + 0x0025df8e, // n0x0a77 c0x0000 (---------------) + I reggiocalabria + 0x00261c4c, // n0x0a78 c0x0000 (---------------) + I reggioemilia + 0x00204782, // n0x0a79 c0x0000 (---------------) + I rg + 0x00202842, // n0x0a7a c0x0000 (---------------) + I ri + 0x0021ba45, // n0x0a7b c0x0000 (---------------) + I rieti + 0x002e79c6, // n0x0a7c c0x0000 (---------------) + I rimini + 0x002194c2, // n0x0a7d c0x0000 (---------------) + I rm + 0x00202182, // n0x0a7e c0x0000 (---------------) + I rn + 0x00200d82, // n0x0a7f c0x0000 (---------------) + I ro + 0x0023fb84, // n0x0a80 c0x0000 (---------------) + I roma + 0x002e7004, // n0x0a81 c0x0000 (---------------) + I rome + 0x002b1386, // n0x0a82 c0x0000 (---------------) + I rovigo + 0x00201a02, // n0x0a83 c0x0000 (---------------) + I sa + 0x002671c7, // n0x0a84 c0x0000 (---------------) + I salerno + 0x00218483, // n0x0a85 c0x0000 (---------------) + I sar + 0x0021c048, // n0x0a86 c0x0000 (---------------) + I sardegna + 0x0021ca08, // n0x0a87 c0x0000 (---------------) + I sardinia + 0x00369a07, // n0x0a88 c0x0000 (---------------) + I sassari + 0x00232a46, // n0x0a89 c0x0000 (---------------) + I savona + 0x00209182, // n0x0a8a c0x0000 (---------------) + I si + 0x00215803, // n0x0a8b c0x0000 (---------------) + I sic + 0x00215807, // n0x0a8c c0x0000 (---------------) + I sicilia + 0x0026b646, // n0x0a8d c0x0000 (---------------) + I sicily + 0x002b77c5, // n0x0a8e c0x0000 (---------------) + I siena + 0x002d0d88, // n0x0a8f c0x0000 (---------------) + I siracusa + 0x00201102, // n0x0a90 c0x0000 (---------------) + I so + 0x0033ca87, // n0x0a91 c0x0000 (---------------) + I sondrio + 0x002101c2, // n0x0a92 c0x0000 (---------------) + I sp + 0x002ceec2, // n0x0a93 c0x0000 (---------------) + I sr + 0x00211f02, // n0x0a94 c0x0000 (---------------) + I ss + 0x002b5a09, // n0x0a95 c0x0000 (---------------) + I suedtirol + 0x0021d0c2, // n0x0a96 c0x0000 (---------------) + I sv + 0x00200142, // n0x0a97 c0x0000 (---------------) + I ta + 0x00229383, // n0x0a98 c0x0000 (---------------) + I taa + 0x0036ec87, // n0x0a99 c0x0000 (---------------) + I taranto + 0x00203202, // n0x0a9a c0x0000 (---------------) + I te + 0x0022858c, // n0x0a9b c0x0000 (---------------) + I tempio-olbia + 0x0022888b, // n0x0a9c c0x0000 (---------------) + I tempioolbia + 0x0029f746, // n0x0a9d c0x0000 (---------------) + I teramo + 0x002f9245, // n0x0a9e c0x0000 (---------------) + I terni + 0x0021d1c2, // n0x0a9f c0x0000 (---------------) + I tn + 0x00201682, // n0x0aa0 c0x0000 (---------------) + I to + 0x0029f3c6, // n0x0aa1 c0x0000 (---------------) + I torino + 0x002520c3, // n0x0aa2 c0x0000 (---------------) + I tos + 0x003096c7, // n0x0aa3 c0x0000 (---------------) + I toscana + 0x00285142, // n0x0aa4 c0x0000 
(---------------) + I tp + 0x00202402, // n0x0aa5 c0x0000 (---------------) + I tr + 0x00264a15, // n0x0aa6 c0x0000 (---------------) + I trani-andria-barletta + 0x0022fc55, // n0x0aa7 c0x0000 (---------------) + I trani-barletta-andria + 0x0026bb53, // n0x0aa8 c0x0000 (---------------) + I traniandriabarletta + 0x00230193, // n0x0aa9 c0x0000 (---------------) + I tranibarlettaandria + 0x0027cbc7, // n0x0aaa c0x0000 (---------------) + I trapani + 0x0028cbc8, // n0x0aab c0x0000 (---------------) + I trentino + 0x0028cbd0, // n0x0aac c0x0000 (---------------) + I trentino-a-adige + 0x0029458f, // n0x0aad c0x0000 (---------------) + I trentino-aadige + 0x002d3213, // n0x0aae c0x0000 (---------------) + I trentino-alto-adige + 0x00306652, // n0x0aaf c0x0000 (---------------) + I trentino-altoadige + 0x0033be90, // n0x0ab0 c0x0000 (---------------) + I trentino-s-tirol + 0x0033ac8f, // n0x0ab1 c0x0000 (---------------) + I trentino-stirol + 0x0038cd52, // n0x0ab2 c0x0000 (---------------) + I trentino-sud-tirol + 0x002a4691, // n0x0ab3 c0x0000 (---------------) + I trentino-sudtirol + 0x002acf53, // n0x0ab4 c0x0000 (---------------) + I trentino-sued-tirol + 0x002b57d2, // n0x0ab5 c0x0000 (---------------) + I trentino-suedtirol + 0x002bc54f, // n0x0ab6 c0x0000 (---------------) + I trentinoa-adige + 0x002bd2ce, // n0x0ab7 c0x0000 (---------------) + I trentinoaadige + 0x002dccd2, // n0x0ab8 c0x0000 (---------------) + I trentinoalto-adige + 0x002c0b91, // n0x0ab9 c0x0000 (---------------) + I trentinoaltoadige + 0x002cb48f, // n0x0aba c0x0000 (---------------) + I trentinos-tirol + 0x002cc70e, // n0x0abb c0x0000 (---------------) + I trentinostirol + 0x002cda91, // n0x0abc c0x0000 (---------------) + I trentinosud-tirol + 0x002ce2d0, // n0x0abd c0x0000 (---------------) + I trentinosudtirol + 0x002cea52, // n0x0abe c0x0000 (---------------) + I trentinosued-tirol + 0x002dde91, // n0x0abf c0x0000 (---------------) + I trentinosuedtirol + 0x002d0946, // n0x0ac0 c0x0000 (---------------) + I trento + 0x002de4c7, // n0x0ac1 c0x0000 (---------------) + I treviso + 0x00354007, // n0x0ac2 c0x0000 (---------------) + I trieste + 0x00203a02, // n0x0ac3 c0x0000 (---------------) + I ts + 0x002af6c5, // n0x0ac4 c0x0000 (---------------) + I turin + 0x002d8c87, // n0x0ac5 c0x0000 (---------------) + I tuscany + 0x0020bf42, // n0x0ac6 c0x0000 (---------------) + I tv + 0x002070c2, // n0x0ac7 c0x0000 (---------------) + I ud + 0x00319045, // n0x0ac8 c0x0000 (---------------) + I udine + 0x00215fc3, // n0x0ac9 c0x0000 (---------------) + I umb + 0x002570c6, // n0x0aca c0x0000 (---------------) + I umbria + 0x0030774d, // n0x0acb c0x0000 (---------------) + I urbino-pesaro + 0x00307a8c, // n0x0acc c0x0000 (---------------) + I urbinopesaro + 0x002013c2, // n0x0acd c0x0000 (---------------) + I va + 0x00341c8b, // n0x0ace c0x0000 (---------------) + I val-d-aosta + 0x0029a30a, // n0x0acf c0x0000 (---------------) + I val-daosta + 0x0030ae0a, // n0x0ad0 c0x0000 (---------------) + I vald-aosta + 0x0029e789, // n0x0ad1 c0x0000 (---------------) + I valdaosta + 0x0029c80b, // n0x0ad2 c0x0000 (---------------) + I valle-aosta + 0x002c904d, // n0x0ad3 c0x0000 (---------------) + I valle-d-aosta + 0x002d958c, // n0x0ad4 c0x0000 (---------------) + I valle-daosta + 0x0021860a, // n0x0ad5 c0x0000 (---------------) + I valleaosta + 0x00244e8c, // n0x0ad6 c0x0000 (---------------) + I valled-aosta + 0x0024cc8b, // n0x0ad7 c0x0000 (---------------) + I valledaosta + 0x002584cc, // n0x0ad8 c0x0000 (---------------) + I 
vallee-aoste + 0x0025be8b, // n0x0ad9 c0x0000 (---------------) + I valleeaoste + 0x00263943, // n0x0ada c0x0000 (---------------) + I vao + 0x00278f86, // n0x0adb c0x0000 (---------------) + I varese + 0x0030b742, // n0x0adc c0x0000 (---------------) + I vb + 0x0034a542, // n0x0add c0x0000 (---------------) + I vc + 0x00211603, // n0x0ade c0x0000 (---------------) + I vda + 0x002014c2, // n0x0adf c0x0000 (---------------) + I ve + 0x002014c3, // n0x0ae0 c0x0000 (---------------) + I ven + 0x003546c6, // n0x0ae1 c0x0000 (---------------) + I veneto + 0x0024a307, // n0x0ae2 c0x0000 (---------------) + I venezia + 0x0025c506, // n0x0ae3 c0x0000 (---------------) + I venice + 0x00220fc8, // n0x0ae4 c0x0000 (---------------) + I verbania + 0x002a5e88, // n0x0ae5 c0x0000 (---------------) + I vercelli + 0x00248346, // n0x0ae6 c0x0000 (---------------) + I verona + 0x00213602, // n0x0ae7 c0x0000 (---------------) + I vi + 0x002db38d, // n0x0ae8 c0x0000 (---------------) + I vibo-valentia + 0x002db6cc, // n0x0ae9 c0x0000 (---------------) + I vibovalentia + 0x00357a87, // n0x0aea c0x0000 (---------------) + I vicenza + 0x002de2c7, // n0x0aeb c0x0000 (---------------) + I viterbo + 0x00211782, // n0x0aec c0x0000 (---------------) + I vr + 0x0020bf82, // n0x0aed c0x0000 (---------------) + I vs + 0x0021e302, // n0x0aee c0x0000 (---------------) + I vt + 0x0024ebc2, // n0x0aef c0x0000 (---------------) + I vv + 0x00200742, // n0x0af0 c0x0000 (---------------) + I co + 0x002170c3, // n0x0af1 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x0af2 c0x0000 (---------------) + I org + 0x00222ac3, // n0x0af3 c0x0000 (---------------) + I com + 0x002d75c3, // n0x0af4 c0x0000 (---------------) + I edu + 0x0021e283, // n0x0af5 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x0af6 c0x0000 (---------------) + I mil + 0x00298944, // n0x0af7 c0x0000 (---------------) + I name + 0x002170c3, // n0x0af8 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x0af9 c0x0000 (---------------) + I org + 0x00206103, // n0x0afa c0x0000 (---------------) + I sch + 0x00201e82, // n0x0afb c0x0000 (---------------) + I ac + 0x00210a02, // n0x0afc c0x0000 (---------------) + I ad + 0x1fa34b45, // n0x0afd c0x007e (n0x0b6a-n0x0b9e) + I aichi + 0x1fe06e05, // n0x0afe c0x007f (n0x0b9e-n0x0bba) + I akita + 0x20387ac6, // n0x0aff c0x0080 (n0x0bba-n0x0bd0) + I aomori + 0x000e4188, // n0x0b00 c0x0000 (---------------) + blogspot + 0x206a0745, // n0x0b01 c0x0081 (n0x0bd0-n0x0c0a) + I chiba + 0x00200742, // n0x0b02 c0x0000 (---------------) + I co + 0x00203fc2, // n0x0b03 c0x0000 (---------------) + I ed + 0x20b1c945, // n0x0b04 c0x0082 (n0x0c0a-n0x0c20) + I ehime + 0x20e6b7c5, // n0x0b05 c0x0083 (n0x0c20-n0x0c2f) + I fukui + 0x2126c587, // n0x0b06 c0x0084 (n0x0c2f-n0x0c6e) + I fukuoka + 0x2166d5c9, // n0x0b07 c0x0085 (n0x0c6e-n0x0ca1) + I fukushima + 0x21a89804, // n0x0b08 c0x0086 (n0x0ca1-n0x0cc7) + I gifu + 0x00202342, // n0x0b09 c0x0000 (---------------) + I go + 0x0020dc82, // n0x0b0a c0x0000 (---------------) + I gr + 0x21e36f85, // n0x0b0b c0x0087 (n0x0cc7-n0x0ceb) + I gunma + 0x2227c649, // n0x0b0c c0x0088 (n0x0ceb-n0x0d04) + I hiroshima + 0x2275fc48, // n0x0b0d c0x0089 (n0x0d04-n0x0d92) + I hokkaido + 0x22a96e45, // n0x0b0e c0x008a (n0x0d92-n0x0dc0) + I hyogo + 0x22f16ac7, // n0x0b0f c0x008b (n0x0dc0-n0x0df3) + I ibaraki + 0x23214e48, // n0x0b10 c0x008c (n0x0df3-n0x0e06) + I ishikawa + 0x236c3b85, // n0x0b11 c0x008d (n0x0e06-n0x0e28) + I iwate + 0x23a81f06, // n0x0b12 c0x008e (n0x0e28-n0x0e37) + I kagawa + 0x23e71789, // 
n0x0b13 c0x008f (n0x0e37-n0x0e4b) + I kagoshima + 0x24332808, // n0x0b14 c0x0090 (n0x0e4b-n0x0e69) + I kanagawa + 0x246a8748, // n0x0b15 c0x0091 (n0x0e69-n0x0e6a)* o I kawasaki + 0x24a8294a, // n0x0b16 c0x0092 (n0x0e6a-n0x0e6b)* o I kitakyushu + 0x24e43484, // n0x0b17 c0x0093 (n0x0e6b-n0x0e6c)* o I kobe + 0x252ba445, // n0x0b18 c0x0094 (n0x0e6c-n0x0e8b) + I kochi + 0x256a2d48, // n0x0b19 c0x0095 (n0x0e8b-n0x0ea5) + I kumamoto + 0x25aacb05, // n0x0b1a c0x0096 (n0x0ea5-n0x0ec4) + I kyoto + 0x0020e4c2, // n0x0b1b c0x0000 (---------------) + I lg + 0x25e28143, // n0x0b1c c0x0097 (n0x0ec4-n0x0ee2) + I mie + 0x2628d746, // n0x0b1d c0x0098 (n0x0ee2-n0x0f03) + I miyagi + 0x26771208, // n0x0b1e c0x0099 (n0x0f03-n0x0f1e) + I miyazaki + 0x26b4e4c6, // n0x0b1f c0x009a (n0x0f1e-n0x0f69) + I nagano + 0x26f6c108, // n0x0b20 c0x009b (n0x0f69-n0x0f7f) + I nagasaki + 0x2732b206, // n0x0b21 c0x009c (n0x0f7f-n0x0f80)* o I nagoya + 0x276da0c4, // n0x0b22 c0x009d (n0x0f80-n0x0fa6) + I nara + 0x00201082, // n0x0b23 c0x0000 (---------------) + I ne + 0x27a5f907, // n0x0b24 c0x009e (n0x0fa6-n0x0fc8) + I niigata + 0x27e98d04, // n0x0b25 c0x009f (n0x0fc8-n0x0fdb) + I oita + 0x28266607, // n0x0b26 c0x00a0 (n0x0fdb-n0x0ff5) + I okayama + 0x287954c7, // n0x0b27 c0x00a1 (n0x0ff5-n0x101f) + I okinawa + 0x00200c42, // n0x0b28 c0x0000 (---------------) + I or + 0x28a864c5, // n0x0b29 c0x00a2 (n0x101f-n0x1051) + I osaka + 0x28e75104, // n0x0b2a c0x00a3 (n0x1051-n0x106b) + I saga + 0x292d0f07, // n0x0b2b c0x00a4 (n0x106b-n0x10b0) + I saitama + 0x296129c7, // n0x0b2c c0x00a5 (n0x10b0-n0x10b1)* o I sapporo + 0x29a6f286, // n0x0b2d c0x00a6 (n0x10b1-n0x10b2)* o I sendai + 0x29e525c5, // n0x0b2e c0x00a7 (n0x10b2-n0x10c9) + I shiga + 0x2a281087, // n0x0b2f c0x00a8 (n0x10c9-n0x10e0) + I shimane + 0x2a6b4b88, // n0x0b30 c0x00a9 (n0x10e0-n0x1104) + I shizuoka + 0x2ab0f5c7, // n0x0b31 c0x00aa (n0x1104-n0x1123) + I tochigi + 0x2ae8e2c9, // n0x0b32 c0x00ab (n0x1123-n0x1134) + I tokushima + 0x2b316545, // n0x0b33 c0x00ac (n0x1134-n0x116d) + I tokyo + 0x2b6e2787, // n0x0b34 c0x00ad (n0x116d-n0x117a) + I tottori + 0x2ba7c346, // n0x0b35 c0x00ae (n0x117a-n0x1192) + I toyama + 0x2bf2dc48, // n0x0b36 c0x00af (n0x1192-n0x11af) + I wakayama + 0x0022528d, // n0x0b37 c0x0000 (---------------) + I xn--0trq7p7nn + 0x00260809, // n0x0b38 c0x0000 (---------------) + I xn--1ctwo + 0x0026d84b, // n0x0b39 c0x0000 (---------------) + I xn--1lqs03n + 0x002910cb, // n0x0b3a c0x0000 (---------------) + I xn--1lqs71d + 0x002b1bcb, // n0x0b3b c0x0000 (---------------) + I xn--2m4a15e + 0x002d6b8b, // n0x0b3c c0x0000 (---------------) + I xn--32vp30h + 0x002e7bcb, // n0x0b3d c0x0000 (---------------) + I xn--4it168d + 0x002e7e8b, // n0x0b3e c0x0000 (---------------) + I xn--4it797k + 0x002e8689, // n0x0b3f c0x0000 (---------------) + I xn--4pvxs + 0x002ea38b, // n0x0b40 c0x0000 (---------------) + I xn--5js045d + 0x002ea64b, // n0x0b41 c0x0000 (---------------) + I xn--5rtp49c + 0x002ead0b, // n0x0b42 c0x0000 (---------------) + I xn--5rtq34k + 0x002eb3ca, // n0x0b43 c0x0000 (---------------) + I xn--6btw5a + 0x002eb90a, // n0x0b44 c0x0000 (---------------) + I xn--6orx2r + 0x002ebf0c, // n0x0b45 c0x0000 (---------------) + I xn--7t0a264c + 0x002f16cb, // n0x0b46 c0x0000 (---------------) + I xn--8ltr62k + 0x002f200a, // n0x0b47 c0x0000 (---------------) + I xn--8pvr4u + 0x0030374a, // n0x0b48 c0x0000 (---------------) + I xn--c3s14m + 0x0031334e, // n0x0b49 c0x0000 (---------------) + I xn--d5qv7z876c + 0x00313d4e, // n0x0b4a c0x0000 (---------------) + I 
xn--djrs72d6uy + 0x003140ca, // n0x0b4b c0x0000 (---------------) + I xn--djty4k + 0x003153ca, // n0x0b4c c0x0000 (---------------) + I xn--efvn9s + 0x0031700b, // n0x0b4d c0x0000 (---------------) + I xn--ehqz56n + 0x003172cb, // n0x0b4e c0x0000 (---------------) + I xn--elqq16h + 0x00317fcb, // n0x0b4f c0x0000 (---------------) + I xn--f6qx53a + 0x00333bcb, // n0x0b50 c0x0000 (---------------) + I xn--k7yn95e + 0x003341ca, // n0x0b51 c0x0000 (---------------) + I xn--kbrq7o + 0x00334e8b, // n0x0b52 c0x0000 (---------------) + I xn--klt787d + 0x0033514a, // n0x0b53 c0x0000 (---------------) + I xn--kltp7d + 0x003353ca, // n0x0b54 c0x0000 (---------------) + I xn--kltx9a + 0x0033564a, // n0x0b55 c0x0000 (---------------) + I xn--klty5x + 0x00357c4b, // n0x0b56 c0x0000 (---------------) + I xn--mkru45i + 0x0035d08b, // n0x0b57 c0x0000 (---------------) + I xn--nit225k + 0x0036094e, // n0x0b58 c0x0000 (---------------) + I xn--ntso0iqx3a + 0x00360ccb, // n0x0b59 c0x0000 (---------------) + I xn--ntsq17g + 0x003687cb, // n0x0b5a c0x0000 (---------------) + I xn--pssu33l + 0x0036a44b, // n0x0b5b c0x0000 (---------------) + I xn--qqqt11m + 0x003725ca, // n0x0b5c c0x0000 (---------------) + I xn--rht27z + 0x00372849, // n0x0b5d c0x0000 (---------------) + I xn--rht3d + 0x00372a8a, // n0x0b5e c0x0000 (---------------) + I xn--rht61e + 0x0037410a, // n0x0b5f c0x0000 (---------------) + I xn--rny31h + 0x0038548b, // n0x0b60 c0x0000 (---------------) + I xn--tor131o + 0x00386f0b, // n0x0b61 c0x0000 (---------------) + I xn--uist22h + 0x0038744a, // n0x0b62 c0x0000 (---------------) + I xn--uisz3g + 0x0038824b, // n0x0b63 c0x0000 (---------------) + I xn--uuwu58a + 0x0038d1cb, // n0x0b64 c0x0000 (---------------) + I xn--vgu402c + 0x00395d8b, // n0x0b65 c0x0000 (---------------) + I xn--zbx025d + 0x2c26dc08, // n0x0b66 c0x00b0 (n0x11af-n0x11d1) + I yamagata + 0x2c676089, // n0x0b67 c0x00b1 (n0x11d1-n0x11e1) + I yamaguchi + 0x2ca9b889, // n0x0b68 c0x00b2 (n0x11e1-n0x11fd) + I yamanashi + 0x2cf2b5c8, // n0x0b69 c0x00b3 (n0x11fd-n0x11fe)* o I yokohama + 0x002a2345, // n0x0b6a c0x0000 (---------------) + I aisai + 0x002068c3, // n0x0b6b c0x0000 (---------------) + I ama + 0x00203884, // n0x0b6c c0x0000 (---------------) + I anjo + 0x0034cb85, // n0x0b6d c0x0000 (---------------) + I asuke + 0x00289546, // n0x0b6e c0x0000 (---------------) + I chiryu + 0x002932c5, // n0x0b6f c0x0000 (---------------) + I chita + 0x00274d84, // n0x0b70 c0x0000 (---------------) + I fuso + 0x002472c8, // n0x0b71 c0x0000 (---------------) + I gamagori + 0x00242045, // n0x0b72 c0x0000 (---------------) + I handa + 0x0027ed44, // n0x0b73 c0x0000 (---------------) + I hazu + 0x002b2387, // n0x0b74 c0x0000 (---------------) + I hekinan + 0x00288d0a, // n0x0b75 c0x0000 (---------------) + I higashiura + 0x002a580a, // n0x0b76 c0x0000 (---------------) + I ichinomiya + 0x0031f587, // n0x0b77 c0x0000 (---------------) + I inazawa + 0x002067c7, // n0x0b78 c0x0000 (---------------) + I inuyama + 0x002d6707, // n0x0b79 c0x0000 (---------------) + I isshiki + 0x00248887, // n0x0b7a c0x0000 (---------------) + I iwakura + 0x00203cc5, // n0x0b7b c0x0000 (---------------) + I kanie + 0x00311ec6, // n0x0b7c c0x0000 (---------------) + I kariya + 0x0027cf47, // n0x0b7d c0x0000 (---------------) + I kasugai + 0x0025a504, // n0x0b7e c0x0000 (---------------) + I kira + 0x002fe646, // n0x0b7f c0x0000 (---------------) + I kiyosu + 0x002977c6, // n0x0b80 c0x0000 (---------------) + I komaki + 0x00204cc5, // n0x0b81 c0x0000 (---------------) + I 
konan + 0x0024c9c4, // n0x0b82 c0x0000 (---------------) + I kota + 0x00292a46, // n0x0b83 c0x0000 (---------------) + I mihama + 0x00288107, // n0x0b84 c0x0000 (---------------) + I miyoshi + 0x002209c6, // n0x0b85 c0x0000 (---------------) + I nishio + 0x002e6987, // n0x0b86 c0x0000 (---------------) + I nisshin + 0x0027e743, // n0x0b87 c0x0000 (---------------) + I obu + 0x0023be46, // n0x0b88 c0x0000 (---------------) + I oguchi + 0x0024f085, // n0x0b89 c0x0000 (---------------) + I oharu + 0x0026c687, // n0x0b8a c0x0000 (---------------) + I okazaki + 0x002ad48a, // n0x0b8b c0x0000 (---------------) + I owariasahi + 0x002e9604, // n0x0b8c c0x0000 (---------------) + I seto + 0x00213308, // n0x0b8d c0x0000 (---------------) + I shikatsu + 0x0028d309, // n0x0b8e c0x0000 (---------------) + I shinshiro + 0x002a4307, // n0x0b8f c0x0000 (---------------) + I shitara + 0x0026a586, // n0x0b90 c0x0000 (---------------) + I tahara + 0x00248648, // n0x0b91 c0x0000 (---------------) + I takahama + 0x0030be89, // n0x0b92 c0x0000 (---------------) + I tobishima + 0x00366704, // n0x0b93 c0x0000 (---------------) + I toei + 0x0025d104, // n0x0b94 c0x0000 (---------------) + I togo + 0x002df1c5, // n0x0b95 c0x0000 (---------------) + I tokai + 0x00376b08, // n0x0b96 c0x0000 (---------------) + I tokoname + 0x002aeb47, // n0x0b97 c0x0000 (---------------) + I toyoake + 0x00286c89, // n0x0b98 c0x0000 (---------------) + I toyohashi + 0x00322bc8, // n0x0b99 c0x0000 (---------------) + I toyokawa + 0x00221246, // n0x0b9a c0x0000 (---------------) + I toyone + 0x00338906, // n0x0b9b c0x0000 (---------------) + I toyota + 0x00283308, // n0x0b9c c0x0000 (---------------) + I tsushima + 0x00359906, // n0x0b9d c0x0000 (---------------) + I yatomi + 0x00206e05, // n0x0b9e c0x0000 (---------------) + I akita + 0x0026f346, // n0x0b9f c0x0000 (---------------) + I daisen + 0x00266b48, // n0x0ba0 c0x0000 (---------------) + I fujisato + 0x002dc286, // n0x0ba1 c0x0000 (---------------) + I gojome + 0x0024608b, // n0x0ba2 c0x0000 (---------------) + I hachirogata + 0x00279ac6, // n0x0ba3 c0x0000 (---------------) + I happou + 0x00284b0d, // n0x0ba4 c0x0000 (---------------) + I higashinaruse + 0x002028c5, // n0x0ba5 c0x0000 (---------------) + I honjo + 0x00292e46, // n0x0ba6 c0x0000 (---------------) + I honjyo + 0x00214f05, // n0x0ba7 c0x0000 (---------------) + I ikawa + 0x00281889, // n0x0ba8 c0x0000 (---------------) + I kamikoani + 0x0022ea47, // n0x0ba9 c0x0000 (---------------) + I kamioka + 0x0033ecc8, // n0x0baa c0x0000 (---------------) + I katagami + 0x0022bc46, // n0x0bab c0x0000 (---------------) + I kazuno + 0x00283c89, // n0x0bac c0x0000 (---------------) + I kitaakita + 0x0036b9c6, // n0x0bad c0x0000 (---------------) + I kosaka + 0x002ad405, // n0x0bae c0x0000 (---------------) + I kyowa + 0x0028b246, // n0x0baf c0x0000 (---------------) + I misato + 0x002bf786, // n0x0bb0 c0x0000 (---------------) + I mitane + 0x002b5c49, // n0x0bb1 c0x0000 (---------------) + I moriyoshi + 0x00226806, // n0x0bb2 c0x0000 (---------------) + I nikaho + 0x0023a007, // n0x0bb3 c0x0000 (---------------) + I noshiro + 0x0027bd85, // n0x0bb4 c0x0000 (---------------) + I odate + 0x00204043, // n0x0bb5 c0x0000 (---------------) + I oga + 0x00246205, // n0x0bb6 c0x0000 (---------------) + I ogata + 0x00293cc7, // n0x0bb7 c0x0000 (---------------) + I semboku + 0x0023ed06, // n0x0bb8 c0x0000 (---------------) + I yokote + 0x002027c9, // n0x0bb9 c0x0000 (---------------) + I yurihonjo + 0x00387ac6, // n0x0bba c0x0000 
(---------------) + I aomori + 0x0026f586, // n0x0bbb c0x0000 (---------------) + I gonohe + 0x0022e109, // n0x0bbc c0x0000 (---------------) + I hachinohe + 0x0026ed49, // n0x0bbd c0x0000 (---------------) + I hashikami + 0x0028bc07, // n0x0bbe c0x0000 (---------------) + I hiranai + 0x00305a08, // n0x0bbf c0x0000 (---------------) + I hirosaki + 0x00273909, // n0x0bc0 c0x0000 (---------------) + I itayanagi + 0x0026ce88, // n0x0bc1 c0x0000 (---------------) + I kuroishi + 0x0035c906, // n0x0bc2 c0x0000 (---------------) + I misawa + 0x002bda45, // n0x0bc3 c0x0000 (---------------) + I mutsu + 0x0021638a, // n0x0bc4 c0x0000 (---------------) + I nakadomari + 0x0026f606, // n0x0bc5 c0x0000 (---------------) + I noheji + 0x0035e1c6, // n0x0bc6 c0x0000 (---------------) + I oirase + 0x0028d905, // n0x0bc7 c0x0000 (---------------) + I owani + 0x00205c88, // n0x0bc8 c0x0000 (---------------) + I rokunohe + 0x002092c7, // n0x0bc9 c0x0000 (---------------) + I sannohe + 0x0029ba0a, // n0x0bca c0x0000 (---------------) + I shichinohe + 0x00238986, // n0x0bcb c0x0000 (---------------) + I shingo + 0x0024cec5, // n0x0bcc c0x0000 (---------------) + I takko + 0x0023e846, // n0x0bcd c0x0000 (---------------) + I towada + 0x002e1a47, // n0x0bce c0x0000 (---------------) + I tsugaru + 0x0026a447, // n0x0bcf c0x0000 (---------------) + I tsuruta + 0x002041c5, // n0x0bd0 c0x0000 (---------------) + I abiko + 0x002ad5c5, // n0x0bd1 c0x0000 (---------------) + I asahi + 0x002c6646, // n0x0bd2 c0x0000 (---------------) + I chonan + 0x002c6e86, // n0x0bd3 c0x0000 (---------------) + I chosei + 0x0034a586, // n0x0bd4 c0x0000 (---------------) + I choshi + 0x002be904, // n0x0bd5 c0x0000 (---------------) + I chuo + 0x0026e2c9, // n0x0bd6 c0x0000 (---------------) + I funabashi + 0x00276f46, // n0x0bd7 c0x0000 (---------------) + I futtsu + 0x002d6f4a, // n0x0bd8 c0x0000 (---------------) + I hanamigawa + 0x0027df48, // n0x0bd9 c0x0000 (---------------) + I ichihara + 0x00312b88, // n0x0bda c0x0000 (---------------) + I ichikawa + 0x002a580a, // n0x0bdb c0x0000 (---------------) + I ichinomiya + 0x00329b05, // n0x0bdc c0x0000 (---------------) + I inzai + 0x00288045, // n0x0bdd c0x0000 (---------------) + I isumi + 0x00269408, // n0x0bde c0x0000 (---------------) + I kamagaya + 0x002ba1c8, // n0x0bdf c0x0000 (---------------) + I kamogawa + 0x00375c87, // n0x0be0 c0x0000 (---------------) + I kashiwa + 0x00335e46, // n0x0be1 c0x0000 (---------------) + I katori + 0x002fd408, // n0x0be2 c0x0000 (---------------) + I katsuura + 0x0027d607, // n0x0be3 c0x0000 (---------------) + I kimitsu + 0x0026c7c8, // n0x0be4 c0x0000 (---------------) + I kisarazu + 0x0035d306, // n0x0be5 c0x0000 (---------------) + I kozaki + 0x0026ff48, // n0x0be6 c0x0000 (---------------) + I kujukuri + 0x0027e846, // n0x0be7 c0x0000 (---------------) + I kyonan + 0x0023b8c7, // n0x0be8 c0x0000 (---------------) + I matsudo + 0x002846c6, // n0x0be9 c0x0000 (---------------) + I midori + 0x00292a46, // n0x0bea c0x0000 (---------------) + I mihama + 0x00213fca, // n0x0beb c0x0000 (---------------) + I minamiboso + 0x00223146, // n0x0bec c0x0000 (---------------) + I mobara + 0x002bda49, // n0x0bed c0x0000 (---------------) + I mutsuzawa + 0x0032a1c6, // n0x0bee c0x0000 (---------------) + I nagara + 0x002a0a8a, // n0x0bef c0x0000 (---------------) + I nagareyama + 0x002da0c9, // n0x0bf0 c0x0000 (---------------) + I narashino + 0x0025ac46, // n0x0bf1 c0x0000 (---------------) + I narita + 0x00384f84, // n0x0bf2 c0x0000 (---------------) + I 
noda + 0x0030bbcd, // n0x0bf3 c0x0000 (---------------) + I oamishirasato + 0x00276307, // n0x0bf4 c0x0000 (---------------) + I omigawa + 0x002fffc6, // n0x0bf5 c0x0000 (---------------) + I onjuku + 0x002a8605, // n0x0bf6 c0x0000 (---------------) + I otaki + 0x0036ba45, // n0x0bf7 c0x0000 (---------------) + I sakae + 0x002f4d46, // n0x0bf8 c0x0000 (---------------) + I sakura + 0x002a73c9, // n0x0bf9 c0x0000 (---------------) + I shimofusa + 0x002b9447, // n0x0bfa c0x0000 (---------------) + I shirako + 0x00268406, // n0x0bfb c0x0000 (---------------) + I shiroi + 0x0029fd06, // n0x0bfc c0x0000 (---------------) + I shisui + 0x00274e09, // n0x0bfd c0x0000 (---------------) + I sodegaura + 0x0022a904, // n0x0bfe c0x0000 (---------------) + I sosa + 0x0021f304, // n0x0bff c0x0000 (---------------) + I tako + 0x0038a708, // n0x0c00 c0x0000 (---------------) + I tateyama + 0x002e5fc6, // n0x0c01 c0x0000 (---------------) + I togane + 0x00340e08, // n0x0c02 c0x0000 (---------------) + I tohnosho + 0x003163c8, // n0x0c03 c0x0000 (---------------) + I tomisato + 0x00206f47, // n0x0c04 c0x0000 (---------------) + I urayasu + 0x00382d89, // n0x0c05 c0x0000 (---------------) + I yachimata + 0x002e38c7, // n0x0c06 c0x0000 (---------------) + I yachiyo + 0x002a060a, // n0x0c07 c0x0000 (---------------) + I yokaichiba + 0x00315d8f, // n0x0c08 c0x0000 (---------------) + I yokoshibahikari + 0x002394ca, // n0x0c09 c0x0000 (---------------) + I yotsukaido + 0x0021b845, // n0x0c0a c0x0000 (---------------) + I ainan + 0x00266d85, // n0x0c0b c0x0000 (---------------) + I honai + 0x0020f345, // n0x0c0c c0x0000 (---------------) + I ikata + 0x0027eac7, // n0x0c0d c0x0000 (---------------) + I imabari + 0x00253503, // n0x0c0e c0x0000 (---------------) + I iyo + 0x00305c08, // n0x0c0f c0x0000 (---------------) + I kamijima + 0x0037aec6, // n0x0c10 c0x0000 (---------------) + I kihoku + 0x0037afc9, // n0x0c11 c0x0000 (---------------) + I kumakogen + 0x002e5286, // n0x0c12 c0x0000 (---------------) + I masaki + 0x00392e07, // n0x0c13 c0x0000 (---------------) + I matsuno + 0x00283a49, // n0x0c14 c0x0000 (---------------) + I matsuyama + 0x0033ebc8, // n0x0c15 c0x0000 (---------------) + I namikata + 0x0028d9c7, // n0x0c16 c0x0000 (---------------) + I niihama + 0x002e3dc3, // n0x0c17 c0x0000 (---------------) + I ozu + 0x002a23c5, // n0x0c18 c0x0000 (---------------) + I saijo + 0x00315cc5, // n0x0c19 c0x0000 (---------------) + I seiyo + 0x002be74b, // n0x0c1a c0x0000 (---------------) + I shikokuchuo + 0x002acbc4, // n0x0c1b c0x0000 (---------------) + I tobe + 0x002d1504, // n0x0c1c c0x0000 (---------------) + I toon + 0x00265706, // n0x0c1d c0x0000 (---------------) + I uchiko + 0x002e4647, // n0x0c1e c0x0000 (---------------) + I uwajima + 0x003804ca, // n0x0c1f c0x0000 (---------------) + I yawatahama + 0x002281c7, // n0x0c20 c0x0000 (---------------) + I echizen + 0x00366787, // n0x0c21 c0x0000 (---------------) + I eiheiji + 0x0026b7c5, // n0x0c22 c0x0000 (---------------) + I fukui + 0x00390e45, // n0x0c23 c0x0000 (---------------) + I ikeda + 0x00260149, // n0x0c24 c0x0000 (---------------) + I katsuyama + 0x00292a46, // n0x0c25 c0x0000 (---------------) + I mihama + 0x0022804d, // n0x0c26 c0x0000 (---------------) + I minamiechizen + 0x00395885, // n0x0c27 c0x0000 (---------------) + I obama + 0x00285a43, // n0x0c28 c0x0000 (---------------) + I ohi + 0x00207e83, // n0x0c29 c0x0000 (---------------) + I ono + 0x003404c5, // n0x0c2a c0x0000 (---------------) + I sabae + 0x003396c5, // n0x0c2b 
c0x0000 (---------------) + I sakai + 0x00248648, // n0x0c2c c0x0000 (---------------) + I takahama + 0x00277007, // n0x0c2d c0x0000 (---------------) + I tsuruga + 0x0021f086, // n0x0c2e c0x0000 (---------------) + I wakasa + 0x00289c86, // n0x0c2f c0x0000 (---------------) + I ashiya + 0x0021de85, // n0x0c30 c0x0000 (---------------) + I buzen + 0x00375807, // n0x0c31 c0x0000 (---------------) + I chikugo + 0x00206a47, // n0x0c32 c0x0000 (---------------) + I chikuho + 0x002a5bc7, // n0x0c33 c0x0000 (---------------) + I chikujo + 0x002ba4ca, // n0x0c34 c0x0000 (---------------) + I chikushino + 0x0023bf08, // n0x0c35 c0x0000 (---------------) + I chikuzen + 0x002be904, // n0x0c36 c0x0000 (---------------) + I chuo + 0x0024ed47, // n0x0c37 c0x0000 (---------------) + I dazaifu + 0x0026ae87, // n0x0c38 c0x0000 (---------------) + I fukuchi + 0x00317546, // n0x0c39 c0x0000 (---------------) + I hakata + 0x002545c7, // n0x0c3a c0x0000 (---------------) + I higashi + 0x002c5b48, // n0x0c3b c0x0000 (---------------) + I hirokawa + 0x0029b788, // n0x0c3c c0x0000 (---------------) + I hisayama + 0x00246d86, // n0x0c3d c0x0000 (---------------) + I iizuka + 0x0023abc8, // n0x0c3e c0x0000 (---------------) + I inatsuki + 0x00226884, // n0x0c3f c0x0000 (---------------) + I kaho + 0x0027cf46, // n0x0c40 c0x0000 (---------------) + I kasuga + 0x0020d3c6, // n0x0c41 c0x0000 (---------------) + I kasuya + 0x003824c6, // n0x0c42 c0x0000 (---------------) + I kawara + 0x00295dc6, // n0x0c43 c0x0000 (---------------) + I keisen + 0x00232c84, // n0x0c44 c0x0000 (---------------) + I koga + 0x00248946, // n0x0c45 c0x0000 (---------------) + I kurate + 0x002a67c6, // n0x0c46 c0x0000 (---------------) + I kurogi + 0x00282e46, // n0x0c47 c0x0000 (---------------) + I kurume + 0x00213fc6, // n0x0c48 c0x0000 (---------------) + I minami + 0x00207d46, // n0x0c49 c0x0000 (---------------) + I miyako + 0x00272346, // n0x0c4a c0x0000 (---------------) + I miyama + 0x0021ef88, // n0x0c4b c0x0000 (---------------) + I miyawaka + 0x002fe4c8, // n0x0c4c c0x0000 (---------------) + I mizumaki + 0x002bb0c8, // n0x0c4d c0x0000 (---------------) + I munakata + 0x002934c8, // n0x0c4e c0x0000 (---------------) + I nakagawa + 0x00269386, // n0x0c4f c0x0000 (---------------) + I nakama + 0x00208b85, // n0x0c50 c0x0000 (---------------) + I nishi + 0x0031b486, // n0x0c51 c0x0000 (---------------) + I nogata + 0x00296ec5, // n0x0c52 c0x0000 (---------------) + I ogori + 0x0035c047, // n0x0c53 c0x0000 (---------------) + I okagaki + 0x0022ecc5, // n0x0c54 c0x0000 (---------------) + I okawa + 0x00201bc3, // n0x0c55 c0x0000 (---------------) + I oki + 0x00200085, // n0x0c56 c0x0000 (---------------) + I omuta + 0x002c7844, // n0x0c57 c0x0000 (---------------) + I onga + 0x00207e85, // n0x0c58 c0x0000 (---------------) + I onojo + 0x0020fdc3, // n0x0c59 c0x0000 (---------------) + I oto + 0x002d47c7, // n0x0c5a c0x0000 (---------------) + I saigawa + 0x00333848, // n0x0c5b c0x0000 (---------------) + I sasaguri + 0x002e6a46, // n0x0c5c c0x0000 (---------------) + I shingu + 0x0029230d, // n0x0c5d c0x0000 (---------------) + I shinyoshitomi + 0x00266d46, // n0x0c5e c0x0000 (---------------) + I shonai + 0x002823c5, // n0x0c5f c0x0000 (---------------) + I soeda + 0x002ad183, // n0x0c60 c0x0000 (---------------) + I sue + 0x002a2189, // n0x0c61 c0x0000 (---------------) + I tachiarai + 0x0020e146, // n0x0c62 c0x0000 (---------------) + I tagawa + 0x002344c6, // n0x0c63 c0x0000 (---------------) + I takata + 0x00253644, // n0x0c64 
c0x0000 (---------------) + I toho + 0x00239447, // n0x0c65 c0x0000 (---------------) + I toyotsu + 0x00227786, // n0x0c66 c0x0000 (---------------) + I tsuiki + 0x00289985, // n0x0c67 c0x0000 (---------------) + I ukiha + 0x00209803, // n0x0c68 c0x0000 (---------------) + I umi + 0x00211e04, // n0x0c69 c0x0000 (---------------) + I usui + 0x0026b046, // n0x0c6a c0x0000 (---------------) + I yamada + 0x002d81c4, // n0x0c6b c0x0000 (---------------) + I yame + 0x002f7448, // n0x0c6c c0x0000 (---------------) + I yanagawa + 0x0038f4c9, // n0x0c6d c0x0000 (---------------) + I yukuhashi + 0x0022ef49, // n0x0c6e c0x0000 (---------------) + I aizubange + 0x0028b14a, // n0x0c6f c0x0000 (---------------) + I aizumisato + 0x0023124d, // n0x0c70 c0x0000 (---------------) + I aizuwakamatsu + 0x00240607, // n0x0c71 c0x0000 (---------------) + I asakawa + 0x00255706, // n0x0c72 c0x0000 (---------------) + I bandai + 0x00209004, // n0x0c73 c0x0000 (---------------) + I date + 0x0026d5c9, // n0x0c74 c0x0000 (---------------) + I fukushima + 0x00274788, // n0x0c75 c0x0000 (---------------) + I furudono + 0x00275f06, // n0x0c76 c0x0000 (---------------) + I futaba + 0x002e9cc6, // n0x0c77 c0x0000 (---------------) + I hanawa + 0x002545c7, // n0x0c78 c0x0000 (---------------) + I higashi + 0x0029cdc6, // n0x0c79 c0x0000 (---------------) + I hirata + 0x0021af86, // n0x0c7a c0x0000 (---------------) + I hirono + 0x00329c06, // n0x0c7b c0x0000 (---------------) + I iitate + 0x0039554a, // n0x0c7c c0x0000 (---------------) + I inawashiro + 0x00214e48, // n0x0c7d c0x0000 (---------------) + I ishikawa + 0x002999c5, // n0x0c7e c0x0000 (---------------) + I iwaki + 0x00254a09, // n0x0c7f c0x0000 (---------------) + I izumizaki + 0x002af2ca, // n0x0c80 c0x0000 (---------------) + I kagamiishi + 0x002150c8, // n0x0c81 c0x0000 (---------------) + I kaneyama + 0x00287448, // n0x0c82 c0x0000 (---------------) + I kawamata + 0x002815c8, // n0x0c83 c0x0000 (---------------) + I kitakata + 0x00293e8c, // n0x0c84 c0x0000 (---------------) + I kitashiobara + 0x002c3e45, // n0x0c85 c0x0000 (---------------) + I koori + 0x00289f08, // n0x0c86 c0x0000 (---------------) + I koriyama + 0x002f1d86, // n0x0c87 c0x0000 (---------------) + I kunimi + 0x002d1906, // n0x0c88 c0x0000 (---------------) + I miharu + 0x00392cc7, // n0x0c89 c0x0000 (---------------) + I mishima + 0x002280c5, // n0x0c8a c0x0000 (---------------) + I namie + 0x0026f4c5, // n0x0c8b c0x0000 (---------------) + I nango + 0x0022ee09, // n0x0c8c c0x0000 (---------------) + I nishiaizu + 0x0020bd87, // n0x0c8d c0x0000 (---------------) + I nishigo + 0x0037af85, // n0x0c8e c0x0000 (---------------) + I okuma + 0x0021e147, // n0x0c8f c0x0000 (---------------) + I omotego + 0x00207e83, // n0x0c90 c0x0000 (---------------) + I ono + 0x002af985, // n0x0c91 c0x0000 (---------------) + I otama + 0x00355688, // n0x0c92 c0x0000 (---------------) + I samegawa + 0x0026e047, // n0x0c93 c0x0000 (---------------) + I shimogo + 0x00287309, // n0x0c94 c0x0000 (---------------) + I shirakawa + 0x002b3885, // n0x0c95 c0x0000 (---------------) + I showa + 0x0038e104, // n0x0c96 c0x0000 (---------------) + I soma + 0x0028c348, // n0x0c97 c0x0000 (---------------) + I sukagawa + 0x0025fa47, // n0x0c98 c0x0000 (---------------) + I taishin + 0x0028db88, // n0x0c99 c0x0000 (---------------) + I tamakawa + 0x003109c8, // n0x0c9a c0x0000 (---------------) + I tanagura + 0x0034d1c5, // n0x0c9b c0x0000 (---------------) + I tenei + 0x00389946, // n0x0c9c c0x0000 (---------------) + I 
yabuki + 0x0022afc6, // n0x0c9d c0x0000 (---------------) + I yamato + 0x00346949, // n0x0c9e c0x0000 (---------------) + I yamatsuri + 0x002fdc47, // n0x0c9f c0x0000 (---------------) + I yanaizu + 0x00297546, // n0x0ca0 c0x0000 (---------------) + I yugawa + 0x00383347, // n0x0ca1 c0x0000 (---------------) + I anpachi + 0x00244403, // n0x0ca2 c0x0000 (---------------) + I ena + 0x00289804, // n0x0ca3 c0x0000 (---------------) + I gifu + 0x003746c5, // n0x0ca4 c0x0000 (---------------) + I ginan + 0x0024efc4, // n0x0ca5 c0x0000 (---------------) + I godo + 0x00232644, // n0x0ca6 c0x0000 (---------------) + I gujo + 0x0023ca07, // n0x0ca7 c0x0000 (---------------) + I hashima + 0x002e5907, // n0x0ca8 c0x0000 (---------------) + I hichiso + 0x002685c4, // n0x0ca9 c0x0000 (---------------) + I hida + 0x00287150, // n0x0caa c0x0000 (---------------) + I higashishirakawa + 0x002f4ec7, // n0x0cab c0x0000 (---------------) + I ibigawa + 0x00390e45, // n0x0cac c0x0000 (---------------) + I ikeda + 0x00271acc, // n0x0cad c0x0000 (---------------) + I kakamigahara + 0x00203cc4, // n0x0cae c0x0000 (---------------) + I kani + 0x0028f7c8, // n0x0caf c0x0000 (---------------) + I kasahara + 0x0023b7c9, // n0x0cb0 c0x0000 (---------------) + I kasamatsu + 0x00298ac6, // n0x0cb1 c0x0000 (---------------) + I kawaue + 0x0022aa08, // n0x0cb2 c0x0000 (---------------) + I kitagata + 0x0022ec04, // n0x0cb3 c0x0000 (---------------) + I mino + 0x002fb148, // n0x0cb4 c0x0000 (---------------) + I minokamo + 0x002adf86, // n0x0cb5 c0x0000 (---------------) + I mitake + 0x00252288, // n0x0cb6 c0x0000 (---------------) + I mizunami + 0x00289146, // n0x0cb7 c0x0000 (---------------) + I motosu + 0x0021d20b, // n0x0cb8 c0x0000 (---------------) + I nakatsugawa + 0x00394b85, // n0x0cb9 c0x0000 (---------------) + I ogaki + 0x002b5248, // n0x0cba c0x0000 (---------------) + I sakahogi + 0x0020d984, // n0x0cbb c0x0000 (---------------) + I seki + 0x0038f90a, // n0x0cbc c0x0000 (---------------) + I sekigahara + 0x00287309, // n0x0cbd c0x0000 (---------------) + I shirakawa + 0x0024f946, // n0x0cbe c0x0000 (---------------) + I tajimi + 0x0020f408, // n0x0cbf c0x0000 (---------------) + I takayama + 0x002b9085, // n0x0cc0 c0x0000 (---------------) + I tarui + 0x002add84, // n0x0cc1 c0x0000 (---------------) + I toki + 0x0028f6c6, // n0x0cc2 c0x0000 (---------------) + I tomika + 0x002e55c8, // n0x0cc3 c0x0000 (---------------) + I wanouchi + 0x0026dc08, // n0x0cc4 c0x0000 (---------------) + I yamagata + 0x00330546, // n0x0cc5 c0x0000 (---------------) + I yaotsu + 0x00396e84, // n0x0cc6 c0x0000 (---------------) + I yoro + 0x00216306, // n0x0cc7 c0x0000 (---------------) + I annaka + 0x002e3947, // n0x0cc8 c0x0000 (---------------) + I chiyoda + 0x00266507, // n0x0cc9 c0x0000 (---------------) + I fujioka + 0x002545cf, // n0x0cca c0x0000 (---------------) + I higashiagatsuma + 0x0037a987, // n0x0ccb c0x0000 (---------------) + I isesaki + 0x0025ad07, // n0x0ccc c0x0000 (---------------) + I itakura + 0x002d17c5, // n0x0ccd c0x0000 (---------------) + I kanna + 0x002cc145, // n0x0cce c0x0000 (---------------) + I kanra + 0x0028b8c9, // n0x0ccf c0x0000 (---------------) + I katashina + 0x00343106, // n0x0cd0 c0x0000 (---------------) + I kawaba + 0x00258d45, // n0x0cd1 c0x0000 (---------------) + I kiryu + 0x0026f047, // n0x0cd2 c0x0000 (---------------) + I kusatsu + 0x002cb088, // n0x0cd3 c0x0000 (---------------) + I maebashi + 0x002cae05, // n0x0cd4 c0x0000 (---------------) + I meiwa + 0x002846c6, // n0x0cd5 
c0x0000 (---------------) + I midori + 0x00204808, // n0x0cd6 c0x0000 (---------------) + I minakami + 0x0034e4ca, // n0x0cd7 c0x0000 (---------------) + I naganohara + 0x00364808, // n0x0cd8 c0x0000 (---------------) + I nakanojo + 0x00278107, // n0x0cd9 c0x0000 (---------------) + I nanmoku + 0x003161c6, // n0x0cda c0x0000 (---------------) + I numata + 0x002549c6, // n0x0cdb c0x0000 (---------------) + I oizumi + 0x0020ca43, // n0x0cdc c0x0000 (---------------) + I ora + 0x00203943, // n0x0cdd c0x0000 (---------------) + I ota + 0x0034a649, // n0x0cde c0x0000 (---------------) + I shibukawa + 0x00273789, // n0x0cdf c0x0000 (---------------) + I shimonita + 0x0028e1c6, // n0x0ce0 c0x0000 (---------------) + I shinto + 0x002b3885, // n0x0ce1 c0x0000 (---------------) + I showa + 0x00298d88, // n0x0ce2 c0x0000 (---------------) + I takasaki + 0x0020f408, // n0x0ce3 c0x0000 (---------------) + I takayama + 0x003936c8, // n0x0ce4 c0x0000 (---------------) + I tamamura + 0x00329c8b, // n0x0ce5 c0x0000 (---------------) + I tatebayashi + 0x00292547, // n0x0ce6 c0x0000 (---------------) + I tomioka + 0x0030b489, // n0x0ce7 c0x0000 (---------------) + I tsukiyono + 0x00254848, // n0x0ce8 c0x0000 (---------------) + I tsumagoi + 0x0038dac4, // n0x0ce9 c0x0000 (---------------) + I ueno + 0x002b5d48, // n0x0cea c0x0000 (---------------) + I yoshioka + 0x0027a909, // n0x0ceb c0x0000 (---------------) + I asaminami + 0x002557c5, // n0x0cec c0x0000 (---------------) + I daiwa + 0x0020dd07, // n0x0ced c0x0000 (---------------) + I etajima + 0x002ab685, // n0x0cee c0x0000 (---------------) + I fuchu + 0x0026db08, // n0x0cef c0x0000 (---------------) + I fukuyama + 0x0027dd8b, // n0x0cf0 c0x0000 (---------------) + I hatsukaichi + 0x00280dd0, // n0x0cf1 c0x0000 (---------------) + I higashihiroshima + 0x00292c45, // n0x0cf2 c0x0000 (---------------) + I hongo + 0x0020d8cc, // n0x0cf3 c0x0000 (---------------) + I jinsekikogen + 0x0021f245, // n0x0cf4 c0x0000 (---------------) + I kaita + 0x0026b843, // n0x0cf5 c0x0000 (---------------) + I kui + 0x00285906, // n0x0cf6 c0x0000 (---------------) + I kumano + 0x002a4544, // n0x0cf7 c0x0000 (---------------) + I kure + 0x003637c6, // n0x0cf8 c0x0000 (---------------) + I mihara + 0x00288107, // n0x0cf9 c0x0000 (---------------) + I miyoshi + 0x00204884, // n0x0cfa c0x0000 (---------------) + I naka + 0x002a5708, // n0x0cfb c0x0000 (---------------) + I onomichi + 0x00305acd, // n0x0cfc c0x0000 (---------------) + I osakikamijima + 0x00335c85, // n0x0cfd c0x0000 (---------------) + I otake + 0x00201a04, // n0x0cfe c0x0000 (---------------) + I saka + 0x0021b584, // n0x0cff c0x0000 (---------------) + I sera + 0x00325cc9, // n0x0d00 c0x0000 (---------------) + I seranishi + 0x0027f9c8, // n0x0d01 c0x0000 (---------------) + I shinichi + 0x002f5a47, // n0x0d02 c0x0000 (---------------) + I shobara + 0x002ae008, // n0x0d03 c0x0000 (---------------) + I takehara + 0x0026e388, // n0x0d04 c0x0000 (---------------) + I abashiri + 0x00268705, // n0x0d05 c0x0000 (---------------) + I abira + 0x0031acc7, // n0x0d06 c0x0000 (---------------) + I aibetsu + 0x00268687, // n0x0d07 c0x0000 (---------------) + I akabira + 0x00300f87, // n0x0d08 c0x0000 (---------------) + I akkeshi + 0x002ad5c9, // n0x0d09 c0x0000 (---------------) + I asahikawa + 0x00227609, // n0x0d0a c0x0000 (---------------) + I ashibetsu + 0x00230d06, // n0x0d0b c0x0000 (---------------) + I ashoro + 0x00318dc6, // n0x0d0c c0x0000 (---------------) + I assabu + 0x00254806, // n0x0d0d c0x0000 
(---------------) + I atsuma + 0x00323c05, // n0x0d0e c0x0000 (---------------) + I bibai + 0x002626c4, // n0x0d0f c0x0000 (---------------) + I biei + 0x0038aec6, // n0x0d10 c0x0000 (---------------) + I bifuka + 0x0038f286, // n0x0d11 c0x0000 (---------------) + I bihoro + 0x00268748, // n0x0d12 c0x0000 (---------------) + I biratori + 0x002e170b, // n0x0d13 c0x0000 (---------------) + I chippubetsu + 0x00366547, // n0x0d14 c0x0000 (---------------) + I chitose + 0x00209004, // n0x0d15 c0x0000 (---------------) + I date + 0x0032d2c6, // n0x0d16 c0x0000 (---------------) + I ebetsu + 0x00337807, // n0x0d17 c0x0000 (---------------) + I embetsu + 0x00271f45, // n0x0d18 c0x0000 (---------------) + I eniwa + 0x002f35c5, // n0x0d19 c0x0000 (---------------) + I erimo + 0x00202244, // n0x0d1a c0x0000 (---------------) + I esan + 0x00227586, // n0x0d1b c0x0000 (---------------) + I esashi + 0x0038af48, // n0x0d1c c0x0000 (---------------) + I fukagawa + 0x0026d5c9, // n0x0d1d c0x0000 (---------------) + I fukushima + 0x00239f06, // n0x0d1e c0x0000 (---------------) + I furano + 0x00273e08, // n0x0d1f c0x0000 (---------------) + I furubira + 0x002e3146, // n0x0d20 c0x0000 (---------------) + I haboro + 0x00325388, // n0x0d21 c0x0000 (---------------) + I hakodate + 0x002a154c, // n0x0d22 c0x0000 (---------------) + I hamatonbetsu + 0x002685c6, // n0x0d23 c0x0000 (---------------) + I hidaka + 0x0028208d, // n0x0d24 c0x0000 (---------------) + I higashikagura + 0x0028250b, // n0x0d25 c0x0000 (---------------) + I higashikawa + 0x0023a0c5, // n0x0d26 c0x0000 (---------------) + I hiroo + 0x00206b87, // n0x0d27 c0x0000 (---------------) + I hokuryu + 0x00226906, // n0x0d28 c0x0000 (---------------) + I hokuto + 0x00305188, // n0x0d29 c0x0000 (---------------) + I honbetsu + 0x00230d89, // n0x0d2a c0x0000 (---------------) + I horokanai + 0x00346588, // n0x0d2b c0x0000 (---------------) + I horonobe + 0x00390e45, // n0x0d2c c0x0000 (---------------) + I ikeda + 0x0020de07, // n0x0d2d c0x0000 (---------------) + I imakane + 0x002af448, // n0x0d2e c0x0000 (---------------) + I ishikari + 0x0023e509, // n0x0d2f c0x0000 (---------------) + I iwamizawa + 0x002e74c6, // n0x0d30 c0x0000 (---------------) + I iwanai + 0x00239e0a, // n0x0d31 c0x0000 (---------------) + I kamifurano + 0x00304f08, // n0x0d32 c0x0000 (---------------) + I kamikawa + 0x003463cb, // n0x0d33 c0x0000 (---------------) + I kamishihoro + 0x002ae7cc, // n0x0d34 c0x0000 (---------------) + I kamisunagawa + 0x002fb248, // n0x0d35 c0x0000 (---------------) + I kamoenai + 0x00269e06, // n0x0d36 c0x0000 (---------------) + I kayabe + 0x002a5a88, // n0x0d37 c0x0000 (---------------) + I kembuchi + 0x0035e007, // n0x0d38 c0x0000 (---------------) + I kikonai + 0x002e5389, // n0x0d39 c0x0000 (---------------) + I kimobetsu + 0x0027c54d, // n0x0d3a c0x0000 (---------------) + I kitahiroshima + 0x002885c6, // n0x0d3b c0x0000 (---------------) + I kitami + 0x002534c8, // n0x0d3c c0x0000 (---------------) + I kiyosato + 0x002fe389, // n0x0d3d c0x0000 (---------------) + I koshimizu + 0x002a3148, // n0x0d3e c0x0000 (---------------) + I kunneppu + 0x00270048, // n0x0d3f c0x0000 (---------------) + I kuriyama + 0x002a7e8c, // n0x0d40 c0x0000 (---------------) + I kuromatsunai + 0x002a9347, // n0x0d41 c0x0000 (---------------) + I kushiro + 0x002a9e47, // n0x0d42 c0x0000 (---------------) + I kutchan + 0x002ad405, // n0x0d43 c0x0000 (---------------) + I kyowa + 0x00260307, // n0x0d44 c0x0000 (---------------) + I mashike + 0x002caf48, // n0x0d45 
c0x0000 (---------------) + I matsumae + 0x0028f746, // n0x0d46 c0x0000 (---------------) + I mikasa + 0x003561cc, // n0x0d47 c0x0000 (---------------) + I minamifurano + 0x002ccb48, // n0x0d48 c0x0000 (---------------) + I mombetsu + 0x002b7108, // n0x0d49 c0x0000 (---------------) + I moseushi + 0x0024e806, // n0x0d4a c0x0000 (---------------) + I mukawa + 0x002a6187, // n0x0d4b c0x0000 (---------------) + I muroran + 0x00230f04, // n0x0d4c c0x0000 (---------------) + I naie + 0x002934c8, // n0x0d4d c0x0000 (---------------) + I nakagawa + 0x0026208c, // n0x0d4e c0x0000 (---------------) + I nakasatsunai + 0x0027848c, // n0x0d4f c0x0000 (---------------) + I nakatombetsu + 0x0021b8c5, // n0x0d50 c0x0000 (---------------) + I nanae + 0x0037c587, // n0x0d51 c0x0000 (---------------) + I nanporo + 0x00396e06, // n0x0d52 c0x0000 (---------------) + I nayoro + 0x002a6106, // n0x0d53 c0x0000 (---------------) + I nemuro + 0x00281a48, // n0x0d54 c0x0000 (---------------) + I niikappu + 0x0037ae44, // n0x0d55 c0x0000 (---------------) + I niki + 0x002209cb, // n0x0d56 c0x0000 (---------------) + I nishiokoppe + 0x002f090b, // n0x0d57 c0x0000 (---------------) + I noboribetsu + 0x003161c6, // n0x0d58 c0x0000 (---------------) + I numata + 0x00305947, // n0x0d59 c0x0000 (---------------) + I obihiro + 0x00311445, // n0x0d5a c0x0000 (---------------) + I obira + 0x0025d045, // n0x0d5b c0x0000 (---------------) + I oketo + 0x00220b06, // n0x0d5c c0x0000 (---------------) + I okoppe + 0x002b9045, // n0x0d5d c0x0000 (---------------) + I otaru + 0x002acb85, // n0x0d5e c0x0000 (---------------) + I otobe + 0x002c4a07, // n0x0d5f c0x0000 (---------------) + I otofuke + 0x0025cbc9, // n0x0d60 c0x0000 (---------------) + I otoineppu + 0x002f3c44, // n0x0d61 c0x0000 (---------------) + I oumu + 0x0035f085, // n0x0d62 c0x0000 (---------------) + I ozora + 0x002c2c45, // n0x0d63 c0x0000 (---------------) + I pippu + 0x0028eb48, // n0x0d64 c0x0000 (---------------) + I rankoshi + 0x002a5545, // n0x0d65 c0x0000 (---------------) + I rebun + 0x002b4809, // n0x0d66 c0x0000 (---------------) + I rikubetsu + 0x00294c47, // n0x0d67 c0x0000 (---------------) + I rishiri + 0x00294c4b, // n0x0d68 c0x0000 (---------------) + I rishirifuji + 0x00307c86, // n0x0d69 c0x0000 (---------------) + I saroma + 0x0021ecc9, // n0x0d6a c0x0000 (---------------) + I sarufutsu + 0x0024c908, // n0x0d6b c0x0000 (---------------) + I shakotan + 0x00252d45, // n0x0d6c c0x0000 (---------------) + I shari + 0x00301088, // n0x0d6d c0x0000 (---------------) + I shibecha + 0x00227648, // n0x0d6e c0x0000 (---------------) + I shibetsu + 0x00251507, // n0x0d6f c0x0000 (---------------) + I shikabe + 0x002d3e87, // n0x0d70 c0x0000 (---------------) + I shikaoi + 0x0023ca89, // n0x0d71 c0x0000 (---------------) + I shimamaki + 0x002521c7, // n0x0d72 c0x0000 (---------------) + I shimizu + 0x00266909, // n0x0d73 c0x0000 (---------------) + I shimokawa + 0x00287a0c, // n0x0d74 c0x0000 (---------------) + I shinshinotsu + 0x0028e1c8, // n0x0d75 c0x0000 (---------------) + I shintoku + 0x002d1609, // n0x0d76 c0x0000 (---------------) + I shiranuka + 0x002d2247, // n0x0d77 c0x0000 (---------------) + I shiraoi + 0x0026e449, // n0x0d78 c0x0000 (---------------) + I shiriuchi + 0x002e5a47, // n0x0d79 c0x0000 (---------------) + I sobetsu + 0x002ae8c8, // n0x0d7a c0x0000 (---------------) + I sunagawa + 0x0028ee85, // n0x0d7b c0x0000 (---------------) + I taiki + 0x0027cec6, // n0x0d7c c0x0000 (---------------) + I takasu + 0x002a8648, // n0x0d7d 
c0x0000 (---------------) + I takikawa + 0x00245108, // n0x0d7e c0x0000 (---------------) + I takinoue + 0x002af189, // n0x0d7f c0x0000 (---------------) + I teshikaga + 0x002acbc7, // n0x0d80 c0x0000 (---------------) + I tobetsu + 0x0028b345, // n0x0d81 c0x0000 (---------------) + I tohma + 0x002b9dc9, // n0x0d82 c0x0000 (---------------) + I tomakomai + 0x0036edc6, // n0x0d83 c0x0000 (---------------) + I tomari + 0x0027c344, // n0x0d84 c0x0000 (---------------) + I toya + 0x00346246, // n0x0d85 c0x0000 (---------------) + I toyako + 0x00371088, // n0x0d86 c0x0000 (---------------) + I toyotomi + 0x00244007, // n0x0d87 c0x0000 (---------------) + I toyoura + 0x002e1908, // n0x0d88 c0x0000 (---------------) + I tsubetsu + 0x0023ac89, // n0x0d89 c0x0000 (---------------) + I tsukigata + 0x0024e587, // n0x0d8a c0x0000 (---------------) + I urakawa + 0x00288ec6, // n0x0d8b c0x0000 (---------------) + I urausu + 0x00206c44, // n0x0d8c c0x0000 (---------------) + I uryu + 0x00200109, // n0x0d8d c0x0000 (---------------) + I utashinai + 0x00342f08, // n0x0d8e c0x0000 (---------------) + I wakkanai + 0x0024e6c7, // n0x0d8f c0x0000 (---------------) + I wassamu + 0x00311fc6, // n0x0d90 c0x0000 (---------------) + I yakumo + 0x00292f46, // n0x0d91 c0x0000 (---------------) + I yoichi + 0x0035e144, // n0x0d92 c0x0000 (---------------) + I aioi + 0x00341846, // n0x0d93 c0x0000 (---------------) + I akashi + 0x00207e03, // n0x0d94 c0x0000 (---------------) + I ako + 0x0038bd09, // n0x0d95 c0x0000 (---------------) + I amagasaki + 0x00394b46, // n0x0d96 c0x0000 (---------------) + I aogaki + 0x0028c7c5, // n0x0d97 c0x0000 (---------------) + I asago + 0x00289c86, // n0x0d98 c0x0000 (---------------) + I ashiya + 0x0028dcc5, // n0x0d99 c0x0000 (---------------) + I awaji + 0x0026d348, // n0x0d9a c0x0000 (---------------) + I fukusaki + 0x002faf87, // n0x0d9b c0x0000 (---------------) + I goshiki + 0x002f8c46, // n0x0d9c c0x0000 (---------------) + I harima + 0x0031c986, // n0x0d9d c0x0000 (---------------) + I himeji + 0x00312b88, // n0x0d9e c0x0000 (---------------) + I ichikawa + 0x0028ba47, // n0x0d9f c0x0000 (---------------) + I inagawa + 0x00288605, // n0x0da0 c0x0000 (---------------) + I itami + 0x0028a188, // n0x0da1 c0x0000 (---------------) + I kakogawa + 0x003744c8, // n0x0da2 c0x0000 (---------------) + I kamigori + 0x00304f08, // n0x0da3 c0x0000 (---------------) + I kamikawa + 0x0021f105, // n0x0da4 c0x0000 (---------------) + I kasai + 0x0027cf46, // n0x0da5 c0x0000 (---------------) + I kasuga + 0x0022ed09, // n0x0da6 c0x0000 (---------------) + I kawanishi + 0x0022ae44, // n0x0da7 c0x0000 (---------------) + I miki + 0x00359a0b, // n0x0da8 c0x0000 (---------------) + I minamiawaji + 0x0021ac8b, // n0x0da9 c0x0000 (---------------) + I nishinomiya + 0x002998c9, // n0x0daa c0x0000 (---------------) + I nishiwaki + 0x00207e83, // n0x0dab c0x0000 (---------------) + I ono + 0x002453c5, // n0x0dac c0x0000 (---------------) + I sanda + 0x00208a46, // n0x0dad c0x0000 (---------------) + I sannan + 0x002937c8, // n0x0dae c0x0000 (---------------) + I sasayama + 0x002349c4, // n0x0daf c0x0000 (---------------) + I sayo + 0x002e6a46, // n0x0db0 c0x0000 (---------------) + I shingu + 0x002ba609, // n0x0db1 c0x0000 (---------------) + I shinonsen + 0x003081c5, // n0x0db2 c0x0000 (---------------) + I shiso + 0x002c4946, // n0x0db3 c0x0000 (---------------) + I sumoto + 0x0025fa46, // n0x0db4 c0x0000 (---------------) + I taishi + 0x00209604, // n0x0db5 c0x0000 (---------------) + I taka + 
0x002875ca, // n0x0db6 c0x0000 (---------------) + I takarazuka + 0x0028c708, // n0x0db7 c0x0000 (---------------) + I takasago + 0x00245106, // n0x0db8 c0x0000 (---------------) + I takino + 0x0035ee85, // n0x0db9 c0x0000 (---------------) + I tamba + 0x00203987, // n0x0dba c0x0000 (---------------) + I tatsuno + 0x0022e707, // n0x0dbb c0x0000 (---------------) + I toyooka + 0x00389944, // n0x0dbc c0x0000 (---------------) + I yabu + 0x0021aec7, // n0x0dbd c0x0000 (---------------) + I yashiro + 0x002a0604, // n0x0dbe c0x0000 (---------------) + I yoka + 0x00322c46, // n0x0dbf c0x0000 (---------------) + I yokawa + 0x00204943, // n0x0dc0 c0x0000 (---------------) + I ami + 0x002ad5c5, // n0x0dc1 c0x0000 (---------------) + I asahi + 0x00346e05, // n0x0dc2 c0x0000 (---------------) + I bando + 0x002e5708, // n0x0dc3 c0x0000 (---------------) + I chikusei + 0x0024ef05, // n0x0dc4 c0x0000 (---------------) + I daigo + 0x00268309, // n0x0dc5 c0x0000 (---------------) + I fujishiro + 0x0028d547, // n0x0dc6 c0x0000 (---------------) + I hitachi + 0x0029330b, // n0x0dc7 c0x0000 (---------------) + I hitachinaka + 0x0028d54c, // n0x0dc8 c0x0000 (---------------) + I hitachiomiya + 0x0028e50a, // n0x0dc9 c0x0000 (---------------) + I hitachiota + 0x00316ac7, // n0x0dca c0x0000 (---------------) + I ibaraki + 0x00200243, // n0x0dcb c0x0000 (---------------) + I ina + 0x00291508, // n0x0dcc c0x0000 (---------------) + I inashiki + 0x0021f2c5, // n0x0dcd c0x0000 (---------------) + I itako + 0x002cae85, // n0x0dce c0x0000 (---------------) + I iwama + 0x00207f44, // n0x0dcf c0x0000 (---------------) + I joso + 0x002ae7c6, // n0x0dd0 c0x0000 (---------------) + I kamisu + 0x0023b7c6, // n0x0dd1 c0x0000 (---------------) + I kasama + 0x00341887, // n0x0dd2 c0x0000 (---------------) + I kashima + 0x0020b60b, // n0x0dd3 c0x0000 (---------------) + I kasumigaura + 0x00232c84, // n0x0dd4 c0x0000 (---------------) + I koga + 0x0033ee44, // n0x0dd5 c0x0000 (---------------) + I miho + 0x00246f04, // n0x0dd6 c0x0000 (---------------) + I mito + 0x002b4f86, // n0x0dd7 c0x0000 (---------------) + I moriya + 0x00204884, // n0x0dd8 c0x0000 (---------------) + I naka + 0x00376c08, // n0x0dd9 c0x0000 (---------------) + I namegata + 0x00321385, // n0x0dda c0x0000 (---------------) + I oarai + 0x00204045, // n0x0ddb c0x0000 (---------------) + I ogawa + 0x00393607, // n0x0ddc c0x0000 (---------------) + I omitama + 0x00206c89, // n0x0ddd c0x0000 (---------------) + I ryugasaki + 0x003396c5, // n0x0dde c0x0000 (---------------) + I sakai + 0x0030cc0a, // n0x0ddf c0x0000 (---------------) + I sakuragawa + 0x0027bc89, // n0x0de0 c0x0000 (---------------) + I shimodate + 0x0027db0a, // n0x0de1 c0x0000 (---------------) + I shimotsuma + 0x00395689, // n0x0de2 c0x0000 (---------------) + I shirosato + 0x00339a04, // n0x0de3 c0x0000 (---------------) + I sowa + 0x0029fdc5, // n0x0de4 c0x0000 (---------------) + I suifu + 0x0029cec8, // n0x0de5 c0x0000 (---------------) + I takahagi + 0x002d0fcb, // n0x0de6 c0x0000 (---------------) + I tamatsukuri + 0x002df1c5, // n0x0de7 c0x0000 (---------------) + I tokai + 0x00352046, // n0x0de8 c0x0000 (---------------) + I tomobe + 0x0021a304, // n0x0de9 c0x0000 (---------------) + I tone + 0x00268846, // n0x0dea c0x0000 (---------------) + I toride + 0x0024e409, // n0x0deb c0x0000 (---------------) + I tsuchiura + 0x0032d387, // n0x0dec c0x0000 (---------------) + I tsukuba + 0x00387c88, // n0x0ded c0x0000 (---------------) + I uchihara + 0x002315c6, // n0x0dee c0x0000 
(---------------) + I ushiku + 0x002e38c7, // n0x0def c0x0000 (---------------) + I yachiyo + 0x0026dc08, // n0x0df0 c0x0000 (---------------) + I yamagata + 0x00377b86, // n0x0df1 c0x0000 (---------------) + I yawara + 0x00260d44, // n0x0df2 c0x0000 (---------------) + I yuki + 0x0034af87, // n0x0df3 c0x0000 (---------------) + I anamizu + 0x00336fc5, // n0x0df4 c0x0000 (---------------) + I hakui + 0x0034be87, // n0x0df5 c0x0000 (---------------) + I hakusan + 0x00281f04, // n0x0df6 c0x0000 (---------------) + I kaga + 0x00226886, // n0x0df7 c0x0000 (---------------) + I kahoku + 0x0037ffc8, // n0x0df8 c0x0000 (---------------) + I kanazawa + 0x002826c8, // n0x0df9 c0x0000 (---------------) + I kawakita + 0x002be447, // n0x0dfa c0x0000 (---------------) + I komatsu + 0x0023fcc8, // n0x0dfb c0x0000 (---------------) + I nakanoto + 0x0027e905, // n0x0dfc c0x0000 (---------------) + I nanao + 0x00207cc4, // n0x0dfd c0x0000 (---------------) + I nomi + 0x00312a88, // n0x0dfe c0x0000 (---------------) + I nonoichi + 0x0023fdc4, // n0x0dff c0x0000 (---------------) + I noto + 0x0020f2c5, // n0x0e00 c0x0000 (---------------) + I shika + 0x002d2b84, // n0x0e01 c0x0000 (---------------) + I suzu + 0x0027d707, // n0x0e02 c0x0000 (---------------) + I tsubata + 0x0031c3c7, // n0x0e03 c0x0000 (---------------) + I tsurugi + 0x0026e588, // n0x0e04 c0x0000 (---------------) + I uchinada + 0x0028dd06, // n0x0e05 c0x0000 (---------------) + I wajima + 0x0024ee85, // n0x0e06 c0x0000 (---------------) + I fudai + 0x00268108, // n0x0e07 c0x0000 (---------------) + I fujisawa + 0x00364a08, // n0x0e08 c0x0000 (---------------) + I hanamaki + 0x0028b089, // n0x0e09 c0x0000 (---------------) + I hiraizumi + 0x0021af86, // n0x0e0a c0x0000 (---------------) + I hirono + 0x0029ba88, // n0x0e0b c0x0000 (---------------) + I ichinohe + 0x0038f78a, // n0x0e0c c0x0000 (---------------) + I ichinoseki + 0x00271fc8, // n0x0e0d c0x0000 (---------------) + I iwaizumi + 0x002c3b85, // n0x0e0e c0x0000 (---------------) + I iwate + 0x0031b686, // n0x0e0f c0x0000 (---------------) + I joboji + 0x0027bb48, // n0x0e10 c0x0000 (---------------) + I kamaishi + 0x0020deca, // n0x0e11 c0x0000 (---------------) + I kanegasaki + 0x0032a887, // n0x0e12 c0x0000 (---------------) + I karumai + 0x00273345, // n0x0e13 c0x0000 (---------------) + I kawai + 0x00209588, // n0x0e14 c0x0000 (---------------) + I kitakami + 0x00224704, // n0x0e15 c0x0000 (---------------) + I kuji + 0x00205d06, // n0x0e16 c0x0000 (---------------) + I kunohe + 0x002aa5c8, // n0x0e17 c0x0000 (---------------) + I kuzumaki + 0x00207d46, // n0x0e18 c0x0000 (---------------) + I miyako + 0x00304788, // n0x0e19 c0x0000 (---------------) + I mizusawa + 0x0022ac87, // n0x0e1a c0x0000 (---------------) + I morioka + 0x00205106, // n0x0e1b c0x0000 (---------------) + I ninohe + 0x00384f84, // n0x0e1c c0x0000 (---------------) + I noda + 0x0029a847, // n0x0e1d c0x0000 (---------------) + I ofunato + 0x002e13c4, // n0x0e1e c0x0000 (---------------) + I oshu + 0x0024e3c7, // n0x0e1f c0x0000 (---------------) + I otsuchi + 0x0023430d, // n0x0e20 c0x0000 (---------------) + I rikuzentakata + 0x0026d185, // n0x0e21 c0x0000 (---------------) + I shiwa + 0x002a71cb, // n0x0e22 c0x0000 (---------------) + I shizukuishi + 0x002fe746, // n0x0e23 c0x0000 (---------------) + I sumita + 0x0023c788, // n0x0e24 c0x0000 (---------------) + I tanohata + 0x00369804, // n0x0e25 c0x0000 (---------------) + I tono + 0x00264306, // n0x0e26 c0x0000 (---------------) + I yahaba + 0x0026b046, 
// n0x0e27 c0x0000 (---------------) + I yamada + 0x0036c587, // n0x0e28 c0x0000 (---------------) + I ayagawa + 0x00281d4d, // n0x0e29 c0x0000 (---------------) + I higashikagawa + 0x002982c7, // n0x0e2a c0x0000 (---------------) + I kanonji + 0x002e8108, // n0x0e2b c0x0000 (---------------) + I kotohira + 0x00257385, // n0x0e2c c0x0000 (---------------) + I manno + 0x00283488, // n0x0e2d c0x0000 (---------------) + I marugame + 0x002aeac6, // n0x0e2e c0x0000 (---------------) + I mitoyo + 0x0027e988, // n0x0e2f c0x0000 (---------------) + I naoshima + 0x0020ce46, // n0x0e30 c0x0000 (---------------) + I sanuki + 0x0031c2c7, // n0x0e31 c0x0000 (---------------) + I tadotsu + 0x0026e909, // n0x0e32 c0x0000 (---------------) + I takamatsu + 0x00369807, // n0x0e33 c0x0000 (---------------) + I tonosho + 0x002761c8, // n0x0e34 c0x0000 (---------------) + I uchinomi + 0x00261345, // n0x0e35 c0x0000 (---------------) + I utazu + 0x00212c08, // n0x0e36 c0x0000 (---------------) + I zentsuji + 0x00305dc5, // n0x0e37 c0x0000 (---------------) + I akune + 0x00236bc5, // n0x0e38 c0x0000 (---------------) + I amami + 0x0030fcc5, // n0x0e39 c0x0000 (---------------) + I hioki + 0x00209883, // n0x0e3a c0x0000 (---------------) + I isa + 0x0026f3c4, // n0x0e3b c0x0000 (---------------) + I isen + 0x00209785, // n0x0e3c c0x0000 (---------------) + I izumi + 0x00271789, // n0x0e3d c0x0000 (---------------) + I kagoshima + 0x002b4d06, // n0x0e3e c0x0000 (---------------) + I kanoya + 0x002c5c48, // n0x0e3f c0x0000 (---------------) + I kawanabe + 0x002d3c45, // n0x0e40 c0x0000 (---------------) + I kinko + 0x00314307, // n0x0e41 c0x0000 (---------------) + I kouyama + 0x002d3a4a, // n0x0e42 c0x0000 (---------------) + I makurazaki + 0x002c4889, // n0x0e43 c0x0000 (---------------) + I matsumoto + 0x002bf68a, // n0x0e44 c0x0000 (---------------) + I minamitane + 0x002bb148, // n0x0e45 c0x0000 (---------------) + I nakatane + 0x0021df8c, // n0x0e46 c0x0000 (---------------) + I nishinoomote + 0x0026f0cd, // n0x0e47 c0x0000 (---------------) + I satsumasendai + 0x002da843, // n0x0e48 c0x0000 (---------------) + I soo + 0x00304688, // n0x0e49 c0x0000 (---------------) + I tarumizu + 0x00211dc5, // n0x0e4a c0x0000 (---------------) + I yusui + 0x00343086, // n0x0e4b c0x0000 (---------------) + I aikawa + 0x0033ea46, // n0x0e4c c0x0000 (---------------) + I atsugi + 0x002c35c5, // n0x0e4d c0x0000 (---------------) + I ayase + 0x00383449, // n0x0e4e c0x0000 (---------------) + I chigasaki + 0x00240845, // n0x0e4f c0x0000 (---------------) + I ebina + 0x00268108, // n0x0e50 c0x0000 (---------------) + I fujisawa + 0x00265b06, // n0x0e51 c0x0000 (---------------) + I hadano + 0x00328686, // n0x0e52 c0x0000 (---------------) + I hakone + 0x0028c209, // n0x0e53 c0x0000 (---------------) + I hiratsuka + 0x00377387, // n0x0e54 c0x0000 (---------------) + I isehara + 0x00315c06, // n0x0e55 c0x0000 (---------------) + I kaisei + 0x002d39c8, // n0x0e56 c0x0000 (---------------) + I kamakura + 0x003823c8, // n0x0e57 c0x0000 (---------------) + I kiyokawa + 0x0032b7c7, // n0x0e58 c0x0000 (---------------) + I matsuda + 0x0025240e, // n0x0e59 c0x0000 (---------------) + I minamiashigara + 0x002aed05, // n0x0e5a c0x0000 (---------------) + I miura + 0x0023e405, // n0x0e5b c0x0000 (---------------) + I nakai + 0x00207c48, // n0x0e5c c0x0000 (---------------) + I ninomiya + 0x00202487, // n0x0e5d c0x0000 (---------------) + I odawara + 0x00238bc2, // n0x0e5e c0x0000 (---------------) + I oi + 0x002a6c04, // n0x0e5f c0x0000 
(---------------) + I oiso + 0x003636ca, // n0x0e60 c0x0000 (---------------) + I sagamihara + 0x0024e788, // n0x0e61 c0x0000 (---------------) + I samukawa + 0x00337906, // n0x0e62 c0x0000 (---------------) + I tsukui + 0x00283b88, // n0x0e63 c0x0000 (---------------) + I yamakita + 0x0022afc6, // n0x0e64 c0x0000 (---------------) + I yamato + 0x00311d48, // n0x0e65 c0x0000 (---------------) + I yokosuka + 0x00297548, // n0x0e66 c0x0000 (---------------) + I yugawara + 0x00236b84, // n0x0e67 c0x0000 (---------------) + I zama + 0x003185c5, // n0x0e68 c0x0000 (---------------) + I zushi + 0x006735c4, // n0x0e69 c0x0001 (---------------) ! I city + 0x006735c4, // n0x0e6a c0x0001 (---------------) ! I city + 0x006735c4, // n0x0e6b c0x0001 (---------------) ! I city + 0x00201ac3, // n0x0e6c c0x0000 (---------------) + I aki + 0x002f3a86, // n0x0e6d c0x0000 (---------------) + I geisei + 0x002685c6, // n0x0e6e c0x0000 (---------------) + I hidaka + 0x0028888c, // n0x0e6f c0x0000 (---------------) + I higashitsuno + 0x00201b43, // n0x0e70 c0x0000 (---------------) + I ino + 0x002ab006, // n0x0e71 c0x0000 (---------------) + I kagami + 0x00204904, // n0x0e72 c0x0000 (---------------) + I kami + 0x0020e0c8, // n0x0e73 c0x0000 (---------------) + I kitagawa + 0x002ba445, // n0x0e74 c0x0000 (---------------) + I kochi + 0x003637c6, // n0x0e75 c0x0000 (---------------) + I mihara + 0x002a2e48, // n0x0e76 c0x0000 (---------------) + I motoyama + 0x002bc286, // n0x0e77 c0x0000 (---------------) + I muroto + 0x002f8bc6, // n0x0e78 c0x0000 (---------------) + I nahari + 0x00248448, // n0x0e79 c0x0000 (---------------) + I nakamura + 0x00374747, // n0x0e7a c0x0000 (---------------) + I nankoku + 0x00251f89, // n0x0e7b c0x0000 (---------------) + I nishitosa + 0x00328d8a, // n0x0e7c c0x0000 (---------------) + I niyodogawa + 0x00240384, // n0x0e7d c0x0000 (---------------) + I ochi + 0x0022ecc5, // n0x0e7e c0x0000 (---------------) + I okawa + 0x00286c45, // n0x0e7f c0x0000 (---------------) + I otoyo + 0x0022a786, // n0x0e80 c0x0000 (---------------) + I otsuki + 0x00240646, // n0x0e81 c0x0000 (---------------) + I sakawa + 0x00291906, // n0x0e82 c0x0000 (---------------) + I sukumo + 0x002d1e06, // n0x0e83 c0x0000 (---------------) + I susaki + 0x002520c4, // n0x0e84 c0x0000 (---------------) + I tosa + 0x002520cb, // n0x0e85 c0x0000 (---------------) + I tosashimizu + 0x00221244, // n0x0e86 c0x0000 (---------------) + I toyo + 0x00203a05, // n0x0e87 c0x0000 (---------------) + I tsuno + 0x00296285, // n0x0e88 c0x0000 (---------------) + I umaji + 0x00207006, // n0x0e89 c0x0000 (---------------) + I yasuda + 0x00209f08, // n0x0e8a c0x0000 (---------------) + I yusuhara + 0x0026ef87, // n0x0e8b c0x0000 (---------------) + I amakusa + 0x0034e684, // n0x0e8c c0x0000 (---------------) + I arao + 0x00209dc3, // n0x0e8d c0x0000 (---------------) + I aso + 0x002e3545, // n0x0e8e c0x0000 (---------------) + I choyo + 0x00351f07, // n0x0e8f c0x0000 (---------------) + I gyokuto + 0x0028f9c9, // n0x0e90 c0x0000 (---------------) + I hitoyoshi + 0x0026ee8b, // n0x0e91 c0x0000 (---------------) + I kamiamakusa + 0x00341887, // n0x0e92 c0x0000 (---------------) + I kashima + 0x0036c287, // n0x0e93 c0x0000 (---------------) + I kikuchi + 0x002d4744, // n0x0e94 c0x0000 (---------------) + I kosa + 0x002a2d48, // n0x0e95 c0x0000 (---------------) + I kumamoto + 0x00253387, // n0x0e96 c0x0000 (---------------) + I mashiki + 0x0028fc06, // n0x0e97 c0x0000 (---------------) + I mifune + 0x00260ac8, // n0x0e98 c0x0000 
(---------------) + I minamata + 0x0029ee4b, // n0x0e99 c0x0000 (---------------) + I minamioguni + 0x0034cac6, // n0x0e9a c0x0000 (---------------) + I nagasu + 0x0020e649, // n0x0e9b c0x0000 (---------------) + I nishihara + 0x0029efc5, // n0x0e9c c0x0000 (---------------) + I oguni + 0x002e3dc3, // n0x0e9d c0x0000 (---------------) + I ozu + 0x002c4946, // n0x0e9e c0x0000 (---------------) + I sumoto + 0x0022ab88, // n0x0e9f c0x0000 (---------------) + I takamori + 0x0020cf03, // n0x0ea0 c0x0000 (---------------) + I uki + 0x002269c3, // n0x0ea1 c0x0000 (---------------) + I uto + 0x0026dc06, // n0x0ea2 c0x0000 (---------------) + I yamaga + 0x0022afc6, // n0x0ea3 c0x0000 (---------------) + I yamato + 0x00373e8a, // n0x0ea4 c0x0000 (---------------) + I yatsushiro + 0x00269e45, // n0x0ea5 c0x0000 (---------------) + I ayabe + 0x0026ae8b, // n0x0ea6 c0x0000 (---------------) + I fukuchiyama + 0x00289bcb, // n0x0ea7 c0x0000 (---------------) + I higashiyama + 0x00229783, // n0x0ea8 c0x0000 (---------------) + I ide + 0x00213cc3, // n0x0ea9 c0x0000 (---------------) + I ine + 0x00298c44, // n0x0eaa c0x0000 (---------------) + I joyo + 0x0023a807, // n0x0eab c0x0000 (---------------) + I kameoka + 0x0022ac04, // n0x0eac c0x0000 (---------------) + I kamo + 0x00206e44, // n0x0ead c0x0000 (---------------) + I kita + 0x002f1c04, // n0x0eae c0x0000 (---------------) + I kizu + 0x002722c8, // n0x0eaf c0x0000 (---------------) + I kumiyama + 0x0035edc8, // n0x0eb0 c0x0000 (---------------) + I kyotamba + 0x0030f889, // n0x0eb1 c0x0000 (---------------) + I kyotanabe + 0x003165c8, // n0x0eb2 c0x0000 (---------------) + I kyotango + 0x0030c047, // n0x0eb3 c0x0000 (---------------) + I maizuru + 0x00213fc6, // n0x0eb4 c0x0000 (---------------) + I minami + 0x002c588f, // n0x0eb5 c0x0000 (---------------) + I minamiyamashiro + 0x002aee46, // n0x0eb6 c0x0000 (---------------) + I miyazu + 0x002ba3c4, // n0x0eb7 c0x0000 (---------------) + I muko + 0x0035ec0a, // n0x0eb8 c0x0000 (---------------) + I nagaokakyo + 0x00351e07, // n0x0eb9 c0x0000 (---------------) + I nakagyo + 0x00204d46, // n0x0eba c0x0000 (---------------) + I nantan + 0x0027c389, // n0x0ebb c0x0000 (---------------) + I oyamazaki + 0x0030f805, // n0x0ebc c0x0000 (---------------) + I sakyo + 0x002c6f45, // n0x0ebd c0x0000 (---------------) + I seika + 0x0030f946, // n0x0ebe c0x0000 (---------------) + I tanabe + 0x00212d43, // n0x0ebf c0x0000 (---------------) + I uji + 0x00224749, // n0x0ec0 c0x0000 (---------------) + I ujitawara + 0x00214fc6, // n0x0ec1 c0x0000 (---------------) + I wazuka + 0x0023aa49, // n0x0ec2 c0x0000 (---------------) + I yamashina + 0x003804c6, // n0x0ec3 c0x0000 (---------------) + I yawata + 0x002ad5c5, // n0x0ec4 c0x0000 (---------------) + I asahi + 0x0035dd05, // n0x0ec5 c0x0000 (---------------) + I inabe + 0x00233203, // n0x0ec6 c0x0000 (---------------) + I ise + 0x0023a948, // n0x0ec7 c0x0000 (---------------) + I kameyama + 0x002406c7, // n0x0ec8 c0x0000 (---------------) + I kawagoe + 0x0037aec4, // n0x0ec9 c0x0000 (---------------) + I kiho + 0x0022a888, // n0x0eca c0x0000 (---------------) + I kisosaki + 0x002927c4, // n0x0ecb c0x0000 (---------------) + I kiwa + 0x002d4306, // n0x0ecc c0x0000 (---------------) + I komono + 0x00285906, // n0x0ecd c0x0000 (---------------) + I kumano + 0x0022f386, // n0x0ece c0x0000 (---------------) + I kuwana + 0x002b5109, // n0x0ecf c0x0000 (---------------) + I matsusaka + 0x002cae05, // n0x0ed0 c0x0000 (---------------) + I meiwa + 0x00292a46, // n0x0ed1 
c0x0000 (---------------) + I mihama + 0x00370d89, // n0x0ed2 c0x0000 (---------------) + I minamiise + 0x002adc06, // n0x0ed3 c0x0000 (---------------) + I misugi + 0x00272346, // n0x0ed4 c0x0000 (---------------) + I miyama + 0x0036fe46, // n0x0ed5 c0x0000 (---------------) + I nabari + 0x0021f645, // n0x0ed6 c0x0000 (---------------) + I shima + 0x002d2b86, // n0x0ed7 c0x0000 (---------------) + I suzuka + 0x0031c2c4, // n0x0ed8 c0x0000 (---------------) + I tado + 0x0028ee85, // n0x0ed9 c0x0000 (---------------) + I taiki + 0x00245104, // n0x0eda c0x0000 (---------------) + I taki + 0x002f1b06, // n0x0edb c0x0000 (---------------) + I tamaki + 0x00395844, // n0x0edc c0x0000 (---------------) + I toba + 0x00203a03, // n0x0edd c0x0000 (---------------) + I tsu + 0x00274845, // n0x0ede c0x0000 (---------------) + I udono + 0x00227348, // n0x0edf c0x0000 (---------------) + I ureshino + 0x00253ec7, // n0x0ee0 c0x0000 (---------------) + I watarai + 0x00234a49, // n0x0ee1 c0x0000 (---------------) + I yokkaichi + 0x00274a88, // n0x0ee2 c0x0000 (---------------) + I furukawa + 0x002830d1, // n0x0ee3 c0x0000 (---------------) + I higashimatsushima + 0x0025faca, // n0x0ee4 c0x0000 (---------------) + I ishinomaki + 0x00316107, // n0x0ee5 c0x0000 (---------------) + I iwanuma + 0x0038d846, // n0x0ee6 c0x0000 (---------------) + I kakuda + 0x00204904, // n0x0ee7 c0x0000 (---------------) + I kami + 0x002a8748, // n0x0ee8 c0x0000 (---------------) + I kawasaki + 0x0034cc49, // n0x0ee9 c0x0000 (---------------) + I kesennuma + 0x0028b408, // n0x0eea c0x0000 (---------------) + I marumori + 0x0028328a, // n0x0eeb c0x0000 (---------------) + I matsushima + 0x002b45cd, // n0x0eec c0x0000 (---------------) + I minamisanriku + 0x0028b246, // n0x0eed c0x0000 (---------------) + I misato + 0x00248546, // n0x0eee c0x0000 (---------------) + I murata + 0x0029a906, // n0x0eef c0x0000 (---------------) + I natori + 0x00204047, // n0x0ef0 c0x0000 (---------------) + I ogawara + 0x002e81c5, // n0x0ef1 c0x0000 (---------------) + I ohira + 0x00295c07, // n0x0ef2 c0x0000 (---------------) + I onagawa + 0x0022a945, // n0x0ef3 c0x0000 (---------------) + I osaki + 0x00294d84, // n0x0ef4 c0x0000 (---------------) + I rifu + 0x0027c146, // n0x0ef5 c0x0000 (---------------) + I semine + 0x003416c7, // n0x0ef6 c0x0000 (---------------) + I shibata + 0x0022444d, // n0x0ef7 c0x0000 (---------------) + I shichikashuku + 0x0027ba87, // n0x0ef8 c0x0000 (---------------) + I shikama + 0x002471c8, // n0x0ef9 c0x0000 (---------------) + I shiogama + 0x00268409, // n0x0efa c0x0000 (---------------) + I shiroishi + 0x0031b586, // n0x0efb c0x0000 (---------------) + I tagajo + 0x002e7445, // n0x0efc c0x0000 (---------------) + I taiwa + 0x0020fe04, // n0x0efd c0x0000 (---------------) + I tome + 0x00371186, // n0x0efe c0x0000 (---------------) + I tomiya + 0x00389846, // n0x0eff c0x0000 (---------------) + I wakuya + 0x0024e906, // n0x0f00 c0x0000 (---------------) + I watari + 0x00286688, // n0x0f01 c0x0000 (---------------) + I yamamoto + 0x00395443, // n0x0f02 c0x0000 (---------------) + I zao + 0x00206fc3, // n0x0f03 c0x0000 (---------------) + I aya + 0x0036cf45, // n0x0f04 c0x0000 (---------------) + I ebino + 0x00375946, // n0x0f05 c0x0000 (---------------) + I gokase + 0x00297505, // n0x0f06 c0x0000 (---------------) + I hyuga + 0x002317c8, // n0x0f07 c0x0000 (---------------) + I kadogawa + 0x002882ca, // n0x0f08 c0x0000 (---------------) + I kawaminami + 0x00389a44, // n0x0f09 c0x0000 (---------------) + I kijo + 
0x0020e0c8, // n0x0f0a c0x0000 (---------------) + I kitagawa + 0x002815c8, // n0x0f0b c0x0000 (---------------) + I kitakata + 0x00206e47, // n0x0f0c c0x0000 (---------------) + I kitaura + 0x002d3d09, // n0x0f0d c0x0000 (---------------) + I kobayashi + 0x002a2a48, // n0x0f0e c0x0000 (---------------) + I kunitomi + 0x0026d647, // n0x0f0f c0x0000 (---------------) + I kushima + 0x0027cdc6, // n0x0f10 c0x0000 (---------------) + I mimata + 0x00207d4a, // n0x0f11 c0x0000 (---------------) + I miyakonojo + 0x00371208, // n0x0f12 c0x0000 (---------------) + I miyazaki + 0x002ae609, // n0x0f13 c0x0000 (---------------) + I morotsuka + 0x0027fa88, // n0x0f14 c0x0000 (---------------) + I nichinan + 0x00217e09, // n0x0f15 c0x0000 (---------------) + I nishimera + 0x00346687, // n0x0f16 c0x0000 (---------------) + I nobeoka + 0x00302b45, // n0x0f17 c0x0000 (---------------) + I saito + 0x00329e86, // n0x0f18 c0x0000 (---------------) + I shiiba + 0x0028f5c8, // n0x0f19 c0x0000 (---------------) + I shintomi + 0x0023ae48, // n0x0f1a c0x0000 (---------------) + I takaharu + 0x002462c8, // n0x0f1b c0x0000 (---------------) + I takanabe + 0x002f9c88, // n0x0f1c c0x0000 (---------------) + I takazaki + 0x00203a05, // n0x0f1d c0x0000 (---------------) + I tsuno + 0x0022e144, // n0x0f1e c0x0000 (---------------) + I achi + 0x0034e008, // n0x0f1f c0x0000 (---------------) + I agematsu + 0x00204e44, // n0x0f20 c0x0000 (---------------) + I anan + 0x00395484, // n0x0f21 c0x0000 (---------------) + I aoki + 0x002ad5c5, // n0x0f22 c0x0000 (---------------) + I asahi + 0x0027ed87, // n0x0f23 c0x0000 (---------------) + I azumino + 0x00206a49, // n0x0f24 c0x0000 (---------------) + I chikuhoku + 0x0036c387, // n0x0f25 c0x0000 (---------------) + I chikuma + 0x0022e185, // n0x0f26 c0x0000 (---------------) + I chino + 0x00265906, // n0x0f27 c0x0000 (---------------) + I fujimi + 0x003317c6, // n0x0f28 c0x0000 (---------------) + I hakuba + 0x0020a004, // n0x0f29 c0x0000 (---------------) + I hara + 0x0028c546, // n0x0f2a c0x0000 (---------------) + I hiraya + 0x0024ecc4, // n0x0f2b c0x0000 (---------------) + I iida + 0x00256486, // n0x0f2c c0x0000 (---------------) + I iijima + 0x0034d2c6, // n0x0f2d c0x0000 (---------------) + I iiyama + 0x0020ec86, // n0x0f2e c0x0000 (---------------) + I iizuna + 0x00390e45, // n0x0f2f c0x0000 (---------------) + I ikeda + 0x00231687, // n0x0f30 c0x0000 (---------------) + I ikusaka + 0x00200243, // n0x0f31 c0x0000 (---------------) + I ina + 0x0031e389, // n0x0f32 c0x0000 (---------------) + I karuizawa + 0x00315908, // n0x0f33 c0x0000 (---------------) + I kawakami + 0x0022a884, // n0x0f34 c0x0000 (---------------) + I kiso + 0x0026d4cd, // n0x0f35 c0x0000 (---------------) + I kisofukushima + 0x002827c8, // n0x0f36 c0x0000 (---------------) + I kitaaiki + 0x002fa008, // n0x0f37 c0x0000 (---------------) + I komagane + 0x002ae586, // n0x0f38 c0x0000 (---------------) + I komoro + 0x0026ea09, // n0x0f39 c0x0000 (---------------) + I matsukawa + 0x002c4889, // n0x0f3a c0x0000 (---------------) + I matsumoto + 0x0024e105, // n0x0f3b c0x0000 (---------------) + I miasa + 0x002883ca, // n0x0f3c c0x0000 (---------------) + I minamiaiki + 0x0025b8ca, // n0x0f3d c0x0000 (---------------) + I minamimaki + 0x002b064c, // n0x0f3e c0x0000 (---------------) + I minamiminowa + 0x002b07c6, // n0x0f3f c0x0000 (---------------) + I minowa + 0x00266386, // n0x0f40 c0x0000 (---------------) + I miyada + 0x002af8c6, // n0x0f41 c0x0000 (---------------) + I miyota + 0x00258b89, // n0x0f42 
c0x0000 (---------------) + I mochizuki + 0x0034e4c6, // n0x0f43 c0x0000 (---------------) + I nagano + 0x0028ba86, // n0x0f44 c0x0000 (---------------) + I nagawa + 0x00240906, // n0x0f45 c0x0000 (---------------) + I nagiso + 0x002934c8, // n0x0f46 c0x0000 (---------------) + I nakagawa + 0x0023fcc6, // n0x0f47 c0x0000 (---------------) + I nakano + 0x002fe9cb, // n0x0f48 c0x0000 (---------------) + I nozawaonsen + 0x0027ef05, // n0x0f49 c0x0000 (---------------) + I obuse + 0x00204045, // n0x0f4a c0x0000 (---------------) + I ogawa + 0x00266605, // n0x0f4b c0x0000 (---------------) + I okaya + 0x00353c06, // n0x0f4c c0x0000 (---------------) + I omachi + 0x00207d03, // n0x0f4d c0x0000 (---------------) + I omi + 0x0022f306, // n0x0f4e c0x0000 (---------------) + I ookuwa + 0x0027ba07, // n0x0f4f c0x0000 (---------------) + I ooshika + 0x002a8605, // n0x0f50 c0x0000 (---------------) + I otaki + 0x003389c5, // n0x0f51 c0x0000 (---------------) + I otari + 0x0036ba45, // n0x0f52 c0x0000 (---------------) + I sakae + 0x00201a06, // n0x0f53 c0x0000 (---------------) + I sakaki + 0x0024e1c4, // n0x0f54 c0x0000 (---------------) + I saku + 0x0035fb46, // n0x0f55 c0x0000 (---------------) + I sakuho + 0x00325709, // n0x0f56 c0x0000 (---------------) + I shimosuwa + 0x00353a8c, // n0x0f57 c0x0000 (---------------) + I shinanomachi + 0x00294ac8, // n0x0f58 c0x0000 (---------------) + I shiojiri + 0x002e5544, // n0x0f59 c0x0000 (---------------) + I suwa + 0x002d2806, // n0x0f5a c0x0000 (---------------) + I suzaka + 0x002fe846, // n0x0f5b c0x0000 (---------------) + I takagi + 0x0022ab88, // n0x0f5c c0x0000 (---------------) + I takamori + 0x0020f408, // n0x0f5d c0x0000 (---------------) + I takayama + 0x00353989, // n0x0f5e c0x0000 (---------------) + I tateshina + 0x00203987, // n0x0f5f c0x0000 (---------------) + I tatsuno + 0x002e5d09, // n0x0f60 c0x0000 (---------------) + I togakushi + 0x0025e506, // n0x0f61 c0x0000 (---------------) + I togura + 0x002204c4, // n0x0f62 c0x0000 (---------------) + I tomi + 0x00207204, // n0x0f63 c0x0000 (---------------) + I ueda + 0x0023e8c4, // n0x0f64 c0x0000 (---------------) + I wada + 0x0026dc08, // n0x0f65 c0x0000 (---------------) + I yamagata + 0x0020688a, // n0x0f66 c0x0000 (---------------) + I yamanouchi + 0x00339646, // n0x0f67 c0x0000 (---------------) + I yasaka + 0x0033e847, // n0x0f68 c0x0000 (---------------) + I yasuoka + 0x002e9747, // n0x0f69 c0x0000 (---------------) + I chijiwa + 0x0021edc5, // n0x0f6a c0x0000 (---------------) + I futsu + 0x00286c04, // n0x0f6b c0x0000 (---------------) + I goto + 0x0027a8c6, // n0x0f6c c0x0000 (---------------) + I hasami + 0x002e8206, // n0x0f6d c0x0000 (---------------) + I hirado + 0x00206743, // n0x0f6e c0x0000 (---------------) + I iki + 0x00315747, // n0x0f6f c0x0000 (---------------) + I isahaya + 0x003108c8, // n0x0f70 c0x0000 (---------------) + I kawatana + 0x0024e24a, // n0x0f71 c0x0000 (---------------) + I kuchinotsu + 0x002bcc48, // n0x0f72 c0x0000 (---------------) + I matsuura + 0x0036c108, // n0x0f73 c0x0000 (---------------) + I nagasaki + 0x00395885, // n0x0f74 c0x0000 (---------------) + I obama + 0x0023a1c5, // n0x0f75 c0x0000 (---------------) + I omura + 0x00366645, // n0x0f76 c0x0000 (---------------) + I oseto + 0x0021f186, // n0x0f77 c0x0000 (---------------) + I saikai + 0x0029be46, // n0x0f78 c0x0000 (---------------) + I sasebo + 0x002e5845, // n0x0f79 c0x0000 (---------------) + I seihi + 0x0038bf49, // n0x0f7a c0x0000 (---------------) + I shimabara + 0x00286a0c, // 
n0x0f7b c0x0000 (---------------) + I shinkamigoto + 0x00226a07, // n0x0f7c c0x0000 (---------------) + I togitsu + 0x00283308, // n0x0f7d c0x0000 (---------------) + I tsushima + 0x00279c05, // n0x0f7e c0x0000 (---------------) + I unzen + 0x006735c4, // n0x0f7f c0x0001 (---------------) ! I city + 0x00346e44, // n0x0f80 c0x0000 (---------------) + I ando + 0x0026e184, // n0x0f81 c0x0000 (---------------) + I gose + 0x0022e2c6, // n0x0f82 c0x0000 (---------------) + I heguri + 0x0028a74e, // n0x0f83 c0x0000 (---------------) + I higashiyoshino + 0x00344187, // n0x0f84 c0x0000 (---------------) + I ikaruga + 0x00204245, // n0x0f85 c0x0000 (---------------) + I ikoma + 0x0022adcc, // n0x0f86 c0x0000 (---------------) + I kamikitayama + 0x00292687, // n0x0f87 c0x0000 (---------------) + I kanmaki + 0x00341647, // n0x0f88 c0x0000 (---------------) + I kashiba + 0x00344809, // n0x0f89 c0x0000 (---------------) + I kashihara + 0x002133c9, // n0x0f8a c0x0000 (---------------) + I katsuragi + 0x00273345, // n0x0f8b c0x0000 (---------------) + I kawai + 0x00315908, // n0x0f8c c0x0000 (---------------) + I kawakami + 0x0022ed09, // n0x0f8d c0x0000 (---------------) + I kawanishi + 0x002c9885, // n0x0f8e c0x0000 (---------------) + I koryo + 0x002a8548, // n0x0f8f c0x0000 (---------------) + I kurotaki + 0x002b5f46, // n0x0f90 c0x0000 (---------------) + I mitsue + 0x002a5986, // n0x0f91 c0x0000 (---------------) + I miyake + 0x002da0c4, // n0x0f92 c0x0000 (---------------) + I nara + 0x00257448, // n0x0f93 c0x0000 (---------------) + I nosegawa + 0x002432c3, // n0x0f94 c0x0000 (---------------) + I oji + 0x00392684, // n0x0f95 c0x0000 (---------------) + I ouda + 0x002e35c5, // n0x0f96 c0x0000 (---------------) + I oyodo + 0x002f4d47, // n0x0f97 c0x0000 (---------------) + I sakurai + 0x00202285, // n0x0f98 c0x0000 (---------------) + I sango + 0x0038f649, // n0x0f99 c0x0000 (---------------) + I shimoichi + 0x00267dcd, // n0x0f9a c0x0000 (---------------) + I shimokitayama + 0x00285306, // n0x0f9b c0x0000 (---------------) + I shinjo + 0x00220944, // n0x0f9c c0x0000 (---------------) + I soni + 0x00335dc8, // n0x0f9d c0x0000 (---------------) + I takatori + 0x0025ca0a, // n0x0f9e c0x0000 (---------------) + I tawaramoto + 0x00203207, // n0x0f9f c0x0000 (---------------) + I tenkawa + 0x0027be45, // n0x0fa0 c0x0000 (---------------) + I tenri + 0x002070c3, // n0x0fa1 c0x0000 (---------------) + I uda + 0x00289d8e, // n0x0fa2 c0x0000 (---------------) + I yamatokoriyama + 0x0022afcc, // n0x0fa3 c0x0000 (---------------) + I yamatotakada + 0x0027b147, // n0x0fa4 c0x0000 (---------------) + I yamazoe + 0x0028a907, // n0x0fa5 c0x0000 (---------------) + I yoshino + 0x002015c3, // n0x0fa6 c0x0000 (---------------) + I aga + 0x0034e505, // n0x0fa7 c0x0000 (---------------) + I agano + 0x0026e185, // n0x0fa8 c0x0000 (---------------) + I gosen + 0x00283f48, // n0x0fa9 c0x0000 (---------------) + I itoigawa + 0x00281409, // n0x0faa c0x0000 (---------------) + I izumozaki + 0x0026a386, // n0x0fab c0x0000 (---------------) + I joetsu + 0x0022ac04, // n0x0fac c0x0000 (---------------) + I kamo + 0x00316046, // n0x0fad c0x0000 (---------------) + I kariwa + 0x0038218b, // n0x0fae c0x0000 (---------------) + I kashiwazaki + 0x002c460c, // n0x0faf c0x0000 (---------------) + I minamiuonuma + 0x0026c3c7, // n0x0fb0 c0x0000 (---------------) + I mitsuke + 0x002ba105, // n0x0fb1 c0x0000 (---------------) + I muika + 0x003743c8, // n0x0fb2 c0x0000 (---------------) + I murakami + 0x0032b585, // n0x0fb3 c0x0000 
(---------------) + I myoko + 0x0035ec07, // n0x0fb4 c0x0000 (---------------) + I nagaoka + 0x0025f907, // n0x0fb5 c0x0000 (---------------) + I niigata + 0x002432c5, // n0x0fb6 c0x0000 (---------------) + I ojiya + 0x00207d03, // n0x0fb7 c0x0000 (---------------) + I omi + 0x002109c4, // n0x0fb8 c0x0000 (---------------) + I sado + 0x00203845, // n0x0fb9 c0x0000 (---------------) + I sanjo + 0x002f3b45, // n0x0fba c0x0000 (---------------) + I seiro + 0x002f3b46, // n0x0fbb c0x0000 (---------------) + I seirou + 0x00274588, // n0x0fbc c0x0000 (---------------) + I sekikawa + 0x003416c7, // n0x0fbd c0x0000 (---------------) + I shibata + 0x0033ed46, // n0x0fbe c0x0000 (---------------) + I tagami + 0x0031abc6, // n0x0fbf c0x0000 (---------------) + I tainai + 0x0030fc06, // n0x0fc0 c0x0000 (---------------) + I tochio + 0x002e1589, // n0x0fc1 c0x0000 (---------------) + I tokamachi + 0x0031adc7, // n0x0fc2 c0x0000 (---------------) + I tsubame + 0x00207306, // n0x0fc3 c0x0000 (---------------) + I tsunan + 0x002c4786, // n0x0fc4 c0x0000 (---------------) + I uonuma + 0x00243386, // n0x0fc5 c0x0000 (---------------) + I yahiko + 0x00298cc5, // n0x0fc6 c0x0000 (---------------) + I yoita + 0x00216b86, // n0x0fc7 c0x0000 (---------------) + I yuzawa + 0x0034c845, // n0x0fc8 c0x0000 (---------------) + I beppu + 0x002a55c8, // n0x0fc9 c0x0000 (---------------) + I bungoono + 0x0024facb, // n0x0fca c0x0000 (---------------) + I bungotakada + 0x0027a6c6, // n0x0fcb c0x0000 (---------------) + I hasama + 0x002e9784, // n0x0fcc c0x0000 (---------------) + I hiji + 0x00336449, // n0x0fcd c0x0000 (---------------) + I himeshima + 0x0028d544, // n0x0fce c0x0000 (---------------) + I hita + 0x002b5ec8, // n0x0fcf c0x0000 (---------------) + I kamitsue + 0x0027b5c7, // n0x0fd0 c0x0000 (---------------) + I kokonoe + 0x0026ff44, // n0x0fd1 c0x0000 (---------------) + I kuju + 0x002a0f48, // n0x0fd2 c0x0000 (---------------) + I kunisaki + 0x002a9c04, // n0x0fd3 c0x0000 (---------------) + I kusu + 0x00298d04, // n0x0fd4 c0x0000 (---------------) + I oita + 0x002a7585, // n0x0fd5 c0x0000 (---------------) + I saiki + 0x00335cc6, // n0x0fd6 c0x0000 (---------------) + I taketa + 0x00272207, // n0x0fd7 c0x0000 (---------------) + I tsukumi + 0x00222503, // n0x0fd8 c0x0000 (---------------) + I usa + 0x00288f85, // n0x0fd9 c0x0000 (---------------) + I usuki + 0x002ab604, // n0x0fda c0x0000 (---------------) + I yufu + 0x0023e446, // n0x0fdb c0x0000 (---------------) + I akaiwa + 0x0024e188, // n0x0fdc c0x0000 (---------------) + I asakuchi + 0x00310605, // n0x0fdd c0x0000 (---------------) + I bizen + 0x0027e449, // n0x0fde c0x0000 (---------------) + I hayashima + 0x002b9fc5, // n0x0fdf c0x0000 (---------------) + I ibara + 0x002ab008, // n0x0fe0 c0x0000 (---------------) + I kagamino + 0x00340b47, // n0x0fe1 c0x0000 (---------------) + I kasaoka + 0x0035c188, // n0x0fe2 c0x0000 (---------------) + I kibichuo + 0x002a0447, // n0x0fe3 c0x0000 (---------------) + I kumenan + 0x0025adc9, // n0x0fe4 c0x0000 (---------------) + I kurashiki + 0x002487c6, // n0x0fe5 c0x0000 (---------------) + I maniwa + 0x00303986, // n0x0fe6 c0x0000 (---------------) + I misaki + 0x00240904, // n0x0fe7 c0x0000 (---------------) + I nagi + 0x0027cd05, // n0x0fe8 c0x0000 (---------------) + I niimi + 0x002dac0c, // n0x0fe9 c0x0000 (---------------) + I nishiawakura + 0x00266607, // n0x0fea c0x0000 (---------------) + I okayama + 0x00266c47, // n0x0feb c0x0000 (---------------) + I satosho + 0x002e9608, // n0x0fec c0x0000 
(---------------) + I setouchi + 0x00285306, // n0x0fed c0x0000 (---------------) + I shinjo + 0x00340f44, // n0x0fee c0x0000 (---------------) + I shoo + 0x0030ad04, // n0x0fef c0x0000 (---------------) + I soja + 0x0023c909, // n0x0ff0 c0x0000 (---------------) + I takahashi + 0x002af9c6, // n0x0ff1 c0x0000 (---------------) + I tamano + 0x002601c7, // n0x0ff2 c0x0000 (---------------) + I tsuyama + 0x00295d44, // n0x0ff3 c0x0000 (---------------) + I wake + 0x002b4e06, // n0x0ff4 c0x0000 (---------------) + I yakage + 0x00342185, // n0x0ff5 c0x0000 (---------------) + I aguni + 0x0028d847, // n0x0ff6 c0x0000 (---------------) + I ginowan + 0x002fe946, // n0x0ff7 c0x0000 (---------------) + I ginoza + 0x00239cc9, // n0x0ff8 c0x0000 (---------------) + I gushikami + 0x002b0487, // n0x0ff9 c0x0000 (---------------) + I haebaru + 0x002545c7, // n0x0ffa c0x0000 (---------------) + I higashi + 0x0028c086, // n0x0ffb c0x0000 (---------------) + I hirara + 0x00231145, // n0x0ffc c0x0000 (---------------) + I iheya + 0x0026cf88, // n0x0ffd c0x0000 (---------------) + I ishigaki + 0x00214e48, // n0x0ffe c0x0000 (---------------) + I ishikawa + 0x0022a5c6, // n0x0fff c0x0000 (---------------) + I itoman + 0x00310645, // n0x1000 c0x0000 (---------------) + I izena + 0x002f0446, // n0x1001 c0x0000 (---------------) + I kadena + 0x00201b03, // n0x1002 c0x0000 (---------------) + I kin + 0x00283dc9, // n0x1003 c0x0000 (---------------) + I kitadaito + 0x0029168e, // n0x1004 c0x0000 (---------------) + I kitanakagusuku + 0x0029f548, // n0x1005 c0x0000 (---------------) + I kumejima + 0x002928c8, // n0x1006 c0x0000 (---------------) + I kunigami + 0x0022a3cb, // n0x1007 c0x0000 (---------------) + I minamidaito + 0x0027e686, // n0x1008 c0x0000 (---------------) + I motobu + 0x002402c4, // n0x1009 c0x0000 (---------------) + I nago + 0x0026ecc4, // n0x100a c0x0000 (---------------) + I naha + 0x0029178a, // n0x100b c0x0000 (---------------) + I nakagusuku + 0x0020d7c7, // n0x100c c0x0000 (---------------) + I nakijin + 0x002073c5, // n0x100d c0x0000 (---------------) + I nanjo + 0x0020e649, // n0x100e c0x0000 (---------------) + I nishihara + 0x002a6885, // n0x100f c0x0000 (---------------) + I ogimi + 0x003954c7, // n0x1010 c0x0000 (---------------) + I okinawa + 0x002988c4, // n0x1011 c0x0000 (---------------) + I onna + 0x00265dc7, // n0x1012 c0x0000 (---------------) + I shimoji + 0x003162c8, // n0x1013 c0x0000 (---------------) + I taketomi + 0x002a43c6, // n0x1014 c0x0000 (---------------) + I tarama + 0x002065c9, // n0x1015 c0x0000 (---------------) + I tokashiki + 0x002a2b4a, // n0x1016 c0x0000 (---------------) + I tomigusuku + 0x0020d746, // n0x1017 c0x0000 (---------------) + I tonaki + 0x00282306, // n0x1018 c0x0000 (---------------) + I urasoe + 0x00296205, // n0x1019 c0x0000 (---------------) + I uruma + 0x00361285, // n0x101a c0x0000 (---------------) + I yaese + 0x00305787, // n0x101b c0x0000 (---------------) + I yomitan + 0x00309c08, // n0x101c c0x0000 (---------------) + I yonabaru + 0x003420c8, // n0x101d c0x0000 (---------------) + I yonaguni + 0x00236b86, // n0x101e c0x0000 (---------------) + I zamami + 0x0031d9c5, // n0x101f c0x0000 (---------------) + I abeno + 0x002403ce, // n0x1020 c0x0000 (---------------) + I chihayaakasaka + 0x002be904, // n0x1021 c0x0000 (---------------) + I chuo + 0x0022a545, // n0x1022 c0x0000 (---------------) + I daito + 0x00265289, // n0x1023 c0x0000 (---------------) + I fujiidera + 0x00273148, // n0x1024 c0x0000 (---------------) + I habikino + 
0x00278046, // n0x1025 c0x0000 (---------------) + I hannan + 0x0028630c, // n0x1026 c0x0000 (---------------) + I higashiosaka + 0x00287ed0, // n0x1027 c0x0000 (---------------) + I higashisumiyoshi + 0x0028a38f, // n0x1028 c0x0000 (---------------) + I higashiyodogawa + 0x0028b7c8, // n0x1029 c0x0000 (---------------) + I hirakata + 0x00316ac7, // n0x102a c0x0000 (---------------) + I ibaraki + 0x00390e45, // n0x102b c0x0000 (---------------) + I ikeda + 0x00209785, // n0x102c c0x0000 (---------------) + I izumi + 0x00272089, // n0x102d c0x0000 (---------------) + I izumiotsu + 0x00209789, // n0x102e c0x0000 (---------------) + I izumisano + 0x00216406, // n0x102f c0x0000 (---------------) + I kadoma + 0x002df247, // n0x1030 c0x0000 (---------------) + I kaizuka + 0x0037c505, // n0x1031 c0x0000 (---------------) + I kanan + 0x00375c89, // n0x1032 c0x0000 (---------------) + I kashiwara + 0x003175c6, // n0x1033 c0x0000 (---------------) + I katano + 0x0034e30d, // n0x1034 c0x0000 (---------------) + I kawachinagano + 0x0026d109, // n0x1035 c0x0000 (---------------) + I kishiwada + 0x00206e44, // n0x1036 c0x0000 (---------------) + I kita + 0x0029f2c8, // n0x1037 c0x0000 (---------------) + I kumatori + 0x0034e0c9, // n0x1038 c0x0000 (---------------) + I matsubara + 0x00339806, // n0x1039 c0x0000 (---------------) + I minato + 0x00265a05, // n0x103a c0x0000 (---------------) + I minoh + 0x00303986, // n0x103b c0x0000 (---------------) + I misaki + 0x00387b49, // n0x103c c0x0000 (---------------) + I moriguchi + 0x003896c8, // n0x103d c0x0000 (---------------) + I neyagawa + 0x00208b85, // n0x103e c0x0000 (---------------) + I nishi + 0x00257444, // n0x103f c0x0000 (---------------) + I nose + 0x002864cb, // n0x1040 c0x0000 (---------------) + I osakasayama + 0x003396c5, // n0x1041 c0x0000 (---------------) + I sakai + 0x00286606, // n0x1042 c0x0000 (---------------) + I sayama + 0x0026f406, // n0x1043 c0x0000 (---------------) + I sennan + 0x00243d86, // n0x1044 c0x0000 (---------------) + I settsu + 0x003260cb, // n0x1045 c0x0000 (---------------) + I shijonawate + 0x0027e549, // n0x1046 c0x0000 (---------------) + I shimamoto + 0x002e5b85, // n0x1047 c0x0000 (---------------) + I suita + 0x0035bf47, // n0x1048 c0x0000 (---------------) + I tadaoka + 0x0025fa46, // n0x1049 c0x0000 (---------------) + I taishi + 0x002345c6, // n0x104a c0x0000 (---------------) + I tajiri + 0x0026df08, // n0x104b c0x0000 (---------------) + I takaishi + 0x00376d89, // n0x104c c0x0000 (---------------) + I takatsuki + 0x00246f8c, // n0x104d c0x0000 (---------------) + I tondabayashi + 0x00351d08, // n0x104e c0x0000 (---------------) + I toyonaka + 0x002260c6, // n0x104f c0x0000 (---------------) + I toyono + 0x00330543, // n0x1050 c0x0000 (---------------) + I yao + 0x0027ebc6, // n0x1051 c0x0000 (---------------) + I ariake + 0x0025ac85, // n0x1052 c0x0000 (---------------) + I arita + 0x0026b1c8, // n0x1053 c0x0000 (---------------) + I fukudomi + 0x0021b746, // n0x1054 c0x0000 (---------------) + I genkai + 0x0028da88, // n0x1055 c0x0000 (---------------) + I hamatama + 0x00228245, // n0x1056 c0x0000 (---------------) + I hizen + 0x00352985, // n0x1057 c0x0000 (---------------) + I imari + 0x002877c8, // n0x1058 c0x0000 (---------------) + I kamimine + 0x002d2c87, // n0x1059 c0x0000 (---------------) + I kanzaki + 0x0033e987, // n0x105a c0x0000 (---------------) + I karatsu + 0x00341887, // n0x105b c0x0000 (---------------) + I kashima + 0x0022aa08, // n0x105c c0x0000 (---------------) + I kitagata + 
0x00316c08, // n0x105d c0x0000 (---------------) + I kitahata + 0x00253286, // n0x105e c0x0000 (---------------) + I kiyama + 0x002f1947, // n0x105f c0x0000 (---------------) + I kouhoku + 0x002896c7, // n0x1060 c0x0000 (---------------) + I kyuragi + 0x00325dca, // n0x1061 c0x0000 (---------------) + I nishiarita + 0x00212803, // n0x1062 c0x0000 (---------------) + I ogi + 0x00353c06, // n0x1063 c0x0000 (---------------) + I omachi + 0x002069c5, // n0x1064 c0x0000 (---------------) + I ouchi + 0x00275104, // n0x1065 c0x0000 (---------------) + I saga + 0x00268409, // n0x1066 c0x0000 (---------------) + I shiroishi + 0x0025ad44, // n0x1067 c0x0000 (---------------) + I taku + 0x00253f44, // n0x1068 c0x0000 (---------------) + I tara + 0x002891c4, // n0x1069 c0x0000 (---------------) + I tosu + 0x0028a90b, // n0x106a c0x0000 (---------------) + I yoshinogari + 0x0034e247, // n0x106b c0x0000 (---------------) + I arakawa + 0x00240605, // n0x106c c0x0000 (---------------) + I asaka + 0x00280ac8, // n0x106d c0x0000 (---------------) + I chichibu + 0x00265906, // n0x106e c0x0000 (---------------) + I fujimi + 0x00265908, // n0x106f c0x0000 (---------------) + I fujimino + 0x00269d86, // n0x1070 c0x0000 (---------------) + I fukaya + 0x00278e45, // n0x1071 c0x0000 (---------------) + I hanno + 0x002794c5, // n0x1072 c0x0000 (---------------) + I hanyu + 0x0027b306, // n0x1073 c0x0000 (---------------) + I hasuda + 0x0027b788, // n0x1074 c0x0000 (---------------) + I hatogaya + 0x0027c2c8, // n0x1075 c0x0000 (---------------) + I hatoyama + 0x002685c6, // n0x1076 c0x0000 (---------------) + I hidaka + 0x0028090f, // n0x1077 c0x0000 (---------------) + I higashichichibu + 0x00283890, // n0x1078 c0x0000 (---------------) + I higashimatsuyama + 0x002028c5, // n0x1079 c0x0000 (---------------) + I honjo + 0x00200243, // n0x107a c0x0000 (---------------) + I ina + 0x002797c5, // n0x107b c0x0000 (---------------) + I iruma + 0x0030b3c8, // n0x107c c0x0000 (---------------) + I iwatsuki + 0x00209689, // n0x107d c0x0000 (---------------) + I kamiizumi + 0x00304f08, // n0x107e c0x0000 (---------------) + I kamikawa + 0x00340c88, // n0x107f c0x0000 (---------------) + I kamisato + 0x00391808, // n0x1080 c0x0000 (---------------) + I kasukabe + 0x002406c7, // n0x1081 c0x0000 (---------------) + I kawagoe + 0x002655c9, // n0x1082 c0x0000 (---------------) + I kawaguchi + 0x0028dc88, // n0x1083 c0x0000 (---------------) + I kawajima + 0x00369344, // n0x1084 c0x0000 (---------------) + I kazo + 0x00289048, // n0x1085 c0x0000 (---------------) + I kitamoto + 0x0028ec09, // n0x1086 c0x0000 (---------------) + I koshigaya + 0x00307ec7, // n0x1087 c0x0000 (---------------) + I kounosu + 0x00293e04, // n0x1088 c0x0000 (---------------) + I kuki + 0x0036c448, // n0x1089 c0x0000 (---------------) + I kumagaya + 0x0023144a, // n0x108a c0x0000 (---------------) + I matsubushi + 0x0031ef46, // n0x108b c0x0000 (---------------) + I minano + 0x0028b246, // n0x108c c0x0000 (---------------) + I misato + 0x0021ae49, // n0x108d c0x0000 (---------------) + I miyashiro + 0x00288107, // n0x108e c0x0000 (---------------) + I miyoshi + 0x002b62c8, // n0x108f c0x0000 (---------------) + I moroyama + 0x00201588, // n0x1090 c0x0000 (---------------) + I nagatoro + 0x00342d88, // n0x1091 c0x0000 (---------------) + I namegawa + 0x003721c5, // n0x1092 c0x0000 (---------------) + I niiza + 0x00362605, // n0x1093 c0x0000 (---------------) + I ogano + 0x00204045, // n0x1094 c0x0000 (---------------) + I ogawa + 0x0026e145, // n0x1095 
c0x0000 (---------------) + I ogose + 0x00259347, // n0x1096 c0x0000 (---------------) + I okegawa + 0x00207d05, // n0x1097 c0x0000 (---------------) + I omiya + 0x002a8605, // n0x1098 c0x0000 (---------------) + I otaki + 0x00386ac6, // n0x1099 c0x0000 (---------------) + I ranzan + 0x00304e47, // n0x109a c0x0000 (---------------) + I ryokami + 0x002d0f07, // n0x109b c0x0000 (---------------) + I saitama + 0x00231746, // n0x109c c0x0000 (---------------) + I sakado + 0x002bb685, // n0x109d c0x0000 (---------------) + I satte + 0x00286606, // n0x109e c0x0000 (---------------) + I sayama + 0x002066c5, // n0x109f c0x0000 (---------------) + I shiki + 0x00298148, // n0x10a0 c0x0000 (---------------) + I shiraoka + 0x002cc0c4, // n0x10a1 c0x0000 (---------------) + I soka + 0x002adc86, // n0x10a2 c0x0000 (---------------) + I sugito + 0x00312e84, // n0x10a3 c0x0000 (---------------) + I toda + 0x002add88, // n0x10a4 c0x0000 (---------------) + I tokigawa + 0x00302c0a, // n0x10a5 c0x0000 (---------------) + I tokorozawa + 0x0027700c, // n0x10a6 c0x0000 (---------------) + I tsurugashima + 0x0020b805, // n0x10a7 c0x0000 (---------------) + I urawa + 0x00204106, // n0x10a8 c0x0000 (---------------) + I warabi + 0x00247146, // n0x10a9 c0x0000 (---------------) + I yashio + 0x002f5606, // n0x10aa c0x0000 (---------------) + I yokoze + 0x00226144, // n0x10ab c0x0000 (---------------) + I yono + 0x00375b45, // n0x10ac c0x0000 (---------------) + I yorii + 0x00269bc7, // n0x10ad c0x0000 (---------------) + I yoshida + 0x00288189, // n0x10ae c0x0000 (---------------) + I yoshikawa + 0x0028fac7, // n0x10af c0x0000 (---------------) + I yoshimi + 0x006735c4, // n0x10b0 c0x0001 (---------------) ! I city + 0x006735c4, // n0x10b1 c0x0001 (---------------) ! I city + 0x002f59c5, // n0x10b2 c0x0000 (---------------) + I aisho + 0x0023dd44, // n0x10b3 c0x0000 (---------------) + I gamo + 0x00285cca, // n0x10b4 c0x0000 (---------------) + I higashiomi + 0x00265786, // n0x10b5 c0x0000 (---------------) + I hikone + 0x00346344, // n0x10b6 c0x0000 (---------------) + I koka + 0x00204cc5, // n0x10b7 c0x0000 (---------------) + I konan + 0x002d7bc5, // n0x10b8 c0x0000 (---------------) + I kosei + 0x002e8104, // n0x10b9 c0x0000 (---------------) + I koto + 0x0026f047, // n0x10ba c0x0000 (---------------) + I kusatsu + 0x002b9f47, // n0x10bb c0x0000 (---------------) + I maibara + 0x002b4f88, // n0x10bc c0x0000 (---------------) + I moriyama + 0x0036df48, // n0x10bd c0x0000 (---------------) + I nagahama + 0x00208b89, // n0x10be c0x0000 (---------------) + I nishiazai + 0x003176c8, // n0x10bf c0x0000 (---------------) + I notogawa + 0x00285e8b, // n0x10c0 c0x0000 (---------------) + I omihachiman + 0x0022a784, // n0x10c1 c0x0000 (---------------) + I otsu + 0x0025e445, // n0x10c2 c0x0000 (---------------) + I ritto + 0x00258dc5, // n0x10c3 c0x0000 (---------------) + I ryuoh + 0x00341809, // n0x10c4 c0x0000 (---------------) + I takashima + 0x00376d89, // n0x10c5 c0x0000 (---------------) + I takatsuki + 0x00336348, // n0x10c6 c0x0000 (---------------) + I torahime + 0x00233d48, // n0x10c7 c0x0000 (---------------) + I toyosato + 0x00207004, // n0x10c8 c0x0000 (---------------) + I yasu + 0x002fe885, // n0x10c9 c0x0000 (---------------) + I akagi + 0x002068c3, // n0x10ca c0x0000 (---------------) + I ama + 0x0022a745, // n0x10cb c0x0000 (---------------) + I gotsu + 0x00292ac6, // n0x10cc c0x0000 (---------------) + I hamada + 0x0028124c, // n0x10cd c0x0000 (---------------) + I higashiizumo + 0x00214ec6, // n0x10ce 
c0x0000 (---------------) + I hikawa + 0x002fb046, // n0x10cf c0x0000 (---------------) + I hikimi + 0x002782c5, // n0x10d0 c0x0000 (---------------) + I izumo + 0x00201a88, // n0x10d1 c0x0000 (---------------) + I kakinoki + 0x002a2fc6, // n0x10d2 c0x0000 (---------------) + I masuda + 0x0038d9c6, // n0x10d3 c0x0000 (---------------) + I matsue + 0x0028b246, // n0x10d4 c0x0000 (---------------) + I misato + 0x0021f48c, // n0x10d5 c0x0000 (---------------) + I nishinoshima + 0x0024eac4, // n0x10d6 c0x0000 (---------------) + I ohda + 0x0030fd4a, // n0x10d7 c0x0000 (---------------) + I okinoshima + 0x00278208, // n0x10d8 c0x0000 (---------------) + I okuizumo + 0x00281087, // n0x10d9 c0x0000 (---------------) + I shimane + 0x00260c46, // n0x10da c0x0000 (---------------) + I tamayu + 0x002e5507, // n0x10db c0x0000 (---------------) + I tsuwano + 0x002ca4c5, // n0x10dc c0x0000 (---------------) + I unnan + 0x00311fc6, // n0x10dd c0x0000 (---------------) + I yakumo + 0x0033ba46, // n0x10de c0x0000 (---------------) + I yasugi + 0x00365987, // n0x10df c0x0000 (---------------) + I yatsuka + 0x00253f84, // n0x10e0 c0x0000 (---------------) + I arai + 0x0027d805, // n0x10e1 c0x0000 (---------------) + I atami + 0x00265284, // n0x10e2 c0x0000 (---------------) + I fuji + 0x00294e07, // n0x10e3 c0x0000 (---------------) + I fujieda + 0x002654c8, // n0x10e4 c0x0000 (---------------) + I fujikawa + 0x0026620a, // n0x10e5 c0x0000 (---------------) + I fujinomiya + 0x0026ce07, // n0x10e6 c0x0000 (---------------) + I fukuroi + 0x00271047, // n0x10e7 c0x0000 (---------------) + I gotemba + 0x00316a47, // n0x10e8 c0x0000 (---------------) + I haibara + 0x0032b6c9, // n0x10e9 c0x0000 (---------------) + I hamamatsu + 0x0028124a, // n0x10ea c0x0000 (---------------) + I higashiizu + 0x00220483, // n0x10eb c0x0000 (---------------) + I ito + 0x00253e85, // n0x10ec c0x0000 (---------------) + I iwata + 0x00209783, // n0x10ed c0x0000 (---------------) + I izu + 0x002f1c49, // n0x10ee c0x0000 (---------------) + I izunokuni + 0x002c7008, // n0x10ef c0x0000 (---------------) + I kakegawa + 0x002d17c7, // n0x10f0 c0x0000 (---------------) + I kannami + 0x00305009, // n0x10f1 c0x0000 (---------------) + I kawanehon + 0x00214f46, // n0x10f2 c0x0000 (---------------) + I kawazu + 0x0025fcc8, // n0x10f3 c0x0000 (---------------) + I kikugawa + 0x002d4745, // n0x10f4 c0x0000 (---------------) + I kosai + 0x00364b0a, // n0x10f5 c0x0000 (---------------) + I makinohara + 0x00355209, // n0x10f6 c0x0000 (---------------) + I matsuzaki + 0x00246c49, // n0x10f7 c0x0000 (---------------) + I minamiizu + 0x00392cc7, // n0x10f8 c0x0000 (---------------) + I mishima + 0x0028b509, // n0x10f9 c0x0000 (---------------) + I morimachi + 0x0020eb88, // n0x10fa c0x0000 (---------------) + I nishiizu + 0x002d2986, // n0x10fb c0x0000 (---------------) + I numazu + 0x002042c8, // n0x10fc c0x0000 (---------------) + I omaezaki + 0x0034e807, // n0x10fd c0x0000 (---------------) + I shimada + 0x002521c7, // n0x10fe c0x0000 (---------------) + I shimizu + 0x0027bc87, // n0x10ff c0x0000 (---------------) + I shimoda + 0x002b4b88, // n0x1100 c0x0000 (---------------) + I shizuoka + 0x002d2686, // n0x1101 c0x0000 (---------------) + I susono + 0x00231205, // n0x1102 c0x0000 (---------------) + I yaizu + 0x00269bc7, // n0x1103 c0x0000 (---------------) + I yoshida + 0x00281e08, // n0x1104 c0x0000 (---------------) + I ashikaga + 0x0030f544, // n0x1105 c0x0000 (---------------) + I bato + 0x002d6e04, // n0x1106 c0x0000 (---------------) + I 
haga + 0x00315b07, // n0x1107 c0x0000 (---------------) + I ichikai + 0x00255847, // n0x1108 c0x0000 (---------------) + I iwafune + 0x0022eb8a, // n0x1109 c0x0000 (---------------) + I kaminokawa + 0x002d2906, // n0x110a c0x0000 (---------------) + I kanuma + 0x0027afca, // n0x110b c0x0000 (---------------) + I karasuyama + 0x002a6b47, // n0x110c c0x0000 (---------------) + I kuroiso + 0x002be6c7, // n0x110d c0x0000 (---------------) + I mashiko + 0x0024fa44, // n0x110e c0x0000 (---------------) + I mibu + 0x002669c4, // n0x110f c0x0000 (---------------) + I moka + 0x0021ff46, // n0x1110 c0x0000 (---------------) + I motegi + 0x002efac4, // n0x1111 c0x0000 (---------------) + I nasu + 0x002efacc, // n0x1112 c0x0000 (---------------) + I nasushiobara + 0x0033e185, // n0x1113 c0x0000 (---------------) + I nikko + 0x0020f249, // n0x1114 c0x0000 (---------------) + I nishikata + 0x00267304, // n0x1115 c0x0000 (---------------) + I nogi + 0x002e81c5, // n0x1116 c0x0000 (---------------) + I ohira + 0x0025c988, // n0x1117 c0x0000 (---------------) + I ohtawara + 0x0027c385, // n0x1118 c0x0000 (---------------) + I oyama + 0x002f4d46, // n0x1119 c0x0000 (---------------) + I sakura + 0x002098c4, // n0x111a c0x0000 (---------------) + I sano + 0x0027924a, // n0x111b c0x0000 (---------------) + I shimotsuke + 0x002d80c6, // n0x111c c0x0000 (---------------) + I shioya + 0x00316d8a, // n0x111d c0x0000 (---------------) + I takanezawa + 0x0030f5c7, // n0x111e c0x0000 (---------------) + I tochigi + 0x0021d305, // n0x111f c0x0000 (---------------) + I tsuga + 0x00212d45, // n0x1120 c0x0000 (---------------) + I ujiie + 0x0021ee0a, // n0x1121 c0x0000 (---------------) + I utsunomiya + 0x0028c645, // n0x1122 c0x0000 (---------------) + I yaita + 0x00272046, // n0x1123 c0x0000 (---------------) + I aizumi + 0x00204e44, // n0x1124 c0x0000 (---------------) + I anan + 0x002a0706, // n0x1125 c0x0000 (---------------) + I ichiba + 0x00305845, // n0x1126 c0x0000 (---------------) + I itano + 0x0021b806, // n0x1127 c0x0000 (---------------) + I kainan + 0x002be44c, // n0x1128 c0x0000 (---------------) + I komatsushima + 0x002b644a, // n0x1129 c0x0000 (---------------) + I matsushige + 0x0025b9c4, // n0x112a c0x0000 (---------------) + I mima + 0x00213fc6, // n0x112b c0x0000 (---------------) + I minami + 0x00288107, // n0x112c c0x0000 (---------------) + I miyoshi + 0x002b9b44, // n0x112d c0x0000 (---------------) + I mugi + 0x002934c8, // n0x112e c0x0000 (---------------) + I nakagawa + 0x00376a06, // n0x112f c0x0000 (---------------) + I naruto + 0x00240249, // n0x1130 c0x0000 (---------------) + I sanagochi + 0x002c2189, // n0x1131 c0x0000 (---------------) + I shishikui + 0x0028e2c9, // n0x1132 c0x0000 (---------------) + I tokushima + 0x00359bc6, // n0x1133 c0x0000 (---------------) + I wajiki + 0x0034e906, // n0x1134 c0x0000 (---------------) + I adachi + 0x00204407, // n0x1135 c0x0000 (---------------) + I akiruno + 0x0038be88, // n0x1136 c0x0000 (---------------) + I akishima + 0x0034e709, // n0x1137 c0x0000 (---------------) + I aogashima + 0x0034e247, // n0x1138 c0x0000 (---------------) + I arakawa + 0x0027e786, // n0x1139 c0x0000 (---------------) + I bunkyo + 0x002e3947, // n0x113a c0x0000 (---------------) + I chiyoda + 0x0029a7c5, // n0x113b c0x0000 (---------------) + I chofu + 0x002be904, // n0x113c c0x0000 (---------------) + I chuo + 0x00203fc7, // n0x113d c0x0000 (---------------) + I edogawa + 0x002ab685, // n0x113e c0x0000 (---------------) + I fuchu + 0x00275045, // n0x113f c0x0000 
(---------------) + I fussa + 0x00389ec7, // n0x1140 c0x0000 (---------------) + I hachijo + 0x00243188, // n0x1141 c0x0000 (---------------) + I hachioji + 0x00374346, // n0x1142 c0x0000 (---------------) + I hamura + 0x00282c8d, // n0x1143 c0x0000 (---------------) + I higashikurume + 0x0028414f, // n0x1144 c0x0000 (---------------) + I higashimurayama + 0x00289bcd, // n0x1145 c0x0000 (---------------) + I higashiyamato + 0x0021ad44, // n0x1146 c0x0000 (---------------) + I hino + 0x00227446, // n0x1147 c0x0000 (---------------) + I hinode + 0x002bcf88, // n0x1148 c0x0000 (---------------) + I hinohara + 0x002408c5, // n0x1149 c0x0000 (---------------) + I inagi + 0x00325f88, // n0x114a c0x0000 (---------------) + I itabashi + 0x002513ca, // n0x114b c0x0000 (---------------) + I katsushika + 0x00206e44, // n0x114c c0x0000 (---------------) + I kita + 0x002978c6, // n0x114d c0x0000 (---------------) + I kiyose + 0x002a3687, // n0x114e c0x0000 (---------------) + I kodaira + 0x00232c87, // n0x114f c0x0000 (---------------) + I koganei + 0x00374809, // n0x1150 c0x0000 (---------------) + I kokubunji + 0x00204285, // n0x1151 c0x0000 (---------------) + I komae + 0x002e8104, // n0x1152 c0x0000 (---------------) + I koto + 0x0031850a, // n0x1153 c0x0000 (---------------) + I kouzushima + 0x002a2089, // n0x1154 c0x0000 (---------------) + I kunitachi + 0x0028b607, // n0x1155 c0x0000 (---------------) + I machida + 0x002b1286, // n0x1156 c0x0000 (---------------) + I meguro + 0x00339806, // n0x1157 c0x0000 (---------------) + I minato + 0x002fe7c6, // n0x1158 c0x0000 (---------------) + I mitaka + 0x0034b046, // n0x1159 c0x0000 (---------------) + I mizuho + 0x002bc90f, // n0x115a c0x0000 (---------------) + I musashimurayama + 0x002bce49, // n0x115b c0x0000 (---------------) + I musashino + 0x0023fcc6, // n0x115c c0x0000 (---------------) + I nakano + 0x003528c6, // n0x115d c0x0000 (---------------) + I nerima + 0x00301849, // n0x115e c0x0000 (---------------) + I ogasawara + 0x002f1a47, // n0x115f c0x0000 (---------------) + I okutama + 0x0020b3c3, // n0x1160 c0x0000 (---------------) + I ome + 0x0021f606, // n0x1161 c0x0000 (---------------) + I oshima + 0x00203943, // n0x1162 c0x0000 (---------------) + I ota + 0x002c3488, // n0x1163 c0x0000 (---------------) + I setagaya + 0x002e3787, // n0x1164 c0x0000 (---------------) + I shibuya + 0x0028b9c9, // n0x1165 c0x0000 (---------------) + I shinagawa + 0x00285788, // n0x1166 c0x0000 (---------------) + I shinjuku + 0x0033eac8, // n0x1167 c0x0000 (---------------) + I suginami + 0x00289246, // n0x1168 c0x0000 (---------------) + I sumida + 0x0032da89, // n0x1169 c0x0000 (---------------) + I tachikawa + 0x002e5c45, // n0x116a c0x0000 (---------------) + I taito + 0x00260c44, // n0x116b c0x0000 (---------------) + I tama + 0x002d0a47, // n0x116c c0x0000 (---------------) + I toshima + 0x00258c05, // n0x116d c0x0000 (---------------) + I chizu + 0x0021ad44, // n0x116e c0x0000 (---------------) + I hino + 0x00274b88, // n0x116f c0x0000 (---------------) + I kawahara + 0x0020da84, // n0x1170 c0x0000 (---------------) + I koge + 0x002eaf87, // n0x1171 c0x0000 (---------------) + I kotoura + 0x003337c6, // n0x1172 c0x0000 (---------------) + I misasa + 0x002c6705, // n0x1173 c0x0000 (---------------) + I nanbu + 0x0027fa88, // n0x1174 c0x0000 (---------------) + I nichinan + 0x003396cb, // n0x1175 c0x0000 (---------------) + I sakaiminato + 0x002e2787, // n0x1176 c0x0000 (---------------) + I tottori + 0x0021f086, // n0x1177 c0x0000 (---------------) 
+ I wakasa + 0x002aeec4, // n0x1178 c0x0000 (---------------) + I yazu + 0x0032b186, // n0x1179 c0x0000 (---------------) + I yonago + 0x002ad5c5, // n0x117a c0x0000 (---------------) + I asahi + 0x002ab685, // n0x117b c0x0000 (---------------) + I fuchu + 0x0026c2c9, // n0x117c c0x0000 (---------------) + I fukumitsu + 0x0026ec49, // n0x117d c0x0000 (---------------) + I funahashi + 0x00252204, // n0x117e c0x0000 (---------------) + I himi + 0x00252245, // n0x117f c0x0000 (---------------) + I imizu + 0x00214005, // n0x1180 c0x0000 (---------------) + I inami + 0x00364986, // n0x1181 c0x0000 (---------------) + I johana + 0x00315a08, // n0x1182 c0x0000 (---------------) + I kamiichi + 0x002a4c46, // n0x1183 c0x0000 (---------------) + I kurobe + 0x0031070b, // n0x1184 c0x0000 (---------------) + I nakaniikawa + 0x0029894a, // n0x1185 c0x0000 (---------------) + I namerikawa + 0x002e14c5, // n0x1186 c0x0000 (---------------) + I nanto + 0x00279546, // n0x1187 c0x0000 (---------------) + I nyuzen + 0x0031d945, // n0x1188 c0x0000 (---------------) + I oyabe + 0x00205705, // n0x1189 c0x0000 (---------------) + I taira + 0x00281747, // n0x118a c0x0000 (---------------) + I takaoka + 0x0038a708, // n0x118b c0x0000 (---------------) + I tateyama + 0x0027b804, // n0x118c c0x0000 (---------------) + I toga + 0x0024e006, // n0x118d c0x0000 (---------------) + I tonami + 0x0027c346, // n0x118e c0x0000 (---------------) + I toyama + 0x0020ed47, // n0x118f c0x0000 (---------------) + I unazuki + 0x002e3d84, // n0x1190 c0x0000 (---------------) + I uozu + 0x0026b046, // n0x1191 c0x0000 (---------------) + I yamada + 0x00250d45, // n0x1192 c0x0000 (---------------) + I arida + 0x00250d49, // n0x1193 c0x0000 (---------------) + I aridagawa + 0x0034eb04, // n0x1194 c0x0000 (---------------) + I gobo + 0x00286d89, // n0x1195 c0x0000 (---------------) + I hashimoto + 0x002685c6, // n0x1196 c0x0000 (---------------) + I hidaka + 0x002a9408, // n0x1197 c0x0000 (---------------) + I hirogawa + 0x00214005, // n0x1198 c0x0000 (---------------) + I inami + 0x002e9845, // n0x1199 c0x0000 (---------------) + I iwade + 0x0021b806, // n0x119a c0x0000 (---------------) + I kainan + 0x00246e89, // n0x119b c0x0000 (---------------) + I kamitonda + 0x002133c9, // n0x119c c0x0000 (---------------) + I katsuragi + 0x002fb0c6, // n0x119d c0x0000 (---------------) + I kimino + 0x00273248, // n0x119e c0x0000 (---------------) + I kinokawa + 0x0022aec8, // n0x119f c0x0000 (---------------) + I kitayama + 0x0031d904, // n0x11a0 c0x0000 (---------------) + I koya + 0x0034b544, // n0x11a1 c0x0000 (---------------) + I koza + 0x0034b548, // n0x11a2 c0x0000 (---------------) + I kozagawa + 0x003000c8, // n0x11a3 c0x0000 (---------------) + I kudoyama + 0x002e5e09, // n0x11a4 c0x0000 (---------------) + I kushimoto + 0x00292a46, // n0x11a5 c0x0000 (---------------) + I mihama + 0x0028b246, // n0x11a6 c0x0000 (---------------) + I misato + 0x002fd2cd, // n0x11a7 c0x0000 (---------------) + I nachikatsuura + 0x002e6a46, // n0x11a8 c0x0000 (---------------) + I shingu + 0x002a1409, // n0x11a9 c0x0000 (---------------) + I shirahama + 0x00353dc5, // n0x11aa c0x0000 (---------------) + I taiji + 0x0030f946, // n0x11ab c0x0000 (---------------) + I tanabe + 0x0032dc48, // n0x11ac c0x0000 (---------------) + I wakayama + 0x002f9ac5, // n0x11ad c0x0000 (---------------) + I yuasa + 0x00289704, // n0x11ae c0x0000 (---------------) + I yura + 0x002ad5c5, // n0x11af c0x0000 (---------------) + I asahi + 0x0026e788, // n0x11b0 c0x0000 
(---------------) + I funagata + 0x00285a89, // n0x11b1 c0x0000 (---------------) + I higashine + 0x00265344, // n0x11b2 c0x0000 (---------------) + I iide + 0x00226886, // n0x11b3 c0x0000 (---------------) + I kahoku + 0x003467ca, // n0x11b4 c0x0000 (---------------) + I kaminoyama + 0x002150c8, // n0x11b5 c0x0000 (---------------) + I kaneyama + 0x0022ed09, // n0x11b6 c0x0000 (---------------) + I kawanishi + 0x002f8d4a, // n0x11b7 c0x0000 (---------------) + I mamurogawa + 0x00304f86, // n0x11b8 c0x0000 (---------------) + I mikawa + 0x00284308, // n0x11b9 c0x0000 (---------------) + I murayama + 0x0035e9c5, // n0x11ba c0x0000 (---------------) + I nagai + 0x00355088, // n0x11bb c0x0000 (---------------) + I nakayama + 0x002a0545, // n0x11bc c0x0000 (---------------) + I nanyo + 0x00214e09, // n0x11bd c0x0000 (---------------) + I nishikawa + 0x0034fd89, // n0x11be c0x0000 (---------------) + I obanazawa + 0x00210a82, // n0x11bf c0x0000 (---------------) + I oe + 0x0029efc5, // n0x11c0 c0x0000 (---------------) + I oguni + 0x00258e86, // n0x11c1 c0x0000 (---------------) + I ohkura + 0x00268507, // n0x11c2 c0x0000 (---------------) + I oishida + 0x00275105, // n0x11c3 c0x0000 (---------------) + I sagae + 0x002f9b86, // n0x11c4 c0x0000 (---------------) + I sakata + 0x00359408, // n0x11c5 c0x0000 (---------------) + I sakegawa + 0x00285306, // n0x11c6 c0x0000 (---------------) + I shinjo + 0x0029cd89, // n0x11c7 c0x0000 (---------------) + I shirataka + 0x00266d46, // n0x11c8 c0x0000 (---------------) + I shonai + 0x0026dd88, // n0x11c9 c0x0000 (---------------) + I takahata + 0x00294245, // n0x11ca c0x0000 (---------------) + I tendo + 0x00247ec6, // n0x11cb c0x0000 (---------------) + I tozawa + 0x003052c8, // n0x11cc c0x0000 (---------------) + I tsuruoka + 0x0026dc08, // n0x11cd c0x0000 (---------------) + I yamagata + 0x0034d348, // n0x11ce c0x0000 (---------------) + I yamanobe + 0x002212c8, // n0x11cf c0x0000 (---------------) + I yonezawa + 0x00216b84, // n0x11d0 c0x0000 (---------------) + I yuza + 0x0021bc43, // n0x11d1 c0x0000 (---------------) + I abu + 0x0029cfc4, // n0x11d2 c0x0000 (---------------) + I hagi + 0x002af4c6, // n0x11d3 c0x0000 (---------------) + I hikari + 0x0029a804, // n0x11d4 c0x0000 (---------------) + I hofu + 0x00292807, // n0x11d5 c0x0000 (---------------) + I iwakuni + 0x0038d8c9, // n0x11d6 c0x0000 (---------------) + I kudamatsu + 0x002ae205, // n0x11d7 c0x0000 (---------------) + I mitou + 0x00201586, // n0x11d8 c0x0000 (---------------) + I nagato + 0x0021f606, // n0x11d9 c0x0000 (---------------) + I oshima + 0x002743cb, // n0x11da c0x0000 (---------------) + I shimonoseki + 0x002e1406, // n0x11db c0x0000 (---------------) + I shunan + 0x003003c6, // n0x11dc c0x0000 (---------------) + I tabuse + 0x00286f48, // n0x11dd c0x0000 (---------------) + I tokuyama + 0x00338906, // n0x11de c0x0000 (---------------) + I toyota + 0x002b48c3, // n0x11df c0x0000 (---------------) + I ube + 0x00213ec3, // n0x11e0 c0x0000 (---------------) + I yuu + 0x002be904, // n0x11e1 c0x0000 (---------------) + I chuo + 0x002243c5, // n0x11e2 c0x0000 (---------------) + I doshi + 0x00289887, // n0x11e3 c0x0000 (---------------) + I fuefuki + 0x002654c8, // n0x11e4 c0x0000 (---------------) + I fujikawa + 0x002654cf, // n0x11e5 c0x0000 (---------------) + I fujikawaguchiko + 0x00269acb, // n0x11e6 c0x0000 (---------------) + I fujiyoshida + 0x00315808, // n0x11e7 c0x0000 (---------------) + I hayakawa + 0x00226906, // n0x11e8 c0x0000 (---------------) + I hokuto + 
0x00312b8e, // n0x11e9 c0x0000 (---------------) + I ichikawamisato + 0x0021b803, // n0x11ea c0x0000 (---------------) + I kai + 0x0024cf84, // n0x11eb c0x0000 (---------------) + I kofu + 0x002e1385, // n0x11ec c0x0000 (---------------) + I koshu + 0x002e4a46, // n0x11ed c0x0000 (---------------) + I kosuge + 0x0027a9cb, // n0x11ee c0x0000 (---------------) + I minami-alps + 0x0027ee46, // n0x11ef c0x0000 (---------------) + I minobu + 0x00204889, // n0x11f0 c0x0000 (---------------) + I nakamichi + 0x002c6705, // n0x11f1 c0x0000 (---------------) + I nanbu + 0x00372f88, // n0x11f2 c0x0000 (---------------) + I narusawa + 0x00208608, // n0x11f3 c0x0000 (---------------) + I nirasaki + 0x0021328c, // n0x11f4 c0x0000 (---------------) + I nishikatsura + 0x0028a946, // n0x11f5 c0x0000 (---------------) + I oshino + 0x0022a786, // n0x11f6 c0x0000 (---------------) + I otsuki + 0x002b3885, // n0x11f7 c0x0000 (---------------) + I showa + 0x00275f88, // n0x11f8 c0x0000 (---------------) + I tabayama + 0x0026a445, // n0x11f9 c0x0000 (---------------) + I tsuru + 0x0038dac8, // n0x11fa c0x0000 (---------------) + I uenohara + 0x0028a00a, // n0x11fb c0x0000 (---------------) + I yamanakako + 0x0029b889, // n0x11fc c0x0000 (---------------) + I yamanashi + 0x006735c4, // n0x11fd c0x0001 (---------------) ! I city + 0x2d600742, // n0x11fe c0x00b5 (n0x11ff-n0x1200) o I co + 0x000e4188, // n0x11ff c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1200 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1201 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1202 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1203 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1204 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1205 c0x0000 (---------------) + I org + 0x00310603, // n0x1206 c0x0000 (---------------) + I biz + 0x00222ac3, // n0x1207 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1208 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1209 c0x0000 (---------------) + I gov + 0x00200304, // n0x120a c0x0000 (---------------) + I info + 0x002170c3, // n0x120b c0x0000 (---------------) + I net + 0x0021dcc3, // n0x120c c0x0000 (---------------) + I org + 0x002362c3, // n0x120d c0x0000 (---------------) + I ass + 0x002729c4, // n0x120e c0x0000 (---------------) + I asso + 0x00222ac3, // n0x120f c0x0000 (---------------) + I com + 0x00228d44, // n0x1210 c0x0000 (---------------) + I coop + 0x002d75c3, // n0x1211 c0x0000 (---------------) + I edu + 0x003579c4, // n0x1212 c0x0000 (---------------) + I gouv + 0x0021e283, // n0x1213 c0x0000 (---------------) + I gov + 0x002753c7, // n0x1214 c0x0000 (---------------) + I medecin + 0x0023fa03, // n0x1215 c0x0000 (---------------) + I mil + 0x00207cc3, // n0x1216 c0x0000 (---------------) + I nom + 0x00265c08, // n0x1217 c0x0000 (---------------) + I notaires + 0x0021dcc3, // n0x1218 c0x0000 (---------------) + I org + 0x002e078b, // n0x1219 c0x0000 (---------------) + I pharmaciens + 0x002ca783, // n0x121a c0x0000 (---------------) + I prd + 0x0029abc6, // n0x121b c0x0000 (---------------) + I presse + 0x00208902, // n0x121c c0x0000 (---------------) + I tm + 0x0036b18b, // n0x121d c0x0000 (---------------) + I veterinaire + 0x002d75c3, // n0x121e c0x0000 (---------------) + I edu + 0x0021e283, // n0x121f c0x0000 (---------------) + I gov + 0x002170c3, // n0x1220 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1221 c0x0000 (---------------) + I org + 0x00222ac3, // n0x1222 c0x0000 (---------------) + I com + 
0x002d75c3, // n0x1223 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1224 c0x0000 (---------------) + I gov + 0x0021dcc3, // n0x1225 c0x0000 (---------------) + I org + 0x002117c3, // n0x1226 c0x0000 (---------------) + I rep + 0x00210103, // n0x1227 c0x0000 (---------------) + I tra + 0x00201e82, // n0x1228 c0x0000 (---------------) + I ac + 0x000e4188, // n0x1229 c0x0000 (---------------) + blogspot + 0x002224c5, // n0x122a c0x0000 (---------------) + I busan + 0x002ea8c8, // n0x122b c0x0000 (---------------) + I chungbuk + 0x002ec1c8, // n0x122c c0x0000 (---------------) + I chungnam + 0x00200742, // n0x122d c0x0000 (---------------) + I co + 0x0032b905, // n0x122e c0x0000 (---------------) + I daegu + 0x0023e947, // n0x122f c0x0000 (---------------) + I daejeon + 0x002010c2, // n0x1230 c0x0000 (---------------) + I es + 0x00204ac7, // n0x1231 c0x0000 (---------------) + I gangwon + 0x00202342, // n0x1232 c0x0000 (---------------) + I go + 0x00370b47, // n0x1233 c0x0000 (---------------) + I gwangju + 0x00384c89, // n0x1234 c0x0000 (---------------) + I gyeongbuk + 0x0035e588, // n0x1235 c0x0000 (---------------) + I gyeonggi + 0x00342c09, // n0x1236 c0x0000 (---------------) + I gyeongnam + 0x00203802, // n0x1237 c0x0000 (---------------) + I hs + 0x002c2a87, // n0x1238 c0x0000 (---------------) + I incheon + 0x002fae44, // n0x1239 c0x0000 (---------------) + I jeju + 0x0023ea07, // n0x123a c0x0000 (---------------) + I jeonbuk + 0x00298847, // n0x123b c0x0000 (---------------) + I jeonnam + 0x002a3fc2, // n0x123c c0x0000 (---------------) + I kg + 0x0023fa03, // n0x123d c0x0000 (---------------) + I mil + 0x00209282, // n0x123e c0x0000 (---------------) + I ms + 0x00201082, // n0x123f c0x0000 (---------------) + I ne + 0x00200c42, // n0x1240 c0x0000 (---------------) + I or + 0x00214942, // n0x1241 c0x0000 (---------------) + I pe + 0x002030c2, // n0x1242 c0x0000 (---------------) + I re + 0x00200982, // n0x1243 c0x0000 (---------------) + I sc + 0x002baac5, // n0x1244 c0x0000 (---------------) + I seoul + 0x002376c5, // n0x1245 c0x0000 (---------------) + I ulsan + 0x00222ac3, // n0x1246 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1247 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1248 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1249 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x124a c0x0000 (---------------) + I org + 0x00222ac3, // n0x124b c0x0000 (---------------) + I com + 0x002d75c3, // n0x124c c0x0000 (---------------) + I edu + 0x0021e283, // n0x124d c0x0000 (---------------) + I gov + 0x0023fa03, // n0x124e c0x0000 (---------------) + I mil + 0x002170c3, // n0x124f c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1250 c0x0000 (---------------) + I org + 0x00000741, // n0x1251 c0x0000 (---------------) + c + 0x00222ac3, // n0x1252 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1253 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1254 c0x0000 (---------------) + I gov + 0x00200304, // n0x1255 c0x0000 (---------------) + I info + 0x00238c03, // n0x1256 c0x0000 (---------------) + I int + 0x002170c3, // n0x1257 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1258 c0x0000 (---------------) + I org + 0x00214943, // n0x1259 c0x0000 (---------------) + I per + 0x00222ac3, // n0x125a c0x0000 (---------------) + I com + 0x002d75c3, // n0x125b c0x0000 (---------------) + I edu + 0x0021e283, // n0x125c c0x0000 (---------------) + I gov + 0x002170c3, // n0x125d c0x0000 (---------------) + I net + 0x0021dcc3, // n0x125e 
c0x0000 (---------------) + I org + 0x00200742, // n0x125f c0x0000 (---------------) + I co + 0x00222ac3, // n0x1260 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1261 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1262 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1263 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1264 c0x0000 (---------------) + I org + 0x000e4188, // n0x1265 c0x0000 (---------------) + blogspot + 0x00201e82, // n0x1266 c0x0000 (---------------) + I ac + 0x002aa2c4, // n0x1267 c0x0000 (---------------) + I assn + 0x00222ac3, // n0x1268 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1269 c0x0000 (---------------) + I edu + 0x0021e283, // n0x126a c0x0000 (---------------) + I gov + 0x00221d03, // n0x126b c0x0000 (---------------) + I grp + 0x00294945, // n0x126c c0x0000 (---------------) + I hotel + 0x00238c03, // n0x126d c0x0000 (---------------) + I int + 0x003413c3, // n0x126e c0x0000 (---------------) + I ltd + 0x002170c3, // n0x126f c0x0000 (---------------) + I net + 0x00202303, // n0x1270 c0x0000 (---------------) + I ngo + 0x0021dcc3, // n0x1271 c0x0000 (---------------) + I org + 0x00206103, // n0x1272 c0x0000 (---------------) + I sch + 0x00240a03, // n0x1273 c0x0000 (---------------) + I soc + 0x00219fc3, // n0x1274 c0x0000 (---------------) + I web + 0x00222ac3, // n0x1275 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1276 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1277 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1278 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1279 c0x0000 (---------------) + I org + 0x00200742, // n0x127a c0x0000 (---------------) + I co + 0x0021dcc3, // n0x127b c0x0000 (---------------) + I org + 0x000e4188, // n0x127c c0x0000 (---------------) + blogspot + 0x0021e283, // n0x127d c0x0000 (---------------) + I gov + 0x000e4188, // n0x127e c0x0000 (---------------) + blogspot + 0x002a00c3, // n0x127f c0x0000 (---------------) + I asn + 0x00222ac3, // n0x1280 c0x0000 (---------------) + I com + 0x00224984, // n0x1281 c0x0000 (---------------) + I conf + 0x002d75c3, // n0x1282 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1283 c0x0000 (---------------) + I gov + 0x00206202, // n0x1284 c0x0000 (---------------) + I id + 0x0023fa03, // n0x1285 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1286 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1287 c0x0000 (---------------) + I org + 0x00222ac3, // n0x1288 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1289 c0x0000 (---------------) + I edu + 0x0021e283, // n0x128a c0x0000 (---------------) + I gov + 0x00206202, // n0x128b c0x0000 (---------------) + I id + 0x0020b403, // n0x128c c0x0000 (---------------) + I med + 0x002170c3, // n0x128d c0x0000 (---------------) + I net + 0x0021dcc3, // n0x128e c0x0000 (---------------) + I org + 0x002c65c3, // n0x128f c0x0000 (---------------) + I plc + 0x00206103, // n0x1290 c0x0000 (---------------) + I sch + 0x00201e82, // n0x1291 c0x0000 (---------------) + I ac + 0x00200742, // n0x1292 c0x0000 (---------------) + I co + 0x0021e283, // n0x1293 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1294 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1295 c0x0000 (---------------) + I org + 0x0029abc5, // n0x1296 c0x0000 (---------------) + I press + 0x002729c4, // n0x1297 c0x0000 (---------------) + I asso + 0x00208902, // n0x1298 c0x0000 (---------------) + I tm + 0x000e4188, // n0x1299 c0x0000 (---------------) + blogspot + 0x00201e82, // n0x129a 
c0x0000 (---------------) + I ac + 0x00200742, // n0x129b c0x0000 (---------------) + I co + 0x002d75c3, // n0x129c c0x0000 (---------------) + I edu + 0x0021e283, // n0x129d c0x0000 (---------------) + I gov + 0x00226ac3, // n0x129e c0x0000 (---------------) + I its + 0x002170c3, // n0x129f c0x0000 (---------------) + I net + 0x0021dcc3, // n0x12a0 c0x0000 (---------------) + I org + 0x002cba44, // n0x12a1 c0x0000 (---------------) + I priv + 0x00222ac3, // n0x12a2 c0x0000 (---------------) + I com + 0x002d75c3, // n0x12a3 c0x0000 (---------------) + I edu + 0x0021e283, // n0x12a4 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x12a5 c0x0000 (---------------) + I mil + 0x00207cc3, // n0x12a6 c0x0000 (---------------) + I nom + 0x0021dcc3, // n0x12a7 c0x0000 (---------------) + I org + 0x002ca783, // n0x12a8 c0x0000 (---------------) + I prd + 0x00208902, // n0x12a9 c0x0000 (---------------) + I tm + 0x000e4188, // n0x12aa c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x12ab c0x0000 (---------------) + I com + 0x002d75c3, // n0x12ac c0x0000 (---------------) + I edu + 0x0021e283, // n0x12ad c0x0000 (---------------) + I gov + 0x00200303, // n0x12ae c0x0000 (---------------) + I inf + 0x00298944, // n0x12af c0x0000 (---------------) + I name + 0x002170c3, // n0x12b0 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x12b1 c0x0000 (---------------) + I org + 0x00222ac3, // n0x12b2 c0x0000 (---------------) + I com + 0x002d75c3, // n0x12b3 c0x0000 (---------------) + I edu + 0x003579c4, // n0x12b4 c0x0000 (---------------) + I gouv + 0x0021e283, // n0x12b5 c0x0000 (---------------) + I gov + 0x002170c3, // n0x12b6 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x12b7 c0x0000 (---------------) + I org + 0x0029abc6, // n0x12b8 c0x0000 (---------------) + I presse + 0x002d75c3, // n0x12b9 c0x0000 (---------------) + I edu + 0x0021e283, // n0x12ba c0x0000 (---------------) + I gov + 0x00023403, // n0x12bb c0x0000 (---------------) + nyc + 0x0021dcc3, // n0x12bc c0x0000 (---------------) + I org + 0x00222ac3, // n0x12bd c0x0000 (---------------) + I com + 0x002d75c3, // n0x12be c0x0000 (---------------) + I edu + 0x0021e283, // n0x12bf c0x0000 (---------------) + I gov + 0x002170c3, // n0x12c0 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x12c1 c0x0000 (---------------) + I org + 0x000e4188, // n0x12c2 c0x0000 (---------------) + blogspot + 0x0021e283, // n0x12c3 c0x0000 (---------------) + I gov + 0x00222ac3, // n0x12c4 c0x0000 (---------------) + I com + 0x002d75c3, // n0x12c5 c0x0000 (---------------) + I edu + 0x0021e283, // n0x12c6 c0x0000 (---------------) + I gov + 0x002170c3, // n0x12c7 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x12c8 c0x0000 (---------------) + I org + 0x35622ac3, // n0x12c9 c0x00d5 (n0x12cd-n0x12ce) + I com + 0x002d75c3, // n0x12ca c0x0000 (---------------) + I edu + 0x002170c3, // n0x12cb c0x0000 (---------------) + I net + 0x0021dcc3, // n0x12cc c0x0000 (---------------) + I org + 0x000e4188, // n0x12cd c0x0000 (---------------) + blogspot + 0x00201e82, // n0x12ce c0x0000 (---------------) + I ac + 0x00200742, // n0x12cf c0x0000 (---------------) + I co + 0x00222ac3, // n0x12d0 c0x0000 (---------------) + I com + 0x0021e283, // n0x12d1 c0x0000 (---------------) + I gov + 0x002170c3, // n0x12d2 c0x0000 (---------------) + I net + 0x00200c42, // n0x12d3 c0x0000 (---------------) + I or + 0x0021dcc3, // n0x12d4 c0x0000 (---------------) + I org + 0x002f2787, // n0x12d5 c0x0000 (---------------) + I academy + 0x0033c2cb, // n0x12d6 
c0x0000 (---------------) + I agriculture + 0x00205743, // n0x12d7 c0x0000 (---------------) + I air + 0x0022c148, // n0x12d8 c0x0000 (---------------) + I airguard + 0x0038bc07, // n0x12d9 c0x0000 (---------------) + I alabama + 0x00267646, // n0x12da c0x0000 (---------------) + I alaska + 0x002b6e85, // n0x12db c0x0000 (---------------) + I amber + 0x003541c9, // n0x12dc c0x0000 (---------------) + I ambulance + 0x002757c8, // n0x12dd c0x0000 (---------------) + I american + 0x002757c9, // n0x12de c0x0000 (---------------) + I americana + 0x002757d0, // n0x12df c0x0000 (---------------) + I americanantiques + 0x0031aecb, // n0x12e0 c0x0000 (---------------) + I americanart + 0x002b6cc9, // n0x12e1 c0x0000 (---------------) + I amsterdam + 0x00208f03, // n0x12e2 c0x0000 (---------------) + I and + 0x003228c9, // n0x12e3 c0x0000 (---------------) + I annefrank + 0x00225786, // n0x12e4 c0x0000 (---------------) + I anthro + 0x0022578c, // n0x12e5 c0x0000 (---------------) + I anthropology + 0x00222588, // n0x12e6 c0x0000 (---------------) + I antiques + 0x00392b08, // n0x12e7 c0x0000 (---------------) + I aquarium + 0x00256f09, // n0x12e8 c0x0000 (---------------) + I arboretum + 0x0029690e, // n0x12e9 c0x0000 (---------------) + I archaeological + 0x003429cb, // n0x12ea c0x0000 (---------------) + I archaeology + 0x002a52cc, // n0x12eb c0x0000 (---------------) + I architecture + 0x00200603, // n0x12ec c0x0000 (---------------) + I art + 0x0031b0cc, // n0x12ed c0x0000 (---------------) + I artanddesign + 0x0037a6c9, // n0x12ee c0x0000 (---------------) + I artcenter + 0x00200607, // n0x12ef c0x0000 (---------------) + I artdeco + 0x0037548c, // n0x12f0 c0x0000 (---------------) + I arteducation + 0x0022654a, // n0x12f1 c0x0000 (---------------) + I artgallery + 0x00246584, // n0x12f2 c0x0000 (---------------) + I arts + 0x0024658d, // n0x12f3 c0x0000 (---------------) + I artsandcrafts + 0x0037a588, // n0x12f4 c0x0000 (---------------) + I asmatart + 0x0034db0d, // n0x12f5 c0x0000 (---------------) + I assassination + 0x0037ac46, // n0x12f6 c0x0000 (---------------) + I assisi + 0x002bbacb, // n0x12f7 c0x0000 (---------------) + I association + 0x0032b3c9, // n0x12f8 c0x0000 (---------------) + I astronomy + 0x0032d947, // n0x12f9 c0x0000 (---------------) + I atlanta + 0x0030ce46, // n0x12fa c0x0000 (---------------) + I austin + 0x002ee489, // n0x12fb c0x0000 (---------------) + I australia + 0x0030494a, // n0x12fc c0x0000 (---------------) + I automotive + 0x00328bc8, // n0x12fd c0x0000 (---------------) + I aviation + 0x0025db04, // n0x12fe c0x0000 (---------------) + I axis + 0x0035ef47, // n0x12ff c0x0000 (---------------) + I badajoz + 0x00264407, // n0x1300 c0x0000 (---------------) + I baghdad + 0x00295f44, // n0x1301 c0x0000 (---------------) + I bahn + 0x0023d9c4, // n0x1302 c0x0000 (---------------) + I bale + 0x002708c9, // n0x1303 c0x0000 (---------------) + I baltimore + 0x0036bf49, // n0x1304 c0x0000 (---------------) + I barcelona + 0x002ed5c8, // n0x1305 c0x0000 (---------------) + I baseball + 0x0020a445, // n0x1306 c0x0000 (---------------) + I basel + 0x002dbe45, // n0x1307 c0x0000 (---------------) + I baths + 0x00205a86, // n0x1308 c0x0000 (---------------) + I bauern + 0x00246449, // n0x1309 c0x0000 (---------------) + I beauxarts + 0x0035a18d, // n0x130a c0x0000 (---------------) + I beeldengeluid + 0x002a4d48, // n0x130b c0x0000 (---------------) + I bellevue + 0x00205987, // n0x130c c0x0000 (---------------) + I bergbau + 0x002b6f08, // n0x130d c0x0000 
(---------------) + I berkeley + 0x00251d46, // n0x130e c0x0000 (---------------) + I berlin + 0x00353844, // n0x130f c0x0000 (---------------) + I bern + 0x003628c5, // n0x1310 c0x0000 (---------------) + I bible + 0x00394a46, // n0x1311 c0x0000 (---------------) + I bilbao + 0x00396b04, // n0x1312 c0x0000 (---------------) + I bill + 0x00200507, // n0x1313 c0x0000 (---------------) + I birdart + 0x00201cca, // n0x1314 c0x0000 (---------------) + I birthplace + 0x0020bcc4, // n0x1315 c0x0000 (---------------) + I bonn + 0x0020d686, // n0x1316 c0x0000 (---------------) + I boston + 0x0020e2c9, // n0x1317 c0x0000 (---------------) + I botanical + 0x0020e2cf, // n0x1318 c0x0000 (---------------) + I botanicalgarden + 0x0020e88d, // n0x1319 c0x0000 (---------------) + I botanicgarden + 0x002107c6, // n0x131a c0x0000 (---------------) + I botany + 0x00213b10, // n0x131b c0x0000 (---------------) + I brandywinevalley + 0x00214a86, // n0x131c c0x0000 (---------------) + I brasil + 0x002159c7, // n0x131d c0x0000 (---------------) + I bristol + 0x00215d47, // n0x131e c0x0000 (---------------) + I british + 0x00215d4f, // n0x131f c0x0000 (---------------) + I britishcolumbia + 0x00216d09, // n0x1320 c0x0000 (---------------) + I broadcast + 0x0021b106, // n0x1321 c0x0000 (---------------) + I brunel + 0x0021be87, // n0x1322 c0x0000 (---------------) + I brussel + 0x0021be88, // n0x1323 c0x0000 (---------------) + I brussels + 0x0021c809, // n0x1324 c0x0000 (---------------) + I bruxelles + 0x002c67c8, // n0x1325 c0x0000 (---------------) + I building + 0x002c2fc7, // n0x1326 c0x0000 (---------------) + I burghof + 0x002224c3, // n0x1327 c0x0000 (---------------) + I bus + 0x00280c46, // n0x1328 c0x0000 (---------------) + I bushey + 0x0027d948, // n0x1329 c0x0000 (---------------) + I cadaques + 0x00296bca, // n0x132a c0x0000 (---------------) + I california + 0x0021a089, // n0x132b c0x0000 (---------------) + I cambridge + 0x00208ec3, // n0x132c c0x0000 (---------------) + I can + 0x00309786, // n0x132d c0x0000 (---------------) + I canada + 0x0024de4a, // n0x132e c0x0000 (---------------) + I capebreton + 0x0031bb87, // n0x132f c0x0000 (---------------) + I carrier + 0x003752ca, // n0x1330 c0x0000 (---------------) + I cartoonart + 0x0038390e, // n0x1331 c0x0000 (---------------) + I casadelamoneda + 0x00216e46, // n0x1332 c0x0000 (---------------) + I castle + 0x0021c487, // n0x1333 c0x0000 (---------------) + I castres + 0x0020f006, // n0x1334 c0x0000 (---------------) + I celtic + 0x00233a06, // n0x1335 c0x0000 (---------------) + I center + 0x0036240b, // n0x1336 c0x0000 (---------------) + I chattanooga + 0x002503ca, // n0x1337 c0x0000 (---------------) + I cheltenham + 0x002eeccd, // n0x1338 c0x0000 (---------------) + I chesapeakebay + 0x0034e9c7, // n0x1339 c0x0000 (---------------) + I chicago + 0x00262bc8, // n0x133a c0x0000 (---------------) + I children + 0x00262bc9, // n0x133b c0x0000 (---------------) + I childrens + 0x00262bcf, // n0x133c c0x0000 (---------------) + I childrensgarden + 0x0029300c, // n0x133d c0x0000 (---------------) + I chiropractic + 0x00299309, // n0x133e c0x0000 (---------------) + I chocolate + 0x0022d30e, // n0x133f c0x0000 (---------------) + I christiansburg + 0x002754ca, // n0x1340 c0x0000 (---------------) + I cincinnati + 0x00343506, // n0x1341 c0x0000 (---------------) + I cinema + 0x00323506, // n0x1342 c0x0000 (---------------) + I circus + 0x0034564c, // n0x1343 c0x0000 (---------------) + I civilisation + 0x0034878c, // n0x1344 c0x0000 
(---------------) + I civilization + 0x003518c8, // n0x1345 c0x0000 (---------------) + I civilwar + 0x00369707, // n0x1346 c0x0000 (---------------) + I clinton + 0x0020af45, // n0x1347 c0x0000 (---------------) + I clock + 0x00387ec4, // n0x1348 c0x0000 (---------------) + I coal + 0x0032ac0e, // n0x1349 c0x0000 (---------------) + I coastaldefence + 0x00200744, // n0x134a c0x0000 (---------------) + I cody + 0x00232107, // n0x134b c0x0000 (---------------) + I coldwar + 0x0024f38a, // n0x134c c0x0000 (---------------) + I collection + 0x00221854, // n0x134d c0x0000 (---------------) + I colonialwilliamsburg + 0x00221f0f, // n0x134e c0x0000 (---------------) + I coloradoplateau + 0x00215f08, // n0x134f c0x0000 (---------------) + I columbia + 0x00222388, // n0x1350 c0x0000 (---------------) + I columbus + 0x002b354d, // n0x1351 c0x0000 (---------------) + I communication + 0x002b354e, // n0x1352 c0x0000 (---------------) + I communications + 0x00222ac9, // n0x1353 c0x0000 (---------------) + I community + 0x002236c8, // n0x1354 c0x0000 (---------------) + I computer + 0x002236cf, // n0x1355 c0x0000 (---------------) + I computerhistory + 0x0022624c, // n0x1356 c0x0000 (---------------) + I contemporary + 0x0022624f, // n0x1357 c0x0000 (---------------) + I contemporaryart + 0x00227187, // n0x1358 c0x0000 (---------------) + I convent + 0x00229bca, // n0x1359 c0x0000 (---------------) + I copenhagen + 0x0021300b, // n0x135a c0x0000 (---------------) + I corporation + 0x0022b908, // n0x135b c0x0000 (---------------) + I corvette + 0x0022c347, // n0x135c c0x0000 (---------------) + I costume + 0x00362c0d, // n0x135d c0x0000 (---------------) + I countryestate + 0x002f8886, // n0x135e c0x0000 (---------------) + I county + 0x00246746, // n0x135f c0x0000 (---------------) + I crafts + 0x0022f189, // n0x1360 c0x0000 (---------------) + I cranbrook + 0x002e63c8, // n0x1361 c0x0000 (---------------) + I creation + 0x00233808, // n0x1362 c0x0000 (---------------) + I cultural + 0x0023380e, // n0x1363 c0x0000 (---------------) + I culturalcenter + 0x00323687, // n0x1364 c0x0000 (---------------) + I culture + 0x00343d85, // n0x1365 c0x0000 (---------------) + I cyber + 0x002dcac5, // n0x1366 c0x0000 (---------------) + I cymru + 0x00270d04, // n0x1367 c0x0000 (---------------) + I dali + 0x00267906, // n0x1368 c0x0000 (---------------) + I dallas + 0x002ed4c8, // n0x1369 c0x0000 (---------------) + I database + 0x0034d6c3, // n0x136a c0x0000 (---------------) + I ddr + 0x00255ace, // n0x136b c0x0000 (---------------) + I decorativearts + 0x00362fc8, // n0x136c c0x0000 (---------------) + I delaware + 0x0029f90b, // n0x136d c0x0000 (---------------) + I delmenhorst + 0x00311707, // n0x136e c0x0000 (---------------) + I denmark + 0x002b8f85, // n0x136f c0x0000 (---------------) + I depot + 0x00232f46, // n0x1370 c0x0000 (---------------) + I design + 0x00295507, // n0x1371 c0x0000 (---------------) + I detroit + 0x002dff48, // n0x1372 c0x0000 (---------------) + I dinosaur + 0x00304c89, // n0x1373 c0x0000 (---------------) + I discovery + 0x002250c5, // n0x1374 c0x0000 (---------------) + I dolls + 0x00274888, // n0x1375 c0x0000 (---------------) + I donostia + 0x0038a206, // n0x1376 c0x0000 (---------------) + I durham + 0x00367b0a, // n0x1377 c0x0000 (---------------) + I eastafrica + 0x0032ab09, // n0x1378 c0x0000 (---------------) + I eastcoast + 0x00375549, // n0x1379 c0x0000 (---------------) + I education + 0x0037554b, // n0x137a c0x0000 (---------------) + I educational + 0x002958c8, // 
n0x137b c0x0000 (---------------) + I egyptian + 0x00295e09, // n0x137c c0x0000 (---------------) + I eisenbahn + 0x0020a506, // n0x137d c0x0000 (---------------) + I elburg + 0x0034cf0a, // n0x137e c0x0000 (---------------) + I elvendrell + 0x0032af4a, // n0x137f c0x0000 (---------------) + I embroidery + 0x00229dcc, // n0x1380 c0x0000 (---------------) + I encyclopedic + 0x0037b187, // n0x1381 c0x0000 (---------------) + I england + 0x00384a8a, // n0x1382 c0x0000 (---------------) + I entomology + 0x0030de4b, // n0x1383 c0x0000 (---------------) + I environment + 0x0030de59, // n0x1384 c0x0000 (---------------) + I environmentalconservation + 0x003543c8, // n0x1385 c0x0000 (---------------) + I epilepsy + 0x0029ac45, // n0x1386 c0x0000 (---------------) + I essex + 0x002b1846, // n0x1387 c0x0000 (---------------) + I estate + 0x002f6409, // n0x1388 c0x0000 (---------------) + I ethnology + 0x0020bb46, // n0x1389 c0x0000 (---------------) + I exeter + 0x00326eca, // n0x138a c0x0000 (---------------) + I exhibition + 0x00311c06, // n0x138b c0x0000 (---------------) + I family + 0x0021f804, // n0x138c c0x0000 (---------------) + I farm + 0x0021f80d, // n0x138d c0x0000 (---------------) + I farmequipment + 0x00366b07, // n0x138e c0x0000 (---------------) + I farmers + 0x002fb8c9, // n0x138f c0x0000 (---------------) + I farmstead + 0x002099c5, // n0x1390 c0x0000 (---------------) + I field + 0x00338588, // n0x1391 c0x0000 (---------------) + I figueres + 0x00349f49, // n0x1392 c0x0000 (---------------) + I filatelia + 0x00356104, // n0x1393 c0x0000 (---------------) + I film + 0x003707c7, // n0x1394 c0x0000 (---------------) + I fineart + 0x003707c8, // n0x1395 c0x0000 (---------------) + I finearts + 0x00236407, // n0x1396 c0x0000 (---------------) + I finland + 0x00252b88, // n0x1397 c0x0000 (---------------) + I flanders + 0x0023c447, // n0x1398 c0x0000 (---------------) + I florida + 0x002e7685, // n0x1399 c0x0000 (---------------) + I force + 0x002457cc, // n0x139a c0x0000 (---------------) + I fortmissoula + 0x00245e89, // n0x139b c0x0000 (---------------) + I fortworth + 0x00303b8a, // n0x139c c0x0000 (---------------) + I foundation + 0x00377209, // n0x139d c0x0000 (---------------) + I francaise + 0x003229c9, // n0x139e c0x0000 (---------------) + I frankfurt + 0x0035268c, // n0x139f c0x0000 (---------------) + I franziskaner + 0x00209c8b, // n0x13a0 c0x0000 (---------------) + I freemasonry + 0x00248cc8, // n0x13a1 c0x0000 (---------------) + I freiburg + 0x002493c8, // n0x13a2 c0x0000 (---------------) + I fribourg + 0x0024c784, // n0x13a3 c0x0000 (---------------) + I frog + 0x0026fc08, // n0x13a4 c0x0000 (---------------) + I fundacio + 0x00272f09, // n0x13a5 c0x0000 (---------------) + I furniture + 0x00226607, // n0x13a6 c0x0000 (---------------) + I gallery + 0x0020e506, // n0x13a7 c0x0000 (---------------) + I garden + 0x0021a607, // n0x13a8 c0x0000 (---------------) + I gateway + 0x00268b89, // n0x13a9 c0x0000 (---------------) + I geelvinck + 0x002a634b, // n0x13aa c0x0000 (---------------) + I gemological + 0x00309a87, // n0x13ab c0x0000 (---------------) + I geology + 0x002f8287, // n0x13ac c0x0000 (---------------) + I georgia + 0x00267387, // n0x13ad c0x0000 (---------------) + I giessen + 0x0034da84, // n0x13ae c0x0000 (---------------) + I glas + 0x0034da85, // n0x13af c0x0000 (---------------) + I glass + 0x00292d05, // n0x13b0 c0x0000 (---------------) + I gorge + 0x00320b0b, // n0x13b1 c0x0000 (---------------) + I grandrapids + 0x00380d44, // n0x13b2 c0x0000 
(---------------) + I graz + 0x002e6b48, // n0x13b3 c0x0000 (---------------) + I guernsey + 0x002a19ca, // n0x13b4 c0x0000 (---------------) + I halloffame + 0x0038a2c7, // n0x13b5 c0x0000 (---------------) + I hamburg + 0x00372047, // n0x13b6 c0x0000 (---------------) + I handson + 0x0027a252, // n0x13b7 c0x0000 (---------------) + I harvestcelebration + 0x00256386, // n0x13b8 c0x0000 (---------------) + I hawaii + 0x00205e06, // n0x13b9 c0x0000 (---------------) + I health + 0x002f518e, // n0x13ba c0x0000 (---------------) + I heimatunduhren + 0x00257ac6, // n0x13bb c0x0000 (---------------) + I hellas + 0x00209408, // n0x13bc c0x0000 (---------------) + I helsinki + 0x0027f4cf, // n0x13bd c0x0000 (---------------) + I hembygdsforbund + 0x0034dec8, // n0x13be c0x0000 (---------------) + I heritage + 0x0025de08, // n0x13bf c0x0000 (---------------) + I histoire + 0x002a794a, // n0x13c0 c0x0000 (---------------) + I historical + 0x002a7951, // n0x13c1 c0x0000 (---------------) + I historicalsociety + 0x0028cfce, // n0x13c2 c0x0000 (---------------) + I historichouses + 0x0024268a, // n0x13c3 c0x0000 (---------------) + I historisch + 0x0024268c, // n0x13c4 c0x0000 (---------------) + I historisches + 0x002238c7, // n0x13c5 c0x0000 (---------------) + I history + 0x002238d0, // n0x13c6 c0x0000 (---------------) + I historyofscience + 0x0038f308, // n0x13c7 c0x0000 (---------------) + I horology + 0x0021da05, // n0x13c8 c0x0000 (---------------) + I house + 0x002d8f0a, // n0x13c9 c0x0000 (---------------) + I humanities + 0x00396b4c, // n0x13ca c0x0000 (---------------) + I illustration + 0x003365cd, // n0x13cb c0x0000 (---------------) + I imageandsound + 0x002d5b06, // n0x13cc c0x0000 (---------------) + I indian + 0x002d5b07, // n0x13cd c0x0000 (---------------) + I indiana + 0x002d5b0c, // n0x13ce c0x0000 (---------------) + I indianapolis + 0x00356ecc, // n0x13cf c0x0000 (---------------) + I indianmarket + 0x00238c0c, // n0x13d0 c0x0000 (---------------) + I intelligence + 0x0027d28b, // n0x13d1 c0x0000 (---------------) + I interactive + 0x00273f44, // n0x13d2 c0x0000 (---------------) + I iraq + 0x0021afc4, // n0x13d3 c0x0000 (---------------) + I iron + 0x00357ec9, // n0x13d4 c0x0000 (---------------) + I isleofman + 0x002bc087, // n0x13d5 c0x0000 (---------------) + I jamison + 0x002207c9, // n0x13d6 c0x0000 (---------------) + I jefferson + 0x00337649, // n0x13d7 c0x0000 (---------------) + I jerusalem + 0x0034f487, // n0x13d8 c0x0000 (---------------) + I jewelry + 0x0038cb46, // n0x13d9 c0x0000 (---------------) + I jewish + 0x0038cb49, // n0x13da c0x0000 (---------------) + I jewishart + 0x00297743, // n0x13db c0x0000 (---------------) + I jfk + 0x0028540a, // n0x13dc c0x0000 (---------------) + I journalism + 0x0036e9c7, // n0x13dd c0x0000 (---------------) + I judaica + 0x0035f38b, // n0x13de c0x0000 (---------------) + I judygarland + 0x002eeb4a, // n0x13df c0x0000 (---------------) + I juedisches + 0x00370c84, // n0x13e0 c0x0000 (---------------) + I juif + 0x002df386, // n0x13e1 c0x0000 (---------------) + I karate + 0x002af549, // n0x13e2 c0x0000 (---------------) + I karikatur + 0x00394c44, // n0x13e3 c0x0000 (---------------) + I kids + 0x0033e24a, // n0x13e4 c0x0000 (---------------) + I koebenhavn + 0x0021f385, // n0x13e5 c0x0000 (---------------) + I koeln + 0x002a3845, // n0x13e6 c0x0000 (---------------) + I kunst + 0x002a384d, // n0x13e7 c0x0000 (---------------) + I kunstsammlung + 0x002a3b8e, // n0x13e8 c0x0000 (---------------) + I kunstunddesign + 0x002feec5, 
// n0x13e9 c0x0000 (---------------) + I labor + 0x0037c986, // n0x13ea c0x0000 (---------------) + I labour + 0x00323a07, // n0x13eb c0x0000 (---------------) + I lajolla + 0x00261a4a, // n0x13ec c0x0000 (---------------) + I lancashire + 0x0035b146, // n0x13ed c0x0000 (---------------) + I landes + 0x002856c4, // n0x13ee c0x0000 (---------------) + I lans + 0x002d0287, // n0x13ef c0x0000 (---------------) + I larsson + 0x0029eacb, // n0x13f0 c0x0000 (---------------) + I lewismiller + 0x00251e07, // n0x13f1 c0x0000 (---------------) + I lincoln + 0x00329ac4, // n0x13f2 c0x0000 (---------------) + I linz + 0x002a1d46, // n0x13f3 c0x0000 (---------------) + I living + 0x002a1d4d, // n0x13f4 c0x0000 (---------------) + I livinghistory + 0x0032254c, // n0x13f5 c0x0000 (---------------) + I localhistory + 0x00340906, // n0x13f6 c0x0000 (---------------) + I london + 0x002a4f4a, // n0x13f7 c0x0000 (---------------) + I losangeles + 0x002116c6, // n0x13f8 c0x0000 (---------------) + I louvre + 0x00259d88, // n0x13f9 c0x0000 (---------------) + I loyalist + 0x0021d747, // n0x13fa c0x0000 (---------------) + I lucerne + 0x00222d8a, // n0x13fb c0x0000 (---------------) + I luxembourg + 0x00226c86, // n0x13fc c0x0000 (---------------) + I luzern + 0x0026b0c3, // n0x13fd c0x0000 (---------------) + I mad + 0x00300a86, // n0x13fe c0x0000 (---------------) + I madrid + 0x0026aa08, // n0x13ff c0x0000 (---------------) + I mallorca + 0x0028608a, // n0x1400 c0x0000 (---------------) + I manchester + 0x00266747, // n0x1401 c0x0000 (---------------) + I mansion + 0x00266748, // n0x1402 c0x0000 (---------------) + I mansions + 0x0026d784, // n0x1403 c0x0000 (---------------) + I manx + 0x00277287, // n0x1404 c0x0000 (---------------) + I marburg + 0x0036ee48, // n0x1405 c0x0000 (---------------) + I maritime + 0x00237048, // n0x1406 c0x0000 (---------------) + I maritimo + 0x00256588, // n0x1407 c0x0000 (---------------) + I maryland + 0x003372ca, // n0x1408 c0x0000 (---------------) + I marylhurst + 0x002dc385, // n0x1409 c0x0000 (---------------) + I media + 0x002e7087, // n0x140a c0x0000 (---------------) + I medical + 0x002424d3, // n0x140b c0x0000 (---------------) + I medizinhistorisches + 0x00257946, // n0x140c c0x0000 (---------------) + I meeres + 0x002edac8, // n0x140d c0x0000 (---------------) + I memorial + 0x00219509, // n0x140e c0x0000 (---------------) + I mesaverde + 0x00204988, // n0x140f c0x0000 (---------------) + I michigan + 0x002892cb, // n0x1410 c0x0000 (---------------) + I midatlantic + 0x002a6948, // n0x1411 c0x0000 (---------------) + I military + 0x0029ec04, // n0x1412 c0x0000 (---------------) + I mill + 0x002878c6, // n0x1413 c0x0000 (---------------) + I miners + 0x002e7a46, // n0x1414 c0x0000 (---------------) + I mining + 0x00335b09, // n0x1415 c0x0000 (---------------) + I minnesota + 0x002ad807, // n0x1416 c0x0000 (---------------) + I missile + 0x002458c8, // n0x1417 c0x0000 (---------------) + I missoula + 0x00291a06, // n0x1418 c0x0000 (---------------) + I modern + 0x0021cfc4, // n0x1419 c0x0000 (---------------) + I moma + 0x002b6185, // n0x141a c0x0000 (---------------) + I money + 0x002b02c8, // n0x141b c0x0000 (---------------) + I monmouth + 0x002b094a, // n0x141c c0x0000 (---------------) + I monticello + 0x002b1648, // n0x141d c0x0000 (---------------) + I montreal + 0x002b68c6, // n0x141e c0x0000 (---------------) + I moscow + 0x0028678a, // n0x141f c0x0000 (---------------) + I motorcycle + 0x002f3cc8, // n0x1420 c0x0000 (---------------) + I muenchen + 
0x002b9948, // n0x1421 c0x0000 (---------------) + I muenster + 0x002ba948, // n0x1422 c0x0000 (---------------) + I mulhouse + 0x002bb346, // n0x1423 c0x0000 (---------------) + I muncie + 0x002bd186, // n0x1424 c0x0000 (---------------) + I museet + 0x0030e7cc, // n0x1425 c0x0000 (---------------) + I museumcenter + 0x002bd650, // n0x1426 c0x0000 (---------------) + I museumvereniging + 0x00337b85, // n0x1427 c0x0000 (---------------) + I music + 0x002dd9c8, // n0x1428 c0x0000 (---------------) + I national + 0x002dd9d0, // n0x1429 c0x0000 (---------------) + I nationalfirearms + 0x0034dcd0, // n0x142a c0x0000 (---------------) + I nationalheritage + 0x0027564e, // n0x142b c0x0000 (---------------) + I nativeamerican + 0x0030e44e, // n0x142c c0x0000 (---------------) + I naturalhistory + 0x0030e454, // n0x142d c0x0000 (---------------) + I naturalhistorymuseum + 0x00240dcf, // n0x142e c0x0000 (---------------) + I naturalsciences + 0x00241186, // n0x142f c0x0000 (---------------) + I nature + 0x002f4951, // n0x1430 c0x0000 (---------------) + I naturhistorisches + 0x0031cfd3, // n0x1431 c0x0000 (---------------) + I natuurwetenschappen + 0x0031d448, // n0x1432 c0x0000 (---------------) + I naumburg + 0x00341c05, // n0x1433 c0x0000 (---------------) + I naval + 0x00262448, // n0x1434 c0x0000 (---------------) + I nebraska + 0x0021d585, // n0x1435 c0x0000 (---------------) + I neues + 0x002e60cc, // n0x1436 c0x0000 (---------------) + I newhampshire + 0x00221609, // n0x1437 c0x0000 (---------------) + I newjersey + 0x00231f49, // n0x1438 c0x0000 (---------------) + I newmexico + 0x0021a387, // n0x1439 c0x0000 (---------------) + I newport + 0x00366d49, // n0x143a c0x0000 (---------------) + I newspaper + 0x00237f07, // n0x143b c0x0000 (---------------) + I newyork + 0x00203d46, // n0x143c c0x0000 (---------------) + I niepce + 0x003626c7, // n0x143d c0x0000 (---------------) + I norfolk + 0x002f0dc5, // n0x143e c0x0000 (---------------) + I north + 0x00345903, // n0x143f c0x0000 (---------------) + I nrw + 0x0034d889, // n0x1440 c0x0000 (---------------) + I nuernberg + 0x002e9309, // n0x1441 c0x0000 (---------------) + I nuremberg + 0x00223403, // n0x1442 c0x0000 (---------------) + I nyc + 0x002108c4, // n0x1443 c0x0000 (---------------) + I nyny + 0x0034100d, // n0x1444 c0x0000 (---------------) + I oceanographic + 0x0034384f, // n0x1445 c0x0000 (---------------) + I oceanographique + 0x002e3085, // n0x1446 c0x0000 (---------------) + I omaha + 0x003023c6, // n0x1447 c0x0000 (---------------) + I online + 0x0032a587, // n0x1448 c0x0000 (---------------) + I ontario + 0x0032ce07, // n0x1449 c0x0000 (---------------) + I openair + 0x00276b86, // n0x144a c0x0000 (---------------) + I oregon + 0x00276b8b, // n0x144b c0x0000 (---------------) + I oregontrail + 0x0028e6c5, // n0x144c c0x0000 (---------------) + I otago + 0x00364046, // n0x144d c0x0000 (---------------) + I oxford + 0x0031ba07, // n0x144e c0x0000 (---------------) + I pacific + 0x0025d709, // n0x144f c0x0000 (---------------) + I paderborn + 0x0036ce06, // n0x1450 c0x0000 (---------------) + I palace + 0x002052c5, // n0x1451 c0x0000 (---------------) + I paleo + 0x00324b4b, // n0x1452 c0x0000 (---------------) + I palmsprings + 0x00221d86, // n0x1453 c0x0000 (---------------) + I panama + 0x0025b445, // n0x1454 c0x0000 (---------------) + I paris + 0x002a3348, // n0x1455 c0x0000 (---------------) + I pasadena + 0x002dc948, // n0x1456 c0x0000 (---------------) + I pharmacy + 0x0036150c, // n0x1457 c0x0000 (---------------) + I 
philadelphia + 0x00361510, // n0x1458 c0x0000 (---------------) + I philadelphiaarea + 0x002bf909, // n0x1459 c0x0000 (---------------) + I philately + 0x002bfd47, // n0x145a c0x0000 (---------------) + I phoenix + 0x002c014b, // n0x145b c0x0000 (---------------) + I photography + 0x002c1506, // n0x145c c0x0000 (---------------) + I pilots + 0x002c2e8a, // n0x145d c0x0000 (---------------) + I pittsburgh + 0x002c438b, // n0x145e c0x0000 (---------------) + I planetarium + 0x002c4bca, // n0x145f c0x0000 (---------------) + I plantation + 0x002c4e46, // n0x1460 c0x0000 (---------------) + I plants + 0x002c6485, // n0x1461 c0x0000 (---------------) + I plaza + 0x0036e706, // n0x1462 c0x0000 (---------------) + I portal + 0x00266f08, // n0x1463 c0x0000 (---------------) + I portland + 0x0021a44a, // n0x1464 c0x0000 (---------------) + I portlligat + 0x002b31dc, // n0x1465 c0x0000 (---------------) + I posts-and-telecommunications + 0x002ca84c, // n0x1466 c0x0000 (---------------) + I preservation + 0x002cab48, // n0x1467 c0x0000 (---------------) + I presidio + 0x0029abc5, // n0x1468 c0x0000 (---------------) + I press + 0x002cc587, // n0x1469 c0x0000 (---------------) + I project + 0x00296546, // n0x146a c0x0000 (---------------) + I public + 0x0034c905, // n0x146b c0x0000 (---------------) + I pubol + 0x00211186, // n0x146c c0x0000 (---------------) + I quebec + 0x00276d48, // n0x146d c0x0000 (---------------) + I railroad + 0x00253fc7, // n0x146e c0x0000 (---------------) + I railway + 0x00296808, // n0x146f c0x0000 (---------------) + I research + 0x0021c58a, // n0x1470 c0x0000 (---------------) + I resistance + 0x0033cb8c, // n0x1471 c0x0000 (---------------) + I riodejaneiro + 0x0033ce09, // n0x1472 c0x0000 (---------------) + I rochester + 0x00201707, // n0x1473 c0x0000 (---------------) + I rockart + 0x0023fb84, // n0x1474 c0x0000 (---------------) + I roma + 0x0023afc6, // n0x1475 c0x0000 (---------------) + I russia + 0x002c508a, // n0x1476 c0x0000 (---------------) + I saintlouis + 0x00337745, // n0x1477 c0x0000 (---------------) + I salem + 0x0034250c, // n0x1478 c0x0000 (---------------) + I salvadordali + 0x0034ec88, // n0x1479 c0x0000 (---------------) + I salzburg + 0x00270ec8, // n0x147a c0x0000 (---------------) + I sandiego + 0x0023774c, // n0x147b c0x0000 (---------------) + I sanfrancisco + 0x0020bfcc, // n0x147c c0x0000 (---------------) + I santabarbara + 0x0020c3c9, // n0x147d c0x0000 (---------------) + I santacruz + 0x0020c607, // n0x147e c0x0000 (---------------) + I santafe + 0x0035768c, // n0x147f c0x0000 (---------------) + I saskatchewan + 0x002251c4, // n0x1480 c0x0000 (---------------) + I satx + 0x0022de0a, // n0x1481 c0x0000 (---------------) + I savannahga + 0x0020cb8c, // n0x1482 c0x0000 (---------------) + I schlesisches + 0x00354a4b, // n0x1483 c0x0000 (---------------) + I schoenbrunn + 0x0025e7cb, // n0x1484 c0x0000 (---------------) + I schokoladen + 0x00235406, // n0x1485 c0x0000 (---------------) + I school + 0x0023b2c7, // n0x1486 c0x0000 (---------------) + I schweiz + 0x00223b07, // n0x1487 c0x0000 (---------------) + I science + 0x00223b0f, // n0x1488 c0x0000 (---------------) + I science-fiction + 0x002d6251, // n0x1489 c0x0000 (---------------) + I scienceandhistory + 0x0037f9d2, // n0x148a c0x0000 (---------------) + I scienceandindustry + 0x0023ce0d, // n0x148b c0x0000 (---------------) + I sciencecenter + 0x0023ce0e, // n0x148c c0x0000 (---------------) + I sciencecenters + 0x0023d14e, // n0x148d c0x0000 (---------------) + I sciencehistory 
+ 0x00240f88, // n0x148e c0x0000 (---------------) + I sciences + 0x00240f92, // n0x148f c0x0000 (---------------) + I sciencesnaturelles + 0x00237988, // n0x1490 c0x0000 (---------------) + I scotland + 0x002dfc87, // n0x1491 c0x0000 (---------------) + I seaport + 0x0023920a, // n0x1492 c0x0000 (---------------) + I settlement + 0x0020fb88, // n0x1493 c0x0000 (---------------) + I settlers + 0x00257a85, // n0x1494 c0x0000 (---------------) + I shell + 0x0025918a, // n0x1495 c0x0000 (---------------) + I sherbrooke + 0x0037ad47, // n0x1496 c0x0000 (---------------) + I sibenik + 0x0036b904, // n0x1497 c0x0000 (---------------) + I silk + 0x00207b43, // n0x1498 c0x0000 (---------------) + I ski + 0x002e3345, // n0x1499 c0x0000 (---------------) + I skole + 0x002a7bc7, // n0x149a c0x0000 (---------------) + I society + 0x002d84c7, // n0x149b c0x0000 (---------------) + I sologne + 0x003367ce, // n0x149c c0x0000 (---------------) + I soundandvision + 0x0031f30d, // n0x149d c0x0000 (---------------) + I southcarolina + 0x00320d89, // n0x149e c0x0000 (---------------) + I southwest + 0x002101c5, // n0x149f c0x0000 (---------------) + I space + 0x00349a43, // n0x14a0 c0x0000 (---------------) + I spy + 0x00275b86, // n0x14a1 c0x0000 (---------------) + I square + 0x00248205, // n0x14a2 c0x0000 (---------------) + I stadt + 0x0029fb48, // n0x14a3 c0x0000 (---------------) + I stalbans + 0x00232449, // n0x14a4 c0x0000 (---------------) + I starnberg + 0x002b1885, // n0x14a5 c0x0000 (---------------) + I state + 0x00362e0f, // n0x14a6 c0x0000 (---------------) + I stateofdelaware + 0x002c62c7, // n0x14a7 c0x0000 (---------------) + I station + 0x00354105, // n0x14a8 c0x0000 (---------------) + I steam + 0x002f2f8a, // n0x14a9 c0x0000 (---------------) + I steiermark + 0x002a83c6, // n0x14aa c0x0000 (---------------) + I stjohn + 0x00259f09, // n0x14ab c0x0000 (---------------) + I stockholm + 0x002cf98c, // n0x14ac c0x0000 (---------------) + I stpetersburg + 0x002d0749, // n0x14ad c0x0000 (---------------) + I stuttgart + 0x00211e46, // n0x14ae c0x0000 (---------------) + I suisse + 0x002a17cc, // n0x14af c0x0000 (---------------) + I surgeonshall + 0x002d1c86, // n0x14b0 c0x0000 (---------------) + I surrey + 0x002d4988, // n0x14b1 c0x0000 (---------------) + I svizzera + 0x002d4b86, // n0x14b2 c0x0000 (---------------) + I sweden + 0x00368086, // n0x14b3 c0x0000 (---------------) + I sydney + 0x0024ca44, // n0x14b4 c0x0000 (---------------) + I tank + 0x00249043, // n0x14b5 c0x0000 (---------------) + I tcm + 0x0029d58a, // n0x14b6 c0x0000 (---------------) + I technology + 0x002994d1, // n0x14b7 c0x0000 (---------------) + I telekommunikation + 0x00248a4a, // n0x14b8 c0x0000 (---------------) + I television + 0x00209085, // n0x14b9 c0x0000 (---------------) + I texas + 0x00326307, // n0x14ba c0x0000 (---------------) + I textile + 0x002f9147, // n0x14bb c0x0000 (---------------) + I theater + 0x0036ef44, // n0x14bc c0x0000 (---------------) + I time + 0x0036ef4b, // n0x14bd c0x0000 (---------------) + I timekeeping + 0x0035e408, // n0x14be c0x0000 (---------------) + I topology + 0x0029f3c6, // n0x14bf c0x0000 (---------------) + I torino + 0x002e9685, // n0x14c0 c0x0000 (---------------) + I touch + 0x0021abc4, // n0x14c1 c0x0000 (---------------) + I town + 0x0027c9c9, // n0x14c2 c0x0000 (---------------) + I transport + 0x00242e84, // n0x14c3 c0x0000 (---------------) + I tree + 0x00211c47, // n0x14c4 c0x0000 (---------------) + I trolley + 0x00313185, // n0x14c5 c0x0000 (---------------) + 
I trust + 0x00313187, // n0x14c6 c0x0000 (---------------) + I trustee + 0x002f53c5, // n0x14c7 c0x0000 (---------------) + I uhren + 0x00213f43, // n0x14c8 c0x0000 (---------------) + I ulm + 0x002dfb48, // n0x14c9 c0x0000 (---------------) + I undersea + 0x0029f04a, // n0x14ca c0x0000 (---------------) + I university + 0x00222503, // n0x14cb c0x0000 (---------------) + I usa + 0x0022250a, // n0x14cc c0x0000 (---------------) + I usantiques + 0x0038c1c6, // n0x14cd c0x0000 (---------------) + I usarts + 0x00362b8f, // n0x14ce c0x0000 (---------------) + I uscountryestate + 0x00323609, // n0x14cf c0x0000 (---------------) + I usculture + 0x00255a50, // n0x14d0 c0x0000 (---------------) + I usdecorativearts + 0x002ada08, // n0x14d1 c0x0000 (---------------) + I usgarden + 0x002b7209, // n0x14d2 c0x0000 (---------------) + I ushistory + 0x00282ac7, // n0x14d3 c0x0000 (---------------) + I ushuaia + 0x002a1ccf, // n0x14d4 c0x0000 (---------------) + I uslivinghistory + 0x0026a544, // n0x14d5 c0x0000 (---------------) + I utah + 0x00357a44, // n0x14d6 c0x0000 (---------------) + I uvic + 0x00213d86, // n0x14d7 c0x0000 (---------------) + I valley + 0x002292c6, // n0x14d8 c0x0000 (---------------) + I vantaa + 0x0038deca, // n0x14d9 c0x0000 (---------------) + I versailles + 0x002c7686, // n0x14da c0x0000 (---------------) + I viking + 0x00327d47, // n0x14db c0x0000 (---------------) + I village + 0x002dd148, // n0x14dc c0x0000 (---------------) + I virginia + 0x002dd347, // n0x14dd c0x0000 (---------------) + I virtual + 0x002dd507, // n0x14de c0x0000 (---------------) + I virtuel + 0x00332eca, // n0x14df c0x0000 (---------------) + I vlaanderen + 0x002df98b, // n0x14e0 c0x0000 (---------------) + I volkenkunde + 0x0036cbc5, // n0x14e1 c0x0000 (---------------) + I wales + 0x0037f608, // n0x14e2 c0x0000 (---------------) + I wallonie + 0x00202543, // n0x14e3 c0x0000 (---------------) + I war + 0x0025108c, // n0x14e4 c0x0000 (---------------) + I washingtondc + 0x0020accf, // n0x14e5 c0x0000 (---------------) + I watch-and-clock + 0x0025fe4d, // n0x14e6 c0x0000 (---------------) + I watchandclock + 0x0036d147, // n0x14e7 c0x0000 (---------------) + I western + 0x00320ec9, // n0x14e8 c0x0000 (---------------) + I westfalen + 0x0029b447, // n0x14e9 c0x0000 (---------------) + I whaling + 0x00247748, // n0x14ea c0x0000 (---------------) + I wildlife + 0x00221a4c, // n0x14eb c0x0000 (---------------) + I williamsburg + 0x002a0248, // n0x14ec c0x0000 (---------------) + I windmill + 0x0029b088, // n0x14ed c0x0000 (---------------) + I workshop + 0x002f608e, // n0x14ee c0x0000 (---------------) + I xn--9dbhblg6di + 0x003083d4, // n0x14ef c0x0000 (---------------) + I xn--comunicaes-v6a2o + 0x003088e4, // n0x14f0 c0x0000 (---------------) + I xn--correios-e-telecomunicaes-ghc29a + 0x0032844a, // n0x14f1 c0x0000 (---------------) + I xn--h1aegh + 0x0034760b, // n0x14f2 c0x0000 (---------------) + I xn--lns-qla + 0x00237fc4, // n0x14f3 c0x0000 (---------------) + I york + 0x00237fc9, // n0x14f4 c0x0000 (---------------) + I yorkshire + 0x00297948, // n0x14f5 c0x0000 (---------------) + I yosemite + 0x00235bc5, // n0x14f6 c0x0000 (---------------) + I youth + 0x0038ba0a, // n0x14f7 c0x0000 (---------------) + I zoological + 0x00363207, // n0x14f8 c0x0000 (---------------) + I zoology + 0x002751c4, // n0x14f9 c0x0000 (---------------) + I aero + 0x00310603, // n0x14fa c0x0000 (---------------) + I biz + 0x00222ac3, // n0x14fb c0x0000 (---------------) + I com + 0x00228d44, // n0x14fc c0x0000 
(---------------) + I coop + 0x002d75c3, // n0x14fd c0x0000 (---------------) + I edu + 0x0021e283, // n0x14fe c0x0000 (---------------) + I gov + 0x00200304, // n0x14ff c0x0000 (---------------) + I info + 0x00238c03, // n0x1500 c0x0000 (---------------) + I int + 0x0023fa03, // n0x1501 c0x0000 (---------------) + I mil + 0x002bd646, // n0x1502 c0x0000 (---------------) + I museum + 0x00298944, // n0x1503 c0x0000 (---------------) + I name + 0x002170c3, // n0x1504 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1505 c0x0000 (---------------) + I org + 0x00218243, // n0x1506 c0x0000 (---------------) + I pro + 0x00201e82, // n0x1507 c0x0000 (---------------) + I ac + 0x00310603, // n0x1508 c0x0000 (---------------) + I biz + 0x00200742, // n0x1509 c0x0000 (---------------) + I co + 0x00222ac3, // n0x150a c0x0000 (---------------) + I com + 0x00228d44, // n0x150b c0x0000 (---------------) + I coop + 0x002d75c3, // n0x150c c0x0000 (---------------) + I edu + 0x0021e283, // n0x150d c0x0000 (---------------) + I gov + 0x00238c03, // n0x150e c0x0000 (---------------) + I int + 0x002bd646, // n0x150f c0x0000 (---------------) + I museum + 0x002170c3, // n0x1510 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1511 c0x0000 (---------------) + I org + 0x000e4188, // n0x1512 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1513 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1514 c0x0000 (---------------) + I edu + 0x0034eb03, // n0x1515 c0x0000 (---------------) + I gob + 0x002170c3, // n0x1516 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1517 c0x0000 (---------------) + I org + 0x000e4188, // n0x1518 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1519 c0x0000 (---------------) + I com + 0x002d75c3, // n0x151a c0x0000 (---------------) + I edu + 0x0021e283, // n0x151b c0x0000 (---------------) + I gov + 0x0023fa03, // n0x151c c0x0000 (---------------) + I mil + 0x00298944, // n0x151d c0x0000 (---------------) + I name + 0x002170c3, // n0x151e c0x0000 (---------------) + I net + 0x0021dcc3, // n0x151f c0x0000 (---------------) + I org + 0x006ed3c8, // n0x1520 c0x0001 (---------------) ! 
I teledata + 0x002055c2, // n0x1521 c0x0000 (---------------) + I ca + 0x0021aa82, // n0x1522 c0x0000 (---------------) + I cc + 0x00200742, // n0x1523 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1524 c0x0000 (---------------) + I com + 0x0022fb02, // n0x1525 c0x0000 (---------------) + I dr + 0x00200242, // n0x1526 c0x0000 (---------------) + I in + 0x00200304, // n0x1527 c0x0000 (---------------) + I info + 0x00203604, // n0x1528 c0x0000 (---------------) + I mobi + 0x0020a682, // n0x1529 c0x0000 (---------------) + I mx + 0x00298944, // n0x152a c0x0000 (---------------) + I name + 0x00200c42, // n0x152b c0x0000 (---------------) + I or + 0x0021dcc3, // n0x152c c0x0000 (---------------) + I org + 0x00218243, // n0x152d c0x0000 (---------------) + I pro + 0x00235406, // n0x152e c0x0000 (---------------) + I school + 0x0020bf42, // n0x152f c0x0000 (---------------) + I tv + 0x00209f42, // n0x1530 c0x0000 (---------------) + I us + 0x0020ba82, // n0x1531 c0x0000 (---------------) + I ws + 0x37e19443, // n0x1532 c0x00df (n0x1534-n0x1535) o I her + 0x382238c3, // n0x1533 c0x00e0 (n0x1535-n0x1536) o I his + 0x000439c6, // n0x1534 c0x0000 (---------------) + forgot + 0x000439c6, // n0x1535 c0x0000 (---------------) + forgot + 0x002729c4, // n0x1536 c0x0000 (---------------) + I asso + 0x0010054c, // n0x1537 c0x0000 (---------------) + at-band-camp + 0x0006c90c, // n0x1538 c0x0000 (---------------) + azure-mobile + 0x000aef0d, // n0x1539 c0x0000 (---------------) + azurewebsites + 0x000d0087, // n0x153a c0x0000 (---------------) + blogdns + 0x00017a88, // n0x153b c0x0000 (---------------) + broke-it + 0x0001d90a, // n0x153c c0x0000 (---------------) + buyshouses + 0x38e2a085, // n0x153d c0x00e3 (n0x1568-n0x1569) o I cdn77 + 0x0002a089, // n0x153e c0x0000 (---------------) + cdn77-ssl + 0x00192608, // n0x153f c0x0000 (---------------) + cloudapp + 0x0019058a, // n0x1540 c0x0000 (---------------) + cloudfront + 0x00146fc8, // n0x1541 c0x0000 (---------------) + dnsalias + 0x0006a247, // n0x1542 c0x0000 (---------------) + dnsdojo + 0x0018a587, // n0x1543 c0x0000 (---------------) + does-it + 0x0015fdc9, // n0x1544 c0x0000 (---------------) + dontexist + 0x000007c8, // n0x1545 c0x0000 (---------------) + dynalias + 0x00078989, // n0x1546 c0x0000 (---------------) + dynathome + 0x0009428d, // n0x1547 c0x0000 (---------------) + endofinternet + 0x39287d46, // n0x1548 c0x00e4 (n0x1569-n0x156b) o I fastly + 0x0004dac7, // n0x1549 c0x0000 (---------------) + from-az + 0x0004f247, // n0x154a c0x0000 (---------------) + from-co + 0x00056b87, // n0x154b c0x0000 (---------------) + from-la + 0x0005c687, // n0x154c c0x0000 (---------------) + from-ny + 0x00005a42, // n0x154d c0x0000 (---------------) + gb + 0x00104507, // n0x154e c0x0000 (---------------) + gets-it + 0x0005058c, // n0x154f c0x0000 (---------------) + ham-radio-op + 0x00085007, // n0x1550 c0x0000 (---------------) + homeftp + 0x000909c6, // n0x1551 c0x0000 (---------------) + homeip + 0x00090ec9, // n0x1552 c0x0000 (---------------) + homelinux + 0x00091b88, // n0x1553 c0x0000 (---------------) + homeunix + 0x00017d42, // n0x1554 c0x0000 (---------------) + hu + 0x00000242, // n0x1555 c0x0000 (---------------) + in + 0x0005554b, // n0x1556 c0x0000 (---------------) + in-the-band + 0x00166909, // n0x1557 c0x0000 (---------------) + is-a-chef + 0x00052e49, // n0x1558 c0x0000 (---------------) + is-a-geek + 0x00191648, // n0x1559 c0x0000 (---------------) + isa-geek + 0x000990c2, // n0x155a c0x0000 (---------------) + jp + 0x00183609, 
// n0x155b c0x0000 (---------------) + kicks-ass + 0x00019c4d, // n0x155c c0x0000 (---------------) + office-on-the + 0x000c7e07, // n0x155d c0x0000 (---------------) + podzone + 0x0004294d, // n0x155e c0x0000 (---------------) + scrapper-site + 0x00002e82, // n0x155f c0x0000 (---------------) + se + 0x00130f46, // n0x1560 c0x0000 (---------------) + selfip + 0x0007efc8, // n0x1561 c0x0000 (---------------) + sells-it + 0x00079088, // n0x1562 c0x0000 (---------------) + servebbs + 0x00161348, // n0x1563 c0x0000 (---------------) + serveftp + 0x0003f688, // n0x1564 c0x0000 (---------------) + thruhere + 0x0000cf02, // n0x1565 c0x0000 (---------------) + uk + 0x00110e86, // n0x1566 c0x0000 (---------------) + webhop + 0x000043c2, // n0x1567 c0x0000 (---------------) + za + 0x00000581, // n0x1568 c0x0000 (---------------) + r + 0x396cbe44, // n0x1569 c0x00e5 (n0x156b-n0x156d) o I prod + 0x39a2a203, // n0x156a c0x00e6 (n0x156d-n0x1570) o I ssl + 0x00000181, // n0x156b c0x0000 (---------------) + a + 0x00189c06, // n0x156c c0x0000 (---------------) + global + 0x00000181, // n0x156d c0x0000 (---------------) + a + 0x00000001, // n0x156e c0x0000 (---------------) + b + 0x00189c06, // n0x156f c0x0000 (---------------) + global + 0x00246584, // n0x1570 c0x0000 (---------------) + I arts + 0x00222ac3, // n0x1571 c0x0000 (---------------) + I com + 0x00238544, // n0x1572 c0x0000 (---------------) + I firm + 0x00200304, // n0x1573 c0x0000 (---------------) + I info + 0x002170c3, // n0x1574 c0x0000 (---------------) + I net + 0x002193c5, // n0x1575 c0x0000 (---------------) + I other + 0x00214943, // n0x1576 c0x0000 (---------------) + I per + 0x002e6343, // n0x1577 c0x0000 (---------------) + I rec + 0x002cf4c5, // n0x1578 c0x0000 (---------------) + I store + 0x00219fc3, // n0x1579 c0x0000 (---------------) + I web + 0x3a622ac3, // n0x157a c0x00e9 (n0x1583-n0x1584) + I com + 0x002d75c3, // n0x157b c0x0000 (---------------) + I edu + 0x0021e283, // n0x157c c0x0000 (---------------) + I gov + 0x0023fa03, // n0x157d c0x0000 (---------------) + I mil + 0x00203604, // n0x157e c0x0000 (---------------) + I mobi + 0x00298944, // n0x157f c0x0000 (---------------) + I name + 0x002170c3, // n0x1580 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1581 c0x0000 (---------------) + I org + 0x00206103, // n0x1582 c0x0000 (---------------) + I sch + 0x000e4188, // n0x1583 c0x0000 (---------------) + blogspot + 0x000e4188, // n0x1584 c0x0000 (---------------) + blogspot + 0x003401c2, // n0x1585 c0x0000 (---------------) + I bv + 0x00000742, // n0x1586 c0x0000 (---------------) + co + 0x3b21b502, // n0x1587 c0x00ec (n0x185d-n0x185e) + I aa + 0x00301488, // n0x1588 c0x0000 (---------------) + I aarborte + 0x0021b986, // n0x1589 c0x0000 (---------------) + I aejrie + 0x002aa946, // n0x158a c0x0000 (---------------) + I afjord + 0x0021b307, // n0x158b c0x0000 (---------------) + I agdenes + 0x3b6076c2, // n0x158c c0x00ed (n0x185e-n0x185f) + I ah + 0x3ba2b548, // n0x158d c0x00ee (n0x185f-n0x1860) o I akershus + 0x0031a9ca, // n0x158e c0x0000 (---------------) + I aknoluokta + 0x0024d548, // n0x158f c0x0000 (---------------) + I akrehamn + 0x00200882, // n0x1590 c0x0000 (---------------) + I al + 0x0036e809, // n0x1591 c0x0000 (---------------) + I alaheadju + 0x0036cc07, // n0x1592 c0x0000 (---------------) + I alesund + 0x0020e486, // n0x1593 c0x0000 (---------------) + I algard + 0x00219749, // n0x1594 c0x0000 (---------------) + I alstahaug + 0x0028ee04, // n0x1595 c0x0000 (---------------) + I alta + 0x002ab3c6, 
// n0x1596 c0x0000 (---------------) + I alvdal + 0x002aad44, // n0x1597 c0x0000 (---------------) + I amli + 0x0025cb44, // n0x1598 c0x0000 (---------------) + I amot + 0x00245409, // n0x1599 c0x0000 (---------------) + I andasuolo + 0x002ee906, // n0x159a c0x0000 (---------------) + I andebu + 0x0034bfc5, // n0x159b c0x0000 (---------------) + I andoy + 0x00279985, // n0x159c c0x0000 (---------------) + I ardal + 0x002a6dc7, // n0x159d c0x0000 (---------------) + I aremark + 0x00275c47, // n0x159e c0x0000 (---------------) + I arendal + 0x002798c4, // n0x159f c0x0000 (---------------) + I arna + 0x0021b546, // n0x15a0 c0x0000 (---------------) + I aseral + 0x00212605, // n0x15a1 c0x0000 (---------------) + I asker + 0x0027d585, // n0x15a2 c0x0000 (---------------) + I askim + 0x0031d885, // n0x15a3 c0x0000 (---------------) + I askoy + 0x002dbc07, // n0x15a4 c0x0000 (---------------) + I askvoll + 0x00311545, // n0x15a5 c0x0000 (---------------) + I asnes + 0x002f1489, // n0x15a6 c0x0000 (---------------) + I audnedaln + 0x00271305, // n0x15a7 c0x0000 (---------------) + I aukra + 0x002e0084, // n0x15a8 c0x0000 (---------------) + I aure + 0x0035b087, // n0x15a9 c0x0000 (---------------) + I aurland + 0x0036d70e, // n0x15aa c0x0000 (---------------) + I aurskog-holand + 0x003298c9, // n0x15ab c0x0000 (---------------) + I austevoll + 0x002f5049, // n0x15ac c0x0000 (---------------) + I austrheim + 0x0030dc86, // n0x15ad c0x0000 (---------------) + I averoy + 0x00321688, // n0x15ae c0x0000 (---------------) + I badaddja + 0x00329f8b, // n0x15af c0x0000 (---------------) + I bahcavuotna + 0x002a080c, // n0x15b0 c0x0000 (---------------) + I bahccavuotna + 0x00323c86, // n0x15b1 c0x0000 (---------------) + I baidar + 0x00342887, // n0x15b2 c0x0000 (---------------) + I bajddar + 0x0035dbc5, // n0x15b3 c0x0000 (---------------) + I balat + 0x0023d9ca, // n0x15b4 c0x0000 (---------------) + I balestrand + 0x0030b989, // n0x15b5 c0x0000 (---------------) + I ballangen + 0x00268ec9, // n0x15b6 c0x0000 (---------------) + I balsfjord + 0x002c8706, // n0x15b7 c0x0000 (---------------) + I bamble + 0x002d37c5, // n0x15b8 c0x0000 (---------------) + I bardu + 0x002b0545, // n0x15b9 c0x0000 (---------------) + I barum + 0x0031a6c9, // n0x15ba c0x0000 (---------------) + I batsfjord + 0x0035ddcb, // n0x15bb c0x0000 (---------------) + I bearalvahki + 0x00269f06, // n0x15bc c0x0000 (---------------) + I beardu + 0x00319a86, // n0x15bd c0x0000 (---------------) + I beiarn + 0x00204704, // n0x15be c0x0000 (---------------) + I berg + 0x00279f06, // n0x15bf c0x0000 (---------------) + I bergen + 0x00343e08, // n0x15c0 c0x0000 (---------------) + I berlevag + 0x00389486, // n0x15c1 c0x0000 (---------------) + I bievat + 0x00219046, // n0x15c2 c0x0000 (---------------) + I bindal + 0x00200f48, // n0x15c3 c0x0000 (---------------) + I birkenes + 0x00202647, // n0x15c4 c0x0000 (---------------) + I bjarkoy + 0x002033c9, // n0x15c5 c0x0000 (---------------) + I bjerkreim + 0x00205005, // n0x15c6 c0x0000 (---------------) + I bjugn + 0x000e4188, // n0x15c7 c0x0000 (---------------) + blogspot + 0x0038a504, // n0x15c8 c0x0000 (---------------) + I bodo + 0x0029bf44, // n0x15c9 c0x0000 (---------------) + I bokn + 0x0020aac5, // n0x15ca c0x0000 (---------------) + I bomlo + 0x0037dc49, // n0x15cb c0x0000 (---------------) + I bremanger + 0x00218ac7, // n0x15cc c0x0000 (---------------) + I bronnoy + 0x00218acb, // n0x15cd c0x0000 (---------------) + I bronnoysund + 0x0021a7ca, // n0x15ce c0x0000 
(---------------) + I brumunddal + 0x0021d4c5, // n0x15cf c0x0000 (---------------) + I bryne + 0x3be0a582, // n0x15d0 c0x00ef (n0x1860-n0x1861) + I bu + 0x002eea07, // n0x15d1 c0x0000 (---------------) + I budejju + 0x3c318ec8, // n0x15d2 c0x00f0 (n0x1861-n0x1862) o I buskerud + 0x00251987, // n0x15d3 c0x0000 (---------------) + I bygland + 0x0025f245, // n0x15d4 c0x0000 (---------------) + I bykle + 0x0032234a, // n0x15d5 c0x0000 (---------------) + I cahcesuolo + 0x00000742, // n0x15d6 c0x0000 (---------------) + co + 0x0024fd0b, // n0x15d7 c0x0000 (---------------) + I davvenjarga + 0x0024eb4a, // n0x15d8 c0x0000 (---------------) + I davvesiida + 0x002e9206, // n0x15d9 c0x0000 (---------------) + I deatnu + 0x002b8f83, // n0x15da c0x0000 (---------------) + I dep + 0x0023404d, // n0x15db c0x0000 (---------------) + I dielddanuorri + 0x0023e18c, // n0x15dc c0x0000 (---------------) + I divtasvuodna + 0x002690cd, // n0x15dd c0x0000 (---------------) + I divttasvuotna + 0x002f8b05, // n0x15de c0x0000 (---------------) + I donna + 0x002396c5, // n0x15df c0x0000 (---------------) + I dovre + 0x0034d707, // n0x15e0 c0x0000 (---------------) + I drammen + 0x002edfc9, // n0x15e1 c0x0000 (---------------) + I drangedal + 0x0031a8c6, // n0x15e2 c0x0000 (---------------) + I drobak + 0x002c7505, // n0x15e3 c0x0000 (---------------) + I dyroy + 0x0021e888, // n0x15e4 c0x0000 (---------------) + I egersund + 0x00262743, // n0x15e5 c0x0000 (---------------) + I eid + 0x002ec4c8, // n0x15e6 c0x0000 (---------------) + I eidfjord + 0x00279e08, // n0x15e7 c0x0000 (---------------) + I eidsberg + 0x002ac2c7, // n0x15e8 c0x0000 (---------------) + I eidskog + 0x00262748, // n0x15e9 c0x0000 (---------------) + I eidsvoll + 0x00383049, // n0x15ea c0x0000 (---------------) + I eigersund + 0x00227ec7, // n0x15eb c0x0000 (---------------) + I elverum + 0x00300e87, // n0x15ec c0x0000 (---------------) + I enebakk + 0x002674c8, // n0x15ed c0x0000 (---------------) + I engerdal + 0x002569c4, // n0x15ee c0x0000 (---------------) + I etne + 0x002569c7, // n0x15ef c0x0000 (---------------) + I etnedal + 0x0037ab48, // n0x15f0 c0x0000 (---------------) + I evenassi + 0x0030a446, // n0x15f1 c0x0000 (---------------) + I evenes + 0x00201f0f, // n0x15f2 c0x0000 (---------------) + I evje-og-hornnes + 0x00345a07, // n0x15f3 c0x0000 (---------------) + I farsund + 0x002c3146, // n0x15f4 c0x0000 (---------------) + I fauske + 0x0020c745, // n0x15f5 c0x0000 (---------------) + I fedje + 0x00234803, // n0x15f6 c0x0000 (---------------) + I fet + 0x00324187, // n0x15f7 c0x0000 (---------------) + I fetsund + 0x00235ec3, // n0x15f8 c0x0000 (---------------) + I fhs + 0x002365c6, // n0x15f9 c0x0000 (---------------) + I finnoy + 0x00238f06, // n0x15fa c0x0000 (---------------) + I fitjar + 0x00239986, // n0x15fb c0x0000 (---------------) + I fjaler + 0x0027e245, // n0x15fc c0x0000 (---------------) + I fjell + 0x00252b83, // n0x15fd c0x0000 (---------------) + I fla + 0x0035be08, // n0x15fe c0x0000 (---------------) + I flakstad + 0x00353389, // n0x15ff c0x0000 (---------------) + I flatanger + 0x0036500b, // n0x1600 c0x0000 (---------------) + I flekkefjord + 0x00239b08, // n0x1601 c0x0000 (---------------) + I flesberg + 0x0023c105, // n0x1602 c0x0000 (---------------) + I flora + 0x0023d4c5, // n0x1603 c0x0000 (---------------) + I floro + 0x3c758002, // n0x1604 c0x00f1 (n0x1862-n0x1863) + I fm + 0x00362789, // n0x1605 c0x0000 (---------------) + I folkebibl + 0x00241787, // n0x1606 c0x0000 (---------------) + I folldal + 
0x003640c5, // n0x1607 c0x0000 (---------------) + I forde + 0x00245307, // n0x1608 c0x0000 (---------------) + I forsand + 0x00247586, // n0x1609 c0x0000 (---------------) + I fosnes + 0x0034af05, // n0x160a c0x0000 (---------------) + I frana + 0x0024804b, // n0x160b c0x0000 (---------------) + I fredrikstad + 0x00248cc4, // n0x160c c0x0000 (---------------) + I frei + 0x0024d105, // n0x160d c0x0000 (---------------) + I frogn + 0x0024d247, // n0x160e c0x0000 (---------------) + I froland + 0x00263e06, // n0x160f c0x0000 (---------------) + I frosta + 0x00264245, // n0x1610 c0x0000 (---------------) + I froya + 0x0026fe07, // n0x1611 c0x0000 (---------------) + I fuoisku + 0x00270587, // n0x1612 c0x0000 (---------------) + I fuossko + 0x002a7504, // n0x1613 c0x0000 (---------------) + I fusa + 0x0027798a, // n0x1614 c0x0000 (---------------) + I fylkesbibl + 0x00277e48, // n0x1615 c0x0000 (---------------) + I fyresdal + 0x0035ea49, // n0x1616 c0x0000 (---------------) + I gaivuotna + 0x00215b85, // n0x1617 c0x0000 (---------------) + I galsa + 0x0024ff46, // n0x1618 c0x0000 (---------------) + I gamvik + 0x00343fca, // n0x1619 c0x0000 (---------------) + I gangaviika + 0x003442c6, // n0x161a c0x0000 (---------------) + I gaular + 0x002542c7, // n0x161b c0x0000 (---------------) + I gausdal + 0x0035e70d, // n0x161c c0x0000 (---------------) + I giehtavuoatna + 0x00220049, // n0x161d c0x0000 (---------------) + I gildeskal + 0x00347945, // n0x161e c0x0000 (---------------) + I giske + 0x0030ee07, // n0x161f c0x0000 (---------------) + I gjemnes + 0x002ed908, // n0x1620 c0x0000 (---------------) + I gjerdrum + 0x0031c188, // n0x1621 c0x0000 (---------------) + I gjerstad + 0x0031d607, // n0x1622 c0x0000 (---------------) + I gjesdal + 0x003446c6, // n0x1623 c0x0000 (---------------) + I gjovik + 0x00395287, // n0x1624 c0x0000 (---------------) + I gloppen + 0x00238a83, // n0x1625 c0x0000 (---------------) + I gol + 0x00320b04, // n0x1626 c0x0000 (---------------) + I gran + 0x00348cc5, // n0x1627 c0x0000 (---------------) + I grane + 0x00376107, // n0x1628 c0x0000 (---------------) + I granvin + 0x00379f89, // n0x1629 c0x0000 (---------------) + I gratangen + 0x002136c8, // n0x162a c0x0000 (---------------) + I grimstad + 0x002c77c5, // n0x162b c0x0000 (---------------) + I grong + 0x00222fc4, // n0x162c c0x0000 (---------------) + I grue + 0x00331485, // n0x162d c0x0000 (---------------) + I gulen + 0x002373cd, // n0x162e c0x0000 (---------------) + I guovdageaidnu + 0x00202dc2, // n0x162f c0x0000 (---------------) + I ha + 0x00289a46, // n0x1630 c0x0000 (---------------) + I habmer + 0x00330e86, // n0x1631 c0x0000 (---------------) + I hadsel + 0x002f5e0a, // n0x1632 c0x0000 (---------------) + I hagebostad + 0x0035b7c6, // n0x1633 c0x0000 (---------------) + I halden + 0x00363605, // n0x1634 c0x0000 (---------------) + I halsa + 0x0036e045, // n0x1635 c0x0000 (---------------) + I hamar + 0x0036e047, // n0x1636 c0x0000 (---------------) + I hamaroy + 0x0036794c, // n0x1637 c0x0000 (---------------) + I hammarfeasta + 0x002ef38a, // n0x1638 c0x0000 (---------------) + I hammerfest + 0x002796c6, // n0x1639 c0x0000 (---------------) + I hapmir + 0x002ae105, // n0x163a c0x0000 (---------------) + I haram + 0x00279d46, // n0x163b c0x0000 (---------------) + I hareid + 0x0027a087, // n0x163c c0x0000 (---------------) + I harstad + 0x0027b486, // n0x163d c0x0000 (---------------) + I hasvik + 0x0027e14c, // n0x163e c0x0000 (---------------) + I hattfjelldal + 0x00219889, // n0x163f c0x0000 
(---------------) + I haugesund + 0x3ca9bc07, // n0x1640 c0x00f2 (n0x1863-n0x1866) o I hedmark + 0x0027f885, // n0x1641 c0x0000 (---------------) + I hemne + 0x0027f886, // n0x1642 c0x0000 (---------------) + I hemnes + 0x0027fc88, // n0x1643 c0x0000 (---------------) + I hemsedal + 0x0022db45, // n0x1644 c0x0000 (---------------) + I herad + 0x0028fd85, // n0x1645 c0x0000 (---------------) + I hitra + 0x0028ffc8, // n0x1646 c0x0000 (---------------) + I hjartdal + 0x002901ca, // n0x1647 c0x0000 (---------------) + I hjelmeland + 0x3ce0cc02, // n0x1648 c0x00f3 (n0x1866-n0x1867) + I hl + 0x3d206182, // n0x1649 c0x00f4 (n0x1867-n0x1868) + I hm + 0x002536c5, // n0x164a c0x0000 (---------------) + I hobol + 0x0029a803, // n0x164b c0x0000 (---------------) + I hof + 0x002c7348, // n0x164c c0x0000 (---------------) + I hokksund + 0x002351c3, // n0x164d c0x0000 (---------------) + I hol + 0x00290444, // n0x164e c0x0000 (---------------) + I hole + 0x0025a04b, // n0x164f c0x0000 (---------------) + I holmestrand + 0x00271dc8, // n0x1650 c0x0000 (---------------) + I holtalen + 0x00292148, // n0x1651 c0x0000 (---------------) + I honefoss + 0x3d6f84c9, // n0x1652 c0x00f5 (n0x1868-n0x1869) o I hordaland + 0x002939c9, // n0x1653 c0x0000 (---------------) + I hornindal + 0x00294186, // n0x1654 c0x0000 (---------------) + I horten + 0x00295188, // n0x1655 c0x0000 (---------------) + I hoyanger + 0x00295389, // n0x1656 c0x0000 (---------------) + I hoylandet + 0x00296046, // n0x1657 c0x0000 (---------------) + I hurdal + 0x002961c5, // n0x1658 c0x0000 (---------------) + I hurum + 0x003512c6, // n0x1659 c0x0000 (---------------) + I hvaler + 0x0036b589, // n0x165a c0x0000 (---------------) + I hyllestad + 0x0030a287, // n0x165b c0x0000 (---------------) + I ibestad + 0x00247d86, // n0x165c c0x0000 (---------------) + I idrett + 0x002f3407, // n0x165d c0x0000 (---------------) + I inderoy + 0x00304b07, // n0x165e c0x0000 (---------------) + I iveland + 0x0025f644, // n0x165f c0x0000 (---------------) + I ivgu + 0x3da14c09, // n0x1660 c0x00f6 (n0x1869-n0x186a) + I jan-mayen + 0x002b4408, // n0x1661 c0x0000 (---------------) + I jessheim + 0x00345148, // n0x1662 c0x0000 (---------------) + I jevnaker + 0x002326c7, // n0x1663 c0x0000 (---------------) + I jolster + 0x002afb86, // n0x1664 c0x0000 (---------------) + I jondal + 0x0038a009, // n0x1665 c0x0000 (---------------) + I jorpeland + 0x002ab887, // n0x1666 c0x0000 (---------------) + I kafjord + 0x0022e84a, // n0x1667 c0x0000 (---------------) + I karasjohka + 0x002d8848, // n0x1668 c0x0000 (---------------) + I karasjok + 0x0023eb87, // n0x1669 c0x0000 (---------------) + I karlsoy + 0x00305646, // n0x166a c0x0000 (---------------) + I karmoy + 0x002eaa8a, // n0x166b c0x0000 (---------------) + I kautokeino + 0x0023cc48, // n0x166c c0x0000 (---------------) + I kirkenes + 0x0025ef45, // n0x166d c0x0000 (---------------) + I klabu + 0x0021cd85, // n0x166e c0x0000 (---------------) + I klepp + 0x002ac947, // n0x166f c0x0000 (---------------) + I kommune + 0x002b7dc9, // n0x1670 c0x0000 (---------------) + I kongsberg + 0x002b814b, // n0x1671 c0x0000 (---------------) + I kongsvinger + 0x002c83c8, // n0x1672 c0x0000 (---------------) + I kopervik + 0x00271389, // n0x1673 c0x0000 (---------------) + I kraanghke + 0x0023a407, // n0x1674 c0x0000 (---------------) + I kragero + 0x0029db0c, // n0x1675 c0x0000 (---------------) + I kristiansand + 0x0029df8c, // n0x1676 c0x0000 (---------------) + I kristiansund + 0x0029e28a, // n0x1677 c0x0000 
(---------------) + I krodsherad + 0x0029e50c, // n0x1678 c0x0000 (---------------) + I krokstadelva + 0x002aa8c8, // n0x1679 c0x0000 (---------------) + I kvafjord + 0x002aaac8, // n0x167a c0x0000 (---------------) + I kvalsund + 0x002aacc4, // n0x167b c0x0000 (---------------) + I kvam + 0x002aba49, // n0x167c c0x0000 (---------------) + I kvanangen + 0x002abc89, // n0x167d c0x0000 (---------------) + I kvinesdal + 0x002abeca, // n0x167e c0x0000 (---------------) + I kvinnherad + 0x002ac149, // n0x167f c0x0000 (---------------) + I kviteseid + 0x002ac487, // n0x1680 c0x0000 (---------------) + I kvitsoy + 0x00381bcc, // n0x1681 c0x0000 (---------------) + I laakesvuemie + 0x00207686, // n0x1682 c0x0000 (---------------) + I lahppi + 0x00256cc8, // n0x1683 c0x0000 (---------------) + I langevag + 0x00344386, // n0x1684 c0x0000 (---------------) + I lardal + 0x002d7a86, // n0x1685 c0x0000 (---------------) + I larvik + 0x00347847, // n0x1686 c0x0000 (---------------) + I lavagis + 0x00358888, // n0x1687 c0x0000 (---------------) + I lavangen + 0x002f020b, // n0x1688 c0x0000 (---------------) + I leangaviika + 0x00251847, // n0x1689 c0x0000 (---------------) + I lebesby + 0x00227989, // n0x168a c0x0000 (---------------) + I leikanger + 0x002386c9, // n0x168b c0x0000 (---------------) + I leirfjord + 0x00244b87, // n0x168c c0x0000 (---------------) + I leirvik + 0x00203c44, // n0x168d c0x0000 (---------------) + I leka + 0x0034b3c7, // n0x168e c0x0000 (---------------) + I leksvik + 0x00364686, // n0x168f c0x0000 (---------------) + I lenvik + 0x00364246, // n0x1690 c0x0000 (---------------) + I lerdal + 0x002a5105, // n0x1691 c0x0000 (---------------) + I lesja + 0x00326448, // n0x1692 c0x0000 (---------------) + I levanger + 0x002a6004, // n0x1693 c0x0000 (---------------) + I lier + 0x002a6006, // n0x1694 c0x0000 (---------------) + I lierne + 0x002ef24b, // n0x1695 c0x0000 (---------------) + I lillehammer + 0x00270d89, // n0x1696 c0x0000 (---------------) + I lillesand + 0x0031d786, // n0x1697 c0x0000 (---------------) + I lindas + 0x0031dc09, // n0x1698 c0x0000 (---------------) + I lindesnes + 0x002dbd86, // n0x1699 c0x0000 (---------------) + I loabat + 0x002455c8, // n0x169a c0x0000 (---------------) + I lodingen + 0x00260a43, // n0x169b c0x0000 (---------------) + I lom + 0x0031b945, // n0x169c c0x0000 (---------------) + I loppa + 0x003444c9, // n0x169d c0x0000 (---------------) + I lorenskog + 0x0034d145, // n0x169e c0x0000 (---------------) + I loten + 0x002d8b84, // n0x169f c0x0000 (---------------) + I lund + 0x00263646, // n0x16a0 c0x0000 (---------------) + I lunner + 0x0029cb85, // n0x16a1 c0x0000 (---------------) + I luroy + 0x002c7bc6, // n0x16a2 c0x0000 (---------------) + I luster + 0x002e1c87, // n0x16a3 c0x0000 (---------------) + I lyngdal + 0x0029c206, // n0x16a4 c0x0000 (---------------) + I lyngen + 0x0028448b, // n0x16a5 c0x0000 (---------------) + I malatvuopmi + 0x0034ce07, // n0x16a6 c0x0000 (---------------) + I malselv + 0x00307d86, // n0x16a7 c0x0000 (---------------) + I malvik + 0x00358046, // n0x16a8 c0x0000 (---------------) + I mandal + 0x002a6e86, // n0x16a9 c0x0000 (---------------) + I marker + 0x00279889, // n0x16aa c0x0000 (---------------) + I marnardal + 0x003419ca, // n0x16ab c0x0000 (---------------) + I masfjorden + 0x00314445, // n0x16ac c0x0000 (---------------) + I masoy + 0x0020f58d, // n0x16ad c0x0000 (---------------) + I matta-varjjat + 0x002902c6, // n0x16ae c0x0000 (---------------) + I meland + 0x002d8246, // n0x16af c0x0000 
(---------------) + I meldal + 0x002a1bc6, // n0x16b0 c0x0000 (---------------) + I melhus + 0x00259d05, // n0x16b1 c0x0000 (---------------) + I meloy + 0x0022b487, // n0x16b2 c0x0000 (---------------) + I meraker + 0x002886c7, // n0x16b3 c0x0000 (---------------) + I midsund + 0x002061ce, // n0x16b4 c0x0000 (---------------) + I midtre-gauldal + 0x0023fa03, // n0x16b5 c0x0000 (---------------) + I mil + 0x002afb49, // n0x16b6 c0x0000 (---------------) + I mjondalen + 0x002f3689, // n0x16b7 c0x0000 (---------------) + I mo-i-rana + 0x0023ddc7, // n0x16b8 c0x0000 (---------------) + I moareke + 0x00208487, // n0x16b9 c0x0000 (---------------) + I modalen + 0x003120c5, // n0x16ba c0x0000 (---------------) + I modum + 0x0029f845, // n0x16bb c0x0000 (---------------) + I molde + 0x3de70a0f, // n0x16bc c0x00f7 (n0x186a-n0x186c) o I more-og-romsdal + 0x002b7447, // n0x16bd c0x0000 (---------------) + I mosjoen + 0x002b7608, // n0x16be c0x0000 (---------------) + I moskenes + 0x002b7b44, // n0x16bf c0x0000 (---------------) + I moss + 0x002b8006, // n0x16c0 c0x0000 (---------------) + I mosvik + 0x3e2dcb42, // n0x16c1 c0x00f8 (n0x186c-n0x186d) + I mr + 0x002bb5c6, // n0x16c2 c0x0000 (---------------) + I muosat + 0x002bd646, // n0x16c3 c0x0000 (---------------) + I museum + 0x002f054e, // n0x16c4 c0x0000 (---------------) + I naamesjevuemie + 0x002ec30a, // n0x16c5 c0x0000 (---------------) + I namdalseid + 0x0021c1c6, // n0x16c6 c0x0000 (---------------) + I namsos + 0x00232b4a, // n0x16c7 c0x0000 (---------------) + I namsskogan + 0x002b2489, // n0x16c8 c0x0000 (---------------) + I nannestad + 0x002ff685, // n0x16c9 c0x0000 (---------------) + I naroy + 0x0037c388, // n0x16ca c0x0000 (---------------) + I narviika + 0x00393206, // n0x16cb c0x0000 (---------------) + I narvik + 0x0031e108, // n0x16cc c0x0000 (---------------) + I naustdal + 0x00354f08, // n0x16cd c0x0000 (---------------) + I navuotna + 0x00395acb, // n0x16ce c0x0000 (---------------) + I nedre-eiker + 0x0021b405, // n0x16cf c0x0000 (---------------) + I nesna + 0x003115c8, // n0x16d0 c0x0000 (---------------) + I nesodden + 0x0020108c, // n0x16d1 c0x0000 (---------------) + I nesoddtangen + 0x0025f107, // n0x16d2 c0x0000 (---------------) + I nesseby + 0x00239146, // n0x16d3 c0x0000 (---------------) + I nesset + 0x002e6588, // n0x16d4 c0x0000 (---------------) + I nissedal + 0x002677c8, // n0x16d5 c0x0000 (---------------) + I nittedal + 0x3e636482, // n0x16d6 c0x00f9 (n0x186d-n0x186e) + I nl + 0x002ab18b, // n0x16d7 c0x0000 (---------------) + I nord-aurdal + 0x00200c09, // n0x16d8 c0x0000 (---------------) + I nord-fron + 0x003473c9, // n0x16d9 c0x0000 (---------------) + I nord-odal + 0x0031da87, // n0x16da c0x0000 (---------------) + I norddal + 0x002befc8, // n0x16db c0x0000 (---------------) + I nordkapp + 0x3ea3dfc8, // n0x16dc c0x00fa (n0x186e-n0x1872) o I nordland + 0x002c5f0b, // n0x16dd c0x0000 (---------------) + I nordre-land + 0x003914c9, // n0x16de c0x0000 (---------------) + I nordreisa + 0x002113cd, // n0x16df c0x0000 (---------------) + I nore-og-uvdal + 0x0023fdc8, // n0x16e0 c0x0000 (---------------) + I notodden + 0x00288b08, // n0x16e1 c0x0000 (---------------) + I notteroy + 0x3ee00e02, // n0x16e2 c0x00fb (n0x1872-n0x1873) + I nt + 0x00396f44, // n0x16e3 c0x0000 (---------------) + I odda + 0x3f209982, // n0x16e4 c0x00fc (n0x1873-n0x1874) + I of + 0x002d89c6, // n0x16e5 c0x0000 (---------------) + I oksnes + 0x3f600a02, // n0x16e6 c0x00fd (n0x1874-n0x1875) + I ol + 0x0021d00a, // n0x16e7 c0x0000 
(---------------) + I omasvuotna + 0x0029b206, // n0x16e8 c0x0000 (---------------) + I oppdal + 0x00220b88, // n0x16e9 c0x0000 (---------------) + I oppegard + 0x00241b48, // n0x16ea c0x0000 (---------------) + I orkanger + 0x00321986, // n0x16eb c0x0000 (---------------) + I orkdal + 0x0032e206, // n0x16ec c0x0000 (---------------) + I orland + 0x002ce006, // n0x16ed c0x0000 (---------------) + I orskog + 0x0029fac5, // n0x16ee c0x0000 (---------------) + I orsta + 0x0022be04, // n0x16ef c0x0000 (---------------) + I osen + 0x3fab8a04, // n0x16f0 c0x00fe (n0x1875-n0x1876) + I oslo + 0x00207f86, // n0x16f1 c0x0000 (---------------) + I osoyro + 0x002586c7, // n0x16f2 c0x0000 (---------------) + I osteroy + 0x3fed6987, // n0x16f3 c0x00ff (n0x1876-n0x1877) o I ostfold + 0x0020300b, // n0x16f4 c0x0000 (---------------) + I ostre-toten + 0x0036dac9, // n0x16f5 c0x0000 (---------------) + I overhalla + 0x0023970a, // n0x16f6 c0x0000 (---------------) + I ovre-eiker + 0x002f3544, // n0x16f7 c0x0000 (---------------) + I oyer + 0x00300d08, // n0x16f8 c0x0000 (---------------) + I oygarden + 0x00247b4d, // n0x16f9 c0x0000 (---------------) + I oystre-slidre + 0x002c9e89, // n0x16fa c0x0000 (---------------) + I porsanger + 0x002ca0c8, // n0x16fb c0x0000 (---------------) + I porsangu + 0x002ca349, // n0x16fc c0x0000 (---------------) + I porsgrunn + 0x002cba44, // n0x16fd c0x0000 (---------------) + I priv + 0x00212ec4, // n0x16fe c0x0000 (---------------) + I rade + 0x00254d85, // n0x16ff c0x0000 (---------------) + I radoy + 0x0035f14b, // n0x1700 c0x0000 (---------------) + I rahkkeravju + 0x00271d46, // n0x1701 c0x0000 (---------------) + I raholt + 0x002a2305, // n0x1702 c0x0000 (---------------) + I raisa + 0x0032a2c9, // n0x1703 c0x0000 (---------------) + I rakkestad + 0x0021b608, // n0x1704 c0x0000 (---------------) + I ralingen + 0x0025abc4, // n0x1705 c0x0000 (---------------) + I rana + 0x0023db49, // n0x1706 c0x0000 (---------------) + I randaberg + 0x0026a685, // n0x1707 c0x0000 (---------------) + I rauma + 0x00275c88, // n0x1708 c0x0000 (---------------) + I rendalen + 0x0033c507, // n0x1709 c0x0000 (---------------) + I rennebu + 0x002f5448, // n0x170a c0x0000 (---------------) + I rennesoy + 0x002af746, // n0x170b c0x0000 (---------------) + I rindal + 0x003247c7, // n0x170c c0x0000 (---------------) + I ringebu + 0x002a8c49, // n0x170d c0x0000 (---------------) + I ringerike + 0x00324cc9, // n0x170e c0x0000 (---------------) + I ringsaker + 0x0025b4c5, // n0x170f c0x0000 (---------------) + I risor + 0x002346c5, // n0x1710 c0x0000 (---------------) + I rissa + 0x4021d702, // n0x1711 c0x0100 (n0x1877-n0x1878) + I rl + 0x002dfe44, // n0x1712 c0x0000 (---------------) + I roan + 0x0036a305, // n0x1713 c0x0000 (---------------) + I rodoy + 0x0033c1c6, // n0x1714 c0x0000 (---------------) + I rollag + 0x00302a85, // n0x1715 c0x0000 (---------------) + I romsa + 0x0023d587, // n0x1716 c0x0000 (---------------) + I romskog + 0x002e3245, // n0x1717 c0x0000 (---------------) + I roros + 0x00263e44, // n0x1718 c0x0000 (---------------) + I rost + 0x0030dd46, // n0x1719 c0x0000 (---------------) + I royken + 0x002c7587, // n0x171a c0x0000 (---------------) + I royrvik + 0x002dcb86, // n0x171b c0x0000 (---------------) + I ruovat + 0x00268ac5, // n0x171c c0x0000 (---------------) + I rygge + 0x0031ce08, // n0x171d c0x0000 (---------------) + I salangen + 0x0031de05, // n0x171e c0x0000 (---------------) + I salat + 0x00341347, // n0x171f c0x0000 (---------------) + I saltdal + 0x0035b289, 
// n0x1720 c0x0000 (---------------) + I samnanger + 0x0029dd0a, // n0x1721 c0x0000 (---------------) + I sandefjord + 0x0023bac7, // n0x1722 c0x0000 (---------------) + I sandnes + 0x0023bacc, // n0x1723 c0x0000 (---------------) + I sandnessjoen + 0x0034bf86, // n0x1724 c0x0000 (---------------) + I sandoy + 0x0021db49, // n0x1725 c0x0000 (---------------) + I sarpsborg + 0x0022d7c5, // n0x1726 c0x0000 (---------------) + I sauda + 0x0022da88, // n0x1727 c0x0000 (---------------) + I sauherad + 0x0020a4c3, // n0x1728 c0x0000 (---------------) + I sel + 0x0020a4c5, // n0x1729 c0x0000 (---------------) + I selbu + 0x002fad85, // n0x172a c0x0000 (---------------) + I selje + 0x00245cc7, // n0x172b c0x0000 (---------------) + I seljord + 0x4060f182, // n0x172c c0x0101 (n0x1878-n0x1879) + I sf + 0x0023b647, // n0x172d c0x0000 (---------------) + I siellak + 0x002b79c6, // n0x172e c0x0000 (---------------) + I sigdal + 0x00214b46, // n0x172f c0x0000 (---------------) + I siljan + 0x002c1186, // n0x1730 c0x0000 (---------------) + I sirdal + 0x00267706, // n0x1731 c0x0000 (---------------) + I skanit + 0x00310108, // n0x1732 c0x0000 (---------------) + I skanland + 0x00262585, // n0x1733 c0x0000 (---------------) + I skaun + 0x002c3207, // n0x1734 c0x0000 (---------------) + I skedsmo + 0x002c320d, // n0x1735 c0x0000 (---------------) + I skedsmokorset + 0x00207b43, // n0x1736 c0x0000 (---------------) + I ski + 0x00207b45, // n0x1737 c0x0000 (---------------) + I skien + 0x002f2d47, // n0x1738 c0x0000 (---------------) + I skierva + 0x0036b048, // n0x1739 c0x0000 (---------------) + I skiptvet + 0x0036ac05, // n0x173a c0x0000 (---------------) + I skjak + 0x0030c348, // n0x173b c0x0000 (---------------) + I skjervoy + 0x002206c6, // n0x173c c0x0000 (---------------) + I skodje + 0x0022a247, // n0x173d c0x0000 (---------------) + I slattum + 0x00285605, // n0x173e c0x0000 (---------------) + I smola + 0x0021b486, // n0x173f c0x0000 (---------------) + I snaase + 0x00340405, // n0x1740 c0x0000 (---------------) + I snasa + 0x002aa34a, // n0x1741 c0x0000 (---------------) + I snillfjord + 0x002c4f86, // n0x1742 c0x0000 (---------------) + I snoasa + 0x002141c7, // n0x1743 c0x0000 (---------------) + I sogndal + 0x00308285, // n0x1744 c0x0000 (---------------) + I sogne + 0x002d9347, // n0x1745 c0x0000 (---------------) + I sokndal + 0x002d0204, // n0x1746 c0x0000 (---------------) + I sola + 0x002d8b06, // n0x1747 c0x0000 (---------------) + I solund + 0x002da005, // n0x1748 c0x0000 (---------------) + I somna + 0x002ee70b, // n0x1749 c0x0000 (---------------) + I sondre-land + 0x00364509, // n0x174a c0x0000 (---------------) + I songdalen + 0x0037184a, // n0x174b c0x0000 (---------------) + I sor-aurdal + 0x0025b548, // n0x174c c0x0000 (---------------) + I sor-fron + 0x002e0f48, // n0x174d c0x0000 (---------------) + I sor-odal + 0x002e888c, // n0x174e c0x0000 (---------------) + I sor-varanger + 0x002ec987, // n0x174f c0x0000 (---------------) + I sorfold + 0x00315608, // n0x1750 c0x0000 (---------------) + I sorreisa + 0x0031a288, // n0x1751 c0x0000 (---------------) + I sortland + 0x0031ee45, // n0x1752 c0x0000 (---------------) + I sorum + 0x002ac70a, // n0x1753 c0x0000 (---------------) + I spjelkavik + 0x00349a49, // n0x1754 c0x0000 (---------------) + I spydeberg + 0x40a023c2, // n0x1755 c0x0102 (n0x1879-n0x187a) + I st + 0x00309986, // n0x1756 c0x0000 (---------------) + I stange + 0x0029ca04, // n0x1757 c0x0000 (---------------) + I stat + 0x0029e909, // n0x1758 c0x0000 
(---------------) + I stathelle + 0x002c92c9, // n0x1759 c0x0000 (---------------) + I stavanger + 0x002d97c7, // n0x175a c0x0000 (---------------) + I stavern + 0x0025c087, // n0x175b c0x0000 (---------------) + I steigen + 0x003374c9, // n0x175c c0x0000 (---------------) + I steinkjer + 0x00202b88, // n0x175d c0x0000 (---------------) + I stjordal + 0x00202b8f, // n0x175e c0x0000 (---------------) + I stjordalshalsen + 0x00228bc6, // n0x175f c0x0000 (---------------) + I stokke + 0x0023f24b, // n0x1760 c0x0000 (---------------) + I stor-elvdal + 0x002cf305, // n0x1761 c0x0000 (---------------) + I stord + 0x002cf307, // n0x1762 c0x0000 (---------------) + I stordal + 0x002cf749, // n0x1763 c0x0000 (---------------) + I storfjord + 0x0023dac6, // n0x1764 c0x0000 (---------------) + I strand + 0x0023dac7, // n0x1765 c0x0000 (---------------) + I stranda + 0x0037fd45, // n0x1766 c0x0000 (---------------) + I stryn + 0x00224fc4, // n0x1767 c0x0000 (---------------) + I sula + 0x00226b46, // n0x1768 c0x0000 (---------------) + I suldal + 0x00218c84, // n0x1769 c0x0000 (---------------) + I sund + 0x002a9c87, // n0x176a c0x0000 (---------------) + I sunndal + 0x002d1a88, // n0x176b c0x0000 (---------------) + I surnadal + 0x40ed36c8, // n0x176c c0x0103 (n0x187a-n0x187b) + I svalbard + 0x002d4485, // n0x176d c0x0000 (---------------) + I sveio + 0x002d45c7, // n0x176e c0x0000 (---------------) + I svelvik + 0x00354549, // n0x176f c0x0000 (---------------) + I sykkylven + 0x00204e04, // n0x1770 c0x0000 (---------------) + I tana + 0x00204e08, // n0x1771 c0x0000 (---------------) + I tananger + 0x4122ba88, // n0x1772 c0x0104 (n0x187b-n0x187d) o I telemark + 0x0036ef44, // n0x1773 c0x0000 (---------------) + I time + 0x00225c08, // n0x1774 c0x0000 (---------------) + I tingvoll + 0x0030cf04, // n0x1775 c0x0000 (---------------) + I tinn + 0x0021fb09, // n0x1776 c0x0000 (---------------) + I tjeldsund + 0x00259c45, // n0x1777 c0x0000 (---------------) + I tjome + 0x41608902, // n0x1778 c0x0105 (n0x187d-n0x187e) + I tm + 0x00228c05, // n0x1779 c0x0000 (---------------) + I tokke + 0x00215ac5, // n0x177a c0x0000 (---------------) + I tolga + 0x00204608, // n0x177b c0x0000 (---------------) + I tonsberg + 0x00226fc7, // n0x177c c0x0000 (---------------) + I torsken + 0x41a02402, // n0x177d c0x0106 (n0x187e-n0x187f) + I tr + 0x0025ab85, // n0x177e c0x0000 (---------------) + I trana + 0x00263106, // n0x177f c0x0000 (---------------) + I tranby + 0x00278cc6, // n0x1780 c0x0000 (---------------) + I tranoy + 0x002dfe08, // n0x1781 c0x0000 (---------------) + I troandin + 0x002e4348, // n0x1782 c0x0000 (---------------) + I trogstad + 0x00302a46, // n0x1783 c0x0000 (---------------) + I tromsa + 0x0030ac06, // n0x1784 c0x0000 (---------------) + I tromso + 0x00214709, // n0x1785 c0x0000 (---------------) + I trondheim + 0x0036b846, // n0x1786 c0x0000 (---------------) + I trysil + 0x00394d4b, // n0x1787 c0x0000 (---------------) + I tvedestrand + 0x00222c85, // n0x1788 c0x0000 (---------------) + I tydal + 0x0020fac6, // n0x1789 c0x0000 (---------------) + I tynset + 0x00224108, // n0x178a c0x0000 (---------------) + I tysfjord + 0x00234886, // n0x178b c0x0000 (---------------) + I tysnes + 0x002f8986, // n0x178c c0x0000 (---------------) + I tysvar + 0x00210dca, // n0x178d c0x0000 (---------------) + I ullensaker + 0x002bab8a, // n0x178e c0x0000 (---------------) + I ullensvang + 0x0025bd05, // n0x178f c0x0000 (---------------) + I ulvik + 0x00215307, // n0x1790 c0x0000 (---------------) + I unjarga + 
0x002d0d06, // n0x1791 c0x0000 (---------------) + I utsira + 0x41e013c2, // n0x1792 c0x0107 (n0x187f-n0x1880) + I va + 0x002f2e87, // n0x1793 c0x0000 (---------------) + I vaapste + 0x00262a85, // n0x1794 c0x0000 (---------------) + I vadso + 0x00343f44, // n0x1795 c0x0000 (---------------) + I vaga + 0x00343f45, // n0x1796 c0x0000 (---------------) + I vagan + 0x00300c06, // n0x1797 c0x0000 (---------------) + I vagsoy + 0x0033f147, // n0x1798 c0x0000 (---------------) + I vaksdal + 0x00213d85, // n0x1799 c0x0000 (---------------) + I valle + 0x002bad04, // n0x179a c0x0000 (---------------) + I vang + 0x0025c3c8, // n0x179b c0x0000 (---------------) + I vanylven + 0x002f8a45, // n0x179c c0x0000 (---------------) + I vardo + 0x00280307, // n0x179d c0x0000 (---------------) + I varggat + 0x002cf005, // n0x179e c0x0000 (---------------) + I varoy + 0x00310385, // n0x179f c0x0000 (---------------) + I vefsn + 0x00212004, // n0x17a0 c0x0000 (---------------) + I vega + 0x00299e89, // n0x17a1 c0x0000 (---------------) + I vegarshei + 0x00212448, // n0x17a2 c0x0000 (---------------) + I vennesla + 0x00212286, // n0x17a3 c0x0000 (---------------) + I verdal + 0x00386a06, // n0x17a4 c0x0000 (---------------) + I verran + 0x002b9c46, // n0x17a5 c0x0000 (---------------) + I vestby + 0x422d9c88, // n0x17a6 c0x0108 (n0x1880-n0x1881) o I vestfold + 0x002d9e87, // n0x17a7 c0x0000 (---------------) + I vestnes + 0x002da30d, // n0x17a8 c0x0000 (---------------) + I vestre-slidre + 0x002da90c, // n0x17a9 c0x0000 (---------------) + I vestre-toten + 0x002daf09, // n0x17aa c0x0000 (---------------) + I vestvagoy + 0x002db149, // n0x17ab c0x0000 (---------------) + I vevelstad + 0x4273b4c2, // n0x17ac c0x0109 (n0x1881-n0x1882) + I vf + 0x0038c903, // n0x17ad c0x0000 (---------------) + I vgs + 0x00244c83, // n0x17ae c0x0000 (---------------) + I vik + 0x00364745, // n0x17af c0x0000 (---------------) + I vikna + 0x0037620a, // n0x17b0 c0x0000 (---------------) + I vindafjord + 0x00302906, // n0x17b1 c0x0000 (---------------) + I voagat + 0x002df605, // n0x17b2 c0x0000 (---------------) + I volda + 0x002e21c4, // n0x17b3 c0x0000 (---------------) + I voss + 0x002e21cb, // n0x17b4 c0x0000 (---------------) + I vossevangen + 0x002f6e0c, // n0x17b5 c0x0000 (---------------) + I xn--andy-ira + 0x002f764c, // n0x17b6 c0x0000 (---------------) + I xn--asky-ira + 0x002f7955, // n0x17b7 c0x0000 (---------------) + I xn--aurskog-hland-jnb + 0x002f984d, // n0x17b8 c0x0000 (---------------) + I xn--avery-yua + 0x002fbb0f, // n0x17b9 c0x0000 (---------------) + I xn--bdddj-mrabd + 0x002fbed2, // n0x17ba c0x0000 (---------------) + I xn--bearalvhki-y4a + 0x002fc34f, // n0x17bb c0x0000 (---------------) + I xn--berlevg-jxa + 0x002fc712, // n0x17bc c0x0000 (---------------) + I xn--bhcavuotna-s4a + 0x002fcb93, // n0x17bd c0x0000 (---------------) + I xn--bhccavuotna-k7a + 0x002fd04d, // n0x17be c0x0000 (---------------) + I xn--bidr-5nac + 0x002fd60d, // n0x17bf c0x0000 (---------------) + I xn--bievt-0qa + 0x002fd94e, // n0x17c0 c0x0000 (---------------) + I xn--bjarky-fya + 0x002fde0e, // n0x17c1 c0x0000 (---------------) + I xn--bjddar-pta + 0x002fec8c, // n0x17c2 c0x0000 (---------------) + I xn--blt-elab + 0x002ff00c, // n0x17c3 c0x0000 (---------------) + I xn--bmlo-gra + 0x002ff44b, // n0x17c4 c0x0000 (---------------) + I xn--bod-2na + 0x002ff7ce, // n0x17c5 c0x0000 (---------------) + I xn--brnny-wuac + 0x00301b92, // n0x17c6 c0x0000 (---------------) + I xn--brnnysund-m8ac + 0x003026cc, // n0x17c7 c0x0000 
(---------------) + I xn--brum-voa + 0x00302e90, // n0x17c8 c0x0000 (---------------) + I xn--btsfjord-9za + 0x003138d2, // n0x17c9 c0x0000 (---------------) + I xn--davvenjrga-y4a + 0x0031458c, // n0x17ca c0x0000 (---------------) + I xn--dnna-gra + 0x00314a4d, // n0x17cb c0x0000 (---------------) + I xn--drbak-wua + 0x00314d8c, // n0x17cc c0x0000 (---------------) + I xn--dyry-ira + 0x00317b91, // n0x17cd c0x0000 (---------------) + I xn--eveni-0qa01ga + 0x00319c0d, // n0x17ce c0x0000 (---------------) + I xn--finny-yua + 0x0031f74d, // n0x17cf c0x0000 (---------------) + I xn--fjord-lra + 0x0031fd4a, // n0x17d0 c0x0000 (---------------) + I xn--fl-zia + 0x0031ffcc, // n0x17d1 c0x0000 (---------------) + I xn--flor-jra + 0x003208cc, // n0x17d2 c0x0000 (---------------) + I xn--frde-gra + 0x0032110c, // n0x17d3 c0x0000 (---------------) + I xn--frna-woa + 0x00321b0c, // n0x17d4 c0x0000 (---------------) + I xn--frya-hra + 0x00324f13, // n0x17d5 c0x0000 (---------------) + I xn--ggaviika-8ya47h + 0x00326650, // n0x17d6 c0x0000 (---------------) + I xn--gildeskl-g0a + 0x00326a50, // n0x17d7 c0x0000 (---------------) + I xn--givuotna-8ya + 0x0032714d, // n0x17d8 c0x0000 (---------------) + I xn--gjvik-wua + 0x0032748c, // n0x17d9 c0x0000 (---------------) + I xn--gls-elac + 0x00328189, // n0x17da c0x0000 (---------------) + I xn--h-2fa + 0x0032900d, // n0x17db c0x0000 (---------------) + I xn--hbmer-xqa + 0x00329353, // n0x17dc c0x0000 (---------------) + I xn--hcesuolo-7ya35b + 0x0032c151, // n0x17dd c0x0000 (---------------) + I xn--hgebostad-g3a + 0x0032c593, // n0x17de c0x0000 (---------------) + I xn--hmmrfeasta-s4ac + 0x0032e38f, // n0x17df c0x0000 (---------------) + I xn--hnefoss-q1a + 0x0032e74c, // n0x17e0 c0x0000 (---------------) + I xn--hobl-ira + 0x0032ea4f, // n0x17e1 c0x0000 (---------------) + I xn--holtlen-hxa + 0x0032ee0d, // n0x17e2 c0x0000 (---------------) + I xn--hpmir-xqa + 0x0032f40f, // n0x17e3 c0x0000 (---------------) + I xn--hyanger-q1a + 0x0032f7d0, // n0x17e4 c0x0000 (---------------) + I xn--hylandet-54a + 0x0033024e, // n0x17e5 c0x0000 (---------------) + I xn--indery-fya + 0x00332a0e, // n0x17e6 c0x0000 (---------------) + I xn--jlster-bya + 0x00333150, // n0x17e7 c0x0000 (---------------) + I xn--jrpeland-54a + 0x00333e8d, // n0x17e8 c0x0000 (---------------) + I xn--karmy-yua + 0x0033480e, // n0x17e9 c0x0000 (---------------) + I xn--kfjord-iua + 0x00334b8c, // n0x17ea c0x0000 (---------------) + I xn--klbu-woa + 0x00336b53, // n0x17eb c0x0000 (---------------) + I xn--koluokta-7ya57h + 0x0033934e, // n0x17ec c0x0000 (---------------) + I xn--krager-gya + 0x00339b10, // n0x17ed c0x0000 (---------------) + I xn--kranghke-b0a + 0x00339f11, // n0x17ee c0x0000 (---------------) + I xn--krdsherad-m8a + 0x0033a34f, // n0x17ef c0x0000 (---------------) + I xn--krehamn-dxa + 0x0033a713, // n0x17f0 c0x0000 (---------------) + I xn--krjohka-hwab49j + 0x0033b04d, // n0x17f1 c0x0000 (---------------) + I xn--ksnes-uua + 0x0033b38f, // n0x17f2 c0x0000 (---------------) + I xn--kvfjord-nxa + 0x0033b74e, // n0x17f3 c0x0000 (---------------) + I xn--kvitsy-fya + 0x0033d050, // n0x17f4 c0x0000 (---------------) + I xn--kvnangen-k0a + 0x0033d449, // n0x17f5 c0x0000 (---------------) + I xn--l-1fa + 0x0033e4d0, // n0x17f6 c0x0000 (---------------) + I xn--laheadju-7ya + 0x0033f30f, // n0x17f7 c0x0000 (---------------) + I xn--langevg-jxa + 0x0033f98f, // n0x17f8 c0x0000 (---------------) + I xn--ldingen-q1a + 0x0033fd52, // n0x17f9 c0x0000 (---------------) + I 
xn--leagaviika-52b + 0x00344a4e, // n0x17fa c0x0000 (---------------) + I xn--lesund-hua + 0x0034534d, // n0x17fb c0x0000 (---------------) + I xn--lgrd-poac + 0x00345bcd, // n0x17fc c0x0000 (---------------) + I xn--lhppi-xqa + 0x00345f0d, // n0x17fd c0x0000 (---------------) + I xn--linds-pra + 0x00347a8d, // n0x17fe c0x0000 (---------------) + I xn--loabt-0qa + 0x00347dcd, // n0x17ff c0x0000 (---------------) + I xn--lrdal-sra + 0x00348110, // n0x1800 c0x0000 (---------------) + I xn--lrenskog-54a + 0x0034850b, // n0x1801 c0x0000 (---------------) + I xn--lt-liac + 0x00348a8c, // n0x1802 c0x0000 (---------------) + I xn--lten-gra + 0x00348e0c, // n0x1803 c0x0000 (---------------) + I xn--lury-ira + 0x0034910c, // n0x1804 c0x0000 (---------------) + I xn--mely-ira + 0x0034940e, // n0x1805 c0x0000 (---------------) + I xn--merker-kua + 0x00356890, // n0x1806 c0x0000 (---------------) + I xn--mjndalen-64a + 0x003581d2, // n0x1807 c0x0000 (---------------) + I xn--mlatvuopmi-s4a + 0x0035864b, // n0x1808 c0x0000 (---------------) + I xn--mli-tla + 0x00358a8e, // n0x1809 c0x0000 (---------------) + I xn--mlselv-iua + 0x00358e0e, // n0x180a c0x0000 (---------------) + I xn--moreke-jua + 0x0035960e, // n0x180b c0x0000 (---------------) + I xn--mosjen-eya + 0x0035a4cb, // n0x180c c0x0000 (---------------) + I xn--mot-tla + 0x42b5a856, // n0x180d c0x010a (n0x1882-n0x1884) o I xn--mre-og-romsdal-qqb + 0x0035b4cd, // n0x180e c0x0000 (---------------) + I xn--msy-ula0h + 0x0035b954, // n0x180f c0x0000 (---------------) + I xn--mtta-vrjjat-k7af + 0x0035c38d, // n0x1810 c0x0000 (---------------) + I xn--muost-0qa + 0x0035d715, // n0x1811 c0x0000 (---------------) + I xn--nmesjevuemie-tcba + 0x0036060d, // n0x1812 c0x0000 (---------------) + I xn--nry-yla5g + 0x00360f8f, // n0x1813 c0x0000 (---------------) + I xn--nttery-byae + 0x00361bcf, // n0x1814 c0x0000 (---------------) + I xn--nvuotna-hwa + 0x003652cf, // n0x1815 c0x0000 (---------------) + I xn--oppegrd-ixa + 0x0036568e, // n0x1816 c0x0000 (---------------) + I xn--ostery-fya + 0x00365d4d, // n0x1817 c0x0000 (---------------) + I xn--osyro-wua + 0x00368211, // n0x1818 c0x0000 (---------------) + I xn--porsgu-sta26f + 0x0036f34c, // n0x1819 c0x0000 (---------------) + I xn--rady-ira + 0x0036f64c, // n0x181a c0x0000 (---------------) + I xn--rdal-poa + 0x0036f94b, // n0x181b c0x0000 (---------------) + I xn--rde-ula + 0x0036fc0c, // n0x181c c0x0000 (---------------) + I xn--rdy-0nab + 0x0036ffcf, // n0x181d c0x0000 (---------------) + I xn--rennesy-v1a + 0x00370392, // n0x181e c0x0000 (---------------) + I xn--rhkkervju-01af + 0x00371acd, // n0x181f c0x0000 (---------------) + I xn--rholt-mra + 0x00372d0c, // n0x1820 c0x0000 (---------------) + I xn--risa-5na + 0x0037318c, // n0x1821 c0x0000 (---------------) + I xn--risr-ira + 0x0037348d, // n0x1822 c0x0000 (---------------) + I xn--rland-uua + 0x003737cf, // n0x1823 c0x0000 (---------------) + I xn--rlingen-mxa + 0x00373b8e, // n0x1824 c0x0000 (---------------) + I xn--rmskog-bya + 0x00375ecc, // n0x1825 c0x0000 (---------------) + I xn--rros-gra + 0x0037648d, // n0x1826 c0x0000 (---------------) + I xn--rskog-uua + 0x003767cb, // n0x1827 c0x0000 (---------------) + I xn--rst-0na + 0x00376fcc, // n0x1828 c0x0000 (---------------) + I xn--rsta-fra + 0x0037754d, // n0x1829 c0x0000 (---------------) + I xn--ryken-vua + 0x0037788e, // n0x182a c0x0000 (---------------) + I xn--ryrvik-bya + 0x00377d09, // n0x182b c0x0000 (---------------) + I xn--s-1fa + 0x00378b53, // n0x182c c0x0000 
(---------------) + I xn--sandnessjen-ogb + 0x0037940d, // n0x182d c0x0000 (---------------) + I xn--sandy-yua + 0x0037974d, // n0x182e c0x0000 (---------------) + I xn--seral-lra + 0x00379d4c, // n0x182f c0x0000 (---------------) + I xn--sgne-gra + 0x0037a1ce, // n0x1830 c0x0000 (---------------) + I xn--skierv-uta + 0x0037b34f, // n0x1831 c0x0000 (---------------) + I xn--skjervy-v1a + 0x0037b70c, // n0x1832 c0x0000 (---------------) + I xn--skjk-soa + 0x0037ba0d, // n0x1833 c0x0000 (---------------) + I xn--sknit-yqa + 0x0037bd4f, // n0x1834 c0x0000 (---------------) + I xn--sknland-fxa + 0x0037c10c, // n0x1835 c0x0000 (---------------) + I xn--slat-5na + 0x0037c74c, // n0x1836 c0x0000 (---------------) + I xn--slt-elab + 0x0037cb0c, // n0x1837 c0x0000 (---------------) + I xn--smla-hra + 0x0037ce0c, // n0x1838 c0x0000 (---------------) + I xn--smna-gra + 0x0037d4cd, // n0x1839 c0x0000 (---------------) + I xn--snase-nra + 0x0037d812, // n0x183a c0x0000 (---------------) + I xn--sndre-land-0cb + 0x0037de8c, // n0x183b c0x0000 (---------------) + I xn--snes-poa + 0x0037e18c, // n0x183c c0x0000 (---------------) + I xn--snsa-roa + 0x0037e491, // n0x183d c0x0000 (---------------) + I xn--sr-aurdal-l8a + 0x0037e8cf, // n0x183e c0x0000 (---------------) + I xn--sr-fron-q1a + 0x0037ec8f, // n0x183f c0x0000 (---------------) + I xn--sr-odal-q1a + 0x0037f053, // n0x1840 c0x0000 (---------------) + I xn--sr-varanger-ggb + 0x003801ce, // n0x1841 c0x0000 (---------------) + I xn--srfold-bya + 0x0038074f, // n0x1842 c0x0000 (---------------) + I xn--srreisa-q1a + 0x00380b0c, // n0x1843 c0x0000 (---------------) + I xn--srum-gra + 0x42f80e4e, // n0x1844 c0x010b (n0x1884-n0x1885) o I xn--stfold-9xa + 0x003811cf, // n0x1845 c0x0000 (---------------) + I xn--stjrdal-s1a + 0x00381596, // n0x1846 c0x0000 (---------------) + I xn--stjrdalshalsen-sqb + 0x00382652, // n0x1847 c0x0000 (---------------) + I xn--stre-toten-zcb + 0x0038448c, // n0x1848 c0x0000 (---------------) + I xn--tjme-hra + 0x003850cf, // n0x1849 c0x0000 (---------------) + I xn--tnsberg-q1a + 0x0038574d, // n0x184a c0x0000 (---------------) + I xn--trany-yua + 0x00385a8f, // n0x184b c0x0000 (---------------) + I xn--trgstad-r1a + 0x00385e4c, // n0x184c c0x0000 (---------------) + I xn--trna-woa + 0x0038614d, // n0x184d c0x0000 (---------------) + I xn--troms-zua + 0x0038648d, // n0x184e c0x0000 (---------------) + I xn--tysvr-vra + 0x003876ce, // n0x184f c0x0000 (---------------) + I xn--unjrga-rta + 0x0038850c, // n0x1850 c0x0000 (---------------) + I xn--vads-jra + 0x0038880c, // n0x1851 c0x0000 (---------------) + I xn--vard-jra + 0x00388b10, // n0x1852 c0x0000 (---------------) + I xn--vegrshei-c0a + 0x0038b251, // n0x1853 c0x0000 (---------------) + I xn--vestvgy-ixa6o + 0x0038b68b, // n0x1854 c0x0000 (---------------) + I xn--vg-yiab + 0x0038c50c, // n0x1855 c0x0000 (---------------) + I xn--vgan-qoa + 0x0038c80e, // n0x1856 c0x0000 (---------------) + I xn--vgsy-qoa0j + 0x0038e511, // n0x1857 c0x0000 (---------------) + I xn--vre-eiker-k8a + 0x0038e94e, // n0x1858 c0x0000 (---------------) + I xn--vrggt-xqad + 0x0038eccd, // n0x1859 c0x0000 (---------------) + I xn--vry-yla5g + 0x00392fcb, // n0x185a c0x0000 (---------------) + I xn--yer-zna + 0x00393c0f, // n0x185b c0x0000 (---------------) + I xn--ygarden-p1a + 0x00394594, // n0x185c c0x0000 (---------------) + I xn--ystre-slidre-ujb + 0x0026cd02, // n0x185d c0x0000 (---------------) + I gs + 0x0026cd02, // n0x185e c0x0000 (---------------) + I gs + 0x00201083, // n0x185f 
c0x0000 (---------------) + I nes + 0x0026cd02, // n0x1860 c0x0000 (---------------) + I gs + 0x00201083, // n0x1861 c0x0000 (---------------) + I nes + 0x0026cd02, // n0x1862 c0x0000 (---------------) + I gs + 0x00202382, // n0x1863 c0x0000 (---------------) + I os + 0x00351305, // n0x1864 c0x0000 (---------------) + I valer + 0x0038e20c, // n0x1865 c0x0000 (---------------) + I xn--vler-qoa + 0x0026cd02, // n0x1866 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x1867 c0x0000 (---------------) + I gs + 0x00202382, // n0x1868 c0x0000 (---------------) + I os + 0x0026cd02, // n0x1869 c0x0000 (---------------) + I gs + 0x00280105, // n0x186a c0x0000 (---------------) + I heroy + 0x0029dd05, // n0x186b c0x0000 (---------------) + I sande + 0x0026cd02, // n0x186c c0x0000 (---------------) + I gs + 0x0026cd02, // n0x186d c0x0000 (---------------) + I gs + 0x0020a702, // n0x186e c0x0000 (---------------) + I bo + 0x00280105, // n0x186f c0x0000 (---------------) + I heroy + 0x002fa209, // n0x1870 c0x0000 (---------------) + I xn--b-5ga + 0x0032be4c, // n0x1871 c0x0000 (---------------) + I xn--hery-ira + 0x0026cd02, // n0x1872 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x1873 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x1874 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x1875 c0x0000 (---------------) + I gs + 0x00351305, // n0x1876 c0x0000 (---------------) + I valer + 0x0026cd02, // n0x1877 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x1878 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x1879 c0x0000 (---------------) + I gs + 0x0026cd02, // n0x187a c0x0000 (---------------) + I gs + 0x0020a702, // n0x187b c0x0000 (---------------) + I bo + 0x002fa209, // n0x187c c0x0000 (---------------) + I xn--b-5ga + 0x0026cd02, // n0x187d c0x0000 (---------------) + I gs + 0x0026cd02, // n0x187e c0x0000 (---------------) + I gs + 0x0026cd02, // n0x187f c0x0000 (---------------) + I gs + 0x0029dd05, // n0x1880 c0x0000 (---------------) + I sande + 0x0026cd02, // n0x1881 c0x0000 (---------------) + I gs + 0x0029dd05, // n0x1882 c0x0000 (---------------) + I sande + 0x0032be4c, // n0x1883 c0x0000 (---------------) + I xn--hery-ira + 0x0038e20c, // n0x1884 c0x0000 (---------------) + I xn--vler-qoa + 0x00310603, // n0x1885 c0x0000 (---------------) + I biz + 0x00222ac3, // n0x1886 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1887 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1888 c0x0000 (---------------) + I gov + 0x00200304, // n0x1889 c0x0000 (---------------) + I info + 0x002170c3, // n0x188a c0x0000 (---------------) + I net + 0x0021dcc3, // n0x188b c0x0000 (---------------) + I org + 0x00166bc8, // n0x188c c0x0000 (---------------) + merseine + 0x0007c1c4, // n0x188d c0x0000 (---------------) + mine + 0x000cb2c8, // n0x188e c0x0000 (---------------) + shacknet + 0x00201e82, // n0x188f c0x0000 (---------------) + I ac + 0x43e00742, // n0x1890 c0x010f (n0x189f-n0x18a0) + I co + 0x002319c3, // n0x1891 c0x0000 (---------------) + I cri + 0x00252f84, // n0x1892 c0x0000 (---------------) + I geek + 0x002012c3, // n0x1893 c0x0000 (---------------) + I gen + 0x0021e284, // n0x1894 c0x0000 (---------------) + I govt + 0x00205e06, // n0x1895 c0x0000 (---------------) + I health + 0x002d2e03, // n0x1896 c0x0000 (---------------) + I iwi + 0x002d2dc4, // n0x1897 c0x0000 (---------------) + I kiwi + 0x002701c5, // n0x1898 c0x0000 (---------------) + I maori + 0x0023fa03, // n0x1899 c0x0000 (---------------) + I mil + 0x002170c3, // n0x189a c0x0000 
(---------------) + I net + 0x0021dcc3, // n0x189b c0x0000 (---------------) + I org + 0x002647ca, // n0x189c c0x0000 (---------------) + I parliament + 0x00235406, // n0x189d c0x0000 (---------------) + I school + 0x0035918c, // n0x189e c0x0000 (---------------) + I xn--mori-qsa + 0x000e4188, // n0x189f c0x0000 (---------------) + blogspot + 0x00200742, // n0x18a0 c0x0000 (---------------) + I co + 0x00222ac3, // n0x18a1 c0x0000 (---------------) + I com + 0x002d75c3, // n0x18a2 c0x0000 (---------------) + I edu + 0x0021e283, // n0x18a3 c0x0000 (---------------) + I gov + 0x0020b403, // n0x18a4 c0x0000 (---------------) + I med + 0x002bd646, // n0x18a5 c0x0000 (---------------) + I museum + 0x002170c3, // n0x18a6 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x18a7 c0x0000 (---------------) + I org + 0x00218243, // n0x18a8 c0x0000 (---------------) + I pro + 0x00004342, // n0x18a9 c0x0000 (---------------) + ae + 0x000d0087, // n0x18aa c0x0000 (---------------) + blogdns + 0x001619c8, // n0x18ab c0x0000 (---------------) + blogsite + 0x00076592, // n0x18ac c0x0000 (---------------) + boldlygoingnowhere + 0x44a2a085, // n0x18ad c0x0112 (n0x18e3-n0x18e5) o I cdn77 + 0x44f0720c, // n0x18ae c0x0113 (n0x18e5-n0x18e6) o I cdn77-secure + 0x00146fc8, // n0x18af c0x0000 (---------------) + dnsalias + 0x0006a247, // n0x18b0 c0x0000 (---------------) + dnsdojo + 0x00010a4b, // n0x18b1 c0x0000 (---------------) + doesntexist + 0x0015fdc9, // n0x18b2 c0x0000 (---------------) + dontexist + 0x00146ec7, // n0x18b3 c0x0000 (---------------) + doomdns + 0x0006a147, // n0x18b4 c0x0000 (---------------) + duckdns + 0x00194fc6, // n0x18b5 c0x0000 (---------------) + dvrdns + 0x000007c8, // n0x18b6 c0x0000 (---------------) + dynalias + 0x45409ac6, // n0x18b7 c0x0115 (n0x18e7-n0x18e9) + dyndns + 0x0009428d, // n0x18b8 c0x0000 (---------------) + endofinternet + 0x000f3e50, // n0x18b9 c0x0000 (---------------) + endoftheinternet + 0x4581d5c2, // n0x18ba c0x0116 (n0x18e9-n0x1920) + eu + 0x00057807, // n0x18bb c0x0000 (---------------) + from-me + 0x00083589, // n0x18bc c0x0000 (---------------) + game-host + 0x00043a86, // n0x18bd c0x0000 (---------------) + gotdns + 0x0002ea02, // n0x18be c0x0000 (---------------) + hk + 0x0013eeca, // n0x18bf c0x0000 (---------------) + hobby-site + 0x0000b387, // n0x18c0 c0x0000 (---------------) + homedns + 0x00085007, // n0x18c1 c0x0000 (---------------) + homeftp + 0x00090ec9, // n0x18c2 c0x0000 (---------------) + homelinux + 0x00091b88, // n0x18c3 c0x0000 (---------------) + homeunix + 0x000c528e, // n0x18c4 c0x0000 (---------------) + is-a-bruinsfan + 0x00008d8e, // n0x18c5 c0x0000 (---------------) + is-a-candidate + 0x0000eecf, // n0x18c6 c0x0000 (---------------) + is-a-celticsfan + 0x00166909, // n0x18c7 c0x0000 (---------------) + is-a-chef + 0x00052e49, // n0x18c8 c0x0000 (---------------) + is-a-geek + 0x0006b8cb, // n0x18c9 c0x0000 (---------------) + is-a-knight + 0x001259cf, // n0x18ca c0x0000 (---------------) + is-a-linux-user + 0x0008480c, // n0x18cb c0x0000 (---------------) + is-a-patsfan + 0x000d23cb, // n0x18cc c0x0000 (---------------) + is-a-soxfan + 0x00103ac8, // n0x18cd c0x0000 (---------------) + is-found + 0x000d6887, // n0x18ce c0x0000 (---------------) + is-lost + 0x000e2908, // n0x18cf c0x0000 (---------------) + is-saved + 0x0012148b, // n0x18d0 c0x0000 (---------------) + is-very-bad + 0x00127b0c, // n0x18d1 c0x0000 (---------------) + is-very-evil + 0x00130a4c, // n0x18d2 c0x0000 (---------------) + is-very-good + 0x0013904c, // 
n0x18d3 c0x0000 (---------------) + is-very-nice + 0x0013bb8d, // n0x18d4 c0x0000 (---------------) + is-very-sweet + 0x00191648, // n0x18d5 c0x0000 (---------------) + isa-geek + 0x00183609, // n0x18d6 c0x0000 (---------------) + kicks-ass + 0x0016a6cb, // n0x18d7 c0x0000 (---------------) + misconfused + 0x000c7e07, // n0x18d8 c0x0000 (---------------) + podzone + 0x0016184a, // n0x18d9 c0x0000 (---------------) + readmyblog + 0x00130f46, // n0x18da c0x0000 (---------------) + selfip + 0x00084dcd, // n0x18db c0x0000 (---------------) + sellsyourhome + 0x00079088, // n0x18dc c0x0000 (---------------) + servebbs + 0x00161348, // n0x18dd c0x0000 (---------------) + serveftp + 0x00011f49, // n0x18de c0x0000 (---------------) + servegame + 0x000d044c, // n0x18df c0x0000 (---------------) + stuff-4-sale + 0x00009f42, // n0x18e0 c0x0000 (---------------) + us + 0x00110e86, // n0x18e1 c0x0000 (---------------) + webhop + 0x000043c2, // n0x18e2 c0x0000 (---------------) + za + 0x00000741, // n0x18e3 c0x0000 (---------------) + c + 0x000060c3, // n0x18e4 c0x0000 (---------------) + rsc + 0x45374606, // n0x18e5 c0x0114 (n0x18e6-n0x18e7) o I origin + 0x0002a203, // n0x18e6 c0x0000 (---------------) + ssl + 0x00002342, // n0x18e7 c0x0000 (---------------) + go + 0x0000b384, // n0x18e8 c0x0000 (---------------) + home + 0x00000882, // n0x18e9 c0x0000 (---------------) + al + 0x000729c4, // n0x18ea c0x0000 (---------------) + asso + 0x00001642, // n0x18eb c0x0000 (---------------) + at + 0x00005ac2, // n0x18ec c0x0000 (---------------) + au + 0x00004702, // n0x18ed c0x0000 (---------------) + be + 0x00155e02, // n0x18ee c0x0000 (---------------) + bg + 0x000055c2, // n0x18ef c0x0000 (---------------) + ca + 0x0002a082, // n0x18f0 c0x0000 (---------------) + cd + 0x00004a02, // n0x18f1 c0x0000 (---------------) + ch + 0x000211c2, // n0x18f2 c0x0000 (---------------) + cn + 0x00029e42, // n0x18f3 c0x0000 (---------------) + cy + 0x00014442, // n0x18f4 c0x0000 (---------------) + cz + 0x000006c2, // n0x18f5 c0x0000 (---------------) + de + 0x00071742, // n0x18f6 c0x0000 (---------------) + dk + 0x000d75c3, // n0x18f7 c0x0000 (---------------) + edu + 0x00006042, // n0x18f8 c0x0000 (---------------) + ee + 0x000010c2, // n0x18f9 c0x0000 (---------------) + es + 0x000099c2, // n0x18fa c0x0000 (---------------) + fi + 0x00000d42, // n0x18fb c0x0000 (---------------) + fr + 0x0000dc82, // n0x18fc c0x0000 (---------------) + gr + 0x00025842, // n0x18fd c0x0000 (---------------) + hr + 0x00017d42, // n0x18fe c0x0000 (---------------) + hu + 0x00000e82, // n0x18ff c0x0000 (---------------) + ie + 0x000036c2, // n0x1900 c0x0000 (---------------) + il + 0x00000242, // n0x1901 c0x0000 (---------------) + in + 0x00038c03, // n0x1902 c0x0000 (---------------) + int + 0x00002b42, // n0x1903 c0x0000 (---------------) + is + 0x00006e82, // n0x1904 c0x0000 (---------------) + it + 0x000990c2, // n0x1905 c0x0000 (---------------) + jp + 0x000034c2, // n0x1906 c0x0000 (---------------) + kr + 0x00005ec2, // n0x1907 c0x0000 (---------------) + lt + 0x000071c2, // n0x1908 c0x0000 (---------------) + lu + 0x00027f02, // n0x1909 c0x0000 (---------------) + lv + 0x0003a6c2, // n0x190a c0x0000 (---------------) + mc + 0x00008942, // n0x190b c0x0000 (---------------) + me + 0x00156d82, // n0x190c c0x0000 (---------------) + mk + 0x00059642, // n0x190d c0x0000 (---------------) + mt + 0x00020282, // n0x190e c0x0000 (---------------) + my + 0x000170c3, // n0x190f c0x0000 (---------------) + net + 0x00001282, // n0x1910 c0x0000 
(---------------) + ng + 0x00036482, // n0x1911 c0x0000 (---------------) + nl + 0x00000c02, // n0x1912 c0x0000 (---------------) + no + 0x000078c2, // n0x1913 c0x0000 (---------------) + nz + 0x0005b445, // n0x1914 c0x0000 (---------------) + paris + 0x00001e02, // n0x1915 c0x0000 (---------------) + pl + 0x00095982, // n0x1916 c0x0000 (---------------) + pt + 0x00123e03, // n0x1917 c0x0000 (---------------) + q-a + 0x00000d82, // n0x1918 c0x0000 (---------------) + ro + 0x000044c2, // n0x1919 c0x0000 (---------------) + ru + 0x00002e82, // n0x191a c0x0000 (---------------) + se + 0x00009182, // n0x191b c0x0000 (---------------) + si + 0x00007b42, // n0x191c c0x0000 (---------------) + sk + 0x00002402, // n0x191d c0x0000 (---------------) + tr + 0x0000cf02, // n0x191e c0x0000 (---------------) + uk + 0x00009f42, // n0x191f c0x0000 (---------------) + us + 0x0020c283, // n0x1920 c0x0000 (---------------) + I abo + 0x00201e82, // n0x1921 c0x0000 (---------------) + I ac + 0x00222ac3, // n0x1922 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1923 c0x0000 (---------------) + I edu + 0x0034eb03, // n0x1924 c0x0000 (---------------) + I gob + 0x0020dc03, // n0x1925 c0x0000 (---------------) + I ing + 0x0020b403, // n0x1926 c0x0000 (---------------) + I med + 0x002170c3, // n0x1927 c0x0000 (---------------) + I net + 0x00207cc3, // n0x1928 c0x0000 (---------------) + I nom + 0x0021dcc3, // n0x1929 c0x0000 (---------------) + I org + 0x00280043, // n0x192a c0x0000 (---------------) + I sld + 0x000e4188, // n0x192b c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x192c c0x0000 (---------------) + I com + 0x002d75c3, // n0x192d c0x0000 (---------------) + I edu + 0x0034eb03, // n0x192e c0x0000 (---------------) + I gob + 0x0023fa03, // n0x192f c0x0000 (---------------) + I mil + 0x002170c3, // n0x1930 c0x0000 (---------------) + I net + 0x00207cc3, // n0x1931 c0x0000 (---------------) + I nom + 0x0021dcc3, // n0x1932 c0x0000 (---------------) + I org + 0x00222ac3, // n0x1933 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1934 c0x0000 (---------------) + I edu + 0x0021dcc3, // n0x1935 c0x0000 (---------------) + I org + 0x00222ac3, // n0x1936 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1937 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1938 c0x0000 (---------------) + I gov + 0x00200041, // n0x1939 c0x0000 (---------------) + I i + 0x0023fa03, // n0x193a c0x0000 (---------------) + I mil + 0x002170c3, // n0x193b c0x0000 (---------------) + I net + 0x00202303, // n0x193c c0x0000 (---------------) + I ngo + 0x0021dcc3, // n0x193d c0x0000 (---------------) + I org + 0x00310603, // n0x193e c0x0000 (---------------) + I biz + 0x00222ac3, // n0x193f c0x0000 (---------------) + I com + 0x002d75c3, // n0x1940 c0x0000 (---------------) + I edu + 0x002a1b43, // n0x1941 c0x0000 (---------------) + I fam + 0x0034eb03, // n0x1942 c0x0000 (---------------) + I gob + 0x00375943, // n0x1943 c0x0000 (---------------) + I gok + 0x0026f583, // n0x1944 c0x0000 (---------------) + I gon + 0x0028e783, // n0x1945 c0x0000 (---------------) + I gop + 0x00202343, // n0x1946 c0x0000 (---------------) + I gos + 0x0021e283, // n0x1947 c0x0000 (---------------) + I gov + 0x00200304, // n0x1948 c0x0000 (---------------) + I info + 0x002170c3, // n0x1949 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x194a c0x0000 (---------------) + I org + 0x00219fc3, // n0x194b c0x0000 (---------------) + I web + 0x002f8f84, // n0x194c c0x0000 (---------------) + I agro + 0x002375c3, // n0x194d 
c0x0000 (---------------) + I aid + 0x00000603, // n0x194e c0x0000 (---------------) + art + 0x0026a983, // n0x194f c0x0000 (---------------) + I atm + 0x0023e708, // n0x1950 c0x0000 (---------------) + I augustow + 0x002eaac4, // n0x1951 c0x0000 (---------------) + I auto + 0x0032d4ca, // n0x1952 c0x0000 (---------------) + I babia-gora + 0x0034d4c6, // n0x1953 c0x0000 (---------------) + I bedzin + 0x0035ad87, // n0x1954 c0x0000 (---------------) + I beskidy + 0x0021604a, // n0x1955 c0x0000 (---------------) + I bialowieza + 0x00228a89, // n0x1956 c0x0000 (---------------) + I bialystok + 0x0037f4c7, // n0x1957 c0x0000 (---------------) + I bielawa + 0x00382a8a, // n0x1958 c0x0000 (---------------) + I bieszczady + 0x00310603, // n0x1959 c0x0000 (---------------) + I biz + 0x0025374b, // n0x195a c0x0000 (---------------) + I boleslawiec + 0x00343289, // n0x195b c0x0000 (---------------) + I bydgoszcz + 0x002b9d45, // n0x195c c0x0000 (---------------) + I bytom + 0x002bb407, // n0x195d c0x0000 (---------------) + I cieszyn + 0x00000742, // n0x195e c0x0000 (---------------) + co + 0x00222ac3, // n0x195f c0x0000 (---------------) + I com + 0x002c8a87, // n0x1960 c0x0000 (---------------) + I czeladz + 0x00214445, // n0x1961 c0x0000 (---------------) + I czest + 0x002aae49, // n0x1962 c0x0000 (---------------) + I dlugoleka + 0x002d75c3, // n0x1963 c0x0000 (---------------) + I edu + 0x0021b206, // n0x1964 c0x0000 (---------------) + I elblag + 0x002aa003, // n0x1965 c0x0000 (---------------) + I elk + 0x000b7a43, // n0x1966 c0x0000 (---------------) + gda + 0x000e1246, // n0x1967 c0x0000 (---------------) + gdansk + 0x0016e4c6, // n0x1968 c0x0000 (---------------) + gdynia + 0x0014ee47, // n0x1969 c0x0000 (---------------) + gliwice + 0x00391046, // n0x196a c0x0000 (---------------) + I glogow + 0x002047c5, // n0x196b c0x0000 (---------------) + I gmina + 0x0031b347, // n0x196c c0x0000 (---------------) + I gniezno + 0x002b1487, // n0x196d c0x0000 (---------------) + I gorlice + 0x4761e283, // n0x196e c0x011d (n0x19f1-n0x1a20) + I gov + 0x003147c7, // n0x196f c0x0000 (---------------) + I grajewo + 0x0034b2c3, // n0x1970 c0x0000 (---------------) + I gsm + 0x0036cb05, // n0x1971 c0x0000 (---------------) + I ilawa + 0x00200304, // n0x1972 c0x0000 (---------------) + I info + 0x0020a8c8, // n0x1973 c0x0000 (---------------) + I jaworzno + 0x0020c80c, // n0x1974 c0x0000 (---------------) + I jelenia-gora + 0x00297b45, // n0x1975 c0x0000 (---------------) + I jgora + 0x002bed46, // n0x1976 c0x0000 (---------------) + I kalisz + 0x002c8947, // n0x1977 c0x0000 (---------------) + I karpacz + 0x002017c7, // n0x1978 c0x0000 (---------------) + I kartuzy + 0x00216a07, // n0x1979 c0x0000 (---------------) + I kaszuby + 0x00217888, // n0x197a c0x0000 (---------------) + I katowice + 0x0036730f, // n0x197b c0x0000 (---------------) + I kazimierz-dolny + 0x0023df05, // n0x197c c0x0000 (---------------) + I kepno + 0x00231ac7, // n0x197d c0x0000 (---------------) + I ketrzyn + 0x002a3547, // n0x197e c0x0000 (---------------) + I klodzko + 0x0029058a, // n0x197f c0x0000 (---------------) + I kobierzyce + 0x00295709, // n0x1980 c0x0000 (---------------) + I kolobrzeg + 0x002bbf45, // n0x1981 c0x0000 (---------------) + I konin + 0x002bdf8a, // n0x1982 c0x0000 (---------------) + I konskowola + 0x0011bec6, // n0x1983 c0x0000 (---------------) + krakow + 0x002aa085, // n0x1984 c0x0000 (---------------) + I kutno + 0x002be184, // n0x1985 c0x0000 (---------------) + I lapy + 0x002c8806, // n0x1986 c0x0000 
(---------------) + I lebork + 0x00322207, // n0x1987 c0x0000 (---------------) + I legnica + 0x002bde07, // n0x1988 c0x0000 (---------------) + I lezajsk + 0x0036d448, // n0x1989 c0x0000 (---------------) + I limanowa + 0x002c6b85, // n0x198a c0x0000 (---------------) + I lomza + 0x00214346, // n0x198b c0x0000 (---------------) + I lowicz + 0x00218fc5, // n0x198c c0x0000 (---------------) + I lubin + 0x0029b345, // n0x198d c0x0000 (---------------) + I lukow + 0x00218f04, // n0x198e c0x0000 (---------------) + I mail + 0x00321887, // n0x198f c0x0000 (---------------) + I malbork + 0x0030ff4a, // n0x1990 c0x0000 (---------------) + I malopolska + 0x0020b988, // n0x1991 c0x0000 (---------------) + I mazowsze + 0x002d2a06, // n0x1992 c0x0000 (---------------) + I mazury + 0x0000b403, // n0x1993 c0x0000 (---------------) + med + 0x002dc385, // n0x1994 c0x0000 (---------------) + I media + 0x00232386, // n0x1995 c0x0000 (---------------) + I miasta + 0x00381e06, // n0x1996 c0x0000 (---------------) + I mielec + 0x002f0806, // n0x1997 c0x0000 (---------------) + I mielno + 0x0023fa03, // n0x1998 c0x0000 (---------------) + I mil + 0x00371d47, // n0x1999 c0x0000 (---------------) + I mragowo + 0x002a34c5, // n0x199a c0x0000 (---------------) + I naklo + 0x002170c3, // n0x199b c0x0000 (---------------) + I net + 0x0037f74d, // n0x199c c0x0000 (---------------) + I nieruchomosci + 0x00207cc3, // n0x199d c0x0000 (---------------) + I nom + 0x0036d548, // n0x199e c0x0000 (---------------) + I nowaruda + 0x00210944, // n0x199f c0x0000 (---------------) + I nysa + 0x00264105, // n0x19a0 c0x0000 (---------------) + I olawa + 0x00290486, // n0x19a1 c0x0000 (---------------) + I olecko + 0x002c1f06, // n0x19a2 c0x0000 (---------------) + I olkusz + 0x0020f9c7, // n0x19a3 c0x0000 (---------------) + I olsztyn + 0x00228dc7, // n0x19a4 c0x0000 (---------------) + I opoczno + 0x00246a05, // n0x19a5 c0x0000 (---------------) + I opole + 0x0021dcc3, // n0x19a6 c0x0000 (---------------) + I org + 0x00202387, // n0x19a7 c0x0000 (---------------) + I ostroda + 0x00203b09, // n0x19a8 c0x0000 (---------------) + I ostroleka + 0x002053c9, // n0x19a9 c0x0000 (---------------) + I ostrowiec + 0x0020818a, // n0x19aa c0x0000 (---------------) + I ostrowwlkp + 0x00203e02, // n0x19ab c0x0000 (---------------) + I pc + 0x0036cac4, // n0x19ac c0x0000 (---------------) + I pila + 0x002c2d84, // n0x19ad c0x0000 (---------------) + I pisz + 0x00210607, // n0x19ae c0x0000 (---------------) + I podhale + 0x0023b508, // n0x19af c0x0000 (---------------) + I podlasie + 0x002c8cc9, // n0x19b0 c0x0000 (---------------) + I polkowice + 0x00207a09, // n0x19b1 c0x0000 (---------------) + I pomorskie + 0x002c9507, // n0x19b2 c0x0000 (---------------) + I pomorze + 0x0026a886, // n0x19b3 c0x0000 (---------------) + I powiat + 0x000ca606, // n0x19b4 c0x0000 (---------------) + poznan + 0x002cba44, // n0x19b5 c0x0000 (---------------) + I priv + 0x002cbbca, // n0x19b6 c0x0000 (---------------) + I prochowice + 0x002cd2c8, // n0x19b7 c0x0000 (---------------) + I pruszkow + 0x002cdec9, // n0x19b8 c0x0000 (---------------) + I przeworsk + 0x00281bc6, // n0x19b9 c0x0000 (---------------) + I pulawy + 0x002e8285, // n0x19ba c0x0000 (---------------) + I radom + 0x0020b848, // n0x19bb c0x0000 (---------------) + I rawa-maz + 0x002b174a, // n0x19bc c0x0000 (---------------) + I realestate + 0x00241283, // n0x19bd c0x0000 (---------------) + I rel + 0x00226746, // n0x19be c0x0000 (---------------) + I rybnik + 0x002c9607, // n0x19bf c0x0000 
(---------------) + I rzeszow + 0x0020b505, // n0x19c0 c0x0000 (---------------) + I sanok + 0x00375a45, // n0x19c1 c0x0000 (---------------) + I sejny + 0x0029acc3, // n0x19c2 c0x0000 (---------------) + I sex + 0x0029b184, // n0x19c3 c0x0000 (---------------) + I shop + 0x0021cd45, // n0x19c4 c0x0000 (---------------) + I sklep + 0x00270687, // n0x19c5 c0x0000 (---------------) + I skoczow + 0x00212585, // n0x19c6 c0x0000 (---------------) + I slask + 0x002c1646, // n0x19c7 c0x0000 (---------------) + I slupsk + 0x000ddd85, // n0x19c8 c0x0000 (---------------) + sopot + 0x0021c283, // n0x19c9 c0x0000 (---------------) + I sos + 0x0021c289, // n0x19ca c0x0000 (---------------) + I sosnowiec + 0x00263ecc, // n0x19cb c0x0000 (---------------) + I stalowa-wola + 0x0029720c, // n0x19cc c0x0000 (---------------) + I starachowice + 0x002b8648, // n0x19cd c0x0000 (---------------) + I stargard + 0x00325847, // n0x19ce c0x0000 (---------------) + I suwalki + 0x002d4d08, // n0x19cf c0x0000 (---------------) + I swidnica + 0x002d590a, // n0x19d0 c0x0000 (---------------) + I swiebodzin + 0x002d608b, // n0x19d1 c0x0000 (---------------) + I swinoujscie + 0x003433c8, // n0x19d2 c0x0000 (---------------) + I szczecin + 0x002bee48, // n0x19d3 c0x0000 (---------------) + I szczytno + 0x00207586, // n0x19d4 c0x0000 (---------------) + I szkola + 0x0030a185, // n0x19d5 c0x0000 (---------------) + I targi + 0x0030ebca, // n0x19d6 c0x0000 (---------------) + I tarnobrzeg + 0x0021e345, // n0x19d7 c0x0000 (---------------) + I tgory + 0x00208902, // n0x19d8 c0x0000 (---------------) + I tm + 0x002ae287, // n0x19d9 c0x0000 (---------------) + I tourism + 0x0027f186, // n0x19da c0x0000 (---------------) + I travel + 0x0033de05, // n0x19db c0x0000 (---------------) + I turek + 0x002d8689, // n0x19dc c0x0000 (---------------) + I turystyka + 0x0036b4c5, // n0x19dd c0x0000 (---------------) + I tychy + 0x0027af05, // n0x19de c0x0000 (---------------) + I ustka + 0x0036c6c9, // n0x19df c0x0000 (---------------) + I walbrzych + 0x00232206, // n0x19e0 c0x0000 (---------------) + I warmia + 0x00250f08, // n0x19e1 c0x0000 (---------------) + I warszawa + 0x002c7183, // n0x19e2 c0x0000 (---------------) + I waw + 0x00391186, // n0x19e3 c0x0000 (---------------) + I wegrow + 0x00263586, // n0x19e4 c0x0000 (---------------) + I wielun + 0x002e2d45, // n0x19e5 c0x0000 (---------------) + I wlocl + 0x002e2d49, // n0x19e6 c0x0000 (---------------) + I wloclawek + 0x0030b189, // n0x19e7 c0x0000 (---------------) + I wodzislaw + 0x002609c7, // n0x19e8 c0x0000 (---------------) + I wolomin + 0x000e2bc4, // n0x19e9 c0x0000 (---------------) + wroc + 0x002e2bc7, // n0x19ea c0x0000 (---------------) + I wroclaw + 0x00207909, // n0x19eb c0x0000 (---------------) + I zachpomor + 0x00216245, // n0x19ec c0x0000 (---------------) + I zagan + 0x0002bf08, // n0x19ed c0x0000 (---------------) + zakopane + 0x00310d85, // n0x19ee c0x0000 (---------------) + I zarow + 0x00217185, // n0x19ef c0x0000 (---------------) + I zgora + 0x0021e549, // n0x19f0 c0x0000 (---------------) + I zgorzelec + 0x002105c2, // n0x19f1 c0x0000 (---------------) + I ap + 0x00253e04, // n0x19f2 c0x0000 (---------------) + I griw + 0x00200b42, // n0x19f3 c0x0000 (---------------) + I ic + 0x00202b42, // n0x19f4 c0x0000 (---------------) + I is + 0x00268d85, // n0x19f5 c0x0000 (---------------) + I kmpsp + 0x002c1788, // n0x19f6 c0x0000 (---------------) + I konsulat + 0x00379185, // n0x19f7 c0x0000 (---------------) + I kppsp + 0x002ac643, // n0x19f8 c0x0000 
(---------------) + I kwp + 0x002ac645, // n0x19f9 c0x0000 (---------------) + I kwpsp + 0x002bb7c3, // n0x19fa c0x0000 (---------------) + I mup + 0x0020a142, // n0x19fb c0x0000 (---------------) + I mw + 0x002d3fc4, // n0x19fc c0x0000 (---------------) + I oirm + 0x002f3c43, // n0x19fd c0x0000 (---------------) + I oum + 0x002052c2, // n0x19fe c0x0000 (---------------) + I pa + 0x0036be84, // n0x19ff c0x0000 (---------------) + I pinb + 0x002c3b43, // n0x1a00 c0x0000 (---------------) + I piw + 0x00203f02, // n0x1a01 c0x0000 (---------------) + I po + 0x002369c3, // n0x1a02 c0x0000 (---------------) + I psp + 0x0027ac04, // n0x1a03 c0x0000 (---------------) + I psse + 0x002a32c3, // n0x1a04 c0x0000 (---------------) + I pup + 0x00370ac4, // n0x1a05 c0x0000 (---------------) + I rzgw + 0x00201a02, // n0x1a06 c0x0000 (---------------) + I sa + 0x0025ebc3, // n0x1a07 c0x0000 (---------------) + I sdn + 0x002206c3, // n0x1a08 c0x0000 (---------------) + I sko + 0x00201102, // n0x1a09 c0x0000 (---------------) + I so + 0x002ceec2, // n0x1a0a c0x0000 (---------------) + I sr + 0x0030afc9, // n0x1a0b c0x0000 (---------------) + I starostwo + 0x00205082, // n0x1a0c c0x0000 (---------------) + I ug + 0x0031c4c4, // n0x1a0d c0x0000 (---------------) + I ugim + 0x00209802, // n0x1a0e c0x0000 (---------------) + I um + 0x0020b6c4, // n0x1a0f c0x0000 (---------------) + I umig + 0x0026a844, // n0x1a10 c0x0000 (---------------) + I upow + 0x00243ec4, // n0x1a11 c0x0000 (---------------) + I uppo + 0x00209f42, // n0x1a12 c0x0000 (---------------) + I us + 0x0022f3c2, // n0x1a13 c0x0000 (---------------) + I uw + 0x0020c583, // n0x1a14 c0x0000 (---------------) + I uzs + 0x00345983, // n0x1a15 c0x0000 (---------------) + I wif + 0x002310c4, // n0x1a16 c0x0000 (---------------) + I wiih + 0x00270804, // n0x1a17 c0x0000 (---------------) + I winb + 0x002b8984, // n0x1a18 c0x0000 (---------------) + I wios + 0x002c9784, // n0x1a19 c0x0000 (---------------) + I witd + 0x0030b383, // n0x1a1a c0x0000 (---------------) + I wiw + 0x002725c3, // n0x1a1b c0x0000 (---------------) + I wsa + 0x0031be44, // n0x1a1c c0x0000 (---------------) + I wskr + 0x002e3d44, // n0x1a1d c0x0000 (---------------) + I wuoz + 0x002e4546, // n0x1a1e c0x0000 (---------------) + I wzmiuw + 0x002c2042, // n0x1a1f c0x0000 (---------------) + I zp + 0x00200742, // n0x1a20 c0x0000 (---------------) + I co + 0x002d75c3, // n0x1a21 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1a22 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1a23 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1a24 c0x0000 (---------------) + I org + 0x00201e82, // n0x1a25 c0x0000 (---------------) + I ac + 0x00310603, // n0x1a26 c0x0000 (---------------) + I biz + 0x00222ac3, // n0x1a27 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1a28 c0x0000 (---------------) + I edu + 0x00208883, // n0x1a29 c0x0000 (---------------) + I est + 0x0021e283, // n0x1a2a c0x0000 (---------------) + I gov + 0x00200304, // n0x1a2b c0x0000 (---------------) + I info + 0x0030b284, // n0x1a2c c0x0000 (---------------) + I isla + 0x00298944, // n0x1a2d c0x0000 (---------------) + I name + 0x002170c3, // n0x1a2e c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1a2f c0x0000 (---------------) + I org + 0x00218243, // n0x1a30 c0x0000 (---------------) + I pro + 0x002cc284, // n0x1a31 c0x0000 (---------------) + I prof + 0x002f2783, // n0x1a32 c0x0000 (---------------) + I aca + 0x0020c103, // n0x1a33 c0x0000 (---------------) + I bar + 0x0025b403, // n0x1a34 
c0x0000 (---------------) + I cpa + 0x002674c3, // n0x1a35 c0x0000 (---------------) + I eng + 0x0029d803, // n0x1a36 c0x0000 (---------------) + I jur + 0x00253883, // n0x1a37 c0x0000 (---------------) + I law + 0x0020b403, // n0x1a38 c0x0000 (---------------) + I med + 0x00222ac3, // n0x1a39 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1a3a c0x0000 (---------------) + I edu + 0x0021e283, // n0x1a3b c0x0000 (---------------) + I gov + 0x002170c3, // n0x1a3c c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1a3d c0x0000 (---------------) + I org + 0x002c6b43, // n0x1a3e c0x0000 (---------------) + I plo + 0x00223f83, // n0x1a3f c0x0000 (---------------) + I sec + 0x000e4188, // n0x1a40 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1a41 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1a42 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1a43 c0x0000 (---------------) + I gov + 0x00238c03, // n0x1a44 c0x0000 (---------------) + I int + 0x002170c3, // n0x1a45 c0x0000 (---------------) + I net + 0x0022cb84, // n0x1a46 c0x0000 (---------------) + I nome + 0x0021dcc3, // n0x1a47 c0x0000 (---------------) + I org + 0x00296544, // n0x1a48 c0x0000 (---------------) + I publ + 0x00251645, // n0x1a49 c0x0000 (---------------) + I belau + 0x00200742, // n0x1a4a c0x0000 (---------------) + I co + 0x00203fc2, // n0x1a4b c0x0000 (---------------) + I ed + 0x00202342, // n0x1a4c c0x0000 (---------------) + I go + 0x00201082, // n0x1a4d c0x0000 (---------------) + I ne + 0x00200c42, // n0x1a4e c0x0000 (---------------) + I or + 0x00222ac3, // n0x1a4f c0x0000 (---------------) + I com + 0x00228d44, // n0x1a50 c0x0000 (---------------) + I coop + 0x002d75c3, // n0x1a51 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1a52 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1a53 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1a54 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1a55 c0x0000 (---------------) + I org + 0x000e4188, // n0x1a56 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1a57 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1a58 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1a59 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1a5a c0x0000 (---------------) + I mil + 0x00298944, // n0x1a5b c0x0000 (---------------) + I name + 0x002170c3, // n0x1a5c c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1a5d c0x0000 (---------------) + I org + 0x00206103, // n0x1a5e c0x0000 (---------------) + I sch + 0x002729c4, // n0x1a5f c0x0000 (---------------) + I asso + 0x000e4188, // n0x1a60 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1a61 c0x0000 (---------------) + I com + 0x00207cc3, // n0x1a62 c0x0000 (---------------) + I nom + 0x00246584, // n0x1a63 c0x0000 (---------------) + I arts + 0x000e4188, // n0x1a64 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1a65 c0x0000 (---------------) + I com + 0x00238544, // n0x1a66 c0x0000 (---------------) + I firm + 0x00200304, // n0x1a67 c0x0000 (---------------) + I info + 0x00207cc3, // n0x1a68 c0x0000 (---------------) + I nom + 0x00200e02, // n0x1a69 c0x0000 (---------------) + I nt + 0x0021dcc3, // n0x1a6a c0x0000 (---------------) + I org + 0x002e6343, // n0x1a6b c0x0000 (---------------) + I rec + 0x002cf4c5, // n0x1a6c c0x0000 (---------------) + I store + 0x00208902, // n0x1a6d c0x0000 (---------------) + I tm + 0x002e3e83, // n0x1a6e c0x0000 (---------------) + I www + 0x00201e82, // n0x1a6f c0x0000 (---------------) + I ac + 0x000e4188, // 
n0x1a70 c0x0000 (---------------) + blogspot + 0x00200742, // n0x1a71 c0x0000 (---------------) + I co + 0x002d75c3, // n0x1a72 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1a73 c0x0000 (---------------) + I gov + 0x00200242, // n0x1a74 c0x0000 (---------------) + I in + 0x0021dcc3, // n0x1a75 c0x0000 (---------------) + I org + 0x00201e82, // n0x1a76 c0x0000 (---------------) + I ac + 0x00382c47, // n0x1a77 c0x0000 (---------------) + I adygeya + 0x0028ee05, // n0x1a78 c0x0000 (---------------) + I altai + 0x00248504, // n0x1a79 c0x0000 (---------------) + I amur + 0x0036ab06, // n0x1a7a c0x0000 (---------------) + I amursk + 0x002f314b, // n0x1a7b c0x0000 (---------------) + I arkhangelsk + 0x002e9b49, // n0x1a7c c0x0000 (---------------) + I astrakhan + 0x002bec86, // n0x1a7d c0x0000 (---------------) + I baikal + 0x00309449, // n0x1a7e c0x0000 (---------------) + I bashkiria + 0x002c1a08, // n0x1a7f c0x0000 (---------------) + I belgorod + 0x00200503, // n0x1a80 c0x0000 (---------------) + I bir + 0x000e4188, // n0x1a81 c0x0000 (---------------) + blogspot + 0x0021cc07, // n0x1a82 c0x0000 (---------------) + I bryansk + 0x0021bc88, // n0x1a83 c0x0000 (---------------) + I buryatia + 0x00355dc3, // n0x1a84 c0x0000 (---------------) + I cbg + 0x002503c4, // n0x1a85 c0x0000 (---------------) + I chel + 0x002539cb, // n0x1a86 c0x0000 (---------------) + I chelyabinsk + 0x002932c5, // n0x1a87 c0x0000 (---------------) + I chita + 0x002ab708, // n0x1a88 c0x0000 (---------------) + I chukotka + 0x00313689, // n0x1a89 c0x0000 (---------------) + I chuvashia + 0x00249083, // n0x1a8a c0x0000 (---------------) + I cmw + 0x00222ac3, // n0x1a8b c0x0000 (---------------) + I com + 0x00309888, // n0x1a8c c0x0000 (---------------) + I dagestan + 0x002d3887, // n0x1a8d c0x0000 (---------------) + I dudinka + 0x00311a46, // n0x1a8e c0x0000 (---------------) + I e-burg + 0x002d75c3, // n0x1a8f c0x0000 (---------------) + I edu + 0x0032aa47, // n0x1a90 c0x0000 (---------------) + I fareast + 0x0021e283, // n0x1a91 c0x0000 (---------------) + I gov + 0x003789c6, // n0x1a92 c0x0000 (---------------) + I grozny + 0x00238c03, // n0x1a93 c0x0000 (---------------) + I int + 0x00220587, // n0x1a94 c0x0000 (---------------) + I irkutsk + 0x002f2a47, // n0x1a95 c0x0000 (---------------) + I ivanovo + 0x00379007, // n0x1a96 c0x0000 (---------------) + I izhevsk + 0x00321805, // n0x1a97 c0x0000 (---------------) + I jamal + 0x00202683, // n0x1a98 c0x0000 (---------------) + I jar + 0x002a248b, // n0x1a99 c0x0000 (---------------) + I joshkar-ola + 0x00312348, // n0x1a9a c0x0000 (---------------) + I k-uralsk + 0x002201c8, // n0x1a9b c0x0000 (---------------) + I kalmykia + 0x00305446, // n0x1a9c c0x0000 (---------------) + I kaluga + 0x0023a649, // n0x1a9d c0x0000 (---------------) + I kamchatka + 0x00365ac7, // n0x1a9e c0x0000 (---------------) + I karelia + 0x002dec85, // n0x1a9f c0x0000 (---------------) + I kazan + 0x0022d2c4, // n0x1aa0 c0x0000 (---------------) + I kchr + 0x00271548, // n0x1aa1 c0x0000 (---------------) + I kemerovo + 0x00235f8a, // n0x1aa2 c0x0000 (---------------) + I khabarovsk + 0x002361c9, // n0x1aa3 c0x0000 (---------------) + I khakassia + 0x0025be03, // n0x1aa4 c0x0000 (---------------) + I khv + 0x00254bc5, // n0x1aa5 c0x0000 (---------------) + I kirov + 0x00325683, // n0x1aa6 c0x0000 (---------------) + I kms + 0x002b9586, // n0x1aa7 c0x0000 (---------------) + I koenig + 0x003935c4, // n0x1aa8 c0x0000 (---------------) + I komi + 0x002e2f48, // n0x1aa9 c0x0000 
(---------------) + I kostroma + 0x0039334b, // n0x1aaa c0x0000 (---------------) + I krasnoyarsk + 0x00331845, // n0x1aab c0x0000 (---------------) + I kuban + 0x002a4ac6, // n0x1aac c0x0000 (---------------) + I kurgan + 0x002a8e85, // n0x1aad c0x0000 (---------------) + I kursk + 0x002a9608, // n0x1aae c0x0000 (---------------) + I kustanai + 0x002aa1c7, // n0x1aaf c0x0000 (---------------) + I kuzbass + 0x003414c7, // n0x1ab0 c0x0000 (---------------) + I lipetsk + 0x0032ddc7, // n0x1ab1 c0x0000 (---------------) + I magadan + 0x00271948, // n0x1ab2 c0x0000 (---------------) + I magnitka + 0x00216504, // n0x1ab3 c0x0000 (---------------) + I mari + 0x00216507, // n0x1ab4 c0x0000 (---------------) + I mari-el + 0x003529c6, // n0x1ab5 c0x0000 (---------------) + I marine + 0x0023fa03, // n0x1ab6 c0x0000 (---------------) + I mil + 0x002b4208, // n0x1ab7 c0x0000 (---------------) + I mordovia + 0x00233f43, // n0x1ab8 c0x0000 (---------------) + I msk + 0x002bbd88, // n0x1ab9 c0x0000 (---------------) + I murmansk + 0x002bf205, // n0x1aba c0x0000 (---------------) + I mytis + 0x00332688, // n0x1abb c0x0000 (---------------) + I nakhodka + 0x00375747, // n0x1abc c0x0000 (---------------) + I nalchik + 0x002170c3, // n0x1abd c0x0000 (---------------) + I net + 0x002effc3, // n0x1abe c0x0000 (---------------) + I nkz + 0x00278ec4, // n0x1abf c0x0000 (---------------) + I nnov + 0x00228f07, // n0x1ac0 c0x0000 (---------------) + I norilsk + 0x00201343, // n0x1ac1 c0x0000 (---------------) + I nov + 0x002f2b0b, // n0x1ac2 c0x0000 (---------------) + I novosibirsk + 0x00217803, // n0x1ac3 c0x0000 (---------------) + I nsk + 0x00233f04, // n0x1ac4 c0x0000 (---------------) + I omsk + 0x002cf548, // n0x1ac5 c0x0000 (---------------) + I orenburg + 0x0021dcc3, // n0x1ac6 c0x0000 (---------------) + I org + 0x002c98c5, // n0x1ac7 c0x0000 (---------------) + I oryol + 0x002e3305, // n0x1ac8 c0x0000 (---------------) + I oskol + 0x00332586, // n0x1ac9 c0x0000 (---------------) + I palana + 0x00395385, // n0x1aca c0x0000 (---------------) + I penza + 0x002bf5c4, // n0x1acb c0x0000 (---------------) + I perm + 0x00207742, // n0x1acc c0x0000 (---------------) + I pp + 0x002ce183, // n0x1acd c0x0000 (---------------) + I ptz + 0x002be20a, // n0x1ace c0x0000 (---------------) + I pyatigorsk + 0x002ef043, // n0x1acf c0x0000 (---------------) + I rnd + 0x0030c189, // n0x1ad0 c0x0000 (---------------) + I rubtsovsk + 0x003227c6, // n0x1ad1 c0x0000 (---------------) + I ryazan + 0x00217648, // n0x1ad2 c0x0000 (---------------) + I sakhalin + 0x0027a746, // n0x1ad3 c0x0000 (---------------) + I samara + 0x00218487, // n0x1ad4 c0x0000 (---------------) + I saratov + 0x002b7c08, // n0x1ad5 c0x0000 (---------------) + I simbirsk + 0x002ae3c8, // n0x1ad6 c0x0000 (---------------) + I smolensk + 0x002c5603, // n0x1ad7 c0x0000 (---------------) + I snz + 0x00268e43, // n0x1ad8 c0x0000 (---------------) + I spb + 0x002187c9, // n0x1ad9 c0x0000 (---------------) + I stavropol + 0x002daf83, // n0x1ada c0x0000 (---------------) + I stv + 0x002d0c06, // n0x1adb c0x0000 (---------------) + I surgut + 0x0028ea86, // n0x1adc c0x0000 (---------------) + I syzran + 0x002fe106, // n0x1add c0x0000 (---------------) + I tambov + 0x0031df09, // n0x1ade c0x0000 (---------------) + I tatarstan + 0x002e3fc4, // n0x1adf c0x0000 (---------------) + I test + 0x0020fe03, // n0x1ae0 c0x0000 (---------------) + I tom + 0x00233ec5, // n0x1ae1 c0x0000 (---------------) + I tomsk + 0x0038c2c9, // n0x1ae2 c0x0000 (---------------) + I 
tsaritsyn + 0x00220683, // n0x1ae3 c0x0000 (---------------) + I tsk + 0x002d78c4, // n0x1ae4 c0x0000 (---------------) + I tula + 0x002d9504, // n0x1ae5 c0x0000 (---------------) + I tuva + 0x00248304, // n0x1ae6 c0x0000 (---------------) + I tver + 0x002a7d06, // n0x1ae7 c0x0000 (---------------) + I tyumen + 0x00309dc3, // n0x1ae8 c0x0000 (---------------) + I udm + 0x00309dc8, // n0x1ae9 c0x0000 (---------------) + I udmurtia + 0x00245a08, // n0x1aea c0x0000 (---------------) + I ulan-ude + 0x002fe246, // n0x1aeb c0x0000 (---------------) + I vdonsk + 0x002dea8b, // n0x1aec c0x0000 (---------------) + I vladikavkaz + 0x002dedc8, // n0x1aed c0x0000 (---------------) + I vladimir + 0x002defcb, // n0x1aee c0x0000 (---------------) + I vladivostok + 0x002df749, // n0x1aef c0x0000 (---------------) + I volgograd + 0x002e1147, // n0x1af0 c0x0000 (---------------) + I vologda + 0x002e1e48, // n0x1af1 c0x0000 (---------------) + I voronezh + 0x002e2b03, // n0x1af2 c0x0000 (---------------) + I vrn + 0x0037fec6, // n0x1af3 c0x0000 (---------------) + I vyatka + 0x0020d4c7, // n0x1af4 c0x0000 (---------------) + I yakutia + 0x00284405, // n0x1af5 c0x0000 (---------------) + I yamal + 0x00332d09, // n0x1af6 c0x0000 (---------------) + I yaroslavl + 0x0036e1cd, // n0x1af7 c0x0000 (---------------) + I yekaterinburg + 0x00217491, // n0x1af8 c0x0000 (---------------) + I yuzhno-sakhalinsk + 0x0029adc5, // n0x1af9 c0x0000 (---------------) + I zgrad + 0x00201e82, // n0x1afa c0x0000 (---------------) + I ac + 0x00200742, // n0x1afb c0x0000 (---------------) + I co + 0x00222ac3, // n0x1afc c0x0000 (---------------) + I com + 0x002d75c3, // n0x1afd c0x0000 (---------------) + I edu + 0x003579c4, // n0x1afe c0x0000 (---------------) + I gouv + 0x0021e283, // n0x1aff c0x0000 (---------------) + I gov + 0x00238c03, // n0x1b00 c0x0000 (---------------) + I int + 0x0023fa03, // n0x1b01 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1b02 c0x0000 (---------------) + I net + 0x00222ac3, // n0x1b03 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b04 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b05 c0x0000 (---------------) + I gov + 0x0020b403, // n0x1b06 c0x0000 (---------------) + I med + 0x002170c3, // n0x1b07 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b08 c0x0000 (---------------) + I org + 0x00296543, // n0x1b09 c0x0000 (---------------) + I pub + 0x00206103, // n0x1b0a c0x0000 (---------------) + I sch + 0x00222ac3, // n0x1b0b c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b0c c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b0d c0x0000 (---------------) + I gov + 0x002170c3, // n0x1b0e c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b0f c0x0000 (---------------) + I org + 0x00222ac3, // n0x1b10 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b11 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b12 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1b13 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b14 c0x0000 (---------------) + I org + 0x00222ac3, // n0x1b15 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b16 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b17 c0x0000 (---------------) + I gov + 0x00200304, // n0x1b18 c0x0000 (---------------) + I info + 0x0020b403, // n0x1b19 c0x0000 (---------------) + I med + 0x002170c3, // n0x1b1a c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b1b c0x0000 (---------------) + I org + 0x0020bf42, // n0x1b1c c0x0000 (---------------) + I tv + 0x00200181, // n0x1b1d 
c0x0000 (---------------) + I a + 0x00201e82, // n0x1b1e c0x0000 (---------------) + I ac + 0x00200001, // n0x1b1f c0x0000 (---------------) + I b + 0x002fbc02, // n0x1b20 c0x0000 (---------------) + I bd + 0x000e4188, // n0x1b21 c0x0000 (---------------) + blogspot + 0x00213b05, // n0x1b22 c0x0000 (---------------) + I brand + 0x00200741, // n0x1b23 c0x0000 (---------------) + I c + 0x00022ac3, // n0x1b24 c0x0000 (---------------) + com + 0x002005c1, // n0x1b25 c0x0000 (---------------) + I d + 0x00200701, // n0x1b26 c0x0000 (---------------) + I e + 0x00200381, // n0x1b27 c0x0000 (---------------) + I f + 0x00235ec2, // n0x1b28 c0x0000 (---------------) + I fh + 0x00235ec4, // n0x1b29 c0x0000 (---------------) + I fhsk + 0x00351283, // n0x1b2a c0x0000 (---------------) + I fhv + 0x00200401, // n0x1b2b c0x0000 (---------------) + I g + 0x00200201, // n0x1b2c c0x0000 (---------------) + I h + 0x00200041, // n0x1b2d c0x0000 (---------------) + I i + 0x00201001, // n0x1b2e c0x0000 (---------------) + I k + 0x002c8587, // n0x1b2f c0x0000 (---------------) + I komforb + 0x002a8f8f, // n0x1b30 c0x0000 (---------------) + I kommunalforbund + 0x002b1a86, // n0x1b31 c0x0000 (---------------) + I komvux + 0x002008c1, // n0x1b32 c0x0000 (---------------) + I l + 0x00323b46, // n0x1b33 c0x0000 (---------------) + I lanbib + 0x002000c1, // n0x1b34 c0x0000 (---------------) + I m + 0x00200281, // n0x1b35 c0x0000 (---------------) + I n + 0x002f460e, // n0x1b36 c0x0000 (---------------) + I naturbruksgymn + 0x00200081, // n0x1b37 c0x0000 (---------------) + I o + 0x0021dcc3, // n0x1b38 c0x0000 (---------------) + I org + 0x00200b01, // n0x1b39 c0x0000 (---------------) + I p + 0x00290b05, // n0x1b3a c0x0000 (---------------) + I parti + 0x00207742, // n0x1b3b c0x0000 (---------------) + I pp + 0x0029abc5, // n0x1b3c c0x0000 (---------------) + I press + 0x00200581, // n0x1b3d c0x0000 (---------------) + I r + 0x002001c1, // n0x1b3e c0x0000 (---------------) + I s + 0x00200141, // n0x1b3f c0x0000 (---------------) + I t + 0x00208902, // n0x1b40 c0x0000 (---------------) + I tm + 0x00200101, // n0x1b41 c0x0000 (---------------) + I u + 0x00202541, // n0x1b42 c0x0000 (---------------) + I w + 0x00203ec1, // n0x1b43 c0x0000 (---------------) + I x + 0x00200801, // n0x1b44 c0x0000 (---------------) + I y + 0x00201901, // n0x1b45 c0x0000 (---------------) + I z + 0x000e4188, // n0x1b46 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1b47 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b48 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b49 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1b4a c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b4b c0x0000 (---------------) + I org + 0x00214943, // n0x1b4c c0x0000 (---------------) + I per + 0x00222ac3, // n0x1b4d c0x0000 (---------------) + I com + 0x0021e283, // n0x1b4e c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1b4f c0x0000 (---------------) + I mil + 0x002170c3, // n0x1b50 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b51 c0x0000 (---------------) + I org + 0x014c56c8, // n0x1b52 c0x0005 (---------------)* o platform + 0x000e4188, // n0x1b53 c0x0000 (---------------) + blogspot + 0x000e4188, // n0x1b54 c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1b55 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b56 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b57 c0x0000 (---------------) + I gov + 0x002170c3, // n0x1b58 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b59 c0x0000 
(---------------) + I org + 0x00200603, // n0x1b5a c0x0000 (---------------) + I art + 0x000e4188, // n0x1b5b c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1b5c c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b5d c0x0000 (---------------) + I edu + 0x003579c4, // n0x1b5e c0x0000 (---------------) + I gouv + 0x0021dcc3, // n0x1b5f c0x0000 (---------------) + I org + 0x0028acc5, // n0x1b60 c0x0000 (---------------) + I perso + 0x0029f044, // n0x1b61 c0x0000 (---------------) + I univ + 0x00222ac3, // n0x1b62 c0x0000 (---------------) + I com + 0x002170c3, // n0x1b63 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b64 c0x0000 (---------------) + I org + 0x00200742, // n0x1b65 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1b66 c0x0000 (---------------) + I com + 0x00224f09, // n0x1b67 c0x0000 (---------------) + I consulado + 0x002d75c3, // n0x1b68 c0x0000 (---------------) + I edu + 0x00271109, // n0x1b69 c0x0000 (---------------) + I embaixada + 0x0021e283, // n0x1b6a c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1b6b c0x0000 (---------------) + I mil + 0x002170c3, // n0x1b6c c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b6d c0x0000 (---------------) + I org + 0x002cb848, // n0x1b6e c0x0000 (---------------) + I principe + 0x0020fd47, // n0x1b6f c0x0000 (---------------) + I saotome + 0x002cf4c5, // n0x1b70 c0x0000 (---------------) + I store + 0x00382c47, // n0x1b71 c0x0000 (---------------) + I adygeya + 0x002f314b, // n0x1b72 c0x0000 (---------------) + I arkhangelsk + 0x00389cc8, // n0x1b73 c0x0000 (---------------) + I balashov + 0x00309449, // n0x1b74 c0x0000 (---------------) + I bashkiria + 0x0021cc07, // n0x1b75 c0x0000 (---------------) + I bryansk + 0x00309888, // n0x1b76 c0x0000 (---------------) + I dagestan + 0x003789c6, // n0x1b77 c0x0000 (---------------) + I grozny + 0x002f2a47, // n0x1b78 c0x0000 (---------------) + I ivanovo + 0x002201c8, // n0x1b79 c0x0000 (---------------) + I kalmykia + 0x00305446, // n0x1b7a c0x0000 (---------------) + I kaluga + 0x00365ac7, // n0x1b7b c0x0000 (---------------) + I karelia + 0x002361c9, // n0x1b7c c0x0000 (---------------) + I khakassia + 0x00384e89, // n0x1b7d c0x0000 (---------------) + I krasnodar + 0x002a4ac6, // n0x1b7e c0x0000 (---------------) + I kurgan + 0x00275dc5, // n0x1b7f c0x0000 (---------------) + I lenug + 0x002b4208, // n0x1b80 c0x0000 (---------------) + I mordovia + 0x00233f43, // n0x1b81 c0x0000 (---------------) + I msk + 0x002bbd88, // n0x1b82 c0x0000 (---------------) + I murmansk + 0x00375747, // n0x1b83 c0x0000 (---------------) + I nalchik + 0x00201343, // n0x1b84 c0x0000 (---------------) + I nov + 0x0032a707, // n0x1b85 c0x0000 (---------------) + I obninsk + 0x00395385, // n0x1b86 c0x0000 (---------------) + I penza + 0x002c8208, // n0x1b87 c0x0000 (---------------) + I pokrovsk + 0x00262b45, // n0x1b88 c0x0000 (---------------) + I sochi + 0x00268e43, // n0x1b89 c0x0000 (---------------) + I spb + 0x003547c9, // n0x1b8a c0x0000 (---------------) + I togliatti + 0x00295587, // n0x1b8b c0x0000 (---------------) + I troitsk + 0x002d78c4, // n0x1b8c c0x0000 (---------------) + I tula + 0x002d9504, // n0x1b8d c0x0000 (---------------) + I tuva + 0x002dea8b, // n0x1b8e c0x0000 (---------------) + I vladikavkaz + 0x002dedc8, // n0x1b8f c0x0000 (---------------) + I vladimir + 0x002e1147, // n0x1b90 c0x0000 (---------------) + I vologda + 0x00222ac3, // n0x1b91 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b92 c0x0000 (---------------) + I edu + 
0x0034eb03, // n0x1b93 c0x0000 (---------------) + I gob + 0x0021dcc3, // n0x1b94 c0x0000 (---------------) + I org + 0x00230683, // n0x1b95 c0x0000 (---------------) + I red + 0x0021e283, // n0x1b96 c0x0000 (---------------) + I gov + 0x00222ac3, // n0x1b97 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1b98 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1b99 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1b9a c0x0000 (---------------) + I mil + 0x002170c3, // n0x1b9b c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1b9c c0x0000 (---------------) + I org + 0x00201e82, // n0x1b9d c0x0000 (---------------) + I ac + 0x00200742, // n0x1b9e c0x0000 (---------------) + I co + 0x0021dcc3, // n0x1b9f c0x0000 (---------------) + I org + 0x000e4188, // n0x1ba0 c0x0000 (---------------) + blogspot + 0x00201e82, // n0x1ba1 c0x0000 (---------------) + I ac + 0x00200742, // n0x1ba2 c0x0000 (---------------) + I co + 0x00202342, // n0x1ba3 c0x0000 (---------------) + I go + 0x00200242, // n0x1ba4 c0x0000 (---------------) + I in + 0x00204802, // n0x1ba5 c0x0000 (---------------) + I mi + 0x002170c3, // n0x1ba6 c0x0000 (---------------) + I net + 0x00200c42, // n0x1ba7 c0x0000 (---------------) + I or + 0x00201e82, // n0x1ba8 c0x0000 (---------------) + I ac + 0x00310603, // n0x1ba9 c0x0000 (---------------) + I biz + 0x00200742, // n0x1baa c0x0000 (---------------) + I co + 0x00222ac3, // n0x1bab c0x0000 (---------------) + I com + 0x002d75c3, // n0x1bac c0x0000 (---------------) + I edu + 0x00202342, // n0x1bad c0x0000 (---------------) + I go + 0x0021e283, // n0x1bae c0x0000 (---------------) + I gov + 0x00238c03, // n0x1baf c0x0000 (---------------) + I int + 0x0023fa03, // n0x1bb0 c0x0000 (---------------) + I mil + 0x00298944, // n0x1bb1 c0x0000 (---------------) + I name + 0x002170c3, // n0x1bb2 c0x0000 (---------------) + I net + 0x0020e3c3, // n0x1bb3 c0x0000 (---------------) + I nic + 0x0021dcc3, // n0x1bb4 c0x0000 (---------------) + I org + 0x002e3fc4, // n0x1bb5 c0x0000 (---------------) + I test + 0x00219fc3, // n0x1bb6 c0x0000 (---------------) + I web + 0x0021e283, // n0x1bb7 c0x0000 (---------------) + I gov + 0x00200742, // n0x1bb8 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1bb9 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1bba c0x0000 (---------------) + I edu + 0x0021e283, // n0x1bbb c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1bbc c0x0000 (---------------) + I mil + 0x002170c3, // n0x1bbd c0x0000 (---------------) + I net + 0x00207cc3, // n0x1bbe c0x0000 (---------------) + I nom + 0x0021dcc3, // n0x1bbf c0x0000 (---------------) + I org + 0x002efd87, // n0x1bc0 c0x0000 (---------------) + I agrinet + 0x00222ac3, // n0x1bc1 c0x0000 (---------------) + I com + 0x00245b87, // n0x1bc2 c0x0000 (---------------) + I defense + 0x002d75c6, // n0x1bc3 c0x0000 (---------------) + I edunet + 0x00210e83, // n0x1bc4 c0x0000 (---------------) + I ens + 0x00236403, // n0x1bc5 c0x0000 (---------------) + I fin + 0x0021e283, // n0x1bc6 c0x0000 (---------------) + I gov + 0x00215703, // n0x1bc7 c0x0000 (---------------) + I ind + 0x00200304, // n0x1bc8 c0x0000 (---------------) + I info + 0x002c5104, // n0x1bc9 c0x0000 (---------------) + I intl + 0x002d4086, // n0x1bca c0x0000 (---------------) + I mincom + 0x0023ac03, // n0x1bcb c0x0000 (---------------) + I nat + 0x002170c3, // n0x1bcc c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1bcd c0x0000 (---------------) + I org + 0x0028acc5, // n0x1bce c0x0000 (---------------) + I perso + 
0x003538c4, // n0x1bcf c0x0000 (---------------) + I rnrt + 0x002e6c03, // n0x1bd0 c0x0000 (---------------) + I rns + 0x00205b83, // n0x1bd1 c0x0000 (---------------) + I rnu + 0x002ae287, // n0x1bd2 c0x0000 (---------------) + I tourism + 0x0033c485, // n0x1bd3 c0x0000 (---------------) + I turen + 0x00222ac3, // n0x1bd4 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1bd5 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1bd6 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1bd7 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1bd8 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1bd9 c0x0000 (---------------) + I org + 0x00201482, // n0x1bda c0x0000 (---------------) + I av + 0x002791c3, // n0x1bdb c0x0000 (---------------) + I bbs + 0x00251643, // n0x1bdc c0x0000 (---------------) + I bel + 0x00310603, // n0x1bdd c0x0000 (---------------) + I biz + 0x51622ac3, // n0x1bde c0x0145 (n0x1bef-n0x1bf0) + I com + 0x0022fb02, // n0x1bdf c0x0000 (---------------) + I dr + 0x002d75c3, // n0x1be0 c0x0000 (---------------) + I edu + 0x002012c3, // n0x1be1 c0x0000 (---------------) + I gen + 0x0021e283, // n0x1be2 c0x0000 (---------------) + I gov + 0x00200304, // n0x1be3 c0x0000 (---------------) + I info + 0x00312503, // n0x1be4 c0x0000 (---------------) + I k12 + 0x0023df03, // n0x1be5 c0x0000 (---------------) + I kep + 0x0023fa03, // n0x1be6 c0x0000 (---------------) + I mil + 0x00298944, // n0x1be7 c0x0000 (---------------) + I name + 0x51a1c742, // n0x1be8 c0x0146 (n0x1bf0-n0x1bf1) + I nc + 0x002170c3, // n0x1be9 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1bea c0x0000 (---------------) + I org + 0x00218943, // n0x1beb c0x0000 (---------------) + I pol + 0x0022ba83, // n0x1bec c0x0000 (---------------) + I tel + 0x0020bf42, // n0x1bed c0x0000 (---------------) + I tv + 0x00219fc3, // n0x1bee c0x0000 (---------------) + I web + 0x000e4188, // n0x1bef c0x0000 (---------------) + blogspot + 0x0021e283, // n0x1bf0 c0x0000 (---------------) + I gov + 0x002751c4, // n0x1bf1 c0x0000 (---------------) + I aero + 0x00310603, // n0x1bf2 c0x0000 (---------------) + I biz + 0x00200742, // n0x1bf3 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1bf4 c0x0000 (---------------) + I com + 0x00228d44, // n0x1bf5 c0x0000 (---------------) + I coop + 0x002d75c3, // n0x1bf6 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1bf7 c0x0000 (---------------) + I gov + 0x00200304, // n0x1bf8 c0x0000 (---------------) + I info + 0x00238c03, // n0x1bf9 c0x0000 (---------------) + I int + 0x002a5d04, // n0x1bfa c0x0000 (---------------) + I jobs + 0x00203604, // n0x1bfb c0x0000 (---------------) + I mobi + 0x002bd646, // n0x1bfc c0x0000 (---------------) + I museum + 0x00298944, // n0x1bfd c0x0000 (---------------) + I name + 0x002170c3, // n0x1bfe c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1bff c0x0000 (---------------) + I org + 0x00218243, // n0x1c00 c0x0000 (---------------) + I pro + 0x0027f186, // n0x1c01 c0x0000 (---------------) + I travel + 0x00041e4b, // n0x1c02 c0x0000 (---------------) + better-than + 0x00009ac6, // n0x1c03 c0x0000 (---------------) + dyndns + 0x00019e0a, // n0x1c04 c0x0000 (---------------) + on-the-web + 0x00171e8a, // n0x1c05 c0x0000 (---------------) + worse-than + 0x000e4188, // n0x1c06 c0x0000 (---------------) + blogspot + 0x002752c4, // n0x1c07 c0x0000 (---------------) + I club + 0x00222ac3, // n0x1c08 c0x0000 (---------------) + I com + 0x003105c4, // n0x1c09 c0x0000 (---------------) + I ebiz + 0x002d75c3, // n0x1c0a c0x0000 
(---------------) + I edu + 0x00212084, // n0x1c0b c0x0000 (---------------) + I game + 0x0021e283, // n0x1c0c c0x0000 (---------------) + I gov + 0x00300b83, // n0x1c0d c0x0000 (---------------) + I idv + 0x0023fa03, // n0x1c0e c0x0000 (---------------) + I mil + 0x002170c3, // n0x1c0f c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1c10 c0x0000 (---------------) + I org + 0x0030d70b, // n0x1c11 c0x0000 (---------------) + I xn--czrw28b + 0x003867ca, // n0x1c12 c0x0000 (---------------) + I xn--uc0atv + 0x0039604c, // n0x1c13 c0x0000 (---------------) + I xn--zf0ao64a + 0x00201e82, // n0x1c14 c0x0000 (---------------) + I ac + 0x00200742, // n0x1c15 c0x0000 (---------------) + I co + 0x00202342, // n0x1c16 c0x0000 (---------------) + I go + 0x00294945, // n0x1c17 c0x0000 (---------------) + I hotel + 0x00200304, // n0x1c18 c0x0000 (---------------) + I info + 0x00208942, // n0x1c19 c0x0000 (---------------) + I me + 0x0023fa03, // n0x1c1a c0x0000 (---------------) + I mil + 0x00203604, // n0x1c1b c0x0000 (---------------) + I mobi + 0x00201082, // n0x1c1c c0x0000 (---------------) + I ne + 0x00200c42, // n0x1c1d c0x0000 (---------------) + I or + 0x00200982, // n0x1c1e c0x0000 (---------------) + I sc + 0x0020bf42, // n0x1c1f c0x0000 (---------------) + I tv + 0x0029d289, // n0x1c20 c0x0000 (---------------) + I cherkassy + 0x0028e908, // n0x1c21 c0x0000 (---------------) + I cherkasy + 0x00259889, // n0x1c22 c0x0000 (---------------) + I chernigov + 0x0025f489, // n0x1c23 c0x0000 (---------------) + I chernihiv + 0x0026b44a, // n0x1c24 c0x0000 (---------------) + I chernivtsi + 0x00367e8a, // n0x1c25 c0x0000 (---------------) + I chernovtsy + 0x00201782, // n0x1c26 c0x0000 (---------------) + I ck + 0x002211c2, // n0x1c27 c0x0000 (---------------) + I cn + 0x00200742, // n0x1c28 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1c29 c0x0000 (---------------) + I com + 0x0020c502, // n0x1c2a c0x0000 (---------------) + I cr + 0x00231c86, // n0x1c2b c0x0000 (---------------) + I crimea + 0x0033f802, // n0x1c2c c0x0000 (---------------) + I cv + 0x00209b82, // n0x1c2d c0x0000 (---------------) + I dn + 0x0036ad4e, // n0x1c2e c0x0000 (---------------) + I dnepropetrovsk + 0x0025ec0e, // n0x1c2f c0x0000 (---------------) + I dnipropetrovsk + 0x0026b2c7, // n0x1c30 c0x0000 (---------------) + I dominic + 0x003409c7, // n0x1c31 c0x0000 (---------------) + I donetsk + 0x00238b42, // n0x1c32 c0x0000 (---------------) + I dp + 0x002d75c3, // n0x1c33 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1c34 c0x0000 (---------------) + I gov + 0x00239ec2, // n0x1c35 c0x0000 (---------------) + I if + 0x00200242, // n0x1c36 c0x0000 (---------------) + I in + 0x0022cf4f, // n0x1c37 c0x0000 (---------------) + I ivano-frankivsk + 0x002176c2, // n0x1c38 c0x0000 (---------------) + I kh + 0x00244d07, // n0x1c39 c0x0000 (---------------) + I kharkiv + 0x0024cb07, // n0x1c3a c0x0000 (---------------) + I kharkov + 0x00250087, // n0x1c3b c0x0000 (---------------) + I kherson + 0x0025304c, // n0x1c3c c0x0000 (---------------) + I khmelnitskiy + 0x002581cc, // n0x1c3d c0x0000 (---------------) + I khmelnytskyi + 0x0037aac4, // n0x1c3e c0x0000 (---------------) + I kiev + 0x00254bca, // n0x1c3f c0x0000 (---------------) + I kirovograd + 0x00268d82, // n0x1c40 c0x0000 (---------------) + I km + 0x002034c2, // n0x1c41 c0x0000 (---------------) + I kr + 0x0029ed84, // n0x1c42 c0x0000 (---------------) + I krym + 0x00238082, // n0x1c43 c0x0000 (---------------) + I ks + 0x002aa8c2, // n0x1c44 c0x0000 
(---------------) + I kv + 0x00258404, // n0x1c45 c0x0000 (---------------) + I kyiv + 0x0020e4c2, // n0x1c46 c0x0000 (---------------) + I lg + 0x00205ec2, // n0x1c47 c0x0000 (---------------) + I lt + 0x003054c7, // n0x1c48 c0x0000 (---------------) + I lugansk + 0x002e6745, // n0x1c49 c0x0000 (---------------) + I lutsk + 0x00227f02, // n0x1c4a c0x0000 (---------------) + I lv + 0x0022cec4, // n0x1c4b c0x0000 (---------------) + I lviv + 0x00356d82, // n0x1c4c c0x0000 (---------------) + I mk + 0x002f28c8, // n0x1c4d c0x0000 (---------------) + I mykolaiv + 0x002170c3, // n0x1c4e c0x0000 (---------------) + I net + 0x00342248, // n0x1c4f c0x0000 (---------------) + I nikolaev + 0x00200782, // n0x1c50 c0x0000 (---------------) + I od + 0x00227505, // n0x1c51 c0x0000 (---------------) + I odesa + 0x0035fa46, // n0x1c52 c0x0000 (---------------) + I odessa + 0x0021dcc3, // n0x1c53 c0x0000 (---------------) + I org + 0x00201e02, // n0x1c54 c0x0000 (---------------) + I pl + 0x002c8f07, // n0x1c55 c0x0000 (---------------) + I poltava + 0x00207742, // n0x1c56 c0x0000 (---------------) + I pp + 0x002cba85, // n0x1c57 c0x0000 (---------------) + I rivne + 0x00208085, // n0x1c58 c0x0000 (---------------) + I rovno + 0x00211fc2, // n0x1c59 c0x0000 (---------------) + I rv + 0x002046c2, // n0x1c5a c0x0000 (---------------) + I sb + 0x0035e2ca, // n0x1c5b c0x0000 (---------------) + I sebastopol + 0x0024688a, // n0x1c5c c0x0000 (---------------) + I sevastopol + 0x0023f582, // n0x1c5d c0x0000 (---------------) + I sm + 0x00308004, // n0x1c5e c0x0000 (---------------) + I sumy + 0x00203202, // n0x1c5f c0x0000 (---------------) + I te + 0x0036c988, // n0x1c60 c0x0000 (---------------) + I ternopil + 0x002018c2, // n0x1c61 c0x0000 (---------------) + I uz + 0x0036a1c8, // n0x1c62 c0x0000 (---------------) + I uzhgorod + 0x002dc087, // n0x1c63 c0x0000 (---------------) + I vinnica + 0x002dc4c9, // n0x1c64 c0x0000 (---------------) + I vinnytsia + 0x00208102, // n0x1c65 c0x0000 (---------------) + I vn + 0x002e1c05, // n0x1c66 c0x0000 (---------------) + I volyn + 0x0028edc5, // n0x1c67 c0x0000 (---------------) + I yalta + 0x002b214b, // n0x1c68 c0x0000 (---------------) + I zaporizhzhe + 0x002b2b8c, // n0x1c69 c0x0000 (---------------) + I zaporizhzhia + 0x00220408, // n0x1c6a c0x0000 (---------------) + I zhitomir + 0x002e1fc8, // n0x1c6b c0x0000 (---------------) + I zhytomyr + 0x002c2042, // n0x1c6c c0x0000 (---------------) + I zp + 0x0020fa82, // n0x1c6d c0x0000 (---------------) + I zt + 0x00201e82, // n0x1c6e c0x0000 (---------------) + I ac + 0x000e4188, // n0x1c6f c0x0000 (---------------) + blogspot + 0x00200742, // n0x1c70 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1c71 c0x0000 (---------------) + I com + 0x00202342, // n0x1c72 c0x0000 (---------------) + I go + 0x00201082, // n0x1c73 c0x0000 (---------------) + I ne + 0x00200c42, // n0x1c74 c0x0000 (---------------) + I or + 0x0021dcc3, // n0x1c75 c0x0000 (---------------) + I org + 0x00200982, // n0x1c76 c0x0000 (---------------) + I sc + 0x00201e82, // n0x1c77 c0x0000 (---------------) + I ac + 0x53a00742, // n0x1c78 c0x014e (n0x1c82-n0x1c83) + I co + 0x53e1e283, // n0x1c79 c0x014f (n0x1c83-n0x1c84) + I gov + 0x003413c3, // n0x1c7a c0x0000 (---------------) + I ltd + 0x00208942, // n0x1c7b c0x0000 (---------------) + I me + 0x002170c3, // n0x1c7c c0x0000 (---------------) + I net + 0x002037c3, // n0x1c7d c0x0000 (---------------) + I nhs + 0x0021dcc3, // n0x1c7e c0x0000 (---------------) + I org + 0x002c65c3, // n0x1c7f 
c0x0000 (---------------) + I plc + 0x00218946, // n0x1c80 c0x0000 (---------------) + I police + 0x01606103, // n0x1c81 c0x0005 (---------------)* o I sch + 0x000e4188, // n0x1c82 c0x0000 (---------------) + blogspot + 0x00043bc7, // n0x1c83 c0x0000 (---------------) + service + 0x54601a42, // n0x1c84 c0x0151 (n0x1cc3-n0x1cc6) + I ak + 0x54a00882, // n0x1c85 c0x0152 (n0x1cc6-n0x1cc9) + I al + 0x54e00602, // n0x1c86 c0x0153 (n0x1cc9-n0x1ccc) + I ar + 0x55200182, // n0x1c87 c0x0154 (n0x1ccc-n0x1ccf) + I as + 0x55608cc2, // n0x1c88 c0x0155 (n0x1ccf-n0x1cd2) + I az + 0x55a055c2, // n0x1c89 c0x0156 (n0x1cd2-n0x1cd5) + I ca + 0x55e00742, // n0x1c8a c0x0157 (n0x1cd5-n0x1cd8) + I co + 0x56223d82, // n0x1c8b c0x0158 (n0x1cd8-n0x1cdb) + I ct + 0x56616e02, // n0x1c8c c0x0159 (n0x1cdb-n0x1cde) + I dc + 0x56a006c2, // n0x1c8d c0x015a (n0x1cde-n0x1ce1) + I de + 0x0025ec03, // n0x1c8e c0x0000 (---------------) + I dni + 0x0020c743, // n0x1c8f c0x0000 (---------------) + I fed + 0x56e39b02, // n0x1c90 c0x015b (n0x1ce1-n0x1ce4) + I fl + 0x57201602, // n0x1c91 c0x015c (n0x1ce4-n0x1ce7) + I ga + 0x57629702, // n0x1c92 c0x015d (n0x1ce7-n0x1cea) + I gu + 0x57a00202, // n0x1c93 c0x015e (n0x1cea-n0x1cec) + I hi + 0x57e00482, // n0x1c94 c0x015f (n0x1cec-n0x1cef) + I ia + 0x58206202, // n0x1c95 c0x0160 (n0x1cef-n0x1cf2) + I id + 0x586036c2, // n0x1c96 c0x0161 (n0x1cf2-n0x1cf5) + I il + 0x58a00242, // n0x1c97 c0x0162 (n0x1cf5-n0x1cf8) + I in + 0x000aa785, // n0x1c98 c0x0000 (---------------) + is-by + 0x00209883, // n0x1c99 c0x0000 (---------------) + I isa + 0x00394c44, // n0x1c9a c0x0000 (---------------) + I kids + 0x58e38082, // n0x1c9b c0x0163 (n0x1cf8-n0x1cfb) + I ks + 0x59229082, // n0x1c9c c0x0164 (n0x1cfb-n0x1cfe) + I ky + 0x59601e42, // n0x1c9d c0x0165 (n0x1cfe-n0x1d01) + I la + 0x0006700b, // n0x1c9e c0x0000 (---------------) + land-4-sale + 0x59a04302, // n0x1c9f c0x0166 (n0x1d01-n0x1d04) + I ma + 0x5a238602, // n0x1ca0 c0x0168 (n0x1d07-n0x1d0a) + I md + 0x5a608942, // n0x1ca1 c0x0169 (n0x1d0a-n0x1d0d) + I me + 0x5aa04802, // n0x1ca2 c0x016a (n0x1d0d-n0x1d10) + I mi + 0x5ae17082, // n0x1ca3 c0x016b (n0x1d10-n0x1d13) + I mn + 0x5b203602, // n0x1ca4 c0x016c (n0x1d13-n0x1d16) + I mo + 0x5b609282, // n0x1ca5 c0x016d (n0x1d16-n0x1d19) + I ms + 0x5ba59642, // n0x1ca6 c0x016e (n0x1d19-n0x1d1c) + I mt + 0x5be1c742, // n0x1ca7 c0x016f (n0x1d1c-n0x1d1f) + I nc + 0x5c208f42, // n0x1ca8 c0x0170 (n0x1d1f-n0x1d21) + I nd + 0x5c601082, // n0x1ca9 c0x0171 (n0x1d21-n0x1d24) + I ne + 0x5ca037c2, // n0x1caa c0x0172 (n0x1d24-n0x1d27) + I nh + 0x5ce02942, // n0x1cab c0x0173 (n0x1d27-n0x1d2a) + I nj + 0x5d233c42, // n0x1cac c0x0174 (n0x1d2a-n0x1d2d) + I nm + 0x002c55c3, // n0x1cad c0x0000 (---------------) + I nsn + 0x5d608802, // n0x1cae c0x0175 (n0x1d2d-n0x1d30) + I nv + 0x5da108c2, // n0x1caf c0x0176 (n0x1d30-n0x1d33) + I ny + 0x5de051c2, // n0x1cb0 c0x0177 (n0x1d33-n0x1d36) + I oh + 0x5e201bc2, // n0x1cb1 c0x0178 (n0x1d36-n0x1d39) + I ok + 0x5e600c42, // n0x1cb2 c0x0179 (n0x1d39-n0x1d3c) + I or + 0x5ea052c2, // n0x1cb3 c0x017a (n0x1d3c-n0x1d3f) + I pa + 0x5ee18242, // n0x1cb4 c0x017b (n0x1d3f-n0x1d42) + I pr + 0x5f202842, // n0x1cb5 c0x017c (n0x1d42-n0x1d45) + I ri + 0x5f600982, // n0x1cb6 c0x017d (n0x1d45-n0x1d48) + I sc + 0x5fa4f842, // n0x1cb7 c0x017e (n0x1d48-n0x1d4a) + I sd + 0x000d044c, // n0x1cb8 c0x0000 (---------------) + stuff-4-sale + 0x5fe1d1c2, // n0x1cb9 c0x017f (n0x1d4a-n0x1d4d) + I tn + 0x60225242, // n0x1cba c0x0180 (n0x1d4d-n0x1d50) + I tx + 0x60600102, // n0x1cbb c0x0181 (n0x1d50-n0x1d53) + I ut + 
0x60a013c2, // n0x1cbc c0x0182 (n0x1d53-n0x1d56) + I va + 0x60e13602, // n0x1cbd c0x0183 (n0x1d56-n0x1d59) + I vi + 0x6121e302, // n0x1cbe c0x0184 (n0x1d59-n0x1d5c) + I vt + 0x61602542, // n0x1cbf c0x0185 (n0x1d5c-n0x1d5f) + I wa + 0x61a05502, // n0x1cc0 c0x0186 (n0x1d5f-n0x1d62) + I wi + 0x61e63902, // n0x1cc1 c0x0187 (n0x1d62-n0x1d63) + I wv + 0x62237f82, // n0x1cc2 c0x0188 (n0x1d63-n0x1d66) + I wy + 0x0021aa82, // n0x1cc3 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cc4 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cc5 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cc6 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cc7 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cc8 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cc9 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cca c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1ccb c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1ccc c0x0000 (---------------) + I cc + 0x00312503, // n0x1ccd c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cce c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1ccf c0x0000 (---------------) + I cc + 0x00312503, // n0x1cd0 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cd1 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cd2 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cd3 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cd4 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cd5 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cd6 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cd7 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cd8 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cd9 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cda c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cdb c0x0000 (---------------) + I cc + 0x00312503, // n0x1cdc c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cdd c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cde c0x0000 (---------------) + I cc + 0x00312503, // n0x1cdf c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1ce0 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1ce1 c0x0000 (---------------) + I cc + 0x00312503, // n0x1ce2 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1ce3 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1ce4 c0x0000 (---------------) + I cc + 0x00312503, // n0x1ce5 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1ce6 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1ce7 c0x0000 (---------------) + I cc + 0x00312503, // n0x1ce8 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1ce9 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cea c0x0000 (---------------) + I cc + 0x00273c43, // n0x1ceb c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cec c0x0000 (---------------) + I cc + 0x00312503, // n0x1ced c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cee c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cef c0x0000 (---------------) + I cc + 0x00312503, // n0x1cf0 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cf1 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cf2 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cf3 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cf4 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cf5 c0x0000 (---------------) + I cc + 0x00312503, // n0x1cf6 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cf7 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cf8 c0x0000 
(---------------) + I cc + 0x00312503, // n0x1cf9 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cfa c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cfb c0x0000 (---------------) + I cc + 0x00312503, // n0x1cfc c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1cfd c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1cfe c0x0000 (---------------) + I cc + 0x00312503, // n0x1cff c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d00 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d01 c0x0000 (---------------) + I cc + 0x59f12503, // n0x1d02 c0x0167 (n0x1d04-n0x1d07) + I k12 + 0x00273c43, // n0x1d03 c0x0000 (---------------) + I lib + 0x002e8ec4, // n0x1d04 c0x0000 (---------------) + I chtr + 0x0028e806, // n0x1d05 c0x0000 (---------------) + I paroch + 0x002ce243, // n0x1d06 c0x0000 (---------------) + I pvt + 0x0021aa82, // n0x1d07 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d08 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d09 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d0a c0x0000 (---------------) + I cc + 0x00312503, // n0x1d0b c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d0c c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d0d c0x0000 (---------------) + I cc + 0x00312503, // n0x1d0e c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d0f c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d10 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d11 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d12 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d13 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d14 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d15 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d16 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d17 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d18 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d19 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d1a c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d1b c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d1c c0x0000 (---------------) + I cc + 0x00312503, // n0x1d1d c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d1e c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d1f c0x0000 (---------------) + I cc + 0x00273c43, // n0x1d20 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d21 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d22 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d23 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d24 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d25 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d26 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d27 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d28 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d29 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d2a c0x0000 (---------------) + I cc + 0x00312503, // n0x1d2b c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d2c c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d2d c0x0000 (---------------) + I cc + 0x00312503, // n0x1d2e c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d2f c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d30 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d31 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d32 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d33 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d34 c0x0000 (---------------) + I k12 + 
0x00273c43, // n0x1d35 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d36 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d37 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d38 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d39 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d3a c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d3b c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d3c c0x0000 (---------------) + I cc + 0x00312503, // n0x1d3d c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d3e c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d3f c0x0000 (---------------) + I cc + 0x00312503, // n0x1d40 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d41 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d42 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d43 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d44 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d45 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d46 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d47 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d48 c0x0000 (---------------) + I cc + 0x00273c43, // n0x1d49 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d4a c0x0000 (---------------) + I cc + 0x00312503, // n0x1d4b c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d4c c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d4d c0x0000 (---------------) + I cc + 0x00312503, // n0x1d4e c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d4f c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d50 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d51 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d52 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d53 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d54 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d55 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d56 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d57 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d58 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d59 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d5a c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d5b c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d5c c0x0000 (---------------) + I cc + 0x00312503, // n0x1d5d c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d5e c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d5f c0x0000 (---------------) + I cc + 0x00312503, // n0x1d60 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d61 c0x0000 (---------------) + I lib + 0x0021aa82, // n0x1d62 c0x0000 (---------------) + I cc + 0x0021aa82, // n0x1d63 c0x0000 (---------------) + I cc + 0x00312503, // n0x1d64 c0x0000 (---------------) + I k12 + 0x00273c43, // n0x1d65 c0x0000 (---------------) + I lib + 0x62a22ac3, // n0x1d66 c0x018a (n0x1d6c-n0x1d6d) + I com + 0x002d75c3, // n0x1d67 c0x0000 (---------------) + I edu + 0x0032b9c3, // n0x1d68 c0x0000 (---------------) + I gub + 0x0023fa03, // n0x1d69 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1d6a c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d6b c0x0000 (---------------) + I org + 0x000e4188, // n0x1d6c c0x0000 (---------------) + blogspot + 0x00200742, // n0x1d6d c0x0000 (---------------) + I co + 0x00222ac3, // n0x1d6e c0x0000 (---------------) + I com + 0x002170c3, // n0x1d6f c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d70 c0x0000 (---------------) + I org + 0x00222ac3, // n0x1d71 
c0x0000 (---------------) + I com + 0x002d75c3, // n0x1d72 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1d73 c0x0000 (---------------) + I gov + 0x0023fa03, // n0x1d74 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1d75 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d76 c0x0000 (---------------) + I org + 0x00246584, // n0x1d77 c0x0000 (---------------) + I arts + 0x00200742, // n0x1d78 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1d79 c0x0000 (---------------) + I com + 0x0023ee43, // n0x1d7a c0x0000 (---------------) + I e12 + 0x002d75c3, // n0x1d7b c0x0000 (---------------) + I edu + 0x00238544, // n0x1d7c c0x0000 (---------------) + I firm + 0x0034eb03, // n0x1d7d c0x0000 (---------------) + I gob + 0x0021e283, // n0x1d7e c0x0000 (---------------) + I gov + 0x00200304, // n0x1d7f c0x0000 (---------------) + I info + 0x00238c03, // n0x1d80 c0x0000 (---------------) + I int + 0x0023fa03, // n0x1d81 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1d82 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d83 c0x0000 (---------------) + I org + 0x002e6343, // n0x1d84 c0x0000 (---------------) + I rec + 0x002cf4c5, // n0x1d85 c0x0000 (---------------) + I store + 0x0029d583, // n0x1d86 c0x0000 (---------------) + I tec + 0x00219fc3, // n0x1d87 c0x0000 (---------------) + I web + 0x00200742, // n0x1d88 c0x0000 (---------------) + I co + 0x00222ac3, // n0x1d89 c0x0000 (---------------) + I com + 0x00312503, // n0x1d8a c0x0000 (---------------) + I k12 + 0x002170c3, // n0x1d8b c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d8c c0x0000 (---------------) + I org + 0x00201e82, // n0x1d8d c0x0000 (---------------) + I ac + 0x00310603, // n0x1d8e c0x0000 (---------------) + I biz + 0x000e4188, // n0x1d8f c0x0000 (---------------) + blogspot + 0x00222ac3, // n0x1d90 c0x0000 (---------------) + I com + 0x002d75c3, // n0x1d91 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1d92 c0x0000 (---------------) + I gov + 0x00205e06, // n0x1d93 c0x0000 (---------------) + I health + 0x00200304, // n0x1d94 c0x0000 (---------------) + I info + 0x00238c03, // n0x1d95 c0x0000 (---------------) + I int + 0x00298944, // n0x1d96 c0x0000 (---------------) + I name + 0x002170c3, // n0x1d97 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d98 c0x0000 (---------------) + I org + 0x00218243, // n0x1d99 c0x0000 (---------------) + I pro + 0x00222ac3, // n0x1d9a c0x0000 (---------------) + I com + 0x002d75c3, // n0x1d9b c0x0000 (---------------) + I edu + 0x002170c3, // n0x1d9c c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1d9d c0x0000 (---------------) + I org + 0x00222ac3, // n0x1d9e c0x0000 (---------------) + I com + 0x00009ac6, // n0x1d9f c0x0000 (---------------) + dyndns + 0x002d75c3, // n0x1da0 c0x0000 (---------------) + I edu + 0x0021e283, // n0x1da1 c0x0000 (---------------) + I gov + 0x00108086, // n0x1da2 c0x0000 (---------------) + mypets + 0x002170c3, // n0x1da3 c0x0000 (---------------) + I net + 0x0021dcc3, // n0x1da4 c0x0000 (---------------) + I org + 0x002f1308, // n0x1da5 c0x0000 (---------------) + I xn--80au + 0x002f5c09, // n0x1da6 c0x0000 (---------------) + I xn--90azh + 0x00303289, // n0x1da7 c0x0000 (---------------) + I xn--c1avg + 0x00312fc8, // n0x1da8 c0x0000 (---------------) + I xn--d1at + 0x00362248, // n0x1da9 c0x0000 (---------------) + I xn--o1ac + 0x00362249, // n0x1daa c0x0000 (---------------) + I xn--o1ach + 0x00201e82, // n0x1dab c0x0000 (---------------) + I ac + 0x00301306, // n0x1dac c0x0000 (---------------) + I 
agrica + 0x00205e83, // n0x1dad c0x0000 (---------------) + I alt + 0x65200742, // n0x1dae c0x0194 (n0x1dbc-n0x1dbd) + I co + 0x002d75c3, // n0x1daf c0x0000 (---------------) + I edu + 0x0021e283, // n0x1db0 c0x0000 (---------------) + I gov + 0x002c6987, // n0x1db1 c0x0000 (---------------) + I grondar + 0x00253883, // n0x1db2 c0x0000 (---------------) + I law + 0x0023fa03, // n0x1db3 c0x0000 (---------------) + I mil + 0x002170c3, // n0x1db4 c0x0000 (---------------) + I net + 0x00202303, // n0x1db5 c0x0000 (---------------) + I ngo + 0x00208b83, // n0x1db6 c0x0000 (---------------) + I nis + 0x00207cc3, // n0x1db7 c0x0000 (---------------) + I nom + 0x0021dcc3, // n0x1db8 c0x0000 (---------------) + I org + 0x00235406, // n0x1db9 c0x0000 (---------------) + I school + 0x00208902, // n0x1dba c0x0000 (---------------) + I tm + 0x00219fc3, // n0x1dbb c0x0000 (---------------) + I web + 0x000e4188, // n0x1dbc c0x0000 (---------------) + blogspot } // children is the list of nodes' children, the parent's wildcard bit and the @@ -7560,379 +8084,409 @@ 0x40000000, // c0x0003 (---------------)* + 0x50000000, // c0x0004 (---------------)* ! 0x60000000, // c0x0005 (---------------)* o - 0x0105440f, // c0x0006 (n0x040f-n0x0415) + - 0x01058415, // c0x0007 (n0x0415-n0x0416) + - 0x01078416, // c0x0008 (n0x0416-n0x041e) + - 0x011dc41e, // c0x0009 (n0x041e-n0x0477) + - 0x011f0477, // c0x000a (n0x0477-n0x047c) + - 0x0120447c, // c0x000b (n0x047c-n0x0481) + - 0x01214481, // c0x000c (n0x0481-n0x0485) + - 0x0122c485, // c0x000d (n0x0485-n0x048b) + - 0x0123c48b, // c0x000e (n0x048b-n0x048f) + - 0x0125448f, // c0x000f (n0x048f-n0x0495) + - 0x01278495, // c0x0010 (n0x0495-n0x049e) + - 0x0127c49e, // c0x0011 (n0x049e-n0x049f) + - 0x0129449f, // c0x0012 (n0x049f-n0x04a5) + - 0x012984a5, // c0x0013 (n0x04a5-n0x04a6) + - 0x012b44a6, // c0x0014 (n0x04a6-n0x04ad) + - 0x012b84ad, // c0x0015 (n0x04ad-n0x04ae) + - 0x013004ae, // c0x0016 (n0x04ae-n0x04c0) + - 0x013044c0, // c0x0017 (n0x04c0-n0x04c1) + - 0x013244c1, // c0x0018 (n0x04c1-n0x04c9) + - 0x013384c9, // c0x0019 (n0x04c9-n0x04ce) + - 0x0133c4ce, // c0x001a (n0x04ce-n0x04cf) + - 0x0136c4cf, // c0x001b (n0x04cf-n0x04db) + - 0x013944db, // c0x001c (n0x04db-n0x04e5) + - 0x013bc4e5, // c0x001d (n0x04e5-n0x04ef) + - 0x013c44ef, // c0x001e (n0x04ef-n0x04f1) + - 0x013c84f1, // c0x001f (n0x04f1-n0x04f2) + - 0x014584f2, // c0x0020 (n0x04f2-n0x0516) + - 0x0146c516, // c0x0021 (n0x0516-n0x051b) + - 0x0148051b, // c0x0022 (n0x051b-n0x0520) + - 0x0149c520, // c0x0023 (n0x0520-n0x0527) + - 0x014ac527, // c0x0024 (n0x0527-n0x052b) + - 0x014c052b, // c0x0025 (n0x052b-n0x0530) + - 0x014e4530, // c0x0026 (n0x0530-n0x0539) + - 0x015fc539, // c0x0027 (n0x0539-n0x057f) + - 0x0160057f, // c0x0028 (n0x057f-n0x0580) + - 0x01614580, // c0x0029 (n0x0580-n0x0585) + - 0x01628585, // c0x002a (n0x0585-n0x058a) + - 0x0163058a, // c0x002b (n0x058a-n0x058c) + - 0x0164058c, // c0x002c (n0x058c-n0x0590) + - 0x01658590, // c0x002d (n0x0590-n0x0596) + - 0x0169c596, // c0x002e (n0x0596-n0x05a7) + - 0x016ac5a7, // c0x002f (n0x05a7-n0x05ab) + - 0x016b05ab, // c0x0030 (n0x05ab-n0x05ac) + - 0x016b45ac, // c0x0031 (n0x05ac-n0x05ad) + - 0x016b85ad, // c0x0032 (n0x05ad-n0x05ae) + - 0x016f45ae, // c0x0033 (n0x05ae-n0x05bd) + - 0x616f85bd, // c0x0034 (n0x05bd-n0x05be)* o - 0x017085be, // c0x0035 (n0x05be-n0x05c2) + - 0x017185c2, // c0x0036 (n0x05c2-n0x05c6) + - 0x017cc5c6, // c0x0037 (n0x05c6-n0x05f3) + - 0x217d05f3, // c0x0038 (n0x05f3-n0x05f4) o - 0x017d45f4, // c0x0039 (n0x05f4-n0x05f5) + - 0x018085f5, // 
c0x003a (n0x05f5-n0x0602) + - 0x01b1c602, // c0x003b (n0x0602-n0x06c7) + - 0x21b786c7, // c0x003c (n0x06c7-n0x06de) o - 0x01b9c6de, // c0x003d (n0x06de-n0x06e7) + - 0x01ba46e7, // c0x003e (n0x06e7-n0x06e9) + - 0x01bc06e9, // c0x003f (n0x06e9-n0x06f0) + - 0x01bd86f0, // c0x0040 (n0x06f0-n0x06f6) + - 0x01bdc6f6, // c0x0041 (n0x06f6-n0x06f7) + - 0x01bec6f7, // c0x0042 (n0x06f7-n0x06fb) + - 0x01bf46fb, // c0x0043 (n0x06fb-n0x06fd) + - 0x01bf86fd, // c0x0044 (n0x06fd-n0x06fe) + - 0x01c186fe, // c0x0045 (n0x06fe-n0x0706) + - 0x01c1c706, // c0x0046 (n0x0706-n0x0707) + - 0x01c30707, // c0x0047 (n0x0707-n0x070c) + - 0x01c5870c, // c0x0048 (n0x070c-n0x0716) + - 0x01c78716, // c0x0049 (n0x0716-n0x071e) + - 0x01ca871e, // c0x004a (n0x071e-n0x072a) + - 0x01cd072a, // c0x004b (n0x072a-n0x0734) + - 0x01cf4734, // c0x004c (n0x0734-n0x073d) + - 0x01d0873d, // c0x004d (n0x073d-n0x0742) + - 0x01d0c742, // c0x004e (n0x0742-n0x0743) + - 0x01d28743, // c0x004f (n0x0743-n0x074a) + - 0x01d3474a, // c0x0050 (n0x074a-n0x074d) + - 0x01d9474d, // c0x0051 (n0x074d-n0x0765) + - 0x01db0765, // c0x0052 (n0x0765-n0x076c) + - 0x01dbc76c, // c0x0053 (n0x076c-n0x076f) + - 0x01dd076f, // c0x0054 (n0x076f-n0x0774) + - 0x01de8774, // c0x0055 (n0x0774-n0x077a) + - 0x01e0077a, // c0x0056 (n0x077a-n0x0780) + - 0x01e18780, // c0x0057 (n0x0780-n0x0786) + - 0x01e30786, // c0x0058 (n0x0786-n0x078c) + - 0x01e4c78c, // c0x0059 (n0x078c-n0x0793) + - 0x01e58793, // c0x005a (n0x0793-n0x0796) + - 0x01eb8796, // c0x005b (n0x0796-n0x07ae) + - 0x01ed07ae, // c0x005c (n0x07ae-n0x07b4) + - 0x01ee07b4, // c0x005d (n0x07b4-n0x07b8) + - 0x01f247b8, // c0x005e (n0x07b8-n0x07c9) + - 0x01fa47c9, // c0x005f (n0x07c9-n0x07e9) + - 0x01fd07e9, // c0x0060 (n0x07e9-n0x07f4) + - 0x01fd87f4, // c0x0061 (n0x07f4-n0x07f6) + - 0x61fdc7f6, // c0x0062 (n0x07f6-n0x07f7)* o - 0x21fe07f7, // c0x0063 (n0x07f7-n0x07f8) o - 0x01ffc7f8, // c0x0064 (n0x07f8-n0x07ff) + - 0x020047ff, // c0x0065 (n0x07ff-n0x0801) + - 0x02038801, // c0x0066 (n0x0801-n0x080e) + - 0x0206080e, // c0x0067 (n0x080e-n0x0818) + - 0x02064818, // c0x0068 (n0x0818-n0x0819) + - 0x02070819, // c0x0069 (n0x0819-n0x081c) + - 0x0208881c, // c0x006a (n0x081c-n0x0822) + - 0x020ac822, // c0x006b (n0x0822-n0x082b) + - 0x020c882b, // c0x006c (n0x082b-n0x0832) + - 0x0268c832, // c0x006d (n0x0832-n0x09a3) + - 0x026989a3, // c0x006e (n0x09a3-n0x09a6) + - 0x026b89a6, // c0x006f (n0x09a6-n0x09ae) + - 0x028749ae, // c0x0070 (n0x09ae-n0x0a1d) + - 0x02944a1d, // c0x0071 (n0x0a1d-n0x0a51) + - 0x029b4a51, // c0x0072 (n0x0a51-n0x0a6d) + - 0x02a0ca6d, // c0x0073 (n0x0a6d-n0x0a83) + - 0x02af4a83, // c0x0074 (n0x0a83-n0x0abd) + - 0x02b4cabd, // c0x0075 (n0x0abd-n0x0ad3) + - 0x02b88ad3, // c0x0076 (n0x0ad3-n0x0ae2) + - 0x02c84ae2, // c0x0077 (n0x0ae2-n0x0b21) + - 0x02d50b21, // c0x0078 (n0x0b21-n0x0b54) + - 0x02de8b54, // c0x0079 (n0x0b54-n0x0b7a) + - 0x02e78b7a, // c0x007a (n0x0b7a-n0x0b9e) + - 0x02edcb9e, // c0x007b (n0x0b9e-n0x0bb7) + - 0x03114bb7, // c0x007c (n0x0bb7-n0x0c45) + - 0x031ccc45, // c0x007d (n0x0c45-n0x0c73) + - 0x03298c73, // c0x007e (n0x0c73-n0x0ca6) + - 0x032e4ca6, // c0x007f (n0x0ca6-n0x0cb9) + - 0x0336ccb9, // c0x0080 (n0x0cb9-n0x0cdb) + - 0x033a8cdb, // c0x0081 (n0x0cdb-n0x0cea) + - 0x033f8cea, // c0x0082 (n0x0cea-n0x0cfe) + - 0x03470cfe, // c0x0083 (n0x0cfe-n0x0d1c) + - 0x63474d1c, // c0x0084 (n0x0d1c-n0x0d1d)* o - 0x63478d1d, // c0x0085 (n0x0d1d-n0x0d1e)* o - 0x6347cd1e, // c0x0086 (n0x0d1e-n0x0d1f)* o - 0x034f8d1f, // c0x0087 (n0x0d1f-n0x0d3e) + - 0x03560d3e, // c0x0088 (n0x0d3e-n0x0d58) + - 
0x035dcd58, // c0x0089 (n0x0d58-n0x0d77) + - 0x03654d77, // c0x008a (n0x0d77-n0x0d95) + - 0x036d8d95, // c0x008b (n0x0d95-n0x0db6) + - 0x03744db6, // c0x008c (n0x0db6-n0x0dd1) + - 0x03870dd1, // c0x008d (n0x0dd1-n0x0e1c) + - 0x038c8e1c, // c0x008e (n0x0e1c-n0x0e32) + - 0x638cce32, // c0x008f (n0x0e32-n0x0e33)* o - 0x03964e33, // c0x0090 (n0x0e33-n0x0e59) + - 0x039ece59, // c0x0091 (n0x0e59-n0x0e7b) + - 0x03a38e7b, // c0x0092 (n0x0e7b-n0x0e8e) + - 0x03aa0e8e, // c0x0093 (n0x0e8e-n0x0ea8) + - 0x03b48ea8, // c0x0094 (n0x0ea8-n0x0ed2) + - 0x03c10ed2, // c0x0095 (n0x0ed2-n0x0f04) + - 0x03c78f04, // c0x0096 (n0x0f04-n0x0f1e) + - 0x03d8cf1e, // c0x0097 (n0x0f1e-n0x0f63) + - 0x63d90f63, // c0x0098 (n0x0f63-n0x0f64)* o - 0x63d94f64, // c0x0099 (n0x0f64-n0x0f65)* o - 0x03df0f65, // c0x009a (n0x0f65-n0x0f7c) + - 0x03e4cf7c, // c0x009b (n0x0f7c-n0x0f93) + - 0x03edcf93, // c0x009c (n0x0f93-n0x0fb7) + - 0x03f58fb7, // c0x009d (n0x0fb7-n0x0fd6) + - 0x03f9cfd6, // c0x009e (n0x0fd6-n0x0fe7) + - 0x04080fe7, // c0x009f (n0x0fe7-n0x1020) + - 0x040b5020, // c0x00a0 (n0x1020-n0x102d) + - 0x0411502d, // c0x00a1 (n0x102d-n0x1045) + - 0x04189045, // c0x00a2 (n0x1045-n0x1062) + - 0x04211062, // c0x00a3 (n0x1062-n0x1084) + - 0x04251084, // c0x00a4 (n0x1084-n0x1094) + - 0x042c1094, // c0x00a5 (n0x1094-n0x10b0) + - 0x642c50b0, // c0x00a6 (n0x10b0-n0x10b1)* o - 0x042dd0b1, // c0x00a7 (n0x10b1-n0x10b7) + - 0x042f90b7, // c0x00a8 (n0x10b7-n0x10be) + - 0x0433d0be, // c0x00a9 (n0x10be-n0x10cf) + - 0x0434d0cf, // c0x00aa (n0x10cf-n0x10d3) + - 0x043650d3, // c0x00ab (n0x10d3-n0x10d9) + - 0x043dd0d9, // c0x00ac (n0x10d9-n0x10f7) + - 0x043f10f7, // c0x00ad (n0x10f7-n0x10fc) + - 0x044090fc, // c0x00ae (n0x10fc-n0x1102) + - 0x0442d102, // c0x00af (n0x1102-n0x110b) + - 0x0444110b, // c0x00b0 (n0x110b-n0x1110) + - 0x04459110, // c0x00b1 (n0x1110-n0x1116) + - 0x04491116, // c0x00b2 (n0x1116-n0x1124) + - 0x044a5124, // c0x00b3 (n0x1124-n0x1129) + - 0x044ad129, // c0x00b4 (n0x1129-n0x112b) + - 0x044b112b, // c0x00b5 (n0x112b-n0x112c) + - 0x044d512c, // c0x00b6 (n0x112c-n0x1135) + - 0x044f9135, // c0x00b7 (n0x1135-n0x113e) + - 0x0451113e, // c0x00b8 (n0x113e-n0x1144) + - 0x04519144, // c0x00b9 (n0x1144-n0x1146) + - 0x04539146, // c0x00ba (n0x1146-n0x114e) + - 0x0455914e, // c0x00bb (n0x114e-n0x1156) + - 0x04575156, // c0x00bc (n0x1156-n0x115d) + - 0x0459115d, // c0x00bd (n0x115d-n0x1164) + - 0x045a1164, // c0x00be (n0x1164-n0x1168) + - 0x045b5168, // c0x00bf (n0x1168-n0x116d) + - 0x045bd16d, // c0x00c0 (n0x116d-n0x116f) + - 0x045d116f, // c0x00c1 (n0x116f-n0x1174) + - 0x045e1174, // c0x00c2 (n0x1174-n0x1178) + - 0x045fd178, // c0x00c3 (n0x1178-n0x117f) + - 0x04e8d17f, // c0x00c4 (n0x117f-n0x13a3) + - 0x04ec53a3, // c0x00c5 (n0x13a3-n0x13b1) + - 0x04ef13b1, // c0x00c6 (n0x13b1-n0x13bc) + - 0x04f093bc, // c0x00c7 (n0x13bc-n0x13c2) + - 0x04f253c2, // c0x00c8 (n0x13c2-n0x13c9) + - 0x64f293c9, // c0x00c9 (n0x13c9-n0x13ca)* o - 0x04f6d3ca, // c0x00ca (n0x13ca-n0x13db) + - 0x04f753db, // c0x00cb (n0x13db-n0x13dd) + - 0x24f793dd, // c0x00cc (n0x13dd-n0x13de) o - 0x24f7d3de, // c0x00cd (n0x13de-n0x13df) o - 0x04f813df, // c0x00ce (n0x13df-n0x13e0) + - 0x0503d3e0, // c0x00cf (n0x13e0-n0x140f) + - 0x2504540f, // c0x00d0 (n0x140f-n0x1411) o - 0x2504d411, // c0x00d1 (n0x1411-n0x1413) o - 0x25059413, // c0x00d2 (n0x1413-n0x1416) o - 0x05081416, // c0x00d3 (n0x1416-n0x1420) + - 0x050a5420, // c0x00d4 (n0x1420-n0x1429) + - 0x050b1429, // c0x00d5 (n0x1429-n0x142c) + - 0x05c0942c, // c0x00d6 (n0x142c-n0x1702) + - 0x05c0d702, // c0x00d7 
(n0x1702-n0x1703) + - 0x05c11703, // c0x00d8 (n0x1703-n0x1704) + - 0x25c15704, // c0x00d9 (n0x1704-n0x1705) o - 0x05c19705, // c0x00da (n0x1705-n0x1706) + - 0x25c1d706, // c0x00db (n0x1706-n0x1707) o - 0x05c21707, // c0x00dc (n0x1707-n0x1708) + - 0x25c2d708, // c0x00dd (n0x1708-n0x170b) o - 0x05c3170b, // c0x00de (n0x170b-n0x170c) + - 0x05c3570c, // c0x00df (n0x170c-n0x170d) + - 0x25c3970d, // c0x00e0 (n0x170d-n0x170e) o - 0x05c3d70e, // c0x00e1 (n0x170e-n0x170f) + - 0x25c4570f, // c0x00e2 (n0x170f-n0x1711) o - 0x05c49711, // c0x00e3 (n0x1711-n0x1712) + - 0x05c4d712, // c0x00e4 (n0x1712-n0x1713) + - 0x25c5d713, // c0x00e5 (n0x1713-n0x1717) o - 0x05c61717, // c0x00e6 (n0x1717-n0x1718) + - 0x05c65718, // c0x00e7 (n0x1718-n0x1719) + - 0x05c69719, // c0x00e8 (n0x1719-n0x171a) + - 0x05c6d71a, // c0x00e9 (n0x171a-n0x171b) + - 0x25c7171b, // c0x00ea (n0x171b-n0x171c) o - 0x05c7571c, // c0x00eb (n0x171c-n0x171d) + - 0x05c7971d, // c0x00ec (n0x171d-n0x171e) + - 0x05c7d71e, // c0x00ed (n0x171e-n0x171f) + - 0x05c8171f, // c0x00ee (n0x171f-n0x1720) + - 0x25c89720, // c0x00ef (n0x1720-n0x1722) o - 0x05c8d722, // c0x00f0 (n0x1722-n0x1723) + - 0x05c91723, // c0x00f1 (n0x1723-n0x1724) + - 0x05c95724, // c0x00f2 (n0x1724-n0x1725) + - 0x25c99725, // c0x00f3 (n0x1725-n0x1726) o - 0x05c9d726, // c0x00f4 (n0x1726-n0x1727) + - 0x25ca5727, // c0x00f5 (n0x1727-n0x1729) o - 0x25ca9729, // c0x00f6 (n0x1729-n0x172a) o - 0x05cc572a, // c0x00f7 (n0x172a-n0x1731) + - 0x05cd1731, // c0x00f8 (n0x1731-n0x1734) + - 0x05d11734, // c0x00f9 (n0x1734-n0x1744) + - 0x05d15744, // c0x00fa (n0x1744-n0x1745) + - 0x05d39745, // c0x00fb (n0x1745-n0x174e) + - 0x05e1174e, // c0x00fc (n0x174e-n0x1784) + - 0x05e19784, // c0x00fd (n0x1784-n0x1786) + - 0x05e45786, // c0x00fe (n0x1786-n0x1791) + - 0x05e61791, // c0x00ff (n0x1791-n0x1798) + - 0x05e6d798, // c0x0100 (n0x1798-n0x179b) + - 0x05e8d79b, // c0x0101 (n0x179b-n0x17a3) + - 0x05ec57a3, // c0x0102 (n0x17a3-n0x17b1) + - 0x061597b1, // c0x0103 (n0x17b1-n0x1856) + - 0x0617d856, // c0x0104 (n0x1856-n0x185f) + - 0x0619185f, // c0x0105 (n0x185f-n0x1864) + - 0x061c5864, // c0x0106 (n0x1864-n0x1871) + - 0x061e1871, // c0x0107 (n0x1871-n0x1878) + - 0x061fd878, // c0x0108 (n0x1878-n0x187f) + - 0x0622187f, // c0x0109 (n0x187f-n0x1888) + - 0x06239888, // c0x010a (n0x1888-n0x188e) + - 0x0625588e, // c0x010b (n0x188e-n0x1895) + - 0x06275895, // c0x010c (n0x1895-n0x189d) + - 0x0628589d, // c0x010d (n0x189d-n0x18a1) + - 0x062b58a1, // c0x010e (n0x18a1-n0x18ad) + - 0x062cd8ad, // c0x010f (n0x18ad-n0x18b3) + - 0x064dd8b3, // c0x0110 (n0x18b3-n0x1937) + - 0x06501937, // c0x0111 (n0x1937-n0x1940) + - 0x06521940, // c0x0112 (n0x1940-n0x1948) + - 0x06535948, // c0x0113 (n0x1948-n0x194d) + - 0x0654994d, // c0x0114 (n0x194d-n0x1952) + - 0x06569952, // c0x0115 (n0x1952-n0x195a) + - 0x0660d95a, // c0x0116 (n0x195a-n0x1983) + - 0x06629983, // c0x0117 (n0x1983-n0x198a) + - 0x0663d98a, // c0x0118 (n0x198a-n0x198f) + - 0x0664198f, // c0x0119 (n0x198f-n0x1990) + - 0x06655990, // c0x011a (n0x1990-n0x1995) + - 0x06671995, // c0x011b (n0x1995-n0x199c) + - 0x0667d99c, // c0x011c (n0x199c-n0x199f) + - 0x066ad99f, // c0x011d (n0x199f-n0x19ab) + - 0x066c19ab, // c0x011e (n0x19ab-n0x19b0) + - 0x066c59b0, // c0x011f (n0x19b0-n0x19b1) + - 0x066dd9b1, // c0x0120 (n0x19b1-n0x19b7) + - 0x066e99b7, // c0x0121 (n0x19b7-n0x19ba) + - 0x066ed9ba, // c0x0122 (n0x19ba-n0x19bb) + - 0x067099bb, // c0x0123 (n0x19bb-n0x19c2) + - 0x067459c2, // c0x0124 (n0x19c2-n0x19d1) + - 0x067499d1, // c0x0125 (n0x19d1-n0x19d2) + - 0x067699d2, // c0x0126 
(n0x19d2-n0x19da) + - 0x067b99da, // c0x0127 (n0x19da-n0x19ee) + - 0x067d19ee, // c0x0128 (n0x19ee-n0x19f4) + - 0x068259f4, // c0x0129 (n0x19f4-n0x1a09) + - 0x06829a09, // c0x012a (n0x1a09-n0x1a0a) + - 0x0682da0a, // c0x012b (n0x1a0a-n0x1a0b) + - 0x06871a0b, // c0x012c (n0x1a0b-n0x1a1c) + - 0x06881a1c, // c0x012d (n0x1a1c-n0x1a20) + - 0x068b9a20, // c0x012e (n0x1a20-n0x1a2e) + - 0x068e9a2e, // c0x012f (n0x1a2e-n0x1a3a) + - 0x06a21a3a, // c0x0130 (n0x1a3a-n0x1a88) + - 0x06a41a88, // c0x0131 (n0x1a88-n0x1a90) + - 0x06a6da90, // c0x0132 (n0x1a90-n0x1a9b) + - 0x06a71a9b, // c0x0133 (n0x1a9b-n0x1a9c) + - 0x06a75a9c, // c0x0134 (n0x1a9c-n0x1a9d) + - 0x06b71a9d, // c0x0135 (n0x1a9d-n0x1adc) + - 0x06b7dadc, // c0x0136 (n0x1adc-n0x1adf) + - 0x06b89adf, // c0x0137 (n0x1adf-n0x1ae2) + - 0x06b95ae2, // c0x0138 (n0x1ae2-n0x1ae5) + - 0x06ba1ae5, // c0x0139 (n0x1ae5-n0x1ae8) + - 0x06badae8, // c0x013a (n0x1ae8-n0x1aeb) + - 0x06bb9aeb, // c0x013b (n0x1aeb-n0x1aee) + - 0x06bc5aee, // c0x013c (n0x1aee-n0x1af1) + - 0x06bd1af1, // c0x013d (n0x1af1-n0x1af4) + - 0x06bddaf4, // c0x013e (n0x1af4-n0x1af7) + - 0x06be9af7, // c0x013f (n0x1af7-n0x1afa) + - 0x06bf5afa, // c0x0140 (n0x1afa-n0x1afd) + - 0x06c01afd, // c0x0141 (n0x1afd-n0x1b00) + - 0x06c0db00, // c0x0142 (n0x1b00-n0x1b03) + - 0x06c15b03, // c0x0143 (n0x1b03-n0x1b05) + - 0x06c21b05, // c0x0144 (n0x1b05-n0x1b08) + - 0x06c2db08, // c0x0145 (n0x1b08-n0x1b0b) + - 0x06c39b0b, // c0x0146 (n0x1b0b-n0x1b0e) + - 0x06c45b0e, // c0x0147 (n0x1b0e-n0x1b11) + - 0x06c51b11, // c0x0148 (n0x1b11-n0x1b14) + - 0x06c5db14, // c0x0149 (n0x1b14-n0x1b17) + - 0x06c69b17, // c0x014a (n0x1b17-n0x1b1a) + - 0x06c75b1a, // c0x014b (n0x1b1a-n0x1b1d) + - 0x06c81b1d, // c0x014c (n0x1b1d-n0x1b20) + - 0x06c8db20, // c0x014d (n0x1b20-n0x1b23) + - 0x06c99b23, // c0x014e (n0x1b23-n0x1b26) + - 0x06ca5b26, // c0x014f (n0x1b26-n0x1b29) + - 0x06cb1b29, // c0x0150 (n0x1b29-n0x1b2c) + - 0x06cbdb2c, // c0x0151 (n0x1b2c-n0x1b2f) + - 0x06cc9b2f, // c0x0152 (n0x1b2f-n0x1b32) + - 0x06cd5b32, // c0x0153 (n0x1b32-n0x1b35) + - 0x06ce1b35, // c0x0154 (n0x1b35-n0x1b38) + - 0x06ce9b38, // c0x0155 (n0x1b38-n0x1b3a) + - 0x06cf5b3a, // c0x0156 (n0x1b3a-n0x1b3d) + - 0x06d01b3d, // c0x0157 (n0x1b3d-n0x1b40) + - 0x06d0db40, // c0x0158 (n0x1b40-n0x1b43) + - 0x06d19b43, // c0x0159 (n0x1b43-n0x1b46) + - 0x06d25b46, // c0x015a (n0x1b46-n0x1b49) + - 0x06d31b49, // c0x015b (n0x1b49-n0x1b4c) + - 0x06d3db4c, // c0x015c (n0x1b4c-n0x1b4f) + - 0x06d49b4f, // c0x015d (n0x1b4f-n0x1b52) + - 0x06d55b52, // c0x015e (n0x1b52-n0x1b55) + - 0x06d61b55, // c0x015f (n0x1b55-n0x1b58) + - 0x06d6db58, // c0x0160 (n0x1b58-n0x1b5b) + - 0x06d79b5b, // c0x0161 (n0x1b5b-n0x1b5e) + - 0x06d85b5e, // c0x0162 (n0x1b5e-n0x1b61) + - 0x06d8db61, // c0x0163 (n0x1b61-n0x1b63) + - 0x06d99b63, // c0x0164 (n0x1b63-n0x1b66) + - 0x06da5b66, // c0x0165 (n0x1b66-n0x1b69) + - 0x06db1b69, // c0x0166 (n0x1b69-n0x1b6c) + - 0x06dbdb6c, // c0x0167 (n0x1b6c-n0x1b6f) + - 0x06dc9b6f, // c0x0168 (n0x1b6f-n0x1b72) + - 0x06dd5b72, // c0x0169 (n0x1b72-n0x1b75) + - 0x06de1b75, // c0x016a (n0x1b75-n0x1b78) + - 0x06dedb78, // c0x016b (n0x1b78-n0x1b7b) + - 0x06df1b7b, // c0x016c (n0x1b7b-n0x1b7c) + - 0x06dfdb7c, // c0x016d (n0x1b7c-n0x1b7f) + - 0x06e15b7f, // c0x016e (n0x1b7f-n0x1b85) + - 0x06e25b85, // c0x016f (n0x1b85-n0x1b89) + - 0x06e3db89, // c0x0170 (n0x1b89-n0x1b8f) + - 0x06e81b8f, // c0x0171 (n0x1b8f-n0x1ba0) + - 0x06e95ba0, // c0x0172 (n0x1ba0-n0x1ba5) + - 0x06ec5ba5, // c0x0173 (n0x1ba5-n0x1bb1) + - 0x06ed5bb1, // c0x0174 (n0x1bb1-n0x1bb5) + - 0x06ef1bb5, // c0x0175 
(n0x1bb5-n0x1bbc) + - 0x06f09bbc, // c0x0176 (n0x1bbc-n0x1bc2) + + 0x014fc539, // c0x0006 (n0x0539-n0x053f) + + 0x0150053f, // c0x0007 (n0x053f-n0x0540) + + 0x01520540, // c0x0008 (n0x0540-n0x0548) + + 0x01684548, // c0x0009 (n0x0548-n0x05a1) + + 0x016985a1, // c0x000a (n0x05a1-n0x05a6) + + 0x016ac5a6, // c0x000b (n0x05a6-n0x05ab) + + 0x016bc5ab, // c0x000c (n0x05ab-n0x05af) + + 0x016d85af, // c0x000d (n0x05af-n0x05b6) + + 0x016dc5b6, // c0x000e (n0x05b6-n0x05b7) + + 0x016ec5b7, // c0x000f (n0x05b7-n0x05bb) + + 0x017045bb, // c0x0010 (n0x05bb-n0x05c1) + + 0x017285c1, // c0x0011 (n0x05c1-n0x05ca) + + 0x0172c5ca, // c0x0012 (n0x05ca-n0x05cb) + + 0x017445cb, // c0x0013 (n0x05cb-n0x05d1) + + 0x017485d1, // c0x0014 (n0x05d1-n0x05d2) + + 0x017645d2, // c0x0015 (n0x05d2-n0x05d9) + + 0x017685d9, // c0x0016 (n0x05d9-n0x05da) + + 0x017b05da, // c0x0017 (n0x05da-n0x05ec) + + 0x017b45ec, // c0x0018 (n0x05ec-n0x05ed) + + 0x017d45ed, // c0x0019 (n0x05ed-n0x05f5) + + 0x017e85f5, // c0x001a (n0x05f5-n0x05fa) + + 0x017ec5fa, // c0x001b (n0x05fa-n0x05fb) + + 0x0181c5fb, // c0x001c (n0x05fb-n0x0607) + + 0x01848607, // c0x001d (n0x0607-n0x0612) + + 0x01870612, // c0x001e (n0x0612-n0x061c) + + 0x0187861c, // c0x001f (n0x061c-n0x061e) + + 0x0187c61e, // c0x0020 (n0x061e-n0x061f) + + 0x0191061f, // c0x0021 (n0x061f-n0x0644) + + 0x01924644, // c0x0022 (n0x0644-n0x0649) + + 0x01938649, // c0x0023 (n0x0649-n0x064e) + + 0x0195464e, // c0x0024 (n0x064e-n0x0655) + + 0x01964655, // c0x0025 (n0x0655-n0x0659) + + 0x01978659, // c0x0026 (n0x0659-n0x065e) + + 0x0199c65e, // c0x0027 (n0x065e-n0x0667) + + 0x01ab4667, // c0x0028 (n0x0667-n0x06ad) + + 0x01ab86ad, // c0x0029 (n0x06ad-n0x06ae) + + 0x01acc6ae, // c0x002a (n0x06ae-n0x06b3) + + 0x01ae06b3, // c0x002b (n0x06b3-n0x06b8) + + 0x01ae86b8, // c0x002c (n0x06b8-n0x06ba) + + 0x01af86ba, // c0x002d (n0x06ba-n0x06be) + + 0x01afc6be, // c0x002e (n0x06be-n0x06bf) + + 0x01b146bf, // c0x002f (n0x06bf-n0x06c5) + + 0x01b586c5, // c0x0030 (n0x06c5-n0x06d6) + + 0x01b686d6, // c0x0031 (n0x06d6-n0x06da) + + 0x01b6c6da, // c0x0032 (n0x06da-n0x06db) + + 0x01b706db, // c0x0033 (n0x06db-n0x06dc) + + 0x01b746dc, // c0x0034 (n0x06dc-n0x06dd) + + 0x01bb06dd, // c0x0035 (n0x06dd-n0x06ec) + + 0x61bb46ec, // c0x0036 (n0x06ec-n0x06ed)* o + 0x01bc86ed, // c0x0037 (n0x06ed-n0x06f2) + + 0x01bd86f2, // c0x0038 (n0x06f2-n0x06f6) + + 0x01c8c6f6, // c0x0039 (n0x06f6-n0x0723) + + 0x21c90723, // c0x003a (n0x0723-n0x0724) o + 0x01c94724, // c0x003b (n0x0724-n0x0725) + + 0x01c98725, // c0x003c (n0x0725-n0x0726) + + 0x21c9c726, // c0x003d (n0x0726-n0x0727) o + 0x21ca0727, // c0x003e (n0x0727-n0x0728) o + 0x01cd4728, // c0x003f (n0x0728-n0x0735) + + 0x01cd8735, // c0x0040 (n0x0735-n0x0736) + + 0x01ffc736, // c0x0041 (n0x0736-n0x07ff) + + 0x220447ff, // c0x0042 (n0x07ff-n0x0811) o + 0x02068811, // c0x0043 (n0x0811-n0x081a) + + 0x0207081a, // c0x0044 (n0x081a-n0x081c) + + 0x2207481c, // c0x0045 (n0x081c-n0x081d) o + 0x0209081d, // c0x0046 (n0x081d-n0x0824) + + 0x020a8824, // c0x0047 (n0x0824-n0x082a) + + 0x020ac82a, // c0x0048 (n0x082a-n0x082b) + + 0x020bc82b, // c0x0049 (n0x082b-n0x082f) + + 0x020c482f, // c0x004a (n0x082f-n0x0831) + + 0x220f8831, // c0x004b (n0x0831-n0x083e) o + 0x020fc83e, // c0x004c (n0x083e-n0x083f) + + 0x0210083f, // c0x004d (n0x083f-n0x0840) + + 0x02120840, // c0x004e (n0x0840-n0x0848) + + 0x02124848, // c0x004f (n0x0848-n0x0849) + + 0x02138849, // c0x0050 (n0x0849-n0x084e) + + 0x0216084e, // c0x0051 (n0x084e-n0x0858) + + 0x02180858, // c0x0052 (n0x0858-n0x0860) + + 0x021b0860, // 
c0x0053 (n0x0860-n0x086c) + + 0x021d886c, // c0x0054 (n0x086c-n0x0876) + + 0x021dc876, // c0x0055 (n0x0876-n0x0877) + + 0x02200877, // c0x0056 (n0x0877-n0x0880) + + 0x02204880, // c0x0057 (n0x0880-n0x0881) + + 0x02218881, // c0x0058 (n0x0881-n0x0886) + + 0x0221c886, // c0x0059 (n0x0886-n0x0887) + + 0x0223c887, // c0x005a (n0x0887-n0x088f) + + 0x0224888f, // c0x005b (n0x088f-n0x0892) + + 0x022a8892, // c0x005c (n0x0892-n0x08aa) + + 0x022c48aa, // c0x005d (n0x08aa-n0x08b1) + + 0x022d08b1, // c0x005e (n0x08b1-n0x08b4) + + 0x022e48b4, // c0x005f (n0x08b4-n0x08b9) + + 0x022fc8b9, // c0x0060 (n0x08b9-n0x08bf) + + 0x023108bf, // c0x0061 (n0x08bf-n0x08c4) + + 0x023288c4, // c0x0062 (n0x08c4-n0x08ca) + + 0x023408ca, // c0x0063 (n0x08ca-n0x08d0) + + 0x023588d0, // c0x0064 (n0x08d0-n0x08d6) + + 0x023748d6, // c0x0065 (n0x08d6-n0x08dd) + + 0x023808dd, // c0x0066 (n0x08dd-n0x08e0) + + 0x023e08e0, // c0x0067 (n0x08e0-n0x08f8) + + 0x023f88f8, // c0x0068 (n0x08f8-n0x08fe) + + 0x0240c8fe, // c0x0069 (n0x08fe-n0x0903) + + 0x02450903, // c0x006a (n0x0903-n0x0914) + + 0x024d0914, // c0x006b (n0x0914-n0x0934) + + 0x024fc934, // c0x006c (n0x0934-n0x093f) + + 0x0250093f, // c0x006d (n0x093f-n0x0940) + + 0x02508940, // c0x006e (n0x0940-n0x0942) + + 0x6250c942, // c0x006f (n0x0942-n0x0943)* o + 0x22510943, // c0x0070 (n0x0943-n0x0944) o + 0x0252c944, // c0x0071 (n0x0944-n0x094b) + + 0x0253494b, // c0x0072 (n0x094b-n0x094d) + + 0x0256894d, // c0x0073 (n0x094d-n0x095a) + + 0x0259095a, // c0x0074 (n0x095a-n0x0964) + + 0x02594964, // c0x0075 (n0x0964-n0x0965) + + 0x025a0965, // c0x0076 (n0x0965-n0x0968) + + 0x025b8968, // c0x0077 (n0x0968-n0x096e) + + 0x025dc96e, // c0x0078 (n0x096e-n0x0977) + + 0x025fc977, // c0x0079 (n0x0977-n0x097f) + + 0x02bc097f, // c0x007a (n0x097f-n0x0af0) + + 0x02bccaf0, // c0x007b (n0x0af0-n0x0af3) + + 0x02becaf3, // c0x007c (n0x0af3-n0x0afb) + + 0x02da8afb, // c0x007d (n0x0afb-n0x0b6a) + + 0x02e78b6a, // c0x007e (n0x0b6a-n0x0b9e) + + 0x02ee8b9e, // c0x007f (n0x0b9e-n0x0bba) + + 0x02f40bba, // c0x0080 (n0x0bba-n0x0bd0) + + 0x03028bd0, // c0x0081 (n0x0bd0-n0x0c0a) + + 0x03080c0a, // c0x0082 (n0x0c0a-n0x0c20) + + 0x030bcc20, // c0x0083 (n0x0c20-n0x0c2f) + + 0x031b8c2f, // c0x0084 (n0x0c2f-n0x0c6e) + + 0x03284c6e, // c0x0085 (n0x0c6e-n0x0ca1) + + 0x0331cca1, // c0x0086 (n0x0ca1-n0x0cc7) + + 0x033accc7, // c0x0087 (n0x0cc7-n0x0ceb) + + 0x03410ceb, // c0x0088 (n0x0ceb-n0x0d04) + + 0x03648d04, // c0x0089 (n0x0d04-n0x0d92) + + 0x03700d92, // c0x008a (n0x0d92-n0x0dc0) + + 0x037ccdc0, // c0x008b (n0x0dc0-n0x0df3) + + 0x03818df3, // c0x008c (n0x0df3-n0x0e06) + + 0x038a0e06, // c0x008d (n0x0e06-n0x0e28) + + 0x038dce28, // c0x008e (n0x0e28-n0x0e37) + + 0x0392ce37, // c0x008f (n0x0e37-n0x0e4b) + + 0x039a4e4b, // c0x0090 (n0x0e4b-n0x0e69) + + 0x639a8e69, // c0x0091 (n0x0e69-n0x0e6a)* o + 0x639ace6a, // c0x0092 (n0x0e6a-n0x0e6b)* o + 0x639b0e6b, // c0x0093 (n0x0e6b-n0x0e6c)* o + 0x03a2ce6c, // c0x0094 (n0x0e6c-n0x0e8b) + + 0x03a94e8b, // c0x0095 (n0x0e8b-n0x0ea5) + + 0x03b10ea5, // c0x0096 (n0x0ea5-n0x0ec4) + + 0x03b88ec4, // c0x0097 (n0x0ec4-n0x0ee2) + + 0x03c0cee2, // c0x0098 (n0x0ee2-n0x0f03) + + 0x03c78f03, // c0x0099 (n0x0f03-n0x0f1e) + + 0x03da4f1e, // c0x009a (n0x0f1e-n0x0f69) + + 0x03dfcf69, // c0x009b (n0x0f69-n0x0f7f) + + 0x63e00f7f, // c0x009c (n0x0f7f-n0x0f80)* o + 0x03e98f80, // c0x009d (n0x0f80-n0x0fa6) + + 0x03f20fa6, // c0x009e (n0x0fa6-n0x0fc8) + + 0x03f6cfc8, // c0x009f (n0x0fc8-n0x0fdb) + + 0x03fd4fdb, // c0x00a0 (n0x0fdb-n0x0ff5) + + 0x0407cff5, // c0x00a1 (n0x0ff5-n0x101f) + + 
0x0414501f, // c0x00a2 (n0x101f-n0x1051) + + 0x041ad051, // c0x00a3 (n0x1051-n0x106b) + + 0x042c106b, // c0x00a4 (n0x106b-n0x10b0) + + 0x642c50b0, // c0x00a5 (n0x10b0-n0x10b1)* o + 0x642c90b1, // c0x00a6 (n0x10b1-n0x10b2)* o + 0x043250b2, // c0x00a7 (n0x10b2-n0x10c9) + + 0x043810c9, // c0x00a8 (n0x10c9-n0x10e0) + + 0x044110e0, // c0x00a9 (n0x10e0-n0x1104) + + 0x0448d104, // c0x00aa (n0x1104-n0x1123) + + 0x044d1123, // c0x00ab (n0x1123-n0x1134) + + 0x045b5134, // c0x00ac (n0x1134-n0x116d) + + 0x045e916d, // c0x00ad (n0x116d-n0x117a) + + 0x0464917a, // c0x00ae (n0x117a-n0x1192) + + 0x046bd192, // c0x00af (n0x1192-n0x11af) + + 0x047451af, // c0x00b0 (n0x11af-n0x11d1) + + 0x047851d1, // c0x00b1 (n0x11d1-n0x11e1) + + 0x047f51e1, // c0x00b2 (n0x11e1-n0x11fd) + + 0x647f91fd, // c0x00b3 (n0x11fd-n0x11fe)* o + 0x647fd1fe, // c0x00b4 (n0x11fe-n0x11ff)* o + 0x248011ff, // c0x00b5 (n0x11ff-n0x1200) o + 0x04819200, // c0x00b6 (n0x1200-n0x1206) + + 0x04835206, // c0x00b7 (n0x1206-n0x120d) + + 0x0487920d, // c0x00b8 (n0x120d-n0x121e) + + 0x0488921e, // c0x00b9 (n0x121e-n0x1222) + + 0x048a1222, // c0x00ba (n0x1222-n0x1228) + + 0x04919228, // c0x00bb (n0x1228-n0x1246) + + 0x0492d246, // c0x00bc (n0x1246-n0x124b) + + 0x0494524b, // c0x00bd (n0x124b-n0x1251) + + 0x04969251, // c0x00be (n0x1251-n0x125a) + + 0x0497d25a, // c0x00bf (n0x125a-n0x125f) + + 0x0499525f, // c0x00c0 (n0x125f-n0x1265) + + 0x04999265, // c0x00c1 (n0x1265-n0x1266) + + 0x049d5266, // c0x00c2 (n0x1266-n0x1275) + + 0x049e9275, // c0x00c3 (n0x1275-n0x127a) + + 0x049f127a, // c0x00c4 (n0x127a-n0x127c) + + 0x049f927c, // c0x00c5 (n0x127c-n0x127e) + + 0x049fd27e, // c0x00c6 (n0x127e-n0x127f) + + 0x04a2127f, // c0x00c7 (n0x127f-n0x1288) + + 0x04a45288, // c0x00c8 (n0x1288-n0x1291) + + 0x04a5d291, // c0x00c9 (n0x1291-n0x1297) + + 0x04a65297, // c0x00ca (n0x1297-n0x1299) + + 0x04a69299, // c0x00cb (n0x1299-n0x129a) + + 0x04a8929a, // c0x00cc (n0x129a-n0x12a2) + + 0x04aa92a2, // c0x00cd (n0x12a2-n0x12aa) + + 0x04ac92aa, // c0x00ce (n0x12aa-n0x12b2) + + 0x04ae52b2, // c0x00cf (n0x12b2-n0x12b9) + + 0x04af52b9, // c0x00d0 (n0x12b9-n0x12bd) + + 0x04b092bd, // c0x00d1 (n0x12bd-n0x12c2) + + 0x04b112c2, // c0x00d2 (n0x12c2-n0x12c4) + + 0x04b252c4, // c0x00d3 (n0x12c4-n0x12c9) + + 0x04b352c9, // c0x00d4 (n0x12c9-n0x12cd) + + 0x04b392cd, // c0x00d5 (n0x12cd-n0x12ce) + + 0x04b552ce, // c0x00d6 (n0x12ce-n0x12d5) + + 0x053e52d5, // c0x00d7 (n0x12d5-n0x14f9) + + 0x0541d4f9, // c0x00d8 (n0x14f9-n0x1507) + + 0x05449507, // c0x00d9 (n0x1507-n0x1512) + + 0x05461512, // c0x00da (n0x1512-n0x1518) + + 0x05481518, // c0x00db (n0x1518-n0x1520) + + 0x65485520, // c0x00dc (n0x1520-n0x1521)* o + 0x054c9521, // c0x00dd (n0x1521-n0x1532) + + 0x054d1532, // c0x00de (n0x1532-n0x1534) + + 0x254d5534, // c0x00df (n0x1534-n0x1535) o + 0x254d9535, // c0x00e0 (n0x1535-n0x1536) o + 0x054dd536, // c0x00e1 (n0x1536-n0x1537) + + 0x055a1537, // c0x00e2 (n0x1537-n0x1568) + + 0x255a5568, // c0x00e3 (n0x1568-n0x1569) o + 0x255ad569, // c0x00e4 (n0x1569-n0x156b) o + 0x255b556b, // c0x00e5 (n0x156b-n0x156d) o + 0x255c156d, // c0x00e6 (n0x156d-n0x1570) o + 0x055e9570, // c0x00e7 (n0x1570-n0x157a) + + 0x0560d57a, // c0x00e8 (n0x157a-n0x1583) + + 0x05611583, // c0x00e9 (n0x1583-n0x1584) + + 0x0561d584, // c0x00ea (n0x1584-n0x1587) + + 0x06175587, // c0x00eb (n0x1587-n0x185d) + + 0x0617985d, // c0x00ec (n0x185d-n0x185e) + + 0x0617d85e, // c0x00ed (n0x185e-n0x185f) + + 0x2618185f, // c0x00ee (n0x185f-n0x1860) o + 0x06185860, // c0x00ef (n0x1860-n0x1861) + + 0x26189861, // c0x00f0 
(n0x1861-n0x1862) o + 0x0618d862, // c0x00f1 (n0x1862-n0x1863) + + 0x26199863, // c0x00f2 (n0x1863-n0x1866) o + 0x0619d866, // c0x00f3 (n0x1866-n0x1867) + + 0x061a1867, // c0x00f4 (n0x1867-n0x1868) + + 0x261a5868, // c0x00f5 (n0x1868-n0x1869) o + 0x061a9869, // c0x00f6 (n0x1869-n0x186a) + + 0x261b186a, // c0x00f7 (n0x186a-n0x186c) o + 0x061b586c, // c0x00f8 (n0x186c-n0x186d) + + 0x061b986d, // c0x00f9 (n0x186d-n0x186e) + + 0x261c986e, // c0x00fa (n0x186e-n0x1872) o + 0x061cd872, // c0x00fb (n0x1872-n0x1873) + + 0x061d1873, // c0x00fc (n0x1873-n0x1874) + + 0x061d5874, // c0x00fd (n0x1874-n0x1875) + + 0x061d9875, // c0x00fe (n0x1875-n0x1876) + + 0x261dd876, // c0x00ff (n0x1876-n0x1877) o + 0x061e1877, // c0x0100 (n0x1877-n0x1878) + + 0x061e5878, // c0x0101 (n0x1878-n0x1879) + + 0x061e9879, // c0x0102 (n0x1879-n0x187a) + + 0x061ed87a, // c0x0103 (n0x187a-n0x187b) + + 0x261f587b, // c0x0104 (n0x187b-n0x187d) o + 0x061f987d, // c0x0105 (n0x187d-n0x187e) + + 0x061fd87e, // c0x0106 (n0x187e-n0x187f) + + 0x0620187f, // c0x0107 (n0x187f-n0x1880) + + 0x26205880, // c0x0108 (n0x1880-n0x1881) o + 0x06209881, // c0x0109 (n0x1881-n0x1882) + + 0x26211882, // c0x010a (n0x1882-n0x1884) o + 0x26215884, // c0x010b (n0x1884-n0x1885) o + 0x06231885, // c0x010c (n0x1885-n0x188c) + + 0x0623d88c, // c0x010d (n0x188c-n0x188f) + + 0x0627d88f, // c0x010e (n0x188f-n0x189f) + + 0x0628189f, // c0x010f (n0x189f-n0x18a0) + + 0x062a58a0, // c0x0110 (n0x18a0-n0x18a9) + + 0x0638d8a9, // c0x0111 (n0x18a9-n0x18e3) + + 0x263958e3, // c0x0112 (n0x18e3-n0x18e5) o + 0x263998e5, // c0x0113 (n0x18e5-n0x18e6) o + 0x2639d8e6, // c0x0114 (n0x18e6-n0x18e7) o + 0x063a58e7, // c0x0115 (n0x18e7-n0x18e9) + + 0x064818e9, // c0x0116 (n0x18e9-n0x1920) + + 0x064ad920, // c0x0117 (n0x1920-n0x192b) + + 0x064cd92b, // c0x0118 (n0x192b-n0x1933) + + 0x064d9933, // c0x0119 (n0x1933-n0x1936) + + 0x064f9936, // c0x011a (n0x1936-n0x193e) + + 0x0653193e, // c0x011b (n0x193e-n0x194c) + + 0x067c594c, // c0x011c (n0x194c-n0x19f1) + + 0x068819f1, // c0x011d (n0x19f1-n0x1a20) + + 0x06895a20, // c0x011e (n0x1a20-n0x1a25) + + 0x068c9a25, // c0x011f (n0x1a25-n0x1a32) + + 0x068e5a32, // c0x0120 (n0x1a32-n0x1a39) + + 0x06901a39, // c0x0121 (n0x1a39-n0x1a40) + + 0x06925a40, // c0x0122 (n0x1a40-n0x1a49) + + 0x0693da49, // c0x0123 (n0x1a49-n0x1a4f) + + 0x06959a4f, // c0x0124 (n0x1a4f-n0x1a56) + + 0x0697da56, // c0x0125 (n0x1a56-n0x1a5f) + + 0x0698da5f, // c0x0126 (n0x1a5f-n0x1a63) + + 0x069bda63, // c0x0127 (n0x1a63-n0x1a6f) + + 0x069d9a6f, // c0x0128 (n0x1a6f-n0x1a76) + + 0x06be9a76, // c0x0129 (n0x1a76-n0x1afa) + + 0x06c0dafa, // c0x012a (n0x1afa-n0x1b03) + + 0x06c2db03, // c0x012b (n0x1b03-n0x1b0b) + + 0x06c41b0b, // c0x012c (n0x1b0b-n0x1b10) + + 0x06c55b10, // c0x012d (n0x1b10-n0x1b15) + + 0x06c75b15, // c0x012e (n0x1b15-n0x1b1d) + + 0x06d19b1d, // c0x012f (n0x1b1d-n0x1b46) + + 0x06d35b46, // c0x0130 (n0x1b46-n0x1b4d) + + 0x06d4db4d, // c0x0131 (n0x1b4d-n0x1b53) + + 0x06d51b53, // c0x0132 (n0x1b53-n0x1b54) + + 0x06d55b54, // c0x0133 (n0x1b54-n0x1b55) + + 0x06d69b55, // c0x0134 (n0x1b55-n0x1b5a) + + 0x06d89b5a, // c0x0135 (n0x1b5a-n0x1b62) + + 0x06d95b62, // c0x0136 (n0x1b62-n0x1b65) + + 0x06dc5b65, // c0x0137 (n0x1b65-n0x1b71) + + 0x06e45b71, // c0x0138 (n0x1b71-n0x1b91) + + 0x06e59b91, // c0x0139 (n0x1b91-n0x1b96) + + 0x06e5db96, // c0x013a (n0x1b96-n0x1b97) + + 0x06e75b97, // c0x013b (n0x1b97-n0x1b9d) + + 0x06e81b9d, // c0x013c (n0x1b9d-n0x1ba0) + + 0x06e85ba0, // c0x013d (n0x1ba0-n0x1ba1) + + 0x06ea1ba1, // c0x013e (n0x1ba1-n0x1ba8) + + 0x06eddba8, // c0x013f 
(n0x1ba8-n0x1bb7) + + 0x06ee1bb7, // c0x0140 (n0x1bb7-n0x1bb8) + + 0x06f01bb8, // c0x0141 (n0x1bb8-n0x1bc0) + + 0x06f51bc0, // c0x0142 (n0x1bc0-n0x1bd4) + + 0x06f69bd4, // c0x0143 (n0x1bd4-n0x1bda) + + 0x06fbdbda, // c0x0144 (n0x1bda-n0x1bef) + + 0x06fc1bef, // c0x0145 (n0x1bef-n0x1bf0) + + 0x06fc5bf0, // c0x0146 (n0x1bf0-n0x1bf1) + + 0x07009bf1, // c0x0147 (n0x1bf1-n0x1c02) + + 0x07019c02, // c0x0148 (n0x1c02-n0x1c06) + + 0x07051c06, // c0x0149 (n0x1c06-n0x1c14) + + 0x07081c14, // c0x014a (n0x1c14-n0x1c20) + + 0x071b9c20, // c0x014b (n0x1c20-n0x1c6e) + + 0x071ddc6e, // c0x014c (n0x1c6e-n0x1c77) + + 0x07209c77, // c0x014d (n0x1c77-n0x1c82) + + 0x0720dc82, // c0x014e (n0x1c82-n0x1c83) + + 0x07211c83, // c0x014f (n0x1c83-n0x1c84) + + 0x0730dc84, // c0x0150 (n0x1c84-n0x1cc3) + + 0x07319cc3, // c0x0151 (n0x1cc3-n0x1cc6) + + 0x07325cc6, // c0x0152 (n0x1cc6-n0x1cc9) + + 0x07331cc9, // c0x0153 (n0x1cc9-n0x1ccc) + + 0x0733dccc, // c0x0154 (n0x1ccc-n0x1ccf) + + 0x07349ccf, // c0x0155 (n0x1ccf-n0x1cd2) + + 0x07355cd2, // c0x0156 (n0x1cd2-n0x1cd5) + + 0x07361cd5, // c0x0157 (n0x1cd5-n0x1cd8) + + 0x0736dcd8, // c0x0158 (n0x1cd8-n0x1cdb) + + 0x07379cdb, // c0x0159 (n0x1cdb-n0x1cde) + + 0x07385cde, // c0x015a (n0x1cde-n0x1ce1) + + 0x07391ce1, // c0x015b (n0x1ce1-n0x1ce4) + + 0x0739dce4, // c0x015c (n0x1ce4-n0x1ce7) + + 0x073a9ce7, // c0x015d (n0x1ce7-n0x1cea) + + 0x073b1cea, // c0x015e (n0x1cea-n0x1cec) + + 0x073bdcec, // c0x015f (n0x1cec-n0x1cef) + + 0x073c9cef, // c0x0160 (n0x1cef-n0x1cf2) + + 0x073d5cf2, // c0x0161 (n0x1cf2-n0x1cf5) + + 0x073e1cf5, // c0x0162 (n0x1cf5-n0x1cf8) + + 0x073edcf8, // c0x0163 (n0x1cf8-n0x1cfb) + + 0x073f9cfb, // c0x0164 (n0x1cfb-n0x1cfe) + + 0x07405cfe, // c0x0165 (n0x1cfe-n0x1d01) + + 0x07411d01, // c0x0166 (n0x1d01-n0x1d04) + + 0x0741dd04, // c0x0167 (n0x1d04-n0x1d07) + + 0x07429d07, // c0x0168 (n0x1d07-n0x1d0a) + + 0x07435d0a, // c0x0169 (n0x1d0a-n0x1d0d) + + 0x07441d0d, // c0x016a (n0x1d0d-n0x1d10) + + 0x0744dd10, // c0x016b (n0x1d10-n0x1d13) + + 0x07459d13, // c0x016c (n0x1d13-n0x1d16) + + 0x07465d16, // c0x016d (n0x1d16-n0x1d19) + + 0x07471d19, // c0x016e (n0x1d19-n0x1d1c) + + 0x0747dd1c, // c0x016f (n0x1d1c-n0x1d1f) + + 0x07485d1f, // c0x0170 (n0x1d1f-n0x1d21) + + 0x07491d21, // c0x0171 (n0x1d21-n0x1d24) + + 0x0749dd24, // c0x0172 (n0x1d24-n0x1d27) + + 0x074a9d27, // c0x0173 (n0x1d27-n0x1d2a) + + 0x074b5d2a, // c0x0174 (n0x1d2a-n0x1d2d) + + 0x074c1d2d, // c0x0175 (n0x1d2d-n0x1d30) + + 0x074cdd30, // c0x0176 (n0x1d30-n0x1d33) + + 0x074d9d33, // c0x0177 (n0x1d33-n0x1d36) + + 0x074e5d36, // c0x0178 (n0x1d36-n0x1d39) + + 0x074f1d39, // c0x0179 (n0x1d39-n0x1d3c) + + 0x074fdd3c, // c0x017a (n0x1d3c-n0x1d3f) + + 0x07509d3f, // c0x017b (n0x1d3f-n0x1d42) + + 0x07515d42, // c0x017c (n0x1d42-n0x1d45) + + 0x07521d45, // c0x017d (n0x1d45-n0x1d48) + + 0x07529d48, // c0x017e (n0x1d48-n0x1d4a) + + 0x07535d4a, // c0x017f (n0x1d4a-n0x1d4d) + + 0x07541d4d, // c0x0180 (n0x1d4d-n0x1d50) + + 0x0754dd50, // c0x0181 (n0x1d50-n0x1d53) + + 0x07559d53, // c0x0182 (n0x1d53-n0x1d56) + + 0x07565d56, // c0x0183 (n0x1d56-n0x1d59) + + 0x07571d59, // c0x0184 (n0x1d59-n0x1d5c) + + 0x0757dd5c, // c0x0185 (n0x1d5c-n0x1d5f) + + 0x07589d5f, // c0x0186 (n0x1d5f-n0x1d62) + + 0x0758dd62, // c0x0187 (n0x1d62-n0x1d63) + + 0x07599d63, // c0x0188 (n0x1d63-n0x1d66) + + 0x075b1d66, // c0x0189 (n0x1d66-n0x1d6c) + + 0x075b5d6c, // c0x018a (n0x1d6c-n0x1d6d) + + 0x075c5d6d, // c0x018b (n0x1d6d-n0x1d71) + + 0x075ddd71, // c0x018c (n0x1d71-n0x1d77) + + 0x07621d77, // c0x018d (n0x1d77-n0x1d88) + + 0x07635d88, // c0x018e 
(n0x1d88-n0x1d8d) + + 0x07669d8d, // c0x018f (n0x1d8d-n0x1d9a) + + 0x07679d9a, // c0x0190 (n0x1d9a-n0x1d9e) + + 0x07695d9e, // c0x0191 (n0x1d9e-n0x1da5) + + 0x076adda5, // c0x0192 (n0x1da5-n0x1dab) + + 0x276f1dab, // c0x0193 (n0x1dab-n0x1dbc) o + 0x076f5dbc, // c0x0194 (n0x1dbc-n0x1dbd) + } -// max children 374 (capacity 511) -// max text offset 24971 (capacity 32767) +// max children 404 (capacity 511) +// max text offset 26074 (capacity 32767) // max text length 36 (capacity 63) -// max hi 7106 (capacity 16383) -// max lo 7100 (capacity 16383) +// max hi 7613 (capacity 16383) +// max lo 7612 (capacity 16383) === modified file 'src/golang.org/x/net/publicsuffix/table_test.go' --- src/golang.org/x/net/publicsuffix/table_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/publicsuffix/table_test.go 2016-03-22 15:18:22 +0000 @@ -552,7 +552,19 @@ "org.cw", "cx", "gov.cx", - "*.cy", + "ac.cy", + "biz.cy", + "com.cy", + "ekloges.cy", + "gov.cy", + "ltd.cy", + "name.cy", + "net.cy", + "org.cy", + "parliament.cy", + "press.cy", + "pro.cy", + "tm.cy", "cz", "de", "dj", @@ -633,6 +645,7 @@ "biz.et", "name.et", "info.et", + "net.et", "eu", "fi", "aland.fi", @@ -694,6 +707,11 @@ "edu.gi", "org.gi", "gl", + "co.gl", + "com.gl", + "edu.gl", + "net.gl", + "org.gl", "gm", "gn", "ac.gn", @@ -3192,6 +3210,7 @@ "assn.lk", "grp.lk", "hotel.lk", + "ac.lk", "lr", "com.lr", "edu.lr", @@ -4790,9 +4809,6 @@ "com.pl", "net.pl", "org.pl", - "info.pl", - "waw.pl", - "gov.pl", "aid.pl", "agro.pl", "atm.pl", @@ -4801,6 +4817,7 @@ "edu.pl", "gmina.pl", "gsm.pl", + "info.pl", "mail.pl", "miasta.pl", "media.pl", @@ -4822,15 +4839,54 @@ "tourism.pl", "travel.pl", "turystyka.pl", - "uw.gov.pl", + "gov.pl", + "ap.gov.pl", + "ic.gov.pl", + "is.gov.pl", + "us.gov.pl", + "kmpsp.gov.pl", + "kppsp.gov.pl", + "kwpsp.gov.pl", + "psp.gov.pl", + "wskr.gov.pl", + "kwp.gov.pl", + "mw.gov.pl", + "ug.gov.pl", "um.gov.pl", - "ug.gov.pl", + "umig.gov.pl", + "ugim.gov.pl", "upow.gov.pl", + "uw.gov.pl", "starostwo.gov.pl", + "pa.gov.pl", + "po.gov.pl", + "psse.gov.pl", + "pup.gov.pl", + "rzgw.gov.pl", + "sa.gov.pl", "so.gov.pl", "sr.gov.pl", - "po.gov.pl", - "pa.gov.pl", + "wsa.gov.pl", + "sko.gov.pl", + "uzs.gov.pl", + "wiih.gov.pl", + "winb.gov.pl", + "pinb.gov.pl", + "wios.gov.pl", + "witd.gov.pl", + "wzmiuw.gov.pl", + "piw.gov.pl", + "wiw.gov.pl", + "griw.gov.pl", + "wif.gov.pl", + "oum.gov.pl", + "sdn.gov.pl", + "zp.gov.pl", + "uppo.gov.pl", + "mup.gov.pl", + "wuoz.gov.pl", + "konsulat.gov.pl", + "oirm.gov.pl", "augustow.pl", "babia-gora.pl", "bedzin.pl", @@ -4937,6 +4993,7 @@ "walbrzych.pl", "warmia.pl", "warszawa.pl", + "waw.pl", "wegrow.pl", "wielun.pl", "wlocl.pl", @@ -5305,6 +5362,38 @@ "saotome.st", "store.st", "su", + "adygeya.su", + "arkhangelsk.su", + "balashov.su", + "bashkiria.su", + "bryansk.su", + "dagestan.su", + "grozny.su", + "ivanovo.su", + "kalmykia.su", + "kaluga.su", + "karelia.su", + "khakassia.su", + "krasnodar.su", + "kurgan.su", + "lenug.su", + "mordovia.su", + "msk.su", + "murmansk.su", + "nalchik.su", + "nov.su", + "obninsk.su", + "penza.su", + "pokrovsk.su", + "sochi.su", + "spb.su", + "togliatti.su", + "troitsk.su", + "tula.su", + "tuva.su", + "vladikavkaz.su", + "vladimir.su", + "vologda.su", "sv", "com.sv", "edu.sv", @@ -5860,12 +5949,15 @@ "edu.ws", "yt", "xn--mgbaam7a8h", + "xn--y9a3aq", "xn--54b7fta0cc", + "xn--90ais", "xn--fiqs8s", "xn--fiqz9s", "xn--lgbbat1ad8j", "xn--wgbh1c", "xn--node", + "xn--qxam", "xn--j6w193g", "xn--h2brj9c", "xn--mgbbh1a71e", @@ -5876,15 +5968,21 @@ "xn--xkc2dl3a5ee0h", 
"xn--mgba3a4f16a", "xn--mgba3a4fra", + "xn--mgbtx2b", "xn--mgbayh7gpa", "xn--3e0b707e", "xn--80ao21a", "xn--fzc2c9e2c", "xn--xkc2al3hye2a", "xn--mgbc0a9azcg", + "xn--d1alf", "xn--l1acc", + "xn--mix891f", + "xn--mix082f", "xn--mgbx4cd0ab", "xn--mgb9awbf", + "xn--mgbai9azgqp6j", + "xn--mgbai9a5eva00b", "xn--ygbi2ammx", "xn--90a3ac", "xn--o1ac.xn--90a3ac", @@ -5899,10 +5997,11 @@ "xn--mgberp4a5d4a87g", "xn--mgbqly7c0a67fbc", "xn--mgbqly7cvafr", + "xn--mgbpl2fh", + "xn--yfro4i67o", + "xn--clchc0ea0b2g2a9gcd", "xn--ogbpf8fl", "xn--mgbtf8fl", - "xn--yfro4i67o", - "xn--clchc0ea0b2g2a9gcd", "xn--o3cw4h", "xn--pgbs0dh", "xn--kpry57d", @@ -5912,32 +6011,64 @@ "xn--mgb2ddes", "xxx", "*.ye", - "*.za", + "ac.za", + "agrica.za", + "alt.za", + "co.za", + "edu.za", + "gov.za", + "grondar.za", + "law.za", + "mil.za", + "net.za", + "ngo.za", + "nis.za", + "nom.za", + "org.za", + "school.za", + "tm.za", + "web.za", "*.zm", "*.zw", + "aaa", + "aarp", "abb", "abbott", + "able", "abogado", "academy", "accenture", "accountant", "accountants", + "aco", "active", "actor", "ads", "adult", + "aeg", + "aetna", "afl", "africa", + "africamagic", + "agakhan", "agency", "aig", "airforce", "airtel", + "akdn", + "alibaba", + "alipay", "allfinanz", + "ally", "alsace", + "amica", "amsterdam", "analytics", "android", + "anquan", "apartments", + "app", + "apple", "aquarelle", "aramco", "archi", @@ -5946,28 +6077,38 @@ "associates", "attorney", "auction", + "audi", + "audible", "audio", "author", "auto", "autos", + "avianca", + "aws", "axa", "azure", + "baby", + "baidu", "band", "bank", "bar", "barcelona", "barclaycard", "barclays", + "barefoot", "bargains", "bauhaus", "bayern", "bbc", "bbva", + "bcg", "bcn", + "beats", "beer", "bentley", "berlin", "best", + "bet", "bharti", "bible", "bid", @@ -5977,6 +6118,7 @@ "bio", "black", "blackfriday", + "blog", "bloomberg", "blue", "bms", @@ -5987,11 +6129,16 @@ "bom", "bond", "boo", + "boots", + "bosch", + "bostik", "bot", "boutique", "bradesco", "bridgestone", + "broadway", "broker", + "brother", "brussels", "budapest", "build", @@ -6001,6 +6148,7 @@ "buzz", "bzh", "cab", + "cafe", "cal", "call", "camera", @@ -6009,6 +6157,7 @@ "canon", "capetown", "capital", + "car", "caravan", "cards", "care", @@ -6022,19 +6171,26 @@ "catering", "cba", "cbn", + "cbre", + "ceb", "center", "ceo", "cern", "cfa", "cfd", + "chanel", "channel", + "chase", "chat", "cheap", + "chintai", "chloe", "christmas", "chrome", "church", + "cipriani", "circle", + "cisco", "citic", "city", "cityeats", @@ -6043,7 +6199,9 @@ "click", "clinic", "clothing", + "cloud", "club", + "clubmed", "coach", "codes", "coffee", @@ -6053,17 +6211,23 @@ "community", "company", "computer", + "comsec", "condos", "construction", "consulting", + "contact", "contractors", "cooking", + "cookingchannel", "cool", "corsica", "country", + "coupon", + "coupons", "courses", "credit", "creditcard", + "creditunion", "cricket", "crown", "crs", @@ -6071,6 +6235,7 @@ "csc", "cuisinella", "cymru", + "cyou", "dabur", "dad", "dance", @@ -6079,10 +6244,14 @@ "datsun", "day", "dclk", + "dds", + "deal", + "dealer", "deals", "degree", "delivery", "dell", + "delta", "democrat", "dental", "dentist", @@ -6101,7 +6270,14 @@ "doha", "domains", "doosan", + "dot", "download", + "drive", + "dstv", + "dtv", + "dubai", + "dunlop", + "dupont", "durban", "dvag", "earth", @@ -6126,10 +6302,13 @@ "exchange", "expert", "exposed", + "express", + "extraspace", "fage", "fail", "fairwinds", "faith", + "family", "fan", "fans", "farm", @@ -6137,38 +6316,51 @@ "fast", "feedback", 
"ferrero", + "film", "final", "finance", "financial", + "fire", "firestone", "firmdale", "fish", "fishing", "fit", "fitness", + "flickr", "flights", "florist", "flowers", "flsmidth", "fly", "foo", + "foodnetwork", "football", "ford", "forex", "forsale", + "forum", "foundation", "frl", "frogans", + "frontdoor", + "frontier", "fund", "furniture", "futbol", + "fyi", "gal", "gallery", + "gallo", + "gallup", + "game", + "games", "garden", "gbiz", "gdn", "gea", "gent", + "genting", "ggee", "gift", "gifts", @@ -6181,13 +6373,17 @@ "gmail", "gmo", "gmx", + "gold", "goldpoint", "golf", "goo", + "goodyear", "goog", "google", "gop", "got", + "gotv", + "grainger", "graphics", "gratis", "green", @@ -6201,28 +6397,40 @@ "hamburg", "hangout", "haus", + "hdfcbank", + "health", "healthcare", "help", + "helsinki", "here", "hermes", + "hgtv", "hiphop", "hitachi", "hiv", + "hkt", + "hockey", "holdings", "holiday", + "homedepot", "homes", "honda", "horse", "host", "hosting", + "hoteles", "hotmail", "house", "how", "hsbc", + "htc", "ibm", + "icbc", "ice", + "icu", "ifm", "iinet", + "imdb", "immo", "immobilien", "industries", @@ -6230,11 +6438,13 @@ "ing", "ink", "institute", + "insurance", "insure", "international", "investments", "ipiranga", "irish", + "iselect", "ist", "istanbul", "itau", @@ -6242,38 +6452,60 @@ "jaguar", "java", "jcb", + "jcp", "jetzt", + "jewelry", + "jio", "jlc", + "jll", + "jmp", + "jnj", "joburg", "jot", "joy", + "jpmorgan", "jprs", "juegos", "kaufen", "kddi", + "kerryhotels", + "kerrylogistics", + "kerryproperties", "kfh", "kim", "kinder", + "kindle", "kitchen", "kiwi", "koeln", + "komatsu", + "kpmg", + "kpn", "krd", "kred", + "kuokgroup", + "kyknet", "kyoto", "lacaixa", + "lamborghini", + "lancaster", "land", "landrover", + "lasalle", "lat", "latrobe", + "law", "lawyer", "lds", "lease", "leclerc", "legal", + "lexus", "lgbt", "liaison", "lidl", "life", + "lifeinsurance", "lifestyle", "lighting", "like", @@ -6282,12 +6514,18 @@ "lincoln", "linde", "link", + "lipsy", "live", + "lixil", "loan", "loans", + "locker", + "locus", + "lol", "london", "lotte", "lotto", + "love", "ltd", "ltda", "lupin", @@ -6296,6 +6534,7 @@ "madrid", "maif", "maison", + "makeup", "man", "management", "mango", @@ -6303,70 +6542,111 @@ "marketing", "markets", "marriott", + "mba", "media", "meet", "melbourne", "meme", "memorial", + "men", "menu", "meo", + "metlife", "miami", "microsoft", "mini", + "mit", + "mlb", + "mls", "mma", + "mnet", "mobily", "moda", "moe", "moi", + "mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow", + "moto", "motorcycles", "mov", + "movie", "movistar", "mtn", "mtpc", + "mtr", + "multichoice", + "mutual", + "mutuelle", + "mzansimagic", "nadex", "nagoya", + "naspers", + "natura", "navy", + "nec", "netbank", + "netflix", "network", "neustar", "new", "news", + "next", + "nextdirect", "nexus", "ngo", "nhk", "nico", + "nikon", "ninja", "nissan", + "nokia", + "northwesternmutual", "norton", + "now", "nowruz", + "nowtv", "nra", "nrw", "ntt", "nyc", "obi", + "observer", + "office", "okinawa", + "olayan", + "olayangroup", + "ollo", + "omega", "one", "ong", "onl", + "online", "ooo", "oracle", + "orange", "organic", + "orientexpress", "osaka", "otsuka", + "ott", "ovh", "page", + "pamperedchef", "panerai", "paris", "pars", "partners", "parts", "party", + "passagens", + "payu", + "pccw", + "pet", "pharmacy", "philips", "photo", @@ -6377,35 +6657,47 @@ "pics", "pictet", "pictures", + "pid", "pin", + "ping", "pink", "pizza", "place", + "play", + "playstation", "plumbing", + "plus", + 
"pnc", "pohl", "poker", "porn", "praxi", "press", + "prime", "prod", "productions", "prof", "promo", "properties", "property", + "protection", "pub", "qpon", "quebec", + "quest", "racing", "read", "realtor", + "realty", "recipes", "red", "redstone", + "redumbrella", "rehab", "reise", "reisen", "reit", + "reliance", "ren", "rent", "rentals", @@ -6416,8 +6708,11 @@ "restaurant", "review", "reviews", + "rexroth", "rich", + "richardli", "ricoh", + "ril", "rio", "rip", "rocher", @@ -6426,9 +6721,12 @@ "room", "rsvp", "ruhr", + "run", + "rwe", "ryukyu", "saarland", "safe", + "safety", "sakura", "sale", "salon", @@ -6439,7 +6737,10 @@ "sap", "sapo", "sarl", + "sas", + "save", "saxo", + "sbi", "sbs", "sca", "scb", @@ -6452,6 +6753,7 @@ "scor", "scot", "seat", + "security", "seek", "sener", "services", @@ -6459,72 +6761,122 @@ "sex", "sexy", "sharp", + "shaw", "shia", "shiksha", "shoes", + "shouji", + "show", "shriram", + "silk", + "sina", "singles", + "site", + "ski", + "skin", "sky", "skype", "smile", + "sncf", + "soccer", "social", + "softbank", "software", "sohu", "solar", "solutions", + "song", + "sony", "soy", "space", "spiegel", + "spot", "spreadbetting", + "srl", "stada", + "star", + "starhub", + "statebank", "statoil", "stc", "stcgroup", "stockholm", + "storage", + "store", + "studio", "study", "style", + "sucks", + "supersport", "supplies", "supply", "support", "surf", "surgery", "suzuki", + "swatch", "swiss", "sydney", "symantec", "systems", "tab", "taipei", + "talk", + "taobao", + "tatamotors", "tatar", "tattoo", "tax", + "taxi", "tci", + "tdk", + "team", + "tech", "technology", + "telecity", "telefonica", "temasek", "tennis", + "teva", + "thd", + "theater", + "theatre", + "theguardian", + "tickets", "tienda", + "tiffany", "tips", "tires", "tirol", + "tmall", "today", "tokyo", "tools", "top", "toray", "toshiba", + "tours", "town", + "toyota", "toys", "trade", "trading", "training", + "travelchannel", + "travelers", + "travelersinsurance", "trust", + "trv", + "tube", "tui", + "tunes", "tushu", + "tvs", "ubs", "university", "uno", "uol", + "ups", "vacations", "vana", "vegas", @@ -6533,7 +6885,11 @@ "vet", "viajes", "video", + "vig", + "viking", "villas", + "vin", + "vip", "virgin", "vision", "vista", @@ -6541,25 +6897,35 @@ "viva", "vlaanderen", "vodka", + "volkswagen", "vote", "voting", "voto", "voyage", + "vuelos", "wales", "walter", "wang", "wanggou", + "warman", "watch", + "watches", + "weather", + "weatherchannel", "webcam", + "weber", "website", "wed", "wedding", + "weibo", + "weir", "whoswho", "wien", "wiki", "williamhill", "win", "windows", + "wine", "wme", "work", "works", @@ -6568,23 +6934,36 @@ "wtf", "xbox", "xerox", + "xihuan", "xin", + "xn--11b4c3d", + "xn--1ck2e1b", "xn--1qqw23a", "xn--30rr7y", "xn--3bst00m", "xn--3ds443g", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", "xn--45q11c", "xn--4gbrim", "xn--55qw42g", "xn--55qx5d", + "xn--5tzm5g", "xn--6frz82g", "xn--6qq986b3xl", "xn--80adxhks", "xn--80asehdb", "xn--80aswg", + "xn--8y0a063a", + "xn--9dbq2a", "xn--9et52u", + "xn--9krt00a", "xn--b4w605ferd", + "xn--bck1b9a5dre4c", "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", "xn--cg4bki", "xn--czr694b", "xn--czrs0t", @@ -6592,20 +6971,32 @@ "xn--d1acj3b", "xn--eckvdtc9d", "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", "xn--fiq228c5hs", "xn--fiq64b", "xn--fjq720a", "xn--flw351e", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", "xn--hxt814e", "xn--i1b6b1a6a2e", "xn--imr513n", "xn--io0a7i", + "xn--j1aef", + "xn--jlq61u9w7b", + "xn--jvr189m", 
"xn--kcrx77d1x4a", + "xn--kpu716f", "xn--kput3i", "xn--mgba3a3ejt", + "xn--mgba7c0bbn0a", "xn--mgbab2bd", "xn--mgbb9fbpob", "xn--mgbt3dhd", + "xn--mk1bu44c", "xn--mxtq1m", "xn--ngbc5azd", "xn--ngbe9e0a", @@ -6613,28 +7004,40 @@ "xn--nqv7fs00ema", "xn--nyqy26a", "xn--p1acf", + "xn--pbt977c", + "xn--pssy2u", "xn--q9jyb4c", "xn--qcka1pmc", "xn--rhqv96g", + "xn--rovu88b", "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", "xn--unup4y", "xn--vermgensberater-ctb", "xn--vermgensberatung-pwb", "xn--vhquv", "xn--vuq861b", + "xn--w4r85el8fhu5dnra", "xn--xhq521b", "xn--zfr164b", + "xperia", "xyz", "yachts", + "yahoo", "yamaxun", "yandex", "yodobashi", "yoga", "yokohama", + "you", "youtube", + "yun", + "zappos", "zara", "zero", "zip", + "zippo", "zone", "zuerich", "cloudfront.net", @@ -6657,24 +7060,20 @@ "elasticbeanstalk.com", "elb.amazonaws.com", "s3.amazonaws.com", - "s3-us-west-2.amazonaws.com", - "s3-us-west-1.amazonaws.com", - "s3-eu-west-1.amazonaws.com", + "s3-ap-northeast-1.amazonaws.com", "s3-ap-southeast-1.amazonaws.com", "s3-ap-southeast-2.amazonaws.com", - "s3-ap-northeast-1.amazonaws.com", + "s3-external-1.amazonaws.com", + "s3-external-2.amazonaws.com", + "s3-fips-us-gov-west-1.amazonaws.com", + "s3-eu-central-1.amazonaws.com", + "s3-eu-west-1.amazonaws.com", "s3-sa-east-1.amazonaws.com", "s3-us-gov-west-1.amazonaws.com", - "s3-fips-us-gov-west-1.amazonaws.com", - "s3-website-us-east-1.amazonaws.com", - "s3-website-us-west-2.amazonaws.com", - "s3-website-us-west-1.amazonaws.com", - "s3-website-eu-west-1.amazonaws.com", - "s3-website-ap-southeast-1.amazonaws.com", - "s3-website-ap-southeast-2.amazonaws.com", - "s3-website-ap-northeast-1.amazonaws.com", - "s3-website-sa-east-1.amazonaws.com", - "s3-website-us-gov-west-1.amazonaws.com", + "s3-us-west-1.amazonaws.com", + "s3-us-west-2.amazonaws.com", + "s3.cn-north-1.amazonaws.com.cn", + "s3.eu-central-1.amazonaws.com", "betainabox.com", "ae.org", "ar.com", @@ -6713,10 +7112,17 @@ "cloudcontrolled.com", "cloudcontrolapp.com", "co.ca", + "c.cdn77.org", + "cdn77-ssl.net", + "r.cdn77.net", + "rsc.cdn77.org", + "ssl.origin.cdn77-secure.org", "co.nl", "co.no", + "*.platform.sh", "cupcake.is", "dreamhosters.com", + "duckdns.org", "dyndns-at-home.com", "dyndns-at-work.com", "dyndns-blog.com", @@ -6996,6 +7402,62 @@ "webhop.org", "worse-than.tv", "writesthisblog.com", + "eu.org", + "al.eu.org", + "asso.eu.org", + "at.eu.org", + "au.eu.org", + "be.eu.org", + "bg.eu.org", + "ca.eu.org", + "cd.eu.org", + "ch.eu.org", + "cn.eu.org", + "cy.eu.org", + "cz.eu.org", + "de.eu.org", + "dk.eu.org", + "edu.eu.org", + "ee.eu.org", + "es.eu.org", + "fi.eu.org", + "fr.eu.org", + "gr.eu.org", + "hr.eu.org", + "hu.eu.org", + "ie.eu.org", + "il.eu.org", + "in.eu.org", + "int.eu.org", + "is.eu.org", + "it.eu.org", + "jp.eu.org", + "kr.eu.org", + "lt.eu.org", + "lu.eu.org", + "lv.eu.org", + "mc.eu.org", + "me.eu.org", + "mk.eu.org", + "mt.eu.org", + "my.eu.org", + "net.eu.org", + "ng.eu.org", + "nl.eu.org", + "no.eu.org", + "nz.eu.org", + "paris.eu.org", + "pl.eu.org", + "pt.eu.org", + "q-a.eu.org", + "ro.eu.org", + "ru.eu.org", + "se.eu.org", + "si.eu.org", + "sk.eu.org", + "tr.eu.org", + "uk.eu.org", + "us.eu.org", "a.ssl.fastly.net", "b.ssl.fastly.net", "global.ssl.fastly.net", @@ -7003,26 +7465,43 @@ "global.prod.fastly.net", "firebaseapp.com", "flynnhub.com", + "service.gov.uk", "github.io", "githubusercontent.com", "ro.com", "appspot.com", "blogspot.ae", + "blogspot.al", + "blogspot.am", + "blogspot.ba", "blogspot.be", + "blogspot.bg", "blogspot.bj", "blogspot.ca", 
"blogspot.cf", "blogspot.ch", + "blogspot.cl", "blogspot.co.at", + "blogspot.co.id", "blogspot.co.il", + "blogspot.co.ke", "blogspot.co.nz", "blogspot.co.uk", + "blogspot.co.za", "blogspot.com", "blogspot.com.ar", "blogspot.com.au", "blogspot.com.br", + "blogspot.com.by", + "blogspot.com.co", + "blogspot.com.cy", + "blogspot.com.ee", + "blogspot.com.eg", "blogspot.com.es", + "blogspot.com.mt", + "blogspot.com.ng", "blogspot.com.tr", + "blogspot.com.uy", "blogspot.cv", "blogspot.cz", "blogspot.de", @@ -7031,25 +7510,40 @@ "blogspot.fr", "blogspot.gr", "blogspot.hk", + "blogspot.hr", "blogspot.hu", "blogspot.ie", "blogspot.in", + "blogspot.is", "blogspot.it", "blogspot.jp", "blogspot.kr", + "blogspot.li", + "blogspot.lt", + "blogspot.lu", + "blogspot.md", + "blogspot.mk", "blogspot.mr", "blogspot.mx", + "blogspot.my", "blogspot.nl", "blogspot.no", + "blogspot.pe", "blogspot.pt", + "blogspot.qa", "blogspot.re", "blogspot.ro", + "blogspot.rs", "blogspot.ru", "blogspot.se", "blogspot.sg", + "blogspot.si", "blogspot.sk", + "blogspot.sn", "blogspot.td", "blogspot.tw", + "blogspot.ug", + "blogspot.vn", "codespot.com", "googleapis.com", "googlecode.com", @@ -7064,6 +7558,7 @@ "azurewebsites.net", "azure-mobile.net", "cloudapp.net", + "4u.com", "nfshost.com", "nyc.mn", "nid.io", @@ -7075,9 +7570,11 @@ "poznan.pl", "wroc.pl", "zakopane.pl", + "priv.at", "rhcloud.com", - "service.gov.uk", - "priv.at", + "sinaapp.com", + "vipsinaapp.com", + "1kapp.com", "gda.pl", "gdansk.pl", "gdynia.pl", @@ -7093,40 +7590,56 @@ } var nodeLabels = [...]string{ + "aaa", + "aarp", "abb", "abbott", + "able", "abogado", "ac", "academy", "accenture", "accountant", "accountants", + "aco", "active", "actor", "ad", "ads", "adult", "ae", + "aeg", "aero", + "aetna", "af", "afl", "africa", + "africamagic", "ag", + "agakhan", "agency", "ai", "aig", "airforce", "airtel", + "akdn", "al", + "alibaba", + "alipay", "allfinanz", + "ally", "alsace", "am", + "amica", "amsterdam", "an", "analytics", "android", + "anquan", "ao", "apartments", + "app", + "apple", "aq", "aquarelle", "ar", @@ -7142,35 +7655,45 @@ "attorney", "au", "auction", + "audi", + "audible", "audio", "author", "auto", "autos", + "avianca", "aw", + "aws", "ax", "axa", "az", "azure", "ba", + "baby", + "baidu", "band", "bank", "bar", "barcelona", "barclaycard", "barclays", + "barefoot", "bargains", "bauhaus", "bayern", "bb", "bbc", "bbva", + "bcg", "bcn", "bd", "be", + "beats", "beer", "bentley", "berlin", "best", + "bet", "bf", "bg", "bh", @@ -7186,6 +7709,7 @@ "bj", "black", "blackfriday", + "blog", "bloomberg", "blue", "bm", @@ -7199,12 +7723,17 @@ "bom", "bond", "boo", + "boots", + "bosch", + "bostik", "bot", "boutique", "br", "bradesco", "bridgestone", + "broadway", "broker", + "brother", "brussels", "bs", "bt", @@ -7221,6 +7750,7 @@ "bzh", "ca", "cab", + "cafe", "cal", "call", "camera", @@ -7229,6 +7759,7 @@ "canon", "capetown", "capital", + "car", "caravan", "cards", "care", @@ -7243,8 +7774,10 @@ "catering", "cba", "cbn", + "cbre", "cc", "cd", + "ceb", "center", "ceo", "cern", @@ -7253,15 +7786,20 @@ "cfd", "cg", "ch", + "chanel", "channel", + "chase", "chat", "cheap", + "chintai", "chloe", "christmas", "chrome", "church", "ci", + "cipriani", "circle", + "cisco", "citic", "city", "cityeats", @@ -7272,7 +7810,9 @@ "click", "clinic", "clothing", + "cloud", "club", + "clubmed", "cm", "cn", "co", @@ -7286,19 +7826,25 @@ "community", "company", "computer", + "comsec", "condos", "construction", "consulting", + "contact", "contractors", "cooking", + "cookingchannel", 
"cool", "coop", "corsica", "country", + "coupon", + "coupons", "courses", "cr", "credit", "creditcard", + "creditunion", "cricket", "crown", "crs", @@ -7311,6 +7857,7 @@ "cx", "cy", "cymru", + "cyou", "cz", "dabur", "dad", @@ -7320,11 +7867,15 @@ "datsun", "day", "dclk", + "dds", "de", + "deal", + "dealer", "deals", "degree", "delivery", "dell", + "delta", "democrat", "dental", "dentist", @@ -7347,7 +7898,14 @@ "doha", "domains", "doosan", + "dot", "download", + "drive", + "dstv", + "dtv", + "dubai", + "dunlop", + "dupont", "durban", "dvag", "dz", @@ -7381,10 +7939,13 @@ "exchange", "expert", "exposed", + "express", + "extraspace", "fage", "fail", "fairwinds", "faith", + "family", "fan", "fans", "farm", @@ -7393,9 +7954,11 @@ "feedback", "ferrero", "fi", + "film", "final", "finance", "financial", + "fire", "firestone", "firmdale", "fish", @@ -7404,6 +7967,7 @@ "fitness", "fj", "fk", + "flickr", "flights", "florist", "flowers", @@ -7412,20 +7976,29 @@ "fm", "fo", "foo", + "foodnetwork", "football", "ford", "forex", "forsale", + "forum", "foundation", "fr", "frl", "frogans", + "frontdoor", + "frontier", "fund", "furniture", "futbol", + "fyi", "ga", "gal", "gallery", + "gallo", + "gallup", + "game", + "games", "garden", "gb", "gbiz", @@ -7434,6 +8007,7 @@ "ge", "gea", "gent", + "genting", "gf", "gg", "ggee", @@ -7453,17 +8027,21 @@ "gmo", "gmx", "gn", + "gold", "goldpoint", "golf", "goo", + "goodyear", "goog", "google", "gop", "got", + "gotv", "gov", "gp", "gq", "gr", + "grainger", "graphics", "gratis", "green", @@ -7482,38 +8060,50 @@ "hamburg", "hangout", "haus", + "hdfcbank", + "health", "healthcare", "help", + "helsinki", "here", "hermes", + "hgtv", "hiphop", "hitachi", "hiv", "hk", + "hkt", "hm", "hn", + "hockey", "holdings", "holiday", + "homedepot", "homes", "honda", "horse", "host", "hosting", + "hoteles", "hotmail", "house", "how", "hr", "hsbc", "ht", + "htc", "hu", "ibm", + "icbc", "ice", + "icu", "id", "ie", "ifm", "iinet", "il", "im", + "imdb", "immo", "immobilien", "in", @@ -7523,6 +8113,7 @@ "ing", "ink", "institute", + "insurance", "insure", "int", "international", @@ -7533,6 +8124,7 @@ "ir", "irish", "is", + "iselect", "ist", "istanbul", "it", @@ -7541,46 +8133,66 @@ "jaguar", "java", "jcb", + "jcp", "je", "jetzt", + "jewelry", + "jio", "jlc", + "jll", "jm", + "jmp", + "jnj", "jo", "jobs", "joburg", "jot", "joy", "jp", + "jpmorgan", "jprs", "juegos", "kaufen", "kddi", "ke", + "kerryhotels", + "kerrylogistics", + "kerryproperties", "kfh", "kg", "kh", "ki", "kim", "kinder", + "kindle", "kitchen", "kiwi", "km", "kn", "koeln", + "komatsu", "kp", + "kpmg", + "kpn", "kr", "krd", "kred", + "kuokgroup", "kw", "ky", + "kyknet", "kyoto", "kz", "la", "lacaixa", + "lamborghini", + "lancaster", "land", "landrover", + "lasalle", "lat", "latrobe", + "law", "lawyer", "lb", "lc", @@ -7588,11 +8200,13 @@ "lease", "leclerc", "legal", + "lexus", "lgbt", "li", "liaison", "lidl", "life", + "lifeinsurance", "lifestyle", "lighting", "like", @@ -7601,13 +8215,19 @@ "lincoln", "linde", "link", + "lipsy", "live", + "lixil", "lk", "loan", "loans", + "locker", + "locus", + "lol", "london", "lotte", "lotto", + "love", "lr", "ls", "lt", @@ -7623,6 +8243,7 @@ "madrid", "maif", "maison", + "makeup", "man", "management", "mango", @@ -7630,6 +8251,7 @@ "marketing", "markets", "marriott", + "mba", "mc", "md", "me", @@ -7638,33 +8260,42 @@ "melbourne", "meme", "memorial", + "men", "menu", "meo", + "metlife", "mg", "mh", "miami", "microsoft", "mil", "mini", + "mit", "mk", "ml", + "mlb", + "mls", "mm", "mma", "mn", 
+ "mnet", "mo", "mobi", "mobily", "moda", "moe", "moi", + "mom", "monash", "money", "montblanc", "mormon", "mortgage", "moscow", + "moto", "motorcycles", "mov", + "movie", "movistar", "mp", "mq", @@ -7673,26 +8304,37 @@ "mt", "mtn", "mtpc", + "mtr", "mu", + "multichoice", "museum", + "mutual", + "mutuelle", "mv", "mw", "mx", "my", "mz", + "mzansimagic", "na", "nadex", "nagoya", "name", + "naspers", + "natura", "navy", "nc", "ne", + "nec", "net", "netbank", + "netflix", "network", "neustar", "new", "news", + "next", + "nextdirect", "nexus", "nf", "ng", @@ -7700,12 +8342,17 @@ "nhk", "ni", "nico", + "nikon", "ninja", "nissan", "nl", "no", + "nokia", + "northwesternmutual", "norton", + "now", "nowruz", + "nowtv", "np", "nr", "nra", @@ -7715,27 +8362,42 @@ "nyc", "nz", "obi", + "observer", + "office", "okinawa", + "olayan", + "olayangroup", + "ollo", "om", + "omega", "one", "ong", "onl", + "online", "ooo", "oracle", + "orange", "org", "organic", + "orientexpress", "osaka", "otsuka", + "ott", "ovh", "pa", "page", + "pamperedchef", "panerai", "paris", "pars", "partners", "parts", "party", + "passagens", + "payu", + "pccw", "pe", + "pet", "pf", "pg", "ph", @@ -7749,15 +8411,21 @@ "pics", "pictet", "pictures", + "pid", "pin", + "ping", "pink", "pizza", "pk", "pl", "place", + "play", + "playstation", "plumbing", + "plus", "pm", "pn", + "pnc", "pohl", "poker", "porn", @@ -7765,6 +8433,7 @@ "pr", "praxi", "press", + "prime", "pro", "prod", "productions", @@ -7772,6 +8441,7 @@ "promo", "properties", "property", + "protection", "ps", "pt", "pub", @@ -7780,17 +8450,21 @@ "qa", "qpon", "quebec", + "quest", "racing", "re", "read", "realtor", + "realty", "recipes", "red", "redstone", + "redumbrella", "rehab", "reise", "reisen", "reit", + "reliance", "ren", "rent", "rentals", @@ -7801,8 +8475,11 @@ "restaurant", "review", "reviews", + "rexroth", "rich", + "richardli", "ricoh", + "ril", "rio", "rip", "ro", @@ -7814,11 +8491,14 @@ "rsvp", "ru", "ruhr", + "run", "rw", + "rwe", "ryukyu", "sa", "saarland", "safe", + "safety", "sakura", "sale", "salon", @@ -7829,8 +8509,11 @@ "sap", "sapo", "sarl", + "sas", + "save", "saxo", "sb", + "sbi", "sbs", "sc", "sca", @@ -7846,6 +8529,7 @@ "sd", "se", "seat", + "security", "seek", "sener", "services", @@ -7855,40 +8539,63 @@ "sg", "sh", "sharp", + "shaw", "shia", "shiksha", "shoes", + "shouji", + "show", "shriram", "si", + "silk", + "sina", "singles", + "site", "sj", "sk", + "ski", + "skin", "sky", "skype", "sl", "sm", "smile", "sn", + "sncf", "so", + "soccer", "social", + "softbank", "software", "sohu", "solar", "solutions", + "song", + "sony", "soy", "space", "spiegel", + "spot", "spreadbetting", "sr", + "srl", "st", "stada", + "star", + "starhub", + "statebank", "statoil", "stc", "stcgroup", "stockholm", + "storage", + "store", + "studio", "study", "style", "su", + "sucks", + "supersport", "supplies", "supply", "support", @@ -7896,6 +8603,7 @@ "surgery", "suzuki", "sv", + "swatch", "swiss", "sx", "sy", @@ -7905,21 +8613,36 @@ "sz", "tab", "taipei", + "talk", + "taobao", + "tatamotors", "tatar", "tattoo", "tax", + "taxi", "tc", "tci", "td", + "tdk", + "team", + "tech", "technology", "tel", + "telecity", "telefonica", "temasek", "tennis", + "teva", "tf", "tg", "th", + "thd", + "theater", + "theatre", + "theguardian", + "tickets", "tienda", + "tiffany", "tips", "tires", "tirol", @@ -7927,6 +8650,7 @@ "tk", "tl", "tm", + "tmall", "tn", "to", "today", @@ -7935,7 +8659,9 @@ "top", "toray", "toshiba", + "tours", "town", + "toyota", "toys", "tp", "tr", @@ -7943,11 +8669,18 @@ 
"trading", "training", "travel", + "travelchannel", + "travelers", + "travelersinsurance", "trust", + "trv", "tt", + "tube", "tui", + "tunes", "tushu", "tv", + "tvs", "tw", "tz", "ua", @@ -7957,6 +8690,7 @@ "university", "uno", "uol", + "ups", "us", "uy", "uz", @@ -7973,7 +8707,11 @@ "vi", "viajes", "video", + "vig", + "viking", "villas", + "vin", + "vip", "virgin", "vision", "vista", @@ -7982,20 +8720,29 @@ "vlaanderen", "vn", "vodka", + "volkswagen", "vote", "voting", "voto", "voyage", "vu", + "vuelos", "wales", "walter", "wang", "wanggou", + "warman", "watch", + "watches", + "weather", + "weatherchannel", "webcam", + "weber", "website", "wed", "wedding", + "weibo", + "weir", "wf", "whoswho", "wien", @@ -8003,6 +8750,7 @@ "williamhill", "win", "windows", + "wine", "wme", "work", "works", @@ -8012,36 +8760,54 @@ "wtf", "xbox", "xerox", + "xihuan", "xin", + "xn--11b4c3d", + "xn--1ck2e1b", "xn--1qqw23a", "xn--30rr7y", "xn--3bst00m", "xn--3ds443g", "xn--3e0b707e", + "xn--3oq18vl8pn36a", + "xn--3pxu8k", + "xn--42c2d9a", "xn--45brj9c", "xn--45q11c", "xn--4gbrim", "xn--54b7fta0cc", "xn--55qw42g", "xn--55qx5d", + "xn--5tzm5g", "xn--6frz82g", "xn--6qq986b3xl", "xn--80adxhks", "xn--80ao21a", "xn--80asehdb", "xn--80aswg", + "xn--8y0a063a", "xn--90a3ac", + "xn--90ais", + "xn--9dbq2a", "xn--9et52u", + "xn--9krt00a", "xn--b4w605ferd", + "xn--bck1b9a5dre4c", "xn--c1avg", + "xn--c2br7g", + "xn--cck2b3b", "xn--cg4bki", "xn--clchc0ea0b2g2a9gcd", "xn--czr694b", "xn--czrs0t", "xn--czru2d", "xn--d1acj3b", + "xn--d1alf", "xn--eckvdtc9d", "xn--efvy88h", + "xn--estv75g", + "xn--fct429k", + "xn--fhbei", "xn--fiq228c5hs", "xn--fiq64b", "xn--fiqs8s", @@ -8050,17 +8816,24 @@ "xn--flw351e", "xn--fpcrj9c3d", "xn--fzc2c9e2c", + "xn--fzys8d69uvgm", + "xn--g2xx48c", + "xn--gckr3f0f", "xn--gecrj9c", "xn--h2brj9c", "xn--hxt814e", "xn--i1b6b1a6a2e", "xn--imr513n", "xn--io0a7i", + "xn--j1aef", "xn--j1amh", "xn--j6w193g", + "xn--jlq61u9w7b", + "xn--jvr189m", "xn--kcrx77d1x4a", "xn--kprw13d", "xn--kpry57d", + "xn--kpu716f", "xn--kput3i", "xn--l1acc", "xn--lgbbat1ad8j", @@ -8069,19 +8842,27 @@ "xn--mgba3a3ejt", "xn--mgba3a4f16a", "xn--mgba3a4fra", + "xn--mgba7c0bbn0a", "xn--mgbaam7a8h", "xn--mgbab2bd", + "xn--mgbai9a5eva00b", + "xn--mgbai9azgqp6j", "xn--mgbayh7gpa", "xn--mgbb9fbpob", "xn--mgbbh1a71e", "xn--mgbc0a9azcg", "xn--mgberp4a5d4a87g", "xn--mgberp4a5d4ar", + "xn--mgbpl2fh", "xn--mgbqly7c0a67fbc", "xn--mgbqly7cvafr", "xn--mgbt3dhd", "xn--mgbtf8fl", + "xn--mgbtx2b", "xn--mgbx4cd0ab", + "xn--mix082f", + "xn--mix891f", + "xn--mk1bu44c", "xn--mxtq1m", "xn--ngbc5azd", "xn--ngbe9e0a", @@ -8094,40 +8875,54 @@ "xn--ogbpf8fl", "xn--p1acf", "xn--p1ai", + "xn--pbt977c", "xn--pgbs0dh", + "xn--pssy2u", "xn--q9jyb4c", "xn--qcka1pmc", + "xn--qxam", "xn--rhqv96g", + "xn--rovu88b", "xn--s9brj9c", "xn--ses554g", + "xn--t60b56a", + "xn--tckwe", "xn--unup4y", "xn--vermgensberater-ctb", "xn--vermgensberatung-pwb", "xn--vhquv", "xn--vuq861b", + "xn--w4r85el8fhu5dnra", "xn--wgbh1c", "xn--wgbl6a", "xn--xhq521b", "xn--xkc2al3hye2a", "xn--xkc2dl3a5ee0h", + "xn--y9a3aq", "xn--yfro4i67o", "xn--ygbi2ammx", "xn--zfr164b", + "xperia", "xxx", "xyz", "yachts", + "yahoo", "yamaxun", "yandex", "ye", "yodobashi", "yoga", "yokohama", + "you", "youtube", "yt", + "yun", "za", + "zappos", "zara", "zero", "zip", + "zippo", "zm", "zone", "zuerich", @@ -8250,12 +9045,14 @@ "net", "off", "org", + "blogspot", "com", "edu", "gov", "mil", "net", "org", + "blogspot", "com", "edu", "net", @@ -8336,6 +9133,7 @@ "org", "pp", "pro", + "blogspot", "co", "com", "edu", @@ 
-8371,6 +9169,7 @@ "9", "a", "b", + "blogspot", "c", "d", "e", @@ -8517,6 +9316,7 @@ "gov", "mil", "of", + "blogspot", "com", "edu", "gov", @@ -8563,6 +9363,7 @@ "presse", "xn--aroport-bya", "www", + "blogspot", "co", "gob", "gov", @@ -8618,6 +9419,9 @@ "zj", "compute", "cn-north-1", + "amazonaws", + "cn-north-1", + "s3", "arts", "com", "edu", @@ -8631,6 +9435,9 @@ "org", "rec", "web", + "blogspot", + "1kapp", + "4u", "africa", "amazonaws", "appspot", @@ -8819,11 +9626,13 @@ "sells-for-u", "servebbs", "simple-url", + "sinaapp", "space-to-rent", "teaches-yoga", "uk", "us", "uy", + "vipsinaapp", "withgoogle", "writesthisblog", "yolasite", @@ -8831,25 +9640,20 @@ "compute", "compute-1", "elb", + "eu-central-1", "s3", "s3-ap-northeast-1", "s3-ap-southeast-1", "s3-ap-southeast-2", + "s3-eu-central-1", "s3-eu-west-1", + "s3-external-1", + "s3-external-2", "s3-fips-us-gov-west-1", "s3-sa-east-1", "s3-us-gov-west-1", "s3-us-west-1", "s3-us-west-2", - "s3-website-ap-northeast-1", - "s3-website-ap-southeast-1", - "s3-website-ap-southeast-2", - "s3-website-eu-west-1", - "s3-website-sa-east-1", - "s3-website-us-east-1", - "s3-website-us-gov-west-1", - "s3-website-us-west-1", - "s3-website-us-west-2", "us-east-1", "ap-northeast-1", "ap-southeast-1", @@ -8862,6 +9666,7 @@ "us-west-2", "z-1", "z-2", + "s3", "ac", "co", "ed", @@ -8882,6 +9687,20 @@ "org", "ath", "gov", + "ac", + "biz", + "com", + "ekloges", + "gov", + "ltd", + "name", + "net", + "org", + "parliament", + "press", + "pro", + "tm", + "blogspot", "blogspot", "blogspot", "com", @@ -8937,6 +9756,7 @@ "org", "pri", "riik", + "blogspot", "com", "edu", "eun", @@ -8946,6 +9766,7 @@ "net", "org", "sci", + "blogspot", "com", "edu", "gob", @@ -8958,6 +9779,7 @@ "gov", "info", "name", + "net", "org", "aland", "blogspot", @@ -9007,6 +9829,11 @@ "ltd", "mod", "org", + "co", + "com", + "edu", + "net", + "org", "ac", "com", "edu", @@ -9065,6 +9892,7 @@ "mil", "net", "org", + "blogspot", "com", "from", "iz", @@ -9130,6 +9958,7 @@ "sch", "web", "blogspot", + "blogspot", "gov", "co", "blogspot", @@ -9184,6 +10013,7 @@ "sch", "xn--mgba3a4f16a", "xn--mgba3a4fra", + "blogspot", "com", "cupcake", "edu", @@ -11366,6 +12196,8 @@ "yamanakako", "yamanashi", "city", + "co", + "blogspot", "com", "edu", "gov", @@ -11467,6 +12299,8 @@ "gov", "net", "org", + "blogspot", + "ac", "assn", "com", "edu", @@ -11488,7 +12322,9 @@ "org", "co", "org", + "blogspot", "gov", + "blogspot", "asn", "com", "conf", @@ -11515,6 +12351,7 @@ "press", "asso", "tm", + "blogspot", "ac", "co", "edu", @@ -11531,6 +12368,7 @@ "org", "prd", "tm", + "blogspot", "com", "edu", "gov", @@ -11565,6 +12403,7 @@ "edu", "net", "org", + "blogspot", "ac", "co", "com", @@ -12151,6 +12990,7 @@ "gob", "net", "org", + "blogspot", "com", "edu", "gov", @@ -12187,6 +13027,8 @@ "blogdns", "broke-it", "buyshouses", + "cdn77", + "cdn77-ssl", "cloudapp", "cloudfront", "dnsalias", @@ -12228,6 +13070,7 @@ "uk", "webhop", "za", + "r", "prod", "ssl", "a", @@ -12255,6 +13098,7 @@ "org", "sch", "blogspot", + "blogspot", "bv", "co", "aa", @@ -13063,16 +13907,20 @@ "blogdns", "blogsite", "boldlygoingnowhere", + "cdn77", + "cdn77-secure", "dnsalias", "dnsdojo", "doesntexist", "dontexist", "doomdns", + "duckdns", "dvrdns", "dynalias", "dyndns", "endofinternet", "endoftheinternet", + "eu", "from-me", "game-host", "gotdns", @@ -13113,8 +13961,67 @@ "us", "webhop", "za", + "c", + "rsc", + "origin", + "ssl", "go", "home", + "al", + "asso", + "at", + "au", + "be", + "bg", + "ca", + "cd", + "ch", + "cn", + "cy", + "cz", + "de", + "dk", 
+ "edu", + "ee", + "es", + "fi", + "fr", + "gr", + "hr", + "hu", + "ie", + "il", + "in", + "int", + "is", + "it", + "jp", + "kr", + "lt", + "lu", + "lv", + "mc", + "me", + "mk", + "mt", + "my", + "net", + "ng", + "nl", + "no", + "nz", + "paris", + "pl", + "pt", + "q-a", + "ro", + "ru", + "se", + "si", + "sk", + "tr", + "uk", + "us", "abo", "ac", "com", @@ -13126,6 +14033,7 @@ "nom", "org", "sld", + "blogspot", "com", "edu", "gob", @@ -13323,15 +14231,53 @@ "zarow", "zgora", "zgorzelec", + "ap", + "griw", + "ic", + "is", + "kmpsp", + "konsulat", + "kppsp", + "kwp", + "kwpsp", + "mup", + "mw", + "oirm", + "oum", "pa", + "pinb", + "piw", "po", + "psp", + "psse", + "pup", + "rzgw", + "sa", + "sdn", + "sko", "so", "sr", "starostwo", "ug", + "ugim", "um", + "umig", "upow", + "uppo", + "us", "uw", + "uzs", + "wif", + "wiih", + "winb", + "wios", + "witd", + "wiw", + "wsa", + "wskr", + "wuoz", + "wzmiuw", + "zp", "co", "edu", "gov", @@ -13386,6 +14332,7 @@ "mil", "net", "org", + "blogspot", "com", "edu", "gov", @@ -13411,6 +14358,7 @@ "tm", "www", "ac", + "blogspot", "co", "edu", "gov", @@ -13636,6 +14584,8 @@ "mil", "net", "org", + "platform", + "blogspot", "blogspot", "com", "edu", @@ -13643,6 +14593,7 @@ "net", "org", "art", + "blogspot", "com", "edu", "gouv", @@ -13664,6 +14615,38 @@ "principe", "saotome", "store", + "adygeya", + "arkhangelsk", + "balashov", + "bashkiria", + "bryansk", + "dagestan", + "grozny", + "ivanovo", + "kalmykia", + "kaluga", + "karelia", + "khakassia", + "krasnodar", + "kurgan", + "lenug", + "mordovia", + "msk", + "murmansk", + "nalchik", + "nov", + "obninsk", + "penza", + "pokrovsk", + "sochi", + "spb", + "togliatti", + "troitsk", + "tula", + "tuva", + "vladikavkaz", + "vladimir", + "vologda", "com", "edu", "gob", @@ -13886,6 +14869,7 @@ "zp", "zt", "ac", + "blogspot", "co", "com", "go", @@ -14138,6 +15122,7 @@ "mil", "net", "org", + "blogspot", "co", "com", "net", @@ -14172,6 +15157,7 @@ "org", "ac", "biz", + "blogspot", "com", "edu", "gov", @@ -14199,4 +15185,22 @@ "xn--d1at", "xn--o1ac", "xn--o1ach", + "ac", + "agrica", + "alt", + "co", + "edu", + "gov", + "grondar", + "law", + "mil", + "net", + "ngo", + "nis", + "nom", + "org", + "school", + "tm", + "web", + "blogspot", } === added directory 'src/golang.org/x/net/trace' === added file 'src/golang.org/x/net/trace/events.go' --- src/golang.org/x/net/trace/events.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/trace/events.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,524 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, +}).Parse(eventsHTML)) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. 
+// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl.Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
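+//
+// A minimal usage sketch (the family and title strings are illustrative,
+// not defined by this package):
+//
+//	el := trace.NewEventLog("mypkg.Conn", "remote.example.com:2345")
+//	el.Printf("dial succeeded")           // adds a plain event
+//	el.Errorf("read failed: %v", err)     // adds an event and marks an error
+//	el.Finish()                           // el must not be used afterwards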
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
+func freeEventLog(el *eventLog) {
+	el.reset()
+	select {
+	case freeEventLogs <- el:
+	default:
+	}
+}
+
+const eventsHTML = `
+<html>
+	<head>
+		<title>events</title>
+	</head>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#req-status td.family {
+			padding-right: 2em;
+		}
+		table#req-status td.active {
+			padding-right: 1em;
+		}
+		table#req-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			font-weight: bold;
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+	</style>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` === added file 'src/golang.org/x/net/trace/histogram.go' --- src/golang.org/x/net/trace/histogram.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/trace/histogram.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,356 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
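+// Bars are scaled linearly, so the fullest bucket renders at exactly this
+// width and every other bucket in proportion to its count.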
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl.Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +// Input: data +var distTmpl = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+    <td style="padding:0.25em">Count: {{.Count}}</td>
+    <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+    <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+    <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`)) === added file 'src/golang.org/x/net/trace/histogram_test.go' --- src/golang.org/x/net/trace/histogram_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/trace/histogram_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,325 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "math" + "testing" +) + +type sumTest struct { + value int64 + sum int64 + sumOfSquares float64 + total int64 +} + +var sumTests = []sumTest{ + {100, 100, 10000, 1}, + {50, 150, 12500, 2}, + {50, 200, 15000, 3}, + {50, 250, 17500, 4}, +} + +type bucketingTest struct { + in int64 + log int + bucket int +} + +var bucketingTests = []bucketingTest{ + {0, 0, 0}, + {1, 1, 0}, + {2, 2, 1}, + {3, 2, 1}, + {4, 3, 2}, + {1000, 10, 9}, + {1023, 10, 9}, + {1024, 11, 10}, + {1000000, 20, 19}, +} + +type multiplyTest struct { + in int64 + ratio float64 + expectedSum int64 + expectedTotal int64 + expectedSumOfSquares float64 +} + +var multiplyTests = []multiplyTest{ + {15, 2.5, 37, 2, 562.5}, + {128, 4.6, 758, 13, 77953.9}, +} + +type percentileTest struct { + fraction float64 + expected int64 +} + +var percentileTests = []percentileTest{ + {0.25, 48}, + {0.5, 96}, + {0.6, 109}, + {0.75, 128}, + {0.90, 205}, + {0.95, 230}, + {0.99, 256}, +} + +func TestSum(t *testing.T) { + var h histogram + + for _, test := range sumTests { + h.addMeasurement(test.value) + sum := h.sum + if sum != test.sum { + t.Errorf("h.Sum = %v WANT: %v", sum, test.sum) + } + + sumOfSquares := h.sumOfSquares + if sumOfSquares != test.sumOfSquares { + t.Errorf("h.SumOfSquares = %v WANT: %v", sumOfSquares, test.sumOfSquares) + } + + total := h.total() + if total != test.total { + t.Errorf("h.Total = %v WANT: %v", total, test.total) + } + } +} + +func TestMultiply(t *testing.T) { + var h histogram + for i, test := range multiplyTests { + h.addMeasurement(test.in) + h.Multiply(test.ratio) + if h.sum != test.expectedSum { + t.Errorf("#%v: h.sum = %v WANT: %v", i, h.sum, test.expectedSum) + } + if h.total() != test.expectedTotal { + t.Errorf("#%v: h.total = %v WANT: %v", i, h.total(), test.expectedTotal) + } + if h.sumOfSquares != test.expectedSumOfSquares { + t.Errorf("#%v: h.SumOfSquares = %v WANT: %v", i, test.expectedSumOfSquares, h.sumOfSquares) + } + } +} + +func TestBucketingFunctions(t *testing.T) { + for _, test := range bucketingTests { + log := log2(test.in) + if log != test.log { + t.Errorf("log2 = %v WANT: %v", log, test.log) + } + + bucket := getBucket(test.in) + if bucket != test.bucket { + t.Errorf("getBucket = %v WANT: %v", bucket, test.bucket) + } + } +} + +func TestAverage(t *testing.T) { + a := new(histogram) + average := a.average() + if average != 0 { + t.Errorf("Average of empty histogram was %v WANT: 0", average) + } + + a.addMeasurement(1) + a.addMeasurement(1) + a.addMeasurement(3) + const expected = float64(5) / float64(3) + average = a.average() + + if !isApproximate(average, expected) { + t.Errorf("Average = %g WANT: %v", average, expected) + } +} + +func TestStandardDeviation(t *testing.T) { + a := new(histogram) + add(a, 10, 1<<4) + add(a, 10, 1<<5) + add(a, 10, 1<<6) + stdDev := a.standardDeviation() + const expected = 19.95 + + if !isApproximate(stdDev, expected) { + t.Errorf("StandardDeviation = %v WANT: %v", stdDev, expected) + } + + // No values + a = new(histogram) + stdDev = a.standardDeviation() + + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) 
+ } + + add(a, 1, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } + + add(a, 10, 1<<4) + if !isApproximate(stdDev, 0) { + t.Errorf("StandardDeviation = %v WANT: 0", stdDev) + } +} + +func TestPercentileBoundary(t *testing.T) { + a := new(histogram) + add(a, 5, 1<<4) + add(a, 10, 1<<6) + add(a, 5, 1<<7) + + for _, test := range percentileTests { + percentile := a.percentileBoundary(test.fraction) + if percentile != test.expected { + t.Errorf("h.PercentileBoundary (fraction=%v) = %v WANT: %v", test.fraction, percentile, test.expected) + } + } +} + +func TestCopyFrom(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := histogram{6, 36, []int64{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, 5, -1} + + a.CopyFrom(&b) + + if a.String() != b.String() { + t.Errorf("a.String = %s WANT: %s", a.String(), b.String()) + } +} + +func TestClear(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + + a.Clear() + + expected := "0, 0.000000, 0, 0, []" + if a.String() != expected { + t.Errorf("a.String = %s WANT %s", a.String(), expected) + } +} + +func TestNew(t *testing.T) { + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := "0, 0.000000, 0, 0, []" + if b.(*histogram).String() != expected { + t.Errorf("b.(*histogram).String = %s WANT: %s", b.(*histogram).String(), expected) + } +} + +func TestAdd(t *testing.T) { + // The tests here depend on the associativity of addMeasurement and Add. 
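+	// That is, recording a set of measurements into two histograms and
+	// merging them with Add must give the same result as recording the
+	// whole set into a single histogram.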
+ // Add empty observation + a := histogram{5, 25, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, 4, -1} + b := a.New() + + expected := a.String() + a.Add(b) + if a.String() != expected { + t.Errorf("a.String = %s WANT: %s", a.String(), expected) + } + + // Add same bucketed value, no new buckets + c := new(histogram) + d := new(histogram) + e := new(histogram) + c.addMeasurement(12) + d.addMeasurement(11) + e.addMeasurement(12) + e.addMeasurement(11) + c.Add(d) + if c.String() != e.String() { + t.Errorf("c.String = %s WANT: %s", c.String(), e.String()) + } + + // Add bucketed values + f := new(histogram) + g := new(histogram) + h := new(histogram) + f.addMeasurement(4) + f.addMeasurement(12) + f.addMeasurement(100) + g.addMeasurement(18) + g.addMeasurement(36) + g.addMeasurement(255) + h.addMeasurement(4) + h.addMeasurement(12) + h.addMeasurement(100) + h.addMeasurement(18) + h.addMeasurement(36) + h.addMeasurement(255) + f.Add(g) + if f.String() != h.String() { + t.Errorf("f.String = %q WANT: %q", f.String(), h.String()) + } + + // add buckets to no buckets + i := new(histogram) + j := new(histogram) + k := new(histogram) + j.addMeasurement(18) + j.addMeasurement(36) + j.addMeasurement(255) + k.addMeasurement(18) + k.addMeasurement(36) + k.addMeasurement(255) + i.Add(j) + if i.String() != k.String() { + t.Errorf("i.String = %q WANT: %q", i.String(), k.String()) + } + + // add buckets to single value (no overlap) + l := new(histogram) + m := new(histogram) + n := new(histogram) + l.addMeasurement(0) + m.addMeasurement(18) + m.addMeasurement(36) + m.addMeasurement(255) + n.addMeasurement(0) + n.addMeasurement(18) + n.addMeasurement(36) + n.addMeasurement(255) + l.Add(m) + if l.String() != n.String() { + t.Errorf("l.String = %q WANT: %q", l.String(), n.String()) + } + + // mixed order + o := new(histogram) + p := new(histogram) + o.addMeasurement(0) + o.addMeasurement(2) + o.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(0) + p.addMeasurement(2) + if o.String() != p.String() { + t.Errorf("o.String = %q WANT: %q", o.String(), p.String()) + } +} + +func add(h *histogram, times int, val int64) { + for i := 0; i < times; i++ { + h.addMeasurement(val) + } +} + +func isApproximate(x, y float64) bool { + return math.Abs(x-y) < 1e-2 +} === added file 'src/golang.org/x/net/trace/trace.go' --- src/golang.org/x/net/trace/trace.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/trace/trace.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1057 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. 
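+
+Both endpoints are registered on http.DefaultServeMux when this package is
+imported; access to them is controlled by the AuthRequest function.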
+ +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customise its authorisation requirements. +// +// The default AuthRequest function returns (true, true) iff the request comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + host, _, err := net.SplitHostPort(req.RemoteAddr) + switch { + case err != nil: // Badly formed address; fail closed. + return false, false + case host == "localhost" || host == "127.0.0.1" || host == "::1": + return true, true + default: + return false, false + } +} + +func init() { + http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + Render(w, req, sensitive) + }) + http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + RenderEvents(w, req, sensitive) + }) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking; see AuthRequest for the default auth check +// used by the handler registered on http.DefaultServeMux. +// req may be nil. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. 
+ Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam, _ := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. 
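+	// If sensitive is true, the rendered /debug/requests page shows
+	// "[redacted]" in place of the event unless the viewer is authorised
+	// to see sensitive data (see AuthRequest).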
+ LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.events = make([]event, 0, maxEventsPerTrace) + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + tr.Elapsed = time.Now().Sub(tr.Start) + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. 
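+	// completedTraces is written only by allocFamily, which takes
+	// completedMu for writing; all other access holds the read lock.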
+ completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. 
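+// A bucket holds at most tracesPerBucket traces; once full, adding a trace
+// evicts (unrefs) the oldest one.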
+type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + What interface{} // string or fmt.Stringer + Sensitive bool // whether this event contains sensitive information +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Timing information. 
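+	// Start is set by New; Elapsed remains zero until Finish is called.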
+ Start time.Time + Elapsed time.Duration // zero while active + + // Trace information if non-zero. + traceID uint64 + spanID uint64 + + // Whether this trace resulted in an error. + IsError bool + + // Append-only sequence of events (modulo discards). + mu sync.RWMutex + events []event + + refs int32 // how many buckets this is in + recycler func(interface{}) + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.events = nil + tr.refs = 0 + tr.recycler = nil + tr.disc = 0 + tr.finishStack = nil +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a requestz.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < cap(tr.events) { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((cap(tr.events) - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[cap(tr.events)-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { tr.IsError = true } + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.recycler = f +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.traceID, tr.spanID = traceID, spanID +} + +func (tr *trace) SetMaxEvents(m int) { + // Always keep at least three events: first, discarded count, last. 
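+	// The capacity can be changed only before the first event is added,
+	// and only to a value greater than three.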
+ if len(tr.events) == 0 && m > 3 { + tr.events = make([]event, 0, m) + } +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + t := tr.Elapsed + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, +}).Parse(pageHTML)) + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

+[The raw HTML/CSS markup of this page template was lost in extraction; only
+ its outline is recoverable. The Prolog emits the page head, stylesheet, and
+ an "/debug/requests" heading. The StatusTable ranges over $.Families and
+ emits one row per family: the family name, an "[{{$n}} active]" link, one
+ "[{{.Cond}}]" link per latency bucket (greyed out when the bucket is empty),
+ and "[minute] [hour] [total]" histogram links. The Epilog, when traces are
+ selected, emits a "Family: {{$.Family}}" heading, Normal/Traced x
+ Summary/Expanded view links, an optional "Showing {{len $.Traces}} of
+ {{$.Total}} traces." note, and a table of "{{if $.Active}}Active{{else}}Completed{{end}} Requests"
+ with When, Elapsed (s), and title columns plus, in expanded mode, per-event
+ rows showing {{.WhenString}}, {{elapsed .Elapsed}}, and {{.What}} (or
+ "[redacted]" when the event is sensitive and $.ShowSensitive is false).
+ When a histogram bucket is selected, it emits "Latency (µs) of {{$.Family}}
+ over {{$.HistogramWindow}}" followed by the rendered histogram:]
+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` === added file 'src/golang.org/x/net/trace/trace_test.go' --- src/golang.org/x/net/trace/trace_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/trace/trace_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "reflect" + "testing" +) + +type s struct{} + +func (s) String() string { return "lazy string" } + +// TestReset checks whether all the fields are zeroed after reset. +func TestReset(t *testing.T) { + tr := New("foo", "bar") + tr.LazyLog(s{}, false) + tr.LazyPrintf("%d", 1) + tr.SetRecycler(func(_ interface{}) {}) + tr.SetTraceInfo(3, 4) + tr.SetMaxEvents(100) + tr.SetError() + tr.Finish() + + tr.(*trace).reset() + + if !reflect.DeepEqual(tr, new(trace)) { + t.Errorf("reset didn't clear all fields: %+v", tr) + } +} + +// TestResetLog checks whether all the fields are zeroed after reset. +func TestResetLog(t *testing.T) { + el := NewEventLog("foo", "bar") + el.Printf("message") + el.Errorf("error") + el.Finish() + + el.(*eventLog).reset() + + if !reflect.DeepEqual(el, new(eventLog)) { + t.Errorf("reset didn't clear all fields: %+v", el) + } +} === modified file 'src/golang.org/x/net/webdav/file.go' --- src/golang.org/x/net/webdav/file.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/file.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,8 @@ "strings" "sync" "time" + + "golang.org/x/net/webdav/internal/xml" ) // slashClean is equivalent to but slightly more efficient than @@ -44,6 +46,9 @@ // A File is returned by a FileSystem's OpenFile method and can be served by a // Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. type File interface { http.File io.Writer @@ -401,10 +406,11 @@ // children is protected by memFS.mu. children map[string]*memFSNode - mu sync.Mutex - data []byte - mode os.FileMode - modTime time.Time + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property } func (n *memFSNode) stat(name string) *memFileInfo { @@ -418,6 +424,39 @@ } } +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + type memFileInfo struct { name string size int64 @@ -443,6 +482,12 @@ pos int } +// A *memFile implements the optional DeadPropsHolder interface. 
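+// The blank assignment below makes that claim a compile-time check.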
+var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + func (f *memFile) Close() error { return nil } @@ -582,6 +627,27 @@ return http.StatusNoContent, nil } +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + // copyFiles copies files and/or directories from src to dst. // // See section 9.8.5 for when various HTTP status codes apply. @@ -658,10 +724,14 @@ } _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) closeErr := dstFile.Close() if copyErr != nil { return http.StatusInternalServerError, copyErr } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } if closeErr != nil { return http.StatusInternalServerError, closeErr } === modified file 'src/golang.org/x/net/webdav/file_test.go' --- src/golang.org/x/net/webdav/file_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/file_test.go 2016-03-22 15:18:22 +0000 @@ -12,10 +12,13 @@ "path" "path/filepath" "reflect" + "runtime" "sort" "strconv" "strings" "testing" + + "golang.org/x/net/webdav/internal/xml" ) func TestSlashClean(t *testing.T) { @@ -504,6 +507,13 @@ } func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + td, err := ioutil.TempDir("", "webdav-test") if err != nil { t.Fatal(err) @@ -824,6 +834,115 @@ } } +func TestCopyMoveProps(t *testing.T) { + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", err) + } + if _, err := copyFiles(fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := 
moveFiles(fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + func TestWalkFS(t *testing.T) { testCases := []struct { desc string === added directory 'src/golang.org/x/net/webdav/internal' === added directory 'src/golang.org/x/net/webdav/internal/xml' === added file 'src/golang.org/x/net/webdav/internal/xml/README' --- src/golang.org/x/net/webdav/internal/xml/README 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/README 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 === added file 'src/golang.org/x/net/webdav/internal/xml/atom_test.go' --- src/golang.org/x/net/webdav/internal/xml/atom_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/atom_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} === added file 'src/golang.org/x/net/webdav/internal/xml/example_test.go' --- src/golang.org/x/net/webdav/internal/xml/example_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/example_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. 
+func ExampleUnmarshal() { + type Email struct { + Where string `xml:"where,attr"` + Addr string + } + type Address struct { + City, State string + } + type Result struct { + XMLName xml.Name `xml:"Person"` + Name string `xml:"FullName"` + Phone string + Email []Email + Groups []string `xml:"Group>Value"` + Address + } + v := Result{Name: "none", Phone: "none"} + + data := ` + + Grace R. Emlin + Example Inc. + + gre@example.com + + + gre@work.com + + + Friends + Squash + + Hanga Roa + Easter Island + + ` + err := xml.Unmarshal([]byte(data), &v) + if err != nil { + fmt.Printf("error: %v", err) + return + } + fmt.Printf("XMLName: %#v\n", v.XMLName) + fmt.Printf("Name: %q\n", v.Name) + fmt.Printf("Phone: %q\n", v.Phone) + fmt.Printf("Email: %v\n", v.Email) + fmt.Printf("Groups: %v\n", v.Groups) + fmt.Printf("Address: %v\n", v.Address) + // Output: + // XMLName: xml.Name{Space:"", Local:"Person"} + // Name: "Grace R. Emlin" + // Phone: "none" + // Email: [{home gre@example.com} {work gre@work.com}] + // Groups: [Friends Squash] + // Address: {Hanga Roa Easter Island} +} === added file 'src/golang.org/x/net/webdav/internal/xml/marshal.go' --- src/golang.org/x/net/webdav/internal/xml/marshal.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/marshal.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. 
The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. 
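+//
+// A minimal sketch of typical use (error handling elided; assumes a value v
+// and os.Stdout as the destination):
+//
+//	enc := NewEncoder(os.Stdout)
+//	enc.Indent("", "  ")
+//	_ = enc.Encode(v)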
+func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. +func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString(" 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. 
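+// For example, `DOCTYPE doc [ <!ELEMENT doc (#PCDATA)> ]` is valid, while
+// `DOCTYPE doc [ <!ELEMENT doc (#PCDATA) ]` is not, since its inner `<` is
+// never closed.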
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." 
value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. 
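+// Each mark is later consumed by popPrefix, which restores all prefix
+// bindings pushed since the matching markPrefix call.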
+func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. 
+ // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
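+// The checks below run in precedence order: MarshalerAttr on the value or
+// its address, then encoding.TextMarshaler, then marshalSimple. As an
+// illustrative sketch (editor's note, hypothetical field), a plain
+//
+//	Port int `xml:"port,attr"`
+//
+// implements neither interface, so it falls through to marshalSimple and
+// renders as port="80" for the value 80.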
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. 
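+// The element is opened with writeStart, the MarshalText output is escaped
+// as character data, and the element is closed. For example (editor's
+// sketch), time.Time implements encoding.TextMarshaler, so a value tagged
+// `xml:"when"` is written as <when>2013-01-09T00:15:00-09:00</when>.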
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
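+// For example (editor's sketch), once createNSPrefix has bound a prefix,
+// say "ns", for the URL in name.Space, an attribute name is written in its
+// qualified form:
+//
+//	p.writeName(Name{Space: url, Local: "foo"}, true) // -> ns:foo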
+func (p *printer) writeName(name Name, isAttr bool) {
+	if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" {
+		p.WriteString(prefix)
+		p.WriteByte(':')
+	}
+	p.WriteString(name.Local)
+}
+
+func (p *printer) writeEnd(name Name) error {
+	if name.Local == "" {
+		return fmt.Errorf("xml: end tag with no name")
+	}
+	if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" {
+		return fmt.Errorf("xml: end tag </%s> without start tag", name.Local)
+	}
+	if top := p.tags[len(p.tags)-1]; top != name {
+		if top.Local != name.Local {
+			return fmt.Errorf("xml: end tag </%s> does not match start tag <%s>", name.Local, top.Local)
+		}
+		return fmt.Errorf("xml: end tag </%s> in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space)
+	}
+	p.tags = p.tags[:len(p.tags)-1]
+
+	p.writeIndent(-1)
+	p.WriteByte('<')
+	p.WriteByte('/')
+	p.writeName(name, false)
+	p.WriteByte('>')
+	p.popPrefix()
+	return nil
+}
+
+func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) {
+	switch val.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(val.Int(), 10), nil, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(val.Uint(), 10), nil, nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil
+	case reflect.String:
+		return val.String(), nil, nil
+	case reflect.Bool:
+		return strconv.FormatBool(val.Bool()), nil, nil
+	case reflect.Array:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// [...]byte
+		var bytes []byte
+		if val.CanAddr() {
+			bytes = val.Slice(0, val.Len()).Bytes()
+		} else {
+			bytes = make([]byte, val.Len())
+			reflect.Copy(reflect.ValueOf(bytes), val)
+		}
+		return "", bytes, nil
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		// []byte
+		return "", val.Bytes(), nil
+	}
+	return "", nil, &UnsupportedTypeError{typ}
+}
+
+var ddBytes = []byte("--")
+
+func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error {
+	s := parentStack{p: p}
+	for i := range tinfo.fields {
+		finfo := &tinfo.fields[i]
+		if finfo.flags&fAttr != 0 {
+			continue
+		}
+		vf := finfo.value(val)
+
+		// Dereference or skip nil pointer, interface values.
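+		// (Editor's note: a nil pointer or interface is left as-is here and
+		// still reaches the mode switch below, so a nil field simply emits
+		// nothing rather than failing.)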
+		switch vf.Kind() {
+		case reflect.Ptr, reflect.Interface:
+			if !vf.IsNil() {
+				vf = vf.Elem()
+			}
+		}
+
+		switch finfo.flags & fMode {
+		case fCharData:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			if vf.CanInterface() && vf.Type().Implements(textMarshalerType) {
+				data, err := vf.Interface().(encoding.TextMarshaler).MarshalText()
+				if err != nil {
+					return err
+				}
+				Escape(p, data)
+				continue
+			}
+			if vf.CanAddr() {
+				pv := vf.Addr()
+				if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
+					data, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
+					if err != nil {
+						return err
+					}
+					Escape(p, data)
+					continue
+				}
+			}
+			var scratch [64]byte
+			switch vf.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10))
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10))
+			case reflect.Float32, reflect.Float64:
+				Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits()))
+			case reflect.Bool:
+				Escape(p, strconv.AppendBool(scratch[:0], vf.Bool()))
+			case reflect.String:
+				if err := EscapeText(p, []byte(vf.String())); err != nil {
+					return err
+				}
+			case reflect.Slice:
+				if elem, ok := vf.Interface().([]byte); ok {
+					if err := EscapeText(p, elem); err != nil {
+						return err
+					}
+				}
+			}
+			continue
+
+		case fComment:
+			if err := s.setParents(&noField, reflect.Value{}); err != nil {
+				return err
+			}
+			k := vf.Kind()
+			if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) {
+				return fmt.Errorf("xml: bad type for comment field of %s", val.Type())
+			}
+			if vf.Len() == 0 {
+				continue
+			}
+			p.writeIndent(0)
+			p.WriteString("<!--")
+			dashDash := false
+			dashLast := false
+			switch k {
+			case reflect.String:
+				s := vf.String()
+				dashDash = strings.Index(s, "--") >= 0
+				dashLast = s[len(s)-1] == '-'
+				if !dashDash {
+					p.WriteString(s)
+				}
+			case reflect.Slice:
+				b := vf.Bytes()
+				dashDash = bytes.Index(b, ddBytes) >= 0
+				dashLast = b[len(b)-1] == '-'
+				if !dashDash {
+					p.Write(b)
+				}
+			default:
+				panic("can't happen")
+			}
+			if dashDash {
+				return fmt.Errorf(`xml: comments must not contain "--"`)
+			}
+			if dashLast {
+				// "--->" is invalid grammar. Make it "- -->"
+				p.WriteByte(' ')
+			}
+			p.WriteString("-->")
+			continue
+
+		case fInnerXml:
+			iface := vf.Interface()
+			switch raw := iface.(type) {
+			case []byte:
+				p.Write(raw)
+				continue
+			case string:
+				p.WriteString(raw)
+				continue
+			}
+
+		case fElement, fElement | fAny:
+			if err := s.setParents(finfo, vf); err != nil {
+				return err
+			}
+		}
+		if err := p.marshalValue(vf, finfo, nil); err != nil {
+			return err
+		}
+	}
+	if err := s.setParents(&noField, reflect.Value{}); err != nil {
+		return err
+	}
+	return p.cachedWriteError()
+}
+
+var noField fieldInfo
+
+// return the bufio Writer's cached write error
+func (p *printer) cachedWriteError() error {
+	_, err := p.Write(nil)
+	return err
+}
+
+func (p *printer) writeIndent(depthDelta int) {
+	if len(p.prefix) == 0 && len(p.indent) == 0 {
+		return
+	}
+	if depthDelta < 0 {
+		p.depth--
+		if p.indentedIn {
+			p.indentedIn = false
+			return
+		}
+		p.indentedIn = false
+	}
+	if p.putNewline {
+		p.WriteByte('\n')
+	} else {
+		p.putNewline = true
+	}
+	if len(p.prefix) > 0 {
+		p.WriteString(p.prefix)
+	}
+	if len(p.indent) > 0 {
+		for i := 0; i < p.depth; i++ {
+			p.WriteString(p.indent)
+		}
+	}
+	if depthDelta > 0 {
+		p.depth++
+		p.indentedIn = true
+	}
+}
+
+type parentStack struct {
+	p       *printer
+	xmlns   string
+	parents []string
+}
+
+// setParents sets the stack of current parents to those found in finfo.
+// It only writes the start elements if vf holds a non-nil value.
+// If finfo is &noField, it pops all elements.
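+// For example (editor's sketch), moving from a field tagged `xml:"a>b>x"`
+// to one tagged `xml:"a>c>y"` finds <a> in common, so it closes </b> and
+// opens <c> before the new field is marshaled.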
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
+	xmlns := s.p.defaultNS
+	if finfo.xmlns != "" {
+		xmlns = finfo.xmlns
+	}
+	commonParents := 0
+	if xmlns == s.xmlns {
+		for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
+			if finfo.parents[commonParents] != s.parents[commonParents] {
+				break
+			}
+		}
+	}
+	// Pop off any parents that aren't in common with the previous field.
+	for i := len(s.parents) - 1; i >= commonParents; i-- {
+		if err := s.p.writeEnd(Name{
+			Space: s.xmlns,
+			Local: s.parents[i],
+		}); err != nil {
+			return err
+		}
+	}
+	s.parents = finfo.parents
+	s.xmlns = xmlns
+	if commonParents >= len(s.parents) {
+		// No new elements to push.
+		return nil
+	}
+	if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
+		// The element is nil, so no need for the start elements.
+		s.parents = s.parents[:commonParents]
+		return nil
+	}
+	// Push any new parents required.
+	for _, name := range s.parents[commonParents:] {
+		start := &StartElement{
+			Name: Name{
+				Space: s.xmlns,
+				Local: name,
+			},
+		}
+		// Set the default name space for parent elements
+		// to match what we do with other elements.
+		if s.xmlns != s.p.defaultNS {
+			start.setDefaultNamespace()
+		}
+		if err := s.p.writeStart(start); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UnsupportedTypeError is returned when Marshal encounters a type
+// that cannot be converted into XML.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "xml: unsupported type: " + e.Type.String()
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}

=== added file 'src/golang.org/x/net/webdav/internal/xml/marshal_test.go'
--- src/golang.org/x/net/webdav/internal/xml/marshal_test.go	1970-01-01 00:00:00 +0000
+++ src/golang.org/x/net/webdav/internal/xml/marshal_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1939 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + 
Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment 
string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + 
ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. 
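+	// (Editor's note: the next entry is MarshalOnly because unmarshaling
+	// its output would populate the Name field, which no longer compares
+	// equal to the zero value.)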
+ { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
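+	// (Editor's note: AttrParent's tag `xml:"X>Y,attr"` combines a ">"
+	// element chain with ",attr"; the chain can only address child
+	// elements, so building the type info reports it as an error.)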
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := 
enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, 
nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + 
}, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{A{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} === added file 'src/golang.org/x/net/webdav/internal/xml/read.go' --- src/golang.org/x/net/webdav/internal/xml/read.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/read.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,692 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. 
+// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. 
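+// A compact sketch of several of the rules above (type, field, and
+// element names are illustrative):
+//
+//	type Email struct {
+//		Where string `xml:"where,attr"`
+//		Addr  string `xml:"addr"`
+//	}
+//	type Person struct {
+//		XMLName Name     `xml:"person"`
+//		Name    string   `xml:"name"`
+//		Emails  []Email  `xml:"email"`
+//		Groups  []string `xml:"group>value"`
+//	}
+//
+//	data := `<person><name>Grace</name>` +
+//		`<email where="home"><addr>grace@example.com</addr></email>` +
+//		`<group><value>dev</value><value>ops</value></group></person>`
+//	var p Person
+//	if err := Unmarshal([]byte(data), &p); err != nil {
+//		// the error may be an UnmarshalError, declared below
+//	}
+//	// p.Name == "Grace", p.Emails[0].Where == "home", p.Groups holds "dev", "ops"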
+type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
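+	// Sketch: a field declared, say, as
+	//
+	//	Stamp time.Time `xml:"stamp,attr"`
+	//
+	// has no UnmarshalXMLAttr method, but *time.Time implements
+	// encoding.TextUnmarshaler, so the attribute text is delivered
+	// through UnmarshalText below.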
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
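+		// Sketch: with a field such as
+		//
+		//	Items []string `xml:"item"`
+		//
+		// each <item> element arrives here: the slice grew by one above,
+		// and the recursive call fills in the new element v.Index(n).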
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. 
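+			// Sketch: for a field tagged `xml:"a>b>c"`, finfo.parents is
+			// ["a", "b"] and finfo.name is "c". Seeing <a> at the top level
+			// extends the matched prefix to ["a"]; the recursion below then
+			// descends into <b> and finally unmarshals at <c>.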
+ parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} === added file 'src/golang.org/x/net/webdav/internal/xml/read_test.go' --- src/golang.org/x/net/webdav/internal/xml/read_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/read_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) 
+ +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { 
+ BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
` + + `world
` + + `
`,
+		tab: Tables{"hello", "world"},
+	},
+	{
+		xml: `<Tables>` +
+			`<table xmlns="http://www.w3schools.com/furniture">world</table>` +
+			`<table xmlns="http://www.w3.org/TR/html4/">hello</table>` +
+			`</Tables>`,
+		tab: Tables{"hello", "world"},
+	},
+	{
+		xml: `<Tables xmlns:f="http://www.w3schools.com/furniture" xmlns:h="http://www.w3.org/TR/html4/">` +
+			`<f:table>world</f:table>` +
+			`<h:table>hello</h:table>` +
+			`</Tables>`,
+		tab: Tables{"hello", "world"},
+	},
+	{
+		xml: `` + `bogus
` + + `
`,
+		tab: Tables{},
+	},
+	{
+		xml: `<Tables>` +
+			`<table>only</table>` +
+			`</Tables>`,
+		tab: Tables{HTable: "only"},
+		ns:  "http://www.w3.org/TR/html4/",
+	},
+	{
+		xml: `<Tables>` +
+			`<table>only</table>` +
+			`</Tables>`,
+		tab: Tables{FTable: "only"},
+		ns:  "http://www.w3schools.com/furniture",
+	},
+	{
+		xml: `<Tables>` +
+			`<table>only</table>` +
+			`</Tables>
`, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
world
` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} === added file 'src/golang.org/x/net/webdav/internal/xml/typeinfo.go' --- src/golang.org/x/net/webdav/internal/xml/typeinfo.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/typeinfo.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. 
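+// For example, given an illustrative struct such as
+//
+//	type Book struct {
+//		XMLName Name   `xml:"book"`
+//		ISBN    string `xml:"isbn,attr"`
+//		Title   string `xml:"info>title"`
+//		Blurb   string `xml:",chardata"`
+//	}
+//
+// it records fAttr for ISBN, fElement with parents ["info"] and name
+// "title" for Title, and fCharData for Blurb.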
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
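+// For example, given an illustrative pair of types
+//
+//	type Host struct {
+//		XMLName Name `xml:"host"`
+//	}
+//	type Deploy struct {
+//		Target Host
+//	}
+//
+// the untagged Target field takes its element name, "host", from
+// Host's XMLName tag via this lookup.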
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} === added file 'src/golang.org/x/net/webdav/internal/xml/xml.go' --- src/golang.org/x/net/webdav/internal/xml/xml.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/internal/xml/xml.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. 
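+	// Sketch: encoding StartElement{Name{"http://example.com/ns", "item"}, nil}
+	// appends xmlns="http://example.com/ns" below, so that elements written
+	// inside it default to the same namespace.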
+ for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. +type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. 
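+	//
+	// Sketch: a CharsetReader for Latin-1 input might be wired up as
+	//
+	//	d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
+	//		if charset != "iso-8859-1" {
+	//			return nil, fmt.Errorf("unsupported charset: %q", charset)
+	//		}
+	//		return newLatin1Reader(input), nil // hypothetical converter
+	//	}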
+ CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". + DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
+// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
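+// A typical Token loop, sketched; RawToken below has the same shape but
+// skips the nesting checks and name space processing:
+//
+//	dec := NewDecoder(r)
+//	for {
+//		tok, err := dec.Token()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		switch t := tok.(type) {
+//		case StartElement:
+//			_ = t.Name.Space // the resolved URL, not the document's prefix
+//		case CharData:
+//			keep := t.Copy() // valid past the next Token call only as a copy
+//			_ = keep
+//		}
+//	}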
+func (d *Decoder) RawToken() (Token, error) {
+	if d.unmarshalDepth > 0 {
+		return nil, errRawToken
+	}
+	return d.rawToken()
+}
+
+func (d *Decoder) rawToken() (Token, error) {
+	if d.err != nil {
+		return nil, d.err
+	}
+	if d.needClose {
+		// The last element we read was self-closing and
+		// we returned just the StartElement half.
+		// Return the EndElement half now.
+		d.needClose = false
+		return EndElement{d.toClose}, nil
+	}
+
+	b, ok := d.getc()
+	if !ok {
+		return nil, d.err
+	}
+
+	if b != '<' {
+		// Text section.
+		d.ungetc(b)
+		data := d.text(-1, false)
+		if data == nil {
+			return nil, d.err
+		}
+		return CharData(data), nil
+	}
+
+	if b, ok = d.mustgetc(); !ok {
+		return nil, d.err
+	}
+	switch b {
+	case '/':
+		// </: End element
+		var name Name
+		if name, ok = d.nsname(); !ok {
+			if d.err == nil {
+				d.err = d.syntaxError("expected element name after </")
+			}
+			return nil, d.err
+		}
+		d.space()
+		if b, ok = d.mustgetc(); !ok {
+			return nil, d.err
+		}
+		if b != '>' {
+			d.err = d.syntaxError("invalid characters between </" + name.Local + " and >")
+			return nil, d.err
+		}
+		return EndElement{name}, nil
+
+	case '?':
+		// <?: Processing instruction.
+		var target string
+		if target, ok = d.name(); !ok {
+			if d.err == nil {
+				d.err = d.syntaxError("expected target name after <?")
+			}
+			return nil, d.err
+		}
+		d.space()
+		d.buf.Reset()
+		var b0 byte
+		for {
+			if b, ok = d.mustgetc(); !ok {
+				return nil, d.err
+			}
+			d.buf.WriteByte(b)
+			if b0 == '?' && b == '>' {
+				break
+			}
+			b0 = b
+		}
+		data := d.buf.Bytes()
+		data = data[0 : len(data)-2] // chop ?>
+
+		if target == "xml" {
+			content := string(data)
+			ver := procInst("version", content)
+			if ver != "" && ver != "1.0" {
+				d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver)
+				return nil, d.err
+			}
+			enc := procInst("encoding", content)
+			if enc != "" && enc != "utf-8" && enc != "UTF-8" {
+				if d.CharsetReader == nil {
+					d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc)
+					return nil, d.err
+				}
+				newr, err := d.CharsetReader(enc, d.r.(io.Reader))
+				if err != nil {
+					d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err)
+					return nil, d.err
+				}
+				if newr == nil {
+					panic("CharsetReader returned a nil Reader for charset " + enc)
+				}
+				d.switchToReader(newr)
+			}
+		}
+		return ProcInst{target, data}, nil
+
+	case '!':
+		// <!: Maybe comment, maybe CDATA.
+		if b, ok = d.mustgetc(); !ok {
+			return nil, d.err
+		}
+		switch b {
+		case '-': // <!-
+			// Probably <!-- for a comment.
+			if b, ok = d.mustgetc(); !ok {
+				return nil, d.err
+			}
+			if b != '-' {
+				d.err = d.syntaxError("invalid sequence <!- not part of <!--")
+				return nil, d.err
+			}
+			// Look for terminator.
+			d.buf.Reset()
+			var b0, b1 byte
+			for {
+				if b, ok = d.mustgetc(); !ok {
+					return nil, d.err
+				}
+				d.buf.WriteByte(b)
+				if b0 == '-' && b1 == '-' && b == '>' {
+					break
+				}
+				b0, b1 = b1, b
+			}
+			data := d.buf.Bytes()
+			data = data[0 : len(data)-3] // chop -->
+			return Comment(data), nil
+
+		case '[': // <![
+			// Probably <![CDATA[.
+			for i := 0; i < 6; i++ {
+				if b, ok = d.mustgetc(); !ok {
+					return nil, d.err
+				}
+				if b != "CDATA["[i] {
+					d.err = d.syntaxError("invalid <![ sequence")
+					return nil, d.err
+				}
+			}
+			// Have <![CDATA[.  Read text until ]]>.
+			data := d.text(-1, true)
+			if data == nil {
+				return nil, d.err
+			}
+			return CharData(data), nil
+		}
+
+		// Probably a directive: <!DOCTYPE ...>, <!ENTITY ...>, etc.
+		// We don't care, but accumulate for caller. Quoted angle
+		// brackets do not count for nesting.
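+		// For example (illustrative), <!DOCTYPE doc [<!ENTITY x "y">]> is
+		// returned as a single Directive token; the depth counter below
+		// keeps the nested <!ENTITY ...> from ending the directive early.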
+ d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // 
unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "
", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&ãªã¾ãˆ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&ãªã¾ãˆ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset 
returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want %+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != 
nil || i.Field_a != "abcd" {
+		t.Fatal("Expecting abcd")
+	}
+}
+
+func TestUnquotedAttrs(t *testing.T) {
+	data := "<tag attr=azAZ09:-_\t>"
+	d := NewDecoder(strings.NewReader(data))
+	d.Strict = false
+	token, err := d.Token()
+	if _, ok := err.(*SyntaxError); ok {
+		t.Errorf("Unexpected error: %v", err)
+	}
+	if token.(StartElement).Name.Local != "tag" {
+		t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+	}
+	attr := token.(StartElement).Attr[0]
+	if attr.Value != "azAZ09:-_" {
+		t.Errorf("Unexpected attribute value: %v", attr.Value)
+	}
+	if attr.Name.Local != "attr" {
+		t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+	}
+}
+
+func TestValuelessAttrs(t *testing.T) {
+	tests := [][3]string{
+		{"<p nowrap>", "p", "nowrap"},
+		{"<p nowrap >", "p", "nowrap"},
+		{"<input checked/>", "input", "checked"},
+		{"<input checked />", "input", "checked"},
+	}
+	for _, test := range tests {
+		d := NewDecoder(strings.NewReader(test[0]))
+		d.Strict = false
+		token, err := d.Token()
+		if _, ok := err.(*SyntaxError); ok {
+			t.Errorf("Unexpected error: %v", err)
+		}
+		if token.(StartElement).Name.Local != test[1] {
+			t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local)
+		}
+		attr := token.(StartElement).Attr[0]
+		if attr.Value != test[2] {
+			t.Errorf("Unexpected attribute value: %v", attr.Value)
+		}
+		if attr.Name.Local != test[2] {
+			t.Errorf("Unexpected attribute name: %v", attr.Name.Local)
+		}
+	}
+}
+
+func TestCopyTokenCharData(t *testing.T) {
+	data := []byte("same data")
+	var tok1 Token = CharData(data)
+	tok2 := CopyToken(tok1)
+	if !reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(CharData) != CharData")
+	}
+	data[1] = 'o'
+	if reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(CharData) uses same buffer.")
+	}
+}
+
+func TestCopyTokenStartElement(t *testing.T) {
+	elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}
+	var tok1 Token = elt
+	tok2 := CopyToken(tok1)
+	if tok1.(StartElement).Attr[0].Value != "en" {
+		t.Error("CopyToken overwrote Attr[0]")
+	}
+	if !reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(StartElement) != StartElement")
+	}
+	tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"}
+	if reflect.DeepEqual(tok1, tok2) {
+		t.Error("CopyToken(CharData) uses same buffer.")
+	}
+}
+
+func TestSyntaxErrorLineNum(t *testing.T) {
+	testInput := "<P>Foo<P>\n\n<P>
Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. 
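+// For example (illustrative), a directive like <!DOCTYPE doc [<!-- c -->]>
+// must come back from Token as a single Directive containing the comment.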
+ +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} === modified file 'src/golang.org/x/net/webdav/litmus_test_server.go' --- src/golang.org/x/net/webdav/litmus_test_server.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/litmus_test_server.go 2016-03-22 15:18:22 +0000 @@ -32,12 +32,9 @@ func main() { flag.Parse() log.SetFlags(0) - fs := webdav.NewMemFS() - ls := webdav.NewMemLS() - http.Handle("/", &webdav.Handler{ - FileSystem: fs, - LockSystem: ls, - PropSystem: webdav.NewMemPS(fs, ls, webdav.ReadWrite), + h := &webdav.Handler{ + FileSystem: webdav.NewMemFS(), + LockSystem: webdav.NewMemLS(), Logger: func(r *http.Request, err error) { litmus := r.Header.Get("X-Litmus") if len(litmus) > 19 { @@ -56,7 +53,40 @@ log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) } }, - }) + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... + // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. 
The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) addr := fmt.Sprintf(":%d", *port) log.Printf("Serving %v", addr) === modified file 'src/golang.org/x/net/webdav/prop.go' --- src/golang.org/x/net/webdav/prop.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/prop.go 2016-03-22 15:18:22 +0000 @@ -5,7 +5,6 @@ package webdav import ( - "encoding/xml" "fmt" "io" "mime" @@ -13,51 +12,10 @@ "os" "path/filepath" "strconv" - "sync" + + "golang.org/x/net/webdav/internal/xml" ) -// PropSystem manages the properties of named resources. It allows finding -// and setting properties as defined in RFC 4918. -// -// The elements in a resource name are separated by slash ('/', U+002F) -// characters, regardless of host operating system convention. -type PropSystem interface { - // Find returns the status of properties named propnames for resource name. - // - // Each Propstat must have a unique status and each property name must - // only be part of one Propstat element. - Find(name string, propnames []xml.Name) ([]Propstat, error) - - // TODO(nigeltao) merge Find and Allprop? - - // Allprop returns the properties defined for resource name and the - // properties named in include. The returned Propstats are handled - // as in Find. - // - // Note that RFC 4918 defines 'allprop' to return the DAV: properties - // defined within the RFC plus dead properties. Other live properties - // should only be returned if they are named in 'include'. - // - // See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND - Allprop(name string, include []xml.Name) ([]Propstat, error) - - // Propnames returns the property names defined for resource name. - Propnames(name string) ([]xml.Name, error) - - // Patch patches the properties of resource name. - // - // If all patches can be applied without conflict, Patch returns a slice - // of length one and a Propstat element of status 200, naming all patched - // properties. In case of conflict, Patch returns an arbitrary long slice - // and no Propstat element must have status 200. In either case, properties - // in Propstat must not have values. - // - // Note that the WebDAV RFC requires either all patches to succeed or none. - Patch(name string, patches []Proppatch) ([]Propstat, error) - - // TODO(rost) COPY/MOVE/DELETE. -} - // Proppatch describes a property update instruction as defined in RFC 4918. // See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH type Proppatch struct { @@ -91,71 +49,76 @@ ResponseDescription string } -// memPS implements an in-memory PropSystem. It supports all of the mandatory -// live properties of RFC 4918. -type memPS struct { - fs FileSystem - ls LockSystem - m Mutability - - mu sync.RWMutex - nodes map[string]*memPSNode -} - -// memPSNode stores the dead properties of a resource. -type memPSNode struct { - mu sync.RWMutex - deadProps map[xml.Name]Property -} - -// BUG(rost): In this development version, the in-memory property system does -// not handle COPY/MOVE/DELETE requests. 
As a result, dead properties are not -// released if the according DAV resource is deleted or moved. It is not -// recommended to use a read-writeable property system in production. - -// Mutability indicates the mutability of a property system. -type Mutability bool - -const ( - ReadOnly = Mutability(false) - ReadWrite = Mutability(true) -) - -// NewMemPS returns a new in-memory PropSystem implementation. A read-only -// property system rejects all patches. A read-writeable property system -// stores arbitrary properties but refuses to change any DAV: property -// specified in RFC 4918. It imposes no limit on the size of property values. -func NewMemPS(fs FileSystem, ls LockSystem, m Mutability) PropSystem { - return &memPS{ - fs: fs, - ls: ls, - m: m, - nodes: make(map[string]*memPSNode), - } +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) } // liveProps contains all supported, protected DAV: properties. var liveProps = map[xml.Name]struct { // findFn implements the propfind function of this property. If nil, // it indicates a hidden property. - findFn func(*memPS, string, os.FileInfo) (string, error) + findFn func(FileSystem, LockSystem, string, os.FileInfo) (string, error) // dir is true if the property applies to directories. 
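+	//
+	// For example (an illustrative note): getetag below is registered with
+	// dir == false, so a PROPFIND for DAV:getetag on a directory yields a
+	// 404 Not Found Propstat instead of a computed value.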
dir bool }{ xml.Name{Space: "DAV:", Local: "resourcetype"}: { - findFn: (*memPS).findResourceType, + findFn: findResourceType, dir: true, }, xml.Name{Space: "DAV:", Local: "displayname"}: { - findFn: (*memPS).findDisplayName, + findFn: findDisplayName, dir: true, }, xml.Name{Space: "DAV:", Local: "getcontentlength"}: { - findFn: (*memPS).findContentLength, + findFn: findContentLength, dir: true, }, xml.Name{Space: "DAV:", Local: "getlastmodified"}: { - findFn: (*memPS).findLastModified, + findFn: findLastModified, dir: true, }, xml.Name{Space: "DAV:", Local: "creationdate"}: { @@ -167,184 +130,211 @@ dir: true, }, xml.Name{Space: "DAV:", Local: "getcontenttype"}: { - findFn: (*memPS).findContentType, + findFn: findContentType, dir: true, }, xml.Name{Space: "DAV:", Local: "getetag"}: { - findFn: (*memPS).findETag, - // memPS implements ETag as the concatenated hex values of a file's + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's // modification time and size. This is not a reliable synchronization - // mechanism for directories, so we do not advertise getetag for - // DAV collections. + // mechanism for directories, so we do not advertise getetag for DAV + // collections. dir: false, }, - // TODO(nigeltao) Lock properties will be defined later. + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. xml.Name{Space: "DAV:", Local: "lockdiscovery"}: {}, - xml.Name{Space: "DAV:", Local: "supportedlock"}: {}, + xml.Name{Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, } -func (ps *memPS) Find(name string, propnames []xml.Name) ([]Propstat, error) { - ps.mu.RLock() - defer ps.mu.RUnlock() - - fi, err := ps.fs.Stat(name) - if err != nil { - return nil, err - } - - // Lookup the dead properties of this resource. It's OK if there are none. - n, ok := ps.nodes[name] - if ok { - n.mu.RLock() - defer n.mu.RUnlock() - } - - pm := make(map[int]Propstat) - for _, pn := range propnames { - // If this node has dead properties, check if they contain pn. - if n != nil { - if dp, ok := n.deadProps[pn]; ok { - pstat := pm[http.StatusOK] - pstat.Props = append(pstat.Props, dp) - pm[http.StatusOK] = pstat - continue - } +// TODO(nigeltao) merge props and allprop? + +// Props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue } // Otherwise, it must either be a live property or we don't know it. 
- p := Property{XMLName: pn} - s := http.StatusNotFound - if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !fi.IsDir()) { - xmlvalue, err := prop.findFn(ps, name, fi) + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(fs, ls, name, fi) if err != nil { return nil, err } - s = http.StatusOK - p.InnerXML = []byte(xmlvalue) + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) } - pstat := pm[s] - pstat.Props = append(pstat.Props, p) - pm[s] = pstat - } - - pstats := make([]Propstat, 0, len(pm)) - for s, pstat := range pm { - pstat.Status = s - pstats = append(pstats, pstat) - } - return pstats, nil + } + return makePropstats(pstatOK, pstatNotFound), nil } -func (ps *memPS) Propnames(name string) ([]xml.Name, error) { - fi, err := ps.fs.Stat(name) - if err != nil { - return nil, err - } - - propnames := make([]xml.Name, 0, len(liveProps)) +// Propnames returns the property names defined for resource name. +func propnames(fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) for pn, prop := range liveProps { - if prop.findFn != nil && (prop.dir || !fi.IsDir()) { - propnames = append(propnames, pn) - } - } - - ps.mu.RLock() - defer ps.mu.RUnlock() - if n, ok := ps.nodes[name]; ok { - n.mu.RLock() - defer n.mu.RUnlock() - for pn := range n.deadProps { - propnames = append(propnames, pn) - } - } - - return propnames, nil + if prop.findFn != nil && (prop.dir || !isDir) { + pnames = append(pnames, pn) + } + } + for pn := range deadProps { + pnames = append(pnames, pn) + } + return pnames, nil } -func (ps *memPS) Allprop(name string, include []xml.Name) ([]Propstat, error) { - propnames, err := ps.Propnames(name) +// Allprop returns the properties defined for resource name and the properties +// named in include. +// +// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined +// within the RFC plus dead properties. Other live properties should only be +// returned if they are named in 'include'. +// +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND +func allprop(fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(fs, ls, name) if err != nil { return nil, err } - // Add names from include if they are not already covered in propnames. + // Add names from include if they are not already covered in pnames. nameset := make(map[xml.Name]bool) - for _, pn := range propnames { + for _, pn := range pnames { nameset[pn] = true } for _, pn := range include { if !nameset[pn] { - propnames = append(propnames, pn) + pnames = append(pnames, pn) } } - return ps.Find(name, propnames) + return props(fs, ls, name, pnames) } -func (ps *memPS) Patch(name string, patches []Proppatch) ([]Propstat, error) { - // A DELETE/COPY/MOVE might fly in, so we need to keep all nodes locked until - // the end of this PROPPATCH. 
- ps.mu.Lock() - defer ps.mu.Unlock() - n, ok := ps.nodes[name] - if !ok { - n = &memPSNode{deadProps: make(map[xml.Name]Property)} - } - n.mu.Lock() - defer n.mu.Unlock() +// Patch patches the properties of resource name. The return values are +// constrained in the same manner as DeadPropsHolder.Patch. +func patch(fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { + conflict := false +loop: + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + conflict = true + break loop + } + } + } + if conflict { + pstatForbidden := Propstat{ + Status: http.StatusForbidden, + XMLError: ``, + } + pstatFailedDep := Propstat{ + Status: StatusFailedDependency, + } + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) + } else { + pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) + } + } + } + return makePropstats(pstatForbidden, pstatFailedDep), nil + } - _, err := ps.fs.Stat(name) + f, err := fs.OpenFile(name, os.O_RDWR, 0) if err != nil { return nil, err } - - // Perform a dry-run to identify any patch conflicts. A read-only property - // system always fails at this stage. - pm := make(map[int]Propstat) + defer f.Close() + if dph, ok := f.(DeadPropsHolder); ok { + ret, err := dph.Patch(patches) + if err != nil { + return nil, err + } + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that + // "The contents of the prop XML element must only list the names of + // properties to which the result in the status element applies." + for _, pstat := range ret { + for i, p := range pstat.Props { + pstat.Props[i] = Property{XMLName: p.XMLName} + } + } + return ret, nil + } + // The file doesn't implement the optional DeadPropsHolder interface, so + // all patches are forbidden. + pstat := Propstat{Status: http.StatusForbidden} for _, patch := range patches { for _, p := range patch.Props { - s := http.StatusOK - if _, ok := liveProps[p.XMLName]; ok || ps.m == ReadOnly { - s = http.StatusForbidden - } - pstat := pm[s] pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) - pm[s] = pstat - } - } - // Based on the dry-run, either apply the patches or handle conflicts. - if _, ok = pm[http.StatusOK]; ok { - if len(pm) == 1 { - for _, patch := range patches { - for _, p := range patch.Props { - if patch.Remove { - delete(n.deadProps, p.XMLName) - } else { - n.deadProps[p.XMLName] = p - } - } - } - ps.nodes[name] = n - } else { - pm[StatusFailedDependency] = pm[http.StatusOK] - delete(pm, http.StatusOK) - } - } - - pstats := make([]Propstat, 0, len(pm)) - for s, pstat := range pm { - pstat.Status = s - pstats = append(pstats, pstat) - } - return pstats, nil + } + } + return []Propstat{pstat}, nil } -func (ps *memPS) findResourceType(name string, fi os.FileInfo) (string, error) { +func findResourceType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { if fi.IsDir() { - return ``, nil + return ``, nil } return "", nil } -func (ps *memPS) findDisplayName(name string, fi os.FileInfo) (string, error) { +func findDisplayName(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { if slashClean(name) == "/" { // Hide the real name of a possibly prefixed root directory. 
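+		// For example (illustrative): behind StripPrefix("/dav", ...), the
+		// collection "/dav/" reaches this function as name "/", so the
+		// real directory name is not leaked in the displayname property.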
return "", nil @@ -352,16 +342,16 @@ return fi.Name(), nil } -func (ps *memPS) findContentLength(name string, fi os.FileInfo) (string, error) { +func findContentLength(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { return strconv.FormatInt(fi.Size(), 10), nil } -func (ps *memPS) findLastModified(name string, fi os.FileInfo) (string, error) { +func findLastModified(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { return fi.ModTime().Format(http.TimeFormat), nil } -func (ps *memPS) findContentType(name string, fi os.FileInfo) (string, error) { - f, err := ps.fs.OpenFile(name, os.O_RDONLY, 0) +func findContentType(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + f, err := fs.OpenFile(name, os.O_RDONLY, 0) if err != nil { return "", err } @@ -379,14 +369,17 @@ return ctype, err } -func (ps *memPS) findETag(name string, fi os.FileInfo) (string, error) { - return detectETag(fi), nil -} - -// detectETag determines the ETag for the file described by fi. -func detectETag(fi os.FileInfo) string { +func findETag(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { // The Apache http 2.4 web server by default concatenates the // modification time and size of a file. We replicate the heuristic // with nanosecond granularity. - return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()) + return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil +} + +func findSupportedLock(fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return `` + + `` + + `` + + `` + + ``, nil } === modified file 'src/golang.org/x/net/webdav/prop_test.go' --- src/golang.org/x/net/webdav/prop_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/prop_test.go 2016-03-22 15:18:22 +0000 @@ -5,18 +5,20 @@ package webdav import ( - "encoding/xml" "fmt" "net/http" + "os" "reflect" "sort" "testing" + + "golang.org/x/net/webdav/internal/xml" ) func TestMemPS(t *testing.T) { // calcProps calculates the getlastmodified and getetag DAV: property // values in pstats for resource name in file-system fs. 
- calcProps := func(name string, fs FileSystem, pstats []Propstat) error { + calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { fi, err := fs.Stat(name) if err != nil { return err @@ -31,7 +33,11 @@ if fi.IsDir() { continue } - p.InnerXML = []byte(detectETag(fi)) + etag, err := findETag(fs, ls, name, fi) + if err != nil { + return err + } + p.InnerXML = []byte(etag) pst.Props[i] = p } } @@ -39,43 +45,54 @@ return nil } + const ( + lockEntry = `` + + `` + + `` + + `` + + `` + statForbiddenError = `` + ) + type propOp struct { op string name string - propnames []xml.Name + pnames []xml.Name patches []Proppatch - wantNames []xml.Name + wantPnames []xml.Name wantPropstats []Propstat } testCases := []struct { - desc string - mutability Mutability - buildfs []string - propOp []propOp + desc string + noDeadProps bool + buildfs []string + propOp []propOp }{{ desc: "propname", buildfs: []string{"mkdir /dir", "touch /file"}, propOp: []propOp{{ op: "propname", name: "/dir", - wantNames: []xml.Name{ + wantPnames: []xml.Name{ xml.Name{Space: "DAV:", Local: "resourcetype"}, xml.Name{Space: "DAV:", Local: "displayname"}, xml.Name{Space: "DAV:", Local: "getcontentlength"}, xml.Name{Space: "DAV:", Local: "getlastmodified"}, xml.Name{Space: "DAV:", Local: "getcontenttype"}, + xml.Name{Space: "DAV:", Local: "supportedlock"}, }, }, { op: "propname", name: "/file", - wantNames: []xml.Name{ + wantPnames: []xml.Name{ xml.Name{Space: "DAV:", Local: "resourcetype"}, xml.Name{Space: "DAV:", Local: "displayname"}, xml.Name{Space: "DAV:", Local: "getcontentlength"}, xml.Name{Space: "DAV:", Local: "getlastmodified"}, xml.Name{Space: "DAV:", Local: "getcontenttype"}, xml.Name{Space: "DAV:", Local: "getetag"}, + xml.Name{Space: "DAV:", Local: "supportedlock"}, }, }}, }, { @@ -88,7 +105,7 @@ Status: http.StatusOK, Props: []Property{{ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(``), + InnerXML: []byte(``), }, { XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, InnerXML: []byte("dir"), @@ -101,6 +118,9 @@ }, { XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), }}, }}, }, { @@ -126,12 +146,15 @@ }, { XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), }}, }}, }, { op: "allprop", name: "/file", - propnames: []xml.Name{ + pnames: []xml.Name{ {"DAV:", "resourcetype"}, {"foo", "bar"}, }, @@ -155,6 +178,9 @@ }, { XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, InnerXML: nil, // Calculated during test. 
+ }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), }}}, { Status: http.StatusNotFound, Props: []Property{{ @@ -166,20 +192,20 @@ desc: "propfind DAV:resourcetype", buildfs: []string{"mkdir /dir", "touch /file"}, propOp: []propOp{{ - op: "propfind", - name: "/dir", - propnames: []xml.Name{{"DAV:", "resourcetype"}}, + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, wantPropstats: []Propstat{{ Status: http.StatusOK, Props: []Property{{ XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, - InnerXML: []byte(``), + InnerXML: []byte(``), }}, }}, }, { - op: "propfind", - name: "/file", - propnames: []xml.Name{{"DAV:", "resourcetype"}}, + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "resourcetype"}}, wantPropstats: []Propstat{{ Status: http.StatusOK, Props: []Property{{ @@ -192,9 +218,9 @@ desc: "propfind unsupported DAV properties", buildfs: []string{"mkdir /dir"}, propOp: []propOp{{ - op: "propfind", - name: "/dir", - propnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getcontentlanguage"}}, wantPropstats: []Propstat{{ Status: http.StatusNotFound, Props: []Property{{ @@ -202,9 +228,9 @@ }}, }}, }, { - op: "propfind", - name: "/dir", - propnames: []xml.Name{{"DAV:", "creationdate"}}, + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "creationdate"}}, wantPropstats: []Propstat{{ Status: http.StatusNotFound, Props: []Property{{ @@ -216,9 +242,9 @@ desc: "propfind getetag for files but not for directories", buildfs: []string{"mkdir /dir", "touch /file"}, propOp: []propOp{{ - op: "propfind", - name: "/dir", - propnames: []xml.Name{{"DAV:", "getetag"}}, + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"DAV:", "getetag"}}, wantPropstats: []Propstat{{ Status: http.StatusNotFound, Props: []Property{{ @@ -226,9 +252,9 @@ }}, }}, }, { - op: "propfind", - name: "/file", - propnames: []xml.Name{{"DAV:", "getetag"}}, + op: "propfind", + name: "/file", + pnames: []xml.Name{{"DAV:", "getetag"}}, wantPropstats: []Propstat{{ Status: http.StatusOK, Props: []Property{{ @@ -238,73 +264,72 @@ }}, }}, }, { - desc: "proppatch property on read-only property system", - buildfs: []string{"mkdir /dir"}, - mutability: ReadOnly, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusForbidden, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusForbidden, - Props: []Property{{ - XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, - }}, - }}, - }}, - }, { - desc: "proppatch dead property", - buildfs: []string{"mkdir /dir"}, - mutability: ReadWrite, - propOp: []propOp{{ - op: "proppatch", - name: "/dir", - patches: []Proppatch{{ - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }}, - }}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - XMLName: xml.Name{Space: "foo", Local: "bar"}, - }}, - }}, - }, { - op: "propfind", - name: "/dir", - propnames: []xml.Name{{Space: "foo", Local: "bar"}}, - wantPropstats: []Propstat{{ - Status: http.StatusOK, - Props: []Property{{ - 
XMLName: xml.Name{Space: "foo", Local: "bar"}, - InnerXML: []byte("baz"), - }}, - }}, - }}, - }, { - desc: "proppatch dead property with failed dependency", - buildfs: []string{"mkdir /dir"}, - mutability: ReadWrite, + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, propOp: []propOp{{ op: "proppatch", name: "/dir", @@ -320,7 +345,8 @@ }}, }}, wantPropstats: []Propstat{{ - Status: http.StatusForbidden, + Status: http.StatusForbidden, + XMLError: statForbiddenError, Props: []Property{{ XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, }}, @@ -331,9 +357,9 @@ }}, }}, }, { - op: "propfind", - name: "/dir", - propnames: []xml.Name{{Space: "foo", Local: "bar"}}, + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, wantPropstats: []Propstat{{ Status: http.StatusNotFound, Props: []Property{{ @@ -342,9 +368,8 @@ }}, }}, }, { - desc: "proppatch remove dead property", - buildfs: []string{"mkdir /dir"}, - mutability: ReadWrite, + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, propOp: []propOp{{ op: "proppatch", name: "/dir", @@ -368,7 +393,7 @@ }, { op: "propfind", name: "/dir", - propnames: []xml.Name{ + pnames: []xml.Name{ {Space: "foo", Local: "bar"}, {Space: "spam", Local: "ham"}, }, @@ -400,7 +425,7 @@ }, { op: "propfind", name: "/dir", - propnames: []xml.Name{ + pnames: []xml.Name{ {Space: "foo", Local: "bar"}, {Space: "spam", Local: "ham"}, }, @@ -418,9 +443,8 @@ }}, }}, }, { - desc: "propname with dead property", - buildfs: []string{"touch /file"}, - mutability: ReadWrite, + desc: "propname with dead property", + buildfs: []string{"touch /file"}, propOp: []propOp{{ op: "proppatch", name: "/file", @@ -439,20 +463,20 @@ }, { op: "propname", name: "/file", - wantNames: []xml.Name{ + wantPnames: []xml.Name{ xml.Name{Space: "DAV:", Local: "resourcetype"}, xml.Name{Space: "DAV:", Local: "displayname"}, xml.Name{Space: "DAV:", Local: "getcontentlength"}, xml.Name{Space: "DAV:", Local: "getlastmodified"}, xml.Name{Space: "DAV:", Local: "getcontenttype"}, xml.Name{Space: 
"DAV:", Local: "getetag"}, + xml.Name{Space: "DAV:", Local: "supportedlock"}, xml.Name{Space: "foo", Local: "bar"}, }, }}, }, { - desc: "proppatch remove unknown dead property", - buildfs: []string{"mkdir /dir"}, - mutability: ReadWrite, + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, propOp: []propOp{{ op: "proppatch", name: "/dir", @@ -473,9 +497,9 @@ desc: "bad: propfind unknown property", buildfs: []string{"mkdir /dir"}, propOp: []propOp{{ - op: "propfind", - name: "/dir", - propnames: []xml.Name{{"foo:", "bar"}}, + op: "propfind", + name: "/dir", + pnames: []xml.Name{{"foo:", "bar"}}, wantPropstats: []Propstat{{ Status: http.StatusNotFound, Props: []Property{{ @@ -490,11 +514,13 @@ if err != nil { t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } ls := NewMemLS() - ps := NewMemPS(fs, ls, tc.mutability) for _, op := range tc.propOp { desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) - if err = calcProps(op.name, fs, op.wantPropstats); err != nil { + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { t.Fatalf("%s: calcProps: %v", desc, err) } @@ -502,23 +528,23 @@ var propstats []Propstat switch op.op { case "propname": - names, err := ps.Propnames(op.name) + pnames, err := propnames(fs, ls, op.name) if err != nil { t.Errorf("%s: got error %v, want nil", desc, err) continue } - sort.Sort(byXMLName(names)) - sort.Sort(byXMLName(op.wantNames)) - if !reflect.DeepEqual(names, op.wantNames) { - t.Errorf("%s: names\ngot %q\nwant %q", desc, names, op.wantNames) + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) } continue case "allprop": - propstats, err = ps.Allprop(op.name, op.propnames) + propstats, err = allprop(fs, ls, op.name, op.pnames) case "propfind": - propstats, err = ps.Find(op.name, op.propnames) + propstats, err = props(fs, ls, op.name, op.pnames) case "proppatch": - propstats, err = ps.Patch(op.name, op.patches) + propstats, err = patch(fs, ls, op.name, op.patches) default: t.Fatalf("%s: %s not implemented", desc, op.op) } @@ -551,36 +577,43 @@ type byXMLName []xml.Name -func (b byXMLName) Len() int { - return len(b) -} -func (b byXMLName) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} -func (b byXMLName) Less(i, j int) bool { - return cmpXMLName(b[i], b[j]) -} +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } type byPropname []Property -func (b byPropname) Len() int { - return len(b) -} -func (b byPropname) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} -func (b byPropname) Less(i, j int) bool { - return cmpXMLName(b[i].XMLName, b[j].XMLName) -} +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } type byStatus []Propstat -func (b byStatus) Len() int { - return len(b) -} -func (b byStatus) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} -func (b byStatus) Less(i, j int) bool { - return b[i].Status < b[j].Status -} +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem 
+} + +func (fs noDeadPropsFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } === modified file 'src/golang.org/x/net/webdav/webdav.go' --- src/golang.org/x/net/webdav/webdav.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/webdav.go 2016-03-22 15:18:22 +0000 @@ -5,26 +5,43 @@ // Package webdav etc etc TODO. package webdav // import "golang.org/x/net/webdav" -// TODO: ETag, properties. - import ( - "encoding/xml" "errors" "fmt" "io" + "log" "net/http" "net/url" "os" + "runtime" + "strings" "time" ) +// Package webdav's XML output requires the standard library's encoding/xml +// package version 1.5 or greater. Otherwise, it will produce malformed XML. +// +// As of May 2015, the Go stable release is version 1.4, so we print a message +// to let users know that this golang.org/x/etc package won't work yet. +// +// This package also won't work with Go 1.3 and earlier, but making this +// runtime version check catch all the earlier versions too, and not just +// "1.4.x", isn't worth the complexity. +// +// TODO: delete this check at some point after Go 1.5 is released. +var go1Dot4 = strings.HasPrefix(runtime.Version(), "go1.4.") + +func init() { + if go1Dot4 { + log.Println("package webdav requires Go version 1.5 or greater") + } +} + type Handler struct { // FileSystem is the virtual file system. FileSystem FileSystem // LockSystem is the lock management system. LockSystem LockSystem - // PropSystem is the property management system. - PropSystem PropSystem // Logger is an optional error logger. If non-nil, it will be called // for all HTTP requests. Logger func(*http.Request, error) @@ -36,8 +53,6 @@ status, err = http.StatusInternalServerError, errNoFileSystem } else if h.LockSystem == nil { status, err = http.StatusInternalServerError, errNoLockSystem - } else if h.PropSystem == nil { - status, err = http.StatusInternalServerError, errNoPropSystem } else { switch r.Method { case "OPTIONS": @@ -187,14 +202,14 @@ if err != nil { return http.StatusNotFound, err } - pstats, err := h.PropSystem.Find(r.URL.Path, []xml.Name{ - {Space: "DAV:", Local: "getetag"}, - {Space: "DAV:", Local: "getcontenttype"}, - }) - if err != nil { - return http.StatusInternalServerError, err + if !fi.IsDir() { + etag, err := findETag(h.FileSystem, h.LockSystem, r.URL.Path, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) } - writeDAVHeaders(w, pstats) + // Let ServeContent determine the Content-Type header. http.ServeContent(w, r, r.URL.Path, fi.ModTime(), f) return 0, nil } @@ -229,26 +244,31 @@ return status, err } defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. 
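+	// A sketch of that support (not implemented here) would stat the
+	// existing file, compare findETag's result with any If-Match /
+	// If-None-Match values, and reply 412 Precondition Failed on a
+	// mismatch before the OpenFile below.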
f, err := h.FileSystem.OpenFile(r.URL.Path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return http.StatusNotFound, err } _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. if copyErr != nil { return http.StatusMethodNotAllowed, copyErr } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } if closeErr != nil { return http.StatusMethodNotAllowed, closeErr } - pstats, err := h.PropSystem.Find(r.URL.Path, []xml.Name{ - {Space: "DAV:", Local: "getetag"}, - }) + etag, err := findETag(h.FileSystem, h.LockSystem, r.URL.Path, fi) if err != nil { return http.StatusInternalServerError, err } - writeDAVHeaders(w, pstats) + w.Header().Set("ETag", etag) return http.StatusCreated, nil } @@ -272,8 +292,6 @@ } func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { - // TODO: COPY/MOVE for Properties, as per sections 9.8.2 and 9.9.1. - hdr := r.Header.Get("Destination") if hdr == "" { return http.StatusBadRequest, errInvalidDestination @@ -285,9 +303,6 @@ if u.Host != r.Host { return http.StatusBadGateway, errInvalidDestination } - // TODO: do we need a webdav.StripPrefix HTTP handler that's like the - // standard library's http.StripPrefix handler, but also strips the - // prefix in the Destination header? dst, src := u.Path, r.URL.Path if dst == "" { @@ -455,7 +470,7 @@ func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { fi, err := h.FileSystem.Stat(r.URL.Path) if err != nil { - if err == os.ErrNotExist { + if os.IsNotExist(err) { return http.StatusNotFound, err } return http.StatusMethodNotAllowed, err @@ -480,19 +495,19 @@ } var pstats []Propstat if pf.Propname != nil { - propnames, err := h.PropSystem.Propnames(path) + pnames, err := propnames(h.FileSystem, h.LockSystem, path) if err != nil { return err } pstat := Propstat{Status: http.StatusOK} - for _, xmlname := range propnames { + for _, xmlname := range pnames { pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) } pstats = append(pstats, pstat) } else if pf.Allprop != nil { - pstats, err = h.PropSystem.Allprop(path, pf.Prop) + pstats, err = allprop(h.FileSystem, h.LockSystem, path, pf.Prop) } else { - pstats, err = h.PropSystem.Find(path, pf.Prop) + pstats, err = props(h.FileSystem, h.LockSystem, path, pf.Prop) } if err != nil { return err @@ -519,7 +534,7 @@ defer release() if _, err := h.FileSystem.Stat(r.URL.Path); err != nil { - if err == os.ErrNotExist { + if os.IsNotExist(err) { return http.StatusNotFound, err } return http.StatusMethodNotAllowed, err @@ -528,7 +543,7 @@ if err != nil { return status, err } - pstats, err := h.PropSystem.Patch(r.URL.Path, patches) + pstats, err := patch(h.FileSystem, h.LockSystem, r.URL.Path, patches) if err != nil { return http.StatusInternalServerError, err } @@ -544,26 +559,6 @@ return 0, nil } -// davHeaderNames maps the names of DAV properties to their corresponding -// HTTP response headers. 
-var davHeaderNames = map[xml.Name]string{ - xml.Name{Space: "DAV:", Local: "getetag"}: "ETag", - xml.Name{Space: "DAV:", Local: "getcontenttype"}: "Content-Type", -} - -func writeDAVHeaders(w http.ResponseWriter, pstats []Propstat) { - for _, pst := range pstats { - if pst.Status == http.StatusOK { - for _, p := range pst.Props { - if n, ok := davHeaderNames[p.XMLName]; ok { - w.Header().Set(n, string(p.InnerXML)) - } - } - break - } - } -} - func makePropstatResponse(href string, pstats []Propstat) *response { resp := response{ Href: []string{href}, @@ -610,6 +605,29 @@ return invalidDepth } +// StripPrefix is like http.StripPrefix but it also strips the prefix from any +// Destination headers, so that COPY and MOVE requests also see stripped paths. +func StripPrefix(prefix string, h http.Handler) http.Handler { + if prefix == "" { + return h + } + h = http.StripPrefix(prefix, h) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + dsts := r.Header["Destination"] + for i, dst := range dsts { + u, err := url.Parse(dst) + if err != nil { + continue + } + if p := strings.TrimPrefix(u.Path, prefix); len(p) < len(u.Path) { + u.Path = p + dsts[i] = u.String() + } + } + h.ServeHTTP(w, r) + }) +} + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 const ( StatusMulti = 207 @@ -649,7 +667,6 @@ errInvalidTimeout = errors.New("webdav: invalid timeout") errNoFileSystem = errors.New("webdav: no file system") errNoLockSystem = errors.New("webdav: no lock system") - errNoPropSystem = errors.New("webdav: no property system") errNotADirectory = errors.New("webdav: not a directory") errRecursionTooDeep = errors.New("webdav: recursion too deep") errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") === added file 'src/golang.org/x/net/webdav/webdav_test.go' --- src/golang.org/x/net/webdav/webdav_test.go 1970-01-01 00:00:00 +0000 +++ src/golang.org/x/net/webdav/webdav_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,161 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" +) + +// TestStripPrefix tests the StripPrefix function. We can't test the +// StripPrefix function with the litmus test, even though all of the litmus +// test paths start with "/litmus/", because one of the first things that the +// litmus test does is "MKCOL /litmus/". That request succeeds without a +// StripPrefix, but fails with a StripPrefix because you cannot MKCOL the root +// directory of a FileSystem. 
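// For reference, a hypothetical production setup that this test simulates
// would be wired up roughly as:
//
//	h := &Handler{
//		FileSystem: NewMemFS(),
//		LockSystem: NewMemLS(),
//	}
//	http.Handle("/dav/", StripPrefix("/dav", h))
//
// StripPrefix trims the prefix from both the request path and the path in
// any Destination header, so COPY and MOVE destinations resolve correctly.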
+func TestStripPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + do := func(method, urlStr string, body io.Reader, wantStatusCode int, headers ...string) error { + req, err := http.NewRequest(method, urlStr, body) + if err != nil { + return err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + for _, prefix := range prefixes { + fs := NewMemFS() + h := http.Handler(&Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + }) + mux := http.NewServeMux() + if prefix != "/" { + // Note that this is webdav.StripPrefix, not http.StripPrefix. + h = StripPrefix(prefix, h) + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // which should yield the (possibly stripped) filenames /a/b/c and + // /a/b/e/f, plus their parent directories. + + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MKCOL", srv.URL+"/a", nil, wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MKCOL", srv.URL+"/a/b", nil, wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if err := do("PUT", srv.URL+"/a/b/c", strings.NewReader(blah), wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if err := do("COPY", srv.URL+"/a/b/c", nil, wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MKCOL", srv.URL+"/a/b/e", nil, wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if err := do("MOVE", srv.URL+"/a/b/d", nil, wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + got, err := find(nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": []string{"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f"}, + "/a/": []string{"/", "/b", "/b/c", "/b/e", "/b/e/f"}, + "/a/b/": []string{"/", "/c", "/e", "/e/f"}, + "/a/b/c/": 
[]string{"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} === modified file 'src/golang.org/x/net/webdav/xml.go' --- src/golang.org/x/net/webdav/xml.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/xml.go 2016-03-22 15:18:22 +0000 @@ -9,11 +9,12 @@ import ( "bytes" - "encoding/xml" "fmt" "io" "net/http" "time" + + "golang.org/x/net/webdav/internal/xml" ) // http://www.webdav.org/specs/rfc4918.html#ELEMENT_lockinfo @@ -206,32 +207,55 @@ } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. type xmlError struct { - XMLName xml.Name `xml:"DAV: error"` + XMLName xml.Name `xml:"D:error"` InnerXML []byte `xml:",innerxml"` } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. type propstat struct { - Prop []Property `xml:"DAV: prop>_ignored_"` - Status string `xml:"DAV: status"` - Error *xmlError `xml:"DAV: error"` - ResponseDescription string `xml:"DAV: responsedescription,omitempty"` + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + for k, prop := range ps.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = xml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ps.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. + type newpropstat propstat + return e.EncodeElement(newpropstat(ps), start) } // http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. type response struct { - XMLName xml.Name `xml:"DAV: response"` - Href []string `xml:"DAV: href"` - Propstat []propstat `xml:"DAV: propstat"` - Status string `xml:"DAV: status,omitempty"` - Error *xmlError `xml:"DAV: error"` - ResponseDescription string `xml:"DAV: responsedescription,omitempty"` + XMLName xml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` } // MultistatusWriter marshals one or more Responses into a XML // multistatus response. // See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 type multistatusWriter struct { // ResponseDescription contains the optional responsedescription // of the multistatus XML element. 
Only the latest content before @@ -291,7 +315,7 @@ Local: "multistatus", }, Attr: []xml.Attr{{ - Name: xml.Name{Local: "xmlns"}, + Name: xml.Name{Space: "xmlns", Local: "D"}, Value: "DAV:", }}, }) @@ -340,6 +364,35 @@ return d } +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := xml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(xml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + // UnmarshalXML appends the property names and values enclosed within start // to ps. // @@ -355,7 +408,7 @@ if err != nil { return err } - switch t.(type) { + switch elem := t.(type) { case xml.EndElement: if len(*ps) == 0 { return fmt.Errorf("%s must not be empty", start.Name.Local) @@ -366,29 +419,10 @@ XMLName: t.(xml.StartElement).Name, Lang: xmlLang(t.(xml.StartElement), lang), } - // The XML value of a property can be arbitrary, mixed-content XML. - // To make sure that the unmarshalled value contains all required - // namespaces, we encode all the property value XML tokens into a - // buffer. This forces the encoder to redeclare any used namespaces. - var b bytes.Buffer - e := xml.NewEncoder(&b) - for { - t, err = next(d) - if err != nil { - return err - } - if e, ok := t.(xml.EndElement); ok && e.Name == p.XMLName { - break - } - if err = e.EncodeToken(t); err != nil { - return err - } - } - err = e.Flush() + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) if err != nil { return err } - p.InnerXML = b.Bytes() *ps = append(*ps, p) } } === modified file 'src/golang.org/x/net/webdav/xml_test.go' --- src/golang.org/x/net/webdav/xml_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/webdav/xml_test.go 2016-03-22 15:18:22 +0000 @@ -6,13 +6,16 @@ import ( "bytes" - "encoding/xml" + "fmt" "io" "net/http" "net/http/httptest" "reflect" + "sort" "strings" "testing" + + "golang.org/x/net/webdav/internal/xml" ) func TestReadLockInfo(t *testing.T) { @@ -345,6 +348,10 @@ } func TestMultistatusWriter(t *testing.T) { + if go1Dot4 { + t.Skip("TestMultistatusWriter requires Go version 1.5 or greater") + } + ///The "section x.y.z" test cases come from section x.y.z of the spec at // http://www.webdav.org/specs/rfc4918.html testCases := []struct { @@ -450,20 +457,20 @@ respdesc: "There has been an access violation error.", wantXML: `` + `` + - `` + + `` + ` ` + ` http://example.com/foo` + ` ` + ` ` + - ` Box type A` + - ` J.J. Johnson` + + ` Box type A` + + ` J.J. Johnson` + ` ` + ` HTTP/1.1 200 OK` + ` ` + ` ` + ` ` + - ` ` + - ` ` + + ` ` + + ` ` + ` ` + ` HTTP/1.1 403 Forbidden` + ` The user does not have access to the DingALing property.` + @@ -558,6 +565,7 @@ wantCode: http.StatusOK, }} + n := xmlNormalizer{omitWhitespace: true} loop: for _, tc := range testCases { rec := httptest.NewRecorder() @@ -587,70 +595,46 @@ tc.desc, rec.Code, tc.wantCode) continue } - - // normalize returns the normalized XML content of s. In contrast to - // the WebDAV specification, it ignores whitespace within property - // values of mixed XML content. 
- normalize := func(s string) string { - d := xml.NewDecoder(strings.NewReader(s)) - var b bytes.Buffer - e := xml.NewEncoder(&b) - for { - tok, err := d.Token() - if err != nil { - if err == io.EOF { - break - } - t.Fatalf("%s: Token %v", tc.desc, err) - } - switch val := tok.(type) { - case xml.Comment, xml.Directive, xml.ProcInst: - continue - case xml.CharData: - if len(bytes.TrimSpace(val)) == 0 { - continue - } - } - if err := e.EncodeToken(tok); err != nil { - t.Fatalf("%s: EncodeToken: %v", tc.desc, err) - } - } - if err := e.Flush(); err != nil { - t.Fatalf("%s: Flush: %v", tc.desc, err) - } - return b.String() + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue } - - gotXML := normalize(rec.Body.String()) - wantXML := normalize(tc.wantXML) - if gotXML != wantXML { - t.Errorf("%s: XML body\ngot %q\nwant %q", tc.desc, gotXML, wantXML) + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) } } } func TestReadProppatch(t *testing.T) { - // TODO(rost): These "golden XML" tests easily break with changes in the - // xml package. A whitespace-preserving normalizer of XML content is - // required to make these tests more robust. + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + testCases := []struct { desc string input string wantPP []Proppatch wantStatus int }{{ - desc: "proppatch: section 9.2", + desc: "proppatch: section 9.2 (with simple property value)", input: `` + `` + `` + ` ` + - ` ` + - ` ` + - ` Jim Whitehead` + - ` Roy Fielding` + - ` ` + - ` ` + + ` somevalue` + ` ` + ` ` + ` ` + @@ -660,17 +644,7 @@ Props: []Property{{ xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, "", - []byte(`` + - ` ` + - `` + - `Jim Whitehead` + - `` + - ` ` + - `` + - `Roy Fielding` + - `` + - ` `, - ), + []byte(`somevalue`), }}, }, { Remove: true, @@ -681,59 +655,6 @@ }}, }}, }, { - desc: "proppatch: section 4.3.1 (mixed content)", - input: `` + - `` + - `` + - ` ` + - ` ` + - ` ` + - ` Jane Doe` + - ` ` + - ` mailto:jane.doe@example.com` + - ` http://www.example.com` + - ` ` + - ` Jane has been working way too long on the` + - ` long-awaited revision of ]]>.` + - ` ` + - ` ` + - ` ` + - ` ` + - ``, - wantPP: []Proppatch{{ - Props: []Property{{ - xml.Name{Space: "http://example.com/ns", Local: "author"}, - "en", - []byte(`` + - ` ` + - `Jane Doe` + - ` ` + - `` + - `mailto:jane.doe@example.com` + - `` + - ` ` + - `` + - `http://www.example.com` + - `` + - ` ` + - - `` + - ` ` + - ` Jane has been working way` + - ` too` + - ` long on the` + ` ` + - ` long-awaited revision of <RFC2518>.` + - ` ` + - `` + - ` `, - ), - }}, - }}, - }, { desc: "proppatch: lang attribute on prop", input: `` + `` + @@ -802,7 +723,187 @@ continue } if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { - t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, pp, tc.wantPP) - } - } + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + 
wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := xml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// * Rename namespace prefixes according to an internal heuristic. +// * Remove unnecessary namespace declarations. +// * Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// * Remove XML directives and processing instructions. +// * Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// * Remove comments, if instructed to do so. +// +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := xml.NewDecoder(r) + e := xml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case xml.Directive, xml.ProcInst: + continue + case xml.Comment: + if n.omitComments { + continue + } + case xml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case xml.StartElement: + start, _ := xml.CopyToken(val).(xml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. 
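// For instance, an illustrative use with whitespace normalization enabled:
//
//	n := xmlNormalizer{omitWhitespace: true}
//	eq, err := n.equalXML(
//		strings.NewReader("<foo> <bar/> </foo>"),
//		strings.NewReader("<foo><bar/></foo>"),
//	)
//	// err is nil and eq is true: the inputs differ only in whitespace
//	// between element tags, which omitWhitespace ignores.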
+func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []xml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local } === modified file 'src/golang.org/x/net/websocket/hybi.go' --- src/golang.org/x/net/websocket/hybi.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/websocket/hybi.go 2016-03-22 15:18:22 +0000 @@ -157,6 +157,9 @@ if err != nil { return } + if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits + b &= 0x7f + } header = append(header, b) hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b) } @@ -264,7 +267,7 @@ payloadType byte } -func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (r frameReader, err error) { +func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) { if handler.conn.IsServerConn() { // The client MUST mask all frames sent to the server. if frame.(*hybiFrameReader).header.MaskingKey == nil { @@ -288,20 +291,19 @@ handler.payloadType = frame.PayloadType() case CloseFrame: return nil, io.EOF - case PingFrame: - pingMsg := make([]byte, maxControlFramePayloadLength) - n, err := io.ReadFull(frame, pingMsg) - if err != nil && err != io.ErrUnexpectedEOF { + case PingFrame, PongFrame: + b := make([]byte, maxControlFramePayloadLength) + n, err := io.ReadFull(frame, b) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return nil, err } io.Copy(ioutil.Discard, frame) - n, err = handler.WritePong(pingMsg[:n]) - if err != nil { - return nil, err + if frame.PayloadType() == PingFrame { + if _, err := handler.WritePong(b[:n]); err != nil { + return nil, err + } } return nil, nil - case PongFrame: - return nil, ErrNotImplemented } return frame, nil } @@ -370,6 +372,23 @@ return } + +// removeZone removes IPv6 zone identifier from host. +// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080" +func removeZone(host string) string { + if !strings.HasPrefix(host, "[") { + return host + } + i := strings.LastIndex(host, "]") + if i < 0 { + return host + } + j := strings.LastIndex(host[:i], "%") + if j < 0 { + return host + } + return host[:j] + host[i:] +} + // getNonceAccept computes the base64-encoded SHA-1 of the concatenation of // the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string. func getNonceAccept(nonce []byte) (expected []byte, err error) { @@ -389,7 +408,10 @@ func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) { bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n") - bw.WriteString("Host: " + config.Location.Host + "\r\n") + // According to RFC 6874, an HTTP client, proxy, or other + // intermediary must remove any IPv6 zone identifier attached + // to an outgoing URI. + bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n") bw.WriteString("Upgrade: websocket\r\n") bw.WriteString("Connection: Upgrade\r\n") nonce := generateNonce() @@ -515,15 +537,15 @@ return http.StatusSwitchingProtocols, nil } -// Origin parses Origin header in "req". -// If origin is "null", returns (nil, nil).
+// Origin parses the Origin header in req. +// If the Origin header is not set, it returns nil and nil. func Origin(config *Config, req *http.Request) (*url.URL, error) { var origin string switch config.Version { case ProtocolVersionHybi13: origin = req.Header.Get("Origin") } - if origin == "null" { + if origin == "" { return nil, nil } return url.ParseRequestURI(origin) === modified file 'src/golang.org/x/net/websocket/hybi_test.go' --- src/golang.org/x/net/websocket/hybi_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/websocket/hybi_test.go 2016-03-22 15:18:22 +0000 @@ -31,63 +31,74 @@ } func TestHybiClientHandshake(t *testing.T) { - b := bytes.NewBuffer([]byte{}) - bw := bufio.NewWriter(b) - br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols + type test struct { + url, host string + } + tests := []test{ + {"ws://server.example.com/chat", "server.example.com"}, + {"ws://127.0.0.1/chat", "127.0.0.1"}, + } + if _, err := url.ParseRequestURI("http://[fe80::1%25lo0]"); err == nil { + tests = append(tests, test{"ws://[fe80::1%25lo0]/chat", "[fe80::1]"}) + } + + for _, tt := range tests { + var b bytes.Buffer + bw := bufio.NewWriter(&b) + br := bufio.NewReader(strings.NewReader(`HTTP/1.1 101 Switching Protocols Upgrade: websocket Connection: Upgrade Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo= Sec-WebSocket-Protocol: chat `)) - var err error - config := new(Config) - config.Location, err = url.ParseRequestURI("ws://server.example.com/chat") - if err != nil { - t.Fatal("location url", err) - } - config.Origin, err = url.ParseRequestURI("http://example.com") - if err != nil { - t.Fatal("origin url", err) - } - config.Protocol = append(config.Protocol, "chat") - config.Protocol = append(config.Protocol, "superchat") - config.Version = ProtocolVersionHybi13 - - config.handshakeData = map[string]string{ - "key": "dGhlIHNhbXBsZSBub25jZQ==", - } - err = hybiClientHandshake(config, br, bw) - if err != nil { - t.Errorf("handshake failed: %v", err) - } - req, err := http.ReadRequest(bufio.NewReader(b)) - if err != nil { - t.Fatalf("read request: %v", err) - } - if req.Method != "GET" { - t.Errorf("request method expected GET, but got %q", req.Method) - } - if req.URL.Path != "/chat" { - t.Errorf("request path expected /chat, but got %q", req.URL.Path) - } - if req.Proto != "HTTP/1.1" { - t.Errorf("request proto expected HTTP/1.1, but got %q", req.Proto) - } - if req.Host != "server.example.com" { - t.Errorf("request Host expected server.example.com, but got %v", req.Host) - } - var expectedHeader = map[string]string{ - "Connection": "Upgrade", - "Upgrade": "websocket", - "Sec-Websocket-Key": config.handshakeData["key"], - "Origin": config.Origin.String(), - "Sec-Websocket-Protocol": "chat, superchat", - "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), - } - for k, v := range expectedHeader { - if req.Header.Get(k) != v { - t.Errorf(fmt.Sprintf("%s expected %q but got %q", k, v, req.Header.Get(k))) + var err error + var config Config + config.Location, err = url.ParseRequestURI(tt.url) + if err != nil { + t.Fatal("location url", err) + } + config.Origin, err = url.ParseRequestURI("http://example.com") + if err != nil { + t.Fatal("origin url", err) + } + config.Protocol = append(config.Protocol, "chat") + config.Protocol = append(config.Protocol, "superchat") + config.Version = ProtocolVersionHybi13 + config.handshakeData = map[string]string{ + "key": "dGhlIHNhbXBsZSBub25jZQ==", + } + if err := hybiClientHandshake(&config, br, bw); err != nil { + 
t.Fatal("handshake", err) + } + req, err := http.ReadRequest(bufio.NewReader(&b)) + if err != nil { + t.Fatal("read request", err) + } + if req.Method != "GET" { + t.Errorf("request method expected GET, but got %s", req.Method) + } + if req.URL.Path != "/chat" { + t.Errorf("request path expected /chat, but got %s", req.URL.Path) + } + if req.Proto != "HTTP/1.1" { + t.Errorf("request proto expected HTTP/1.1, but got %s", req.Proto) + } + if req.Host != tt.host { + t.Errorf("request host expected %s, but got %s", tt.host, req.Host) + } + var expectedHeader = map[string]string{ + "Connection": "Upgrade", + "Upgrade": "websocket", + "Sec-Websocket-Key": config.handshakeData["key"], + "Origin": config.Origin.String(), + "Sec-Websocket-Protocol": "chat, superchat", + "Sec-Websocket-Version": fmt.Sprintf("%d", ProtocolVersionHybi13), + } + for k, v := range expectedHeader { + if req.Header.Get(k) != v { + t.Errorf("%s expected %s, but got %v", k, v, req.Header.Get(k)) + } } } } @@ -326,7 +337,7 @@ } payload := make([]byte, len(testPayload)) _, err = r.Read(payload) - if err != nil { + if err != nil && err != io.EOF { t.Errorf("read %v", err) } if !bytes.Equal(testPayload, payload) { @@ -363,13 +374,20 @@ } func TestHybiControlFrame(t *testing.T) { + payload := []byte("hello") + frameHeader := &hybiFrameHeader{Fin: true, OpCode: PingFrame} - payload := []byte("hello") testHybiFrame(t, []byte{0x89, 0x05}, payload, payload, frameHeader) + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PingFrame} + testHybiFrame(t, []byte{0x89, 0x00}, nil, nil, frameHeader) + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} testHybiFrame(t, []byte{0x8A, 0x05}, payload, payload, frameHeader) + frameHeader = &hybiFrameHeader{Fin: true, OpCode: PongFrame} + testHybiFrame(t, []byte{0x8A, 0x00}, nil, nil, frameHeader) + frameHeader = &hybiFrameHeader{Fin: true, OpCode: CloseFrame} payload = []byte{0x03, 0xe8} // 1000 testHybiFrame(t, []byte{0x88, 0x02}, payload, payload, frameHeader) === modified file 'src/golang.org/x/net/websocket/server.go' --- src/golang.org/x/net/websocket/server.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/websocket/server.go 2016-03-22 15:18:22 +0000 @@ -74,7 +74,6 @@ rwc, buf, err := w.(http.Hijacker).Hijack() if err != nil { panic("Hijack failed: " + err.Error()) - return } // The server should abort the WebSocket connection if it finds // the client did not send a handshake that matches with protocol @@ -95,8 +94,8 @@ // You might want to verify websocket.Conn.Config().Origin in the func. // If you use Server instead of Handler, you could call websocket.Origin and // check the origin in your Handshake func. So, if you want to accept -// non-browser client, which doesn't send Origin header, you could use Server -//. that doesn't check origin in its Handshake. +// non-browser clients, which do not send an Origin header, set a +// Server.Handshake that does not check the origin. 
type Handler func(*Conn) func checkOrigin(config *Config, req *http.Request) (err error) { === modified file 'src/golang.org/x/net/websocket/websocket_test.go' --- src/golang.org/x/net/websocket/websocket_test.go 2015-06-05 17:40:37 +0000 +++ src/golang.org/x/net/websocket/websocket_test.go 2016-03-22 15:18:22 +0000 @@ -13,6 +13,8 @@ "net/http" "net/http/httptest" "net/url" + "reflect" + "runtime" "strings" "sync" "testing" @@ -22,7 +24,10 @@ var serverAddr string var once sync.Once -func echoServer(ws *Conn) { io.Copy(ws, ws) } +func echoServer(ws *Conn) { + defer ws.Close() + io.Copy(ws, ws) +} type Count struct { S string @@ -30,6 +35,7 @@ } func countServer(ws *Conn) { + defer ws.Close() for { var count Count err := JSON.Receive(ws, &count) @@ -45,6 +51,55 @@ } } +type testCtrlAndDataHandler struct { + hybiFrameHandler +} + +func (h *testCtrlAndDataHandler) WritePing(b []byte) (int, error) { + h.hybiFrameHandler.conn.wio.Lock() + defer h.hybiFrameHandler.conn.wio.Unlock() + w, err := h.hybiFrameHandler.conn.frameWriterFactory.NewFrameWriter(PingFrame) + if err != nil { + return 0, err + } + n, err := w.Write(b) + w.Close() + return n, err +} + +func ctrlAndDataServer(ws *Conn) { + defer ws.Close() + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + go func() { + for i := 0; ; i++ { + var b []byte + if i%2 != 0 { // with or without payload + b = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-SERVER", i)) + } + if _, err := h.WritePing(b); err != nil { + break + } + if _, err := h.WritePong(b); err != nil { // unsolicited pong + break + } + time.Sleep(10 * time.Millisecond) + } + }() + + b := make([]byte, 128) + for { + n, err := ws.Read(b) + if err != nil { + break + } + if _, err := ws.Write(b[:n]); err != nil { + break + } + } +} + func subProtocolHandshake(config *Config, req *http.Request) error { for _, proto := range config.Protocol { if proto == "chat" { @@ -64,6 +119,7 @@ func startServer() { http.Handle("/echo", Handler(echoServer)) http.Handle("/count", Handler(countServer)) + http.Handle("/ctrldata", Handler(ctrlAndDataServer)) subproto := Server{ Handshake: subProtocolHandshake, Handler: Handler(subProtoServer), @@ -425,6 +481,10 @@ } func TestClose(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skip("see golang.org/issue/11454") + } + once.Do(startServer) conn, err := net.Dial("tcp", serverAddr) @@ -450,3 +510,78 @@ t.Fatalf("ws.Close(): expected underlying ws.rwc.Close to be called > 0 times, got: %v", cc.closed) } } + +var originTests = []struct { + req *http.Request + origin *url.URL +}{ + { + req: &http.Request{ + Header: http.Header{ + "Origin": []string{"http://www.example.com"}, + }, + }, + origin: &url.URL{ + Scheme: "http", + Host: "www.example.com", + }, + }, + { + req: &http.Request{}, + }, +} + +func TestOrigin(t *testing.T) { + conf := newConfig(t, "/echo") + conf.Version = ProtocolVersionHybi13 + for i, tt := range originTests { + origin, err := Origin(conf, tt.req) + if err != nil { + t.Error(err) + continue + } + if !reflect.DeepEqual(origin, tt.origin) { + t.Errorf("#%d: got origin %v; want %v", i, origin, tt.origin) + continue + } + } +} + +func TestCtrlAndData(t *testing.T) { + once.Do(startServer) + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatal(err) + } + ws, err := NewClient(newConfig(t, "/ctrldata"), c) + if err != nil { + t.Fatal(err) + } + defer ws.Close() + + h := &testCtrlAndDataHandler{hybiFrameHandler: hybiFrameHandler{conn: ws}} + ws.frameHandler = h + + b := make([]byte, 
128) + for i := 0; i < 2; i++ { + data := []byte(fmt.Sprintf("#%d-DATA-FRAME-FROM-CLIENT", i)) + if _, err := ws.Write(data); err != nil { + t.Fatalf("#%d: %v", i, err) + } + var ctrl []byte + if i%2 != 0 { // with or without payload + ctrl = []byte(fmt.Sprintf("#%d-CONTROL-FRAME-FROM-CLIENT", i)) + } + if _, err := h.WritePing(ctrl); err != nil { + t.Fatalf("#%d: %v", i, err) + } + n, err := ws.Read(b) + if err != nil { + t.Fatalf("#%d: %v", i, err) + } + if !bytes.Equal(b[:n], data) { + t.Fatalf("#%d: got %v; want %v", i, b[:n], data) + } + } +} === modified file 'src/gopkg.in/errgo.v1/errors.go' --- src/gopkg.in/errgo.v1/errors.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/errgo.v1/errors.go 2016-03-22 15:18:22 +0000 @@ -233,6 +233,15 @@ // the result if allowed by the specific pass functions // (see Mask for an explanation of the pass parameter). func NoteMask(underlying error, msg string, pass ...func(error) bool) error { + err := noteMask(underlying, msg, pass...) + setLocation(err, 1) + return err +} + +// noteMask is exactly like NoteMask except it doesn't set the location +// of the returned error, so that we can avoid setting it twice +// when it's used in other functions. +func noteMask(underlying error, msg string, pass ...func(error) bool) error { newErr := &Err{ Underlying_: underlying, Message_: msg, @@ -251,6 +260,7 @@ log.Printf("new error %#v", newErr) } } + newErr.SetLocation(1) return newErr } @@ -281,7 +291,7 @@ if underlying == nil { return nil } - err := NoteMask(underlying, "", pass...) + err := noteMask(underlying, "", pass...) setLocation(err, 1) return err } @@ -291,7 +301,7 @@ // The returned error has no cause (use NoteMask // or WithCausef to add a message while retaining a cause). func Notef(underlying error, f string, a ...interface{}) error { - err := NoteMask(underlying, fmt.Sprintf(f, a...)) + err := noteMask(underlying, fmt.Sprintf(f, a...)) setLocation(err, 1) return err } === modified file 'src/gopkg.in/errgo.v1/errors_test.go' --- src/gopkg.in/errgo.v1/errors_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/errgo.v1/errors_test.go 2016-03-22 15:18:22 +0000 @@ -82,6 +82,18 @@ checkErr(c, err, nil, "bar", "[{$TestNotef#2$: bar}]", err) } +func (*errorsSuite) TestNoteMask(c *gc.C) { + err0 := errgo.WithCausef(nil, someErr, "foo") //err TestNoteMask#0 + err := errgo.NoteMask(err0, "bar") //err TestNoteMask#1 + checkErr(c, err, err0, "bar: foo", "[{$TestNoteMask#1$: bar} {$TestNoteMask#0$: foo}]", err) + + err = errgo.NoteMask(err0, "bar", errgo.Is(someErr)) //err TestNoteMask#2 + checkErr(c, err, err0, "bar: foo", "[{$TestNoteMask#2$: bar} {$TestNoteMask#0$: foo}]", someErr) + + err = errgo.NoteMask(err0, "") //err TestNoteMask#3 + checkErr(c, err, err0, "foo", "[{$TestNoteMask#3$: } {$TestNoteMask#0$: foo}]", err) +} + func (*errorsSuite) TestMaskFunc(c *gc.C) { err0 := errgo.New("zero") err1 := errgo.New("one") === modified file 'src/gopkg.in/goose.v1/nova/live_test.go' --- src/gopkg.in/goose.v1/nova/live_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/goose.v1/nova/live_test.go 2016-03-22 15:18:22 +0000 @@ -286,6 +286,35 @@ } } +func (s *LiveTests) TestUpdateSecurityGroup(c *gc.C) { + group, err := s.nova.CreateSecurityGroup("test_secgroup", "test_desc") + c.Assert(err, gc.IsNil) + c.Check(group.Name, gc.Equals, "test_secgroup") + c.Check(group.Description, gc.Equals, "test_desc") + + groupUpdated, err := s.nova.UpdateSecurityGroup(group.Id, "test_secgroup_new", "test_desc_new") + c.Assert(err, gc.IsNil) + c.Check(groupUpdated.Name, gc.Equals, 
"test_secgroup_new") + c.Check(groupUpdated.Description, gc.Equals, "test_desc_new") + + groups, err := s.nova.ListSecurityGroups() + found := false + for _, g := range groups { + if g.Id == group.Id { + found = true + c.Assert(g.Name, gc.Equals, "test_secgroup_new") + c.Assert(g.Description, gc.Equals, "test_desc_new") + break + } + } + if found { + err = s.nova.DeleteSecurityGroup(group.Id) + c.Check(err, gc.IsNil) + } else { + c.Fatalf("test security group (%d) not found", group.Id) + } +} + func (s *LiveTests) TestDuplicateSecurityGroupError(c *gc.C) { group, err := s.nova.CreateSecurityGroup("test_dupgroup", "test_desc") c.Assert(err, gc.IsNil) @@ -538,6 +567,26 @@ c.Assert(err, gc.ErrorMatches, "(.|\n)*The requested availability zone is not available(.|\n)*") } +func (s *LiveTests) TestUpdateServerName(c *gc.C) { + entity, err := s.nova.RunServer(nova.RunServerOpts{ + Name: "oldName", + FlavorId: s.testFlavorId, + ImageId: s.testImageId, + AvailabilityZone: s.testAvailabilityZone, + Metadata: map[string]string{}, + }) + c.Assert(err, gc.IsNil) + defer s.nova.DeleteServer(entity.Id) + + newEntity, err := s.nova.UpdateServerName(entity.Id, "newName") + c.Assert(err, gc.IsNil) + c.Assert(newEntity.Name, gc.Equals, "newName") + + server, err := s.nova.GetServer(entity.Id) + c.Assert(err, gc.IsNil) + c.Assert(server.Name, gc.Equals, "newName") +} + func (s *LiveTests) TestInstanceMetadata(c *gc.C) { metadata := map[string]string{"my-key": "my-value"} entity, err := s.nova.RunServer(nova.RunServerOpts{ === modified file 'src/gopkg.in/goose.v1/nova/nova.go' --- src/gopkg.in/goose.v1/nova/nova.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/goose.v1/nova/nova.go 2016-03-22 15:18:22 +0000 @@ -342,6 +342,28 @@ return &resp.Server, nil } +type serverUpdateNameOpts struct { + Name string `json:"name"` +} + +// UpdateServerName updates the name of the given server. +func (c *Client) UpdateServerName(serverID, name string) (*Entity, error) { + var req struct { + Server serverUpdateNameOpts `json:"server"` + } + var resp struct { + Server Entity `json:"server"` + } + req.Server = serverUpdateNameOpts{Name: name} + requestData := goosehttp.RequestData{ReqValue: req, RespValue: &resp, ExpectedStatus: []int{http.StatusOK}} + url := fmt.Sprintf("%s/%s", apiServers, serverID) + err := c.client.SendRequest(client.PUT, "compute", url, &requestData) + if err != nil { + return nil, errors.Newf(err, "failed to update server name to %q", name) + } + return &resp.Server, nil +} + // SecurityGroupRef refers to an existing named security group type SecurityGroupRef struct { TenantId string `json:"tenant_id"` @@ -462,6 +484,28 @@ return err } +// UpdateSecurityGroup updates the name and description of the given group. 
+func (c *Client) UpdateSecurityGroup(groupId, name, description string) (*SecurityGroup, error) { + var req struct { + SecurityGroup struct { + Name string `json:"name"` + Description string `json:"description"` + } `json:"security_group"` + } + req.SecurityGroup.Name = name + req.SecurityGroup.Description = description + var resp struct { + SecurityGroup SecurityGroup `json:"security_group"` + } + url := fmt.Sprintf("%s/%s", apiSecurityGroups, groupId) + requestData := goosehttp.RequestData{ReqValue: req, RespValue: &resp, ExpectedStatus: []int{http.StatusOK}} + err := c.client.SendRequest(client.PUT, "compute", url, &requestData) + if err != nil { + return nil, errors.Newf(err, "failed to update security group with Id %s to name: %s", groupId, name) + } + return &resp.SecurityGroup, nil +} + // RuleInfo allows the callers of CreateSecurityGroupRule() to // create 2 types of security group rules: ingress rules and group // rules. The difference stems from how the "source" is defined. === modified file 'src/gopkg.in/goose.v1/testservices/novaservice/service.go' --- src/gopkg.in/goose.v1/testservices/novaservice/service.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/goose.v1/testservices/novaservice/service.go 2016-03-22 15:18:22 +0000 @@ -260,6 +260,20 @@ return nil } +// updateServerName updates the name of an existing server. +func (n *Nova) updateServerName(serverId, name string) error { + if err := n.ProcessFunctionHook(n, serverId); err != nil { + return err + } + server, err := n.server(serverId) + if err != nil { + return testservices.NewServerByIDNotFoundError(serverId) + } + server.Name = name + n.servers[serverId] = *server + return nil +} + // server retrieves an existing server by ID. func (n *Nova) server(serverId string) (*nova.ServerDetail, error) { if err := n.ProcessFunctionHook(n, serverId); err != nil { @@ -406,6 +420,20 @@ return nil } +// updateSecurityGroup updates the name and description of an existing security group. +func (n *Nova) updateSecurityGroup(group nova.SecurityGroup) error { + if err := n.ProcessFunctionHook(n, group); err != nil { + return err + } + existingGroup, err := n.securityGroup(group.Id) + if err != nil { + return testservices.NewSecurityGroupByIDNotFoundError(group.Id) + } + existingGroup.Name = group.Name + existingGroup.Description = group.Description + n.groups[group.Id] = *existingGroup + return nil +} + // addSecurityGroup creates a new security group.
func (n *Nova) addSecurityGroup(group nova.SecurityGroup) error { if err := n.ProcessFunctionHook(n, group); err != nil { === modified file 'src/gopkg.in/goose.v1/testservices/novaservice/service_http.go' --- src/gopkg.in/goose.v1/testservices/novaservice/service_http.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/goose.v1/testservices/novaservice/service_http.go 2016-03-22 15:18:22 +0000 @@ -765,10 +765,39 @@ } return n.handleRunServer(body, w, r) case "PUT": - if serverId := path.Base(r.URL.Path); serverId != "servers" { + serverId := path.Base(r.URL.Path) + if serverId == "servers" { + return errNotFound + } + + var req struct { + Server struct { + Name string `json:"name"` + } `json:"server"` + } + + body, err := ioutil.ReadAll(r.Body) + if err != nil || len(body) == 0 { return errBadRequest2 } - return errNotFound + if err := json.Unmarshal(body, &req); err != nil { + return err + } + + err = n.updateServerName(serverId, req.Server.Name) + if err != nil { + return err + } + + server, err := n.server(serverId) + if err != nil { + return err + } + var resp struct { + Server nova.ServerDetail `json:"server"` + } + resp.Server = *server + return sendJSON(http.StatusOK, resp, w, r) case "DELETE": if serverId := path.Base(r.URL.Path); serverId != "servers" { if _, err := n.server(serverId); err != nil { @@ -905,10 +934,47 @@ return sendJSON(http.StatusOK, resp, w, r) } case "PUT": - if groupId := path.Base(r.URL.Path); groupId != "os-security-groups" { - return errNotFoundJSON - } - return errNotFound + if groupId := path.Base(r.URL.Path); groupId == "os-security-groups" { + return errNotFound + } + group, err := n.processGroupId(w, r) + if err != nil { + return err + } + + var req struct { + Group struct { + Name string + Description string + } `json:"security_group"` + } + body, err := ioutil.ReadAll(r.Body) + if err != nil || len(body) == 0 { + return errBadRequest2 + } + if err := json.Unmarshal(body, &req); err != nil { + return err + } + + err = n.updateSecurityGroup(nova.SecurityGroup{ + Id: group.Id, + Name: req.Group.Name, + Description: req.Group.Description, + TenantId: group.TenantId, + }) + if err != nil { + return err + } + group, err = n.securityGroup(group.Id) + if err != nil { + return err + } + var resp struct { + Group nova.SecurityGroup `json:"security_group"` + } + resp.Group = *group + return sendJSON(http.StatusOK, resp, w, r) + case "DELETE": if group, err := n.processGroupId(w, r); group != nil { if err := n.removeSecurityGroup(group.Id); err != nil { === modified file 'src/gopkg.in/goose.v1/testservices/novaservice/service_http_test.go' --- src/gopkg.in/goose.v1/testservices/novaservice/service_http_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/goose.v1/testservices/novaservice/service_http_test.go 2016-03-22 15:18:22 +0000 @@ -354,7 +354,7 @@ { method: "PUT", url: "/os-security-groups/invalid", - expect: errNotFoundJSON, + expect: errNotFoundJSONSG, }, { method: "DELETE", === added directory 'src/gopkg.in/inconshreveable' === added directory 'src/gopkg.in/inconshreveable/log15.v2' === added file 'src/gopkg.in/inconshreveable/log15.v2/.travis.yml' --- src/gopkg.in/inconshreveable/log15.v2/.travis.yml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/.travis.yml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - 1.3 + - release + - tip === added file 'src/gopkg.in/inconshreveable/log15.v2/CONTRIBUTORS' --- src/gopkg.in/inconshreveable/log15.v2/CONTRIBUTORS 1970-01-01 00:00:00 +0000 +++ 
src/gopkg.in/inconshreveable/log15.v2/CONTRIBUTORS 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +Contributors to log15: + +- Aaron L +- Alan Shreve +- Chris Hines +- Ciaran Downey +- Dmitry Chestnykh +- Evan Shaw +- Péter Szilágyi +- Trevor Gattis +- Vincent Vanackere === added file 'src/gopkg.in/inconshreveable/log15.v2/LICENSE' --- src/gopkg.in/inconshreveable/log15.v2/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. === added file 'src/gopkg.in/inconshreveable/log15.v2/README.md' --- src/gopkg.in/inconshreveable/log15.v2/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,60 @@ +![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png) + +# log15 [![godoc reference](https://godoc.org/gopkg.in/inconshreveable/log15.v2?status.png)](https://godoc.org/gopkg.in/inconshreveable/log15.v2) + +Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. + +## Features +- A simple, easy-to-understand API +- Promotes structured logging by encouraging use of key/value pairs +- Child loggers which inherit and add their own private context +- Lazy evaluation of expensive operations +- Simple Handler interface allowing for construction of flexible, custom logging configurations with a tiny API. +- Color terminal support +- Built-in support for logging to files, streams, syslog, and the network +- Support for forking records to multiple handlers, buffering records for output, failing over from failed handler writes, + more + +## Versioning +The API of the master branch of log15 should always be considered unstable. Using a stable version +of the log15 package is supported by gopkg.in. Include your dependency like so: + +```go +import log "gopkg.in/inconshreveable/log15.v2" +``` + +## Examples + +```go +// all loggers can have key/value context +srvlog := log.New("module", "app/server") + +// all log messages can have key/value context +srvlog.Warn("abnormal conn rate", "rate", curRate, "low", lowRate, "high", highRate) + +// child loggers with inherited context +connlog := srvlog.New("raddr", c.RemoteAddr()) +connlog.Info("connection open") + +// lazy evaluation +connlog.Debug("ping remote", "latency", log.Lazy(pingRemote)) + +// flexible configuration +srvlog.SetHandler(log.MultiHandler( + log.StreamHandler(os.Stderr, log.LogfmtFormat()), + log.LvlFilterHandler( + log.LvlError, + log.Must.FileHandler("errors.json", log.JsonHandler()))) +``` + +## FAQ + +### The varargs style is brittle and error prone! Can I have type safety please? +Yes. 
Use `log.Ctx`: + +```go +srvlog := log.New(log.Ctx{"module": "app/server"}) +srvlog.Warn("abnormal conn rate", log.Ctx{"rate": curRate, "low": lowRate, "high": highRate}) +``` + +## License +Apache === added file 'src/gopkg.in/inconshreveable/log15.v2/RELEASING.md' --- src/gopkg.in/inconshreveable/log15.v2/RELEASING.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/RELEASING.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +# log15's release strategy + +log15 uses gopkg.in to manage versioning releases so that consumers who don't vendor dependencies can rely upon a stable API. + +## Master + +Master is considered to have no API stability guarantee, so merging new code that passes tests into master is always okay. + +## Releasing a new API-compatible version + +The process to release a new API-compatible version is described below. For the purposes of this example, we'll assume you're trying to release a new version of v2 + +1. `git checkout v2` +1. `git merge master` +1. Audit the code for any imports of sub-packages. Modify any import references from `github.com/inconshrevealbe/log15/` -> `gopkg.in/inconshreveable/log15.v2/` +1. `git commit` +1. `git tag`, find the latest tag of the style v2.X. +1. `git tag v2.X+1` If the last version was v2.6, you would run `git tag v2.7` +1. `git push --tags git@github.com:inconshreveable/log15.git v2` === added file 'src/gopkg.in/inconshreveable/log15.v2/bench_test.go' --- src/gopkg.in/inconshreveable/log15.v2/bench_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/bench_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,129 @@ +package log15 + +import ( + "bytes" + "testing" + "time" +) + +func BenchmarkStreamNoCtx(b *testing.B) { + lg := New() + + buf := bytes.Buffer{} + lg.SetHandler(StreamHandler(&buf, LogfmtFormat())) + + for i := 0; i < b.N; i++ { + lg.Info("test message") + buf.Reset() + } +} + +func BenchmarkDiscard(b *testing.B) { + lg := New() + lg.SetHandler(DiscardHandler()) + + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func BenchmarkCallerFileHandler(b *testing.B) { + lg := New() + lg.SetHandler(CallerFileHandler(DiscardHandler())) + + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func BenchmarkCallerFuncHandler(b *testing.B) { + lg := New() + lg.SetHandler(CallerFuncHandler(DiscardHandler())) + + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func BenchmarkLogfmtNoCtx(b *testing.B) { + r := Record{ + Time: time.Now(), + Lvl: LvlInfo, + Msg: "test message", + Ctx: []interface{}{}, + } + + logfmt := LogfmtFormat() + for i := 0; i < b.N; i++ { + logfmt.Format(&r) + } +} + +func BenchmarkJsonNoCtx(b *testing.B) { + r := Record{ + Time: time.Now(), + Lvl: LvlInfo, + Msg: "test message", + Ctx: []interface{}{}, + } + + jsonfmt := JsonFormat() + for i := 0; i < b.N; i++ { + jsonfmt.Format(&r) + } +} + +func BenchmarkMultiLevelFilter(b *testing.B) { + handler := MultiHandler( + LvlFilterHandler(LvlDebug, DiscardHandler()), + LvlFilterHandler(LvlError, DiscardHandler()), + ) + + lg := New() + lg.SetHandler(handler) + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func BenchmarkDescendant1(b *testing.B) { + lg := New() + lg.SetHandler(DiscardHandler()) + lg = lg.New() + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func BenchmarkDescendant2(b *testing.B) { + lg := New() + lg.SetHandler(DiscardHandler()) + for i := 0; i < 2; i++ { + lg = lg.New() + } + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func 
BenchmarkDescendant4(b *testing.B) { + lg := New() + lg.SetHandler(DiscardHandler()) + for i := 0; i < 4; i++ { + lg = lg.New() + } + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} + +func BenchmarkDescendant8(b *testing.B) { + lg := New() + lg.SetHandler(DiscardHandler()) + for i := 0; i < 8; i++ { + lg = lg.New() + } + for i := 0; i < b.N; i++ { + lg.Info("test message") + } +} === added file 'src/gopkg.in/inconshreveable/log15.v2/doc.go' --- src/gopkg.in/inconshreveable/log15.v2/doc.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,333 @@ +/* +Package log15 provides an opinionated, simple toolkit for best-practice logging that is +both human and machine readable. It is modeled after the standard library's io and net/http +packages. + +This package requires you to log only key/value pairs. Keys must be strings. Values may be +any type that you like. The default output format is logfmt, but you may also choose to use +JSON instead if that suits you. Here's how you log: + + log.Info("page accessed", "path", r.URL.Path, "user_id", user.id) + +This will output a line that looks like: + + lvl=info t=2014-05-02T16:07:23-0700 msg="page accessed" path=/org/71/profile user_id=9 + +Getting Started + +To get started, you'll want to import the library: + + import log "gopkg.in/inconshreveable/log15.v2" + + +Now you're ready to start logging: + + func main() { + log.Info("Program starting", "args", os.Args) + } + + +Convention + +Because recording a human-meaningful message is common and good practice, the first argument to every +logging method is the value to the *implicit* key 'msg'. + +Additionally, the level you choose for a message will be automatically added with the key 'lvl', and so +will the current timestamp with key 't'. + +You may supply any additional context as a set of key/value pairs to the logging function. log15 allows +you to favor terseness, ordering, and speed over safety. This is a reasonable tradeoff for +logging functions. You don't need to explicitly state keys/values, log15 understands that they alternate +in the variadic argument list: + + log.Warn("size out of bounds", "low", lowBound, "high", highBound, "val", val) + +If you really do favor your type-safety, you may choose to pass a log.Ctx instead: + + log.Warn("size out of bounds", log.Ctx{"low": lowBound, "high": highBound, "val": val}) + + +Context loggers + +Frequently, you want to add context to a logger so that you can track actions associated with it. An http +request is a good example. You can easily create new loggers that have context that is automatically included +with each log line: + + requestlogger := log.New("path", r.URL.Path) + + // later + requestlogger.Debug("db txn commit", "duration", txnTimer.Finish()) + +This will output a log line that includes the path context that is attached to the logger: + + lvl=dbug t=2014-05-02T16:07:23-0700 path=/repo/12/add_hook msg="db txn commit" duration=0.12 + + +Handlers + +The Handler interface defines where log lines are printed to and how they are formatted. Handler is a +single interface that is inspired by net/http's handler interface: + + type Handler interface { + Log(r *Record) error + } + + +Handlers can filter records, format them, or dispatch to multiple other Handlers. +This package implements a number of Handlers for common logging patterns that are +easily composed to create flexible, custom logging structures.
+ +Here's an example handler that prints logfmt output to Stdout: + + handler := log.StreamHandler(os.Stdout, log.LogfmtFormat()) + +Here's an example handler that defers to two other handlers. One handler only prints records +from the rpc package in logfmt to standard out. The other prints records at Error level +or above in JSON formatted output to the file /var/log/service.json: + + handler := log.MultiHandler( + log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())), + log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler), + ) + +Logging File Names and Line Numbers + +This package implements three Handlers that add debugging information to the +context: CallerFileHandler, CallerFuncHandler, and CallerStackHandler. Here's +an example that adds the source file and line number of each logging call to +the context. + + h := log.CallerFileHandler(log.StdoutHandler) + log.Root().SetHandler(h) + ... + log.Error("open file", "err", err) + +This will output a line that looks like: + + lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" caller=data.go:42 + +Here's an example that logs the call stack rather than just the call site. + + h := log.CallerStackHandler("%+v", log.StdoutHandler) + log.Root().SetHandler(h) + ... + log.Error("open file", "err", err) + +This will output a line that looks like: + + lvl=eror t=2014-05-02T16:07:23-0700 msg="open file" err="file not found" stack="[pkg/data.go:42 pkg/cmd/main.go]" + +The "%+v" format instructs the handler to include the path of the source file +relative to the compile time GOPATH. The log15/stack package documents the +full list of formatting verbs and modifiers available. + +Custom Handlers + +The Handler interface is so simple that it's also trivial to write your own. Let's create an +example handler which tries to write to one handler, but if that fails it falls back to +writing to another handler and includes the error that it encountered when trying to write +to the primary. This might be useful when trying to log over a network socket, but if that +fails you want to log those records to a file on disk. + + type BackupHandler struct { + Primary Handler + Secondary Handler + } + + func (h *BackupHandler) Log(r *Record) error { + err := h.Primary.Log(r) + if err != nil { + r.Ctx = append(r.Ctx, "primary_err", err) + return h.Secondary.Log(r) + } + return nil + } + +This pattern is so useful that a generic version that handles an arbitrary number of Handlers +is included in this library as FailoverHandler. + +Logging Expensive Operations + +Sometimes, you want to log values that are extremely expensive to compute, but you don't want to pay +the price of computing them if you haven't turned up your logging level to a high level of detail. + +This package provides a simple type to annotate a logging operation that you want to be evaluated +lazily, just when it is about to be logged, so that it is not evaluated if an upstream Handler +filters it out. Just wrap any function which takes no arguments with the log.Lazy type. For example: + + func factorRSAKey() (factors []int) { + // return the factors of a very large number + } + + log.Debug("factors", log.Lazy{factorRSAKey}) + +If this message is not logged for any reason (for example, because a handler only accepts records at the +Error level), then factorRSAKey is never evaluated. 
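+ +To make the mechanics concrete, here is a complete, minimal sketch (an editor's illustration, not part of the original documentation; the handler setup and names are assumptions). The Debug call below never invokes the wrapped function, because the level filter discards the record before any Lazy values are evaluated: + + package main + + import log "gopkg.in/inconshreveable/log15.v2" + + func main() { + // discard everything below Info, so Debug records never reach a formatter + log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler)) + + expensive := func() string { return "costly result" } + + log.Debug("details", "val", log.Lazy{expensive}) // filtered out; expensive is not called + log.Info("summary", "val", log.Lazy{expensive}) // logged; expensive is called once + } 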
+ +Dynamic context values + +The same log.Lazy mechanism can be used to attach context to a logger which you want to be +evaluated when the message is logged, but not when the logger is created. For example, let's imagine +a game where you have Player objects: + + type Player struct { + name string + alive bool + log.Logger + } + +You always want to log a player's name and whether they're alive or dead, so when you create the player +object, you might do: + + p := &Player{name: name, alive: true} + p.Logger = log.New("name", p.name, "alive", p.alive) + +The problem is that even after a player has died, the logger will still report they are alive, because the logging +context was evaluated when the logger was created. By using the Lazy wrapper, we can defer the evaluation +of whether the player is alive or not to each log message, so that the log records will reflect the player's +current state no matter when the log message is written: + + p := &Player{name: name, alive: true} + isAlive := func() bool { return p.alive } + p.Logger = log.New("name", p.name, "alive", log.Lazy{isAlive}) + +Terminal Format + +If log15 detects that stdout is a terminal, it will configure the default +handler for it (which is log.StdoutHandler) to use TerminalFormat. This format +logs records nicely for your terminal, including color-coded output based +on log level. + +Error Handling + +Because log15 allows you to step around the type system, there are a few ways you can specify +invalid arguments to the logging functions. You could, for example, wrap something that is not +a zero-argument function with log.Lazy or pass a context key that is not a string. Since logging libraries +are typically the mechanism by which errors are reported, it would be onerous for the logging functions +to return errors. Instead, log15 handles errors by making these guarantees to you: + +- Any log record containing an error will still be printed with the error explained to you as part of the log record. + +- Any log record containing an error will include the context key LOG15_ERROR, enabling you to easily +(and if you like, automatically) detect if any of your logging calls are passing bad values. + +Understanding this, you might wonder why the Handler interface can return an error value in its Log method. Handlers +are encouraged to return errors only if they fail to write their log records out to an external source, such as when the +syslog daemon is not responding. This allows the construction of useful handlers which cope with those failures, +like the FailoverHandler. + +Library Use + +log15 is intended to be useful for library authors as a way to provide configurable logging to +users of their library. Best practice for use in a library is to always disable all output for your logger +by default and to provide a public Logger instance that consumers of your library can configure. Like so: + + package yourlib + + import "gopkg.in/inconshreveable/log15.v2" + + var Log = log.New() + + func init() { + Log.SetHandler(log.DiscardHandler()) + } + +Users of your library may then enable it if they like: + + import "gopkg.in/inconshreveable/log15.v2" + import "example.com/yourlib" + + func main() { + handler := // custom handler setup + yourlib.Log.SetHandler(handler) + } + +Best practices for attaching logger context + +The ability to attach context to a logger is a powerful one. Where should you do it and why? +I favor embedding a Logger directly into any persistent object in my application and adding +unique, tracing context keys to it. 
For instance, imagine I am writing a web browser: + + type Tab struct { + url string + render *RenderingContext + // ... + + Logger + } + + func NewTab(url string) *Tab { + return &Tab { + // ... + url: url, + + Logger: log.New("url", url), + } + } + +When a new tab is created, I assign a logger to it with the url of +the tab as context so it can easily be traced through the logs. +Now, whenever we perform any operation with the tab, we'll log with its +embedded logger and it will include the tab's url automatically: + + tab.Debug("moved position", "idx", tab.idx) + +There's only one problem. What if the tab url changes? We could +use log.Lazy to make sure the current url is always written, but that +would mean that we couldn't trace a tab's full lifetime through our +logs after the user navigates to a new URL. + +Instead, think about what values to attach to your loggers the +same way you think about what to use as a key in a SQL database schema. +If it's possible to use a natural key that is unique for the lifetime of the +object, do so. But otherwise, log15's ext package has a handy RandId +function to let you generate what you might call "surrogate keys". +They're just random hex identifiers to use for tracing. Back to our +Tab example, we would prefer to set up our Logger like so: + + import logext "gopkg.in/inconshreveable/log15.v2/ext" + + t := &Tab { + // ... + url: url, + } + + t.Logger = log.New("id", logext.RandId(8), "url", log.Lazy{t.getUrl}) + return t + +Now we'll have a unique traceable identifier even across loading new urls, but +we'll still be able to see the tab's current url in the log messages. + +Must + +For all Handler functions which can return an error, there is a version of that +function which will return no error but panics on failure. They are all available +on the Must object. 
For example: + + log.Must.FileHandler("/path", log.JsonFormat()) + log.Must.NetHandler("tcp", ":1234", log.JsonFormat()) + +Inspiration and Credit + +All of the following excellent projects inspired the design of this library: + +code.google.com/p/log4go + +github.com/op/go-logging + +github.com/technoweenie/grohl + +github.com/Sirupsen/logrus + +github.com/kr/logfmt + +github.com/spacemonkeygo/spacelog + +golang's stdlib, notably io and net/http + +The Name + +https://xkcd.com/927/ + +*/ +package log15 === added directory 'src/gopkg.in/inconshreveable/log15.v2/ext' === added file 'src/gopkg.in/inconshreveable/log15.v2/ext/ext_test.go' --- src/gopkg.in/inconshreveable/log15.v2/ext/ext_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/ext/ext_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,109 @@ +package ext + +import ( + "errors" + log "gopkg.in/inconshreveable/log15.v2" + "math" + "testing" +) + +func testHandler() (log.Handler, *log.Record) { + rec := new(log.Record) + return log.FuncHandler(func(r *log.Record) error { + *rec = *r + return nil + }), rec +} + +func TestHotSwapHandler(t *testing.T) { + t.Parallel() + + h1, r1 := testHandler() + + l := log.New() + h := HotSwapHandler(h1) + l.SetHandler(h) + + l.Info("to h1") + if r1.Msg != "to h1" { + t.Fatalf("didn't get expected message to h1") + } + + h2, r2 := testHandler() + h.Swap(h2) + l.Info("to h2") + if r2.Msg != "to h2" { + t.Fatalf("didn't get expected message to h2") + } +} + +func TestSpeculativeHandler(t *testing.T) { + t.Parallel() + + // test with an even multiple of the buffer size, less than full buffer size + // and not a multiple of the buffer size + for _, count := range []int{10000, 50, 432} { + recs := make(chan *log.Record) + done := make(chan int) + spec := SpeculativeHandler(100, log.ChannelHandler(recs)) + + go func() { + defer close(done) + expectedCount := int(math.Min(float64(count), float64(100))) + expectedIdx := count - expectedCount + for r := range recs { + if r.Ctx[1] != expectedIdx { + t.Errorf("Bad ctx 'i', got %d expected %d", r.Ctx[1], expectedIdx) + return + } + expectedIdx++ + expectedCount-- + + if expectedCount == 0 { + // got everything we expected + break + } + } + + select { + case <-recs: + t.Errorf("got an extra record we shouldn't have!") + default: + } + }() + + lg := log.New() + lg.SetHandler(spec) + for i := 0; i < count; i++ { + lg.Debug("test speculative", "i", i) + } + + go spec.Flush() + + // wait for the goroutine to finish + <-done + } +} + +func TestErrorHandler(t *testing.T) { + t.Parallel() + + h, r := testHandler() + lg := log.New() + lg.SetHandler(EscalateErrHandler( + log.LvlFilterHandler(log.LvlError, h))) + + lg.Debug("some function result", "err", nil) + if r.Msg != "" { + t.Fatalf("Expected debug level message to be filtered") + } + + lg.Debug("some function result", "err", errors.New("failed operation")) + if r.Msg != "some function result" { + t.Fatalf("Expected debug level message to be escalated and pass lvlfilter") + } + + if r.Lvl != log.LvlError { + t.Fatalf("Expected debug level message to be escalated to LvlError") + } +} === added file 'src/gopkg.in/inconshreveable/log15.v2/ext/handler.go' --- src/gopkg.in/inconshreveable/log15.v2/ext/handler.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/ext/handler.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,130 @@ +package ext + +import ( + "os" + "sync" + "sync/atomic" + "unsafe" + + log "gopkg.in/inconshreveable/log15.v2" +) + +// EscalateErrHandler wraps another handler and passes 
all records through +// unchanged except when the logged context contains a non-nil error +// value. In that case, the record's level is raised +// to LvlError unless it was already more serious (LvlCrit). +// +// This allows you to log the result of all functions for debugging +// and still capture error conditions when in production with a single +// log line. As an example, the following log record will be written +// out only if there was an error writing a value to redis: +// +// logger := logext.EscalateErrHandler( +// log.LvlFilterHandler(log.LvlInfo, log.StdoutHandler)) +// +// reply, err := redisConn.Do("SET", "foo", "bar") +// logger.Debug("Wrote value to redis", "reply", reply, "err", err) +// if err != nil { +// return err +// } +// +func EscalateErrHandler(h log.Handler) log.Handler { + return log.FuncHandler(func(r *log.Record) error { + if r.Lvl > log.LvlError { + for i := 1; i < len(r.Ctx); i += 2 { + if v, ok := r.Ctx[i].(error); ok && v != nil { + r.Lvl = log.LvlError + break + } + } + } + return h.Log(r) + }) +} + +// SpeculativeHandler is a handler for speculative logging. It +// keeps a ring buffer of the given size full of the last events +// logged into it. When Flush is called, all buffered log records +// are written to the wrapped handler. This is extremely useful for +// continuously capturing debug level output, but only flushing those +// log records if an exceptional condition is encountered. +func SpeculativeHandler(size int, h log.Handler) *Speculative { + return &Speculative{ + handler: h, + recs: make([]*log.Record, size), + } +} + +type Speculative struct { + mu sync.Mutex + idx int + recs []*log.Record + handler log.Handler + full bool +} + +func (h *Speculative) Log(r *log.Record) error { + h.mu.Lock() + defer h.mu.Unlock() + h.recs[h.idx] = r + h.idx = (h.idx + 1) % len(h.recs) + h.full = h.full || h.idx == 0 + return nil +} + +func (h *Speculative) Flush() { + recs := make([]*log.Record, 0) + func() { + h.mu.Lock() + defer h.mu.Unlock() + if h.full { + recs = append(recs, h.recs[h.idx:]...) + } + recs = append(recs, h.recs[:h.idx]...) + + // reset state + h.full = false + h.idx = 0 + }() + + // don't hold the lock while we flush to the wrapped handler + for _, r := range recs { + h.handler.Log(r) + } +} + +// HotSwapHandler wraps another handler that may be swapped out +// dynamically at runtime in a thread-safe fashion. +// HotSwapHandler provides the same functionality +// used to implement the SetHandler method for the default +// implementation of Logger. 
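+// +// A minimal usage sketch (editor's illustration; the particular handlers +// chosen here are assumptions, not part of this file): +// +// logger := log.New() +// hs := logext.HotSwapHandler(log.StderrHandler) +// logger.SetHandler(hs) +// +// // later, atomically redirect every subsequent record to a file +// hs.Swap(log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat())) 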
+func HotSwapHandler(h log.Handler) *HotSwap { + hs := new(HotSwap) + hs.Swap(h) + return hs +} + +type HotSwap struct { + handler unsafe.Pointer +} + +func (h *HotSwap) Log(r *log.Record) error { + return (*(*log.Handler)(atomic.LoadPointer(&h.handler))).Log(r) +} + +func (h *HotSwap) Swap(newHandler log.Handler) { + atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) +} + +// FatalHandler makes critical errors exit the program +// immediately, much like the log.Fatal* methods from the +// standard log package. +func FatalHandler(h log.Handler) log.Handler { + return log.FuncHandler(func(r *log.Record) error { + err := h.Log(r) + if r.Lvl == log.LvlCrit { + os.Exit(1) + } + return err + }) +} === added file 'src/gopkg.in/inconshreveable/log15.v2/ext/id.go' --- src/gopkg.in/inconshreveable/log15.v2/ext/id.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/ext/id.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,47 @@ +package ext + +import ( + "fmt" + "math/rand" + "sync" + "time" +) + +var r = rand.New(&lockedSource{src: rand.NewSource(time.Now().Unix())}) + +// RandId creates a random identifier of the requested length. +// Useful for assigning mostly-unique identifiers for logging +// and identification that are unlikely to collide because of +// short lifespan or low set cardinality. +func RandId(idlen int) string { + b := make([]byte, idlen) + var randVal uint32 + for i := 0; i < idlen; i++ { + byteIdx := i % 4 + if byteIdx == 0 { + randVal = r.Uint32() + } + b[i] = byte((randVal >> (8 * uint(byteIdx))) & 0xFF) + } + return fmt.Sprintf("%x", b) +} + +// lockedSource is a wrapper to allow a rand.Source to be used +// concurrently (same type as the one used internally in math/rand). +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} === added file 'src/gopkg.in/inconshreveable/log15.v2/format.go' --- src/gopkg.in/inconshreveable/log15.v2/format.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/format.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,257 @@ +package log15 + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +const ( + timeFormat = "2006-01-02T15:04:05-0700" + termTimeFormat = "01-02|15:04:05" + floatFormat = 'f' + termMsgJust = 40 +) + +type Format interface { + Format(r *Record) []byte +} + +// FormatFunc returns a new Format object which uses +// the given function to perform record formatting. +func FormatFunc(f func(*Record) []byte) Format { + return formatFunc(f) +} + +type formatFunc func(*Record) []byte + +func (f formatFunc) Format(r *Record) []byte { + return f(r) +} + +// TerminalFormat formats log records optimized for human readability on +// a terminal with color-coded level output and a terser, human-friendly timestamp. +// This format should only be used for interactive programs or while developing. +// +// [TIME] [LEVEL] MESSAGE key=value key=value ... 
+// +// Example: +// +// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002 +// +func TerminalFormat() Format { + return FormatFunc(func(r *Record) []byte { + var color = 0 + switch r.Lvl { + case LvlCrit: + color = 35 + case LvlError: + color = 31 + case LvlWarn: + color = 33 + case LvlInfo: + color = 32 + case LvlDebug: + color = 36 + } + + b := &bytes.Buffer{} + lvl := strings.ToUpper(r.Lvl.String()) + if color > 0 { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg) + } else { + fmt.Fprintf(b, "[%s] [%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg) + } + + // try to justify the log output for short messages + if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust { + b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg))) + } + + // print the keys logfmt style + logfmt(b, r.Ctx, color) + return b.Bytes() + }) +} + +// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable +// format for key/value pairs. +// +// For more details see: http://godoc.org/github.com/kr/logfmt +// +func LogfmtFormat() Format { + return FormatFunc(func(r *Record) []byte { + common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg} + buf := &bytes.Buffer{} + logfmt(buf, append(common, r.Ctx...), 0) + return buf.Bytes() + }) +} + +func logfmt(buf *bytes.Buffer, ctx []interface{}, color int) { + for i := 0; i < len(ctx); i += 2 { + if i != 0 { + buf.WriteByte(' ') + } + + k, ok := ctx[i].(string) + v := formatLogfmtValue(ctx[i+1]) + if !ok { + k, v = errorKey, formatLogfmtValue(k) + } + + // XXX: we should probably check that all of your key bytes aren't invalid + if color > 0 { + fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=%s", color, k, v) + } else { + fmt.Fprintf(buf, "%s=%s", k, v) + } + } + + buf.WriteByte('\n') +} + +// JsonFormat formats log records as JSON objects separated by newlines. +// It is the equivalent of JsonFormatEx(false, true). +func JsonFormat() Format { + return JsonFormatEx(false, true) +} + +// JsonFormatEx formats log records as JSON objects. If pretty is true, +// records will be pretty-printed. If lineSeparated is true, records +// will be logged with a new line between each record. 
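+// +// For example (an editor's sketch, not from the original source), pretty-printed, +// newline-separated JSON records can be written to stdout with: +// +// h := log.StreamHandler(os.Stdout, log.JsonFormatEx(true, true)) +// log.Root().SetHandler(h) 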
+func JsonFormatEx(pretty, lineSeparated bool) Format { + jsonMarshal := json.Marshal + if pretty { + jsonMarshal = func(v interface{}) ([]byte, error) { + return json.MarshalIndent(v, "", " ") + } + } + + return FormatFunc(func(r *Record) []byte { + props := make(map[string]interface{}) + + props[r.KeyNames.Time] = r.Time + props[r.KeyNames.Lvl] = r.Lvl + props[r.KeyNames.Msg] = r.Msg + + for i := 0; i < len(r.Ctx); i += 2 { + k, ok := r.Ctx[i].(string) + if !ok { + props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i]) + } + props[k] = formatJsonValue(r.Ctx[i+1]) + } + + b, err := jsonMarshal(props) + if err != nil { + b, _ = jsonMarshal(map[string]string{ + errorKey: err.Error(), + }) + return b + } + + if lineSeparated { + b = append(b, '\n') + } + + return b + }) +} + +func formatShared(value interface{}) (result interface{}) { + defer func() { + if err := recover(); err != nil { + if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() { + result = "nil" + } else { + panic(err) + } + } + }() + + switch v := value.(type) { + case time.Time: + return v.Format(timeFormat) + + case error: + return v.Error() + + case fmt.Stringer: + return v.String() + + default: + return v + } +} + +func formatJsonValue(value interface{}) interface{} { + value = formatShared(value) + switch value.(type) { + case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string: + return value + default: + return fmt.Sprintf("%+v", value) + } +} + +// formatValue formats a value for serialization +func formatLogfmtValue(value interface{}) string { + if value == nil { + return "nil" + } + + value = formatShared(value) + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case float32: + return strconv.FormatFloat(float64(v), floatFormat, 3, 64) + case float64: + return strconv.FormatFloat(v, floatFormat, 3, 64) + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return fmt.Sprintf("%d", value) + case string: + return escapeString(v) + default: + return escapeString(fmt.Sprintf("%+v", value)) + } +} + +func escapeString(s string) string { + needQuotes := false + e := bytes.Buffer{} + e.WriteByte('"') + for _, r := range s { + if r <= ' ' || r == '=' || r == '"' { + needQuotes = true + } + + switch r { + case '\\', '"': + e.WriteByte('\\') + e.WriteByte(byte(r)) + case '\n': + e.WriteByte('\\') + e.WriteByte('n') + case '\r': + e.WriteByte('\\') + e.WriteByte('r') + case '\t': + e.WriteByte('\\') + e.WriteByte('t') + default: + e.WriteRune(r) + } + } + e.WriteByte('"') + start, stop := 0, e.Len() + if !needQuotes { + start, stop = 1, stop-1 + } + return string(e.Bytes()[start:stop]) +} === added file 'src/gopkg.in/inconshreveable/log15.v2/handler.go' --- src/gopkg.in/inconshreveable/log15.v2/handler.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/handler.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,371 @@ +package log15 + +import ( + "bytes" + "fmt" + "io" + "net" + "os" + "reflect" + "sync" + + "gopkg.in/inconshreveable/log15.v2/stack" +) + +// A Logger prints its log records by writing to a Handler. +// The Handler interface defines where and how log records are written. +// Handlers are composable, providing you great flexibility in combining +// them to achieve the logging structure that suits your applications. +type Handler interface { + Log(r *Record) error +} + +// FuncHandler returns a Handler that logs records with the given +// function. 
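+// +// For example (editor's sketch), a Handler that counts records can be built +// from a closure; wrap it with SyncHandler if it will be used concurrently: +// +// var count int +// h := log.FuncHandler(func(r *log.Record) error { +// count++ +// return nil +// }) +// logger.SetHandler(log.SyncHandler(h)) 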
+func FuncHandler(fn func(r *Record) error) Handler { + return funcHandler(fn) +} + +type funcHandler func(r *Record) error + +func (h funcHandler) Log(r *Record) error { + return h(r) +} + +// StreamHandler writes log records to an io.Writer +// with the given format. StreamHandler can be used +// to easily begin writing log records to other +// outputs. +// +// StreamHandler wraps itself with LazyHandler and SyncHandler +// to evaluate Lazy objects and perform safe concurrent writes. +func StreamHandler(wr io.Writer, fmtr Format) Handler { + h := FuncHandler(func(r *Record) error { + _, err := wr.Write(fmtr.Format(r)) + return err + }) + return LazyHandler(SyncHandler(h)) +} + +// SyncHandler can be wrapped around a handler to guarantee that +// only a single Log operation can proceed at a time. It's necessary +// for thread-safe concurrent writes. +func SyncHandler(h Handler) Handler { + var mu sync.Mutex + return FuncHandler(func(r *Record) error { + mu.Lock() + defer mu.Unlock() + return h.Log(r) + }) +} + +// FileHandler returns a handler which writes log records to the given file +// using the given format. If the path +// already exists, FileHandler will append to the given file. If it does not, +// FileHandler will create the file with mode 0644. +func FileHandler(path string, fmtr Format) (Handler, error) { + f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + return closingHandler{f, StreamHandler(f, fmtr)}, nil +} + +// NetHandler opens a socket to the given address and writes records +// over the connection. +func NetHandler(network, addr string, fmtr Format) (Handler, error) { + conn, err := net.Dial(network, addr) + if err != nil { + return nil, err + } + + return closingHandler{conn, StreamHandler(conn, fmtr)}, nil +} + +// XXX: closingHandler is essentially unused at the moment; +// it's meant for a future time when the Handler interface supports +// a possible Close() operation. +type closingHandler struct { + io.WriteCloser + Handler +} + +func (h *closingHandler) Close() error { + return h.WriteCloser.Close() +} + +// CallerFileHandler returns a Handler that adds the line number and file of +// the calling function to the context with key "caller". +func CallerFileHandler(h Handler) Handler { + return FuncHandler(func(r *Record) error { + call := stack.Call(r.CallPC[0]) + r.Ctx = append(r.Ctx, "caller", fmt.Sprint(call)) + return h.Log(r) + }) +} + +// CallerFuncHandler returns a Handler that adds the calling function name to +// the context with key "fn". +func CallerFuncHandler(h Handler) Handler { + return FuncHandler(func(r *Record) error { + call := stack.Call(r.CallPC[0]) + r.Ctx = append(r.Ctx, "fn", fmt.Sprintf("%+n", call)) + return h.Log(r) + }) +} + +// CallerStackHandler returns a Handler that adds a stack trace to the context +// with key "stack". The stack trace is formatted as a space-separated list of +// call sites inside matching []'s. The most recent call site is listed first. +// Each call site is formatted according to format. See the documentation of +// log15/stack.Call.Format for the list of supported formats. +func CallerStackHandler(format string, h Handler) Handler { + return FuncHandler(func(r *Record) error { + s := stack.Callers(). + TrimBelow(stack.Call(r.CallPC[0])). 
+ TrimRuntime() + if len(s) > 0 { + buf := &bytes.Buffer{} + buf.WriteByte('[') + for i, pc := range s { + if i > 0 { + buf.WriteByte(' ') + } + fmt.Fprintf(buf, format, pc) + } + buf.WriteByte(']') + r.Ctx = append(r.Ctx, "stack", buf.String()) + } + return h.Log(r) + }) +} + +// FilterHandler returns a Handler that only writes records to the +// wrapped Handler if the given function evaluates true. For example, +// to only log records where the 'err' key is not nil: +// +// logger.SetHandler(FilterHandler(func(r *Record) bool { +// for i := 0; i < len(r.Ctx); i += 2 { +// if r.Ctx[i] == "err" { +// return r.Ctx[i+1] != nil +// } +// } +// return false +// }, h)) +// +func FilterHandler(fn func(r *Record) bool, h Handler) Handler { + return FuncHandler(func(r *Record) error { + if fn(r) { + return h.Log(r) + } + return nil + }) +} + +// MatchFilterHandler returns a Handler that only writes records +// to the wrapped Handler if the given key in the logged +// context matches the value. For example, to only log records +// from your ui package: +// +// log.MatchFilterHandler("pkg", "app/ui", log.StdoutHandler) +// +func MatchFilterHandler(key string, value interface{}, h Handler) Handler { + return FilterHandler(func(r *Record) (pass bool) { + switch key { + case r.KeyNames.Lvl: + return r.Lvl == value + case r.KeyNames.Time: + return r.Time == value + case r.KeyNames.Msg: + return r.Msg == value + } + + for i := 0; i < len(r.Ctx); i += 2 { + if r.Ctx[i] == key { + return r.Ctx[i+1] == value + } + } + return false + }, h) +} + +// LvlFilterHandler returns a Handler that only writes +// records which are at or below the given verbosity +// level to the wrapped Handler. For example, to only +// log Error/Crit records: +// +// log.LvlFilterHandler(log.LvlError, log.StdoutHandler) +// +func LvlFilterHandler(maxLvl Lvl, h Handler) Handler { + return FilterHandler(func(r *Record) (pass bool) { + return r.Lvl <= maxLvl + }, h) +} + +// A MultiHandler dispatches any write to each of its handlers. +// This is useful for writing different types of log information +// to different locations. For example, to log to a file and +// standard error: +// +// log.MultiHandler( +// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), +// log.StderrHandler) +// +func MultiHandler(hs ...Handler) Handler { + return FuncHandler(func(r *Record) error { + for _, h := range hs { + // what to do about failures? + h.Log(r) + } + return nil + }) +} + +// A FailoverHandler writes all log records to the first handler +// specified, but will failover and write to the second handler if +// the first handler has failed, and so on for all handlers specified. +// For example, you might want to log to a network socket, but failover +// to writing to a file if the network fails, and then to +// standard out if the file write fails: +// +// log.FailoverHandler( +// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()), +// log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), +// log.StdoutHandler) +// +// All writes that do not go to the first handler will add context with keys of +// the form "failover_err_{idx}" which explain the error encountered while +// trying to write to the handlers before them in the list. 
+func FailoverHandler(hs ...Handler) Handler { + return FuncHandler(func(r *Record) error { + var err error + for i, h := range hs { + err = h.Log(r) + if err == nil { + return nil + } else { + r.Ctx = append(r.Ctx, fmt.Sprintf("failover_err_%d", i), err) + } + } + + return err + }) +} + +// ChannelHandler writes all records to the given channel. +// It blocks if the channel is full. Useful for async processing +// of log messages, it's used by BufferedHandler. +func ChannelHandler(recs chan<- *Record) Handler { + return FuncHandler(func(r *Record) error { + recs <- r + return nil + }) +} + +// BufferedHandler writes all records to a buffered +// channel of the given size which flushes into the wrapped +// handler whenever it is available for writing. Since these +// writes happen asynchronously, all writes to a BufferedHandler +// never return an error and any errors from the wrapped handler are ignored. +func BufferedHandler(bufSize int, h Handler) Handler { + recs := make(chan *Record, bufSize) + go func() { + for m := range recs { + _ = h.Log(m) + } + }() + return ChannelHandler(recs) +} + +// LazyHandler writes all values to the wrapped handler after evaluating +// any lazy functions in the record's context. It is already wrapped +// around StreamHandler and SyslogHandler in this library, you'll only need +// it if you write your own Handler. +func LazyHandler(h Handler) Handler { + return FuncHandler(func(r *Record) error { + // go through the values (odd indices) and reassign + // the values of any lazy fn to the result of its execution + hadErr := false + for i := 1; i < len(r.Ctx); i += 2 { + lz, ok := r.Ctx[i].(Lazy) + if ok { + v, err := evaluateLazy(lz) + if err != nil { + hadErr = true + r.Ctx[i] = err + } else { + if cs, ok := v.(stack.Trace); ok { + v = cs.TrimBelow(stack.Call(r.CallPC[0])). + TrimRuntime() + } + r.Ctx[i] = v + } + } + } + + if hadErr { + r.Ctx = append(r.Ctx, errorKey, "bad lazy") + } + + return h.Log(r) + }) +} + +func evaluateLazy(lz Lazy) (interface{}, error) { + t := reflect.TypeOf(lz.Fn) + + if t.Kind() != reflect.Func { + return nil, fmt.Errorf("INVALID_LAZY, not func: %+v", lz.Fn) + } + + if t.NumIn() > 0 { + return nil, fmt.Errorf("INVALID_LAZY, func takes args: %+v", lz.Fn) + } + + if t.NumOut() == 0 { + return nil, fmt.Errorf("INVALID_LAZY, no func return val: %+v", lz.Fn) + } + + value := reflect.ValueOf(lz.Fn) + results := value.Call([]reflect.Value{}) + if len(results) == 1 { + return results[0].Interface(), nil + } else { + values := make([]interface{}, len(results)) + for i, v := range results { + values[i] = v.Interface() + } + return values, nil + } +} + +// DiscardHandler reports success for all writes but does nothing. +// It is useful for dynamically disabling logging at runtime via +// a Logger's SetHandler method. 
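+// +// For example (editor's sketch), following the library-use convention from the +// package documentation, a library's logger can be silenced with: +// +// yourlib.Log.SetHandler(log.DiscardHandler()) 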
+func DiscardHandler() Handler { + return FuncHandler(func(r *Record) error { + return nil + }) +} + +// The Must object provides the following Handler creation functions +// which instead of returning an error parameter only return a Handler +// and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler +var Must muster + +func must(h Handler, err error) Handler { + if err != nil { + panic(err) + } + return h +} + +type muster struct{} + +func (m muster) FileHandler(path string, fmtr Format) Handler { + return must(FileHandler(path, fmtr)) +} + +func (m muster) NetHandler(network, addr string, fmtr Format) Handler { + return must(NetHandler(network, addr, fmtr)) +} === added file 'src/gopkg.in/inconshreveable/log15.v2/handler_appengine.go' --- src/gopkg.in/inconshreveable/log15.v2/handler_appengine.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/handler_appengine.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +// +build appengine + +package log15 + +import "sync" + +// swapHandler wraps another handler that may be swapped out +// dynamically at runtime in a thread-safe fashion. +type swapHandler struct { + handler interface{} + lock sync.RWMutex +} + +func (h *swapHandler) Log(r *Record) error { + h.lock.RLock() + defer h.lock.RUnlock() + + return h.handler.(Handler).Log(r) +} + +func (h *swapHandler) Swap(newHandler Handler) { + h.lock.Lock() + defer h.lock.Unlock() + + h.handler = newHandler +} === added file 'src/gopkg.in/inconshreveable/log15.v2/handler_other.go' --- src/gopkg.in/inconshreveable/log15.v2/handler_other.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/handler_other.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +// +build !appengine + +package log15 + +import ( + "sync/atomic" + "unsafe" +) + +// swapHandler wraps another handler that may be swapped out +// dynamically at runtime in a thread-safe fashion. 
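+// +// A sketch of the pattern used below (editor's illustration): Swap publishes a +// pointer to the new Handler with atomic.StorePointer and Log dereferences it +// with atomic.LoadPointer, so the logging path takes no lock: +// +// var p unsafe.Pointer // holds a *Handler +// atomic.StorePointer(&p, unsafe.Pointer(&h)) // publish Handler h +// cur := *(*Handler)(atomic.LoadPointer(&p)) // read it back 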
+type swapHandler struct { + handler unsafe.Pointer +} + +func (h *swapHandler) Log(r *Record) error { + return (*(*Handler)(atomic.LoadPointer(&h.handler))).Log(r) +} + +func (h *swapHandler) Swap(newHandler Handler) { + atomic.StorePointer(&h.handler, unsafe.Pointer(&newHandler)) +} === added file 'src/gopkg.in/inconshreveable/log15.v2/log15_test.go' --- src/gopkg.in/inconshreveable/log15.v2/log15_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/log15_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,566 @@ +package log15 + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "net" + "regexp" + "runtime" + "sync" + "testing" + "time" +) + +func testHandler() (Handler, *Record) { + rec := new(Record) + return FuncHandler(func(r *Record) error { + *rec = *r + return nil + }), rec +} + +func testLogger() (Logger, Handler, *Record) { + l := New() + h, r := testHandler() + l.SetHandler(LazyHandler(h)) + return l, h, r +} + +func TestLazy(t *testing.T) { + t.Parallel() + + x := 1 + lazy := func() int { + return x + } + + l, _, r := testLogger() + l.Info("", "x", Lazy{lazy}) + if r.Ctx[1] != 1 { + t.Fatalf("Lazy function not evaluated, got %v, expected %d", r.Ctx[1], 1) + } + + x = 2 + l.Info("", "x", Lazy{lazy}) + if r.Ctx[1] != 2 { + t.Fatalf("Lazy function not evaluated, got %v, expected %d", r.Ctx[1], 2) + } +} + +func TestInvalidLazy(t *testing.T) { + t.Parallel() + + l, _, r := testLogger() + validate := func() { + if len(r.Ctx) < 4 { + t.Fatalf("Invalid lazy, got %d args, expecting at least 4", len(r.Ctx)) + } + + if r.Ctx[2] != errorKey { + t.Fatalf("Invalid lazy, got key %s expecting %s", r.Ctx[2], errorKey) + } + } + + l.Info("", "x", Lazy{1}) + validate() + + l.Info("", "x", Lazy{func(x int) int { return x }}) + validate() + + l.Info("", "x", Lazy{func() {}}) + validate() +} + +func TestCtx(t *testing.T) { + t.Parallel() + + l, _, r := testLogger() + l.Info("", Ctx{"x": 1, "y": "foo", "tester": t}) + if len(r.Ctx) != 6 { + t.Fatalf("Expecting Ctx transformed into %d ctx args, got %d: %v", 6, len(r.Ctx), r.Ctx) + } +} + +func testFormatter(f Format) (Logger, *bytes.Buffer) { + l := New() + var buf bytes.Buffer + l.SetHandler(StreamHandler(&buf, f)) + return l, &buf +} + +func TestJson(t *testing.T) { + t.Parallel() + + l, buf := testFormatter(JsonFormat()) + l.Error("some message", "x", 1, "y", 3.2) + + var v map[string]interface{} + decoder := json.NewDecoder(buf) + if err := decoder.Decode(&v); err != nil { + t.Fatalf("Error decoding JSON: %v", err) + } + + validate := func(key string, expected interface{}) { + if v[key] != expected { + t.Fatalf("Got %v expected %v for %v", v[key], expected, key) + } + } + + validate("msg", "some message") + validate("x", float64(1)) // all numbers are floats in JSON land + validate("y", 3.2) +} + +type testtype struct { + name string +} + +func (tt testtype) String() string { + return tt.name +} + +func TestLogfmt(t *testing.T) { + t.Parallel() + + var nilVal *testtype + + l, buf := testFormatter(LogfmtFormat()) + l.Error("some message", "x", 1, "y", 3.2, "equals", "=", "quote", "\"", "nil", nilVal) + + // skip timestamp in comparison + got := buf.Bytes()[27:buf.Len()] + expected := []byte(`lvl=eror msg="some message" x=1 y=3.200 equals="=" quote="\"" nil=nil` + "\n") + if !bytes.Equal(got, expected) { + t.Fatalf("Got %s, expected %s", got, expected) + } +} + +func TestMultiHandler(t *testing.T) { + t.Parallel() + + h1, r1 := testHandler() + h2, r2 := testHandler() + l := New() + l.SetHandler(MultiHandler(h1, h2)) 
+ l.Debug("clone") + + if r1.Msg != "clone" { + t.Fatalf("wrong value for h1.Msg. Got %s expected %s", r1.Msg, "clone") + } + + if r2.Msg != "clone" { + t.Fatalf("wrong value for h2.Msg. Got %s expected %s", r2.Msg, "clone") + } + +} + +type waitHandler struct { + ch chan Record +} + +func (h *waitHandler) Log(r *Record) error { + h.ch <- *r + return nil +} + +func TestBufferedHandler(t *testing.T) { + t.Parallel() + + ch := make(chan Record) + l := New() + l.SetHandler(BufferedHandler(0, &waitHandler{ch})) + + l.Debug("buffer") + if r := <-ch; r.Msg != "buffer" { + t.Fatalf("wrong value for r.Msg. Got %s expected %s", r.Msg, "buffer") + } +} + +func TestLogContext(t *testing.T) { + t.Parallel() + + l, _, r := testLogger() + l = l.New("foo", "bar") + l.Crit("baz") + + if len(r.Ctx) != 2 { + t.Fatalf("Expected logger context in record context. Got length %d, expected %d", len(r.Ctx), 2) + } + + if r.Ctx[0] != "foo" { + t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], "foo") + } + + if r.Ctx[1] != "bar" { + t.Fatalf("Wrong context value, got %s expected %s", r.Ctx[1], "bar") + } +} + +func TestMapCtx(t *testing.T) { + t.Parallel() + + l, _, r := testLogger() + l.Crit("test", Ctx{"foo": "bar"}) + + if len(r.Ctx) != 2 { + t.Fatalf("Wrong context length, got %d, expected %d", len(r.Ctx), 2) + } + + if r.Ctx[0] != "foo" { + t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], "foo") + } + + if r.Ctx[1] != "bar" { + t.Fatalf("Wrong context value, got %s expected %s", r.Ctx[1], "bar") + } +} + +func TestLvlFilterHandler(t *testing.T) { + t.Parallel() + + l := New() + h, r := testHandler() + l.SetHandler(LvlFilterHandler(LvlWarn, h)) + l.Info("info'd") + + if r.Msg != "" { + t.Fatalf("Expected zero record, but got record with msg: %v", r.Msg) + } + + l.Warn("warned") + if r.Msg != "warned" { + t.Fatalf("Got record msg %s expected %s", r.Msg, "warned") + } + + l.Warn("error'd") + if r.Msg != "error'd" { + t.Fatalf("Got record msg %s expected %s", r.Msg, "error'd") + } +} + +func TestNetHandler(t *testing.T) { + t.Parallel() + + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("Failed to listen: %v", err) + } + + errs := make(chan error) + go func() { + c, err := l.Accept() + if err != nil { + t.Errorf("Failed to accept connection: %v", err) + return + } + + rd := bufio.NewReader(c) + s, err := rd.ReadString('\n') + if err != nil { + t.Errorf("Failed to read string: %v", err) + } + + got := s[27:] + expected := "lvl=info msg=test x=1\n" + if got != expected { + t.Errorf("Got log line %s, expected %s", got, expected) + } + + errs <- nil + }() + + lg := New() + h, err := NetHandler("tcp", l.Addr().String(), LogfmtFormat()) + if err != nil { + t.Fatal(err) + } + lg.SetHandler(h) + lg.Info("test", "x", 1) + + select { + case <-time.After(time.Second): + t.Fatalf("Test timed out!") + case <-errs: + // ok + } +} + +func TestMatchFilterHandler(t *testing.T) { + t.Parallel() + + l, h, r := testLogger() + l.SetHandler(MatchFilterHandler("err", nil, h)) + + l.Crit("test", "foo", "bar") + if r.Msg != "" { + t.Fatalf("expected filter handler to discard msg") + } + + l.Crit("test2", "err", "bad fd") + if r.Msg != "" { + t.Fatalf("expected filter handler to discard msg") + } + + l.Crit("test3", "err", nil) + if r.Msg != "test3" { + t.Fatalf("expected filter handler to allow msg") + } +} + +func TestMatchFilterBuiltin(t *testing.T) { + t.Parallel() + + l, h, r := testLogger() + l.SetHandler(MatchFilterHandler("lvl", LvlError, h)) + l.Info("does not pass") + + if r.Msg != "" { + 
t.Fatalf("got info level record that should not have matched") + } + + l.Error("error!") + if r.Msg != "error!" { + t.Fatalf("did not get error level record that should have matched") + } + + r.Msg = "" + l.SetHandler(MatchFilterHandler("msg", "matching message", h)) + l.Info("doesn't match") + if r.Msg != "" { + t.Fatalf("got record with wrong message matched") + } + + l.Debug("matching message") + if r.Msg != "matching message" { + t.Fatalf("did not get record which matches") + } +} + +type failingWriter struct { + fail bool +} + +func (w *failingWriter) Write(buf []byte) (int, error) { + if w.fail { + return 0, errors.New("fail") + } else { + return len(buf), nil + } +} + +func TestFailoverHandler(t *testing.T) { + t.Parallel() + + l := New() + h, r := testHandler() + w := &failingWriter{false} + + l.SetHandler(FailoverHandler( + StreamHandler(w, JsonFormat()), + h)) + + l.Debug("test ok") + if r.Msg != "" { + t.Fatalf("expected no failover") + } + + w.fail = true + l.Debug("test failover", "x", 1) + if r.Msg != "test failover" { + t.Fatalf("expected failover") + } + + if len(r.Ctx) != 4 { + t.Fatalf("expected additional failover ctx") + } + + got := r.Ctx[2] + expected := "failover_err_0" + if got != expected { + t.Fatalf("expected failover ctx. got: %s, expected %s", got, expected) + } +} + +// https://github.com/inconshreveable/log15/issues/16 +func TestIndependentSetHandler(t *testing.T) { + t.Parallel() + + parent, _, r := testLogger() + child := parent.New() + child.SetHandler(DiscardHandler()) + parent.Info("test") + if r.Msg != "test" { + t.Fatalf("parent handler affected by child") + } +} + +// https://github.com/inconshreveable/log15/issues/16 +func TestInheritHandler(t *testing.T) { + t.Parallel() + + parent, _, r := testLogger() + child := parent.New() + parent.SetHandler(DiscardHandler()) + child.Info("test") + if r.Msg == "test" { + t.Fatalf("child handler affected not affected by parent") + } +} + +func TestCallerFileHandler(t *testing.T) { + t.Parallel() + + l := New() + h, r := testHandler() + l.SetHandler(CallerFileHandler(h)) + + l.Info("baz") + _, _, line, _ := runtime.Caller(0) + + if len(r.Ctx) != 2 { + t.Fatalf("Expected caller in record context. Got length %d, expected %d", len(r.Ctx), 2) + } + + const key = "caller" + + if r.Ctx[0] != key { + t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], key) + } + + s, ok := r.Ctx[1].(string) + if !ok { + t.Fatalf("Wrong context value type, got %T expected string", r.Ctx[1]) + } + + exp := fmt.Sprint("log15_test.go:", line-1) + if s != exp { + t.Fatalf("Wrong context value, got %s expected string matching %s", s, exp) + } +} + +func TestCallerFuncHandler(t *testing.T) { + t.Parallel() + + l := New() + h, r := testHandler() + l.SetHandler(CallerFuncHandler(h)) + + l.Info("baz") + + if len(r.Ctx) != 2 { + t.Fatalf("Expected caller in record context. 
Got length %d, expected %d", len(r.Ctx), 2) + } + + const key = "fn" + + if r.Ctx[0] != key { + t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], key) + } + + const regex = ".*\\.TestCallerFuncHandler" + + s, ok := r.Ctx[1].(string) + if !ok { + t.Fatalf("Wrong context value type, got %T expected string", r.Ctx[1]) + } + + match, err := regexp.MatchString(regex, s) + if err != nil { + t.Fatalf("Error matching %s to regex %s: %v", s, regex, err) + } + + if !match { + t.Fatalf("Wrong context value, got %s expected string matching %s", s, regex) + } +} + +// https://github.com/inconshreveable/log15/issues/27 +func TestCallerStackHandler(t *testing.T) { + t.Parallel() + + l := New() + h, r := testHandler() + l.SetHandler(CallerStackHandler("%#v", h)) + + lines := []int{} + + func() { + l.Info("baz") + _, _, line, _ := runtime.Caller(0) + lines = append(lines, line-1) + }() + _, file, line, _ := runtime.Caller(0) + lines = append(lines, line-1) + + if len(r.Ctx) != 2 { + t.Fatalf("Expected stack in record context. Got length %d, expected %d", len(r.Ctx), 2) + } + + const key = "stack" + + if r.Ctx[0] != key { + t.Fatalf("Wrong context key, got %s expected %s", r.Ctx[0], key) + } + + s, ok := r.Ctx[1].(string) + if !ok { + t.Fatalf("Wrong context value type, got %T expected string", r.Ctx[1]) + } + + exp := "[" + for i, line := range lines { + if i > 0 { + exp += " " + } + exp += fmt.Sprint(file, ":", line) + } + exp += "]" + + if s != exp { + t.Fatalf("Wrong context value, got %s expected string matching %s", s, exp) + } +} + +// tests that when logging concurrently to the same logger +// from multiple goroutines that the calls are handled independently +// this test tries to trigger a previous bug where concurrent calls could +// corrupt each other's context values +// +// this test runs N concurrent goroutines each logging a fixed number of +// records and a handler that buckets them based on the index passed in the context. +// if the logger is not concurrent-safe then the values in the buckets will not all be the same +// +// https://github.com/inconshreveable/log15/pull/30 +func TestConcurrent(t *testing.T) { + root := New() + // this was the first value that triggered + // go to allocate extra capacity in the logger's context slice which + // was necessary to trigger the bug + const ctxLen = 34 + l := root.New(make([]interface{}, ctxLen)...) 
+ const goroutines = 8 + var res [goroutines]int + l.SetHandler(SyncHandler(FuncHandler(func(r *Record) error { + res[r.Ctx[ctxLen+1].(int)]++ + return nil + }))) + var wg sync.WaitGroup + wg.Add(goroutines) + for i := 0; i < goroutines; i++ { + go func(idx int) { + defer wg.Done() + for j := 0; j < 10000; j++ { + l.Info("test message", "goroutine_idx", idx) + } + }(i) + } + wg.Wait() + for _, val := range res[:] { + if val != 10000 { + t.Fatalf("Wrong number of messages for context: %+v", res) + } + } +} === added file 'src/gopkg.in/inconshreveable/log15.v2/logger.go' --- src/gopkg.in/inconshreveable/log15.v2/logger.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/logger.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,201 @@ +package log15 + +import ( + "fmt" + "runtime" + "time" +) + +const timeKey = "t" +const lvlKey = "lvl" +const msgKey = "msg" +const errorKey = "LOG15_ERROR" + +type Lvl int + +const ( + LvlCrit Lvl = iota + LvlError + LvlWarn + LvlInfo + LvlDebug +) + +// Returns the name of a Lvl +func (l Lvl) String() string { + switch l { + case LvlDebug: + return "dbug" + case LvlInfo: + return "info" + case LvlWarn: + return "warn" + case LvlError: + return "eror" + case LvlCrit: + return "crit" + default: + panic("bad level") + } +} + +// Returns the appropriate Lvl from a string name. +// Useful for parsing command line args and configuration files. +func LvlFromString(lvlString string) (Lvl, error) { + switch lvlString { + case "debug", "dbug": + return LvlDebug, nil + case "info": + return LvlInfo, nil + case "warn": + return LvlWarn, nil + case "error", "eror": + return LvlError, nil + case "crit": + return LvlCrit, nil + default: + return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString) + } +} + +// A Record is what a Logger asks its handler to write +type Record struct { + Time time.Time + Lvl Lvl + Msg string + Ctx []interface{} + CallPC [1]uintptr + KeyNames RecordKeyNames +} + +type RecordKeyNames struct { + Time string + Msg string + Lvl string +} + +// A Logger writes key/value pairs to a Handler +type Logger interface { + // New returns a new Logger that has this logger's context plus the given context + New(ctx ...interface{}) Logger + + // SetHandler updates the logger to write records to the specified handler. 
+ SetHandler(h Handler) + + // Log a message at the given level with context key/value pairs + Debug(msg string, ctx ...interface{}) + Info(msg string, ctx ...interface{}) + Warn(msg string, ctx ...interface{}) + Error(msg string, ctx ...interface{}) + Crit(msg string, ctx ...interface{}) +} + +type logger struct { + ctx []interface{} + h *swapHandler +} + +func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) { + r := Record{ + Time: time.Now(), + Lvl: lvl, + Msg: msg, + Ctx: newContext(l.ctx, ctx), + KeyNames: RecordKeyNames{ + Time: timeKey, + Msg: msgKey, + Lvl: lvlKey, + }, + } + runtime.Callers(3, r.CallPC[:]) + l.h.Log(&r) +} + +func (l *logger) New(ctx ...interface{}) Logger { + child := &logger{newContext(l.ctx, ctx), new(swapHandler)} + child.SetHandler(l.h) + return child +} + +func newContext(prefix []interface{}, suffix []interface{}) []interface{} { + normalizedSuffix := normalize(suffix) + newCtx := make([]interface{}, len(prefix)+len(normalizedSuffix)) + n := copy(newCtx, prefix) + copy(newCtx[n:], normalizedSuffix) + return newCtx +} + +func (l *logger) Debug(msg string, ctx ...interface{}) { + l.write(msg, LvlDebug, ctx) +} + +func (l *logger) Info(msg string, ctx ...interface{}) { + l.write(msg, LvlInfo, ctx) +} + +func (l *logger) Warn(msg string, ctx ...interface{}) { + l.write(msg, LvlWarn, ctx) +} + +func (l *logger) Error(msg string, ctx ...interface{}) { + l.write(msg, LvlError, ctx) +} + +func (l *logger) Crit(msg string, ctx ...interface{}) { + l.write(msg, LvlCrit, ctx) +} + +func (l *logger) SetHandler(h Handler) { + l.h.Swap(h) +} + +func normalize(ctx []interface{}) []interface{} { + // if the caller passed a Ctx object, then expand it + if len(ctx) == 1 { + if ctxMap, ok := ctx[0].(Ctx); ok { + ctx = ctxMap.toArray() + } + } + + // ctx needs to be even because it's a series of key/value pairs + // no one wants to check for errors on logging functions, + // so instead of erroring on bad input, we'll just make sure + // that things are the right length and users can fix bugs + // when they see the output looks wrong + if len(ctx)%2 != 0 { + ctx = append(ctx, nil, errorKey, "Normalized odd number of arguments by adding nil") + } + + return ctx +} + +// Lazy allows you to defer calculation of a logged value that is expensive +// to compute until it is certain that it must be evaluated with the given filters. +// +// Lazy may also be used in conjunction with a Logger's New() function +// to generate a child logger which always reports the current value of changing +// state. +// +// You may wrap any function which takes no arguments to Lazy. It may return any +// number of values of any type. +type Lazy struct { + Fn interface{} +} + +// Ctx is a map of key/value pairs to pass as context to a log function +// Use this only if you really need greater safety around the arguments you pass +// to the logging functions. 
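+// +// For example (editor's sketch; the values shown are assumptions): +// +// log.Info("page accessed", log.Ctx{"path": r.URL.Path, "user_id": userID}) 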
+type Ctx map[string]interface{} + +func (c Ctx) toArray() []interface{} { + arr := make([]interface{}, len(c)*2) + + i := 0 + for k, v := range c { + arr[i] = k + arr[i+1] = v + i += 2 + } + + return arr +} === added file 'src/gopkg.in/inconshreveable/log15.v2/root.go' --- src/gopkg.in/inconshreveable/log15.v2/root.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/root.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,67 @@ +package log15 + +import ( + "os" + + "github.com/mattn/go-colorable" + "gopkg.in/inconshreveable/log15.v2/term" +) + +var ( + root *logger + StdoutHandler = StreamHandler(os.Stdout, LogfmtFormat()) + StderrHandler = StreamHandler(os.Stderr, LogfmtFormat()) +) + +func init() { + if term.IsTty(os.Stdout.Fd()) { + StdoutHandler = StreamHandler(colorable.NewColorableStdout(), TerminalFormat()) + } + + if term.IsTty(os.Stderr.Fd()) { + StderrHandler = StreamHandler(colorable.NewColorableStderr(), TerminalFormat()) + } + + root = &logger{[]interface{}{}, new(swapHandler)} + root.SetHandler(StdoutHandler) +} + +// New returns a new logger with the given context. +// New is a convenient alias for Root().New +func New(ctx ...interface{}) Logger { + return root.New(ctx...) +} + +// Root returns the root logger +func Root() Logger { + return root +} + +// The following functions bypass the exported logger methods (logger.Debug, +// etc.) to keep the call depth the same for all paths to logger.write so +// runtime.Caller(2) always refers to the call site in client code. + +// Debug is a convenient alias for Root().Debug +func Debug(msg string, ctx ...interface{}) { + root.write(msg, LvlDebug, ctx) +} + +// Info is a convenient alias for Root().Info +func Info(msg string, ctx ...interface{}) { + root.write(msg, LvlInfo, ctx) +} + +// Warn is a convenient alias for Root().Warn +func Warn(msg string, ctx ...interface{}) { + root.write(msg, LvlWarn, ctx) +} + +// Error is a convenient alias for Root().Error +func Error(msg string, ctx ...interface{}) { + root.write(msg, LvlError, ctx) +} + +// Crit is a convenient alias for Root().Crit +func Crit(msg string, ctx ...interface{}) { + root.write(msg, LvlCrit, ctx) +} === added directory 'src/gopkg.in/inconshreveable/log15.v2/stack' === added file 'src/gopkg.in/inconshreveable/log15.v2/stack/stack.go' --- src/gopkg.in/inconshreveable/log15.v2/stack/stack.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/stack/stack.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,225 @@ +// Package stack implements utilities to capture, manipulate, and format call +// stacks. +package stack + +import ( + "fmt" + "path/filepath" + "runtime" + "strings" +) + +// Call records a single function invocation from a goroutine stack. It is a +// wrapper for the program counter values returned by runtime.Caller and +// runtime.Callers and consumed by runtime.FuncForPC. +type Call uintptr + +// Format implements fmt.Formatter with support for the following verbs. +// +// %s source file +// %d line number +// %n function name +// %v equivalent to %s:%d +// +// It accepts the '+' and '#' flags for most of the verbs as follows. +// +// %+s path of source file relative to the compile time GOPATH +// %#s full path of source file +// %+n import path qualified function name +// %+v equivalent to %+s:%d +// %#v equivalent to %#s:%d +func (pc Call) Format(s fmt.State, c rune) { + // BUG(ChrisHines): Subtracting one from pc is a work around for + // https://code.google.com/p/go/issues/detail?id=7690. 
The idea for this + // workaround comes from rsc's initial patch at + // https://codereview.appspot.com/84100043/#ps20001, but as noted in the + // issue discussion, it is not a complete fix since it doesn't handle some + // cases involving signals. Just the same, it handles all of the other + // cases I have tested. + pcFix := uintptr(pc) - 1 + fn := runtime.FuncForPC(pcFix) + if fn == nil { + fmt.Fprintf(s, "%%!%c(NOFUNC)", c) + return + } + + switch c { + case 's', 'v': + file, line := fn.FileLine(pcFix) + switch { + case s.Flag('#'): + // done + case s.Flag('+'): + // Here we want to get the source file path relative to the + // compile time GOPATH. As of Go 1.3.x there is no direct way to + // know the compiled GOPATH at runtime, but we can infer the + // number of path segments in the GOPATH. We note that fn.Name() + // returns the function name qualified by the import path, which + // does not include the GOPATH. Thus we can trim segments from the + // beginning of the file path until the number of path separators + // remaining is one more than the number of path separators in the + // function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path + // separator than our desired output. + const sep = "/" + impCnt := strings.Count(fn.Name(), sep) + 1 + pathCnt := strings.Count(file, sep) + for pathCnt > impCnt { + i := strings.Index(file, sep) + if i == -1 { + break + } + file = file[i+len(sep):] + pathCnt-- + } + default: + const sep = "/" + if i := strings.LastIndex(file, sep); i != -1 { + file = file[i+len(sep):] + } + } + fmt.Fprint(s, file) + if c == 'v' { + fmt.Fprint(s, ":", line) + } + + case 'd': + _, line := fn.FileLine(pcFix) + fmt.Fprint(s, line) + + case 'n': + name := fn.Name() + if !s.Flag('+') { + const pathSep = "/" + if i := strings.LastIndex(name, pathSep); i != -1 { + name = name[i+len(pathSep):] + } + const pkgSep = "." + if i := strings.Index(name, pkgSep); i != -1 { + name = name[i+len(pkgSep):] + } + } + fmt.Fprint(s, name) + } +} + +// Callers returns a Trace for the current goroutine with element 0 +// identifying the calling function. +func Callers() Trace { + pcs := poolBuf() + pcs = pcs[:cap(pcs)] + n := runtime.Callers(2, pcs) + cs := make([]Call, n) + for i, pc := range pcs[:n] { + cs[i] = Call(pc) + } + putPoolBuf(pcs) + return cs +} + +// name returns the import path qualified name of the function containing the +// call. +func (pc Call) name() string { + pcFix := uintptr(pc) - 1 // workaround for go issue #7690 + fn := runtime.FuncForPC(pcFix) + if fn == nil { + return "???" + } + return fn.Name() +} + +func (pc Call) file() string { + pcFix := uintptr(pc) - 1 // workaround for go issue #7690 + fn := runtime.FuncForPC(pcFix) + if fn == nil { + return "???" + } + file, _ := fn.FileLine(pcFix) + return file +} + +// Trace records a sequence of function invocations from a goroutine stack. +type Trace []Call + +// Format implements fmt.Formatter by printing the Trace as square brackets ([, +// ]) surrounding a space-separated list of Calls each formatted with the +// supplied verb and options. 
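+// +// For example (editor's sketch), printing the current goroutine's stack with +// GOPATH-relative file paths: +// +// t := stack.Callers().TrimRuntime() +// fmt.Printf("%+v\n", t) // e.g. [pkg/data.go:42 pkg/cmd/main.go:17] 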
+func (pcs Trace) Format(s fmt.State, c rune) { + s.Write([]byte("[")) + for i, pc := range pcs { + if i > 0 { + s.Write([]byte(" ")) + } + pc.Format(s, c) + } + s.Write([]byte("]")) +} + +// TrimBelow returns a slice of the Trace with all entries below pc removed. +func (pcs Trace) TrimBelow(pc Call) Trace { + for len(pcs) > 0 && pcs[0] != pc { + pcs = pcs[1:] + } + return pcs +} + +// TrimAbove returns a slice of the Trace with all entries above pc removed. +func (pcs Trace) TrimAbove(pc Call) Trace { + for len(pcs) > 0 && pcs[len(pcs)-1] != pc { + pcs = pcs[:len(pcs)-1] + } + return pcs +} + +// TrimBelowName returns a slice of the Trace with all entries below the +// lowest with function name name removed. +func (pcs Trace) TrimBelowName(name string) Trace { + for len(pcs) > 0 && pcs[0].name() != name { + pcs = pcs[1:] + } + return pcs +} + +// TrimAboveName returns a slice of the Trace with all entries above the +// highest with function name name removed. +func (pcs Trace) TrimAboveName(name string) Trace { + for len(pcs) > 0 && pcs[len(pcs)-1].name() != name { + pcs = pcs[:len(pcs)-1] + } + return pcs +} + +var goroot string + +func init() { + goroot = filepath.ToSlash(runtime.GOROOT()) + if runtime.GOOS == "windows" { + goroot = strings.ToLower(goroot) + } +} + +func inGoroot(path string) bool { + if runtime.GOOS == "windows" { + path = strings.ToLower(path) + } + return strings.HasPrefix(path, goroot) +} + +// TrimRuntime returns a slice of the Trace with the topmost entries from the +// go runtime removed. It considers any calls originating from files under +// GOROOT as part of the runtime. +func (pcs Trace) TrimRuntime() Trace { + for len(pcs) > 0 && inGoroot(pcs[len(pcs)-1].file()) { + pcs = pcs[:len(pcs)-1] + } + return pcs +} === added file 'src/gopkg.in/inconshreveable/log15.v2/stack/stack_pool.go' --- src/gopkg.in/inconshreveable/log15.v2/stack/stack_pool.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/stack/stack_pool.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +// +build go1.3 + +package stack + +import ( + "sync" +) + +var pcStackPool = sync.Pool{ + New: func() interface{} { return make([]uintptr, 1000) }, +} + +func poolBuf() []uintptr { + return pcStackPool.Get().([]uintptr) +} + +func putPoolBuf(p []uintptr) { + pcStackPool.Put(p) +} === added file 'src/gopkg.in/inconshreveable/log15.v2/stack/stack_pool_chan.go' --- src/gopkg.in/inconshreveable/log15.v2/stack/stack_pool_chan.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/stack/stack_pool_chan.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +// +build !go1.3 appengine + +package stack + +const ( + stackPoolSize = 64 +) + +var ( + pcStackPool = make(chan []uintptr, stackPoolSize) +) + +func poolBuf() []uintptr { + select { + case p := <-pcStackPool: + return p + default: + return make([]uintptr, 1000) + } +} + +func putPoolBuf(p []uintptr) { + select { + case pcStackPool <- p: + default: + } +} === added file 'src/gopkg.in/inconshreveable/log15.v2/stack/stack_test.go' --- src/gopkg.in/inconshreveable/log15.v2/stack/stack_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/stack/stack_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,231 @@ +package stack_test + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "testing" + + "gopkg.in/inconshreveable/log15.v2/stack" +) + +type testType struct{} + +func (tt testType) testMethod() (pc uintptr, file string, line int, ok bool) { + return runtime.Caller(0) +} + +func 
TestCallFormat(t *testing.T) { + t.Parallel() + + pc, file, line, ok := runtime.Caller(0) + if !ok { + t.Fatal("runtime.Caller(0) failed") + } + + gopathSrc := filepath.Join(os.Getenv("GOPATH"), "src") + relFile, err := filepath.Rel(gopathSrc, file) + if err != nil { + t.Fatalf("failed to determine path relative to GOPATH: %v", err) + } + relFile = filepath.ToSlash(relFile) + + pc2, file2, line2, ok2 := testType{}.testMethod() + if !ok2 { + t.Fatal("runtime.Caller(0) failed") + } + relFile2, err := filepath.Rel(gopathSrc, file) + if err != nil { + t.Fatalf("failed to determine path relative to GOPATH: %v", err) + } + relFile2 = filepath.ToSlash(relFile2) + + data := []struct { + pc uintptr + desc string + fmt string + out string + }{ + {0, "error", "%s", "%!s(NOFUNC)"}, + + {pc, "func", "%s", path.Base(file)}, + {pc, "func", "%+s", relFile}, + {pc, "func", "%#s", file}, + {pc, "func", "%d", fmt.Sprint(line)}, + {pc, "func", "%n", "TestCallFormat"}, + {pc, "func", "%+n", runtime.FuncForPC(pc).Name()}, + {pc, "func", "%v", fmt.Sprint(path.Base(file), ":", line)}, + {pc, "func", "%+v", fmt.Sprint(relFile, ":", line)}, + {pc, "func", "%#v", fmt.Sprint(file, ":", line)}, + {pc, "func", "%v|%[1]n()", fmt.Sprint(path.Base(file), ":", line, "|", "TestCallFormat()")}, + + {pc2, "meth", "%s", path.Base(file2)}, + {pc2, "meth", "%+s", relFile2}, + {pc2, "meth", "%#s", file2}, + {pc2, "meth", "%d", fmt.Sprint(line2)}, + {pc2, "meth", "%n", "testType.testMethod"}, + {pc2, "meth", "%+n", runtime.FuncForPC(pc2).Name()}, + {pc2, "meth", "%v", fmt.Sprint(path.Base(file2), ":", line2)}, + {pc2, "meth", "%+v", fmt.Sprint(relFile2, ":", line2)}, + {pc2, "meth", "%#v", fmt.Sprint(file2, ":", line2)}, + {pc2, "meth", "%v|%[1]n()", fmt.Sprint(path.Base(file2), ":", line2, "|", "testType.testMethod()")}, + } + + for _, d := range data { + got := fmt.Sprintf(d.fmt, stack.Call(d.pc)) + if got != d.out { + t.Errorf("fmt.Sprintf(%q, Call(%s)) = %s, want %s", d.fmt, d.desc, got, d.out) + } + } +} + +func BenchmarkCallVFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + b.Fatal("runtime.Caller(0) failed") + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprint(ioutil.Discard, stack.Call(pc)) + } +} + +func BenchmarkCallPlusVFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + b.Fatal("runtime.Caller(0) failed") + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%+v", stack.Call(pc)) + } +} + +func BenchmarkCallSharpVFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + b.Fatal("runtime.Caller(0) failed") + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%#v", stack.Call(pc)) + } +} + +func BenchmarkCallSFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + b.Fatal("runtime.Caller(0) failed") + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%s", stack.Call(pc)) + } +} + +func BenchmarkCallPlusSFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + b.Fatal("runtime.Caller(0) failed") + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%+s", stack.Call(pc)) + } +} + +func BenchmarkCallSharpSFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + b.Fatal("runtime.Caller(0) failed") + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + fmt.Fprintf(ioutil.Discard, "%#s", stack.Call(pc)) + } +} + +func BenchmarkCallDFmt(b *testing.B) { + pc, _, _, ok := runtime.Caller(0) + if !ok { + 
b.Fatal("runtime.Caller(0) failed")
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		fmt.Fprintf(ioutil.Discard, "%d", stack.Call(pc))
+	}
+}
+
+func BenchmarkCallNFmt(b *testing.B) {
+	pc, _, _, ok := runtime.Caller(0)
+	if !ok {
+		b.Fatal("runtime.Caller(0) failed")
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		fmt.Fprintf(ioutil.Discard, "%n", stack.Call(pc))
+	}
+}
+
+func BenchmarkCallPlusNFmt(b *testing.B) {
+	pc, _, _, ok := runtime.Caller(0)
+	if !ok {
+		b.Fatal("runtime.Caller(0) failed")
+	}
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		fmt.Fprintf(ioutil.Discard, "%+n", stack.Call(pc))
+	}
+}
+
+func BenchmarkCallers(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		stack.Callers()
+	}
+}
+
+func deepStack(depth int, b *testing.B) stack.Trace {
+	if depth > 0 {
+		return deepStack(depth-1, b)
+	}
+	b.StartTimer()
+	s := stack.Callers()
+	b.StopTimer()
+	return s
+}
+
+func BenchmarkCallers10(b *testing.B) {
+	b.StopTimer()
+
+	for i := 0; i < b.N; i++ {
+		deepStack(10, b)
+	}
+}
+
+func BenchmarkCallers50(b *testing.B) {
+	b.StopTimer()
+
+	for i := 0; i < b.N; i++ {
+		deepStack(50, b)
+	}
+}
+
+func BenchmarkCallers100(b *testing.B) {
+	b.StopTimer()
+
+	for i := 0; i < b.N; i++ {
+		deepStack(100, b)
+	}
+}

=== added file 'src/gopkg.in/inconshreveable/log15.v2/syslog.go'
--- src/gopkg.in/inconshreveable/log15.v2/syslog.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/inconshreveable/log15.v2/syslog.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,55 @@
+// +build !windows,!plan9
+
+package log15
+
+import (
+	"log/syslog"
+	"strings"
+)
+
+// SyslogHandler opens a connection to the system syslog daemon by calling
+// syslog.New and writes all records to it.
+func SyslogHandler(tag string, fmtr Format) (Handler, error) {
+	wr, err := syslog.New(syslog.LOG_INFO, tag)
+	return sharedSyslog(fmtr, wr, err)
+}
+
+// SyslogNetHandler opens a connection to a log daemon over the network and writes
+// all log records to it.
+func SyslogNetHandler(net, addr string, tag string, fmtr Format) (Handler, error) { + wr, err := syslog.Dial(net, addr, syslog.LOG_INFO, tag) + return sharedSyslog(fmtr, wr, err) +} + +func sharedSyslog(fmtr Format, sysWr *syslog.Writer, err error) (Handler, error) { + if err != nil { + return nil, err + } + h := FuncHandler(func(r *Record) error { + var syslogFn = sysWr.Info + switch r.Lvl { + case LvlCrit: + syslogFn = sysWr.Crit + case LvlError: + syslogFn = sysWr.Err + case LvlWarn: + syslogFn = sysWr.Warning + case LvlInfo: + syslogFn = sysWr.Info + case LvlDebug: + syslogFn = sysWr.Debug + } + + s := strings.TrimSpace(string(fmtr.Format(r))) + return syslogFn(s) + }) + return LazyHandler(&closingHandler{sysWr, h}), nil +} + +func (m muster) SyslogHandler(tag string, fmtr Format) Handler { + return must(SyslogHandler(tag, fmtr)) +} + +func (m muster) SyslogNetHandler(net, addr string, tag string, fmtr Format) Handler { + return must(SyslogNetHandler(net, addr, tag, fmtr)) +} === added directory 'src/gopkg.in/inconshreveable/log15.v2/term' === added file 'src/gopkg.in/inconshreveable/log15.v2/term/LICENSE' --- src/gopkg.in/inconshreveable/log15.v2/term/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_appengine.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_appengine.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_appengine.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package term + +// IsTty always returns false on AppEngine. +func IsTty(fd uintptr) bool { + return false +} === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_darwin.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_darwin.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_darwin.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
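Tying the log15 pieces together, a minimal wiring sketch for the handlers above (the "myapp" tag is invented for the example; SyslogHandler is excluded on Windows and Plan 9 by the build tag):

```go
package main

import (
	"os"

	log "gopkg.in/inconshreveable/log15.v2"
)

func main() {
	// Prefer syslog; fall back to stderr if the syslog daemon is unreachable.
	h, err := log.SyslogHandler("myapp", log.LogfmtFormat())
	if err != nil {
		h = log.StderrHandler
	}
	log.Root().SetHandler(h)

	// The package-level helpers log through the root logger configured above.
	log.Info("service started", "pid", os.Getpid())
}
```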
+ +package term + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_freebsd.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_freebsd.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_freebsd.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +package term + +import ( + "syscall" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. +type Termios struct { + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [20]uint8 + Ispeed uint32 + Ospeed uint32 +} === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_linux.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_linux.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_linux.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package term + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_notwindows.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_notwindows.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_notwindows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,20 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!appengine darwin freebsd openbsd + +package term + +import ( + "syscall" + "unsafe" +) + +// IsTty returns true if the given file descriptor is a terminal. +func IsTty(fd uintptr) bool { + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_openbsd.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_openbsd.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_openbsd.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +package term + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios === added file 'src/gopkg.in/inconshreveable/log15.v2/term/terminal_windows.go' --- src/gopkg.in/inconshreveable/log15.v2/term/terminal_windows.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/inconshreveable/log15.v2/term/terminal_windows.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package term + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTty returns true if the given file descriptor is a terminal. 
+func IsTty(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} === added directory 'src/gopkg.in/ini.v1' === added file 'src/gopkg.in/ini.v1/.gitignore' --- src/gopkg.in/ini.v1/.gitignore 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,4 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini === added file 'src/gopkg.in/ini.v1/LICENSE' --- src/gopkg.in/ini.v1/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

=== added file 'src/gopkg.in/ini.v1/Makefile'
--- src/gopkg.in/ini.v1/Makefile	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/ini.v1/Makefile	2016-03-22 15:18:22 +0000
@@ -0,0 +1,12 @@
+.PHONY: build test bench vet
+
+build: vet bench
+
+test:
+	go test -v -cover -race
+
+bench:
+	go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+	go vet

=== added file 'src/gopkg.in/ini.v1/README.md'
--- src/gopkg.in/ini.v1/README.md	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/ini.v1/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,632 @@
+ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+    go get gopkg.in/ini.v1
+
+To use with latest changes:
+
+    go get github.com/go-ini/ini
+
+Please add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, please apply the `-t` flag:
+
+    go get -t gopkg.in/ini.v1
+
+Please add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data in type `[]byte` or a file name with type `string`, and you can load **as many data sources as** you want. Passing any other type will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files and some of them may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a file becomes available by the time you call the `Reload` method, it will be loaded as usual.
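Putting the loading rules above into runnable form, a minimal sketch (the key names are invented for the demo) shows how a later source overwrites an earlier one:

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	base := []byte("mode = debug\nport = 8080")
	override := []byte("mode = release")

	// Later sources overwrite earlier ones key by key.
	cfg, err := ini.Load(base, override)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section("").Key("mode").String()) // release
	fmt.Println(cfg.Section("").Key("port").String()) // 8080
}
```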
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+For a shortcut to the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code could make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+_, err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+_, err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of keys and their corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
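A short, self-contained sketch of the section and key calls above (the section and key names are invented for the demo):

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg := ini.Empty()

	// Section() auto-creates missing sections, so no error handling is needed.
	sec := cfg.Section("server")
	if _, err := sec.NewKey("host", "localhost"); err != nil {
		panic(err)
	}

	fmt.Println(sec.HasKey("host"))   // true
	fmt.Println(cfg.SectionStrings()) // [DEFAULT server]
	fmt.Println(sec.KeyStrings())     // [host]
}
```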
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate the key value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+	if len(in) == 0 {
+		return "default"
+	}
+	return in
+})
+```
+
+If you do not want any auto-transformation (such as recursive read) for the values, you can get the raw value directly (this way you get much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as the default value
+// to use when the key is not found or fails to parse to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+	continuation lines?
+lots_of_lines = 1 \
+	2 \
+	3 \
+	4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that quotes (single or double) around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
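A runnable sketch of the typed accessors and `Must*` fallbacks described above (the keys are invented for the demo):

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("timeout = 30\nverbose = yes\nratio = oops"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")

	timeout, err := sec.Key("timeout").Int()
	fmt.Println(timeout, err) // 30 <nil>

	// Must* swallows the parse error and falls back to the given default.
	fmt.Println(sec.Key("verbose").MustBool(false)) // true ("yes" parses as true)
	fmt.Println(sec.Key("ratio").MustFloat64(1.0))  // 1 ("oops" fails to parse)
}
```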
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get a value from a set of given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value will be returned if the value of the key is not among the candidates you gave; the default value does not need to be one of the candidates.
+
+To validate a value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the zero value of the type for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save configuration is writing it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is writing to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
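A compact sketch combining the candidate, strict-parsing, and save helpers above (the keys are invented for the demo):

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/ini.v1"
)

func main() {
	cfg, err := ini.Load([]byte("level = warn\nports = 80, 443, oops"))
	if err != nil {
		panic(err)
	}
	sec := cfg.Section("")

	// "warn" is not a candidate, so the default "info" is returned.
	fmt.Println(sec.Key("level").In("info", []string{"debug", "info"}))

	// Strict parsing surfaces the bad entry instead of masking it.
	if _, err := sec.Key("ports").StrictInts(","); err != nil {
		fmt.Println("bad ports value:", err)
	}

	// Write the configuration to stdout with tab indentation.
	if _, err := cfg.WriteToIndent(os.Stdout, "\t"); err != nil {
		panic(err)
	}
}
```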
+## Advanced Usage
+
+### Recursive Values
+
+For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()     // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library will try its parent section, and so on, until there is no parent section left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and every section keeps its own counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
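The two lookup mechanisms above compose; a minimal runnable sketch (the key names are invented for the demo):

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

const data = `
NAME = ini
VERSION = v1

[package]
PATH = gopkg.in/%(NAME)s.%(VERSION)s

[package.sub]
`

func main() {
	cfg, err := ini.Load([]byte(data))
	if err != nil {
		panic(err)
	}

	// %(NAME)s and %(VERSION)s resolve against the default section, and the
	// child section falls back to [package] for the missing PATH key.
	fmt.Println(cfg.Section("package.sub").Key("PATH").String()) // gopkg.in/ini.v1
}
```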
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// Things can be simpler.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Just map a section? Fine.
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct; the field keeps its value if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't give me my file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string
+	None   []int
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts the name to the format `ALL_CAPS_UNDERSCORE`, then matches the section or key.
+- `TitleUnderscore`: converts the name to the format `title_underscore`, then matches the section or key.
+
+To use them:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library lets you read and write values, so it needs a lock to keep your data safe. But when you are very sure you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
+
+### Why another INI library?
+
+Many people are using my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+Making those changes meant breaking the API, so it was safer to keep the new code in a separate place and start using `gopkg.in` to version the package. (PS: the import path is shorter, too.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
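Rounding out the map/reflect discussion above, a self-contained sketch of `MapTo` with a pre-set default (the struct and keys are invented for the demo):

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

type Server struct {
	Host string
	Port int `ini:"port"`
}

func main() {
	srv := &Server{Port: 8080} // pre-set fields act as defaults

	// The "port" key is absent from the source, so Port keeps its value.
	if err := ini.MapTo(srv, []byte("Host = example.com")); err != nil {
		panic(err)
	}
	fmt.Println(srv.Host, srv.Port) // example.com 8080
}
```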
=== added file 'src/gopkg.in/ini.v1/ini.go'
--- src/gopkg.in/ini.v1/ini.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/ini.v1/ini.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,462 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	// Name for default section. You can use this constant or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+	DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+	_DEPTH_VALUES = 99
+	_VERSION      = "1.10.1"
+)
+
+// Version returns current package version literal.
+func Version() string {
+	return _VERSION
+}
+
+var (
+	// Delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows
+	// at package init time.
+	LineBreak = "\n"
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+	// Indicate whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+)
+
+func init() {
+	if runtime.GOOS == "windows" {
+		LineBreak = "\r\n"
+	}
+}
+
+func inSlice(str string, s []string) bool {
+	for _, v := range s {
+		if str == v {
+			return true
+		}
+	}
+	return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+	reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+	return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+	return nil
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+}
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+	// Should make things safe, but sometimes doesn't matter.
+	BlockMode bool
+	// Make sure data is safe in multiple goroutines.
+	lock sync.RWMutex
+
+	// Allow combination of multiple data sources.
+	dataSources []dataSource
+	// Actual data is stored here.
+	sections map[string]*Section
+
+	// To keep data in order.
+	sectionList []string
+
+	// Whether the parser should ignore nonexistent files or return an error.
+	looseMode bool
+
+	NameMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, looseMode bool) *File {
+	return &File{
+		BlockMode:   true,
+		dataSources: dataSources,
+		sections:    make(map[string]*Section),
+		sectionList: make([]string, 0, 10),
+		looseMode:   looseMode,
+	}
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	default:
+		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+	}
+}
+
+func loadSources(looseMode bool, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, looseMode)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data in []byte.
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return loadSources(false, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return loadSources(true, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore the error here; we know our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
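+// If a section with the same name already exists, the existing section is
+// returned instead of a duplicate. A minimal usage sketch (the section and
+// key names here are illustrative only):
+//
+//	sec, err := cfg.NewSection("server")
+//	if err == nil {
+//		sec.NewKey("addr", "127.0.0.1")
+//	}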
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	if f.BlockMode {
+		f.lock.RLock()
+		defer f.lock.RUnlock()
+	}
+
+	sec := f.sections[name]
+	if sec == nil {
+		return nil, fmt.Errorf("section '%s' does not exist", name)
+	}
+	return sec, nil
+}
+
+// Section assumes the named section exists and returns a newly created,
+// empty section when it does not.
+func (f *File) Section(name string) *Section {
+	sec, err := f.GetSection(name)
+	if err != nil {
+		// Note: this is OK because the only possible error is an empty section
+		// name, but if it were empty, this code path would not be reached.
+		sec, _ = f.NewSection(name)
+		return sec
+	}
+	return sec
+}
+
+// Sections returns the list of sections.
+func (f *File) Sections() []*Section {
+	sections := make([]*Section, len(f.sectionList))
+	for i := range f.sectionList {
+		sections[i] = f.Section(f.sectionList[i])
+	}
+	return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+	list := make([]string, len(f.sectionList))
+	copy(list, f.sectionList)
+	return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if len(name) == 0 {
+		name = DEFAULT_SECTION
+	}
+
+	for i, s := range f.sectionList {
+		if s == name {
+			f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+			delete(f.sections, name)
+			return
+		}
+	}
+}
+
+func (f *File) reload(s dataSource) error {
+	r, err := s.ReadCloser()
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+	for _, s := range f.dataSources {
+		if err = f.reload(s); err != nil {
+			// In loose mode, we create an empty default section for nonexistent files.
+			if os.IsNotExist(err) && f.looseMode {
+				f.parse(bytes.NewBuffer(nil))
+				continue
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+	ds, err := parseDataSource(source)
+	if err != nil {
+		return err
+	}
+	f.dataSources = append(f.dataSources, ds)
+	for _, s := range others {
+		ds, err = parseDataSource(s)
+		if err != nil {
+			return err
+		}
+		f.dataSources = append(f.dataSources, ds)
+	}
+	return f.Reload()
+}
+
+// WriteToIndent writes content into io.Writer with the given indentation.
+// If PrettyFormat has been set to true,
+// it will align "=" signs with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+	equalSign := "="
+	if PrettyFormat {
+		equalSign = " = "
+	}
+
+	// Use a buffer to make sure the target is safe until encoding finishes.
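+	// Only the final buf.WriteTo(w) at the end of this method touches the
+	// caller's writer, so an error midway never emits partial output.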
+	buf := bytes.NewBuffer(nil)
+	for i, sname := range f.sectionList {
+		sec := f.Section(sname)
+		if len(sec.Comment) > 0 {
+			if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+				sec.Comment = "; " + sec.Comment
+			}
+			if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		if i > 0 {
+			if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+				return 0, err
+			}
+		} else {
+			// Write nothing if default section is empty
+			if len(sec.keyList) == 0 {
+				continue
+			}
+		}
+
+		// Count and generate alignment length and buffer spaces
+		alignLength := 0
+		if PrettyFormat {
+			for i := 0; i < len(sec.keyList); i++ {
+				if len(sec.keyList[i]) > alignLength {
+					alignLength = len(sec.keyList[i])
+				}
+			}
+		}
+		alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+		for _, kname := range sec.keyList {
+			key := sec.Key(kname)
+			if len(key.Comment) > 0 {
+				if len(indent) > 0 && sname != DEFAULT_SECTION {
+					buf.WriteString(indent)
+				}
+				if key.Comment[0] != '#' && key.Comment[0] != ';' {
+					key.Comment = "; " + key.Comment
+				}
+				if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+					return 0, err
+				}
+			}
+
+			if len(indent) > 0 && sname != DEFAULT_SECTION {
+				buf.WriteString(indent)
+			}
+
+			switch {
+			case key.isAutoIncr:
+				kname = "-"
+			case strings.ContainsAny(kname, "\"=:"):
+				kname = "`" + kname + "`"
+			case strings.Contains(kname, "`"):
+				kname = `"""` + kname + `"""`
+			}
+			if _, err = buf.WriteString(kname); err != nil {
+				return 0, err
+			}
+
+			// Write out alignment spaces before "=" sign
+			if PrettyFormat {
+				buf.Write(alignSpaces[:alignLength-len(kname)])
+			}
+
+			val := key.value
+			// In case key value contains "\n", "`", "\"", "#" or ";"
+			if strings.ContainsAny(val, "\n`") {
+				val = `"""` + val + `"""`
+			} else if strings.ContainsAny(val, "#;") {
+				val = "`" + val + "`"
+			}
+			if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+				return 0, err
+			}
+		}
+
+		// Put a line between sections
+		if _, err = buf.WriteString(LineBreak); err != nil {
+			return 0, err
+		}
+	}
+
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: os.Create truncates any existing file, so it's safer to save to a
+	// temporary location first and rename it after we're done.
+	tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+	defer os.Remove(tmpPath)
+
+	fw, err := os.Create(tmpPath)
+	if err != nil {
+		return err
+	}
+
+	if _, err = f.WriteToIndent(fw, indent); err != nil {
+		fw.Close()
+		return err
+	}
+	fw.Close()
+
+	// Remove old file and rename the new one.
+	os.Remove(filename)
+	return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to the file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+} === added file 'src/gopkg.in/ini.v1/ini_test.go' --- src/gopkg.in/ini.v1/ini_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/ini_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,277 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License.
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon ; Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. +""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +; Comment before the section +[comments] ; This is a comment for the section too +; Comment before key +key = "value" +key2 = "value2" ; This is a comment for key2 +key3 = "one", "two", "three" + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +true = 2+3=5 +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +` + "`" + `"6+6"` + "`" + ` = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + f, err := Load([]byte(_CONF_DATA), "testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + f, err := Load("testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + + _, err = Load([]byte("[")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + }) + }) +} + +func Test_LooseLoad(t *testing.T) { + Convey("Loose load from data sources", t, func() { + Convey("Loose load mixed with nonexistent file", func() { + cfg, err := LooseLoad("testdata/404.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + var fake struct { + Name string `ini:"name"` + } + So(cfg.MapTo(&fake), ShouldBeNil) + + cfg, err = LooseLoad([]byte("name=Unknwon"), "testdata/404.ini") + So(err, ShouldBeNil) + So(cfg.Section("").Key("name").String(), ShouldEqual, "Unknwon") + So(cfg.MapTo(&fake), ShouldBeNil) + So(fake.Name, ShouldEqual, "Unknwon") + }) + }) + +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_WriteTo(t *testing.T) { + Convey("Write to somewhere", t, func() { + var buf bytes.Buffer + cfg := Empty() + cfg.WriteTo(&buf) + }) +} + +func Test_File_SaveTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + }) +} + +// Helpers for slice tests. 
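+// Each helper asserts the length first and then element-by-element equality;
+// they rely on being called inside a Convey block, since So needs an active
+// test context.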
+func float64sEqual(values []float64, expected ...float64) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func intsEqual(values []int, expected ...int) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func int64sEqual(values []int64, expected ...int64) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func uintsEqual(values []uint, expected ...uint) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func uint64sEqual(values []uint64, expected ...uint64) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i], ShouldEqual, v)
+	}
+}
+
+func timesEqual(values []time.Time, expected ...time.Time) {
+	So(values, ShouldHaveLength, len(expected))
+	for i, v := range expected {
+		So(values[i].String(), ShouldEqual, v.String())
+	}
+} === added file 'src/gopkg.in/ini.v1/key.go' --- src/gopkg.in/ini.v1/key.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/key.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,616 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s          *Section
+	Comment    string
+	name       string
+	value      string
+	isAutoIncr bool
+}
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purposes.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	val := k.value
+	if strings.Index(val, "%") == -1 {
+		return val
+	}
+
+	for i := 0; i < _DEPTH_VALUES; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := strings.TrimLeft(vr, "%(")
+		noption = strings.TrimRight(noption, ")s")
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute the variable with the new value.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// Validate accepts a validation function that can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+	return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
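+// For example, parseBool("on") returns (true, nil), while parseBool("2")
+// returns an error; this is a superset of what strconv.ParseBool accepts.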
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. 
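+// With no explicit default, a failed parse falls through to Duration's zero
+// result, since time.ParseDuration yields 0 on error.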
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. 
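+// For example, with a key value of "1.25", k.RangeFloat64(0, 1, 2) yields
+// 1.25, while k.RangeFloat64(5, 0, 1) yields the default 5 (out of range).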
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. 
+func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. 
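+// The two flags select among the public variants: addInvalid keeps a zero
+// value for unparsable items (Float64s), returnOnInvalid fails on the first
+// bad item (StrictFloat64s), and with both false bad items are skipped
+// (ValidFloat64s). The other typed getters below follow the same scheme.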
+func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} === added file 'src/gopkg.in/ini.v1/key_test.go' --- src/gopkg.in/ini.v1/key_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/key_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,518 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Key(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + 
So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + 
So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").Ints(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").Int64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").Uints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").Uint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get valid values into slice", func() { + sec := cfg.Section("array") + vals1 := sec.Key("FLOAT64S").ValidFloat64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").ValidInts(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").ValidInt64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").ValidUints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").ValidUint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").ValidTimes(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get values one type into slice of another type", func() { + sec := cfg.Section("array") + vals1 := sec.Key("STRINGS").ValidFloat64s(",") + So(vals1, ShouldBeEmpty) + + vals2 := sec.Key("STRINGS").ValidInts(",") + So(vals2, ShouldBeEmpty) + + vals3 := sec.Key("STRINGS").ValidInt64s(",") + So(vals3, ShouldBeEmpty) + + vals4 := sec.Key("STRINGS").ValidUints(",") + So(vals4, ShouldBeEmpty) + + vals5 := sec.Key("STRINGS").ValidUint64s(",") + So(vals5, ShouldBeEmpty) + + vals6 := sec.Key("STRINGS").ValidTimes(",") + So(vals6, ShouldBeEmpty) + }) + + Convey("Get valid values into slice without errors", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",") + So(err, ShouldBeNil) + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2, err := sec.Key("INTS").StrictInts(",") + So(err, ShouldBeNil) + intsEqual(vals2, 1, 2, 3) + + vals3, err := sec.Key("INTS").StrictInt64s(",") + So(err, ShouldBeNil) + int64sEqual(vals3, 1, 2, 3) + + vals4, err := sec.Key("UINTS").StrictUints(",") + So(err, ShouldBeNil) + uintsEqual(vals4, 1, 2, 3) + + vals5, err := sec.Key("UINTS").StrictUint64s(",") + So(err, ShouldBeNil) + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6, err := sec.Key("TIMES").StrictTimes(",") + So(err, ShouldBeNil) + timesEqual(vals6, t, t, t) + }) + + Convey("Get invalid values into slice", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("STRINGS").StrictFloat64s(",") + So(vals1, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals2, err := sec.Key("STRINGS").StrictInts(",") + So(vals2, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals3, err := sec.Key("STRINGS").StrictInt64s(",") + So(vals3, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals4, err := sec.Key("STRINGS").StrictUints(",") + So(vals4, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals5, err := sec.Key("STRINGS").StrictUint64s(",") + So(vals5, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals6, err := sec.Key("STRINGS").StrictTimes(",") + So(vals6, ShouldBeEmpty) + So(err, ShouldNotBeNil) + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := 
cfg.Section("author").Key("NAME") + k.SetValue("æ— é—»") + So(k.String(), ShouldEqual, "æ— é—»") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key (backwards compatible)", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.HasKey("UNUSED_KEY") + haskey2 := sec.HasKey("CLONE_URL") + haskey3 := sec.HasKey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that not exists", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) + + Convey("Test key hash clone", t, func() { + cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) + So(err, ShouldBeNil) + for _, v := range cfg.Section("").KeysHash() { + So(len(v), ShouldBeGreaterThan, 0) + } + }) + + Convey("Key has empty value", t, func() { + _conf := `key1= +key2= ; comment` + cfg, err := Load([]byte(_conf)) + So(err, ShouldBeNil) + So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) + }) +} + +func newTestFile(block bool) *File { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = block + return c +} + +func Benchmark_Key_Value(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_Direct(b *testing.B) { + c := newTestFile(true) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) { + c := newTestFile(false) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + 
key.Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} + +func Benchmark_Key_SetValue_VisSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").SetValue("10") + } +} === added file 'src/gopkg.in/ini.v1/parser.go' --- src/gopkg.in/ini.v1/parser.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/parser.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,312 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of BOM-UTF8 format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + p.buf.Read(mask) + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
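+	// Three quote styles are recognized below: `name`, "name", and the
+	// triple-quoted """name""" form (the triple form is checked first).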
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, fmt.Errorf("key-value delimiter not found: %s", line) + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines + if line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
+	section, _ := f.NewSection(DEFAULT_SECTION)
+
+	var line []byte
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we keep the trailing line break here because it is
+			// needed when a second comment line is appended; the comment
+			// is cleaned once at the end, when it is assigned to a value.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			closeIdx := bytes.IndexByte(line, ']')
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			section, err = f.NewSection(string(line[1:closeIdx]))
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-increment counter and comments
+			p.comment.Reset()
+			p.count = 1
+			continue
+		}
+
+		kname, offset, err := readKeyName(line)
+		if err != nil {
+			return err
+		}
+
+		// Auto increment.
+		isAutoIncr := false
+		if kname == "-" {
+			isAutoIncr = true
+			kname = "#" + strconv.Itoa(p.count)
+			p.count++
+		}
+
+		key, err := section.NewKey(kname, "")
+		if err != nil {
+			return err
+		}
+		key.isAutoIncr = isAutoIncr
+
+		value, err := p.readValue(line[offset:])
+		if err != nil {
+			return err
+		}
+		key.SetValue(value)
+		key.Comment = strings.TrimSpace(p.comment.String())
+		p.comment.Reset()
+	}
+	return nil
+} === added file 'src/gopkg.in/ini.v1/section.go' --- src/gopkg.in/ini.v1/section.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/section.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,177 @@ +// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// Section represents a config section.
+type Section struct {
+	f        *File
+	Comment  string
+	name     string
+	keys     map[string]*Key
+	keyList  []string
+	keysHash map[string]string
+}
+
+func newSection(f *File, name string) *Section {
+	return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+	return s.name
+}
+
+// NewKey creates a new key in given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new key: empty key name")
+	}
+
+	if s.f.BlockMode {
+		s.f.lock.Lock()
+		defer s.f.lock.Unlock()
+	}
+
+	if inSlice(name, s.keyList) {
+		s.keys[name].value = val
+		return s.keys[name], nil
+	}
+
+	s.keyList = append(s.keyList, name)
+	s.keys[name] = &Key{s, "", name, val, false}
+	s.keysHash[name] = val
+	return s.keys[name], nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+	// FIXME: change to section level lock?
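+	// Example of the parent-section fallback further below: a key looked
+	// up in section "a.b.c" is also searched for in "a.b" and then "a"
+	// when it is not set locally (an illustrative reading of the loop).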
+ if s.f.BlockMode { + s.f.lock.RLock() + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} === added file 'src/gopkg.in/ini.v1/section_test.go' --- src/gopkg.in/ini.v1/section_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/section_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,47 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Section(t *testing.T) { + Convey("Test CRD sections", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) +} === added file 'src/gopkg.in/ini.v1/struct.go' --- src/gopkg.in/ini.v1/struct.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/struct.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,351 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + if err == nil { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + vals := key.Strings(delim) + numVals := len(vals) + if numVals == 0 { + return nil + } + + sliceOf := field.Type().Elem().Kind() + + var times []time.Time + if sliceOf == reflectTime { + times = key.Times(delim) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(times[i])) + default: + slice.Index(i).Set(reflect.ValueOf(vals[i])) + } + } + field.Set(slice) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + fieldName := s.parseFieldName(tpField.Name, tag) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. 
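+//
+// A minimal MapToWithMapper usage sketch (names assumed for illustration):
+//
+//	type config struct{ PackageName string }
+//	var c config
+//	err := MapToWithMapper(&c, TitleUnderscore, []byte("package_name = ini"))
+//	// c.PackageName == "ini" on success.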
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+	cfg, err := Load(source, others...)
+	if err != nil {
+		return err
+	}
+	cfg.NameMapper = mapper
+	return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+	return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectWithProperType does the opposite of setWithProperType: it writes
+// the field's value back into the given key.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+	switch t.Kind() {
+	case reflect.String:
+		key.SetValue(field.String())
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+		reflect.Float64,
+		reflectTime:
+		key.SetValue(fmt.Sprint(field))
+	case reflect.Slice:
+		vals := field.Slice(0, field.Len())
+		if field.Len() == 0 {
+			return nil
+		}
+
+		var buf bytes.Buffer
+		isTime := fmt.Sprint(field.Type()) == "[]time.Time"
+		for i := 0; i < field.Len(); i++ {
+			if isTime {
+				buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
+			} else {
+				buf.WriteString(fmt.Sprint(vals.Index(i)))
+			}
+			buf.WriteString(delim)
+		}
+		key.SetValue(buf.String()[:buf.Len()-1])
+	default:
+		return fmt.Errorf("unsupported type '%s'", t)
+	}
+	return nil
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, tag)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct) {
+			// Note: the only possible error here is that the section does not exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: same reasoning as for the section above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+			return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+		}
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
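+//
+// For example (an illustrative sketch):
+//
+//	cfg := Empty()
+//	err := ReflectFrom(cfg, &struct {
+//		Name string
+//	}{"ini"})
+//	// cfg now holds key "Name" = "ini" in the default section.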
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} === added file 'src/gopkg.in/ini.v1/struct_test.go' --- src/gopkg.in/ini.v1/struct_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/struct_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,239 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Note = Hello world! + +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + 
Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + cfg.SaveTo("testdata/conf_reflect.ini") + + Convey("Reflect from non-point struct", func() { + So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore, []byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} === added directory 'src/gopkg.in/ini.v1/testdata' === added file 'src/gopkg.in/ini.v1/testdata/conf.ini' --- src/gopkg.in/ini.v1/testdata/conf.ini 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/ini.v1/testdata/conf.ini 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +[author] +E-MAIL = u@gogs.io \ No newline at end of file === added directory 'src/gopkg.in/juju/blobstore.v2' === added file 'src/gopkg.in/juju/blobstore.v2/LICENSE' --- src/gopkg.in/juju/blobstore.v2/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. 
+ +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. === added file 'src/gopkg.in/juju/blobstore.v2/README.md' --- src/gopkg.in/juju/blobstore.v2/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,4 @@ +juju/blobstore +============== + +This package provides a Mongo GridFS-backed blob storage engine. === added file 'src/gopkg.in/juju/blobstore.v2/export_test.go' --- src/gopkg.in/juju/blobstore.v2/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +package blobstore + +var ( + NewResourceCatalog = newResourceCatalog + NewResource = newResource + TxnRunner = &txnRunner + PutResourceTxn = &putResourceTxn + RequestExpiry = &requestExpiry + AfterFunc = &afterFunc +) + +func GetResourceCatalog(ms ManagedStorage) ResourceCatalog { + return ms.(*managedStorage).resourceCatalog +} + +func PutManagedResource(ms ManagedStorage, managedResource ManagedResource, id string) (string, error) { + return ms.(*managedStorage).putManagedResource(managedResource, id) +} + +func ResourceStoragePath(ms ManagedStorage, bucketUUID, user, resourcePath string) (string, error) { + return ms.(*managedStorage).resourceStoragePath(bucketUUID, user, resourcePath) +} + +func RequestQueueLength(ms ManagedStorage) int { + return len(ms.(*managedStorage).queuedRequests) +} === added file 'src/gopkg.in/juju/blobstore.v2/gridfs.go' --- src/gopkg.in/juju/blobstore.v2/gridfs.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/gridfs.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package blobstore + +import ( + "io" + + "github.com/juju/errors" + "github.com/juju/loggo" + "gopkg.in/mgo.v2" +) + +var logger = loggo.GetLogger("juju.storage") + +type gridFSStorage struct { + dbName string + namespace string + session *mgo.Session +} + +var _ ResourceStorage = (*gridFSStorage)(nil) + +// NewGridFS returns a ResourceStorage instance backed by a mongo GridFS. +// namespace is used to segregate different sets of data. +func NewGridFS(dbName, namespace string, session *mgo.Session) ResourceStorage { + return &gridFSStorage{ + dbName: dbName, + namespace: namespace, + session: session, + } +} + +func (g *gridFSStorage) db() *mgo.Database { + return g.session.DB(g.dbName) +} + +func (g *gridFSStorage) gridFS() *mgo.GridFS { + return g.db().GridFS(g.namespace) +} + +// Get is defined on ResourceStorage. +func (g *gridFSStorage) Get(path string) (io.ReadCloser, error) { + file, err := g.gridFS().Open(path) + if err != nil { + return nil, errors.Annotatef(err, "failed to open GridFS file %q", path) + } + return file, nil +} + +// Put is defined on ResourceStorage. +func (g *gridFSStorage) Put(path string, r io.Reader, length int64) (checksum string, err error) { + file, err := g.gridFS().Create(path) + if err != nil { + return "", errors.Annotatef(err, "failed to create GridFS file %q", path) + } + defer func() { + if err != nil { + file.Close() + if removeErr := g.Remove(path); removeErr != nil { + logger.Warningf("error cleaning up after failed write: %v", removeErr) + } + } + }() + if _, err = io.CopyN(file, r, length); err != nil { + return "", errors.Annotatef(err, "failed to write data") + } + if err = file.Close(); err != nil { + return "", errors.Annotatef(err, "failed to flush data") + } + return file.MD5(), nil +} + +// Remove is defined on ResourceStorage. +func (g *gridFSStorage) Remove(path string) error { + return g.gridFS().Remove(path) +} === added file 'src/gopkg.in/juju/blobstore.v2/gridfs_test.go' --- src/gopkg.in/juju/blobstore.v2/gridfs_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/gridfs_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,118 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
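+
+// An illustrative use of the GridFS-backed ResourceStorage defined in
+// gridfs.go (mgo session setup elided; the database and namespace names
+// here are assumed):
+//
+//	stor := blobstore.NewGridFS("juju", "test", session)
+//	sum, err := stor.Put("/path/to/file", strings.NewReader("hi"), 2)
+//	rdr, err := stor.Get("/path/to/file")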
+ +package blobstore_test + +import ( + "crypto/md5" + "encoding/hex" + "io/ioutil" + "strings" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" + "gopkg.in/juju/blobstore.v2" +) + +var _ = gc.Suite(&gridfsSuite{}) + +type gridfsSuite struct { + testing.IsolationSuite + testing.MgoSuite + stor blobstore.ResourceStorage +} + +func (s *gridfsSuite) SetUpSuite(c *gc.C) { + s.IsolationSuite.SetUpSuite(c) + s.MgoSuite.SetUpSuite(c) +} + +func (s *gridfsSuite) TearDownSuite(c *gc.C) { + s.MgoSuite.TearDownSuite(c) + s.IsolationSuite.TearDownSuite(c) +} + +func (s *gridfsSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.MgoSuite.SetUpTest(c) + s.stor = blobstore.NewGridFS("juju", "test", s.Session) +} + +func (s *gridfsSuite) TearDownTest(c *gc.C) { + s.MgoSuite.TearDownTest(c) + s.IsolationSuite.TearDownTest(c) +} + +func assertPut(c *gc.C, stor blobstore.ResourceStorage, path, data string) { + r := strings.NewReader(data) + checksum, err := stor.Put(path, r, int64(len(data))) + c.Assert(err, gc.IsNil) + md5Hash := md5.New() + _, err = md5Hash.Write([]byte(data)) + c.Assert(err, gc.IsNil) + c.Assert(checksum, gc.Equals, hex.EncodeToString(md5Hash.Sum(nil))) + assertGet(c, stor, path, data) +} + +func (s *gridfsSuite) TestPut(c *gc.C) { + assertPut(c, s.stor, "/path/to/file", "hello world") +} + +func (s *gridfsSuite) TestPutSameFileOverwrites(c *gc.C) { + assertPut(c, s.stor, "/path/to/file", "hello world") + assertPut(c, s.stor, "/path/to/file", "hello again") +} + +func assertGet(c *gc.C, stor blobstore.ResourceStorage, path, expected string) { + r, err := stor.Get(path) + c.Assert(err, gc.IsNil) + defer r.Close() + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + c.Assert(data, gc.DeepEquals, []byte(expected)) +} + +func (s *gridfsSuite) TestGetNonExistent(c *gc.C) { + _, err := s.stor.Get("missing") + c.Assert(err, gc.ErrorMatches, `failed to open GridFS file "missing": not found`) +} + +func (s *gridfsSuite) TestGet(c *gc.C) { + data := "hello world" + r := strings.NewReader(data) + _, err := s.stor.Put("/path/to/file", r, int64(len(data))) + c.Assert(err, gc.IsNil) + assertGet(c, s.stor, "/path/to/file", data) +} + +func (s *gridfsSuite) TestRemove(c *gc.C) { + path := "/path/to/file" + assertPut(c, s.stor, path, "hello world") + err := s.stor.Remove(path) + c.Assert(err, gc.IsNil) + _, err = s.stor.Get(path) + c.Assert(err, gc.ErrorMatches, `failed to open GridFS file "/path/to/file": not found`) +} + +func (s *gridfsSuite) TestRemoveNonExistent(c *gc.C) { + err := s.stor.Remove("/path/to/file") + c.Assert(err, gc.IsNil) +} + +func (s *gridfsSuite) TestNamespaceSeparation(c *gc.C) { + anotherStor := blobstore.NewGridFS("juju", "another", s.Session) + path := "/path/to/file" + assertPut(c, anotherStor, path, "hello world") + _, err := s.stor.Get(path) + c.Assert(err, gc.ErrorMatches, `failed to open GridFS file "/path/to/file": not found`) +} + +func (s *gridfsSuite) TestNamespaceSeparationRemove(c *gc.C) { + anotherStor := blobstore.NewGridFS("juju", "another", s.Session) + path := "/path/to/file" + assertPut(c, s.stor, path, "hello world") + assertPut(c, anotherStor, path, "hello again") + err := s.stor.Remove(path) + c.Assert(err, gc.IsNil) + assertGet(c, anotherStor, "/path/to/file", "hello again") +} === added file 'src/gopkg.in/juju/blobstore.v2/interface.go' --- src/gopkg.in/juju/blobstore.v2/interface.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/interface.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,93 @@ +// Copyright 2014 Canonical Ltd. 
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package blobstore
+
+import (
+	"io"
+)
+
+// ResourceStorage instances save and retrieve data from an underlying storage implementation.
+type ResourceStorage interface {
+	// Get returns a reader for the resource located at path.
+	Get(path string) (io.ReadCloser, error)
+
+	// Put writes data from the specified reader to path and returns a checksum of the data written.
+	Put(path string, r io.Reader, length int64) (checksum string, err error)
+
+	// Remove deletes the data at the specified path.
+	Remove(path string) error
+}
+
+// ResourceCatalog instances persist Resources.
+// Resources with the same hash values are not duplicated; instead a reference count is incremented.
+// Similarly, when a Resource is removed, the reference count is decremented. When the reference
+// count reaches zero, the Resource is deleted.
+type ResourceCatalog interface {
+	// Get fetches a Resource with the given id.
+	Get(id string) (*Resource, error)
+
+	// Find returns the resource id for the Resource with the given hash.
+	Find(hash string) (id string, err error)
+
+	// Put ensures a Resource entry exists for the given hash,
+	// returning the id and path recorded by UploadComplete.
+	// If UploadComplete has not been called, path will be empty.
+	//
+	// If the Resource entry exists, its reference count is incremented,
+	// otherwise a new entry is created with a reference count of 1.
+	Put(hash string, length int64) (id, path string, err error)
+
+	// UploadComplete records that the underlying resource described by
+	// the Resource entry with id is now fully uploaded to the specified
+	// storage path, and the resource is available for use. If another
+	// uploader already recorded a path, then UploadComplete will return
+	// an error satisfying juju/errors.IsAlreadyExists.
+	UploadComplete(id, path string) error
+
+	// Remove decrements the reference count for a Resource with the given id, deleting it
+	// if the reference count reaches zero. The path of the Resource is returned.
+	// If the Resource is deleted, wasDeleted is returned as true.
+	Remove(id string) (wasDeleted bool, path string, err error)
+}
+
+// ManagedStorage instances persist data for a bucket, for a user, or globally.
+// (Only bucket storage is currently implemented.)
+type ManagedStorage interface {
+	// GetForBucket returns a reader for data at path, namespaced to the bucket.
+	// If the data is still being uploaded and is not fully written yet,
+	// an ErrUploadPending error is returned. This means the path is valid but the caller
+	// should try again to retrieve the data.
+	GetForBucket(bucketUUID, path string) (r io.ReadCloser, length int64, err error)
+
+	// PutForBucket stores data from reader at path, namespaced to the bucket.
+	//
+	// PutForBucket is equivalent to PutForBucketAndCheckHash with an empty
+	// hash string.
+	PutForBucket(bucketUUID, path string, r io.Reader, length int64) error
+
+	// PutForBucketAndCheckHash is the same as PutForBucket
+	// except that it also checks that the content matches the provided
+	// hash. The hash must be hex-encoded SHA-384.
+	//
+	// If checkHash is empty, then the hash check is elided.
+	//
+	// If length is < 0, then the reader will be consumed until EOF.
+	PutForBucketAndCheckHash(bucketUUID, path string, r io.Reader, length int64, checkHash string) error
+
+	// RemoveForBucket deletes data at path, namespaced to the bucket.
+	RemoveForBucket(bucketUUID, path string) error
+
+	// PutForBucketRequest requests that data, which may already exist in storage,
+	// be saved at path, namespaced to the bucket. It allows callers who can
+	// demonstrate proof of ownership of the data to store a reference to it without
+	// having to upload it all. If no such data exists, a NotFound error is returned
+	// and a call to PutForBucket is required. If matching data is found, the caller
+	// is returned a response indicating the random byte range for which they must
+	// provide a checksum to complete the process.
+	PutForBucketRequest(bucketUUID, path string, hash string) (*RequestResponse, error)
+
+	// ProofOfAccessResponse is called to respond to a Put..Request call in order to
+	// prove ownership of data for which a storage reference is created.
+	ProofOfAccessResponse(putResponse) error
+} === added file 'src/gopkg.in/juju/blobstore.v2/managedstorage.go' --- src/gopkg.in/juju/blobstore.v2/managedstorage.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/managedstorage.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,614 @@ +// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package blobstore
+
+import (
+	"crypto/sha512"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/mgo.v2/txn"
+
+	jujutxn "github.com/juju/txn"
+)
+
+// ManagedResource is a catalog entry for stored data.
+// The data may be associated with a specified bucket and/or user.
+// The data is logically considered to be stored at the specified path.
+type ManagedResource struct {
+	BucketUUID string
+	User       string
+	Path       string
+}
+
+// managedResourceDoc is the persistent representation of a ManagedResource.
+type managedResourceDoc struct {
+	Id         string `bson:"_id"`
+	BucketUUID string `bson:"bucketuuid"`
+	User       string `bson:"user"`
+	Path       string `bson:"path"`
+	ResourceId string `bson:"resourceid"`
+}
+
+// managedStorage is a mongo-backed ManagedStorage implementation.
+type managedStorage struct {
+	resourceStore             ResourceStorage
+	resourceCatalog           ResourceCatalog
+	managedResourceCollection *mgo.Collection
+	db                        *mgo.Database
+
+	// The following attributes are used to manage the processing
+	// of put requests based on proof of access.
+	requestMutex   sync.Mutex
+	nextRequestId  int64
+	queuedRequests map[int64]PutRequest
+}
+
+var _ ManagedStorage = (*managedStorage)(nil)
+
+// newManagedResourceDoc constructs a managedResourceDoc from a ManagedResource and resource id.
+// This is used when writing new data to the managed storage catalog.
+func newManagedResourceDoc(r ManagedResource, resourceId string) managedResourceDoc {
+	return managedResourceDoc{
+		Id:         r.Path,
+		ResourceId: resourceId,
+		Path:       r.Path,
+		BucketUUID: r.BucketUUID,
+		User:       r.User,
+	}
+}
+
+const (
+	// managedResourceCollection is the name of the collection
+	// which stores the managedResourceDoc records.
+	managedResourceCollection = "managedStoredResources"
+)
+
+// NewManagedStorage creates a new ManagedStorage using the transaction runner,
+// storing resource entries in the specified database, and resource data in the
+// specified resource storage.
+func NewManagedStorage(db *mgo.Database, rs ResourceStorage) ManagedStorage {
+	// Ensure random number generator used to calculate checksum byte range is seeded.
+ rand.Seed(int64(time.Now().Nanosecond())) + ms := &managedStorage{ + resourceStore: rs, + resourceCatalog: newResourceCatalog(db), + db: db, + queuedRequests: make(map[int64]PutRequest), + } + ms.managedResourceCollection = db.C(managedResourceCollection) + ms.managedResourceCollection.EnsureIndex(mgo.Index{Key: []string{"path"}, Unique: true}) + return ms +} + +// resourceStoragePath returns the full path used to store a resource with resourcePath +// in the specified bucket for the specified user. +func (ms *managedStorage) resourceStoragePath(bucketUUID, user, resourcePath string) (string, error) { + // No bucketUUID or user should contain "/" but we perform a sanity check just in case. + if strings.Index(bucketUUID, "/") >= 0 { + return "", errors.Errorf("bucket UUID %q cannot contain %q", bucketUUID, "/") + } + if strings.Index(user, "/") >= 0 { + return "", errors.Errorf("user %q cannot contain %q", user, "/") + } + storagePath := resourcePath + if user != "" { + storagePath = path.Join("users", user, storagePath) + } + if bucketUUID != "" { + storagePath = path.Join("buckets", bucketUUID, storagePath) + } + if user == "" && bucketUUID == "" { + storagePath = path.Join("global", storagePath) + } + return storagePath, nil +} + +// preprocessUpload pulls in data from the reader, storing it in a temp file and +// calculating the sha384 checksum. +// The caller is expected to remove the temporary file if and only if we return a nil error. +func (ms *managedStorage) preprocessUpload(r io.Reader, length int64) ( + f *os.File, n int64, hash string, err error, +) { + sha384hash := sha512.New384() + // Set up a chain of readers to pull in the data and calculate the checksum. + rdr := io.TeeReader(r, sha384hash) + f, err = ioutil.TempFile(os.TempDir(), "juju-resource") + if err != nil { + return nil, -1, "", err + } + tempFilename := f.Name() + // Add a cleanup function to remove the data file if we exit with an error. + defer func() { + if err != nil { + f.Close() + os.Remove(tempFilename) + } + }() + if length >= 0 { + rdr = &io.LimitedReader{rdr, length} + } + // Write the data to a temp file. + length, err = io.Copy(f, rdr) + if err != nil { + return nil, -1, "", err + } + // Reset the file so when we return it, it can be read from to get the data. + _, err = f.Seek(0, 0) + if err != nil { + return nil, -1, "", err + } + return f, length, fmt.Sprintf("%x", sha384hash.Sum(nil)), nil +} + +// GetForBucket is defined on the ManagedStorage interface. +func (ms *managedStorage) GetForBucket(bucketUUID, path string) (io.ReadCloser, int64, error) { + managedPath, err := ms.resourceStoragePath(bucketUUID, "", path) + if err != nil { + return nil, 0, err + } + var doc managedResourceDoc + if err := ms.managedResourceCollection.Find(bson.D{{"path", managedPath}}).One(&doc); err != nil { + if err == mgo.ErrNotFound { + return nil, 0, errors.NotFoundf("resource at path %q", managedPath) + } + return nil, 0, errors.Annotatef(err, "cannot load record for resource with path %q", managedPath) + } + return ms.getResource(doc.ResourceId, managedPath) +} + +// getResource returns a reader for the resource with the given resource id. 
+func (ms *managedStorage) getResource(resourceId string, path string) (io.ReadCloser, int64, error) {
+	r, err := ms.resourceCatalog.Get(resourceId)
+	if err == ErrUploadPending {
+		return nil, 0, err
+	} else if err != nil {
+		return nil, 0, errors.Annotatef(err, "cannot load catalog entry for resource with path %q", path)
+	}
+	rdr, err := ms.resourceStore.Get(r.Path)
+	return rdr, r.Length, err
+}
+
+// cleanupResourceCatalog is used to delete a resource catalog record if a put operation fails.
+func cleanupResourceCatalog(rc ResourceCatalog, id string, err *error) {
+	if *err == nil || errors.Cause(*err) == ErrUploadPending {
+		return
+	}
+	logger.Warningf("cleaning up resource catalog after failed put")
+	_, _, removeErr := rc.Remove(id)
+	if removeErr != nil && !errors.IsNotFound(removeErr) {
+		finalErr := errors.Annotatef(*err, "cannot clean up after failed storage operation because: %v", removeErr)
+		*err = finalErr
+	}
+}
+
+// cleanupResource is used to delete a resource blob if a put operation fails.
+func cleanupResource(rs ResourceStorage, resourcePath string, err *error) {
+	if *err == nil {
+		return
+	}
+	logger.Warningf("cleaning up resource storage after failed put")
+	removeErr := rs.Remove(resourcePath)
+	if removeErr != nil && !errors.IsNotFound(removeErr) {
+		finalErr := errors.Annotatef(*err, "cannot clean up after failed storage operation because: %v", removeErr)
+		*err = finalErr
+	}
+}
+
+// PutForBucketAndCheckHash is defined on the ManagedStorage interface.
+func (ms *managedStorage) PutForBucketAndCheckHash(bucketUUID, path string, r io.Reader, length int64, checkHash string) error {
+	return ms.putForEnvironment(bucketUUID, path, r, length, checkHash)
+}
+
+// PutForBucket is defined on the ManagedStorage interface.
+func (ms *managedStorage) PutForBucket(bucketUUID, path string, r io.Reader, length int64) error {
+	return ms.putForEnvironment(bucketUUID, path, r, length, "")
+}
+
+// putForEnvironment is the internal implementation for both of the above
+// methods. It checks the hash if checkHash is non-empty.
+func (ms *managedStorage) putForEnvironment(bucketUUID, path string, r io.Reader, length int64, checkHash string) (putError error) {
+	dataFile, length, hash, err := ms.preprocessUpload(r, length)
+	if err != nil {
+		return errors.Annotate(err, "cannot calculate data checksums")
+	}
+	// Remove the data file when we're done.
+	defer func() {
+		dataFile.Close()
+		os.Remove(dataFile.Name())
+	}()
+	if checkHash != "" && checkHash != hash {
+		return errors.New("hash mismatch")
+	}
+	resourceId, resourcePath, err := ms.resourceCatalog.Put(hash, length)
+	if err != nil {
+		return errors.Annotate(err, "cannot update resource catalog")
+	}
+
+	logger.Debugf("resource catalog entry created with id %q", resourceId)
+	// If there's an error saving the resource data, ensure the resource catalog is cleaned up.
+	defer cleanupResourceCatalog(ms.resourceCatalog, resourceId, &putError)
+
+	managedPath, err := ms.resourceStoragePath(bucketUUID, "", path)
+	if err != nil {
+		return err
+	}
+
+	// Newly added resource data needs to be saved to the storage.
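+	// (When the catalog returned a non-empty resourcePath, identical data
+	// is already stored and only the reference count was bumped, so the
+	// upload below is skipped.)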
+ if resourcePath == "" { + uuid, err := utils.NewUUID() + if err != nil { + return errors.Annotate(err, "cannot generate UUID to store resource") + } + resourcePath = uuid.String() + + _, err = ms.resourceStore.Put(resourcePath, dataFile, length) + if err != nil { + return errors.Annotatef(err, "cannot add resource %q to store at storage path %q", managedPath, resourcePath) + } + + // If there's an error from here on, we need to ensure the saved resource data is cleaned up. + defer cleanupResource(ms.resourceStore, resourcePath, &putError) + err = ms.resourceCatalog.UploadComplete(resourceId, resourcePath) + if errors.IsAlreadyExists(err) { + // Another client uploaded the resource and recorded it in the + // catalog before us, so remove the resource we just stored. + if err := ms.resourceStore.Remove(resourcePath); err != nil { + // This is not fatal, there's nothing we can do about it. + logger.Errorf( + "cannot remove already-uploaded duplicate resource from storage at %q", + resourcePath, + ) + } + } else if err != nil { + return errors.Annotatef(err, "cannot mark resource %q as upload complete", managedPath) + } + } + // Resource data is saved, resource catalog entry is created/updated, now write the + // managed storage entry. + return ms.putResourceReference(bucketUUID, managedPath, resourceId) +} + +// putResourceReference saves a managed resource record for the given path and resource id. +func (ms *managedStorage) putResourceReference(bucketUUID, managedPath, resourceId string) error { + managedResource := ManagedResource{ + BucketUUID: bucketUUID, + Path: managedPath, + } + existingResourceId, err := ms.putManagedResource(managedResource, resourceId) + if err != nil { + return err + } + logger.Debugf("managed resource entry created with path %q -> %q", managedPath, resourceId) + // If we are overwriting an existing resource with the same path, the managed resource + // entry will no longer reference the same resource catalog entry, so we need to remove + // the reference. + if existingResourceId != "" { + if _, _, err = ms.resourceCatalog.Remove(existingResourceId); err != nil { + return errors.Annotatef(err, "cannot remove old resource catalog entry with id %q", existingResourceId) + } + } + // Sanity check - ensure resource catalog entry for resourceId still exists. + _, err = ms.resourceCatalog.Get(resourceId) + if err != nil { + return errors.Annotatef(err, "unexpected deletion of resource catalog entry with id %q", resourceId) + } + return nil +} + +// Override for testing. +var txnRunner = func(db *mgo.Database) jujutxn.Runner { + return jujutxn.NewRunner(jujutxn.RunnerParams{Database: db}) +} + +// putManagedResource saves the managed resource record and returns the resource id of any +// existing record with the same path. +func (ms *managedStorage) putManagedResource(managedResource ManagedResource, resourceId string) ( + existingResourceId string, err error, +) { + buildTxn := func(attempt int) ([]txn.Op, error) { + var addManagedResourceOps []txn.Op + existingResourceId, addManagedResourceOps, err = ms.putResourceTxn(managedResource, resourceId) + return addManagedResourceOps, err + } + + txnRunner := txnRunner(ms.db) + if err = txnRunner.Run(buildTxn); err != nil { + return "", errors.Annotate(err, "cannot update managed resource catalog") + } + return existingResourceId, nil +} + +// RemoveForBucket is defined on the ManagedStorage interface. 
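+// Removal proceeds in three steps, as the notes in the body explain:
+// delete the managed resource record, decrement the catalog reference
+// count, and remove the blob only once no references remain.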
+func (ms *managedStorage) RemoveForBucket(bucketUUID, path string) (err error) {
+	// This operation may leave the db in an inconsistent state if any of the
+	// latter steps fail, but not in a way that will impact external users.
+	// e.g. if the managed resource record is removed, but the subsequent call to
+	// remove the resource catalog entry fails, the resource at the path will
+	// not be visible anymore, but the data will still be stored.
+
+	managedPath, err := ms.resourceStoragePath(bucketUUID, "", path)
+	if err != nil {
+		return err
+	}
+
+	// First remove the managed resource catalog entry.
+	var resourceId string
+	buildTxn := func(attempt int) ([]txn.Op, error) {
+		var removeManagedResourceOps []txn.Op
+		resourceId, removeManagedResourceOps, err = ms.removeResourceTxn(managedPath)
+		return removeManagedResourceOps, err
+	}
+	txnRunner := txnRunner(ms.db)
+	if err := txnRunner.Run(buildTxn); err != nil {
+		if err == mgo.ErrNotFound {
+			return errors.NotFoundf("resource at path %q", managedPath)
+		}
+		return errors.Annotate(err, "cannot update managed resource catalog")
+	}
+
+	// Now remove the resource catalog entry.
+	wasDeleted, resourcePath, err := ms.resourceCatalog.Remove(resourceId)
+	if err != nil {
+		return errors.Annotatef(err, "cannot delete resource %q from resource catalog", resourceId)
+	}
+	// If there are no more references to the data, delete from the resource store.
+	if wasDeleted {
+		if err := ms.resourceStore.Remove(resourcePath); err != nil {
+			return errors.Annotatef(err, "cannot delete resource %q at storage path %q", managedPath, resourcePath)
+		}
+	}
+	return nil
+}
+
+func (ms *managedStorage) putResourceTxn(managedResource ManagedResource, resourceId string) (string, []txn.Op, error) {
+	return putResourceTxn(ms.managedResourceCollection, managedResource, resourceId)
+}
+
+// putResourceTxn is split out so it can be overridden for testing.
+var putResourceTxn = func(coll *mgo.Collection, managedResource ManagedResource, resourceId string) (string, []txn.Op, error) {
+	doc := newManagedResourceDoc(managedResource, resourceId)
+	var existingDoc managedResourceDoc
+	err := coll.FindId(doc.Id).One(&existingDoc)
+	if err != nil && err != mgo.ErrNotFound {
+		return "", nil, err
+	}
+	if err == mgo.ErrNotFound {
+		return "", []txn.Op{{
+			C:      coll.Name,
+			Id:     doc.Id,
+			Assert: txn.DocMissing,
+			Insert: doc,
+		}}, nil
+	}
+	return existingDoc.ResourceId, []txn.Op{{
+		C:      coll.Name,
+		Id:     doc.Id,
+		Assert: txn.DocExists,
+		Update: bson.D{{"$set",
+			bson.D{{"path", doc.Path}, {"resourceid", resourceId}},
+		}},
+	}}, nil
+}
+
+func (ms *managedStorage) removeResourceTxn(managedPath string) (string, []txn.Op, error) {
+	var existingDoc managedResourceDoc
+	if err := ms.managedResourceCollection.FindId(managedPath).One(&existingDoc); err != nil {
+		return "", nil, err
+	}
+	return existingDoc.ResourceId, []txn.Op{{
+		C:      ms.managedResourceCollection.Name,
+		Id:     existingDoc.Id,
+		Assert: txn.DocExists,
+		Remove: true,
+	}}, nil
+}
+
+var (
+	requestExpiry = 60 * time.Second
+)
+
+// putResponse is used when responding to a put request.
+type putResponse struct {
+	requestId  int64
+	sha384Hash string
+}
+
+// PutRequest records a request to put a file pending proof of access.
+type PutRequest struct {
+	expiryTime   time.Time
+	resourceId   string
+	bucketUUID   string
+	user         string
+	path         string
+	expectedHash string
+}
+
+// RequestResponse is returned by a put request to inform the caller of
+// the data range over which to calculate the hashes for the response.
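+//
+// The proof-of-access round trip, sketched from the caller's side
+// (variable names assumed for illustration):
+//
+//	resp, err := ms.PutForBucketRequest(bucket, path, sha384Hex)
+//	// The caller hashes bytes resp.RangeStart..resp.RangeStart+resp.RangeLength
+//	// of its local copy of the data, then replies:
+//	err = ms.ProofOfAccessResponse(NewPutResponse(resp.RequestId, rangeHash))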
+type RequestResponse struct { + RequestId int64 + RangeStart int64 + RangeLength int64 +} + +// NewPutResponse creates a new putResponse for the given requestId and hashes. +func NewPutResponse(requestId int64, sha384hash string) putResponse { + return putResponse{ + requestId: requestId, + sha384Hash: sha384hash, + } +} + +// calculateExpectedHash picks a random range of bytes from the data cataloged by resourceId +// and calculates a sha384 checksum of that data. +func (ms *managedStorage) calculateExpectedHash(resourceId, path string) (string, int64, int64, error) { + rdr, length, err := ms.getResource(resourceId, path) + if err != nil { + return "", 0, 0, err + } + defer rdr.Close() + rangeLength := rand.Int63n(length) + // Restrict the minimum range to 512 or length/2, whichever is smaller. + minLength := int64(512) + if minLength > length/2 { + minLength = length / 2 + } + if rangeLength < minLength { + rangeLength = minLength + } + // Restrict the maximum range to 2048 bytes. + if rangeLength > 2048 { + rangeLength = 2048 + } + start := rand.Int63n(length - rangeLength) + _, err = rdr.(io.ReadSeeker).Seek(start, 0) + if err != nil { + return "", 0, 0, err + } + sha384hash := sha512.New384() + dataRdr := io.LimitReader(rdr, rangeLength) + dataRdr = io.TeeReader(dataRdr, sha384hash) + if _, err = ioutil.ReadAll(dataRdr); err != nil { + return "", 0, 0, err + } + sha384hashHex := fmt.Sprintf("%x", sha384hash.Sum(nil)) + return sha384hashHex, start, rangeLength, nil +} + +// PutForBucketRequest is defined on the ManagedStorage interface. +func (ms *managedStorage) PutForBucketRequest(bucketUUID, path string, hash string) (*RequestResponse, error) { + ms.requestMutex.Lock() + defer ms.requestMutex.Unlock() + + // Find the resource id (if it exists) matching the supplied checksums. + // If there's no matching data already stored, a NotFound error is returned. + resourceId, err := ms.resourceCatalog.Find(hash) + if err != nil { + return nil, err + } + expectedHash, rangeStart, rangeLength, err := ms.calculateExpectedHash(resourceId, path) + if err != nil { + return nil, errors.Annotatef(err, "cannot calculate response hashes for resource at path %q", path) + } + + requestId := ms.nextRequestId + ms.nextRequestId++ + putRequest := PutRequest{ + expiryTime: time.Now().Add(requestExpiry), + bucketUUID: bucketUUID, + path: path, + resourceId: resourceId, + expectedHash: expectedHash, + } + ms.queuedRequests[requestId] = putRequest + // If this is the only request queued up, start the timer to + // expire the request after an interval of requestExpiry. + if len(ms.queuedRequests) == 1 { + ms.updatePollTimer(requestId) + } + return &RequestResponse{ + RequestId: requestId, + RangeStart: rangeStart, + RangeLength: rangeLength, + }, nil +} + +// Wrap time.AfterFunc so we can patch for testing. +var afterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +func (ms *managedStorage) updatePollTimer(nextRequestIdToExpire int64) { + firstUnexpiredRequest := ms.queuedRequests[nextRequestIdToExpire] + waitInterval := firstUnexpiredRequest.expiryTime.Sub(time.Now()) + afterFunc(waitInterval, func() { + ms.processRequestExpiry(nextRequestIdToExpire) + }) +} + +// processRequestExpiry is used to remove an expired put request from the queue. 
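+// If other requests remain queued, the poll timer is re-armed to fire when
+// the next pending request is due to expire.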
+func (ms *managedStorage) processRequestExpiry(requestId int64) {
+	ms.requestMutex.Lock()
+	defer ms.requestMutex.Unlock()
+	delete(ms.queuedRequests, requestId)
+
+	// If there are still pending requests, update the timer
+	// to trigger when the next one is due to expire.
+	if len(ms.queuedRequests) > 0 {
+		var lowestRequestId int64
+		for i := requestId + 1; i < ms.nextRequestId; i++ {
+			if _, ok := ms.queuedRequests[i]; ok {
+				lowestRequestId = i
+				break
+			}
+		}
+		if lowestRequestId == 0 {
+			panic("logic error: lowest request id is 0")
+		}
+		ms.updatePollTimer(lowestRequestId)
+	}
+}
+
+// ErrRequestExpired is used to indicate that a put request has already expired
+// when an attempt is made to supply a response.
+var ErrRequestExpired = fmt.Errorf("request expired")
+
+// ErrResponseMismatch is used to indicate that a put response did not contain
+// the expected checksums.
+var ErrResponseMismatch = fmt.Errorf("response checksums do not match")
+
+// ErrResourceDeleted is used to indicate that a resource was deleted before the
+// put response could be acted on.
+var ErrResourceDeleted = fmt.Errorf("resource was deleted")
+
+// ProofOfAccessResponse is defined on the ManagedStorage interface.
+func (ms *managedStorage) ProofOfAccessResponse(response putResponse) error {
+	ms.requestMutex.Lock()
+	request, ok := ms.queuedRequests[response.requestId]
+	delete(ms.queuedRequests, response.requestId)
+	ms.requestMutex.Unlock()
+	if !ok {
+		return ErrRequestExpired
+	}
+	if request.expectedHash != response.sha384Hash {
+		return ErrResponseMismatch
+	}
+	// Sanity check - ensure resource hasn't been deleted between when the put request
+	// was made and now.
+	resource, err := ms.resourceCatalog.Get(request.resourceId)
+	if errors.IsNotFound(err) {
+		return ErrResourceDeleted
+	} else if err != nil {
+		return errors.Annotate(err, "confirming resource exists")
+	}
+
+	// Increment the resource catalog reference count.
+	resourceId, resourcePath, err := ms.resourceCatalog.Put(resource.SHA384Hash, resource.Length)
+	if err != nil {
+		return errors.Annotate(err, "cannot update resource catalog")
+	}
+	defer cleanupResourceCatalog(ms.resourceCatalog, resourceId, &err)
+	// We expect an existing catalog entry else it has been deleted from underneath us.
+	if resourcePath == "" || resourceId != request.resourceId {
+		return ErrResourceDeleted
+	}
+
+	managedPath, err := ms.resourceStoragePath(request.bucketUUID, request.user, request.path)
+	if err != nil {
+		return err
+	}
+	return ms.putResourceReference(request.bucketUUID, managedPath, request.resourceId)
+}
=== added file 'src/gopkg.in/juju/blobstore.v2/managedstorage_test.go'
--- src/gopkg.in/juju/blobstore.v2/managedstorage_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/blobstore.v2/managedstorage_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,704 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+ +package blobstore_test + +import ( + "bytes" + "crypto/sha512" + "fmt" + "io/ioutil" + "math/rand" + "strings" + "sync" + "time" + + "github.com/juju/errors" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + jujutxn "github.com/juju/txn" + txntesting "github.com/juju/txn/testing" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/juju/blobstore.v2" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/mgo.v2/txn" +) + +var _ = gc.Suite(&managedStorageSuite{}) + +type managedStorageSuite struct { + testing.IsolationSuite + testing.MgoSuite + txnRunner jujutxn.Runner + managedStorage blobstore.ManagedStorage + db *mgo.Database + resourceStorage blobstore.ResourceStorage +} + +func (s *managedStorageSuite) SetUpSuite(c *gc.C) { + s.IsolationSuite.SetUpSuite(c) + s.MgoSuite.SetUpSuite(c) +} + +func (s *managedStorageSuite) TearDownSuite(c *gc.C) { + s.MgoSuite.TearDownSuite(c) + s.IsolationSuite.TearDownSuite(c) +} + +func (s *managedStorageSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.MgoSuite.SetUpTest(c) + s.db = s.Session.DB("blobstore") + s.resourceStorage = blobstore.NewGridFS("storage", "test", s.Session) + s.managedStorage = blobstore.NewManagedStorage(s.db, s.resourceStorage) + + // For testing, we need to ensure there's a single txnRunner for all operations. + s.txnRunner = jujutxn.NewRunner(jujutxn.RunnerParams{Database: s.db}) + txnRunnerFunc := func(db *mgo.Database) jujutxn.Runner { + return s.txnRunner + } + s.PatchValue(blobstore.TxnRunner, txnRunnerFunc) +} + +func (s *managedStorageSuite) TearDownTest(c *gc.C) { + s.MgoSuite.TearDownTest(c) + s.IsolationSuite.TearDownTest(c) +} + +func (s *managedStorageSuite) TestResourceStoragePath(c *gc.C) { + for _, test := range []struct { + bucketUUID string + user string + path string + storagePath string + error string + }{ + { + bucketUUID: "", + user: "", + path: "/path/to/blob", + storagePath: "global/path/to/blob", + }, { + bucketUUID: "bucketuuid", + user: "", + path: "/path/to/blob", + storagePath: "buckets/bucketuuid/path/to/blob", + }, { + bucketUUID: "", + user: "user", + path: "/path/to/blob", + storagePath: "users/user/path/to/blob", + }, { + bucketUUID: "bucketuuid", + user: "user", + path: "/path/to/blob", + storagePath: "buckets/bucketuuid/users/user/path/to/blob", + }, { + bucketUUID: "env/123", + user: "user", + path: "/path/to/blob", + error: `.* cannot contain "/"`, + }, { + bucketUUID: "bucketuuid", + user: "user/123", + path: "/path/to/blob", + error: `.* cannot contain "/"`, + }, + } { + result, err := blobstore.ResourceStoragePath(s.managedStorage, test.bucketUUID, test.user, test.path) + if test.error == "" { + c.Check(err, gc.IsNil) + c.Check(result, gc.Equals, test.storagePath) + } else { + c.Check(err, gc.ErrorMatches, test.error) + } + } +} + +type managedResourceDocStub struct { + Path string + ResourceId string +} + +type resourceDocStub struct { + Path string +} + +func (s *managedStorageSuite) TestGetPendingUpload(c *gc.C) { + // Manually set up a scenario where there's a resource recorded + // but the upload has not occurred. 
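+	// A catalog entry whose storage path is still empty marks an upload
+	// in flight (see resourcecatalog.go).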
+	rc := blobstore.GetResourceCatalog(s.managedStorage)
+	id, _, err := rc.Put("foo", 100)
+	c.Assert(err, gc.IsNil)
+	managedResource := blobstore.ManagedResource{
+		BucketUUID: "bucketuuid",
+		User:       "user",
+		Path:       "buckets/bucketuuid/path/to/blob",
+	}
+	_, err = blobstore.PutManagedResource(s.managedStorage, managedResource, id)
+	c.Assert(err, gc.IsNil)
+	_, _, err = s.managedStorage.GetForBucket("bucketuuid", "/path/to/blob")
+	c.Assert(err, gc.Equals, blobstore.ErrUploadPending)
+}
+
+func (s *managedStorageSuite) TestPutPendingUpload(c *gc.C) {
+	// Manually set up a scenario where there's a resource recorded
+	// but the upload has not occurred.
+	rc := blobstore.GetResourceCatalog(s.managedStorage)
+	hash := "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7"
+
+	id, path, err := rc.Put(hash, 3)
+	c.Assert(err, gc.IsNil)
+	c.Assert(path, gc.Equals, "")
+	managedResource := blobstore.ManagedResource{
+		BucketUUID: "bucketuuid",
+		User:       "user",
+		Path:       "buckets/bucketuuid/path/to/blob",
+	}
+
+	_, err = blobstore.PutManagedResource(s.managedStorage, managedResource, id)
+	c.Assert(err, gc.IsNil)
+	_, _, err = s.managedStorage.GetForBucket("bucketuuid", "/path/to/blob")
+	c.Assert(errors.Cause(err), gc.Equals, blobstore.ErrUploadPending)
+
+	// Despite the upload being pending, a second concurrent upload will succeed.
+	rdr := bytes.NewReader([]byte("abc"))
+	err = s.managedStorage.PutForBucket("bucketuuid", "/path/to/blob", rdr, 3)
+	c.Assert(err, gc.IsNil)
+	s.assertGet(c, "/path/to/blob", []byte("abc"))
+}
+
+func (s *managedStorageSuite) assertPut(c *gc.C, path string, blob []byte) string {
+	// Put the data.
+	rdr := bytes.NewReader(blob)
+	err := s.managedStorage.PutForBucket("bucketuuid", path, rdr, int64(len(blob)))
+	c.Assert(err, gc.IsNil)
+
+	// Load the managed resource record.
+	var mrDoc managedResourceDocStub
+	err = s.db.C("managedStoredResources").Find(bson.D{{"path", "buckets/bucketuuid" + path}}).One(&mrDoc)
+	c.Assert(err, gc.IsNil)
+
+	// Load the corresponding resource catalog record.
+	var rd resourceDocStub
+	err = s.db.C("storedResources").FindId(mrDoc.ResourceId).One(&rd)
+	c.Assert(err, gc.IsNil)
+
+	// Use the resource catalog record to load the underlying data from blobstore.
+ r, err := s.resourceStorage.Get(rd.Path) + c.Assert(err, gc.IsNil) + defer r.Close() + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + c.Assert(data, gc.DeepEquals, blob) + return rd.Path +} + +func (s *managedStorageSuite) assertResourceCatalogCount(c *gc.C, expected int) { + num, err := s.db.C("storedResources").Count() + c.Assert(err, gc.IsNil) + c.Assert(num, gc.Equals, expected) +} + +func (s *managedStorageSuite) TestPut(c *gc.C) { + s.assertPut(c, "/path/to/blob", []byte("some resource")) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutSamePathDifferentData(c *gc.C) { + resPath := s.assertPut(c, "/path/to/blob", []byte("some resource")) + secondResPath := s.assertPut(c, "/path/to/blob", []byte("another resource")) + c.Assert(resPath, gc.Not(gc.Equals), secondResPath) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutDifferentPathSameData(c *gc.C) { + resPath := s.assertPut(c, "/path/to/blob", []byte("some resource")) + secondResPath := s.assertPut(c, "/anotherpath/to/blob", []byte("some resource")) + c.Assert(resPath, gc.Equals, secondResPath) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutSamePathDifferentDataMulti(c *gc.C) { + resPath := s.assertPut(c, "/path/to/blob", []byte("another resource")) + secondResPath := s.assertPut(c, "/anotherpath/to/blob", []byte("some resource")) + c.Assert(resPath, gc.Not(gc.Equals), secondResPath) + s.assertResourceCatalogCount(c, 2) + + thirdResPath := s.assertPut(c, "/path/to/blob", []byte("some resource")) + c.Assert(resPath, gc.Not(gc.Equals), secondResPath) + c.Assert(secondResPath, gc.Equals, thirdResPath) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutManagedResourceFail(c *gc.C) { + var resourcePath string + s.PatchValue(blobstore.PutResourceTxn, func( + coll *mgo.Collection, managedResource blobstore.ManagedResource, resourceId string) (string, []txn.Op, error) { + rc := blobstore.GetResourceCatalog(s.managedStorage) + r, err := rc.Get(resourceId) + c.Assert(err, gc.IsNil) + resourcePath = r.Path + return "", nil, errors.Errorf("some error") + }) + // Attempt to put the data. + blob := []byte("data") + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForBucket("bucketuuid", "/some/path", rdr, int64(len(blob))) + c.Assert(err, gc.ErrorMatches, "cannot update managed resource catalog: some error") + + // Now ensure there's no blob data left behind in storage, nor a resource catalog record. + s.assertResourceCatalogCount(c, 0) + _, err = s.resourceStorage.Get(resourcePath) + c.Assert(err, gc.ErrorMatches, ".*not found") +} + +func (s *managedStorageSuite) TestPutForEnvironmentAndCheckHash(c *gc.C) { + blob := []byte("data") + rdr := bytes.NewReader(blob) + sha384Hash := calculateCheckSum(c, 0, 5, []byte("wrong")) + err := s.managedStorage.PutForBucketAndCheckHash("bucketuuid", "/some/path", rdr, int64(len(blob)), sha384Hash) + c.Assert(err, gc.ErrorMatches, "hash mismatch") + + rdr.Seek(0, 0) + sha384Hash = calculateCheckSum(c, 0, int64(len(blob)), blob) + err = s.managedStorage.PutForBucketAndCheckHash("bucketuuid", "/some/path", rdr, int64(len(blob)), sha384Hash) + c.Assert(err, gc.IsNil) +} + +func (s *managedStorageSuite) TestPutForEnvironmentAndCheckHashEmptyHash(c *gc.C) { + // Passing "" as the hash to PutForBucketAndCheckHash will elide + // the hash check. 
+ rdr := strings.NewReader("data") + err := s.managedStorage.PutForBucketAndCheckHash("bucketuuid", "/some/path", rdr, int64(rdr.Len()), "") + c.Assert(err, jc.ErrorIsNil) +} + +func (s *managedStorageSuite) TestPutForEnvironmentUnknownLen(c *gc.C) { + // Passing -1 for the size of the data directs PutForBucket + // to read in the whole amount. + blob := []byte("data") + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForBucket("bucketuuid", "/some/path", rdr, -1) + c.Assert(err, jc.ErrorIsNil) + s.assertGet(c, "/some/path", blob) +} + +func (s *managedStorageSuite) TestPutForEnvironmentOverLong(c *gc.C) { + // Passing a size to PutForBucket that exceeds the actual + // size of the data will result in metadata recording the actual + // size. + blob := []byte("data") + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForBucket("bucketuuid", "/some/path", rdr, int64(len(blob)+1)) + c.Assert(err, jc.ErrorIsNil) + s.assertGet(c, "/some/path", blob) +} + +func (s *managedStorageSuite) assertGet(c *gc.C, path string, blob []byte) { + r, length, err := s.managedStorage.GetForBucket("bucketuuid", path) + c.Assert(err, gc.IsNil) + defer r.Close() + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + c.Assert(data, gc.DeepEquals, blob) + c.Assert(int(length), gc.Equals, len(blob)) +} + +func (s *managedStorageSuite) TestGet(c *gc.C) { + blob := []byte("some resource") + s.assertPut(c, "/path/to/blob", blob) + s.assertGet(c, "/path/to/blob", blob) +} + +func (s *managedStorageSuite) TestGetNonExistent(c *gc.C) { + _, _, err := s.managedStorage.GetForBucket("bucketuuid", "/path/to/nowhere") + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *managedStorageSuite) TestRemove(c *gc.C) { + blob := []byte("some resource") + resPath := s.assertPut(c, "/path/to/blob", blob) + err := s.managedStorage.RemoveForBucket("bucketuuid", "/path/to/blob") + c.Assert(err, gc.IsNil) + + // Check the data and catalog entry really are removed. 
+ _, _, err = s.managedStorage.GetForBucket("bucketuuid", "path/to/blob") + c.Assert(err, jc.Satisfies, errors.IsNotFound) + _, err = s.resourceStorage.Get(resPath) + c.Assert(err, gc.NotNil) + + s.assertResourceCatalogCount(c, 0) +} + +func (s *managedStorageSuite) TestRemoveNonExistent(c *gc.C) { + err := s.managedStorage.RemoveForBucket("bucketuuid", "/path/to/nowhere") + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *managedStorageSuite) TestRemoveDifferentPathKeepsData(c *gc.C) { + blob := []byte("some resource") + s.assertPut(c, "/path/to/blob", blob) + s.assertPut(c, "/anotherpath/to/blob", blob) + s.assertResourceCatalogCount(c, 1) + err := s.managedStorage.RemoveForBucket("bucketuuid", "/path/to/blob") + c.Assert(err, gc.IsNil) + s.assertGet(c, "/anotherpath/to/blob", blob) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutRace(c *gc.C) { + blob := []byte("some resource") + beforeFunc := func() { + s.assertPut(c, "/path/to/blob", blob) + } + defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFunc).Check() + anotherblob := []byte("another resource") + s.assertPut(c, "/path/to/blob", anotherblob) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutDeleteRace(c *gc.C) { + blob := []byte("some resource") + s.assertPut(c, "/path/to/blob", blob) + beforeFunc := func() { + err := s.managedStorage.RemoveForBucket("bucketuuid", "/path/to/blob") + c.Assert(err, gc.IsNil) + } + defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFunc).Check() + anotherblob := []byte("another resource") + s.assertPut(c, "/path/to/blob", anotherblob) + s.assertResourceCatalogCount(c, 1) +} + +func (s *managedStorageSuite) TestPutRaceWhereCatalogEntryRemoved(c *gc.C) { + blob := []byte("some resource") + // Remove the resource catalog entry with the resourceId that we are about + // to write to a managed resource entry. + beforeFunc := []func(){ + nil, // resourceCatalog Put() + nil, // managedResource Put() + func() { + // Shamelessly exploit our knowledge of how ids are made. 
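+			// (Catalog entries are keyed by the SHA384 hash of the content.)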
+ sha384Hash := calculateCheckSum(c, 0, int64(len(blob)), blob) + _, _, err := blobstore.GetResourceCatalog(s.managedStorage).Remove(sha384Hash) + c.Assert(err, gc.IsNil) + }, + } + defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFunc...).Check() + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForBucket("bucketuuid", "/path/to/blob", rdr, int64(len(blob))) + c.Assert(err, gc.ErrorMatches, "unexpected deletion .*") + s.assertResourceCatalogCount(c, 0) +} + +func (s *managedStorageSuite) TestRemoveRace(c *gc.C) { + blob := []byte("some resource") + s.assertPut(c, "/path/to/blob", blob) + beforeFunc := func() { + err := s.managedStorage.RemoveForBucket("bucketuuid", "/path/to/blob") + c.Assert(err, gc.IsNil) + } + defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFunc).Check() + err := s.managedStorage.RemoveForBucket("bucketuuid", "/path/to/blob") + c.Assert(err, jc.Satisfies, errors.IsNotFound) + _, _, err = s.managedStorage.GetForBucket("bucketuuid", "/path/to/blob") + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *managedStorageSuite) TestPutRequestNotFound(c *gc.C) { + _, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", "sha384") + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} + +func (s *managedStorageSuite) putTestRandomBlob(c *gc.C, path string) (blob []byte, sha384HashHex string) { + id := bson.NewObjectId().Hex() + blob = []byte(id) + return blob, s.putTestBlob(c, path, blob) +} + +func (s *managedStorageSuite) putTestBlob(c *gc.C, path string, blob []byte) (sha384HashHex string) { + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForBucket("bucketuuid", path, rdr, int64(len(blob))) + c.Assert(err, gc.IsNil) + s.assertGet(c, path, blob) + sha384HashHex = calculateCheckSum(c, 0, int64(len(blob)), blob) + return sha384HashHex +} + +func calculateCheckSum(c *gc.C, start, length int64, blob []byte) (sha384HashHex string) { + data := blob[start : start+length] + sha384Hash := sha512.New384() + _, err := sha384Hash.Write(data) + c.Assert(err, gc.IsNil) + sha384HashHex = fmt.Sprintf("%x", sha384Hash.Sum(nil)) + return sha384HashHex +} + +func (s *managedStorageSuite) TestPutRequestResponseHashMismatch(c *gc.C) { + _, sha384Hash := s.putTestRandomBlob(c, "path/to/blob") + reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash) + c.Assert(err, gc.IsNil) + response := blobstore.NewPutResponse(reqResp.RequestId, "notsha384") + err = s.managedStorage.ProofOfAccessResponse(response) + c.Assert(err, gc.Equals, blobstore.ErrResponseMismatch) + c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0) +} + +func (s *managedStorageSuite) assertPutRequestSingle(c *gc.C, blob []byte, resourceCount int) { + if blob == nil { + id := bson.NewObjectId().Hex() + blob = []byte(id) + } + rdr := bytes.NewReader(blob) + err := s.managedStorage.PutForBucket("bucketuuid", "path/to/blob", rdr, int64(len(blob))) + c.Assert(err, gc.IsNil) + sha384Hash := calculateCheckSum(c, 0, int64(len(blob)), blob) + reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash) + c.Assert(err, gc.IsNil) + sha384Response := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob) + response := blobstore.NewPutResponse(reqResp.RequestId, sha384Response) + err = s.managedStorage.ProofOfAccessResponse(response) + c.Assert(err, gc.IsNil) + s.assertGet(c, "path/to/blob", blob) + s.assertResourceCatalogCount(c, resourceCount) +} + +var trigger struct{} = struct{}{} + 
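+// As a rough sketch of the proof-of-access flow this suite exercises (the
+// helper below is illustrative only and is not used by the tests): queue a
+// request for data we already hold, hash the byte range nominated in the
+// response, and submit that hash as proof.
+func (s *managedStorageSuite) proofOfAccessRoundTrip(c *gc.C, path string, blob []byte) error {
+	// Locate the existing catalog entry by the full-content hash.
+	sha384Hash := calculateCheckSum(c, 0, int64(len(blob)), blob)
+	reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", path, sha384Hash)
+	c.Assert(err, gc.IsNil)
+	// Hash only the range the response nominated and submit it as proof.
+	proof := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob)
+	return s.managedStorage.ProofOfAccessResponse(blobstore.NewPutResponse(reqResp.RequestId, proof))
+}
+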
+// patchedAfterFunc returns a function like time.AfterFunc, but one whose
+// callback is triggered by a channel send rather than the expiry of a
+// timer interval.
+func patchedAfterFunc(ch chan struct{}) func(d time.Duration, f func()) *time.Timer {
+	return func(d time.Duration, f func()) *time.Timer {
+		go func() {
+			select {
+			case <-ch:
+				f()
+				ch <- trigger
+			}
+		}()
+		return nil
+	}
+}
+
+func (s *managedStorageSuite) TestPutRequestSingle(c *gc.C) {
+	ch := make(chan struct{})
+	s.PatchValue(blobstore.AfterFunc, patchedAfterFunc(ch))
+	s.assertPutRequestSingle(c, nil, 1)
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+	// Trigger the request timeout.
+	ch <- trigger
+	<-ch
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+}
+
+func (s *managedStorageSuite) TestPutRequestLarge(c *gc.C) {
+	ch := make(chan struct{})
+	s.PatchValue(blobstore.AfterFunc, patchedAfterFunc(ch))
+	// Use a blob size of 4096, which exceeds the maximum put response
+	// range length of 2048.
+	blob := make([]byte, 4096)
+	for i := 0; i < 4096; i++ {
+		blob[i] = byte(rand.Intn(255))
+	}
+	s.assertPutRequestSingle(c, blob, 1)
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+	// Trigger the request timeout.
+	ch <- trigger
+	<-ch
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+}
+
+func (s *managedStorageSuite) TestPutRequestMultiSequential(c *gc.C) {
+	ch := make(chan struct{})
+	s.PatchValue(blobstore.AfterFunc, patchedAfterFunc(ch))
+	s.assertPutRequestSingle(c, nil, 1)
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+	// Trigger the request timeout.
+	ch <- trigger
+	<-ch
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+	s.assertPutRequestSingle(c, nil, 1)
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+	// Trigger the request timeout.
+	ch <- trigger
+	<-ch
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+}
+
+func (s *managedStorageSuite) checkPutResponse(c *gc.C, index int, wg *sync.WaitGroup,
+	requestId int64, sha384Hash string, blob []byte) {
+
+	// After a random time, respond to a previously queued put request and check the result.
+	go func() {
+		delay := rand.Intn(3)
+		time.Sleep(time.Duration(delay) * time.Millisecond)
+		expectError := index == 2
+		if expectError {
+			sha384Hash = "bad"
+		}
+		response := blobstore.NewPutResponse(requestId, sha384Hash)
+		err := s.managedStorage.ProofOfAccessResponse(response)
+		if expectError {
+			c.Check(err, gc.NotNil)
+		} else {
+			c.Check(err, gc.IsNil)
+			if err == nil {
+				r, length, err := s.managedStorage.GetForBucket("bucketuuid", fmt.Sprintf("path/to/blob%d", index))
+				c.Check(err, gc.IsNil)
+				if err == nil {
+					data, err := ioutil.ReadAll(r)
+					c.Check(err, gc.IsNil)
+					c.Check(data, gc.DeepEquals, blob)
+					c.Check(int(length), gc.DeepEquals, len(blob))
+				}
+			}
+		}
+		wg.Done()
+	}()
+}
+
+func (s *managedStorageSuite) queuePutRequests(c *gc.C, done chan struct{}) {
+	var wg sync.WaitGroup
+	// One request is allowed to expire, so set up the wait group for one
+	// less than the number of requests.
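+	// (Ten requests are queued below; the one at i == 3 is left to expire.)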
+ wg.Add(9) + go func() { + for i := 0; i < 10; i++ { + blobPath := fmt.Sprintf("path/to/blob%d", i) + blob, sha384Hash := s.putTestRandomBlob(c, blobPath) + reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash) + c.Assert(err, gc.IsNil) + // Let one request timeout + if i == 3 { + continue + } + sha384Response := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob) + s.checkPutResponse(c, i, &wg, reqResp.RequestId, sha384Response, blob) + } + wg.Wait() + close(done) + }() +} + +const ( + ShortWait = 50 * time.Millisecond + LongWait = 10 * time.Second +) + +var LongAttempt = &utils.AttemptStrategy{ + Total: LongWait, + Delay: ShortWait, +} + +func (s *managedStorageSuite) TestPutRequestMultiRandom(c *gc.C) { + ch := make(chan struct{}) + s.PatchValue(blobstore.AfterFunc, patchedAfterFunc(ch)) + done := make(chan struct{}) + s.queuePutRequests(c, done) + select { + case <-done: + c.Logf("all done") + case <-time.After(LongWait): + c.Fatalf("timed out waiting for put requests to be processed") + } + // One request hasn't been processed since we left it to timeout. + c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 1) + // Trigger the request timeout. + ch <- trigger + <-ch + c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0) +} + +func (s *managedStorageSuite) TestPutRequestExpired(c *gc.C) { + ch := make(chan struct{}) + s.PatchValue(blobstore.AfterFunc, patchedAfterFunc(ch)) + blob, sha384Hash := s.putTestRandomBlob(c, "path/to/blob") + reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash) + c.Assert(err, gc.IsNil) + sha384Response := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob) + // Trigger the request timeout. + ch <- trigger + <-ch + response := blobstore.NewPutResponse(reqResp.RequestId, sha384Response) + err = s.managedStorage.ProofOfAccessResponse(response) + c.Assert(err, gc.Equals, blobstore.ErrRequestExpired) + c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0) +} + +// Run one simple test with the real time.AfterFunc to ensure it works. +func (s *managedStorageSuite) TestPutRequestExpiredWithRealTimeAfter(c *gc.C) { + s.PatchValue(blobstore.RequestExpiry, 5*time.Millisecond) + blob, sha384Hash := s.putTestRandomBlob(c, "path/to/blob") + reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash) + c.Assert(err, gc.IsNil) + sha384Response := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob) + // Wait for request timer to trigger. 
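+	// (7ms comfortably exceeds the 5ms requestExpiry patched in above.)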
+	time.Sleep(7 * time.Millisecond)
+	response := blobstore.NewPutResponse(reqResp.RequestId, sha384Response)
+	err = s.managedStorage.ProofOfAccessResponse(response)
+	c.Assert(err, gc.Equals, blobstore.ErrRequestExpired)
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+}
+
+func (s *managedStorageSuite) TestPutRequestExpiredMulti(c *gc.C) {
+	ch := make(chan struct{})
+	s.PatchValue(blobstore.AfterFunc, patchedAfterFunc(ch))
+	blob, sha384Hash := s.putTestRandomBlob(c, "path/to/blob")
+	reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash)
+	c.Assert(err, gc.IsNil)
+	sha384Response := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob)
+	reqResp2, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob2", sha384Hash)
+	c.Assert(err, gc.IsNil)
+	sha384Response2 := calculateCheckSum(c, reqResp2.RangeStart, reqResp2.RangeLength, blob)
+	// Trigger the request timeouts.
+	ch <- trigger
+	<-ch
+	ch <- trigger
+	<-ch
+	c.Assert(blobstore.RequestQueueLength(s.managedStorage), gc.Equals, 0)
+	response := blobstore.NewPutResponse(reqResp.RequestId, sha384Response)
+	response2 := blobstore.NewPutResponse(reqResp2.RequestId, sha384Response2)
+	err = s.managedStorage.ProofOfAccessResponse(response)
+	c.Assert(err, gc.Equals, blobstore.ErrRequestExpired)
+	err = s.managedStorage.ProofOfAccessResponse(response2)
+	c.Assert(err, gc.Equals, blobstore.ErrRequestExpired)
+}
+
+func (s *managedStorageSuite) TestPutRequestDeleted(c *gc.C) {
+	blob, sha384Hash := s.putTestRandomBlob(c, "path/to/blob")
+	reqResp, err := s.managedStorage.PutForBucketRequest("bucketuuid", "path/to/blob", sha384Hash)
+	c.Assert(err, gc.IsNil)
+	err = s.managedStorage.RemoveForBucket("bucketuuid", "path/to/blob")
+	c.Assert(err, gc.IsNil)
+
+	sha384Response := calculateCheckSum(c, reqResp.RangeStart, reqResp.RangeLength, blob)
+	response := blobstore.NewPutResponse(reqResp.RequestId, sha384Response)
+	err = s.managedStorage.ProofOfAccessResponse(response)
+	c.Assert(err, gc.Equals, blobstore.ErrResourceDeleted)
+}
+
+func (s *managedStorageSuite) TestPutMultiSameData(c *gc.C) {
+	blob := bytes.Repeat([]byte("blobalob"), 1024*1024*10)
+	done := make(chan struct{})
+	go func() {
+		var wg sync.WaitGroup
+		wg.Add(10)
+		for i := 0; i < 10; i++ {
+			go func() {
+				defer wg.Done()
+				rdr := bytes.NewReader(blob)
+				err := s.managedStorage.PutForBucket("bucketuuid", "path", rdr, int64(len(blob)))
+				c.Assert(err, gc.IsNil)
+			}()
+		}
+		wg.Wait()
+		done <- struct{}{}
+	}()
+	select {
+	case <-done:
+	case <-time.After(1 * time.Minute):
+		c.Fatalf("timed out waiting for puts to be processed")
+	}
+}
=== added file 'src/gopkg.in/juju/blobstore.v2/package_test.go'
--- src/gopkg.in/juju/blobstore.v2/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/blobstore.v2/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package blobstore_test
+
+import (
+	"testing"
+
+	gitjujutesting "github.com/juju/testing"
+)
+
+func Test(t *testing.T) {
+	gitjujutesting.MgoTestPackage(t, nil)
+}
=== added file 'src/gopkg.in/juju/blobstore.v2/resourcecatalog.go'
--- src/gopkg.in/juju/blobstore.v2/resourcecatalog.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/blobstore.v2/resourcecatalog.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,221 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package blobstore
+
+import (
+	"github.com/juju/errors"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+	"gopkg.in/mgo.v2/txn"
+)
+
+var (
+	// ErrUploadPending is used to indicate that the underlying resource for a catalog entry
+	// is not yet fully uploaded.
+	ErrUploadPending = errors.New("Resource not available because upload is not yet complete")
+
+	// errUploadedConcurrently is used to indicate that another client uploaded the
+	// resource already.
+	errUploadedConcurrently = errors.AlreadyExistsf("resource")
+)
+
+// Resource is a catalog entry for stored data.
+// It records the path where the data is stored, together with a hash
+// of the data that is used for de-duping.
+type Resource struct {
+	SHA384Hash string
+	Path       string
+	Length     int64
+}
+
+// resourceDoc is the persistent representation of a Resource.
+type resourceDoc struct {
+	Id string `bson:"_id"`
+	// Path is the storage path of the resource, which will be
+	// the empty string until the upload has been completed.
+	Path       string `bson:"path"`
+	SHA384Hash string `bson:"sha384hash"`
+	Length     int64  `bson:"length"`
+	RefCount   int64  `bson:"refcount"`
+}
+
+// resourceCatalog is a mongo backed ResourceCatalog instance.
+type resourceCatalog struct {
+	collection *mgo.Collection
+}
+
+var _ ResourceCatalog = (*resourceCatalog)(nil)
+
+// newResource constructs a Resource from its attributes.
+func newResource(path, sha384hash string, length int64) *Resource {
+	return &Resource{
+		Path:       path,
+		Length:     length,
+		SHA384Hash: sha384hash,
+	}
+}
+
+// newResourceDoc constructs a resourceDoc from a sha384 hash.
+// This is used when writing new data to the resource store.
+// The storage path is left empty until the upload completes
+// (see UploadComplete).
+func newResourceDoc(sha384Hash string, length int64) resourceDoc {
+	return resourceDoc{
+		Id:         sha384Hash,
+		SHA384Hash: sha384Hash,
+		RefCount:   1,
+		Length:     length,
+	}
+}
+
+const (
+	// resourceCatalogCollection is the name of the collection
+	// which stores the resourceDoc records.
+	resourceCatalogCollection = "storedResources"
+)
+
+// newResourceCatalog creates a new ResourceCatalog
+// storing resource entries in the mongo database.
+func newResourceCatalog(db *mgo.Database) ResourceCatalog {
+	return &resourceCatalog{
+		collection: db.C(resourceCatalogCollection),
+	}
+}
+
+// Get is defined on the ResourceCatalog interface.
+func (rc *resourceCatalog) Get(id string) (*Resource, error) {
+	var doc resourceDoc
+	if err := rc.collection.FindId(id).One(&doc); err == mgo.ErrNotFound {
+		return nil, errors.NotFoundf("resource with id %q", id)
+	} else if err != nil {
+		return nil, err
+	}
+	if doc.Path == "" {
+		return nil, ErrUploadPending
+	}
+	return newResource(doc.Path, doc.SHA384Hash, doc.Length), nil
+}
+
+// Find is defined on the ResourceCatalog interface.
+func (rc *resourceCatalog) Find(hash string) (string, error) {
+	var doc resourceDoc
+	if err := rc.collection.Find(checksumMatch(hash)).One(&doc); err == mgo.ErrNotFound {
+		return "", errors.NotFoundf("resource with sha384=%q", hash)
+	} else if err != nil {
+		return "", err
+	}
+	if doc.Path == "" {
+		return "", ErrUploadPending
+	}
+	return doc.Id, nil
+}
+
+// Put is defined on the ResourceCatalog interface.
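+// A new entry is inserted with a reference count of 1 and an empty path;
+// for an existing entry with the same hash, the reference count is
+// incremented instead and the already-recorded path is returned.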
+func (rc *resourceCatalog) Put(hash string, length int64) (id, path string, err error) { + buildTxn := func(attempt int) (ops []txn.Op, err error) { + id, path, ops, err = rc.resourceIncRefOps(hash, length) + return ops, err + } + txnRunner := txnRunner(rc.collection.Database) + if err = txnRunner.Run(buildTxn); err != nil { + return "", "", err + } + return id, path, nil +} + +// UploadComplete is defined on the ResourceCatalog interface. +func (rc *resourceCatalog) UploadComplete(id, path string) error { + buildTxn := func(attempt int) (ops []txn.Op, err error) { + if ops, err = rc.uploadCompleteOps(id, path); err == mgo.ErrNotFound { + return nil, errors.NotFoundf("resource with id %q", id) + } + return ops, err + } + txnRunner := txnRunner(rc.collection.Database) + return txnRunner.Run(buildTxn) +} + +// Remove is defined on the ResourceCatalog interface. +func (rc *resourceCatalog) Remove(id string) (wasDeleted bool, path string, err error) { + buildTxn := func(attempt int) (ops []txn.Op, err error) { + if wasDeleted, path, ops, err = rc.resourceDecRefOps(id); err == mgo.ErrNotFound { + return nil, errors.NotFoundf("resource with id %q", id) + } + return ops, err + } + txnRunner := txnRunner(rc.collection.Database) + return wasDeleted, path, txnRunner.Run(buildTxn) +} + +func checksumMatch(hash string) bson.D { + return bson.D{{"sha384hash", hash}} +} + +func (rc *resourceCatalog) resourceIncRefOps(hash string, length int64) ( + id, path string, ops []txn.Op, err error, +) { + var doc resourceDoc + exists := false + checksumMatchTerm := checksumMatch(hash) + err = rc.collection.Find(checksumMatchTerm).One(&doc) + if err != nil && err != mgo.ErrNotFound { + return "", "", nil, err + } else if err == nil { + exists = true + } + if !exists { + doc := newResourceDoc(hash, length) + return doc.Id, "", []txn.Op{{ + C: rc.collection.Name, + Id: doc.Id, + Assert: txn.DocMissing, + Insert: doc, + }}, nil + } + if doc.Length != length { + return "", "", nil, errors.Errorf("length mismatch in resource document %d != %d", doc.Length, length) + } + return doc.Id, doc.Path, []txn.Op{{ + C: rc.collection.Name, + Id: doc.Id, + Assert: checksumMatchTerm, + Update: bson.D{{"$inc", bson.D{{"refcount", 1}}}}, + }}, nil +} + +func (rc *resourceCatalog) uploadCompleteOps(id, path string) ([]txn.Op, error) { + var doc resourceDoc + if err := rc.collection.FindId(id).One(&doc); err != nil { + return nil, err + } + if doc.Path != "" { + return nil, errUploadedConcurrently + } + return []txn.Op{{ + C: rc.collection.Name, + Id: doc.Id, + Assert: bson.D{{"path", ""}}, // doc exists, path is unset + Update: bson.D{{"$set", bson.D{{"path", path}}}}, + }}, nil +} + +func (rc *resourceCatalog) resourceDecRefOps(id string) (wasDeleted bool, path string, ops []txn.Op, err error) { + var doc resourceDoc + if err = rc.collection.FindId(id).One(&doc); err != nil { + return false, "", nil, err + } + if doc.RefCount == 1 { + return true, doc.Path, []txn.Op{{ + C: rc.collection.Name, + Id: doc.Id, + Assert: bson.D{{"refcount", 1}}, + Remove: true, + }}, nil + } + return false, doc.Path, []txn.Op{{ + C: rc.collection.Name, + Id: doc.Id, + Assert: bson.D{{"refcount", bson.D{{"$gt", 1}}}}, + Update: bson.D{{"$inc", bson.D{{"refcount", -1}}}}, + }}, nil +} === added file 'src/gopkg.in/juju/blobstore.v2/resourcecatalog_test.go' --- src/gopkg.in/juju/blobstore.v2/resourcecatalog_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/blobstore.v2/resourcecatalog_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,260 @@ +// Copyright 
2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package blobstore_test
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/txn"
+	txntesting "github.com/juju/txn/testing"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/blobstore.v2"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+)
+
+var _ = gc.Suite(&resourceCatalogSuite{})
+
+type resourceCatalogSuite struct {
+	testing.IsolationSuite
+	testing.MgoSuite
+	txnRunner  txn.Runner
+	rCatalog   blobstore.ResourceCatalog
+	collection *mgo.Collection
+}
+
+func (s *resourceCatalogSuite) SetUpSuite(c *gc.C) {
+	s.IsolationSuite.SetUpSuite(c)
+	s.MgoSuite.SetUpSuite(c)
+}
+
+func (s *resourceCatalogSuite) TearDownSuite(c *gc.C) {
+	s.MgoSuite.TearDownSuite(c)
+	s.IsolationSuite.TearDownSuite(c)
+}
+
+func (s *resourceCatalogSuite) SetUpTest(c *gc.C) {
+	s.IsolationSuite.SetUpTest(c)
+	s.MgoSuite.SetUpTest(c)
+	db := s.Session.DB("blobstore")
+	s.collection = db.C("storedResources")
+	s.rCatalog = blobstore.NewResourceCatalog(db)
+
+	// For testing, we need to ensure there's a single txnRunner for all operations.
+	s.txnRunner = txn.NewRunner(txn.RunnerParams{Database: db})
+	txnRunnerFunc := func(db *mgo.Database) txn.Runner {
+		return s.txnRunner
+	}
+	s.PatchValue(blobstore.TxnRunner, txnRunnerFunc)
+}
+
+func (s *resourceCatalogSuite) TearDownTest(c *gc.C) {
+	s.MgoSuite.TearDownTest(c)
+	s.IsolationSuite.TearDownTest(c)
+}
+
+func (s *resourceCatalogSuite) assertPut(c *gc.C, expectedNew bool, sha384Hash string) (
+	id, path string,
+) {
+	id, path, err := s.rCatalog.Put(sha384Hash, 200)
+	c.Assert(err, gc.IsNil)
+	c.Assert(id, gc.Not(gc.Equals), "")
+	c.Assert(path, gc.Equals, "")
+	s.assertGetPending(c, id)
+	return id, path
+}
+
+func (s *resourceCatalogSuite) assertGetPending(c *gc.C, id string) {
+	r, err := s.rCatalog.Get(id)
+	c.Assert(err, gc.Equals, blobstore.ErrUploadPending)
+	c.Assert(r, gc.IsNil)
+}
+
+func (s *resourceCatalogSuite) assertGetUploaded(c *gc.C, id string, hash string, length int64) {
+	r, err := s.rCatalog.Get(id)
+	c.Assert(err, gc.IsNil)
+	c.Assert(r.SHA384Hash, gc.DeepEquals, hash)
+	c.Assert(r.Length, gc.Equals, length)
+	c.Assert(r.Path, gc.Not(gc.Equals), "")
+}
+
+type resourceDoc struct {
+	Id       bson.ObjectId `bson:"_id"`
+	RefCount int64
+}
+
+func (s *resourceCatalogSuite) assertRefCount(c *gc.C, id string, expected int64) {
+	var doc resourceDoc
+	err := s.collection.FindId(id).One(&doc)
+	c.Assert(err, gc.IsNil)
+	c.Assert(doc.RefCount, gc.Equals, expected)
+}
+
+func (s *resourceCatalogSuite) TestPut(c *gc.C) {
+	id, _ := s.assertPut(c, true, "sha384foo")
+	s.assertRefCount(c, id, 1)
+}
+
+func (s *resourceCatalogSuite) TestPutLengthMismatch(c *gc.C) {
+	id, _ := s.assertPut(c, true, "sha384foo")
+	_, _, err := s.rCatalog.Put("sha384foo", 100)
+	c.Assert(err, gc.ErrorMatches, "length mismatch in resource document 200 != 100")
+	s.assertRefCount(c, id, 1)
+}
+
+func (s *resourceCatalogSuite) TestPutSameHashesIncRefCount(c *gc.C) {
+	id, _ := s.assertPut(c, true, "sha384foo")
+	s.assertPut(c, false, "sha384foo")
+	s.assertRefCount(c, id, 2)
+}
+
+func (s *resourceCatalogSuite) TestGetNonExistent(c *gc.C) {
+	_, err := s.rCatalog.Get(bson.NewObjectId().Hex())
+	c.Assert(err, gc.ErrorMatches, `resource with id ".*" not found`)
+}
+
+func (s *resourceCatalogSuite) TestGet(c *gc.C) {
+	id, path, err := s.rCatalog.Put("sha384foo", 100)
+	c.Assert(err, gc.IsNil)
+	c.Assert(path, gc.Equals, "")
+	s.assertGetPending(c, id)
+}
+
+func (s *resourceCatalogSuite) TestFindNonExistent(c *gc.C) {
+	_, err := s.rCatalog.Find("sha384foo")
+	c.Assert(err, gc.ErrorMatches, `resource with sha384=.* not found`)
+}
+
+func (s *resourceCatalogSuite) TestFind(c *gc.C) {
+	id, path, err := s.rCatalog.Put("sha384foo", 100)
+	c.Assert(err, gc.IsNil)
+	c.Assert(path, gc.Equals, "")
+	err = s.rCatalog.UploadComplete(id, "wherever")
+	c.Assert(err, gc.IsNil)
+	foundId, err := s.rCatalog.Find("sha384foo")
+	c.Assert(err, gc.IsNil)
+	c.Assert(foundId, gc.Equals, id)
+}
+
+func (s *resourceCatalogSuite) TestUploadComplete(c *gc.C) {
+	id, _, err := s.rCatalog.Put("sha384foo", 100)
+	c.Assert(err, gc.IsNil)
+	s.assertGetPending(c, id)
+	err = s.rCatalog.UploadComplete(id, "wherever")
+	c.Assert(err, gc.IsNil)
+	s.assertGetUploaded(c, id, "sha384foo", 100)
+	// A second call yields an AlreadyExists error.
+	err = s.rCatalog.UploadComplete(id, "wherever")
+	c.Assert(err, jc.Satisfies, errors.IsAlreadyExists)
+	s.assertGetUploaded(c, id, "sha384foo", 100)
+}
+
+func (s *resourceCatalogSuite) TestRemoveOnlyRecord(c *gc.C) {
+	id, path := s.assertPut(c, true, "sha384foo")
+	wasDeleted, removedPath, err := s.rCatalog.Remove(id)
+	c.Assert(err, gc.IsNil)
+	c.Assert(wasDeleted, jc.IsTrue)
+	c.Assert(removedPath, gc.Equals, path)
+	_, err = s.rCatalog.Get(id)
+	c.Assert(err, gc.ErrorMatches, `resource with id ".*" not found`)
+}
+
+func (s *resourceCatalogSuite) TestRemoveDecRefCount(c *gc.C) {
+	id, _ := s.assertPut(c, true, "sha384foo")
+	s.assertPut(c, false, "sha384foo")
+	s.assertRefCount(c, id, 2)
+	wasDeleted, _, err := s.rCatalog.Remove(id)
+	c.Assert(err, gc.IsNil)
+	c.Assert(wasDeleted, jc.IsFalse)
+	s.assertRefCount(c, id, 1)
+	s.assertGetPending(c, id)
+}
+
+func (s *resourceCatalogSuite) TestRemoveLastCopy(c *gc.C) {
+	id, _ := s.assertPut(c, true, "sha384foo")
+	s.assertPut(c, false, "sha384foo")
+	s.assertRefCount(c, id, 2)
+	_, _, err := s.rCatalog.Remove(id)
+	c.Assert(err, gc.IsNil)
+	s.assertRefCount(c, id, 1)
+	_, _, err = s.rCatalog.Remove(id)
+	c.Assert(err, gc.IsNil)
+	_, err = s.rCatalog.Get(id)
+	c.Assert(err, gc.ErrorMatches, `resource with id ".*" not found`)
+}
+
+func (s *resourceCatalogSuite) TestRemoveNonExistent(c *gc.C) {
+	_, _, err := s.rCatalog.Remove(bson.NewObjectId().Hex())
+	c.Assert(err, gc.ErrorMatches, `resource with id ".*" not found`)
+}
+
+func (s *resourceCatalogSuite) TestPutNewResourceRace(c *gc.C) {
+	var firstId string
+	beforeFuncs := []func(){
+		func() { firstId, _ = s.assertPut(c, true, "sha384foo") },
+	}
+	defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFuncs...).Check()
+	id, _, err := s.rCatalog.Put("sha384foo", 200)
+	c.Assert(err, gc.IsNil)
+	c.Assert(id, gc.Equals, firstId)
+	err = s.rCatalog.UploadComplete(id, "wherever")
+	c.Assert(err, gc.IsNil)
+	r, err := s.rCatalog.Get(id)
+	c.Assert(err, gc.IsNil)
+	s.assertRefCount(c, id, 2)
+	c.Assert(r.SHA384Hash, gc.Equals, "sha384foo")
+	c.Assert(int(r.Length), gc.Equals, 200)
+}
+
+func (s *resourceCatalogSuite) TestPutDeletedResourceRace(c *gc.C) {
+	firstId, _ := s.assertPut(c, true, "sha384foo")
+	err := s.rCatalog.UploadComplete(firstId, "wherever")
+	c.Assert(err, gc.IsNil)
+	beforeFuncs := []func(){
+		func() {
+			_, _, err := s.rCatalog.Remove(firstId)
+			c.Assert(err, gc.IsNil)
+		},
+	}
+	defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFuncs...).Check()
+	id, _, err := s.rCatalog.Put("sha384foo", 200)
+	c.Assert(err, gc.IsNil)
+	c.Assert(firstId, gc.Equals, id)
+	err =
s.rCatalog.UploadComplete(id, "wherever") + c.Assert(err, gc.IsNil) + r, err := s.rCatalog.Get(id) + c.Assert(err, gc.IsNil) + s.assertRefCount(c, id, 1) + c.Assert(r.SHA384Hash, gc.Equals, "sha384foo") + c.Assert(r.Length, gc.Equals, int64(200)) +} + +func (s *resourceCatalogSuite) TestDeleteResourceRace(c *gc.C) { + id, _ := s.assertPut(c, true, "sha384foo") + s.assertPut(c, false, "sha384foo") + beforeFuncs := []func(){ + func() { + _, _, err := s.rCatalog.Remove(id) + c.Assert(err, gc.IsNil) + }, + } + defer txntesting.SetBeforeHooks(c, s.txnRunner, beforeFuncs...).Check() + _, _, err := s.rCatalog.Remove(id) + c.Assert(err, gc.IsNil) + _, err = s.rCatalog.Get(id) + c.Assert(err, gc.ErrorMatches, `resource with id ".*" not found`) +} + +func (s *resourceCatalogSuite) TestUploadCompleteDeleted(c *gc.C) { + id, _, err := s.rCatalog.Put("sha384foo", 100) + c.Assert(err, gc.IsNil) + remove := func() { + _, _, err := s.rCatalog.Remove(id) + c.Assert(err, gc.IsNil) + } + defer txntesting.SetBeforeHooks(c, s.txnRunner, remove).Check() + err = s.rCatalog.UploadComplete(id, "wherever") + c.Assert(err, jc.Satisfies, errors.IsNotFound) +} === removed directory 'src/gopkg.in/juju/charm.v5' === removed file 'src/gopkg.in/juju/charm.v5/.gitignore' --- src/gopkg.in/juju/charm.v5/.gitignore 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/.gitignore 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -tags -TAGS -.emacs* -*.test -*.sw[nop] === removed file 'src/gopkg.in/juju/charm.v5/HACKING.md' --- src/gopkg.in/juju/charm.v5/HACKING.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/HACKING.md 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -# HACKING - -See README for information about gopkg.in - -## Developing - -If you are to develop on a versioned branch, use gopkg.in. - - go get -u -v -t gopkg.in/juju/charm.v2/... - -gopkg.in names the local branch master. To submit a pull request, push to -your github branch using a refspec which reflects the version tag you are using. - - git push git@github.com:jrwren/charm +master:v2 === removed file 'src/gopkg.in/juju/charm.v5/LICENCE' --- src/gopkg.in/juju/charm.v5/LICENCE 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/LICENCE 1970-01-01 00:00:00 +0000 @@ -1,191 +0,0 @@ -All files in this repository are licensed as follows. If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. - -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. 
This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. 
- - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. 
If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. === removed file 'src/gopkg.in/juju/charm.v5/README.md' --- src/gopkg.in/juju/charm.v5/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/README.md 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -Juju charms -=========== - -This package parses juju charms. - -## Versions - -Stable versions of this API are available on gopkg.in at -gopkg.in/juju/charm.vD where D is a version spec. If you are viewing this -readme on github.com you can click the 'branch:' button above to view tags -and branches. See http://labix.org/gopkg.in for more information. === removed file 'src/gopkg.in/juju/charm.v5/actions.go' --- src/gopkg.in/juju/charm.v5/actions.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/actions.go 1970-01-01 00:00:00 +0000 @@ -1,279 +0,0 @@ -// Copyright 2011-2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - "io" - "io/ioutil" - "regexp" - "strings" - - "github.com/juju/errors" - gjs "github.com/juju/gojsonschema" - "gopkg.in/yaml.v1" -) - -var prohibitedSchemaKeys = map[string]bool{"$ref": true, "$schema": true} - -var actionNameRule = regexp.MustCompile("^[a-z](?:[a-z-]*[a-z])?$") - -// Actions defines the available actions for the charm. Additional params -// may be added as metadata at a future time (e.g. version.) -type Actions struct { - ActionSpecs map[string]ActionSpec `yaml:"actions,omitempty" bson:",omitempty"` -} - -// Build this out further if it becomes necessary. -func NewActions() *Actions { - return &Actions{} -} - -// ActionSpec is a definition of the parameters and traits of an Action. -// The Params map is expected to conform to JSON-Schema Draft 4 as defined at -// http://json-schema.org/draft-04/schema# (see http://json-schema.org/latest/json-schema-core.html) -type ActionSpec struct { - Description string - Params map[string]interface{} -} - -// ValidateParams validates the passed params map against the given ActionSpec -// and returns any error encountered. -// Usage: -// err := ch.Actions().ActionSpecs["snapshot"].ValidateParams(someMap) -func (spec *ActionSpec) ValidateParams(params map[string]interface{}) error { - // Load the schema from the Charm. - specLoader := gjs.NewGoLoader(spec.Params) - schema, err := gjs.NewSchema(specLoader) - if err != nil { - return err - } - - // Load the params as a document to validate. - // If an empty map was passed, we need an empty map to validate against. 
- p := map[string]interface{}{} - if len(params) > 0 { - p = params - } - docLoader := gjs.NewGoLoader(p) - results, err := schema.Validate(docLoader) - if err != nil { - return err - } - if results.Valid() { - return nil - } - - // Handle any errors generated by the Validate(). - var errorStrings []string - for _, validationError := range results.Errors() { - errorStrings = append(errorStrings, validationError.String()) - } - return errors.Errorf("validation failed: %s", strings.Join(errorStrings, "; ")) -} - -// InsertDefaults inserts the schema's default values in target using -// github.com/juju/gojsonschema. If a nil target is received, an empty map -// will be created as the target. The target is then mutated to include the -// defaults. -// -// The returned map will be the transformed or created target map. -func (spec *ActionSpec) InsertDefaults(target map[string]interface{}) (map[string]interface{}, error) { - specLoader := gjs.NewGoLoader(spec.Params) - schema, err := gjs.NewSchema(specLoader) - if err != nil { - return target, err - } - - return schema.InsertDefaults(target) -} - -// ReadActionsYaml builds an Actions spec from a charm's actions.yaml. -func ReadActionsYaml(r io.Reader) (*Actions, error) { - data, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - result := &Actions{ - ActionSpecs: map[string]ActionSpec{}, - } - - var unmarshaledActions map[string]map[string]interface{} - if err := yaml.Unmarshal(data, &unmarshaledActions); err != nil { - return nil, err - } - - for name, actionSpec := range unmarshaledActions { - if valid := actionNameRule.MatchString(name); !valid { - return nil, fmt.Errorf("bad action name %s", name) - } - - desc := "No description" - thisActionSchema := map[string]interface{}{ - "description": desc, - "type": "object", - "title": name, - "properties": map[string]interface{}{}, - } - - for key, value := range actionSpec { - switch key { - case "description": - // These fields must be strings. - typed, ok := value.(string) - if !ok { - return nil, errors.Errorf("value for schema key %q must be a string", key) - } - thisActionSchema[key] = typed - desc = typed - case "title": - // These fields must be strings. - typed, ok := value.(string) - if !ok { - return nil, errors.Errorf("value for schema key %q must be a string", key) - } - thisActionSchema[key] = typed - case "required": - typed, ok := value.([]interface{}) - if !ok { - return nil, errors.Errorf("value for schema key %q must be a YAML list", key) - } - thisActionSchema[key] = typed - case "params": - // Clean any map[interface{}]interface{}s out so they don't - // cause problems with BSON serialization later. - cleansedParams, err := cleanse(value) - if err != nil { - return nil, err - } - - // JSON-Schema must be a map - typed, ok := cleansedParams.(map[string]interface{}) - if !ok { - return nil, errors.New("params failed to parse as a map") - } - thisActionSchema["properties"] = typed - default: - // In case this has nested maps, we must clean them out. - typed, err := cleanse(value) - if err != nil { - return nil, err - } - thisActionSchema[key] = typed - } - } - - // Make sure the new Params doc conforms to JSON-Schema - // Draft 4 (http://json-schema.org/latest/json-schema-core.html) - schemaLoader := gjs.NewGoLoader(thisActionSchema) - _, err := gjs.NewSchema(schemaLoader) - if err != nil { - return nil, errors.Annotatef(err, "invalid params schema for action schema %s", name) - } - - // Now assign the resulting schema to the final entry for the result.
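Taken together, ValidateParams and InsertDefaults give the usual round trip for action parameters. A minimal usage sketch, assuming a charm value ch whose actions.yaml defines a "snapshot" action (the action and parameter names here are illustrative, not part of this package):

    // Sketch: validate caller-supplied params against the action's
    // JSON-Schema, then fill in any schema defaults.
    spec := ch.Actions().ActionSpecs["snapshot"]
    params := map[string]interface{}{"outfile": "out-2014-06-12.bz2"}
    if err := spec.ValidateParams(params); err != nil {
            return err // params do not conform to the schema
    }
    params, err := spec.InsertDefaults(params) // returns the mutated target
    if err != nil {
            return err
    }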
- result.ActionSpecs[name] = ActionSpec{ - Description: desc, - Params: thisActionSchema, - } - } - return result, nil -} - -// cleanse rejects schemas containing references or maps keyed with non- -// strings, and coerces acceptable maps to contain only maps with string keys. -func cleanse(input interface{}) (interface{}, error) { - switch typedInput := input.(type) { - - // In this case, recurse in. - case map[string]interface{}: - newMap := make(map[string]interface{}) - for key, value := range typedInput { - - if prohibitedSchemaKeys[key] { - return nil, fmt.Errorf("schema key %q not compatible with this version of juju", key) - } - - newValue, err := cleanse(value) - if err != nil { - return nil, err - } - newMap[key] = newValue - } - return newMap, nil - - // Coerce keys to strings and error out if there's a problem; then recurse. - case map[interface{}]interface{}: - newMap := make(map[string]interface{}) - for key, value := range typedInput { - typedKey, ok := key.(string) - if !ok { - return nil, errors.New("map keyed with non-string value") - } - newMap[typedKey] = value - } - return cleanse(newMap) - - // Recurse - case []interface{}: - newSlice := make([]interface{}, 0) - for _, sliceValue := range typedInput { - newSliceValue, err := cleanse(sliceValue) - if err != nil { - return nil, errors.New("map keyed with non-string value") - } - newSlice = append(newSlice, newSliceValue) - } - return newSlice, nil - - // Other kinds of values are OK. - default: - return input, nil - } -} - -// recurseMapOnKeys returns the value of a map keyed recursively by the -// strings given in "keys". Thus, recurseMapOnKeys({a,b}, {a:{b:{c:d}}}) -// would return {c:d}. -func recurseMapOnKeys(keys []string, params map[string]interface{}) (interface{}, bool) { - key, rest := keys[0], keys[1:] - answer, ok := params[key] - - // If we're out of keys, we have our answer. - if len(rest) == 0 { - return answer, ok - } - - // If we're not out of keys, but we tried a key that wasn't in the - // map, there's no answer. - if !ok { - return nil, false - } - - switch typed := answer.(type) { - // If our value is a map[s]i{}, we can keep recursing. - case map[string]interface{}: - return recurseMapOnKeys(keys[1:], typed) - // If it's a map[i{}]i{}, we need to check whether it's a map[s]i{}. - case map[interface{}]interface{}: - m := make(map[string]interface{}) - for k, v := range typed { - if tK, ok := k.(string); ok { - m[tK] = v - } else { - // If it's not, we don't have something we - // can work with. - return nil, false - } - } - // If it is, recurse into it. - return recurseMapOnKeys(keys[1:], m) - - // Otherwise, we're trying to recurse into something we don't know - // how to deal with, so our answer is that we don't have an answer. - default: - return nil, false - } -} === removed file 'src/gopkg.in/juju/charm.v5/actions_test.go' --- src/gopkg.in/juju/charm.v5/actions_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/actions_test.go 1970-01-01 00:00:00 +0000 @@ -1,864 +0,0 @@ -// Copyright 2011-2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
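The cleanse helper above exists because yaml.v1 unmarshals nested YAML mappings as map[interface{}]interface{}, which neither encoding/json nor BSON can serialize. A small standalone illustration of the underlying problem (a sketch, not part of this package; the output is shown in the comment):

    package main

    import (
            "encoding/json"
            "fmt"

            "gopkg.in/yaml.v1"
    )

    func main() {
            var v map[string]interface{}
            _ = yaml.Unmarshal([]byte("a:\n  b: c\n"), &v)
            // v["a"] is a map[interface{}]interface{}, so encoding fails:
            _, err := json.Marshal(v)
            fmt.Println(err) // json: unsupported type: map[interface {}]interface {}
    }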
- -package charm - -import ( - "bytes" - "encoding/json" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" -) - -type ActionsSuite struct{} - -var _ = gc.Suite(&ActionsSuite{}) - -func (s *ActionsSuite) TestNewActions(c *gc.C) { - emptyAction := NewActions() - c.Assert(emptyAction, jc.DeepEquals, &Actions{}) -} - -func (s *ActionsSuite) TestValidateOk(c *gc.C) { - for i, test := range []struct { - description string - actionSpec *ActionSpec - objectToValidate map[string]interface{} - }{{ - description: "Validation of an empty object is ok.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}}}}, - objectToValidate: nil, - }, { - description: "Validation of one required value.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}}, - "required": []interface{}{"outfile"}}}, - objectToValidate: map[string]interface{}{ - "outfile": "out-2014-06-12.bz2", - }, - }, { - description: "Validation of one required and one optional value.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}, - "quality": map[string]interface{}{ - "description": "Compression quality", - "type": "integer", - "minimum": 0, - "maximum": 9}}, - "required": []interface{}{"outfile"}}}, - objectToValidate: map[string]interface{}{ - "outfile": "out-2014-06-12.bz2", - }, - }, { - description: "Validation of an optional, range limited value.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}, - "quality": map[string]interface{}{ - "description": "Compression quality", - "type": "integer", - "minimum": 0, - "maximum": 9}}, - "required": []interface{}{"outfile"}}}, - objectToValidate: map[string]interface{}{ - "outfile": "out-2014-06-12.bz2", - "quality": 5, - }, - }} { - c.Logf("test %d: %s", i, test.description) - err := test.actionSpec.ValidateParams(test.objectToValidate) - c.Assert(err, jc.ErrorIsNil) - } -} - -func (s *ActionsSuite) TestValidateFail(c *gc.C) { - var validActionTests = []struct { - description string - actionSpec *ActionSpec - badActionJson string - expectedError string - }{{ - description: "Validation of one required value.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - 
"description": "The file to write out to.", - "type": "string"}}, - "required": []interface{}{"outfile"}}}, - badActionJson: `{"outfile": 5}`, - expectedError: "validation failed: (root).outfile : must be of type string, given 5", - }, { - description: "Restrict to only one property", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}}, - "required": []interface{}{"outfile"}, - "additionalProperties": false}}, - badActionJson: `{"outfile": "foo.bz", "bar": "foo"}`, - expectedError: "validation failed: (root) : additional property \"bar\" is not allowed, given {\"bar\":\"foo\",\"outfile\":\"foo.bz\"}", - }, { - description: "Validation of one required and one optional value.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}, - "quality": map[string]interface{}{ - "description": "Compression quality", - "type": "integer", - "minimum": 0, - "maximum": 9}}, - "required": []interface{}{"outfile"}}}, - badActionJson: `{"quality": 5}`, - expectedError: "validation failed: (root) : \"outfile\" property is missing and required, given {\"quality\":5}", - }, { - description: "Validation of an optional, range limited value.", - actionSpec: &ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}, - "quality": map[string]interface{}{ - "description": "Compression quality", - "type": "integer", - "minimum": 0, - "maximum": 9}}, - "required": []interface{}{"outfile"}}}, - badActionJson: ` -{ "outfile": "out-2014-06-12.bz2", "quality": "two" }`, - expectedError: "validation failed: (root).quality : must be of type integer, given \"two\"", - }} - - for i, test := range validActionTests { - c.Logf("test %d: %s", i, test.description) - var params map[string]interface{} - jsonBytes := []byte(test.badActionJson) - err := json.Unmarshal(jsonBytes, ¶ms) - c.Assert(err, gc.IsNil) - err = test.actionSpec.ValidateParams(params) - c.Assert(err.Error(), gc.Equals, test.expectedError) - } -} - -func (s *ActionsSuite) TestCleanseOk(c *gc.C) { - - var goodInterfaceTests = []struct { - description string - acceptableInterface map[string]interface{} - expectedInterface map[string]interface{} - }{{ - description: "An interface requiring no changes.", - acceptableInterface: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - "key3": map[string]interface{}{ - "foo1": "val1", - "foo2": "val2"}}, - expectedInterface: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - "key3": map[string]interface{}{ - "foo1": "val1", - "foo2": "val2"}}, - }, { - description: "Substitute a single inner map[i]i.", - acceptableInterface: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - "key3": map[interface{}]interface{}{ - "foo1": 
"val1", - "foo2": "val2"}}, - expectedInterface: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - "key3": map[string]interface{}{ - "foo1": "val1", - "foo2": "val2"}}, - }, { - description: "Substitute nested inner map[i]i.", - acceptableInterface: map[string]interface{}{ - "key1a": "val1a", - "key2a": "val2a", - "key3a": map[interface{}]interface{}{ - "key1b": "val1b", - "key2b": map[interface{}]interface{}{ - "key1c": "val1c"}}}, - expectedInterface: map[string]interface{}{ - "key1a": "val1a", - "key2a": "val2a", - "key3a": map[string]interface{}{ - "key1b": "val1b", - "key2b": map[string]interface{}{ - "key1c": "val1c"}}}, - }, { - description: "Substitute nested map[i]i within []i.", - acceptableInterface: map[string]interface{}{ - "key1a": "val1a", - "key2a": []interface{}{5, "foo", map[string]interface{}{ - "key1b": "val1b", - "key2b": map[interface{}]interface{}{ - "key1c": "val1c"}}}}, - expectedInterface: map[string]interface{}{ - "key1a": "val1a", - "key2a": []interface{}{5, "foo", map[string]interface{}{ - "key1b": "val1b", - "key2b": map[string]interface{}{ - "key1c": "val1c"}}}}, - }} - - for i, test := range goodInterfaceTests { - c.Logf("test %d: %s", i, test.description) - cleansedInterfaceMap, err := cleanse(test.acceptableInterface) - c.Assert(err, gc.IsNil) - c.Assert(cleansedInterfaceMap, jc.DeepEquals, test.expectedInterface) - } -} - -func (s *ActionsSuite) TestCleanseFail(c *gc.C) { - - var badInterfaceTests = []struct { - description string - failInterface map[string]interface{} - expectedError string - }{{ - description: "An inner map[interface{}]interface{} with an int key.", - failInterface: map[string]interface{}{ - "key1": "value1", - "key2": "value2", - "key3": map[interface{}]interface{}{ - "foo1": "val1", - 5: "val2"}}, - expectedError: "map keyed with non-string value", - }, { - description: "An inner []interface{} containing a map[i]i with an int key.", - failInterface: map[string]interface{}{ - "key1a": "val1b", - "key2a": "val2b", - "key3a": []interface{}{"foo1", 5, map[interface{}]interface{}{ - "key1b": "val1b", - "key2b": map[interface{}]interface{}{ - "key1c": "val1c", - 5: "val2c"}}}}, - expectedError: "map keyed with non-string value", - }} - - for i, test := range badInterfaceTests { - c.Logf("test %d: %s", i, test.description) - _, err := cleanse(test.failInterface) - c.Assert(err, gc.NotNil) - c.Assert(err.Error(), gc.Equals, test.expectedError) - } -} - -func (s *ActionsSuite) TestReadGoodActionsYaml(c *gc.C) { - var goodActionsYamlTests = []struct { - description string - yaml string - expectedActions *Actions - }{{ - description: "A simple snapshot actions YAML with one parameter.", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: - outfile: - description: "The file to write out to." 
- type: string - required: ["outfile"] -`, - expectedActions: &Actions{map[string]ActionSpec{ - "snapshot": ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}}, - "required": []interface{}{"outfile"}}}}}, - }, { - description: "An empty Actions definition.", - yaml: "", - expectedActions: &Actions{ - ActionSpecs: map[string]ActionSpec{}, - }, - }, { - description: "A more complex schema with hyphenated names and multiple parameters.", - yaml: ` -snapshot: - description: "Take a snapshot of the database." - params: - outfile: - description: "The file to write out to." - type: "string" - compression-quality: - description: "The compression quality." - type: "integer" - minimum: 0 - maximum: 9 - exclusiveMaximum: false -remote-sync: - description: "Sync a file to a remote host." - params: - file: - description: "The file to send out." - type: "string" - format: "uri" - remote-uri: - description: "The host to sync to." - type: "string" - format: "uri" - util: - description: "The util to perform the sync (rsync or scp.)" - type: "string" - enum: ["rsync", "scp"] - required: ["file", "remote-uri"] -`, - expectedActions: &Actions{map[string]ActionSpec{ - "snapshot": ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string"}, - "compression-quality": map[string]interface{}{ - "description": "The compression quality.", - "type": "integer", - "minimum": 0, - "maximum": 9, - "exclusiveMaximum": false}}}}, - "remote-sync": ActionSpec{ - Description: "Sync a file to a remote host.", - Params: map[string]interface{}{ - "title": "remote-sync", - "description": "Sync a file to a remote host.", - "type": "object", - "properties": map[string]interface{}{ - "file": map[string]interface{}{ - "description": "The file to send out.", - "type": "string", - "format": "uri"}, - "remote-uri": map[string]interface{}{ - "description": "The host to sync to.", - "type": "string", - "format": "uri"}, - "util": map[string]interface{}{ - "description": "The util to perform the sync (rsync or scp.)", - "type": "string", - "enum": []interface{}{"rsync", "scp"}}}, - "required": []interface{}{"file", "remote-uri"}}}}}, - }, { - description: "A schema with other keys, e.g. \"definitions\"", - yaml: ` -snapshot: - description: "Take a snapshot of the database." - params: - outfile: - description: "The file to write out to." - type: "string" - compression-quality: - description: "The compression quality." 
- type: "integer" - minimum: 0 - maximum: 9 - exclusiveMaximum: false - definitions: - diskdevice: {} - something-else: {} -`, - expectedActions: &Actions{map[string]ActionSpec{ - "snapshot": ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string", - }, - "compression-quality": map[string]interface{}{ - "description": "The compression quality.", - "type": "integer", - "minimum": 0, - "maximum": 9, - "exclusiveMaximum": false, - }, - }, - "definitions": map[string]interface{}{ - "diskdevice": map[string]interface{}{}, - "something-else": map[string]interface{}{}, - }, - }, - }, - }}, - }, { - description: "A schema with no \"params\" key, implying no options.", - yaml: ` -snapshot: - description: Take a snapshot of the database. -`, - - expectedActions: &Actions{map[string]ActionSpec{ - "snapshot": ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "description": "Take a snapshot of the database.", - "title": "snapshot", - "type": "object", - "properties": map[string]interface{}{}, - }}}}, - }, { - description: "A schema with no values at all, implying no options.", - yaml: ` -snapshot: -`, - - expectedActions: &Actions{map[string]ActionSpec{ - "snapshot": ActionSpec{ - Description: "No description", - Params: map[string]interface{}{ - "description": "No description", - "title": "snapshot", - "type": "object", - "properties": map[string]interface{}{}, - }}}}, - }} - - // Beginning of testing loop - for i, test := range goodActionsYamlTests { - c.Logf("test %d: %s", i, test.description) - reader := bytes.NewReader([]byte(test.yaml)) - loadedAction, err := ReadActionsYaml(reader) - c.Assert(err, gc.IsNil) - c.Check(loadedAction, jc.DeepEquals, test.expectedActions) - } -} - -func (s *ActionsSuite) TestReadBadActionsYaml(c *gc.C) { - - var badActionsYamlTests = []struct { - description string - yaml string - expectedError string - }{{ - description: "Reject JSON-Schema containing references.", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: - $schema: "http://json-schema.org/draft-03/schema#" -`, - expectedError: "schema key \"$schema\" not compatible with this version of juju", - }, { - description: "Reject JSON-Schema containing references.", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: - outfile: { $ref: "http://json-schema.org/draft-03/schema#" } -`, - expectedError: "schema key \"$ref\" not compatible with this version of juju", - }, { - description: "Malformed YAML: missing key in \"outfile\".", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: - outfile: - The file to write out to. - type: string - default: foo.bz2 -`, - - expectedError: "YAML error: line 6: mapping values are not allowed in this context", - }, { - description: "Malformed JSON-Schema: $schema element misplaced.", - yaml: ` -snapshot: -description: Take a snapshot of the database. - params: - outfile: - $schema: http://json-schema.org/draft-03/schema# - description: The file to write out to. 
- type: string - default: foo.bz2 -`, - - expectedError: "YAML error: line 3: mapping values are not allowed in this context", - }, { - description: "Malformed Actions: hyphen at beginning of action name.", - yaml: ` --snapshot: - description: Take a snapshot of the database. -`, - - expectedError: "bad action name -snapshot", - }, { - description: "Malformed Actions: hyphen after action name.", - yaml: ` -snapshot-: - description: Take a snapshot of the database. -`, - - expectedError: "bad action name snapshot-", - }, { - description: "Malformed Actions: caps in action name.", - yaml: ` -Snapshot: - description: Take a snapshot of the database. -`, - - expectedError: "bad action name Snapshot", - }, { - description: "A non-string description fails to parse", - yaml: ` -snapshot: - description: ["Take a snapshot of the database."] -`, - expectedError: "value for schema key \"description\" must be a string", - }, { - description: "A non-list \"required\" key", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: - outfile: - description: "The file to write out to." - type: string - required: "outfile" -`, - expectedError: "value for schema key \"required\" must be a YAML list", - }, { - description: "A schema with an empty \"params\" key fails to parse", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: -`, - expectedError: "params failed to parse as a map", - }, { - description: "A schema with a non-map \"params\" value fails to parse", - yaml: ` -snapshot: - description: Take a snapshot of the database. - params: ["a", "b"] -`, - expectedError: "params failed to parse as a map", - }, { - description: "\"definitions\" goes against JSON-Schema definition", - yaml: ` -snapshot: - description: "Take a snapshot of the database." - params: - outfile: - description: "The file to write out to." - type: "string" - definitions: - diskdevice: ["a"] - something-else: {"a": "b"} -`, - expectedError: "invalid params schema for action schema snapshot: definitions must be of type array of schemas", - }, { - description: "excess keys not in the JSON-Schema spec will be rejected", - yaml: ` -snapshot: - description: "Take a snapshot of the database." - params: - outfile: - description: "The file to write out to." - type: "string" - compression-quality: - description: "The compression quality." 
- type: "integer" - minimum: 0 - maximum: 9 - exclusiveMaximum: false - definitions: - diskdevice: {} - something-else: {} - other-key: ["some", "values"], -`, - expectedError: "YAML error: line 16: did not find expected key", - }} - - for i, test := range badActionsYamlTests { - c.Logf("test %d: %s", i, test.description) - reader := bytes.NewReader([]byte(test.yaml)) - _, err := ReadActionsYaml(reader) - c.Assert(err, gc.NotNil) - c.Check(err.Error(), gc.Equals, test.expectedError) - } -} - -func (s *ActionsSuite) TestRecurseMapOnKeys(c *gc.C) { - tests := []struct { - should string - givenKeys []string - givenMap map[string]interface{} - expected interface{} - shouldFail bool - }{{ - should: "fail if the specified key was not in the map", - givenKeys: []string{"key", "key2"}, - givenMap: map[string]interface{}{ - "key": map[string]interface{}{ - "key": "value", - }, - }, - shouldFail: true, - }, { - should: "fail if a key was not a string", - givenKeys: []string{"key", "key2"}, - givenMap: map[string]interface{}{ - "key": map[interface{}]interface{}{ - 5: "value", - }, - }, - shouldFail: true, - }, { - should: "fail if we have more keys but not a recursable val", - givenKeys: []string{"key", "key2"}, - givenMap: map[string]interface{}{ - "key": []string{"a", "b", "c"}, - }, - shouldFail: true, - }, { - should: "retrieve a good value", - givenKeys: []string{"key", "key2"}, - givenMap: map[string]interface{}{ - "key": map[string]interface{}{ - "key2": "value", - }, - }, - expected: "value", - }, { - should: "retrieve a map", - givenKeys: []string{"key"}, - givenMap: map[string]interface{}{ - "key": map[string]interface{}{ - "key": "value", - }, - }, - expected: map[string]interface{}{ - "key": "value", - }, - }, { - should: "retrieve a slice", - givenKeys: []string{"key"}, - givenMap: map[string]interface{}{ - "key": []string{"a", "b", "c"}, - }, - expected: []string{"a", "b", "c"}, - }} - - for i, t := range tests { - c.Logf("test %d: should %s\n map: %#v\n keys: %#v", i, t.should, t.givenMap, t.givenKeys) - obtained, failed := recurseMapOnKeys(t.givenKeys, t.givenMap) - c.Assert(!failed, gc.Equals, t.shouldFail) - if !t.shouldFail { - c.Check(obtained, jc.DeepEquals, t.expected) - } - } -} - -func (s *ActionsSuite) TestInsertDefaultValues(c *gc.C) { - schemas := map[string]string{ - "simple": ` -act: - params: - val: - type: string - default: somestr -`[1:], - "complicated": ` -act: - params: - val: - type: object - properties: - foo: - type: string - bar: - type: object - properties: - baz: - type: string - default: boz -`[1:], - "default-object": ` -act: - params: - val: - type: object - default: - foo: bar - bar: - baz: woz -`[1:], - "none": ` -act: - params: - val: - type: object - properties: - var: - type: object - properties: - x: - type: string -`[1:]} - - for i, t := range []struct { - should string - schema string - withParams map[string]interface{} - expectedResult map[string]interface{} - expectedError string - }{{ - should: "error with no schema", - expectedError: "schema must be of type object", - }, { - should: "create a map if handed nil", - schema: schemas["none"], - withParams: nil, - expectedResult: map[string]interface{}{}, - }, { - should: "create and fill target if handed nil", - schema: schemas["simple"], - withParams: nil, - expectedResult: map[string]interface{}{"val": "somestr"}, - }, { - should: "create a simple default value", - schema: schemas["simple"], - withParams: map[string]interface{}{}, - expectedResult: map[string]interface{}{"val": "somestr"}, - }, { 
- should: "do nothing for no default value", - schema: schemas["none"], - withParams: map[string]interface{}{}, - expectedResult: map[string]interface{}{}, - }, { - should: "insert a default value within a nested map", - schema: schemas["complicated"], - withParams: map[string]interface{}{}, - expectedResult: map[string]interface{}{ - "val": map[string]interface{}{ - "bar": map[string]interface{}{ - "baz": "boz", - }}}, - }, { - should: "create a default value which is an object", - schema: schemas["default-object"], - withParams: map[string]interface{}{}, - expectedResult: map[string]interface{}{ - "val": map[string]interface{}{ - "foo": "bar", - "bar": map[string]interface{}{ - "baz": "woz", - }}}, - }, { - should: "not overwrite existing values with default objects", - schema: schemas["default-object"], - withParams: map[string]interface{}{"val": 5}, - expectedResult: map[string]interface{}{"val": 5}, - }, { - should: "interleave defaults into existing objects", - schema: schemas["complicated"], - withParams: map[string]interface{}{ - "val": map[string]interface{}{ - "foo": "bar", - "bar": map[string]interface{}{ - "faz": "foz", - }}}, - expectedResult: map[string]interface{}{ - "val": map[string]interface{}{ - "foo": "bar", - "bar": map[string]interface{}{ - "baz": "boz", - "faz": "foz", - }}}, - }} { - c.Logf("test %d: should %s", i, t.should) - schema := getSchemaForAction(c, t.schema) - // Testing this method - result, err := schema.InsertDefaults(t.withParams) - if t.expectedError != "" { - c.Check(err, gc.ErrorMatches, t.expectedError) - continue - } - c.Assert(err, jc.ErrorIsNil) - c.Check(result, jc.DeepEquals, t.expectedResult) - } -} - -func getSchemaForAction(c *gc.C, wholeSchema string) ActionSpec { - // Load up the YAML schema definition. - reader := bytes.NewReader([]byte(wholeSchema)) - loadedActions, err := ReadActionsYaml(reader) - c.Assert(err, gc.IsNil) - // Same action name for all tests, "act". - return loadedActions.ActionSpecs["act"] -} === removed file 'src/gopkg.in/juju/charm.v5/bundle.go' --- src/gopkg.in/juju/charm.v5/bundle.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundle.go 1970-01-01 00:00:00 +0000 @@ -1,29 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import "os" - -// The Bundle interface is implemented by any type that -// may be handled as a bundle. It encapsulates all -// the data of a bundle. -type Bundle interface { - // Data returns the contents of the bundle's bundle.yaml file. - Data() *BundleData - // Data returns the contents of the bundle's README.md file. - ReadMe() string -} - -// ReadBundle reads a Bundle from path, which can point to either a -// bundle archive or a bundle directory. -func ReadBundle(path string) (Bundle, error) { - info, err := os.Stat(path) - if err != nil { - return nil, err - } - if info.IsDir() { - return ReadBundleDir(path) - } - return ReadBundleArchive(path) -} === removed file 'src/gopkg.in/juju/charm.v5/bundle_test.go' --- src/gopkg.in/juju/charm.v5/bundle_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundle_test.go 1970-01-01 00:00:00 +0000 @@ -1,75 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
- -package charm_test - -import ( - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -var _ = gc.Suite(&BundleSuite{}) - -type BundleSuite struct { - testing.IsolationSuite -} - -func (*BundleSuite) TestReadBundleDir(c *gc.C) { - path := TestCharms.BundleDirPath("wordpress-simple") - b, err := charm.ReadBundle(path) - c.Assert(err, gc.IsNil) - c.Assert(b, gc.FitsTypeOf, (*charm.BundleDir)(nil)) - checkWordpressBundle(c, b, path) -} - -func (*BundleSuite) TestReadBundleArchive(c *gc.C) { - path := TestCharms.BundleArchivePath(c.MkDir(), "wordpress-simple") - b, err := charm.ReadBundle(path) - c.Assert(err, gc.IsNil) - c.Assert(b, gc.FitsTypeOf, (*charm.BundleArchive)(nil)) - checkWordpressBundle(c, b, path) -} - -func checkWordpressBundle(c *gc.C, b charm.Bundle, path string) { - // Load the charms required by the bundle. - wordpressCharm := TestCharms.CharmDir("wordpress") - mysqlCharm := TestCharms.CharmDir("mysql") - - bd := b.Data() - c.Assert(bd.RequiredCharms(), jc.DeepEquals, []string{"mysql", "wordpress"}) - - charms := map[string]charm.Charm{ - "wordpress": wordpressCharm, - "mysql": mysqlCharm, - } - err := bd.VerifyWithCharms(verifyOk, charms) - c.Assert(err, gc.IsNil) - - c.Assert(bd.Services, jc.DeepEquals, map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "wordpress", - NumUnits: 1, - }, - "mysql": { - Charm: "mysql", - NumUnits: 1, - }, - }) - c.Assert(bd.Relations, jc.DeepEquals, [][]string{ - {"wordpress:db", "mysql:server"}, - }) - c.Assert(b.ReadMe(), gc.Equals, "A dummy bundle\n") - switch b := b.(type) { - case *charm.BundleArchive: - c.Assert(b.Path, gc.Equals, path) - case *charm.BundleDir: - c.Assert(b.Path, gc.Equals, path) - } -} - -func verifyOk(string) error { - return nil -} === removed file 'src/gopkg.in/juju/charm.v5/bundlearchive.go' --- src/gopkg.in/juju/charm.v5/bundlearchive.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundlearchive.go 1970-01-01 00:00:00 +0000 @@ -1,99 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "bytes" - "io" - "io/ioutil" - - ziputil "github.com/juju/utils/zip" -) - -type BundleArchive struct { - zopen zipOpener - - Path string - data *BundleData - readMe string -} - -// ReadBundleArchive reads a bundle archive from the given file path. -func ReadBundleArchive(path string) (*BundleArchive, error) { - a, err := readBundleArchive(newZipOpenerFromPath(path)) - if err != nil { - return nil, err - } - a.Path = path - return a, nil -} - -// ReadBundleArchiveBytes reads a bundle archive from the given byte -// slice. -func ReadBundleArchiveBytes(data []byte) (*BundleArchive, error) { - zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data))) - return readBundleArchive(zopener) -} - -// ReadBundleArchiveFromReader returns a BundleArchive that uses -// r to read the bundle. The given size must hold the number -// of available bytes in the file. -// -// Note that the caller is responsible for closing r - methods on -// the returned BundleArchive may fail after that.
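A sketch of the reader-based entry point just described, assuming the archive is already in memory as data []byte (this is essentially what ReadBundleArchiveBytes does internally):

    r := bytes.NewReader(data)
    a, err := charm.ReadBundleArchiveFromReader(r, int64(len(data)))
    if err != nil {
            return err
    }
    // r must remain usable for as long as a's methods are called.
    fmt.Println(a.ReadMe())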
-func ReadBundleArchiveFromReader(r io.ReaderAt, size int64) (*BundleArchive, error) { - return readBundleArchive(newZipOpenerFromReader(r, size)) -} - -func readBundleArchive(zopen zipOpener) (*BundleArchive, error) { - a := &BundleArchive{ - zopen: zopen, - } - zipr, err := zopen.openZip() - if err != nil { - return nil, err - } - defer zipr.Close() - reader, err := zipOpenFile(zipr, "bundle.yaml") - if err != nil { - return nil, err - } - a.data, err = ReadBundleData(reader) - reader.Close() - if err != nil { - return nil, err - } - reader, err = zipOpenFile(zipr, "README.md") - if err != nil { - return nil, err - } - readMe, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - a.readMe = string(readMe) - return a, nil -} - -// Data implements Bundle.Data. -func (a *BundleArchive) Data() *BundleData { - return a.data -} - -// ReadMe implements Bundle.ReadMe. -func (a *BundleArchive) ReadMe() string { - return a.readMe -} - -// ExpandTo expands the bundle archive into dir, creating it if necessary. -// If any errors occur during the expansion procedure, the process will -// abort. -func (a *BundleArchive) ExpandTo(dir string) error { - zipr, err := a.zopen.openZip() - if err != nil { - return err - } - defer zipr.Close() - return ziputil.ExtractAll(zipr.Reader, dir) -} === removed file 'src/gopkg.in/juju/charm.v5/bundlearchive_test.go' --- src/gopkg.in/juju/charm.v5/bundlearchive_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundlearchive_test.go 1970-01-01 00:00:00 +0000 @@ -1,98 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm_test - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -var _ = gc.Suite(&BundleArchiveSuite{}) - -type BundleArchiveSuite struct { - archivePath string -} - -func (s *BundleArchiveSuite) SetUpSuite(c *gc.C) { - s.archivePath = TestCharms.BundleArchivePath(c.MkDir(), "wordpress-simple") -} - -func (s *BundleArchiveSuite) TestReadBundleArchive(c *gc.C) { - archive, err := charm.ReadBundleArchive(s.archivePath) - c.Assert(err, gc.IsNil) - checkWordpressBundle(c, archive, s.archivePath) -} - -func (s *BundleArchiveSuite) TestReadBundleArchiveBytes(c *gc.C) { - data, err := ioutil.ReadFile(s.archivePath) - c.Assert(err, gc.IsNil) - - archive, err := charm.ReadBundleArchiveBytes(data) - c.Assert(err, gc.IsNil) - checkWordpressBundle(c, archive, "") -} - -func (s *BundleArchiveSuite) TestReadBundleArchiveFromReader(c *gc.C) { - f, err := os.Open(s.archivePath) - c.Assert(err, gc.IsNil) - defer f.Close() - info, err := f.Stat() - c.Assert(err, gc.IsNil) - - archive, err := charm.ReadBundleArchiveFromReader(f, info.Size()) - c.Assert(err, gc.IsNil) - checkWordpressBundle(c, archive, "") -} - -func (s *BundleArchiveSuite) TestReadBundleArchiveWithoutBundleYAML(c *gc.C) { - testReadBundleArchiveWithoutFile(c, "bundle.yaml") -} - -func (s *BundleArchiveSuite) TestReadBundleArchiveWithoutREADME(c *gc.C) { - testReadBundleArchiveWithoutFile(c, "README.md") -} - -func testReadBundleArchiveWithoutFile(c *gc.C, fileToRemove string) { - path := TestCharms.ClonedBundleDirPath(c.MkDir(), "wordpress-simple") - dir, err := charm.ReadBundleDir(path) - c.Assert(err, gc.IsNil) - - // Remove the file from the bundle directory. - // ArchiveTo just zips the contents of the directory as-is, - // so the resulting bundle archive will not contain the - // file.
- err = os.Remove(filepath.Join(dir.Path, fileToRemove)) - c.Assert(err, gc.IsNil) - - archivePath := filepath.Join(c.MkDir(), "out.bundle") - dstf, err := os.Create(archivePath) - c.Assert(err, gc.IsNil) - - err = dir.ArchiveTo(dstf) - dstf.Close() - - archive, err := charm.ReadBundleArchive(archivePath) - // Slightly dubious assumption: the quoted file name has no - // regexp metacharacters worth worrying about. - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("archive file %q not found", fileToRemove)) - c.Assert(archive, gc.IsNil) -} - -func (s *BundleArchiveSuite) TestExpandTo(c *gc.C) { - dir := c.MkDir() - archive, err := charm.ReadBundleArchive(s.archivePath) - c.Assert(err, gc.IsNil) - err = archive.ExpandTo(dir) - c.Assert(err, gc.IsNil) - bdir, err := charm.ReadBundleDir(dir) - c.Assert(err, gc.IsNil) - c.Assert(bdir.ReadMe(), gc.Equals, archive.ReadMe()) - c.Assert(bdir.Data(), gc.DeepEquals, archive.Data()) -} === removed file 'src/gopkg.in/juju/charm.v5/bundledata.go' --- src/gopkg.in/juju/charm.v5/bundledata.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundledata.go 1970-01-01 00:00:00 +0000 @@ -1,760 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - "io" - "io/ioutil" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/juju/names" - "gopkg.in/yaml.v1" -) - -// BundleData holds the contents of the bundle. -type BundleData struct { - // Services holds one entry for each service - // that the bundle will create, indexed by - // the service name. - Services map[string]*ServiceSpec - - // Machines holds one entry for each machine referred to - // by unit placements. These will be mapped onto actual - // machines at bundle deployment time. - // It is an error if a machine is specified but - // not referred to by a unit placement directive. - Machines map[string]*MachineSpec `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Series holds the default series to use when - // the bundle chooses charms. - Series string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Relations holds a slice of 2-element slices, - // each specifying a relation between two services. - // Each two-element slice holds two endpoints, - // each specified as either a colon-separated - // (service, relation) pair or just a service name. - // The relation is made between each. If the relation - // name is omitted, it will be inferred from the available - // relations defined in the services' charms. - Relations [][]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Whitelisted set of tags to categorize bundles as we do charms. - Tags []string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Short paragraph explaining what the bundle is useful for. - Description string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` -} - -// MachineSpec represents a notional machine that will be mapped -// onto an actual machine at bundle deployment time. -type MachineSpec struct { - Constraints string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - Annotations map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - Series string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` -} - -// ServiceSpec represents a single service that will -// be deployed as part of the bundle. -type ServiceSpec struct { - // Charm holds the charm URL of the charm to - // use for the given service.
- Charm string - - // NumUnits holds the number of units of the - // service that will be deployed. - NumUnits int `yaml:"num_units"` - - // To may hold up to NumUnits members with - // each member specifying a desired placement - // for the respective unit of the service. - // - // In regular-expression-like notation, each - // element matches the following pattern: - // - // (<containertype>:)?(<unit>|<machine>|new) - // - // If containertype is specified, the unit is deployed - // into a new container of that type, otherwise - // it will be "hulk-smashed" into the specified location, - // by co-locating it with any other units that happen to - // be there, which may result in unintended behavior. - // - // The second part (after the colon) specifies where - // the new unit should be placed - it may refer to - // a unit of another service specified in the bundle, - // a machine id specified in the machines section, - // or the special name "new" which specifies a newly - // created machine. - // - // A unit placement may be specified with a service name only, - // in which case its unit number is assumed to - // be one more than the unit number of the previous - // unit in the list with the same service, or zero - // if there were none. - // - // If there are fewer elements in To than NumUnits, - // the last element is replicated to fill it. If there - // are no elements (or To is omitted), "new" is replicated. - // - // For example: - // - // wordpress/0 wordpress/1 lxc:0 kvm:new - // - // specifies that the first two units get hulk-smashed - // onto the first two units of the wordpress service, - // the third unit gets allocated onto an lxc container - // on machine 0, and subsequent units get allocated - // on kvm containers on new machines. - // - // The above example is the same as this: - // - // wordpress wordpress lxc:0 kvm:new - To []string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Options holds the configuration values - // to apply to the new service. They should - // be compatible with the charm configuration. - Options map[string]interface{} `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Annotations holds any annotations to apply to the - // service when deployed. - Annotations map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` - - // Constraints holds the default constraints to apply - // when creating new machines for units of the service. - // This is ignored for units with explicit placement directives. - Constraints string `bson:",omitempty" json:",omitempty" yaml:",omitempty"` -} - -// ReadBundleData reads bundle data from the given reader. -// The returned data is not verified - call Verify to ensure -// that it is OK. -func ReadBundleData(r io.Reader) (*BundleData, error) { - bytes, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - var bd BundleData - if err := yaml.Unmarshal(bytes, &bd); err != nil { - return nil, fmt.Errorf("cannot unmarshal bundle data: %v", err) - } - return &bd, nil -} - -// VerificationError holds an error generated by BundleData.Verify, -// holding all the verification errors found when verifying. -type VerificationError struct { - Errors []error -} - -func (err *VerificationError) Error() string { - switch len(err.Errors) { - case 0: - return "no verification errors!"
- case 1: - return err.Errors[0].Error() - } - return fmt.Sprintf("%s (and %d more errors)", err.Errors[0], len(err.Errors)-1) -} - -type bundleDataVerifier struct { - bd *BundleData - - // machines holds the reference counts of all machines - // as referred to by placement directives. - machineRefCounts map[string]int - - charms map[string]Charm - - errors []error - verifyConstraints func(c string) error -} - -func (verifier *bundleDataVerifier) addErrorf(f string, a ...interface{}) { - verifier.addError(fmt.Errorf(f, a...)) -} - -func (verifier *bundleDataVerifier) addError(err error) { - verifier.errors = append(verifier.errors, err) -} - -func (verifier *bundleDataVerifier) err() error { - if len(verifier.errors) > 0 { - return &VerificationError{verifier.errors} - } - return nil -} - -// RequiredCharms returns a sorted slice of all the charm URLs -// required by the bundle. -func (bd *BundleData) RequiredCharms() []string { - req := make([]string, 0, len(bd.Services)) - for _, svc := range bd.Services { - req = append(req, svc.Charm) - } - sort.Strings(req) - return req -} - -// Verify is a convenience method that calls VerifyWithCharms -// with a nil charms map. -func (bd *BundleData) Verify( - verifyConstraints func(c string) error, -) error { - return bd.VerifyWithCharms(verifyConstraints, nil) -} - -// VerifyWithCharms verifies that the bundle is consistent. -// The verifyConstraints function is called to verify any constraints -// that are found. If verifyConstraints is nil, no checking -// of constraints will be done. -// -// It verifies the following: -// -// - All defined machines are referred to by placement directives. -// - All services referred to by placement directives are specified in the bundle. -// - All services referred to by relations are specified in the bundle. -// - All constraints are valid. -// -// If charms is not nil, it should hold a map with an entry for each -// charm url returned by bd.RequiredCharms. The verification will then -// also check that services are defined with valid charms, -// relations are correctly made and options are defined correctly. -// -// If the verification fails, Verify returns a *VerificationError describing -// all the problems found. 
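To make these verification entry points concrete, a hedged sketch that parses a minimal two-service bundle and verifies it with constraint checking disabled (passing nil for verifyConstraints, which the function below substitutes with a no-op):

    bd, err := charm.ReadBundleData(strings.NewReader(`
    services:
        wordpress:
            charm: wordpress
            num_units: 1
        mysql:
            charm: mysql
            num_units: 1
    relations:
        - ["wordpress:db", "mysql:server"]
    `))
    if err != nil {
            return err
    }
    if err := bd.Verify(nil); err != nil {
            return err // a *VerificationError listing all problems found
    }

Passing a charms map to VerifyWithCharms instead enables the deeper charm, relation, and option checks described above.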
-func (bd *BundleData) VerifyWithCharms( - verifyConstraints func(c string) error, - charms map[string]Charm, -) error { - if verifyConstraints == nil { - verifyConstraints = func(string) error { - return nil - } - } - verifier := &bundleDataVerifier{ - verifyConstraints: verifyConstraints, - bd: bd, - machineRefCounts: make(map[string]int), - charms: charms, - } - for id := range bd.Machines { - verifier.machineRefCounts[id] = 0 - } - if bd.Series != "" && !IsValidSeries(bd.Series) { - verifier.addErrorf("bundle declares an invalid series %q", bd.Series) - } - verifier.verifyMachines() - verifier.verifyServices() - verifier.verifyRelations() - verifier.verifyOptions() - - for id, count := range verifier.machineRefCounts { - if count == 0 { - verifier.addErrorf("machine %q is not referred to by a placement directive", id) - } - } - return verifier.err() -} - -var validMachineId = regexp.MustCompile("^" + names.NumberSnippet + "$") - -func (verifier *bundleDataVerifier) verifyMachines() { - for id, m := range verifier.bd.Machines { - if !validMachineId.MatchString(id) { - verifier.addErrorf("invalid machine id %q found in machines", id) - } - if m == nil { - continue - } - if m.Constraints != "" { - if err := verifier.verifyConstraints(m.Constraints); err != nil { - verifier.addErrorf("invalid constraints %q in machine %q: %v", m.Constraints, id, err) - } - } - if m.Series != "" && !IsValidSeries(m.Series) { - verifier.addErrorf("invalid series %s for machine %q", m.Series, id) - } - } -} - -func (verifier *bundleDataVerifier) verifyServices() { - if len(verifier.bd.Services) == 0 { - verifier.addErrorf("at least one service must be specified") - return - } - for name, svc := range verifier.bd.Services { - if _, err := ParseReference(svc.Charm); err != nil { - verifier.addErrorf("invalid charm URL in service %q: %v", name, err) - } - if err := verifier.verifyConstraints(svc.Constraints); err != nil { - verifier.addErrorf("invalid constraints %q in service %q: %v", svc.Constraints, name, err) - } - verifier.verifyPlacement(svc.To) - if svc.NumUnits < 0 { - verifier.addErrorf("negative number of units specified on service %q", name) - } else if len(svc.To) > svc.NumUnits { - verifier.addErrorf("too many units specified in unit placement for service %q", name) - } - if verifier.charms != nil { - if _, ok := verifier.charms[svc.Charm]; !ok { - verifier.addErrorf("service %q refers to non-existent charm %q", name, svc.Charm) - } - } - } -} - -func (verifier *bundleDataVerifier) verifyPlacement(to []string) { - for _, p := range to { - up, err := ParsePlacement(p) - if err != nil { - verifier.addError(err) - continue - } - switch { - case up.Service != "": - spec, ok := verifier.bd.Services[up.Service] - if !ok { - verifier.addErrorf("placement %q refers to a service not defined in this bundle", p) - continue - } - if up.Unit >= 0 && up.Unit >= spec.NumUnits { - verifier.addErrorf("placement %q specifies a unit greater than the %d unit(s) started by the target service", p, spec.NumUnits) - } - case up.Machine == "new": - default: - _, ok := verifier.bd.Machines[up.Machine] - if !ok { - verifier.addErrorf("placement %q refers to a machine not defined in this bundle", p) - continue - } - verifier.machineRefCounts[up.Machine]++ - } - } -} - -func (verifier *bundleDataVerifier) getCharmMetaForService(svcName string) (*Meta, error) { - svc, ok := verifier.bd.Services[svcName] - if !ok { - return nil, fmt.Errorf("service %q not found", svcName) - } - ch, ok := verifier.charms[svc.Charm] - if !ok { - 
return nil, fmt.Errorf("charm %q from service %q not found", svc.Charm, svcName) - } - return ch.Meta(), nil -} - -func (verifier *bundleDataVerifier) verifyRelations() { - seen := make(map[[2]endpoint]bool) - for _, relPair := range verifier.bd.Relations { - if len(relPair) != 2 { - verifier.addErrorf("relation %q has %d endpoint(s), not 2", relPair, len(relPair)) - continue - } - var epPair [2]endpoint - relParseErr := false - for i, svcRel := range relPair { - ep, err := parseEndpoint(svcRel) - if err != nil { - verifier.addError(err) - relParseErr = true - continue - } - if _, ok := verifier.bd.Services[ep.service]; !ok { - verifier.addErrorf("relation %q refers to service %q not defined in this bundle", relPair, ep.service) - } - epPair[i] = ep - } - if relParseErr { - // We failed to parse at least one relation, so don't - // bother checking further. - continue - } - if epPair[0].service == epPair[1].service { - verifier.addErrorf("relation %q relates a service to itself", relPair) - } - // Resolve endpoint relations if necessary and we have - // the necessary charm information. - if (epPair[0].relation == "" || epPair[1].relation == "") && verifier.charms != nil { - iep0, iep1, err := inferEndpoints(epPair[0], epPair[1], verifier.getCharmMetaForService) - if err != nil { - verifier.addErrorf("cannot infer endpoint between %s and %s: %v", epPair[0], epPair[1], err) - } else { - // Change the endpoints that get recorded - // as seen, so we'll diagnose a duplicate - // relation even if one relation specifies - // the relations explicitly and the other does - // not. - epPair[0], epPair[1] = iep0, iep1 - } - } - - // Re-order pairs so that we diagnose duplicate relations - // whichever way they're specified. - if epPair[1].less(epPair[0]) { - epPair[1], epPair[0] = epPair[0], epPair[1] - } - if _, ok := seen[epPair]; ok { - verifier.addErrorf("relation %q is defined more than once", relPair) - } - if verifier.charms != nil && epPair[0].relation != "" && epPair[1].relation != "" { - // We have charms to verify against, and the - // endpoint has been fully specified or inferred. - verifier.verifyRelation(epPair[0], epPair[1]) - } - seen[epPair] = true - } -} - -var infoRelation = Relation{ - Name: "juju-info", - Role: RoleProvider, - Interface: "juju-info", - Scope: ScopeContainer, -} - -// verifyRelation verifies a single relation. -// It checks that both endpoints of the relation are -// defined, and that the relationship is correctly -// symmetrical (provider to requirer) and shares -// the same interface. -func (verifier *bundleDataVerifier) verifyRelation(ep0, ep1 endpoint) { - svc0 := verifier.bd.Services[ep0.service] - svc1 := verifier.bd.Services[ep1.service] - if svc0 == nil || svc1 == nil || svc0 == svc1 { - // An error will be produced by verifyRelations for this case. - return - } - charm0 := verifier.charms[svc0.Charm] - charm1 := verifier.charms[svc1.Charm] - if charm0 == nil || charm1 == nil { - // An error will be produced by verifyServices for this case. - return - } - relProv0, okProv0 := charm0.Meta().Provides[ep0.relation] - // The juju-info relation is provided implicitly by every - // charm - use it if required. 
- if !okProv0 && ep0.relation == infoRelation.Name { - relProv0, okProv0 = infoRelation, true - } - relReq0, okReq0 := charm0.Meta().Requires[ep0.relation] - if !okProv0 && !okReq0 { - verifier.addErrorf("charm %q used by service %q does not define relation %q", svc0.Charm, ep0.service, ep0.relation) - } - relProv1, okProv1 := charm1.Meta().Provides[ep1.relation] - // The juju-info relation is provided implicitly by every - // charm - use it if required. - if !okProv1 && ep1.relation == infoRelation.Name { - relProv1, okProv1 = infoRelation, true - } - relReq1, okReq1 := charm1.Meta().Requires[ep1.relation] - if !okProv1 && !okReq1 { - verifier.addErrorf("charm %q used by service %q does not define relation %q", svc1.Charm, ep1.service, ep1.relation) - } - - var relProv, relReq Relation - var epProv, epReq endpoint - switch { - case okProv0 && okReq1: - relProv, relReq = relProv0, relReq1 - epProv, epReq = ep0, ep1 - case okReq0 && okProv1: - relProv, relReq = relProv1, relReq0 - epProv, epReq = ep1, ep0 - case okProv0 && okProv1: - verifier.addErrorf("relation %q to %q relates provider to provider", ep0, ep1) - return - case okReq0 && okReq1: - verifier.addErrorf("relation %q to %q relates requirer to requirer", ep0, ep1) - return - default: - // Errors were added above. - return - } - if relProv.Interface != relReq.Interface { - verifier.addErrorf("mismatched interface between %q and %q (%q vs %q)", epProv, epReq, relProv.Interface, relReq.Interface) - } -} - -// verifyOptions verifies that the options are correctly defined -// with respect to the charm config options. -func (verifier *bundleDataVerifier) verifyOptions() { - if verifier.charms == nil { - return - } - for svcName, svc := range verifier.bd.Services { - charm := verifier.charms[svc.Charm] - if charm == nil { - // An error will be produced by verifyServices for this case. - continue - } - config := charm.Config() - for name, value := range svc.Options { - opt, ok := config.Options[name] - if !ok { - verifier.addErrorf("cannot validate service %q: configuration option %q not found in charm %q", svcName, name, svc.Charm) - continue - } - _, err := opt.validate(name, value) - if err != nil { - verifier.addErrorf("cannot validate service %q: %v", svcName, err) - } - } - } -} - -var validServiceRelation = regexp.MustCompile("^(" + names.ServiceSnippet + "):(" + names.RelationSnippet + ")$") - -type endpoint struct { - service string - relation string -} - -func (ep endpoint) String() string { - if ep.relation == "" { - return ep.service - } - return fmt.Sprintf("%s:%s", ep.service, ep.relation) -} - -func (ep1 endpoint) less(ep2 endpoint) bool { - if ep1.service == ep2.service { - return ep1.relation < ep2.relation - } - return ep1.service < ep2.service -} - -func parseEndpoint(ep string) (endpoint, error) { - m := validServiceRelation.FindStringSubmatch(ep) - if m != nil { - return endpoint{ - service: m[1], - relation: m[2], - }, nil - } - if !names.IsValidService(ep) { - return endpoint{}, fmt.Errorf("invalid relation syntax %q", ep) - } - return endpoint{ - service: ep, - }, nil -} - -// endpointInfo holds information about one endpoint of a relation. -type endpointInfo struct { - serviceName string - Relation -} - -// String returns the unique identifier of the relation endpoint. -func (ep endpointInfo) String() string { - return ep.serviceName + ":" + ep.Name -} - -// canRelateTo returns whether a relation may be established between ep -// and other. 
-func (ep endpointInfo) canRelateTo(other endpointInfo) bool {
- return ep.serviceName != other.serviceName &&
- ep.Interface == other.Interface &&
- ep.Role != RolePeer &&
- counterpartRole(ep.Role) == other.Role
-}
-
-// endpoint returns the endpoint specifier for ep.
-func (ep endpointInfo) endpoint() endpoint {
- return endpoint{
- service: ep.serviceName,
- relation: ep.Name,
- }
-}
-
-// counterpartRole returns the RelationRole that the given RelationRole
-// can relate to.
-func counterpartRole(r RelationRole) RelationRole {
- switch r {
- case RoleProvider:
- return RoleRequirer
- case RoleRequirer:
- return RoleProvider
- case RolePeer:
- return RolePeer
- }
- panic(fmt.Errorf("unknown relation role %q", r))
-}
-
-type UnitPlacement struct {
- // ContainerType holds the container type of the new
- // unit, or empty if unspecified.
- ContainerType string
-
- // Machine holds the numeric machine id, or "new",
- // or empty if the placement specifies a service.
- Machine string
-
- // Service holds the service name, or empty if
- // the placement specifies a machine.
- Service string
-
- // Unit holds the unit number of the service, or -1
- // if unspecified.
- Unit int
-}
-
-var snippetReplacer = strings.NewReplacer(
- "container", names.ContainerTypeSnippet,
- "number", names.NumberSnippet,
- "service", names.ServiceSnippet,
-)
-
-// validPlacement holds a regexp that matches valid placement requests. To
-// make the expression easier to comprehend and maintain, we replace
-// symbolic snippet references in the regexp by their actual regexps
-// using snippetReplacer.
-var validPlacement = regexp.MustCompile(
- snippetReplacer.Replace(
- "^(?:(container):)?(?:(service)(?:/(number))?|(number))$",
- ),
-)
-
-// ParsePlacement parses a unit placement directive, as
-// specified in the To clause of a service entry in the
-// services section of a bundle.
-func ParsePlacement(p string) (*UnitPlacement, error) {
- m := validPlacement.FindStringSubmatch(p)
- if m == nil {
- return nil, fmt.Errorf("invalid placement syntax %q", p)
- }
- up := UnitPlacement{
- ContainerType: m[1],
- Service: m[2],
- Machine: m[4],
- }
- if unitStr := m[3]; unitStr != "" {
- // We know that unitStr must be a valid integer because
- // it's specified as such in the regexp.
- up.Unit, _ = strconv.Atoi(unitStr)
- } else {
- up.Unit = -1
- }
- if up.Service == "new" {
- if up.Unit != -1 {
- return nil, fmt.Errorf("invalid placement syntax %q", p)
- }
- up.Machine, up.Service = "new", ""
- }
- return &up, nil
-}
-
-// inferEndpoints infers missing relation names from the given endpoint
-// specifications, using the given get function to retrieve charm
-// data if necessary. It returns the fully specified endpoints.
-func inferEndpoints(epSpec0, epSpec1 endpoint, get func(svc string) (*Meta, error)) (endpoint, endpoint, error) {
- if epSpec0.relation != "" && epSpec1.relation != "" {
- // The endpoints are already specified explicitly so
- // there is no need to fetch any charm data to infer
- // them.
- return epSpec0, epSpec1, nil - } - eps0, err := possibleEndpoints(epSpec0, get) - if err != nil { - return endpoint{}, endpoint{}, err - } - eps1, err := possibleEndpoints(epSpec1, get) - if err != nil { - return endpoint{}, endpoint{}, err - } - var candidates [][]endpointInfo - for _, ep0 := range eps0 { - for _, ep1 := range eps1 { - if ep0.canRelateTo(ep1) { - candidates = append(candidates, []endpointInfo{ep0, ep1}) - } - } - } - switch len(candidates) { - case 0: - return endpoint{}, endpoint{}, fmt.Errorf("no relations found") - case 1: - return candidates[0][0].endpoint(), candidates[0][1].endpoint(), nil - } - - // There's ambiguity; try discarding implicit relations. - filtered := discardImplicitRelations(candidates) - if len(filtered) == 1 { - return filtered[0][0].endpoint(), filtered[0][1].endpoint(), nil - } - // The ambiguity cannot be resolved, so return an error. - var keys []string - for _, cand := range candidates { - keys = append(keys, fmt.Sprintf("%q", relationKey(cand))) - } - sort.Strings(keys) - return endpoint{}, endpoint{}, fmt.Errorf("ambiguous relation: %s %s could refer to %s", - epSpec0, epSpec1, strings.Join(keys, "; ")) -} - -func discardImplicitRelations(candidates [][]endpointInfo) [][]endpointInfo { - var filtered [][]endpointInfo -outer: - for _, cand := range candidates { - for _, ep := range cand { - if ep.IsImplicit() { - continue outer - } - } - filtered = append(filtered, cand) - } - return filtered -} - -// relationKey returns a string describing the relation defined by -// endpoints, for use in various contexts (including error messages). -func relationKey(endpoints []endpointInfo) string { - var names []string - for _, ep := range endpoints { - names = append(names, ep.String()) - } - sort.Strings(names) - return strings.Join(names, " ") -} - -// possibleEndpoints returns all the endpoints that the given endpoint spec -// could refer to. -func possibleEndpoints(epSpec endpoint, get func(svc string) (*Meta, error)) ([]endpointInfo, error) { - meta, err := get(epSpec.service) - if err != nil { - return nil, err - } - - var eps []endpointInfo - add := func(r Relation) { - if epSpec.relation == "" || epSpec.relation == r.Name { - eps = append(eps, endpointInfo{ - serviceName: epSpec.service, - Relation: r, - }) - } - } - - for _, r := range meta.Provides { - add(r) - } - for _, r := range meta.Requires { - add(r) - } - // Every service implicitly provides a juju-info relation. - add(Relation{ - Name: "juju-info", - Role: RoleProvider, - Interface: "juju-info", - Scope: ScopeGlobal, - }) - return eps, nil -} === removed file 'src/gopkg.in/juju/charm.v5/bundledata_test.go' --- src/gopkg.in/juju/charm.v5/bundledata_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundledata_test.go 1970-01-01 00:00:00 +0000 @@ -1,810 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
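The verifier above surfaces through a small exported API: ReadBundleData, Verify/VerifyWithCharms, and ParsePlacement. The following is a minimal, self-contained sketch of driving that API, not part of the diff itself; the bundle YAML and the placement string are hypothetical, not taken from this package's tests.

package main

import (
	"fmt"
	"log"
	"strings"

	"gopkg.in/juju/charm.v5"
)

// A hypothetical two-service bundle, for illustration only.
const bundleYAML = `
services:
  wordpress:
    charm: "cs:precise/wordpress-2"
    num_units: 1
  mysql:
    charm: "cs:precise/mysql-28"
    num_units: 1
relations:
  - ["wordpress:db", "mysql:db"]
`

func main() {
	bd, err := charm.ReadBundleData(strings.NewReader(bundleYAML))
	if err != nil {
		log.Fatal(err)
	}
	// A nil verifyConstraints function accepts any constraint string;
	// with no charms supplied, relations are checked for syntax, unknown
	// services, self-relations and duplicates, but not against metadata.
	if err := bd.Verify(nil); err != nil {
		log.Fatal(err)
	}
	// Placement directives use the same syntax the verifier enforces.
	up, err := charm.ParsePlacement("lxc:mysql/0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(up.ContainerType, up.Service, up.Unit) // lxc mysql 0
}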
- -package charm_test - -import ( - "fmt" - "sort" - "strings" - - "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -type bundleDataSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&bundleDataSuite{}) - -const mediawikiBundle = ` -series: precise -services: - mediawiki: - charm: "cs:precise/mediawiki-10" - num_units: 1 - options: - debug: false - name: Please set name of wiki - skin: vector - annotations: - "gui-x": 609 - "gui-y": -15 - mysql: - charm: "cs:precise/mysql-28" - num_units: 2 - to: [0, mediawiki/0] - options: - "binlog-format": MIXED - "block-size": 5 - "dataset-size": "80%" - flavor: distro - "ha-bindiface": eth0 - "ha-mcastport": 5411 - annotations: - "gui-x": 610 - "gui-y": 255 - constraints: "mem=8g" -relations: - - ["mediawiki:db", "mysql:db"] - - ["mysql:foo", "mediawiki:bar"] -machines: - 0: - constraints: 'arch=amd64 mem=4g' - annotations: - foo: bar -tags: - - super - - awesome -description: | - Everything is awesome. Everything is cool when we work as a team. - Lovely day. -` - -var parseTests = []struct { - about string - data string - expectedBD *charm.BundleData - expectedErr string -}{{ - about: "mediawiki", - data: mediawikiBundle, - expectedBD: &charm.BundleData{ - Series: "precise", - Services: map[string]*charm.ServiceSpec{ - "mediawiki": { - Charm: "cs:precise/mediawiki-10", - NumUnits: 1, - Options: map[string]interface{}{ - "debug": false, - "name": "Please set name of wiki", - "skin": "vector", - }, - Annotations: map[string]string{ - "gui-x": "609", - "gui-y": "-15", - }, - }, - "mysql": { - Charm: "cs:precise/mysql-28", - NumUnits: 2, - To: []string{"0", "mediawiki/0"}, - Options: map[string]interface{}{ - "binlog-format": "MIXED", - "block-size": 5, - "dataset-size": "80%", - "flavor": "distro", - "ha-bindiface": "eth0", - "ha-mcastport": 5411, - }, - Annotations: map[string]string{ - "gui-x": "610", - "gui-y": "255", - }, - Constraints: "mem=8g", - }, - }, - Machines: map[string]*charm.MachineSpec{ - "0": { - Constraints: "arch=amd64 mem=4g", - Annotations: map[string]string{ - "foo": "bar", - }, - }, - }, - Relations: [][]string{ - {"mediawiki:db", "mysql:db"}, - {"mysql:foo", "mediawiki:bar"}, - }, - Tags: []string{"super", "awesome"}, - Description: `Everything is awesome. Everything is cool when we work as a team. -Lovely day. 
-`, - }, -}, { - about: "relations specified with hyphens", - data: ` -relations: - - - "mediawiki:db" - - "mysql:db" - - - "mysql:foo" - - "mediawiki:bar" -`, - expectedBD: &charm.BundleData{ - Relations: [][]string{ - {"mediawiki:db", "mysql:db"}, - {"mysql:foo", "mediawiki:bar"}, - }, - }, -}} - -func (*bundleDataSuite) TestParse(c *gc.C) { - for i, test := range parseTests { - c.Logf("test %d: %s", i, test.about) - bd, err := charm.ReadBundleData(strings.NewReader(test.data)) - if test.expectedErr != "" { - c.Assert(err, gc.ErrorMatches, test.expectedErr) - continue - } - c.Assert(err, gc.IsNil) - c.Assert(bd, jc.DeepEquals, test.expectedBD) - } -} - -var verifyErrorsTests = []struct { - about string - data string - errors []string -}{{ - about: "as many errors as possible", - data: ` -series: "9wrong" - -machines: - 0: - constraints: 'bad constraints' - annotations: - foo: bar - series: 'bad series' - bogus: - 3: -services: - mediawiki: - charm: "bogus:precise/mediawiki-10" - num_units: -4 - options: - debug: false - name: Please set name of wiki - skin: vector - annotations: - "gui-x": 609 - "gui-y": -15 - mysql: - charm: "cs:precise/mysql-28" - num_units: 2 - to: [0, mediawiki/0, nowhere/3, 2, "bad placement"] - options: - "binlog-format": MIXED - "block-size": 5 - "dataset-size": "80%" - flavor: distro - "ha-bindiface": eth0 - "ha-mcastport": 5411 - annotations: - "gui-x": 610 - "gui-y": 255 - constraints: "bad constraints" - wordpress: - charm: wordpress -relations: - - ["mediawiki:db", "mysql:db"] - - ["mysql:foo", "mediawiki:bar"] - - ["arble:bar"] - - ["arble:bar", "mediawiki:db"] - - ["mysql:foo", "mysql:bar"] - - ["mysql:db", "mediawiki:db"] - - ["mediawiki/db", "mysql:db"] - - ["wordpress", "mysql"] -`, - errors: []string{ - `bundle declares an invalid series "9wrong"`, - `machine "3" is not referred to by a placement directive`, - `machine "bogus" is not referred to by a placement directive`, - `invalid machine id "bogus" found in machines`, - `invalid constraints "bad constraints" in machine "0": bad constraint`, - `invalid charm URL in service "mediawiki": charm URL has invalid schema: "bogus:precise/mediawiki-10"`, - `invalid constraints "bad constraints" in service "mysql": bad constraint`, - `negative number of units specified on service "mediawiki"`, - `too many units specified in unit placement for service "mysql"`, - `placement "nowhere/3" refers to a service not defined in this bundle`, - `placement "mediawiki/0" specifies a unit greater than the -4 unit(s) started by the target service`, - `placement "2" refers to a machine not defined in this bundle`, - `relation ["arble:bar"] has 1 endpoint(s), not 2`, - `relation ["arble:bar" "mediawiki:db"] refers to service "arble" not defined in this bundle`, - `relation ["mysql:foo" "mysql:bar"] relates a service to itself`, - `relation ["mysql:db" "mediawiki:db"] is defined more than once`, - `invalid placement syntax "bad placement"`, - `invalid relation syntax "mediawiki/db"`, - `invalid series bad series for machine "0"`, - }, -}, { - about: "mediawiki should be ok", - data: mediawikiBundle, -}} - -func (*bundleDataSuite) TestVerifyErrors(c *gc.C) { - for i, test := range verifyErrorsTests { - c.Logf("test %d: %s", i, test.about) - assertVerifyWithCharmsErrors(c, test.data, nil, test.errors) - } -} - -func assertVerifyWithCharmsErrors(c *gc.C, bundleData string, charms map[string]charm.Charm, expectErrors []string) { - bd, err := charm.ReadBundleData(strings.NewReader(bundleData)) - c.Assert(err, gc.IsNil) - - err = 
bd.VerifyWithCharms(func(c string) error { - if c == "bad constraints" { - return fmt.Errorf("bad constraint") - } - return nil - }, charms) - if len(expectErrors) == 0 { - if err == nil { - return - } - // Let the rest of the function deal with the - // error, so that we'll see the actual errors - // that resulted. - } - c.Assert(err, gc.FitsTypeOf, (*charm.VerificationError)(nil)) - errors := err.(*charm.VerificationError).Errors - errStrings := make([]string, len(errors)) - for i, err := range errors { - errStrings[i] = err.Error() - } - sort.Strings(errStrings) - sort.Strings(expectErrors) - c.Assert(errStrings, jc.DeepEquals, expectErrors) -} - -func (*bundleDataSuite) TestVerifyCharmURL(c *gc.C) { - bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle)) - c.Assert(err, gc.IsNil) - for i, u := range []string{ - "wordpress", - "cs:wordpress", - "cs:precise/wordpress", - "precise/wordpress", - "precise/wordpress-2", - "local:foo", - "local:foo-45", - } { - c.Logf("test %d: %s", i, u) - bd.Services["mediawiki"].Charm = u - err := bd.Verify(nil) - c.Assert(err, gc.IsNil, gc.Commentf("charm url %q", u)) - } -} - -func (*bundleDataSuite) TestVerifyBundleUsingJujuInfoRelation(c *gc.C) { - b := TestCharms.BundleDir("wordpress-with-logging") - bd := b.Data() - - charms := map[string]charm.Charm{ - "wordpress": TestCharms.CharmDir("wordpress"), - "mysql": TestCharms.CharmDir("mysql"), - "logging": TestCharms.CharmDir("logging"), - } - err := bd.VerifyWithCharms(nil, charms) - c.Assert(err, gc.IsNil) -} - -func (*bundleDataSuite) TestRequiredCharms(c *gc.C) { - bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle)) - c.Assert(err, gc.IsNil) - reqCharms := bd.RequiredCharms() - - c.Assert(reqCharms, gc.DeepEquals, []string{"cs:precise/mediawiki-10", "cs:precise/mysql-28"}) -} - -// testCharm returns a charm with the given name -// and relations. 
The relations are specified as
-// a string of the form:
-//
-// <provides-relations> | <requires-relations>
-//
-// Within each section, each white-space separated
-// relation is specified as:
-// <relation-name>:<interface>
-//
-// So, for example:
-//
-// testCharm("wordpress", "web:http | db:mysql")
-//
-// is equivalent to a charm with metadata.yaml containing
-//
-// name: wordpress
-// description: wordpress
-// provides:
-// web:
-// interface: http
-// requires:
-// db:
-// interface: mysql
-//
-func testCharm(name string, relations string) charm.Charm {
- var provides, requires string
- parts := strings.Split(relations, "|")
- provides = parts[0]
- if len(parts) > 1 {
- requires = parts[1]
- }
- meta := &charm.Meta{
- Name: name,
- Summary: name,
- Description: name,
- Provides: parseRelations(provides, charm.RoleProvider),
- Requires: parseRelations(requires, charm.RoleRequirer),
- }
- configStr := `
-options:
- title: {default: My Title, description: title, type: string}
- skill-level: {description: skill, type: int}
-`
- config, err := charm.ReadConfig(strings.NewReader(configStr))
- if err != nil {
- panic(err)
- }
- return testCharmImpl{
- meta: meta,
- config: config,
- }
-}
-
-func parseRelations(s string, role charm.RelationRole) map[string]charm.Relation {
- rels := make(map[string]charm.Relation)
- for _, r := range strings.Fields(s) {
- parts := strings.Split(r, ":")
- if len(parts) != 2 {
- panic(fmt.Errorf("invalid relation specifier %q", r))
- }
- name, interf := parts[0], parts[1]
- rels[name] = charm.Relation{
- Name: name,
- Role: role,
- Interface: interf,
- Scope: charm.ScopeGlobal,
- }
- }
- return rels
-}
-
-type testCharmImpl struct {
- meta *charm.Meta
- config *charm.Config
- // Implement charm.Charm, but panic if anything other than
- // Meta or Config methods are called.
- charm.Charm
-}
-
-func (c testCharmImpl) Meta() *charm.Meta {
- return c.meta
-}
-
-func (c testCharmImpl) Config() *charm.Config {
- return c.config
-}
-
-var verifyWithCharmsErrorsTests = []struct {
- about string
- data string
- charms map[string]charm.Charm
-
- errors []string
-}{{
- about: "no charms",
- data: mediawikiBundle,
- charms: map[string]charm.Charm{},
- errors: []string{
- `service "mediawiki" refers to non-existent charm "cs:precise/mediawiki-10"`,
- `service "mysql" refers to non-existent charm "cs:precise/mysql-28"`,
- },
-}, {
- about: "all present and correct",
- data: `
-services:
- service1:
- charm: "test"
- service2:
- charm: "test"
- service3:
- charm: "test"
-relations:
- - ["service1:prova", "service2:reqa"]
- - ["service1:reqa", "service3:prova"]
- - ["service3:provb", "service2:reqb"]
-`,
- charms: map[string]charm.Charm{
- "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
- },
-}, {
- about: "undefined relations",
- data: `
-services:
- service1:
- charm: "test"
- service2:
- charm: "test"
-relations:
- - ["service1:prova", "service2:blah"]
- - ["service1:blah", "service2:prova"]
-`,
- charms: map[string]charm.Charm{
- "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
- },
- errors: []string{
- `charm "test" used by service "service1" does not define relation "blah"`,
- `charm "test" used by service "service2" does not define relation "blah"`,
- },
-}, {
- about: "undefined services",
- data: `
-services:
- service1:
- charm: "test"
- service2:
- charm: "test"
-relations:
- - ["unknown:prova", "service2:blah"]
- - ["service1:blah", "unknown:prova"]
-`,
- charms: map[string]charm.Charm{
- "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
- },
- errors: []string{
- `relation 
["service1:blah" "unknown:prova"] refers to service "unknown" not defined in this bundle`, - `relation ["unknown:prova" "service2:blah"] refers to service "unknown" not defined in this bundle`, - }, -}, { - about: "equal services", - data: ` -services: - service1: - charm: "test" - service2: - charm: "test" -relations: - - ["service2:prova", "service2:reqa"] -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `relation ["service2:prova" "service2:reqa"] relates a service to itself`, - }, -}, { - about: "provider to provider relation", - data: ` -services: - service1: - charm: "test" - service2: - charm: "test" -relations: - - ["service1:prova", "service2:prova"] -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `relation "service1:prova" to "service2:prova" relates provider to provider`, - }, -}, { - about: "provider to provider relation", - data: ` -services: - service1: - charm: "test" - service2: - charm: "test" -relations: - - ["service1:reqa", "service2:reqa"] -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `relation "service1:reqa" to "service2:reqa" relates requirer to requirer`, - }, -}, { - about: "interface mismatch", - data: ` -services: - service1: - charm: "test" - service2: - charm: "test" -relations: - - ["service1:reqa", "service2:provb"] -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `mismatched interface between "service2:provb" and "service1:reqa" ("b" vs "a")`, - }, -}, { - about: "different charms", - data: ` -services: - service1: - charm: "test1" - service2: - charm: "test2" -relations: - - ["service1:reqa", "service2:prova"] -`, - charms: map[string]charm.Charm{ - "test1": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - "test2": testCharm("test", ""), - }, - errors: []string{ - `charm "test2" used by service "service2" does not define relation "prova"`, - }, -}, { - about: "ambiguous relation", - data: ` -services: - service1: - charm: "test1" - service2: - charm: "test2" -relations: - - [service1, service2] -`, - charms: map[string]charm.Charm{ - "test1": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - "test2": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `cannot infer endpoint between service1 and service2: ambiguous relation: service1 service2 could refer to "service1:prova service2:reqa"; "service1:provb service2:reqb"; "service1:reqa service2:prova"; "service1:reqb service2:provb"`, - }, -}, { - about: "relation using juju-info", - data: ` -services: - service1: - charm: "provider" - service2: - charm: "requirer" -relations: - - [service1, service2] -`, - charms: map[string]charm.Charm{ - "provider": testCharm("provider", ""), - "requirer": testCharm("requirer", "| req:juju-info"), - }, -}, { - about: "ambiguous when implicit relations taken into account", - data: ` -services: - service1: - charm: "provider" - service2: - charm: "requirer" -relations: - - [service1, service2] -`, - charms: map[string]charm.Charm{ - "provider": testCharm("provider", "provdb:db | "), - "requirer": testCharm("requirer", "| reqdb:db reqinfo:juju-info"), - }, -}, { - about: "half of relation left open", - data: ` -services: - service1: - charm: "provider" - service2: - charm: "requirer" -relations: - - 
["service1:prova2", service2] -`, - charms: map[string]charm.Charm{ - "provider": testCharm("provider", "prova1:a prova2:a | "), - "requirer": testCharm("requirer", "| reqa:a"), - }, -}, { - about: "duplicate relation between open and fully-specified relations", - data: ` -services: - service1: - charm: "provider" - service2: - charm: "requirer" -relations: - - ["service1:prova", "service2:reqa"] - - ["service1", "service2"] -`, - charms: map[string]charm.Charm{ - "provider": testCharm("provider", "prova:a | "), - "requirer": testCharm("requirer", "| reqa:a"), - }, - errors: []string{ - `relation ["service1" "service2"] is defined more than once`, - }, -}, { - about: "configuration options specified", - data: ` -services: - service1: - charm: "test" - options: - title: "some title" - skill-level: 245 - service2: - charm: "test" - options: - title: "another title" -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, -}, { - about: "invalid type for option", - data: ` -services: - service1: - charm: "test" - options: - title: "some title" - skill-level: "too much" - service2: - charm: "test" - options: - title: "another title" -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `cannot validate service "service1": option "skill-level" expected int, got "too much"`, - }, -}, { - about: "unknown option", - data: ` -services: - service1: - charm: "test" - options: - title: "some title" - unknown-option: 2345 -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `cannot validate service "service1": configuration option "unknown-option" not found in charm "test"`, - }, -}, { - about: "multiple config problems", - data: ` -services: - service1: - charm: "test" - options: - title: "some title" - unknown-option: 2345 - service2: - charm: "test" - options: - title: 123 - another-unknown: 2345 -`, - charms: map[string]charm.Charm{ - "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), - }, - errors: []string{ - `cannot validate service "service1": configuration option "unknown-option" not found in charm "test"`, - `cannot validate service "service2": configuration option "another-unknown" not found in charm "test"`, - `cannot validate service "service2": option "title" expected string, got 123`, - }, -}} - -func (*bundleDataSuite) TestVerifyWithCharmsErrors(c *gc.C) { - for i, test := range verifyWithCharmsErrorsTests { - c.Logf("test %d: %s", i, test.about) - assertVerifyWithCharmsErrors(c, test.data, test.charms, test.errors) - } -} - -var parsePlacementTests = []struct { - placement string - expect *charm.UnitPlacement - expectErr string -}{{ - placement: "lxc:service/0", - expect: &charm.UnitPlacement{ - ContainerType: "lxc", - Service: "service", - Unit: 0, - }, -}, { - placement: "lxc:service", - expect: &charm.UnitPlacement{ - ContainerType: "lxc", - Service: "service", - Unit: -1, - }, -}, { - placement: "lxc:99", - expect: &charm.UnitPlacement{ - ContainerType: "lxc", - Machine: "99", - Unit: -1, - }, -}, { - placement: "lxc:new", - expect: &charm.UnitPlacement{ - ContainerType: "lxc", - Machine: "new", - Unit: -1, - }, -}, { - placement: "service/0", - expect: &charm.UnitPlacement{ - Service: "service", - Unit: 0, - }, -}, { - placement: "service", - expect: &charm.UnitPlacement{ - Service: "service", - Unit: -1, - }, -}, { - placement: "service45", - expect: &charm.UnitPlacement{ 
- Service: "service45", - Unit: -1, - }, -}, { - placement: "99", - expect: &charm.UnitPlacement{ - Machine: "99", - Unit: -1, - }, -}, { - placement: "new", - expect: &charm.UnitPlacement{ - Machine: "new", - Unit: -1, - }, -}, { - placement: ":0", - expectErr: `invalid placement syntax ":0"`, -}, { - placement: "05", - expectErr: `invalid placement syntax "05"`, -}, { - placement: "new/2", - expectErr: `invalid placement syntax "new/2"`, -}} - -func (*bundleDataSuite) TestParsePlacement(c *gc.C) { - for i, test := range parsePlacementTests { - c.Logf("test %d: %q", i, test.placement) - up, err := charm.ParsePlacement(test.placement) - if test.expectErr != "" { - c.Assert(err, gc.ErrorMatches, test.expectErr) - } else { - c.Assert(err, gc.IsNil) - c.Assert(up, jc.DeepEquals, test.expect) - } - } -} === removed file 'src/gopkg.in/juju/charm.v5/bundledir.go' --- src/gopkg.in/juju/charm.v5/bundledir.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundledir.go 1970-01-01 00:00:00 +0000 @@ -1,61 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" -) - -type BundleDir struct { - Path string - data *BundleData - readMe string -} - -// Trick to ensure *BundleDir implements the Bundle interface. -var _ Bundle = (*BundleDir)(nil) - -// ReadBundleDir returns a BundleDir representing an expanded -// bundle directory. It does not verify the bundle data. -func ReadBundleDir(path string) (dir *BundleDir, err error) { - dir = &BundleDir{Path: path} - file, err := os.Open(dir.join("bundle.yaml")) - if err != nil { - return nil, err - } - dir.data, err = ReadBundleData(file) - file.Close() - if err != nil { - return nil, err - } - readMe, err := ioutil.ReadFile(dir.join("README.md")) - if err != nil { - return nil, fmt.Errorf("cannot read README file: %v", err) - } - dir.readMe = string(readMe) - return dir, nil -} - -func (dir *BundleDir) Data() *BundleData { - return dir.data -} - -func (dir *BundleDir) ReadMe() string { - return dir.readMe -} - -func (dir *BundleDir) ArchiveTo(w io.Writer) error { - return writeArchive(w, dir.Path, -1, nil) -} - -// join builds a path rooted at the bundle's expanded directory -// path and the extra path components provided. -func (dir *BundleDir) join(parts ...string) string { - parts = append([]string{dir.Path}, parts...) - return filepath.Join(parts...) -} === removed file 'src/gopkg.in/juju/charm.v5/bundledir_test.go' --- src/gopkg.in/juju/charm.v5/bundledir_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/bundledir_test.go 1970-01-01 00:00:00 +0000 @@ -1,58 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
- -package charm_test - -import ( - "os" - "path/filepath" - - "github.com/juju/testing" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -type BundleDirSuite struct { - testing.IsolationSuite -} - -var _ = gc.Suite(&BundleDirSuite{}) - -func (s *BundleDirSuite) TestReadBundleDir(c *gc.C) { - path := TestCharms.BundleDirPath("wordpress-simple") - dir, err := charm.ReadBundleDir(path) - c.Assert(err, gc.IsNil) - checkWordpressBundle(c, dir, path) -} - -func (s *BundleDirSuite) TestReadBundleDirWithoutREADME(c *gc.C) { - path := TestCharms.ClonedBundleDirPath(c.MkDir(), "wordpress-simple") - err := os.Remove(filepath.Join(path, "README.md")) - c.Assert(err, gc.IsNil) - dir, err := charm.ReadBundleDir(path) - c.Assert(err, gc.ErrorMatches, "cannot read README file: .*") - c.Assert(dir, gc.IsNil) -} - -func (s *BundleDirSuite) TestArchiveTo(c *gc.C) { - baseDir := c.MkDir() - charmDir := TestCharms.ClonedBundleDirPath(baseDir, "wordpress-simple") - s.assertArchiveTo(c, baseDir, charmDir) -} - -func (s *BundleDirSuite) assertArchiveTo(c *gc.C, baseDir, bundleDir string) { - dir, err := charm.ReadBundleDir(bundleDir) - c.Assert(err, gc.IsNil) - path := filepath.Join(baseDir, "archive.bundle") - file, err := os.Create(path) - c.Assert(err, gc.IsNil) - err = dir.ArchiveTo(file) - file.Close() - c.Assert(err, gc.IsNil) - - archive, err := charm.ReadBundleArchive(path) - c.Assert(err, gc.IsNil) - c.Assert(archive.ReadMe(), gc.Equals, dir.ReadMe()) - c.Assert(archive.Data(), gc.DeepEquals, dir.Data()) -} === removed file 'src/gopkg.in/juju/charm.v5/charm.go' --- src/gopkg.in/juju/charm.v5/charm.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charm.go 1970-01-01 00:00:00 +0000 @@ -1,40 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "os" - - "github.com/juju/loggo" -) - -var logger = loggo.GetLogger("juju.charm") - -// The Charm interface is implemented by any type that -// may be handled as a charm. -type Charm interface { - Meta() *Meta - Config() *Config - Metrics() *Metrics - Actions() *Actions - Revision() int -} - -// ReadCharm reads a Charm from path, which can point to either a charm archive or a -// charm directory. -func ReadCharm(path string) (charm Charm, err error) { - info, err := os.Stat(path) - if err != nil { - return nil, err - } - if info.IsDir() { - charm, err = ReadCharmDir(path) - } else { - charm, err = ReadCharmArchive(path) - } - if err != nil { - return nil, err - } - return charm, nil -} === removed file 'src/gopkg.in/juju/charm.v5/charm_test.go' --- src/gopkg.in/juju/charm.v5/charm_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charm_test.go 1970-01-01 00:00:00 +0000 @@ -1,105 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
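ReadCharm (charm.go above) stats the path and dispatches to ReadCharmDir or ReadCharmArchive, so one call handles both layouts. A minimal sketch under that assumption, separate from the diff; the command name and path are hypothetical.

package main

import (
	"fmt"
	"log"
	"os"

	"gopkg.in/juju/charm.v5"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatal("usage: charminfo <charm-dir-or-archive>")
	}
	// The same call handles an expanded charm directory and a .charm
	// zip archive.
	ch, err := charm.ReadCharm(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	meta := ch.Meta()
	fmt.Printf("%s r%d: %d provides, %d requires\n",
		meta.Name, ch.Revision(), len(meta.Provides), len(meta.Requires))
}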
- -package charm_test - -import ( - "bytes" - "io" - "io/ioutil" - "path/filepath" - stdtesting "testing" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" - charmtesting "gopkg.in/juju/charm.v5/testing" -) - -func Test(t *stdtesting.T) { - gc.TestingT(t) -} - -var TestCharms = charmtesting.NewRepo("internal/test-charm-repo", "quantal") - -type CharmSuite struct{} - -var _ = gc.Suite(&CharmSuite{}) - -func (s *CharmSuite) TestReadCharm(c *gc.C) { - bPath := TestCharms.CharmArchivePath(c.MkDir(), "dummy") - ch, err := charm.ReadCharm(bPath) - c.Assert(err, gc.IsNil) - c.Assert(ch.Meta().Name, gc.Equals, "dummy") - dPath := TestCharms.CharmDirPath("dummy") - ch, err = charm.ReadCharm(dPath) - c.Assert(err, gc.IsNil) - c.Assert(ch.Meta().Name, gc.Equals, "dummy") -} - -func (s *CharmSuite) TestReadCharmDirError(c *gc.C) { - ch, err := charm.ReadCharm(c.MkDir()) - c.Assert(err, gc.NotNil) - c.Assert(ch, gc.Equals, nil) -} - -func (s *CharmSuite) TestReadCharmArchiveError(c *gc.C) { - path := filepath.Join(c.MkDir(), "path") - err := ioutil.WriteFile(path, []byte("foo"), 0644) - c.Assert(err, gc.IsNil) - ch, err := charm.ReadCharm(path) - c.Assert(err, gc.NotNil) - c.Assert(ch, gc.Equals, nil) -} - -func checkDummy(c *gc.C, f charm.Charm, path string) { - c.Assert(f.Revision(), gc.Equals, 1) - c.Assert(f.Meta().Name, gc.Equals, "dummy") - c.Assert(f.Config().Options["title"].Default, gc.Equals, "My Title") - c.Assert(f.Actions(), jc.DeepEquals, - &charm.Actions{ - map[string]charm.ActionSpec{ - "snapshot": charm.ActionSpec{ - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "type": "object", - "description": "Take a snapshot of the database.", - "title": "snapshot", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "The file to write out to.", - "type": "string", - "default": "foo.bz2", - }}}}}}) - switch f := f.(type) { - case *charm.CharmArchive: - c.Assert(f.Path, gc.Equals, path) - case *charm.CharmDir: - c.Assert(f.Path, gc.Equals, path) - } -} - -type YamlHacker map[interface{}]interface{} - -func ReadYaml(r io.Reader) YamlHacker { - data, err := ioutil.ReadAll(r) - if err != nil { - panic(err) - } - m := make(map[interface{}]interface{}) - err = yaml.Unmarshal(data, m) - if err != nil { - panic(err) - } - return YamlHacker(m) -} - -func (yh YamlHacker) Reader() io.Reader { - data, err := yaml.Marshal(yh) - if err != nil { - panic(err) - } - return bytes.NewBuffer(data) -} === removed file 'src/gopkg.in/juju/charm.v5/charmarchive.go' --- src/gopkg.in/juju/charm.v5/charmarchive.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmarchive.go 1970-01-01 00:00:00 +0000 @@ -1,315 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "archive/zip" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - - "github.com/juju/utils/set" - ziputil "github.com/juju/utils/zip" -) - -// The CharmArchive type encapsulates access to data and operations -// on a charm archive. -type CharmArchive struct { - zopen zipOpener - - Path string // May be empty if CharmArchive wasn't read from a file - meta *Meta - config *Config - metrics *Metrics - actions *Actions - revision int -} - -// Trick to ensure *CharmArchive implements the Charm interface. 
-var _ Charm = (*CharmArchive)(nil) - -// ReadCharmArchive returns a CharmArchive for the charm in path. -func ReadCharmArchive(path string) (*CharmArchive, error) { - a, err := readCharmArchive(newZipOpenerFromPath(path)) - if err != nil { - return nil, err - } - a.Path = path - return a, nil -} - -// ReadCharmArchiveBytes returns a CharmArchive read from the given data. -// Make sure the archive fits in memory before using this. -func ReadCharmArchiveBytes(data []byte) (archive *CharmArchive, err error) { - zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data))) - return readCharmArchive(zopener) -} - -// ReadCharmArchiveFromReader returns a CharmArchive that uses -// r to read the charm. The given size must hold the number -// of available bytes in the file. -// -// Note that the caller is responsible for closing r - methods on -// the returned CharmArchive may fail after that. -func ReadCharmArchiveFromReader(r io.ReaderAt, size int64) (archive *CharmArchive, err error) { - return readCharmArchive(newZipOpenerFromReader(r, size)) -} - -func readCharmArchive(zopen zipOpener) (archive *CharmArchive, err error) { - b := &CharmArchive{ - zopen: zopen, - } - zipr, err := zopen.openZip() - if err != nil { - return nil, err - } - defer zipr.Close() - reader, err := zipOpenFile(zipr, "metadata.yaml") - if err != nil { - return nil, err - } - b.meta, err = ReadMeta(reader) - reader.Close() - if err != nil { - return nil, err - } - - reader, err = zipOpenFile(zipr, "config.yaml") - if _, ok := err.(*noCharmArchiveFile); ok { - b.config = NewConfig() - } else if err != nil { - return nil, err - } else { - b.config, err = ReadConfig(reader) - reader.Close() - if err != nil { - return nil, err - } - } - - reader, err = zipOpenFile(zipr, "metrics.yaml") - if err == nil { - b.metrics, err = ReadMetrics(reader) - reader.Close() - if err != nil { - return nil, err - } - } else if _, ok := err.(*noCharmArchiveFile); !ok { - return nil, err - } - - reader, err = zipOpenFile(zipr, "actions.yaml") - if _, ok := err.(*noCharmArchiveFile); ok { - b.actions = NewActions() - } else if err != nil { - return nil, err - } else { - b.actions, err = ReadActionsYaml(reader) - reader.Close() - if err != nil { - return nil, err - } - } - - reader, err = zipOpenFile(zipr, "revision") - if err != nil { - if _, ok := err.(*noCharmArchiveFile); !ok { - return nil, err - } - b.revision = b.meta.OldRevision - } else { - _, err = fmt.Fscan(reader, &b.revision) - if err != nil { - return nil, errors.New("invalid revision file") - } - } - - return b, nil -} - -func zipOpenFile(zipr *zipReadCloser, path string) (rc io.ReadCloser, err error) { - for _, fh := range zipr.File { - if fh.Name == path { - return fh.Open() - } - } - return nil, &noCharmArchiveFile{path} -} - -type noCharmArchiveFile struct { - path string -} - -func (err noCharmArchiveFile) Error() string { - return fmt.Sprintf("archive file %q not found", err.path) -} - -// Revision returns the revision number for the charm -// expanded in dir. -func (a *CharmArchive) Revision() int { - return a.revision -} - -// SetRevision changes the charm revision number. This affects the -// revision reported by Revision and the revision of the charm -// directory created by ExpandTo. -func (a *CharmArchive) SetRevision(revision int) { - a.revision = revision -} - -// Meta returns the Meta representing the metadata.yaml file from archive. 
-func (a *CharmArchive) Meta() *Meta { - return a.meta -} - -// Config returns the Config representing the config.yaml file -// for the charm archive. -func (a *CharmArchive) Config() *Config { - return a.config -} - -// Metrics returns the Metrics representing the metrics.yaml file -// for the charm archive. -func (a *CharmArchive) Metrics() *Metrics { - return a.metrics -} - -// Actions returns the Actions map for the actions.yaml file for the charm -// archive. -func (a *CharmArchive) Actions() *Actions { - return a.actions -} - -type zipReadCloser struct { - io.Closer - *zip.Reader -} - -// zipOpener holds the information needed to open a zip -// file. -type zipOpener interface { - openZip() (*zipReadCloser, error) -} - -// newZipOpenerFromPath returns a zipOpener that can be -// used to read the archive from the given path. -func newZipOpenerFromPath(path string) zipOpener { - return &zipPathOpener{path: path} -} - -// newZipOpenerFromReader returns a zipOpener that can be -// used to read the archive from the given ReaderAt -// holding the given number of bytes. -func newZipOpenerFromReader(r io.ReaderAt, size int64) zipOpener { - return &zipReaderOpener{ - r: r, - size: size, - } -} - -type zipPathOpener struct { - path string -} - -func (zo *zipPathOpener) openZip() (*zipReadCloser, error) { - f, err := os.Open(zo.path) - if err != nil { - return nil, err - } - fi, err := f.Stat() - if err != nil { - f.Close() - return nil, err - } - r, err := zip.NewReader(f, fi.Size()) - if err != nil { - f.Close() - return nil, err - } - return &zipReadCloser{Closer: f, Reader: r}, nil -} - -type zipReaderOpener struct { - r io.ReaderAt - size int64 -} - -func (zo *zipReaderOpener) openZip() (*zipReadCloser, error) { - r, err := zip.NewReader(zo.r, zo.size) - if err != nil { - return nil, err - } - return &zipReadCloser{Closer: ioutil.NopCloser(nil), Reader: r}, nil -} - -// Manifest returns a set of the charm's contents. -func (a *CharmArchive) Manifest() (set.Strings, error) { - zipr, err := a.zopen.openZip() - if err != nil { - return set.NewStrings(), err - } - defer zipr.Close() - paths, err := ziputil.Find(zipr.Reader, "*") - if err != nil { - return set.NewStrings(), err - } - manifest := set.NewStrings(paths...) - // We always write out a revision file, even if there isn't one in the - // archive; and we always strip ".", because that's sometimes not present. - manifest.Add("revision") - manifest.Remove(".") - return manifest, nil -} - -// ExpandTo expands the charm archive into dir, creating it if necessary. -// If any errors occur during the expansion procedure, the process will -// abort. -func (a *CharmArchive) ExpandTo(dir string) error { - zipr, err := a.zopen.openZip() - if err != nil { - return err - } - defer zipr.Close() - if err := ziputil.ExtractAll(zipr.Reader, dir); err != nil { - return err - } - hooksDir := filepath.Join(dir, "hooks") - fixHook := fixHookFunc(hooksDir, a.meta.Hooks()) - if err := filepath.Walk(hooksDir, fixHook); err != nil { - if !os.IsNotExist(err) { - return err - } - } - revFile, err := os.Create(filepath.Join(dir, "revision")) - if err != nil { - return err - } - _, err = revFile.Write([]byte(strconv.Itoa(a.revision))) - revFile.Close() - return err -} - -// fixHookFunc returns a WalkFunc that makes sure hooks are owner-executable. 
-func fixHookFunc(hooksDir string, hookNames map[string]bool) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - mode := info.Mode() - if path != hooksDir && mode.IsDir() { - return filepath.SkipDir - } - if name := filepath.Base(path); hookNames[name] { - if mode&0100 == 0 { - return os.Chmod(path, mode|0100) - } - } - return nil - } -} === removed file 'src/gopkg.in/juju/charm.v5/charmarchive_test.go' --- src/gopkg.in/juju/charm.v5/charmarchive_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmarchive_test.go 1970-01-01 00:00:00 +0000 @@ -1,382 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm_test - -import ( - "archive/zip" - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "syscall" - - jc "github.com/juju/testing/checkers" - "github.com/juju/utils/set" - gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" -) - -type CharmArchiveSuite struct { - archivePath string -} - -var _ = gc.Suite(&CharmArchiveSuite{}) - -func (s *CharmArchiveSuite) SetUpSuite(c *gc.C) { - s.archivePath = TestCharms.CharmArchivePath(c.MkDir(), "dummy") -} - -var dummyManifest = []string{ - "actions.yaml", - "config.yaml", - "empty", - "empty/.gitkeep", - "hooks", - "hooks/install", - "metadata.yaml", - "revision", - "src", - "src/hello.c", -} - -func (s *CharmArchiveSuite) TestReadCharmArchive(c *gc.C) { - archive, err := charm.ReadCharmArchive(s.archivePath) - c.Assert(err, gc.IsNil) - checkDummy(c, archive, s.archivePath) -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutConfig(c *gc.C) { - // Technically varnish has no config AND no actions. - // Perhaps we should make this more orthogonal? - path := TestCharms.CharmArchivePath(c.MkDir(), "varnish") - archive, err := charm.ReadCharmArchive(path) - c.Assert(err, gc.IsNil) - - // A lacking config.yaml file still causes a proper - // Config value to be returned. - c.Assert(archive.Config().Options, gc.HasLen, 0) -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutMetrics(c *gc.C) { - path := TestCharms.CharmArchivePath(c.MkDir(), "varnish") - dir, err := charm.ReadCharmArchive(path) - c.Assert(err, gc.IsNil) - - // A lacking metrics.yaml file indicates the unit will not - // be metered. - c.Assert(dir.Metrics(), gc.IsNil) -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveWithEmptyMetrics(c *gc.C) { - path := TestCharms.CharmArchivePath(c.MkDir(), "metered-empty") - dir, err := charm.ReadCharmArchive(path) - c.Assert(err, gc.IsNil) - c.Assert(Keys(dir.Metrics()), gc.HasLen, 0) -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveWithCustomMetrics(c *gc.C) { - path := TestCharms.CharmArchivePath(c.MkDir(), "metered") - dir, err := charm.ReadCharmArchive(path) - c.Assert(err, gc.IsNil) - - c.Assert(dir.Metrics(), gc.NotNil) - c.Assert(Keys(dir.Metrics()), gc.DeepEquals, []string{"juju-unit-time", "pings"}) -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutActions(c *gc.C) { - // Wordpress has config but no actions. - path := TestCharms.CharmArchivePath(c.MkDir(), "wordpress") - archive, err := charm.ReadCharmArchive(path) - c.Assert(err, gc.IsNil) - - // A lacking actions.yaml file still causes a proper - // Actions value to be returned. 
- c.Assert(archive.Actions().ActionSpecs, gc.HasLen, 0) -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveBytes(c *gc.C) { - data, err := ioutil.ReadFile(s.archivePath) - c.Assert(err, gc.IsNil) - - archive, err := charm.ReadCharmArchiveBytes(data) - c.Assert(err, gc.IsNil) - checkDummy(c, archive, "") -} - -func (s *CharmArchiveSuite) TestReadCharmArchiveFromReader(c *gc.C) { - f, err := os.Open(s.archivePath) - c.Assert(err, gc.IsNil) - defer f.Close() - info, err := f.Stat() - c.Assert(err, gc.IsNil) - - archive, err := charm.ReadCharmArchiveFromReader(f, info.Size()) - c.Assert(err, gc.IsNil) - checkDummy(c, archive, "") -} - -func (s *CharmArchiveSuite) TestManifest(c *gc.C) { - archive, err := charm.ReadCharmArchive(s.archivePath) - c.Assert(err, gc.IsNil) - manifest, err := archive.Manifest() - c.Assert(err, gc.IsNil) - c.Assert(manifest, jc.DeepEquals, set.NewStrings(dummyManifest...)) -} - -func (s *CharmArchiveSuite) TestManifestNoRevision(c *gc.C) { - archive, err := charm.ReadCharmArchive(s.archivePath) - c.Assert(err, gc.IsNil) - dirPath := c.MkDir() - err = archive.ExpandTo(dirPath) - c.Assert(err, gc.IsNil) - err = os.Remove(filepath.Join(dirPath, "revision")) - c.Assert(err, gc.IsNil) - - archive = extCharmArchiveDir(c, dirPath) - manifest, err := archive.Manifest() - c.Assert(err, gc.IsNil) - c.Assert(manifest, gc.DeepEquals, set.NewStrings(dummyManifest...)) -} - -func (s *CharmArchiveSuite) TestManifestSymlink(c *gc.C) { - srcPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - if err := os.Symlink("../target", filepath.Join(srcPath, "hooks/symlink")); err != nil { - c.Skip("cannot symlink") - } - expected := append([]string{"hooks/symlink"}, dummyManifest...) - - archive := archiveDir(c, srcPath) - manifest, err := archive.Manifest() - c.Assert(err, gc.IsNil) - c.Assert(manifest, gc.DeepEquals, set.NewStrings(expected...)) -} - -func (s *CharmArchiveSuite) TestExpandTo(c *gc.C) { - archive, err := charm.ReadCharmArchive(s.archivePath) - c.Assert(err, gc.IsNil) - - path := filepath.Join(c.MkDir(), "charm") - err = archive.ExpandTo(path) - c.Assert(err, gc.IsNil) - - dir, err := charm.ReadCharmDir(path) - c.Assert(err, gc.IsNil) - checkDummy(c, dir, path) -} - -func (s *CharmArchiveSuite) prepareCharmArchive(c *gc.C, charmDir *charm.CharmDir, archivePath string) { - file, err := os.Create(archivePath) - c.Assert(err, gc.IsNil) - defer file.Close() - zipw := zip.NewWriter(file) - defer zipw.Close() - - h := &zip.FileHeader{Name: "revision"} - h.SetMode(syscall.S_IFREG | 0644) - w, err := zipw.CreateHeader(h) - c.Assert(err, gc.IsNil) - _, err = w.Write([]byte(strconv.Itoa(charmDir.Revision()))) - - h = &zip.FileHeader{Name: "metadata.yaml", Method: zip.Deflate} - h.SetMode(0644) - w, err = zipw.CreateHeader(h) - c.Assert(err, gc.IsNil) - data, err := yaml.Marshal(charmDir.Meta()) - c.Assert(err, gc.IsNil) - _, err = w.Write(data) - c.Assert(err, gc.IsNil) - - for name := range charmDir.Meta().Hooks() { - hookName := filepath.Join("hooks", name) - h = &zip.FileHeader{ - Name: hookName, - Method: zip.Deflate, - } - // Force it non-executable - h.SetMode(0644) - w, err := zipw.CreateHeader(h) - c.Assert(err, gc.IsNil) - _, err = w.Write([]byte("not important")) - c.Assert(err, gc.IsNil) - } -} - -func (s *CharmArchiveSuite) TestExpandToSetsHooksExecutable(c *gc.C) { - charmDir := TestCharms.ClonedDir(c.MkDir(), "all-hooks") - // CharmArchive manually, so we can check ExpandTo(), unaffected - // by ArchiveTo()'s behavior - archivePath := filepath.Join(c.MkDir(), 
"archive.charm") - s.prepareCharmArchive(c, charmDir, archivePath) - archive, err := charm.ReadCharmArchive(archivePath) - c.Assert(err, gc.IsNil) - - path := filepath.Join(c.MkDir(), "charm") - err = archive.ExpandTo(path) - c.Assert(err, gc.IsNil) - - _, err = charm.ReadCharmDir(path) - c.Assert(err, gc.IsNil) - - for name := range archive.Meta().Hooks() { - hookName := string(name) - info, err := os.Stat(filepath.Join(path, "hooks", hookName)) - c.Assert(err, gc.IsNil) - perm := info.Mode() & 0777 - c.Assert(perm&0100 != 0, gc.Equals, true, gc.Commentf("hook %q is not executable", hookName)) - } -} - -func (s *CharmArchiveSuite) TestCharmArchiveFileModes(c *gc.C) { - // Apply subtler mode differences than can be expressed in Bazaar. - srcPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - modes := []struct { - path string - mode os.FileMode - }{ - {"hooks/install", 0751}, - {"empty", 0750}, - {"src/hello.c", 0614}, - } - for _, m := range modes { - err := os.Chmod(filepath.Join(srcPath, m.path), m.mode) - c.Assert(err, gc.IsNil) - } - var haveSymlinks = true - if err := os.Symlink("../target", filepath.Join(srcPath, "hooks/symlink")); err != nil { - haveSymlinks = false - } - - // CharmArchive and extract the charm to a new directory. - archive := archiveDir(c, srcPath) - path := c.MkDir() - err := archive.ExpandTo(path) - c.Assert(err, gc.IsNil) - - // Check sensible file modes once round-tripped. - info, err := os.Stat(filepath.Join(path, "src", "hello.c")) - c.Assert(err, gc.IsNil) - c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0644)) - c.Assert(info.Mode()&os.ModeType, gc.Equals, os.FileMode(0)) - - info, err = os.Stat(filepath.Join(path, "hooks", "install")) - c.Assert(err, gc.IsNil) - c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0755)) - c.Assert(info.Mode()&os.ModeType, gc.Equals, os.FileMode(0)) - - info, err = os.Stat(filepath.Join(path, "empty")) - c.Assert(err, gc.IsNil) - c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0755)) - - if haveSymlinks { - target, err := os.Readlink(filepath.Join(path, "hooks", "symlink")) - c.Assert(err, gc.IsNil) - c.Assert(target, gc.Equals, "../target") - } -} - -func (s *CharmArchiveSuite) TestCharmArchiveRevisionFile(c *gc.C) { - charmDir := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - revPath := filepath.Join(charmDir, "revision") - - // Missing revision file - err := os.Remove(revPath) - c.Assert(err, gc.IsNil) - - archive := extCharmArchiveDir(c, charmDir) - c.Assert(archive.Revision(), gc.Equals, 0) - - // Missing revision file with old revision in metadata - file, err := os.OpenFile(filepath.Join(charmDir, "metadata.yaml"), os.O_WRONLY|os.O_APPEND, 0) - c.Assert(err, gc.IsNil) - _, err = file.Write([]byte("\nrevision: 1234\n")) - c.Assert(err, gc.IsNil) - - archive = extCharmArchiveDir(c, charmDir) - c.Assert(archive.Revision(), gc.Equals, 1234) - - // Revision file with bad content - err = ioutil.WriteFile(revPath, []byte("garbage"), 0666) - c.Assert(err, gc.IsNil) - - path := extCharmArchiveDirPath(c, charmDir) - archive, err = charm.ReadCharmArchive(path) - c.Assert(err, gc.ErrorMatches, "invalid revision file") - c.Assert(archive, gc.IsNil) -} - -func (s *CharmArchiveSuite) TestCharmArchiveSetRevision(c *gc.C) { - archive, err := charm.ReadCharmArchive(s.archivePath) - c.Assert(err, gc.IsNil) - - c.Assert(archive.Revision(), gc.Equals, 1) - archive.SetRevision(42) - c.Assert(archive.Revision(), gc.Equals, 42) - - path := filepath.Join(c.MkDir(), "charm") - err = archive.ExpandTo(path) - c.Assert(err, gc.IsNil) - - 
dir, err := charm.ReadCharmDir(path) - c.Assert(err, gc.IsNil) - c.Assert(dir.Revision(), gc.Equals, 42) -} - -func (s *CharmArchiveSuite) TestExpandToWithBadLink(c *gc.C) { - charmDir := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - badLink := filepath.Join(charmDir, "hooks", "badlink") - - // Symlink targeting a path outside of the charm. - err := os.Symlink("../../target", badLink) - c.Assert(err, gc.IsNil) - - archive := extCharmArchiveDir(c, charmDir) - c.Assert(err, gc.IsNil) - - path := filepath.Join(c.MkDir(), "charm") - err = archive.ExpandTo(path) - c.Assert(err, gc.ErrorMatches, `cannot extract "hooks/badlink": symlink "../../target" leads out of scope`) - - // Symlink targeting an absolute path. - os.Remove(badLink) - err = os.Symlink("/target", badLink) - c.Assert(err, gc.IsNil) - - archive = extCharmArchiveDir(c, charmDir) - c.Assert(err, gc.IsNil) - - path = filepath.Join(c.MkDir(), "charm") - err = archive.ExpandTo(path) - c.Assert(err, gc.ErrorMatches, `cannot extract "hooks/badlink": symlink "/target" is absolute`) -} - -func extCharmArchiveDirPath(c *gc.C, dirpath string) string { - path := filepath.Join(c.MkDir(), "archive.charm") - cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf("cd %s; zip --fifo --symlinks -r %s .", dirpath, path)) - output, err := cmd.CombinedOutput() - c.Assert(err, gc.IsNil, gc.Commentf("Command output: %s", output)) - return path -} - -func extCharmArchiveDir(c *gc.C, dirpath string) *charm.CharmArchive { - path := extCharmArchiveDirPath(c, dirpath) - archive, err := charm.ReadCharmArchive(path) - c.Assert(err, gc.IsNil) - return archive -} - -func archiveDir(c *gc.C, dirpath string) *charm.CharmArchive { - dir, err := charm.ReadCharmDir(dirpath) - c.Assert(err, gc.IsNil) - buf := new(bytes.Buffer) - err = dir.ArchiveTo(buf) - c.Assert(err, gc.IsNil) - archive, err := charm.ReadCharmArchiveBytes(buf.Bytes()) - c.Assert(err, gc.IsNil) - return archive -} === removed file 'src/gopkg.in/juju/charm.v5/charmdir.go' --- src/gopkg.in/juju/charm.v5/charmdir.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmdir.go 1970-01-01 00:00:00 +0000 @@ -1,312 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "archive/zip" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" -) - -// The CharmDir type encapsulates access to data and operations -// on a charm directory. -type CharmDir struct { - Path string - meta *Meta - config *Config - metrics *Metrics - actions *Actions - revision int -} - -// Trick to ensure *CharmDir implements the Charm interface. -var _ Charm = (*CharmDir)(nil) - -// ReadCharmDir returns a CharmDir representing an expanded charm directory. 
-func ReadCharmDir(path string) (dir *CharmDir, err error) { - dir = &CharmDir{Path: path} - file, err := os.Open(dir.join("metadata.yaml")) - if err != nil { - return nil, err - } - dir.meta, err = ReadMeta(file) - file.Close() - if err != nil { - return nil, err - } - - file, err = os.Open(dir.join("config.yaml")) - if _, ok := err.(*os.PathError); ok { - dir.config = NewConfig() - } else if err != nil { - return nil, err - } else { - dir.config, err = ReadConfig(file) - file.Close() - if err != nil { - return nil, err - } - } - - file, err = os.Open(dir.join("metrics.yaml")) - if err == nil { - dir.metrics, err = ReadMetrics(file) - file.Close() - if err != nil { - return nil, err - } - } else if !os.IsNotExist(err) { - return nil, err - } - - file, err = os.Open(dir.join("actions.yaml")) - if _, ok := err.(*os.PathError); ok { - dir.actions = NewActions() - } else if err != nil { - return nil, err - } else { - dir.actions, err = ReadActionsYaml(file) - file.Close() - if err != nil { - return nil, err - } - } - - if file, err = os.Open(dir.join("revision")); err == nil { - _, err = fmt.Fscan(file, &dir.revision) - file.Close() - if err != nil { - return nil, errors.New("invalid revision file") - } - } else { - dir.revision = dir.meta.OldRevision - } - - return dir, nil -} - -// join builds a path rooted at the charm's expanded directory -// path and the extra path components provided. -func (dir *CharmDir) join(parts ...string) string { - parts = append([]string{dir.Path}, parts...) - return filepath.Join(parts...) -} - -// Revision returns the revision number for the charm -// expanded in dir. -func (dir *CharmDir) Revision() int { - return dir.revision -} - -// Meta returns the Meta representing the metadata.yaml file -// for the charm expanded in dir. -func (dir *CharmDir) Meta() *Meta { - return dir.meta -} - -// Config returns the Config representing the config.yaml file -// for the charm expanded in dir. -func (dir *CharmDir) Config() *Config { - return dir.config -} - -// Metrics returns the Metrics representing the metrics.yaml file -// for the charm expanded in dir. -func (dir *CharmDir) Metrics() *Metrics { - return dir.metrics -} - -// Actions returns the Actions representing the actions.yaml file -// for the charm expanded in dir. -func (dir *CharmDir) Actions() *Actions { - return dir.actions -} - -// SetRevision changes the charm revision number. This affects -// the revision reported by Revision and the revision of the -// charm archived by ArchiveTo. -// The revision file in the charm directory is not modified. -func (dir *CharmDir) SetRevision(revision int) { - dir.revision = revision -} - -// SetDiskRevision does the same as SetRevision but also changes -// the revision file in the charm directory. -func (dir *CharmDir) SetDiskRevision(revision int) error { - dir.SetRevision(revision) - file, err := os.OpenFile(dir.join("revision"), os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return err - } - _, err = file.Write([]byte(strconv.Itoa(revision))) - file.Close() - return err -} - -// resolveSymlinkedRoot returns the target destination of a -// charm root directory if the root directory is a symlink. 
-func resolveSymlinkedRoot(rootPath string) (string, error) { - info, err := os.Lstat(rootPath) - if err == nil && info.Mode()&os.ModeSymlink != 0 { - rootPath, err = filepath.EvalSymlinks(rootPath) - if err != nil { - return "", fmt.Errorf("cannot read path symlink at %q: %v", rootPath, err) - } - } - return rootPath, nil -} - -// ArchiveTo creates a charm file from the charm expanded in dir. -// By convention a charm archive should have a ".charm" suffix. -func (dir *CharmDir) ArchiveTo(w io.Writer) error { - return writeArchive(w, dir.Path, dir.revision, dir.Meta().Hooks()) -} - -func writeArchive(w io.Writer, path string, revision int, hooks map[string]bool) error { - zipw := zip.NewWriter(w) - defer zipw.Close() - - // The root directory may be symlinked elsewhere so - // resolve that before creating the zip. - rootPath, err := resolveSymlinkedRoot(path) - if err != nil { - return err - } - zp := zipPacker{zipw, rootPath, hooks} - if revision != -1 { - zp.AddRevision(revision) - } - return filepath.Walk(rootPath, zp.WalkFunc()) -} - -type zipPacker struct { - *zip.Writer - root string - hooks map[string]bool -} - -func (zp *zipPacker) WalkFunc() filepath.WalkFunc { - return func(path string, fi os.FileInfo, err error) error { - return zp.visit(path, fi, err) - } -} - -func (zp *zipPacker) AddRevision(revision int) error { - h := &zip.FileHeader{Name: "revision"} - h.SetMode(syscall.S_IFREG | 0644) - w, err := zp.CreateHeader(h) - if err == nil { - _, err = w.Write([]byte(strconv.Itoa(revision))) - } - return err -} - -func (zp *zipPacker) visit(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - relpath, err := filepath.Rel(zp.root, path) - if err != nil { - return err - } - method := zip.Deflate - hidden := len(relpath) > 1 && relpath[0] == '.' - if fi.IsDir() { - if relpath == "build" { - return filepath.SkipDir - } - if hidden { - return filepath.SkipDir - } - relpath += "/" - method = zip.Store - } - - mode := fi.Mode() - if err := checkFileType(relpath, mode); err != nil { - return err - } - if mode&os.ModeSymlink != 0 { - method = zip.Store - } - if hidden || relpath == "revision" { - return nil - } - h := &zip.FileHeader{ - Name: relpath, - Method: method, - } - - perm := os.FileMode(0644) - if mode&os.ModeSymlink != 0 { - perm = 0777 - } else if mode&0100 != 0 { - perm = 0755 - } - if filepath.Dir(relpath) == "hooks" { - hookName := filepath.Base(relpath) - if _, ok := zp.hooks[hookName]; ok && !fi.IsDir() && mode&0100 == 0 { - logger.Warningf("making %q executable in charm", path) - perm = perm | 0100 - } - } - h.SetMode(mode&^0777 | perm) - - w, err := zp.CreateHeader(h) - if err != nil || fi.IsDir() { - return err - } - var data []byte - if mode&os.ModeSymlink != 0 { - target, err := os.Readlink(path) - if err != nil { - return err - } - if err := checkSymlinkTarget(zp.root, relpath, target); err != nil { - return err - } - data = []byte(target) - _, err = w.Write(data) - } else { - file, err := os.Open(path) - if err != nil { - return err - } - defer file.Close() - _, err = io.Copy(w, file) - } - return err -} - -func checkSymlinkTarget(basedir, symlink, target string) error { - if filepath.IsAbs(target) { - return fmt.Errorf("symlink %q is absolute: %q", symlink, target) - } - p := filepath.Join(filepath.Dir(symlink), target) - if p == ".." 
|| strings.HasPrefix(p, "../") {
-		return fmt.Errorf("symlink %q links out of charm: %q", symlink, target)
-	}
-	return nil
-}
-
-func checkFileType(path string, mode os.FileMode) error {
-	e := "file has an unknown type: %q"
-	switch mode & os.ModeType {
-	case os.ModeDir, os.ModeSymlink, 0:
-		return nil
-	case os.ModeNamedPipe:
-		e = "file is a named pipe: %q"
-	case os.ModeSocket:
-		e = "file is a socket: %q"
-	case os.ModeDevice:
-		e = "file is a device: %q"
-	}
-	return fmt.Errorf(e, path)
-} === removed file 'src/gopkg.in/juju/charm.v5/charmdir_test.go' --- src/gopkg.in/juju/charm.v5/charmdir_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmdir_test.go 1970-01-01 00:00:00 +0000 @@ -1,318 +0,0 @@
-// Copyright 2011, 2012, 2013 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package charm_test
-
-import (
-	"archive/zip"
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"syscall"
-
-	"github.com/juju/testing"
-	gc "gopkg.in/check.v1"
-
-	"gopkg.in/juju/charm.v5"
-)
-
-type CharmDirSuite struct {
-	testing.IsolationSuite
-}
-
-var _ = gc.Suite(&CharmDirSuite{})
-
-func (s *CharmDirSuite) TestReadCharmDir(c *gc.C) {
-	path := TestCharms.CharmDirPath("dummy")
-	dir, err := charm.ReadCharmDir(path)
-	c.Assert(err, gc.IsNil)
-	checkDummy(c, dir, path)
-}
-
-func (s *CharmDirSuite) TestReadCharmDirWithoutConfig(c *gc.C) {
-	path := TestCharms.CharmDirPath("varnish")
-	dir, err := charm.ReadCharmDir(path)
-	c.Assert(err, gc.IsNil)
-
-	// A missing config.yaml file still causes a proper
-	// Config value to be returned.
-	c.Assert(dir.Config().Options, gc.HasLen, 0)
-}
-
-func (s *CharmDirSuite) TestReadCharmDirWithoutMetrics(c *gc.C) {
-	path := TestCharms.CharmDirPath("varnish")
-	dir, err := charm.ReadCharmDir(path)
-	c.Assert(err, gc.IsNil)
-
-	// A missing metrics.yaml file indicates the unit will not
-	// be metered.
-	c.Assert(dir.Metrics(), gc.IsNil)
-}
-
-func (s *CharmDirSuite) TestReadCharmDirWithEmptyMetrics(c *gc.C) {
-	path := TestCharms.CharmDirPath("metered-empty")
-	dir, err := charm.ReadCharmDir(path)
-	c.Assert(err, gc.IsNil)
-	c.Assert(Keys(dir.Metrics()), gc.HasLen, 0)
-}
-
-func (s *CharmDirSuite) TestReadCharmDirWithCustomMetrics(c *gc.C) {
-	path := TestCharms.CharmDirPath("metered")
-	dir, err := charm.ReadCharmDir(path)
-	c.Assert(err, gc.IsNil)
-
-	c.Assert(dir.Metrics(), gc.NotNil)
-	c.Assert(Keys(dir.Metrics()), gc.DeepEquals, []string{"juju-unit-time", "pings"})
-}
-
-func (s *CharmDirSuite) TestReadCharmDirWithoutActions(c *gc.C) {
-	path := TestCharms.CharmDirPath("wordpress")
-	dir, err := charm.ReadCharmDir(path)
-	c.Assert(err, gc.IsNil)
-
-	// A missing actions.yaml file still causes a proper
-	// Actions value to be returned.
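-	// (ReadCharmDir falls back to an empty Actions value when actions.yaml cannot be opened.)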
- c.Assert(dir.Actions().ActionSpecs, gc.HasLen, 0) -} - -func (s *CharmDirSuite) TestArchiveTo(c *gc.C) { - baseDir := c.MkDir() - charmDir := TestCharms.ClonedDirPath(baseDir, "dummy") - s.assertArchiveTo(c, baseDir, charmDir) -} - -func (s *CharmDirSuite) TestArchiveToWithSymLinkedRootDir(c *gc.C) { - dir := c.MkDir() - baseDir := filepath.Join(dir, "precise") - err := os.MkdirAll(baseDir, 0755) - c.Assert(err, gc.IsNil) - TestCharms.ClonedDirPath(dir, "dummy") - err = os.Symlink(filepath.Join("..", "dummy"), filepath.Join(baseDir, "dummy")) - c.Assert(err, gc.IsNil) - charmDir := filepath.Join(baseDir, "dummy") - s.assertArchiveTo(c, baseDir, charmDir) -} - -func (s *CharmDirSuite) assertArchiveTo(c *gc.C, baseDir, charmDir string) { - haveSymlinks := true - if err := os.Symlink("../target", filepath.Join(charmDir, "hooks/symlink")); err != nil { - haveSymlinks = false - } - dir, err := charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - path := filepath.Join(baseDir, "archive.charm") - file, err := os.Create(path) - c.Assert(err, gc.IsNil) - err = dir.ArchiveTo(file) - file.Close() - c.Assert(err, gc.IsNil) - - zipr, err := zip.OpenReader(path) - c.Assert(err, gc.IsNil) - defer zipr.Close() - - var metaf, instf, emptyf, revf, symf *zip.File - for _, f := range zipr.File { - c.Logf("Archived file: %s", f.Name) - switch f.Name { - case "revision": - revf = f - case "metadata.yaml": - metaf = f - case "hooks/install": - instf = f - case "hooks/symlink": - symf = f - case "empty/": - emptyf = f - case "build/ignored": - c.Errorf("archive includes build/*: %s", f.Name) - case ".ignored", ".dir/ignored": - c.Errorf("archive includes .* entries: %s", f.Name) - } - } - - c.Assert(revf, gc.NotNil) - reader, err := revf.Open() - c.Assert(err, gc.IsNil) - data, err := ioutil.ReadAll(reader) - reader.Close() - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, "1") - - c.Assert(metaf, gc.NotNil) - reader, err = metaf.Open() - c.Assert(err, gc.IsNil) - meta, err := charm.ReadMeta(reader) - reader.Close() - c.Assert(err, gc.IsNil) - c.Assert(meta.Name, gc.Equals, "dummy") - - c.Assert(instf, gc.NotNil) - // Despite it being 0751, we pack and unpack it as 0755. - c.Assert(instf.Mode()&0777, gc.Equals, os.FileMode(0755)) - - if haveSymlinks { - c.Assert(symf, gc.NotNil) - c.Assert(symf.Mode()&0777, gc.Equals, os.FileMode(0777)) - reader, err = symf.Open() - c.Assert(err, gc.IsNil) - data, err = ioutil.ReadAll(reader) - reader.Close() - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, "../target") - } else { - c.Assert(symf, gc.IsNil) - } - - c.Assert(emptyf, gc.NotNil) - c.Assert(emptyf.Mode()&os.ModeType, gc.Equals, os.ModeDir) - // Despite it being 0750, we pack and unpack it as 0755. 
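-	// (writeArchive normalizes permissions: entries with the owner-execute bit become 0755, others 0644, symlinks 0777.)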
- c.Assert(emptyf.Mode()&0777, gc.Equals, os.FileMode(0755)) -} - -// Bug #864164: Must complain if charm hooks aren't executable -func (s *CharmDirSuite) TestArchiveToWithNonExecutableHooks(c *gc.C) { - hooks := []string{"install", "start", "config-changed", "upgrade-charm", "stop", "collect-metrics", "meter-status-changed"} - for _, relName := range []string{"foo", "bar", "self"} { - for _, kind := range []string{"joined", "changed", "departed", "broken"} { - hooks = append(hooks, relName+"-relation-"+kind) - } - } - - dir := TestCharms.CharmDir("all-hooks") - path := filepath.Join(c.MkDir(), "archive.charm") - file, err := os.Create(path) - c.Assert(err, gc.IsNil) - err = dir.ArchiveTo(file) - file.Close() - c.Assert(err, gc.IsNil) - - tlog := c.GetTestLog() - for _, hook := range hooks { - fullpath := filepath.Join(dir.Path, "hooks", hook) - exp := fmt.Sprintf(`^(.|\n)*WARNING juju.charm making "%s" executable in charm(.|\n)*$`, fullpath) - c.Assert(tlog, gc.Matches, exp, gc.Commentf("hook %q was not made executable", fullpath)) - } - - // Expand it and check the hooks' permissions - // (But do not use ExpandTo(), just use the raw zip) - f, err := os.Open(path) - c.Assert(err, gc.IsNil) - defer f.Close() - fi, err := f.Stat() - c.Assert(err, gc.IsNil) - size := fi.Size() - zipr, err := zip.NewReader(f, size) - c.Assert(err, gc.IsNil) - allhooks := dir.Meta().Hooks() - for _, zfile := range zipr.File { - cleanName := filepath.Clean(zfile.Name) - if strings.HasPrefix(cleanName, "hooks") { - hookName := filepath.Base(cleanName) - if _, ok := allhooks[hookName]; ok { - perms := zfile.Mode() - c.Assert(perms&0100 != 0, gc.Equals, true, gc.Commentf("hook %q is not executable", hookName)) - } - } - } -} - -func (s *CharmDirSuite) TestArchiveToWithBadType(c *gc.C) { - charmDir := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - badFile := filepath.Join(charmDir, "hooks", "badfile") - - // Symlink targeting a path outside of the charm. - err := os.Symlink("../../target", badFile) - c.Assert(err, gc.IsNil) - - dir, err := charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - - err = dir.ArchiveTo(&bytes.Buffer{}) - c.Assert(err, gc.ErrorMatches, `symlink "hooks/badfile" links out of charm: "../../target"`) - - // Symlink targeting an absolute path. - os.Remove(badFile) - err = os.Symlink("/target", badFile) - c.Assert(err, gc.IsNil) - - dir, err = charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - - err = dir.ArchiveTo(&bytes.Buffer{}) - c.Assert(err, gc.ErrorMatches, `symlink "hooks/badfile" is absolute: "/target"`) - - // Can't archive special files either. 
- os.Remove(badFile) - err = syscall.Mkfifo(badFile, 0644) - c.Assert(err, gc.IsNil) - - dir, err = charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - - err = dir.ArchiveTo(&bytes.Buffer{}) - c.Assert(err, gc.ErrorMatches, `file is a named pipe: "hooks/badfile"`) -} - -func (s *CharmDirSuite) TestDirRevisionFile(c *gc.C) { - charmDir := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - revPath := filepath.Join(charmDir, "revision") - - // Missing revision file - err := os.Remove(revPath) - c.Assert(err, gc.IsNil) - - dir, err := charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - c.Assert(dir.Revision(), gc.Equals, 0) - - // Missing revision file with old revision in metadata - file, err := os.OpenFile(filepath.Join(charmDir, "metadata.yaml"), os.O_WRONLY|os.O_APPEND, 0) - c.Assert(err, gc.IsNil) - _, err = file.Write([]byte("\nrevision: 1234\n")) - c.Assert(err, gc.IsNil) - - dir, err = charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - c.Assert(dir.Revision(), gc.Equals, 1234) - - // Revision file with bad content - err = ioutil.WriteFile(revPath, []byte("garbage"), 0666) - c.Assert(err, gc.IsNil) - - dir, err = charm.ReadCharmDir(charmDir) - c.Assert(err, gc.ErrorMatches, "invalid revision file") - c.Assert(dir, gc.IsNil) -} - -func (s *CharmDirSuite) TestDirSetRevision(c *gc.C) { - dir := TestCharms.ClonedDir(c.MkDir(), "dummy") - c.Assert(dir.Revision(), gc.Equals, 1) - dir.SetRevision(42) - c.Assert(dir.Revision(), gc.Equals, 42) - - var b bytes.Buffer - err := dir.ArchiveTo(&b) - c.Assert(err, gc.IsNil) - - archive, err := charm.ReadCharmArchiveBytes(b.Bytes()) - c.Assert(archive.Revision(), gc.Equals, 42) -} - -func (s *CharmDirSuite) TestDirSetDiskRevision(c *gc.C) { - charmDir := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - dir, err := charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - - c.Assert(dir.Revision(), gc.Equals, 1) - dir.SetDiskRevision(42) - c.Assert(dir.Revision(), gc.Equals, 42) - - dir, err = charm.ReadCharmDir(charmDir) - c.Assert(err, gc.IsNil) - c.Assert(dir.Revision(), gc.Equals, 42) -} === removed directory 'src/gopkg.in/juju/charm.v5/charmrepo' === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/charmstore.go' --- src/gopkg.in/juju/charm.v5/charmrepo/charmstore.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/charmstore.go 1970-01-01 00:00:00 +0000 @@ -1,251 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo - -import ( - "crypto/sha512" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - - "github.com/juju/utils" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charmstore.v4/csclient" - "gopkg.in/juju/charmstore.v4/params" - - "gopkg.in/juju/charm.v5" -) - -// CacheDir stores the charm cache directory path. -var CacheDir string - -// CharmStore is a repository Interface that provides access to the public Juju -// charm store. -type CharmStore struct { - client *csclient.Client -} - -var _ Interface = (*CharmStore)(nil) - -// NewCharmStoreParams holds parameters for instantiating a new CharmStore. -type NewCharmStoreParams struct { - // URL holds the root endpoint URL of the charm store, - // with no trailing slash, not including the version. - // For example https://api.jujucharms.com/charmstore - // If empty, the default charm store client location is used. - URL string - - // HTTPClient holds the HTTP client to use when making - // requests to the store. If nil, httpbakery.NewHTTPClient will - // be used. 
- HTTPClient *http.Client - - // VisitWebPage is called when authorization requires that - // the user visits a web page to authenticate themselves. - // If nil, a default function that returns an error will be used. - VisitWebPage func(url *url.URL) error -} - -// NewCharmStore creates and returns a charm store repository. -// The given parameters are used to instantiate the charm store. -// -// The errors returned from the interface methods will -// preserve the causes returned from the underlying csclient -// methods. -func NewCharmStore(p NewCharmStoreParams) Interface { - return &CharmStore{ - client: csclient.New(csclient.Params{ - URL: p.URL, - HTTPClient: p.HTTPClient, - VisitWebPage: p.VisitWebPage, - }), - } -} - -// Get implements Interface.Get. -func (s *CharmStore) Get(curl *charm.URL) (charm.Charm, error) { - // The cache location must have been previously set. - if CacheDir == "" { - panic("charm cache directory path is empty") - } - if curl.Series == "bundle" { - return nil, errgo.Newf("expected a charm URL, got bundle URL %q", curl) - } - - // Prepare the cache directory and retrieve the charm. - if err := os.MkdirAll(CacheDir, 0755); err != nil { - return nil, errgo.Notef(err, "cannot create the cache directory") - } - r, id, expectHash, expectSize, err := s.client.GetArchive(curl.Reference()) - if err != nil { - if errgo.Cause(err) == params.ErrNotFound { - // Make a prettier error message for the user. - return nil, errgo.WithCausef(nil, params.ErrNotFound, "cannot retrieve charm %q: charm not found", curl) - } - return nil, errgo.NoteMask(err, fmt.Sprintf("cannot retrieve charm %q", curl), errgo.Any) - } - defer r.Close() - - // Check if the archive already exists in the cache. - path := filepath.Join(CacheDir, charm.Quote(id.String())+".charm") - if verifyHash384AndSize(path, expectHash, expectSize) == nil { - return charm.ReadCharmArchive(path) - } - - // Verify and save the new archive. - f, err := ioutil.TempFile(CacheDir, "charm-download") - if err != nil { - return nil, errgo.Notef(err, "cannot make temporary file") - } - defer f.Close() - hash := sha512.New384() - size, err := io.Copy(io.MultiWriter(hash, f), r) - if err != nil { - return nil, errgo.Notef(err, "cannot read charm archive") - } - if size != expectSize { - return nil, errgo.Newf("size mismatch; network corruption?") - } - if fmt.Sprintf("%x", hash.Sum(nil)) != expectHash { - return nil, errgo.Newf("hash mismatch; network corruption?") - } - - // Move the archive to the expected place, and return the charm. - err = f.Close() - if err != nil { - return nil, err - } - if err := utils.ReplaceFile(f.Name(), path); err != nil { - return nil, errgo.Notef(err, "cannot move the charm archive") - } - return charm.ReadCharmArchive(path) -} - -func verifyHash384AndSize(path, expectHash string, expectSize int64) error { - f, err := os.Open(path) - if err != nil { - return errgo.Mask(err) - } - defer f.Close() - hash := sha512.New384() - size, err := io.Copy(hash, f) - if err != nil { - return errgo.Mask(err) - } - if size != expectSize { - logger.Debugf("size mismatch for %q", path) - return errgo.Newf("size mismatch for %q", path) - } - if fmt.Sprintf("%x", hash.Sum(nil)) != expectHash { - logger.Debugf("hash mismatch for %q", path) - return errgo.Newf("hash mismatch for %q", path) - } - return nil -} - -// Latest implements Interface.Latest. -func (s *CharmStore) Latest(curls ...*charm.URL) ([]CharmRevision, error) { - if len(curls) == 0 { - return nil, nil - } - - // Prepare the request to the charm store. 
- urls := make([]string, len(curls)) - values := url.Values{} - // Include the ignore-auth flag so that non-public results do not generate - // an error for the whole request. - values.Add("ignore-auth", "1") - values.Add("include", "id-revision") - values.Add("include", "hash256") - for i, curl := range curls { - url := curl.WithRevision(-1).String() - urls[i] = url - values.Add("id", url) - } - u := url.URL{ - Path: "/meta/any", - RawQuery: values.Encode(), - } - - // Execute the request and retrieve results. - var results map[string]struct { - Meta struct { - IdRevision params.IdRevisionResponse `json:"id-revision"` - Hash256 params.HashResponse `json:"hash256"` - } - } - if err := s.client.Get(u.String(), &results); err != nil { - return nil, errgo.NoteMask(err, "cannot get metadata from the charm store", errgo.Any) - } - - // Build the response. - responses := make([]CharmRevision, len(curls)) - for i, url := range urls { - result, found := results[url] - if !found { - responses[i] = CharmRevision{ - Err: CharmNotFound(url), - } - continue - } - responses[i] = CharmRevision{ - Revision: result.Meta.IdRevision.Revision, - Sha256: result.Meta.Hash256.Sum, - } - } - return responses, nil -} - -// Resolve implements Interface.Resolve. -func (s *CharmStore) Resolve(ref *charm.Reference) (*charm.URL, error) { - var result struct { - Id params.IdResponse - } - if _, err := s.client.Meta(ref, &result); err != nil { - if errgo.Cause(err) == params.ErrNotFound { - // Make a prettier error message for the user. - return nil, errgo.WithCausef(nil, params.ErrNotFound, "cannot resolve charm URL %q: charm not found", ref) - } - return nil, errgo.NoteMask(err, fmt.Sprintf("cannot resolve charm URL %q", ref), errgo.Any) - } - url, err := result.Id.Id.URL("") - if err != nil { - return nil, errgo.Notef(err, "cannot make fully resolved entity URL from %s", url) - } - return url, nil -} - -// URL returns the root endpoint URL of the charm store. -func (s *CharmStore) URL() string { - return s.client.ServerURL() -} - -// WithTestMode returns a repository Interface where test mode is enabled, -// meaning charm store download stats are not increased when charms are -// retrieved. -func (s *CharmStore) WithTestMode() Interface { - newRepo := *s - newRepo.client.DisableStats() - return &newRepo -} - -// JujuMetadataHTTPHeader is the HTTP header name used to send Juju metadata -// attributes to the charm store. -const JujuMetadataHTTPHeader = "Juju-Metadata" - -// WithJujuAttrs returns a repository Interface with the Juju metadata -// attributes set. -func (s *CharmStore) WithJujuAttrs(attrs map[string]string) Interface { - newRepo := *s - header := make(http.Header) - for k, v := range attrs { - header.Add(JujuMetadataHTTPHeader, k+"="+v) - } - newRepo.client.SetHTTPHeader(header) - return &newRepo -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/charmstore_test.go' --- src/gopkg.in/juju/charm.v5/charmrepo/charmstore_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/charmstore_test.go 1970-01-01 00:00:00 +0000 @@ -1,514 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
- -package charmrepo_test - -import ( - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charmstore.v4" - "gopkg.in/juju/charmstore.v4/charmstoretesting" - "gopkg.in/juju/charmstore.v4/csclient" - "gopkg.in/juju/charmstore.v4/params" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - charmtesting "gopkg.in/juju/charm.v5/testing" -) - -type charmStoreSuite struct { - jujutesting.IsolationSuite -} - -var _ = gc.Suite(&charmStoreSuite{}) - -func (s *charmStoreSuite) TestURL(c *gc.C) { - repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ - URL: "https://1.2.3.4/charmstore", - }) - c.Assert(repo.(*charmrepo.CharmStore).URL(), gc.Equals, "https://1.2.3.4/charmstore") -} - -func (s *charmStoreSuite) TestDefaultURL(c *gc.C) { - repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{}) - c.Assert(repo.(*charmrepo.CharmStore).URL(), gc.Equals, csclient.ServerURL) -} - -var serverParams = charmstore.ServerParams{ - AuthUsername: "test-user", - AuthPassword: "test-password", -} - -type charmStoreBaseSuite struct { - charmtesting.IsolatedMgoSuite - srv *charmstoretesting.Server - repo charmrepo.Interface -} - -var _ = gc.Suite(&charmStoreBaseSuite{}) - -func (s *charmStoreBaseSuite) SetUpTest(c *gc.C) { - s.IsolatedMgoSuite.SetUpTest(c) - s.srv = charmstoretesting.OpenServer(c, s.Session, serverParams) - s.repo = charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ - URL: s.srv.URL(), - }) - s.PatchValue(&charmrepo.CacheDir, c.MkDir()) -} - -func (s *charmStoreBaseSuite) TearDownTest(c *gc.C) { - s.srv.Close() - s.IsolatedMgoSuite.TearDownTest(c) -} - -// addCharm uploads a charm to the testing charm store, and returns the -// resulting charm and charm URL. -func (s *charmStoreBaseSuite) addCharm(c *gc.C, url, name string) (charm.Charm, *charm.URL) { - id := charm.MustParseReference(url) - promulgated := false - if id.User == "" { - id.User = "who" - promulgated = true - } - ch := TestCharms.CharmArchive(c.MkDir(), name) - id = s.srv.UploadCharm(c, ch, id, promulgated) - return ch, (*charm.URL)(id) -} - -type charmStoreRepoSuite struct { - charmStoreBaseSuite -} - -var _ = gc.Suite(&charmStoreRepoSuite{}) - -// checkCharmDownloads checks that the charm represented by the given URL has -// been downloaded the expected number of times. -func (s *charmStoreRepoSuite) checkCharmDownloads(c *gc.C, url *charm.URL, expect int) { - client := csclient.New(csclient.Params{ - URL: s.srv.URL(), - }) - - key := []string{params.StatsArchiveDownload, url.Series, url.Name, url.User, strconv.Itoa(url.Revision)} - path := "/stats/counter/" + strings.Join(key, ":") - var count int - - getDownloads := func() int { - var result []params.Statistic - err := client.Get(path, &result) - c.Assert(err, jc.ErrorIsNil) - return int(result[0].Count) - } - - for retry := 0; retry < 10; retry++ { - time.Sleep(100 * time.Millisecond) - if count = getDownloads(); count == expect { - if expect == 0 && retry < 2 { - // Wait a bit to make sure. 
- continue - } - return - } - } - c.Errorf("downloads count for %s is %d, expected %d", url, count, expect) -} - -func (s *charmStoreRepoSuite) TestGet(c *gc.C) { - expect, url := s.addCharm(c, "~who/trusty/mysql-0", "mysql") - ch, err := s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - checkCharm(c, ch, expect) -} - -func (s *charmStoreRepoSuite) TestGetPromulgated(c *gc.C) { - expect, url := s.addCharm(c, "trusty/mysql-42", "mysql") - ch, err := s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - checkCharm(c, ch, expect) -} - -func (s *charmStoreRepoSuite) TestGetRevisions(c *gc.C) { - s.addCharm(c, "~dalek/trusty/riak-0", "riak") - expect1, url1 := s.addCharm(c, "~dalek/trusty/riak-1", "riak") - expect2, _ := s.addCharm(c, "~dalek/trusty/riak-2", "riak") - - // Retrieve an old revision. - ch, err := s.repo.Get(url1) - c.Assert(err, jc.ErrorIsNil) - checkCharm(c, ch, expect1) - - // Retrieve the latest revision. - ch, err = s.repo.Get(charm.MustParseURL("cs:~dalek/trusty/riak")) - c.Assert(err, jc.ErrorIsNil) - checkCharm(c, ch, expect2) -} - -func (s *charmStoreRepoSuite) TestGetCache(c *gc.C) { - _, url := s.addCharm(c, "~who/trusty/mysql-42", "mysql") - ch, err := s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - path := ch.(*charm.CharmArchive).Path - c.Assert(hashOfPath(c, path), gc.Equals, hashOfCharm(c, "mysql")) -} - -func (s *charmStoreRepoSuite) TestGetSameCharm(c *gc.C) { - _, url := s.addCharm(c, "precise/wordpress-47", "wordpress") - getModTime := func(path string) time.Time { - info, err := os.Stat(path) - c.Assert(err, jc.ErrorIsNil) - return info.ModTime() - } - - // Retrieve a charm. - ch1, err := s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - - // Retrieve its cache file modification time. - path := ch1.(*charm.CharmArchive).Path - modTime := getModTime(path) - - // Retrieve the same charm again. - ch2, err := s.repo.Get(url.WithRevision(-1)) - c.Assert(err, jc.ErrorIsNil) - - // Check this is the same charm, and its underlying cache file is the same. - checkCharm(c, ch2, ch1) - c.Assert(ch2.(*charm.CharmArchive).Path, gc.Equals, path) - - // Check the same file has been reused. - c.Assert(modTime.Equal(getModTime(path)), jc.IsTrue) -} - -func (s *charmStoreRepoSuite) TestGetInvalidCache(c *gc.C) { - _, url := s.addCharm(c, "~who/trusty/mysql-1", "mysql") - - // Retrieve a charm. - ch1, err := s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - - // Modify its cache file to make it invalid. - path := ch1.(*charm.CharmArchive).Path - err = ioutil.WriteFile(path, []byte("invalid"), 0644) - c.Assert(err, jc.ErrorIsNil) - - // Retrieve the same charm again. - _, err = s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - - // Check that the cache file have been properly rewritten. - c.Assert(hashOfPath(c, path), gc.Equals, hashOfCharm(c, "mysql")) -} - -func (s *charmStoreRepoSuite) TestGetIncreaseStats(c *gc.C) { - _, url := s.addCharm(c, "~who/precise/wordpress-2", "wordpress") - - // Retrieve the charm. - _, err := s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - s.checkCharmDownloads(c, url, 1) - - // Retrieve the charm again. - _, err = s.repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - s.checkCharmDownloads(c, url, 2) -} - -func (s *charmStoreRepoSuite) TestGetWithTestMode(c *gc.C) { - _, url := s.addCharm(c, "~who/precise/wordpress-42", "wordpress") - - // Use a repo with test mode enabled to download a charm a couple of - // times, and check the downloads count is not increased. 
- repo := s.repo.(*charmrepo.CharmStore).WithTestMode() - _, err := repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - _, err = repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - s.checkCharmDownloads(c, url, 0) -} - -func (s *charmStoreRepoSuite) TestGetWithJujuAttrs(c *gc.C) { - _, url := s.addCharm(c, "trusty/riak-0", "riak") - - // Set up a proxy server that stores the request header. - var header http.Header - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - header = r.Header - s.srv.Handler().ServeHTTP(w, r) - })) - defer srv.Close() - - repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ - URL: srv.URL, - }) - - // Make a first request without Juju attrs. - _, err := repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "") - - // Make a second request after setting Juju attrs. - repo = repo.(*charmrepo.CharmStore).WithJujuAttrs(map[string]string{ - "k1": "v1", - "k2": "v2", - }) - _, err = repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - values := header[http.CanonicalHeaderKey(charmrepo.JujuMetadataHTTPHeader)] - sort.Strings(values) - c.Assert(values, jc.DeepEquals, []string{"k1=v1", "k2=v2"}) - - // Make a third request after restoring empty attrs. - repo = repo.(*charmrepo.CharmStore).WithJujuAttrs(nil) - _, err = repo.Get(url) - c.Assert(err, jc.ErrorIsNil) - c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "") -} - -func (s *charmStoreRepoSuite) TestGetErrorBundle(c *gc.C) { - ch, err := s.repo.Get(charm.MustParseURL("cs:bundle/django")) - c.Assert(err, gc.ErrorMatches, `expected a charm URL, got bundle URL "cs:bundle/django"`) - c.Assert(ch, gc.IsNil) -} - -func (s *charmStoreRepoSuite) TestGetErrorCacheDir(c *gc.C) { - parentDir := c.MkDir() - err := os.Chmod(parentDir, 0) - c.Assert(err, jc.ErrorIsNil) - defer os.Chmod(parentDir, 0755) - s.PatchValue(&charmrepo.CacheDir, filepath.Join(parentDir, "cache")) - - ch, err := s.repo.Get(charm.MustParseURL("cs:trusty/django")) - c.Assert(err, gc.ErrorMatches, `cannot create the cache directory: .*: permission denied`) - c.Assert(ch, gc.IsNil) -} - -func (s *charmStoreRepoSuite) TestGetErrorCharmNotFound(c *gc.C) { - ch, err := s.repo.Get(charm.MustParseURL("cs:trusty/no-such")) - c.Assert(err, gc.ErrorMatches, `cannot retrieve charm "cs:trusty/no-such": charm not found`) - c.Assert(ch, gc.IsNil) -} - -func (s *charmStoreRepoSuite) TestGetErrorServer(c *gc.C) { - // Set up a server always returning errors. - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - http.Error(w, `{"Message": "bad wolf", "Code": "bad request"}`, http.StatusBadRequest) - })) - defer srv.Close() - - // Try getting a charm from the server. - repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ - URL: srv.URL, - }) - ch, err := repo.Get(charm.MustParseURL("cs:trusty/django")) - c.Assert(err, gc.ErrorMatches, `cannot retrieve charm "cs:trusty/django": cannot get archive: bad wolf`) - c.Assert(ch, gc.IsNil) -} - -func (s *charmStoreRepoSuite) TestGetErrorHashMismatch(c *gc.C) { - _, url := s.addCharm(c, "trusty/riak-0", "riak") - - // Set up a proxy server that modifies the returned hash. 
- srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - rec := httptest.NewRecorder() - s.srv.Handler().ServeHTTP(rec, r) - w.Header().Set(params.EntityIdHeader, rec.Header().Get(params.EntityIdHeader)) - w.Header().Set(params.ContentHashHeader, "invalid") - w.Write(rec.Body.Bytes()) - })) - defer srv.Close() - - // Try getting a charm from the server. - repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ - URL: srv.URL, - }) - ch, err := repo.Get(url) - c.Assert(err, gc.ErrorMatches, `hash mismatch; network corruption\?`) - c.Assert(ch, gc.IsNil) -} - -func (s *charmStoreRepoSuite) TestLatest(c *gc.C) { - // Add some charms to the charm store. - s.addCharm(c, "~who/trusty/mysql-0", "mysql") - s.addCharm(c, "~who/precise/wordpress-1", "wordpress") - s.addCharm(c, "~dalek/trusty/riak-0", "riak") - s.addCharm(c, "~dalek/trusty/riak-1", "riak") - s.addCharm(c, "~dalek/trusty/riak-3", "riak") - _, url := s.addCharm(c, "~who/utopic/varnish-0", "varnish") - - // Change permissions on one of the charms so that it is not readable by - // anyone. - err := s.srv.NewClient().Put("/"+url.Path()+"/meta/perm/read", []string{"dalek"}) - c.Assert(err, jc.ErrorIsNil) - - // Calculate and store the expected hashes for the uploaded charms. - mysqlHash := hashOfCharm(c, "mysql") - wordpressHash := hashOfCharm(c, "wordpress") - riakHash := hashOfCharm(c, "riak") - - // Define the tests to be run. - tests := []struct { - about string - urls []*charm.URL - revs []charmrepo.CharmRevision - }{{ - about: "no urls", - }, { - about: "charm not found", - urls: []*charm.URL{charm.MustParseURL("cs:trusty/no-such-42")}, - revs: []charmrepo.CharmRevision{{ - Err: charmrepo.CharmNotFound("cs:trusty/no-such"), - }}, - }, { - about: "resolve", - urls: []*charm.URL{ - charm.MustParseURL("cs:~who/trusty/mysql-42"), - charm.MustParseURL("cs:~who/trusty/mysql-0"), - charm.MustParseURL("cs:~who/trusty/mysql"), - }, - revs: []charmrepo.CharmRevision{{ - Revision: 0, - Sha256: mysqlHash, - }, { - Revision: 0, - Sha256: mysqlHash, - }, { - Revision: 0, - Sha256: mysqlHash, - }}, - }, { - about: "multiple charms", - urls: []*charm.URL{ - charm.MustParseURL("cs:~who/precise/wordpress"), - charm.MustParseURL("cs:~who/trusty/mysql-47"), - charm.MustParseURL("cs:~dalek/trusty/no-such"), - charm.MustParseURL("cs:~dalek/trusty/riak-0"), - }, - revs: []charmrepo.CharmRevision{{ - Revision: 1, - Sha256: wordpressHash, - }, { - Revision: 0, - Sha256: mysqlHash, - }, { - Err: charmrepo.CharmNotFound("cs:~dalek/trusty/no-such"), - }, { - Revision: 3, - Sha256: riakHash, - }}, - }, { - about: "unauthorized", - urls: []*charm.URL{ - charm.MustParseURL("cs:~who/precise/wordpress"), - url, - }, - revs: []charmrepo.CharmRevision{{ - Revision: 1, - Sha256: wordpressHash, - }, { - Err: charmrepo.CharmNotFound("cs:~who/utopic/varnish"), - }}, - }} - - // Run the tests. - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - revs, err := s.repo.Latest(test.urls...) - c.Assert(err, jc.ErrorIsNil) - c.Assert(revs, jc.DeepEquals, test.revs) - } -} - -func (s *charmStoreRepoSuite) TestResolve(c *gc.C) { - // Add some charms to the charm store. - s.addCharm(c, "~who/trusty/mysql-0", "mysql") - s.addCharm(c, "~who/precise/wordpress-2", "wordpress") - s.addCharm(c, "~dalek/utopic/riak-42", "riak") - s.addCharm(c, "utopic/mysql-47", "mysql") - - // Define the tests to be run. 
- tests := []struct { - id string - url string - err string - }{{ - id: "~who/mysql", - url: "cs:~who/trusty/mysql-0", - }, { - id: "~who/trusty/mysql", - url: "cs:~who/trusty/mysql-0", - }, { - id: "~who/wordpress", - url: "cs:~who/precise/wordpress-2", - }, { - id: "~who/wordpress-2", - url: "cs:~who/precise/wordpress-2", - }, { - id: "~dalek/riak", - url: "cs:~dalek/utopic/riak-42", - }, { - id: "~dalek/utopic/riak-42", - url: "cs:~dalek/utopic/riak-42", - }, { - id: "utopic/mysql", - url: "cs:utopic/mysql-47", - }, { - id: "utopic/mysql-47", - url: "cs:utopic/mysql-47", - }, { - id: "~dalek/utopic/riak-100", - err: `cannot resolve charm URL "cs:~dalek/utopic/riak-100": charm not found`, - }, { - id: "no-such", - err: `cannot resolve charm URL "cs:no-such": charm not found`, - }} - - // Run the tests. - for i, test := range tests { - c.Logf("test %d: %s", i, test.id) - url, err := s.repo.Resolve(charm.MustParseReference(test.id)) - if test.err != "" { - c.Assert(err.Error(), gc.Equals, test.err) - c.Assert(url, gc.IsNil) - continue - } - c.Assert(err, jc.ErrorIsNil) - c.Assert(url, jc.DeepEquals, charm.MustParseURL(test.url)) - } -} - -// hashOfCharm returns the SHA256 hash sum for the given charm name. -func hashOfCharm(c *gc.C, name string) string { - path := TestCharms.CharmArchivePath(c.MkDir(), name) - return hashOfPath(c, path) -} - -// hashOfPath returns the SHA256 hash sum for the given path. -func hashOfPath(c *gc.C, path string) string { - f, err := os.Open(path) - c.Assert(err, jc.ErrorIsNil) - defer f.Close() - hash := sha256.New() - _, err = io.Copy(hash, f) - c.Assert(err, jc.ErrorIsNil) - return fmt.Sprintf("%x", hash.Sum(nil)) -} - -// checkCharm checks that the given charms have the same attributes. -func checkCharm(c *gc.C, ch, expect charm.Charm) { - c.Assert(ch.Actions(), jc.DeepEquals, expect.Actions()) - c.Assert(ch.Config(), jc.DeepEquals, expect.Config()) - c.Assert(ch.Meta(), jc.DeepEquals, expect.Meta()) -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/legacy.go' --- src/gopkg.in/juju/charm.v5/charmrepo/legacy.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/legacy.go 1970-01-01 00:00:00 +0000 @@ -1,365 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/juju/utils" - - "gopkg.in/juju/charm.v5" -) - -// LegacyCharmStore is a repository Interface that provides access to the -// legacy Juju charm store. -type LegacyCharmStore struct { - BaseURL string - authAttrs string // a list of attr=value pairs, comma separated - jujuAttrs string // a list of attr=value pairs, comma separated - testMode bool -} - -var _ Interface = (*LegacyCharmStore)(nil) - -var LegacyStore = &LegacyCharmStore{BaseURL: "https://store.juju.ubuntu.com"} - -// WithAuthAttrs return a repository Interface with the authentication token -// list set. authAttrs is a list of attr=value pairs. -func (s *LegacyCharmStore) WithAuthAttrs(authAttrs string) Interface { - authCS := *s - authCS.authAttrs = authAttrs - return &authCS -} - -// WithTestMode returns a repository Interface where testMode is set to value -// passed to this method. 
-func (s *LegacyCharmStore) WithTestMode(testMode bool) Interface {
-	newRepo := *s
-	newRepo.testMode = testMode
-	return &newRepo
-}
-
-// WithJujuAttrs returns a repository Interface with the Juju metadata
-// attributes set. jujuAttrs is a list of attr=value pairs.
-func (s *LegacyCharmStore) WithJujuAttrs(jujuAttrs string) Interface {
-	jujuCS := *s
-	jujuCS.jujuAttrs = jujuAttrs
-	return &jujuCS
-}
-
-// get performs an HTTP GET, adding a custom auth header if necessary.
-func (s *LegacyCharmStore) get(url string) (resp *http.Response, err error) {
-	req, err := http.NewRequest("GET", url, nil)
-	if err != nil {
-		return nil, err
-	}
-	if s.authAttrs != "" {
-		// To comply with RFC 2617, we send the authentication data in
-		// the Authorization header with a custom auth scheme
-		// and the authentication attributes.
-		req.Header.Add("Authorization", "charmstore "+s.authAttrs)
-	}
-	if s.jujuAttrs != "" {
-		// The use of "X-" to prefix custom header values is deprecated.
-		req.Header.Add("Juju-Metadata", s.jujuAttrs)
-	}
-	return http.DefaultClient.Do(req)
-}
-
-// Resolve canonicalizes charm URLs, resolving any implied series in the reference.
-func (s *LegacyCharmStore) Resolve(ref *charm.Reference) (*charm.URL, error) {
-	infos, err := s.Info(ref)
-	if err != nil {
-		return nil, err
-	}
-	if len(infos) == 0 {
-		return nil, fmt.Errorf("missing response when resolving charm URL: %q", ref)
-	}
-	if infos[0].CanonicalURL == "" {
-		return nil, fmt.Errorf("cannot resolve charm URL: %q", ref)
-	}
-	curl, err := charm.ParseURL(infos[0].CanonicalURL)
-	if err != nil {
-		return nil, err
-	}
-	return curl, nil
-}
-
-// Info returns details for all the specified charms in the charm store.
-func (s *LegacyCharmStore) Info(curls ...charm.Location) ([]*InfoResponse, error) {
-	baseURL := s.BaseURL + "/charm-info?"
-	queryParams := make([]string, len(curls), len(curls)+1)
-	for i, curl := range curls {
-		queryParams[i] = "charms=" + url.QueryEscape(curl.String())
-	}
-	if s.testMode {
-		queryParams = append(queryParams, "stats=0")
-	}
-	resp, err := s.get(baseURL + strings.Join(queryParams, "&"))
-	if err != nil {
-		if urlErr, ok := err.(*url.Error); ok {
-			switch urlErr.Err.(type) {
-			case *net.DNSError, *net.OpError:
-				return nil, fmt.Errorf("Cannot access the charm store. Are you connected to the internet? Error details: %v", err)
-			}
-		}
-		return nil, err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != 200 {
-		errMsg := fmt.Errorf("Cannot access the charm store. Invalid response code: %q", resp.Status)
-		body, readErr := ioutil.ReadAll(resp.Body)
-		if readErr != nil {
-			return nil, readErr
-		}
-		logger.Errorf("%v Response body: %s", errMsg, body)
-		return nil, errMsg
-	}
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, err
-	}
-	infos := make(map[string]*InfoResponse)
-	if err = json.Unmarshal(body, &infos); err != nil {
-		return nil, err
-	}
-	result := make([]*InfoResponse, len(curls))
-	for i, curl := range curls {
-		key := curl.String()
-		info, found := infos[key]
-		if !found {
-			return nil, fmt.Errorf("charm store returned response without charm %q", key)
-		}
-		if len(info.Errors) == 1 && info.Errors[0] == "entry not found" {
-			info.Errors[0] = fmt.Sprintf("charm not found: %s", curl)
-		}
-		result[i] = info
-	}
-	return result, nil
-}
-
-// Event returns details for a charm event in the charm store.
-//
-// If digest is empty, the latest event is returned.
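-// Otherwise only an event matching that digest is returned.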
-func (s *LegacyCharmStore) Event(curl *charm.URL, digest string) (*EventResponse, error) { - key := curl.String() - query := key - if digest != "" { - query += "@" + digest - } - resp, err := s.get(s.BaseURL + "/charm-event?charms=" + url.QueryEscape(query)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - events := make(map[string]*EventResponse) - if err = json.Unmarshal(body, &events); err != nil { - return nil, err - } - event, found := events[key] - if !found { - return nil, fmt.Errorf("charm store returned response without charm %q", key) - } - if len(event.Errors) == 1 && event.Errors[0] == "entry not found" { - if digest == "" { - return nil, &NotFoundError{fmt.Sprintf("charm event not found for %q", curl)} - } else { - return nil, &NotFoundError{fmt.Sprintf("charm event not found for %q with digest %q", curl, digest)} - } - } - return event, nil -} - -// revisions returns the revisions of the charms referenced by curls. -func (s *LegacyCharmStore) revisions(curls ...charm.Location) (revisions []CharmRevision, err error) { - infos, err := s.Info(curls...) - if err != nil { - return nil, err - } - revisions = make([]CharmRevision, len(infos)) - for i, info := range infos { - for _, w := range info.Warnings { - logger.Warningf("charm store reports for %q: %s", curls[i], w) - } - if info.Errors == nil { - revisions[i].Revision = info.Revision - revisions[i].Sha256 = info.Sha256 - } else { - // If a charm is not found, we are more concise with the error message. - if len(info.Errors) == 1 && strings.HasPrefix(info.Errors[0], "charm not found") { - revisions[i].Err = fmt.Errorf(info.Errors[0]) - } else { - revisions[i].Err = fmt.Errorf("charm info errors for %q: %s", curls[i], strings.Join(info.Errors, "; ")) - } - } - } - return revisions, nil -} - -// Latest returns the latest revision of the charms referenced by curls, regardless -// of the revision set on each curl. -func (s *LegacyCharmStore) Latest(curls ...*charm.URL) ([]CharmRevision, error) { - baseCurls := make([]charm.Location, len(curls)) - for i, curl := range curls { - baseCurls[i] = curl.WithRevision(-1) - } - return s.revisions(baseCurls...) -} - -// BranchLocation returns the location for the branch holding the charm at curl. -func (s *LegacyCharmStore) BranchLocation(curl *charm.URL) string { - if curl.User != "" { - return fmt.Sprintf("lp:~%s/charms/%s/%s/trunk", curl.User, curl.Series, curl.Name) - } - return fmt.Sprintf("lp:charms/%s/%s", curl.Series, curl.Name) -} - -var branchPrefixes = []string{ - "lp:", - "bzr+ssh://bazaar.launchpad.net/+branch/", - "bzr+ssh://bazaar.launchpad.net/", - "http://launchpad.net/+branch/", - "http://launchpad.net/", - "https://launchpad.net/+branch/", - "https://launchpad.net/", - "http://code.launchpad.net/+branch/", - "http://code.launchpad.net/", - "https://code.launchpad.net/+branch/", - "https://code.launchpad.net/", -} - -// CharmURL returns the charm URL for the branch at location. 
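-// For example, "lp:charms/precise/wordpress" maps to cs:precise/wordpress,
-// and "lp:~user/charms/precise/wordpress/trunk" to cs:~user/precise/wordpress.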
-func (s *LegacyCharmStore) CharmURL(location string) (*charm.URL, error) { - var l string - if len(location) > 0 && location[0] == '~' { - l = location - } else { - for _, prefix := range branchPrefixes { - if strings.HasPrefix(location, prefix) { - l = location[len(prefix):] - break - } - } - } - if l != "" { - for len(l) > 0 && l[len(l)-1] == '/' { - l = l[:len(l)-1] - } - u := strings.Split(l, "/") - if len(u) == 3 && u[0] == "charms" { - return charm.ParseURL(fmt.Sprintf("cs:%s/%s", u[1], u[2])) - } - if len(u) == 4 && u[0] == "charms" && u[3] == "trunk" { - return charm.ParseURL(fmt.Sprintf("cs:%s/%s", u[1], u[2])) - } - if len(u) == 5 && u[1] == "charms" && u[4] == "trunk" && len(u[0]) > 0 && u[0][0] == '~' { - return charm.ParseURL(fmt.Sprintf("cs:%s/%s/%s", u[0], u[2], u[3])) - } - } - return nil, fmt.Errorf("unknown branch location: %q", location) -} - -// verify returns an error unless a file exists at path with a hex-encoded -// SHA256 matching digest. -func verify(path, digest string) error { - hash, _, err := utils.ReadFileSHA256(path) - if err != nil { - return err - } - if hash != digest { - return fmt.Errorf("bad SHA256 of %q", path) - } - return nil -} - -// Get returns the charm referenced by curl. -// CacheDir must have been set, otherwise Get will panic. -func (s *LegacyCharmStore) Get(curl *charm.URL) (charm.Charm, error) { - // The cache location must have been previously set. - if CacheDir == "" { - panic("charm cache directory path is empty") - } - if err := os.MkdirAll(CacheDir, os.FileMode(0755)); err != nil { - return nil, err - } - revInfo, err := s.revisions(curl) - if err != nil { - return nil, err - } - if len(revInfo) != 1 { - return nil, fmt.Errorf("expected 1 result, got %d", len(revInfo)) - } - if revInfo[0].Err != nil { - return nil, revInfo[0].Err - } - rev, digest := revInfo[0].Revision, revInfo[0].Sha256 - if curl.Revision == -1 { - curl = curl.WithRevision(rev) - } else if curl.Revision != rev { - return nil, fmt.Errorf("store returned charm with wrong revision %d for %q", rev, curl.String()) - } - path := filepath.Join(CacheDir, charm.Quote(curl.String())+".charm") - if verify(path, digest) != nil { - store_url := s.BaseURL + "/charm/" + url.QueryEscape(curl.Path()) - if s.testMode { - store_url = store_url + "?stats=0" - } - resp, err := s.get(store_url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - f, err := ioutil.TempFile(CacheDir, "charm-download") - if err != nil { - return nil, err - } - dlPath := f.Name() - _, err = io.Copy(f, resp.Body) - if cerr := f.Close(); err == nil { - err = cerr - } - if err != nil { - os.Remove(dlPath) - return nil, err - } - if err := utils.ReplaceFile(dlPath, path); err != nil { - return nil, err - } - } - if err := verify(path, digest); err != nil { - return nil, err - } - return charm.ReadCharmArchive(path) -} - -// LegacyInferRepository returns a charm repository inferred from the provided -// charm or bundle reference. Local references will use the provided path. 
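-// Only the "cs" and "local" schemas are recognized.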
-func LegacyInferRepository(ref *charm.Reference, localRepoPath string) (repo Interface, err error) { - switch ref.Schema { - case "cs": - repo = LegacyStore - case "local": - if localRepoPath == "" { - return nil, errors.New("path to local repository not specified") - } - repo = &LocalRepository{Path: localRepoPath} - default: - return nil, fmt.Errorf("unknown schema for charm reference %q", ref) - } - return -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/legacy_test.go' --- src/gopkg.in/juju/charm.v5/charmrepo/legacy_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/legacy_test.go 1970-01-01 00:00:00 +0000 @@ -1,407 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo_test - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - charmtesting "gopkg.in/juju/charm.v5/testing" -) - -type legacyCharmStoreSuite struct { - gitjujutesting.FakeHomeSuite - server *charmtesting.MockStore - store *charmrepo.LegacyCharmStore -} - -var _ = gc.Suite(&legacyCharmStoreSuite{}) - -func (s *legacyCharmStoreSuite) SetUpSuite(c *gc.C) { - s.FakeHomeSuite.SetUpSuite(c) - s.server = charmtesting.NewMockStore(c, TestCharms, map[string]int{ - "cs:series/good": 23, - "cs:series/unwise": 23, - "cs:series/better": 24, - "cs:series/best": 25, - }) -} - -func (s *legacyCharmStoreSuite) SetUpTest(c *gc.C) { - s.FakeHomeSuite.SetUpTest(c) - s.PatchValue(&charmrepo.CacheDir, c.MkDir()) - s.store = newLegacyStore(s.server.Address()) - s.server.Downloads = nil - s.server.Authorizations = nil - s.server.Metadata = nil - s.server.DownloadsNoStats = nil - s.server.InfoRequestCount = 0 - s.server.InfoRequestCountNoStats = 0 -} - -func (s *legacyCharmStoreSuite) TearDownSuite(c *gc.C) { - s.server.Close() - s.FakeHomeSuite.TearDownSuite(c) -} - -func (s *legacyCharmStoreSuite) TestMissing(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/missing") - expect := `charm not found: cs:series/missing` - _, err := charmrepo.Latest(s.store, charmURL) - c.Assert(err, gc.ErrorMatches, expect) - _, err = s.store.Get(charmURL) - c.Assert(err, gc.ErrorMatches, expect) -} - -func (s *legacyCharmStoreSuite) TestError(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/borken") - expect := `charm info errors for "cs:series/borken": badness` - _, err := charmrepo.Latest(s.store, charmURL) - c.Assert(err, gc.ErrorMatches, expect) - _, err = s.store.Get(charmURL) - c.Assert(err, gc.ErrorMatches, expect) -} - -func (s *legacyCharmStoreSuite) TestWarning(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/unwise") - expect := `.* WARNING juju.charm.charmrepo charm store reports for "cs:series/unwise": foolishness` + "\n" - r, err := charmrepo.Latest(s.store, charmURL) - c.Assert(r, gc.Equals, 23) - c.Assert(err, gc.IsNil) - c.Assert(c.GetTestLog(), gc.Matches, expect) - ch, err := s.store.Get(charmURL) - c.Assert(ch, gc.NotNil) - c.Assert(err, gc.IsNil) - c.Assert(c.GetTestLog(), gc.Matches, expect+expect) -} - -func (s *legacyCharmStoreSuite) TestLatest(c *gc.C) { - urls := []*charm.URL{ - charm.MustParseURL("cs:series/good"), - charm.MustParseURL("cs:series/good-2"), - charm.MustParseURL("cs:series/good-99"), - } - revInfo, err := s.store.Latest(urls...) 
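-	// Latest queries with the revision stripped, so every variant of "good" reports the newest revision, 23.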
- c.Assert(err, gc.IsNil) - c.Assert(revInfo, jc.DeepEquals, []charmrepo.CharmRevision{ - {23, "843f8bba130a9705249f038202fab24e5151e3a2f7b6626f4508a5725739a5b5", nil}, - {23, "843f8bba130a9705249f038202fab24e5151e3a2f7b6626f4508a5725739a5b5", nil}, - {23, "843f8bba130a9705249f038202fab24e5151e3a2f7b6626f4508a5725739a5b5", nil}, - }) -} - -func (s *legacyCharmStoreSuite) assertCached(c *gc.C, charmURL *charm.URL) { - s.server.Downloads = nil - ch, err := s.store.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch, gc.NotNil) - c.Assert(s.server.Downloads, gc.IsNil) -} - -func (s *legacyCharmStoreSuite) TestGetCacheImplicitRevision(c *gc.C) { - base := "cs:series/good" - charmURL := charm.MustParseURL(base) - revCharmURL := charm.MustParseURL(base + "-23") - ch, err := s.store.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch, gc.NotNil) - c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{revCharmURL}) - s.assertCached(c, charmURL) - s.assertCached(c, revCharmURL) -} - -func (s *legacyCharmStoreSuite) TestGetCacheExplicitRevision(c *gc.C) { - base := "cs:series/good-12" - charmURL := charm.MustParseURL(base) - ch, err := s.store.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch, gc.NotNil) - c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) - s.assertCached(c, charmURL) -} - -func (s *legacyCharmStoreSuite) TestGetBadCache(c *gc.C) { - c.Assert(os.Mkdir(filepath.Join(charmrepo.CacheDir, "cache"), 0777), gc.IsNil) - base := "cs:series/good" - charmURL := charm.MustParseURL(base) - revCharmURL := charm.MustParseURL(base + "-23") - name := charm.Quote(revCharmURL.String()) + ".charm" - err := ioutil.WriteFile(filepath.Join(charmrepo.CacheDir, "cache", name), nil, 0666) - c.Assert(err, gc.IsNil) - ch, err := s.store.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch, gc.NotNil) - c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{revCharmURL}) - s.assertCached(c, charmURL) - s.assertCached(c, revCharmURL) -} - -func (s *legacyCharmStoreSuite) TestGetTestModeFlag(c *gc.C) { - base := "cs:series/good-12" - charmURL := charm.MustParseURL(base) - ch, err := s.store.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch, gc.NotNil) - c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) - c.Assert(s.server.DownloadsNoStats, gc.IsNil) - c.Assert(s.server.InfoRequestCount, gc.Equals, 1) - c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 0) - - storeInTestMode := s.store.WithTestMode(true) - other := "cs:series/good-23" - otherURL := charm.MustParseURL(other) - ch, err = storeInTestMode.Get(otherURL) - c.Assert(err, gc.IsNil) - c.Assert(ch, gc.NotNil) - c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) - c.Assert(s.server.DownloadsNoStats, jc.DeepEquals, []*charm.URL{otherURL}) - c.Assert(s.server.InfoRequestCount, gc.Equals, 1) - c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 1) -} - -// The following tests cover the low-level CharmStore-specific API. - -func (s *legacyCharmStoreSuite) TestInfo(c *gc.C) { - charmURLs := []charm.Location{ - charm.MustParseURL("cs:series/good"), - charm.MustParseURL("cs:series/better"), - charm.MustParseURL("cs:series/best"), - } - infos, err := s.store.Info(charmURLs...) 
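-	// Info preserves request order: good, better, and best arrive as revisions 23, 24, and 25.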
- c.Assert(err, gc.IsNil) - c.Assert(infos, gc.HasLen, 3) - expected := []int{23, 24, 25} - for i, info := range infos { - c.Assert(info.Errors, gc.IsNil) - c.Assert(info.Revision, gc.Equals, expected[i]) - } -} - -func (s *legacyCharmStoreSuite) TestInfoNotFound(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/missing") - info, err := s.store.Info(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(info, gc.HasLen, 1) - c.Assert(info[0].Errors, gc.HasLen, 1) - c.Assert(info[0].Errors[0], gc.Matches, `charm not found: cs:series/missing`) -} - -func (s *legacyCharmStoreSuite) TestInfoError(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/borken") - info, err := s.store.Info(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(info, gc.HasLen, 1) - c.Assert(info[0].Errors, jc.DeepEquals, []string{"badness"}) -} - -func (s *legacyCharmStoreSuite) TestInfoWarning(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/unwise") - info, err := s.store.Info(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(info, gc.HasLen, 1) - c.Assert(info[0].Warnings, jc.DeepEquals, []string{"foolishness"}) -} - -func (s *legacyCharmStoreSuite) TestInfoTestModeFlag(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/good") - _, err := s.store.Info(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(s.server.InfoRequestCount, gc.Equals, 1) - c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 0) - - storeInTestMode, ok := s.store.WithTestMode(true).(*charmrepo.LegacyCharmStore) - c.Assert(ok, gc.Equals, true) - _, err = storeInTestMode.Info(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(s.server.InfoRequestCount, gc.Equals, 1) - c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 1) -} - -func (s *legacyCharmStoreSuite) TestInfoDNSError(c *gc.C) { - store := newLegacyStore("http://127.1.2.3") - charmURL := charm.MustParseURL("cs:series/good") - resp, err := store.Info(charmURL) - c.Assert(resp, gc.IsNil) - expect := `Cannot access the charm store. 
.*` - c.Assert(err, gc.ErrorMatches, expect) -} - -func (s *legacyCharmStoreSuite) TestEvent(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/good") - event, err := s.store.Event(charmURL, "") - c.Assert(err, gc.IsNil) - c.Assert(event.Errors, gc.IsNil) - c.Assert(event.Revision, gc.Equals, 23) - c.Assert(event.Digest, gc.Equals, "the-digest") -} - -func (s *legacyCharmStoreSuite) TestEventWithDigest(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/good") - event, err := s.store.Event(charmURL, "the-digest") - c.Assert(err, gc.IsNil) - c.Assert(event.Errors, gc.IsNil) - c.Assert(event.Revision, gc.Equals, 23) - c.Assert(event.Digest, gc.Equals, "the-digest") -} - -func (s *legacyCharmStoreSuite) TestEventNotFound(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/missing") - event, err := s.store.Event(charmURL, "") - c.Assert(err, gc.ErrorMatches, `charm event not found for "cs:series/missing"`) - c.Assert(event, gc.IsNil) -} - -func (s *legacyCharmStoreSuite) TestEventNotFoundDigest(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/good") - event, err := s.store.Event(charmURL, "missing-digest") - c.Assert(err, gc.ErrorMatches, `charm event not found for "cs:series/good" with digest "missing-digest"`) - c.Assert(event, gc.IsNil) -} - -func (s *legacyCharmStoreSuite) TestEventError(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/borken") - event, err := s.store.Event(charmURL, "") - c.Assert(err, gc.IsNil) - c.Assert(event.Errors, jc.DeepEquals, []string{"badness"}) -} - -func (s *legacyCharmStoreSuite) TestAuthorization(c *gc.C) { - store := s.store.WithAuthAttrs("token=value") - - base := "cs:series/good" - charmURL := charm.MustParseURL(base) - _, err := store.Get(charmURL) - - c.Assert(err, gc.IsNil) - - c.Assert(s.server.Authorizations, gc.HasLen, 1) - c.Assert(s.server.Authorizations[0], gc.Equals, "charmstore token=value") -} - -func (s *legacyCharmStoreSuite) TestNilAuthorization(c *gc.C) { - store := s.store.WithAuthAttrs("") - - base := "cs:series/good" - charmURL := charm.MustParseURL(base) - _, err := store.Get(charmURL) - - c.Assert(err, gc.IsNil) - c.Assert(s.server.Authorizations, gc.HasLen, 0) -} - -func (s *legacyCharmStoreSuite) TestMetadata(c *gc.C) { - store := s.store.WithJujuAttrs("juju-metadata") - - base := "cs:series/good" - charmURL := charm.MustParseURL(base) - _, err := store.Get(charmURL) - - c.Assert(err, gc.IsNil) - c.Assert(s.server.Metadata, gc.HasLen, 1) - c.Assert(s.server.Metadata[0], gc.Equals, "juju-metadata") -} - -func (s *legacyCharmStoreSuite) TestNilMetadata(c *gc.C) { - base := "cs:series/good" - charmURL := charm.MustParseURL(base) - _, err := s.store.Get(charmURL) - - c.Assert(err, gc.IsNil) - c.Assert(s.server.Metadata, gc.HasLen, 0) -} - -func (s *legacyCharmStoreSuite) TestEventWarning(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/unwise") - event, err := s.store.Event(charmURL, "") - c.Assert(err, gc.IsNil) - c.Assert(event.Warnings, jc.DeepEquals, []string{"foolishness"}) -} - -func (s *legacyCharmStoreSuite) TestBranchLocation(c *gc.C) { - charmURL := charm.MustParseURL("cs:series/name") - location := s.store.BranchLocation(charmURL) - c.Assert(location, gc.Equals, "lp:charms/series/name") - - charmURL = charm.MustParseURL("cs:~user/series/name") - location = s.store.BranchLocation(charmURL) - c.Assert(location, gc.Equals, "lp:~user/charms/series/name/trunk") -} - -func (s *legacyCharmStoreSuite) TestCharmURL(c *gc.C) { - tests := []struct{ url, loc string }{ - {"cs:precise/wordpress", 
"lp:charms/precise/wordpress"}, - {"cs:precise/wordpress", "http://launchpad.net/+branch/charms/precise/wordpress"}, - {"cs:precise/wordpress", "https://launchpad.net/+branch/charms/precise/wordpress"}, - {"cs:precise/wordpress", "http://code.launchpad.net/+branch/charms/precise/wordpress"}, - {"cs:precise/wordpress", "https://code.launchpad.net/+branch/charms/precise/wordpress"}, - {"cs:precise/wordpress", "bzr+ssh://bazaar.launchpad.net/+branch/charms/precise/wordpress"}, - {"cs:~charmers/precise/wordpress", "lp:~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "http://launchpad.net/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "https://launchpad.net/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "http://code.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "https://code.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "http://launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "https://launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "http://code.launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "https://code.launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "bzr+ssh://bazaar.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, - {"cs:~charmers/precise/wordpress", "bzr+ssh://bazaar.launchpad.net/~charmers/charms/precise/wordpress/trunk/"}, - {"cs:~charmers/precise/wordpress", "~charmers/charms/precise/wordpress/trunk"}, - {"", "lp:~charmers/charms/precise/wordpress/whatever"}, - {"", "lp:~charmers/whatever/precise/wordpress/trunk"}, - {"", "lp:whatever/precise/wordpress"}, - } - for _, t := range tests { - charmURL, err := s.store.CharmURL(t.loc) - if t.url == "" { - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("unknown branch location: %q", t.loc)) - } else { - c.Assert(err, gc.IsNil) - c.Assert(charmURL.String(), gc.Equals, t.url) - } - } -} - -var legacyInferRepositoryTests = []struct { - url string - path string -}{ - {"cs:precise/wordpress", ""}, - {"local:oneiric/wordpress", "/some/path"}, -} - -func (s *legacyCharmStoreSuite) TestInferRepository(c *gc.C) { - for i, t := range legacyInferRepositoryTests { - c.Logf("test %d", i) - ref, err := charm.ParseReference(t.url) - c.Assert(err, gc.IsNil) - repo, err := charmrepo.LegacyInferRepository(ref, "/some/path") - c.Assert(err, gc.IsNil) - switch repo := repo.(type) { - case *charmrepo.LocalRepository: - c.Assert(repo.Path, gc.Equals, t.path) - default: - c.Assert(repo, gc.Equals, charmrepo.LegacyStore) - } - } - ref, err := charm.ParseReference("local:whatever") - c.Assert(err, gc.IsNil) - _, err = charmrepo.LegacyInferRepository(ref, "") - c.Assert(err, gc.ErrorMatches, "path to local repository not specified") - ref.Schema = "foo" - _, err = charmrepo.LegacyInferRepository(ref, "") - c.Assert(err, gc.ErrorMatches, "unknown schema for charm reference.*") -} - -func newLegacyStore(url string) *charmrepo.LegacyCharmStore { - return &charmrepo.LegacyCharmStore{BaseURL: url} -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/local.go' --- src/gopkg.in/juju/charm.v5/charmrepo/local.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/local.go 1970-01-01 00:00:00 +0000 @@ -1,133 +0,0 @@ -// Copyright 2015 Canonical 
Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "gopkg.in/errgo.v1" - - "gopkg.in/juju/charm.v5" -) - -// LocalRepository represents a local directory containing subdirectories -// named after an Ubuntu series, each of which contains charms targeted for -// that series. For example: -// -// /path/to/repository/oneiric/mongodb/ -// /path/to/repository/precise/mongodb.charm -// /path/to/repository/precise/wordpress/ -type LocalRepository struct { - Path string -} - -var _ Interface = (*LocalRepository)(nil) - -// NewLocalRepository creates and returns a new local Juju repository pointing -// to the given local path. -func NewLocalRepository(path string) (Interface, error) { - if path == "" { - return nil, errgo.New("path to local repository not specified") - } - return &LocalRepository{ - Path: path, - }, nil -} - -// Resolve implements Interface.Resolve. -func (r *LocalRepository) Resolve(ref *charm.Reference) (*charm.URL, error) { - if ref.Series == "" { - return nil, errgo.Newf("no series specified for %s", ref) - } - u, err := ref.URL("") - if err != nil { - return nil, err - } - if ref.Revision != -1 { - return u, nil - } - ch, err := r.Get(u) - if err != nil { - return nil, err - } - return u.WithRevision(ch.Revision()), nil -} - -// Latest implements Interface.Latest by finding the -// latest revision of each of the given charm URLs in -// the local repository. -func (r *LocalRepository) Latest(curls ...*charm.URL) ([]CharmRevision, error) { - result := make([]CharmRevision, len(curls)) - for i, curl := range curls { - ch, err := r.Get(curl.WithRevision(-1)) - if err == nil { - result[i].Revision = ch.Revision() - } else { - result[i].Err = err - } - } - return result, nil -} - -func mightBeCharm(info os.FileInfo) bool { - if info.IsDir() { - return !strings.HasPrefix(info.Name(), ".") - } - return strings.HasSuffix(info.Name(), ".charm") -} - -// Get returns a charm matching curl, if one exists. If curl has a revision of -// -1, it returns the latest charm that matches curl. If multiple candidates -// satisfy the foregoing, the first one encountered will be returned.
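Before the implementation below, a minimal usage sketch of the Get semantics just described; the repository path and charm name are illustrative assumptions, not part of the original tree:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/juju/charm.v5"
        "gopkg.in/juju/charm.v5/charmrepo"
    )

    func main() {
        // Assumes a layout like /path/to/repository/quantal/mysql,
        // as documented for LocalRepository above.
        repo := &charmrepo.LocalRepository{Path: "/path/to/repository"}

        // A URL parsed without an explicit revision carries revision -1,
        // so Get returns the highest matching revision it finds.
        ch, err := repo.Get(charm.MustParseURL("local:quantal/mysql"))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(ch.Meta().Name, ch.Revision())
    }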
-func (r *LocalRepository) Get(curl *charm.URL) (charm.Charm, error) { - if curl.Schema != "local" { - return nil, fmt.Errorf("local repository got URL with non-local schema: %q", curl) - } - info, err := os.Stat(r.Path) - if err != nil { - if os.IsNotExist(err) { - err = repoNotFound(r.Path) - } - return nil, err - } - if !info.IsDir() { - return nil, repoNotFound(r.Path) - } - path := filepath.Join(r.Path, curl.Series) - infos, err := ioutil.ReadDir(path) - if err != nil { - return nil, charmNotFound(curl, r.Path) - } - var latest charm.Charm - for _, info := range infos { - chPath := filepath.Join(path, info.Name()) - if info.Mode()&os.ModeSymlink != 0 { - var err error - if info, err = os.Stat(chPath); err != nil { - return nil, err - } - } - if !mightBeCharm(info) { - continue - } - if ch, err := charm.ReadCharm(chPath); err != nil { - logger.Warningf("failed to load charm at %q: %s", chPath, err) - } else if ch.Meta().Name == curl.Name { - if ch.Revision() == curl.Revision { - return ch, nil - } - if latest == nil || ch.Revision() > latest.Revision() { - latest = ch - } - } - } - if curl.Revision == -1 && latest != nil { - return latest, nil - } - return nil, charmNotFound(curl, r.Path) -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/local_test.go' --- src/gopkg.in/juju/charm.v5/charmrepo/local_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/local_test.go 1970-01-01 00:00:00 +0000 @@ -1,228 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo_test - -import ( - "io/ioutil" - "os" - "path/filepath" - - gitjujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" -) - -type LocalRepoSuite struct { - gitjujutesting.FakeHomeSuite - repo *charmrepo.LocalRepository - seriesPath string -} - -var _ = gc.Suite(&LocalRepoSuite{}) - -func (s *LocalRepoSuite) SetUpTest(c *gc.C) { - s.FakeHomeSuite.SetUpTest(c) - root := c.MkDir() - s.repo = &charmrepo.LocalRepository{Path: root} - s.seriesPath = filepath.Join(root, "quantal") - c.Assert(os.Mkdir(s.seriesPath, 0777), gc.IsNil) -} - -func (s *LocalRepoSuite) addCharmArchive(name string) string { - return TestCharms.CharmArchivePath(s.seriesPath, name) -} - -func (s *LocalRepoSuite) addDir(name string) string { - return TestCharms.ClonedDirPath(s.seriesPath, name) -} - -func (s *LocalRepoSuite) checkNotFoundErr(c *gc.C, err error, charmURL *charm.URL) { - expect := `charm not found in "` + s.repo.Path + `": ` + charmURL.String() - c.Check(err, gc.ErrorMatches, expect) -} - -func (s *LocalRepoSuite) TestMissingCharm(c *gc.C) { - for i, str := range []string{ - "local:quantal/zebra", "local:badseries/zebra", - } { - c.Logf("test %d: %s", i, str) - charmURL := charm.MustParseURL(str) - _, err := charmrepo.Latest(s.repo, charmURL) - s.checkNotFoundErr(c, err, charmURL) - _, err = s.repo.Get(charmURL) - s.checkNotFoundErr(c, err, charmURL) - } -} - -func (s *LocalRepoSuite) TestMissingRepo(c *gc.C) { - c.Assert(os.RemoveAll(s.repo.Path), gc.IsNil) - _, err := charmrepo.Latest(s.repo, charm.MustParseURL("local:quantal/zebra")) - c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) - _, err = s.repo.Get(charm.MustParseURL("local:quantal/zebra")) - c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) - c.Assert(ioutil.WriteFile(s.repo.Path, nil, 0666), gc.IsNil) - _, err = charmrepo.Latest(s.repo, 
charm.MustParseURL("local:quantal/zebra")) - c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) - _, err = s.repo.Get(charm.MustParseURL("local:quantal/zebra")) - c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) -} - -func (s *LocalRepoSuite) TestMultipleVersions(c *gc.C) { - charmURL := charm.MustParseURL("local:quantal/upgrade") - s.addDir("upgrade1") - rev, err := charmrepo.Latest(s.repo, charmURL) - c.Assert(err, gc.IsNil) - c.Assert(rev, gc.Equals, 1) - ch, err := s.repo.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch.Revision(), gc.Equals, 1) - - s.addDir("upgrade2") - rev, err = charmrepo.Latest(s.repo, charmURL) - c.Assert(err, gc.IsNil) - c.Assert(rev, gc.Equals, 2) - ch, err = s.repo.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch.Revision(), gc.Equals, 2) - - revCharmURL := charmURL.WithRevision(1) - rev, err = charmrepo.Latest(s.repo, revCharmURL) - c.Assert(err, gc.IsNil) - c.Assert(rev, gc.Equals, 2) - ch, err = s.repo.Get(revCharmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch.Revision(), gc.Equals, 1) - - badRevCharmURL := charmURL.WithRevision(33) - rev, err = charmrepo.Latest(s.repo, badRevCharmURL) - c.Assert(err, gc.IsNil) - c.Assert(rev, gc.Equals, 2) - _, err = s.repo.Get(badRevCharmURL) - s.checkNotFoundErr(c, err, badRevCharmURL) -} - -func (s *LocalRepoSuite) TestCharmArchive(c *gc.C) { - charmURL := charm.MustParseURL("local:quantal/dummy") - s.addCharmArchive("dummy") - - rev, err := charmrepo.Latest(s.repo, charmURL) - c.Assert(err, gc.IsNil) - c.Assert(rev, gc.Equals, 1) - ch, err := s.repo.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch.Revision(), gc.Equals, 1) -} - -func (s *LocalRepoSuite) TestLogsErrors(c *gc.C) { - err := ioutil.WriteFile(filepath.Join(s.seriesPath, "blah.charm"), nil, 0666) - c.Assert(err, gc.IsNil) - err = os.Mkdir(filepath.Join(s.seriesPath, "blah"), 0666) - c.Assert(err, gc.IsNil) - samplePath := s.addDir("upgrade2") - gibberish := []byte("don't parse me by") - err = ioutil.WriteFile(filepath.Join(samplePath, "metadata.yaml"), gibberish, 0666) - c.Assert(err, gc.IsNil) - - charmURL := charm.MustParseURL("local:quantal/dummy") - s.addDir("dummy") - ch, err := s.repo.Get(charmURL) - c.Assert(err, gc.IsNil) - c.Assert(ch.Revision(), gc.Equals, 1) - c.Assert(c.GetTestLog(), gc.Matches, ` -.* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/blah": .* -.* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/blah.charm": .* -.* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/upgrade2": .* -`[1:]) -} - -func renameSibling(c *gc.C, path, name string) { - c.Assert(os.Rename(path, filepath.Join(filepath.Dir(path), name)), gc.IsNil) -} - -func (s *LocalRepoSuite) TestIgnoresUnpromisingNames(c *gc.C) { - err := ioutil.WriteFile(filepath.Join(s.seriesPath, "blah.notacharm"), nil, 0666) - c.Assert(err, gc.IsNil) - err = os.Mkdir(filepath.Join(s.seriesPath, ".blah"), 0666) - c.Assert(err, gc.IsNil) - renameSibling(c, s.addDir("dummy"), ".dummy") - renameSibling(c, s.addCharmArchive("dummy"), "dummy.notacharm") - charmURL := charm.MustParseURL("local:quantal/dummy") - - _, err = s.repo.Get(charmURL) - s.checkNotFoundErr(c, err, charmURL) - _, err = charmrepo.Latest(s.repo, charmURL) - s.checkNotFoundErr(c, err, charmURL) - c.Assert(c.GetTestLog(), gc.Equals, "") -} - -func (s *LocalRepoSuite) TestFindsSymlinks(c *gc.C) { - realPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") - linkPath := filepath.Join(s.seriesPath, "dummy") - err := os.Symlink(realPath, linkPath) 
- c.Assert(err, gc.IsNil) - ch, err := s.repo.Get(charm.MustParseURL("local:quantal/dummy")) - c.Assert(err, gc.IsNil) - c.Assert(ch.Revision(), gc.Equals, 1) - c.Assert(ch.Meta().Name, gc.Equals, "dummy") - c.Assert(ch.Config().Options["title"].Default, gc.Equals, "My Title") - c.Assert(ch.(*charm.CharmDir).Path, gc.Equals, linkPath) -} - -func (s *LocalRepoSuite) TestResolve(c *gc.C) { - // Add some charms to the local repo. - s.addDir("upgrade1") - s.addDir("upgrade2") - s.addDir("wordpress") - s.addDir("riak") - - // Define the tests to be run. - tests := []struct { - id string - url string - err string - }{{ - id: "local:quantal/upgrade", - url: "local:quantal/upgrade-2", - }, { - id: "local:quantal/upgrade-1", - url: "local:quantal/upgrade-1", - }, { - id: "local:quantal/wordpress", - url: "local:quantal/wordpress-3", - }, { - id: "local:quantal/riak", - url: "local:quantal/riak-7", - }, { - id: "local:quantal/wordpress-3", - url: "local:quantal/wordpress-3", - }, { - id: "local:quantal/wordpress-2", - url: "local:quantal/wordpress-2", - }, { - id: "local:trusty/riak", - err: "charm not found .*: local:trusty/riak", - }, { - id: "local:quantal/no-such", - err: "charm not found .*: local:quantal/no-such", - }, { - id: "local:upgrade", - err: "no series specified for local:upgrade", - }} - - // Run the tests. - for i, test := range tests { - c.Logf("test %d: %s", i, test.id) - url, err := s.repo.Resolve(charm.MustParseReference(test.id)) - if test.err != "" { - c.Assert(err.Error(), gc.Matches, test.err) - c.Assert(url, gc.IsNil) - continue - } - c.Assert(err, jc.ErrorIsNil) - c.Assert(url, jc.DeepEquals, charm.MustParseURL(test.url)) - } -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/package_test.go' --- src/gopkg.in/juju/charm.v5/charmrepo/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo_test - -import ( - "testing" - - jujutesting "github.com/juju/testing" -) - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/params.go' --- src/gopkg.in/juju/charm.v5/charmrepo/params.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/params.go 1970-01-01 00:00:00 +0000 @@ -1,61 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charmrepo - -import ( - "fmt" - - "gopkg.in/juju/charm.v5" -) - -// InfoResponse is sent by the charm store in response to charm-info requests. -type InfoResponse struct { - CanonicalURL string `json:"canonical-url,omitempty"` - Revision int `json:"revision"` // Zero is valid. Can't omitempty. - Sha256 string `json:"sha256,omitempty"` - Digest string `json:"digest,omitempty"` - Errors []string `json:"errors,omitempty"` - Warnings []string `json:"warnings,omitempty"` -} - -// EventResponse is sent by the charm store in response to charm-event requests. -type EventResponse struct { - Kind string `json:"kind"` - Revision int `json:"revision"` // Zero is valid. Can't omitempty. - Digest string `json:"digest,omitempty"` - Errors []string `json:"errors,omitempty"` - Warnings []string `json:"warnings,omitempty"` - Time string `json:"time,omitempty"` -} - -// CharmRevision holds the revision number of a charm and any error -// encountered in retrieving it. 
-type CharmRevision struct { - Revision int - Sha256 string - Err error -} - -// NotFoundError represents an error indicating that the requested data wasn't found. -type NotFoundError struct { - msg string -} - -func (e *NotFoundError) Error() string { - return e.msg -} - -func repoNotFound(path string) error { - return &NotFoundError{fmt.Sprintf("no repository found at %q", path)} -} - -func charmNotFound(curl *charm.URL, repoPath string) error { - return &NotFoundError{fmt.Sprintf("charm not found in %q: %s", repoPath, curl)} -} - -func CharmNotFound(url string) error { - return &NotFoundError{ - msg: "charm not found: " + url, - } -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/repo.go' --- src/gopkg.in/juju/charm.v5/charmrepo/repo.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/repo.go 1970-01-01 00:00:00 +0000 @@ -1,66 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -// Package charmrepo implements access to charm repositories. - -package charmrepo - -import ( - "fmt" - - "github.com/juju/loggo" - - "gopkg.in/juju/charm.v5" -) - -var logger = loggo.GetLogger("juju.charm.charmrepo") - -// Interface represents a charm repository (a collection of charms). -type Interface interface { - // Get returns the charm referenced by curl. - Get(curl *charm.URL) (charm.Charm, error) - - // Latest returns the latest revision of the charms referenced by curls, - // regardless of the revision set on each curl. - Latest(curls ...*charm.URL) ([]CharmRevision, error) - - // Resolve resolves the series and revision of the given entity - // reference. If the series is not specified, it may be resolved - // by the charm store or rejected. After the series is resolved, - // if the revision is not specified, it will be resolved to the latest - // available revision for that series. - Resolve(ref *charm.Reference) (*charm.URL, error) -} - -// Latest returns the latest revision of the charm referenced by curl, regardless -// of the revision set on the curl. -// This is a helper which calls the bulk method and unpacks a single result. -func Latest(repo Interface, curl *charm.URL) (int, error) { - revs, err := repo.Latest(curl) - if err != nil { - return 0, err - } - if len(revs) != 1 { - return 0, fmt.Errorf("expected 1 result, got %d", len(revs)) - } - rev := revs[0] - if rev.Err != nil { - return 0, rev.Err - } - return rev.Revision, nil -} - -// InferRepository returns a charm repository inferred from the provided charm -// or bundle reference. -// Charm store references will use the provided parameters. -// Local references will use the provided path. -func InferRepository(ref *charm.Reference, charmStoreParams NewCharmStoreParams, localRepoPath string) (Interface, error) { - switch ref.Schema { - case "cs": - return NewCharmStore(charmStoreParams), nil - case "local": - return NewLocalRepository(localRepoPath) - } - // TODO fix this error message to reference bundles too? - return nil, fmt.Errorf("unknown schema for charm reference %q", ref) -} === removed file 'src/gopkg.in/juju/charm.v5/charmrepo/repo_test.go' --- src/gopkg.in/juju/charm.v5/charmrepo/repo_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/charmrepo/repo_test.go 1970-01-01 00:00:00 +0000 @@ -1,57 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details.
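A minimal sketch of the schema dispatch performed by InferRepository above; the reference and local path are illustrative assumptions:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/juju/charm.v5"
        "gopkg.in/juju/charm.v5/charmrepo"
    )

    func main() {
        // ref.Schema picks the implementation: "cs" yields a CharmStore
        // built from the params; "local" a LocalRepository at the path.
        ref := charm.MustParseReference("local:quantal/mysql")
        repo, err := charmrepo.InferRepository(
            ref, charmrepo.NewCharmStoreParams{}, "/path/to/repository")
        if err != nil {
            log.Fatal(err)
        }
        curl, err := repo.Resolve(ref) // fills in the latest revision
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(curl)
    }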
- -package charmrepo_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charmstore.v4/csclient" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - charmtesting "gopkg.in/juju/charm.v5/testing" -) - -var TestCharms = charmtesting.NewRepo("../internal/test-charm-repo", "quantal") - -type inferRepoSuite struct{} - -var _ = gc.Suite(&inferRepoSuite{}) - -var inferRepositoryTests = []struct { - url string - localRepoPath string - err string -}{{ - url: "cs:trusty/django", -}, { - url: "local:precise/wordpress", - err: "path to local repository not specified", -}, { - url: "local:precise/haproxy-47", - localRepoPath: "/tmp/repo-path", -}} - -func (s *inferRepoSuite) TestInferRepository(c *gc.C) { - for i, test := range inferRepositoryTests { - c.Logf("test %d: %s", i, test.url) - ref := charm.MustParseReference(test.url) - repo, err := charmrepo.InferRepository( - ref, charmrepo.NewCharmStoreParams{}, test.localRepoPath) - if test.err != "" { - c.Assert(err, gc.ErrorMatches, test.err) - c.Assert(repo, gc.IsNil) - continue - } - c.Assert(err, jc.ErrorIsNil) - switch store := repo.(type) { - case *charmrepo.LocalRepository: - c.Assert(store.Path, gc.Equals, test.localRepoPath) - case *charmrepo.CharmStore: - c.Assert(store.URL(), gc.Equals, csclient.ServerURL) - default: - c.Fatal("unknown repository type") - } - } -} === removed file 'src/gopkg.in/juju/charm.v5/config.go' --- src/gopkg.in/juju/charm.v5/config.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/config.go 1970-01-01 00:00:00 +0000 @@ -1,234 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - "io" - "io/ioutil" - "strconv" - - "github.com/juju/schema" - "gopkg.in/yaml.v1" -) - -// Settings is a group of charm config option names and values. A Settings -// S is considered valid by the Config C if every key in S is an option in -// C, and every value either has the correct type or is nil. -type Settings map[string]interface{} - -// Option represents a single charm config option. -type Option struct { - Type string `yaml:"type"` - Description string `yaml:"description,omitempty"` - Default interface{} `yaml:"default,omitempty"` -} - -// error replaces any supplied non-nil error with a new error describing a -// validation failure for the supplied value. -func (option Option) error(err *error, name string, value interface{}) { - if *err != nil { - *err = fmt.Errorf("option %q expected %s, got %#v", name, option.Type, value) - } -} - -// validate returns an appropriately-typed value for the supplied value, or -// returns an error if it cannot be converted to the correct type. Nil values -// are always considered valid. -func (option Option) validate(name string, value interface{}) (_ interface{}, err error) { - if value == nil { - return nil, nil - } - defer option.error(&err, name, value) - if checker := optionTypeCheckers[option.Type]; checker != nil { - if value, err = checker.Coerce(value, nil); err != nil { - return nil, err - } - return value, nil - } - panic(fmt.Errorf("option %q has unknown type %q", name, option.Type)) -} - -var optionTypeCheckers = map[string]schema.Checker{ - "string": schema.String(), - "int": schema.Int(), - "float": schema.Float(), - "boolean": schema.Bool(), -} - -// parse returns an appropriately-typed value for the supplied string, or -// returns an error if it cannot be parsed to the correct type. 
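Concretely, the conversions parse performs for the four supported option types surface through ParseSettingsStrings; a fragment (not a full program), assuming cfg is a *Config declaring the option names used in the test suite further below:

    settings, err := cfg.ParseSettingsStrings(map[string]string{
        "outlook":            "whatever", // string: passed through unchanged
        "skill-level":        "123",      // int: parsed to int64(123)
        "agility-ratio":      "2.22",     // float: parsed to float64(2.22)
        "reticulate-splines": "true",     // boolean: parsed to true
    })
    if err != nil {
        // A failed conversion reports the option and offending value,
        // e.g. `option "skill-level" expected int, got "cheese"`.
    }
    _ = settings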
-func (option Option) parse(name, str string) (_ interface{}, err error) { - defer option.error(&err, name, str) - switch option.Type { - case "string": - return str, nil - case "int": - return strconv.ParseInt(str, 10, 64) - case "float": - return strconv.ParseFloat(str, 64) - case "boolean": - return strconv.ParseBool(str) - } - panic(fmt.Errorf("option %q has unknown type %q", name, option.Type)) -} - -// Config represents the supported configuration options for a charm, -// as declared in its config.yaml file. -type Config struct { - Options map[string]Option -} - -// NewConfig returns a new Config without any options. -func NewConfig() *Config { - return &Config{map[string]Option{}} -} - -// ReadConfig reads a Config in YAML format. -func ReadConfig(r io.Reader) (*Config, error) { - data, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - var config *Config - if err := yaml.Unmarshal(data, &config); err != nil { - return nil, err - } - if config == nil { - return nil, fmt.Errorf("invalid config: empty configuration") - } - if config.Options == nil { - // We are allowed an empty configuration if the options - // field is explicitly specified, but there is no easy way - // to tell if it was specified or not without unmarshaling - // into interface{} and explicitly checking the field. - var configInterface interface{} - if err := yaml.Unmarshal(data, &configInterface); err != nil { - return nil, err - } - m, _ := configInterface.(map[interface{}]interface{}) - if _, ok := m["options"]; !ok { - return nil, fmt.Errorf("invalid config: empty configuration") - } - } - for name, option := range config.Options { - switch option.Type { - case "string", "int", "float", "boolean": - case "": - // Missing type is valid in python. - option.Type = "string" - default: - return nil, fmt.Errorf("invalid config: option %q has unknown type %q", name, option.Type) - } - def := option.Default - if def == "" && option.Type == "string" { - // Skip normal validation for compatibility with pyjuju. - } else if option.Default, err = option.validate(name, def); err != nil { - option.error(&err, name, def) - return nil, fmt.Errorf("invalid config default: %v", err) - } - config.Options[name] = option - } - return config, nil -} - -// option returns the named option from the config, or an error if none -// such exists. -func (c *Config) option(name string) (Option, error) { - if option, ok := c.Options[name]; ok { - return option, nil - } - return Option{}, fmt.Errorf("unknown option %q", name) -} - -// DefaultSettings returns settings containing the default value of every -// option in the config. Default values may be nil. -func (c *Config) DefaultSettings() Settings { - out := make(Settings) - for name, option := range c.Options { - out[name] = option.Default - } - return out -} - -// ValidateSettings returns a copy of the supplied settings with a consistent type -// for each value. It returns an error if the settings contain unknown keys -// or invalid values. -func (c *Config) ValidateSettings(settings Settings) (Settings, error) { - out := make(Settings) - for name, value := range settings { - if option, err := c.option(name); err != nil { - return nil, err - } else if value, err = option.validate(name, value); err != nil { - return nil, err - } - out[name] = value - } - return out, nil -} - -// FilterSettings returns the subset of the supplied settings that are valid. 
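Tying the pieces above together, a small sketch of reading a config and validating settings; the YAML and values are illustrative, modelled on the tests later in this file:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "gopkg.in/juju/charm.v5"
    )

    func main() {
        cfg, err := charm.ReadConfig(strings.NewReader(
            `options: {title: {type: string, default: My Title}, skill-level: {type: int}}`))
        if err != nil {
            log.Fatal(err)
        }
        // Declared defaults are surfaced; options without one default to nil.
        fmt.Println(cfg.DefaultSettings()["title"]) // My Title

        // Supplied values are coerced to the declared type; unknown keys
        // are rejected with an error.
        valid, err := cfg.ValidateSettings(charm.Settings{"skill-level": 123})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%T\n", valid["skill-level"]) // int64
    }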
-func (c *Config) FilterSettings(settings Settings) Settings { - out := make(Settings) - for name, value := range settings { - if option, err := c.option(name); err == nil { - if value, err := option.validate(name, value); err == nil { - out[name] = value - } - } - } - return out -} - -// ParseSettingsStrings returns settings derived from the supplied map. Every -// value in the map must be parseable to the correct type for the option -// identified by its key. Empty values are interpreted as nil. -func (c *Config) ParseSettingsStrings(values map[string]string) (Settings, error) { - out := make(Settings) - for name, str := range values { - option, err := c.option(name) - if err != nil { - return nil, err - } - value, err := option.parse(name, str) - if err != nil { - return nil, err - } - out[name] = value - } - return out, nil -} - -// ParseSettingsYAML returns settings derived from the supplied YAML data. The -// YAML must unmarshal to a map of strings to settings data; the supplied key -// must be present in the map, and must point to a map in which every value -// must have, or be a string parseable to, the correct type for the associated -// config option. Empty strings and nil values are both interpreted as nil. -func (c *Config) ParseSettingsYAML(yamlData []byte, key string) (Settings, error) { - var allSettings map[string]Settings - if err := yaml.Unmarshal(yamlData, &allSettings); err != nil { - return nil, fmt.Errorf("cannot parse settings data: %v", err) - } - settings, ok := allSettings[key] - if !ok { - return nil, fmt.Errorf("no settings found for %q", key) - } - out := make(Settings) - for name, value := range settings { - option, err := c.option(name) - if err != nil { - return nil, err - } - // Accept string values for compatibility with python. - if str, ok := value.(string); ok { - if value, err = option.parse(name, str); err != nil { - return nil, err - } - } else if value, err = option.validate(name, value); err != nil { - return nil, err - } - out[name] = value - } - return out, nil -} === removed file 'src/gopkg.in/juju/charm.v5/config_test.go' --- src/gopkg.in/juju/charm.v5/config_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/config_test.go 1970-01-01 00:00:00 +0000 @@ -1,471 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm_test - -import ( - "bytes" - "fmt" - "strings" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" -) - -type ConfigSuite struct { - config *charm.Config -} - -var _ = gc.Suite(&ConfigSuite{}) - -func (s *ConfigSuite) SetUpSuite(c *gc.C) { - // Just use a single shared config for the whole suite. There's no use case - // for mutating a config, we assume that nobody will do so here. - var err error - s.config, err = charm.ReadConfig(bytes.NewBuffer([]byte(` -options: - title: - default: My Title - description: A descriptive title used for the service. - type: string - subtitle: - default: "" - description: An optional subtitle used for the service. - outlook: - description: No default outlook. - # type defaults to string in python - username: - default: admin001 - description: The name of the initial account (given admin permissions). - type: string - skill-level: - description: A number indicating skill. - type: int - agility-ratio: - description: A number from 0 to 1 indicating agility. 
- type: float - reticulate-splines: - description: Whether to reticulate splines on launch, or not. - type: boolean -`))) - c.Assert(err, gc.IsNil) -} - -func (s *ConfigSuite) TestReadSample(c *gc.C) { - c.Assert(s.config.Options, jc.DeepEquals, map[string]charm.Option{ - "title": { - Default: "My Title", - Description: "A descriptive title used for the service.", - Type: "string", - }, - "subtitle": { - Default: "", - Description: "An optional subtitle used for the service.", - Type: "string", - }, - "username": { - Default: "admin001", - Description: "The name of the initial account (given admin permissions).", - Type: "string", - }, - "outlook": { - Description: "No default outlook.", - Type: "string", - }, - "skill-level": { - Description: "A number indicating skill.", - Type: "int", - }, - "agility-ratio": { - Description: "A number from 0 to 1 indicating agility.", - Type: "float", - }, - "reticulate-splines": { - Description: "Whether to reticulate splines on launch, or not.", - Type: "boolean", - }, - }) -} - -func (s *ConfigSuite) TestDefaultSettings(c *gc.C) { - c.Assert(s.config.DefaultSettings(), jc.DeepEquals, charm.Settings{ - "title": "My Title", - "subtitle": "", - "username": "admin001", - "outlook": nil, - "skill-level": nil, - "agility-ratio": nil, - "reticulate-splines": nil, - }) -} - -func (s *ConfigSuite) TestFilterSettings(c *gc.C) { - settings := s.config.FilterSettings(charm.Settings{ - "title": "something valid", - "username": nil, - "unknown": "whatever", - "outlook": "", - "skill-level": 5.5, - "agility-ratio": true, - "reticulate-splines": "hullo", - }) - c.Assert(settings, jc.DeepEquals, charm.Settings{ - "title": "something valid", - "username": nil, - "outlook": "", - }) -} - -func (s *ConfigSuite) TestValidateSettings(c *gc.C) { - for i, test := range []struct { - info string - input charm.Settings - expect charm.Settings - err string - }{{ - info: "nil settings are valid", - expect: charm.Settings{}, - }, { - info: "empty settings are valid", - input: charm.Settings{}, - }, { - info: "unknown keys are not valid", - input: charm.Settings{"foo": nil}, - err: `unknown option "foo"`, - }, { - info: "nil is valid for every value type", - input: charm.Settings{ - "outlook": nil, - "skill-level": nil, - "agility-ratio": nil, - "reticulate-splines": nil, - }, - }, { - info: "correctly-typed values are valid", - input: charm.Settings{ - "outlook": "stormy", - "skill-level": int64(123), - "agility-ratio": 0.5, - "reticulate-splines": true, - }, - }, { - info: "empty string-typed values stay empty", - input: charm.Settings{"outlook": ""}, - expect: charm.Settings{"outlook": ""}, - }, { - info: "almost-correctly-typed values are valid", - input: charm.Settings{ - "skill-level": 123, - "agility-ratio": float32(0.5), - }, - expect: charm.Settings{ - "skill-level": int64(123), - "agility-ratio": 0.5, - }, - }, { - info: "bad string", - input: charm.Settings{"outlook": false}, - err: `option "outlook" expected string, got false`, - }, { - info: "bad int", - input: charm.Settings{"skill-level": 123.4}, - err: `option "skill-level" expected int, got 123.4`, - }, { - info: "bad float", - input: charm.Settings{"agility-ratio": "cheese"}, - err: `option "agility-ratio" expected float, got "cheese"`, - }, { - info: "bad boolean", - input: charm.Settings{"reticulate-splines": 101}, - err: `option "reticulate-splines" expected boolean, got 101`, - }} { - c.Logf("test %d: %s", i, test.info) - result, err := s.config.ValidateSettings(test.input) - if test.err != "" { - 
c.Check(err, gc.ErrorMatches, test.err) - } else { - c.Check(err, gc.IsNil) - if test.expect == nil { - c.Check(result, jc.DeepEquals, test.input) - } else { - c.Check(result, jc.DeepEquals, test.expect) - } - } - } -} - -var settingsWithNils = charm.Settings{ - "outlook": nil, - "skill-level": nil, - "agility-ratio": nil, - "reticulate-splines": nil, -} - -var settingsWithValues = charm.Settings{ - "outlook": "whatever", - "skill-level": int64(123), - "agility-ratio": 2.22, - "reticulate-splines": true, -} - -func (s *ConfigSuite) TestParseSettingsYAML(c *gc.C) { - for i, test := range []struct { - info string - yaml string - key string - expect charm.Settings - err string - }{{ - info: "bad structure", - yaml: "`", - err: `cannot parse settings data: .*`, - }, { - info: "bad key", - yaml: "{}", - key: "blah", - err: `no settings found for "blah"`, - }, { - info: "bad settings key", - yaml: "blah:\n ping: pong", - key: "blah", - err: `unknown option "ping"`, - }, { - info: "bad type for string", - yaml: "blah:\n outlook: 123", - key: "blah", - err: `option "outlook" expected string, got 123`, - }, { - info: "bad type for int", - yaml: "blah:\n skill-level: 12.345", - key: "blah", - err: `option "skill-level" expected int, got 12.345`, - }, { - info: "bad type for float", - yaml: "blah:\n agility-ratio: blob", - key: "blah", - err: `option "agility-ratio" expected float, got "blob"`, - }, { - info: "bad type for boolean", - yaml: "blah:\n reticulate-splines: 123", - key: "blah", - err: `option "reticulate-splines" expected boolean, got 123`, - }, { - info: "bad string for int", - yaml: "blah:\n skill-level: cheese", - key: "blah", - err: `option "skill-level" expected int, got "cheese"`, - }, { - info: "bad string for float", - yaml: "blah:\n agility-ratio: blob", - key: "blah", - err: `option "agility-ratio" expected float, got "blob"`, - }, { - info: "bad string for boolean", - yaml: "blah:\n reticulate-splines: cannonball", - key: "blah", - err: `option "reticulate-splines" expected boolean, got "cannonball"`, - }, { - info: "empty dict is valid", - yaml: "blah: {}", - key: "blah", - expect: charm.Settings{}, - }, { - info: "nil values are valid", - yaml: `blah: - outlook: null - skill-level: null - agility-ratio: null - reticulate-splines: null`, - key: "blah", - expect: settingsWithNils, - }, { - info: "empty strings for bool options are not accepted", - yaml: `blah: - outlook: "" - skill-level: 123 - agility-ratio: 12.0 - reticulate-splines: ""`, - key: "blah", - err: `option "reticulate-splines" expected boolean, got ""`, - }, { - info: "empty strings for int options are not accepted", - yaml: `blah: - outlook: "" - skill-level: "" - agility-ratio: 12.0 - reticulate-splines: false`, - key: "blah", - err: `option "skill-level" expected int, got ""`, - }, { - info: "empty strings for float options are not accepted", - yaml: `blah: - outlook: "" - skill-level: 123 - agility-ratio: "" - reticulate-splines: false`, - key: "blah", - err: `option "agility-ratio" expected float, got ""`, - }, { - info: "appropriate strings are valid", - yaml: `blah: - outlook: whatever - skill-level: "123" - agility-ratio: "2.22" - reticulate-splines: "true"`, - key: "blah", - expect: settingsWithValues, - }, { - info: "appropriate types are valid", - yaml: `blah: - outlook: whatever - skill-level: 123 - agility-ratio: 2.22 - reticulate-splines: y`, - key: "blah", - expect: settingsWithValues, - }} { - c.Logf("test %d: %s", i, test.info) - result, err := s.config.ParseSettingsYAML([]byte(test.yaml), 
test.key) - if test.err != "" { - c.Check(err, gc.ErrorMatches, test.err) - } else { - c.Check(err, gc.IsNil) - c.Check(result, jc.DeepEquals, test.expect) - } - } -} - -func (s *ConfigSuite) TestParseSettingsStrings(c *gc.C) { - for i, test := range []struct { - info string - input map[string]string - expect charm.Settings - err string - }{{ - info: "nil map is valid", - expect: charm.Settings{}, - }, { - info: "empty map is valid", - input: map[string]string{}, - expect: charm.Settings{}, - }, { - info: "empty strings for string options are valid", - input: map[string]string{"outlook": ""}, - expect: charm.Settings{"outlook": ""}, - }, { - info: "empty strings for non-string options are invalid", - input: map[string]string{"skill-level": ""}, - err: `option "skill-level" expected int, got ""`, - }, { - info: "strings are converted", - input: map[string]string{ - "outlook": "whatever", - "skill-level": "123", - "agility-ratio": "2.22", - "reticulate-splines": "true", - }, - expect: settingsWithValues, - }, { - info: "bad string for int", - input: map[string]string{"skill-level": "cheese"}, - err: `option "skill-level" expected int, got "cheese"`, - }, { - info: "bad string for float", - input: map[string]string{"agility-ratio": "blob"}, - err: `option "agility-ratio" expected float, got "blob"`, - }, { - info: "bad string for boolean", - input: map[string]string{"reticulate-splines": "cannonball"}, - err: `option "reticulate-splines" expected boolean, got "cannonball"`, - }} { - c.Logf("test %d: %s", i, test.info) - result, err := s.config.ParseSettingsStrings(test.input) - if test.err != "" { - c.Check(err, gc.ErrorMatches, test.err) - } else { - c.Check(err, gc.IsNil) - c.Check(result, jc.DeepEquals, test.expect) - } - } -} - -func (s *ConfigSuite) TestConfigError(c *gc.C) { - _, err := charm.ReadConfig(bytes.NewBuffer([]byte(`options: {t: {type: foo}}`))) - c.Assert(err, gc.ErrorMatches, `invalid config: option "t" has unknown type "foo"`) -} - -func (s *ConfigSuite) TestConfigWithNoOptions(c *gc.C) { - _, err := charm.ReadConfig(strings.NewReader("other:\n")) - c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") - - _, err = charm.ReadConfig(strings.NewReader("\n")) - c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") - - _, err = charm.ReadConfig(strings.NewReader("null\n")) - c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") - - _, err = charm.ReadConfig(strings.NewReader("options:\n")) - c.Assert(err, gc.IsNil) -} - -func (s *ConfigSuite) TestDefaultType(c *gc.C) { - assertDefault := func(type_ string, value string, expected interface{}) { - config := fmt.Sprintf(`options: {t: {type: %s, default: %s}}`, type_, value) - result, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) - c.Assert(err, gc.IsNil) - c.Assert(result.Options["t"].Default, gc.Equals, expected) - } - - assertDefault("boolean", "true", true) - assertDefault("string", "golden grahams", "golden grahams") - assertDefault("string", `""`, "") - assertDefault("float", "2.2e11", 2.2e11) - assertDefault("int", "99", int64(99)) - - assertTypeError := func(type_, str, value string) { - config := fmt.Sprintf(`options: {t: {type: %s, default: %s}}`, type_, str) - _, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) - expected := fmt.Sprintf(`invalid config default: option "t" expected %s, got %s`, type_, value) - c.Assert(err, gc.ErrorMatches, expected) - } - - assertTypeError("boolean", "henry", `"henry"`) - assertTypeError("string", "2.5", "2.5") - 
assertTypeError("float", "123", "123") - assertTypeError("int", "true", "true") -} - -// When an empty config is supplied an error should be returned -func (s *ConfigSuite) TestEmptyConfigReturnsError(c *gc.C) { - config := "" - result, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) - c.Assert(result, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") -} - -func (s *ConfigSuite) TestYAMLMarshal(c *gc.C) { - cfg, err := charm.ReadConfig(strings.NewReader(` -options: - minimal: - type: string - withdescription: - type: int - description: d - withdefault: - type: boolean - description: d - default: true -`)) - c.Assert(err, gc.IsNil) - c.Assert(cfg.Options, gc.HasLen, 3) - - newYAML, err := yaml.Marshal(cfg) - c.Assert(err, gc.IsNil) - - newCfg, err := charm.ReadConfig(bytes.NewReader(newYAML)) - c.Assert(err, gc.IsNil) - c.Assert(newCfg, jc.DeepEquals, cfg) -} === removed file 'src/gopkg.in/juju/charm.v5/export_test.go' --- src/gopkg.in/juju/charm.v5/export_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,12 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -// Export meaningful bits for tests only. - -var ( - IfaceExpander = ifaceExpander - - ParsePayloadClass = parsePayloadClass -) === removed directory 'src/gopkg.in/juju/charm.v5/hooks' === removed file 'src/gopkg.in/juju/charm.v5/hooks/hooks.go' --- src/gopkg.in/juju/charm.v5/hooks/hooks.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/hooks/hooks.go 1970-01-01 00:00:00 +0000 @@ -1,111 +0,0 @@ -// Copyright 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -// hooks provides types and constants that define the hooks known to Juju. -package hooks - -// Kind enumerates the different kinds of hooks that exist. -type Kind string - -const ( - // None of these hooks are ever associated with a relation; each of them - // represents a change to the state of the unit as a whole. The values - // themselves are all valid hook names. - Install Kind = "install" - Start Kind = "start" - ConfigChanged Kind = "config-changed" - UpgradeCharm Kind = "upgrade-charm" - Stop Kind = "stop" - ActionRequested Kind = "action-requested" // TODO: remove in charm v5, DEPRECATED - Action Kind = "action" - CollectMetrics Kind = "collect-metrics" - MeterStatusChanged Kind = "meter-status-changed" - LeaderElected Kind = "leader-elected" - LeaderDeposed Kind = "leader-deposed" - LeaderSettingsChanged Kind = "leader-settings-changed" - UpdateStatus Kind = "update-status" - - // These hooks require an associated relation, and the name of the relation - // unit whose change triggered the hook. The hook file names that these - // kinds represent will be prefixed by the relation name; for example, - // "db-relation-joined". - RelationJoined Kind = "relation-joined" - RelationChanged Kind = "relation-changed" - RelationDeparted Kind = "relation-departed" - - // This hook requires an associated relation. The represented hook file name - // will be prefixed by the relation name, just like the other Relation* Kind - // values. - RelationBroken Kind = "relation-broken" - - // These hooks require an associated storage. The hook file names that these - // kinds represent will be prefixed by the storage name; for example, - // "shared-fs-storage-attached". 
- StorageAttached Kind = "storage-attached" - StorageDetaching Kind = "storage-detaching" -) - -var unitHooks = []Kind{ - Install, - Start, - ConfigChanged, - UpgradeCharm, - Stop, - CollectMetrics, - MeterStatusChanged, - LeaderElected, - LeaderDeposed, - LeaderSettingsChanged, - UpdateStatus, -} - -// UnitHooks returns all known unit hook kinds. -func UnitHooks() []Kind { - hooks := make([]Kind, len(unitHooks)) - copy(hooks, unitHooks) - return hooks -} - -var relationHooks = []Kind{ - RelationJoined, - RelationChanged, - RelationDeparted, - RelationBroken, -} - -// RelationHooks returns all known relation hook kinds. -func RelationHooks() []Kind { - hooks := make([]Kind, len(relationHooks)) - copy(hooks, relationHooks) - return hooks -} - -var storageHooks = []Kind{ - StorageAttached, - StorageDetaching, -} - -// StorageHooks returns all known storage hook kinds. -func StorageHooks() []Kind { - hooks := make([]Kind, len(storageHooks)) - copy(hooks, storageHooks) - return hooks -} - -// IsRelation returns whether the Kind represents a relation hook. -func (kind Kind) IsRelation() bool { - switch kind { - case RelationJoined, RelationChanged, RelationDeparted, RelationBroken: - return true - } - return false -} - -// IsStorage returns whether the Kind represents a storage hook. -func (kind Kind) IsStorage() bool { - switch kind { - case StorageAttached, StorageDetaching: - return true - } - return false -} === removed directory 'src/gopkg.in/juju/charm.v5/internal' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad/README.md' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad/README.md 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -A dummy bundle === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad/bundle.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/bad/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -# This bundle has a bad relation, which will cause it to fail -# its verification. 
-services: - wordpress: - charm: wordpress - num_units: 1 - mysql: - charm: mysql - num_units: 1 -relations: - - ["foo:db", "mysql:server"] === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack/README.md' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack/README.md 1970-01-01 00:00:00 +0000 @@ -1,46 +0,0 @@ -OpenStack Bundle for Juju -========================= - -Overview -------- - -This bundle deploys a reference OpenStack architecture including all core projects: - - - OpenStack Compute - - OpenStack Networking (using Open vSwitch plugin) - - OpenStack Block Storage (backed with Ceph storage) - - OpenStack Image - - OpenStack Object Storage - - OpenStack Identity - - OpenStack Dashboard - - OpenStack Telemetry - - OpenStack Orchestration - -The charm configuration is an opinionated set for deploying OpenStack for testing on Cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage). - -The Ubuntu Server Team use this bundle for testing OpenStack-on-OpenStack. - -Usage ----- - -Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard: - - http:///horizon - -The charms configure the 'admin' user with a password of 'openstack' by default. - -The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. Read the OpenStack User Guide on how to configure your cloud for use: - - http://docs.openstack.org/user-guide/content/ - -Niggles ------- - -The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. It's possible to do this when testing on OpenStack by adding a second network interface to the neutron-gateway service: - - nova interface-attach --net-id - juju set neutron-gateway ext-port=eth1 - -Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin. - -For actual OpenStack deployments, this service would reside on a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud).
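The "bad" bundle earlier in this test repository exists to exercise exactly this kind of bundle verification. As a rough sketch of checking a bundle programmatically, assuming charm.v5 exposes ReadBundleData and a Verify method of roughly this shape (an assumption approximated from this era of the library, not confirmed by the diff itself):

    // Hypothetical check; bundleYAML would hold a bundle like the ones above,
    // and strings/log imports are assumed as in the earlier sketches.
    data, err := charm.ReadBundleData(strings.NewReader(bundleYAML))
    if err != nil {
        log.Fatal(err)
    }
    // A relation endpoint naming an undeclared service (e.g. "foo:db") is
    // assumed to surface here as a verification error; nil is assumed to
    // skip constraint validation.
    if err := data.Verify(nil); err != nil {
        log.Printf("bundle failed verification: %v", err)
    }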
=== removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack/bundle.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/openstack/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,202 +0,0 @@ -series: precise -services: - mysql: - charm: cs:precise/mysql - constraints: mem=1G - options: - dataset-size: 50% - rabbitmq-server: - charm: cs:precise/rabbitmq-server - constraints: mem=1G - ceph: - charm: cs:precise/ceph - num_units: 3 - constraints: mem=1G - options: - monitor-count: 3 - fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc - monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== - osd-devices: /dev/vdb - osd-reformat: "yes" - ephemeral-unmount: /mnt - keystone: - charm: cs:precise/keystone - constraints: mem=1G - options: - admin-password: openstack - admin-token: ubuntutesting - openstack-dashboard: - charm: cs:precise/openstack-dashboard - constraints: mem=1G - nova-compute: - charm: cs:precise/nova-compute - num_units: 3 - constraints: mem=4G - options: - config-flags: "auto_assign_floating_ip=False" - enable-live-migration: False - virt-type: kvm - nova-cloud-controller: - charm: cs:precise/nova-cloud-controller - constraints: mem=1G - options: - network-manager: Neutron - quantum-security-groups: "yes" - neutron-gateway: - charm: cs:precise/quantum-gateway - constraints: mem=1G - cinder: - charm: cs:precise/cinder - options: - block-device: "None" - constraints: mem=1G - glance: - charm: cs:precise/glance - constraints: mem=1G - swift-proxy: - charm: cs:precise/swift-proxy - constraints: mem=1G - options: - zone-assignment: manual - replicas: 3 - use-https: 'no' - swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae - swift-storage-z1: - charm: cs:precise/swift-storage - constraints: mem=1G - options: - zone: 1 - block-device: vdb - overwrite: "true" - swift-storage-z2: - charm: cs:precise/swift-storage - constraints: mem=1G - options: - zone: 2 - block-device: vdb - overwrite: "true" - swift-storage-z3: - charm: cs:precise/swift-storage - constraints: mem=1G - options: - zone: 3 - block-device: vdb - overwrite: "true" - ceilometer: - charm: cs:precise/ceilometer - constraints: mem=1G - ceilometer-agent: - charm: cs:precise/ceilometer-agent - mongodb: - charm: cs:precise/mongodb - constraints: mem=1G - heat: - charm: cs:precise/heat - constraints: mem=1G - ntp: - charm: cs:precise/ntp -relations: - - - keystone:shared-db - - mysql:shared-db - - - nova-cloud-controller:shared-db - - mysql:shared-db - - - nova-cloud-controller:amqp - - rabbitmq-server:amqp - - - nova-cloud-controller:image-service - - glance:image-service - - - nova-cloud-controller:identity-service - - keystone:identity-service - - - nova-compute:cloud-compute - - nova-cloud-controller:cloud-compute - - - nova-compute:shared-db - - mysql:shared-db - - - nova-compute:amqp - - rabbitmq-server:amqp - - - nova-compute:image-service - - glance:image-service - - - nova-compute:ceph - - ceph:client - - - glance:shared-db - - mysql:shared-db - - - glance:identity-service - - keystone:identity-service - - - glance:ceph - - ceph:client - - - glance:image-service - - cinder:image-service - - - cinder:shared-db - - mysql:shared-db - - - cinder:amqp - - rabbitmq-server:amqp - - - cinder:cinder-volume-service - - nova-cloud-controller:cinder-volume-service - - - cinder:identity-service - - keystone:identity-service - - - cinder:ceph - - ceph:client - - - neutron-gateway:shared-db - -
mysql:shared-db - - - neutron-gateway:amqp - - rabbitmq-server:amqp - - - neutron-gateway:quantum-network-service - - nova-cloud-controller:quantum-network-service - - - openstack-dashboard:identity-service - - keystone:identity-service - - - swift-proxy:identity-service - - keystone:identity-service - - - swift-proxy:swift-storage - - swift-storage-z1:swift-storage - - - swift-proxy:swift-storage - - swift-storage-z2:swift-storage - - - swift-proxy:swift-storage - - swift-storage-z3:swift-storage - - - ceilometer:identity-service - - keystone:identity-service - - - ceilometer:amqp - - rabbitmq-server:amqp - - - ceilometer:shared-db - - mongodb:database - - - ceilometer-agent:nova-ceilometer - - nova-compute:nova-ceilometer - - - ceilometer-agent:ceilometer-service - - ceilometer:ceilometer-service - - - heat:identity-service - - keystone:identity-service - - - heat:shared-db - - mysql:shared-db - - - heat:amqp - - rabbitmq-server:amqp - - - ntp:juju-info - - nova-compute:juju-info - - - ntp:juju-info - - nova-cloud-controller:juju-info - - - ntp:juju-info - - neutron-gateway:juju-info - - - ntp:juju-info - - ceph:juju-info - - - ntp:juju-info - - cinder:juju-info - - - ntp:juju-info - - keystone:juju-info - - - ntp:juju-info - - glance:juju-info - - - ntp:juju-info - - swift-proxy:juju-info - - - ntp:juju-info - - swift-storage-z1:juju-info - - - ntp:juju-info - - swift-storage-z2:juju-info - - - ntp:juju-info - - swift-storage-z3:juju-info - - - ntp:juju-info - - ceilometer:juju-info - - - ntp:juju-info - - mongodb:juju-info - - - ntp:juju-info - - rabbitmq-server:juju-info - - - ntp:juju-info - - mysql:juju-info - - - ntp:juju-info - - openstack-dashboard:juju-info - - - ntp:juju-info - - heat:juju-info === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple/README.md' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple/README.md 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -A dummy bundle === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,9 +0,0 @@ -services: - wordpress: - charm: wordpress - num_units: 1 - mysql: - charm: mysql - num_units: 1 -relations: - - ["wordpress:db", "mysql:server"] === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging/README.md' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging/README.md 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -A dummy bundle === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml 1970-01-01 
00:00:00 +0000 @@ -1,13 +0,0 @@ -services: - wordpress: - charm: wordpress - num_units: 1 - mysql: - charm: mysql - num_units: 1 - logging: - charm: logging -relations: - - ["wordpress:db", "mysql:server"] - - ["wordpress:juju-info", "logging:info"] - - ["mysql:juju-info", "logging:info"] === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed' --- 
src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/install' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/install 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/install 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -some text === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 
'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/start' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/start 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/start 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/stop' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/stop 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/stop 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/subdir' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -non hook related stuff === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,12 +0,0 @@ -name: all-hooks -summary: "That's a dummy charm with hook scripts for all types of hooks." -description: "This is a longer description."
-provides: - foo: - interface: phony -requires: - bar: - interface: fake -peers: - self: - interface: dummy === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/all-hooks/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/.dir' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/.dir/ignored' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/.ignored' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/.ignored 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/.ignored 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -# \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/category/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,6 +0,0 @@ -name: categories -summary: "Sample charm with a category" -description: | - That's a boring charm that has a category. -categories: ["database"] -tags: ["openstack", "storage"] \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/.dir' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/.dir/ignored' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/.ignored' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/.ignored 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/.ignored 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -# \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/actions.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/actions.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/actions.yaml 1970-01-01 00:00:00 +0000 @@ -1,7 +0,0 @@ -snapshot: - description: Take a snapshot of the database. - params: - outfile: - description: The file to write out to. 
- type: string - default: foo.bz2 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/build' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/build/ignored' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/config.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/config.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/config.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -options: - title: {default: My Title, description: A descriptive title used for the service., type: string} - outlook: {description: No default outlook., type: string} - username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} - skill-level: {description: A number indicating skill., type: int} === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/empty' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/empty/.gitkeep' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/hooks' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/hooks/install' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/hooks/install 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/hooks/install 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/bash -echo "Done!" === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: dummy -summary: "That's a dummy charm." -description: | - This is a longer description which - potentially contains multiple lines. 
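The dummy fixture above is the smallest charm in this test repository: a metadata.yaml, a typed config.yaml, an actions.yaml, and a single install hook. As a minimal sketch of how the tests consume such a fixture — ReadMeta comes from the meta.go removal further below, and the inlined YAML is illustrative, mirroring how meta_test.go inlines its own fixtures:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "gopkg.in/juju/charm.v5"
    )

    func main() {
        // A pared-down metadata.yaml in the spirit of the dummy fixture.
        meta, err := charm.ReadMeta(strings.NewReader(
            "name: dummy\nsummary: \"That's a dummy charm.\"\ndescription: d\n"))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(meta.Name)   // dummy
        fmt.Println(meta.Format) // 1, the schema default when format is omitted
    }
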
=== removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/src' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/src/hello.c' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/src/hello.c 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/dummy/src/hello.c 1970-01-01 00:00:00 +0000 @@ -1,7 +0,0 @@ -#include <stdio.h> - -main() -{ - printf ("Hello World!\n"); - return 0; -} === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/.dir' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/.dir/ignored' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/.ignored' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/.ignored 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/.ignored 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -# \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/format2/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,6 +0,0 @@ -name: format2 -format: 2 -summary: "Sample charm described in format 2" -description: | - That's a boring charm that is described in - terms of format 2. === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/hooks' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/hooks/.gitkeep' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -name: logging -summary: "Subordinate logging test charm" -description: | - This is a longer description which - potentially contains multiple lines.
-subordinate: true -provides: - logging-client: - interface: logging -requires: - logging-directory: - interface: logging - scope: container - info: - interface: juju-info - scope: container === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/logging/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -name: metered-empty -summary: "Metered charm with empty metrics" -description: "A charm that will not send metrics" === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/metrics.yaml' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered-empty/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -name: metered -summary: "A metered charm with custom metrics" -description: "" === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/metrics.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/metrics.yaml 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/metrics.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -metrics: - pings: - type: gauge - description: Description of the metric. 
- juju-unit-time: === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/metered/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/monitoring' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/monitoring/hooks' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/monitoring/hooks/.gitkeep' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/monitoring/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/monitoring/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/monitoring/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -name: monitoring -summary: "Subordinate monitoring test charm" -description: | - This is a longer description which - potentially contains multiple lines. -subordinate: true -provides: - monitoring-client: - interface: monitoring -requires: - monitoring-port: - interface: monitoring - scope: container - info: - interface: juju-info - scope: container === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,9 +0,0 @@ -name: mysql-alternative -summary: "Database engine" -description: "A pretty popular database" -provides: - prod: - interface: mysql - dev: - interface: mysql - limit: 2 === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql-alternative/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: mysql -summary: "Database engine" -description: "A pretty popular database" -provides: - server: mysql === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/mysql/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak/metadata.yaml' --- 
src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -name: riak -summary: "K/V storage engine" -description: "Scalable K/V Store in Erlang with Clocks :-)" -provides: - endpoint: - interface: http - admin: - interface: http -peers: - ring: - interface: riak === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/riak/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -7 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,15 +0,0 @@ -name: terracotta -summary: Distributed HA caching/storage platform for Java -maintainer: Robert Ayres -description: | - Distributed HA caching/storage platform for Java. - . - Terracotta provides out of the box clustering for a number of well known Java - frameworks, including EHCache, Hibernate and Quartz as well as clustering - for J2EE containers. -provides: - dso: - interface: terracotta - optional: true -peers: - server-array: terracotta-server === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/terracotta/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -3 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: upgrade -summary: "Sample charm to test version changes" -description: | - Sample charm to test version changes. - This is the old charm. === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade1/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: upgrade -summary: "Sample charm to test version changes" -description: | - Sample charm to test version changes. - This is the new charm. 
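The riak and terracotta metadata above spell their relations in both supported forms: fully specified (ring: interface: riak) and shorthand (server-array: terracotta-server). The schema coercion in the meta.go removal below (ifaceExpander) expands both spellings into the same Relation value; a sketch of that equivalence, again with illustrative inline YAML:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "gopkg.in/juju/charm.v5"
    )

    func main() {
        shorthand := "name: a\nsummary: b\ndescription: c\npeers:\n  ring: riak\n"
        longform := "name: a\nsummary: b\ndescription: c\npeers:\n  ring:\n    interface: riak\n"
        for _, src := range []string{shorthand, longform} {
            meta, err := charm.ReadMeta(strings.NewReader(src))
            if err != nil {
                log.Fatal(err)
            }
            // Both spellings print the same expanded relation:
            // {Name:ring Role:peer Interface:riak Optional:false Limit:1 Scope:global}
            fmt.Printf("%+v\n", meta.Peers["ring"])
        }
    }
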
=== removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/upgrade2/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -2 === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/hooks' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/hooks/install' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/hooks/install 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/hooks/install 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -#!/bin/bash - -echo hello world \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: varnish-alternative -summary: "Database engine" -description: "Another popular database" -provides: - webcache: varnish === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish-alternative/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: varnish -summary: "Database engine" -description: "Another popular database" -provides: - webcache: varnish === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/varnish/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/actions' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/actions/.gitkeep' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/config.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/config.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/config.yaml 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -options: - blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} - === removed 
directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/hooks' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/hooks/.gitkeep' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/metadata.yaml' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,23 +0,0 @@ -name: wordpress -summary: "Blog engine" -description: "A pretty popular blog engine" -provides: - url: - interface: http - limit: - optional: false - logging-dir: - interface: logging - scope: container - monitoring-port: - interface: monitoring - scope: container -requires: - db: - interface: mysql - limit: 1 - optional: false - cache: - interface: varnish - limit: 2 - optional: true === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/revision' --- src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/internal/test-charm-repo/quantal/wordpress/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -3 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/series' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/series/format2' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/series/format2/build' === removed file 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/series/format2/build/ignored' === removed directory 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/series/format2/hooks' === removed symlink 'src/gopkg.in/juju/charm.v5/internal/test-charm-repo/series/format2/hooks/symlink' === target was u'../target' === removed file 'src/gopkg.in/juju/charm.v5/meta.go' --- src/gopkg.in/juju/charm.v5/meta.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/meta.go 1970-01-01 00:00:00 +0000 @@ -1,692 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "regexp" - "strconv" - "strings" - - "github.com/juju/schema" - "github.com/juju/utils" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5/hooks" -) - -// RelationScope describes the scope of a relation. -type RelationScope string - -// Note that schema doesn't support custom string types, -// so when we use these values in a schema.Checker, -// we must store them as strings, not RelationScopes. - -const ( - ScopeGlobal RelationScope = "global" - ScopeContainer RelationScope = "container" -) - -// RelationRole defines the role of a relation. -type RelationRole string - -const ( - RoleProvider RelationRole = "provider" - RoleRequirer RelationRole = "requirer" - RolePeer RelationRole = "peer" -) - -// StorageType defines a storage type. -type StorageType string - -const ( - StorageBlock StorageType = "block" - StorageFilesystem StorageType = "filesystem" -) - -// Storage represents a charm's storage requirement. -type Storage struct { - // Name is the name of the store. - // - // Name has no default, and must be specified. - Name string `bson:"name"` - - // Description is a description of the store. - // - // Description has no default, and is optional. - Description string `bson:"description"` - - // Type is the storage type: filesystem or block-device. 
- // - // Type has no default, and must be specified. - Type StorageType `bson:"type"` - - // Shared indicates that the storage is shared between all units of - // a service deployed from the charm. It is an error to attempt to - // assign non-shareable storage to a "shared" storage requirement. - // - // Shared defaults to false. - Shared bool `bson:"shared"` - - // ReadOnly indicates that the storage should be made read-only if - // possible. If the storage cannot be made read-only, Juju will warn - // the user. - // - // ReadOnly defaults to false. - ReadOnly bool `bson:"read-only"` - - // CountMin is the number of storage instances that must be attached - // to the charm for it to be useful; the charm will not install until - // this number has been satisfied. This must be a non-negative number. - // - // CountMin defaults to 1 for singleton stores. - CountMin int `bson:"countmin"` - - // CountMax is the largest number of storage instances that can be - // attached to the charm. If CountMax is -1, then there is no upper - // bound. - // - // CountMax defaults to 1 for singleton stores. - CountMax int `bson:"countmax"` - - // MinimumSize is the minimum size of store that the charm needs to - // work at all. This is not a recommended size or a comfortable size - // or a will-work-well size, just a bare minimum below which the charm - // is going to break. - // MinimumSize requires a unit, one of MGTPEZY, and is stored as MiB. - // - // There is no default MinimumSize; if left unspecified, a provider - // specific default will be used, typically 1GB for block storage. - MinimumSize uint64 `bson:"minimum-size"` - - // Location is the mount location for filesystem stores. For multi- - // stores, the location acts as the parent directory for each mounted - // store. - // - // Location has no default, and is optional. - Location string `bson:"location,omitempty"` - - // Properties allow the charm author to characterise the relative storage - // performance requirements and sensitivities for each store. - // eg “transient” is used to indicate that non persistent storage is acceptable, - // such as tmpfs or ephemeral instance disks. - // - // Properties has no default, and is optional. - Properties []string `bson:"properties,omitempty"` -} - -// Relation represents a single relation defined in the charm -// metadata.yaml file. -type Relation struct { - Name string `bson:"name"` - Role RelationRole `bson:"role"` - Interface string `bson:"interface"` - Optional bool `bson:"optional"` - Limit int `bson:"limit"` - Scope RelationScope `bson:"scope"` -} - -// ImplementedBy returns whether the relation is implemented by the supplied charm. -func (r Relation) ImplementedBy(ch Charm) bool { - if r.IsImplicit() { - return true - } - var m map[string]Relation - switch r.Role { - case RoleProvider: - m = ch.Meta().Provides - case RoleRequirer: - m = ch.Meta().Requires - case RolePeer: - m = ch.Meta().Peers - default: - panic(fmt.Errorf("unknown relation role %q", r.Role)) - } - rel, found := m[r.Name] - if !found { - return false - } - if rel.Interface == r.Interface { - switch r.Scope { - case ScopeGlobal: - return rel.Scope != ScopeContainer - case ScopeContainer: - return true - default: - panic(fmt.Errorf("unknown relation scope %q", r.Scope)) - } - } - return false -} - -// IsImplicit returns whether the relation is supplied by juju itself, -// rather than by a charm.
-func (r Relation) IsImplicit() bool { - return (r.Name == "juju-info" && - r.Interface == "juju-info" && - r.Role == RoleProvider) -} - -// Meta represents all the known content that may be defined -// within a charm's metadata.yaml file. -type Meta struct { - Name string `bson:"name"` - Summary string `bson:"summary"` - Description string `bson:"description"` - Subordinate bool `bson:"subordinate"` - Provides map[string]Relation `bson:"provides,omitempty"` - Requires map[string]Relation `bson:"requires,omitempty"` - Peers map[string]Relation `bson:"peers,omitempty"` - Format int `bson:"format,omitempty"` - OldRevision int `bson:"oldrevision,omitempty"` // Obsolete - Categories []string `bson:"categories,omitempty"` - Tags []string `bson:"tags,omitempty"` - Series string `bson:"series,omitempty"` - Storage map[string]Storage `bson:"storage,omitempty"` - PayloadClasses map[string]PayloadClass `bson:"payloadclasses,omitempty" json:"payloadclasses,omitempty"` -} - -func generateRelationHooks(relName string, allHooks map[string]bool) { - for _, hookName := range hooks.RelationHooks() { - allHooks[fmt.Sprintf("%s-%s", relName, hookName)] = true - } -} - -// Hooks returns a map of all possible valid hooks, taking relations -// into account. It's a map to enable fast lookups, and the value is -// always true. -func (m Meta) Hooks() map[string]bool { - allHooks := make(map[string]bool) - // Unit hooks - for _, hookName := range hooks.UnitHooks() { - allHooks[string(hookName)] = true - } - // Relation hooks - for hookName := range m.Provides { - generateRelationHooks(hookName, allHooks) - } - for hookName := range m.Requires { - generateRelationHooks(hookName, allHooks) - } - for hookName := range m.Peers { - generateRelationHooks(hookName, allHooks) - } - return allHooks -} - -// Used for parsing Categories and Tags. -func parseStringList(list interface{}) []string { - if list == nil { - return nil - } - slice := list.([]interface{}) - result := make([]string, 0, len(slice)) - for _, elem := range slice { - result = append(result, elem.(string)) - } - return result -} - -// ReadMeta reads the content of a metadata.yaml file and returns -// its representation. -func ReadMeta(r io.Reader) (meta *Meta, err error) { - data, err := ioutil.ReadAll(r) - if err != nil { - return - } - raw := make(map[interface{}]interface{}) - err = yaml.Unmarshal(data, raw) - if err != nil { - return - } - v, err := charmSchema.Coerce(raw, nil) - if err != nil { - return nil, errors.New("metadata: " + err.Error()) - } - m := v.(map[string]interface{}) - meta = &Meta{} - meta.Name = m["name"].(string) - // Schema decodes as int64, but the int range should be good - // enough for revisions. 
- meta.Summary = m["summary"].(string) - meta.Description = m["description"].(string) - meta.Provides = parseRelations(m["provides"], RoleProvider) - meta.Requires = parseRelations(m["requires"], RoleRequirer) - meta.Peers = parseRelations(m["peers"], RolePeer) - meta.Format = int(m["format"].(int64)) - meta.Categories = parseStringList(m["categories"]) - meta.Tags = parseStringList(m["tags"]) - if subordinate := m["subordinate"]; subordinate != nil { - meta.Subordinate = subordinate.(bool) - } - if rev := m["revision"]; rev != nil { - // Obsolete - meta.OldRevision = int(m["revision"].(int64)) - } - if series, ok := m["series"]; ok && series != nil { - meta.Series = series.(string) - } - meta.Storage = parseStorage(m["storage"]) - meta.PayloadClasses = parsePayloadClasses(m["payloads"]) - if err := meta.Check(); err != nil { - return nil, err - } - return meta, nil -} - -// GetYAML implements yaml.Getter.GetYAML. -func (m Meta) GetYAML() (tag string, value interface{}) { - marshaledRelations := func(rs map[string]Relation) map[string]marshaledRelation { - mrs := make(map[string]marshaledRelation) - for name, r := range rs { - mrs[name] = marshaledRelation(r) - } - return mrs - } - return "", struct { - Name string `yaml:"name"` - Summary string `yaml:"summary"` - Description string `yaml:"description"` - Provides map[string]marshaledRelation `yaml:"provides,omitempty"` - Requires map[string]marshaledRelation `yaml:"requires,omitempty"` - Peers map[string]marshaledRelation `yaml:"peers,omitempty"` - Categories []string `yaml:"categories,omitempty"` - Tags []string `yaml:"tags,omitempty"` - Subordinate bool `yaml:"subordinate,omitempty"` - Series string `yaml:"series,omitempty"` - }{ - Name: m.Name, - Summary: m.Summary, - Description: m.Description, - Provides: marshaledRelations(m.Provides), - Requires: marshaledRelations(m.Requires), - Peers: marshaledRelations(m.Peers), - Categories: m.Categories, - Tags: m.Tags, - Subordinate: m.Subordinate, - Series: m.Series, - } -} - -type marshaledRelation Relation - -func (r marshaledRelation) GetYAML() (tag string, value interface{}) { - // See calls to ifaceExpander in charmSchema. - noLimit := 1 - if r.Role == RoleProvider { - noLimit = 0 - } - - if !r.Optional && r.Limit == noLimit && r.Scope == ScopeGlobal { - // All attributes are default, so use the simple string form of the relation. - return "", r.Interface - } - mr := struct { - Interface string `yaml:"interface"` - Limit *int `yaml:"limit,omitempty"` - Optional bool `yaml:"optional,omitempty"` - Scope RelationScope `yaml:"scope,omitempty"` - }{ - Interface: r.Interface, - Optional: r.Optional, - } - if r.Limit != noLimit { - mr.Limit = &r.Limit - } - if r.Scope != ScopeGlobal { - mr.Scope = r.Scope - } - return "", mr -} - -// Check checks that the metadata is well-formed. -func (meta Meta) Check() error { - // Check for duplicate or forbidden relation names or interfaces. - names := map[string]bool{} - checkRelations := func(src map[string]Relation, role RelationRole) error { - for name, rel := range src { - if rel.Name != name { - return fmt.Errorf("charm %q has mismatched relation name %q; expected %q", meta.Name, rel.Name, name) - } - if rel.Role != role { - return fmt.Errorf("charm %q has mismatched role %q; expected %q", meta.Name, rel.Role, role) - } - // Container-scoped require relations on subordinates are allowed - // to use the otherwise-reserved juju-* namespace. 
- if !meta.Subordinate || role != RoleRequirer || rel.Scope != ScopeContainer { - if reservedName(name) { - return fmt.Errorf("charm %q using a reserved relation name: %q", meta.Name, name) - } - } - if role != RoleRequirer { - if reservedName(rel.Interface) { - return fmt.Errorf("charm %q relation %q using a reserved interface: %q", meta.Name, name, rel.Interface) - } - } - if names[name] { - return fmt.Errorf("charm %q using a duplicated relation name: %q", meta.Name, name) - } - names[name] = true - } - return nil - } - if err := checkRelations(meta.Provides, RoleProvider); err != nil { - return err - } - if err := checkRelations(meta.Requires, RoleRequirer); err != nil { - return err - } - if err := checkRelations(meta.Peers, RolePeer); err != nil { - return err - } - - // Subordinate charms must have at least one relation that - // has container scope, otherwise they can't relate to the - // principal. - if meta.Subordinate { - valid := false - if meta.Requires != nil { - for _, relationData := range meta.Requires { - if relationData.Scope == ScopeContainer { - valid = true - break - } - } - } - if !valid { - return fmt.Errorf("subordinate charm %q lacks \"requires\" relation with container scope", meta.Name) - } - } - - if meta.Series != "" { - if !IsValidSeries(meta.Series) { - return fmt.Errorf("charm %q declares invalid series: %q", meta.Name, meta.Series) - } - } - - names = make(map[string]bool) - for name, store := range meta.Storage { - if store.Location != "" && store.Type != StorageFilesystem { - return fmt.Errorf(`charm %q storage %q: location may not be specified for "type: %s"`, meta.Name, name, store.Type) - } - if store.Type == "" { - return fmt.Errorf("charm %q storage %q: type must be specified", meta.Name, name) - } - if store.CountMin < 0 { - return fmt.Errorf("charm %q storage %q: invalid minimum count %d", meta.Name, name, store.CountMin) - } - if store.CountMax == 0 || store.CountMax < -1 { - return fmt.Errorf("charm %q storage %q: invalid maximum count %d", meta.Name, name, store.CountMax) - } - if names[name] { - return fmt.Errorf("charm %q storage %q: duplicated storage name", meta.Name, name) - } - names[name] = true - } - - for name, payloadClass := range meta.PayloadClasses { - if payloadClass.Name != name { - return fmt.Errorf("mismatch on payload class name (%q != %q)", payloadClass.Name, name) - } - if err := payloadClass.Validate(); err != nil { - return err - } - } - - return nil -} - -func reservedName(name string) bool { - return name == "juju" || strings.HasPrefix(name, "juju-") -} - -func parseRelations(relations interface{}, role RelationRole) map[string]Relation { - if relations == nil { - return nil - } - result := make(map[string]Relation) - for name, rel := range relations.(map[string]interface{}) { - relMap := rel.(map[string]interface{}) - relation := Relation{ - Name: name, - Role: role, - Interface: relMap["interface"].(string), - Optional: relMap["optional"].(bool), - } - if scope := relMap["scope"]; scope != nil { - relation.Scope = RelationScope(scope.(string)) - } - if relMap["limit"] != nil { - // Schema defaults to int64, but we know - // the int range should be more than enough. - relation.Limit = int(relMap["limit"].(int64)) - } - result[name] = relation - } - return result -} - -// Schema coercer that expands the interface shorthand notation. -// A consistent format is easier to work with than considering the -// potential difference everywhere. 
-// -// Supports the following variants:: -// -// provides: -// server: riak -// admin: http -// foobar: -// interface: blah -// -// provides: -// server: -// interface: mysql -// limit: -// optional: false -// -// In all input cases, the output is the fully specified interface -// representation as seen in the mysql interface description above. -func ifaceExpander(limit interface{}) schema.Checker { - return ifaceExpC{limit} -} - -type ifaceExpC struct { - limit interface{} -} - -var ( - stringC = schema.String() - mapC = schema.StringMap(schema.Any()) -) - -func (c ifaceExpC) Coerce(v interface{}, path []string) (newv interface{}, err error) { - s, err := stringC.Coerce(v, path) - if err == nil { - newv = map[string]interface{}{ - "interface": s, - "limit": c.limit, - "optional": false, - "scope": string(ScopeGlobal), - } - return - } - - v, err = mapC.Coerce(v, path) - if err != nil { - return - } - m := v.(map[string]interface{}) - if _, ok := m["limit"]; !ok { - m["limit"] = c.limit - } - return ifaceSchema.Coerce(m, path) -} - -var ifaceSchema = schema.FieldMap( - schema.Fields{ - "interface": schema.String(), - "limit": schema.OneOf(schema.Const(nil), schema.Int()), - "scope": schema.OneOf(schema.Const(string(ScopeGlobal)), schema.Const(string(ScopeContainer))), - "optional": schema.Bool(), - }, - schema.Defaults{ - "scope": string(ScopeGlobal), - "optional": false, - }, -) - -func parseStorage(stores interface{}) map[string]Storage { - if stores == nil { - return nil - } - result := make(map[string]Storage) - for name, store := range stores.(map[string]interface{}) { - storeMap := store.(map[string]interface{}) - store := Storage{ - Name: name, - Type: StorageType(storeMap["type"].(string)), - Shared: storeMap["shared"].(bool), - ReadOnly: storeMap["read-only"].(bool), - CountMin: 1, - CountMax: 1, - } - if desc, ok := storeMap["description"].(string); ok { - store.Description = desc - } - if multiple, ok := storeMap["multiple"].(map[string]interface{}); ok { - if r, ok := multiple["range"].([2]int); ok { - store.CountMin, store.CountMax = r[0], r[1] - } - } - if minSize, ok := storeMap["minimum-size"].(uint64); ok { - store.MinimumSize = minSize - } - if loc, ok := storeMap["location"].(string); ok { - store.Location = loc - } - if properties, ok := storeMap["properties"].([]interface{}); ok { - for _, p := range properties { - store.Properties = append(store.Properties, p.(string)) - } - } - result[name] = store - } - return result -} - -var storageSchema = schema.FieldMap( - schema.Fields{ - "type": schema.OneOf(schema.Const(string(StorageBlock)), schema.Const(string(StorageFilesystem))), - "shared": schema.Bool(), - "read-only": schema.Bool(), - "multiple": schema.FieldMap( - schema.Fields{ - "range": storageCountC{}, // m, m-n, m+, m- - }, - schema.Defaults{}, - ), - "minimum-size": storageSizeC{}, - "location": schema.String(), - "description": schema.String(), - "properties": schema.List(propertiesC{}), - }, - schema.Defaults{ - "shared": false, - "read-only": false, - "multiple": schema.Omit, - "location": schema.Omit, - "description": schema.Omit, - "properties": schema.Omit, - "minimum-size": schema.Omit, - }, -) - -type storageCountC struct{} - -var storageCountRE = regexp.MustCompile("^([0-9]+)([-+]|-[0-9]+)$") - -func (c storageCountC) Coerce(v interface{}, path []string) (newv interface{}, err error) { - s, err := schema.OneOf(schema.Int(), stringC).Coerce(v, path) - if err != nil { - return nil, err - } - if m, ok := s.(int64); ok { - // We've got a count of the form 
"m": m represents - // both the minimum and maximum. - if m <= 0 { - return nil, fmt.Errorf("%s: invalid count %v", strings.Join(path[1:], ""), m) - } - return [2]int{int(m), int(m)}, nil - } - match := storageCountRE.FindStringSubmatch(s.(string)) - if match == nil { - return nil, fmt.Errorf("%s: value %q does not match 'm', 'm-n', or 'm+'", strings.Join(path[1:], ""), s) - } - var m, n int - if m, err = strconv.Atoi(match[1]); err != nil { - return nil, err - } - if len(match[2]) == 1 { - // We've got a count of the form "m+" or "m-": - // m represents the minimum, and there is no - // upper bound. - n = -1 - } else { - if n, err = strconv.Atoi(match[2][1:]); err != nil { - return nil, err - } - } - return [2]int{m, n}, nil -} - -type storageSizeC struct{} - -func (c storageSizeC) Coerce(v interface{}, path []string) (newv interface{}, err error) { - s, err := schema.String().Coerce(v, path) - if err != nil { - return nil, err - } - return utils.ParseSize(s.(string)) -} - -type propertiesC struct{} - -func (c propertiesC) Coerce(v interface{}, path []string) (newv interface{}, err error) { - return schema.OneOf(schema.Const("transient")).Coerce(v, path) -} - -var charmSchema = schema.FieldMap( - schema.Fields{ - "name": schema.String(), - "summary": schema.String(), - "description": schema.String(), - "peers": schema.StringMap(ifaceExpander(int64(1))), - "provides": schema.StringMap(ifaceExpander(nil)), - "requires": schema.StringMap(ifaceExpander(int64(1))), - "revision": schema.Int(), // Obsolete - "format": schema.Int(), - "subordinate": schema.Bool(), - "categories": schema.List(schema.String()), - "tags": schema.List(schema.String()), - "series": schema.String(), - "storage": schema.StringMap(storageSchema), - "payloads": schema.StringMap(payloadClassSchema), - }, - schema.Defaults{ - "provides": schema.Omit, - "requires": schema.Omit, - "peers": schema.Omit, - "revision": schema.Omit, - "format": 1, - "subordinate": schema.Omit, - "categories": schema.Omit, - "tags": schema.Omit, - "series": schema.Omit, - "storage": schema.Omit, - "payloads": schema.Omit, - }, -) === removed file 'src/gopkg.in/juju/charm.v5/meta_test.go' --- src/gopkg.in/juju/charm.v5/meta_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/meta_test.go 1970-01-01 00:00:00 +0000 @@ -1,834 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
- -package charm_test - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" -) - -func repoMeta(name string) io.Reader { - charmDir := TestCharms.CharmDirPath(name) - file, err := os.Open(filepath.Join(charmDir, "metadata.yaml")) - if err != nil { - panic(err) - } - defer file.Close() - data, err := ioutil.ReadAll(file) - if err != nil { - panic(err) - } - return bytes.NewBuffer(data) -} - -type MetaSuite struct{} - -var _ = gc.Suite(&MetaSuite{}) - -func (s *MetaSuite) TestReadMetaVersion1(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("dummy")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Name, gc.Equals, "dummy") - c.Assert(meta.Summary, gc.Equals, "That's a dummy charm.") - c.Assert(meta.Description, gc.Equals, - "This is a longer description which\npotentially contains multiple lines.\n") - c.Assert(meta.Format, gc.Equals, 1) - c.Assert(meta.OldRevision, gc.Equals, 0) - c.Assert(meta.Subordinate, gc.Equals, false) -} - -func (s *MetaSuite) TestReadMetaVersion2(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("format2")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Name, gc.Equals, "format2") - c.Assert(meta.Format, gc.Equals, 2) - c.Assert(meta.Categories, gc.HasLen, 0) -} - -func (s *MetaSuite) TestReadCategory(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("category")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Categories, jc.DeepEquals, []string{"database"}) -} - -func (s *MetaSuite) TestReadTags(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("category")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Tags, jc.DeepEquals, []string{"openstack", "storage"}) -} - -func (s *MetaSuite) TestSubordinate(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("logging")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Subordinate, gc.Equals, true) -} - -func (s *MetaSuite) TestSubordinateWithoutContainerRelation(c *gc.C) { - r := repoMeta("dummy") - hackYaml := ReadYaml(r) - hackYaml["subordinate"] = true - _, err := charm.ReadMeta(hackYaml.Reader()) - c.Assert(err, gc.ErrorMatches, "subordinate charm \"dummy\" lacks \"requires\" relation with container scope") -} - -func (s *MetaSuite) TestScopeConstraint(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("logging")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Provides["logging-client"].Scope, gc.Equals, charm.ScopeGlobal) - c.Assert(meta.Requires["logging-directory"].Scope, gc.Equals, charm.ScopeContainer) - c.Assert(meta.Subordinate, gc.Equals, true) -} - -func (s *MetaSuite) TestParseMetaRelations(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("mysql")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Provides["server"], gc.Equals, charm.Relation{ - Name: "server", - Role: charm.RoleProvider, - Interface: "mysql", - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Requires, gc.IsNil) - c.Assert(meta.Peers, gc.IsNil) - - meta, err = charm.ReadMeta(repoMeta("riak")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Provides["endpoint"], gc.Equals, charm.Relation{ - Name: "endpoint", - Role: charm.RoleProvider, - Interface: "http", - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Provides["admin"], gc.Equals, charm.Relation{ - Name: "admin", - Role: charm.RoleProvider, - Interface: "http", - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Peers["ring"], gc.Equals, charm.Relation{ - Name: "ring", - Role: charm.RolePeer, - Interface: "riak", - Limit: 1, - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Requires, gc.IsNil) - 
- meta, err = charm.ReadMeta(repoMeta("terracotta")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Provides["dso"], gc.Equals, charm.Relation{ - Name: "dso", - Role: charm.RoleProvider, - Interface: "terracotta", - Optional: true, - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Peers["server-array"], gc.Equals, charm.Relation{ - Name: "server-array", - Role: charm.RolePeer, - Interface: "terracotta-server", - Limit: 1, - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Requires, gc.IsNil) - - meta, err = charm.ReadMeta(repoMeta("wordpress")) - c.Assert(err, gc.IsNil) - c.Assert(meta.Provides["url"], gc.Equals, charm.Relation{ - Name: "url", - Role: charm.RoleProvider, - Interface: "http", - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Requires["db"], gc.Equals, charm.Relation{ - Name: "db", - Role: charm.RoleRequirer, - Interface: "mysql", - Limit: 1, - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Requires["cache"], gc.Equals, charm.Relation{ - Name: "cache", - Role: charm.RoleRequirer, - Interface: "varnish", - Limit: 2, - Optional: true, - Scope: charm.ScopeGlobal, - }) - c.Assert(meta.Peers, gc.IsNil) -} - -var relationsConstraintsTests = []struct { - rels string - err string -}{ - { - "provides:\n foo: ping\nrequires:\n foo: pong", - `charm "a" using a duplicated relation name: "foo"`, - }, { - "requires:\n foo: ping\npeers:\n foo: pong", - `charm "a" using a duplicated relation name: "foo"`, - }, { - "peers:\n foo: ping\nprovides:\n foo: pong", - `charm "a" using a duplicated relation name: "foo"`, - }, { - "provides:\n juju: blob", - `charm "a" using a reserved relation name: "juju"`, - }, { - "requires:\n juju: blob", - `charm "a" using a reserved relation name: "juju"`, - }, { - "peers:\n juju: blob", - `charm "a" using a reserved relation name: "juju"`, - }, { - "provides:\n juju-snap: blub", - `charm "a" using a reserved relation name: "juju-snap"`, - }, { - "requires:\n juju-crackle: blub", - `charm "a" using a reserved relation name: "juju-crackle"`, - }, { - "peers:\n juju-pop: blub", - `charm "a" using a reserved relation name: "juju-pop"`, - }, { - "provides:\n innocuous: juju", - `charm "a" relation "innocuous" using a reserved interface: "juju"`, - }, { - "peers:\n innocuous: juju", - `charm "a" relation "innocuous" using a reserved interface: "juju"`, - }, { - "provides:\n innocuous: juju-snap", - `charm "a" relation "innocuous" using a reserved interface: "juju-snap"`, - }, { - "peers:\n innocuous: juju-snap", - `charm "a" relation "innocuous" using a reserved interface: "juju-snap"`, - }, -} - -func (s *MetaSuite) TestRelationsConstraints(c *gc.C) { - check := func(s, e string) { - meta, err := charm.ReadMeta(strings.NewReader(s)) - if e != "" { - c.Assert(err, gc.ErrorMatches, e) - c.Assert(meta, gc.IsNil) - } else { - c.Assert(err, gc.IsNil) - c.Assert(meta, gc.NotNil) - } - } - prefix := "name: a\nsummary: b\ndescription: c\n" - for i, t := range relationsConstraintsTests { - c.Logf("test %d", i) - check(prefix+t.rels, t.err) - check(prefix+"subordinate: true\n"+t.rels, t.err) - } - // The juju-* namespace is accessible to container-scoped require - // relations on subordinate charms. - check(prefix+` -subordinate: true -requires: - juju-info: - interface: juju-info - scope: container`, "") - // The juju-* interfaces are allowed on any require relation. - check(prefix+` -requires: - innocuous: juju-info`, "") -} - -// dummyMetadata contains a minimally valid charm metadata.yaml -// for testing valid and invalid series. 
-const dummyMetadata = "name: a\nsummary: b\ndescription: c" - -// TestSeries ensures that valid series values are parsed correctly when specified -// in the charm metadata. -func (s *MetaSuite) TestSeries(c *gc.C) { - // series not specified - meta, err := charm.ReadMeta(strings.NewReader(dummyMetadata)) - c.Assert(err, gc.IsNil) - c.Check(meta.Series, gc.Equals, "") - - for _, seriesName := range []string{"precise", "trusty", "plan9"} { - meta, err := charm.ReadMeta(strings.NewReader( - fmt.Sprintf("%s\nseries: %s\n", dummyMetadata, seriesName))) - c.Assert(err, gc.IsNil) - c.Check(meta.Series, gc.Equals, seriesName) - } -} - -// TestInvalidSeries ensures that invalid series values cause a parse error -// when specified in the charm metadata. -func (s *MetaSuite) TestInvalidSeries(c *gc.C) { - for _, seriesName := range []string{"pre-c1se", "pre^cise", "cp/m", "OpenVMS"} { - _, err := charm.ReadMeta(strings.NewReader( - fmt.Sprintf("%s\nseries: %s\n", dummyMetadata, seriesName))) - c.Assert(err, gc.NotNil) - c.Check(err, gc.ErrorMatches, `charm "a" declares invalid series: .*`) - } -} - -func (s *MetaSuite) TestCheckMismatchedRelationName(c *gc.C) { - // This Check case cannot be covered by the above - // TestRelationsConstraints tests. - meta := charm.Meta{ - Name: "foo", - Provides: map[string]charm.Relation{ - "foo": { - Name: "foo", - Role: charm.RolePeer, - Interface: "x", - Limit: 1, - Scope: charm.ScopeGlobal, - }, - }, - } - err := meta.Check() - c.Assert(err, gc.ErrorMatches, `charm "foo" has mismatched role "peer"; expected "provider"`) -} - -func (s *MetaSuite) TestCheckMismatchedRole(c *gc.C) { - // This Check case cannot be covered by the above - // TestRelationsConstraints tests. - meta := charm.Meta{ - Name: "foo", - Provides: map[string]charm.Relation{ - "foo": { - Role: charm.RolePeer, - Interface: "foo", - Limit: 1, - Scope: charm.ScopeGlobal, - }, - }, - } - err := meta.Check() - c.Assert(err, gc.ErrorMatches, `charm "foo" has mismatched relation name ""; expected "foo"`) -} - -// Test rewriting of a given interface specification into long form. -// -// InterfaceExpander uses `coerce` to do one of two things: -// -// - Rewrite shorthand to the long form used for actual storage -// - Fills in defaults, including a configurable `limit` -// -// This test ensures test coverage on each of these branches, along -// with ensuring the conversion object properly raises SchemaError -// exceptions on invalid data. 
-func (s *MetaSuite) TestIfaceExpander(c *gc.C) { - e := charm.IfaceExpander(nil) - - path := []string{""} - - // Shorthand is properly rewritten - v, err := e.Coerce("http", path) - c.Assert(err, gc.IsNil) - c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": false, "scope": string(charm.ScopeGlobal)}) - - // Defaults are properly applied - v, err = e.Coerce(map[string]interface{}{"interface": "http"}, path) - c.Assert(err, gc.IsNil) - c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": false, "scope": string(charm.ScopeGlobal)}) - - v, err = e.Coerce(map[string]interface{}{"interface": "http", "limit": 2}, path) - c.Assert(err, gc.IsNil) - c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": int64(2), "optional": false, "scope": string(charm.ScopeGlobal)}) - - v, err = e.Coerce(map[string]interface{}{"interface": "http", "optional": true}, path) - c.Assert(err, gc.IsNil) - c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": true, "scope": string(charm.ScopeGlobal)}) - - // Invalid data raises an error. - v, err = e.Coerce(42, path) - c.Assert(err, gc.ErrorMatches, `: expected map, got int\(42\)`) - - v, err = e.Coerce(map[string]interface{}{"interface": "http", "optional": nil}, path) - c.Assert(err, gc.ErrorMatches, ".optional: expected bool, got nothing") - - v, err = e.Coerce(map[string]interface{}{"interface": "http", "limit": "none, really"}, path) - c.Assert(err, gc.ErrorMatches, ".limit: unexpected value.*") - - // Can change default limit - e = charm.IfaceExpander(1) - v, err = e.Coerce(map[string]interface{}{"interface": "http"}, path) - c.Assert(err, gc.IsNil) - c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": int64(1), "optional": false, "scope": string(charm.ScopeGlobal)}) -} - -func (s *MetaSuite) TestMetaHooks(c *gc.C) { - meta, err := charm.ReadMeta(repoMeta("wordpress")) - c.Assert(err, gc.IsNil) - hooks := meta.Hooks() - expectedHooks := map[string]bool{ - "install": true, - "start": true, - "config-changed": true, - "upgrade-charm": true, - "stop": true, - "collect-metrics": true, - "meter-status-changed": true, - "leader-elected": true, - "leader-deposed": true, - "leader-settings-changed": true, - "update-status": true, - "cache-relation-joined": true, - "cache-relation-changed": true, - "cache-relation-departed": true, - "cache-relation-broken": true, - "db-relation-joined": true, - "db-relation-changed": true, - "db-relation-departed": true, - "db-relation-broken": true, - "logging-dir-relation-joined": true, - "logging-dir-relation-changed": true, - "logging-dir-relation-departed": true, - "logging-dir-relation-broken": true, - "monitoring-port-relation-joined": true, - "monitoring-port-relation-changed": true, - "monitoring-port-relation-departed": true, - "monitoring-port-relation-broken": true, - "url-relation-joined": true, - "url-relation-changed": true, - "url-relation-departed": true, - "url-relation-broken": true, - } - c.Assert(hooks, jc.DeepEquals, expectedHooks) -} - -func (s *MetaSuite) TestCodecRoundTripEmpty(c *gc.C) { - for i, codec := range codecs { - c.Logf("codec %d", i) - empty_input := charm.Meta{} - data, err := codec.Marshal(empty_input) - c.Assert(err, gc.IsNil) - var empty_output charm.Meta - err = codec.Unmarshal(data, &empty_output) - c.Assert(err, gc.IsNil) - c.Assert(empty_input, jc.DeepEquals, empty_output) - } -} - -func (s *MetaSuite) 
TestCodecRoundTrip(c *gc.C) { - var input = charm.Meta{ - Name: "Foo", - Summary: "Bar", - Description: "Baz", - Subordinate: true, - Provides: map[string]charm.Relation{ - "qux": { - Interface: "quxx", - Optional: true, - Limit: 42, - Scope: "quxxx", - }, - }, - Requires: map[string]charm.Relation{ - "qux": { - Interface: "quxx", - Optional: true, - Limit: 42, - Scope: "quxxx", - }, - }, - Peers: map[string]charm.Relation{ - "qux": { - Interface: "quxx", - Optional: true, - Limit: 42, - Scope: "quxxx", - }, - }, - Categories: []string{"quxxxx", "quxxxxx"}, - Tags: []string{"openstack", "storage"}, - Format: 10, - OldRevision: 11, - } - for i, codec := range codecs { - c.Logf("codec %d", i) - data, err := codec.Marshal(input) - c.Assert(err, gc.IsNil) - var output charm.Meta - err = codec.Unmarshal(data, &output) - c.Assert(err, gc.IsNil) - c.Assert(input, jc.DeepEquals, output) - } -} - -var implementedByTests = []struct { - ifce string - name string - role charm.RelationRole - scope charm.RelationScope - match bool - implicit bool -}{ - {"ifce-pro", "pro", charm.RoleProvider, charm.ScopeGlobal, true, false}, - {"blah", "pro", charm.RoleProvider, charm.ScopeGlobal, false, false}, - {"ifce-pro", "blah", charm.RoleProvider, charm.ScopeGlobal, false, false}, - {"ifce-pro", "pro", charm.RoleRequirer, charm.ScopeGlobal, false, false}, - {"ifce-pro", "pro", charm.RoleProvider, charm.ScopeContainer, true, false}, - - {"juju-info", "juju-info", charm.RoleProvider, charm.ScopeGlobal, true, true}, - {"blah", "juju-info", charm.RoleProvider, charm.ScopeGlobal, false, false}, - {"juju-info", "blah", charm.RoleProvider, charm.ScopeGlobal, false, false}, - {"juju-info", "juju-info", charm.RoleRequirer, charm.ScopeGlobal, false, false}, - {"juju-info", "juju-info", charm.RoleProvider, charm.ScopeContainer, true, true}, - - {"ifce-req", "req", charm.RoleRequirer, charm.ScopeGlobal, true, false}, - {"blah", "req", charm.RoleRequirer, charm.ScopeGlobal, false, false}, - {"ifce-req", "blah", charm.RoleRequirer, charm.ScopeGlobal, false, false}, - {"ifce-req", "req", charm.RolePeer, charm.ScopeGlobal, false, false}, - {"ifce-req", "req", charm.RoleRequirer, charm.ScopeContainer, true, false}, - - {"juju-info", "info", charm.RoleRequirer, charm.ScopeContainer, true, false}, - {"blah", "info", charm.RoleRequirer, charm.ScopeContainer, false, false}, - {"juju-info", "blah", charm.RoleRequirer, charm.ScopeContainer, false, false}, - {"juju-info", "info", charm.RolePeer, charm.ScopeContainer, false, false}, - {"juju-info", "info", charm.RoleRequirer, charm.ScopeGlobal, false, false}, - - {"ifce-peer", "peer", charm.RolePeer, charm.ScopeGlobal, true, false}, - {"blah", "peer", charm.RolePeer, charm.ScopeGlobal, false, false}, - {"ifce-peer", "blah", charm.RolePeer, charm.ScopeGlobal, false, false}, - {"ifce-peer", "peer", charm.RoleProvider, charm.ScopeGlobal, false, false}, - {"ifce-peer", "peer", charm.RolePeer, charm.ScopeContainer, true, false}, -} - -func (s *MetaSuite) TestImplementedBy(c *gc.C) { - for i, t := range implementedByTests { - c.Logf("test %d", i) - r := charm.Relation{ - Interface: t.ifce, - Name: t.name, - Role: t.role, - Scope: t.scope, - } - c.Assert(r.ImplementedBy(&dummyCharm{}), gc.Equals, t.match) - c.Assert(r.IsImplicit(), gc.Equals, t.implicit) - } -} - -var metaYAMLMarshalTests = []struct { - about string - yaml string -}{{ - about: "minimal charm", - yaml: ` -name: minimal -description: d -summary: s -`, -}, { - about: "charm with lots of stuff", - yaml: ` -name: big -description: d 
-summary: s -subordinate: true -provides: - provideSimple: someinterface - provideLessSimple: - interface: anotherinterface - optional: true - scope: container - limit: 3 -requires: - requireSimple: someinterface - requireLessSimple: - interface: anotherinterface - optional: true - scope: container - limit: 3 -peers: - peerSimple: someinterface - peerLessSimple: - interface: peery - optional: true -categories: [c1, c1] -tags: [t1, t2] -series: someseries -`, -}} - -func (s *MetaSuite) TestYAMLMarshal(c *gc.C) { - for i, test := range metaYAMLMarshalTests { - c.Logf("test %d: %s", i, test.about) - ch, err := charm.ReadMeta(strings.NewReader(test.yaml)) - c.Assert(err, gc.IsNil) - gotYAML, err := yaml.Marshal(ch) - c.Assert(err, gc.IsNil) - gotCh, err := charm.ReadMeta(bytes.NewReader(gotYAML)) - c.Assert(err, gc.IsNil) - c.Assert(gotCh, jc.DeepEquals, ch) - } -} - -func (s *MetaSuite) TestYAMLMarshalSimpleRelation(c *gc.C) { - // Check that a simple relation gets marshaled as a string. - chYAML := ` -name: minimal -description: d -summary: s -provides: - server: http -requires: - client: http -peers: - me: http -` - ch, err := charm.ReadMeta(strings.NewReader(chYAML)) - c.Assert(err, gc.IsNil) - gotYAML, err := yaml.Marshal(ch) - c.Assert(err, gc.IsNil) - - var x interface{} - err = yaml.Unmarshal(gotYAML, &x) - c.Assert(err, gc.IsNil) - c.Assert(x, jc.DeepEquals, map[interface{}]interface{}{ - "name": "minimal", - "description": "d", - "summary": "s", - "provides": map[interface{}]interface{}{ - "server": "http", - }, - "requires": map[interface{}]interface{}{ - "client": "http", - }, - "peers": map[interface{}]interface{}{ - "me": "http", - }, - }) -} - -func (s *MetaSuite) TestStorage(c *gc.C) { - // "type" is the only required attribute for storage. 
- meta, err := charm.ReadMeta(strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - description: woo tee bix - type: block - store1: - type: filesystem -`)) - c.Assert(err, gc.IsNil) - c.Assert(meta.Storage, gc.DeepEquals, map[string]charm.Storage{ - "store0": charm.Storage{ - Name: "store0", - Description: "woo tee bix", - Type: charm.StorageBlock, - CountMin: 1, // singleton - CountMax: 1, - }, - "store1": charm.Storage{ - Name: "store1", - Type: charm.StorageFilesystem, - CountMin: 1, // singleton - CountMax: 1, - }, - }) -} - -func (s *MetaSuite) TestStorageErrors(c *gc.C) { - prefix := ` -name: a -summary: b -description: c -storage: - store-bad: -`[1:] - - type test struct { - desc string - yaml string - err string - } - - tests := []test{{ - desc: "type is required", - yaml: " required: false", - err: "metadata: storage.store-bad.type: unexpected value ", - }, { - desc: "range must be an integer, or integer range (1)", - yaml: " type: filesystem\n multiple:\n range: woat", - err: `metadata: storage.store-bad.multiple.range: value "woat" does not match 'm', 'm-n', or 'm\+'`, - }, { - desc: "range must be an integer, or integer range (2)", - yaml: " type: filesystem\n multiple:\n range: 0-abc", - err: `metadata: storage.store-bad.multiple.range: value "0-abc" does not match 'm', 'm-n', or 'm\+'`, - }, { - desc: "range must be non-negative", - yaml: " type: filesystem\n multiple:\n range: -1", - err: `metadata: storage.store-bad.multiple.range: invalid count -1`, - }, { - desc: "range must be positive", - yaml: " type: filesystem\n multiple:\n range: 0", - err: `metadata: storage.store-bad.multiple.range: invalid count 0`, - }, { - desc: "location cannot be specified for block type storage", - yaml: " type: block\n location: /dev/sdc", - err: `charm "a" storage "store-bad": location may not be specified for "type: block"`, - }, { - desc: "minimum size must parse correctly", - yaml: " type: block\n minimum-size: foo", - err: `metadata: expected a non-negative number, got "foo"`, - }, { - desc: "minimum size must have valid suffix", - yaml: " type: block\n minimum-size: 10Q", - err: `metadata: invalid multiplier suffix "Q", expected one of MGTPEZY`, - }, { - desc: "properties must contain valid values", - yaml: " type: block\n properties: [transient, foo]", - err: `metadata: .* unexpected value "foo"`, - }} - - for i, test := range tests { - c.Logf("test %d: %s", i, test.desc) - c.Logf("\n%s\n", prefix+test.yaml) - _, err := charm.ReadMeta(strings.NewReader(prefix + test.yaml)) - c.Assert(err, gc.ErrorMatches, test.err) - } -} - -func (s *MetaSuite) TestStorageCount(c *gc.C) { - testStorageCount := func(count string, min, max int) { - meta, err := charm.ReadMeta(strings.NewReader(fmt.Sprintf(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem - multiple: - range: %s -`, count))) - c.Assert(err, gc.IsNil) - store := meta.Storage["store0"] - c.Assert(store, gc.NotNil) - c.Assert(store.CountMin, gc.Equals, min) - c.Assert(store.CountMax, gc.Equals, max) - } - testStorageCount("1", 1, 1) - testStorageCount("0-1", 0, 1) - testStorageCount("1-1", 1, 1) - testStorageCount("1+", 1, -1) - // n- is equivalent to n+ - testStorageCount("1-", 1, -1) -} - -func (s *MetaSuite) TestStorageLocation(c *gc.C) { - meta, err := charm.ReadMeta(strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem - location: /var/lib/things -`)) - c.Assert(err, gc.IsNil) - store := meta.Storage["store0"] - c.Assert(store, gc.NotNil) - 
c.Assert(store.Location, gc.Equals, "/var/lib/things") -} - -func (s *MetaSuite) TestStorageMinimumSize(c *gc.C) { - meta, err := charm.ReadMeta(strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem - minimum-size: 10G -`)) - c.Assert(err, gc.IsNil) - store := meta.Storage["store0"] - c.Assert(store, gc.NotNil) - c.Assert(store.MinimumSize, gc.Equals, uint64(10*1024)) -} - -func (s *MetaSuite) TestStorageProperties(c *gc.C) { - meta, err := charm.ReadMeta(strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem - properties: [transient] -`)) - c.Assert(err, gc.IsNil) - store := meta.Storage["store0"] - c.Assert(store, gc.NotNil) - c.Assert(store.Properties, jc.SameContents, []string{"transient"}) -} - -func (s *MetaSuite) TestPayloadClasses(c *gc.C) { - meta, err := charm.ReadMeta(strings.NewReader(` -name: a -summary: b -description: c -payloads: - monitor: - type: docker - kvm-guest: - type: kvm -`)) - c.Assert(err, gc.IsNil) - - c.Check(meta.PayloadClasses, jc.DeepEquals, map[string]charm.PayloadClass{ - "monitor": charm.PayloadClass{ - Name: "monitor", - Type: "docker", - }, - "kvm-guest": charm.PayloadClass{ - Name: "kvm-guest", - Type: "kvm", - }, - }) -} - -type dummyCharm struct{} - -func (c *dummyCharm) Config() *charm.Config { - panic("unused") -} - -func (c *dummyCharm) Metrics() *charm.Metrics { - panic("unused") -} - -func (c *dummyCharm) Actions() *charm.Actions { - panic("unused") -} - -func (c *dummyCharm) Revision() int { - panic("unused") -} - -func (c *dummyCharm) Meta() *charm.Meta { - return &charm.Meta{ - Provides: map[string]charm.Relation{ - "pro": {Interface: "ifce-pro", Scope: charm.ScopeGlobal}, - }, - Requires: map[string]charm.Relation{ - "req": {Interface: "ifce-req", Scope: charm.ScopeGlobal}, - "info": {Interface: "juju-info", Scope: charm.ScopeContainer}, - }, - Peers: map[string]charm.Relation{ - "peer": {Interface: "ifce-peer", Scope: charm.ScopeGlobal}, - }, - } -} === removed file 'src/gopkg.in/juju/charm.v5/metrics.go' --- src/gopkg.in/juju/charm.v5/metrics.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/metrics.go 1970-01-01 00:00:00 +0000 @@ -1,104 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" - - goyaml "gopkg.in/yaml.v1" -) - -// MetricType is used to identify metric types supported by juju. -type MetricType string - -const ( - builtinMetricPrefix = "juju" - - // Supported metric types. - MetricTypeGauge MetricType = "gauge" - MetricTypeAbsolute MetricType = "absolute" -) - -// IsBuiltinMetric reports whether the given metric key is in the builtin metric namespace -func IsBuiltinMetric(key string) bool { - return strings.HasPrefix(key, builtinMetricPrefix) -} - -// validateValue checks if the supplied metric value fits the requirements -// of its expected type. -func (m MetricType) validateValue(value string) error { - switch m { - case MetricTypeGauge, MetricTypeAbsolute: - // The largest number of digits that can be returned by strconv.FormatFloat is 24, so - // choose an arbitrary limit somewhat higher than that. 
- if len(value) > 30 { - return fmt.Errorf("metric value is too large") - } - _, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("invalid value type: expected float, got %q", value) - } - default: - return fmt.Errorf("unknown metric type %q", m) - } - return nil -} - -// Metric represents a single metric definition -type Metric struct { - Type MetricType - Description string -} - -// Metrics contains the metrics declarations encoded in the metrics.yaml -// file. -type Metrics struct { - Metrics map[string]Metric -} - -// ReadMetrics reads a MetricsDeclaration in YAML format. -func ReadMetrics(r io.Reader) (*Metrics, error) { - data, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - var metrics Metrics - if err := goyaml.Unmarshal(data, &metrics); err != nil { - return nil, err - } - if metrics.Metrics == nil { - return &metrics, nil - } - for name, metric := range metrics.Metrics { - if IsBuiltinMetric(name) { - if metric.Type != MetricType("") || metric.Description != "" { - return nil, fmt.Errorf("metric %q is using a prefix reserved for built-in metrics: it should not have type or description specification", name) - } - continue - } - switch metric.Type { - case MetricTypeGauge, MetricTypeAbsolute: - default: - return nil, fmt.Errorf("invalid metrics declaration: metric %q has unknown type %q", name, metric.Type) - } - if metric.Description == "" { - return nil, fmt.Errorf("invalid metrics declaration: metric %q lacks description", name) - } - } - return &metrics, nil -} - -// ValidateMetric validates the supplied metric name and value against the loaded -// metric definitions. -func (m Metrics) ValidateMetric(name, value string) error { - metric, exists := m.Metrics[name] - if !exists { - return fmt.Errorf("metric %q not defined", name) - } - return metric.Type.validateValue(value) -} === removed file 'src/gopkg.in/juju/charm.v5/metrics_test.go' --- src/gopkg.in/juju/charm.v5/metrics_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/metrics_test.go 1970-01-01 00:00:00 +0000 @@ -1,197 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm_test - -import ( - "sort" - "strings" - - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -// Keys returns a list of all defined metrics keys. -func Keys(m *charm.Metrics) []string { - result := make([]string, 0, len(m.Metrics)) - - for name := range m.Metrics { - result = append(result, name) - - } - sort.Strings(result) - return result -} - -type MetricsSuite struct{} - -var _ = gc.Suite(&MetricsSuite{}) - -func (s *MetricsSuite) TestReadEmpty(c *gc.C) { - metrics, err := charm.ReadMetrics(strings.NewReader("")) - c.Assert(err, gc.IsNil) - c.Assert(metrics, gc.NotNil) -} - -func (s *MetricsSuite) TestReadAlmostEmpty(c *gc.C) { - metrics, err := charm.ReadMetrics(strings.NewReader(` -metrics: -`)) - c.Assert(err, gc.IsNil) - c.Assert(metrics, gc.NotNil) -} - -func (s *MetricsSuite) TestNoDescription(c *gc.C) { - metrics, err := charm.ReadMetrics(strings.NewReader(` -metrics: - some-metric: - type: gauge -`)) - c.Assert(err, gc.ErrorMatches, "invalid metrics declaration: metric \"some-metric\" lacks description") - c.Assert(metrics, gc.IsNil) -} - -func (s *MetricsSuite) TestIncorrectType(c *gc.C) { - metrics, err := charm.ReadMetrics(strings.NewReader(` -metrics: - some-metric: - type: not-a-type - description: Some description. 
-`)) - c.Assert(err, gc.ErrorMatches, "invalid metrics declaration: metric \"some-metric\" has unknown type \"not-a-type\"") - c.Assert(metrics, gc.IsNil) -} - -func (s *MetricsSuite) TestMultipleDefinition(c *gc.C) { - metrics, err := charm.ReadMetrics(strings.NewReader(` -metrics: - some-metric: - type: gauge - description: Some description. - some-metric: - type: absolute - description: Some other description. - -`)) - c.Assert(err, gc.IsNil) - c.Assert(metrics.Metrics, gc.HasLen, 1) - c.Assert(metrics.Metrics["some-metric"].Type, gc.Equals, charm.MetricTypeAbsolute) -} - -func (s *MetricsSuite) TestIsBuiltinMetric(c *gc.C) { - tests := []struct { - input string - isbuiltin bool - }{{ - "juju-thing", - true, - }, { - "jujuthing", - true, - }, { - "thing", - false, - }, - } - - for i, test := range tests { - c.Logf("test %d isBuiltinMetric(%v) = %v", i, test.input, test.isbuiltin) - is := charm.IsBuiltinMetric(test.input) - c.Assert(is, gc.Equals, test.isbuiltin) - } -} - -func (s *MetricsSuite) TestValidYaml(c *gc.C) { - metrics, err := charm.ReadMetrics(strings.NewReader(` -metrics: - blips: - type: absolute - description: An absolute metric. - blops: - type: gauge - description: A gauge metric. - juju-unit-time: -`)) - c.Assert(err, gc.IsNil) - c.Assert(metrics, gc.NotNil) - c.Assert(Keys(metrics), gc.DeepEquals, []string{"blips", "blops", "juju-unit-time"}) - - testCases := []struct { - about string - name string - value string - err string - }{{ - about: "valid gauge metric", - name: "blops", - value: "1", - err: "", - }, { - about: "valid absolute metric", - name: "blips", - value: "0", - err: "", - }, { - about: "valid gauge metric, float value", - name: "blops", - value: "0.15", - err: "", - }, { - about: "valid absolute metric, float value", - name: "blips", - value: "6.015e15", - err: "", - }, { - about: "undeclared metric", - name: "undeclared", - value: "6.015e15", - err: "metric \"undeclared\" not defined", - }, { - about: "invalid type for gauge metric", - name: "blops", - value: "true", - err: "invalid value type: expected float, got \"true\"", - }, { - about: "metric value too large", - name: "blips", - value: "1111111111111111111111111111111", - err: "metric value is too large", - }, - } - - for i, t := range testCases { - c.Logf("test %d: %s", i, t.about) - err := metrics.ValidateMetric(t.name, t.value) - if t.err == "" { - c.Check(err, gc.IsNil) - } else { - c.Check(err, gc.ErrorMatches, t.err) - } - } - -} - -func (s *MetricsSuite) TestBuiltInMetrics(c *gc.C) { - tests := []string{` -metrics: - some-metric: - type: gauge - description: Some description. - juju-unit-time: - type: absolute -`, ` -metrics: - some-metric: - type: gauge - description: Some description. 
- juju-unit-time: - description: Some description -`, - } - for _, test := range tests { - c.Logf("%s", test) - _, err := charm.ReadMetrics(strings.NewReader(test)) - c.Assert(err, gc.ErrorMatches, `metric "juju-unit-time" is using a prefix reserved for built-in metrics: it should not have type or description specification`) - } -} === removed directory 'src/gopkg.in/juju/charm.v5/migratebundle' === removed file 'src/gopkg.in/juju/charm.v5/migratebundle/allbundles.txt.gz' Binary files src/gopkg.in/juju/charm.v5/migratebundle/allbundles.txt.gz 2015-09-22 15:27:01 +0000 and src/gopkg.in/juju/charm.v5/migratebundle/allbundles.txt.gz 1970-01-01 00:00:00 +0000 differ === removed file 'src/gopkg.in/juju/charm.v5/migratebundle/allcharms.json.gz' Binary files src/gopkg.in/juju/charm.v5/migratebundle/allcharms.json.gz 2015-09-22 15:27:01 +0000 and src/gopkg.in/juju/charm.v5/migratebundle/allcharms.json.gz 1970-01-01 00:00:00 +0000 differ === removed file 'src/gopkg.in/juju/charm.v5/migratebundle/migrate.go' --- src/gopkg.in/juju/charm.v5/migratebundle/migrate.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/migratebundle/migrate.go 1970-01-01 00:00:00 +0000 @@ -1,252 +0,0 @@ -package migratebundle - -import ( - "gopkg.in/errgo.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" -) - -// legacyBundle represents an old-style bundle. -type legacyBundle struct { - Series string `yaml:",omitempty"` - Inherits interface{} `yaml:",omitempty"` // string or []string - Services map[string]*legacyService - // A relation can be in one of two styles: - // ["r1", "r2"] or ["r1", ["r2", "r3", ...]] - Relations []interface{} `yaml:",omitempty"` // []string or []interface{}{"", []string{...}} - Overrides map[string]interface{} `yaml:",omitempty"` - Tags []string `yaml:",omitempty"` -} - -// legacyService represents a service from a legacy bundle. -type legacyService struct { - Charm string `yaml:",omitempty"` - Branch string `yaml:",omitempty"` - NumUnits *int `yaml:"num_units,omitempty"` - Constraints string `yaml:",omitempty"` - Expose bool `yaml:",omitempty"` - Annotations map[string]string `yaml:",omitempty"` - To string `yaml:",omitempty"` - Options map[string]interface{} `yaml:",omitempty"` - - // Spurious fields, used by existing bundles but not - // valid in the specification. Kept here so that - // the reversibility tests can work. - Name string `yaml:",omitempty"` - Exposed bool `yaml:",omitempty"` - Local string `yaml:",omitempty"` -} - -// Migrate parses the old-style bundles.yaml file in bundlesYAML -// and returns a map containing an entry for each bundle -// found in that basket, keyed by the name of the bundle. -// -// It performs the following changes: -// -// - Any inheritance is expanded. -// -// - When a "to" placement directive refers to machine 0, -// an explicit machines section is added, and the directive -// is converted to a slice. -// -// - If the charm URL is not specified, it is taken from the -// service name. -// -// - num_units is renamed to numunits, and set to 1 if omitted. -// -// - A relation clause with multiple targets is expanded -// into multiple relation clauses. -// -// The getCharm argument is ignored and provided for -// backward compatibility only. It will be removed in -// charm.v5.
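Since this doc comment is the closest thing to a specification of Migrate, a short usage sketch may help; the bundle content and printed values are invented for illustration, and getCharm is nil, as permitted above:

	package main

	import (
		"fmt"

		"gopkg.in/juju/charm.v5/migratebundle"
	)

	func main() {
		legacy := []byte("wordpress-simple:\n" +
			"  services:\n" +
			"    wordpress:\n" +
			"      charm: cs:precise/wordpress-20\n" +
			"      num_units: 2\n")
		// The second argument is ignored; nil is fine.
		bundles, err := migratebundle.Migrate(legacy, nil)
		if err != nil {
			panic(err)
		}
		svc := bundles["wordpress-simple"].Services["wordpress"]
		fmt.Println(svc.Charm, svc.NumUnits) // cs:precise/wordpress-20 2
	}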
-func Migrate(bundlesYAML []byte, getCharm func(id *charm.Reference) (*charm.Meta, error)) (map[string]*charm.BundleData, error) { - var bundles map[string]*legacyBundle - if err := yaml.Unmarshal(bundlesYAML, &bundles); err != nil { - return nil, errgo.Notef(err, "cannot parse legacy bundle") - } - // First expand any inherits clauses. - newBundles := make(map[string]*charm.BundleData) - for name, bundle := range bundles { - bundle, err := inherit(bundle, bundles) - if err != nil { - return nil, errgo.Notef(err, "bundle inheritance failed for %q", name) - } - newBundle, err := migrate(bundle) - if err != nil { - return nil, errgo.Notef(err, "bundle migration failed for %q", name) - } - newBundles[name] = newBundle - } - return newBundles, nil -} - -func migrate(b *legacyBundle) (*charm.BundleData, error) { - data := &charm.BundleData{ - Services: make(map[string]*charm.ServiceSpec), - Series: b.Series, - Machines: make(map[string]*charm.MachineSpec), - Tags: b.Tags, - } - for name, svc := range b.Services { - if svc == nil { - svc = new(legacyService) - } - newSvc := &charm.ServiceSpec{ - Charm: svc.Charm, - NumUnits: 1, // default - Options: svc.Options, - Annotations: svc.Annotations, - Constraints: svc.Constraints, - } - if newSvc.Charm == "" { - newSvc.Charm = name - } - if svc.NumUnits != nil { - newSvc.NumUnits = *svc.NumUnits - } - if svc.To != "" { - newSvc.To = []string{svc.To} - place, err := charm.ParsePlacement(svc.To) - if err != nil { - return nil, errgo.Notef(err, "cannot parse 'to' placement clause %q", svc.To) - } - if place.Machine != "" { - data.Machines[place.Machine] = new(charm.MachineSpec) - } - } - data.Services[name] = newSvc - } - var err error - data.Relations, err = expandRelations(b.Relations) - if err != nil { - return nil, errgo.Notef(err, "cannot expand relations") - } - if len(data.Machines) == 0 { - data.Machines = nil - } - return data, nil -} - -// expandRelations expands any relations that are -// in the form [r1, [r2, r3, ...]] into the form [r1, r2], [r1, r3], .... -func expandRelations(relations []interface{}) ([][]string, error) { - var newRelations [][]string - for _, rel := range relations { - rel, ok := rel.([]interface{}) - if !ok || len(rel) != 2 { - return nil, errgo.Newf("unexpected relation clause %#v", rel) - } - ep0, ok := rel[0].(string) - if !ok { - return nil, errgo.Newf("first relation endpoint is %#v not string", rel[0]) - } - if ep1, ok := rel[1].(string); ok { - newRelations = append(newRelations, []string{ep0, ep1}) - continue - } - eps, ok := rel[1].([]interface{}) - if !ok { - return nil, errgo.Newf("second relation endpoint is %#v not list or string", rel[1]) - } - for _, ep1 := range eps { - ep1, ok := ep1.(string) - if !ok { - return nil, errgo.Newf("relation list member is not string") - } - newRelations = append(newRelations, []string{ep0, ep1}) - } - } - return newRelations, nil -} - -// inherit adds any inherited attributes to the given bundle b. It does -// not modify b, returning a new bundle if necessary. -// -// The bundles map holds all the bundles from the basket (the possible -// bundles that can be inherited from). -func inherit(b *legacyBundle, bundles map[string]*legacyBundle) (*legacyBundle, error) { - if b.Inherits == nil { - return b, nil - } - // The Inherits clause can be specified as a string or a list. - // There are no known bundles which have more than one element in - // the list, so fail if there are, as we don't want to implement - // multiple inheritance when we don't have to.
- inherits, ok := b.Inherits.(string) - if !ok { - list, ok := b.Inherits.([]interface{}) - if !ok || len(list) != 1 { - return nil, errgo.Newf("bad inherits clause %#v", b.Inherits) - } - inherits, ok = list[0].(string) - if !ok { - return nil, errgo.Newf("bad inherits clause %#v", b.Inherits) - } - } - from := bundles[inherits] - if from == nil { - return nil, errgo.Newf("inherited-from bundle %q not found", inherits) - } - if from.Inherits != nil { - return nil, errgo.Newf("only a single level of inheritance is supported") - } - // Make a generic copy of both the base and target bundles, - // so we can apply inheritance regardless of Go types. - var target map[interface{}]interface{} - err := yamlCopy(&target, from) - if err != nil { - return nil, errgo.Notef(err, "copy target") - } - var source map[interface{}]interface{} - err = yamlCopy(&source, b) - if err != nil { - return nil, errgo.Notef(err, "copy source") - } - // Apply the inherited attributes. - copyOnto(target, source, true) - - // Convert back to Go types. - var newb legacyBundle - err = yamlCopy(&newb, target) - if err != nil { - return nil, errgo.Notef(err, "copy result") - } - return &newb, nil -} - -// yamlCopy copies the source value into the value -// pointed to by the target value by marshaling -// and unmarshaling YAML. -func yamlCopy(target, source interface{}) error { - data, err := yaml.Marshal(source) - if err != nil { - return errgo.Notef(err, "marshal copy") - } - if err := yaml.Unmarshal(data, target); err != nil { - return errgo.Notef(err, "unmarshal copy") - } - return nil -} - -// copyOnto copies the source onto the target, -// preserving any of the source that is not present -// in the target. -func copyOnto(target, source map[interface{}]interface{}, isRoot bool) { - for key, val := range source { - if key == "inherits" && isRoot { - continue - } - switch val := val.(type) { - case map[interface{}]interface{}: - if targetVal, ok := target[key].(map[interface{}]interface{}); ok { - copyOnto(targetVal, val, false) - } else { - target[key] = val - } - default: - target[key] = val - } - } -} === removed file 'src/gopkg.in/juju/charm.v5/migratebundle/migrate_test.go' --- src/gopkg.in/juju/charm.v5/migratebundle/migrate_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/migratebundle/migrate_test.go 1970-01-01 00:00:00 +0000 @@ -1,873 +0,0 @@ -package migratebundle - -import ( - "bufio" - "compress/gzip" - "encoding/json" - "flag" - "fmt" - "io" - "log" - "os" - "strings" - "sync" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" -) - -var _ = gc.Suite(&migrateSuite{}) - -type migrateSuite struct{} - -// The charm data cache caches results from -// fetching charms from the charm store. -// If the update-charms flag is specified, the -// contents of charmDataCache is written to -// allcharms.json.gz; otherwise the contents -// of allcharms.json are read and the charm -// store is not touched. 
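In practice, then, refreshing the cached charm data amounts to running this package's tests with the flag declared below; an invocation along these lines (illustrative, not prescriptive; any way of passing the flag to the test binary will do):

	go test gopkg.in/juju/charm.v5/migratebundle -update-charms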
-// -var ( - charmDataCacheMutex sync.Mutex - charmDataCache = make(map[string]*charmData) -) - -var updateCharms = flag.Bool("update-charms", false, "fetch and update local charms for test bundles") - -const charmCacheFile = "allcharms.json.gz" - -func (*migrateSuite) SetUpSuite(c *gc.C) { - if *updateCharms { - charmrepo.CacheDir = c.MkDir() - return - } - f, err := os.Open(charmCacheFile) - if err != nil { - c.Logf("cannot open charms data: %v", err) - return - } - defer f.Close() - gzr, err := gzip.NewReader(f) - c.Assert(err, gc.IsNil) - dec := json.NewDecoder(gzr) - err = dec.Decode(&charmDataCache) - c.Assert(err, gc.IsNil) -} - -func (*migrateSuite) TearDownSuite(c *gc.C) { - if !*updateCharms { - return - } - data, err := json.Marshal(charmDataCache) - c.Assert(err, gc.IsNil) - f, err := os.Create(charmCacheFile) - c.Assert(err, gc.IsNil) - defer f.Close() - gzw := gzip.NewWriter(f) - defer gzw.Close() - _, err = gzw.Write(data) - c.Assert(err, gc.IsNil) -} - -var migrateTests = []struct { - about string - bundles string - expect map[string]*charm.BundleData - expectError string -}{{ - about: "single bundle, no relations cs:~jorge/bundle/wordpress", - bundles: ` - |wordpress-simple: - | series: precise - | tags: ["foo", "bar"] - | services: - | wordpress: - | charm: "cs:precise/wordpress-20" - | num_units: 1 - | options: - | debug: "no" - | engine: nginx - | tuning: single - | "wp-content": "" - | annotations: - | "gui-x": 529 - | "gui-y": -97 - | mysql: - | charm: "cs:precise/mysql-28" - | num_units: 2 - | options: - | "binlog-format": MIXED - | "block-size": 5 - | "dataset-size": "80%" - | flavor: distro - | "query-cache-size": -1 - | "query-cache-type": "OFF" - | vip_iface: eth0 - | annotations: - | "gui-x": 530 - | "gui-y": 185 - |`, - expect: map[string]*charm.BundleData{ - "wordpress-simple": { - Series: "precise", - Tags: []string{"foo", "bar"}, - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "cs:precise/wordpress-20", - NumUnits: 1, - Options: map[string]interface{}{ - "debug": "no", - "engine": "nginx", - "tuning": "single", - "wp-content": "", - }, - Annotations: map[string]string{ - "gui-x": "529", - "gui-y": "-97", - }, - }, - "mysql": { - Charm: "cs:precise/mysql-28", - NumUnits: 2, - Options: map[string]interface{}{ - "binlog-format": "MIXED", - "block-size": 5, - "dataset-size": "80%", - "flavor": "distro", - "query-cache-size": -1, - "query-cache-type": "OFF", - "vip_iface": "eth0", - }, - Annotations: map[string]string{ - "gui-x": "530", - "gui-y": "185", - }, - }, - }, - }, - }, -}, { - about: "missing num_units interpreted as single unit", - bundles: ` - |wordpress-simple: - | services: - | wordpress: - | charm: "cs:precise/wordpress-20" - |`, - expect: map[string]*charm.BundleData{ - "wordpress-simple": { - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "cs:precise/wordpress-20", - NumUnits: 1, - }, - }, - }, - }, -}, { - about: "missing charm taken from service name", - bundles: ` - |wordpress-simple: - | services: - | wordpress: - |`, - expect: map[string]*charm.BundleData{ - "wordpress-simple": { - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "wordpress", - NumUnits: 1, - }, - }, - }, - }, -}, { - about: "services with placement directives", - bundles: ` - |wordpress: - | services: - | wordpress1: - | num_units: 1 - | to: 0 - | wordpress2: - | num_units: 1 - | to: kvm:0 - | wordpress3: - | num_units: 1 - | to: mysql - | wordpress4: - | num_units: 1 - | to: kvm:mysql - | mysql: - | num_units: 1 - 
|`, - expect: map[string]*charm.BundleData{ - "wordpress": { - Services: map[string]*charm.ServiceSpec{ - "wordpress1": { - Charm: "wordpress1", - NumUnits: 1, - To: []string{"0"}, - }, - "wordpress2": { - Charm: "wordpress2", - NumUnits: 1, - To: []string{"kvm:0"}, - }, - "wordpress3": { - Charm: "wordpress3", - NumUnits: 1, - To: []string{"mysql"}, - }, - "wordpress4": { - Charm: "wordpress4", - NumUnits: 1, - To: []string{"kvm:mysql"}, - }, - "mysql": { - Charm: "mysql", - NumUnits: 1, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "0": {}, - }, - }, - }, -}, { - about: "service with single indirect placement directive", - bundles: ` - |wordpress: - | services: - | wordpress: - | to: kvm:0 - |`, - expect: map[string]*charm.BundleData{ - "wordpress": { - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "wordpress", - NumUnits: 1, - To: []string{"kvm:0"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "0": {}, - }, - }, - }, -}, { - about: "service with invalid placement directive", - bundles: ` - |wordpress: - | services: - | wordpress: - | to: kvm::0 - |`, - expectError: `bundle migration failed for "wordpress": cannot parse 'to' placement clause "kvm::0": invalid placement syntax "kvm::0"`, -}, { - about: "service with inheritance", - bundles: ` - |wordpress: - | inherits: base - | services: - | wordpress: - | charm: precise/wordpress - | annotations: - | foo: yes - | base: arble - |base: - | services: - | logging: - | charm: precise/logging - | wordpress: - | annotations: - | foo: bar - | base: arble - |`, - expect: map[string]*charm.BundleData{ - "wordpress": { - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "precise/wordpress", - NumUnits: 1, - Annotations: map[string]string{ - "foo": "yes", - "base": "arble", - }, - }, - "logging": { - Charm: "precise/logging", - NumUnits: 1, - }, - }, - }, - "base": { - Services: map[string]*charm.ServiceSpec{ - "logging": { - Charm: "precise/logging", - NumUnits: 1, - }, - "wordpress": { - Charm: "wordpress", - NumUnits: 1, - Annotations: map[string]string{ - "foo": "bar", - "base": "arble", - }, - }, - }, - }, - }, -}, { - about: "open relations", - bundles: ` - |wordpress: - | services: - | wordpress: - | charm: precise/wordpress - | mysql: - | charm: precise/mysql - | logging: - | charm: precise/logging - | monitoring: - | charm: precise/monitor - | relations: - | - [wordpress, mysql] - | - [logging, [mysql, wordpress]] - | - [monitoring, wordpress] - |`, - expect: map[string]*charm.BundleData{ - "wordpress": { - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "precise/wordpress", - NumUnits: 1, - }, - "mysql": { - Charm: "precise/mysql", - NumUnits: 1, - }, - "logging": { - Charm: "precise/logging", - NumUnits: 1, - }, - "monitoring": { - Charm: "precise/monitor", - NumUnits: 1, - }, - }, - Relations: [][]string{ - {"wordpress", "mysql"}, - {"logging", "mysql"}, - {"logging", "wordpress"}, - {"monitoring", "wordpress"}, - }, - }, - }, -}} - -func (*migrateSuite) TestMigrate(c *gc.C) { - for i, test := range migrateTests { - c.Logf("test %d: %s", i, test.about) - result, err := Migrate(unbeautify(test.bundles), nil) - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - } else { - c.Assert(err, gc.IsNil) - c.Assert(result, jc.DeepEquals, test.expect) - } - } -} - -func (*migrateSuite) TestMigrateAll(c *gc.C) { - c.ExpectFailure("all bundles do not migrate successfully") - passed, total := 0, 0 - doAllBundles(c, func(c *gc.C, id string, data
[]byte) { - c.Logf("\nmigrate test %s", id) - ok := true - bundles, err := Migrate(data, nil) - if err != nil { - c.Logf("cannot migrate: %v", err) - ok = false - } - for _, bundle := range bundles { - ok = checkBundleData(c, bundle) && ok - } - if ok { - passed++ - } - total++ - }) - c.Logf("%d/%d passed", passed, total) - c.Check(passed, gc.Equals, total) -} - -func checkBundleData(c *gc.C, bd *charm.BundleData) bool { - charms := make(map[string]charm.Charm) - ok := true - for _, svc := range bd.Services { - id, err := charm.ParseReference(svc.Charm) - if err != nil { - ok = false - c.Logf("cannot parse %q: %v", svc.Charm, err) - continue - } - if id.Series == "" { - id.Series = bd.Series - } - ch, err := getCharm(id) - if err != nil { - ok = false - c.Logf("cannot find %q: %v", id, err) - continue - } - charms[svc.Charm] = ch - } - if ok { - if err := bd.VerifyWithCharms(nil, charms); err != nil { - for _, err := range err.(*charm.VerificationError).Errors { - c.Logf("verification error: %v", err) - } - ok = false - } - } - return ok -} - -var inheritTests = []struct { - about string - bundle string - base string - baseName string - expect string - expectError string -}{{ - about: "inherited-from not found", - bundle: `inherits: non-existent`, - expectError: `inherited-from bundle "non-existent" not found`, -}, { - about: "bad inheritance #1", - bundle: `inherits: 200`, - expectError: `bad inherits clause 200`, -}, { - about: "bad inheritance #2", - bundle: `inherits: [10]`, - expectError: `bad inherits clause .*`, -}, { - about: "bad inheritance #3", - bundle: `inherits: ['a', 'b']`, - expectError: `bad inherits clause .*`, -}, { - about: "inherit everything", - bundle: ` - |inherits: base - `, - baseName: "base", - base: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, - expect: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, -}, { - about: "inherit everything, specified as list", - bundle: ` - |inherits: [base] - `, - baseName: "base", - base: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, - expect: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, -}, { - about: "different base name", - bundle: ` - |inherits: something - `, - baseName: "something", - base: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, - expect: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, -}, { - about: "override series", - bundle: ` - |inherits: base - |series: trusty - `, - baseName: "base", - base: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, - expect: ` - |series: trusty - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - `, -}, { - about: "override wordpress charm", - bundle: ` - |inherits: base - |services: - | wordpress: - | charm: 'cs:quantal/different' - `, - baseName: "base", - base: ` - |series: precise - |services: - | wordpress: - | charm: "cs:precise/wordpress" - | options: - | foo: bar - `, - expect: ` - |series: precise - |services: - | wordpress: - | charm: "cs:quantal/different" - | options: - | foo: bar - `, -}, { - about: "override to clause", - bundle: ` - |inherits: base - |services: - | wordpress: - | to: 0 - `, - baseName: "base", - base: ` - |series: precise - |services: - | wordpress: - | charm: 'cs:precise/wordpress' - | options: - | foo: bar - `, - expect: ` - |series: precise - |services: - | 
wordpress: - | charm: 'cs:precise/wordpress' - | options: - | foo: bar - | to: 0 - `, -}, { - about: "deep inheritance", - bundle: ` - |inherits: base - `, - baseName: "base", - base: ` - |inherits: "other" - `, - expectError: `only a single level of inheritance is supported`, -}} - -var otherBundle = parseBundle(` - |series: quantal - |overrides: - | something: other -`) - -func (*migrateSuite) TestInherit(c *gc.C) { - for i, test := range inheritTests { - c.Logf("test %d: %s", i, test.about) - bundle := parseBundle(test.bundle) - base := parseBundle(test.base) - expect := parseBundle(test.expect) - // Add another bundle so we know that it is ignored - // unless it is explicitly inherited from. - bundles := map[string]*legacyBundle{ - test.baseName: base, - "other": otherBundle, - } - b, err := inherit(bundle, bundles) - if test.expectError != "" { - c.Check(err, gc.ErrorMatches, test.expectError) - } else { - c.Assert(err, gc.IsNil) - c.Assert(b, jc.DeepEquals, expect) - } - } -} - -func (s *migrateSuite) TestNoNameClashes(c *gc.C) { - nameCounts := make(map[string]int) - doAllBundles(c, func(c *gc.C, id string, data []byte) { - nameCounts[id]++ - }) - // There are actually two name clashes in the real - // in-the-wild bundles: - // cs:~charmers/bundle/mediawiki-scalable - // cs:~charmers/bundle/mongodb-cluster - // Both of these actually fit with our proposed scheme, - // because they're (almost) identical with the bundles - // within mediawiki and mongodb respectively. - // - // So we discount them from our example bundles. - delete(nameCounts, "cs:~charmers/bundle/mongodb-cluster") - delete(nameCounts, "cs:~charmers/bundle/mediawiki-scalable") - - doAllBundles(c, func(c *gc.C, id string, data []byte) { - var bundles map[string]*legacyBundle - err := yaml.Unmarshal(data, &bundles) - c.Assert(err, gc.IsNil) - if len(bundles) == 1 { - return - } - for name := range bundles { - subId := id + "-" + name - nameCounts[subId]++ - } - }) - for name, count := range nameCounts { - if count != 1 { - c.Errorf("%d clashes at %s", count-1, name) - } - } -} - -func (s *migrateSuite) TestReversible(c *gc.C) { - doAllBundles(c, s.testReversible) -} - -func (*migrateSuite) testReversible(c *gc.C, id string, data []byte) { - var bundles map[string]*legacyBundle - err := yaml.Unmarshal(data, &bundles) - c.Assert(err, gc.IsNil) - for _, b := range bundles { - if len(b.Relations) == 0 { - b.Relations = nil - } - } - var allInterface interface{} - err = yaml.Unmarshal(data, &allInterface) - c.Assert(err, gc.IsNil) - all, ok := allInterface.(map[interface{}]interface{}) - c.Assert(ok, gc.Equals, true) - for _, b := range all { - b := ymap(b) - // Remove empty relations line. - if rels, ok := b["relations"].([]interface{}); ok && len(rels) == 0 { - delete(b, "relations") - } - // Convert all annotation values and "to" values - // to strings. - // Strictly speaking this means that the bundles - // are non-reversible, but juju converts annotations - // to string anyway, so it doesn't matter. - for _, svc := range ymap(b["services"]) { - svc := ymap(svc) - annot := ymap(svc["annotations"]) - for key, val := range annot { - if _, ok := val.(string); !ok { - annot[key] = fmt.Sprint(val) - } - } - if to, ok := svc["to"]; ok { - svc["to"] = fmt.Sprint(to) - } - } - - } - data1, err := yaml.Marshal(bundles) - c.Assert(err, gc.IsNil) - var all1 interface{} - err = yaml.Unmarshal(data1, &all1) - c.Assert(err, gc.IsNil) - c.Assert(all1, jc.DeepEquals, all) -} - -// ymap returns the default form of a map -// when unmarshaled by YAML.
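As a sketch of the shape ymap handles: yaml.v1 decodes any YAML mapping into map[interface{}]interface{} when the target is a bare interface{}, so nested mappings come back in the same generic form (the literal here is invented; the snippet assumes the package's imports):

	var v interface{}
	if err := yaml.Unmarshal([]byte("a: 1\nb:\n  c: 2\n"), &v); err != nil {
		panic(err)
	}
	m := ymap(v)                    // map[interface{}]interface{}{"a": 1, "b": ...}
	inner := ymap(m["b"])           // nested mapping, same generic form
	fmt.Println(m["a"], inner["c"]) // 1 2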
-func ymap(v interface{}) map[interface{}]interface{} { - if v == nil { - return nil - } - return v.(map[interface{}]interface{}) -} - -// doAllBundles calls the given function for each bundle -// in all the available test bundles. -func doAllBundles(c *gc.C, f func(c *gc.C, id string, data []byte)) { - a := openAllBundles() - defer a.Close() - for { - title, data, err := a.readSection() - if len(data) > 0 { - f(c, title, data) - } - if err != nil { - c.Assert(errgo.Cause(err), gc.Equals, io.EOF) - break - } - } -} - -type allBundles struct { - file *os.File - r *bufio.Reader -} - -func openAllBundles() *allBundles { - f, err := os.Open("allbundles.txt.gz") - if err != nil { - log.Fatal(err) - } - gzr, err := gzip.NewReader(f) - if err != nil { - log.Fatal(err) - } - r := bufio.NewReader(gzr) - return &allBundles{ - file: f, - r: r, - } -} - -func (a *allBundles) Close() error { - return a.file.Close() -} - -// sectionMarker delimits a section in the bundles file. -// Note that no bundles contain non-ASCII characters -// so the first byte of this string is a sufficient -// sentinel. -const sectionMarker = "¶ " - -func (a *allBundles) readSection() (title string, data []byte, err error) { - title, err = a.r.ReadString('\n') - if err != nil { - return "", nil, err - } - if !strings.HasPrefix(title, sectionMarker) || !strings.HasSuffix(title, "\n") { - return "", nil, fmt.Errorf("invalid title line %q", title) - } - title = strings.TrimPrefix(title, sectionMarker) - title = strings.TrimSuffix(title, "\n") - for { - c, err := a.r.ReadByte() - switch { - case err == io.EOF: - return title, data, nil - case err != nil: - return "", nil, err - case c == sectionMarker[0]: - a.r.UnreadByte() - return title, data, nil - } - data = append(data, c) - } -} - -func parseBundle(s string) *legacyBundle { - var b *legacyBundle - err := yaml.Unmarshal(unbeautify(s), &b) - if err != nil { - panic(fmt.Errorf("cannot unmarshal %q: %v", s, err)) - } - return b -} - -// indentReplacer deletes tabs and | beautifier characters. -var indentReplacer = strings.NewReplacer("\t", "", "|", "") - -// unbeautify strips the tabs and | characters that -// we use to make the tests look nicer.
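For instance (an illustrative call; note that indentReplacer removes every tab and every "|", so the beautified literals must use tabs, not spaces, for the display indentation):

	raw := unbeautify("\t|services:\n" +
		"\t|  wordpress:\n" +
		"\t|    num_units: 1\n")
	// raw == []byte("services:\n  wordpress:\n    num_units: 1\n")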
-func unbeautify(s string) []byte { - return []byte(indentReplacer.Replace(s)) -} - -func noCharms(id *charm.Reference) (*charm.Meta, error) { - return nil, fmt.Errorf("charm %q not found", id) -} - -func getCharm(id *charm.Reference) (charm.Charm, error) { - url, err := id.URL("") - if err != nil { - return nil, fmt.Errorf("cannot make URL from %q: %v", id, err) - } - charmDataCacheMutex.Lock() - defer charmDataCacheMutex.Unlock() - if m, ok := charmDataCache[url.String()]; ok || !*updateCharms { - if m == nil { - return nil, fmt.Errorf("charm %q not found in cache", id) - } - return m, nil - } - log.Printf("getting %s", url) - ch, err := charmrepo.LegacyStore.Get(url) - if err != nil { - charmDataCache[url.String()] = nil - return nil, err - } - chData := &charmData{ - Meta_: ch.Meta(), - Config_: ch.Config(), - Metrics_: ch.Metrics(), - } - charmDataCache[url.String()] = chData - return chData, nil -} - -type charmData struct { - Meta_ *charm.Meta `json:"Meta"` - Config_ *charm.Config `json:"Config"` - Metrics_ *charm.Metrics `json:"Metrics"` -} - -func (c *charmData) Meta() *charm.Meta { - return c.Meta_ -} - -func (c *charmData) Metrics() *charm.Metrics { - return c.Metrics_ -} - -func (c *charmData) Config() *charm.Config { - return c.Config_ -} - -func (c *charmData) Actions() *charm.Actions { - return nil -} - -func (c *charmData) Revision() int { - return 0 -} === removed file 'src/gopkg.in/juju/charm.v5/migratebundle/package_test.go' --- src/gopkg.in/juju/charm.v5/migratebundle/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/migratebundle/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package migratebundle - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/gopkg.in/juju/charm.v5/payloads.go' --- src/gopkg.in/juju/charm.v5/payloads.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/payloads.go 1970-01-01 00:00:00 +0000 @@ -1,67 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm - -import ( - "fmt" - - "github.com/juju/schema" -) - -var payloadClassSchema = schema.FieldMap( - schema.Fields{ - "type": schema.String(), - }, - schema.Defaults{}, -) - -// PayloadClass holds the information about a payload class, as stored -// in a charm's metadata. -type PayloadClass struct { - // Name identifies the payload class. - Name string - - // Type identifies the type of payload (e.g. kvm, docker). - Type string -} - -func parsePayloadClasses(data interface{}) map[string]PayloadClass { - if data == nil { - return nil - } - - result := make(map[string]PayloadClass) - for name, val := range data.(map[string]interface{}) { - result[name] = parsePayloadClass(name, val) - } - - return result -} - -func parsePayloadClass(name string, data interface{}) PayloadClass { - payloadClass := PayloadClass{ - Name: name, - } - if data == nil { - return payloadClass - } - pcMap := data.(map[string]interface{}) - - if val := pcMap["type"]; val != nil { - payloadClass.Type = val.(string) - } - - return payloadClass -} - -// Validate checks the payload class to ensure its data is valid. 
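A small usage sketch of the type above (the values are invented):

	pc := charm.PayloadClass{Name: "monitor", Type: "docker"}
	if err := pc.Validate(); err != nil {
		// not reached: both Name and Type are set
	}
	var zero charm.PayloadClass
	err := zero.Validate() // fails with "payload class missing name"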
-func (pc PayloadClass) Validate() error { - if pc.Name == "" { - return fmt.Errorf("payload class missing name") - } - if pc.Type == "" { - return fmt.Errorf("payload class missing type") - } - return nil -} === removed file 'src/gopkg.in/juju/charm.v5/payloads_test.go' --- src/gopkg.in/juju/charm.v5/payloads_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/payloads_test.go 1970-01-01 00:00:00 +0000 @@ -1,86 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package charm_test - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -var _ = gc.Suite(&payloadClassSuite{}) - -type payloadClassSuite struct{} - -func (s *payloadClassSuite) TestParsePayloadClassOkay(c *gc.C) { - name := "my-payload" - data := map[string]interface{}{ - "type": "docker", - } - payloadClass := charm.ParsePayloadClass(name, data) - - c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{ - Name: "my-payload", - Type: "docker", - }) -} - -func (s *payloadClassSuite) TestParsePayloadClassMissingName(c *gc.C) { - name := "" - data := map[string]interface{}{ - "type": "docker", - } - payloadClass := charm.ParsePayloadClass(name, data) - - c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{ - Name: "", - Type: "docker", - }) -} - -func (s *payloadClassSuite) TestParsePayloadClassEmpty(c *gc.C) { - name := "my-payload" - var data map[string]interface{} - payloadClass := charm.ParsePayloadClass(name, data) - - c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{ - Name: "my-payload", - }) -} - -func (s *payloadClassSuite) TestValidateFull(c *gc.C) { - payloadClass := charm.PayloadClass{ - Name: "my-payload", - Type: "docker", - } - err := payloadClass.Validate() - - c.Check(err, jc.ErrorIsNil) -} - -func (s *payloadClassSuite) TestValidateZeroValue(c *gc.C) { - var payloadClass charm.PayloadClass - err := payloadClass.Validate() - - c.Check(err, gc.NotNil) -} - -func (s *payloadClassSuite) TestValidateMissingName(c *gc.C) { - payloadClass := charm.PayloadClass{ - Type: "docker", - } - err := payloadClass.Validate() - - c.Check(err, gc.ErrorMatches, `payload class missing name`) -} - -func (s *payloadClassSuite) TestValidateMissingType(c *gc.C) { - payloadClass := charm.PayloadClass{ - Name: "my-payload", - } - err := payloadClass.Validate() - - c.Check(err, gc.ErrorMatches, `payload class missing type`) -} === removed directory 'src/gopkg.in/juju/charm.v5/testing' === removed file 'src/gopkg.in/juju/charm.v5/testing/charm.go' --- src/gopkg.in/juju/charm.v5/testing/charm.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/testing/charm.go 1970-01-01 00:00:00 +0000 @@ -1,298 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package testing - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - - "github.com/juju/utils/fs" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" -) - -func check(err error) { - if err != nil { - panic(err) - } -} - -// NewRepo returns a new testing charm repository rooted at the given -// path, relative to the package directory of the calling package, using -// defaultSeries as the default series. -func NewRepo(path, defaultSeries string) *Repo { - // Find the repo directory. This is only OK to do - // because this is running in a test context - // so we know the source is available. 
- _, file, _, ok := runtime.Caller(1) - if !ok { - panic("cannot get caller") - } - r := &Repo{ - path: filepath.Join(filepath.Dir(file), path), - defaultSeries: defaultSeries, - } - _, err := os.Stat(r.path) - if err != nil { - panic(fmt.Errorf("cannot read repository found at %q: %v", r.path, err)) - } - return r -} - -// Repo represents a charm repository used for testing. -type Repo struct { - path string - defaultSeries string -} - -func (r *Repo) Path() string { - return r.path -} - -func clone(dst, src string) string { - dst = filepath.Join(dst, filepath.Base(src)) - check(fs.Copy(src, dst)) - return dst -} - -// BundleDirPath returns the path to a bundle directory with the given name in the -// default series. -func (r *Repo) BundleDirPath(name string) string { - return filepath.Join(r.Path(), "bundle", name) -} - -// BundleDir returns the actual charm.BundleDir named name. -func (r *Repo) BundleDir(name string) *charm.BundleDir { - b, err := charm.ReadBundleDir(r.BundleDirPath(name)) - check(err) - return b -} - -// CharmDirPath returns the path to a charm directory with the given name in the -// default series. -func (r *Repo) CharmDirPath(name string) string { - return filepath.Join(r.Path(), r.defaultSeries, name) -} - -// CharmDir returns the actual charm.CharmDir named name. -func (r *Repo) CharmDir(name string) *charm.CharmDir { - ch, err := charm.ReadCharmDir(r.CharmDirPath(name)) - check(err) - return ch -} - -// ClonedDirPath returns the path to a new copy of the default charm directory -// named name. -func (r *Repo) ClonedDirPath(dst, name string) string { - return clone(dst, r.CharmDirPath(name)) -} - -// ClonedBundleDirPath returns the path to a new copy of the default bundle directory -// named name. -func (r *Repo) ClonedBundleDirPath(dst, name string) string { - return clone(dst, r.BundleDirPath(name)) -} - -// RenamedClonedDirPath returns the path to a new copy of the default -// charm directory named name, renamed to newName. -func (r *Repo) RenamedClonedDirPath(dst, name, newName string) string { - dstPath := filepath.Join(dst, newName) - err := fs.Copy(r.CharmDirPath(name), dstPath) - check(err) - return dstPath -} - -// ClonedDir returns an actual charm.CharmDir based on a new copy of the charm directory -// named name, in the directory dst. -func (r *Repo) ClonedDir(dst, name string) *charm.CharmDir { - ch, err := charm.ReadCharmDir(r.ClonedDirPath(dst, name)) - check(err) - return ch -} - -// ClonedURL makes a copy of the charm directory. It will create a directory -// with the series name if it does not exist, and then clone the charm named -// name into that directory. The return value is a URL pointing at the local -// charm. -func (r *Repo) ClonedURL(dst, series, name string) *charm.URL { - dst = filepath.Join(dst, series) - if err := os.MkdirAll(dst, os.FileMode(0777)); err != nil { - panic(fmt.Errorf("cannot make destination directory: %v", err)) - } - clone(dst, r.CharmDirPath(name)) - return &charm.URL{ - Schema: "local", - Name: name, - Revision: -1, - Series: series, - } -} - -// CharmArchivePath returns the path to a new charm archive file -// in the directory dst, created from the charm directory named name.
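Taken together, the Repo helpers above are typically combined in a test along these lines (a sketch: "charm-repo" is a hypothetical directory shipped with the calling test package, "dummy" a charm inside it, and c is the *gc.C of the running test):

	repo := testing.NewRepo("charm-repo", "precise")
	dir := repo.CharmDir("dummy")                     // read a charm in place
	path := repo.CharmArchivePath(c.MkDir(), "dummy") // or bundle it into an archive
	_, _ = dir, path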
-func (r *Repo) CharmArchivePath(dst, name string) string { - dir := r.CharmDir(name) - path := filepath.Join(dst, "archive.charm") - file, err := os.Create(path) - check(err) - defer file.Close() - check(dir.ArchiveTo(file)) - return path -} - -// BundleArchivePath returns the path to a new bundle archive file -// in the directory dst, created from the bundle directory named name. -func (r *Repo) BundleArchivePath(dst, name string) string { - dir := r.BundleDir(name) - path := filepath.Join(dst, "archive.bundle") - file, err := os.Create(path) - check(err) - defer file.Close() - check(dir.ArchiveTo(file)) - return path -} - -// CharmArchive returns an actual charm.CharmArchive created from a new -// charm archive file created from the charm directory named name, in -// the directory dst. -func (r *Repo) CharmArchive(dst, name string) *charm.CharmArchive { - ch, err := charm.ReadCharmArchive(r.CharmArchivePath(dst, name)) - check(err) - return ch -} - -// MockCharmStore implements charm/charmrepo.Interface and is used to isolate -// tests that would otherwise need to hit the real charm store. -type MockCharmStore struct { - charms map[string]map[int]*charm.CharmArchive - - mu sync.Mutex // protects the following fields - authAttrs string - testMode bool - defaultSeries string -} - -func NewMockCharmStore() *MockCharmStore { - return &MockCharmStore{charms: map[string]map[int]*charm.CharmArchive{}} -} - -// SetAuthAttrs overwrites the value returned by AuthAttrs. -func (s *MockCharmStore) SetAuthAttrs(auth string) { - s.mu.Lock() - defer s.mu.Unlock() - s.authAttrs = auth -} - -// AuthAttrs returns the AuthAttrs for this charm store. -func (s *MockCharmStore) AuthAttrs() string { - s.mu.Lock() - defer s.mu.Unlock() - return s.authAttrs -} - -// WithTestMode returns a repository Interface where testMode is set to the value -// passed to this method. -func (s *MockCharmStore) WithTestMode(testMode bool) charmrepo.Interface { - s.mu.Lock() - defer s.mu.Unlock() - s.testMode = testMode - return s -} - -// TestMode returns the test mode setting of this charm store. -func (s *MockCharmStore) TestMode() bool { - s.mu.Lock() - defer s.mu.Unlock() - return s.testMode -} - -// SetDefaultSeries overwrites the default series for this charm store. -func (s *MockCharmStore) SetDefaultSeries(series string) { - s.mu.Lock() - defer s.mu.Unlock() - s.defaultSeries = series -} - -// DefaultSeries returns the default series for this charm store. -func (s *MockCharmStore) DefaultSeries() string { - s.mu.Lock() - defer s.mu.Unlock() - return s.defaultSeries -} - -// Resolve implements charm/charmrepo.Interface.Resolve. -func (s *MockCharmStore) Resolve(ref *charm.Reference) (*charm.URL, error) { - return ref.URL(s.DefaultSeries()) -} - -// SetCharm adds and removes charms in s. The affected charm is identified by -// charmURL, which must be revisioned. If archive is nil, the charm will be -// removed; otherwise, it will be stored. It is an error to store an archive -// under a charmURL that does not share its name and revision.
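A hedged sketch of the intended call pattern (repo and c are as in the Repo example above; the URL's revision is assumed to match the archived dummy charm's revision):

	store := testing.NewMockCharmStore()
	archive := repo.CharmArchive(c.MkDir(), "dummy")
	curl := charm.MustParseURL("cs:precise/dummy-1") // must carry a revision
	if err := store.SetCharm(curl, archive); err != nil {
		// name or revision mismatches with the archive are rejected
	}
	ch, err := store.Get(curl) // returns the stored archive
	_, _ = ch, err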
-// SetCharm adds and removes charms in s. The affected charm is identified by
-// charmURL, which must be revisioned. If archive is nil, the charm will be
-// removed; otherwise, it will be stored. It is an error to store an archive
-// under a charmURL that does not share its name and revision.
-func (s *MockCharmStore) SetCharm(charmURL *charm.URL, archive *charm.CharmArchive) error {
-	base := charmURL.WithRevision(-1).String()
-	if charmURL.Revision < 0 {
-		return fmt.Errorf("bad charm url revision")
-	}
-	if archive == nil {
-		delete(s.charms[base], charmURL.Revision)
-		return nil
-	}
-	archiveRev := archive.Revision()
-	archiveName := archive.Meta().Name
-	if archiveName != charmURL.Name || archiveRev != charmURL.Revision {
-		return fmt.Errorf("charm url %s mismatch with archive %s-%d", charmURL, archiveName, archiveRev)
-	}
-	if _, found := s.charms[base]; !found {
-		s.charms[base] = map[int]*charm.CharmArchive{}
-	}
-	s.charms[base][charmURL.Revision] = archive
-	return nil
-}
-
-// interpret extracts from charmURL information relevant to both Latest and
-// Get. The returned "base" is always the string representation of the
-// unrevisioned part of charmURL; the "rev" will be taken from the charmURL if
-// available, and will otherwise be the revision of the latest charm in the
-// store with the same "base".
-func (s *MockCharmStore) interpret(charmURL *charm.URL) (base string, rev int) {
-	base, rev = charmURL.WithRevision(-1).String(), charmURL.Revision
-	if rev == -1 {
-		for candidate := range s.charms[base] {
-			if candidate > rev {
-				rev = candidate
-			}
-		}
-	}
-	return
-}
-
-// Get implements charm/charmrepo.Interface.Get.
-func (s *MockCharmStore) Get(charmURL *charm.URL) (charm.Charm, error) {
-	base, rev := s.interpret(charmURL)
-	charm, found := s.charms[base][rev]
-	if !found {
-		return nil, fmt.Errorf("charm not found in mock store: %s", charmURL)
-	}
-	return charm, nil
-}
-
-// Latest implements charm/charmrepo.Interface.Latest.
-func (s *MockCharmStore) Latest(charmURLs ...*charm.URL) ([]charmrepo.CharmRevision, error) {
-	result := make([]charmrepo.CharmRevision, len(charmURLs))
-	for i, curl := range charmURLs {
-		charmURL := curl.WithRevision(-1)
-		base, rev := s.interpret(charmURL)
-		if _, found := s.charms[base][rev]; !found {
-			result[i].Err = fmt.Errorf("charm not found in mock store: %s", charmURL)
-		} else {
-			result[i].Revision = rev
-		}
-	}
-	return result, nil
-}
=== removed file 'src/gopkg.in/juju/charm.v5/testing/mockstore.go'
--- src/gopkg.in/juju/charm.v5/testing/mockstore.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charm.v5/testing/mockstore.go 1970-01-01 00:00:00 +0000
@@ -1,213 +0,0 @@
-// Copyright 2012, 2013 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package testing
-
-import (
-	"bytes"
-	"encoding/json"
-	"io"
-	"net"
-	"net/http"
-	"os"
-	"strconv"
-	"strings"
-
-	"github.com/juju/loggo"
-	"github.com/juju/utils"
-	gc "gopkg.in/check.v1"
-
-	"gopkg.in/juju/charm.v5"
-	"gopkg.in/juju/charm.v5/charmrepo"
-)
-
-var logger = loggo.GetLogger("juju.charm.testing.mockstore")
-
-// MockStore provides a mock charm store implementation useful when testing.
-type MockStore struct {
-	mux                     *http.ServeMux
-	listener                net.Listener
-	archiveBytes            []byte
-	archiveSha256           string
-	Downloads               []*charm.URL
-	DownloadsNoStats        []*charm.URL
-	Authorizations          []string
-	Metadata                []string
-	InfoRequestCount        int
-	InfoRequestCountNoStats int
-	DefaultSeries           string
-
-	charms map[string]int
-}
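For orientation, a sketch (not part of the diff) of standing up this HTTP mock; c and repo are assumed to be the gocheck context and test Repo from above, and the map keys are unrevisioned charm URLs:

	store := testing.NewMockStore(c, repo, map[string]int{"cs:series/wordpress": 3})
	defer store.Close()
	// Clients are pointed at store.Address(), which serves the /charm-info,
	// /charm-event and /charm/ endpoints implemented below.
	store.UpdateStoreRevision("cs:series/wordpress", 4)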
-// NewMockStore creates a mock charm store containing the specified charms.
-func NewMockStore(c *gc.C, repo *Repo, charms map[string]int) *MockStore {
-	s := &MockStore{charms: charms, DefaultSeries: "precise"}
-	f, err := os.Open(repo.CharmArchivePath(c.MkDir(), "dummy"))
-	c.Assert(err, gc.IsNil)
-	defer f.Close()
-	buf := &bytes.Buffer{}
-	s.archiveSha256, _, err = utils.ReadSHA256(io.TeeReader(f, buf))
-	c.Assert(err, gc.IsNil)
-	s.archiveBytes = buf.Bytes()
-	c.Assert(err, gc.IsNil)
-	s.mux = http.NewServeMux()
-	s.mux.HandleFunc("/charm-info", s.serveInfo)
-	s.mux.HandleFunc("/charm-event", s.serveEvent)
-	s.mux.HandleFunc("/charm/", s.serveCharm)
-	lis, err := net.Listen("tcp", "127.0.0.1:0")
-	c.Assert(err, gc.IsNil)
-	s.listener = lis
-	go http.Serve(s.listener, s)
-	return s
-}
-
-// Close closes the mock store's socket.
-func (s *MockStore) Close() {
-	s.listener.Close()
-}
-
-// Address returns the URL used to make requests to the mock store.
-func (s *MockStore) Address() string {
-	return "http://" + s.listener.Addr().String()
-}
-
-// UpdateStoreRevision sets the revision of the specified charm to rev.
-func (s *MockStore) UpdateStoreRevision(ch string, rev int) {
-	s.charms[ch] = rev
-}
-
-// ServeHTTP implements the http.Handler interface.
-func (s *MockStore) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	s.mux.ServeHTTP(w, r)
-}
-
-func (s *MockStore) serveInfo(w http.ResponseWriter, r *http.Request) {
-	if metadata := r.Header.Get("Juju-Metadata"); metadata != "" {
-		s.Metadata = append(s.Metadata, metadata)
-		logger.Infof("Juju metadata: " + metadata)
-	}
-
-	r.ParseForm()
-	if r.Form.Get("stats") == "0" {
-		s.InfoRequestCountNoStats += 1
-	} else {
-		s.InfoRequestCount += 1
-	}
-
-	response := map[string]*charmrepo.InfoResponse{}
-	for _, url := range r.Form["charms"] {
-		cr := &charmrepo.InfoResponse{}
-		response[url] = cr
-		charmURL, err := charm.ParseURL(url)
-		if err == charm.ErrUnresolvedUrl {
-			ref, err := charm.ParseReference(url)
-			if err != nil {
-				panic(err)
-			}
-			charmURL, err = ref.URL(s.DefaultSeries)
-			if err != nil {
-				panic(err)
-			}
-		}
-		switch charmURL.Name {
-		case "borken":
-			cr.Errors = append(cr.Errors, "badness")
-		case "terracotta":
-			cr.Errors = append(cr.Errors, "cannot get revision")
-		case "unwise":
-			cr.Warnings = append(cr.Warnings, "foolishness")
-			fallthrough
-		default:
-			if rev, ok := s.charms[charmURL.WithRevision(-1).String()]; ok {
-				if charmURL.Revision == -1 {
-					cr.Revision = rev
-				} else {
-					cr.Revision = charmURL.Revision
-				}
-				cr.Sha256 = s.archiveSha256
-				cr.CanonicalURL = charmURL.String()
-			} else {
-				cr.Errors = append(cr.Errors, "entry not found")
-			}
-		}
-	}
-	data, err := json.Marshal(response)
-	if err != nil {
-		panic(err)
-	}
-	w.Header().Set("Content-Type", "application/json")
-	_, err = w.Write(data)
-	if err != nil {
-		panic(err)
-	}
-}
-
-func (s *MockStore) serveEvent(w http.ResponseWriter, r *http.Request) {
-	r.ParseForm()
-	response := map[string]*charmrepo.EventResponse{}
-	for _, url := range r.Form["charms"] {
-		digest := ""
-		if i := strings.Index(url, "@"); i >= 0 {
-			digest = url[i+1:]
-			url = url[:i]
-		}
-		er := &charmrepo.EventResponse{}
-		response[url] = er
-		if digest != "" && digest != "the-digest" {
-			er.Kind = "not-found"
-			er.Errors = []string{"entry not found"}
-			continue
-		}
-		charmURL := charm.MustParseURL(url)
-		switch charmURL.Name {
-		case "borken":
-			er.Kind = "publish-error"
-			er.Errors = append(er.Errors, "badness")
-		case "unwise":
-			er.Warnings = append(er.Warnings, "foolishness")
-			fallthrough
-		default:
-			if rev, ok :=
s.charms[charmURL.WithRevision(-1).String()]; ok { - er.Kind = "published" - er.Revision = rev - er.Digest = "the-digest" - } else { - er.Kind = "not-found" - er.Errors = []string{"entry not found"} - } - } - } - data, err := json.Marshal(response) - if err != nil { - panic(err) - } - w.Header().Set("Content-Type", "application/json") - _, err = w.Write(data) - if err != nil { - panic(err) - } -} - -func (s *MockStore) serveCharm(w http.ResponseWriter, r *http.Request) { - charmURL := charm.MustParseURL("cs:" + r.URL.Path[len("/charm/"):]) - - r.ParseForm() - if r.Form.Get("stats") == "0" { - s.DownloadsNoStats = append(s.DownloadsNoStats, charmURL) - } else { - s.Downloads = append(s.Downloads, charmURL) - } - - if auth := r.Header.Get("Authorization"); auth != "" { - s.Authorizations = append(s.Authorizations, auth) - } - - w.Header().Set("Connection", "close") - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Length", strconv.Itoa(len(s.archiveBytes))) - _, err := w.Write(s.archiveBytes) - if err != nil { - panic(err) - } -} === removed file 'src/gopkg.in/juju/charm.v5/testing/package_test.go' --- src/gopkg.in/juju/charm.v5/testing/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/testing/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package testing_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/gopkg.in/juju/charm.v5/testing/suite.go' --- src/gopkg.in/juju/charm.v5/testing/suite.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/testing/suite.go 1970-01-01 00:00:00 +0000 @@ -1,34 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package testing - -import ( - jujutesting "github.com/juju/testing" - gc "gopkg.in/check.v1" -) - -type IsolatedMgoSuite struct { - jujutesting.IsolationSuite - jujutesting.MgoSuite -} - -func (s *IsolatedMgoSuite) SetUpSuite(c *gc.C) { - s.IsolationSuite.SetUpSuite(c) - s.MgoSuite.SetUpSuite(c) -} - -func (s *IsolatedMgoSuite) TearDownSuite(c *gc.C) { - s.MgoSuite.TearDownSuite(c) - s.IsolationSuite.TearDownSuite(c) -} - -func (s *IsolatedMgoSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.MgoSuite.SetUpTest(c) -} - -func (s *IsolatedMgoSuite) TearDownTest(c *gc.C) { - s.MgoSuite.TearDownTest(c) - s.IsolationSuite.TearDownTest(c) -} === removed file 'src/gopkg.in/juju/charm.v5/testing/testcharm.go' --- src/gopkg.in/juju/charm.v5/testing/testcharm.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/testing/testcharm.go 1970-01-01 00:00:00 +0000 @@ -1,227 +0,0 @@ -// Copyright 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. - -package testing - -import ( - "archive/zip" - "bytes" - "fmt" - "os" - "path" - "strings" - "sync" - - "github.com/juju/testing/filetesting" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" -) - -// Charm holds a charm for testing. It does not -// have a representation on disk by default, but -// can be written to disk using Archive and its ExpandTo -// method. It implements the charm.Charm interface. -// -// All methods on Charm may be called concurrently. -type Charm struct { - meta *charm.Meta - config *charm.Config - actions *charm.Actions - metrics *charm.Metrics - revision int - - files filetesting.Entries - - makeArchiveOnce sync.Once - archiveBytes []byte - archive *charm.CharmArchive -} - -// CharmSpec holds the specification for a charm. 
The fields -// hold data in YAML format. -type CharmSpec struct { - // Meta holds the contents of metadata.yaml. - Meta string - - // Config holds the contents of config.yaml. - Config string - - // Actions holds the contents of actions.yaml. - Actions string - - // Metrics holds the contents of metrics.yaml. - Metrics string - - // Files holds any additional files that should be - // added to the charm. If this is nil, a minimal set - // of files will be added to ensure the charm is readable. - Files []filetesting.Entry - - // Revision specifies the revision of the charm. - Revision int -} - -// NewCharm returns a new charm -func NewCharm(c *gc.C, spec CharmSpec) *Charm { - ch := &Charm{ - revision: spec.Revision, - } - var err error - ch.meta, err = charm.ReadMeta(strings.NewReader(spec.Meta)) - c.Assert(err, gc.IsNil) - ch.files = append(ch.files, filetesting.File{ - Path: "metadata.yaml", - Data: spec.Meta, - Perm: 0644, - }) - - if spec.Config != "" { - ch.config, err = charm.ReadConfig(strings.NewReader(spec.Config)) - c.Assert(err, gc.IsNil) - ch.files = append(ch.files, filetesting.File{ - Path: "config.yaml", - Data: spec.Config, - Perm: 0644, - }) - } - if spec.Actions != "" { - ch.actions, err = charm.ReadActionsYaml(strings.NewReader(spec.Actions)) - c.Assert(err, gc.IsNil) - ch.files = append(ch.files, filetesting.File{ - Path: "actions.yaml", - Data: spec.Actions, - Perm: 0644, - }) - } - if spec.Metrics != "" { - ch.metrics, err = charm.ReadMetrics(strings.NewReader(spec.Metrics)) - c.Assert(err, gc.IsNil) - ch.files = append(ch.files, filetesting.File{ - Path: "metrics.yaml", - Data: spec.Metrics, - Perm: 0644, - }) - } - if spec.Files == nil { - ch.files = append(ch.files, filetesting.File{ - Path: "hooks/install", - Data: "#!/bin/sh\n", - Perm: 0755, - }, filetesting.File{ - Path: "hooks/start", - Data: "#!/bin/sh\n", - Perm: 0755, - }) - } else { - ch.files = append(ch.files, spec.Files...) - // Check for duplicates. - names := make(map[string]bool) - for _, f := range ch.files { - name := path.Clean(f.GetPath()) - if names[name] { - panic(fmt.Errorf("duplicate file entry %q", f.GetPath())) - } - names[name] = true - } - } - return ch -} - -// Meta implements charm.Charm.Meta. -func (ch *Charm) Meta() *charm.Meta { - return ch.meta -} - -// Config implements charm.Charm.Config. -func (ch *Charm) Config() *charm.Config { - if ch.config == nil { - return &charm.Config{ - Options: map[string]charm.Option{}, - } - } - return ch.config -} - -// Metrics implements charm.Charm.Metrics. -func (ch *Charm) Metrics() *charm.Metrics { - return ch.metrics -} - -// Actions implements charm.Charm.Actions. -func (ch *Charm) Actions() *charm.Actions { - if ch.actions == nil { - return &charm.Actions{} - } - return ch.actions -} - -// Revision implements charm.Charm.Revision. -func (ch *Charm) Revision() int { - return ch.revision -} - -// Archive returns a charm archive holding the charm. -func (ch *Charm) Archive() *charm.CharmArchive { - ch.makeArchiveOnce.Do(ch.makeArchive) - return ch.archive -} - -// ArchiveBytes returns the contents of the charm archive -// holding the charm. -func (ch *Charm) ArchiveBytes() []byte { - ch.makeArchiveOnce.Do(ch.makeArchive) - return ch.archiveBytes -} - -func (ch *Charm) makeArchive() { - var buf bytes.Buffer - zw := zip.NewWriter(&buf) - - for _, f := range ch.files { - addZipEntry(zw, f) - } - if err := zw.Close(); err != nil { - panic(err) - } - // ReadCharmArchiveFromReader requires a ReaderAt, so make one. 
- r := bytes.NewReader(buf.Bytes()) - - // Actually make the charm archive. - archive, err := charm.ReadCharmArchiveFromReader(r, int64(buf.Len())) - if err != nil { - panic(err) - } - ch.archiveBytes = buf.Bytes() - ch.archive = archive - ch.archive.SetRevision(ch.revision) -} - -func addZipEntry(zw *zip.Writer, f filetesting.Entry) { - h := &zip.FileHeader{ - Name: f.GetPath(), - // Don't bother compressing - the contents are so small that - // it will just slow things down for no particular benefit. - Method: zip.Store, - } - contents := "" - switch f := f.(type) { - case filetesting.Dir: - h.SetMode(os.ModeDir | 0755) - case filetesting.File: - h.SetMode(f.Perm) - contents = f.Data - case filetesting.Symlink: - h.SetMode(os.ModeSymlink | 0777) - contents = f.Link - } - w, err := zw.CreateHeader(h) - if err != nil { - panic(err) - } - if contents != "" { - if _, err := w.Write([]byte(contents)); err != nil { - panic(err) - } - } -} === removed file 'src/gopkg.in/juju/charm.v5/testing/testcharm_test.go' --- src/gopkg.in/juju/charm.v5/testing/testcharm_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/testing/testcharm_test.go 1970-01-01 00:00:00 +0000 @@ -1,182 +0,0 @@ -package testing_test - -import ( - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/filetesting" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/testing" -) - -var _ = gc.Suite(&testCharmSuite{}) - -type testCharmSuite struct{} - -var newCharmTests = []struct { - about string - spec testing.CharmSpec - expectMeta *charm.Meta - expectConfig *charm.Config - expectActions *charm.Actions - expectMetrics *charm.Metrics - expectFiles filetesting.Entries - expectRevision int -}{{ - about: "all charm populated without files", - spec: testing.CharmSpec{ - Meta: ` -name: mysql -summary: "Database engine" -description: "A pretty popular database" -provides: - server: mysql -`, - Config: ` -options: - blog-title: {default: My Title, description: Config description, type: string} -`, - Actions: ` -snapshot: - description: Take a snapshot of the database. - params: - outfile: - description: outfile description - type: string - default: foo.bz2 -`, - Metrics: ` -metrics: - pings: - type: gauge - description: Description of the metric. 
-`, - Revision: 99, - }, - expectMeta: &charm.Meta{ - Name: "mysql", - Format: 1, - Summary: "Database engine", - Description: "A pretty popular database", - Provides: map[string]charm.Relation{ - "server": { - Name: "server", - Role: charm.RoleProvider, - Interface: "mysql", - Scope: charm.ScopeGlobal, - }, - }, - }, - expectConfig: &charm.Config{ - Options: map[string]charm.Option{ - "blog-title": { - Type: "string", - Description: "Config description", - Default: "My Title", - }, - }, - }, - expectActions: &charm.Actions{ - ActionSpecs: map[string]charm.ActionSpec{ - "snapshot": { - Description: "Take a snapshot of the database.", - Params: map[string]interface{}{ - "title": "snapshot", - "description": "Take a snapshot of the database.", - "type": "object", - "properties": map[string]interface{}{ - "outfile": map[string]interface{}{ - "description": "outfile description", - "type": "string", - "default": "foo.bz2", - }, - }, - }, - }, - }, - }, - expectMetrics: &charm.Metrics{ - Metrics: map[string]charm.Metric{ - "pings": { - Type: charm.MetricTypeGauge, - Description: "Description of the metric.", - }, - }, - }, - expectFiles: filetesting.Entries{ - filetesting.File{ - Path: "hooks/install", - Data: "#!/bin/sh\n", - Perm: 0755, - }, - filetesting.File{ - Path: "hooks/start", - Data: "#!/bin/sh\n", - Perm: 0755, - }, - }, - expectRevision: 99, -}, { - about: "charm with some extra files specified", - spec: testing.CharmSpec{ - Meta: ` -name: mycharm -summary: summary -description: description -`, - Files: filetesting.Entries{ - filetesting.File{ - Path: "hooks/customhook", - Data: "custom stuff", - Perm: 0755, - }, - }, - }, - expectMeta: &charm.Meta{ - Name: "mycharm", - Summary: "summary", - Description: "description", - Format: 1, - }, - expectConfig: &charm.Config{ - Options: map[string]charm.Option{}, - }, - expectActions: &charm.Actions{}, - expectFiles: filetesting.Entries{ - filetesting.File{ - Path: "hooks/customhook", - Data: "custom stuff", - Perm: 0755, - }, - }, -}, -} - -func (*testCharmSuite) TestNewCharm(c *gc.C) { - for i, test := range newCharmTests { - c.Logf("test %d: %s", i, test.about) - ch := testing.NewCharm(c, test.spec) - c.Assert(ch.Meta(), jc.DeepEquals, test.expectMeta) - c.Assert(ch.Config(), jc.DeepEquals, test.expectConfig) - c.Assert(ch.Metrics(), jc.DeepEquals, test.expectMetrics) - c.Assert(ch.Actions(), jc.DeepEquals, test.expectActions) - c.Assert(ch.Revision(), gc.Equals, test.expectRevision) - - archive := ch.Archive() - c.Assert(archive.Meta(), jc.DeepEquals, test.expectMeta) - c.Assert(archive.Config(), jc.DeepEquals, test.expectConfig) - c.Assert(archive.Metrics(), jc.DeepEquals, test.expectMetrics) - c.Assert(archive.Actions(), jc.DeepEquals, test.expectActions) - c.Assert(archive.Revision(), gc.Equals, test.expectRevision) - - // Check that we get the same archive again. - c.Assert(ch.Archive(), gc.Equals, archive) - c.Assert(ch.ArchiveBytes(), gc.Not(gc.HasLen), 0) - - dir := c.MkDir() - err := archive.ExpandTo(dir) - c.Assert(err, gc.IsNil) - test.expectFiles.Check(c, dir) - - } -} === removed file 'src/gopkg.in/juju/charm.v5/url.go' --- src/gopkg.in/juju/charm.v5/url.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charm.v5/url.go 1970-01-01 00:00:00 +0000 @@ -1,388 +0,0 @@ -// Copyright 2011, 2012, 2013 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE file for details. 
-
-package charm
-
-import (
-	"encoding/json"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-
-	"github.com/juju/names"
-	"gopkg.in/mgo.v2/bson"
-)
-
-// Location represents a charm location, which must declare a path component
-// and a string representation.
-type Location interface {
-	Path() string
-	String() string
-}
-
-// URL represents a fully resolved charm location with a specific series, such
-// as:
-//
-//	cs:~joe/oneiric/wordpress
-//	cs:oneiric/wordpress-42
-//	local:oneiric/wordpress
-//
-type URL struct {
-	Schema   string // "cs" or "local"
-	User     string // "joe"
-	Name     string // "wordpress"
-	Revision int    // -1 if unset, N otherwise
-	Series   string
-}
-
-// Reference represents a charm location with a series
-// that may be unresolved.
-//
-//	cs:~joe/wordpress
-//	cs:wordpress-42
-//	cs:precise/wordpress
-type Reference URL
-
-var ErrUnresolvedUrl error = fmt.Errorf("charm url series is not resolved")
-
-var (
-	validSeries = regexp.MustCompile("^[a-z]+([a-z0-9]+)?$")
-	validName   = regexp.MustCompile("^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$")
-)
-
-// IsValidSeries returns whether series is a valid series in charm URLs.
-func IsValidSeries(series string) bool {
-	return validSeries.MatchString(series)
-}
-
-// IsValidName returns whether name is a valid charm name.
-func IsValidName(name string) bool {
-	return validName.MatchString(name)
-}
-
-// WithRevision returns a URL equivalent to url but with Revision set
-// to revision.
-func (url *URL) WithRevision(revision int) *URL {
-	urlCopy := *url
-	urlCopy.Revision = revision
-	return &urlCopy
-}
-
-// MustParseURL works like ParseURL, but panics in case of errors.
-func MustParseURL(url string) *URL {
-	u, err := ParseURL(url)
-	if err != nil {
-		panic(err)
-	}
-	return u
-}
-
-// ParseURL parses the provided charm URL string into its respective
-// structure.
-func ParseURL(urlStr string) (*URL, error) {
-	r, err := parseReference(urlStr)
-	if err != nil {
-		return nil, err
-	}
-	if r.Series == "" {
-		return nil, ErrUnresolvedUrl
-	}
-	if r.Schema == "" {
-		return nil, fmt.Errorf("charm URL has no schema: %q", urlStr)
-	}
-	url, err := r.URL("")
-	if err != nil {
-		return nil, err // should never happen, because series is set.
-	}
-	return url, nil
-}
-
-// URL returns a full URL from the reference, creating
-// a new URL value if necessary with the given default
-// series. It returns an error if ref does not specify
-// a series and defaultSeries is empty.
-func (ref *Reference) URL(defaultSeries string) (*URL, error) {
-	if ref.Series != "" {
-		return (*URL)(ref), nil
-	}
-	if defaultSeries == "" {
-		return nil, ErrUnresolvedUrl
-	}
-	if !IsValidSeries(defaultSeries) {
-		return nil, fmt.Errorf("default series %q is invalid", defaultSeries)
-	}
-	url := *(*URL)(ref)
-	url.Series = defaultSeries
-	return &url, nil
-}
-
-// MustParseReference works like ParseReference, but panics in case of errors.
-func MustParseReference(url string) *Reference {
-	u, err := ParseReference(url)
-	if err != nil {
-		panic(err)
-	}
-	return u
-}
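A brief sketch (not part of the diff) of the parsing behaviour defined above, using only the API shown:

	curl := charm.MustParseURL("cs:precise/wordpress-42")
	// curl.Schema == "cs", curl.Series == "precise", curl.Name == "wordpress", curl.Revision == 42
	base := curl.WithRevision(-1) // cs:precise/wordpress; curl itself is left unchanged
	_, err := charm.ParseURL("cs:wordpress") // fails with ErrUnresolvedUrl: no series given
	_, _, _ = curl, base, err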
-// ParseReference returns a charm reference inferred from src. The provided
-// src may be a valid URL or it may be an alias in one of the following formats:
-//
-//	name
-//	name-revision
-//	series/name
-//	series/name-revision
-//	schema:name
-//	schema:name-revision
-//	schema:~user/name
-//	schema:~user/name-revision
-//
-// A missing schema is assumed to be 'cs'.
-func ParseReference(url string) (*Reference, error) {
-	ref, err := parseReference(url)
-	if err != nil {
-		return nil, err
-	}
-	if ref.Schema == "" {
-		ref.Schema = "cs"
-	}
-	return ref, nil
-}
-
-func parseReference(url string) (*Reference, error) {
-	var r Reference
-	i := strings.Index(url, ":")
-	if i >= 0 {
-		r.Schema = url[:i]
-		if r.Schema != "cs" && r.Schema != "local" {
-			return nil, fmt.Errorf("charm URL has invalid schema: %q", url)
-		}
-		i++
-	} else {
-		i = 0
-	}
-	parts := strings.Split(url[i:], "/")
-	if len(parts) < 1 || len(parts) > 3 {
-		return nil, fmt.Errorf("charm URL has invalid form: %q", url)
-	}
-
-	// ~<user>
-	if strings.HasPrefix(parts[0], "~") {
-		if r.Schema == "local" {
-			return nil, fmt.Errorf("local charm URL with user name: %q", url)
-		}
-		r.User = parts[0][1:]
-		if !names.IsValidUser(r.User) {
-			return nil, fmt.Errorf("charm URL has invalid user name: %q", url)
-		}
-		parts = parts[1:]
-	}
-	if len(parts) > 2 {
-		return nil, fmt.Errorf("charm URL has invalid form: %q", url)
-	}
-	// <series>
-	if len(parts) == 2 {
-		r.Series = parts[0]
-		if !IsValidSeries(r.Series) {
-			return nil, fmt.Errorf("charm URL has invalid series: %q", url)
-		}
-		parts = parts[1:]
-	}
-	if len(parts) < 1 {
-		return nil, fmt.Errorf("charm URL without charm name: %q", url)
-	}
-
-	// <name>[-<revision>]
-	r.Name = parts[0]
-	r.Revision = -1
-	for i := len(r.Name) - 1; i > 0; i-- {
-		c := r.Name[i]
-		if c >= '0' && c <= '9' {
-			continue
-		}
-		if c == '-' && i != len(r.Name)-1 {
-			var err error
-			r.Revision, err = strconv.Atoi(r.Name[i+1:])
-			if err != nil {
-				panic(err) // We just checked it was right.
-			}
-			r.Name = r.Name[:i]
-		}
-		break
-	}
-	if !IsValidName(r.Name) {
-		return nil, fmt.Errorf("charm URL has invalid charm name: %q", url)
-	}
-	return &r, nil
-}
-
-func (r *Reference) path() string {
-	var parts []string
-	if r.User != "" {
-		parts = append(parts, fmt.Sprintf("~%s", r.User))
-	}
-	if r.Series != "" {
-		parts = append(parts, r.Series)
-	}
-	if r.Revision >= 0 {
-		parts = append(parts, fmt.Sprintf("%s-%d", r.Name, r.Revision))
-	} else {
-		parts = append(parts, r.Name)
-	}
-	return strings.Join(parts, "/")
-}
-
-func (r Reference) Path() string {
-	return r.path()
-}
-
-// InferURL parses src as a reference and fills out the series in the
-// returned URL using defaultSeries if necessary.
-//
-// This function is deprecated. New code should use ParseReference
-// and/or Reference.URL instead.
-func InferURL(src, defaultSeries string) (*URL, error) {
-	ref, err := ParseReference(src)
-	if err != nil {
-		return nil, err
-	}
-	url, err := ref.URL(defaultSeries)
-	if err != nil {
-		return nil, fmt.Errorf("cannot infer charm URL for %q: %v", src, err)
-	}
-	return url, nil
-}
-
-// Reference returns a reference aliased to u. Note that
-// all URLs are valid references.
-func (u *URL) Reference() *Reference {
-	return (*Reference)(u)
-}
-
-func (u *URL) Path() string {
-	return (*Reference)(u).path()
-}
-
-func (u *URL) String() string {
-	return fmt.Sprintf("%s:%s", u.Schema, u.Path())
-}
-
-func (r Reference) String() string {
-	return fmt.Sprintf("%s:%s", r.Schema, r.Path())
-}
-
-// GetBSON turns u into a bson.Getter so it can be saved directly
-// on a MongoDB database with mgo.
-func (u *URL) GetBSON() (interface{}, error) {
-	if u == nil {
-		return nil, nil
-	}
-	return u.String(), nil
-}
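And the corresponding Reference flow, again as an illustrative sketch rather than part of the diff:

	ref, _ := charm.ParseReference("wordpress-7") // cs:wordpress-7, series still unresolved
	curl, _ := ref.URL("precise")                 // cs:precise/wordpress-7
	_, err := ref.URL("")                         // fails with ErrUnresolvedUrl
	_, _ = curl, err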
-// SetBSON turns u into a bson.Setter so it can be loaded directly
-// from a MongoDB database with mgo.
-func (u *URL) SetBSON(raw bson.Raw) error {
-	if raw.Kind == 10 {
-		return bson.SetZero
-	}
-	var s string
-	err := raw.Unmarshal(&s)
-	if err != nil {
-		return err
-	}
-	url, err := ParseURL(s)
-	if err != nil {
-		return err
-	}
-	*u = *url
-	return nil
-}
-
-func (u *URL) MarshalJSON() ([]byte, error) {
-	if u == nil {
-		panic("cannot marshal nil *charm.URL")
-	}
-	return json.Marshal(u.String())
-}
-
-func (u *URL) UnmarshalJSON(b []byte) error {
-	var s string
-	if err := json.Unmarshal(b, &s); err != nil {
-		return err
-	}
-	url, err := ParseURL(s)
-	if err != nil {
-		return err
-	}
-	*u = *url
-	return nil
-}
-
-// GetBSON turns r into a bson.Getter so it can be saved directly
-// on a MongoDB database with mgo.
-func (r *Reference) GetBSON() (interface{}, error) {
-	if r == nil {
-		return nil, nil
-	}
-	return r.String(), nil
-}
-
-// SetBSON turns r into a bson.Setter so it can be loaded directly
-// from a MongoDB database with mgo.
-func (r *Reference) SetBSON(raw bson.Raw) error {
-	if raw.Kind == 10 {
-		return bson.SetZero
-	}
-	var s string
-	err := raw.Unmarshal(&s)
-	if err != nil {
-		return err
-	}
-	ref, err := ParseReference(s)
-	if err != nil {
-		return err
-	}
-	*r = *ref
-	return nil
-}
-
-func (r *Reference) MarshalJSON() ([]byte, error) {
-	return json.Marshal(r.String())
-}
-
-func (r *Reference) UnmarshalJSON(b []byte) error {
-	var s string
-	if err := json.Unmarshal(b, &s); err != nil {
-		return err
-	}
-	ref, err := ParseReference(s)
-	if err != nil {
-		return err
-	}
-	*r = *ref
-	return nil
-}
-
-// Quote translates a charm url string into one which can be safely used
-// in a file path. ASCII letters, ASCII digits, dot and dash stay the
-// same; other characters are translated to their hex representation
-// surrounded by underscores.
-func Quote(unsafe string) string {
-	safe := make([]byte, 0, len(unsafe)*4)
-	for i := 0; i < len(unsafe); i++ {
-		b := unsafe[i]
-		switch {
-		case b >= 'a' && b <= 'z',
-			b >= 'A' && b <= 'Z',
-			b >= '0' && b <= '9',
-			b == '.',
-			b == '-':
-			safe = append(safe, b)
-		default:
-			safe = append(safe, fmt.Sprintf("_%02x_", b)...)
-		}
-	}
-	return string(safe)
-}
=== removed file 'src/gopkg.in/juju/charm.v5/url_test.go'
--- src/gopkg.in/juju/charm.v5/url_test.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charm.v5/url_test.go 1970-01-01 00:00:00 +0000
@@ -1,395 +0,0 @@
-// Copyright 2011, 2012, 2013 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
- -package charm_test - -import ( - "encoding/json" - "fmt" - "strings" - - gc "gopkg.in/check.v1" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charm.v5" -) - -type URLSuite struct{} - -var _ = gc.Suite(&URLSuite{}) - -var urlTests = []struct { - s, err string - exact string - ref *charm.Reference -}{{ - s: "cs:~user/series/name", - ref: &charm.Reference{"cs", "user", "name", -1, "series"}, -}, { - s: "cs:~user/series/name-0", - ref: &charm.Reference{"cs", "user", "name", 0, "series"}, -}, { - s: "cs:series/name", - ref: &charm.Reference{"cs", "", "name", -1, "series"}, -}, { - s: "cs:series/name-42", - ref: &charm.Reference{"cs", "", "name", 42, "series"}, -}, { - s: "local:series/name-1", - ref: &charm.Reference{"local", "", "name", 1, "series"}, -}, { - s: "local:series/name", - ref: &charm.Reference{"local", "", "name", -1, "series"}, -}, { - s: "local:series/n0-0n-n0", - ref: &charm.Reference{"local", "", "n0-0n-n0", -1, "series"}, -}, { - s: "cs:~user/name", - ref: &charm.Reference{"cs", "user", "name", -1, ""}, -}, { - s: "cs:name", - ref: &charm.Reference{"cs", "", "name", -1, ""}, -}, { - s: "local:name", - ref: &charm.Reference{"local", "", "name", -1, ""}, -}, { - s: "bs:~user/series/name-1", - err: "charm URL has invalid schema: .*", -}, { - s: ":foo", - err: "charm URL has invalid schema: .*", -}, { - s: "cs:~1/series/name-1", - err: "charm URL has invalid user name: .*", -}, { - s: "cs:~user", - err: "charm URL without charm name: .*", -}, { - s: "cs:~user/1/name-1", - err: "charm URL has invalid series: .*", -}, { - s: "cs:~user/series/name-1-2", - err: "charm URL has invalid charm name: .*", -}, { - s: "cs:~user/series/name-1-name-2", - err: "charm URL has invalid charm name: .*", -}, { - s: "cs:~user/series/name--name-2", - err: "charm URL has invalid charm name: .*", -}, { - s: "cs:foo-1-2", - err: "charm URL has invalid charm name: .*", -}, { - s: "cs:~user/series/huh/name-1", - err: "charm URL has invalid form: .*", -}, { - s: "cs:/name", - err: "charm URL has invalid series: .*", -}, { - s: "local:~user/series/name", - err: "local charm URL with user name: .*", -}, { - s: "local:~user/name", - err: "local charm URL with user name: .*", -}, { - s: "precise/wordpress", - exact: "cs:precise/wordpress", - ref: &charm.Reference{"cs", "", "wordpress", -1, "precise"}, - err: `charm URL has no schema: "precise/wordpress"`, -}, { - s: "foo", - exact: "cs:foo", - ref: &charm.Reference{"cs", "", "foo", -1, ""}, -}, { - s: "foo-1", - exact: "cs:foo-1", - ref: &charm.Reference{"cs", "", "foo", 1, ""}, -}, { - s: "n0-n0-n0", - exact: "cs:n0-n0-n0", - ref: &charm.Reference{"cs", "", "n0-n0-n0", -1, ""}, -}, { - s: "cs:foo", - exact: "cs:foo", - ref: &charm.Reference{"cs", "", "foo", -1, ""}, -}, { - s: "local:foo", - exact: "local:foo", - ref: &charm.Reference{"local", "", "foo", -1, ""}, -}, { - s: "series/foo", - exact: "cs:series/foo", - ref: &charm.Reference{"cs", "", "foo", -1, "series"}, - err: `charm URL has no schema: "series/foo"`, -}, { - s: "series/foo/bar", - err: `charm URL has invalid form: "series/foo/bar"`, -}, { - s: "cs:foo/~blah", - err: `charm URL has invalid charm name: "cs:foo/~blah"`, -}} - -func (s *URLSuite) TestParseURL(c *gc.C) { - for i, t := range urlTests { - c.Logf("test %d: %q", i, t.s) - url, uerr := charm.ParseURL(t.s) - ref, rerr := charm.ParseReference(t.s) - - expectStr := t.s - if t.exact != "" { - expectStr = t.exact - } - if t.ref != nil { - // ParseReference, at least, should have succeeded. 
- c.Assert(rerr, gc.IsNil) - c.Assert(ref, gc.DeepEquals, t.ref) - c.Check(ref.String(), gc.Equals, expectStr) - } - if t.err != "" { - c.Assert(uerr, gc.ErrorMatches, t.err) - c.Assert(url, gc.IsNil) - if t.ref == nil { - c.Assert(rerr, gc.NotNil) - // Errors from both ParseURL and ParseReference should match. - c.Check(uerr.Error(), gc.Equals, rerr.Error()) - c.Check(ref, gc.IsNil) - } - continue - } - if t.ref.Series == "" { - // ParseURL with an empty series should report an unresolved error. - c.Assert(url, gc.IsNil) - c.Assert(uerr, gc.Equals, charm.ErrUnresolvedUrl) - continue - } - // When ParseURL succeeds, it should return the same thing - // as ParseReference. - c.Assert(uerr, gc.IsNil) - c.Check(url.Reference(), gc.DeepEquals, ref) - - // URL parsing should always be reversible. - c.Check(url.String(), gc.Equals, t.s) - } -} - -var inferTests = []struct { - vague, exact string -}{ - {"foo", "cs:defseries/foo"}, - {"foo-1", "cs:defseries/foo-1"}, - {"n0-n0-n0", "cs:defseries/n0-n0-n0"}, - {"cs:foo", "cs:defseries/foo"}, - {"local:foo", "local:defseries/foo"}, - {"series/foo", "cs:series/foo"}, - {"cs:series/foo", "cs:series/foo"}, - {"local:series/foo", "local:series/foo"}, - {"cs:~user/foo", "cs:~user/defseries/foo"}, - {"cs:~user/series/foo", "cs:~user/series/foo"}, - {"local:~user/series/foo", "local:~user/series/foo"}, - {"bs:foo", "bs:defseries/foo"}, - {"cs:~1/foo", "cs:~1/defseries/foo"}, - {"cs:foo-1-2", "cs:defseries/foo-1-2"}, -} - -func (s *URLSuite) TestInferURL(c *gc.C) { - for i, t := range inferTests { - c.Logf("test %d", i) - comment := gc.Commentf("InferURL(%q, %q)", t.vague, "defseries") - inferred, ierr := charm.InferURL(t.vague, "defseries") - parsed, perr := charm.ParseURL(t.exact) - if perr == nil { - c.Check(inferred, gc.DeepEquals, parsed, comment) - c.Check(ierr, gc.IsNil) - } else { - expect := perr.Error() - if t.vague != t.exact { - if colIdx := strings.Index(expect, ":"); colIdx > 0 { - expect = expect[:colIdx] - } - } - c.Check(ierr.Error(), gc.Matches, expect+".*", comment) - } - } - u, err := charm.InferURL("~blah", "defseries") - c.Assert(u, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "charm URL without charm name: .*") -} - -var inferNoDefaultSeriesTests = []struct { - vague, exact string - resolved bool -}{ - {"foo", "", false}, - {"foo-1", "", false}, - {"cs:foo", "", false}, - {"cs:~user/foo", "", false}, - {"series/foo", "cs:series/foo", true}, - {"cs:series/foo", "cs:series/foo", true}, - {"cs:~user/series/foo", "cs:~user/series/foo", true}, -} - -func (s *URLSuite) TestInferURLNoDefaultSeries(c *gc.C) { - for i, t := range inferNoDefaultSeriesTests { - c.Logf("%d: %s", i, t.vague) - inferred, err := charm.InferURL(t.vague, "") - if t.exact == "" { - c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot infer charm URL for %q: charm url series is not resolved", t.vague)) - } else { - parsed, err := charm.ParseURL(t.exact) - c.Assert(err, gc.IsNil) - c.Assert(inferred, gc.DeepEquals, parsed, gc.Commentf(`InferURL(%q, "")`, t.vague)) - } - } -} - -var validTests = []struct { - valid func(string) bool - string string - expect bool -}{ - - {charm.IsValidName, "", false}, - {charm.IsValidName, "wordpress", true}, - {charm.IsValidName, "Wordpress", false}, - {charm.IsValidName, "word-press", true}, - {charm.IsValidName, "word press", false}, - {charm.IsValidName, "word^press", false}, - {charm.IsValidName, "-wordpress", false}, - {charm.IsValidName, "wordpress-", false}, - {charm.IsValidName, "wordpress2", true}, - {charm.IsValidName, "wordpress-2", 
false}, - {charm.IsValidName, "word2-press2", true}, - - {charm.IsValidSeries, "", false}, - {charm.IsValidSeries, "precise", true}, - {charm.IsValidSeries, "Precise", false}, - {charm.IsValidSeries, "pre cise", false}, - {charm.IsValidSeries, "pre-cise", false}, - {charm.IsValidSeries, "pre^cise", false}, - {charm.IsValidSeries, "prec1se", true}, - {charm.IsValidSeries, "-precise", false}, - {charm.IsValidSeries, "precise-", false}, - {charm.IsValidSeries, "precise-1", false}, - {charm.IsValidSeries, "precise1", true}, - {charm.IsValidSeries, "pre-c1se", false}, -} - -func (s *URLSuite) TestValidCheckers(c *gc.C) { - for i, t := range validTests { - c.Logf("test %d: %s", i, t.string) - c.Assert(t.valid(t.string), gc.Equals, t.expect, gc.Commentf("%s", t.string)) - } -} - -func (s *URLSuite) TestMustParseReference(c *gc.C) { - ref := charm.MustParseReference("wordpress") - c.Assert(ref, gc.DeepEquals, &charm.Reference{ - Schema: "cs", - Name: "wordpress", - Revision: -1, - }) - f := func() { - charm.MustParseReference("bad:bad") - } - c.Assert(f, gc.PanicMatches, `charm URL has invalid schema: "bad:bad"`) -} - -func (s *URLSuite) TestMustParseURL(c *gc.C) { - url := charm.MustParseURL("cs:series/name") - c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series"}) - f := func() { charm.MustParseURL("local:@@/name") } - c.Assert(f, gc.PanicMatches, "charm URL has invalid series: .*") - f = func() { charm.MustParseURL("cs:~user") } - c.Assert(f, gc.PanicMatches, "charm URL without charm name: .*") - f = func() { charm.MustParseURL("cs:~user") } - c.Assert(f, gc.PanicMatches, "charm URL without charm name: .*") - f = func() { charm.MustParseURL("cs:name") } - c.Assert(f, gc.PanicMatches, "charm url series is not resolved") -} - -func (s *URLSuite) TestWithRevision(c *gc.C) { - url := charm.MustParseURL("cs:series/name") - other := url.WithRevision(1) - c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series"}) - c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", 1, "series"}) - - // Should always copy. The opposite behavior is error prone. - c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) - c.Assert(other.WithRevision(1), gc.DeepEquals, other) -} - -var codecs = []struct { - Marshal func(interface{}) ([]byte, error) - Unmarshal func([]byte, interface{}) error -}{{ - Marshal: bson.Marshal, - Unmarshal: bson.Unmarshal, -}, { - Marshal: json.Marshal, - Unmarshal: json.Unmarshal, -}} - -func (s *URLSuite) TestURLCodecs(c *gc.C) { - for i, codec := range codecs { - c.Logf("codec %d", i) - type doc struct { - URL *charm.URL - Ref *charm.Reference - } - url := charm.MustParseURL("cs:series/name") - v0 := doc{url, url.Reference()} - data, err := codec.Marshal(v0) - c.Assert(err, gc.IsNil) - var v doc - err = codec.Unmarshal(data, &v) - c.Assert(v, gc.DeepEquals, v0) - - // Check that the underlying representation - // is a string. 
-	type strDoc struct {
-		URL string
-		Ref string
-	}
-	var vs strDoc
-	err = codec.Unmarshal(data, &vs)
-	c.Assert(err, gc.IsNil)
-	c.Assert(vs.URL, gc.Equals, "cs:series/name")
-	c.Assert(vs.Ref, gc.Equals, "cs:series/name")
-
-	data, err = codec.Marshal(doc{})
-	c.Assert(err, gc.IsNil)
-	err = codec.Unmarshal(data, &v)
-	c.Assert(err, gc.IsNil)
-	c.Assert(v.URL, gc.IsNil)
-	c.Assert(v.Ref, gc.IsNil)
-	}
-}
-
-func (s *URLSuite) TestJSONGarbage(c *gc.C) {
-	// unmarshalling json gibberish
-	for _, value := range []string{":{", `"cs:{}+<"`, `"cs:~_~/f00^^&^/baaaar$%-?"`} {
-		err := json.Unmarshal([]byte(value), new(struct{ URL *charm.URL }))
-		c.Check(err, gc.NotNil)
-		err = json.Unmarshal([]byte(value), new(struct{ Ref *charm.Reference }))
-		c.Check(err, gc.NotNil)
-	}
-}
-
-type QuoteSuite struct{}
-
-var _ = gc.Suite(&QuoteSuite{})
-
-func (s *QuoteSuite) TestUnmodified(c *gc.C) {
-	// Check that a string containing only valid
-	// chars stays unmodified.
-	in := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-"
-	out := charm.Quote(in)
-	c.Assert(out, gc.Equals, in)
-}
-
-func (s *QuoteSuite) TestQuote(c *gc.C) {
-	// Check that invalid chars are translated correctly.
-	in := "hello_there/how'are~you-today.sir"
-	out := charm.Quote(in)
-	c.Assert(out, gc.Equals, "hello_5f_there_2f_how_27_are_7e_you-today.sir")
-}
=== removed file 'src/gopkg.in/juju/charm.v5/workload.go'
--- src/gopkg.in/juju/charm.v5/workload.go 2015-10-23 18:28:45 +0000
+++ src/gopkg.in/juju/charm.v5/workload.go 1970-01-01 00:00:00 +0000
@@ -1,609 +0,0 @@
-// Copyright 2015 Canonical Ltd.
-// Licensed under the LGPLv3, see LICENCE file for details.
-
-package charm
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"strconv"
-	"strings"
-
-	"github.com/juju/schema"
-	"gopkg.in/yaml.v1"
-)
-
-// Workload is the static definition of a workload in a charm.
-type Workload struct {
-	// Name is the name of the workload.
-	Name string
-	// Description is a brief description of the workload.
-	Description string
-	// Type is the name of the workload type.
-	Type string
-	// TypeOptions is a map of arguments for the workload type.
-	TypeOptions map[string]string
-	// Command is the command executed by the workload, if any.
-	Command string
-	// Image is the image used by the workload, if any.
-	Image string
-	// Ports is a list of WorkloadPort.
-	Ports []WorkloadPort
-	// Volumes is a list of WorkloadVolume.
-	Volumes []WorkloadVolume
-	// EnvVars is a map of environment variables used by the workload.
-	EnvVars map[string]string
-}
-
-// ReadWorkloads parses the workload definitions read from r, resolving
-// endpoint and storage references against provides and storage.
-func ReadWorkloads(r io.Reader, provides map[string]Relation, storage map[string]Storage) (map[string]Workload, error) {
-	data, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, err
-	}
-	raw := make(map[interface{}]interface{})
-	err = yaml.Unmarshal(data, raw)
-	if err != nil {
-		return nil, err
-	}
-	v, err := workloadsSchema.Coerce(raw, nil)
-	if err != nil {
-		return nil, errors.New("workloads: " + err.Error())
-	}
-	m := v.(map[string]interface{})
-
-	workloads := parseWorkloads(m["workloads"], provides, storage)
-	return workloads, checkWorkloads(workloads)
-}
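An illustrative sketch (not part of the diff) of the YAML this parser consumed, mirroring the removed tests further down; ParseWorkload is defined just below:

	raw := make(map[interface{}]interface{})
	_ = yaml.Unmarshal([]byte(`
type: docker
command: foocmd
image: nginx/nginx
ports:
  - 80:8080
volumes:
  - /var/www/html:/usr/share/nginx/html:ro
env:
  ENV_VAR: config:config-var
`), raw)
	w, _ := charm.ParseWorkload("workload0", raw)
	// w.Ports[0] == charm.WorkloadPort{External: 80, Internal: 8080}
	// w.Volumes[0].Mode == "ro"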
-// ParseWorkload parses the provided data and converts it to a Workload.
-// The data will most likely have been de-serialized, perhaps from YAML.
-func ParseWorkload(name string, data map[interface{}]interface{}) (*Workload, error) {
-	return ParseWorkloadWithRefs(name, data, nil, nil)
-}
-
-// ParseWorkloadWithRefs parses the provided data and converts it to a
-// Workload. The data will most likely have been de-serialized, perhaps
-// from YAML.
-func ParseWorkloadWithRefs(name string, data map[interface{}]interface{}, provides map[string]Relation, storage map[string]Storage) (*Workload, error) {
-	raw, err := workloadSchema.Coerce(data, []string{name})
-	if err != nil {
-		return nil, err
-	}
-	workload := parseWorkload(name, raw.(map[string]interface{}), provides, storage)
-	if err := workload.Validate(); err != nil {
-		return nil, err
-	}
-	return &workload, nil
-}
-
-// Copy creates a deep copy of the Workload.
-func (copied Workload) Copy() Workload {
-	if copied.TypeOptions != nil {
-		typeOptions := make(map[string]string)
-		for k, v := range copied.TypeOptions {
-			typeOptions[k] = v
-		}
-		copied.TypeOptions = typeOptions
-	}
-
-	if copied.EnvVars != nil {
-		envVars := make(map[string]string)
-		for k, v := range copied.EnvVars {
-			envVars[k] = v
-		}
-		copied.EnvVars = envVars
-	}
-
-	var ports []WorkloadPort
-	for _, port := range copied.Ports {
-		ports = append(ports, port)
-	}
-	copied.Ports = ports
-
-	var volumes []WorkloadVolume
-	for _, volume := range copied.Volumes {
-		volumes = append(volumes, volume.Copy())
-	}
-	copied.Volumes = volumes
-
-	return copied
-}
-
-// WorkloadFieldValue describes a requested change to a Workload.
-type WorkloadFieldValue struct {
-	// Field is the name of the metadata field.
-	Field string
-	// Subfield is the name of the metadata sub-field, if applicable.
-	Subfield string
-	// Value is the value to assign to the field.
-	Value string
-}
-
-// Override updates the Workload with the provided value. If the
-// identified field is not already set then Override fails.
-func (w *Workload) Override(value WorkloadFieldValue) error {
-	switch value.Field {
-	case "name":
-		// TODO(ericsnow) Allow overriding the name (for multiple copies)?
-		return fmt.Errorf(`cannot override "name"`)
-	case "description":
-		if w.Description == "" {
-			return fmt.Errorf(`cannot override "description", not set`)
-		}
-		w.Description = value.Value
-	case "type":
-		return fmt.Errorf(`cannot override "type"`)
-	case "type-options":
-		if value.Subfield == "" {
-			return fmt.Errorf(`cannot override "type-options" without sub-field`)
-		}
-		if _, ok := w.TypeOptions[value.Subfield]; !ok {
-			return fmt.Errorf(`cannot override "type-options" field %q, not set`, value.Subfield)
-		}
-		w.TypeOptions[value.Subfield] = value.Value
-	case "command":
-		if w.Command == "" {
-			return fmt.Errorf(`cannot override "command", not set`)
-		}
-		w.Command = value.Value
-	case "image":
-		if w.Image == "" {
-			return fmt.Errorf(`cannot override "image", not set`)
-		}
-		w.Image = value.Value
-	case "ports":
-		if value.Subfield == "" {
-			return fmt.Errorf(`cannot override "ports" without sub-field`)
-		}
-		index, err := strconv.Atoi(value.Subfield)
-		if err != nil {
-			return fmt.Errorf(`"ports" sub-field must be an integer index`)
-		}
-		if index < 0 || index >= len(w.Ports) {
-			return fmt.Errorf(`"ports" index %d out of range`, index)
-		}
-		var port WorkloadPort
-		if err := port.Set(value.Value); err != nil {
-			return err
-		}
-		w.Ports[index] = port
-	case "volumes":
-		if value.Subfield == "" {
-			return fmt.Errorf(`cannot override "volumes" without sub-field`)
-		}
-		index, err := strconv.Atoi(value.Subfield)
-		if err != nil {
-			return fmt.Errorf(`"volumes" sub-field must be an integer index`)
-		}
-		if index < 0 || index >= len(w.Volumes) {
-			return fmt.Errorf(`"volumes" index %d out of range`, index)
-		}
-		var volume WorkloadVolume
-		if err := volume.Set(value.Value); err != nil {
-			return err
-		}
-		w.Volumes[index] = volume
-	case "env":
-		if value.Subfield == "" {
-			return fmt.Errorf(`cannot override "env" without sub-field`)
-		}
-		if _, ok := w.EnvVars[value.Subfield]; !ok {
-			return fmt.Errorf(`cannot override "env" field %q, not set`, value.Subfield)
-		}
-		w.EnvVars[value.Subfield] = value.Value
-	default:
-		return fmt.Errorf("unrecognized field %q", value.Field)
-	}
-	return nil
-}
-
-// Extend updates the Workload with the provided value. If the
-// identified field is already set then Extend fails.
-func (w *Workload) Extend(value WorkloadFieldValue) error {
-	switch value.Field {
-	case "name":
-		// TODO(ericsnow) Allow overriding the name (for multiple copies)?
- return fmt.Errorf(`"name" already set`) - case "description": - if w.Description != "" { - return fmt.Errorf(`"description" already set`) - } - w.Description = value.Value - case "type": - return fmt.Errorf(`"type" already set`) - case "type-options": - if value.Subfield == "" { - return fmt.Errorf(`cannot extend "type-options" without sub-field`) - } - if w.TypeOptions == nil { - w.TypeOptions = make(map[string]string) - } else if _, ok := w.TypeOptions[value.Subfield]; ok { - return fmt.Errorf(`"type-options" field %q already set`, value.Subfield) - } - w.TypeOptions[value.Subfield] = value.Value - case "command": - if w.Command != "" { - return fmt.Errorf(`cannot extend "command" already set`) - } - w.Command = value.Value - case "image": - if w.Image != "" { - return fmt.Errorf(`cannot extend "image" already set`) - } - w.Image = value.Value - case "ports": - if value.Subfield != "" { - return fmt.Errorf(`cannot extend "ports" with sub-field`) - } - var port WorkloadPort - if err := port.Set(value.Value); err != nil { - return err - } - w.Ports = append(w.Ports, port) - case "volumes": - if value.Subfield != "" { - return fmt.Errorf(`cannot extend "volumes" with sub-field`) - } - var volume WorkloadVolume - if err := volume.Set(value.Value); err != nil { - return err - } - w.Volumes = append(w.Volumes, volume) - case "env": - if value.Subfield == "" { - return fmt.Errorf(`cannot extend "env" without sub-field`) - } - if w.EnvVars == nil { - w.EnvVars = make(map[string]string) - } else if _, ok := w.EnvVars[value.Subfield]; ok { - return fmt.Errorf(`"env" field %q already set`, value.Subfield) - } - w.EnvVars[value.Subfield] = value.Value - default: - return fmt.Errorf("unrecognized field %q", value.Field) - } - return nil -} - -// Apply makes a copy of the Workload and applies the given overrides -// and additions to that copy. -func (w *Workload) Apply(overrides []WorkloadFieldValue, additions []WorkloadFieldValue) (*Workload, error) { - workload := w.Copy() - for _, value := range overrides { - if err := workload.Override(value); err != nil { - return nil, err - } - } - for _, value := range additions { - if err := workload.Extend(value); err != nil { - return nil, err - } - } - return &workload, nil -} - -// Validate checks the Workload for errors. 
-func (w Workload) Validate() error {
-	if w.Name == "" {
-		return fmt.Errorf("missing name")
-	}
-	if w.Type == "" {
-		return fmt.Errorf("workloads: workloads.%s.type: type is required", w.Name)
-	}
-
-	if err := w.validatePorts(); err != nil {
-		return err
-	}
-
-	if err := w.validateStorage(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (w Workload) validatePorts() error {
-	for _, port := range w.Ports {
-		if port.External < 0 {
-			return fmt.Errorf("workloads: workloads.%s.ports: specified endpoint %q unknown for %v", w.Name, port.Endpoint, port)
-		}
-	}
-	return nil
-}
-
-func (w Workload) validateStorage() error {
-	for _, volume := range w.Volumes {
-		if volume.Name != "" && volume.ExternalMount == "" {
-			if volume.storage == nil {
-				return fmt.Errorf("workloads: workloads.%s.volumes: specified storage %q unknown for %v", w.Name, volume.Name, volume)
-			}
-			if volume.storage.Type != StorageFilesystem {
-				return fmt.Errorf("workloads: workloads.%s.volumes: linked storage %q must be filesystem for %v", w.Name, volume.Name, volume)
-			}
-			if volume.storage.Location == "" {
-				return fmt.Errorf("workloads: workloads.%s.volumes: linked storage %q missing location for %v", w.Name, volume.Name, volume)
-			}
-		}
-	}
-	return nil
-}
-
-// WorkloadPort is network port information for a workload.
-type WorkloadPort struct {
-	// External is the port on the host.
-	External int
-	// Internal is the port on the workload.
-	Internal int
-	// Endpoint is the unit-relation endpoint matching the external
-	// port, if any.
-	Endpoint string
-}
-
-// Set parses the provided string and sets the appropriate fields.
-func (w *WorkloadPort) Set(raw string) error {
-	parts := strings.SplitN(raw, ":", 2)
-	if len(parts) != 2 {
-		return fmt.Errorf("invalid value %q", raw)
-	}
-	if err := w.SetExternal(parts[0]); err != nil {
-		return err
-	}
-	if err := w.SetInternal(parts[1]); err != nil {
-		return err
-	}
-	return nil
-}
-
-// SetExternal parses the provided string and sets the appropriate fields.
-func (w *WorkloadPort) SetExternal(portStr string) error {
-	w.External = 0
-	w.Endpoint = ""
-	if strings.HasPrefix(portStr, "<") && strings.HasSuffix(portStr, ">") {
-		// The port was specified by a relation endpoint rather than a
-		// port number.
-		w.Endpoint = portStr[1 : len(portStr)-1]
-	} else {
-		// It's just a port number.
-		port, err := strconv.Atoi(portStr)
-		if err != nil {
-			return fmt.Errorf("expected int got %q", portStr)
-		}
-		w.External = port
-	}
-	return nil
-}
-
-// SetInternal parses the provided string and sets the appropriate fields.
-func (w *WorkloadPort) SetInternal(portStr string) error {
-	port, err := strconv.Atoi(portStr)
-	if err != nil {
-		return fmt.Errorf("expected int got %q", portStr)
-	}
-	w.Internal = port
-	return nil
-}
-
-// WorkloadVolume is storage volume information for a workload.
-type WorkloadVolume struct {
-	// ExternalMount is the path on the host.
-	ExternalMount string
-	// InternalMount is the path on the workload.
-	InternalMount string
-	// Mode is either "ro" or "rw".
-	Mode string
-	// Name is the name of the storage metadata entry, if any.
-	Name string
-
-	// storage is the storage that matched the Storage field.
-	storage *Storage
-}
-
-// Copy creates a deep copy of the WorkloadVolume.
-func (copied WorkloadVolume) Copy() WorkloadVolume {
-	copied.storage = nil
-	return copied
-}
-
-// Set parses the provided string and sets the appropriate fields.
-func (pv *WorkloadVolume) Set(raw string) error { - parts := strings.SplitN(raw, ":", 3) - if len(parts) < 2 { - return fmt.Errorf("invalid value %q", raw) - } - pv.SetExternal(parts[0]) - pv.SetInternal(parts[1]) - if len(parts) == 3 { - if err := pv.SetMode(parts[2]); err != nil { - return err - } - } - return nil -} - -// SetExternal parses the provided string and sets the appropriate fields. -func (pv *WorkloadVolume) SetExternal(volume string) { - pv.Name = "" - pv.ExternalMount = "" - if strings.HasPrefix(volume, "<") && strings.HasSuffix(volume, ">") { - // It's a reference to a defined storage attachment. - pv.Name = volume[1 : len(volume)-1] - } else { - // It's just a volume name. - pv.ExternalMount = volume - } -} - -// SetInternal parses the provided string and sets the appropriate fields. -func (pv *WorkloadVolume) SetInternal(volume string) { - pv.InternalMount = volume -} - -// SetMode parses the provided string and sets the appropriate fields. -func (pv *WorkloadVolume) SetMode(mode string) error { - if _, err := schema.OneOf(schema.Const("rw"), schema.Const("ro")).Coerce(mode, nil); err != nil { - return fmt.Errorf(`expected "rw" or "ro" for mode, got %q`, mode) - } - pv.Mode = mode - return nil -} - -func parseWorkloads(data interface{}, provides map[string]Relation, storage map[string]Storage) map[string]Workload { - if data == nil { - return nil - } - result := make(map[string]Workload) - for name, workloadData := range data.(map[string]interface{}) { - workloadMap := workloadData.(map[string]interface{}) - result[name] = parseWorkload(name, workloadMap, provides, storage) - } - return result -} - -func parseWorkload(name string, coerced map[string]interface{}, provides map[string]Relation, storage map[string]Storage) Workload { - workload := Workload{ - Name: name, - Type: coerced["type"].(string), - } - - if description, ok := coerced["description"]; ok { - workload.Description = description.(string) - } - - if typeMap, ok := coerced["type-options"]; ok { - options := typeMap.(map[string]interface{}) - if len(options) > 0 { - workload.TypeOptions = make(map[string]string) - for k, v := range options { - workload.TypeOptions[k] = v.(string) - } - } - } - - if command, ok := coerced["command"]; ok { - workload.Command = command.(string) - } - - if image, ok := coerced["image"]; ok { - workload.Image = image.(string) - } - - if portsList, ok := coerced["ports"]; ok { - for _, portRaw := range portsList.([]interface{}) { - port := portRaw.(*WorkloadPort) - if port.External == 0 { - port.External = -1 - for endpoint := range provides { - if port.Endpoint == endpoint { - port.External = 0 - break - } - } - } - workload.Ports = append(workload.Ports, *port) - } - } - - if volumeList, ok := coerced["volumes"]; ok { - for _, volumeRaw := range volumeList.([]interface{}) { - volume := *volumeRaw.(*WorkloadVolume) - if volume.Name != "" { - volume.ExternalMount = "" - for sName, s := range storage { - if volume.Name == sName { - copied := s - volume.storage = &copied - if s.Type == StorageFilesystem { - volume.ExternalMount = s.Location - } - break - } - } - } - workload.Volumes = append(workload.Volumes, volume) - } - } - - if envMap, ok := coerced["env"]; ok { - workload.EnvVars = make(map[string]string) - for k, v := range envMap.(map[string]interface{}) { - workload.EnvVars[k] = v.(string) - } - } - - return workload -} - -func checkWorkloads(workloads map[string]Workload) error { - for _, workload := range workloads { - if err := workload.Validate(); err != nil { - return 
err - } - } - return nil -} - -var workloadsSchema = schema.FieldMap( - schema.Fields{ - "workloads": schema.StringMap(workloadSchema), - }, - schema.Defaults{ - "workloads": schema.Omit, - }, -) - -var workloadSchema = schema.FieldMap( - schema.Fields{ - "description": schema.String(), - "type": schema.String(), - "type-options": schema.StringMap(schema.Stringified()), - "command": schema.String(), - "image": schema.String(), - "ports": schema.List(workloadPortsChecker{}), - "volumes": schema.List(workloadVolumeChecker{}), - "env": schema.StringMap(schema.Stringified()), - }, - schema.Defaults{ - "description": schema.Omit, - "type-options": schema.Omit, - "command": schema.Omit, - "image": schema.Omit, - "ports": schema.Omit, - "volumes": schema.Omit, - "env": schema.Omit, - }, -) - -type workloadPortsChecker struct{} - -// Coerce implements schema.Checker. -func (c workloadPortsChecker) Coerce(v interface{}, path []string) (interface{}, error) { - if _, err := schema.String().Coerce(v, path); err != nil { - return nil, err - } - item := v.(string) - - var port WorkloadPort - if err := port.Set(item); err != nil { - return nil, fmt.Errorf("%s: %v", strings.Join(path[1:], ""), err) - } - return &port, nil -} - -type workloadVolumeChecker struct{} - -// Coerce implements schema.Checker. -func (c workloadVolumeChecker) Coerce(v interface{}, path []string) (interface{}, error) { - if _, err := schema.String().Coerce(v, path); err != nil { - return nil, err - } - item := v.(string) - - var volume WorkloadVolume - if err := volume.Set(item); err != nil { - return nil, fmt.Errorf("%s: %v", strings.Join(path[1:], ""), err) - } - return &volume, nil -} === removed file 'src/gopkg.in/juju/charm.v5/workload_test.go' --- src/gopkg.in/juju/charm.v5/workload_test.go 2015-10-23 18:28:45 +0000 +++ src/gopkg.in/juju/charm.v5/workload_test.go 1970-01-01 00:00:00 +0000 @@ -1,796 +0,0 @@ -package charm_test - -import ( - "strings" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/yaml.v1" - - "gopkg.in/juju/charm.v5" -) - -func (s *MetaSuite) TestWorkloadParseOkay(c *gc.C) { - raw := make(map[interface{}]interface{}) - err := yaml.Unmarshal([]byte(` -description: a workload -type: docker -type-options: - publish_all: true -command: foocmd -image: nginx/nginx -ports: - - 80:8080 - - 443:8081 -volumes: - - /var/www/html:/usr/share/nginx/html:ro - - /var/nginx/conf:/etc/nginx:ro -env: - ENV_VAR: config:config-var - OTHER_VAR: some value -`), raw) - c.Assert(err, jc.ErrorIsNil) - workload, err := charm.ParseWorkload("workload0", raw) - c.Assert(err, jc.ErrorIsNil) - - c.Check(workload, jc.DeepEquals, &charm.Workload{ - Name: "workload0", - Description: "a workload", - Type: "docker", - TypeOptions: map[string]string{ - "publish_all": "true", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": "some value", - }, - }) -} - -func (s *MetaSuite) TestWorkloadParseMinimal(c *gc.C) { - raw := make(map[interface{}]interface{}) - err := yaml.Unmarshal([]byte(` -type: docker -`), raw) - c.Assert(err, jc.ErrorIsNil) - workload, err := charm.ParseWorkload("workload0", raw) - c.Assert(err, 
jc.ErrorIsNil) - - c.Check(workload, jc.DeepEquals, &charm.Workload{ - Name: "workload0", - Description: "", - Type: "docker", - TypeOptions: nil, - Command: "", - Image: "", - Ports: nil, - Volumes: nil, - EnvVars: nil, - }) - c.Check(workload, jc.DeepEquals, &charm.Workload{ - Name: "workload0", - Type: "docker", - }) -} - -func (s *MetaSuite) TestWorkloadCopyVolume(c *gc.C) { - vol := charm.WorkloadVolume{ - ExternalMount: "a", - InternalMount: "b", - Mode: "ro", - Name: "spam", - } - copied := vol.Copy() - - c.Check(copied, jc.DeepEquals, vol) -} - -func (s *MetaSuite) TestWorkloadCopyWorkloadOkay(c *gc.C) { - workload := charm.Workload{ - Name: "workload0", - Description: "a workload", - Type: "docker", - TypeOptions: map[string]string{ - "publish_all": "true", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": "some value", - }, - } - copied := workload.Copy() - - c.Check(copied, jc.DeepEquals, workload) -} - -func (s *MetaSuite) TestWorkloadCopyWorkloadMinimal(c *gc.C) { - workload := charm.Workload{ - Name: "workload0", - Type: "docker", - } - copied := workload.Copy() - - c.Check(copied, jc.DeepEquals, workload) -} - -func (s *MetaSuite) TestWorkloadApplyOkay(c *gc.C) { - workload := &charm.Workload{ - Name: "a workload", - Type: "docker", - TypeOptions: map[string]string{ - "publish_all": "true", - }, - Image: "nginx/nginx-2", - Ports: []charm.WorkloadPort{{ - External: 81, - Internal: 8001, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "rw", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "spam", - }, - } - overrides := []charm.WorkloadFieldValue{{ - Field: "type-options", - Subfield: "publish_all", - Value: "NO", - }, { - Field: "image", - Value: "nginx/nginx", - }, { - Field: "ports", - Subfield: "0", - Value: "80:8080", - }, { - Field: "volumes", - Subfield: "0", - Value: "/var/www/html:/usr/share/nginx/html:ro", - }, { - Field: "env", - Subfield: "ENV_VAR", - Value: "config:config-var", - }} - additions := []charm.WorkloadFieldValue{{ - Field: "description", - Value: "my workload", - }, { - Field: "command", - Value: "foocmd", - }, { - Field: "ports", - Value: "443:8081", - }, { - Field: "volumes", - Value: "/var/nginx/conf:/etc/nginx:ro", - }, { - Field: "env", - Subfield: "OTHER_VAR", - Value: "some value", - }} - applied, err := workload.Apply(overrides, additions) - c.Assert(err, jc.ErrorIsNil) - - c.Check(applied, jc.DeepEquals, &charm.Workload{ - Name: "a workload", - Type: "docker", - Description: "my workload", - TypeOptions: map[string]string{ - "publish_all": "NO", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": "some value", - }, - }) -} - -func (s *MetaSuite) 
TestWorkloadApplyEmpty(c *gc.C) { - workload := &charm.Workload{} - var overrides []charm.WorkloadFieldValue - additions := []charm.WorkloadFieldValue{{ - Field: "type-options", - Subfield: "publish_all", - Value: "NO", - }, { - Field: "description", - Value: "my workload", - }, { - Field: "image", - Value: "nginx/nginx", - }, { - Field: "command", - Value: "foocmd", - }, { - Field: "ports", - Value: "80:8080", - }, { - Field: "ports", - Value: "443:8081", - }, { - Field: "volumes", - Value: "/var/www/html:/usr/share/nginx/html:ro", - }, { - Field: "volumes", - Value: "/var/nginx/conf:/etc/nginx:ro", - }, { - Field: "env", - Subfield: "ENV_VAR", - Value: "config:config-var", - }, { - Field: "env", - Subfield: "OTHER_VAR", - Value: "some value", - }} - applied, err := workload.Apply(overrides, additions) - c.Assert(err, jc.ErrorIsNil) - - c.Check(applied, jc.DeepEquals, &charm.Workload{ - Description: "my workload", - TypeOptions: map[string]string{ - "publish_all": "NO", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": "some value", - }, - }) -} - -func (s *MetaSuite) TestWorkloadApplyMinimal(c *gc.C) { - workload := &charm.Workload{ - Name: "workload0", - Type: "docker", - Image: "nginx/nginx", - } - overrides := []charm.WorkloadFieldValue{{ - Field: "image", - Value: "nginx/nginx-2", - }} - additions := []charm.WorkloadFieldValue{{ - Field: "description", - Value: "my workload", - }} - applied, err := workload.Apply(overrides, additions) - c.Assert(err, jc.ErrorIsNil) - - c.Check(applied, jc.DeepEquals, &charm.Workload{ - Name: "workload0", - Description: "my workload", - Type: "docker", - Image: "nginx/nginx-2", - }) -} - -func (s *MetaSuite) TestWorkloadApplyNoChange(c *gc.C) { - workload := &charm.Workload{ - Name: "a workload", - Type: "docker", - Description: "my workload", - TypeOptions: map[string]string{ - "publish_all": "NO", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": "some value", - }, - } - var overrides, additions []charm.WorkloadFieldValue - applied, err := workload.Apply(overrides, additions) - c.Assert(err, jc.ErrorIsNil) - - c.Check(applied, jc.DeepEquals, &charm.Workload{ - Name: "a workload", - Type: "docker", - Description: "my workload", - TypeOptions: map[string]string{ - "publish_all": "NO", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": 
"some value", - }, - }) -} - -type workloadTest struct { - desc string - field string - subfield string - value string - err string -} - -func (t workloadTest) log(c *gc.C, i int) { - c.Logf("test %d: %s", i, t.desc) -} - -func (t workloadTest) changes() []charm.WorkloadFieldValue { - return []charm.WorkloadFieldValue{{ - Field: t.field, - Subfield: t.subfield, - Value: t.value, - }} -} - -func (s *MetaSuite) TestWorkloadApplyBadOverride(c *gc.C) { - tests := []workloadTest{{ - desc: "unknown field", - field: "spam", - err: "unrecognized field.*", - }, { - desc: "name", - field: "name", - err: "cannot override.*", - }, { - desc: "type", - field: "type", - err: "cannot override.*", - }, { - desc: "simple field not set", - field: "description", - err: "cannot override.*, not set", - }, { - desc: "map missing subfield", - field: "env", - err: "cannot override.* without sub-field", - }, { - desc: "map field not set", - field: "env", - subfield: "ENV_VAR", - err: "cannot override.* field.*, not set", - }, { - desc: "list missing subfield", - field: "ports", - err: "cannot override.* without sub-field", - }, { - desc: "list bad index", - field: "ports", - subfield: "spam", - err: ".* sub-field must be an integer index", - }, { - desc: "list index out of range", - field: "ports", - subfield: "1", - err: ".* index 1 out of range", - }} - - workload := &charm.Workload{ - Name: "a workload", - Type: "docker", - } - - for i, t := range tests { - t.log(c, i) - var additions []charm.WorkloadFieldValue - overrides := t.changes() - _, err := workload.Apply(overrides, additions) - c.Assert(err, gc.NotNil) - - c.Check(err, gc.ErrorMatches, t.err) - } -} - -func (s *MetaSuite) TestWorkloadApplyBadAddition(c *gc.C) { - tests := []workloadTest{{ - desc: "unknown field", - field: "spam", - err: "unrecognized field.*", - }, { - desc: "name", - field: "name", - err: ".* already set", - }, { - desc: "type", - field: "type", - err: ".* already set", - }, { - desc: "simple field already set", - field: "description", - err: ".* already set", - }, { - desc: "map missing subfield", - field: "env", - err: "cannot extend.* without sub-field", - }, { - desc: "map field already set", - field: "env", - subfield: "ENV_VAR", - err: ".* field.* already set", - }, { - desc: "list unexpected subfield", - field: "ports", - subfield: "10", - err: "cannot extend.* with sub-field", - }} - - workload := &charm.Workload{ - Name: "a workload", - Type: "docker", - Description: "my workload", - EnvVars: map[string]string{ - "ENV_VAR": "yes", - }, - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }}, - } - - for i, t := range tests { - t.log(c, i) - var overrides []charm.WorkloadFieldValue - additions := t.changes() - _, err := workload.Apply(overrides, additions) - c.Assert(err, gc.NotNil) - - c.Check(err, gc.ErrorMatches, t.err) - } -} - -func (s *MetaSuite) TestWorkloadNameRequired(c *gc.C) { - workload := charm.Workload{} - c.Assert(workload.Validate(), gc.ErrorMatches, "missing name") -} - -func (s *MetaSuite) TestWorkloads(c *gc.C) { - // "type" is the only required attribute for storage. 
- workloads, err := charm.ReadWorkloads(strings.NewReader(` -workloads: - workload0: - description: a workload - type: docker - type-options: - publish_all: true - command: foocmd - image: nginx/nginx - ports: - - 80:8080 - - 443:8081 - volumes: - - /var/www/html:/usr/share/nginx/html:ro - - /var/nginx/conf:/etc/nginx:ro - env: - ENV_VAR: config:config-var - OTHER_VAR: some value - workload1: - type: rkt -`), nil, nil) - c.Assert(err, jc.ErrorIsNil) - c.Assert(workloads, gc.DeepEquals, map[string]charm.Workload{ - "workload0": { - Name: "workload0", - Description: "a workload", - Type: "docker", - TypeOptions: map[string]string{ - "publish_all": "true", - }, - Command: "foocmd", - Image: "nginx/nginx", - Ports: []charm.WorkloadPort{{ - External: 80, - Internal: 8080, - }, { - External: 443, - Internal: 8081, - }}, - Volumes: []charm.WorkloadVolume{{ - ExternalMount: "/var/www/html", - InternalMount: "/usr/share/nginx/html", - Mode: "ro", - }, { - ExternalMount: "/var/nginx/conf", - InternalMount: "/etc/nginx", - Mode: "ro", - }}, - EnvVars: map[string]string{ - "ENV_VAR": "config:config-var", - "OTHER_VAR": "some value", - }, - }, - "workload1": { - Name: "workload1", - Type: "rkt", - }, - }) -} - -func (s *MetaSuite) TestWorkloadsNotRequired(c *gc.C) { - noWorkload := strings.NewReader(` -name: a -summary: b -description: c -`) - _, err := charm.ReadWorkloads(noWorkload, nil, nil) - c.Assert(err, jc.ErrorIsNil) -} - -func (s *MetaSuite) TestWorkloadsTypeRequired(c *gc.C) { - badMeta := strings.NewReader(` -name: a -summary: b -description: c -`) - meta, err := charm.ReadMeta(badMeta) - c.Assert(err, jc.ErrorIsNil) - - badWorkload := strings.NewReader(` -workloads: - badworkload: - -`) - _, err = charm.ReadWorkloads(badWorkload, meta.Provides, meta.Storage) - c.Assert(err, gc.ErrorMatches, "workloads: workloads.badworkload: expected map, got nothing") -} - -func (s *MetaSuite) TestWorkloadsTypeNameRequired(c *gc.C) { - badWorkload := strings.NewReader(` -workloads: - badworkload: - foo: bar -`) - _, err := charm.ReadWorkloads(badWorkload, nil, nil) - c.Assert(err, gc.ErrorMatches, "workloads: workloads.badworkload.type: expected string, got nothing") -} - -func (s *MetaSuite) TestWorkloadsPortEndpointFound(c *gc.C) { - portMeta := strings.NewReader(` -name: a -summary: b -description: c -provides: - website: - interface: http -`) - meta, err := charm.ReadMeta(portMeta) - c.Assert(err, jc.ErrorIsNil) - - portWorkload := strings.NewReader(` -workloads: - endpointworkload: - type: docker - ports: - - :8080 - - 443:8081 -`) - workloads, err := charm.ReadWorkloads(portWorkload, meta.Provides, meta.Storage) - c.Assert(err, jc.ErrorIsNil) - - c.Check(workloads["endpointworkload"].Ports[0].External, gc.Equals, 0) - c.Check(workloads["endpointworkload"].Ports[0].Internal, gc.Equals, 8080) - c.Check(workloads["endpointworkload"].Ports[0].Endpoint, gc.Equals, "website") - c.Check(workloads["endpointworkload"].Ports[1].External, gc.Equals, 443) - c.Check(workloads["endpointworkload"].Ports[1].Internal, gc.Equals, 8081) - c.Check(workloads["endpointworkload"].Ports[1].Endpoint, gc.Equals, "") -} - -func (s *MetaSuite) TestWorkloadsPortEndpointNotFound(c *gc.C) { - endpointMeta := strings.NewReader(` -name: a -summary: b -description: c -provides: - mysql: - interface: db -`) - endpointWorkloads := strings.NewReader(` -workloads: - endpointworkload: - type: docker - ports: - - :8080 - - 443:8081 -`) - meta, err := charm.ReadMeta(endpointMeta) - c.Assert(err, jc.ErrorIsNil) - - _, err = 
charm.ReadWorkloads(endpointWorkloads, meta.Provides, meta.Storage) - c.Assert(err, gc.ErrorMatches, `.* specified endpoint "website" unknown for .*`) -} - -func (s *MetaSuite) TestWorkloadsStorageFound(c *gc.C) { - storageMeta := strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem - location: /var/lib/things -`) - storageWorkload := strings.NewReader(` -workloads: - storageworkload: - type: docker - volumes: - - :/var/www/html:ro -`) - meta, err := charm.ReadMeta(storageMeta) - c.Assert(err, jc.ErrorIsNil) - workloads, err := charm.ReadWorkloads(storageWorkload, meta.Provides, meta.Storage) - c.Assert(err, jc.ErrorIsNil) - - c.Check(workloads["storageworkload"].Volumes[0].ExternalMount, gc.Equals, "/var/lib/things") - c.Check(workloads["storageworkload"].Volumes[0].Name, gc.Equals, "store0") -} - -func (s *MetaSuite) TestWorkloadsStorageNotFound(c *gc.C) { - storageMeta := strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem - location: /var/lib/things -`) - storageWorkloads := strings.NewReader(` -workloads: - badworkload: - type: docker - volumes: - - :/var/www/html:ro -`) - meta, err := charm.ReadMeta(storageMeta) - c.Assert(err, jc.ErrorIsNil) - - _, err = charm.ReadWorkloads(storageWorkloads, meta.Provides, meta.Storage) - c.Assert(err, gc.ErrorMatches, "workloads: workloads.badworkload.volumes: specified storage \"store1\" unknown for .*") -} - -func (s *MetaSuite) TestWorkloadsStorageNotFilesystem(c *gc.C) { - storageMeta := strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: block -`) - storageWorkloads := strings.NewReader(` -workloads: - badworkload: - type: docker - volumes: - - :/var/www/html:ro -`) - meta, err := charm.ReadMeta(storageMeta) - c.Assert(err, jc.ErrorIsNil) - - _, err = charm.ReadWorkloads(storageWorkloads, meta.Provides, meta.Storage) - c.Assert(err, gc.ErrorMatches, "workloads: workloads.badworkload.volumes: linked storage \"store0\" must be filesystem for .*") -} - -func (s *MetaSuite) TestWorkloadsStorageMissingLocation(c *gc.C) { - storageMeta := strings.NewReader(` -name: a -summary: b -description: c -storage: - store0: - type: filesystem -`) - storageWorkloads := strings.NewReader(` -workloads: - badworkload: - type: docker - volumes: - - :/var/www/html:ro -`) - meta, err := charm.ReadMeta(storageMeta) - c.Assert(err, jc.ErrorIsNil) - - _, err = charm.ReadWorkloads(storageWorkloads, meta.Provides, meta.Storage) - c.Assert(err, gc.ErrorMatches, "workloads: workloads.badworkload.volumes: linked storage \"store0\" missing location for .*") -} === added directory 'src/gopkg.in/juju/charm.v6-unstable' === added file 'src/gopkg.in/juju/charm.v6-unstable/.gitignore' --- src/gopkg.in/juju/charm.v6-unstable/.gitignore 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +tags +TAGS +.emacs* +*.test +*.sw[nop] === added file 'src/gopkg.in/juju/charm.v6-unstable/.reviewboardrc' --- src/gopkg.in/juju/charm.v6-unstable/.reviewboardrc 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/.reviewboardrc 2016-03-22 15:18:22 +0000 @@ -0,0 +1,4 @@ +REVIEWBOARD_URL = "https://reviews.vapour.ws/" +REPOSITORY = "juju-charm" +BRANCH = "master" +TRACKING_BRANCH = "origin/master" === added file 'src/gopkg.in/juju/charm.v6-unstable/HACKING.md' --- src/gopkg.in/juju/charm.v6-unstable/HACKING.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/HACKING.md 2016-03-22 
15:18:22 +0000 @@ -0,0 +1,14 @@ +# HACKING + +See the README for information about gopkg.in. + +## Developing + +If you are developing on a versioned branch, use gopkg.in. + + go get -u -v -t gopkg.in/juju/charm.v6-unstable/... + +gopkg.in names the local branch master. To submit a pull request, push to +your GitHub branch using a refspec that reflects the version tag you are using. + + git push git@github.com:jrwren/charm +master:v6-unstable === added file 'src/gopkg.in/juju/charm.v6-unstable/LICENCE' --- src/gopkg.in/juju/charm.v6-unstable/LICENCE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/LICENCE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version".
+ + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. 
A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. === added file 'src/gopkg.in/juju/charm.v6-unstable/README.md' --- src/gopkg.in/juju/charm.v6-unstable/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +Juju charms +=========== + +This package parses juju charms. + +## Versions + +Stable versions of this API are available on gopkg.in at +gopkg.in/juju/charm.vD where D is a version spec. If you are viewing this +readme on github.com you can click the 'branch:' button above to view tags +and branches. See http://labix.org/gopkg.in for more information. 
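As a minimal usage sketch of the versioned import scheme the README above describes (not part of the diff; it assumes this tree's charm.v6-unstable path and uses charm.ReadMeta, which the tests elsewhere in this diff exercise):

    package main

    import (
        "fmt"
        "log"
        "strings"

        "gopkg.in/juju/charm.v6-unstable"
    )

    func main() {
        // Parse a toy metadata.yaml through the versioned import path.
        meta, err := charm.ReadMeta(strings.NewReader("name: a\nsummary: b\ndescription: c\n"))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(meta.Name) // prints "a"
    }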
=== added file 'src/gopkg.in/juju/charm.v6-unstable/actions.go' --- src/gopkg.in/juju/charm.v6-unstable/actions.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/actions.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,279 @@ +// Copyright 2011-2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + "io" + "io/ioutil" + "regexp" + "strings" + + "github.com/juju/errors" + gjs "github.com/juju/gojsonschema" + "gopkg.in/yaml.v1" +) + +var prohibitedSchemaKeys = map[string]bool{"$ref": true, "$schema": true} + +var actionNameRule = regexp.MustCompile("^[a-z](?:[a-z-]*[a-z])?$") + +// Actions defines the available actions for the charm. Additional params +// may be added as metadata at a future time (e.g. version). +type Actions struct { + ActionSpecs map[string]ActionSpec `yaml:"actions,omitempty" bson:",omitempty"` +} + +// NewActions returns an empty Actions value. Build this out further if it +// becomes necessary. +func NewActions() *Actions { + return &Actions{} +} + +// ActionSpec is a definition of the parameters and traits of an Action. +// The Params map is expected to conform to JSON-Schema Draft 4 as defined at +// http://json-schema.org/draft-04/schema# (see http://json-schema.org/latest/json-schema-core.html) +type ActionSpec struct { + Description string + Params map[string]interface{} +} + +// ValidateParams validates the passed params map against the given ActionSpec +// and returns any error encountered. +// Usage: +// err := ch.Actions().ActionSpecs["snapshot"].ValidateParams(someMap) +func (spec *ActionSpec) ValidateParams(params map[string]interface{}) error { + // Load the schema from the Charm. + specLoader := gjs.NewGoLoader(spec.Params) + schema, err := gjs.NewSchema(specLoader) + if err != nil { + return err + } + + // Load the params as a document to validate. + // If an empty map was passed, we need an empty map to validate against. + p := map[string]interface{}{} + if len(params) > 0 { + p = params + } + docLoader := gjs.NewGoLoader(p) + results, err := schema.Validate(docLoader) + if err != nil { + return err + } + if results.Valid() { + return nil + } + + // Handle any errors generated by Validate(). + var errorStrings []string + for _, validationError := range results.Errors() { + errorStrings = append(errorStrings, validationError.String()) + } + return errors.Errorf("validation failed: %s", strings.Join(errorStrings, "; ")) +} + +// InsertDefaults inserts the schema's default values in target using +// github.com/juju/gojsonschema. If a nil target is received, an empty map +// will be created as the target. The target is then mutated to include the +// defaults. +// +// The returned map will be the transformed or created target map. +func (spec *ActionSpec) InsertDefaults(target map[string]interface{}) (map[string]interface{}, error) { + specLoader := gjs.NewGoLoader(spec.Params) + schema, err := gjs.NewSchema(specLoader) + if err != nil { + return target, err + } + + return schema.InsertDefaults(target) +} + +// ReadActionsYaml builds an Actions spec from a charm's actions.yaml.
+func ReadActionsYaml(r io.Reader) (*Actions, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + result := &Actions{ + ActionSpecs: map[string]ActionSpec{}, + } + + var unmarshaledActions map[string]map[string]interface{} + if err := yaml.Unmarshal(data, &unmarshaledActions); err != nil { + return nil, err + } + + for name, actionSpec := range unmarshaledActions { + if valid := actionNameRule.MatchString(name); !valid { + return nil, fmt.Errorf("bad action name %s", name) + } + + desc := "No description" + thisActionSchema := map[string]interface{}{ + "description": desc, + "type": "object", + "title": name, + "properties": map[string]interface{}{}, + } + + for key, value := range actionSpec { + switch key { + case "description": + // These fields must be strings. + typed, ok := value.(string) + if !ok { + return nil, errors.Errorf("value for schema key %q must be a string", key) + } + thisActionSchema[key] = typed + desc = typed + case "title": + // These fields must be strings. + typed, ok := value.(string) + if !ok { + return nil, errors.Errorf("value for schema key %q must be a string", key) + } + thisActionSchema[key] = typed + case "required": + typed, ok := value.([]interface{}) + if !ok { + return nil, errors.Errorf("value for schema key %q must be a YAML list", key) + } + thisActionSchema[key] = typed + case "params": + // Clean any map[interface{}]interface{}s out so they don't + // cause problems with BSON serialization later. + cleansedParams, err := cleanse(value) + if err != nil { + return nil, err + } + + // JSON-Schema must be a map + typed, ok := cleansedParams.(map[string]interface{}) + if !ok { + return nil, errors.New("params failed to parse as a map") + } + thisActionSchema["properties"] = typed + default: + // In case this has nested maps, we must clean them out. + typed, err := cleanse(value) + if err != nil { + return nil, err + } + thisActionSchema[key] = typed + } + } + + // Make sure the new Params doc conforms to JSON-Schema + // Draft 4 (http://json-schema.org/latest/json-schema-core.html) + schemaLoader := gjs.NewGoLoader(thisActionSchema) + _, err := gjs.NewSchema(schemaLoader) + if err != nil { + return nil, errors.Annotatef(err, "invalid params schema for action schema %s", name) + } + + // Now assign the resulting schema to the final entry for the result. + result.ActionSpecs[name] = ActionSpec{ + Description: desc, + Params: thisActionSchema, + } + } + return result, nil +} + +// cleanse rejects schemas containing references or maps keyed with non- +// strings, and coerces acceptable maps to contain only maps with string keys. +func cleanse(input interface{}) (interface{}, error) { + switch typedInput := input.(type) { + + // In this case, recurse in. + case map[string]interface{}: + newMap := make(map[string]interface{}) + for key, value := range typedInput { + + if prohibitedSchemaKeys[key] { + return nil, fmt.Errorf("schema key %q not compatible with this version of juju", key) + } + + newValue, err := cleanse(value) + if err != nil { + return nil, err + } + newMap[key] = newValue + } + return newMap, nil + + // Coerce keys to strings and error out if there's a problem; then recurse. 
+ case map[interface{}]interface{}: + newMap := make(map[string]interface{}) + for key, value := range typedInput { + typedKey, ok := key.(string) + if !ok { + return nil, errors.New("map keyed with non-string value") + } + newMap[typedKey] = value + } + return cleanse(newMap) + + // Recurse into each element of the list. + case []interface{}: + newSlice := make([]interface{}, 0) + for _, sliceValue := range typedInput { + newSliceValue, err := cleanse(sliceValue) + if err != nil { + // Propagate the underlying cleanse error. + return nil, err + } + newSlice = append(newSlice, newSliceValue) + } + return newSlice, nil + + // Other kinds of values are OK. + default: + return input, nil + } +} + +// recurseMapOnKeys returns the value of a map keyed recursively by the +// strings given in "keys". Thus, recurseMapOnKeys({a,b}, {a:{b:{c:d}}}) +// would return {c:d}. +func recurseMapOnKeys(keys []string, params map[string]interface{}) (interface{}, bool) { + key, rest := keys[0], keys[1:] + answer, ok := params[key] + + // If we're out of keys, we have our answer. + if len(rest) == 0 { + return answer, ok + } + + // If we're not out of keys, but we tried a key that wasn't in the + // map, there's no answer. + if !ok { + return nil, false + } + + switch typed := answer.(type) { + // If our value is a map[s]i{}, we can keep recursing. + case map[string]interface{}: + return recurseMapOnKeys(keys[1:], typed) + // If it's a map[i{}]i{}, we need to check whether it's a map[s]i{}. + case map[interface{}]interface{}: + m := make(map[string]interface{}) + for k, v := range typed { + if tK, ok := k.(string); ok { + m[tK] = v + } else { + // If it's not, we don't have something we + // can work with. + return nil, false + } + } + // If it is, recurse into it. + return recurseMapOnKeys(keys[1:], m) + + // Otherwise, we're trying to recurse into something we don't know + // how to deal with, so our answer is that we don't have an answer. + default: + return nil, false + } +} === added file 'src/gopkg.in/juju/charm.v6-unstable/actions_test.go' --- src/gopkg.in/juju/charm.v6-unstable/actions_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/actions_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,864 @@ +// Copyright 2011-2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details.
+ +package charm + +import ( + "bytes" + "encoding/json" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +type ActionsSuite struct{} + +var _ = gc.Suite(&ActionsSuite{}) + +func (s *ActionsSuite) TestNewActions(c *gc.C) { + emptyAction := NewActions() + c.Assert(emptyAction, jc.DeepEquals, &Actions{}) +} + +func (s *ActionsSuite) TestValidateOk(c *gc.C) { + for i, test := range []struct { + description string + actionSpec *ActionSpec + objectToValidate map[string]interface{} + }{{ + description: "Validation of an empty object is ok.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}}}}, + objectToValidate: nil, + }, { + description: "Validation of one required value.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}}, + "required": []interface{}{"outfile"}}}, + objectToValidate: map[string]interface{}{ + "outfile": "out-2014-06-12.bz2", + }, + }, { + description: "Validation of one required and one optional value.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}, + "quality": map[string]interface{}{ + "description": "Compression quality", + "type": "integer", + "minimum": 0, + "maximum": 9}}, + "required": []interface{}{"outfile"}}}, + objectToValidate: map[string]interface{}{ + "outfile": "out-2014-06-12.bz2", + }, + }, { + description: "Validation of an optional, range limited value.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}, + "quality": map[string]interface{}{ + "description": "Compression quality", + "type": "integer", + "minimum": 0, + "maximum": 9}}, + "required": []interface{}{"outfile"}}}, + objectToValidate: map[string]interface{}{ + "outfile": "out-2014-06-12.bz2", + "quality": 5, + }, + }} { + c.Logf("test %d: %s", i, test.description) + err := test.actionSpec.ValidateParams(test.objectToValidate) + c.Assert(err, jc.ErrorIsNil) + } +} + +func (s *ActionsSuite) TestValidateFail(c *gc.C) { + var validActionTests = []struct { + description string + actionSpec *ActionSpec + badActionJson string + expectedError string + }{{ + description: "Validation of one required value.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + 
"description": "The file to write out to.", + "type": "string"}}, + "required": []interface{}{"outfile"}}}, + badActionJson: `{"outfile": 5}`, + expectedError: "validation failed: (root).outfile : must be of type string, given 5", + }, { + description: "Restrict to only one property", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}}, + "required": []interface{}{"outfile"}, + "additionalProperties": false}}, + badActionJson: `{"outfile": "foo.bz", "bar": "foo"}`, + expectedError: "validation failed: (root) : additional property \"bar\" is not allowed, given {\"bar\":\"foo\",\"outfile\":\"foo.bz\"}", + }, { + description: "Validation of one required and one optional value.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}, + "quality": map[string]interface{}{ + "description": "Compression quality", + "type": "integer", + "minimum": 0, + "maximum": 9}}, + "required": []interface{}{"outfile"}}}, + badActionJson: `{"quality": 5}`, + expectedError: "validation failed: (root) : \"outfile\" property is missing and required, given {\"quality\":5}", + }, { + description: "Validation of an optional, range limited value.", + actionSpec: &ActionSpec{ + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}, + "quality": map[string]interface{}{ + "description": "Compression quality", + "type": "integer", + "minimum": 0, + "maximum": 9}}, + "required": []interface{}{"outfile"}}}, + badActionJson: ` +{ "outfile": "out-2014-06-12.bz2", "quality": "two" }`, + expectedError: "validation failed: (root).quality : must be of type integer, given \"two\"", + }} + + for i, test := range validActionTests { + c.Logf("test %d: %s", i, test.description) + var params map[string]interface{} + jsonBytes := []byte(test.badActionJson) + err := json.Unmarshal(jsonBytes, ¶ms) + c.Assert(err, gc.IsNil) + err = test.actionSpec.ValidateParams(params) + c.Assert(err.Error(), gc.Equals, test.expectedError) + } +} + +func (s *ActionsSuite) TestCleanseOk(c *gc.C) { + + var goodInterfaceTests = []struct { + description string + acceptableInterface map[string]interface{} + expectedInterface map[string]interface{} + }{{ + description: "An interface requiring no changes.", + acceptableInterface: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + "key3": map[string]interface{}{ + "foo1": "val1", + "foo2": "val2"}}, + expectedInterface: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + "key3": map[string]interface{}{ + "foo1": "val1", + "foo2": "val2"}}, + }, { + description: "Substitute a single inner map[i]i.", + acceptableInterface: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + "key3": map[interface{}]interface{}{ + "foo1": 
"val1", + "foo2": "val2"}}, + expectedInterface: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + "key3": map[string]interface{}{ + "foo1": "val1", + "foo2": "val2"}}, + }, { + description: "Substitute nested inner map[i]i.", + acceptableInterface: map[string]interface{}{ + "key1a": "val1a", + "key2a": "val2a", + "key3a": map[interface{}]interface{}{ + "key1b": "val1b", + "key2b": map[interface{}]interface{}{ + "key1c": "val1c"}}}, + expectedInterface: map[string]interface{}{ + "key1a": "val1a", + "key2a": "val2a", + "key3a": map[string]interface{}{ + "key1b": "val1b", + "key2b": map[string]interface{}{ + "key1c": "val1c"}}}, + }, { + description: "Substitute nested map[i]i within []i.", + acceptableInterface: map[string]interface{}{ + "key1a": "val1a", + "key2a": []interface{}{5, "foo", map[string]interface{}{ + "key1b": "val1b", + "key2b": map[interface{}]interface{}{ + "key1c": "val1c"}}}}, + expectedInterface: map[string]interface{}{ + "key1a": "val1a", + "key2a": []interface{}{5, "foo", map[string]interface{}{ + "key1b": "val1b", + "key2b": map[string]interface{}{ + "key1c": "val1c"}}}}, + }} + + for i, test := range goodInterfaceTests { + c.Logf("test %d: %s", i, test.description) + cleansedInterfaceMap, err := cleanse(test.acceptableInterface) + c.Assert(err, gc.IsNil) + c.Assert(cleansedInterfaceMap, jc.DeepEquals, test.expectedInterface) + } +} + +func (s *ActionsSuite) TestCleanseFail(c *gc.C) { + + var badInterfaceTests = []struct { + description string + failInterface map[string]interface{} + expectedError string + }{{ + description: "An inner map[interface{}]interface{} with an int key.", + failInterface: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + "key3": map[interface{}]interface{}{ + "foo1": "val1", + 5: "val2"}}, + expectedError: "map keyed with non-string value", + }, { + description: "An inner []interface{} containing a map[i]i with an int key.", + failInterface: map[string]interface{}{ + "key1a": "val1b", + "key2a": "val2b", + "key3a": []interface{}{"foo1", 5, map[interface{}]interface{}{ + "key1b": "val1b", + "key2b": map[interface{}]interface{}{ + "key1c": "val1c", + 5: "val2c"}}}}, + expectedError: "map keyed with non-string value", + }} + + for i, test := range badInterfaceTests { + c.Logf("test %d: %s", i, test.description) + _, err := cleanse(test.failInterface) + c.Assert(err, gc.NotNil) + c.Assert(err.Error(), gc.Equals, test.expectedError) + } +} + +func (s *ActionsSuite) TestReadGoodActionsYaml(c *gc.C) { + var goodActionsYamlTests = []struct { + description string + yaml string + expectedActions *Actions + }{{ + description: "A simple snapshot actions YAML with one parameter.", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: + outfile: + description: "The file to write out to." 
+ type: string + required: ["outfile"] +`, + expectedActions: &Actions{map[string]ActionSpec{ + "snapshot": { + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}}, + "required": []interface{}{"outfile"}}}}}, + }, { + description: "An empty Actions definition.", + yaml: "", + expectedActions: &Actions{ + ActionSpecs: map[string]ActionSpec{}, + }, + }, { + description: "A more complex schema with hyphenated names and multiple parameters.", + yaml: ` +snapshot: + description: "Take a snapshot of the database." + params: + outfile: + description: "The file to write out to." + type: "string" + compression-quality: + description: "The compression quality." + type: "integer" + minimum: 0 + maximum: 9 + exclusiveMaximum: false +remote-sync: + description: "Sync a file to a remote host." + params: + file: + description: "The file to send out." + type: "string" + format: "uri" + remote-uri: + description: "The host to sync to." + type: "string" + format: "uri" + util: + description: "The util to perform the sync (rsync or scp.)" + type: "string" + enum: ["rsync", "scp"] + required: ["file", "remote-uri"] +`, + expectedActions: &Actions{map[string]ActionSpec{ + "snapshot": { + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string"}, + "compression-quality": map[string]interface{}{ + "description": "The compression quality.", + "type": "integer", + "minimum": 0, + "maximum": 9, + "exclusiveMaximum": false}}}}, + "remote-sync": { + Description: "Sync a file to a remote host.", + Params: map[string]interface{}{ + "title": "remote-sync", + "description": "Sync a file to a remote host.", + "type": "object", + "properties": map[string]interface{}{ + "file": map[string]interface{}{ + "description": "The file to send out.", + "type": "string", + "format": "uri"}, + "remote-uri": map[string]interface{}{ + "description": "The host to sync to.", + "type": "string", + "format": "uri"}, + "util": map[string]interface{}{ + "description": "The util to perform the sync (rsync or scp.)", + "type": "string", + "enum": []interface{}{"rsync", "scp"}}}, + "required": []interface{}{"file", "remote-uri"}}}}}, + }, { + description: "A schema with other keys, e.g. \"definitions\"", + yaml: ` +snapshot: + description: "Take a snapshot of the database." + params: + outfile: + description: "The file to write out to." + type: "string" + compression-quality: + description: "The compression quality." 
+ type: "integer" + minimum: 0 + maximum: 9 + exclusiveMaximum: false + definitions: + diskdevice: {} + something-else: {} +`, + expectedActions: &Actions{map[string]ActionSpec{ + "snapshot": { + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string", + }, + "compression-quality": map[string]interface{}{ + "description": "The compression quality.", + "type": "integer", + "minimum": 0, + "maximum": 9, + "exclusiveMaximum": false, + }, + }, + "definitions": map[string]interface{}{ + "diskdevice": map[string]interface{}{}, + "something-else": map[string]interface{}{}, + }, + }, + }, + }}, + }, { + description: "A schema with no \"params\" key, implying no options.", + yaml: ` +snapshot: + description: Take a snapshot of the database. +`, + + expectedActions: &Actions{map[string]ActionSpec{ + "snapshot": { + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "description": "Take a snapshot of the database.", + "title": "snapshot", + "type": "object", + "properties": map[string]interface{}{}, + }}}}, + }, { + description: "A schema with no values at all, implying no options.", + yaml: ` +snapshot: +`, + + expectedActions: &Actions{map[string]ActionSpec{ + "snapshot": { + Description: "No description", + Params: map[string]interface{}{ + "description": "No description", + "title": "snapshot", + "type": "object", + "properties": map[string]interface{}{}, + }}}}, + }} + + // Beginning of testing loop + for i, test := range goodActionsYamlTests { + c.Logf("test %d: %s", i, test.description) + reader := bytes.NewReader([]byte(test.yaml)) + loadedAction, err := ReadActionsYaml(reader) + c.Assert(err, gc.IsNil) + c.Check(loadedAction, jc.DeepEquals, test.expectedActions) + } +} + +func (s *ActionsSuite) TestReadBadActionsYaml(c *gc.C) { + + var badActionsYamlTests = []struct { + description string + yaml string + expectedError string + }{{ + description: "Reject JSON-Schema containing references.", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: + $schema: "http://json-schema.org/draft-03/schema#" +`, + expectedError: "schema key \"$schema\" not compatible with this version of juju", + }, { + description: "Reject JSON-Schema containing references.", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: + outfile: { $ref: "http://json-schema.org/draft-03/schema#" } +`, + expectedError: "schema key \"$ref\" not compatible with this version of juju", + }, { + description: "Malformed YAML: missing key in \"outfile\".", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: + outfile: + The file to write out to. + type: string + default: foo.bz2 +`, + + expectedError: "YAML error: line 6: mapping values are not allowed in this context", + }, { + description: "Malformed JSON-Schema: $schema element misplaced.", + yaml: ` +snapshot: +description: Take a snapshot of the database. + params: + outfile: + $schema: http://json-schema.org/draft-03/schema# + description: The file to write out to. 
+ type: string + default: foo.bz2 +`, + + expectedError: "YAML error: line 3: mapping values are not allowed in this context", + }, { + description: "Malformed Actions: hyphen at beginning of action name.", + yaml: ` +-snapshot: + description: Take a snapshot of the database. +`, + + expectedError: "bad action name -snapshot", + }, { + description: "Malformed Actions: hyphen after action name.", + yaml: ` +snapshot-: + description: Take a snapshot of the database. +`, + + expectedError: "bad action name snapshot-", + }, { + description: "Malformed Actions: caps in action name.", + yaml: ` +Snapshot: + description: Take a snapshot of the database. +`, + + expectedError: "bad action name Snapshot", + }, { + description: "A non-string description fails to parse", + yaml: ` +snapshot: + description: ["Take a snapshot of the database."] +`, + expectedError: "value for schema key \"description\" must be a string", + }, { + description: "A non-list \"required\" key", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: + outfile: + description: "The file to write out to." + type: string + required: "outfile" +`, + expectedError: "value for schema key \"required\" must be a YAML list", + }, { + description: "A schema with an empty \"params\" key fails to parse", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: +`, + expectedError: "params failed to parse as a map", + }, { + description: "A schema with a non-map \"params\" value fails to parse", + yaml: ` +snapshot: + description: Take a snapshot of the database. + params: ["a", "b"] +`, + expectedError: "params failed to parse as a map", + }, { + description: "\"definitions\" goes against JSON-Schema definition", + yaml: ` +snapshot: + description: "Take a snapshot of the database." + params: + outfile: + description: "The file to write out to." + type: "string" + definitions: + diskdevice: ["a"] + something-else: {"a": "b"} +`, + expectedError: "invalid params schema for action schema snapshot: definitions must be of type array of schemas", + }, { + description: "excess keys not in the JSON-Schema spec will be rejected", + yaml: ` +snapshot: + description: "Take a snapshot of the database." + params: + outfile: + description: "The file to write out to." + type: "string" + compression-quality: + description: "The compression quality." 
+ type: "integer" + minimum: 0 + maximum: 9 + exclusiveMaximum: false + definitions: + diskdevice: {} + something-else: {} + other-key: ["some", "values"], +`, + expectedError: "YAML error: line 16: did not find expected key", + }} + + for i, test := range badActionsYamlTests { + c.Logf("test %d: %s", i, test.description) + reader := bytes.NewReader([]byte(test.yaml)) + _, err := ReadActionsYaml(reader) + c.Assert(err, gc.NotNil) + c.Check(err.Error(), gc.Equals, test.expectedError) + } +} + +func (s *ActionsSuite) TestRecurseMapOnKeys(c *gc.C) { + tests := []struct { + should string + givenKeys []string + givenMap map[string]interface{} + expected interface{} + shouldFail bool + }{{ + should: "fail if the specified key was not in the map", + givenKeys: []string{"key", "key2"}, + givenMap: map[string]interface{}{ + "key": map[string]interface{}{ + "key": "value", + }, + }, + shouldFail: true, + }, { + should: "fail if a key was not a string", + givenKeys: []string{"key", "key2"}, + givenMap: map[string]interface{}{ + "key": map[interface{}]interface{}{ + 5: "value", + }, + }, + shouldFail: true, + }, { + should: "fail if we have more keys but not a recursable val", + givenKeys: []string{"key", "key2"}, + givenMap: map[string]interface{}{ + "key": []string{"a", "b", "c"}, + }, + shouldFail: true, + }, { + should: "retrieve a good value", + givenKeys: []string{"key", "key2"}, + givenMap: map[string]interface{}{ + "key": map[string]interface{}{ + "key2": "value", + }, + }, + expected: "value", + }, { + should: "retrieve a map", + givenKeys: []string{"key"}, + givenMap: map[string]interface{}{ + "key": map[string]interface{}{ + "key": "value", + }, + }, + expected: map[string]interface{}{ + "key": "value", + }, + }, { + should: "retrieve a slice", + givenKeys: []string{"key"}, + givenMap: map[string]interface{}{ + "key": []string{"a", "b", "c"}, + }, + expected: []string{"a", "b", "c"}, + }} + + for i, t := range tests { + c.Logf("test %d: should %s\n map: %#v\n keys: %#v", i, t.should, t.givenMap, t.givenKeys) + obtained, failed := recurseMapOnKeys(t.givenKeys, t.givenMap) + c.Assert(!failed, gc.Equals, t.shouldFail) + if !t.shouldFail { + c.Check(obtained, jc.DeepEquals, t.expected) + } + } +} + +func (s *ActionsSuite) TestInsertDefaultValues(c *gc.C) { + schemas := map[string]string{ + "simple": ` +act: + params: + val: + type: string + default: somestr +`[1:], + "complicated": ` +act: + params: + val: + type: object + properties: + foo: + type: string + bar: + type: object + properties: + baz: + type: string + default: boz +`[1:], + "default-object": ` +act: + params: + val: + type: object + default: + foo: bar + bar: + baz: woz +`[1:], + "none": ` +act: + params: + val: + type: object + properties: + var: + type: object + properties: + x: + type: string +`[1:]} + + for i, t := range []struct { + should string + schema string + withParams map[string]interface{} + expectedResult map[string]interface{} + expectedError string + }{{ + should: "error with no schema", + expectedError: "schema must be of type object", + }, { + should: "create a map if handed nil", + schema: schemas["none"], + withParams: nil, + expectedResult: map[string]interface{}{}, + }, { + should: "create and fill target if handed nil", + schema: schemas["simple"], + withParams: nil, + expectedResult: map[string]interface{}{"val": "somestr"}, + }, { + should: "create a simple default value", + schema: schemas["simple"], + withParams: map[string]interface{}{}, + expectedResult: map[string]interface{}{"val": "somestr"}, + }, { 
+ should: "do nothing for no default value",
+ schema: schemas["none"],
+ withParams: map[string]interface{}{},
+ expectedResult: map[string]interface{}{},
+ }, {
+ should: "insert a default value within a nested map",
+ schema: schemas["complicated"],
+ withParams: map[string]interface{}{},
+ expectedResult: map[string]interface{}{
+ "val": map[string]interface{}{
+ "bar": map[string]interface{}{
+ "baz": "boz",
+ }}},
+ }, {
+ should: "create a default value which is an object",
+ schema: schemas["default-object"],
+ withParams: map[string]interface{}{},
+ expectedResult: map[string]interface{}{
+ "val": map[string]interface{}{
+ "foo": "bar",
+ "bar": map[string]interface{}{
+ "baz": "woz",
+ }}},
+ }, {
+ should: "not overwrite existing values with default objects",
+ schema: schemas["default-object"],
+ withParams: map[string]interface{}{"val": 5},
+ expectedResult: map[string]interface{}{"val": 5},
+ }, {
+ should: "interleave defaults into existing objects",
+ schema: schemas["complicated"],
+ withParams: map[string]interface{}{
+ "val": map[string]interface{}{
+ "foo": "bar",
+ "bar": map[string]interface{}{
+ "faz": "foz",
+ }}},
+ expectedResult: map[string]interface{}{
+ "val": map[string]interface{}{
+ "foo": "bar",
+ "bar": map[string]interface{}{
+ "baz": "boz",
+ "faz": "foz",
+ }}},
+ }} {
+ c.Logf("test %d: should %s", i, t.should)
+ schema := getSchemaForAction(c, t.schema)
+ // Exercise the method under test.
+ result, err := schema.InsertDefaults(t.withParams)
+ if t.expectedError != "" {
+ c.Check(err, gc.ErrorMatches, t.expectedError)
+ continue
+ }
+ c.Assert(err, jc.ErrorIsNil)
+ c.Check(result, jc.DeepEquals, t.expectedResult)
+ }
+}
+
+func getSchemaForAction(c *gc.C, wholeSchema string) ActionSpec {
+ // Load up the YAML schema definition.
+ reader := bytes.NewReader([]byte(wholeSchema))
+ loadedActions, err := ReadActionsYaml(reader)
+ c.Assert(err, gc.IsNil)
+ // Same action name for all tests, "act".
+ return loadedActions.ActionSpecs["act"]
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/bundle.go'
--- src/gopkg.in/juju/charm.v6-unstable/bundle.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/bundle.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,29 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package charm
+
+import "os"
+
+// The Bundle interface is implemented by any type that
+// may be handled as a bundle. It encapsulates all
+// the data of a bundle.
+type Bundle interface {
+ // Data returns the contents of the bundle's bundle.yaml file.
+ Data() *BundleData
+ // ReadMe returns the contents of the bundle's README.md file.
+ ReadMe() string
+}
+
+// ReadBundle reads a Bundle from path, which can point to either a
+// bundle archive or a bundle directory.
+func ReadBundle(path string) (Bundle, error) {
+ info, err := os.Stat(path)
+ if err != nil {
+ return nil, err
+ }
+ if info.IsDir() {
+ return ReadBundleDir(path)
+ }
+ return ReadBundleArchive(path)
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/bundle_test.go'
--- src/gopkg.in/juju/charm.v6-unstable/bundle_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/bundle_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,74 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package charm_test
+
+import (
+ "github.com/juju/testing"
+ jc "github.com/juju/testing/checkers"
+ gc "gopkg.in/check.v1"
+
+ "gopkg.in/juju/charm.v6-unstable"
+)
+
+var _ = gc.Suite(&BundleSuite{})
+
+type BundleSuite struct {
+ testing.IsolationSuite
+}
+
+func (*BundleSuite) TestReadBundleDir(c *gc.C) {
+ path := bundleDirPath(c, "wordpress-simple")
+ b, err := charm.ReadBundle(path)
+ c.Assert(err, gc.IsNil)
+ c.Assert(b, gc.FitsTypeOf, (*charm.BundleDir)(nil))
+ checkWordpressBundle(c, b, path)
+}
+
+func (*BundleSuite) TestReadBundleArchive(c *gc.C) {
+ path := archivePath(c, readBundleDir(c, "wordpress-simple"))
+ b, err := charm.ReadBundle(path)
+ c.Assert(err, gc.IsNil)
+ c.Assert(b, gc.FitsTypeOf, (*charm.BundleArchive)(nil))
+ checkWordpressBundle(c, b, path)
+}
+
+func checkWordpressBundle(c *gc.C, b charm.Bundle, path string) {
+ // Load the charms required by the bundle.
+ wordpressCharm := readCharmDir(c, "wordpress")
+ mysqlCharm := readCharmDir(c, "mysql")
+
+ bd := b.Data()
+ c.Assert(bd.RequiredCharms(), jc.DeepEquals, []string{"mysql", "wordpress"})
+
+ charms := map[string]charm.Charm{
+ "wordpress": wordpressCharm,
+ "mysql": mysqlCharm,
+ }
+ err := bd.VerifyWithCharms(verifyOk, nil, charms)
+ c.Assert(err, gc.IsNil)
+
+ c.Assert(bd.Services, jc.DeepEquals, map[string]*charm.ServiceSpec{
+ "wordpress": {
+ Charm: "wordpress",
+ },
+ "mysql": {
+ Charm: "mysql",
+ NumUnits: 1,
+ },
+ })
+ c.Assert(bd.Relations, jc.DeepEquals, [][]string{
+ {"wordpress:db", "mysql:server"},
+ })
+ c.Assert(b.ReadMe(), gc.Equals, "A dummy bundle\n")
+ switch b := b.(type) {
+ case *charm.BundleArchive:
+ c.Assert(b.Path, gc.Equals, path)
+ case *charm.BundleDir:
+ c.Assert(b.Path, gc.Equals, path)
+ }
+}
+
+func verifyOk(string) error {
+ return nil
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/bundlearchive.go'
--- src/gopkg.in/juju/charm.v6-unstable/bundlearchive.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/bundlearchive.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,99 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package charm
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ ziputil "github.com/juju/utils/zip"
+)
+
+type BundleArchive struct {
+ zopen zipOpener
+
+ Path string
+ data *BundleData
+ readMe string
+}
+
+// ReadBundleArchive reads a bundle archive from the given file path.
+func ReadBundleArchive(path string) (*BundleArchive, error) {
+ a, err := readBundleArchive(newZipOpenerFromPath(path))
+ if err != nil {
+ return nil, err
+ }
+ a.Path = path
+ return a, nil
+}
+
+// ReadBundleArchiveBytes reads a bundle archive from the given byte
+// slice.
+func ReadBundleArchiveBytes(data []byte) (*BundleArchive, error) {
+ zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data)))
+ return readBundleArchive(zopener)
+}
+
+// ReadBundleArchiveFromReader returns a BundleArchive that uses
+// r to read the bundle. The given size must hold the number
+// of available bytes in the file.
+//
+// Note that the caller is responsible for closing r - methods on
+// the returned BundleArchive may fail after that.
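+//
+// A minimal usage sketch (the archive file name is hypothetical
+// and error handling is elided):
+//
+//	f, _ := os.Open("wordpress-simple.bundle")
+//	defer f.Close()
+//	info, _ := f.Stat()
+//	a, _ := ReadBundleArchiveFromReader(f, info.Size())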
+func ReadBundleArchiveFromReader(r io.ReaderAt, size int64) (*BundleArchive, error) {
+ return readBundleArchive(newZipOpenerFromReader(r, size))
+}
+
+func readBundleArchive(zopen zipOpener) (*BundleArchive, error) {
+ a := &BundleArchive{
+ zopen: zopen,
+ }
+ zipr, err := zopen.openZip()
+ if err != nil {
+ return nil, err
+ }
+ defer zipr.Close()
+ reader, err := zipOpenFile(zipr, "bundle.yaml")
+ if err != nil {
+ return nil, err
+ }
+ a.data, err = ReadBundleData(reader)
+ reader.Close()
+ if err != nil {
+ return nil, err
+ }
+ reader, err = zipOpenFile(zipr, "README.md")
+ if err != nil {
+ return nil, err
+ }
+ readMe, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return nil, err
+ }
+ a.readMe = string(readMe)
+ return a, nil
+}
+
+// Data implements Bundle.Data.
+func (a *BundleArchive) Data() *BundleData {
+ return a.data
+}
+
+// ReadMe implements Bundle.ReadMe.
+func (a *BundleArchive) ReadMe() string {
+ return a.readMe
+}
+
+// ExpandTo expands the bundle archive into dir, creating it if necessary.
+// If any errors occur during the expansion procedure, the process will
+// abort.
+func (a *BundleArchive) ExpandTo(dir string) error {
+ zipr, err := a.zopen.openZip()
+ if err != nil {
+ return err
+ }
+ defer zipr.Close()
+ return ziputil.ExtractAll(zipr.Reader, dir)
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/bundlearchive_test.go'
--- src/gopkg.in/juju/charm.v6-unstable/bundlearchive_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/bundlearchive_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,98 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package charm_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ gc "gopkg.in/check.v1"
+
+ "gopkg.in/juju/charm.v6-unstable"
+)
+
+var _ = gc.Suite(&BundleArchiveSuite{})
+
+type BundleArchiveSuite struct {
+ archivePath string
+}
+
+func (s *BundleArchiveSuite) SetUpSuite(c *gc.C) {
+ s.archivePath = archivePath(c, readBundleDir(c, "wordpress-simple"))
+}
+
+func (s *BundleArchiveSuite) TestReadBundleArchive(c *gc.C) {
+ archive, err := charm.ReadBundleArchive(s.archivePath)
+ c.Assert(err, gc.IsNil)
+ checkWordpressBundle(c, archive, s.archivePath)
+}
+
+func (s *BundleArchiveSuite) TestReadBundleArchiveBytes(c *gc.C) {
+ data, err := ioutil.ReadFile(s.archivePath)
+ c.Assert(err, gc.IsNil)
+
+ archive, err := charm.ReadBundleArchiveBytes(data)
+ c.Assert(err, gc.IsNil)
+ checkWordpressBundle(c, archive, "")
+}
+
+func (s *BundleArchiveSuite) TestReadBundleArchiveFromReader(c *gc.C) {
+ f, err := os.Open(s.archivePath)
+ c.Assert(err, gc.IsNil)
+ defer f.Close()
+ info, err := f.Stat()
+ c.Assert(err, gc.IsNil)
+
+ archive, err := charm.ReadBundleArchiveFromReader(f, info.Size())
+ c.Assert(err, gc.IsNil)
+ checkWordpressBundle(c, archive, "")
+}
+
+func (s *BundleArchiveSuite) TestReadBundleArchiveWithoutBundleYAML(c *gc.C) {
+ testReadBundleArchiveWithoutFile(c, "bundle.yaml")
+}
+
+func (s *BundleArchiveSuite) TestReadBundleArchiveWithoutREADME(c *gc.C) {
+ testReadBundleArchiveWithoutFile(c, "README.md")
+}
+
+func testReadBundleArchiveWithoutFile(c *gc.C, fileToRemove string) {
+ path := cloneDir(c, bundleDirPath(c, "wordpress-simple"))
+ dir, err := charm.ReadBundleDir(path)
+ c.Assert(err, gc.IsNil)
+
+ // Remove the file from the bundle directory.
+ // ArchiveTo just zips the contents of the directory as-is,
+ // so the resulting bundle archive will not contain the
+ // file.
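+ // ReadBundleArchive on the resulting archive should therefore
+ // fail with a "not found" error for the removed file, which is
+ // asserted below.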
+ err = os.Remove(filepath.Join(dir.Path, fileToRemove))
+ c.Assert(err, gc.IsNil)
+
+ archivePath := filepath.Join(c.MkDir(), "out.bundle")
+ dstf, err := os.Create(archivePath)
+ c.Assert(err, gc.IsNil)
+
+ err = dir.ArchiveTo(dstf)
+ dstf.Close()
+ c.Assert(err, gc.IsNil)
+
+ archive, err := charm.ReadBundleArchive(archivePath)
+ // Slightly dubious assumption: the quoted file name has no
+ // regexp metacharacters worth worrying about.
+ c.Assert(err, gc.ErrorMatches, fmt.Sprintf("archive file %q not found", fileToRemove))
+ c.Assert(archive, gc.IsNil)
+}
+
+func (s *BundleArchiveSuite) TestExpandTo(c *gc.C) {
+ dir := c.MkDir()
+ archive, err := charm.ReadBundleArchive(s.archivePath)
+ c.Assert(err, gc.IsNil)
+ err = archive.ExpandTo(dir)
+ c.Assert(err, gc.IsNil)
+ bdir, err := charm.ReadBundleDir(dir)
+ c.Assert(err, gc.IsNil)
+ c.Assert(bdir.ReadMe(), gc.Equals, archive.ReadMe())
+ c.Assert(bdir.Data(), gc.DeepEquals, archive.Data())
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/bundledata.go'
--- src/gopkg.in/juju/charm.v6-unstable/bundledata.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/bundledata.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,829 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package charm
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/juju/names"
+ "gopkg.in/yaml.v1"
+)
+
+// BundleData holds the contents of the bundle.
+type BundleData struct {
+ // Services holds one entry for each service
+ // that the bundle will create, indexed by
+ // the service name.
+ Services map[string]*ServiceSpec
+
+ // Machines holds one entry for each machine referred to
+ // by unit placements. These will be mapped onto actual
+ // machines at bundle deployment time.
+ // It is an error if a machine is specified but
+ // not referred to by a unit placement directive.
+ Machines map[string]*MachineSpec `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Series holds the default series to use when
+ // the bundle chooses charms.
+ Series string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Relations holds a slice of 2-element slices,
+ // each specifying a relation between two services.
+ // Each two-element slice holds two endpoints,
+ // each specified as either a colon-separated
+ // (service, relation) pair or just a service name.
+ // The relation is made between each. If the relation
+ // name is omitted, it will be inferred from the available
+ // relations defined in the services' charms.
+ Relations [][]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Whitelisted set of tags to categorize bundles, as we do for charms.
+ Tags []string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Short paragraph explaining what the bundle is useful for.
+ Description string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+}
+
+// MachineSpec represents a notional machine that will be mapped
+// onto an actual machine at bundle deployment time.
+type MachineSpec struct {
+ Constraints string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+ Annotations map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+ Series string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+}
+
+// ServiceSpec represents a single service that will
+// be deployed as part of the bundle.
+type ServiceSpec struct {
+ // Charm holds the charm URL of the charm to
+ // use for the given service.
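+ // For example: "cs:precise/mysql-28".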
+ Charm string
+
+ // NumUnits holds the number of units of the
+ // service that will be deployed.
+ //
+ // For a subordinate service, this actually represents
+ // an arbitrary number of units depending on
+ // the service it is related to.
+ NumUnits int `yaml:"num_units,omitempty" json:",omitempty"`
+
+ // To may hold up to NumUnits members with
+ // each member specifying a desired placement
+ // for the respective unit of the service.
+ //
+ // In regular-expression-like notation, each
+ // element matches the following pattern:
+ //
+ //	(<containertype>:)?(<unit>|<machine>|new)
+ //
+ // If containertype is specified, the unit is deployed
+ // into a new container of that type, otherwise
+ // it will be "hulk-smashed" into the specified location,
+ // by co-locating it with any other units that happen to
+ // be there, which may result in unintended behavior.
+ //
+ // The second part (after the colon) specifies where
+ // the new unit should be placed - it may refer to
+ // a unit of another service specified in the bundle,
+ // a machine id specified in the machines section,
+ // or the special name "new" which specifies a newly
+ // created machine.
+ //
+ // A unit placement may be specified with a service name only,
+ // in which case its unit number is assumed to
+ // be one more than the unit number of the previous
+ // unit in the list with the same service, or zero
+ // if there were none.
+ //
+ // If there are fewer elements in To than NumUnits,
+ // the last element is replicated to fill it. If there
+ // are no elements (or To is omitted), "new" is replicated.
+ //
+ // For example:
+ //
+ //	wordpress/0 wordpress/1 lxc:0 kvm:new
+ //
+ // specifies that the first two units get hulk-smashed
+ // onto the first two units of the wordpress service,
+ // the third unit gets allocated onto an lxc container
+ // on machine 0, and subsequent units get allocated
+ // on kvm containers on new machines.
+ //
+ // The above example is the same as this:
+ //
+ //	wordpress wordpress lxc:0 kvm:new
+ To []string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Expose holds whether the service must be exposed.
+ Expose bool `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Options holds the configuration values
+ // to apply to the new service. They should
+ // be compatible with the charm configuration.
+ Options map[string]interface{} `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Annotations holds any annotations to apply to the
+ // service when deployed.
+ Annotations map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Constraints holds the default constraints to apply
+ // when creating new machines for units of the service.
+ // This is ignored for units with explicit placement directives.
+ Constraints string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // Storage holds the constraints for storage to assign
+ // to units of the service.
+ Storage map[string]string `bson:",omitempty" json:",omitempty" yaml:",omitempty"`
+
+ // EndpointBindings maps how endpoints are bound to spaces.
+ EndpointBindings map[string]string `bson:"bindings,omitempty" json:"bindings,omitempty" yaml:"bindings,omitempty"`
+}
+
+// ReadBundleData reads bundle data from the given reader.
+// The returned data is not verified - call Verify to ensure
+// that it is OK.
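+//
+// A minimal usage sketch (bundleYAML is a hypothetical YAML
+// document held in a string; error handling is elided):
+//
+//	bd, _ := ReadBundleData(strings.NewReader(bundleYAML))
+//	err := bd.Verify(nil, nil)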
+func ReadBundleData(r io.Reader) (*BundleData, error) { + bytes, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var bd BundleData + if err := yaml.Unmarshal(bytes, &bd); err != nil { + return nil, fmt.Errorf("cannot unmarshal bundle data: %v", err) + } + return &bd, nil +} + +// VerificationError holds an error generated by BundleData.Verify, +// holding all the verification errors found when verifying. +type VerificationError struct { + Errors []error +} + +func (err *VerificationError) Error() string { + switch len(err.Errors) { + case 0: + return "no verification errors!" + case 1: + return err.Errors[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", err.Errors[0], len(err.Errors)-1) +} + +type bundleDataVerifier struct { + bd *BundleData + + // machines holds the reference counts of all machines + // as referred to by placement directives. + machineRefCounts map[string]int + + charms map[string]Charm + + errors []error + verifyConstraints func(c string) error + verifyStorage func(s string) error +} + +func (verifier *bundleDataVerifier) addErrorf(f string, a ...interface{}) { + verifier.addError(fmt.Errorf(f, a...)) +} + +func (verifier *bundleDataVerifier) addError(err error) { + verifier.errors = append(verifier.errors, err) +} + +func (verifier *bundleDataVerifier) err() error { + if len(verifier.errors) > 0 { + return &VerificationError{verifier.errors} + } + return nil +} + +// RequiredCharms returns a sorted slice of all the charm URLs +// required by the bundle. +func (bd *BundleData) RequiredCharms() []string { + req := make([]string, 0, len(bd.Services)) + for _, svc := range bd.Services { + req = append(req, svc.Charm) + } + sort.Strings(req) + return req +} + +// Verify is a convenience method that calls VerifyWithCharms +// with a nil charms map. +func (bd *BundleData) Verify( + verifyConstraints func(c string) error, + verifyStorage func(s string) error, +) error { + return bd.VerifyWithCharms(verifyConstraints, verifyStorage, nil) +} + +// VerifyWithCharms verifies that the bundle is consistent. +// The verifyConstraints function is called to verify any constraints +// that are found. If verifyConstraints is nil, no checking +// of constraints will be done. Similarly, a non-nil verifyStorage +// function is called to verify any storage constraints. +// +// It verifies the following: +// +// - All defined machines are referred to by placement directives. +// - All services referred to by placement directives are specified in the bundle. +// - All services referred to by relations are specified in the bundle. +// - All basic constraints are valid. +// - All storage constraints are valid. +// +// If charms is not nil, it should hold a map with an entry for each +// charm url returned by bd.RequiredCharms. The verification will then +// also check that services are defined with valid charms, +// relations are correctly made and options are defined correctly. +// +// If the verification fails, Verify returns a *VerificationError describing +// all the problems found. 
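+//
+// For illustration, a sketch (mysqlCharm and wordpressCharm are
+// placeholders for charms loaded elsewhere):
+//
+//	charms := map[string]Charm{
+//		"mysql":     mysqlCharm,
+//		"wordpress": wordpressCharm,
+//	}
+//	if err := bd.VerifyWithCharms(nil, nil, charms); err != nil {
+//		for _, e := range err.(*VerificationError).Errors {
+//			fmt.Println(e)
+//		}
+//	}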
+func (bd *BundleData) VerifyWithCharms( + verifyConstraints func(c string) error, + verifyStorage func(s string) error, + charms map[string]Charm, +) error { + if verifyConstraints == nil { + verifyConstraints = func(string) error { + return nil + } + } + if verifyStorage == nil { + verifyStorage = func(string) error { + return nil + } + } + verifier := &bundleDataVerifier{ + verifyConstraints: verifyConstraints, + verifyStorage: verifyStorage, + bd: bd, + machineRefCounts: make(map[string]int), + charms: charms, + } + for id := range bd.Machines { + verifier.machineRefCounts[id] = 0 + } + if bd.Series != "" && !IsValidSeries(bd.Series) { + verifier.addErrorf("bundle declares an invalid series %q", bd.Series) + } + verifier.verifyMachines() + verifier.verifyServices() + verifier.verifyRelations() + verifier.verifyOptions() + verifier.verifyEndpointBindings() + + for id, count := range verifier.machineRefCounts { + if count == 0 { + verifier.addErrorf("machine %q is not referred to by a placement directive", id) + } + } + return verifier.err() +} + +var ( + validMachineId = regexp.MustCompile("^" + names.NumberSnippet + "$") + validStorageName = regexp.MustCompile("^" + names.StorageNameSnippet + "$") +) + +func (verifier *bundleDataVerifier) verifyMachines() { + for id, m := range verifier.bd.Machines { + if !validMachineId.MatchString(id) { + verifier.addErrorf("invalid machine id %q found in machines", id) + } + if m == nil { + continue + } + if m.Constraints != "" { + if err := verifier.verifyConstraints(m.Constraints); err != nil { + verifier.addErrorf("invalid constraints %q in machine %q: %v", m.Constraints, id, err) + } + } + if m.Series != "" && !IsValidSeries(m.Series) { + verifier.addErrorf("invalid series %s for machine %q", m.Series, id) + } + } +} + +func (verifier *bundleDataVerifier) verifyServices() { + if len(verifier.bd.Services) == 0 { + verifier.addErrorf("at least one service must be specified") + return + } + for name, svc := range verifier.bd.Services { + if _, err := ParseURL(svc.Charm); err != nil { + verifier.addErrorf("invalid charm URL in service %q: %v", name, err) + } + if err := verifier.verifyConstraints(svc.Constraints); err != nil { + verifier.addErrorf("invalid constraints %q in service %q: %v", svc.Constraints, name, err) + } + for storageName, storageConstraints := range svc.Storage { + if !validStorageName.MatchString(storageName) { + verifier.addErrorf("invalid storage name %q in service %q", storageName, name) + } + if err := verifier.verifyStorage(storageConstraints); err != nil { + verifier.addErrorf("invalid storage %q in service %q: %v", storageName, name, err) + } + } + if verifier.charms != nil { + if ch, ok := verifier.charms[svc.Charm]; ok { + if ch.Meta().Subordinate { + if len(svc.To) > 0 { + verifier.addErrorf("service %q is subordinate but specifies unit placement", name) + } + if svc.NumUnits > 0 { + verifier.addErrorf("service %q is subordinate but has non-zero num_units", name) + } + } + } else { + verifier.addErrorf("service %q refers to non-existent charm %q", name, svc.Charm) + } + } + if svc.NumUnits < 0 { + verifier.addErrorf("negative number of units specified on service %q", name) + } else if len(svc.To) > svc.NumUnits { + verifier.addErrorf("too many units specified in unit placement for service %q", name) + } + verifier.verifyPlacement(svc.To) + } +} + +func (verifier *bundleDataVerifier) verifyPlacement(to []string) { + for _, p := range to { + up, err := ParsePlacement(p) + if err != nil { + verifier.addError(err) + 
continue + } + switch { + case up.Service != "": + spec, ok := verifier.bd.Services[up.Service] + if !ok { + verifier.addErrorf("placement %q refers to a service not defined in this bundle", p) + continue + } + if up.Unit >= 0 && up.Unit >= spec.NumUnits { + verifier.addErrorf("placement %q specifies a unit greater than the %d unit(s) started by the target service", p, spec.NumUnits) + } + case up.Machine == "new": + default: + _, ok := verifier.bd.Machines[up.Machine] + if !ok { + verifier.addErrorf("placement %q refers to a machine not defined in this bundle", p) + continue + } + verifier.machineRefCounts[up.Machine]++ + } + } +} + +func (verifier *bundleDataVerifier) getCharmMetaForService(svcName string) (*Meta, error) { + svc, ok := verifier.bd.Services[svcName] + if !ok { + return nil, fmt.Errorf("service %q not found", svcName) + } + ch, ok := verifier.charms[svc.Charm] + if !ok { + return nil, fmt.Errorf("charm %q from service %q not found", svc.Charm, svcName) + } + return ch.Meta(), nil +} + +func (verifier *bundleDataVerifier) verifyRelations() { + seen := make(map[[2]endpoint]bool) + for _, relPair := range verifier.bd.Relations { + if len(relPair) != 2 { + verifier.addErrorf("relation %q has %d endpoint(s), not 2", relPair, len(relPair)) + continue + } + var epPair [2]endpoint + relParseErr := false + for i, svcRel := range relPair { + ep, err := parseEndpoint(svcRel) + if err != nil { + verifier.addError(err) + relParseErr = true + continue + } + if _, ok := verifier.bd.Services[ep.service]; !ok { + verifier.addErrorf("relation %q refers to service %q not defined in this bundle", relPair, ep.service) + } + epPair[i] = ep + } + if relParseErr { + // We failed to parse at least one relation, so don't + // bother checking further. + continue + } + if epPair[0].service == epPair[1].service { + verifier.addErrorf("relation %q relates a service to itself", relPair) + } + // Resolve endpoint relations if necessary and we have + // the necessary charm information. + if (epPair[0].relation == "" || epPair[1].relation == "") && verifier.charms != nil { + iep0, iep1, err := inferEndpoints(epPair[0], epPair[1], verifier.getCharmMetaForService) + if err != nil { + verifier.addErrorf("cannot infer endpoint between %s and %s: %v", epPair[0], epPair[1], err) + } else { + // Change the endpoints that get recorded + // as seen, so we'll diagnose a duplicate + // relation even if one relation specifies + // the relations explicitly and the other does + // not. + epPair[0], epPair[1] = iep0, iep1 + } + } + + // Re-order pairs so that we diagnose duplicate relations + // whichever way they're specified. + if epPair[1].less(epPair[0]) { + epPair[1], epPair[0] = epPair[0], epPair[1] + } + if _, ok := seen[epPair]; ok { + verifier.addErrorf("relation %q is defined more than once", relPair) + } + if verifier.charms != nil && epPair[0].relation != "" && epPair[1].relation != "" { + // We have charms to verify against, and the + // endpoint has been fully specified or inferred. 
+ verifier.verifyRelation(epPair[0], epPair[1])
+ }
+ seen[epPair] = true
+ }
+}
+
+func (verifier *bundleDataVerifier) verifyEndpointBindings() {
+ for name, svc := range verifier.bd.Services {
+ charm, ok := verifier.charms[name]
+ // Only test the ok path here because the !ok path is tested in verifyServices.
+ if !ok {
+ continue
+ }
+ for endpoint, space := range svc.EndpointBindings {
+ _, matchedProvides := charm.Meta().Provides[endpoint]
+ _, matchedRequires := charm.Meta().Requires[endpoint]
+ _, matchedPeers := charm.Meta().Peers[endpoint]
+
+ if !(matchedProvides || matchedRequires || matchedPeers) {
+ verifier.addErrorf(
+ "service %q wants to bind endpoint %q to space %q, "+
+ "but the endpoint is not defined by the charm",
+ name, endpoint, space)
+ }
+ }
+ }
+}
+
+var infoRelation = Relation{
+ Name: "juju-info",
+ Role: RoleProvider,
+ Interface: "juju-info",
+ Scope: ScopeContainer,
+}
+
+// verifyRelation verifies a single relation.
+// It checks that both endpoints of the relation are
+// defined, and that the relationship is correctly
+// symmetrical (provider to requirer) and shares
+// the same interface.
+func (verifier *bundleDataVerifier) verifyRelation(ep0, ep1 endpoint) {
+ svc0 := verifier.bd.Services[ep0.service]
+ svc1 := verifier.bd.Services[ep1.service]
+ if svc0 == nil || svc1 == nil || svc0 == svc1 {
+ // An error will be produced by verifyRelations for this case.
+ return
+ }
+ charm0 := verifier.charms[svc0.Charm]
+ charm1 := verifier.charms[svc1.Charm]
+ if charm0 == nil || charm1 == nil {
+ // An error will be produced by verifyServices for this case.
+ return
+ }
+ relProv0, okProv0 := charm0.Meta().Provides[ep0.relation]
+ // The juju-info relation is provided implicitly by every
+ // charm - use it if required.
+ if !okProv0 && ep0.relation == infoRelation.Name {
+ relProv0, okProv0 = infoRelation, true
+ }
+ relReq0, okReq0 := charm0.Meta().Requires[ep0.relation]
+ if !okProv0 && !okReq0 {
+ verifier.addErrorf("charm %q used by service %q does not define relation %q", svc0.Charm, ep0.service, ep0.relation)
+ }
+ relProv1, okProv1 := charm1.Meta().Provides[ep1.relation]
+ // The juju-info relation is provided implicitly by every
+ // charm - use it if required.
+ if !okProv1 && ep1.relation == infoRelation.Name {
+ relProv1, okProv1 = infoRelation, true
+ }
+ relReq1, okReq1 := charm1.Meta().Requires[ep1.relation]
+ if !okProv1 && !okReq1 {
+ verifier.addErrorf("charm %q used by service %q does not define relation %q", svc1.Charm, ep1.service, ep1.relation)
+ }
+
+ var relProv, relReq Relation
+ var epProv, epReq endpoint
+ switch {
+ case okProv0 && okReq1:
+ relProv, relReq = relProv0, relReq1
+ epProv, epReq = ep0, ep1
+ case okReq0 && okProv1:
+ relProv, relReq = relProv1, relReq0
+ epProv, epReq = ep1, ep0
+ case okProv0 && okProv1:
+ verifier.addErrorf("relation %q to %q relates provider to provider", ep0, ep1)
+ return
+ case okReq0 && okReq1:
+ verifier.addErrorf("relation %q to %q relates requirer to requirer", ep0, ep1)
+ return
+ default:
+ // Errors were added above.
+ return
+ }
+ if relProv.Interface != relReq.Interface {
+ verifier.addErrorf("mismatched interface between %q and %q (%q vs %q)", epProv, epReq, relProv.Interface, relReq.Interface)
+ }
+}
+
+// verifyOptions verifies that the options are correctly defined
+// with respect to the charm config options.
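+// For example, an option set in the bundle must exist in the
+// charm's configuration and validate against its declared type:
+// setting an int option such as skill-level to the string
+// "too much" is reported as an error.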
+func (verifier *bundleDataVerifier) verifyOptions() {
+ if verifier.charms == nil {
+ return
+ }
+ for svcName, svc := range verifier.bd.Services {
+ charm := verifier.charms[svc.Charm]
+ if charm == nil {
+ // An error will be produced by verifyServices for this case.
+ continue
+ }
+ config := charm.Config()
+ for name, value := range svc.Options {
+ opt, ok := config.Options[name]
+ if !ok {
+ verifier.addErrorf("cannot validate service %q: configuration option %q not found in charm %q", svcName, name, svc.Charm)
+ continue
+ }
+ _, err := opt.validate(name, value)
+ if err != nil {
+ verifier.addErrorf("cannot validate service %q: %v", svcName, err)
+ }
+ }
+ }
+}
+
+var validServiceRelation = regexp.MustCompile("^(" + names.ServiceSnippet + "):(" + names.RelationSnippet + ")$")
+
+type endpoint struct {
+ service string
+ relation string
+}
+
+func (ep endpoint) String() string {
+ if ep.relation == "" {
+ return ep.service
+ }
+ return fmt.Sprintf("%s:%s", ep.service, ep.relation)
+}
+
+func (ep1 endpoint) less(ep2 endpoint) bool {
+ if ep1.service == ep2.service {
+ return ep1.relation < ep2.relation
+ }
+ return ep1.service < ep2.service
+}
+
+func parseEndpoint(ep string) (endpoint, error) {
+ m := validServiceRelation.FindStringSubmatch(ep)
+ if m != nil {
+ return endpoint{
+ service: m[1],
+ relation: m[2],
+ }, nil
+ }
+ if !names.IsValidService(ep) {
+ return endpoint{}, fmt.Errorf("invalid relation syntax %q", ep)
+ }
+ return endpoint{
+ service: ep,
+ }, nil
+}
+
+// endpointInfo holds information about one endpoint of a relation.
+type endpointInfo struct {
+ serviceName string
+ Relation
+}
+
+// String returns the unique identifier of the relation endpoint.
+func (ep endpointInfo) String() string {
+ return ep.serviceName + ":" + ep.Name
+}
+
+// canRelateTo returns whether a relation may be established between ep
+// and other.
+func (ep endpointInfo) canRelateTo(other endpointInfo) bool {
+ return ep.serviceName != other.serviceName &&
+ ep.Interface == other.Interface &&
+ ep.Role != RolePeer &&
+ counterpartRole(ep.Role) == other.Role
+}
+
+// endpoint returns the endpoint specifier for ep.
+func (ep endpointInfo) endpoint() endpoint {
+ return endpoint{
+ service: ep.serviceName,
+ relation: ep.Name,
+ }
+}
+
+// counterpartRole returns the RelationRole that the given RelationRole
+// can relate to.
+func counterpartRole(r RelationRole) RelationRole {
+ switch r {
+ case RoleProvider:
+ return RoleRequirer
+ case RoleRequirer:
+ return RoleProvider
+ case RolePeer:
+ return RolePeer
+ }
+ panic(fmt.Errorf("unknown relation role %q", r))
+}
+
+type UnitPlacement struct {
+ // ContainerType holds the container type of the new
+ // unit, or empty if unspecified.
+ ContainerType string
+
+ // Machine holds the numeric machine id, or "new",
+ // or empty if the placement specifies a service.
+ Machine string
+
+ // Service holds the service name, or empty if
+ // the placement specifies a machine.
+ Service string
+
+ // Unit holds the unit number of the service, or -1
+ // if unspecified.
+ Unit int
+}
+
+var snippetReplacer = strings.NewReplacer(
+ "container", names.ContainerTypeSnippet,
+ "number", names.NumberSnippet,
+ "service", names.ServiceSnippet,
+)
+
+// validPlacement holds a regexp that matches valid placement requests. To
+// make the expression easier to comprehend and maintain, we replace
+// symbolic snippet references in the regexp by their actual regexps
+// using snippetReplacer.
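+//
+// For example, "lxc:service/0" matches with container type "lxc",
+// service "service" and unit 0, while "new" matches as the special
+// new-machine placeholder.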
+var validPlacement = regexp.MustCompile( + snippetReplacer.Replace( + "^(?:(container):)?(?:(service)(?:/(number))?|(number))$", + ), +) + +// ParsePlacement parses a unit placement directive, as +// specified in the To clause of a service entry in the +// services section of a bundle. +func ParsePlacement(p string) (*UnitPlacement, error) { + m := validPlacement.FindStringSubmatch(p) + if m == nil { + return nil, fmt.Errorf("invalid placement syntax %q", p) + } + up := UnitPlacement{ + ContainerType: m[1], + Service: m[2], + Machine: m[4], + } + if unitStr := m[3]; unitStr != "" { + // We know that unitStr must be a valid integer because + // it's specified as such in the regexp. + up.Unit, _ = strconv.Atoi(unitStr) + } else { + up.Unit = -1 + } + if up.Service == "new" { + if up.Unit != -1 { + return nil, fmt.Errorf("invalid placement syntax %q", p) + } + up.Machine, up.Service = "new", "" + } + return &up, nil +} + +// inferEndpoints infers missing relation names from the given endpoint +// specifications, using the given get function to retrieve charm +// data if necessary. It returns the fully specified endpoints. +func inferEndpoints(epSpec0, epSpec1 endpoint, get func(svc string) (*Meta, error)) (endpoint, endpoint, error) { + if epSpec0.relation != "" && epSpec1.relation != "" { + // The endpoints are already specified explicitly so + // there is no need to fetch any charm data to infer + // them. + return epSpec0, epSpec1, nil + } + eps0, err := possibleEndpoints(epSpec0, get) + if err != nil { + return endpoint{}, endpoint{}, err + } + eps1, err := possibleEndpoints(epSpec1, get) + if err != nil { + return endpoint{}, endpoint{}, err + } + var candidates [][]endpointInfo + for _, ep0 := range eps0 { + for _, ep1 := range eps1 { + if ep0.canRelateTo(ep1) { + candidates = append(candidates, []endpointInfo{ep0, ep1}) + } + } + } + switch len(candidates) { + case 0: + return endpoint{}, endpoint{}, fmt.Errorf("no relations found") + case 1: + return candidates[0][0].endpoint(), candidates[0][1].endpoint(), nil + } + + // There's ambiguity; try discarding implicit relations. + filtered := discardImplicitRelations(candidates) + if len(filtered) == 1 { + return filtered[0][0].endpoint(), filtered[0][1].endpoint(), nil + } + // The ambiguity cannot be resolved, so return an error. + var keys []string + for _, cand := range candidates { + keys = append(keys, fmt.Sprintf("%q", relationKey(cand))) + } + sort.Strings(keys) + return endpoint{}, endpoint{}, fmt.Errorf("ambiguous relation: %s %s could refer to %s", + epSpec0, epSpec1, strings.Join(keys, "; ")) +} + +func discardImplicitRelations(candidates [][]endpointInfo) [][]endpointInfo { + var filtered [][]endpointInfo +outer: + for _, cand := range candidates { + for _, ep := range cand { + if ep.IsImplicit() { + continue outer + } + } + filtered = append(filtered, cand) + } + return filtered +} + +// relationKey returns a string describing the relation defined by +// endpoints, for use in various contexts (including error messages). +func relationKey(endpoints []endpointInfo) string { + var names []string + for _, ep := range endpoints { + names = append(names, ep.String()) + } + sort.Strings(names) + return strings.Join(names, " ") +} + +// possibleEndpoints returns all the endpoints that the given endpoint spec +// could refer to. 
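+//
+// Note that the implicit juju-info provider relation is always
+// included in the result, so any service may act as a provider
+// for a juju-info requirer.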
+func possibleEndpoints(epSpec endpoint, get func(svc string) (*Meta, error)) ([]endpointInfo, error) { + meta, err := get(epSpec.service) + if err != nil { + return nil, err + } + + var eps []endpointInfo + add := func(r Relation) { + if epSpec.relation == "" || epSpec.relation == r.Name { + eps = append(eps, endpointInfo{ + serviceName: epSpec.service, + Relation: r, + }) + } + } + + for _, r := range meta.Provides { + add(r) + } + for _, r := range meta.Requires { + add(r) + } + // Every service implicitly provides a juju-info relation. + add(Relation{ + Name: "juju-info", + Role: RoleProvider, + Interface: "juju-info", + Scope: ScopeGlobal, + }) + return eps, nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go' --- src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/bundledata_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,930 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "fmt" + "sort" + "strings" + + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +type bundleDataSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&bundleDataSuite{}) + +const mediawikiBundle = ` +series: precise +services: + mediawiki: + charm: "cs:precise/mediawiki-10" + num_units: 1 + expose: true + options: + debug: false + name: Please set name of wiki + skin: vector + annotations: + "gui-x": 609 + "gui-y": -15 + storage: + valid-store: 10G + bindings: + db: db + website: public + mysql: + charm: "cs:precise/mysql-28" + num_units: 2 + to: [0, mediawiki/0] + options: + "binlog-format": MIXED + "block-size": 5 + "dataset-size": "80%" + flavor: distro + "ha-bindiface": eth0 + "ha-mcastport": 5411 + annotations: + "gui-x": 610 + "gui-y": 255 + constraints: "mem=8g" + bindings: + db: db +relations: + - ["mediawiki:db", "mysql:db"] + - ["mysql:foo", "mediawiki:bar"] +machines: + 0: + constraints: 'arch=amd64 mem=4g' + annotations: + foo: bar +tags: + - super + - awesome +description: | + Everything is awesome. Everything is cool when we work as a team. + Lovely day. 
+` + +var parseTests = []struct { + about string + data string + expectedBD *charm.BundleData + expectedErr string +}{{ + about: "mediawiki", + data: mediawikiBundle, + expectedBD: &charm.BundleData{ + Series: "precise", + Services: map[string]*charm.ServiceSpec{ + "mediawiki": { + Charm: "cs:precise/mediawiki-10", + NumUnits: 1, + Expose: true, + Options: map[string]interface{}{ + "debug": false, + "name": "Please set name of wiki", + "skin": "vector", + }, + Annotations: map[string]string{ + "gui-x": "609", + "gui-y": "-15", + }, + Storage: map[string]string{ + "valid-store": "10G", + }, + EndpointBindings: map[string]string{ + "db": "db", + "website": "public", + }, + }, + "mysql": { + Charm: "cs:precise/mysql-28", + NumUnits: 2, + To: []string{"0", "mediawiki/0"}, + Options: map[string]interface{}{ + "binlog-format": "MIXED", + "block-size": 5, + "dataset-size": "80%", + "flavor": "distro", + "ha-bindiface": "eth0", + "ha-mcastport": 5411, + }, + Annotations: map[string]string{ + "gui-x": "610", + "gui-y": "255", + }, + Constraints: "mem=8g", + EndpointBindings: map[string]string{ + "db": "db", + }, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "0": { + Constraints: "arch=amd64 mem=4g", + Annotations: map[string]string{ + "foo": "bar", + }, + }, + }, + Relations: [][]string{ + {"mediawiki:db", "mysql:db"}, + {"mysql:foo", "mediawiki:bar"}, + }, + Tags: []string{"super", "awesome"}, + Description: `Everything is awesome. Everything is cool when we work as a team. +Lovely day. +`, + }, +}, { + about: "relations specified with hyphens", + data: ` +relations: + - - "mediawiki:db" + - "mysql:db" + - - "mysql:foo" + - "mediawiki:bar" +`, + expectedBD: &charm.BundleData{ + Relations: [][]string{ + {"mediawiki:db", "mysql:db"}, + {"mysql:foo", "mediawiki:bar"}, + }, + }, +}} + +func (*bundleDataSuite) TestParse(c *gc.C) { + for i, test := range parseTests { + c.Logf("test %d: %s", i, test.about) + bd, err := charm.ReadBundleData(strings.NewReader(test.data)) + if test.expectedErr != "" { + c.Assert(err, gc.ErrorMatches, test.expectedErr) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(bd, jc.DeepEquals, test.expectedBD) + } +} + +var verifyErrorsTests = []struct { + about string + data string + errors []string +}{{ + about: "as many errors as possible", + data: ` +series: "9wrong" + +machines: + 0: + constraints: 'bad constraints' + annotations: + foo: bar + series: 'bad series' + bogus: + 3: +services: + mediawiki: + charm: "bogus:precise/mediawiki-10" + num_units: -4 + options: + debug: false + name: Please set name of wiki + skin: vector + annotations: + "gui-x": 609 + "gui-y": -15 + mysql: + charm: "cs:precise/mysql-28" + num_units: 2 + to: [0, mediawiki/0, nowhere/3, 2, "bad placement"] + options: + "binlog-format": MIXED + "block-size": 5 + "dataset-size": "80%" + flavor: distro + "ha-bindiface": eth0 + "ha-mcastport": 5411 + annotations: + "gui-x": 610 + "gui-y": 255 + constraints: "bad constraints" + wordpress: + charm: wordpress + ceph: + charm: ceph + storage: + valid-storage: 3,10G + no_underscores: 123 + ceph-osd: + charm: ceph-osd + storage: + invalid-storage: "bad storage constraints" +relations: + - ["mediawiki:db", "mysql:db"] + - ["mysql:foo", "mediawiki:bar"] + - ["arble:bar"] + - ["arble:bar", "mediawiki:db"] + - ["mysql:foo", "mysql:bar"] + - ["mysql:db", "mediawiki:db"] + - ["mediawiki/db", "mysql:db"] + - ["wordpress", "mysql"] +`, + errors: []string{ + `bundle declares an invalid series "9wrong"`, + `invalid storage name "no_underscores" in service "ceph"`, 
+ `invalid storage "invalid-storage" in service "ceph-osd": bad storage constraint`, + `machine "3" is not referred to by a placement directive`, + `machine "bogus" is not referred to by a placement directive`, + `invalid machine id "bogus" found in machines`, + `invalid constraints "bad constraints" in machine "0": bad constraint`, + `invalid charm URL in service "mediawiki": charm or bundle URL has invalid schema: "bogus:precise/mediawiki-10"`, + `invalid constraints "bad constraints" in service "mysql": bad constraint`, + `negative number of units specified on service "mediawiki"`, + `too many units specified in unit placement for service "mysql"`, + `placement "nowhere/3" refers to a service not defined in this bundle`, + `placement "mediawiki/0" specifies a unit greater than the -4 unit(s) started by the target service`, + `placement "2" refers to a machine not defined in this bundle`, + `relation ["arble:bar"] has 1 endpoint(s), not 2`, + `relation ["arble:bar" "mediawiki:db"] refers to service "arble" not defined in this bundle`, + `relation ["mysql:foo" "mysql:bar"] relates a service to itself`, + `relation ["mysql:db" "mediawiki:db"] is defined more than once`, + `invalid placement syntax "bad placement"`, + `invalid relation syntax "mediawiki/db"`, + `invalid series bad series for machine "0"`, + }, +}, { + about: "mediawiki should be ok", + data: mediawikiBundle, +}} + +func (*bundleDataSuite) TestVerifyErrors(c *gc.C) { + for i, test := range verifyErrorsTests { + c.Logf("test %d: %s", i, test.about) + assertVerifyWithCharmsErrors(c, test.data, nil, test.errors) + } +} + +func assertVerifyWithCharmsErrors(c *gc.C, bundleData string, charms map[string]charm.Charm, expectErrors []string) { + bd, err := charm.ReadBundleData(strings.NewReader(bundleData)) + c.Assert(err, gc.IsNil) + + validateConstraints := func(c string) error { + if c == "bad constraints" { + return fmt.Errorf("bad constraint") + } + return nil + } + validateStorage := func(c string) error { + if c == "bad storage constraints" { + return fmt.Errorf("bad storage constraint") + } + return nil + } + + err = bd.VerifyWithCharms(validateConstraints, validateStorage, charms) + if len(expectErrors) == 0 { + if err == nil { + return + } + // Let the rest of the function deal with the + // error, so that we'll see the actual errors + // that resulted. 
+ }
+ c.Assert(err, gc.FitsTypeOf, (*charm.VerificationError)(nil))
+ errors := err.(*charm.VerificationError).Errors
+ errStrings := make([]string, len(errors))
+ for i, err := range errors {
+ errStrings[i] = err.Error()
+ }
+ sort.Strings(errStrings)
+ sort.Strings(expectErrors)
+ c.Assert(errStrings, jc.DeepEquals, expectErrors)
+}
+
+func (*bundleDataSuite) TestVerifyCharmURL(c *gc.C) {
+ bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle))
+ c.Assert(err, gc.IsNil)
+ for i, u := range []string{
+ "wordpress",
+ "cs:wordpress",
+ "cs:precise/wordpress",
+ "precise/wordpress",
+ "precise/wordpress-2",
+ "local:foo",
+ "local:foo-45",
+ } {
+ c.Logf("test %d: %s", i, u)
+ bd.Services["mediawiki"].Charm = u
+ err := bd.Verify(nil, nil)
+ c.Assert(err, gc.IsNil, gc.Commentf("charm url %q", u))
+ }
+}
+
+func (*bundleDataSuite) TestVerifyBundleUsingJujuInfoRelation(c *gc.C) {
+ b := readBundleDir(c, "wordpress-with-logging")
+ bd := b.Data()
+
+ charms := map[string]charm.Charm{
+ "wordpress": readCharmDir(c, "wordpress"),
+ "mysql": readCharmDir(c, "mysql"),
+ "logging": readCharmDir(c, "logging"),
+ }
+ err := bd.VerifyWithCharms(nil, nil, charms)
+ c.Assert(err, gc.IsNil)
+}
+
+func (*bundleDataSuite) TestVerifyBundleUsingJujuInfoRelationBindingFail(c *gc.C) {
+ b := readBundleDir(c, "wordpress-with-logging")
+ bd := b.Data()
+
+ charms := map[string]charm.Charm{
+ "wordpress": readCharmDir(c, "wordpress"),
+ "mysql": readCharmDir(c, "mysql"),
+ "logging": readCharmDir(c, "logging"),
+ }
+ bd.Services["wordpress"].EndpointBindings["foo"] = "bar"
+ err := bd.VerifyWithCharms(nil, nil, charms)
+
+ c.Assert(err, gc.ErrorMatches,
+ "service \"wordpress\" wants to bind endpoint \"foo\" to space \"bar\", "+
+ "but the endpoint is not defined by the charm")
+}
+
+func (*bundleDataSuite) TestRequiredCharms(c *gc.C) {
+ bd, err := charm.ReadBundleData(strings.NewReader(mediawikiBundle))
+ c.Assert(err, gc.IsNil)
+ reqCharms := bd.RequiredCharms()
+
+ c.Assert(reqCharms, gc.DeepEquals, []string{"cs:precise/mediawiki-10", "cs:precise/mysql-28"})
+}
+
+// testCharm returns a charm with the given name
+// and relations. The relations are specified as
+// a string of the form:
+//
+//	<provides-relations> | <requires-relations>
+//
+// Within each section, each white-space separated
+// relation is specified as:
+//
+//	<relation-name>:<interface>
+//
+// So, for example:
+//
+//	testCharm("wordpress", "web:http | db:mysql")
+//
+// is equivalent to a charm with metadata.yaml containing
+//
+//	name: wordpress
+//	description: wordpress
+//	provides:
+//	  web:
+//	    interface: http
+//	requires:
+//	  db:
+//	    interface: mysql
+//
+// If the charm name has a "-sub" suffix, the
+// returned charm will have Meta.Subordinate = true.
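+//
+// For example, testCharm("test-sub", "") yields a subordinate
+// charm with no relations, as used by the subordinate placement
+// cases below.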
+// +func testCharm(name string, relations string) charm.Charm { + var provides, requires string + parts := strings.Split(relations, "|") + provides = parts[0] + if len(parts) > 1 { + requires = parts[1] + } + meta := &charm.Meta{ + Name: name, + Summary: name, + Description: name, + Provides: parseRelations(provides, charm.RoleProvider), + Requires: parseRelations(requires, charm.RoleRequirer), + } + if strings.HasSuffix(name, "-sub") { + meta.Subordinate = true + } + configStr := ` +options: + title: {default: My Title, description: title, type: string} + skill-level: {description: skill, type: int} +` + config, err := charm.ReadConfig(strings.NewReader(configStr)) + if err != nil { + panic(err) + } + return testCharmImpl{ + meta: meta, + config: config, + } +} + +func parseRelations(s string, role charm.RelationRole) map[string]charm.Relation { + rels := make(map[string]charm.Relation) + for _, r := range strings.Fields(s) { + parts := strings.Split(r, ":") + if len(parts) != 2 { + panic(fmt.Errorf("invalid relation specifier %q", r)) + } + name, interf := parts[0], parts[1] + rels[name] = charm.Relation{ + Name: name, + Role: role, + Interface: interf, + Scope: charm.ScopeGlobal, + } + } + return rels +} + +type testCharmImpl struct { + meta *charm.Meta + config *charm.Config + // Implement charm.Charm, but panic if anything other than + // Meta or Config methods are called. + charm.Charm +} + +func (c testCharmImpl) Meta() *charm.Meta { + return c.meta +} + +func (c testCharmImpl) Config() *charm.Config { + return c.config +} + +var verifyWithCharmsErrorsTests = []struct { + about string + data string + charms map[string]charm.Charm + + errors []string +}{{ + about: "no charms", + data: mediawikiBundle, + charms: map[string]charm.Charm{}, + errors: []string{ + `service "mediawiki" refers to non-existent charm "cs:precise/mediawiki-10"`, + `service "mysql" refers to non-existent charm "cs:precise/mysql-28"`, + }, +}, { + about: "all present and correct", + data: ` +services: + service1: + charm: "test" + service2: + charm: "test" + service3: + charm: "test" +relations: + - ["service1:prova", "service2:reqa"] + - ["service1:reqa", "service3:prova"] + - ["service3:provb", "service2:reqb"] +`, + charms: map[string]charm.Charm{ + "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), + }, +}, { + about: "undefined relations", + data: ` +services: + service1: + charm: "test" + service2: + charm: "test" +relations: + - ["service1:prova", "service2:blah"] + - ["service1:blah", "service2:prova"] +`, + charms: map[string]charm.Charm{ + "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), + }, + errors: []string{ + `charm "test" used by service "service1" does not define relation "blah"`, + `charm "test" used by service "service2" does not define relation "blah"`, + }, +}, { + about: "undefined services", + data: ` +services: + service1: + charm: "test" + service2: + charm: "test" +relations: + - ["unknown:prova", "service2:blah"] + - ["service1:blah", "unknown:prova"] +`, + charms: map[string]charm.Charm{ + "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"), + }, + errors: []string{ + `relation ["service1:blah" "unknown:prova"] refers to service "unknown" not defined in this bundle`, + `relation ["unknown:prova" "service2:blah"] refers to service "unknown" not defined in this bundle`, + }, +}, { + about: "equal services", + data: ` +services: + service1: + charm: "test" + service2: + charm: "test" +relations: + - ["service2:prova", "service2:reqa"] +`, + charms: 
map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `relation ["service2:prova" "service2:reqa"] relates a service to itself`,
+ },
+}, {
+ about: "provider to provider relation",
+ data: `
+services:
+ service1:
+ charm: "test"
+ service2:
+ charm: "test"
+relations:
+ - ["service1:prova", "service2:prova"]
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `relation "service1:prova" to "service2:prova" relates provider to provider`,
+ },
+}, {
+ about: "requirer to requirer relation",
+ data: `
+services:
+ service1:
+ charm: "test"
+ service2:
+ charm: "test"
+relations:
+ - ["service1:reqa", "service2:reqa"]
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `relation "service1:reqa" to "service2:reqa" relates requirer to requirer`,
+ },
+}, {
+ about: "interface mismatch",
+ data: `
+services:
+ service1:
+ charm: "test"
+ service2:
+ charm: "test"
+relations:
+ - ["service1:reqa", "service2:provb"]
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `mismatched interface between "service2:provb" and "service1:reqa" ("b" vs "a")`,
+ },
+}, {
+ about: "different charms",
+ data: `
+services:
+ service1:
+ charm: "test1"
+ service2:
+ charm: "test2"
+relations:
+ - ["service1:reqa", "service2:prova"]
+`,
+ charms: map[string]charm.Charm{
+ "test1": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ "test2": testCharm("test", ""),
+ },
+ errors: []string{
+ `charm "test2" used by service "service2" does not define relation "prova"`,
+ },
+}, {
+ about: "ambiguous relation",
+ data: `
+services:
+ service1:
+ charm: "test1"
+ service2:
+ charm: "test2"
+relations:
+ - [service1, service2]
+`,
+ charms: map[string]charm.Charm{
+ "test1": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ "test2": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `cannot infer endpoint between service1 and service2: ambiguous relation: service1 service2 could refer to "service1:prova service2:reqa"; "service1:provb service2:reqb"; "service1:reqa service2:prova"; "service1:reqb service2:provb"`,
+ },
+}, {
+ about: "relation using juju-info",
+ data: `
+services:
+ service1:
+ charm: "provider"
+ service2:
+ charm: "requirer"
+relations:
+ - [service1, service2]
+`,
+ charms: map[string]charm.Charm{
+ "provider": testCharm("provider", ""),
+ "requirer": testCharm("requirer", "| req:juju-info"),
+ },
+}, {
+ about: "ambiguous when implicit relations taken into account",
+ data: `
+services:
+ service1:
+ charm: "provider"
+ service2:
+ charm: "requirer"
+relations:
+ - [service1, service2]
+`,
+ charms: map[string]charm.Charm{
+ "provider": testCharm("provider", "provdb:db | "),
+ "requirer": testCharm("requirer", "| reqdb:db reqinfo:juju-info"),
+ },
+}, {
+ about: "half of relation left open",
+ data: `
+services:
+ service1:
+ charm: "provider"
+ service2:
+ charm: "requirer"
+relations:
+ - ["service1:prova2", service2]
+`,
+ charms: map[string]charm.Charm{
+ "provider": testCharm("provider", "prova1:a prova2:a | "),
+ "requirer": testCharm("requirer", "| reqa:a"),
+ },
+}, {
+ about: "duplicate relation between open and fully-specified relations",
+ data: `
+services:
+ service1:
+ charm: "provider"
+ service2:
+ charm: "requirer"
+relations:
+ - ["service1:prova", "service2:reqa"]
+ - ["service1", "service2"]
+`,
+ charms: map[string]charm.Charm{
+ "provider": testCharm("provider", "prova:a | "),
+ "requirer": testCharm("requirer", "| reqa:a"),
+ },
+ errors: []string{
+ `relation ["service1" "service2"] is defined more than once`,
+ },
+}, {
+ about: "configuration options specified",
+ data: `
+services:
+ service1:
+ charm: "test"
+ options:
+ title: "some title"
+ skill-level: 245
+ service2:
+ charm: "test"
+ options:
+ title: "another title"
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+}, {
+ about: "invalid type for option",
+ data: `
+services:
+ service1:
+ charm: "test"
+ options:
+ title: "some title"
+ skill-level: "too much"
+ service2:
+ charm: "test"
+ options:
+ title: "another title"
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `cannot validate service "service1": option "skill-level" expected int, got "too much"`,
+ },
+}, {
+ about: "unknown option",
+ data: `
+services:
+ service1:
+ charm: "test"
+ options:
+ title: "some title"
+ unknown-option: 2345
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `cannot validate service "service1": configuration option "unknown-option" not found in charm "test"`,
+ },
+}, {
+ about: "multiple config problems",
+ data: `
+services:
+ service1:
+ charm: "test"
+ options:
+ title: "some title"
+ unknown-option: 2345
+ service2:
+ charm: "test"
+ options:
+ title: 123
+ another-unknown: 2345
+`,
+ charms: map[string]charm.Charm{
+ "test": testCharm("test", "prova:a provb:b | reqa:a reqb:b"),
+ },
+ errors: []string{
+ `cannot validate service "service1": configuration option "unknown-option" not found in charm "test"`,
+ `cannot validate service "service2": configuration option "another-unknown" not found in charm "test"`,
+ `cannot validate service "service2": option "title" expected string, got 123`,
+ },
+}, {
+ about: "subordinate charm with more than zero units",
+ data: `
+services:
+ testsub:
+ charm: "testsub"
+ num_units: 1
+`,
+ charms: map[string]charm.Charm{
+ "testsub": testCharm("test-sub", ""),
+ },
+ errors: []string{
+ `service "testsub" is subordinate but has non-zero num_units`,
+ },
+}, {
+ about: "subordinate charm with more than one unit",
+ data: `
+services:
+ testsub:
+ charm: "testsub"
+ num_units: 2
+`,
+ charms: map[string]charm.Charm{
+ "testsub": testCharm("test-sub", ""),
+ },
+ errors: []string{
+ `service "testsub" is subordinate but has non-zero num_units`,
+ },
+}, {
+ about: "subordinate charm with to-clause",
+ data: `
+services:
+ testsub:
+ charm: "testsub"
+ to: [0]
+machines:
+ 0:
+`,
+ charms: map[string]charm.Charm{
+ "testsub": testCharm("test-sub", ""),
+ },
+ errors: []string{
+ `service "testsub" is subordinate but specifies unit placement`,
+ `too many units specified in unit placement for service "testsub"`,
+ },
+}, {
+ about: "charm with unspecified units and more than one to: entry",
+ data: `
+services:
+ test:
+ charm: "test"
+ to: [0, 1]
+machines:
+ 0:
+ 1:
+`,
+ errors: []string{
+ `too many units specified in unit placement for service "test"`,
+ },
+}}
+
+func (*bundleDataSuite) TestVerifyWithCharmsErrors(c *gc.C) {
+ for i, test := range verifyWithCharmsErrorsTests {
+ c.Logf("test %d: %s", i, test.about)
+ assertVerifyWithCharmsErrors(c, test.data, test.charms, test.errors)
+ }
+}
+
+var parsePlacementTests =
[]struct { + placement string + expect *charm.UnitPlacement + expectErr string +}{{ + placement: "lxc:service/0", + expect: &charm.UnitPlacement{ + ContainerType: "lxc", + Service: "service", + Unit: 0, + }, +}, { + placement: "lxc:service", + expect: &charm.UnitPlacement{ + ContainerType: "lxc", + Service: "service", + Unit: -1, + }, +}, { + placement: "lxc:99", + expect: &charm.UnitPlacement{ + ContainerType: "lxc", + Machine: "99", + Unit: -1, + }, +}, { + placement: "lxc:new", + expect: &charm.UnitPlacement{ + ContainerType: "lxc", + Machine: "new", + Unit: -1, + }, +}, { + placement: "service/0", + expect: &charm.UnitPlacement{ + Service: "service", + Unit: 0, + }, +}, { + placement: "service", + expect: &charm.UnitPlacement{ + Service: "service", + Unit: -1, + }, +}, { + placement: "service45", + expect: &charm.UnitPlacement{ + Service: "service45", + Unit: -1, + }, +}, { + placement: "99", + expect: &charm.UnitPlacement{ + Machine: "99", + Unit: -1, + }, +}, { + placement: "new", + expect: &charm.UnitPlacement{ + Machine: "new", + Unit: -1, + }, +}, { + placement: ":0", + expectErr: `invalid placement syntax ":0"`, +}, { + placement: "05", + expectErr: `invalid placement syntax "05"`, +}, { + placement: "new/2", + expectErr: `invalid placement syntax "new/2"`, +}} + +func (*bundleDataSuite) TestParsePlacement(c *gc.C) { + for i, test := range parsePlacementTests { + c.Logf("test %d: %q", i, test.placement) + up, err := charm.ParsePlacement(test.placement) + if test.expectErr != "" { + c.Assert(err, gc.ErrorMatches, test.expectErr) + } else { + c.Assert(err, gc.IsNil) + c.Assert(up, jc.DeepEquals, test.expect) + } + } +} === added file 'src/gopkg.in/juju/charm.v6-unstable/bundledir.go' --- src/gopkg.in/juju/charm.v6-unstable/bundledir.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/bundledir.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +type BundleDir struct { + Path string + data *BundleData + readMe string +} + +// Trick to ensure *BundleDir implements the Bundle interface. +var _ Bundle = (*BundleDir)(nil) + +// ReadBundleDir returns a BundleDir representing an expanded +// bundle directory. It does not verify the bundle data. +func ReadBundleDir(path string) (dir *BundleDir, err error) { + dir = &BundleDir{Path: path} + file, err := os.Open(dir.join("bundle.yaml")) + if err != nil { + return nil, err + } + dir.data, err = ReadBundleData(file) + file.Close() + if err != nil { + return nil, err + } + readMe, err := ioutil.ReadFile(dir.join("README.md")) + if err != nil { + return nil, fmt.Errorf("cannot read README file: %v", err) + } + dir.readMe = string(readMe) + return dir, nil +} + +func (dir *BundleDir) Data() *BundleData { + return dir.data +} + +func (dir *BundleDir) ReadMe() string { + return dir.readMe +} + +func (dir *BundleDir) ArchiveTo(w io.Writer) error { + return writeArchive(w, dir.Path, -1, nil) +} + +// join builds a path rooted at the bundle's expanded directory +// path and the extra path components provided. +func (dir *BundleDir) join(parts ...string) string { + parts = append([]string{dir.Path}, parts...) + return filepath.Join(parts...) 
+} === added file 'src/gopkg.in/juju/charm.v6-unstable/bundledir_test.go' --- src/gopkg.in/juju/charm.v6-unstable/bundledir_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/bundledir_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "os" + "path/filepath" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +type BundleDirSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&BundleDirSuite{}) + +func (s *BundleDirSuite) TestReadBundleDir(c *gc.C) { + path := bundleDirPath(c, "wordpress-simple") + dir, err := charm.ReadBundleDir(path) + c.Assert(err, gc.IsNil) + checkWordpressBundle(c, dir, path) +} + +func (s *BundleDirSuite) TestReadBundleDirWithoutREADME(c *gc.C) { + path := cloneDir(c, bundleDirPath(c, "wordpress-simple")) + err := os.Remove(filepath.Join(path, "README.md")) + c.Assert(err, gc.IsNil) + dir, err := charm.ReadBundleDir(path) + c.Assert(err, gc.ErrorMatches, "cannot read README file: .*") + c.Assert(dir, gc.IsNil) +} + +func (s *BundleDirSuite) TestArchiveTo(c *gc.C) { + baseDir := c.MkDir() + charmDir := cloneDir(c, bundleDirPath(c, "wordpress-simple")) + s.assertArchiveTo(c, baseDir, charmDir) +} + +func (s *BundleDirSuite) assertArchiveTo(c *gc.C, baseDir, bundleDir string) { + dir, err := charm.ReadBundleDir(bundleDir) + c.Assert(err, gc.IsNil) + path := filepath.Join(baseDir, "archive.bundle") + file, err := os.Create(path) + c.Assert(err, gc.IsNil) + err = dir.ArchiveTo(file) + file.Close() + c.Assert(err, gc.IsNil) + + archive, err := charm.ReadBundleArchive(path) + c.Assert(err, gc.IsNil) + c.Assert(archive.ReadMe(), gc.Equals, dir.ReadMe()) + c.Assert(archive.Data(), gc.DeepEquals, dir.Data()) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/charm.go' --- src/gopkg.in/juju/charm.v6-unstable/charm.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/charm.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,101 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + "os" + "strings" + + "github.com/juju/loggo" +) + +var logger = loggo.GetLogger("juju.charm") + +// The Charm interface is implemented by any type that +// may be handled as a charm. +type Charm interface { + Meta() *Meta + Config() *Config + Metrics() *Metrics + Actions() *Actions + Revision() int +} + +// ReadCharm reads a Charm from path, which can point to either a charm archive or a +// charm directory. +func ReadCharm(path string) (charm Charm, err error) { + info, err := os.Stat(path) + if err != nil { + return nil, err + } + if info.IsDir() { + charm, err = ReadCharmDir(path) + } else { + charm, err = ReadCharmArchive(path) + } + if err != nil { + return nil, err + } + return charm, nil +} + +// SeriesForCharm takes a requested series and a list of series supported by a +// charm and returns the series which is relevant. +// If the requested series is empty, then the first supported series is used, +// otherwise the requested series is validated against the supported series. +func SeriesForCharm(requestedSeries string, supportedSeries []string) (string, error) { + // Old charm with no supported series. + if len(supportedSeries) == 0 { + if requestedSeries == "" { + return "", missingSeriesError + } + return requestedSeries, nil + } + // Use the charm default. 
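+	// For example (a case exercised by TestSeriesToUse below):
+	// SeriesForCharm("", []string{"precise", "trusty"}) returns "precise".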
+ if requestedSeries == "" { + return supportedSeries[0], nil + } + for _, s := range supportedSeries { + if s == requestedSeries { + return requestedSeries, nil + } + } + return "", &unsupportedSeriesError{requestedSeries, supportedSeries} +} + +// missingSeriesError is used to denote that SeriesForCharm could not determine +// a series because a legacy charm did not declare any. +var missingSeriesError = fmt.Errorf("series not specified and charm does not define any") + +// IsMissingSeriesError returns true if err is an missingSeriesError. +func IsMissingSeriesError(err error) bool { + return err == missingSeriesError +} + +// UnsupportedSeriesError represents an error indicating that the requested series +// is not supported by the charm. +type unsupportedSeriesError struct { + requestedSeries string + supportedSeries []string +} + +func (e *unsupportedSeriesError) Error() string { + return fmt.Sprintf( + "series %q not supported by charm, supported series are: %s", + e.requestedSeries, strings.Join(e.supportedSeries, ","), + ) +} + +// NewUnsupportedSeriesError returns an error indicating that the requested series +// is not supported by a charm. +func NewUnsupportedSeriesError(requestedSeries string, supportedSeries []string) error { + return &unsupportedSeriesError{requestedSeries, supportedSeries} +} + +// IsUnsupportedSeriesError returns true if err is an UnsupportedSeriesError. +func IsUnsupportedSeriesError(err error) bool { + _, ok := err.(*unsupportedSeriesError) + return ok +} === added file 'src/gopkg.in/juju/charm.v6-unstable/charm_test.go' --- src/gopkg.in/juju/charm.v6-unstable/charm_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/charm_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,221 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +package charm_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + stdtesting "testing" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/fs" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +func Test(t *stdtesting.T) { + gc.TestingT(t) +} + +type CharmSuite struct{} + +var _ = gc.Suite(&CharmSuite{}) + +func (s *CharmSuite) TestReadCharm(c *gc.C) { + ch, err := charm.ReadCharm(charmDirPath(c, "dummy")) + c.Assert(err, gc.IsNil) + c.Assert(ch.Meta().Name, gc.Equals, "dummy") + + bPath := archivePath(c, readCharmDir(c, "dummy")) + ch, err = charm.ReadCharm(bPath) + c.Assert(err, gc.IsNil) + c.Assert(ch.Meta().Name, gc.Equals, "dummy") +} + +func (s *CharmSuite) TestReadCharmDirError(c *gc.C) { + ch, err := charm.ReadCharm(c.MkDir()) + c.Assert(err, gc.NotNil) + c.Assert(ch, gc.Equals, nil) +} + +func (s *CharmSuite) TestReadCharmArchiveError(c *gc.C) { + path := filepath.Join(c.MkDir(), "path") + err := ioutil.WriteFile(path, []byte("foo"), 0644) + c.Assert(err, gc.IsNil) + ch, err := charm.ReadCharm(path) + c.Assert(err, gc.NotNil) + c.Assert(ch, gc.Equals, nil) +} + +func (s *CharmSuite) TestSeriesToUse(c *gc.C) { + tests := []struct { + series string + supportedSeries []string + seriesToUse string + err string + }{{ + series: "", + err: "series not specified and charm does not define any", + }, { + series: "trusty", + seriesToUse: "trusty", + }, { + series: "trusty", + supportedSeries: []string{"precise", "trusty"}, + seriesToUse: "trusty", + }, { + series: "", + supportedSeries: []string{"precise", "trusty"}, + seriesToUse: "precise", + }, { + series: "wily", + supportedSeries: []string{"precise", "trusty"}, + err: `series "wily" not supported by charm.*`, + }} + for _, test := range tests { + series, err := charm.SeriesForCharm(test.series, test.supportedSeries) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + continue + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(series, jc.DeepEquals, test.seriesToUse) + } +} + +func (s *CharmSuite) IsUnsupportedSeriesError(c *gc.C) { + err := charm.NewUnsupportedSeriesError("series", []string{"supported"}) + c.Assert(charm.IsUnsupportedSeriesError(err), jc.IsTrue) + c.Assert(charm.IsUnsupportedSeriesError(fmt.Errorf("foo")), jc.IsFalse) +} + +func (s *CharmSuite) IsMissingSeriesError(c *gc.C) { + err := charm.MissingSeriesError() + c.Assert(charm.IsMissingSeriesError(err), jc.IsTrue) + c.Assert(charm.IsMissingSeriesError(fmt.Errorf("foo")), jc.IsFalse) +} + +func checkDummy(c *gc.C, f charm.Charm, path string) { + c.Assert(f.Revision(), gc.Equals, 1) + c.Assert(f.Meta().Name, gc.Equals, "dummy") + c.Assert(f.Config().Options["title"].Default, gc.Equals, "My Title") + c.Assert(f.Actions(), jc.DeepEquals, + &charm.Actions{ + map[string]charm.ActionSpec{ + "snapshot": { + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "type": "object", + "description": "Take a snapshot of the database.", + "title": "snapshot", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "The file to write out to.", + "type": "string", + "default": "foo.bz2", + }}}}}}) + switch f := f.(type) { + case *charm.CharmArchive: + c.Assert(f.Path, gc.Equals, path) + case *charm.CharmDir: + c.Assert(f.Path, gc.Equals, path) + } +} + +type YamlHacker map[interface{}]interface{} + +func ReadYaml(r io.Reader) YamlHacker { + data, err := ioutil.ReadAll(r) + if err != nil { + panic(err) + } + m := 
make(map[interface{}]interface{}) + err = yaml.Unmarshal(data, m) + if err != nil { + panic(err) + } + return YamlHacker(m) +} + +func (yh YamlHacker) Reader() io.Reader { + data, err := yaml.Marshal(yh) + if err != nil { + panic(err) + } + return bytes.NewBuffer(data) +} + +// charmDirPath returns the path to the charm with the +// given name in the testing repository. +func charmDirPath(c *gc.C, name string) string { + path := filepath.Join("internal/test-charm-repo/quantal", name) + assertIsDir(c, path) + return path +} + +// bundleDirPath returns the path to the bundle with the +// given name in the testing repository. +func bundleDirPath(c *gc.C, name string) string { + path := filepath.Join("internal/test-charm-repo/bundle", name) + assertIsDir(c, path) + return path +} + +func assertIsDir(c *gc.C, path string) { + info, err := os.Stat(path) + c.Assert(err, gc.IsNil) + c.Assert(info.IsDir(), gc.Equals, true) +} + +// readCharmDir returns the charm with the given +// name from the testing repository. +func readCharmDir(c *gc.C, name string) *charm.CharmDir { + path := charmDirPath(c, name) + ch, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + return ch +} + +// readBundleDir returns the bundle with the +// given name from the testing repository. +func readBundleDir(c *gc.C, name string) *charm.BundleDir { + path := bundleDirPath(c, name) + ch, err := charm.ReadBundleDir(path) + c.Assert(err, gc.IsNil) + return ch +} + +type ArchiverTo interface { + ArchiveTo(w io.Writer) error +} + +// archivePath archives the given charm or bundle +// to a newly created file and returns the path to the +// file. +func archivePath(c *gc.C, a ArchiverTo) string { + dir := c.MkDir() + path := filepath.Join(dir, "archive") + file, err := os.Create(path) + c.Assert(err, gc.IsNil) + defer file.Close() + err = a.ArchiveTo(file) + c.Assert(err, gc.IsNil) + return path +} + +// cloneDir recursively copies the path directory +// into a new directory and returns the path +// to it. +func cloneDir(c *gc.C, path string) string { + newPath := filepath.Join(c.MkDir(), filepath.Base(path)) + err := fs.Copy(path, newPath) + c.Assert(err, gc.IsNil) + return newPath +} === added file 'src/gopkg.in/juju/charm.v6-unstable/charmarchive.go' --- src/gopkg.in/juju/charm.v6-unstable/charmarchive.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/charmarchive.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,315 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "archive/zip" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/juju/utils/set" + ziputil "github.com/juju/utils/zip" +) + +// The CharmArchive type encapsulates access to data and operations +// on a charm archive. +type CharmArchive struct { + zopen zipOpener + + Path string // May be empty if CharmArchive wasn't read from a file + meta *Meta + config *Config + metrics *Metrics + actions *Actions + revision int +} + +// Trick to ensure *CharmArchive implements the Charm interface. +var _ Charm = (*CharmArchive)(nil) + +// ReadCharmArchive returns a CharmArchive for the charm in path. +func ReadCharmArchive(path string) (*CharmArchive, error) { + a, err := readCharmArchive(newZipOpenerFromPath(path)) + if err != nil { + return nil, err + } + a.Path = path + return a, nil +} + +// ReadCharmArchiveBytes returns a CharmArchive read from the given data. +// Make sure the archive fits in memory before using this. 
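+//
+// A minimal usage sketch (the file name here is illustrative only):
+//
+//	data, err := ioutil.ReadFile("dummy.charm")
+//	if err != nil {
+//		// handle error
+//	}
+//	archive, err := charm.ReadCharmArchiveBytes(data)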
+func ReadCharmArchiveBytes(data []byte) (archive *CharmArchive, err error) { + zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data))) + return readCharmArchive(zopener) +} + +// ReadCharmArchiveFromReader returns a CharmArchive that uses +// r to read the charm. The given size must hold the number +// of available bytes in the file. +// +// Note that the caller is responsible for closing r - methods on +// the returned CharmArchive may fail after that. +func ReadCharmArchiveFromReader(r io.ReaderAt, size int64) (archive *CharmArchive, err error) { + return readCharmArchive(newZipOpenerFromReader(r, size)) +} + +func readCharmArchive(zopen zipOpener) (archive *CharmArchive, err error) { + b := &CharmArchive{ + zopen: zopen, + } + zipr, err := zopen.openZip() + if err != nil { + return nil, err + } + defer zipr.Close() + reader, err := zipOpenFile(zipr, "metadata.yaml") + if err != nil { + return nil, err + } + b.meta, err = ReadMeta(reader) + reader.Close() + if err != nil { + return nil, err + } + + reader, err = zipOpenFile(zipr, "config.yaml") + if _, ok := err.(*noCharmArchiveFile); ok { + b.config = NewConfig() + } else if err != nil { + return nil, err + } else { + b.config, err = ReadConfig(reader) + reader.Close() + if err != nil { + return nil, err + } + } + + reader, err = zipOpenFile(zipr, "metrics.yaml") + if err == nil { + b.metrics, err = ReadMetrics(reader) + reader.Close() + if err != nil { + return nil, err + } + } else if _, ok := err.(*noCharmArchiveFile); !ok { + return nil, err + } + + reader, err = zipOpenFile(zipr, "actions.yaml") + if _, ok := err.(*noCharmArchiveFile); ok { + b.actions = NewActions() + } else if err != nil { + return nil, err + } else { + b.actions, err = ReadActionsYaml(reader) + reader.Close() + if err != nil { + return nil, err + } + } + + reader, err = zipOpenFile(zipr, "revision") + if err != nil { + if _, ok := err.(*noCharmArchiveFile); !ok { + return nil, err + } + b.revision = b.meta.OldRevision + } else { + _, err = fmt.Fscan(reader, &b.revision) + if err != nil { + return nil, errors.New("invalid revision file") + } + } + + return b, nil +} + +func zipOpenFile(zipr *zipReadCloser, path string) (rc io.ReadCloser, err error) { + for _, fh := range zipr.File { + if fh.Name == path { + return fh.Open() + } + } + return nil, &noCharmArchiveFile{path} +} + +type noCharmArchiveFile struct { + path string +} + +func (err noCharmArchiveFile) Error() string { + return fmt.Sprintf("archive file %q not found", err.path) +} + +// Revision returns the revision number for the charm +// expanded in dir. +func (a *CharmArchive) Revision() int { + return a.revision +} + +// SetRevision changes the charm revision number. This affects the +// revision reported by Revision and the revision of the charm +// directory created by ExpandTo. +func (a *CharmArchive) SetRevision(revision int) { + a.revision = revision +} + +// Meta returns the Meta representing the metadata.yaml file from archive. +func (a *CharmArchive) Meta() *Meta { + return a.meta +} + +// Config returns the Config representing the config.yaml file +// for the charm archive. +func (a *CharmArchive) Config() *Config { + return a.config +} + +// Metrics returns the Metrics representing the metrics.yaml file +// for the charm archive. +func (a *CharmArchive) Metrics() *Metrics { + return a.metrics +} + +// Actions returns the Actions map for the actions.yaml file for the charm +// archive. 
+func (a *CharmArchive) Actions() *Actions { + return a.actions +} + +type zipReadCloser struct { + io.Closer + *zip.Reader +} + +// zipOpener holds the information needed to open a zip +// file. +type zipOpener interface { + openZip() (*zipReadCloser, error) +} + +// newZipOpenerFromPath returns a zipOpener that can be +// used to read the archive from the given path. +func newZipOpenerFromPath(path string) zipOpener { + return &zipPathOpener{path: path} +} + +// newZipOpenerFromReader returns a zipOpener that can be +// used to read the archive from the given ReaderAt +// holding the given number of bytes. +func newZipOpenerFromReader(r io.ReaderAt, size int64) zipOpener { + return &zipReaderOpener{ + r: r, + size: size, + } +} + +type zipPathOpener struct { + path string +} + +func (zo *zipPathOpener) openZip() (*zipReadCloser, error) { + f, err := os.Open(zo.path) + if err != nil { + return nil, err + } + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + r, err := zip.NewReader(f, fi.Size()) + if err != nil { + f.Close() + return nil, err + } + return &zipReadCloser{Closer: f, Reader: r}, nil +} + +type zipReaderOpener struct { + r io.ReaderAt + size int64 +} + +func (zo *zipReaderOpener) openZip() (*zipReadCloser, error) { + r, err := zip.NewReader(zo.r, zo.size) + if err != nil { + return nil, err + } + return &zipReadCloser{Closer: ioutil.NopCloser(nil), Reader: r}, nil +} + +// Manifest returns a set of the charm's contents. +func (a *CharmArchive) Manifest() (set.Strings, error) { + zipr, err := a.zopen.openZip() + if err != nil { + return set.NewStrings(), err + } + defer zipr.Close() + paths, err := ziputil.Find(zipr.Reader, "*") + if err != nil { + return set.NewStrings(), err + } + manifest := set.NewStrings(paths...) + // We always write out a revision file, even if there isn't one in the + // archive; and we always strip ".", because that's sometimes not present. + manifest.Add("revision") + manifest.Remove(".") + return manifest, nil +} + +// ExpandTo expands the charm archive into dir, creating it if necessary. +// If any errors occur during the expansion procedure, the process will +// abort. +func (a *CharmArchive) ExpandTo(dir string) error { + zipr, err := a.zopen.openZip() + if err != nil { + return err + } + defer zipr.Close() + if err := ziputil.ExtractAll(zipr.Reader, dir); err != nil { + return err + } + hooksDir := filepath.Join(dir, "hooks") + fixHook := fixHookFunc(hooksDir, a.meta.Hooks()) + if err := filepath.Walk(hooksDir, fixHook); err != nil { + if !os.IsNotExist(err) { + return err + } + } + revFile, err := os.Create(filepath.Join(dir, "revision")) + if err != nil { + return err + } + _, err = revFile.Write([]byte(strconv.Itoa(a.revision))) + revFile.Close() + return err +} + +// fixHookFunc returns a WalkFunc that makes sure hooks are owner-executable. 
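+//
+// ExpandTo above applies it via filepath.Walk over the expanded "hooks"
+// directory: any entry whose base name matches a declared hook and lacks
+// the owner-execute bit is chmodded to mode|0100; directories other than
+// the hooks directory itself are skipped.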
+func fixHookFunc(hooksDir string, hookNames map[string]bool) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + mode := info.Mode() + if path != hooksDir && mode.IsDir() { + return filepath.SkipDir + } + if name := filepath.Base(path); hookNames[name] { + if mode&0100 == 0 { + return os.Chmod(path, mode|0100) + } + } + return nil + } +} === added file 'src/gopkg.in/juju/charm.v6-unstable/charmarchive_test.go' --- src/gopkg.in/juju/charm.v6-unstable/charmarchive_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/charmarchive_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,383 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "syscall" + + jc "github.com/juju/testing/checkers" + "github.com/juju/utils/set" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +type CharmArchiveSuite struct { + archivePath string +} + +var _ = gc.Suite(&CharmArchiveSuite{}) + +func (s *CharmArchiveSuite) SetUpSuite(c *gc.C) { + s.archivePath = archivePath(c, readCharmDir(c, "dummy")) +} + +var dummyManifest = []string{ + "actions.yaml", + "config.yaml", + "empty", + "empty/.gitkeep", + "hooks", + "hooks/install", + "metadata.yaml", + "revision", + "src", + "src/hello.c", +} + +func (s *CharmArchiveSuite) TestReadCharmArchive(c *gc.C) { + archive, err := charm.ReadCharmArchive(s.archivePath) + c.Assert(err, gc.IsNil) + checkDummy(c, archive, s.archivePath) +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutConfig(c *gc.C) { + // Technically varnish has no config AND no actions. + // Perhaps we should make this more orthogonal? + path := archivePath(c, readCharmDir(c, "varnish")) + archive, err := charm.ReadCharmArchive(path) + c.Assert(err, gc.IsNil) + + // A lacking config.yaml file still causes a proper + // Config value to be returned. + c.Assert(archive.Config().Options, gc.HasLen, 0) +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutMetrics(c *gc.C) { + path := archivePath(c, readCharmDir(c, "varnish")) + dir, err := charm.ReadCharmArchive(path) + c.Assert(err, gc.IsNil) + + // A lacking metrics.yaml file indicates the unit will not + // be metered. + c.Assert(dir.Metrics(), gc.IsNil) +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveWithEmptyMetrics(c *gc.C) { + path := archivePath(c, readCharmDir(c, "metered-empty")) + dir, err := charm.ReadCharmArchive(path) + c.Assert(err, gc.IsNil) + c.Assert(Keys(dir.Metrics()), gc.HasLen, 0) +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveWithCustomMetrics(c *gc.C) { + path := archivePath(c, readCharmDir(c, "metered")) + dir, err := charm.ReadCharmArchive(path) + c.Assert(err, gc.IsNil) + + c.Assert(dir.Metrics(), gc.NotNil) + c.Assert(Keys(dir.Metrics()), gc.DeepEquals, []string{"juju-unit-time", "pings"}) +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveWithoutActions(c *gc.C) { + // Wordpress has config but no actions. + path := archivePath(c, readCharmDir(c, "wordpress")) + archive, err := charm.ReadCharmArchive(path) + c.Assert(err, gc.IsNil) + + // A lacking actions.yaml file still causes a proper + // Actions value to be returned. 
+ c.Assert(archive.Actions().ActionSpecs, gc.HasLen, 0) +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveBytes(c *gc.C) { + data, err := ioutil.ReadFile(s.archivePath) + c.Assert(err, gc.IsNil) + + archive, err := charm.ReadCharmArchiveBytes(data) + c.Assert(err, gc.IsNil) + checkDummy(c, archive, "") +} + +func (s *CharmArchiveSuite) TestReadCharmArchiveFromReader(c *gc.C) { + f, err := os.Open(s.archivePath) + c.Assert(err, gc.IsNil) + defer f.Close() + info, err := f.Stat() + c.Assert(err, gc.IsNil) + + archive, err := charm.ReadCharmArchiveFromReader(f, info.Size()) + c.Assert(err, gc.IsNil) + checkDummy(c, archive, "") +} + +func (s *CharmArchiveSuite) TestManifest(c *gc.C) { + archive, err := charm.ReadCharmArchive(s.archivePath) + c.Assert(err, gc.IsNil) + manifest, err := archive.Manifest() + c.Assert(err, gc.IsNil) + c.Assert(manifest, jc.DeepEquals, set.NewStrings(dummyManifest...)) +} + +func (s *CharmArchiveSuite) TestManifestNoRevision(c *gc.C) { + archive, err := charm.ReadCharmArchive(s.archivePath) + c.Assert(err, gc.IsNil) + dirPath := c.MkDir() + err = archive.ExpandTo(dirPath) + c.Assert(err, gc.IsNil) + err = os.Remove(filepath.Join(dirPath, "revision")) + c.Assert(err, gc.IsNil) + + archive = extCharmArchiveDir(c, dirPath) + manifest, err := archive.Manifest() + c.Assert(err, gc.IsNil) + c.Assert(manifest, gc.DeepEquals, set.NewStrings(dummyManifest...)) +} + +func (s *CharmArchiveSuite) TestManifestSymlink(c *gc.C) { + srcPath := cloneDir(c, charmDirPath(c, "dummy")) + if err := os.Symlink("../target", filepath.Join(srcPath, "hooks/symlink")); err != nil { + c.Skip("cannot symlink") + } + expected := append([]string{"hooks/symlink"}, dummyManifest...) + + archive := archiveDir(c, srcPath) + manifest, err := archive.Manifest() + c.Assert(err, gc.IsNil) + c.Assert(manifest, gc.DeepEquals, set.NewStrings(expected...)) +} + +func (s *CharmArchiveSuite) TestExpandTo(c *gc.C) { + archive, err := charm.ReadCharmArchive(s.archivePath) + c.Assert(err, gc.IsNil) + + path := filepath.Join(c.MkDir(), "charm") + err = archive.ExpandTo(path) + c.Assert(err, gc.IsNil) + + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + checkDummy(c, dir, path) +} + +func (s *CharmArchiveSuite) prepareCharmArchive(c *gc.C, charmDir *charm.CharmDir, archivePath string) { + file, err := os.Create(archivePath) + c.Assert(err, gc.IsNil) + defer file.Close() + zipw := zip.NewWriter(file) + defer zipw.Close() + + h := &zip.FileHeader{Name: "revision"} + h.SetMode(syscall.S_IFREG | 0644) + w, err := zipw.CreateHeader(h) + c.Assert(err, gc.IsNil) + _, err = w.Write([]byte(strconv.Itoa(charmDir.Revision()))) + + h = &zip.FileHeader{Name: "metadata.yaml", Method: zip.Deflate} + h.SetMode(0644) + w, err = zipw.CreateHeader(h) + c.Assert(err, gc.IsNil) + data, err := yaml.Marshal(charmDir.Meta()) + c.Assert(err, gc.IsNil) + _, err = w.Write(data) + c.Assert(err, gc.IsNil) + + for name := range charmDir.Meta().Hooks() { + hookName := filepath.Join("hooks", name) + h = &zip.FileHeader{ + Name: hookName, + Method: zip.Deflate, + } + // Force it non-executable + h.SetMode(0644) + w, err := zipw.CreateHeader(h) + c.Assert(err, gc.IsNil) + _, err = w.Write([]byte("not important")) + c.Assert(err, gc.IsNil) + } +} + +func (s *CharmArchiveSuite) TestExpandToSetsHooksExecutable(c *gc.C) { + charmDir, err := charm.ReadCharmDir(cloneDir(c, charmDirPath(c, "all-hooks"))) + c.Assert(err, gc.IsNil) + // CharmArchive manually, so we can check ExpandTo(), unaffected + // by ArchiveTo()'s behavior + 
archivePath := filepath.Join(c.MkDir(), "archive.charm") + s.prepareCharmArchive(c, charmDir, archivePath) + archive, err := charm.ReadCharmArchive(archivePath) + c.Assert(err, gc.IsNil) + + path := filepath.Join(c.MkDir(), "charm") + err = archive.ExpandTo(path) + c.Assert(err, gc.IsNil) + + _, err = charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + + for name := range archive.Meta().Hooks() { + hookName := string(name) + info, err := os.Stat(filepath.Join(path, "hooks", hookName)) + c.Assert(err, gc.IsNil) + perm := info.Mode() & 0777 + c.Assert(perm&0100 != 0, gc.Equals, true, gc.Commentf("hook %q is not executable", hookName)) + } +} + +func (s *CharmArchiveSuite) TestCharmArchiveFileModes(c *gc.C) { + // Apply subtler mode differences than can be expressed in Bazaar. + srcPath := cloneDir(c, charmDirPath(c, "dummy")) + modes := []struct { + path string + mode os.FileMode + }{ + {"hooks/install", 0751}, + {"empty", 0750}, + {"src/hello.c", 0614}, + } + for _, m := range modes { + err := os.Chmod(filepath.Join(srcPath, m.path), m.mode) + c.Assert(err, gc.IsNil) + } + var haveSymlinks = true + if err := os.Symlink("../target", filepath.Join(srcPath, "hooks/symlink")); err != nil { + haveSymlinks = false + } + + // CharmArchive and extract the charm to a new directory. + archive := archiveDir(c, srcPath) + path := c.MkDir() + err := archive.ExpandTo(path) + c.Assert(err, gc.IsNil) + + // Check sensible file modes once round-tripped. + info, err := os.Stat(filepath.Join(path, "src", "hello.c")) + c.Assert(err, gc.IsNil) + c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0644)) + c.Assert(info.Mode()&os.ModeType, gc.Equals, os.FileMode(0)) + + info, err = os.Stat(filepath.Join(path, "hooks", "install")) + c.Assert(err, gc.IsNil) + c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0755)) + c.Assert(info.Mode()&os.ModeType, gc.Equals, os.FileMode(0)) + + info, err = os.Stat(filepath.Join(path, "empty")) + c.Assert(err, gc.IsNil) + c.Assert(info.Mode()&0777, gc.Equals, os.FileMode(0755)) + + if haveSymlinks { + target, err := os.Readlink(filepath.Join(path, "hooks", "symlink")) + c.Assert(err, gc.IsNil) + c.Assert(target, gc.Equals, "../target") + } +} + +func (s *CharmArchiveSuite) TestCharmArchiveRevisionFile(c *gc.C) { + charmDir := cloneDir(c, charmDirPath(c, "dummy")) + revPath := filepath.Join(charmDir, "revision") + + // Missing revision file + err := os.Remove(revPath) + c.Assert(err, gc.IsNil) + + archive := extCharmArchiveDir(c, charmDir) + c.Assert(archive.Revision(), gc.Equals, 0) + + // Missing revision file with old revision in metadata + file, err := os.OpenFile(filepath.Join(charmDir, "metadata.yaml"), os.O_WRONLY|os.O_APPEND, 0) + c.Assert(err, gc.IsNil) + _, err = file.Write([]byte("\nrevision: 1234\n")) + c.Assert(err, gc.IsNil) + + archive = extCharmArchiveDir(c, charmDir) + c.Assert(archive.Revision(), gc.Equals, 1234) + + // Revision file with bad content + err = ioutil.WriteFile(revPath, []byte("garbage"), 0666) + c.Assert(err, gc.IsNil) + + path := extCharmArchiveDirPath(c, charmDir) + archive, err = charm.ReadCharmArchive(path) + c.Assert(err, gc.ErrorMatches, "invalid revision file") + c.Assert(archive, gc.IsNil) +} + +func (s *CharmArchiveSuite) TestCharmArchiveSetRevision(c *gc.C) { + archive, err := charm.ReadCharmArchive(s.archivePath) + c.Assert(err, gc.IsNil) + + c.Assert(archive.Revision(), gc.Equals, 1) + archive.SetRevision(42) + c.Assert(archive.Revision(), gc.Equals, 42) + + path := filepath.Join(c.MkDir(), "charm") + err = archive.ExpandTo(path) + 
c.Assert(err, gc.IsNil) + + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + c.Assert(dir.Revision(), gc.Equals, 42) +} + +func (s *CharmArchiveSuite) TestExpandToWithBadLink(c *gc.C) { + charmDir := cloneDir(c, charmDirPath(c, "dummy")) + badLink := filepath.Join(charmDir, "hooks", "badlink") + + // Symlink targeting a path outside of the charm. + err := os.Symlink("../../target", badLink) + c.Assert(err, gc.IsNil) + + archive := extCharmArchiveDir(c, charmDir) + c.Assert(err, gc.IsNil) + + path := filepath.Join(c.MkDir(), "charm") + err = archive.ExpandTo(path) + c.Assert(err, gc.ErrorMatches, `cannot extract "hooks/badlink": symlink "../../target" leads out of scope`) + + // Symlink targeting an absolute path. + os.Remove(badLink) + err = os.Symlink("/target", badLink) + c.Assert(err, gc.IsNil) + + archive = extCharmArchiveDir(c, charmDir) + c.Assert(err, gc.IsNil) + + path = filepath.Join(c.MkDir(), "charm") + err = archive.ExpandTo(path) + c.Assert(err, gc.ErrorMatches, `cannot extract "hooks/badlink": symlink "/target" is absolute`) +} + +func extCharmArchiveDirPath(c *gc.C, dirpath string) string { + path := filepath.Join(c.MkDir(), "archive.charm") + cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf("cd %s; zip --fifo --symlinks -r %s .", dirpath, path)) + output, err := cmd.CombinedOutput() + c.Assert(err, gc.IsNil, gc.Commentf("Command output: %s", output)) + return path +} + +func extCharmArchiveDir(c *gc.C, dirpath string) *charm.CharmArchive { + path := extCharmArchiveDirPath(c, dirpath) + archive, err := charm.ReadCharmArchive(path) + c.Assert(err, gc.IsNil) + return archive +} + +func archiveDir(c *gc.C, dirpath string) *charm.CharmArchive { + dir, err := charm.ReadCharmDir(dirpath) + c.Assert(err, gc.IsNil) + buf := new(bytes.Buffer) + err = dir.ArchiveTo(buf) + c.Assert(err, gc.IsNil) + archive, err := charm.ReadCharmArchiveBytes(buf.Bytes()) + c.Assert(err, gc.IsNil) + return archive +} === added file 'src/gopkg.in/juju/charm.v6-unstable/charmdir.go' --- src/gopkg.in/juju/charm.v6-unstable/charmdir.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/charmdir.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,312 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "archive/zip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" +) + +// The CharmDir type encapsulates access to data and operations +// on a charm directory. +type CharmDir struct { + Path string + meta *Meta + config *Config + metrics *Metrics + actions *Actions + revision int +} + +// Trick to ensure *CharmDir implements the Charm interface. +var _ Charm = (*CharmDir)(nil) + +// ReadCharmDir returns a CharmDir representing an expanded charm directory. 
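+//
+// A typical call (the path is illustrative only):
+//
+//	dir, err := charm.ReadCharmDir("/path/to/charm")
+//	if err != nil {
+//		// handle error
+//	}
+//	name := dir.Meta().Name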
+func ReadCharmDir(path string) (dir *CharmDir, err error) { + dir = &CharmDir{Path: path} + file, err := os.Open(dir.join("metadata.yaml")) + if err != nil { + return nil, err + } + dir.meta, err = ReadMeta(file) + file.Close() + if err != nil { + return nil, err + } + + file, err = os.Open(dir.join("config.yaml")) + if _, ok := err.(*os.PathError); ok { + dir.config = NewConfig() + } else if err != nil { + return nil, err + } else { + dir.config, err = ReadConfig(file) + file.Close() + if err != nil { + return nil, err + } + } + + file, err = os.Open(dir.join("metrics.yaml")) + if err == nil { + dir.metrics, err = ReadMetrics(file) + file.Close() + if err != nil { + return nil, err + } + } else if !os.IsNotExist(err) { + return nil, err + } + + file, err = os.Open(dir.join("actions.yaml")) + if _, ok := err.(*os.PathError); ok { + dir.actions = NewActions() + } else if err != nil { + return nil, err + } else { + dir.actions, err = ReadActionsYaml(file) + file.Close() + if err != nil { + return nil, err + } + } + + if file, err = os.Open(dir.join("revision")); err == nil { + _, err = fmt.Fscan(file, &dir.revision) + file.Close() + if err != nil { + return nil, errors.New("invalid revision file") + } + } else { + dir.revision = dir.meta.OldRevision + } + + return dir, nil +} + +// join builds a path rooted at the charm's expanded directory +// path and the extra path components provided. +func (dir *CharmDir) join(parts ...string) string { + parts = append([]string{dir.Path}, parts...) + return filepath.Join(parts...) +} + +// Revision returns the revision number for the charm +// expanded in dir. +func (dir *CharmDir) Revision() int { + return dir.revision +} + +// Meta returns the Meta representing the metadata.yaml file +// for the charm expanded in dir. +func (dir *CharmDir) Meta() *Meta { + return dir.meta +} + +// Config returns the Config representing the config.yaml file +// for the charm expanded in dir. +func (dir *CharmDir) Config() *Config { + return dir.config +} + +// Metrics returns the Metrics representing the metrics.yaml file +// for the charm expanded in dir. +func (dir *CharmDir) Metrics() *Metrics { + return dir.metrics +} + +// Actions returns the Actions representing the actions.yaml file +// for the charm expanded in dir. +func (dir *CharmDir) Actions() *Actions { + return dir.actions +} + +// SetRevision changes the charm revision number. This affects +// the revision reported by Revision and the revision of the +// charm archived by ArchiveTo. +// The revision file in the charm directory is not modified. +func (dir *CharmDir) SetRevision(revision int) { + dir.revision = revision +} + +// SetDiskRevision does the same as SetRevision but also changes +// the revision file in the charm directory. +func (dir *CharmDir) SetDiskRevision(revision int) error { + dir.SetRevision(revision) + file, err := os.OpenFile(dir.join("revision"), os.O_WRONLY|os.O_CREATE, 0644) + if err != nil { + return err + } + _, err = file.Write([]byte(strconv.Itoa(revision))) + file.Close() + return err +} + +// resolveSymlinkedRoot returns the target destination of a +// charm root directory if the root directory is a symlink. 
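+//
+// For example (hypothetical paths): if /build/charm is a symlink to
+// /src/charm, resolveSymlinkedRoot("/build/charm") returns "/src/charm",
+// while a path that is not a symlink is returned unchanged.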
+func resolveSymlinkedRoot(rootPath string) (string, error) { + info, err := os.Lstat(rootPath) + if err == nil && info.Mode()&os.ModeSymlink != 0 { + rootPath, err = filepath.EvalSymlinks(rootPath) + if err != nil { + return "", fmt.Errorf("cannot read path symlink at %q: %v", rootPath, err) + } + } + return rootPath, nil +} + +// ArchiveTo creates a charm file from the charm expanded in dir. +// By convention a charm archive should have a ".charm" suffix. +func (dir *CharmDir) ArchiveTo(w io.Writer) error { + return writeArchive(w, dir.Path, dir.revision, dir.Meta().Hooks()) +} + +func writeArchive(w io.Writer, path string, revision int, hooks map[string]bool) error { + zipw := zip.NewWriter(w) + defer zipw.Close() + + // The root directory may be symlinked elsewhere so + // resolve that before creating the zip. + rootPath, err := resolveSymlinkedRoot(path) + if err != nil { + return err + } + zp := zipPacker{zipw, rootPath, hooks} + if revision != -1 { + zp.AddRevision(revision) + } + return filepath.Walk(rootPath, zp.WalkFunc()) +} + +type zipPacker struct { + *zip.Writer + root string + hooks map[string]bool +} + +func (zp *zipPacker) WalkFunc() filepath.WalkFunc { + return func(path string, fi os.FileInfo, err error) error { + return zp.visit(path, fi, err) + } +} + +func (zp *zipPacker) AddRevision(revision int) error { + h := &zip.FileHeader{Name: "revision"} + h.SetMode(syscall.S_IFREG | 0644) + w, err := zp.CreateHeader(h) + if err == nil { + _, err = w.Write([]byte(strconv.Itoa(revision))) + } + return err +} + +func (zp *zipPacker) visit(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + relpath, err := filepath.Rel(zp.root, path) + if err != nil { + return err + } + method := zip.Deflate + hidden := len(relpath) > 1 && relpath[0] == '.' + if fi.IsDir() { + if relpath == "build" { + return filepath.SkipDir + } + if hidden { + return filepath.SkipDir + } + relpath += "/" + method = zip.Store + } + + mode := fi.Mode() + if err := checkFileType(relpath, mode); err != nil { + return err + } + if mode&os.ModeSymlink != 0 { + method = zip.Store + } + if hidden || relpath == "revision" { + return nil + } + h := &zip.FileHeader{ + Name: relpath, + Method: method, + } + + perm := os.FileMode(0644) + if mode&os.ModeSymlink != 0 { + perm = 0777 + } else if mode&0100 != 0 { + perm = 0755 + } + if filepath.Dir(relpath) == "hooks" { + hookName := filepath.Base(relpath) + if _, ok := zp.hooks[hookName]; ok && !fi.IsDir() && mode&0100 == 0 { + logger.Warningf("making %q executable in charm", path) + perm = perm | 0100 + } + } + h.SetMode(mode&^0777 | perm) + + w, err := zp.CreateHeader(h) + if err != nil || fi.IsDir() { + return err + } + var data []byte + if mode&os.ModeSymlink != 0 { + target, err := os.Readlink(path) + if err != nil { + return err + } + if err := checkSymlinkTarget(zp.root, relpath, target); err != nil { + return err + } + data = []byte(target) + _, err = w.Write(data) + } else { + file, err := os.Open(path) + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(w, file) + } + return err +} + +func checkSymlinkTarget(basedir, symlink, target string) error { + if filepath.IsAbs(target) { + return fmt.Errorf("symlink %q is absolute: %q", symlink, target) + } + p := filepath.Join(filepath.Dir(symlink), target) + if p == ".." 
|| strings.HasPrefix(p, "../") { + return fmt.Errorf("symlink %q links out of charm: %q", symlink, target) + } + return nil +} + +func checkFileType(path string, mode os.FileMode) error { + e := "file has an unknown type: %q" + switch mode & os.ModeType { + case os.ModeDir, os.ModeSymlink, 0: + return nil + case os.ModeNamedPipe: + e = "file is a named pipe: %q" + case os.ModeSocket: + e = "file is a socket: %q" + case os.ModeDevice: + e = "file is a device: %q" + } + return fmt.Errorf(e, path) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/charmdir_test.go' --- src/gopkg.in/juju/charm.v6-unstable/charmdir_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/charmdir_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,318 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +type CharmDirSuite struct { + testing.IsolationSuite +} + +var _ = gc.Suite(&CharmDirSuite{}) + +func (s *CharmDirSuite) TestReadCharmDir(c *gc.C) { + path := charmDirPath(c, "dummy") + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + checkDummy(c, dir, path) +} + +func (s *CharmDirSuite) TestReadCharmDirWithoutConfig(c *gc.C) { + path := charmDirPath(c, "varnish") + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + + // A lacking config.yaml file still causes a proper + // Config value to be returned. + c.Assert(dir.Config().Options, gc.HasLen, 0) +} + +func (s *CharmDirSuite) TestReadCharmDirWithoutMetrics(c *gc.C) { + path := charmDirPath(c, "varnish") + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + + // A lacking metrics.yaml file indicates the unit will not + // be metered. + c.Assert(dir.Metrics(), gc.IsNil) +} + +func (s *CharmDirSuite) TestReadCharmDirWithEmptyMetrics(c *gc.C) { + path := charmDirPath(c, "metered-empty") + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + c.Assert(Keys(dir.Metrics()), gc.HasLen, 0) +} + +func (s *CharmDirSuite) TestReadCharmDirWithCustomMetrics(c *gc.C) { + path := charmDirPath(c, "metered") + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + + c.Assert(dir.Metrics(), gc.NotNil) + c.Assert(Keys(dir.Metrics()), gc.DeepEquals, []string{"juju-unit-time", "pings"}) +} + +func (s *CharmDirSuite) TestReadCharmDirWithoutActions(c *gc.C) { + path := charmDirPath(c, "wordpress") + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + + // A lacking actions.yaml file still causes a proper + // Actions value to be returned. 
+ c.Assert(dir.Actions().ActionSpecs, gc.HasLen, 0) +} + +func (s *CharmDirSuite) TestArchiveTo(c *gc.C) { + baseDir := c.MkDir() + charmDir := cloneDir(c, charmDirPath(c, "dummy")) + s.assertArchiveTo(c, baseDir, charmDir) +} + +func (s *CharmDirSuite) TestArchiveToWithSymlinkedRootDir(c *gc.C) { + path := cloneDir(c, charmDirPath(c, "dummy")) + baseDir := filepath.Dir(path) + err := os.Symlink(filepath.Join("dummy"), filepath.Join(baseDir, "newdummy")) + c.Assert(err, gc.IsNil) + charmDir := filepath.Join(baseDir, "newdummy") + + s.assertArchiveTo(c, baseDir, charmDir) +} + +func (s *CharmDirSuite) assertArchiveTo(c *gc.C, baseDir, charmDir string) { + haveSymlinks := true + if err := os.Symlink("../target", filepath.Join(charmDir, "hooks/symlink")); err != nil { + haveSymlinks = false + } + dir, err := charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + path := filepath.Join(baseDir, "archive.charm") + file, err := os.Create(path) + c.Assert(err, gc.IsNil) + err = dir.ArchiveTo(file) + file.Close() + c.Assert(err, gc.IsNil) + + zipr, err := zip.OpenReader(path) + c.Assert(err, gc.IsNil) + defer zipr.Close() + + var metaf, instf, emptyf, revf, symf *zip.File + for _, f := range zipr.File { + c.Logf("Archived file: %s", f.Name) + switch f.Name { + case "revision": + revf = f + case "metadata.yaml": + metaf = f + case "hooks/install": + instf = f + case "hooks/symlink": + symf = f + case "empty/": + emptyf = f + case "build/ignored": + c.Errorf("archive includes build/*: %s", f.Name) + case ".ignored", ".dir/ignored": + c.Errorf("archive includes .* entries: %s", f.Name) + } + } + + c.Assert(revf, gc.NotNil) + reader, err := revf.Open() + c.Assert(err, gc.IsNil) + data, err := ioutil.ReadAll(reader) + reader.Close() + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, "1") + + c.Assert(metaf, gc.NotNil) + reader, err = metaf.Open() + c.Assert(err, gc.IsNil) + meta, err := charm.ReadMeta(reader) + reader.Close() + c.Assert(err, gc.IsNil) + c.Assert(meta.Name, gc.Equals, "dummy") + + c.Assert(instf, gc.NotNil) + // Despite it being 0751, we pack and unpack it as 0755. + c.Assert(instf.Mode()&0777, gc.Equals, os.FileMode(0755)) + + if haveSymlinks { + c.Assert(symf, gc.NotNil) + c.Assert(symf.Mode()&0777, gc.Equals, os.FileMode(0777)) + reader, err = symf.Open() + c.Assert(err, gc.IsNil) + data, err = ioutil.ReadAll(reader) + reader.Close() + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, "../target") + } else { + c.Assert(symf, gc.IsNil) + } + + c.Assert(emptyf, gc.NotNil) + c.Assert(emptyf.Mode()&os.ModeType, gc.Equals, os.ModeDir) + // Despite it being 0750, we pack and unpack it as 0755. 
+ c.Assert(emptyf.Mode()&0777, gc.Equals, os.FileMode(0755)) +} + +// Bug #864164: Must complain if charm hooks aren't executable +func (s *CharmDirSuite) TestArchiveToWithNonExecutableHooks(c *gc.C) { + hooks := []string{"install", "start", "config-changed", "upgrade-charm", "stop", "collect-metrics", "meter-status-changed"} + for _, relName := range []string{"foo", "bar", "self"} { + for _, kind := range []string{"joined", "changed", "departed", "broken"} { + hooks = append(hooks, relName+"-relation-"+kind) + } + } + + dir := readCharmDir(c, "all-hooks") + path := filepath.Join(c.MkDir(), "archive.charm") + file, err := os.Create(path) + c.Assert(err, gc.IsNil) + err = dir.ArchiveTo(file) + file.Close() + c.Assert(err, gc.IsNil) + + tlog := c.GetTestLog() + for _, hook := range hooks { + fullpath := filepath.Join(dir.Path, "hooks", hook) + exp := fmt.Sprintf(`^(.|\n)*WARNING juju.charm making "%s" executable in charm(.|\n)*$`, fullpath) + c.Assert(tlog, gc.Matches, exp, gc.Commentf("hook %q was not made executable", fullpath)) + } + + // Expand it and check the hooks' permissions + // (But do not use ExpandTo(), just use the raw zip) + f, err := os.Open(path) + c.Assert(err, gc.IsNil) + defer f.Close() + fi, err := f.Stat() + c.Assert(err, gc.IsNil) + size := fi.Size() + zipr, err := zip.NewReader(f, size) + c.Assert(err, gc.IsNil) + allhooks := dir.Meta().Hooks() + for _, zfile := range zipr.File { + cleanName := filepath.Clean(zfile.Name) + if strings.HasPrefix(cleanName, "hooks") { + hookName := filepath.Base(cleanName) + if _, ok := allhooks[hookName]; ok { + perms := zfile.Mode() + c.Assert(perms&0100 != 0, gc.Equals, true, gc.Commentf("hook %q is not executable", hookName)) + } + } + } +} + +func (s *CharmDirSuite) TestArchiveToWithBadType(c *gc.C) { + charmDir := cloneDir(c, charmDirPath(c, "dummy")) + badFile := filepath.Join(charmDir, "hooks", "badfile") + + // Symlink targeting a path outside of the charm. + err := os.Symlink("../../target", badFile) + c.Assert(err, gc.IsNil) + + dir, err := charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + + err = dir.ArchiveTo(&bytes.Buffer{}) + c.Assert(err, gc.ErrorMatches, `symlink "hooks/badfile" links out of charm: "../../target"`) + + // Symlink targeting an absolute path. + os.Remove(badFile) + err = os.Symlink("/target", badFile) + c.Assert(err, gc.IsNil) + + dir, err = charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + + err = dir.ArchiveTo(&bytes.Buffer{}) + c.Assert(err, gc.ErrorMatches, `symlink "hooks/badfile" is absolute: "/target"`) + + // Can't archive special files either. 
+ os.Remove(badFile) + err = syscall.Mkfifo(badFile, 0644) + c.Assert(err, gc.IsNil) + + dir, err = charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + + err = dir.ArchiveTo(&bytes.Buffer{}) + c.Assert(err, gc.ErrorMatches, `file is a named pipe: "hooks/badfile"`) +} + +func (s *CharmDirSuite) TestDirRevisionFile(c *gc.C) { + charmDir := cloneDir(c, charmDirPath(c, "dummy")) + revPath := filepath.Join(charmDir, "revision") + + // Missing revision file + err := os.Remove(revPath) + c.Assert(err, gc.IsNil) + + dir, err := charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + c.Assert(dir.Revision(), gc.Equals, 0) + + // Missing revision file with old revision in metadata + file, err := os.OpenFile(filepath.Join(charmDir, "metadata.yaml"), os.O_WRONLY|os.O_APPEND, 0) + c.Assert(err, gc.IsNil) + _, err = file.Write([]byte("\nrevision: 1234\n")) + c.Assert(err, gc.IsNil) + + dir, err = charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + c.Assert(dir.Revision(), gc.Equals, 1234) + + // Revision file with bad content + err = ioutil.WriteFile(revPath, []byte("garbage"), 0666) + c.Assert(err, gc.IsNil) + + dir, err = charm.ReadCharmDir(charmDir) + c.Assert(err, gc.ErrorMatches, "invalid revision file") + c.Assert(dir, gc.IsNil) +} + +func (s *CharmDirSuite) TestDirSetRevision(c *gc.C) { + path := cloneDir(c, charmDirPath(c, "dummy")) + dir, err := charm.ReadCharmDir(path) + c.Assert(err, gc.IsNil) + c.Assert(dir.Revision(), gc.Equals, 1) + dir.SetRevision(42) + c.Assert(dir.Revision(), gc.Equals, 42) + + var b bytes.Buffer + err = dir.ArchiveTo(&b) + c.Assert(err, gc.IsNil) + + archive, err := charm.ReadCharmArchiveBytes(b.Bytes()) + c.Assert(archive.Revision(), gc.Equals, 42) +} + +func (s *CharmDirSuite) TestDirSetDiskRevision(c *gc.C) { + charmDir := cloneDir(c, charmDirPath(c, "dummy")) + dir, err := charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + + c.Assert(dir.Revision(), gc.Equals, 1) + dir.SetDiskRevision(42) + c.Assert(dir.Revision(), gc.Equals, 42) + + dir, err = charm.ReadCharmDir(charmDir) + c.Assert(err, gc.IsNil) + c.Assert(dir.Revision(), gc.Equals, 42) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/config.go' --- src/gopkg.in/juju/charm.v6-unstable/config.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/config.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,234 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + "io" + "io/ioutil" + "strconv" + + "github.com/juju/schema" + "gopkg.in/yaml.v1" +) + +// Settings is a group of charm config option names and values. A Settings +// S is considered valid by the Config C if every key in S is an option in +// C, and every value either has the correct type or is nil. +type Settings map[string]interface{} + +// Option represents a single charm config option. +type Option struct { + Type string `yaml:"type"` + Description string `yaml:"description,omitempty"` + Default interface{} `yaml:"default,omitempty"` +} + +// error replaces any supplied non-nil error with a new error describing a +// validation failure for the supplied value. +func (option Option) error(err *error, name string, value interface{}) { + if *err != nil { + *err = fmt.Errorf("option %q expected %s, got %#v", name, option.Type, value) + } +} + +// validate returns an appropriately-typed value for the supplied value, or +// returns an error if it cannot be converted to the correct type. Nil values +// are always considered valid. 
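+//
+// For example (values mirror cases in the config tests):
+//
+//	opt := Option{Type: "int"}
+//	v, err := opt.validate("skill-level", 123)       // int64(123), nil
+//	_, err = opt.validate("skill-level", "too much") // option "skill-level" expected int, got "too much"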
+func (option Option) validate(name string, value interface{}) (_ interface{}, err error) { + if value == nil { + return nil, nil + } + defer option.error(&err, name, value) + if checker := optionTypeCheckers[option.Type]; checker != nil { + if value, err = checker.Coerce(value, nil); err != nil { + return nil, err + } + return value, nil + } + panic(fmt.Errorf("option %q has unknown type %q", name, option.Type)) +} + +var optionTypeCheckers = map[string]schema.Checker{ + "string": schema.String(), + "int": schema.Int(), + "float": schema.Float(), + "boolean": schema.Bool(), +} + +// parse returns an appropriately-typed value for the supplied string, or +// returns an error if it cannot be parsed to the correct type. +func (option Option) parse(name, str string) (_ interface{}, err error) { + defer option.error(&err, name, str) + switch option.Type { + case "string": + return str, nil + case "int": + return strconv.ParseInt(str, 10, 64) + case "float": + return strconv.ParseFloat(str, 64) + case "boolean": + return strconv.ParseBool(str) + } + panic(fmt.Errorf("option %q has unknown type %q", name, option.Type)) +} + +// Config represents the supported configuration options for a charm, +// as declared in its config.yaml file. +type Config struct { + Options map[string]Option +} + +// NewConfig returns a new Config without any options. +func NewConfig() *Config { + return &Config{map[string]Option{}} +} + +// ReadConfig reads a Config in YAML format. +func ReadConfig(r io.Reader) (*Config, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var config *Config + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + if config == nil { + return nil, fmt.Errorf("invalid config: empty configuration") + } + if config.Options == nil { + // We are allowed an empty configuration if the options + // field is explicitly specified, but there is no easy way + // to tell if it was specified or not without unmarshaling + // into interface{} and explicitly checking the field. + var configInterface interface{} + if err := yaml.Unmarshal(data, &configInterface); err != nil { + return nil, err + } + m, _ := configInterface.(map[interface{}]interface{}) + if _, ok := m["options"]; !ok { + return nil, fmt.Errorf("invalid config: empty configuration") + } + } + for name, option := range config.Options { + switch option.Type { + case "string", "int", "float", "boolean": + case "": + // Missing type is valid in python. + option.Type = "string" + default: + return nil, fmt.Errorf("invalid config: option %q has unknown type %q", name, option.Type) + } + def := option.Default + if def == "" && option.Type == "string" { + // Skip normal validation for compatibility with pyjuju. + } else if option.Default, err = option.validate(name, def); err != nil { + option.error(&err, name, def) + return nil, fmt.Errorf("invalid config default: %v", err) + } + config.Options[name] = option + } + return config, nil +} + +// option returns the named option from the config, or an error if none +// such exists. +func (c *Config) option(name string) (Option, error) { + if option, ok := c.Options[name]; ok { + return option, nil + } + return Option{}, fmt.Errorf("unknown option %q", name) +} + +// DefaultSettings returns settings containing the default value of every +// option in the config. Default values may be nil. 
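+//
+// For instance, with the option set used in the tests below ("title"
+// defaulting to "My Title" and "outlook" having no default):
+//
+//	settings := config.DefaultSettings()
+//	// settings["title"] == "My Title"
+//	// settings["outlook"] == nil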
+func (c *Config) DefaultSettings() Settings { + out := make(Settings) + for name, option := range c.Options { + out[name] = option.Default + } + return out +} + +// ValidateSettings returns a copy of the supplied settings with a consistent type +// for each value. It returns an error if the settings contain unknown keys +// or invalid values. +func (c *Config) ValidateSettings(settings Settings) (Settings, error) { + out := make(Settings) + for name, value := range settings { + if option, err := c.option(name); err != nil { + return nil, err + } else if value, err = option.validate(name, value); err != nil { + return nil, err + } + out[name] = value + } + return out, nil +} + +// FilterSettings returns the subset of the supplied settings that are valid. +func (c *Config) FilterSettings(settings Settings) Settings { + out := make(Settings) + for name, value := range settings { + if option, err := c.option(name); err == nil { + if value, err := option.validate(name, value); err == nil { + out[name] = value + } + } + } + return out +} + +// ParseSettingsStrings returns settings derived from the supplied map. Every +// value in the map must be parseable to the correct type for the option +// identified by its key. Empty values are interpreted as nil. +func (c *Config) ParseSettingsStrings(values map[string]string) (Settings, error) { + out := make(Settings) + for name, str := range values { + option, err := c.option(name) + if err != nil { + return nil, err + } + value, err := option.parse(name, str) + if err != nil { + return nil, err + } + out[name] = value + } + return out, nil +} + +// ParseSettingsYAML returns settings derived from the supplied YAML data. The +// YAML must unmarshal to a map of strings to settings data; the supplied key +// must be present in the map, and must point to a map in which every value +// must have, or be a string parseable to, the correct type for the associated +// config option. Empty strings and nil values are both interpreted as nil. +func (c *Config) ParseSettingsYAML(yamlData []byte, key string) (Settings, error) { + var allSettings map[string]Settings + if err := yaml.Unmarshal(yamlData, &allSettings); err != nil { + return nil, fmt.Errorf("cannot parse settings data: %v", err) + } + settings, ok := allSettings[key] + if !ok { + return nil, fmt.Errorf("no settings found for %q", key) + } + out := make(Settings) + for name, value := range settings { + option, err := c.option(name) + if err != nil { + return nil, err + } + // Accept string values for compatibility with python. + if str, ok := value.(string); ok { + if value, err = option.parse(name, str); err != nil { + return nil, err + } + } else if value, err = option.validate(name, value); err != nil { + return nil, err + } + out[name] = value + } + return out, nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/config_test.go' --- src/gopkg.in/juju/charm.v6-unstable/config_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/config_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,471 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "bytes" + "fmt" + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +type ConfigSuite struct { + config *charm.Config +} + +var _ = gc.Suite(&ConfigSuite{}) + +func (s *ConfigSuite) SetUpSuite(c *gc.C) { + // Just use a single shared config for the whole suite. 
There's no use case + // for mutating a config, we assume that nobody will do so here. + var err error + s.config, err = charm.ReadConfig(bytes.NewBuffer([]byte(` +options: + title: + default: My Title + description: A descriptive title used for the service. + type: string + subtitle: + default: "" + description: An optional subtitle used for the service. + outlook: + description: No default outlook. + # type defaults to string in python + username: + default: admin001 + description: The name of the initial account (given admin permissions). + type: string + skill-level: + description: A number indicating skill. + type: int + agility-ratio: + description: A number from 0 to 1 indicating agility. + type: float + reticulate-splines: + description: Whether to reticulate splines on launch, or not. + type: boolean +`))) + c.Assert(err, gc.IsNil) +} + +func (s *ConfigSuite) TestReadSample(c *gc.C) { + c.Assert(s.config.Options, jc.DeepEquals, map[string]charm.Option{ + "title": { + Default: "My Title", + Description: "A descriptive title used for the service.", + Type: "string", + }, + "subtitle": { + Default: "", + Description: "An optional subtitle used for the service.", + Type: "string", + }, + "username": { + Default: "admin001", + Description: "The name of the initial account (given admin permissions).", + Type: "string", + }, + "outlook": { + Description: "No default outlook.", + Type: "string", + }, + "skill-level": { + Description: "A number indicating skill.", + Type: "int", + }, + "agility-ratio": { + Description: "A number from 0 to 1 indicating agility.", + Type: "float", + }, + "reticulate-splines": { + Description: "Whether to reticulate splines on launch, or not.", + Type: "boolean", + }, + }) +} + +func (s *ConfigSuite) TestDefaultSettings(c *gc.C) { + c.Assert(s.config.DefaultSettings(), jc.DeepEquals, charm.Settings{ + "title": "My Title", + "subtitle": "", + "username": "admin001", + "outlook": nil, + "skill-level": nil, + "agility-ratio": nil, + "reticulate-splines": nil, + }) +} + +func (s *ConfigSuite) TestFilterSettings(c *gc.C) { + settings := s.config.FilterSettings(charm.Settings{ + "title": "something valid", + "username": nil, + "unknown": "whatever", + "outlook": "", + "skill-level": 5.5, + "agility-ratio": true, + "reticulate-splines": "hullo", + }) + c.Assert(settings, jc.DeepEquals, charm.Settings{ + "title": "something valid", + "username": nil, + "outlook": "", + }) +} + +func (s *ConfigSuite) TestValidateSettings(c *gc.C) { + for i, test := range []struct { + info string + input charm.Settings + expect charm.Settings + err string + }{{ + info: "nil settings are valid", + expect: charm.Settings{}, + }, { + info: "empty settings are valid", + input: charm.Settings{}, + }, { + info: "unknown keys are not valid", + input: charm.Settings{"foo": nil}, + err: `unknown option "foo"`, + }, { + info: "nil is valid for every value type", + input: charm.Settings{ + "outlook": nil, + "skill-level": nil, + "agility-ratio": nil, + "reticulate-splines": nil, + }, + }, { + info: "correctly-typed values are valid", + input: charm.Settings{ + "outlook": "stormy", + "skill-level": int64(123), + "agility-ratio": 0.5, + "reticulate-splines": true, + }, + }, { + info: "empty string-typed values stay empty", + input: charm.Settings{"outlook": ""}, + expect: charm.Settings{"outlook": ""}, + }, { + info: "almost-correctly-typed values are valid", + input: charm.Settings{ + "skill-level": 123, + "agility-ratio": float32(0.5), + }, + expect: charm.Settings{ + "skill-level": int64(123), + 
"agility-ratio": 0.5, + }, + }, { + info: "bad string", + input: charm.Settings{"outlook": false}, + err: `option "outlook" expected string, got false`, + }, { + info: "bad int", + input: charm.Settings{"skill-level": 123.4}, + err: `option "skill-level" expected int, got 123.4`, + }, { + info: "bad float", + input: charm.Settings{"agility-ratio": "cheese"}, + err: `option "agility-ratio" expected float, got "cheese"`, + }, { + info: "bad boolean", + input: charm.Settings{"reticulate-splines": 101}, + err: `option "reticulate-splines" expected boolean, got 101`, + }} { + c.Logf("test %d: %s", i, test.info) + result, err := s.config.ValidateSettings(test.input) + if test.err != "" { + c.Check(err, gc.ErrorMatches, test.err) + } else { + c.Check(err, gc.IsNil) + if test.expect == nil { + c.Check(result, jc.DeepEquals, test.input) + } else { + c.Check(result, jc.DeepEquals, test.expect) + } + } + } +} + +var settingsWithNils = charm.Settings{ + "outlook": nil, + "skill-level": nil, + "agility-ratio": nil, + "reticulate-splines": nil, +} + +var settingsWithValues = charm.Settings{ + "outlook": "whatever", + "skill-level": int64(123), + "agility-ratio": 2.22, + "reticulate-splines": true, +} + +func (s *ConfigSuite) TestParseSettingsYAML(c *gc.C) { + for i, test := range []struct { + info string + yaml string + key string + expect charm.Settings + err string + }{{ + info: "bad structure", + yaml: "`", + err: `cannot parse settings data: .*`, + }, { + info: "bad key", + yaml: "{}", + key: "blah", + err: `no settings found for "blah"`, + }, { + info: "bad settings key", + yaml: "blah:\n ping: pong", + key: "blah", + err: `unknown option "ping"`, + }, { + info: "bad type for string", + yaml: "blah:\n outlook: 123", + key: "blah", + err: `option "outlook" expected string, got 123`, + }, { + info: "bad type for int", + yaml: "blah:\n skill-level: 12.345", + key: "blah", + err: `option "skill-level" expected int, got 12.345`, + }, { + info: "bad type for float", + yaml: "blah:\n agility-ratio: blob", + key: "blah", + err: `option "agility-ratio" expected float, got "blob"`, + }, { + info: "bad type for boolean", + yaml: "blah:\n reticulate-splines: 123", + key: "blah", + err: `option "reticulate-splines" expected boolean, got 123`, + }, { + info: "bad string for int", + yaml: "blah:\n skill-level: cheese", + key: "blah", + err: `option "skill-level" expected int, got "cheese"`, + }, { + info: "bad string for float", + yaml: "blah:\n agility-ratio: blob", + key: "blah", + err: `option "agility-ratio" expected float, got "blob"`, + }, { + info: "bad string for boolean", + yaml: "blah:\n reticulate-splines: cannonball", + key: "blah", + err: `option "reticulate-splines" expected boolean, got "cannonball"`, + }, { + info: "empty dict is valid", + yaml: "blah: {}", + key: "blah", + expect: charm.Settings{}, + }, { + info: "nil values are valid", + yaml: `blah: + outlook: null + skill-level: null + agility-ratio: null + reticulate-splines: null`, + key: "blah", + expect: settingsWithNils, + }, { + info: "empty strings for bool options are not accepted", + yaml: `blah: + outlook: "" + skill-level: 123 + agility-ratio: 12.0 + reticulate-splines: ""`, + key: "blah", + err: `option "reticulate-splines" expected boolean, got ""`, + }, { + info: "empty strings for int options are not accepted", + yaml: `blah: + outlook: "" + skill-level: "" + agility-ratio: 12.0 + reticulate-splines: false`, + key: "blah", + err: `option "skill-level" expected int, got ""`, + }, { + info: "empty strings for float options are not 
accepted", + yaml: `blah: + outlook: "" + skill-level: 123 + agility-ratio: "" + reticulate-splines: false`, + key: "blah", + err: `option "agility-ratio" expected float, got ""`, + }, { + info: "appropriate strings are valid", + yaml: `blah: + outlook: whatever + skill-level: "123" + agility-ratio: "2.22" + reticulate-splines: "true"`, + key: "blah", + expect: settingsWithValues, + }, { + info: "appropriate types are valid", + yaml: `blah: + outlook: whatever + skill-level: 123 + agility-ratio: 2.22 + reticulate-splines: y`, + key: "blah", + expect: settingsWithValues, + }} { + c.Logf("test %d: %s", i, test.info) + result, err := s.config.ParseSettingsYAML([]byte(test.yaml), test.key) + if test.err != "" { + c.Check(err, gc.ErrorMatches, test.err) + } else { + c.Check(err, gc.IsNil) + c.Check(result, jc.DeepEquals, test.expect) + } + } +} + +func (s *ConfigSuite) TestParseSettingsStrings(c *gc.C) { + for i, test := range []struct { + info string + input map[string]string + expect charm.Settings + err string + }{{ + info: "nil map is valid", + expect: charm.Settings{}, + }, { + info: "empty map is valid", + input: map[string]string{}, + expect: charm.Settings{}, + }, { + info: "empty strings for string options are valid", + input: map[string]string{"outlook": ""}, + expect: charm.Settings{"outlook": ""}, + }, { + info: "empty strings for non-string options are invalid", + input: map[string]string{"skill-level": ""}, + err: `option "skill-level" expected int, got ""`, + }, { + info: "strings are converted", + input: map[string]string{ + "outlook": "whatever", + "skill-level": "123", + "agility-ratio": "2.22", + "reticulate-splines": "true", + }, + expect: settingsWithValues, + }, { + info: "bad string for int", + input: map[string]string{"skill-level": "cheese"}, + err: `option "skill-level" expected int, got "cheese"`, + }, { + info: "bad string for float", + input: map[string]string{"agility-ratio": "blob"}, + err: `option "agility-ratio" expected float, got "blob"`, + }, { + info: "bad string for boolean", + input: map[string]string{"reticulate-splines": "cannonball"}, + err: `option "reticulate-splines" expected boolean, got "cannonball"`, + }} { + c.Logf("test %d: %s", i, test.info) + result, err := s.config.ParseSettingsStrings(test.input) + if test.err != "" { + c.Check(err, gc.ErrorMatches, test.err) + } else { + c.Check(err, gc.IsNil) + c.Check(result, jc.DeepEquals, test.expect) + } + } +} + +func (s *ConfigSuite) TestConfigError(c *gc.C) { + _, err := charm.ReadConfig(bytes.NewBuffer([]byte(`options: {t: {type: foo}}`))) + c.Assert(err, gc.ErrorMatches, `invalid config: option "t" has unknown type "foo"`) +} + +func (s *ConfigSuite) TestConfigWithNoOptions(c *gc.C) { + _, err := charm.ReadConfig(strings.NewReader("other:\n")) + c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") + + _, err = charm.ReadConfig(strings.NewReader("\n")) + c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") + + _, err = charm.ReadConfig(strings.NewReader("null\n")) + c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") + + _, err = charm.ReadConfig(strings.NewReader("options:\n")) + c.Assert(err, gc.IsNil) +} + +func (s *ConfigSuite) TestDefaultType(c *gc.C) { + assertDefault := func(type_ string, value string, expected interface{}) { + config := fmt.Sprintf(`options: {t: {type: %s, default: %s}}`, type_, value) + result, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) + c.Assert(err, gc.IsNil) + c.Assert(result.Options["t"].Default, 
gc.Equals, expected) + } + + assertDefault("boolean", "true", true) + assertDefault("string", "golden grahams", "golden grahams") + assertDefault("string", `""`, "") + assertDefault("float", "2.2e11", 2.2e11) + assertDefault("int", "99", int64(99)) + + assertTypeError := func(type_, str, value string) { + config := fmt.Sprintf(`options: {t: {type: %s, default: %s}}`, type_, str) + _, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) + expected := fmt.Sprintf(`invalid config default: option "t" expected %s, got %s`, type_, value) + c.Assert(err, gc.ErrorMatches, expected) + } + + assertTypeError("boolean", "henry", `"henry"`) + assertTypeError("string", "2.5", "2.5") + assertTypeError("float", "123", "123") + assertTypeError("int", "true", "true") +} + +// When an empty config is supplied an error should be returned +func (s *ConfigSuite) TestEmptyConfigReturnsError(c *gc.C) { + config := "" + result, err := charm.ReadConfig(bytes.NewBuffer([]byte(config))) + c.Assert(result, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "invalid config: empty configuration") +} + +func (s *ConfigSuite) TestYAMLMarshal(c *gc.C) { + cfg, err := charm.ReadConfig(strings.NewReader(` +options: + minimal: + type: string + withdescription: + type: int + description: d + withdefault: + type: boolean + description: d + default: true +`)) + c.Assert(err, gc.IsNil) + c.Assert(cfg.Options, gc.HasLen, 3) + + newYAML, err := yaml.Marshal(cfg) + c.Assert(err, gc.IsNil) + + newCfg, err := charm.ReadConfig(bytes.NewReader(newYAML)) + c.Assert(err, gc.IsNil) + c.Assert(newCfg, jc.DeepEquals, cfg) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/dependencies.tsv' --- src/gopkg.in/juju/charm.v6-unstable/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +github.com/juju/errors git 4567a5e69fd3130ca0d89f69478e7ac025b67452 2015-03-27T19:24:31Z +github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z +github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z +github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/names git a6a253b0a94cc79e99a68d284b970ffce2a11ecd 2015-07-09T13:59:32Z +github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z +github.com/juju/testing git ee18040b46bb1f8c93438383bd51ec77eb8c02ab 2016-01-12T21:04:04Z +github.com/juju/utils git ef8480bcaabae506777530725c81d83a4de2fb06 2016-01-12T23:14:21Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z +gopkg.in/mgo.v2 git f4923a569136442e900b8cf5c1a706c0a8b0883c 2015-08-21T15:31:23Z +gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z +gopkg.in/yaml.v2 git 53feefa2559fb8dfa8d81baad31be332c97d6c77 2015-09-24T14:23:14Z === added file 'src/gopkg.in/juju/charm.v6-unstable/export_test.go' --- src/gopkg.in/juju/charm.v6-unstable/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +// Export meaningful bits for tests only. 
+ +var ( + IfaceExpander = ifaceExpander + ValidateValue = validateValue + + ParsePayloadClass = parsePayloadClass + ResourceSchema = resourceSchema +) + +func MissingSeriesError() error { + return missingSeriesError +} === added directory 'src/gopkg.in/juju/charm.v6-unstable/hooks' === added file 'src/gopkg.in/juju/charm.v6-unstable/hooks/hooks.go' --- src/gopkg.in/juju/charm.v6-unstable/hooks/hooks.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/hooks/hooks.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,110 @@ +// Copyright 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +// hooks provides types and constants that define the hooks known to Juju. +package hooks + +// Kind enumerates the different kinds of hooks that exist. +type Kind string + +const ( + // None of these hooks are ever associated with a relation; each of them + // represents a change to the state of the unit as a whole. The values + // themselves are all valid hook names. + Install Kind = "install" + Start Kind = "start" + ConfigChanged Kind = "config-changed" + UpgradeCharm Kind = "upgrade-charm" + Stop Kind = "stop" + Action Kind = "action" + CollectMetrics Kind = "collect-metrics" + MeterStatusChanged Kind = "meter-status-changed" + LeaderElected Kind = "leader-elected" + LeaderDeposed Kind = "leader-deposed" + LeaderSettingsChanged Kind = "leader-settings-changed" + UpdateStatus Kind = "update-status" + + // These hooks require an associated relation, and the name of the relation + // unit whose change triggered the hook. The hook file names that these + // kinds represent will be prefixed by the relation name; for example, + // "db-relation-joined". + RelationJoined Kind = "relation-joined" + RelationChanged Kind = "relation-changed" + RelationDeparted Kind = "relation-departed" + + // This hook requires an associated relation. The represented hook file name + // will be prefixed by the relation name, just like the other Relation* Kind + // values. + RelationBroken Kind = "relation-broken" + + // These hooks require an associated storage. The hook file names that these + // kinds represent will be prefixed by the storage name; for example, + // "shared-fs-storage-attached". + StorageAttached Kind = "storage-attached" + StorageDetaching Kind = "storage-detaching" +) + +var unitHooks = []Kind{ + Install, + Start, + ConfigChanged, + UpgradeCharm, + Stop, + CollectMetrics, + MeterStatusChanged, + LeaderElected, + LeaderDeposed, + LeaderSettingsChanged, + UpdateStatus, +} + +// UnitHooks returns all known unit hook kinds. +func UnitHooks() []Kind { + hooks := make([]Kind, len(unitHooks)) + copy(hooks, unitHooks) + return hooks +} + +var relationHooks = []Kind{ + RelationJoined, + RelationChanged, + RelationDeparted, + RelationBroken, +} + +// RelationHooks returns all known relation hook kinds. +func RelationHooks() []Kind { + hooks := make([]Kind, len(relationHooks)) + copy(hooks, relationHooks) + return hooks +} + +var storageHooks = []Kind{ + StorageAttached, + StorageDetaching, +} + +// StorageHooks returns all known storage hook kinds. +func StorageHooks() []Kind { + hooks := make([]Kind, len(storageHooks)) + copy(hooks, storageHooks) + return hooks +} + +// IsRelation returns whether the Kind represents a relation hook. +func (kind Kind) IsRelation() bool { + switch kind { + case RelationJoined, RelationChanged, RelationDeparted, RelationBroken: + return true + } + return false +} + +// IsStorage returns whether the Kind represents a storage hook. 
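+// For example, StorageAttached.IsStorage() reports true, while a unit hook
+// such as Install or a relation hook such as RelationJoined reports false.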
+func (kind Kind) IsStorage() bool {
+	switch kind {
+	case StorageAttached, StorageDetaching:
+		return true
+	}
+	return false
+}

=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/README.md'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/README.md	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+A dummy bundle

=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,11 @@
+# This bundle has a bad relation, which will cause it to fail
+# its verification.
+services:
+  wordpress:
+    charm: wordpress
+    num_units: 1
+  mysql:
+    charm: mysql
+    num_units: 1
+relations:
+  - ["foo:db", "mysql:server"]

=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/README.md'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/README.md	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,46 @@
+OpenStack Bundle for Juju
+=========================
+
+Overview
+--------
+
+This bundle deploys a reference OpenStack architecture including all core projects:
+
+ - OpenStack Compute
+ - OpenStack Networking (using Open vSwitch plugin)
+ - OpenStack Block Storage (backed with Ceph storage)
+ - OpenStack Image
+ - OpenStack Object Storage
+ - OpenStack Identity
+ - OpenStack Dashboard
+ - OpenStack Telemetry
+ - OpenStack Orchestration
+
+The charm configuration is an opinionated set for deploying OpenStack for testing on cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage).
+
+The Ubuntu Server Team use this bundle for testing OpenStack-on-OpenStack.
+
+Usage
+-----
+
+Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard:
+
+   http:///horizon
+
+The charms configure the 'admin' user with a password of 'openstack' by default.
+
+The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. Read the OpenStack User Guide on how to configure your cloud for use:
+
+   http://docs.openstack.org/user-guide/content/
+
+Niggles
+-------
+
+The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. It's possible to do this when testing on OpenStack by adding a second network interface to the neutron-gateway service:
+
+   nova interface-attach --net-id
+   juju set neutron-gateway ext-port=eth1
+
+Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin.
+
+For actual OpenStack deployments, this service would reside on a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud).

=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/bundle.yaml'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/bundle.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/openstack/bundle.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,202 @@
+series: precise
+services:
+  mysql:
+    charm: cs:precise/mysql
+    constraints: mem=1G
+    options:
+      dataset-size: 50%
+  rabbitmq-server:
+    charm: cs:precise/rabbitmq-server
+    constraints: mem=1G
+  ceph:
+    charm: cs:precise/ceph
+    num_units: 3
+    constraints: mem=1G
+    options:
+      monitor-count: 3
+      fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc
+      monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==
+      osd-devices: /dev/vdb
+      osd-reformat: "yes"
+      ephemeral-unmount: /mnt
+  keystone:
+    charm: cs:precise/keystone
+    constraints: mem=1G
+    options:
+      admin-password: openstack
+      admin-token: ubuntutesting
+  openstack-dashboard:
+    charm: cs:precise/openstack-dashboard
+    constraints: mem=1G
+  nova-compute:
+    charm: cs:precise/nova-compute
+    num_units: 3
+    constraints: mem=4G
+    options:
+      config-flags: "auto_assign_floating_ip=False"
+      enable-live-migration: False
+      virt-type: kvm
+  nova-cloud-controller:
+    charm: cs:precise/nova-cloud-controller
+    constraints: mem=1G
+    options:
+      network-manager: Neutron
+      quantum-security-groups: "yes"
+  neutron-gateway:
+    charm: cs:precise/quantum-gateway
+    constraints: mem=1G
+  cinder:
+    charm: cs:precise/cinder
+    options:
+      block-device: "None"
+    constraints: mem=1G
+  glance:
+    charm: cs:precise/glance
+    constraints: mem=1G
+  swift-proxy:
+    charm: cs:precise/swift-proxy
+    constraints: mem=1G
+    options:
+      zone-assignment: manual
+      replicas: 3
+      use-https: 'no'
+      swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae
+  swift-storage-z1:
+    charm: cs:precise/swift-storage
+    constraints: mem=1G
+    options:
+      zone: 1
+      block-device: vdb
+      overwrite: "true"
+  swift-storage-z2:
+    charm: cs:precise/swift-storage
+    constraints: mem=1G
+    options:
+      zone: 2
+      block-device: vdb
+      overwrite: "true"
+  swift-storage-z3:
+    charm: cs:precise/swift-storage
+    constraints: mem=1G
+    options:
+      zone: 3
+      block-device: vdb
+      overwrite: "true"
+  ceilometer:
+    charm: cs:precise/ceilometer
+    constraints: mem=1G
+  ceilometer-agent:
+    charm: cs:precise/ceilometer-agent
+  mongodb:
+    charm: cs:precise/mongodb
+    constraints: mem=1G
+  heat:
+    charm: cs:precise/heat
+    constraints: mem=1G
+  ntp:
+    charm: cs:precise/ntp
+relations:
+  - - keystone:shared-db
+    - mysql:shared-db
+  - - nova-cloud-controller:shared-db
+    - mysql:shared-db
+  - - nova-cloud-controller:amqp
+    - rabbitmq-server:amqp
+  - - nova-cloud-controller:image-service
+    - glance:image-service
+  - - nova-cloud-controller:identity-service
+    - keystone:identity-service
+  - - nova-compute:cloud-compute
+    -
nova-cloud-controller:cloud-compute + - - nova-compute:shared-db + - mysql:shared-db + - - nova-compute:amqp + - rabbitmq-server:amqp + - - nova-compute:image-service + - glance:image-service + - - nova-compute:ceph + - ceph:client + - - glance:shared-db + - mysql:shared-db + - - glance:identity-service + - keystone:identity-service + - - glance:ceph + - ceph:client + - - glance:image-service + - cinder:image-service + - - cinder:shared-db + - mysql:shared-db + - - cinder:amqp + - rabbitmq-server:amqp + - - cinder:cinder-volume-service + - nova-cloud-controller:cinder-volume-service + - - cinder:identity-service + - keystone:identity-service + - - cinder:ceph + - ceph:client + - - neutron-gateway:shared-db + - mysql:shared-db + - - neutron-gateway:amqp + - rabbitmq-server:amqp + - - neutron-gateway:quantum-network-service + - nova-cloud-controller:quantum-network-service + - - openstack-dashboard:identity-service + - keystone:identity-service + - - swift-proxy:identity-service + - keystone:identity-service + - - swift-proxy:swift-storage + - swift-storage-z1:swift-storage + - - swift-proxy:swift-storage + - swift-storage-z2:swift-storage + - - swift-proxy:swift-storage + - swift-storage-z3:swift-storage + - - ceilometer:identity-service + - keystone:identity-service + - - ceilometer:amqp + - rabbitmq-server:amqp + - - ceilometer:shared-db + - mongodb:database + - - ceilometer-agent:nova-ceilometer + - nova-compute:nova-ceilometer + - - ceilometer-agent:ceilometer-service + - ceilometer:ceilometer-service + - - heat:identity-service + - keystone:identity-service + - - heat:shared-db + - mysql:shared-db + - - heat:amqp + - rabbitmq-server:amqp + - - ntp:juju-info + - nova-compute:juju-info + - - ntp:juju-info + - nova-cloud-controller:juju-info + - - ntp:juju-info + - neutron-gateway:juju-info + - - ntp:juju-info + - ceph:juju-info + - - ntp:juju-info + - cinder:juju-info + - - ntp:juju-info + - keystone:juju-info + - - ntp:juju-info + - glance:juju-info + - - ntp:juju-info + - swift-proxy:juju-info + - - ntp:juju-info + - swift-storage-z1:juju-info + - - ntp:juju-info + - swift-storage-z2:juju-info + - - ntp:juju-info + - swift-storage-z3:juju-info + - - ntp:juju-info + - ceilometer:juju-info + - - ntp:juju-info + - mongodb:juju-info + - - ntp:juju-info + - rabbitmq-server:juju-info + - - ntp:juju-info + - mysql:juju-info + - - ntp:juju-info + - openstack-dashboard:juju-info + - - ntp:juju-info + - heat:juju-info === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.md' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +services: + wordpress: + charm: wordpress + mysql: + charm: mysql + num_units: 1 +relations: + - ["wordpress:db", "mysql:server"] === added directory 
'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.md' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,18 @@ +services: + wordpress: + charm: wordpress + num_units: 1 + bindings: + db: db + url: public + mysql: + charm: mysql + num_units: 1 + bindings: + server: db + logging: + charm: logging +relations: + - ["wordpress:db", "mysql:server"] + - ["wordpress:juju-info", "logging:info"] + - ["mysql:juju-info", "logging:info"] === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics 1970-01-01 00:00:00 +0000 +++ 
src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/install' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +some text === added file 
'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/start' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/start 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/start 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stop' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stop 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stop 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +non hook related stuff === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yaml' --- 
src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,12 @@
+name: all-hooks
+summary: "That's a dummy charm with hook scripts for all types of hooks."
+description: "This is a longer description."
+provides:
+  foo:
+    interface: phony
+requires:
+  bar:
+    interface: fake
+peers:
+  self:
+    interface: dummy

=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/revision'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/revision	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/all-hooks/revision	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+1

=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.dir'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.dir/ignored'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.ignored'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.ignored	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/.ignored	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+#
\ No newline at end of file
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/metadata.yaml'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/metadata.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/category/metadata.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,6 @@
+name: categories
+summary: "Sample charm with a category"
+description: |
+  That's a boring charm that has a category.
+categories: ["database"]
+tags: ["openstack", "storage"]
\ No newline at end of file
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.dir'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.dir/ignored'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.ignored'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.ignored	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/.ignored	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+#
\ No newline at end of file
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,7 @@
+snapshot:
+  description: Take a snapshot of the database.
+  params:
+    outfile:
+      description: The file to write out to.
+ type: string + default: foo.bz2 === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/build' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/build/ignored' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/config.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +options: + title: {default: My Title, description: A descriptive title used for the service., type: string} + outlook: {description: No default outlook., type: string} + username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} + skill-level: {description: A number indicating skill., type: int} === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/empty' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/empty/.gitkeep' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/hooks' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/hooks/install' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/bash +echo "Done!" === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: dummy +summary: "That's a dummy charm." +description: | + This is a longer description which + potentially contains multiple lines. 
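A fixture such as the dummy charm's config.yaml above is what the config API added earlier in this tree consumes. The following is a rough, self-contained sketch of that flow, not part of the tree itself; the fixture path is illustrative, and only the gopkg.in/juju/charm.v6-unstable import used throughout is assumed:

	package main

	import (
		"fmt"
		"os"

		"gopkg.in/juju/charm.v6-unstable"
	)

	func main() {
		// Illustrative path to the dummy charm's config.yaml shown above.
		f, err := os.Open("internal/test-charm-repo/quantal/dummy/config.yaml")
		if err != nil {
			panic(err)
		}
		defer f.Close()

		cfg, err := charm.ReadConfig(f)
		if err != nil {
			panic(err)
		}

		// Options without a default (outlook, skill-level) come back nil.
		fmt.Println(cfg.DefaultSettings())

		// Strings are parsed to the declared option types: "9" becomes int64(9).
		settings, err := cfg.ParseSettingsStrings(map[string]string{"skill-level": "9"})
		if err != nil {
			panic(err)
		}
		fmt.Printf("%T\n", settings["skill-level"]) // int64
	}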
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/revision'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/revision	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/revision	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+1
\ No newline at end of file
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/src'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c	2016-03-22 15:18:22 +0000
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+main()
+{
+	printf ("Hello World!\n");
+	return 0;
+}

=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.dir'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.dir/ignored'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.ignored'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.ignored	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/.ignored	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1 @@
+#
\ No newline at end of file
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/metadata.yaml'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/metadata.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/format2/metadata.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,6 @@
+name: format2
+format: 2
+summary: "Sample charm described in format 2"
+description: |
+  That's a boring charm that is described in
+  terms of format 2.

=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging'
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/hooks'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/hooks/.gitkeep'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/metadata.yaml'
--- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/metadata.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/metadata.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,16 @@
+name: logging
+summary: "Subordinate logging test charm"
+description: |
+  This is a longer description which
+  potentially contains multiple lines.
+subordinate: true +provides: + logging-client: + interface: logging +requires: + logging-directory: + interface: logging + scope: container + info: + interface: juju-info + scope: container === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/logging/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +name: metered-empty +summary: "Metered charm with empty metrics" +description: "A charm that will not send metrics" === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/metrics.yaml' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered-empty/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +name: metered +summary: "A metered charm with custom metrics" +description: "" === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metrics.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metrics.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/metrics.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +metrics: + pings: + type: gauge + description: Description of the metric. 
+ juju-unit-time: === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/metered/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/hooks' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +name: monitoring +summary: "Subordinate monitoring test charm" +description: | + This is a longer description which + potentially contains multiple lines. +subordinate: true +provides: + monitoring-client: + interface: monitoring +requires: + monitoring-port: + interface: monitoring + scope: container + info: + interface: juju-info + scope: container === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +name: mysql-alternative +summary: "Database engine" +description: "A pretty popular database" +provides: + prod: + interface: mysql + dev: + interface: mysql + limit: 2 === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql-alternative/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: mysql +summary: "Database engine" +description: "A pretty popular database" +provides: + server: mysql === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/mysql/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 
'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +name: riak +summary: "K/V storage engine" +description: "Scalable K/V Store in Erlang with Clocks :-)" +provides: + endpoint: + interface: http + admin: + interface: http +peers: + ring: + interface: riak === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/riak/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +7 \ No newline at end of file === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terms' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terms/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terms/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terms/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: terms +summary: "Sample charm with terms and conditions" +description: | + That's a boring charm that requires certain terms. +terms: ["term1", "term2"] === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +name: terracotta +summary: Distributed HA caching/storage platform for Java +maintainer: Robert Ayres +description: | + Distributed HA caching/storage platform for Java. + . + Terracotta provides out of the box clustering for a number of well known Java + frameworks, including EHCache, Hibernate and Quartz as well as clustering + for J2EE containers. +provides: + dso: + interface: terracotta + optional: true +peers: + server-array: terracotta-server === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/terracotta/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +3 === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: upgrade +summary: "Sample charm to test version changes" +description: | + Sample charm to test version changes. + This is the old charm. 
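The upgrade1/upgrade2 pair above exists to exercise revision changes between two versions of the same charm. As a minimal sketch of the revision API that the CharmDir tests earlier in this tree cover (the path is illustrative):

	// Reading a charm directory picks its revision up from the "revision"
	// file, or from a legacy "revision:" field in metadata.yaml.
	dir, err := charm.ReadCharmDir("internal/test-charm-repo/quantal/upgrade1")
	if err != nil {
		panic(err)
	}
	fmt.Println(dir.Revision()) // 1

	dir.SetRevision(2)     // changes the in-memory revision only
	dir.SetDiskRevision(2) // also rewrites the on-disk revision file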
=== added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade1/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: upgrade +summary: "Sample charm to test version changes" +description: | + Sample charm to test version changes. + This is the new charm. === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/upgrade2/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +2 === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/install' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +#!/bin/bash + +echo hello world \ No newline at end of file === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: varnish-alternative +summary: "Database engine" +description: "Another popular database" +provides: + webcache: varnish === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish-alternative/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: varnish +summary: "Database engine" +description: "Another popular database" +provides: + webcache: varnish === 
added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/varnish/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/actions' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/actions/.gitkeep' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/config.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +options: + blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} + === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/hooks' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yaml' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +name: wordpress +summary: "Blog engine" +description: "A pretty popular blog engine" +provides: + url: + interface: http + limit: + optional: false + logging-dir: + interface: logging + scope: container + monitoring-port: + interface: monitoring + scope: container +requires: + db: + interface: mysql + limit: 1 + optional: false + cache: + interface: varnish + limit: 2 + optional: true === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/revision' --- src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/quantal/wordpress/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +3 \ No newline at end of file === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/build' === added file 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/build/ignored' === added directory 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/hooks' === added symlink 'src/gopkg.in/juju/charm.v6-unstable/internal/test-charm-repo/series/format2/hooks/symlink' === target is u'../target' === added file 'src/gopkg.in/juju/charm.v6-unstable/meta.go' --- src/gopkg.in/juju/charm.v6-unstable/meta.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/meta.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,748 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
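+// +// As a minimal usage sketch (variable names here are illustrative), the +// central entry point in this file is ReadMeta: +// +// meta, err := charm.ReadMeta(bytes.NewReader(data)) +// if err != nil { +// return err +// } +// fmt.Println(meta.Name, meta.Summary)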
+ +package charm + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "regexp" + "strconv" + "strings" + + "github.com/juju/schema" + "github.com/juju/utils" + "gopkg.in/yaml.v1" + + "gopkg.in/juju/charm.v6-unstable/hooks" + "gopkg.in/juju/charm.v6-unstable/resource" +) + +// RelationScope describes the scope of a relation. +type RelationScope string + +// Note that schema doesn't support custom string types, +// so when we use these values in a schema.Checker, +// we must store them as strings, not RelationScopes. + +const ( + ScopeGlobal RelationScope = "global" + ScopeContainer RelationScope = "container" +) + +// RelationRole defines the role of a relation. +type RelationRole string + +const ( + RoleProvider RelationRole = "provider" + RoleRequirer RelationRole = "requirer" + RolePeer RelationRole = "peer" +) + +// StorageType defines a storage type. +type StorageType string + +const ( + StorageBlock StorageType = "block" + StorageFilesystem StorageType = "filesystem" +) + +// Storage represents a charm's storage requirement. +type Storage struct { + // Name is the name of the store. + // + // Name has no default, and must be specified. + Name string `bson:"name"` + + // Description is a description of the store. + // + // Description has no default, and is optional. + Description string `bson:"description"` + + // Type is the storage type: filesystem or block-device. + // + // Type has no default, and must be specified. + Type StorageType `bson:"type"` + + // Shared indicates that the storage is shared between all units of + // a service deployed from the charm. It is an error to attempt to + // assign non-shareable storage to a "shared" storage requirement. + // + // Shared defaults to false. + Shared bool `bson:"shared"` + + // ReadOnly indicates that the storage should be made read-only if + // possible. If the storage cannot be made read-only, Juju will warn + // the user. + // + // ReadOnly defaults to false. + ReadOnly bool `bson:"read-only"` + + // CountMin is the number of storage instances that must be attached + // to the charm for it to be useful; the charm will not install until + // this number has been satisfied. This must be a non-negative number. + // + // CountMin defaults to 1 for singleton stores. + CountMin int `bson:"countmin"` + + // CountMax is the largest number of storage instances that can be + // attached to the charm. If CountMax is -1, then there is no upper + // bound. + // + // CountMax defaults to 1 for singleton stores. + CountMax int `bson:"countmax"` + + // MinimumSize is the minimum size of store that the charm needs to + // work at all. This is not a recommended size or a comfortable size + // or a will-work-well size, just a bare minimum below which the charm + // is going to break. + // MinimumSize requires a unit, one of MGTPEZY, and is stored as MiB. + // + // There is no default MinimumSize; if left unspecified, a provider + // specific default will be used, typically 1GB for block storage. + MinimumSize uint64 `bson:"minimum-size"` + + // Location is the mount location for filesystem stores. For multi- + // stores, the location acts as the parent directory for each mounted + // store. + // + // Location has no default, and is optional. + Location string `bson:"location,omitempty"` + + // Properties allow the charm author to characterise the relative storage + // performance requirements and sensitivities for each store. 
+ // e.g. "transient" is used to indicate that non-persistent storage is acceptable, + // such as tmpfs or ephemeral instance disks. + // + // Properties has no default, and is optional. + Properties []string `bson:"properties,omitempty"` +} + +// Relation represents a single relation defined in the charm +// metadata.yaml file. +type Relation struct { + Name string `bson:"name"` + Role RelationRole `bson:"role"` + Interface string `bson:"interface"` + Optional bool `bson:"optional"` + Limit int `bson:"limit"` + Scope RelationScope `bson:"scope"` +} + +// ImplementedBy returns whether the relation is implemented by the supplied charm. +func (r Relation) ImplementedBy(ch Charm) bool { + if r.IsImplicit() { + return true + } + var m map[string]Relation + switch r.Role { + case RoleProvider: + m = ch.Meta().Provides + case RoleRequirer: + m = ch.Meta().Requires + case RolePeer: + m = ch.Meta().Peers + default: + panic(fmt.Errorf("unknown relation role %q", r.Role)) + } + rel, found := m[r.Name] + if !found { + return false + } + if rel.Interface == r.Interface { + switch r.Scope { + case ScopeGlobal: + return rel.Scope != ScopeContainer + case ScopeContainer: + return true + default: + panic(fmt.Errorf("unknown relation scope %q", r.Scope)) + } + } + return false +} + +// IsImplicit returns whether the relation is supplied by juju itself, +// rather than by a charm. +func (r Relation) IsImplicit() bool { + return (r.Name == "juju-info" && + r.Interface == "juju-info" && + r.Role == RoleProvider) +} + +// Meta represents all the known content that may be defined +// within a charm's metadata.yaml file. +// Note: Series is serialised for backward compatibility +// as "SupportedSeries" because a previous +// charm version had an incompatible Series field that +// was unused in practice but still serialised. This +// only applies to JSON because Meta has a custom +// YAML marshaller. +type Meta struct { + Name string `bson:"name" json:"Name"` + Summary string `bson:"summary" json:"Summary"` + Description string `bson:"description" json:"Description"` + Subordinate bool `bson:"subordinate" json:"Subordinate"` + Provides map[string]Relation `bson:"provides,omitempty" json:"Provides,omitempty"` + Requires map[string]Relation `bson:"requires,omitempty" json:"Requires,omitempty"` + Peers map[string]Relation `bson:"peers,omitempty" json:"Peers,omitempty"` + Format int `bson:"format,omitempty" json:"Format,omitempty"` + OldRevision int `bson:"oldrevision,omitempty" json:"OldRevision"` // Obsolete + Categories []string `bson:"categories,omitempty" json:"Categories,omitempty"` + Tags []string `bson:"tags,omitempty" json:"Tags,omitempty"` + Series []string `bson:"series,omitempty" json:"SupportedSeries,omitempty"` + Storage map[string]Storage `bson:"storage,omitempty" json:"Storage,omitempty"` + PayloadClasses map[string]PayloadClass `bson:"payloadclasses,omitempty" json:"PayloadClasses,omitempty"` + Resources map[string]resource.Meta `bson:"resources,omitempty" json:"Resources,omitempty"` + Terms []string `bson:"terms,omitempty" json:"Terms,omitempty"` +} + +func generateRelationHooks(relName string, allHooks map[string]bool) { + for _, hookName := range hooks.RelationHooks() { + allHooks[fmt.Sprintf("%s-%s", relName, hookName)] = true + } +} + +// Hooks returns a map of all possible valid hooks, taking relations +// into account. It's a map to enable fast lookups, and the value is +// always true.
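+// For example, given the generateRelationHooks helper above, a charm with +// a "db" requires relation gets, in addition to the unit hooks, the +// entries db-relation-joined, db-relation-changed, db-relation-departed +// and db-relation-broken (compare TestMetaHooks in meta_test.go below).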
+func (m Meta) Hooks() map[string]bool { + allHooks := make(map[string]bool) + // Unit hooks + for _, hookName := range hooks.UnitHooks() { + allHooks[string(hookName)] = true + } + // Relation hooks + for hookName := range m.Provides { + generateRelationHooks(hookName, allHooks) + } + for hookName := range m.Requires { + generateRelationHooks(hookName, allHooks) + } + for hookName := range m.Peers { + generateRelationHooks(hookName, allHooks) + } + return allHooks +} + +// Used for parsing Categories and Tags. +func parseStringList(list interface{}) []string { + if list == nil { + return nil + } + slice := list.([]interface{}) + result := make([]string, 0, len(slice)) + for _, elem := range slice { + result = append(result, elem.(string)) + } + return result +} + +var termNameRE = regexp.MustCompile("^[a-z]+([a-z0-9-]+)/[0-9]+?$") + +func checkTerm(s string) error { + match := termNameRE.FindStringSubmatch(s) + if match == nil { + return fmt.Errorf("invalid term name %q: must match %s", s, termNameRE.String()) + } + return nil +} + +// ReadMeta reads the content of a metadata.yaml file and returns +// its representation. +func ReadMeta(r io.Reader) (meta *Meta, err error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return + } + raw := make(map[interface{}]interface{}) + err = yaml.Unmarshal(data, raw) + if err != nil { + return + } + v, err := charmSchema.Coerce(raw, nil) + if err != nil { + return nil, errors.New("metadata: " + err.Error()) + } + + m := v.(map[string]interface{}) + meta, err = parseMeta(m) + if err != nil { + return nil, err + } + + if err := meta.Check(); err != nil { + return nil, err + } + + // TODO(ericsnow) This line should be moved into parseMeta as soon + // as the terms code gets fixed. + meta.Terms = parseStringList(m["terms"]) + + return meta, nil +} + +func parseMeta(m map[string]interface{}) (*Meta, error) { + var meta Meta + + meta.Name = m["name"].(string) + meta.Summary = m["summary"].(string) + meta.Description = m["description"].(string) + meta.Provides = parseRelations(m["provides"], RoleProvider) + meta.Requires = parseRelations(m["requires"], RoleRequirer) + meta.Peers = parseRelations(m["peers"], RolePeer) + meta.Format = int(m["format"].(int64)) + meta.Categories = parseStringList(m["categories"]) + meta.Tags = parseStringList(m["tags"]) + if subordinate := m["subordinate"]; subordinate != nil { + meta.Subordinate = subordinate.(bool) + } + if rev := m["revision"]; rev != nil { + // Obsolete. The schema decodes as int64, but the int + // range should be good enough for revisions. + meta.OldRevision = int(rev.(int64)) + } + meta.Series = parseStringList(m["series"]) + meta.Storage = parseStorage(m["storage"]) + meta.PayloadClasses = parsePayloadClasses(m["payloads"]) + + resources, err := parseMetaResources(m["resources"]) + if err != nil { + return nil, err + } + meta.Resources = resources + + return &meta, nil +} + +// GetYAML implements yaml.Getter.GetYAML.
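+// Relations whose attributes are all defaults are marshalled back into the +// shorthand string form by marshaledRelation below; for example, a provides +// entry with interface "http", global scope and no limit round-trips as the +// single line form "server: http" (see TestYAMLMarshalSimpleRelation).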
+func (m Meta) GetYAML() (tag string, value interface{}) { + marshaledRelations := func(rs map[string]Relation) map[string]marshaledRelation { + mrs := make(map[string]marshaledRelation) + for name, r := range rs { + mrs[name] = marshaledRelation(r) + } + return mrs + } + return "", struct { + Name string `yaml:"name"` + Summary string `yaml:"summary"` + Description string `yaml:"description"` + Provides map[string]marshaledRelation `yaml:"provides,omitempty"` + Requires map[string]marshaledRelation `yaml:"requires,omitempty"` + Peers map[string]marshaledRelation `yaml:"peers,omitempty"` + Categories []string `yaml:"categories,omitempty"` + Tags []string `yaml:"tags,omitempty"` + Subordinate bool `yaml:"subordinate,omitempty"` + Series []string `yaml:"series,omitempty"` + Terms []string `yaml:"terms,omitempty"` + }{ + Name: m.Name, + Summary: m.Summary, + Description: m.Description, + Provides: marshaledRelations(m.Provides), + Requires: marshaledRelations(m.Requires), + Peers: marshaledRelations(m.Peers), + Categories: m.Categories, + Tags: m.Tags, + Subordinate: m.Subordinate, + Series: m.Series, + Terms: m.Terms, + } +} + +type marshaledRelation Relation + +func (r marshaledRelation) GetYAML() (tag string, value interface{}) { + // See calls to ifaceExpander in charmSchema. + noLimit := 1 + if r.Role == RoleProvider { + noLimit = 0 + } + + if !r.Optional && r.Limit == noLimit && r.Scope == ScopeGlobal { + // All attributes are default, so use the simple string form of the relation. + return "", r.Interface + } + mr := struct { + Interface string `yaml:"interface"` + Limit *int `yaml:"limit,omitempty"` + Optional bool `yaml:"optional,omitempty"` + Scope RelationScope `yaml:"scope,omitempty"` + }{ + Interface: r.Interface, + Optional: r.Optional, + } + if r.Limit != noLimit { + mr.Limit = &r.Limit + } + if r.Scope != ScopeGlobal { + mr.Scope = r.Scope + } + return "", mr +} + +// Check checks that the metadata is well-formed. +func (meta Meta) Check() error { + // Check for duplicate or forbidden relation names or interfaces. + names := map[string]bool{} + checkRelations := func(src map[string]Relation, role RelationRole) error { + for name, rel := range src { + if rel.Name != name { + return fmt.Errorf("charm %q has mismatched relation name %q; expected %q", meta.Name, rel.Name, name) + } + if rel.Role != role { + return fmt.Errorf("charm %q has mismatched role %q; expected %q", meta.Name, rel.Role, role) + } + // Container-scoped require relations on subordinates are allowed + // to use the otherwise-reserved juju-* namespace. + if !meta.Subordinate || role != RoleRequirer || rel.Scope != ScopeContainer { + if reservedName(name) { + return fmt.Errorf("charm %q using a reserved relation name: %q", meta.Name, name) + } + } + if role != RoleRequirer { + if reservedName(rel.Interface) { + return fmt.Errorf("charm %q relation %q using a reserved interface: %q", meta.Name, name, rel.Interface) + } + } + if names[name] { + return fmt.Errorf("charm %q using a duplicated relation name: %q", meta.Name, name) + } + names[name] = true + } + return nil + } + if err := checkRelations(meta.Provides, RoleProvider); err != nil { + return err + } + if err := checkRelations(meta.Requires, RoleRequirer); err != nil { + return err + } + if err := checkRelations(meta.Peers, RolePeer); err != nil { + return err + } + + // Subordinate charms must have at least one relation that + // has container scope, otherwise they can't relate to the + // principal. 
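+ // The "monitoring" test charm above is one such example: it sets + // subordinate: true and declares container-scoped "requires" + // relations (monitoring-port and the juju-info based "info").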
+ if meta.Subordinate { + valid := false + if meta.Requires != nil { + for _, relationData := range meta.Requires { + if relationData.Scope == ScopeContainer { + valid = true + break + } + } + } + if !valid { + return fmt.Errorf("subordinate charm %q lacks \"requires\" relation with container scope", meta.Name) + } + } + + for _, series := range meta.Series { + if !IsValidSeries(series) { + return fmt.Errorf("charm %q declares invalid series: %q", meta.Name, series) + } + } + + names = make(map[string]bool) + for name, store := range meta.Storage { + if store.Location != "" && store.Type != StorageFilesystem { + return fmt.Errorf(`charm %q storage %q: location may not be specified for "type: %s"`, meta.Name, name, store.Type) + } + if store.Type == "" { + return fmt.Errorf("charm %q storage %q: type must be specified", meta.Name, name) + } + if store.CountMin < 0 { + return fmt.Errorf("charm %q storage %q: invalid minimum count %d", meta.Name, name, store.CountMin) + } + if store.CountMax == 0 || store.CountMax < -1 { + return fmt.Errorf("charm %q storage %q: invalid maximum count %d", meta.Name, name, store.CountMax) + } + if names[name] { + return fmt.Errorf("charm %q storage %q: duplicated storage name", meta.Name, name) + } + names[name] = true + } + + for name, payloadClass := range meta.PayloadClasses { + if payloadClass.Name != name { + return fmt.Errorf("mismatch on payload class name (%q != %q)", payloadClass.Name, name) + } + if err := payloadClass.Validate(); err != nil { + return err + } + } + + if err := validateMetaResources(meta.Resources); err != nil { + return err + } + + for _, term := range meta.Terms { + if terr := checkTerm(term); terr != nil { + return terr + } + } + + return nil +} + +func reservedName(name string) bool { + return name == "juju" || strings.HasPrefix(name, "juju-") +} + +func parseRelations(relations interface{}, role RelationRole) map[string]Relation { + if relations == nil { + return nil + } + result := make(map[string]Relation) + for name, rel := range relations.(map[string]interface{}) { + relMap := rel.(map[string]interface{}) + relation := Relation{ + Name: name, + Role: role, + Interface: relMap["interface"].(string), + Optional: relMap["optional"].(bool), + } + if scope := relMap["scope"]; scope != nil { + relation.Scope = RelationScope(scope.(string)) + } + if relMap["limit"] != nil { + // Schema defaults to int64, but we know + // the int range should be more than enough. + relation.Limit = int(relMap["limit"].(int64)) + } + result[name] = relation + } + return result +} + +// Schema coercer that expands the interface shorthand notation. +// A consistent format is easier to work with than handling both +// the short and long forms everywhere. +// +// Supports the following variants: +// +// provides: +// server: riak +// admin: http +// foobar: +// interface: blah +// +// provides: +// server: +// interface: mysql +// limit: +// optional: false +// +// In all input cases, the output is the fully specified interface +// representation as seen in the mysql interface description above.
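+// +// As a sketch of the expansion performed by Coerce below, the shorthand +// +// server: riak +// +// becomes the equivalent of +// +// server: +// interface: riak +// limit: <the expander's default> +// optional: false +// scope: global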
+func ifaceExpander(limit interface{}) schema.Checker { + return ifaceExpC{limit} +} + +type ifaceExpC struct { + limit interface{} +} + +var ( + stringC = schema.String() + mapC = schema.StringMap(schema.Any()) +) + +func (c ifaceExpC) Coerce(v interface{}, path []string) (newv interface{}, err error) { + s, err := stringC.Coerce(v, path) + if err == nil { + newv = map[string]interface{}{ + "interface": s, + "limit": c.limit, + "optional": false, + "scope": string(ScopeGlobal), + } + return + } + + v, err = mapC.Coerce(v, path) + if err != nil { + return + } + m := v.(map[string]interface{}) + if _, ok := m["limit"]; !ok { + m["limit"] = c.limit + } + return ifaceSchema.Coerce(m, path) +} + +var ifaceSchema = schema.FieldMap( + schema.Fields{ + "interface": schema.String(), + "limit": schema.OneOf(schema.Const(nil), schema.Int()), + "scope": schema.OneOf(schema.Const(string(ScopeGlobal)), schema.Const(string(ScopeContainer))), + "optional": schema.Bool(), + }, + schema.Defaults{ + "scope": string(ScopeGlobal), + "optional": false, + }, +) + +func parseStorage(stores interface{}) map[string]Storage { + if stores == nil { + return nil + } + result := make(map[string]Storage) + for name, store := range stores.(map[string]interface{}) { + storeMap := store.(map[string]interface{}) + store := Storage{ + Name: name, + Type: StorageType(storeMap["type"].(string)), + Shared: storeMap["shared"].(bool), + ReadOnly: storeMap["read-only"].(bool), + CountMin: 1, + CountMax: 1, + } + if desc, ok := storeMap["description"].(string); ok { + store.Description = desc + } + if multiple, ok := storeMap["multiple"].(map[string]interface{}); ok { + if r, ok := multiple["range"].([2]int); ok { + store.CountMin, store.CountMax = r[0], r[1] + } + } + if minSize, ok := storeMap["minimum-size"].(uint64); ok { + store.MinimumSize = minSize + } + if loc, ok := storeMap["location"].(string); ok { + store.Location = loc + } + if properties, ok := storeMap["properties"].([]interface{}); ok { + for _, p := range properties { + store.Properties = append(store.Properties, p.(string)) + } + } + result[name] = store + } + return result +} + +var storageSchema = schema.FieldMap( + schema.Fields{ + "type": schema.OneOf(schema.Const(string(StorageBlock)), schema.Const(string(StorageFilesystem))), + "shared": schema.Bool(), + "read-only": schema.Bool(), + "multiple": schema.FieldMap( + schema.Fields{ + "range": storageCountC{}, // m, m-n, m+, m- + }, + schema.Defaults{}, + ), + "minimum-size": storageSizeC{}, + "location": schema.String(), + "description": schema.String(), + "properties": schema.List(propertiesC{}), + }, + schema.Defaults{ + "shared": false, + "read-only": false, + "multiple": schema.Omit, + "location": schema.Omit, + "description": schema.Omit, + "properties": schema.Omit, + "minimum-size": schema.Omit, + }, +) + +type storageCountC struct{} + +var storageCountRE = regexp.MustCompile("^([0-9]+)([-+]|-[0-9]+)$") + +func (c storageCountC) Coerce(v interface{}, path []string) (newv interface{}, err error) { + s, err := schema.OneOf(schema.Int(), stringC).Coerce(v, path) + if err != nil { + return nil, err + } + if m, ok := s.(int64); ok { + // We've got a count of the form "m": m represents + // both the minimum and maximum. 
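+ // Mirroring TestStorageCount in meta_test.go: "1" means exactly one, + // "0-1" at most one, and "1+" or "1-" one or more with no upper + // bound (-1).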
+ if m <= 0 { + return nil, fmt.Errorf("%s: invalid count %v", strings.Join(path[1:], ""), m) + } + return [2]int{int(m), int(m)}, nil + } + match := storageCountRE.FindStringSubmatch(s.(string)) + if match == nil { + return nil, fmt.Errorf("%s: value %q does not match 'm', 'm-n', or 'm+'", strings.Join(path[1:], ""), s) + } + var m, n int + if m, err = strconv.Atoi(match[1]); err != nil { + return nil, err + } + if len(match[2]) == 1 { + // We've got a count of the form "m+" or "m-": + // m represents the minimum, and there is no + // upper bound. + n = -1 + } else { + if n, err = strconv.Atoi(match[2][1:]); err != nil { + return nil, err + } + } + return [2]int{m, n}, nil +} + +type storageSizeC struct{} + +func (c storageSizeC) Coerce(v interface{}, path []string) (newv interface{}, err error) { + s, err := schema.String().Coerce(v, path) + if err != nil { + return nil, err + } + return utils.ParseSize(s.(string)) +} + +type propertiesC struct{} + +func (c propertiesC) Coerce(v interface{}, path []string) (newv interface{}, err error) { + return schema.OneOf(schema.Const("transient")).Coerce(v, path) +} + +var charmSchema = schema.FieldMap( + schema.Fields{ + "name": schema.String(), + "summary": schema.String(), + "description": schema.String(), + "peers": schema.StringMap(ifaceExpander(int64(1))), + "provides": schema.StringMap(ifaceExpander(nil)), + "requires": schema.StringMap(ifaceExpander(int64(1))), + "revision": schema.Int(), // Obsolete + "format": schema.Int(), + "subordinate": schema.Bool(), + "categories": schema.List(schema.String()), + "tags": schema.List(schema.String()), + "series": schema.List(schema.String()), + "storage": schema.StringMap(storageSchema), + "payloads": schema.StringMap(payloadClassSchema), + "resources": schema.StringMap(resourceSchema), + "terms": schema.List(schema.String()), + }, + schema.Defaults{ + "provides": schema.Omit, + "requires": schema.Omit, + "peers": schema.Omit, + "revision": schema.Omit, + "format": 1, + "subordinate": schema.Omit, + "categories": schema.Omit, + "tags": schema.Omit, + "series": schema.Omit, + "storage": schema.Omit, + "payloads": schema.Omit, + "resources": schema.Omit, + "terms": schema.Omit, + }, +) === added file 'src/gopkg.in/juju/charm.v6-unstable/meta_test.go' --- src/gopkg.in/juju/charm.v6-unstable/meta_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/meta_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,917 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +package charm_test + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/yaml.v1" + + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charm.v6-unstable/resource" +) + +func repoMeta(c *gc.C, name string) io.Reader { + charmDir := charmDirPath(c, name) + file, err := os.Open(filepath.Join(charmDir, "metadata.yaml")) + c.Assert(err, gc.IsNil) + defer file.Close() + data, err := ioutil.ReadAll(file) + c.Assert(err, gc.IsNil) + return bytes.NewReader(data) +} + +type MetaSuite struct{} + +var _ = gc.Suite(&MetaSuite{}) + +func (s *MetaSuite) TestReadMetaVersion1(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "dummy")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Name, gc.Equals, "dummy") + c.Assert(meta.Summary, gc.Equals, "That's a dummy charm.") + c.Assert(meta.Description, gc.Equals, + "This is a longer description which\npotentially contains multiple lines.\n") + c.Assert(meta.Format, gc.Equals, 1) + c.Assert(meta.OldRevision, gc.Equals, 0) + c.Assert(meta.Subordinate, gc.Equals, false) +} + +func (s *MetaSuite) TestReadMetaVersion2(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "format2")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Name, gc.Equals, "format2") + c.Assert(meta.Format, gc.Equals, 2) + c.Assert(meta.Categories, gc.HasLen, 0) + c.Assert(meta.Terms, gc.HasLen, 0) +} + +func (s *MetaSuite) TestCheckTerms(c *gc.C) { + tests := []struct { + about string + terms []string + expectError string + }{{ + about: "valid terms", + terms: []string{"term/1", "term/2"}, + }, { + about: "missing revision number", + terms: []string{"term/1", "term"}, + expectError: "invalid term name \"term\": must match.*", + }, { + about: "revision not a number", + terms: []string{"term/1", "term/a"}, + expectError: "invalid term name \"term/a\": must match.*", + }, { + about: "wrong format", + terms: []string{"term/1", "term/a/1"}, + expectError: "invalid term name \"term/a/1\": must match.*", + }, { + about: "term may not contain spaces", + terms: []string{"term/1", "term about a term"}, + expectError: "invalid term name \"term about a term\": must match.*", + }, { + about: "term name must start with lowercase letter", + terms: []string{"Term/1"}, + expectError: `invalid term name "Term/1": must match.*`, + }, { + about: "term name match the regexp", + terms: []string{"term_123-23aAf/1"}, + expectError: "invalid term name \"term_123-23aAf/1\": must match.*", + }, + } + for i, test := range tests { + c.Logf("running test %v: %v", i, test.about) + meta := charm.Meta{Terms: test.terms} + err := meta.Check() + if test.expectError == "" { + c.Assert(err, jc.ErrorIsNil) + } else { + c.Assert(err, gc.ErrorMatches, test.expectError) + } + } +} + +func (s *MetaSuite) TestReadCategory(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "category")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Categories, jc.DeepEquals, []string{"database"}) +} + +func (s *MetaSuite) TestReadTerms(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "terms")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Terms, jc.DeepEquals, []string{"term1", "term2"}) +} + +func (s *MetaSuite) TestReadTags(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "category")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Tags, jc.DeepEquals, []string{"openstack", "storage"}) +} + +func (s *MetaSuite) TestSubordinate(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "logging")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Subordinate, 
gc.Equals, true) +} + +func (s *MetaSuite) TestSubordinateWithoutContainerRelation(c *gc.C) { + r := repoMeta(c, "dummy") + hackYaml := ReadYaml(r) + hackYaml["subordinate"] = true + _, err := charm.ReadMeta(hackYaml.Reader()) + c.Assert(err, gc.ErrorMatches, "subordinate charm \"dummy\" lacks \"requires\" relation with container scope") +} + +func (s *MetaSuite) TestScopeConstraint(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "logging")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Provides["logging-client"].Scope, gc.Equals, charm.ScopeGlobal) + c.Assert(meta.Requires["logging-directory"].Scope, gc.Equals, charm.ScopeContainer) + c.Assert(meta.Subordinate, gc.Equals, true) +} + +func (s *MetaSuite) TestParseMetaRelations(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "mysql")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Provides["server"], gc.Equals, charm.Relation{ + Name: "server", + Role: charm.RoleProvider, + Interface: "mysql", + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Requires, gc.IsNil) + c.Assert(meta.Peers, gc.IsNil) + + meta, err = charm.ReadMeta(repoMeta(c, "riak")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Provides["endpoint"], gc.Equals, charm.Relation{ + Name: "endpoint", + Role: charm.RoleProvider, + Interface: "http", + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Provides["admin"], gc.Equals, charm.Relation{ + Name: "admin", + Role: charm.RoleProvider, + Interface: "http", + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Peers["ring"], gc.Equals, charm.Relation{ + Name: "ring", + Role: charm.RolePeer, + Interface: "riak", + Limit: 1, + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Requires, gc.IsNil) + + meta, err = charm.ReadMeta(repoMeta(c, "terracotta")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Provides["dso"], gc.Equals, charm.Relation{ + Name: "dso", + Role: charm.RoleProvider, + Interface: "terracotta", + Optional: true, + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Peers["server-array"], gc.Equals, charm.Relation{ + Name: "server-array", + Role: charm.RolePeer, + Interface: "terracotta-server", + Limit: 1, + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Requires, gc.IsNil) + + meta, err = charm.ReadMeta(repoMeta(c, "wordpress")) + c.Assert(err, gc.IsNil) + c.Assert(meta.Provides["url"], gc.Equals, charm.Relation{ + Name: "url", + Role: charm.RoleProvider, + Interface: "http", + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Requires["db"], gc.Equals, charm.Relation{ + Name: "db", + Role: charm.RoleRequirer, + Interface: "mysql", + Limit: 1, + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Requires["cache"], gc.Equals, charm.Relation{ + Name: "cache", + Role: charm.RoleRequirer, + Interface: "varnish", + Limit: 2, + Optional: true, + Scope: charm.ScopeGlobal, + }) + c.Assert(meta.Peers, gc.IsNil) +} + +var relationsConstraintsTests = []struct { + rels string + err string +}{ + { + "provides:\n foo: ping\nrequires:\n foo: pong", + `charm "a" using a duplicated relation name: "foo"`, + }, { + "requires:\n foo: ping\npeers:\n foo: pong", + `charm "a" using a duplicated relation name: "foo"`, + }, { + "peers:\n foo: ping\nprovides:\n foo: pong", + `charm "a" using a duplicated relation name: "foo"`, + }, { + "provides:\n juju: blob", + `charm "a" using a reserved relation name: "juju"`, + }, { + "requires:\n juju: blob", + `charm "a" using a reserved relation name: "juju"`, + }, { + "peers:\n juju: blob", + `charm "a" using a reserved relation name: "juju"`, + }, { + "provides:\n juju-snap: blub", + `charm "a" using a reserved relation name: 
"juju-snap"`, + }, { + "requires:\n juju-crackle: blub", + `charm "a" using a reserved relation name: "juju-crackle"`, + }, { + "peers:\n juju-pop: blub", + `charm "a" using a reserved relation name: "juju-pop"`, + }, { + "provides:\n innocuous: juju", + `charm "a" relation "innocuous" using a reserved interface: "juju"`, + }, { + "peers:\n innocuous: juju", + `charm "a" relation "innocuous" using a reserved interface: "juju"`, + }, { + "provides:\n innocuous: juju-snap", + `charm "a" relation "innocuous" using a reserved interface: "juju-snap"`, + }, { + "peers:\n innocuous: juju-snap", + `charm "a" relation "innocuous" using a reserved interface: "juju-snap"`, + }, +} + +func (s *MetaSuite) TestRelationsConstraints(c *gc.C) { + check := func(s, e string) { + meta, err := charm.ReadMeta(strings.NewReader(s)) + if e != "" { + c.Assert(err, gc.ErrorMatches, e) + c.Assert(meta, gc.IsNil) + } else { + c.Assert(err, gc.IsNil) + c.Assert(meta, gc.NotNil) + } + } + prefix := "name: a\nsummary: b\ndescription: c\n" + for i, t := range relationsConstraintsTests { + c.Logf("test %d", i) + check(prefix+t.rels, t.err) + check(prefix+"subordinate: true\n"+t.rels, t.err) + } + // The juju-* namespace is accessible to container-scoped require + // relations on subordinate charms. + check(prefix+` +subordinate: true +requires: + juju-info: + interface: juju-info + scope: container`, "") + // The juju-* interfaces are allowed on any require relation. + check(prefix+` +requires: + innocuous: juju-info`, "") +} + +// dummyMetadata contains a minimally valid charm metadata.yaml +// for testing valid and invalid series. +const dummyMetadata = "name: a\nsummary: b\ndescription: c" + +// TestSeries ensures that valid series values are parsed correctly when specified +// in the charm metadata. +func (s *MetaSuite) TestSeries(c *gc.C) { + // series not specified + meta, err := charm.ReadMeta(strings.NewReader(dummyMetadata)) + c.Assert(err, gc.IsNil) + c.Check(meta.Series, gc.HasLen, 0) + charmMeta := fmt.Sprintf("%s\nseries:", dummyMetadata) + for _, seriesName := range []string{"precise", "trusty", "plan9"} { + charmMeta = fmt.Sprintf("%s\n - %s", charmMeta, seriesName) + } + meta, err = charm.ReadMeta(strings.NewReader(charmMeta)) + c.Assert(err, gc.IsNil) + c.Assert(meta.Series, gc.DeepEquals, []string{"precise", "trusty", "plan9"}) +} + +// TestInvalidSeries ensures that invalid series values cause a parse error +// when specified in the charm metadata. +func (s *MetaSuite) TestInvalidSeries(c *gc.C) { + for _, seriesName := range []string{"pre-c1se", "pre^cise", "cp/m", "OpenVMS"} { + _, err := charm.ReadMeta(strings.NewReader( + fmt.Sprintf("%s\nseries:\n - %s\n", dummyMetadata, seriesName))) + c.Assert(err, gc.NotNil) + c.Check(err, gc.ErrorMatches, `charm "a" declares invalid series: .*`) + } +} + +func (s *MetaSuite) TestCheckMismatchedRelationName(c *gc.C) { + // This Check case cannot be covered by the above + // TestRelationsConstraints tests. + meta := charm.Meta{ + Name: "foo", + Provides: map[string]charm.Relation{ + "foo": { + Name: "foo", + Role: charm.RolePeer, + Interface: "x", + Limit: 1, + Scope: charm.ScopeGlobal, + }, + }, + } + err := meta.Check() + c.Assert(err, gc.ErrorMatches, `charm "foo" has mismatched role "peer"; expected "provider"`) +} + +func (s *MetaSuite) TestCheckMismatchedRole(c *gc.C) { + // This Check case cannot be covered by the above + // TestRelationsConstraints tests. 
+ meta := charm.Meta{ + Name: "foo", + Provides: map[string]charm.Relation{ + "foo": { + Role: charm.RolePeer, + Interface: "foo", + Limit: 1, + Scope: charm.ScopeGlobal, + }, + }, + } + err := meta.Check() + c.Assert(err, gc.ErrorMatches, `charm "foo" has mismatched relation name ""; expected "foo"`) +} + +// Test rewriting of a given interface specification into long form. +// +// IfaceExpander uses Coerce to do one of two things: +// +// - Rewrite shorthand to the long form used for actual storage +// - Fill in defaults, including a configurable `limit` +// +// This test ensures coverage of each of these branches, along with +// ensuring the coercer returns proper errors on invalid data. +func (s *MetaSuite) TestIfaceExpander(c *gc.C) { + e := charm.IfaceExpander(nil) + + path := []string{""} + + // Shorthand is properly rewritten + v, err := e.Coerce("http", path) + c.Assert(err, gc.IsNil) + c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": false, "scope": string(charm.ScopeGlobal)}) + + // Defaults are properly applied + v, err = e.Coerce(map[string]interface{}{"interface": "http"}, path) + c.Assert(err, gc.IsNil) + c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": false, "scope": string(charm.ScopeGlobal)}) + + v, err = e.Coerce(map[string]interface{}{"interface": "http", "limit": 2}, path) + c.Assert(err, gc.IsNil) + c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": int64(2), "optional": false, "scope": string(charm.ScopeGlobal)}) + + v, err = e.Coerce(map[string]interface{}{"interface": "http", "optional": true}, path) + c.Assert(err, gc.IsNil) + c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": nil, "optional": true, "scope": string(charm.ScopeGlobal)}) + + // Invalid data yields an error.
+ v, err = e.Coerce(42, path) + c.Assert(err, gc.ErrorMatches, `: expected map, got int\(42\)`) + + v, err = e.Coerce(map[string]interface{}{"interface": "http", "optional": nil}, path) + c.Assert(err, gc.ErrorMatches, ".optional: expected bool, got nothing") + + v, err = e.Coerce(map[string]interface{}{"interface": "http", "limit": "none, really"}, path) + c.Assert(err, gc.ErrorMatches, ".limit: unexpected value.*") + + // Can change default limit + e = charm.IfaceExpander(1) + v, err = e.Coerce(map[string]interface{}{"interface": "http"}, path) + c.Assert(err, gc.IsNil) + c.Assert(v, jc.DeepEquals, map[string]interface{}{"interface": "http", "limit": int64(1), "optional": false, "scope": string(charm.ScopeGlobal)}) +} + +func (s *MetaSuite) TestMetaHooks(c *gc.C) { + meta, err := charm.ReadMeta(repoMeta(c, "wordpress")) + c.Assert(err, gc.IsNil) + hooks := meta.Hooks() + expectedHooks := map[string]bool{ + "install": true, + "start": true, + "config-changed": true, + "upgrade-charm": true, + "stop": true, + "collect-metrics": true, + "meter-status-changed": true, + "leader-elected": true, + "leader-deposed": true, + "leader-settings-changed": true, + "update-status": true, + "cache-relation-joined": true, + "cache-relation-changed": true, + "cache-relation-departed": true, + "cache-relation-broken": true, + "db-relation-joined": true, + "db-relation-changed": true, + "db-relation-departed": true, + "db-relation-broken": true, + "logging-dir-relation-joined": true, + "logging-dir-relation-changed": true, + "logging-dir-relation-departed": true, + "logging-dir-relation-broken": true, + "monitoring-port-relation-joined": true, + "monitoring-port-relation-changed": true, + "monitoring-port-relation-departed": true, + "monitoring-port-relation-broken": true, + "url-relation-joined": true, + "url-relation-changed": true, + "url-relation-departed": true, + "url-relation-broken": true, + } + c.Assert(hooks, jc.DeepEquals, expectedHooks) +} + +func (s *MetaSuite) TestCodecRoundTripEmpty(c *gc.C) { + for i, codec := range codecs { + c.Logf("codec %d", i) + empty_input := charm.Meta{} + data, err := codec.Marshal(empty_input) + c.Assert(err, gc.IsNil) + var empty_output charm.Meta + err = codec.Unmarshal(data, &empty_output) + c.Assert(err, gc.IsNil) + c.Assert(empty_input, jc.DeepEquals, empty_output) + } +} + +func (s *MetaSuite) TestCodecRoundTrip(c *gc.C) { + var input = charm.Meta{ + Name: "Foo", + Summary: "Bar", + Description: "Baz", + Subordinate: true, + Provides: map[string]charm.Relation{ + "qux": { + Interface: "quxx", + Optional: true, + Limit: 42, + Scope: "quxxx", + }, + }, + Requires: map[string]charm.Relation{ + "qux": { + Interface: "quxx", + Optional: true, + Limit: 42, + Scope: "quxxx", + }, + }, + Peers: map[string]charm.Relation{ + "qux": { + Interface: "quxx", + Optional: true, + Limit: 42, + Scope: "quxxx", + }, + }, + Categories: []string{"quxxxx", "quxxxxx"}, + Tags: []string{"openstack", "storage"}, + Format: 10, + OldRevision: 11, + Terms: []string{"test term 1", "test term 2"}, + } + for i, codec := range codecs { + c.Logf("codec %d", i) + data, err := codec.Marshal(input) + c.Assert(err, gc.IsNil) + var output charm.Meta + err = codec.Unmarshal(data, &output) + c.Assert(err, gc.IsNil) + c.Assert(input, jc.DeepEquals, output) + } +} + +var implementedByTests = []struct { + ifce string + name string + role charm.RelationRole + scope charm.RelationScope + match bool + implicit bool +}{ + {"ifce-pro", "pro", charm.RoleProvider, charm.ScopeGlobal, true, false}, + {"blah", 
"pro", charm.RoleProvider, charm.ScopeGlobal, false, false}, + {"ifce-pro", "blah", charm.RoleProvider, charm.ScopeGlobal, false, false}, + {"ifce-pro", "pro", charm.RoleRequirer, charm.ScopeGlobal, false, false}, + {"ifce-pro", "pro", charm.RoleProvider, charm.ScopeContainer, true, false}, + + {"juju-info", "juju-info", charm.RoleProvider, charm.ScopeGlobal, true, true}, + {"blah", "juju-info", charm.RoleProvider, charm.ScopeGlobal, false, false}, + {"juju-info", "blah", charm.RoleProvider, charm.ScopeGlobal, false, false}, + {"juju-info", "juju-info", charm.RoleRequirer, charm.ScopeGlobal, false, false}, + {"juju-info", "juju-info", charm.RoleProvider, charm.ScopeContainer, true, true}, + + {"ifce-req", "req", charm.RoleRequirer, charm.ScopeGlobal, true, false}, + {"blah", "req", charm.RoleRequirer, charm.ScopeGlobal, false, false}, + {"ifce-req", "blah", charm.RoleRequirer, charm.ScopeGlobal, false, false}, + {"ifce-req", "req", charm.RolePeer, charm.ScopeGlobal, false, false}, + {"ifce-req", "req", charm.RoleRequirer, charm.ScopeContainer, true, false}, + + {"juju-info", "info", charm.RoleRequirer, charm.ScopeContainer, true, false}, + {"blah", "info", charm.RoleRequirer, charm.ScopeContainer, false, false}, + {"juju-info", "blah", charm.RoleRequirer, charm.ScopeContainer, false, false}, + {"juju-info", "info", charm.RolePeer, charm.ScopeContainer, false, false}, + {"juju-info", "info", charm.RoleRequirer, charm.ScopeGlobal, false, false}, + + {"ifce-peer", "peer", charm.RolePeer, charm.ScopeGlobal, true, false}, + {"blah", "peer", charm.RolePeer, charm.ScopeGlobal, false, false}, + {"ifce-peer", "blah", charm.RolePeer, charm.ScopeGlobal, false, false}, + {"ifce-peer", "peer", charm.RoleProvider, charm.ScopeGlobal, false, false}, + {"ifce-peer", "peer", charm.RolePeer, charm.ScopeContainer, true, false}, +} + +func (s *MetaSuite) TestImplementedBy(c *gc.C) { + for i, t := range implementedByTests { + c.Logf("test %d", i) + r := charm.Relation{ + Interface: t.ifce, + Name: t.name, + Role: t.role, + Scope: t.scope, + } + c.Assert(r.ImplementedBy(&dummyCharm{}), gc.Equals, t.match) + c.Assert(r.IsImplicit(), gc.Equals, t.implicit) + } +} + +var metaYAMLMarshalTests = []struct { + about string + yaml string +}{{ + about: "minimal charm", + yaml: ` +name: minimal +description: d +summary: s +`, +}, { + about: "charm with lots of stuff", + yaml: ` +name: big +description: d +summary: s +subordinate: true +provides: + provideSimple: someinterface + provideLessSimple: + interface: anotherinterface + optional: true + scope: container + limit: 3 +requires: + requireSimple: someinterface + requireLessSimple: + interface: anotherinterface + optional: true + scope: container + limit: 3 +peers: + peerSimple: someinterface + peerLessSimple: + interface: peery + optional: true +categories: [c1, c1] +tags: [t1, t2] +series: + - someseries +`, +}} + +func (s *MetaSuite) TestYAMLMarshal(c *gc.C) { + for i, test := range metaYAMLMarshalTests { + c.Logf("test %d: %s", i, test.about) + ch, err := charm.ReadMeta(strings.NewReader(test.yaml)) + c.Assert(err, gc.IsNil) + gotYAML, err := yaml.Marshal(ch) + c.Assert(err, gc.IsNil) + gotCh, err := charm.ReadMeta(bytes.NewReader(gotYAML)) + c.Assert(err, gc.IsNil) + c.Assert(gotCh, jc.DeepEquals, ch) + } +} + +func (s *MetaSuite) TestYAMLMarshalSimpleRelation(c *gc.C) { + // Check that a simple relation gets marshaled as a string. 
+ chYAML := ` +name: minimal +description: d +summary: s +provides: + server: http +requires: + client: http +peers: + me: http +` + ch, err := charm.ReadMeta(strings.NewReader(chYAML)) + c.Assert(err, gc.IsNil) + gotYAML, err := yaml.Marshal(ch) + c.Assert(err, gc.IsNil) + + var x interface{} + err = yaml.Unmarshal(gotYAML, &x) + c.Assert(err, gc.IsNil) + c.Assert(x, jc.DeepEquals, map[interface{}]interface{}{ + "name": "minimal", + "description": "d", + "summary": "s", + "provides": map[interface{}]interface{}{ + "server": "http", + }, + "requires": map[interface{}]interface{}{ + "client": "http", + }, + "peers": map[interface{}]interface{}{ + "me": "http", + }, + }) +} + +func (s *MetaSuite) TestStorage(c *gc.C) { + // "type" is the only required attribute for storage. + meta, err := charm.ReadMeta(strings.NewReader(` +name: a +summary: b +description: c +storage: + store0: + description: woo tee bix + type: block + store1: + type: filesystem +`)) + c.Assert(err, gc.IsNil) + c.Assert(meta.Storage, gc.DeepEquals, map[string]charm.Storage{ + "store0": { + Name: "store0", + Description: "woo tee bix", + Type: charm.StorageBlock, + CountMin: 1, // singleton + CountMax: 1, + }, + "store1": { + Name: "store1", + Type: charm.StorageFilesystem, + CountMin: 1, // singleton + CountMax: 1, + }, + }) +} + +func (s *MetaSuite) TestStorageErrors(c *gc.C) { + prefix := ` +name: a +summary: b +description: c +storage: + store-bad: +`[1:] + + type test struct { + desc string + yaml string + err string + } + + tests := []test{{ + desc: "type is required", + yaml: " required: false", + err: "metadata: storage.store-bad.type: unexpected value ", + }, { + desc: "range must be an integer, or integer range (1)", + yaml: " type: filesystem\n multiple:\n range: woat", + err: `metadata: storage.store-bad.multiple.range: value "woat" does not match 'm', 'm-n', or 'm\+'`, + }, { + desc: "range must be an integer, or integer range (2)", + yaml: " type: filesystem\n multiple:\n range: 0-abc", + err: `metadata: storage.store-bad.multiple.range: value "0-abc" does not match 'm', 'm-n', or 'm\+'`, + }, { + desc: "range must be non-negative", + yaml: " type: filesystem\n multiple:\n range: -1", + err: `metadata: storage.store-bad.multiple.range: invalid count -1`, + }, { + desc: "range must be positive", + yaml: " type: filesystem\n multiple:\n range: 0", + err: `metadata: storage.store-bad.multiple.range: invalid count 0`, + }, { + desc: "location cannot be specified for block type storage", + yaml: " type: block\n location: /dev/sdc", + err: `charm "a" storage "store-bad": location may not be specified for "type: block"`, + }, { + desc: "minimum size must parse correctly", + yaml: " type: block\n minimum-size: foo", + err: `metadata: expected a non-negative number, got "foo"`, + }, { + desc: "minimum size must have valid suffix", + yaml: " type: block\n minimum-size: 10Q", + err: `metadata: invalid multiplier suffix "Q", expected one of MGTPEZY`, + }, { + desc: "properties must contain valid values", + yaml: " type: block\n properties: [transient, foo]", + err: `metadata: .* unexpected value "foo"`, + }} + + for i, test := range tests { + c.Logf("test %d: %s", i, test.desc) + c.Logf("\n%s\n", prefix+test.yaml) + _, err := charm.ReadMeta(strings.NewReader(prefix + test.yaml)) + c.Assert(err, gc.ErrorMatches, test.err) + } +} + +func (s *MetaSuite) TestStorageCount(c *gc.C) { + testStorageCount := func(count string, min, max int) { + meta, err := charm.ReadMeta(strings.NewReader(fmt.Sprintf(` +name: a +summary: b 
+description: c +storage: + store0: + type: filesystem + multiple: + range: %s +`, count))) + c.Assert(err, gc.IsNil) + store := meta.Storage["store0"] + c.Assert(store, gc.NotNil) + c.Assert(store.CountMin, gc.Equals, min) + c.Assert(store.CountMax, gc.Equals, max) + } + testStorageCount("1", 1, 1) + testStorageCount("0-1", 0, 1) + testStorageCount("1-1", 1, 1) + testStorageCount("1+", 1, -1) + // n- is equivalent to n+ + testStorageCount("1-", 1, -1) +} + +func (s *MetaSuite) TestStorageLocation(c *gc.C) { + meta, err := charm.ReadMeta(strings.NewReader(` +name: a +summary: b +description: c +storage: + store0: + type: filesystem + location: /var/lib/things +`)) + c.Assert(err, gc.IsNil) + store := meta.Storage["store0"] + c.Assert(store, gc.NotNil) + c.Assert(store.Location, gc.Equals, "/var/lib/things") +} + +func (s *MetaSuite) TestStorageMinimumSize(c *gc.C) { + meta, err := charm.ReadMeta(strings.NewReader(` +name: a +summary: b +description: c +storage: + store0: + type: filesystem + minimum-size: 10G +`)) + c.Assert(err, gc.IsNil) + store := meta.Storage["store0"] + c.Assert(store, gc.NotNil) + c.Assert(store.MinimumSize, gc.Equals, uint64(10*1024)) +} + +func (s *MetaSuite) TestStorageProperties(c *gc.C) { + meta, err := charm.ReadMeta(strings.NewReader(` +name: a +summary: b +description: c +storage: + store0: + type: filesystem + properties: [transient] +`)) + c.Assert(err, gc.IsNil) + store := meta.Storage["store0"] + c.Assert(store, gc.NotNil) + c.Assert(store.Properties, jc.SameContents, []string{"transient"}) +} + +func (s *MetaSuite) TestPayloadClasses(c *gc.C) { + meta, err := charm.ReadMeta(strings.NewReader(` +name: a +summary: b +description: c +payloads: + monitor: + type: docker + kvm-guest: + type: kvm +`)) + c.Assert(err, gc.IsNil) + + c.Check(meta.PayloadClasses, jc.DeepEquals, map[string]charm.PayloadClass{ + "monitor": charm.PayloadClass{ + Name: "monitor", + Type: "docker", + }, + "kvm-guest": charm.PayloadClass{ + Name: "kvm-guest", + Type: "kvm", + }, + }) +} + +func (s *MetaSuite) TestResources(c *gc.C) { + meta, err := charm.ReadMeta(strings.NewReader(` +name: a +summary: b +description: c +resources: + resource-name: + type: file + filename: filename.tgz + description: "One line that is useful when operators need to push it." 
+ other-resource: + type: file + filename: other.zip +`)) + c.Assert(err, gc.IsNil) + + c.Check(meta.Resources, jc.DeepEquals, map[string]resource.Meta{ + "resource-name": resource.Meta{ + Name: "resource-name", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }, + "other-resource": resource.Meta{ + Name: "other-resource", + Type: resource.TypeFile, + Path: "other.zip", + }, + }) +} + +type dummyCharm struct{} + +func (c *dummyCharm) Config() *charm.Config { + panic("unused") +} + +func (c *dummyCharm) Metrics() *charm.Metrics { + panic("unused") +} + +func (c *dummyCharm) Actions() *charm.Actions { + panic("unused") +} + +func (c *dummyCharm) Revision() int { + panic("unused") +} + +func (c *dummyCharm) Meta() *charm.Meta { + return &charm.Meta{ + Provides: map[string]charm.Relation{ + "pro": {Interface: "ifce-pro", Scope: charm.ScopeGlobal}, + }, + Requires: map[string]charm.Relation{ + "req": {Interface: "ifce-req", Scope: charm.ScopeGlobal}, + "info": {Interface: "juju-info", Scope: charm.ScopeContainer}, + }, + Peers: map[string]charm.Relation{ + "peer": {Interface: "ifce-peer", Scope: charm.ScopeGlobal}, + }, + } +} === added file 'src/gopkg.in/juju/charm.v6-unstable/metrics.go' --- src/gopkg.in/juju/charm.v6-unstable/metrics.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/metrics.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,114 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + + goyaml "gopkg.in/yaml.v1" +) + +// MetricType is used to identify metric types supported by juju. +type MetricType string + +const ( + builtinMetricPrefix = "juju" + + // Supported metric types. + MetricTypeGauge MetricType = "gauge" + MetricTypeAbsolute MetricType = "absolute" +) + +// IsBuiltinMetric reports whether the given metric key is in the builtin metric namespace +func IsBuiltinMetric(key string) bool { + return strings.HasPrefix(key, builtinMetricPrefix) +} + +func validateValue(value string) error { + // The largest number of digits that can be returned by strconv.FormatFloat is 24, so + // choose an arbitrary limit somewhat higher than that. + if len(value) > 30 { + return fmt.Errorf("metric value is too large") + } + fValue, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("invalid value type: expected float, got %q", value) + } + if fValue < 0 { + return fmt.Errorf("invalid value: value must be greater or equal to zero, got %v", value) + } + return nil +} + +// validateValue checks if the supplied metric value fits the requirements +// of its expected type. +func (m MetricType) validateValue(value string) error { + switch m { + case MetricTypeGauge, MetricTypeAbsolute: + return validateValue(value) + default: + return fmt.Errorf("unknown metric type %q", m) + } +} + +// Metric represents a single metric definition +type Metric struct { + Type MetricType + Description string +} + +// Metrics contains the metrics declarations encoded in the metrics.yaml +// file. +type Metrics struct { + Metrics map[string]Metric +} + +// ReadMetrics reads a MetricsDeclaration in YAML format. 
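+// A minimal accepted document (mirroring TestValidYaml in metrics_test.go) +// looks like: +// +// metrics: +// blips: +// type: absolute +// description: An absolute metric. +// juju-unit-time: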
+func ReadMetrics(r io.Reader) (*Metrics, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + var metrics Metrics + if err := goyaml.Unmarshal(data, &metrics); err != nil { + return nil, err + } + if metrics.Metrics == nil { + return &metrics, nil + } + for name, metric := range metrics.Metrics { + if IsBuiltinMetric(name) { + if metric.Type != MetricType("") || metric.Description != "" { + return nil, fmt.Errorf("metric %q is using a prefix reserved for built-in metrics: it should not have type or description specification", name) + } + continue + } + switch metric.Type { + case MetricTypeGauge, MetricTypeAbsolute: + default: + return nil, fmt.Errorf("invalid metrics declaration: metric %q has unknown type %q", name, metric.Type) + } + if metric.Description == "" { + return nil, fmt.Errorf("invalid metrics declaration: metric %q lacks description", name) + } + } + return &metrics, nil +} + +// ValidateMetric validates the supplied metric name and value against the loaded +// metric definitions. +func (m Metrics) ValidateMetric(name, value string) error { + metric, exists := m.Metrics[name] + if !exists { + return fmt.Errorf("metric %q not defined", name) + } + if IsBuiltinMetric(name) { + return validateValue(value) + } + return metric.Type.validateValue(value) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/metrics_test.go' --- src/gopkg.in/juju/charm.v6-unstable/metrics_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/metrics_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,227 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "sort" + "strings" + + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +// Keys returns a list of all defined metrics keys. +func Keys(m *charm.Metrics) []string { + result := make([]string, 0, len(m.Metrics)) + + for name := range m.Metrics { + result = append(result, name) + + } + sort.Strings(result) + return result +} + +type MetricsSuite struct{} + +var _ = gc.Suite(&MetricsSuite{}) + +func (s *MetricsSuite) TestReadEmpty(c *gc.C) { + metrics, err := charm.ReadMetrics(strings.NewReader("")) + c.Assert(err, gc.IsNil) + c.Assert(metrics, gc.NotNil) +} + +func (s *MetricsSuite) TestReadAlmostEmpty(c *gc.C) { + metrics, err := charm.ReadMetrics(strings.NewReader(` +metrics: +`)) + c.Assert(err, gc.IsNil) + c.Assert(metrics, gc.NotNil) +} + +func (s *MetricsSuite) TestNoDescription(c *gc.C) { + metrics, err := charm.ReadMetrics(strings.NewReader(` +metrics: + some-metric: + type: gauge +`)) + c.Assert(err, gc.ErrorMatches, "invalid metrics declaration: metric \"some-metric\" lacks description") + c.Assert(metrics, gc.IsNil) +} + +func (s *MetricsSuite) TestIncorrectType(c *gc.C) { + metrics, err := charm.ReadMetrics(strings.NewReader(` +metrics: + some-metric: + type: not-a-type + description: Some description. +`)) + c.Assert(err, gc.ErrorMatches, "invalid metrics declaration: metric \"some-metric\" has unknown type \"not-a-type\"") + c.Assert(metrics, gc.IsNil) +} + +func (s *MetricsSuite) TestMultipleDefinition(c *gc.C) { + metrics, err := charm.ReadMetrics(strings.NewReader(` +metrics: + some-metric: + type: gauge + description: Some description. + some-metric: + type: absolute + description: Some other description. 
+ +`)) + c.Assert(err, gc.IsNil) + c.Assert(metrics.Metrics, gc.HasLen, 1) + c.Assert(metrics.Metrics["some-metric"].Type, gc.Equals, charm.MetricTypeAbsolute) +} + +func (s *MetricsSuite) TestIsBuiltinMetric(c *gc.C) { + tests := []struct { + input string + isbuiltin bool + }{{ + "juju-thing", + true, + }, { + "jujuthing", + true, + }, { + "thing", + false, + }, + } + + for i, test := range tests { + c.Logf("test %d isBuiltinMetric(%v) = %v", i, test.input, test.isbuiltin) + is := charm.IsBuiltinMetric(test.input) + c.Assert(is, gc.Equals, test.isbuiltin) + } +} + +func (s *MetricsSuite) TestValidYaml(c *gc.C) { + metrics, err := charm.ReadMetrics(strings.NewReader(` +metrics: + blips: + type: absolute + description: An absolute metric. + blops: + type: gauge + description: A gauge metric. + juju-unit-time: +`)) + c.Assert(err, gc.IsNil) + c.Assert(metrics, gc.NotNil) + c.Assert(Keys(metrics), gc.DeepEquals, []string{"blips", "blops", "juju-unit-time"}) + + testCases := []struct { + about string + name string + value string + err string + }{{ + about: "valid gauge metric", + name: "blops", + value: "1", + err: "", + }, { + about: "valid absolute metric", + name: "blips", + value: "0", + err: "", + }, { + about: "valid gauge metric, float value", + name: "blops", + value: "0.15", + err: "", + }, { + about: "valid absolute metric, float value", + name: "blips", + value: "6.015e15", + err: "", + }, { + about: "undeclared metric", + name: "undeclared", + value: "6.015e15", + err: "metric \"undeclared\" not defined", + }, { + about: "invalid type for gauge metric", + name: "blops", + value: "true", + err: "invalid value type: expected float, got \"true\"", + }, { + about: "metric value too large", + name: "blips", + value: "1111111111111111111111111111111", + err: "metric value is too large", + }, + } + + for i, t := range testCases { + c.Logf("test %d: %s", i, t.about) + err := metrics.ValidateMetric(t.name, t.value) + if t.err == "" { + c.Check(err, gc.IsNil) + } else { + c.Check(err, gc.ErrorMatches, t.err) + } + } + +} + +func (s *MetricsSuite) TestBuiltInMetrics(c *gc.C) { + tests := []string{` +metrics: + some-metric: + type: gauge + description: Some description. + juju-unit-time: + type: absolute +`, ` +metrics: + some-metric: + type: gauge + description: Some description. 
+ juju-unit-time: + description: Some description +`, + } + for _, test := range tests { + c.Logf("%s", test) + _, err := charm.ReadMetrics(strings.NewReader(test)) + c.Assert(err, gc.ErrorMatches, `metric "juju-unit-time" is using a prefix reserved for built-in metrics: it should not have type or description specification`) + } +} + +func (s *MetricsSuite) TestValidateValue(c *gc.C) { + tests := []struct { + value string + expectedError string + }{{ + value: "1234567890", + }, { + value: "0", + }, { + value: "abcd", + expectedError: `invalid value type: expected float, got "abcd"`, + }, { + value: "1234567890123456789012345678901234567890", + expectedError: "metric value is too large", + }, { + value: "-42", + expectedError: "invalid value: value must be greater or equal to zero, got -42", + }, + } + + for _, test := range tests { + err := charm.ValidateValue(test.value) + if test.expectedError != "" { + c.Assert(err, gc.ErrorMatches, test.expectedError) + } else { + c.Assert(err, gc.IsNil) + } + } +} === added file 'src/gopkg.in/juju/charm.v6-unstable/payloads.go' --- src/gopkg.in/juju/charm.v6-unstable/payloads.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/payloads.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,67 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + + "github.com/juju/schema" +) + +var payloadClassSchema = schema.FieldMap( + schema.Fields{ + "type": schema.String(), + }, + schema.Defaults{}, +) + +// PayloadClass holds the information about a payload class, as stored +// in a charm's metadata. +type PayloadClass struct { + // Name identifies the payload class. + Name string + + // Type identifies the type of payload (e.g. kvm, docker). + Type string +} + +func parsePayloadClasses(data interface{}) map[string]PayloadClass { + if data == nil { + return nil + } + + result := make(map[string]PayloadClass) + for name, val := range data.(map[string]interface{}) { + result[name] = parsePayloadClass(name, val) + } + + return result +} + +func parsePayloadClass(name string, data interface{}) PayloadClass { + payloadClass := PayloadClass{ + Name: name, + } + if data == nil { + return payloadClass + } + pcMap := data.(map[string]interface{}) + + if val := pcMap["type"]; val != nil { + payloadClass.Type = val.(string) + } + + return payloadClass +} + +// Validate checks the payload class to ensure its data is valid. +func (pc PayloadClass) Validate() error { + if pc.Name == "" { + return fmt.Errorf("payload class missing name") + } + if pc.Type == "" { + return fmt.Errorf("payload class missing type") + } + return nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/payloads_test.go' --- src/gopkg.in/juju/charm.v6-unstable/payloads_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/payloads_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,86 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
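+
+// Illustrative note (not from the original file): in charm metadata,
+// payload classes are declared as, e.g.,
+//
+//	payloads:
+//	  monitor:
+//	    type: docker
+//
+// which parsePayloadClasses turns into
+// PayloadClass{Name: "monitor", Type: "docker"}; Validate then rejects a
+// missing name or type, as the tests below exercise.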
+
+package charm_test
+
+import (
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"gopkg.in/juju/charm.v6-unstable"
+)
+
+var _ = gc.Suite(&payloadClassSuite{})
+
+type payloadClassSuite struct{}
+
+func (s *payloadClassSuite) TestParsePayloadClassOkay(c *gc.C) {
+	name := "my-payload"
+	data := map[string]interface{}{
+		"type": "docker",
+	}
+	payloadClass := charm.ParsePayloadClass(name, data)
+
+	c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{
+		Name: "my-payload",
+		Type: "docker",
+	})
+}
+
+func (s *payloadClassSuite) TestParsePayloadClassMissingName(c *gc.C) {
+	name := ""
+	data := map[string]interface{}{
+		"type": "docker",
+	}
+	payloadClass := charm.ParsePayloadClass(name, data)
+
+	c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{
+		Name: "",
+		Type: "docker",
+	})
+}
+
+func (s *payloadClassSuite) TestParsePayloadClassEmpty(c *gc.C) {
+	name := "my-payload"
+	var data map[string]interface{}
+	payloadClass := charm.ParsePayloadClass(name, data)
+
+	c.Check(payloadClass, jc.DeepEquals, charm.PayloadClass{
+		Name: "my-payload",
+	})
+}
+
+func (s *payloadClassSuite) TestValidateFull(c *gc.C) {
+	payloadClass := charm.PayloadClass{
+		Name: "my-payload",
+		Type: "docker",
+	}
+	err := payloadClass.Validate()
+
+	c.Check(err, jc.ErrorIsNil)
+}
+
+func (s *payloadClassSuite) TestValidateZeroValue(c *gc.C) {
+	var payloadClass charm.PayloadClass
+	err := payloadClass.Validate()
+
+	c.Check(err, gc.NotNil)
+}
+
+func (s *payloadClassSuite) TestValidateMissingName(c *gc.C) {
+	payloadClass := charm.PayloadClass{
+		Type: "docker",
+	}
+	err := payloadClass.Validate()
+
+	c.Check(err, gc.ErrorMatches, `payload class missing name`)
+}
+
+func (s *payloadClassSuite) TestValidateMissingType(c *gc.C) {
+	payloadClass := charm.PayloadClass{
+		Name: "my-payload",
+	}
+	err := payloadClass.Validate()
+
+	c.Check(err, gc.ErrorMatches, `payload class missing type`)
+}
=== added directory 'src/gopkg.in/juju/charm.v6-unstable/resource'
=== added file 'src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint.go'
--- src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,66 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package resource
+
+import (
+	stdhash "hash"
+	"io"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils/hash"
+)
+
+var newHash, validateSum = hash.SHA384()
+
+// Fingerprint represents the unique fingerprint value of a resource's data.
+type Fingerprint struct {
+	hash.Fingerprint
+}
+
+// NewFingerprint wraps the provided raw fingerprint bytes.
+// This function roundtrips with Fingerprint.Bytes().
+func NewFingerprint(raw []byte) (Fingerprint, error) {
+	fp, err := hash.NewFingerprint(raw, validateSum)
+	if err != nil {
+		return Fingerprint{}, errors.Trace(err)
+	}
+	return Fingerprint{fp}, nil
+}
+
+// ParseFingerprint wraps the provided raw fingerprint string.
+// This function roundtrips with Fingerprint.String().
+func ParseFingerprint(raw string) (Fingerprint, error) {
+	fp, err := hash.ParseHexFingerprint(raw, validateSum)
+	if err != nil {
+		return Fingerprint{}, errors.Trace(err)
+	}
+	return Fingerprint{fp}, nil
+}
+
+// GenerateFingerprint returns the fingerprint for the provided data.
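+//
+// For example (illustrative only; the blob contents are arbitrary):
+//
+//	fp, err := GenerateFingerprint(strings.NewReader("some blob"))
+//	if err != nil {
+//		// handle error
+//	}
+//	hexStr := fp.String() // hex form, roundtrips via ParseFingerprint
+//	raw := fp.Bytes()     // raw form, roundtrips via NewFingerprint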
+func GenerateFingerprint(reader io.Reader) (Fingerprint, error) {
+	fp, err := hash.GenerateFingerprint(reader, newHash)
+	if err != nil {
+		return Fingerprint{}, errors.Trace(err)
+	}
+	return Fingerprint{fp}, nil
+}
+
+// FingerprintHash is a hash that may be used to generate fingerprints.
+type FingerprintHash struct {
+	stdhash.Hash
+}
+
+// NewFingerprintHash returns a hash that may be used to create fingerprints.
+func NewFingerprintHash() *FingerprintHash {
+	return &FingerprintHash{
+		Hash: newHash(),
+	}
+}
+
+// Fingerprint returns the current fingerprint of the hash.
+func (fph FingerprintHash) Fingerprint() Fingerprint {
+	fp := hash.NewValidFingerprint(fph)
+	return Fingerprint{fp}
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint_test.go'
--- src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/resource/fingerprint_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,143 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package resource_test
+
+import (
+	"crypto/sha512"
+	"encoding/hex"
+	"strings"
+
+	"github.com/juju/errors"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"gopkg.in/juju/charm.v6-unstable/resource"
+)
+
+func newFingerprint(c *gc.C, data string) ([]byte, string) {
+	hash := sha512.New384()
+	_, err := hash.Write([]byte(data))
+	c.Assert(err, jc.ErrorIsNil)
+	raw := hash.Sum(nil)
+
+	hexStr := hex.EncodeToString(raw)
+	return raw, hexStr
+}
+
+var _ = gc.Suite(&FingerprintSuite{})
+
+type FingerprintSuite struct{}
+
+func (s *FingerprintSuite) TestNewFingerprintOkay(c *gc.C) {
+	expected, _ := newFingerprint(c, "spamspamspam")
+
+	fp, err := resource.NewFingerprint(expected)
+	c.Assert(err, jc.ErrorIsNil)
+	raw := fp.Bytes()
+
+	c.Check(raw, jc.DeepEquals, expected)
+}
+
+func (s *FingerprintSuite) TestNewFingerprintTooSmall(c *gc.C) {
+	expected, _ := newFingerprint(c, "spamspamspam")
+
+	_, err := resource.NewFingerprint(expected[:10])
+
+	c.Check(err, jc.Satisfies, errors.IsNotValid)
+	c.Check(err, gc.ErrorMatches, `.*too small.*`)
+}
+
+func (s *FingerprintSuite) TestNewFingerprintTooBig(c *gc.C) {
+	expected, _ := newFingerprint(c, "spamspamspam")
+
+	_, err := resource.NewFingerprint(append(expected, 1, 2, 3))
+
+	c.Check(err, jc.Satisfies, errors.IsNotValid)
+	c.Check(err, gc.ErrorMatches, `.*too big.*`)
+}
+
+func (s *FingerprintSuite) TestParseFingerprintOkay(c *gc.C) {
+	_, expected := newFingerprint(c, "spamspamspam")
+
+	fp, err := resource.ParseFingerprint(expected)
+	c.Assert(err, jc.ErrorIsNil)
+	hex := fp.String()
+
+	c.Check(hex, jc.DeepEquals, expected)
+}
+
+func (s *FingerprintSuite) TestParseFingerprintNonHex(c *gc.C) {
+	_, err := resource.ParseFingerprint("XYZ") // not hex
+
+	c.Check(err, gc.ErrorMatches, `.*odd length hex string.*`)
+}
+
+func (s *FingerprintSuite) TestGenerateFingerprint(c *gc.C) {
+	expected, _ := newFingerprint(c, "spamspamspam")
+	data := strings.NewReader("spamspamspam")
+
+	fp, err := resource.GenerateFingerprint(data)
+	c.Assert(err, jc.ErrorIsNil)
+	raw := fp.Bytes()
+
+	c.Check(raw, jc.DeepEquals, expected)
+}
+
+func (s *FingerprintSuite) TestString(c *gc.C) {
+	raw, expected := newFingerprint(c, "spamspamspam")
+	fp, err := resource.NewFingerprint(raw)
+	c.Assert(err, jc.ErrorIsNil)
+
+	hex := fp.String()
+
+	c.Check(hex, gc.Equals, expected)
+}
+
+func (s *FingerprintSuite) TestRoundtripString(c *gc.C) {
+	_, expected := newFingerprint(c,
"spamspamspam") + + fp, err := resource.ParseFingerprint(expected) + c.Assert(err, jc.ErrorIsNil) + hex := fp.String() + + c.Check(hex, gc.Equals, expected) +} + +func (s *FingerprintSuite) TestBytes(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + fp, err := resource.NewFingerprint(expected) + c.Assert(err, jc.ErrorIsNil) + + raw := fp.Bytes() + + c.Check(raw, jc.DeepEquals, expected) +} + +func (s *FingerprintSuite) TestRoundtripBytes(c *gc.C) { + expected, _ := newFingerprint(c, "spamspamspam") + + fp, err := resource.NewFingerprint(expected) + c.Assert(err, jc.ErrorIsNil) + raw := fp.Bytes() + + c.Check(raw, jc.DeepEquals, expected) +} + +func (s *FingerprintSuite) TestValidateOkay(c *gc.C) { + raw, _ := newFingerprint(c, "spamspamspam") + fp, err := resource.NewFingerprint(raw) + c.Assert(err, jc.ErrorIsNil) + + err = fp.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *FingerprintSuite) TestValidateZero(c *gc.C) { + var fp resource.Fingerprint + err := fp.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `zero-value fingerprint not valid`) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/meta.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/meta.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/meta.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,94 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package resource + +import ( + "fmt" + "strings" + + "github.com/juju/errors" +) + +// Meta holds the information about a resource, as stored +// in a charm's metadata. +type Meta struct { + // Name identifies the resource. + Name string + + // Type identifies the type of resource (e.g. "file"). + Type Type + + // TODO(ericsnow) Rename Path to Filename? + + // Path is the relative path of the file or directory where the + // resource will be stored under the unit's data directory. The path + // is resolved against a subdirectory assigned to the resource. For + // example, given a service named "spam", a resource "eggs", and a + // path "eggs.tgz", the fully resolved storage path for the resource + // would be: + // /var/lib/juju/agent/spam-0/resources/eggs/eggs.tgz + Path string + + // Description holds optional user-facing info for the resource. + Description string +} + +// ParseMeta parses the provided data into a Meta. +func ParseMeta(name string, data interface{}) (Meta, error) { + var meta Meta + meta.Name = name + + if data == nil { + return meta, nil + } + rMap := data.(map[string]interface{}) + + if val := rMap["type"]; val != nil { + var err error + meta.Type, err = ParseType(val.(string)) + if err != nil { + return meta, errors.Trace(err) + } + } + + if val := rMap["filename"]; val != nil { + meta.Path = val.(string) + } + + if val := rMap["description"]; val != nil { + meta.Description = val.(string) + } + + return meta, nil +} + +// Validate checks the resource metadata to ensure the data is valid. 
+func (meta Meta) Validate() error { + if meta.Name == "" { + return errors.NewNotValid(nil, "resource missing name") + } + + var typeUnknown Type + if meta.Type == typeUnknown { + return errors.NewNotValid(nil, "resource missing type") + } + if err := meta.Type.Validate(); err != nil { + msg := fmt.Sprintf("invalid resource type %v: %v", meta.Type, err) + return errors.NewNotValid(nil, msg) + } + + if meta.Path == "" { + // TODO(ericsnow) change "filename" to "path" + return errors.NewNotValid(nil, "resource missing filename") + } + if meta.Type == TypeFile { + if strings.Contains(meta.Path, "/") { + msg := fmt.Sprintf(`filename cannot contain "/" (got %q)`, meta.Path) + return errors.NewNotValid(nil, msg) + } + // TODO(ericsnow) Constrain Path to alphanumeric? + } + + return nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/meta_test.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/meta_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/meta_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,251 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package resource_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable/resource" +) + +var _ = gc.Suite(&MetaSuite{}) + +type MetaSuite struct{} + +func (s *MetaSuite) TestParseMetaOkay(c *gc.C) { + name := "my-resource" + data := map[string]interface{}{ + "type": "file", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }) +} + +func (s *MetaSuite) TestParseMetaMissingName(c *gc.C) { + name := "" + data := map[string]interface{}{ + "type": "file", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }) +} + +func (s *MetaSuite) TestParseMetaMissingType(c *gc.C) { + name := "my-resource" + data := map[string]interface{}{ + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "my-resource", + // Type is the zero value. 
+ Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }) +} + +func (s *MetaSuite) TestParseMetaEmptyType(c *gc.C) { + name := "my-resource" + data := map[string]interface{}{ + "type": "", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + _, err := resource.ParseMeta(name, data) + + c.Check(err, gc.ErrorMatches, `unsupported resource type .*`) +} + +func (s *MetaSuite) TestParseMetaUnknownType(c *gc.C) { + name := "my-resource" + data := map[string]interface{}{ + "type": "spam", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + _, err := resource.ParseMeta(name, data) + + c.Check(err, gc.ErrorMatches, `unsupported resource type .*`) +} + +func (s *MetaSuite) TestParseMetaMissingPath(c *gc.C) { + name := "my-resource" + data := map[string]interface{}{ + "type": "file", + "description": "One line that is useful when operators need to push it.", + } + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "", + Description: "One line that is useful when operators need to push it.", + }) +} + +func (s *MetaSuite) TestParseMetaMissingComment(c *gc.C) { + name := "my-resource" + data := map[string]interface{}{ + "type": "file", + "filename": "filename.tgz", + } + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "", + }) +} + +func (s *MetaSuite) TestParseMetaEmpty(c *gc.C) { + name := "my-resource" + data := make(map[string]interface{}) + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "my-resource", + }) +} + +func (s *MetaSuite) TestParseMetaNil(c *gc.C) { + name := "my-resource" + var data map[string]interface{} + res, err := resource.ParseMeta(name, data) + c.Assert(err, jc.ErrorIsNil) + + c.Check(res, jc.DeepEquals, resource.Meta{ + Name: "my-resource", + }) +} + +func (s *MetaSuite) TestValidateFull(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + } + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *MetaSuite) TestValidateZeroValue(c *gc.C) { + var res resource.Meta + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *MetaSuite) TestValidateMissingName(c *gc.C) { + res := resource.Meta{ + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `resource missing name`) +} + +func (s *MetaSuite) TestValidateMissingType(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `resource missing type`) +} + +func (s *MetaSuite) TestValidateMissingPath(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Description: "One line that is useful when operators need to push 
it.", + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `resource missing filename`) +} + +func (s *MetaSuite) TestValidateNestedPath(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "spam/eggs", + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*filename cannot contain "/" .*`) +} + +func (s *MetaSuite) TestValidateAbsolutePath(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "/spam/eggs", + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*filename cannot contain "/" .*`) +} + +func (s *MetaSuite) TestValidateSuspectPath(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "git@github.com:juju/juju.git", + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*filename cannot contain "/" .*`) +} + +func (s *MetaSuite) TestValidateMissingComment(c *gc.C) { + res := resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + } + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/origin.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/origin.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/origin.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,50 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package resource + +import ( + "github.com/juju/errors" +) + +// These are the valid resource origins. +const ( + originUnknown Origin = iota + OriginUpload + OriginStore +) + +var origins = map[Origin]string{ + OriginUpload: "upload", + OriginStore: "store", +} + +// Origin identifies where a charm's resource comes from. +type Origin int + +// ParseOrigin converts the provided string into an Origin. +// If it is not a known origin then an error is returned. +func ParseOrigin(value string) (Origin, error) { + for o, str := range origins { + if value == str { + return o, nil + } + } + return originUnknown, errors.Errorf("unknown origin %q", value) +} + +// String returns the printable representation of the origin. +func (o Origin) String() string { + return origins[o] +} + +// Validate ensures that the origin is correct. +func (o Origin) Validate() error { + // Ideally, only the (unavoidable) zero value would be invalid. + // However, typedef'ing int means that the use of int literals + // could result in invalid Type values other than the zero value. + if _, ok := origins[o]; !ok { + return errors.NewNotValid(nil, "unknown origin") + } + return nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/origin_test.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/origin_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/origin_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,58 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package resource_test
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	"gopkg.in/juju/charm.v6-unstable/resource"
+)
+
+type OriginSuite struct {
+	testing.IsolationSuite
+}
+
+var _ = gc.Suite(&OriginSuite{})
+
+func (OriginSuite) TestParseOriginKnown(c *gc.C) {
+	recognized := map[string]resource.Origin{
+		"upload": resource.OriginUpload,
+		"store":  resource.OriginStore,
+	}
+	for value, expected := range recognized {
+		origin, err := resource.ParseOrigin(value)
+
+		c.Check(err, jc.ErrorIsNil)
+		c.Check(origin, gc.Equals, expected)
+	}
+}
+
+func (OriginSuite) TestParseOriginUnknown(c *gc.C) {
+	_, err := resource.ParseOrigin("")
+
+	c.Check(err, gc.ErrorMatches, `.*unknown origin "".*`)
+}
+
+func (OriginSuite) TestValidateKnown(c *gc.C) {
+	recognized := []resource.Origin{
+		resource.OriginUpload,
+		resource.OriginStore,
+	}
+	for _, origin := range recognized {
+		err := origin.Validate()
+
+		c.Check(err, jc.ErrorIsNil)
+	}
+}
+
+func (OriginSuite) TestValidateUnknown(c *gc.C) {
+	var origin resource.Origin
+	err := origin.Validate()
+
+	c.Check(errors.Cause(err), jc.Satisfies, errors.IsNotValid)
+	c.Check(err, gc.ErrorMatches, `.*unknown origin.*`)
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/resource/package_test.go'
--- src/gopkg.in/juju/charm.v6-unstable/resource/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/resource/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package resource_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) {
+	gc.TestingT(t)
+}
=== added file 'src/gopkg.in/juju/charm.v6-unstable/resource/resource.go'
--- src/gopkg.in/juju/charm.v6-unstable/resource/resource.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charm.v6-unstable/resource/resource.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,57 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package resource
+
+import (
+	"github.com/juju/errors"
+)
+
+// Resource describes a charm's resource in the charm store.
+type Resource struct {
+	Meta
+
+	// Origin identifies where the resource will come from.
+	Origin Origin
+
+	// Revision is the charm store revision of the resource.
+	Revision int
+
+	// Fingerprint is the SHA-384 checksum for the resource blob.
+	Fingerprint Fingerprint
+
+	// Size is the size of the resource, in bytes.
+	Size int64
+}
+
+// Validate checks the resource to ensure its data is valid.
+func (res Resource) Validate() error {
+	if err := res.Meta.Validate(); err != nil {
+		return errors.Annotate(err, "invalid resource (bad metadata)")
+	}
+
+	if err := res.Origin.Validate(); err != nil {
+		return errors.Annotate(err, "invalid resource (bad origin)")
+	}
+
+	if res.Revision < 0 {
+		return errors.NewNotValid(nil, "invalid resource (revision must be non-negative)")
+	}
+	// TODO(ericsnow) Ensure Revision is 0 for OriginUpload?
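+
+	// Note the coupling checked below: a zero fingerprint is acceptable
+	// only while no blob exists (Size == 0); once Size is set, a valid
+	// fingerprint must accompany it.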
+ + if res.Fingerprint.IsZero() { + if res.Size > 0 { + return errors.NewNotValid(nil, "missing fingerprint") + } + } else { + if err := res.Fingerprint.Validate(); err != nil { + return errors.Annotate(err, "bad fingerprint") + } + } + + if res.Size < 0 { + return errors.NotValidf("negative size") + } + + return nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/resource_test.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/resource_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/resource_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,168 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package resource_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable/resource" +) + +var fingerprint = []byte("123456789012345678901234567890123456789012345678") + +var _ = gc.Suite(&ResourceSuite{}) + +type ResourceSuite struct{} + +func (s *ResourceSuite) TestValidateFull(c *gc.C) { + fp, err := resource.NewFingerprint(fingerprint) + c.Assert(err, jc.ErrorIsNil) + res := resource.Resource{ + Meta: resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }, + Origin: resource.OriginStore, + Revision: 1, + Fingerprint: fp, + Size: 1, + } + err = res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *ResourceSuite) TestValidateZeroValue(c *gc.C) { + var res resource.Resource + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) +} + +func (s *ResourceSuite) TestValidateBadMetadata(c *gc.C) { + var meta resource.Meta + c.Assert(meta.Validate(), gc.NotNil) + + fp, err := resource.NewFingerprint(fingerprint) + c.Assert(err, jc.ErrorIsNil) + res := resource.Resource{ + Meta: meta, + Origin: resource.OriginStore, + Revision: 1, + Fingerprint: fp, + } + err = res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*bad metadata.*`) +} + +func (s *ResourceSuite) TestValidateBadOrigin(c *gc.C) { + var origin resource.Origin + c.Assert(origin.Validate(), gc.NotNil) + fp, err := resource.NewFingerprint(fingerprint) + c.Assert(err, jc.ErrorIsNil) + res := resource.Resource{ + Meta: resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }, + Origin: origin, + Revision: 1, + Fingerprint: fp, + } + err = res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*bad origin.*`) +} + +func (s *ResourceSuite) TestValidateBadRevision(c *gc.C) { + fp, err := resource.NewFingerprint(fingerprint) + c.Assert(err, jc.ErrorIsNil) + res := resource.Resource{ + Meta: resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }, + Origin: resource.OriginStore, + Revision: -1, + Fingerprint: fp, + } + err = res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*revision must be non-negative.*`) +} + +func (s *ResourceSuite) TestValidateZeroValueFingerprint(c *gc.C) { + var fp resource.Fingerprint + c.Assert(fp.Validate(), gc.NotNil) + + res := resource.Resource{ + Meta: resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + 
Description: "One line that is useful when operators need to push it.", + }, + Origin: resource.OriginStore, + Revision: 1, + Fingerprint: fp, + } + err := res.Validate() + + c.Check(err, jc.ErrorIsNil) +} + +func (s *ResourceSuite) TestValidateMissingFingerprint(c *gc.C) { + var fp resource.Fingerprint + c.Assert(fp.Validate(), gc.NotNil) + + res := resource.Resource{ + Meta: resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }, + Origin: resource.OriginStore, + Revision: 1, + Fingerprint: fp, + Size: 10, + } + err := res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `.*missing fingerprint.*`) +} + +func (s *ResourceSuite) TestValidateBadSize(c *gc.C) { + fp, err := resource.NewFingerprint(fingerprint) + c.Assert(err, jc.ErrorIsNil) + res := resource.Resource{ + Meta: resource.Meta{ + Name: "my-resource", + Type: resource.TypeFile, + Path: "filename.tgz", + Description: "One line that is useful when operators need to push it.", + }, + Origin: resource.OriginStore, + Revision: 1, + Fingerprint: fp, + Size: -1, + } + err = res.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `negative size not valid`) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/type.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/type.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/type.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,48 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package resource + +import ( + "github.com/juju/errors" +) + +// These are the valid resource types (except for unknown). +const ( + typeUnknown Type = iota + TypeFile +) + +var types = map[Type]string{ + TypeFile: "file", +} + +// Type enumerates the recognized resource types. +type Type int + +// ParseType converts a string to a Type. If the given value does not +// match a recognized type then an error is returned. +func ParseType(value string) (Type, error) { + for rt, str := range types { + if value == str { + return rt, nil + } + } + return typeUnknown, errors.Errorf("unsupported resource type %q", value) +} + +// String returns the printable representation of the type. +func (rt Type) String() string { + return types[rt] +} + +// Validate ensures that the type is valid. +func (rt Type) Validate() error { + // Ideally, only the (unavoidable) zero value would be invalid. + // However, typedef'ing int means that the use of int literals + // could result in invalid Type values other than the zero value. + if _, ok := types[rt]; !ok { + return errors.NewNotValid(nil, "unknown resource type") + } + return nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resource/type_test.go' --- src/gopkg.in/juju/charm.v6-unstable/resource/type_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resource/type_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,88 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
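+
+// Illustrative note (not from the original file): Type mirrors Origin,
+// e.g.
+//
+//	t, err := resource.ParseType("file") // TypeFile, nil error
+//	err = t.Validate()                   // nil
+//
+// and only the zero value or an out-of-range literal fails Validate, as
+// the tests below check.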
+ +package resource_test + +import ( + "github.com/juju/errors" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable/resource" +) + +var _ = gc.Suite(&TypeSuite{}) + +type TypeSuite struct{} + +func (s *TypeSuite) TestParseTypeOkay(c *gc.C) { + rt, err := resource.ParseType("file") + c.Assert(err, jc.ErrorIsNil) + + c.Check(rt, gc.Equals, resource.TypeFile) +} + +func (s *TypeSuite) TestParseTypeRecognized(c *gc.C) { + supported := []resource.Type{ + resource.TypeFile, + } + for _, expected := range supported { + rt, err := resource.ParseType(expected.String()) + c.Assert(err, jc.ErrorIsNil) + + c.Check(rt, gc.Equals, expected) + } +} + +func (s *TypeSuite) TestParseTypeEmpty(c *gc.C) { + rt, err := resource.ParseType("") + + c.Check(err, gc.ErrorMatches, `unsupported resource type ""`) + var unknown resource.Type + c.Check(rt, gc.Equals, unknown) +} + +func (s *TypeSuite) TestParseTypeUnsupported(c *gc.C) { + rt, err := resource.ParseType("spam") + + c.Check(err, gc.ErrorMatches, `unsupported resource type "spam"`) + var unknown resource.Type + c.Check(rt, gc.Equals, unknown) +} + +func (s *TypeSuite) TestTypeStringSupported(c *gc.C) { + supported := map[resource.Type]string{ + resource.TypeFile: "file", + } + for rt, expected := range supported { + str := rt.String() + + c.Check(str, gc.Equals, expected) + } +} + +func (s *TypeSuite) TestTypeStringUnknown(c *gc.C) { + var unknown resource.Type + str := unknown.String() + + c.Check(str, gc.Equals, "") +} + +func (s *TypeSuite) TestTypeValidateSupported(c *gc.C) { + supported := []resource.Type{ + resource.TypeFile, + } + for _, rt := range supported { + err := rt.Validate() + + c.Check(err, jc.ErrorIsNil) + } +} + +func (s *TypeSuite) TestTypeValidateUnknown(c *gc.C) { + var unknown resource.Type + err := unknown.Validate() + + c.Check(err, jc.Satisfies, errors.IsNotValid) + c.Check(err, gc.ErrorMatches, `unknown resource type`) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resources.go' --- src/gopkg.in/juju/charm.v6-unstable/resources.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resources.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,53 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm + +import ( + "fmt" + + "github.com/juju/schema" + + "gopkg.in/juju/charm.v6-unstable/resource" +) + +var resourceSchema = schema.FieldMap( + schema.Fields{ + "type": schema.String(), + "filename": schema.String(), // TODO(ericsnow) Change to "path"? 
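+		// "description" is optional and "type" defaults to "file" via the
+		// Defaults below; "filename" has no default, so Coerce requires it.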
+ "description": schema.String(), + }, + schema.Defaults{ + "type": resource.TypeFile.String(), + "description": "", + }, +) + +func parseMetaResources(data interface{}) (map[string]resource.Meta, error) { + if data == nil { + return nil, nil + } + + result := make(map[string]resource.Meta) + for name, val := range data.(map[string]interface{}) { + meta, err := resource.ParseMeta(name, val) + if err != nil { + return nil, err + } + result[name] = meta + } + + return result, nil +} + +func validateMetaResources(resources map[string]resource.Meta) error { + for name, res := range resources { + if res.Name != name { + return fmt.Errorf("mismatch on resource name (%q != %q)", res.Name, name) + } + if err := res.Validate(); err != nil { + return err + } + } + return nil +} === added file 'src/gopkg.in/juju/charm.v6-unstable/resources_test.go' --- src/gopkg.in/juju/charm.v6-unstable/resources_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/resources_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,87 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charm.v6-unstable" +) + +var _ = gc.Suite(&resourceSuite{}) + +type resourceSuite struct{} + +func (s *resourceSuite) TestSchemaOkay(c *gc.C) { + raw := map[interface{}]interface{}{ + "type": "file", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + v, err := charm.ResourceSchema.Coerce(raw, nil) + c.Assert(err, jc.ErrorIsNil) + + c.Check(v, jc.DeepEquals, map[string]interface{}{ + "type": "file", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + }) +} + +func (s *resourceSuite) TestSchemaMissingType(c *gc.C) { + raw := map[interface{}]interface{}{ + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + } + v, err := charm.ResourceSchema.Coerce(raw, nil) + c.Assert(err, jc.ErrorIsNil) + + c.Check(v, jc.DeepEquals, map[string]interface{}{ + "type": "file", + "filename": "filename.tgz", + "description": "One line that is useful when operators need to push it.", + }) +} + +func (s *resourceSuite) TestSchemaUnknownType(c *gc.C) { + raw := map[interface{}]interface{}{ + "type": "repo", + "filename": "juju", + "description": "One line that is useful when operators need to push it.", + } + v, err := charm.ResourceSchema.Coerce(raw, nil) + c.Assert(err, jc.ErrorIsNil) + + c.Check(v, jc.DeepEquals, map[string]interface{}{ + "type": "repo", + "filename": "juju", + "description": "One line that is useful when operators need to push it.", + }) +} + +func (s *resourceSuite) TestSchemaMissingPath(c *gc.C) { + raw := map[interface{}]interface{}{ + "type": "file", + "description": "One line that is useful when operators need to push it.", + } + _, err := charm.ResourceSchema.Coerce(raw, nil) + + c.Check(err, gc.NotNil) +} + +func (s *resourceSuite) TestSchemaMissingComment(c *gc.C) { + raw := map[interface{}]interface{}{ + "type": "file", + "filename": "filename.tgz", + } + v, err := charm.ResourceSchema.Coerce(raw, nil) + c.Assert(err, jc.ErrorIsNil) + + c.Check(v, jc.DeepEquals, map[string]interface{}{ + "type": "file", + "filename": "filename.tgz", + "description": "", + }) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/url.go' --- src/gopkg.in/juju/charm.v6-unstable/url.go 1970-01-01 00:00:00 +0000 
+++ src/gopkg.in/juju/charm.v6-unstable/url.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,396 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package charm
+
+import (
+	"encoding/json"
+	"fmt"
+	gourl "net/url"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/juju/names"
+	"gopkg.in/mgo.v2/bson"
+)
+
+// Location represents a charm location, which must declare a path component
+// and a string representation.
+type Location interface {
+	Path() string
+	String() string
+}
+
+// URL represents a charm or bundle location:
+//
+//   cs:~joe/oneiric/wordpress
+//   cs:oneiric/wordpress-42
+//   local:oneiric/wordpress
+//   cs:~joe/wordpress
+//   cs:wordpress
+//   cs:precise/wordpress-20
+//   cs:development/precise/wordpress-20
+//   cs:~joe/development/wordpress
+//
+type URL struct {
+	Schema   string  // "cs" or "local".
+	User     string  // "joe".
+	Name     string  // "wordpress".
+	Revision int     // -1 if unset, N otherwise.
+	Series   string  // "precise" or "" if unset; "bundle" if it's a bundle.
+	Channel  Channel // "development" or "" if no channel.
+}
+
+var ErrUnresolvedUrl error = fmt.Errorf("charm or bundle url series is not resolved")
+
+// Channel represents different stages in the development of a charm or bundle.
+type Channel string
+
+const (
+	// DevelopmentChannel is the channel used for charms or bundles under
+	// development.
+	DevelopmentChannel Channel = "development"
+)
+
+var (
+	validSeries = regexp.MustCompile("^[a-z]+([a-z0-9]+)?$")
+	validName   = regexp.MustCompile("^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$")
+)
+
+// IsValidSeries reports whether series is a valid series in charm or bundle
+// URLs.
+func IsValidSeries(series string) bool {
+	return validSeries.MatchString(series)
+}
+
+// IsValidChannel reports whether channel is a valid channel in charm or bundle
+// URLs.
+func IsValidChannel(channel Channel) bool {
+	return channel == DevelopmentChannel
+}
+
+// IsValidName reports whether name is a valid charm or bundle name.
+func IsValidName(name string) bool {
+	return validName.MatchString(name)
+}
+
+// WithRevision returns a URL equivalent to url but with Revision set
+// to revision.
+func (url *URL) WithRevision(revision int) *URL {
+	urlCopy := *url
+	urlCopy.Revision = revision
+	return &urlCopy
+}
+
+// WithChannel returns a URL equivalent to url but with the given channel.
+func (url *URL) WithChannel(channel Channel) *URL {
+	urlCopy := *url
+	urlCopy.Channel = channel
+	return &urlCopy
+}
+
+// MustParseURL works like ParseURL, but panics in case of errors.
+func MustParseURL(url string) *URL {
+	u, err := ParseURL(url)
+	if err != nil {
+		panic(err)
+	}
+	return u
+}
+
+// ParseURL parses the provided charm URL string into its respective
+// structure.
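+//
+// For example (illustrative):
+//
+//	curl, err := ParseURL("cs:~user/trusty/wordpress-3")
+//	// curl.User == "user", curl.Series == "trusty",
+//	// curl.Name == "wordpress", curl.Revision == 3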
+//
+// Additionally, fully-qualified charmstore URLs are supported; note that this
+// currently assumes that they will map to jujucharms.com (that is,
+// fully-qualified URLs currently map to the 'cs' schema):
+//
+//   https://jujucharms.com/name
+//   https://jujucharms.com/name/series
+//   https://jujucharms.com/name/revision
+//   https://jujucharms.com/name/series/revision
+//   https://jujucharms.com/u/user/name
+//   https://jujucharms.com/u/user/name/series
+//   https://jujucharms.com/u/user/name/revision
+//   https://jujucharms.com/u/user/name/series/revision
+//   https://jujucharms.com/channel/name
+//   https://jujucharms.com/channel/name/series
+//   https://jujucharms.com/channel/name/revision
+//   https://jujucharms.com/channel/name/series/revision
+//   https://jujucharms.com/u/user/channel/name
+//   https://jujucharms.com/u/user/channel/name/series
+//   https://jujucharms.com/u/user/channel/name/revision
+//   https://jujucharms.com/u/user/channel/name/series/revision
+//
+// A missing schema is assumed to be 'cs'.
+func ParseURL(url string) (*URL, error) {
+	// Check if we're dealing with a v1 or v2 URL.
+	u, err := gourl.Parse(url)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse charm or bundle URL: %q", url)
+	}
+	if u.RawQuery != "" || u.Fragment != "" || u.User != nil {
+		return nil, fmt.Errorf("charm or bundle URL %q has unrecognized parts", url)
+	}
+	var curl *URL
+	switch {
+	case u.Opaque != "":
+		// Shortcut old-style URLs.
+		u.Path = u.Opaque
+		curl, err = parseV1URL(u, url)
+	case u.Scheme == "http" || u.Scheme == "https":
+		// Shortcut new-style URLs.
+		curl, err = parseV2URL(u)
+	default:
+		// TODO: for now, fall through to parsing v1 references; this will be
+		// expanded to be more robust in the future.
+		curl, err = parseV1URL(u, url)
+	}
+	if err != nil {
+		return nil, err
+	}
+	if curl.Schema == "" {
+		curl.Schema = "cs"
+	}
+	return curl, nil
+}
+
+func parseV1URL(url *gourl.URL, originalURL string) (*URL, error) {
+	var r URL
+	if url.Scheme != "" {
+		r.Schema = url.Scheme
+		if r.Schema != "cs" && r.Schema != "local" {
+			return nil, fmt.Errorf("charm or bundle URL has invalid schema: %q", originalURL)
+		}
+	}
+	i := 0
+	parts := strings.Split(url.Path[i:], "/")
+	if len(parts) < 1 || len(parts) > 4 {
+		return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", originalURL)
+	}
+
+	// ~<user>
+	if strings.HasPrefix(parts[0], "~") {
+		if r.Schema == "local" {
+			return nil, fmt.Errorf("local charm or bundle URL with user name: %q", originalURL)
+		}
+		r.User, parts = parts[0][1:], parts[1:]
+	}
+
+	// <channel>
+	if len(parts) > 1 {
+		if IsValidChannel(Channel(parts[0])) {
+			if r.Schema == "local" {
+				return nil, fmt.Errorf("local charm or bundle URL with channel: %q", originalURL)
+			}
+			r.Channel, parts = Channel(parts[0]), parts[1:]
+		}
+	}
+
+	if len(parts) > 2 {
+		return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", originalURL)
+	}
+
+	// <series>
+	if len(parts) == 2 {
+		r.Series, parts = parts[0], parts[1:]
+		if !IsValidSeries(r.Series) {
+			return nil, fmt.Errorf("charm or bundle URL has invalid series: %q", originalURL)
+		}
+	}
+	if len(parts) < 1 {
+		return nil, fmt.Errorf("URL without charm or bundle name: %q", originalURL)
+	}
+
+	// <name>[-<revision>]
+	r.Name = parts[0]
+	r.Revision = -1
+	for i := len(r.Name) - 1; i > 0; i-- {
+		c := r.Name[i]
+		if c >= '0' && c <= '9' {
+			continue
+		}
+		if c == '-' && i != len(r.Name)-1 {
+			var err error
+			r.Revision, err = strconv.Atoi(r.Name[i+1:])
+			if err != nil {
+				panic(err) // We just checked it was right.
+			}
+			r.Name = r.Name[:i]
+		}
+		break
+	}
+	if r.User != "" {
+		if !names.IsValidUser(r.User) {
+			return nil, fmt.Errorf("charm or bundle URL has invalid user name: %q", originalURL)
+		}
+	}
+	if !IsValidName(r.Name) {
+		return nil, fmt.Errorf("URL has invalid charm or bundle name: %q", originalURL)
+	}
+	return &r, nil
+}
+
+func parseV2URL(url *gourl.URL) (*URL, error) {
+	var r URL
+	r.Schema = "cs"
+	parts := strings.Split(strings.Trim(url.Path, "/"), "/")
+	if parts[0] == "u" {
+		if len(parts) < 3 {
+			return nil, fmt.Errorf(`charm or bundle URL %q malformed, expected "/u/<user>/<name>"`, url)
+		}
+		r.User, parts = parts[1], parts[2:]
+	}
+	if len(parts) > 1 && IsValidChannel(Channel(parts[0])) {
+		r.Channel, parts = Channel(parts[0]), parts[1:]
+	}
+	r.Name, parts = parts[0], parts[1:]
+	r.Revision = -1
+	if len(parts) > 0 {
+		revision, err := strconv.Atoi(parts[0])
+		if err == nil {
+			r.Revision = revision
+		} else {
+			r.Series = parts[0]
+			if !IsValidSeries(r.Series) {
+				return nil, fmt.Errorf("charm or bundle URL has invalid series: %q", url)
+			}
+			parts = parts[1:]
+			if len(parts) == 1 {
+				r.Revision, err = strconv.Atoi(parts[0])
+				if err != nil {
+					return nil, fmt.Errorf("charm or bundle URL has malformed revision: %q in %q", parts[0], url)
+				}
+			} else {
+				if len(parts) != 0 {
+					return nil, fmt.Errorf("charm or bundle URL has invalid form: %q", url)
+				}
+			}
+		}
+	}
+	if r.User != "" {
+		if !names.IsValidUser(r.User) {
+			return nil, fmt.Errorf("charm or bundle URL has invalid user name: %q", url)
+		}
+	}
+	if !IsValidName(r.Name) {
+		return nil, fmt.Errorf("URL has invalid charm or bundle name: %q", url)
+	}
+	return &r, nil
+}
+
+func (r *URL) path() string {
+	var parts []string
+	if r.User != "" {
+		parts = append(parts, fmt.Sprintf("~%s", r.User))
+	}
+	if r.Channel != "" {
+		parts = append(parts, string(r.Channel))
+	}
+	if r.Series != "" {
+		parts = append(parts, r.Series)
+	}
+	if r.Revision >= 0 {
+		parts = append(parts, fmt.Sprintf("%s-%d", r.Name, r.Revision))
+	} else {
+		parts = append(parts, r.Name)
+	}
+	return strings.Join(parts, "/")
+}
+
+func (r URL) Path() string {
+	return r.path()
+}
+
+// InferURL parses src as a reference and fills out the series in the
+// returned URL using defaultSeries if necessary.
+//
+// This function is deprecated. New code should use ParseURL instead.
+func InferURL(src, defaultSeries string) (*URL, error) {
+	u, err := ParseURL(src)
+	if err != nil {
+		return nil, err
+	}
+	if u.Series == "" {
+		if defaultSeries == "" {
+			return nil, fmt.Errorf("cannot infer charm or bundle URL for %q: charm or bundle url series is not resolved", src)
+		}
+		u.Series = defaultSeries
+	}
+	return u, nil
+}
+
+func (u URL) String() string {
+	return fmt.Sprintf("%s:%s", u.Schema, u.Path())
+}
+
+// GetBSON turns u into a bson.Getter so it can be saved directly
+// on a MongoDB database with mgo.
+func (u *URL) GetBSON() (interface{}, error) {
+	if u == nil {
+		return nil, nil
+	}
+	return u.String(), nil
+}
+
+// SetBSON turns u into a bson.Setter so it can be loaded directly
+// from a MongoDB database with mgo.
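+//
+// Together with GetBSON above, this lets *URL fields round-trip through
+// mgo as plain strings, e.g. (illustrative):
+//
+//	type charmDoc struct {
+//		URL *URL `bson:"url"`
+//	}
+//
+// where the URL is stored as its String() form and re-parsed on load.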
+func (u *URL) SetBSON(raw bson.Raw) error { + if raw.Kind == 10 { + return bson.SetZero + } + var s string + err := raw.Unmarshal(&s) + if err != nil { + return err + } + url, err := ParseURL(s) + if err != nil { + return err + } + *u = *url + return nil +} + +func (u *URL) MarshalJSON() ([]byte, error) { + if u == nil { + panic("cannot marshal nil *charm.URL") + } + return json.Marshal(u.String()) +} + +func (u *URL) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + url, err := ParseURL(s) + if err != nil { + return err + } + *u = *url + return nil +} + +// Quote translates a charm url string into one which can be safely used +// in a file path. ASCII letters, ASCII digits, dot and dash stay the +// same; other characters are translated to their hex representation +// surrounded by underscores. +func Quote(unsafe string) string { + safe := make([]byte, 0, len(unsafe)*4) + for i := 0; i < len(unsafe); i++ { + b := unsafe[i] + switch { + case b >= 'a' && b <= 'z', + b >= 'A' && b <= 'Z', + b >= '0' && b <= '9', + b == '.', + b == '-': + safe = append(safe, b) + default: + safe = append(safe, fmt.Sprintf("_%02x_", b)...) + } + } + return string(safe) +} === added file 'src/gopkg.in/juju/charm.v6-unstable/url_test.go' --- src/gopkg.in/juju/charm.v6-unstable/url_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charm.v6-unstable/url_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,601 @@ +// Copyright 2011, 2012, 2013 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package charm_test + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + + gc "gopkg.in/check.v1" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charm.v6-unstable" +) + +type URLSuite struct{} + +var _ = gc.Suite(&URLSuite{}) + +var urlTests = []struct { + s, err string + exact string + url *charm.URL +}{{ + s: "cs:~user/series/name", + url: &charm.URL{"cs", "user", "name", -1, "series", ""}, +}, { + s: "cs:~user/series/name-0", + url: &charm.URL{"cs", "user", "name", 0, "series", ""}, +}, { + s: "cs:series/name", + url: &charm.URL{"cs", "", "name", -1, "series", ""}, +}, { + s: "cs:series/name-42", + url: &charm.URL{"cs", "", "name", 42, "series", ""}, +}, { + s: "local:series/name-1", + url: &charm.URL{"local", "", "name", 1, "series", ""}, +}, { + s: "local:series/name", + url: &charm.URL{"local", "", "name", -1, "series", ""}, +}, { + s: "local:series/n0-0n-n0", + url: &charm.URL{"local", "", "n0-0n-n0", -1, "series", ""}, +}, { + s: "cs:~user/name", + url: &charm.URL{"cs", "user", "name", -1, "", ""}, +}, { + s: "cs:name", + url: &charm.URL{"cs", "", "name", -1, "", ""}, +}, { + s: "local:name", + url: &charm.URL{"local", "", "name", -1, "", ""}, +}, { + s: "cs:~user/development/series/name-0", + url: &charm.URL{"cs", "user", "name", 0, "series", charm.DevelopmentChannel}, +}, { + s: "cs:~user/development/series/name-0", + url: &charm.URL{"cs", "user", "name", 0, "series", charm.DevelopmentChannel}, +}, { + s: "cs:development/series/name", + url: &charm.URL{"cs", "", "name", -1, "series", charm.DevelopmentChannel}, +}, { + s: "cs:development/series/name-42", + url: &charm.URL{"cs", "", "name", 42, "series", charm.DevelopmentChannel}, +}, { + s: "cs:~user/development/name", + url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, +}, { + s: "cs:development/name", + url: &charm.URL{"cs", "", "name", -1, "", charm.DevelopmentChannel}, +}, { + s: "http://jujucharms.com/u/user/name/series/1", + url: 
&charm.URL{"cs", "user", "name", 1, "series", ""}, + exact: "cs:~user/series/name-1", +}, { + s: "http://www.jujucharms.com/u/user/name/series/1", + url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + exact: "cs:~user/series/name-1", +}, { + s: "https://www.jujucharms.com/u/user/name/series/1", + url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + exact: "cs:~user/series/name-1", +}, { + s: "https://jujucharms.com/u/user/name/series/1", + url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + exact: "cs:~user/series/name-1", +}, { + s: "https://jujucharms.com/u/user/name/series", + url: &charm.URL{"cs", "user", "name", -1, "series", ""}, + exact: "cs:~user/series/name", +}, { + s: "https://jujucharms.com/u/user/name/1", + url: &charm.URL{"cs", "user", "name", 1, "", ""}, + exact: "cs:~user/name-1", +}, { + s: "https://jujucharms.com/u/user/name", + url: &charm.URL{"cs", "user", "name", -1, "", ""}, + exact: "cs:~user/name", +}, { + s: "https://jujucharms.com/name", + url: &charm.URL{"cs", "", "name", -1, "", ""}, + exact: "cs:name", +}, { + s: "https://jujucharms.com/name/series", + url: &charm.URL{"cs", "", "name", -1, "series", ""}, + exact: "cs:series/name", +}, { + s: "https://jujucharms.com/name/1", + url: &charm.URL{"cs", "", "name", 1, "", ""}, + exact: "cs:name-1", +}, { + s: "https://jujucharms.com/name/series/1", + url: &charm.URL{"cs", "", "name", 1, "series", ""}, + exact: "cs:series/name-1", +}, { + s: "https://jujucharms.com/u/user/name/series/1/", + url: &charm.URL{"cs", "user", "name", 1, "series", ""}, + exact: "cs:~user/series/name-1", +}, { + s: "https://jujucharms.com/u/user/name/series/", + url: &charm.URL{"cs", "user", "name", -1, "series", ""}, + exact: "cs:~user/series/name", +}, { + s: "https://jujucharms.com/u/user/name/1/", + url: &charm.URL{"cs", "user", "name", 1, "", ""}, + exact: "cs:~user/name-1", +}, { + s: "https://jujucharms.com/u/user/name/", + url: &charm.URL{"cs", "user", "name", -1, "", ""}, + exact: "cs:~user/name", +}, { + s: "https://jujucharms.com/name/", + url: &charm.URL{"cs", "", "name", -1, "", ""}, + exact: "cs:name", +}, { + s: "https://jujucharms.com/name/series/", + url: &charm.URL{"cs", "", "name", -1, "series", ""}, + exact: "cs:series/name", +}, { + s: "https://jujucharms.com/name/1/", + url: &charm.URL{"cs", "", "name", 1, "", ""}, + exact: "cs:name-1", +}, { + s: "https://jujucharms.com/name/series/1/", + url: &charm.URL{"cs", "", "name", 1, "series", ""}, + exact: "cs:series/name-1", +}, { + s: "https://jujucharms.com/u/user/development/name/series/1", + url: &charm.URL{"cs", "user", "name", 1, "series", charm.DevelopmentChannel}, + exact: "cs:~user/development/series/name-1", +}, { + s: "https://jujucharms.com/u/user/development/name/series", + url: &charm.URL{"cs", "user", "name", -1, "series", charm.DevelopmentChannel}, + exact: "cs:~user/development/series/name", +}, { + s: "https://jujucharms.com/u/user/development/name/1", + url: &charm.URL{"cs", "user", "name", 1, "", charm.DevelopmentChannel}, + exact: "cs:~user/development/name-1", +}, { + s: "https://jujucharms.com/u/user/development/name", + url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, + exact: "cs:~user/development/name", +}, { + s: "https://jujucharms.com/development/name", + url: &charm.URL{"cs", "", "name", -1, "", charm.DevelopmentChannel}, + exact: "cs:development/name", +}, { + s: "https://jujucharms.com/development/name/series", + url: &charm.URL{"cs", "", "name", -1, "series", charm.DevelopmentChannel}, + exact: 
"cs:development/series/name", +}, { + s: "https://jujucharms.com/development/name/1", + url: &charm.URL{"cs", "", "name", 1, "", charm.DevelopmentChannel}, + exact: "cs:development/name-1", +}, { + s: "https://jujucharms.com/development/name/series/1", + url: &charm.URL{"cs", "", "name", 1, "series", charm.DevelopmentChannel}, + exact: "cs:development/series/name-1", +}, { + s: "https://jujucharms.com/u/user/development/name/series/", + url: &charm.URL{"cs", "user", "name", -1, "series", charm.DevelopmentChannel}, + exact: "cs:~user/development/series/name", +}, { + s: "https://jujucharms.com/u/user/development/name/1/", + url: &charm.URL{"cs", "user", "name", 1, "", charm.DevelopmentChannel}, + exact: "cs:~user/development/name-1", +}, { + s: "https://jujucharms.com/u/user/development/name/", + url: &charm.URL{"cs", "user", "name", -1, "", charm.DevelopmentChannel}, + exact: "cs:~user/development/name", +}, { + s: "https://jujucharms.com/", + err: `URL has invalid charm or bundle name: $URL`, +}, { + s: "https://jujucharms.com/bad.wolf", + err: `URL has invalid charm or bundle name: $URL`, +}, { + s: "https://jujucharms.com/u/", + err: "charm or bundle URL $URL malformed, expected \"/u//\"", +}, { + s: "https://jujucharms.com/u/badwolf", + err: "charm or bundle URL $URL malformed, expected \"/u//\"", +}, { + s: "https://jujucharms.com/name/series/badwolf", + err: "charm or bundle URL has malformed revision: \"badwolf\" in $URL", +}, { + s: "https://jujucharms.com/name/bad.wolf/42", + err: `charm or bundle URL has invalid series: $URL`, +}, { + s: "https://badwolf@jujucharms.com/name/series/42", + err: `charm or bundle URL $URL has unrecognized parts`, +}, { + s: "https://jujucharms.com/name/series/42#bad-wolf", + err: `charm or bundle URL $URL has unrecognized parts`, +}, { + s: "https://jujucharms.com/name/series/42?bad=wolf", + err: `charm or bundle URL $URL has unrecognized parts`, +}, { + s: "bs:~user/series/name-1", + err: `charm or bundle URL has invalid schema: $URL`, +}, { + s: ":foo", + err: `cannot parse charm or bundle URL: $URL`, +}, { + s: "cs:~1/series/name-1", + err: `charm or bundle URL has invalid user name: $URL`, +}, { + s: "cs:~user", + err: `URL without charm or bundle name: $URL`, +}, { + s: "cs:~user/1/name-1", + err: `charm or bundle URL has invalid series: $URL`, +}, { + s: "cs:~user/series/name-1-2", + err: `URL has invalid charm or bundle name: $URL`, +}, { + s: "cs:~user/series/name-1-name-2", + err: `URL has invalid charm or bundle name: $URL`, +}, { + s: "cs:~user/series/name--name-2", + err: `URL has invalid charm or bundle name: $URL`, +}, { + s: "cs:foo-1-2", + err: `URL has invalid charm or bundle name: $URL`, +}, { + s: "cs:~user/series/huh/name-1", + err: `charm or bundle URL has invalid form: $URL`, +}, { + s: "cs:~user/production/series/name-1", + err: `charm or bundle URL has invalid form: $URL`, +}, { + s: "cs:~user/development/series/badwolf/name-1", + err: `charm or bundle URL has invalid form: $URL`, +}, { + s: "cs:/name", + err: `charm or bundle URL has invalid series: $URL`, +}, { + s: "local:~user/series/name", + err: `local charm or bundle URL with user name: $URL`, +}, { + s: "local:~user/name", + err: `local charm or bundle URL with user name: $URL`, +}, { + s: "local:development/name", + err: `local charm or bundle URL with channel: $URL`, +}, { + s: "local:development/series/name-1", + err: `local charm or bundle URL with channel: $URL`, +}, { + s: "precise/wordpress", + exact: "cs:precise/wordpress", + url: &charm.URL{"cs", "", 
"wordpress", -1, "precise", ""}, +}, { + s: "foo", + exact: "cs:foo", + url: &charm.URL{"cs", "", "foo", -1, "", ""}, +}, { + s: "foo-1", + exact: "cs:foo-1", + url: &charm.URL{"cs", "", "foo", 1, "", ""}, +}, { + s: "n0-n0-n0", + exact: "cs:n0-n0-n0", + url: &charm.URL{"cs", "", "n0-n0-n0", -1, "", ""}, +}, { + s: "cs:foo", + exact: "cs:foo", + url: &charm.URL{"cs", "", "foo", -1, "", ""}, +}, { + s: "local:foo", + exact: "local:foo", + url: &charm.URL{"local", "", "foo", -1, "", ""}, +}, { + s: "series/foo", + exact: "cs:series/foo", + url: &charm.URL{"cs", "", "foo", -1, "series", ""}, +}, { + s: "development/foo", + exact: "cs:development/foo", + url: &charm.URL{"cs", "", "foo", -1, "", charm.DevelopmentChannel}, +}, { + s: "development/foo-1", + exact: "cs:development/foo-1", + url: &charm.URL{"cs", "", "foo", 1, "", charm.DevelopmentChannel}, +}, { + s: "development/n0-n0-n0", + exact: "cs:development/n0-n0-n0", + url: &charm.URL{"cs", "", "n0-n0-n0", -1, "", charm.DevelopmentChannel}, +}, { + s: "development/series/foo", + exact: "cs:development/series/foo", + url: &charm.URL{"cs", "", "foo", -1, "series", charm.DevelopmentChannel}, +}, { + s: "series/foo/bar", + err: `charm or bundle URL has invalid form: "series/foo/bar"`, +}, { + s: "cs:foo/~blah", + err: `URL has invalid charm or bundle name: "cs:foo/~blah"`, +}} + +func (s *URLSuite) TestParseURL(c *gc.C) { + for i, t := range urlTests { + c.Logf("test %d: %q", i, t.s) + + expectStr := t.s + if t.exact != "" { + expectStr = t.exact + } + url, uerr := charm.ParseURL(t.s) + if t.err != "" { + t.err = strings.Replace(t.err, "$URL", regexp.QuoteMeta(fmt.Sprintf("%q", t.s)), -1) + c.Assert(uerr, gc.ErrorMatches, t.err) + c.Assert(url, gc.IsNil) + continue + } + c.Assert(uerr, gc.IsNil) + c.Assert(url, gc.DeepEquals, t.url) + c.Assert(url.String(), gc.Equals, expectStr) + + // URL strings are generated as expected. Reversability is preserved + // with v1 URLs. 
+ if t.exact != "" { + c.Check(url.String(), gc.Equals, t.exact) + } else { + c.Check(url.String(), gc.Equals, t.s) + } + } +} + +var inferTests = []struct { + vague, exact string +}{ + {"foo", "cs:defseries/foo"}, + {"foo-1", "cs:defseries/foo-1"}, + {"n0-n0-n0", "cs:defseries/n0-n0-n0"}, + {"cs:foo", "cs:defseries/foo"}, + {"local:foo", "local:defseries/foo"}, + {"series/foo", "cs:series/foo"}, + {"cs:series/foo", "cs:series/foo"}, + {"local:series/foo", "local:series/foo"}, + {"cs:~user/foo", "cs:~user/defseries/foo"}, + {"cs:~user/series/foo", "cs:~user/series/foo"}, + {"local:~user/series/foo", "local:~user/series/foo"}, + {"bs:foo", "bs:defseries/foo"}, + {"cs:~1/foo", "cs:~1/defseries/foo"}, + {"cs:foo-1-2", "cs:defseries/foo-1-2"}, + {"development/foo", "cs:development/defseries/foo"}, + {"development/foo-1", "cs:development/defseries/foo-1"}, + {"development/series/foo", "cs:development/series/foo"}, + {"local:development/series/foo", "local:development/series/foo"}, + {"cs:~user/development/foo", "cs:~user/development/defseries/foo"}, + {"local:~user/development/series/foo", "local:~user/development/series/foo"}, + {"cs:~1/development/foo", "cs:~1/development/defseries/foo"}, +} + +func (s *URLSuite) TestInferURL(c *gc.C) { + for i, t := range inferTests { + c.Logf("test %d", i) + comment := gc.Commentf("InferURL(%q, %q)", t.vague, "defseries") + inferred, ierr := charm.InferURL(t.vague, "defseries") + parsed, perr := charm.ParseURL(t.exact) + if perr == nil { + c.Check(inferred, gc.DeepEquals, parsed, comment) + c.Check(ierr, gc.IsNil) + } else { + expect := perr.Error() + if t.vague != t.exact { + if colIdx := strings.Index(expect, ":"); colIdx > 0 { + expect = expect[:colIdx] + } + } + c.Check(ierr.Error(), gc.Matches, expect+".*", comment) + } + } + u, err := charm.InferURL("~blah", "defseries") + c.Assert(u, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "URL without charm or bundle name: .*") +} + +var inferNoDefaultSeriesTests = []struct { + vague, exact string + resolved bool +}{ + {"foo", "", false}, + {"foo-1", "", false}, + {"cs:foo", "", false}, + {"cs:~user/foo", "", false}, + {"series/foo", "cs:series/foo", true}, + {"cs:series/foo", "cs:series/foo", true}, + {"cs:~user/series/foo", "cs:~user/series/foo", true}, + {"development/foo", "", false}, + {"development/foo-1", "", false}, + {"cs:development/foo", "", false}, + {"cs:~user/development/foo", "", false}, + {"development/series/foo", "cs:development/series/foo", true}, + {"cs:development/series/foo", "cs:development/series/foo", true}, + {"cs:~user/development/series/foo", "cs:~user/development/series/foo", true}, +} + +func (s *URLSuite) TestInferURLNoDefaultSeries(c *gc.C) { + for i, t := range inferNoDefaultSeriesTests { + c.Logf("%d: %s", i, t.vague) + inferred, err := charm.InferURL(t.vague, "") + if t.exact == "" { + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot infer charm or bundle URL for %q: charm or bundle url series is not resolved", t.vague)) + } else { + parsed, err := charm.ParseURL(t.exact) + c.Assert(err, gc.IsNil) + c.Assert(inferred, gc.DeepEquals, parsed, gc.Commentf(`InferURL(%q, "")`, t.vague)) + } + } +} + +var validTests = []struct { + valid func(string) bool + string string + expect bool +}{ + + {charm.IsValidName, "", false}, + {charm.IsValidName, "wordpress", true}, + {charm.IsValidName, "Wordpress", false}, + {charm.IsValidName, "word-press", true}, + {charm.IsValidName, "word press", false}, + {charm.IsValidName, "word^press", false}, + {charm.IsValidName, "-wordpress", false}, + 
{charm.IsValidName, "wordpress-", false}, + {charm.IsValidName, "wordpress2", true}, + {charm.IsValidName, "wordpress-2", false}, + {charm.IsValidName, "word2-press2", true}, + + {charm.IsValidSeries, "", false}, + {charm.IsValidSeries, "precise", true}, + {charm.IsValidSeries, "Precise", false}, + {charm.IsValidSeries, "pre cise", false}, + {charm.IsValidSeries, "pre-cise", false}, + {charm.IsValidSeries, "pre^cise", false}, + {charm.IsValidSeries, "prec1se", true}, + {charm.IsValidSeries, "-precise", false}, + {charm.IsValidSeries, "precise-", false}, + {charm.IsValidSeries, "precise-1", false}, + {charm.IsValidSeries, "precise1", true}, + {charm.IsValidSeries, "pre-c1se", false}, +} + +func (s *URLSuite) TestValidCheckers(c *gc.C) { + for i, t := range validTests { + c.Logf("test %d: %s", i, t.string) + c.Assert(t.valid(t.string), gc.Equals, t.expect, gc.Commentf("%s", t.string)) + } +} + +var isValidChannelTests = []struct { + channel charm.Channel + expect bool +}{{ + channel: charm.DevelopmentChannel, + expect: true, +}, { + channel: "", +}, { + channel: "-development", +}, { + channel: "bad wolf", +}} + +func (s *URLSuite) TestIsValidChannel(c *gc.C) { + for i, t := range isValidChannelTests { + c.Logf("test %d: %s", i, t.channel) + c.Assert(charm.IsValidChannel(t.channel), gc.Equals, t.expect, gc.Commentf("%s", t.channel)) + } +} + +func (s *URLSuite) TestMustParseURL(c *gc.C) { + url := charm.MustParseURL("cs:series/name") + c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) + f := func() { charm.MustParseURL("local:@@/name") } + c.Assert(f, gc.PanicMatches, "charm or bundle URL has invalid series: .*") + f = func() { charm.MustParseURL("cs:~user") } + c.Assert(f, gc.PanicMatches, "URL without charm or bundle name: .*") + f = func() { charm.MustParseURL("cs:~user") } + c.Assert(f, gc.PanicMatches, "URL without charm or bundle name: .*") +} + +func (s *URLSuite) TestWithRevision(c *gc.C) { + url := charm.MustParseURL("cs:series/name") + other := url.WithRevision(1) + c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) + c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", 1, "series", ""}) + + // Should always copy. The opposite behavior is error prone. + c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) + c.Assert(other.WithRevision(1), gc.DeepEquals, other) +} + +func (s *URLSuite) TestWithChannel(c *gc.C) { + url := charm.MustParseURL("cs:series/name") + other := url.WithChannel("development") + c.Assert(url, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) + c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", "development"}) + + // Should always copy. The opposite behavior is error prone. + c.Assert(other.WithRevision(1), gc.Not(gc.Equals), other) + + // Set the channel back to empty. 
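As a minimal stand-alone sketch of the copy semantics asserted above (editorial, not part of the diffed source; it assumes the same charm import as the test file): WithRevision and WithChannel never mutate their receiver, so a parsed *charm.URL can be shared freely.

	base := charm.MustParseURL("cs:series/name")
	dev := base.WithChannel("development") // returns a copy; base is unchanged
	rev := dev.WithRevision(1)             // again a fresh copy
	fmt.Println(base) // cs:series/name
	fmt.Println(dev)  // cs:development/series/name
	fmt.Println(rev)  // cs:development/series/name-1
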
+ other = url.WithChannel("") + c.Assert(other, gc.DeepEquals, &charm.URL{"cs", "", "name", -1, "series", ""}) +} + +var codecs = []struct { + Marshal func(interface{}) ([]byte, error) + Unmarshal func([]byte, interface{}) error +}{{ + Marshal: bson.Marshal, + Unmarshal: bson.Unmarshal, +}, { + Marshal: json.Marshal, + Unmarshal: json.Unmarshal, +}} + +func (s *URLSuite) TestURLCodecs(c *gc.C) { + for i, codec := range codecs { + c.Logf("codec %d", i) + type doc struct { + URL *charm.URL + } + url := charm.MustParseURL("cs:series/name") + v0 := doc{url} + data, err := codec.Marshal(v0) + c.Assert(err, gc.IsNil) + var v doc + err = codec.Unmarshal(data, &v) + c.Assert(v, gc.DeepEquals, v0) + + // Check that the underlying representation + // is a string. + type strDoc struct { + URL string + } + var vs strDoc + err = codec.Unmarshal(data, &vs) + c.Assert(err, gc.IsNil) + c.Assert(vs.URL, gc.Equals, "cs:series/name") + + data, err = codec.Marshal(doc{}) + c.Assert(err, gc.IsNil) + err = codec.Unmarshal(data, &v) + c.Assert(err, gc.IsNil) + c.Assert(v.URL, gc.IsNil) + } +} + +func (s *URLSuite) TestJSONGarbage(c *gc.C) { + // unmarshalling json gibberish + for _, value := range []string{":{", `"cs:{}+<"`, `"cs:~_~/f00^^&^/baaaar$%-?"`} { + err := json.Unmarshal([]byte(value), new(struct{ URL *charm.URL })) + c.Check(err, gc.NotNil) + } +} + +type QuoteSuite struct{} + +var _ = gc.Suite(&QuoteSuite{}) + +func (s *QuoteSuite) TestUnmodified(c *gc.C) { + // Check that a string containing only valid + // chars stays unmodified. + in := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-" + out := charm.Quote(in) + c.Assert(out, gc.Equals, in) +} + +func (s *QuoteSuite) TestQuote(c *gc.C) { + // Check that invalid chars are translated correctly. + in := "hello_there/how'are~you-today.sir" + out := charm.Quote(in) + c.Assert(out, gc.Equals, "hello_5f_there_2f_how_27_are_7e_you-today.sir") +} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/LICENCE' --- src/gopkg.in/juju/charmrepo.v2-unstable/LICENCE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/LICENCE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. 
This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. 
If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/README.md' --- src/gopkg.in/juju/charmrepo.v2-unstable/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +# charmrepo +Charm repositories and charmstore client packages === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,59 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo + +import ( + "os" + "path/filepath" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// NewBundleAtPath creates and returns a bundle at a given path, +// and a URL that describes it. +func NewBundleAtPath(path string) (charm.Bundle, *charm.URL, error) { + if path == "" { + return nil, nil, errgo.New("path to bundle not specified") + } + _, err := os.Stat(path) + if isNotExistsError(err) { + return nil, nil, os.ErrNotExist + } else if err == nil && !isValidCharmOrBundlePath(path) { + return nil, nil, InvalidPath(path) + } + b, err := charm.ReadBundle(path) + if err != nil { + if isNotExistsError(err) { + return nil, nil, BundleNotFound(path) + } + return nil, nil, err + } + absPath, err := filepath.Abs(path) + if err != nil { + return nil, nil, err + } + _, name := filepath.Split(absPath) + url := &charm.URL{ + Schema: "local", + Name: name, + Series: "bundle", + Revision: 0, + } + return b, url, nil +} + +// ReadBundleFile attempts to read the file at path +// and interpret it as a bundle. +func ReadBundleFile(path string) (*charm.BundleData, error) { + f, err := os.Open(path) + if err != nil { + if isNotExistsError(err) { + return nil, BundleNotFound(path) + } + return nil, err + } + defer f.Close() + return charm.ReadBundleData(f) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/bundlepath_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,112 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package charmrepo_test + +import ( + "io/ioutil" + "os" + "path/filepath" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/yaml.v1" + + "gopkg.in/juju/charmrepo.v2-unstable" +) + +type bundlePathSuite struct { + repoPath string +} + +var _ = gc.Suite(&bundlePathSuite{}) + +func (s *bundlePathSuite) SetUpTest(c *gc.C) { + s.repoPath = c.MkDir() +} + +func (s *bundlePathSuite) cloneCharmDir(path, name string) string { + return TestCharms.ClonedDirPath(path, name) +} + +func (s *bundlePathSuite) TestNoPath(c *gc.C) { + _, _, err := charmrepo.NewBundleAtPath("") + c.Assert(err, gc.ErrorMatches, "path to bundle not specified") +} + +func (s *bundlePathSuite) TestInvalidPath(c *gc.C) { + _, _, err := charmrepo.NewBundleAtPath("/foo") + c.Assert(err, gc.Equals, os.ErrNotExist) +} + +func (s *bundlePathSuite) TestRepoURL(c *gc.C) { + _, _, err := charmrepo.NewCharmAtPath("cs:foo", "trusty") + c.Assert(err, gc.Equals, os.ErrNotExist) +} + +func (s *bundlePathSuite) TestInvalidRelativePath(c *gc.C) { + _, _, err := charmrepo.NewBundleAtPath("./foo") + c.Assert(err, gc.Equals, os.ErrNotExist) +} + +func (s *bundlePathSuite) TestRelativePath(c *gc.C) { + relDir := filepath.Join(TestCharms.Path(), "bundle") + cwd, err := os.Getwd() + c.Assert(err, jc.ErrorIsNil) + defer os.Chdir(cwd) + c.Assert(os.Chdir(relDir), jc.ErrorIsNil) + _, _, err = charmrepo.NewBundleAtPath("openstack") + c.Assert(charmrepo.IsInvalidPathError(err), jc.IsTrue) +} + +func (s *bundlePathSuite) TestNoBundleAtPath(c *gc.C) { + _, _, err := charmrepo.NewBundleAtPath(c.MkDir()) + c.Assert(err, gc.ErrorMatches, `bundle not found:.*`) +} + +func (s *bundlePathSuite) TestGetBundle(c *gc.C) { + bundleDir := filepath.Join(TestCharms.Path(), "bundle", "openstack") + b, url, err := charmrepo.NewBundleAtPath(bundleDir) + c.Assert(err, jc.ErrorIsNil) + c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("openstack").Data()) + c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:bundle/openstack-0")) +} + +func (s *bundlePathSuite) TestGetBundleSymlink(c *gc.C) { + realPath := TestCharms.ClonedBundleDirPath(c.MkDir(), "wordpress-simple") + bundlesPath := c.MkDir() + linkPath := filepath.Join(bundlesPath, "wordpress-simple") + err := os.Symlink(realPath, linkPath) + c.Assert(err, jc.ErrorIsNil) + url := charm.MustParseURL("local:bundle/wordpress-simple") + + b, url, err := charmrepo.NewBundleAtPath(filepath.Join(bundlesPath, "wordpress-simple")) + c.Assert(err, jc.ErrorIsNil) + c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("wordpress-simple").Data()) + c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:bundle/wordpress-simple-0")) +} + +func (s *bundlePathSuite) TestGetBundleLocalFile(c *gc.C) { + bundlePath := filepath.Join(c.MkDir(), "mybundle") + data := ` +services: + wordpress: + charm: wordpress + num_units: 1 +`[1:] + err := ioutil.WriteFile(bundlePath, []byte(data), 0644) + c.Assert(err, jc.ErrorIsNil) + + bundleData, err := charmrepo.ReadBundleFile(bundlePath) + c.Assert(err, jc.ErrorIsNil) + out, err := yaml.Marshal(bundleData) + c.Assert(err, jc.ErrorIsNil) + c.Assert(string(out), jc.DeepEquals, data) +} + +func (s *bundlePathSuite) TestGetBundleLocalFileNotExists(c *gc.C) { + bundlePath := filepath.Join(c.MkDir(), "mybundle") + _, err := charmrepo.ReadBundleFile(bundlePath) + c.Assert(err, gc.ErrorMatches, `bundle not found:.*`) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/charmpath.go' --- 
src/gopkg.in/juju/charmrepo.v2-unstable/charmpath.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/charmpath.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,88 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmrepo
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+)
+
+func isNotExistsError(err error) bool {
+	if os.IsNotExist(err) {
+		return true
+	}
+	// On Windows, we get a path error due to a GetFileAttributesEx syscall.
+	// To avoid being too prescriptive, we'll simply check for the error
+	// type and not any content.
+	if _, ok := err.(*os.PathError); ok {
+		return true
+	}
+	return false
+}
+
+func isValidCharmOrBundlePath(path string) bool {
+	// Require an explicitly relative (leading ".") or absolute path;
+	// a bare name would be ambiguous with a charm or bundle URL.
+	return strings.HasPrefix(path, ".") || filepath.IsAbs(path)
+}
+
+// NewCharmAtPath returns the charm represented by this path,
+// and a URL that describes it. If the series is empty,
+// the charm's default series is used, if any.
+// Otherwise, the series is validated against those the
+// charm declares it supports.
+func NewCharmAtPath(path, series string) (charm.Charm, *charm.URL, error) {
+	return NewCharmAtPathForceSeries(path, series, false)
+}
+
+// NewCharmAtPathForceSeries returns the charm represented by this path,
+// and a URL that describes it. If the series is empty,
+// the charm's default series is used, if any.
+// Otherwise, the series is validated against those the
+// charm declares it supports. If force is true, then any
+// series validation errors are ignored and the requested
+// series is used regardless. Note though that it is still
+// an error if the series is not specified and the charm does not
+// define any.
+func NewCharmAtPathForceSeries(path, series string, force bool) (charm.Charm, *charm.URL, error) {
+	if path == "" {
+		return nil, nil, errgo.New("empty charm path")
+	}
+	_, err := os.Stat(path)
+	if isNotExistsError(err) {
+		return nil, nil, os.ErrNotExist
+	} else if err == nil && !isValidCharmOrBundlePath(path) {
+		return nil, nil, InvalidPath(path)
+	}
+	ch, err := charm.ReadCharm(path)
+	if err != nil {
+		if isNotExistsError(err) {
+			return nil, nil, CharmNotFound(path)
+		}
+		return nil, nil, err
+	}
+	absPath, err := filepath.Abs(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	_, name := filepath.Split(absPath)
+	meta := ch.Meta()
+	seriesToUse := series
+	if !force || series == "" {
+		seriesToUse, err = charm.SeriesForCharm(series, meta.Series)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	url := &charm.URL{
+		Schema:   "local",
+		Name:     name,
+		Series:   seriesToUse,
+		Revision: ch.Revision(),
+	}
+	return ch, url, nil
+}
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/charmpath_test.go'
--- src/gopkg.in/juju/charmrepo.v2-unstable/charmpath_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/charmpath_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,148 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmrepo_test
+
+import (
+	"os"
+	"path/filepath"
+
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+
+	"gopkg.in/juju/charmrepo.v2-unstable"
+)
+
+type charmPathSuite struct {
+	repoPath string
+}
+
+var _ = gc.Suite(&charmPathSuite{})
+
+func (s *charmPathSuite) SetUpTest(c *gc.C) {
+	s.repoPath = c.MkDir()
+}
+
+func (s *charmPathSuite) cloneCharmDir(path, name string) string {
+	return TestCharms.ClonedDirPath(path, name)
+}
+
+func (s *charmPathSuite) TestNoPath(c *gc.C) {
+	_, _, err := charmrepo.NewCharmAtPath("", "trusty")
+	c.Assert(err, gc.ErrorMatches, "empty charm path")
+}
+
+func (s *charmPathSuite) TestInvalidPath(c *gc.C) {
+	_, _, err := charmrepo.NewCharmAtPath("/foo", "trusty")
+	c.Assert(err, gc.Equals, os.ErrNotExist)
+}
+
+func (s *charmPathSuite) TestRepoURL(c *gc.C) {
+	_, _, err := charmrepo.NewCharmAtPath("cs:foo", "trusty")
+	c.Assert(err, gc.Equals, os.ErrNotExist)
+}
+
+func (s *charmPathSuite) TestInvalidRelativePath(c *gc.C) {
+	_, _, err := charmrepo.NewCharmAtPath("./foo", "trusty")
+	c.Assert(err, gc.Equals, os.ErrNotExist)
+}
+
+func (s *charmPathSuite) TestRelativePath(c *gc.C) {
+	s.cloneCharmDir(s.repoPath, "mysql")
+	cwd, err := os.Getwd()
+	c.Assert(err, jc.ErrorIsNil)
+	defer os.Chdir(cwd)
+	c.Assert(os.Chdir(s.repoPath), jc.ErrorIsNil)
+	_, _, err = charmrepo.NewCharmAtPath("mysql", "trusty")
+	c.Assert(charmrepo.IsInvalidPathError(err), jc.IsTrue)
+}
+
+func (s *charmPathSuite) TestNoCharmAtPath(c *gc.C) {
+	_, _, err := charmrepo.NewCharmAtPath(c.MkDir(), "trusty")
+	c.Assert(err, gc.ErrorMatches, "charm not found.*")
+}
+
+func (s *charmPathSuite) TestCharm(c *gc.C) {
+	charmDir := filepath.Join(s.repoPath, "mysql")
+	s.cloneCharmDir(s.repoPath, "mysql")
+	ch, url, err := charmrepo.NewCharmAtPath(charmDir, "quantal")
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(ch.Meta().Name, gc.Equals, "mysql")
+	c.Assert(ch.Revision(), gc.Equals, 1)
+	c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:quantal/mysql-1"))
+}
+
+func (s *charmPathSuite) TestNoSeriesSpecified(c *gc.C) {
+	charmDir := filepath.Join(s.repoPath, "mysql")
+	s.cloneCharmDir(s.repoPath, "mysql")
+	_, _, err := charmrepo.NewCharmAtPath(charmDir, "")
+	c.Assert(err, gc.ErrorMatches, "series not specified and charm does not define any")
+}
+
+func (s *charmPathSuite) TestNoSeriesSpecifiedForceStillFails(c *gc.C) {
+	charmDir := filepath.Join(s.repoPath, "mysql")
+	s.cloneCharmDir(s.repoPath, "mysql")
+	_, _, err := charmrepo.NewCharmAtPathForceSeries(charmDir, "", true)
+	c.Assert(err, gc.ErrorMatches, "series not specified and charm does not define any")
+}
+
+func (s *charmPathSuite) TestMultiSeriesDefault(c *gc.C) {
+	charmDir := filepath.Join(s.repoPath, "multi-series")
+	s.cloneCharmDir(s.repoPath, "multi-series")
+	ch, url, err := charmrepo.NewCharmAtPath(charmDir, "")
+	c.Assert(err, gc.IsNil)
+	c.Assert(ch.Meta().Name, gc.Equals, "new-charm-with-multi-series")
+	c.Assert(ch.Revision(), gc.Equals, 7)
+	c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:precise/multi-series-7"))
+}
+
+func (s *charmPathSuite) TestMultiSeries(c *gc.C) {
+	charmDir := filepath.Join(s.repoPath, "multi-series")
+	s.cloneCharmDir(s.repoPath, "multi-series")
+	ch, url, err := charmrepo.NewCharmAtPath(charmDir, "trusty")
+	c.Assert(err, gc.IsNil)
+	c.Assert(ch.Meta().Name, gc.Equals, "new-charm-with-multi-series")
+	c.Assert(ch.Revision(), gc.Equals, 7)
+	c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:trusty/multi-series-7"))
+}
+
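As a rough usage sketch of the API these tests pin down (editorial, not part of the diffed source; the /tmp/charms/mysql directory and the chosen series are hypothetical), client code loads a charm from disk like this; NewBundleAtPath is the bundle-side analogue and always yields a URL with series "bundle":

	// Force a series the charm may not declare; with force=false the
	// series would instead be validated against the charm's metadata.
	ch, curl, err := charmrepo.NewCharmAtPathForceSeries("/tmp/charms/mysql", "wily", true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(curl)           // e.g. local:wily/mysql-1
	fmt.Println(ch.Meta().Name) // mysql
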
+func (s *charmPathSuite) TestUnsupportedSeries(c *gc.C) { + charmDir := filepath.Join(s.repoPath, "multi-series") + s.cloneCharmDir(s.repoPath, "multi-series") + _, _, err := charmrepo.NewCharmAtPath(charmDir, "wily") + c.Assert(err, gc.ErrorMatches, `series "wily" not supported by charm, supported series are.*`) +} + +func (s *charmPathSuite) TestUnsupportedSeriesNoForce(c *gc.C) { + charmDir := filepath.Join(s.repoPath, "multi-series") + s.cloneCharmDir(s.repoPath, "multi-series") + _, _, err := charmrepo.NewCharmAtPathForceSeries(charmDir, "wily", false) + c.Assert(err, gc.ErrorMatches, `series "wily" not supported by charm, supported series are.*`) +} + +func (s *charmPathSuite) TestUnsupportedSeriesForce(c *gc.C) { + charmDir := filepath.Join(s.repoPath, "multi-series") + s.cloneCharmDir(s.repoPath, "multi-series") + ch, url, err := charmrepo.NewCharmAtPathForceSeries(charmDir, "wily", true) + c.Assert(err, jc.ErrorIsNil) + c.Assert(ch.Meta().Name, gc.Equals, "new-charm-with-multi-series") + c.Assert(ch.Revision(), gc.Equals, 7) + c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:wily/multi-series-7")) +} + +func (s *charmPathSuite) TestFindsSymlinks(c *gc.C) { + realPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") + charmsPath := c.MkDir() + linkPath := filepath.Join(charmsPath, "dummy") + err := os.Symlink(realPath, linkPath) + c.Assert(err, gc.IsNil) + + ch, url, err := charmrepo.NewCharmAtPath(filepath.Join(charmsPath, "dummy"), "quantal") + c.Assert(err, gc.IsNil) + c.Assert(ch.Revision(), gc.Equals, 1) + c.Assert(ch.Meta().Name, gc.Equals, "dummy") + c.Assert(ch.Config().Options["title"].Default, gc.Equals, "My Title") + c.Assert(ch.(*charm.CharmDir).Path, gc.Equals, linkPath) + c.Assert(url, gc.DeepEquals, charm.MustParseURL("local:quantal/dummy-1")) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/charmstore.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,286 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo + +import ( + "crypto/sha512" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + + "github.com/juju/utils" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" +) + +// CacheDir stores the charm cache directory path. +var CacheDir string + +// CharmStore is a repository Interface that provides access to the public Juju +// charm store. +type CharmStore struct { + client *csclient.Client +} + +var _ Interface = (*CharmStore)(nil) + +// NewCharmStoreParams holds parameters for instantiating a new CharmStore. +type NewCharmStoreParams struct { + // URL holds the root endpoint URL of the charm store, + // with no trailing slash, not including the version. + // For example https://api.jujucharms.com/charmstore + // If empty, the default charm store client location is used. + URL string + + // HTTPClient holds the HTTP client to use when making + // requests to the store. If nil, httpbakery.NewHTTPClient will + // be used. + HTTPClient *http.Client + + // VisitWebPage is called when authorization requires that + // the user visits a web page to authenticate themselves. + // If nil, a default function that returns an error will be used. 
+ VisitWebPage func(url *url.URL) error +} + +// NewCharmStore creates and returns a charm store repository. +// The given parameters are used to instantiate the charm store. +// +// The errors returned from the interface methods will +// preserve the causes returned from the underlying csclient +// methods. +func NewCharmStore(p NewCharmStoreParams) *CharmStore { + return &CharmStore{ + client: csclient.New(csclient.Params{ + URL: p.URL, + HTTPClient: p.HTTPClient, + VisitWebPage: p.VisitWebPage, + }), + } +} + +// Get implements Interface.Get. +func (s *CharmStore) Get(curl *charm.URL) (charm.Charm, error) { + // The cache location must have been previously set. + if CacheDir == "" { + panic("charm cache directory path is empty") + } + if curl.Series == "bundle" { + return nil, errgo.Newf("expected a charm URL, got bundle URL %q", curl) + } + path, err := s.archivePath(curl) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + return charm.ReadCharmArchive(path) +} + +// GetBundle implements Interface.GetBundle. +func (s *CharmStore) GetBundle(curl *charm.URL) (charm.Bundle, error) { + // The cache location must have been previously set. + if CacheDir == "" { + panic("charm cache directory path is empty") + } + if curl.Series != "bundle" { + return nil, errgo.Newf("expected a bundle URL, got charm URL %q", curl) + } + path, err := s.archivePath(curl) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + return charm.ReadBundleArchive(path) +} + +// archivePath returns a local path to the downloaded archive of the given +// charm or bundle URL, storing it in CacheDir, which it creates if necessary. +// If an archive with a matching SHA hash already exists locally, it will use +// the local version. +func (s *CharmStore) archivePath(curl *charm.URL) (string, error) { + // Prepare the cache directory and retrieve the entity archive. + if err := os.MkdirAll(CacheDir, 0755); err != nil { + return "", errgo.Notef(err, "cannot create the cache directory") + } + etype := "charm" + if curl.Series == "bundle" { + etype = "bundle" + } + r, id, expectHash, expectSize, err := s.client.GetArchive(curl) + if err != nil { + if errgo.Cause(err) == params.ErrNotFound { + // Make a prettier error message for the user. + return "", errgo.WithCausef(nil, params.ErrNotFound, "cannot retrieve %q: %s not found", curl, etype) + } + return "", errgo.NoteMask(err, fmt.Sprintf("cannot retrieve %s %q", etype, curl), errgo.Any) + } + defer r.Close() + + // Check if the archive already exists in the cache. + path := filepath.Join(CacheDir, charm.Quote(id.String())+"."+etype) + if verifyHash384AndSize(path, expectHash, expectSize) == nil { + return path, nil + } + + // Verify and save the new archive. + f, err := ioutil.TempFile(CacheDir, "charm-download") + if err != nil { + return "", errgo.Notef(err, "cannot make temporary file") + } + defer f.Close() + hash := sha512.New384() + size, err := io.Copy(io.MultiWriter(hash, f), r) + if err != nil { + return "", errgo.Notef(err, "cannot read entity archive") + } + if size != expectSize { + return "", errgo.Newf("size mismatch; network corruption?") + } + if fmt.Sprintf("%x", hash.Sum(nil)) != expectHash { + return "", errgo.Newf("hash mismatch; network corruption?") + } + + // Move the archive to the expected place, and return the charm. + + // Note that we need to close the temporary file before moving + // it because otherwise Windows prohibits the rename. 
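For reference, the expectHash checked above is the hex-encoded SHA-384 digest of the archive body; a minimal stand-alone sketch of recomputing it for a local file (editorial, not part of the diffed source; archivePath names an arbitrary file and is hypothetical, and the sketch assumes its own os, io, fmt, log and crypto/sha512 imports):

	fh, err := os.Open(archivePath)
	if err != nil {
		log.Fatal(err)
	}
	defer fh.Close()
	h := sha512.New384() // crypto/sha512
	if _, err := io.Copy(h, fh); err != nil {
		log.Fatal(err)
	}
	digest := fmt.Sprintf("%x", h.Sum(nil)) // compare against expectHash
	fmt.Println(digest)
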
+ f.Close() + if err := utils.ReplaceFile(f.Name(), path); err != nil { + return "", errgo.Notef(err, "cannot move the entity archive") + } + return path, nil +} + +func verifyHash384AndSize(path, expectHash string, expectSize int64) error { + f, err := os.Open(path) + if err != nil { + return errgo.Mask(err) + } + defer f.Close() + hash := sha512.New384() + size, err := io.Copy(hash, f) + if err != nil { + return errgo.Mask(err) + } + if size != expectSize { + logger.Debugf("size mismatch for %q", path) + return errgo.Newf("size mismatch for %q", path) + } + if fmt.Sprintf("%x", hash.Sum(nil)) != expectHash { + logger.Debugf("hash mismatch for %q", path) + return errgo.Newf("hash mismatch for %q", path) + } + return nil +} + +// Latest implements Interface.Latest. +func (s *CharmStore) Latest(curls ...*charm.URL) ([]CharmRevision, error) { + if len(curls) == 0 { + return nil, nil + } + + // Prepare the request to the charm store. + urls := make([]string, len(curls)) + values := url.Values{} + // Include the ignore-auth flag so that non-public results do not generate + // an error for the whole request. + values.Add("ignore-auth", "1") + values.Add("include", "id-revision") + values.Add("include", "hash256") + for i, curl := range curls { + url := curl.WithRevision(-1).String() + urls[i] = url + values.Add("id", url) + } + u := url.URL{ + Path: "/meta/any", + RawQuery: values.Encode(), + } + + // Execute the request and retrieve results. + var results map[string]struct { + Meta struct { + IdRevision params.IdRevisionResponse `json:"id-revision"` + Hash256 params.HashResponse `json:"hash256"` + } + } + if err := s.client.Get(u.String(), &results); err != nil { + return nil, errgo.NoteMask(err, "cannot get metadata from the charm store", errgo.Any) + } + + // Build the response. + responses := make([]CharmRevision, len(curls)) + for i, url := range urls { + result, found := results[url] + if !found { + responses[i] = CharmRevision{ + Err: CharmNotFound(url), + } + continue + } + responses[i] = CharmRevision{ + Revision: result.Meta.IdRevision.Revision, + Sha256: result.Meta.Hash256.Sum, + } + } + return responses, nil +} + +// Resolve implements Interface.Resolve. +func (s *CharmStore) Resolve(ref *charm.URL) (*charm.URL, []string, error) { + var result struct { + Id params.IdResponse + SupportedSeries params.SupportedSeriesResponse + } + if _, err := s.client.Meta(ref, &result); err != nil { + if errgo.Cause(err) == params.ErrNotFound { + // Make a prettier error message for the user. + etype := "charm" + switch ref.Series { + case "bundle": + etype = "bundle" + case "": + etype = "charm or bundle" + } + return nil, nil, errgo.WithCausef(nil, params.ErrNotFound, "cannot resolve URL %q: %s not found", ref, etype) + } + return nil, nil, errgo.NoteMask(err, fmt.Sprintf("cannot resolve charm URL %q", ref), errgo.Any) + } + return result.Id.Id, result.SupportedSeries.SupportedSeries, nil +} + +// URL returns the root endpoint URL of the charm store. +func (s *CharmStore) URL() string { + return s.client.ServerURL() +} + +// WithTestMode returns a repository Interface where test mode is enabled, +// meaning charm store download stats are not increased when charms are +// retrieved. +func (s *CharmStore) WithTestMode() *CharmStore { + newRepo := *s + newRepo.client.DisableStats() + return &newRepo +} + +// JujuMetadataHTTPHeader is the HTTP header name used to send Juju metadata +// attributes to the charm store. 
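A short sketch of the client-side flow for the metadata header defined below (editorial, not part of the diffed source; the attribute key/value and cache path are hypothetical). Each attribute becomes one "Juju-Metadata: key=value" header on subsequent store requests:

	charmrepo.CacheDir = "/tmp/charm-cache" // Get panics if this is unset
	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{})
	repo = repo.WithJujuAttrs(map[string]string{
		"environment_uuid": "some-uuid", // hypothetical attribute
	})
	if _, err := repo.Get(charm.MustParseURL("cs:trusty/wordpress")); err != nil {
		log.Fatal(err)
	}
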
+const JujuMetadataHTTPHeader = "Juju-Metadata"
+
+// WithJujuAttrs returns a repository Interface with the Juju metadata
+// attributes set.
+func (s *CharmStore) WithJujuAttrs(attrs map[string]string) *CharmStore {
+	newRepo := *s
+	header := make(http.Header)
+	for k, v := range attrs {
+		header.Add(JujuMetadataHTTPHeader, k+"="+v)
+	}
+	newRepo.client.SetHTTPHeader(header)
+	return &newRepo
+}
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go'
--- src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/charmstore_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,617 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmrepo_test
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	jujutesting "github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmstore.v5-unstable"
+
+	"gopkg.in/juju/charmrepo.v2-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing"
+)
+
+type charmStoreSuite struct {
+	jujutesting.IsolationSuite
+}
+
+var _ = gc.Suite(&charmStoreSuite{})
+
+func (s *charmStoreSuite) TestURL(c *gc.C) {
+	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
+		URL: "https://1.2.3.4/charmstore",
+	})
+	c.Assert(repo.URL(), gc.Equals, "https://1.2.3.4/charmstore")
+}
+
+func (s *charmStoreSuite) TestDefaultURL(c *gc.C) {
+	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{})
+	c.Assert(repo.URL(), gc.Equals, csclient.ServerURL)
+}
+
+type charmStoreBaseSuite struct {
+	charmtesting.IsolatedMgoSuite
+	srv     *httptest.Server
+	client  *csclient.Client
+	handler charmstore.HTTPCloseHandler
+	repo    *charmrepo.CharmStore
+}
+
+var _ = gc.Suite(&charmStoreBaseSuite{})
+
+func (s *charmStoreBaseSuite) SetUpTest(c *gc.C) {
+	s.IsolatedMgoSuite.SetUpTest(c)
+	s.startServer(c)
+	s.repo = charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
+		URL: s.srv.URL,
+	})
+	s.PatchValue(&charmrepo.CacheDir, c.MkDir())
+}
+
+func (s *charmStoreBaseSuite) TearDownTest(c *gc.C) {
+	s.srv.Close()
+	s.handler.Close()
+	s.IsolatedMgoSuite.TearDownTest(c)
+}
+
+func (s *charmStoreBaseSuite) startServer(c *gc.C) {
+	serverParams := charmstore.ServerParams{
+		AuthUsername: "test-user",
+		AuthPassword: "test-password",
+	}
+
+	db := s.Session.DB("charmstore")
+	handler, err := charmstore.NewServer(db, nil, "", serverParams, charmstore.V4)
+	c.Assert(err, gc.IsNil)
+	s.handler = handler
+	s.srv = httptest.NewServer(handler)
+	s.client = csclient.New(csclient.Params{
+		URL:      s.srv.URL,
+		User:     serverParams.AuthUsername,
+		Password: serverParams.AuthPassword,
+	})
+}
+
+// addCharm uploads a charm with a promulgated revision to the testing charm
+// store, and returns the resulting charm and charm URL.
+func (s *charmStoreBaseSuite) addCharm(c *gc.C, urlStr, name string) (charm.Charm, *charm.URL) {
+	id := charm.MustParseURL(urlStr)
+	promulgatedRevision := -1
+	if id.User == "" {
+		id.User = "who"
+		promulgatedRevision = id.Revision
+	}
+	ch := TestCharms.CharmArchive(c.MkDir(), name)
+
+	// Upload the charm.
+ err := s.client.UploadCharmWithRevision(id, ch, promulgatedRevision) + c.Assert(err, gc.IsNil) + + // Allow read permissions to everyone. + err = s.client.Put("/"+id.Path()+"/meta/perm/read", []string{params.Everyone}) + c.Assert(err, jc.ErrorIsNil) + + return ch, id +} + +// addCharmNoRevision uploads a charm to the testing charm store, and returns the +// resulting charm and charm URL. +func (s *charmStoreBaseSuite) addCharmNoRevision(c *gc.C, urlStr, name string) (charm.Charm, *charm.URL) { + id := charm.MustParseURL(urlStr) + if id.User == "" { + id.User = "who" + } + ch := TestCharms.CharmArchive(c.MkDir(), name) + + // Upload the charm. + url, err := s.client.UploadCharm(id, ch) + c.Assert(err, gc.IsNil) + + // Allow read permissions to everyone. + err = s.client.Put("/"+url.Path()+"/meta/perm/read", []string{params.Everyone}) + c.Assert(err, jc.ErrorIsNil) + + return ch, url +} + +// addBundle uploads a bundle to the testing charm store, and returns the +// resulting bundle and bundle URL. +func (s *charmStoreBaseSuite) addBundle(c *gc.C, urlStr, name string) (charm.Bundle, *charm.URL) { + id := charm.MustParseURL(urlStr) + promulgatedRevision := -1 + if id.User == "" { + id.User = "who" + promulgatedRevision = id.Revision + } + b := TestCharms.BundleArchive(c.MkDir(), name) + + // Upload the bundle. + err := s.client.UploadBundleWithRevision(id, b, promulgatedRevision) + c.Assert(err, gc.IsNil) + + // Allow read permissions to everyone. + err = s.client.Put("/"+id.Path()+"/meta/perm/read", []string{params.Everyone}) + c.Assert(err, jc.ErrorIsNil) + + // Return the bundle and its URL. + return b, id +} + +type charmStoreRepoSuite struct { + charmStoreBaseSuite +} + +var _ = gc.Suite(&charmStoreRepoSuite{}) + +// checkCharmDownloads checks that the charm represented by the given URL has +// been downloaded the expected number of times. +func (s *charmStoreRepoSuite) checkCharmDownloads(c *gc.C, url *charm.URL, expect int) { + key := []string{params.StatsArchiveDownload, url.Series, url.Name, url.User, strconv.Itoa(url.Revision)} + path := "/stats/counter/" + strings.Join(key, ":") + var count int + + getDownloads := func() int { + var result []params.Statistic + err := s.client.Get(path, &result) + c.Assert(err, jc.ErrorIsNil) + return int(result[0].Count) + } + + for retry := 0; retry < 10; retry++ { + time.Sleep(100 * time.Millisecond) + if count = getDownloads(); count == expect { + if expect == 0 && retry < 2 { + // Wait a bit to make sure. + continue + } + return + } + } + c.Errorf("downloads count for %s is %d, expected %d", url, count, expect) +} + +func (s *charmStoreRepoSuite) TestGet(c *gc.C) { + expect, url := s.addCharm(c, "cs:~who/trusty/mysql-0", "mysql") + ch, err := s.repo.Get(url) + c.Assert(err, jc.ErrorIsNil) + checkCharm(c, ch, expect) +} + +func (s *charmStoreRepoSuite) TestGetPromulgated(c *gc.C) { + expect, url := s.addCharm(c, "trusty/mysql-42", "mysql") + ch, err := s.repo.Get(url) + c.Assert(err, jc.ErrorIsNil) + checkCharm(c, ch, expect) +} + +func (s *charmStoreRepoSuite) TestGetRevisions(c *gc.C) { + s.addCharm(c, "~dalek/trusty/riak-0", "riak") + expect1, url1 := s.addCharm(c, "~dalek/trusty/riak-1", "riak") + expect2, _ := s.addCharm(c, "~dalek/trusty/riak-2", "riak") + + // Retrieve an old revision. + ch, err := s.repo.Get(url1) + c.Assert(err, jc.ErrorIsNil) + checkCharm(c, ch, expect1) + + // Retrieve the latest revision. 
+	ch, err = s.repo.Get(charm.MustParseURL("cs:~dalek/trusty/riak"))
+	c.Assert(err, jc.ErrorIsNil)
+	checkCharm(c, ch, expect2)
+}
+
+func (s *charmStoreRepoSuite) TestGetCache(c *gc.C) {
+	_, url := s.addCharm(c, "~who/trusty/mysql-42", "mysql")
+	ch, err := s.repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+	path := ch.(*charm.CharmArchive).Path
+	c.Assert(hashOfPath(c, path), gc.Equals, hashOfCharm(c, "mysql"))
+}
+
+func (s *charmStoreRepoSuite) TestGetSameCharm(c *gc.C) {
+	_, url := s.addCharm(c, "precise/wordpress-47", "wordpress")
+	getModTime := func(path string) time.Time {
+		info, err := os.Stat(path)
+		c.Assert(err, jc.ErrorIsNil)
+		return info.ModTime()
+	}
+
+	// Retrieve a charm.
+	ch1, err := s.repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Retrieve its cache file modification time.
+	path := ch1.(*charm.CharmArchive).Path
+	modTime := getModTime(path)
+
+	// Retrieve the same charm again.
+	ch2, err := s.repo.Get(url.WithRevision(-1))
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Check this is the same charm, and its underlying cache file is the same.
+	checkCharm(c, ch2, ch1)
+	c.Assert(ch2.(*charm.CharmArchive).Path, gc.Equals, path)
+
+	// Check the same file has been reused.
+	c.Assert(modTime.Equal(getModTime(path)), jc.IsTrue)
+}
+
+func (s *charmStoreRepoSuite) TestGetInvalidCache(c *gc.C) {
+	_, url := s.addCharm(c, "~who/trusty/mysql-1", "mysql")
+
+	// Retrieve a charm.
+	ch1, err := s.repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Modify its cache file to make it invalid.
+	path := ch1.(*charm.CharmArchive).Path
+	err = ioutil.WriteFile(path, []byte("invalid"), 0644)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Retrieve the same charm again.
+	_, err = s.repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+
+	// Check that the cache file has been properly rewritten.
+	c.Assert(hashOfPath(c, path), gc.Equals, hashOfCharm(c, "mysql"))
+}
+
+func (s *charmStoreRepoSuite) TestGetIncreaseStats(c *gc.C) {
+	if jujutesting.MgoServer.WithoutV8 {
+		c.Skip("mongo javascript not enabled")
+	}
+	_, url := s.addCharm(c, "~who/precise/wordpress-2", "wordpress")
+
+	// Retrieve the charm.
+	_, err := s.repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+	s.checkCharmDownloads(c, url, 1)
+
+	// Retrieve the charm again.
+	_, err = s.repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+	s.checkCharmDownloads(c, url, 2)
+}
+
+func (s *charmStoreRepoSuite) TestGetWithTestMode(c *gc.C) {
+	_, url := s.addCharm(c, "~who/precise/wordpress-42", "wordpress")
+
+	// Use a repo with test mode enabled to download a charm a couple of
+	// times, and check the downloads count is not increased.
+	repo := s.repo.WithTestMode()
+	_, err := repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+	_, err = repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+	s.checkCharmDownloads(c, url, 0)
+}
+
+func (s *charmStoreRepoSuite) TestGetWithJujuAttrs(c *gc.C) {
+	_, url := s.addCharm(c, "trusty/riak-0", "riak")
+
+	// Set up a proxy server that stores the request header.
+	var header http.Header
+	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		header = r.Header
+		s.handler.ServeHTTP(w, r)
+	}))
+	defer srv.Close()
+
+	repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{
+		URL: srv.URL,
+	})
+
+	// Make a first request without Juju attrs.
+	_, err := repo.Get(url)
+	c.Assert(err, jc.ErrorIsNil)
+	c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "")
+
+	// Make a second request after setting Juju attrs.
+ repo = repo.WithJujuAttrs(map[string]string{ + "k1": "v1", + "k2": "v2", + }) + _, err = repo.Get(url) + c.Assert(err, jc.ErrorIsNil) + values := header[http.CanonicalHeaderKey(charmrepo.JujuMetadataHTTPHeader)] + sort.Strings(values) + c.Assert(values, jc.DeepEquals, []string{"k1=v1", "k2=v2"}) + + // Make a third request after restoring empty attrs. + repo = repo.WithJujuAttrs(nil) + _, err = repo.Get(url) + c.Assert(err, jc.ErrorIsNil) + c.Assert(header.Get(charmrepo.JujuMetadataHTTPHeader), gc.Equals, "") +} + +func (s *charmStoreRepoSuite) TestGetErrorBundle(c *gc.C) { + ch, err := s.repo.Get(charm.MustParseURL("cs:bundle/django")) + c.Assert(err, gc.ErrorMatches, `expected a charm URL, got bundle URL "cs:bundle/django"`) + c.Assert(ch, gc.IsNil) +} + +func (s *charmStoreRepoSuite) TestGetErrorCacheDir(c *gc.C) { + parentDir := c.MkDir() + err := os.Chmod(parentDir, 0) + c.Assert(err, jc.ErrorIsNil) + defer os.Chmod(parentDir, 0755) + s.PatchValue(&charmrepo.CacheDir, filepath.Join(parentDir, "cache")) + + ch, err := s.repo.Get(charm.MustParseURL("cs:trusty/django")) + c.Assert(err, gc.ErrorMatches, `cannot create the cache directory: .*: permission denied`) + c.Assert(ch, gc.IsNil) +} + +func (s *charmStoreRepoSuite) TestGetErrorCharmNotFound(c *gc.C) { + ch, err := s.repo.Get(charm.MustParseURL("cs:trusty/no-such")) + c.Assert(err, gc.ErrorMatches, `cannot retrieve "cs:trusty/no-such": charm not found`) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) + c.Assert(ch, gc.IsNil) +} + +func (s *charmStoreRepoSuite) TestGetErrorServer(c *gc.C) { + // Set up a server always returning errors. + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + http.Error(w, `{"Message": "bad wolf", "Code": "bad request"}`, http.StatusBadRequest) + })) + defer srv.Close() + + // Try getting a charm from the server. + repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ + URL: srv.URL, + }) + ch, err := repo.Get(charm.MustParseURL("cs:trusty/django")) + c.Assert(err, gc.ErrorMatches, `cannot retrieve charm "cs:trusty/django": cannot get archive: bad wolf`) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrBadRequest) + c.Assert(ch, gc.IsNil) +} + +func (s *charmStoreRepoSuite) TestGetErrorHashMismatch(c *gc.C) { + _, url := s.addCharm(c, "trusty/riak-0", "riak") + + // Set up a proxy server that modifies the returned hash. + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + rec := httptest.NewRecorder() + s.handler.ServeHTTP(rec, r) + w.Header().Set(params.EntityIdHeader, rec.Header().Get(params.EntityIdHeader)) + w.Header().Set(params.ContentHashHeader, "invalid") + w.Write(rec.Body.Bytes()) + })) + defer srv.Close() + + // Try getting a charm from the server. + repo := charmrepo.NewCharmStore(charmrepo.NewCharmStoreParams{ + URL: srv.URL, + }) + ch, err := repo.Get(url) + c.Assert(err, gc.ErrorMatches, `hash mismatch; network corruption\?`) + c.Assert(ch, gc.IsNil) +} + +func (s *charmStoreRepoSuite) TestGetBundle(c *gc.C) { + // Note that getting a bundle shares most of the logic with charm + // retrieval. For this reason, only bundle specific code is tested. 
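+ // The charms referenced by the bundle must already exist in the store
+ // before the bundle itself can be uploaded.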
+ s.addCharm(c, "cs:trusty/mysql-0", "mysql") + s.addCharm(c, "cs:trusty/wordpress-0", "wordpress") + expect, url := s.addBundle(c, "cs:~who/bundle/wordpress-simple-42", "wordpress-simple") + b, err := s.repo.GetBundle(url) + c.Assert(err, jc.ErrorIsNil) + c.Assert(b.Data(), jc.DeepEquals, expect.Data()) + c.Assert(b.ReadMe(), gc.Equals, expect.ReadMe()) +} + +func (s *charmStoreRepoSuite) TestGetBundleErrorCharm(c *gc.C) { + ch, err := s.repo.GetBundle(charm.MustParseURL("cs:trusty/django")) + c.Assert(err, gc.ErrorMatches, `expected a bundle URL, got charm URL "cs:trusty/django"`) + c.Assert(ch, gc.IsNil) +} + +func (s *charmStoreRepoSuite) TestLatest(c *gc.C) { + // Add some charms to the charm store. + s.addCharm(c, "~who/trusty/mysql-0", "mysql") + s.addCharm(c, "~who/precise/wordpress-1", "wordpress") + s.addCharm(c, "~dalek/trusty/riak-0", "riak") + s.addCharm(c, "~dalek/trusty/riak-1", "riak") + s.addCharm(c, "~dalek/trusty/riak-3", "riak") + _, url := s.addCharm(c, "~who/utopic/varnish-0", "varnish") + + // Change permissions on one of the charms so that it is not readable by + // anyone. + err := s.client.Put("/"+url.Path()+"/meta/perm/read", []string{"dalek"}) + c.Assert(err, jc.ErrorIsNil) + + // Calculate and store the expected hashes for the uploaded charms. + mysqlHash := hashOfCharm(c, "mysql") + wordpressHash := hashOfCharm(c, "wordpress") + riakHash := hashOfCharm(c, "riak") + + // Define the tests to be run. + tests := []struct { + about string + urls []*charm.URL + revs []charmrepo.CharmRevision + }{{ + about: "no urls", + }, { + about: "charm not found", + urls: []*charm.URL{charm.MustParseURL("cs:trusty/no-such-42")}, + revs: []charmrepo.CharmRevision{{ + Err: charmrepo.CharmNotFound("cs:trusty/no-such"), + }}, + }, { + about: "resolve", + urls: []*charm.URL{ + charm.MustParseURL("cs:~who/trusty/mysql-42"), + charm.MustParseURL("cs:~who/trusty/mysql-0"), + charm.MustParseURL("cs:~who/trusty/mysql"), + }, + revs: []charmrepo.CharmRevision{{ + Revision: 0, + Sha256: mysqlHash, + }, { + Revision: 0, + Sha256: mysqlHash, + }, { + Revision: 0, + Sha256: mysqlHash, + }}, + }, { + about: "multiple charms", + urls: []*charm.URL{ + charm.MustParseURL("cs:~who/precise/wordpress"), + charm.MustParseURL("cs:~who/trusty/mysql-47"), + charm.MustParseURL("cs:~dalek/trusty/no-such"), + charm.MustParseURL("cs:~dalek/trusty/riak-0"), + }, + revs: []charmrepo.CharmRevision{{ + Revision: 1, + Sha256: wordpressHash, + }, { + Revision: 0, + Sha256: mysqlHash, + }, { + Err: charmrepo.CharmNotFound("cs:~dalek/trusty/no-such"), + }, { + Revision: 3, + Sha256: riakHash, + }}, + }, { + about: "unauthorized", + urls: []*charm.URL{ + charm.MustParseURL("cs:~who/precise/wordpress"), + url, + }, + revs: []charmrepo.CharmRevision{{ + Revision: 1, + Sha256: wordpressHash, + }, { + Err: charmrepo.CharmNotFound("cs:~who/utopic/varnish"), + }}, + }} + + // Run the tests. + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + revs, err := s.repo.Latest(test.urls...) + c.Assert(err, jc.ErrorIsNil) + c.Assert(revs, jc.DeepEquals, test.revs) + } +} + +func (s *charmStoreRepoSuite) TestResolve(c *gc.C) { + // Add some charms to the charm store. + s.addCharm(c, "~who/trusty/mysql-0", "mysql") + s.addCharm(c, "~who/precise/wordpress-2", "wordpress") + s.addCharm(c, "~dalek/utopic/riak-42", "riak") + s.addCharmNoRevision(c, "multi-series", "multi-series") + s.addCharm(c, "utopic/mysql-47", "mysql") + + // Define the tests to be run. 
+ tests := []struct { + id string + url string + supportedSeries []string + err string + }{{ + id: "~who/mysql", + url: "cs:~who/trusty/mysql-0", + supportedSeries: []string{"trusty"}, + }, { + id: "~who/trusty/mysql", + url: "cs:~who/trusty/mysql-0", + supportedSeries: []string{"trusty"}, + }, { + id: "~who/wordpress", + url: "cs:~who/precise/wordpress-2", + supportedSeries: []string{"precise"}, + }, { + id: "~who/wordpress-2", + err: `cannot resolve URL "cs:~who/wordpress-2": charm or bundle not found`, + }, { + id: "~dalek/riak", + url: "cs:~dalek/utopic/riak-42", + supportedSeries: []string{"utopic"}, + }, { + id: "~dalek/utopic/riak-42", + url: "cs:~dalek/utopic/riak-42", + supportedSeries: []string{"utopic"}, + }, { + id: "utopic/mysql", + url: "cs:utopic/mysql-47", + supportedSeries: []string{"utopic"}, + }, { + id: "utopic/mysql-47", + url: "cs:utopic/mysql-47", + supportedSeries: []string{"utopic"}, + }, { + id: "~who/multi-series", + url: "cs:~who/multi-series-0", + supportedSeries: []string{"trusty", "precise", "quantal"}, + }, { + id: "~dalek/utopic/riak-100", + err: `cannot resolve URL "cs:~dalek/utopic/riak-100": charm not found`, + }, { + id: "bundle/no-such", + err: `cannot resolve URL "cs:bundle/no-such": bundle not found`, + }, { + id: "no-such", + err: `cannot resolve URL "cs:no-such": charm or bundle not found`, + }} + + // Run the tests. + for i, test := range tests { + c.Logf("test %d: %s", i, test.id) + ref, supportedSeries, err := s.repo.Resolve(charm.MustParseURL(test.id)) + if test.err != "" { + c.Check(err.Error(), gc.Equals, test.err) + c.Check(ref, gc.IsNil) + continue + } + c.Assert(err, jc.ErrorIsNil) + c.Check(ref, jc.DeepEquals, charm.MustParseURL(test.url)) + c.Check(supportedSeries, jc.SameContents, test.supportedSeries) + } +} + +// hashOfCharm returns the SHA256 hash sum for the given charm name. +func hashOfCharm(c *gc.C, name string) string { + path := TestCharms.CharmArchivePath(c.MkDir(), name) + return hashOfPath(c, path) +} + +// hashOfPath returns the SHA256 hash sum for the given path. +func hashOfPath(c *gc.C, path string) string { + f, err := os.Open(path) + c.Assert(err, jc.ErrorIsNil) + defer f.Close() + hash := sha256.New() + _, err = io.Copy(hash, f) + c.Assert(err, jc.ErrorIsNil) + return fmt.Sprintf("%x", hash.Sum(nil)) +} + +// checkCharm checks that the given charms have the same attributes. +func checkCharm(c *gc.C, ch, expect charm.Charm) { + c.Assert(ch.Actions(), jc.DeepEquals, expect.Actions()) + c.Assert(ch.Config(), jc.DeepEquals, expect.Config()) + c.Assert(ch.Meta(), jc.DeepEquals, expect.Meta()) +} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/archive.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/archive.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/archive.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package csclient + +import ( + "crypto/sha512" + "fmt" + "io" + "io/ioutil" + "os" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// ReadSeekCloser implements io.ReadSeeker and io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// openArchive is used to turn the current charm or bundle implementations +// into readers for their corresponding archive. +// It returns the corresponding archive reader, its SHA384 hash and size. 
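+// Callers are responsible for closing the returned reader.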
+func openArchive(entity interface{}) (r ReadSeekCloser, hash string, size int64, err error) {
+ var path string
+ switch entity := entity.(type) {
+ case archiverTo:
+ // For example: charm.CharmDir or charm.BundleDir.
+ file, err := newRemoveOnCloseTempFile("entity-archive")
+ if err != nil {
+ return nil, "", 0, errgo.Notef(err, "cannot make temporary file")
+ }
+ if err := entity.ArchiveTo(file); err != nil {
+ file.Close()
+ return nil, "", 0, errgo.Notef(err, "cannot create entity archive")
+ }
+ if _, err := file.Seek(0, 0); err != nil {
+ file.Close()
+ return nil, "", 0, errgo.Notef(err, "cannot seek")
+ }
+ hash, size, err = readerHashAndSize(file)
+ if err != nil {
+ file.Close()
+ return nil, "", 0, errgo.Mask(err)
+ }
+ return file, hash, size, nil
+ case *charm.BundleArchive:
+ path = entity.Path
+ case *charm.CharmArchive:
+ path = entity.Path
+ default:
+ return nil, "", 0, errgo.Newf("cannot get the archive for entity type %T", entity)
+ }
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, "", 0, errgo.Mask(err)
+ }
+ hash, size, err = readerHashAndSize(file)
+ if err != nil {
+ file.Close()
+ return nil, "", 0, errgo.Mask(err)
+ }
+ return file, hash, size, nil
+}
+
+// readerHashAndSize returns the SHA384 hash and size of the data in the
+// given reader.
+func readerHashAndSize(r io.ReadSeeker) (hash string, size int64, err error) {
+ h := sha512.New384()
+ size, err = io.Copy(h, r)
+ if err != nil {
+ return "", 0, errgo.Notef(err, "cannot calculate hash")
+ }
+ if _, err := r.Seek(0, 0); err != nil {
+ return "", 0, errgo.Notef(err, "cannot seek")
+ }
+ return fmt.Sprintf("%x", h.Sum(nil)), size, nil
+}
+
+type archiverTo interface {
+ ArchiveTo(io.Writer) error
+}
+
+// newRemoveOnCloseTempFile creates a new temporary file in the default
+// directory for temporary files with a name beginning with prefix.
+// The resulting file is removed when the file is closed.
+func newRemoveOnCloseTempFile(prefix string) (*removeOnCloseFile, error) {
+ file, err := ioutil.TempFile("", prefix)
+ if err != nil {
+ return nil, err
+ }
+ return &removeOnCloseFile{file}, nil
+}
+
+// removeOnCloseFile represents a file which is removed when closed.
+type removeOnCloseFile struct {
+ *os.File
+}
+
+func (r *removeOnCloseFile) Close() error {
+ r.File.Close()
+ return os.Remove(r.File.Name())
+}
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient.go'
--- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,646 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package csclient provides access to the charm store API.
+package csclient
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "unicode"
+
+ "gopkg.in/errgo.v1"
+ "gopkg.in/juju/charm.v6-unstable"
+ "gopkg.in/macaroon-bakery.v1/httpbakery"
+
+ "gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+)
+
+const apiVersion = "v4"
+
+// ServerURL holds the default location of the global charm store.
+// An alternate location can be configured by changing the URL field in the
+// Params struct.
+// For live testing or QAing the application, a different charm store
+// location should be used, for instance "https://api.staging.jujucharms.com".
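+//
+// For instance, a client pointing at an alternate store could be created
+// with a sketch like the following (the staging URL shown is illustrative
+// only; the exact path depends on the deployment):
+//
+// client := csclient.New(csclient.Params{
+// URL: "https://api.staging.jujucharms.com/charmstore",
+// })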
+var ServerURL = "https://api.jujucharms.com/charmstore"
+
+// Client represents the client side of a charm store.
+type Client struct {
+ params Params
+ bclient *httpbakery.Client
+ header http.Header
+ statsDisabled bool
+}
+
+// Params holds parameters for creating a new charm store client.
+type Params struct {
+ // URL holds the root endpoint URL of the charmstore,
+ // with no trailing slash, not including the version.
+ // For example https://api.jujucharms.com/charmstore
+ // If empty, the default charm store location is used.
+ URL string
+
+ // User and Password hold the authentication credentials
+ // for the client. If User is empty, no credentials will be
+ // sent.
+ User string
+ Password string
+
+ // HTTPClient holds the HTTP client to use when making
+ // requests to the store. If nil, httpbakery.NewHTTPClient will
+ // be used.
+ HTTPClient *http.Client
+
+ // VisitWebPage is called when authorization requires that
+ // the user visits a web page to authenticate themselves.
+ // If nil, no interaction will be allowed.
+ VisitWebPage func(url *url.URL) error
+}
+
+// New returns a new charm store client.
+func New(p Params) *Client {
+ if p.URL == "" {
+ p.URL = ServerURL
+ }
+ if p.HTTPClient == nil {
+ p.HTTPClient = httpbakery.NewHTTPClient()
+ }
+ return &Client{
+ params: p,
+ bclient: &httpbakery.Client{
+ Client: p.HTTPClient,
+ VisitWebPage: p.VisitWebPage,
+ },
+ }
+}
+
+// ServerURL returns the charm store URL used by the client.
+func (c *Client) ServerURL() string {
+ return c.params.URL
+}
+
+// DisableStats disables incrementing download stats when retrieving archives
+// from the charm store.
+func (c *Client) DisableStats() {
+ c.statsDisabled = true
+}
+
+// SetHTTPHeader sets custom HTTP headers that will be sent to the charm store
+// on each request.
+func (c *Client) SetHTTPHeader(header http.Header) {
+ c.header = header
+}
+
+// GetArchive retrieves the archive for the given charm or bundle, returning a
+// reader from which its data can be read, the fully qualified id of the
+// corresponding entity, the SHA384 hash of the data and its size.
+func (c *Client) GetArchive(id *charm.URL) (r io.ReadCloser, eid *charm.URL, hash string, size int64, err error) {
+ // Create the request.
+ req, err := http.NewRequest("GET", "", nil)
+ if err != nil {
+ return nil, nil, "", 0, errgo.Notef(err, "cannot make new request")
+ }
+
+ // Send the request.
+ v := url.Values{}
+ if c.statsDisabled {
+ v.Set("stats", "0")
+ }
+ u := url.URL{
+ Path: "/" + id.Path() + "/archive",
+ RawQuery: v.Encode(),
+ }
+ resp, err := c.Do(req, u.String())
+ if err != nil {
+ return nil, nil, "", 0, errgo.NoteMask(err, "cannot get archive", errgo.Any)
+ }
+
+ // Validate the response headers.
+ entityId := resp.Header.Get(params.EntityIdHeader)
+ if entityId == "" {
+ resp.Body.Close()
+ return nil, nil, "", 0, errgo.Newf("no %s header found in response", params.EntityIdHeader)
+ }
+ eid, err = charm.ParseURL(entityId)
+ if err != nil {
+ // The server did not return a valid id.
+ resp.Body.Close()
+ return nil, nil, "", 0, errgo.Notef(err, "invalid entity id found in response")
+ }
+ if eid.Revision == -1 {
+ // The server did not return a fully qualified entity id.
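+ // A fully qualified id includes a revision, e.g. "cs:utopic/django-42"
+ // rather than just "cs:django".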
+ resp.Body.Close()
+ return nil, nil, "", 0, errgo.Newf("archive get returned not fully qualified entity id %q", eid)
+ }
+ hash = resp.Header.Get(params.ContentHashHeader)
+ if hash == "" {
+ resp.Body.Close()
+ return nil, nil, "", 0, errgo.Newf("no %s header found in response", params.ContentHashHeader)
+ }
+
+ // Validate the response contents.
+ if resp.ContentLength < 0 {
+ // TODO frankban: handle the case where the contents are chunked.
+ resp.Body.Close()
+ return nil, nil, "", 0, errgo.Newf("no content length found in response")
+ }
+ return resp.Body, eid, hash, resp.ContentLength, nil
+}
+
+// StatsUpdate updates download stats for the entities and timestamps
+// specified in the given request.
+func (c *Client) StatsUpdate(req params.StatsUpdateRequest) error {
+ return c.Put("/stats/update", req)
+}
+
+// UploadCharm uploads the given charm to the charm store with the given id,
+// which must not specify a revision.
+// The accepted charm implementations are charm.CharmDir and
+// charm.CharmArchive.
+//
+// UploadCharm returns the id that the charm has been given in the
+// store - this will be the same as id except for the revision.
+func (c *Client) UploadCharm(id *charm.URL, ch charm.Charm) (*charm.URL, error) {
+ if id.Revision != -1 {
+ return nil, errgo.Newf("revision specified in %q, but should not be specified", id)
+ }
+ r, hash, size, err := openArchive(ch)
+ if err != nil {
+ return nil, errgo.Notef(err, "cannot open charm archive")
+ }
+ defer r.Close()
+ return c.uploadArchive(id, r, hash, size, -1)
+}
+
+// UploadCharmWithRevision uploads the given charm to the
+// given id in the charm store, which must contain a revision.
+// If promulgatedRevision is not -1, it specifies that the charm
+// should be marked as promulgated with that revision.
+//
+// This method is provided only for testing and should not
+// generally be used otherwise.
+func (c *Client) UploadCharmWithRevision(id *charm.URL, ch charm.Charm, promulgatedRevision int) error {
+ if id.Revision == -1 {
+ return errgo.Newf("revision not specified in %q", id)
+ }
+ r, hash, size, err := openArchive(ch)
+ if err != nil {
+ return errgo.Notef(err, "cannot open charm archive")
+ }
+ defer r.Close()
+ _, err = c.uploadArchive(id, r, hash, size, promulgatedRevision)
+ return errgo.Mask(err)
+}
+
+// UploadBundle uploads the given bundle to the charm store with the given id,
+// which must not specify a revision.
+// The accepted bundle implementations are charm.BundleDir and
+// charm.BundleArchive.
+//
+// UploadBundle returns the id that the bundle has been given in the
+// store - this will be the same as id except for the revision.
+func (c *Client) UploadBundle(id *charm.URL, b charm.Bundle) (*charm.URL, error) {
+ if id.Revision != -1 {
+ return nil, errgo.Newf("revision specified in %q, but should not be specified", id)
+ }
+ r, hash, size, err := openArchive(b)
+ if err != nil {
+ return nil, errgo.Notef(err, "cannot open bundle archive")
+ }
+ defer r.Close()
+ return c.uploadArchive(id, r, hash, size, -1)
+}
+
+// UploadBundleWithRevision uploads the given bundle to the
+// given id in the charm store, which must contain a revision.
+// If promulgatedRevision is not -1, it specifies that the bundle
+// should be marked as promulgated with that revision.
+//
+// This method is provided only for testing and should not
+// generally be used otherwise.
+func (c *Client) UploadBundleWithRevision(id *charm.URL, b charm.Bundle, promulgatedRevision int) error {
+ if id.Revision == -1 {
+ return errgo.Newf("revision not specified in %q", id)
+ }
+ r, hash, size, err := openArchive(b)
+ if err != nil {
+ return errgo.Notef(err, "cannot open bundle archive")
+ }
+ defer r.Close()
+ _, err = c.uploadArchive(id, r, hash, size, promulgatedRevision)
+ return errgo.Mask(err)
+}
+
+// uploadArchive pushes the archive for the charm or bundle represented by
+// the given body, its SHA384 hash and its size. It returns the resulting
+// entity reference. The given id should include the series and should not
+// include the revision.
+func (c *Client) uploadArchive(id *charm.URL, body io.ReadSeeker, hash string, size int64, promulgatedRevision int) (*charm.URL, error) {
+ // When uploading archives, it can be a problem that an error response
+ // is returned while we are still writing the body data.
+ // To avoid this, we log in first so that we don't need to
+ // do the macaroon exchange after POST.
+ // Unfortunately this won't help matters if the user is logged in but
+ // doesn't have privileges to write to the stated charm.
+ // A better solution would be to fix https://github.com/golang/go/issues/3665
+ // and use the 100-Continue client functionality.
+ //
+ // We only need to do this when basic auth credentials are not provided.
+ if c.params.User == "" {
+ if err := c.Login(); err != nil {
+ return nil, errgo.Notef(err, "cannot log in")
+ }
+ }
+ method := "POST"
+ promulgatedArg := ""
+ if id.Revision != -1 {
+ method = "PUT"
+ if promulgatedRevision != -1 {
+ pr := *id
+ pr.User = ""
+ pr.Revision = promulgatedRevision
+ promulgatedArg = "&promulgated=" + pr.Path()
+ }
+ }
+
+ // Prepare the request.
+ req, err := http.NewRequest(method, "", nil)
+ if err != nil {
+ return nil, errgo.Notef(err, "cannot make new request")
+ }
+ req.Header.Set("Content-Type", "application/zip")
+ req.ContentLength = size
+
+ // Send the request.
+ resp, err := c.DoWithBody(
+ req,
+ "/"+id.Path()+"/archive?hash="+hash+promulgatedArg,
+ body,
+ )
+ if err != nil {
+ return nil, errgo.NoteMask(err, "cannot post archive", errgo.Any)
+ }
+ defer resp.Body.Close()
+
+ // Parse the response.
+ var result params.ArchiveUploadResponse
+ if err := parseResponseBody(resp.Body, &result); err != nil {
+ return nil, errgo.Mask(err)
+ }
+ return result.Id, nil
+}
+
+// PutExtraInfo puts extra-info data for the given id.
+// Each entry in the info map causes a value in extra-info with
+// that key to be set to the associated value.
+// Entries not set in the map will be unchanged.
+func (c *Client) PutExtraInfo(id *charm.URL, info map[string]interface{}) error {
+ return c.Put("/"+id.Path()+"/meta/extra-info", info)
+}
+
+// PutCommonInfo puts common-info data for the given id.
+// Each entry in the info map causes a value in common-info with
+// that key to be set to the associated value.
+// Entries not set in the map will be unchanged.
+func (c *Client) PutCommonInfo(id *charm.URL, info map[string]interface{}) error {
+ return c.Put("/"+id.Path()+"/meta/common-info", info)
+}
+
+// Meta fetches metadata on the charm or bundle with the
+// given id. The result value, which is filled in with the fetched
+// metadata, must be a pointer to a struct containing members
+// corresponding to possible metadata include parameters
+// (see https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta).
+//
+// It returns the fully qualified id of the entity.
+//
+// The name of the struct member is translated to
+// a lower case hyphen-separated form; for example,
+// ArchiveSize becomes "archive-size", and BundleMachineCount
+// becomes "bundle-machine-count", but the name may also
+// be specified explicitly in the field's tag.
+//
+// This example will fill in the result structure with information
+// about the given id, including information on its archive
+// size (include archive-size), upload time (include archive-upload-time)
+// and digest (include extra-info/digest).
+//
+// var result struct {
+// ArchiveSize params.ArchiveSizeResponse
+// ArchiveUploadTime params.ArchiveUploadTimeResponse
+// Digest string `csclient:"extra-info/digest"`
+// }
+// id, err := client.Meta(id, &result)
+func (c *Client) Meta(id *charm.URL, result interface{}) (*charm.URL, error) {
+ if result == nil {
+ return nil, fmt.Errorf("expected valid result pointer, not nil")
+ }
+ resultv := reflect.ValueOf(result)
+ resultt := resultv.Type()
+ if resultt.Kind() != reflect.Ptr {
+ return nil, fmt.Errorf("expected pointer, not %T", result)
+ }
+ resultt = resultt.Elem()
+ if resultt.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("expected pointer to struct, not %T", result)
+ }
+ resultv = resultv.Elem()
+
+ // At this point, resultv refers to the struct value pointed
+ // to by result, and resultt is its type.
+
+ numField := resultt.NumField()
+ includes := make([]string, 0, numField)
+
+ // results holds an entry for each field in the result value,
+ // pointing to the value for that field.
+ results := make(map[string]reflect.Value)
+ for i := 0; i < numField; i++ {
+ field := resultt.Field(i)
+ if field.PkgPath != "" {
+ // Field is private; ignore it.
+ continue
+ }
+ if field.Anonymous {
+ // At some point in the future, it might be nice to
+ // support anonymous fields, but for now the
+ // additional complexity doesn't seem worth it.
+ return nil, fmt.Errorf("anonymous fields not supported")
+ }
+ apiName := field.Tag.Get("csclient")
+ if apiName == "" {
+ apiName = hyphenate(field.Name)
+ }
+ includes = append(includes, "include="+apiName)
+ results[apiName] = resultv.FieldByName(field.Name).Addr()
+ }
+ // We unmarshal into rawResult, then unmarshal each field
+ // separately into its place in the final result value.
+ // Note that we can't use params.MetaAnyResponse because
+ // that will unpack all the values inside the Meta field,
+ // but we want to keep them raw so that we can unmarshal
+ // them ourselves.
+ var rawResult struct {
+ Id *charm.URL
+ Meta map[string]json.RawMessage
+ }
+ path := "/" + id.Path() + "/meta/any"
+ if len(includes) > 0 {
+ path += "?" + strings.Join(includes, "&")
+ }
+ if err := c.Get(path, &rawResult); err != nil {
+ return nil, errgo.NoteMask(err, fmt.Sprintf("cannot get %q", path), errgo.Any)
+ }
+ // Note that the server is not required to send back values
+ // for all fields. "If there is no metadata for the given meta path, the
+ // element will be omitted"
+ // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany
+ for name, r := range rawResult.Meta {
+ v, ok := results[name]
+ if !ok {
+ // The server has produced a result that we
+ // don't know about. Ignore it.
+ continue
+ }
+ // Unmarshal the raw JSON into the final struct field.
+ err := json.Unmarshal(r, v.Interface())
+ if err != nil {
+ return nil, errgo.Notef(err, "cannot unmarshal %s", name)
+ }
+ }
+ return rawResult.Id, nil
+}
+
+// hyphenate returns the hyphenated version of the given
+// field name, as specified in the Client.Meta method.
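+// For example, "ArchiveSize" is hyphenated to "archive-size".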
+func hyphenate(s string) string {
+ // TODO hyphenate FooHTTPBar as foo-http-bar?
+ var buf bytes.Buffer
+ var prevLower bool
+ for _, r := range s {
+ if !unicode.IsUpper(r) {
+ prevLower = true
+ buf.WriteRune(r)
+ continue
+ }
+ if prevLower {
+ buf.WriteRune('-')
+ }
+ buf.WriteRune(unicode.ToLower(r))
+ prevLower = false
+ }
+ return buf.String()
+}
+
+// Get makes a GET request to the given path in the charm store (not
+// including the host name or version prefix but including a leading /),
+// parsing the result as JSON into the given result value, which should
+// be a pointer to the expected data, but may be nil if no result is
+// desired.
+func (c *Client) Get(path string, result interface{}) error {
+ req, err := http.NewRequest("GET", "", nil)
+ if err != nil {
+ return errgo.Notef(err, "cannot make new request")
+ }
+ resp, err := c.Do(req, path)
+ if err != nil {
+ return errgo.Mask(err, errgo.Any)
+ }
+ defer resp.Body.Close()
+ // Parse the response.
+ if err := parseResponseBody(resp.Body, result); err != nil {
+ return errgo.Mask(err)
+ }
+ return nil
+}
+
+// Put makes a PUT request to the given path in the charm store
+// (not including the host name or version prefix, but including a leading /),
+// marshaling the given value as JSON to use as the request body.
+func (c *Client) Put(path string, val interface{}) error {
+ return c.PutWithResponse(path, val, nil)
+}
+
+// PutWithResponse makes a PUT request to the given path in the charm store
+// (not including the host name or version prefix, but including a leading /),
+// marshaling the given value as JSON to use as the request body. Additionally,
+// this method parses the result as JSON into the given result value, which
+// should be a pointer to the expected data, but may be nil if no result is
+// desired.
+func (c *Client) PutWithResponse(path string, val, result interface{}) error {
+ req, err := http.NewRequest("PUT", "", nil)
+ if err != nil {
+ return errgo.Notef(err, "cannot make new request")
+ }
+ req.Header.Set("Content-Type", "application/json")
+ data, err := json.Marshal(val)
+ if err != nil {
+ return errgo.Notef(err, "cannot marshal PUT body")
+ }
+ body := bytes.NewReader(data)
+ resp, err := c.DoWithBody(req, path, body)
+ if err != nil {
+ return errgo.Mask(err, errgo.Any)
+ }
+ defer resp.Body.Close()
+ // Parse the response.
+ if err := parseResponseBody(resp.Body, result); err != nil {
+ return errgo.Mask(err)
+ }
+ return nil
+}
+
+func parseResponseBody(body io.Reader, result interface{}) error {
+ data, err := ioutil.ReadAll(body)
+ if err != nil {
+ return errgo.Notef(err, "cannot read response body")
+ }
+ if result == nil {
+ // The caller doesn't care about the response body.
+ return nil
+ }
+ if err := json.Unmarshal(data, result); err != nil {
+ return errgo.Notef(err, "cannot unmarshal response %q", sizeLimit(data))
+ }
+ return nil
+}
+
+// DoWithBody is like Do except that the given body is used
+// as the body of the HTTP request.
+//
+// Any error returned from the underlying httpbakery.DoWithBody
+// request will have an unchanged error cause.
+func (c *Client) DoWithBody(req *http.Request, path string, body io.ReadSeeker) (*http.Response, error) {
+ if c.params.User != "" {
+ userPass := c.params.User + ":" + c.params.Password
+ authBasic := base64.StdEncoding.EncodeToString([]byte(userPass))
+ req.Header.Set("Authorization", "Basic "+authBasic)
+ }
+
+ // Prepare the request.
+ if !strings.HasPrefix(path, "/") { + return nil, errgo.Newf("path %q is not absolute", path) + } + for k, vv := range c.header { + req.Header[k] = append(req.Header[k], vv...) + } + u, err := url.Parse(c.params.URL + "/" + apiVersion + path) + if err != nil { + return nil, errgo.Mask(err) + } + req.URL = u + + // Send the request. + resp, err := c.bclient.DoWithBody(req, body) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + + if resp.StatusCode == http.StatusOK { + return resp, nil + } + defer resp.Body.Close() + + // Parse the response error. + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errgo.Notef(err, "cannot read response body") + } + var perr params.Error + if err := json.Unmarshal(data, &perr); err != nil { + return nil, errgo.Notef(err, "cannot unmarshal error response %q", sizeLimit(data)) + } + if perr.Message == "" { + return nil, errgo.Newf("error response with empty message %s", sizeLimit(data)) + } + return nil, &perr +} + +// Do makes an arbitrary request to the charm store. +// It adds appropriate headers to the given HTTP request, +// sends it to the charm store, and returns the resulting +// response. Do never returns a response with a status +// that is not http.StatusOK. +// +// The URL field in the request is ignored and overwritten. +// +// This is a low level method - more specific Client methods +// should be used when possible. +// +// For requests with a body (for example PUT or POST) use DoWithBody +// instead. +func (c *Client) Do(req *http.Request, path string) (*http.Response, error) { + if req.Body != nil { + return nil, errgo.New("body unexpectedly provided in http request - use DoWithBody") + } + return c.DoWithBody(req, path, nil) +} + +func sizeLimit(data []byte) []byte { + const max = 1024 + if len(data) < max { + return data + } + return append(data[0:max], fmt.Sprintf(" ... [%d bytes omitted]", len(data)-max)...) +} + +// Log sends a log message to the charmstore's log database. +func (cs *Client) Log(typ params.LogType, level params.LogLevel, message string, urls ...*charm.URL) error { + b, err := json.Marshal(message) + if err != nil { + return errgo.Notef(err, "cannot marshal log message") + } + + // Prepare and send the log. + // TODO (frankban): we might want to buffer logs in order to reduce + // requests. + logs := []params.Log{{ + Data: (*json.RawMessage)(&b), + Level: level, + Type: typ, + URLs: urls, + }} + b, err = json.Marshal(logs) + if err != nil { + return errgo.Notef(err, "cannot marshal log message") + } + + req, err := http.NewRequest("POST", "", nil) + if err != nil { + return errgo.Notef(err, "cannot create log request") + } + req.Header.Set("Content-Type", "application/json") + resp, err := cs.DoWithBody(req, "/log", bytes.NewReader(b)) + if err != nil { + return errgo.NoteMask(err, "cannot send log message", errgo.Any) + } + resp.Body.Close() + return nil +} + +// Login explicitly obtains authorization credentials +// for the charm store and stores them in the client's +// cookie jar. +func (cs *Client) Login() error { + if err := cs.Get("/delegatable-macaroon", &struct{}{}); err != nil { + return errgo.Notef(err, "cannot retrieve the authentication macaroon") + } + return nil +} + +// WhoAmI returns the user and list of groups associated with the macaroon +// used to authenticate. 
+func (cs *Client) WhoAmI() (*params.WhoAmIResponse, error) { + var response params.WhoAmIResponse + if err := cs.Get("/whoami", &response); err != nil { + return nil, errgo.Mask(err) + } + return &response, nil +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/csclient_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1400 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package csclient_test + +import ( + "bytes" + "crypto/sha512" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + neturl "net/url" + "os" + "reflect" + "strings" + "time" + + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmstore.v5-unstable" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/mgo.v2" + + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" +) + +var charmRepo = charmtesting.NewRepo("../internal/test-charm-repo", "quantal") + +// Define fake attributes to be used in tests. +var fakeReader, fakeHash, fakeSize = func() (io.ReadSeeker, string, int64) { + content := []byte("fake content") + h := sha512.New384() + h.Write(content) + return bytes.NewReader(content), fmt.Sprintf("%x", h.Sum(nil)), int64(len(content)) +}() + +type suite struct { + jujutesting.IsolatedMgoSuite + client *csclient.Client + srv *httptest.Server + handler charmstore.HTTPCloseHandler + serverParams charmstore.ServerParams + discharge func(cond, arg string) ([]checkers.Caveat, error) +} + +var _ = gc.Suite(&suite{}) + +func (s *suite) SetUpTest(c *gc.C) { + s.IsolatedMgoSuite.SetUpTest(c) + s.startServer(c, s.Session) + s.client = csclient.New(csclient.Params{ + URL: s.srv.URL, + User: s.serverParams.AuthUsername, + Password: s.serverParams.AuthPassword, + }) +} + +func (s *suite) TearDownTest(c *gc.C) { + s.srv.Close() + s.handler.Close() + s.IsolatedMgoSuite.TearDownTest(c) +} + +func (s *suite) startServer(c *gc.C, session *mgo.Session) { + s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { + return nil, fmt.Errorf("no discharge") + } + + discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return s.discharge(cond, arg) + }) + + serverParams := charmstore.ServerParams{ + AuthUsername: "test-user", + AuthPassword: "test-password", + IdentityLocation: discharger.Service.Location(), + PublicKeyLocator: discharger, + } + + db := session.DB("charmstore") + handler, err := charmstore.NewServer(db, nil, "", serverParams, charmstore.V4) + c.Assert(err, gc.IsNil) + s.handler = handler + s.srv = httptest.NewServer(handler) + s.serverParams = serverParams + +} + +func (s *suite) TestDefaultServerURL(c *gc.C) { + // Add a charm used for tests. + err := s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/vivid/testing-wordpress-42"), + charmRepo.CharmDir("wordpress"), + 42, + ) + c.Assert(err, gc.IsNil) + + // Patch the default server URL. 
+ s.PatchValue(&csclient.ServerURL, s.srv.URL) + + // Instantiate a client using the default server URL. + client := csclient.New(csclient.Params{ + User: s.serverParams.AuthUsername, + Password: s.serverParams.AuthPassword, + }) + c.Assert(client.ServerURL(), gc.Equals, s.srv.URL) + + // Check that the request succeeds. + err = client.Get("/vivid/testing-wordpress-42/expand-id", nil) + c.Assert(err, gc.IsNil) +} + +func (s *suite) TestSetHTTPHeader(c *gc.C) { + var header http.Header + srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { + header = req.Header + })) + defer srv.Close() + + sendRequest := func(client *csclient.Client) { + req, err := http.NewRequest("GET", "", nil) + c.Assert(err, jc.ErrorIsNil) + _, err = client.Do(req, "/") + c.Assert(err, jc.ErrorIsNil) + } + client := csclient.New(csclient.Params{ + URL: srv.URL, + }) + + // Make a first request without custom headers. + sendRequest(client) + defaultHeaderLen := len(header) + + // Make a second request adding a couple of custom headers. + h := make(http.Header) + h.Set("k1", "v1") + h.Add("k2", "v2") + h.Add("k2", "v3") + client.SetHTTPHeader(h) + sendRequest(client) + c.Assert(header, gc.HasLen, defaultHeaderLen+len(h)) + c.Assert(header.Get("k1"), gc.Equals, "v1") + c.Assert(header[http.CanonicalHeaderKey("k2")], jc.DeepEquals, []string{"v2", "v3"}) + + // Make a third request without custom headers. + client.SetHTTPHeader(nil) + sendRequest(client) + c.Assert(header, gc.HasLen, defaultHeaderLen) +} + +var getTests = []struct { + about string + path string + nilResult bool + expectResult interface{} + expectError string + expectErrorCode params.ErrorCode +}{{ + about: "success", + path: "/wordpress/expand-id", + expectResult: []params.ExpandedId{{ + Id: "cs:utopic/wordpress-42", + }}, +}, { + about: "success with nil result", + path: "/wordpress/expand-id", + nilResult: true, +}, { + about: "non-absolute path", + path: "wordpress", + expectError: `path "wordpress" is not absolute`, +}, { + about: "URL parse error", + path: "/wordpress/%zz", + expectError: `parse .*: invalid URL escape "%zz"`, +}, { + about: "result with error code", + path: "/blahblah", + expectError: "not found", + expectErrorCode: params.ErrNotFound, +}} + +func (s *suite) TestGet(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + url := charm.MustParseURL("~charmers/utopic/wordpress-42") + err := s.client.UploadCharmWithRevision(url, ch, 42) + c.Assert(err, gc.IsNil) + + for i, test := range getTests { + c.Logf("test %d: %s", i, test.about) + + // Send the request. + var result json.RawMessage + var resultPtr interface{} + if !test.nilResult { + resultPtr = &result + } + err = s.client.Get(test.path, resultPtr) + + // Check the response. 
+ if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError, gc.Commentf("error is %T; %#v", err, err)) + c.Assert(result, gc.IsNil) + cause := errgo.Cause(err) + if code, ok := cause.(params.ErrorCode); ok { + c.Assert(code, gc.Equals, test.expectErrorCode) + } else { + c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) + } + continue + } + c.Assert(err, gc.IsNil) + if test.expectResult != nil { + c.Assert(string(result), jc.JSONEquals, test.expectResult) + } + } +} + +var putErrorTests = []struct { + about string + path string + val interface{} + expectError string + expectErrorCode params.ErrorCode +}{{ + about: "bad JSON val", + path: "/~charmers/utopic/wordpress-42/meta/extra-info/foo", + val: make(chan int), + expectError: `cannot marshal PUT body: json: unsupported type: chan int`, +}, { + about: "non-absolute path", + path: "wordpress", + expectError: `path "wordpress" is not absolute`, +}, { + about: "URL parse error", + path: "/wordpress/%zz", + expectError: `parse .*: invalid URL escape "%zz"`, +}, { + about: "result with error code", + path: "/blahblah", + expectError: "not found", + expectErrorCode: params.ErrNotFound, +}} + +func (s *suite) TestPutError(c *gc.C) { + err := s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/utopic/wordpress-42"), + charmRepo.CharmDir("wordpress"), + 42) + c.Assert(err, gc.IsNil) + + checkErr := func(err error, expectError string, expectErrorCode params.ErrorCode) { + c.Assert(err, gc.ErrorMatches, expectError) + cause := errgo.Cause(err) + if code, ok := cause.(params.ErrorCode); ok { + c.Assert(code, gc.Equals, expectErrorCode) + } else { + c.Assert(expectErrorCode, gc.Equals, params.ErrorCode("")) + } + } + var result string + + for i, test := range putErrorTests { + c.Logf("test %d: %s", i, test.about) + err := s.client.Put(test.path, test.val) + checkErr(err, test.expectError, test.expectErrorCode) + err = s.client.PutWithResponse(test.path, test.val, &result) + checkErr(err, test.expectError, test.expectErrorCode) + c.Assert(result, gc.Equals, "") + } +} + +func (s *suite) TestPutSuccess(c *gc.C) { + err := s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/utopic/wordpress-42"), + charmRepo.CharmDir("wordpress"), + 42) + c.Assert(err, gc.IsNil) + + perms := []string{"bob"} + err = s.client.Put("/~charmers/utopic/wordpress-42/meta/perm/read", perms) + c.Assert(err, gc.IsNil) + var got []string + err = s.client.Get("/~charmers/utopic/wordpress-42/meta/perm/read", &got) + c.Assert(err, gc.IsNil) + c.Assert(got, jc.DeepEquals, perms) +} + +func (s *suite) TestPutWithResponseSuccess(c *gc.C) { + err := s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/development/wily/wordpress-42"), + charmRepo.CharmDir("wordpress"), + 42) + c.Assert(err, gc.IsNil) + + publish := ¶ms.PublishRequest{ + Published: true, + } + var result params.PublishResponse + err = s.client.PutWithResponse("/~charmers/wily/wordpress-42/publish", publish, &result) + c.Assert(err, gc.IsNil) + c.Assert(result.Id, jc.DeepEquals, charm.MustParseURL("~charmers/wily/wordpress-42")) + + // Check that the method accepts a nil result. + err = s.client.PutWithResponse("/~charmers/wily/wordpress-42/publish", publish, nil) + c.Assert(err, gc.IsNil) +} + +func (s *suite) TestGetArchive(c *gc.C) { + if jujutesting.MgoServer.WithoutV8 { + c.Skip("mongo javascript not enabled") + } + key := s.checkGetArchive(c) + + // Check that the downloads count for the entity has been updated. 
+ s.checkCharmDownloads(c, key, 1) +} + +func (s *suite) TestGetArchiveWithStatsDisabled(c *gc.C) { + s.client.DisableStats() + key := s.checkGetArchive(c) + + // Check that the downloads count for the entity has not been updated. + s.checkCharmDownloads(c, key, 0) +} + +func (s *suite) TestStatsUpdate(c *gc.C) { + if jujutesting.MgoServer.WithoutV8 { + c.Skip("mongo javascript not enabled") + } + key := s.checkGetArchive(c) + s.checkCharmDownloads(c, key, 1) + err := s.client.StatsUpdate(params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + CharmReference: charm.MustParseURL("~charmers/utopic/wordpress-42"), + Timestamp: time.Now(), + Type: params.UpdateDeploy, + }}, + }) + c.Assert(err, gc.IsNil) + s.checkCharmDownloads(c, key, 2) +} + +var checkDownloadsAttempt = utils.AttemptStrategy{ + Total: 1 * time.Second, + Delay: 100 * time.Millisecond, +} + +func (s *suite) checkCharmDownloads(c *gc.C, key string, expect int64) { + stableCount := 0 + for a := checkDownloadsAttempt.Start(); a.Next(); { + count := s.statsForKey(c, key) + if count == expect { + // Wait for a couple of iterations to make sure that it's stable. + if stableCount++; stableCount >= 2 { + return + } + } else { + stableCount = 0 + } + if !a.HasNext() { + c.Errorf("unexpected download count for %s, got %d, want %d", key, count, expect) + } + } +} + +func (s *suite) statsForKey(c *gc.C, key string) int64 { + var result []params.Statistic + err := s.client.Get("/stats/counter/"+key, &result) + c.Assert(err, gc.IsNil) + c.Assert(result, gc.HasLen, 1) + return result[0].Count +} + +func (s *suite) checkGetArchive(c *gc.C) string { + ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") + + // Open the archive and calculate its hash and size. + r, expectHash, expectSize := archiveHashAndSize(c, ch.Path) + r.Close() + + url := charm.MustParseURL("~charmers/utopic/wordpress-42") + err := s.client.UploadCharmWithRevision(url, ch, 42) + c.Assert(err, gc.IsNil) + + rb, id, hash, size, err := s.client.GetArchive(url) + c.Assert(err, gc.IsNil) + defer rb.Close() + c.Assert(id, jc.DeepEquals, url) + c.Assert(hash, gc.Equals, expectHash) + c.Assert(size, gc.Equals, expectSize) + + h := sha512.New384() + size, err = io.Copy(h, rb) + c.Assert(err, gc.IsNil) + c.Assert(size, gc.Equals, expectSize) + c.Assert(fmt.Sprintf("%x", h.Sum(nil)), gc.Equals, expectHash) + + // Return the stats key for the archive download. 
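+ // Stats keys are colon-separated: kind:series:name:user:revision.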
+ keys := []string{params.StatsArchiveDownload, "utopic", "wordpress", "charmers", "42"} + return strings.Join(keys, ":") +} + +func (s *suite) TestGetArchiveErrorNotFound(c *gc.C) { + url := charm.MustParseURL("no-such") + r, id, hash, size, err := s.client.GetArchive(url) + c.Assert(err, gc.ErrorMatches, `cannot get archive: no matching charm or bundle for "cs:no-such"`) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) + c.Assert(r, gc.IsNil) + c.Assert(id, gc.IsNil) + c.Assert(hash, gc.Equals, "") + c.Assert(size, gc.Equals, int64(0)) +} + +var getArchiveWithBadResponseTests = []struct { + about string + response *http.Response + error error + expectError string +}{{ + about: "http client Get failure", + error: errgo.New("round trip failure"), + expectError: "cannot get archive: Get .*: round trip failure", +}, { + about: "no entity id header", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + params.ContentHashHeader: {fakeHash}, + }, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: fakeSize, + }, + expectError: "no " + params.EntityIdHeader + " header found in response", +}, { + about: "invalid entity id header", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + params.ContentHashHeader: {fakeHash}, + params.EntityIdHeader: {"no:such"}, + }, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: fakeSize, + }, + expectError: `invalid entity id found in response: charm or bundle URL has invalid schema: "no:such"`, +}, { + about: "partial entity id header", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + params.ContentHashHeader: {fakeHash}, + params.EntityIdHeader: {"django"}, + }, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: fakeSize, + }, + expectError: `archive get returned not fully qualified entity id "cs:django"`, +}, { + about: "no hash header", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + params.EntityIdHeader: {"cs:utopic/django-42"}, + }, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: fakeSize, + }, + expectError: "no " + params.ContentHashHeader + " header found in response", +}, { + about: "no content length", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Header: http.Header{ + params.ContentHashHeader: {fakeHash}, + params.EntityIdHeader: {"cs:utopic/django-42"}, + }, + Body: ioutil.NopCloser(strings.NewReader("")), + ContentLength: -1, + }, + expectError: "no content length found in response", +}} + +func (s *suite) TestGetArchiveWithBadResponse(c *gc.C) { + id := charm.MustParseURL("wordpress") + for i, test := range getArchiveWithBadResponseTests { + c.Logf("test %d: %s", i, test.about) + cl := badResponseClient(test.response, test.error) + _, _, _, _, err := cl.GetArchive(id) + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} + +func (s *suite) TestUploadArchiveWithCharm(c *gc.C) { + path := charmRepo.CharmArchivePath(c.MkDir(), "wordpress") + + // Post the archive. 
+ s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-0") + + // Posting the same archive a second time does not change its resulting id. + s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-0") + + // Posting a different archive to the same URL increases the resulting id + // revision. + path = charmRepo.CharmArchivePath(c.MkDir(), "mysql") + s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-1") +} + +func (s *suite) prepareBundleCharms(c *gc.C) { + // Add the charms required by the wordpress-simple bundle to the store. + err := s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/utopic/wordpress-42"), + charmRepo.CharmArchive(c.MkDir(), "wordpress"), + 42, + ) + c.Assert(err, gc.IsNil) + err = s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/utopic/mysql-47"), + charmRepo.CharmArchive(c.MkDir(), "mysql"), + 47, + ) + c.Assert(err, gc.IsNil) +} + +func (s *suite) TestUploadArchiveWithBundle(c *gc.C) { + s.prepareBundleCharms(c) + path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") + // Post the archive. + s.checkUploadArchive(c, path, "~charmers/bundle/wordpress-simple", "cs:~charmers/bundle/wordpress-simple-0") +} + +var uploadArchiveWithBadResponseTests = []struct { + about string + response *http.Response + error error + expectError string +}{{ + about: "http client Post failure", + error: errgo.New("round trip failure"), + expectError: "cannot post archive: Post .*: round trip failure", +}, { + about: "invalid JSON in body", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: ioutil.NopCloser(strings.NewReader("no id here")), + ContentLength: 0, + }, + expectError: `cannot unmarshal response "no id here": .*`, +}} + +func (s *suite) TestUploadArchiveWithBadResponse(c *gc.C) { + id := charm.MustParseURL("trusty/wordpress") + for i, test := range uploadArchiveWithBadResponseTests { + c.Logf("test %d: %s", i, test.about) + cl := badResponseClient(test.response, test.error) + id, err := csclient.UploadArchive(cl, id, fakeReader, fakeHash, fakeSize, -1) + c.Assert(id, gc.IsNil) + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} + +func (s *suite) TestUploadMultiSeriesArchive(c *gc.C) { + path := charmRepo.CharmArchivePath(c.MkDir(), "multi-series") + s.checkUploadArchive(c, path, "~charmers/wordpress", "cs:~charmers/wordpress-0") +} + +func (s *suite) TestUploadArchiveWithServerError(c *gc.C) { + path := charmRepo.CharmArchivePath(c.MkDir(), "wordpress") + body, hash, size := archiveHashAndSize(c, path) + defer body.Close() + + // Send an invalid hash so that the server returns an error. + url := charm.MustParseURL("~charmers/trusty/wordpress") + id, err := csclient.UploadArchive(s.client, url, body, hash+"mismatch", size, -1) + c.Assert(id, gc.IsNil) + c.Assert(err, gc.ErrorMatches, "cannot post archive: cannot put archive blob: hash mismatch") +} + +func (s *suite) checkUploadArchive(c *gc.C, path, url, expectId string) { + // Open the archive and calculate its hash and size. + body, hash, size := archiveHashAndSize(c, path) + defer body.Close() + + // Post the archive. + id, err := csclient.UploadArchive(s.client, charm.MustParseURL(url), body, hash, size, -1) + c.Assert(err, gc.IsNil) + c.Assert(id.String(), gc.Equals, expectId) + + // Ensure the entity has been properly added to the db. 
+ r, resultingId, resultingHash, resultingSize, err := s.client.GetArchive(id) + c.Assert(err, gc.IsNil) + defer r.Close() + c.Assert(resultingId, gc.DeepEquals, id) + c.Assert(resultingHash, gc.Equals, hash) + c.Assert(resultingSize, gc.Equals, size) +} + +func archiveHashAndSize(c *gc.C, path string) (r csclient.ReadSeekCloser, hash string, size int64) { + f, err := os.Open(path) + c.Assert(err, gc.IsNil) + h := sha512.New384() + size, err = io.Copy(h, f) + c.Assert(err, gc.IsNil) + _, err = f.Seek(0, 0) + c.Assert(err, gc.IsNil) + return f, fmt.Sprintf("%x", h.Sum(nil)), size +} + +func (s *suite) TestUploadCharmDir(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + id, err := s.client.UploadCharm(charm.MustParseURL("~charmers/utopic/wordpress"), ch) + c.Assert(err, gc.IsNil) + c.Assert(id.String(), gc.Equals, "cs:~charmers/utopic/wordpress-0") + s.checkUploadCharm(c, id, ch) +} + +func (s *suite) TestUploadCharmArchive(c *gc.C) { + ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") + id, err := s.client.UploadCharm(charm.MustParseURL("~charmers/trusty/wordpress"), ch) + c.Assert(err, gc.IsNil) + c.Assert(id.String(), gc.Equals, "cs:~charmers/trusty/wordpress-0") + s.checkUploadCharm(c, id, ch) +} + +func (s *suite) TestUploadCharmArchiveWithRevision(c *gc.C) { + id := charm.MustParseURL("~charmers/trusty/wordpress-42") + err := s.client.UploadCharmWithRevision( + id, + charmRepo.CharmDir("wordpress"), + 10, + ) + c.Assert(err, gc.IsNil) + ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") + s.checkUploadCharm(c, id, ch) + id.User = "" + id.Revision = 10 + s.checkUploadCharm(c, id, ch) +} + +func (s *suite) TestUploadCharmArchiveWithUnwantedRevision(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + _, err := s.client.UploadCharm(charm.MustParseURL("~charmers/bundle/wp-20"), ch) + c.Assert(err, gc.ErrorMatches, `revision specified in "cs:~charmers/bundle/wp-20", but should not be specified`) +} + +func (s *suite) TestUploadCharmErrorUnknownType(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + unknown := struct { + charm.Charm + }{ch} + id, err := s.client.UploadCharm(charm.MustParseURL("~charmers/trusty/wordpress"), unknown) + c.Assert(err, gc.ErrorMatches, `cannot open charm archive: cannot get the archive for entity type .*`) + c.Assert(id, gc.IsNil) +} + +func (s *suite) TestUploadCharmErrorOpenArchive(c *gc.C) { + // Since the internal code path is shared between charms and bundles, just + // using a charm for this test also exercises the same failure for bundles. + ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") + ch.Path = "no-such-file" + id, err := s.client.UploadCharm(charm.MustParseURL("trusty/wordpress"), ch) + c.Assert(err, gc.ErrorMatches, `cannot open charm archive: open no-such-file: no such file or directory`) + c.Assert(id, gc.IsNil) +} + +func (s *suite) TestUploadCharmErrorArchiveTo(c *gc.C) { + // Since the internal code path is shared between charms and bundles, just + // using a charm for this test also exercises the same failure for bundles. 
+ id, err := s.client.UploadCharm(charm.MustParseURL("trusty/wordpress"), failingArchiverTo{}) + c.Assert(err, gc.ErrorMatches, `cannot open charm archive: cannot create entity archive: bad wolf`) + c.Assert(id, gc.IsNil) +} + +type failingArchiverTo struct { + charm.Charm +} + +func (failingArchiverTo) ArchiveTo(io.Writer) error { + return errgo.New("bad wolf") +} + +func (s *suite) checkUploadCharm(c *gc.C, id *charm.URL, ch charm.Charm) { + r, _, _, _, err := s.client.GetArchive(id) + c.Assert(err, gc.IsNil) + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + result, err := charm.ReadCharmArchiveBytes(data) + c.Assert(err, gc.IsNil) + // Comparing the charm metadata is sufficient for ensuring the result is + // the same charm previously uploaded. + c.Assert(result.Meta(), jc.DeepEquals, ch.Meta()) +} + +func (s *suite) TestUploadBundleDir(c *gc.C) { + s.prepareBundleCharms(c) + b := charmRepo.BundleDir("wordpress-simple") + id, err := s.client.UploadBundle(charm.MustParseURL("~charmers/bundle/wordpress-simple"), b) + c.Assert(err, gc.IsNil) + c.Assert(id.String(), gc.Equals, "cs:~charmers/bundle/wordpress-simple-0") + s.checkUploadBundle(c, id, b) +} + +func (s *suite) TestUploadBundleArchive(c *gc.C) { + s.prepareBundleCharms(c) + path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") + b, err := charm.ReadBundleArchive(path) + c.Assert(err, gc.IsNil) + id, err := s.client.UploadBundle(charm.MustParseURL("~charmers/bundle/wp"), b) + c.Assert(err, gc.IsNil) + c.Assert(id.String(), gc.Equals, "cs:~charmers/bundle/wp-0") + s.checkUploadBundle(c, id, b) +} + +func (s *suite) TestUploadBundleArchiveWithUnwantedRevision(c *gc.C) { + s.prepareBundleCharms(c) + path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") + b, err := charm.ReadBundleArchive(path) + c.Assert(err, gc.IsNil) + _, err = s.client.UploadBundle(charm.MustParseURL("~charmers/bundle/wp-20"), b) + c.Assert(err, gc.ErrorMatches, `revision specified in "cs:~charmers/bundle/wp-20", but should not be specified`) +} + +func (s *suite) TestUploadBundleArchiveWithRevision(c *gc.C) { + s.prepareBundleCharms(c) + path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") + b, err := charm.ReadBundleArchive(path) + c.Assert(err, gc.IsNil) + id := charm.MustParseURL("~charmers/bundle/wp-22") + err = s.client.UploadBundleWithRevision(id, b, 34) + c.Assert(err, gc.IsNil) + s.checkUploadBundle(c, id, b) + id.User = "" + id.Revision = 34 + s.checkUploadBundle(c, id, b) +} + +func (s *suite) TestUploadBundleErrorUploading(c *gc.C) { + // Uploading without specifying the series should return an error. + // Note that the possible upload errors are already extensively exercised + // as part of the client.uploadArchive tests. 
+ id, err := s.client.UploadBundle( + charm.MustParseURL("~charmers/wordpress-simple"), + charmRepo.BundleDir("wordpress-simple"), + ) + c.Assert(err, gc.ErrorMatches, `cannot post archive: cannot read charm archive: archive file "metadata.yaml" not found`) + c.Assert(id, gc.IsNil) +} + +func (s *suite) TestUploadBundleErrorUnknownType(c *gc.C) { + b := charmRepo.BundleDir("wordpress-simple") + unknown := struct { + charm.Bundle + }{b} + id, err := s.client.UploadBundle(charm.MustParseURL("bundle/wordpress"), unknown) + c.Assert(err, gc.ErrorMatches, `cannot open bundle archive: cannot get the archive for entity type .*`) + c.Assert(id, gc.IsNil) +} + +func (s *suite) checkUploadBundle(c *gc.C, id *charm.URL, b charm.Bundle) { + r, _, _, _, err := s.client.GetArchive(id) + c.Assert(err, gc.IsNil) + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + result, err := charm.ReadBundleArchiveBytes(data) + c.Assert(err, gc.IsNil) + // Comparing the bundle data is sufficient for ensuring the result is + // the same bundle previously uploaded. + c.Assert(result.Data(), jc.DeepEquals, b.Data()) +} + +func (s *suite) TestDoAuthorization(c *gc.C) { + // Add a charm to be deleted. + err := s.client.UploadCharmWithRevision( + charm.MustParseURL("~charmers/utopic/wordpress-42"), + charmRepo.CharmArchive(c.MkDir(), "wordpress"), + 42, + ) + c.Assert(err, gc.IsNil) + + // Check that when we use incorrect authorization, + // we get an error trying to delete the charm + client := csclient.New(csclient.Params{ + URL: s.srv.URL, + User: s.serverParams.AuthUsername, + Password: "bad password", + }) + req, err := http.NewRequest("DELETE", "", nil) + c.Assert(err, gc.IsNil) + _, err = client.Do(req, "/~charmers/utopic/wordpress-42/archive") + c.Assert(err, gc.ErrorMatches, "invalid user name or password") + c.Assert(errgo.Cause(err), gc.Equals, params.ErrUnauthorized) + + client = csclient.New(csclient.Params{ + URL: s.srv.URL, + User: s.serverParams.AuthUsername, + Password: s.serverParams.AuthPassword, + }) + + // Check that the charm is still there. + err = client.Get("/~charmers/utopic/wordpress-42/expand-id", nil) + c.Assert(err, gc.IsNil) + + // Then check that when we use the correct authorization, + // the delete succeeds. + req, err = http.NewRequest("DELETE", "", nil) + c.Assert(err, gc.IsNil) + resp, err := client.Do(req, "/~charmers/utopic/wordpress-42/archive") + c.Assert(err, gc.IsNil) + resp.Body.Close() + + // Check that it's now really gone. 
+ err = client.Get("/utopic/wordpress-42/expand-id", nil) + c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for "cs:utopic/wordpress-42"`) +} + +var getWithBadResponseTests = []struct { + about string + error error + response *http.Response + responseErr error + expectError string +}{{ + about: "http client Get failure", + error: errgo.New("round trip failure"), + expectError: "Get .*: round trip failure", +}, { + about: "body read error", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: ioutil.NopCloser(&errorReader{"body read error"}), + ContentLength: -1, + }, + expectError: "cannot read response body: body read error", +}, { + about: "badly formatted json response", + response: &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: ioutil.NopCloser(strings.NewReader("bad")), + ContentLength: -1, + }, + expectError: `cannot unmarshal response "bad": .*`, +}, { + about: "badly formatted json error", + response: &http.Response{ + Status: "404 Not found", + StatusCode: 404, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: ioutil.NopCloser(strings.NewReader("bad")), + ContentLength: -1, + }, + expectError: `cannot unmarshal error response "bad": .*`, +}, { + about: "error response with empty message", + response: &http.Response{ + Status: "404 Not found", + StatusCode: 404, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: ioutil.NopCloser(bytes.NewReader(mustMarshalJSON(¶ms.Error{ + Code: "foo", + }))), + ContentLength: -1, + }, + expectError: "error response with empty message .*", +}} + +func (s *suite) TestGetWithBadResponse(c *gc.C) { + for i, test := range getWithBadResponseTests { + c.Logf("test %d: %s", i, test.about) + cl := badResponseClient(test.response, test.error) + var result interface{} + err := cl.Get("/foo", &result) + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} + +func badResponseClient(resp *http.Response, err error) *csclient.Client { + client := httpbakery.NewHTTPClient() + client.Transport = &cannedRoundTripper{ + resp: resp, + error: err, + } + return csclient.New(csclient.Params{ + URL: "http://0.1.2.3", + User: "bob", + HTTPClient: client, + }) +} + +var hyphenateTests = []struct { + val string + expect string +}{{ + val: "Hello", + expect: "hello", +}, { + val: "HelloThere", + expect: "hello-there", +}, { + val: "HelloHTTP", + expect: "hello-http", +}, { + val: "helloHTTP", + expect: "hello-http", +}, { + val: "hellothere", + expect: "hellothere", +}, { + val: "Long4Camel32WithDigits45", + expect: "long4-camel32-with-digits45", +}, { + // The result here is equally dubious, but Go identifiers + // should not contain underscores. + val: "With_Dubious_Underscore", + expect: "with_-dubious_-underscore", +}} + +func (s *suite) TestHyphenate(c *gc.C) { + for i, test := range hyphenateTests { + c.Logf("test %d. %q", i, test.val) + c.Assert(csclient.Hyphenate(test.val), gc.Equals, test.expect) + } +} + +func (s *suite) TestDo(c *gc.C) { + // Do is tested fairly comprehensively (but indirectly) + // in TestGet, so just a trivial smoke test here. 
+ url := charm.MustParseURL("~charmers/utopic/wordpress-42") + err := s.client.UploadCharmWithRevision( + url, + charmRepo.CharmArchive(c.MkDir(), "wordpress"), + 42, + ) + c.Assert(err, gc.IsNil) + err = s.client.PutExtraInfo(url, map[string]interface{}{ + "foo": "bar", + }) + c.Assert(err, gc.IsNil) + + req, _ := http.NewRequest("GET", "", nil) + resp, err := s.client.Do(req, "/wordpress/meta/extra-info/foo") + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + data, err := ioutil.ReadAll(resp.Body) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, `"bar"`) +} + +var metaBadTypeTests = []struct { + result interface{} + expectError string +}{{ + result: "", + expectError: "expected pointer, not string", +}, { + result: new(string), + expectError: `expected pointer to struct, not \*string`, +}, { + result: new(struct{ Embed }), + expectError: "anonymous fields not supported", +}, { + expectError: "expected valid result pointer, not nil", +}} + +func (s *suite) TestMetaBadType(c *gc.C) { + id := charm.MustParseURL("wordpress") + for _, test := range metaBadTypeTests { + _, err := s.client.Meta(id, test.result) + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} + +type Embed struct{} +type embed struct{} + +func (s *suite) TestMeta(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + url := charm.MustParseURL("~charmers/utopic/wordpress-42") + purl := charm.MustParseURL("utopic/wordpress-42") + err := s.client.UploadCharmWithRevision(url, ch, 42) + c.Assert(err, gc.IsNil) + + // Put some extra-info. + err = s.client.PutExtraInfo(url, map[string]interface{}{ + "attr": "value", + }) + c.Assert(err, gc.IsNil) + + tests := []struct { + about string + id string + expectResult interface{} + expectError string + expectErrorCode params.ErrorCode + }{{ + about: "no fields", + id: "utopic/wordpress", + expectResult: &struct{}{}, + }, { + about: "single field", + id: "utopic/wordpress", + expectResult: &struct { + CharmMetadata *charm.Meta + }{ + CharmMetadata: ch.Meta(), + }, + }, { + about: "three fields", + id: "wordpress", + expectResult: &struct { + CharmMetadata *charm.Meta + CharmConfig *charm.Config + ExtraInfo map[string]string + }{ + CharmMetadata: ch.Meta(), + CharmConfig: ch.Config(), + ExtraInfo: map[string]string{"attr": "value"}, + }, + }, { + about: "tagged field", + id: "wordpress", + expectResult: &struct { + Foo *charm.Meta `csclient:"charm-metadata"` + Attr string `csclient:"extra-info/attr"` + }{ + Foo: ch.Meta(), + Attr: "value", + }, + }, { + about: "id not found", + id: "bogus", + expectResult: &struct{}{}, + expectError: `cannot get "/bogus/meta/any": no matching charm or bundle for "cs:bogus"`, + expectErrorCode: params.ErrNotFound, + }, { + about: "unmarshal into invalid type", + id: "wordpress", + expectResult: new(struct { + CharmMetadata []string + }), + expectError: `cannot unmarshal charm-metadata: json: cannot unmarshal object into Go value of type \[]string`, + }, { + about: "unmarshal into struct with unexported fields", + id: "wordpress", + expectResult: &struct { + unexported int + CharmMetadata *charm.Meta + // Embedded anonymous fields don't get tagged as unexported + // due to https://code.google.com/p/go/issues/detail?id=7247 + // TODO fix in go 1.5. 
+ // embed + }{ + CharmMetadata: ch.Meta(), + }, + }, { + about: "metadata not appropriate for charm", + id: "wordpress", + expectResult: &struct { + CharmMetadata *charm.Meta + BundleMetadata *charm.BundleData + }{ + CharmMetadata: ch.Meta(), + }, + }} + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + // Make a result value of the same type as the expected result, + // but empty. + result := reflect.New(reflect.TypeOf(test.expectResult).Elem()).Interface() + id, err := s.client.Meta(charm.MustParseURL(test.id), result) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + if code, ok := errgo.Cause(err).(params.ErrorCode); ok { + c.Assert(code, gc.Equals, test.expectErrorCode) + } else { + c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) + } + c.Assert(id, gc.IsNil) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(id, jc.DeepEquals, purl) + c.Assert(result, jc.DeepEquals, test.expectResult) + } +} + +func (s *suite) TestPutExtraInfo(c *gc.C) { + s.checkPutInfo(c, false) +} + +func (s *suite) TestPutCommonInfo(c *gc.C) { + s.checkPutInfo(c, true) +} + +func (s *suite) checkPutInfo(c *gc.C, common bool) { + ch := charmRepo.CharmDir("wordpress") + url := charm.MustParseURL("~charmers/utopic/wordpress-42") + err := s.client.UploadCharmWithRevision(url, ch, 42) + c.Assert(err, gc.IsNil) + + // Put some info in. + info := map[string]interface{}{ + "attr1": "value1", + "attr2": []interface{}{"one", "two"}, + } + if common { + err = s.client.PutCommonInfo(url, info) + c.Assert(err, gc.IsNil) + } else { + err = s.client.PutExtraInfo(url, info) + c.Assert(err, gc.IsNil) + } + + // Verify that we get it back OK. + var valExtraInfo struct { + ExtraInfo map[string]interface{} + } + var valCommonInfo struct { + CommonInfo map[string]interface{} + } + if common { + _, err = s.client.Meta(url, &valCommonInfo) + c.Assert(err, gc.IsNil) + c.Assert(valCommonInfo.CommonInfo, jc.DeepEquals, info) + } else { + _, err = s.client.Meta(url, &valExtraInfo) + c.Assert(err, gc.IsNil) + c.Assert(valExtraInfo.ExtraInfo, jc.DeepEquals, info) + } + + // Put some more in. + if common { + err = s.client.PutCommonInfo(url, map[string]interface{}{ + "attr3": "three", + }) + c.Assert(err, gc.IsNil) + } else { + err = s.client.PutExtraInfo(url, map[string]interface{}{ + "attr3": "three", + }) + c.Assert(err, gc.IsNil) + } + // Verify that we get all the previous results and the new value. 
+ info["attr3"] = "three" + if common { + _, err = s.client.Meta(url, &valCommonInfo) + c.Assert(err, gc.IsNil) + c.Assert(valCommonInfo.CommonInfo, jc.DeepEquals, info) + } else { + _, err = s.client.Meta(url, &valExtraInfo) + c.Assert(err, gc.IsNil) + c.Assert(valExtraInfo.ExtraInfo, jc.DeepEquals, info) + } +} + +func (s *suite) TestPutExtraInfoWithError(c *gc.C) { + err := s.client.PutExtraInfo(charm.MustParseURL("wordpress"), map[string]interface{}{"attr": "val"}) + c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for "cs:wordpress"`) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) +} + +func (s *suite) TestPutCommonInfoWithError(c *gc.C) { + err := s.client.PutCommonInfo(charm.MustParseURL("wordpress"), map[string]interface{}{"homepage": "val"}) + c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for "cs:wordpress"`) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) +} + +type errorReader struct { + error string +} + +func (e *errorReader) Read(buf []byte) (int, error) { + return 0, errgo.New(e.error) +} + +type cannedRoundTripper struct { + resp *http.Response + error error +} + +func (r *cannedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return r.resp, r.error +} + +func mustMarshalJSON(x interface{}) []byte { + data, err := json.Marshal(x) + if err != nil { + panic(err) + } + return data +} + +func (s *suite) TestLog(c *gc.C) { + logs := []struct { + typ params.LogType + level params.LogLevel + message string + urls []*charm.URL + }{{ + typ: params.IngestionType, + level: params.InfoLevel, + message: "ingestion info", + urls: nil, + }, { + typ: params.LegacyStatisticsType, + level: params.ErrorLevel, + message: "statistics error", + urls: []*charm.URL{ + charm.MustParseURL("cs:mysql"), + charm.MustParseURL("cs:wordpress"), + }, + }} + + for _, log := range logs { + err := s.client.Log(log.typ, log.level, log.message, log.urls...) + c.Assert(err, gc.IsNil) + } + var result []*params.LogResponse + err := s.client.Get("/log", &result) + c.Assert(err, gc.IsNil) + c.Assert(result, gc.HasLen, len(logs)) + for i, l := range result { + c.Assert(l.Type, gc.Equals, logs[len(logs)-(1+i)].typ) + c.Assert(l.Level, gc.Equals, logs[len(logs)-(1+i)].level) + var msg string + err := json.Unmarshal([]byte(l.Data), &msg) + c.Assert(err, gc.IsNil) + c.Assert(msg, gc.Equals, logs[len(logs)-(1+i)].message) + c.Assert(l.URLs, jc.DeepEquals, logs[len(logs)-(1+i)].urls) + } +} + +func (s *suite) TestMacaroonAuthorization(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + curl := charm.MustParseURL("~charmers/utopic/wordpress-42") + purl := charm.MustParseURL("utopic/wordpress-42") + err := s.client.UploadCharmWithRevision(curl, ch, 42) + c.Assert(err, gc.IsNil) + + err = s.client.Put("/"+curl.Path()+"/meta/perm/read", []string{"bob"}) + c.Assert(err, gc.IsNil) + + // Create a client without basic auth credentials + client := csclient.New(csclient.Params{ + URL: s.srv.URL, + }) + + var result struct{ IdRevision struct{ Revision int } } + // TODO 2015-01-23: once supported, rewrite the test using POST requests. 
+ _, err = client.Meta(purl, &result) + c.Assert(err, gc.ErrorMatches, `cannot get "/utopic/wordpress-42/meta/any\?include=id-revision": cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) + c.Assert(httpbakery.IsDischargeError(errgo.Cause(err)), gc.Equals, true) + + s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { + return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil + } + _, err = client.Meta(curl, &result) + c.Assert(err, gc.IsNil) + c.Assert(result.IdRevision.Revision, gc.Equals, curl.Revision) + + visitURL := "http://0.1.2.3/visitURL" + s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { + return nil, &httpbakery.Error{ + Code: httpbakery.ErrInteractionRequired, + Message: "interaction required", + Info: &httpbakery.ErrorInfo{ + VisitURL: visitURL, + WaitURL: "http://0.1.2.3/waitURL", + }} + } + + client = csclient.New(csclient.Params{ + URL: s.srv.URL, + VisitWebPage: func(vurl *neturl.URL) error { + c.Check(vurl.String(), gc.Equals, visitURL) + return fmt.Errorf("stopping interaction") + }}) + + _, err = client.Meta(purl, &result) + c.Assert(err, gc.ErrorMatches, `cannot get "/utopic/wordpress-42/meta/any\?include=id-revision": cannot get discharge from ".*": cannot start interactive session: stopping interaction`) + c.Assert(result.IdRevision.Revision, gc.Equals, curl.Revision) + c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) +} + +func (s *suite) TestLogin(c *gc.C) { + ch := charmRepo.CharmDir("wordpress") + url := charm.MustParseURL("~charmers/utopic/wordpress-42") + purl := charm.MustParseURL("utopic/wordpress-42") + err := s.client.UploadCharmWithRevision(url, ch, 42) + c.Assert(err, gc.IsNil) + + err = s.client.Put("/"+url.Path()+"/meta/perm/read", []string{"bob"}) + c.Assert(err, gc.IsNil) + httpClient := httpbakery.NewHTTPClient() + client := csclient.New(csclient.Params{ + URL: s.srv.URL, + HTTPClient: httpClient, + }) + + var result struct{ IdRevision struct{ Revision int } } + _, err = client.Meta(purl, &result) + c.Assert(err, gc.NotNil) + + // Try logging in when the discharger fails. + err = client.Login() + c.Assert(err, gc.ErrorMatches, `cannot retrieve the authentication macaroon: cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) + + // Allow the discharge. + s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { + return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil + } + err = client.Login() + c.Assert(err, gc.IsNil) + + // Change discharge so that we're sure the cookies are being + // used rather than the discharge mechanism. + s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { + return nil, fmt.Errorf("no discharge") + } + + // Check that the request still works. + _, err = client.Meta(purl, &result) + c.Assert(err, gc.IsNil) + c.Assert(result.IdRevision.Revision, gc.Equals, url.Revision) + + // Check that we've got one cookie. + srvURL, err := neturl.Parse(s.srv.URL) + c.Assert(err, gc.IsNil) + c.Assert(httpClient.Jar.Cookies(srvURL), gc.HasLen, 1) + + // Log in again. + err = client.Login() + c.Assert(err, gc.IsNil) + + // Check that we still only have one cookie. 
+ c.Assert(httpClient.Jar.Cookies(srvURL), gc.HasLen, 1) +} + +func (s *suite) TestWhoAmI(c *gc.C) { + httpClient := httpbakery.NewHTTPClient() + client := csclient.New(csclient.Params{ + URL: s.srv.URL, + HTTPClient: httpClient, + }) + response, err := client.WhoAmI() + c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) + s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { + return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil + } + + response, err = client.WhoAmI() + c.Assert(err, gc.IsNil) + c.Assert(response.User, gc.Equals, "bob") +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/export_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package csclient + +var ( + Hyphenate = hyphenate + UploadArchive = (*Client).uploadArchive +) === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/package_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package csclient_test + +import ( + "testing" + + jujutesting "github.com/juju/testing" +) + +func TestPackage(t *testing.T) { + jujutesting.MgoTestPackage(t, nil) +} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/error.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/error.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/error.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,83 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package params + +import ( + "fmt" +) + +// ErrorCode holds the class of an error in machine-readable format. +// It is also an error in its own right. +type ErrorCode string + +func (code ErrorCode) Error() string { + return string(code) +} + +func (code ErrorCode) ErrorCode() ErrorCode { + return code +} + +const ( + ErrNotFound ErrorCode = "not found" + ErrMetadataNotFound ErrorCode = "metadata not found" + ErrForbidden ErrorCode = "forbidden" + ErrBadRequest ErrorCode = "bad request" + // TODO change to ErrAlreadyExists + ErrDuplicateUpload ErrorCode = "duplicate upload" + ErrMultipleErrors ErrorCode = "multiple errors" + ErrUnauthorized ErrorCode = "unauthorized" + ErrMethodNotAllowed ErrorCode = "method not allowed" + ErrServiceUnavailable ErrorCode = "service unavailable" + ErrEntityIdNotAllowed ErrorCode = "charm or bundle id not allowed" + ErrInvalidEntity ErrorCode = "invalid charm or bundle" + + // Note that these error codes sit in the same name space + // as the bakery error codes defined in gopkg.in/macaroon-bakery.v0/httpbakery . + // In particular, ErrBadRequest is a shared error code + // which needs to share the message too. +) + +// Error represents an error - it is returned for any response that fails. 
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#errors +type Error struct { + Message string + Code ErrorCode + Info map[string]*Error `json:",omitempty"` +} + +// NewError returns a new *Error with the given error code +// and message. +func NewError(code ErrorCode, f string, a ...interface{}) error { + return &Error{ + Message: fmt.Sprintf(f, a...), + Code: code, + } +} + +// Error implements error.Error. +func (e *Error) Error() string { + return e.Message +} + +// ErrorCode holds the class of the error in +// machine readable format. +func (e *Error) ErrorCode() string { + return e.Code.Error() +} + +// ErrorInfo returns additional info on the error. +// TODO(rog) rename this so that it more accurately +// reflects its role. +func (e *Error) ErrorInfo() map[string]*Error { + return e.Info +} + +// Cause implements errgo.Causer.Cause. +func (e *Error) Cause() error { + if e.Code != "" { + return e.Code + } + return nil +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/package_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package params_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,339 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// The params package holds types that are a part of the charm store's external +// contract - they will be marshalled (or unmarshalled) as JSON +// and delivered through the HTTP API. +package params + +import ( + "encoding/json" + "time" + + "github.com/juju/utils/debugstatus" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/macaroon.v1" +) + +const ( + // ContentHashHeader specifies the header attribute + // that will hold the content hash for archive GET responses. + ContentHashHeader = "Content-Sha384" + + // EntityIdHeader specifies the header attribute that will hold the + // id of the entity for archive GET responses. + EntityIdHeader = "Entity-Id" +) + +// Special user/group names. +const ( + Everyone = "everyone" + Admin = "admin" +) + +// MetaAnyResponse holds the result of a meta/any request. +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany +type MetaAnyResponse struct { + Id *charm.URL + Meta map[string]interface{} `json:",omitempty"` +} + +// ArchiveUploadResponse holds the result of a post or a put to /id/archive. +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idarchive +type ArchiveUploadResponse struct { + Id *charm.URL + PromulgatedId *charm.URL `json:",omitempty"` +} + +// Constants for the StatsUpdateRequest +type StatsUpdateType string + +const ( + UpdateDownload StatsUpdateType = "download" // Accesses with non listed clients and web browsers. + UpdateTraffic StatsUpdateType = "traffic" // Bots and unknown clients. + UpdateDeploy StatsUpdateType = "deploy" // known clients like juju client. +) + +// StatsUpdateRequest holds the parameters for a put to /stats/update. 
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#stats-update
+type StatsUpdateRequest struct {
+ Entries []StatsUpdateEntry
+}
+
+// StatsUpdateEntry holds an entry of the StatsUpdateRequest for a put to /stats/update.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#stats-update
+type StatsUpdateEntry struct {
+ Timestamp time.Time // Time when the update happened.
+ Type StatsUpdateType // One of the constants UpdateDownload, UpdateTraffic or UpdateDeploy.
+ CharmReference *charm.URL // The charm to be updated.
+}
+
+// ExpandedId holds a charm or bundle fully qualified id.
+// A slice of ExpandedId is used as response for
+// id/expand-id GET requests.
+type ExpandedId struct {
+ Id string
+}
+
+// ArchiveSizeResponse holds the result of an
+// id/meta/archive-size GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size
+type ArchiveSizeResponse struct {
+ Size int64
+}
+
+// HashResponse holds the result of id/meta/hash and id/meta/hash256 GET
+// requests.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash
+// and https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256
+type HashResponse struct {
+ Sum string
+}
+
+// ManifestFile holds information about a charm or bundle file.
+// A slice of ManifestFile is used as response for
+// id/meta/manifest GET requests.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetamanifest
+type ManifestFile struct {
+ Name string
+ Size int64
+}
+
+// ArchiveUploadTimeResponse holds the result of an id/meta/archive-upload-time
+// GET request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-upload-time
+type ArchiveUploadTimeResponse struct {
+ UploadTime time.Time
+}
+
+// RelatedResponse holds the result of an id/meta/charm-related GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related
+type RelatedResponse struct {
+ // Requires holds an entry for each interface provided by
+ // the charm, containing all charms that require that interface.
+ Requires map[string][]MetaAnyResponse `json:",omitempty"`
+
+ // Provides holds an entry for each interface required by
+ // the charm, containing all charms that provide that interface.
+ Provides map[string][]MetaAnyResponse `json:",omitempty"`
+}
+
+// RevisionInfoResponse holds the result of an id/meta/revision-info GET
+// request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info
+type RevisionInfoResponse struct {
+ Revisions []*charm.URL
+}
+
+// SupportedSeriesResponse holds the result of an id/meta/supported-series GET
+// request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetasupported-series
+type SupportedSeriesResponse struct {
+ SupportedSeries []string
+}
+
+// BundleCount holds the result of an id/meta/bundle-unit-count
+// or bundle-machine-count GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-unit-count
+// and https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-machine-count
+type BundleCount struct {
+ Count int
+}
+
+// TagsResponse holds the result of an id/meta/tags GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetatags
+type TagsResponse struct {
+ Tags []string
+}
+
+// Published holds the result of a changes/published GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-changespublished
+type Published struct {
+ Id *charm.URL
+ PublishTime time.Time
+}
+
+// DebugStatus holds the result of the status checks.
+// This is defined for backward compatibility: new clients should use
+// debugstatus.CheckResult directly.
+type DebugStatus debugstatus.CheckResult
+
+// EntityResult holds the resolved entity ID along with any requested metadata.
+type EntityResult struct {
+ Id *charm.URL
+ // Meta holds at most one entry for each meta value
+ // specified in the include flags, holding the
+ // data that would be returned by reading /meta/meta?id=id.
+ // Metadata not relevant to a particular result will not
+ // be included.
+ Meta map[string]interface{} `json:",omitempty"`
+}
+
+// SearchResponse holds the response from a search operation.
+type SearchResponse struct {
+ SearchTime time.Duration
+ Total int
+ Results []EntityResult
+}
+
+// ListResponse holds the response from a list operation.
+type ListResponse struct {
+ Results []EntityResult
+}
+
+// IdUserResponse holds the result of an id/meta/id-user GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-user
+type IdUserResponse struct {
+ User string
+}
+
+// IdSeriesResponse holds the result of an id/meta/id-series GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-series
+type IdSeriesResponse struct {
+ Series string
+}
+
+// IdNameResponse holds the result of an id/meta/id-name GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-name
+type IdNameResponse struct {
+ Name string
+}
+
+// IdRevisionResponse holds the result of an id/meta/id-revision GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-revision
+type IdRevisionResponse struct {
+ Revision int
+}
+
+// IdResponse holds the result of an id/meta/id GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid
+type IdResponse struct {
+ Id *charm.URL
+ User string `json:",omitempty"`
+ Series string `json:",omitempty"`
+ Name string
+ Revision int
+}
+
+// PermResponse holds the result of an id/meta/perm GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaperm
+type PermResponse struct {
+ Read []string
+ Write []string
+}
+
+// PermRequest holds the request of an id/meta/perm PUT request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaperm
+type PermRequest struct {
+ Read []string
+ Write []string
+}
+
+// PromulgatedResponse holds the result of an id/meta/promulgated GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapromulgated
+type PromulgatedResponse struct {
+ Promulgated bool
+}
+
+// PromulgateRequest holds the request of an id/promulgate PUT request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpromulgate
+type PromulgateRequest struct {
+ Promulgated bool
+}
+
+// PublishRequest holds the request of an id/publish PUT request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpublish
+type PublishRequest struct {
+ Published bool
+}
+
+// PublishResponse holds the result of an id/publish PUT request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpublish
+type PublishResponse struct {
+ Id *charm.URL
+ PromulgatedId *charm.URL `json:",omitempty"`
+}
+
+// WhoAmIResponse holds the result of a whoami GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#whoami +type WhoAmIResponse struct { + User string + Groups []string +} + +const ( + // BzrDigestKey is the extra-info key used to store the Bazaar digest + BzrDigestKey = "bzr-digest" + + // LegacyDownloadStats is the extra-info key used to store the legacy + // download counts, and to retrieve them when + // charmstore.LegacyDownloadCountsEnabled is set to true. + // TODO (frankban): remove this constant when removing the legacy counts + // logic. + LegacyDownloadStats = "legacy-download-stats" +) + +// Log holds the representation of a log message. +// This is used by clients to store log events in the charm store. +type Log struct { + // Data holds the log message as a JSON-encoded value. + Data *json.RawMessage + + // Level holds the log level as a string. + Level LogLevel + + // Type holds the log type as a string. + Type LogType + + // URLs holds a slice of entity URLs associated with the log message. + URLs []*charm.URL `json:",omitempty"` +} + +// LogResponse represents a single log message and is used in the responses +// to /log GET requests. +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-log +type LogResponse struct { + // Data holds the log message as a JSON-encoded value. + Data json.RawMessage + + // Level holds the log level as a string. + Level LogLevel + + // Type holds the log type as a string. + Type LogType + + // URLs holds a slice of entity URLs associated with the log message. + URLs []*charm.URL `json:",omitempty"` + + // Time holds the time of the log. + Time time.Time +} + +// LogLevel defines log levels (e.g. "info" or "error") to be used in log +// requests and responses. +type LogLevel string + +const ( + InfoLevel LogLevel = "info" + WarningLevel LogLevel = "warning" + ErrorLevel LogLevel = "error" +) + +// LogType defines log types (e.g. "ingestion") to be used in log requests and +// responses. +type LogType string + +const ( + IngestionType LogType = "ingestion" + LegacyStatisticsType LogType = "legacyStatistics" + + IngestionStart = "ingestion started" + IngestionComplete = "ingestion completed" + + LegacyStatisticsImportStart = "legacy statistics import started" + LegacyStatisticsImportComplete = "legacy statistics import completed" +) + +// SetAuthCookie holds the parameters used to make a set-auth-cookie request +// to the charm store. +type SetAuthCookie struct { + // Macaroons holds a slice of macaroons. + Macaroons macaroon.Slice +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/params_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,41 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package params_test + +import ( + "encoding/json" + "net/textproto" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" +) + +type suite struct{} + +var _ = gc.Suite(&suite{}) + +func (*suite) TestContentHashHeaderCanonicalized(c *gc.C) { + // The header key should be canonicalized, because otherwise + // the actually produced header will be different from that + // specified. 
+ canon := textproto.CanonicalMIMEHeaderKey(params.ContentHashHeader)
+ c.Assert(canon, gc.Equals, params.ContentHashHeader)
+}
+
+func (*suite) TestBakeryErrorCompatibility(c *gc.C) {
+ err1 := httpbakery.Error{
+ Code: httpbakery.ErrBadRequest,
+ Message: "some request",
+ }
+ err2 := params.Error{
+ Code: params.ErrBadRequest,
+ Message: "some request",
+ }
+ data1, err := json.Marshal(err1)
+ c.Assert(err, gc.IsNil)
+ c.Assert(string(data1), jc.JSONEquals, err2)
+} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/stats.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/stats.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/csclient/params/stats.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,47 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package params
+
+// Define the kinds to be included in stats keys.
+const (
+ StatsArchiveDownload = "archive-download"
+ StatsArchiveDownloadPromulgated = "archive-download-promulgated"
+ StatsArchiveDelete = "archive-delete"
+ StatsArchiveFailedUpload = "archive-failed-upload"
+ StatsArchiveUpload = "archive-upload"
+ // The following kinds are in use in the legacy API.
+ StatsCharmInfo = "charm-info"
+ StatsCharmMissing = "charm-missing"
+ StatsCharmEvent = "charm-event"
+)
+
+// Statistic holds one element of a stats/counter response.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-statscounter
+type Statistic struct {
+ Key string `json:",omitempty"`
+ Date string `json:",omitempty"`
+ Count int64
+}
+
+// StatsResponse holds the result of an id/meta/stats GET request.
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetastats
+type StatsResponse struct {
+ // ArchiveDownloadCount is superseded by ArchiveDownload but maintained for
+ // backward compatibility.
+ ArchiveDownloadCount int64
+ // ArchiveDownload holds the downloads count for a specific revision of the
+ // entity.
+ ArchiveDownload StatsCount
+ // ArchiveDownloadAllRevisions holds the downloads count for all revisions
+ // of the entity.
+ ArchiveDownloadAllRevisions StatsCount
+}
+
+// StatsCount holds stats counts and is used as part of StatsResponse.
+type StatsCount struct {
+ Total int64 // Total count over all time.
+ Day int64 // Count over the last day.
+ Week int64 // Count over the last week.
+ Month int64 // Count over the last month.
+} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv' --- src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@
+github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z
+github.com/juju/blobstore git 3e9b30af648f96e85d8f41f946ae4a1ce0ce588b 2015-06-11T10:42:44Z
+github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z
+github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z
+github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z
+github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z
+github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z
+github.com/juju/httprequest git 1015665b66c26101695f2f51407b3b1e000176fd 2015-10-07T14:02:54Z
+github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z
+github.com/juju/names git e287fe4ae0dbda220cace3ed0e35cda4796c1aa3 2015-10-22T17:21:35Z
+github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z
+github.com/juju/testing git ad6f815f49f8209a27a3b7efb6d44876493e5939 2015-10-12T16:09:06Z
+github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z
+github.com/juju/utils git f2db28cef935aba0a7207254fa5dba273e649d0e 2015-11-09T11:51:43Z
+github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z
+github.com/julienschmidt/httprouter git 109e267447e95ad1bb48b758e40dd7453eb7b039 2015-09-05T17:25:33Z
+golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z
+golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z
+gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z
+gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z
+gopkg.in/juju/charm.v6-unstable git a3d228ef5292531219d17d47679b260580fba1a8 2015-11-19T07:39:58Z
+gopkg.in/juju/charmstore.v5-unstable git e3e4f7b9e2b930e84db3e21c44853a43f38d4c50 2015-11-30T13:47:17Z
+gopkg.in/juju/jujusvg.v1 git 2c97ff517dee12dc48bb3c2d2b113e5045a75b71 2015-11-19T14:54:17Z
+gopkg.in/macaroon-bakery.v1 git e569eb58bf9977eb8a1f20d405535d45c66035be 2015-10-22T13:30:53Z
+gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z
+gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z
+gopkg.in/natefinch/lumberjack.v2 git 588a21fb0fa0ebdfde42670fa214576b6f0f22df 2015-05-21T01:59:18Z
+gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z
+gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z
+launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/README.md' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@
+A dummy bundle === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/bad/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@
+# This bundle has a bad relation, which will cause it to fail
+# its verification.
+services:
+  wordpress:
+    charm: wordpress
+    num_units: 1
+  mysql:
+    charm: mysql
+    num_units: 1
+relations:
+  - ["foo:db", "mysql:server"] === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/README.md' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@
+OpenStack Bundle for Juju
+=========================
+
+Overview
+--------
+
+This bundle deploys a reference OpenStack architecture including all core projects:
+
+ - OpenStack Compute
+ - OpenStack Networking (using Open vSwitch plugin)
+ - OpenStack Block Storage (backed with Ceph storage)
+ - OpenStack Image
+ - OpenStack Object Storage
+ - OpenStack Identity
+ - OpenStack Dashboard
+ - OpenStack Telemetry
+ - OpenStack Orchestration
+
+The charm configuration is an opinionated set for deploying OpenStack for testing on cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage).
+
+The Ubuntu Server Team use this bundle for testing OpenStack-on-OpenStack.
+
+Usage
+-----
+
+Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard:
+
+    http://<dashboard-ip>/horizon
+
+The charms configure the 'admin' user with a password of 'openstack' by default.
+
+The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. Read the OpenStack User Guide on how to configure your cloud for use:
+
+    http://docs.openstack.org/user-guide/content/
+
+Niggles
+-------
+
+The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. It's possible to do this when testing on OpenStack by adding a second network interface to the neutron-gateway service:
+
+    nova interface-attach --net-id <net-id> <server-id>
+    juju set neutron-gateway ext-port=eth1
+
+Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin.
+
+For actual OpenStack deployments, this service would reside on a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud).
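+
+Deploying
+---------
+
+As a rough sketch only (the juju-deployer tool is assumed to be installed
+separately; it is not part of this repository and its invocation may vary
+between versions), the whole bundle can be deployed into the current
+Juju environment with:
+
+    # Deploy the services and relations defined in bundle.yaml.
+    juju-deployer -c bundle.yaml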
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/bundle.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/openstack/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,202 @@ +series: precise +services: + mysql: + charm: cs:precise/mysql + constraints: mem=1G + options: + dataset-size: 50% + rabbitmq-server: + charm: cs:precise/rabbitmq-server + constraints: mem=1G + ceph: + charm: cs:precise/ceph + num_units: 3 + constraints: mem=1G + options: + monitor-count: 3 + fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc + monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== + osd-devices: /dev/vdb + osd-reformat: "yes" + ephemeral-unmount: /mnt + keystone: + charm: cs:precise/keystone + constraints: mem=1G + options: + admin-password: openstack + admin-token: ubuntutesting + openstack-dashboard: + charm: cs:precise/openstack-dashboard + constraints: mem=1G + nova-compute: + charm: cs:precise/nova-compute + num_units: 3 + constraints: mem=4G + options: + config-flags: "auto_assign_floating_ip=False" + enable-live-migration: False + virt-type: kvm + nova-cloud-controller: + charm: cs:precise/nova-cloud-controller + constraints: mem=1G + options: + network-manager: Neutron + quantum-security-groups: "yes" + neutron-gateway: + charm: cs:precise/quantum-gateway + constraints: mem=1G + cinder: + charm: cs:precise/cinder + options: + block-device: "None" + constraints": mem=1G + glance: + charm: cs:precise/glance + constraints: mem=1G + swift-proxy: + charm: cs:precise/swift-proxy + constraints: mem=1G + options: + zone-assignment: manual + replicas: 3 + use-https: 'no' + swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae + swift-storage-z1: + charm: cs:precise/swift-storage + constraints: mem=1G + options: + zone: 1 + block-device: vdb + overwrite: "true" + swift-storage-z2: + charm: cs:precise/swift-storage + constraints: mem=1G + options: + zone: 2 + block-device: vdb + overwrite: "true" + swift-storage-z3: + charm: cs:precise/swift-storage + constraints: mem=1G + options: + zone: 3 + block-device: vdb + overwrite: "true" + ceilometer: + charm: cs:precise/ceilometer + constraints: mem=1G + ceilometer-agent: + charm: cs:precise/ceilometer-agent + mongodb: + charm: cs:precise/mongodb + constraints: mem=1G + heat: + charm: cs:precise/heat + constraints: mem=1G + ntp: + charm: cs:precise/ntp +relations: + - - keystone:shared-db + - mysql:shared-db + - - nova-cloud-controller:shared-db + - mysql:shared-db + - - nova-cloud-controller:amqp + - rabbitmq-server:amqp + - - nova-cloud-controller:image-service + - glance:image-service + - - nova-cloud-controller:identity-service + - keystone:identity-service + - - nova-compute:cloud-compute + - nova-cloud-controller:cloud-compute + - - nova-compute:shared-db + - mysql:shared-db + - - nova-compute:amqp + - rabbitmq-server:amqp + - - nova-compute:image-service + - glance:image-service + - - nova-compute:ceph + - ceph:client + - - glance:shared-db + - mysql:shared-db + - - glance:identity-service + - keystone:identity-service + - - glance:ceph + - ceph:client + - - glance:image-service + - cinder:image-service + - - cinder:shared-db + - mysql:shared-db + - - cinder:amqp + - rabbitmq-server:amqp + - - cinder:cinder-volume-service + - nova-cloud-controller:cinder-volume-service + - - cinder:identity-service + - keystone:identity-service + - - cinder:ceph + - ceph:client + 
- - neutron-gateway:shared-db + - mysql:shared-db + - - neutron-gateway:amqp + - rabbitmq-server:amqp + - - neutron-gateway:quantum-network-service + - nova-cloud-controller:quantum-network-service + - - openstack-dashboard:identity-service + - keystone:identity-service + - - swift-proxy:identity-service + - keystone:identity-service + - - swift-proxy:swift-storage + - swift-storage-z1:swift-storage + - - swift-proxy:swift-storage + - swift-storage-z2:swift-storage + - - swift-proxy:swift-storage + - swift-storage-z3:swift-storage + - - ceilometer:identity-service + - keystone:identity-service + - - ceilometer:amqp + - rabbitmq-server:amqp + - - ceilometer:shared-db + - mongodb:database + - - ceilometer-agent:nova-ceilometer + - nova-compute:nova-ceilometer + - - ceilometer-agent:ceilometer-service + - ceilometer:ceilometer-service + - - heat:identity-service + - keystone:identity-service + - - heat:shared-db + - mysql:shared-db + - - heat:amqp + - rabbitmq-server:amqp + - - ntp:juju-info + - nova-compute:juju-info + - - ntp:juju-info + - nova-cloud-controller:juju-info + - - ntp:juju-info + - neutron-gateway:juju-info + - - ntp:juju-info + - ceph:juju-info + - - ntp:juju-info + - cinder:juju-info + - - ntp:juju-info + - keystone:juju-info + - - ntp:juju-info + - glance:juju-info + - - ntp:juju-info + - swift-proxy:juju-info + - - ntp:juju-info + - swift-storage-z1:juju-info + - - ntp:juju-info + - swift-storage-z2:juju-info + - - ntp:juju-info + - swift-storage-z3:juju-info + - - ntp:juju-info + - ceilometer:juju-info + - - ntp:juju-info + - mongodb:juju-info + - - ntp:juju-info + - rabbitmq-server:juju-info + - - ntp:juju-info + - mysql:juju-info + - - ntp:juju-info + - openstack-dashboard:juju-info + - - ntp:juju-info + - heat:juju-info === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.md' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-simple/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +services: + wordpress: + charm: wordpress + num_units: 1 + mysql: + charm: mysql + num_units: 1 +relations: + - ["wordpress:db", "mysql:server"] === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.md' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml' --- 
src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/bundle/wordpress-with-logging/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +services: + wordpress: + charm: wordpress + num_units: 1 + mysql: + charm: mysql + num_units: 1 + logging: + charm: logging +relations: + - ["wordpress:db", "mysql:server"] + - ["wordpress:juju-info", "logging:info"] + - ["mysql:juju-info", "logging:info"] === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/bar-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/collect-metrics 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/config-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 
'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/foo-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/install' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/meter-status-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/otherdata 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +some text === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 
'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/self-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/start' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/start 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/start 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stop' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stop 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/stop 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/subdir/stuff 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +non hook related stuff === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/hooks/upgrade-charm 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +name: all-hooks +summary: "That's a dummy charm with hook scripts for all types of hooks."
+description: "This is a longer description." +provides: + foo: + interface: phony +requires: + bar: + interface: fake +peers: + self: + interface: dummy === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/all-hooks/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.dir' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.dir/ignored' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.ignored' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.ignored 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/.ignored 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +# \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/category/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +name: categories +summary: "Sample charm with a category" +description: | + That's a boring charm that has a category. +categories: ["database"] +tags: ["openstack", "storage"] \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.dir' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.dir/ignored' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.ignored' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.ignored 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/.ignored 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +# \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/actions.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +snapshot: + description: Take a snapshot of the database. + params: + outfile: + description: The file to write out to. 
+ type: string + default: foo.bz2 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/build' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/build/ignored' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/config.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +options: + title: {default: My Title, description: A descriptive title used for the service., type: string} + outlook: {description: No default outlook., type: string} + username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} + skill-level: {description: A number indicating skill., type: int} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/empty' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/empty/.gitkeep' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks/install' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/bash +echo "Done!" === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: dummy +summary: "That's a dummy charm." +description: | + This is a longer description which + potentially contains multiple lines. 
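An aside on the fixture format: the dummy charm's config.yaml above shows the canonical shape of charm configuration, a top-level "options" map whose values carry "default", "description", and "type" keys. Purely as an illustration (this helper is not part of this branch, and the real repository parses these files through gopkg.in/juju/charm.v6-unstable), a minimal sketch decoding that shape with gopkg.in/yaml.v2 might look like:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"gopkg.in/yaml.v2"
)

// option mirrors a single entry under "options" in a charm config.yaml.
type option struct {
	Default     interface{} `yaml:"default"`
	Description string      `yaml:"description"`
	Type        string      `yaml:"type"`
}

func main() {
	// Assumes the dummy charm's config.yaml sits in the working directory.
	data, err := ioutil.ReadFile("config.yaml")
	if err != nil {
		log.Fatal(err)
	}
	var cfg struct {
		Options map[string]option `yaml:"options"`
	}
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatal(err)
	}
	for name, opt := range cfg.Options {
		fmt.Printf("%s (%s): %s\n", name, opt.Type, opt.Description)
	}
}

Run against the fixture above, this would list the four options (title, outlook, username, skill-level); options without a default, such as outlook, decode with Default == nil.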
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/dummy/src/hello.c 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +#include <stdio.h> + +main() +{ + printf ("Hello World!\n"); + return 0; +} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.dir' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.dir/ignored' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.ignored' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.ignored 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/.ignored 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +# \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/format2/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +name: format2 +format: 2 +summary: "Sample charm described in format 2" +description: | + That's a boring charm that is described in + terms of format 2. === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/hooks' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +name: logging +summary: "Subordinate logging test charm" +description: | + This is a longer description which + potentially contains multiple lines.
+subordinate: true +provides: + logging-client: + interface: logging +requires: + logging-directory: + interface: logging + scope: container + info: + interface: juju-info + scope: container === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/logging/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +name: metered-empty +summary: "Metered charm with empty metrics" +description: "A charm that will not send metrics" === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/metrics.yaml' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered-empty/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +name: metered +summary: "A metered charm with custom metrics" +description: "" === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metrics.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metrics.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/metrics.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +metrics: + pings: + type: gauge + description: Description of the metric. 
+  juju-unit-time: +    type: gauge +    description: Builtin metric === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/metered/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/hooks' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/monitoring/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +name: monitoring +summary: "Subordinate monitoring test charm" +description: | + This is a longer description which + potentially contains multiple lines. +subordinate: true +provides: + monitoring-client: + interface: monitoring +requires: + monitoring-port: + interface: monitoring + scope: container + info: + interface: juju-info + scope: container === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +name: multi-series-bad +summary: "K/V storage engine" +description: "An example of a charm which exists in a repo under +the quantal series but which declares it only supports precise and +trusty."
+series: + - precise + - trusty +provides: + endpoint: + interface: http + admin: + interface: http +peers: + ring: + interface: riak === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series-bad/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +7 \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +name: new-charm-with-multi-series +summary: "K/V storage engine" +description: "Scalable K/V Store in Erlang with Clocks :-)" +series: + - precise + - trusty + - quantal +provides: + endpoint: + interface: http + admin: + interface: http +peers: + ring: + interface: riak === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/multi-series/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +7 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +name: mysql-alternative +summary: "Database engine" +description: "A pretty popular database" +provides: + prod: + interface: mysql + dev: + interface: mysql + limit: 2 === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql-alternative/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: mysql +summary: "Database engine" +description: "A pretty popular database" +provides: + server: mysql === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/revision 1970-01-01 00:00:00 +0000 
+++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/mysql/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +name: riak +summary: "K/V storage engine" +description: "Scalable K/V Store in Erlang with Clocks :-)" +provides: + endpoint: + interface: http + admin: + interface: http +peers: + ring: + interface: riak === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/riak/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +7 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,15 @@ +name: terracotta +summary: Distributed HA caching/storage platform for Java +maintainer: Robert Ayres +description: | + Distributed HA caching/storage platform for Java. + . + Terracotta provides out of the box clustering for a number of well known Java + frameworks, including EHCache, Hibernate and Quartz as well as clustering + for J2EE containers. +provides: + dso: + interface: terracotta + optional: true +peers: + server-array: terracotta-server === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/terracotta/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +3 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: upgrade +summary: "Sample charm to test version changes" +description: | + Sample charm to test version changes. + This is the old charm. 
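A recurring detail in these fixtures is the revision file: a bare integer, often written without a trailing newline (hence the repeated "\ No newline at end of file" markers above). As an illustrative sketch only (this helper and the hard-coded path are not part of the branch), such a file can be read robustly in Go like this:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strconv"
	"strings"
)

// readRevision parses a charm revision file, which holds a bare integer,
// with or without a trailing newline.
func readRevision(path string) (int, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	// TrimSpace copes with both the "1" and "1\n" variants seen in this tree.
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	rev, err := readRevision("revision")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("charm revision:", rev)
}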
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade1/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: upgrade +summary: "Sample charm to test version changes" +description: | + Sample charm to test version changes. + This is the new charm. === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/upgrade2/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +2 === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/install' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +#!/bin/bash + +echo hello world \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: varnish-alternative +summary: "Database engine" +description: "Another popular database" +provides: + webcache: varnish === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish-alternative/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: varnish 
+summary: "Database engine" +description: "Another popular database" +provides: + webcache: varnish === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/varnish/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/actions' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/actions/.gitkeep' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/config.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +options: + blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} + === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/hooks' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yaml' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +name: wordpress +summary: "Blog engine" +description: "A pretty popular blog engine" +provides: + url: + interface: http + limit: + optional: false + logging-dir: + interface: logging + scope: container + monitoring-port: + interface: monitoring + scope: container +requires: + db: + interface: mysql + limit: 1 + optional: false + cache: + interface: varnish + limit: 2 + optional: true === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/revision' --- src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/quantal/wordpress/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +3 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/build' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/build/ignored' === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/hooks' === added symlink 'src/gopkg.in/juju/charmrepo.v2-unstable/internal/test-charm-repo/series/format2/hooks/symlink' === target is u'../target' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/legacy.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/legacy.go 1970-01-01 00:00:00 +0000 +++ 
src/gopkg.in/juju/charmrepo.v2-unstable/legacy.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,373 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/juju/utils" + "gopkg.in/juju/charm.v6-unstable" +) + +// LegacyCharmStore is a repository Interface that provides access to the +// legacy Juju charm store. +type LegacyCharmStore struct { + BaseURL string + authAttrs string // a list of attr=value pairs, comma separated + jujuAttrs string // a list of attr=value pairs, comma separated + testMode bool +} + +var _ Interface = (*LegacyCharmStore)(nil) + +var LegacyStore = &LegacyCharmStore{BaseURL: "https://store.juju.ubuntu.com"} + +// WithAuthAttrs returns a repository Interface with the authentication token +// list set. authAttrs is a list of attr=value pairs. +func (s *LegacyCharmStore) WithAuthAttrs(authAttrs string) Interface { + authCS := *s + authCS.authAttrs = authAttrs + return &authCS +} + +// WithTestMode returns a repository Interface where testMode is set to the +// value passed to this method. +func (s *LegacyCharmStore) WithTestMode(testMode bool) Interface { + newRepo := *s + newRepo.testMode = testMode + return &newRepo +} + +// WithJujuAttrs returns a repository Interface with the Juju metadata +// attributes set. jujuAttrs is a list of attr=value pairs. +func (s *LegacyCharmStore) WithJujuAttrs(jujuAttrs string) Interface { + jujuCS := *s + jujuCS.jujuAttrs = jujuAttrs + return &jujuCS +} + +// get performs an HTTP GET, adding a custom auth header if necessary. +func (s *LegacyCharmStore) get(url string) (resp *http.Response, err error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if s.authAttrs != "" { + // To comply with RFC 2617, we send the authentication data in + // the Authorization header with a custom auth scheme + // and the authentication attributes. + req.Header.Add("Authorization", "charmstore "+s.authAttrs) + } + if s.jujuAttrs != "" { + // The use of "X-" to prefix custom header values is deprecated. + req.Header.Add("Juju-Metadata", s.jujuAttrs) + } + return http.DefaultClient.Do(req) +} + +// Resolve canonicalizes charm URLs, resolving any implied series in the +// reference. +func (s *LegacyCharmStore) Resolve(ref *charm.URL) (*charm.URL, []string, error) { + infos, err := s.Info(ref) + if err != nil { + return nil, nil, err + } + if len(infos) == 0 { + return nil, nil, fmt.Errorf("missing response when resolving charm URL: %q", ref) + } + if infos[0].CanonicalURL == "" { + return nil, nil, fmt.Errorf("cannot resolve charm URL: %q", ref) + } + curl, err := charm.ParseURL(infos[0].CanonicalURL) + if err != nil { + return nil, nil, err + } + // Legacy store does not support returning the supported series. + return curl, nil, nil +} + +// Info returns details for all the specified charms in the charm store. +func (s *LegacyCharmStore) Info(curls ...charm.Location) ([]*InfoResponse, error) { + baseURL := s.BaseURL + "/charm-info?"
+ queryParams := make([]string, len(curls), len(curls)+1) + for i, curl := range curls { + queryParams[i] = "charms=" + url.QueryEscape(curl.String()) + } + if s.testMode { + queryParams = append(queryParams, "stats=0") + } + resp, err := s.get(baseURL + strings.Join(queryParams, "&")) + if err != nil { + if urlErr, ok := err.(*url.Error); ok { + switch urlErr.Err.(type) { + case *net.DNSError, *net.OpError: + return nil, fmt.Errorf("Cannot access the charm store. Are you connected to the internet? Error details: %v", err) + } + } + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + errMsg := fmt.Errorf("Cannot access the charm store. Invalid response code: %q", resp.Status) + body, readErr := ioutil.ReadAll(resp.Body) + if readErr != nil { + return nil, readErr + } + logger.Errorf("%v Response body: %s", errMsg, body) + return nil, errMsg + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + infos := make(map[string]*InfoResponse) + if err = json.Unmarshal(body, &infos); err != nil { + return nil, err + } + result := make([]*InfoResponse, len(curls)) + for i, curl := range curls { + key := curl.String() + info, found := infos[key] + if !found { + return nil, fmt.Errorf("charm store returned response without charm %q", key) + } + if len(info.Errors) == 1 && info.Errors[0] == "entry not found" { + info.Errors[0] = fmt.Sprintf("charm not found: %s", curl) + } + result[i] = info + } + return result, nil +} + +// Event returns details for a charm event in the charm store. +// +// If digest is empty, the latest event is returned. +func (s *LegacyCharmStore) Event(curl *charm.URL, digest string) (*EventResponse, error) { + key := curl.String() + query := key + if digest != "" { + query += "@" + digest + } + resp, err := s.get(s.BaseURL + "/charm-event?charms=" + url.QueryEscape(query)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + events := make(map[string]*EventResponse) + if err = json.Unmarshal(body, &events); err != nil { + return nil, err + } + event, found := events[key] + if !found { + return nil, fmt.Errorf("charm store returned response without charm %q", key) + } + if len(event.Errors) == 1 && event.Errors[0] == "entry not found" { + if digest == "" { + return nil, &NotFoundError{fmt.Sprintf("charm event not found for %q", curl)} + } else { + return nil, &NotFoundError{fmt.Sprintf("charm event not found for %q with digest %q", curl, digest)} + } + } + return event, nil +} + +// revisions returns the revisions of the charms referenced by curls. +func (s *LegacyCharmStore) revisions(curls ...charm.Location) (revisions []CharmRevision, err error) { + infos, err := s.Info(curls...) + if err != nil { + return nil, err + } + revisions = make([]CharmRevision, len(infos)) + for i, info := range infos { + for _, w := range info.Warnings { + logger.Warningf("charm store reports for %q: %s", curls[i], w) + } + if info.Errors == nil { + revisions[i].Revision = info.Revision + revisions[i].Sha256 = info.Sha256 + } else { + // If a charm is not found, we are more concise with the error message.
+ if len(info.Errors) == 1 && strings.HasPrefix(info.Errors[0], "charm not found") { + revisions[i].Err = fmt.Errorf(info.Errors[0]) + } else { + revisions[i].Err = fmt.Errorf("charm info errors for %q: %s", curls[i], strings.Join(info.Errors, "; ")) + } + } + } + return revisions, nil +} + +// Latest returns the latest revision of the charms referenced by curls, regardless +// of the revision set on each curl. +func (s *LegacyCharmStore) Latest(curls ...*charm.URL) ([]CharmRevision, error) { + baseCurls := make([]charm.Location, len(curls)) + for i, curl := range curls { + baseCurls[i] = curl.WithRevision(-1) + } + return s.revisions(baseCurls...) +} + +// BranchLocation returns the location for the branch holding the charm at curl. +func (s *LegacyCharmStore) BranchLocation(curl *charm.URL) string { + if curl.User != "" { + return fmt.Sprintf("lp:~%s/charms/%s/%s/trunk", curl.User, curl.Series, curl.Name) + } + return fmt.Sprintf("lp:charms/%s/%s", curl.Series, curl.Name) +} + +var branchPrefixes = []string{ + "lp:", + "bzr+ssh://bazaar.launchpad.net/+branch/", + "bzr+ssh://bazaar.launchpad.net/", + "http://launchpad.net/+branch/", + "http://launchpad.net/", + "https://launchpad.net/+branch/", + "https://launchpad.net/", + "http://code.launchpad.net/+branch/", + "http://code.launchpad.net/", + "https://code.launchpad.net/+branch/", + "https://code.launchpad.net/", +} + +// CharmURL returns the charm URL for the branch at location. +func (s *LegacyCharmStore) CharmURL(location string) (*charm.URL, error) { + var l string + if len(location) > 0 && location[0] == '~' { + l = location + } else { + for _, prefix := range branchPrefixes { + if strings.HasPrefix(location, prefix) { + l = location[len(prefix):] + break + } + } + } + if l != "" { + for len(l) > 0 && l[len(l)-1] == '/' { + l = l[:len(l)-1] + } + u := strings.Split(l, "/") + if len(u) == 3 && u[0] == "charms" { + return charm.ParseURL(fmt.Sprintf("cs:%s/%s", u[1], u[2])) + } + if len(u) == 4 && u[0] == "charms" && u[3] == "trunk" { + return charm.ParseURL(fmt.Sprintf("cs:%s/%s", u[1], u[2])) + } + if len(u) == 5 && u[1] == "charms" && u[4] == "trunk" && len(u[0]) > 0 && u[0][0] == '~' { + return charm.ParseURL(fmt.Sprintf("cs:%s/%s/%s", u[0], u[2], u[3])) + } + } + return nil, fmt.Errorf("unknown branch location: %q", location) +} + +// verify returns an error unless a file exists at path with a hex-encoded +// SHA256 matching digest. +func verify(path, digest string) error { + hash, _, err := utils.ReadFileSHA256(path) + if err != nil { + return err + } + if hash != digest { + return fmt.Errorf("bad SHA256 of %q", path) + } + return nil +} + +// Get returns the charm referenced by curl. +// CacheDir must have been set, otherwise Get will panic. +func (s *LegacyCharmStore) Get(curl *charm.URL) (charm.Charm, error) { + // The cache location must have been previously set. 
+ if CacheDir == "" { + panic("charm cache directory path is empty") + } + if err := os.MkdirAll(CacheDir, os.FileMode(0755)); err != nil { + return nil, err + } + revInfo, err := s.revisions(curl) + if err != nil { + return nil, err + } + if len(revInfo) != 1 { + return nil, fmt.Errorf("expected 1 result, got %d", len(revInfo)) + } + if revInfo[0].Err != nil { + return nil, revInfo[0].Err + } + rev, digest := revInfo[0].Revision, revInfo[0].Sha256 + if curl.Revision == -1 { + curl = curl.WithRevision(rev) + } else if curl.Revision != rev { + return nil, fmt.Errorf("store returned charm with wrong revision %d for %q", rev, curl.String()) + } + path := filepath.Join(CacheDir, charm.Quote(curl.String())+".charm") + if verify(path, digest) != nil { + store_url := s.BaseURL + "/charm/" + curl.Path() + if s.testMode { + store_url = store_url + "?stats=0" + } + resp, err := s.get(store_url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("bad status from request for %q: %q", store_url, resp.Status) + } + f, err := ioutil.TempFile(CacheDir, "charm-download") + if err != nil { + return nil, err + } + dlPath := f.Name() + _, err = io.Copy(f, resp.Body) + if cerr := f.Close(); err == nil { + err = cerr + } + if err != nil { + os.Remove(dlPath) + return nil, err + } + if err := utils.ReplaceFile(dlPath, path); err != nil { + return nil, err + } + } + if err := verify(path, digest); err != nil { + return nil, err + } + return charm.ReadCharmArchive(path) +} + +// GetBundle is only defined for implementing Interface. +func (s *LegacyCharmStore) GetBundle(curl *charm.URL) (charm.Bundle, error) { + return nil, errors.New("not implemented: legacy API does not support bundles") +} + +// LegacyInferRepository returns a charm repository inferred from the provided +// charm or bundle reference. Local references will use the provided path. +func LegacyInferRepository(ref *charm.URL, localRepoPath string) (repo Interface, err error) { + switch ref.Schema { + case "cs": + repo = LegacyStore + case "local": + if localRepoPath == "" { + return nil, errors.New("path to local repository not specified") + } + repo = &LocalRepository{Path: localRepoPath} + default: + return nil, fmt.Errorf("unknown schema for charm reference %q", ref) + } + return +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/legacy_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/legacy_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/legacy_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,413 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package charmrepo_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmrepo.v2-unstable" + charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" +) + +type legacyCharmStoreSuite struct { + gitjujutesting.FakeHomeSuite + server *charmtesting.MockStore + store *charmrepo.LegacyCharmStore +} + +var _ = gc.Suite(&legacyCharmStoreSuite{}) + +func (s *legacyCharmStoreSuite) SetUpSuite(c *gc.C) { + s.FakeHomeSuite.SetUpSuite(c) + s.server = charmtesting.NewMockStore(c, TestCharms, map[string]int{ + "cs:series/good": 23, + "cs:series/unwise": 23, + "cs:series/better": 24, + "cs:series/best": 25, + }) +} + +func (s *legacyCharmStoreSuite) SetUpTest(c *gc.C) { + s.FakeHomeSuite.SetUpTest(c) + s.PatchValue(&charmrepo.CacheDir, c.MkDir()) + s.store = newLegacyStore(s.server.Address()) + s.server.Downloads = nil + s.server.Authorizations = nil + s.server.Metadata = nil + s.server.DownloadsNoStats = nil + s.server.InfoRequestCount = 0 + s.server.InfoRequestCountNoStats = 0 +} + +func (s *legacyCharmStoreSuite) TearDownSuite(c *gc.C) { + s.server.Close() + s.FakeHomeSuite.TearDownSuite(c) +} + +func (s *legacyCharmStoreSuite) TestMissing(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/missing") + expect := `charm not found: cs:series/missing` + revs, err := s.store.Latest(charmURL) + c.Assert(err, jc.ErrorIsNil) + c.Assert(revs, gc.HasLen, 1) + c.Assert(revs[0].Err, gc.ErrorMatches, expect) + _, err = s.store.Get(charmURL) + c.Assert(err, gc.ErrorMatches, expect) +} + +func (s *legacyCharmStoreSuite) TestError(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/borken") + expect := `charm info errors for "cs:series/borken": badness` + revs, err := s.store.Latest(charmURL) + c.Assert(err, jc.ErrorIsNil) + c.Assert(revs, gc.HasLen, 1) + c.Assert(revs[0].Err, gc.ErrorMatches, expect) + _, err = s.store.Get(charmURL) + c.Assert(err, gc.ErrorMatches, expect) +} + +func (s *legacyCharmStoreSuite) TestWarning(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/unwise") + expect := `.* WARNING juju.charm.charmrepo charm store reports for "cs:series/unwise": foolishness` + "\n" + revs, err := s.store.Latest(charmURL) + c.Assert(err, jc.ErrorIsNil) + c.Assert(revs, gc.HasLen, 1) + c.Assert(revs[0].Revision, gc.Equals, 23) + c.Assert(err, gc.IsNil) + c.Assert(c.GetTestLog(), gc.Matches, expect) + ch, err := s.store.Get(charmURL) + c.Assert(ch, gc.NotNil) + c.Assert(err, gc.IsNil) + c.Assert(c.GetTestLog(), gc.Matches, expect+expect) +} + +func (s *legacyCharmStoreSuite) TestLatest(c *gc.C) { + urls := []*charm.URL{ + charm.MustParseURL("cs:series/good"), + charm.MustParseURL("cs:series/good-2"), + charm.MustParseURL("cs:series/good-99"), + } + revInfo, err := s.store.Latest(urls...) 
+ c.Assert(err, gc.IsNil) + c.Assert(revInfo, jc.DeepEquals, []charmrepo.CharmRevision{ + {23, "843f8bba130a9705249f038202fab24e5151e3a2f7b6626f4508a5725739a5b5", nil}, + {23, "843f8bba130a9705249f038202fab24e5151e3a2f7b6626f4508a5725739a5b5", nil}, + {23, "843f8bba130a9705249f038202fab24e5151e3a2f7b6626f4508a5725739a5b5", nil}, + }) +} + +func (s *legacyCharmStoreSuite) assertCached(c *gc.C, charmURL *charm.URL) { + s.server.Downloads = nil + ch, err := s.store.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch, gc.NotNil) + c.Assert(s.server.Downloads, gc.IsNil) +} + +func (s *legacyCharmStoreSuite) TestGetCacheImplicitRevision(c *gc.C) { + base := "cs:series/good" + charmURL := charm.MustParseURL(base) + revCharmURL := charm.MustParseURL(base + "-23") + ch, err := s.store.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch, gc.NotNil) + c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{revCharmURL}) + s.assertCached(c, charmURL) + s.assertCached(c, revCharmURL) +} + +func (s *legacyCharmStoreSuite) TestGetCacheExplicitRevision(c *gc.C) { + base := "cs:series/good-12" + charmURL := charm.MustParseURL(base) + ch, err := s.store.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch, gc.NotNil) + c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) + s.assertCached(c, charmURL) +} + +func (s *legacyCharmStoreSuite) TestGetBadCache(c *gc.C) { + c.Assert(os.Mkdir(filepath.Join(charmrepo.CacheDir, "cache"), 0777), gc.IsNil) + base := "cs:series/good" + charmURL := charm.MustParseURL(base) + revCharmURL := charm.MustParseURL(base + "-23") + name := charm.Quote(revCharmURL.String()) + ".charm" + err := ioutil.WriteFile(filepath.Join(charmrepo.CacheDir, "cache", name), nil, 0666) + c.Assert(err, gc.IsNil) + ch, err := s.store.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch, gc.NotNil) + c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{revCharmURL}) + s.assertCached(c, charmURL) + s.assertCached(c, revCharmURL) +} + +func (s *legacyCharmStoreSuite) TestGetTestModeFlag(c *gc.C) { + base := "cs:series/good-12" + charmURL := charm.MustParseURL(base) + ch, err := s.store.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch, gc.NotNil) + c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) + c.Assert(s.server.DownloadsNoStats, gc.IsNil) + c.Assert(s.server.InfoRequestCount, gc.Equals, 1) + c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 0) + + storeInTestMode := s.store.WithTestMode(true) + other := "cs:series/good-23" + otherURL := charm.MustParseURL(other) + ch, err = storeInTestMode.Get(otherURL) + c.Assert(err, gc.IsNil) + c.Assert(ch, gc.NotNil) + c.Assert(s.server.Downloads, jc.DeepEquals, []*charm.URL{charmURL}) + c.Assert(s.server.DownloadsNoStats, jc.DeepEquals, []*charm.URL{otherURL}) + c.Assert(s.server.InfoRequestCount, gc.Equals, 1) + c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 1) +} + +// The following tests cover the low-level CharmStore-specific API. + +func (s *legacyCharmStoreSuite) TestInfo(c *gc.C) { + charmURLs := []charm.Location{ + charm.MustParseURL("cs:series/good"), + charm.MustParseURL("cs:series/better"), + charm.MustParseURL("cs:series/best"), + } + infos, err := s.store.Info(charmURLs...) 
+ c.Assert(err, gc.IsNil) + c.Assert(infos, gc.HasLen, 3) + expected := []int{23, 24, 25} + for i, info := range infos { + c.Assert(info.Errors, gc.IsNil) + c.Assert(info.Revision, gc.Equals, expected[i]) + } +} + +func (s *legacyCharmStoreSuite) TestInfoNotFound(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/missing") + info, err := s.store.Info(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(info, gc.HasLen, 1) + c.Assert(info[0].Errors, gc.HasLen, 1) + c.Assert(info[0].Errors[0], gc.Matches, `charm not found: cs:series/missing`) +} + +func (s *legacyCharmStoreSuite) TestInfoError(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/borken") + info, err := s.store.Info(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(info, gc.HasLen, 1) + c.Assert(info[0].Errors, jc.DeepEquals, []string{"badness"}) +} + +func (s *legacyCharmStoreSuite) TestInfoWarning(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/unwise") + info, err := s.store.Info(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(info, gc.HasLen, 1) + c.Assert(info[0].Warnings, jc.DeepEquals, []string{"foolishness"}) +} + +func (s *legacyCharmStoreSuite) TestInfoTestModeFlag(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/good") + _, err := s.store.Info(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(s.server.InfoRequestCount, gc.Equals, 1) + c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 0) + + storeInTestMode, ok := s.store.WithTestMode(true).(*charmrepo.LegacyCharmStore) + c.Assert(ok, gc.Equals, true) + _, err = storeInTestMode.Info(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(s.server.InfoRequestCount, gc.Equals, 1) + c.Assert(s.server.InfoRequestCountNoStats, gc.Equals, 1) +} + +func (s *legacyCharmStoreSuite) TestInfoDNSError(c *gc.C) { + store := newLegacyStore("http://127.1.2.3") + charmURL := charm.MustParseURL("cs:series/good") + resp, err := store.Info(charmURL) + c.Assert(resp, gc.IsNil) + expect := `Cannot access the charm store. 
.*` + c.Assert(err, gc.ErrorMatches, expect) +} + +func (s *legacyCharmStoreSuite) TestEvent(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/good") + event, err := s.store.Event(charmURL, "") + c.Assert(err, gc.IsNil) + c.Assert(event.Errors, gc.IsNil) + c.Assert(event.Revision, gc.Equals, 23) + c.Assert(event.Digest, gc.Equals, "the-digest") +} + +func (s *legacyCharmStoreSuite) TestEventWithDigest(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/good") + event, err := s.store.Event(charmURL, "the-digest") + c.Assert(err, gc.IsNil) + c.Assert(event.Errors, gc.IsNil) + c.Assert(event.Revision, gc.Equals, 23) + c.Assert(event.Digest, gc.Equals, "the-digest") +} + +func (s *legacyCharmStoreSuite) TestEventNotFound(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/missing") + event, err := s.store.Event(charmURL, "") + c.Assert(err, gc.ErrorMatches, `charm event not found for "cs:series/missing"`) + c.Assert(event, gc.IsNil) +} + +func (s *legacyCharmStoreSuite) TestEventNotFoundDigest(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/good") + event, err := s.store.Event(charmURL, "missing-digest") + c.Assert(err, gc.ErrorMatches, `charm event not found for "cs:series/good" with digest "missing-digest"`) + c.Assert(event, gc.IsNil) +} + +func (s *legacyCharmStoreSuite) TestEventError(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/borken") + event, err := s.store.Event(charmURL, "") + c.Assert(err, gc.IsNil) + c.Assert(event.Errors, jc.DeepEquals, []string{"badness"}) +} + +func (s *legacyCharmStoreSuite) TestAuthorization(c *gc.C) { + store := s.store.WithAuthAttrs("token=value") + + base := "cs:series/good" + charmURL := charm.MustParseURL(base) + _, err := store.Get(charmURL) + + c.Assert(err, gc.IsNil) + + c.Assert(s.server.Authorizations, gc.HasLen, 1) + c.Assert(s.server.Authorizations[0], gc.Equals, "charmstore token=value") +} + +func (s *legacyCharmStoreSuite) TestNilAuthorization(c *gc.C) { + store := s.store.WithAuthAttrs("") + + base := "cs:series/good" + charmURL := charm.MustParseURL(base) + _, err := store.Get(charmURL) + + c.Assert(err, gc.IsNil) + c.Assert(s.server.Authorizations, gc.HasLen, 0) +} + +func (s *legacyCharmStoreSuite) TestMetadata(c *gc.C) { + store := s.store.WithJujuAttrs("juju-metadata") + + base := "cs:series/good" + charmURL := charm.MustParseURL(base) + _, err := store.Get(charmURL) + + c.Assert(err, gc.IsNil) + c.Assert(s.server.Metadata, gc.HasLen, 1) + c.Assert(s.server.Metadata[0], gc.Equals, "juju-metadata") +} + +func (s *legacyCharmStoreSuite) TestNilMetadata(c *gc.C) { + base := "cs:series/good" + charmURL := charm.MustParseURL(base) + _, err := s.store.Get(charmURL) + + c.Assert(err, gc.IsNil) + c.Assert(s.server.Metadata, gc.HasLen, 0) +} + +func (s *legacyCharmStoreSuite) TestEventWarning(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/unwise") + event, err := s.store.Event(charmURL, "") + c.Assert(err, gc.IsNil) + c.Assert(event.Warnings, jc.DeepEquals, []string{"foolishness"}) +} + +func (s *legacyCharmStoreSuite) TestBranchLocation(c *gc.C) { + charmURL := charm.MustParseURL("cs:series/name") + location := s.store.BranchLocation(charmURL) + c.Assert(location, gc.Equals, "lp:charms/series/name") + + charmURL = charm.MustParseURL("cs:~user/series/name") + location = s.store.BranchLocation(charmURL) + c.Assert(location, gc.Equals, "lp:~user/charms/series/name/trunk") +} + +func (s *legacyCharmStoreSuite) TestCharmURL(c *gc.C) { + tests := []struct{ url, loc string }{ + {"cs:precise/wordpress", 
"lp:charms/precise/wordpress"}, + {"cs:precise/wordpress", "http://launchpad.net/+branch/charms/precise/wordpress"}, + {"cs:precise/wordpress", "https://launchpad.net/+branch/charms/precise/wordpress"}, + {"cs:precise/wordpress", "http://code.launchpad.net/+branch/charms/precise/wordpress"}, + {"cs:precise/wordpress", "https://code.launchpad.net/+branch/charms/precise/wordpress"}, + {"cs:precise/wordpress", "bzr+ssh://bazaar.launchpad.net/+branch/charms/precise/wordpress"}, + {"cs:~charmers/precise/wordpress", "lp:~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "http://launchpad.net/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "https://launchpad.net/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "http://code.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "https://code.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "http://launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "https://launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "http://code.launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "https://code.launchpad.net/+branch/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "bzr+ssh://bazaar.launchpad.net/~charmers/charms/precise/wordpress/trunk"}, + {"cs:~charmers/precise/wordpress", "bzr+ssh://bazaar.launchpad.net/~charmers/charms/precise/wordpress/trunk/"}, + {"cs:~charmers/precise/wordpress", "~charmers/charms/precise/wordpress/trunk"}, + {"", "lp:~charmers/charms/precise/wordpress/whatever"}, + {"", "lp:~charmers/whatever/precise/wordpress/trunk"}, + {"", "lp:whatever/precise/wordpress"}, + } + for _, t := range tests { + charmURL, err := s.store.CharmURL(t.loc) + if t.url == "" { + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("unknown branch location: %q", t.loc)) + } else { + c.Assert(err, gc.IsNil) + c.Assert(charmURL.String(), gc.Equals, t.url) + } + } +} + +var legacyInferRepositoryTests = []struct { + url string + path string +}{ + {"cs:precise/wordpress", ""}, + {"local:oneiric/wordpress", "/some/path"}, +} + +func (s *legacyCharmStoreSuite) TestInferRepository(c *gc.C) { + for i, t := range legacyInferRepositoryTests { + c.Logf("test %d", i) + ref, err := charm.ParseURL(t.url) + c.Assert(err, gc.IsNil) + repo, err := charmrepo.LegacyInferRepository(ref, "/some/path") + c.Assert(err, gc.IsNil) + switch repo := repo.(type) { + case *charmrepo.LocalRepository: + c.Assert(repo.Path, gc.Equals, t.path) + default: + c.Assert(repo, gc.Equals, charmrepo.LegacyStore) + } + } + ref, err := charm.ParseURL("local:whatever") + c.Assert(err, gc.IsNil) + _, err = charmrepo.LegacyInferRepository(ref, "") + c.Assert(err, gc.ErrorMatches, "path to local repository not specified") + ref.Schema = "foo" + _, err = charmrepo.LegacyInferRepository(ref, "") + c.Assert(err, gc.ErrorMatches, "unknown schema for charm reference.*") +} + +func newLegacyStore(url string) *charmrepo.LegacyCharmStore { + return &charmrepo.LegacyCharmStore{BaseURL: url} +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/local.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/local.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/local.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,164 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmrepo
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+)
+
+// LocalRepository represents a local directory containing subdirectories
+// named after an Ubuntu series, each of which contains charms targeted for
+// that series. For example:
+//
+//   /path/to/repository/oneiric/mongodb/
+//   /path/to/repository/precise/mongodb.charm
+//   /path/to/repository/precise/wordpress/
+type LocalRepository struct {
+	Path string
+}
+
+var _ Interface = (*LocalRepository)(nil)
+
+// NewLocalRepository creates and returns a new local Juju repository pointing
+// to the given local path.
+func NewLocalRepository(path string) (Interface, error) {
+	if path == "" {
+		return nil, errgo.New("path to local repository not specified")
+	}
+	return &LocalRepository{
+		Path: path,
+	}, nil
+}
+
+// Resolve implements Interface.Resolve.
+func (r *LocalRepository) Resolve(ref *charm.URL) (*charm.URL, []string, error) {
+	if ref.Series == "" {
+		return nil, nil, errgo.Newf("no series specified for %s", ref)
+	}
+	if ref.Revision != -1 {
+		return ref, nil, nil
+	}
+	if ref.Series == "bundle" {
+		// Bundles do not have revision files and the revision is not included
+		// in metadata. For this reason, local bundles always have revision 0.
+		return ref.WithRevision(0), nil, nil
+	}
+	ch, err := r.Get(ref)
+	if err != nil {
+		return nil, nil, err
+	}
+	// This is strictly speaking unnecessary, but just in case a bad charm is
+	// used locally, we'll check the series.
+	_, err = charm.SeriesForCharm(ref.Series, ch.Meta().Series)
+	if err != nil {
+		return nil, nil, err
+	}
+	// We return nil for the supported series: although a charm in a local
+	// repository may declare multiple series, each charm in a repository is
+	// expected to target a single series, and the local repository concept
+	// is deprecated for multi-series charms.
+	return ref.WithRevision(ch.Revision()), nil, nil
+}
+
+func mightBeCharm(info os.FileInfo) bool {
+	if info.IsDir() {
+		return !strings.HasPrefix(info.Name(), ".")
+	}
+	return strings.HasSuffix(info.Name(), ".charm")
+}
+
+// Get returns a charm matching curl, if one exists. If curl has a revision of
+// -1, it returns the latest charm that matches curl. If multiple candidates
+// satisfy the foregoing, the first one encountered will be returned.
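+//
+// An illustrative sketch (editor's addition, not part of the original
+// source; the repository path and charm name are invented):
+//
+//	repo, err := NewLocalRepository("/path/to/repository")
+//	if err != nil {
+//		// The path was empty.
+//	}
+//	curl := charm.MustParseURL("local:precise/wordpress")
+//	canon, _, err := repo.Resolve(curl) // e.g. local:precise/wordpress-3
+//	if err == nil {
+//		ch, err := repo.Get(canon)
+//		_, _ = ch, err
+//	}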
+func (r *LocalRepository) Get(curl *charm.URL) (charm.Charm, error) {
+	if err := r.checkUrlAndPath(curl); err != nil {
+		return nil, err
+	}
+	if curl.Series == "bundle" {
+		return nil, errgo.Newf("expected a charm URL, got bundle URL %q", curl)
+	}
+	path := filepath.Join(r.Path, curl.Series)
+	infos, err := ioutil.ReadDir(path)
+	if err != nil {
+		return nil, entityNotFound(curl, r.Path)
+	}
+	var latest charm.Charm
+	for _, info := range infos {
+		chPath := filepath.Join(path, info.Name())
+		if info.Mode()&os.ModeSymlink != 0 {
+			var err error
+			if info, err = os.Stat(chPath); err != nil {
+				return nil, err
+			}
+		}
+		if !mightBeCharm(info) {
+			continue
+		}
+		if ch, err := charm.ReadCharm(chPath); err != nil {
+			logger.Warningf("failed to load charm at %q: %s", chPath, err)
+		} else if ch.Meta().Name == curl.Name {
+			if ch.Revision() == curl.Revision {
+				return ch, nil
+			}
+			if latest == nil || ch.Revision() > latest.Revision() {
+				latest = ch
+			}
+		}
+	}
+	if curl.Revision == -1 && latest != nil {
+		return latest, nil
+	}
+	return nil, entityNotFound(curl, r.Path)
+}
+
+// GetBundle implements Interface.GetBundle.
+func (r *LocalRepository) GetBundle(curl *charm.URL) (charm.Bundle, error) {
+	if err := r.checkUrlAndPath(curl); err != nil {
+		return nil, err
+	}
+	if curl.Series != "bundle" {
+		return nil, errgo.Newf("expected a bundle URL, got charm URL %q", curl)
+	}
+	// Note that the bundle does not inherently have a name different from the
+	// directory name: neither the name nor the revision is included in the
+	// bundle metadata.
+	// TODO frankban: handle bundle revisions, totally ignored for now.
+	path := filepath.Join(r.Path, curl.Series, curl.Name)
+	info, err := os.Stat(path)
+	if err != nil {
+		return nil, entityNotFound(curl, r.Path)
+	}
+	// Do not support bundle archives for the time being. What archive name
+	// should we use? What's the use case for compressing bundles anyway?
+	if !info.IsDir() {
+		return nil, entityNotFound(curl, r.Path)
+	}
+	return charm.ReadBundleDir(path)
+}
+
+// checkUrlAndPath checks that the given URL represents a local entity and that
+// the repository path exists.
+func (r *LocalRepository) checkUrlAndPath(curl *charm.URL) error {
+	if curl.Schema != "local" {
+		return fmt.Errorf("local repository got URL with non-local schema: %q", curl)
+	}
+	info, err := os.Stat(r.Path)
+	if err != nil {
+		if isNotExistsError(err) {
+			return repoNotFound(r.Path)
+		}
+		return err
+	}
+	if !info.IsDir() {
+		return repoNotFound(r.Path)
+	}
+	return nil
+}
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/local_test.go'
--- src/gopkg.in/juju/charmrepo.v2-unstable/local_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/local_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,278 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package charmrepo_test + +import ( + "io/ioutil" + "os" + "path/filepath" + + gitjujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmrepo.v2-unstable" +) + +type LocalRepoSuite struct { + gitjujutesting.FakeHomeSuite + repo *charmrepo.LocalRepository + charmsPath string + bundlesPath string +} + +var _ = gc.Suite(&LocalRepoSuite{}) + +func (s *LocalRepoSuite) SetUpTest(c *gc.C) { + s.FakeHomeSuite.SetUpTest(c) + root := c.MkDir() + s.repo = &charmrepo.LocalRepository{Path: root} + s.bundlesPath = filepath.Join(root, "bundle") + s.charmsPath = filepath.Join(root, "quantal") + c.Assert(os.Mkdir(s.bundlesPath, 0777), jc.ErrorIsNil) + c.Assert(os.Mkdir(s.charmsPath, 0777), jc.ErrorIsNil) +} + +func (s *LocalRepoSuite) addCharmArchive(name string) string { + return TestCharms.CharmArchivePath(s.charmsPath, name) +} + +func (s *LocalRepoSuite) addCharmDir(name string) string { + return TestCharms.ClonedDirPath(s.charmsPath, name) +} + +func (s *LocalRepoSuite) addBundleDir(name string) string { + return TestCharms.ClonedBundleDirPath(s.bundlesPath, name) +} + +func (s *LocalRepoSuite) checkNotFoundErr(c *gc.C, err error, charmURL *charm.URL) { + expect := `entity not found in "` + s.repo.Path + `": ` + charmURL.String() + c.Check(err, gc.ErrorMatches, expect) +} + +func (s *LocalRepoSuite) TestMissingCharm(c *gc.C) { + for i, str := range []string{ + "local:quantal/zebra", "local:badseries/zebra", + } { + c.Logf("test %d: %s", i, str) + charmURL := charm.MustParseURL(str) + _, err := s.repo.Get(charmURL) + s.checkNotFoundErr(c, err, charmURL) + } +} + +func (s *LocalRepoSuite) TestMissingRepo(c *gc.C) { + c.Assert(os.RemoveAll(s.repo.Path), gc.IsNil) + _, err := s.repo.Get(charm.MustParseURL("local:quantal/zebra")) + c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) + _, err = s.repo.GetBundle(charm.MustParseURL("local:bundle/wordpress-simple")) + c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) + c.Assert(ioutil.WriteFile(s.repo.Path, nil, 0666), gc.IsNil) + _, err = s.repo.Get(charm.MustParseURL("local:quantal/zebra")) + c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) + _, err = s.repo.GetBundle(charm.MustParseURL("local:bundle/wordpress-simple")) + c.Assert(err, gc.ErrorMatches, `no repository found at ".*"`) +} + +func (s *LocalRepoSuite) TestCharmArchive(c *gc.C) { + charmURL := charm.MustParseURL("local:quantal/dummy") + s.addCharmArchive("dummy") + + ch, err := s.repo.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch.Revision(), gc.Equals, 1) +} + +func (s *LocalRepoSuite) TestLogsErrors(c *gc.C) { + err := ioutil.WriteFile(filepath.Join(s.charmsPath, "blah.charm"), nil, 0666) + c.Assert(err, gc.IsNil) + err = os.Mkdir(filepath.Join(s.charmsPath, "blah"), 0666) + c.Assert(err, gc.IsNil) + samplePath := s.addCharmDir("upgrade2") + gibberish := []byte("don't parse me by") + err = ioutil.WriteFile(filepath.Join(samplePath, "metadata.yaml"), gibberish, 0666) + c.Assert(err, gc.IsNil) + + charmURL := charm.MustParseURL("local:quantal/dummy") + s.addCharmDir("dummy") + ch, err := s.repo.Get(charmURL) + c.Assert(err, gc.IsNil) + c.Assert(ch.Revision(), gc.Equals, 1) + c.Assert(c.GetTestLog(), gc.Matches, ` +.* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/blah": .* +.* WARNING juju.charm.charmrepo failed to load charm at ".*/quantal/blah.charm": .* +.* WARNING juju.charm.charmrepo failed to load charm at 
".*/quantal/upgrade2": .* +`[1:]) +} + +func renameSibling(c *gc.C, path, name string) { + c.Assert(os.Rename(path, filepath.Join(filepath.Dir(path), name)), gc.IsNil) +} + +func (s *LocalRepoSuite) TestIgnoresUnpromisingNames(c *gc.C) { + err := ioutil.WriteFile(filepath.Join(s.charmsPath, "blah.notacharm"), nil, 0666) + c.Assert(err, gc.IsNil) + err = os.Mkdir(filepath.Join(s.charmsPath, ".blah"), 0666) + c.Assert(err, gc.IsNil) + renameSibling(c, s.addCharmDir("dummy"), ".dummy") + renameSibling(c, s.addCharmArchive("dummy"), "dummy.notacharm") + charmURL := charm.MustParseURL("local:quantal/dummy") + + _, err = s.repo.Get(charmURL) + s.checkNotFoundErr(c, err, charmURL) + c.Assert(c.GetTestLog(), gc.Equals, "") +} + +func (s *LocalRepoSuite) TestFindsSymlinks(c *gc.C) { + realPath := TestCharms.ClonedDirPath(c.MkDir(), "dummy") + linkPath := filepath.Join(s.charmsPath, "dummy") + err := os.Symlink(realPath, linkPath) + c.Assert(err, gc.IsNil) + ch, err := s.repo.Get(charm.MustParseURL("local:quantal/dummy")) + c.Assert(err, gc.IsNil) + c.Assert(ch.Revision(), gc.Equals, 1) + c.Assert(ch.Meta().Name, gc.Equals, "dummy") + c.Assert(ch.Config().Options["title"].Default, gc.Equals, "My Title") + c.Assert(ch.(*charm.CharmDir).Path, gc.Equals, linkPath) +} + +func (s *LocalRepoSuite) TestResolve(c *gc.C) { + // Add some charms to the local repo. + s.addCharmDir("upgrade1") + s.addCharmDir("upgrade2") + s.addCharmDir("wordpress") + s.addCharmDir("riak") + s.addCharmDir("multi-series") + s.addCharmDir("multi-series-bad") + + // Define the tests to be run. + tests := []struct { + id string + url string + series []string + err string + }{{ + id: "local:quantal/upgrade", + url: "local:quantal/upgrade-2", + }, { + id: "local:quantal/upgrade-1", + url: "local:quantal/upgrade-1", + }, { + id: "local:quantal/wordpress", + url: "local:quantal/wordpress-3", + }, { + id: "local:quantal/riak", + url: "local:quantal/riak-7", + }, { + id: "local:quantal/wordpress-3", + url: "local:quantal/wordpress-3", + }, { + id: "local:quantal/wordpress-2", + url: "local:quantal/wordpress-2", + }, { + id: "local:quantal/new-charm-with-multi-series", + url: "local:quantal/new-charm-with-multi-series-7", + series: []string{}, + }, { + id: "local:quantal/multi-series-bad", + err: `series \"quantal\" not supported by charm, supported series are: precise,trusty`, + }, { + id: "local:bundle/openstack", + url: "local:bundle/openstack-0", + }, { + id: "local:bundle/openstack-42", + url: "local:bundle/openstack-42", + }, { + id: "local:trusty/riak", + err: "entity not found .*: local:trusty/riak", + }, { + id: "local:quantal/no-such", + err: "entity not found .*: local:quantal/no-such", + }, { + id: "local:upgrade", + err: "no series specified for local:upgrade", + }} + + // Run the tests. 
+ for i, test := range tests { + c.Logf("test %d: %s", i, test.id) + ref, series, err := s.repo.Resolve(charm.MustParseURL(test.id)) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + c.Assert(ref, gc.IsNil) + continue + } + c.Assert(err, jc.ErrorIsNil) + c.Assert(ref, jc.DeepEquals, charm.MustParseURL(test.url)) + c.Assert(series, jc.DeepEquals, test.series) + } +} + +func (s *LocalRepoSuite) TestGetBundle(c *gc.C) { + url := charm.MustParseURL("local:bundle/openstack") + s.addBundleDir("openstack") + b, err := s.repo.GetBundle(url) + c.Assert(err, jc.ErrorIsNil) + c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("openstack").Data()) +} + +func (s *LocalRepoSuite) TestGetBundleSymlink(c *gc.C) { + realPath := TestCharms.ClonedBundleDirPath(c.MkDir(), "wordpress-simple") + linkPath := filepath.Join(s.bundlesPath, "wordpress-simple") + err := os.Symlink(realPath, linkPath) + c.Assert(err, jc.ErrorIsNil) + url := charm.MustParseURL("local:bundle/wordpress-simple") + b, err := s.repo.GetBundle(url) + c.Assert(err, jc.ErrorIsNil) + c.Assert(b.Data(), jc.DeepEquals, TestCharms.BundleDir("wordpress-simple").Data()) +} + +func (s *LocalRepoSuite) TestGetBundleErrorNotFound(c *gc.C) { + url := charm.MustParseURL("local:bundle/no-such") + b, err := s.repo.GetBundle(url) + s.checkNotFoundErr(c, err, url) + c.Assert(b, gc.IsNil) +} + +var invalidURLTests = []struct { + about string + bundle bool + url string + err string +}{{ + about: "get charm: non-local schema", + url: "cs:trusty/django-42", + err: `local repository got URL with non-local schema: "cs:trusty/django-42"`, +}, { + about: "get bundle: non-local schema", + bundle: true, + url: "cs:bundle/django-scalable", + err: `local repository got URL with non-local schema: "cs:bundle/django-scalable"`, +}, { + about: "get charm: bundle provided", + url: "local:bundle/rails", + err: `expected a charm URL, got bundle URL "local:bundle/rails"`, +}, { + about: "get bundle: charm provided", + bundle: true, + url: "local:trusty/rails", + err: `expected a bundle URL, got charm URL "local:trusty/rails"`, +}} + +func (s *LocalRepoSuite) TestInvalidURLTest(c *gc.C) { + var err error + var e interface{} + for i, test := range invalidURLTests { + c.Logf("test %d: %s", i, test.about) + curl := charm.MustParseURL(test.url) + if test.bundle { + e, err = s.repo.GetBundle(curl) + } else { + e, err = s.repo.Get(curl) + } + c.Assert(e, gc.IsNil) + c.Assert(err, gc.ErrorMatches, test.err) + } +} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allbundles.txt.gz' Binary files src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allbundles.txt.gz 1970-01-01 00:00:00 +0000 and src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allbundles.txt.gz 2016-03-22 15:18:22 +0000 differ === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allcharms.json.gz' Binary files src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allcharms.json.gz 1970-01-01 00:00:00 +0000 and src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/allcharms.json.gz 2016-03-22 15:18:22 +0000 differ === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,297 @@ +package migratebundle + +import ( + "fmt" + "strings" + + "gopkg.in/errgo.v1" + 
"gopkg.in/juju/charm.v6-unstable" + "gopkg.in/yaml.v1" +) + +// legacyBundle represents an old-style bundle. +type legacyBundle struct { + Series string `yaml:",omitempty"` + Inherits interface{} `yaml:",omitempty"` // string or []string + Services map[string]*legacyService + // A relation can be in one of two styles: + // ["r1", "r2"] or ["r1", ["r2", "r3", ...]] + Relations []interface{} `yaml:",omitempty"` // []string or []interface{}{"", []string{...}} + Overrides map[string]interface{} `yaml:",omitempty"` + Tags []string `yaml:",omitempty"` +} + +// legacyService represents a service from a legacy bundle. +type legacyService struct { + Charm string `yaml:",omitempty"` + Branch string `yaml:",omitempty"` + NumUnits *int `yaml:"num_units,omitempty"` + Constraints string `yaml:",omitempty"` + Expose bool `yaml:",omitempty"` + Annotations map[string]string `yaml:",omitempty"` + To interface{} `yaml:",omitempty"` + Options map[string]interface{} `yaml:",omitempty"` + + // Spurious fields, used by existing bundles but not + // valid in the specification. Kept here so that + // the reversability tests can work. + Name string `yaml:",omitempty"` + Exposed bool `yaml:",omitempty"` + Local string `yaml:",omitempty"` +} + +// Migrate parses the old-style bundles.yaml file in bundlesYAML +// and returns a map containing an entry for each bundle +// found in that basket, keyed by the name of the bundle. +// +// It performs the following changes: +// +// - Any inheritance is expanded. +// +// - when a "to" placement directive refers to machine 0, +// an explicit machines section is added. Also, convert +// it to a slice. +// +// - If the charm URL is not specified, it is taken from the +// service name. +// +// - num_units is renamed to numunits, and set to 1 if omitted. +// +// - A relation clause with multiple targets is expanded +// into multiple relation clauses. +// +// The isSubordinate argument is used to find out whether a charm is a subordinate. +func Migrate(bundlesYAML []byte, isSubordinate func(id *charm.URL) (bool, error)) (map[string]*charm.BundleData, error) { + var bundles map[string]*legacyBundle + if err := yaml.Unmarshal(bundlesYAML, &bundles); err != nil { + return nil, errgo.Notef(err, "cannot parse legacy bundle") + } + // First expand any inherits clauses. 
+	// First expand any inherits clauses.
+	newBundles := make(map[string]*charm.BundleData)
+	for name, bundle := range bundles {
+		bundle, err := inherit(bundle, bundles)
+		if err != nil {
+			return nil, errgo.Notef(err, "bundle inheritance failed for %q", name)
+		}
+		newBundle, err := migrate(bundle, isSubordinate)
+		if err != nil {
+			return nil, errgo.Notef(err, "bundle migration failed for %q", name)
+		}
+		newBundles[name] = newBundle
+	}
+	return newBundles, nil
+}
+
+func migrate(b *legacyBundle, isSubordinate func(id *charm.URL) (bool, error)) (*charm.BundleData, error) {
+	data := &charm.BundleData{
+		Services: make(map[string]*charm.ServiceSpec),
+		Series:   b.Series,
+		Machines: make(map[string]*charm.MachineSpec),
+		Tags:     b.Tags,
+	}
+	for name, svc := range b.Services {
+		if svc == nil {
+			svc = new(legacyService)
+		}
+		charmId := svc.Charm
+		if charmId == "" {
+			charmId = name
+		}
+		numUnits := 0
+		if svc.NumUnits != nil {
+			numUnits = *svc.NumUnits
+		} else {
+			id, err := charm.ParseURL(charmId)
+			if err != nil {
+				return nil, errgo.Mask(err)
+			}
+			isSub, err := isSubordinate(id)
+			if err != nil {
+				return nil, errgo.Notef(err, "cannot get subordinate status for bundle charm %v", id)
+			}
+			if !isSub {
+				numUnits = 1
+			}
+		}
+		newSvc := &charm.ServiceSpec{
+			Charm:       charmId,
+			NumUnits:    numUnits,
+			Expose:      svc.Expose,
+			Options:     svc.Options,
+			Annotations: svc.Annotations,
+			Constraints: svc.Constraints,
+		}
+		if svc.To != nil {
+			to, err := stringList(svc.To)
+			if err != nil {
+				return nil, errgo.Notef(err, "bad 'to' placement clause")
+			}
+			// The old syntax differs from the new one only in that
+			// lxc:foo=0 becomes lxc:foo/0 in the new syntax.
+			for i, p := range to {
+				to[i] = strings.Replace(p, "=", "/", 1)
+				place, err := charm.ParsePlacement(to[i])
+				if err != nil {
+					return nil, errgo.Notef(err, "cannot parse 'to' placement clause %q", p)
+				}
+				if place.Machine != "" {
+					data.Machines[place.Machine] = new(charm.MachineSpec)
+				}
+			}
+			newSvc.To = to
+		}
+		data.Services[name] = newSvc
+	}
+	var err error
+	data.Relations, err = expandRelations(b.Relations)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot expand relations")
+	}
+	if len(data.Machines) == 0 {
+		data.Machines = nil
+	}
+	return data, nil
+}
+
+// expandRelations expands any relations that are
+// in the form [r1, [r2, r3, ...]] into the form [r1, r2], [r1, r3], ....
+func expandRelations(relations []interface{}) ([][]string, error) {
+	var newRelations [][]string
+	for _, rel := range relations {
+		rel, ok := rel.([]interface{})
+		if !ok || len(rel) != 2 {
+			return nil, errgo.Newf("unexpected relation clause %#v", rel)
+		}
+		ep0, ok := rel[0].(string)
+		if !ok {
+			return nil, errgo.Newf("first relation endpoint is %#v not string", rel[0])
+		}
+		if ep1, ok := rel[1].(string); ok {
+			newRelations = append(newRelations, []string{ep0, ep1})
+			continue
+		}
+		eps, ok := rel[1].([]interface{})
+		if !ok {
+			return nil, errgo.Newf("second relation endpoint is %#v not list or string", rel[1])
+		}
+		for _, ep1 := range eps {
+			ep1, ok := ep1.(string)
+			if !ok {
+				return nil, errgo.Newf("relation list member is not string")
+			}
+			newRelations = append(newRelations, []string{ep0, ep1})
+		}
+	}
+	return newRelations, nil
+}
+
+// inherit adds any inherited attributes to the given bundle b. It does
+// not modify b, returning a new bundle if necessary.
+//
+// The bundles map holds all the bundles from the basket (the possible
+// bundles that can be inherited from).
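+//
+// An illustrative sketch (editor's addition, not part of the original
+// source; the bundle names are invented). Given a basket such as:
+//
+//	base:
+//	  services:
+//	    wordpress: {charm: "cs:precise/wordpress"}
+//	myapp:
+//	  inherits: base
+//	  series: trusty
+//
+// inheriting produces the equivalent of:
+//
+//	myapp:
+//	  series: trusty
+//	  services:
+//	    wordpress: {charm: "cs:precise/wordpress"}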
+func inherit(b *legacyBundle, bundles map[string]*legacyBundle) (*legacyBundle, error) { + if b.Inherits == nil { + return b, nil + } + inheritsList, err := stringList(b.Inherits) + if err != nil { + return nil, errgo.Notef(err, "bad inherits clause") + } + if len(inheritsList) == 0 { + return b, nil + } + if len(inheritsList) > 1 { + return nil, errgo.Newf("multiple inheritance not supported") + } + inherits := inheritsList[0] + from := bundles[inherits] + if from == nil { + return nil, errgo.Newf("inherited-from bundle %q not found", inherits) + } + if from.Inherits != nil { + return nil, errgo.Newf("only a single level of inheritance is supported") + } + // Make a generic copy of both the base and target bundles, + // so we can apply inheritance regardless of Go types. + var target map[interface{}]interface{} + err = yamlCopy(&target, from) + if err != nil { + return nil, errgo.Notef(err, "copy target") + } + var source map[interface{}]interface{} + err = yamlCopy(&source, b) + if err != nil { + return nil, errgo.Notef(err, "copy source") + } + // Apply the inherited attributes. + copyOnto(target, source, true) + + // Convert back to Go types. + var newb legacyBundle + err = yamlCopy(&newb, target) + if err != nil { + return nil, errgo.Notef(err, "copy result") + } + return &newb, nil +} + +func stringList(v interface{}) ([]string, error) { + switch v := v.(type) { + case string: + return []string{v}, nil + case int, float64: + // Numbers are casually used as strings; allow that. + return []string{fmt.Sprint(v)}, nil + case []interface{}: + r := make([]string, len(v)) + for i, elem := range v { + switch elem := elem.(type) { + case string: + r[i] = elem + case float64, int: + // Numbers are casually used as strings; allow that. + r[i] = fmt.Sprint(elem) + default: + return nil, errgo.Newf("got %#v, expected string", elem) + } + } + return r, nil + } + return nil, errgo.Newf("got %#v, expected string", v) +} + +// yamlCopy copies the source value into the value +// pointed to by the target value by marshaling +// and unmarshaling YAML. +func yamlCopy(target, source interface{}) error { + data, err := yaml.Marshal(source) + if err != nil { + return errgo.Notef(err, "marshal copy") + } + if err := yaml.Unmarshal(data, target); err != nil { + return errgo.Notef(err, "unmarshal copy") + } + return nil +} + +// copyOnto copies the source onto the target, +// preserving any of the source that is not present +// in the target. 
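+//
+// An illustrative sketch of the semantics (editor's addition, not part
+// of the original source; the keys and values are invented). With
+//
+//	target = {"series": "precise", "services": {"a": {"charm": "x"}}}
+//	source = {"series": "trusty", "services": {"a": {"to": "0"}}}
+//
+// copyOnto(target, source, true) leaves target as
+//
+//	{"series": "trusty", "services": {"a": {"charm": "x", "to": "0"}}}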
+func copyOnto(target, source map[interface{}]interface{}, isRoot bool) {
+	for key, val := range source {
+		if key == "inherits" && isRoot {
+			continue
+		}
+		switch val := val.(type) {
+		case map[interface{}]interface{}:
+			if targetVal, ok := target[key].(map[interface{}]interface{}); ok {
+				copyOnto(targetVal, val, false)
+			} else {
+				target[key] = val
+			}
+		default:
+			target[key] = val
+		}
+	}
+}
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate_test.go'
--- src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/migrate_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,948 @@
+package migratebundle
+
+import (
+	"bufio"
+	"compress/gzip"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"strings"
+	"sync"
+
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/yaml.v1"
+
+	"gopkg.in/juju/charmrepo.v2-unstable"
+)
+
+var _ = gc.Suite(&migrateSuite{})
+
+type migrateSuite struct{}
+
+// The charm data cache caches results from
+// fetching charms from the charm store.
+// If the update-charms flag is specified, the
+// contents of charmDataCache are written to
+// allcharms.json.gz; otherwise the contents
+// of allcharms.json.gz are read and the charm
+// store is not touched.
+//
+var (
+	charmDataCacheMutex sync.Mutex
+	charmDataCache      = make(map[string]*charmData)
+)
+
+var updateCharms = flag.Bool("update-charms", false, "fetch and update local charms for test bundles")
+
+const charmCacheFile = "allcharms.json.gz"
+
+func (*migrateSuite) SetUpSuite(c *gc.C) {
+	if *updateCharms {
+		charmrepo.CacheDir = c.MkDir()
+		return
+	}
+	f, err := os.Open(charmCacheFile)
+	if err != nil {
+		c.Logf("cannot open charms data: %v", err)
+		return
+	}
+	defer f.Close()
+	gzr, err := gzip.NewReader(f)
+	c.Assert(err, gc.IsNil)
+	dec := json.NewDecoder(gzr)
+	err = dec.Decode(&charmDataCache)
+	c.Assert(err, gc.IsNil)
+}
+
+func (*migrateSuite) TearDownSuite(c *gc.C) {
+	if !*updateCharms {
+		return
+	}
+	data, err := json.Marshal(charmDataCache)
+	c.Assert(err, gc.IsNil)
+	f, err := os.Create(charmCacheFile)
+	c.Assert(err, gc.IsNil)
+	defer f.Close()
+	gzw := gzip.NewWriter(f)
+	defer gzw.Close()
+	_, err = gzw.Write(data)
+	c.Assert(err, gc.IsNil)
+}
+
+var migrateTests = []struct {
+	about       string
+	bundles     string
+	subords     map[string]bool
+	expect      map[string]*charm.BundleData
+	expectError string
+}{{
+	about: "single bundle, no relations cs:~jorge/bundle/wordpress",
+	bundles: `
+		|wordpress-simple:
+		|  series: precise
+		|  tags: ["foo", "bar"]
+		|  services:
+		|    wordpress:
+		|      charm: "cs:precise/wordpress-20"
+		|      num_units: 1
+		|      expose: true
+		|      options:
+		|        debug: "no"
+		|        engine: nginx
+		|        tuning: single
+		|        "wp-content": ""
+		|      annotations:
+		|        "gui-x": 529
+		|        "gui-y": -97
+		|    mysql:
+		|      charm: "cs:precise/mysql-28"
+		|      num_units: 2
+		|      options:
+		|        "binlog-format": MIXED
+		|        "block-size": 5
+		|        "dataset-size": "80%"
+		|        flavor: distro
+		|        "query-cache-size": -1
+		|        "query-cache-type": "OFF"
+		|        vip_iface: eth0
+		|      annotations:
+		|        "gui-x": 530
+		|        "gui-y": 185
+		|`,
+	expect: map[string]*charm.BundleData{
+		"wordpress-simple": {
+			Series: "precise",
+			Tags:   []string{"foo", "bar"},
+			Services: map[string]*charm.ServiceSpec{
+				"wordpress": {
+					Charm:    "cs:precise/wordpress-20",
+					NumUnits: 1,
+					Expose:   true,
+					Options: map[string]interface{}{
+						"debug":      "no",
+						"engine":     "nginx",
+						"tuning":     "single",
"single", + "wp-content": "", + }, + Annotations: map[string]string{ + "gui-x": "529", + "gui-y": "-97", + }, + }, + "mysql": { + Charm: "cs:precise/mysql-28", + NumUnits: 2, + Options: map[string]interface{}{ + "binlog-format": "MIXED", + "block-size": 5, + "dataset-size": "80%", + "flavor": "distro", + "query-cache-size": -1, + "query-cache-type": "OFF", + "vip_iface": "eth0", + }, + Annotations: map[string]string{ + "gui-x": "530", + "gui-y": "185", + }, + }, + }, + }, + }, +}, { + about: "missing num_units interpreted as 1 for non-subordinates", + bundles: ` + |wordpress-simple: + | services: + | wordpress: + | charm: "cs:precise/wordpress-20" + |`, + expect: map[string]*charm.BundleData{ + "wordpress-simple": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "cs:precise/wordpress-20", + NumUnits: 1, + }, + }, + }, + }, +}, { + about: "missing num_units interpreted as 0 for subordinates", + bundles: ` + |wordpress-simple: + | services: + | wordpress: + | charm: "cs:precise/wordpress-20" + |`, + subords: map[string]bool{ + "cs:precise/wordpress-20": true, + }, + expect: map[string]*charm.BundleData{ + "wordpress-simple": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "cs:precise/wordpress-20", + }, + }, + }, + }, +}, { + about: "missing charm taken from service name", + bundles: ` + |wordpress-simple: + | services: + | wordpress: + |`, + expect: map[string]*charm.BundleData{ + "wordpress-simple": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "wordpress", + NumUnits: 1, + }, + }, + }, + }, +}, { + about: "services with placement directives", + bundles: ` + |wordpress: + | services: + | wordpress1: + | num_units: 1 + | to: 0 + | wordpress2: + | num_units: 1 + | to: kvm:0 + | wordpress3: + | num_units: 1 + | to: mysql + | wordpress4: + | num_units: 1 + | to: kvm:mysql + | mysql: + | num_units: 1 + |`, + expect: map[string]*charm.BundleData{ + "wordpress": { + Services: map[string]*charm.ServiceSpec{ + "wordpress1": { + Charm: "wordpress1", + NumUnits: 1, + To: []string{"0"}, + }, + "wordpress2": { + Charm: "wordpress2", + NumUnits: 1, + To: []string{"kvm:0"}, + }, + "wordpress3": { + Charm: "wordpress3", + NumUnits: 1, + To: []string{"mysql"}, + }, + "wordpress4": { + Charm: "wordpress4", + NumUnits: 1, + To: []string{"kvm:mysql"}, + }, + "mysql": { + Charm: "mysql", + NumUnits: 1, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "0": {}, + }, + }, + }, +}, { + about: "service with single indirect placement directive", + bundles: ` + |wordpress: + | services: + | wordpress: + | to: kvm:0 + |`, + expect: map[string]*charm.BundleData{ + "wordpress": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "wordpress", + To: []string{"kvm:0"}, + NumUnits: 1, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "0": {}, + }, + }, + }, +}, { + about: "service with invalid placement directive", + bundles: ` + |wordpress: + | services: + | wordpress: + | to: kvm::0 + |`, + expectError: `bundle migration failed for "wordpress": cannot parse 'to' placment clause "kvm::0": invalid placement syntax "kvm::0"`, +}, { + about: "service with inheritance", + bundles: ` + |wordpress: + | inherits: base + | services: + | wordpress: + | charm: precise/wordpress + | annotations: + | foo: yes + | base: arble + |base: + | services: + | logging: + | charm: precise/logging + | wordpress: + | expose: on + | annotations: + | foo: bar + | base: arble + |`, + subords: map[string]bool{ + "cs:precise/logging": true, + }, + 
expect: map[string]*charm.BundleData{ + "wordpress": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "precise/wordpress", + Expose: true, + Annotations: map[string]string{ + "foo": "yes", + "base": "arble", + }, + NumUnits: 1, + }, + "logging": { + Charm: "precise/logging", + }, + }, + }, + "base": { + Services: map[string]*charm.ServiceSpec{ + "logging": { + Charm: "precise/logging", + }, + "wordpress": { + Charm: "wordpress", + NumUnits: 1, + Expose: true, + Annotations: map[string]string{ + "foo": "bar", + "base": "arble", + }, + }, + }, + }, + }, +}, { + about: "open relations", + bundles: ` + |wordpress: + | services: + | wordpress: + | charm: precise/wordpress + | mysql: + | charm: precise/mysql + | logging: + | charm: precise/logging + | monitoring: + | charm: precise/monitor + | relations: + | - [wordpress, mysql] + | - [logging, [mysql, wordpress]] + | - [monitoring, wordpress] + |`, + subords: map[string]bool{ + "cs:precise/logging": true, + "cs:precise/monitor": true, + }, + expect: map[string]*charm.BundleData{ + "wordpress": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "precise/wordpress", + NumUnits: 1, + }, + "mysql": { + Charm: "precise/mysql", + NumUnits: 1, + }, + "logging": { + Charm: "precise/logging", + }, + "monitoring": { + Charm: "precise/monitor", + }, + }, + Relations: [][]string{ + {"wordpress", "mysql"}, + {"logging", "mysql"}, + {"logging", "wordpress"}, + {"monitoring", "wordpress"}, + }, + }, + }, +}, { + about: "multiple element to clause", + bundles: ` + |top: + | services: + | wordpress: + | num_units: 3 + | charm: 'cs:precise/wordpress' + | to: [0, 'lxc:0', mysql=0, 'lxc:mysql=1'] + | mysql: + | num_units: 2 + | charm: 'cs:mysql' + `, + expect: map[string]*charm.BundleData{ + "top": { + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "cs:precise/wordpress", + NumUnits: 3, + To: []string{"0", "lxc:0", "mysql/0", "lxc:mysql/1"}, + }, + "mysql": { + Charm: "cs:mysql", + NumUnits: 2, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "0": {}, + }, + }, + }, +}} + +func (*migrateSuite) TestMigrate(c *gc.C) { + for i, test := range migrateTests { + c.Logf("test %d: %s", i, test.about) + result, err := Migrate(unbeautify(test.bundles), func(id *charm.URL) (bool, error) { + return test.subords[id.String()], nil + }) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + } else { + c.Assert(err, gc.IsNil) + c.Assert(result, jc.DeepEquals, test.expect) + } + } +} + +func (*migrateSuite) TestMigrateWithSubordinateStatusError(c *gc.C) { + bdata := unbeautify(` + |wordpress: + | services: + | wordpress: + | charm: precise/wordpress + |`, + ) + result, err := Migrate(bdata, func(*charm.URL) (bool, error) { + return false, fmt.Errorf("oops") + }) + c.Assert(result, gc.IsNil) + c.Assert(err, gc.ErrorMatches, `bundle migration failed for "wordpress": cannot get subordinate status for bundle charm cs:precise/wordpress: oops`) +} + +func (*migrateSuite) TestMigrateAll(c *gc.C) { + c.ExpectFailure("all bundles do not migrate successfully") + passed, total := 0, 0 + doAllBundles(c, func(c *gc.C, id string, data []byte) { + c.Logf("\nmigrate test %s", id) + ok := true + bundles, err := Migrate(data, func(id *charm.URL) (bool, error) { + meta, err := getCharm(id) + if err != nil { + return false, err + } + return meta.Meta().Subordinate, nil + }) + if err != nil { + c.Logf("cannot migrate: %v", err) + ok = false + } + for _, bundle := range bundles { + ok = checkBundleData(c, 
bundle) && ok + } + if ok { + passed++ + } + total++ + }) + c.Logf("%d/%d passed", passed, total) + c.Check(passed, gc.Equals, total) +} + +func checkBundleData(c *gc.C, bd *charm.BundleData) bool { + charms := make(map[string]charm.Charm) + ok := true + for _, svc := range bd.Services { + id, err := charm.ParseURL(svc.Charm) + if err != nil { + ok = false + c.Logf("cannot parse %q: %v", svc.Charm, err) + continue + } + if id.Series == "" { + id.Series = bd.Series + } + ch, err := getCharm(id) + if err != nil { + ok = false + c.Logf("cannot find %q: %v", id, err) + continue + } + charms[svc.Charm] = ch + } + if ok { + if err := bd.VerifyWithCharms(nil, nil, charms); err != nil { + for _, err := range err.(*charm.VerificationError).Errors { + c.Logf("verification error: %v", err) + } + ok = false + } + } + return ok +} + +var inheritTests = []struct { + about string + bundle string + base string + baseName string + expect string + expectError string +}{{ + about: "inherited-from not found", + bundle: `inherits: non-existent`, + expectError: `inherited-from bundle "non-existent" not found`, +}, { + about: "bad inheritance #1", + bundle: `inherits: {}`, + expectError: `bad inherits clause: got map\[interface \{\}]interface \{\}\{\}, expected string`, +}, { + about: "bad inheritance #2", + bundle: `inherits: [{}]`, + expectError: `bad inherits clause: got map\[interface \{\}]interface \{\}\{\}, expected string`, +}, { + about: "bad inheritance #3", + bundle: `inherits: ['a', 'b']`, + expectError: `multiple inheritance not supported`, +}, { + about: "inherit everything", + bundle: ` + |inherits: base + `, + baseName: "base", + base: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, + expect: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, +}, { + about: "inherit everything, specified as list", + bundle: ` + |inherits: [base] + `, + baseName: "base", + base: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, + expect: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, +}, { + about: "different base name", + bundle: ` + |inherits: something + `, + baseName: "something", + base: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, + expect: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, +}, { + about: "override series", + bundle: ` + |inherits: base + |series: trusty + `, + baseName: "base", + base: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, + expect: ` + |series: trusty + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + `, +}, { + about: "override wordpress charm", + bundle: ` + |inherits: base + |services: + | wordpress: + | charm: 'cs:quantal/different' + `, + baseName: "base", + base: ` + |series: precise + |services: + | wordpress: + | charm: "cs:precise/wordpress" + | options: + | foo: bar + `, + expect: ` + |series: precise + |services: + | wordpress: + | charm: "cs:quantal/different" + | options: + | foo: bar + `, +}, { + about: "override to clause", + bundle: ` + |inherits: base + |services: + | wordpress: + | to: 0 + `, + baseName: "base", + base: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + | options: + | foo: bar + `, + expect: ` + |series: precise + |services: + | wordpress: + | charm: 'cs:precise/wordpress' + | options: + | foo: bar + | to: 0 + `, +}, { + about: 
"deep inheritance", + bundle: ` + |inherits: base + `, + baseName: "base", + base: ` + |inherits: "other" + `, + expectError: `only a single level of inheritance is supported`, +}} + +var otherBundle = parseBundle(` + |series: quantal + |overrides: + | something: other +`) + +func (*migrateSuite) TestInherit(c *gc.C) { + for i, test := range inheritTests { + c.Logf("test %d: %s", i, test.about) + bundle := parseBundle(test.bundle) + base := parseBundle(test.base) + expect := parseBundle(test.expect) + // Add another bundle so we know that is + bundles := map[string]*legacyBundle{ + test.baseName: base, + "other": otherBundle, + } + b, err := inherit(bundle, bundles) + if test.expectError != "" { + c.Check(err, gc.ErrorMatches, test.expectError) + } else { + c.Assert(err, gc.IsNil) + c.Assert(b, jc.DeepEquals, expect) + } + } +} + +func (s *migrateSuite) TestNoNameClashes(c *gc.C) { + nameCounts := make(map[string]int) + doAllBundles(c, func(c *gc.C, id string, data []byte) { + nameCounts[id]++ + }) + // There are actually two name clashes in the real + // in-the-wild bundles: + // cs:~charmers/bundle/mediawiki-scalable + // cs:~charmers/bundle/mongodb-cluster + // Both of these actually fit with our proposed scheme, + // because they're (almost) identical with the bundles + // within mediawiki and mongodb respectively. + // + // So we discount them from our example bundles. + delete(nameCounts, "cs:~charmers/bundle/mongodb-cluster") + delete(nameCounts, "cs:~charmers/bundle/mediawiki-scalable") + + doAllBundles(c, func(c *gc.C, id string, data []byte) { + var bundles map[string]*legacyBundle + err := yaml.Unmarshal(data, &bundles) + c.Assert(err, gc.IsNil) + if len(bundles) == 1 { + return + } + for name := range bundles { + subId := id + "-" + name + nameCounts[subId]++ + } + }) + for name, count := range nameCounts { + if count != 1 { + c.Errorf("%d clashes at %s", count-1, name) + } + } +} + +func (s *migrateSuite) TestReversible(c *gc.C) { + doAllBundles(c, s.testReversible) +} + +func (*migrateSuite) testReversible(c *gc.C, id string, data []byte) { + var bundles map[string]*legacyBundle + err := yaml.Unmarshal(data, &bundles) + c.Assert(err, gc.IsNil) + for _, b := range bundles { + if len(b.Relations) == 0 { + b.Relations = nil + } + } + var allInterface interface{} + err = yaml.Unmarshal(data, &allInterface) + c.Assert(err, gc.IsNil) + all, ok := allInterface.(map[interface{}]interface{}) + c.Assert(ok, gc.Equals, true) + for _, b := range all { + b := ymap(b) + // Remove empty relations line. + if rels, ok := b["relations"].([]interface{}); ok && len(rels) == 0 { + delete(b, "relations") + } + // Convert all annotation values to strings. Strictly + // speaking this means that the bundles are + // non-reversible, but juju converts annotations to + // string anyway, so it doesn't matter. + for _, svc := range ymap(b["services"]) { + svc := ymap(svc) + annot := ymap(svc["annotations"]) + for key, val := range annot { + if _, ok := val.(string); !ok { + annot[key] = fmt.Sprint(val) + } + } + } + + } + data1, err := yaml.Marshal(bundles) + c.Assert(err, gc.IsNil) + var all1 interface{} + err = yaml.Unmarshal(data1, &all1) + c.Assert(err, gc.IsNil) + c.Assert(all1, jc.DeepEquals, all) +} + +// ymap returns the default form of a map +// when unmarshaled by YAML. 
+func ymap(v interface{}) map[interface{}]interface{} {
+	if v == nil {
+		return nil
+	}
+	return v.(map[interface{}]interface{})
+}
+
+// doAllBundles calls the given function for each bundle
+// in all the available test bundles.
+func doAllBundles(c *gc.C, f func(c *gc.C, id string, data []byte)) {
+	a := openAllBundles()
+	defer a.Close()
+	for {
+		title, data, err := a.readSection()
+		if len(data) > 0 {
+			f(c, title, data)
+		}
+		if err != nil {
+			c.Assert(errgo.Cause(err), gc.Equals, io.EOF)
+			break
+		}
+	}
+}
+
+type allBundles struct {
+	file *os.File
+	r    *bufio.Reader
+}
+
+func openAllBundles() *allBundles {
+	f, err := os.Open("allbundles.txt.gz")
+	if err != nil {
+		log.Fatal(err)
+	}
+	gzr, err := gzip.NewReader(f)
+	if err != nil {
+		log.Fatal(err)
+	}
+	r := bufio.NewReader(gzr)
+	return &allBundles{
+		file: f,
+		r:    r,
+	}
+}
+
+func (a *allBundles) Close() error {
+	return a.file.Close()
+}
+
+// sectionMarker delimits a section in the bundles file.
+// Note that no bundles contain non-ASCII characters
+// so the first byte of this string is a sufficient
+// sentinel.
+const sectionMarker = "¶ "
+
+func (a *allBundles) readSection() (title string, data []byte, err error) {
+	title, err = a.r.ReadString('\n')
+	if err != nil {
+		return "", nil, err
+	}
+	if !strings.HasPrefix(title, sectionMarker) || !strings.HasSuffix(title, "\n") {
+		return "", nil, fmt.Errorf("invalid title line %q", title)
+	}
+	title = strings.TrimPrefix(title, sectionMarker)
+	title = strings.TrimSuffix(title, "\n")
+	for {
+		c, err := a.r.ReadByte()
+		switch {
+		case err == io.EOF:
+			return title, data, nil
+		case err != nil:
+			return "", nil, err
+		case c == sectionMarker[0]:
+			a.r.UnreadByte()
+			return title, data, nil
+		}
+		data = append(data, c)
+	}
+}
+
+func parseBundle(s string) *legacyBundle {
+	var b *legacyBundle
+	err := yaml.Unmarshal(unbeautify(s), &b)
+	if err != nil {
+		panic(fmt.Errorf("cannot unmarshal %q: %v", s, err))
+	}
+	return b
+}
+
+// indentReplacer deletes tabs and | beautifier characters.
+var indentReplacer = strings.NewReplacer("\t", "", "|", "")
+
+// unbeautify strips the tabs and | characters that
+// we use to make the tests look nicer.
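+//
+// For example (editor's illustration, not part of the original source),
+// a beautified test string written as
+//
+//	"\t|services:\n\t|  wordpress:\n"
+//
+// unbeautifies to the plain YAML
+//
+//	"services:\n  wordpress:\n"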
+func unbeautify(s string) []byte { + return []byte(indentReplacer.Replace(s)) +} + +func noCharms(id *charm.URL) (*charm.Meta, error) { + return nil, fmt.Errorf("charm %q not found", id) +} + +func getCharm(id *charm.URL) (charm.Charm, error) { + charmDataCacheMutex.Lock() + defer charmDataCacheMutex.Unlock() + if m, ok := charmDataCache[id.String()]; ok || !*updateCharms { + if m == nil { + return nil, fmt.Errorf("charm %q not found in cache", id) + } + return m, nil + } + log.Printf("getting %s", id) + ch, err := charmrepo.LegacyStore.Get(id) + if err != nil { + charmDataCache[id.String()] = nil + return nil, err + } + chData := &charmData{ + Meta_: ch.Meta(), + Config_: ch.Config(), + Metrics_: ch.Metrics(), + } + charmDataCache[id.String()] = chData + return chData, nil +} + +type charmData struct { + Meta_ *charm.Meta `json:"Meta"` + Config_ *charm.Config `json:"Config"` + Metrics_ *charm.Metrics `json:"Metrics"` +} + +func (c *charmData) Meta() *charm.Meta { + return c.Meta_ +} + +func (c *charmData) Metrics() *charm.Metrics { + return c.Metrics_ +} + +func (c *charmData) Config() *charm.Config { + return c.Config_ +} + +func (c *charmData) Actions() *charm.Actions { + return nil +} + +func (c *charmData) Revision() int { + return 0 +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/package_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/migratebundle/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package migratebundle + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/package_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo_test + +import ( + "testing" + + jujutesting "github.com/juju/testing" +) + +func TestPackage(t *testing.T) { + jujutesting.MgoTestPackage(t, nil) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/params.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/params.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/params.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,91 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo + +import ( + "fmt" + + "gopkg.in/juju/charm.v6-unstable" +) + +// InfoResponse is sent by the charm store in response to charm-info requests. +type InfoResponse struct { + CanonicalURL string `json:"canonical-url,omitempty"` + Revision int `json:"revision"` // Zero is valid. Can't omitempty. + Sha256 string `json:"sha256,omitempty"` + Digest string `json:"digest,omitempty"` + Errors []string `json:"errors,omitempty"` + Warnings []string `json:"warnings,omitempty"` +} + +// EventResponse is sent by the charm store in response to charm-event requests. +type EventResponse struct { + Kind string `json:"kind"` + Revision int `json:"revision"` // Zero is valid. Can't omitempty. + Digest string `json:"digest,omitempty"` + Errors []string `json:"errors,omitempty"` + Warnings []string `json:"warnings,omitempty"` + Time string `json:"time,omitempty"` +} + +// CharmRevision holds the revision number of a charm and any error +// encountered in retrieving it. 
+type CharmRevision struct {
+	Revision int
+	Sha256   string
+	Err      error
+}
+
+// NotFoundError represents an error indicating that the requested data wasn't found.
+type NotFoundError struct {
+	msg string
+}
+
+func (e *NotFoundError) Error() string {
+	return e.msg
+}
+
+func repoNotFound(path string) error {
+	return &NotFoundError{fmt.Sprintf("no repository found at %q", path)}
+}
+
+func entityNotFound(curl *charm.URL, repoPath string) error {
+	return &NotFoundError{fmt.Sprintf("entity not found in %q: %s", repoPath, curl)}
+}
+
+// CharmNotFound returns an error indicating that the
+// charm at the specified URL does not exist.
+func CharmNotFound(url string) error {
+	return &NotFoundError{
+		msg: "charm not found: " + url,
+	}
+}
+
+// BundleNotFound returns an error indicating that the
+// bundle at the specified URL does not exist.
+func BundleNotFound(url string) error {
+	return &NotFoundError{
+		msg: "bundle not found: " + url,
+	}
+}
+
+// InvalidPath returns an invalidPathError.
+func InvalidPath(path string) error {
+	return &invalidPathError{path}
+}
+
+// invalidPathError represents an error indicating that the requested
+// charm or bundle path is not valid as a charm or bundle path.
+type invalidPathError struct {
+	path string
+}
+
+func (e *invalidPathError) Error() string {
+	return fmt.Sprintf("path %q can not be a relative path", e.path)
+}
+
+// IsInvalidPathError reports whether err was created by InvalidPath.
+func IsInvalidPathError(err error) bool {
+	_, ok := err.(*invalidPathError)
+	return ok
+}
=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/repo.go'
--- src/gopkg.in/juju/charmrepo.v2-unstable/repo.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmrepo.v2-unstable/repo.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,48 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package charmrepo implements access to charm repositories.
+package charmrepo
+
+import (
+	"fmt"
+
+	"github.com/juju/loggo"
+	"gopkg.in/juju/charm.v6-unstable"
+)
+
+var logger = loggo.GetLogger("juju.charm.charmrepo")
+
+// Interface represents a charm repository (a collection of charms).
+type Interface interface {
+	// Get returns the charm referenced by curl.
+	Get(curl *charm.URL) (charm.Charm, error)
+
+	// GetBundle returns the bundle referenced by curl.
+	GetBundle(curl *charm.URL) (charm.Bundle, error)
+
+	// Resolve resolves the given reference to a canonical form which refers
+	// unambiguously to a specific revision of an entity. If the entity
+	// is a charm that may support more than one series, canonRef.Series will
+	// be empty and supportedSeries will hold the list of series supported by
+	// the charm with the preferred series first.
+	// If ref holds a series, then Resolve will always ensure that the returned
+	// entity supports that series.
+	Resolve(ref *charm.URL) (canonRef *charm.URL, supportedSeries []string, err error)
+}
+
+// InferRepository returns a charm repository inferred from the provided charm
+// or bundle reference.
+// Charm store references will use the provided parameters.
+// Local references will use the provided path.
+func InferRepository(ref *charm.URL, charmStoreParams NewCharmStoreParams, localRepoPath string) (Interface, error) {
+	switch ref.Schema {
+	case "cs":
+		return NewCharmStore(charmStoreParams), nil
+	case "local":
+		return NewLocalRepository(localRepoPath)
+	}
+	// TODO fix this error message to reference bundles too?
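+	// Illustrative behaviour (editor's note, not part of the original
+	// source), using URLs like those in the tests below:
+	//
+	//	InferRepository(charm.MustParseURL("cs:trusty/django"), params, "")
+	//	    -> *CharmStore
+	//	InferRepository(charm.MustParseURL("local:trusty/django"), params, "/repo")
+	//	    -> *LocalRepository with Path "/repo"
+	//
+	// Any other schema falls through to the error below.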
+ return nil, fmt.Errorf("unknown schema for charm reference %q", ref) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/repo_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/repo_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/repo_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,57 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmrepo_test + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient" + charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing" +) + +var TestCharms = charmtesting.NewRepo("internal/test-charm-repo", "quantal") + +type inferRepoSuite struct{} + +var _ = gc.Suite(&inferRepoSuite{}) + +var inferRepositoryTests = []struct { + url string + localRepoPath string + err string +}{{ + url: "cs:trusty/django", +}, { + url: "local:precise/wordpress", + err: "path to local repository not specified", +}, { + url: "local:precise/haproxy-47", + localRepoPath: "/tmp/repo-path", +}} + +func (s *inferRepoSuite) TestInferRepository(c *gc.C) { + for i, test := range inferRepositoryTests { + c.Logf("test %d: %s", i, test.url) + ref := charm.MustParseURL(test.url) + repo, err := charmrepo.InferRepository( + ref, charmrepo.NewCharmStoreParams{}, test.localRepoPath) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + c.Assert(repo, gc.IsNil) + continue + } + c.Assert(err, jc.ErrorIsNil) + switch store := repo.(type) { + case *charmrepo.LocalRepository: + c.Assert(store.Path, gc.Equals, test.localRepoPath) + case *charmrepo.CharmStore: + c.Assert(store.URL(), gc.Equals, csclient.ServerURL) + default: + c.Fatal("unknown repository type") + } + } +} === added directory 'src/gopkg.in/juju/charmrepo.v2-unstable/testing' === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/testing/charm.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/testing/charm.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/testing/charm.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,173 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/juju/utils/fs" + "gopkg.in/juju/charm.v6-unstable" +) + +func check(err error) { + if err != nil { + panic(err) + } +} + +// NewRepo returns a new testing charm repository rooted at the given +// path, relative to the package directory of the calling package, using +// defaultSeries as the default series. +func NewRepo(path, defaultSeries string) *Repo { + // Find the repo directory. This is only OK to do + // because this is running in a test context + // so we know the source is available. + _, file, _, ok := runtime.Caller(1) + if !ok { + panic("cannot get caller") + } + r := &Repo{ + path: filepath.Join(filepath.Dir(file), path), + defaultSeries: defaultSeries, + } + _, err := os.Stat(r.path) + if err != nil { + panic(fmt.Errorf("cannot read repository found at %q: %v", r.path, err)) + } + return r +} + +// Repo represents a charm repository used for testing. 
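+//
+// An illustrative sketch (editor's addition, not part of the original
+// source), mirroring how repo_test.go constructs one:
+//
+//	var TestCharms = NewRepo("internal/test-charm-repo", "quantal")
+//
+// and then, inside a test:
+//
+//	path := TestCharms.ClonedDirPath(c.MkDir(), "dummy")
+//
+// which yields a throwaway copy of the "dummy" charm directory.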
+type Repo struct {
+	path          string
+	defaultSeries string
+}
+
+func (r *Repo) Path() string {
+	return r.path
+}
+
+func clone(dst, src string) string {
+	dst = filepath.Join(dst, filepath.Base(src))
+	check(fs.Copy(src, dst))
+	return dst
+}
+
+// BundleDirPath returns the path to the bundle directory with the given
+// name.
+func (r *Repo) BundleDirPath(name string) string {
+	return filepath.Join(r.Path(), "bundle", name)
+}
+
+// BundleDir returns the actual charm.BundleDir named name.
+func (r *Repo) BundleDir(name string) *charm.BundleDir {
+	b, err := charm.ReadBundleDir(r.BundleDirPath(name))
+	check(err)
+	return b
+}
+
+// CharmDirPath returns the path to the charm directory with the given name
+// in the default series.
+func (r *Repo) CharmDirPath(name string) string {
+	return filepath.Join(r.Path(), r.defaultSeries, name)
+}
+
+// CharmDir returns the actual charm.CharmDir named name.
+func (r *Repo) CharmDir(name string) *charm.CharmDir {
+	ch, err := charm.ReadCharmDir(r.CharmDirPath(name))
+	check(err)
+	return ch
+}
+
+// ClonedDirPath returns the path to a new copy of the default charm directory
+// named name.
+func (r *Repo) ClonedDirPath(dst, name string) string {
+	return clone(dst, r.CharmDirPath(name))
+}
+
+// ClonedBundleDirPath returns the path to a new copy of the default bundle
+// directory named name.
+func (r *Repo) ClonedBundleDirPath(dst, name string) string {
+	return clone(dst, r.BundleDirPath(name))
+}
+
+// RenamedClonedDirPath returns the path to a new copy of the default
+// charm directory named name, renamed to newName.
+func (r *Repo) RenamedClonedDirPath(dst, name, newName string) string {
+	dstPath := filepath.Join(dst, newName)
+	err := fs.Copy(r.CharmDirPath(name), dstPath)
+	check(err)
+	return dstPath
+}
+
+// ClonedDir returns an actual charm.CharmDir based on a new copy of the charm directory
+// named name, in the directory dst.
+func (r *Repo) ClonedDir(dst, name string) *charm.CharmDir {
+	ch, err := charm.ReadCharmDir(r.ClonedDirPath(dst, name))
+	check(err)
+	return ch
+}
+
+// ClonedURL makes a copy of the charm directory. It will create a directory
+// with the series name if it does not exist, and then clone the charm named
+// name into that directory. The return value is a URL pointing at the local
+// charm.
+func (r *Repo) ClonedURL(dst, series, name string) *charm.URL {
+	dst = filepath.Join(dst, series)
+	if err := os.MkdirAll(dst, os.FileMode(0777)); err != nil {
+		panic(fmt.Errorf("cannot make destination directory: %v", err))
+	}
+	clone(dst, r.CharmDirPath(name))
+	return &charm.URL{
+		Schema:   "local",
+		Name:     name,
+		Revision: -1,
+		Series:   series,
+	}
+}
+
+// CharmArchivePath returns the path to a new charm archive file
+// in the directory dst, created from the charm directory named name.
+func (r *Repo) CharmArchivePath(dst, name string) string {
+	dir := r.CharmDir(name)
+	path := filepath.Join(dst, "archive.charm")
+	file, err := os.Create(path)
+	check(err)
+	defer file.Close()
+	check(dir.ArchiveTo(file))
+	return path
+}
+
+// BundleArchivePath returns the path to a new bundle archive file
+// in the directory dst, created from the bundle directory named name.
+func (r *Repo) BundleArchivePath(dst, name string) string { + dir := r.BundleDir(name) + path := filepath.Join(dst, "archive.bundle") + file, err := os.Create(path) + check(err) + defer file.Close() + check(dir.ArchiveTo(file)) + return path +} + +// CharmArchive returns an actual charm.CharmArchive created from a new +// charm archive file created from the charm directory named name, in +// the directory dst. +func (r *Repo) CharmArchive(dst, name string) *charm.CharmArchive { + ch, err := charm.ReadCharmArchive(r.CharmArchivePath(dst, name)) + check(err) + return ch +} + +// BundleArchive returns an actual charm.BundleArchive created from a new +// bundle archive file created from the bundle directory named name, in +// the directory dst. +func (r *Repo) BundleArchive(dst, name string) *charm.BundleArchive { + b, err := charm.ReadBundleArchive(r.BundleArchivePath(dst, name)) + check(err) + return b +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/testing/mockstore.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/testing/mockstore.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/testing/mockstore.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,209 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "bytes" + "encoding/json" + "io" + "net" + "net/http" + "os" + "strconv" + "strings" + + "github.com/juju/loggo" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmrepo.v2-unstable" +) + +var logger = loggo.GetLogger("juju.charm.testing.mockstore") + +// MockStore provides a mock charm store implementation useful when testing. +type MockStore struct { + mux *http.ServeMux + listener net.Listener + archiveBytes []byte + archiveSha256 string + Downloads []*charm.URL + DownloadsNoStats []*charm.URL + Authorizations []string + Metadata []string + InfoRequestCount int + InfoRequestCountNoStats int + DefaultSeries string + + charms map[string]int +} + +// NewMockStore creates a mock charm store containing the specified charms. +func NewMockStore(c *gc.C, repo *Repo, charms map[string]int) *MockStore { + s := &MockStore{charms: charms, DefaultSeries: "precise"} + f, err := os.Open(repo.CharmArchivePath(c.MkDir(), "dummy")) + c.Assert(err, gc.IsNil) + defer f.Close() + buf := &bytes.Buffer{} + s.archiveSha256, _, err = utils.ReadSHA256(io.TeeReader(f, buf)) + c.Assert(err, gc.IsNil) + s.archiveBytes = buf.Bytes() + c.Assert(err, gc.IsNil) + s.mux = http.NewServeMux() + s.mux.HandleFunc("/charm-info", s.serveInfo) + s.mux.HandleFunc("/charm-event", s.serveEvent) + s.mux.HandleFunc("/charm/", s.serveCharm) + lis, err := net.Listen("tcp", "127.0.0.1:0") + c.Assert(err, gc.IsNil) + s.listener = lis + go http.Serve(s.listener, s) + return s +} + +// Close closes the mock store's socket. +func (s *MockStore) Close() { + s.listener.Close() +} + +// Address returns the URL used to make requests to the mock store. +func (s *MockStore) Address() string { + return "http://" + s.listener.Addr().String() +} + +// UpdateStoreRevision sets the revision of the specified charm to rev. 
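+//
+// An illustrative use of MockStore (editor's addition, not part of the
+// original source; the charm name and revisions are assumptions):
+//
+//	store := NewMockStore(c, repo, map[string]int{"cs:precise/dummy": 1})
+//	defer store.Close()
+//	// Point the client under test at store.Address(), then bump the
+//	// served revision:
+//	store.UpdateStoreRevision("cs:precise/dummy", 2)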
+func (s *MockStore) UpdateStoreRevision(ch string, rev int) { + s.charms[ch] = rev +} + +// ServeHTTP implements http.Handler. +func (s *MockStore) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.mux.ServeHTTP(w, r) +} + +func (s *MockStore) serveInfo(w http.ResponseWriter, r *http.Request) { + if metadata := r.Header.Get("Juju-Metadata"); metadata != "" { + s.Metadata = append(s.Metadata, metadata) + logger.Infof("Juju metadata: %s", metadata) + } + + r.ParseForm() + if r.Form.Get("stats") == "0" { + s.InfoRequestCountNoStats++ + } else { + s.InfoRequestCount++ + } + + response := map[string]*charmrepo.InfoResponse{} + for _, url := range r.Form["charms"] { + cr := &charmrepo.InfoResponse{} + response[url] = cr + charmURL, err := charm.ParseURL(url) + if err != nil { + panic(err) + } + if charmURL.Series == "" { + charmURL.Series = s.DefaultSeries + } + switch charmURL.Name { + case "borken": + cr.Errors = append(cr.Errors, "badness") + case "terracotta": + cr.Errors = append(cr.Errors, "cannot get revision") + case "unwise": + cr.Warnings = append(cr.Warnings, "foolishness") + fallthrough + default: + if rev, ok := s.charms[charmURL.WithRevision(-1).String()]; ok { + if charmURL.Revision == -1 { + cr.Revision = rev + } else { + cr.Revision = charmURL.Revision + } + cr.Sha256 = s.archiveSha256 + cr.CanonicalURL = charmURL.String() + } else { + cr.Errors = append(cr.Errors, "entry not found") + } + } + } + data, err := json.Marshal(response) + if err != nil { + panic(err) + } + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(data) + if err != nil { + panic(err) + } +} + +func (s *MockStore) serveEvent(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + response := map[string]*charmrepo.EventResponse{} + for _, url := range r.Form["charms"] { + digest := "" + if i := strings.Index(url, "@"); i >= 0 { + digest = url[i+1:] + url = url[:i] + } + er := &charmrepo.EventResponse{} + response[url] = er + if digest != "" && digest != "the-digest" { + er.Kind = "not-found" + er.Errors = []string{"entry not found"} + continue + } + charmURL := charm.MustParseURL(url) + switch charmURL.Name { + case "borken": + er.Kind = "publish-error" + er.Errors = append(er.Errors, "badness") + case "unwise": + er.Warnings = append(er.Warnings, "foolishness") + fallthrough + default: + if rev, ok := s.charms[charmURL.WithRevision(-1).String()]; ok { + er.Kind = "published" + er.Revision = rev + er.Digest = "the-digest" + } else { + er.Kind = "not-found" + er.Errors = []string{"entry not found"} + } + } + } + data, err := json.Marshal(response) + if err != nil { + panic(err) + } + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(data) + if err != nil { + panic(err) + } +} + +func (s *MockStore) serveCharm(w http.ResponseWriter, r *http.Request) { + charmURL := charm.MustParseURL("cs:" + r.URL.Path[len("/charm/"):]) + + r.ParseForm() + if r.Form.Get("stats") == "0" { + s.DownloadsNoStats = append(s.DownloadsNoStats, charmURL) + } else { + s.Downloads = append(s.Downloads, charmURL) + } + + if auth := r.Header.Get("Authorization"); auth != "" { + s.Authorizations = append(s.Authorizations, auth) + } + + w.Header().Set("Connection", "close") + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", strconv.Itoa(len(s.archiveBytes))) + _, err := w.Write(s.archiveBytes) + if err != nil { + panic(err) + } +}
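
Because MockStore serves the same endpoints as the real store ("/charm-info", "/charm-event" and "/charm/..."), a test can point any HTTP client at Address(). A minimal sketch (an editor's illustration, not from the original sources; it assumes a gocheck suite `s` and a *Repo value `repo` whose default series contains a "dummy" charm):

    func (s *suite) TestDownloadCounting(c *gc.C) {
        // The charms map is keyed by the revisionless URL form.
        store := testing.NewMockStore(c, repo, map[string]int{
            "cs:precise/dummy": 7,
        })
        defer store.Close()

        // Fetch the charm archive and check that the download was recorded.
        resp, err := http.Get(store.Address() + "/charm/precise/dummy-7")
        c.Assert(err, gc.IsNil)
        resp.Body.Close()
        c.Assert(store.Downloads, gc.HasLen, 1)
    }

=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/testing/package_test.go' ---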
src/gopkg.in/juju/charmrepo.v2-unstable/testing/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/testing/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package testing_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/testing/suite.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/testing/suite.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/testing/suite.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + jujutesting "github.com/juju/testing" + gc "gopkg.in/check.v1" +) + +type IsolatedMgoSuite struct { + jujutesting.IsolationSuite + jujutesting.MgoSuite +} + +func (s *IsolatedMgoSuite) SetUpSuite(c *gc.C) { + s.IsolationSuite.SetUpSuite(c) + s.MgoSuite.SetUpSuite(c) +} + +func (s *IsolatedMgoSuite) TearDownSuite(c *gc.C) { + s.MgoSuite.TearDownSuite(c) + s.IsolationSuite.TearDownSuite(c) +} + +func (s *IsolatedMgoSuite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.MgoSuite.SetUpTest(c) +} + +func (s *IsolatedMgoSuite) TearDownTest(c *gc.C) { + s.MgoSuite.TearDownTest(c) + s.IsolationSuite.TearDownTest(c) +} === added file 'src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,226 @@ +// Copyright 2012, 2013 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package testing + +import ( + "archive/zip" + "bytes" + "fmt" + "os" + "path" + "strings" + "sync" + + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// Charm holds a charm for testing. It does not +// have a representation on disk by default, but +// can be written to disk using Archive and its ExpandTo +// method. It implements the charm.Charm interface. +// +// All methods on Charm may be called concurrently. +type Charm struct { + meta *charm.Meta + config *charm.Config + actions *charm.Actions + metrics *charm.Metrics + revision int + + files filetesting.Entries + + makeArchiveOnce sync.Once + archiveBytes []byte + archive *charm.CharmArchive +} + +// CharmSpec holds the specification for a charm. The fields +// hold data in YAML format. +type CharmSpec struct { + // Meta holds the contents of metadata.yaml. + Meta string + + // Config holds the contents of config.yaml. + Config string + + // Actions holds the contents of actions.yaml. + Actions string + + // Metrics holds the contents of metrics.yaml. + Metrics string + + // Files holds any additional files that should be + // added to the charm. If this is nil, a minimal set + // of files will be added to ensure the charm is readable. + Files []filetesting.Entry + + // Revision specifies the revision of the charm. 
+ Revision int +} + +// NewCharm returns a new charm +func NewCharm(c *gc.C, spec CharmSpec) *Charm { + ch := &Charm{ + revision: spec.Revision, + } + var err error + ch.meta, err = charm.ReadMeta(strings.NewReader(spec.Meta)) + c.Assert(err, gc.IsNil) + ch.files = append(ch.files, filetesting.File{ + Path: "metadata.yaml", + Data: spec.Meta, + Perm: 0644, + }) + + if spec.Config != "" { + ch.config, err = charm.ReadConfig(strings.NewReader(spec.Config)) + c.Assert(err, gc.IsNil) + ch.files = append(ch.files, filetesting.File{ + Path: "config.yaml", + Data: spec.Config, + Perm: 0644, + }) + } + if spec.Actions != "" { + ch.actions, err = charm.ReadActionsYaml(strings.NewReader(spec.Actions)) + c.Assert(err, gc.IsNil) + ch.files = append(ch.files, filetesting.File{ + Path: "actions.yaml", + Data: spec.Actions, + Perm: 0644, + }) + } + if spec.Metrics != "" { + ch.metrics, err = charm.ReadMetrics(strings.NewReader(spec.Metrics)) + c.Assert(err, gc.IsNil) + ch.files = append(ch.files, filetesting.File{ + Path: "metrics.yaml", + Data: spec.Metrics, + Perm: 0644, + }) + } + if spec.Files == nil { + ch.files = append(ch.files, filetesting.File{ + Path: "hooks/install", + Data: "#!/bin/sh\n", + Perm: 0755, + }, filetesting.File{ + Path: "hooks/start", + Data: "#!/bin/sh\n", + Perm: 0755, + }) + } else { + ch.files = append(ch.files, spec.Files...) + // Check for duplicates. + names := make(map[string]bool) + for _, f := range ch.files { + name := path.Clean(f.GetPath()) + if names[name] { + panic(fmt.Errorf("duplicate file entry %q", f.GetPath())) + } + names[name] = true + } + } + return ch +} + +// Meta implements charm.Charm.Meta. +func (ch *Charm) Meta() *charm.Meta { + return ch.meta +} + +// Config implements charm.Charm.Config. +func (ch *Charm) Config() *charm.Config { + if ch.config == nil { + return &charm.Config{ + Options: map[string]charm.Option{}, + } + } + return ch.config +} + +// Metrics implements charm.Charm.Metrics. +func (ch *Charm) Metrics() *charm.Metrics { + return ch.metrics +} + +// Actions implements charm.Charm.Actions. +func (ch *Charm) Actions() *charm.Actions { + if ch.actions == nil { + return &charm.Actions{} + } + return ch.actions +} + +// Revision implements charm.Charm.Revision. +func (ch *Charm) Revision() int { + return ch.revision +} + +// Archive returns a charm archive holding the charm. +func (ch *Charm) Archive() *charm.CharmArchive { + ch.makeArchiveOnce.Do(ch.makeArchive) + return ch.archive +} + +// ArchiveBytes returns the contents of the charm archive +// holding the charm. +func (ch *Charm) ArchiveBytes() []byte { + ch.makeArchiveOnce.Do(ch.makeArchive) + return ch.archiveBytes +} + +func (ch *Charm) makeArchive() { + var buf bytes.Buffer + zw := zip.NewWriter(&buf) + + for _, f := range ch.files { + addZipEntry(zw, f) + } + if err := zw.Close(); err != nil { + panic(err) + } + // ReadCharmArchiveFromReader requires a ReaderAt, so make one. + r := bytes.NewReader(buf.Bytes()) + + // Actually make the charm archive. + archive, err := charm.ReadCharmArchiveFromReader(r, int64(buf.Len())) + if err != nil { + panic(err) + } + ch.archiveBytes = buf.Bytes() + ch.archive = archive + ch.archive.SetRevision(ch.revision) +} + +func addZipEntry(zw *zip.Writer, f filetesting.Entry) { + h := &zip.FileHeader{ + Name: f.GetPath(), + // Don't bother compressing - the contents are so small that + // it will just slow things down for no particular benefit. 
+ Method: zip.Store, + } + contents := "" + switch f := f.(type) { + case filetesting.Dir: + h.SetMode(os.ModeDir | 0755) + case filetesting.File: + h.SetMode(f.Perm) + contents = f.Data + case filetesting.Symlink: + h.SetMode(os.ModeSymlink | 0777) + contents = f.Link + } + w, err := zw.CreateHeader(h) + if err != nil { + panic(err) + } + if contents != "" { + if _, err := w.Write([]byte(contents)); err != nil { + panic(err) + } + } +}
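
CharmSpec and NewCharm make it cheap to fabricate fully in-memory charms for tests. A minimal sketch (an editor's illustration, not from the original sources; `s` is an assumed gocheck suite, and the Meta YAML is the smallest document charm.ReadMeta accepts):

    func (s *suite) TestFakeCharm(c *gc.C) {
        ch := testing.NewCharm(c, testing.CharmSpec{
            Meta: `
    name: wordpress
    summary: blog
    description: a simple blog
    `,
            Revision: 1,
        })
        c.Assert(ch.Meta().Name, gc.Equals, "wordpress")
        // ArchiveBytes holds a complete zip archive of the charm.
        c.Assert(ch.ArchiveBytes(), gc.Not(gc.HasLen), 0)
    }

=== added file 'src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm_test.go' --- src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmrepo.v2-unstable/testing/testcharm_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,182 @@ +package testing_test + +import ( + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/filetesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmrepo.v2-unstable/testing" +) + +var _ = gc.Suite(&testCharmSuite{}) + +type testCharmSuite struct{} + +var newCharmTests = []struct { + about string + spec testing.CharmSpec + expectMeta *charm.Meta + expectConfig *charm.Config + expectActions *charm.Actions + expectMetrics *charm.Metrics + expectFiles filetesting.Entries + expectRevision int +}{{ + about: "all charm populated without files", + spec: testing.CharmSpec{ + Meta: ` +name: mysql +summary: "Database engine" +description: "A pretty popular database" +provides: + server: mysql +`, + Config: ` +options: + blog-title: {default: My Title, description: Config description, type: string} +`, + Actions: ` +snapshot: + description: Take a snapshot of the database. + params: + outfile: + description: outfile description + type: string + default: foo.bz2 +`, + Metrics: ` +metrics: + pings: + type: gauge + description: Description of the metric.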
+`, + Revision: 99, + }, + expectMeta: &charm.Meta{ + Name: "mysql", + Format: 1, + Summary: "Database engine", + Description: "A pretty popular database", + Provides: map[string]charm.Relation{ + "server": { + Name: "server", + Role: charm.RoleProvider, + Interface: "mysql", + Scope: charm.ScopeGlobal, + }, + }, + }, + expectConfig: &charm.Config{ + Options: map[string]charm.Option{ + "blog-title": { + Type: "string", + Description: "Config description", + Default: "My Title", + }, + }, + }, + expectActions: &charm.Actions{ + ActionSpecs: map[string]charm.ActionSpec{ + "snapshot": { + Description: "Take a snapshot of the database.", + Params: map[string]interface{}{ + "title": "snapshot", + "description": "Take a snapshot of the database.", + "type": "object", + "properties": map[string]interface{}{ + "outfile": map[string]interface{}{ + "description": "outfile description", + "type": "string", + "default": "foo.bz2", + }, + }, + }, + }, + }, + }, + expectMetrics: &charm.Metrics{ + Metrics: map[string]charm.Metric{ + "pings": { + Type: charm.MetricTypeGauge, + Description: "Description of the metric.", + }, + }, + }, + expectFiles: filetesting.Entries{ + filetesting.File{ + Path: "hooks/install", + Data: "#!/bin/sh\n", + Perm: 0755, + }, + filetesting.File{ + Path: "hooks/start", + Data: "#!/bin/sh\n", + Perm: 0755, + }, + }, + expectRevision: 99, +}, { + about: "charm with some extra files specified", + spec: testing.CharmSpec{ + Meta: ` +name: mycharm +summary: summary +description: description +`, + Files: filetesting.Entries{ + filetesting.File{ + Path: "hooks/customhook", + Data: "custom stuff", + Perm: 0755, + }, + }, + }, + expectMeta: &charm.Meta{ + Name: "mycharm", + Summary: "summary", + Description: "description", + Format: 1, + }, + expectConfig: &charm.Config{ + Options: map[string]charm.Option{}, + }, + expectActions: &charm.Actions{}, + expectFiles: filetesting.Entries{ + filetesting.File{ + Path: "hooks/customhook", + Data: "custom stuff", + Perm: 0755, + }, + }, +}, +} + +func (*testCharmSuite) TestNewCharm(c *gc.C) { + for i, test := range newCharmTests { + c.Logf("test %d: %s", i, test.about) + ch := testing.NewCharm(c, test.spec) + c.Assert(ch.Meta(), jc.DeepEquals, test.expectMeta) + c.Assert(ch.Config(), jc.DeepEquals, test.expectConfig) + c.Assert(ch.Metrics(), jc.DeepEquals, test.expectMetrics) + c.Assert(ch.Actions(), jc.DeepEquals, test.expectActions) + c.Assert(ch.Revision(), gc.Equals, test.expectRevision) + + archive := ch.Archive() + c.Assert(archive.Meta(), jc.DeepEquals, test.expectMeta) + c.Assert(archive.Config(), jc.DeepEquals, test.expectConfig) + c.Assert(archive.Metrics(), jc.DeepEquals, test.expectMetrics) + c.Assert(archive.Actions(), jc.DeepEquals, test.expectActions) + c.Assert(archive.Revision(), gc.Equals, test.expectRevision) + + // Check that we get the same archive again. + c.Assert(ch.Archive(), gc.Equals, archive) + c.Assert(ch.ArchiveBytes(), gc.Not(gc.HasLen), 0) + + dir := c.MkDir() + err := archive.ExpandTo(dir) + c.Assert(err, gc.IsNil) + test.expectFiles.Check(c, dir) + + } +} === removed directory 'src/gopkg.in/juju/charmstore.v4' === removed file 'src/gopkg.in/juju/charmstore.v4/LICENSE' --- src/gopkg.in/juju/charmstore.v4/LICENSE 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/LICENSE 1970-01-01 00:00:00 +0000 @@ -1,661 +0,0 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. 
This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. 
- - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. 
- - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. - - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
- - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. === removed file 'src/gopkg.in/juju/charmstore.v4/LICENSE.client' --- src/gopkg.in/juju/charmstore.v4/LICENSE.client 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/LICENSE.client 1970-01-01 00:00:00 +0000 @@ -1,188 +0,0 @@ - -Copyright (c) 2011-2014 - Canonical Inc. - -This software is licensed under the LGPLv3, included below. 
- -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. 
- - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. 
If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. === removed file 'src/gopkg.in/juju/charmstore.v4/Makefile' --- src/gopkg.in/juju/charmstore.v4/Makefile 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/Makefile 1970-01-01 00:00:00 +0000 @@ -1,119 +0,0 @@ -# Makefile for the charm store. - -ifndef GOPATH -$(warning You need to set up a GOPATH.) -endif - -PROJECT := gopkg.in/juju/charmstore.v4 -PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT)) - -ifeq ($(shell uname -p | sed -r 's/.*(x86|armel|armhf).*/golang/'), golang) - GO_C := golang - INSTALL_FLAGS := -else - GO_C := gccgo-4.9 gccgo-go - INSTALL_FLAGS := -gccgoflags=-static-libgo -endif - -define DEPENDENCIES - build-essential - bzr - juju-mongodb - mongodb-server - $(GO_C) - openjdk-7-jre-headless - elasticsearch -endef - -default: build - -$(GOPATH)/bin/godeps: - go get -v launchpad.net/godeps - -# Start of GOPATH-dependent targets. Some targets only make sense - -# and will only work - when this tree is found on the GOPATH. -ifeq ($(CURDIR),$(PROJECT_DIR)) - -build: - go build $(PROJECT)/... - -check: - go test $(PROJECT)/... - -install: - go install $(INSTALL_FLAGS) -v $(PROJECT)/... - -clean: - go clean $(PROJECT)/... - -else - -build: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -check: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -install: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -clean: - $(error Cannot $@; $(CURDIR) is not on GOPATH) - -endif -# End of GOPATH-dependent targets. 
-
-# Reformat source files.
-format:
-	gofmt -w -l .
-
-# Reformat and simplify source files.
-simplify:
-	gofmt -w -l -s .
-
-# Run the charmd server.
-server: install
-	charmd -logging-config INFO cmd/charmd/config.yaml
-
-# Update the project Go dependencies to the required revision.
-deps: $(GOPATH)/bin/godeps
-	$(GOPATH)/bin/godeps -u dependencies.tsv
-
-# Generate the dependencies file.
-create-deps: $(GOPATH)/bin/godeps
-	godeps -t $(shell go list $(PROJECT)/...) > dependencies.tsv || true
-
-# Install packages required to develop the charm store and run tests.
-APT_BASED := $(shell command -v apt-get >/dev/null; echo $$?)
-sysdeps:
-ifeq ($(APT_BASED),0)
-ifeq ($(shell lsb_release -cs|sed -r 's/precise|quantal|raring/old/'),old)
-	@echo Adding PPAs for golang and mongodb
-	@sudo apt-add-repository --yes ppa:juju/golang
-	@sudo apt-add-repository --yes ppa:juju/stable
-endif
-	@echo Installing dependencies
-	[ "x$(apt-key export D88E42B4 2>&1 1>/dev/null)" = "x" ] || { curl -s http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -;}
-	repo="http://packages.elasticsearch.org/elasticsearch/1.3/debian" file=/etc/apt/sources.list.d/packages_elasticsearch_org_elasticsearch_1_3_debian.list ; grep "$$repo" $$file || echo "deb $$repo stable main" | sudo tee $$file > /dev/null
-	sudo apt-get update
-	@sudo apt-get --force-yes install $(strip $(DEPENDENCIES)) \
-	$(shell apt-cache madison juju-mongodb mongodb-server | head -1 | cut -d '|' -f1)
-else
-	@echo sysdeps runs only on systems with apt-get
-	@echo on OS X with homebrew try: brew install bazaar mongodb elasticsearch
-endif
-
-help:
-	@echo -e 'Charmstore - list of make targets:\n'
-	@echo 'make - Build the package.'
-	@echo 'make check - Run tests.'
-	@echo 'make install - Install the package.'
-	@echo 'make server - Start the charmd server.'
-	@echo 'make clean - Remove object files from package source directories.'
-	@echo 'make sysdeps - Install the development environment system packages.'
-	@echo 'make deps - Set up the project Go dependencies.'
-	@echo 'make create-deps - Generate the Go dependencies file.'
-	@echo 'make format - Format the source files.'
-	@echo 'make simplify - Format and simplify the source files.'
-
-.PHONY: build check install clean format simplify sysdeps help
=== removed file 'src/gopkg.in/juju/charmstore.v4/README.md'
--- src/gopkg.in/juju/charmstore.v4/README.md 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/README.md 1970-01-01 00:00:00 +0000
@@ -1,70 +0,0 @@
-# juju/charmstore
-
-Store and publish Juju charms.
-
-## Installation
-
-To start using the charm store, first ensure you have a valid
-Go environment, then run the following:
-
-    go get -d gopkg.in/juju/charmstore.v4
-    cd $GOPATH/src/gopkg.in/juju/charmstore.v4
-
-## Go dependencies
-
-The project uses godeps (https://launchpad.net/godeps) to manage Go
-dependencies. To install this, run:
-
-    go get launchpad.net/godeps
-
-After installing it, you can update the dependencies
-to the revision specified in the `dependencies.tsv` file with the following:
-
-    make deps
-
-Use `make create-deps` to update the dependencies file.
-
-## Development environment
-
-A couple of system packages are required in order to set up a charm store
-development environment. To install them, run the following:
-
-    make sysdeps
-
-To run the elasticsearch tests you must run an elasticsearch server. If the
-elasticsearch server is running at an address other than localhost:9200 then
-set `JUJU_TEST_ELASTICSEARCH=<host>:<port>`, where host and port provide
-the address of the elasticsearch server. If you do not wish to run the
-elasticsearch tests, set `JUJU_TEST_ELASTICSEARCH=none`.
-
-At this point, from the root of this branch, run the command:
-
-    make install
-
-The command above builds and installs the charm store binaries, and places them
-in `$GOPATH/bin`. This is the list of the installed commands:
-
-- charmd: start the charm store server;
-- essync: synchronize the contents of the Elastic Search database with the charm store.
-
-A description of each command can be found below.
-
-## Testing
-
-Run `make check` to test the application.
-Run `make help` to display help about all the available make targets.
-
-## Charmstore server
-
-Once the charms database is fully populated, it is possible to interact with
-charm data using the charm store server. It can be started with the following
-command:
-
-    charmd -logging-config INFO cmd/charmd/config.yaml
-
-The same result can be achieved more easily by running `make server`.
-Note that this configuration *should not* be used when running
-a production server, as it uses a known password for authentication.
-
-At this point the server starts listening on port 8080 (as specified in the
-config YAML file).
=== removed directory 'src/gopkg.in/juju/charmstore.v4/charmstoretesting'
=== removed file 'src/gopkg.in/juju/charmstore.v4/charmstoretesting/server.go'
--- src/gopkg.in/juju/charmstore.v4/charmstoretesting/server.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/charmstoretesting/server.go 1970-01-01 00:00:00 +0000
@@ -1,158 +0,0 @@
-// Copyright 2015 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package charmstoretesting
-
-import (
-	"crypto/sha512"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"os"
-
-	jc "github.com/juju/testing/checkers"
-	gc "gopkg.in/check.v1"
-	"gopkg.in/juju/charm.v5"
-	"gopkg.in/macaroon-bakery.v0/httpbakery"
-	"gopkg.in/mgo.v2"
-
-	"gopkg.in/juju/charmstore.v4"
-	"gopkg.in/juju/charmstore.v4/csclient"
-	"gopkg.in/juju/charmstore.v4/params"
-)
-
-const (
-	// If params.AuthUsername or params.AuthPassword are empty,
-	// AuthUsername and AuthPassword will be used.
-	AuthUsername = "charmstore-testing-user"
-	AuthPassword = "charmstore-testing-password"
-)
-
-// OpenServer instantiates a new charm store server instance.
-// Callers are responsible for closing the server by calling Close().
-func OpenServer(c *gc.C, session *mgo.Session, params charmstore.ServerParams) *Server {
-	db := session.DB("charmstore-testing")
-	if params.AuthUsername == "" {
-		params.AuthUsername = AuthUsername
-	}
-	if params.AuthPassword == "" {
-		params.AuthPassword = AuthPassword
-	}
-	handler, err := charmstore.NewServer(db, nil, "", params, charmstore.V4)
-	c.Assert(err, jc.ErrorIsNil)
-
-	return &Server{
-		srv:     httptest.NewServer(handler),
-		handler: handler,
-		params:  params,
-	}
-}
-
-// Server is a charm store testing server.
-type Server struct {
-	srv     *httptest.Server
-	handler http.Handler
-	params  charmstore.ServerParams
-}
-
-// URL returns the URL the testing charm store is listening to.
-func (s *Server) URL() string {
-	return s.srv.URL
-}
-
-// Handler returns the HTTP handler used by this server.
-func (s *Server) Handler() http.Handler {
-	return s.handler
-}
-
-// Close shuts down the server.
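//
// A minimal lifecycle sketch (an illustrative aside, not part of the
// original file; the suite's Session field is assumed to hold an
// *mgo.Session, as in the tests elsewhere in this tree):
//
//	srv := charmstoretesting.OpenServer(c, s.Session, charmstore.ServerParams{})
//	defer srv.Close()
//	client := srv.NewClient()
//	// ... upload charms with srv.UploadCharm and exercise client ...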
-func (s *Server) Close() { - s.srv.Close() -} - -// NewClient returns a new client that will talk to the Server using basic -// (non-macaroon) authentication. -func (s *Server) NewClient() *csclient.Client { - return csclient.New(csclient.Params{ - URL: s.srv.URL, - User: s.params.AuthUsername, - Password: s.params.AuthPassword, - }) -} - -// UploadCharm uploads the given charm to the testing charm store. -// The given id must include the charm user, series and revision. -// If promulgated is true, the charm will be promulgated. -func (s *Server) UploadCharm(c *gc.C, ch charm.Charm, id *charm.Reference, promulgated bool) *charm.Reference { - var path string - - // Validate the charm id. - c.Assert(id.User, gc.Not(gc.Equals), "") - c.Assert(id.Series, gc.Not(gc.Equals), "") - c.Assert(id.Series, gc.Not(gc.Equals), "bundle") - c.Assert(id.Revision, gc.Not(gc.Equals), -1) - - // Retrieve the charm archive path. - switch ch := ch.(type) { - case *charm.CharmArchive: - path = ch.Path - case *charm.CharmDir: - f, err := ioutil.TempFile(c.MkDir(), "charm") - c.Assert(err, jc.ErrorIsNil) - defer f.Close() - err = ch.ArchiveTo(f) - c.Assert(err, jc.ErrorIsNil) - path = f.Name() - default: - c.Errorf("cannot upload charm of entity type %T", ch) - } - - // Retrieve the charm reader, hash and size. - body, err := os.Open(path) - c.Assert(err, jc.ErrorIsNil) - defer body.Close() - h := sha512.New384() - size, err := io.Copy(h, body) - c.Assert(err, jc.ErrorIsNil) - hash := fmt.Sprintf("%x", h.Sum(nil)) - - // Prepare the request. - req, err := http.NewRequest("PUT", "", nil) - c.Assert(err, jc.ErrorIsNil) - req.Header.Set("Content-Type", "application/zip") - req.ContentLength = size - url := "/" + id.Path() + "/archive?hash=" + hash - if promulgated { - pid := *id - pid.User = "" - url += "&promulgated=" + pid.String() - } - - // Upload the charm. - client := s.NewClient() - resp, err := client.DoWithBody(req, url, httpbakery.SeekerBody(body)) - c.Assert(err, jc.ErrorIsNil) - defer resp.Body.Close() - c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) - - // Retrieve the uploaded charm id. - var result params.ArchiveUploadResponse - dec := json.NewDecoder(resp.Body) - err = dec.Decode(&result) - c.Assert(err, jc.ErrorIsNil) - curl := result.Id - if promulgated { - curl = result.PromulgatedId - } - - // Set permissions for the charm. - err = client.Put( - "/"+curl.Path()+"/meta/perm/read", - []string{params.Everyone, id.User}) - c.Assert(err, jc.ErrorIsNil) - - return curl -} === removed directory 'src/gopkg.in/juju/charmstore.v4/cmd' === removed directory 'src/gopkg.in/juju/charmstore.v4/cmd/charmd' === removed file 'src/gopkg.in/juju/charmstore.v4/cmd/charmd/config.yaml' --- src/gopkg.in/juju/charmstore.v4/cmd/charmd/config.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/cmd/charmd/config.yaml 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -mongo-url: localhost:27017 -api-addr: localhost:8080 -auth-username: admin -auth-password: example-passwd -#elasticsearch-addr: localhost:9200 -# For locally running services. -identity-public-key: CIdWcEUN+0OZnKW9KwruRQnQDY/qqzVdD30CijwiWCk= -identity-location: http://localhost:8081/v1/discharger -identity-api-url: http://localhost:8081 -identity-api-username: admin -identity-api-password: password -# For production identity manager. 
-#identity-public-key: hmHaPgCC1UfuhYHUSX5+aihSAZesqpVdjRv0mgfIwjo=
-#identity-location: https://api.jujucharms.com/identity/v1/discharger
=== removed file 'src/gopkg.in/juju/charmstore.v4/cmd/charmd/main.go'
--- src/gopkg.in/juju/charmstore.v4/cmd/charmd/main.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/cmd/charmd/main.go 1970-01-01 00:00:00 +0000
@@ -1,97 +0,0 @@
-// Copyright 2012, 2013, 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"net/http"
-	"os"
-	"path/filepath"
-
-	"github.com/juju/loggo"
-	"gopkg.in/errgo.v1"
-	"gopkg.in/macaroon-bakery.v0/bakery"
-	"gopkg.in/mgo.v2"
-
-	"gopkg.in/juju/charmstore.v4"
-	"gopkg.in/juju/charmstore.v4/config"
-	"gopkg.in/juju/charmstore.v4/internal/debug"
-	"gopkg.in/juju/charmstore.v4/internal/elasticsearch"
-)
-
-var (
-	logger        = loggo.GetLogger("charmd")
-	loggingConfig = flag.String("logging-config", "", "specify log levels for modules e.g. <root>=TRACE")
-)
-
-func main() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "usage: %s [options] <config path>\n", filepath.Base(os.Args[0]))
-		flag.PrintDefaults()
-		os.Exit(2)
-	}
-	flag.Parse()
-	if flag.NArg() != 1 {
-		flag.Usage()
-	}
-	if *loggingConfig != "" {
-		if err := loggo.ConfigureLoggers(*loggingConfig); err != nil {
-			fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err)
-			os.Exit(1)
-		}
-	}
-	if err := serve(flag.Arg(0)); err != nil {
-		fmt.Fprintf(os.Stderr, "%v\n", err)
-		os.Exit(1)
-	}
-}
-
-func serve(confPath string) error {
-	logger.Infof("reading configuration")
-	conf, err := config.Read(confPath)
-	if err != nil {
-		return errgo.Notef(err, "cannot read config file %q", confPath)
-	}
-
-	logger.Infof("connecting to mongo")
-	session, err := mgo.Dial(conf.MongoURL)
-	if err != nil {
-		return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL)
-	}
-	defer session.Close()
-	db := session.DB("juju")
-
-	var es *elasticsearch.Database
-	if conf.ESAddr != "" {
-		es = &elasticsearch.Database{
-			conf.ESAddr,
-		}
-	}
-
-	logger.Infof("setting up the API server")
-	cfg := charmstore.ServerParams{
-		AuthUsername:        conf.AuthUsername,
-		AuthPassword:        conf.AuthPassword,
-		IdentityLocation:    conf.IdentityLocation,
-		IdentityAPIURL:      conf.IdentityAPIURL,
-		IdentityAPIUsername: conf.IdentityAPIUsername,
-		IdentityAPIPassword: conf.IdentityAPIPassword,
-	}
-	var identityPublicKey bakery.PublicKey
-	err = identityPublicKey.UnmarshalText([]byte(conf.IdentityPublicKey))
-	if err != nil {
-		return errgo.Notef(err, "cannot create new server at %q", conf.APIAddr)
-	}
-	ring := bakery.NewPublicKeyRing()
-	ring.AddPublicKeyForLocation(cfg.IdentityLocation, false, &identityPublicKey)
-	cfg.PublicKeyLocator = ring
-	server, err := charmstore.NewServer(db, es, "cs", cfg, charmstore.Legacy, charmstore.V4)
-	if err != nil {
-		return errgo.Notef(err, "cannot create new server at %q", conf.APIAddr)
-	}
-
-	logger.Infof("starting the API server")
-	return http.ListenAndServe(conf.APIAddr, debug.Handler("", server))
-}
=== removed directory 'src/gopkg.in/juju/charmstore.v4/cmd/cshash256'
=== removed file 'src/gopkg.in/juju/charmstore.v4/cmd/cshash256/main.go'
--- src/gopkg.in/juju/charmstore.v4/cmd/cshash256/main.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/cmd/cshash256/main.go 1970-01-01 00:00:00 +0000
@@ -1,128 +0,0 @@
-// Copyright 2015 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-// This command populates the blobhash256 field of all entities.
-// This command is intended to be run on the production db and then discarded.
-// The first time this command is executed, all the entities are updated.
-// Subsequent runs have no effect.
-
-package main
-
-import (
-	"crypto/sha256"
-	"flag"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/juju/loggo"
-	"gopkg.in/errgo.v1"
-	"gopkg.in/mgo.v2"
-	"gopkg.in/mgo.v2/bson"
-
-	"gopkg.in/juju/charmstore.v4/config"
-	"gopkg.in/juju/charmstore.v4/internal/charmstore"
-	"gopkg.in/juju/charmstore.v4/internal/mongodoc"
-)
-
-var (
-	logger        = loggo.GetLogger("cshash256")
-	loggingConfig = flag.String("logging-config", "INFO", "specify log levels for modules e.g. <root>=TRACE")
-)
-
-func main() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "usage: %s [options] <config path>\n", filepath.Base(os.Args[0]))
-		flag.PrintDefaults()
-		os.Exit(2)
-	}
-	flag.Parse()
-	if flag.NArg() != 1 {
-		flag.Usage()
-	}
-	if *loggingConfig != "" {
-		if err := loggo.ConfigureLoggers(*loggingConfig); err != nil {
-			fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err)
-			os.Exit(1)
-		}
-	}
-	if err := run(flag.Arg(0)); err != nil {
-		fmt.Fprintf(os.Stderr, "%v\n", err)
-		os.Exit(1)
-	}
-}
-
-func run(confPath string) error {
-	logger.Infof("reading configuration")
-	conf, err := config.Read(confPath)
-	if err != nil {
-		return errgo.Notef(err, "cannot read config file %q", confPath)
-	}
-
-	logger.Infof("connecting to mongo")
-	session, err := mgo.Dial(conf.MongoURL)
-	if err != nil {
-		return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL)
-	}
-	defer session.Close()
-	db := session.DB("juju")
-
-	logger.Infof("instantiating the store")
-	pool, err := charmstore.NewPool(db, nil, nil)
-	if err != nil {
-		return errgo.Notef(err, "cannot create a new store")
-	}
-	store := pool.Store()
-	defer store.Close()
-
-	logger.Infof("updating entities")
-	if err := update(store); err != nil {
-		return errgo.Notef(err, "cannot update entities")
-	}
-
-	logger.Infof("done")
-	return nil
-}
-
-func update(store *charmstore.Store) error {
-	entities := store.DB.Entities()
-	var entity mongodoc.Entity
-	iter := entities.Find(bson.D{{"blobhash256", ""}}).Select(bson.D{{"blobname", 1}}).Iter()
-	defer iter.Close()
-
-	counter := 0
-	for iter.Next(&entity) {
-		// Retrieve the archive contents.
-		r, _, err := store.BlobStore.Open(entity.BlobName)
-		if err != nil {
-			return errgo.Notef(err, "cannot open archive data for %s", entity.URL)
-		}
-
-		// Calculate the contents hash.
-		hash := sha256.New()
-		if _, err = io.Copy(hash, r); err != nil {
-			r.Close()
-			return errgo.Notef(err, "cannot calculate archive sha256 for %s", entity.URL)
-		}
-		r.Close()
-
-		// Update the entity document.
-		if err := entities.UpdateId(entity.URL, bson.D{{
-			"$set", bson.D{{"blobhash256", fmt.Sprintf("%x", hash.Sum(nil))}},
-		}}); err != nil {
-			return errgo.Notef(err, "cannot update entity id %s", entity.URL)
-		}
-		counter++
-		if counter%100 == 0 {
-			logger.Infof("%d entities updated", counter)
-		}
-
-	}
-
-	if err := iter.Close(); err != nil {
-		return errgo.Notef(err, "cannot iterate entities")
-	}
-	logger.Infof("%d entities updated", counter)
-	return nil
-}
=== removed directory 'src/gopkg.in/juju/charmstore.v4/cmd/essync'
=== removed file 'src/gopkg.in/juju/charmstore.v4/cmd/essync/main.go'
--- src/gopkg.in/juju/charmstore.v4/cmd/essync/main.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/cmd/essync/main.go 1970-01-01 00:00:00 +0000
@@ -1,84 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/juju/loggo"
-	"gopkg.in/errgo.v1"
-	"gopkg.in/mgo.v2"
-
-	"gopkg.in/juju/charmstore.v4/config"
-	"gopkg.in/juju/charmstore.v4/internal/charmstore"
-	"gopkg.in/juju/charmstore.v4/internal/elasticsearch"
-)
-
-var logger = loggo.GetLogger("essync")
-
-var (
-	index         = flag.String("index", "cs", "Name of index to populate.")
-	loggingConfig = flag.String("logging-config", "", "specify log levels for modules e.g. <root>=TRACE")
-	mapping       = flag.String("mapping", "", "No longer used.")
-	settings      = flag.String("settings", "", "No longer used.")
-)
-
-func main() {
-	flag.Usage = func() {
-		fmt.Fprintf(os.Stderr, "usage: %s [options] <config path>\n", filepath.Base(os.Args[0]))
-		flag.PrintDefaults()
-		os.Exit(2)
-	}
-	flag.Parse()
-	if flag.NArg() != 1 {
-		flag.Usage()
-	}
-	if *loggingConfig != "" {
-		if err := loggo.ConfigureLoggers(*loggingConfig); err != nil {
-			fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err)
-			os.Exit(1)
-		}
-	}
-	if err := populate(flag.Arg(0)); err != nil {
-		logger.Errorf("cannot populate elasticsearch: %v", err)
-		os.Exit(1)
-	}
-}
-
-func populate(confPath string) error {
-	logger.Debugf("reading config file %q", confPath)
-	conf, err := config.Read(confPath)
-	if err != nil {
-		return errgo.Notef(err, "cannot read config file %q", confPath)
-	}
-	if conf.ESAddr == "" {
-		return errgo.Newf("no elasticsearch-addr specified in config file %q", confPath)
-	}
-	si := &charmstore.SearchIndex{
-		Database: &elasticsearch.Database{
-			conf.ESAddr,
-		},
-		Index: *index,
-	}
-	session, err := mgo.Dial(conf.MongoURL)
-	if err != nil {
-		return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL)
-	}
-	defer session.Close()
-	db := session.DB("juju")
-
-	pool, err := charmstore.NewPool(db, si, nil)
-	if err != nil {
-		return errgo.Notef(err, "cannot create a new store")
-	}
-	store := pool.Store()
-	defer store.Close()
-	if err := store.SynchroniseElasticsearch(); err != nil {
-		return errgo.Notef(err, "cannot synchronise elasticsearch")
-	}
-	return nil
-}
=== removed directory 'src/gopkg.in/juju/charmstore.v4/config'
=== removed file 'src/gopkg.in/juju/charmstore.v4/config/config.go'
--- src/gopkg.in/juju/charmstore.v4/config/config.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/config/config.go 1970-01-01 00:00:00 +0000
@@ -1,77 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-// The config package defines configuration parameters for
-// the charm store.
-package config
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"strings"
-
-	"gopkg.in/errgo.v1"
-	"gopkg.in/yaml.v1"
-)
-
-type Config struct {
-	// TODO(rog) rename this to MongoAddr - it's not a URL.
- MongoURL string `yaml:"mongo-url"` - APIAddr string `yaml:"api-addr"` - AuthUsername string `yaml:"auth-username"` - AuthPassword string `yaml:"auth-password"` - ESAddr string `yaml:"elasticsearch-addr"` // elasticsearch is optional - IdentityPublicKey string `yaml:"identity-public-key"` - IdentityLocation string `yaml:"identity-location"` - // The identity API is optional - IdentityAPIURL string `yaml:"identity-api-url"` - IdentityAPIUsername string `yaml:"identity-api-username"` - IdentityAPIPassword string `yaml:"identity-api-password"` -} - -func (c *Config) validate() error { - var missing []string - if c.MongoURL == "" { - missing = append(missing, "mongo-url") - } - if c.APIAddr == "" { - missing = append(missing, "api-addr") - } - if c.AuthUsername == "" { - missing = append(missing, "auth-username") - } - if strings.Contains(c.AuthUsername, ":") { - return fmt.Errorf("invalid user name %q (contains ':')", c.AuthUsername) - } - if c.AuthPassword == "" { - missing = append(missing, "auth-password") - } - if len(missing) != 0 { - return fmt.Errorf("missing fields %s in config file", strings.Join(missing, ", ")) - } - return nil -} - -// Read reads a charm store configuration file from the -// given path. -func Read(path string) (*Config, error) { - f, err := os.Open(path) - if err != nil { - return nil, errgo.Notef(err, "cannot open config file") - } - defer f.Close() - data, err := ioutil.ReadAll(f) - if err != nil { - return nil, errgo.Notef(err, "cannot read %q", path) - } - var conf Config - err = yaml.Unmarshal(data, &conf) - if err != nil { - return nil, errgo.Notef(err, "cannot parse %q", path) - } - if err := conf.validate(); err != nil { - return nil, errgo.Mask(err) - } - return &conf, nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/config/config_test.go' --- src/gopkg.in/juju/charmstore.v4/config/config_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/config/config_test.go 1970-01-01 00:00:00 +0000 @@ -1,72 +0,0 @@ -// Copyright 2012, 2013, 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package config_test - -import ( - "io/ioutil" - "path" - "testing" - - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/config" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} - -type ConfigSuite struct { - jujutesting.IsolationSuite -} - -var _ = gc.Suite(&ConfigSuite{}) - -const testConfig = ` -mongo-url: localhost:23456 -api-addr: blah:2324 -foo: 1 -bar: false -auth-username: myuser -auth-password: mypasswd -identity-location: localhost:18082 -identity-public-key: 0000 -` - -func (s *ConfigSuite) readConfig(c *gc.C, content string) (*config.Config, error) { - // Write the configuration content to file. - path := path.Join(c.MkDir(), "charmd.conf") - err := ioutil.WriteFile(path, []byte(content), 0666) - c.Assert(err, gc.IsNil) - - // Read the configuration. 
- return config.Read(path) -} - -func (s *ConfigSuite) TestRead(c *gc.C) { - conf, err := s.readConfig(c, testConfig) - c.Assert(err, gc.IsNil) - c.Assert(conf, jc.DeepEquals, &config.Config{ - MongoURL: "localhost:23456", - APIAddr: "blah:2324", - AuthUsername: "myuser", - AuthPassword: "mypasswd", - IdentityLocation: "localhost:18082", - IdentityPublicKey: "0000", - }) -} - -func (s *ConfigSuite) TestReadConfigError(c *gc.C) { - cfg, err := config.Read(path.Join(c.MkDir(), "charmd.conf")) - c.Assert(err, gc.ErrorMatches, ".* no such file or directory") - c.Assert(cfg, gc.IsNil) -} - -func (s *ConfigSuite) TestValidateConfigError(c *gc.C) { - cfg, err := s.readConfig(c, "") - c.Assert(err, gc.ErrorMatches, "missing fields mongo-url, api-addr, auth-username, auth-password in config file") - c.Assert(cfg, gc.IsNil) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/csclient' === removed file 'src/gopkg.in/juju/charmstore.v4/csclient/archive.go' --- src/gopkg.in/juju/charmstore.v4/csclient/archive.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/csclient/archive.go 1970-01-01 00:00:00 +0000 @@ -1,105 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package csclient - -import ( - "crypto/sha512" - "fmt" - "io" - "io/ioutil" - "os" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" -) - -// ReadSeekCloser implements io.ReadSeeker and io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// openArchive is used to turn the current charm or bundle implementations -// into readers for their corresponding archive. -// It returns the corresponding archive reader, its SHA384 hash and size. -func openArchive(entity interface{}) (r ReadSeekCloser, hash string, size int64, err error) { - var path string - switch entity := entity.(type) { - case archiverTo: - // For example: charm.CharmDir or charm.BundleDir. - file, err := newRemoveOnCloseTempFile("entity-archive") - if err != nil { - return nil, "", 0, errgo.Notef(err, "cannot make temporary file") - } - if err := entity.ArchiveTo(file); err != nil { - file.Close() - return nil, "", 0, errgo.Notef(err, "cannot create entity archive") - } - if _, err := file.Seek(0, 0); err != nil { - file.Close() - return nil, "", 0, errgo.Notef(err, "cannot seek") - } - hash, size, err = readerHashAndSize(file) - if err != nil { - file.Close() - return nil, "", 0, errgo.Mask(err) - } - return file, hash, size, nil - case *charm.BundleArchive: - path = entity.Path - case *charm.CharmArchive: - path = entity.Path - default: - return nil, "", 0, errgo.Newf("cannot get the archive for entity type %T", entity) - } - file, err := os.Open(path) - if err != nil { - return nil, "", 0, errgo.Mask(err) - } - hash, size, err = readerHashAndSize(file) - if err != nil { - file.Close() - return nil, "", 0, errgo.Mask(err) - } - return file, hash, size, nil -} - -// readerHashAndSize returns the SHA384 and size of the data included in the -// given reader. 
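//
// For illustration (a sketch, not in the original file), the same
// hash-then-rewind pattern in isolation, for any io.ReadSeeker f:
//
//	h := sha512.New384()
//	size, err := io.Copy(h, f)
//	if err != nil {
//		// handle error
//	}
//	if _, err := f.Seek(0, 0); err != nil {
//		// handle error
//	}
//	hash := fmt.Sprintf("%x", h.Sum(nil)) // hex-encoded SHA-384, as used for uploads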
-func readerHashAndSize(r io.ReadSeeker) (hash string, size int64, err error) { - h := sha512.New384() - size, err = io.Copy(h, r) - if err != nil { - return "", 0, errgo.Notef(err, "cannot calculate hash") - } - if _, err := r.Seek(0, 0); err != nil { - return "", 0, errgo.Notef(err, "cannot seek") - } - return fmt.Sprintf("%x", h.Sum(nil)), size, nil -} - -type archiverTo interface { - ArchiveTo(io.Writer) error -} - -// newRemoveOnCloseTempFile creates a new temporary file in the default -// directory for temporary files with a name beginning with prefix. -// The resulting file is removed when the file is closed. -func newRemoveOnCloseTempFile(prefix string) (*removeOnCloseFile, error) { - file, err := ioutil.TempFile("", prefix) - if err != nil { - return nil, err - } - return &removeOnCloseFile{file}, nil -} - -// removeOnCloseFile represents a file which is removed when closed. -type removeOnCloseFile struct { - *os.File -} - -func (r *removeOnCloseFile) Close() error { - r.File.Close() - return os.Remove(r.File.Name()) -} === removed file 'src/gopkg.in/juju/charmstore.v4/csclient/csclient.go' --- src/gopkg.in/juju/charmstore.v4/csclient/csclient.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/csclient/csclient.go 1970-01-01 00:00:00 +0000 @@ -1,637 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -// The csclient package provides access to the charm store API. -package csclient - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - "unicode" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/httpbakery" - "gopkg.in/macaroon.v1" - - "gopkg.in/juju/charmstore.v4/params" -) - -const apiVersion = "v4" - -// ServerURL holds the default location of the global charm store. -// An alternate location can be configured by changing the URL field in the -// Params struct. -// For live testing or QAing the application, a different charm store -// location should be used, for instance "https://api.staging.jujucharms.com". -var ServerURL = "https://api.jujucharms.com/charmstore" - -// Client represents the client side of a charm store. -type Client struct { - params Params - header http.Header - statsDisabled bool -} - -// Params holds parameters for creating a new charm store client. -type Params struct { - // URL holds the root endpoint URL of the charmstore, - // with no trailing slash, not including the version. - // For example https://api.jujucharms.com/charmstore - // If empty, the default charm store client location is used. - URL string - - // User and Password hold the authentication credentials - // for the client. If User is empty, no credentials will be - // sent. - User string - Password string - - // HTTPClient holds the HTTP client to use when making - // requests to the store. If nil, httpbakery.NewHTTPClient will - // be used. - HTTPClient *http.Client - - // VisitWebPage is called when authorization requires that - // the user visits a web page to authenticate themselves. - // If nil, a default function that returns ErrNoInteraction will be used. - VisitWebPage func(url *url.URL) error -} - -// New returns a new charm store client. 
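//
// For illustration (a sketch, not in the original file): constructing a
// client for the default store with placeholder basic-auth credentials;
// leaving URL empty selects ServerURL:
//
//	client := csclient.New(csclient.Params{
//		User:     "someuser",
//		Password: "somepassword",
//	})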
-func New(p Params) *Client {
-	if p.URL == "" {
-		p.URL = ServerURL
-	}
-	if p.VisitWebPage == nil {
-		p.VisitWebPage = noVisit
-	}
-	if p.HTTPClient == nil {
-		p.HTTPClient = httpbakery.NewHTTPClient()
-	}
-	return &Client{
-		params: p,
-	}
-}
-
-// ErrNoInteraction is the error cause returned by the default Params.VisitWebPage
-// value when it is nil.
-var ErrNoInteraction = errgo.New("interaction required but no web browser configured")
-
-func noVisit(url *url.URL) error {
-	return ErrNoInteraction
-}
-
-// ServerURL returns the charm store URL used by the client.
-func (c *Client) ServerURL() string {
-	return c.params.URL
-}
-
-// DisableStats disables incrementing download stats when retrieving archives
-// from the charm store.
-func (c *Client) DisableStats() {
-	c.statsDisabled = true
-}
-
-// SetHTTPHeader sets custom HTTP headers that will be sent to the charm store
-// on each request.
-func (c *Client) SetHTTPHeader(header http.Header) {
-	c.header = header
-}
-
-// GetArchive retrieves the archive for the given charm or bundle, returning a
-// reader from which its data can be read, the fully qualified id of the
-// corresponding entity, the SHA384 hash of the data and its size.
-func (c *Client) GetArchive(id *charm.Reference) (r io.ReadCloser, eid *charm.Reference, hash string, size int64, err error) {
-	// Create the request.
-	req, err := http.NewRequest("GET", "", nil)
-	if err != nil {
-		return nil, nil, "", 0, errgo.Notef(err, "cannot make new request")
-	}
-
-	// Send the request.
-	v := url.Values{}
-	if c.statsDisabled {
-		v.Set("stats", "0")
-	}
-	u := url.URL{
-		Path:     "/" + id.Path() + "/archive",
-		RawQuery: v.Encode(),
-	}
-	resp, err := c.Do(req, u.String())
-	if err != nil {
-		return nil, nil, "", 0, errgo.NoteMask(err, "cannot get archive", errgo.Any)
-	}
-
-	// Validate the response headers.
-	entityId := resp.Header.Get(params.EntityIdHeader)
-	if entityId == "" {
-		resp.Body.Close()
-		return nil, nil, "", 0, errgo.Newf("no %s header found in response", params.EntityIdHeader)
-	}
-	eid, err = charm.ParseReference(entityId)
-	if err != nil {
-		// The server did not return a valid id.
-		resp.Body.Close()
-		return nil, nil, "", 0, errgo.Notef(err, "invalid entity id found in response")
-	}
-	if eid.Series == "" || eid.Revision == -1 {
-		// The server did not return a fully qualified entity id.
-		resp.Body.Close()
-		return nil, nil, "", 0, errgo.Newf("archive get returned not fully qualified entity id %q", eid)
-	}
-	hash = resp.Header.Get(params.ContentHashHeader)
-	if hash == "" {
-		resp.Body.Close()
-		return nil, nil, "", 0, errgo.Newf("no %s header found in response", params.ContentHashHeader)
-	}
-
-	// Validate the response contents.
-	if resp.ContentLength < 0 {
-		// TODO frankban: handle the case where the contents are chunked.
-		resp.Body.Close()
-		return nil, nil, "", 0, errgo.Newf("no content length found in response")
-	}
-	return resp.Body, eid, hash, resp.ContentLength, nil
-}
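// An illustrative upload sketch (not part of the original file; the
// path and id are hypothetical, and charm.ReadCharmDir comes from
// gopkg.in/juju/charm.v5):
//
//	id := charm.MustParseReference("~someuser/trusty/mycharm")
//	ch, err := charm.ReadCharmDir("/path/to/mycharm")
//	if err != nil {
//		// handle error
//	}
//	newId, err := client.UploadCharm(id, ch)
//	if err != nil {
//		// handle error
//	}
//	// newId carries the revision assigned by the store,
//	// e.g. cs:~someuser/trusty/mycharm-0
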
-// UploadCharm uploads the given charm to the charm store with the given id,
-// which must not specify a revision.
-// The accepted charm implementations are charm.CharmDir and
-// charm.CharmArchive.
-//
-// UploadCharm returns the id that the charm has been given in the
-// store - this will be the same as id except the revision.
-func (c *Client) UploadCharm(id *charm.Reference, ch charm.Charm) (*charm.Reference, error) {
-	if id.Revision != -1 {
-		return nil, errgo.Newf("revision specified in %q, but should not be specified", id)
-	}
-	r, hash, size, err := openArchive(ch)
-	if err != nil {
-		return nil, errgo.Notef(err, "cannot open charm archive")
-	}
-	defer r.Close()
-	return c.uploadArchive(id, r, hash, size, -1)
-}
-
-// UploadCharmWithRevision uploads the given charm to the
-// given id in the charm store, which must contain a revision.
-// If promulgatedRevision is not -1, it specifies that the charm
-// should be marked as promulgated with that revision.
-//
-// This method is provided only for testing and should not
-// generally be used otherwise.
-func (c *Client) UploadCharmWithRevision(id *charm.Reference, ch charm.Charm, promulgatedRevision int) error {
-	if id.Revision == -1 {
-		return errgo.Newf("revision not specified in %q", id)
-	}
-	r, hash, size, err := openArchive(ch)
-	if err != nil {
-		return errgo.Notef(err, "cannot open charm archive")
-	}
-	defer r.Close()
-	_, err = c.uploadArchive(id, r, hash, size, promulgatedRevision)
-	return errgo.Mask(err)
-}
-
-// UploadBundle uploads the given bundle to the charm store with the given id,
-// which must not specify a revision.
-// The accepted bundle implementations are charm.BundleDir and
-// charm.BundleArchive.
-//
-// UploadBundle returns the id that the bundle has been given in the
-// store - this will be the same as id except the revision.
-func (c *Client) UploadBundle(id *charm.Reference, b charm.Bundle) (*charm.Reference, error) {
-	if id.Revision != -1 {
-		return nil, errgo.Newf("revision specified in %q, but should not be specified", id)
-	}
-	r, hash, size, err := openArchive(b)
-	if err != nil {
-		return nil, errgo.Notef(err, "cannot open bundle archive")
-	}
-	defer r.Close()
-	return c.uploadArchive(id, r, hash, size, -1)
-}
-
-// UploadBundleWithRevision uploads the given bundle to the
-// given id in the charm store, which must contain a revision.
-// If promulgatedRevision is not -1, it specifies that the bundle
-// should be marked as promulgated with that revision.
-//
-// This method is provided only for testing and should not
-// generally be used otherwise.
-func (c *Client) UploadBundleWithRevision(id *charm.Reference, b charm.Bundle, promulgatedRevision int) error {
-	if id.Revision == -1 {
-		return errgo.Newf("revision not specified in %q", id)
-	}
-	r, hash, size, err := openArchive(b)
-	if err != nil {
-		return errgo.Notef(err, "cannot open bundle archive")
-	}
-	defer r.Close()
-	_, err = c.uploadArchive(id, r, hash, size, promulgatedRevision)
-	return errgo.Mask(err)
-}
-
-// uploadArchive pushes the archive for the charm or bundle represented by
-// the given body, its SHA384 hash and its size. It returns the resulting
-// entity reference. The given id should include the series and should not
-// include the revision.
-func (c *Client) uploadArchive(id *charm.Reference, body io.ReadSeeker, hash string, size int64, promulgatedRevision int) (*charm.Reference, error) {
-	// When uploading archives, it can be a problem that
-	// an error response is returned while we are still writing
-	// the body data.
-	// To avoid this, we log in first so that we don't need to
-	// do the macaroon exchange after POST.
-	// Unfortunately this won't help matters if the user is logged in but
-	// doesn't have privileges to write to the stated charm.
- // A better solution would be to fix https://github.com/golang/go/issues/3665 - // and use the 100-Continue client functionality. - // - // We only need to do this when basic auth credentials are not provided. - if c.params.User == "" { - if err := c.Login(); err != nil { - return nil, errgo.Notef(err, "cannot log in") - } - } - // Validate the entity id. - if id.Series == "" { - return nil, errgo.Newf("no series specified in %q", id) - } - method := "POST" - promulgatedArg := "" - if id.Revision != -1 { - method = "PUT" - if promulgatedRevision != -1 { - pr := *id - pr.User = "" - pr.Revision = promulgatedRevision - promulgatedArg = "&promulgated=" + pr.Path() - } - } - - // Prepare the request. - req, err := http.NewRequest(method, "", nil) - if err != nil { - return nil, errgo.Notef(err, "cannot make new request") - } - req.Header.Set("Content-Type", "application/zip") - req.ContentLength = size - - // Send the request. - resp, err := c.DoWithBody( - req, - "/"+id.Path()+"/archive?hash="+hash+promulgatedArg, - httpbakery.SeekerBody(body), - ) - if err != nil { - return nil, errgo.NoteMask(err, "cannot post archive", errgo.Any) - } - defer resp.Body.Close() - - // Parse the response. - var result params.ArchiveUploadResponse - if err := parseResponseBody(resp.Body, &result); err != nil { - return nil, errgo.Mask(err) - } - return result.Id, nil -} - -// PutExtraInfo puts extra-info data for the given id. -// Each entry in the info map causes a value in extra-info with -// that key to be set to the associated value. -// Entries not set in the map will be unchanged. -func (c *Client) PutExtraInfo(id *charm.Reference, info map[string]interface{}) error { - return c.Put("/"+id.Path()+"/meta/extra-info", info) -} - -// Meta fetches metadata on the charm or bundle with the -// given id. The result value provides a value -// to be filled in with the result, which must be -// a pointer to a struct containing members corresponding -// to possible metadata include parameters -// (see https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta). -// -// It returns the fully qualified id of the entity. -// -// The name of the struct member is translated to -// a lower case hyphen-separated form; for example, -// ArchiveSize becomes "archive-size", and BundleMachineCount -// becomes "bundle-machine-count", but may also -// be specified in the field's tag -// -// This example will fill in the result structure with information -// about the given id, including information on its archive -// size (include archive-size), upload time (include archive-upload-time) -// and digest (include extra-info/digest). -// -// var result struct { -// ArchiveSize params.ArchiveSizeResponse -// ArchiveUploadTime params.ArchiveUploadTimeResponse -// Digest string `csclient:"extra-info/digest"` -// } -// id, err := client.Meta(id, &result) -func (c *Client) Meta(id *charm.Reference, result interface{}) (*charm.Reference, error) { - if result == nil { - return nil, fmt.Errorf("expected valid result pointer, not nil") - } - resultv := reflect.ValueOf(result) - resultt := resultv.Type() - if resultt.Kind() != reflect.Ptr { - return nil, fmt.Errorf("expected pointer, not %T", result) - } - resultt = resultt.Elem() - if resultt.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected pointer to struct, not %T", result) - } - resultv = resultv.Elem() - - // At this point, resultv refers to the struct value pointed - // to by result, and resultt is its type. 
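	// An illustrative aside (not in the original): for a result struct
	// such as
	//
	//	struct {
	//		ArchiveSize params.ArchiveSizeResponse
	//		Digest      string `csclient:"extra-info/digest"`
	//	}
	//
	// the loop below requests include=archive-size and
	// include=extra-info/digest, and records the address of each field
	// so each raw JSON result can be unmarshaled into place.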
- - numField := resultt.NumField() - includes := make([]string, 0, numField) - - // results holds an entry for each field in the result value, - // pointing to the value for that field. - results := make(map[string]reflect.Value) - for i := 0; i < numField; i++ { - field := resultt.Field(i) - if field.PkgPath != "" { - // Field is private; ignore it. - continue - } - if field.Anonymous { - // At some point in the future, it might be nice to - // support anonymous fields, but for now the - // additional complexity doesn't seem worth it. - return nil, fmt.Errorf("anonymous fields not supported") - } - apiName := field.Tag.Get("csclient") - if apiName == "" { - apiName = hyphenate(field.Name) - } - includes = append(includes, "include="+apiName) - results[apiName] = resultv.FieldByName(field.Name).Addr() - } - // We unmarshal into rawResult, then unmarshal each field - // separately into its place in the final result value. - // Note that we can't use params.MetaAnyResponse because - // that will unpack all the values inside the Meta field, - // but we want to keep them raw so that we can unmarshal - // them ourselves. - var rawResult struct { - Id *charm.Reference - Meta map[string]json.RawMessage - } - path := "/" + id.Path() + "/meta/any" - if len(includes) > 0 { - path += "?" + strings.Join(includes, "&") - } - if err := c.Get(path, &rawResult); err != nil { - return nil, errgo.NoteMask(err, fmt.Sprintf("cannot get %q", path), errgo.Any) - } - // Note that the server is not required to send back values - // for all fields. "If there is no metadata for the given meta path, the - // element will be omitted" - // See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany - for name, r := range rawResult.Meta { - v, ok := results[name] - if !ok { - // The server has produced a result that we - // don't know about. Ignore it. - continue - } - // Unmarshal the raw JSON into the final struct field. - err := json.Unmarshal(r, v.Interface()) - if err != nil { - return nil, errgo.Notef(err, "cannot unmarshal %s", name) - } - } - return rawResult.Id, nil -} - -// hyphenate returns the hyphenated version of the given -// field name, as specified in the Client.Meta method. -func hyphenate(s string) string { - // TODO hyphenate FooHTTPBar as foo-http-bar? - var buf bytes.Buffer - var prevLower bool - for _, r := range s { - if !unicode.IsUpper(r) { - prevLower = true - buf.WriteRune(r) - continue - } - if prevLower { - buf.WriteRune('-') - } - buf.WriteRune(unicode.ToLower(r)) - prevLower = false - } - return buf.String() -} - -// Get makes a GET request to the given path in the charm store (not -// including the host name or version prefix but including a leading /), -// parsing the result as JSON into the given result value, which should -// be a pointer to the expected data, but may be nil if no result is -// desired. -func (c *Client) Get(path string, result interface{}) error { - req, err := http.NewRequest("GET", "", nil) - if err != nil { - return errgo.Notef(err, "cannot make new request") - } - resp, err := c.Do(req, path) - if err != nil { - return errgo.Mask(err, errgo.Any) - } - defer resp.Body.Close() - // Parse the response. - if err := parseResponseBody(resp.Body, result); err != nil { - return errgo.Mask(err) - } - return nil -} - -// Put makes a PUT request to the given path in the charm store (not -// including the host name or version prefix, but including a leading -// /), marshaling the given value as JSON to use as the request body. 
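//
// For example (a sketch mirroring TestPutSuccess later in this diff),
// granting read permission on an entity and reading it back with Get:
//
//	perms := []string{"bob"}
//	if err := client.Put("/~charmers/utopic/wordpress-42/meta/perm/read", perms); err != nil {
//		// handle error
//	}
//	var got []string
//	if err := client.Get("/~charmers/utopic/wordpress-42/meta/perm/read", &got); err != nil {
//		// handle error
//	}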
-func (c *Client) Put(path string, val interface{}) error { - req, _ := http.NewRequest("PUT", "", nil) - req.Header.Set("Content-Type", "application/json") - data, err := json.Marshal(val) - if err != nil { - return errgo.Notef(err, "cannot marshal PUT body") - } - body := bytes.NewReader(data) - resp, err := c.DoWithBody(req, path, httpbakery.SeekerBody(body)) - if err != nil { - return errgo.Mask(err, errgo.Any) - } - resp.Body.Close() - return nil -} - -func parseResponseBody(body io.Reader, result interface{}) error { - data, err := ioutil.ReadAll(body) - if err != nil { - return errgo.Notef(err, "cannot read response body") - } - if result == nil { - // The caller doesn't care about the response body. - return nil - } - if err := json.Unmarshal(data, result); err != nil { - return errgo.Notef(err, "cannot unmarshal response %q", sizeLimit(data)) - } - return nil -} - -// DoWithBody is like Do except that the given getBody function is -// called to obtain the body for the HTTP request. Any body returned -// by getBody will be closed before DoWithBody returns. -// -// Any error returned from the underlying httpbakery.DoWithBody -// request will have an unchanged error cause. -func (c *Client) DoWithBody(req *http.Request, path string, getBody httpbakery.BodyGetter) (*http.Response, error) { - if c.params.User != "" { - userPass := c.params.User + ":" + c.params.Password - authBasic := base64.StdEncoding.EncodeToString([]byte(userPass)) - req.Header.Set("Authorization", "Basic "+authBasic) - } - - // Prepare the request. - if !strings.HasPrefix(path, "/") { - return nil, errgo.Newf("path %q is not absolute", path) - } - for k, vv := range c.header { - req.Header[k] = append(req.Header[k], vv...) - } - u, err := url.Parse(c.params.URL + "/" + apiVersion + path) - if err != nil { - return nil, errgo.Mask(err) - } - req.URL = u - - // Send the request. - resp, err := httpbakery.DoWithBody(c.params.HTTPClient, req, getBody, c.params.VisitWebPage) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - if resp.StatusCode == http.StatusOK { - return resp, nil - } - defer resp.Body.Close() - - // Parse the response error. - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errgo.Notef(err, "cannot read response body") - } - var perr params.Error - if err := json.Unmarshal(data, &perr); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal error response %q", sizeLimit(data)) - } - if perr.Message == "" { - return nil, errgo.Newf("error response with empty message %s", sizeLimit(data)) - } - return nil, &perr -} - -// Do makes an arbitrary request to the charm store. -// It adds appropriate headers to the given HTTP request, -// sends it to the charm store, and returns the resulting -// response. Do never returns a response with a status -// that is not http.StatusOK. -// -// The URL field in the request is ignored and overwritten. -// -// This is a low level method - more specific Client methods -// should be used when possible. -// -// For requests with a body (for example PUT or POST) use DoWithBody -// instead. -func (c *Client) Do(req *http.Request, path string) (*http.Response, error) { - if req.Body != nil { - return nil, errgo.New("body unexpectedly provided in http request - use DoWithBody") - } - return c.DoWithBody(req, path, noBody) -} - -func noBody() (io.ReadCloser, error) { - return nil, nil -} - -func sizeLimit(data []byte) []byte { - const max = 1024 - if len(data) < max { - return data - } - return append(data[0:max], fmt.Sprintf(" ... 
[%d bytes omitted]", len(data)-max)...) -} - -// Log sends a log message to the charmstore's log database. -func (cs *Client) Log(typ params.LogType, level params.LogLevel, message string, urls ...*charm.Reference) error { - b, err := json.Marshal(message) - if err != nil { - return errgo.Notef(err, "cannot marshal log message") - } - - // Prepare and send the log. - // TODO (frankban): we might want to buffer logs in order to reduce - // requests. - logs := []params.Log{{ - Data: (*json.RawMessage)(&b), - Level: level, - Type: typ, - URLs: urls, - }} - b, err = json.Marshal(logs) - if err != nil { - return errgo.Notef(err, "cannot marshal log message") - } - - req, err := http.NewRequest("POST", "", nil) - if err != nil { - return errgo.Notef(err, "cannot create log request") - } - req.Header.Set("Content-Type", "application/json") - body := bytes.NewReader(b) - resp, err := cs.DoWithBody(req, "/log", httpbakery.SeekerBody(body)) - if err != nil { - return errgo.NoteMask(err, "cannot send log message", errgo.Any) - } - resp.Body.Close() - return nil -} - -// Login explicitly obtains authorization credentials -// for the charm store and stores them in the client's -// cookie jar. -func (cs *Client) Login() error { - var m macaroon.Macaroon - if err := cs.Get("/macaroon", &m); err != nil { - return errgo.Notef(err, "cannot retrieve the authentication macaroon") - } - ms, err := httpbakery.DischargeAll(&m, cs.params.HTTPClient, cs.params.VisitWebPage) - if err != nil { - return errgo.Notef(err, "cannot discharge login macaroon") - } - u, err := url.Parse(cs.ServerURL()) - if err != nil { - return errgo.Mask(err) - } - if err := httpbakery.SetCookie(cs.params.HTTPClient.Jar, u, ms); err != nil { - return errgo.Notef(err, "cannot set cookie") - } - return nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/csclient/csclient_test.go' --- src/gopkg.in/juju/charmstore.v4/csclient/csclient_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/csclient/csclient_test.go 1970-01-01 00:00:00 +0000 @@ -1,1293 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package csclient_test - -import ( - "bytes" - "crypto/sha512" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "strings" - "time" - - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakerytest" - "gopkg.in/macaroon-bakery.v0/httpbakery" - "gopkg.in/mgo.v2" - - "gopkg.in/juju/charmstore.v4" - "gopkg.in/juju/charmstore.v4/csclient" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/params" -) - -var charmRepo = storetesting.Charms - -// Define fake attributes to be used in tests. 
-var fakeReader, fakeHash, fakeSize = func() (io.ReadSeeker, string, int64) { - content := []byte("fake content") - h := sha512.New384() - h.Write(content) - return bytes.NewReader(content), fmt.Sprintf("%x", h.Sum(nil)), int64(len(content)) -}() - -type suite struct { - jujutesting.IsolatedMgoSuite - client *csclient.Client - srv *httptest.Server - serverParams charmstore.ServerParams - discharge func(cond, arg string) ([]checkers.Caveat, error) -} - -var _ = gc.Suite(&suite{}) - -func (s *suite) SetUpTest(c *gc.C) { - s.IsolatedMgoSuite.SetUpTest(c) - s.startServer(c, s.Session) - s.client = csclient.New(csclient.Params{ - URL: s.srv.URL, - User: s.serverParams.AuthUsername, - Password: s.serverParams.AuthPassword, - }) -} - -func (s *suite) TearDownTest(c *gc.C) { - s.srv.Close() - s.IsolatedMgoSuite.TearDownTest(c) -} - -func (s *suite) startServer(c *gc.C, session *mgo.Session) { - s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { - return nil, fmt.Errorf("no discharge") - } - - discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - return s.discharge(cond, arg) - }) - - serverParams := charmstore.ServerParams{ - AuthUsername: "test-user", - AuthPassword: "test-password", - IdentityLocation: discharger.Service.Location(), - PublicKeyLocator: discharger, - } - - db := session.DB("charmstore") - handler, err := charmstore.NewServer(db, nil, "", serverParams, charmstore.V4) - c.Assert(err, gc.IsNil) - s.srv = httptest.NewServer(handler) - s.serverParams = serverParams - -} - -func (s *suite) TestDefaultServerURL(c *gc.C) { - // Add a charm used for tests. - err := s.client.UploadCharmWithRevision( - charm.MustParseReference("~charmers/vivid/testing-wordpress-42"), - charmRepo.CharmDir("wordpress"), - 42, - ) - c.Assert(err, gc.IsNil) - - // Patch the default server URL. - s.PatchValue(&csclient.ServerURL, s.srv.URL) - - // Instantiate a client using the default server URL. - client := csclient.New(csclient.Params{ - User: s.serverParams.AuthUsername, - Password: s.serverParams.AuthPassword, - }) - c.Assert(client.ServerURL(), gc.Equals, s.srv.URL) - - // Check that the request succeeds. - err = client.Get("/vivid/testing-wordpress-42/expand-id", nil) - c.Assert(err, gc.IsNil) -} - -func (s *suite) TestSetHTTPHeader(c *gc.C) { - var header http.Header - srv := httptest.NewServer(http.HandlerFunc(func(_ http.ResponseWriter, req *http.Request) { - header = req.Header - })) - defer srv.Close() - - sendRequest := func(client *csclient.Client) { - req, err := http.NewRequest("GET", "", nil) - c.Assert(err, jc.ErrorIsNil) - _, err = client.Do(req, "/") - c.Assert(err, jc.ErrorIsNil) - } - client := csclient.New(csclient.Params{ - URL: srv.URL, - }) - - // Make a first request without custom headers. - sendRequest(client) - defaultHeaderLen := len(header) - - // Make a second request adding a couple of custom headers. - h := make(http.Header) - h.Set("k1", "v1") - h.Add("k2", "v2") - h.Add("k2", "v3") - client.SetHTTPHeader(h) - sendRequest(client) - c.Assert(header, gc.HasLen, defaultHeaderLen+len(h)) - c.Assert(header.Get("k1"), gc.Equals, "v1") - c.Assert(header[http.CanonicalHeaderKey("k2")], jc.DeepEquals, []string{"v2", "v3"}) - - // Make a third request without custom headers. 
- client.SetHTTPHeader(nil) - sendRequest(client) - c.Assert(header, gc.HasLen, defaultHeaderLen) -} - -var getTests = []struct { - about string - path string - nilResult bool - expectResult interface{} - expectError string - expectErrorCode params.ErrorCode -}{{ - about: "success", - path: "/wordpress/expand-id", - expectResult: []params.ExpandedId{{ - Id: "cs:utopic/wordpress-42", - }}, -}, { - about: "success with nil result", - path: "/wordpress/expand-id", - nilResult: true, -}, { - about: "non-absolute path", - path: "wordpress", - expectError: `path "wordpress" is not absolute`, -}, { - about: "URL parse error", - path: "/wordpress/%zz", - expectError: `parse .*: invalid URL escape "%zz"`, -}, { - about: "result with error code", - path: "/blahblah", - expectError: "not found", - expectErrorCode: params.ErrNotFound, -}} - -func (s *suite) TestGet(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - url := charm.MustParseReference("~charmers/utopic/wordpress-42") - err := s.client.UploadCharmWithRevision(url, ch, 42) - c.Assert(err, gc.IsNil) - - for i, test := range getTests { - c.Logf("test %d: %s", i, test.about) - - // Send the request. - var result json.RawMessage - var resultPtr interface{} - if !test.nilResult { - resultPtr = &result - } - err = s.client.Get(test.path, resultPtr) - - // Check the response. - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError, gc.Commentf("error is %T; %#v", err, err)) - c.Assert(result, gc.IsNil) - cause := errgo.Cause(err) - if code, ok := cause.(params.ErrorCode); ok { - c.Assert(code, gc.Equals, test.expectErrorCode) - } else { - c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) - } - continue - } - c.Assert(err, gc.IsNil) - if test.expectResult != nil { - c.Assert(string(result), jc.JSONEquals, test.expectResult) - } - } -} - -var putErrorTests = []struct { - about string - path string - val interface{} - expectError string - expectErrorCode params.ErrorCode -}{{ - about: "bad JSON val", - path: "/~charmers/utopic/wordpress-42/meta/extra-info/foo", - val: make(chan int), - expectError: `cannot marshal PUT body: json: unsupported type: chan int`, -}, { - about: "non-absolute path", - path: "wordpress", - expectError: `path "wordpress" is not absolute`, -}, { - about: "URL parse error", - path: "/wordpress/%zz", - expectError: `parse .*: invalid URL escape "%zz"`, -}, { - about: "result with error code", - path: "/blahblah", - expectError: "not found", - expectErrorCode: params.ErrNotFound, -}} - -func (s *suite) TestPutError(c *gc.C) { - err := s.client.UploadCharmWithRevision( - charm.MustParseReference("~charmers/utopic/wordpress-42"), - charmRepo.CharmDir("wordpress"), - 42) - c.Assert(err, gc.IsNil) - - for i, test := range putErrorTests { - c.Logf("test %d: %s", i, test.about) - err := s.client.Put(test.path, test.val) - c.Assert(err, gc.ErrorMatches, test.expectError) - cause := errgo.Cause(err) - if code, ok := cause.(params.ErrorCode); ok { - c.Assert(code, gc.Equals, test.expectErrorCode) - } else { - c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) - } - } -} - -func (s *suite) TestPutSuccess(c *gc.C) { - err := s.client.UploadCharmWithRevision( - charm.MustParseReference("~charmers/utopic/wordpress-42"), - charmRepo.CharmDir("wordpress"), - 42) - c.Assert(err, gc.IsNil) - - perms := []string{"bob"} - err = s.client.Put("/~charmers/utopic/wordpress-42/meta/perm/read", perms) - c.Assert(err, gc.IsNil) - var got []string - err = 
s.client.Get("/~charmers/utopic/wordpress-42/meta/perm/read", &got) - c.Assert(err, gc.IsNil) - c.Assert(got, jc.DeepEquals, perms) -} - -func (s *suite) TestGetArchive(c *gc.C) { - key := s.checkGetArchive(c) - - // Check that the downloads count for the entity has been updated. - s.checkCharmDownloads(c, key, 1) -} - -func (s *suite) TestGetArchiveWithStatsDisabled(c *gc.C) { - s.client.DisableStats() - key := s.checkGetArchive(c) - - // Check that the downloads count for the entity has not been updated. - s.checkCharmDownloads(c, key, 0) -} - -var checkDownloadsAttempt = utils.AttemptStrategy{ - Total: 1 * time.Second, - Delay: 100 * time.Millisecond, -} - -func (s *suite) checkCharmDownloads(c *gc.C, key string, expect int64) { - stableCount := 0 - for a := checkDownloadsAttempt.Start(); a.Next(); { - count := s.statsForKey(c, key) - if count == expect { - // Wait for a couple of iterations to make sure that it's stable. - if stableCount++; stableCount >= 2 { - return - } - } else { - stableCount = 0 - } - if !a.HasNext() { - c.Errorf("unexpected download count for %s, got %d, want %d", key, count, expect) - } - } -} - -func (s *suite) statsForKey(c *gc.C, key string) int64 { - var result []params.Statistic - err := s.client.Get("/stats/counter/"+key, &result) - c.Assert(err, gc.IsNil) - c.Assert(result, gc.HasLen, 1) - return result[0].Count -} - -func (s *suite) checkGetArchive(c *gc.C) string { - ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") - - // Open the archive and calculate its hash and size. - r, expectHash, expectSize := archiveHashAndSize(c, ch.Path) - r.Close() - - url := charm.MustParseReference("~charmers/utopic/wordpress-42") - err := s.client.UploadCharmWithRevision(url, ch, 42) - c.Assert(err, gc.IsNil) - - rb, id, hash, size, err := s.client.GetArchive(url) - c.Assert(err, gc.IsNil) - defer rb.Close() - c.Assert(id, jc.DeepEquals, url) - c.Assert(hash, gc.Equals, expectHash) - c.Assert(size, gc.Equals, expectSize) - - h := sha512.New384() - size, err = io.Copy(h, rb) - c.Assert(err, gc.IsNil) - c.Assert(size, gc.Equals, expectSize) - c.Assert(fmt.Sprintf("%x", h.Sum(nil)), gc.Equals, expectHash) - - // Return the stats key for the archive download. 
- keys := []string{params.StatsArchiveDownload, "utopic", "wordpress", "charmers", "42"} - return strings.Join(keys, ":") -} - -func (s *suite) TestGetArchiveErrorNotFound(c *gc.C) { - url := charm.MustParseReference("no-such") - r, id, hash, size, err := s.client.GetArchive(url) - c.Assert(err, gc.ErrorMatches, `cannot get archive: no matching charm or bundle for "cs:no-such"`) - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) - c.Assert(r, gc.IsNil) - c.Assert(id, gc.IsNil) - c.Assert(hash, gc.Equals, "") - c.Assert(size, gc.Equals, int64(0)) -} - -var getArchiveWithBadResponseTests = []struct { - about string - response *http.Response - error error - expectError string -}{{ - about: "http client Get failure", - error: errgo.New("round trip failure"), - expectError: "cannot get archive: Get .*: round trip failure", -}, { - about: "no entity id header", - response: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{ - params.ContentHashHeader: {fakeHash}, - }, - Body: ioutil.NopCloser(strings.NewReader("")), - ContentLength: fakeSize, - }, - expectError: "no " + params.EntityIdHeader + " header found in response", -}, { - about: "invalid entity id header", - response: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{ - params.ContentHashHeader: {fakeHash}, - params.EntityIdHeader: {"no:such"}, - }, - Body: ioutil.NopCloser(strings.NewReader("")), - ContentLength: fakeSize, - }, - expectError: `invalid entity id found in response: charm URL has invalid schema: "no:such"`, -}, { - about: "partial entity id header", - response: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{ - params.ContentHashHeader: {fakeHash}, - params.EntityIdHeader: {"django-42"}, - }, - Body: ioutil.NopCloser(strings.NewReader("")), - ContentLength: fakeSize, - }, - expectError: `archive get returned not fully qualified entity id "cs:django-42"`, -}, { - about: "no hash header", - response: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{ - params.EntityIdHeader: {"cs:utopic/django-42"}, - }, - Body: ioutil.NopCloser(strings.NewReader("")), - ContentLength: fakeSize, - }, - expectError: "no " + params.ContentHashHeader + " header found in response", -}, { - about: "no content length", - response: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Header: http.Header{ - params.ContentHashHeader: {fakeHash}, - params.EntityIdHeader: {"cs:utopic/django-42"}, - }, - Body: ioutil.NopCloser(strings.NewReader("")), - ContentLength: -1, - }, - expectError: "no content length found in response", -}} - -func (s *suite) TestGetArchiveWithBadResponse(c *gc.C) { - id := charm.MustParseReference("wordpress") - for i, test := range getArchiveWithBadResponseTests { - c.Logf("test %d: %s", i, test.about) - cl := csclient.New(csclient.Params{ - URL: "http://0.1.2.3", - HTTPClient: &http.Client{ - Transport: &cannedRoundTripper{ - resp: test.response, - error: test.error, - }, - }, - }) - _, _, _, _, err := cl.GetArchive(id) - c.Assert(err, gc.ErrorMatches, test.expectError) - } -} - -func (s *suite) TestUploadArchiveWithCharm(c *gc.C) { - path := charmRepo.CharmArchivePath(c.MkDir(), "wordpress") - - // Post the archive. 
- s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-0") - - // Posting the same archive a second time does not change its resulting id. - s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-0") - - // Posting a different archive to the same URL increases the resulting id - // revision. - path = charmRepo.CharmArchivePath(c.MkDir(), "mysql") - s.checkUploadArchive(c, path, "~charmers/utopic/wordpress", "cs:~charmers/utopic/wordpress-1") -} - -func (s *suite) prepareBundleCharms(c *gc.C) { - // Add the charms required by the wordpress-simple bundle to the store. - err := s.client.UploadCharmWithRevision( - charm.MustParseReference("~charmers/utopic/wordpress-42"), - charmRepo.CharmArchive(c.MkDir(), "wordpress"), - 42, - ) - c.Assert(err, gc.IsNil) - err = s.client.UploadCharmWithRevision( - charm.MustParseReference("~charmers/utopic/mysql-47"), - charmRepo.CharmArchive(c.MkDir(), "mysql"), - 47, - ) - c.Assert(err, gc.IsNil) -} - -func (s *suite) TestUploadArchiveWithBundle(c *gc.C) { - s.prepareBundleCharms(c) - path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") - // Post the archive. - s.checkUploadArchive(c, path, "~charmers/bundle/wordpress-simple", "cs:~charmers/bundle/wordpress-simple-0") -} - -var uploadArchiveWithBadResponseTests = []struct { - about string - response *http.Response - error error - expectError string -}{{ - about: "http client Post failure", - error: errgo.New("round trip failure"), - expectError: "cannot post archive: Post .*: round trip failure", -}, { - about: "invalid JSON in body", - response: &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - ProtoMajor: 1, - ProtoMinor: 0, - Body: ioutil.NopCloser(strings.NewReader("no id here")), - ContentLength: 0, - }, - expectError: `cannot unmarshal response "no id here": .*`, -}} - -func (s *suite) TestUploadArchiveWithBadResponse(c *gc.C) { - id := charm.MustParseReference("trusty/wordpress") - for i, test := range uploadArchiveWithBadResponseTests { - c.Logf("test %d: %s", i, test.about) - cl := csclient.New(csclient.Params{ - URL: "http://0.1.2.3", - User: "bob", - HTTPClient: &http.Client{ - Transport: &cannedRoundTripper{ - resp: test.response, - error: test.error, - }, - }, - }) - id, err := csclient.UploadArchive(cl, id, fakeReader, fakeHash, fakeSize, -1) - c.Assert(id, gc.IsNil) - c.Assert(err, gc.ErrorMatches, test.expectError) - } -} - -func (s *suite) TestUploadArchiveWithNoSeries(c *gc.C) { - id, err := csclient.UploadArchive( - s.client, - charm.MustParseReference("wordpress"), - fakeReader, fakeHash, fakeSize, -1) - c.Assert(id, gc.IsNil) - c.Assert(err, gc.ErrorMatches, `no series specified in "cs:wordpress"`) -} - -func (s *suite) TestUploadArchiveWithServerError(c *gc.C) { - path := charmRepo.CharmArchivePath(c.MkDir(), "wordpress") - body, hash, size := archiveHashAndSize(c, path) - defer body.Close() - - // Send an invalid hash so that the server returns an error. - url := charm.MustParseReference("~charmers/trusty/wordpress") - id, err := csclient.UploadArchive(s.client, url, body, hash+"mismatch", size, -1) - c.Assert(id, gc.IsNil) - c.Assert(err, gc.ErrorMatches, "cannot post archive: cannot put archive blob: hash mismatch") -} - -func (s *suite) checkUploadArchive(c *gc.C, path, url, expectId string) { - // Open the archive and calculate its hash and size. - body, hash, size := archiveHashAndSize(c, path) - defer body.Close() - - // Post the archive. 
- id, err := csclient.UploadArchive(s.client, charm.MustParseReference(url), body, hash, size, -1) - c.Assert(err, gc.IsNil) - c.Assert(id.String(), gc.Equals, expectId) - - // Ensure the entity has been properly added to the db. - r, resultingId, resultingHash, resultingSize, err := s.client.GetArchive(id) - c.Assert(err, gc.IsNil) - defer r.Close() - c.Assert(resultingId, gc.DeepEquals, id) - c.Assert(resultingHash, gc.Equals, hash) - c.Assert(resultingSize, gc.Equals, size) -} - -func archiveHashAndSize(c *gc.C, path string) (r csclient.ReadSeekCloser, hash string, size int64) { - f, err := os.Open(path) - c.Assert(err, gc.IsNil) - h := sha512.New384() - size, err = io.Copy(h, f) - c.Assert(err, gc.IsNil) - _, err = f.Seek(0, 0) - c.Assert(err, gc.IsNil) - return f, fmt.Sprintf("%x", h.Sum(nil)), size -} - -func (s *suite) TestUploadCharmDir(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - id, err := s.client.UploadCharm(charm.MustParseReference("~charmers/utopic/wordpress"), ch) - c.Assert(err, gc.IsNil) - c.Assert(id.String(), gc.Equals, "cs:~charmers/utopic/wordpress-0") - s.checkUploadCharm(c, id, ch) -} - -func (s *suite) TestUploadCharmArchive(c *gc.C) { - ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") - id, err := s.client.UploadCharm(charm.MustParseReference("~charmers/trusty/wordpress"), ch) - c.Assert(err, gc.IsNil) - c.Assert(id.String(), gc.Equals, "cs:~charmers/trusty/wordpress-0") - s.checkUploadCharm(c, id, ch) -} - -func (s *suite) TestUploadCharmArchiveWithRevision(c *gc.C) { - id := charm.MustParseReference("~charmers/trusty/wordpress-42") - err := s.client.UploadCharmWithRevision( - id, - charmRepo.CharmDir("wordpress"), - 10, - ) - c.Assert(err, gc.IsNil) - ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") - s.checkUploadCharm(c, id, ch) - id.User = "" - id.Revision = 10 - s.checkUploadCharm(c, id, ch) -} - -func (s *suite) TestUploadCharmArchiveWithUnwantedRevision(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - _, err := s.client.UploadCharm(charm.MustParseReference("~charmers/bundle/wp-20"), ch) - c.Assert(err, gc.ErrorMatches, `revision specified in "cs:~charmers/bundle/wp-20", but should not be specified`) -} - -func (s *suite) TestUploadCharmErrorUnknownType(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - unknown := struct { - charm.Charm - }{ch} - id, err := s.client.UploadCharm(charm.MustParseReference("~charmers/trusty/wordpress"), unknown) - c.Assert(err, gc.ErrorMatches, `cannot open charm archive: cannot get the archive for entity type .*`) - c.Assert(id, gc.IsNil) -} - -func (s *suite) TestUploadCharmErrorOpenArchive(c *gc.C) { - // Since the internal code path is shared between charms and bundles, just - // using a charm for this test also exercises the same failure for bundles. - ch := charmRepo.CharmArchive(c.MkDir(), "wordpress") - ch.Path = "no-such-file" - id, err := s.client.UploadCharm(charm.MustParseReference("trusty/wordpress"), ch) - c.Assert(err, gc.ErrorMatches, `cannot open charm archive: open no-such-file: no such file or directory`) - c.Assert(id, gc.IsNil) -} - -func (s *suite) TestUploadCharmErrorArchiveTo(c *gc.C) { - // Since the internal code path is shared between charms and bundles, just - // using a charm for this test also exercises the same failure for bundles. 
- id, err := s.client.UploadCharm(charm.MustParseReference("trusty/wordpress"), failingArchiverTo{}) - c.Assert(err, gc.ErrorMatches, `cannot open charm archive: cannot create entity archive: bad wolf`) - c.Assert(id, gc.IsNil) -} - -type failingArchiverTo struct { - charm.Charm -} - -func (failingArchiverTo) ArchiveTo(io.Writer) error { - return errgo.New("bad wolf") -} - -func (s *suite) checkUploadCharm(c *gc.C, id *charm.Reference, ch charm.Charm) { - r, _, _, _, err := s.client.GetArchive(id) - c.Assert(err, gc.IsNil) - data, err := ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - result, err := charm.ReadCharmArchiveBytes(data) - c.Assert(err, gc.IsNil) - // Comparing the charm metadata is sufficient for ensuring the result is - // the same charm previously uploaded. - c.Assert(result.Meta(), jc.DeepEquals, ch.Meta()) -} - -func (s *suite) TestUploadBundleDir(c *gc.C) { - s.prepareBundleCharms(c) - b := charmRepo.BundleDir("wordpress-simple") - id, err := s.client.UploadBundle(charm.MustParseReference("~charmers/bundle/wordpress-simple"), b) - c.Assert(err, gc.IsNil) - c.Assert(id.String(), gc.Equals, "cs:~charmers/bundle/wordpress-simple-0") - s.checkUploadBundle(c, id, b) -} - -func (s *suite) TestUploadBundleArchive(c *gc.C) { - s.prepareBundleCharms(c) - path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") - b, err := charm.ReadBundleArchive(path) - c.Assert(err, gc.IsNil) - id, err := s.client.UploadBundle(charm.MustParseReference("~charmers/bundle/wp"), b) - c.Assert(err, gc.IsNil) - c.Assert(id.String(), gc.Equals, "cs:~charmers/bundle/wp-0") - s.checkUploadBundle(c, id, b) -} - -func (s *suite) TestUploadBundleArchiveWithUnwantedRevision(c *gc.C) { - s.prepareBundleCharms(c) - path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") - b, err := charm.ReadBundleArchive(path) - c.Assert(err, gc.IsNil) - _, err = s.client.UploadBundle(charm.MustParseReference("~charmers/bundle/wp-20"), b) - c.Assert(err, gc.ErrorMatches, `revision specified in "cs:~charmers/bundle/wp-20", but should not be specified`) -} - -func (s *suite) TestUploadBundleArchiveWithRevision(c *gc.C) { - s.prepareBundleCharms(c) - path := charmRepo.BundleArchivePath(c.MkDir(), "wordpress-simple") - b, err := charm.ReadBundleArchive(path) - c.Assert(err, gc.IsNil) - id := charm.MustParseReference("~charmers/bundle/wp-22") - err = s.client.UploadBundleWithRevision(id, b, 34) - c.Assert(err, gc.IsNil) - s.checkUploadBundle(c, id, b) - id.User = "" - id.Revision = 34 - s.checkUploadBundle(c, id, b) -} - -func (s *suite) TestUploadBundleErrorUploading(c *gc.C) { - // Uploading without specifying the series should return an error. - // Note that the possible upload errors are already extensively exercised - // as part of the client.uploadArchive tests. 
- id, err := s.client.UploadBundle( - charm.MustParseReference("~charmers/wordpress-simple"), - charmRepo.BundleDir("wordpress-simple"), - ) - c.Assert(err, gc.ErrorMatches, `no series specified in "cs:~charmers/wordpress-simple"`) - c.Assert(id, gc.IsNil) -} - -func (s *suite) TestUploadBundleErrorUnknownType(c *gc.C) { - b := charmRepo.BundleDir("wordpress-simple") - unknown := struct { - charm.Bundle - }{b} - id, err := s.client.UploadBundle(charm.MustParseReference("bundle/wordpress"), unknown) - c.Assert(err, gc.ErrorMatches, `cannot open bundle archive: cannot get the archive for entity type .*`) - c.Assert(id, gc.IsNil) -} - -func (s *suite) checkUploadBundle(c *gc.C, id *charm.Reference, b charm.Bundle) { - r, _, _, _, err := s.client.GetArchive(id) - c.Assert(err, gc.IsNil) - data, err := ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - result, err := charm.ReadBundleArchiveBytes(data) - c.Assert(err, gc.IsNil) - // Comparing the bundle data is sufficient for ensuring the result is - // the same bundle previously uploaded. - c.Assert(result.Data(), jc.DeepEquals, b.Data()) -} - -func (s *suite) TestDoAuthorization(c *gc.C) { - // Add a charm to be deleted. - err := s.client.UploadCharmWithRevision( - charm.MustParseReference("~charmers/utopic/wordpress-42"), - charmRepo.CharmArchive(c.MkDir(), "wordpress"), - 42, - ) - c.Assert(err, gc.IsNil) - - // Check that when we use incorrect authorization, - // we get an error trying to delete the charm - client := csclient.New(csclient.Params{ - URL: s.srv.URL, - User: s.serverParams.AuthUsername, - Password: "bad password", - }) - req, err := http.NewRequest("DELETE", "", nil) - c.Assert(err, gc.IsNil) - _, err = client.Do(req, "/~charmers/utopic/wordpress-42/archive") - c.Assert(err, gc.ErrorMatches, "invalid user name or password") - c.Assert(errgo.Cause(err), gc.Equals, params.ErrUnauthorized) - - // Check that it's still there. - err = s.client.Get("/~charmers/utopic/wordpress-42/expand-id", nil) - c.Assert(err, gc.IsNil) - - // Then check that when we use the correct authorization, - // the delete succeeds. - client = csclient.New(csclient.Params{ - URL: s.srv.URL, - User: s.serverParams.AuthUsername, - Password: s.serverParams.AuthPassword, - }) - req, err = http.NewRequest("DELETE", "", nil) - c.Assert(err, gc.IsNil) - resp, err := client.Do(req, "/~charmers/utopic/wordpress-42/archive") - c.Assert(err, gc.IsNil) - resp.Body.Close() - - // Check that it's now really gone. 
-	err = s.client.Get("/utopic/wordpress-42/expand-id", nil)
-	c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for "cs:utopic/wordpress-42"`)
-}
-
-var getWithBadResponseTests = []struct {
-	about       string
-	error       error
-	response    *http.Response
-	responseErr error
-	expectError string
-}{{
-	about:       "http client Get failure",
-	error:       errgo.New("round trip failure"),
-	expectError: "Get .*: round trip failure",
-}, {
-	about: "body read error",
-	response: &http.Response{
-		Status:        "200 OK",
-		StatusCode:    200,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Body:          ioutil.NopCloser(&errorReader{"body read error"}),
-		ContentLength: -1,
-	},
-	expectError: "cannot read response body: body read error",
-}, {
-	about: "badly formatted json response",
-	response: &http.Response{
-		Status:        "200 OK",
-		StatusCode:    200,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Body:          ioutil.NopCloser(strings.NewReader("bad")),
-		ContentLength: -1,
-	},
-	expectError: `cannot unmarshal response "bad": .*`,
-}, {
-	about: "badly formatted json error",
-	response: &http.Response{
-		Status:        "404 Not found",
-		StatusCode:    404,
-		Proto:         "HTTP/1.0",
-		ProtoMajor:    1,
-		ProtoMinor:    0,
-		Body:          ioutil.NopCloser(strings.NewReader("bad")),
-		ContentLength: -1,
-	},
-	expectError: `cannot unmarshal error response "bad": .*`,
-}, {
-	about: "error response with empty message",
-	response: &http.Response{
-		Status:     "404 Not found",
-		StatusCode: 404,
-		Proto:      "HTTP/1.0",
-		ProtoMajor: 1,
-		ProtoMinor: 0,
-		Body: ioutil.NopCloser(bytes.NewReader(mustMarshalJSON(&params.Error{
-			Code: "foo",
-		}))),
-		ContentLength: -1,
-	},
-	expectError: "error response with empty message .*",
-}}
-
-func (s *suite) TestGetWithBadResponse(c *gc.C) {
-	for i, test := range getWithBadResponseTests {
-		c.Logf("test %d: %s", i, test.about)
-		cl := csclient.New(csclient.Params{
-			URL: "http://0.1.2.3",
-			HTTPClient: &http.Client{
-				Transport: &cannedRoundTripper{
-					resp:  test.response,
-					error: test.error,
-				},
-			},
-		})
-		var result interface{}
-		err := cl.Get("/foo", &result)
-		c.Assert(err, gc.ErrorMatches, test.expectError)
-	}
-}
-
-var hyphenateTests = []struct {
-	val    string
-	expect string
-}{{
-	val:    "Hello",
-	expect: "hello",
-}, {
-	val:    "HelloThere",
-	expect: "hello-there",
-}, {
-	val:    "HelloHTTP",
-	expect: "hello-http",
-}, {
-	val:    "helloHTTP",
-	expect: "hello-http",
-}, {
-	val:    "hellothere",
-	expect: "hellothere",
-}, {
-	val:    "Long4Camel32WithDigits45",
-	expect: "long4-camel32-with-digits45",
-}, {
-	// The result here is equally dubious, but Go identifiers
-	// should not contain underscores.
-	val:    "With_Dubious_Underscore",
-	expect: "with_-dubious_-underscore",
-}}
-
-func (s *suite) TestHyphenate(c *gc.C) {
-	for i, test := range hyphenateTests {
-		c.Logf("test %d. %q", i, test.val)
-		c.Assert(csclient.Hyphenate(test.val), gc.Equals, test.expect)
-	}
-}
-
-func (s *suite) TestDo(c *gc.C) {
-	// Do is tested fairly comprehensively (but indirectly)
-	// in TestGet, so just a trivial smoke test here.
- url := charm.MustParseReference("~charmers/utopic/wordpress-42") - err := s.client.UploadCharmWithRevision( - url, - charmRepo.CharmArchive(c.MkDir(), "wordpress"), - 42, - ) - c.Assert(err, gc.IsNil) - err = s.client.PutExtraInfo(url, map[string]interface{}{ - "foo": "bar", - }) - c.Assert(err, gc.IsNil) - - req, _ := http.NewRequest("GET", "", nil) - resp, err := s.client.Do(req, "/wordpress/meta/extra-info/foo") - c.Assert(err, gc.IsNil) - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, `"bar"`) -} - -var metaBadTypeTests = []struct { - result interface{} - expectError string -}{{ - result: "", - expectError: "expected pointer, not string", -}, { - result: new(string), - expectError: `expected pointer to struct, not \*string`, -}, { - result: new(struct{ Embed }), - expectError: "anonymous fields not supported", -}, { - expectError: "expected valid result pointer, not nil", -}} - -func (s *suite) TestMetaBadType(c *gc.C) { - id := charm.MustParseReference("wordpress") - for _, test := range metaBadTypeTests { - _, err := s.client.Meta(id, test.result) - c.Assert(err, gc.ErrorMatches, test.expectError) - } -} - -type Embed struct{} -type embed struct{} - -func (s *suite) TestMeta(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - url := charm.MustParseReference("~charmers/utopic/wordpress-42") - purl := charm.MustParseReference("utopic/wordpress-42") - err := s.client.UploadCharmWithRevision(url, ch, 42) - c.Assert(err, gc.IsNil) - - // Put some extra-info. - err = s.client.PutExtraInfo(url, map[string]interface{}{ - "attr": "value", - }) - c.Assert(err, gc.IsNil) - - tests := []struct { - about string - id string - expectResult interface{} - expectError string - expectErrorCode params.ErrorCode - }{{ - about: "no fields", - id: "utopic/wordpress", - expectResult: &struct{}{}, - }, { - about: "single field", - id: "utopic/wordpress", - expectResult: &struct { - CharmMetadata *charm.Meta - }{ - CharmMetadata: ch.Meta(), - }, - }, { - about: "three fields", - id: "wordpress", - expectResult: &struct { - CharmMetadata *charm.Meta - CharmConfig *charm.Config - ExtraInfo map[string]string - }{ - CharmMetadata: ch.Meta(), - CharmConfig: ch.Config(), - ExtraInfo: map[string]string{"attr": "value"}, - }, - }, { - about: "tagged field", - id: "wordpress", - expectResult: &struct { - Foo *charm.Meta `csclient:"charm-metadata"` - Attr string `csclient:"extra-info/attr"` - }{ - Foo: ch.Meta(), - Attr: "value", - }, - }, { - about: "id not found", - id: "bogus", - expectResult: &struct{}{}, - expectError: `cannot get "/bogus/meta/any": no matching charm or bundle for "cs:bogus"`, - expectErrorCode: params.ErrNotFound, - }, { - about: "unmarshal into invalid type", - id: "wordpress", - expectResult: new(struct { - CharmMetadata []string - }), - expectError: `cannot unmarshal charm-metadata: json: cannot unmarshal object into Go value of type \[]string`, - }, { - about: "unmarshal into struct with unexported fields", - id: "wordpress", - expectResult: &struct { - unexported int - CharmMetadata *charm.Meta - // Embedded anonymous fields don't get tagged as unexported - // due to https://code.google.com/p/go/issues/detail?id=7247 - // TODO fix in go 1.5. 
- // embed - }{ - CharmMetadata: ch.Meta(), - }, - }, { - about: "metadata not appropriate for charm", - id: "wordpress", - expectResult: &struct { - CharmMetadata *charm.Meta - BundleMetadata *charm.BundleData - }{ - CharmMetadata: ch.Meta(), - }, - }} - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - // Make a result value of the same type as the expected result, - // but empty. - result := reflect.New(reflect.TypeOf(test.expectResult).Elem()).Interface() - id, err := s.client.Meta(charm.MustParseReference(test.id), result) - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - if code, ok := errgo.Cause(err).(params.ErrorCode); ok { - c.Assert(code, gc.Equals, test.expectErrorCode) - } else { - c.Assert(test.expectErrorCode, gc.Equals, params.ErrorCode("")) - } - c.Assert(id, gc.IsNil) - continue - } - c.Assert(err, gc.IsNil) - c.Assert(id, jc.DeepEquals, purl) - c.Assert(result, jc.DeepEquals, test.expectResult) - } -} - -func (s *suite) TestPutExtraInfo(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - url := charm.MustParseReference("~charmers/utopic/wordpress-42") - err := s.client.UploadCharmWithRevision(url, ch, 42) - c.Assert(err, gc.IsNil) - - // Put some info in. - info := map[string]interface{}{ - "attr1": "value1", - "attr2": []interface{}{"one", "two"}, - } - err = s.client.PutExtraInfo(url, info) - c.Assert(err, gc.IsNil) - - // Verify that we get it back OK. - var val struct { - ExtraInfo map[string]interface{} - } - _, err = s.client.Meta(url, &val) - c.Assert(err, gc.IsNil) - c.Assert(val.ExtraInfo, jc.DeepEquals, info) - - // Put some more in. - err = s.client.PutExtraInfo(url, map[string]interface{}{ - "attr3": "three", - }) - c.Assert(err, gc.IsNil) - - // Verify that we get all the previous results and the new value. - info["attr3"] = "three" - _, err = s.client.Meta(url, &val) - c.Assert(err, gc.IsNil) - c.Assert(val.ExtraInfo, jc.DeepEquals, info) -} - -func (s *suite) TestPutExtraInfoWithError(c *gc.C) { - err := s.client.PutExtraInfo(charm.MustParseReference("wordpress"), map[string]interface{}{"attr": "val"}) - c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for "cs:wordpress"`) - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) -} - -type errorReader struct { - error string -} - -func (e *errorReader) Read(buf []byte) (int, error) { - return 0, errgo.New(e.error) -} - -type cannedRoundTripper struct { - resp *http.Response - error error -} - -func (r *cannedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return r.resp, r.error -} - -func mustMarshalJSON(x interface{}) []byte { - data, err := json.Marshal(x) - if err != nil { - panic(err) - } - return data -} - -func (s *suite) TestLog(c *gc.C) { - logs := []struct { - typ params.LogType - level params.LogLevel - message string - urls []*charm.Reference - }{{ - typ: params.IngestionType, - level: params.InfoLevel, - message: "ingestion info", - urls: nil, - }, { - typ: params.LegacyStatisticsType, - level: params.ErrorLevel, - message: "statistics error", - urls: []*charm.Reference{ - charm.MustParseReference("cs:mysql"), - charm.MustParseReference("cs:wordpress"), - }, - }} - - for _, log := range logs { - err := s.client.Log(log.typ, log.level, log.message, log.urls...) 
- c.Assert(err, gc.IsNil) - } - var result []*params.LogResponse - err := s.client.Get("/log", &result) - c.Assert(err, gc.IsNil) - c.Assert(result, gc.HasLen, len(logs)) - for i, l := range result { - c.Assert(l.Type, gc.Equals, logs[len(logs)-(1+i)].typ) - c.Assert(l.Level, gc.Equals, logs[len(logs)-(1+i)].level) - var msg string - err := json.Unmarshal([]byte(l.Data), &msg) - c.Assert(err, gc.IsNil) - c.Assert(msg, gc.Equals, logs[len(logs)-(1+i)].message) - c.Assert(l.URLs, jc.DeepEquals, logs[len(logs)-(1+i)].urls) - } -} - -func (s *suite) TestMacaroonAuthorization(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - curl := charm.MustParseReference("~charmers/utopic/wordpress-42") - purl := charm.MustParseReference("utopic/wordpress-42") - err := s.client.UploadCharmWithRevision(curl, ch, 42) - c.Assert(err, gc.IsNil) - - err = s.client.Put("/"+curl.Path()+"/meta/perm/read", []string{"bob"}) - c.Assert(err, gc.IsNil) - - // Create a client without basic auth credentials - client := csclient.New(csclient.Params{ - URL: s.srv.URL, - }) - - var result struct{ IdRevision struct{ Revision int } } - // TODO 2015-01-23: once supported, rewrite the test using POST requests. - _, err = client.Meta(purl, &result) - c.Assert(err, gc.ErrorMatches, `cannot get "/utopic/wordpress-42/meta/any\?include=id-revision": cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) - c.Assert(httpbakery.IsDischargeError(errgo.Cause(err)), gc.Equals, true) - - s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { - return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil - } - _, err = client.Meta(curl, &result) - c.Assert(err, gc.IsNil) - c.Assert(result.IdRevision.Revision, gc.Equals, curl.Revision) - - visitURL := "http://0.1.2.3/visitURL" - s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { - return nil, &httpbakery.Error{ - Code: httpbakery.ErrInteractionRequired, - Message: "interaction required", - Info: &httpbakery.ErrorInfo{ - VisitURL: visitURL, - WaitURL: "http://0.1.2.3/waitURL", - }} - } - - client = csclient.New(csclient.Params{ - URL: s.srv.URL, - VisitWebPage: func(vurl *url.URL) error { - c.Check(vurl.String(), gc.Equals, visitURL) - return fmt.Errorf("stopping interaction") - }}) - - _, err = client.Meta(purl, &result) - c.Assert(err, gc.ErrorMatches, `cannot get "/utopic/wordpress-42/meta/any\?include=id-revision": cannot get discharge from ".*": cannot start interactive session: stopping interaction`) - c.Assert(result.IdRevision.Revision, gc.Equals, curl.Revision) - c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) -} - -func (s *suite) TestLogin(c *gc.C) { - ch := charmRepo.CharmDir("wordpress") - url := charm.MustParseReference("~charmers/utopic/wordpress-42") - purl := charm.MustParseReference("utopic/wordpress-42") - err := s.client.UploadCharmWithRevision(url, ch, 42) - c.Assert(err, gc.IsNil) - - err = s.client.Put("/"+url.Path()+"/meta/perm/read", []string{"bob"}) - c.Assert(err, gc.IsNil) - client := csclient.New(csclient.Params{ - URL: s.srv.URL, - }) - - var result struct{ IdRevision struct{ Revision int } } - _, err = client.Meta(purl, &result) - c.Assert(err, gc.NotNil) - - // Try logging in when the discharger fails. - err = client.Login() - c.Assert(err, gc.ErrorMatches, `cannot discharge login macaroon: cannot get discharge from ".*": third party refused discharge: cannot discharge: no discharge`) - - // Allow the discharge. 
- s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { - return []checkers.Caveat{checkers.DeclaredCaveat("username", "bob")}, nil - } - err = client.Login() - c.Assert(err, gc.IsNil) - - // Change discharge so that we're sure the cookies are being - // used rather than the discharge mechanism. - s.discharge = func(cond, arg string) ([]checkers.Caveat, error) { - return nil, fmt.Errorf("no discharge") - } - - // Check that the request still works. - _, err = client.Meta(purl, &result) - c.Assert(err, gc.IsNil) - c.Assert(result.IdRevision.Revision, gc.Equals, url.Revision) -} === removed file 'src/gopkg.in/juju/charmstore.v4/csclient/export_test.go' --- src/gopkg.in/juju/charmstore.v4/csclient/export_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/csclient/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,9 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package csclient - -var ( - Hyphenate = hyphenate - UploadArchive = (*Client).uploadArchive -) === removed file 'src/gopkg.in/juju/charmstore.v4/csclient/package_test.go' --- src/gopkg.in/juju/charmstore.v4/csclient/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/csclient/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package csclient_test - -import ( - "testing" - - jujutesting "github.com/juju/testing" -) - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} === removed file 'src/gopkg.in/juju/charmstore.v4/dependencies.tsv' --- src/gopkg.in/juju/charmstore.v4/dependencies.tsv 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/dependencies.tsv 1970-01-01 00:00:00 +0000 @@ -1,26 +0,0 @@ -github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z -github.com/juju/blobstore git 337aa7d5d712728d181dbda2547a6556d4189626 2015-05-08T07:43:36Z -github.com/juju/errors git 036046bfdccf6f576e2e5dec7f7878597bcaebe7 2015-02-11T20:59:49Z -github.com/juju/gojsonpointer git 0154bf5a168b672d8c97d8dd83a54cb60cd088e8 2014-07-18T03:59:30Z -github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z -github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z -github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z -github.com/juju/jujusvg git 28683402583926ce903491c14a07cdc5cb371adb 2015-04-10T08:55:05Z -github.com/juju/loggo git dc8e19f7c70a62a59c69c40f85b8df09ff20742c 2014-11-17T04:05:26Z -github.com/juju/names git ce4ecb2967822062fc606e733919c677c584ab7e 2015-02-20T07:57:36Z -github.com/juju/schema git 27a52be50766490a6fd3531865095fda6c0eeb6d 2014-07-23T04:23:18Z -github.com/juju/testing git c7042d828963caa252862b759ef56ada297e8323 2015-04-21T10:32:42Z -github.com/juju/txn git e02f26c56cfb81c7c1236df499deebb0369bd97c 2014-09-25T11:49:22Z -github.com/juju/utils git a90aa2e02b9e7fe354ab816e05b1e0a77f27242d 2015-02-23T16:02:32Z -github.com/juju/xml git 91535ba18a6afd756e38a40c91fea0ed8e5dbaa6 2014-12-04T14:59:31Z -github.com/julienschmidt/httprouter git b59a38004596b696aca7aa2adccfa68760864d86 2015-04-08T17:04:29Z -golang.org/x/crypto git 4ed45ec682102c643324fae5dff8dab085b6c300 2015-01-12T22:01:33Z -golang.org/x/net git 7dbad50ab5b31073856416cdcfeb2796d682f844 2015-03-20T03:46:21Z -gopkg.in/check.v1 git 64131543e7896d5bcc6bd5a76287eb75ea96c673 
2014-10-24T13:38:53Z
-gopkg.in/errgo.v1 git 81357a83344ddd9f7772884874e5622c2a3da21c 2014-10-13T17:33:38Z
-gopkg.in/juju/charm.v5 git 39463053128b672308c459958d00f260b58ce79e 2015-05-14T10:50:35Z
-gopkg.in/macaroon-bakery.v0 git 9593b80b01ba04b519769d045dffd6abd827d2fd 2015-04-10T07:46:55Z
-gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z
-gopkg.in/mgo.v2 git c6a7dce14133ccac2dcac3793f1d6e2ef048503a 2015-01-24T11:37:54Z
-gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z
-launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18

=== removed directory 'src/gopkg.in/juju/charmstore.v4/docs'
=== removed file 'src/gopkg.in/juju/charmstore.v4/docs/API.md'
--- src/gopkg.in/juju/charmstore.v4/docs/API.md	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/docs/API.md	1970-01-01 00:00:00 +0000
@@ -1,1921 +0,0 @@
-# Charm store API
-
-The current live API lives at https://api.jujucharms.com/charmstore/v4
-
-## Intro
-
-The charm store stores and indexes charms and bundles. A charm or bundle is
-referred to by a charm store id which can take one of the following two forms:
-
-* ~*owner*/*series*/*name*(-*revision*)
-* *series*/*name*(-*revision*)
-
-*Owner* is the name of the user that owns the charm.
-*Series* is one of a small number of known possible series for charms
-(currently just the Ubuntu series names) or the special name bundle to signify
-that the charm id refers to a charm bundle.
-
-A charm store id referring to a charm (not a bundle) can also use one of the
-following two forms, omitting the series:
-
-* ~*owner*/*name*(-*revision*)
-* *name*(-*revision*)
-
-In this case the store will look at all charms with the same *owner* and
-*name*, and choose one according to its preference (for example, it currently
-prefers the latest LTS series).
-
-### Data format
-
-All endpoints that do not produce binary data produce a single JSON object as
-their result. These will be described in terms of the Go types that produce and
-consume the format, along with an example. A charm id is represented as a
-`charm.Reference` type.
-
-
-### Errors
-
-If any request returns an error, it will produce it in the following form:
-
-```go
-type Error struct {
-	Message string
-	Code    string
-	Info    map[string] Error `json:",omitempty"`
-}
-```
-
-Example:
-
-```json
-{
-  "Message": "unexpected Content-Type \"image/jpeg\"; expected \"application/json\"",
-  "Code": "bad request"
-}
-```
-
-Note: this format is compatible with the error results used by juju-core.
-Currently defined codes are the following:
-
-* not found
-* metadata not found
-* forbidden
-* bad request
-* duplicate upload
-* multiple errors
-* unauthorized
-* method not allowed
-
-The `Info` field is set when a request returns a "multiple errors" error code;
-currently the only two endpoints that can do so are "/meta" and "*id*/meta/any".
-Each element in `Info` corresponds to an element in the PUT request, and holds
-the error for that element. See those endpoints for examples.
-
-### Bulk requests and missing metadata
-
-There are two forms of "bulk" API request that can return information about
-several items at once. The `/meta/any` endpoint (along with some others) has a
-set of "include" flags that specify metadata to return. The `/meta` endpoint
-has a set of "id" flags that specify a set of ids to return data on.
-
-In both of these cases, when the relevant data does not exist, the result will
-be omitted from the returned map.
For example, a GET of
-`/meta/archive-size?id=something` will return an empty map if the id
-"something" is not found; a GET of
-`/precise/wordpress-34/meta/any?include=bundle-metadata` will return an empty
-map if the id "precise/wordpress-34" refers to a bundle rather than a charm.
-
-For the singular forms of these endpoints, a 404 "metadata not found" error
-will be returned when this happens.
-
-In the `meta/any` GET bulk request, if some data requires authorization, the
-default behavior is to return an authorization required response. Clients
-interested in public data only can include an `ignore-auth=1` query so that only
-public information is returned. In this case, results requiring authorization
-(if any) will be omitted.
-
-### Versioning
-
-The version of the API is indicated by an initial "vN" prefix to the path.
-Later versions will increment this number. This also means we can potentially
-serve backward-compatible paths to juju-core. All paths in this document
-should be read as if they had a "v4" prefix. For example, the
-`wordpress/meta/charm-metadata` path is actually at
-`v4/wordpress/meta/charm-metadata`.
-
-
-### Boolean values
-
-Where a flag specifies a boolean property, the value must be either "1",
-signifying true, or empty or "0", signifying false.
-
-## Requests
-
-### Expand-id
-
-#### GET *id*/expand-id
-
-The expand-id path expands a general id into a set of specific ids. It strips
-any revision number and series from the id, and returns a slice of all the
-possible ids matched by that, including all the versions and series.
-
-```go
-[]Id
-
-type Id struct {
-	Id string
-}
-```
-
-Example: `GET wordpress/expand-id`
-
-```json
-[
-    {"Id": "precise/wordpress-1"},
-    {"Id": "precise/wordpress-2"},
-    {"Id": "trusty/wordpress-1"},
-    {"Id": "trusty/wordpress-2"}
-]
-```
-
-Example: `GET precise/wordpress-34/expand-id`
-
-```json
-[
-    {"Id": "precise/wordpress-1"},
-    {"Id": "precise/wordpress-2"},
-    {"Id": "trusty/wordpress-1"},
-    {"Id": "trusty/wordpress-2"}
-]
-```
-
-
-### Archive
-
-#### GET *id*/archive
-
-The `/archive` path returns the raw archive zip file for the charm with the
-given charm id. The response header includes the SHA-384 hash of the archive
-(Content-Sha384) and the fully qualified entity id (Entity-Id).
-
-Example: `GET wordpress/archive`
-
-Any additional elements attached to the `/archive` path retrieve the file from
-the charm or bundle's zip file. The `Content-Sha384` header field in the
-response will hold the hash checksum of the archive.
-
-#### GET *id*/archive/*path*
-
-Retrieve a file corresponding to *path* in the charm or bundle's zip archive.
-
-Example: `GET trusty/wordpress/archive/config.yaml`
-
-#### POST *id*/archive
-
-This uploads the given charm or bundle in zip format.
-
-```
-POST id/archive?hash=sha384hash
-```
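-
-For illustration, here is one way a client might drive this endpoint from Go.
-This is a minimal sketch rather than part of any published client: the
-`uploadArchive` helper, the base URL, the charm id and the zip content type
-are invented for the example, and a real store will normally also require
-authorization. The helper computes the SHA-384 hex digest that the hash query
-parameter expects, as described below.
-
-```go
-package main
-
-import (
-	"crypto/sha512"
-	"fmt"
-	"io"
-	"net/http"
-	"os"
-)
-
-func main() {
-	err := uploadArchive("https://api.jujucharms.com/charmstore/v4",
-		"~someuser/trusty/mycharm", "mycharm.zip")
-	if err != nil {
-		panic(err)
-	}
-}
-
-// uploadArchive posts the archive at path to baseURL/id/archive,
-// passing the SHA-384 hex digest of the content in the hash query
-// parameter. Authorization is omitted from this sketch.
-func uploadArchive(baseURL, id, path string) error {
-	f, err := os.Open(path)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	// Hash the content first, then rewind so the same file can be
-	// used as the request body.
-	h := sha512.New384()
-	size, err := io.Copy(h, f)
-	if err != nil {
-		return err
-	}
-	if _, err := f.Seek(0, 0); err != nil {
-		return err
-	}
-	u := fmt.Sprintf("%s/%s/archive?hash=%x", baseURL, id, h.Sum(nil))
-	req, err := http.NewRequest("POST", u, f)
-	if err != nil {
-		return err
-	}
-	req.Header.Set("Content-Type", "application/zip")
-	req.ContentLength = size
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		return err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		return fmt.Errorf("unexpected response: %v", resp.Status)
-	}
-	return nil
-}
-```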
- -The id specified must specify the series and must not contain a revision -number. The hash flag must specify the SHA384 hash of the uploaded archive in -hexadecimal format. If the same content has already been uploaded, the response -will return immediately without reading the entire body. - -The charm or bundle is verified before being made available. - -The response holds the full charm/bundle id including the revision number. - -```go -type UploadedId struct { - Id string -} -``` - -Example response body: - -```json -{ - "Id": "precise/wordpress-24" -} -``` - -#### DELETE *id*/archive - -This deletes the given charm or bundle with the given id. If the ID is not -fully specified, the charm series or revisions are not resolved and the charm -is not deleted. In order to delete the charm, the ID must include series as -well as revisions. In order to delete all versions of the charm, use -`/expand-id` and iterate on all elements in the result. - -### Visual diagram - -#### GET *id*/diagram.svg - -This returns a scalable vector-graphics image representing the entity with the -given id. This will return a not-found error for charms. - -#### GET *id*/icon.svg - -This returns the SVG image of the charm's icon. This reports a not-found error -for bundles. Unlike the `archive/icon.svg` where 404 is returned in case an -icon does not exist, this endpoint returns the default icon. - -#### GET *id*/readme - -This returns the README. - -### Promulgation - -#### PUT *id*/promulgate - -A PUT to ~*user*/*anyseries*/*name*-*anyrevision* sets whether entities -with the id *x*/*name* are considered to be aliases -for ~*user*/*x*/*name* for all series *x*. The series -and revision in the id are ignored (except that an -entity must exist that matches the id). - -If Promulgate is true, it means that any new charms published -to ~*user*/*x*/*name* will also be given the alias -*x*/*name*. The latest revision for all ids ~*user*/*anyseries*/*name* -will also be aliased likewise. - -If Promulgate is false, any new charms published -to ~*user*/*anyseries*/*name* will not be given a promulgated -alias, but no change is made to any existing aliases. - -The promulgated status can be retrieved from the -promulgated meta endpoint. - -```go -type PromulgateRequest struct { - Promulgate bool -} -``` - -Example: `PUT ~charmers/precise/wordpress-23/promulgate` - -Request body: -```json -{ - "Promulgate" : true, -} -``` - -### Stats - -#### GET stats/counter/... - -This endpoint can be used to retrieve stats related to entities. - -
-```
-GET stats/counter/key[:key]...?[by=unit][&start=date][&stop=date][&list=1]
-```
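-
-As an illustration, the counters can be consumed with a plain HTTP GET and a
-JSON decode. This sketch is not part of any published client: the key queried
-is just an example, and the `Statistic` struct simply mirrors the response
-type shown later in this section.
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-)
-
-// Statistic mirrors an element of the []Statistic response documented below.
-type Statistic struct {
-	Key   string `json:",omitempty"`
-	Date  string `json:",omitempty"`
-	Count int64
-}
-
-func main() {
-	// Weekly download counts, listed per key, for all trusty entities.
-	url := "https://api.jujucharms.com/charmstore/v4/stats/counter/" +
-		"archive-download:trusty:*?by=week&list=1"
-	resp, err := http.Get(url)
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-	var stats []Statistic
-	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
-		panic(err)
-	}
-	for _, s := range stats {
-		fmt.Printf("%s %s %d\n", s.Key, s.Date, s.Count)
-	}
-}
-```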
- -The stats path allows the retrieval of counts of operations in a general way. A -statistic is composed of an ordered tuple of keys: - -
-```
-kind:series:name:user
-```
-Operations on the store increment counts associated with a specific tuple, -determined by the operation and the charm being operated on. - -When querying statistics, it is possible to aggregate statistics by using a -`\*` as the last tuple element, standing for all tuples with the given prefix. -For example, `missing:\*` will retrieve the counts for all operations of kind -"missing", regardless of the series, name or user. - -If the list flag is specified, counts for all next level keys will be listed. - For example, a query for `stats/counter/download:*?list=1&by=week` will show - all the download counts for each series for each week. - -If a date range is specified, the returned counts will be restricted to the -given date range. Dates are specified in the form "yyyy-mm-dd". If the `by` -flag is specified, one count is shown for each unit in the specified period, -where unit can be `week` or `day`. - -Possible kinds are: - -* archive-download -* archive-delete -* archive-upload -* archive-failed-upload - -```go -[]Statistic - -type Statistic struct { - Key string `json:",omitempty"` - Date string `json:",omitempty"` - Count int64 -} -``` - -Example: `GET "stats/counter/missing:trusty:*"` - -```json -[ - {"Count": 1917} -] -``` - -Example: -`GET stats/counter/download/archive-download:*?by=week&list=1&start=2014-03-01` - -```json -[ - { - "Key": "charm-bundle:precise:*", - "Date": "2014-06-08", - "Count": 2715 - }, { - "Key": "charm-bundle:trusty:*", - "Date": "2014-06-08", - "Count": 2672 - }, { - "Key": "charm-bundle:oneiric:*", - "Date": "2014-06-08", - "Count": 14 - }, { - "Key": "charm-bundle:quantal:*", - "Date": "2014-06-08", - "Count": 1 - }, { - "Key": "charm-bundle:trusty:*", - "Date": "2014-06-15", - "Count": 3835 - }, { - "Key": "charm-bundle:precise:*", - "Date": "2014-06-15", - "Count": 3389 - } -] -``` - -**Update**: -We need to provide aggregated stats for downloads: -* promulgated and ~user counterpart charms should have the same download stats. - -### Meta - -#### GET meta - -The meta path returns an array of all the path names under meta, excluding the -`meta/any` path, as suitable for passing as "include=" flags to paths that -allow those. Note that the result does not include sub-paths of extra-info -because these vary according to each charm or bundle. - -Example: `GET /meta` - -```json -[ - "archive-size", - "archive-upload-time", - "bundle-machine-count", - "bundle-metadata", - "bundle-unit-count", - "bundles-containing", - "charm-actions", - "charm-config", - "charm-metadata", - "charm-related", - "extra-info", - "hash", - "hash256", - "id", - "id-name", - "id-revision", - "id-series", - "id-user", - "manifest", - "promulgated", - "revision-info", - "stats", - "tags" -] -``` - -#### GET meta/*endpoint* - -This endpoint allows a user to query any number of IDs for metadata. -
-```
-GET meta/endpoint?id=id0[&id=id1...][otherflags]
-```
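-
-A sketch of issuing such a bulk request from Go; the ids and the
-`archive-size` endpoint match the example below, while the anonymous struct
-used for decoding is invented for the example.
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/url"
-)
-
-func main() {
-	// Ask for the archive sizes of two charms in one request.
-	v := url.Values{"id": []string{"wordpress", "mysql"}}
-	resp, err := http.Get(
-		"https://api.jujucharms.com/charmstore/v4/meta/archive-size?" + v.Encode())
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-	// Ids that were not found are simply absent from the map.
-	var sizes map[string]struct{ Size int64 }
-	if err := json.NewDecoder(resp.Body).Decode(&sizes); err != nil {
-		panic(err)
-	}
-	for id, s := range sizes {
-		fmt.Printf("%s: %d bytes\n", id, s.Size)
-	}
-}
-```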
- -This call is equivalent to calling "*id*/meta" for each id separately. The -result holds an element for each id in the request with the resulting metadata -exactly as returned by "GET *id*/meta/*endpoint*[?*otherflags*]". The map keys -are the ids exactly as specified in the request, although they are resolved to -fill in series and revision as usual when fetching the metadata. Any ids that -are not found, or with non-relevant metadata, will be omitted. - -```go -map[string] interface{} -``` - -Example: `GET meta/archive-size?id=wordpress&id=mysql` - -```json -{ - "wordpress": { - "Size": 1234 - }, - "mysql" : { - "Size": 4321 - } -} -``` - -Example: `GET /meta/any?include=archive-size&include=extra-info/featured&id=wordpress&id=mysql` - -```json -{ - "wordpress": { - "Id": "precise/wordpress-3", - "Meta": { - "archive-size": { - "Size": 1234 - }, - "extra-info/featured": true - } - }, - "mysql" : { - "Id": "precise/mysql-23", - "Meta": { - "archive-size": { - "Size": 4321 - }, - "extra-info/featured": true - } - } -} -``` - -#### PUT meta/*endpoint* - -A PUT to this endpoint allows the metadata endpoint of several ids to be -updated. The request body is as specified in the result of the above GET -request. The ids in the body specify the ids that will be updated. If there is -a failure, the error code will be "multiple errors", and the Info field will -holds one entry for each id in the request body that failed, holding the error -for that id. If there are no errors, PUT endpoints usually return an empty body -in the response. - -Example: `PUT meta/extra-info/featured` - -Request body: -```json -{ - "precise/wordpress-23" : true, - "precise/mysql-53" : true, - "precise/wordpress-22" : false, -} -``` - -Example: `PUT meta/any` - -Request body: -```json -{ - "precise/wordpress-23": { - "Meta": { - "extra-info/featured": true, - "extra-info/revision-info": "12dfede4ee23", - "bad-metaname": 3235 - } - }, - "trusty/mysql-23": { - "Meta": { - "extra-info/featured": false, - } - } -} -``` - -Response body (with HTTP status 500): -```json -{ - "Message": "multiple errors (1) found", - "Code": "multiple errors", - "Info": { - "precise/wordpress-23": { - "Message": "multiple errors", - "Code": "multiple errors", - "Info": { - "bad-metaname": { - "Message": "metadata not found", - "Code": "not found" - } - } - } - } -} -``` - -If the request succeeds, a 200 OK status code is returned with an empty -response body. - -#### GET *id*/meta - -This path returns the same information as the meta path. The results are the -same regardless of the actual id. - -Example: `GET foo/meta` - -```json -[ - "archive-size", - "archive-upload-time", - "bundle-machine-count", - "bundle-metadata", - "bundle-unit-count", - "bundles-containing", - "charm-actions", - "charm-config", - "charm-metadata", - "charm-related", - "extra-info", - "id", - "id-name", - "id-revision", - "id-series", - "id-user", - "manifest", - "promulgated", - "revision-info", - "stats", - "tags" -] -``` - -#### GET *id*/meta/any - -
-```
-GET id/meta/any?[include=meta[&include=meta...]]
-```
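-
-A minimal sketch of consuming this endpoint from Go. The `metaResponse`
-struct is illustrative rather than a published type: it mirrors the `Meta`
-response type documented below, keeping each include as raw JSON so that each
-one can be decoded separately.
-
-```go
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"net/http"
-)
-
-// metaResponse mirrors the Meta result type shown below.
-type metaResponse struct {
-	Id   string                     `json:",omitempty"`
-	Meta map[string]json.RawMessage `json:",omitempty"`
-}
-
-func main() {
-	resp, err := http.Get("https://api.jujucharms.com/charmstore/v4/" +
-		"wordpress/meta/any?include=archive-size")
-	if err != nil {
-		panic(err)
-	}
-	defer resp.Body.Close()
-	var m metaResponse
-	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
-		panic(err)
-	}
-	var size struct{ Size int64 }
-	if err := json.Unmarshal(m.Meta["archive-size"], &size); err != nil {
-		panic(err)
-	}
-	fmt.Println(m.Id, size.Size)
-}
-```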
- -The `meta/any` path returns requested metadata information on the given id. If -the id is non-specific, the latest revision and preferred series for the id -will be assumed. - -Other metadata can be requested by specifying one or more `include` flags. The -value of each meta must be the name of one of the path elements defined under -the `/meta` path (for example: `charm-config`, `charm-meta`, `manifest`) and -causes the desired metadata to be included in the Meta field, keyed by meta. If -there is no metadata for the given meta path, the element will be omitted (for -example, if bundle-specific data is requested for a charm id). - -The `any` path may not itself be the subject of an include directive. It is -allowed to specify "charm-" or "bundle-"" specific metadata paths -- if the id -refers to a charm then bundle-specific metadata will be omitted and vice versa. - -Various other paths use the same `include` mechanism to allow retrieval of -arbitrary metadata. - -```go -type Meta struct { - Id string `json:",omitempty"` - Meta map[string] interface{} `json:",omitempty"` -} -``` - -Example: `GET wordpress/meta/any` - -```json -{ - "Id": "trusty/wordpress-32" -} -``` - -Example: `GET ubuntu/meta/any?include=archive-size&include=extra-info/featured` - -```json -{ - "Id": "trusty/ubuntu-3", - "Meta": { - "archive-size": { - "Size": 7580 - }, - "extra-info/featured": true - } -} -``` - -#### PUT *id*/meta/any - -This endpoint allows the updating of several metadata elements at once. These -must support PUT requests. The body of the PUT request is in the same form as -returned by the above GET request, except with the Id field omitted. The -elements inside the Meta field specify which meta endpoints will be updated. If -one or more of the update fails, the resulting error will contain an Info field -that has an entry for each update that fails, keyed by the endpoint name. - -Example: `PUT ubuntu/meta/any` - -Request body: -```json -{ - "Meta": { - "extra-info": { - "revision-info": "a46f45649f0d0e0b" - }, - "extra-info/featured": true - } -} -``` - -Example: `PUT ubuntu/meta/any` - -Request body: -```json -{ - "Meta": { - "extra-info/featured": false, - "archive-size": 12354, - } -} -``` - -Response body: -```json -{ - "Message": "multiple errors", - "Code": "multiple errors", - "Info": { - "archive-size": { - "Message": "method not allowed", - "Code": "bad request", - } - } -} -``` - -#### GET *id*/meta/charm-metadata - -The `/meta/charm.metadata` path returns the contents of the charm metadata file -for a charm. The id must refer to a charm, not a bundle. - -```go -type CharmMetadata struct { - Summary string - Description string - Subordinate bool `json:",omitempty"` - // Provides and Requires map from the relation name to - // information about the relation. - Provides map[string]Relation `json:",omitempty"` - Requires map[string]Relation `json:",omitempty"` - Peers map[string]Relation `json:",omitempty"` - Tags []string `json:",omitempty"` -} - -type Relation struct { - Interface string - Optional bool `json:",omitempty"` - Limit int `json:",omitempty"` - Scope RelationScope -} - -type RelationRole string -type RelationScope string -``` - -The possible values of a `RelationScope` are - -* global -* container - -Example: `GET wordpress/meta/charm-metadata` - -```json -{ - "Summary": "WordPress is a full featured web blogging tool, this charm deploys it.", - "Description": "This will install and setup WordPress optimized to run in the cloud. 
This install, in particular, will \n place Ngnix and php-fpm configured to scale horizontally with Nginx's reverse proxy\n", - "Provides": { - "website": { - "Interface": "http", - "Scope": "global" - } - }, - "Requires": { - "cache": { - "Interface": "cache", - "Scope": "global" - }, - "db": { - "Interface": "db", - "Scope": "global" - } - }, - "Peers": { - "loadbalancer": { - "Interface": "reversenginx", - "Scope": "global" - } - }, - "Tags": [ - "applications" - ] -} -``` - -#### GET *id*/meta/bundle-metadata - -The `meta/bundle-metadata` path returns the contents of the bundle metadata -file for a bundle. The id must refer to a bundle, not a charm. - -```go -type BundleData struct { - Services map[string] ServiceSpec - Machines map[string] MachineSpec `json:",omitempty"` - Series string `json:",omitempty"` - Relations [][]string `json:",omitempty"` -} - -type MachineSpec struct { - Constraints string `json:",omitempty"` - Annotations map[string]string `json:",omitempty"` -} - -type ServiceSpec struct { - Charm string - NumUnits int - To []string `json:",omitempty"` - - // Options holds the configuration values - // to apply to the new service. They should - // be compatible with the charm configuration. - Options map[string]interface{} `json:",omitempty"` - Annotations map[string]string `json:",omitempty"` - Constraints string `json:",omitempty"` -} -``` - -Example: `GET mediawiki/meta/bundle-metadata` - -```json -{ - "Services": { - "mediawiki": { - "Charm": "cs:precise/mediawiki-10", - "NumUnits": 1, - "Options": { - "debug": false, - "name": "Please set name of wiki", - "skin": "vector" - }, - "Annotations": { - "gui-x": "619", - "gui-y": "-128" - } - }, - "memcached": { - "Charm": "cs:precise/memcached-7", - "NumUnits": 1, - "Options": { - "connection_limit": "global", - "factor": 1.25 - }, - "Annotations": { - "gui-x": "926", - "gui-y": "-125" - } - } - }, - "Relations": [ - [ - "mediawiki:cache", - "memcached:cache" - ] - ] -} -``` - -#### GET *id*/meta/bundle-unit-count - -The `meta/bundle-unit-count` path returns a count of all the units that will be -created by a bundle. The id must refer to a bundle, not a charm. - -```go -type BundleCount struct { - Count int -} -``` - -Example: `GET bundle/mediawiki/meta/bundle-unit-count` - -```json -{ - "Count": 1 -} -``` - -#### GET *id*/meta/bundle-machine-count - -The `meta/bundle-machine-count` path returns a count of all the machines used -by a bundle. The id must refer to a bundle, not a charm. - -```go -type BundleCount struct { - Count int -} -``` - -Example: `GET bundle/mediawiki/meta/bundle-machine-count` - -```json -{ - "Count": 2 -} -``` - -#### GET *id*/meta/manifest - -The `meta/manifest` path returns the list of all files in the bundle or charm's -archive. - -```go -[]ManifestFile -type ManifestFile struct { - Name string - Size int64 -} -``` - -Example: `GET trusty/juju-gui-3/meta/manifest` - -```json -[ - { - "Name": "config.yaml", - "Size": 8254 - }, - { - "Name": "HACKING.md", - "Size": 11376 - }, - { - "Name": "Makefile", - "Size": 3304 - }, - { - "Name": "metadata.yaml", - "Size": 1110 - }, - { - "Name": "README.md", - "Size": 9243 - }, - { - "Name": "hooks/config-changed", - "Size": 1636 - }, - { - "Name": "hooks/install", - "Size": 3055 - }, - { - "Name": "hooks/start", - "Size": 1101 - }, - { - "Name": "hooks/stop", - "Size": 1053 - } -] -``` - -#### GET *id*/meta/charm-actions - - -The `meta/charm-actions` path returns the actions available in a charm as -stored in its `actions.yaml` file. 
Id must refer to a charm, not a bundle. - -```go -type Actions struct { - Actions map[string]ActionSpec `json:",omitempty"` -} - -type ActionSpec struct { - Description string - Params JSONSchema -} -``` - -The Params field holds a JSON schema specification of an action's parameters. -See [http://json-schema.org/latest/json-schema-core.html](http://json-schema.org/latest/json-schema-core.html). - -Example: `GET wordpress/meta/charm-actions` - -```json -{ - "Actions": { - "backup": { - "Description": "back up the charm", - "Params": { - "properties": { - "destination-host": { - "type": "string" - }, - "destination-name": { - "type": "string" - } - }, - "required": [ - "destination-host" - ], - "type": "object" - } - } - } -} -``` - -#### GET *id*/meta/charm-config - -The `meta/charm-config` path returns the charm's configuration specification as -stored in its `config.yaml` file. Id must refer to a charm, not a bundle. - -```go -type Config struct { - Options map[string] Option -} - -// Option represents a single charm config option. -type Option struct { - Type string - Description string - Default interface{} -} -``` - -Example: `GET trusty/juju-gui-3/meta/charm-config` - -```json -{ - "Options": { - "builtin-server": { - "Type": "boolean", - "Description": "Enable the built-in server.", - "Default": true - }, - "login-help": { - "Type": "string", - "Description": "The help text shown to the user.", - "Default": null - }, - "read-only": { - "Type": "boolean", - "Description": "Enable read-only mode.", - "Default": false - } - } -} -``` - -#### GET *id*/meta/archive-size - -The `meta/archive-size` path returns the archive size, in bytes, of the archive -of the given charm or bundle id. - -```go -type ArchiveSize struct { - Size int64 -} -``` - -Example: `GET wordpress/meta/archive-size` - -```json -{ - "Size": 4747 -} -``` - -#### GET *id*/meta/hash - -This path returns the SHA384 hash sum of the archive of the given charm or -bundle id. - -```go -type HashResponse struct { - Sum string -} -``` - -Example: `GET wordpress/meta/hash` - -Response body: -```json -{ - "Sum": "0a410321586d244d3981e2b23a27a7e86ebdcab8bd0ca8f818d3f4c34b2ea2791e0dbdc949f70b283a3f5efdf908abf1" -} -``` - -#### GET *id*/meta/hash256 - -This path returns the SHA256 hash sum of the archive of the given charm or -bundle id. - -```go -type HashResponse struct { - Sum string -} -``` - -Example: `GET wordpress/meta/hash256` - -Response body: -```json -{ - "Sum": "9ab5036cc18ba61a9d25fad389e46b3d407fc02c3eba917fe5f18fdf51ee6924" -} -``` - -#### GET *id*/meta/bundles-containing - -The `meta/bundles-containing` path returns information on the last revision of -any bundles that contain the charm with the given id. - -
-GET id/meta/bundles-containing[?include=meta[&include=meta...]]
-
-
-The Meta field is populated with information on the returned bundles according
-to the include flags - see the `meta/any` path for more info on how to use the
-`include` flag. The only valid values for the `any-series`, `any-revision` and
-`all-results` flags are 0, 1 and empty; the API validates this and returns a
-"bad request" error if any other value is provided. If `all-results` is
-enabled, all the bundle revisions are returned, not just the last one.
-
-```go
-[]Bundle
-type Bundle struct {
-    Id string
-    Meta map[string]interface{} `json:",omitempty"`
-}
-```
-
-Example: `GET mysql/meta/bundles-containing?include=featured` might return:
-
-```json
-[
-    {
-        "Id": "bundle/mysql-scalable",
-        "Meta": {
-            "featured": {
-                "Featured": false
-            }
-        }
-    },
-    {
-        "Id": "bundle/wordpress-simple",
-        "Meta": {
-            "featured": {
-                "Featured": true
-            }
-        }
-    }
-]
-```
-
-#### GET *id*/meta/extra-info
-
-The meta/extra-info path reports any additional metadata recorded for the
-charm. This contains only information stored by clients - the API server itself
-does not populate any fields. The resulting object holds an entry for each
-piece of metadata recorded with a PUT to `meta/extra-info`.
-
-```go
-type ExtraInfo struct {
-    Values map[string] interface{}
-}
-```
-
-Example: `GET wordpress/meta/extra-info`
-
-```json
-{
-    "featured": true,
-    "vcs-digest": "4b6b3c7d795eb66ca5f82bc52c01eb57ab595ab2"
-}
-```
-
-#### GET *id*/meta/extra-info/*key*
-
-This path returns the contents of the given `extra-info` key. The result is
-exactly the JSON value stored as a result of the PUT request to `extra-info` or
-`extra-info/key`.
-
-Example: `GET wordpress/meta/extra-info/featured`
-
-```json
-true
-```
-
-#### PUT *id*/meta/extra-info
-
-This request updates any of the `extra-info` metadata values. Any values that
-are not mentioned in the request are left untouched.
-
-Example: `PUT precise/wordpress-32/meta/extra-info`
-
-Request body:
-```json
-{
-    "vcs-digest": "7d6a853c7bb102d90027b6add67b15834d815e08"
-}
-```
-
-#### PUT *id*/meta/extra-info/*key*
-
-This request creates or updates the value for a specific key.
-
-Example: `PUT precise/wordpress-32/meta/extra-info/vcs-digest`
-
-Request body:
-
-```json
-"7d6a853c7bb102d90027b6add67b15834d815e08"
-```
-
-The above example is equivalent to the `meta/extra-info` example above.
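-
-As a non-normative illustration, the following Go sketch performs the
-`extra-info` PUT shown above. The base URL is assumed for the example, and the
-write authorization a real charm store requires is omitted:
-
-```go
-package main
-
-import (
-    "bytes"
-    "encoding/json"
-    "fmt"
-    "log"
-    "net/http"
-)
-
-func main() {
-    // NOTE: assumed base URL; authorization omitted for brevity.
-    const base = "https://api.jujucharms.com/charmstore/v4"
-    body, err := json.Marshal("7d6a853c7bb102d90027b6add67b15834d815e08")
-    if err != nil {
-        log.Fatal(err)
-    }
-    req, err := http.NewRequest("PUT", base+"/precise/wordpress-32/meta/extra-info/vcs-digest", bytes.NewReader(body))
-    if err != nil {
-        log.Fatal(err)
-    }
-    req.Header.Set("Content-Type", "application/json")
-    resp, err := http.DefaultClient.Do(req)
-    if err != nil {
-        log.Fatal(err)
-    }
-    resp.Body.Close()
-    fmt.Println("status:", resp.Status)
-}
-```
-
-#### GET *id*/meta/charm-related
-
-The `meta/charm-related` path returns all charms that are related to the given
-charm id, which must not refer to a bundle. It is possible to include
-additional metadata for charms by using the `include` query:
-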
-GET id/meta/charm-related[?include=meta[&include=meta...]]
-
-
-```go
-type Related struct {
-    // Requires holds an entry for each interface provided by
-    // the charm, containing all charms that require that interface.
-    Requires map[string] []Item `json:",omitempty"`
-
-    // Provides holds an entry for each interface required by the
-    // charm, containing all charms that provide that interface.
-    Provides map[string] []Item `json:",omitempty"`
-}
-
-type Item struct {
-    Id string
-    Meta map[string] interface{} `json:",omitempty"`
-}
-```
-
-The Meta field is populated according to the include flags - see the `meta`
-path for more info on how to use this.
-
-Example: `GET wordpress/meta/charm-related`
-
-```json
-{
-    "Requires": {
-        "memcache": [
-            {"Id": "precise/memcached-13"}
-        ],
-        "db": [
-            {"Id": "precise/mysql-46"},
-            {"Id": "~clint-fewbar/precise/galera-42"}
-        ]
-    },
-    "Provides": {
-        "http": [
-            {"Id": "precise/apache2-24"},
-            {"Id": "precise/haproxy-31"},
-            {"Id": "precise/squid-reverseproxy-8"}
-        ]
-    }
-}
-```
-
-Example: `GET trusty/juju-gui-3/meta/charm-related?include=charm-config`
-
-```json
-{
-    "Provides": {
-        "http": [
-            {
-                "Id": "precise/apache2-24",
-                "Meta": {
-                    "charm-config": {
-                        "Options": {
-                            "logrotate_count": {
-                                "Type": "int",
-                                "Description": "The number of days",
-                                "Default": 365
-                            }
-                        }
-                    }
-                }
-            }
-        ],
-        "nrpe-external-master": [
-            {
-                "Id": "precise/nova-compute-31",
-                "Meta": {
-                    "charm-config": {
-                        "Options": {
-                            "bridge-interface": {
-                                "Type": "string",
-                                "Description": "Bridge interface",
-                                "Default": "br100"
-                            },
-                            "bridge-ip": {
-                                "Type": "string",
-                                "Description": "IP to be assigned to bridge",
-                                "Default": "11.0.0.1"
-                            }
-                        }
-                    }
-                }
-            }
-        ]
-    }
-}
-```
-
-#### GET *id*/meta/archive-upload-time
-
-The `meta/archive-upload-time` path returns the time the archive for the given
-*id* was uploaded. The time is formatted according to RFC3339.
-
-```go
-type ArchiveUploadTimeResponse struct {
-    UploadTime time.Time
-}
-```
-
-Example: `GET trusty/wordpress-42/meta/archive-upload-time`
-
-```json
-{
-    "UploadTime": "2014-07-04T13:53:57.403506102Z"
-}
-```
-
-#### GET *id*/meta/promulgated
-
-The `promulgated` path reports whether the entity with the given ID is promulgated.
-Promulgated charms do not require the user portion of the ID to be specified.
-
-```go
-type PromulgatedResponse struct {
-    Promulgated bool
-}
-```
-
-Example: `GET trusty/wordpress-42/meta/promulgated`
-
-```json
-{
-    "Promulgated": true
-}
-```
-
-#### GET *id*/meta/stats
-
-Many clients will need stats to determine the best result, and the details
-view for a charm/bundle may present the stats as important information to
-users. Currently we track deployment stats only; we intend to open this up to
-additional data. The response includes download counts for both the specific
-requested entity revision and for all revisions, and it is structured as
-below:
-
-```go
-// StatsResponse holds the result of an id/meta/stats GET request.
-type StatsResponse struct {
-    // ArchiveDownloadCount is superseded by ArchiveDownload but maintained for
-    // backward compatibility.
-    ArchiveDownloadCount int64
-    // ArchiveDownload holds the downloads count for a specific revision of the
-    // entity.
-    ArchiveDownload StatsCount
-    // ArchiveDownloadAllRevisions holds the downloads count for all revisions
-    // of the entity.
-    ArchiveDownloadAllRevisions StatsCount
-}
-
-// StatsCount holds stats counts and is used as part of StatsResponse.
-type StatsCount struct {
-    Total int64 // Total count over all time.
-    Day int64 // Count over the last day.
- Week int64 // Count over the last week. - Month int64 // Count over the last month. -} -``` - -#### GET *id*/meta/tags - -The `tags` path returns any tags that are associated with the entity. - -Example: `GET trusty/wordpress-42/meta/tags` - -```json -{ - "Tags": [ - "blog", - "cms" - ] -} -``` - -#### GET *id*/meta/revision-info - -The `revision-info` path returns information about other available revisions of -the charm id that the charm store knows about. It will include both older and -newer revisions. The fully qualified ids of those charms will be returned in an -ordered list from newest to oldest revision. Note that the current revision -will be included in the list as it is also an available revision. - -```go -type RevisionInfo struct { - Revisions []*charm.Reference -} -``` - -Example: `GET trusty/wordpress-42/meta/revision-info` - -```json -{ - "Revisions": [ - "cs:trusty/wordpress-43", - "cs:trusty/wordpress-42", - "cs:trusty/wordpress-41", - "cs:trusty/wordpress-39" - ] -} -``` - -#### GET *id*/meta/id - -The `id` path returns information on the charm or bundle id, split apart into -its various components, including the id itself. The information is exactly -that contained within the entity id. - -```go -type Id struct { - Id *charm.Reference - User string - Series string `json:",omitempty"` - Name string - Revision int -} -``` - -Example: `GET ~bob/trusty/wordpress/meta/id` - -```json -{ - "Id": "~bob/trusty/wordpress-42", - "User": "bob", - "Series": "trusty", - "Name": "wordpress", - "Revision": 42 -} -``` - -Example: `GET precise/wordpress/meta/id` - -```json -{ - "Id": "precise/wordpress-42", - "Series": "precise", - "Name": "wordpress", - "Revision": 42 -} -``` - -Example: `GET bundle/openstack/meta/id` - -```json -{ - "Id": "bundle/openstack-3", - "Series": "bundle", - "Name": "openstack", - "Revision": 3 -} -``` - -#### GET *id*/meta/id-revision - -The `revision` path returns information on the revision of the id. The -information is exactly that contained within the id. - -```go -type Revision struct { - Revision int -} -``` - -Example: `GET trusty/wordpress-42/meta/id-revision` - -```json -{ - "Revision": 42 -} -``` - -#### GET *id*/meta/id-name - -The `name` path returns information on the name of the id. The information is -exactly that contained within the id. - -```go -type Name struct { - Name string -} -``` - -Example: `GET trusty/wordpress-42/meta/id-name` - -```json -{ - "Name": "wordpress" -} -``` - -#### GET *id*/meta/id-user - -The `id-user` path returns information on the user name in the id. This -information is exactly that contained within the id. - -```go -type User struct { - User string -} -``` - -Example: `GET ~bob/trusty/wordpress-42/meta/id-user` - -```json -{ - "User": "bob" -} -``` - -Example: `GET trusty/wordpress-42/meta/id-user` - -```json -{ - "User": "" -} -``` - -#### GET *id*/meta/id-series - -The `id-series` path returns information on the series in the id. This -information is exactly that contained within the id. For bundles, this will -return "bundle". - -```go -type Series struct { - Series string -} -``` - -Example: `GET ~bob/trusty/wordpress-42/meta/id-series` - -```json -{ - "Series": "trusty" -} -``` - -### Resources - -**Not yet implemented** - -#### POST *id*/resources/name.stream - -Posting to the resources path creates a new version of the given stream -for the charm with the given id. The request returns the new version. 
-
-```go
-type ResourcesRevision struct {
-    Revision int
-}
-```
-
-#### GET *id*/resources/name.stream[-revision]/arch/filename
-
-Getting from the `/resources` path retrieves a charm resource from the charm
-with the given id. If the revision is not specified, it retrieves the latest
-version of the resource. The SHA-256 hash of the data is specified in the HTTP
-response headers.
-
-#### PUT *id*/resources/[~user/]series/name.stream-revision/arch?sha256=hash
-
-Putting to the `resources` path uploads a resource (an arbitrary "blob" of
-data) associated with the charm with id series/name, which must not be a
-bundle. Stream and arch specify which of the charm's resource streams and
-which architecture the resource will be associated with, respectively.
-Revision specifies the revision of the stream that's being uploaded to.
-
-The hash value must specify the hash of the stream. If the same series, name,
-stream, revision combination is PUT again, it must specify the same hash.
-
-### Search
-
-#### GET search
-
-The `search` path searches within the latest version of charms and bundles
-within the store.
-
-
-GET search[?text=text][&autocomplete=1][&filter=value...][&limit=limit][&skip=skip][&include=meta[&include=meta...]][&sort=field]
-
-
-`text` specifies any text to search for. If `autocomplete` is specified, the
-search will return only charms and bundles with a name that has text as a
-prefix. `limit` limits the number of returned items to the specified limit
-count. `skip` skips over the first skip items in the result. Any number of
-filters may be specified, limiting the search to items with attributes that
-match the specified filter value. Items matching any of the selected values for
-a filter are selected, so `name=1&name=2` would match items whose name was
-either 1 or 2. However, if multiple filters are specified, the charm must match
-all of them, so `name=1&series=2` will only match charms whose name is 1 and
-whose series is 2. Available filters are:
-
-* tags - the set of tags associated with the charm.
-* name - the charm's name.
-* owner - the charm's owner (the ~user element of the charm id)
-* promulgated - the charm has been promulgated.
-* provides - interfaces provided by the charm.
-* requires - interfaces required by the charm.
-* series - the charm's series.
-* summary - the charm's summary text.
-* description - the charm's description text.
-* type - "charm" or "bundle" to search only one doctype or the other.
-
-Notes
-
-1. Filtering on a specified, but empty, owner is the same as filtering on
-   promulgated=1.
-2. A specified, but empty, text field will return all charms and bundles.
-3. The promulgated filter is only applied if specified. If the value is "1",
-   only promulgated entities are returned; if it is any other value, only
-   non-promulgated entities are returned.
-
-The response contains a list of information on the charms or bundles that were
-matched by the request. If no parameters are specified, all charms and bundles
-will match. By default, only the charm store id is included.
-
-The results are sorted according to the given sort field, which may be one of
-`owner`, `name` or `series`, corresponding to the filters of the same names. If
-the field is prefixed with a hyphen (-), the sorting order will be reversed. If
-the sort field is not specified, the results are returned in
-most-relevant-first order if the text filter was specified, or an arbitrary
-order otherwise. It is possible to specify more than one sort field to get
-multi-level sorting, e.g. sort=name,-series will get charms in order of the
-charm name and then in reverse order of series.
-
-The Meta field is populated according to the include flag - see the `meta`
-path for more info on how to use this.
-
-```go
-[]SearchResult
-
-type SearchResult struct {
-    Id string
-    // Meta holds at most one entry for each meta value
-    // specified in the include flags, holding the
-    // data that would be returned by reading /meta/meta?id=id.
-    // Metadata not relevant to a particular result will not
-    // be included.
-    Meta map[string] interface{} `json:",omitempty"`
-}
-```
-
-Example: `GET search?text=word&autocomplete=1&limit=2&include=archive-size`
-
-```json
-[
-    {
-        "Id": "precise/wordpress-1",
-        "Meta": {
-            "archive-size": {
-                "Size": 1024
-            }
-        }
-    },
-    {
-        "Id": "precise/wordpress-2",
-        "Meta": {
-            "archive-size": {
-                "Size": 4242
-            }
-        }
-    }
-]
-```
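-
-A minimal, non-normative Go sketch of the search request above follows; the
-base URL is an assumption for the example:
-
-```go
-package main
-
-import (
-    "encoding/json"
-    "fmt"
-    "log"
-    "net/http"
-    "net/url"
-)
-
-// SearchResult mirrors the []SearchResult response documented above.
-type SearchResult struct {
-    Id   string
-    Meta map[string]interface{} `json:",omitempty"`
-}
-
-func main() {
-    // NOTE: assumed base URL.
-    const base = "https://api.jujucharms.com/charmstore/v4"
-    q := url.Values{}
-    q.Set("text", "word")
-    q.Set("autocomplete", "1")
-    q.Set("limit", "2")
-    q.Add("include", "archive-size")
-    resp, err := http.Get(base + "/search?" + q.Encode())
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer resp.Body.Close()
-    var results []SearchResult
-    if err := json.NewDecoder(resp.Body).Decode(&results); err != nil {
-        log.Fatal(err)
-    }
-    for _, r := range results {
-        fmt.Println(r.Id, r.Meta)
-    }
-}
-```
-
-#### GET search/interesting
-
-This returns a list of bundles and charms which are interesting from the Juju
-GUI perspective. These are shown on the left sidebar of the GUI when no other
-search requests are performed.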
-
-`GET search/interesting[?limit=limit][&include=meta]`
-
-The Meta field is populated according to the include flag - see the `meta`
-path for more info on how to use this.
-The `limit` flag is the same as for the "search" path.
-
-### Debug info
-
-#### GET /debug
-
-**Not yet implemented**
-
-This returns metadata describing the current version of the software running
-the server, and any other information deemed appropriate. The specific form of
-the returned data is deliberately left unspecified for now.
-
-#### GET /debug/status
-
-Used as a health check of the service. The API will also be used for Nagios
-tests. The items that are checked:
-
-* connection to MongoDB
-* connection to ElasticSearch, if needed according to the charm configuration
-  (ElasticSearch cluster status, all nodes up, etc.; see charmworld)
-* number of charms and bundles in the blobstore
-* number of promulgated items
-* time and location of service start
-* time of last ingestion process
-* did ingestion finish
-* did ingestion finish without errors (this should not count charm/bundle
-  ingest errors)
-
-```go
-type DebugStatuses map[string] struct {
-    Name string
-    Value string
-    Passed bool
-}
-```
-
-Example: `GET /debug/status`
-
-```json
-{
-    "mongo_connected" : {
-        "Name": "MongoDB is connected",
-        "Value": "Connected",
-        "Passed": true
-    },
-    "mongo_collections" : {
-        "Name": "MongoDB collections",
-        "Value": "All required collections exist",
-        "Passed": true
-    },
-    "ES_connected": {
-        "Name": "ElasticSearch is connected",
-        "Value": "Connected",
-        "Passed": true
-    },
-    "entities": {
-        "Name": "Entities in charm store",
-        "Value": "5701 charms; 2000 bundles; 42 promulgated",
-        "Passed": true
-    },
-    "server_started": {
-        "Name": "Server started",
-        "Value": "123.45.67.89 2014-09-16 11:12:29Z",
-        "Passed": true
-    }
-}
-```
-
-### Permissions
-
-All entities in the charm store have their own access control lists. Read and
-write permissions are supported for specific users and groups. By default, all
-charms and bundles are readable by everyone, meaning that anonymous users can
-retrieve archives and metadata information without restrictions. The permission
-endpoints can be used to retrieve or change entities' permissions.
-
-#### GET *id*/meta/perm
-
-This path reports the read and write ACLs for the charm or bundle.
-
-```go
-type PermResponse struct {
-    Read []string
-    Write []string
-}
-```
-
-If the `Read` ACL is empty, the entity and its metadata cannot be retrieved by
-anyone.
-If the `Write` ACL is empty, the entity cannot be modified by anyone.
-The special user `everyone` indicates that the corresponding operation
-(read or write) can be performed by everyone, including anonymous users.
-
-Example: `GET ~joe/wordpress/meta/perm`
-
-```json
-{
-    "Read": ["everyone"],
-    "Write": ["joe"]
-}
-```
-
-#### PUT *id*/meta/perm
-
-This request updates the permissions associated with the charm or bundle.
-
-```go
-type PermResponse struct {
-    Read []string
-    Write []string
-}
-```
-
-If the Read or Write ACL is empty or missing from the request body, that
-field will be overwritten as empty. See the *id*/meta/perm/*key* request
-to PUT only Read or Write.
-
-Example: `PUT precise/wordpress-32/meta/perm`
-
-Request body:
-```json
-{
-    "Read": ["everyone"],
-    "Write": ["joe"]
-}
-```
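-
-As a non-normative illustration, this Go sketch issues the PUT shown above.
-The base URL is assumed, and the authorization a real charm store requires to
-change ACLs is omitted:
-
-```go
-package main
-
-import (
-    "bytes"
-    "encoding/json"
-    "fmt"
-    "log"
-    "net/http"
-)
-
-// perms mirrors the PUT id/meta/perm request body documented above.
-type perms struct {
-    Read  []string
-    Write []string
-}
-
-func main() {
-    // NOTE: assumed base URL; authorization omitted for brevity.
-    const base = "https://api.jujucharms.com/charmstore/v4"
-    body, err := json.Marshal(perms{
-        Read:  []string{"everyone"},
-        Write: []string{"joe"},
-    })
-    if err != nil {
-        log.Fatal(err)
-    }
-    req, err := http.NewRequest("PUT", base+"/precise/wordpress-32/meta/perm", bytes.NewReader(body))
-    if err != nil {
-        log.Fatal(err)
-    }
-    req.Header.Set("Content-Type", "application/json")
-    resp, err := http.DefaultClient.Do(req)
-    if err != nil {
-        log.Fatal(err)
-    }
-    resp.Body.Close()
-    fmt.Println("status:", resp.Status)
-}
-```
-
-#### GET *id*/meta/perm/*key*
-
-This path returns the contents of the given permission *key* (that can be
-`read` or `write`). The result is exactly the JSON value stored as a result of
-the PUT request to `meta/perm/key`.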
-
-Example: `GET wordpress/meta/perm/read`
-
-```json
-["everyone"]
-```
-
-#### PUT *id*/meta/perm/*key*
-
-This request updates the *key* permission associated with the charm or bundle,
-where *key* can be `read` or `write`.
-
-Example: `PUT precise/wordpress-32/meta/perm/read`
-
-Request body:
-
-```json
-["joe", "frank"]
-```
-
-### Authorization
-
-#### GET /macaroon
-
-This endpoint returns a macaroon in JSON format that, when its third
-party caveats are discharged, will allow access to the charm store. No
-prior authorization is required.
-
-#### GET /delegatable-macaroon
-
-This endpoint returns a macaroon in JSON format that can be passed to
-third parties to allow them to access the charm store on the user's
-behalf. A first party "is-entity" caveat may be added to restrict those
-parties so that they can only access a given charmstore entity with a
-specified id.
-
-A delegatable macaroon will only be returned to an authorized user (not
-including admin). It will carry the same privileges as the macaroon used
-to authorize the request.
-
-### Logs
-
-#### GET /log
-
-This endpoint returns the log messages stored on the charm store. It is
-possible to save them by sending POST requests to the same endpoint (see
-below). For instance, the ingestion of charms/bundles produces logs that are
-collected and sent to the charm store by the ingestion client.
-
-`GET /log[?limit=count][&skip=count][&id=entity-id][&level=log-level][&type=log-type]`
-
-Each log message is defined as:
-
-```go
-type LogResponse struct {
-    // Data holds the log message as a JSON-encoded value.
-    Data json.RawMessage
-
-    // Level holds the log level as a string.
-    Level LogLevel
-
-    // Type holds the log type as a string.
-    Type LogType
-
-    // URLs holds a slice of entity URLs associated with the log message.
-    URLs []*charm.Reference `json:",omitempty"`
-
-    // Time holds the time of the log.
-    Time time.Time
-}
-```
-
-The log entries are ordered by last inserted (most recent logs first), and by
-default the last 1000 logs are returned. Use the `limit` and `skip` query
-parameters to change the default behavior. Logs can further be filtered by log
-level ("info", "warning" or "error") and by related entity id. The `type`
-query parameter filters entries by log type. For instance, to request all the
-ingestion errors related to the *utopic/django* charm, use the following URL:
-
-`/log?type=ingestion&level=error&id=utopic/django`
-
-#### POST /log
-
-This endpoint uploads logs to the charm store. The request content type must be
-`application/json`. The body must contain the JSON representation of a list of
-logs, each one being in this format:
-
-```go
-type Log struct {
-    // Data holds the log message as a JSON-encoded value.
-    Data *json.RawMessage
-
-    // Level holds the log level as a string.
-    Level LogLevel
-
-    // Type holds the log type as a string.
-    Type LogType
-
-    // URLs holds a slice of entity URLs associated with the log message.
-    URLs []*charm.Reference `json:",omitempty"`
-}
-```
-
-Nothing is returned if the request succeeds. Otherwise, an error is returned.
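-
-A non-normative Go sketch of uploading a single log entry follows. The base
-URL is an assumption, and Level, Type and URLs are simplified to plain strings
-here, which matches their JSON encoding:
-
-```go
-package main
-
-import (
-    "bytes"
-    "encoding/json"
-    "fmt"
-    "log"
-    "net/http"
-)
-
-// logEntry is a simplified stand-in for the Log type documented above.
-type logEntry struct {
-    Data  *json.RawMessage
-    Level string
-    Type  string
-    URLs  []string `json:",omitempty"`
-}
-
-func main() {
-    // NOTE: assumed base URL.
-    const base = "https://api.jujucharms.com/charmstore/v4"
-    msg := json.RawMessage(`"ingestion started"`)
-    body, err := json.Marshal([]logEntry{{
-        Data:  &msg,
-        Level: "info",
-        Type:  "ingestion",
-    }})
-    if err != nil {
-        log.Fatal(err)
-    }
-    resp, err := http.Post(base+"/log", "application/json", bytes.NewReader(body))
-    if err != nil {
-        log.Fatal(err)
-    }
-    resp.Body.Close()
-    fmt.Println("status:", resp.Status)
-}
-```
-
-### Changes
-
-Each charm store has a global feed for all new published charms and bundles.
-
-#### GET changes/published
-
-This endpoint returns the ids of published charms or bundles, most recently
-published first.
-
-`GET changes/published[?limit=count][&from=fromdate][&to=todate]`
-
-The `fromdate` and `todate` values constrain the range of publish dates, in
-"yyyy-mm-dd" format.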
-If `fromdate` is specified, only charms published on or after that date are
-returned; if `todate` is specified, only charms published on or before that
-date are returned. If the `limit` count is specified, it must be positive, and
-only the first *count* results are returned. The published time is in RFC3339
-format.
-
-```go
-[]Published
-type Published struct {
-    Id string
-    PublishTime time.Time
-}
-```
-
-Example: `GET changes/published`
-
-```json
-[
-    {
-        "Id": "cs:trusty/wordpress-42",
-        "PublishTime": "2014-07-31T15:04:05Z"
-    },
-    {
-        "Id": "cs:trusty/mysql-11",
-        "PublishTime": "2014-07-30T14:20:00Z"
-    },
-    {
-        "Id": "cs:bundle/mediawiki",
-        "PublishTime": "2014-07-29T13:45:10Z"
-    }
-]
-```
-
-Example: `GET changes/published?limit=10&from=2014-07-31`
-
-```json
-[
-    {
-        "Id": "cs:trusty/wordpress-42",
-        "PublishTime": "2014-07-31T15:04:05Z"
-    }
-]
-```
=== removed file 'src/gopkg.in/juju/charmstore.v4/docs/bundles.md'
--- src/gopkg.in/juju/charmstore.v4/docs/bundles.md 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/docs/bundles.md 1970-01-01 00:00:00 +0000
@@ -1,164 +0,0 @@
-# Bundles in the Charmstore
-
-The charmstore allows two versions of bundle specifications, as described by
-github.com/juju/charm. The versions are numbered 3 and 4, relating to the API
-version under which they can be hosted: charmworld (API v3) supports only
-version 3 bundles; charmstore (API v4) supports versions 3 and 4.
-
-## Version 3 bundles
-
-Version 3 bundles are currently existing bundles that specify a deployment as a
-list of services and, optionally, relations. The charmstore will not support
-the idea of a "basket" or multiple bundles within one file. However, existing
-baskets will still be imported, and split up into their component bundles.
-
-## Version 4 bundles
-
-Version 4 bundles are identical to version 3 bundles except for a few key
-differences: the `branch` attribute of the service spec is no longer supported,
-they may contain a machine specification, and their deployment directives are
-different from version 3 bundles.
-
-### Deploying version 4 bundles
-
-Because version 4 bundles are not yet idempotent (i.e. if a machine fails to
-come up, running the bundle again will recreate all machines in the machine
-spec), the juju deployer pessimistically assumes that a bundle is a version 4
-bundle *only* if it has a machine spec. This means that a bundle without a
-machine spec must use the version 3 style of placement directives listed below
-until further notice, when the deployer is updated. This does not affect
-version 4 bundle support within the charmstore (that is, the machine spec is
-still optional).
-
-The Juju GUI does not yet support version 4 bundles as of version 1.3.4, as the
-GUI charm contains an older version of the deployer.
-
-### Machine Specifications
-
-A machine specification identifies a machine that will be created in the Juju
-environment. These machines are named with an integer, and can have any of
-three optional attributes:
-
-* *constraints* - Constraints are specified as a string as described by the
-  Juju constraints flag (see `juju help constraints` for more information).
-* *annotations* - Annotations, provided as key-value pairs, are additional
-  information that is tacked onto the machine within the Juju state server.
-  These can be used for marking machines for your own use, or for use by Juju
-  clients.
-* *series* - You may optionally specify the series of the machine to be
-  created (e.g. "precise" or "trusty").
-  If you do not specify a series, the bundle series will be used.
-
-Machines are specified under the `machines` top-level attribute.
-
-### Deployment directives
-
-Version 4 deployment directives (the `to` attribute on the service spec) are a
-YAML list of items following the format:
-
-    (<containertype>:)?(<unit>|<machine>|new)
-
-If containertype is specified, the unit is deployed into a new container of
-that type, otherwise it will be "hulk-smashed" into the specified location, by
-co-locating it with any other units that happen to be there, which may result
-in unintended behavior.
-
-The second part (after the colon) specifies where the new unit should be
-placed; it may refer to a unit of another service specified in the bundle, a
-machine id specified in the machines section, or the special name "new" which
-specifies a newly created machine.
-
-A unit placement may be specified with a service name only, in which case its
-unit number is assumed to be one more than the unit number of the previous unit
-in the list with the same service, or zero if there were none.
-
-If there are fewer elements in To than NumUnits, the last element is replicated
-to fill it. If there are no elements (or To is omitted), "new" is replicated.
-
-For example:
-
-    wordpress/0 wordpress/1 lxc:0 kvm:new
-
-specifies that the first two units get hulk-smashed onto the first two units of
-the wordpress service, the third unit gets allocated onto an lxc container on
-machine 0, and subsequent units get allocated on kvm containers on new
-machines.
-
-The above example is the same as this:
-
-    wordpress wordpress lxc:0 kvm:new
-
-Version 3 placement directives take the format:
-
-    ((<containertype>:)?(<unit>)(=<unitnumber>)?|0)
-
-meaning that a machine cannot be specified beyond colocating (either through a
-container or hulk-smash) along with a specified unit of another service.
-Version 3 placement directives may be either a string of a single directive or
-a YAML list of directives in the above format. The only machine that may be
-specified is machine 0, allowing colocation on the bootstrap node.
-
-## Example Bundles
-
-### Version 3
-
-```yaml
-series: precise
-services:
-    nova-compute:
-        charm: cs:precise/nova-compute
-        units: 3
-    ceph:
-        units: 3
-        to: [nova-compute, nova-compute]
-    mysql:
-        to: 0
-    quantum:
-        units: 4
-        to: ["lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute"]
-    verity:
-        to: lxc:nova-compute=2
-    semper:
-        to: nova-compute=2
-    lxc-service:
-        num_units: 5
-        to: [ "lxc:nova-compute=1", "lxc:nova-compute=2", "lxc:nova-compute=0", "lxc:nova-compute=0", "lxc:nova-compute=2" ]
-```
-
-### Version 4
-
-```yaml
-series: precise
-services:
-    # Automatically place
-    nova-compute:
-        charm: cs:precise/nova-compute
-        units: 3
-    # Specify containers
-    ceph:
-        units: 3
-        to:
-            # Specify a unit
-            - lxc:nova-compute/0
-            # Specify a machine
-            - lxc:1
-            # Create a new machine, deploy to a container on that machine.
- - lxc:new - # Specify a machine - mysql: - to: - - 0 - # Specify colocation - quantum: - units: 4 - to: - - ceph/1 - # Assume first unit - - nova-compute - # Repeats previous directive to fill out placements -machines: - 1: - constraints: "mem=16G arch=amd64" - annotations: - foo: bar - series: precise -``` === removed directory 'src/gopkg.in/juju/charmstore.v4/internal' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/blobstore' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/blobstore/blobstore.go' --- src/gopkg.in/juju/charmstore.v4/internal/blobstore/blobstore.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/blobstore/blobstore.go 1970-01-01 00:00:00 +0000 @@ -1,159 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package blobstore - -import ( - "crypto/sha512" - "fmt" - "hash" - "io" - "strconv" - - "github.com/juju/blobstore" - "github.com/juju/errors" - "gopkg.in/errgo.v1" - "gopkg.in/mgo.v2" -) - -type ReadSeekCloser interface { - io.Reader - io.Seeker - io.Closer -} - -// ContentChallengeError holds a proof-of-content -// challenge produced by a blobstore. -type ContentChallengeError struct { - Req ContentChallenge -} - -func (e *ContentChallengeError) Error() string { - return "cannot upload because proof of content ownership is required" -} - -// ContentChallenge holds a proof-of-content challenge -// produced by a blobstore. A client can satisfy the request -// by producing a ContentChallengeResponse containing -// the same request id and a hash of RangeLength bytes -// of the content starting at RangeStart. -type ContentChallenge struct { - RequestId string - RangeStart int64 - RangeLength int64 -} - -// ContentChallengeResponse holds a response to a ContentChallenge. -type ContentChallengeResponse struct { - RequestId string - Hash string -} - -// NewHash is used to calculate checksums for the blob store. -func NewHash() hash.Hash { - return sha512.New384() -} - -// NewContentChallengeResponse can be used by a client to respond to a content -// challenge. The returned value should be passed to BlobStorage.Put -// when the client retries the request. -func NewContentChallengeResponse(chal *ContentChallenge, r io.ReadSeeker) (*ContentChallengeResponse, error) { - _, err := r.Seek(chal.RangeStart, 0) - if err != nil { - return nil, errgo.Mask(err) - } - hash := NewHash() - nw, err := io.CopyN(hash, r, chal.RangeLength) - if err != nil { - return nil, errgo.Mask(err) - } - if nw != chal.RangeLength { - return nil, errgo.Newf("content is not long enough") - } - return &ContentChallengeResponse{ - RequestId: chal.RequestId, - Hash: fmt.Sprintf("%x", hash.Sum(nil)), - }, nil -} - -// Store stores data blobs in mongodb, de-duplicating by -// blob hash. -type Store struct { - mstore blobstore.ManagedStorage -} - -// New returns a new blob store that writes to the given database, -// prefixing its collections with the given prefix. 
-func New(db *mgo.Database, prefix string) *Store { - rs := blobstore.NewGridFS(db.Name, prefix, db.Session) - return &Store{ - mstore: blobstore.NewManagedStorage(db, rs), - } -} - -func (s *Store) challengeResponse(resp *ContentChallengeResponse) error { - id, err := strconv.ParseInt(resp.RequestId, 10, 64) - if err != nil { - return errgo.Newf("invalid request id %q", id) - } - return s.mstore.ProofOfAccessResponse(blobstore.NewPutResponse(id, resp.Hash)) -} - -// Put tries to stream the content from the given reader into blob -// storage, with the provided name. The content should have the given -// size and hash. If the content is already in the store, a -// ContentChallengeError is returned containing a challenge that must be -// satisfied by a client to prove that they have access to the content. -// If the proof has already been acquired, it should be passed in as the -// proof argument. -func (s *Store) Put(r io.Reader, name string, size int64, hash string, proof *ContentChallengeResponse) (*ContentChallenge, error) { - if proof != nil { - err := s.challengeResponse(proof) - if err == nil { - return nil, nil - } - if err != blobstore.ErrResourceDeleted { - return nil, errgo.Mask(err) - } - // The blob has been deleted since the challenge - // was created, so continue on with uploading - // the content as if there was no previous challenge. - } - resp, err := s.mstore.PutForEnvironmentRequest("", name, hash) - if err != nil { - if errors.IsNotFound(err) { - if err := s.mstore.PutForEnvironmentAndCheckHash("", name, r, size, hash); err != nil { - return nil, errgo.Mask(err) - } - return nil, nil - } - return nil, err - } - return &ContentChallenge{ - RequestId: fmt.Sprint(resp.RequestId), - RangeStart: resp.RangeStart, - RangeLength: resp.RangeLength, - }, nil -} - -// PutUnchallenged stream the content from the given reader into blob -// storage, with the provided name. The content should have the given -// size and hash. In this case a challenge is never returned and a proof -// is not required. -func (s *Store) PutUnchallenged(r io.Reader, name string, size int64, hash string) error { - return s.mstore.PutForEnvironmentAndCheckHash("", name, r, size, hash) -} - -// Open opens the entry with the given name. -func (s *Store) Open(name string) (ReadSeekCloser, int64, error) { - r, length, err := s.mstore.GetForEnvironment("", name) - if err != nil { - return nil, 0, errgo.Mask(err) - } - return r.(ReadSeekCloser), length, nil -} - -// Remove the given name from the Store. -func (s *Store) Remove(name string) error { - return s.mstore.RemoveForEnvironment("", name) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/blobstore/blobstore_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/blobstore/blobstore_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/blobstore/blobstore_test.go 1970-01-01 00:00:00 +0000 @@ -1,192 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package blobstore_test - -import ( - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" - "testing" - - jujutesting "github.com/juju/testing" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/blobstore" - "gopkg.in/juju/charmstore.v4/internal/storetesting" -) - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} - -type BlobStoreSuite struct { - storetesting.IsolatedMgoSuite -} - -var _ = gc.Suite(&BlobStoreSuite{}) - -func (s *BlobStoreSuite) TestPutOpen(c *gc.C) { - store := blobstore.New(s.Session.DB("db"), "blobstore") - content := "some data" - chal, err := store.Put(strings.NewReader(content), "x", int64(len(content)), hashOf(content), nil) - c.Assert(err, gc.IsNil) - c.Assert(chal, gc.IsNil) - - rc, length, err := store.Open("x") - c.Assert(err, gc.IsNil) - defer rc.Close() - c.Assert(length, gc.Equals, int64(len(content))) - - data, err := ioutil.ReadAll(rc) - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, content) - - // Putting the resource again should generate a challenge. - chal, err = store.Put(strings.NewReader(content), "y", int64(len(content)), hashOf(content), nil) - c.Assert(err, gc.IsNil) - c.Assert(chal, gc.NotNil) - - resp, err := blobstore.NewContentChallengeResponse(chal, strings.NewReader(content)) - c.Assert(err, gc.IsNil) - - chal, err = store.Put(strings.NewReader(content), "y", int64(len(content)), hashOf(content), resp) - c.Assert(err, gc.IsNil) - c.Assert(chal, gc.IsNil) -} - -func (s *BlobStoreSuite) TestPutInvalidHash(c *gc.C) { - store := blobstore.New(s.Session.DB("db"), "blobstore") - content := "some data" - chal, err := store.Put(strings.NewReader(content), "x", int64(len(content)), hashOf("wrong"), nil) - c.Assert(err, gc.ErrorMatches, "hash mismatch") - c.Assert(chal, gc.IsNil) - - rc, length, err := store.Open("x") - c.Assert(err, gc.ErrorMatches, "resource.*not found") - c.Assert(rc, gc.Equals, nil) - c.Assert(length, gc.Equals, int64(0)) -} - -func (s *BlobStoreSuite) TestPutUnchallenged(c *gc.C) { - store := blobstore.New(s.Session.DB("db"), "blobstore") - - content := "some data" - err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content)) - c.Assert(err, gc.IsNil) - - rc, length, err := store.Open("x") - c.Assert(err, gc.IsNil) - defer rc.Close() - c.Assert(length, gc.Equals, int64(len(content))) - - data, err := ioutil.ReadAll(rc) - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, content) - - err = store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content)) - c.Assert(err, gc.IsNil) -} - -func (s *BlobStoreSuite) TestPutUnchallengedInvalidHash(c *gc.C) { - store := blobstore.New(s.Session.DB("db"), "blobstore") - content := "some data" - err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf("wrong")) - c.Assert(err, gc.ErrorMatches, "hash mismatch") -} - -func (s *BlobStoreSuite) TestRemove(c *gc.C) { - store := blobstore.New(s.Session.DB("db"), "blobstore") - content := "some data" - err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content)) - c.Assert(err, gc.IsNil) - - rc, length, err := store.Open("x") - c.Assert(err, gc.IsNil) - defer rc.Close() - c.Assert(length, gc.Equals, int64(len(content))) - data, err := ioutil.ReadAll(rc) - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, content) - - err = store.Remove("x") - c.Assert(err, gc.IsNil) - - rc, length, err = store.Open("x") - c.Assert(err, gc.ErrorMatches, 
`resource at path "[^"]+" not found`) -} - -func (s *BlobStoreSuite) TestLarge(c *gc.C) { - store := blobstore.New(s.Session.DB("db"), "blobstore") - size := int64(20 * 1024 * 1024) - newContent := func() io.Reader { - return newDataSource(123, size) - } - hash := hashOfReader(c, newContent()) - - chal, err := store.Put(newContent(), "x", size, hash, nil) - c.Assert(err, gc.IsNil) - c.Assert(chal, gc.IsNil) - - rc, length, err := store.Open("x") - c.Assert(err, gc.IsNil) - defer rc.Close() - c.Assert(length, gc.Equals, size) - - c.Assert(hashOfReader(c, rc), gc.Equals, hash) -} - -func hashOfReader(c *gc.C, r io.Reader) string { - h := blobstore.NewHash() - _, err := io.Copy(h, r) - c.Assert(err, gc.IsNil) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func hashOf(s string) string { - h := blobstore.NewHash() - h.Write([]byte(s)) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -type dataSource struct { - buf []byte - bufIndex int - remain int64 -} - -// newDataSource returns a stream of size bytes holding -// a repeated number. -func newDataSource(fillWith int64, size int64) io.Reader { - src := &dataSource{ - remain: size, - } - for len(src.buf) < 8*1024 { - src.buf = strconv.AppendInt(src.buf, fillWith, 10) - src.buf = append(src.buf, ' ') - } - return src -} - -func (s *dataSource) Read(buf []byte) (int, error) { - if int64(len(buf)) > s.remain { - buf = buf[:int(s.remain)] - } - total := len(buf) - if total == 0 { - return 0, io.EOF - } - - for len(buf) > 0 { - if s.bufIndex == len(s.buf) { - s.bufIndex = 0 - } - nb := copy(buf, s.buf[s.bufIndex:]) - s.bufIndex += nb - buf = buf[nb:] - s.remain -= int64(nb) - } - return total, nil -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/charmstore' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/archive.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/archive.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/archive.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "bytes" - "io" - "os" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - - "gopkg.in/juju/charmstore.v4/internal/blobstore" -) - -type archiverTo interface { - ArchiveTo(io.Writer) error -} - -// getArchive is used to turn the current charm and bundle implementations -// into ReadSeekClosers for their corresponding archive. -func getArchive(c interface{}) (blobstore.ReadSeekCloser, error) { - var path string - switch c := c.(type) { - case archiverTo: - // For example: charm.CharmDir or charm.BundleDir. - var buffer bytes.Buffer - if err := c.ArchiveTo(&buffer); err != nil { - return nil, errgo.Mask(err) - } - return nopCloser(bytes.NewReader(buffer.Bytes())), nil - case *charm.BundleArchive: - path = c.Path - case *charm.CharmArchive: - path = c.Path - default: - return nil, errgo.Newf("cannot get the archive for charm type %T", c) - } - file, err := os.Open(path) - if err != nil { - return nil, errgo.Mask(err) - } - return file, nil -} - -type nopCloserReadSeeker struct { - io.ReadSeeker -} - -func (nopCloserReadSeeker) Close() error { - return nil -} - -// nopCloser returns a blobstore.ReadSeekCloser with a no-op Close method -// wrapping the provided ReadSeeker r. 
-func nopCloser(r io.ReadSeeker) blobstore.ReadSeekCloser { - return nopCloserReadSeeker{r} -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/debug.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/debug.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/debug.go 1970-01-01 00:00:00 +0000 @@ -1,267 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "bytes" - "encoding/json" - "fmt" - "math/rand" - "net/http" - "net/http/httptest" - "sort" - "strings" - "time" - - "github.com/juju/utils" - "gopkg.in/errgo.v1" - "gopkg.in/mgo.v2" - - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" - appver "gopkg.in/juju/charmstore.v4/version" -) - -// GET /debug/info . -func serveDebugInfo(http.Header, *http.Request) (interface{}, error) { - return appver.VersionInfo, nil -} - -// GET /debug/check. -func debugCheck(checks map[string]func() error) http.Handler { - return router.HandleJSON(func(http.Header, *http.Request) (interface{}, error) { - n := len(checks) - type result struct { - name string - err error - } - c := make(chan result) - for name, check := range checks { - name, check := name, check - go func() { - c <- result{name: name, err: check()} - }() - } - results := make(map[string]string, n) - var failed bool - for ; n > 0; n-- { - res := <-c - if res.err == nil { - results[res.name] = "OK" - } else { - failed = true - results[res.name] = res.err.Error() - } - } - if failed { - keys := make([]string, 0, len(results)) - for k := range results { - keys = append(keys, k) - } - sort.Strings(keys) - msgs := make([]string, len(results)) - for i, k := range keys { - msgs[i] = fmt.Sprintf("[%s: %s]", k, results[k]) - } - return nil, errgo.Newf("check failure: %s", strings.Join(msgs, " ")) - } - return results, nil - }) -} - -func checkDB(db *mgo.Database) func() error { - return func() error { - s := db.Session.Copy() - s.SetSyncTimeout(500 * time.Millisecond) - defer s.Close() - return s.Ping() - } -} - -func checkES(si *SearchIndex) func() error { - if si == nil || si.Database == nil { - return func() error { - return nil - } - } - return func() error { - _, err := si.Health() - return err - } -} - -// GET /debug/fullcheck -func debugFullCheck(hnd http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - code := http.StatusInternalServerError - resp := new(bytes.Buffer) - defer func() { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(code) - resp.WriteTo(w) - }() - - fmt.Fprintln(resp, "Testing v4...") - - // test search - fmt.Fprintln(resp, "performing search...") - var sr params.SearchResponse - if err := get(hnd, "/v4/search?limit=2000", &sr); err != nil { - fmt.Fprintf(resp, "ERROR: search failed %s.\n", err) - return - } - if len(sr.Results) < 1 { - fmt.Fprintln(resp, "ERROR: no search results found.") - return - } - fmt.Fprintf(resp, "%d results found.\n", len(sr.Results)) - - // pick random charm - id := sr.Results[rand.Intn(len(sr.Results))].Id - fmt.Fprintf(resp, "using %s.\n", id) - - // test content - fmt.Fprintln(resp, "reading manifest...") - url := "/v4/" + id.Path() + "/meta/manifest" - fmt.Fprintln(resp, url) - var files []params.ManifestFile - if err := get(hnd, url, &files); err != nil { - fmt.Fprintf(resp, "ERROR: cannot retrieve manifest: %s.\n", err) - return - } - if len(files) == 0 { - fmt.Fprintln(resp, "ERROR: 
manifest empty.") - return - } - fmt.Fprintf(resp, "%d files found.\n", len(files)) - - // Choose a file to access - expectFile := "metadata.yaml" - if id.Series == "bundle" { - expectFile = "bundle.yaml" - } - var file params.ManifestFile - // default to metadata.yaml - for _, f := range files { - if f.Name == expectFile { - file = f - break - } - } - // find a random file - for i := 0; i < 5; i++ { - f := files[rand.Intn(len(files))] - if f.Size <= 16*1024 { - file = f - break - } - } - fmt.Fprintf(resp, "using %s.\n", file.Name) - - // read the file - fmt.Fprintln(resp, "reading file...") - url = "/v4/" + id.Path() + "/archive/" + file.Name - fmt.Fprintln(resp, url) - var buf []byte - if err := get(hnd, url, &buf); err != nil { - fmt.Fprintf(resp, "ERROR: cannot retrieve file: %s.\n", err) - return - } - if int64(len(buf)) != file.Size { - fmt.Fprintf(resp, "ERROR: incorrect file size, expected: %d, received %d.\n", file.Size, len(buf)) - return - } - fmt.Fprintf(resp, "%d bytes received.\n", len(buf)) - - // check if the charm is promulgated - fmt.Fprintln(resp, "checking promulgated...") - url = "/v4/" + id.Path() + "/meta/promulgated" - fmt.Fprintln(resp, url) - var promulgated params.PromulgatedResponse - if err := get(hnd, url, &promulgated); err != nil { - fmt.Fprintf(resp, "ERROR: cannot retrieve promulgated: %s.\n", err) - return - } - if promulgated.Promulgated != (id.User == "") { - fmt.Fprintf(resp, "ERROR: incorrect promulgated response, expected: %v, received %v.\n", (id.User == ""), promulgated.Promulgated) - return - } - fmt.Fprintf(resp, "promulgated: %v.\n", promulgated.Promulgated) - - // check expand-id - fmt.Fprintln(resp, "checking expand-id...") - url = "/v4/" + id.Path() + "/expand-id" - fmt.Fprintln(resp, url) - var expanded []params.ExpandedId - if err := get(hnd, url, &expanded); err != nil { - fmt.Fprintf(resp, "ERROR: cannot expand-id: %s.\n", err) - return - } - if len(expanded) == 0 { - fmt.Fprintln(resp, "ERROR: expand-id returned 0 results") - return - } - fmt.Fprintf(resp, "%d ids found.\n", len(expanded)) - - code = http.StatusOK - }) -} - -func newServiceDebugHandler(p *Pool, c ServerParams, hnd http.Handler) http.Handler { - mux := router.NewServeMux() - mux.Handle("/info", router.HandleJSON(serveDebugInfo)) - mux.Handle("/check", debugCheck(map[string]func() error{ - "mongodb": checkDB(p.db.Database), - "elasticsearch": checkES(p.es), - })) - mux.Handle("/fullcheck", authorized(c, debugFullCheck(hnd))) - return mux -} - -func authorized(c ServerParams, h http.Handler) http.Handler { - return router.HandleErrors(func(w http.ResponseWriter, r *http.Request) error { - u, p, err := utils.ParseBasicAuthHeader(r.Header) - if err != nil { - return errgo.WithCausef(err, params.ErrUnauthorized, "") - } - if u != c.AuthUsername || p != c.AuthPassword { - return errgo.WithCausef(nil, params.ErrUnauthorized, "username or password mismatch") - } - h.ServeHTTP(w, r) - return nil - }) -} - -func get(h http.Handler, url string, body interface{}) error { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return errgo.Notef(err, "cannot create request") - } - w := httptest.NewRecorder() - h.ServeHTTP(w, req) - if w.Code != http.StatusOK { - if w.HeaderMap.Get("Content-Type") != "application/json" { - return errgo.Newf("bad status %d", w.Code) - } - var e params.Error - if err := json.Unmarshal(w.Body.Bytes(), &e); err != nil { - return errgo.Notef(err, "cannot decode error") - } - return &e - } - if body == nil { - return nil - } - if bytes, ok := 
body.(*[]byte); ok { - *bytes = w.Body.Bytes() - return nil - } - if w.HeaderMap.Get("Content-Type") == "application/json" { - if err := json.Unmarshal(w.Body.Bytes(), body); err != nil { - return errgo.Notef(err, "cannot decode body") - } - return nil - } - return errgo.Newf("cannot decode body") -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/debug_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/debug_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/debug_test.go 1970-01-01 00:00:00 +0000 @@ -1,100 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "errors" - "net/http" - - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" - appver "gopkg.in/juju/charmstore.v4/version" -) - -type debugSuite struct{} - -var _ = gc.Suite(&debugSuite{}) - -var debugCheckTests = []struct { - about string - checks map[string]func() error - expectStatus int - expectBody interface{} -}{{ - about: "no checks", - expectStatus: http.StatusOK, - expectBody: map[string]string{}, -}, { - about: "passing check", - checks: map[string]func() error{ - "pass": func() error { return nil }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]string{ - "pass": "OK", - }, -}, { - about: "failing check", - checks: map[string]func() error{ - "fail": func() error { return errors.New("test fail") }, - }, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "check failure: [fail: test fail]", - }, -}, { - about: "many pass", - checks: map[string]func() error{ - "pass1": func() error { return nil }, - "pass2": func() error { return nil }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]string{ - "pass1": "OK", - "pass2": "OK", - }, -}, { - about: "many fail", - checks: map[string]func() error{ - "fail1": func() error { return errors.New("test fail1") }, - "fail2": func() error { return errors.New("test fail2") }, - }, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "check failure: [fail1: test fail1] [fail2: test fail2]", - }, -}, { - about: "pass and fail", - checks: map[string]func() error{ - "pass": func() error { return nil }, - "fail": func() error { return errors.New("test fail") }, - }, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "check failure: [fail: test fail] [pass: OK]", - }, -}} - -func (s *debugSuite) TestDebugCheck(c *gc.C) { - for i, test := range debugCheckTests { - c.Logf("%d. %s", i, test.about) - hnd := debugCheck(test.checks) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: hnd, - ExpectStatus: test.expectStatus, - ExpectBody: test.expectBody, - }) - } -} - -func (s *debugSuite) TestDebugInfo(c *gc.C) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: router.HandleJSON(serveDebugInfo), - ExpectStatus: http.StatusOK, - ExpectBody: appver.VersionInfo, - }) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/elasticsearch.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/elasticsearch.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/elasticsearch.go 1970-01-01 00:00:00 +0000 @@ -1,351 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charmstore - -import "encoding/json" - -var ( - esIndex = mustParseJSON(esIndexJSON) - esMapping = mustParseJSON(esMappingJSON) -) - -const esSettingsVersion = 7 - -func mustParseJSON(s string) interface{} { - var j json.RawMessage - if err := json.Unmarshal([]byte(s), &j); err != nil { - panic(err) - } - return &j -} - -const esIndexJSON = ` -{ - "settings": { - "number_of_shards": 1, - "analysis": { - "filter": { - "n3_20grams_filter": { - "type": "nGram", - "min_gram": 3, - "max_gram": 20 - } - }, - "analyzer": { - "n3_20grams": { - "type": "custom", - "tokenizer": "standard", - "filter": [ - "lowercase", - "n3_20grams_filter" - ] - } - } - } - } -} -` - -const esMappingJSON = ` -{ - "entity" : { - "dynamic" : "false", - "properties" : { - "URL" : { - "type" : "multi_field", - "fields" : { - "URL" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "ngrams" : { - "type" : "string", - "analyzer" : "n3_20grams", - "include_in_all" : false - } - } - }, - "PromulgatedURL" : { - "type" : "string", - "index": "not_analyzed", - "index_options" : "docs" - }, - "BaseURL" : { - "type" : "string", - "index": "not_analyzed", - "index_options" : "docs" - }, - "User" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Name" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Revision" : { - "type" : "integer", - "index" : "not_analyzed" - }, - "Series" : { - "type" : "multi_field", - "fields" : { - "Series" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "ngrams" : { - "type" : "string", - "analyzer" : "n3_20grams", - "include_in_all" : false - } - } - }, - "TotalDownloads": { - "type": "long" - }, - "BlobHash" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "UploadTime" : { - "type" : "date", - "format" : "dateOptionalTime" - }, - "CharmMeta" : { - "dynamic" : "false", - "properties" : { - "Name" : { - "type" : "multi_field", - "fields" : { - "Name" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "ngrams" : { - "type" : "string", - "analyzer" : "n3_20grams", - "include_in_all" : false - } - } - }, - "Summary" : { - "type" : "string" - }, - "Description" : { - "type" : "string" - }, - "Provides" : { - "dynamic" : "false", - "properties" : { - "Name" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Role" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Interface" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Scope" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } - } - }, - "Requires" : { - "dynamic" : "false", - "properties" : { - "Name" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Role" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Interface" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Scope" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } 
- } - }, - "Peers" : { - "dynamic" : "false", - "properties" : { - "Name" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Role" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Interface" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Scope" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } - } - }, - "Categories" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "Tags" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } - } - }, - "charmactions" : { - "dynamic" : "false", - "properties" : { - "description" : { - "type" : "string" - }, - "action_name" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } - } - }, - "CharmProvidedInterfaces" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "CharmRequiredInterfaces" : { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - - - "BundleData" : { - "type": "object", - "dynamic": "false", - "properties" : { - "Services" : { - "type": "object", - "dynamic": "false", - "properties": { - "Charm": { - "type" : "string", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "NumUnits": { - "type" : "integer", - "index": "not_analyzed" - } - } - }, - "Series" : { - "type" : "string" - }, - "Relations" : { - "type" : "string", - "index": "not_analyzed" - }, - "Tags" : { - "type" : "string", - "index": "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } - } - }, - "BundleReadMe" : { - "type": "string", - "index": "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "BundleCharms": { - "type": "string", - "index": "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "BundleMachineCount": { - "type": "integer" - }, - "BundleUnitCount": { - "type": "integer" - }, - "TotalDownloads": { - "type": "long" - }, - "Public": { - "type": "boolean", - "index" : "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - }, - "ReadACLs" : { - "type" : "string", - "index": "not_analyzed", - "omit_norms" : true, - "index_options" : "docs" - } - } - } -} -` === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/export_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/export_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,6 +0,0 @@ -// Copyright 2013, 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -var TimeToStamp = timeToStamp === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/hash.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/hash.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/hash.go 1970-01-01 00:00:00 +0000 @@ -1,53 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// TODO frankban: remove this file after updating entities in the production db -// with their SHA256 hash value. Entities are updated by running the cshash256 -// command. 
-
-package charmstore
-
-import (
-    "crypto/sha256"
-    "fmt"
-    "io"
-
-    "gopkg.in/errgo.v1"
-    "gopkg.in/mgo.v2"
-    "gopkg.in/mgo.v2/bson"
-
-    "gopkg.in/juju/charmstore.v4/internal/router"
-)
-
-// UpdateEntitySHA256 calculates and returns the SHA256 hash of the archive
-// for the given entity id. The entity document is then asynchronously updated
-// with the resulting hash. This method will be removed soon.
-func (s *Store) UpdateEntitySHA256(id *router.ResolvedURL) (string, error) {
-    r, _, _, err := s.OpenBlob(id)
-    if err != nil {
-        return "", errgo.Notef(err, "cannot open archive blob")
-    }
-    defer r.Close()
-    hash := sha256.New()
-    _, err = io.Copy(hash, r)
-    if err != nil {
-        return "", errgo.Notef(err, "cannot calculate sha256 of archive")
-    }
-    sum256 := fmt.Sprintf("%x", hash.Sum(nil))
-
-    // Update the entry asynchronously because it doesn't matter if it succeeds
-    // or fails, or if several instances of the charm store do it concurrently,
-    // and it doesn't need to be on the critical path for API endpoints.
-    s.Go(func(s *Store) {
-        UpdateEntitySHA256(s, id, sum256)
-    })
-
-    return sum256, nil
-}
-
-// UpdateEntitySHA256 updates the BlobHash256 entry for the entity.
-// It is defined as a variable so that it can be mocked in tests.
-// This function will be removed soon.
-var UpdateEntitySHA256 = func(store *Store, id *router.ResolvedURL, sum256 string) {
-    err := store.DB.Entities().UpdateId(&id.URL, bson.D{{"$set", bson.D{{"blobhash256", sum256}}}})
-    if err != nil && err != mgo.ErrNotFound {
-        logger.Errorf("cannot update sha256 of archive: %v", err)
-    }
-}

=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/migrations.go'
--- src/gopkg.in/juju/charmstore.v4/internal/charmstore/migrations.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/migrations.go 1970-01-01 00:00:00 +0000
@@ -1,224 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package charmstore
-
-import (
-    "gopkg.in/errgo.v1"
-    "gopkg.in/mgo.v2"
-    "gopkg.in/mgo.v2/bson"
-
-    "gopkg.in/juju/charmstore.v4/internal/mongodoc"
-    "gopkg.in/juju/charmstore.v4/params"
-)
-
-// migrations holds all the migration functions that are executed in the order
-// they are defined when the charm store server is started. Each migration is
-// associated with a name that is used to check whether the migration has been
-// already run. To introduce a new database migration, add the corresponding
-// migration name and function to this list, and update the
-// TestMigrateMigrationList test in migration_test.go adding the new name(s).
-// Note that migration names must be unique across the list.
-var migrations = []migration{{
-    name: "entity ids denormalization",
-    migrate: denormalizeEntityIds,
-}, {
-    name: "base entities creation",
-    migrate: createBaseEntities,
-}, {
-    name: "read acl creation",
-    migrate: populateReadACL,
-}, {
-    name: "write acl creation",
-    migrate: populateWriteACL,
-}}
-
-// migration holds a migration function with its corresponding name.
-type migration struct {
-    name string
-    migrate func(StoreDatabase) error
-}
-
-// migrate starts the migration process using the given database.
-func migrate(db StoreDatabase) error {
-    // Retrieve already executed migrations.
-    executed, err := getExecuted(db)
-    if err != nil {
-        return errgo.Mask(err)
-    }
-
-    // Execute required migrations.
-    for _, m := range migrations {
-        if executed[m.name] {
-            logger.Debugf("skipping already executed migration: %s", m.name)
-            continue
-        }
-        logger.Infof("starting migration: %s", m.name)
-        if err := m.migrate(db); err != nil {
-            return errgo.Notef(err, "error executing migration: %s", m.name)
-        }
-        if err := setExecuted(db, m.name); err != nil {
-            return errgo.Mask(err)
-        }
-        logger.Infof("migration completed: %s", m.name)
-    }
-    return nil
-}
-
-func getExecuted(db StoreDatabase) (map[string]bool, error) {
-    // Retrieve the already executed migration names.
-    executed := make(map[string]bool)
-    var doc mongodoc.Migration
-    if err := db.Migrations().Find(nil).Select(bson.D{{"executed", 1}}).One(&doc); err != nil {
-        if err == mgo.ErrNotFound {
-            return executed, nil
-        }
-        return nil, errgo.Notef(err, "cannot retrieve executed migrations")
-    }
-
-    names := make(map[string]bool, len(migrations))
-    for _, m := range migrations {
-        names[m.name] = true
-    }
-    for _, name := range doc.Executed {
-        // Check that the already executed migrations are known.
-        if !names[name] {
-            return nil, errgo.Newf("found unknown migration %q; running old charm store code on newer charm store database?", name)
-        }
-        // Collect the name of the executed migration.
-        executed[name] = true
-    }
-    return executed, nil
-}
-
-func setExecuted(db StoreDatabase, name string) error {
-    if _, err := db.Migrations().Upsert(nil, bson.D{{
-        "$addToSet", bson.D{{"executed", name}},
-    }}); err != nil {
-        return errgo.Notef(err, "cannot add %s to executed migrations", name)
-    }
-    return nil
-}
-
-// denormalizeEntityIds adds the user, name, revision and series fields to
-// entities where those fields are missing.
-// This function is not supposed to be called directly.
-func denormalizeEntityIds(db StoreDatabase) error {
-    entities := db.Entities()
-    var entity mongodoc.Entity
-    iter := entities.Find(bson.D{{
-        // Use the name field to find entities that have not yet been migrated.
-        "name", bson.D{{"$exists", false}},
-    }}).Select(bson.D{{"_id", 1}}).Iter()
-    defer iter.Close()
-
-    for iter.Next(&entity) {
-        logger.Infof("updating %s", entity.URL)
-        if err := entities.UpdateId(entity.URL, bson.D{{
-            "$set", bson.D{
-                {"user", entity.URL.User},
-                {"name", entity.URL.Name},
-                {"revision", entity.URL.Revision},
-                {"series", entity.URL.Series},
-            },
-        }}); err != nil {
-            return errgo.Notef(err, "cannot denormalize entity id %s", entity.URL)
-        }
-    }
-    if err := iter.Close(); err != nil {
-        return errgo.Notef(err, "cannot iterate entities")
-    }
-    return nil
-}
-
-// createBaseEntities creates base entities for each entity in the database.
-func createBaseEntities(db StoreDatabase) error {
-    baseEntities := db.BaseEntities()
-    counter := 0
-
-    var entity mongodoc.Entity
-    iter := db.Entities().Find(nil).Select(bson.D{{"baseurl", 1}}).Iter()
-    defer iter.Close()
-
-    for iter.Next(&entity) {
-        baseEntity := &mongodoc.BaseEntity{
-            URL: entity.BaseURL,
-            Name: entity.BaseURL.Name,
-            User: entity.BaseURL.User,
-            Public: true,
-        }
-        err := baseEntities.Insert(baseEntity)
-        if err == nil {
-            counter++
-        } else if !mgo.IsDup(err) {
-            return errgo.Notef(err, "cannot create base entity %s", entity.BaseURL)
-        }
-    }
-    if err := iter.Close(); err != nil {
-        return errgo.Notef(err, "cannot iterate entities")
-    }
-    logger.Infof("%d base entities created", counter)
-    return nil
-}
-
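The migrate loop above is what lets several charm store units start up repeatedly and concurrently: a migration runs only when its name is missing from the recorded set, and setExecuted records the name as soon as the run succeeds. A standalone model of that bookkeeping (simplified: the real code persists the executed names in the migrations MongoDB collection, so the set survives restarts):

    package main

    import "fmt"

    // migration pairs a unique name with an idempotent migration function,
    // mirroring the migration struct above.
    type migration struct {
        name    string
        migrate func() error
    }

    // run executes each migration whose name has not been recorded yet,
    // recording names as it goes, so calling it twice is harmless.
    func run(ms []migration, executed map[string]bool) error {
        for _, m := range ms {
            if executed[m.name] {
                continue // already run by a previous (or concurrent) unit
            }
            if err := m.migrate(); err != nil {
                return fmt.Errorf("error executing migration: %s: %v", m.name, err)
            }
            executed[m.name] = true // the setExecuted equivalent
        }
        return nil
    }

    func main() {
        executed := make(map[string]bool)
        ms := []migration{{
            name:    "example migration", // hypothetical name, not in the real list
            migrate: func() error { fmt.Println("running"); return nil },
        }}
        fmt.Println(run(ms, executed)) // prints "running", then <nil>
        fmt.Println(run(ms, executed)) // second run skips it: just <nil>
    }

-// populateReadACL adds the read ACL to base entities not having it.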
-func populateReadACL(db StoreDatabase) error { - baseEntities := db.BaseEntities() - var entity mongodoc.BaseEntity - iter := baseEntities.Find(bson.D{{ - "$or", []bson.D{ - {{"acls", bson.D{{"$exists", false}}}}, - {{"acls.read", bson.D{{"$size", 0}}}}, - }, - }}).Select(bson.D{{"_id", 1}}).Iter() - - defer iter.Close() - - counter := 0 - for iter.Next(&entity) { - readPerm := everyonePerm - if entity.URL.User != "" { - readPerm = []string{params.Everyone, entity.URL.User} - } - if err := baseEntities.UpdateId(entity.URL, bson.D{{ - "$set", bson.D{{"acls.read", readPerm}}, - }}); err != nil { - return errgo.Notef(err, "cannot populate read ACL for base entity %s", entity.URL) - } - counter++ - } - if err := iter.Close(); err != nil { - return errgo.Notef(err, "cannot iterate base entities") - } - logger.Infof("%d base entities updated", counter) - return nil -} - -// populateWriteACL adds the write ACL to base entities not having the field. -func populateWriteACL(db StoreDatabase) error { - baseEntities := db.BaseEntities() - var entity mongodoc.BaseEntity - iter := baseEntities.Find(bson.D{{ - "acls.write", bson.D{{"$exists", false}}, - }, { - "user", bson.D{{"$ne", ""}}, - }}).Select(bson.D{{"_id", 1}}).Iter() - - defer iter.Close() - - counter := 0 - for iter.Next(&entity) { - if err := baseEntities.UpdateId(entity.URL, bson.D{{ - "$set", bson.D{{"acls.write", []string{entity.URL.User}}}, - }}); err != nil { - return errgo.Notef(err, "cannot populate write ACL for base entity %s", entity.URL) - } - counter++ - } - if err := iter.Close(); err != nil { - return errgo.Notef(err, "cannot iterate base entities") - } - logger.Infof("%d base entities updated", counter) - return nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/migrations_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/migrations_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/migrations_test.go 1970-01-01 00:00:00 +0000 @@ -1,870 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "net/http" - "sort" - "sync" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/params" -) - -type migrationsSuite struct { - storetesting.IsolatedMgoSuite - db StoreDatabase - executed []string -} - -var _ = gc.Suite(&migrationsSuite{}) - -func (s *migrationsSuite) SetUpTest(c *gc.C) { - s.IsolatedMgoSuite.SetUpTest(c) - s.db = StoreDatabase{s.Session.DB("migration-testing")} - s.executed = []string{} -} - -func (s *migrationsSuite) newServer(c *gc.C) error { - apiHandler := func(p *Pool, config ServerParams) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {}) - } - _, err := NewServer(s.db.Database, nil, serverParams, map[string]NewAPIHandlerFunc{ - "version1": apiHandler, - }) - return err -} - -// patchMigrations patches the charm store migration list with the given ms. -func (s *migrationsSuite) patchMigrations(c *gc.C, ms []migration) { - original := migrations - s.AddCleanup(func(*gc.C) { - migrations = original - }) - migrations = ms -} - -// makeMigrations generates default migrations using the given names, and then -// patches the charm store migration list with the generated ones. 
-func (s *migrationsSuite) makeMigrations(c *gc.C, names ...string) { - ms := make([]migration, len(names)) - for i, name := range names { - name := name - ms[i] = migration{ - name: name, - migrate: func(StoreDatabase) error { - s.executed = append(s.executed, name) - return nil - }, - } - } - s.patchMigrations(c, ms) -} - -func (s *migrationsSuite) TestMigrate(c *gc.C) { - // Create migrations. - names := []string{"migr-1", "migr-2"} - s.makeMigrations(c, names...) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // The two migrations have been correctly executed in order. - c.Assert(s.executed, jc.DeepEquals, names) - - // The migration document in the db reports that the execution is done. - s.checkExecuted(c, names...) - - // Restart the server again and check migrations this time are not run. - err = s.newServer(c) - c.Assert(err, gc.IsNil) - c.Assert(s.executed, jc.DeepEquals, names) - s.checkExecuted(c, names...) -} - -func (s *migrationsSuite) TestMigrateNoMigrations(c *gc.C) { - // Empty the list of migrations. - s.makeMigrations(c) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // No migrations were executed. - c.Assert(s.executed, gc.HasLen, 0) - s.checkExecuted(c) -} - -func (s *migrationsSuite) TestMigrateNewMigration(c *gc.C) { - // Simulate two migrations were already run. - err := setExecuted(s.db, "migr-1") - c.Assert(err, gc.IsNil) - err = setExecuted(s.db, "migr-2") - c.Assert(err, gc.IsNil) - - // Create migrations. - s.makeMigrations(c, "migr-1", "migr-2", "migr-3") - - // Start the server. - err = s.newServer(c) - c.Assert(err, gc.IsNil) - - // Only one migration has been executed. - c.Assert(s.executed, jc.DeepEquals, []string{"migr-3"}) - - // The migration document in the db reports that the execution is done. - s.checkExecuted(c, "migr-1", "migr-2", "migr-3") -} - -func (s *migrationsSuite) TestMigrateErrorUnknownMigration(c *gc.C) { - // Simulate that a migration was already run. - err := setExecuted(s.db, "migr-1") - c.Assert(err, gc.IsNil) - - // Create migrations, without including the already executed one. - s.makeMigrations(c, "migr-2", "migr-3") - - // Start the server. - err = s.newServer(c) - c.Assert(err, gc.ErrorMatches, `database migration failed: found unknown migration "migr-1"; running old charm store code on newer charm store database\?`) - - // No new migrations were executed. - c.Assert(s.executed, gc.HasLen, 0) - s.checkExecuted(c, "migr-1") -} - -func (s *migrationsSuite) TestMigrateErrorExecutingMigration(c *gc.C) { - ms := []migration{{ - name: "migr-1", - migrate: func(StoreDatabase) error { - return nil - }, - }, { - name: "migr-2", - migrate: func(StoreDatabase) error { - return errgo.New("bad wolf") - }, - }, { - name: "migr-3", - migrate: func(StoreDatabase) error { - return nil - }, - }} - s.patchMigrations(c, ms) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.ErrorMatches, "database migration failed: error executing migration: migr-2: bad wolf") - - // Only one migration has been executed. - s.checkExecuted(c, "migr-1") -} - -func (s *migrationsSuite) TestMigrateMigrationNames(c *gc.C) { - names := make(map[string]bool, len(migrations)) - for _, m := range migrations { - c.Assert(names[m.name], jc.IsFalse, gc.Commentf("multiple migrations named %q", m.name)) - names[m.name] = true - } -} - -func (s *migrationsSuite) TestMigrateMigrationList(c *gc.C) { - // When adding migration, update the list below, but never remove existing - // migrations. 
- existing := []string{ - "entity ids denormalization", - "base entities creation", - "read acl creation", - "write acl creation", - } - for i, name := range existing { - m := migrations[i] - c.Assert(m.name, gc.Equals, name) - } -} - -func (s *migrationsSuite) TestMigrateParallelMigration(c *gc.C) { - // This test uses real migrations to check they are idempotent and works - // well when done in parallel, for example when multiple charm store units - // are deployed together. - - // Prepare a database for the denormalizeEntityIds migration. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "", 12) - s.insertEntity(c, id2, "", 13) - - // Run the migrations in parallel. - var wg sync.WaitGroup - wg.Add(5) - errors := make(chan error, 5) - for i := 0; i < 5; i++ { - go func() { - errors <- s.newServer(c) - wg.Done() - }() - } - wg.Wait() - close(errors) - - // Check the server is correctly started in all the units. - for err := range errors { - c.Assert(err, gc.IsNil) - } - - // Ensure entities have been updated correctly by denormalizeEntityIds. - s.checkCount(c, s.db.Entities(), 2) - s.checkEntity(c, &mongodoc.Entity{ - URL: id1, - BaseURL: baseURL(id1), - User: "", - Name: "django", - Revision: 42, - Series: "trusty", - Size: 12, - }) - s.checkEntity(c, &mongodoc.Entity{ - URL: id2, - BaseURL: baseURL(id2), - User: "who", - Name: "rails", - Revision: 47, - Series: "utopic", - Size: 13, - }) -} - -func (s *migrationsSuite) checkExecuted(c *gc.C, expected ...string) { - var obtained []string - var doc mongodoc.Migration - if err := s.db.Migrations().Find(nil).One(&doc); err != mgo.ErrNotFound { - c.Assert(err, gc.IsNil) - obtained = doc.Executed - sort.Strings(obtained) - } - sort.Strings(expected) - c.Assert(obtained, jc.DeepEquals, expected) -} - -func getMigrations(names ...string) (ms []migration) { - for _, name := range names { - for _, m := range migrations { - if m.name == name { - ms = append(ms, m) - } - } - } - return ms -} - -func (s *migrationsSuite) TestDenormalizeEntityIds(c *gc.C) { - s.patchMigrations(c, getMigrations("entity ids denormalization")) - // Store entities with missing name in the db. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "", 12) - s.insertEntity(c, id2, "", 13) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure entities have been updated correctly. - s.checkCount(c, s.db.Entities(), 2) - s.checkEntity(c, &mongodoc.Entity{ - URL: id1, - BaseURL: baseURL(id1), - User: "", - Name: "django", - Revision: 42, - Series: "trusty", - Size: 12, - }) - s.checkEntity(c, &mongodoc.Entity{ - URL: id2, - BaseURL: baseURL(id2), - User: "who", - Name: "rails", - Revision: 47, - Series: "utopic", - Size: 13, - }) -} - -func (s *migrationsSuite) TestDenormalizeEntityIdsNoEntities(c *gc.C) { - s.patchMigrations(c, getMigrations("entity ids denormalization")) - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new entities are added in the process. - s.checkCount(c, s.db.Entities(), 0) -} - -func (s *migrationsSuite) TestDenormalizeEntityIdsNoUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("entity ids denormalization")) - // Store entities with a name in the db. 
- id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 21) - s.insertEntity(c, id2, "rails2", 22) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure entities have been updated correctly. - s.checkCount(c, s.db.Entities(), 2) - s.checkEntity(c, &mongodoc.Entity{ - URL: id1, - BaseURL: baseURL(id1), - User: "", - Name: "django", - // Since the name field already existed, the Revision and Series fields - // have not been populated. - Size: 21, - }) - s.checkEntity(c, &mongodoc.Entity{ - URL: id2, - BaseURL: baseURL(id2), - // The name is left untouched (even if it's obviously wrong). - Name: "rails2", - // Since the name field already existed, the User, Revision and Series - // fields have not been populated. - Size: 22, - }) -} - -func (s *migrationsSuite) TestDenormalizeEntityIdsSomeUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("entity ids denormalization")) - // Store entities with and without names in the db - id1 := charm.MustParseReference("~dalek/utopic/django-42") - id2 := charm.MustParseReference("~dalek/utopic/django-47") - id3 := charm.MustParseReference("precise/postgres-0") - s.insertEntity(c, id1, "", 1) - s.insertEntity(c, id2, "django", 2) - s.insertEntity(c, id3, "", 3) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure entities have been updated correctly. - s.checkCount(c, s.db.Entities(), 3) - s.checkEntity(c, &mongodoc.Entity{ - URL: id1, - BaseURL: baseURL(id1), - User: "dalek", - Name: "django", - Revision: 42, - Series: "utopic", - Size: 1, - }) - s.checkEntity(c, &mongodoc.Entity{ - URL: id2, - BaseURL: baseURL(id2), - Name: "django", - Size: 2, - }) - s.checkEntity(c, &mongodoc.Entity{ - URL: id3, - BaseURL: baseURL(id3), - User: "", - Name: "postgres", - Revision: 0, - Series: "precise", - Size: 3, - }) -} - -func (s *migrationsSuite) TestCreateBaseEntities(c *gc.C) { - s.patchMigrations(c, getMigrations("base entities creation")) - // Store entities with missing base in the db. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("trusty/django-47") - id3 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 12) - s.insertEntity(c, id2, "django", 12) - s.insertEntity(c, id3, "rails", 13) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure base entities have been created correctly. - s.checkCount(c, s.db.BaseEntities(), 2) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id1), - Name: "django", - Public: true, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id3), - User: "who", - Name: "rails", - Public: true, - }) -} - -func (s *migrationsSuite) TestCreateBaseEntitiesNoEntities(c *gc.C) { - s.patchMigrations(c, getMigrations("base entities creation")) - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new base entities are added in the process. - s.checkCount(c, s.db.BaseEntities(), 0) -} - -func (s *migrationsSuite) TestCreateBaseEntitiesNoUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("base entities creation")) - // Store entities with their corresponding base in the db. 
- id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 21) - s.insertEntity(c, id2, "rails2", 22) - s.insertBaseEntity(c, baseURL(id1), nil) - s.insertBaseEntity(c, baseURL(id2), nil) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new base entities are added in the process. - s.checkCount(c, s.db.BaseEntities(), 2) -} - -func (s *migrationsSuite) TestCreateBaseEntitiesSomeUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("base entities creation")) - // Store entities with and without bases in the db - id1 := charm.MustParseReference("~dalek/utopic/django-42") - id2 := charm.MustParseReference("~dalek/utopic/django-47") - id3 := charm.MustParseReference("precise/postgres-0") - s.insertEntity(c, id1, "django", 1) - s.insertEntity(c, id2, "django", 2) - s.insertEntity(c, id3, "postgres", 3) - s.insertBaseEntity(c, baseURL(id2), nil) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure missing base entities have been created correctly. - s.checkCount(c, s.db.BaseEntities(), 2) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id1), - User: "dalek", - Name: "django", - Public: true, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id3), - Name: "postgres", - Public: true, - }) -} - -func (s *migrationsSuite) TestPopulateReadACL(c *gc.C) { - s.patchMigrations(c, getMigrations("read acl creation")) - // Store entities with their base in the db. - // The base entities will not include any read permission. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("trusty/django-47") - id3 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 12) - s.insertEntity(c, id2, "django", 12) - s.insertEntity(c, id3, "rails", 13) - baseId1 := baseURL(id1) - baseId3 := baseURL(id3) - s.insertBaseEntity(c, baseId1, nil) - s.insertBaseEntity(c, baseId3, nil) - - // Ensure read permission is empty. - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId1, - Name: "django", - Public: true, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId3, - User: "who", - Name: "rails", - Public: true, - }) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure read permission has been correctly set. - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId1, - Name: "django", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{params.Everyone}, - }, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId3, - User: "who", - Name: "rails", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{params.Everyone, "who"}, - }, - }) -} - -func (s *migrationsSuite) TestCreateBaseEntitiesAndPopulateReadACL(c *gc.C) { - s.patchMigrations(c, getMigrations("base entities creation", "read acl creation")) - // Store entities with missing base in the db. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("trusty/django-47") - id3 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 12) - s.insertEntity(c, id2, "django", 12) - s.insertEntity(c, id3, "rails", 13) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure base entities have been created correctly. 
- s.checkCount(c, s.db.BaseEntities(), 2) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id1), - Name: "django", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{params.Everyone}, - }, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id3), - User: "who", - Name: "rails", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{params.Everyone, "who"}, - }, - }) -} - -func (s *migrationsSuite) TestPopulateReadACLNoEntities(c *gc.C) { - s.patchMigrations(c, getMigrations("read acl creation")) - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new base entities are added in the process. - s.checkCount(c, s.db.BaseEntities(), 0) -} - -func (s *migrationsSuite) TestPopulateReadACLNoUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("read acl creation")) - // Store entities with their corresponding base in the db. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 21) - s.insertEntity(c, id2, "rails2", 22) - s.insertBaseEntity(c, baseURL(id1), &mongodoc.ACL{ - Read: []string{"jean-luc"}, - }) - s.insertBaseEntity(c, baseURL(id2), &mongodoc.ACL{ - Read: []string{"who"}, - }) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new base entities are added in the process, and read - // permissions were not changed. - s.checkCount(c, s.db.BaseEntities(), 2) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id1), - Name: "django", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"jean-luc"}, - }, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id2), - User: "who", - Name: "rails", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"who"}, - }, - }) -} - -func (s *migrationsSuite) TestPopulateReadACLSomeUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("read acl creation")) - // Store entities with and without bases in the db - id1 := charm.MustParseReference("~dalek/utopic/django-42") - id2 := charm.MustParseReference("~dalek/utopic/django-47") - id3 := charm.MustParseReference("precise/postgres-0") - s.insertEntity(c, id1, "django", 1) - s.insertEntity(c, id2, "django", 2) - s.insertEntity(c, id3, "postgres", 3) - baseId1 := baseURL(id1) - baseId3 := baseURL(id3) - s.insertBaseEntity(c, baseId1, nil) - s.insertBaseEntity(c, baseId3, &mongodoc.ACL{ - Read: []string{"benjamin"}, - }) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure missing read permissions have been populated correctly. - s.checkCount(c, s.db.BaseEntities(), 2) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId1, - User: "dalek", - Name: "django", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{params.Everyone, "dalek"}, - }, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId3, - Name: "postgres", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"benjamin"}, - }, - }) -} - -func (s *migrationsSuite) TestPopulateWriteACL(c *gc.C) { - s.patchMigrations(c, getMigrations("write acl creation")) - // Store entities with their base in the db. - // The base entities will not include any write permission. 
- id1 := charm.MustParseReference("~who/trusty/django-42") - id2 := charm.MustParseReference("~who/django-47") - id3 := charm.MustParseReference("~dalek/utopic/rails-47") - s.insertEntity(c, id1, "django", 12) - s.insertEntity(c, id2, "django", 12) - s.insertEntity(c, id3, "rails", 13) - baseId1 := baseURL(id1) - baseId3 := baseURL(id3) - s.insertBaseEntity(c, baseId1, nil) - s.insertBaseEntity(c, baseId3, nil) - - // Ensure write permission is empty. - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId1, - User: "who", - Name: "django", - Public: true, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId3, - User: "dalek", - Name: "rails", - Public: true, - }) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure write permission has been correctly set. - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId1, - User: "who", - Name: "django", - Public: true, - ACLs: mongodoc.ACL{ - Write: []string{"who"}, - }, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseId3, - User: "dalek", - Name: "rails", - Public: true, - ACLs: mongodoc.ACL{ - Write: []string{"dalek"}, - }, - }) -} - -func (s *migrationsSuite) TestPopulateWriteACLNoEntities(c *gc.C) { - s.patchMigrations(c, getMigrations("write acl creation")) - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new base entities are added in the process. - s.checkCount(c, s.db.BaseEntities(), 0) -} - -func (s *migrationsSuite) TestPopulateWriteACLNoUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("write acl creation")) - // Store entities with their corresponding base in the db. - id1 := charm.MustParseReference("trusty/django-42") - id2 := charm.MustParseReference("~who/utopic/rails-47") - s.insertEntity(c, id1, "django", 21) - s.insertEntity(c, id2, "rails2", 22) - s.insertBaseEntity(c, baseURL(id1), nil) - s.insertBaseEntity(c, baseURL(id2), &mongodoc.ACL{ - Write: []string{"dalek"}, - }) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure no new base entities are added in the process, and write - // permissions were not changed. - s.checkCount(c, s.db.BaseEntities(), 2) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id1), - Name: "django", - Public: true, - }) - s.checkBaseEntity(c, &mongodoc.BaseEntity{ - URL: baseURL(id2), - User: "who", - Name: "rails", - Public: true, - ACLs: mongodoc.ACL{ - Write: []string{"dalek"}, - }, - }) -} - -func (s *migrationsSuite) TestPopulateWriteACLSomeUpdates(c *gc.C) { - s.patchMigrations(c, getMigrations("write acl creation")) - // Store entities with and without bases in the db - id1 := charm.MustParseReference("~dalek/utopic/django-42") - id2 := charm.MustParseReference("~dalek/utopic/django-47") - id3 := charm.MustParseReference("~jean-luc/precise/postgres-0") - s.insertEntity(c, id1, "django", 1) - s.insertEntity(c, id2, "django", 2) - s.insertEntity(c, id3, "postgres", 3) - baseId1 := baseURL(id1) - baseId3 := baseURL(id3) - s.insertBaseEntity(c, baseId1, nil) - s.insertBaseEntity(c, baseId3, &mongodoc.ACL{ - Write: []string{"benjamin"}, - }) - - // Start the server. - err := s.newServer(c) - c.Assert(err, gc.IsNil) - - // Ensure missing write permissions have been populated correctly. 
-    s.checkCount(c, s.db.BaseEntities(), 2)
-    s.checkBaseEntity(c, &mongodoc.BaseEntity{
-        URL: baseId1,
-        User: "dalek",
-        Name: "django",
-        Public: true,
-        ACLs: mongodoc.ACL{
-            Write: []string{"dalek"},
-        },
-    })
-    s.checkBaseEntity(c, &mongodoc.BaseEntity{
-        URL: baseId3,
-        User: "jean-luc",
-        Name: "postgres",
-        Public: true,
-        ACLs: mongodoc.ACL{
-            Write: []string{"benjamin"},
-        },
-    })
-}
-
-func (s *migrationsSuite) checkEntity(c *gc.C, expectEntity *mongodoc.Entity) {
-    var entity mongodoc.Entity
-    err := s.db.Entities().FindId(expectEntity.URL).One(&entity)
-    c.Assert(err, gc.IsNil)
-
-    // Ensure that the denormalized fields are now present, and the previously
-    // existing fields are still there.
-    c.Assert(&entity, jc.DeepEquals, expectEntity)
-}
-
-func (s *migrationsSuite) checkCount(c *gc.C, coll *mgo.Collection, expectCount int) {
-    count, err := coll.Count()
-    c.Assert(err, gc.IsNil)
-    c.Assert(count, gc.Equals, expectCount)
-}
-
-func (s *migrationsSuite) checkBaseEntity(c *gc.C, expectEntity *mongodoc.BaseEntity) {
-    var entity mongodoc.BaseEntity
-    err := s.db.BaseEntities().FindId(expectEntity.URL).One(&entity)
-    c.Assert(err, gc.IsNil)
-    c.Assert(&entity, jc.DeepEquals, expectEntity)
-}
-
-func (s *migrationsSuite) checkBaseEntitiesCount(c *gc.C, expectCount int) {
-    count, err := s.db.BaseEntities().Count()
-    c.Assert(err, gc.IsNil)
-    c.Assert(count, gc.Equals, expectCount)
-}
-
-func (s *migrationsSuite) insertEntity(c *gc.C, id *charm.Reference, name string, size int64) {
-    entity := &mongodoc.Entity{
-        URL: id,
-        BaseURL: baseURL(id),
-        Name: name,
-        Size: size,
-    }
-    err := s.db.Entities().Insert(entity)
-    c.Assert(err, gc.IsNil)
-
-    // Remove the denormalized fields if required.
-    if name != "" {
-        return
-    }
-    err = s.db.Entities().UpdateId(id, bson.D{{
-        "$unset", bson.D{
-            {"user", true},
-            {"name", true},
-            {"revision", true},
-            {"series", true},
-        },
-    }})
-    c.Assert(err, gc.IsNil)
-}
-
-func (s *migrationsSuite) insertBaseEntity(c *gc.C, id *charm.Reference, acls *mongodoc.ACL) {
-    entity := &mongodoc.BaseEntity{
-        URL: id,
-        Name: id.Name,
-        User: id.User,
-        Public: true,
-    }
-    if acls != nil {
-        entity.ACLs = *acls
-    }
-    err := s.db.BaseEntities().Insert(entity)
-    c.Assert(err, gc.IsNil)
-
-    // Unset the ACL fields if required to simulate a migration.
-    if acls == nil {
-        err = s.db.BaseEntities().UpdateId(id, bson.D{{"$unset",
-            bson.D{{"acls", true}},
-        }})
-        c.Assert(err, gc.IsNil)
-    }
-}

=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/package_test.go'
--- src/gopkg.in/juju/charmstore.v4/internal/charmstore/package_test.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/package_test.go 1970-01-01 00:00:00 +0000
@@ -1,14 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package charmstore_test
-
-import (
-    "testing"
-
-    jujutesting "github.com/juju/testing"
-)
-
-func TestPackage(t *testing.T) {
-    jujutesting.MgoTestPackage(t, nil)
-}
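The ACL tests above all exercise the same default policy from populateReadACL and populateWriteACL: entities are readable by everyone (plus the owner, when there is one) and writable only by the owner. A standalone sketch of that policy; the string "everyone" stands in for the value of params.Everyone, which is an assumption of this sketch:

    package main

    import "fmt"

    const everyone = "everyone" // stand-in for params.Everyone; the real value may differ

    // defaultACLs reproduces the defaults applied by the two migrations:
    // promulgated entities (empty user) are world-readable with no default
    // writer; user-owned entities add the owner to both lists.
    func defaultACLs(user string) (read, write []string) {
        read = []string{everyone}
        if user != "" {
            read = append(read, user)
            write = []string{user}
        }
        return read, write
    }

    func main() {
        r, w := defaultACLs("who")
        fmt.Println(r, w) // [everyone who] [who]
        r, w = defaultACLs("")
        fmt.Println(r, w) // [everyone] []
    }

=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/search.go'
--- src/gopkg.in/juju/charmstore.v4/internal/charmstore/search.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/search.go 1970-01-01 00:00:00 +0000
@@ -1,795 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.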
-
-package charmstore
-
-import (
-    "crypto/sha1"
-    "encoding/base64"
-    "encoding/json"
-    "strings"
-    "time"
-
-    "github.com/juju/utils"
-    "gopkg.in/errgo.v1"
-    "gopkg.in/juju/charm.v5"
-    "gopkg.in/mgo.v2"
-    "gopkg.in/mgo.v2/bson"
-
-    "gopkg.in/juju/charmstore.v4/internal/elasticsearch"
-    "gopkg.in/juju/charmstore.v4/internal/mongodoc"
-    "gopkg.in/juju/charmstore.v4/internal/router"
-    "gopkg.in/juju/charmstore.v4/params"
-)
-
-type SearchIndex struct {
-    *elasticsearch.Database
-    Index string
-}
-
-const typeName = "entity"
-
-// seriesBoost defines how much the results for each
-// series will be boosted. Series are currently ranked in
-// reverse order of LTS releases, followed by the latest
-// non-LTS release, followed by everything else.
-var seriesBoost = map[string]float64{
-    "bundle": 1.1255,
-    "trusty": 1.125,
-    "precise": 1.1125,
-    "utopic": 1.1,
-    "win2012hvr2": 1.1,
-    "win2012hv": 1.1,
-    "win2012r2": 1.1,
-    "win2012": 1.1,
-    "win7": 1.1,
-    "win8": 1.1,
-    "win81": 1.1,
-    "centos7": 1.1,
-}
-
-// deprecatedSeries are series that should not show up in search
-// results. This list is used to filter out the charms before they are
-// indexed.
-var deprecatedSeries = map[string]bool{
-    "oneiric": true,
-    "quantal": true,
-    "raring": true,
-    "saucy": true,
-}
-
-// SearchDoc is a mongodoc.Entity with additional fields useful for searching.
-// This is the document that is stored in the search index.
-type SearchDoc struct {
-    *mongodoc.Entity
-    TotalDownloads int64
-    ReadACLs []string
-}
-
-// UpdateSearchAsync will update the search record for the entity
-// reference r in the background.
-func (s *Store) UpdateSearchAsync(r *router.ResolvedURL) {
-    s.Go(func(s *Store) {
-        if err := s.UpdateSearch(r); err != nil {
-            logger.Errorf("cannot update search record for %v: %s", r, err)
-        }
-    })
-}
-
-// UpdateSearch updates the search record for the entity reference r.
-// The search index only includes the latest revision of each entity so
-// the latest revision of the charm specified by r will be indexed.
-func (s *Store) UpdateSearch(r *router.ResolvedURL) error {
-    if s.ES == nil || s.ES.Database == nil {
-        return nil
-    }
-    if deprecatedSeries[r.URL.Series] {
-        return nil
-    }
-
-    query := s.DB.Entities().Find(bson.D{
-        {"user", r.URL.User},
-        {"name", r.URL.Name},
-        {"series", r.URL.Series},
-    }).Sort("-revision")
-    var entity mongodoc.Entity
-    if err := query.One(&entity); err != nil {
-        if err == mgo.ErrNotFound {
-            return errgo.WithCausef(nil, params.ErrNotFound, "entity not found %s", r)
-        }
-        return errgo.Notef(err, "cannot get %s", r)
-    }
-    baseEntity, err := s.FindBaseEntity(entity.BaseURL)
-    if err != nil {
-        return errgo.Notef(err, "cannot get %s", entity.BaseURL)
-    }
-    if err := s.updateSearchEntity(&entity, baseEntity); err != nil {
-        return errgo.Notef(err, "cannot update search record for %q", entity.URL)
-    }
-    return nil
-}
-
-// UpdateSearchBaseURL updates the search record for all entities with
-// the specified base URL. It must be called whenever the entry for the
-// given URL in the BaseEntities collection has changed.
-func (s *Store) UpdateSearchBaseURL(baseURL *charm.Reference) error {
-    if s.ES == nil || s.ES.Database == nil {
-        return nil
-    }
-    if baseURL.Series != "" {
-        return errgo.New("base url cannot contain series")
-    }
-    if baseURL.Revision != -1 {
-        return errgo.New("base url cannot contain revision")
-    }
-    // From the entities with the specified base URL find the latest revision in
-    // each of the available series.
- // - // Note: It is possible to return the complete entity here and save some - // database round trips. Unfortunately the version of mongoDB we support - // (2.4) would require every field to be enumerated in this query, which - // would make it too fragile. - iter := s.DB.Entities().Pipe([]bson.D{ - {{"$match", bson.D{{"baseurl", baseURL}}}}, - {{"$sort", bson.D{{"revision", 1}}}}, - {{"$group", bson.D{ - {"_id", "$series"}, - {"url", bson.D{{"$last", "$_id"}}}, - }}}, - }).Iter() - defer iter.Close() - var result struct { - URL *charm.Reference - } - for iter.Next(&result) { - if deprecatedSeries[result.URL.Series] { - continue - } - if err := s.UpdateSearch(&router.ResolvedURL{URL: *result.URL, PromulgatedRevision: -1}); err != nil { - return errgo.Notef(err, "cannot update search record for %q", result.URL) - } - } - if err := iter.Close(); err != nil { - return errgo.Mask(err) - } - return nil -} - -func (s *Store) updateSearchEntity(entity *mongodoc.Entity, baseEntity *mongodoc.BaseEntity) error { - doc, err := s.searchDocFromEntity(entity, baseEntity) - if err != nil { - return errgo.Mask(err) - } - if err := s.ES.update(doc); err != nil { - return errgo.Notef(err, "cannot update search index") - } - return nil -} - -// UpdateSearchFields updates the search record for the entity reference r -// with the updated values in fields. -func (s *Store) UpdateSearchFields(r *router.ResolvedURL, fields map[string]interface{}) error { - if s.ES == nil || s.ES.Database == nil { - return nil - } - var needUpdate bool - for k := range fields { - // Add any additional fields here that should update the search index. - if k == "extrainfo.legacy-download-stats" { - needUpdate = true - } - } - if !needUpdate { - return nil - } - if err := s.UpdateSearch(r); err != nil { - return errgo.Mask(err) - } - return nil -} - -// searchDocFromEntity performs the processing required to convert a -// mongodoc.Entity and the corresponding mongodoc.BaseEntity to an esDoc -// for indexing. -func (s *Store) searchDocFromEntity(e *mongodoc.Entity, be *mongodoc.BaseEntity) (*SearchDoc, error) { - doc := SearchDoc{Entity: e} - doc.ReadACLs = be.ACLs.Read - // There should only be one record for the promulgated entity, which - // should be the latest promulgated revision. In the case that the base - // entity is not promulgated assume that there is a later promulgated - // entity. - if !be.Promulgated { - doc.Entity.PromulgatedURL = nil - doc.Entity.PromulgatedRevision = -1 - } - _, allRevisions, err := s.ArchiveDownloadCounts(EntityResolvedURL(e).PreferredURL()) - if err != nil { - return nil, errgo.Mask(err) - } - doc.TotalDownloads = allRevisions.Total - return &doc, nil -} - -// update inserts an entity into elasticsearch if elasticsearch -// is configured. The entity with id r is extracted from mongodb -// and written into elasticsearch. -func (si *SearchIndex) update(doc *SearchDoc) error { - if si == nil || si.Database == nil { - return nil - } - err := si.PutDocumentVersionWithType( - si.Index, - typeName, - si.getID(doc.URL), - int64(doc.URL.Revision), - elasticsearch.ExternalGTE, - doc) - if err != nil && err != elasticsearch.ErrConflict { - return errgo.Mask(err) - } - return nil -} - -// getID returns an ID for the elasticsearch document based on the contents of the -// mongoDB document. This is to allow elasticsearch documents to be replaced with -// updated versions when charm data is changed. 
-func (si *SearchIndex) getID(r *charm.Reference) string {
-    ref := *r
-    ref.Revision = -1
-    b := sha1.Sum([]byte(ref.String()))
-    s := base64.URLEncoding.EncodeToString(b[:])
-    // Cut off any trailing = as there is no need for them and they will get URL escaped.
-    return strings.TrimRight(s, "=")
-}
-
-// search searches for matching entities in the configured elasticsearch index.
-// If there is no elasticsearch index configured then it will return an empty
-// SearchResult, as if no results were found.
-func (si *SearchIndex) search(sp SearchParams) (SearchResult, error) {
-    if si == nil || si.Database == nil {
-        return SearchResult{}, nil
-    }
-    q := createSearchDSL(sp)
-    q.Fields = append(q.Fields, "URL", "PromulgatedURL")
-    esr, err := si.Search(si.Index, typeName, q)
-    if err != nil {
-        return SearchResult{}, errgo.Mask(err)
-    }
-    r := SearchResult{
-        SearchTime: time.Duration(esr.Took) * time.Millisecond,
-        Total: esr.Hits.Total,
-        Results: make([]*router.ResolvedURL, 0, len(esr.Hits.Hits)),
-    }
-    for _, h := range esr.Hits.Hits {
-        urlStr := h.Fields.GetString("URL")
-        url, err := charm.ParseReference(urlStr)
-        if err != nil {
-            return SearchResult{}, errgo.Notef(err, "invalid URL in result %q", urlStr)
-        }
-        id := &router.ResolvedURL{
-            URL: *url,
-        }
-
-        if purlStr := h.Fields.GetString("PromulgatedURL"); purlStr != "" {
-            purl, err := charm.ParseReference(purlStr)
-            if err != nil {
-                return SearchResult{}, errgo.Notef(err, "invalid promulgated URL in result %q", purlStr)
-            }
-            id.PromulgatedRevision = purl.Revision
-        } else {
-            id.PromulgatedRevision = -1
-        }
-        r.Results = append(r.Results, id)
-    }
-    return r, nil
-}
-
-// GetSearchDocument retrieves the current search record for the charm
-// reference id.
-func (si *SearchIndex) GetSearchDocument(id *charm.Reference) (*SearchDoc, error) {
-    if si == nil || si.Database == nil {
-        return &SearchDoc{}, nil
-    }
-    var s SearchDoc
-    err := si.GetDocument(si.Index, typeName, si.getID(id), &s)
-    if err != nil {
-        return nil, errgo.Notef(err, "cannot retrieve search document for %v", id)
-    }
-    return &s, nil
-}
-
-// version is a document that stores the settings version and the name of the
-// index currently in use in the elasticsearch database.
-type version struct {
-    Version int64
-    Index string
-}
-
-const versionIndex = ".versions"
-const versionType = "version"
-
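To make getID concrete: the revision is stripped before hashing, so every revision of an entity maps to one stable document ID, and indexing a new revision simply replaces the previous document. A standalone copy of the computation (the example URL is illustrative):

    package main

    import (
        "crypto/sha1"
        "encoding/base64"
        "fmt"
        "strings"
    )

    // searchID reproduces getID above for an already revision-less URL string.
    func searchID(revisionlessURL string) string {
        b := sha1.Sum([]byte(revisionlessURL))
        s := base64.URLEncoding.EncodeToString(b[:])
        return strings.TrimRight(s, "=") // drop padding that would be URL escaped
    }

    func main() {
        // cs:~charmers/precise/wordpress-23 and -24 both map to this ID.
        fmt.Println(searchID("cs:~charmers/precise/wordpress"))
    }

-// ensureIndexes makes sure that the required indexes exist and have the right
-// settings. If force is true then ensureIndexes will create new indexes irrespective
-// of the status of the current index.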
-func (si *SearchIndex) ensureIndexes(force bool) error { - if si == nil || si.Database == nil { - return nil - } - old, dv, err := si.getCurrentVersion() - if err != nil { - return errgo.Notef(err, "cannot get current version") - } - if !force && old.Version >= esSettingsVersion { - return nil - } - index, err := si.newIndex() - if err != nil { - return errgo.Notef(err, "cannot create index") - } - new := version{ - Version: esSettingsVersion, - Index: index, - } - updated, err := si.updateVersion(new, dv) - if err != nil { - return errgo.Notef(err, "cannot update version") - } - if !updated { - // Update failed so delete the new index - if err := si.DeleteIndex(index); err != nil { - return errgo.Notef(err, "cannot delete index") - } - return nil - } - // Update succeeded - update the aliases - if err := si.Alias(index, si.Index); err != nil { - return errgo.Notef(err, "cannot create alias") - } - // Delete the old unused index - if old.Index != "" { - if err := si.DeleteIndex(old.Index); err != nil { - return errgo.Notef(err, "cannot delete index") - } - } - return nil -} - -// getCurrentVersion gets the version of elasticsearch settings, if any -// that are deployed to elasticsearch. -func (si *SearchIndex) getCurrentVersion() (version, int64, error) { - var v version - d, err := si.GetESDocument(versionIndex, versionType, si.Index) - if err != nil && err != elasticsearch.ErrNotFound { - return version{}, 0, errgo.Notef(err, "cannot get settings version") - } - if d.Found { - if err := json.Unmarshal(d.Source, &v); err != nil { - return version{}, 0, errgo.Notef(err, "invalid version") - } - } - return v, d.Version, nil -} - -// newIndex creates a new index with current elasticsearch settings. -// The new Index will have a randomized name based on si.Index. -func (si *SearchIndex) newIndex() (string, error) { - uuid, err := utils.NewUUID() - if err != nil { - return "", errgo.Notef(err, "cannot create index name") - } - index := si.Index + "-" + uuid.String() - if err := si.PutIndex(index, esIndex); err != nil { - return "", errgo.Notef(err, "cannot set index settings") - } - if err := si.PutMapping(index, "entity", esMapping); err != nil { - return "", errgo.Notef(err, "cannot set index mapping") - } - return index, nil -} - -// updateVersion attempts to atomically update the document specifying the version of -// the elasticsearch settings. If it succeeds then err will be nil, if the update could not be -// made atomically then err will be elasticsearch.ErrConflict, otherwise err is a non-nil -// error. -func (si *SearchIndex) updateVersion(v version, dv int64) (bool, error) { - var err error - if dv == 0 { - err = si.CreateDocument(versionIndex, versionType, si.Index, v) - } else { - err = si.PutDocumentVersion(versionIndex, versionType, si.Index, dv, v) - } - if err != nil { - if errgo.Cause(err) == elasticsearch.ErrConflict { - return false, nil - } - return false, err - } - return true, nil -} - -// syncSearch populates the SearchIndex with all the data currently stored in -// mongodb. If the SearchIndex is not configured then this method returns a nil error. -func (s *Store) syncSearch() error { - if s.ES == nil || s.ES.Database == nil { - return nil - } - var result mongodoc.Entity - // Only get the IDs here, UpdateSearch will get the full document - // if it is in a series that is indexed. - iter := s.DB.Entities().Find(nil).Select(bson.M{"_id": 1, "promulgated-url": 1}).Iter() - defer iter.Close() // Make sure we always close on error. 
-    for iter.Next(&result) {
-        rurl := EntityResolvedURL(&result)
-        if err := s.UpdateSearch(rurl); err != nil {
-            return errgo.Notef(err, "cannot index %s", rurl)
-        }
-    }
-    if err := iter.Close(); err != nil {
-        return err
-    }
-    return nil
-}
-
-// SearchParams represents the search parameters used to search the store.
-type SearchParams struct {
-    // The text to use in the full text search query.
-    Text string
-    // If autocomplete is specified, the search will return only charms and
-    // bundles with a name that has text as a prefix.
-    AutoComplete bool
-    // Limit the search to items with attributes that match the specified filter value.
-    Filters map[string][]string
-    // Limit the number of returned items to the specified count.
-    Limit int
-    // Include the following metadata items in the search results.
-    Include []string
-    // Start the returned items at a specific offset.
-    Skip int
-    // ACL values to search in addition to everyone. ACL values may represent user names
-    // or group names.
-    Groups []string
-    // Admin searches will not filter on the ACL and will show results for all matching
-    // charms.
-    Admin bool
-    // Sort the returned items.
-    sort []sortParam
-}
-
-// ParseSortFields parses the given sort specifications, each a comma-separated
-// list of field names optionally prefixed with "-" for descending order, and
-// stores the result in sp.sort.
-func (sp *SearchParams) ParseSortFields(f ...string) error {
-    for _, s := range f {
-        for _, s := range strings.Split(s, ",") {
-            var sort sortParam
-            if strings.HasPrefix(s, "-") {
-                sort.Order = sortDescending
-                s = s[1:]
-            }
-            sort.Field = sortFields[s]
-            if sort.Field == "" {
-                return errgo.Newf("%s", s)
-            }
-            sp.sort = append(sp.sort, sort)
-        }
-    }
-
-    return nil
-}
-
-// sortOrder defines the order in which a field should be sorted.
-type sortOrder int
-
-const (
-    sortAscending sortOrder = iota
-    sortDescending
-)
-
-// sortParam represents a field and direction on which results should be sorted.
-type sortParam struct {
-    Field string
-    Order sortOrder
-}
-
-// sortFields contains a mapping from api fieldnames to the entity fields to search.
-var sortFields = map[string]string{
-    "name": "Name",
-    "owner": "User",
-    "series": "Series",
-    "downloads": "TotalDownloads",
-}
-
-// SearchResult represents the result of performing a search.
-type SearchResult struct {
-    SearchTime time.Duration
-    Total int
-    Results []*router.ResolvedURL
-}
-
-// queryFields provides a map of fields to weighting to use with the
-// elasticsearch query.
-func queryFields(sp SearchParams) map[string]float64 {
-    fields := map[string]float64{
-        "URL.ngrams": 8,
-        "CharmMeta.Categories": 5,
-        "CharmMeta.Tags": 5,
-        "BundleData.Tags": 5,
-        "Series.ngrams": 5,
-        "CharmProvidedInterfaces": 3,
-        "CharmRequiredInterfaces": 3,
-        "CharmMeta.Description": 1,
-        "BundleReadMe": 1,
-    }
-    if sp.AutoComplete {
-        fields["CharmMeta.Name.ngrams"] = 10
-    } else {
-        fields["CharmMeta.Name"] = 10
-    }
-    return fields
-}
-
-// encodeFields takes a map of field name to weight and builds a slice of strings
-// representing those weighted fields for a MultiMatchQuery.
-func encodeFields(fields map[string]float64) []string {
-    fs := make([]string, 0, len(fields))
-    for k, v := range fields {
-        fs = append(fs, elasticsearch.BoostField(k, v))
-    }
-    return fs
-}
-
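For reference, the specifications accepted by ParseSortFields above follow a small grammar: comma-separated API field names, each optionally prefixed with "-" for descending order, mapped through sortFields. A standalone model of the parse (the error message here is illustrative; the real method returns just the offending field name):

    package main

    import (
        "fmt"
        "strings"
    )

    // parseSort models ParseSortFields for a single specification string.
    func parseSort(spec string) ([]string, error) {
        allowed := map[string]string{ // api name -> indexed field, as in sortFields
            "name":      "Name",
            "owner":     "User",
            "series":    "Series",
            "downloads": "TotalDownloads",
        }
        var out []string
        for _, s := range strings.Split(spec, ",") {
            desc := strings.HasPrefix(s, "-")
            s = strings.TrimPrefix(s, "-")
            field, ok := allowed[s]
            if !ok {
                return nil, fmt.Errorf("unknown sort field %q", s)
            }
            if desc {
                field = "-" + field // descending order marker
            }
            out = append(out, field)
        }
        return out, nil
    }

    func main() {
        fmt.Println(parseSort("-downloads,name")) // [-TotalDownloads Name] <nil>
    }

-// createSearchDSL builds an elasticsearch query from the query parameters.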
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html -func createSearchDSL(sp SearchParams) elasticsearch.QueryDSL { - qdsl := elasticsearch.QueryDSL{ - From: sp.Skip, - Size: sp.Limit, - } - - // Full text search - var q elasticsearch.Query - if sp.Text == "" { - q = elasticsearch.MatchAllQuery{} - } else { - q = elasticsearch.MultiMatchQuery{ - Query: sp.Text, - Fields: encodeFields(queryFields(sp)), - } - } - - // Boosting - f := []elasticsearch.Function{ - // TODO(mhilton) review this function in future if downloads get sufficiently - // large that the order becomes undesirable. - elasticsearch.FieldValueFactorFunction{ - Field: "TotalDownloads", - Factor: 0.000001, - Modifier: "ln2p", - }, - elasticsearch.BoostFactorFunction{ - Filter: promulgatedFilter("1"), - BoostFactor: 1.25, - }, - } - for k, v := range seriesBoost { - f = append(f, elasticsearch.BoostFactorFunction{ - Filter: seriesFilter(k), - BoostFactor: v, - }) - } - q = elasticsearch.FunctionScoreQuery{ - Query: q, - Functions: f, - } - - // Filters - qdsl.Query = elasticsearch.FilteredQuery{ - Query: q, - Filter: createFilters(sp.Filters, sp.Admin, sp.Groups), - } - - // Sorting - for _, s := range sp.sort { - qdsl.Sort = append(qdsl.Sort, createSort(s)) - } - - return qdsl -} - -// createFilters converts the filters requested with the search API into -// filters in the elasticsearch query DSL. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search -// for details of how filters are specified in the API. For each key in f a -// filter is created that matches any one of the set of values specified for -// that key. The created filter will only match when at least one of the -// requested values matches for all of the requested keys. Any filter names -// that are not defined in the filters map will be silently skipped. -func createFilters(f map[string][]string, admin bool, groups []string) elasticsearch.Filter { - af := make(elasticsearch.AndFilter, 0, len(f)+1) - for k, vals := range f { - filter, ok := filters[k] - if !ok { - continue - } - of := make(elasticsearch.OrFilter, 0, len(vals)) - for _, v := range vals { - of = append(of, filter(v)) - } - af = append(af, of) - } - if admin { - return af - } - gf := make(elasticsearch.OrFilter, 0, len(groups)+1) - gf = append(gf, elasticsearch.TermFilter{ - Field: "ReadACLs", - Value: params.Everyone, - }) - for _, g := range groups { - gf = append(gf, elasticsearch.TermFilter{ - Field: "ReadACLs", - Value: g, - }) - } - af = append(af, gf) - return af -} - -// filters contains a mapping from a filter parameter in the API to a -// function that will generate an elasticsearch query DSL filter for the -// given value. -var filters = map[string]func(string) elasticsearch.Filter{ - "description": descriptionFilter, - "name": nameFilter, - "owner": ownerFilter, - "promulgated": promulgatedFilter, - "provides": termFilter("CharmProvidedInterfaces"), - "requires": termFilter("CharmRequiredInterfaces"), - "series": seriesFilter, - "summary": summaryFilter, - "tags": tagsFilter, - "type": typeFilter, -} - -// descriptionFilter generates a filter that will match against the -// description field of the charm data. -func descriptionFilter(value string) elasticsearch.Filter { - return elasticsearch.QueryFilter{ - Query: elasticsearch.MatchQuery{ - Field: "CharmMeta.Description", - Query: value, - Type: "phrase", - }, - } -} - -// nameFilter generates a filter that will match against the -// name of the charm or bundle. 
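createFilters above composes the requested filters into a fixed boolean shape: the values supplied for one key are OR-ed together, the per-key groups are AND-ed, and for non-admin searches one more OR group over ReadACLs (everyone plus the caller's groups) is AND-ed on. A standalone model of that shape for documents with single-valued fields:

    package main

    import "fmt"

    // matches models the AND-of-ORs structure built by createFilters above.
    func matches(doc map[string]string, filters map[string][]string) bool {
        for key, vals := range filters {
            ok := false
            for _, v := range vals {
                if doc[key] == v {
                    ok = true // any one value satisfies this key
                    break
                }
            }
            if !ok {
                return false // every requested key must be satisfied
            }
        }
        return true
    }

    func main() {
        doc := map[string]string{"series": "trusty", "type": "charm"}
        fmt.Println(matches(doc, map[string][]string{
            "series": {"trusty", "precise"}, // OR within a key
        })) // true
        fmt.Println(matches(doc, map[string][]string{
            "series": {"utopic"}, "type": {"charm"}, // AND across keys
        })) // false
    }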
-func nameFilter(value string) elasticsearch.Filter { - return elasticsearch.QueryFilter{ - Query: elasticsearch.MatchQuery{ - Field: "Name", - Query: value, - Type: "phrase", - }, - } -} - -// ownerFilter generates a filter that will match against the -// owner taken from the URL. -func ownerFilter(value string) elasticsearch.Filter { - if value == "" { - return promulgatedFilter("1") - } - return elasticsearch.QueryFilter{ - Query: elasticsearch.MatchQuery{ - Field: "User", - Query: value, - Type: "phrase", - }, - } -} - -// promulgatedFilter generates a filter that will match against the -// existence of a promulgated URL. -func promulgatedFilter(value string) elasticsearch.Filter { - f := elasticsearch.ExistsFilter("PromulgatedURL") - if value == "1" { - return f - } - return elasticsearch.NotFilter{f} -} - -// seriesFilter generates a filter that will match against the -// series taken from the URL. -func seriesFilter(value string) elasticsearch.Filter { - return elasticsearch.QueryFilter{ - Query: elasticsearch.MatchQuery{ - Field: "Series", - Query: value, - Type: "phrase", - }, - } -} - -// summaryFilter generates a filter that will match against the -// summary field from the charm data. -func summaryFilter(value string) elasticsearch.Filter { - return elasticsearch.QueryFilter{ - Query: elasticsearch.MatchQuery{ - Field: "CharmMeta.Summary", - Query: value, - Type: "phrase", - }, - } -} - -// tagsFilter generates a filter that will match against the "tags" field -// in the data. For charms this is the Categories field and for bundles this -// is the Tags field. -func tagsFilter(value string) elasticsearch.Filter { - tags := strings.Split(value, " ") - af := make(elasticsearch.AndFilter, 0, len(tags)) - for _, t := range tags { - if t == "" { - continue - } - af = append(af, elasticsearch.OrFilter{ - elasticsearch.TermFilter{ - Field: "CharmMeta.Categories", - Value: t, - }, - elasticsearch.TermFilter{ - Field: "CharmMeta.Tags", - Value: t, - }, - elasticsearch.TermFilter{ - Field: "BundleData.Tags", - Value: t, - }, - }) - } - return af -} - -// termFilter creates a function that generates a filter on the specified -// document field. -func termFilter(field string) func(string) elasticsearch.Filter { - return func(value string) elasticsearch.Filter { - terms := strings.Split(value, " ") - af := make(elasticsearch.AndFilter, 0, len(terms)) - for _, t := range terms { - if t == "" { - continue - } - af = append(af, elasticsearch.TermFilter{ - Field: field, - Value: t, - }) - } - return af - } -} - -// bundleFilter is a filter that matches against bundles, based on -// the URL. -var bundleFilter = seriesFilter("bundle") - -// typeFilter generates a filter that is used to match either only charms, -// or only bundles. -func typeFilter(value string) elasticsearch.Filter { - if value == "bundle" { - return bundleFilter - } - return elasticsearch.NotFilter{bundleFilter} -} - -// createSort creates an elasticsearch.Sort query parameter out of a Sort parameter. 
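The typeFilter above relies on a storage convention rather than a dedicated field: bundles live under the pseudo-series "bundle", so a "type=bundle" filter reduces to a series match and "type=charm" to its negation. A standalone restatement of that rule:

    package main

    import "fmt"

    // isType mirrors typeFilter above: bundles are recognised purely by the
    // pseudo-series "bundle"; everything else counts as a charm.
    func isType(series, typ string) bool {
        if typ == "bundle" {
            return series == "bundle"
        }
        return series != "bundle" // "charm": anything that is not a bundle
    }

    func main() {
        fmt.Println(isType("bundle", "bundle")) // true
        fmt.Println(isType("trusty", "charm"))  // true
    }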
-func createSort(s sortParam) elasticsearch.Sort { - sort := elasticsearch.Sort{ - Field: s.Field, - Order: elasticsearch.Ascending, - } - if s.Order == sortDescending { - sort.Order = elasticsearch.Descending - } - return sort -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/search_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/search_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/search_test.go 1970-01-01 00:00:00 +0000 @@ -1,803 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "encoding/json" - "sort" - "strings" - "sync" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/params" -) - -type StoreSearchSuite struct { - storetesting.IsolatedMgoESSuite - store *Store - index SearchIndex -} - -var _ = gc.Suite(&StoreSearchSuite{}) - -func (s *StoreSearchSuite) SetUpTest(c *gc.C) { - s.IsolatedMgoESSuite.SetUpTest(c) - - // Temporarily set LegacyDownloadCountsEnabled to false, so that the real - // code path can be reached by tests in this suite. - // TODO (frankban): remove this block when removing the legacy counts - // logic. - original := LegacyDownloadCountsEnabled - LegacyDownloadCountsEnabled = false - s.AddCleanup(func(*gc.C) { - LegacyDownloadCountsEnabled = original - }) - - s.index = SearchIndex{s.ES, s.TestIndex} - s.ES.RefreshIndex(".versions") - pool, err := NewPool(s.Session.DB("foo"), &s.index, nil) - c.Assert(err, gc.IsNil) - s.store = pool.Store() - s.addCharmsToStore(c) - c.Assert(err, gc.IsNil) -} - -func (s *StoreSearchSuite) TearDownTest(c *gc.C) { - s.store.Close() - s.IsolatedMgoESSuite.TearDownTest(c) -} - -var newResolvedURL = router.MustNewResolvedURL - -var exportTestCharms = map[string]*router.ResolvedURL{ - "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), - "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), - "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), - "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), -} - -var exportTestBundles = map[string]*router.ResolvedURL{ - "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), -} - -var charmDownloadCounts = map[string]int{ - "wordpress": 0, - "wordpress-simple": 1, - "mysql": 3, - "varnish": 5, -} - -func (s *StoreSearchSuite) TestSuccessfulExport(c *gc.C) { - for name, ref := range exportTestCharms { - entity, err := s.store.FindEntity(ref) - c.Assert(err, gc.IsNil) - var actual json.RawMessage - err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL), &actual) - c.Assert(err, gc.IsNil) - readACLs := []string{ref.URL.User, params.Everyone} - if ref.URL.Name == "riak" { - readACLs = []string{ref.URL.User} - } - doc := SearchDoc{ - Entity: entity, - TotalDownloads: int64(charmDownloadCounts[name]), - ReadACLs: readACLs, - } - c.Assert(string(actual), jc.JSONEquals, doc) - } -} - -func (s *StoreSearchSuite) TestNoExportDeprecated(c *gc.C) { - charmArchive := storetesting.Charms.CharmDir("mysql") - url := newResolvedURL("cs:~charmers/saucy/mysql-4", -1) - err := s.store.AddCharmWithArchive(url, charmArchive) - c.Assert(err, gc.IsNil) - - var entity *mongodoc.Entity - err = 
s.store.DB.Entities().FindId("cs:~openstack-charmers/trusty/mysql-7").One(&entity) - c.Assert(err, gc.IsNil) - present, err := s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) - c.Assert(err, gc.IsNil) - c.Assert(present, gc.Equals, true) - - err = s.store.DB.Entities().FindId("cs:~charmers/saucy/mysql-4").One(&entity) - c.Assert(err, gc.IsNil) - present, err = s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) - c.Assert(err, gc.IsNil) - c.Assert(present, gc.Equals, false) -} - -func (s *StoreSearchSuite) TestExportOnlyLatest(c *gc.C) { - charmArchive := storetesting.Charms.CharmDir("wordpress") - url := newResolvedURL("cs:~charmers/precise/wordpress-24", -1) - err := s.store.AddCharmWithArchive(url, charmArchive) - c.Assert(err, gc.IsNil) - var expected, old *mongodoc.Entity - var actual json.RawMessage - err = s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-23").One(&old) - c.Assert(err, gc.IsNil) - err = s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-24").One(&expected) - c.Assert(err, gc.IsNil) - err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(old.URL), &actual) - c.Assert(err, gc.IsNil) - doc := SearchDoc{Entity: expected, ReadACLs: []string{"charmers", params.Everyone}} - c.Assert(string(actual), jc.JSONEquals, doc) -} - -func (s *StoreSearchSuite) TestExportSearchDocument(c *gc.C) { - var entity *mongodoc.Entity - var actual json.RawMessage - err := s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-23").One(&entity) - c.Assert(err, gc.IsNil) - doc := SearchDoc{Entity: entity, TotalDownloads: 4000} - err = s.store.ES.update(&doc) - c.Assert(err, gc.IsNil) - err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL), &actual) - c.Assert(err, gc.IsNil) - c.Assert(string(actual), jc.JSONEquals, doc) -} - -func (s *StoreSearchSuite) addCharmsToStore(c *gc.C) { - for name, url := range exportTestCharms { - charmArchive := storetesting.Charms.CharmDir(name) - cats := strings.Split(name, "-") - charmArchive.Meta().Categories = cats - tags := make([]string, len(cats)) - for i, s := range cats { - tags[i] = s + "TAG" - } - charmArchive.Meta().Tags = tags - err := s.store.AddCharmWithArchive(url, charmArchive) - c.Assert(err, gc.IsNil) - for i := 0; i < charmDownloadCounts[name]; i++ { - err := s.store.IncrementDownloadCounts(url) - c.Assert(err, gc.IsNil) - } - if url.URL.Name == "riak" { - continue - } - bURL := baseURL(&url.URL) - baseEntity, err := s.store.FindBaseEntity(bURL) - baseEntity.ACLs.Read = append(baseEntity.ACLs.Read, params.Everyone) - err = s.store.DB.BaseEntities().UpdateId(baseEntity.URL, baseEntity) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearchBaseURL(baseEntity.URL) - c.Assert(err, gc.IsNil) - } - for name, url := range exportTestBundles { - bundleArchive := storetesting.Charms.BundleDir(name) - bundleArchive.Data().Tags = strings.Split(name, "-") - err := s.store.AddBundleWithArchive(url, bundleArchive) - c.Assert(err, gc.IsNil) - for i := 0; i < charmDownloadCounts[name]; i++ { - err := s.store.IncrementDownloadCounts(url) - c.Assert(err, gc.IsNil) - } - bURL := baseURL(&url.URL) - baseEntity, err := s.store.FindBaseEntity(bURL) - baseEntity.ACLs.Read = append(baseEntity.ACLs.Read, params.Everyone) - err = s.store.DB.BaseEntities().UpdateId(baseEntity.URL, baseEntity) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearchBaseURL(baseEntity.URL) - c.Assert(err, gc.IsNil) - } -} - -var searchTests = []struct { - about string - sp 
SearchParams - results []*router.ResolvedURL - totalDiff int // len(results) + totalDiff = expected total -}{ - { - about: "basic text search", - sp: SearchParams{ - Text: "wordpress", - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "blank text search", - sp: SearchParams{ - Text: "", - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "autocomplete search", - sp: SearchParams{ - Text: "word", - AutoComplete: true, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "description filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "description": {"blog"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "name filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "name": {"wordpress"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "owner filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "owner": {"foo"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["varnish"], - }, - }, { - about: "provides filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "provides": {"mysql"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - }, - }, { - about: "requires filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "requires": {"mysql"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "series filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "series": {"trusty"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "summary filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "summary": {"Database engine"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "tags filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "tags": {"wordpress"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "bundle type filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "type": {"bundle"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - }, - }, { - about: "charm type filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "type": {"charm"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "charm & bundle type filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "type": {"charm", "bundle"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "invalid filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "no such filter": {"foo"}, - }, - }, - results: []*router.ResolvedURL{ - 
exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "valid & invalid filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "no such filter": {"foo"}, - "type": {"charm"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "paginated search", - sp: SearchParams{ - Filters: map[string][]string{ - "name": {"mysql"}, - }, - Skip: 1, - }, - totalDiff: +1, - }, { - about: "additional groups", - sp: SearchParams{ - Groups: []string{"charmers"}, - }, - results: []*router.ResolvedURL{ - exportTestCharms["riak"], - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "admin search", - sp: SearchParams{ - Admin: true, - }, - results: []*router.ResolvedURL{ - exportTestCharms["riak"], - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "charm tags filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "tags": {"wordpressTAG"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "blank owner filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "owner": {""}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "promulgated search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "promulgated": {"1"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "not promulgated search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "promulgated": {"0"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["varnish"], - }, - }, { - about: "owner and promulgated filter search", - sp: SearchParams{ - Text: "", - Filters: map[string][]string{ - "promulgated": {"1"}, - "owner": {"openstack-charmers"}, - }, - }, - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - }, - }, -} - -func (s *StoreSearchSuite) TestSearches(c *gc.C) { - s.store.ES.Database.RefreshIndex(s.TestIndex) - for i, test := range searchTests { - c.Logf("test %d: %s", i, test.about) - res, err := s.store.Search(test.sp) - c.Assert(err, gc.IsNil) - c.Logf("results: %v", res.Results) - sort.Sort(resolvedURLsByString(res.Results)) - sort.Sort(resolvedURLsByString(test.results)) - c.Assert(res.Results, jc.DeepEquals, test.results) - c.Assert(res.Total, gc.Equals, len(test.results)+test.totalDiff) - } -} - -type resolvedURLsByString []*router.ResolvedURL - -func (r resolvedURLsByString) Less(i, j int) bool { - return r[i].URL.String() < r[j].URL.String() -} - -func (r resolvedURLsByString) Swap(i, j int) { - r[i], r[j] = r[j], r[i] -} - -func (r resolvedURLsByString) Len() int { - return len(r) -} - -func (s *StoreSearchSuite) TestPaginatedSearch(c *gc.C) { - err := s.store.ES.Database.RefreshIndex(s.TestIndex) - c.Assert(err, gc.IsNil) - sp := SearchParams{ - Text: "wordpress", - Skip: 1, - } - res, err := s.store.Search(sp) - c.Assert(err, gc.IsNil) - c.Assert(res.Results, gc.HasLen, 1) - c.Assert(res.Total, gc.Equals, 2) -} - -func (s 
*StoreSearchSuite) TestLimitTestSearch(c *gc.C) { - err := s.store.ES.Database.RefreshIndex(s.TestIndex) - c.Assert(err, gc.IsNil) - sp := SearchParams{ - Text: "wordpress", - Limit: 1, - } - res, err := s.store.Search(sp) - c.Assert(err, gc.IsNil) - c.Assert(res.Results, gc.HasLen, 1) -} - -func (s *StoreSearchSuite) TestPromulgatedRank(c *gc.C) { - charmArchive := storetesting.Charms.CharmDir("varnish") - url := newResolvedURL("cs:~charmers/trusty/varnish-1", 1) - s.store.AddCharmWithArchive(url, charmArchive) - bURL := baseURL(&url.URL) - baseEntity, err := s.store.FindBaseEntity(bURL) - baseEntity.ACLs.Read = append(baseEntity.ACLs.Read, params.Everyone) - err = s.store.DB.BaseEntities().UpdateId(baseEntity.URL, baseEntity) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearchBaseURL(baseEntity.URL) - c.Assert(err, gc.IsNil) - s.store.ES.Database.RefreshIndex(s.TestIndex) - sp := SearchParams{ - Filters: map[string][]string{ - "name": {"varnish"}, - }, - } - res, err := s.store.Search(sp) - c.Assert(err, gc.IsNil) - c.Logf("results: %s", res.Results) - c.Assert(res.Results, jc.DeepEquals, []*router.ResolvedURL{ - url, - exportTestCharms["varnish"], - }) -} - -func (s *StoreSearchSuite) TestSorting(c *gc.C) { - s.store.ES.Database.RefreshIndex(s.TestIndex) - tests := []struct { - about string - sortQuery string - results []*router.ResolvedURL - }{{ - about: "name ascending", - sortQuery: "name", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "name descending", - sortQuery: "-name", - results: []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - exportTestCharms["wordpress"], - exportTestCharms["varnish"], - exportTestCharms["mysql"], - }, - }, { - about: "series ascending", - sortQuery: "series,name", - results: []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "series descending", - sortQuery: "-series,name", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "owner ascending", - sortQuery: "owner,name", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - exportTestCharms["varnish"], - exportTestCharms["mysql"], - }, - }, { - about: "owner descending", - sortQuery: "-owner,name", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "downloads ascending", - sortQuery: "downloads", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "downloads descending", - sortQuery: "-downloads", - results: []*router.ResolvedURL{ - exportTestCharms["varnish"], - exportTestCharms["mysql"], - exportTestBundles["wordpress-simple"], - exportTestCharms["wordpress"], - }, - }} - for i, test := range tests { - c.Logf("test %d. 
%s", i, test.about) - var sp SearchParams - err := sp.ParseSortFields(test.sortQuery) - c.Assert(err, gc.IsNil) - res, err := s.store.Search(sp) - c.Assert(err, gc.IsNil) - c.Assert(res.Results, jc.DeepEquals, test.results) - c.Assert(res.Total, gc.Equals, len(test.results)) - } -} - -func (s *StoreSearchSuite) TestBoosting(c *gc.C) { - s.store.ES.Database.RefreshIndex(s.TestIndex) - var sp SearchParams - res, err := s.store.Search(sp) - c.Assert(err, gc.IsNil) - c.Assert(res.Results, gc.HasLen, 4) - c.Logf("results: %s", res.Results) - c.Assert(res.Results, jc.DeepEquals, []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestCharms["varnish"], - }) -} - -func (s *StoreSearchSuite) TestEnsureIndex(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-ensure-index" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 0) - err = s.store.ES.ensureIndexes(false) - c.Assert(err, gc.Equals, nil) - indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 1) - index := indexes[0] - err = s.store.ES.ensureIndexes(false) - c.Assert(err, gc.Equals, nil) - indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 1) - c.Assert(indexes[0], gc.Equals, index) -} - -func (s *StoreSearchSuite) TestEnsureConcurrent(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-ensure-index-conc" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 0) - var wg sync.WaitGroup - wg.Add(1) - go func() { - err = s.store.ES.ensureIndexes(false) - c.Check(err, gc.Equals, nil) - wg.Done() - }() - err = s.store.ES.ensureIndexes(false) - c.Assert(err, gc.Equals, nil) - indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 1) - wg.Wait() -} - -func (s *StoreSearchSuite) TestEnsureIndexForce(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-ensure-index-force" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 0) - err = s.store.ES.ensureIndexes(false) - c.Assert(err, gc.Equals, nil) - indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 1) - index := indexes[0] - err = s.store.ES.ensureIndexes(true) - c.Assert(err, gc.Equals, nil) - indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) - c.Assert(err, gc.Equals, nil) - c.Assert(indexes, gc.HasLen, 1) - c.Assert(indexes[0], gc.Not(gc.Equals), index) -} - -func (s *StoreSearchSuite) TestGetCurrentVersionNoVersion(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-current-version" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - v, dv, err := s.store.ES.getCurrentVersion() - c.Assert(err, gc.Equals, nil) - c.Assert(v, gc.Equals, version{}) - c.Assert(dv, gc.Equals, int64(0)) -} - -func (s *StoreSearchSuite) TestGetCurrentVersionWithVersion(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-current-version" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - index, err := s.store.ES.newIndex() - 
c.Assert(err, gc.Equals, nil) - updated, err := s.store.ES.updateVersion(version{1, index}, 0) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, true) - v, dv, err := s.store.ES.getCurrentVersion() - c.Assert(err, gc.Equals, nil) - c.Assert(v, gc.Equals, version{1, index}) - c.Assert(dv, gc.Equals, int64(1)) -} - -func (s *StoreSearchSuite) TestUpdateVersionNew(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-update-version" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - index, err := s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err := s.store.ES.updateVersion(version{1, index}, 0) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, true) -} - -func (s *StoreSearchSuite) TestUpdateVersionUpdate(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-update-version" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - index, err := s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err := s.store.ES.updateVersion(version{1, index}, 0) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, true) - index, err = s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err = s.store.ES.updateVersion(version{2, index}, 1) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, true) -} - -func (s *StoreSearchSuite) TestUpdateCreateConflict(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-update-version" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - index, err := s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err := s.store.ES.updateVersion(version{1, index}, 0) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, true) - index, err = s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err = s.store.ES.updateVersion(version{1, index}, 0) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, false) -} - -func (s *StoreSearchSuite) TestUpdateConflict(c *gc.C) { - s.store.ES.Index = s.TestIndex + "-update-version" - defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) - index, err := s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err := s.store.ES.updateVersion(version{1, index}, 0) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, true) - index, err = s.store.ES.newIndex() - c.Assert(err, gc.Equals, nil) - updated, err = s.store.ES.updateVersion(version{1, index}, 3) - c.Assert(err, gc.Equals, nil) - c.Assert(updated, gc.Equals, false) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/server.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/server.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/server.go 1970-01-01 00:00:00 +0000 @@ -1,106 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// This is the internal version of the charmstore package. -// It exposes details to the various API packages -// that we do not wish to expose to the world at large. -package charmstore - -import ( - "net/http" - "strings" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/mgo.v2" - - "gopkg.in/juju/charmstore.v4/internal/router" -) - -// NewAPIHandlerFunc is a function that returns a new API handler that uses -// the given Store. -type NewAPIHandlerFunc func(*Pool, ServerParams) http.Handler - -// ServerParams holds configuration for a new internal API server. 
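A usage sketch, not part of the removed file (the "v4" version key, the v4.NewAPIHandler constructor and the listen address are assumptions): a caller fills in the credentials and passes NewServer, defined below, one handler constructor per API version:

    config := charmstore.ServerParams{
        AuthUsername: "admin",
        AuthPassword: password,
    }
    handler, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{
        "v4": v4.NewAPIHandler,
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Fatal(http.ListenAndServe(":8080", handler))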
-type ServerParams struct { - // AuthUsername and AuthPassword hold the credentials - // used for HTTP basic authentication. - AuthUsername string - AuthPassword string - - // IdentityLocation holds the location of the third party authorization - // service to use when creating third party caveats, - // for example: http://api.jujucharms.com/identity/v1/discharger - // If it is empty, IdentityURL+"/v1/discharger" will be used. - IdentityLocation string - - // PublicKeyLocator holds a public key store. - // It may be nil. - PublicKeyLocator bakery.PublicKeyLocator - - // IdentityAPIURL holds the URL of the identity manager, - // for example http://api.jujucharms.com/identity - IdentityAPIURL string - - // IdentityAPIUsername and IdentityAPIPassword hold the credentials - // to be used when querying the identity manager API. - IdentityAPIUsername string - IdentityAPIPassword string -} - -// NewServer returns a handler that serves the given charm store API -// versions using db to store that charm store data. -// An optional elasticsearch configuration can be specified in si. If -// elasticsearch is not being used then si can be set to nil. -// The key of the versions map is the version name. -// The handler configuration is provided to all version handlers. -func NewServer(db *mgo.Database, si *SearchIndex, config ServerParams, versions map[string]NewAPIHandlerFunc) (http.Handler, error) { - if len(versions) == 0 { - return nil, errgo.Newf("charm store server must serve at least one version of the API") - } - config.IdentityLocation = strings.Trim(config.IdentityLocation, "/") - config.IdentityAPIURL = strings.Trim(config.IdentityAPIURL, "/") - if config.IdentityLocation == "" && config.IdentityAPIURL != "" { - config.IdentityLocation = config.IdentityAPIURL + "/v1/discharger" - } - logger.Infof("identity discharge location: %s", config.IdentityLocation) - logger.Infof("identity API location: %s", config.IdentityAPIURL) - bparams := bakery.NewServiceParams{ - // TODO The location is attached to any macaroons that we - // mint. Currently we don't know the location of the current - // service. We potentially provide a way to configure this, - // but it probably doesn't matter, as nothing currently uses - // the macaroon location for anything. - Location: "charmstore", - Locator: config.PublicKeyLocator, - } - pool, err := NewPool(db, si, &bparams) - if err != nil { - return nil, errgo.Notef(err, "cannot make store") - } - store := pool.Store() - defer store.Close() - if err := migrate(store.DB); err != nil { - return nil, errgo.Notef(err, "database migration failed") - } - store.Go(func(store *Store) { - if err := store.syncSearch(); err != nil { - logger.Errorf("Cannot populate elasticsearch: %v", err) - } - }) - mux := router.NewServeMux() - // Version independent API. - handle(mux, "/debug", newServiceDebugHandler(pool, config, mux)) - for vers, newAPI := range versions { - handle(mux, "/"+vers, newAPI(pool, config)) - } - return mux, nil -} - -func handle(mux *router.ServeMux, path string, handler http.Handler) { - if path != "/" { - handler = http.StripPrefix(path, handler) - path += "/" - } - mux.Handle(path, handler) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/server_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/server_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/server_test.go 1970-01-01 00:00:00 +0000 @@ -1,143 +0,0 @@ -// Copyright 2014 Canonical Ltd. 
-// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "net/http" - - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" -) - -var serverParams = ServerParams{ - AuthUsername: "test-user", - AuthPassword: "test-password", -} - -type ServerSuite struct { - storetesting.IsolatedMgoESSuite -} - -var _ = gc.Suite(&ServerSuite{}) - -func (s *ServerSuite) TestNewServerWithNoVersions(c *gc.C) { - h, err := NewServer(s.Session.DB("foo"), nil, serverParams, nil) - c.Assert(err, gc.ErrorMatches, `charm store server must serve at least one version of the API`) - c.Assert(h, gc.IsNil) -} - -type versionResponse struct { - Version string - Path string -} - -func (s *ServerSuite) TestNewServerWithVersions(c *gc.C) { - db := s.Session.DB("foo") - serveVersion := func(vers string) NewAPIHandlerFunc { - return func(p *Pool, config ServerParams) http.Handler { - return router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return versionResponse{ - Version: vers, - Path: req.URL.Path, - }, nil - }) - } - } - - h, err := NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ - "version1": serveVersion("version1"), - }) - c.Assert(err, gc.IsNil) - assertServesVersion(c, h, "version1") - assertDoesNotServeVersion(c, h, "version2") - assertDoesNotServeVersion(c, h, "version3") - - h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ - "version1": serveVersion("version1"), - "version2": serveVersion("version2"), - }) - c.Assert(err, gc.IsNil) - assertServesVersion(c, h, "version1") - assertServesVersion(c, h, "version2") - assertDoesNotServeVersion(c, h, "version3") - - h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ - "version1": serveVersion("version1"), - "version2": serveVersion("version2"), - "version3": serveVersion("version3"), - }) - c.Assert(err, gc.IsNil) - assertServesVersion(c, h, "version1") - assertServesVersion(c, h, "version2") - assertServesVersion(c, h, "version3") - - h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ - "version1": serveVersion("version1"), - "": serveVersion(""), - }) - c.Assert(err, gc.IsNil) - assertServesVersion(c, h, "") - assertServesVersion(c, h, "version1") -} - -func (s *ServerSuite) TestNewServerWithConfig(c *gc.C) { - serveConfig := func(p *Pool, config ServerParams) http.Handler { - return router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return config, nil - }) - } - h, err := NewServer(s.Session.DB("foo"), nil, serverParams, map[string]NewAPIHandlerFunc{ - "version1": serveConfig, - }) - c.Assert(err, gc.IsNil) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: h, - URL: "/version1/some/path", - ExpectBody: serverParams, - }) -} - -func (s *ServerSuite) TestNewServerWithElasticSearch(c *gc.C) { - serveConfig := func(p *Pool, config ServerParams) http.Handler { - return router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return config, nil - }) - } - h, err := NewServer(s.Session.DB("foo"), &SearchIndex{s.ES, s.TestIndex}, serverParams, - map[string]NewAPIHandlerFunc{ - "version1": serveConfig, - }) - c.Assert(err, gc.IsNil) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: h, - URL: "/version1/some/path", - ExpectBody: serverParams, - }) -} - -func assertServesVersion(c *gc.C, h http.Handler, vers string) { - 
path := vers - if path != "" { - path = "/" + path - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: h, - URL: path + "/some/path", - ExpectBody: versionResponse{ - Version: vers, - Path: "/some/path", - }, - }) -} - -func assertDoesNotServeVersion(c *gc.C, h http.Handler, vers string) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: h, - URL: "/" + vers + "/some/path", - }) - c.Assert(rec.Code, gc.Equals, http.StatusNotFound) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/stats.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/stats.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/stats.go 1970-01-01 00:00:00 +0000 @@ -1,627 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "encoding/json" - "fmt" - "sort" - "strconv" - "strings" - "sync" - "time" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -type stats struct { - // Cache for statistics key words (two generations). - cacheMu sync.RWMutex - statsIdNew map[string]int - statsIdOld map[string]int - statsTokenNew map[int]string - statsTokenOld map[int]string -} - -// Note that changing the StatsGranularity constant -// will not change the stats time granularity - it -// is defined for external code clarity. - -// StatsGranularity holds the time granularity of statistics -// gathering. IncCounter(Async) calls within this duration -// may be aggregated. -const StatsGranularity = time.Minute - -// The stats mechanism uses the following MongoDB collections: -// -// juju.stat.counters - Counters for statistics -// juju.stat.tokens - Tokens used in statistics counter keys - -func (s StoreDatabase) StatCounters() *mgo.Collection { - return s.C("juju.stat.counters") -} - -func (s StoreDatabase) StatTokens() *mgo.Collection { - return s.C("juju.stat.tokens") -} - -// key returns the compound statistics identifier that represents key. -// If write is true, the identifier will be created if necessary. -// Identifiers have a form similar to "ab:c:def:", where each section is a -// base-32 number that represents the respective word in key. This form -// allows efficient indexing and searching for prefixes, while detaching -// the key content and size from the actual words used in key. -func (s *stats) key(db StoreDatabase, key []string, write bool) (string, error) { - if len(key) == 0 { - return "", errgo.New("store: empty statistics key") - } - tokens := db.StatTokens() - skey := make([]byte, 0, len(key)*4) - // Retry limit is mainly to prevent infinite recursion in edge cases, - // such as if the database is ever run in read-only mode. - // The logic below should deterministically stop in normal scenarios.
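// Worked example: for a three-word key whose words were assigned token
// ids 1, 2 and 35, the loop below appends each id in base 32 followed
// by a colon, producing "1:2:13:" (35 in base 32 is "13").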
- var err error - for i, retry := 0, 30; i < len(key) && retry > 0; retry-- { - err = nil - id, found := s.tokenId(key[i]) - if !found { - var t tokenId - err = tokens.Find(bson.D{{"t", key[i]}}).One(&t) - if err == mgo.ErrNotFound { - if !write { - return "", errgo.WithCausef(nil, params.ErrNotFound, "") - } - t.Id, err = tokens.Count() - if err != nil { - continue - } - t.Id++ - t.Token = key[i] - err = tokens.Insert(&t) - } - if err != nil { - continue - } - s.cacheTokenId(t.Token, t.Id) - id = t.Id - } - skey = strconv.AppendInt(skey, int64(id), 32) - skey = append(skey, ':') - i++ - } - if err != nil { - return "", err - } - return string(skey), nil -} - -const statsTokenCacheSize = 1024 - -type tokenId struct { - Id int `bson:"_id"` - Token string `bson:"t"` -} - -// cacheTokenId adds the id for token into the cache. -// The cache has two generations so that the least frequently used -// tokens are evicted regularly. -func (s *stats) cacheTokenId(token string, id int) { - s.cacheMu.Lock() - defer s.cacheMu.Unlock() - // Can't possibly be >, but reviews want it for defensiveness. - if len(s.statsIdNew) >= statsTokenCacheSize { - s.statsIdOld = s.statsIdNew - s.statsIdNew = nil - s.statsTokenOld = s.statsTokenNew - s.statsTokenNew = nil - } - if s.statsIdNew == nil { - s.statsIdNew = make(map[string]int, statsTokenCacheSize) - s.statsTokenNew = make(map[int]string, statsTokenCacheSize) - } - s.statsIdNew[token] = id - s.statsTokenNew[id] = token -} - -// tokenId returns the id for token from the cache, if found. -func (s *stats) tokenId(token string) (id int, found bool) { - s.cacheMu.RLock() - id, found = s.statsIdNew[token] - if found { - s.cacheMu.RUnlock() - return - } - id, found = s.statsIdOld[token] - s.cacheMu.RUnlock() - if found { - s.cacheTokenId(token, id) - } - return -} - -// idToken returns the token for id from the cache, if found. -func (s *stats) idToken(id int) (token string, found bool) { - s.cacheMu.RLock() - token, found = s.statsTokenNew[id] - if found { - s.cacheMu.RUnlock() - return - } - token, found = s.statsTokenOld[id] - s.cacheMu.RUnlock() - if found { - s.cacheTokenId(token, id) - } - return -} - -var counterEpoch = time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Unix() - -func timeToStamp(t time.Time) int32 { - return int32(t.Unix() - counterEpoch) -} - -// IncCounterAsync increases by one the counter associated with the composed -// key. The action is done in the background using a separate goroutine. -func (s *Store) IncCounterAsync(key []string) { - s.Go(func(s *Store) { - if err := s.IncCounter(key); err != nil { - logger.Errorf("cannot increase stats counter for key %v: %v", key, err) - } - }) -} - -// IncCounter increases by one the counter associated with the composed key. -func (s *Store) IncCounter(key []string) error { - return s.IncCounterAtTime(key, time.Now()) -} - -// IncCounterAtTime increases by one the counter associated with the composed -// key, associating it with the given time, which should be time.Now. -// This method is exposed for testing purposes only - production -// code should always call IncCounter or IncCounterAsync. -func (s *Store) IncCounterAtTime(key []string, t time.Time) error { - skey, err := s.stats.key(s.DB, key, true) - if err != nil { - return err - } - - // Round to the start of the minute so we get one document per minute at most. 
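// For example, increments at 12:03:10 and 12:03:55 both map to the
// 12:03 document, so the upsert below bumps a single document's "c"
// field twice rather than inserting a second document.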
- t = t.UTC().Add(-time.Duration(t.Second()) * time.Second) - counters := s.DB.StatCounters() - _, err = counters.Upsert(bson.D{{"k", skey}, {"t", timeToStamp(t)}}, bson.D{{"$inc", bson.D{{"c", 1}}}}) - return err -} - -// CounterRequest represents a request to aggregate counter values. -type CounterRequest struct { - // Key and Prefix determine the counter keys to match. - // If Prefix is false, Key must match exactly. Otherwise, counters - // must begin with Key and have at least one more key token. - Key []string - Prefix bool - - // If List is true, matching counters are aggregated under their - // prefixes instead of being returned as a single overall sum. - // - // For example, given the following counts: - // - // {"a", "b"}: 1, - // {"a", "c"}: 3 - // {"a", "c", "d"}: 5 - // {"a", "c", "e"}: 7 - // - // and assuming that Prefix is true, the following keys will - // present the respective results if List is true: - // - // {"a"} => {{"a", "b"}, 1, false}, - // {{"a", "c"}, 3, false}, - // {{"a", "c"}, 12, true} - // {"a", "c"} => {{"a", "c", "d"}, 5, false}, - // {{"a", "c", "e"}, 7, false} - // - // If List is false, the same key prefixes will present: - // - // {"a"} => {{"a"}, 16, true} - // {"a", "c"} => {{"a", "c"}, 12, true} - // - List bool - - // By defines the period covered by each aggregated data point. - // If unspecified, it defaults to ByAll, which aggregates all - // matching data points in a single entry. - By CounterRequestBy - - // Start, if provided, changes the query so that only data points - // occurring at the given time or afterwards are considered. - Start time.Time - - // Stop, if provided, changes the query so that only data points - // occurring at the given time or before are considered. - Stop time.Time -} - -type CounterRequestBy int - -const ( - ByAll CounterRequestBy = iota - ByDay - ByWeek -) - -type Counter struct { - Key []string - Prefix bool - Count int64 - Time time.Time -} - -// Counters aggregates and returns counter values according to the provided request. -func (s *Store) Counters(req *CounterRequest) ([]Counter, error) { - db := s.DB.Copy() - defer db.Close() - - tokensColl := db.StatTokens() - countersColl := db.StatCounters() - - searchKey, err := s.stats.key(db, req.Key, false) - if errgo.Cause(err) == params.ErrNotFound { - if !req.List { - return []Counter{{ - Key: req.Key, - Prefix: req.Prefix, - Count: 0, - }}, nil - } - return nil, nil - } - if err != nil { - return nil, errgo.Mask(err) - } - var regex string - if req.Prefix { - regex = "^" + searchKey + ".+" - } else { - regex = "^" + searchKey + "$" - } - - // This reduce function simply sums, for each emitted key, all the values found under it. - job := mgo.MapReduce{Reduce: "function(key, values) { return Array.sum(values); }"} - var emit string - switch req.By { - case ByDay: - emit = "emit(k+'@'+NumberInt(this.t/86400), this.c);" - case ByWeek: - emit = "emit(k+'@'+NumberInt(this.t/604800), this.c);" - default: - emit = "emit(k, this.c);" - } - if req.List && req.Prefix { - // For a search key "a:b:" matching a key "a:b:c:d:e:", this map function emits "a:b:c:*". - // For a search key "a:b:" matching a key "a:b:c:", it emits "a:b:c:". - // For a search key "a:b:" matching a key "a:b:", it emits "a:b:".
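// job.Scope defines variables that are visible inside the JavaScript
// map function, so the same function source can be parameterized with
// searchKeyLen or emitKey below instead of being rebuilt per request.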
- job.Scope = bson.D{{"searchKeyLen", len(searchKey)}} - job.Map = fmt.Sprintf(` - function() { - var k = this.k; - var i = k.indexOf(':', searchKeyLen)+1; - if (k.length > i) { k = k.substr(0, i)+'*'; } - %s - }`, emit) - } else { - // For a search key "a:b:" matching a key "a:b:c:d:e:", this map function emits "a:b:*". - // For a search key "a:b:" matching a key "a:b:c:", it also emits "a:b:*". - // For a search key "a:b:" matching a key "a:b:", it emits "a:b:". - emitKey := searchKey - if req.Prefix { - emitKey += "*" - } - job.Scope = bson.D{{"emitKey", emitKey}} - job.Map = fmt.Sprintf(` - function() { - var k = emitKey; - %s - }`, emit) - } - - var result []struct { - Key string `bson:"_id"` - Value int64 - } - var query, tquery bson.D - if !req.Start.IsZero() { - tquery = append(tquery, bson.DocElem{ - Name: "$gte", - Value: timeToStamp(req.Start), - }) - } - if !req.Stop.IsZero() { - tquery = append(tquery, bson.DocElem{ - Name: "$lte", - Value: timeToStamp(req.Stop), - }) - } - if len(tquery) == 0 { - query = bson.D{{"k", bson.D{{"$regex", regex}}}} - } else { - query = bson.D{{"k", bson.D{{"$regex", regex}}}, {"t", tquery}} - } - _, err = countersColl.Find(query).MapReduce(&job, &result) - if err != nil { - return nil, err - } - var counters []Counter - for i := range result { - key := result[i].Key - when := time.Time{} - if req.By != ByAll { - var stamp int64 - if at := strings.Index(key, "@"); at != -1 && len(key) > at+1 { - stamp, _ = strconv.ParseInt(key[at+1:], 10, 32) - key = key[:at] - } - if stamp == 0 { - return nil, errgo.Newf("internal error: bad aggregated key: %q", result[i].Key) - } - switch req.By { - case ByDay: - stamp = stamp * 86400 - case ByWeek: - // The +1 puts it at the end of the period. - stamp = (stamp + 1) * 604800 - } - when = time.Unix(counterEpoch+stamp, 0).In(time.UTC) - } - ids := strings.Split(key, ":") - tokens := make([]string, 0, len(ids)) - for i := 0; i < len(ids)-1; i++ { - if ids[i] == "*" { - continue - } - id, err := strconv.ParseInt(ids[i], 32, 32) - if err != nil { - return nil, errgo.Newf("store: invalid id: %q", ids[i]) - } - token, found := s.stats.idToken(int(id)) - if !found { - var t tokenId - err = tokensColl.FindId(id).One(&t) - if err == mgo.ErrNotFound { - return nil, errgo.Newf("store: internal error; token id not found: %d", id) - } - s.stats.cacheTokenId(t.Token, t.Id) - token = t.Token - } - tokens = append(tokens, token) - } - counter := Counter{ - Key: tokens, - Prefix: len(ids) > 0 && ids[len(ids)-1] == "*", - Count: result[i].Value, - Time: when, - } - counters = append(counters, counter) - } - if !req.List && len(counters) == 0 { - counters = []Counter{{Key: req.Key, Prefix: req.Prefix, Count: 0}} - } else if len(counters) > 1 { - sort.Sort(sortableCounters(counters)) - } - return counters, nil -} - -type sortableCounters []Counter - -func (s sortableCounters) Len() int { return len(s) } -func (s sortableCounters) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s sortableCounters) Less(i, j int) bool { - // Earlier times first. - if !s[i].Time.Equal(s[j].Time) { - return s[i].Time.Before(s[j].Time) - } - // Then larger counts first. - if s[i].Count != s[j].Count { - return s[j].Count < s[i].Count - } - // Then smaller/shorter keys first. - ki := s[i].Key - kj := s[j].Key - for n := range ki { - if n >= len(kj) { - return false - } - if ki[n] != kj[n] { - return ki[n] < kj[n] - } - } - if len(ki) < len(kj) { - return true - } - // Then full keys first. 
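// That is, with equal time, count and key tokens, an exact counter
// sorts before its prefix aggregate.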
- return !s[i].Prefix && s[j].Prefix -} - -// EntityStatsKey returns a stats key for the given charm or bundle -// reference and the given kind. -// Entity stats keys are generated using the following schema: -// kind:series:name:user:revision -// where user can be empty (for promulgated charms/bundles) and revision is -// optional (e.g. when uploading an entity the revision is not specified). -// For instance, entities' stats can then be retrieved like the following: -// - kind:utopic:* -> all charms of a specific series; -// - kind:trusty:django:* -> all revisions and user variations of a charm; -// - kind:trusty:django::* -> all revisions of a promulgated charm; -// - kind:trusty:django::42 -> a specific promulgated charm; -// - kind:trusty:django:who:* -> all revisions of a user owned charm; -// - kind:trusty:django:who:42 -> a specific user owned charm; -// The above also applies to bundles (where the series is "bundle"). -func EntityStatsKey(url *charm.Reference, kind string) []string { - key := []string{kind, url.Series, url.Name, url.User} - if url.Revision != -1 { - key = append(key, strconv.Itoa(url.Revision)) - } - return key -} - -// AggregatedCounts contains counts for a statistic aggregated over the -// lastDay, lastWeek, lastMonth and all time. -type AggregatedCounts struct { - LastDay, LastWeek, LastMonth, Total int64 -} - -// LegacyDownloadCountsEnabled represents whether aggregated download counts -// must be retrieved from the legacy infrastructure. In essence, if the value -// is true (enabled), aggregated counts are not calculated based on the data -// stored in the charm store stats; they are instead retrieved from the entity -// extra-info. For this reason, enabling this we assume an external program -// updated the extra-info for the entity, specifically the -// "legacy-download-stats" key. -// TODO (frankban): this is a temporary hack, and can be removed once we have -// a more consistent way to import the download counts from the legacy charm -// store (charms) and from charmworld (bundles). To remove the legacy download -// counts logic in the future, grep the code for "LegacyDownloadCountsEnabled" -// and remove as required. -var LegacyDownloadCountsEnabled = true - -// ArchiveDownloadCounts calculates the aggregated download counts for -// a charm or bundle. -func (s *Store) ArchiveDownloadCounts(id *charm.Reference) (thisRevision, allRevisions AggregatedCounts, err error) { - // Retrieve the aggregated stats. - thisRevision, err = s.aggregateStats(EntityStatsKey(id, params.StatsArchiveDownload), false) - if err != nil { - err = errgo.Notef(err, "cannot get aggregated count for the specific revision") - return - } - noRevisionId := *id - noRevisionId.Revision = -1 - allRevisions, err = s.aggregateStats(EntityStatsKey(&noRevisionId, params.StatsArchiveDownload), true) - if err != nil { - err = errgo.Notef(err, "cannot get aggregated count for all revisions") - return - } - // TODO (frankban): remove this condition when removing the legacy counts - // logic. 
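// When enabled, the legacy totals imported into the entity's
// extra-info are added on top of the aggregates computed above, for
// both the specific revision and the all-revisions counts.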
- if LegacyDownloadCountsEnabled { - legacyRevision, legacyAll, err := s.legacyDownloadCounts(id) - if err != nil { - return AggregatedCounts{}, AggregatedCounts{}, err - } - thisRevision.LastDay += legacyRevision.LastDay - thisRevision.LastWeek += legacyRevision.LastWeek - thisRevision.LastMonth += legacyRevision.LastMonth - thisRevision.Total += legacyRevision.Total - allRevisions.LastDay += legacyAll.LastDay - allRevisions.LastWeek += legacyAll.LastWeek - allRevisions.LastMonth += legacyAll.LastMonth - allRevisions.Total += legacyAll.Total - } - return -} - -// legacyDownloadCounts retrieves the aggregated stats from the entity -// extra-info. This is used when LegacyDownloadCountsEnabled is true. -// TODO (frankban): remove this method when removing the legacy counts logic. -func (s *Store) legacyDownloadCounts(id *charm.Reference) (AggregatedCounts, AggregatedCounts, error) { - counts := AggregatedCounts{} - entities, err := s.FindEntities(id, "extrainfo") - if err != nil { - return counts, counts, errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - if len(entities) == 0 { - return counts, counts, errgo.WithCausef(nil, params.ErrNotFound, "entity not found") - } - entity := entities[0] - data, ok := entity.ExtraInfo[params.LegacyDownloadStats] - if ok { - if err := json.Unmarshal(data, &counts.Total); err != nil { - return counts, counts, errgo.Notef(err, "cannot unmarshal extra-info value") - } - } - return counts, counts, nil -} - -// aggregatedStats returns the aggregated downloads counts for the given stats -// key. -func (s *Store) aggregateStats(key []string, prefix bool) (AggregatedCounts, error) { - var counts AggregatedCounts - - req := CounterRequest{ - Key: key, - By: ByDay, - Prefix: prefix, - } - results, err := s.Counters(&req) - if err != nil { - return counts, errgo.Notef(err, "cannot retrieve stats") - } - - today := time.Now() - lastDay := today.AddDate(0, 0, -1) - lastWeek := today.AddDate(0, 0, -7) - lastMonth := today.AddDate(0, -1, 0) - - // Aggregate the results. - for _, result := range results { - if result.Time.After(lastMonth) { - counts.LastMonth += result.Count - if result.Time.After(lastWeek) { - counts.LastWeek += result.Count - if result.Time.After(lastDay) { - counts.LastDay += result.Count - } - } - } - counts.Total += result.Count - } - return counts, nil -} - -// IncrementDownloadCountsAsync updates the download statistics for entity id in both -// the statistics database and the search database. The action is done in the -// background using a separate goroutine. -func (s *Store) IncrementDownloadCountsAsync(id *router.ResolvedURL) { - s.Go(func(s *Store) { - if err := s.IncrementDownloadCounts(id); err != nil { - logger.Errorf("cannot increase download counter for %v: %s", id, err) - } - }) -} - -// IncrementDownloadCounts updates the download statistics for entity id in both -// the statistics database and the search database. -func (s *Store) IncrementDownloadCounts(id *router.ResolvedURL) error { - key := EntityStatsKey(&id.URL, params.StatsArchiveDownload) - if err := s.IncCounter(key); err != nil { - return errgo.Notef(err, "cannot increase stats counter for %v", key) - } - if id.PromulgatedRevision == -1 { - // Check that the id really is for an unpromulgated entity. - // This unfortunately adds an extra round trip to the database, - // but as incrementing statistics is performed asynchronously - // it will not be in the critical path. 
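// Resolving the promulgated revision here allows the second counter
// below to be incremented as well: following the
// kind:series:name:user:revision schema documented on EntityStatsKey,
// a promulgated "cs:~charmers/precise/wordpress-23" bumps both the
// owner key (user "charmers") and the promulgated key (empty user).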
- entity, err := s.FindEntity(id, "promulgated-revision") - if err != nil { - return errgo.Notef(err, "cannot find entity %v", &id.URL) - } - id.PromulgatedRevision = entity.PromulgatedRevision - } - if id.PromulgatedRevision != -1 { - key := EntityStatsKey(id.PreferredURL(), params.StatsArchiveDownload) - if err := s.IncCounter(key); err != nil { - return errgo.Notef(err, "cannot increase stats counter for %v", key) - } - } - // TODO(mhilton) when this charmstore is being used by juju, find a more - // efficient way to update the download statistics for search. - if err := s.UpdateSearch(id); err != nil { - return errgo.Notef(err, "cannot update search record for %v", id) - } - return nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/stats_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/stats_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/stats_test.go 1970-01-01 00:00:00 +0000 @@ -1,713 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore_test - -import ( - "fmt" - "strconv" - "sync" - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/params" -) - -type StatsSuite struct { - storetesting.IsolatedMgoSuite - store *charmstore.Store -} - -var _ = gc.Suite(&StatsSuite{}) - -func (s *StatsSuite) SetUpTest(c *gc.C) { - s.IsolatedMgoSuite.SetUpTest(c) - pool, err := charmstore.NewPool(s.Session.DB("foo"), nil, nil) - c.Assert(err, gc.IsNil) - s.store = pool.Store() -} - -func (s *StatsSuite) TearDownTest(c *gc.C) { - s.store.Close() - s.IsolatedMgoSuite.TearDownTest(c) -} - -func (s *StatsSuite) TestSumCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - req := charmstore.CounterRequest{Key: []string{"a"}} - cs, err := s.store.Counters(&req) - c.Assert(err, gc.IsNil) - c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Count: 0}}) - - for i := 0; i < 10; i++ { - err := s.store.IncCounter([]string{"a", "b", "c"}) - c.Assert(err, gc.IsNil) - } - for i := 0; i < 7; i++ { - s.store.IncCounter([]string{"a", "b"}) - c.Assert(err, gc.IsNil) - } - for i := 0; i < 3; i++ { - s.store.IncCounter([]string{"a", "z", "b"}) - c.Assert(err, gc.IsNil) - } - - tests := []struct { - key []string - prefix bool - result int64 - }{ - {[]string{"a", "b", "c"}, false, 10}, - {[]string{"a", "b"}, false, 7}, - {[]string{"a", "z", "b"}, false, 3}, - {[]string{"a", "b", "c"}, true, 0}, - {[]string{"a", "b", "c", "d"}, false, 0}, - {[]string{"a", "b"}, true, 10}, - {[]string{"a"}, true, 20}, - {[]string{"b"}, true, 0}, - } - - for _, t := range tests { - c.Logf("Test: %#v\n", t) - req = charmstore.CounterRequest{Key: t.key, Prefix: t.prefix} - cs, err := s.store.Counters(&req) - c.Assert(err, gc.IsNil) - c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: t.key, Prefix: t.prefix, Count: t.result}}) - } - - // High-level interface works. Now check that the data is - // stored correctly. 
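// Three distinct keys were incremented above, so three counter
// documents normally exist; a fourth can appear if an increment
// straddled a minute boundary, which is why the check below accepts
// either count.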
- counters := s.store.DB.StatCounters() - docs1, err := counters.Count() - c.Assert(err, gc.IsNil) - if docs1 != 3 && docs1 != 4 { - fmt.Errorf("Expected 3 or 4 docs in counters collection, got %d", docs1) - } - - // Hack times so that the next operation adds another document. - err = counters.Update(nil, bson.D{{"$set", bson.D{{"t", 1}}}}) - c.Check(err, gc.IsNil) - - err = s.store.IncCounter([]string{"a", "b", "c"}) - c.Assert(err, gc.IsNil) - - docs2, err := counters.Count() - c.Assert(err, gc.IsNil) - c.Assert(docs2, gc.Equals, docs1+1) - - req = charmstore.CounterRequest{Key: []string{"a", "b", "c"}} - cs, err = s.store.Counters(&req) - c.Assert(err, gc.IsNil) - c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Count: 11}}) - - req = charmstore.CounterRequest{Key: []string{"a"}, Prefix: true} - cs, err = s.store.Counters(&req) - c.Assert(err, gc.IsNil) - c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Prefix: true, Count: 21}}) -} - -func (s *StatsSuite) TestCountersReadOnlySum(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - // Summing up an unknown key shouldn't add the key to the database. - req := charmstore.CounterRequest{Key: []string{"a", "b", "c"}} - _, err := s.store.Counters(&req) - c.Assert(err, gc.IsNil) - - tokens := s.Session.DB("juju").C("stat.tokens") - n, err := tokens.Count() - c.Assert(err, gc.IsNil) - c.Assert(n, gc.Equals, 0) -} - -func (s *StatsSuite) TestCountersTokenCaching(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - assertSum := func(i int, want int64) { - req := charmstore.CounterRequest{Key: []string{strconv.Itoa(i)}} - cs, err := s.store.Counters(&req) - c.Assert(err, gc.IsNil) - c.Assert(cs[0].Count, gc.Equals, want) - } - assertSum(100000, 0) - - const genSize = 1024 - - // All of these will be cached, as we have two generations - // of genSize entries each. - for i := 0; i < genSize*2; i++ { - err := s.store.IncCounter([]string{strconv.Itoa(i)}) - c.Assert(err, gc.IsNil) - } - - // Now go behind the scenes and corrupt all the tokens. - tokens := s.store.DB.StatTokens() - iter := tokens.Find(nil).Iter() - var t struct { - Id int "_id" - Token string "t" - } - for iter.Next(&t) { - err := tokens.UpdateId(t.Id, bson.M{"$set": bson.M{"t": "corrupted" + t.Token}}) - c.Assert(err, gc.IsNil) - } - c.Assert(iter.Err(), gc.IsNil) - - // We can consult the counters for the cached entries still. - // First, check that the newest generation is good. - for i := genSize; i < genSize*2; i++ { - assertSum(i, 1) - } - - // Now, we can still access a single entry of the older generation, - // but this will cause the generations to flip and thus the rest - // of the old generation will go away as the top half of the - // entries is turned into the old generation. - assertSum(0, 1) - - // Now we've lost access to the rest of the old generation. - for i := 1; i < genSize; i++ { - assertSum(i, 0) - } - - // But we still have all of the top half available since it was - // moved into the old generation. 
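// This works because reading entry 0 flipped the generations: the most
// recently written half (genSize..genSize*2-1) became the old
// generation, so it is still served from the cache even though the
// database tokens were corrupted.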
- for i := genSize; i < genSize*2; i++ { - assertSum(i, 1) - } -} - -func (s *StatsSuite) TestCounterTokenUniqueness(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - var wg0, wg1 sync.WaitGroup - wg0.Add(10) - wg1.Add(10) - for i := 0; i < 10; i++ { - go func() { - wg0.Done() - wg0.Wait() - defer wg1.Done() - err := s.store.IncCounter([]string{"a"}) - c.Check(err, gc.IsNil) - }() - } - wg1.Wait() - - req := charmstore.CounterRequest{Key: []string{"a"}} - cs, err := s.store.Counters(&req) - c.Assert(err, gc.IsNil) - c.Assert(cs[0].Count, gc.Equals, int64(10)) -} - -func (s *StatsSuite) TestListCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - incs := [][]string{ - {"c", "b", "a"}, // Assign internal id c < id b < id a, to make sorting slightly trickier. - {"a"}, - {"a", "c"}, - {"a", "b"}, - {"a", "b", "c"}, - {"a", "b", "c"}, - {"a", "b", "e"}, - {"a", "b", "d"}, - {"a", "f", "g"}, - {"a", "f", "h"}, - {"a", "i"}, - {"a", "i", "j"}, - {"k", "l"}, - } - for _, key := range incs { - err := s.store.IncCounter(key) - c.Assert(err, gc.IsNil) - } - - tests := []struct { - prefix []string - result []charmstore.Counter - }{ - { - []string{"a"}, - []charmstore.Counter{ - {Key: []string{"a", "b"}, Prefix: true, Count: 4}, - {Key: []string{"a", "f"}, Prefix: true, Count: 2}, - {Key: []string{"a", "b"}, Prefix: false, Count: 1}, - {Key: []string{"a", "c"}, Prefix: false, Count: 1}, - {Key: []string{"a", "i"}, Prefix: false, Count: 1}, - {Key: []string{"a", "i"}, Prefix: true, Count: 1}, - }, - }, { - []string{"a", "b"}, - []charmstore.Counter{ - {Key: []string{"a", "b", "c"}, Prefix: false, Count: 2}, - {Key: []string{"a", "b", "d"}, Prefix: false, Count: 1}, - {Key: []string{"a", "b", "e"}, Prefix: false, Count: 1}, - }, - }, { - []string{"z"}, - []charmstore.Counter(nil), - }, - } - - // Use a different store to exercise cache filling. - pool, err := charmstore.NewPool(s.store.DB.Database, nil, nil) - c.Assert(err, gc.IsNil) - st := pool.Store() - defer st.Close() - - for i := range tests { - req := &charmstore.CounterRequest{Key: tests[i].prefix, Prefix: true, List: true} - result, err := st.Counters(req) - c.Assert(err, gc.IsNil) - c.Assert(result, gc.DeepEquals, tests[i].result) - } -} - -func (s *StatsSuite) TestListCountersBy(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - incs := []struct { - key []string - day int - }{ - {[]string{"a"}, 1}, - {[]string{"a"}, 1}, - {[]string{"b"}, 1}, - {[]string{"a", "b"}, 1}, - {[]string{"a", "c"}, 1}, - {[]string{"a"}, 3}, - {[]string{"a", "b"}, 3}, - {[]string{"b"}, 9}, - {[]string{"b"}, 9}, - {[]string{"a", "c", "d"}, 9}, - {[]string{"a", "c", "e"}, 9}, - {[]string{"a", "c", "f"}, 9}, - } - - day := func(i int) time.Time { - return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) - } - - for i, inc := range incs { - t := day(inc.day) - // Ensure each entry is unique by adding - // a sufficient increment for each test. 
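// Since StatsGranularity is one minute, entry i lands i minutes into
// its day and never shares a per-minute counter document with another
// entry.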
- t = t.Add(time.Duration(i) * charmstore.StatsGranularity) - - err := s.store.IncCounterAtTime(inc.key, t) - c.Assert(err, gc.IsNil) - } - - tests := []struct { - request charmstore.CounterRequest - result []charmstore.Counter - }{ - { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: false, - List: false, - By: charmstore.ByDay, - }, - []charmstore.Counter{ - {Key: []string{"a"}, Prefix: false, Count: 2, Time: day(1)}, - {Key: []string{"a"}, Prefix: false, Count: 1, Time: day(3)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - }, - []charmstore.Counter{ - {Key: []string{"a"}, Prefix: true, Count: 2, Time: day(1)}, - {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, - {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(9)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - Start: day(2), - }, - []charmstore.Counter{ - {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, - {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(9)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - Stop: day(4), - }, - []charmstore.Counter{ - {Key: []string{"a"}, Prefix: true, Count: 2, Time: day(1)}, - {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - Start: day(3), - Stop: day(8), - }, - []charmstore.Counter{ - {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: true, - By: charmstore.ByDay, - }, - []charmstore.Counter{ - {Key: []string{"a", "b"}, Prefix: false, Count: 1, Time: day(1)}, - {Key: []string{"a", "c"}, Prefix: false, Count: 1, Time: day(1)}, - {Key: []string{"a", "b"}, Prefix: false, Count: 1, Time: day(3)}, - {Key: []string{"a", "c"}, Prefix: true, Count: 3, Time: day(9)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByWeek, - }, - []charmstore.Counter{ - {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(6)}, - {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(13)}, - }, - }, { - charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: true, - By: charmstore.ByWeek, - }, - []charmstore.Counter{ - {Key: []string{"a", "b"}, Prefix: false, Count: 2, Time: day(6)}, - {Key: []string{"a", "c"}, Prefix: false, Count: 1, Time: day(6)}, - {Key: []string{"a", "c"}, Prefix: true, Count: 3, Time: day(13)}, - }, - }, - } - - for _, test := range tests { - result, err := s.store.Counters(&test.request) - c.Assert(err, gc.IsNil) - c.Assert(result, gc.DeepEquals, test.result) - } -} - -type testStatsEntity struct { - id *router.ResolvedURL - lastDay int - lastWeek int - lastMonth int - total int - legacyTotal int -} - -var archiveDownloadCountsTests = []struct { - about string - charms []testStatsEntity - id *charm.Reference - expectThisRevision charmstore.AggregatedCounts - expectAllRevisions charmstore.AggregatedCounts -}{{ - about: "single revision", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, - }}, - id: charm.MustParseReference("~charmers/trusty/wordpress-0"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 1, 
- LastWeek: 3, - LastMonth: 6, - Total: 10, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 10, - }, -}, { - about: "single revision with legacy count", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 10, - }}, - id: charm.MustParseReference("~charmers/trusty/wordpress-0"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, -}, { - about: "multiple revisions", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, - }, { - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), - lastDay: 2, - lastWeek: 3, - lastMonth: 4, - total: 5, - legacyTotal: 0, - }}, - id: charm.MustParseReference("~charmers/trusty/wordpress-1"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 2, - LastWeek: 5, - LastMonth: 9, - Total: 14, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 3, - LastWeek: 8, - LastMonth: 15, - Total: 24, - }, -}, { - about: "multiple revisions with legacy count", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, - }, { - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), - lastDay: 2, - lastWeek: 3, - lastMonth: 4, - total: 5, - legacyTotal: 100, - }}, - id: charm.MustParseReference("~charmers/trusty/wordpress-1"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 2, - LastWeek: 5, - LastMonth: 9, - Total: 114, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 3, - LastWeek: 8, - LastMonth: 15, - Total: 124, - }, -}, { - about: "promulgated revision", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 0, - }}, - id: charm.MustParseReference("trusty/wordpress-0"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 10, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 10, - }, -}, { - about: "promulgated revision with legacy count", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 2, - lastMonth: 3, - total: 4, - legacyTotal: 10, - }}, - id: charm.MustParseReference("trusty/wordpress-0"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 3, - LastMonth: 6, - Total: 20, - }, -}, { - about: "promulgated revision with changed owner", - charms: []testStatsEntity{{ - id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), - lastDay: 1, - lastWeek: 10, - lastMonth: 100, - total: 1000, - legacyTotal: 0, - }, { - id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), - lastDay: 2, - lastWeek: 20, - lastMonth: 200, - total: 2000, - legacyTotal: 0, - }, { - id: 
charmstore.MustParseResolvedURL("~wordpress-charmers/trusty/wordpress-0"), - lastDay: 3, - lastWeek: 30, - lastMonth: 300, - total: 3000, - legacyTotal: 0, - }, { - id: charmstore.MustParseResolvedURL("1 ~wordpress-charmers/trusty/wordpress-1"), - lastDay: 4, - lastWeek: 40, - lastMonth: 400, - total: 4000, - legacyTotal: 0, - }}, - id: charm.MustParseReference("trusty/wordpress-1"), - expectThisRevision: charmstore.AggregatedCounts{ - LastDay: 4, - LastWeek: 44, - LastMonth: 444, - Total: 4444, - }, - expectAllRevisions: charmstore.AggregatedCounts{ - LastDay: 5, - LastWeek: 55, - LastMonth: 555, - Total: 5555, - }, -}} - -func (s *StatsSuite) TestArchiveDownloadCounts(c *gc.C) { - s.PatchValue(&charmstore.LegacyDownloadCountsEnabled, true) - for i, test := range archiveDownloadCountsTests { - c.Logf("%d: %s", i, test.about) - // Clear everything - s.store.DB.Entities().RemoveAll(nil) - s.store.DB.StatCounters().RemoveAll(nil) - for _, charm := range test.charms { - ch := storetesting.Charms.CharmDir(charm.id.URL.Name) - err := s.store.AddCharmWithArchive(charm.id, ch) - c.Assert(err, gc.IsNil) - url := charm.id.URL - now := time.Now() - setDownloadCounts(c, s.store, &url, now, charm.lastDay) - setDownloadCounts(c, s.store, &url, now.Add(-2*24*time.Hour), charm.lastWeek) - setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) - setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) - if charm.id.PromulgatedRevision > -1 { - url.Revision = charm.id.PromulgatedRevision - url.User = "" - setDownloadCounts(c, s.store, &url, now, charm.lastDay) - setDownloadCounts(c, s.store, &url, now.Add(-2*24*time.Hour), charm.lastWeek) - setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) - setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) - } - extraInfo := map[string][]byte{ - params.LegacyDownloadStats: []byte(fmt.Sprintf("%d", charm.legacyTotal)), - } - err = s.store.UpdateEntity(charm.id, bson.D{{ - "$set", bson.D{{"extrainfo", extraInfo}}, - }}) - c.Assert(err, gc.IsNil) - } - thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(test.id) - c.Assert(err, gc.IsNil) - c.Assert(thisRevision, jc.DeepEquals, test.expectThisRevision) - c.Assert(allRevisions, jc.DeepEquals, test.expectAllRevisions) - } -} - -func setDownloadCounts(c *gc.C, s *charmstore.Store, id *charm.Reference, t time.Time, n int) { - key := charmstore.EntityStatsKey(id, params.StatsArchiveDownload) - for i := 0; i < n; i++ { - err := s.IncCounterAtTime(key, t) - c.Assert(err, gc.IsNil) - } -} - -func (s *StatsSuite) TestIncrementDownloadCounts(c *gc.C) { - ch := storetesting.Charms.CharmDir("wordpress") - id := charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-1") - err := s.store.AddCharmWithArchive(id, ch) - c.Assert(err, gc.IsNil) - err = s.store.IncrementDownloadCounts(id) - c.Assert(err, gc.IsNil) - expect := charmstore.AggregatedCounts{ - LastDay: 1, - LastWeek: 1, - LastMonth: 1, - Total: 1, - } - thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseReference("~charmers/trusty/wordpress-1")) - c.Assert(err, gc.IsNil) - c.Assert(thisRevision, jc.DeepEquals, expect) - c.Assert(allRevisions, jc.DeepEquals, expect) - thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseReference("trusty/wordpress-0")) - c.Assert(err, gc.IsNil) - c.Assert(thisRevision, jc.DeepEquals, expect) - c.Assert(allRevisions, jc.DeepEquals, expect) -} === removed file 
'src/gopkg.in/juju/charmstore.v4/internal/charmstore/store.go'
--- src/gopkg.in/juju/charmstore.v4/internal/charmstore/store.go	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/store.go	1970-01-01 00:00:00 +0000
@@ -1,1129 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package charmstore
-
-import (
-	"archive/zip"
-	"crypto/sha256"
-	"encoding/json"
-	"fmt"
-	"io"
-	"time"
-
-	"github.com/juju/loggo"
-	"gopkg.in/errgo.v1"
-	"gopkg.in/juju/charm.v5"
-	"gopkg.in/macaroon-bakery.v0/bakery"
-	"gopkg.in/macaroon-bakery.v0/bakery/mgostorage"
-	"gopkg.in/mgo.v2"
-	"gopkg.in/mgo.v2/bson"
-
-	"gopkg.in/juju/charmstore.v4/internal/blobstore"
-	"gopkg.in/juju/charmstore.v4/internal/mongodoc"
-	"gopkg.in/juju/charmstore.v4/internal/router"
-	"gopkg.in/juju/charmstore.v4/params"
-)
-
-var logger = loggo.GetLogger("charmstore.internal.charmstore")
-
-// Pool holds a connection to the underlying charm and blob
-// data stores. Calling its Store method returns a new Store
-// from the pool that can be used to process short-lived requests
-// to access and modify the store.
-type Pool struct {
-	db        StoreDatabase
-	blobStore *blobstore.Store
-	es        *SearchIndex
-	Bakery    *bakery.Service
-	stats     stats
-}
-
-// NewPool returns a Pool that uses the given database
-// and search index. If bakeryParams is not nil,
-// the Bakery field in the resulting Store will be set
-// to a new Service that stores macaroons in mongo.
-func NewPool(db *mgo.Database, si *SearchIndex, bakeryParams *bakery.NewServiceParams) (*Pool, error) {
-	p := &Pool{
-		db:        StoreDatabase{db},
-		blobStore: blobstore.New(db, "entitystore"),
-		es:        si,
-	}
-	store := p.Store()
-	defer store.Close()
-	if err := store.ensureIndexes(); err != nil {
-		return nil, errgo.Notef(err, "cannot ensure indexes")
-	}
-	if err := store.ES.ensureIndexes(false); err != nil {
-		return nil, errgo.Notef(err, "cannot ensure elasticsearch indexes")
-	}
-	if bakeryParams != nil {
-		// NB we use the pool database here because its lifetime
-		// is indefinite.
-		macStore, err := mgostorage.New(p.db.Macaroons())
-		if err != nil {
-			return nil, errgo.Notef(err, "cannot create macaroon store")
-		}
-		bp := *bakeryParams
-		bp.Store = macStore
-		bsvc, err := bakery.NewService(bp)
-		if err != nil {
-			return nil, errgo.Notef(err, "cannot make bakery service")
-		}
-		p.Bakery = bsvc
-	}
-	return p, nil
-}
-
-// Store returns a Store that can be used to access the database.
-//
-// It must be closed (with the Close method) after use.
-func (p *Pool) Store() *Store {
-	s := &Store{
-		DB:        p.db.Copy(),
-		BlobStore: p.blobStore,
-		ES:        p.es,
-		Bakery:    p.Bakery,
-		stats:     &p.stats,
-		pool:      p,
-	}
-	logger.Tracef("pool %p -> copy %p", p.db.Session, s.DB.Session)
-	return s
-}
-
-// Store holds a connection to the underlying charm and blob
-// data stores that is appropriate for short-term use.
-type Store struct {
-	DB        StoreDatabase
-	BlobStore *blobstore.Store
-	ES        *SearchIndex
-	Bakery    *bakery.Service
-	stats     *stats
-	pool      *Pool
-	closed    bool
-}
-
-// Copy returns a new store with a lifetime
-// independent of s. Use this method if you
-// need to use a store in an independent goroutine.
-//
-// It must be closed (with the Close method) after use.
-func (s *Store) Copy() *Store {
-	s1 := *s
-	s1.DB = s.DB.Copy()
-	logger.Tracef("store %p -> copy %p", s.DB.Session, s1.DB.Session)
-	return &s1
-}
-
-// Close closes the store instance.
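-// A typical request-scoped pattern pairs Store with Close
-// (a sketch only; the pool is assumed to come from NewPool):
-//
-//	store := pool.Store()
-//	defer store.Close()
-//	entity, err := store.FindEntity(url)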
-func (s *Store) Close() {
-	logger.Tracef("store %p closed", s.DB.Session)
-	if s.closed {
-		logger.Errorf("session closed twice")
-		return
-	}
-	s.DB.Close()
-}
-
-// SetReconnectTimeout sets the length of time that
-// mongo requests will block waiting to reconnect
-// to a disconnected mongo server. If it is zero,
-// requests may block forever.
-func (s *Store) SetReconnectTimeout(d time.Duration) {
-	s.DB.Session.SetSyncTimeout(d)
-}
-
-// Go runs the given function in a new goroutine,
-// passing it a copy of s, which will be closed
-// after the function returns.
-func (s *Store) Go(f func(*Store)) {
-	s = s.Copy()
-	go func() {
-		defer s.Close()
-		f(s)
-	}()
-}
-
-// Pool returns the pool that the store originally
-// came from.
-func (s *Store) Pool() *Pool {
-	return s.pool
-}
-
-func (s *Store) ensureIndexes() error {
-	indexes := []struct {
-		c *mgo.Collection
-		i mgo.Index
-	}{{
-		s.DB.StatCounters(),
-		mgo.Index{Key: []string{"k", "t"}, Unique: true},
-	}, {
-		s.DB.StatTokens(),
-		mgo.Index{Key: []string{"t"}, Unique: true},
-	}, {
-		s.DB.Entities(),
-		mgo.Index{Key: []string{"baseurl"}},
-	}, {
-		s.DB.Entities(),
-		mgo.Index{Key: []string{"uploadtime"}},
-	}, {
-		s.DB.Entities(),
-		mgo.Index{Key: []string{"promulgated-url"}, Unique: true, Sparse: true},
-	}, {
-		s.DB.BaseEntities(),
-		mgo.Index{Key: []string{"public"}},
-	}, {
-		s.DB.Logs(),
-		mgo.Index{Key: []string{"urls"}},
-	}}
-	for _, idx := range indexes {
-		err := idx.c.EnsureIndex(idx.i)
-		if err != nil {
-			return errgo.Notef(err, "cannot ensure index with keys %v on collection %s", idx.i, idx.c.Name)
-		}
-	}
-	return nil
-}
-
-func (s *Store) putArchive(archive blobstore.ReadSeekCloser) (blobName, blobHash, blobHash256 string, size int64, err error) {
-	hash := blobstore.NewHash()
-	hash256 := sha256.New()
-	size, err = io.Copy(io.MultiWriter(hash, hash256), archive)
-	if err != nil {
-		return "", "", "", 0, errgo.Notef(err, "cannot copy archive")
-	}
-	if _, err = archive.Seek(0, 0); err != nil {
-		return "", "", "", 0, errgo.Notef(err, "cannot seek in archive")
-	}
-	blobHash = fmt.Sprintf("%x", hash.Sum(nil))
-	blobName = bson.NewObjectId().Hex()
-	if err = s.BlobStore.PutUnchallenged(archive, blobName, size, blobHash); err != nil {
-		return "", "", "", 0, errgo.Notef(err, "cannot put archive into blob store")
-	}
-	return blobName, blobHash, fmt.Sprintf("%x", hash256.Sum(nil)), size, nil
-}
-
-// AddCharmWithArchive is like AddCharm but
-// also adds the charm archive to the blob store.
-// This method is provided principally so that
-// tests can easily create content in the store.
-//
-// If url.PromulgatedRevision is not -1, the charm will
-// also be available at the corresponding promulgated URL.
-func (s *Store) AddCharmWithArchive(url *router.ResolvedURL, ch charm.Charm) error {
-	blobName, blobHash, blobHash256, blobSize, err := s.uploadCharmOrBundle(ch)
-	if err != nil {
-		return errgo.Notef(err, "cannot upload charm")
-	}
-	return s.AddCharm(ch, AddParams{
-		URL:         url,
-		BlobName:    blobName,
-		BlobHash:    blobHash,
-		BlobHash256: blobHash256,
-		BlobSize:    blobSize,
-	})
-}
-
-// AddBundleWithArchive is like AddBundle but
-// also adds the bundle archive to the blob store.
-// This method is provided principally so that
-// tests can easily create content in the store.
-//
-// If url.PromulgatedRevision is not -1, the bundle will
-// also be available at the corresponding promulgated URL.
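-// For example, the tests populate the store like this (a sketch;
-// MustParseResolvedURL is the exported test helper used by the test
-// suite, and bundle is an assumed charm.Bundle):
-//
-//	url := MustParseResolvedURL("cs:~charmers/bundle/wordpress-simple-0")
-//	err := store.AddBundleWithArchive(url, bundle)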
-func (s *Store) AddBundleWithArchive(url *router.ResolvedURL, b charm.Bundle) error {
-	blobName, blobHash, blobHash256, size, err := s.uploadCharmOrBundle(b)
-	if err != nil {
-		return errgo.Notef(err, "cannot upload bundle")
-	}
-	return s.AddBundle(b, AddParams{
-		URL:         url,
-		BlobName:    blobName,
-		BlobHash:    blobHash,
-		BlobHash256: blobHash256,
-		BlobSize:    size,
-	})
-}
-
-func (s *Store) uploadCharmOrBundle(c interface{}) (blobName, blobHash, blobHash256 string, size int64, err error) {
-	archive, err := getArchive(c)
-	if err != nil {
-		return "", "", "", 0, errgo.Notef(err, "cannot get archive")
-	}
-	defer archive.Close()
-	return s.putArchive(archive)
-}
-
-// AddParams holds parameters held in common between the
-// Store.AddCharm and Store.AddBundle methods.
-type AddParams struct {
-	// URL holds the id to be associated with the stored entity.
-	// If URL.PromulgatedRevision is not -1, the entity will
-	// be promulgated.
-	URL *router.ResolvedURL
-
-	// BlobName holds the name of the entity's archive blob.
-	BlobName string
-
-	// BlobHash holds the hash of the entity's archive blob.
-	BlobHash string
-
-	// BlobHash256 holds the sha256 hash of the entity's archive blob.
-	BlobHash256 string
-
-	// BlobSize holds the size of the entity's archive blob.
-	BlobSize int64
-
-	// Contents holds references to files inside the
-	// entity's archive blob.
-	Contents map[mongodoc.FileId]mongodoc.ZipFile
-}
-
-// AddCharm adds a charm to the entities collection with the given
-// parameters.
-func (s *Store) AddCharm(c charm.Charm, p AddParams) (err error) {
-	// Strictly speaking this test is redundant, because a ResolvedURL should
-	// always be canonical, but check just in case anyway, as this is
-	// the final gateway before a potentially invalid url might be stored
-	// in the database.
-	if p.URL.URL.Series == "bundle" || p.URL.URL.User == "" || p.URL.URL.Revision == -1 || p.URL.URL.Series == "" {
-		return errgo.Newf("charm added with invalid id %v", &p.URL.URL)
-	}
-	logger.Infof("add charm url %s; prev %d", &p.URL.URL, p.URL.PromulgatedRevision)
-	entity := &mongodoc.Entity{
-		URL:                     &p.URL.URL,
-		BaseURL:                 baseURL(&p.URL.URL),
-		User:                    p.URL.URL.User,
-		Name:                    p.URL.URL.Name,
-		Revision:                p.URL.URL.Revision,
-		Series:                  p.URL.URL.Series,
-		BlobHash:                p.BlobHash,
-		BlobHash256:             p.BlobHash256,
-		BlobName:                p.BlobName,
-		Size:                    p.BlobSize,
-		UploadTime:              time.Now(),
-		CharmMeta:               c.Meta(),
-		CharmConfig:             c.Config(),
-		CharmActions:            c.Actions(),
-		CharmProvidedInterfaces: interfacesForRelations(c.Meta().Provides),
-		CharmRequiredInterfaces: interfacesForRelations(c.Meta().Requires),
-		Contents:                p.Contents,
-		PromulgatedURL:          p.URL.PromulgatedURL(),
-		PromulgatedRevision:     p.URL.PromulgatedRevision,
-	}
-
-	// Check that we're not going to create a charm that duplicates
-	// the name of a bundle. This is racy, but it's the best we can do.
-	entities, err := s.FindEntities(baseURL(&p.URL.URL))
-	if err != nil {
-		return errgo.Notef(err, "cannot check for existing entities")
-	}
-	for _, entity := range entities {
-		if entity.URL.Series == "bundle" {
-			return errgo.Newf("charm name duplicates bundle name %v", entity.URL)
-		}
-	}
-	if err := s.insertEntity(entity); err != nil {
-		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
-	}
-	return nil
-}
-
-var everyonePerm = []string{params.Everyone}
-
-func (s *Store) insertEntity(entity *mongodoc.Entity) (err error) {
-	// Add the base entity to the database.
-	perms := []string{entity.User}
-	baseEntity := &mongodoc.BaseEntity{
-		URL:    entity.BaseURL,
-		User:   entity.User,
-		Name:   entity.Name,
-		Public: false,
-		ACLs: mongodoc.ACL{
-			Read:  perms,
-			Write: perms,
-		},
-		Promulgated: entity.PromulgatedURL != nil,
-	}
-	err = s.DB.BaseEntities().Insert(baseEntity)
-	if err != nil && !mgo.IsDup(err) {
-		return errgo.Notef(err, "cannot insert base entity")
-	}
-
-	// Add the entity to the database.
-	err = s.DB.Entities().Insert(entity)
-	if mgo.IsDup(err) {
-		return params.ErrDuplicateUpload
-	}
-	if err != nil {
-		return errgo.Notef(err, "cannot insert entity")
-	}
-	// Ensure that if anything fails after this, we delete
-	// the entity, otherwise we would be left in an internally
-	// inconsistent state.
-	defer func() {
-		if err != nil {
-			if err := s.DB.Entities().RemoveId(entity.URL); err != nil {
-				logger.Errorf("cannot remove entity after elastic search failure: %v", err)
-			}
-		}
-	}()
-	// Add entity to ElasticSearch.
-	if err := s.UpdateSearch(EntityResolvedURL(entity)); err != nil {
-		return errgo.Notef(err, "cannot index %s to ElasticSearch", entity.URL)
-	}
-	return nil
-}
-
-// FindEntity finds the entity in the store with the given URL,
-// which must be fully qualified. If any fields are specified,
-// only those fields will be populated in the returned entity.
-// If the given URL has no user then it is assumed to be a
-// promulgated entity.
-func (s *Store) FindEntity(url *router.ResolvedURL, fields ...string) (*mongodoc.Entity, error) {
-	entities, err := s.FindEntities(&url.URL, fields...)
-	if err != nil {
-		return nil, errgo.Mask(err)
-	}
-	if len(entities) == 0 {
-		return nil, errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
-	}
-	// The URL is guaranteed to be fully qualified so we'll always
-	// get exactly one result.
-	return entities[0], nil
-}
-
-// FindEntities finds all entities in the store matching the given URL.
-// If any fields are specified, only those fields will be
-// populated in the returned entities. If the given URL has no user then
-// only promulgated entities will be queried.
-func (s *Store) FindEntities(url *charm.Reference, fields ...string) ([]*mongodoc.Entity, error) {
-	query := selectFields(s.EntitiesQuery(url), fields)
-	var docs []*mongodoc.Entity
-	err := query.All(&docs)
-	if err != nil {
-		return nil, errgo.Notef(err, "cannot find entities matching %s", url)
-	}
-	return docs, nil
-}
-
-// FindBestEntity finds the entity that provides the preferred match to
-// the given URL. If any fields are specified, only those fields will be
-// populated in the returned entity. If the given URL has no user then
-// only promulgated entities will be queried.
-func (s *Store) FindBestEntity(url *charm.Reference, fields ...string) (*mongodoc.Entity, error) {
-	if len(fields) > 0 {
-		// Make sure we have all the fields we need to make a decision.
-		fields = append(fields, "_id", "promulgated-url", "promulgated-revision", "series", "revision")
-	}
-	entities, err := s.FindEntities(url, fields...)
- if err != nil { - return nil, errgo.Mask(err) - } - if len(entities) == 0 { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "entity not found") - } - best := entities[0] - for _, e := range entities { - if seriesScore[e.Series] > seriesScore[best.Series] { - best = e - continue - } - if seriesScore[e.Series] < seriesScore[best.Series] { - continue - } - if url.User == "" { - if e.PromulgatedRevision > best.PromulgatedRevision { - best = e - continue - } - } else { - if e.Revision > best.Revision { - best = e - continue - } - } - } - return best, nil -} - -var seriesScore = map[string]int{ - "bundle": -1, - "lucid": 1000, - "precise": 1001, - "trusty": 1002, - "quantal": 1, - "raring": 2, - "saucy": 3, - "utopic": 4, -} - -// EntitiesQuery creates a mgo.Query object that can be used to find -// entities matching the given URL. If the given URL has no user then -// the produced query will only match promulgated entities. -func (s *Store) EntitiesQuery(url *charm.Reference) *mgo.Query { - if url.User != "" && url.Series != "" && url.Revision != -1 { - // Find a specific owned entity, for instance ~who/utopic/django-42. - return s.DB.Entities().FindId(url) - } - if url.Series != "" && url.Revision != -1 { - // Find a specific promulgated entity, for instance utopic/django-42. - return s.DB.Entities().Find(bson.D{{"promulgated-url", url}}) - } - // Find all entities matching the URL. - q := make(bson.D, 0, 3) - q = append(q, bson.DocElem{"name", url.Name}) - if url.User != "" { - q = append(q, bson.DocElem{"user", url.User}) - } else { - // If the URL user is empty, only search the promulgated entities. - q = append(q, bson.DocElem{"promulgated-url", bson.D{{"$exists", true}}}) - } - if url.Series != "" { - q = append(q, bson.DocElem{"series", url.Series}) - } - if url.Revision != -1 { - if url.User != "" { - q = append(q, bson.DocElem{"revision", url.Revision}) - } else { - q = append(q, bson.DocElem{"promulgated-revision", url.Revision}) - } - } - return s.DB.Entities().Find(q) -} - -// FindBaseEntity finds the base entity in the store using the given URL, -// which can either represent a fully qualified entity or a base id. -// If any fields are specified, only those fields will be populated in the -// returned base entity. -func (s *Store) FindBaseEntity(url *charm.Reference, fields ...string) (*mongodoc.BaseEntity, error) { - var query *mgo.Query - if url.User == "" { - query = s.DB.BaseEntities().Find(bson.D{{"name", url.Name}, {"promulgated", 1}}) - } else { - query = s.DB.BaseEntities().FindId(baseURL(url)) - } - query = selectFields(query, fields) - var baseEntity mongodoc.BaseEntity - if err := query.One(&baseEntity); err != nil { - if err == mgo.ErrNotFound { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "base entity not found") - } - return nil, errgo.Notef(err, "cannot find base entity %v", url) - } - return &baseEntity, nil -} - -func selectFields(query *mgo.Query, fields []string) *mgo.Query { - if len(fields) > 0 { - sel := make(bson.D, len(fields)) - for i, field := range fields { - sel[i] = bson.DocElem{field, 1} - } - query = query.Select(sel) - } - return query -} - -// UpdateEntity applies the provided update to the entity described by url. 
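-// For example, the stats tests set an entity's extra-info this way
-// (a sketch; extraInfo is an assumed map[string][]byte):
-//
-//	err := store.UpdateEntity(url, bson.D{{
-//		"$set", bson.D{{"extrainfo", extraInfo}},
-//	}})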
-func (s *Store) UpdateEntity(url *router.ResolvedURL, update interface{}) error {
-	if err := s.DB.Entities().Update(bson.D{{"_id", &url.URL}}, update); err != nil {
-		if err == mgo.ErrNotFound {
-			return errgo.WithCausef(err, params.ErrNotFound, "cannot update %q", url)
-		}
-		return errgo.Notef(err, "cannot update %q", url)
-	}
-	return nil
-}
-
-// UpdateBaseEntity applies the provided update to the base entity of url.
-func (s *Store) UpdateBaseEntity(url *router.ResolvedURL, update interface{}) error {
-	if err := s.DB.BaseEntities().Update(bson.D{{"_id", baseURL(&url.URL)}}, update); err != nil {
-		if err == mgo.ErrNotFound {
-			return errgo.WithCausef(err, params.ErrNotFound, "cannot update base entity for %q", url)
-		}
-		return errgo.Notef(err, "cannot update base entity for %q", url)
-	}
-	return nil
-}
-
-// SetPromulgated sets whether the base entity of url is promulgated. If
-// promulgated is true it also unsets promulgated on any other base
-// entity for entities with the same name. It also calculates the next
-// promulgated URL for the entities owned by the new owner and sets those
-// entities appropriately.
-//
-// Note: This code is known to have some unfortunate (but not dangerous)
-// race conditions. It is possible that if one or more promulgations
-// happen concurrently for the same entity name then it could result in
-// more than one base entity being promulgated. If this happens then
-// uploads by either user will get promulgated names, and these names
-// will never clash. This situation is easily remedied by setting the
-// promulgated user for this charm again, even to one of the ones that is
-// already promulgated. It can also result in the latest promulgated
-// revision of the charm not being one created by the promulgated user.
-// This will be remedied when a new charm is uploaded by the promulgated
-// user. As promulgation is a rare operation, the chances of this
-// happening are considered slim.
-func (s *Store) SetPromulgated(url *router.ResolvedURL, promulgate bool) error {
-	baseEntities := s.DB.BaseEntities()
-	base := baseURL(&url.URL)
-	if !promulgate {
-		err := baseEntities.UpdateId(
-			base,
-			bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(false)}}}},
-		)
-		if err != nil {
-			if errgo.Cause(err) == mgo.ErrNotFound {
-				return errgo.WithCausef(nil, params.ErrNotFound, "base entity %q not found", base)
-			}
-			return errgo.Notef(err, "cannot unpromulgate base entity %q", base)
-		}
-		if err := s.UpdateSearchBaseURL(base); err != nil {
-			return errgo.Notef(err, "cannot update search entities for %q", base)
-		}
-		return nil
-	}
-
-	// Find any currently promulgated base entities for this charm name.
-	// Under normal circumstances there should be a maximum of one of these,
-	// but we should attempt to recover if there is an error condition.
-	iter := baseEntities.Find(
-		bson.D{
-			{"_id", bson.D{{"$ne", base}}},
-			{"name", base.Name},
-			{"promulgated", mongodoc.IntBool(true)},
-		},
-	).Iter()
-	defer iter.Close()
-	var baseEntity mongodoc.BaseEntity
-	for iter.Next(&baseEntity) {
-		err := baseEntities.UpdateId(
-			baseEntity.URL,
-			bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(false)}}}},
-		)
-		if err != nil {
-			return errgo.Notef(err, "cannot unpromulgate base entity %q", baseEntity.URL)
-		}
-		if err := s.UpdateSearchBaseURL(baseEntity.URL); err != nil {
-			return errgo.Notef(err, "cannot update search entities for %q", baseEntity.URL)
-		}
-	}
-	if err := iter.Close(); err != nil {
-		return errgo.Notef(err, "cannot close mgo iterator")
-	}
-
-	// Set the promulgated flag on the base entity.
-	err := s.DB.BaseEntities().UpdateId(base, bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(true)}}}})
-	if err != nil {
-		if errgo.Cause(err) == mgo.ErrNotFound {
-			return errgo.WithCausef(nil, params.ErrNotFound, "base entity %q not found", base)
-		}
-		return errgo.Notef(err, "cannot promulgate base entity %q", base)
-	}
-
-	type result struct {
-		Series   string `bson:"_id"`
-		Revision int
-	}
-
-	// Find the latest revision in each series of entities with the promulgated base URL.
-	var latestOwned []result
-	err = s.DB.Entities().Pipe([]bson.D{
-		{{"$match", bson.D{{"baseurl", base}}}},
-		{{"$group", bson.D{{"_id", "$series"}, {"revision", bson.D{{"$max", "$revision"}}}}}},
-	}).All(&latestOwned)
-	if err != nil {
-		return errgo.Notef(err, "cannot find latest revision for promulgated URL")
-	}
-
-	// Find the latest revision in each series of the promulgated entities
-	// with the same name as the base entity. Note that this works because:
-	// 1) promulgated URLs always have the same charm name as their
-	// non-promulgated counterparts.
-	// 2) bundles cannot have names that overlap with charms.
-	// Because of 1), we are sure that selecting on the entity name will
-	// select all entities with a matching promulgated URL name. Because of
-	// 2) we are sure that we are only updating all charms or the single
-	// bundle entity.
-	latestPromulgated := make(map[string]int)
-	iter = s.DB.Entities().Pipe([]bson.D{
-		{{"$match", bson.D{{"name", base.Name}}}},
-		{{"$group", bson.D{{"_id", "$series"}, {"revision", bson.D{{"$max", "$promulgated-revision"}}}}}},
-	}).Iter()
-	var res result
-	for iter.Next(&res) {
-		latestPromulgated[res.Series] = res.Revision
-	}
-	if err := iter.Close(); err != nil {
-		return errgo.Notef(err, "cannot close mgo iterator")
-	}
-
-	// Update the newest entity in each series with a base URL that matches the newly promulgated
-	// base entity to have a promulgated URL, if it does not already have one.
-	for _, r := range latestOwned {
-		id := *base
-		id.Series = r.Series
-		id.Revision = r.Revision
-		pID := id
-		pID.User = ""
-		pID.Revision = latestPromulgated[r.Series] + 1
-		err := s.DB.Entities().Update(
-			bson.D{
-				{"_id", &id},
-				{"promulgated-revision", -1},
-			},
-			bson.D{
-				{"$set", bson.D{
-					{"promulgated-url", &pID},
-					{"promulgated-revision", pID.Revision},
-				}},
-			},
-		)
-		if err != nil && err != mgo.ErrNotFound {
-			// If we get NotFound it is most likely because the latest owned revision is
-			// already promulgated, so carry on.
-			return errgo.Notef(err, "cannot update promulgated URLs")
-		}
-	}
-
-	// Update the search record for the newest entity.
- if err := s.UpdateSearchBaseURL(base); err != nil { - return errgo.Notef(err, "cannot update search entities for %q", base) - } - return nil -} - -func interfacesForRelations(rels map[string]charm.Relation) []string { - // Eliminate duplicates by storing interface names into a map. - interfaces := make(map[string]bool) - for _, rel := range rels { - interfaces[rel.Interface] = true - } - result := make([]string, 0, len(interfaces)) - for iface := range interfaces { - result = append(result, iface) - } - return result -} - -func baseURL(url *charm.Reference) *charm.Reference { - newURL := *url - newURL.Revision = -1 - newURL.Series = "" - return &newURL -} - -var errNotImplemented = errgo.Newf("not implemented") - -// AddBundle adds a bundle to the entities collection with the given -// parameters. -func (s *Store) AddBundle(b charm.Bundle, p AddParams) error { - // Strictly speaking this test is redundant, because a ResolvedURL should - // always be canonical, but check just in case anyway, as this is - // final gateway before a potentially invalid url might be stored - // in the database. - if p.URL.URL.Series != "bundle" || p.URL.URL.User == "" || p.URL.URL.Revision == -1 || p.URL.URL.Series == "" { - return errgo.Newf("bundle added with invalid id %v", p.URL) - } - bundleData := b.Data() - urls, err := bundleCharms(bundleData) - if err != nil { - return errgo.Mask(err) - } - entity := &mongodoc.Entity{ - URL: &p.URL.URL, - BaseURL: baseURL(&p.URL.URL), - User: p.URL.URL.User, - Name: p.URL.URL.Name, - Revision: p.URL.URL.Revision, - Series: p.URL.URL.Series, - BlobHash: p.BlobHash, - BlobHash256: p.BlobHash256, - BlobName: p.BlobName, - Size: p.BlobSize, - UploadTime: time.Now(), - BundleData: bundleData, - BundleUnitCount: newInt(bundleUnitCount(bundleData)), - BundleMachineCount: newInt(bundleMachineCount(bundleData)), - BundleReadMe: b.ReadMe(), - BundleCharms: urls, - Contents: p.Contents, - PromulgatedURL: p.URL.PromulgatedURL(), - PromulgatedRevision: p.URL.PromulgatedRevision, - } - - // Check that we're not going to create a bundle that duplicates - // the name of a charm. This is racy, but it's the best we can do. - entities, err := s.FindEntities(baseURL(&p.URL.URL)) - if err != nil { - return errgo.Notef(err, "cannot check for existing entities") - } - for _, entity := range entities { - if entity.URL.Series != "bundle" { - return errgo.Newf("bundle name duplicates charm name %s", entity.URL) - } - } - if err := s.insertEntity(entity); err != nil { - return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload)) - } - return nil -} - -// OpenBlob opens a blob given its entity id; it returns the blob's -// data source, its size and its hash. It returns a params.ErrNotFound -// error if the entity does not exist. -func (s *Store) OpenBlob(id *router.ResolvedURL) (r blobstore.ReadSeekCloser, size int64, hash string, err error) { - blobName, hash, err := s.BlobNameAndHash(id) - if err != nil { - return nil, 0, "", errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - r, size, err = s.BlobStore.Open(blobName) - if err != nil { - return nil, 0, "", errgo.Notef(err, "cannot open archive data for %s", id) - } - return r, size, hash, nil -} - -// BlobNameAndHash returns the name that is used to store the blob -// for the entity with the given id and its hash. It returns a params.ErrNotFound -// error if the entity does not exist. 
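-// A minimal sketch of typical use together with the blob store
-// (error handling elided):
-//
-//	name, _, err := store.BlobNameAndHash(id)
-//	r, size, err := store.BlobStore.Open(name)
-//	defer r.Close()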
-func (s *Store) BlobNameAndHash(id *router.ResolvedURL) (name, hash string, err error) {
-	entity, err := s.FindEntity(id, "blobname", "blobhash")
-	if err != nil {
-		if errgo.Cause(err) == params.ErrNotFound {
-			return "", "", errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
-		}
-		return "", "", errgo.Notef(err, "cannot get %s", id)
-	}
-	return entity.BlobName, entity.BlobHash, nil
-}
-
-// OpenCachedBlobFile opens a file from the given entity's archive blob.
-// The file is identified by the provided fileId. If the file has not
-// previously been opened on this entity, the isFile function will be
-// used to determine which file in the zip file to use. The result will
-// be cached for the next time.
-//
-// When retrieving the entity, at least the BlobName and
-// Contents fields must be populated.
-func (s *Store) OpenCachedBlobFile(
-	entity *mongodoc.Entity,
-	fileId mongodoc.FileId,
-	isFile func(f *zip.File) bool,
-) (_ io.ReadCloser, err error) {
-	if entity.BlobName == "" {
-		// We'd like to check that the Contents field was populated
-		// here but we can't because it doesn't necessarily
-		// exist in the entity.
-		return nil, errgo.New("provided entity does not have required fields")
-	}
-	zipf, ok := entity.Contents[fileId]
-	if ok && !zipf.IsValid() {
-		return nil, errgo.WithCausef(nil, params.ErrNotFound, "")
-	}
-	blob, size, err := s.BlobStore.Open(entity.BlobName)
-	if err != nil {
-		return nil, errgo.Notef(err, "cannot open archive blob")
-	}
-	defer func() {
-		// When there's an error, we want to close
-		// the blob, otherwise we need to keep the blob
-		// open because it's used by the returned Reader.
-		if err != nil {
-			blob.Close()
-		}
-	}()
-	if !ok {
-		// We haven't already searched the archive for the icon,
-		// so search the archive now.
-		zipf, err = s.findZipFile(blob, size, isFile)
-		if err != nil && errgo.Cause(err) != params.ErrNotFound {
-			return nil, errgo.Mask(err)
-		}
-	}
-	// We update the content entry regardless of whether we've
-	// found a file, so that the next time that serveIcon is called
-	// it can know that we've already looked.
-	err = s.DB.Entities().UpdateId(
-		entity.URL,
-		bson.D{{"$set",
-			bson.D{{"contents." + string(fileId), zipf}},
-		}},
-	)
-	if err != nil {
-		return nil, errgo.Notef(err, "cannot update %q", entity.URL)
-	}
-	if !zipf.IsValid() {
-		// We searched for the file and didn't find it.
-		return nil, errgo.WithCausef(nil, params.ErrNotFound, "")
-	}
-
-	// We know where the icon is stored. Now serve it up.
-	r, err := ZipFileReader(blob, zipf)
-	if err != nil {
-		return nil, errgo.Notef(err, "cannot make zip file reader")
-	}
-	// We return a ReadCloser that reads from the newly created
-	// zip file reader, but when closed, will close the originally
-	// opened blob.
-	return struct {
-		io.Reader
-		io.Closer
-	}{r, blob}, nil
-}
-
-func (s *Store) findZipFile(blob io.ReadSeeker, size int64, isFile func(f *zip.File) bool) (mongodoc.ZipFile, error) {
-	zipReader, err := zip.NewReader(&readerAtSeeker{blob}, size)
-	if err != nil {
-		return mongodoc.ZipFile{}, errgo.Notef(err, "cannot read archive data")
-	}
-	for _, f := range zipReader.File {
-		if isFile(f) {
-			return NewZipFile(f)
-		}
-	}
-	return mongodoc.ZipFile{}, params.ErrNotFound
-}
-
-// SetPerms sets the permissions for the base entity with
-// the given id for "which" operations ("read" or "write")
-// to the given ACL. This is mostly provided for testing.
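-// For example, to make an entity publicly readable in a test
-// (a sketch; params.Everyone is the ACL entry matching any user):
-//
-//	err := store.SetPerms(id, "read", params.Everyone)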
-func (s *Store) SetPerms(id *charm.Reference, which string, acl ...string) error {
-	return s.DB.BaseEntities().UpdateId(baseURL(id), bson.D{{"$set",
-		bson.D{{"acls." + which, acl}},
-	}})
-}
-
-func newInt(x int) *int {
-	return &x
-}
-
-// bundleUnitCount returns the number of units created by the bundle.
-func bundleUnitCount(b *charm.BundleData) int {
-	count := 0
-	for _, service := range b.Services {
-		count += service.NumUnits
-	}
-	return count
-}
-
-// bundleMachineCount returns the number of machines
-// that will be created or used by the bundle.
-func bundleMachineCount(b *charm.BundleData) int {
-	count := len(b.Machines)
-	for _, service := range b.Services {
-		// The default placement is "new".
-		placement := &charm.UnitPlacement{
-			Machine: "new",
-		}
-		// Check for "new" placements, which means a new machine
-		// must be added.
-		for _, location := range service.To {
-			var err error
-			placement, err = charm.ParsePlacement(location)
-			if err != nil {
-				// Ignore invalid placements - a bundle should always
-				// be verified before adding to the charm store so this
-				// should never happen in practice.
-				continue
-			}
-			if placement.Machine == "new" {
-				count++
-			}
-		}
-		// If there are fewer elements in To than NumUnits, the last placement
-		// element is replicated. For this reason, if the last element is
-		// "new", we need to add more machines.
-		if placement != nil && placement.Machine == "new" {
-			count += service.NumUnits - len(service.To)
-		}
-	}
-	return count
-}
-
-// bundleCharms returns all the charm URLs used by a bundle,
-// without duplicates.
-func bundleCharms(data *charm.BundleData) ([]*charm.Reference, error) {
-	// Use a map to de-duplicate the URL list: a bundle can include services
-	// deployed by the same charm.
-	urlMap := make(map[string]*charm.Reference)
-	for _, service := range data.Services {
-		url, err := charm.ParseReference(service.Charm)
-		if err != nil {
-			return nil, errgo.Mask(err)
-		}
-		urlMap[url.String()] = url
-		// Also add the corresponding base URL.
-		base := baseURL(url)
-		urlMap[base.String()] = base
-	}
-	urls := make([]*charm.Reference, 0, len(urlMap))
-	for _, url := range urlMap {
-		urls = append(urls, url)
-	}
-	return urls, nil
-}
-
-// AddLog adds a log message to the database.
-func (s *Store) AddLog(data *json.RawMessage, logLevel mongodoc.LogLevel, logType mongodoc.LogType, urls []*charm.Reference) error {
-	// Encode the JSON data.
-	b, err := json.Marshal(data)
-	if err != nil {
-		return errgo.Notef(err, "cannot marshal log data")
-	}
-
-	// Add the base URLs to the list of references associated with the log.
-	// Also remove duplicate URLs while maintaining the references' order.
-	var allUrls []*charm.Reference
-	urlMap := make(map[string]bool)
-	for _, url := range urls {
-		urlStr := url.String()
-		if ok, _ := urlMap[urlStr]; !ok {
-			urlMap[urlStr] = true
-			allUrls = append(allUrls, url)
-		}
-		base := baseURL(url)
-		urlStr = base.String()
-		if ok, _ := urlMap[urlStr]; !ok {
-			urlMap[urlStr] = true
-			allUrls = append(allUrls, base)
-		}
-	}
-
-	// Add the log to the database.
-	log := &mongodoc.Log{
-		Data:  b,
-		Level: logLevel,
-		Type:  logType,
-		URLs:  allUrls,
-		Time:  time.Now(),
-	}
-	if err := s.DB.Logs().Insert(log); err != nil {
-		return errgo.Mask(err)
-	}
-	return nil
-}
-
-// StoreDatabase wraps an mgo.DB and adds a few convenience methods.
-type StoreDatabase struct {
-	*mgo.Database
-}
-
-// Copy copies the StoreDatabase and its underlying mgo session.
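-// The copy must be closed when no longer needed, e.g. (a sketch):
-//
-//	db := store.DB.Copy()
-//	defer db.Close()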
-func (s StoreDatabase) Copy() StoreDatabase {
-	return StoreDatabase{
-		&mgo.Database{
-			Name:    s.Name,
-			Session: s.Session.Copy(),
-		},
-	}
-}
-
-// Close closes the store database's underlying session.
-func (s StoreDatabase) Close() {
-	s.Session.Close()
-}
-
-// Entities returns the mongo collection where entities are stored.
-func (s StoreDatabase) Entities() *mgo.Collection {
-	return s.C("entities")
-}
-
-// BaseEntities returns the mongo collection where base entities are stored.
-func (s StoreDatabase) BaseEntities() *mgo.Collection {
-	return s.C("base_entities")
-}
-
-// Logs returns the Mongo collection where charm store logs are stored.
-func (s StoreDatabase) Logs() *mgo.Collection {
-	return s.C("logs")
-}
-
-// Migrations returns the Mongo collection where the migration info is stored.
-func (s StoreDatabase) Migrations() *mgo.Collection {
-	return s.C("migrations")
-}
-
-func (s StoreDatabase) Macaroons() *mgo.Collection {
-	return s.C("macaroons")
-}
-
-// allCollections holds, for each collection used by the charm store,
-// a function that returns that collection.
-var allCollections = []func(StoreDatabase) *mgo.Collection{
-	StoreDatabase.StatCounters,
-	StoreDatabase.StatTokens,
-	StoreDatabase.Entities,
-	StoreDatabase.BaseEntities,
-	StoreDatabase.Logs,
-	StoreDatabase.Migrations,
-	StoreDatabase.Macaroons,
-}
-
-// Collections returns a slice of all the collections used
-// by the charm store.
-func (s StoreDatabase) Collections() []*mgo.Collection {
-	cs := make([]*mgo.Collection, len(allCollections))
-	for i, f := range allCollections {
-		cs[i] = f(s)
-	}
-	return cs
-}
-
-type readerAtSeeker struct {
-	r io.ReadSeeker
-}
-
-func (r *readerAtSeeker) ReadAt(buf []byte, p int64) (int, error) {
-	if _, err := r.r.Seek(p, 0); err != nil {
-		return 0, errgo.Notef(err, "cannot seek")
-	}
-	return r.r.Read(buf)
-}
-
-// ReaderAtSeeker adapts r so that it can be used as
-// a ReaderAt. Note that, unlike some implementations
-// of ReaderAt, it is not OK to use concurrently.
-func ReaderAtSeeker(r io.ReadSeeker) io.ReaderAt {
-	return &readerAtSeeker{r}
-}
-
-// Search searches the store for the given SearchParams.
-// It returns a SearchResult containing the results of the search.
-func (store *Store) Search(sp SearchParams) (SearchResult, error) {
-	result, err := store.ES.search(sp)
-	if err != nil {
-		return SearchResult{}, errgo.Mask(err)
-	}
-	return result, nil
-}
-
-// SynchroniseElasticsearch creates new indexes in elasticsearch
-// and populates them with the current data from the mongodb database.
-func (s *Store) SynchroniseElasticsearch() error {
-	if err := s.ES.ensureIndexes(true); err != nil {
-		return errgo.Notef(err, "cannot create indexes")
-	}
-	if err := s.syncSearch(); err != nil {
-		return errgo.Notef(err, "cannot synchronise indexes")
-	}
-	return nil
-}
-
-// EntityResolvedURL returns the ResolvedURL for the entity.
-// It requires the PromulgatedURL field to have been
-// filled out in the entity.
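-// For example (a sketch; the listed fields are the ones this
-// function reads):
-//
-//	entity, err := store.FindEntity(id, "_id", "promulgated-url")
-//	rurl := EntityResolvedURL(entity)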
-func EntityResolvedURL(e *mongodoc.Entity) *router.ResolvedURL { - promulgatedRev := -1 - if e.PromulgatedURL != nil { - promulgatedRev = e.PromulgatedURL.Revision - } - return &router.ResolvedURL{ - URL: *e.URL, - PromulgatedRevision: promulgatedRev, - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/store_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/store_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/store_test.go 1970-01-01 00:00:00 +0000 @@ -1,2165 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "archive/zip" - "bytes" - "crypto/sha256" - "crypto/sha512" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/blobstore" - "gopkg.in/juju/charmstore.v4/internal/elasticsearch" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/params" -) - -type StoreSuite struct { - storetesting.IsolatedMgoESSuite - index string -} - -var _ = gc.Suite(&StoreSuite{}) - -func (s *StoreSuite) checkAddCharm(c *gc.C, ch charm.Charm, addToES bool, url *router.ResolvedURL) { - var es *elasticsearch.Database - if addToES { - es = s.ES - } - store := s.newStore(c, true) - defer store.Close() - - // Add the charm to the store. - beforeAdding := time.Now() - err := store.AddCharmWithArchive(url, ch) - c.Assert(err, gc.IsNil) - afterAdding := time.Now() - - var doc mongodoc.Entity - err = store.DB.Entities().FindId(&url.URL).One(&doc) - c.Assert(err, gc.IsNil) - - // Ensure the document was indexed in ElasticSearch, if an ES database was provided. - if es != nil { - var result SearchDoc - id := store.ES.getID(doc.URL) - err = store.ES.GetDocument(s.TestIndex, typeName, id, &result) - c.Assert(err, gc.IsNil) - exists, err := store.ES.HasDocument(s.TestIndex, typeName, id) - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) - if purl := url.PromulgatedURL(); purl != nil { - c.Assert(result.PromulgatedURL, jc.DeepEquals, purl) - } - } - // The entity doc has been correctly added to the mongo collection. - size, hash, hash256 := getSizeAndHashes(ch) - sort.Strings(doc.CharmProvidedInterfaces) - sort.Strings(doc.CharmRequiredInterfaces) - - // Check the upload time and then reset it to its zero value - // so that we can test the deterministic parts later. 
- c.Assert(doc.UploadTime, jc.TimeBetween(beforeAdding, afterAdding)) - - doc.UploadTime = time.Time{} - - blobName := doc.BlobName - c.Assert(blobName, gc.Matches, "[0-9a-z]+") - doc.BlobName = "" - - c.Assert(doc, jc.DeepEquals, mongodoc.Entity{ - URL: &url.URL, - BaseURL: baseURL(&url.URL), - User: url.URL.User, - Name: url.URL.Name, - Revision: url.URL.Revision, - Series: url.URL.Series, - BlobHash: hash, - BlobHash256: hash256, - Size: size, - CharmMeta: ch.Meta(), - CharmActions: ch.Actions(), - CharmConfig: ch.Config(), - CharmProvidedInterfaces: []string{"http", "logging", "monitoring"}, - CharmRequiredInterfaces: []string{"mysql", "varnish"}, - PromulgatedURL: url.PromulgatedURL(), - PromulgatedRevision: url.PromulgatedRevision, - }) - - // The charm archive has been properly added to the blob store. - r, obtainedSize, err := store.BlobStore.Open(blobName) - c.Assert(err, gc.IsNil) - defer r.Close() - c.Assert(obtainedSize, gc.Equals, size) - data, err := ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - charmArchive, err := charm.ReadCharmArchiveBytes(data) - c.Assert(err, gc.IsNil) - c.Assert(charmArchive.Meta(), jc.DeepEquals, ch.Meta()) - c.Assert(charmArchive.Config(), jc.DeepEquals, ch.Config()) - c.Assert(charmArchive.Actions(), jc.DeepEquals, ch.Actions()) - c.Assert(charmArchive.Revision(), jc.DeepEquals, ch.Revision()) - - // Check that the base entity has been properly created. - assertBaseEntity(c, store, baseURL(&url.URL), url.PromulgatedRevision != -1) - - // Try inserting the charm again - it should fail because the charm is - // already there. - err = store.AddCharmWithArchive(url, ch) - c.Assert(errgo.Cause(err), gc.Equals, params.ErrDuplicateUpload) -} - -func (s *StoreSuite) checkAddBundle(c *gc.C, bundle charm.Bundle, addToES bool, url *router.ResolvedURL) { - var es *elasticsearch.Database - - if addToES { - es = s.ES - } - store := s.newStore(c, true) - defer store.Close() - - // Add the bundle to the store. - beforeAdding := time.Now() - err := store.AddBundleWithArchive(url, bundle) - c.Assert(err, gc.IsNil) - afterAdding := time.Now() - - var doc mongodoc.Entity - err = store.DB.Entities().FindId(&url.URL).One(&doc) - c.Assert(err, gc.IsNil) - sort.Sort(orderedURLs(doc.BundleCharms)) - - // Ensure the document was indexed in ElasticSearch, if an ES database was provided. - if es != nil { - var result SearchDoc - id := store.ES.getID(doc.URL) - err = store.ES.GetDocument(s.TestIndex, typeName, id, &result) - c.Assert(err, gc.IsNil) - exists, err := store.ES.HasDocument(s.TestIndex, typeName, id) - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) - if purl := url.PromulgatedURL(); purl != nil { - c.Assert(result.PromulgatedURL, jc.DeepEquals, purl) - } - } - - // Check the upload time and then reset it to its zero value - // so that we can test the deterministic parts later. - c.Assert(doc.UploadTime, jc.TimeBetween(beforeAdding, afterAdding)) - doc.UploadTime = time.Time{} - - // The blob name is random, but we check that it's - // in the correct format, and non-empty. - blobName := doc.BlobName - c.Assert(blobName, gc.Matches, "[0-9a-z]+") - doc.BlobName = "" - - // The entity doc has been correctly added to the mongo collection. 
- size, hash, hash256 := getSizeAndHashes(bundle) - c.Assert(doc, jc.DeepEquals, mongodoc.Entity{ - URL: &url.URL, - BaseURL: baseURL(&url.URL), - User: url.URL.User, - Name: url.URL.Name, - Revision: url.URL.Revision, - Series: url.URL.Series, - BlobHash: hash, - BlobHash256: hash256, - Size: size, - BundleData: bundle.Data(), - BundleReadMe: bundle.ReadMe(), - BundleCharms: []*charm.Reference{ - charm.MustParseReference("mysql"), - charm.MustParseReference("wordpress"), - }, - BundleMachineCount: newInt(2), - BundleUnitCount: newInt(2), - PromulgatedURL: url.PromulgatedURL(), - PromulgatedRevision: url.PromulgatedRevision, - }) - - // The bundle archive has been properly added to the blob store. - r, obtainedSize, err := store.BlobStore.Open(blobName) - c.Assert(err, gc.IsNil) - defer r.Close() - c.Assert(obtainedSize, gc.Equals, size) - data, err := ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - bundleArchive, err := charm.ReadBundleArchiveBytes(data) - c.Assert(err, gc.IsNil) - c.Assert(bundleArchive.Data(), jc.DeepEquals, bundle.Data()) - c.Assert(bundleArchive.ReadMe(), jc.DeepEquals, bundle.ReadMe()) - - // Check that the base entity has been properly created. - assertBaseEntity(c, store, baseURL(&url.URL), url.PromulgatedRevision != -1) - - // Try inserting the bundle again - it should fail because the bundle is - // already there. - err = store.AddBundleWithArchive(url, bundle) - c.Assert(errgo.Cause(err), gc.Equals, params.ErrDuplicateUpload) -} - -func assertBaseEntity(c *gc.C, store *Store, url *charm.Reference, promulgated bool) { - baseEntity, err := store.FindBaseEntity(url) - c.Assert(err, gc.IsNil) - expectACLs := mongodoc.ACL{ - Read: []string{url.User}, - Write: []string{url.User}, - } - c.Assert(baseEntity, jc.DeepEquals, &mongodoc.BaseEntity{ - URL: url, - User: url.User, - Name: url.Name, - Public: false, - ACLs: expectACLs, - Promulgated: mongodoc.IntBool(promulgated), - }) -} - -type orderedURLs []*charm.Reference - -func (o orderedURLs) Less(i, j int) bool { - return o[i].String() < o[j].String() -} - -func (o orderedURLs) Swap(i, j int) { - o[i], o[j] = o[j], o[i] -} - -func (o orderedURLs) Len() int { - return len(o) -} - -var urlFindingTests = []struct { - inStore []string - expand string - expect []string -}{{ - inStore: []string{"23 cs:~charmers/precise/wordpress-23"}, - expand: "wordpress", - expect: []string{"23 cs:~charmers/precise/wordpress-23"}, -}, { - inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24"}, - expand: "wordpress", - expect: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24"}, -}, { - inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, - expand: "precise/wordpress", - expect: []string{"23 cs:~charmers/precise/wordpress-23"}, -}, { - inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/bar-434"}, - expand: "wordpress", - expect: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, -}, { - inStore: []string{"23 cs:~charmers/precise/wordpress-23", "23 cs:~charmers/trusty/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, - expand: "wordpress-23", - expect: []string{"23 cs:~charmers/precise/wordpress-23", "23 cs:~charmers/trusty/wordpress-23"}, -}, { - inStore: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, - expand: "~user/precise/wordpress", - expect: 
[]string{"cs:~user/precise/wordpress-23"}, -}, { - inStore: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, - expand: "~user/wordpress", - expect: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, -}, { - inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/bar-434"}, - expand: "precise/wordpress-23", - expect: []string{"23 cs:~charmers/precise/wordpress-23"}, -}, { - inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/bar-434"}, - expand: "arble", - expect: []string{}, -}, { - inStore: []string{}, - expand: "precise/wordpress-23", - expect: []string{}, -}} - -func (s *StoreSuite) testURLFinding(c *gc.C, check func(store *Store, expand *charm.Reference, expect []*router.ResolvedURL)) { - wordpress := storetesting.Charms.CharmDir("wordpress") - store := s.newStore(c, false) - defer store.Close() - for i, test := range urlFindingTests { - c.Logf("test %d: %q from %q", i, test.expand, test.inStore) - _, err := store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - urls := MustParseResolvedURLs(test.inStore) - for _, url := range urls { - err := store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - } - check(store, charm.MustParseReference(test.expand), MustParseResolvedURLs(test.expect)) - } -} - -func (s *StoreSuite) TestFindEntities(c *gc.C) { - s.testURLFinding(c, func(store *Store, expand *charm.Reference, expect []*router.ResolvedURL) { - // Check FindEntities works when just retrieving the id and promulgated id. - gotEntities, err := store.FindEntities(expand, "_id", "promulgated-url") - c.Assert(err, gc.IsNil) - if expand.User == "" { - sort.Sort(entitiesByPromulgatedURL(gotEntities)) - } else { - sort.Sort(entitiesByURL(gotEntities)) - } - c.Assert(gotEntities, gc.HasLen, len(expect)) - for i, url := range expect { - c.Assert(gotEntities[i], jc.DeepEquals, &mongodoc.Entity{ - URL: &url.URL, - PromulgatedURL: url.PromulgatedURL(), - }) - } - - // check FindEntities works when retrieving all fields. - gotEntities, err = store.FindEntities(expand) - c.Assert(err, gc.IsNil) - if expand.User == "" { - sort.Sort(entitiesByPromulgatedURL(gotEntities)) - } else { - sort.Sort(entitiesByURL(gotEntities)) - } - c.Assert(gotEntities, gc.HasLen, len(expect)) - for i, url := range expect { - var entity mongodoc.Entity - err := store.DB.Entities().FindId(&url.URL).One(&entity) - c.Assert(err, gc.IsNil) - c.Assert(gotEntities[i], jc.DeepEquals, &entity) - } - }) -} - -func (s *StoreSuite) TestFindEntity(c *gc.C) { - s.testURLFinding(c, func(store *Store, expand *charm.Reference, expect []*router.ResolvedURL) { - if expand.Series == "" || expand.Revision == -1 || expand.User == "" { - return - } - rurl := &router.ResolvedURL{ - URL: *expand, - PromulgatedRevision: -1, - } - entity, err := store.FindEntity(rurl, "_id", "promulgated-url") - if len(expect) == 0 { - c.Assert(err, gc.ErrorMatches, "entity not found") - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) - return - } - c.Assert(err, gc.IsNil) - c.Assert(len(expect), gc.Equals, 1) - c.Assert(entity.BlobName, gc.Equals, "") - c.Assert(entity.URL, jc.DeepEquals, expect[0]) - - // Check that it works when returning other fields too. 
- entity, err = store.FindEntity(rurl, "blobname") - c.Assert(err, gc.IsNil) - c.Assert(entity.BlobName, gc.Not(gc.Equals), "") - }) -} - -var findBaseEntityTests = []struct { - about string - stored []string - url string - fields []string - expect *mongodoc.BaseEntity -}{{ - about: "entity found, base url, all fields", - stored: []string{"42 cs:~charmers/utopic/django-42"}, - url: "django", - expect: &mongodoc.BaseEntity{ - URL: charm.MustParseReference("~charmers/django"), - User: "charmers", - Name: "django", - Public: false, - Promulgated: true, - ACLs: mongodoc.ACL{ - Read: []string{"charmers"}, - Write: []string{"charmers"}, - }, - }, -}, { - about: "entity found, fully qualified url, few fields", - stored: []string{"42 cs:~charmers/utopic/django-42", "~who/precise/django-47"}, - url: "~who/precise/django-0", - fields: []string{"public", "user"}, - expect: &mongodoc.BaseEntity{ - URL: charm.MustParseReference("~who/django"), - User: "who", - Public: false, - }, -}, { - about: "entity found, partial url, only the ACLs", - stored: []string{"42 cs:~charmers/utopic/django-42", "~who/trusty/django-47"}, - url: "~who/django-42", - fields: []string{"acls"}, - expect: &mongodoc.BaseEntity{ - URL: charm.MustParseReference("~who/django"), - ACLs: mongodoc.ACL{ - Read: []string{"who"}, - Write: []string{"who"}, - }, - }, -}, { - about: "entity not found, charm name", - stored: []string{"42 cs:~charmers/utopic/django-42", "~who/trusty/django-47"}, - url: "rails", -}, { - about: "entity not found, user", - stored: []string{"42 cs:~charmers/utopic/django-42", "~who/trusty/django-47"}, - url: "~dalek/django", - fields: []string{"acls"}, -}} - -func (s *StoreSuite) TestFindBaseEntity(c *gc.C) { - ch := storetesting.Charms.CharmDir("wordpress") - store := s.newStore(c, false) - defer store.Close() - for i, test := range findBaseEntityTests { - c.Logf("test %d: %s", i, test.about) - - // Add initial charms to the store. - for _, url := range MustParseResolvedURLs(test.stored) { - err := store.AddCharmWithArchive(url, ch) - c.Assert(err, gc.IsNil) - } - - // Find the entity. - id := charm.MustParseReference(test.url) - baseEntity, err := store.FindBaseEntity(id, test.fields...) - if test.expect == nil { - // We don't expect the entity to be found. - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) - c.Assert(baseEntity, gc.IsNil) - } else { - c.Assert(err, gc.IsNil) - c.Assert(baseEntity, jc.DeepEquals, test.expect) - } - - // Remove all the entities from the store. - _, err = store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - _, err = store.DB.BaseEntities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - } -} - -func (s *StoreSuite) TestAddCharmWithFailedESInsert(c *gc.C) { - // Make an ElasticSearch database with a non-existent address, - // so that the store will try to add the charm there, but fail. - esdb := &elasticsearch.Database{ - Addr: "0.1.2.3:0123", - } - - store := s.newStore(c, false) - defer store.Close() - store.ES = &SearchIndex{esdb, "no-index"} - - url := newResolvedURL("~charmers/precise/wordpress-12", -1) - err := store.AddCharmWithArchive(url, storetesting.Charms.CharmDir("wordpress")) - c.Assert(err, gc.ErrorMatches, "cannot index cs:~charmers/precise/wordpress-12 to ElasticSearch: .*") - - // Check that the entity has been correctly removed.
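- // The failed ElasticSearch insert causes the just-added entity to be
- // removed again, so the lookup below reports not-found.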
- _, err = store.FindEntity(url) - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) -} - -func (s *StoreSuite) TestAddCharmsWithTheSameBaseEntity(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - // Add a charm to the database. - ch := storetesting.Charms.CharmDir("wordpress") - url := newResolvedURL("~charmers/trusty/wordpress-12", 12) - err := store.AddCharmWithArchive(url, ch) - c.Assert(err, gc.IsNil) - - // Add a second charm to the database, sharing the same base URL. - err = store.AddCharmWithArchive(newResolvedURL("~charmers/utopic/wordpress-13", -1), ch) - c.Assert(err, gc.IsNil) - - // Ensure a single base entity has been created. - num, err := store.DB.BaseEntities().Count() - c.Assert(err, gc.IsNil) - c.Assert(num, gc.Equals, 1) -} - -type entitiesByURL []*mongodoc.Entity - -func (s entitiesByURL) Len() int { return len(s) } -func (s entitiesByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s entitiesByURL) Less(i, j int) bool { - return s[i].URL.String() < s[j].URL.String() -} - -type entitiesByPromulgatedURL []*mongodoc.Entity - -func (s entitiesByPromulgatedURL) Len() int { return len(s) } -func (s entitiesByPromulgatedURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s entitiesByPromulgatedURL) Less(i, j int) bool { - return s[i].PromulgatedURL.String() < s[j].PromulgatedURL.String() -} - -var bundleUnitCountTests = []struct { - about string - data *charm.BundleData - expectUnits int -}{{ - about: "empty bundle", - data: &charm.BundleData{}, -}, { - about: "no units", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:utopic/django-0", - NumUnits: 0, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-0", - NumUnits: 0, - }, - }, - }, -}, { - about: "a single unit", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 0, - }, - }, - }, - expectUnits: 1, -}, { - about: "multiple units", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:utopic/django-1", - NumUnits: 1, - }, - "haproxy": { - Charm: "cs:utopic/haproxy-2", - NumUnits: 2, - }, - "postgres": { - Charm: "cs:utopic/postgres-3", - NumUnits: 5, - }, - }, - }, - expectUnits: 8, -}} - -func (s *StoreSuite) TestBundleUnitCount(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - entities := store.DB.Entities() - for i, test := range bundleUnitCountTests { - c.Logf("test %d: %s", i, test.about) - url := newResolvedURL("cs:~charmers/bundle/django-0", -1) - url.URL.Revision = i - url.PromulgatedRevision = i - - // Add the bundle used for this test. - err := store.AddBundle(&testingBundle{ - data: test.data, - }, AddParams{ - URL: url, - BlobName: "blobName", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - - // Retrieve the bundle from the database. 
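- // The denormalized BundleUnitCount field should hold the sum of
- // NumUnits across all services in the bundle data.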
- var doc mongodoc.Entity - err = entities.FindId(&url.URL).One(&doc) - c.Assert(err, gc.IsNil) - - c.Assert(*doc.BundleUnitCount, gc.Equals, test.expectUnits) - } -} - -var bundleMachineCountTests = []struct { - about string - data *charm.BundleData - expectMachines int -}{{ - about: "no machines", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:utopic/django-0", - NumUnits: 0, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-0", - NumUnits: 0, - }, - }, - }, -}, { - about: "a single machine (no placement)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 0, - }, - }, - }, - expectMachines: 1, -}, { - about: "a single machine (machine placement)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - To: []string{"1"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, - }, - }, - expectMachines: 1, -}, { - about: "a single machine (hulk smash)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - To: []string{"1"}, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 1, - To: []string{"1"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, - }, - }, - expectMachines: 1, -}, { - about: "a single machine (co-location)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 1, - To: []string{"django/0"}, - }, - }, - }, - expectMachines: 1, -}, { - about: "a single machine (containerization)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - To: []string{"1"}, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 1, - To: []string{"lxc:1"}, - }, - "postgres": { - Charm: "cs:utopic/postgres-3", - NumUnits: 2, - To: []string{"kvm:1"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, - }, - }, - expectMachines: 1, -}, { - about: "multiple machines (no placement)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:utopic/django-1", - NumUnits: 1, - }, - "haproxy": { - Charm: "cs:utopic/haproxy-2", - NumUnits: 2, - }, - "postgres": { - Charm: "cs:utopic/postgres-3", - NumUnits: 5, - }, - }, - }, - expectMachines: 1 + 2 + 5, -}, { - about: "multiple machines (machine placement)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:utopic/django-1", - NumUnits: 2, - To: []string{"1", "3"}, - }, - "haproxy": { - Charm: "cs:utopic/haproxy-2", - NumUnits: 1, - To: []string{"2"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, "2": nil, "3": nil, - }, - }, - expectMachines: 2 + 1, -}, { - about: "multiple machines (hulk smash)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - To: []string{"1"}, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 1, - To: []string{"2"}, - }, - "postgres": { - Charm: "cs:utopic/postgres-3", - NumUnits: 2, - To: []string{"1", "2"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, "2": nil, - }, - }, - 
expectMachines: 1 + 1 + 0, -}, { - about: "multiple machines (co-location)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 2, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 3, - To: []string{"django/0", "django/1", "new"}, - }, - }, - }, - expectMachines: 2 + 1, -}, { - about: "multiple machines (containerization)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 2, - To: []string{"1", "2"}, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 4, - To: []string{"lxc:1", "lxc:2", "lxc:3", "lxc:3"}, - }, - "postgres": { - Charm: "cs:utopic/postgres-3", - NumUnits: 1, - To: []string{"kvm:2"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, "2": nil, "3": nil, - }, - }, - expectMachines: 2 + 1 + 0, -}, { - about: "multiple machines (partial placement in a container)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - To: []string{"1"}, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 10, - To: []string{"lxc:1", "lxc:2"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, "2": nil, - }, - }, - expectMachines: 1 + 1, -}, { - about: "multiple machines (partial placement in a new machine)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 1, - To: []string{"1"}, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 10, - To: []string{"lxc:1", "1", "new"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, - }, - }, - expectMachines: 1 + 8, -}, { - about: "multiple machines (partial placement with new machines)", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "django": { - Charm: "cs:trusty/django-42", - NumUnits: 3, - }, - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 6, - To: []string{"new", "1", "lxc:1", "new"}, - }, - "postgres": { - Charm: "cs:utopic/postgres-3", - NumUnits: 10, - To: []string{"kvm:2", "lxc:django/1", "new", "new", "kvm:2"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, "2": nil, - }, - }, - expectMachines: 3 + 5 + 3, -}, { - about: "placement into container on new machine", - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "haproxy": { - Charm: "cs:trusty/haproxy-47", - NumUnits: 6, - To: []string{"lxc:new", "1", "lxc:1", "kvm:new"}, - }, - }, - Machines: map[string]*charm.MachineSpec{ - "1": nil, - }, - }, - expectMachines: 5, -}} - -func (s *StoreSuite) TestBundleMachineCount(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - entities := store.DB.Entities() - for i, test := range bundleMachineCountTests { - c.Logf("test %d: %s", i, test.about) - url := newResolvedURL("cs:~charmers/bundle/django-0", -1) - url.URL.Revision = i - url.PromulgatedRevision = i - err := test.data.Verify(func(string) error { return nil }) - c.Assert(err, gc.IsNil) - // Add the bundle used for this test. - err = store.AddBundle(&testingBundle{ - data: test.data, - }, AddParams{ - URL: url, - BlobName: "blobName", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - - // Retrieve the bundle from the database. 
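- // BundleMachineCount reflects the number of distinct machines after
- // placement: units placed on an existing machine or into a container
- // on one add no new machine, while "new" placements and unplaced
- // units do.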
- var doc mongodoc.Entity - err = entities.FindId(&url.URL).One(&doc) - c.Assert(err, gc.IsNil) - - c.Assert(*doc.BundleMachineCount, gc.Equals, test.expectMachines) - } -} - -func urlStrings(urls []*charm.Reference) []string { - urlStrs := make([]string, len(urls)) - for i, url := range urls { - urlStrs[i] = url.String() - } - return urlStrs -} - -// MustParseResolvedURL parses a resolved URL in string form, with -// the optional promulgated revision preceding the entity URL -// separated by a space. -func MustParseResolvedURL(urlStr string) *router.ResolvedURL { - s := strings.Fields(urlStr) - promRev := -1 - switch len(s) { - default: - panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) - case 2: - var err error - promRev, err = strconv.Atoi(s[0]) - if err != nil || promRev < 0 { - panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) - } - case 1: - } - return &router.ResolvedURL{ - URL: *charm.MustParseReference(s[len(s)-1]), - PromulgatedRevision: promRev, - } -} - -func MustParseResolvedURLs(urlStrs []string) []*router.ResolvedURL { - urls := make([]*router.ResolvedURL, len(urlStrs)) - for i, u := range urlStrs { - urls[i] = MustParseResolvedURL(u) - } - return urls -} - -func mustParseReferences(urlStrs []string) []*charm.Reference { - urls := make([]*charm.Reference, len(urlStrs)) - for i, u := range urlStrs { - urls[i] = charm.MustParseReference(u) - } - return urls -} - -func (s *StoreSuite) TestAddPromulgatedCharmDir(c *gc.C) { - charmDir := storetesting.Charms.CharmDir("wordpress") - s.checkAddCharm(c, charmDir, false, newResolvedURL("~charmers/precise/wordpress-1", 1)) -} - -func (s *StoreSuite) TestAddPromulgatedCharmArchive(c *gc.C) { - charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - s.checkAddCharm(c, charmArchive, false, newResolvedURL("~charmers/precise/wordpress-1", 1)) -} - -func (s *StoreSuite) TestAddUserOwnedCharmDir(c *gc.C) { - charmDir := storetesting.Charms.CharmDir("wordpress") - s.checkAddCharm(c, charmDir, false, newResolvedURL("~charmers/precise/wordpress-1", -1)) -} - -func (s *StoreSuite) TestAddUserOwnedCharmArchive(c *gc.C) { - charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - s.checkAddCharm(c, charmArchive, false, newResolvedURL("~charmers/precise/wordpress-1", -1)) -} - -func (s *StoreSuite) TestAddBundleDir(c *gc.C) { - bundleDir := storetesting.Charms.BundleDir("wordpress-simple") - s.checkAddBundle(c, bundleDir, false, newResolvedURL("~charmers/bundle/wordpress-simple-2", 3)) -} - -func (s *StoreSuite) TestAddBundleArchive(c *gc.C) { - bundleArchive, err := charm.ReadBundleArchive( - storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), - ) - c.Assert(err, gc.IsNil) - s.checkAddBundle(c, bundleArchive, false, newResolvedURL("~charmers/bundle/wordpress-simple-2", 3)) -} - -func (s *StoreSuite) TestAddUserOwnedBundleDir(c *gc.C) { - bundleDir := storetesting.Charms.BundleDir("wordpress-simple") - s.checkAddBundle(c, bundleDir, false, newResolvedURL("~charmers/bundle/wordpress-simple-1", -1)) -} - -func (s *StoreSuite) TestAddUserOwnedBundleArchive(c *gc.C) { - bundleArchive, err := charm.ReadBundleArchive( - storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), - ) - c.Assert(err, gc.IsNil) - s.checkAddBundle(c, bundleArchive, false, newResolvedURL("~charmers/bundle/wordpress-simple-1", -1)) -} - -func (s *StoreSuite) newStore(c *gc.C, withES bool) *Store { - var si *SearchIndex - if withES { - si = &SearchIndex{s.ES, s.TestIndex} - } - p, err := 
NewPool(s.Session.DB("juju_test"), si, nil) - c.Assert(err, gc.IsNil) - return p.Store() -} - -func (s *StoreSuite) TestAddCharmWithBundleSeries(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - err := store.AddCharm(ch, AddParams{ - URL: newResolvedURL("~charmers/bundle/wordpress-2", -1), - }) - c.Assert(err, gc.ErrorMatches, `charm added with invalid id cs:~charmers/bundle/wordpress-2`) -} - -var addInvalidCharmURLTests = []string{ - "cs:precise/wordpress-2", // no user - "cs:~charmers/precise/wordpress", // no revision - "cs:~charmers/wordpress-3", // no series - "cs:~charmers/bundle/wordpress-2", // invalid series -} - -func (s *StoreSuite) TestAddInvalidCharmURL(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - for i, urlStr := range addInvalidCharmURLTests { - c.Logf("test %d: %s", i, urlStr) - err := store.AddCharm(ch, AddParams{ - URL: &router.ResolvedURL{ - URL: *charm.MustParseReference(urlStr), - PromulgatedRevision: -1, - }, - }) - c.Assert(err, gc.ErrorMatches, `charm added with invalid id .*`) - } -} - -var addInvalidBundleURLTests = []string{ - "cs:bundle/wordpress-2", // no user - "cs:~charmers/bundle/wordpress", // no revision - "cs:~charmers/wordpress-2", // no series - "cs:~charmers/precise/wordpress-3", // invalid series -} - -func (s *StoreSuite) TestAddBundleWithCharmSeries(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - b := storetesting.Charms.BundleDir("wordpress-simple") - for i, urlStr := range addInvalidBundleURLTests { - c.Logf("test %d: %s", i, urlStr) - err := store.AddBundle(b, AddParams{ - URL: &router.ResolvedURL{ - URL: *charm.MustParseReference(urlStr), - PromulgatedRevision: -1, - }, - }) - c.Assert(err, gc.ErrorMatches, `bundle added with invalid id .*`) - } -} - -func (s *StoreSuite) TestAddBundleDuplicatingCharm(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - ch := storetesting.Charms.CharmDir("wordpress") - err := store.AddCharmWithArchive(newResolvedURL("~charmers/precise/wordpress-2", -1), ch) - c.Assert(err, gc.IsNil) - - b := storetesting.Charms.BundleDir("wordpress-simple") - err = store.AddBundleWithArchive(newResolvedURL("~charmers/bundle/wordpress-5", -1), b) - c.Assert(err, gc.ErrorMatches, "bundle name duplicates charm name cs:~charmers/precise/wordpress-2") -} - -func (s *StoreSuite) TestAddCharmDuplicatingBundle(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - b := storetesting.Charms.BundleDir("wordpress-simple") - err := store.AddBundleWithArchive(newResolvedURL("~charmers/bundle/wordpress-2", -1), b) - c.Assert(err, gc.IsNil) - - ch := storetesting.Charms.CharmDir("wordpress") - err = store.AddCharmWithArchive(newResolvedURL("~charmers/precise/wordpress-5", -1), ch) - c.Assert(err, gc.ErrorMatches, "charm name duplicates bundle name cs:~charmers/bundle/wordpress-2") -} - -func (s *StoreSuite) TestOpenBlob(c *gc.C) { - charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - store := s.newStore(c, false) - defer store.Close() - url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) - err := store.AddCharmWithArchive(url, charmArchive) - c.Assert(err, gc.IsNil) - - f, err := os.Open(charmArchive.Path) - c.Assert(err, gc.IsNil) - defer f.Close() - expectHash := hashOfReader(c, f) - - r, size, hash, err := store.OpenBlob(url) - c.Assert(err, gc.IsNil) - defer r.Close() - - 
c.Assert(hashOfReader(c, r), gc.Equals, expectHash) - c.Assert(hash, gc.Equals, expectHash) - - info, err := f.Stat() - c.Assert(err, gc.IsNil) - c.Assert(size, gc.Equals, info.Size()) -} - -func (s *StoreSuite) TestBlobNameAndHash(c *gc.C) { - charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - - store := s.newStore(c, false) - defer store.Close() - url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) - err := store.AddCharmWithArchive(url, charmArchive) - c.Assert(err, gc.IsNil) - - f, err := os.Open(charmArchive.Path) - c.Assert(err, gc.IsNil) - defer f.Close() - expectHash := hashOfReader(c, f) - - name, hash, err := store.BlobNameAndHash(url) - c.Assert(err, gc.IsNil) - - r, _, err := store.BlobStore.Open(name) - c.Assert(err, gc.IsNil) - defer r.Close() - - c.Assert(hash, gc.Equals, expectHash) - c.Assert(hashOfReader(c, r), gc.Equals, expectHash) -} - -func (s *StoreSuite) TestAddLog(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - urls := []*charm.Reference{ - charm.MustParseReference("cs:django"), - charm.MustParseReference("cs:rails"), - } - infoData := json.RawMessage([]byte(`"info data"`)) - errorData := json.RawMessage([]byte(`"error data"`)) - - // Add logs to the store. - beforeAdding := time.Now().Add(-time.Second) - err := store.AddLog(&infoData, mongodoc.InfoLevel, mongodoc.IngestionType, nil) - c.Assert(err, gc.IsNil) - err = store.AddLog(&errorData, mongodoc.ErrorLevel, mongodoc.IngestionType, urls) - c.Assert(err, gc.IsNil) - afterAdding := time.Now().Add(time.Second) - - // Retrieve the logs from the store. - var docs []mongodoc.Log - err = store.DB.Logs().Find(nil).Sort("_id").All(&docs) - c.Assert(err, gc.IsNil) - c.Assert(docs, gc.HasLen, 2) - - // The docs have been correctly added to the Mongo collection. - infoDoc, errorDoc := docs[0], docs[1] - c.Assert(infoDoc.Time, jc.TimeBetween(beforeAdding, afterAdding)) - c.Assert(errorDoc.Time, jc.TimeBetween(beforeAdding, afterAdding)) - infoDoc.Time = time.Time{} - errorDoc.Time = time.Time{} - c.Assert(infoDoc, jc.DeepEquals, mongodoc.Log{ - Data: []byte(infoData), - Level: mongodoc.InfoLevel, - Type: mongodoc.IngestionType, - URLs: nil, - }) - c.Assert(errorDoc, jc.DeepEquals, mongodoc.Log{ - Data: []byte(errorData), - Level: mongodoc.ErrorLevel, - Type: mongodoc.IngestionType, - URLs: urls, - }) -} - -func (s *StoreSuite) TestAddLogDataError(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - data := json.RawMessage([]byte("!")) - - // Try to add the invalid log message to the store. - err := store.AddLog(&data, mongodoc.InfoLevel, mongodoc.IngestionType, nil) - c.Assert(err, gc.ErrorMatches, "cannot marshal log data: json: error calling MarshalJSON .*") -} - -func (s *StoreSuite) TestAddLogBaseURLs(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - // Add the log to the store with associated URLs. - data := json.RawMessage([]byte(`"info data"`)) - err := store.AddLog(&data, mongodoc.WarningLevel, mongodoc.IngestionType, []*charm.Reference{ - charm.MustParseReference("trusty/django-42"), - charm.MustParseReference("~who/utopic/wordpress"), - }) - c.Assert(err, gc.IsNil) - - // Retrieve the log from the store. - var doc mongodoc.Log - err = store.DB.Logs().Find(nil).One(&doc) - c.Assert(err, gc.IsNil) - - // The log includes the base URLs. 
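- // Each fully qualified URL is followed by its base URL, so
- // trusty/django-42 also records django, and ~who/utopic/wordpress
- // also records ~who/wordpress.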
- c.Assert(doc.URLs, jc.DeepEquals, []*charm.Reference{ - charm.MustParseReference("trusty/django-42"), - charm.MustParseReference("django"), - charm.MustParseReference("~who/utopic/wordpress"), - charm.MustParseReference("~who/wordpress"), - }) -} - -func (s *StoreSuite) TestAddLogDuplicateURLs(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - // Add the log to the store with associated URLs. - data := json.RawMessage([]byte(`"info data"`)) - err := store.AddLog(&data, mongodoc.WarningLevel, mongodoc.IngestionType, []*charm.Reference{ - charm.MustParseReference("trusty/django-42"), - charm.MustParseReference("django"), - charm.MustParseReference("trusty/django-42"), - charm.MustParseReference("django"), - }) - c.Assert(err, gc.IsNil) - - // Retrieve the log from the store. - var doc mongodoc.Log - err = store.DB.Logs().Find(nil).One(&doc) - c.Assert(err, gc.IsNil) - - // The log excludes duplicate URLs. - c.Assert(doc.URLs, jc.DeepEquals, []*charm.Reference{ - charm.MustParseReference("trusty/django-42"), - charm.MustParseReference("django"), - }) -} - -func (s *StoreSuite) TestCollections(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - colls := store.DB.Collections() - names, err := store.DB.CollectionNames() - c.Assert(err, gc.IsNil) - // Some collections don't have indexes so they are created only when used. - createdOnUse := map[string]bool{ - "migrations": true, - "macaroons": true, - } - // Check that all collections mentioned by Collections are actually created. - for _, coll := range colls { - found := false - for _, name := range names { - if name == coll.Name || createdOnUse[coll.Name] { - found = true - } - } - if !found { - c.Errorf("collection %q not created", coll.Name) - } - - } - // Check that all created collections are mentioned in Collections. - for _, name := range names { - if name == "system.indexes" || name == "managedStoredResources" { - continue - } - found := false - for _, coll := range colls { - if coll.Name == name { - found = true - } - } - if !found { - c.Errorf("extra collection %q found", name) - } - } -} - -func (s *StoreSuite) TestOpenCachedBlobFileWithInvalidEntity(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - wordpress := storetesting.Charms.CharmDir("wordpress") - url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) - err := store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - - entity, err := store.FindEntity(url, "charmmeta") - c.Assert(err, gc.IsNil) - r, err := store.OpenCachedBlobFile(entity, "", nil) - c.Assert(err, gc.ErrorMatches, "provided entity does not have required fields") - c.Assert(r, gc.Equals, nil) -} - -func (s *StoreSuite) TestOpenCachedBlobFileWithFoundContent(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - wordpress := storetesting.Charms.CharmDir("wordpress") - url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) - err := store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - - // Get our expected content. - data, err := ioutil.ReadFile(filepath.Join(wordpress.Path, "metadata.yaml")) - c.Assert(err, gc.IsNil) - expectContent := string(data) - - entity, err := store.FindEntity(url, "blobname", "contents") - c.Assert(err, gc.IsNil) - - // Check that, when we open the file for the first time, - // we see the expected content. 
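- // The isFile callback selects which file within the blob's zip
- // archive gets cached under the mongodoc.FileIcon key; this test
- // deliberately picks metadata.yaml.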
- r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { - return path.Clean(f.Name) == "metadata.yaml" - }) - c.Assert(err, gc.IsNil) - defer r.Close() - data, err = ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, expectContent) - - // When retrieving the entity again, check that the Contents - // map has been set appropriately... - entity, err = store.FindEntity(url, "blobname", "contents") - c.Assert(err, gc.IsNil) - c.Assert(entity.Contents, gc.HasLen, 1) - c.Assert(entity.Contents[mongodoc.FileIcon].IsValid(), gc.Equals, true) - - // ... and that OpenCachedBlobFile still returns a reader with the - // same data, without making use of the isFile callback. - r, err = store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { - c.Errorf("isFile called unexpectedly") - return false - }) - c.Assert(err, gc.IsNil) - defer r.Close() - data, err = ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, expectContent) -} - -func (s *StoreSuite) TestAddCharmWithUser(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - wordpress := storetesting.Charms.CharmDir("wordpress") - url := newResolvedURL("cs:~who/precise/wordpress-23", -1) - err := store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - assertBaseEntity(c, store, baseURL(&url.URL), false) -} - -func (s *StoreSuite) TestOpenCachedBlobFileWithNotFoundContent(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - - wordpress := storetesting.Charms.CharmDir("wordpress") - url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) - err := store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - - entity, err := store.FindEntity(url, "blobname", "contents") - c.Assert(err, gc.IsNil) - - // Check that, when we open the file for the first time, - // we get a NotFound error. - r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { - return false - }) - c.Assert(err, gc.ErrorMatches, "not found") - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) - c.Assert(r, gc.Equals, nil) - - // When retrieving the entity again, check that the Contents - // map has been set appropriately... - entity, err = store.FindEntity(url, "blobname", "contents") - c.Assert(err, gc.IsNil) - c.Assert(entity.Contents, gc.DeepEquals, map[mongodoc.FileId]mongodoc.ZipFile{ - mongodoc.FileIcon: {}, - }) - - // ... and that OpenCachedBlobFile still returns a NotFound - // error, without making use of the isFile callback. 
- r, err = store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { - c.Errorf("isFile called unexpectedly") - return false - }) - c.Assert(err, gc.ErrorMatches, "not found") - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) - c.Assert(r, gc.Equals, nil) -} - -func hashOfReader(c *gc.C, r io.Reader) string { - hash := sha512.New384() - _, err := io.Copy(hash, r) - c.Assert(err, gc.IsNil) - return fmt.Sprintf("%x", hash.Sum(nil)) -} - -func getSizeAndHashes(c interface{}) (int64, string, string) { - var r io.ReadWriter - var err error - switch c := c.(type) { - case archiverTo: - r = new(bytes.Buffer) - err = c.ArchiveTo(r) - case *charm.BundleArchive: - r, err = os.Open(c.Path) - case *charm.CharmArchive: - r, err = os.Open(c.Path) - default: - panic(fmt.Sprintf("unable to get size and hash for type %T", c)) - } - if err != nil { - panic(err) - } - hash := blobstore.NewHash() - hash256 := sha256.New() - size, err := io.Copy(io.MultiWriter(hash, hash256), r) - if err != nil { - panic(err) - } - return size, fmt.Sprintf("%x", hash.Sum(nil)), fmt.Sprintf("%x", hash256.Sum(nil)) -} - -// testingBundle implements charm.Bundle, allowing tests -// to create a bundle with custom data. -type testingBundle struct { - data *charm.BundleData -} - -func (b *testingBundle) Data() *charm.BundleData { - return b.data -} - -func (b *testingBundle) ReadMe() string { - // For the purposes of this implementation, the bundle readme is not - // relevant. - return "" -} - -// Define fake blob attributes to be used in tests. -var fakeBlobSize, fakeBlobHash = func() (int64, string) { - b := []byte("fake content") - h := blobstore.NewHash() - h.Write(b) - return int64(len(b)), fmt.Sprintf("%x", h.Sum(nil)) -}() - -func (s *StoreSuite) TestSESPutDoesNotErrorWithNoESConfigured(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - err := store.UpdateSearch(nil) - c.Assert(err, gc.IsNil) -} - -func (s *StoreSuite) TestAddCharmDirIndexed(c *gc.C) { - charmDir := storetesting.Charms.CharmDir("wordpress") - s.checkAddCharm(c, charmDir, true, newResolvedURL("cs:~charmers/precise/wordpress-2", -1)) -} - -func (s *StoreSuite) TestAddCharmArchiveIndexed(c *gc.C) { - charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - s.checkAddCharm(c, charmArchive, true, newResolvedURL("cs:~charmers/precise/wordpress-2", -1)) -} - -func (s *StoreSuite) TestAddBundleDirIndexed(c *gc.C) { - bundleDir := storetesting.Charms.BundleDir("wordpress-simple") - s.checkAddBundle(c, bundleDir, true, newResolvedURL("cs:~charmers/bundle/baboom-2", -1)) -} - -func (s *StoreSuite) TestAddBundleArchiveIndexed(c *gc.C) { - bundleArchive, err := charm.ReadBundleArchive( - storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), - ) - c.Assert(err, gc.IsNil) - s.checkAddBundle(c, bundleArchive, true, newResolvedURL("cs:~charmers/bundle/baboom-2", -1)) -} - -func (s *StoreSuite) TestAddCharmDirIndexedAndPromulgated(c *gc.C) { - charmDir := storetesting.Charms.CharmDir("wordpress") - s.checkAddCharm(c, charmDir, true, newResolvedURL("cs:~charmers/precise/wordpress-2", 2)) -} - -func (s *StoreSuite) TestAddCharmArchiveIndexedAndPromulgated(c *gc.C) { - charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - s.checkAddCharm(c, charmArchive, true, newResolvedURL("cs:~charmers/precise/wordpress-2", 2)) -} - -func (s *StoreSuite) TestAddBundleDirIndexedAndPromulgated(c *gc.C) { - bundleDir := storetesting.Charms.BundleDir("wordpress-simple") - s.checkAddBundle(c,
bundleDir, true, newResolvedURL("cs:~charmers/bundle/baboom-2", 2)) -} - -func (s *StoreSuite) TestAddBundleArchiveIndexedAndPromulgated(c *gc.C) { - bundleArchive, err := charm.ReadBundleArchive( - storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), - ) - c.Assert(err, gc.IsNil) - s.checkAddBundle(c, bundleArchive, true, newResolvedURL("cs:~charmers/bundle/baboom-2", 2)) -} - -var findBestEntityTests = []struct { - url string - expectURL string - expectErr string -}{{ - url: "~charmers/trusty/wordpress-10", - expectURL: "~charmers/trusty/wordpress-10", -}, { - url: "~charmers/trusty/wordpress", - expectURL: "~charmers/trusty/wordpress-12", -}, { - url: "trusty/wordpress-11", - expectURL: "~charmers/trusty/wordpress-11", -}, { - url: "trusty/wordpress", - expectURL: "~mickey/trusty/wordpress-13", -}, { - url: "wordpress", - expectURL: "~mickey/trusty/wordpress-13", -}, { - url: "~mickey/wordpress-12", - expectURL: "~mickey/trusty/wordpress-12", -}, { - url: "~mickey/precise/wordpress", - expectURL: "~mickey/precise/wordpress-24", -}, { - url: "mysql", - expectErr: "entity not found", -}, { - url: "precise/wordpress", - expectURL: "~mickey/precise/wordpress-24", -}, { - url: "~donald/bundle/wordpress-simple-0", - expectURL: "~donald/bundle/wordpress-simple-0", -}, { - url: "~donald/bundle/wordpress-simple", - expectURL: "~donald/bundle/wordpress-simple-1", -}, { - url: "~donald/wordpress-simple-0", - expectURL: "~donald/bundle/wordpress-simple-0", -}, { - url: "bundle/wordpress-simple-0", - expectURL: "~donald/bundle/wordpress-simple-1", -}, { - url: "bundle/wordpress-simple", - expectURL: "~donald/bundle/wordpress-simple-1", -}, { - url: "wordpress-simple", - expectURL: "~donald/bundle/wordpress-simple-1", -}} - -func (s *StoreSuite) TestFindBestEntity(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - err := store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/trusty/wordpress-9"), - BaseURL: charm.MustParseReference("~charmers/wordpress"), - User: "charmers", - Series: "trusty", - Name: "wordpress", - Revision: 9, - PromulgatedURL: charm.MustParseReference("trusty/wordpress-9"), - PromulgatedRevision: 9, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/trusty/wordpress-10"), - BaseURL: charm.MustParseReference("~charmers/wordpress"), - User: "charmers", - Series: "trusty", - Name: "wordpress", - Revision: 10, - PromulgatedURL: charm.MustParseReference("trusty/wordpress-10"), - PromulgatedRevision: 10, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/trusty/wordpress-11"), - BaseURL: charm.MustParseReference("~charmers/wordpress"), - User: "charmers", - Series: "trusty", - Name: "wordpress", - Revision: 11, - PromulgatedURL: charm.MustParseReference("trusty/wordpress-11"), - PromulgatedRevision: 11, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/trusty/wordpress-12"), - BaseURL: charm.MustParseReference("~charmers/wordpress"), - User: "charmers", - Series: "trusty", - Name: "wordpress", - Revision: 12, - PromulgatedURL: charm.MustParseReference("trusty/wordpress-12"), - PromulgatedRevision: 12, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~mickey/precise/wordpress-12"), - BaseURL: 
charm.MustParseReference("~mickey/wordpress"), - User: "mickey", - Series: "precise", - Name: "wordpress", - Revision: 12, - PromulgatedRevision: -1, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~mickey/trusty/wordpress-12"), - BaseURL: charm.MustParseReference("~mickey/wordpress"), - User: "mickey", - Series: "trusty", - Name: "wordpress", - Revision: 12, - PromulgatedRevision: -1, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~mickey/trusty/wordpress-13"), - BaseURL: charm.MustParseReference("~mickey/wordpress"), - User: "mickey", - Series: "trusty", - Name: "wordpress", - Revision: 13, - PromulgatedURL: charm.MustParseReference("trusty/wordpress-13"), - PromulgatedRevision: 13, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~mickey/precise/wordpress-24"), - BaseURL: charm.MustParseReference("~mickey/wordpress"), - User: "mickey", - Series: "precise", - Name: "wordpress", - Revision: 24, - PromulgatedURL: charm.MustParseReference("precise/wordpress-24"), - PromulgatedRevision: 24, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~donald/bundle/wordpress-simple-0"), - BaseURL: charm.MustParseReference("~donald/wordpress-simple"), - User: "donald", - Series: "bundle", - Name: "wordpress-simple", - Revision: 0, - PromulgatedURL: nil, - PromulgatedRevision: -1, - }) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~donald/bundle/wordpress-simple-1"), - BaseURL: charm.MustParseReference("~donald/wordpress-simple"), - User: "donald", - Series: "bundle", - Name: "wordpress-simple", - Revision: 1, - PromulgatedURL: charm.MustParseReference("bundle/wordpress-simple-0"), - PromulgatedRevision: 0, - }) - c.Assert(err, gc.IsNil) - for i, test := range findBestEntityTests { - c.Logf("test %d: %s", i, test.url) - entity, err := store.FindBestEntity(charm.MustParseReference(test.url)) - if test.expectErr != "" { - c.Assert(err, gc.ErrorMatches, test.expectErr) - } else { - c.Assert(err, gc.IsNil) - c.Assert(entity.URL.String(), gc.Equals, charm.MustParseReference(test.expectURL).String()) - } - } -} - -var updateEntityTests = []struct { - url string - expectErr string -}{{ - url: "~charmers/trusty/wordpress-10", -}, { - url: "~charmers/precise/wordpress-10", - expectErr: `cannot update "cs:precise/wordpress-10": not found`, -}} - -func (s *StoreSuite) TestUpdateEntity(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - for i, test := range updateEntityTests { - c.Logf("test %d. 
%s", i, test.url) - url := newResolvedURL(test.url, 10) - _, err := store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/trusty/wordpress-10"), - BaseURL: charm.MustParseReference("~charmers/wordpress"), - User: "charmers", - Series: "trusty", - Name: "wordpress", - Revision: 9, - PromulgatedURL: charm.MustParseReference("trusty/wordpress-4"), - PromulgatedRevision: 4, - }) - c.Assert(err, gc.IsNil) - err = store.UpdateEntity(url, bson.D{{"$set", bson.D{{"extrainfo.test", []byte("PASS")}}}}) - if test.expectErr != "" { - c.Assert(err, gc.ErrorMatches, test.expectErr) - } else { - c.Assert(err, gc.IsNil) - entity, err := store.FindEntity(url) - c.Assert(err, gc.IsNil) - c.Assert(string(entity.ExtraInfo["test"]), gc.Equals, "PASS") - } - } -} - -var updateBaseEntityTests = []struct { - url string - expectErr string -}{{ - url: "~charmers/trusty/wordpress-10", -}, { - url: "~charmers/precise/mysql-10", - expectErr: `cannot update base entity for "cs:precise/mysql-10": not found`, -}} - -func (s *StoreSuite) TestUpdateBaseEntity(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - for i, test := range updateBaseEntityTests { - c.Logf("test %d. %s", i, test.url) - url := newResolvedURL(test.url, 10) - _, err := store.DB.BaseEntities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - err = store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("~charmers/wordpress"), - User: "charmers", - Name: "wordpress", - Promulgated: true, - }) - c.Assert(err, gc.IsNil) - err = store.UpdateBaseEntity(url, bson.D{{"$set", bson.D{{"acls", mongodoc.ACL{ - Read: []string{"test"}, - }}}}}) - if test.expectErr != "" { - c.Assert(err, gc.ErrorMatches, test.expectErr) - } else { - c.Assert(err, gc.IsNil) - baseEntity, err := store.FindBaseEntity(&url.URL) - c.Assert(err, gc.IsNil) - c.Assert(baseEntity.ACLs.Read, jc.DeepEquals, []string{"test"}) - } - } -} - -var promulgateTests = []struct { - about string - entities []*mongodoc.Entity - baseEntities []*mongodoc.BaseEntity - url string - promulgate bool - expectErr string - expectEntities []*mongodoc.Entity - expectBaseEntities []*mongodoc.BaseEntity -}{{ - about: "single charm not already promulgated", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - }, -}, { - about: "multiple series not already promulgated", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - entity("~charmers/precise/wordpress-0", ""), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~charmers/precise/wordpress-0", "precise/wordpress-0"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - }, -}, { - about: "charm promulgated as different user", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test-charmers/trusty/wordpress-0", ""), - }, - 
baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - baseEntity("~test-charmers/wordpress", false), - }, - url: "~test-charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-1"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - baseEntity("~test-charmers/wordpress", true), - }, -}, { - about: "single charm already promulgated", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - }, -}, { - about: "unrelated charms are unaffected", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - entity("~test-charmers/trusty/mysql-0", "trusty/mysql-0"), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - baseEntity("~test-charmers/mysql", true), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test-charmers/trusty/mysql-0", "trusty/mysql-0"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - baseEntity("~test-charmers/mysql", true), - }, -}, { - about: "only one owner promulgated", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - baseEntity("~test-charmers/wordpress", false), - baseEntity("~test2-charmers/wordpress", true), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), - entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - baseEntity("~test-charmers/wordpress", false), - baseEntity("~test2-charmers/wordpress", false), - }, -}, { - about: "recovers from two promulgated base entities", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test-charmers/trusty/wordpress-1", "trusty/wordpress-2"), - entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - baseEntity("~test-charmers/wordpress", true), - baseEntity("~test2-charmers/wordpress", true), - }, - url: "~test2-charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), - entity("~test-charmers/trusty/wordpress-1", "trusty/wordpress-2"), - entity("~test2-charmers/trusty/wordpress-0", 
"trusty/wordpress-1"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - baseEntity("~test-charmers/wordpress", false), - baseEntity("~test2-charmers/wordpress", true), - }, -}, { - about: "multiple series already promulgated", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), - entity("~charmers/precise/wordpress-0", "precise/wordpress-1"), - entity("~test-charmers/trusty/wordpress-0", ""), - entity("~test-charmers/utopic/wordpress-0", ""), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - baseEntity("~test-charmers/wordpress", false), - }, - url: "~test-charmers/trusty/wordpress-0", - promulgate: true, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), - entity("~charmers/precise/wordpress-0", "precise/wordpress-1"), - entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-3"), - entity("~test-charmers/utopic/wordpress-0", "utopic/wordpress-0"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - baseEntity("~test-charmers/wordpress", true), - }, -}, { - about: "unpromulgate single promulgated charm ", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", true), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: false, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - }, -}, { - about: "unpromulgate single unpromulgated charm ", - entities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - }, - baseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - }, - url: "~charmers/trusty/wordpress-0", - promulgate: false, - expectEntities: []*mongodoc.Entity{ - entity("~charmers/trusty/wordpress-0", ""), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - baseEntity("~charmers/wordpress", false), - }, -}} - -func (s *StoreSuite) TestSetPromulgated(c *gc.C) { - store := s.newStore(c, false) - defer store.Close() - for i, test := range promulgateTests { - c.Logf("test %d. 
%s", i, test.about) - url := newResolvedURL(test.url, -1) - _, err := store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - _, err = store.DB.BaseEntities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - for _, entity := range test.entities { - err := store.DB.Entities().Insert(entity) - c.Assert(err, gc.IsNil) - } - for _, baseEntity := range test.baseEntities { - err := store.DB.BaseEntities().Insert(baseEntity) - c.Assert(err, gc.IsNil) - } - err = store.SetPromulgated(url, test.promulgate) - if test.expectErr != "" { - c.Assert(err, gc.ErrorMatches, test.expectErr) - continue - } - c.Assert(err, gc.IsNil) - n, err := store.DB.Entities().Count() - c.Assert(err, gc.IsNil) - c.Assert(n, gc.Equals, len(test.expectEntities)) - n, err = store.DB.BaseEntities().Count() - c.Assert(err, gc.IsNil) - c.Assert(n, gc.Equals, len(test.expectBaseEntities)) - for _, expectEntity := range test.expectEntities { - entity, err := store.FindEntity(EntityResolvedURL(expectEntity)) - c.Assert(err, gc.IsNil) - c.Assert(entity, jc.DeepEquals, expectEntity) - } - for _, expectBaseEntity := range test.expectBaseEntities { - baseEntity, err := store.FindBaseEntity(expectBaseEntity.URL) - c.Assert(err, gc.IsNil) - c.Assert(baseEntity, jc.DeepEquals, expectBaseEntity) - } - } -} - -func (s *StoreSuite) TestSetPromulgatedUpdateSearch(c *gc.C) { - store := s.newStore(c, true) - defer store.Close() - - // Insert some entities in the store, ensure there are a number of revisions of the same charm. - err := store.DB.Entities().Insert(entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2")) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(entity("~charmers/precise/wordpress-0", "precise/wordpress-1")) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(entity("~openstack-charmers/trusty/wordpress-0", "")) - c.Assert(err, gc.IsNil) - err = store.DB.Entities().Insert(entity("~openstack-charmers/precise/wordpress-0", "")) - c.Assert(err, gc.IsNil) - err = store.DB.BaseEntities().Insert(baseEntity("~charmers/wordpress", true)) - c.Assert(err, gc.IsNil) - err = store.DB.BaseEntities().Insert(baseEntity("~openstack-charmers/wordpress", false)) - c.Assert(err, gc.IsNil) - url := newResolvedURL("~openstack-charmers/trusty/wordpress-0", -1) - - // Change the promulgated mysql version to openstack-charmers. - err = store.SetPromulgated(url, true) - c.Assert(err, gc.IsNil) - err = store.ES.RefreshIndex(s.TestIndex) - c.Assert(err, gc.IsNil) - // Check that the search records contain the correct information. 
- var zdoc SearchDoc - doc := zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~charmers/trusty/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL, gc.IsNil) - c.Assert(doc.PromulgatedRevision, gc.Equals, -1) - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~charmers/precise/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL, gc.IsNil) - c.Assert(doc.PromulgatedRevision, gc.Equals, -1) - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~openstack-charmers/trusty/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL.String(), gc.Equals, "cs:trusty/wordpress-3") - c.Assert(doc.PromulgatedRevision, gc.Equals, 3) - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~openstack-charmers/precise/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL.String(), gc.Equals, "cs:precise/wordpress-2") - c.Assert(doc.PromulgatedRevision, gc.Equals, 2) - - // Remove the promulgated flag from openstack-charmers, meaning wordpress is - // no longer promulgated. - err = store.SetPromulgated(url, false) - c.Assert(err, gc.IsNil) - err = store.ES.RefreshIndex(s.TestIndex) - c.Assert(err, gc.IsNil) - // Check that the search records contain the correct information. - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~charmers/trusty/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL, gc.IsNil) - c.Assert(doc.PromulgatedRevision, gc.Equals, -1) - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~charmers/precise/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL, gc.IsNil) - c.Assert(doc.PromulgatedRevision, gc.Equals, -1) - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~openstack-charmers/trusty/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL, gc.IsNil) - c.Assert(doc.PromulgatedRevision, gc.Equals, -1) - doc = zdoc - err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseReference("~openstack-charmers/precise/wordpress-0")), &doc) - c.Assert(err, gc.IsNil) - c.Assert(doc.PromulgatedURL, gc.IsNil) - c.Assert(doc.PromulgatedRevision, gc.Equals, -1) -} - -func (s *StoreSuite) TestEntityResolvedURL(c *gc.C) { - c.Assert(EntityResolvedURL(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/precise/wordpress-23"), - }), gc.DeepEquals, &router.ResolvedURL{ - URL: *charm.MustParseReference("~charmers/precise/wordpress-23"), - PromulgatedRevision: -1, - }) - c.Assert(EntityResolvedURL(&mongodoc.Entity{ - URL: charm.MustParseReference("~charmers/precise/wordpress-23"), - PromulgatedURL: charm.MustParseReference("precise/wordpress-4"), - }), gc.DeepEquals, &router.ResolvedURL{ - URL: *charm.MustParseReference("~charmers/precise/wordpress-23"), - PromulgatedRevision: 4, - }) -} - -func entity(url, purl string) *mongodoc.Entity { - id := charm.MustParseReference(url) - var pid *charm.Reference - pRev := -1 - if purl != "" { - pid = charm.MustParseReference(purl) - pRev = pid.Revision - } - return &mongodoc.Entity{ - URL: id, - User: id.User, - Name: id.Name, - Series: id.Series, - Revision: id.Revision, - BaseURL: baseURL(id), -
PromulgatedURL: pid, - PromulgatedRevision: pRev, - } -} - -func baseEntity(url string, promulgated bool) *mongodoc.BaseEntity { - id := charm.MustParseReference(url) - return &mongodoc.BaseEntity{ - URL: id, - Name: id.Name, - User: id.User, - Promulgated: mongodoc.IntBool(promulgated), - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/zip.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/zip.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/zip.go 1970-01-01 00:00:00 +0000 @@ -1,49 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "archive/zip" - "compress/flate" - "io" - - "gopkg.in/errgo.v1" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" -) - -// ZipFileReader returns a reader that will read -// content referred to by f within zipr, which should -// refer to the contents of a zip file. -func ZipFileReader(zipr io.ReadSeeker, f mongodoc.ZipFile) (io.Reader, error) { - if _, err := zipr.Seek(f.Offset, 0); err != nil { - return nil, errgo.Notef(err, "cannot seek to %d in zip content", f.Offset) - } - content := io.LimitReader(zipr, f.Size) - if !f.Compressed { - return content, nil - } - return flate.NewReader(content), nil -} - -// NewZipFile returns a new mongodoc zip file -// reference for the given zip file. -func NewZipFile(f *zip.File) (mongodoc.ZipFile, error) { - offset, err := f.DataOffset() - if err != nil { - return mongodoc.ZipFile{}, errgo.Notef(err, "cannot determine data offset for %q", f.Name) - } - zf := mongodoc.ZipFile{ - Offset: offset, - Size: int64(f.CompressedSize64), - } - switch f.Method { - case zip.Store: - case zip.Deflate: - zf.Compressed = true - default: - return mongodoc.ZipFile{}, errgo.Newf("unknown zip compression method for %q", f.Name) - } - return zf, nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/charmstore/zip_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/charmstore/zip_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/charmstore/zip_test.go 1970-01-01 00:00:00 +0000 @@ -1,123 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore_test - -import ( - "archive/zip" - "bytes" - "io" - "io/ioutil" - "strings" - - jujutesting "github.com/juju/testing" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" -) - -type zipSuite struct { - jujutesting.IsolationSuite - contents map[string]string -} - -var _ = gc.Suite(&zipSuite{}) - -func (s *zipSuite) SetUpSuite(c *gc.C) { - s.IsolationSuite.SetUpSuite(c) - s.contents = map[string]string{ - "readme.md": "readme contents", - "uncompressed_readme.md": "readme contents", - "icon.svg": "icon contents", - "metadata.yaml": "metadata contents", - "empty": "", - "uncompressed_empty": "", - } -} - -func (s *zipSuite) makeZipReader(c *gc.C, contents map[string]string) (io.ReadSeeker, []*zip.File) { - // Create a customized zip archive in memory.
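- // Entries whose names start with "uncompressed_" are written with
- // zip.Store; all other entries use zip.Deflate, so both compression
- // methods are exercised.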
- var buf bytes.Buffer - w := zip.NewWriter(&buf) - for name, content := range contents { - header := &zip.FileHeader{ - Name: name, - Method: zip.Deflate, - } - if strings.HasPrefix(name, "uncompressed_") { - header.Method = zip.Store - } - f, err := w.CreateHeader(header) - c.Assert(err, gc.IsNil) - _, err = f.Write([]byte(content)) - c.Assert(err, gc.IsNil) - } - c.Assert(w.Close(), gc.IsNil) - - // Retrieve the zip files in the archive. - zipReader := bytes.NewReader(buf.Bytes()) - r, err := zip.NewReader(zipReader, int64(buf.Len())) - c.Assert(err, gc.IsNil) - c.Assert(r.File, gc.HasLen, len(contents)) - return zipReader, r.File -} - -func (s *zipSuite) TestZipFileReader(c *gc.C) { - zipReader, files := s.makeZipReader(c, s.contents) - - // Check that a ZipFile created from each file in the archive - // can be read correctly. - for i, f := range files { - c.Logf("test %d: %s", i, f.Name) - zf, err := charmstore.NewZipFile(f) - c.Assert(err, gc.IsNil) - zfr, err := charmstore.ZipFileReader(zipReader, zf) - c.Assert(err, gc.IsNil) - content, err := ioutil.ReadAll(zfr) - c.Assert(err, gc.IsNil) - c.Assert(string(content), gc.Equals, s.contents[f.Name]) - } -} - -func (s *zipSuite) TestZipFileReaderWithErrorOnSeek(c *gc.C) { - er := &seekErrorReader{} - r, err := charmstore.ZipFileReader(er, mongodoc.ZipFile{}) - c.Assert(err, gc.ErrorMatches, "cannot seek to 0 in zip content: foiled!") - c.Assert(r, gc.Equals, nil) -} - -type seekErrorReader struct { - io.Reader -} - -func (r *seekErrorReader) Seek(offset int64, whence int) (int64, error) { - return 0, errgo.New("foiled!") -} - -func (s *zipSuite) TestNewZipFile(c *gc.C) { - _, files := s.makeZipReader(c, s.contents) - - // Check that we can create a new ZipFile from - // each zip file in the archive. - for i, f := range files { - c.Logf("test %d: %s", i, f.Name) - zf, err := charmstore.NewZipFile(f) - c.Assert(err, gc.IsNil) - offset, err := f.DataOffset() - c.Assert(err, gc.IsNil) - - c.Assert(zf.Offset, gc.Equals, offset) - c.Assert(zf.Size, gc.Equals, int64(f.CompressedSize64)) - c.Assert(zf.Compressed, gc.Equals, !strings.HasPrefix(f.Name, "uncompressed_")) - } -} - -func (s *zipSuite) TestNewZipFileWithCompressionMethodError(c *gc.C) { - _, files := s.makeZipReader(c, map[string]string{"foo": "contents"}) - f := files[0] - f.Method = 99 - _, err := charmstore.NewZipFile(f) - c.Assert(err, gc.ErrorMatches, `unknown zip compression method for "foo"`) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/debug' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/debug/handler.go' --- src/gopkg.in/juju/charmstore.v4/internal/debug/handler.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/debug/handler.go 1970-01-01 00:00:00 +0000 @@ -1,22 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The debug package holds various functions that may -// be used for debugging but should not be included -// in production code. -package debug - -import ( - "log" - "net/http" -) - -// Handler returns a new handler that wraps h -// and logs the given message with the URL path -// every time the request is invoked. 
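[Editorial note: a minimal usage sketch for the handler documented above, not part of the original diff; "apiHandler" is a hypothetical http.Handler:

	http.Handle("/", debug.Handler("api", apiHandler))

Each request is then logged with the given message, the request URL and the headers before being passed through to the wrapped handler unchanged.]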
-func Handler(msg string, h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		log.Printf("%s got request at URL %q; headers %q", msg, req.URL, req.Header)
-		h.ServeHTTP(w, req)
-	})
-}
=== removed directory 'src/gopkg.in/juju/charmstore.v4/internal/elasticsearch'
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/elasticsearch.go'
--- src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/elasticsearch.go	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/elasticsearch.go	1970-01-01 00:00:00 +0000
@@ -1,525 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-// The elasticsearch package API attempts to name methods to match the
-// corresponding elasticsearch endpoint. Method names like CatIndices are
-// named as such because they correspond to the /_cat/indices elasticsearch
-// endpoint.
-// There is no reason to use different vocabulary from that of elasticsearch.
-// Use the elasticsearch terminology and avoid mapping names of things.
-
-package elasticsearch
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"path"
-	"strings"
-
-	"github.com/juju/loggo"
-	"gopkg.in/errgo.v1"
-)
-
-const (
-	// Internal provides elasticsearch's "internal" versioning system, as described in
-	// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
-	Internal = "internal"
-
-	// External provides elasticsearch's "external" versioning system, as described in
-	// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
-	External = "external"
-
-	// ExternalGTE provides elasticsearch's "external_gte" versioning system, as described in
-	// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
-	ExternalGTE = "external_gte"
-)
-
-var log = loggo.GetLogger("charmstore.elasticsearch")
-
-var ErrConflict = errgo.New("elasticsearch document conflict")
-var ErrNotFound = errgo.New("elasticsearch document not found")
-
-type ElasticSearchError struct {
-	Err    string `json:"error"`
-	Status int    `json:"status"`
-}
-
-func (e ElasticSearchError) Error() string {
-	return e.Err
-}
-
-type Database struct {
-	Addr string
-}
-
-// Document represents a document in the elasticsearch database.
-type Document struct {
-	Found   bool            `json:"found"`
-	Id      string          `json:"_id"`
-	Index   string          `json:"_index"`
-	Type    string          `json:"_type"`
-	Version int64           `json:"_version"`
-	Source  json.RawMessage `json:"_source"`
-}
-
-// ClusterHealth represents the response from _cluster/health on elasticsearch.
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-health.html
-type ClusterHealth struct {
-	ClusterName         string `json:"cluster_name"`
-	Status              string `json:"status"`
-	TimedOut            bool   `json:"timed_out"`
-	NumberOfNodes       int64  `json:"number_of_nodes"`
-	NumberOfDataNodes   int64  `json:"number_of_data_nodes"`
-	ActivePrimaryShards int64  `json:"active_primary_shards"`
-	ActiveShards        int64  `json:"active_shards"`
-	RelocatingShards    int64  `json:"relocating_shards"`
-	InitializingShards  int64  `json:"initializing_shards"`
-	UnassignedShards    int64  `json:"unassigned_shards"`
-}
-
-func (h *ClusterHealth) String() string {
-	return fmt.Sprintf("cluster_name: %s, status: %s, timed_out: %t"+
-		", number_of_nodes: %d, number_of_data_nodes: %d"+
-		", active_primary_shards: %d, active_shards: %d"+
-		", relocating_shards: %d, initializing_shards: %d"+
-		", unassigned_shards:%d", h.ClusterName, h.Status,
-		h.TimedOut, h.NumberOfNodes, h.NumberOfDataNodes,
-		h.ActivePrimaryShards, h.ActiveShards,
-		h.RelocatingShards, h.InitializingShards,
-		h.UnassignedShards)
-}
-
-// Alias creates or updates an index alias. An alias a is created,
-// or modified if it already exists, to point to i. See
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html#indices-aliases
-// for further details.
-func (db *Database) Alias(i, a string) error {
-	indexes, err := db.ListIndexesForAlias(a)
-	if err != nil {
-		return errgo.Notef(err, "cannot retrieve current aliases")
-	}
-	var actions struct {
-		Actions []action `json:"actions"`
-	}
-	for _, i := range indexes {
-		actions.Actions = append(actions.Actions, action{Remove: &alias{Index: i, Alias: a}})
-	}
-	if i != "" {
-		actions.Actions = append(actions.Actions, action{Add: &alias{Index: i, Alias: a}})
-	}
-	if len(actions.Actions) == 0 {
-		return nil
-	}
-	if err := db.post(db.url("_aliases"), actions, nil); err != nil {
-		return errgo.Notef(err, "error updating aliases")
-	}
-	return nil
-}
-
-// CreateDocument attempts to create a new document at index/type_/id with the
-// contents in doc. It returns ErrConflict if the document already exists, and
-// a non-nil error if any other error occurs.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/create-doc.html#create-doc
-// for further details.
-func (db *Database) CreateDocument(index, type_, id string, doc interface{}) error {
-	if err := db.put(db.url(index, type_, id, "_create"), doc, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// DeleteDocument deletes the document at index/type_/id from the elasticsearch
-// database. See http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/delete-doc.html#delete-doc
-// for further details.
-func (db *Database) DeleteDocument(index, type_, id string) error {
-	if err := db.delete(db.url(index, type_, id), nil, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// DeleteIndex deletes the index with the given name from the database.
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-index.html
-// If the index does not exist or if the database cannot be
-// reached, then an error is returned.
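[Editorial note: PutIndex, Alias and DeleteIndex together support zero-downtime reindexing: build a new index, repoint the alias at it in one atomic _aliases request, then retire the old index. A minimal sketch, not part of the original diff; the index names and settings value are hypothetical:

	if err := db.PutIndex("charmstore-20160322", settings); err != nil {
		// handle error
	}
	// Repoint the "charmstore" alias; readers never observe a state
	// with no index behind the alias.
	if err := db.Alias("charmstore-20160322", "charmstore"); err != nil {
		// handle error
	}
	if err := db.DeleteIndex("charmstore-20160101"); err != nil {
		// handle error (ErrNotFound if the old index is already gone)
	}

Passing i == "" to Alias simply removes the alias from all indexes.]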
-func (db *Database) DeleteIndex(index string) error {
-	if err := db.delete(db.url(index), nil, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// GetDocument retrieves the document with the given index, type_ and id and
-// unmarshals the json response into v. GetDocument returns ErrNotFound if the
-// requested document is not present, and returns a non-nil error if any other error
-// occurs.
-func (db *Database) GetDocument(index, type_, id string, v interface{}) error {
-	d, err := db.GetESDocument(index, type_, id)
-	if err != nil {
-		return getError(err)
-	}
-	if !d.Found {
-		return ErrNotFound
-	}
-	if err := json.Unmarshal([]byte(d.Source), &v); err != nil {
-		return errgo.Mask(err)
-	}
-	return nil
-}
-
-// GetESDocument returns elasticsearch's view of the document stored at
-// index/type_/id. It is not an error if this document does not exist, in that case
-// the Found field of the returned Document will be false.
-func (db *Database) GetESDocument(index, type_, id string) (Document, error) {
-	var d Document
-	if err := db.get(db.url(index, type_, id), nil, &d); err != nil {
-		return Document{}, getError(err)
-	}
-	return d, nil
-}
-
-// HasDocument tests to see if a document with the given index, type_, and id exists
-// in the elasticsearch database. A non-nil error is returned if there is an error
-// communicating with the elasticsearch database.
-func (db *Database) HasDocument(index, type_, id string) (bool, error) {
-	var d Document
-	if err := db.get(db.url(index, type_, id)+"?_source=false", nil, &d); err != nil {
-		return false, getError(err)
-	}
-	return d.Found, nil
-}
-
-// Health checks the health status of elasticsearch and retrieves general data
-// from it, calling get on /_cluster/health to retrieve the status.
-func (db *Database) Health() (ClusterHealth, error) {
-	var result ClusterHealth
-	if err := db.get(db.url("_cluster", "health"), nil, &result); err != nil {
-		return ClusterHealth{}, getError(err)
-	}
-
-	return result, nil
-}
-
-// ListAllIndexes retrieves the list of all user indexes in the elasticsearch database.
-// Indexes that are generated to support plugins are filtered out of the list that
-// is returned.
-func (db *Database) ListAllIndexes() ([]string, error) {
-	var result map[string]interface{}
-	if err := db.get(db.url("_aliases"), nil, &result); err != nil {
-		return nil, getError(err)
-	}
-	var indexes []string
-	for key := range result {
-		// Some ElasticSearch plugins create indexes (e.g. ".marvel...") for their
-		// use. Ignore any that start with a dot.
-		if !strings.HasPrefix(key, ".") {
-			indexes = append(indexes, key)
-		}
-	}
-	return indexes, nil
-}
-
-// ListIndexesForAlias retrieves the list of all indexes in the elasticsearch database
-// that have the alias a.
-func (db *Database) ListIndexesForAlias(a string) ([]string, error) {
-	var result map[string]struct{}
-	if err := db.get(db.url("*", "_alias", a), nil, &result); err != nil {
-		return nil, getError(err)
-	}
-	var indexes []string
-	for key := range result {
-		indexes = append(indexes, key)
-	}
-	return indexes, nil
-}
-
-// PostDocument creates a new auto id document with the given index and _type
-// and returns the generated id of the document. The type_ parameter controls how
-// the document will be mapped in the index. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html
-// for more details.
-func (db *Database) PostDocument(index, type_ string, doc interface{}) (string, error) {
-	var resp struct {
-		ID string `json:"_id"`
-	}
-	if err := db.post(db.url(index, type_), doc, &resp); err != nil {
-		return "", getError(err)
-	}
-	return resp.ID, nil
-}
-
-// PutDocument creates or updates the document with the given index, type_ and
-// id. The type_ parameter controls how the document will be mapped in the index.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html
-// for more details.
-func (db *Database) PutDocument(index, type_, id string, doc interface{}) error {
-	if err := db.put(db.url(index, type_, id), doc, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// PutDocumentVersion creates or updates the document in the given index if the version
-// parameter is the same as the currently stored version. The type_ parameter
-// controls how the document will be indexed. PutDocumentVersion returns
-// ErrConflict if the data cannot be stored due to a version mismatch, and a non-nil error if
-// any other error occurs.
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning
-// for more information.
-func (db *Database) PutDocumentVersion(index, type_, id string, version int64, doc interface{}) error {
-	return db.PutDocumentVersionWithType(index, type_, id, version, "internal", doc)
-}
-
-// PutDocumentVersionWithType creates or updates the document in the given index if the version
-// parameter is the same as the currently stored version. The type_ parameter
-// controls how the document will be indexed. PutDocumentVersionWithType returns
-// ErrConflict if the data cannot be stored due to a version mismatch, and a non-nil error if
-// any other error occurs.
-//
-// The constants Internal, External and ExternalGTE represent some of the available
-// version types. Other version types may also be available, please check the elasticsearch
-// documentation.
-//
-// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning
-// and http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types for more information.
-func (db *Database) PutDocumentVersionWithType(
-	index, type_, id string,
-	version int64,
-	versionType string,
-	doc interface{}) error {
-	url := fmt.Sprintf("%s?version=%d&version_type=%s", db.url(index, type_, id), version, versionType)
-	if err := db.put(url, doc, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// PutIndex creates the index with the given configuration.
-func (db *Database) PutIndex(index string, config interface{}) error {
-	if err := db.put(db.url(index), config, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// PutMapping creates or updates the mapping with the given configuration.
-func (db *Database) PutMapping(index, type_ string, config interface{}) error {
-	if err := db.put(db.url(index, "_mapping", type_), config, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// RefreshIndex posts a _refresh to the index in the database.
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-refresh.html
-func (db *Database) RefreshIndex(index string) error {
-	if err := db.post(db.url(index, "_refresh"), nil, nil); err != nil {
-		return getError(err)
-	}
-	return nil
-}
-
-// Search performs the query specified in q on the values in index/type_ and returns a
-// SearchResult.
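[Editorial note: the versioned Put variants above give optimistic concurrency control: elasticsearch accepts the write only when the supplied version is acceptable for the chosen version type, and a rejected write surfaces as ErrConflict. A minimal sketch, not part of the original diff; the index, type and version source are hypothetical:

	// Use an externally managed version number (e.g. a mongo txn-revno)
	// so that replays of older states lose the race.
	err := db.PutDocumentVersionWithType("entities", "entity", id, version, ExternalGTE, doc)
	if err == ErrConflict {
		// A document at least this new is already indexed; nothing to do.
	}

With ExternalGTE, equal or greater versions are accepted; with External, only strictly greater ones.]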
-func (db *Database) Search(index, type_ string, q QueryDSL) (SearchResult, error) {
-	var sr SearchResult
-	if err := db.get(db.url(index, type_, "_search"), q, &sr); err != nil {
-		return SearchResult{}, errgo.Notef(getError(err), "search failed")
-	}
-	return sr, nil
-}
-
-// do performs a request on the elasticsearch server. If body is not nil it will be
-// marshaled as a JSON object and sent with the request. If v is non-nil the response
-// body will be unmarshaled into the value it points to.
-func (db *Database) do(method, url string, body, v interface{}) error {
-	log.Debugf(">>> %s %s", method, url)
-	var r io.Reader
-	if body != nil {
-		b, err := json.Marshal(body)
-		if err != nil {
-			return errgo.Notef(err, "cannot marshal body")
-		}
-		log.Debugf(">>> %s", b)
-		r = bytes.NewReader(b)
-	}
-	req, err := http.NewRequest(method, url, r)
-	if err != nil {
-		log.Debugf("*** %s", err)
-		return errgo.Notef(err, "cannot create request")
-	}
-	if body != nil {
-		req.Header.Add("Content-Type", "application/json")
-	}
-	resp, err := http.DefaultClient.Do(req)
-	if err != nil {
-		log.Debugf("*** %s", err)
-		return errgo.Mask(err)
-	}
-	defer resp.Body.Close()
-	b, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		log.Debugf("*** %s", err)
-		return errgo.Notef(err, "cannot read response")
-	}
-	log.Debugf("<<< %s", resp.Status)
-	log.Debugf("<<< %s", b)
-	var eserr *ElasticSearchError
-	// TODO(mhilton) don't try to parse every response as an error.
-	if err = json.Unmarshal(b, &eserr); err != nil {
-		log.Debugf("*** %s", err)
-	}
-	if eserr.Status != 0 {
-		return eserr
-	}
-	if v != nil {
-		if err = json.Unmarshal(b, v); err != nil {
-			log.Debugf("*** %s", err)
-			return errgo.Notef(err, "cannot unmarshal response")
-		}
-	}
-	return nil
-}
-
-// delete makes a DELETE request to the database url. A non-nil body will be
-// sent with the request and if v is not nil then the response will be unmarshaled
-// into the value it points to.
-func (db *Database) delete(url string, body, v interface{}) error {
-	return db.do("DELETE", url, body, v)
-}
-
-// get makes a GET request to the database url. A non-nil body will be
-// sent with the request and if v is not nil then the response will be unmarshaled
-// into the value it points to.
-func (db *Database) get(url string, body, v interface{}) error {
-	return db.do("GET", url, body, v)
-}
-
-// post makes a POST request to the database url. A non-nil body will be
-// sent with the request and if v is not nil then the response will be unmarshaled
-// into the value it points to.
-func (db *Database) post(url string, body, v interface{}) error {
-	return db.do("POST", url, body, v)
-}
-
-// put makes a PUT request to the database url. A non-nil body will be
-// sent with the request and if v is not nil then the response will be unmarshaled
-// into the value it points to.
-func (db *Database) put(url string, body, v interface{}) error {
-	return db.do("PUT", url, body, v)
-}
-
-// url constructs the URL for accessing the database.
-func (db *Database) url(pathParts ...string) string {
-	path := path.Join(pathParts...)
-	url := &url.URL{
-		Scheme: "http",
-		Host:   db.Addr,
-		Path:   path,
-	}
-	return url.String()
-}
-
-// SearchResult is the result returned after performing a search in elasticsearch
-type SearchResult struct {
-	Hits struct {
-		Total    int     `json:"total"`
-		MaxScore float64 `json:"max_score"`
-		Hits     []Hit   `json:"hits"`
-	} `json:"hits"`
-	Took     int  `json:"took"`
-	TimedOut bool `json:"timed_out"`
-}
-
-// Hit represents an individual search hit returned from elasticsearch
-type Hit struct {
-	Index  string          `json:"_index"`
-	Type   string          `json:"_type"`
-	ID     string          `json:"_id"`
-	Score  float64         `json:"_score"`
-	Source json.RawMessage `json:"_source"`
-	Fields Fields          `json:"fields"`
-}
-
-type Fields map[string][]interface{}
-
-// Get retrieves the first value of key in the fields map. If no such value
-// exists then it will return nil.
-func (f Fields) Get(key string) interface{} {
-	if len(f[key]) < 1 {
-		return nil
-	}
-	return f[key][0]
-}
-
-// GetString retrieves the first value of key in the fields map, and coerces it into a
-// string. If no such value exists or the value is not a string, then "" will be returned.
-func (f Fields) GetString(key string) string {
-	s, ok := f.Get(key).(string)
-	if !ok {
-		return ""
-	}
-	return s
-}
-
-// EscapeRegexp returns the supplied string with any special characters escaped.
-// A regular expression match on the returned string will match exactly the characters
-// in the supplied string.
-func EscapeRegexp(s string) string {
-	return regexpReplacer.Replace(s)
-}
-
-var regexpReplacer = strings.NewReplacer(
-	`.`, `\.`,
-	`?`, `\?`,
-	`+`, `\+`,
-	`*`, `\*`,
-	`|`, `\|`,
-	`{`, `\{`,
-	`}`, `\}`,
-	`[`, `\[`,
-	`]`, `\]`,
-	`(`, `\(`,
-	`)`, `\)`,
-	`"`, `\"`,
-	`\`, `\\`,
-	`#`, `\#`,
-	`@`, `\@`,
-	`&`, `\&`,
-	`<`, `\<`,
-	`>`, `\>`,
-	`~`, `\~`,
-)
-
-// alias describes an alias in elasticsearch.
-type alias struct {
-	Index string `json:"index"`
-	Alias string `json:"alias"`
-}
-
-// action is an action that can be performed on an alias
-type action struct {
-	Remove *alias `json:"remove,omitempty"`
-	Add    *alias `json:"add,omitempty"`
-}
-
-// getError derives an error from the underlying error returned
-// by elasticsearch.
-func getError(err error) error {
-	if eserr, ok := err.(*ElasticSearchError); ok {
-		switch eserr.Status {
-		case http.StatusNotFound:
-			return ErrNotFound
-		case http.StatusConflict:
-			return ErrConflict
-		default:
-			return err
-		}
-	}
-	return err
-}
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/elasticsearch_test.go'
--- src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/elasticsearch_test.go	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/elasticsearch_test.go	1970-01-01 00:00:00 +0000
@@ -1,443 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
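[Editorial note: a minimal sketch of consuming a SearchResult through the Fields helpers above; not part of the original diff, and the index, type and field names are hypothetical:

	q := QueryDSL{
		Fields: []string{"Name"},
		Size:   10,
		Query:  TermQuery{Field: "Name", Value: "wordpress"},
	}
	res, err := db.Search("entities", "entity", q)
	if err != nil {
		// handle error
	}
	for _, hit := range res.Hits.Hits {
		name := hit.Fields.GetString("Name") // "" if absent or not a string
		_ = name
	}

]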
- -package elasticsearch_test - -import ( - "encoding/json" - "testing" - "time" - - jujutesting "github.com/juju/testing" - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - es "gopkg.in/juju/charmstore.v4/internal/elasticsearch" - "gopkg.in/juju/charmstore.v4/internal/storetesting" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} - -type Suite struct { - jujutesting.IsolationSuite - storetesting.ElasticSearchSuite - Indexes []string - TestIndex string -} - -func (s *Suite) SetUpSuite(c *gc.C) { - s.IsolationSuite.SetUpSuite(c) - s.ElasticSearchSuite.SetUpSuite(c) -} -func (s *Suite) TearDownSuite(c *gc.C) { - s.ElasticSearchSuite.TearDownSuite(c) - s.IsolationSuite.TearDownSuite(c) -} -func (s *Suite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.ElasticSearchSuite.SetUpTest(c) - s.TestIndex = s.NewIndex(c) - err := s.ES.PutIndex(s.TestIndex, map[string]interface{}{"settings": map[string]interface{}{"number_of_shards": 1}}) - c.Assert(err, gc.Equals, nil) - err = s.ES.PutDocument(s.TestIndex, "testtype", s.TestIndex, struct{}{}) - c.Assert(err, gc.Equals, nil) - err = s.ES.RefreshIndex(s.TestIndex) - c.Assert(err, gc.Equals, nil) -} -func (s *Suite) TearDownTest(c *gc.C) { - for _, i := range s.Indexes { - s.ES.DeleteIndex(i) - } - s.ElasticSearchSuite.TearDownTest(c) - s.IsolationSuite.TearDownTest(c) -} - -func (s *Suite) NewIndex(c *gc.C) string { - uuid, err := utils.NewUUID() - c.Assert(err, gc.Equals, nil) - idx := time.Now().Format("20060102150405") + "-" + uuid.String() - s.Indexes = append(s.Indexes, idx) - return idx -} - -var _ = gc.Suite(&Suite{}) - -func (s *Suite) TestSuccessfulPostDocument(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - id, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) - c.Assert(err, gc.IsNil) - c.Assert(id, gc.NotNil) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", id, &result) - c.Assert(err, gc.IsNil) -} - -func (s *Suite) TestSuccessfulPutNewDocument(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - // Show that no document with this id exists. - exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, false) - err = s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) - c.Assert(err, gc.IsNil) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) - c.Assert(result["a"], gc.Equals, "b") - exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) -} - -func (s *Suite) TestSuccessfulPutUpdatedDocument(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - err := s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) - c.Assert(err, gc.IsNil) - doc["a"] = "c" - err = s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) - c.Assert(err, gc.IsNil) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) - c.Assert(result["a"], gc.Equals, "c") -} - -func (s *Suite) TestPutVersionWithTypeNewDocument(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - // Show that no document with this id exists. 
- exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, false) - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) - c.Assert(err, gc.IsNil) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) - c.Assert(result["a"], gc.Equals, "b") - exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) -} - -func (s *Suite) TestPutVersionWithTypeUpdateCurrentDocumentVersion(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - // Show that no document with this id exists. - exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, false) - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) - c.Assert(err, gc.IsNil) - doc["a"] = "c" - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) - c.Assert(err, gc.IsNil) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) - c.Assert(result["a"], gc.Equals, "c") - exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) -} - -func (s *Suite) TestPutVersionWithTypeUpdateLaterDocumentVersion(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - // Show that no document with this id exists. - exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, false) - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) - c.Assert(err, gc.IsNil) - doc["a"] = "c" - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 3, es.ExternalGTE, doc) - c.Assert(err, gc.IsNil) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) - c.Assert(result["a"], gc.Equals, "c") - exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) -} - -func (s *Suite) TestPutVersionWithTypeUpdateEarlierDocumentVersion(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - // Show that no document with this id exists. 
- exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, false) - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 3, es.ExternalGTE, doc) - c.Assert(err, gc.IsNil) - doc["a"] = "c" - err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) - c.Assert(err, gc.Equals, es.ErrConflict) - var result map[string]string - err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) - c.Assert(result["a"], gc.Equals, "b") - exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") - c.Assert(err, gc.IsNil) - c.Assert(exists, gc.Equals, true) -} - -func (s *Suite) TestDelete(c *gc.C) { - doc := map[string]string{ - "a": "b", - } - _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) - c.Assert(err, gc.IsNil) - err = s.ES.DeleteIndex(s.TestIndex) - c.Assert(err, gc.IsNil) -} - -func (s *Suite) TestDeleteErrorOnNonExistingIndex(c *gc.C) { - err := s.ES.DeleteIndex("nope") - c.Assert(err, gc.NotNil) - c.Assert(err.Error(), gc.Equals, "elasticsearch document not found") -} - -func (s *Suite) TestIndexesCreatedAutomatically(c *gc.C) { - doc := map[string]string{"a": "b"} - _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) - c.Assert(err, gc.IsNil) - indexes, err := s.ES.ListAllIndexes() - c.Assert(err, gc.IsNil) - c.Assert(indexes, gc.Not(gc.HasLen), 0) - found := false - for _, index2 := range indexes { - if index2 == s.TestIndex { - found = true - } - } - c.Assert(found, gc.Equals, true) -} - -func (s *Suite) TestHealthIsWorking(c *gc.C) { - result, err := s.ES.Health() - c.Assert(err, gc.IsNil) - c.Assert(result.ClusterName, gc.NotNil) - c.Assert(result.ActivePrimaryShards, gc.NotNil) - c.Assert(result.ActiveShards, gc.NotNil) - c.Assert(result.InitializingShards, gc.NotNil) - c.Assert(result.NumberOfDataNodes, gc.NotNil) - c.Assert(result.NumberOfNodes, gc.NotNil) - c.Assert(result.RelocatingShards, gc.NotNil) - c.Assert(result.Status, gc.NotNil) - c.Assert(result.TimedOut, gc.NotNil) - c.Assert(result.UnassignedShards, gc.NotNil) -} - -func (s *Suite) TestSearch(c *gc.C) { - doc := map[string]string{"foo": "bar"} - _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) - c.Assert(err, gc.IsNil) - doc["foo"] = "baz" - id2, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) - c.Assert(err, gc.IsNil) - s.ES.RefreshIndex(s.TestIndex) - q := es.QueryDSL{ - Query: es.TermQuery{Field: "foo", Value: "baz"}, - Fields: []string{"foo"}, - } - results, err := s.ES.Search(s.TestIndex, "testtype", q) - c.Assert(err, gc.IsNil) - c.Assert(results.Hits.Total, gc.Equals, 1) - c.Assert(results.Hits.Hits[0].ID, gc.Equals, id2) - c.Assert(results.Hits.Hits[0].Fields.GetString("foo"), gc.Equals, "baz") -} - -func (s *Suite) TestPutMapping(c *gc.C) { - var mapping = map[string]interface{}{ - "testtype": map[string]interface{}{ - "properties": map[string]interface{}{ - "foo": map[string]interface{}{ - "stored": true, - "type": "string", - }, - }, - }, - } - err := s.ES.PutMapping(s.TestIndex, "testtype", mapping) - c.Assert(err, gc.IsNil) -} - -func (s *Suite) TestEscapeRegexp(c *gc.C) { - var tests = []struct { - about string - original string - expected string - }{{ - about: `plain string`, - original: `foo`, - expected: `foo`, - }, { - about: `escape .`, - original: `foo.bar`, - expected: `foo\.bar`, - }, { - about: `escape ?`, - original: `foo?bar`, - expected: `foo\?bar`, - }, { - about: `escape +`, - original: `foo+bar`, - expected: `foo\+bar`, - }, { - about: `escape *`, - 
original: `foo*bar`,
-		expected: `foo\*bar`,
-	}, {
-		about:    `escape |`,
-		original: `foo|bar`,
-		expected: `foo\|bar`,
-	}, {
-		about:    `escape {`,
-		original: `foo{bar`,
-		expected: `foo\{bar`,
-	}, {
-		about:    `escape }`,
-		original: `foo}bar`,
-		expected: `foo\}bar`,
-	}, {
-		about:    `escape [`,
-		original: `foo[bar`,
-		expected: `foo\[bar`,
-	}, {
-		about:    `escape ]`,
-		original: `foo]bar`,
-		expected: `foo\]bar`,
-	}, {
-		about:    `escape (`,
-		original: `foo(bar`,
-		expected: `foo\(bar`,
-	}, {
-		about:    `escape )`,
-		original: `foo)bar`,
-		expected: `foo\)bar`,
-	}, {
-		about:    `escape "`,
-		original: `foo"bar`,
-		expected: `foo\"bar`,
-	}, {
-		about:    `escape \`,
-		original: `foo\bar`,
-		expected: `foo\\bar`,
-	}, {
-		about:    `escape #`,
-		original: `foo#bar`,
-		expected: `foo\#bar`,
-	}, {
-		about:    `escape @`,
-		original: `foo@bar`,
-		expected: `foo\@bar`,
-	}, {
-		about:    `escape &`,
-		original: `foo&bar`,
-		expected: `foo\&bar`,
-	}, {
-		about:    `escape <`,
-		original: `foo<bar`,
-		expected: `foo\<bar`,
-	}, {
-		about:    `escape >`,
-		original: `foo>bar`,
-		expected: `foo\>bar`,
-	}, {
-		about:    `escape ~`,
-		original: `foo~bar`,
-		expected: `foo\~bar`,
-	}, {
-		about:    `escape start`,
-		original: `*foo`,
-		expected: `\*foo`,
-	}, {
-		about:    `escape end`,
-		original: `foo\`,
-		expected: `foo\\`,
-	}, {
-		about:    `escape many`,
-		original: `\"*\`,
-		expected: `\\\"\*\\`,
-	}}
-	for i, test := range tests {
-		c.Logf("%d: %s", i, test.about)
-		c.Assert(es.EscapeRegexp(test.original), gc.Equals, test.expected)
-	}
-}
-
-func (s *Suite) TestAlias(c *gc.C) {
-	uuid, err := utils.NewUUID()
-	c.Assert(err, gc.Equals, nil)
-	alias := uuid.String()
-	index1 := alias + "-1"
-	index2 := alias + "-2"
-
-	// Create first index
-	err = s.ES.PutIndex(index1, struct{}{})
-	c.Assert(err, gc.Equals, nil)
-	defer s.ES.DeleteIndex(index1)
-
-	// Create second index
-	err = s.ES.PutIndex(index2, struct{}{})
-	c.Assert(err, gc.Equals, nil)
-	defer s.ES.DeleteIndex(index2)
-
-	// Check alias is not aliased to anything
-	indexes, err := s.ES.ListIndexesForAlias(alias)
-	c.Assert(err, gc.Equals, nil)
-	c.Assert(indexes, gc.HasLen, 0)
-
-	// Associate alias with index 1
-	err = s.ES.Alias(index1, alias)
-	c.Assert(err, gc.Equals, nil)
-	indexes, err = s.ES.ListIndexesForAlias(alias)
-	c.Assert(err, gc.Equals, nil)
-	c.Assert(indexes, gc.HasLen, 1)
-	c.Assert(indexes[0], gc.Equals, index1)
-
-	// Associate alias with index 2, removing it from index 1
-	err = s.ES.Alias(index2, alias)
-	c.Assert(err, gc.Equals, nil)
-	indexes, err = s.ES.ListIndexesForAlias(alias)
-	c.Assert(err, gc.Equals, nil)
-	c.Assert(indexes, gc.HasLen, 1)
-	c.Assert(indexes[0], gc.Equals, index2)
-}
-
-func (S *Suite) TestDecodingHealthStatus(c *gc.C) {
-	const health_message = `{
-	"cluster_name":"elasticsearch",
-	"status": "green",
-	"timed_out": true,
-	"number_of_nodes": 2,
-	"number_of_data_nodes": 2,
-	"active_primary_shards": 14,
-	"active_shards": 28,
-	"relocating_shards": 2,
-	"initializing_shards": 2,
-	"unassigned_shards": 2
-	}`
-
-	var h es.ClusterHealth
-	err := json.Unmarshal([]byte(health_message), &h)
-	c.Assert(err, gc.IsNil)
-	c.Assert(h.ClusterName, gc.Equals, "elasticsearch")
-	c.Assert(h.Status, gc.Equals, "green")
-	c.Assert(h.TimedOut, gc.Equals, true)
-	c.Assert(h.NumberOfNodes, gc.Equals, int64(2))
-	c.Assert(h.NumberOfDataNodes, gc.Equals, int64(2))
-	c.Assert(h.ActivePrimaryShards, gc.Equals, int64(14))
-	c.Assert(h.ActiveShards, gc.Equals, int64(28))
-	c.Assert(h.RelocatingShards, gc.Equals, int64(2))
-	c.Assert(h.InitializingShards, gc.Equals, int64(2))
-	c.Assert(h.UnassignedShards, gc.Equals, int64(2))
-}
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/query.go'
--- src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/query.go	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/query.go	1970-01-01 00:00:00 +0000
@@ -1,252 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package elasticsearch
-
-import (
-	"encoding/json"
-	"fmt"
-)
-
-// Query DSL - Queries
-
-// Query represents a query in the elasticsearch DSL.
-type Query interface {
-	json.Marshaler
-}
-
-// Filter represents a filter in the elasticsearch DSL.
-type Filter interface {
-	json.Marshaler
-}
-
-// Function is a function definition for use with a FunctionScoreQuery.
-type Function interface{}
-
-// BoostField creates a string which represents a field name with a boost value.
-func BoostField(field string, boost float64) string {
-	return fmt.Sprintf("%s^%f", field, boost)
-}
-
-// MatchAllQuery provides a query that matches all
-// documents in the index.
-type MatchAllQuery struct {
-}
-
-func (m MatchAllQuery) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("match_all", struct{}{})
-}
-
-// MatchQuery provides a query that matches against
-// a complete field.
-type MatchQuery struct {
-	Field string
-	Query string
-	Type  string
-}
-
-func (m MatchQuery) MarshalJSON() ([]byte, error) {
-	params := map[string]interface{}{"query": m.Query}
-	if m.Type != "" {
-		params["type"] = m.Type
-	}
-
-	return marshalNamedObject("match", map[string]interface{}{m.Field: params})
-}
-
-// MultiMatchQuery provides a query that matches on a number of fields.
-type MultiMatchQuery struct {
-	Query  string
-	Fields []string
-}
-
-func (m MultiMatchQuery) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("multi_match", map[string]interface{}{
-		"query":  m.Query,
-		"fields": m.Fields,
-	})
-}
-
-// FilteredQuery provides a query that includes a filter.
-type FilteredQuery struct {
-	Query  Query
-	Filter Filter
-}
-
-func (f FilteredQuery) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("filtered", map[string]interface{}{
-		"query":  f.Query,
-		"filter": f.Filter,
-	})
-}
-
-// FunctionScoreQuery provides a query that adjusts the scoring of a
-// query by applying functions to it.
-type FunctionScoreQuery struct {
-	Query     Query
-	Functions []Function
-}
-
-func (f FunctionScoreQuery) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("function_score", map[string]interface{}{
-		"query":     f.Query,
-		"functions": f.Functions,
-	})
-}
-
-// TermQuery provides a query that matches a term in a field.
-type TermQuery struct {
-	Field string
-	Value string
-}
-
-func (t TermQuery) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("term", map[string]interface{}{
-		t.Field: t.Value,
-	})
-}
-
-// DecayFunction provides a function that boosts depending on
-// the difference in values of a certain field. See
-// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_decay_functions
-// for details.
-type DecayFunction struct {
-	Function string
-	Field    string
-	Scale    string
-}
-
-func (f DecayFunction) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject(f.Function, map[string]interface{}{
-		f.Field: map[string]interface{}{
-			"scale": f.Scale,
-		},
-	})
-}
-
-// BoostFactorFunction provides a function that boosts results by the specified amount.
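[Editorial note: a minimal sketch of composing these types into a search request; not part of the original diff, and the field values are hypothetical. QueryDSL, defined further below, is the top-level envelope:

	q := QueryDSL{
		Fields: []string{"Name", "Series"},
		Size:   10,
		Query: FilteredQuery{
			Query:  MatchQuery{Field: "Name", Query: "wordpress"},
			Filter: TermFilter{Field: "Series", Value: "trusty"},
		},
	}

Each type's MarshalJSON wraps its parameters under the matching DSL keyword ("match", "filtered", ...) via marshalNamedObject, so the Go struct tree mirrors the JSON tree one-to-one.]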
-type BoostFactorFunction struct {
-	Filter      Filter  `json:"filter,omitempty"`
-	BoostFactor float64 `json:"boost_factor"`
-}
-
-// FieldValueFactorFunction boosts the results by the value of a field in the document.
-type FieldValueFactorFunction struct {
-	Field    string  `json:"field"`
-	Factor   float64 `json:"factor,omitempty"`
-	Modifier string  `json:"modifier,omitempty"`
-}
-
-func (f FieldValueFactorFunction) MarshalJSON() ([]byte, error) {
-	type ffvf FieldValueFactorFunction
-	return marshalNamedObject("field_value_factor", ffvf(f))
-}
-
-// AndFilter provides a filter that matches if all of the internal
-// filters match.
-type AndFilter []Filter
-
-func (a AndFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("and", map[string]interface{}{
-		"filters": []Filter(a),
-	})
-}
-
-// OrFilter provides a filter that matches if any of the internal
-// filters match.
-type OrFilter []Filter
-
-func (o OrFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("or", map[string]interface{}{
-		"filters": []Filter(o),
-	})
-}
-
-// NotFilter provides a filter that matches the opposite of the
-// wrapped filter.
-type NotFilter struct {
-	Filter Filter
-}
-
-func (n NotFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("not", n.Filter)
-}
-
-// QueryFilter provides a filter that matches when a query matches
-// on a result.
-type QueryFilter struct {
-	Query Query
-}
-
-func (q QueryFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("query", q.Query)
-}
-
-// RegexpFilter provides a filter that matches a field against a
-// regular expression.
-type RegexpFilter struct {
-	Field  string
-	Regexp string
-}
-
-func (r RegexpFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("regexp", map[string]string{r.Field: r.Regexp})
-}
-
-// TermFilter provides a filter that requires a field to match.
-type TermFilter struct {
-	Field string
-	Value string
-}
-
-func (t TermFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("term", map[string]string{t.Field: t.Value})
-}
-
-// ExistsFilter provides a filter that requires a field to be present.
-type ExistsFilter string
-
-func (f ExistsFilter) MarshalJSON() ([]byte, error) {
-	return marshalNamedObject("exists", map[string]string{"field": string(f)})
-}
-
-// QueryDSL provides a structure to put together a query using the
-// elasticsearch DSL.
-type QueryDSL struct {
-	Fields []string `json:"fields"`
-	From   int      `json:"from,omitempty"`
-	Size   int      `json:"size,omitempty"`
-	Query  Query    `json:"query,omitempty"`
-	Sort   []Sort   `json:"sort,omitempty"`
-}
-
-type Sort struct {
-	Field string
-	Order Order
-}
-
-type Order struct {
-	Order string `json:"order"`
-}
-
-func (s Sort) MarshalJSON() ([]byte, error) {
-	return json.Marshal(map[string]Order{
-		s.Field: {s.Order.Order},
-	})
-}
-
-// Ascending is an Order that orders a sort by ascending through the values.
-var Ascending = Order{"asc"}
-
-// Descending is an Order that orders a sort by descending through the values.
-var Descending = Order{"desc"}
-
-// marshalNamedObject provides a helper that creates json objects in a form
-// often required by the elasticsearch query DSL. The objects created
-// take the following form:
-//	{
-//		name: obj
-//	}
-func marshalNamedObject(name string, obj interface{}) ([]byte, error) {
-	return json.Marshal(map[string]interface{}{name: obj})
-}
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/query_test.go'
--- src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/query_test.go	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/elasticsearch/query_test.go	1970-01-01 00:00:00 +0000
@@ -1,149 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-package elasticsearch_test
-
-import (
-	jc "github.com/juju/testing/checkers"
-	gc "gopkg.in/check.v1"
-
-	. "gopkg.in/juju/charmstore.v4/internal/elasticsearch"
-)
-
-type QuerySuite struct{}
-
-var _ = gc.Suite(&QuerySuite{})
-
-func (s *QuerySuite) TestJSONEncodings(c *gc.C) {
-	var tests = []struct {
-		about string
-		query interface{}
-		json  string
-	}{{
-		about: "term query",
-		query: TermQuery{Field: "foo", Value: "bar"},
-		json:  `{"term": {"foo": "bar"}}`,
-	}, {
-		about: "match all query",
-		query: MatchAllQuery{},
-		json:  `{"match_all": {}}`,
-	}, {
-		about: "match query",
-		query: MatchQuery{Field: "foo", Query: "bar"},
-		json:  `{"match": {"foo": {"query": "bar"}}}`,
-	}, {
-		about: "match query with type",
-		query: MatchQuery{Field: "foo", Query: "bar", Type: "baz"},
-		json:  `{"match": {"foo": {"query": "bar", "type": "baz"}}}`,
-	}, {
-		about: "multi match query",
-		query: MultiMatchQuery{Query: "foo", Fields: []string{BoostField("bar", 2), "baz"}},
-		json:  `{"multi_match": {"query": "foo", "fields": ["bar^2.000000", "baz"]}}`,
-	}, {
-		about: "filtered query",
-		query: FilteredQuery{
-			Query:  TermQuery{Field: "foo", Value: "bar"},
-			Filter: TermFilter{Field: "baz", Value: "quz"}},
-		json: `{"filtered": {"query": {"term": {"foo": "bar"}}, "filter": {"term": {"baz": "quz"}}}}`,
-	}, {
-		about: "function score query",
-		query: FunctionScoreQuery{
-			Query: TermQuery{Field: "foo", Value: "bar"},
-			Functions: []Function{
-				DecayFunction{
-					Function: "baz",
-					Field:    "foo",
-					Scale:    "quz",
-				},
-			},
-		},
-		json: `{"function_score": {"query": {"term": {"foo": "bar"}}, "functions": [{"baz": {"foo":{"scale": "quz"}}}]}}`,
-	}, {
-		about: "term filter",
-		query: TermFilter{Field: "foo", Value: "bar"},
-		json:  `{"term": {"foo": "bar"}}`,
-	}, {
-		about: "and filter",
-		query: AndFilter{
-			TermFilter{Field: "foo", Value: "bar"},
-			TermFilter{Field: "baz", Value: "quz"},
-		},
-		json: `{"and": {"filters": [{"term": {"foo": "bar"}}, {"term": {"baz": "quz"}}]}}`,
-	}, {
-		about: "or filter",
-		query: OrFilter{
-			TermFilter{Field: "foo", Value: "bar"},
-			TermFilter{Field: "baz", Value: "quz"},
-		},
-		json: `{"or": {"filters": [{"term": {"foo": "bar"}}, {"term": {"baz": "quz"}}]}}`,
-	}, {
-		about: "not filter",
-		query: NotFilter{TermFilter{Field: "foo", Value: "bar"}},
-		json:  `{"not": {"term": {"foo": "bar"}}}`,
-	}, {
-		about: "query filter",
-		query: QueryFilter{Query: TermQuery{Field: "foo", Value: "bar"}},
-		json:  `{"query": {"term": {"foo": "bar"}}}`,
-	}, {
-		about: "regexp filter",
-		query: RegexpFilter{Field: "foo", Regexp: ".*"},
-		json:  `{"regexp": {"foo": ".*"}}`,
-	}, {
-		about: "query dsl",
-		query: QueryDSL{
-			Fields: []string{"foo", "bar"},
-			Size:   10,
-			Query:  TermQuery{Field: "baz", Value: "quz"},
-			Sort:   []Sort{{Field: "foo", Order: Order{"desc"}}},
-		},
-		json: `{"fields": ["foo", "bar"], "size": 10, "query": {"term": {"baz": "quz"}}, "sort": [{"foo": { "order": "desc"}}]}`,
-	}, {
-		about: "decay function",
-		query: DecayFunction{
-			Function: "baz",
-			Field:    "foo",
-			Scale:    "quz",
-		},
-		json: `{"baz": {"foo":{"scale": "quz"}}}`,
-	}, {
-		about: "boost_factor function",
-		query: BoostFactorFunction{
-			BoostFactor: 1.5,
-		},
-		json: `{"boost_factor": 1.5}`,
-	}, {
-		about: "boost_factor function with filter",
-		query: BoostFactorFunction{
-			BoostFactor: 1.5,
-			Filter: TermFilter{
-				Field: "foo",
-				Value: "bar",
-			},
-		},
-		json: `{"filter": {"term": {"foo": "bar"}}, "boost_factor": 1.5}`,
-	}, {
-		about: "paginated query",
-		query: QueryDSL{
-			Fields: []string{"foo", "bar"},
-			Size:   10,
-			Query:  TermQuery{Field: "baz", Value: "quz"},
-			Sort:   []Sort{{Field: "foo", Order: Order{"desc"}}},
-			From:   10,
-		},
-		json: `{"fields": ["foo", "bar"], "size": 10, "query": {"term": {"baz": "quz"}}, "sort": [{"foo": { "order": "desc"}}], "from": 10}`,
-	}, {
-		about: "field value factor",
-		query: FieldValueFactorFunction{
-			Field:    "foo",
-			Factor:   1.2,
-			Modifier: "bar",
-		},
-		json: `{"field_value_factor": {"field": "foo", "factor": 1.2, "modifier": "bar"}}`,
-	}}
-	for i, test := range tests {
-		c.Logf("%d: %s", i, test.about)
-		// Note JSONEquals is being used a bit backwards here; this is fine,
-		// but any error results may be a little confusing.
-		c.Assert(test.json, jc.JSONEquals, test.query)
-	}
-}
=== removed directory 'src/gopkg.in/juju/charmstore.v4/internal/legacy'
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/legacy/api.go'
--- src/gopkg.in/juju/charmstore.v4/internal/legacy/api.go	2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/legacy/api.go	1970-01-01 00:00:00 +0000
@@ -1,288 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
-
-// The legacy package implements the legacy API, as follows:
-//
-// /charm-info
-//
-// A GET call to `/charm-info` returns info about one or more charms, including
-// its canonical URL, revision, SHA256 checksum and VCS revision digest.
-// The returned info is in JSON format.
-// For instance a request to `/charm-info?charms=cs:trusty/juju-gui` returns the
-// following response:
-//
-//     {"cs:trusty/juju-gui": {
-//         "canonical-url": "cs:trusty/juju-gui",
-//         "revision": 3,
-//         "sha256": "a15c77f3f92a0fb7b61e9...",
-//         "digest": "jeff.pihach@canonical.com-20140612210347-6cc9su1jqjkhbi84"
-//     }}
-//
-// /charm-event:
-//
-// A GET call to `/charm-event` returns info about an event that occurred in the
-// life of the specified charm(s). Currently two types of events are logged:
-// "published" (a charm has been published and it's available in the store) and
-// "publish-error" (an error occurred while importing the charm).
-// E.g. a call to `/charm-event?charms=cs:trusty/juju-gui` generates the following
-// JSON response:
-//
-//     {"cs:trusty/juju-gui": {
-//         "kind": "published",
-//         "revision": 3,
-//         "digest": "jeff.pihach@canonical.com-20140612210347-6cc9su1jqjkhbi84",
-//         "time": "2014-06-16T14:41:19Z"
-//     }}
-//
-// /charm/
-//
-// The `charm` API provides the ability to download a charm as a Zip archive,
-// given the charm identifier. For instance, it is possible to download the Juju
-// GUI charm by performing a GET call to `/charm/trusty/juju-gui-42`. Both the
-// revision and OS series can be omitted, e.g. `/charm/juju-gui` will download the
-// last revision of the Juju GUI charm with support for the most recent Ubuntu LTS
-// series.
-// -// /stats/counter/ -// -// Stats can be retrieved by calling `/stats/counter/{key}` where key is a query -// that specifies the counter stats to calculate and return. -// -// For instance, a call to `/stats/counter/charm-bundle:*` returns the number of -// times a charm has been downloaded from the store. To get the same value for -// a specific charm, it is possible to filter the results by passing the charm -// series and name, e.g. `/stats/counter/charm-bundle:trusty:juju-gui`. -// -// The results can be grouped by specifying the `by` query (possible values are -// `day` and `week`), and time delimited using the `start` and `stop` queries. -// -// It is also possible to list the results by passing `list=1`. For example, a GET -// call to `/stats/counter/charm-bundle:trusty:*?by=day&list=1` returns an -// aggregated count of trusty charms downloads, grouped by charm and day, similar -// to the following: -// -// charm-bundle:trusty:juju-gui 2014-06-17 5 -// charm-bundle:trusty:mysql 2014-06-17 1 -package legacy - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -type Handler struct { - v4 *v4.Handler - pool *charmstore.Pool - mux *http.ServeMux -} - -func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams) http.Handler { - h := &Handler{ - v4: v4.New(pool, config), - pool: pool, - mux: http.NewServeMux(), - } - h.handle("/charm-info", router.HandleJSON(h.serveCharmInfo)) - h.handle("/charm/", router.HandleErrors(h.serveCharm)) - h.handle("/charm-event", router.HandleJSON(h.serveCharmEvent)) - return h -} - -func (h *Handler) handle(path string, handler http.Handler) { - prefix := strings.TrimSuffix(path, "/") - h.mux.Handle(path, http.StripPrefix(prefix, handler)) -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - req.ParseForm() - h.mux.ServeHTTP(w, req) -} - -func (h *Handler) serveCharm(w http.ResponseWriter, req *http.Request) error { - if req.Method != "GET" && req.Method != "HEAD" { - return params.ErrMethodNotAllowed - } - curl, err := charm.ParseReference(strings.TrimPrefix(req.URL.Path, "/")) - if err != nil { - return errgo.WithCausef(err, params.ErrNotFound, "") - } - return h.v4.Handlers().Id["archive"](curl, w, req) -} - -// charmStatsKey returns a stats key for the given charm reference and kind. 
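[Editorial note: a worked illustration of charmStatsKey below; the values are hypothetical, not part of the original diff:

	// Promulgated reference: no user element in the key.
	charmStatsKey(charm.MustParseReference("cs:trusty/juju-gui"), "charm-info")
	// → []string{"charm-info", "trusty", "juju-gui"}

	// User-owned reference: the user is appended as a final element,
	// keeping per-user counters distinguishable.
	charmStatsKey(charm.MustParseReference("cs:~charmers/trusty/juju-gui"), "charm-info")
	// → []string{"charm-info", "trusty", "juju-gui", "charmers"}

]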
-func charmStatsKey(url *charm.Reference, kind string) []string { - if url.User == "" { - return []string{kind, url.Series, url.Name} - } - return []string{kind, url.Series, url.Name, url.User} -} - -var errNotFound = fmt.Errorf("entry not found") - -func (h *Handler) serveCharmInfo(_ http.Header, req *http.Request) (interface{}, error) { - response := make(map[string]*charmrepo.InfoResponse) - store := h.pool.Store() - defer store.Close() - for _, url := range req.Form["charms"] { - c := &charmrepo.InfoResponse{} - response[url] = c - curl, err := charm.ParseReference(url) - if err != nil { - err = errNotFound - } - var entity *mongodoc.Entity - if err == nil { - entity, err = store.FindBestEntity(curl) - if errgo.Cause(err) == params.ErrNotFound { - // The old API actually returned "entry not found" - // on *any* error, but it seems reasonable to be - // a little more descriptive for other errors. - err = errNotFound - } - } - var rurl *router.ResolvedURL - if err == nil { - rurl = charmstore.EntityResolvedURL(entity) - if h.v4.AuthorizeEntity(rurl, req) != nil { - // The charm is unauthorized and there's no way to - // authorize it as part of the legacy API so we - // just treat it as a not-found error. - err = errNotFound - } - } - if err == nil && entity.BlobHash256 == "" { - // Lazily calculate SHA256 so that we don't burden - // non-legacy code with that task. - // TODO frankban: remove this lazy calculation after the cshash256 - // command is run in the production db. At that point, entities - // always have their blobhash256 field populated, and there is no - // need for this lazy evaluation anymore. - entity.BlobHash256, err = store.UpdateEntitySHA256(rurl) - } - // Prepare the response part for this charm. - if err == nil { - curl = entity.PreferredURL(curl.User == "") - c.CanonicalURL = curl.String() - c.Revision = curl.Revision - c.Sha256 = entity.BlobHash256 - c.Digest, err = entityBzrDigest(entity) - if err != nil { - c.Errors = append(c.Errors, err.Error()) - } - if v4.StatsEnabled(req) { - store.IncCounterAsync(charmStatsKey(curl, params.StatsCharmInfo)) - } - } else { - c.Errors = append(c.Errors, err.Error()) - if curl != nil && v4.StatsEnabled(req) { - store.IncCounterAsync(charmStatsKey(curl, params.StatsCharmMissing)) - } - } - } - return response, nil -} - -// serveCharmEvent returns events related to the charms specified in the -// "charms" query. In this implementation, the only supported event is -// "published", required by the "juju publish" command. -func (h *Handler) serveCharmEvent(_ http.Header, req *http.Request) (interface{}, error) { - response := make(map[string]*charmrepo.EventResponse) - store := h.pool.Store() - defer store.Close() - for _, url := range req.Form["charms"] { - c := &charmrepo.EventResponse{} - - // Ignore the digest part of the request. - if i := strings.Index(url, "@"); i != -1 { - url = url[:i] - } - // We intentionally do not implement the long_keys query parameter that - // the legacy charm store supported, as "juju publish" does not use it. - response[url] = c - - // Validate the charm URL. - id, err := charm.ParseReference(url) - if err != nil { - c.Errors = []string{"invalid charm URL: " + err.Error()} - continue - } - if id.Revision != -1 { - c.Errors = []string{"got charm URL with revision: " + id.String()} - continue - } - - // Retrieve the charm. 
- entity, err := store.FindBestEntity(id, "_id", "uploadtime", "extrainfo") - if err != nil { - if errgo.Cause(err) == params.ErrNotFound { - // The old API actually returned "entry not found" - // on *any* error, but it seems reasonable to be - // a little more descriptive for other errors. - err = errNotFound - } - c.Errors = []string{err.Error()} - continue - } - - // Retrieve the entity Bazaar digest. - c.Digest, err = entityBzrDigest(entity) - if err != nil { - c.Errors = []string{err.Error()} - } else if c.Digest == "" { - // There are two possible reasons why an entity is found without a - // digest: - // 1) the entity has been recently added in the ingestion process, - // but the extra-info has not been sent yet by "charmload"; - // 2) there was an error while ingesting the entity. - // If the entity has been recently published, we assume case 1), - // and therefore we return a not found error, forcing - // "juju publish" to keep retrying and possibly succeed later. - // Otherwise, we return an error so that "juju publish" exits with - // an error and avoids an infinite loop. - if time.Since(entity.UploadTime).Minutes() < 2 { - c.Errors = []string{errNotFound.Error()} - } else { - c.Errors = []string{"digest not found: this can be due to an error while ingesting the entity"} - } - continue - } - - // Prepare the response part for this charm. - c.Kind = "published" - if id.User == "" { - c.Revision = entity.PromulgatedRevision - } else { - c.Revision = entity.Revision - } - c.Time = entity.UploadTime.UTC().Format(time.RFC3339) - if v4.StatsEnabled(req) { - store.IncCounterAsync(charmStatsKey(id, params.StatsCharmEvent)) - } - } - return response, nil -} - -func entityBzrDigest(entity *mongodoc.Entity) (string, error) { - value, found := entity.ExtraInfo[params.BzrDigestKey] - if !found { - return "", nil - } - var digest string - if err := json.Unmarshal(value, &digest); err != nil { - return "", errgo.Notef(err, "cannot unmarshal digest") - } - return digest, nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/legacy/api_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/legacy/api_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/legacy/api_test.go 1970-01-01 00:00:00 +0000 @@ -1,636 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
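[Editorial note: a small sketch of the extra-info round trip handled by entityBzrDigest above; not part of the original diff. Values in ExtraInfo are raw JSON, so a digest is written with json.Marshal and read back as a JSON string:

	value, _ := json.Marshal("who@canonical.com-20140612210347-6cc9su1jqjkhbi84")
	entity.ExtraInfo = map[string][]byte{params.BzrDigestKey: value}
	digest, err := entityBzrDigest(entity)
	// digest == "who@canonical.com-20140612210347-6cc9su1jqjkhbi84", err == nil

A missing key yields "" with a nil error, which is what drives the retry/error split in serveCharmEvent above.]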
- -package legacy_test - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "time" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/juju/charm.v5/charmrepo" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/legacy" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/storetesting/hashtesting" - "gopkg.in/juju/charmstore.v4/internal/storetesting/stats" - "gopkg.in/juju/charmstore.v4/params" -) - -var serverParams = charmstore.ServerParams{ - AuthUsername: "test-user", - AuthPassword: "test-password", -} - -type APISuite struct { - storetesting.IsolatedMgoSuite - srv http.Handler - store *charmstore.Store -} - -var _ = gc.Suite(&APISuite{}) - -func (s *APISuite) SetUpTest(c *gc.C) { - s.IsolatedMgoSuite.SetUpTest(c) - s.srv, s.store = newServer(c, s.Session, serverParams) -} - -func (s *APISuite) TearDownTest(c *gc.C) { - s.store.Close() - s.IsolatedMgoSuite.TearDownTest(c) -} - -func newServer(c *gc.C, session *mgo.Session, config charmstore.ServerParams) (http.Handler, *charmstore.Store) { - db := session.DB("charmstore") - pool, err := charmstore.NewPool(db, nil, nil) - c.Assert(err, gc.IsNil) - srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"": legacy.NewAPIHandler}) - c.Assert(err, gc.IsNil) - return srv, pool.Store() -} - -func (s *APISuite) TestCharmArchive(c *gc.C) { - _, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") - archiveBytes, err := ioutil.ReadFile(wordpress.Path) - c.Assert(err, gc.IsNil) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: "/charm/precise/wordpress-0", - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) - c.Assert(rec.Header().Get("Content-Length"), gc.Equals, fmt.Sprint(len(rec.Body.Bytes()))) - - // Test with unresolved URL. - rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: "/charm/wordpress", - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) - c.Assert(rec.Header().Get("Content-Length"), gc.Equals, fmt.Sprint(len(rec.Body.Bytes()))) - - // Check that the HTTP range logic is plugged in OK. If this - // is working, we assume that the whole thing is working OK, - // as net/http is well-tested. 
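-	// (The Range header is inclusive: bytes=10-100 requests the
-	// 91 bytes at offsets 10 through 100, hence the length below.)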
- rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: "/charm/precise/wordpress-0", - Header: http.Header{"Range": {"bytes=10-100"}}, - }) - c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes())) - c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1) - c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes[10:101]) -} - -func (s *APISuite) TestPostNotAllowed(c *gc.C) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - Method: "POST", - URL: "/charm/precise/wordpress", - ExpectStatus: http.StatusMethodNotAllowed, - ExpectBody: params.Error{ - Code: params.ErrMethodNotAllowed, - Message: params.ErrMethodNotAllowed.Error(), - }, - }) -} - -func (s *APISuite) TestCharmArchiveUnresolvedURL(c *gc.C) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm/wordpress", - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Code: params.ErrNotFound, - Message: `no matching charm or bundle for "cs:wordpress"`, - }, - }) -} - -func (s *APISuite) TestCharmInfoNotFound(c *gc.C) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-info?charms=cs:precise/something-23", - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.InfoResponse{ - "cs:precise/something-23": { - Errors: []string{"entry not found"}, - }, - }, - }) -} - -func (s *APISuite) TestServeCharmInfo(c *gc.C) { - wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-1") - hashSum := fileSHA256(c, wordpress.Path) - digest, err := json.Marshal("who@canonical.com-bzr-digest") - c.Assert(err, gc.IsNil) - - tests := []struct { - about string - url string - extrainfo map[string][]byte - canonical string - sha string - digest string - revision int - err string - }{{ - about: "full charm URL with digest extra info", - url: wordpressURL.String(), - extrainfo: map[string][]byte{ - params.BzrDigestKey: digest, - }, - canonical: "cs:precise/wordpress-1", - sha: hashSum, - digest: "who@canonical.com-bzr-digest", - revision: 1, - }, { - about: "full charm URL without digest extra info", - url: wordpressURL.String(), - canonical: "cs:precise/wordpress-1", - sha: hashSum, - revision: 1, - }, { - about: "partial charm URL with digest extra info", - url: "cs:wordpress", - extrainfo: map[string][]byte{ - params.BzrDigestKey: digest, - }, - canonical: "cs:precise/wordpress-1", - sha: hashSum, - digest: "who@canonical.com-bzr-digest", - revision: 1, - }, { - about: "partial charm URL without extra info", - url: "cs:wordpress", - canonical: "cs:precise/wordpress-1", - sha: hashSum, - revision: 1, - }, { - about: "invalid digest extra info", - url: "cs:wordpress", - extrainfo: map[string][]byte{ - params.BzrDigestKey: []byte("[]"), - }, - canonical: "cs:precise/wordpress-1", - sha: hashSum, - revision: 1, - err: `cannot unmarshal digest: json: cannot unmarshal array into Go value of type string`, - }, { - about: "charm not found", - url: "cs:precise/non-existent", - err: "entry not found", - }, { - about: "invalid charm URL", - url: "cs:/bad", - err: `entry not found`, - }, { - about: "invalid charm schema", - url: "gopher:archie-server", - err: `entry not found`, - }, { - about: "invalid URL", - url: "/charm-info?charms=cs:not-found", - err: "entry not found", - }} - - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - err = s.store.UpdateEntity(wordpressURL, bson.D{{ - "$set", bson.D{{"extrainfo", 
test.extrainfo}}, - }}) - c.Assert(err, gc.IsNil) - expectInfo := charmrepo.InfoResponse{ - CanonicalURL: test.canonical, - Sha256: test.sha, - Revision: test.revision, - Digest: test.digest, - } - if test.err != "" { - expectInfo.Errors = []string{test.err} - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-info?charms=" + test.url, - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.InfoResponse{ - test.url: expectInfo, - }, - }) - } -} - -func (s *APISuite) TestCharmInfoCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - // Add two charms to the database, a promulgated one and a user owned one. - s.addPublicCharm(c, "wordpress", "cs:utopic/wordpress-42") - s.addPublicCharm(c, "wordpress", "cs:~who/trusty/wordpress-47") - - requestInfo := func(id string, times int) { - for i := 0; i < times; i++ { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: "/charm-info?charms=" + id, - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - } - } - - // Request charm info several times for the promulgated charm, - // the user owned one and a missing charm. - requestInfo("utopic/wordpress-42", 4) - requestInfo("~who/trusty/wordpress-47", 3) - requestInfo("precise/django-0", 2) - - // The charm-info count for the promulgated charm has been updated. - key := []string{params.StatsCharmInfo, "utopic", "wordpress"} - stats.CheckCounterSum(c, s.store, key, false, 4) - - // The charm-info count for the user owned charm has been updated. - key = []string{params.StatsCharmInfo, "trusty", "wordpress", "who"} - stats.CheckCounterSum(c, s.store, key, false, 3) - - // The charm-missing count for the missing charm has been updated. - key = []string{params.StatsCharmMissing, "precise", "django"} - stats.CheckCounterSum(c, s.store, key, false, 2) - - // The charm-info count for the missing charm is still zero. 
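-	// (Failed lookups increment only the charm-missing counter.)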
- key = []string{params.StatsCharmInfo, "precise", "django"} - stats.CheckCounterSum(c, s.store, key, false, 0) -} - -func (s *APISuite) TestAPIInfoWithGatedCharm(c *gc.C) { - wordpressURL, _ := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") - s.store.SetPerms(&wordpressURL.URL, "read", "bob") - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-info?charms=" + wordpressURL.URL.String(), - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.InfoResponse{ - wordpressURL.URL.String(): { - Errors: []string{"entry not found"}, - }, - }, - }) -} - -func fileSHA256(c *gc.C, path string) string { - f, err := os.Open(path) - c.Assert(err, gc.IsNil) - hash := sha256.New() - _, err = io.Copy(hash, f) - c.Assert(err, gc.IsNil) - return fmt.Sprintf("%x", hash.Sum(nil)) -} - -func (s *APISuite) TestCharmPackageGet(c *gc.C) { - wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") - archiveBytes, err := ioutil.ReadFile(wordpress.Path) - c.Assert(err, gc.IsNil) - - srv := httptest.NewServer(s.srv) - defer srv.Close() - - s.PatchValue(&charmrepo.CacheDir, c.MkDir()) - s.PatchValue(&charmrepo.LegacyStore.BaseURL, srv.URL) - - url, _ := wordpressURL.URL.URL("") - ch, err := charmrepo.LegacyStore.Get(url) - c.Assert(err, gc.IsNil) - chArchive := ch.(*charm.CharmArchive) - - data, err := ioutil.ReadFile(chArchive.Path) - c.Assert(err, gc.IsNil) - c.Assert(data, gc.DeepEquals, archiveBytes) -} - -func (s *APISuite) TestCharmPackageCharmInfo(c *gc.C) { - wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") - wordpressSHA256 := fileSHA256(c, wordpress.Path) - mysqlURL, mySQL := s.addPublicCharm(c, "wordpress", "cs:precise/mysql-2") - mysqlSHA256 := fileSHA256(c, mySQL.Path) - notFoundURL := charm.MustParseReference("cs:precise/not-found-3") - - srv := httptest.NewServer(s.srv) - defer srv.Close() - s.PatchValue(&charmrepo.LegacyStore.BaseURL, srv.URL) - - resp, err := charmrepo.LegacyStore.Info(wordpressURL.PreferredURL(), mysqlURL.PreferredURL(), notFoundURL) - c.Assert(err, gc.IsNil) - c.Assert(resp, gc.HasLen, 3) - c.Assert(resp, jc.DeepEquals, []*charmrepo.InfoResponse{{ - CanonicalURL: wordpressURL.String(), - Sha256: wordpressSHA256, - }, { - CanonicalURL: mysqlURL.String(), - Sha256: mysqlSHA256, - Revision: 2, - }, { - Errors: []string{"charm not found: " + notFoundURL.String()}, - }}) -} - -func (s *APISuite) TestSHA256Laziness(c *gc.C) { - // TODO frankban: remove this test after updating entities in the - // production db with their SHA256 hash value. Entities are updated by - // running the cshash256 command. - id, ch := s.addPublicCharm(c, "wordpress", "cs:~who/precise/wordpress-0") - url := id.String() - sum256 := fileSHA256(c, ch.Path) - - hashtesting.CheckSHA256Laziness(c, s.store, &id.URL, func() { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-info?charms=" + url, - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.InfoResponse{ - url: { - CanonicalURL: url, - Sha256: sum256, - Revision: 0, - }, - }, - }) - }) -} - -var serverStatusTests = []struct { - path string - code int -}{ - {"/charm-info/any", 404}, - {"/charm/bad-url", 404}, - {"/charm/bad-series/wordpress", 404}, -} - -func (s *APISuite) TestServerStatus(c *gc.C) { - // TODO(rog) add tests from old TestServerStatus tests - // when we implement charm-info. 
- for i, test := range serverStatusTests { - c.Logf("test %d: %s", i, test.path) - resp := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: test.path, - }) - c.Assert(resp.Code, gc.Equals, test.code, gc.Commentf("body: %s", resp.Body)) - } -} - -func (s *APISuite) addPublicCharm(c *gc.C, charmName, curl string) (*router.ResolvedURL, *charm.CharmArchive) { - rurl := &router.ResolvedURL{ - URL: *charm.MustParseReference(curl), - PromulgatedRevision: -1, - } - if rurl.URL.User == "" { - rurl.URL.User = "charmers" - rurl.PromulgatedRevision = rurl.URL.Revision - } - archive := storetesting.Charms.CharmArchive(c.MkDir(), charmName) - err := s.store.AddCharmWithArchive(rurl, archive) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) - c.Assert(err, gc.IsNil) - return rurl, archive -} - -var serveCharmEventErrorsTests = []struct { - about string - url string - responseUrl string - err string -}{{ - about: "invalid charm URL", - url: "no-such:charm", - err: `invalid charm URL: charm URL has invalid schema: "no-such:charm"`, -}, { - about: "revision specified", - url: "cs:utopic/django-42", - err: "got charm URL with revision: cs:utopic/django-42", -}, { - about: "charm not found", - url: "cs:trusty/django", - err: "entry not found", -}, { - about: "ignoring digest", - url: "precise/django-47@a-bzr-digest", - responseUrl: "precise/django-47", - err: "got charm URL with revision: cs:precise/django-47", -}} - -func (s *APISuite) TestServeCharmEventErrors(c *gc.C) { - for i, test := range serveCharmEventErrorsTests { - c.Logf("test %d: %s", i, test.about) - if test.responseUrl == "" { - test.responseUrl = test.url - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-event?charms=" + test.url, - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.EventResponse{ - test.responseUrl: { - Errors: []string{test.err}, - }, - }, - }) - } -} - -func (s *APISuite) TestServeCharmEvent(c *gc.C) { - // Add three charms to the charm store. - mysqlUrl, _ := s.addPublicCharm(c, "mysql", "cs:trusty/mysql-2") - riakUrl, _ := s.addPublicCharm(c, "riak", "cs:utopic/riak-3") - - // Update the mysql charm with a valid digest extra-info. - s.addExtraInfoDigest(c, mysqlUrl, "who@canonical.com-bzr-digest") - - // Update the riak charm with an invalid digest extra-info. - err := s.store.UpdateEntity(riakUrl, bson.D{{ - "$set", bson.D{{"extrainfo", map[string][]byte{ - params.BzrDigestKey: []byte(":"), - }}}, - }}) - c.Assert(err, gc.IsNil) - - // Retrieve the entities. 
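-	// (Their revisions and upload times are needed to build the
-	// expected responses below.)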
- mysql, err := s.store.FindEntity(mysqlUrl) - c.Assert(err, gc.IsNil) - riak, err := s.store.FindEntity(riakUrl) - c.Assert(err, gc.IsNil) - - tests := []struct { - about string - query string - expect map[string]*charmrepo.EventResponse - }{{ - about: "valid digest", - query: "?charms=cs:trusty/mysql", - expect: map[string]*charmrepo.EventResponse{ - "cs:trusty/mysql": { - Kind: "published", - Revision: mysql.Revision, - Time: mysql.UploadTime.UTC().Format(time.RFC3339), - Digest: "who@canonical.com-bzr-digest", - }, - }, - }, { - about: "invalid digest", - query: "?charms=cs:utopic/riak", - expect: map[string]*charmrepo.EventResponse{ - "cs:utopic/riak": { - Kind: "published", - Revision: riak.Revision, - Time: riak.UploadTime.UTC().Format(time.RFC3339), - Errors: []string{"cannot unmarshal digest: invalid character ':' looking for beginning of value"}, - }, - }, - }, { - about: "partial charm URL", - query: "?charms=cs:mysql", - expect: map[string]*charmrepo.EventResponse{ - "cs:mysql": { - Kind: "published", - Revision: mysql.Revision, - Time: mysql.UploadTime.UTC().Format(time.RFC3339), - Digest: "who@canonical.com-bzr-digest", - }, - }, - }, { - about: "digest in request", - query: "?charms=cs:trusty/mysql@my-digest", - expect: map[string]*charmrepo.EventResponse{ - "cs:trusty/mysql": { - Kind: "published", - Revision: mysql.Revision, - Time: mysql.UploadTime.UTC().Format(time.RFC3339), - Digest: "who@canonical.com-bzr-digest", - }, - }, - }, { - about: "multiple charms", - query: "?charms=cs:mysql&charms=utopic/riak", - expect: map[string]*charmrepo.EventResponse{ - "cs:mysql": { - Kind: "published", - Revision: mysql.Revision, - Time: mysql.UploadTime.UTC().Format(time.RFC3339), - Digest: "who@canonical.com-bzr-digest", - }, - "utopic/riak": { - Kind: "published", - Revision: riak.Revision, - Time: riak.UploadTime.UTC().Format(time.RFC3339), - Errors: []string{"cannot unmarshal digest: invalid character ':' looking for beginning of value"}, - }, - }, - }} - - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-event" + test.query, - ExpectStatus: http.StatusOK, - ExpectBody: test.expect, - }) - } -} - -func (s *APISuite) TestServeCharmEventDigestNotFound(c *gc.C) { - // Add a charm without a Bazaar digest. - url, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-42") - - // Pretend the entity has been uploaded right now, and assume the test does - // not take more than two minutes to run. - s.updateUploadTime(c, url, time.Now()) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-event?charms=cs:trusty/wordpress", - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.EventResponse{ - "cs:trusty/wordpress": { - Errors: []string{"entry not found"}, - }, - }, - }) - - // Now change the entity upload time to be more than 2 minutes ago. - s.updateUploadTime(c, url, time.Now().Add(-121*time.Second)) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-event?charms=cs:trusty/wordpress", - ExpectStatus: http.StatusOK, - ExpectBody: map[string]charmrepo.EventResponse{ - "cs:trusty/wordpress": { - Errors: []string{"digest not found: this can be due to an error while ingesting the entity"}, - }, - }, - }) -} - -func (s *APISuite) TestServeCharmEventLastRevision(c *gc.C) { - // Add two revisions of the same charm. 
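-	// The charm-event endpoint should report only the most recent one.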
- url1, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-1") - url2, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-2") - - // Update the resulting entities with Bazaar digests. - s.addExtraInfoDigest(c, url1, "digest-1") - s.addExtraInfoDigest(c, url2, "digest-2") - - // Retrieve the most recent revision of the entity. - entity, err := s.store.FindEntity(url2) - c.Assert(err, gc.IsNil) - - // Ensure the last revision is correctly returned. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: "/charm-event?charms=wordpress", - ExpectStatus: http.StatusOK, - ExpectBody: map[string]*charmrepo.EventResponse{ - "wordpress": { - Kind: "published", - Revision: 2, - Time: entity.UploadTime.UTC().Format(time.RFC3339), - Digest: "digest-2", - }, - }, - }) -} - -func (s *APISuite) addExtraInfoDigest(c *gc.C, id *router.ResolvedURL, digest string) { - b, err := json.Marshal(digest) - c.Assert(err, gc.IsNil) - err = s.store.UpdateEntity(id, bson.D{{ - "$set", bson.D{{"extrainfo", map[string][]byte{ - params.BzrDigestKey: b, - }}}, - }}) - c.Assert(err, gc.IsNil) -} - -func (s *APISuite) updateUploadTime(c *gc.C, id *router.ResolvedURL, uploadTime time.Time) { - err := s.store.UpdateEntity(id, bson.D{{ - "$set", bson.D{{"uploadtime", uploadTime}}, - }}) - c.Assert(err, gc.IsNil) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/legacy/package_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/legacy/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/legacy/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package legacy_test - -import ( - "testing" - - jujutesting "github.com/juju/testing" -) - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/mongodoc' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/mongodoc/doc.go' --- src/gopkg.in/juju/charmstore.v4/internal/mongodoc/doc.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/mongodoc/doc.go 1970-01-01 00:00:00 +0000 @@ -1,271 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package mongodoc - -import ( - "time" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" -) - -// Entity holds the in-database representation of charm or bundle's -// document in the charms collection. It holds information -// on one specific revision and series of the charm or bundle - see -// also BaseEntity. -// -// We ensure that there is always a single BaseEntity for any -// set of entities which share the same base URL. -type Entity struct { - // URL holds the fully specified URL of the charm or bundle. - // e.g. cs:precise/wordpress-34, cs:~user/trusty/foo-2 - URL *charm.Reference `bson:"_id"` - - // BaseURL holds the reference URL of the charm or bundle - // (this omits the series and revision from URL) - // e.g. cs:wordpress, cs:~user/foo - BaseURL *charm.Reference - - // User holds the user part of the entity URL (for instance, "joe"). - User string - - // Name holds the name of the entity (for instance "wordpress"). - Name string - - // Revision holds the entity revision (it cannot be -1/unset). - Revision int - - // Series holds the entity series (for instance "trusty" or "bundle"). 
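-	// Bundles are stored with the special series "bundle".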
-	Series string
-
-	// BlobHash holds the hash checksum of the blob, in hexadecimal format,
-	// as created by blobstore.NewHash.
-	BlobHash string
-
-	// BlobHash256 holds the SHA256 hash checksum of the blob,
-	// in hexadecimal format. This is only used by the legacy
-	// API, and is calculated lazily the first time it is required.
-	BlobHash256 string
-
-	// Size holds the size of the archive blob.
-	// TODO(rog) rename this to BlobSize.
-	Size int64
-
-	// BlobName holds the name that the archive blob is given in the blob store.
-	BlobName string
-
-	UploadTime time.Time
-
-	// ExtraInfo holds arbitrary extra metadata associated with
-	// the entity. The byte slices hold JSON-encoded data.
-	ExtraInfo map[string][]byte `bson:",omitempty" json:",omitempty"`
-
-	// TODO(rog) verify that all these types marshal to the expected
-	// JSON form.
-	CharmMeta    *charm.Meta
-	CharmConfig  *charm.Config
-	CharmActions *charm.Actions
-
-	// CharmProvidedInterfaces holds all the relation
-	// interfaces provided by the charm.
-	CharmProvidedInterfaces []string
-
-	// CharmRequiredInterfaces is similar to CharmProvidedInterfaces
-	// for required interfaces.
-	CharmRequiredInterfaces []string
-
-	BundleData   *charm.BundleData
-	BundleReadMe string
-
-	// BundleCharms includes all the charm URLs referenced
-	// by the bundle, including base URLs where they are
-	// not already included.
-	BundleCharms []*charm.Reference
-
-	// BundleMachineCount counts the machines used or created
-	// by the bundle. It is nil for charms.
-	BundleMachineCount *int
-
-	// BundleUnitCount counts the units created by the bundle.
-	// It is nil for charms.
-	BundleUnitCount *int
-
-	// TODO Add fields denormalized for search purposes
-	// and search ranking field(s).
-
-	// Contents holds entries for frequently accessed
-	// entries in the file's blob. Storing this avoids
-	// the need to linearly read the zip file's manifest
-	// every time we access one of these files.
-	Contents map[FileId]ZipFile `json:",omitempty" bson:",omitempty"`
-
-	// PromulgatedURL holds the promulgated URL of the entity. If the entity
-	// is not promulgated this should be set to nil.
-	PromulgatedURL *charm.Reference `json:",omitempty" bson:"promulgated-url,omitempty"`
-
-	// PromulgatedRevision holds the revision number from the promulgated URL.
-	// If the entity is not promulgated this should be set to -1.
-	PromulgatedRevision int `bson:"promulgated-revision"`
-}
-
-// PreferredURL returns the preferred way to refer to this entity. If
-// the entity has a promulgated URL and usePromulgated is true then the
-// promulgated URL will be used, otherwise the standard URL is used.
-func (e *Entity) PreferredURL(usePromulgated bool) *charm.Reference {
-	if usePromulgated && e.PromulgatedURL != nil {
-		return e.PromulgatedURL
-	}
-	return e.URL
-}
-
-// BaseEntity holds metadata for a charm or bundle
-// independent of any specific uploaded revision or series.
-type BaseEntity struct {
-	// URL holds the reference URL of the charm or bundle
-	// regardless of its revision, series or promulgation status
-	// (this omits the revision and series from URL).
-	// e.g., cs:~user/collection/foo
-	URL *charm.Reference `bson:"_id"`
-
-	// User holds the user part of the entity URL (for instance, "joe").
-	User string
-
-	// Name holds the name of the entity (for instance "wordpress").
-	Name string
-
-	// Public specifies whether the charm or bundle
-	// is available to all users. If this is true, the ACLs will
-	// be ignored when reading a charm.
-	Public bool
-
-	// ACLs holds permission information relevant to
-	// the base entity. The permissions apply to all
-	// revisions.
-	ACLs ACL
-
-	// Promulgated specifies whether the charm or bundle should be
-	// promulgated.
-	Promulgated IntBool
-}
-
-// ACL holds lists of users and groups that are
-// allowed to perform specific actions.
-type ACL struct {
-	// Read holds users and groups that are allowed to read the charm
-	// or bundle.
-	Read []string
-	// Write holds users and groups that are allowed to upload/modify the charm
-	// or bundle.
-	Write []string
-}
-
-type FileId string
-
-const (
-	FileReadMe FileId = "readme"
-	FileIcon   FileId = "icon"
-)
-
-// ZipFile refers to a specific file in the uploaded archive blob.
-type ZipFile struct {
-	// Compressed specifies whether the file is compressed or not.
-	Compressed bool
-
-	// Offset holds the offset into the zip archive of the start of
-	// the file's data.
-	Offset int64
-
-	// Size holds the size of the file before decompression.
-	Size int64
-}
-
-// IsValid reports whether f is a valid (non-zero) reference to
-// a zip file.
-func (f ZipFile) IsValid() bool {
-	// Note that no valid zip files can start at offset zero,
-	// because that's where the zip header lives.
-	return f != ZipFile{}
-}
-
-// Log holds the in-database representation of a log message sent to the charm
-// store.
-type Log struct {
-	// Data holds the JSON-encoded log message.
-	Data []byte
-
-	// Level holds the log level: whether the log is a warning, an error, etc.
-	Level LogLevel
-
-	// Type holds the log type.
-	Type LogType
-
-	// URLs holds a slice of entity URLs associated with the log message.
-	URLs []*charm.Reference
-
-	// Time holds the time of the log.
-	Time time.Time
-}
-
-// LogLevel holds the level associated with a log.
-type LogLevel int
-
-// When introducing a new log level, do the following:
-// 1) add the new level as a constant below;
-// 2) add the new level in params as a string for HTTP requests/responses;
-// 3) include the new level in the mongodocLogLevels and paramsLogLevels maps
-// in internal/v4.
-const (
-	_ LogLevel = iota
-	InfoLevel
-	WarningLevel
-	ErrorLevel
-)
-
-// LogType holds the type of the log.
-type LogType int
-
-// When introducing a new log type, do the following:
-// 1) add the new type as a constant below;
-// 2) add the new type in params as a string for HTTP requests/responses;
-// 3) include the new type in the mongodocLogTypes and paramsLogTypes maps
-// in internal/v4.
-const (
-	_ LogType = iota
-	IngestionType
-	LegacyStatisticsType
-)
-
-// Migration holds information about the database migration.
-type Migration struct {
-	// Executed holds the migration names for migrations already executed.
-	Executed []string
-}
-
-// IntBool is a bool that will be represented internally in the database as 1 for
-// true and -1 for false.
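-// For example:
-//
-//	data, _ := bson.Marshal(bson.D{{"promulgated", IntBool(true)}})
-//	// data decodes as {"promulgated": 1}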
-type IntBool bool - -func (b IntBool) GetBSON() (interface{}, error) { - if b { - return 1, nil - } - return -1, nil -} - -func (b *IntBool) SetBSON(raw bson.Raw) error { - var x int - if err := raw.Unmarshal(&x); err != nil { - return errgo.Notef(err, "cannot unmarshal value") - } - switch x { - case 1: - *b = IntBool(true) - case -1: - *b = IntBool(false) - default: - return errgo.Newf("invalid value %d", x) - } - return nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/mongodoc/doc_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/mongodoc/doc_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/mongodoc/doc_test.go 1970-01-01 00:00:00 +0000 @@ -1,77 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package mongodoc_test - -import ( - "testing" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} - -type DocSuite struct{} - -var _ = gc.Suite(&DocSuite{}) - -func (s *DocSuite) TestIntBoolGetBSON(c *gc.C) { - test := bson.D{{"true", mongodoc.IntBool(true)}, {"false", mongodoc.IntBool(false)}} - b, err := bson.Marshal(test) - c.Assert(err, gc.IsNil) - result := make(map[string]int, 2) - err = bson.Unmarshal(b, &result) - c.Assert(err, gc.IsNil) - c.Assert(result["true"], gc.Equals, 1) - c.Assert(result["false"], gc.Equals, -1) -} - -func (s *DocSuite) TestIntBoolSetBSON(c *gc.C) { - test := bson.D{{"true", 1}, {"false", -1}} - b, err := bson.Marshal(test) - c.Assert(err, gc.IsNil) - var result map[string]mongodoc.IntBool - err = bson.Unmarshal(b, &result) - c.Assert(err, gc.IsNil) - c.Assert(result, jc.DeepEquals, map[string]mongodoc.IntBool{"true": true, "false": false}) -} - -func (s *DocSuite) TestIntBoolSetBSONIncorrectType(c *gc.C) { - test := bson.D{{"test", "true"}} - b, err := bson.Marshal(test) - c.Assert(err, gc.IsNil) - var result map[string]mongodoc.IntBool - err = bson.Unmarshal(b, &result) - c.Assert(err, gc.ErrorMatches, "cannot unmarshal value: BSON kind 0x02 isn't compatible with type int") -} - -func (s *DocSuite) TestIntBoolSetBSONInvalidValue(c *gc.C) { - test := bson.D{{"test", 2}} - b, err := bson.Marshal(test) - c.Assert(err, gc.IsNil) - var result map[string]mongodoc.IntBool - err = bson.Unmarshal(b, &result) - c.Assert(err, gc.ErrorMatches, `invalid value 2`) -} - -func (s *DocSuite) TestPreferredURL(c *gc.C) { - e1 := &mongodoc.Entity{ - URL: charm.MustParseReference("~ken/trusty/b-1"), - } - e2 := &mongodoc.Entity{ - URL: charm.MustParseReference("~dmr/trusty/c-1"), - PromulgatedURL: charm.MustParseReference("trusty/c-1"), - } - - c.Assert(e1.PreferredURL(false), gc.Equals, e1.URL) - c.Assert(e1.PreferredURL(true), gc.Equals, e1.URL) - c.Assert(e2.PreferredURL(false), gc.Equals, e2.URL) - c.Assert(e2.PreferredURL(true), gc.Equals, e2.PromulgatedURL) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/router' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/fieldinclude.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/fieldinclude.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/fieldinclude.go 1970-01-01 00:00:00 +0000 @@ -1,181 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
-
-package router
-
-import (
-	"encoding/json"
-	"net/http"
-	"net/url"
-
-	"gopkg.in/errgo.v1"
-)
-
-// A FieldQueryFunc is used to retrieve a metadata document for the given URL,
-// selecting only those fields specified in keys of the given selector.
-type FieldQueryFunc func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error)
-
-// FieldUpdater records field changes made by a FieldUpdateFunc.
-type FieldUpdater struct {
-	fields map[string]interface{}
-	search bool
-}
-
-// UpdateField requests that the provided field is updated with
-// the given value.
-func (u *FieldUpdater) UpdateField(fieldName string, val interface{}) {
-	u.fields[fieldName] = val
-}
-
-// UpdateSearch requests that search records are updated.
-func (u *FieldUpdater) UpdateSearch() {
-	u.search = true
-}
-
-// A FieldUpdateFunc is used to update a metadata document for the
-// given id. For each field in fields, it should set that field to
-// its corresponding value in the metadata document.
-type FieldUpdateFunc func(id *ResolvedURL, fields map[string]interface{}) error
-
-// A FieldUpdateSearchFunc is used to update a search document for the
-// given id. For each field in fields, it should set that field to
-// its corresponding value in the search document.
-type FieldUpdateSearchFunc func(id *ResolvedURL, fields map[string]interface{}) error
-
-// A FieldGetFunc returns some data from the given document. The
-// document will have been returned from an earlier call to the
-// associated QueryFunc.
-type FieldGetFunc func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)
-
-// A FieldPutFunc uses the given FieldUpdater to record the fields to be set
-// in the metadata document for the given id. The path holds the metadata path
-// after the initial prefix has been removed.
-type FieldPutFunc func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error
-
-// FieldIncludeHandlerParams specifies the parameters for FieldIncludeHandler.
-type FieldIncludeHandlerParams struct {
-	// Key is used to group together similar FieldIncludeHandlers
-	// (the same query should be generated for any given key).
-	Key interface{}
-
-	// Query is used to retrieve the document from the database for
-	// GET requests. The fields passed to the query will be the
-	// union of all fields found in all the handlers in the bulk
-	// request.
-	Query FieldQueryFunc
-
-	// Fields specifies which fields are required by the given handler.
-	Fields []string
-
-	// HandleGet actually returns the data from the document retrieved
-	// by Query, for GET requests.
-	HandleGet FieldGetFunc
-
-	// HandlePut generates update operations for a PUT
-	// operation.
-	HandlePut FieldPutFunc
-
-	// Update is used to update the document in the database for
-	// PUT requests.
-	Update FieldUpdateFunc
-
-	// UpdateSearch is used to update the document in the search
-	// database for PUT requests.
-	UpdateSearch FieldUpdateSearchFunc
-}
-
-type fieldIncludeHandler struct {
-	p FieldIncludeHandlerParams
-}
-
-// FieldIncludeHandler returns a BulkIncludeHandler that will perform
-// only a single database query for several requests. See FieldIncludeHandlerParams
-// for more detail.
-//
-// See ../v4/api.go for an example of its use.
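-//
-// A minimal sketch of wiring one up (fetchFields and entityDoc are
-// illustrative stand-ins, not real helpers in this package):
-//
-//	handler := FieldIncludeHandler(FieldIncludeHandlerParams{
-//		Key:    "entity",
-//		Fields: []string{"name"},
-//		Query: func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) {
-//			return fetchFields(id, selector)
-//		},
-//		HandleGet: func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
-//			return doc.(*entityDoc).Name, nil
-//		},
-//	})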
-func FieldIncludeHandler(p FieldIncludeHandlerParams) BulkIncludeHandler { - return &fieldIncludeHandler{p} -} - -func (h *fieldIncludeHandler) Key() interface{} { - return h.p.Key -} - -func (h *fieldIncludeHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error { - updater := &FieldUpdater{ - fields: make(map[string]interface{}), - } - var errs []error - errCount := 0 - setError := func(i int, err error) { - if errs == nil { - errs = make([]error, len(hs)) - } - if errs[i] == nil { - errs[i] = err - errCount++ - } - } - for i, h := range hs { - h := h.(*fieldIncludeHandler) - if h.p.HandlePut == nil { - setError(i, errgo.New("PUT not supported")) - continue - } - if err := h.p.HandlePut(id, paths[i], values[i], updater, req); err != nil { - setError(i, errgo.Mask(err, errgo.Any)) - } - } - if errCount == len(hs) { - // Every HandlePut request has drawn an error, - // no need to call Update. - return errs - } - if err := h.p.Update(id, updater.fields); err != nil { - for i := range hs { - setError(i, err) - } - } - if updater.search { - if err := h.p.UpdateSearch(id, updater.fields); err != nil { - for i := range hs { - setError(i, err) - } - } - } - return errs -} - -func (h *fieldIncludeHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { - funcs := make([]FieldGetFunc, len(hs)) - selector := make(map[string]int) - // Extract the handler functions and union all the fields. - for i, h := range hs { - h := h.(*fieldIncludeHandler) - funcs[i] = h.p.HandleGet - for _, field := range h.p.Fields { - selector[field] = 1 - } - } - // Make the single query. - doc, err := h.p.Query(id, selector, req) - if err != nil { - // Note: preserve error cause from handlers. - return nil, errgo.Mask(err, errgo.Any) - } - - // Call all the handlers with the resulting query document. - results := make([]interface{}, len(hs)) - for i, f := range funcs { - var err error - results[i], err = f(doc, id, paths[i], flags, req) - if err != nil { - // TODO correlate error with handler (perhaps return - // an error that identifies the slice position of the handler that - // failed). - // Note: preserve error cause from handlers. - return nil, errgo.Mask(err, errgo.Any) - } - } - return results, nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/package_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package router_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/router.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/router.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/router.go 1970-01-01 00:00:00 +0000 @@ -1,835 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// The router package implements an HTTP request router for charm store -// HTTP requests. 
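-//
-// Requests are dispatched to three kinds of handler: global handlers
-// (Handlers.Global), per-entity id handlers (Handlers.Id) and bulk
-// metadata handlers (Handlers.Meta).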
-package router - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "reflect" - "sort" - "strings" - "sync" - - "github.com/juju/utils/jsonhttp" - "github.com/juju/utils/parallel" - "gopkg.in/errgo.v1" - charm "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/httpbakery" - - "gopkg.in/juju/charmstore.v4/params" -) - -// Implementation note on error handling: -// -// We use errgo.Any only when necessary, so that we can see at a glance -// which are the possible places that could be returning an error with a -// Cause (the only kind of error that can end up setting an HTTP status -// code) - -var knownSeries = map[string]bool{ - "bundle": true, - "oneiric": true, - "precise": true, - "quantal": true, - "raring": true, - "saucy": true, - "trusty": true, - "utopic": true, - "vivid": true, - "win2012hvr2": true, - "win2012hv": true, - "win2012r2": true, - "win2012": true, - "win7": true, - "win8": true, - "win81": true, - "centos7": true, -} - -// BulkIncludeHandler represents a metadata handler that can -// handle multiple metadata "include" requests in a single batch. -// -// For simple metadata handlers that cannot be -// efficiently combined, see SingleIncludeHandler. -// -// All handlers may assume that http.Request.ParseForm -// has been called to parse the URL form values. -type BulkIncludeHandler interface { - // Key returns a value that will be used to group handlers - // together in preparation for a call to HandleGet or HandlePut. - // The key should be comparable for equality. - // Please do not return NaN. That would be silly, OK? - Key() interface{} - - // HandleGet returns the results of invoking all the given handlers - // on the given charm or bundle id. Each result is held in - // the respective element of the returned slice. - // - // All of the handlers' Keys will be equal to the receiving handler's - // Key. - // - // Each item in paths holds the remaining metadata path - // for the handler in the corresponding position - // in hs after the prefix in Handlers.Meta has been stripped, - // and flags holds all the URL query values. - // - // TODO(rog) document indexed errors. - HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) - - // HandlePut invokes a PUT request on all the given handlers on - // the given charm or bundle id. If there is an error, the - // returned errors slice should contain one element for each element - // in paths. The error for handler hs[i] should be returned in errors[i]. - // If there is no error, an empty slice should be returned. - // - // Each item in paths holds the remaining metadata path - // for the handler in the corresponding position - // in hs after the prefix in Handlers.Meta has been stripped, - // and flags holds all the url query values. - HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error -} - -// IdHandler handles a charm store request rooted at the given id. -// The request path (req.URL.Path) holds the URL path after -// the id has been stripped off. -type IdHandler func(charmId *charm.Reference, w http.ResponseWriter, req *http.Request) error - -// Handlers specifies how HTTP requests will be routed -// by the router. All errors returned by the handlers will -// be processed by WriteError with their Cause left intact. 
-// This means that, for example, if they return an error
-// with a Cause that is params.ErrNotFound, the HTTP
-// status code will reflect that (assuming the error has
-// not been absorbed by the bulk metadata logic).
-type Handlers struct {
-	// Global holds handlers for paths not matched by Meta or Id.
-	// The map key is the path; the value is the handler that will
-	// be used to handle that path.
-	//
-	// Path matching is by longest prefix - the same as
-	// http.ServeMux.
-	//
-	// Note that, unlike http.ServeMux, the prefix is stripped
-	// from the URL path before the handler is invoked,
-	// matching the behaviour of the other handlers.
-	Global map[string]http.Handler
-
-	// Id holds handlers for paths which correspond to a single
-	// charm or bundle id other than the meta path. The map key
-	// holds the first element of the path, which may end in a
-	// trailing slash (/) to indicate that longer paths are allowed
-	// too.
-	Id map[string]IdHandler
-
-	// Meta holds metadata handlers for paths under the meta
-	// endpoint. The map key holds the first element of the path,
-	// which may end in a trailing slash (/) to indicate that longer
-	// paths are allowed too.
-	Meta map[string]BulkIncludeHandler
-}
-
-// Router represents a charm store HTTP request router.
-type Router struct {
-	handlers   *Handlers
-	handler    http.Handler
-	resolveURL func(id *charm.Reference) (*ResolvedURL, error)
-	authorize  func(id *ResolvedURL, req *http.Request) error
-	exists     func(id *ResolvedURL, req *http.Request) (bool, error)
-}
-
-// ResolvedURL represents a URL that has been resolved by resolveURL.
-// URL.User and URL.Series should always be non-empty and
-// URL.Revision should never be -1.
-//
-// If PromulgatedRevision is not -1, it holds the revision of the
-// promulgated version of the charm.
-type ResolvedURL struct {
-	URL                 charm.Reference
-	PromulgatedRevision int
-}
-
-// MustNewResolvedURL returns a new ResolvedURL by parsing
-// the entity URL in urlStr. The promulgatedRev parameter
-// specifies the value of PromulgatedRevision in the returned
-// value.
-//
-// This function panics if urlStr cannot be parsed as a charm.Reference
-// or if it is not fully specified, including user, series and revision.
-func MustNewResolvedURL(urlStr string, promulgatedRev int) *ResolvedURL {
-	url := charm.MustParseReference(urlStr)
-	if url.User == "" || url.Series == "" || url.Revision == -1 {
-		panic(fmt.Errorf("incomplete url %v", urlStr))
-	}
-	return &ResolvedURL{
-		URL:                 *url,
-		PromulgatedRevision: promulgatedRev,
-	}
-}
-
-// PreferredURL returns the promulgated URL for
-// the given id if there is one, otherwise it
-// returns the non-promulgated URL. The returned *charm.Reference
-// may be modified freely.
-func (id *ResolvedURL) PreferredURL() *charm.Reference {
-	u := id.URL
-	if id.PromulgatedRevision == -1 {
-		return &u
-	}
-	u.User = ""
-	u.Revision = id.PromulgatedRevision
-	return &u
-}
-
-// PromulgatedURL returns the promulgated URL for id if there
-// is one, or nil otherwise.
-func (id *ResolvedURL) PromulgatedURL() *charm.Reference {
-	if id.PromulgatedRevision == -1 {
-		return nil
-	}
-	return id.PreferredURL()
-}
-
-func (id *ResolvedURL) GoString() string {
-	if id.PromulgatedRevision != -1 {
-		return fmt.Sprintf("%d %s", id.PromulgatedRevision, &id.URL)
-	}
-	return id.URL.String()
-}
-
-// String returns the preferred string representation of u.
-// It prefers to use the promulgated URL when there is one.
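-// For example (sketch):
-//
-//	u := MustNewResolvedURL("cs:~charmers/trusty/wordpress-3", 42)
-//	u.String() // "cs:trusty/wordpress-42"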
-func (u *ResolvedURL) String() string { - return u.PreferredURL().String() -} - -// New returns a charm store router that will route requests to -// the given handlers and retrieve metadata from the given database. -// -// The resolveURL function will be called to resolve ids in -// router paths - it should fill in the Series and Revision -// fields of its argument URL if they are not specified. -// The Cause of the resolveURL error will be left unchanged, -// as for the handlers. -// -// The authorize function will be called to authorize the request -// to any BulkIncludeHandlers. All other handlers are expected -// to handle their own authorization. The Cause of the authorize -// error will be left unchanged, as for the handlers. -// -// The exists function may be called to test whether an entity -// exists when an API endpoint needs to know that -// but has no appropriate handler to call. -func New( - handlers *Handlers, - resolveURL func(id *charm.Reference) (*ResolvedURL, error), - authorize func(id *ResolvedURL, req *http.Request) error, - exists func(id *ResolvedURL, req *http.Request) (bool, error), -) *Router { - r := &Router{ - handlers: handlers, - resolveURL: resolveURL, - authorize: authorize, - exists: exists, - } - mux := NewServeMux() - mux.Handle("/meta/", http.StripPrefix("/meta", HandleErrors(r.serveBulkMeta))) - for path, handler := range r.handlers.Global { - path = "/" + path - prefix := strings.TrimSuffix(path, "/") - mux.Handle(path, http.StripPrefix(prefix, handler)) - } - mux.Handle("/", HandleErrors(r.serveIds)) - r.handler = mux - return r -} - -// ServeHTTP implements http.Handler.ServeHTTP. -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Allow cross-domain access from anywhere, including AJAX - // requests. An AJAX request will add an X-Requested-With: - // XMLHttpRequest header, which is a non-standard header, and - // hence will require a pre-flight request, so we need to - // specify that that header is allowed, and we also need to - // implement the OPTIONS method so that the pre-flight request - // can work. - // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS - header := w.Header() - header.Set("Access-Control-Allow-Origin", "*") - header.Set("Access-Control-Allow-Headers", "X-Requested-With") - - if req.Method == "OPTIONS" { - // We cheat here and say that all methods are allowed, - // even though any individual endpoint will allow - // only a subset of these. This means we can avoid - // putting OPTIONS handling in every endpoint, - // and it shouldn't actually matter in practice. - header.Set("Allow", "DELETE,GET,HEAD,PUT,POST") - return - } - if err := req.ParseForm(); err != nil { - WriteError(w, errgo.Notef(err, "cannot parse form")) - return - } - r.handler.ServeHTTP(w, req) -} - -// Handlers returns the set of handlers that the router was created with. -// This should not be changed. -func (r *Router) Handlers() *Handlers { - return r.handlers -} - -// serveIds serves requests that may be rooted at a charm or bundle id. -func (r *Router) serveIds(w http.ResponseWriter, req *http.Request) error { - // We can ignore a trailing / because we do not return any - // relative URLs. If we start to return relative URL redirects, - // we will need to redirect non-slash-terminated URLs - // to slash-terminated URLs. 
- // http://cdivilly.wordpress.com/2014/03/11/why-trailing-slashes-on-uris-are-important/ - path := strings.TrimSuffix(req.URL.Path, "/") - url, path, err := splitId(path) - if err != nil { - return errgo.WithCausef(err, params.ErrNotFound, "") - } - key, path := handlerKey(path) - if key == "" { - return errgo.WithCausef(nil, params.ErrNotFound, "") - } - handler := r.handlers.Id[key] - if handler != nil { - req.URL.Path = path - err := handler(url, w, req) - // Note: preserve error cause from handlers. - return errgo.Mask(err, errgo.Any) - } - if key != "meta/" && key != "meta" { - return errgo.WithCausef(nil, params.ErrNotFound, params.ErrNotFound.Error()) - } - // Always resolve the entity id for meta requests. - rurl, err := r.resolveURL(url) - if err != nil { - // Note: preserve error cause from resolveURL. - return errgo.Mask(err, errgo.Any) - } - req.URL.Path = path - return r.serveMeta(rurl, w, req) -} - -func idHandlerNeedsResolveURL(req *http.Request) bool { - return req.Method != "POST" && req.Method != "PUT" -} - -// handlerKey returns a key that can be used to look up a handler at the -// given path, and the remaining path elements. If there is no possible -// key, the returned key is empty. -func handlerKey(path string) (key, rest string) { - path = strings.TrimPrefix(path, "/") - key, i := splitPath(path, 0) - if key == "" { - // TODO what *should* we get if we GET just an id? - return "", rest - } - if i < len(path)-1 { - // There are more elements, so include the / character - // that terminates the element. - return path[0 : i+1], path[i:] - } - return key, "" -} - -func (r *Router) serveMeta(id *ResolvedURL, w http.ResponseWriter, req *http.Request) error { - switch req.Method { - case "GET", "HEAD": - resp, err := r.serveMetaGet(id, req) - if err != nil { - // Note: preserve error causes from meta handlers. - return errgo.Mask(err, errgo.Any) - } - jsonhttp.WriteJSON(w, http.StatusOK, resp) - return nil - case "PUT": - // Put requests don't return any data unless there's - // an error. - return r.serveMetaPut(id, req) - } - return params.ErrMethodNotAllowed -} - -func (r *Router) serveMetaGet(id *ResolvedURL, req *http.Request) (interface{}, error) { - // TODO: consider whether we might want the capability to - // have different permissions for different meta endpoints. - if err := r.authorize(id, req); err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - key, path := handlerKey(req.URL.Path) - if key == "" { - // GET id/meta - // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta - return r.metaNames(), nil - } - if key == "any" { - // GET id/meta/any?[include=meta[&include=meta...]] - // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany - includes := req.Form["include"] - // If there are no includes, we have no handlers to generate - // a "not found" error when the id doesn't exist, so we need - // to check explicitly. - if len(includes) == 0 { - exists, err := r.exists(id, req) - if err != nil { - return nil, errgo.Notef(err, "cannot determine existence of %q", id) - } - if !exists { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "") - } - return params.MetaAnyResponse{Id: id.PreferredURL()}, nil - } - meta, err := r.GetMetadata(id, includes, req) - if err != nil { - // Note: preserve error cause from handlers. 
- return nil, errgo.Mask(err, errgo.Any) - } - return params.MetaAnyResponse{ - Id: id.PreferredURL(), - Meta: meta, - }, nil - } - if handler := r.handlers.Meta[key]; handler != nil { - results, err := handler.HandleGet([]BulkIncludeHandler{handler}, id, []string{path}, req.Form, req) - if err != nil { - // Note: preserve error cause from handlers. - return nil, errgo.Mask(err, errgo.Any) - } - result := results[0] - if isNull(result) { - return nil, params.ErrMetadataNotFound - } - return results[0], nil - } - return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown metadata %q", strings.TrimPrefix(req.URL.Path, "/")) -} - -const jsonContentType = "application/json" - -func unmarshalJSONBody(req *http.Request, val interface{}) error { - if ct := req.Header.Get("Content-Type"); ct != jsonContentType { - return errgo.WithCausef(nil, params.ErrBadRequest, "unexpected Content-Type %q; expected %q", ct, jsonContentType) - } - dec := json.NewDecoder(req.Body) - if err := dec.Decode(val); err != nil { - return errgo.Notef(err, "cannot unmarshal body") - } - return nil -} - -// serveMetaPut serves a PUT request to the metadata for the given id. -// The metadata to be put is in the request body. -// PUT /$id/meta/... -func (r *Router) serveMetaPut(id *ResolvedURL, req *http.Request) error { - if err := r.authorize(id, req); err != nil { - return errgo.Mask(err, errgo.Any) - } - var body json.RawMessage - if err := unmarshalJSONBody(req, &body); err != nil { - return errgo.Mask(err, errgo.Is(params.ErrBadRequest)) - } - return r.serveMetaPutBody(id, req, &body) -} - -// serveMetaPutBody serves a PUT request to the metadata for the given id. -// The metadata to be put is in body. -// This method is used both for individual metadata PUTs and -// also bulk metadata PUTs. -func (r *Router) serveMetaPutBody(id *ResolvedURL, req *http.Request, body *json.RawMessage) error { - key, path := handlerKey(req.URL.Path) - if key == "" { - return params.ErrForbidden - } - if key == "any" { - // PUT id/meta/any - var bodyMeta struct { - Meta map[string]*json.RawMessage - } - if err := json.Unmarshal(*body, &bodyMeta); err != nil { - return errgo.Notef(err, "cannot unmarshal body") - } - if err := r.PutMetadata(id, bodyMeta.Meta, req); err != nil { - return errgo.Mask(err, errgo.Any) - } - return nil - } - if handler := r.handlers.Meta[key]; handler != nil { - errs := handler.HandlePut( - []BulkIncludeHandler{handler}, - id, - []string{path}, - []*json.RawMessage{body}, - req, - ) - if len(errs) > 0 && errs[0] != nil { - // Note: preserve error cause from handlers. - return errgo.Mask(errs[0], errgo.Any) - } - return nil - } - return errgo.WithCausef(nil, params.ErrNotFound, "") -} - -// isNull reports whether the given value will encode to -// a null JSON value. -func isNull(val interface{}) bool { - if val == nil { - return true - } - v := reflect.ValueOf(val) - if kind := v.Kind(); kind != reflect.Map && kind != reflect.Ptr && kind != reflect.Slice { - return false - } - return v.IsNil() -} - -// metaNames returns a slice of all the metadata endpoint names. -func (r *Router) metaNames() []string { - names := make([]string, 0, len(r.handlers.Meta)) - for name := range r.handlers.Meta { - // Ensure that we don't generate duplicate entries - // when there's an entry for both "x" and "x/". 
- trimmed := strings.TrimSuffix(name, "/") - if trimmed != name && r.handlers.Meta[trimmed] != nil { - continue - } - names = append(names, trimmed) - } - sort.Strings(names) - return names -} - -// serveBulkMeta serves bulk metadata requests (requests to /meta/...). -func (r *Router) serveBulkMeta(w http.ResponseWriter, req *http.Request) error { - switch req.Method { - case "GET", "HEAD": - // A bare meta returns all endpoints. - // See https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata - if req.URL.Path == "/" || req.URL.Path == "" { - jsonhttp.WriteJSON(w, http.StatusOK, r.metaNames()) - return nil - } - resp, err := r.serveBulkMetaGet(req) - if err != nil { - return errgo.Mask(err, errgo.Any) - } - jsonhttp.WriteJSON(w, http.StatusOK, resp) - return nil - case "PUT": - return r.serveBulkMetaPut(req) - default: - return params.ErrMethodNotAllowed - } -} - -// serveBulkMetaGet serves the "bulk" metadata retrieval endpoint -// that can return information on several ids at once. -// -// GET meta/$endpoint?id=$id0[&id=$id1...][$otherflags] -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-metaendpoint -func (r *Router) serveBulkMetaGet(req *http.Request) (interface{}, error) { - // TODO get the metadata concurrently for each id. - ids := req.Form["id"] - if len(ids) == 0 { - return nil, errgo.WithCausef(nil, params.ErrBadRequest, "no ids specified in meta request") - } - delete(req.Form, "id") - ignoreAuth, err := ParseBool(req.Form.Get("ignore-auth")) - if err != nil { - return nil, errgo.WithCausef(err, params.ErrBadRequest, "") - } - delete(req.Form, "ignore-auth") - result := make(map[string]interface{}) - for _, id := range ids { - url, err := charm.ParseReference(id) - if err != nil { - return nil, errgo.WithCausef(err, params.ErrBadRequest, "") - } - rurl, err := r.resolveURL(url) - if err != nil { - if errgo.Cause(err) == params.ErrNotFound { - // URLs not found will be omitted from the result. - // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata - continue - } - // Note: preserve error cause from resolveURL. - return nil, errgo.Mask(err, errgo.Any) - } - meta, err := r.serveMetaGet(rurl, req) - if cause := errgo.Cause(err); cause == params.ErrNotFound || cause == params.ErrMetadataNotFound || (ignoreAuth && isAuthorizationError(cause)) { - // The relevant data does not exist, or it is not public and client - // asked not to authorize. - // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata - continue - } - if err != nil { - return nil, errgo.Mask(err) - } - result[id] = meta - } - return result, nil -} - -// ParseBool returns the boolean value represented by the string. -// It accepts "1" or "0". Any other value returns an error. -func ParseBool(value string) (bool, error) { - switch value { - case "0", "": - return false, nil - case "1": - return true, nil - } - return false, errgo.Newf(`unexpected bool value %q (must be "0" or "1")`, value) -} - -// isAuthorizationError reports whether the given error cause is an -// authorization error. -func isAuthorizationError(cause error) bool { - if cause == params.ErrUnauthorized { - return true - } - _, ok := cause.(*httpbakery.Error) - return ok -} - -// serveBulkMetaPut serves a bulk PUT request to several ids. 
-// PUT /meta/$endpoint
-// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-metaendpoint
-func (r *Router) serveBulkMetaPut(req *http.Request) error {
-	if len(req.Form["id"]) > 0 {
-		return fmt.Errorf("ids may not be specified in meta PUT request")
-	}
-	var ids map[string]*json.RawMessage
-	if err := unmarshalJSONBody(req, &ids); err != nil {
-		return errgo.Mask(err, errgo.Is(params.ErrBadRequest))
-	}
-	var multiErr multiError
-	for id, val := range ids {
-		if err := r.serveBulkMetaPutOne(req, id, val); err != nil {
-			if multiErr == nil {
-				multiErr = make(multiError)
-			}
-			multiErr[id] = errgo.Mask(err, errgo.Any)
-		}
-	}
-	if len(multiErr) != 0 {
-		return multiErr
-	}
-	return nil
-}
-
-// serveBulkMetaPutOne serves a PUT to a single id as part of a bulk PUT
-// request. It's in a separate function to make the error handling easier.
-func (r *Router) serveBulkMetaPutOne(req *http.Request, id string, val *json.RawMessage) error {
-	url, err := charm.ParseReference(id)
-	if err != nil {
-		return errgo.Mask(err)
-	}
-	rurl, err := r.resolveURL(url)
-	if err != nil {
-		// Note: preserve error cause from resolveURL.
-		return errgo.Mask(err, errgo.Any)
-	}
-	if err := r.authorize(rurl, req); err != nil {
-		return errgo.Mask(err, errgo.Any)
-	}
-	if err := r.serveMetaPutBody(rurl, req, val); err != nil {
-		return errgo.Mask(err, errgo.Any)
-	}
-	return nil
-}
-
-// maxMetadataConcurrency specifies the maximum number
-// of goroutines started to service a given GetMetadata request.
-// 5 is enough to more than cover the number of metadata
-// group handlers in the current API.
-const maxMetadataConcurrency = 5
-
-// GetMetadata retrieves metadata for the given charm or bundle id,
-// including information as specified by the includes slice.
-func (r *Router) GetMetadata(id *ResolvedURL, includes []string, req *http.Request) (map[string]interface{}, error) {
-	groups := make(map[interface{}][]BulkIncludeHandler)
-	includesByGroup := make(map[interface{}][]string)
-	for _, include := range includes {
-		// Get the key that lets us choose the include handler.
-		includeKey, _ := handlerKey(include)
-		handler := r.handlers.Meta[includeKey]
-		if handler == nil {
-			return nil, errgo.Newf("unrecognized metadata name %q", include)
-		}
-
-		// Get the key that lets us group this handler into the
-		// correct bulk group.
-		key := handler.Key()
-		groups[key] = append(groups[key], handler)
-		includesByGroup[key] = append(includesByGroup[key], include)
-	}
-	results := make(map[string]interface{})
-	// TODO when the number of groups is 1 (a common case), using
-	// parallel.NewRun actually slows things down by creating an
-	// extra goroutine. We could optimise it so that it doesn't
-	// create a goroutine in that case.
-	run := parallel.NewRun(maxMetadataConcurrency)
-	var mu sync.Mutex
-	for _, g := range groups {
-		g := g
-		run.Do(func() error {
-			// We know that we must have at least one element in the
-			// slice here. We could use any member of the slice to
-			// actually handle the request, so arbitrarily choose
-			// g[0]. Note that g[0].Key() is equal to g[i].Key() for
-			// every i in the slice.
-			groupIncludes := includesByGroup[g[0].Key()]
-
-			// Paths contains all the path elements after
-			// the handler key has been stripped off.
-			// (Each include appends exactly one handler to its group,
-			// so len(groupIncludes) always equals len(g) here.)
-			paths := make([]string, len(groupIncludes))
-			for i, include := range groupIncludes {
-				_, paths[i] = handlerKey(include)
-			}
-			groupResults, err := g[0].HandleGet(g, id, paths, nil, req)
-			if err != nil {
-				// TODO(rog) if it's a BulkError, attach
-				// the original include path to error (the BulkError
-				// should contain the index of the failed one).
-				return errgo.Mask(err, errgo.Any)
-			}
-			mu.Lock()
-			for i, result := range groupResults {
-				// Omit nil results from map. Note: omit statically typed
-				// nil results too to make it easy for handlers to return
-				// possibly nil data with a static type.
-				// https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata
-				if !isNull(result) {
-					results[groupIncludes[i]] = result
-				}
-			}
-			mu.Unlock()
-			return nil
-		})
-	}
-	if err := run.Wait(); err != nil {
-		// We could have got multiple errors, but we'll only return one of them.
-		return nil, errgo.Mask(err.(parallel.Errors)[0], errgo.Any)
-	}
-	return results, nil
-}
-
-// PutMetadata puts metadata for the given id. Each key in data holds
-// the name of a metadata endpoint; its associated value
-// holds the value to be written.
-func (r *Router) PutMetadata(id *ResolvedURL, data map[string]*json.RawMessage, req *http.Request) error {
-	groups := make(map[interface{}][]BulkIncludeHandler)
-	valuesByGroup := make(map[interface{}][]*json.RawMessage)
-	pathsByGroup := make(map[interface{}][]string)
-	for path, body := range data {
-		// Get the key that lets us choose the meta handler.
-		metaKey, _ := handlerKey(path)
-		handler := r.handlers.Meta[metaKey]
-		if handler == nil {
-			return errgo.Newf("unrecognized metadata name %q", path)
-		}
-
-		// Get the key that lets us group this handler into the
-		// correct bulk group.
-		key := handler.Key()
-		groups[key] = append(groups[key], handler)
-		valuesByGroup[key] = append(valuesByGroup[key], body)
-
-		// Record the original path; the handler key is stripped
-		// off just before the handler is invoked below.
-		pathsByGroup[key] = append(pathsByGroup[key], path)
-	}
-	var multiErr multiError
-	for _, g := range groups {
-		// We know that we must have at least one element in the
-		// slice here. We could use any member of the slice to
-		// actually handle the request, so arbitrarily choose
-		// g[0]. Note that g[0].Key() is equal to g[i].Key() for
-		// every i in the slice.
-		key := g[0].Key()
-
-		paths := pathsByGroup[key]
-		// The paths passed to the handler contain all the path elements
-		// after the handler key has been stripped off.
-		strippedPaths := make([]string, len(paths))
-		for i, path := range paths {
-			_, strippedPaths[i] = handlerKey(path)
-		}
-
-		errs := g[0].HandlePut(g, id, strippedPaths, valuesByGroup[key], req)
-		if len(errs) > 0 {
-			if multiErr == nil {
-				multiErr = make(multiError)
-			}
-			if len(errs) != len(paths) {
-				return fmt.Errorf("unexpected error count; expected %d, got %d (%q)", len(paths), len(errs), errs)
-			}
-			for i, err := range errs {
-				if err != nil {
-					multiErr[paths[i]] = err
-				}
-			}
-		}
-	}
-	if len(multiErr) != 0 {
-		return multiErr
-	}
-	return nil
-}
-
-// splitPath returns the first path element
-// in path[i:] and the index at which the
-// next element starts.
-//
-// For example, splitPath("/foo/bar/bzr", 4) returns ("bar", 8).
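-// Further examples: splitPath("foo/bar", 0) returns ("foo", 3), and
-// splitPath("/foo", 0) returns ("foo", 4), i.e. nextIndex == len(path)
-// when there is no further element.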
-func splitPath(path string, i int) (elem string, nextIndex int) { - if i < len(path) && path[i] == '/' { - i++ - } - j := strings.Index(path[i:], "/") - if j == -1 { - return path[i:], len(path) - } - j += i - return path[i:j], j -} - -// splitId splits the given URL path into a charm or bundle -// URL and the rest of the path. -func splitId(path string) (url *charm.Reference, rest string, err error) { - path = strings.TrimPrefix(path, "/") - - part, i := splitPath(path, 0) - - // skip ~ - if strings.HasPrefix(part, "~") { - part, i = splitPath(path, i) - } - // skip series - if knownSeries[part] { - part, i = splitPath(path, i) - } - - // part should now contain the charm name, - // and path[0:i] should contain the entire - // charm id. - - urlStr := strings.TrimSuffix(path[0:i], "/") - url, err = charm.ParseReference(urlStr) - if err != nil { - return nil, "", errgo.Mask(err) - } - return url, path[i:], nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/router_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/router_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/router_test.go 1970-01-01 00:00:00 +0000 @@ -1,2401 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package router - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "sort" - "strings" - "sync" - "sync/atomic" - - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - "github.com/juju/utils/jsonhttp" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/httpbakery" - - "gopkg.in/juju/charmstore.v4/params" -) - -type RouterSuite struct { - jujutesting.IsolationSuite -} - -var _ = gc.Suite(&RouterSuite{}) - -var newResolvedURL = MustNewResolvedURL - -var routerGetTests = []struct { - about string - handlers Handlers - urlStr string - expectStatus int - expectBody interface{} - expectQueryCount int32 - resolveURL func(*charm.Reference) (*ResolvedURL, error) - authorize func(*ResolvedURL, *http.Request) error - exists func(*ResolvedURL, *http.Request) (bool, error) -}{{ - about: "global handler", - handlers: Handlers{ - Global: map[string]http.Handler{ - "foo": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return ReqInfo{ - Method: req.Method, - Path: req.URL.Path, - Form: req.Form, - }, nil - }), - }, - }, - urlStr: "/foo", - expectStatus: http.StatusOK, - expectBody: ReqInfo{ - Method: "GET", - Path: "", - }, -}, { - about: "global handler with sub-path and flags", - handlers: Handlers{ - Global: map[string]http.Handler{ - "foo/bar/": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return ReqInfo{ - Method: req.Method, - Path: req.URL.Path, - Form: req.Form, - }, nil - }), - }, - }, - urlStr: "/foo/bar/a/b?a=1&b=two", - expectStatus: http.StatusOK, - expectBody: ReqInfo{ - Path: "/a/b", - Method: "GET", - Form: url.Values{ - "a": {"1"}, - "b": {"two"}, - }, - }, -}, { - about: "invalid form", - urlStr: "/foo?a=%", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: `cannot parse form: invalid URL escape "%"`, - }, -}, { - about: "id handler", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/precise/wordpress-34/foo", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: 
"cs:precise/wordpress-34", - }, -}, { - about: "windows id handler", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/win81/visualstudio-2012/foo", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:win81/visualstudio-2012", - }, -}, { - about: "id handler with no series in id", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/wordpress-34/foo", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:wordpress-34", - }, -}, { - about: "id handler with no revision in id", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/precise/wordpress/foo", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:precise/wordpress", - }, -}, { - about: "id handler with extra path", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo/": testIdHandler, - }, - }, - urlStr: "/precise/wordpress-34/foo/blah/arble", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:precise/wordpress-34", - Path: "/blah/arble", - }, -}, { - about: "id handler with allowed extra path but none given", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo/": testIdHandler, - }, - }, - urlStr: "/precise/wordpress-34/foo", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: "not found", - }, -}, { - about: "id handler with unwanted extra path", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/precise/wordpress-34/foo/blah", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: "not found", - }, -}, { - about: "id handler with user", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/~joe/precise/wordpress-34/foo", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:~joe/precise/wordpress-34", - }, -}, { - about: "id handler with user and extra path", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo/": testIdHandler, - }, - }, - urlStr: "/~joe/precise/wordpress-34/foo/blah/arble", - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:~joe/precise/wordpress-34", - Path: "/blah/arble", - }, -}, { - about: "id handler that returns an error", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo/": errorIdHandler, - }, - }, - urlStr: "/~joe/precise/wordpress-34/foo/blah/arble", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "errorIdHandler error", - }, -}, { - about: "id handler that returns a not-found error", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": func(charmId *charm.Reference, w http.ResponseWriter, req *http.Request) error { - return params.ErrNotFound - }, - }, - }, - urlStr: "/~joe/precise/wordpress-34/foo", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Message: "not found", - Code: params.ErrNotFound, - }, -}, { - about: "id handler that returns some other kind of coded error", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": func(charmId *charm.Reference, w http.ResponseWriter, req *http.Request) error { - return errgo.WithCausef(nil, params.ErrorCode("foo"), "a message") - }, - }, - }, - urlStr: "/~joe/precise/wordpress-34/foo", - 
expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "a message", - Code: "foo", - }, -}, { - about: "id with unspecified series and revision, not resolved", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/~joe/wordpress/foo", - resolveURL: resolveTo("precise", 34), - expectStatus: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "GET", - CharmURL: "cs:~joe/wordpress", - }, -}, { - about: "id with error on resolving", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/wordpress/meta", - resolveURL: resolveURLError(errgo.New("resolve URL error")), - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "resolve URL error", - }, -}, { - about: "id with error on resolving that has a Cause", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/wordpress/meta", - resolveURL: resolveURLError(params.ErrNotFound), - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Message: "not found", - Code: params.ErrNotFound, - }, -}, { - about: "meta list", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - "bar": testMetaHandler(1), - "bar/": testMetaHandler(2), - "foo/": testMetaHandler(3), - "baz": testMetaHandler(4), - }, - }, - urlStr: "/precise/wordpress-42/meta", - expectStatus: http.StatusOK, - expectBody: []string{"bar", "baz", "foo"}, -}, { - about: "meta list at root", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - "bar": testMetaHandler(1), - "bar/": testMetaHandler(2), - "foo/": testMetaHandler(3), - "baz": testMetaHandler(4), - }, - }, - urlStr: "/meta", - expectStatus: http.StatusOK, - expectBody: []string{"bar", "baz", "foo"}, -}, { - about: "meta list at root with trailing /", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - "bar": testMetaHandler(1), - "bar/": testMetaHandler(2), - "foo/": testMetaHandler(3), - "baz": testMetaHandler(4), - }, - }, - urlStr: "/meta/", - expectStatus: http.StatusOK, - expectBody: []string{"bar", "baz", "foo"}, -}, { - about: "meta handler", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/precise/wordpress-42/meta/foo", - expectStatus: http.StatusOK, - expectBody: &metaHandlerTestResp{ - CharmURL: "cs:precise/wordpress-42", - }, -}, { - about: "meta handler with additional elements", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo/": testMetaHandler(0), - }, - }, - urlStr: "/precise/wordpress-42/meta/foo/bar/baz", - expectStatus: http.StatusOK, - expectBody: metaHandlerTestResp{ - CharmURL: "cs:precise/wordpress-42", - Path: "/bar/baz", - }, -}, { - about: "meta handler with params", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/precise/wordpress-42/meta/foo?one=a&two=b&one=c", - expectStatus: http.StatusOK, - expectBody: metaHandlerTestResp{ - CharmURL: "cs:precise/wordpress-42", - Flags: url.Values{ - "one": {"a", "c"}, - "two": {"b"}, - }, - }, -}, { - about: "meta handler that's not found", - urlStr: "/precise/wordpress-42/meta/foo", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `unknown metadata "foo"`, - }, -}, { - about: "meta sub-handler that's not found", - urlStr: "/precise/wordpress-42/meta/foo/bar", 
- expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `unknown metadata "foo/bar"`, - }, -}, { - about: "meta handler with nil data", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": constMetaHandler(nil), - }, - }, - urlStr: "/precise/wordpress-42/meta/foo", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrMetadataNotFound, - Message: "metadata not found", - }, -}, { - about: "meta handler with typed nil data", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": constMetaHandler((*struct{})(nil)), - }, - }, - urlStr: "/precise/wordpress-42/meta/foo", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrMetadataNotFound, - Message: "metadata not found", - }, -}, { - about: "meta handler with field selector", - urlStr: "/precise/wordpress-42/meta/foo", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": fieldSelectHandler("handler1", 0, "field1", "field2"), - }, - }, - expectStatus: http.StatusOK, - expectQueryCount: 1, - expectBody: fieldSelectHandleGetInfo{ - HandlerId: "handler1", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - }, -}, { - about: "meta handler returning error with code", - urlStr: "/precise/wordpress-42/meta/foo", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": errorMetaHandler(errgo.WithCausef(nil, params.ErrorCode("arble"), "a message")), - }, - }, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Code: "arble", - Message: "a message", - }, -}, { - about: "unauthorized meta handler", - urlStr: "/precise/wordpress-42/meta/foo", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - authorize: neverAuthorize, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: "bad wolf", - }, -}, { - about: "meta/any, no includes, id exists", - urlStr: "/precise/wordpress-42/meta/any", - exists: alwaysExists, - expectStatus: http.StatusOK, - expectBody: params.MetaAnyResponse{ - Id: charm.MustParseReference("cs:precise/wordpress-42"), - }, -}, { - about: "meta/any, no includes, id does not exist", - urlStr: "/precise/wordpress/meta/any", - exists: func(id *ResolvedURL, req *http.Request) (bool, error) { - return false, nil - }, - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: "not found", - }, -}, { - about: "meta/any, some includes all using same key", - urlStr: "/precise/wordpress-42/meta/any?include=field1-1&include=field2&include=field1-2", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "field1-1": fieldSelectHandler("handler1", 0, "field1"), - "field2": fieldSelectHandler("handler2", 0, "field2"), - "field1-2": fieldSelectHandler("handler3", 0, "field1"), - }, - }, - expectQueryCount: 1, - expectStatus: http.StatusOK, - expectBody: params.MetaAnyResponse{ - Id: charm.MustParseReference("cs:precise/wordpress-42"), - Meta: map[string]interface{}{ - "field1-1": fieldSelectHandleGetInfo{ - HandlerId: "handler1", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - }, - 
"field2": fieldSelectHandleGetInfo{ - HandlerId: "handler2", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - }, - "field1-2": fieldSelectHandleGetInfo{ - HandlerId: "handler3", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - }, - }, - }, -}, { - about: "meta/any, includes with additional path elements", - urlStr: "/precise/wordpress-42/meta/any?include=item1/foo&include=item2/bar&include=item1", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "item1/": fieldSelectHandler("handler1", 0, "field1"), - "item2/": fieldSelectHandler("handler2", 0, "field2"), - "item1": fieldSelectHandler("handler3", 0, "field3"), - }, - }, - expectQueryCount: 1, - expectStatus: http.StatusOK, - expectBody: params.MetaAnyResponse{ - Id: charm.MustParseReference("cs:precise/wordpress-42"), - Meta: map[string]interface{}{ - "item1/foo": fieldSelectHandleGetInfo{ - HandlerId: "handler1", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Path: "/foo", - }, - "item2/bar": fieldSelectHandleGetInfo{ - HandlerId: "handler2", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Path: "/bar", - }, - "item1": fieldSelectHandleGetInfo{ - HandlerId: "handler3", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, - }, - Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), - }, - }, - }, -}, { - about: "meta/any, nil metadata omitted", - urlStr: "/precise/wordpress-42/meta/any?include=ok&include=nil", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "ok": testMetaHandler(0), - "nil": constMetaHandler(nil), - "typednil": constMetaHandler((*struct{})(nil)), - }, - }, - expectStatus: http.StatusOK, - expectBody: params.MetaAnyResponse{ - Id: charm.MustParseReference("cs:precise/wordpress-42"), - Meta: map[string]interface{}{ - "ok": metaHandlerTestResp{ - CharmURL: "cs:precise/wordpress-42", - }, - }, - }, -}, { - about: "meta/any, handler returns error with cause", - urlStr: "/precise/wordpress-42/meta/any?include=error", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "error": errorMetaHandler(errgo.WithCausef(nil, params.ErrorCode("foo"), "a message")), - }, - }, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Code: "foo", - Message: "a message", - }, -}, { - about: "bulk meta handler, single id", - urlStr: "/meta/foo?id=precise/wordpress-42", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]metaHandlerTestResp{ - "precise/wordpress-42": { - CharmURL: "cs:precise/wordpress-42", - }, - }, -}, { - about: "bulk meta handler, several ids", - urlStr: "/meta/foo?id=precise/wordpress-42&id=utopic/foo-32", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - 
"foo": testMetaHandler(0), - }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]metaHandlerTestResp{ - "precise/wordpress-42": { - CharmURL: "cs:precise/wordpress-42", - }, - "utopic/foo-32": { - CharmURL: "cs:utopic/foo-32", - }, - }, -}, { - about: "bulk meta/any handler, several ids", - urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&include=foo&include=bar/something", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - "bar/": testMetaHandler(1), - }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]params.MetaAnyResponse{ - "precise/wordpress-42": { - Id: charm.MustParseReference("cs:precise/wordpress-42"), - Meta: map[string]interface{}{ - "foo": metaHandlerTestResp{ - CharmURL: "cs:precise/wordpress-42", - }, - "bar/something": metaHandlerTestResp{ - CharmURL: "cs:precise/wordpress-42", - Path: "/something", - }, - }, - }, - "utopic/foo-32": { - Id: charm.MustParseReference("cs:utopic/foo-32"), - Meta: map[string]interface{}{ - "foo": metaHandlerTestResp{ - CharmURL: "cs:utopic/foo-32", - }, - "bar/something": metaHandlerTestResp{ - CharmURL: "cs:utopic/foo-32", - Path: "/something", - }, - }, - }, - }, -}, { - about: "bulk meta/any handler, discharge required", - urlStr: "/meta/any?id=precise/wordpress-42&include=foo", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - authorize: dischargeRequiredAuthorize, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "discharge required", - }, -}, { - about: "bulk meta/any handler, discharge required, ignore authorization", - urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - authorize: dischargeRequiredAuthorize, - expectStatus: http.StatusOK, - expectBody: map[string]params.MetaAnyResponse{}, -}, { - about: "bulk meta/any handler, some unauthorized, ignore authorization", - urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&include=foo&ignore-auth=1", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - authorize: dischargeRequiredAuthorize, - expectStatus: http.StatusOK, - expectBody: map[string]params.MetaAnyResponse{ - "utopic/foo-32": { - Id: charm.MustParseReference("cs:utopic/foo-32"), - Meta: map[string]interface{}{ - "foo": metaHandlerTestResp{ - CharmURL: "cs:utopic/foo-32", - }, - }, - }, - }, -}, { - about: "bulk meta/any handler, unauthorized", - urlStr: "/meta/any?id=precise/wordpress-42&include=foo", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - authorize: neverAuthorize, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "bad wolf", - }, -}, { - about: "bulk meta/any handler, unauthorized, ignore authorization", - urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - authorize: neverAuthorize, - expectStatus: http.StatusOK, - expectBody: map[string]params.MetaAnyResponse{}, -}, { - about: "bulk meta/any handler, invalid ignore-auth flag", - urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=meh", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: `bad request: unexpected bool value "meh" (must be "0" or 
"1")`, - }, -}, { - about: "bulk meta handler with unresolved id", - urlStr: "/meta/foo/bar?id=wordpress", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo/": testMetaHandler(0), - }, - }, - resolveURL: resolveTo("precise", 100), - expectStatus: http.StatusOK, - expectBody: map[string]metaHandlerTestResp{ - "wordpress": { - CharmURL: "cs:precise/wordpress-100", - Path: "/bar", - }, - }, -}, { - about: "bulk meta handler with extra flags", - urlStr: "/meta/foo/bar?id=wordpress&arble=bletch&z=w&z=p", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo/": testMetaHandler(0), - }, - }, - resolveURL: resolveTo("precise", 100), - expectStatus: http.StatusOK, - expectBody: map[string]metaHandlerTestResp{ - "wordpress": { - CharmURL: "cs:precise/wordpress-100", - Path: "/bar", - Flags: url.Values{ - "arble": {"bletch"}, - "z": {"w", "p"}, - }, - }, - }, -}, { - about: "bulk meta handler with no ids", - urlStr: "/meta/foo/bar", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo/": testMetaHandler(0), - }, - }, - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "no ids specified in meta request", - }, -}, { - about: "bulk meta handler with unresolvable id", - urlStr: "/meta/foo?id=unresolved&id=~foo/precise/wordpress-23", - resolveURL: func(url *charm.Reference) (*ResolvedURL, error) { - if url.Name == "unresolved" { - return nil, params.ErrNotFound - } - return &ResolvedURL{URL: *url, PromulgatedRevision: 99}, nil - }, - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]metaHandlerTestResp{ - "~foo/precise/wordpress-23": { - CharmURL: "cs:precise/wordpress-99", - }, - }, -}, { - about: "bulk meta handler with id resolution error", - urlStr: "/meta/foo?id=resolveerror&id=precise/wordpress-23", - resolveURL: func(url *charm.Reference) (*ResolvedURL, error) { - if url.Name == "resolveerror" { - return nil, errgo.Newf("an error") - } - return &ResolvedURL{URL: *url}, nil - }, - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "an error", - }, -}, { - about: "bulk meta handler with some nil data", - urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": selectiveIdHandler(map[string]interface{}{ - "cs:bundle/something-24": "bundlefoo", - }), - }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]string{ - "bundle/something-24": "bundlefoo", - }, -}, { - about: "bulk meta handler with entity not found", - urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": SingleIncludeHandler(func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - if id.URL.Revision == 23 { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "") - } - return "something", nil - }), - }, - }, - expectStatus: http.StatusOK, - expectBody: map[string]string{ - "bundle/something-24": "something", - }, -}, { - about: "meta request with invalid entity reference", - urlStr: "/robots.txt/meta/any", - handlers: Handlers{}, - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `not found: charm URL has invalid charm name: "robots.txt"`, 
- }, -}, { - about: "bulk meta handler, invalid id", - urlStr: "/meta/foo?id=robots.txt", - handlers: Handlers{}, - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: `bad request: charm URL has invalid charm name: "robots.txt"`, - }, -}} - -// resolveTo returns a URL resolver that resolves -// unspecified series and revision to the given series -// and revision. -func resolveTo(series string, revision int) func(*charm.Reference) (*ResolvedURL, error) { - return func(url *charm.Reference) (*ResolvedURL, error) { - var rurl ResolvedURL - rurl.URL = *url - if url.Series == "" { - rurl.URL.Series = series - } - if url.Revision == -1 { - rurl.URL.Revision = revision - } - if url.User == "" { - rurl.URL.User = "charmers" - rurl.PromulgatedRevision = revision - } - return &rurl, nil - } -} - -func resolveURLError(err error) func(*charm.Reference) (*ResolvedURL, error) { - return func(*charm.Reference) (*ResolvedURL, error) { - return nil, err - } -} - -func alwaysResolveURL(u *charm.Reference) (*ResolvedURL, error) { - u1 := *u - if u1.Series == "" { - u1.Series = "precise" - } - if u1.Revision == -1 { - u1.Revision = 0 - } - promRev := -1 - if u1.User == "" { - u1.User = "charmers" - promRev = u1.Revision - } - return newResolvedURL(u1.String(), promRev), nil -} - -func (s *RouterSuite) TestRouterGet(c *gc.C) { - for i, test := range routerGetTests { - c.Logf("test %d: %s", i, test.about) - resolve := alwaysResolveURL - if test.resolveURL != nil { - resolve = test.resolveURL - } - authorize := alwaysAuthorize - if test.authorize != nil { - authorize = test.authorize - } - exists := alwaysExists - if test.exists != nil { - exists = test.exists - } - router := New(&test.handlers, resolve, authorize, exists) - // Note that fieldSelectHandler increments queryCount each time - // a query is made. 
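-		// Reset the counter so that each test case observes only its
-		// own queries when checked against expectQueryCount below.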
- queryCount = 0 - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: router, - URL: test.urlStr, - ExpectStatus: test.expectStatus, - ExpectBody: test.expectBody, - }) - c.Assert(queryCount, gc.Equals, test.expectQueryCount) - } -} - -var parseBoolTests = []struct { - value string - result bool - err bool -}{{ - value: "0", -}, { - value: "", -}, { - value: "1", - result: true, -}, { - value: "invalid", - err: true, -}} - -func (s *RouterSuite) TestParseBool(c *gc.C) { - for i, test := range parseBoolTests { - c.Logf("test %d: %s", i, test.value) - result, err := ParseBool(test.value) - c.Assert(result, gc.Equals, test.result) - if test.err { - c.Assert(err, gc.ErrorMatches, "unexpected bool value .*") - continue - } - c.Assert(err, jc.ErrorIsNil) - } -} - -func (s *RouterSuite) TestCORSHeaders(c *gc.C) { - h := New(&Handlers{ - Global: map[string]http.Handler{ - "foo": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {}), - }, - }, alwaysResolveURL, alwaysAuthorize, alwaysExists) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: h, - URL: "/foo", - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "*") - c.Assert(rec.Header().Get("Access-Control-Allow-Headers"), gc.Equals, "X-Requested-With") -} - -func (s *RouterSuite) TestHTTPRequestPassedThroughToMeta(c *gc.C) { - testReq, err := http.NewRequest("GET", "/wordpress/meta/foo", nil) - c.Assert(err, gc.IsNil) - doneQuery := false - query := func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { - if req != testReq { - return nil, fmt.Errorf("unexpected request found in Query") - } - doneQuery = true - return 0, nil - } - doneGet := false - handleGet := func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - if req != testReq { - return nil, fmt.Errorf("unexpected request found in HandleGet") - } - doneGet = true - return 0, nil - } - donePut := false - handlePut := func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - if req != testReq { - return fmt.Errorf("unexpected request found in HandlePut") - } - donePut = true - return nil - } - update := func(id *ResolvedURL, fields map[string]interface{}) error { - return nil - } - h := New(&Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - Query: query, - Fields: []string{"foo"}, - HandleGet: handleGet, - HandlePut: handlePut, - Update: update, - }), - }, - }, alwaysResolveURL, alwaysAuthorize, alwaysExists) - resp := httptest.NewRecorder() - h.ServeHTTP(resp, testReq) - c.Assert(resp.Code, gc.Equals, http.StatusOK, gc.Commentf("response body: %s", resp.Body)) - c.Assert(doneGet, jc.IsTrue) - c.Assert(doneQuery, jc.IsTrue) - - testReq, err = http.NewRequest("PUT", "/wordpress/meta/foo", strings.NewReader(`"hello"`)) - testReq.Header.Set("Content-Type", "application/json") - c.Assert(err, gc.IsNil) - resp = httptest.NewRecorder() - h.ServeHTTP(resp, testReq) - c.Assert(resp.Code, gc.Equals, http.StatusOK, gc.Commentf("response body: %s", resp.Body)) - c.Assert(donePut, jc.IsTrue) -} - -func (s *RouterSuite) TestOptionsHTTPMethod(c *gc.C) { - h := New(&Handlers{}, alwaysResolveURL, alwaysAuthorize, alwaysExists) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: h, - Method: "OPTIONS", - URL: "/foo", - }) - c.Assert(rec.Code, gc.Equals, 
http.StatusOK) - header := rec.Header() - c.Assert(header.Get("Access-Control-Allow-Origin"), gc.Equals, "*") - c.Assert(header.Get("Access-Control-Allow-Headers"), gc.Equals, "X-Requested-With") - c.Assert(header.Get("Allow"), gc.Equals, "DELETE,GET,HEAD,PUT,POST") -} - -var routerPutTests = []struct { - about string - handlers Handlers - urlStr string - body interface{} - expectCode int - expectBody interface{} - expectRecordedCalls []interface{} - resolveURL func(*charm.Reference) (*ResolvedURL, error) -}{{ - about: "global handler", - handlers: Handlers{ - Global: map[string]http.Handler{ - "foo": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return ReqInfo{ - Method: req.Method, - Path: req.URL.Path, - Form: req.Form, - }, nil - }), - }, - }, - urlStr: "/foo", - expectCode: http.StatusOK, - expectBody: ReqInfo{ - Method: "PUT", - Path: "", - }, -}, { - about: "id handler", - handlers: Handlers{ - Id: map[string]IdHandler{ - "foo": testIdHandler, - }, - }, - urlStr: "/precise/wordpress-34/foo", - expectCode: http.StatusOK, - expectBody: idHandlerTestResp{ - Method: "PUT", - CharmURL: "cs:precise/wordpress-34", - }, -}, { - about: "meta handler", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/precise/wordpress-42/meta/foo", - expectCode: http.StatusOK, - body: "hello", - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 1, - Id: "cs:precise/wordpress-42", - Paths: []string{""}, - Values: []interface{}{"hello"}, - }, - }, -}, { - about: "meta/any", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - "bar": testMetaHandler(1), - }, - }, - urlStr: "/precise/wordpress-42/meta/any", - body: params.MetaAnyResponse{ - Meta: map[string]interface{}{ - "foo": "foo-value", - "bar": map[string]interface{}{ - "bar-value1": 234.0, - "bar-value2": "whee", - }, - }, - }, - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 2, - Id: "cs:precise/wordpress-42", - Paths: []string{"", ""}, - Values: []interface{}{ - "foo-value", - map[string]interface{}{ - "bar-value1": 234.0, - "bar-value2": "whee", - }, - }, - }, - }, -}, { - about: "meta/any with extra paths", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo/": testMetaHandler(0), - "bar": testMetaHandler(1), - }, - }, - urlStr: "/precise/wordpress-42/meta/any", - body: params.MetaAnyResponse{ - Meta: map[string]interface{}{ - "foo/one": "foo-value-one", - "foo/two": "foo-value-two", - "bar": 1234.0, - }, - }, - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 3, - Id: "cs:precise/wordpress-42", - Paths: []string{"/one", "/two", ""}, - Values: []interface{}{ - "foo-value-one", - "foo-value-two", - 1234.0, - }, - }, - }, -}, { - about: "bulk meta", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/meta/foo", - body: map[string]string{ - "precise/wordpress-42": "forty two", - "precise/foo-134": "blah", - }, - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 1, - Id: "cs:precise/foo-134", - Paths: []string{""}, - Values: []interface{}{"blah"}, - }, - metaHandlerTestPutParams{ - NumHandlers: 1, - Id: "cs:precise/wordpress-42", - Paths: []string{""}, - Values: []interface{}{"forty two"}, - }, - }, -}, { - about: "bulk meta any", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - "bar": 
testMetaHandler(1), - "baz/": testMetaHandler(2), - }, - }, - urlStr: "/meta/any", - body: map[string]params.MetaAnyResponse{ - "precise/wordpress-42": { - Meta: map[string]interface{}{ - "foo": "foo-wordpress-val", - "bar": "bar-wordpress-val", - }, - }, - "precise/mysql-134": { - Meta: map[string]interface{}{ - "foo": "foo-mysql-val", - "baz/blah": "baz/blah-mysql-val", - "baz/ppp": "baz/ppp-mysql-val", - }, - }, - }, - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 3, - Id: "cs:precise/mysql-134", - Paths: []string{"", "/blah", "/ppp"}, - Values: []interface{}{"foo-mysql-val", "baz/blah-mysql-val", "baz/ppp-mysql-val"}, - }, - metaHandlerTestPutParams{ - NumHandlers: 2, - Id: "cs:precise/wordpress-42", - Paths: []string{"", ""}, - Values: []interface{}{"foo-wordpress-val", "bar-wordpress-val"}, - }, - }, -}, { - about: "field include handler with bulk meta any", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": fieldSelectHandler("handler1", 0, "field1", "field2"), - "bar": fieldSelectHandler("handler2", 0, "field3", "field4"), - "baz/": fieldSelectHandler("handler3", 1, "field5"), - }, - }, - urlStr: "/meta/any", - body: map[string]params.MetaAnyResponse{ - "precise/mysql-123": { - Meta: map[string]interface{}{ - "foo": "foo-mysql-val", - "baz/blah": "baz/blah-mysql-val", - "baz/ppp": "baz/ppp-mysql-val", - }, - }, - "precise/wordpress-42": { - Meta: map[string]interface{}{ - "foo": "foo-wordpress-val", - "bar": "bar-wordpress-val", - }, - }, - }, - expectRecordedCalls: []interface{}{ - fieldSelectHandleUpdateInfo{ - Id: "cs:precise/mysql-123", - Fields: map[string]fieldSelectHandlePutInfo{ - "field1": { - Id: "cs:precise/mysql-123", - Value: "foo-mysql-val", - }, - "field2": { - Id: "cs:precise/mysql-123", - Value: "foo-mysql-val", - }, - }, - }, - fieldSelectHandleUpdateInfo{ - Id: "cs:precise/mysql-123", - Fields: map[string]fieldSelectHandlePutInfo{ - "field5/blah": { - Id: "cs:precise/mysql-123", - Value: "baz/blah-mysql-val", - }, - "field5/ppp": { - Id: "cs:precise/mysql-123", - Value: "baz/ppp-mysql-val", - }, - }, - }, - fieldSelectHandleUpdateInfo{ - Id: "cs:precise/wordpress-42", - Fields: map[string]fieldSelectHandlePutInfo{ - "field1": { - Id: "cs:precise/wordpress-42", - Value: "foo-wordpress-val", - }, - "field2": { - Id: "cs:precise/wordpress-42", - Value: "foo-wordpress-val", - }, - "field3": { - Id: "cs:precise/wordpress-42", - Value: "bar-wordpress-val", - }, - "field4": { - Id: "cs:precise/wordpress-42", - Value: "bar-wordpress-val", - }, - }, - }, - }, -}, { - about: "field include handler with no HandlePut", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - }), - }, - }, - urlStr: "/precise/wordpress-23/meta/foo", - body: "something", - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "PUT not supported", - }, -}, { - about: "field include handler when HandlePut returns an error", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return errgo.WithCausef(nil, params.ErrNotFound, "message") - }, - }), - }, - }, - urlStr: "/precise/wordpress-23/meta/foo", - body: "something", - expectCode: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: "message", - }, -}, { - about: 
"meta put to field include handler with several errors", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return errgo.WithCausef(nil, params.ErrNotFound, "foo error") - }, - Update: nopUpdate, - }), - "bar": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return errgo.New("bar error") - }, - Update: nopUpdate, - }), - "baz": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return nil - }, - Update: nopUpdate, - }), - }, - }, - urlStr: "/precise/wordpress-23/meta/any", - body: params.MetaAnyResponse{ - Meta: map[string]interface{}{ - "foo": "one", - "bar": "two", - "baz": "three", - }, - }, - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Code: params.ErrMultipleErrors, - Message: "multiple (2) errors", - Info: map[string]*params.Error{ - "foo": { - Code: params.ErrNotFound, - Message: "foo error", - }, - "bar": { - Message: "bar error", - }, - }, - }, -}, { - about: "meta/any put with update error", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo/": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - if path == "/bad" { - return fmt.Errorf("foo/bad error") - } - return nil - }, - Update: func(id *ResolvedURL, fields map[string]interface{}) error { - return params.ErrBadRequest - }, - }), - "bar": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 1, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return fmt.Errorf("bar error") - }, - }), - }, - }, - urlStr: "/precise/wordpress-23/meta/any", - body: params.MetaAnyResponse{ - Meta: map[string]interface{}{ - "foo/one": "one", - "foo/two": "two", - "foo/bad": "bad", - "bar": "bar", - }, - }, - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Code: params.ErrMultipleErrors, - Message: "multiple (4) errors", - Info: map[string]*params.Error{ - // All endpoints that share the same bulk key should - // get the same error, as the update pertains to all of them, - // but endpoints for which the HandlePut failed will - // not be included in that. 
- "foo/one": { - Code: params.ErrBadRequest, - Message: "bad request", - }, - "foo/two": { - Code: params.ErrBadRequest, - Message: "bad request", - }, - "foo/bad": { - Message: "foo/bad error", - }, - "bar": { - Message: "bar error", - }, - }, - }, -}, { - about: "bulk meta/any put with several errors", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return nil - }, - Update: nopUpdate, - }), - "bar": FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: 0, - HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - return errgo.WithCausef(nil, params.ErrNotFound, "bar error") - }, - Update: nopUpdate, - }), - }, - }, - resolveURL: func(id *charm.Reference) (*ResolvedURL, error) { - if id.Name == "bad" { - return nil, params.ErrBadRequest - } - return &ResolvedURL{URL: *id}, nil - }, - urlStr: "/meta/any", - body: map[string]params.MetaAnyResponse{ - "precise/mysql-123": { - Meta: map[string]interface{}{ - "foo": "fooval", - "bar": "barval", - }, - }, - "bad": { - Meta: map[string]interface{}{ - "foo": "foo-wordpress-val", - "bar": "bar-wordpress-val", - }, - }, - }, - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Code: params.ErrMultipleErrors, - Message: "multiple (2) errors", - Info: map[string]*params.Error{ - "precise/mysql-123": { - Code: params.ErrMultipleErrors, - Message: "multiple (1) errors", - Info: map[string]*params.Error{ - "bar": { - Code: params.ErrNotFound, - Message: "bar error", - }, - }, - }, - "bad": { - Message: "bad request", - Code: params.ErrBadRequest, - }, - }, - }, -}, { - about: "meta put with unresolved URL", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/wordpress/meta/foo", - resolveURL: resolveTo("series", 245), - expectCode: http.StatusOK, - body: "hello", - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 1, - Id: "cs:series/wordpress-245", - Paths: []string{""}, - Values: []interface{}{"hello"}, - }, - }, -}, { - about: "bulk put with unresolved URL", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/meta/foo", - resolveURL: resolveTo("series", 245), - expectCode: http.StatusOK, - body: map[string]string{ - "wordpress": "hello", - }, - expectRecordedCalls: []interface{}{ - metaHandlerTestPutParams{ - NumHandlers: 1, - Id: "cs:series/wordpress-245", - Paths: []string{""}, - Values: []interface{}{"hello"}, - }, - }, -}, { - about: "bulk put with ids specified in URL", - handlers: Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - }, - urlStr: "/meta/foo?id=wordpress", - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "ids may not be specified in meta PUT request", - }, -}} - -func nopUpdate(id *ResolvedURL, fields map[string]interface{}) error { - return nil -} - -func (s *RouterSuite) TestRouterPut(c *gc.C) { - for i, test := range routerPutTests { - c.Logf("test %d: %s", i, test.about) - ResetRecordedCalls() - resolve := alwaysResolveURL - if test.resolveURL != nil { - resolve = test.resolveURL - } - bodyVal, err := json.Marshal(test.body) - c.Assert(err, gc.IsNil) - router := New(&test.handlers, resolve, alwaysAuthorize, alwaysExists) - 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: router, - URL: test.urlStr, - Body: bytes.NewReader(bodyVal), - Method: "PUT", - Header: map[string][]string{ - "Content-Type": {"application/json"}, - }, - ExpectStatus: test.expectCode, - ExpectBody: test.expectBody, - }) - c.Assert(RecordedCalls(), jc.DeepEquals, test.expectRecordedCalls) - } -} - -var routerPutWithInvalidContentTests = []struct { - about string - urlStr string - contentType string - body string - expectCode int - expectBody interface{} -}{{ - about: "invalid content type with meta", - urlStr: "/precise/wordpress-23/meta/foo", - contentType: "foo/bar", - expectCode: http.StatusBadRequest, - expectBody: params.Error{ - Message: `unexpected Content-Type "foo/bar"; expected "application/json"`, - Code: params.ErrBadRequest, - }, -}, { - about: "invalid content type with bulk meta", - urlStr: "/meta/foo", - contentType: "foo/bar", - expectCode: http.StatusBadRequest, - expectBody: params.Error{ - Message: `unexpected Content-Type "foo/bar"; expected "application/json"`, - Code: params.ErrBadRequest, - }, -}, { - about: "bad JSON with meta", - urlStr: "/precise/wordpress-23/meta/foo", - contentType: "application/json", - body: `"foo`, - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Message: `cannot unmarshal body: unexpected EOF`, - }, -}, { - about: "bad JSON with bulk meta", - urlStr: "/meta/foo", - contentType: "application/json", - body: `"foo`, - expectCode: http.StatusInternalServerError, - expectBody: params.Error{ - Message: `cannot unmarshal body: unexpected EOF`, - }, -}} - -func (s *RouterSuite) TestRouterPutWithInvalidContent(c *gc.C) { - for i, test := range routerPutWithInvalidContentTests { - c.Logf("test %d: %s", i, test.about) - handlers := &Handlers{ - Meta: map[string]BulkIncludeHandler{ - "foo": testMetaHandler(0), - }, - } - router := New(handlers, alwaysResolveURL, alwaysAuthorize, alwaysExists) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: router, - URL: test.urlStr, - Body: strings.NewReader(test.body), - Method: "PUT", - Header: map[string][]string{ - "Content-Type": {test.contentType}, - }, - ExpectStatus: test.expectCode, - ExpectBody: test.expectBody, - }) - } -} - -func alwaysExists(id *ResolvedURL, req *http.Request) (bool, error) { - return true, nil -} - -func alwaysAuthorize(id *ResolvedURL, req *http.Request) error { - return nil -} - -func neverAuthorize(id *ResolvedURL, req *http.Request) error { - return errgo.WithCausef(nil, params.ErrUnauthorized, "bad wolf") -} - -func dischargeRequiredAuthorize(id *ResolvedURL, req *http.Request) error { - if id.String() == "cs:utopic/foo-32" { - return nil - } - return httpbakery.NewDischargeRequiredError(nil, "/", errgo.New("discharge required")) -} - -var getMetadataTests = []struct { - id *ResolvedURL - includes []string - expectResult map[string]interface{} - expectError string -}{{ - id: newResolvedURL("~charmers/precise/wordpress-34", 34), - includes: []string{}, - expectResult: map[string]interface{}{}, -}, { - id: newResolvedURL("~rog/precise/wordpress-2", -1), - includes: []string{"item1", "item2", "test"}, - expectResult: map[string]interface{}{ - "item1": fieldSelectHandleGetInfo{ - HandlerId: "handler1", - Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), - Selector: map[string]int{"item1": 1, "item2": 1}, - }, - Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), - }, - "item2": fieldSelectHandleGetInfo{ - HandlerId: "handler2", - 
Doc: fieldSelectQueryInfo{ - Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), - Selector: map[string]int{"item1": 1, "item2": 1}, - }, - Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), - }, - "test": &metaHandlerTestResp{ - CharmURL: "cs:~rog/precise/wordpress-2", - }, - }, -}, { - id: newResolvedURL("~rog/precise/wordpress-2", -1), - includes: []string{"mistaek"}, - expectError: `unrecognized metadata name "mistaek"`, -}} - -func (s *RouterSuite) TestGetMetadata(c *gc.C) { - for i, test := range getMetadataTests { - c.Logf("test %d: %q", i, test.includes) - router := New(&Handlers{ - Meta: map[string]BulkIncludeHandler{ - "item1": fieldSelectHandler("handler1", 0, "item1"), - "item2": fieldSelectHandler("handler2", 0, "item2"), - "test": testMetaHandler(0), - }, - }, alwaysResolveURL, alwaysAuthorize, alwaysExists) - result, err := router.GetMetadata(test.id, test.includes, nil) - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - c.Assert(result, gc.IsNil) - continue - } - c.Assert(err, gc.IsNil) - c.Assert(result, jc.DeepEquals, test.expectResult) - } -} - -var splitIdTests = []struct { - path string - expectURL string - expectError string -}{{ - path: "precise/wordpress-23", - expectURL: "cs:precise/wordpress-23", -}, { - path: "~user/precise/wordpress-23", - expectURL: "cs:~user/precise/wordpress-23", -}, { - path: "wordpress", - expectURL: "cs:wordpress", -}, { - path: "~user/wordpress", - expectURL: "cs:~user/wordpress", -}, { - path: "", - expectError: `charm URL has invalid charm name: ""`, -}, { - path: "~foo-bar-/wordpress", - expectError: `charm URL has invalid user name: "~foo-bar-/wordpress"`, -}} - -func (s *RouterSuite) TestSplitId(c *gc.C) { - for i, test := range splitIdTests { - c.Logf("test %d: %s", i, test.path) - url, rest, err := splitId(test.path) - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - c.Assert(url, gc.IsNil) - c.Assert(rest, gc.Equals, "") - continue - } - c.Assert(url.String(), gc.Equals, test.expectURL) - c.Assert(rest, gc.Equals, "") - - url, rest, err = splitId(test.path + "/some/more") - c.Assert(err, gc.Equals, nil) - c.Assert(url.String(), gc.Equals, test.expectURL) - c.Assert(rest, gc.Equals, "/some/more") - } -} - -var handlerKeyTests = []struct { - path string - expectKey string - expectRest string -}{{ - path: "/foo/bar", - expectKey: "foo/", - expectRest: "/bar", -}, { - path: "/foo", - expectKey: "foo", - expectRest: "", -}, { - path: "/foo/bar/baz", - expectKey: "foo/", - expectRest: "/bar/baz", -}, { - path: "/foo/", - expectKey: "foo", - expectRest: "", -}, { - path: "foo/", - expectKey: "foo", - expectRest: "", -}} - -func (s *RouterSuite) TestHandlerKey(c *gc.C) { - for i, test := range handlerKeyTests { - c.Logf("test %d: %s", i, test.path) - key, rest := handlerKey(test.path) - c.Assert(key, gc.Equals, test.expectKey) - c.Assert(rest, gc.Equals, test.expectRest) - } -} - -var splitPathTests = []struct { - path string - index int - expectElem string - expectRest string -}{{ - path: "/foo/bar", - expectElem: "foo", - expectRest: "/bar", -}, { - path: "foo/bar", - expectElem: "foo", - expectRest: "/bar", -}, { - path: "foo/", - expectElem: "foo", - expectRest: "/", -}, { - path: "/foo/bar/baz", - expectElem: "foo", - expectRest: "/bar/baz", -}, { - path: "/foo", - expectElem: "foo", - expectRest: "", -}, { - path: "/foo/bar/baz", - index: 4, - expectElem: "bar", - expectRest: "/baz", -}} - -func (s *RouterSuite) TestSplitPath(c *gc.C) { - for i, test := range 
splitPathTests { - c.Logf("test %d: %s", i, test.path) - elem, index := splitPath(test.path, test.index) - c.Assert(elem, gc.Equals, test.expectElem) - c.Assert(index, jc.LessThan, len(test.path)+1) - c.Assert(test.path[index:], gc.Equals, test.expectRest) - } -} - -func (s *RouterSuite) TestWriteJSON(c *gc.C) { - rec := httptest.NewRecorder() - type Number struct { - N int - } - err := jsonhttp.WriteJSON(rec, http.StatusTeapot, Number{1234}) - c.Assert(err, gc.IsNil) - c.Assert(rec.Code, gc.Equals, http.StatusTeapot) - c.Assert(rec.Body.String(), gc.Equals, `{"N":1234}`) - c.Assert(rec.Header().Get("content-type"), gc.Equals, "application/json") -} - -func (s *RouterSuite) TestWriteError(c *gc.C) { - rec := httptest.NewRecorder() - WriteError(rec, errgo.Newf("an error")) - var errResp params.Error - err := json.Unmarshal(rec.Body.Bytes(), &errResp) - c.Assert(err, gc.IsNil) - c.Assert(errResp, gc.DeepEquals, params.Error{Message: "an error"}) - c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) - - rec = httptest.NewRecorder() - errResp0 := params.Error{ - Message: "a message", - Code: "some code", - } - WriteError(rec, &errResp0) - var errResp1 params.Error - err = json.Unmarshal(rec.Body.Bytes(), &errResp1) - c.Assert(err, gc.IsNil) - c.Assert(errResp1, gc.DeepEquals, errResp0) - c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) -} - -func (s *RouterSuite) TestServeMux(c *gc.C) { - mux := NewServeMux() - mux.Handle("/data", HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return Foo{"hello"}, nil - })) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: mux, - URL: "/data", - ExpectBody: Foo{"hello"}, - }) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: mux, - URL: "/foo", - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Message: `no handler for "/foo"`, - Code: params.ErrNotFound, - }, - }) -} - -var handlerTests = []struct { - about string - handler http.Handler - urlStr string - expectStatus int - expectBody interface{} -}{{ - about: "handleErrors, normal error", - handler: HandleErrors(func(http.ResponseWriter, *http.Request) error { - return errgo.Newf("an error") - }), - urlStr: "", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "an error", - }, -}, { - about: "handleErrors, error with code", - handler: HandleErrors(func(http.ResponseWriter, *http.Request) error { - return &params.Error{ - Message: "something went wrong", - Code: "snafu", - } - }), - urlStr: "", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "something went wrong", - Code: "snafu", - }, -}, { - about: "handleErrors, no error", - handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { - w.WriteHeader(http.StatusTeapot) - return nil - }), - expectStatus: http.StatusTeapot, -}, { - about: "handleErrors, params error", - handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { - return params.ErrMetadataNotFound - }), - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Message: "metadata not found", - Code: params.ErrMetadataNotFound, - }, -}, { - about: "handleErrors, wrapped params error", - handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { - err := params.ErrMetadataNotFound - return errgo.NoteMask(err, "annotation", errgo.Is(params.ErrMetadataNotFound)) - }), - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Message: "annotation:
metadata not found", - Code: params.ErrMetadataNotFound, - }, -}, { - about: "handleErrors: error - bad request", - handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { - return params.ErrBadRequest - }), - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Message: "bad request", - Code: params.ErrBadRequest, - }, -}, { - about: "handleErrors: error - forbidden", - handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { - return params.ErrForbidden - }), - expectStatus: http.StatusForbidden, - expectBody: params.Error{ - Message: "forbidden", - Code: params.ErrForbidden, - }, -}, { - about: "handleJSON, normal case", - handler: HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return Foo{"hello"}, nil - }), - expectStatus: http.StatusOK, - expectBody: Foo{"hello"}, -}, { - about: "handleJSON, error case", - handler: HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { - return nil, errgo.Newf("an error") - }), - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: "an error", - }, -}, { - about: "NotFoundHandler", - handler: NotFoundHandler(), - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Message: "not found", - Code: params.ErrNotFound, - }, -}} - -type Foo struct { - S string -} - -type ReqInfo struct { - Path string - Method string - Form url.Values `json:",omitempty"` -} - -func (s *RouterSuite) TestHandlers(c *gc.C) { - for i, test := range handlerTests { - c.Logf("test %d: %s", i, test.about) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: test.handler, - URL: "", - ExpectStatus: test.expectStatus, - ExpectBody: test.expectBody, - }) - } -} - -func (s *RouterSuite) TestResolvedURLPreferredURL(c *gc.C) { - r := MustNewResolvedURL("~charmers/precise/wordpress-23", 4) - // Ensure it's not aliased. - u := r.PreferredURL() - c.Assert(u, gc.DeepEquals, charm.MustParseReference("precise/wordpress-4")) - u.Series = "foo" - c.Assert(r.URL.Series, gc.Equals, "precise") - - r = MustNewResolvedURL("~charmers/precise/wordpress-23", -1) - // Ensure it's not aliased. 
- u = r.PreferredURL() - c.Assert(u, gc.DeepEquals, charm.MustParseReference("~charmers/precise/wordpress-23")) - u.Series = "foo" - c.Assert(r.URL.Series, gc.Equals, "precise") -} - -func errorIdHandler(charmId *charm.Reference, w http.ResponseWriter, req *http.Request) error { - return errgo.Newf("errorIdHandler error") -} - -type idHandlerTestResp struct { - Method string - CharmURL string - Path string -} - -func testIdHandler(charmId *charm.Reference, w http.ResponseWriter, req *http.Request) error { - jsonhttp.WriteJSON(w, http.StatusOK, idHandlerTestResp{ - CharmURL: charmId.String(), - Path: req.URL.Path, - Method: req.Method, - }) - return nil -} - -type metaHandlerTestResp struct { - CharmURL string - Path string - Flags url.Values -} - -var testMetaGetHandler = SingleIncludeHandler( - func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - if len(flags) == 0 { - flags = nil - } - return &metaHandlerTestResp{ - CharmURL: id.String(), - Path: path, - Flags: flags, - }, nil - }, -) - -type testMetaHandler int - -func (testMetaHandler) Key() interface{} { - type testMetaHandlerKey struct{} - return testMetaHandlerKey{} -} - -func (testMetaHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { - results := make([]interface{}, len(hs)) - for i, h := range hs { - _ = h.(testMetaHandler) - if len(flags) == 0 { - flags = nil - } - results[i] = &metaHandlerTestResp{ - CharmURL: id.String(), - Path: paths[i], - Flags: flags, - } - } - return results, nil -} - -type metaHandlerTestPutParams struct { - Id string - NumHandlers int - Paths []string - Values []interface{} -} - -func (testMetaHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, rawValues []*json.RawMessage, req *http.Request) []error { - // Handlers are provided in arbitrary order, - // so we order them (and their associated paths - // and values) to enable easier testing. - keys := make(sort.StringSlice, len(hs)) - for i, h := range hs { - // Sort by handler primary, path secondary. - keys[i] = fmt.Sprintf("%d.%s", int(h.(testMetaHandler)), paths[i]) - } - sort.Sort(groupSort{ - key: keys, - other: []swapper{ - sort.StringSlice(paths), - swapFunc(func(i, j int) { - rawValues[i], rawValues[j] = rawValues[j], rawValues[i] - }), - }, - }) - - values := make([]interface{}, len(rawValues)) - for i, val := range rawValues { - err := json.Unmarshal(*val, &values[i]) - if err != nil { - panic(err) - } - } - RecordCall(metaHandlerTestPutParams{ - NumHandlers: len(hs), - Id: id.String(), - Paths: paths, - Values: values, - }) - return nil -} - -// constMetaHandler returns a handler that always returns the given -// value. 
-func constMetaHandler(val interface{}) BulkIncludeHandler { - return SingleIncludeHandler( - func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return val, nil - }, - ) -} - -func errorMetaHandler(err error) BulkIncludeHandler { - return SingleIncludeHandler( - func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return nil, err - }, - ) -} - -type fieldSelectQueryInfo struct { - Id *ResolvedURL - Selector map[string]int -} - -type fieldSelectHandleGetInfo struct { - HandlerId string - Doc fieldSelectQueryInfo - Id *ResolvedURL - Path string - Flags url.Values -} - -type fieldSelectHandleUpdateInfo struct { - Id string - Fields map[string]fieldSelectHandlePutInfo -} - -type fieldSelectHandlePutInfo struct { - Id string - Path string - Value interface{} -} - -var queryCount int32 - -var ( - callRecordsMutex sync.Mutex - callRecords byJSON -) - -// RecordCall adds a value that can be retrieved later with -// RecordedCalls. -// -// This is used to check the parameters passed to -// handlers that do not return results. -func RecordCall(x interface{}) { - callRecordsMutex.Lock() - defer callRecordsMutex.Unlock() - callRecords = append(callRecords, x) -} - -// ResetRecordedCalls clears the call records. -func ResetRecordedCalls() { - callRecordsMutex.Lock() - defer callRecordsMutex.Unlock() - callRecords = nil -} - -// RecordedCalls returns the values passed to RecordCall, -// ordered by their JSON serialization. -func RecordedCalls() []interface{} { - callRecordsMutex.Lock() - defer callRecordsMutex.Unlock() - - sort.Sort(callRecords) - return callRecords -} - -// byJSON implements sort.Interface, ordering its -// elements lexicographically by marshaled JSON -// representation. -type byJSON []interface{} - -func (b byJSON) Less(i, j int) bool { - idata, err := json.Marshal(b[i]) - if err != nil { - panic(err) - } - jdata, err := json.Marshal(b[j]) - if err != nil { - panic(err) - } - return bytes.Compare(idata, jdata) < 0 -} - -func (b byJSON) Swap(i, j int) { - b[i], b[j] = b[j], b[i] -} - -func (b byJSON) Len() int { - return len(b) -} - -// fieldSelectHandler returns a BulkIncludeHandler that returns -// information about the call for testing purposes. -// When the GET handler is invoked, it returns a fieldSelectHandleGetInfo value -// with the given handlerId. The key argument holds the grouping key, -// and fields holds the fields to select. -// -// When the PUT handler is invoked, RecordCall is called with -// a fieldSelectHandlePutInfo value holding the parameters that were -// provided.
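-// -// Editorial example (added in this review pass; it is not part of the original file and simply mirrors how TestGetMetadata above wires these handlers up): -// -//	router := New(&Handlers{ -//		Meta: map[string]BulkIncludeHandler{ -//			"item1": fieldSelectHandler("handler1", 0, "item1"), -//			"item2": fieldSelectHandler("handler2", 0, "item2"), -//		}, -//	}, alwaysResolveURL, alwaysAuthorize, alwaysExists)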
-func fieldSelectHandler(handlerId string, key interface{}, fields ...string) BulkIncludeHandler { - query := func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { - atomic.AddInt32(&queryCount, 1) - return fieldSelectQueryInfo{ - Id: id, - Selector: selector, - }, nil - } - handleGet := func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - if len(flags) == 0 { - flags = nil - } - return fieldSelectHandleGetInfo{ - HandlerId: handlerId, - Doc: doc.(fieldSelectQueryInfo), - Id: id, - Path: path, - Flags: flags, - }, nil - } - - handlePut := func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { - var vali interface{} - err := json.Unmarshal(*val, &vali) - if err != nil { - panic(err) - } - for _, field := range fields { - updater.UpdateField(field+path, fieldSelectHandlePutInfo{ - Id: id.String(), - Value: vali, - }) - } - return nil - } - - update := func(id *ResolvedURL, fields map[string]interface{}) error { - // We record how update and handlePut have been called - // by calling RecordCall with the above - // parameters. The fields will have been created by - // handlePut, and therefore are known to contain - // fieldSelectHandlePutInfo values. We convert the - // values to static types so that it is more obvious - // what the values in fieldSelectHandleUpdateInfo.Fields - // contain. - infoFields := make(map[string]fieldSelectHandlePutInfo) - for name, val := range fields { - infoFields[name] = val.(fieldSelectHandlePutInfo) - } - RecordCall(fieldSelectHandleUpdateInfo{ - Id: id.String(), - Fields: infoFields, - }) - return nil - } - - return FieldIncludeHandler(FieldIncludeHandlerParams{ - Key: key, - Query: query, - Fields: fields, - HandleGet: handleGet, - HandlePut: handlePut, - Update: update, - }) -} - -// selectiveIdHandler handles metadata by returning the -// data found in the map for the requested id. -func selectiveIdHandler(m map[string]interface{}) BulkIncludeHandler { - return SingleIncludeHandler(func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return m[id.String()], nil - }) -} - -type swapper interface { - Swap(i, j int) -} - -type swapFunc func(i, j int) - -func (f swapFunc) Swap(i, j int) { - f(i, j) -} - -// groupSort is an implementation of sort.Interface -// that keeps a set of secondary values sorted according -// to the same criteria as key. -type groupSort struct { - key sort.Interface - other []swapper -} - -func (g groupSort) Less(i, j int) bool { - return g.key.Less(i, j) -} - -func (g groupSort) Swap(i, j int) { - g.key.Swap(i, j) - for _, o := range g.other { - o.Swap(i, j) - } -} - -func (g groupSort) Len() int { - return g.key.Len() -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/singleinclude.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/singleinclude.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/singleinclude.go 1970-01-01 00:00:00 +0000 @@ -1,52 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package router - -import ( - "encoding/json" - "net/http" - "net/url" - - "gopkg.in/errgo.v1" -) - -var _ BulkIncludeHandler = SingleIncludeHandler(nil) - -// SingleIncludeHandler implements BulkIncludeHandler for a non-batching -// metadata retrieval function that can perform a GET only.
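-// -// Editorial note (added in this review pass, not part of the removed file): because SingleIncludeHandler is a bare function type, a GET-only handler can be constructed inline, as testMetaGetHandler does in the tests above: -// -//	var handler BulkIncludeHandler = SingleIncludeHandler( -//		func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { -//			return &metaHandlerTestResp{CharmURL: id.String(), Path: path, Flags: flags}, nil -//		}, -//	)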
-type SingleIncludeHandler func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) - -// Key implements BulkIncludeHandler.Key. -func (h SingleIncludeHandler) Key() interface{} { - // Use a local type so that we are guaranteed that nothing - // other than SingleIncludeHandler can generate that key. - type singleMetaHandlerKey struct{} - return singleMetaHandlerKey(singleMetaHandlerKey{}) -} - -// HandleGet implements BulkIncludeHandler.HandleGet. -func (h SingleIncludeHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { - results := make([]interface{}, len(hs)) - for i, h := range hs { - h := h.(SingleIncludeHandler) - result, err := h(id, paths[i], flags, req) - if err != nil { - // TODO(rog) include index of failed handler. - return nil, errgo.Mask(err, errgo.Any) - } - results[i] = result - } - return results, nil -} - -var errPutNotImplemented = errgo.New("PUT not implemented") - -// HandlePut implements BulkIncludeHandler.HandlePut. -func (h SingleIncludeHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error { - errs := make([]error, len(hs)) - for i := range hs { - errs[i] = errPutNotImplemented - } - return errs -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/util.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/util.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/util.go 1970-01-01 00:00:00 +0000 @@ -1,174 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package router - -import ( - "fmt" - "net/http" - "strings" - - "github.com/juju/loggo" - "github.com/juju/utils/jsonhttp" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon-bakery.v0/httpbakery" - - "gopkg.in/juju/charmstore.v4/params" -) - -var logger = loggo.GetLogger("charmstore.internal.router") - -var ( - HandleErrors = jsonhttp.HandleErrors(errorToResp) - HandleJSON = jsonhttp.HandleJSON(errorToResp) - WriteError = jsonhttp.WriteError(errorToResp) -) - -func errorToResp(err error) (int, interface{}) { - status, body := errorToResp1(err) - logger.Infof("error response %d; %s", status, errgo.Details(err)) - return status, body -} - -func errorToResp1(err error) (int, interface{}) { - // Allow bakery errors to be returned as the bakery would - // like them, so that httpbakery.Client.Do will work. - if err, ok := errgo.Cause(err).(*httpbakery.Error); ok { - return httpbakery.ErrorToResponse(err) - } - errorBody := errorResponseBody(err) - status := http.StatusInternalServerError - switch errorBody.Code { - case params.ErrNotFound, params.ErrMetadataNotFound: - status = http.StatusNotFound - case params.ErrBadRequest: - status = http.StatusBadRequest - case params.ErrForbidden: - status = http.StatusForbidden - case params.ErrUnauthorized: - status = http.StatusUnauthorized - case params.ErrMethodNotAllowed: - // TODO(rog) from RFC 2616, section 10.4.6: An Allow header - // field MUST be present in a 405 (Method Not Allowed) - // response. - // Perhaps we should not ever return StatusMethodNotAllowed. - status = http.StatusMethodNotAllowed - } - return status, errorBody -} - -// errorResponseBody returns an appropriate error -// response body for the provided error.
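-// -// Editorial sketch (added in review, not original code): the Code field set here is what errorToResp1 switches on above, so a handler that returns -// -//	errgo.WithCausef(nil, params.ErrNotFound, "no such charm") -// -// produces an HTTP 404 response whose body is a params.Error with Code params.ErrNotFound and Message "no such charm".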
-func errorResponseBody(err error) *params.Error { - - errResp := &params.Error{ - Message: err.Error(), - } - cause := errgo.Cause(err) - if coder, ok := cause.(errorCoder); ok { - errResp.Code = coder.ErrorCode() - } - if infoer, ok := cause.(errorInfoer); ok { - errResp.Info = infoer.ErrorInfo() - } - return errResp -} - -type errorInfoer interface { - ErrorInfo() map[string]*params.Error -} - -type errorCoder interface { - ErrorCode() params.ErrorCode -} - -// multiError holds multiple errors. -type multiError map[string]error - -func (err multiError) Error() string { - return fmt.Sprintf("multiple (%d) errors", len(err)) -} - -func (err multiError) ErrorCode() params.ErrorCode { - return params.ErrMultipleErrors -} - -func (err multiError) ErrorInfo() map[string]*params.Error { - m := make(map[string]*params.Error) - for key, err := range err { - m[key] = errorResponseBody(err) - } - return m -} - -// NotFoundHandler is like http.NotFoundHandler except it -// returns a JSON error response. -func NotFoundHandler() http.Handler { - return HandleErrors(func(w http.ResponseWriter, req *http.Request) error { - return errgo.WithCausef(nil, params.ErrNotFound, params.ErrNotFound.Error()) - }) -} - -func NewServeMux() *ServeMux { - return &ServeMux{http.NewServeMux()} -} - -// ServeMux is like http.ServeMux but returns -// JSON errors when pages are not found. -type ServeMux struct { - *http.ServeMux -} - -func (mux *ServeMux) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if req.RequestURI == "*" { - mux.ServeMux.ServeHTTP(w, req) - return - } - h, pattern := mux.Handler(req) - if pattern == "" { - WriteError(w, errgo.WithCausef(nil, params.ErrNotFound, "no handler for %q", req.URL.Path)) - return - } - h.ServeHTTP(w, req) -} - -// RelativeURLPath returns a relative URL path that is lexically equivalent to -// targPath when interpreted by url.URL.ResolveReference. -// On success, the returned path will always be relative to basePath, even if basePath -// and targPath share no elements. An error is returned if targPath can't -// be made relative to basePath (for example when either basePath -// or targPath are non-absolute). -func RelativeURLPath(basePath, targPath string) (string, error) { - if !strings.HasPrefix(basePath, "/") { - return "", errgo.Newf("non-absolute base URL") - } - if !strings.HasPrefix(targPath, "/") { - return "", errgo.Newf("non-absolute target URL") - } - baseParts := strings.Split(basePath, "/") - targParts := strings.Split(targPath, "/") - - // For the purposes of dotdot, the last element of - // each path is irrelevant. We save the last part - // of the target path for later. - lastElem := targParts[len(targParts)-1] - baseParts = baseParts[0 : len(baseParts)-1] - targParts = targParts[0 : len(targParts)-1] - - // Find the common prefix between the two paths: - var i int - for ; i < len(baseParts); i++ { - if i >= len(targParts) || baseParts[i] != targParts[i] { - break - } - } - dotdotCount := len(baseParts) - i - targOnly := targParts[i:] - result := make([]string, 0, dotdotCount+len(targOnly)+1) - for i := 0; i < dotdotCount; i++ { - result = append(result, "..") - } - result = append(result, targOnly...)
- result = append(result, lastElem) - return strings.Join(result, "/"), nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/router/util_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/router/util_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/router/util_test.go 1970-01-01 00:00:00 +0000 @@ -1,124 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package router_test - -import ( - "net/url" - - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/router" -) - -type utilSuite struct{} - -var _ = gc.Suite(&utilSuite{}) -var relativeURLTests = []struct { - base string - target string - expect string - expectError string -}{{ - expectError: "non-absolute base URL", -}, { - base: "/foo", - expectError: "non-absolute target URL", -}, { - base: "foo", - expectError: "non-absolute base URL", -}, { - base: "/foo", - target: "foo", - expectError: "non-absolute target URL", -}, { - base: "/foo", - target: "/bar", - expect: "bar", -}, { - base: "/foo/", - target: "/bar", - expect: "../bar", -}, { - base: "/foo/", - target: "/bar/", - expect: "../bar/", -}, { - base: "/foo/bar", - target: "/bar/", - expect: "../bar/", -}, { - base: "/foo/bar/", - target: "/bar/", - expect: "../../bar/", -}, { - base: "/foo/bar/baz", - target: "/foo/targ", - expect: "../targ", -}, { - base: "/foo/bar/baz/frob", - target: "/foo/bar/one/two/", - expect: "../one/two/", -}, { - base: "/foo/bar/baz/", - target: "/foo/targ", - expect: "../../targ", -}, { - base: "/foo/bar/baz/frob/", - target: "/foo/bar/one/two/", - expect: "../../one/two/", -}, { - base: "/foo/bar", - target: "/foot/bar", - expect: "../foot/bar", -}, { - base: "/foo/bar/baz/frob", - target: "/foo/bar", - expect: "../../bar", -}, { - base: "/foo/bar/baz/frob/", - target: "/foo/bar", - expect: "../../../bar", -}, { - base: "/foo/bar/baz/frob/", - target: "/foo/bar/", - expect: "../../", -}, { - base: "/foo/bar/baz", - target: "/foo/bar/other", - expect: "other", -}, { - base: "/foo/bar/", - target: "/foo/bar/", - expect: "", -}, { - base: "/foo/bar", - target: "/foo/bar", - expect: "bar", -}, { - base: "/foo/bar/", - target: "/foo/bar/", - expect: "", -}} - -func (*utilSuite) TestRelativeURL(c *gc.C) { - for i, test := range relativeURLTests { - c.Logf("test %d: %q %q", i, test.base, test.target) - // Sanity check the test itself. 
- if test.expectError == "" { - baseURL := &url.URL{Path: test.base} - expectURL := &url.URL{Path: test.expect} - targetURL := baseURL.ResolveReference(expectURL) - c.Check(targetURL.Path, gc.Equals, test.target, gc.Commentf("resolve reference failure")) - } - - result, err := router.RelativeURLPath(test.base, test.target) - if test.expectError != "" { - c.Assert(err, gc.ErrorMatches, test.expectError) - c.Assert(result, gc.Equals, "") - } else { - c.Assert(err, gc.IsNil) - c.Check(result, gc.Equals, test.expect) - } - } -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad/README.md' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad/README.md 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -A dummy bundle === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad/bundle.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/bad/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -# This bundle has a bad relation, which will cause it to fail -# its verification. -services: - wordpress: - charm: wordpress - num_units: 1 - mysql: - charm: mysql - num_units: 1 -relations: - - ["foo:db", "mysql:server"] === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack/README.md' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack/README.md 1970-01-01 00:00:00 +0000 @@ -1,46 +0,0 @@ -OpenStack Bundle for Juju -========================= - -Overview -------- - -This bundle deploys a reference OpenStack architecture including all core projects: - - - OpenStack Compute - - OpenStack Networking (using Open vSwitch plugin) - - OpenStack Block Storage (backed with Ceph storage) - - OpenStack Image - - OpenStack Object Storage - - OpenStack Identity - - OpenStack Dashboard - - OpenStack Telemetry - - OpenStack Orchestration - -The charm configuration is an opinionated set for deploying OpenStack for testing on cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage). - -The Ubuntu Server Team uses this bundle for testing OpenStack-on-OpenStack. - -Usage ----- - -Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard: - - http://<openstack-dashboard-host>/horizon - -The charms configure the 'admin' user with a password of 'openstack' by default. - -The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images.
Read the OpenStack User Guide on how to configure your cloud for use: - - http://docs.openstack.org/user-guide/content/ - -Niggles ------- - -The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. It's possible to do this when testing on OpenStack by adding a second network interface to the neutron-gateway service: - - nova interface-attach --net-id <net-id> <server-id> - juju set neutron-gateway ext-port=eth1 - -Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin. - -For actual OpenStack deployments, this service would reside on a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud). === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack/bundle.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/openstack/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,202 +0,0 @@ -series: precise -services: - mysql: - charm: cs:precise/mysql - constraints: mem=1G - options: - dataset-size: 50% - rabbitmq-server: - charm: cs:precise/rabbitmq-server - constraints: mem=1G - ceph: - charm: cs:precise/ceph - num_units: 3 - constraints: mem=1G - options: - monitor-count: 3 - fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc - monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== - osd-devices: /dev/vdb - osd-reformat: "yes" - ephemeral-unmount: /mnt - keystone: - charm: cs:precise/keystone - constraints: mem=1G - options: - admin-password: openstack - admin-token: ubuntutesting - openstack-dashboard: - charm: cs:precise/openstack-dashboard - constraints: mem=1G - nova-compute: - charm: cs:precise/nova-compute - num_units: 3 - constraints: mem=4G - options: - config-flags: "auto_assign_floating_ip=False" - enable-live-migration: False - virt-type: kvm - nova-cloud-controller: - charm: cs:precise/nova-cloud-controller - constraints: mem=1G - options: - network-manager: Neutron - quantum-security-groups: "yes" - neutron-gateway: - charm: cs:precise/quantum-gateway - constraints: mem=1G - cinder: - charm: cs:precise/cinder - options: - block-device: "None" - constraints: mem=1G - glance: - charm: cs:precise/glance - constraints: mem=1G - swift-proxy: - charm: cs:precise/swift-proxy - constraints: mem=1G - options: - zone-assignment: manual - replicas: 3 - use-https: 'no' - swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae - swift-storage-z1: - charm: cs:precise/swift-storage - constraints: mem=1G - options: - zone: 1 - block-device: vdb - overwrite: "true" - swift-storage-z2: - charm: cs:precise/swift-storage - constraints: mem=1G - options: - zone: 2 - block-device: vdb - overwrite: "true" - swift-storage-z3: - charm: cs:precise/swift-storage - constraints: mem=1G - options: - zone: 3 - block-device: vdb - overwrite: "true" - ceilometer: - charm: cs:precise/ceilometer - constraints: mem=1G - ceilometer-agent: - charm: cs:precise/ceilometer-agent - mongodb: - charm: cs:precise/mongodb - constraints: mem=1G - heat: - charm: cs:precise/heat - constraints: mem=1G - ntp: - charm:
cs:precise/ntp -relations: - - - keystone:shared-db - - mysql:shared-db - - - nova-cloud-controller:shared-db - - mysql:shared-db - - - nova-cloud-controller:amqp - - rabbitmq-server:amqp - - - nova-cloud-controller:image-service - - glance:image-service - - - nova-cloud-controller:identity-service - - keystone:identity-service - - - nova-compute:cloud-compute - - nova-cloud-controller:cloud-compute - - - nova-compute:shared-db - - mysql:shared-db - - - nova-compute:amqp - - rabbitmq-server:amqp - - - nova-compute:image-service - - glance:image-service - - - nova-compute:ceph - - ceph:client - - - glance:shared-db - - mysql:shared-db - - - glance:identity-service - - keystone:identity-service - - - glance:ceph - - ceph:client - - - glance:image-service - - cinder:image-service - - - cinder:shared-db - - mysql:shared-db - - - cinder:amqp - - rabbitmq-server:amqp - - - cinder:cinder-volume-service - - nova-cloud-controller:cinder-volume-service - - - cinder:identity-service - - keystone:identity-service - - - cinder:ceph - - ceph:client - - - neutron-gateway:shared-db - - mysql:shared-db - - - neutron-gateway:amqp - - rabbitmq-server:amqp - - - neutron-gateway:quantum-network-service - - nova-cloud-controller:quantum-network-service - - - openstack-dashboard:identity-service - - keystone:identity-service - - - swift-proxy:identity-service - - keystone:identity-service - - - swift-proxy:swift-storage - - swift-storage-z1:swift-storage - - - swift-proxy:swift-storage - - swift-storage-z2:swift-storage - - - swift-proxy:swift-storage - - swift-storage-z3:swift-storage - - - ceilometer:identity-service - - keystone:identity-service - - - ceilometer:amqp - - rabbitmq-server:amqp - - - ceilometer:shared-db - - mongodb:database - - - ceilometer-agent:nova-ceilometer - - nova-compute:nova-ceilometer - - - ceilometer-agent:ceilometer-service - - ceilometer:ceilometer-service - - - heat:identity-service - - keystone:identity-service - - - heat:shared-db - - mysql:shared-db - - - heat:amqp - - rabbitmq-server:amqp - - - ntp:juju-info - - nova-compute:juju-info - - - ntp:juju-info - - nova-cloud-controller:juju-info - - - ntp:juju-info - - neutron-gateway:juju-info - - - ntp:juju-info - - ceph:juju-info - - - ntp:juju-info - - cinder:juju-info - - - ntp:juju-info - - keystone:juju-info - - - ntp:juju-info - - glance:juju-info - - - ntp:juju-info - - swift-proxy:juju-info - - - ntp:juju-info - - swift-storage-z1:juju-info - - - ntp:juju-info - - swift-storage-z2:juju-info - - - ntp:juju-info - - swift-storage-z3:juju-info - - - ntp:juju-info - - ceilometer:juju-info - - - ntp:juju-info - - mongodb:juju-info - - - ntp:juju-info - - rabbitmq-server:juju-info - - - ntp:juju-info - - mysql:juju-info - - - ntp:juju-info - - openstack-dashboard:juju-info - - - ntp:juju-info - - heat:juju-info === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple/README.md' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple/README.md 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -A dummy bundle === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple/bundle.yaml' --- 
src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-simple/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,9 +0,0 @@ -services: - wordpress: - charm: wordpress - num_units: 1 - mysql: - charm: mysql - num_units: 1 -relations: - - ["wordpress:db", "mysql:server"] === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging/README.md' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging/README.md 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -A dummy bundle === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging/bundle.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging/bundle.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/bundle/wordpress-with-logging/bundle.yaml 1970-01-01 00:00:00 +0000 @@ -1,13 +0,0 @@ -services: - wordpress: - charm: wordpress - num_units: 1 - mysql: - charm: mysql - num_units: 1 - logging: - charm: logging -relations: - - ["wordpress:db", "mysql:server"] - - ["wordpress:juju-info", "logging:info"] - - ["mysql:juju-info", "logging:info"] === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-broken' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-broken 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-broken 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joined' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joined 2015-09-22 15:27:01 
+0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joined 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metrics' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metrics 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metrics 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-broken' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-broken 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-broken 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joined' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joined 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joined 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/install' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/install 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/install 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changed 2015-09-22 15:27:01 +0000 +++ 
src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdata' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdata 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdata 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -some text === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-broken' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-broken 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-broken 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departed' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departed 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departed 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joined' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joined 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joined 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/start' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/start 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/start 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stop' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stop 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stop 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuff' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuff 2015-09-22 15:27:01 +0000 +++ 
src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuff 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -non hook related stuff === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charm' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charm 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charm 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/sh -echo $0 === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,12 +0,0 @@ -name: all-hooks -summary: "That's a dummy charm with hook scripts for all types of hooks." -description: "This is a longer description." -provides: - foo: - interface: phony -requires: - bar: - interface: fake -peers: - self: - interface: dummy === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/all-hooks/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/.dir' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/.dir/ignored' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/.ignored' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/.ignored 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/.ignored 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -# \ No newline at end of file === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/category/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,6 +0,0 @@ -name: categories -summary: "Sample charm with a category" -description: | - That's a boring charm that has a category.
-categories: ["database"] -tags: ["openstack", "storage"] \ No newline at end of file === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/.dir' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/.dir/ignored' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/.ignored' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/.ignored 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/.ignored 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -# \ No newline at end of file === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/actions.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/actions.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/actions.yaml 1970-01-01 00:00:00 +0000 @@ -1,7 +0,0 @@ -snapshot: - description: Take a snapshot of the database. - params: - outfile: - description: The file to write out to. - type: string - default: foo.bz2 === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/build' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/build/ignored' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/config.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/config.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/config.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -options: - title: {default: My Title, description: A descriptive title used for the service., type: string} - outlook: {description: No default outlook., type: string} - username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} - skill-level: {description: A number indicating skill., type: int} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/empty' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/empty/.gitkeep' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/hooks' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/hooks/install' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/hooks/install 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/hooks/install 1970-01-01 00:00:00 +0000 @@ -1,2 +0,0 @@ -#!/bin/bash -echo "Done!" === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: dummy -summary: "That's a dummy charm." -description: | - This is a longer description which - potentially contains multiple lines. 
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/src' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/src/hello.c' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/src/hello.c 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/dummy/src/hello.c 1970-01-01 00:00:00 +0000 @@ -1,7 +0,0 @@ -#include <stdio.h> - -main() -{ - printf ("Hello World!\n"); - return 0; -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/hooks' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/hooks/.gitkeep' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -name: logging -summary: "Subordinate logging test charm" -description: | - This is a longer description which - potentially contains multiple lines.
-subordinate: true -provides: - logging-client: - interface: logging -requires: - logging-directory: - interface: logging - scope: container - info: - interface: juju-info - scope: container === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/logging/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: mysql -summary: "Database engine" -description: "A pretty popular database" -provides: - server: mysql === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/mysql/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -name: riak -summary: "K/V storage engine" -description: "Scalable K/V Store in Erlang with Clocks :-)" -provides: - endpoint: - interface: http - admin: - interface: http -peers: - ring: - interface: riak === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/riak/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -7 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,5 +0,0 @@ -name: varnish -summary: "Database engine" -description: "Another popular database" -provides: - webcache: varnish === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/varnish/revision 1970-01-01 00:00:00 +0000 @@ 
-1,1 +0,0 @@ -1 \ No newline at end of file === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress' === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/actions' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/actions/.gitkeep' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/config.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/config.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/config.yaml 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -options: - blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} - === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/hooks' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/hooks/.gitkeep' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/metadata.yaml' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/metadata.yaml 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/metadata.yaml 1970-01-01 00:00:00 +0000 @@ -1,23 +0,0 @@ -name: wordpress -summary: "Blog engine" -description: "A pretty popular blog engine" -provides: - url: - interface: http - limit: - optional: false - logging-dir: - interface: logging - scope: container - monitoring-port: - interface: monitoring - scope: container -requires: - db: - interface: mysql - limit: 1 - optional: false - cache: - interface: varnish - limit: 2 - optional: true === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/revision' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/revision 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm-repo/quantal/wordpress/revision 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -3 \ No newline at end of file === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/charm.go 1970-01-01 00:00:00 +0000 @@ -1,10 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storetesting - -import ( - "gopkg.in/juju/charm.v5/testing" -) - -var Charms = testing.NewRepo("charm-repo", "quantal") === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/elasticsearch.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/elasticsearch.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/elasticsearch.go 1970-01-01 00:00:00 +0000 @@ -1,76 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storetesting - -import ( - "os" - "time" - - "github.com/juju/utils" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/elasticsearch" -) - -// ElasticSearchSuite defines a test suite that connects to an -// elastic-search server. 
The address of the server depends on the value -// of the JUJU_TEST_ELASTICSEARCH environment variable, which can be -// "none" (do not start or connect to a server) or host:port holding the -// address and port of the server to connect to. If -// JUJU_TEST_ELASTICSEARCH is not specified then localhost:9200 will be -// used. -type ElasticSearchSuite struct { - ES *elasticsearch.Database - indexes []string - TestIndex string -} - -var jujuTestElasticSearch = os.Getenv("JUJU_TEST_ELASTICSEARCH") - -func (s *ElasticSearchSuite) SetUpSuite(c *gc.C) { - serverAddr := jujuTestElasticSearch - switch serverAddr { - case "none": - c.Skip("elasticsearch disabled") - case "": - serverAddr = ":9200" - } - s.ES = &elasticsearch.Database{serverAddr} -} - -func (s *ElasticSearchSuite) TearDownSuite(c *gc.C) { -} - -func (s *ElasticSearchSuite) SetUpTest(c *gc.C) { - s.TestIndex = s.NewIndex(c) -} - -func (s *ElasticSearchSuite) TearDownTest(c *gc.C) { - for _, index := range s.indexes { - s.ES.DeleteIndex(index + "*") - s.ES.DeleteDocument(".versions", "version", index) - } - s.indexes = nil -} - -// NewIndex creates a new index name and ensures that it will be cleaned up at -// end of the test. -func (s *ElasticSearchSuite) NewIndex(c *gc.C) string { - uuid, err := utils.NewUUID() - c.Assert(err, gc.IsNil) - id := time.Now().Format("20060102") + uuid.String() - s.indexes = append(s.indexes, id) - return id -} - -// LoadESConfig loads a canned test configuration to the specified index -func (s *ElasticSearchSuite) LoadESConfig(index string, settings, mapping interface{}) error { - if err := s.ES.PutIndex(index, settings); err != nil { - return err - } - if err := s.ES.PutMapping(index, "entity", mapping); err != nil { - return err - } - return nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/entities.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/entities.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/entities.go 1970-01-01 00:00:00 +0000 @@ -1,131 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storetesting - -import ( - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" -) - -// EntityBuilder provides a convenient way to describe a mongodoc.Entity -// for tests that is correctly formed and contains the desired -// information. -type EntityBuilder struct { - entity *mongodoc.Entity -} - -// NewEntity creates a new EntityBuilder for the provided URL. -func NewEntity(url string) EntityBuilder { - URL := charm.MustParseReference(url) - return EntityBuilder{ - entity: &mongodoc.Entity{ - URL: URL, - Name: URL.Name, - Series: URL.Series, - Revision: URL.Revision, - User: URL.User, - BaseURL: baseURL(URL), - PromulgatedRevision: -1, - }, - } -} - -func copyURL(id *charm.Reference) *charm.Reference { - if id == nil { - return nil - } - id1 := *id - return &id1 -} - -func (b EntityBuilder) copy() EntityBuilder { - e := *b.entity - e.PromulgatedURL = copyURL(e.PromulgatedURL) - e.URL = copyURL(e.URL) - e.BaseURL = copyURL(e.BaseURL) - return EntityBuilder{&e} -} - -// WithPromulgatedURL sets the PromulgatedURL and PromulgatedRevision of the -// entity being built. 
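Because every With* method on EntityBuilder copies its receiver before mutating (see the copy method above), calls chain safely without aliasing previously built entities. A minimal usage sketch, assuming the storetesting import path from this file; the charm ids are illustrative:

    e := storetesting.NewEntity("cs:~charmers/precise/wordpress-23").
        WithPromulgatedURL("cs:precise/wordpress-23").
        Build()
    // e.PromulgatedRevision now mirrors the revision of the promulgated URL (23).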
-func (b EntityBuilder) WithPromulgatedURL(url string) EntityBuilder { - b = b.copy() - if url == "" { - b.entity.PromulgatedURL = nil - b.entity.PromulgatedRevision = -1 - } else { - b.entity.PromulgatedURL = charm.MustParseReference(url) - b.entity.PromulgatedRevision = b.entity.PromulgatedURL.Revision - } - return b -} - -// Build creates a mongodoc.Entity from the EntityBuilder. -func (b EntityBuilder) Build() *mongodoc.Entity { - return b.copy().entity -} - -// AssertEntity checks that db contains an entity that matches expect. -func AssertEntity(c *gc.C, db *mgo.Collection, expect *mongodoc.Entity) { - var entity mongodoc.Entity - err := db.FindId(expect.URL).One(&entity) - c.Assert(err, gc.IsNil) - c.Assert(&entity, jc.DeepEquals, expect) -} - -// BaseEntityBuilder provides a convenient way to describe a -// mongodoc.BaseEntity for tests that is correctly formed and contains the -// desired information. -type BaseEntityBuilder struct { - baseEntity *mongodoc.BaseEntity -} - -// NewBaseEntity creates a new BaseEntityBuilder for the provided URL. -func NewBaseEntity(url string) BaseEntityBuilder { - URL := charm.MustParseReference(url) - return BaseEntityBuilder{ - baseEntity: &mongodoc.BaseEntity{ - URL: URL, - Name: URL.Name, - User: URL.User, - }, - } -} - -func (b BaseEntityBuilder) copy() BaseEntityBuilder { - e := *b.baseEntity - e.URL = copyURL(e.URL) - return BaseEntityBuilder{&e} -} - -// WithPromulgated sets the promulgated flag on the BaseEntity. -func (b BaseEntityBuilder) WithPromulgated(promulgated bool) BaseEntityBuilder { - b = b.copy() - b.baseEntity.Promulgated = mongodoc.IntBool(promulgated) - return b -} - -// Build creates a mongodoc.BaseEntity from the BaseEntityBuilder. -func (b BaseEntityBuilder) Build() *mongodoc.BaseEntity { - return b.copy().baseEntity -} - -// AssertBaseEntity checks that db contains a base entity that matches expect. -func AssertBaseEntity(c *gc.C, db *mgo.Collection, expect *mongodoc.BaseEntity) { - var baseEntity mongodoc.BaseEntity - err := db.FindId(expect.URL).One(&baseEntity) - c.Assert(err, gc.IsNil) - c.Assert(&baseEntity, jc.DeepEquals, expect) -} - -func baseURL(url *charm.Reference) *charm.Reference { - baseURL := *url - baseURL.Series = "" - baseURL.Revision = -1 - return &baseURL -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/flag.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/flag.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/flag.go 1970-01-01 00:00:00 +0000 @@ -1,25 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storetesting - -import ( - "flag" - "os" - - jujutesting "github.com/juju/testing" -) - -var noTestMongoJs *bool = flag.Bool("notest-mongojs", false, "Disable MongoDB tests that require JavaScript") - -func init() { - if os.Getenv("JUJU_NOTEST_MONGOJS") == "1" || jujutesting.MgoServer.WithoutV8 { - *noTestMongoJs = true - } -} - -// MongoJSEnabled reports whether testing code should run tests -// that rely on JavaScript inside MongoDB. 
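A test that depends on server-side JavaScript would typically consult this helper before running; a hedged sketch, in which the suite and test names are hypothetical:

    func (s *StoreSuite) TestMapReduceCounts(c *gc.C) {
        if !storetesting.MongoJSEnabled() {
            c.Skip("MongoDB JavaScript support is disabled")
        }
        // ... exercise queries that execute JavaScript inside MongoDB ...
    }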
-func MongoJSEnabled() bool { - return !*noTestMongoJs -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/hashtesting' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/hashtesting/hash.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/hashtesting/hash.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/hashtesting/hash.go 1970-01-01 00:00:00 +0000 @@ -1,60 +0,0 @@ -// Copyright 2015 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -// TODO frankban: remove this package after updating entities in the production -// db with their SHA256 hash value. Entities are updated by running the -// cshash256 command. - -package hashtesting - -import ( - "time" - - jujutesting "github.com/juju/testing" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/router" -) - -func CheckSHA256Laziness(c *gc.C, store *charmstore.Store, id *charm.Reference, check func()) { - updated := make(chan struct{}, 1) - - // Patch charmstore.UpdateEntitySHA256 so that we can know whether it has - // been called or not. - original := charmstore.UpdateEntitySHA256 - restore := jujutesting.PatchValue( - &charmstore.UpdateEntitySHA256, - func(store *charmstore.Store, id *router.ResolvedURL, sum256 string) { - original(store, id, sum256) - updated <- struct{}{} - }) - defer restore() - - // Update the entity, removing the SHA256 hash. - store.DB.Entities().UpdateId(id, bson.D{{ - "$set", bson.D{{"blobhash256", ""}}, - }}) - - // Run the code under test. - check() - - // Ensure the db is updated asynchronously. - select { - case <-updated: - case <-time.After(5 * time.Second): - c.Fatalf("timed out waiting for update") - } - - // Run the code under test again. - check() - - // We should not update the SHA256 the second time. - select { - case <-updated: - c.Fatalf("update called twice") - case <-time.After(10 * time.Millisecond): - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/json.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/json.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/json.go 1970-01-01 00:00:00 +0000 @@ -1,26 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package storetesting - -import ( - "bytes" - "encoding/json" - "io" -) - -// MustMarshalJSON marshals the specified value using json.Marshal and -// returns the corresponding byte slice. If there is an error marshalling -// the value then MustMarshalJSON will panic. -func MustMarshalJSON(v interface{}) []byte { - data, err := json.Marshal(v) - if err != nil { - panic(err) - } - return data -} - -// JSONReader creates an io.Reader which can read the marshalled value of v. -func JSONReader(v interface{}) io.Reader { - return bytes.NewReader(MustMarshalJSON(v)) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/stats' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/stats/stats.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/stats/stats.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/stats/stats.go 1970-01-01 00:00:00 +0000 @@ -1,54 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details.
- -package stats - -import ( - "time" - - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" -) - -// CheckCounterSum checks that statistics are properly collected. -// It retries a few times as they are generally collected in background. -func CheckCounterSum(c *gc.C, store *charmstore.Store, key []string, prefix bool, expected int64) { - var sum int64 - for retry := 0; retry < 10; retry++ { - time.Sleep(100 * time.Millisecond) - req := charmstore.CounterRequest{ - Key: key, - Prefix: prefix, - } - cs, err := store.Counters(&req) - c.Assert(err, gc.IsNil) - if sum = cs[0].Count; sum == expected { - if expected == 0 && retry < 2 { - continue // Wait a bit to make sure. - } - return - } - } - c.Errorf("counter sum for %#v is %d, want %d", key, sum, expected) -} - -// CheckSearchTotalDownloads checks that the search index is properly updated. -// It retries a few times as they are generally updated in background. -func CheckSearchTotalDownloads(c *gc.C, store *charmstore.Store, id *charm.Reference, expected int64) { - var doc *charmstore.SearchDoc - for retry := 0; retry < 10; retry++ { - var err error - time.Sleep(100 * time.Millisecond) - doc, err = store.ES.GetSearchDocument(id) - c.Assert(err, gc.IsNil) - if doc.TotalDownloads == expected { - if expected == 0 && retry < 2 { - continue // Wait a bit to make sure. - } - return - } - } - c.Errorf("total downloads for %#v is %d, want %d", id, doc.TotalDownloads, expected) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/storetesting/suite.go' --- src/gopkg.in/juju/charmstore.v4/internal/storetesting/suite.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/storetesting/suite.go 1970-01-01 00:00:00 +0000 @@ -1,64 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package storetesting - -import ( - jujutesting "github.com/juju/testing" - gc "gopkg.in/check.v1" -) - -type IsolatedMgoSuite struct { - jujutesting.IsolationSuite - jujutesting.MgoSuite -} - -func (s *IsolatedMgoSuite) SetUpSuite(c *gc.C) { - s.IsolationSuite.SetUpSuite(c) - s.MgoSuite.SetUpSuite(c) -} - -func (s *IsolatedMgoSuite) TearDownSuite(c *gc.C) { - s.MgoSuite.TearDownSuite(c) - s.IsolationSuite.TearDownSuite(c) -} - -func (s *IsolatedMgoSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.MgoSuite.SetUpTest(c) -} - -func (s *IsolatedMgoSuite) TearDownTest(c *gc.C) { - s.MgoSuite.TearDownTest(c) - s.IsolationSuite.TearDownTest(c) -} - -type IsolatedMgoESSuite struct { - jujutesting.IsolationSuite - jujutesting.MgoSuite - ElasticSearchSuite -} - -func (s *IsolatedMgoESSuite) SetUpSuite(c *gc.C) { - s.IsolationSuite.SetUpSuite(c) - s.ElasticSearchSuite.SetUpSuite(c) - s.MgoSuite.SetUpSuite(c) -} - -func (s *IsolatedMgoESSuite) TearDownSuite(c *gc.C) { - s.MgoSuite.TearDownSuite(c) - s.ElasticSearchSuite.TearDownSuite(c) - s.IsolationSuite.TearDownSuite(c) -} - -func (s *IsolatedMgoESSuite) SetUpTest(c *gc.C) { - s.IsolationSuite.SetUpTest(c) - s.ElasticSearchSuite.SetUpTest(c) - s.MgoSuite.SetUpTest(c) -} - -func (s *IsolatedMgoESSuite) TearDownTest(c *gc.C) { - s.MgoSuite.TearDownTest(c) - s.ElasticSearchSuite.TearDownTest(c) - s.IsolationSuite.TearDownTest(c) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/internal/v4' === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/api.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/api.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/api.go 1970-01-01 00:00:00 +0000 @@ -1,935 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4 - -import ( - "archive/zip" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/juju/loggo" - "github.com/juju/utils/jsonhttp" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -var logger = loggo.GetLogger("charmstore.internal.v4") - -type Handler struct { - *router.Router - pool *charmstore.Pool - config charmstore.ServerParams - locator *bakery.PublicKeyRing -} - -const delegatableMacaroonExpiry = time.Minute - -// New returns a new instance of the v4 API handler. 
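The returned *Handler embeds the router and implements http.Handler (see ServeHTTP below), so once a pool and configuration exist it can be mounted directly; a fragment sketched under the assumption that pool and config are constructed elsewhere:

    var pool *charmstore.Pool          // assumed constructed elsewhere
    var config charmstore.ServerParams // assumed deployment configuration
    http.Handle("/v4/", v4.New(pool, config))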
-func New(pool *charmstore.Pool, config charmstore.ServerParams) *Handler { - h := &Handler{ - pool: pool, - config: config, - locator: bakery.NewPublicKeyRing(), - } - - h.Router = router.New(&router.Handlers{ - Global: map[string]http.Handler{ - "changes/published": router.HandleJSON(h.serveChangesPublished), - "debug": http.HandlerFunc(h.serveDebug), - "debug/pprof/": newPprofHandler(h), - "debug/status": router.HandleJSON(h.serveDebugStatus), - "log": router.HandleErrors(h.serveLog), - "search": router.HandleJSON(h.serveSearch), - "search/interesting": http.HandlerFunc(h.serveSearchInteresting), - "stats/": router.NotFoundHandler(), - "stats/counter/": router.HandleJSON(h.serveStatsCounter), - "macaroon": router.HandleJSON(h.serveMacaroon), - "delegatable-macaroon": router.HandleJSON(h.serveDelegatableMacaroon), - }, - Id: map[string]router.IdHandler{ - "archive": h.serveArchive, - "archive/": h.resolveId(h.authId(h.serveArchiveFile)), - "diagram.svg": h.resolveId(h.authId(h.serveDiagram)), - "expand-id": h.resolveId(h.authId(h.serveExpandId)), - "icon.svg": h.resolveId(h.authId(h.serveIcon)), - "readme": h.resolveId(h.authId(h.serveReadMe)), - "resources": h.resolveId(h.authId(h.serveResources)), - "promulgate": h.resolveId(h.serveAdminPromulgate), - }, - Meta: map[string]router.BulkIncludeHandler{ - "archive-size": h.entityHandler(h.metaArchiveSize, "size"), - "archive-upload-time": h.entityHandler(h.metaArchiveUploadTime, "uploadtime"), - "bundle-machine-count": h.entityHandler(h.metaBundleMachineCount, "bundlemachinecount"), - "bundle-metadata": h.entityHandler(h.metaBundleMetadata, "bundledata"), - "bundles-containing": h.entityHandler(h.metaBundlesContaining), - "bundle-unit-count": h.entityHandler(h.metaBundleUnitCount, "bundleunitcount"), - "charm-actions": h.entityHandler(h.metaCharmActions, "charmactions"), - "charm-config": h.entityHandler(h.metaCharmConfig, "charmconfig"), - "charm-metadata": h.entityHandler(h.metaCharmMetadata, "charmmeta"), - "charm-related": h.entityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces"), - "extra-info": h.puttableEntityHandler( - h.metaExtraInfo, - h.putMetaExtraInfo, - "extrainfo", - ), - "extra-info/": h.puttableEntityHandler( - h.metaExtraInfoWithKey, - h.putMetaExtraInfoWithKey, - "extrainfo", - ), - "hash": h.entityHandler(h.metaHash, "blobhash"), - "hash256": h.entityHandler(h.metaHash256, "blobhash256"), - "id": h.entityHandler(h.metaId, "_id"), - "id-name": h.entityHandler(h.metaIdName, "_id"), - "id-user": h.entityHandler(h.metaIdUser, "_id"), - "id-revision": h.entityHandler(h.metaIdRevision, "_id"), - "id-series": h.entityHandler(h.metaIdSeries, "_id"), - "manifest": h.entityHandler(h.metaManifest, "blobname"), - "perm": h.puttableBaseEntityHandler(h.metaPerm, h.putMetaPerm, "acls"), - "perm/": h.puttableBaseEntityHandler(h.metaPermWithKey, h.putMetaPermWithKey, "acls"), - "promulgated": h.baseEntityHandler(h.metaPromulgated, "promulgated"), - "revision-info": router.SingleIncludeHandler(h.metaRevisionInfo), - "stats": h.entityHandler(h.metaStats), - "tags": h.entityHandler(h.metaTags, "charmmeta", "bundledata"), - - // endpoints not yet implemented: - // "color": router.SingleIncludeHandler(h.metaColor), - }, - }, h.resolveURL, h.AuthorizeEntity, h.entityExists) - return h -} - -// NewAPIHandler returns a new Handler as an http Handler. -// It is defined for the convenience of callers that require a -// charmstore.NewAPIHandlerFunc. 
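As the comment above notes, the wrapper exists so the constructor can be stored wherever a charmstore.NewAPIHandlerFunc is expected, for instance keyed by API version; the map below is a hypothetical sketch, not code from this tree:

    var versions = map[string]charmstore.NewAPIHandlerFunc{
        "v4": v4.NewAPIHandler, // matches the NewAPIHandlerFunc signature
    }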
-func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams) http.Handler { - return New(pool, config) -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // When requests in this handler use router.RelativeURL, we want - // the "absolute path" there to be interpreted relative to the - // root of this handler, not the absolute root of the web server, - // which may be arbitrarily many levels up. - req.RequestURI = req.URL.Path - h.Router.ServeHTTP(w, req) -} - -// ResolveURL resolves the series and revision of the given URL if either is -// unspecified by filling them out with information retrieved from the store. -func ResolveURL(store *charmstore.Store, url *charm.Reference) (*router.ResolvedURL, error) { - if url.Series != "" && url.Revision != -1 && url.User != "" { - // URL is fully specified; no need for a database lookup. - return &router.ResolvedURL{ - URL: *url, - PromulgatedRevision: -1, - }, nil - } - entity, err := store.FindBestEntity(url, "_id", "promulgated-revision") - if err != nil && errgo.Cause(err) != params.ErrNotFound { - return nil, errgo.Mask(err) - } - if errgo.Cause(err) == params.ErrNotFound { - return nil, noMatchingURLError(url) - } - if url.User == "" { - return &router.ResolvedURL{ - URL: *entity.URL, - PromulgatedRevision: entity.PromulgatedRevision, - }, nil - } - return &router.ResolvedURL{ - URL: *entity.URL, - PromulgatedRevision: -1, - }, nil -} - -func noMatchingURLError(url *charm.Reference) error { - return errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %q", url) -} - -func (h *Handler) resolveURL(url *charm.Reference) (*router.ResolvedURL, error) { - store := h.pool.Store() - defer store.Close() - return ResolveURL(store, url) -} - -type entityHandlerFunc func(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) - -type baseEntityHandlerFunc func(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) - -// entityHandler returns a Handler that calls f with a *mongodoc.Entity that -// contains at least the given fields. It allows only GET requests. -func (h *Handler) entityHandler(f entityHandlerFunc, fields ...string) router.BulkIncludeHandler { - return h.puttableEntityHandler(f, nil, fields...) -} - -func (h *Handler) puttableEntityHandler(get entityHandlerFunc, handlePut router.FieldPutFunc, fields ...string) router.BulkIncludeHandler { - handleGet := func(doc interface{}, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - edoc := doc.(*mongodoc.Entity) - val, err := get(edoc, id, path, flags, req) - return val, errgo.Mask(err, errgo.Any) - } - type entityHandlerKey struct{} - return router.FieldIncludeHandler(router.FieldIncludeHandlerParams{ - Key: entityHandlerKey{}, - Query: h.entityQuery, - Fields: fields, - HandleGet: handleGet, - HandlePut: handlePut, - Update: h.updateEntity, - UpdateSearch: h.updateSearch, - }) -} - -// baseEntityHandler returns a Handler that calls f with a *mongodoc.BaseEntity that -// contains at least the given fields. It allows only GET requests. -func (h *Handler) baseEntityHandler(f baseEntityHandlerFunc, fields ...string) router.BulkIncludeHandler { - return h.puttableBaseEntityHandler(f, nil, fields...)
-} - -func (h *Handler) puttableBaseEntityHandler(get baseEntityHandlerFunc, handlePut router.FieldPutFunc, fields ...string) router.BulkIncludeHandler { - handleGet := func(doc interface{}, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - edoc := doc.(*mongodoc.BaseEntity) - val, err := get(edoc, id, path, flags, req) - return val, errgo.Mask(err, errgo.Any) - } - type baseEntityHandlerKey struct{} - return router.FieldIncludeHandler(router.FieldIncludeHandlerParams{ - Key: baseEntityHandlerKey{}, - Query: h.baseEntityQuery, - Fields: fields, - HandleGet: handleGet, - HandlePut: handlePut, - Update: h.updateBaseEntity, - UpdateSearch: h.updateSearchBase, - }) -} - -func (h *Handler) updateBaseEntity(id *router.ResolvedURL, fields map[string]interface{}) error { - store := h.pool.Store() - defer store.Close() - if err := store.UpdateBaseEntity(id, bson.D{{"$set", fields}}); err != nil { - return errgo.Notef(err, "cannot update base entity %q", id) - } - return nil -} - -func (h *Handler) updateEntity(id *router.ResolvedURL, fields map[string]interface{}) error { - store := h.pool.Store() - defer store.Close() - err := store.UpdateEntity(id, bson.D{{"$set", fields}}) - if err != nil { - return errgo.Notef(err, "cannot update %q", &id.URL) - } - err = store.UpdateSearchFields(id, fields) - if err != nil { - return errgo.Notef(err, "cannot update %q", &id.URL) - } - return nil -} - -func (h *Handler) updateSearch(id *router.ResolvedURL, fields map[string]interface{}) error { - store := h.pool.Store() - defer store.Close() - return store.UpdateSearch(id) -} - -// updateSearchBase updates the search records for all entities with -// the same base URL as the given id. -func (h *Handler) updateSearchBase(id *router.ResolvedURL, fields map[string]interface{}) error { - store := h.pool.Store() - defer store.Close() - baseURL := id.URL - baseURL.Series = "" - baseURL.Revision = -1 - if err := store.UpdateSearchBaseURL(&baseURL); err != nil { - return errgo.Mask(err) - } - return nil -} - -func (h *Handler) entityExists(id *router.ResolvedURL, req *http.Request) (bool, error) { - // TODO add http.Request to entityExists params - _, err := h.entityQuery(id, nil, req) - if errgo.Cause(err) == params.ErrNotFound { - return false, nil - } - if err != nil { - return false, errgo.Mask(err) - } - return true, nil -} - -func (h *Handler) baseEntityQuery(id *router.ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { - fields := make([]string, 0, len(selector)) - for k, v := range selector { - if v == 0 { - continue - } - fields = append(fields, k) - } - store := h.pool.Store() - defer store.Close() - val, err := store.FindBaseEntity(&id.URL, fields...) - if errgo.Cause(err) == params.ErrNotFound { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) - } - if err != nil { - return nil, errgo.Mask(err) - } - return val, nil -} - -func (h *Handler) entityQuery(id *router.ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { - store := h.pool.Store() - defer store.Close() - val, err := store.FindEntity(id, fieldsFromSelector(selector)...) 
- if errgo.Cause(err) == params.ErrNotFound { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) - } - if err != nil { - return nil, errgo.Mask(err) - } - return val, nil -} - -var ltsReleases = map[string]bool{ - "lucid": true, - "precise": true, - "trusty": true, -} - -func fieldsFromSelector(selector map[string]int) []string { - fields := make([]string, 0, len(selector)) - for k, v := range selector { - if v == 0 { - continue - } - fields = append(fields, k) - } - return fields -} - -var errNotImplemented = errgo.Newf("method not implemented") - -// GET /debug -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-debug -func (h *Handler) serveDebug(w http.ResponseWriter, req *http.Request) { - router.WriteError(w, errNotImplemented) -} - -// POST id/resources/name.stream -// https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idresourcesnamestream -// -// GET id/resources/name.stream[-revision]/arch/filename -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idresourcesnamestream-revisionarchfilename -// -// PUT id/resources/[~user/]series/name.stream-revision/arch?sha256=hash -// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idresourcesuserseriesnamestream-revisionarchsha256hash -func (h *Handler) serveResources(id *router.ResolvedURL, _ bool, w http.ResponseWriter, req *http.Request) error { - return errNotImplemented -} - -// GET id/expand-id -// https://docs.google.com/a/canonical.com/document/d/1TgRA7jW_mmXoKH3JiwBbtPvQu7WiM6XMrz1wSrhTMXw/edit#bookmark=id.4xdnvxphb2si -func (h *Handler) serveExpandId(id *router.ResolvedURL, _ bool, w http.ResponseWriter, req *http.Request) error { - baseURL := id.PreferredURL() - baseURL.Revision = -1 - baseURL.Series = "" - store := h.pool.Store() - defer store.Close() - - // baseURL now represents the base URL of the given id; - // it will be a promulgated URL iff the original URL was - // specified without a user, which will cause EntitiesQuery - // to return entities that match appropriately. - - // Retrieve all the entities with the same base URL. - q := store.EntitiesQuery(baseURL).Select(bson.D{{"_id", 1}, {"promulgated-url", 1}}) - if id.PromulgatedRevision != -1 { - q = q.Sort("-series", "-promulgated-revision") - } else { - q = q.Sort("-series", "-revision") - } - var docs []*mongodoc.Entity - err := q.All(&docs) - if err != nil && errgo.Cause(err) != mgo.ErrNotFound { - return errgo.Mask(err) - } - - // A not found error should have been already returned by the router in the - // case a partial id is provided. Here we do the same for the case when - // a fully qualified URL is provided, but no matching entities are found. - if len(docs) == 0 { - return noMatchingURLError(id.PreferredURL()) - } - - // Collect all the expanded identifiers for each entity. - response := make([]params.ExpandedId, 0, len(docs)) - for _, doc := range docs { - url := doc.PreferredURL(id.PromulgatedRevision != -1) - response = append(response, params.ExpandedId{Id: url.String()}) - } - - // Write the response in JSON format. - return jsonhttp.WriteJSON(w, http.StatusOK, response) -} - -func badRequestf(underlying error, f string, a ...interface{}) error { - err := errgo.WithCausef(underlying, params.ErrBadRequest, f, a...) 
- err.(*errgo.Err).SetLocation(1) - return err -} - -// GET id/meta/charm-metadata -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-metadata -func (h *Handler) metaCharmMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return entity.CharmMeta, nil -} - -// GET id/meta/bundle-metadata -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-metadata -func (h *Handler) metaBundleMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return entity.BundleData, nil -} - -// GET id/meta/bundle-unit-count -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-unit-count -func (h *Handler) metaBundleUnitCount(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return bundleCount(entity.BundleUnitCount), nil -} - -// GET id/meta/bundle-machine-count -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-machine-count -func (h *Handler) metaBundleMachineCount(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return bundleCount(entity.BundleMachineCount), nil -} - -func bundleCount(x *int) interface{} { - if x == nil { - return nil - } - return params.BundleCount{ - Count: *x, - } -} - -// GET id/meta/manifest -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetamanifest -func (h *Handler) metaManifest(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - store := h.pool.Store() - defer store.Close() - r, size, err := store.BlobStore.Open(entity.BlobName) - if err != nil { - return nil, errgo.Notef(err, "cannot open archive data for %s", id) - } - defer r.Close() - zipReader, err := zip.NewReader(charmstore.ReaderAtSeeker(r), size) - if err != nil { - return nil, errgo.Notef(err, "cannot read archive data for %s", id) - } - // Collect the files. 
- manifest := make([]params.ManifestFile, 0, len(zipReader.File)) - for _, file := range zipReader.File { - fileInfo := file.FileInfo() - if fileInfo.IsDir() { - continue - } - manifest = append(manifest, params.ManifestFile{ - Name: file.Name, - Size: fileInfo.Size(), - }) - } - return manifest, nil -} - -// GET id/meta/charm-actions -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-actions -func (h *Handler) metaCharmActions(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return entity.CharmActions, nil -} - -// GET id/meta/charm-config -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-config -func (h *Handler) metaCharmConfig(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return entity.CharmConfig, nil -} - -// GET id/meta/color -func (h *Handler) metaColor(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return nil, errNotImplemented -} - -// GET id/meta/archive-size -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size -func (h *Handler) metaArchiveSize(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return &params.ArchiveSizeResponse{ - Size: entity.Size, - }, nil -} - -// GET id/meta/hash -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash -func (h *Handler) metaHash(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return &params.HashResponse{ - Sum: entity.BlobHash, - }, nil -} - -// GET id/meta/hash256 -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256 -func (h *Handler) metaHash256(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - // TODO frankban: remove this lazy calculation after the cshash256 - // command is run in the production db. At that point, entities - // always have their blobhash256 field populated, and there is no - // need for this lazy evaluation anymore. - if entity.BlobHash256 == "" { - store := h.pool.Store() - defer store.Close() - var err error - if entity.BlobHash256, err = store.UpdateEntitySHA256(id); err != nil { - return nil, errgo.Notef(err, "cannot retrieve the SHA256 hash for entity %s", entity.URL) - } - } - return &params.HashResponse{ - Sum: entity.BlobHash256, - }, nil -} - -// GET id/meta/tags -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetatags -func (h *Handler) metaTags(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - var tags []string - switch { - case id.URL.Series == "bundle": - tags = entity.BundleData.Tags - case len(entity.CharmMeta.Tags) > 0: - // TODO only return whitelisted tags. - tags = entity.CharmMeta.Tags - default: - tags = entity.CharmMeta.Categories - } - return params.TagsResponse{ - Tags: tags, - }, nil -} - -// GET id/meta/stats/ -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetastats -func (h *Handler) metaStats(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - store := h.pool.Store() - defer store.Close() - // Retrieve the aggregated downloads count for the specific revision.
- counts, countsAllRevisions, err := store.ArchiveDownloadCounts(id.PreferredURL()) - if err != nil { - return nil, errgo.Mask(err) - } - // Return the response. - return &params.StatsResponse{ - ArchiveDownloadCount: counts.Total, - ArchiveDownload: params.StatsCount{ - Total: counts.Total, - Day: counts.LastDay, - Week: counts.LastWeek, - Month: counts.LastMonth, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: countsAllRevisions.Total, - Day: countsAllRevisions.LastDay, - Week: countsAllRevisions.LastWeek, - Month: countsAllRevisions.LastMonth, - }, - }, nil -} - -// GET id/meta/revision-info -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info -func (h *Handler) metaRevisionInfo(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - searchURL := id.PreferredURL() - searchURL.Revision = -1 - - store := h.pool.Store() - defer store.Close() - q := store.EntitiesQuery(searchURL) - if id.PromulgatedRevision != -1 { - q = q.Sort("-promulgated-revision") - } else { - q = q.Sort("-revision") - } - var docs []*mongodoc.Entity - if err := q.Select(bson.D{{"_id", 1}, {"promulgated-url", 1}}).All(&docs); err != nil { - return "", errgo.Notef(err, "cannot get ids") - } - - if len(docs) == 0 { - return "", errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) - } - var response params.RevisionInfoResponse - for _, doc := range docs { - if id.PromulgatedRevision != -1 { - response.Revisions = append(response.Revisions, doc.PromulgatedURL) - } else { - response.Revisions = append(response.Revisions, doc.URL) - } - } - - // Return the response. - return &response, nil -} - -// GET id/meta/id-user -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-user -func (h *Handler) metaIdUser(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return params.IdUserResponse{ - User: id.PreferredURL().User, - }, nil -} - -// GET id/meta/id-series -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-series -func (h *Handler) metaIdSeries(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return params.IdSeriesResponse{ - Series: id.PreferredURL().Series, - }, nil -} - -// GET id/meta/id -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid -func (h *Handler) metaId(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - u := id.PreferredURL() - return params.IdResponse{ - Id: u, - User: u.User, - Series: u.Series, - Name: u.Name, - Revision: u.Revision, - }, nil -} - -// GET id/meta/id-name -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-name -func (h *Handler) metaIdName(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return params.IdNameResponse{ - Name: id.URL.Name, - }, nil -} - -// GET id/meta/id-revision -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-revision -func (h *Handler) metaIdRevision(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return params.IdRevisionResponse{ - Revision: id.PreferredURL().Revision, - }, nil -} - -// GET id/meta/extra-info -//
https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaextra-info -func (h *Handler) metaExtraInfo(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - // The extra-info is stored in mongo as simple byte - // slices, so convert the values to json.RawMessages - // so that the client will see the original JSON. - m := make(map[string]*json.RawMessage) - for key, val := range entity.ExtraInfo { - jmsg := json.RawMessage(val) - m[key] = &jmsg - } - return m, nil -} - -// GET id/meta/extra-info/key -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaextra-infokey -func (h *Handler) metaExtraInfoWithKey(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - path = strings.TrimPrefix(path, "/") - var data json.RawMessage = entity.ExtraInfo[path] - if len(data) == 0 { - return nil, nil - } - return &data, nil -} - -// PUT id/meta/extra-info -// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaextra-info -func (h *Handler) putMetaExtraInfo(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { - var fields map[string]*json.RawMessage - if err := json.Unmarshal(*val, &fields); err != nil { - return errgo.Notef(err, "cannot unmarshal extra info body") - } - // Check all the fields are OK before adding any fields to be updated. - for key := range fields { - if err := checkExtraInfoKey(key); err != nil { - return err - } - } - for key, val := range fields { - updater.UpdateField("extrainfo."+key, *val) - } - return nil -} - -// PUT id/meta/extra-info/key -// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaextra-infokey -func (h *Handler) putMetaExtraInfoWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { - key := strings.TrimPrefix(path, "/") - if err := checkExtraInfoKey(key); err != nil { - return err - } - updater.UpdateField("extrainfo."+key, *val) - return nil -} - -func checkExtraInfoKey(key string) error { - if strings.ContainsAny(key, "./$") { - return errgo.WithCausef(nil, params.ErrBadRequest, "bad key for extra-info") - } - return nil -} - -// GET id/meta/perm -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaperm -func (h *Handler) metaPerm(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return params.PermResponse{ - Read: entity.ACLs.Read, - Write: entity.ACLs.Write, - }, nil -} - -// PUT id/meta/perm -// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmeta -func (h *Handler) putMetaPerm(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { - var perms params.PermRequest - if err := json.Unmarshal(*val, &perms); err != nil { - return errgo.Mask(err) - } - isPublic := false - for _, p := range perms.Read { - if p == params.Everyone { - isPublic = true - break - } - } - updater.UpdateField("acls.read", perms.Read) - updater.UpdateField("public", isPublic) - updater.UpdateField("acls.write", perms.Write) - updater.UpdateSearch() - return nil -} - -// GET id/meta/promulgated -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapromulgated -func (h *Handler) metaPromulgated(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) 
(interface{}, error) { - return params.PromulgatedResponse{ - Promulgated: bool(entity.Promulgated), - }, nil -} - -// GET id/meta/perm/key -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapermkey -func (h *Handler) metaPermWithKey(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - switch path { - case "/read": - return entity.ACLs.Read, nil - case "/write": - return entity.ACLs.Write, nil - } - return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown permission") -} - -// PUT id/meta/perm/key -// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetapermkey -func (h *Handler) putMetaPermWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { - var perms []string - if err := json.Unmarshal(*val, &perms); err != nil { - return errgo.Mask(err) - } - isPublic := false - for _, p := range perms { - if p == params.Everyone { - isPublic = true - break - } - } - switch path { - case "/read": - updater.UpdateField("acls.read", perms) - updater.UpdateField("public", isPublic) - updater.UpdateSearch() - return nil - case "/write": - updater.UpdateField("acls.write", perms) - return nil - } - return errgo.WithCausef(nil, params.ErrNotFound, "unknown permission") -} - -// GET id/meta/archive-upload-time -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-upload-time -func (h *Handler) metaArchiveUploadTime(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - return &params.ArchiveUploadTimeResponse{ - UploadTime: entity.UploadTime.UTC(), - }, nil -} - -type PublishedResponse struct { - Id *charm.Reference - Published time.Time -} - -// GET changes/published[?limit=$count][&from=$fromdate][&to=$todate] -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-changespublished -func (h *Handler) serveChangesPublished(_ http.Header, r *http.Request) (interface{}, error) { - start, stop, err := parseDateRange(r.Form) - if err != nil { - return nil, errgo.Mask(err, errgo.Is(params.ErrBadRequest)) - } - limit := -1 - if limitStr := r.Form.Get("limit"); limitStr != "" { - limit, err = strconv.Atoi(limitStr) - if err != nil || limit <= 0 { - return nil, badRequestf(nil, "invalid 'limit' value") - } - } - var tquery bson.D - if !start.IsZero() { - tquery = make(bson.D, 0, 2) - tquery = append(tquery, bson.DocElem{ - Name: "$gte", - Value: start, - }) - } - if !stop.IsZero() { - tquery = append(tquery, bson.DocElem{ - Name: "$lte", - Value: stop, - }) - } - var findQuery bson.D - if len(tquery) > 0 { - findQuery = bson.D{{"uploadtime", tquery}} - } - store := h.pool.Store() - defer store.Close() - query := store.DB.Entities(). - Find(findQuery). - Sort("-uploadtime").
- Select(bson.D{{"_id", 1}, {"uploadtime", 1}}) - if limit != -1 { - query = query.Limit(limit) - } - - results := []params.Published{} - var entity mongodoc.Entity - for iter := query.Iter(); iter.Next(&entity); { - results = append(results, params.Published{ - Id: entity.URL, - PublishTime: entity.UploadTime.UTC(), - }) - } - return results, nil -} - -// GET /macaroon -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-macaroon -func (h *Handler) serveMacaroon(_ http.Header, _ *http.Request) (interface{}, error) { - return h.newMacaroon() -} - -// GET /delegatable-macaroon -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-delegatable-macaroon -func (h *Handler) serveDelegatableMacaroon(_ http.Header, req *http.Request) (interface{}, error) { - store := h.pool.Store() - defer store.Close() - // Note that we require authorization even though we allow - // anyone to obtain a delegatable macaroon. This means - // that we will be able to add the declared caveats to - // the returned macaroon. - auth, err := h.authorize(req, []string{params.Everyone}, true, nil) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - if auth.Username == "" { - return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials") - } - // TODO propagate expiry time from macaroons in request. - m, err := store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ - checkers.DeclaredCaveat(usernameAttr, auth.Username), - checkers.TimeBeforeCaveat(time.Now().Add(delegatableMacaroonExpiry)), - }) - if err != nil { - return nil, errgo.Mask(err) - } - return m, nil -} - -// PUT id/promulgate -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpromulgate -func (h *Handler) serveAdminPromulgate(id *router.ResolvedURL, _ bool, w http.ResponseWriter, req *http.Request) error { - if _, err := h.authorize(req, []string{promulgatorsGroup}, false, id); err != nil { - return errgo.Mask(err, errgo.Any) - } - if req.Method != "PUT" { - return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method) - } - data, err := ioutil.ReadAll(req.Body) - if err != nil { - return errgo.Mask(err) - } - var promulgate params.PromulgateRequest - if err := json.Unmarshal(data, &promulgate); err != nil { - return errgo.WithCausef(err, params.ErrBadRequest, "") - } - store := h.pool.Store() - defer store.Close() - if err := store.SetPromulgated(id, promulgate.Promulgated); err != nil { - return errgo.Mask(err, errgo.Any) - } - return nil -} - -type resolvedIdHandler func(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error - -// authId returns a resolvedIdHandler that checks that the client -// is authorized to perform the HTTP request method before -// invoking f. -func (h *Handler) authId(f resolvedIdHandler) resolvedIdHandler { - return func(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error { - if err := h.AuthorizeEntity(id, req); err != nil { - return errgo.Mask(err, errgo.Any) - } - if err := f(id, fullySpecified, w, req); err != nil { - return errgo.Mask(err, errgo.Any) - } - return nil - } -} - -func isFullySpecified(id *charm.Reference) bool { - return id.Series != "" && id.Revision != -1 -} - -// resolveId returns an id handler that resolves any non-fully-specified -// entity ids using h.resolveURL before calling f with the resolved id.
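The resolveId and authId decorators compose exactly as the route table in New wires them; a hedged sketch of registering a hypothetical id-scoped endpoint (the "my-endpoint" name and the handler body are illustrative, and handlers stands for the *router.Handlers value passed to router.New):

    handlers.Id["my-endpoint"] = h.resolveId(h.authId(func(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error {
        // By this point id is resolved and the client is authorized.
        return jsonhttp.WriteJSON(w, http.StatusOK, id.PreferredURL().String())
    }))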
-func (h *Handler) resolveId(f resolvedIdHandler) router.IdHandler { - return func(id *charm.Reference, w http.ResponseWriter, req *http.Request) error { - rid, err := h.resolveURL(id) - if err != nil { - return errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - return f(rid, isFullySpecified(id), w, req) - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/api_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/api_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/api_test.go 1970-01-01 00:00:00 +0000 @@ -1,2513 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4_test - -import ( - "archive/zip" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "time" - - jujutesting "github.com/juju/testing" - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" - "gopkg.in/macaroon.v1" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/elasticsearch" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/storetesting/hashtesting" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -var testPublicKey = bakery.PublicKey{ - bakery.Key{ - 0xf6, 0xfb, 0xcf, 0x67, 0x8c, 0x5a, 0xb6, 0x52, - 0xa9, 0x23, 0x4d, 0x7e, 0x01, 0xf5, 0x0a, 0x25, - 0xc4, 0x63, 0x69, 0x54, 0x42, 0x62, 0xaf, 0x62, - 0xbe, 0x40, 0x6a, 0x0b, 0xe2, 0x9a, 0xb0, 0x5f, - }, -} - -const ( - testUsername = "test-user" - testPassword = "test-password" -) - -var es *elasticsearch.Database = &elasticsearch.Database{"localhost:9200"} -var si *charmstore.SearchIndex = &charmstore.SearchIndex{ - Database: es, - Index: "cs", -} - -type APISuite struct { - commonSuite -} - -func (s *APISuite) SetUpSuite(c *gc.C) { - s.enableIdentity = true - s.commonSuite.SetUpSuite(c) -} - -var newResolvedURL = router.MustNewResolvedURL - -var _ = gc.Suite(&APISuite{}) - -// patchLegacyDownloadCountsEnabled sets LegacyDownloadCountsEnabled to the -// given value for the duration of the test. -// TODO (frankban): remove this function when removing the legacy counts logic. -func patchLegacyDownloadCountsEnabled(addCleanup func(jujutesting.CleanupFunc), value bool) { - original := charmstore.LegacyDownloadCountsEnabled - charmstore.LegacyDownloadCountsEnabled = value - addCleanup(func(*gc.C) { - charmstore.LegacyDownloadCountsEnabled = original - }) -} - -type metaEndpointExpectedValueGetter func(*charmstore.Store, *router.ResolvedURL) (interface{}, error) - -type metaEndpoint struct { - // name names the meta endpoint. - name string - - // exclusive specifies whether the endpoint is - // valid for charms only (charmOnly), bundles only (bundleOnly) - // or to both (zero). - exclusive int - - // get returns the expected data for the endpoint. - get metaEndpointExpectedValueGetter - - // checkURL holds one URL to sanity check data against. - checkURL *router.ResolvedURL - - // assertCheckData holds a function that will be used to check that - // the get function returns sane data for checkURL. 
- assertCheckData func(c *gc.C, data interface{}) -} - -const ( - charmOnly = iota + 1 - bundleOnly -) - -var metaEndpoints = []metaEndpoint{{ - name: "charm-config", - exclusive: charmOnly, - get: entityFieldGetter("CharmConfig"), - checkURL: newResolvedURL("cs:~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*charm.Config).Options["blog-title"].Default, gc.Equals, "My Title") - }, -}, { - name: "charm-metadata", - exclusive: charmOnly, - get: entityFieldGetter("CharmMeta"), - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*charm.Meta).Summary, gc.Equals, "Blog engine") - }, -}, { - name: "bundle-metadata", - exclusive: bundleOnly, - get: entityFieldGetter("BundleData"), - checkURL: newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*charm.BundleData).Services["wordpress"].Charm, gc.Equals, "wordpress") - }, -}, { - name: "bundle-unit-count", - exclusive: bundleOnly, - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - if entity.BundleData == nil { - return nil - } - return params.BundleCount{*entity.BundleUnitCount} - }), - checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(params.BundleCount).Count, gc.Equals, 2) - }, -}, { - name: "bundle-machine-count", - exclusive: bundleOnly, - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - if entity.BundleData == nil { - return nil - } - return params.BundleCount{*entity.BundleMachineCount} - }), - checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(params.BundleCount).Count, gc.Equals, 2) - }, -}, { - name: "charm-actions", - exclusive: charmOnly, - get: entityFieldGetter("CharmActions"), - checkURL: newResolvedURL("~charmers/precise/dummy-10", 10), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*charm.Actions).ActionSpecs["snapshot"].Description, gc.Equals, "Take a snapshot of the database.") - }, -}, { - name: "archive-size", - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - return &params.ArchiveSizeResponse{ - Size: entity.Size, - } - }), - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: entitySizeChecker, -}, { - name: "hash", - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - return &params.HashResponse{ - Sum: entity.BlobHash, - } - }), - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*params.HashResponse).Sum, gc.Not(gc.Equals), "") - }, -}, { - name: "hash256", - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - return &params.HashResponse{ - Sum: entity.BlobHash256, - } - }), - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.(*params.HashResponse).Sum, gc.Not(gc.Equals), "") - }, -}, { - name: "manifest", - get: zipGetter(func(r *zip.Reader) interface{} { - var manifest []params.ManifestFile - for _, file := range r.File { - if strings.HasSuffix(file.Name, "/") { - continue - } - manifest = append(manifest, params.ManifestFile{ - Name: file.Name, - Size: int64(file.UncompressedSize64), - }) - } - return manifest - }), - checkURL:
newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data.([]params.ManifestFile), gc.Not(gc.HasLen), 0) - }, -}, { - name: "archive-upload-time", - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - return ¶ms.ArchiveUploadTimeResponse{ - UploadTime: entity.UploadTime.UTC(), - } - }), - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - response := data.(*params.ArchiveUploadTimeResponse) - c.Assert(response.UploadTime, gc.Not(jc.Satisfies), time.Time.IsZero) - c.Assert(response.UploadTime.Location(), gc.Equals, time.UTC) - }, -}, { - name: "revision-info", - get: func(store *charmstore.Store, id *router.ResolvedURL) (interface{}, error) { - ref := &id.URL - if id.PromulgatedRevision != -1 { - ref = id.PreferredURL() - } - return params.RevisionInfoResponse{ - []*charm.Reference{ref}, - }, nil - }, - checkURL: newResolvedURL("~charmers/precise/wordpress-99", 99), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.DeepEquals, params.RevisionInfoResponse{ - []*charm.Reference{ - charm.MustParseReference("cs:precise/wordpress-99"), - }}) - }, -}, { - name: "charm-related", - exclusive: charmOnly, - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - // The charms we use for those tests are not related each other. - // Charm relations are independently tested in relations_test.go. - if url.URL.Series == "bundle" { - return nil, nil - } - return ¶ms.RelatedResponse{}, nil - }, - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.FitsTypeOf, (*params.RelatedResponse)(nil)) - }, -}, { - name: "bundles-containing", - exclusive: charmOnly, - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - // The charms we use for those tests are not included in any bundle. - // Charm/bundle relations are tested in relations_test.go. - if url.URL.Series == "bundle" { - return nil, nil - } - return []*params.MetaAnyResponse{}, nil - }, - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.FitsTypeOf, []*params.MetaAnyResponse(nil)) - }, -}, { - name: "stats", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - // The entities used for those tests were never downloaded. 
- return &params.StatsResponse{ - ArchiveDownloadCount: 0, - }, nil - }, - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.FitsTypeOf, (*params.StatsResponse)(nil)) - }, -}, { - name: "extra-info", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - return map[string]string{ - "key": "value " + url.URL.String(), - }, nil - }, - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.DeepEquals, map[string]string{ - "key": "value cs:~charmers/precise/wordpress-23", - }) - }, -}, { - name: "extra-info/key", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - return "value " + url.URL.String(), nil - }, - checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.Equals, "value cs:~charmers/precise/wordpress-23") - }, -}, { - name: "perm", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - e, err := store.FindBaseEntity(&url.URL) - if err != nil { - return nil, err - } - return params.PermResponse{ - Read: e.ACLs.Read, - Write: e.ACLs.Write, - }, nil - }, - checkURL: newResolvedURL("~bob/utopic/wordpress-2", -1), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.DeepEquals, params.PermResponse{ - Read: []string{params.Everyone, "bob"}, - Write: []string{"bob"}, - }) - }, -}, { - name: "perm/read", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - e, err := store.FindBaseEntity(&url.URL) - if err != nil { - return nil, err - } - return e.ACLs.Read, nil - }, - checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.DeepEquals, []string{params.Everyone, "bob"}) - }, -}, { - name: "tags", - get: entityGetter(func(entity *mongodoc.Entity) interface{} { - if entity.URL.Series == "bundle" { - return params.TagsResponse{entity.BundleData.Tags} - } - if len(entity.CharmMeta.Tags) > 0 { - return params.TagsResponse{entity.CharmMeta.Tags} - } - return params.TagsResponse{entity.CharmMeta.Categories} - }), - checkURL: newResolvedURL("~charmers/utopic/category-2", 2), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, jc.DeepEquals, params.TagsResponse{ - Tags: []string{"openstack", "storage"}, - }) - }, -}, { - name: "id-user", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - return params.IdUserResponse{url.PreferredURL().User}, nil - }, - checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.Equals, params.IdUserResponse{"bob"}) - }, -}, { - name: "id-series", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - return params.IdSeriesResponse{url.URL.Series}, nil - }, - checkURL: newResolvedURL("~charmers/utopic/category-2", 2), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.Equals, params.IdSeriesResponse{"utopic"}) - }, -}, { - name: "id-name", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - return params.IdNameResponse{url.URL.Name}, nil - }, - checkURL: newResolvedURL("~charmers/utopic/category-2", 2), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.Equals,
params.IdNameResponse{"category"}) - }, -}, { - name: "id-revision", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - return params.IdRevisionResponse{url.PreferredURL().Revision}, nil - }, - checkURL: newResolvedURL("~charmers/utopic/category-2", 2), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.Equals, params.IdRevisionResponse{2}) - }, -}, { - name: "id", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - id := url.PreferredURL() - return params.IdResponse{ - Id: id, - User: id.User, - Series: id.Series, - Name: id.Name, - Revision: id.Revision, - }, nil - }, - checkURL: newResolvedURL("~charmers/utopic/category-2", 2), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, jc.DeepEquals, params.IdResponse{ - Id: charm.MustParseReference("cs:utopic/category-2"), - User: "", - Series: "utopic", - Name: "category", - Revision: 2, - }) - }, -}, { - name: "promulgated", - get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - e, err := store.FindBaseEntity(&url.URL) - if err != nil { - return nil, err - } - return params.PromulgatedResponse{ - Promulgated: bool(e.Promulgated), - }, nil - }, - checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), - assertCheckData: func(c *gc.C, data interface{}) { - c.Assert(data, gc.Equals, params.PromulgatedResponse{Promulgated: false}) - }, -}} - -// TestEndpointGet tries to ensure that the endpoint -// test data getters correspond with reality. -func (s *APISuite) TestEndpointGet(c *gc.C) { - s.addTestEntities(c) - for i, ep := range metaEndpoints { - c.Logf("test %d: %s\n", i, ep.name) - data, err := ep.get(s.store, ep.checkURL) - c.Assert(err, gc.IsNil) - ep.assertCheckData(c, data) - } -} - -func (s *APISuite) TestAllMetaEndpointsTested(c *gc.C) { - // Make sure that we're testing all the metadata - // endpoints that we need to. - s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("precise/wordpress-23/meta"), - }) - c.Logf("meta response body: %s", rec.Body) - var list []string - err := json.Unmarshal(rec.Body.Bytes(), &list) - c.Assert(err, gc.IsNil) - - listNames := make(map[string]bool) - for _, name := range list { - c.Assert(listNames[name], gc.Equals, false, gc.Commentf("name %s", name)) - listNames[name] = true - } - - testNames := make(map[string]bool) - for _, test := range metaEndpoints { - if strings.Contains(test.name, "/") { - continue - } - testNames[test.name] = true - } - c.Assert(testNames, jc.DeepEquals, listNames) -} - -var testEntities = []*router.ResolvedURL{ - // A stock charm. - newResolvedURL("cs:~charmers/precise/wordpress-23", 23), - // A stock bundle. - newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), - // A charm with some actions. - newResolvedURL("cs:~charmers/precise/dummy-10", 10), - // A charm with some tags. - newResolvedURL("cs:~charmers/utopic/category-2", 2), - // A charm with a different user. - newResolvedURL("cs:~bob/utopic/wordpress-2", -1), -} - -func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { - for _, e := range testEntities { - if e.URL.Series == "bundle" { - s.addPublicBundle(c, e.URL.Name, e) - } else { - s.addPublicCharm(c, e.URL.Name, e) - } - // Associate some extra-info data with the entity. 
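- // For the stock charm, for example, this stores the value - // "value cs:~charmers/precise/wordpress-23" under the key "key", - // which is exactly what the extra-info getters in the endpoint - // table above expect to read back.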
- key := e.URL.Path() + "/meta/extra-info/key" - s.assertPut(c, key, "value "+e.URL.String()) - } - return testEntities -} - -func (s *APISuite) TestMetaEndpointsSingle(c *gc.C) { - urls := s.addTestEntities(c) - for i, ep := range metaEndpoints { - c.Logf("test %d. %s", i, ep.name) - tested := false - for _, url := range urls { - charmId := strings.TrimPrefix(url.String(), "cs:") - path := charmId + "/meta/" + ep.name - expectData, err := ep.get(s.store, url) - c.Assert(err, gc.IsNil) - c.Logf(" expected data for %q: %#v", url, expectData) - if isNull(expectData) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(path), - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Message: params.ErrMetadataNotFound.Error(), - Code: params.ErrMetadataNotFound, - }, - }) - continue - } - tested = true - s.assertGet(c, path, expectData) - } - if !tested { - c.Errorf("endpoint %q is null for all entities, so is not properly tested", ep.name) - } - } -} - -func (s *APISuite) TestMetaPerm(c *gc.C) { - // Create a charm store server that will use the test third party for - // its third party caveat. - s.discharge = dischargeForUser("bob") - - s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) - s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24)) - s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1)) - s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ - Read: []string{params.Everyone, "charmers"}, - Write: []string{"charmers"}, - }) - e, err := s.store.FindBaseEntity(charm.MustParseReference("precise/wordpress-23")) - c.Assert(err, gc.IsNil) - c.Assert(e.ACLs.Read, gc.DeepEquals, []string{params.Everyone, "charmers"}) - - // Change the read perms to only include a specific user and the write - // perms to include an "admin" user. - s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob"}) - s.assertPut(c, "precise/wordpress-23/meta/perm/write", []string{"admin"}) - - // Check that the perms have changed for all revisions and series. - for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { - c.Logf("id %d: %q", i, u) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - Do: bakeryDo(nil), - URL: storeURL(u + "/meta/perm"), - ExpectBody: params.PermResponse{ - Read: []string{"bob"}, - Write: []string{"admin"}, - }, - }) - } - e, err = s.store.FindBaseEntity(charm.MustParseReference("precise/wordpress-23")) - c.Assert(err, gc.IsNil) - c.Assert(e.Public, jc.IsFalse) - c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ - Read: []string{"bob"}, - Write: []string{"admin"}, - }) - - // Try restoring everyone's read permission. - s.assertPut(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) - s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ - Read: []string{"bob", params.Everyone}, - Write: []string{"admin"}, - }) - s.assertGet(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) - e, err = s.store.FindBaseEntity(charm.MustParseReference("precise/wordpress-23")) - c.Assert(err, gc.IsNil) - c.Assert(e.Public, jc.IsTrue) - c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ - Read: []string{"bob", params.Everyone}, - Write: []string{"admin"}, - }) - - // Try deleting all permissions. 
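- // With both ACLs empty the entity effectively becomes private: even - // "bob", who could read it above, should now be denied access, as - // asserted below.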
- s.assertPut(c, "wordpress/meta/perm/read", []string{}) - s.assertPut(c, "wordpress/meta/perm/write", []string{}) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - Do: bakeryDo(nil), - URL: storeURL("wordpress/meta/perm"), - ExpectStatus: http.StatusUnauthorized, - ExpectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "bob"`, - }, - }) - e, err = s.store.FindBaseEntity(charm.MustParseReference("precise/wordpress-23")) - c.Assert(err, gc.IsNil) - c.Assert(e.Public, jc.IsFalse) - c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{}) - c.Assert(e.ACLs.Read, gc.DeepEquals, []string{}) - - // Try setting all permissions in one request - s.assertPut(c, "wordpress/meta/perm", params.PermRequest{ - Read: []string{"bob"}, - Write: []string{"admin"}, - }) - e, err = s.store.FindBaseEntity(charm.MustParseReference("precise/wordpress-23")) - c.Assert(err, gc.IsNil) - c.Assert(e.Public, jc.IsFalse) - c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ - Read: []string{"bob"}, - Write: []string{"admin"}, - }) - - // Try only read permissions to meta/perm endpoint - var readRequest = struct { - Read []string - }{Read: []string{"joe"}} - s.assertPut(c, "wordpress/meta/perm", readRequest) - e, err = s.store.FindBaseEntity(charm.MustParseReference("precise/wordpress-23")) - c.Assert(err, gc.IsNil) - c.Assert(e.Public, jc.IsFalse) - c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ - Read: []string{"joe"}, - Write: []string{}, - }) -} - -func (s *APISuite) TestMetaPermPutUnauthorized(c *gc.C) { - s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-23", 23)) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.noMacaroonSrv, - URL: storeURL("~charmers/precise/wordpress-23/meta/perm/read"), - Method: "PUT", - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Body: strings.NewReader(`["some-user"]`), - ExpectStatus: http.StatusUnauthorized, - ExpectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: "authentication failed: missing HTTP auth header", - }, - }) -} - -func (s *APISuite) TestExtraInfo(c *gc.C) { - id := "precise/wordpress-23" - s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) - - // Add one value and check that it's there. - s.assertPut(c, id+"/meta/extra-info/foo", "fooval") - s.assertGet(c, id+"/meta/extra-info/foo", "fooval") - s.assertGet(c, id+"/meta/extra-info", map[string]string{ - "foo": "fooval", - }) - - // Add another value and check that both values are there. - s.assertPut(c, id+"/meta/extra-info/bar", "barval") - s.assertGet(c, id+"/meta/extra-info/bar", "barval") - s.assertGet(c, id+"/meta/extra-info", map[string]string{ - "foo": "fooval", - "bar": "barval", - }) - - // Overwrite a value and check that it's changed. - s.assertPut(c, id+"/meta/extra-info/foo", "fooval2") - s.assertGet(c, id+"/meta/extra-info/foo", "fooval2") - s.assertGet(c, id+"/meta/extra-info", map[string]string{ - "foo": "fooval2", - "bar": "barval", - }) - - // Write several values at once. 
- s.assertPut(c, id+"/meta/any", params.MetaAnyResponse{ - Meta: map[string]interface{}{ - "extra-info": map[string]string{ - "foo": "fooval3", - "baz": "bazval", - }, - "extra-info/frob": []int{1, 4, 6}, - }, - }) - s.assertGet(c, id+"/meta/extra-info", map[string]interface{}{ - "foo": "fooval3", - "baz": "bazval", - "bar": "barval", - "frob": []int{1, 4, 6}, - }) -} - -var extraInfoBadPutRequestsTests = []struct { - about string - path string - body interface{} - contentType string - expectStatus int - expectBody params.Error -}{{ - about: "key with extra element", - path: "precise/wordpress-23/meta/extra-info/foo/bar", - body: "hello", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad key for extra-info", - }, -}, { - about: "key with a dot", - path: "precise/wordpress-23/meta/extra-info/foo.bar", - body: "hello", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad key for extra-info", - }, -}, { - about: "key with a dollar", - path: "precise/wordpress-23/meta/extra-info/foo$bar", - body: "hello", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad key for extra-info", - }, -}, { - about: "multi key with extra element", - path: "precise/wordpress-23/meta/extra-info", - body: map[string]string{ - "foo/bar": "value", - }, - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad key for extra-info", - }, -}, { - about: "multi key with dot", - path: "precise/wordpress-23/meta/extra-info", - body: map[string]string{ - ".bar": "value", - }, - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad key for extra-info", - }, -}, { - about: "multi key with dollar", - path: "precise/wordpress-23/meta/extra-info", - body: map[string]string{ - "$bar": "value", - }, - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad key for extra-info", - }, -}, { - about: "multi key with bad map", - path: "precise/wordpress-23/meta/extra-info", - body: "bad", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: `cannot unmarshal extra info body: json: cannot unmarshal string into Go value of type map[string]*json.RawMessage`, - }, -}} - -func (s *APISuite) TestExtraInfoBadPutRequests(c *gc.C) { - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) - for i, test := range extraInfoBadPutRequestsTests { - c.Logf("test %d: %s", i, test.about) - contentType := test.contentType - if contentType == "" { - contentType = "application/json" - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.path), - Method: "PUT", - Header: http.Header{ - "Content-Type": {contentType}, - }, - Username: testUsername, - Password: testPassword, - Body: strings.NewReader(mustMarshalJSON(test.body)), - ExpectStatus: test.expectStatus, - ExpectBody: test.expectBody, - }) - } -} - -func (s *APISuite) TestExtraInfoPutUnauthorized(c *gc.C) { - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("precise/wordpress-23/meta/extra-info"), - Method: "PUT", - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Body: 
strings.NewReader(mustMarshalJSON(map[string]string{ - "bar": "value", - })), - ExpectStatus: http.StatusProxyAuthRequired, - ExpectBody: dischargeRequiredBody, - }) -} - -func isNull(v interface{}) bool { - data, err := json.Marshal(v) - if err != nil { - panic(err) - } - return string(data) == "null" -} - -func (s *APISuite) TestMetaEndpointsAny(c *gc.C) { - rurls := s.addTestEntities(c) - // We check the meta endpoint for both promulgated and non-promulgated - // versions of each URL. - urls := make([]*router.ResolvedURL, 0, len(rurls)*2) - for _, rurl := range rurls { - urls = append(urls, rurl) - if rurl.PromulgatedRevision != -1 { - rurl1 := *rurl - rurl1.PromulgatedRevision = -1 - urls = append(urls, &rurl1) - } - } - for _, url := range urls { - charmId := strings.TrimPrefix(url.String(), "cs:") - var flags []string - expectData := params.MetaAnyResponse{ - Id: url.PreferredURL(), - Meta: make(map[string]interface{}), - } - for _, ep := range metaEndpoints { - flags = append(flags, "include="+ep.name) - isBundle := url.URL.Series == "bundle" - if ep.exclusive != 0 && isBundle != (ep.exclusive == bundleOnly) { - // endpoint not relevant. - continue - } - val, err := ep.get(s.store, url) - c.Assert(err, gc.IsNil) - if val != nil { - expectData.Meta[ep.name] = val - } - } - s.assertGet(c, charmId+"/meta/any?"+strings.Join(flags, "&"), expectData) - } -} - -func (s *APISuite) TestMetaAnyWithNoIncludesAndNoEntity(c *gc.C) { - wordpressURL, _ := s.addPublicCharm( - c, - "wordpress", - newResolvedURL("cs:~charmers/precise/wordpress-23", 23), - ) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("precise/wordpress-1/meta/any"), - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Code: params.ErrNotFound, - Message: `no matching charm or bundle for "cs:precise/wordpress-1"`, - }, - }) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("meta/any?id=precise/wordpress-23&id=precise/wordpress-1"), - ExpectStatus: http.StatusOK, - ExpectBody: map[string]interface{}{ - "precise/wordpress-23": params.MetaAnyResponse{ - Id: wordpressURL.PreferredURL(), - }, - }, - }) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("precise/wordpress-23/meta/any"), - ExpectStatus: http.StatusOK, - ExpectBody: params.MetaAnyResponse{ - Id: wordpressURL.PreferredURL(), - }, - }) -} - -// In this test we rely on the charm.v2 testing repo package and -// dummy charm that has actions included. -func (s *APISuite) TestMetaCharmActions(c *gc.C) { - url, dummy := s.addPublicCharm(c, "dummy", newResolvedURL("cs:~charmers/precise/dummy-10", 10)) - s.assertGet(c, "precise/dummy-10/meta/charm-actions", dummy.Actions()) - s.assertGet(c, "precise/dummy-10/meta/any?include=charm-actions", - params.MetaAnyResponse{ - Id: url.PreferredURL(), - Meta: map[string]interface{}{ - "charm-actions": dummy.Actions(), - }, - }, - ) -} - -func (s *APISuite) TestBulkMeta(c *gc.C) { - // We choose an arbitrary set of ids and metadata here, just to smoke-test - // whether the meta/any logic is hooked up correctly. - // Detailed tests for this feature are in the router package. 
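- // For reference, a bulk request packs several ids into a single - // query, e.g.: - // - // GET meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10 - // - // and the response is a JSON object keyed by the requested ids, as - // asserted below.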
- - _, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) - _, mysql := s.addPublicCharm(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) - s.assertGet(c, - "meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10", - map[string]*charm.Meta{ - "precise/wordpress-23": wordpress.Meta(), - "precise/mysql-10": mysql.Meta(), - }, - ) -} - -func (s *APISuite) TestBulkMetaAny(c *gc.C) { - // We choose an arbitrary set of metadata here, just to smoke-test - // whether the meta/any logic is hooked up correctly. - // Detailed tests for this feature are in the router package. - - wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) - mysqlURL, mysql := s.addPublicCharm(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) - s.assertGet(c, - "meta/any?include=charm-metadata&include=charm-config&id=precise/wordpress-23&id=precise/mysql-10", - map[string]params.MetaAnyResponse{ - "precise/wordpress-23": { - Id: wordpressURL.PreferredURL(), - Meta: map[string]interface{}{ - "charm-config": wordpress.Config(), - "charm-metadata": wordpress.Meta(), - }, - }, - "precise/mysql-10": { - Id: mysqlURL.PreferredURL(), - Meta: map[string]interface{}{ - "charm-config": mysql.Config(), - "charm-metadata": mysql.Meta(), - }, - }, - }, - ) -} - -var metaCharmTagsTests = []struct { - about string - tags []string - categories []string - expectTags []string -}{{ - about: "tags only", - tags: []string{"foo", "bar"}, - expectTags: []string{"foo", "bar"}, -}, { - about: "categories only", - categories: []string{"foo", "bar"}, - expectTags: []string{"foo", "bar"}, -}, { - about: "tags and categories", - categories: []string{"foo", "bar"}, - tags: []string{"tag1", "tag2"}, - expectTags: []string{"tag1", "tag2"}, -}, { - about: "no tags or categories", -}} - -func (s *APISuite) TestMetaCharmTags(c *gc.C) { - url := newResolvedURL("~charmers/precise/wordpress-0", -1) - for i, test := range metaCharmTagsTests { - c.Logf("%d: %s", i, test.about) - wordpress := storetesting.Charms.CharmDir("wordpress") - meta := wordpress.Meta() - meta.Tags, meta.Categories = test.tags, test.categories - url.URL.Revision = i - err := s.store.AddCharm(&testMetaCharm{ - meta: meta, - Charm: wordpress, - }, charmstore.AddParams{ - URL: url, - BlobName: "no-such-name", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/meta/tags"), - ExpectStatus: http.StatusOK, - ExpectBody: params.TagsResponse{test.expectTags}, - }) - } -} - -func (s *APISuite) TestPromulgatedMetaCharmTags(c *gc.C) { - url := newResolvedURL("~charmers/precise/wordpress-0", 0) - for i, test := range metaCharmTagsTests { - c.Logf("%d: %s", i, test.about) - wordpress := storetesting.Charms.CharmDir("wordpress") - meta := wordpress.Meta() - meta.Tags, meta.Categories = test.tags, test.categories - url.URL.Revision = i - url.PromulgatedRevision = i - err := s.store.AddCharm(&testMetaCharm{ - meta: meta, - Charm: wordpress, - }, charmstore.AddParams{ - URL: url, - BlobName: "no-such-name", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(url.PromulgatedURL().Path() + "/meta/tags"), - ExpectStatus: http.StatusOK, - ExpectBody: params.TagsResponse{test.expectTags}, - }) - } -} - -func (s *APISuite) TestBundleTags(c *gc.C) { - b := storetesting.Charms.BundleDir("wordpress-simple") - url := newResolvedURL("~charmers/bundle/wordpress-2", -1) - data := b.Data() - data.Tags = []string{"foo", "bar"} - err := s.store.AddBundle(&testingBundle{data}, charmstore.AddParams{ - URL: url, - BlobName: "no-such-name", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/meta/tags"), - ExpectStatus: http.StatusOK, - ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, - }) -} - -func (s *APISuite) TestPromulgatedBundleTags(c *gc.C) { - b := storetesting.Charms.BundleDir("wordpress-simple") - url := newResolvedURL("~charmers/bundle/wordpress-2", 2) - data := b.Data() - data.Tags = []string{"foo", "bar"} - err := s.store.AddBundle(&testingBundle{data}, charmstore.AddParams{ - URL: url, - BlobName: "no-such-name", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(url.PromulgatedURL().Path() + "/meta/tags"), - ExpectStatus: http.StatusOK, - ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, - }) -} - -type testMetaCharm struct { - meta *charm.Meta - charm.Charm -} - -func (c *testMetaCharm) Meta() *charm.Meta { - return c.meta -} - -func (s *APISuite) TestIdsAreResolved(c *gc.C) { - // This is just testing that ResolveURL is actually - // passed to the router. Given how Router is - // defined, and the ResolveURL tests, this should - // be sufficient to "join the dots". 
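- // Here the partial id "wordpress" should resolve to the fully - // qualified cs:~charmers/precise/wordpress-23 entity added below.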
- _, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) - s.assertGet(c, "wordpress/meta/charm-metadata", wordpress.Meta()) -} - -func (s *APISuite) TestMetaCharmNotFound(c *gc.C) { - for i, ep := range metaEndpoints { - c.Logf("test %d: %s", i, ep.name) - expected := params.Error{ - Message: `no matching charm or bundle for "cs:precise/wordpress-23"`, - Code: params.ErrNotFound, - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("precise/wordpress-23/meta/" + ep.name), - ExpectStatus: http.StatusNotFound, - ExpectBody: expected, - }) - expected.Message = `no matching charm or bundle for "cs:wordpress"` - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("wordpress/meta/" + ep.name), - ExpectStatus: http.StatusNotFound, - ExpectBody: expected, - }) - } -} - -var resolveURLTests = []struct { - url string - expect *router.ResolvedURL - notFound bool -}{{ - url: "wordpress", - expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", 25), -}, { - url: "precise/wordpress", - expect: newResolvedURL("cs:~charmers/precise/wordpress-24", 24), -}, { - url: "utopic/bigdata", - expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), -}, { - url: "~charmers/precise/wordpress", - expect: newResolvedURL("cs:~charmers/precise/wordpress-24", -1), -}, { - url: "~charmers/precise/wordpress-99", - expect: newResolvedURL("cs:~charmers/precise/wordpress-99", -1), -}, { - url: "~charmers/wordpress", - expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", -1), -}, { - url: "~charmers/wordpress-24", - expect: newResolvedURL("cs:~charmers/trusty/wordpress-24", -1), -}, { - url: "~bob/wordpress", - expect: newResolvedURL("cs:~bob/trusty/wordpress-1", -1), -}, { - url: "~bob/precise/wordpress", - expect: newResolvedURL("cs:~bob/precise/wordpress-2", -1), -}, { - url: "bigdata", - expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), -}, { - url: "wordpress-24", - expect: newResolvedURL("cs:~charmers/trusty/wordpress-24", 24), -}, { - url: "bundlelovin", - expect: newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), -}, { - url: "wordpress-26", - notFound: true, -}, { - url: "foo", - notFound: true, -}, { - url: "trusty/bigdata", - notFound: true, -}} - -func (s *APISuite) TestResolveURL(c *gc.C) { - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-24", 24)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-24", 24)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-25", 25)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-10", 10)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/saucy/bigdata-99", 99)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/bigdata-10", 10)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/trusty/wordpress-1", -1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/wordpress-2", -1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/other-2", -1)) - s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10)) - s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-10", 10)) - - for i, test := range resolveURLTests { - c.Logf("test %d: %s", i, test.url) - 
url := charm.MustParseReference(test.url) - rurl, err := v4.ResolveURL(s.store, url) - if test.notFound { - c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) - c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for ".*"`) - c.Assert(rurl, gc.IsNil) - continue - } - c.Assert(err, gc.IsNil) - c.Assert(rurl, jc.DeepEquals, test.expect) - } -} - -var serveExpandIdTests = []struct { - about string - url string - expect []params.ExpandedId - err string -}{{ - about: "fully qualified URL", - url: "~charmers/trusty/wordpress-47", - expect: []params.ExpandedId{ - {Id: "cs:~charmers/utopic/wordpress-42"}, - {Id: "cs:~charmers/trusty/wordpress-47"}, - }, -}, { - about: "fully qualified URL that does not exist", - url: "~charmers/trusty/wordpress-99", - expect: []params.ExpandedId{ - {Id: "cs:~charmers/utopic/wordpress-42"}, - {Id: "cs:~charmers/trusty/wordpress-47"}, - }, -}, { - about: "partial URL", - url: "haproxy", - expect: []params.ExpandedId{ - {Id: "cs:trusty/haproxy-1"}, - {Id: "cs:precise/haproxy-1"}, - }, -}, { - about: "single result", - url: "mongo-0", - expect: []params.ExpandedId{ - {Id: "cs:bundle/mongo-0"}, - }, -}, { - about: "fully qualified URL with no entities found", - url: "~charmers/precise/no-such-42", - err: `entity "cs:~charmers/precise/no-such-42" not found`, -}, { - about: "partial URL with no entities found", - url: "no-such", - err: `no matching charm or bundle for "cs:no-such"`, -}} - -func (s *APISuite) TestServeExpandId(c *gc.C) { - // Add a bunch of entities in the database. - // Note that expand-id only cares about entity identifiers, - // so it is ok to reuse the same charm for all the entities. - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-42", 42)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/haproxy-1", 1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/haproxy-1", 1)) - s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/mongo-0", 0)) - s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-0", 0)) - - for i, test := range serveExpandIdTests { - c.Logf("test %d: %s", i, test.about) - storeURL := storeURL(test.url + "/expand-id") - var expectStatus int - var expectBody interface{} - if test.err == "" { - expectStatus = http.StatusOK - expectBody = test.expect - } else { - expectStatus = http.StatusNotFound - expectBody = params.Error{ - Code: params.ErrNotFound, - Message: test.err, - } - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL, - ExpectStatus: expectStatus, - ExpectBody: expectBody, - }) - } -} - -var serveMetaRevisionInfoTests = []struct { - about string - url string - expect params.RevisionInfoResponse - err string -}{{ - about: "fully qualified url", - url: "trusty/wordpress-42", - expect: params.RevisionInfoResponse{ - []*charm.Reference{ - charm.MustParseReference("cs:trusty/wordpress-43"), - charm.MustParseReference("cs:trusty/wordpress-42"), - charm.MustParseReference("cs:trusty/wordpress-41"), - charm.MustParseReference("cs:trusty/wordpress-9"), - }}, -}, { - about: "partial url uses a default series", - url: "wordpress", - expect: params.RevisionInfoResponse{ - []*charm.Reference{ - charm.MustParseReference("cs:trusty/wordpress-43"), - charm.MustParseReference("cs:trusty/wordpress-42"), - charm.MustParseReference("cs:trusty/wordpress-41"), - 
charm.MustParseReference("cs:trusty/wordpress-9"), - }}, -}, { - about: "non-promulgated URL gives non-promulgated revisions (~charmers)", - url: "~charmers/trusty/cinder", - expect: params.RevisionInfoResponse{ - []*charm.Reference{ - charm.MustParseReference("cs:~charmers/trusty/cinder-6"), - charm.MustParseReference("cs:~charmers/trusty/cinder-5"), - charm.MustParseReference("cs:~charmers/trusty/cinder-4"), - charm.MustParseReference("cs:~charmers/trusty/cinder-3"), - charm.MustParseReference("cs:~charmers/trusty/cinder-2"), - charm.MustParseReference("cs:~charmers/trusty/cinder-1"), - charm.MustParseReference("cs:~charmers/trusty/cinder-0"), - }}, -}, { - about: "non-promulgated URL gives non-promulgated revisions (~openstack-charmers)", - url: "~openstack-charmers/trusty/cinder", - expect: params.RevisionInfoResponse{ - []*charm.Reference{ - charm.MustParseReference("cs:~openstack-charmers/trusty/cinder-1"), - charm.MustParseReference("cs:~openstack-charmers/trusty/cinder-0"), - }}, -}, { - about: "promulgated URL gives promulgated revisions", - url: "trusty/cinder", - expect: params.RevisionInfoResponse{ - []*charm.Reference{ - charm.MustParseReference("cs:trusty/cinder-5"), - charm.MustParseReference("cs:trusty/cinder-4"), - charm.MustParseReference("cs:trusty/cinder-3"), - charm.MustParseReference("cs:trusty/cinder-2"), - charm.MustParseReference("cs:trusty/cinder-1"), - charm.MustParseReference("cs:trusty/cinder-0"), - }}, -}, { - about: "no entities found", - url: "precise/no-such-33", - err: `no matching charm or bundle for "cs:precise/no-such-33"`, -}} - -func (s *APISuite) TestServeMetaRevisionInfo(c *gc.C) { - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-41", 41)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-42", 42)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-41", 41)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-42", 42)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-43", 43)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-9", 9)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-42", 42)) - - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-0", -1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-1", -1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-2", 0)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-3", 1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-0", 2)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-1", 3)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-4", -1)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-5", 4)) - s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-6", 5)) - - for i, test := range serveMetaRevisionInfoTests { - c.Logf("test %d: %s", i, test.about) - storeURL := storeURL(test.url + "/meta/revision-info") - var expectStatus int - var expectBody interface{} - if test.err == "" { - expectStatus = http.StatusOK - expectBody = test.expect - } else { - expectStatus = http.StatusNotFound - expectBody = params.Error{ - Code: params.ErrNotFound, - Message: test.err, - } - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - 
Handler: s.srv, - URL: storeURL, - ExpectStatus: expectStatus, - ExpectBody: expectBody, - }) - } -} - -var metaStatsTests = []struct { - // about describes the test. - about string - // url is the entity id to use when making the meta/stats request. - url string - // downloads maps entity ids to a numeric key/value pair where the key is - // the number of days in the past when the entity was downloaded and the - // value is the number of downloads performed that day. - downloads map[string]map[int]int - // expectResponse is the expected response from the meta/stats endpoint. - expectResponse params.StatsResponse -}{{ - about: "no downloads", - url: "trusty/mysql-0", - downloads: map[string]map[int]int{"trusty/mysql-0": {}}, -}, { - about: "single download", - url: "utopic/django-42", - downloads: map[string]map[int]int{ - "utopic/django-42": {0: 1}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 1, - ArchiveDownload: params.StatsCount{ - Total: 1, - Day: 1, - Week: 1, - Month: 1, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 1, - Day: 1, - Week: 1, - Month: 1, - }, - }, -}, { - about: "single download a long time ago", - url: "utopic/django-42", - downloads: map[string]map[int]int{ - "utopic/django-42": {100: 1}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 1, - ArchiveDownload: params.StatsCount{ - Total: 1, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 1, - }, - }, -}, { - about: "some downloads this month", - url: "utopic/wordpress-47", - downloads: map[string]map[int]int{ - "utopic/wordpress-47": {20: 2, 25: 5}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 2 + 5, - ArchiveDownload: params.StatsCount{ - Total: 2 + 5, - Month: 2 + 5, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 2 + 5, - Month: 2 + 5, - }, - }, -}, { - about: "multiple recent downloads", - url: "utopic/django-42", - downloads: map[string]map[int]int{ - "utopic/django-42": {100: 1, 12: 3, 8: 5, 4: 10, 2: 1, 0: 3}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 1 + 3 + 5 + 10 + 1 + 3, - ArchiveDownload: params.StatsCount{ - Total: 1 + 3 + 5 + 10 + 1 + 3, - Day: 3, - Week: 10 + 1 + 3, - Month: 3 + 5 + 10 + 1 + 3, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 1 + 3 + 5 + 10 + 1 + 3, - Day: 3, - Week: 10 + 1 + 3, - Month: 3 + 5 + 10 + 1 + 3, - }, - }, -}, { - about: "sparse downloads", - url: "utopic/django-42", - downloads: map[string]map[int]int{ - "utopic/django-42": {200: 3, 27: 4, 3: 5}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 3 + 4 + 5, - ArchiveDownload: params.StatsCount{ - Total: 3 + 4 + 5, - Week: 5, - Month: 4 + 5, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 3 + 4 + 5, - Week: 5, - Month: 4 + 5, - }, - }, -}, { - about: "bundle downloads", - url: "bundle/django-simple-2", - downloads: map[string]map[int]int{ - "bundle/django-simple-2": {200: 3, 27: 4, 3: 5}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 3 + 4 + 5, - ArchiveDownload: params.StatsCount{ - Total: 3 + 4 + 5, - Week: 5, - Month: 4 + 5, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 3 + 4 + 5, - Week: 5, - Month: 4 + 5, - }, - }, -}, { - about: "different charms", - url: "trusty/rails-47", - downloads: map[string]map[int]int{ - "utopic/rails-47": {200: 3, 27: 4, 3: 5}, - "trusty/rails-47": {20: 2, 6: 10}, - "trusty/mysql-0": {200: 1, 14: 2, 1: 7}, - }, - expectResponse: params.StatsResponse{ - 
ArchiveDownloadCount: 2 + 10, - ArchiveDownload: params.StatsCount{ - Total: 2 + 10, - Week: 10, - Month: 2 + 10, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 2 + 10, - Week: 10, - Month: 2 + 10, - }, - }, -}, { - about: "different revisions of the same charm", - url: "precise/rails-1", - downloads: map[string]map[int]int{ - "precise/rails-0": {300: 1, 200: 2}, - "precise/rails-1": {100: 5, 10: 3, 2: 7}, - "precise/rails-2": {6: 10, 0: 9}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 5 + 3 + 7, - ArchiveDownload: params.StatsCount{ - Total: 5 + 3 + 7, - Week: 7, - Month: 3 + 7, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: (1 + 2) + (5 + 3 + 7) + (10 + 9), - Day: 0 + 0 + 9, - Week: 0 + 7 + (10 + 9), - Month: 0 + (3 + 7) + (10 + 9), - }, - }, -}, { - about: "downloads only in an old revision", - url: "trusty/wordpress-2", - downloads: map[string]map[int]int{ - "precise/wordpress-2": {2: 2, 0: 1}, - "trusty/wordpress-0": {100: 10}, - "trusty/wordpress-2": {}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 10, - }, - }, -}, { - about: "downloads only in newer revision", - url: "utopic/wordpress-0", - downloads: map[string]map[int]int{ - "utopic/wordpress-0": {}, - "utopic/wordpress-1": {31: 7, 10: 1, 3: 2, 0: 1}, - "utopic/wordpress-2": {6: 9, 0: 2}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: (7 + 1 + 2 + 1) + (9 + 2), - Day: 1 + 2, - Week: (2 + 1) + (9 + 2), - Month: (1 + 2 + 1) + (9 + 2), - }, - }, -}, { - about: "non promulgated charms", - url: "~who/utopic/django-0", - downloads: map[string]map[int]int{ - "utopic/django-0": {100: 1, 10: 2, 1: 3, 0: 4}, - "~who/utopic/django-0": {2: 5}, - }, - expectResponse: params.StatsResponse{ - ArchiveDownloadCount: 5, - ArchiveDownload: params.StatsCount{ - Total: 5, - Week: 5, - Month: 5, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: 5, - Week: 5, - Month: 5, - }, - }, -}} - -func (s *APISuite) TestMetaStats(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - // TODO (frankban): remove this call when removing the legacy counts logic. - patchLegacyDownloadCountsEnabled(s.AddCleanup, false) - - today := time.Now() - for i, test := range metaStatsTests { - c.Logf("test %d: %s", i, test.about) - - for id, downloadsPerDay := range test.downloads { - url := &router.ResolvedURL{ - URL: *charm.MustParseReference(id), - PromulgatedRevision: -1, - } - if url.URL.User == "" { - url.URL.User = "charmers" - url.PromulgatedRevision = url.URL.Revision - } - - // Add the required entities to the database. - if url.URL.Series == "bundle" { - s.addPublicBundle(c, "wordpress-simple", url) - } else { - s.addPublicCharm(c, "wordpress", url) - } - - // Simulate the entity was downloaded at the specified dates. 
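- // The counter keys mirror the id layout; for cs:~who/utopic/django-0, - // for instance, the key built below is - // - // {params.StatsArchiveDownload, "utopic", "django", "who", "0"} - // - // and the promulgated counterpart is the same key with an empty - // user element.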
- for daysAgo, downloads := range downloadsPerDay { - date := today.AddDate(0, 0, -daysAgo) - key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, url.URL.User, strconv.Itoa(url.URL.Revision)} - for i := 0; i < downloads; i++ { - err := s.store.IncCounterAtTime(key, date) - c.Assert(err, gc.IsNil) - } - if url.PromulgatedRevision > -1 { - key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, "", strconv.Itoa(url.PromulgatedRevision)} - for i := 0; i < downloads; i++ { - err := s.store.IncCounterAtTime(key, date) - c.Assert(err, gc.IsNil) - } - } - } - } - - // Ensure the meta/stats response reports the correct downloads count. - s.assertGet(c, test.url+"/meta/stats", test.expectResponse) - - // Clean up the collections. - _, err := s.store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - _, err = s.store.DB.StatCounters().RemoveAll(nil) - c.Assert(err, gc.IsNil) - } -} - -var metaStatsWithLegacyDownloadCountsTests = []struct { - about string - count string - expectValue int64 - expectError string -}{{ - about: "no extra-info", -}, { - about: "zero downloads", - count: "0", -}, { - about: "some downloads", - count: "47", - expectValue: 47, -}, { - about: "invalid value", - count: "invalid", - expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", -}} - -// Tests meta/stats with LegacyDownloadCountsEnabled set to true. -// TODO (frankban): remove this test case when removing the legacy counts -// logic. -func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { - patchLegacyDownloadCountsEnabled(s.AddCleanup, true) - id, _ := s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) - url := storeURL("utopic/wordpress-42/meta/stats") - - for i, test := range metaStatsWithLegacyDownloadCountsTests { - c.Logf("test %d: %s", i, test.about) - - // Update the entity extra info if required. - if test.count != "" { - extraInfo := map[string][]byte{ - params.LegacyDownloadStats: []byte(test.count), - } - err := s.store.UpdateEntity(id, bson.D{{ - "$set", bson.D{{"extrainfo", extraInfo}}, - }}) - c.Assert(err, gc.IsNil) - } - - var expectBody interface{} - var expectStatus int - if test.expectError == "" { - // Ensure the downloads count is correctly returned. - expectBody = params.StatsResponse{ - ArchiveDownloadCount: test.expectValue, - ArchiveDownload: params.StatsCount{ - Total: test.expectValue, - }, - ArchiveDownloadAllRevisions: params.StatsCount{ - Total: test.expectValue, - }, - } - expectStatus = http.StatusOK - } else { - // Ensure an error is returned. - expectBody = params.Error{ - Message: test.expectError, - } - expectStatus = http.StatusInternalServerError - } - - // Perform the request. 
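- // With the legacy logic enabled, the value stored in extra-info is - // expected to win over any stats counters, so the totals asserted - // here come straight from test.count.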
- httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - ExpectStatus: expectStatus, - ExpectBody: expectBody, - }) - } -} - -type publishSpec struct { - id *router.ResolvedURL - time string -} - -func (p publishSpec) published() params.Published { - t, err := time.Parse("2006-01-02 15:04", p.time) - if err != nil { - panic(err) - } - return params.Published{&p.id.URL, t} -} - -var publishedCharms = []publishSpec{{ - id: newResolvedURL("cs:~charmers/precise/wordpress-1", 1), - time: "5432-10-12 00:00", -}, { - id: newResolvedURL("cs:~charmers/precise/mysql-1", 1), - time: "5432-10-12 13:00", -}, { - id: newResolvedURL("cs:~charmers/precise/wordpress-2", 2), - time: "5432-10-12 23:59", -}, { - id: newResolvedURL("cs:~charmers/precise/mysql-2", 2), - time: "5432-10-13 00:00", -}, { - id: newResolvedURL("cs:~charmers/precise/mysql-5", 5), - time: "5432-10-13 10:00", -}, { - id: newResolvedURL("cs:~charmers/precise/wordpress-3", 3), - time: "5432-10-14 01:00", -}} - -var changesPublishedTests = []struct { - args string - // expect holds indexes into publishedCharms - // of the entries expected to be returned by changes/published. - expect []int -}{{ - args: "", - expect: []int{5, 4, 3, 2, 1, 0}, -}, { - args: "?start=5432-10-13", - expect: []int{5, 4, 3}, -}, { - args: "?stop=5432-10-13", - expect: []int{4, 3, 2, 1, 0}, -}, { - args: "?start=5432-10-13&stop=5432-10-13", - expect: []int{4, 3}, -}, { - args: "?start=5432-10-12&stop=5432-10-13", - expect: []int{4, 3, 2, 1, 0}, -}, { - args: "?start=5432-10-13&stop=5432-10-12", - expect: []int{}, -}, { - args: "?limit=3", - expect: []int{5, 4, 3}, -}, { - args: "?start=5432-10-12&stop=5432-10-13&limit=2", - expect: []int{4, 3}, -}} - -func (s *APISuite) TestChangesPublished(c *gc.C) { - s.publishCharmsAtKnownTimes(c, publishedCharms) - for i, test := range changesPublishedTests { - c.Logf("test %d: %q", i, test.args) - expect := make([]params.Published, len(test.expect)) - for j, index := range test.expect { - expect[j] = publishedCharms[index].published() - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("changes/published") + test.args, - ExpectBody: expect, - }) - } -} - -var changesPublishedErrorsTests = []struct { - args string - expect params.Error - status int -}{{ - args: "?limit=0", - expect: params.Error{ - Code: params.ErrBadRequest, - Message: "invalid 'limit' value", - }, - status: http.StatusBadRequest, -}, { - args: "?limit=-1", - expect: params.Error{ - Code: params.ErrBadRequest, - Message: "invalid 'limit' value", - }, - status: http.StatusBadRequest, -}, { - args: "?limit=-9999", - expect: params.Error{ - Code: params.ErrBadRequest, - Message: "invalid 'limit' value", - }, - status: http.StatusBadRequest, -}, { - args: "?start=baddate", - expect: params.Error{ - Code: params.ErrBadRequest, - Message: `invalid 'start' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, - }, - status: http.StatusBadRequest, -}, { - args: "?stop=baddate", - expect: params.Error{ - Code: params.ErrBadRequest, - Message: `invalid 'stop' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, - }, - status: http.StatusBadRequest, -}} - -func (s *APISuite) TestChangesPublishedErrors(c *gc.C) { - s.publishCharmsAtKnownTimes(c, publishedCharms) - for i, test := range changesPublishedErrorsTests { - c.Logf("test %d: %q", i, test.args) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: 
s.srv, - URL: storeURL("changes/published") + test.args, - ExpectStatus: test.status, - ExpectBody: test.expect, - }) - } -} - -// publishCharmsAtKnownTimes populates the store with -// a range of charms with known time stamps. -func (s *APISuite) publishCharmsAtKnownTimes(c *gc.C, charms []publishSpec) { - for _, ch := range charms { - id, _ := s.addPublicCharm(c, "wordpress", ch.id) - t := ch.published().PublishTime - err := s.store.UpdateEntity(id, bson.D{{"$set", bson.D{{"uploadtime", t}}}}) - c.Assert(err, gc.IsNil) - } -} - -var debugPprofTests = []struct { - path string - match string -}{{ - path: "debug/pprof/", - match: `(?s).*profiles:.*heap.*`, -}, { - path: "debug/pprof/goroutine?debug=2", - match: "(?s)goroutine [0-9]+.*", -}, { - path: "debug/pprof/cmdline", - match: ".+charmstore.+", -}} - -func (s *APISuite) TestDebugPprof(c *gc.C) { - for i, test := range debugPprofTests { - c.Logf("test %d: %s", i, test.path) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - Header: basicAuthHeader(testUsername, testPassword), - URL: storeURL(test.path), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) - c.Assert(rec.Body.String(), gc.Matches, test.match) - } -} - -func (s *APISuite) TestDebugPprofFailsWithoutAuth(c *gc.C) { - for i, test := range debugPprofTests { - c.Logf("test %d: %s", i, test.path) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.path), - ExpectStatus: http.StatusProxyAuthRequired, - ExpectBody: dischargeRequiredBody, - }) - } -} - -func (s *APISuite) TestHash256Laziness(c *gc.C) { - // TODO frankban: remove this test after updating entities in the - // production db with their SHA256 hash value. Entities are updated by - // running the cshash256 command. - id, _ := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~who/precise/wordpress-0", -1)) - - // Retrieve the SHA256 hash. - entity, err := s.store.FindEntity(id, "blobhash256") - c.Assert(err, gc.IsNil) - c.Assert(entity.BlobHash256, gc.Not(gc.Equals), "") - - hashtesting.CheckSHA256Laziness(c, s.store, &id.URL, func() { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(id.URL.Path() + "/meta/hash256"), - ExpectStatus: http.StatusOK, - ExpectBody: params.HashResponse{ - Sum: entity.BlobHash256, - }, - }) - }) -} - -func basicAuthHeader(username, password string) http.Header { - // It's a pity we have to jump through this hoop. 
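- // (net/http exports no helper to encode the header value directly, - // so we build a throwaway request and let Request.SetBasicAuth do - // the base64 encoding: SetBasicAuth("u", "p"), for example, yields - // "Authorization: Basic dTpw", i.e. base64("u:p").)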
- req := &http.Request{ - Header: make(http.Header), - } - req.SetBasicAuth(username, password) - return req.Header -} - -func entityFieldGetter(fieldName string) metaEndpointExpectedValueGetter { - return entityGetter(func(entity *mongodoc.Entity) interface{} { - field := reflect.ValueOf(entity).Elem().FieldByName(fieldName) - if !field.IsValid() { - panic(errgo.Newf("entity has no field %q", fieldName)) - } - return field.Interface() - }) -} - -func entityGetter(get func(*mongodoc.Entity) interface{}) metaEndpointExpectedValueGetter { - return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - doc, err := store.FindEntity(url) - if err != nil { - return nil, errgo.Mask(err) - } - return get(doc), nil - } -} - -func zipGetter(get func(*zip.Reader) interface{}) metaEndpointExpectedValueGetter { - return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { - doc, err := store.FindEntity(url, "blobname") - if err != nil { - return nil, errgo.Mask(err) - } - blob, size, err := store.BlobStore.Open(doc.BlobName) - if err != nil { - return nil, errgo.Mask(err) - } - defer blob.Close() - content, err := ioutil.ReadAll(blob) - if err != nil { - return nil, errgo.Mask(err) - } - r, err := zip.NewReader(bytes.NewReader(content), size) - if err != nil { - return nil, errgo.Mask(err) - } - return get(r), nil - } -} - -func entitySizeChecker(c *gc.C, data interface{}) { - response := data.(*params.ArchiveSizeResponse) - c.Assert(response.Size, gc.Not(gc.Equals), int64(0)) -} - -func (s *APISuite) addPublicCharm(c *gc.C, charmName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { - ch := storetesting.Charms.CharmDir(charmName) - err := s.store.AddCharmWithArchive(rurl, ch) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) - c.Assert(err, gc.IsNil) - return rurl, ch -} - -func (s *APISuite) addPublicBundle(c *gc.C, bundleName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Bundle) { - bundle := storetesting.Charms.BundleDir(bundleName) - err := s.store.AddBundleWithArchive(rurl, bundle) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) - c.Assert(err, gc.IsNil) - return rurl, bundle -} - -func (s *APISuite) assertPut(c *gc.C, url string, val interface{}) { - body, err := json.Marshal(val) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url), - Method: "PUT", - Do: bakeryDo(nil), - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Username: testUsername, - Password: testPassword, - Body: bytes.NewReader(body), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) - c.Assert(rec.Body.String(), gc.HasLen, 0) -} - -func (s *APISuite) assertGet(c *gc.C, url string, expectVal interface{}) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - Do: bakeryDo(nil), - URL: storeURL(url), - ExpectBody: expectVal, - }) -} - -func (s *APISuite) addLog(c *gc.C, log *mongodoc.Log) { - err := s.store.DB.Logs().Insert(log) - c.Assert(err, gc.Equals, nil) -} - -func mustMarshalJSON(val interface{}) string { - data, err := json.Marshal(val) - if err != nil { - panic(fmt.Errorf("cannot marshal %#v: %v", val, err)) - } - return string(data) -} - -func (s *APISuite) TestMacaroon(c *gc.C) { - var checkedCaveats []string - var mu sync.Mutex - var dischargeError error - 
s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { - mu.Lock() - defer mu.Unlock() - checkedCaveats = append(checkedCaveats, cond+" "+arg) - return []checkers.Caveat{checkers.DeclaredCaveat("username", "who")}, dischargeError - } - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("macaroon"), - Method: "GET", - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) - var m macaroon.Macaroon - err := json.Unmarshal(rec.Body.Bytes(), &m) - c.Assert(err, gc.IsNil) - c.Assert(m.Location(), gc.Equals, "charmstore") - ms, err := httpbakery.DischargeAll(&m, httpbakery.NewHTTPClient(), noInteraction) - c.Assert(err, gc.IsNil) - sort.Strings(checkedCaveats) - c.Assert(checkedCaveats, jc.DeepEquals, []string{ - "is-authenticated-user ", - }) - macaroonCookie, err := httpbakery.NewCookie(ms) - c.Assert(err, gc.IsNil) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("log"), - Do: bakeryDo(nil), - Cookies: []*http.Cookie{macaroonCookie}, - ExpectStatus: http.StatusUnauthorized, - ExpectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "who"`, - }, - }) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.noMacaroonSrv, - URL: storeURL("log"), - ExpectStatus: http.StatusUnauthorized, - ExpectBody: params.Error{ - Message: "authentication failed: missing HTTP auth header", - Code: params.ErrUnauthorized, - }, - }) -} - -var promulgateTests = []struct { - about string - entities []*mongodoc.Entity - baseEntities []*mongodoc.BaseEntity - id string - useHTTPDo bool - method string - caveats []checkers.Caveat - groups map[string][]string - body io.Reader - username string - password string - expectStatus int - expectBody interface{} - expectEntities []*mongodoc.Entity - expectBaseEntities []*mongodoc.BaseEntity -}{{ - about: "unpromulgate base entity", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), - username: testUsername, - password: testPassword, - expectStatus: http.StatusOK, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, -}, { - about: "promulgate base entity", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), - username: testUsername, - password: testPassword, - expectStatus: http.StatusOK, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "unpromulgate base entity not found", - entities: []*mongodoc.Entity{ - 
storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, - id: "~charmers/mysql", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), - username: testUsername, - password: testPassword, - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `no matching charm or bundle for "cs:~charmers/mysql"`, - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "promulgate base entity not found", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/mysql", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), - username: testUsername, - password: testPassword, - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `no matching charm or bundle for "cs:~charmers/mysql"`, - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, -}, { - about: "promulgate base entity not found, fully qualified URL", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/precise/mysql-9", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), - username: testUsername, - password: testPassword, - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `base entity "cs:~charmers/mysql" not found`, - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, -}, { - about: "unpromulgate base entity not found, fully qualified URL", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/precise/mysql-9", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), - username: testUsername, - password: testPassword, - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `base entity "cs:~charmers/mysql" not found`, - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, -}, { - about: "bad method", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - 
storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), - username: testUsername, - password: testPassword, - method: "POST", - expectStatus: http.StatusMethodNotAllowed, - expectBody: params.Error{ - Code: params.ErrMethodNotAllowed, - Message: "POST not allowed", - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "bad JSON", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, - id: "~charmers/wordpress", - body: bytes.NewReader([]byte("tru")), - username: testUsername, - password: testPassword, - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: "bad request: invalid character ' ' in literal true (expecting 'e')", - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "unpromulgate base entity with macaroon", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), - caveats: []checkers.Caveat{ - checkers.DeclaredCaveat(v4.UsernameAttr, "promulgators"), - }, - expectStatus: http.StatusOK, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, -}, { - about: "promulgate base entity with macaroon", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), - caveats: []checkers.Caveat{ - checkers.DeclaredCaveat(v4.UsernameAttr, "promulgators"), - }, - expectStatus: http.StatusOK, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "promulgate base entity with group macaroon", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: 
true}), - caveats: []checkers.Caveat{ - checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), - }, - groups: map[string][]string{ - "bob": {"promulgators", "yellow"}, - }, - expectStatus: http.StatusOK, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "no authorisation", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, - useHTTPDo: true, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), - expectStatus: http.StatusProxyAuthRequired, - expectBody: dischargeRequiredBody, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), - }, -}, { - about: "promulgate base entity with unauthorized user macaroon", - entities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - baseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, - id: "~charmers/wordpress", - body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), - caveats: []checkers.Caveat{ - checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), - }, - groups: map[string][]string{ - "bob": {"yellow"}, - }, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Message: `unauthorized: access denied for user "bob"`, - Code: params.ErrUnauthorized, - }, - expectEntities: []*mongodoc.Entity{ - storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), - }, - expectBaseEntities: []*mongodoc.BaseEntity{ - storetesting.NewBaseEntity("~charmers/wordpress").Build(), - }, -}} - -func (s *APISuite) TestPromulgate(c *gc.C) { - for i, test := range promulgateTests { - c.Logf("%d. 
%s\n", i, test.about) - _, err := s.store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - _, err = s.store.DB.BaseEntities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - for _, e := range test.entities { - err := s.store.DB.Entities().Insert(e) - c.Assert(err, gc.IsNil) - } - for _, e := range test.baseEntities { - err := s.store.DB.BaseEntities().Insert(e) - c.Assert(err, gc.IsNil) - } - if test.method == "" { - test.method = "PUT" - } - client := httpbakery.NewHTTPClient() - s.discharge = func(_, _ string) ([]checkers.Caveat, error) { - return test.caveats, nil - } - s.idM.groups = test.groups - p := httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.id + "/promulgate"), - Method: test.method, - Body: test.body, - Header: http.Header{"Content-Type": {"application/json"}}, - Username: test.username, - Password: test.password, - ExpectStatus: test.expectStatus, - ExpectBody: test.expectBody, - } - if !test.useHTTPDo { - p.Do = bakeryDo(client) - } - httptesting.AssertJSONCall(c, p) - n, err := s.store.DB.Entities().Count() - c.Assert(err, gc.IsNil) - c.Assert(n, gc.Equals, len(test.expectEntities)) - for _, e := range test.expectEntities { - storetesting.AssertEntity(c, s.store.DB.Entities(), e) - } - n, err = s.store.DB.BaseEntities().Count() - c.Assert(err, gc.IsNil) - c.Assert(n, gc.Equals, len(test.expectBaseEntities)) - for _, e := range test.expectBaseEntities { - storetesting.AssertBaseEntity(c, s.store.DB.BaseEntities(), e) - } - } -} - -func (s *APISuite) TestEndpointRequiringBaseEntityWithPromulgatedId(c *gc.C) { - // Add a promulgated charm. - url := newResolvedURL("~charmers/precise/wordpress-23", 23) - s.addPublicCharm(c, "wordpress", url) - - // Unpromulgate the base entity - err := s.store.SetPromulgated(url, false) - c.Assert(err, gc.IsNil) - - // Check that we can still enquire about the promulgation status - // of the entity when using its promulgated URL. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("precise/wordpress-23/meta/promulgated"), - ExpectBody: params.PromulgatedResponse{ - Promulgated: false, - }, - }) -} - -// dischargeRequiredBody returns a httptesting.BodyAsserter that checks -// that the response body contains a discharge required error holding a macaroon -// with a third-party caveat addressed to expectedEntityLocation. -var dischargeRequiredBody httptesting.BodyAsserter = func(c *gc.C, body json.RawMessage) { - var response httpbakery.Error - err := json.Unmarshal(body, &response) - c.Assert(err, gc.IsNil) - c.Assert(response.Code, gc.Equals, httpbakery.ErrDischargeRequired) - c.Assert(response.Message, gc.Equals, "verification failed: no macaroon cookies in request") - c.Assert(response.Info.Macaroon, gc.NotNil) - for _, cav := range response.Info.Macaroon.Caveats() { - if cav.Location != "" { - return - } - } - c.Fatalf("no third party caveat found in response macaroon; caveats %#v", response.Info.Macaroon.Caveats()) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/archive.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/archive.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/archive.go 1970-01-01 00:00:00 +0000 @@ -1,576 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
-
-package v4
-
-import (
-	"archive/zip"
-	"crypto/sha256"
-	"encoding/json"
-	"fmt"
-	"io"
-	"mime"
-	"net/http"
-	"path"
-	"path/filepath"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/juju/utils/jsonhttp"
-	"gopkg.in/errgo.v1"
-	"gopkg.in/juju/charm.v5"
-	"gopkg.in/mgo.v2"
-	"gopkg.in/mgo.v2/bson"
-
-	"gopkg.in/juju/charmstore.v4/internal/charmstore"
-	"gopkg.in/juju/charmstore.v4/internal/mongodoc"
-	"gopkg.in/juju/charmstore.v4/internal/router"
-	"gopkg.in/juju/charmstore.v4/params"
-)
-
-// GET id/archive
-// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchive
-//
-// POST id/archive?hash=sha384hash
-// https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idarchive
-//
-// DELETE id/archive
-// https://github.com/juju/charmstore/blob/v4/docs/API.md#delete-idarchive
-//
-// PUT id/archive?hash=sha384hash
-// This is like POST except that it puts the archive to a known revision
-// rather than choosing a new one. As this feature exists to support legacy
-// ingestion methods, and will be removed in the future, it has no entry
-// in the specification.
-func (h *Handler) serveArchive(id *charm.Reference, w http.ResponseWriter, req *http.Request) error {
-	switch req.Method {
-	case "DELETE":
-		return h.resolveId(h.authId(h.serveDeleteArchive))(id, w, req)
-	case "GET":
-		return h.resolveId(h.authId(h.serveGetArchive))(id, w, req)
-	case "POST", "PUT":
-		if err := h.authorizeUpload(id, req); err != nil {
-			return errgo.Mask(err, errgo.Any)
-		}
-		if req.Method == "POST" {
-			return h.servePostArchive(id, w, req)
-		}
-		return h.servePutArchive(id, w, req)
-	}
-	// TODO(rog) params.ErrMethodNotAllowed
-	return errgo.Newf("method not allowed")
-}
-
-func (h *Handler) authorizeUpload(id *charm.Reference, req *http.Request) error {
-	if id.User == "" {
-		return badRequestf(nil, "user not specified in entity upload URL %q", id)
-	}
-	store := h.pool.Store()
-	defer store.Close()
-	// Note that we pass a nil entity URL to authorizeWithPerms, because
-	// we haven't got a resolved URL at this point. At some point in the
-	// future, we may want to allow is-entity first-party caveats when
-	// uploading, at which point we will need to rethink this a little.
-	baseURL := *id
-	baseURL.Revision = -1
-	baseURL.Series = ""
-	baseEntity, err := store.FindBaseEntity(id, "acls")
-	if err == nil {
-		return h.authorizeWithPerms(req, baseEntity.ACLs.Read, baseEntity.ACLs.Write, nil)
-	}
-	if errgo.Cause(err) != params.ErrNotFound {
-		return errgo.Notef(err, "cannot retrieve entity %q for authorization", id)
-	}
-	// The base entity does not currently exist, so we default to
-	// assuming write permissions for the entity user.
-	return h.authorizeWithPerms(req, nil, []string{id.User}, nil)
-}
-
-func (h *Handler) serveGetArchive(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error {
-	store := h.pool.Store()
-	defer store.Close()
-	r, size, hash, err := store.OpenBlob(id)
-	if err != nil {
-		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
-	}
-	defer r.Close()
-	header := w.Header()
-	setArchiveCacheControl(w.Header(), fullySpecified)
-	header.Set(params.ContentHashHeader, hash)
-	header.Set(params.EntityIdHeader, id.String())
-
-	if StatsEnabled(req) {
-		store.IncrementDownloadCountsAsync(id)
-	}
-	// TODO(rog) should we set connection=close here?
-	// See https://codereview.appspot.com/5958045
-	serveContent(w, req, size, r)
-	return nil
-}
-
-func (h *Handler) serveDeleteArchive(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error {
-	store := h.pool.Store()
-	defer store.Close()
-	// Retrieve the entity blob name from the database.
-	blobName, _, err := store.BlobNameAndHash(id)
-	if err != nil {
-		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
-	}
-	// Remove the entity.
-	if err := store.DB.Entities().RemoveId(&id.URL); err != nil {
-		return errgo.Notef(err, "cannot remove %s", id)
-	}
-	// Remove the reference to the archive from the blob store.
-	if err := store.BlobStore.Remove(blobName); err != nil {
-		return errgo.Notef(err, "cannot remove blob %s", blobName)
-	}
-	store.IncCounterAsync(charmstore.EntityStatsKey(&id.URL, params.StatsArchiveDelete))
-	return nil
-}
-
-func (h *Handler) updateStatsArchiveUpload(id *charm.Reference, err *error) {
-	store := h.pool.Store()
-	defer store.Close()
-	// Upload stats don't include revision: it is assumed that each
-	// entity revision is only uploaded once.
-	id.Revision = -1
-	kind := params.StatsArchiveUpload
-	if *err != nil {
-		kind = params.StatsArchiveFailedUpload
-	}
-	store.IncCounterAsync(charmstore.EntityStatsKey(id, kind))
-}
-
-func (h *Handler) servePostArchive(id *charm.Reference, w http.ResponseWriter, req *http.Request) (err error) {
-	defer h.updateStatsArchiveUpload(id, &err)
-
-	if id.Series == "" {
-		return badRequestf(nil, "series not specified")
-	}
-	if id.Revision != -1 {
-		return badRequestf(nil, "revision specified, but should not be specified")
-	}
-	if id.User == "" {
-		return badRequestf(nil, "user not specified")
-	}
-	hash := req.Form.Get("hash")
-	if hash == "" {
-		return badRequestf(nil, "hash parameter not specified")
-	}
-	if req.ContentLength == -1 {
-		return badRequestf(nil, "Content-Length not specified")
-	}
-
-	oldId, oldHash, err := h.latestRevisionInfo(id)
-	if err != nil && errgo.Cause(err) != params.ErrNotFound {
-		return errgo.Notef(err, "cannot get hash of latest revision")
-	}
-	if oldHash == hash {
-		// The hash matches the hash of the latest revision, so
-		// no need to upload anything.
-		return jsonhttp.WriteJSON(w, http.StatusOK, &params.ArchiveUploadResponse{
-			Id: oldId,
-		})
-	}
-	rid := &router.ResolvedURL{
-		URL: *id,
-	}
-	// Choose the next revision number for the upload.
-	if oldId == nil {
-		rid.URL.Revision = 0
-	} else {
-		rid.URL.Revision = oldId.Revision + 1
-	}
-	rid.PromulgatedRevision, err = h.getNewPromulgatedRevision(id)
-	if err != nil {
-		return errgo.Mask(err)
-	}
-
-	if err := h.addBlobAndEntity(rid, req.Body, hash, req.ContentLength); err != nil {
-		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
-	}
-	return jsonhttp.WriteJSON(w, http.StatusOK, &params.ArchiveUploadResponse{
-		Id:            &rid.URL,
-		PromulgatedId: rid.PromulgatedURL(),
-	})
-}
-
-func (h *Handler) servePutArchive(id *charm.Reference, w http.ResponseWriter, req *http.Request) (err error) {
-	defer h.updateStatsArchiveUpload(id, &err)
-	if id.Series == "" {
-		return badRequestf(nil, "series not specified")
-	}
-	if id.Revision == -1 {
-		return badRequestf(nil, "revision not specified")
-	}
-	if id.User == "" {
-		return badRequestf(nil, "user not specified")
-	}
-	hash := req.Form.Get("hash")
-	if hash == "" {
-		return badRequestf(nil, "hash parameter not specified")
-	}
-	if req.ContentLength == -1 {
-		return badRequestf(nil, "Content-Length not specified")
-	}
-	rid := &router.ResolvedURL{
-		URL:                 *id,
-		PromulgatedRevision: -1,
-	}
-	// Get the PromulgatedURL from the request parameters. When ingesting,
-	// entities might not be added in order and the promulgated revision might
-	// not match the non-promulgated revision, so the full promulgated URL
-	// needs to be specified.
-	promulgatedURL := req.Form.Get("promulgated")
-	var pid *charm.Reference
-	if promulgatedURL != "" {
-		pid, err = charm.ParseReference(promulgatedURL)
-		if err != nil {
-			return badRequestf(err, "cannot parse promulgated url")
-		}
-		if pid.User != "" {
-			return badRequestf(nil, "promulgated URL cannot have a user")
-		}
-		if pid.Name != id.Name {
-			return badRequestf(nil, "promulgated URL has incorrect charm name")
-		}
-		if pid.Series != id.Series {
-			return badRequestf(nil, "promulgated URL has incorrect series")
-		}
-		if pid.Revision == -1 {
-			return badRequestf(nil, "promulgated URL has no revision")
-		}
-		rid.PromulgatedRevision = pid.Revision
-	}
-	if err := h.addBlobAndEntity(rid, req.Body, hash, req.ContentLength); err != nil {
-		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
-	}
-	return jsonhttp.WriteJSON(w, http.StatusOK, &params.ArchiveUploadResponse{
-		Id:            id,
-		PromulgatedId: rid.PromulgatedURL(),
-	})
-}
-
-// addBlobAndEntity streams the contents of the given body
-// to the blob store and adds an entity record for it.
-// The hash and contentLength parameters hold
-// the content hash and the content length respectively.
-func (h *Handler) addBlobAndEntity(id *router.ResolvedURL, body io.Reader, hash string, contentLength int64) (err error) {
-	name := bson.NewObjectId().Hex()
-
-	// Calculate the SHA256 hash while uploading the blob to the blob store.
-	hash256 := sha256.New()
-	body = io.TeeReader(body, hash256)
-
-	store := h.pool.Store()
-	defer store.Close()
-	// Upload the actual blob, and make sure that it is removed
-	// if we fail later.
-	err = store.BlobStore.PutUnchallenged(body, name, contentLength, hash)
-	if err != nil {
-		return errgo.Notef(err, "cannot put archive blob")
-	}
-	r, _, err := store.BlobStore.Open(name)
-	if err != nil {
-		return errgo.Notef(err, "cannot open newly created blob")
-	}
-	defer r.Close()
-	defer func() {
-		if err != nil {
-			store.BlobStore.Remove(name)
-			// TODO(rog) log if remove fails.
-		}
-	}()
-
-	// Add the entity entry to the charm store.
- sum256 := fmt.Sprintf("%x", hash256.Sum(nil)) - if err := h.addEntity(id, r, name, hash, sum256, contentLength); err != nil { - return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload)) - } - return nil -} - -// addEntity adds the entity represented by the contents -// of the given reader, associating it with the given id. -func (h *Handler) addEntity(id *router.ResolvedURL, r io.ReadSeeker, blobName, hash, hash256 string, contentLength int64) error { - store := h.pool.Store() - defer store.Close() - readerAt := charmstore.ReaderAtSeeker(r) - p := charmstore.AddParams{ - URL: id, - BlobName: blobName, - BlobHash: hash, - BlobHash256: hash256, - BlobSize: contentLength, - } - if id.URL.Series == "bundle" { - b, err := charm.ReadBundleArchiveFromReader(readerAt, contentLength) - if err != nil { - return errgo.Notef(err, "cannot read bundle archive") - } - bundleData := b.Data() - charms, err := h.bundleCharms(bundleData.RequiredCharms()) - if err != nil { - return errgo.Notef(err, "cannot retrieve bundle charms") - } - if err := bundleData.VerifyWithCharms(verifyConstraints, charms); err != nil { - // TODO frankban: use multiError (defined in internal/router). - return errgo.Notef(verificationError(err), "bundle verification failed") - } - if err := store.AddBundle(b, p); err != nil { - return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload)) - } - return nil - } - ch, err := charm.ReadCharmArchiveFromReader(readerAt, contentLength) - if err != nil { - return errgo.Notef(err, "cannot read charm archive") - } - if err := checkCharmIsValid(ch); err != nil { - return errgo.Mask(err) - } - if err := store.AddCharm(ch, p); err != nil { - return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload)) - } - return nil -} - -func checkCharmIsValid(ch charm.Charm) error { - m := ch.Meta() - for _, rels := range []map[string]charm.Relation{m.Provides, m.Requires, m.Peers} { - if err := checkRelationsAreValid(rels); err != nil { - return errgo.Mask(err) - } - } - return nil -} - -func checkRelationsAreValid(rels map[string]charm.Relation) error { - for _, rel := range rels { - if rel.Name == "relation-name" { - return errgo.Newf("relation %s has almost certainly not been changed from the template", rel.Name) - } - if rel.Interface == "interface-name" { - return errgo.Newf("interface %s in relation %s has almost certainly not been changed from the template", rel.Interface, rel.Name) - } - } - return nil -} - -func (h *Handler) latestRevisionInfo(id *charm.Reference) (*charm.Reference, string, error) { - store := h.pool.Store() - defer store.Close() - entities, err := store.FindEntities(id, "_id", "blobhash") - if err != nil { - return nil, "", errgo.Mask(err) - } - if len(entities) == 0 { - return nil, "", params.ErrNotFound - } - latest := entities[0] - for _, entity := range entities { - if entity.URL.Revision > latest.URL.Revision { - latest = entity - } - } - return latest.URL, latest.BlobHash, nil -} - -func verifyConstraints(s string) error { - // TODO(rog) provide some actual constraints checking here. 
- return nil -} - -// GET id/archive/path -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchivepath -func (h *Handler) serveArchiveFile(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error { - store := h.pool.Store() - defer store.Close() - r, size, _, err := store.OpenBlob(id) - if err != nil { - return errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - defer r.Close() - zipReader, err := zip.NewReader(charmstore.ReaderAtSeeker(r), size) - if err != nil { - return errgo.Notef(err, "cannot read archive data for %s", id) - } - - // Retrieve the requested file from the zip archive. - filePath := strings.TrimPrefix(path.Clean(req.URL.Path), "/") - for _, file := range zipReader.File { - if path.Clean(file.Name) != filePath { - continue - } - // The file is found. - fileInfo := file.FileInfo() - if fileInfo.IsDir() { - return errgo.WithCausef(nil, params.ErrForbidden, "directory listing not allowed") - } - content, err := file.Open() - if err != nil { - return errgo.Notef(err, "unable to read file %q", filePath) - } - defer content.Close() - // Send the response to the client. - ctype := mime.TypeByExtension(filepath.Ext(filePath)) - if ctype != "" { - w.Header().Set("Content-Type", ctype) - } - w.Header().Set("Content-Length", strconv.FormatInt(fileInfo.Size(), 10)) - setArchiveCacheControl(w.Header(), fullySpecified) - w.WriteHeader(http.StatusOK) - io.Copy(w, content) - return nil - } - return errgo.WithCausef(nil, params.ErrNotFound, "file %q not found in the archive", filePath) -} - -func (h *Handler) bundleCharms(ids []string) (map[string]charm.Charm, error) { - store := h.pool.Store() - defer store.Close() - numIds := len(ids) - urls := make([]*charm.Reference, 0, numIds) - idKeys := make([]string, 0, numIds) - // TODO resolve ids concurrently. - for _, id := range ids { - url, err := charm.ParseReference(id) - if err != nil { - // Ignore this error. This will be caught in the bundle - // verification process (see bundleData.VerifyWithCharms) and will - // be returned to the user along with other bundle errors. - continue - } - e, err := store.FindBestEntity(url) - if err != nil { - if errgo.Cause(err) == params.ErrNotFound { - // Ignore this error too, for the same reasons - // described above. - continue - } - return nil, err - } - urls = append(urls, e.URL) - idKeys = append(idKeys, id) - } - var entities []mongodoc.Entity - if err := store.DB.Entities(). - Find(bson.D{{"_id", bson.D{{"$in", urls}}}}). - All(&entities); err != nil { - return nil, err - } - - entityCharms := make(map[charm.Reference]charm.Charm, len(entities)) - for i, entity := range entities { - entityCharms[*entity.URL] = &entityCharm{entities[i]} - } - charms := make(map[string]charm.Charm, len(urls)) - for i, url := range urls { - if ch, ok := entityCharms[*url]; ok { - charms[idKeys[i]] = ch - } - } - return charms, nil -} - -// entityCharm implements charm.Charm. -type entityCharm struct { - mongodoc.Entity -} - -func (e *entityCharm) Meta() *charm.Meta { - return e.CharmMeta -} - -func (e *entityCharm) Metrics() *charm.Metrics { - return nil -} - -func (e *entityCharm) Config() *charm.Config { - return e.CharmConfig -} - -func (e *entityCharm) Actions() *charm.Actions { - return e.CharmActions -} - -func (e *entityCharm) Revision() int { - return e.URL.Revision -} - -// verificationError returns an error whose string representation is a list of -// all the verification error messages stored in err, in JSON format. 
-// Note that err must be a *charm.VerificationError. -func verificationError(err error) error { - verr, ok := err.(*charm.VerificationError) - if !ok { - return err - } - messages := make([]string, len(verr.Errors)) - for i, err := range verr.Errors { - messages[i] = err.Error() - } - sort.Strings(messages) - encodedMessages, err := json.Marshal(messages) - if err != nil { - // This should never happen. - return err - } - return errgo.New(string(encodedMessages)) -} - -var ( - // archiveCacheVersionedMaxAge specifies the cache expiry duration for items - // returned from the archive where the id is fully specified. - archiveCacheVersionedMaxAge = 365 * 24 * time.Hour - - // archiveCacheNonVersionedMaxAge specifies the cache expiry duration for items - // returned from the archive where the id is not fully specified. - archiveCacheNonVersionedMaxAge = 5 * time.Minute -) - -// setArchiveCacheControl sets any cache control headers -// in a response to an archive-derived endpoint. -// The idFullySpecified header specifies whether -// the entity id in the request was fully specified by the client. -func setArchiveCacheControl(h http.Header, idFullySpecified bool) { - age := archiveCacheVersionedMaxAge - if !idFullySpecified { - age = archiveCacheNonVersionedMaxAge - } - seconds := int(age / time.Second) - h.Set("Cache-Control", "public, max-age="+strconv.Itoa(seconds)) -} - -// getNewPromulgatedRevision returns the promulgated revision -// to give to a newly uploaded charm with the given id. -// It returns -1 if the charm is not promulgated. -func (h *Handler) getNewPromulgatedRevision(id *charm.Reference) (int, error) { - store := h.pool.Store() - defer store.Close() - baseEntity, err := store.FindBaseEntity(id, "promulgated") - if err != nil && errgo.Cause(err) != params.ErrNotFound { - return 0, errgo.Mask(err) - } - if baseEntity == nil || !baseEntity.Promulgated { - return -1, nil - } - query := store.EntitiesQuery(&charm.Reference{ - Series: id.Series, - Name: id.Name, - Revision: -1, - }) - var entity mongodoc.Entity - err = query.Sort("-promulgated-revision").Select(bson.D{{"promulgated-revision", 1}}).One(&entity) - if err == mgo.ErrNotFound { - return 0, nil - } - if err != nil { - return 0, errgo.Mask(err) - } - return entity.PromulgatedRevision + 1, nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/archive_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/archive_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/archive_test.go 1970-01-01 00:00:00 +0000 @@ -1,1496 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
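As a worked example of the cache expiry values defined above: setArchiveCacheControl emits the chosen duration as whole seconds, so a fully specified id is served with

	Cache-Control: public, max-age=31536000

(365 days * 24 hours * 3600 seconds = 31536000), while a partially specified id gets the five-minute value, max-age=300.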
- -package v4_test - -import ( - "archive/zip" - "bytes" - "crypto/sha256" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "strconv" - "strings" - "sync" - "time" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - charmtesting "gopkg.in/juju/charm.v5/testing" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/blobstore" - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/storetesting/stats" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -type ArchiveSuite struct { - commonSuite -} - -var _ = gc.Suite(&ArchiveSuite{}) - -func (s *ArchiveSuite) TestGet(c *gc.C) { - patchArchiveCacheAges(s) - id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) - wordpress := s.assertUploadCharm(c, "POST", id, "wordpress") - err := s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - - archiveBytes, err := ioutil.ReadFile(wordpress.Path) - c.Assert(err, gc.IsNil) - - archiveUrl := storeURL("~charmers/precise/wordpress-0/archive") - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: archiveUrl, - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) - c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) - c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") - assertCacheControl(c, rec.Header(), true) - - // Check that the HTTP range logic is plugged in OK. If this - // is working, we assume that the whole thing is working OK, - // as net/http is well-tested. - rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: archiveUrl, - Header: http.Header{"Range": {"bytes=10-100"}}, - }) - c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes())) - c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1) - c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes[10:101]) - c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) - c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") - assertCacheControl(c, rec.Header(), true) -} - -func (s *ArchiveSuite) TestGetWithPartialId(c *gc.C) { - id := newResolvedURL("cs:~charmers/utopic/wordpress-42", -1) - err := s.store.AddCharmWithArchive( - id, - storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("~charmers/wordpress/archive"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - // The complete entity id can be retrieved from the response header. 
- c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.URL.String()) -} - -func (s *ArchiveSuite) TestGetPromulgatedWithPartialId(c *gc.C) { - id := newResolvedURL("cs:~charmers/utopic/wordpress-42", 42) - err := s.store.AddCharmWithArchive( - id, - storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("wordpress/archive"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - // The complete entity id can be retrieved from the response header. - c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.PromulgatedURL().String()) -} - -func (s *ArchiveSuite) TestGetCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - for i, id := range []*router.ResolvedURL{ - newResolvedURL("~who/utopic/mysql-42", 42), - } { - c.Logf("test %d: %s", i, id) - - // Add a charm to the database (including the archive). - err := s.store.AddCharmWithArchive(id, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - - // Download the charm archive using the API. - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(id.URL.Path() + "/archive"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - - // Check that the downloads count for the entity has been updated. - key := []string{params.StatsArchiveDownload, "utopic", "mysql", id.URL.User, "42"} - stats.CheckCounterSum(c, s.store, key, false, 1) - // Check that the promulgated download count for the entity has also been updated - key = []string{params.StatsArchiveDownload, "utopic", "mysql", "", "42"} - stats.CheckCounterSum(c, s.store, key, false, 1) - } -} - -func (s *ArchiveSuite) TestGetCountersDisabled(c *gc.C) { - url := newResolvedURL("~charmers/utopic/mysql-42", 42) - // Add a charm to the database (including the archive). - err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - // Download the charm archive using the API, passing stats=0. - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/archive?stats=0"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - - // Check that the downloads count for the entity has not been updated. 
- key := []string{params.StatsArchiveDownload, "utopic", "mysql", "", "42"} - stats.CheckCounterSum(c, s.store, key, false, 0) -} - -var archivePostErrorsTests = []struct { - about string - path string - noContentLength bool - expectStatus int - expectMessage string - expectCode params.ErrorCode -}{{ - about: "no series", - path: "~charmers/wordpress/archive", - expectStatus: http.StatusBadRequest, - expectMessage: "series not specified", - expectCode: params.ErrBadRequest, -}, { - about: "revision specified", - path: "~charmers/precise/wordpress-23/archive", - expectStatus: http.StatusBadRequest, - expectMessage: "revision specified, but should not be specified", - expectCode: params.ErrBadRequest, -}, { - about: "no hash given", - path: "~charmers/precise/wordpress/archive", - expectStatus: http.StatusBadRequest, - expectMessage: "hash parameter not specified", - expectCode: params.ErrBadRequest, -}, { - about: "no content length", - path: "~charmers/precise/wordpress/archive?hash=1234563", - noContentLength: true, - expectStatus: http.StatusBadRequest, - expectMessage: "Content-Length not specified", - expectCode: params.ErrBadRequest, -}} - -func (s *ArchiveSuite) TestPostErrors(c *gc.C) { - type exoticReader struct { - io.Reader - } - for i, test := range archivePostErrorsTests { - c.Logf("test %d: %s", i, test.about) - var body io.Reader = strings.NewReader("bogus") - if test.noContentLength { - // net/http will automatically add a Content-Length header - // if it sees *strings.Reader, but not if it's a type it doesn't - // know about. - body = exoticReader{body} - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.path), - Method: "POST", - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: body, - Username: testUsername, - Password: testPassword, - ExpectStatus: test.expectStatus, - ExpectBody: params.Error{ - Message: test.expectMessage, - Code: test.expectCode, - }, - }) - } -} - -func (s *ArchiveSuite) TestConcurrentUploads(c *gc.C) { - wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - f, err := os.Open(wordpress.Path) - c.Assert(err, gc.IsNil) - - var buf bytes.Buffer - _, err = io.Copy(&buf, f) - c.Assert(err, gc.IsNil) - - hash, _ := hashOf(bytes.NewReader(buf.Bytes())) - - srv := httptest.NewServer(s.srv) - defer srv.Close() - - // Our strategy for testing concurrent uploads is as follows: We - // repeat uploading a bunch of simultaneous uploads to the same - // charm. Each upload should either succeed, or fail with an - // ErrDuplicateUpload error. We make sure that all replies are - // like this, and that at least one duplicate upload error is - // found, so that we know we've tested that error path. - - errorBodies := make(chan io.ReadCloser) - - // upload performs one upload of the testing charm. - // It sends the response body on the errorBodies channel when - // it finds an error response. 
- upload := func() { - c.Logf("uploading") - body := bytes.NewReader(buf.Bytes()) - url := srv.URL + storeURL("~charmers/precise/wordpress/archive?hash="+hash) - req, err := http.NewRequest("POST", url, body) - c.Assert(err, gc.IsNil) - req.Header.Set("Content-Type", "application/zip") - req.SetBasicAuth(testUsername, testPassword) - resp, err := http.DefaultClient.Do(req) - if !c.Check(err, gc.IsNil) { - return - } - if resp.StatusCode == http.StatusOK { - resp.Body.Close() - return - } - errorBodies <- resp.Body - } - - // The try loop continues concurrently uploading - // charms until it is told to stop (by closing the try - // channel). It then signals that it has terminated - // by closing errorBodies. - try := make(chan struct{}) - go func() { - for { - for _ = range try { - var wg sync.WaitGroup - for p := 0; p < 5; p++ { - wg.Add(1) - go func() { - upload() - wg.Done() - }() - } - wg.Wait() - } - close(errorBodies) - } - }() - - // We continue the loop until we have found an - // error (or the maximum iteration count has - // been exceeded). - foundError := false - count := 0 -loop: - for { - select { - case body, ok := <-errorBodies: - if !ok { - // The try loop has terminated, - // so we need to stop too. - break loop - } - dec := json.NewDecoder(body) - var errResp params.Error - err := dec.Decode(&errResp) - body.Close() - c.Assert(err, gc.IsNil) - c.Assert(errResp, jc.DeepEquals, params.Error{ - Message: "duplicate upload", - Code: params.ErrDuplicateUpload, - }) - // We've found the error we're looking for, - // so we signal to the try loop that it can stop. - // We will process any outstanding error bodies, - // before seeing errorBodies closed and exiting - // the loop. - foundError = true - if try != nil { - close(try) - try = nil - } - case try <- struct{}{}: - // In cases we've seen, the actual maximum value of - // count is 1, but let's allow for serious scheduler vagaries. - if count++; count > 200 { - c.Fatalf("200 tries with no duplicate error") - } - } - } - if !foundError { - c.Errorf("no duplicate-upload errors found") - } -} - -func (s *ArchiveSuite) TestPostCharm(c *gc.C) { - // A charm that did not exist before should get revision 0. - s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") - - // Subsequent charm uploads should increment the - // revision by 1. - s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-1", -1), "mysql") -} - -func (s *ArchiveSuite) TestPutCharm(c *gc.C) { - s.assertUploadCharm( - c, - "PUT", - newResolvedURL("~charmers/precise/wordpress-3", 3), - "wordpress", - ) - - s.assertUploadCharm( - c, - "PUT", - newResolvedURL("~charmers/precise/wordpress-1", -1), - "wordpress", - ) - - // Check that we get a duplicate-upload error if we try to - // upload to the same revision again. - s.assertUploadCharmError( - c, - "PUT", - charm.MustParseReference("~charmers/precise/wordpress-3"), - nil, - "mysql", - http.StatusInternalServerError, - params.Error{ - Message: "duplicate upload", - Code: params.ErrDuplicateUpload, - }, - ) - - // Check we get an error if promulgated url already uploaded. - s.assertUploadCharmError( - c, - "PUT", - charm.MustParseReference("~charmers/precise/wordpress-4"), - charm.MustParseReference("precise/wordpress-3"), - "wordpress", - http.StatusInternalServerError, - params.Error{ - Message: "duplicate upload", - Code: params.ErrDuplicateUpload, - }, - ) - - // Check we get an error if promulgated url has user. 
- s.assertUploadCharmError( - c, - "PUT", - charm.MustParseReference("~charmers/precise/wordpress-4"), - charm.MustParseReference("~charmers/precise/wordpress-4"), - "mysql", - http.StatusBadRequest, - params.Error{ - Message: "promulgated URL cannot have a user", - Code: params.ErrBadRequest, - }, - ) - - // Check we get an error if promulgated url has different name. - s.assertUploadCharmError( - c, - "PUT", - charm.MustParseReference("~charmers/precise/wordpress-4"), - charm.MustParseReference("precise/mysql-4"), - "mysql", - http.StatusBadRequest, - params.Error{ - Message: "promulgated URL has incorrect charm name", - Code: params.ErrBadRequest, - }, - ) -} - -func (s *ArchiveSuite) TestPostBundle(c *gc.C) { - // Upload the required charms. - err := s.store.AddCharmWithArchive( - newResolvedURL("cs:~charmers/utopic/mysql-42", 42), - storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - err = s.store.AddCharmWithArchive( - newResolvedURL("cs:~charmers/utopic/wordpress-47", 47), - storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) - c.Assert(err, gc.IsNil) - err = s.store.AddCharmWithArchive( - newResolvedURL("cs:~charmers/utopic/logging-1", 1), - storetesting.Charms.CharmArchive(c.MkDir(), "logging")) - c.Assert(err, gc.IsNil) - - // A bundle that did not exist before should get revision 0. - s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-0", -1), "wordpress-simple") - - // Subsequent bundle uploads should increment the - // revision by 1. - s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") - - // Uploading the same archive twice should not increment the revision... - s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") - - // ... but uploading an archive used by a previous revision should. - s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-2", -1), "wordpress-simple") -} - -func (s *ArchiveSuite) TestPostHashMismatch(c *gc.C) { - content := []byte("some content") - hash, _ := hashOf(bytes.NewReader(content)) - - // Corrupt the content. 
- copy(content, "bogus") - path := fmt.Sprintf("~charmers/precise/wordpress/archive?hash=%s", hash) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(path), - Method: "POST", - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: bytes.NewReader(content), - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusInternalServerError, - ExpectBody: params.Error{ - Message: "cannot put archive blob: hash mismatch", - }, - }) -} - -func invalidZip() io.ReadSeeker { - return strings.NewReader("invalid zip content") -} - -func (s *ArchiveSuite) TestPostInvalidCharmZip(c *gc.C) { - s.assertCannotUpload(c, "~charmers/precise/wordpress", invalidZip(), "cannot read charm archive: zip: not a valid zip file") -} - -func (s *ArchiveSuite) TestPostInvalidBundleZip(c *gc.C) { - s.assertCannotUpload(c, "~charmers/bundle/wordpress", invalidZip(), "cannot read bundle archive: zip: not a valid zip file") -} - -var postInvalidCharmMetadataTests = []struct { - about string - spec charmtesting.CharmSpec - expectError string -}{{ - about: "bad provider relation name", - spec: charmtesting.CharmSpec{ - Meta: ` -name: foo -summary: bar -description: d -provides: - relation-name: - interface: baz -`, - }, - expectError: "relation relation-name has almost certainly not been changed from the template", -}, { - about: "bad provider interface name", - spec: charmtesting.CharmSpec{ - Meta: ` -name: foo -summary: bar -description: d -provides: - baz: - interface: interface-name -`, - }, - expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", -}, { - about: "bad requirer relation name", - spec: charmtesting.CharmSpec{ - Meta: ` -name: foo -summary: bar -description: d -requires: - relation-name: - interface: baz -`, - }, - expectError: "relation relation-name has almost certainly not been changed from the template", -}, { - about: "bad requirer interface name", - spec: charmtesting.CharmSpec{ - Meta: ` -name: foo -summary: bar -description: d -requires: - baz: - interface: interface-name -`, - }, - expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", -}, { - about: "bad peer relation name", - spec: charmtesting.CharmSpec{ - Meta: ` -name: foo -summary: bar -description: d -peers: - relation-name: - interface: baz -`, - }, - expectError: "relation relation-name has almost certainly not been changed from the template", -}, { - about: "bad peer interface name", - spec: charmtesting.CharmSpec{ - Meta: ` -name: foo -summary: bar -description: d -peers: - baz: - interface: interface-name -`, - }, - expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", -}} - -func (s *ArchiveSuite) TestPostInvalidCharmMetadata(c *gc.C) { - for i, test := range postInvalidCharmMetadataTests { - c.Logf("test %d: %s", i, test.about) - ch := charmtesting.NewCharm(c, test.spec) - r := bytes.NewReader(ch.ArchiveBytes()) - s.assertCannotUpload(c, "~charmers/trusty/wordpress", r, test.expectError) - } -} - -func (s *ArchiveSuite) TestPostInvalidBundleData(c *gc.C) { - path := storetesting.Charms.BundleArchivePath(c.MkDir(), "bad") - f, err := os.Open(path) - c.Assert(err, gc.IsNil) - defer f.Close() - // Here we exercise both bundle internal verification (bad relation) and - // validation with respect to charms (wordpress and mysql are missing). 
- expectErr := `bundle verification failed: [` + - `"relation [\"foo:db\" \"mysql:server\"] refers to service \"foo\" not defined in this bundle",` + - `"service \"mysql\" refers to non-existent charm \"mysql\"",` + - `"service \"wordpress\" refers to non-existent charm \"wordpress\""]` - s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, expectErr) -} - -func (s *ArchiveSuite) TestPostCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") - - // Check that the upload count for the entity has been updated. - key := []string{params.StatsArchiveUpload, "precise", "wordpress", "charmers"} - stats.CheckCounterSum(c, s.store, key, false, 1) -} - -func (s *ArchiveSuite) TestPostFailureCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - hash, _ := hashOf(invalidZip()) - doPost := func(url string, expectCode int) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url), - Method: "POST", - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: invalidZip(), - Username: testUsername, - Password: testPassword, - }) - c.Assert(rec.Code, gc.Equals, expectCode, gc.Commentf("body: %s", rec.Body.Bytes())) - } - - // Send a first invalid request (revision specified). - doPost("~charmers/utopic/wordpress-42/archive", http.StatusBadRequest) - // Send a second invalid request (no hash). - doPost("~charmers/utopic/wordpress/archive", http.StatusBadRequest) - // Send a third invalid request (invalid zip). - doPost("~charmers/utopic/wordpress/archive?hash="+hash, http.StatusInternalServerError) - - // Check that the failed upload count for the entity has been updated. - key := []string{params.StatsArchiveFailedUpload, "utopic", "wordpress", "charmers"} - stats.CheckCounterSum(c, s.store, key, false, 3) -} - -func (s *ArchiveSuite) assertCannotUpload(c *gc.C, id string, content io.ReadSeeker, errorMessage string) { - hash, size := hashOf(content) - _, err := content.Seek(0, 0) - c.Assert(err, gc.IsNil) - - path := fmt.Sprintf("%s/archive?hash=%s", id, hash) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(path), - Method: "POST", - ContentLength: size, - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: content, - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusInternalServerError, - ExpectBody: params.Error{ - Message: errorMessage, - }, - }) - - // TODO(rog) check that the uploaded blob has been deleted, - // by checking that no new blobs have been added to the blob store. -} - -// assertUploadCharm uploads the testing charm with the given name -// through the API. The URL must hold the expected revision -// that the charm will be given when uploaded. -func (s *ArchiveSuite) assertUploadCharm(c *gc.C, method string, url *router.ResolvedURL, charmName string) *charm.CharmArchive { - ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName) - size := s.assertUpload(c, method, url, ch.Path) - s.assertEntityInfo(c, url, entityInfo{ - Id: &url.URL, - Meta: entityMetaInfo{ - ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, - CharmMeta: ch.Meta(), - CharmConfig: ch.Config(), - CharmActions: ch.Actions(), - }, - }) - return ch -} - -// assertUploadBundle uploads the testing bundle with the given name -// through the API. 
The URL must hold the expected revision -// that the bundle will be given when uploaded. -func (s *ArchiveSuite) assertUploadBundle(c *gc.C, method string, url *router.ResolvedURL, bundleName string) { - path := storetesting.Charms.BundleArchivePath(c.MkDir(), bundleName) - b, err := charm.ReadBundleArchive(path) - c.Assert(err, gc.IsNil) - size := s.assertUpload(c, method, url, path) - s.assertEntityInfo(c, url, entityInfo{ - Id: &url.URL, - Meta: entityMetaInfo{ - ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, - BundleMeta: b.Data(), - }, - }, - ) -} - -func (s *ArchiveSuite) assertUpload(c *gc.C, method string, url *router.ResolvedURL, fileName string) (size int64) { - f, err := os.Open(fileName) - c.Assert(err, gc.IsNil) - defer f.Close() - - // Calculate blob hashes. - hash := blobstore.NewHash() - hash256 := sha256.New() - size, err = io.Copy(io.MultiWriter(hash, hash256), f) - c.Assert(err, gc.IsNil) - hashSum := fmt.Sprintf("%x", hash.Sum(nil)) - hash256Sum := fmt.Sprintf("%x", hash256.Sum(nil)) - _, err = f.Seek(0, 0) - c.Assert(err, gc.IsNil) - - uploadURL := url.URL - if method == "POST" { - uploadURL.Revision = -1 - } - - path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) - purl := url.PromulgatedURL() - if purl != nil { - path += fmt.Sprintf("&promulgated=%s", purl.String()) - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(path), - Method: method, - ContentLength: size, - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: f, - Username: testUsername, - Password: testPassword, - ExpectBody: params.ArchiveUploadResponse{ - Id: &url.URL, - PromulgatedId: url.PromulgatedURL(), - }, - }) - - var entity mongodoc.Entity - err = s.store.DB.Entities().FindId(&url.URL).One(&entity) - c.Assert(err, gc.IsNil) - c.Assert(entity.BlobHash, gc.Equals, hashSum) - c.Assert(entity.BlobHash256, gc.Equals, hash256Sum) - c.Assert(entity.PromulgatedURL, gc.DeepEquals, purl) - // Test that the expected entry has been created - // in the blob store. - r, _, err := s.store.BlobStore.Open(entity.BlobName) - c.Assert(err, gc.IsNil) - r.Close() - - return size -} - -// assertUploadCharmError attempts to upload the testing charm with the -// given name through the API, checking that the attempt fails with the -// specified error. The URL must hold the expected revision that the -// charm will be given when uploaded. -func (s *ArchiveSuite) assertUploadCharmError(c *gc.C, method string, url, purl *charm.Reference, charmName string, expectStatus int, expectBody interface{}) { - ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName) - s.assertUploadError(c, method, url, purl, ch.Path, expectStatus, expectBody) -} - -// assertUploadError asserts that we get an error when uploading -// the contents of the given file to the given url and promulgated URL. -// The reason this method does not take a *router.ResolvedURL -// is so that we can test what happens when an inconsistent promulgated URL -// is passed in. -func (s *ArchiveSuite) assertUploadError(c *gc.C, method string, url, purl *charm.Reference, fileName string, expectStatus int, expectBody interface{}) { - f, err := os.Open(fileName) - c.Assert(err, gc.IsNil) - defer f.Close() - - // Calculate blob hashes. 
- hash := blobstore.NewHash() - size, err := io.Copy(hash, f) - c.Assert(err, gc.IsNil) - hashSum := fmt.Sprintf("%x", hash.Sum(nil)) - _, err = f.Seek(0, 0) - c.Assert(err, gc.IsNil) - - uploadURL := *url - if method == "POST" { - uploadURL.Revision = -1 - } - - path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) - if purl != nil { - path += fmt.Sprintf("&promulgated=%s", purl.String()) - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(path), - Method: method, - ContentLength: size, - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: f, - Username: testUsername, - Password: testPassword, - ExpectStatus: expectStatus, - ExpectBody: expectBody, - }) -} - -var archiveFileErrorsTests = []struct { - about string - path string - expectStatus int - expectMessage string - expectCode params.ErrorCode -}{{ - about: "entity not found", - path: "~charmers/trusty/no-such-42/archive/icon.svg", - expectStatus: http.StatusNotFound, - expectMessage: `entity "cs:~charmers/trusty/no-such-42" not found`, - expectCode: params.ErrNotFound, -}, { - about: "directory listing", - path: "~charmers/utopic/wordpress-0/archive/hooks", - expectStatus: http.StatusForbidden, - expectMessage: "directory listing not allowed", - expectCode: params.ErrForbidden, -}, { - about: "file not found", - path: "~charmers/utopic/wordpress-0/archive/no-such", - expectStatus: http.StatusNotFound, - expectMessage: `file "no-such" not found in the archive`, - expectCode: params.ErrNotFound, -}} - -func (s *ArchiveSuite) TestArchiveFileErrors(c *gc.C) { - wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - url := newResolvedURL("cs:~charmers/utopic/wordpress-0", 0) - err := s.store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - for i, test := range archiveFileErrorsTests { - c.Logf("test %d: %s", i, test.about) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.path), - Method: "GET", - ExpectStatus: test.expectStatus, - ExpectBody: params.Error{ - Message: test.expectMessage, - Code: test.expectCode, - }, - }) - } -} - -func (s *ArchiveSuite) TestArchiveFileGet(c *gc.C) { - ch := storetesting.Charms.CharmArchive(c.MkDir(), "all-hooks") - id := newResolvedURL("cs:~charmers/utopic/all-hooks-0", 0) - err := s.store.AddCharmWithArchive(id, ch) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - zipFile, err := zip.OpenReader(ch.Path) - c.Assert(err, gc.IsNil) - defer zipFile.Close() - - patchArchiveCacheAges(s) - - // Check a file in the root directory. - s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/metadata.yaml") - // Check a file in a subdirectory. - s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/hooks/install") -} - -// assertArchiveFileContents checks that the response returned by the -// serveArchiveFile endpoint is correct for the given archive and URL path. -func (s *ArchiveSuite) assertArchiveFileContents(c *gc.C, zipFile *zip.ReadCloser, path string) { - // For example: trusty/django/archive/hooks/install -> hooks/install. - filePath := strings.SplitN(path, "/archive/", 2)[1] - - // Retrieve the expected bytes. 
- var expectBytes []byte - for _, file := range zipFile.File { - if file.Name == filePath { - r, err := file.Open() - c.Assert(err, gc.IsNil) - defer r.Close() - expectBytes, err = ioutil.ReadAll(r) - c.Assert(err, gc.IsNil) - break - } - } - c.Assert(expectBytes, gc.Not(gc.HasLen), 0) - - // Make the request. - url := storeURL(path) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: url, - }) - - // Ensure the response is what we expect. - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.Bytes(), gc.DeepEquals, expectBytes) - headers := rec.Header() - c.Assert(headers.Get("Content-Length"), gc.Equals, strconv.Itoa(len(expectBytes))) - // We only have text files in the charm repository used for tests. - c.Assert(headers.Get("Content-Type"), gc.Equals, "text/plain; charset=utf-8") - assertCacheControl(c, rec.Header(), true) -} - -func (s *ArchiveSuite) TestBundleCharms(c *gc.C) { - // Populate the store with some testing charms. - mysql := storetesting.Charms.CharmArchive(c.MkDir(), "mysql") - err := s.store.AddCharmWithArchive( - newResolvedURL("cs:~charmers/saucy/mysql-0", 0), - mysql, - ) - c.Assert(err, gc.IsNil) - riak := storetesting.Charms.CharmArchive(c.MkDir(), "riak") - err = s.store.AddCharmWithArchive( - newResolvedURL("cs:~charmers/trusty/riak-42", 42), - riak, - ) - c.Assert(err, gc.IsNil) - wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - err = s.store.AddCharmWithArchive( - newResolvedURL("cs:~charmers/utopic/wordpress-47", 47), - wordpress, - ) - c.Assert(err, gc.IsNil) - - // Retrieve the base handler so that we can invoke the - // bundleCharms method on it. - handler := v4.New(s.store.Pool(), s.srvParams) - - tests := []struct { - about string - ids []string - charms map[string]charm.Charm - }{{ - about: "no ids", - }, { - about: "fully qualified ids", - ids: []string{ - "cs:~charmers/saucy/mysql-0", - "cs:~charmers/trusty/riak-42", - "cs:~charmers/utopic/wordpress-47", - }, - charms: map[string]charm.Charm{ - "cs:~charmers/saucy/mysql-0": mysql, - "cs:~charmers/trusty/riak-42": riak, - "cs:~charmers/utopic/wordpress-47": wordpress, - }, - }, { - about: "partial ids", - ids: []string{"~charmers/utopic/wordpress", "~charmers/mysql-0", "~charmers/riak"}, - charms: map[string]charm.Charm{ - "~charmers/mysql-0": mysql, - "~charmers/riak": riak, - "~charmers/utopic/wordpress": wordpress, - }, - }, { - about: "charm not found", - ids: []string{"utopic/no-such", "~charmers/mysql"}, - charms: map[string]charm.Charm{ - "~charmers/mysql": mysql, - }, - }, { - about: "no charms found", - ids: []string{ - "cs:~charmers/saucy/mysql-99", // Revision not present. - "cs:~charmers/precise/riak-42", // Series not present. - "cs:~charmers/utopic/django-47", // Name not present. - }, - }, { - about: "repeated charms", - ids: []string{ - "cs:~charmers/saucy/mysql", - "cs:~charmers/trusty/riak-42", - "~charmers/mysql", - }, - charms: map[string]charm.Charm{ - "cs:~charmers/saucy/mysql": mysql, - "cs:~charmers/trusty/riak-42": riak, - "~charmers/mysql": mysql, - }, - }} - - // Run the tests. - for i, test := range tests { - c.Logf("test %d: %s", i, test.about) - charms, err := v4.BundleCharms(handler, test.ids) - c.Assert(err, gc.IsNil) - // Ensure the charms returned are what we expect. 
- c.Assert(charms, gc.HasLen, len(test.charms)) - for i, ch := range charms { - expectCharm := test.charms[i] - c.Assert(ch.Meta(), jc.DeepEquals, expectCharm.Meta()) - c.Assert(ch.Config(), jc.DeepEquals, expectCharm.Config()) - c.Assert(ch.Actions(), jc.DeepEquals, expectCharm.Actions()) - // Since the charm archive and the charm entity have a slightly - // different concept of what a revision is, and since the revision - // is not used for bundle validation, we can safely avoid checking - // the charm revision. - } - } -} - -func (s *ArchiveSuite) TestDelete(c *gc.C) { - // Add a charm to the database (including the archive). - id := "~charmers/utopic/mysql-42" - url := newResolvedURL(id, -1) - err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - - // Retrieve the corresponding entity. - var entity mongodoc.Entity - err = s.store.DB.Entities().FindId(&url.URL).Select(bson.D{{"blobname", 1}}).One(&entity) - c.Assert(err, gc.IsNil) - - // Delete the charm using the API. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(id + "/archive"), - Method: "DELETE", - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusOK, - }) - - // The entity has been deleted. - count, err := s.store.DB.Entities().FindId(url).Count() - c.Assert(err, gc.IsNil) - c.Assert(count, gc.Equals, 0) - - // The blob has been deleted. - _, _, err = s.store.BlobStore.Open(entity.BlobName) - c.Assert(err, gc.ErrorMatches, "resource.*not found") -} - -func (s *ArchiveSuite) TestDeleteSpecificCharm(c *gc.C) { - // Add a couple of charms to the database. - for _, id := range []string{"~charmers/trusty/mysql-42", "~charmers/utopic/mysql-42", "~charmers/utopic/mysql-47"} { - err := s.store.AddCharmWithArchive( - newResolvedURL(id, -1), - storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - } - - // Delete the second charm using the API. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("~charmers/utopic/mysql-42/archive"), - Method: "DELETE", - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusOK, - }) - - // The other two charms are still present in the database. - urls := []*charm.Reference{ - charm.MustParseReference("~charmers/trusty/mysql-42"), - charm.MustParseReference("~charmers/utopic/mysql-47"), - } - count, err := s.store.DB.Entities().Find(bson.D{{ - "_id", bson.D{{"$in", urls}}, - }}).Count() - c.Assert(err, gc.IsNil) - c.Assert(count, gc.Equals, 2) -} - -func (s *ArchiveSuite) TestDeleteNotFound(c *gc.C) { - // Try to delete a nonexistent charm using the API. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("~charmers/utopic/no-such-0/archive"), - Method: "DELETE", - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Message: `entity "cs:~charmers/utopic/no-such-0" not found`, - Code: params.ErrNotFound, - }, - }) -} - -func (s *ArchiveSuite) TestDeleteError(c *gc.C) { - // Add a charm to the database (not including the archive). - id := "~charmers/utopic/mysql-42" - url := newResolvedURL(id, -1) - err := s.store.AddCharm(storetesting.Charms.CharmArchive(c.MkDir(), "mysql"), - charmstore.AddParams{ - URL: url, - BlobName: "no-such-name", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - - // Try to delete the charm using the API.
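// (Outside the httptesting harness this is a plain authenticated DELETE
// request. A minimal hedged sketch with net/http, assuming the store is
// listening at a hypothetical srvURL:
//
//	req, err := http.NewRequest("DELETE", srvURL+"/v4/~charmers/utopic/mysql-42/archive", nil)
//	if err != nil {
//		panic(err)
//	}
//	req.SetBasicAuth(testUsername, testPassword)
//	resp, err := http.DefaultClient.Do(req)
//
// The call below instead drives the same request directly through s.srv.)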
- httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(id + "/archive"), - Method: "DELETE", - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusInternalServerError, - ExpectBody: params.Error{ - Message: `cannot remove blob no-such-name: resource at path "global/no-such-name" not found`, - }, - }) -} - -func (s *ArchiveSuite) TestDeleteCounters(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - // Add a charm to the database (including the archive). - id := "~charmers/utopic/mysql-42" - err := s.store.AddCharmWithArchive( - newResolvedURL(id, -1), - storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - - // Delete the charm using the API. - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - Method: "DELETE", - URL: storeURL(id + "/archive"), - Username: testUsername, - Password: testPassword, - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - - // Check that the delete count for the entity has been updated. - key := []string{params.StatsArchiveDelete, "utopic", "mysql", "charmers", "42"} - stats.CheckCounterSum(c, s.store, key, false, 1) -} - -func (s *ArchiveSuite) TestPostAuthErrors(c *gc.C) { - checkAuthErrors(c, s.srv, "POST", "~charmers/utopic/django/archive") -} - -func (s *ArchiveSuite) TestDeleteAuthErrors(c *gc.C) { - err := s.store.AddCharmWithArchive( - newResolvedURL("~charmers/utopic/django-42", 42), - storetesting.Charms.CharmArchive(c.MkDir(), "wordpress"), - ) - c.Assert(err, gc.IsNil) - checkAuthErrors(c, s.srv, "DELETE", "utopic/django-42/archive") -} - -var archiveAuthErrorsTests = []struct { - about string - header http.Header - username string - password string - expectMessage string -}{{ - about: "no credentials", - expectMessage: "authentication failed: missing HTTP auth header", -}, { - about: "invalid encoding", - header: http.Header{ - "Authorization": {"Basic not-a-valid-base64"}, - }, - expectMessage: "authentication failed: invalid HTTP auth encoding", -}, { - about: "invalid header", - header: http.Header{ - "Authorization": {"Basic " + base64.StdEncoding.EncodeToString([]byte("invalid"))}, - }, - expectMessage: "authentication failed: invalid HTTP auth contents", -}, { - about: "invalid credentials", - username: "no-such", - password: "exterminate!", - expectMessage: "invalid user name or password", -}} - -func checkAuthErrors(c *gc.C, handler http.Handler, method, url string) { - archiveURL := storeURL(url) - for i, test := range archiveAuthErrorsTests { - c.Logf("test %d: %s", i, test.about) - if test.header == nil { - test.header = http.Header{} - } - if method == "POST" { - test.header.Add("Content-Type", "application/zip") - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: handler, - URL: archiveURL, - Method: method, - Header: test.header, - Username: test.username, - Password: test.password, - ExpectStatus: http.StatusUnauthorized, - ExpectBody: params.Error{ - Message: test.expectMessage, - Code: params.ErrUnauthorized, - }, - }) - } -} - -// entityInfo holds all the information we want to find -// out about a charm or bundle uploaded to the store. 
-type entityInfo struct { - Id *charm.Reference - Meta entityMetaInfo -} - -type entityMetaInfo struct { - ArchiveSize *params.ArchiveSizeResponse `json:"archive-size,omitempty"` - CharmMeta *charm.Meta `json:"charm-metadata,omitempty"` - CharmConfig *charm.Config `json:"charm-config,omitempty"` - CharmActions *charm.Actions `json:"charm-actions,omitempty"` - BundleMeta *charm.BundleData `json:"bundle-metadata,omitempty"` -} - -func (s *ArchiveSuite) assertEntityInfo(c *gc.C, url *router.ResolvedURL, expect entityInfo) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL( - url.URL.Path() + "/meta/any" + - "?include=archive-size" + - "&include=charm-metadata" + - "&include=charm-config" + - "&include=charm-actions" + - "&include=bundle-metadata", - ), - Username: testUsername, - Password: testPassword, - ExpectBody: expect, - }) -} - -func (s *ArchiveSuite) TestArchiveFileGetHasCORSHeaders(c *gc.C) { - id := "~charmers/precise/wordpress-0" - s.assertUploadCharm(c, "POST", newResolvedURL(id, -1), "wordpress") - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(fmt.Sprintf("%s/archive/metadata.yaml", id)), - }) - headers := rec.Header() - c.Assert(len(headers["Access-Control-Allow-Origin"]), gc.Equals, 1) - c.Assert(len(headers["Access-Control-Allow-Headers"]), gc.Equals, 1) - c.Assert(headers["Access-Control-Allow-Origin"][0], gc.Equals, "*") - c.Assert(headers["Access-Control-Allow-Headers"][0], gc.Equals, "X-Requested-With") -} - -var getNewPromulgatedRevisionTests = []struct { - about string - id *charm.Reference - expectRev int -}{{ - about: "no base entity", - id: charm.MustParseReference("cs:~mmouse/trusty/mysql-14"), - expectRev: -1, -}, { - about: "not promulgated", - id: charm.MustParseReference("cs:~dduck/trusty/mysql-14"), - expectRev: -1, -}, { - about: "not yet promulgated", - id: charm.MustParseReference("cs:~goofy/trusty/mysql-14"), - expectRev: 0, -}, { - about: "existing promulgated", - id: charm.MustParseReference("cs:~pluto/trusty/mariadb-14"), - expectRev: 4, -}, { - about: "previous promulgated by different user", - id: charm.MustParseReference("cs:~tom/trusty/sed-1"), - expectRev: 5, -}, { - about: "many previous promulgated revisions", - id: charm.MustParseReference("cs:~tom/trusty/awk-5"), - expectRev: 5, -}} - -func (s *ArchiveSuite) TestGetNewPromulgatedRevision(c *gc.C) { - err := s.store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("cs:~dduck/mysql"), - User: "dduck", - Name: "mysql", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"everyone", "dduck"}, - }, - Promulgated: false, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("cs:~goofy/mysql"), - User: "goofy", - Name: "mysql", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"everyone", "goofy"}, - }, - Promulgated: true, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("cs:~pluto/mariadb"), - User: "pluto", - Name: "mariadb", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"everyone", "pluto"}, - }, - Promulgated: true, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("cs:~tom/sed"), - User: "tom", - Name: "sed", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"everyone", "tom"}, - }, - Promulgated: true, - }) - c.Assert(err, gc.IsNil) 
- err = s.store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("cs:~jerry/sed"), - User: "jerry", - Name: "sed", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"everyone", "jerry"}, - }, - Promulgated: false, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ - URL: charm.MustParseReference("cs:~tom/awk"), - User: "tom", - Name: "awk", - Public: true, - ACLs: mongodoc.ACL{ - Read: []string{"everyone", "tom"}, - }, - Promulgated: true, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("cs:~pluto/trusty/mariadb-5"), - User: "pluto", - Name: "mariadb", - Series: "trusty", - Revision: 5, - PromulgatedURL: charm.MustParseReference("cs:trusty/mariadb-3"), - PromulgatedRevision: 3, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("cs:~tom/trusty/sed-0"), - User: "tom", - Name: "sed", - Series: "trusty", - Revision: 0, - PromulgatedURL: charm.MustParseReference("cs:trusty/sed-0"), - PromulgatedRevision: 0, - }) - c.Assert(err, gc.IsNil) - err = s.store.DB.Entities().Insert(&mongodoc.Entity{ - URL: charm.MustParseReference("cs:~jerry/trusty/sed-3"), - User: "jerry", - Name: "sed", - Series: "trusty", - Revision: 3, - PromulgatedURL: charm.MustParseReference("cs:trusty/sed-4"), - PromulgatedRevision: 4, - }) - c.Assert(err, gc.IsNil) - id := charm.MustParseReference("cs:~tom/trusty/awk") - pid := charm.MustParseReference("cs:trusty/awk") - for i := 0; i < 5; i++ { - id.Revision = i - pid.Revision = i - err = s.store.DB.Entities().Insert(&mongodoc.Entity{ - URL: id, - User: "tom", - Name: "awk", - Series: "trusty", - Revision: i, - PromulgatedURL: pid, - PromulgatedRevision: i, - }) - c.Assert(err, gc.IsNil) - } - handler := v4.New(s.store.Pool(), s.srvParams) - for i, test := range getNewPromulgatedRevisionTests { - c.Logf("%d. %s", i, test.about) - rev, err := v4.GetNewPromulgatedRevision(handler, test.id) - c.Assert(err, gc.IsNil) - c.Assert(rev, gc.Equals, test.expectRev) - } -} - -func hashOfBytes(data []byte) string { - hash := blobstore.NewHash() - hash.Write(data) - return fmt.Sprintf("%x", hash.Sum(nil)) -} - -func hashOf(r io.Reader) (hashSum string, size int64) { - hash := blobstore.NewHash() - n, err := io.Copy(hash, r) - if err != nil { - panic(err) - } - return fmt.Sprintf("%x", hash.Sum(nil)), n -} - -func patchArchiveCacheAges(s interface { - PatchValue(interface{}, interface{}) -}) { - s.PatchValue(v4.ArchiveCacheVersionedMaxAge, 20*time.Second) - s.PatchValue(v4.ArchiveCacheNonVersionedMaxAge, 5*time.Second) -} - -// assertCacheControl asserts that the cache control headers are -// appropriately set. The idFullySpecified parameter specifies -// whether the id in the request was fully specified. -// It assumes that patchArchiveCacheAges has been called -// for the current test. -func assertCacheControl(c *gc.C, h http.Header, idFullySpecified bool) { - seconds := 5 - if idFullySpecified { - seconds = 20 - } - c.Assert(h.Get("Cache-Control"), gc.Equals, fmt.Sprintf("public, max-age=%d", seconds)) -} - -type ArchiveSearchSuite struct { - commonSuite -} - -var _ = gc.Suite(&ArchiveSearchSuite{}) - -func (s *ArchiveSearchSuite) SetUpSuite(c *gc.C) { - s.enableES = true - s.commonSuite.SetUpSuite(c) -} - -func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { - s.commonSuite.SetUpTest(c) - // TODO (frankban): remove this call when removing the legacy counts logic.
- patchLegacyDownloadCountsEnabled(s.AddCleanup, false) -} - -func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - for i, id := range []string{"~charmers/utopic/mysql-42", "~who/utopic/mysql-42"} { - c.Logf("test %d: %s", i, id) - url := newResolvedURL(id, -1) - - // Add a charm to the database (including the archive). - err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - // Download the charm archive using the API. - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(id + "/archive"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - - // Check that the search record for the entity has been updated. - stats.CheckSearchTotalDownloads(c, s.store, &url.URL, 1) - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/auth.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/auth.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/auth.go 1970-01-01 00:00:00 +0000 @@ -1,256 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4 - -import ( - "encoding/base64" - "encoding/json" - "io/ioutil" - "net/http" - "strings" - - "github.com/juju/utils" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" - "gopkg.in/macaroon.v1" - - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -const ( - basicRealm = "CharmStore4" - promulgatorsGroup = "promulgators" -) - -// authorize checks that the current user is authorized based on the provided -// ACL and optional entity. If an authenticated user is required, authorize tries to retrieve the -// current user in the following ways: -// - by checking that the request's headers HTTP basic auth credentials match -// the superuser credentials stored in the API handler; -// - by checking that there is a valid macaroon in the request's cookies. -// A params.ErrUnauthorized error is returned if superuser credentials fail; -// otherwise a macaroon is minted and a httpbakery discharge-required -// error is returned holding the macaroon. -func (h *Handler) authorize(req *http.Request, acl []string, alwaysAuth bool, entityId *router.ResolvedURL) (authorization, error) { - logger.Infof( - "authorize, bakery %p, auth location %q, acl %q, path: %q, method: %q", - h.pool.Bakery, - h.config.IdentityLocation, - acl, - req.URL.Path, - req.Method) - - if !alwaysAuth { - // No need to authenticate if the ACL is open to everyone. - for _, name := range acl { - if name == params.Everyone { - return authorization{}, nil - } - } - } - - auth, verr := h.checkRequest(req, entityId) - if verr == nil { - if err := h.checkACLMembership(auth, acl); err != nil { - return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "") - } - return auth, nil - } - if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok { - return authorization{}, errgo.Mask(verr, errgo.Is(params.ErrUnauthorized)) - } - - // Macaroon verification failed: mint a new macaroon. 
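// (Clients normally never handle the discharge-required error by hand:
// httpbakery obtains the discharge and retries the request. A hedged
// client-side sketch using the vendored API, mirroring bakeryDo in
// common_test.go:
//
//	client := httpbakery.NewHTTPClient()
//	resp, err := httpbakery.Do(client, req, noInteraction)
//
// where noInteraction is a visit-URL handler that refuses any interaction.)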
- m, err := h.newMacaroon() - if err != nil { - return authorization{}, errgo.Notef(err, "cannot mint macaroon") - } - // Request that this macaroon be supplied for all requests - // to the whole handler. - // TODO use a relative URL here: router.RelativeURLPath(req.RequestURI, "/") - cookiePath := "/" - return authorization{}, httpbakery.NewDischargeRequiredError(m, cookiePath, verr) -} - -// checkRequest checks for any authorization tokens in the request and returns any -// found as an authorization. If no suitable credentials are found, or an error occurs, -// then a zero-valued authorization is returned. -// It also checks any first party caveats. If the entityId is provided, it will -// be used to check any "is-entity" first party caveat. -func (h *Handler) checkRequest(req *http.Request, entityId *router.ResolvedURL) (authorization, error) { - user, passwd, err := parseCredentials(req) - if err == nil { - if user != h.config.AuthUsername || passwd != h.config.AuthPassword { - return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "invalid user name or password") - } - return authorization{Admin: true}, nil - } - if errgo.Cause(err) != errNoCreds || h.pool.Bakery == nil || h.config.IdentityLocation == "" { - return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "authentication failed") - } - attrMap, err := httpbakery.CheckRequest(h.pool.Bakery, req, nil, checkers.New( - checkers.CheckerFunc{ - Condition_: "is-entity", - Check_: func(_, arg string) error { - if entityId == nil { - return errgo.Newf("API operation does not involve expected entity %v", arg) - } - purl := entityId.PromulgatedURL() - if entityId.URL.String() == arg || purl != nil && purl.String() == arg { - // We allow either the non-promulgated or the promulgated - // URL form. - return nil - } - return errgo.Newf("API operation on entity %v, want %v", entityId, arg) - }, - }, - )) - if err != nil { - return authorization{}, errgo.Mask(err, errgo.Any) - } - return authorization{ - Admin: false, - Username: attrMap[usernameAttr], - }, nil -} - -// AuthorizeEntity checks that the given HTTP request -// can access the entity with the given id. -func (h *Handler) AuthorizeEntity(id *router.ResolvedURL, req *http.Request) error { - store := h.pool.Store() - defer store.Close() - baseEntity, err := store.FindBaseEntity(&id.URL, "acls") - if err != nil { - if errgo.Cause(err) == params.ErrNotFound { - return errgo.WithCausef(nil, params.ErrNotFound, "entity %q not found", id) - } - return errgo.Notef(err, "cannot retrieve entity %q for authorization", id) - } - return h.authorizeWithPerms(req, baseEntity.ACLs.Read, baseEntity.ACLs.Write, id) -} - -func (h *Handler) authorizeWithPerms(req *http.Request, read, write []string, entityId *router.ResolvedURL) error { - var acl []string - switch req.Method { - case "DELETE", "PATCH", "POST", "PUT": - acl = write - default: - acl = read - } - _, err := h.authorize(req, acl, false, entityId) - return err -} - -const usernameAttr = "username" - -// authorization contains authorization information extracted from an HTTP request. -// The zero value for an authorization contains no privileges.
-type authorization struct { - Admin bool - Username string -} - -func (h *Handler) groupsForUser(username string) ([]string, error) { - if h.config.IdentityAPIURL == "" { - return nil, nil - } - // TODO cache groups for a user - url := h.config.IdentityAPIURL + "/v1/u/" + username + "/idpgroups" - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, errgo.Mask(err) - } - req.Header = utils.BasicAuthHeader(h.config.IdentityAPIUsername, h.config.IdentityAPIPassword) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, errgo.Notef(err, "cannot get groups from %s", url) - } - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errgo.Notef(err, "cannot read response from %s", url) - } - if resp.StatusCode == http.StatusOK { - var groups []string - if err := json.Unmarshal(data, &groups); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal response from %s", url) - } - return groups, nil - } - var idmError struct { - Message string `json:"message,omitempty"` - Code string `json:"code,omitempty"` - } - if err := json.Unmarshal(data, &idmError); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal error response from %s", url) - } - return nil, errgo.Newf("cannot get groups from %s: %s", url, idmError.Message) -} - -func (h *Handler) checkACLMembership(auth authorization, acl []string) error { - if auth.Admin { - return nil - } - if auth.Username == "" { - return errgo.New("no username declared") - } - // First check if access is granted without querying for groups. - for _, name := range acl { - if name == auth.Username || name == params.Everyone { - return nil - } - } - groups, err := h.groupsForUser(auth.Username) - if err != nil { - logger.Errorf("cannot get groups for %q: %v", auth.Username, err) - return errgo.Newf("access denied for user %q", auth.Username) - } - for _, name := range acl { - for _, g := range groups { - if g == name { - return nil - } - } - } - return errgo.Newf("access denied for user %q", auth.Username) -} - -func (h *Handler) newMacaroon() (*macaroon.Macaroon, error) { - // TODO generate different caveats depending on the requested operation - // and whether there's a charm id or not. - // Mint an appropriate macaroon and send it back to the client. - return h.pool.Bakery.NewMacaroon("", nil, []checkers.Caveat{checkers.NeedDeclaredCaveat(checkers.Caveat{ - Location: h.config.IdentityLocation, - Condition: "is-authenticated-user", - }, usernameAttr)}) -} - -var errNoCreds = errgo.New("missing HTTP auth header") - -// parseCredentials parses the given request and returns the HTTP basic auth -// credentials included in its header. -func parseCredentials(req *http.Request) (username, password string, err error) { - auth := req.Header.Get("Authorization") - if auth == "" { - return "", "", errNoCreds - } - parts := strings.Fields(auth) - if len(parts) != 2 || parts[0] != "Basic" { - return "", "", errgo.New("invalid HTTP auth header") - } - // Challenge is a base64-encoded "tag:pass" string. - // See RFC 2617, Section 2. 
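// For example, the credentials "user:pass" travel as the header
//
//	Authorization: Basic dXNlcjpwYXNz
//
// and decoding the second field below recovers "user:pass".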
- challenge, err := base64.StdEncoding.DecodeString(parts[1]) - if err != nil { - return "", "", errgo.New("invalid HTTP auth encoding") - } - tokens := strings.SplitN(string(challenge), ":", 2) - if len(tokens) != 2 { - return "", "", errgo.New("invalid HTTP auth contents") - } - return tokens[0], tokens[1], nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/auth_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/auth_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/auth_test.go 1970-01-01 00:00:00 +0000 @@ -1,866 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4_test - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "os" - "sort" - "strings" - "sync" - "time" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" - "gopkg.in/macaroon.v1" - - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -func (s *commonSuite) AssertEndpointAuth(c *gc.C, p httptesting.JSONCallParams) { - s.testNonMacaroonAuth(c, p) - s.testMacaroonAuth(c, p) -} - -func (s *commonSuite) testNonMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { - p.Handler = s.noMacaroonSrv - // Check that the request succeeds when provided with the - // correct credentials. - p.Username = "test-user" - p.Password = "test-password" - httptesting.AssertJSONCall(c, p) - - // Check that auth fails with no creds provided. - p.Username = "" - p.Password = "" - p.ExpectStatus = http.StatusUnauthorized - p.ExpectBody = params.Error{ - Message: "authentication failed: missing HTTP auth header", - Code: params.ErrUnauthorized, - } - httptesting.AssertJSONCall(c, p) - - // Check that auth fails with the wrong username provided. - p.Username = "wrong" - p.Password = "test-password" - p.ExpectStatus = http.StatusUnauthorized - p.ExpectBody = params.Error{ - Message: "invalid user name or password", - Code: params.ErrUnauthorized, - } - httptesting.AssertJSONCall(c, p) - - // Check that auth fails with the wrong password provided. - p.Username = "test-user" - p.Password = "test-password-wrong" - p.ExpectStatus = http.StatusUnauthorized - p.ExpectBody = params.Error{ - Message: "invalid user name or password", - Code: params.ErrUnauthorized, - } - httptesting.AssertJSONCall(c, p) -} - -func (s *commonSuite) testMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { - // Make a test third party caveat discharger. - var checkedCaveats []string - var mu sync.Mutex - var dischargeError error - s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { - mu.Lock() - defer mu.Unlock() - checkedCaveats = append(checkedCaveats, cond+" "+arg) - if dischargeError != nil { - return nil, dischargeError - } - return []checkers.Caveat{ - checkers.DeclaredCaveat("username", "bob"), - }, nil - } - p.Handler = s.srv - - client := httpbakery.NewHTTPClient() - cookieJar := &cookieJar{CookieJar: client.Jar} - client.Jar = cookieJar - p.Do = bakeryDo(client) - - // Check that the call succeeds with simple auth. 
- c.Log("simple auth sucess") - p.Username = "test-user" - p.Password = "test-password" - httptesting.AssertJSONCall(c, p) - c.Assert(checkedCaveats, gc.HasLen, 0) - c.Assert(cookieJar.cookieURLs, gc.HasLen, 0) - - // Check that the call gives us the correct - // "authentication denied response" without simple auth - // and uses the third party checker - // and that a cookie is stored at the correct location. - // TODO when we allow admin access via macaroon creds, - // change this test to expect success. - c.Log("macaroon unauthorized error") - p.Username, p.Password = "", "" - p.ExpectStatus = http.StatusUnauthorized - p.ExpectBody = params.Error{ - Message: `unauthorized: access denied for user "bob"`, - Code: params.ErrUnauthorized, - } - httptesting.AssertJSONCall(c, p) - sort.Strings(checkedCaveats) - c.Assert(checkedCaveats, jc.DeepEquals, []string{ - "is-authenticated-user ", - }) - checkedCaveats = nil - c.Assert(cookieJar.cookieURLs, gc.DeepEquals, []string{"http://somehost/"}) - - // Check that the call fails with incorrect simple auth info. - c.Log("simple auth error") - p.Password = "bad-password" - p.ExpectStatus = http.StatusUnauthorized - p.ExpectBody = params.Error{ - Message: "authentication failed: missing HTTP auth header", - Code: params.ErrUnauthorized, - } - - // Check that it fails when the discharger refuses the discharge. - c.Log("macaroon discharge error") - client = httpbakery.NewHTTPClient() - dischargeError = fmt.Errorf("go away") - p.Do = bakeryDo(client) // clear cookies - p.Password = "" - p.Username = "" - p.ExpectError = `cannot get discharge from "http://[^"]*": third party refused discharge: cannot discharge: go away` - httptesting.AssertJSONCall(c, p) -} - -type cookieJar struct { - cookieURLs []string - http.CookieJar -} - -func (j *cookieJar) SetCookies(url *url.URL, cookies []*http.Cookie) { - url1 := *url - url1.Host = "somehost" - j.cookieURLs = append(j.cookieURLs, url1.String()) - j.CookieJar.SetCookies(url, cookies) -} - -func noInteraction(*url.URL) error { - return fmt.Errorf("unexpected interaction required") -} - -// dischargedAuthCookie retrieves and discharges an authentication macaroon cookie. It adds the provided -// first-party caveats before discharging the macaroon. -func dischargedAuthCookie(c *gc.C, srv http.Handler, caveats ...string) *http.Cookie { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: srv, - URL: storeURL("macaroon"), - Method: "GET", - }) - var m macaroon.Macaroon - err := json.Unmarshal(rec.Body.Bytes(), &m) - c.Assert(err, gc.IsNil) - for _, cav := range caveats { - err := m.AddFirstPartyCaveat(cav) - c.Assert(err, gc.IsNil) - } - ms, err := httpbakery.DischargeAll(&m, httpbakery.NewHTTPClient(), noInteraction) - c.Assert(err, gc.IsNil) - macaroonCookie, err := httpbakery.NewCookie(ms) - c.Assert(err, gc.IsNil) - return macaroonCookie -} - -type authSuite struct { - commonSuite -} - -var _ = gc.Suite(&authSuite{}) - -func (s *authSuite) SetUpSuite(c *gc.C) { - s.enableIdentity = true - s.commonSuite.SetUpSuite(c) -} - -var readAuthorizationTests = []struct { - // about holds the test description. - about string - // username holds the authenticated user name returned by the discharger. - // If empty, an anonymous user is returned. - username string - // groups holds group names the user is member of, as returned by the - // discharger. - groups []string - // readPerm stores a list of users with read permissions. - readPerm []string - // expectStatus is the expected HTTP response status. 
- // Defaults to 200 status OK. - expectStatus int - // expectBody holds the expected body of the HTTP response. If nil, - // the body is not checked and the response is assumed to be ok. - expectBody interface{} -}{{ - about: "anonymous users are authorized", - readPerm: []string{params.Everyone}, -}, { - about: "everyone is authorized", - username: "dalek", - readPerm: []string{params.Everyone}, -}, { - about: "everyone and a specific user", - username: "dalek", - readPerm: []string{params.Everyone, "janeway"}, -}, { - about: "specific user authorized", - username: "who", - readPerm: []string{"who"}, -}, { - about: "multiple specific users authorized", - username: "picard", - readPerm: []string{"kirk", "picard", "sisko"}, -}, { - about: "nobody authorized", - username: "picard", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "picard"`, - }, -}, { - about: "access denied for user", - username: "kirk", - readPerm: []string{"picard", "sisko"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "kirk"`, - }, -}, { - about: "everyone is authorized (user is member of groups)", - username: "dalek", - groups: []string{"group1", "group2"}, - readPerm: []string{params.Everyone}, -}, { - about: "everyone and a specific group", - username: "dalek", - groups: []string{"group2", "group3"}, - readPerm: []string{params.Everyone, "group1"}, -}, { - about: "specific group authorized", - username: "who", - groups: []string{"group1", "group42", "group2"}, - readPerm: []string{"group42"}, -}, { - about: "multiple specific groups authorized", - username: "picard", - groups: []string{"group2"}, - readPerm: []string{"kirk", "group0", "group2"}, -}, { - about: "no group authorized", - username: "picard", - groups: []string{"group1", "group2"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "picard"`, - }, -}, { - about: "access denied for group", - username: "kirk", - groups: []string{"group1", "group2", "group3"}, - readPerm: []string{"picard", "sisko", "group42", "group47"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "kirk"`, - }, -}} - -func dischargeForUser(username string) func(_, _ string) ([]checkers.Caveat, error) { - return func(_, _ string) ([]checkers.Caveat, error) { - return []checkers.Caveat{ - checkers.DeclaredCaveat(v4.UsernameAttr, username), - }, nil - } -} - -func (s *authSuite) TestReadAuthorization(c *gc.C) { - for i, test := range readAuthorizationTests { - c.Logf("test %d: %s", i, test.about) - - s.discharge = dischargeForUser(test.username) - s.idM.groups = map[string][]string{ - test.username: test.groups, - } - - // Add a charm to the store, used for testing. - err := s.store.AddCharmWithArchive( - newResolvedURL("~charmers/utopic/wordpress-42", -1), - storetesting.Charms.CharmDir("wordpress"), - ) - c.Assert(err, gc.IsNil) - baseURL := charm.MustParseReference("~charmers/wordpress") - - // Change the ACLs for the testing charm. - err = s.store.SetPerms(baseURL, "read", test.readPerm...) - c.Assert(err, gc.IsNil) - - // Prepare the expected status. 
- expectStatus := test.expectStatus - if expectStatus == 0 { - expectStatus = http.StatusOK - } - - // Define a helper function used to send requests and check responses. - makeRequest := func(path string) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - Do: bakeryDo(nil), - URL: storeURL(path), - }) - c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) - if test.expectBody != nil { - c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) - } - } - - // Perform a meta request. - makeRequest("~charmers/wordpress/meta/archive-size") - - // Perform an id request. - makeRequest("~charmers/wordpress/expand-id") - - // Remove all entities from the store. - _, err = s.store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - } -} - -var writeAuthorizationTests = []struct { - // about holds the test description. - about string - // username holds the authenticated user name returned by the discharger. - // If empty, an anonymous user is returned. - username string - // groups holds group names the user is member of, as returned by the - // discharger. - groups []string - // writePerm stores a list of users with write permissions. - writePerm []string - // expectStatus is the expected HTTP response status. - // Defaults to 200 status OK. - expectStatus int - // expectBody holds the expected body of the HTTP response. If nil, - // the body is not checked and the response is assumed to be ok. - expectBody interface{} -}{{ - about: "anonymous users are not authorized", - writePerm: []string{"who"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: "unauthorized: no username declared", - }, -}, { - about: "specific user authorized to write", - username: "dalek", - writePerm: []string{"dalek"}, -}, { - about: "multiple users authorized", - username: "sisko", - writePerm: []string{"kirk", "picard", "sisko"}, -}, { - about: "no users authorized", - username: "who", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "who"`, - }, -}, { - about: "specific user unauthorized", - username: "kirk", - writePerm: []string{"picard", "sisko", "janeway"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "kirk"`, - }, -}, { - about: "access granted for group", - username: "picard", - groups: []string{"group1", "group2"}, - writePerm: []string{"group2"}, -}, { - about: "multiple groups authorized", - username: "picard", - groups: []string{"group1", "group2"}, - writePerm: []string{"kirk", "group0", "group1", "group2"}, -}, { - about: "no group authorized", - username: "picard", - groups: []string{"group1", "group2"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "picard"`, - }, -}, { - about: "access denied for group", - username: "kirk", - groups: []string{"group1", "group2", "group3"}, - writePerm: []string{"picard", "sisko", "group42", "group47"}, - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "kirk"`, - }, -}} - -func (s *authSuite) TestWriteAuthorization(c *gc.C) { - for i, test := range writeAuthorizationTests { - c.Logf("test %d: %s", i, test.about) - - s.discharge =
dischargeForUser(test.username) - s.idM.groups = map[string][]string{ - test.username: test.groups, - } - - // Add a charm to the store, used for testing. - err := s.store.AddCharmWithArchive( - newResolvedURL("~charmers/utopic/wordpress-42", -1), - storetesting.Charms.CharmDir("wordpress")) - c.Assert(err, gc.IsNil) - baseURL := charm.MustParseReference("~charmers/wordpress") - - // Change the ACLs for the testing charm. - err = s.store.SetPerms(baseURL, "write", test.writePerm...) - c.Assert(err, gc.IsNil) - - // Prepare the expected status. - expectStatus := test.expectStatus - if expectStatus == 0 { - expectStatus = http.StatusOK - } - - client := httpbakery.NewHTTPClient() - // Perform a meta PUT request. - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - Do: bakeryDo(client), - URL: storeURL("~charmers/wordpress/meta/extra-info/key"), - Method: "PUT", - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Body: strings.NewReader("42"), - }) - c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) - if test.expectBody != nil { - c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) - } - - // Remove all entities from the store. - _, err = s.store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - } -} - -var uploadEntityAuthorizationTests = []struct { - // about holds the test description. - about string - // username holds the authenticated user name returned by the discharger. - // If empty, an anonymous user is returned. - username string - // groups holds group names the user is member of, as returned by the - // discharger. - groups []string - // id holds the id of the entity to be uploaded. - id string - // expectStatus is the expected HTTP response status. - // Defaults to 200 status OK. - expectStatus int - // expectBody holds the expected body of the HTTP response. If nil, - // the body is not checked and the response is assumed to be ok. 
- expectBody interface{} -}{{ - about: "user owned entity", - username: "who", - id: "~who/utopic/django", -}, { - about: "group owned entity", - username: "dalek", - groups: []string{"group1", "group2"}, - id: "~group1/utopic/django", -}, { - about: "specific group", - username: "dalek", - groups: []string{"group42"}, - id: "~group42/utopic/django", -}, { - about: "promulgated entity", - username: "sisko", - groups: []string{"group1", "group2"}, - id: "~charmers/utopic/django", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "sisko"`, - }, -}, { - about: "anonymous user", - id: "~who/utopic/django", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: "unauthorized: no username declared", - }, -}, { - about: "anonymous user and promulgated entity", - id: "~charmers/utopic/django", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: "unauthorized: no username declared", - }, -}, { - about: "user does not match", - username: "kirk", - id: "~picard/utopic/django", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "kirk"`, - }, -}, { - about: "group does not match", - username: "kirk", - groups: []string{"group1", "group2", "group3"}, - id: "~group0/utopic/django", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "kirk"`, - }, -}, { - about: "specific group and promulgated entity", - username: "janeway", - groups: []string{"group1"}, - id: "~charmers/utopic/django", - expectStatus: http.StatusUnauthorized, - expectBody: params.Error{ - Code: params.ErrUnauthorized, - Message: `unauthorized: access denied for user "janeway"`, - }, -}} - -func (s *authSuite) TestUploadEntityAuthorization(c *gc.C) { - for i, test := range uploadEntityAuthorizationTests { - c.Logf("test %d: %s", i, test.about) - - s.discharge = dischargeForUser(test.username) - s.idM.groups = map[string][]string{ - test.username: test.groups, - } - - // Prepare the expected status. - expectStatus := test.expectStatus - if expectStatus == 0 { - expectStatus = http.StatusOK - } - - // Try to upload the entity. - body, hash, size := s.archiveInfo(c) - defer body.Close() - - client := httpbakery.NewHTTPClient() - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - Do: bakeryDo(client), - URL: storeURL(test.id + "/archive?hash=" + hash), - Method: "POST", - ContentLength: size, - Header: http.Header{ - "Content-Type": {"application/zip"}, - }, - Body: body, - }) - c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) - if test.expectBody != nil { - c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) - } - - // Remove all entities from the store. - _, err := s.store.DB.Entities().RemoveAll(nil) - c.Assert(err, gc.IsNil) - } -} - -// archiveInfo prepares a zip archive of an entity and returns a reader for the -// archive, its blob hash and size.
-func (s *authSuite) archiveInfo(c *gc.C) (r io.ReadCloser, hashSum string, size int64) { - ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") - f, err := os.Open(ch.Path) - c.Assert(err, gc.IsNil) - hash, size := hashOf(f) - _, err = f.Seek(0, 0) - c.Assert(err, gc.IsNil) - return f, hash, size -} - -var isEntityCaveatTests = []struct { - url string - expectError string -}{{ - url: "~charmers/utopic/wordpress-42/archive", -}, { - url: "~charmers/utopic/wordpress-42/meta/hash", -}, { - url: "wordpress/archive", -}, { - url: "wordpress/meta/hash", -}, { - url: "utopic/wordpress-10/archive", -}, { - url: "utopic/wordpress-10/meta/hash", -}, { - url: "~charmers/utopic/wordpress-41/archive", - expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: API operation on entity cs:~charmers/utopic/wordpress-41, want cs:~charmers/utopic/wordpress-42`, -}, { - url: "~charmers/utopic/wordpress-41/meta/hash", - expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: API operation on entity cs:~charmers/utopic/wordpress-41, want cs:~charmers/utopic/wordpress-42`, -}, { - url: "utopic/wordpress-9/archive", - expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: API operation on entity cs:utopic/wordpress-9, want cs:~charmers/utopic/wordpress-42`, -}, { - url: "utopic/wordpress-9/meta/hash", - expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: API operation on entity cs:utopic/wordpress-9, want cs:~charmers/utopic/wordpress-42`, -}, { - url: "log", - expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: API operation does not involve expected entity cs:~charmers/utopic/wordpress-42`, -}} - -func (s *authSuite) TestIsEntityCaveat(c *gc.C) { - s.discharge = func(_, _ string) ([]checkers.Caveat, error) { - return []checkers.Caveat{{ - Condition: "is-entity cs:~charmers/utopic/wordpress-42", - }, - checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), - }, nil - } - - // Add a charm to the store, used for testing. - err := s.store.AddCharmWithArchive( - newResolvedURL("~charmers/utopic/wordpress-41", 9), - storetesting.Charms.CharmDir("wordpress")) - c.Assert(err, gc.IsNil) - err = s.store.AddCharmWithArchive( - newResolvedURL("~charmers/utopic/wordpress-42", 10), - storetesting.Charms.CharmDir("wordpress")) - c.Assert(err, gc.IsNil) - // Change the ACLs for the testing charm. - err = s.store.SetPerms(charm.MustParseReference("cs:~charmers/wordpress"), "read", "bob") - c.Assert(err, gc.IsNil) - - for i, test := range isEntityCaveatTests { - c.Logf("test %d: %s", i, test.url) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - Do: bakeryDo(nil), - URL: storeURL(test.url), - Method: "GET", - }) - if test.expectError != "" { - c.Assert(rec.Code, gc.Equals, http.StatusProxyAuthRequired) - var respErr httpbakery.Error - err := json.Unmarshal(rec.Body.Bytes(), &respErr) - c.Assert(err, gc.IsNil) - c.Assert(respErr.Message, gc.Matches, test.expectError) - continue - } - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes())) - } -} - -func (s *authSuite) TestDelegatableMacaroon(c *gc.C) { - // Create a new server with a third party discharger. - s.discharge = dischargeForUser("bob") - - // First check that we get a macaraq error when using a vanilla http do - // request. 
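// (A "macaraq" error is the macaroon discharge-required response: the server
// replies with HTTP 407 StatusProxyAuthRequired and a macaroon to discharge,
// as asserted below.)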
- httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("delegatable-macaroon"), - ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { - // Allow any body - the next check will check that it's a valid macaroon. - }), - ExpectStatus: http.StatusProxyAuthRequired, - }) - - client := httpbakery.NewHTTPClient() - - now := time.Now() - var gotBody json.RawMessage - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("delegatable-macaroon"), - ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) { - gotBody = m - }), - Do: bakeryDo(client), - ExpectStatus: http.StatusOK, - }) - - c.Assert(gotBody, gc.NotNil) - var m macaroon.Macaroon - err := json.Unmarshal(gotBody, &m) - c.Assert(err, gc.IsNil) - - caveats := m.Caveats() - foundExpiry := false - for _, cav := range caveats { - cond, arg, err := checkers.ParseCaveat(cav.Id) - c.Assert(err, gc.IsNil) - switch cond { - case checkers.CondTimeBefore: - t, err := time.Parse(time.RFC3339Nano, arg) - c.Assert(err, gc.IsNil) - c.Assert(t, jc.TimeBetween(now.Add(v4.DelegatableMacaroonExpiry), now.Add(v4.DelegatableMacaroonExpiry+time.Second))) - foundExpiry = true - } - } - c.Assert(foundExpiry, jc.IsTrue) - - // Now check that we can use the obtained macaroon to do stuff - // as the declared user. - - err = s.store.AddCharmWithArchive( - newResolvedURL("~charmers/utopic/wordpress-41", 9), - storetesting.Charms.CharmDir("wordpress")) - c.Assert(err, gc.IsNil) - // Change the ACLs for the testing charm. - err = s.store.SetPerms(charm.MustParseReference("cs:~charmers/wordpress"), "read", "bob") - c.Assert(err, gc.IsNil) - - // First check that we require authorization to access the charm. - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), - Method: "GET", - }) - c.Assert(rec.Code, gc.Equals, http.StatusProxyAuthRequired) - - // Then check that the request succeeds if we provide the delegatable - // macaroon. - - client = httpbakery.NewHTTPClient() - u, err := url.Parse("http://127.0.0.1") - c.Assert(err, gc.IsNil) - err = httpbakery.SetCookie(client.Jar, u, macaroon.Slice{&m}) - c.Assert(err, gc.IsNil) - - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), - ExpectBody: params.IdNameResponse{ - Name: "wordpress", - }, - - ExpectStatus: http.StatusOK, - Do: bakeryDo(client), - }) -} - -func (s *authSuite) TestDelegatableMacaroonWithBasicAuth(c *gc.C) { - // First check that we get a macaraq error when using a vanilla http do - // request. 
- httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - Username: testUsername, - Password: testPassword, - URL: storeURL("delegatable-macaroon"), - ExpectBody: params.Error{ - Code: params.ErrForbidden, - Message: "delegatable macaroon is not obtainable using admin credentials", - }, - ExpectStatus: http.StatusForbidden, - }) -} - -func (s *authSuite) TestGroupsForUserSuccess(c *gc.C) { - h := v4.New(s.store.Pool(), s.srvParams) - s.idM.groups = map[string][]string{ - "bob": {"one", "two"}, - } - groups, err := v4.GroupsForUser(h, "bob") - c.Assert(err, gc.IsNil) - c.Assert(groups, jc.DeepEquals, []string{"one", "two"}) -} - -func (s *authSuite) TestGroupsForUserWithNoIdentity(c *gc.C) { - h := v4.New(s.store.Pool(), s.noMacaroonSrvParams) - groups, err := v4.GroupsForUser(h, "someone") - c.Assert(err, gc.IsNil) - c.Assert(groups, gc.HasLen, 0) -} - -func (s *authSuite) TestGroupsForUserWithInvalidIdentityURL(c *gc.C) { - p := s.srvParams - p.IdentityAPIURL = ":::::" - h := v4.New(s.store.Pool(), p) - groups, err := v4.GroupsForUser(h, "someone") - c.Assert(err, gc.ErrorMatches, `parse :::::/v1/u/someone/idpgroups: missing protocol scheme`) - c.Assert(groups, gc.HasLen, 0) -} - -func (s *authSuite) TestGroupsForUserWithDoFailure(c *gc.C) { - h := v4.New(s.store.Pool(), s.srvParams) - s.PatchValue(&http.DefaultClient.Transport, errorTransport("some error")) - groups, err := v4.GroupsForUser(h, "someone") - c.Assert(err, gc.ErrorMatches, `cannot get groups from http://.*/v1/u/someone/idpgroups: Get http://.*/v1/u/someone/idpgroups: some error`) - c.Assert(groups, gc.HasLen, 0) -} - -func (s *authSuite) TestGroupsForUserWithInvalidBody(c *gc.C) { - h := v4.New(s.store.Pool(), s.srvParams) - s.idM.body = "bad" - groups, err := v4.GroupsForUser(h, "someone") - c.Assert(err, gc.ErrorMatches, `cannot unmarshal response from http://.*/v1/u/someone/idpgroups: .*`) - c.Assert(groups, gc.HasLen, 0) -} - -func (s *authSuite) TestGroupsForUserWithErrorResponse(c *gc.C) { - h := v4.New(s.store.Pool(), s.srvParams) - s.idM.body = `{"message":"some error","code":"some code"}` - s.idM.status = http.StatusUnauthorized - groups, err := v4.GroupsForUser(h, "someone") - c.Assert(err, gc.ErrorMatches, `cannot get groups from http://.*/v1/u/someone/idpgroups: some error`) - c.Assert(groups, gc.HasLen, 0) -} - -func (s *authSuite) TestGroupsForUserWithBadErrorResponse(c *gc.C) { - h := v4.New(s.store.Pool(), s.srvParams) - s.idM.body = `{"message":"some error"` - s.idM.status = http.StatusUnauthorized - groups, err := v4.GroupsForUser(h, "someone") - c.Assert(err, gc.ErrorMatches, `cannot unmarshal error response from http://.*/v1/u/someone/idpgroups: .*`) - c.Assert(groups, gc.HasLen, 0) -} - -type errorTransport string - -func (e errorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, errgo.New(string(e)) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/common_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/common_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/common_test.go 1970-01-01 00:00:00 +0000 @@ -1,222 +0,0 @@ -package v4_test - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - - gc "gopkg.in/check.v1" - - "github.com/julienschmidt/httprouter" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakerytest" - "gopkg.in/macaroon-bakery.v0/httpbakery" - - 
"gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/v4" -) - -type commonSuite struct { - storetesting.IsolatedMgoSuite - - // srv holds the store HTTP handler. - srv http.Handler - - // srvParams holds the parameters that the - // srv handler was started with - srvParams charmstore.ServerParams - - // noMacaroonSrv holds the store HTTP handler - // for an instance of the store without identity - // enabled. If enableIdentity is false, this is - // the same as srv. - noMacaroonSrv http.Handler - - // noMacaroonSrvParams holds the parameters that the - // noMacaroonSrv handler was started with - noMacaroonSrvParams charmstore.ServerParams - - // store holds an instance of *charm.Store - // that can be used to access the charmstore database - // directly. - store *charmstore.Store - - // esSuite is set only when enableES is set to true. - esSuite *storetesting.ElasticSearchSuite - - // discharge holds the function that will be used - // to check third party caveats by the mock - // discharger. This will be ignored if enableIdentity was - // not true before commonSuite.SetUpTest is invoked. - // - // It may be set by tests to influence the behavior of the - // discharger. - discharge func(cav, arg string) ([]checkers.Caveat, error) - - discharger *bakerytest.Discharger - idM *idM - idMServer *httptest.Server - - // The following fields may be set before - // SetUpSuite is invoked on commonSuite - // and influences how the suite sets itself up. - - // enableIdentity holds whether the charmstore server - // will be started with a configured identity service. - enableIdentity bool - - // enableES holds whether the charmstore server will be - // started with Elastic Search enabled. - enableES bool -} - -func (s *commonSuite) SetUpSuite(c *gc.C) { - s.IsolatedMgoSuite.SetUpSuite(c) - if s.enableES { - s.esSuite = new(storetesting.ElasticSearchSuite) - s.esSuite.SetUpSuite(c) - } -} - -func (s *commonSuite) TearDownSuite(c *gc.C) { - if s.esSuite != nil { - s.esSuite.TearDownSuite(c) - } -} - -func (s *commonSuite) SetUpTest(c *gc.C) { - s.IsolatedMgoSuite.SetUpTest(c) - if s.esSuite != nil { - s.esSuite.SetUpTest(c) - } - if s.enableIdentity { - s.idM = newIdM() - s.idMServer = httptest.NewServer(s.idM) - } - s.startServer(c) -} - -func (s *commonSuite) TearDownTest(c *gc.C) { - s.store.Close() - if s.esSuite != nil { - s.esSuite.TearDownTest(c) - } - if s.discharger != nil { - s.discharger.Close() - s.idMServer.Close() - } - s.IsolatedMgoSuite.TearDownTest(c) -} - -// startServer creates a new charmstore server. 
-func (s *commonSuite) startServer(c *gc.C) { - config := charmstore.ServerParams{ - AuthUsername: testUsername, - AuthPassword: testPassword, - } - if s.enableIdentity { - s.discharge = func(_, _ string) ([]checkers.Caveat, error) { - return nil, errgo.New("no discharge") - } - discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) { - return s.discharge(cond, arg) - }) - config.IdentityLocation = discharger.Location() - config.PublicKeyLocator = discharger - config.IdentityAPIURL = s.idMServer.URL - } - var si *charmstore.SearchIndex - if s.enableES { - si = &charmstore.SearchIndex{ - Database: s.esSuite.ES, - Index: s.esSuite.TestIndex, - } - } - db := s.Session.DB("charmstore") - var err error - s.srv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler}) - c.Assert(err, gc.IsNil) - s.srvParams = config - - if s.enableIdentity { - config.IdentityLocation = "" - config.PublicKeyLocator = nil - config.IdentityAPIURL = "" - s.noMacaroonSrv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler}) - c.Assert(err, gc.IsNil) - } else { - s.noMacaroonSrv = s.srv - } - s.noMacaroonSrvParams = config - - pool, err := charmstore.NewPool(db, si, &bakery.NewServiceParams{}) - c.Assert(err, gc.IsNil) - s.store = pool.Store() -} - -func storeURL(path string) string { - return "/v4/" + path -} - -func bakeryDo(client *http.Client) func(*http.Request) (*http.Response, error) { - if client == nil { - client = httpbakery.NewHTTPClient() - } - return func(req *http.Request) (*http.Response, error) { - if req.Body != nil { - return httpbakery.DoWithBody(client, req, httpbakery.SeekerBody(req.Body.(io.ReadSeeker)), noInteraction) - } - return httpbakery.Do(client, req, noInteraction) - } -} - -type idM struct { - // groups may be set to determine the mapping - // from user to groups for that user. - groups map[string][]string - - // body may be set to cause serveGroups to return - // an arbitrary HTTP response body. - body string - - // status may be set to indicate the HTTP status code - // when body is not nil. - status int - - router *httprouter.Router -} - -func newIdM() *idM { - idM := &idM{ - groups: make(map[string][]string), - router: httprouter.New(), - } - idM.router.GET("/v1/u/:user/idpgroups", idM.serveGroups) - return idM -} - -func (idM *idM) ServeHTTP(w http.ResponseWriter, req *http.Request) { - idM.router.ServeHTTP(w, req) -} - -func (idM *idM) serveGroups(w http.ResponseWriter, req *http.Request, p httprouter.Params) { - if idM.body != "" { - if idM.status != 0 { - w.WriteHeader(idM.status) - } - w.Write([]byte(idM.body)) - return - } - u := p.ByName("user") - if u == "" { - panic("no user") - } - enc := json.NewEncoder(w) - if err := enc.Encode(idM.groups[u]); err != nil { - panic(err) - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/content.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/content.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/content.go 1970-01-01 00:00:00 +0000 @@ -1,243 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4 - -import ( - "archive/zip" - "bytes" - "fmt" - "io" - "net/http" - "path" - "strings" - - "github.com/juju/jujusvg" - "github.com/juju/xml" - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -// GET id/diagram.svg -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-iddiagramsvg -func (h *Handler) serveDiagram(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error { - store := h.pool.Store() - defer store.Close() - if id.URL.Series != "bundle" { - return errgo.WithCausef(nil, params.ErrNotFound, "diagrams not supported for charms") - } - entity, err := store.FindEntity(id, "bundledata") - if err != nil { - return errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - - var urlErr error - // TODO consider what happens when a charm's SVG does not exist. - canvas, err := jujusvg.NewFromBundle(entity.BundleData, func(id *charm.Reference) string { - // TODO change jujusvg so that the iconURL function can - // return an error. - absPath := "/" + id.Path() + "/icon.svg" - p, err := router.RelativeURLPath(req.RequestURI, absPath) - if err != nil { - urlErr = errgo.Notef(err, "cannot make relative URL from %q and %q", req.RequestURI, absPath) - } - return p - }) - if err != nil { - return errgo.Notef(err, "cannot create canvas") - } - if urlErr != nil { - return urlErr - } - setArchiveCacheControl(w.Header(), fullySpecified) - w.Header().Set("Content-Type", "image/svg+xml") - canvas.Marshal(w) - return nil -} - -// These are all forms of README files -// actually observed in charms in the wild. -var allowedReadMe = map[string]bool{ - "readme": true, - "readme.md": true, - "readme.rst": true, - "readme.ex": true, - "readme.markdown": true, - "readme.txt": true, -} - -// GET id/readme -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idreadme -func (h *Handler) serveReadMe(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error { - store := h.pool.Store() - defer store.Close() - entity, err := store.FindEntity(id, "_id", "contents", "blobname") - if err != nil { - return errgo.NoteMask(err, "cannot get README", errgo.Is(params.ErrNotFound)) - } - isReadMeFile := func(f *zip.File) bool { - name := strings.ToLower(path.Clean(f.Name)) - // This is the same condition currently used by the GUI. - // TODO propagate likely content type from file extension. 
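// For instance, with the table above:
//
//	strings.ToLower(path.Clean("README.Md"))   // "readme.md": served
//	strings.ToLower(path.Clean("docs/README")) // "docs/readme": not served
//
// so only top-level files with one of the listed names count as READMEs.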
- return allowedReadMe[name] - } - r, err := store.OpenCachedBlobFile(entity, mongodoc.FileReadMe, isReadMeFile) - if err != nil { - return errgo.Mask(err, errgo.Is(params.ErrNotFound)) - } - defer r.Close() - setArchiveCacheControl(w.Header(), fullySpecified) - io.Copy(w, r) - return nil -} - -// GET id/icon.svg -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idiconsvg -func (h *Handler) serveIcon(id *router.ResolvedURL, fullySpecified bool, w http.ResponseWriter, req *http.Request) error { - if id.URL.Series == "bundle" { - return errgo.WithCausef(nil, params.ErrNotFound, "icons not supported for bundles") - } - - store := h.pool.Store() - defer store.Close() - entity, err := store.FindEntity(id, "_id", "contents", "blobname") - if err != nil { - return errgo.NoteMask(err, "cannot get icon", errgo.Is(params.ErrNotFound)) - } - isIconFile := func(f *zip.File) bool { - return path.Clean(f.Name) == "icon.svg" - } - r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, isIconFile) - if err != nil { - logger.Errorf("cannot open icon.svg file for %v: %v", id, err) - if errgo.Cause(err) != params.ErrNotFound { - return errgo.Mask(err) - } - setArchiveCacheControl(w.Header(), fullySpecified) - w.Header().Set("Content-Type", "image/svg+xml") - io.Copy(w, strings.NewReader(defaultIcon)) - return nil - } - defer r.Close() - w.Header().Set("Content-Type", "image/svg+xml") - setArchiveCacheControl(w.Header(), fullySpecified) - if err := processIcon(w, r); err != nil { - if errgo.Cause(err) == errProbablyNotXML { - logger.Errorf("cannot process icon.svg from %s: %v", id, err) - io.Copy(w, strings.NewReader(defaultIcon)) - return nil - } - return errgo.Mask(err) - } - return nil -} - -var errProbablyNotXML = errgo.New("probably not XML") - -const svgNamespace = "http://www.w3.org/2000/svg" - -// processIcon reads an icon SVG from r and writes -// it to w, making any changes that need to be made. -// Currently it adds a viewBox attribute to the -// element if necessary. -// If there is an error processing the XML before -// the first token has been written, it returns an error -// with errProbablyNotXML as the cause. -func processIcon(w io.Writer, r io.Reader) error { - // Arrange to save all the content that we find up - // until the first element. Then we'll stitch it - // back together again for the actual processing. - var saved bytes.Buffer - dec := xml.NewDecoder(io.TeeReader(r, &saved)) - dec.DefaultSpace = svgNamespace - found, changed := false, false - for !found { - tok, err := dec.Token() - if err == io.EOF { - break - } - if err != nil { - return errgo.WithCausef(err, errProbablyNotXML, "") - } - _, found, changed = ensureViewbox(tok) - } - if !found { - return errgo.WithCausef(nil, errProbablyNotXML, "no element found") - } - // Stitch the input back together again so we can - // write the output without buffering it in memory. - r = io.MultiReader(&saved, r) - if !found || !changed { - _, err := io.Copy(w, r) - return err - } - return processNaive(w, r) -} - -// processNaive is like processIcon but processes all of the -// XML elements. It does not return errProbablyNotXML -// on error because it may have written arbitrary XML -// to w, at which point writing an alternative response would -// be unwise. 
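// For example, an icon that arrives as
//
//	<svg xmlns="http://www.w3.org/2000/svg" width="96" height="96"/>
//
// reaches this function because ensureViewbox reports it as changed, and it
// is re-encoded with viewBox="0 0 96 96" added to the svg start element.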
-func processNaive(w io.Writer, r io.Reader) error { - dec := xml.NewDecoder(r) - dec.DefaultSpace = svgNamespace - enc := xml.NewEncoder(w) - found := false - for { - tok, err := dec.Token() - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("failed to read token: %v", err) - } - if !found { - tok, found, _ = ensureViewbox(tok) - } - if err := enc.EncodeToken(tok); err != nil { - return fmt.Errorf("cannot encode token %#v: %v", tok, err) - } - } - if err := enc.Flush(); err != nil { - return fmt.Errorf("cannot flush output: %v", err) - } - return nil -} - -func ensureViewbox(tok0 xml.Token) (_ xml.Token, found, changed bool) { - tok, ok := tok0.(xml.StartElement) - if !ok || tok.Name.Space != svgNamespace || tok.Name.Local != "svg" { - return tok0, false, false - } - var width, height string - for _, attr := range tok.Attr { - if attr.Name.Space != "" { - continue - } - switch attr.Name.Local { - case "width": - width = attr.Value - case "height": - height = attr.Value - case "viewBox": - return tok, true, false - } - } - if width == "" || height == "" { - // Width and/or height have not been specified, - // so leave viewbox unspecified too. - return tok, true, false - } - tok.Attr = append(tok.Attr, xml.Attr{ - Name: xml.Name{ - Local: "viewBox", - }, - Value: fmt.Sprintf("0 0 %s %s", width, height), - }) - return tok, true, true -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/content_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/content_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/content_test.go 1970-01-01 00:00:00 +0000 @@ -1,517 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4_test - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "path/filepath" - "sort" - "strings" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - "github.com/juju/xml" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -var serveDiagramErrorsTests = []struct { - about string - url string - expectStatus int - expectBody interface{} -}{{ - about: "entity not found", - url: "~charmers/bundle/foo-23/diagram.svg", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: `entity "cs:~charmers/bundle/foo-23" not found`, - }, -}, { - about: "diagram for a charm", - url: "~charmers/wordpress/diagram.svg", - expectStatus: http.StatusNotFound, - expectBody: params.Error{ - Code: params.ErrNotFound, - Message: "diagrams not supported for charms", - }, -}, { - about: "bundle with no position info", - url: "~charmers/nopositionbundle/diagram.svg", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: `cannot create canvas: service "mysql" does not have a valid position`, - }, -}} - -func (s *APISuite) TestServeDiagramErrors(c *gc.C) { - id := newResolvedURL("cs:~charmers/trusty/wordpress-42", 42) - s.addPublicCharm(c, "wordpress", id) - id = newResolvedURL("cs:~charmers/bundle/nopositionbundle-42", 42) - s.addPublicBundle(c, "wordpress-simple", id) - - for i, test := range serveDiagramErrorsTests { - c.Logf("test %d: %s", i, test.about) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.url), - ExpectStatus: 
test.expectStatus, - ExpectBody: test.expectBody, - }) - } -} - -func (s *APISuite) TestServeDiagram(c *gc.C) { - patchArchiveCacheAges(s) - bundle := &testingBundle{ - data: &charm.BundleData{ - Services: map[string]*charm.ServiceSpec{ - "wordpress": { - Charm: "wordpress", - Annotations: map[string]string{ - "gui-x": "100", - "gui-y": "200", - }, - }, - "mysql": { - Charm: "utopic/mysql-23", - Annotations: map[string]string{ - "gui-x": "200", - "gui-y": "200", - }, - }, - }, - }, - } - - url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) - err := s.store.AddBundle(bundle, charmstore.AddParams{ - URL: url, - BlobName: "blobName", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("bundle/wordpressbundle/diagram.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") - assertCacheControl(c, rec.Header(), false) - - // Check that the output contains valid XML with an SVG tag, - // but don't check the details of the output so that this test doesn't - // break every time the jujusvg presentation changes. - // Also check that we get an image for each service containing the charm - // icon link. - assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ - "svg element": isStartElementWithName("svg"), - "wordpress icon": isStartElementWithAttr("image", "href", "../../wordpress/icon.svg"), - "mysql icon": isStartElementWithAttr("image", "href", "../../utopic/mysql-23/icon.svg"), - }) - - // Do the same check again, but with the short form of the id; - // the relative links should change accordingly. - rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("wordpressbundle/diagram.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) - - // Check that the output contains valid XML with an SVG tag, - // but don't check the details of the output so that this test doesn't - // break every time the jujusvg presentation changes. - // Also check that we get an image for each service containing the charm - // icon link. 
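// (The short form drops one path segment from the request URI, so each
// relative icon href below needs one "../" fewer than in the
// fully-specified case checked above.)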
- assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ - "svg element": isStartElementWithName("svg"), - "wordpress icon": isStartElementWithAttr("image", "href", "../wordpress/icon.svg"), - "mysql icon": isStartElementWithAttr("image", "href", "../utopic/mysql-23/icon.svg"), - }) -} - -var serveReadMeTests = []struct { - name string - expectNotFound bool -}{{ - name: "README.md", -}, { - name: "README.rst", -}, { - name: "readme", -}, { - name: "README", -}, { - name: "ReadMe.Txt", -}, { - name: "README.ex", -}, { - name: "", - expectNotFound: true, -}, { - name: "readme-youtube-subscribe.html", - expectNotFound: true, -}, { - name: "readme Dutch.txt", - expectNotFound: true, -}, { - name: "readme Dutch.txt", - expectNotFound: true, -}, { - name: "README.debugging", - expectNotFound: true, -}} - -func (s *APISuite) TestServeReadMe(c *gc.C) { - patchArchiveCacheAges(s) - url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) - for i, test := range serveReadMeTests { - c.Logf("test %d: %s", i, test.name) - wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") - content := fmt.Sprintf("some content %d", i) - if test.name != "" { - err := ioutil.WriteFile(filepath.Join(wordpress.Path, test.name), []byte(content), 0666) - c.Assert(err, gc.IsNil) - } - - url.URL.Revision = i - err := s.store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/readme"), - }) - if test.expectNotFound { - c.Assert(rec.Code, gc.Equals, http.StatusNotFound) - c.Assert(rec.Body.String(), jc.JSONEquals, params.Error{ - Code: params.ErrNotFound, - Message: "not found", - }) - } else { - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.String(), gc.DeepEquals, content) - assertCacheControl(c, rec.Header(), true) - } - } -} - -func (s *APISuite) TestServeReadMeEntityNotFound(c *gc.C) { - // Add another charm so that the base entity exists so we - // actually get through to the code we're wanting to test. - // (if the base entity does not exist, the authorization code - // will fail). - url := newResolvedURL("~charmers/precise/nothingatall-1", -1) - s.addPublicCharm(c, "wordpress", url) - - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("~charmers/precise/nothingatall-32/readme"), - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Code: params.ErrNotFound, - Message: `cannot get README: entity not found`, - }, - }) -} - -func (s *APISuite) TestServeIconEntityNotFound(c *gc.C) { - // Add another charm so that the base entity exists so we - // actually get through to the code we're wanting to test. - // (if the base entity does not exist, the authorization code - // will fail). 
- id := newResolvedURL("~charmers/precise/nothingatall-1", -1) - s.addPublicCharm(c, "wordpress", id) - - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("~charmers/precise/nothingatall-32/icon.svg"), - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Code: params.ErrNotFound, - Message: `cannot get icon: entity not found`, - }, - }) -} - -func charmWithExtraFile(c *gc.C, name, file, content string) *charm.CharmDir { - ch := storetesting.Charms.ClonedDir(c.MkDir(), name) - err := ioutil.WriteFile(filepath.Join(ch.Path, file), []byte(content), 0666) - c.Assert(err, gc.IsNil) - return ch -} - -func (s *APISuite) TestServeIcon(c *gc.C) { - patchArchiveCacheAges(s) - content := `an icon, really` - expected := `an icon, really` - wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content) - - url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) - err := s.store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/icon.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.String(), gc.Equals, expected) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") - assertCacheControl(c, rec.Header(), true) - - // Test with revision -1 - noRevURL := url.URL - noRevURL.Revision = -1 - rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(noRevURL.Path() + "/icon.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.String(), gc.Equals, expected) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") - assertCacheControl(c, rec.Header(), false) - - // Reload the charm with an icon that already has viewBox. - wordpress = storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") - err = ioutil.WriteFile(filepath.Join(wordpress.Path, "icon.svg"), []byte(expected), 0666) - c.Assert(err, gc.IsNil) - - url.URL.Revision++ - err = s.store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - - // Check that we still get expected svg. 
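// (This time the stored icon already carries a viewBox, so processIcon
// finds the svg element unchanged and streams the original bytes through
// without re-encoding.)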
- rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/icon.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.String(), gc.Equals, expected) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") -} - -func (s *APISuite) TestServeBundleIcon(c *gc.C) { - s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/something-32", 32)) - - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("~charmers/bundle/something-32/icon.svg"), - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Code: params.ErrNotFound, - Message: "icons not supported for bundles", - }, - }) -} - -func (s *APISuite) TestServeDefaultIcon(c *gc.C) { - patchArchiveCacheAges(s) - wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress") - - url := newResolvedURL("cs:~charmers/precise/wordpress-0", 0) - err := s.store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/icon.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.String(), gc.Equals, v4.DefaultIcon) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") - assertCacheControl(c, rec.Header(), true) -} - -func (s *APISuite) TestServeDefaultIconForBadXML(c *gc.C) { - patchArchiveCacheAges(s) - - for i, content := range []string{ - "\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44", - // Technically this XML is not bad - we just can't parse it because - // it's got internally defined character entities. Nonetheless, we treat - // it as "bad" for the time being. - cloudfoundrySVG, - } { - wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content) - - url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1) - url.URL.Revision = i - err := s.store.AddCharmWithArchive(url, wordpress) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url.URL.Path() + "/icon.svg"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Body.String(), gc.Equals, v4.DefaultIcon) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") - assertCacheControl(c, rec.Header(), true) - } -} - -func (s *APISuite) TestProcessIconWorksOnDefaultIcon(c *gc.C) { - var buf bytes.Buffer - err := v4.ProcessIcon(&buf, strings.NewReader(v4.DefaultIcon)) - c.Assert(err, gc.IsNil) - assertXMLEqual(c, buf.Bytes(), []byte(v4.DefaultIcon)) -} - -func (s *APISuite) TestProcessIconDoesNotQuoteNewlines(c *gc.C) { - // Note: this is important because Chrome does not like - // to see before the opening tag. - icon := ` - - -` - var buf bytes.Buffer - err := v4.ProcessIcon(&buf, strings.NewReader(icon)) - c.Assert(err, gc.IsNil) - if strings.Contains(buf.String(), "&#x") { - c.Errorf("newlines were quoted in processed icon output") - } -} - -// assertXMLEqual assers that the xml contained in the -// two slices is equal, without caring about namespace -// declarations or attribute ordering. 
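// Concretely, these two renderings compare as equal:
//
//	<svg xmlns="http://www.w3.org/2000/svg" height="10" width="10"/>
//	<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10"/>
//
// because canonicalXMLToken drops the namespace-defining attribute and
// sorts the remaining attributes by name before the deep comparison.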
-func assertXMLEqual(c *gc.C, body []byte, expect []byte) { - decBody := xml.NewDecoder(bytes.NewReader(body)) - decExpect := xml.NewDecoder(bytes.NewReader(expect)) - for i := 0; ; i++ { - tok0, err0 := decBody.Token() - tok1, err1 := decExpect.Token() - if err1 != nil { - c.Assert(err0, gc.NotNil) - c.Assert(err0.Error(), gc.Equals, err1.Error()) - break - } - ok, err := tokenEqual(tok0, tok1) - if !ok { - c.Logf("got %#v", tok0) - c.Logf("want %#v", tok1) - c.Fatalf("mismatch at token %d: %v", i, err) - } - } -} - -func tokenEqual(tok0, tok1 xml.Token) (bool, error) { - tok0 = canonicalXMLToken(tok0) - tok1 = canonicalXMLToken(tok1) - return jc.DeepEqual(tok0, tok1) -} - -func canonicalXMLToken(tok xml.Token) xml.Token { - start, ok := tok.(xml.StartElement) - if !ok { - return tok - } - // Remove all namespace-defining attributes. - j := 0 - for _, attr := range start.Attr { - if attr.Name.Local == "xmlns" && attr.Name.Space == "" || - attr.Name.Space == "xmlns" { - continue - } - start.Attr[j] = attr - j++ - } - start.Attr = start.Attr[0:j] - sort.Sort(attrByName(start.Attr)) - return start -} - -type attrByName []xml.Attr - -func (a attrByName) Len() int { return len(a) } -func (a attrByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a attrByName) Less(i, j int) bool { - if a[i].Name.Space != a[j].Name.Space { - return a[i].Name.Space < a[j].Name.Space - } - return a[i].Name.Local < a[j].Name.Local -} - -// assertXMLContains asserts that the XML in body is well formed, and -// contains at least one token that satisfies each of the functions in need. -func assertXMLContains(c *gc.C, body []byte, need map[string]func(xml.Token) bool) { - dec := xml.NewDecoder(bytes.NewReader(body)) - for { - tok, err := dec.Token() - if err == io.EOF { - break - } - c.Assert(err, gc.IsNil) - for what, f := range need { - if f(tok) { - delete(need, what) - } - } - } - c.Assert(need, gc.HasLen, 0, gc.Commentf("body:\n%s", body)) -} - -func isStartElementWithName(name string) func(xml.Token) bool { - return func(tok xml.Token) bool { - startElem, ok := tok.(xml.StartElement) - return ok && startElem.Name.Local == name - } -} - -func isStartElementWithAttr(name, attr, val string) func(xml.Token) bool { - return func(tok xml.Token) bool { - startElem, ok := tok.(xml.StartElement) - if !ok { - return false - } - for _, a := range startElem.Attr { - if a.Name.Local == attr && a.Value == val { - return true - } - } - return false - } -} - -const cloudfoundrySVG = ` - - - - - - - - - -]> - -content omitted - -` === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/defaulticon.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/defaulticon.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/defaulticon.go 1970-01-01 00:00:00 +0000 @@ -1,278 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4 - -var defaultIcon = ` - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - -` === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/defaulticon_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/defaulticon_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/defaulticon_test.go 1970-01-01 00:00:00 +0000 @@ -1,22 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4_test - -import ( - "strings" - - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/v4" -) - -type iconSuite struct{} - -var _ = gc.Suite(&iconSuite{}) - -func (s *iconSuite) TestValidXML(c *gc.C) { - // The XML declaration must be included in the first line of the icon. - hasXMLPrefix := strings.HasPrefix(v4.DefaultIcon, "= %d", minValue) - } - return value, nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/log_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/log_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/log_test.go 1970-01-01 00:00:00 +0000 @@ -1,528 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4_test - -import ( - "bytes" - "encoding/json" - "net/http" - "time" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -type logSuite struct { - commonSuite -} - -var _ = gc.Suite(&logSuite{}) - -func (s *logSuite) SetUpSuite(c *gc.C) { - s.enableIdentity = true - s.commonSuite.SetUpSuite(c) -} - -var logResponses = map[string]*params.LogResponse{ - "info1": { - Data: rawMessage("info data 1"), - Level: params.InfoLevel, - Type: params.IngestionType, - URLs: nil, - }, - "error1": { - Data: rawMessage("error data 1"), - Level: params.ErrorLevel, - Type: params.IngestionType, - URLs: nil, - }, - "info2": { - Data: rawMessage("info data 2"), - Level: params.InfoLevel, - Type: params.IngestionType, - URLs: []*charm.Reference{ - charm.MustParseReference("precise/django"), - charm.MustParseReference("django"), - charm.MustParseReference("rails"), - }, - }, - "warning1": { - Data: rawMessage("warning data 1"), - Level: params.WarningLevel, - Type: params.IngestionType, - URLs: nil, - }, - "error2": { - Data: rawMessage("error data 2"), - Level: params.ErrorLevel, - Type: params.IngestionType, - URLs: []*charm.Reference{ - charm.MustParseReference("hadoop"), - }, - }, - "info3": { - Data: rawMessage("info data 3"), - Level: params.InfoLevel, - Type: params.IngestionType, - URLs: []*charm.Reference{ - charm.MustParseReference("trusty/django"), - charm.MustParseReference("django"), - charm.MustParseReference("utopic/hadoop"), - charm.MustParseReference("hadoop"), - }, - }, - "error3": { - Data: rawMessage("error data 3"), - Level: params.ErrorLevel, - Type: params.IngestionType, - URLs: []*charm.Reference{ - charm.MustParseReference("utopic/hadoop"), - charm.MustParseReference("hadoop"), - charm.MustParseReference("precise/django"), - charm.MustParseReference("django"), - }, - }, - "stats": { - Data: rawMessage("statistics info data"), - Level: params.InfoLevel, - Type: params.LegacyStatisticsType, - URLs: nil, - }, -} - -var getLogsTests = []struct { - about string - querystring string - expectBody []*params.LogResponse -}{{ - about: "retrieve logs", - expectBody: []*params.LogResponse{ - logResponses["stats"], - logResponses["error3"], - logResponses["info3"], - logResponses["error2"], - logResponses["warning1"], - logResponses["info2"], - logResponses["error1"], - logResponses["info1"], - }, -}, { - about: "use limit", - querystring: "?limit=2", - expectBody: []*params.LogResponse{ - logResponses["stats"], - logResponses["error3"], - }, -}, { - about: "use offset", - querystring: "?skip=3", - expectBody: 
[]*params.LogResponse{ - logResponses["error2"], - logResponses["warning1"], - logResponses["info2"], - logResponses["error1"], - logResponses["info1"], - }, -}, { - about: "zero offset", - querystring: "?skip=0", - expectBody: []*params.LogResponse{ - logResponses["stats"], - logResponses["error3"], - logResponses["info3"], - logResponses["error2"], - logResponses["warning1"], - logResponses["info2"], - logResponses["error1"], - logResponses["info1"], - }, -}, { - about: "use both limit and offset", - querystring: "?limit=3&skip=1", - expectBody: []*params.LogResponse{ - logResponses["error3"], - logResponses["info3"], - logResponses["error2"], - }, -}, { - about: "filter by level", - querystring: "?level=info", - expectBody: []*params.LogResponse{ - logResponses["stats"], - logResponses["info3"], - logResponses["info2"], - logResponses["info1"], - }, -}, { - about: "filter by type", - querystring: "?type=ingestion", - expectBody: []*params.LogResponse{ - logResponses["error3"], - logResponses["info3"], - logResponses["error2"], - logResponses["warning1"], - logResponses["info2"], - logResponses["error1"], - logResponses["info1"], - }, -}, { - about: "filter by level with a limit", - querystring: "?level=error&limit=2", - expectBody: []*params.LogResponse{ - logResponses["error3"], - logResponses["error2"], - }, -}, { - about: "filter by id", - querystring: "?id=precise/django", - expectBody: []*params.LogResponse{ - logResponses["error3"], - logResponses["info2"], - }, -}, { - about: "multiple query", - querystring: "?id=utopic/hadoop&limit=1&level=error", - expectBody: []*params.LogResponse{ - logResponses["error3"], - }, -}, { - about: "empty response offset", - querystring: "?id=utopic/hadoop&skip=10", -}, { - about: "empty response id not found", - querystring: "?id=utopic/mysql", -}, { - about: "empty response level", - querystring: "?id=trusty/rails&level=error", -}, { - about: "filter by type - legacyStatistics", - querystring: "?type=legacyStatistics", - expectBody: []*params.LogResponse{ - logResponses["stats"], - }, -}} - -func (s *logSuite) TestGetLogs(c *gc.C) { - // Add logs to the database. - beforeAdding := time.Now().Add(-time.Second) - for _, key := range []string{"info1", "error1", "info2", "warning1", "error2", "info3", "error3", "stats"} { - resp := logResponses[key] - err := s.store.AddLog(&resp.Data, v4.ParamsLogLevels[resp.Level], v4.ParamsLogTypes[resp.Type], resp.URLs) - c.Assert(err, gc.IsNil) - } - afterAdding := time.Now().Add(time.Second) - - // Run the tests. - for i, test := range getLogsTests { - c.Logf("test %d: %s", i, test.about) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("log" + test.querystring), - Username: testUsername, - Password: testPassword, - }) - - // Ensure the response is what we expect. - c.Assert(rec.Code, gc.Equals, http.StatusOK) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json") - - // Decode the response. - var logs []*params.LogResponse - decoder := json.NewDecoder(rec.Body) - err := decoder.Decode(&logs) - c.Assert(err, gc.IsNil) - - // Check and then reset the response time so that the whole body - // can be more easily compared later. - for _, log := range logs { - c.Assert(log.Time, jc.TimeBetween(beforeAdding, afterAdding)) - log.Time = time.Time{} - } - - // Ensure the response includes the expected logs. 
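// (The handler returns logs newest first, which is why "stats", the last
// entry added above, heads every expected slice in the table.)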
- c.Assert(logs, jc.DeepEquals, test.expectBody) - } -} - -func rawMessage(msg string) json.RawMessage { - message, err := json.Marshal(msg) - if err != nil { - panic(err) - } - return json.RawMessage(message) -} - -var getLogsErrorsTests = []struct { - about string - querystring string - expectStatus int - expectMessage string - expectCode params.ErrorCode -}{{ - about: "invalid limit (negative number)", - querystring: "?limit=-100", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid limit value: value must be >= 1", - expectCode: params.ErrBadRequest, -}, { - about: "invalid limit (zero value)", - querystring: "?limit=0", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid limit value: value must be >= 1", - expectCode: params.ErrBadRequest, -}, { - about: "invalid limit (not a number)", - querystring: "?limit=foo", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid limit value: value must be a number", - expectCode: params.ErrBadRequest, -}, { - about: "invalid offset (negative number)", - querystring: "?skip=-100", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid skip value: value must be >= 0", - expectCode: params.ErrBadRequest, -}, { - about: "invalid offset (not a number)", - querystring: "?skip=bar", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid skip value: value must be a number", - expectCode: params.ErrBadRequest, -}, { - about: "invalid id", - querystring: "?id=no-such:reference", - expectStatus: http.StatusBadRequest, - expectMessage: `invalid id value: charm URL has invalid schema: "no-such:reference"`, - expectCode: params.ErrBadRequest, -}, { - about: "invalid log level", - querystring: "?level=bar", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid log level value", - expectCode: params.ErrBadRequest, -}, { - about: "invalid log type", - querystring: "?type=no-such", - expectStatus: http.StatusBadRequest, - expectMessage: "invalid log type value", - expectCode: params.ErrBadRequest, -}} - -func (s *logSuite) TestGetLogsErrors(c *gc.C) { - for i, test := range getLogsErrorsTests { - c.Logf("test %d: %s", i, test.about) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("log" + test.querystring), - Username: testUsername, - Password: testPassword, - ExpectStatus: test.expectStatus, - ExpectBody: params.Error{ - Message: test.expectMessage, - Code: test.expectCode, - }, - }) - } -} - -func (s *logSuite) TestGetLogsErrorInvalidLog(c *gc.C) { - // Add a non-parsable log message to the db directly. - err := s.store.DB.Logs().Insert(mongodoc.Log{ - Data: []byte("!"), - Level: mongodoc.InfoLevel, - Type: mongodoc.IngestionType, - Time: time.Now(), - }) - c.Assert(err, gc.IsNil) - // The log is just ignored. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("log"), - Username: testUsername, - Password: testPassword, - ExpectStatus: http.StatusOK, - ExpectBody: []params.LogResponse{}, - }) -} - -func (s *logSuite) TestPostLogs(c *gc.C) { - // Prepare the request body. - body := makeByteLogs(rawMessage("info data"), params.InfoLevel, params.IngestionType, []*charm.Reference{ - charm.MustParseReference("trusty/django"), - charm.MustParseReference("utopic/rails"), - }) - - // Send the request. 
- httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("log"), - Method: "POST", - Username: testUsername, - Password: testPassword, - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Body: bytes.NewReader(body), - ExpectStatus: http.StatusOK, - }) - - // Ensure the log message has been added to the database. - var doc mongodoc.Log - err := s.store.DB.Logs().Find(nil).One(&doc) - c.Assert(err, gc.IsNil) - c.Assert(string(doc.Data), gc.Equals, `"info data"`) - c.Assert(doc.Level, gc.Equals, mongodoc.InfoLevel) - c.Assert(doc.Type, gc.Equals, mongodoc.IngestionType) - c.Assert(doc.URLs, jc.DeepEquals, []*charm.Reference{ - charm.MustParseReference("trusty/django"), - charm.MustParseReference("django"), - charm.MustParseReference("utopic/rails"), - charm.MustParseReference("rails"), - }) -} - -func (s *logSuite) TestPostLogsMultipleEntries(c *gc.C) { - // Prepare the request body. - infoData := rawMessage("info data") - warningData := rawMessage("warning data") - logs := []params.Log{{ - Data: &infoData, - Level: params.InfoLevel, - Type: params.IngestionType, - }, { - Data: &warningData, - Level: params.WarningLevel, - Type: params.IngestionType, - }} - body, err := json.Marshal(logs) - c.Assert(err, gc.IsNil) - - // Send the request. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL("log"), - Method: "POST", - Username: testUsername, - Password: testPassword, - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Body: bytes.NewReader(body), - ExpectStatus: http.StatusOK, - }) - - // Ensure the log messages has been added to the database. - var docs []mongodoc.Log - err = s.store.DB.Logs().Find(nil).Sort("id").All(&docs) - c.Assert(err, gc.IsNil) - c.Assert(docs, gc.HasLen, 2) - c.Assert(string(docs[0].Data), gc.Equals, string(infoData)) - c.Assert(docs[0].Level, gc.Equals, mongodoc.InfoLevel) - c.Assert(string(docs[1].Data), gc.Equals, string(warningData)) - c.Assert(docs[1].Level, gc.Equals, mongodoc.WarningLevel) -} - -var postLogsErrorsTests = []struct { - about string - contentType string - body []byte - expectStatus int - expectMessage string - expectCode params.ErrorCode -}{{ - about: "invalid content type", - contentType: "application/zip", - expectStatus: http.StatusBadRequest, - expectMessage: `unexpected Content-Type "application/zip"; expected 'application/json'`, - expectCode: params.ErrBadRequest, -}, { - about: "invalid body", - body: []byte("!"), - expectStatus: http.StatusBadRequest, - expectMessage: "cannot unmarshal body: invalid character '!' 
looking for beginning of value", - expectCode: params.ErrBadRequest, -}, { - about: "invalid log level", - body: makeByteLogs(rawMessage("message"), params.LogLevel(42), params.IngestionType, nil), - expectStatus: http.StatusBadRequest, - expectMessage: "invalid log level", - expectCode: params.ErrBadRequest, -}, { - about: "invalid log type", - body: makeByteLogs(rawMessage("message"), params.WarningLevel, params.LogType(42), nil), - expectStatus: http.StatusBadRequest, - expectMessage: "invalid log type", - expectCode: params.ErrBadRequest, -}} - -func (s *logSuite) TestPostLogsErrors(c *gc.C) { - url := storeURL("log") - for i, test := range postLogsErrorsTests { - c.Logf("test %d: %s", i, test.about) - if test.contentType == "" { - test.contentType = "application/json" - } - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - Method: "POST", - Header: http.Header{ - "Content-Type": {test.contentType}, - }, - Body: bytes.NewReader(test.body), - Username: testUsername, - Password: testPassword, - ExpectStatus: test.expectStatus, - ExpectBody: params.Error{ - Message: test.expectMessage, - Code: test.expectCode, - }, - }) - } -} - -func (s *logSuite) TestGetLogsUnauthorizedError(c *gc.C) { - s.AssertEndpointAuth(c, httptesting.JSONCallParams{ - URL: storeURL("log"), - ExpectStatus: http.StatusOK, - ExpectBody: []params.LogResponse{}, - }) -} - -func (s *logSuite) TestPostLogsUnauthorizedError(c *gc.C) { - // Add a non-parsable log message to the db. - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.noMacaroonSrv, - URL: storeURL("log"), - Method: "POST", - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - ExpectStatus: http.StatusUnauthorized, - ExpectBody: params.Error{ - Message: "authentication failed: missing HTTP auth header", - Code: params.ErrUnauthorized, - }, - }) -} - -func makeByteLogs(data json.RawMessage, logLevel params.LogLevel, logType params.LogType, urls []*charm.Reference) []byte { - logs := []params.Log{{ - Data: &data, - Level: logLevel, - Type: logType, - URLs: urls, - }} - b, err := json.Marshal(logs) - if err != nil { - panic(err) - } - return b -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/package_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4_test - -import ( - "testing" - - jujutesting "github.com/juju/testing" -) - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/pprof.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/pprof.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/pprof.go 1970-01-01 00:00:00 +0000 @@ -1,76 +0,0 @@ -package v4 - -import ( - "net/http" - runtimepprof "runtime/pprof" - "strings" - "text/template" - - "github.com/juju/httpprof" - - "gopkg.in/juju/charmstore.v4/internal/router" -) - -type pprofHandler struct { - mux *http.ServeMux - auth authorizer -} - -type authorizer interface { - authorize(req *http.Request, acl []string, alwaysAuth bool, entityId *router.ResolvedURL) (authorization, error) -} - -func newPprofHandler(auth authorizer) http.Handler { - mux := http.NewServeMux() - mux.HandleFunc("/cmdline", pprof.Cmdline) - mux.HandleFunc("/profile", pprof.Profile) - mux.HandleFunc("/symbol", pprof.Symbol) - mux.HandleFunc("/", pprofIndex) - return &pprofHandler{ - mux: mux, - auth: auth, - } -} - -func (h *pprofHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if _, err := h.auth.authorize(req, nil, true, nil); err != nil { - router.WriteError(w, err) - return - } - h.mux.ServeHTTP(w, req) -} - -// pprofIndex is copied from pprof.Index with minor modifications
-// to make it work using a relative path.
-func pprofIndex(w http.ResponseWriter, req *http.Request) { - if req.URL.Path == "/" { - profiles := runtimepprof.Profiles() - if err := indexTmpl.Execute(w, profiles); err != nil { - logger.Errorf("cannot execute pprof template: %v", err) - } - return - } - name := strings.TrimPrefix(req.URL.Path, "/") - pprof.Handler(name).ServeHTTP(w, req) -} - -var indexTmpl = template.Must(template.New("index").Parse(`<html>
-<head>
-<title>pprof</title>
-</head>
-<body>
-pprof<br>
-<br>
-profiles:<br>
-<table>
-{{range .}}
-<tr><td align=right>{{.Count}}<td><a href="{{.Name}}?debug=1">{{.Name}}</a>
-{{end}}
-</table>
-<br>
-<a href="goroutine?debug=2">full goroutine stack dump</a><br>
-</body>
-</html>
- - -`)) === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/relations.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/relations.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/relations.go 1970-01-01 00:00:00 +0000 @@ -1,299 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4 - -import ( - "net/http" - "net/url" - - "gopkg.in/errgo.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -// GET id/meta/charm-related[?include=meta[&include=meta…]] -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related -func (h *Handler) metaCharmRelated(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - if id.URL.Series == "bundle" { - return nil, nil - } - - // If the charm does not define any relation we can just return without - // hitting the db. - if len(entity.CharmProvidedInterfaces)+len(entity.CharmRequiredInterfaces) == 0 { - return ¶ms.RelatedResponse{}, nil - } - - // Build the query to retrieve the related entities. - query := bson.M{ - "$or": []bson.M{ - {"charmrequiredinterfaces": bson.M{ - "$elemMatch": bson.M{ - "$in": entity.CharmProvidedInterfaces, - }, - }}, - {"charmprovidedinterfaces": bson.M{ - "$elemMatch": bson.M{ - "$in": entity.CharmRequiredInterfaces, - }, - }}, - }, - } - fields := bson.D{ - {"_id", 1}, - {"charmrequiredinterfaces", 1}, - {"charmprovidedinterfaces", 1}, - {"promulgated-url", 1}, - {"promulgated-revision", 1}, - } - - store := h.pool.Store() - defer store.Close() - // Retrieve the entities from the database. - var entities []mongodoc.Entity - if err := store.DB.Entities().Find(query).Select(fields).Sort("_id").All(&entities); err != nil { - return nil, errgo.Notef(err, "cannot retrieve the related charms") - } - - // If no entities are found there is no need for further processing the - // results. - if len(entities) == 0 { - return ¶ms.RelatedResponse{}, nil - } - - // Build the results, by grouping entities based on their relations' roles - // and interfaces. - includes := flags["include"] - requires, err := h.getRelatedCharmsResponse(entity.CharmProvidedInterfaces, entities, func(e mongodoc.Entity) []string { - return e.CharmRequiredInterfaces - }, includes, req) - if err != nil { - return nil, errgo.Notef(err, "cannot retrieve the charm requires") - } - provides, err := h.getRelatedCharmsResponse(entity.CharmRequiredInterfaces, entities, func(e mongodoc.Entity) []string { - return e.CharmProvidedInterfaces - }, includes, req) - if err != nil { - return nil, errgo.Notef(err, "cannot retrieve the charm provides") - } - - // Return the response. - return ¶ms.RelatedResponse{ - Requires: requires, - Provides: provides, - }, nil -} - -type entityRelatedInterfacesGetter func(mongodoc.Entity) []string - -// getRelatedCharmsResponse returns a response mapping interfaces to related -// charms. 
For instance: -// map[string][]params.MetaAnyResponse{ -// "http": []params.MetaAnyResponse{ -// {Id: "cs:utopic/django-42", Meta: ...}, -// {Id: "cs:trusty/wordpress-47", Meta: ...}, -// }, -// "memcache": []params.MetaAnyResponse{ -// {Id: "cs:utopic/memcached-0", Meta: ...}, -// }, -// } -func (h *Handler) getRelatedCharmsResponse( - ifaces []string, - entities []mongodoc.Entity, - getInterfaces entityRelatedInterfacesGetter, - includes []string, - req *http.Request, -) (map[string][]params.MetaAnyResponse, error) { - results := make(map[string][]params.MetaAnyResponse, len(ifaces)) - for _, iface := range ifaces { - responses, err := h.getRelatedIfaceResponses(iface, entities, getInterfaces, includes, req) - if err != nil { - return nil, err - } - if len(responses) > 0 { - results[iface] = responses - } - } - return results, nil -} - -func (h *Handler) getRelatedIfaceResponses( - iface string, - entities []mongodoc.Entity, - getInterfaces entityRelatedInterfacesGetter, - includes []string, - req *http.Request, -) ([]params.MetaAnyResponse, error) { - // Build a list of responses including entities which are related - // to the given interface. - responses := make([]params.MetaAnyResponse, 0, len(entities)) - for _, entity := range entities { - for _, entityIface := range getInterfaces(entity) { - if entityIface == iface { - // Retrieve the requested metadata for the entity. - meta, err := h.getMetadataForEntity(&entity, includes, req) - if err != nil { - return nil, err - } - // Build the response. - responses = append(responses, params.MetaAnyResponse{ - Id: entity.PreferredURL(true), - Meta: meta, - }) - } - } - } - return responses, nil -} - -// GET id/meta/bundles-containing[?include=meta[&include=meta…]][&any-series=1][&any-revision=1][&all-results=1] -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundles-containing -func (h *Handler) metaBundlesContaining(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { - if id.URL.Series == "bundle" { - return nil, nil - } - - // Validate the URL query values. - anySeries, err := router.ParseBool(flags.Get("any-series")) - if err != nil { - return nil, badRequestf(err, "invalid value for any-series") - } - anyRevision, err := router.ParseBool(flags.Get("any-revision")) - if err != nil { - return nil, badRequestf(err, "invalid value for any-revision") - } - allResults, err := router.ParseBool(flags.Get("all-results")) - if err != nil { - return nil, badRequestf(err, "invalid value for all-results") - } - - // Mutate the reference so that it represents a base URL if required. - prefURL := id.PreferredURL() - searchId := *prefURL - if anySeries || anyRevision { - searchId.Revision = -1 - searchId.Series = "" - } - - store := h.pool.Store() - defer store.Close() - // Retrieve the bundles containing the resulting charm id. - var entities []*mongodoc.Entity - if err := store.DB.Entities(). - Find(bson.D{{"bundlecharms", &searchId}}). - Select(bson.D{{"_id", 1}, {"bundlecharms", 1}, {"promulgated-url", 1}}). - All(&entities); err != nil { - return nil, errgo.Notef(err, "cannot retrieve the related bundles") - } - - // Further filter the entities if required, by only including latest - // bundle revisions and/or excluding specific charm series or revisions. - - // Filter entities so it contains only entities that actually - // match the desired search criteria. 
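// For example, with any-series=1 a bundle referencing
// cs:trusty/wordpress-42 still matches a search for
// cs:utopic/wordpress-42, while with any-revision=1 alone the series must
// agree and only the revision may differ: each flag relaxes exactly one
// component of the bundlecharms match.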
- filterEntities(&entities, func(e *mongodoc.Entity) bool { - if anySeries == anyRevision { - // If neither anySeries or anyRevision are true, then - // the search will be exact and therefore e must be - // matched. - // If both anySeries and anyRevision are true, then - // the base entity that we are searching for is exactly - // what we want to search for, therefore e must be matched. - return true - } - for _, charmId := range e.BundleCharms { - if charmId.Name == prefURL.Name && - charmId.User == prefURL.User && - (anySeries || charmId.Series == prefURL.Series) && - (anyRevision || charmId.Revision == prefURL.Revision) { - return true - } - } - return false - }) - - var latest map[charm.Reference]int - if !allResults { - // Include only the latest revision of any bundle. - // This is made somewhat tricky by the fact that - // each bundle can have two URLs, its canonical - // URL (with user) and its promulgated URL. - // - // We want to maximise the URL revision regardless of - // whether the URL is promulgated or not, so we - // we build a map holding the latest revision for both - // promulgated and non-promulgated revisions - // and then include entities that have the latest - // revision for either. - latest = make(map[charm.Reference]int) - - // updateLatest updates the latest revision for u - // without its revision if it's greater than the existing - // entry. - updateLatest := func(u *charm.Reference) { - u1 := *u - u1.Revision = -1 - if rev, ok := latest[u1]; !ok || rev < u.Revision { - latest[u1] = u.Revision - } - } - for _, e := range entities { - updateLatest(e.URL) - if e.PromulgatedURL != nil { - updateLatest(e.PromulgatedURL) - } - } - filterEntities(&entities, func(e *mongodoc.Entity) bool { - if e.PromulgatedURL != nil { - u := *e.PromulgatedURL - u.Revision = -1 - if latest[u] == e.PromulgatedURL.Revision { - return true - } - } - u := *e.URL - u.Revision = -1 - return latest[u] == e.URL.Revision - }) - } - - // Prepare and return the response. - response := make([]*params.MetaAnyResponse, 0, len(entities)) - includes := flags["include"] - // TODO(rog) make this concurrent. - for _, e := range entities { - meta, err := h.getMetadataForEntity(e, includes, req) - if err != nil { - return nil, errgo.Notef(err, "cannot retrieve bundle metadata") - } - response = append(response, ¶ms.MetaAnyResponse{ - Id: e.PreferredURL(true), - Meta: meta, - }) - } - return response, nil -} - -func (h *Handler) getMetadataForEntity(e *mongodoc.Entity, includes []string, req *http.Request) (map[string]interface{}, error) { - return h.GetMetadata(charmstore.EntityResolvedURL(e), includes, req) -} - -// filterEntities deletes all entities from *entities for which -// the given predicate returns false. -func filterEntities(entities *[]*mongodoc.Entity, predicate func(*mongodoc.Entity) bool) { - entities1 := *entities - j := 0 - for _, e := range entities1 { - if predicate(e) { - entities1[j] = e - j++ - } - } - *entities = entities1[0:j] -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/relations_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/relations_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/relations_test.go 1970-01-01 00:00:00 +0000 @@ -1,854 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4_test - -import ( - "encoding/json" - "fmt" - "net/http" - "sort" - "strconv" - "strings" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/blobstore" - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -// Define fake blob attributes to be used in tests. -var fakeBlobSize, fakeBlobHash = func() (int64, string) { - b := []byte("fake content") - h := blobstore.NewHash() - h.Write(b) - return int64(len(b)), fmt.Sprintf("%x", h.Sum(nil)) -}() - -type RelationsSuite struct { - commonSuite -} - -var _ = gc.Suite(&RelationsSuite{}) - -// metaCharmRelatedCharms defines a bunch of charms to be used in -// the relation tests. -var metaCharmRelatedCharms = map[string]charm.Charm{ - "0 ~charmers/utopic/wordpress-0": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "website": { - Name: "website", - Role: "provider", - Interface: "http", - }, - }, - requires: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "requirer", - Interface: "memcache", - }, - "nfs": { - Name: "nfs", - Role: "requirer", - Interface: "mount", - }, - }, - }, - "42 ~charmers/utopic/memcached-42": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - "1 ~charmers/precise/nfs-1": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "nfs": { - Name: "nfs", - Role: "provider", - Interface: "mount", - }, - }, - }, - "47 ~charmers/trusty/haproxy-47": &relationTestingCharm{ - requires: map[string]charm.Relation{ - "reverseproxy": { - Name: "reverseproxy", - Role: "requirer", - Interface: "http", - }, - }, - }, - "48 ~charmers/precise/haproxy-48": &relationTestingCharm{ - requires: map[string]charm.Relation{ - "reverseproxy": { - Name: "reverseproxy", - Role: "requirer", - Interface: "http", - }, - }, - }, -} - -var metaCharmRelatedTests = []struct { - // Description of the test. - about string - // Charms to be stored in the store before the test is run. - charms map[string]charm.Charm - // The id of the charm for which related charms are returned. - id string - // The querystring to append to the resulting charmstore URL. - querystring string - // The expected response body. 
- expectBody params.RelatedResponse -}{{ - about: "provides and requires", - charms: metaCharmRelatedCharms, - id: "utopic/wordpress-0", - expectBody: params.RelatedResponse{ - Provides: map[string][]params.MetaAnyResponse{ - "memcache": {{ - Id: charm.MustParseReference("utopic/memcached-42"), - }}, - "mount": {{ - Id: charm.MustParseReference("precise/nfs-1"), - }}, - }, - Requires: map[string][]params.MetaAnyResponse{ - "http": {{ - Id: charm.MustParseReference("precise/haproxy-48"), - }, { - Id: charm.MustParseReference("trusty/haproxy-47"), - }}, - }, - }, -}, { - about: "only provides", - charms: metaCharmRelatedCharms, - id: "trusty/haproxy-47", - expectBody: params.RelatedResponse{ - Provides: map[string][]params.MetaAnyResponse{ - "http": {{ - Id: charm.MustParseReference("utopic/wordpress-0"), - }}, - }, - }, -}, { - about: "only requires", - charms: metaCharmRelatedCharms, - id: "utopic/memcached-42", - expectBody: params.RelatedResponse{ - Requires: map[string][]params.MetaAnyResponse{ - "memcache": {{ - Id: charm.MustParseReference("utopic/wordpress-0"), - }}, - }, - }, -}, { - about: "no relations found", - charms: map[string]charm.Charm{ - "0 ~charmers/utopic/wordpress-0": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "website": { - Name: "website", - Role: "provider", - Interface: "http", - }, - }, - requires: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "requirer", - Interface: "memcache", - }, - "nfs": { - Name: "nfs", - Role: "requirer", - Interface: "mount", - }, - }, - }, - }, - id: "utopic/wordpress-0", -}, { - about: "no relations defined", - charms: map[string]charm.Charm{ - "42 ~charmers/utopic/django-42": &relationTestingCharm{}, - }, - id: "utopic/django-42", -}, { - about: "multiple revisions of the same related charm", - charms: map[string]charm.Charm{ - "0 ~charmers/trusty/wordpress-0": &relationTestingCharm{ - requires: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "requirer", - Interface: "memcache", - }, - }, - }, - "1 ~charmers/utopic/memcached-1": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - "2 ~charmers/utopic/memcached-2": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - "3 ~charmers/utopic/memcached-3": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - }, - id: "trusty/wordpress-0", - expectBody: params.RelatedResponse{ - Provides: map[string][]params.MetaAnyResponse{ - "memcache": {{ - Id: charm.MustParseReference("utopic/memcached-1"), - }, { - Id: charm.MustParseReference("utopic/memcached-2"), - }, { - Id: charm.MustParseReference("utopic/memcached-3"), - }}, - }, - }, -}, { - about: "reference ordering", - charms: map[string]charm.Charm{ - "0 ~charmers/trusty/wordpress-0": &relationTestingCharm{ - requires: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "requirer", - Interface: "memcache", - }, - "nfs": { - Name: "nfs", - Role: "requirer", - Interface: "mount", - }, - }, - }, - "1 ~charmers/utopic/memcached-1": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - "2 ~charmers/utopic/memcached-2": &relationTestingCharm{ - provides: map[string]charm.Relation{ - 
"cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - "90 ~charmers/utopic/redis-90": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "provider", - Interface: "memcache", - }, - }, - }, - "47 ~charmers/trusty/nfs-47": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "nfs": { - Name: "nfs", - Role: "provider", - Interface: "mount", - }, - }, - }, - "42 ~charmers/precise/nfs-42": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "nfs": { - Name: "nfs", - Role: "provider", - Interface: "mount", - }, - }, - }, - "47 ~charmers/precise/nfs-47": &relationTestingCharm{ - provides: map[string]charm.Relation{ - "nfs": { - Name: "nfs", - Role: "provider", - Interface: "mount", - }, - }, - }, - }, - id: "trusty/wordpress-0", - expectBody: params.RelatedResponse{ - Provides: map[string][]params.MetaAnyResponse{ - "memcache": {{ - Id: charm.MustParseReference("utopic/memcached-1"), - }, { - Id: charm.MustParseReference("utopic/memcached-2"), - }, { - Id: charm.MustParseReference("utopic/redis-90"), - }}, - "mount": {{ - Id: charm.MustParseReference("precise/nfs-42"), - }, { - Id: charm.MustParseReference("precise/nfs-47"), - }, { - Id: charm.MustParseReference("trusty/nfs-47"), - }}, - }, - }, -}, { - about: "includes", - charms: metaCharmRelatedCharms, - id: "precise/nfs-1", - querystring: "?include=archive-size&include=charm-metadata", - expectBody: params.RelatedResponse{ - Requires: map[string][]params.MetaAnyResponse{ - "mount": {{ - Id: charm.MustParseReference("utopic/wordpress-0"), - Meta: map[string]interface{}{ - "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, - "charm-metadata": &charm.Meta{ - Provides: map[string]charm.Relation{ - "website": { - Name: "website", - Role: "provider", - Interface: "http", - }, - }, - Requires: map[string]charm.Relation{ - "cache": { - Name: "cache", - Role: "requirer", - Interface: "memcache", - }, - "nfs": { - Name: "nfs", - Role: "requirer", - Interface: "mount", - }, - }, - }, - }, - }}, - }, - }, -}} - -func (s *RelationsSuite) addCharms(c *gc.C, charms map[string]charm.Charm) { - for id, ch := range charms { - url := mustParseResolvedURL(id) - // The blob related info are not used in these tests. - // The related charms are retrieved from the entities collection, - // without accessing the blob store. - err := s.store.AddCharm(ch, charmstore.AddParams{ - URL: url, - BlobName: "blobName", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil, gc.Commentf("id %q", id)) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - } -} - -func (s *RelationsSuite) TestMetaCharmRelated(c *gc.C) { - for i, test := range metaCharmRelatedTests { - c.Logf("test %d: %s", i, test.about) - s.addCharms(c, test.charms) - storeURL := storeURL(test.id + "/meta/charm-related" + test.querystring) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL, - ExpectStatus: http.StatusOK, - ExpectBody: test.expectBody, - }) - // Clean up the entities in the store. 
-    _, err := s.store.DB.Entities().RemoveAll(nil)
-    c.Assert(err, gc.IsNil)
-  }
-}
-
-func (s *RelationsSuite) TestMetaCharmRelatedIncludeError(c *gc.C) {
-  s.addCharms(c, metaCharmRelatedCharms)
-  storeURL := storeURL("utopic/wordpress-0/meta/charm-related?include=no-such")
-  httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
-    Handler: s.srv,
-    URL: storeURL,
-    ExpectStatus: http.StatusInternalServerError,
-    ExpectBody: params.Error{
-      Message: `cannot retrieve the charm requires: unrecognized metadata name "no-such"`,
-    },
-  })
-}
-
-// relationTestingCharm implements charm.Charm, and it is used for testing
-// charm relations.
-type relationTestingCharm struct {
-  provides map[string]charm.Relation
-  requires map[string]charm.Relation
-}
-
-func (ch *relationTestingCharm) Meta() *charm.Meta {
-  // The only metadata we are interested in is the relation data.
-  return &charm.Meta{
-    Provides: ch.provides,
-    Requires: ch.requires,
-  }
-}
-
-func (ch *relationTestingCharm) Config() *charm.Config {
-  // For the purposes of this implementation, the charm configuration is not
-  // relevant.
-  return nil
-}
-
-func (ch *relationTestingCharm) Metrics() *charm.Metrics {
-  // For the purposes of this implementation, the charm metrics are not
-  // relevant.
-  return nil
-}
-
-func (ch *relationTestingCharm) Actions() *charm.Actions {
-  // For the purposes of this implementation, the charm actions are not
-  // relevant.
-  return nil
-}
-
-func (ch *relationTestingCharm) Revision() int {
-  // For the purposes of this implementation, the charm revision is not
-  // relevant.
-  return 0
-}
-
-// metaBundlesContainingBundles defines a bunch of bundles to be used in
-// the bundles-containing tests.
-var metaBundlesContainingBundles = map[string]charm.Bundle{
-  "0 ~charmers/bundle/wordpress-simple-0": relationTestingBundle([]string{
-    "cs:utopic/wordpress-42",
-    "cs:utopic/mysql-0",
-  }),
-  "1 ~charmers/bundle/wordpress-simple-1": relationTestingBundle([]string{
-    "cs:utopic/wordpress-47",
-    "cs:utopic/mysql-1",
-  }),
-  "1 ~charmers/bundle/wordpress-complex-1": relationTestingBundle([]string{
-    "cs:utopic/wordpress-42",
-    "cs:utopic/wordpress-47",
-    "cs:trusty/mysql-0",
-    "cs:trusty/mysql-1",
-    "cs:trusty/memcached-2",
-  }),
-  "42 ~charmers/bundle/django-generic-42": relationTestingBundle([]string{
-    "django",
-    "django",
-    "mysql-1",
-    "trusty/memcached",
-  }),
-  "0 ~charmers/bundle/useless-0": relationTestingBundle([]string{
-    "cs:utopic/wordpress-42",
-    "precise/mediawiki-10",
-  }),
-  "46 ~charmers/bundle/mediawiki-simple-46": relationTestingBundle([]string{
-    "precise/mediawiki-0",
-  }),
-  "47 ~charmers/bundle/mediawiki-simple-47": relationTestingBundle([]string{
-    "precise/mediawiki-0",
-    "mysql",
-  }),
-  "48 ~charmers/bundle/mediawiki-simple-48": relationTestingBundle([]string{
-    "precise/mediawiki-0",
-  }),
-  "~bob/bundle/bobthebundle-2": relationTestingBundle([]string{
-    "precise/mediawiki-0",
-  }),
-}
-
-var metaBundlesContainingTests = []struct {
-  // Description of the test.
-  about string
-  // The id of the charm for which related bundles are returned.
-  id string
-  // The querystring to append to the resulting charmstore URL.
-  querystring string
-  // The expected status code of the response.
-  expectStatus int
-  // The expected response body.
- expectBody interface{} -}{{ - about: "specific charm present in several bundles", - id: "utopic/wordpress-42", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/useless-0"), - }, { - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }, { - Id: charm.MustParseReference("bundle/wordpress-simple-0"), - }}, -}, { - about: "specific charm present in one bundle", - id: "trusty/memcached-2", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }}, -}, { - about: "specific charm not present in any bundle", - id: "trusty/django-42", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{}, -}, { - about: "specific charm with includes", - id: "trusty/mysql-1", - querystring: "?include=archive-size&include=bundle-metadata", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - Meta: map[string]interface{}{ - "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, - "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), - }, - }}, -}, { - about: "partial charm id", - id: "mysql", // The test will add cs:utopic/mysql-0. - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/wordpress-simple-0"), - }}, -}, { - about: "any series set to true", - id: "trusty/mysql-0", - querystring: "?any-series=1", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }, { - Id: charm.MustParseReference("bundle/wordpress-simple-0"), - }}, -}, { - about: "any series and all-results set to true", - id: "trusty/mysql-0", - querystring: "?any-series=1&all-results=1", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }, { - // This result is included even if the latest wordpress-simple does not - // contain the mysql-0 charm. 
- Id: charm.MustParseReference("bundle/wordpress-simple-0"), - }}, -}, { - about: "invalid any series", - id: "utopic/mysql-0", - querystring: "?any-series=true", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: `invalid value for any-series: unexpected bool value "true" (must be "0" or "1")`, - }, -}, { - about: "any revision set to true", - id: "trusty/memcached-99", - querystring: "?any-revision=1", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/django-generic-42"), - }, { - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }}, -}, { - about: "invalid any revision", - id: "trusty/memcached-99", - querystring: "?any-revision=why-not", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: `invalid value for any-revision: unexpected bool value "why-not" (must be "0" or "1")`, - }, -}, { - about: "all-results set to true", - id: "precise/mediawiki-0", - expectStatus: http.StatusOK, - querystring: "?all-results=1", - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/mediawiki-simple-48"), - }, { - Id: charm.MustParseReference("bundle/mediawiki-simple-47"), - }, { - Id: charm.MustParseReference("bundle/mediawiki-simple-46"), - }, { - Id: charm.MustParseReference("~bob/bundle/bobthebundle-2"), - }}, -}, { - about: "all-results set to false", - id: "precise/mediawiki-0", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/mediawiki-simple-48"), - }, { - Id: charm.MustParseReference("~bob/bundle/bobthebundle-2"), - }}, -}, { - about: "invalid all-results", - id: "trusty/memcached-99", - querystring: "?all-results=yes!", - expectStatus: http.StatusBadRequest, - expectBody: params.Error{ - Code: params.ErrBadRequest, - Message: `invalid value for all-results: unexpected bool value "yes!" 
(must be "0" or "1")`, - }, -}, { - about: "any series and revision, all results", - id: "saucy/mysql-99", - querystring: "?any-series=1&any-revision=1&all-results=1", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/django-generic-42"), - }, { - Id: charm.MustParseReference("bundle/mediawiki-simple-47"), - }, { - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }, { - Id: charm.MustParseReference("bundle/wordpress-simple-1"), - }, { - Id: charm.MustParseReference("bundle/wordpress-simple-0"), - }}, -}, { - about: "any series, any revision", - id: "saucy/mysql-99", - querystring: "?any-series=1&any-revision=1", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/django-generic-42"), - }, { - Id: charm.MustParseReference("bundle/mediawiki-simple-47"), - }, { - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - }, { - Id: charm.MustParseReference("bundle/wordpress-simple-1"), - }}, -}, { - about: "any series and revision, last results", - id: "saucy/mediawiki", - querystring: "?any-series=1&any-revision=1", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/mediawiki-simple-48"), - }, { - Id: charm.MustParseReference("bundle/useless-0"), - }, { - Id: charm.MustParseReference("~bob/bundle/bobthebundle-2"), - }}, -}, { - about: "any series and revision with includes", - id: "saucy/wordpress-99", - querystring: "?any-series=1&any-revision=1&include=archive-size&include=bundle-metadata", - expectStatus: http.StatusOK, - expectBody: []*params.MetaAnyResponse{{ - Id: charm.MustParseReference("bundle/useless-0"), - Meta: map[string]interface{}{ - "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, - "bundle-metadata": metaBundlesContainingBundles["0 ~charmers/bundle/useless-0"].Data(), - }, - }, { - Id: charm.MustParseReference("bundle/wordpress-complex-1"), - Meta: map[string]interface{}{ - "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, - "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), - }, - }, { - Id: charm.MustParseReference("bundle/wordpress-simple-1"), - Meta: map[string]interface{}{ - "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, - "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-simple-1"].Data(), - }, - }}, -}, { - about: "include-error", - id: "utopic/wordpress-42", - querystring: "?include=no-such", - expectStatus: http.StatusInternalServerError, - expectBody: params.Error{ - Message: `cannot retrieve bundle metadata: unrecognized metadata name "no-such"`, - }, -}} - -func (s *RelationsSuite) TestMetaBundlesContaining(c *gc.C) { - // Add the bundles used for testing to the database. - for id, b := range metaBundlesContainingBundles { - url := mustParseResolvedURL(id) - // The blob related info are not used in these tests. - // The charm-bundle relations are retrieved from the entities - // collection, without accessing the blob store. - err := s.store.AddBundle(b, charmstore.AddParams{ - URL: url, - BlobName: "blobName", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - } - - for i, test := range metaBundlesContainingTests { - c.Logf("test %d: %s", i, test.about) - - // Expand the URL if required before adding the charm to the database, - // so that at least one matching charm can be resolved. 
- rurl := &router.ResolvedURL{ - URL: *charm.MustParseReference(test.id), - PromulgatedRevision: -1, - } - if rurl.URL.Series == "" { - rurl.URL.Series = "utopic" - } - if rurl.URL.Revision == -1 { - rurl.URL.Revision = 0 - } - if rurl.URL.User == "" { - rurl.URL.User = "charmers" - rurl.PromulgatedRevision = rurl.URL.Revision - } - // Add the charm we need bundle info on to the database. - err := s.store.AddCharm(&relationTestingCharm{}, charmstore.AddParams{ - URL: rurl, - BlobName: "blobName", - BlobHash: fakeBlobHash, - BlobSize: fakeBlobSize, - }) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) - c.Assert(err, gc.IsNil) - - // Perform the request and ensure the response is what we expect. - storeURL := storeURL(test.id + "/meta/bundles-containing" + test.querystring) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL, - ExpectStatus: test.expectStatus, - ExpectBody: sameMetaAnyResponses(test.expectBody), - }) - - // Clean up the charm entity in the store. - err = s.store.DB.Entities().Remove(bson.D{{"_id", &rurl.URL}}) - c.Assert(err, gc.IsNil) - } -} - -// sameMetaAnyResponses returns a BodyAsserter that checks whether the meta/any response -// matches the expected one, even if the results appear in a different order. -func sameMetaAnyResponses(expect interface{}) httptesting.BodyAsserter { - return func(c *gc.C, m json.RawMessage) { - expectMeta, ok := expect.([]*params.MetaAnyResponse) - if !ok { - c.Assert(string(m), jc.JSONEquals, expect) - return - } - var got []*params.MetaAnyResponse - err := json.Unmarshal(m, &got) - c.Assert(err, gc.IsNil) - sort.Sort(metaAnyResponseById(got)) - sort.Sort(metaAnyResponseById(expectMeta)) - data, err := json.Marshal(got) - c.Assert(err, gc.IsNil) - c.Assert(string(data), jc.JSONEquals, expect) - } -} - -// relationTestingBundle returns a bundle for use in relation -// testing. The urls parameter holds a list of charm references -// to be included in the bundle. -// For each URL, a corresponding service is automatically created. -func relationTestingBundle(urls []string) charm.Bundle { - services := make(map[string]*charm.ServiceSpec, len(urls)) - for i, url := range urls { - service := &charm.ServiceSpec{ - Charm: url, - NumUnits: 1, - } - services[fmt.Sprintf("service-%d", i)] = service - } - return &testingBundle{ - data: &charm.BundleData{ - Services: services, - }, - } -} - -// testingBundle is a bundle implementation that -// returns bundle metadata held in the data field. -type testingBundle struct { - data *charm.BundleData -} - -func (b *testingBundle) Data() *charm.BundleData { - return b.data -} - -func (b *testingBundle) ReadMe() string { - // For the purposes of this implementation, the charm readme is not - // relevant. - return "" -} - -type metaAnyResponseById []*params.MetaAnyResponse - -func (s metaAnyResponseById) Len() int { return len(s) } -func (s metaAnyResponseById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s metaAnyResponseById) Less(i, j int) bool { - return s[i].Id.String() < s[j].Id.String() -} - -// mustParseResolvedURL parses a resolved URL in string form, with -// the optional promulgated revision preceding the entity URL -// separated by a space. 
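-// For example, "42 ~charmers/utopic/memcached-42" yields a ResolvedURL
-// with PromulgatedRevision set to 42, while a plain id such as
-// "~bob/bundle/bobthebundle-2" yields one with PromulgatedRevision -1.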
-func mustParseResolvedURL(urlStr string) *router.ResolvedURL { - s := strings.Fields(urlStr) - promRev := -1 - switch len(s) { - default: - panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) - case 2: - var err error - promRev, err = strconv.Atoi(s[0]) - if err != nil || promRev < 0 { - panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) - } - case 1: - } - return &router.ResolvedURL{ - URL: *charm.MustParseReference(s[len(s)-1]), - PromulgatedRevision: promRev, - } -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/search.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/search.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/search.go 1970-01-01 00:00:00 +0000 @@ -1,162 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4 - -import ( - "net/http" - "strconv" - "sync/atomic" - - "github.com/juju/utils/parallel" - "gopkg.in/errgo.v1" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -const maxConcurrency = 20 - -// GET search[?text=text][&autocomplete=1][&filter=value…][&limit=limit][&include=meta][&skip=count][&sort=field[+dir]] -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search -func (h *Handler) serveSearch(_ http.Header, req *http.Request) (interface{}, error) { - sp, err := parseSearchParams(req) - if err != nil { - return "", err - } - auth, err := h.checkRequest(req, nil) - if err != nil { - logger.Infof("authorization failed on search request, granting no privileges: %v", err) - } - sp.Admin = auth.Admin - if auth.Username != "" { - sp.Groups = append(sp.Groups, auth.Username) - groups, err := h.groupsForUser(auth.Username) - if err != nil { - logger.Infof("cannot get groups for user %q, assuming no groups: %v", auth.Username, err) - } - sp.Groups = append(sp.Groups, groups...) - } - // perform query - store := h.pool.Store() - defer store.Close() - results, err := store.Search(sp) - if err != nil { - return nil, errgo.Notef(err, "error performing search") - } - response := params.SearchResponse{ - SearchTime: results.SearchTime, - Total: results.Total, - Results: make([]params.SearchResult, len(results.Results)), - } - run := parallel.NewRun(maxConcurrency) - var missing int32 - for i, ref := range results.Results { - i, ref := i, ref - run.Do(func() error { - meta, err := h.Router.GetMetadata(ref, sp.Include, req) - if err != nil { - // Unfortunately it is possible to get errors here due to - // internal inconsistency, so rather than throwing away - // all the search results, we just log the error and move on. - logger.Errorf("cannot retrieve metadata for %v: %v", ref, err) - atomic.AddInt32(&missing, 1) - return nil - } - response.Results[i] = params.SearchResult{ - Id: ref.PreferredURL(), - Meta: meta, - } - return nil - }) - } - // We never return an error from the Do function above, so no need to - // check the error here. - run.Wait() - if missing == 0 { - return response, nil - } - // We're missing some results - shuffle all the results down to - // fill the gaps. 
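-  // This is the usual in-place filtering idiom: non-nil results are
-  // copied towards the front of the slice and the tail is truncated,
-  // so no second slice needs to be allocated.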
-  j := 0
-  for _, result := range response.Results {
-    if result.Id != nil {
-      response.Results[j] = result
-      j++
-    }
-  }
-  response.Results = response.Results[0:j]
-  return response, nil
-}
-
-// GET search/interesting[?limit=limit][&include=meta]
-// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-searchinteresting
-func (h *Handler) serveSearchInteresting(w http.ResponseWriter, req *http.Request) {
-  router.WriteError(w, errNotImplemented)
-}
-
-// parseSearchParams extracts the search parameters from the request.
-func parseSearchParams(req *http.Request) (charmstore.SearchParams, error) {
-  sp := charmstore.SearchParams{}
-  var err error
-  for k, v := range req.Form {
-    switch k {
-    case "text":
-      sp.Text = v[0]
-    case "autocomplete":
-      sp.AutoComplete, err = router.ParseBool(v[0])
-      if err != nil {
-        return charmstore.SearchParams{}, badRequestf(err, "invalid autocomplete parameter")
-      }
-    case "limit":
-      sp.Limit, err = strconv.Atoi(v[0])
-      if err != nil {
-        return charmstore.SearchParams{}, badRequestf(err, "invalid limit parameter: could not parse integer")
-      }
-      if sp.Limit < 1 {
-        return charmstore.SearchParams{}, badRequestf(nil, "invalid limit parameter: expected integer greater than zero")
-      }
-    case "include":
-      for _, s := range v {
-        if s != "" {
-          sp.Include = append(sp.Include, s)
-        }
-      }
-    case "description", "name", "owner", "provides", "requires", "series", "summary", "tags", "type":
-      if sp.Filters == nil {
-        sp.Filters = make(map[string][]string)
-      }
-      sp.Filters[k] = v
-    case "promulgated":
-      promulgated, err := router.ParseBool(v[0])
-      if err != nil {
-        return charmstore.SearchParams{}, badRequestf(err, "invalid promulgated filter parameter")
-      }
-      if sp.Filters == nil {
-        sp.Filters = make(map[string][]string)
-      }
-      if promulgated {
-        sp.Filters[k] = []string{"1"}
-      } else {
-        sp.Filters[k] = []string{"0"}
-      }
-    case "skip":
-      sp.Skip, err = strconv.Atoi(v[0])
-      if err != nil {
-        return charmstore.SearchParams{}, badRequestf(err, "invalid skip parameter: could not parse integer")
-      }
-      if sp.Skip < 0 {
-        return charmstore.SearchParams{}, badRequestf(nil, "invalid skip parameter: expected non-negative integer")
-      }
-    case "sort":
-      err = sp.ParseSortFields(v...)
-      if err != nil {
-        return charmstore.SearchParams{}, badRequestf(err, "invalid sort field")
-      }
-    default:
-      return charmstore.SearchParams{}, badRequestf(nil, "invalid parameter: %s", k)
-    }
-  }
-  return sp, nil
-}
=== removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/search_test.go'
--- src/gopkg.in/juju/charmstore.v4/internal/v4/search_test.go 2015-09-22 15:27:01 +0000
+++ src/gopkg.in/juju/charmstore.v4/internal/v4/search_test.go 1970-01-01 00:00:00 +0000
@@ -1,904 +0,0 @@
-// Copyright 2014 Canonical Ltd.
-// Licensed under the AGPLv3, see LICENCE file for details.
- -package v4_test - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "sort" - "strings" - - "github.com/juju/loggo" - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" - "gopkg.in/macaroon.v1" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -type SearchSuite struct { - commonSuite -} - -var _ = gc.Suite(&SearchSuite{}) - -var exportTestCharms = map[string]*router.ResolvedURL{ - "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), - "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), - "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), - "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), -} - -var exportTestBundles = map[string]*router.ResolvedURL{ - "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), -} - -func (s *SearchSuite) SetUpSuite(c *gc.C) { - s.enableES = true - s.enableIdentity = true - s.commonSuite.SetUpSuite(c) -} - -func (s *SearchSuite) SetUpTest(c *gc.C) { - s.commonSuite.SetUpTest(c) - s.addCharmsToStore(c) - // hide the riak charm - err := s.store.DB.BaseEntities().UpdateId( - charm.MustParseReference("cs:~charmers/riak"), - bson.D{{"$set", map[string]mongodoc.ACL{ - "acls": { - Read: []string{"charmers", "test-user"}, - }, - }}}, - ) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearch(newResolvedURL("~charmers/trusty/riak-0", 0)) - c.Assert(err, gc.IsNil) - err = s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) - c.Assert(err, gc.IsNil) -} - -func (s *SearchSuite) addCharmsToStore(c *gc.C) { - for name, id := range exportTestCharms { - err := s.store.AddCharmWithArchive(id, getCharm(name)) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearch(id) - c.Assert(err, gc.IsNil) - } - for name, id := range exportTestBundles { - err := s.store.AddBundleWithArchive(id, getBundle(name)) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearch(id) - c.Assert(err, gc.IsNil) - } -} - -func getCharm(name string) *charm.CharmDir { - ca := storetesting.Charms.CharmDir(name) - ca.Meta().Categories = append(strings.Split(name, "-"), "bar") - return ca -} - -func getBundle(name string) *charm.BundleDir { - ba := storetesting.Charms.BundleDir(name) - ba.Data().Tags = append(strings.Split(name, "-"), "baz") - return ba -} - -func (s *SearchSuite) TestParseSearchParams(c *gc.C) { - tests := []struct { - about string - query string - expectParams charmstore.SearchParams - expectError string - }{{ - about: "bare search", - query: "", - }, { - about: "text search", - query: "text=test", - expectParams: charmstore.SearchParams{ - Text: "test", - }, - }, { - about: "autocomplete", - query: "autocomplete=1", - expectParams: charmstore.SearchParams{ - AutoComplete: true, - }, - }, { - about: "invalid autocomplete", - query: "autocomplete=true", - expectError: `invalid autocomplete parameter: unexpected bool value "true" (must be "0" or "1")`, - }, { - about: "limit", - query: 
"limit=20", - expectParams: charmstore.SearchParams{ - Limit: 20, - }, - }, { - about: "invalid limit", - query: "limit=twenty", - expectError: `invalid limit parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, - }, { - about: "limit too low", - query: "limit=-1", - expectError: "invalid limit parameter: expected integer greater than zero", - }, { - about: "include", - query: "include=archive-size", - expectParams: charmstore.SearchParams{ - Include: []string{"archive-size"}, - }, - }, { - about: "include many", - query: "include=archive-size&include=bundle-data", - expectParams: charmstore.SearchParams{ - Include: []string{"archive-size", "bundle-data"}, - }, - }, { - about: "include many with blanks", - query: "include=archive-size&include=&include=bundle-data", - expectParams: charmstore.SearchParams{ - Include: []string{"archive-size", "bundle-data"}, - }, - }, { - about: "description filter", - query: "description=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "description": {"text"}, - }, - }, - }, { - about: "name filter", - query: "name=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "name": {"text"}, - }, - }, - }, { - about: "owner filter", - query: "owner=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "owner": {"text"}, - }, - }, - }, { - about: "provides filter", - query: "provides=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "provides": {"text"}, - }, - }, - }, { - about: "requires filter", - query: "requires=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "requires": {"text"}, - }, - }, - }, { - about: "series filter", - query: "series=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "series": {"text"}, - }, - }, - }, { - about: "tags filter", - query: "tags=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "tags": {"text"}, - }, - }, - }, { - about: "type filter", - query: "type=text", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "type": {"text"}, - }, - }, - }, { - about: "many filters", - query: "name=name&owner=owner&series=series1&series=series2", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "name": {"name"}, - "owner": {"owner"}, - "series": {"series1", "series2"}, - }, - }, - }, { - about: "bad parameter", - query: "a=b", - expectError: "invalid parameter: a", - }, { - about: "skip", - query: "skip=20", - expectParams: charmstore.SearchParams{ - Skip: 20, - }, - }, { - about: "invalid skip", - query: "skip=twenty", - expectError: `invalid skip parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, - }, { - about: "skip too low", - query: "skip=-1", - expectError: "invalid skip parameter: expected non-negative integer", - }, { - about: "promulgated filter", - query: "promulgated=1", - expectParams: charmstore.SearchParams{ - Filters: map[string][]string{ - "promulgated": {"1"}, - }, - }, - }, { - about: "promulgated filter - bad", - query: "promulgated=bad", - expectError: `invalid promulgated filter parameter: unexpected bool value "bad" (must be "0" or "1")`, - }} - for i, test := range tests { - c.Logf("test %d. 
%s", i, test.about) - var req http.Request - var err error - req.Form, err = url.ParseQuery(test.query) - c.Assert(err, gc.IsNil) - sp, err := v4.ParseSearchParams(&req) - if test.expectError != "" { - c.Assert(err, gc.Not(gc.IsNil)) - c.Assert(err.Error(), gc.Equals, test.expectError) - } else { - c.Assert(err, gc.IsNil) - } - c.Assert(sp, jc.DeepEquals, test.expectParams) - } -} - -func (s *SearchSuite) TestSuccessfulSearches(c *gc.C) { - tests := []struct { - about string - query string - results []*router.ResolvedURL - }{{ - about: "bare search", - query: "", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "text search", - query: "text=wordpress", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "autocomplete search", - query: "text=word&autocomplete=1", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "blank text search", - query: "text=", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "description filter search", - query: "description=database", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "name filter search", - query: "name=mysql", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - }, - }, { - about: "owner filter search", - query: "owner=foo", - results: []*router.ResolvedURL{ - exportTestCharms["varnish"], - }, - }, { - about: "provides filter search", - query: "provides=mysql", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - }, - }, { - about: "requires filter search", - query: "requires=mysql", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "series filter search", - query: "series=trusty", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "summary filter search", - query: "summary=database", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "tags filter search", - query: "tags=wordpress", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "type filter search", - query: "type=bundle", - results: []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - }, - }, { - about: "multiple type filter search", - query: "type=bundle&type=charm", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "provides multiple interfaces filter search", - query: "provides=monitoring+http", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "requires multiple interfaces filter search", - query: "requires=mysql+varnish", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - }, - }, { - about: "multiple tags filter search", - query: "tags=mysql+bar", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - }, - }, { - about: "blank owner", - query: "owner=", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], 
- exportTestCharms["mysql"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "paginated search", - query: "name=mysql&skip=1", - }, { - about: "promulgated", - query: "promulgated=1", - results: []*router.ResolvedURL{ - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "not promulgated", - query: "promulgated=0", - results: []*router.ResolvedURL{ - exportTestCharms["varnish"], - }, - }, { - about: "promulgated with owner", - query: "promulgated=1&owner=openstack-charmers", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - }, - }} - for i, test := range tests { - c.Logf("test %d. %s", i, test.about) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search?" + test.query), - }) - var sr params.SearchResponse - err := json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - c.Assert(sr.Results, gc.HasLen, len(test.results)) - c.Logf("results: %s", rec.Body.Bytes()) - assertResultSet(c, sr, test.results) - } -} - -func (s *SearchSuite) TestPaginatedSearch(c *gc.C) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search?text=wordpress&skip=1"), - }) - var sr params.SearchResponse - err := json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - c.Assert(sr.Results, gc.HasLen, 1) - c.Assert(sr.Total, gc.Equals, 2) -} - -func (s *SearchSuite) TestMetadataFields(c *gc.C) { - tests := []struct { - about string - query string - meta map[string]interface{} - }{{ - about: "archive-size", - query: "name=mysql&include=archive-size", - meta: map[string]interface{}{ - "archive-size": params.ArchiveSizeResponse{438}, - }, - }, { - about: "bundle-metadata", - query: "name=wordpress-simple&type=bundle&include=bundle-metadata", - meta: map[string]interface{}{ - "bundle-metadata": getBundle("wordpress-simple").Data(), - }, - }, { - about: "bundle-machine-count", - query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", - meta: map[string]interface{}{ - "bundle-machine-count": params.BundleCount{2}, - }, - }, { - about: "bundle-unit-count", - query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", - meta: map[string]interface{}{ - "bundle-unit-count": params.BundleCount{2}, - }, - }, { - about: "charm-actions", - query: "name=wordpress&type=charm&include=charm-actions", - meta: map[string]interface{}{ - "charm-actions": getCharm("wordpress").Actions(), - }, - }, { - about: "charm-config", - query: "name=wordpress&type=charm&include=charm-config", - meta: map[string]interface{}{ - "charm-config": getCharm("wordpress").Config(), - }, - }, { - about: "charm-related", - query: "name=wordpress&type=charm&include=charm-related", - meta: map[string]interface{}{ - "charm-related": params.RelatedResponse{ - Provides: map[string][]params.MetaAnyResponse{ - "mysql": { - { - Id: exportTestCharms["mysql"].PreferredURL(), - }, - }, - "varnish": { - { - Id: exportTestCharms["varnish"].PreferredURL(), - }, - }, - }, - }, - }, - }, { - about: "multiple values", - query: "name=wordpress&type=charm&include=charm-related&include=charm-config", - meta: map[string]interface{}{ - "charm-related": params.RelatedResponse{ - Provides: map[string][]params.MetaAnyResponse{ - "mysql": { - { - Id: exportTestCharms["mysql"].PreferredURL(), - }, - }, - "varnish": { - { - Id: exportTestCharms["varnish"].PreferredURL(), - }, - }, - }, - }, - "charm-config": getCharm("wordpress").Config(), - }, - }} - 
for i, test := range tests {
-    c.Logf("test %d. %s", i, test.about)
-    rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
-      Handler: s.srv,
-      URL: storeURL("search?" + test.query),
-    })
-    c.Assert(rec.Code, gc.Equals, http.StatusOK)
-    var sr struct {
-      Results []struct {
-        Meta json.RawMessage
-      }
-    }
-    err := json.Unmarshal(rec.Body.Bytes(), &sr)
-    c.Assert(err, gc.IsNil)
-    c.Assert(sr.Results, gc.HasLen, 1)
-    c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta)
-  }
-}
-
-func (s *SearchSuite) TestSearchError(c *gc.C) {
-  err := s.esSuite.ES.DeleteIndex(s.esSuite.TestIndex)
-  c.Assert(err, gc.Equals, nil)
-  rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
-    Handler: s.srv,
-    URL: storeURL("search?name=wordpress"),
-  })
-  c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError)
-  var resp params.Error
-  err = json.Unmarshal(rec.Body.Bytes(), &resp)
-  c.Assert(err, gc.IsNil)
-  c.Assert(resp.Code, gc.Equals, params.ErrorCode(""))
-  c.Assert(resp.Message, gc.Matches, "error performing search: search failed: .*")
-}
-
-func (s *SearchSuite) TestSearchIncludeError(c *gc.C) {
-  // Perform a search for all charms, including the
-  // manifest, which will try to retrieve all charm
-  // blobs.
-  rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
-    Handler: s.srv,
-    URL: storeURL("search?type=charm&include=manifest"),
-  })
-  c.Assert(rec.Code, gc.Equals, http.StatusOK)
-  var resp params.SearchResponse
-  err := json.Unmarshal(rec.Body.Bytes(), &resp)
-  c.Assert(err, gc.IsNil)
-  // cs:riak will not be found because it is not visible to "everyone".
-  c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-1)
-
-  // Now remove one of the blobs. The search should still
-  // work, but return one fewer result.
-  blobName, _, err := s.store.BlobNameAndHash(newResolvedURL("~charmers/precise/wordpress-23", 23))
-  c.Assert(err, gc.IsNil)
-  err = s.store.BlobStore.Remove(blobName)
-  c.Assert(err, gc.IsNil)
-
-  // Now search again - we should get one result less
-  // (and the error will be logged).
-
-  // Register a logger so that we can check the logging output.
-  // It will be automatically removed later because IsolatedMgoESSuite
-  // uses LoggingSuite.
-  var tw loggo.TestWriter
-  err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG)
-  c.Assert(err, gc.IsNil)
-
-  rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
-    Handler: s.srv,
-    URL: storeURL("search?type=charm&include=manifest"),
-  })
-  c.Assert(rec.Code, gc.Equals, http.StatusOK)
-  resp = params.SearchResponse{}
-  err = json.Unmarshal(rec.Body.Bytes(), &resp)
-  c.Assert(err, gc.IsNil)
-  // cs:riak will not be found because it is not visible to "everyone".
-  // cs:wordpress will not be found because it has no manifest.
- c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-2) - - c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"}) -} - -func (s *SearchSuite) TestSorting(c *gc.C) { - tests := []struct { - about string - query string - results []*router.ResolvedURL - }{{ - about: "name ascending", - query: "sort=name", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "name descending", - query: "sort=-name", - results: []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - exportTestCharms["wordpress"], - exportTestCharms["varnish"], - exportTestCharms["mysql"], - }, - }, { - about: "series ascending", - query: "sort=series,name", - results: []*router.ResolvedURL{ - exportTestBundles["wordpress-simple"], - exportTestCharms["wordpress"], - exportTestCharms["mysql"], - exportTestCharms["varnish"], - }, - }, { - about: "series descending", - query: "sort=-series&sort=name", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["varnish"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }, { - about: "owner ascending", - query: "sort=owner,name", - results: []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - exportTestCharms["varnish"], - }, - }, { - about: "owner descending", - query: "sort=-owner&sort=name", - results: []*router.ResolvedURL{ - exportTestCharms["varnish"], - exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestBundles["wordpress-simple"], - }, - }} - for i, test := range tests { - c.Logf("test %d. %s", i, test.about) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search?" + test.query), - }) - var sr params.SearchResponse - err := json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - assertResultSet(c, sr, test.results) - } -} - -func (s *SearchSuite) TestSortUnsupportedField(c *gc.C) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search?sort=foo"), - }) - var e params.Error - err := json.Unmarshal(rec.Body.Bytes(), &e) - c.Assert(err, gc.IsNil) - c.Assert(e.Code, gc.Equals, params.ErrBadRequest) - c.Assert(e.Message, gc.Equals, "invalid sort field: foo") -} - -func (s *SearchSuite) TestDownloadsBoost(c *gc.C) { - // TODO (frankban): remove this call when removing the legacy counts logic. 
- patchLegacyDownloadCountsEnabled(s.AddCleanup, false) - charmDownloads := map[string]int{ - "mysql": 0, - "wordpress": 1, - "varnish": 8, - } - for n, cnt := range charmDownloads { - url := newResolvedURL("cs:~downloads-test/trusty/x-1", -1) - url.URL.Name = n - err := s.store.AddCharmWithArchive(url, getCharm(n)) - c.Assert(err, gc.IsNil) - err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) - c.Assert(err, gc.IsNil) - err = s.store.UpdateSearch(url) - c.Assert(err, gc.IsNil) - for i := 0; i < cnt; i++ { - err := s.store.IncrementDownloadCounts(url) - c.Assert(err, gc.IsNil) - } - } - err := s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search?owner=downloads-test"), - }) - var sr params.SearchResponse - err = json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - c.Assert(sr.Results, gc.HasLen, 3) - c.Assert(sr.Results[0].Id.Name, gc.Equals, "varnish") - c.Assert(sr.Results[1].Id.Name, gc.Equals, "wordpress") - c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") -} - -// TODO(mhilton) remove this test when removing legacy counts logic. -func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { - patchLegacyDownloadCountsEnabled(s.AddCleanup, true) - doc, err := s.store.ES.GetSearchDocument(charm.MustParseReference("~openstack-charmers/trusty/mysql-7")) - c.Assert(err, gc.IsNil) - c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) - s.assertPut(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) - doc, err = s.store.ES.GetSearchDocument(charm.MustParseReference("~openstack-charmers/trusty/mysql-7")) - c.Assert(err, gc.IsNil) - c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) - -} - -func (s *SearchSuite) assertPut(c *gc.C, url string, val interface{}) { - body, err := json.Marshal(val) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL(url), - Method: "PUT", - Header: http.Header{ - "Content-Type": {"application/json"}, - }, - Username: testUsername, - Password: testPassword, - Body: bytes.NewReader(body), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("headers: %v, body: %s", rec.HeaderMap, rec.Body.String())) - c.Assert(rec.Body.String(), gc.HasLen, 0) -} - -func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search"), - Username: testUsername, - Password: testPassword, - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - expected := []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestCharms["riak"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - } - var sr params.SearchResponse - err := json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - assertResultSet(c, sr, expected) -} - -func (s *SearchSuite) TestSearchWithUserMacaroon(c *gc.C) { - m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ - checkers.DeclaredCaveat("username", "test-user"), - }) - c.Assert(err, gc.IsNil) - macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search"), - Cookies: []*http.Cookie{macaroonCookie}, - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - expected := []*router.ResolvedURL{ - 
exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestCharms["riak"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - } - var sr params.SearchResponse - err = json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - assertResultSet(c, sr, expected) -} - -func (s *SearchSuite) TestSearchWithUserInGroups(c *gc.C) { - m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ - checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), - }) - c.Assert(err, gc.IsNil) - macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) - c.Assert(err, gc.IsNil) - s.idM.groups = map[string][]string{ - "bob": {"test-user", "test-user2"}, - } - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search"), - Cookies: []*http.Cookie{macaroonCookie}, - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - expected := []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestCharms["riak"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - } - var sr params.SearchResponse - err = json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - assertResultSet(c, sr, expected) -} - -func (s *SearchSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) { - m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ - checkers.DeclaredCaveat("username", "test-user"), - }) - c.Assert(err, gc.IsNil) - macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) - c.Assert(err, gc.IsNil) - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("search"), - Cookies: []*http.Cookie{macaroonCookie}, - Username: testUsername, - Password: "bad-password", - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK) - expected := []*router.ResolvedURL{ - exportTestCharms["mysql"], - exportTestCharms["wordpress"], - exportTestCharms["varnish"], - exportTestBundles["wordpress-simple"], - } - var sr params.SearchResponse - err = json.Unmarshal(rec.Body.Bytes(), &sr) - c.Assert(err, gc.IsNil) - assertResultSet(c, sr, expected) -} - -func assertResultSet(c *gc.C, sr params.SearchResponse, expected []*router.ResolvedURL) { - sort.Sort(searchResultById(sr.Results)) - sort.Sort(resolvedURLByPreferredURL(expected)) - c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", expected)) - for i := range expected { - c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d")) - } -} - -type searchResultById []params.SearchResult - -func (s searchResultById) Len() int { return len(s) } -func (s searchResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s searchResultById) Less(i, j int) bool { - return s[i].Id.String() < s[j].Id.String() -} - -type resolvedURLByPreferredURL []*router.ResolvedURL - -func (s resolvedURLByPreferredURL) Len() int { return len(s) } -func (s resolvedURLByPreferredURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s resolvedURLByPreferredURL) Less(i, j int) bool { - return s[i].PreferredURL().String() < s[j].PreferredURL().String() -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/stats.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/stats.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/stats.go 1970-01-01 00:00:00 +0000 @@ -1,123 +0,0 @@ -// Copyright 2012 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4 - -import ( - "net/http" - "net/url" - "strings" - "time" - - "gopkg.in/errgo.v1" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/params" -) - -const dateFormat = "2006-01-02" - -// parseDateRange parses a date range as specified in an http -// request. The returned times will be zero if not specified. -func parseDateRange(form url.Values) (start, stop time.Time, err error) { - if v := form.Get("start"); v != "" { - var err error - start, err = time.Parse(dateFormat, v) - if err != nil { - return time.Time{}, time.Time{}, badRequestf(err, "invalid 'start' value %q", v) - } - } - if v := form.Get("stop"); v != "" { - var err error - stop, err = time.Parse(dateFormat, v) - if err != nil { - return time.Time{}, time.Time{}, badRequestf(err, "invalid 'stop' value %q", v) - } - // Cover all timestamps within the stop day. - stop = stop.Add(24*time.Hour - 1*time.Second) - } - return -} - -// GET stats/counter/key[:key]...?[by=unit]&start=date][&stop=date][&list=1] -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-statscounter -func (h *Handler) serveStatsCounter(_ http.Header, r *http.Request) (interface{}, error) { - base := strings.TrimPrefix(r.URL.Path, "/") - if strings.Index(base, "/") > 0 { - return nil, errgo.WithCausef(nil, params.ErrNotFound, "invalid key") - } - if base == "" { - return nil, params.ErrForbidden - } - var by charmstore.CounterRequestBy - switch v := r.Form.Get("by"); v { - case "": - by = charmstore.ByAll - case "day": - by = charmstore.ByDay - case "week": - by = charmstore.ByWeek - default: - return nil, badRequestf(nil, "invalid 'by' value %q", v) - } - req := charmstore.CounterRequest{ - Key: strings.Split(base, ":"), - List: r.Form.Get("list") == "1", - By: by, - } - var err error - req.Start, req.Stop, err = parseDateRange(r.Form) - if err != nil { - return nil, errgo.Mask(err, errgo.Is(params.ErrBadRequest)) - } - if req.Key[len(req.Key)-1] == "*" { - req.Prefix = true - req.Key = req.Key[:len(req.Key)-1] - if len(req.Key) == 0 { - return nil, errgo.WithCausef(nil, params.ErrForbidden, "unknown key") - } - } - store := h.pool.Store() - defer store.Close() - entries, err := store.Counters(&req) - if err != nil { - return nil, errgo.Notef(err, "cannot query counters") - } - - var buf []byte - var items []params.Statistic - for i := range entries { - entry := &entries[i] - buf = buf[:0] - if req.List { - for j := range entry.Key { - buf = append(buf, entry.Key[j]...) - buf = append(buf, ':') - } - if entry.Prefix { - buf = append(buf, '*') - } else { - buf = buf[:len(buf)-1] - } - } - stat := params.Statistic{ - Key: string(buf), - Count: entry.Count, - } - if !entry.Time.IsZero() { - stat.Date = entry.Time.Format("2006-01-02") - } - items = append(items, stat) - } - - return items, nil -} - -// StatsEnabled reports whether statistics should be gathered for -// the given HTTP request. -func StatsEnabled(req *http.Request) bool { - // It's fine to parse the form more than once, and it avoids - // bugs from not parsing it. - req.ParseForm() - return req.Form.Get("stats") != "0" -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/stats_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/stats_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/stats_test.go 1970-01-01 00:00:00 +0000 @@ -1,428 +0,0 @@ -// Copyright 2012 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4_test - -import ( - "encoding/json" - "net/http" - "net/url" - "strings" - "time" - - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/internal/v4" - "gopkg.in/juju/charmstore.v4/params" -) - -type StatsSuite struct { - commonSuite -} - -var _ = gc.Suite(&StatsSuite{}) - -func (s *StatsSuite) TestServerStatsStatus(c *gc.C) { - tests := []struct { - path string - status int - message string - code params.ErrorCode - }{{ - path: "stats/counter/", - status: http.StatusForbidden, - message: "forbidden", - code: params.ErrForbidden, - }, { - path: "stats/counter/*", - status: http.StatusForbidden, - message: "unknown key", - code: params.ErrForbidden, - }, { - path: "stats/counter/any/", - status: http.StatusNotFound, - message: "invalid key", - code: params.ErrNotFound, - }, { - path: "stats/", - status: http.StatusNotFound, - message: "not found", - code: params.ErrNotFound, - }, { - path: "stats/any", - status: http.StatusNotFound, - message: "not found", - code: params.ErrNotFound, - }, { - path: "stats/counter/any?by=fortnight", - status: http.StatusBadRequest, - message: `invalid 'by' value "fortnight"`, - code: params.ErrBadRequest, - }, { - path: "stats/counter/any?start=tomorrow", - status: http.StatusBadRequest, - message: `invalid 'start' value "tomorrow": parsing time "tomorrow" as "2006-01-02": cannot parse "tomorrow" as "2006"`, - code: params.ErrBadRequest, - }, { - path: "stats/counter/any?stop=3", - status: http.StatusBadRequest, - message: `invalid 'stop' value "3": parsing time "3" as "2006-01-02": cannot parse "3" as "2006"`, - code: params.ErrBadRequest, - }} - for i, test := range tests { - c.Logf("test %d. 
%s", i, test.path) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: storeURL(test.path), - ExpectStatus: test.status, - ExpectBody: params.Error{ - Message: test.message, - Code: test.code, - }, - }) - } -} - -func (s *StatsSuite) TestStatsCounter(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - for _, key := range [][]string{{"a", "b"}, {"a", "b"}, {"a", "c"}, {"a"}} { - err := s.store.IncCounter(key) - c.Assert(err, gc.IsNil) - } - - var all []interface{} - err := s.store.DB.StatCounters().Find(nil).All(&all) - c.Assert(err, gc.IsNil) - data, err := json.Marshal(all) - c.Assert(err, gc.IsNil) - c.Logf("%s", data) - - expected := map[string]int64{ - "a:b": 2, - "a:b:*": 0, - "a:*": 3, - "a": 1, - "a:b:c": 0, - } - - for counter, n := range expected { - c.Logf("test %q", counter) - url := storeURL("stats/counter/" + counter) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - ExpectBody: []params.Statistic{{ - Count: n, - }}, - }) - } -} - -func (s *StatsSuite) TestStatsCounterList(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - incs := [][]string{ - {"a"}, - {"a", "b"}, - {"a", "b", "c"}, - {"a", "b", "c"}, - {"a", "b", "d"}, - {"a", "b", "e"}, - {"a", "f", "g"}, - {"a", "f", "h"}, - {"a", "i"}, - {"j", "k"}, - } - for _, key := range incs { - err := s.store.IncCounter(key) - c.Assert(err, gc.IsNil) - } - - tests := []struct { - key string - result []params.Statistic - }{{ - key: "a", - result: []params.Statistic{{ - Key: "a", - Count: 1, - }}, - }, { - key: "a:*", - result: []params.Statistic{{ - Key: "a:b:*", - Count: 4, - }, { - Key: "a:f:*", - Count: 2, - }, { - Key: "a:b", - Count: 1, - }, { - Key: "a:i", - Count: 1, - }}, - }, { - key: "a:b:*", - result: []params.Statistic{{ - Key: "a:b:c", - Count: 2, - }, { - Key: "a:b:d", - Count: 1, - }, { - Key: "a:b:e", - Count: 1, - }}, - }, { - key: "a:*", - result: []params.Statistic{{ - Key: "a:b:*", - Count: 4, - }, { - Key: "a:f:*", - Count: 2, - }, { - Key: "a:b", - Count: 1, - }, { - Key: "a:i", - Count: 1, - }}, - }} - - for i, test := range tests { - c.Logf("test %d: %s", i, test.key) - url := storeURL("stats/counter/" + test.key + "?list=1") - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - ExpectBody: test.result, - }) - } -} - -func (s *StatsSuite) TestStatsCounterBy(c *gc.C) { - if !storetesting.MongoJSEnabled() { - c.Skip("MongoDB JavaScript not available") - } - - incs := []struct { - key []string - day int - }{ - {[]string{"a"}, 1}, - {[]string{"a"}, 1}, - {[]string{"b"}, 1}, - {[]string{"a", "b"}, 1}, - {[]string{"a", "c"}, 1}, - {[]string{"a"}, 3}, - {[]string{"a", "b"}, 3}, - {[]string{"b"}, 9}, - {[]string{"b"}, 9}, - {[]string{"a", "c", "d"}, 9}, - {[]string{"a", "c", "e"}, 9}, - {[]string{"a", "c", "f"}, 9}, - } - - day := func(i int) time.Time { - return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) - } - - for i, inc := range incs { - t := day(inc.day) - // Ensure each entry is unique by adding - // a sufficient increment for each test. 
- t = t.Add(time.Duration(i) * charmstore.StatsGranularity) - - err := s.store.IncCounterAtTime(inc.key, t) - c.Assert(err, gc.IsNil) - } - - tests := []struct { - request charmstore.CounterRequest - result []params.Statistic - }{{ - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: false, - List: false, - By: charmstore.ByDay, - }, - result: []params.Statistic{{ - Date: "2012-05-01", - Count: 2, - }, { - Date: "2012-05-03", - Count: 1, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - }, - result: []params.Statistic{{ - Date: "2012-05-01", - Count: 2, - }, { - Date: "2012-05-03", - Count: 1, - }, { - Date: "2012-05-09", - Count: 3, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - Start: time.Date(2012, 5, 2, 0, 0, 0, 0, time.UTC), - }, - result: []params.Statistic{{ - Date: "2012-05-03", - Count: 1, - }, { - Date: "2012-05-09", - Count: 3, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - Stop: time.Date(2012, 5, 4, 0, 0, 0, 0, time.UTC), - }, - result: []params.Statistic{{ - Date: "2012-05-01", - Count: 2, - }, { - Date: "2012-05-03", - Count: 1, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByDay, - Start: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), - Stop: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), - }, - result: []params.Statistic{{ - Date: "2012-05-03", - Count: 1, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: true, - By: charmstore.ByDay, - }, - result: []params.Statistic{{ - Key: "a:b", - Date: "2012-05-01", - Count: 1, - }, { - Key: "a:c", - Date: "2012-05-01", - Count: 1, - }, { - Key: "a:b", - Date: "2012-05-03", - Count: 1, - }, { - Key: "a:c:*", - Date: "2012-05-09", - Count: 3, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: false, - By: charmstore.ByWeek, - }, - result: []params.Statistic{{ - Date: "2012-05-06", - Count: 3, - }, { - Date: "2012-05-13", - Count: 3, - }}, - }, { - request: charmstore.CounterRequest{ - Key: []string{"a"}, - Prefix: true, - List: true, - By: charmstore.ByWeek, - }, - result: []params.Statistic{{ - Key: "a:b", - Date: "2012-05-06", - Count: 2, - }, { - Key: "a:c", - Date: "2012-05-06", - Count: 1, - }, { - Key: "a:c:*", - Date: "2012-05-13", - Count: 3, - }}, - }} - - for i, test := range tests { - flags := make(url.Values) - url := storeURL("stats/counter/" + strings.Join(test.request.Key, ":")) - if test.request.Prefix { - url += ":*" - } - if test.request.List { - flags.Set("list", "1") - } - if !test.request.Start.IsZero() { - flags.Set("start", test.request.Start.Format("2006-01-02")) - } - if !test.request.Stop.IsZero() { - flags.Set("stop", test.request.Stop.Format("2006-01-02")) - } - switch test.request.By { - case charmstore.ByDay: - flags.Set("by", "day") - case charmstore.ByWeek: - flags.Set("by", "week") - } - if len(flags) > 0 { - url += "?" 
+ flags.Encode() - } - c.Logf("test %d: %s", i, url) - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: s.srv, - URL: url, - ExpectBody: test.result, - }) - } -} - -func (s *StatsSuite) TestStatsEnabled(c *gc.C) { - statsEnabled := func(url string) bool { - req, _ := http.NewRequest("GET", url, nil) - return v4.StatsEnabled(req) - } - c.Assert(statsEnabled("http://foo.com"), gc.Equals, true) - c.Assert(statsEnabled("http://foo.com?stats=1"), gc.Equals, true) - c.Assert(statsEnabled("http://foo.com?stats=0"), gc.Equals, false) -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/status.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/status.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/status.go 1970-01-01 00:00:00 +0000 @@ -1,165 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package v4 - -import ( - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/juju/utils/debugstatus" - "gopkg.in/errgo.v1" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/params" -) - -// GET /debug/status -// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-debugstatus -func (h *Handler) serveDebugStatus(_ http.Header, req *http.Request) (interface{}, error) { - store := h.pool.Store() - defer store.Close() - store.SetReconnectTimeout(500 * time.Millisecond) - return debugstatus.Check( - debugstatus.ServerStartTime, - debugstatus.Connection(store.DB.Session), - debugstatus.MongoCollections(store.DB), - h.checkElasticSearch(store), - h.checkEntities(store), - h.checkBaseEntities(store), - h.checkLogs(store, - "ingestion", "Ingestion", - mongodoc.IngestionType, - params.IngestionStart, params.IngestionComplete, - ), - h.checkLogs(store, - "legacy_statistics", "Legacy Statistics Load", - mongodoc.LegacyStatisticsType, - params.LegacyStatisticsImportStart, params.LegacyStatisticsImportComplete, - ), - ), nil -} - -func (h *Handler) checkElasticSearch(store *charmstore.Store) debugstatus.CheckerFunc { - return func() (key string, result debugstatus.CheckResult) { - key = "elasticsearch" - result.Name = "Elastic search is running" - if store.ES == nil || store.ES.Database == nil { - result.Value = "Elastic search is not configured" - result.Passed = true - return key, result - } - health, err := store.ES.Health() - if err != nil { - result.Value = "Connection issues to Elastic Search: " + err.Error() - return key, result - } - result.Value = health.String() - result.Passed = health.Status == "green" - return key, result - } -} - -func (h *Handler) checkEntities(store *charmstore.Store) debugstatus.CheckerFunc { - return func() (key string, result debugstatus.CheckResult) { - result.Name = "Entities in charm store" - charms, err := store.DB.Entities().Find(bson.D{{"series", bson.D{{"$ne", "bundle"}}}}).Count() - if err != nil { - result.Value = "Cannot count charms for consistency check: " + err.Error() - return "entities", result - } - bundles, err := store.DB.Entities().Find(bson.D{{"series", "bundle"}}).Count() - if err != nil { - result.Value = "Cannot count bundles for consistency check: " + err.Error() - return "entities", result - } - promulgated, err := store.DB.Entities().Find(bson.D{{"promulgated-url", bson.D{{"$exists", true}}}}).Count() - if err != nil { - result.Value = "Cannot count promulgated for consistency check: " + err.Error() - return 
"entities", result - } - result.Value = fmt.Sprintf("%d charms; %d bundles; %d promulgated", charms, bundles, promulgated) - result.Passed = true - return "entities", result - } -} - -func (h *Handler) checkBaseEntities(store *charmstore.Store) debugstatus.CheckerFunc { - return func() (key string, result debugstatus.CheckResult) { - resultKey := "base_entities" - result.Name = "Base entities in charm store" - - // Retrieve the number of base entities. - baseNum, err := store.DB.BaseEntities().Count() - if err != nil { - result.Value = "Cannot count base entities: " + err.Error() - return resultKey, result - } - - // Retrieve the number of entities. - num, err := store.DB.Entities().Count() - if err != nil { - result.Value = "Cannot count entities for consistency check: " + err.Error() - return resultKey, result - } - - result.Value = fmt.Sprintf("count: %d", baseNum) - result.Passed = num >= baseNum - return resultKey, result - } -} - -func (h *Handler) checkLogs( - store *charmstore.Store, - resultKey, resultName string, - logType mongodoc.LogType, - startPrefix, endPrefix string, -) debugstatus.CheckerFunc { - return func() (key string, result debugstatus.CheckResult) { - result.Name = resultName - start, end, err := h.findTimesInLogs(store, logType, startPrefix, endPrefix) - if err != nil { - result.Value = err.Error() - return resultKey, result - } - result.Value = fmt.Sprintf("started: %s, completed: %s", start.Format(time.RFC3339), end.Format(time.RFC3339)) - result.Passed = !(start.IsZero() || end.IsZero()) - return resultKey, result - } -} - -// findTimesInLogs goes through logs in reverse order finding when the start and -// end messages were last added. -func (h *Handler) findTimesInLogs(store *charmstore.Store, logType mongodoc.LogType, startPrefix, endPrefix string) (start, end time.Time, err error) { - var log mongodoc.Log - iter := store.DB.Logs(). - Find(bson.D{ - {"level", mongodoc.InfoLevel}, - {"type", logType}, - }).Sort("-time", "-id").Iter() - for iter.Next(&log) { - var msg string - if err := json.Unmarshal(log.Data, &msg); err != nil { - // an error here probably means the log isn't in the form we are looking for. - continue - } - if start.IsZero() && strings.HasPrefix(msg, startPrefix) { - start = log.Time - } - if end.IsZero() && strings.HasPrefix(msg, endPrefix) { - end = log.Time - } - if !start.IsZero() && !end.IsZero() { - break - } - } - if err = iter.Close(); err != nil { - return time.Time{}, time.Time{}, errgo.Notef(err, "Cannot query logs") - } - return -} === removed file 'src/gopkg.in/juju/charmstore.v4/internal/v4/status_test.go' --- src/gopkg.in/juju/charmstore.v4/internal/v4/status_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/internal/v4/status_test.go 1970-01-01 00:00:00 +0000 @@ -1,281 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package v4_test - -import ( - "encoding/json" - "net/http" - "time" - - jc "github.com/juju/testing/checkers" - "github.com/juju/testing/httptesting" - "github.com/juju/utils/debugstatus" - gc "gopkg.in/check.v1" - "gopkg.in/juju/charm.v5" - - "gopkg.in/juju/charmstore.v4/internal/mongodoc" - "gopkg.in/juju/charmstore.v4/internal/router" - "gopkg.in/juju/charmstore.v4/params" -) - -var zeroTimeStr = time.Time{}.Format(time.RFC3339) - -func (s *APISuite) TestStatus(c *gc.C) { - for _, id := range []*router.ResolvedURL{ - newResolvedURL("cs:~charmers/precise/wordpress-2", 2), - newResolvedURL("cs:~charmers/precise/wordpress-3", 3), - newResolvedURL("cs:~foo/precise/arble-9", -1), - newResolvedURL("cs:~bar/utopic/arble-10", -1), - newResolvedURL("cs:~charmers/bundle/oflaughs-3", 3), - newResolvedURL("cs:~bar/bundle/oflaughs-4", -1), - } { - if id.URL.Series == "bundle" { - s.addPublicBundle(c, "wordpress-simple", id) - } else { - s.addPublicCharm(c, "wordpress", id) - } - } - now := time.Now() - s.PatchValue(&debugstatus.StartTime, now) - start := now.Add(-2 * time.Hour) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"ingestion started"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.IngestionType, - Time: start, - }) - end := now.Add(-1 * time.Hour) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"ingestion completed"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.IngestionType, - Time: end, - }) - statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import started"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsStart, - }) - statisticsEnd := now.Add(-30 * time.Minute) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import completed"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsEnd, - }) - s.AssertDebugStatus(c, true, map[string]params.DebugStatus{ - "mongo_connected": { - Name: "MongoDB is connected", - Value: "Connected", - Passed: true, - }, - "mongo_collections": { - Name: "MongoDB collections", - Value: "All required collections exist", - Passed: true, - }, - "elasticsearch": { - Name: "Elastic search is running", - Value: "Elastic search is not configured", - Passed: true, - }, - "entities": { - Name: "Entities in charm store", - Value: "4 charms; 2 bundles; 3 promulgated", - Passed: true, - }, - "base_entities": { - Name: "Base entities in charm store", - Value: "count: 5", - Passed: true, - }, - "server_started": { - Name: "Server started", - Value: now.String(), - Passed: true, - }, - "ingestion": { - Name: "Ingestion", - Value: "started: " + start.Format(time.RFC3339) + ", completed: " + end.Format(time.RFC3339), - Passed: true, - }, - "legacy_statistics": { - Name: "Legacy Statistics Load", - Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339), - Passed: true, - }, - }) -} - -func (s *APISuite) TestStatusWithoutCorrectCollections(c *gc.C) { - s.store.DB.Entities().DropCollection() - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "mongo_collections": { - Name: "MongoDB collections", - Value: "Missing collections: [" + s.store.DB.Entities().Name + "]", - Passed: false, - }, - }) -} - -func (s *APISuite) TestStatusWithoutIngestion(c *gc.C) { - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "ingestion": { - Name: "Ingestion", - Value: "started: " + zeroTimeStr + ", completed: " + zeroTimeStr, - Passed: false, - }, - }) -} - 
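The tests on either side of this point pin down the behaviour of the ingestion status check implemented in the removed status.go above: the check reports the most recent "started" and "completed" log times, and passes only once both messages have been logged. A minimal sketch of that newest-first prefix matching follows; logEntry and findTimes are hypothetical names standing in for the mongodoc.Log documents and findTimesInLogs shown above, not the charmstore API:

    package main

    import (
            "fmt"
            "strings"
            "time"
    )

    // logEntry is a stand-in for a stored log document: the decoded
    // message plus the time at which it was logged.
    type logEntry struct {
            Message string
            Time    time.Time
    }

    // findTimes scans entries from newest to oldest, recording the most
    // recent times at which the start and end messages were seen. A zero
    // time means the message was never logged, which is what makes the
    // corresponding status check fail.
    func findTimes(entries []logEntry, startPrefix, endPrefix string) (start, end time.Time) {
            for _, e := range entries { // assumed sorted newest first
                    if start.IsZero() && strings.HasPrefix(e.Message, startPrefix) {
                            start = e.Time
                    }
                    if end.IsZero() && strings.HasPrefix(e.Message, endPrefix) {
                            end = e.Time
                    }
                    if !start.IsZero() && !end.IsZero() {
                            break
                    }
            }
            return start, end
    }

    func main() {
            now := time.Now()
            entries := []logEntry{
                    {"ingestion completed", now},
                    {"ingestion started", now.Add(-time.Hour)},
            }
            start, end := findTimes(entries, "ingestion started", "ingestion completed")
            fmt.Println(start, end) // both non-zero, so the check would pass
    }
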
-func (s *APISuite) TestStatusIngestionStarted(c *gc.C) { - now := time.Now() - start := now.Add(-1 * time.Hour) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"ingestion started"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.IngestionType, - Time: start, - }) - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "ingestion": { - Name: "Ingestion", - Value: "started: " + start.Format(time.RFC3339) + ", completed: " + zeroTimeStr, - Passed: false, - }, - }) -} - -func (s *APISuite) TestStatusWithoutLegacyStatistics(c *gc.C) { - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "legacy_statistics": { - Name: "Legacy Statistics Load", - Value: "started: " + zeroTimeStr + ", completed: " + zeroTimeStr, - Passed: false, - }, - }) -} - -func (s *APISuite) TestStatusLegacyStatisticsStarted(c *gc.C) { - now := time.Now() - statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import started"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsStart, - }) - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "legacy_statistics": { - Name: "Legacy Statistics Load", - Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + zeroTimeStr, - Passed: false, - }, - }) -} - -func (s *APISuite) TestStatusLegacyStatisticsMultipleLogs(c *gc.C) { - now := time.Now() - statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import started"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsStart.Add(-1 * time.Hour), - }) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import started"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsStart, - }) - statisticsEnd := now.Add(-30 * time.Minute) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import completed"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsEnd.Add(-1 * time.Hour), - }) - s.addLog(c, &mongodoc.Log{ - Data: []byte(`"legacy statistics import completed"`), - Level: mongodoc.InfoLevel, - Type: mongodoc.LegacyStatisticsType, - Time: statisticsEnd, - }) - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "legacy_statistics": { - Name: "Legacy Statistics Load", - Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339), - Passed: true, - }, - }) -} - -func (s *APISuite) TestStatusBaseEntitiesError(c *gc.C) { - // Add a base entity without any corresponding entities. - entity := &mongodoc.BaseEntity{ - URL: charm.MustParseReference("django"), - Name: "django", - } - err := s.store.DB.BaseEntities().Insert(entity) - c.Assert(err, gc.IsNil) - - s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ - "base_entities": { - Name: "Base entities in charm store", - Value: "count: 1", - Passed: false, - }, - }) -} - -// AssertDebugStatus asserts that the current /debug/status endpoint -// matches the given status, ignoring status duration. -// If complete is true, it fails if the results contain -// keys not mentioned in status. 
-func (s *APISuite) AssertDebugStatus(c *gc.C, complete bool, status map[string]params.DebugStatus) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("debug/status"), - }) - c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes())) - c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json") - var gotStatus map[string]params.DebugStatus - err := json.Unmarshal(rec.Body.Bytes(), &gotStatus) - c.Assert(err, gc.IsNil) - for key, r := range gotStatus { - if _, found := status[key]; !complete && !found { - delete(gotStatus, key) - continue - } - r.Duration = 0 - gotStatus[key] = r - } - c.Assert(gotStatus, jc.DeepEquals, status) -} - -type statusWithElasticSearchSuite struct { - commonSuite -} - -var _ = gc.Suite(&statusWithElasticSearchSuite{}) - -func (s *statusWithElasticSearchSuite) SetUpSuite(c *gc.C) { - s.enableES = true - s.commonSuite.SetUpSuite(c) -} - -func (s *statusWithElasticSearchSuite) TestStatusWithElasticSearch(c *gc.C) { - rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ - Handler: s.srv, - URL: storeURL("debug/status"), - }) - var results map[string]params.DebugStatus - err := json.Unmarshal(rec.Body.Bytes(), &results) - c.Assert(err, gc.IsNil) - c.Assert(results["elasticsearch"].Name, gc.Equals, "Elastic search is running") - c.Assert(results["elasticsearch"].Value, jc.Contains, "cluster_name:") -} === removed directory 'src/gopkg.in/juju/charmstore.v4/params' === removed file 'src/gopkg.in/juju/charmstore.v4/params/error.go' --- src/gopkg.in/juju/charmstore.v4/params/error.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/params/error.go 1970-01-01 00:00:00 +0000 @@ -1,80 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package params - -import ( - "fmt" -) - -// ErrorCode holds the class of an error in machine-readable format. -// It is also an error in its own right. -type ErrorCode string - -func (code ErrorCode) Error() string { - return string(code) -} - -func (code ErrorCode) ErrorCode() ErrorCode { - return code -} - -const ( - ErrNotFound ErrorCode = "not found" - ErrMetadataNotFound ErrorCode = "metadata not found" - ErrForbidden ErrorCode = "forbidden" - ErrBadRequest ErrorCode = "bad request" - // TODO change to ErrAlreadyExists - ErrDuplicateUpload ErrorCode = "duplicate upload" - ErrMultipleErrors ErrorCode = "multiple errors" - ErrUnauthorized ErrorCode = "unauthorized" - ErrMethodNotAllowed ErrorCode = "method not allowed" - - // Note that these error codes sit in the same name space - // as the bakery error codes defined in gopkg.in/macaroon-bakery.v0/httpbakery . - // In particular, ErrBadRequest is a shared error code - // which needs to share the message too. -) - -// Error represents an error - it is returned for any response that fails. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#errors -type Error struct { - Message string - Code ErrorCode - Info map[string]*Error `json:",omitempty"` -} - -// NewError returns a new *Error with the given error code -// and message. -func NewError(code ErrorCode, f string, a ...interface{}) error { - return &Error{ - Message: fmt.Sprintf(f, a...), - Code: code, - } -} - -// Error implements error.Error. -func (e *Error) Error() string { - return e.Message -} - -// ErrorCode holds the class of the error in -// machine readable format. 
-func (e *Error) ErrorCode() string { - return e.Code.Error() -} - -// ErrorInfo returns additional info on the error. -// TODO(rog) rename this so that it more accurately -// reflects its role. -func (e *Error) ErrorInfo() map[string]*Error { - return e.Info -} - -// Cause implements errgo.Causer.Cause. -func (e *Error) Cause() error { - if e.Code != "" { - return e.Code - } - return nil -} === removed file 'src/gopkg.in/juju/charmstore.v4/params/package_test.go' --- src/gopkg.in/juju/charmstore.v4/params/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/params/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package params_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/gopkg.in/juju/charmstore.v4/params/params.go' --- src/gopkg.in/juju/charmstore.v4/params/params.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/params/params.go 1970-01-01 00:00:00 +0000 @@ -1,277 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -// The params package holds types that are a part of the charm store's external -// contract - they will be marshalled (or unmarshalled) as JSON -// and delivered through the HTTP API. -package params - -import ( - "encoding/json" - "time" - - "github.com/juju/utils/debugstatus" - "gopkg.in/juju/charm.v5" -) - -const ( - // ContentHashHeader specifies the header attribute - // that will hold the content hash for archive GET responses. - ContentHashHeader = "Content-Sha384" - - // EntityIdHeader specifies the header attribute that will hold the - // id of the entity for archive GET responses. - EntityIdHeader = "Entity-Id" -) - -// Special user/group names. -const ( - Everyone = "everyone" - Admin = "admin" -) - -// MetaAnyResponse holds the result of a meta/any request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany -type MetaAnyResponse struct { - Id *charm.Reference - Meta map[string]interface{} `json:",omitempty"` -} - -// ArchiveUploadResponse holds the result of a post or a put to /id/archive. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idarchive -type ArchiveUploadResponse struct { - Id *charm.Reference - PromulgatedId *charm.Reference `json:",omitempty"` -} - -// ExpandedId holds a charm or bundle fully qualified id. -// A slice of ExpandedId is used as response for -// id/expand-id GET requests. -type ExpandedId struct { - Id string -} - -// ArchiveSizeResponse holds the result of an -// id/meta/archive-size GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size -type ArchiveSizeResponse struct { - Size int64 -} - -// HashResponse holds the result of id/meta/hash and id/meta/hash256 GET -// requests. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash -// and https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256 -type HashResponse struct { - Sum string -} - -// ManifestFile holds information about a charm or bundle file. -// A slice of ManifestFile is used as response for -// id/meta/manifest GET requests. 
-// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetamanifest -type ManifestFile struct { - Name string - Size int64 -} - -// ArchiveUploadTimeResponse holds the result of an id/meta/archive-upload-time -// GET request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-upload-time -type ArchiveUploadTimeResponse struct { - UploadTime time.Time -} - -// RelatedResponse holds the result of an id/meta/charm-related GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related -type RelatedResponse struct { - // Requires holds an entry for each interface provided by - // the charm, containing all charms that require that interface. - Requires map[string][]MetaAnyResponse `json:",omitempty"` - - // Provides holds an entry for each interface required by - // the charm, containing all charms that provide that interface. - Provides map[string][]MetaAnyResponse `json:",omitempty"` -} - -// RevisionInfoResponse holds the result of an id/meta/revision-info GET -// request. See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info -type RevisionInfoResponse struct { - Revisions []*charm.Reference -} - -// BundleCount holds the result of an id/meta/bundle-unit-count -// or bundle-machine-count GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-unit-count -// and https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-machine-count -type BundleCount struct { - Count int -} - -// TagsResponse holds the result of an id/meta/tags GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetatags -type TagsResponse struct { - Tags []string -} - -// Published holds the result of a changes/published GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-changespublished -type Published struct { - Id *charm.Reference - PublishTime time.Time -} - -// DebugStatus holds the result of the status checks. -// This is defined for backward compatibility: new clients should use -// debugstatus.CheckResult directly. -type DebugStatus debugstatus.CheckResult - -// SearchResult holds a single result from a search operation. -type SearchResult struct { - Id *charm.Reference - // Meta holds at most one entry for each meta value - // specified in the include flags, holding the - // data that would be returned by reading /meta/meta?id=id. - // Metadata not relevant to a particular result will not - // be included. - Meta map[string]interface{} `json:",omitempty"` -} - -// SearchResponse holds the response from a search operation. -type SearchResponse struct { - SearchTime time.Duration - Total int - Results []SearchResult -} - -// IdUserResponse holds the result of an id/meta/id-user GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-user -type IdUserResponse struct { - User string -} - -// IdSeriesResponse holds the result of an id/meta/id-series GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-series -type IdSeriesResponse struct { - Series string -} - -// IdNameResponse holds the result of an id/meta/id-name GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-name -type IdNameResponse struct { - Name string -} - -// IdRevisionResponse holds the result of an id/meta/id-revision GET request. 
-// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-revision -type IdRevisionResponse struct { - Revision int -} - -// IdResponse holds the result of an id/meta/id GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid -type IdResponse struct { - Id *charm.Reference - User string `json:",omitempty"` - Series string `json:",omitempty"` - Name string - Revision int -} - -// PermResponse holds the result of an id/meta/perm GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaperm -type PermResponse struct { - Read []string - Write []string -} - -// PermRequest holds the request of an id/meta/perm PUT request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaperm -type PermRequest struct { - Read []string - Write []string -} - -// PromulgatedResponse holds the result of an id/meta/promulgated GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapromulgated -type PromulgatedResponse struct { - Promulgated bool -} - -// PromulgateRequest holds the request of an id/promulgate PUT request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpromulgate -type PromulgateRequest struct { - Promulgated bool -} - -const ( - // BzrDigestKey is the extra-info key used to store the Bazaar digest - BzrDigestKey = "bzr-digest" - - // LegacyDownloadStats is the extra-info key used to store the legacy - // download counts, and to retrieve them when - // charmstore.LegacyDownloadCountsEnabled is set to true. - // TODO (frankban): remove this constant when removing the legacy counts - // logic. - LegacyDownloadStats = "legacy-download-stats" -) - -// Log holds the representation of a log message. -// This is used by clients to store log events in the charm store. -type Log struct { - // Data holds the log message as a JSON-encoded value. - Data *json.RawMessage - - // Level holds the log level as a string. - Level LogLevel - - // Type holds the log type as a string. - Type LogType - - // URLs holds a slice of entity URLs associated with the log message. - URLs []*charm.Reference `json:",omitempty"` -} - -// LogResponse represents a single log message and is used in the responses -// to /log GET requests. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-log -type LogResponse struct { - // Data holds the log message as a JSON-encoded value. - Data json.RawMessage - - // Level holds the log level as a string. - Level LogLevel - - // Type holds the log type as a string. - Type LogType - - // URLs holds a slice of entity URLs associated with the log message. - URLs []*charm.Reference `json:",omitempty"` - - // Time holds the time of the log. - Time time.Time -} - -// LogLevel defines log levels (e.g. "info" or "error") to be used in log -// requests and responses. -type LogLevel string - -const ( - InfoLevel LogLevel = "info" - WarningLevel LogLevel = "warning" - ErrorLevel LogLevel = "error" -) - -// LogType defines log types (e.g. "ingestion") to be used in log requests and -// responses. 
-type LogType string - -const ( - IngestionType LogType = "ingestion" - LegacyStatisticsType LogType = "legacyStatistics" - - IngestionStart = "ingestion started" - IngestionComplete = "ingestion completed" - - LegacyStatisticsImportStart = "legacy statistics import started" - LegacyStatisticsImportComplete = "legacy statistics import completed" -) === removed file 'src/gopkg.in/juju/charmstore.v4/params/params_test.go' --- src/gopkg.in/juju/charmstore.v4/params/params_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/params/params_test.go 1970-01-01 00:00:00 +0000 @@ -1,41 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package params_test - -import ( - "encoding/json" - "net/textproto" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/macaroon-bakery.v0/httpbakery" - - "gopkg.in/juju/charmstore.v4/params" -) - -type suite struct{} - -var _ = gc.Suite(&suite{}) - -func (*suite) TestContentHashHeaderCanonicalized(c *gc.C) { - // The header key should be canonicalized, because otherwise - // the actually produced header will be different from that - // specified. - canon := textproto.CanonicalMIMEHeaderKey(params.ContentHashHeader) - c.Assert(canon, gc.Equals, params.ContentHashHeader) -} - -func (*suite) TestBakeryErrorCompatibility(c *gc.C) { - err1 := httpbakery.Error{ - Code: httpbakery.ErrBadRequest, - Message: "some request", - } - err2 := params.Error{ - Code: params.ErrBadRequest, - Message: "some request", - } - data1, err := json.Marshal(err1) - c.Assert(err, gc.IsNil) - c.Assert(string(data1), jc.JSONEquals, err2) -} === removed file 'src/gopkg.in/juju/charmstore.v4/params/stats.go' --- src/gopkg.in/juju/charmstore.v4/params/stats.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/params/stats.go 1970-01-01 00:00:00 +0000 @@ -1,46 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the LGPLv3, see LICENCE.client file for details. - -package params - -// Define the kinds to be included in stats keys. -const ( - StatsArchiveDownload = "archive-download" - StatsArchiveDelete = "archive-delete" - StatsArchiveFailedUpload = "archive-failed-upload" - StatsArchiveUpload = "archive-upload" - // The following kinds are in use in the legacy API. - StatsCharmInfo = "charm-info" - StatsCharmMissing = "charm-missing" - StatsCharmEvent = "charm-event" -) - -// Statistic holds one element of a stats/counter response. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-statscounter -type Statistic struct { - Key string `json:",omitempty"` - Date string `json:",omitempty"` - Count int64 -} - -// StatsResponse holds the result of an id/meta/stats GET request. -// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetastats -type StatsResponse struct { - // ArchiveDownloadCount is superseded by ArchiveDownload but maintained for - // backward compatibility. - ArchiveDownloadCount int64 - // ArchiveDownload holds the downloads count for a specific revision of the - // entity. - ArchiveDownload StatsCount - // ArchiveDownloadAllRevisions holds the downloads count for all revisions - // of the entity. - ArchiveDownloadAllRevisions StatsCount -} - -// StatsCount holds stats counts and is used as part of StatsResponse. -type StatsCount struct { - Total int64 // Total count over all time. - Day int64 // Count over the last day. - Week int64 // Count over the last week. - Month int64 // Count over the last month. 
-} === removed file 'src/gopkg.in/juju/charmstore.v4/server.go' --- src/gopkg.in/juju/charmstore.v4/server.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/server.go 1970-01-01 00:00:00 +0000 @@ -1,88 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. - -package charmstore - -import ( - "fmt" - "net/http" - "sort" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/mgo.v2" - - "gopkg.in/juju/charmstore.v4/internal/charmstore" - "gopkg.in/juju/charmstore.v4/internal/elasticsearch" - "gopkg.in/juju/charmstore.v4/internal/legacy" - "gopkg.in/juju/charmstore.v4/internal/v4" -) - -// Versions of the API that can be served. -const ( - V4 = "v4" - Legacy = "" -) - -var versions = map[string]charmstore.NewAPIHandlerFunc{ - V4: v4.NewAPIHandler, - Legacy: legacy.NewAPIHandler, -} - -// Versions returns all known API version strings in alphabetical order. -func Versions() []string { - vs := make([]string, 0, len(versions)) - for v := range versions { - vs = append(vs, v) - } - sort.Strings(vs) - return vs -} - -// ServerParams holds configuration for a new API server. -type ServerParams struct { - // AuthUsername and AuthPassword hold the credentials - // used for HTTP basic authentication. - AuthUsername string - AuthPassword string - - // IdentityLocation holds the location of the third party authorization - // service to use when creating third party caveats, - // for example: http://api.jujucharms.com/identity/v1/discharger - // If it is empty, IdentityURL+"/v1/discharger" will be used. - IdentityLocation string - - // PublicKeyLocator holds a public key store. - // It may be nil. - PublicKeyLocator bakery.PublicKeyLocator - - // IdentityAPIURL holds the URL of the identity manager, - // for example http://api.jujucharms.com/identity - IdentityAPIURL string - - // IdentityAPIUsername and IdentityAPIPassword hold the credentials - // to be used when querying the identity manager API. - IdentityAPIUsername string - IdentityAPIPassword string -} - -// NewServer returns a new handler that handles charm store requests and stores -// its data in the given database. The handler will serve the specified -// versions of the API using the given configuration. -func NewServer(db *mgo.Database, es *elasticsearch.Database, idx string, config ServerParams, serveVersions ...string) (http.Handler, error) { - newAPIs := make(map[string]charmstore.NewAPIHandlerFunc) - for _, vers := range serveVersions { - newAPI := versions[vers] - if newAPI == nil { - return nil, fmt.Errorf("unknown version %q", vers) - } - newAPIs[vers] = newAPI - } - var si *charmstore.SearchIndex - if es != nil { - si = &charmstore.SearchIndex{ - Database: es, - Index: idx, - } - } - return charmstore.NewServer(db, si, charmstore.ServerParams(config), newAPIs) -} === removed file 'src/gopkg.in/juju/charmstore.v4/server_test.go' --- src/gopkg.in/juju/charmstore.v4/server_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/server_test.go 1970-01-01 00:00:00 +0000 @@ -1,123 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package charmstore_test - -import ( - "fmt" - "net/http" - "testing" - - jujutesting "github.com/juju/testing" - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - - "gopkg.in/juju/charmstore.v4" - "gopkg.in/juju/charmstore.v4/internal/storetesting" - "gopkg.in/juju/charmstore.v4/params" -) - -// These tests are copied (almost) verbatim from internal/charmstore/server_test.go - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} - -type ServerSuite struct { - storetesting.IsolatedMgoSuite - config charmstore.ServerParams -} - -var _ = gc.Suite(&ServerSuite{}) - -func (s *ServerSuite) SetUpSuite(c *gc.C) { - s.IsolatedMgoSuite.SetUpSuite(c) - s.config = charmstore.ServerParams{ - AuthUsername: "test-user", - AuthPassword: "test-password", - } -} - -func (s *ServerSuite) TestNewServerWithNoVersions(c *gc.C) { - h, err := charmstore.NewServer(s.Session.DB("foo"), nil, "", s.config) - c.Assert(err, gc.ErrorMatches, `charm store server must serve at least one version of the API`) - c.Assert(h, gc.IsNil) -} - -func (s *ServerSuite) TestNewServerWithUnregisteredVersion(c *gc.C) { - h, err := charmstore.NewServer(s.Session.DB("foo"), nil, "", s.config, "wrong") - c.Assert(err, gc.ErrorMatches, `unknown version "wrong"`) - c.Assert(h, gc.IsNil) -} - -type versionResponse struct { - Version string - Path string -} - -func (s *ServerSuite) TestVersions(c *gc.C) { - c.Assert(charmstore.Versions(), gc.DeepEquals, []string{"", "v4"}) -} - -func (s *ServerSuite) TestNewServerWithVersions(c *gc.C) { - db := s.Session.DB("foo") - - h, err := charmstore.NewServer(db, nil, "", s.config, charmstore.V4) - c.Assert(err, gc.IsNil) - - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: h, - URL: "/v4/debug", - ExpectStatus: http.StatusInternalServerError, - ExpectBody: params.Error{ - Message: "method not implemented", - }, - }) - assertDoesNotServeVersion(c, h, "v3") -} - -func assertServesVersion(c *gc.C, h http.Handler, vers string) { - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: h, - URL: "/" + vers + "/some/path", - ExpectBody: versionResponse{ - Version: vers, - Path: "/some/path", - }, - }) -} - -func assertDoesNotServeVersion(c *gc.C, h http.Handler, vers string) { - url := "/" + vers + "/debug" - httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ - Handler: h, - URL: url, - ExpectStatus: http.StatusNotFound, - ExpectBody: params.Error{ - Message: fmt.Sprintf("no handler for %q", url), - Code: params.ErrNotFound, - }, - }) -} - -type ServerESSuite struct { - storetesting.IsolatedMgoESSuite - config charmstore.ServerParams -} - -var _ = gc.Suite(&ServerESSuite{}) - -func (s *ServerESSuite) SetUpSuite(c *gc.C) { - s.IsolatedMgoESSuite.SetUpSuite(c) - s.config = charmstore.ServerParams{ - AuthUsername: "test-user", - AuthPassword: "test-password", - } -} - -func (s *ServerESSuite) TestNewServerWithElasticsearch(c *gc.C) { - db := s.Session.DB("foo") - - _, err := charmstore.NewServer(db, s.ES, s.TestIndex, s.config, charmstore.V4) - c.Assert(err, gc.IsNil) -} === removed directory 'src/gopkg.in/juju/charmstore.v4/version' === removed file 'src/gopkg.in/juju/charmstore.v4/version/version.go' --- src/gopkg.in/juju/charmstore.v4/version/version.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/juju/charmstore.v4/version/version.go 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -// Copyright 2014 Canonical Ltd. -// Licensed under the AGPLv3, see LICENCE file for details. 
- -package version - -type Version struct { - GitCommit string - Version string -} - -var VersionInfo = unknownVersion - -var unknownVersion = Version{ - GitCommit: "unknown git commit", - Version: "unknown version", -} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/LICENSE' --- src/gopkg.in/juju/charmstore.v5-unstable/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. 
If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions.  However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/Makefile'
--- src/gopkg.in/juju/charmstore.v5-unstable/Makefile	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/Makefile	2016-03-22 15:18:22 +0000
@@ -0,0 +1,123 @@
+# Makefile for the charm store.
+
+ifndef GOPATH
+$(warning You need to set up a GOPATH.)
+endif
+
+PROJECT := gopkg.in/juju/charmstore.v5-unstable
+PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT))
+
+ifeq ($(shell uname -p | sed -r 's/.*(x86|armel|armhf).*/golang/'), golang)
+	GO_C := golang
+	INSTALL_FLAGS :=
+else
+	GO_C := gccgo-4.9 gccgo-go
+	INSTALL_FLAGS := -gccgoflags=-static-libgo
+endif
+
+define DEPENDENCIES
+  build-essential
+  bzr
+  juju-mongodb
+  mongodb-server
+  $(GO_C)
+  openjdk-7-jre-headless
+  elasticsearch
+endef
+
+default: build
+
+$(GOPATH)/bin/godeps:
+	go get -v launchpad.net/godeps
+
+# Start of GOPATH-dependent targets. Some targets only make sense -
+# and will only work - when this tree is found on the GOPATH.
+ifeq ($(CURDIR),$(PROJECT_DIR))
+
+build:
+	go build $(PROJECT)/...
+
+check:
+	go test $(PROJECT)/...
+
+install:
+	go install $(INSTALL_FLAGS) -v $(PROJECT)/...
+
+clean:
+	go clean $(PROJECT)/...
+
+else
+
+build:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+check:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+install:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+clean:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+endif
+# End of GOPATH-dependent targets.
+
+# Reformat source files.
+format:
+	gofmt -w -l .
+
+# Reformat and simplify source files.
+simplify:
+	gofmt -w -l -s .
+
+# Run the charmd server.
+server: install
+	charmd -logging-config INFO cmd/charmd/config.yaml
+
+# Update the project Go dependencies to the required revision.
+deps: $(GOPATH)/bin/godeps
+	$(GOPATH)/bin/godeps -u dependencies.tsv
+
+# Generate the dependencies file.
+create-deps: $(GOPATH)/bin/godeps
+	godeps -t $(shell go list $(PROJECT)/...) > dependencies.tsv || true
+
+# Install packages required to develop the charm store and run tests.
+APT_BASED := $(shell command -v apt-get >/dev/null; echo $$?)
+sysdeps:
+ifeq ($(APT_BASED),0)
+ifeq ($(shell lsb_release -cs|sed -r 's/precise|quantal|raring/old/'),old)
+	@echo Adding PPAs for golang and mongodb
+	@sudo apt-add-repository --yes ppa:juju/golang
+	@sudo apt-add-repository --yes ppa:juju/stable
+endif
+	@echo Installing dependencies
+	[ "x$(apt-key export D88E42B4 2>&1 1>/dev/null)" = "x" ] || { curl -s http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -;}
+	repo="http://packages.elasticsearch.org/elasticsearch/1.3/debian" file=/etc/apt/sources.list.d/packages_elasticsearch_org_elasticsearch_1_3_debian.list ; grep "$$repo" $$file || echo "deb $$repo stable main" | sudo tee $$file > /dev/null
+	sudo apt-get update
+	@sudo apt-get --force-yes install $(strip $(DEPENDENCIES)) \
+	$(shell apt-cache madison juju-mongodb mongodb-server | head -1 | cut -d '|' -f1)
+else
+	@echo sysdeps runs only on systems with apt-get
+	@echo on OS X with homebrew try: brew install bazaar mongodb elasticsearch
+endif
+
+gopkg:
+	@echo $(PROJECT)
+
+help:
+	@echo -e 'Charmstore - list of make targets:\n'
+	@echo 'make - Build the package.'
+	@echo 'make check - Run tests.'
+	@echo 'make install - Install the package.'
+	@echo 'make server - Start the charmd server.'
+	@echo 'make clean - Remove object files from package source directories.'
+	@echo 'make sysdeps - Install the development environment system packages.'
+	@echo 'make deps - Set up the project Go dependencies.'
+	@echo 'make create-deps - Generate the Go dependencies file.'
+	@echo 'make format - Format the source files.'
+	@echo 'make simplify - Format and simplify the source files.'
+	@echo 'make gopkg - Output the current gopkg repository path and version.'
+
+.PHONY: build check clean format gopkg help install simplify sysdeps

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/README.md'
--- src/gopkg.in/juju/charmstore.v5-unstable/README.md	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,70 @@
+# juju/charmstore
+
+Store and publish Juju charms and bundles.
+
+## Installation
+
+To start using the charm store, first ensure you have a valid
+Go environment, then run the following:
+
+    go get -d gopkg.in/juju/charmstore.v5-unstable
+    cd $GOPATH/src/gopkg.in/juju/charmstore.v5-unstable
+
+## Go dependencies
+
+The project uses godeps (https://launchpad.net/godeps) to manage Go
+dependencies. To install this, run:
+
+    go get launchpad.net/godeps
+
+After installing it, you can update the dependencies
+to the revision specified in the `dependencies.tsv` file with the following:
+
+    make deps
+
+Use `make create-deps` to update the dependencies file.
+
+## Development environment
+
+A couple of system packages are required in order to set up a charm store
+development environment. To install them, run the following:
+
+    make sysdeps
+
+To run the elasticsearch tests you must run an elasticsearch server. If the
+elasticsearch server is running at an address other than localhost:9200 then
+set `JUJU_TEST_ELASTICSEARCH=<host>:<port>` where host and port provide
+the address of the elasticsearch server. If you do not wish to run the
+elasticsearch tests, set `JUJU_TEST_ELASTICSEARCH=none`.
+
+At this point, from the root of this branch, run the command:
+
+    make install
+
+The command above builds and installs the charm store binaries, and places them
+in `$GOPATH/bin`.
This is the list of the installed commands: + +- charmd: start the charm store server; +- essync: synchronize the contents of the Elastic Search database with the charm store. + +A description of each command can be found below. + +## Testing + +Run `make check` to test the application. +Run `make help` to display help about all the available make targets. + +## Charmstore server + +Once the charms database is fully populated, it is possible to interact with +charm data using the charm store server. It can be started with the following +command: + + charmd -logging-config INFO cmd/charmd/config.yaml + +The same result can be achieved more easily by running `make server`. +Note that this configuration *should not* be used when running +a production server, as it uses a known password for authentication. + +At this point the server starts listening on port 8080 (as specified in the +config YAML file). === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/audit' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/audit/audit.go' --- src/gopkg.in/juju/charmstore.v5-unstable/audit/audit.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/audit/audit.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,39 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package audit + +import ( + "time" + + "gopkg.in/juju/charm.v6-unstable" +) + +// Operation represents the type of an entry. +type Operation string + +const ( + // OpSetPerm represents the setting of ACLs on an entity. + // Required fields: Entity, ACL + OpSetPerm Operation = "set-perm" + + // OpPromulgate, OpUnpromulgate represent the promulgation on an entity. + // Required fields: Entity + OpPromulgate Operation = "promulgate" + OpUnpromulgate Operation = "unpromulgate" +) + +// ACL represents an access control list. +type ACL struct { + Read []string `json:"read,omitempty"` + Write []string `json:"write,omitempty"` +} + +// Entry represents an audit log entry. +type Entry struct { + Time time.Time `json:"time"` + User string `json:"user"` + Op Operation `json:"op"` + Entity *charm.URL `json:"entity,omitempty"` + ACL *ACL `json:"acl,omitempty"` +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/cmd' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,24 @@ +audit-log-file: audit.log +mongo-url: localhost:27017 +api-addr: localhost:8080 +auth-username: admin +auth-password: example-passwd +#elasticsearch-addr: localhost:9200 +# For locally running services. +#identity-public-key: CIdWcEUN+0OZnKW9KwruRQnQDY/qqzVdD30CijwiWCk= +#identity-location: http://localhost:8081/v1/discharger +#identity-api-url: http://localhost:8081 +# For production identity manager. +identity-public-key: hmHaPgCC1UfuhYHUSX5+aihSAZesqpVdjRv0mgfIwjo= +identity-location: https://api.jujucharms.com/identity/v1/discharger +# Agent credentials. 
+#agent-username: charmstore@admin@idm
+#agent-key:
+#  private: 85ZQqTnqiNdEggFVy7TGjRDGMulJHHz8UKkfVl5tTu8=
+#  public: X3Yj/aThvG20FoBhRAIX+JbFk300r9Roc2D78r/37iw=
+# Statistics Cache maximum age, default 1 hour
+#stats-cache-max-age: 1h
+#request-timeout: 500ms
+#search-cache-max-age: 0s
+# Uncomment to test with a terms service running locally
+#terms-location: localhost:8085

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/cmd/charmd/main.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,142 @@
+// Copyright 2012, 2013, 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package main // import "gopkg.in/juju/charmstore.v5-unstable/cmd/charmd"
+
+import (
+	"flag"
+	"fmt"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/loggo"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/natefinch/lumberjack.v2"
+
+	"gopkg.in/juju/charmstore.v5-unstable"
+	"gopkg.in/juju/charmstore.v5-unstable/config"
+	"gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/debug"
+)
+
+var (
+	logger        = loggo.GetLogger("charmd")
+	loggingConfig = flag.String("logging-config", "", "specify log levels for modules e.g. <root>=TRACE")
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "usage: %s [options] <config path>\n", filepath.Base(os.Args[0]))
+		flag.PrintDefaults()
+		os.Exit(2)
+	}
+	flag.Parse()
+	if flag.NArg() != 1 {
+		flag.Usage()
+	}
+	if *loggingConfig != "" {
+		if err := loggo.ConfigureLoggers(*loggingConfig); err != nil {
+			fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err)
+			os.Exit(1)
+		}
+	}
+	if err := serve(flag.Arg(0)); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+func serve(confPath string) error {
+	logger.Infof("reading configuration")
+	conf, err := config.Read(confPath)
+	if err != nil {
+		return errgo.Notef(err, "cannot read config file %q", confPath)
+	}
+
+	logger.Infof("connecting to mongo")
+	session, err := mgo.Dial(conf.MongoURL)
+	if err != nil {
+		return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL)
+	}
+	defer session.Close()
+	db := session.DB("juju")
+
+	var es *elasticsearch.Database
+	if conf.ESAddr != "" {
+		es = &elasticsearch.Database{
+			Addr: conf.ESAddr,
+		}
+	}
+
+	keyring := bakery.NewPublicKeyRing()
+	err = addPublicKey(keyring, conf.IdentityLocation, conf.IdentityPublicKey)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	if conf.TermsLocation != "" {
+		err = addPublicKey(keyring, conf.TermsLocation, conf.TermsPublicKey)
+		if err != nil {
+			return errgo.Mask(err)
+		}
+	}
+
+	logger.Infof("setting up the API server")
+	cfg := charmstore.ServerParams{
+		AuthUsername:            conf.AuthUsername,
+		AuthPassword:            conf.AuthPassword,
+		IdentityLocation:        conf.IdentityLocation,
+		IdentityAPIURL:          conf.IdentityAPIURL,
+		TermsLocation:           conf.TermsLocation,
+		AgentUsername:           conf.AgentUsername,
+		AgentKey:                conf.AgentKey,
+		StatsCacheMaxAge:        conf.StatsCacheMaxAge.Duration,
+		MaxMgoSessions:          conf.MaxMgoSessions,
+		HTTPRequestWaitDuration: conf.RequestTimeout.Duration,
+		SearchCacheMaxAge:       conf.SearchCacheMaxAge.Duration,
+		PublicKeyLocator:        keyring,
+	}
+
+	if conf.AuditLogFile != "" {
+		cfg.AuditLogger = &lumberjack.Logger{
+			Filename: conf.AuditLogFile,
+			MaxSize:  conf.AuditLogMaxSize,
+			MaxAge:   conf.AuditLogMaxAge,
+		}
+	}
+
+	server, err := charmstore.NewServer(db, es, "cs", cfg, charmstore.Legacy, charmstore.V4, charmstore.V5)
+	if err != nil {
+		return errgo.Notef(err, "cannot create new server at %q", conf.APIAddr)
+	}
+
+	logger.Infof("starting the API server")
+	return http.ListenAndServe(conf.APIAddr, debug.Handler("", server))
+}
+
+func addPublicKey(ring *bakery.PublicKeyRing, loc string, key *bakery.PublicKey) error {
+	if key != nil {
+		return ring.AddPublicKeyForLocation(loc, false, key)
+	}
+	pubKey, err := httpbakery.PublicKeyForLocation(http.DefaultClient, loc)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	return ring.AddPublicKeyForLocation(loc, false, pubKey)
+}
+
+var mgoLogger = loggo.GetLogger("mgo")
+
+func init() {
+	mgo.SetLogger(mgoLog{})
+}
+
+type mgoLog struct{}
+
+func (mgoLog) Output(calldepth int, s string) error {
+	mgoLogger.LogCallf(calldepth+1, loggo.DEBUG, "%s", s)
+	return nil
+}

=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256/main.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256/main.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256/main.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,128 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// This command populates the blobhash256 field of all entities.
+// This command is intended to be run on the production db and then discarded.
+// The first time this command is executed, all the entities are updated.
+// Subsequent runs have no effect.
+
+package main // import "gopkg.in/juju/charmstore.v5-unstable/cmd/cshash256"
+
+import (
+	"crypto/sha256"
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/loggo"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/config"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+)
+
+var (
+	logger        = loggo.GetLogger("cshash256")
+	loggingConfig = flag.String("logging-config", "INFO", "specify log levels for modules e.g. <root>=TRACE")
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "usage: %s [options] <config path>\n", filepath.Base(os.Args[0]))
+		flag.PrintDefaults()
+		os.Exit(2)
+	}
+	flag.Parse()
+	if flag.NArg() != 1 {
+		flag.Usage()
+	}
+	if *loggingConfig != "" {
+		if err := loggo.ConfigureLoggers(*loggingConfig); err != nil {
+			fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err)
+			os.Exit(1)
+		}
+	}
+	if err := run(flag.Arg(0)); err != nil {
+		fmt.Fprintf(os.Stderr, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+func run(confPath string) error {
+	logger.Infof("reading configuration")
+	conf, err := config.Read(confPath)
+	if err != nil {
+		return errgo.Notef(err, "cannot read config file %q", confPath)
+	}
+
+	logger.Infof("connecting to mongo")
+	session, err := mgo.Dial(conf.MongoURL)
+	if err != nil {
+		return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL)
+	}
+	defer session.Close()
+	db := session.DB("juju")
+
+	logger.Infof("instantiating the store")
+	pool, err := charmstore.NewPool(db, nil, nil, charmstore.ServerParams{})
+	if err != nil {
+		return errgo.Notef(err, "cannot create a new store")
+	}
+	store := pool.Store()
+	defer store.Close()
+
+	logger.Infof("updating entities")
+	if err := update(store); err != nil {
+		return errgo.Notef(err, "cannot update entities")
+	}
+
+	logger.Infof("done")
+	return nil
+}
+
+func update(store *charmstore.Store) error {
+	entities := store.DB.Entities()
+	var entity mongodoc.Entity
+	iter := entities.Find(bson.D{{"blobhash256", ""}}).Select(bson.D{{"blobname", 1}}).Iter()
+	defer iter.Close()
+
+	counter := 0
+	for iter.Next(&entity) {
+		// Retrieve the archive contents.
+		r, _, err := store.BlobStore.Open(entity.BlobName)
+		if err != nil {
+			return errgo.Notef(err, "cannot open archive data for %s", entity.URL)
+		}
+
+		// Calculate the contents hash.
+		hash := sha256.New()
+		if _, err = io.Copy(hash, r); err != nil {
+			r.Close()
+			return errgo.Notef(err, "cannot calculate archive sha256 for %s", entity.URL)
+		}
+		r.Close()
+
+		// Update the entity document.
+		if err := entities.UpdateId(entity.URL, bson.D{{
+			"$set", bson.D{{"blobhash256", fmt.Sprintf("%x", hash.Sum(nil))}},
+		}}); err != nil {
+			return errgo.Notef(err, "cannot update entity id %s", entity.URL)
+		}
+		counter++
+		if counter%100 == 0 {
+			logger.Infof("%d entities updated", counter)
+		}
+
+	}
+
+	if err := iter.Close(); err != nil {
+		return errgo.Notef(err, "cannot iterate entities")
+	}
+	logger.Infof("%d entities updated", counter)
+	return nil
+}

=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/essync'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/cmd/essync/main.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/cmd/essync/main.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/cmd/essync/main.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,84 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package main // import "gopkg.in/juju/charmstore.v5-unstable/cmd/essync"
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/loggo"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/mgo.v2"
+
+	"gopkg.in/juju/charmstore.v5-unstable/config"
+	"gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+)
+
+var logger = loggo.GetLogger("essync")
+
+var (
+	index         = flag.String("index", "cs", "Name of index to populate.")
+	loggingConfig = flag.String("logging-config", "", "specify log levels for modules e.g. <root>=TRACE")
+	mapping       = flag.String("mapping", "", "No longer used.")
+	settings      = flag.String("settings", "", "No longer used.")
+)
+
+func main() {
+	flag.Usage = func() {
+		fmt.Fprintf(os.Stderr, "usage: %s [options] <config path>\n", filepath.Base(os.Args[0]))
+		flag.PrintDefaults()
+		os.Exit(2)
+	}
+	flag.Parse()
+	if flag.NArg() != 1 {
+		flag.Usage()
+	}
+	if *loggingConfig != "" {
+		if err := loggo.ConfigureLoggers(*loggingConfig); err != nil {
+			fmt.Fprintf(os.Stderr, "cannot configure loggers: %v", err)
+			os.Exit(1)
+		}
+	}
+	if err := populate(flag.Arg(0)); err != nil {
+		logger.Errorf("cannot populate elasticsearch: %v", err)
+		os.Exit(1)
+	}
+}
+
+func populate(confPath string) error {
+	logger.Debugf("reading config file %q", confPath)
+	conf, err := config.Read(confPath)
+	if err != nil {
+		return errgo.Notef(err, "cannot read config file %q", confPath)
+	}
+	if conf.ESAddr == "" {
+		return errgo.Newf("no elasticsearch-addr specified in config file %q", confPath)
+	}
+	si := &charmstore.SearchIndex{
+		Database: &elasticsearch.Database{
+			conf.ESAddr,
+		},
+		Index: *index,
+	}
+	session, err := mgo.Dial(conf.MongoURL)
+	if err != nil {
+		return errgo.Notef(err, "cannot dial mongo at %q", conf.MongoURL)
+	}
+	defer session.Close()
+	db := session.DB("juju")
+
+	pool, err := charmstore.NewPool(db, si, nil, charmstore.ServerParams{})
+	if err != nil {
+		return errgo.Notef(err, "cannot create a new store")
+	}
+	store := pool.Store()
+	defer store.Close()
+	if err := store.SynchroniseElasticsearch(); err != nil {
+		return errgo.Notef(err, "cannot synchronise elasticsearch")
+	}
+	return nil
+}

=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/config'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/config/config.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/config/config.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/config/config.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,103 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// The config package defines configuration parameters for
+// the charm store.
+package config // import "gopkg.in/juju/charmstore.v5-unstable/config"
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"time"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/yaml.v2"
+)
+
+type Config struct {
+	// TODO(rog) rename this to MongoAddr - it's not a URL.
+ MongoURL string `yaml:"mongo-url"` + AuditLogFile string `yaml:"audit-log-file"` + AuditLogMaxSize int `yaml:"audit-log-max-size"` + AuditLogMaxAge int `yaml:"audit-log-max-age"` + APIAddr string `yaml:"api-addr"` + AuthUsername string `yaml:"auth-username"` + AuthPassword string `yaml:"auth-password"` + ESAddr string `yaml:"elasticsearch-addr"` // elasticsearch is optional + IdentityPublicKey *bakery.PublicKey `yaml:"identity-public-key"` + IdentityLocation string `yaml:"identity-location"` + TermsPublicKey *bakery.PublicKey `yaml:"terms-public-key"` + TermsLocation string `yaml:"terms-location"` + // The identity API is optional + IdentityAPIURL string `yaml:"identity-api-url"` + AgentUsername string `yaml:"agent-username"` + AgentKey *bakery.KeyPair `yaml:"agent-key"` + MaxMgoSessions int `yaml:"max-mgo-sessions"` + RequestTimeout DurationString `yaml:"request-timeout"` + StatsCacheMaxAge DurationString `yaml:"stats-cache-max-age"` + SearchCacheMaxAge DurationString `yaml:"search-cache-max-age"` +} + +func (c *Config) validate() error { + var missing []string + if c.MongoURL == "" { + missing = append(missing, "mongo-url") + } + if c.APIAddr == "" { + missing = append(missing, "api-addr") + } + if c.AuthUsername == "" { + missing = append(missing, "auth-username") + } + if strings.Contains(c.AuthUsername, ":") { + return fmt.Errorf("invalid user name %q (contains ':')", c.AuthUsername) + } + if c.AuthPassword == "" { + missing = append(missing, "auth-password") + } + if len(missing) != 0 { + return fmt.Errorf("missing fields %s in config file", strings.Join(missing, ", ")) + } + return nil +} + +// Read reads a charm store configuration file from the +// given path. +func Read(path string) (*Config, error) { + f, err := os.Open(path) + if err != nil { + return nil, errgo.Notef(err, "cannot open config file") + } + defer f.Close() + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, errgo.Notef(err, "cannot read %q", path) + } + var conf Config + err = yaml.Unmarshal(data, &conf) + if err != nil { + return nil, errgo.Notef(err, "cannot parse %q", path) + } + if err := conf.validate(); err != nil { + return nil, errgo.Mask(err) + } + return &conf, nil +} + +// DurationString holds a duration that marshals and +// unmarshals as a friendly string. +type DurationString struct { + time.Duration +} + +func (dp *DurationString) UnmarshalText(data []byte) error { + d, err := time.ParseDuration(string(data)) + if err != nil { + return errgo.Mask(err) + } + dp.Duration = d + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/config/config_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/config/config_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/config/config_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,120 @@ +// Copyright 2012, 2013, 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package config_test // import "gopkg.in/juju/charmstore.v5-unstable/config" + +import ( + "io/ioutil" + "path" + "testing" + "time" + + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/bakery" + + "gopkg.in/juju/charmstore.v5-unstable/config" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +type ConfigSuite struct { + jujutesting.IsolationSuite +} + +var _ = gc.Suite(&ConfigSuite{}) + +const testConfig = ` +audit-log-file: /var/log/charmstore/audit.log +audit-log-max-size: 500 +audit-log-max-age: 1 +mongo-url: localhost:23456 +api-addr: blah:2324 +foo: 1 +bar: false +auth-username: myuser +auth-password: mypasswd +identity-location: localhost:18082 +identity-public-key: +qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA= +identity-api-url: "http://example.com/identity" +terms-public-key: +qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFB= +terms-location: localhost:8092 +agent-username: agentuser +agent-key: + private: lsvcDkapKoFxIyjX9/eQgb3s41KVwPMISFwAJdVCZ70= + public: +qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA= +stats-cache-max-age: 1h +search-cache-max-age: 15m +request-timeout: 500ms +max-mgo-sessions: 10 +` + +func (s *ConfigSuite) readConfig(c *gc.C, content string) (*config.Config, error) { + // Write the configuration content to file. + path := path.Join(c.MkDir(), "charmd.conf") + err := ioutil.WriteFile(path, []byte(content), 0666) + c.Assert(err, gc.IsNil) + + // Read the configuration. + return config.Read(path) +} + +func (s *ConfigSuite) TestRead(c *gc.C) { + conf, err := s.readConfig(c, testConfig) + c.Assert(err, gc.IsNil) + c.Assert(conf, jc.DeepEquals, &config.Config{ + AuditLogFile: "/var/log/charmstore/audit.log", + AuditLogMaxAge: 1, + AuditLogMaxSize: 500, + MongoURL: "localhost:23456", + APIAddr: "blah:2324", + AuthUsername: "myuser", + AuthPassword: "mypasswd", + IdentityLocation: "localhost:18082", + IdentityPublicKey: &bakery.PublicKey{ + Key: mustParseKey("+qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA="), + }, + IdentityAPIURL: "http://example.com/identity", + TermsLocation: "localhost:8092", + TermsPublicKey: &bakery.PublicKey{ + Key: mustParseKey("+qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFB="), + }, + AgentUsername: "agentuser", + AgentKey: &bakery.KeyPair{ + Public: bakery.PublicKey{ + Key: mustParseKey("+qNbDWly3kRTDVv2UN03hrv/CBt4W6nxY5dHdw+KJFA="), + }, + Private: bakery.PrivateKey{ + mustParseKey("lsvcDkapKoFxIyjX9/eQgb3s41KVwPMISFwAJdVCZ70="), + }, + }, + StatsCacheMaxAge: config.DurationString{time.Hour}, + RequestTimeout: config.DurationString{500 * time.Millisecond}, + MaxMgoSessions: 10, + SearchCacheMaxAge: config.DurationString{15 * time.Minute}, + }) +} + +func (s *ConfigSuite) TestReadConfigError(c *gc.C) { + cfg, err := config.Read(path.Join(c.MkDir(), "charmd.conf")) + c.Assert(err, gc.ErrorMatches, ".* no such file or directory") + c.Assert(cfg, gc.IsNil) +} + +func (s *ConfigSuite) TestValidateConfigError(c *gc.C) { + cfg, err := s.readConfig(c, "") + c.Assert(err, gc.ErrorMatches, "missing fields mongo-url, api-addr, auth-username, auth-password in config file") + c.Assert(cfg, gc.IsNil) +} + +func mustParseKey(s string) bakery.Key { + var k bakery.Key + err := k.UnmarshalText([]byte(s)) + if err != nil { + panic(err) + } + return k +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv' --- src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ 
src/gopkg.in/juju/charmstore.v5-unstable/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,30 @@ +github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z +github.com/juju/blobstore git 3e9b30af648f96e85d8f41f946ae4a1ce0ce588b 2015-06-11T10:42:44Z +github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z +github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z +github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z +github.com/juju/httpprof git 14bf14c307672fd2456bdbf35d19cf0ccd3cf565 2014-12-17T16:00:36Z +github.com/juju/httprequest git 1015665b66c26101695f2f51407b3b1e000176fd 2015-10-07T14:02:54Z +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/names git e287fe4ae0dbda220cace3ed0e35cda4796c1aa3 2015-10-22T17:21:35Z +github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z +github.com/juju/testing git ad6f815f49f8209a27a3b7efb6d44876493e5939 2015-10-12T16:09:06Z +github.com/juju/txn git 99ec629d0066a4d73c54d8e021a7fc1dc07df614 2015-06-09T16:58:27Z +github.com/juju/utils git f2db28cef935aba0a7207254fa5dba273e649d0e 2015-11-09T11:51:43Z +github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z +github.com/julienschmidt/httprouter git 109e267447e95ad1bb48b758e40dd7453eb7b039 2015-09-05T17:25:33Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z +gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z +gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z +gopkg.in/juju/charm.v6-unstable git a3d228ef5292531219d17d47679b260580fba1a8 2015-11-19T07:39:58Z +gopkg.in/juju/charmrepo.v2-unstable git b17697d8bb60cdac7d8ffd61e1357c9977cc2096 2015-11-30T13:55:09Z +gopkg.in/juju/jujusvg.v1 git 2c97ff517dee12dc48bb3c2d2b113e5045a75b71 2015-11-19T14:54:17Z +gopkg.in/macaroon-bakery.v1 git 7b63aca524cc3f7b1ad0171e54cb78b33ce1e747 2015-12-01T10:11:23Z +gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z +gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z +gopkg.in/natefinch/lumberjack.v2 git 588a21fb0fa0ebdfde42670fa214576b6f0f22df 2015-05-21T01:59:18Z +gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z +gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z +launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/docs' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md' --- src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/docs/API.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2218 @@ +# Charm store API + +The current live API lives at https://api.jujucharms.com/charmstore/v4 + +## Intro + +The charm store stores and indexes charms and bundles. A charm or bundle is +referred to by a charm store id which can take one of the following two forms: + +* ~*owner*/*series*/*name*(-*revision*) +* *series*/*name*(-*revision*) + +*Owner* is the name of the user that owns the charm. 
+*Series* is one of a small number of known possible series for charms
+(currently just the Ubuntu series names) or the special name bundle to signify
+that the charm id refers to a charm bundle.
+
+A charm store id referring to a charm (not a bundle) can also use one of the
+following two forms, omitting the series:
+
+* ~*owner*/*name*(-*revision*)
+* *name*(-*revision*)
+
+In this case the store will look at all charms with the same *owner* and
+*name*, and choose one according to its preference (for example, it currently
+prefers the latest LTS series).
+
+### Data format
+
+All endpoints that do not produce binary data produce a single JSON object as
+their result. These will be described in terms of the Go types that produce and
+consume the format, along with an example. A charm id is represented as a
+`charm.URL` type.
+
+
+### Errors
+
+If any request returns an error, it will produce it in the following form:
+
+```go
+type Error struct {
+	Message string
+	Code    string
+	Info    map[string]Error `json:",omitempty"`
+}
+```
+
+Example:
+
+```json
+{
+    "Message": "unexpected Content-Type \"image/jpeg\"; expected \"application/json\"",
+    "Code": "bad request"
+}
+```
+
+Note: this format is compatible with the error results used by juju-core.
+Currently defined codes are the following:
+
+* not found
+* metadata not found
+* forbidden
+* bad request
+* duplicate upload
+* multiple errors
+* unauthorized
+* method not allowed
+
+The `Info` field is set when a request returns a "multiple errors" error code;
+currently the only two endpoints that can do so are "/meta" and "*id*/meta/any".
+Each element in `Info` corresponds to an element in the PUT request, and holds
+the error for that element. See those endpoints for examples.
+
+### Bulk requests and missing metadata
+
+There are two forms of "bulk" API request that can return information about
+several items at once. The `/meta/any` endpoint (along with some others) has a
+set of "include" flags that specify metadata to return. The `/meta` endpoint
+has a set of "id" flags that specify a set of ids to return data on.
+
+In both of these cases, when the relevant data does not exist, the result will
+be omitted from the returned map. For example, a GET of
+`/meta/archive-size?id=something` will return an empty map if the id
+"something" is not found; a GET of
+`/precise/wordpress-34/meta/any?include=bundle-metadata` will return an empty
+map if the id "precise/wordpress-34" refers to a bundle rather than a charm.
+
+For the singular forms of these endpoints, a 404 "metadata not found" error
+will be returned when this happens.
+
+In the `meta/any` GET bulk request, if some data requires authorization, the
+default behavior is to return an authorization required response. Clients
+interested in public data only can include an `ignore-auth=1` query so that only
+public information is returned. In this case, results requiring authorization
+(if any) will be omitted.
+
+### Versioning
+
+The version of the API is indicated by an initial "vN" prefix to the path.
+Later versions will increment this number. This also means we can potentially
+serve backward-compatible paths to juju-core. All paths in this document
+should be read as if they had a "v4" prefix. For example, the
+`wordpress/meta/charm-metadata` path is actually at
+`v4/wordpress/meta/charm-metadata`.
+
+
+### Boolean values
+
+Where a flag specifies a boolean property, the value must be either "1",
+signifying true, or empty or "0", signifying false.
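+
+As a concrete illustration of the conventions above, here is a minimal,
+hypothetical Go client sketch. It is not part of this tree; the charm id,
+endpoint and base URL are only examples. It issues a metadata request and
+decodes the documented error object when the response is unsuccessful:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// Error mirrors the JSON error format described in the Errors section above.
+type Error struct {
+	Message string
+	Code    string
+	Info    map[string]Error `json:",omitempty"`
+}
+
+func main() {
+	// Example request: the archive size of an arbitrary charm id.
+	url := "https://api.jujucharms.com/charmstore/v4/precise/wordpress-34/meta/archive-size"
+	resp, err := http.Get(url)
+	if err != nil {
+		fmt.Println("request failed:", err)
+		return
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		// Error responses carry a JSON error object as their body.
+		var apiErr Error
+		if err := json.NewDecoder(resp.Body).Decode(&apiErr); err != nil {
+			fmt.Println("cannot decode error response:", err)
+			return
+		}
+		fmt.Printf("charm store error: %s (code %q)\n", apiErr.Message, apiErr.Code)
+		return
+	}
+	// Successful responses hold a single JSON object.
+	var result map[string]interface{}
+	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
+		fmt.Println("cannot decode result:", err)
+		return
+	}
+	fmt.Println(result)
+}
+```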
+
+## Requests
+
+### Expand-id
+
+#### GET *id*/expand-id
+
+The expand-id path expands a general id into a set of specific ids. It strips
+any revision number and series from id, and returns a slice of all the possible
+ids matched by that, including all the versions and series.
+If *id* is in the development channel, all development and non-development
+revisions will be returned; if it is not, then only non-development
+revisions will be returned.
+
+```go
+[]Id
+
+type Id struct {
+	Id string
+}
+```
+
+Example: `GET wordpress/expand-id`
+
+```json
+[
+    {"Id": "trusty/wordpress-2"},
+    {"Id": "trusty/wordpress-1"},
+    {"Id": "precise/wordpress-2"},
+    {"Id": "precise/wordpress-1"}
+]
+```
+
+Example: `GET precise/wordpress-34/expand-id`
+
+```json
+[
+    {"Id": "trusty/wordpress-2"},
+    {"Id": "trusty/wordpress-1"},
+    {"Id": "precise/wordpress-2"},
+    {"Id": "precise/wordpress-1"}
+]
+```
+
+Example: `GET development/precise/wordpress-34/expand-id`
+
+```json
+[
+    {"Id": "development/trusty/wordpress-3"},
+    {"Id": "trusty/wordpress-2"},
+    {"Id": "trusty/wordpress-1"},
+    {"Id": "precise/wordpress-2"},
+    {"Id": "precise/wordpress-1"}
+]
+```
+
+
+### Archive
+
+#### GET *id*/archive
+
+The `/archive` path returns the raw archive zip file for the charm with the
+given charm id. The response header includes the SHA-384 hash of the archive
+(Content-Sha384) and the fully qualified entity id (Entity-Id).
+
+Example: `GET wordpress/archive`
+
+Any additional elements attached to the `/archive` path retrieve the file from
+the charm or bundle's zip file. The `Content-Sha384` header field in the
+response will hold the hash checksum of the archive.
+
+#### GET *id*/archive/*path*
+
+Retrieve a file corresponding to *path* in the charm or bundle's zip archive.
+
+Example: `GET trusty/wordpress/archive/config.yaml`
+
+#### POST *id*/archive
+
+This uploads the given charm or bundle in zip format.
+
+```
+
+```
+
+The id specified must specify the series and must not contain a revision
+number. The hash flag must specify the SHA-384 hash of the uploaded archive in
+hexadecimal format. If the same content has already been uploaded, the response
+will return immediately without reading the entire body.
+
+The charm or bundle is verified before being made available.
+
+The response holds the full charm/bundle id including the revision number.
+
+```go
+type UploadedId struct {
+	Id string
+}
+```
+
+Example response body:
+
+```json
+{
+    "Id": "precise/wordpress-24"
+}
+```
+
+#### DELETE *id*/archive
+
+This deletes the charm or bundle with the given id. If the id is not
+fully specified, the charm series or revision is not resolved and the charm
+is not deleted. In order to delete the charm, the id must include both the
+series and the revision. In order to delete all versions of the charm, use
+`/expand-id` and iterate on all elements in the result.
+
+### Visual diagram
+
+#### GET *id*/diagram.svg
+
+This returns a scalable vector-graphics image representing the entity with the
+given id. This will return a not-found error for charms.
+
+#### GET *id*/icon.svg
+
+This returns the SVG image of the charm's icon. This reports a not-found error
+for bundles. Unlike `archive/icon.svg`, which returns a 404 when an icon does
+not exist, this endpoint returns the default icon.
+
+#### GET *id*/readme
+
+This returns the README.
+
+### Promulgation
+
+#### PUT *id*/promulgate
+
+A PUT to ~*user*/*anyseries*/*name*-*anyrevision* sets whether entities
+with the id *x*/*name* are considered to be aliases
+for ~*user*/*x*/*name* for all series *x*. The series
+and revision in the id are ignored (except that an
+entity must exist that matches the id).
+
+If Promulgate is true, it means that any new charms published
+to ~*user*/*x*/*name* will also be given the alias
+*x*/*name*. The latest revision for all ids ~*user*/*anyseries*/*name*
+will also be aliased likewise.
+
+If Promulgate is false, any new charms published
+to ~*user*/*anyseries*/*name* will not be given a promulgated
+alias, but no change is made to any existing aliases.
+
+The promulgated status can be retrieved from the
+promulgated meta endpoint.
+
+```go
+type PromulgateRequest struct {
+	Promulgate bool
+}
+```
+
+Example: `PUT ~charmers/precise/wordpress-23/promulgate`
+
+Request body:
+```json
+{
+    "Promulgate": true
+}
+```
+
+### Charm and bundle publishing
+
+#### PUT *id*/publish
+
+A PUT to ~*user*/*anyseries*/*name*-*anyrevision* sets whether the
+corresponding charm or bundle is published and can be accessed through a URL
+with no channel. If the revision number is not specified, the id is resolved to
+the charm or bundle with the latest development revision number when
+publishing, and to the charm or bundle with the latest non-development revision
+number when unpublishing. The id must not include the development channel.
+
+```go
+type PublishRequest struct {
+	Published bool
+}
+```
+
+If Published is true, the charm or bundle is made available at the
+non-development URL with the same revision number. If Published is false, the
+id is unpublished.
+
+The response includes the id and promulgated id of the entity after the action
+is performed:
+
+```go
+type PublishResponse struct {
+	Id            *charm.URL
+	PromulgatedId *charm.URL `json:",omitempty"`
+}
+```
+
+If the charm or bundle has been unpublished, the identifiers in the response
+will represent the corresponding development charm or bundle.
+
+Example: `PUT ~charmers/trusty/django-42/publish`
+
+Request body:
+```json
+{
+    "Published": true
+}
+```
+
+Response body:
+```json
+{
+    "Id": "cs:~charmers/trusty/django-42",
+    "PromulgatedId": "cs:trusty/django-10"
+}
+```
+
+### Stats
+
+#### GET stats/counter/...
+
+This endpoint can be used to retrieve stats related to entities.
+
+
+GET stats/counter/key[:key]...?[by=unit][&start=date][&stop=date][&list=1]
+
+
+The stats path provides a general way to retrieve counts of operations. A
+statistic is composed of an ordered tuple of keys:
+
+
+kind:series:name:user
+
+Operations on the store increment counts associated with a specific tuple,
+determined by the operation and the charm being operated on.
+
+When querying statistics, it is possible to aggregate statistics by using a
+`*` as the last tuple element, standing for all tuples with the given prefix.
+For example, `missing:*` will retrieve the counts for all operations of kind
+"missing", regardless of the series, name or user.
+
+If the list flag is specified, counts for all next-level keys will be listed.
+For example, a query for `stats/counter/download:*?list=1&by=week` will show
+all the download counts for each series for each week.
+
+If a date range is specified, the returned counts will be restricted to the
+given date range. Dates are specified in the form "yyyy-mm-dd". If the `by`
+flag is specified, one count is shown for each unit in the specified period,
+where unit can be `week` or `day`.
+
+Possible kinds are:
+
+* archive-download
+* archive-delete
+* archive-upload
+* archive-failed-upload
+
+```go
+[]Statistic
+
+type Statistic struct {
+	Key   string `json:",omitempty"`
+	Date  string `json:",omitempty"`
+	Count int64
+}
+```
+
+Example: `GET "stats/counter/missing:trusty:*"`
+
+```json
+[
+    {"Count": 1917}
+]
+```
+
+Example:
+`GET stats/counter/download/archive-download:*?by=week&list=1&start=2014-03-01`
+
+```json
+[
+    {
+        "Key": "charm-bundle:precise:*",
+        "Date": "2014-06-08",
+        "Count": 2715
+    }, {
+        "Key": "charm-bundle:trusty:*",
+        "Date": "2014-06-08",
+        "Count": 2672
+    }, {
+        "Key": "charm-bundle:oneiric:*",
+        "Date": "2014-06-08",
+        "Count": 14
+    }, {
+        "Key": "charm-bundle:quantal:*",
+        "Date": "2014-06-08",
+        "Count": 1
+    }, {
+        "Key": "charm-bundle:trusty:*",
+        "Date": "2014-06-15",
+        "Count": 3835
+    }, {
+        "Key": "charm-bundle:precise:*",
+        "Date": "2014-06-15",
+        "Count": 3389
+    }
+]
+```
+
+**Update**:
+We need to provide aggregated stats for downloads:
+* promulgated and ~user counterpart charms should have the same download stats.
+
+#### PUT stats/update
+
+This endpoint can be used to increase the stats related to an entity.
+It increases the download count for the given entity by one, at the given
+timestamp. In the future it may also record the client issuing the request.
+It is used when the charm store sits behind a cache server that does not call
+the real /archive endpoint and therefore does not increase the download counts.
+
+
+PUT stats/update
+
+
+Request body:
+```go
+type StatsUpdateRequest struct {
+	Timestamp      time.Time
+	Type           string
+	CharmReference *charm.URL
+}
+```
+
+Example: `PUT stats/update`
+
+Request body:
+```json
+{
+    "Timestamp": "2015-08-06T06:46:13Z",
+    "Type": "deploy",
+    "CharmReference": "cs:~charmers/utopic/wordpress-42"
+}
+```
+
+### Meta
+
+#### GET meta
+
+The meta path returns an array of all the path names under meta, excluding the
+`meta/any` path, suitable for passing as "include=" flags to paths that
+accept them. Note that the result does not include sub-paths of extra-info
+because these vary according to each charm or bundle.
+
+Example: `GET /meta`
+
+```json
+[
+    "archive-size",
+    "archive-upload-time",
+    "bundle-machine-count",
+    "bundle-metadata",
+    "bundle-unit-count",
+    "bundles-containing",
+    "charm-actions",
+    "charm-config",
+    "charm-metadata",
+    "charm-related",
+    "extra-info",
+    "hash",
+    "hash256",
+    "id",
+    "id-name",
+    "id-revision",
+    "id-series",
+    "id-user",
+    "manifest",
+    "promulgated",
+    "revision-info",
+    "stats",
+    "supported-series",
+    "tags"
+]
+```
+
+#### GET meta/*endpoint*
+
+This endpoint allows a user to query any number of IDs for metadata.
+GET meta/endpoint?id=id0[&id=id1...][otherflags]
+
+
+This call is equivalent to calling "GET *id*/meta/*endpoint*" for each id
+separately. The result holds an element for each id in the request with the
+resulting metadata exactly as returned by "GET
+*id*/meta/*endpoint*[?*otherflags*]". The map keys are the ids exactly as
+specified in the request, although they are resolved to fill in series and
+revision as usual when fetching the metadata. Any ids that are not found, or
+whose metadata is not relevant, will be omitted.
+
+```go
+map[string] interface{}
+```
+
+Example: `GET meta/archive-size?id=wordpress&id=mysql`
+
+```json
+{
+    "wordpress": {
+        "Size": 1234
+    },
+    "mysql": {
+        "Size": 4321
+    }
+}
+```
+
+Example: `GET /meta/any?include=archive-size&include=extra-info/featured&id=wordpress&id=mysql`
+
+```json
+{
+    "wordpress": {
+        "Id": "precise/wordpress-3",
+        "Meta": {
+            "archive-size": {
+                "Size": 1234
+            },
+            "extra-info/featured": true
+        }
+    },
+    "mysql": {
+        "Id": "precise/mysql-23",
+        "Meta": {
+            "archive-size": {
+                "Size": 4321
+            },
+            "extra-info/featured": true
+        }
+    }
+}
+```
+
+#### PUT meta/*endpoint*
+
+A PUT to this endpoint allows the metadata endpoint of several ids to be
+updated. The request body is as specified in the result of the above GET
+request. The ids in the body specify the ids that will be updated. If there is
+a failure, the error code will be "multiple errors", and the Info field will
+hold one entry for each id in the request body that failed, holding the error
+for that id. If there are no errors, PUT endpoints usually return an empty body
+in the response.
+
+Example: `PUT meta/extra-info/featured`
+
+Request body:
+```json
+{
+    "precise/wordpress-23": true,
+    "precise/mysql-53": true,
+    "precise/wordpress-22": false
+}
+```
+
+Example: `PUT meta/any`
+
+Request body:
+```json
+{
+    "precise/wordpress-23": {
+        "Meta": {
+            "extra-info/featured": true,
+            "extra-info/revision-info": "12dfede4ee23",
+            "bad-metaname": 3235
+        }
+    },
+    "trusty/mysql-23": {
+        "Meta": {
+            "extra-info/featured": false
+        }
+    }
+}
+```
+
+Response body (with HTTP status 500):
+```json
+{
+    "Message": "multiple errors (1) found",
+    "Code": "multiple errors",
+    "Info": {
+        "precise/wordpress-23": {
+            "Message": "multiple errors",
+            "Code": "multiple errors",
+            "Info": {
+                "bad-metaname": {
+                    "Message": "metadata not found",
+                    "Code": "not found"
+                }
+            }
+        }
+    }
+}
+```
+
+If the request succeeds, a 200 OK status code is returned with an empty
+response body.
+
+#### GET *id*/meta
+
+This path returns the same information as the meta path. The results are the
+same regardless of the actual id.
+
+Example: `GET foo/meta`
+
+```json
+[
+    "archive-size",
+    "archive-upload-time",
+    "bundle-machine-count",
+    "bundle-metadata",
+    "bundle-unit-count",
+    "bundles-containing",
+    "charm-actions",
+    "charm-config",
+    "charm-metadata",
+    "charm-related",
+    "extra-info",
+    "id",
+    "id-name",
+    "id-revision",
+    "id-series",
+    "id-user",
+    "manifest",
+    "promulgated",
+    "revision-info",
+    "stats",
+    "tags"
+]
+```
+
+#### GET *id*/meta/any
+
+
+GET id/meta/any?[include=meta[&include=meta...]]
+
+
+The `meta/any` path returns requested metadata information on the given id. If
+the id is non-specific, the latest revision and preferred series for the id
+will be assumed.
+
+Other metadata can be requested by specifying one or more `include` flags. The
+value of each meta must be the name of one of the path elements defined under
+the `/meta` path (for example: `charm-config`, `charm-metadata`, `manifest`)
+and causes the desired metadata to be included in the Meta field, keyed by
+meta. If there is no metadata for the given meta path, the element will be
+omitted (for example, if bundle-specific data is requested for a charm id).
+
+The `any` path may not itself be the subject of an include directive. It is
+allowed to specify "charm-" or "bundle-" specific metadata paths -- if the id
+refers to a charm then bundle-specific metadata will be omitted and vice versa.
+
+Various other paths use the same `include` mechanism to allow retrieval of
+arbitrary metadata.
+
+```go
+type Meta struct {
+	Id   string                 `json:",omitempty"`
+	Meta map[string]interface{} `json:",omitempty"`
+}
+```
+
+Example: `GET wordpress/meta/any`
+
+```json
+{
+    "Id": "trusty/wordpress-32"
+}
+```
+
+Example: `GET ubuntu/meta/any?include=archive-size&include=extra-info/featured`
+
+```json
+{
+    "Id": "trusty/ubuntu-3",
+    "Meta": {
+        "archive-size": {
+            "Size": 7580
+        },
+        "extra-info/featured": true
+    }
+}
+```
+
+#### PUT *id*/meta/any
+
+This endpoint allows the updating of several metadata elements at once. These
+must support PUT requests. The body of the PUT request is in the same form as
+returned by the above GET request, except with the Id field omitted. The
+elements inside the Meta field specify which meta endpoints will be updated. If
+one or more of the updates fail, the resulting error will contain an Info field
+that has an entry for each update that failed, keyed by the endpoint name.
+
+Example: `PUT ubuntu/meta/any`
+
+Request body:
+```json
+{
+    "Meta": {
+        "extra-info": {
+            "revision-info": "a46f45649f0d0e0b"
+        },
+        "extra-info/featured": true
+    }
+}
+```
+
+Example: `PUT ubuntu/meta/any`
+
+Request body:
+```json
+{
+    "Meta": {
+        "extra-info/featured": false,
+        "archive-size": 12354
+    }
+}
+```
+
+Response body:
+```json
+{
+    "Message": "multiple errors",
+    "Code": "multiple errors",
+    "Info": {
+        "archive-size": {
+            "Message": "method not allowed",
+            "Code": "bad request"
+        }
+    }
+}
+```
+
+#### GET *id*/meta/charm-metadata
+
+The `meta/charm-metadata` path returns the contents of the charm metadata file
+for a charm. The id must refer to a charm, not a bundle.
+
+```go
+type CharmMetadata struct {
+	Summary     string
+	Description string
+	Subordinate bool `json:",omitempty"`
+	// Provides and Requires map from the relation name to
+	// information about the relation.
+	Provides map[string]Relation `json:",omitempty"`
+	Requires map[string]Relation `json:",omitempty"`
+	Peers    map[string]Relation `json:",omitempty"`
+	Tags     []string            `json:",omitempty"`
+}
+
+type Relation struct {
+	Interface string
+	Optional  bool `json:",omitempty"`
+	Limit     int  `json:",omitempty"`
+	Scope     RelationScope
+}
+
+type RelationRole string
+type RelationScope string
+```
+
+The possible values of a `RelationScope` are:
+
+* global
+* container
+
+Example: `GET wordpress/meta/charm-metadata`
+
+```json
+{
+    "Summary": "WordPress is a full featured web blogging tool, this charm deploys it.",
+    "Description": "This will install and setup WordPress optimized to run in the cloud.
This install, in particular, will \n place Ngnix and php-fpm configured to scale horizontally with Nginx's reverse proxy\n", + "Provides": { + "website": { + "Interface": "http", + "Scope": "global" + } + }, + "Requires": { + "cache": { + "Interface": "cache", + "Scope": "global" + }, + "db": { + "Interface": "db", + "Scope": "global" + } + }, + "Peers": { + "loadbalancer": { + "Interface": "reversenginx", + "Scope": "global" + } + }, + "Tags": [ + "applications" + ] +} +``` + +#### GET *id*/meta/bundle-metadata + +The `meta/bundle-metadata` path returns the contents of the bundle metadata +file for a bundle. The id must refer to a bundle, not a charm. + +```go +type BundleData struct { + Services map[string] ServiceSpec + Machines map[string] MachineSpec `json:",omitempty"` + Series string `json:",omitempty"` + Relations [][]string `json:",omitempty"` +} + +type MachineSpec struct { + Constraints string `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` +} + +type ServiceSpec struct { + Charm string + NumUnits int + To []string `json:",omitempty"` + + // Options holds the configuration values + // to apply to the new service. They should + // be compatible with the charm configuration. + Options map[string]interface{} `json:",omitempty"` + Annotations map[string]string `json:",omitempty"` + Constraints string `json:",omitempty"` +} +``` + +Example: `GET mediawiki/meta/bundle-metadata` + +```json +{ + "Services": { + "mediawiki": { + "Charm": "cs:precise/mediawiki-10", + "NumUnits": 1, + "Options": { + "debug": false, + "name": "Please set name of wiki", + "skin": "vector" + }, + "Annotations": { + "gui-x": "619", + "gui-y": "-128" + } + }, + "memcached": { + "Charm": "cs:precise/memcached-7", + "NumUnits": 1, + "Options": { + "connection_limit": "global", + "factor": 1.25 + }, + "Annotations": { + "gui-x": "926", + "gui-y": "-125" + } + } + }, + "Relations": [ + [ + "mediawiki:cache", + "memcached:cache" + ] + ] +} +``` + +#### GET *id*/meta/bundle-unit-count + +The `meta/bundle-unit-count` path returns a count of all the units that will be +created by a bundle. The id must refer to a bundle, not a charm. + +```go +type BundleCount struct { + Count int +} +``` + +Example: `GET bundle/mediawiki/meta/bundle-unit-count` + +```json +{ + "Count": 1 +} +``` + +#### GET *id*/meta/bundle-machine-count + +The `meta/bundle-machine-count` path returns a count of all the machines used +by a bundle. The id must refer to a bundle, not a charm. + +```go +type BundleCount struct { + Count int +} +``` + +Example: `GET bundle/mediawiki/meta/bundle-machine-count` + +```json +{ + "Count": 2 +} +``` + +#### GET *id*/meta/manifest + +The `meta/manifest` path returns the list of all files in the bundle or charm's +archive. + +```go +[]ManifestFile +type ManifestFile struct { + Name string + Size int64 +} +``` + +Example: `GET trusty/juju-gui-3/meta/manifest` + +```json +[ + { + "Name": "config.yaml", + "Size": 8254 + }, + { + "Name": "HACKING.md", + "Size": 11376 + }, + { + "Name": "Makefile", + "Size": 3304 + }, + { + "Name": "metadata.yaml", + "Size": 1110 + }, + { + "Name": "README.md", + "Size": 9243 + }, + { + "Name": "hooks/config-changed", + "Size": 1636 + }, + { + "Name": "hooks/install", + "Size": 3055 + }, + { + "Name": "hooks/start", + "Size": 1101 + }, + { + "Name": "hooks/stop", + "Size": 1053 + } +] +``` + +#### GET *id*/meta/charm-actions + + +The `meta/charm-actions` path returns the actions available in a charm as +stored in its `actions.yaml` file. 
Id must refer to a charm, not a bundle. + +```go +type Actions struct { + Actions map[string]ActionSpec `json:",omitempty"` +} + +type ActionSpec struct { + Description string + Params JSONSchema +} +``` + +The Params field holds a JSON schema specification of an action's parameters. +See [http://json-schema.org/latest/json-schema-core.html](http://json-schema.org/latest/json-schema-core.html). + +Example: `GET wordpress/meta/charm-actions` + +```json +{ + "Actions": { + "backup": { + "Description": "back up the charm", + "Params": { + "properties": { + "destination-host": { + "type": "string" + }, + "destination-name": { + "type": "string" + } + }, + "required": [ + "destination-host" + ], + "type": "object" + } + } + } +} +``` + +#### GET *id*/meta/charm-config + +The `meta/charm-config` path returns the charm's configuration specification as +stored in its `config.yaml` file. Id must refer to a charm, not a bundle. + +```go +type Config struct { + Options map[string] Option +} + +// Option represents a single charm config option. +type Option struct { + Type string + Description string + Default interface{} +} +``` + +Example: `GET trusty/juju-gui-3/meta/charm-config` + +```json +{ + "Options": { + "builtin-server": { + "Type": "boolean", + "Description": "Enable the built-in server.", + "Default": true + }, + "login-help": { + "Type": "string", + "Description": "The help text shown to the user.", + "Default": null + }, + "read-only": { + "Type": "boolean", + "Description": "Enable read-only mode.", + "Default": false + } + } +} +``` + +#### GET *id*/meta/archive-size + +The `meta/archive-size` path returns the archive size, in bytes, of the archive +of the given charm or bundle id. + +```go +type ArchiveSize struct { + Size int64 +} +``` + +Example: `GET wordpress/meta/archive-size` + +```json +{ + "Size": 4747 +} +``` + +#### GET *id*/meta/hash + +This path returns the SHA384 hash sum of the archive of the given charm or +bundle id. + +```go +type HashResponse struct { + Sum string +} +``` + +Example: `GET wordpress/meta/hash` + +Response body: +```json +{ + "Sum": "0a410321586d244d3981e2b23a27a7e86ebdcab8bd0ca8f818d3f4c34b2ea2791e0dbdc949f70b283a3f5efdf908abf1" +} +``` + +#### GET *id*/meta/hash256 + +This path returns the SHA256 hash sum of the archive of the given charm or +bundle id. + +```go +type HashResponse struct { + Sum string +} +``` + +Example: `GET wordpress/meta/hash256` + +Response body: +```json +{ + "Sum": "9ab5036cc18ba61a9d25fad389e46b3d407fc02c3eba917fe5f18fdf51ee6924" +} +``` + +#### GET *id*/meta/supported-series + +This path returns the set of series supported by the given +charm. This endpoint is appropriate for charms only. + +```go +type SupportedSeriesResponse struct { + SupportedSeries []string +} +``` + +Example: `GET precise/wordpress/meta/supported-series` + +Response body: +```json +{ + "SupportedSeries": ["precise"] +} +``` + +#### GET *id*/meta/bundles-containing + +The `meta/bundles-containing` path returns information on the last revision of +any bundles that contain the charm with the given id. + +
+GET id/meta/bundles-containing[?include=meta[&include=meta...]]
+
+
+The Meta field is populated with information on the returned bundles according
+to the include flags - see the `meta/any` path for more info on how to use the
+`include` flag. The only valid values for the `any-series`, `any-revision`
+and `all-results` flags are 0, 1 and empty. If `all-results` is enabled, all
+the bundle revisions are returned, not just the last one. The API should
+validate these values and return a bad request error if any other value is
+provided.
+
+```go
+[]Bundle
+type Bundle struct {
+	Id   string
+	Meta map[string]interface{} `json:",omitempty"`
+}
+```
+
+Example: `GET mysql/meta/bundles-containing?include=featured` might return:
+
+```json
+[
+    {
+        "Id": "bundle/mysql-scalable",
+        "Meta": {
+            "featured": {
+                "Featured": false
+            }
+        }
+    },
+    {
+        "Id": "bundle/wordpress-simple",
+        "Meta": {
+            "featured": {
+                "Featured": true
+            }
+        }
+    }
+]
+```
+
+#### GET *id*/meta/extra-info
+
+The meta/extra-info path reports any additional metadata recorded for the
+charm. This contains only information stored by clients - the API server itself
+does not populate any fields. The resulting object holds an entry for each
+piece of metadata recorded with a PUT to `meta/extra-info`.
+
+```go
+type ExtraInfo struct {
+	Values map[string] interface{}
+}
+```
+
+Example: `GET wordpress/meta/extra-info`
+
+```json
+{
+    "featured": true,
+    "vcs-digest": "4b6b3c7d795eb66ca5f82bc52c01eb57ab595ab2"
+}
+```
+
+#### GET *id*/meta/extra-info/*key*
+
+This path returns the contents of the given `extra-info` key. The result is
+exactly the JSON value stored as a result of the PUT request to `extra-info` or
+`extra-info/key`.
+
+Example: `GET wordpress/meta/extra-info/featured`
+
+```json
+true
+```
+
+#### PUT *id*/meta/extra-info
+
+This request updates any of the extra-info values. Values that are not
+mentioned in the request are left untouched. Any fields with null values are
+deleted.
+
+Example: `PUT precise/wordpress-32/meta/extra-info`
+
+Request body:
+```json
+{
+    "vcs-digest": "7d6a853c7bb102d90027b6add67b15834d815e08"
+}
+```
+
+#### PUT *id*/meta/extra-info/*key*
+
+This request creates or updates the value for a specific key.
+If the value is null, the key is deleted.
+
+Example: `PUT precise/wordpress-32/meta/extra-info/vcs-digest`
+
+Request body:
+
+```json
+"7d6a853c7bb102d90027b6add67b15834d815e08"
+```
+
+The above example is equivalent to the `meta/extra-info` example above.
+
+#### GET *id*/meta/charm-related
+
+The `meta/charm-related` path returns all charms that are related to the given
+charm id, which must not refer to a bundle. It is possible to include
+additional metadata for charms by using the `include` query:
+
+
+GET id/meta/charm-related[?include=meta[&include=meta...]]
+
+
+```go
+type Related struct {
+	// Requires holds an entry for each interface provided by
+	// the charm, containing all charms that require that interface.
+	Requires map[string][]Item `json:",omitempty"`
+
+	// Provides holds an entry for each interface required by
+	// the charm, containing all charms that provide that interface.
+	Provides map[string][]Item `json:",omitempty"`
+}
+
+type Item struct {
+	Id   string
+	Meta map[string]interface{} `json:",omitempty"`
+}
+```
+
+The Meta field is populated according to the include flags - see the `meta`
+path for more info on how to use this.
+
+Example: `GET wordpress/meta/charm-related`
+
+```json
+{
+    "Requires": {
+        "memcache": [
+            {"Id": "precise/memcached-13"}
+        ],
+        "db": [
+            {"Id": "precise/mysql-46"},
+            {"Id": "~clint-fewbar/precise/galera-42"}
+        ]
+    },
+    "Provides": {
+        "http": [
+            {"Id": "precise/apache2-24"},
+            {"Id": "precise/haproxy-31"},
+            {"Id": "precise/squid-reverseproxy-8"}
+        ]
+    }
+}
+```
+
+Example: `GET trusty/juju-gui-3/meta/charm-related?include=charm-config`
+
+```json
+{
+    "Provides": {
+        "http": [
+            {
+                "Id": "precise/apache2-24",
+                "Meta": {
+                    "charm-config": {
+                        "Options": {
+                            "logrotate_count": {
+                                "Type": "int",
+                                "Description": "The number of days",
+                                "Default": 365
+                            }
+                        }
+                    }
+                }
+            }
+        ],
+        "nrpe-external-master": [
+            {
+                "Id": "precise/nova-compute-31",
+                "Meta": {
+                    "charm-config": {
+                        "Options": {
+                            "bridge-interface": {
+                                "Type": "string",
+                                "Description": "Bridge interface",
+                                "Default": "br100"
+                            },
+                            "bridge-ip": {
+                                "Type": "string",
+                                "Description": "IP to be assigned to bridge",
+                                "Default": "11.0.0.1"
+                            }
+                        }
+                    }
+                }
+            }
+        ]
+    }
+}
+```
+
+#### GET *id*/meta/archive-upload-time
+
+The `meta/archive-upload-time` path returns the time the archive for the given
+*id* was uploaded. The time is formatted according to RFC3339.
+
+```go
+type ArchiveUploadTimeResponse struct {
+	UploadTime time.Time
+}
+```
+
+Example: `GET trusty/wordpress-42/meta/archive-upload-time`
+
+```json
+{
+    "UploadTime": "2014-07-04T13:53:57.403506102Z"
+}
+```
+
+#### GET *id*/meta/promulgated
+
+The `promulgated` path reports whether the entity with the given ID is promulgated.
+Promulgated charms do not require the user portion of the ID to be specified.
+
+```go
+type PromulgatedResponse struct {
+	Promulgated bool
+}
+```
+
+Example: `GET trusty/wordpress-42/meta/promulgated`
+
+```json
+{
+    "Promulgated": true
+}
+```
+
+#### GET *id*/meta/stats
+
+
+GET id/meta/stats?[refresh=0|1]
+
+
+Many clients will need stats to determine the best result, and charm/bundle
+detail views may present stats as important information to users.
+Currently we track deployment stats only. We intend to open this up to
+additional data. The response includes the download counts for both the
+specific requested entity revision and for all revisions, and it is structured
+as below:
+
+```go
+// StatsResponse holds the result of an id/meta/stats GET request.
+type StatsResponse struct {
+	// ArchiveDownloadCount is superseded by ArchiveDownload but maintained for
+	// backward compatibility.
+	ArchiveDownloadCount int64
+	// ArchiveDownload holds the downloads count for a specific revision of the
+	// entity.
+	ArchiveDownload StatsCount
+	// ArchiveDownloadAllRevisions holds the downloads count for all revisions
+	// of the entity.
+	ArchiveDownloadAllRevisions StatsCount
+}
+
+// StatsCount holds stats counts and is used as part of StatsResponse.
+type StatsCount struct {
+	Total int64 // Total count over all time.
+	Day   int64 // Count over the last day.
+	Week  int64 // Count over the last week.
+	Month int64 // Count over the last month.
+}
+```
+
+If the refresh boolean parameter is non-zero, the latest stats will be returned
+without caching.
+
+#### GET *id*/meta/tags
+
+The `tags` path returns any tags that are associated with the entity.
+
+Example: `GET trusty/wordpress-42/meta/tags`
+
+```json
+{
+    "Tags": [
+        "blog",
+        "cms"
+    ]
+}
+```
+
+#### GET *id*/meta/revision-info
+
+The `revision-info` path returns information about other available revisions of
+the charm id that the charm store knows about. It will include both older and
+newer revisions. The fully qualified ids of those charms will be returned in an
+ordered list from newest to oldest revision. Note that the current revision
+will be included in the list as it is also an available revision.
+
+```go
+type RevisionInfoResponse struct {
+	Revisions []*charm.URL
+}
+```
+
+Example: `GET trusty/wordpress-42/meta/revision-info`
+
+```json
+{
+    "Revisions": [
+        "cs:trusty/wordpress-43",
+        "cs:trusty/wordpress-42",
+        "cs:trusty/wordpress-41",
+        "cs:trusty/wordpress-39"
+    ]
+}
+```
+
+#### GET *id*/meta/id
+
+The `id` path returns information on the charm or bundle id, split apart into
+its various components, including the id itself. The information is exactly
+that contained within the entity id.
+
+```go
+type IdResponse struct {
+	Id       *charm.URL
+	User     string
+	Series   string `json:",omitempty"`
+	Name     string
+	Revision int
+}
+```
+
+Example: `GET ~bob/trusty/wordpress/meta/id`
+
+```json
+{
+    "Id": "~bob/trusty/wordpress-42",
+    "User": "bob",
+    "Series": "trusty",
+    "Name": "wordpress",
+    "Revision": 42
+}
+```
+
+Example: `GET precise/wordpress/meta/id`
+
+```json
+{
+    "Id": "precise/wordpress-42",
+    "Series": "precise",
+    "Name": "wordpress",
+    "Revision": 42
+}
+```
+
+Example: `GET bundle/openstack/meta/id`
+
+```json
+{
+    "Id": "bundle/openstack-3",
+    "Series": "bundle",
+    "Name": "openstack",
+    "Revision": 3
+}
+```
+
+#### GET *id*/meta/id-revision
+
+The `revision` path returns information on the revision of the id. The
+information is exactly that contained within the id.
+
+```go
+type Revision struct {
+	Revision int
+}
+```
+
+Example: `GET trusty/wordpress-42/meta/id-revision`
+
+```json
+{
+    "Revision": 42
+}
+```
+
+#### GET *id*/meta/id-name
+
+The `name` path returns information on the name of the id. The information is
+exactly that contained within the id.
+
+```go
+type Name struct {
+	Name string
+}
+```
+
+Example: `GET trusty/wordpress-42/meta/id-name`
+
+```json
+{
+    "Name": "wordpress"
+}
+```
+
+#### GET *id*/meta/id-user
+
+The `id-user` path returns information on the user name in the id. This
+information is exactly that contained within the id.
+
+```go
+type User struct {
+	User string
+}
+```
+
+Example: `GET ~bob/trusty/wordpress-42/meta/id-user`
+
+```json
+{
+    "User": "bob"
+}
+```
+
+Example: `GET trusty/wordpress-42/meta/id-user`
+
+```json
+{
+    "User": ""
+}
+```
+
+#### GET *id*/meta/id-series
+
+The `id-series` path returns information on the series in the id. This
+information is exactly that contained within the id. For bundles, this will
+return "bundle".
+
+```go
+type Series struct {
+	Series string
+}
+```
+
+Example: `GET ~bob/trusty/wordpress-42/meta/id-series`
+
+```json
+{
+    "Series": "trusty"
+}
+```
+
+#### GET *id*/meta/common-info
+
+The meta/common-info path reports any common metadata recorded for the base
+entity. This contains only information stored by clients - the API server
+itself does not populate any fields. The resulting object holds an entry for
+each piece of metadata recorded with a PUT to `meta/common-info`.
+
+```go
+type CommonInfo struct {
+	Values map[string] interface{}
+}
+```
+
+Example: `GET wordpress/meta/common-info`
+ `GET precise/wordpress-32/meta/common-info`
+
+```json
+{
+    "homepage": "http://wordpress.org",
+    "bugs-url": "http://wordpress.org/bugs"
+}
+```
+
+#### GET *id*/meta/common-info/*key*
+
+This path returns the contents of the given `common-info` key. The result is
+exactly the JSON value stored as a result of the PUT request to `common-info`
+or `common-info/key`.
+
+Example: `GET wordpress/meta/common-info/homepage`
+ `GET precise/wordpress-32/meta/common-info/homepage`
+
+```json
+"http://wordpress.org"
+```
+
+#### PUT *id*/meta/common-info
+
+This request updates any of the common-info values. Values that are not
+mentioned in the request are left untouched. Any fields with null values are
+deleted.
+
+Example: `PUT precise/wordpress-32/meta/common-info`
+
+Request body:
+```json
+{
+    "bugs-url": "http://wordpress.org/newbugs"
+}
+```
+
+#### PUT *id*/meta/common-info/*key*
+
+This request creates or updates the value for a specific key.
+If the value is null, the key is deleted.
+
+Example: `PUT precise/wordpress-32/meta/common-info/bugs-url`
+
+Request body:
+
+```json
+"http://wordpress.org/newbugs"
+```
+
+The above example is equivalent to the `meta/common-info` example above.
+
+### Resources
+
+**Not yet implemented**
+
+#### POST *id*/resources/name.stream
+
+Posting to the resources path creates a new version of the given stream
+for the charm with the given id. The request returns the new version.
+
+```go
+type ResourcesRevision struct {
+	Revision int
+}
+```
+
+#### GET *id*/resources/name.stream[-revision]/arch/filename
+
+Getting from the `/resources` path retrieves a charm resource from the charm
+with the given id. If the revision is not specified, the latest revision of
+the resource is retrieved. The SHA-256 hash of the data is specified in the
+HTTP response headers.
+
+#### PUT *id*/resources/[~user/]series/name.stream-revision/arch?sha256=hash
+
+Putting to the `resources` path uploads a resource (an arbitrary "blob" of
+data) associated with the charm with id series/name, which must not be a
+bundle. Stream and arch specify which of the charm's resource streams and which
+architecture the resource will be associated with, respectively.
Revision +specifies the revision of the stream that's being uploaded to. + +The hash value must specify the hash of the stream. If the same series, name, +stream, revision combination is PUT again, it must specify the same hash. + +### Search + +#### GET search + +The `search` path searches within the latest version of charms and bundles +within the store. + +
+GET search[?text=text][&autocomplete=1][&filter=value...][&limit=limit][&skip=skip][&include=meta[&include=meta...]][&sort=field]
+
+
+`text` specifies any text to search for. If `autocomplete` is specified, the
+search will return only charms and bundles with a name that has text as a
+prefix. `limit` limits the number of returned items to the specified limit
+count. `skip` skips over the first skip items in the result. Any number of
+filters may be specified, limiting the search to items with attributes that
+match the specified filter value. Items matching any of the selected values for
+a filter are selected, so `name=1&name=2` would match items whose name was
+either 1 or 2. However, if multiple filters are specified, the charm must match
+all of them, so `name=1&series=2` will only match charms whose name is 1 and
+whose series is 2. Available filters are:
+
+* tags - the set of tags associated with the charm.
+* name - the charm's name.
+* owner - the charm's owner (the ~user element of the charm id)
+* promulgated - the charm has been promulgated.
+* provides - interfaces provided by the charm.
+* requires - interfaces required by the charm.
+* series - the charm's series.
+* summary - the charm's summary text.
+* description - the charm's description text.
+* type - "charm" or "bundle" to search only one doctype or the other.
+
+Notes:
+
+1. Filtering on a specified, but empty, owner is the same as filtering on
+   promulgated=1.
+2. A specified, but empty, text field will return all charms and bundles.
+3. The promulgated filter is only applied if specified. If the value is "1",
+   only promulgated entities are returned; if it is any other value, only
+   non-promulgated entities are returned.
+
+The response contains a list of information on the charms or bundles that were
+matched by the request. If no parameters are specified, all charms and bundles
+will match. By default, only the charm store id is included.
+
+The results are sorted according to the given sort field, which may be one of
+`owner`, `name` or `series`, corresponding to the filters of the same names. If
+the field is prefixed with a hyphen (-), the sorting order will be reversed. If
+the sort field is not specified, the results are returned in
+most-relevant-first order if the text filter was specified, or an arbitrary
+order otherwise. It is possible to specify more than one sort field to get
+multi-level sorting, e.g. sort=name,-series will get charms in order of the
+charm name and then in reverse order of series.
+
+The Meta field is populated according to the include flag - see the `meta`
+path for more info on how to use this.
+
+```go
+[]SearchResult
+
+type SearchResult struct {
+	Id string
+	// Meta holds at most one entry for each meta value
+	// specified in the include flags, holding the
+	// data that would be returned by reading /meta/meta?id=id.
+	// Metadata not relevant to a particular result will not
+	// be included.
+	Meta map[string]interface{} `json:",omitempty"`
+}
+```
+
+Example: `GET search?text=word&autocomplete=1&limit=2&include=archive-size`
+
+```json
+[
+    {
+        "Id": "precise/wordpress-1",
+        "Meta": {
+            "archive-size": {
+                "Size": 1024
+            }
+        }
+    },
+    {
+        "Id": "precise/wordpress-2",
+        "Meta": {
+            "archive-size": {
+                "Size": 4242
+            }
+        }
+    }
+]
+```
+
+#### GET search/interesting
+
+This returns a list of bundles and charms which are interesting from the Juju
+GUI perspective. Those are shown on the left sidebar of the GUI when no other
+search requests are performed.
+ +`GET search/interesting[?limit=limit][&include=meta]` + +The Meta field is populated according to the include flag - see the `meta` +path for more info on how to use this. +The `limit` flag is the same as for the "search" path. + +### List + +#### GET list + +The `list` path lists charms and bundles within the store. + +
+GET list[?filter=value...][&include=meta[&include=meta...]][&sort=field]
+
+
+Any number of filters may be specified, limiting the list to items with
+attributes that match the specified filter value. Items matching any of the
+selected values for a filter are selected, so `name=1&name=2` would match items
+whose name was either 1 or 2. However, if multiple filters are specified, the
+charm must match all of them, so `name=1&series=2` will only match charms whose
+name is 1 and whose series is 2. Available filters are:
+
+* name - the charm's name.
+* owner - the charm's owner (the ~user element of the charm id)
+* promulgated - the charm has been promulgated.
+* series - the charm's series.
+* type - "charm" or "bundle" to search only one doctype or the other.
+
+Notes:
+
+1. The promulgated filter is only applied if specified. If the value is "1",
+   only promulgated entities are returned; if it is any other value, only
+   non-promulgated entities are returned.
+
+The response contains a list of information on the charms or bundles that were
+matched by the request. If no parameters are specified, all charms and bundles
+will match. By default, only the charm store id is included.
+
+The results are sorted according to the given sort field, which may be one of
+`owner`, `name` or `series`, corresponding to the filters of the same names. If
+the field is prefixed with a hyphen (-), the sorting order will be reversed. If
+the sort field is not specified, the order will be a server-side logical order.
+It is possible to specify more than one sort field to get
+multi-level sorting, e.g. sort=name,-series will get charms in order of the
+charm name and then in reverse order of series.
+
+The Meta field is populated according to the include flag - see the `meta`
+path for more info on how to use this.
+
+```go
+[]EntityResult
+
+type EntityResult struct {
+	Id string
+	// Meta holds at most one entry for each meta value
+	// specified in the include flags, holding the
+	// data that would be returned by reading /meta/meta?id=id.
+	// Metadata not relevant to a particular result will not
+	// be included.
+	Meta map[string]interface{} `json:",omitempty"`
+}
+```
+
+Example: `GET list?name=wordpress&include=archive-size`
+
+```json
+[
+    {
+        "Id": "precise/wordpress-1",
+        "Meta": {
+            "archive-size": {
+                "Size": 1024
+            }
+        }
+    },
+    {
+        "Id": "precise/wordpress-2",
+        "Meta": {
+            "archive-size": {
+                "Size": 4242
+            }
+        }
+    }
+]
+```
+
+### Debug info
+
+#### GET /debug
+
+**Not yet implemented**
+
+This returns metadata describing the current version of the software running
+the server, and any other information deemed appropriate. The specific form of
+the returned data is deliberately left unspecified for now.
+
+#### GET /debug/status
+
+Used as a health check of the service. The API will also be used for Nagios
+tests.
The items that are checked:
+
+* connection to MongoDB
+* connection to ElasticSearch (if needed, based on the charm config):
+  elasticsearch cluster status, all nodes up, etc. (see charmworld)
+* number of charms and bundles in the blobstore
+* number of promulgated items
+* time and location of service start
+* time of last ingestion process
+* did ingestion finish
+* did ingestion finish without errors (this should not count charm/bundle
+  ingest errors)
+
+```go
+type DebugStatuses map[string] struct {
+	Name   string
+	Value  string
+	Passed bool
+}
+```
+
+Example: `GET /debug/status`
+
+```json
+{
+    "mongo_connected": {
+        "Name": "MongoDB is connected",
+        "Value": "Connected",
+        "Passed": true
+    },
+    "mongo_collections": {
+        "Name": "MongoDB collections",
+        "Value": "All required collections exist",
+        "Passed": true
+    },
+    "ES_connected": {
+        "Name": "ElasticSearch is connected",
+        "Value": "Connected",
+        "Passed": true
+    },
+    "entities": {
+        "Name": "Entities in charm store",
+        "Value": "5701 charms; 2000 bundles; 42 promulgated",
+        "Passed": true
+    },
+    "server_started": {
+        "Name": "Server started",
+        "Value": "123.45.67.89 2014-09-16 11:12:29Z",
+        "Passed": true
+    }
+}
+```
+
+### Permissions
+
+All entities in the charm store have their own access control lists. Read and
+write permissions are supported for specific users and groups. By default, all
+charms and bundles are readable by everyone, meaning that anonymous users can
+retrieve archives and metadata information without restrictions. The permission
+endpoints can be used to retrieve or change entities' permissions.
+
+#### GET *id*/meta/perm
+
+This path reports the read and write ACLs for the charm or bundle.
+
+```go
+type PermResponse struct {
+	Read  []string
+	Write []string
+}
+```
+
+If the `Read` ACL is empty, the entity and its metadata cannot be retrieved by
+anyone.
+If the `Write` ACL is empty, the entity cannot be modified by anyone.
+The special user `everyone` indicates that the corresponding operation
+(read or write) can be performed by everyone, including anonymous users.
+
+Example: `GET ~joe/wordpress/meta/perm`
+
+```json
+{
+    "Read": ["everyone"],
+    "Write": ["joe"]
+}
+```
+
+#### PUT *id*/meta/perm
+
+This request updates the permissions associated with the charm or bundle.
+
+```go
+type PermResponse struct {
+	Read  []string
+	Write []string
+}
+```
+
+If the Read or Write ACL is empty or missing from the request body, that
+field will be overwritten as empty. See the *id*/meta/perm/*key* request
+to PUT only Read or Write.
+
+Example: `PUT precise/wordpress-32/meta/perm`
+
+Request body:
+```json
+{
+    "Read": ["everyone"],
+    "Write": ["joe"]
+}
+```
+
+#### GET *id*/meta/perm/*key*
+
+This path returns the contents of the given permission *key* (that can be
+`read` or `write`). The result is exactly the JSON value stored as a result of
+the PUT request to `meta/perm/key`.
+
+Example: `GET wordpress/meta/perm/read`
+
+```json
+["everyone"]
+```
+
+#### PUT *id*/meta/perm/*key*
+
+This request updates the *key* permission associated with the charm or bundle,
+where *key* can be `read` or `write`.
+
+Example: `PUT precise/wordpress-32/meta/perm/read`
+
+Request body:
+
+```json
+["joe", "frank"]
+```
+
+### Authorization
+
+#### GET /macaroon
+
+This endpoint returns a macaroon in JSON format that, when its third party
+caveats are discharged, will allow access to the charm store. No prior
+authorization is required.
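+
+For illustration only, a minimal Go sketch of fetching the macaroon follows.
+The base URL is a placeholder, and a real client would decode the result with
+a macaroon library and discharge its third party caveats rather than keeping
+it as raw JSON:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+func main() {
+	// Placeholder base URL; substitute the real charm store address.
+	resp, err := http.Get("https://charmstore.example.com/v5/macaroon")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	// Keep the macaroon as raw JSON for this sketch.
+	var m json.RawMessage
+	if err := json.Unmarshal(data, &m); err != nil {
+		panic(err)
+	}
+	fmt.Printf("got macaroon (%d bytes of JSON)\n", len(m))
+}
+```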
+
+#### GET /delegatable-macaroon
+
+This endpoint returns a macaroon in JSON format that can be passed to
+third parties to allow them to access the charm store on the user's
+behalf. If the "id" parameter is specified (URL-encoded), the returned
+macaroon will be restricted for use only with the entity with the
+given id.
+
+A delegatable macaroon will only be returned to an authorized user (not
+including admin). It will carry the same privileges as the macaroon used
+to authorize the request, but is suitable for use by third parties.
+
+#### GET /whoami
+
+This endpoint returns the user name of the client and the list of groups the
+user is a member of. This endpoint requires authorization.
+
+Example: `GET whoami`
+
+```json
+{
+    "User": "alice",
+    "Groups": ["charmers", "admin", "team-awesome"]
+}
+```
+
+The response is defined as:
+```go
+type WhoAmIResponse struct {
+	User   string
+	Groups []string
+}
+```
+
+### Logs
+
+#### GET /log
+
+This endpoint returns the log messages stored on the charm store. It is
+possible to save them by sending POST requests to the same endpoint (see
+below). For instance, the ingestion of charms/bundles produces logs that are
+collected and sent to the charm store by the ingestion client.
+
+`GET /log[?limit=count][&skip=count][&id=entity-id][&level=log-level][&type=log-type]`
+
+Each log message is defined as:
+
+```go
+type LogResponse struct {
+	// Data holds the log message as a JSON-encoded value.
+	Data json.RawMessage
+
+	// Level holds the log level as a string.
+	Level LogLevel
+
+	// Type holds the log type as a string.
+	Type LogType
+
+	// URLs holds a slice of entity URLs associated with the log message.
+	URLs []*charm.URL `json:",omitempty"`
+
+	// Time holds the time of the log.
+	Time time.Time
+}
+```
+
+The log entries are ordered by last inserted (most recent logs first), and by
+default the last 1000 logs are returned. Use the `limit` and `skip` query
+parameters to change the default behavior. Logs can further be filtered by log
+level ("info", "warning" or "error") and by related entity id. The `type` query
+parameter filters entries by type. For instance, to request all the ingestion
+errors related to the *utopic/django* charm, use the following URL:
+
+`/log?type=ingestion&level=error&id=utopic/django`
+
+#### POST /log
+
+This endpoint uploads logs to the charm store. The request content type must be
+`application/json`. The body must contain the JSON representation of a list of
+logs, each one being in this format:
+
+```go
+type Log struct {
+	// Data holds the log message as a JSON-encoded value.
+	Data *json.RawMessage
+
+	// Level holds the log level as a string.
+	Level LogLevel
+
+	// Type holds the log type as a string.
+	Type LogType
+
+	// URLs holds a slice of entity URLs associated with the log message.
+	URLs []*charm.URL `json:",omitempty"`
+}
+```
+
+Nothing is returned if the request succeeds. Otherwise, an error is returned.
+
+### Changes
+
+Each charm store has a global feed for all new published charms and bundles.
+
+#### GET changes/published
+
+This endpoint returns the ids of recently published charms and bundles, most
+recently published first.
+
+`GET changes/published[?limit=count][&from=fromdate][&to=todate]`
+
+The `fromdate` and `todate` values constrain the range of publish dates, in
+"yyyy-mm-dd" format. If `fromdate` is specified only charms published on or
+after that date are returned; if `todate` is specified, only charms published
+on or before that date are returned.
If the `limit` count is specified, it must
+be positive, and only the first count results are returned. The published time
+is in RFC3339 format.
+
+```go
+[]Published
+type Published struct {
+	Id          string
+	PublishTime time.Time
+}
+```
+
+Example: `GET changes/published`
+
+```json
+[
+    {
+        "Id": "cs:trusty/wordpress-42",
+        "PublishTime": "2014-07-31T15:04:05Z"
+    },
+    {
+        "Id": "cs:trusty/mysql-11",
+        "PublishTime": "2014-07-30T14:20:00Z"
+    },
+    {
+        "Id": "cs:bundle/mediawiki",
+        "PublishTime": "2014-07-29T13:45:10Z"
+    }
+]
+```
+
+Example: `GET changes/published?limit=10&from=2014-07-31`
+
+```json
+[
+    {
+        "Id": "cs:trusty/wordpress-42",
+        "PublishTime": "2014-07-31T15:04:05Z"
+    }
+]
+```
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/docs/bundles.md'
--- src/gopkg.in/juju/charmstore.v5-unstable/docs/bundles.md	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/docs/bundles.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,164 @@
+# Bundles in The Charmstore
+
+The charmstore allows two versions of bundle specifications, as described by
+github.com/juju/charm. The versions are numbered 3 and 4, relating to the API
+version under which they can be hosted: charmworld (API v3) supports only
+version 3 bundles, while charmstore (API v4) supports both version 3 and
+version 4 bundles.
+
+## Version 3 bundles
+
+Version 3 bundles are the currently existing bundles that specify a deployment
+as a list of services and, optionally, relations. The charmstore will not
+support the idea of a "basket" or multiple bundles within one file. However,
+existing baskets will still be imported, and split up into their component
+bundles.
+
+## Version 4 bundles
+
+Version 4 bundles are identical to version 3 bundles except for a few key
+differences: the `branch` attribute of the service spec is no longer supported,
+they may contain a machine specification, and their deployment directives are
+different from version 3 bundles.
+
+### Deploying version 4 bundles
+
+Because version 4 bundles are not yet idempotent (i.e., if a machine fails to
+come up, running the bundle again will recreate all machines in the machine
+spec), the juju deployer pessimistically assumes that a bundle is a version 4
+bundle *only* if it has a machine spec. This means that a bundle without a
+machine spec must use the version 3 style of placement directives listed below
+until further notice, when the deployer is updated. This does not affect
+version 4 bundle support within the charmstore (that is, the machine spec is
+still optional).
+
+The Juju GUI does not yet support version 4 bundles as of version 1.3.4, as the
+GUI charm contains an older version of the deployer.
+
+### Machine Specifications
+
+A machine specification identifies a machine that will be created in the Juju
+environment. These machines are named with an integer, and can have any of
+three optional attributes:
+
+* *constraints* - Constraints are specified as a string as described by the
+  Juju constraints flag (see `juju help constraints` for more information).
+* *annotations* - Annotations, provided as key-value pairs, are additional
+  information that is tacked onto the machine within the Juju state server.
+  These can be used for marking machines for your own use, or for use by Juju
+  clients.
+* *series* - You may optionally specify the series of the machine to be created
+  (e.g.: "precise" or "trusty"). If you do not specify a series, the bundle
+  series will be used.
+
+Machines are specified under the `machines` top-level attribute.
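+
+As an illustration only, the following Go sketch decodes a `machines` section
+like the one in the version 4 example at the end of this document. The
+`machineSpec` struct here is hypothetical (the real type lives in
+github.com/juju/charm), and gopkg.in/yaml.v2 is an assumed dependency:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	yaml "gopkg.in/yaml.v2"
+)
+
+// machineSpec is illustrative only; it mirrors the three optional
+// machine attributes described above.
+type machineSpec struct {
+	Constraints string            `yaml:"constraints,omitempty"`
+	Annotations map[string]string `yaml:"annotations,omitempty"`
+	Series      string            `yaml:"series,omitempty"`
+}
+
+const doc = `
+machines:
+  1:
+    constraints: "mem=16G arch=amd64"
+    annotations:
+      foo: bar
+    series: precise
+`
+
+func main() {
+	var bundle struct {
+		Machines map[int]machineSpec `yaml:"machines"`
+	}
+	if err := yaml.Unmarshal([]byte(doc), &bundle); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", bundle.Machines[1])
+}
+```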
+
+### Deployment directives
+
+Version 4 deployment directives (the `to` attribute on the service spec) are a
+YAML list of items following the format:
+
+    (<containertype>:)?(<unit>|<machine>|new)
+
+If containertype is specified, the unit is deployed into a new container of that
+type, otherwise it will be "hulk-smashed" into the specified location, by
+co-locating it with any other units that happen to be there, which may result in
+unintended behavior.
+
+The second part (after the colon) specifies where the new unit should be placed;
+it may refer to a unit of another service specified in the bundle, a machine
+id specified in the machines section, or the special name "new" which specifies
+a newly created machine.
+
+A unit placement may be specified with a service name only, in which case its
+unit number is assumed to be one more than the unit number of the previous unit
+in the list with the same service, or zero if there were none.
+
+If there are fewer elements in To than NumUnits, the last element is replicated
+to fill it. If there are no elements (or To is omitted), "new" is replicated.
+
+For example:
+
+    wordpress/0 wordpress/1 lxc:0 kvm:new
+
+specifies that the first two units get hulk-smashed onto the first two units of
+the wordpress service, the third unit gets allocated onto an lxc container on
+machine 0, and subsequent units get allocated on kvm containers on new machines.
+
+The above example is the same as this:
+
+    wordpress wordpress lxc:0 kvm:new
+
+Version 3 placement directives take the format:
+
+    ((<containertype>:)?<service>(=<unitnumber>)?|0)
+
+meaning that a machine cannot be specified beyond colocating (either through a
+container or hulk-smash) along with a specified unit of another service.
+Version 3 placement directives may be either a string of a single directive or a
+YAML list of directives in the above format. The only machine that may be
+specified is machine 0, allowing colocation on the bootstrap node.
+
+## Example Bundles
+
+### Version 3
+
+```yaml
+series: precise
+services:
+  nova-compute:
+    charm: cs:precise/nova-compute
+    units: 3
+  ceph:
+    units: 3
+    to: [nova-compute, nova-compute]
+  mysql:
+    to: 0
+  quantum:
+    units: 4
+    to: ["lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute", "lxc:nova-compute"]
+  verity:
+    to: lxc:nova-compute=2
+  semper:
+    to: nova-compute=2
+  lxc-service:
+    num_units: 5
+    to: [ "lxc:nova-compute=1", "lxc:nova-compute=2", "lxc:nova-compute=0", "lxc:nova-compute=0", "lxc:nova-compute=2" ]
+```
+
+### Version 4
+
+```yaml
+series: precise
+services:
+  # Automatically place
+  nova-compute:
+    charm: cs:precise/nova-compute
+    units: 3
+  # Specify containers
+  ceph:
+    units: 3
+    to:
+      # Specify a unit
+      - lxc:nova-compute/0
+      # Specify a machine
+      - lxc:1
+      # Create a new machine, deploy to container on that machine.
+      - lxc:new
+  # Specify a machine
+  mysql:
+    to:
+      - 0
+  # Specify colocation
+  quantum:
+    units: 4
+    to:
+      - ceph/1
+      # Assume first unit
+      - nova-compute
+      # Repeats previous directive to fill out placements
+machines:
+  1:
+    constraints: "mem=16G arch=amd64"
+    annotations:
+      foo: bar
+    series: precise
+```
=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,525 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package elasticsearch attempts to name methods to match the
+// corresponding elasticsearch endpoints. Method names like CatIndices are
+// named as such because they correspond to the /_cat/indices elasticsearch
+// endpoint.
+// There is no reason to use different vocabulary from that of elasticsearch.
+// Use the elasticsearch terminology and avoid mapping names of things.
+
+package elasticsearch // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+
+	"github.com/juju/loggo"
+	"gopkg.in/errgo.v1"
+)
+
+const (
+	// Internal provides elasticsearch's "internal" versioning system, as described in
+	// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+	Internal = "internal"
+
+	// External provides elasticsearch's "external" versioning system, as described in
+	// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+	External = "external"
+
+	// ExternalGTE provides elasticsearch's "external_gte" versioning system, as described in
+	// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+	ExternalGTE = "external_gte"
+)
+
+var log = loggo.GetLogger("charmstore.elasticsearch")
+
+var ErrConflict = errgo.New("elasticsearch document conflict")
+var ErrNotFound = errgo.New("elasticsearch document not found")
+
+type ElasticSearchError struct {
+	Err    string `json:"error"`
+	Status int    `json:"status"`
+}
+
+func (e ElasticSearchError) Error() string {
+	return e.Err
+}
+
+type Database struct {
+	Addr string
+}
+
+// Document represents a document in the elasticsearch database.
+type Document struct {
+	Found   bool            `json:"found"`
+	Id      string          `json:"_id"`
+	Index   string          `json:"_index"`
+	Type    string          `json:"_type"`
+	Version int64           `json:"_version"`
+	Source  json.RawMessage `json:"_source"`
+}
+
+// ClusterHealth represents the response from _cluster/health on elasticsearch.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-health.html
+type ClusterHealth struct {
+	ClusterName         string `json:"cluster_name"`
+	Status              string `json:"status"`
+	TimedOut            bool   `json:"timed_out"`
+	NumberOfNodes       int64  `json:"number_of_nodes"`
+	NumberOfDataNodes   int64  `json:"number_of_data_nodes"`
+	ActivePrimaryShards int64  `json:"active_primary_shards"`
+	ActiveShards        int64  `json:"active_shards"`
+	RelocatingShards    int64  `json:"relocating_shards"`
+	InitializingShards  int64  `json:"initializing_shards"`
+	UnassignedShards    int64  `json:"unassigned_shards"`
+}
+
+func (h *ClusterHealth) String() string {
+	return fmt.Sprintf("cluster_name: %s, status: %s, timed_out: %t"+
+		", number_of_nodes: %d, number_of_data_nodes: %d"+
+		", active_primary_shards: %d, active_shards: %d"+
+		", relocating_shards: %d, initializing_shards: %d"+
+		", unassigned_shards:%d", h.ClusterName, h.Status,
+		h.TimedOut, h.NumberOfNodes, h.NumberOfDataNodes,
+		h.ActivePrimaryShards, h.ActiveShards,
+		h.RelocatingShards, h.InitializingShards,
+		h.UnassignedShards)
+}
+
+// Alias creates or updates an index alias. An alias a is created,
+// or modified if it already exists, to point to i. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-aliases.html#indices-aliases
+// for further details.
+func (db *Database) Alias(i, a string) error {
+	indexes, err := db.ListIndexesForAlias(a)
+	if err != nil {
+		return errgo.Notef(err, "cannot retrieve current aliases")
+	}
+	var actions struct {
+		Actions []action `json:"actions"`
+	}
+	for _, i := range indexes {
+		actions.Actions = append(actions.Actions, action{Remove: &alias{Index: i, Alias: a}})
+	}
+	if i != "" {
+		actions.Actions = append(actions.Actions, action{Add: &alias{Index: i, Alias: a}})
+	}
+	if len(actions.Actions) == 0 {
+		return nil
+	}
+	if err := db.post(db.url("_aliases"), actions, nil); err != nil {
+		return errgo.Notef(err, "error updating aliases")
+	}
+	return nil
+}
+
+// CreateDocument attempts to create a new document at index/type_/id with the
+// contents in doc. If the document already exists then CreateDocument returns
+// ErrConflict; a non-nil error is returned if any other error occurs.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/create-doc.html#create-doc
+// for further details.
+func (db *Database) CreateDocument(index, type_, id string, doc interface{}) error {
+	if err := db.put(db.url(index, type_, id, "_create"), doc, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// DeleteDocument deletes the document at index/type_/id from the elasticsearch
+// database. See http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/delete-doc.html#delete-doc
+// for further details.
+func (db *Database) DeleteDocument(index, type_, id string) error {
+	if err := db.delete(db.url(index, type_, id), nil, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// DeleteIndex deletes the index with the given name from the database.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-index.html
+// If the index does not exist or if the database cannot be
+// reached, then an error is returned.
+func (db *Database) DeleteIndex(index string) error {
+	if err := db.delete(db.url(index), nil, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// GetDocument retrieves the document with the given index, type_ and id and
+// unmarshals the json response into v. GetDocument returns ErrNotFound if the
+// requested document is not present, and returns a non-nil error if any other
+// error occurs.
+func (db *Database) GetDocument(index, type_, id string, v interface{}) error {
+	d, err := db.GetESDocument(index, type_, id)
+	if err != nil {
+		return getError(err)
+	}
+	if !d.Found {
+		return ErrNotFound
+	}
+	if err := json.Unmarshal([]byte(d.Source), &v); err != nil {
+		return errgo.Mask(err)
+	}
+	return nil
+}
+
+// GetESDocument returns elasticsearch's view of the document stored at
+// index/type_/id. It is not an error if this document does not exist; in that
+// case the Found field of the returned Document will be false.
+func (db *Database) GetESDocument(index, type_, id string) (Document, error) {
+	var d Document
+	if err := db.get(db.url(index, type_, id), nil, &d); err != nil {
+		return Document{}, getError(err)
+	}
+	return d, nil
+}
+
+// HasDocument tests to see whether a document with the given index, type_, and
+// id exists in the elasticsearch database. A non-nil error is returned if there
+// is an error communicating with the elasticsearch database.
+// HasDocument reports whether a document with the given index, type_ and id
+// exists in the elasticsearch database. A non-nil error is returned if there is
+// an error communicating with the elasticsearch database.
+func (db *Database) HasDocument(index, type_, id string) (bool, error) {
+	var d Document
+	if err := db.get(db.url(index, type_, id)+"?_source=false", nil, &d); err != nil {
+		return false, getError(err)
+	}
+	return d.Found, nil
+}
+
+// Health checks the health status of the elasticsearch cluster and retrieves
+// general data about it, by calling GET on /_cluster/health.
+func (db *Database) Health() (ClusterHealth, error) {
+	var result ClusterHealth
+	if err := db.get(db.url("_cluster", "health"), nil, &result); err != nil {
+		return ClusterHealth{}, getError(err)
+	}
+
+	return result, nil
+}
+
+// ListAllIndexes retrieves the list of all user indexes in the elasticsearch database.
+// Indexes that are generated to support plugins are filtered out of the list that
+// is returned.
+func (db *Database) ListAllIndexes() ([]string, error) {
+	var result map[string]interface{}
+	if err := db.get(db.url("_aliases"), nil, &result); err != nil {
+		return nil, getError(err)
+	}
+	var indexes []string
+	for key := range result {
+		// Some ElasticSearch plugins create indexes (e.g. ".marvel...") for their
+		// own use. Ignore any that start with a dot.
+		if !strings.HasPrefix(key, ".") {
+			indexes = append(indexes, key)
+		}
+	}
+	return indexes, nil
+}
+
+// ListIndexesForAlias retrieves the list of all indexes in the elasticsearch database
+// that have the alias a.
+func (db *Database) ListIndexesForAlias(a string) ([]string, error) {
+	var result map[string]struct{}
+	if err := db.get(db.url("*", "_alias", a), nil, &result); err != nil {
+		return nil, getError(err)
+	}
+	var indexes []string
+	for key := range result {
+		indexes = append(indexes, key)
+	}
+	return indexes, nil
+}
+
+// PostDocument creates a new document with an automatically generated id in the
+// given index and returns that id. The type_ parameter controls how the document
+// will be mapped in the index. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html
+// for more details.
+func (db *Database) PostDocument(index, type_ string, doc interface{}) (string, error) {
+	var resp struct {
+		ID string `json:"_id"`
+	}
+	if err := db.post(db.url(index, type_), doc, &resp); err != nil {
+		return "", getError(err)
+	}
+	return resp.ID, nil
+}
+
+// PutDocument creates or updates the document with the given index, type_ and
+// id. The type_ parameter controls how the document will be mapped in the index.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html
+// for more details.
+func (db *Database) PutDocument(index, type_, id string, doc interface{}) error {
+	if err := db.put(db.url(index, type_, id), doc, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
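The index and alias calls combine naturally into a reindexing pattern in which an alias is rolled forward to a freshly created index. A sketch, with invented names and settings; PutIndex is defined just below.

// import es "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"

// rollIndex creates a fresh index and then points the alias at it.
// Alias (above) removes the alias from whichever indexes currently hold it,
// so readers using the alias switch over in a single update.
func rollIndex(db *es.Database, newIndex, aliasName string) error {
	settings := map[string]interface{}{
		"settings": map[string]interface{}{"number_of_shards": 1},
	}
	if err := db.PutIndex(newIndex, settings); err != nil {
		return err
	}
	return db.Alias(newIndex, aliasName)
}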
+// PutDocumentVersion creates or updates the document in the given index if the version
+// parameter is the same as the currently stored version. The type_ parameter
+// controls how the document will be indexed. PutDocumentVersion returns
+// ErrConflict if the data cannot be stored due to a version mismatch, and a non-nil error if
+// any other error occurs.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning
+// for more information.
+func (db *Database) PutDocumentVersion(index, type_, id string, version int64, doc interface{}) error {
+	return db.PutDocumentVersionWithType(index, type_, id, version, "internal", doc)
+}
+
+// PutDocumentVersionWithType creates or updates the document in the given index if the version
+// parameter is the same as the currently stored version. The type_ parameter
+// controls how the document will be indexed. PutDocumentVersionWithType returns
+// ErrConflict if the data cannot be stored due to a version mismatch, and a non-nil error if
+// any other error occurs.
+//
+// The constants Internal, External and ExternalGTE represent some of the available
+// version types. Other version types may also be available, please check the elasticsearch
+// documentation.
+//
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-versioning
+// and http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types for more information.
+func (db *Database) PutDocumentVersionWithType(
+	index, type_, id string,
+	version int64,
+	versionType string,
+	doc interface{}) error {
+	url := fmt.Sprintf("%s?version=%d&version_type=%s", db.url(index, type_, id), version, versionType)
+	if err := db.put(url, doc, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// PutIndex creates the index with the given configuration.
+func (db *Database) PutIndex(index string, config interface{}) error {
+	if err := db.put(db.url(index), config, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// PutMapping creates or updates the mapping with the given configuration.
+func (db *Database) PutMapping(index, type_ string, config interface{}) error {
+	if err := db.put(db.url(index, "_mapping", type_), config, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// RefreshIndex posts a _refresh to the index in the database. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-refresh.html
+// for further details.
+func (db *Database) RefreshIndex(index string) error {
+	if err := db.post(db.url(index, "_refresh"), nil, nil); err != nil {
+		return getError(err)
+	}
+	return nil
+}
+
+// Search performs the query specified in q on the values in index/type_ and returns a
+// SearchResult.
+func (db *Database) Search(index, type_ string, q QueryDSL) (SearchResult, error) {
+	var sr SearchResult
+	if err := db.get(db.url(index, type_, "_search"), q, &sr); err != nil {
+		return SearchResult{}, errgo.Notef(getError(err), "search failed")
+	}
+	return sr, nil
+}
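A sketch of optimistic concurrency control with the external version types declared at the top of this file. The index, type and id are invented, and rev would typically come from the system of record (for example a mongodb revision counter).

// import es "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"

// syncDoc indexes doc at revision rev. With ExternalGTE, elasticsearch
// accepts the write only if rev is greater than or equal to the stored
// version, so replaying an old revision surfaces as ErrConflict rather
// than clobbering newer data.
func syncDoc(db *es.Database, rev int64, doc interface{}) error {
	err := db.PutDocumentVersionWithType("docs", "charm", "wordpress", rev, es.ExternalGTE, doc)
	if err == es.ErrConflict {
		// A newer revision is already indexed; nothing to do.
		return nil
	}
	return err
}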
+// do performs a request on the elasticsearch server. If body is not nil it will be
+// marshaled as a json object and sent with the request. If v is non-nil the response
+// body will be unmarshaled into the value it points to.
+func (db *Database) do(method, url string, body, v interface{}) error {
+	log.Debugf(">>> %s %s", method, url)
+	var r io.Reader
+	if body != nil {
+		b, err := json.Marshal(body)
+		if err != nil {
+			return errgo.Notef(err, "cannot marshal body")
+		}
+		log.Debugf(">>> %s", b)
+		r = bytes.NewReader(b)
+	}
+	req, err := http.NewRequest(method, url, r)
+	if err != nil {
+		log.Debugf("*** %s", err)
+		return errgo.Notef(err, "cannot create request")
+	}
+	if body != nil {
+		req.Header.Add("Content-Type", "application/json")
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		log.Debugf("*** %s", err)
+		return errgo.Mask(err)
+	}
+	defer resp.Body.Close()
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Debugf("*** %s", err)
+		return errgo.Notef(err, "cannot read response")
+	}
+	log.Debugf("<<< %s", resp.Status)
+	log.Debugf("<<< %s", b)
+	var eserr *ElasticSearchError
+	// TODO(mhilton) don't try to parse every response as an error.
+	if err = json.Unmarshal(b, &eserr); err != nil {
+		log.Debugf("*** %s", err)
+	}
+	if eserr.Status != 0 {
+		return eserr
+	}
+	if v != nil {
+		if err = json.Unmarshal(b, v); err != nil {
+			log.Debugf("*** %s", err)
+			return errgo.Notef(err, "cannot unmarshal response")
+		}
+	}
+	return nil
+}
+
+// delete makes a DELETE request to the database url. A non-nil body will be
+// sent with the request and if v is not nil then the response will be unmarshaled
+// into the value it points to.
+func (db *Database) delete(url string, body, v interface{}) error {
+	return db.do("DELETE", url, body, v)
+}
+
+// get makes a GET request to the database url. A non-nil body will be
+// sent with the request and if v is not nil then the response will be unmarshaled
+// into the value it points to.
+func (db *Database) get(url string, body, v interface{}) error {
+	return db.do("GET", url, body, v)
+}
+
+// post makes a POST request to the database url. A non-nil body will be
+// sent with the request and if v is not nil then the response will be unmarshaled
+// into the value it points to.
+func (db *Database) post(url string, body, v interface{}) error {
+	return db.do("POST", url, body, v)
+}
+
+// put makes a PUT request to the database url. A non-nil body will be
+// sent with the request and if v is not nil then the response will be unmarshaled
+// into the value it points to.
+func (db *Database) put(url string, body, v interface{}) error {
+	return db.do("PUT", url, body, v)
+}
+
+// url constructs the URL for accessing the database.
+func (db *Database) url(pathParts ...string) string {
+	path := path.Join(pathParts...)
+	url := &url.URL{
+		Scheme: "http",
+		Host:   db.Addr,
+		Path:   path,
+	}
+	return url.String()
+}
+
+// SearchResult is the result returned after performing a search in elasticsearch.
+type SearchResult struct {
+	Hits struct {
+		Total    int     `json:"total"`
+		MaxScore float64 `json:"max_score"`
+		Hits     []Hit   `json:"hits"`
+	} `json:"hits"`
+	Took     int  `json:"took"`
+	TimedOut bool `json:"timed_out"`
+}
+
+// Hit represents an individual search hit returned from elasticsearch.
+type Hit struct {
+	Index  string          `json:"_index"`
+	Type   string          `json:"_type"`
+	ID     string          `json:"_id"`
+	Score  float64         `json:"_score"`
+	Source json.RawMessage `json:"_source"`
+	Fields Fields          `json:"fields"`
+}
+
+// Fields holds the per-hit field values requested in a search.
+type Fields map[string][]interface{}
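A sketch of driving Search and consuming the SearchResult above. The QueryDSL and TermQuery types come from query.go, later in this listing; the index and field names are invented.

// import es "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"

func namesMatching(db *es.Database, term string) ([]string, error) {
	q := es.QueryDSL{
		Fields: []string{"name"},
		Size:   10,
		Query:  es.TermQuery{Field: "name", Value: term},
	}
	res, err := db.Search("docs", "charm", q)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(res.Hits.Hits))
	for _, hit := range res.Hits.Hits {
		// Fields.GetString returns "" when the field is missing
		// or is not a string.
		names = append(names, hit.Fields.GetString("name"))
	}
	return names, nil
}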
+// Get retrieves the first value of key in the fields map. If no such value
+// exists then it will return nil.
+func (f Fields) Get(key string) interface{} {
+	if len(f[key]) < 1 {
+		return nil
+	}
+	return f[key][0]
+}
+
+// GetString retrieves the first value of key in the fields map, and coerces it into a
+// string. If no such value exists or the value is not a string, then "" will be returned.
+func (f Fields) GetString(key string) string {
+	s, ok := f.Get(key).(string)
+	if !ok {
+		return ""
+	}
+	return s
+}
+
+// EscapeRegexp returns the supplied string with any special characters escaped.
+// A regular expression match on the returned string will match exactly the characters
+// in the supplied string.
+func EscapeRegexp(s string) string {
+	return regexpReplacer.Replace(s)
+}
+
+var regexpReplacer = strings.NewReplacer(
+	`.`, `\.`,
+	`?`, `\?`,
+	`+`, `\+`,
+	`*`, `\*`,
+	`|`, `\|`,
+	`{`, `\{`,
+	`}`, `\}`,
+	`[`, `\[`,
+	`]`, `\]`,
+	`(`, `\(`,
+	`)`, `\)`,
+	`"`, `\"`,
+	`\`, `\\`,
+	`#`, `\#`,
+	`@`, `\@`,
+	`&`, `\&`,
+	`<`, `\<`,
+	`>`, `\>`,
+	`~`, `\~`,
+)
+
+// alias describes an alias in elasticsearch.
+type alias struct {
+	Index string `json:"index"`
+	Alias string `json:"alias"`
+}
+
+// action is an action that can be performed on an alias.
+type action struct {
+	Remove *alias `json:"remove,omitempty"`
+	Add    *alias `json:"add,omitempty"`
+}
+
+// getError derives an error from the underlying error returned
+// by elasticsearch.
+func getError(err error) error {
+	if eserr, ok := err.(*ElasticSearchError); ok {
+		switch eserr.Status {
+		case http.StatusNotFound:
+			return ErrNotFound
+		case http.StatusConflict:
+			return ErrConflict
+		default:
+			return err
+		}
+	}
+	return err
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/elasticsearch_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,442 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package elasticsearch_test // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" + +import ( + "encoding/json" + "testing" + "time" + + jujutesting "github.com/juju/testing" + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + es "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +type Suite struct { + jujutesting.IsolationSuite + storetesting.ElasticSearchSuite + Indexes []string + TestIndex string +} + +func (s *Suite) SetUpSuite(c *gc.C) { + s.IsolationSuite.SetUpSuite(c) + s.ElasticSearchSuite.SetUpSuite(c) +} +func (s *Suite) TearDownSuite(c *gc.C) { + s.ElasticSearchSuite.TearDownSuite(c) + s.IsolationSuite.TearDownSuite(c) +} +func (s *Suite) SetUpTest(c *gc.C) { + s.IsolationSuite.SetUpTest(c) + s.ElasticSearchSuite.SetUpTest(c) + s.TestIndex = s.NewIndex(c) + err := s.ES.PutIndex(s.TestIndex, map[string]interface{}{"settings": map[string]interface{}{"number_of_shards": 1}}) + c.Assert(err, gc.Equals, nil) + err = s.ES.PutDocument(s.TestIndex, "testtype", s.TestIndex, struct{}{}) + c.Assert(err, gc.Equals, nil) + err = s.ES.RefreshIndex(s.TestIndex) + c.Assert(err, gc.Equals, nil) +} +func (s *Suite) TearDownTest(c *gc.C) { + for _, i := range s.Indexes { + s.ES.DeleteIndex(i) + } + s.ElasticSearchSuite.TearDownTest(c) + s.IsolationSuite.TearDownTest(c) +} + +func (s *Suite) NewIndex(c *gc.C) string { + uuid, err := utils.NewUUID() + c.Assert(err, gc.Equals, nil) + idx := time.Now().Format("20060102150405") + "-" + uuid.String() + s.Indexes = append(s.Indexes, idx) + return idx +} + +var _ = gc.Suite(&Suite{}) + +func (s *Suite) TestSuccessfulPostDocument(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + id, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) + c.Assert(err, gc.IsNil) + c.Assert(id, gc.NotNil) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", id, &result) + c.Assert(err, gc.IsNil) +} + +func (s *Suite) TestSuccessfulPutNewDocument(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + // Show that no document with this id exists. + exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, false) + err = s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) + c.Assert(err, gc.IsNil) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) + c.Assert(result["a"], gc.Equals, "b") + exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) +} + +func (s *Suite) TestSuccessfulPutUpdatedDocument(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + err := s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) + c.Assert(err, gc.IsNil) + doc["a"] = "c" + err = s.ES.PutDocument(s.TestIndex, "testtype", "a", doc) + c.Assert(err, gc.IsNil) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) + c.Assert(result["a"], gc.Equals, "c") +} + +func (s *Suite) TestPutVersionWithTypeNewDocument(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + // Show that no document with this id exists. 
+ exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, false) + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) + c.Assert(err, gc.IsNil) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) + c.Assert(result["a"], gc.Equals, "b") + exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) +} + +func (s *Suite) TestPutVersionWithTypeUpdateCurrentDocumentVersion(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + // Show that no document with this id exists. + exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, false) + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) + c.Assert(err, gc.IsNil) + doc["a"] = "c" + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) + c.Assert(err, gc.IsNil) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) + c.Assert(result["a"], gc.Equals, "c") + exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) +} + +func (s *Suite) TestPutVersionWithTypeUpdateLaterDocumentVersion(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + // Show that no document with this id exists. + exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, false) + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) + c.Assert(err, gc.IsNil) + doc["a"] = "c" + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 3, es.ExternalGTE, doc) + c.Assert(err, gc.IsNil) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) + c.Assert(result["a"], gc.Equals, "c") + exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) +} + +func (s *Suite) TestPutVersionWithTypeUpdateEarlierDocumentVersion(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + // Show that no document with this id exists. 
+ exists, err := s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, false) + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 3, es.ExternalGTE, doc) + c.Assert(err, gc.IsNil) + doc["a"] = "c" + err = s.ES.PutDocumentVersionWithType(s.TestIndex, "testtype", "a", 1, es.ExternalGTE, doc) + c.Assert(err, gc.Equals, es.ErrConflict) + var result map[string]string + err = s.ES.GetDocument(s.TestIndex, "testtype", "a", &result) + c.Assert(result["a"], gc.Equals, "b") + exists, err = s.ES.HasDocument(s.TestIndex, "testtype", "a") + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) +} + +func (s *Suite) TestDelete(c *gc.C) { + doc := map[string]string{ + "a": "b", + } + _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) + c.Assert(err, gc.IsNil) + err = s.ES.DeleteIndex(s.TestIndex) + c.Assert(err, gc.IsNil) +} + +func (s *Suite) TestDeleteErrorOnNonExistingIndex(c *gc.C) { + err := s.ES.DeleteIndex("nope") + c.Assert(err, gc.NotNil) + c.Assert(err.Error(), gc.Equals, "elasticsearch document not found") +} + +func (s *Suite) TestIndexesCreatedAutomatically(c *gc.C) { + doc := map[string]string{"a": "b"} + _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) + c.Assert(err, gc.IsNil) + indexes, err := s.ES.ListAllIndexes() + c.Assert(err, gc.IsNil) + c.Assert(indexes, gc.Not(gc.HasLen), 0) + found := false + for _, index2 := range indexes { + if index2 == s.TestIndex { + found = true + } + } + c.Assert(found, gc.Equals, true) +} + +func (s *Suite) TestHealthIsWorking(c *gc.C) { + result, err := s.ES.Health() + c.Assert(err, gc.IsNil) + c.Assert(result.ClusterName, gc.NotNil) + c.Assert(result.ActivePrimaryShards, gc.NotNil) + c.Assert(result.ActiveShards, gc.NotNil) + c.Assert(result.InitializingShards, gc.NotNil) + c.Assert(result.NumberOfDataNodes, gc.NotNil) + c.Assert(result.NumberOfNodes, gc.NotNil) + c.Assert(result.RelocatingShards, gc.NotNil) + c.Assert(result.Status, gc.NotNil) + c.Assert(result.TimedOut, gc.NotNil) + c.Assert(result.UnassignedShards, gc.NotNil) +} + +func (s *Suite) TestSearch(c *gc.C) { + doc := map[string]string{"foo": "bar"} + _, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) + c.Assert(err, gc.IsNil) + doc["foo"] = "baz" + id2, err := s.ES.PostDocument(s.TestIndex, "testtype", doc) + c.Assert(err, gc.IsNil) + s.ES.RefreshIndex(s.TestIndex) + q := es.QueryDSL{ + Query: es.TermQuery{Field: "foo", Value: "baz"}, + Fields: []string{"foo"}, + } + results, err := s.ES.Search(s.TestIndex, "testtype", q) + c.Assert(err, gc.IsNil) + c.Assert(results.Hits.Total, gc.Equals, 1) + c.Assert(results.Hits.Hits[0].ID, gc.Equals, id2) + c.Assert(results.Hits.Hits[0].Fields.GetString("foo"), gc.Equals, "baz") +} + +func (s *Suite) TestPutMapping(c *gc.C) { + var mapping = map[string]interface{}{ + "testtype": map[string]interface{}{ + "properties": map[string]interface{}{ + "foo": map[string]interface{}{ + "type": "string", + }, + }, + }, + } + err := s.ES.PutMapping(s.TestIndex, "testtype", mapping) + c.Assert(err, gc.IsNil) +} + +func (s *Suite) TestEscapeRegexp(c *gc.C) { + var tests = []struct { + about string + original string + expected string + }{{ + about: `plain string`, + original: `foo`, + expected: `foo`, + }, { + about: `escape .`, + original: `foo.bar`, + expected: `foo\.bar`, + }, { + about: `escape ?`, + original: `foo?bar`, + expected: `foo\?bar`, + }, { + about: `escape +`, + original: `foo+bar`, + expected: `foo\+bar`, + }, { + about: `escape *`, + original: `foo*bar`, 
+		expected: `foo\*bar`,
+	}, {
+		about:    `escape |`,
+		original: `foo|bar`,
+		expected: `foo\|bar`,
+	}, {
+		about:    `escape {`,
+		original: `foo{bar`,
+		expected: `foo\{bar`,
+	}, {
+		about:    `escape }`,
+		original: `foo}bar`,
+		expected: `foo\}bar`,
+	}, {
+		about:    `escape [`,
+		original: `foo[bar`,
+		expected: `foo\[bar`,
+	}, {
+		about:    `escape ]`,
+		original: `foo]bar`,
+		expected: `foo\]bar`,
+	}, {
+		about:    `escape (`,
+		original: `foo(bar`,
+		expected: `foo\(bar`,
+	}, {
+		about:    `escape )`,
+		original: `foo)bar`,
+		expected: `foo\)bar`,
+	}, {
+		about:    `escape "`,
+		original: `foo"bar`,
+		expected: `foo\"bar`,
+	}, {
+		about:    `escape \`,
+		original: `foo\bar`,
+		expected: `foo\\bar`,
+	}, {
+		about:    `escape #`,
+		original: `foo#bar`,
+		expected: `foo\#bar`,
+	}, {
+		about:    `escape @`,
+		original: `foo@bar`,
+		expected: `foo\@bar`,
+	}, {
+		about:    `escape &`,
+		original: `foo&bar`,
+		expected: `foo\&bar`,
+	}, {
+		about:    `escape <`,
+		original: `foo<bar`,
+		expected: `foo\<bar`,
+	}, {
+		about:    `escape >`,
+		original: `foo>bar`,
+		expected: `foo\>bar`,
+	}, {
+		about:    `escape ~`,
+		original: `foo~bar`,
+		expected: `foo\~bar`,
+	}, {
+		about:    `escape start`,
+		original: `*foo`,
+		expected: `\*foo`,
+	}, {
+		about:    `escape end`,
+		original: `foo\`,
+		expected: `foo\\`,
+	}, {
+		about:    `escape many`,
+		original: `\"*\`,
+		expected: `\\\"\*\\`,
+	}}
+	for i, test := range tests {
+		c.Logf("%d: %s", i, test.about)
+		c.Assert(es.EscapeRegexp(test.original), gc.Equals, test.expected)
+	}
+}
+
+func (s *Suite) TestAlias(c *gc.C) {
+	uuid, err := utils.NewUUID()
+	c.Assert(err, gc.Equals, nil)
+	alias := uuid.String()
+	index1 := alias + "-1"
+	index2 := alias + "-2"
+
+	// Create first index
+	err = s.ES.PutIndex(index1, struct{}{})
+	c.Assert(err, gc.Equals, nil)
+	defer s.ES.DeleteIndex(index1)
+
+	// Create second index
+	err = s.ES.PutIndex(index2, struct{}{})
+	c.Assert(err, gc.Equals, nil)
+	defer s.ES.DeleteIndex(index2)
+
+	// Check alias is not aliased to anything
+	indexes, err := s.ES.ListIndexesForAlias(alias)
+	c.Assert(err, gc.Equals, nil)
+	c.Assert(indexes, gc.HasLen, 0)
+
+	// Associate alias with index 1
+	err = s.ES.Alias(index1, alias)
+	c.Assert(err, gc.Equals, nil)
+	indexes, err = s.ES.ListIndexesForAlias(alias)
+	c.Assert(err, gc.Equals, nil)
+	c.Assert(indexes, gc.HasLen, 1)
+	c.Assert(indexes[0], gc.Equals, index1)
+
+	// Associate alias with index 2, removing it from index 1
+	err = s.ES.Alias(index2, alias)
+	c.Assert(err, gc.Equals, nil)
+	indexes, err = s.ES.ListIndexesForAlias(alias)
+	c.Assert(err, gc.Equals, nil)
+	c.Assert(indexes, gc.HasLen, 1)
+	c.Assert(indexes[0], gc.Equals, index2)
+}
+
+func (s *Suite) TestDecodingHealthStatus(c *gc.C) {
+	const healthMessage = `{
+	"cluster_name":"elasticsearch",
+	"status": "green",
+	"timed_out": true,
+	"number_of_nodes": 2,
+	"number_of_data_nodes": 2,
+	"active_primary_shards": 14,
+	"active_shards": 28,
+	"relocating_shards": 2,
+	"initializing_shards": 2,
+	"unassigned_shards": 2
+	}`
+
+	var h es.ClusterHealth
+	err := json.Unmarshal([]byte(healthMessage), &h)
+	c.Assert(err, gc.IsNil)
+	c.Assert(h.ClusterName, gc.Equals, "elasticsearch")
+	c.Assert(h.Status, gc.Equals, "green")
+	c.Assert(h.TimedOut, gc.Equals, true)
+	c.Assert(h.NumberOfNodes, gc.Equals, int64(2))
+	c.Assert(h.NumberOfDataNodes, gc.Equals, int64(2))
+	c.Assert(h.ActivePrimaryShards, gc.Equals, int64(14))
+	c.Assert(h.ActiveShards, gc.Equals, int64(28))
+	c.Assert(h.RelocatingShards, gc.Equals, int64(2))
+	c.Assert(h.InitializingShards, gc.Equals, int64(2))
+	c.Assert(h.UnassignedShards, gc.Equals, int64(2))
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,252 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package elasticsearch // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Query DSL - Queries
+
+// Query represents a query in the elasticsearch DSL.
+type Query interface {
+	json.Marshaler
+}
+
+// Filter represents a filter in the elasticsearch DSL.
+type Filter interface {
+	json.Marshaler
+}
+
+// Function is a function definition for use with a FunctionScoreQuery.
+type Function interface{}
+
+// BoostField creates a string which represents a field name with a boost value.
+func BoostField(field string, boost float64) string {
+	return fmt.Sprintf("%s^%f", field, boost)
+}
+
+// MatchAllQuery provides a query that matches all
+// documents in the index.
+type MatchAllQuery struct{}
+
+func (m MatchAllQuery) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("match_all", struct{}{})
+}
+
+// MatchQuery provides a query that matches against
+// a complete field.
+type MatchQuery struct {
+	Field string
+	Query string
+	Type  string
+}
+
+func (m MatchQuery) MarshalJSON() ([]byte, error) {
+	params := map[string]interface{}{"query": m.Query}
+	if m.Type != "" {
+		params["type"] = m.Type
+	}
+
+	return marshalNamedObject("match", map[string]interface{}{m.Field: params})
+}
+
+// MultiMatchQuery provides a query that matches on a number of fields.
+type MultiMatchQuery struct {
+	Query  string
+	Fields []string
+}
+
+func (m MultiMatchQuery) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("multi_match", map[string]interface{}{
+		"query":  m.Query,
+		"fields": m.Fields,
+	})
+}
+
+// FilteredQuery provides a query that includes a filter.
+type FilteredQuery struct {
+	Query  Query
+	Filter Filter
+}
+
+func (f FilteredQuery) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("filtered", map[string]interface{}{
+		"query":  f.Query,
+		"filter": f.Filter,
+	})
+}
+
+// FunctionScoreQuery provides a query that adjusts the scoring of a
+// query by applying functions to it.
+type FunctionScoreQuery struct {
+	Query     Query
+	Functions []Function
+}
+
+func (f FunctionScoreQuery) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("function_score", map[string]interface{}{
+		"query":     f.Query,
+		"functions": f.Functions,
+	})
+}
+
+// TermQuery provides a query that matches a term in a field.
+type TermQuery struct {
+	Field string
+	Value string
+}
+
+func (t TermQuery) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("term", map[string]interface{}{
+		t.Field: t.Value,
+	})
+}
+
+// DecayFunction provides a function that boosts depending on
+// the difference in values of a certain field. See
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-function-score-query.html#_decay_functions
+// for details.
+type DecayFunction struct {
+	Function string
+	Field    string
+	Scale    string
+}
+
+func (f DecayFunction) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject(f.Function, map[string]interface{}{
+		f.Field: map[string]interface{}{
+			"scale": f.Scale,
+		},
+	})
+}
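These query types serialize to the JSON shapes elasticsearch expects via their MarshalJSON methods. A quick sketch of composing and marshalling one; the field names and scale are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	es "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
)

func main() {
	q := es.FunctionScoreQuery{
		Query: es.MatchQuery{Field: "description", Query: "database"},
		Functions: []es.Function{
			es.DecayFunction{Function: "gauss", Field: "uploaded", Scale: "30d"},
		},
	}
	b, err := json.Marshal(q)
	if err != nil {
		log.Fatal(err)
	}
	// Prints the nested object built by the MarshalJSON implementations above
	// (keys appear in encoding/json's sorted order):
	// {"function_score":{"functions":[{"gauss":{"uploaded":{"scale":"30d"}}}],"query":{"match":{"description":{"query":"database"}}}}}
	fmt.Println(string(b))
}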
+// BoostFactorFunction provides a function that boosts results by the specified amount.
+type BoostFactorFunction struct {
+	Filter      Filter  `json:"filter,omitempty"`
+	BoostFactor float64 `json:"boost_factor"`
+}
+
+// FieldValueFactorFunction boosts the results by the value of a field in the document.
+type FieldValueFactorFunction struct {
+	Field    string  `json:"field"`
+	Factor   float64 `json:"factor,omitempty"`
+	Modifier string  `json:"modifier,omitempty"`
+}
+
+func (f FieldValueFactorFunction) MarshalJSON() ([]byte, error) {
+	type ffvf FieldValueFactorFunction
+	return marshalNamedObject("field_value_factor", ffvf(f))
+}
+
+// AndFilter provides a filter that matches if all of the internal
+// filters match.
+type AndFilter []Filter
+
+func (a AndFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("and", map[string]interface{}{
+		"filters": []Filter(a),
+	})
+}
+
+// OrFilter provides a filter that matches if any of the internal
+// filters match.
+type OrFilter []Filter
+
+func (o OrFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("or", map[string]interface{}{
+		"filters": []Filter(o),
+	})
+}
+
+// NotFilter provides a filter that matches the opposite of the
+// wrapped filter.
+type NotFilter struct {
+	Filter Filter
+}
+
+func (n NotFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("not", n.Filter)
+}
+
+// QueryFilter provides a filter that matches when a query matches
+// on a result.
+type QueryFilter struct {
+	Query Query
+}
+
+func (q QueryFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("query", q.Query)
+}
+
+// RegexpFilter provides a filter that matches a field against a
+// regular expression.
+type RegexpFilter struct {
+	Field  string
+	Regexp string
+}
+
+func (r RegexpFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("regexp", map[string]string{r.Field: r.Regexp})
+}
+
+// TermFilter provides a filter that requires a field to match.
+type TermFilter struct {
+	Field string
+	Value string
+}
+
+func (t TermFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("term", map[string]string{t.Field: t.Value})
+}
+
+// ExistsFilter provides a filter that requires a field to be present.
+type ExistsFilter string
+
+func (f ExistsFilter) MarshalJSON() ([]byte, error) {
+	return marshalNamedObject("exists", map[string]string{"field": string(f)})
+}
+
+// QueryDSL provides a structure to put together a query using the
+// elasticsearch DSL.
+type QueryDSL struct {
+	Fields []string `json:"fields"`
+	From   int      `json:"from,omitempty"`
+	Size   int      `json:"size,omitempty"`
+	Query  Query    `json:"query,omitempty"`
+	Sort   []Sort   `json:"sort,omitempty"`
+}
+
+// Sort describes a sort on a single field.
+type Sort struct {
+	Field string
+	Order Order
+}
+
+// Order holds a sort direction.
+type Order struct {
+	Order string `json:"order"`
+}
+
+func (s Sort) MarshalJSON() ([]byte, error) {
+	return json.Marshal(map[string]Order{
+		s.Field: {s.Order.Order},
+	})
+}
+
+// Ascending is an Order that orders a sort by ascending through the values.
+var Ascending = Order{"asc"}
+
+// Descending is an Order that orders a sort by descending through the values.
+var Descending = Order{"desc"}
+
+// marshalNamedObject provides a helper that creates json objects in a form
+// often required by the elasticsearch query DSL. The objects created
+// take the following form:
+//	{
+//		name: obj
+//	}
+func marshalNamedObject(name string, obj interface{}) ([]byte, error) {
+	return json.Marshal(map[string]interface{}{name: obj})
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/elasticsearch/query_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,149 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package elasticsearch_test // import "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+
+import (
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+
+	. "gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+)
+
+type QuerySuite struct{}
+
+var _ = gc.Suite(&QuerySuite{})
+
+func (s *QuerySuite) TestJSONEncodings(c *gc.C) {
+	var tests = []struct {
+		about string
+		query interface{}
+		json  string
+	}{{
+		about: "term query",
+		query: TermQuery{Field: "foo", Value: "bar"},
+		json:  `{"term": {"foo": "bar"}}`,
+	}, {
+		about: "match all query",
+		query: MatchAllQuery{},
+		json:  `{"match_all": {}}`,
+	}, {
+		about: "match query",
+		query: MatchQuery{Field: "foo", Query: "bar"},
+		json:  `{"match": {"foo": {"query": "bar"}}}`,
+	}, {
+		about: "match query with type",
+		query: MatchQuery{Field: "foo", Query: "bar", Type: "baz"},
+		json:  `{"match": {"foo": {"query": "bar", "type": "baz"}}}`,
+	}, {
+		about: "multi match query",
+		query: MultiMatchQuery{Query: "foo", Fields: []string{BoostField("bar", 2), "baz"}},
+		json:  `{"multi_match": {"query": "foo", "fields": ["bar^2.000000", "baz"]}}`,
+	}, {
+		about: "filtered query",
+		query: FilteredQuery{
+			Query:  TermQuery{Field: "foo", Value: "bar"},
+			Filter: TermFilter{Field: "baz", Value: "quz"}},
+		json: `{"filtered": {"query": {"term": {"foo": "bar"}}, "filter": {"term": {"baz": "quz"}}}}`,
+	}, {
+		about: "function score query",
+		query: FunctionScoreQuery{
+			Query: TermQuery{Field: "foo", Value: "bar"},
+			Functions: []Function{
+				DecayFunction{
+					Function: "baz",
+					Field:    "foo",
+					Scale:    "quz",
+				},
+			},
+		},
+		json: `{"function_score": {"query": {"term": {"foo": "bar"}}, "functions": [{"baz": {"foo":{"scale": "quz"}}}]}}`,
+	}, {
+		about: "term filter",
+		query: TermFilter{Field: "foo", Value: "bar"},
+		json:  `{"term": {"foo": "bar"}}`,
+	}, {
+		about: "and filter",
+		query: AndFilter{
+			TermFilter{Field: "foo", Value: "bar"},
+			TermFilter{Field: "baz", Value: "quz"},
+		},
+		json: `{"and": {"filters": [{"term": {"foo": "bar"}}, {"term": {"baz": "quz"}}]}}`,
+	}, {
+		about: "or filter",
+		query: OrFilter{
+			TermFilter{Field: "foo", Value: "bar"},
+			TermFilter{Field: "baz", Value: "quz"},
+		},
+		json: `{"or": {"filters": [{"term": {"foo": "bar"}}, {"term": {"baz": "quz"}}]}}`,
+	}, {
+		about: "not filter",
+		query: NotFilter{TermFilter{Field: "foo", Value: "bar"}},
+		json:  `{"not": {"term": {"foo": "bar"}}}`,
+	}, {
+		about: "query filter",
+		query: QueryFilter{Query: TermQuery{Field: "foo", Value: "bar"}},
+		json:  `{"query": {"term": {"foo": "bar"}}}`,
+	}, {
+		about: "regexp filter",
+		query: RegexpFilter{Field: "foo", Regexp: ".*"},
+		json:  `{"regexp": {"foo": ".*"}}`,
+	}, {
+		about: "query dsl",
+		query: QueryDSL{
+			Fields: []string{"foo", "bar"},
+			Size:   10,
+			Query:  TermQuery{Field: "baz", Value: "quz"},
+			Sort:   []Sort{{Field: "foo", Order: Order{"desc"}}},
+		},
+		json: `{"fields": ["foo", "bar"], "size": 10, "query": {"term": {"baz": "quz"}}, "sort": [{"foo": { "order": "desc"}}]}`,
+	}, {
+		about: "decay function",
+		query: DecayFunction{
+			Function: "baz",
+			Field:    "foo",
+			Scale:    "quz",
+		},
+		json: `{"baz": {"foo":{"scale": "quz"}}}`,
+	}, {
+		about: "boost_factor function",
+		query: BoostFactorFunction{
+			BoostFactor: 1.5,
+		},
+		json: `{"boost_factor": 1.5}`,
+	}, {
+		about: "boost_factor function with filter",
+		query: BoostFactorFunction{
+			BoostFactor: 1.5,
+			Filter: TermFilter{
+				Field: "foo",
+				Value: "bar",
+			},
+		},
+		json: `{"filter": {"term": {"foo": "bar"}}, "boost_factor": 1.5}`,
+	}, {
+		about: "paginated query",
+		query: QueryDSL{
+			Fields: []string{"foo", "bar"},
+			Size:   10,
+			Query:  TermQuery{Field: "baz", Value: "quz"},
+			Sort:   []Sort{{Field: "foo", Order: Order{"desc"}}},
+			From:   10,
+		},
+		json: `{"fields": ["foo", "bar"], "size": 10, "query": {"term": {"baz": "quz"}}, "sort": [{"foo": { "order": "desc"}}], "from": 10}`,
+	}, {
+		about: "field value factor",
+		query: FieldValueFactorFunction{
+			Field:    "foo",
+			Factor:   1.2,
+			Modifier: "bar",
+		},
+		json: `{"field_value_factor": {"field": "foo", "factor": 1.2, "modifier": "bar"}}`,
+	}}
+	for i, test := range tests {
+		c.Logf("%d: %s", i, test.about)
+		// Note that JSONEquals is being used a bit backwards here; this is
+		// fine, but any error results may be a little confusing.
+		c.Assert(test.json, jc.JSONEquals, test.query)
+	}
+}

=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal'
=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/agent'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,110 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package agent // import "gopkg.in/juju/charmstore.v5-unstable/internal/agent"
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/loggo"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+var logger = loggo.GetLogger("charmstore.internal.agent")
+
+type loginMethods struct {
+	Agent string `json:"agent"`
+}
+
+type agentLoginRequest struct {
+	Username  string            `json:"username"`
+	PublicKey *bakery.PublicKey `json:"public_key"`
+}
+
+// TODO make VisitWebPage support using different usernames (and possibly
+// keys) for different sites.
+
+// VisitWebPage returns a function that can be used with
+// httpbakery.Client.VisitWebPage. The returned function will attempt to
+// perform an agent login with the server.
+func VisitWebPage(c *httpbakery.Client, username string) func(u *url.URL) error {
+	return func(u *url.URL) error {
+		logger.Infof("Attempting agent login to %q", u)
+		req, err := http.NewRequest("GET", u.String(), nil)
+		if err != nil {
+			return errgo.Notef(err, "cannot create request")
+		}
+		// Set the Accept header to indicate that we're asking for a
+		// non-interactive login.
+ req.Header.Set("Accept", "application/json") + resp, err := c.Do(req) + if err != nil { + return errgo.Notef(err, "cannot get login methods") + } + defer resp.Body.Close() + var lm loginMethods + if err := router.UnmarshalJSONResponse(resp, &lm, getError); err != nil { + return errgo.Notef(err, "cannot get login methods") + } + if lm.Agent == "" { + return errgo.New("agent login not supported") + } + lr := &agentLoginRequest{ + Username: username, + } + if c.Key != nil { + lr.PublicKey = &c.Key.Public + } + body, err := json.Marshal(lr) + if err != nil { + return errgo.Notef(err, "cannot marshal login request") + } + req, err = http.NewRequest("POST", lm.Agent, nil) + if err != nil { + return errgo.Notef(err, "cannot create login request") + } + req.Header.Set("Content-Type", "application/json") + resp, err = c.DoWithBody(req, bytes.NewReader(body)) + if err != nil { + return errgo.Notef(err, "cannot post login request") + } + defer resp.Body.Close() + if resp.StatusCode >= http.StatusBadRequest { + return errgo.Notef(getError(resp), "cannot log in") + } + return nil + } +} + +// NewClient creates an httpbakery.Client that is configured to use agent +// login. The agent login attempts will be made using the provided +// username and key. +func NewClient(username string, key *bakery.KeyPair) *httpbakery.Client { + c := httpbakery.NewClient() + c.Key = key + c.VisitWebPage = VisitWebPage(c, username) + return c +} + +func getError(resp *http.Response) error { + var aerr agentError + if err := router.UnmarshalJSONResponse(resp, &aerr, nil); err != nil { + return err + } + return aerr +} + +type agentError struct { + Message string +} + +func (e agentError) Error() string { + return e.Message +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/agent_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,82 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package agent_test + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/bakery" + + "gopkg.in/juju/charmstore.v5-unstable/internal/agent" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +type agentSuite struct { + idM *idM +} + +var _ = gc.Suite(&agentSuite{}) + +func (s *agentSuite) SetUpSuite(c *gc.C) { + s.idM = newIdM(c) +} + +func (s *agentSuite) TearDownSuite(c *gc.C) { + s.idM.Close() +} + +var agentLoginTests = []struct { + about string + condition string + expectBody interface{} + expectError string +}{{ + about: "no login required", + condition: "allow", + expectBody: map[string]string{}, +}, { + about: "successful agent login", + condition: "agent", + expectBody: map[string]string{}, +}, { + about: "interactive", + condition: "interactive", + expectError: `cannot get discharge from ".*": cannot start interactive session: cannot get login methods: unexpected content type "text/plain"`, +}, { + about: "agent not supported", + condition: "no-agent", + expectError: `cannot get discharge from "http://.*": cannot start interactive session: agent login not supported`, +}, { + about: "agent fail", + condition: "agent-fail", + expectError: `cannot get discharge from "http://.*": cannot start interactive session: cannot log in: forced failure`, +}} + +func (s *agentSuite) TestAgentLogin(c *gc.C) { + key, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + for i, test := range agentLoginTests { + c.Logf("%d. %s", i, test.about) + client := agent.NewClient("testuser", key) + u := fmt.Sprintf("%s/protected?test=%d&c=%s", s.idM.URL, i, url.QueryEscape(test.condition)) + req, err := http.NewRequest("GET", u, nil) + c.Assert(err, gc.IsNil) + resp, err := client.Do(req) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + continue + } + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + var v json.RawMessage + err = router.UnmarshalJSONResponse(resp, &v, nil) + c.Assert(err, gc.IsNil) + c.Assert(string(v), jc.JSONEquals, test.expectBody) + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/export_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package agent // import "gopkg.in/juju/charmstore.v5-unstable/internal/agent" + +type ( + AgentLoginRequest agentLoginRequest + LoginMethods loginMethods + Error agentError +) === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/idm_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/idm_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/idm_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,314 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package agent_test
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/http/httptest"
+
+	gc "gopkg.in/check.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/agent"
+)
+
+type discharge struct {
+	id string
+	c  chan error
+}
+
+// idM provides a mock identity server that can be used to test agent login.
+// The following endpoints are provided:
+//	/public-key
+//	/discharge
+//	/protected
+//	/login
+//	/agent
+//	/wait
+// Most tests initiate the exchange with a call to /protected.
+type idM struct {
+	*httptest.Server
+	*http.ServeMux
+	svc        *bakery.Service
+	discharges map[string]discharge
+	key        *bakery.KeyPair
+}
+
+func newIdM(c *gc.C) *idM {
+	i := &idM{
+		ServeMux:   http.NewServeMux(),
+		discharges: make(map[string]discharge),
+	}
+	i.Server = httptest.NewServer(i)
+	var err error
+	i.key, err = bakery.GenerateKey()
+	c.Assert(err, gc.IsNil)
+	i.svc, err = bakery.NewService(bakery.NewServiceParams{
+		Key: i.key,
+		Locator: bakery.PublicKeyLocatorMap{
+			i.URL: &i.key.Public,
+		},
+	})
+	c.Assert(err, gc.IsNil)
+	httpbakery.AddDischargeHandler(i.ServeMux, "/", i.svc, i.checker)
+	i.Handle("/", http.HandlerFunc(i.notFound))
+	i.Handle("/protected", http.HandlerFunc(i.serveProtected))
+	i.Handle("/login", http.HandlerFunc(i.serveLogin))
+	i.Handle("/wait", http.HandlerFunc(i.serveWait))
+	i.Handle("/agent", http.HandlerFunc(i.serveAgent))
+	return i
+}
+
+func (i *idM) notFound(w http.ResponseWriter, req *http.Request) {
+	i.error(w, http.StatusNotFound, "%s not found", req.URL.Path)
+}
+
+func (i *idM) write(w http.ResponseWriter, v interface{}) {
+	body, err := json.Marshal(v)
+	if err != nil {
+		i.error(w, http.StatusInternalServerError, "cannot marshal response: %s", err)
+		return
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.Write(body)
+}
+
+func (i *idM) error(w http.ResponseWriter, status int, format string, a ...interface{}) {
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+	body, err := json.Marshal(&agent.Error{
+		Message: fmt.Sprintf(format, a...),
+	})
+	if err != nil {
+		panic(err)
+	}
+	w.Write(body)
+}
+
+// serveProtected provides the /protected endpoint. When /protected is
+// called, two parameters should be provided:
+//	test = the test id; it uniquely identifies the test
+//	c = the caveat to put in the third party caveat
+//
+// The c parameter determines what will happen in the test and can be one of:
+//	allow = the macaroon is discharged straight away
+//	agent = successful agent authentication
+//	agent-fail = unsuccessful agent authentication
+//	interactive = login does not return a JSON object
+//	no-agent = login does return a JSON object, but agent authentication is not specified.
+func (i *idM) serveProtected(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if r.Form.Get("test") == "" { + i.error(w, http.StatusBadRequest, "test id not specified") + return + } + attrs, err := httpbakery.CheckRequest(i.svc, r, nil, checkers.OperationChecker(r.Form.Get("test"))) + if err == nil { + i.write(w, attrs) + return + } + verr, ok := err.(*bakery.VerificationError) + if !ok { + i.error(w, http.StatusInternalServerError, "error checking macaroon: %s", err) + return + } + m, err := i.svc.NewMacaroon("", nil, []checkers.Caveat{ + { + Location: i.URL, + Condition: r.Form.Get("c") + " " + r.Form.Get("test"), + }, + checkers.AllowCaveat(r.Form.Get("test")), + }) + if err != nil { + i.error(w, http.StatusInternalServerError, "cannot create macaroon: %s", err) + return + } + httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "/", verr, r) +} + +// serveLogin provides the /login endpoint. When /login is called it should +// be provided with a test id. /login also supports some additional parameters: +// a = if set to "true" an agent URL will be added to the json response. +// i = if set to "true" a plaintext response will be sent to simulate interaction. +func (i *idM) serveLogin(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if r.Form.Get("i") == "true" || r.Header.Get("Accept") != "application/json" { + w.Write([]byte("Let's interact!")) + return + } + var lm agent.LoginMethods + if r.Form.Get("a") == "true" { + lm.Agent = i.URL + "/agent?test=" + r.Form.Get("test") + "&f=" + r.Form.Get("f") + } + i.write(w, lm) +} + +// serveWait provides the /wait endpoint. When /wait is called it should +// be provided with a test id. This then matches the wait to the login +// being tested. +func (i *idM) serveWait(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if r.Form.Get("test") == "" { + i.error(w, http.StatusBadRequest, "test id not specified") + return + } + d := i.discharges[r.Form.Get("test")] + derr := <-d.c + if derr != nil { + // do something with the error + return + } + m, err := i.svc.Discharge( + bakery.ThirdPartyCheckerFunc( + func(cavId, cav string) ([]checkers.Caveat, error) { + return nil, nil + }, + ), + d.id, + ) + if err != nil { + i.error(w, http.StatusInternalServerError, "cannot discharge caveat: %s", err) + return + } + i.write(w, httpbakery.WaitResponse{ + Macaroon: m, + }) +} + +// serveAgent provides the /agent endpoint. When /agent is called it +// should be provided with a test id. This then matches the current login +// to the correct wait. If the optional f query variable is set to "true" +// then a failure will be simulated. 
+func (i *idM) serveAgent(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if r.Form.Get("f") == "true" { + i.error(w, http.StatusTeapot, "forced failure") + return + } + test := r.Form.Get("test") + op := "agent-login-" + test + _, err := httpbakery.CheckRequest(i.svc, r, nil, checkers.OperationChecker(op)) + if err == nil { + d := i.discharges[test] + d.c <- nil + return + } + verr, ok := err.(*bakery.VerificationError) + if !ok { + d := i.discharges[test] + d.c <- err + i.error(w, http.StatusInternalServerError, "cannot check request: %s", err) + return + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + d := i.discharges[test] + d.c <- err + i.error(w, http.StatusInternalServerError, "cannot read agent login request: %s", err) + return + } + ct, _, err := mime.ParseMediaType(r.Header.Get("Content-Type")) + if err != nil { + d := i.discharges[test] + d.c <- err + i.error(w, http.StatusBadRequest, "cannot parse mediatype: %s", err) + return + } + if ct != "application/json" { + d := i.discharges[test] + d.c <- err + i.error(w, http.StatusBadRequest, "unexpected Content-Type: %s", ct) + return + } + var login agent.AgentLoginRequest + err = json.Unmarshal(body, &login) + if err != nil { + d := i.discharges[test] + d.c <- err + i.error(w, http.StatusBadRequest, "cannot unmarshal login request: %s", err) + return + } + m, err := i.svc.NewMacaroon("", nil, []checkers.Caveat{ + bakery.LocalThirdPartyCaveat(login.PublicKey), + checkers.AllowCaveat(op), + }) + if err != nil { + d := i.discharges[test] + d.c <- err + i.error(w, http.StatusInternalServerError, "cannot create macaroon: %s", err) + return + } + httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "/", verr, r) +} + +func (i *idM) checker(r *http.Request, cavId, cav string) ([]checkers.Caveat, error) { + cond, arg, err := checkers.ParseCaveat(cav) + if err != nil { + return nil, err + } + switch cond { + case "allow": + return nil, nil + case "agent": + i.discharges[arg] = discharge{ + id: cavId, + c: make(chan error, 1), + } + return nil, &httpbakery.Error{ + Message: "need login", + Code: httpbakery.ErrInteractionRequired, + Info: &httpbakery.ErrorInfo{ + VisitURL: i.URL + "/login?a=true&test=" + arg, + WaitURL: i.URL + "/wait?test=" + arg, + }, + } + case "interactive": + i.discharges[arg] = discharge{ + id: cavId, + c: make(chan error, 1), + } + return nil, &httpbakery.Error{ + Message: "need login", + Code: httpbakery.ErrInteractionRequired, + Info: &httpbakery.ErrorInfo{ + VisitURL: i.URL + "/login?i=true&test=" + arg, + WaitURL: i.URL + "/wait?test=" + arg, + }, + } + case "no-agent": + i.discharges[arg] = discharge{ + id: cavId, + c: make(chan error, 1), + } + return nil, &httpbakery.Error{ + Message: "need login", + Code: httpbakery.ErrInteractionRequired, + Info: &httpbakery.ErrorInfo{ + VisitURL: i.URL + "/login?test=" + arg, + WaitURL: i.URL + "/wait?test=" + arg, + }, + } + case "agent-fail": + i.discharges[arg] = discharge{ + id: cavId, + c: make(chan error, 1), + } + return nil, &httpbakery.Error{ + Message: "need login", + Code: httpbakery.ErrInteractionRequired, + Info: &httpbakery.ErrorInfo{ + VisitURL: i.URL + "/login?a=true&f=true&test=" + arg, + WaitURL: i.URL + "/wait?test=" + arg, + }, + } + default: + return nil, checkers.ErrCaveatNotRecognized + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/package_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/package_test.go 1970-01-01 00:00:00 +0000 +++ 
src/gopkg.in/juju/charmstore.v5-unstable/internal/agent/package_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package agent_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}

=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,159 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package blobstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
+
+import (
+	"crypto/sha512"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/juju/blobstore"
+	"github.com/juju/errors"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/mgo.v2"
+)
+
+// ReadSeekCloser combines io.Reader, io.Seeker and io.Closer.
+type ReadSeekCloser interface {
+	io.Reader
+	io.Seeker
+	io.Closer
+}
+
+// ContentChallengeError is the error returned when content cannot be
+// uploaded without first satisfying a proof-of-content challenge. It
+// wraps the ContentChallenge produced by the blobstore.
+type ContentChallengeError struct {
+	Req ContentChallenge
+}
+
+func (e *ContentChallengeError) Error() string {
+	return "cannot upload because proof of content ownership is required"
+}
+
+// ContentChallenge holds a proof-of-content challenge
+// produced by a blobstore. A client can satisfy the request
+// by producing a ContentChallengeResponse containing
+// the same request id and a hash of RangeLength bytes
+// of the content starting at RangeStart.
+type ContentChallenge struct {
+	RequestId   string
+	RangeStart  int64
+	RangeLength int64
+}
+
+// ContentChallengeResponse holds a response to a ContentChallenge.
+type ContentChallengeResponse struct {
+	RequestId string
+	Hash      string
+}
+
+// NewHash is used to calculate checksums for the blob store.
+func NewHash() hash.Hash {
+	return sha512.New384()
+}
+
+// NewContentChallengeResponse can be used by a client to respond to a content
+// challenge. The returned value should be passed to BlobStorage.Put
+// when the client retries the request.
+func NewContentChallengeResponse(chal *ContentChallenge, r io.ReadSeeker) (*ContentChallengeResponse, error) {
+	_, err := r.Seek(chal.RangeStart, 0)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	hash := NewHash()
+	nw, err := io.CopyN(hash, r, chal.RangeLength)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	if nw != chal.RangeLength {
+		return nil, errgo.Newf("content is not long enough")
+	}
+	return &ContentChallengeResponse{
+		RequestId: chal.RequestId,
+		Hash:      fmt.Sprintf("%x", hash.Sum(nil)),
+	}, nil
+}
+
+// Store stores data blobs in mongodb, de-duplicating by
+// blob hash.
+type Store struct {
+	mstore blobstore.ManagedStorage
+}
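A sketch of the de-duplication handshake described above, using the Put method defined just below. The helper and its names are hypothetical: a second upload of already-stored content triggers a ContentChallenge instead of a data transfer, and the client answers it by hashing the byte range it was asked for.

// Hypothetical helper; assumes:
//   import (
//       "os"
//
//       "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
//   )
func upload(store *blobstore.Store, f *os.File, name string, size int64, hash string) error {
	// First attempt. If identical content is already stored, Put returns
	// a challenge rather than consuming the reader.
	chal, err := store.Put(f, name, size, hash, nil)
	if err != nil || chal == nil {
		return err
	}
	// Prove we hold the content by hashing the requested range of our copy.
	resp, err := blobstore.NewContentChallengeResponse(chal, f)
	if err != nil {
		return err
	}
	// Rewind and retry with the proof attached.
	if _, err := f.Seek(0, 0); err != nil {
		return err
	}
	_, err = store.Put(f, name, size, hash, resp)
	return err
}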
+// New returns a new blob store that writes to the given database,
+// prefixing its collections with the given prefix.
+func New(db *mgo.Database, prefix string) *Store {
+	rs := blobstore.NewGridFS(db.Name, prefix, db.Session)
+	return &Store{
+		mstore: blobstore.NewManagedStorage(db, rs),
+	}
+}
+
+func (s *Store) challengeResponse(resp *ContentChallengeResponse) error {
+	id, err := strconv.ParseInt(resp.RequestId, 10, 64)
+	if err != nil {
+		return errgo.Newf("invalid request id %q", resp.RequestId)
+	}
+	return s.mstore.ProofOfAccessResponse(blobstore.NewPutResponse(id, resp.Hash))
+}
+
+// Put tries to stream the content from the given reader into blob
+// storage, with the provided name. The content should have the given
+// size and hash. If the content is already in the store, a
+// ContentChallenge is returned that must be satisfied by a client
+// to prove that they have access to the content.
+// If the proof has already been acquired, it should be passed in as the
+// proof argument.
+func (s *Store) Put(r io.Reader, name string, size int64, hash string, proof *ContentChallengeResponse) (*ContentChallenge, error) {
+	if proof != nil {
+		err := s.challengeResponse(proof)
+		if err == nil {
+			return nil, nil
+		}
+		if err != blobstore.ErrResourceDeleted {
+			return nil, errgo.Mask(err)
+		}
+		// The blob has been deleted since the challenge
+		// was created, so continue on with uploading
+		// the content as if there was no previous challenge.
+	}
+	resp, err := s.mstore.PutForEnvironmentRequest("", name, hash)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			if err := s.mstore.PutForEnvironmentAndCheckHash("", name, r, size, hash); err != nil {
+				return nil, errgo.Mask(err)
+			}
+			return nil, nil
+		}
+		return nil, err
+	}
+	return &ContentChallenge{
+		RequestId:   fmt.Sprint(resp.RequestId),
+		RangeStart:  resp.RangeStart,
+		RangeLength: resp.RangeLength,
+	}, nil
+}
+
+// PutUnchallenged streams the content from the given reader into blob
+// storage, with the provided name. The content should have the given
+// size and hash. In this case a challenge is never returned and a proof
+// is not required.
+func (s *Store) PutUnchallenged(r io.Reader, name string, size int64, hash string) error {
+	return s.mstore.PutForEnvironmentAndCheckHash("", name, r, size, hash)
+}
+
+// Open opens the entry with the given name.
+func (s *Store) Open(name string) (ReadSeekCloser, int64, error) {
+	r, length, err := s.mstore.GetForEnvironment("", name)
+	if err != nil {
+		return nil, 0, errgo.Mask(err)
+	}
+	return r.(ReadSeekCloser), length, nil
+}
+
+// Remove removes the blob with the given name from the Store.
+func (s *Store) Remove(name string) error {
+	return s.mstore.RemoveForEnvironment("", name)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/blobstore/blobstore_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,191 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package blobstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" + +import ( + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "testing" + + jujutesting "github.com/juju/testing" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" +) + +func TestPackage(t *testing.T) { + jujutesting.MgoTestPackage(t, nil) +} + +type BlobStoreSuite struct { + jujutesting.IsolatedMgoSuite +} + +var _ = gc.Suite(&BlobStoreSuite{}) + +func (s *BlobStoreSuite) TestPutOpen(c *gc.C) { + store := blobstore.New(s.Session.DB("db"), "blobstore") + content := "some data" + chal, err := store.Put(strings.NewReader(content), "x", int64(len(content)), hashOf(content), nil) + c.Assert(err, gc.IsNil) + c.Assert(chal, gc.IsNil) + + rc, length, err := store.Open("x") + c.Assert(err, gc.IsNil) + defer rc.Close() + c.Assert(length, gc.Equals, int64(len(content))) + + data, err := ioutil.ReadAll(rc) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, content) + + // Putting the resource again should generate a challenge. + chal, err = store.Put(strings.NewReader(content), "y", int64(len(content)), hashOf(content), nil) + c.Assert(err, gc.IsNil) + c.Assert(chal, gc.NotNil) + + resp, err := blobstore.NewContentChallengeResponse(chal, strings.NewReader(content)) + c.Assert(err, gc.IsNil) + + chal, err = store.Put(strings.NewReader(content), "y", int64(len(content)), hashOf(content), resp) + c.Assert(err, gc.IsNil) + c.Assert(chal, gc.IsNil) +} + +func (s *BlobStoreSuite) TestPutInvalidHash(c *gc.C) { + store := blobstore.New(s.Session.DB("db"), "blobstore") + content := "some data" + chal, err := store.Put(strings.NewReader(content), "x", int64(len(content)), hashOf("wrong"), nil) + c.Assert(err, gc.ErrorMatches, "hash mismatch") + c.Assert(chal, gc.IsNil) + + rc, length, err := store.Open("x") + c.Assert(err, gc.ErrorMatches, "resource.*not found") + c.Assert(rc, gc.Equals, nil) + c.Assert(length, gc.Equals, int64(0)) +} + +func (s *BlobStoreSuite) TestPutUnchallenged(c *gc.C) { + store := blobstore.New(s.Session.DB("db"), "blobstore") + + content := "some data" + err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content)) + c.Assert(err, gc.IsNil) + + rc, length, err := store.Open("x") + c.Assert(err, gc.IsNil) + defer rc.Close() + c.Assert(length, gc.Equals, int64(len(content))) + + data, err := ioutil.ReadAll(rc) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, content) + + err = store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content)) + c.Assert(err, gc.IsNil) +} + +func (s *BlobStoreSuite) TestPutUnchallengedInvalidHash(c *gc.C) { + store := blobstore.New(s.Session.DB("db"), "blobstore") + content := "some data" + err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf("wrong")) + c.Assert(err, gc.ErrorMatches, "hash mismatch") +} + +func (s *BlobStoreSuite) TestRemove(c *gc.C) { + store := blobstore.New(s.Session.DB("db"), "blobstore") + content := "some data" + err := store.PutUnchallenged(strings.NewReader(content), "x", int64(len(content)), hashOf(content)) + c.Assert(err, gc.IsNil) + + rc, length, err := store.Open("x") + c.Assert(err, gc.IsNil) + defer rc.Close() + c.Assert(length, gc.Equals, int64(len(content))) + data, err := ioutil.ReadAll(rc) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, content) + + err = store.Remove("x") + c.Assert(err, gc.IsNil) + + rc, length, err = store.Open("x") + 
c.Assert(err, gc.ErrorMatches, `resource at path "[^"]+" not found`)
+}
+
+func (s *BlobStoreSuite) TestLarge(c *gc.C) {
+	store := blobstore.New(s.Session.DB("db"), "blobstore")
+	size := int64(20 * 1024 * 1024)
+	newContent := func() io.Reader {
+		return newDataSource(123, size)
+	}
+	hash := hashOfReader(c, newContent())
+
+	chal, err := store.Put(newContent(), "x", size, hash, nil)
+	c.Assert(err, gc.IsNil)
+	c.Assert(chal, gc.IsNil)
+
+	rc, length, err := store.Open("x")
+	c.Assert(err, gc.IsNil)
+	defer rc.Close()
+	c.Assert(length, gc.Equals, size)
+
+	c.Assert(hashOfReader(c, rc), gc.Equals, hash)
+}
+
+func hashOfReader(c *gc.C, r io.Reader) string {
+	h := blobstore.NewHash()
+	_, err := io.Copy(h, r)
+	c.Assert(err, gc.IsNil)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func hashOf(s string) string {
+	h := blobstore.NewHash()
+	h.Write([]byte(s))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+type dataSource struct {
+	buf      []byte
+	bufIndex int
+	remain   int64
+}
+
+// newDataSource returns a stream of size bytes holding
+// a repeated number.
+func newDataSource(fillWith int64, size int64) io.Reader {
+	src := &dataSource{
+		remain: size,
+	}
+	for len(src.buf) < 8*1024 {
+		src.buf = strconv.AppendInt(src.buf, fillWith, 10)
+		src.buf = append(src.buf, ' ')
+	}
+	return src
+}
+
+func (s *dataSource) Read(buf []byte) (int, error) {
+	if int64(len(buf)) > s.remain {
+		buf = buf[:int(s.remain)]
+	}
+	total := len(buf)
+	if total == 0 {
+		return 0, io.EOF
+	}
+
+	for len(buf) > 0 {
+		if s.bufIndex == len(s.buf) {
+			s.bufIndex = 0
+		}
+		nb := copy(buf, s.buf[s.bufIndex:])
+		s.bufIndex += nb
+		buf = buf[nb:]
+		s.remain -= int64(nb)
+	}
+	return total, nil
+}

=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/cache'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,154 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package cache // import "gopkg.in/juju/charmstore.v5-unstable/internal/cache"
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+
+	"gopkg.in/errgo.v1"
+)
+
+type entry struct {
+	value  interface{}
+	expire time.Time
+}
+
+// Cache holds a time-limited cache of values for string keys.
+type Cache struct {
+	maxAge time.Duration
+
+	// mu guards the fields below it.
+	mu sync.Mutex
+
+	// expire holds when the cache is due to expire.
+	expire time.Time
+	// We hold two maps so that we can avoid scanning through all the
+	// items in the cache when the cache needs to be refreshed.
+	// Instead, we move items from old to new when they're accessed
+	// and throw away the old map at refresh time.
+	old, new map[string]entry
+}
+
+// New returns a new Cache that will cache items for
+// at most maxAge.
+func New(maxAge time.Duration) *Cache {
+	// If maxAge is < 2ns then the expiry code would panic: the
+	// actual expiry time is maxAge minus a random value in the
+	// interval [0, maxAge/2), and for maxAge < 2ns that would
+	// require drawing from the empty interval [0, 0), which panics.
+	if maxAge < 2*time.Nanosecond {
+		maxAge = 2 * time.Nanosecond
+	}
+	// The returned cache will have a zero-valued expire
+	// time, so will expire immediately, causing the new
+	// map to be created.
+	return &Cache{
+		maxAge: maxAge,
+	}
+}
+
+// Len returns the total number of cached entries.
+func (c *Cache) Len() int {
	c.mu.Lock()
	defer c.mu.Unlock()
	return len(c.old) + len(c.new)
}
+
+// Evict removes the entry with the given key from the cache if present.
+func (c *Cache) Evict(key string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	delete(c.new, key)
+	// Also delete from the old map, otherwise cachedValue could
+	// still find (and resurrect) the entry there.
+	delete(c.old, key)
+}
+
+// EvictAll removes all entries from the cache.
+func (c *Cache) EvictAll() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.new = make(map[string]entry)
+	c.old = nil
+}
+
+// Get returns the value for the given key, using fetch to fetch
+// the value if it is not found in the cache.
+// If fetch returns an error, the returned error from Get will have
+// the same cause.
+func (c *Cache) Get(key string, fetch func() (interface{}, error)) (interface{}, error) {
+	return c.getAtTime(key, fetch, time.Now())
+}
+
+// getAtTime is the internal version of Get, useful for testing; now represents the current
+// time.
+func (c *Cache) getAtTime(key string, fetch func() (interface{}, error), now time.Time) (interface{}, error) {
+	if val, ok := c.cachedValue(key, now); ok {
+		return val, nil
+	}
+	// Fetch the data without the mutex held
+	// so that one slow fetch doesn't hold up
+	// all the other cache accesses.
+	val, err := fetch()
+	if err != nil {
+		// TODO consider caching cache misses.
+		return nil, errgo.Mask(err, errgo.Any)
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	// Add the new cache entry. Because it's quite likely that a
+	// large number of cache entries will be initially fetched at
+	// the same time, we want to avoid a thundering herd of fetches
+	// when they all expire at the same time, so we set the expiry
+	// time to a random point in the interval [now + c.maxAge/2,
+	// now + c.maxAge] so that they'll be spread over time without
+	// compromising the maxAge value.
+	c.new[key] = entry{
+		value:  val,
+		expire: now.Add(c.maxAge - time.Duration(rand.Int63n(int64(c.maxAge/2)))),
+	}
+	return val, nil
+}
+
+// cachedValue returns any cached value for the given key
+// and whether it was found.
+func (c *Cache) cachedValue(key string, now time.Time) (interface{}, bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if now.After(c.expire) {
+		c.old = c.new
+		c.new = make(map[string]entry)
+		c.expire = now.Add(c.maxAge)
+	}
+	if e, ok := c.entry(c.new, key, now); ok {
+		return e.value, true
+	}
+	if e, ok := c.entry(c.old, key, now); ok {
+		// An old entry has been accessed; move it to the new
+		// map so that we only use a single map access for
+		// subsequent lookups. Note that because we use the same
+		// duration for cache refresh (c.expire) as for max
+		// entry age, this is strictly speaking unnecessary
+		// because any entries in old will have expired by the
+		// time it is dropped.
+		c.new[key] = e
+		delete(c.old, key)
+		return e.value, true
+	}
+	return nil, false
+}
+
+// entry returns an entry from the map and whether it
+// was found. If the entry has expired, it is deleted from the map.
+func (c *Cache) entry(m map[string]entry, key string, now time.Time) (entry, bool) {
+	e, ok := m[key]
+	if !ok {
+		return entry{}, false
+	}
+	if now.After(e.expire) {
+		// Delete expired entries.
+		delete(m, key)
+		return entry{}, false
+	}
+	return e, true
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/cache_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,223 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details. + +package cache_test + +import ( + "fmt" + "sync" + "time" + + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/cache" +) + +type suite struct{} + +var _ = gc.Suite(&suite{}) + +func (*suite) TestSimpleGet(c *gc.C) { + p := cache.New(time.Hour) + v, err := p.Get("a", fetchValue(2)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) +} + +func (*suite) TestSimpleRefresh(c *gc.C) { + p := cache.New(time.Hour) + v, err := p.Get("a", fetchValue(2)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) + + v, err = p.Get("a", fetchValue(4)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) + + p.Evict("a") + v, err = p.Get("a", fetchValue(3)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 3) + + v, err = p.Get("a", fetchValue(4)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 3) +} + +func (*suite) TestFetchError(c *gc.C) { + p := cache.New(time.Hour) + expectErr := errgo.New("hello") + v, err := p.Get("a", fetchError(expectErr)) + c.Assert(err, gc.ErrorMatches, "hello") + c.Assert(errgo.Cause(err), gc.Equals, expectErr) + c.Assert(v, gc.Equals, nil) +} + +func (*suite) TestFetchOnlyOnce(c *gc.C) { + p := cache.New(time.Hour) + v, err := p.Get("a", fetchValue(2)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) + + v, err = p.Get("a", fetchError(errUnexpectedFetch)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) +} + +func (*suite) TestEntryExpiresAfterMaxEntryAge(c *gc.C) { + now := time.Now() + p := cache.New(time.Minute) + v, err := cache.GetAtTime(p, "a", fetchValue(2), now) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) + + // Entry is definitely not expired before half the entry expiry time. + v, err = cache.GetAtTime(p, "a", fetchError(errUnexpectedFetch), now.Add(time.Minute/2-1)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, 2) + + // Entry is definitely expired after the entry expiry time + v, err = cache.GetAtTime(p, "a", fetchValue(3), now.Add(time.Minute+1)) + c.Assert(v, gc.Equals, 3) +} + +func (*suite) TestEntriesRemovedWhenNotRetrieved(c *gc.C) { + now := time.Now() + p := cache.New(time.Minute) + + // Populate the cache with an initial entry. + v, err := cache.GetAtTime(p, "a", fetchValue("a"), now) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "a") + c.Assert(p.Len(), gc.Equals, 1) + + // Fetch another item after the expiry time, + // causing current entries to be moved to old. + v, err = cache.GetAtTime(p, "b", fetchValue("b"), now.Add(time.Minute+1)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "b") + c.Assert(p.Len(), gc.Equals, 2) + + // Fetch the other item after another expiry time + // causing the old entries to be discarded because + // nothing has fetched them. + v, err = cache.GetAtTime(p, "b", fetchValue("b"), now.Add(time.Minute*2+2)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "b") + c.Assert(p.Len(), gc.Equals, 1) +} + +// TestRefreshedEntry tests the code path where a value is moved +// from the old map to new. +func (*suite) TestRefreshedEntry(c *gc.C) { + now := time.Now() + p := cache.New(time.Minute) + + // Populate the cache with an initial entry. + v, err := cache.GetAtTime(p, "a", fetchValue("a"), now) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "a") + c.Assert(p.Len(), gc.Equals, 1) + + // Fetch another item very close to the expiry time. 
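+	// (Editorial note: this entry lands in the cache's current map; the
+	// refresh triggered below rotates the maps, and the access moves it
+	// from the old map back into the new one.)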
+ v, err = cache.GetAtTime(p, "b", fetchValue("b"), now.Add(time.Minute-1)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "b") + c.Assert(p.Len(), gc.Equals, 2) + + // Fetch it again just after the expiry time, + // which should move it into the new map. + v, err = cache.GetAtTime(p, "b", fetchError(errUnexpectedFetch), now.Add(time.Minute+1)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "b") + c.Assert(p.Len(), gc.Equals, 2) + + // Fetch another item, causing "a" to be removed from the cache + // and keeping "b" in there. + v, err = cache.GetAtTime(p, "c", fetchValue("c"), now.Add(time.Minute*2+2)) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, "c") + c.Assert(p.Len(), gc.Equals, 2) +} + +// TestConcurrentFetch checks that the cache is safe +// to use concurrently. It is designed to fail when +// tested with the race detector enabled. +func (*suite) TestConcurrentFetch(c *gc.C) { + p := cache.New(time.Minute) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + v, err := p.Get("a", fetchValue("a")) + c.Check(err, gc.IsNil) + c.Check(v, gc.Equals, "a") + }() + wg.Add(1) + go func() { + defer wg.Done() + v, err := p.Get("b", fetchValue("b")) + c.Check(err, gc.IsNil) + c.Check(v, gc.Equals, "b") + }() + wg.Wait() +} + +func (*suite) TestRefreshSpread(c *gc.C) { + now := time.Now() + p := cache.New(time.Minute) + // Get all values to start with. + const N = 100 + for i := 0; i < N; i++ { + v, err := cache.GetAtTime(p, fmt.Sprint(i), fetchValue(i), now) + c.Assert(err, gc.IsNil) + c.Assert(v, gc.Equals, i) + } + counts := make([]int, time.Minute/time.Millisecond/10+1) + + // Continually get values over the course of the + // expiry time; the fetches should be spread out. + slot := 0 + for t := now.Add(0); t.Before(now.Add(time.Minute + 1)); t = t.Add(time.Millisecond * 10) { + for i := 0; i < N; i++ { + cache.GetAtTime(p, fmt.Sprint(i), func() (interface{}, error) { + counts[slot]++ + return i, nil + }, t) + } + slot++ + } + + // There should be no fetches in the first half of the cycle. + for i := 0; i < len(counts)/2; i++ { + c.Assert(counts[i], gc.Equals, 0, gc.Commentf("slot %d", i)) + } + + max := 0 + total := 0 + for _, count := range counts { + if count > max { + max = count + } + total += count + } + if max > 10 { + c.Errorf("requests grouped too closely (max %d)", max) + } + c.Assert(total, gc.Equals, N) +} + +var errUnexpectedFetch = errgo.New("fetch called unexpectedly") + +func fetchError(err error) func() (interface{}, error) { + return func() (interface{}, error) { + return nil, err + } +} + +func fetchValue(val interface{}) func() (interface{}, error) { + return func() (interface{}, error) { + return val, nil + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/export_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package cache + +var GetAtTime = (*Cache).getAtTime === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/package_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/cache/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2015 Canonical Ltd. 
+// Licensed under the AGPLv3, see LICENCE file for details. + +package cache_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/archive.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/archive.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/archive.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,59 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "bytes" + "io" + "os" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" +) + +type archiverTo interface { + ArchiveTo(io.Writer) error +} + +// getArchive is used to turn the current charm and bundle implementations +// into ReadSeekClosers for their corresponding archive. +func getArchive(c interface{}) (blobstore.ReadSeekCloser, error) { + var path string + switch c := c.(type) { + case archiverTo: + // For example: charm.CharmDir or charm.BundleDir. + var buffer bytes.Buffer + if err := c.ArchiveTo(&buffer); err != nil { + return nil, errgo.Mask(err) + } + return nopCloser(bytes.NewReader(buffer.Bytes())), nil + case *charm.BundleArchive: + path = c.Path + case *charm.CharmArchive: + path = c.Path + default: + return nil, errgo.Newf("cannot get the archive for charm type %T", c) + } + file, err := os.Open(path) + if err != nil { + return nil, errgo.Mask(err) + } + return file, nil +} + +type nopCloserReadSeeker struct { + io.ReadSeeker +} + +func (nopCloserReadSeeker) Close() error { + return nil +} + +// nopCloser returns a blobstore.ReadSeekCloser with a no-op Close method +// wrapping the provided ReadSeeker r. +func nopCloser(r io.ReadSeeker) blobstore.ReadSeekCloser { + return nopCloserReadSeeker{r} +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,267 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "bytes" + "encoding/json" + "fmt" + "math/rand" + "net/http" + "net/http/httptest" + "sort" + "strings" + "time" + + "github.com/juju/utils" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2" + + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + appver "gopkg.in/juju/charmstore.v5-unstable/version" +) + +// GET /debug/info . +func serveDebugInfo(http.Header, *http.Request) (interface{}, error) { + return appver.VersionInfo, nil +} + +// GET /debug/check. 
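+//
+// On success the handler responds with a JSON object mapping each check
+// name to "OK", for example (editorial illustration):
+//
+//	{"elasticsearch": "OK", "mongodb": "OK"}
+//
+// If any check fails, an error is returned instead, listing every check
+// result sorted by check name.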
+func debugCheck(checks map[string]func() error) http.Handler { + return router.HandleJSON(func(http.Header, *http.Request) (interface{}, error) { + n := len(checks) + type result struct { + name string + err error + } + c := make(chan result) + for name, check := range checks { + name, check := name, check + go func() { + c <- result{name: name, err: check()} + }() + } + results := make(map[string]string, n) + var failed bool + for ; n > 0; n-- { + res := <-c + if res.err == nil { + results[res.name] = "OK" + } else { + failed = true + results[res.name] = res.err.Error() + } + } + if failed { + keys := make([]string, 0, len(results)) + for k := range results { + keys = append(keys, k) + } + sort.Strings(keys) + msgs := make([]string, len(results)) + for i, k := range keys { + msgs[i] = fmt.Sprintf("[%s: %s]", k, results[k]) + } + return nil, errgo.Newf("check failure: %s", strings.Join(msgs, " ")) + } + return results, nil + }) +} + +func checkDB(db *mgo.Database) func() error { + return func() error { + s := db.Session.Copy() + s.SetSyncTimeout(500 * time.Millisecond) + defer s.Close() + return s.Ping() + } +} + +func checkES(si *SearchIndex) func() error { + if si == nil || si.Database == nil { + return func() error { + return nil + } + } + return func() error { + _, err := si.Health() + return err + } +} + +// GET /debug/fullcheck +func debugFullCheck(hnd http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + code := http.StatusInternalServerError + resp := new(bytes.Buffer) + defer func() { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.WriteHeader(code) + resp.WriteTo(w) + }() + + fmt.Fprintln(resp, "Testing v4...") + + // test search + fmt.Fprintln(resp, "performing search...") + var sr params.SearchResponse + if err := get(hnd, "/v4/search?limit=2000", &sr); err != nil { + fmt.Fprintf(resp, "ERROR: search failed %s.\n", err) + return + } + if len(sr.Results) < 1 { + fmt.Fprintln(resp, "ERROR: no search results found.") + return + } + fmt.Fprintf(resp, "%d results found.\n", len(sr.Results)) + + // pick random charm + id := sr.Results[rand.Intn(len(sr.Results))].Id + fmt.Fprintf(resp, "using %s.\n", id) + + // test content + fmt.Fprintln(resp, "reading manifest...") + url := "/v4/" + id.Path() + "/meta/manifest" + fmt.Fprintln(resp, url) + var files []params.ManifestFile + if err := get(hnd, url, &files); err != nil { + fmt.Fprintf(resp, "ERROR: cannot retrieve manifest: %s.\n", err) + return + } + if len(files) == 0 { + fmt.Fprintln(resp, "ERROR: manifest empty.") + return + } + fmt.Fprintf(resp, "%d files found.\n", len(files)) + + // Choose a file to access + expectFile := "metadata.yaml" + if id.Series == "bundle" { + expectFile = "bundle.yaml" + } + var file params.ManifestFile + // default to metadata.yaml + for _, f := range files { + if f.Name == expectFile { + file = f + break + } + } + // find a random file + for i := 0; i < 5; i++ { + f := files[rand.Intn(len(files))] + if f.Size <= 16*1024 { + file = f + break + } + } + fmt.Fprintf(resp, "using %s.\n", file.Name) + + // read the file + fmt.Fprintln(resp, "reading file...") + url = "/v4/" + id.Path() + "/archive/" + file.Name + fmt.Fprintln(resp, url) + var buf []byte + if err := get(hnd, url, &buf); err != nil { + fmt.Fprintf(resp, "ERROR: cannot retrieve file: %s.\n", err) + return + } + if int64(len(buf)) != file.Size { + fmt.Fprintf(resp, "ERROR: incorrect file size, expected: %d, received %d.\n", file.Size, len(buf)) + return + } + fmt.Fprintf(resp, 
"%d bytes received.\n", len(buf)) + + // check if the charm is promulgated + fmt.Fprintln(resp, "checking promulgated...") + url = "/v4/" + id.Path() + "/meta/promulgated" + fmt.Fprintln(resp, url) + var promulgated params.PromulgatedResponse + if err := get(hnd, url, &promulgated); err != nil { + fmt.Fprintf(resp, "ERROR: cannot retrieve promulgated: %s.\n", err) + return + } + if promulgated.Promulgated != (id.User == "") { + fmt.Fprintf(resp, "ERROR: incorrect promulgated response, expected: %v, received %v.\n", (id.User == ""), promulgated.Promulgated) + return + } + fmt.Fprintf(resp, "promulgated: %v.\n", promulgated.Promulgated) + + // check expand-id + fmt.Fprintln(resp, "checking expand-id...") + url = "/v4/" + id.Path() + "/expand-id" + fmt.Fprintln(resp, url) + var expanded []params.ExpandedId + if err := get(hnd, url, &expanded); err != nil { + fmt.Fprintf(resp, "ERROR: cannot expand-id: %s.\n", err) + return + } + if len(expanded) == 0 { + fmt.Fprintln(resp, "ERROR: expand-id returned 0 results") + return + } + fmt.Fprintf(resp, "%d ids found.\n", len(expanded)) + + code = http.StatusOK + }) +} + +func newServiceDebugHandler(p *Pool, c ServerParams, hnd http.Handler) http.Handler { + mux := router.NewServeMux() + mux.Handle("/info", router.HandleJSON(serveDebugInfo)) + mux.Handle("/check", debugCheck(map[string]func() error{ + "mongodb": checkDB(p.db.Database), + "elasticsearch": checkES(p.es), + })) + mux.Handle("/fullcheck", authorized(c, debugFullCheck(hnd))) + return mux +} + +func authorized(c ServerParams, h http.Handler) http.Handler { + return router.HandleErrors(func(w http.ResponseWriter, r *http.Request) error { + u, p, err := utils.ParseBasicAuthHeader(r.Header) + if err != nil { + return errgo.WithCausef(err, params.ErrUnauthorized, "") + } + if u != c.AuthUsername || p != c.AuthPassword { + return errgo.WithCausef(nil, params.ErrUnauthorized, "username or password mismatch") + } + h.ServeHTTP(w, r) + return nil + }) +} + +func get(h http.Handler, url string, body interface{}) error { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return errgo.Notef(err, "cannot create request") + } + w := httptest.NewRecorder() + h.ServeHTTP(w, req) + if w.Code != http.StatusOK { + if w.HeaderMap.Get("Content-Type") != "application/json" { + return errgo.Newf("bad status %d", w.Code) + } + var e params.Error + if err := json.Unmarshal(w.Body.Bytes(), &e); err != nil { + return errgo.Notef(err, "cannot decode error") + } + return &e + } + if body == nil { + return nil + } + if bytes, ok := body.(*[]byte); ok { + *bytes = w.Body.Bytes() + return nil + } + if w.HeaderMap.Get("Content-Type") == "application/json" { + if err := json.Unmarshal(w.Body.Bytes(), body); err != nil { + return errgo.Notef(err, "cannot decode body") + } + return nil + } + return errgo.Newf("cannot decode body") +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/debug_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,100 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "errors" + "net/http" + + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + appver "gopkg.in/juju/charmstore.v5-unstable/version" +) + +type debugSuite struct{} + +var _ = gc.Suite(&debugSuite{}) + +var debugCheckTests = []struct { + about string + checks map[string]func() error + expectStatus int + expectBody interface{} +}{{ + about: "no checks", + expectStatus: http.StatusOK, + expectBody: map[string]string{}, +}, { + about: "passing check", + checks: map[string]func() error{ + "pass": func() error { return nil }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]string{ + "pass": "OK", + }, +}, { + about: "failing check", + checks: map[string]func() error{ + "fail": func() error { return errors.New("test fail") }, + }, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "check failure: [fail: test fail]", + }, +}, { + about: "many pass", + checks: map[string]func() error{ + "pass1": func() error { return nil }, + "pass2": func() error { return nil }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]string{ + "pass1": "OK", + "pass2": "OK", + }, +}, { + about: "many fail", + checks: map[string]func() error{ + "fail1": func() error { return errors.New("test fail1") }, + "fail2": func() error { return errors.New("test fail2") }, + }, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "check failure: [fail1: test fail1] [fail2: test fail2]", + }, +}, { + about: "pass and fail", + checks: map[string]func() error{ + "pass": func() error { return nil }, + "fail": func() error { return errors.New("test fail") }, + }, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "check failure: [fail: test fail] [pass: OK]", + }, +}} + +func (s *debugSuite) TestDebugCheck(c *gc.C) { + for i, test := range debugCheckTests { + c.Logf("%d. %s", i, test.about) + hnd := debugCheck(test.checks) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: hnd, + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *debugSuite) TestDebugInfo(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: router.HandleJSON(serveDebugInfo), + ExpectStatus: http.StatusOK, + ExpectBody: appver.VersionInfo, + }) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/elasticsearch.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,351 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
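+//
+// Editorial note: this file embeds the Elasticsearch index settings and
+// the "entity" document mapping as raw JSON strings, which mustParseJSON
+// below validates once at package initialization.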
+ +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import "encoding/json" + +var ( + esIndex = mustParseJSON(esIndexJSON) + esMapping = mustParseJSON(esMappingJSON) +) + +const esSettingsVersion = 7 + +func mustParseJSON(s string) interface{} { + var j json.RawMessage + if err := json.Unmarshal([]byte(s), &j); err != nil { + panic(err) + } + return &j +} + +const esIndexJSON = ` +{ + "settings": { + "number_of_shards": 1, + "analysis": { + "filter": { + "n3_20grams_filter": { + "type": "nGram", + "min_gram": 3, + "max_gram": 20 + } + }, + "analyzer": { + "n3_20grams": { + "type": "custom", + "tokenizer": "standard", + "filter": [ + "lowercase", + "n3_20grams_filter" + ] + } + } + } + } +} +` + +const esMappingJSON = ` +{ + "entity" : { + "dynamic" : "false", + "properties" : { + "URL" : { + "type" : "multi_field", + "fields" : { + "URL" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "ngrams" : { + "type" : "string", + "analyzer" : "n3_20grams", + "include_in_all" : false + } + } + }, + "PromulgatedURL" : { + "type" : "string", + "index": "not_analyzed", + "index_options" : "docs" + }, + "BaseURL" : { + "type" : "string", + "index": "not_analyzed", + "index_options" : "docs" + }, + "User" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Name" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Revision" : { + "type" : "integer", + "index" : "not_analyzed" + }, + "Series" : { + "type" : "multi_field", + "fields" : { + "Series" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "ngrams" : { + "type" : "string", + "analyzer" : "n3_20grams", + "include_in_all" : false + } + } + }, + "TotalDownloads": { + "type": "long" + }, + "BlobHash" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "UploadTime" : { + "type" : "date", + "format" : "dateOptionalTime" + }, + "CharmMeta" : { + "dynamic" : "false", + "properties" : { + "Name" : { + "type" : "multi_field", + "fields" : { + "Name" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "ngrams" : { + "type" : "string", + "analyzer" : "n3_20grams", + "include_in_all" : false + } + } + }, + "Summary" : { + "type" : "string" + }, + "Description" : { + "type" : "string" + }, + "Provides" : { + "dynamic" : "false", + "properties" : { + "Name" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Role" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Interface" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Scope" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + }, + "Requires" : { + "dynamic" : "false", + "properties" : { + "Name" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Role" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Interface" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Scope" : { + "type" : "string", + "index" : 
"not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + }, + "Peers" : { + "dynamic" : "false", + "properties" : { + "Name" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Role" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Interface" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Scope" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + }, + "Categories" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "Tags" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + }, + "charmactions" : { + "dynamic" : "false", + "properties" : { + "description" : { + "type" : "string" + }, + "action_name" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + }, + "CharmProvidedInterfaces" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "CharmRequiredInterfaces" : { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + + + "BundleData" : { + "type": "object", + "dynamic": "false", + "properties" : { + "Services" : { + "type": "object", + "dynamic": "false", + "properties": { + "Charm": { + "type" : "string", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "NumUnits": { + "type" : "integer", + "index": "not_analyzed" + } + } + }, + "Series" : { + "type" : "string" + }, + "Relations" : { + "type" : "string", + "index": "not_analyzed" + }, + "Tags" : { + "type" : "string", + "index": "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + }, + "BundleReadMe" : { + "type": "string", + "index": "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "BundleCharms": { + "type": "string", + "index": "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "BundleMachineCount": { + "type": "integer" + }, + "BundleUnitCount": { + "type": "integer" + }, + "TotalDownloads": { + "type": "long" + }, + "Public": { + "type": "boolean", + "index" : "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + }, + "ReadACLs" : { + "type" : "string", + "index": "not_analyzed", + "omit_norms" : true, + "index_options" : "docs" + } + } + } +} +` === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/export_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +// Copyright 2013, 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +var TimeToStamp = timeToStamp + +// StatsCacheEvictAll removes everything from the stats cache. 
+func StatsCacheEvictAll(s *Store) {
+	s.pool.statsCache.EvictAll()
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/hash.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/hash.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/hash.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,53 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// TODO frankban: remove this file after updating entities in the production db
+// with their SHA256 hash value. Entities are updated by running the cshash256
+// command.
+
+package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+// UpdateEntitySHA256 calculates and returns the SHA256 hash of the archive of
+// the given entity id. The entity document is then asynchronously updated with
+// the resulting hash. This method will be removed soon.
+func (s *Store) UpdateEntitySHA256(id *router.ResolvedURL) (string, error) {
+	r, _, _, err := s.OpenBlob(id)
+	if err != nil {
+		return "", errgo.Mask(err)
+	}
+	defer r.Close()
+	hash := sha256.New()
+	_, err = io.Copy(hash, r)
+	if err != nil {
+		return "", errgo.Notef(err, "cannot calculate sha256 of archive")
+	}
+	sum256 := fmt.Sprintf("%x", hash.Sum(nil))
+
+	// Update the entry asynchronously because it doesn't matter if it succeeds
+	// or fails, or if several instances of the charm store do it concurrently,
+	// and it doesn't need to be on the critical path for API endpoints.
+	s.Go(func(s *Store) {
+		UpdateEntitySHA256(s, id, sum256)
+	})
+
+	return sum256, nil
+}
+
+// UpdateEntitySHA256 updates the BlobHash256 entry for the entity.
+// It is defined as a variable so that it can be mocked in tests.
+// This function will be removed soon.
+var UpdateEntitySHA256 = func(store *Store, id *router.ResolvedURL, sum256 string) {
+	err := store.DB.Entities().UpdateId(&id.URL, bson.D{{"$set", bson.D{{"blobhash256", sum256}}}})
+	if err != nil && err != mgo.ErrNotFound {
+		logger.Errorf("cannot update sha256 of archive: %v", err)
+	}
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,192 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+	"gopkg.in/errgo.v1"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+)
+
+const (
+	migrationAddSupportedSeries mongodoc.MigrationName = "add supported series"
+	migrationAddDevelopment     mongodoc.MigrationName = "add development"
+	migrationAddDevelopmentACLs mongodoc.MigrationName = "add development acls"
+)
+
+// migrations holds all the migration functions that are executed in the order
+// they are defined when the charm store server is started. Each migration is
+// associated with a name that is used to check whether the migration has
+// already been run.
To introduce a new database migration, add the corresponding +// migration name and function to this list, and update the +// TestMigrateMigrationList test in migration_test.go adding the new name(s). +// Note that migration names must be unique across the list. +// +// A migration entry may have a nil migration function if the migration +// is obsolete. Obsolete migrations should never be removed entirely, +// otherwise the charmstore will see the old migrations in the table +// and refuse to start up because it thinks that it's running an old +// version of the charm store on a newer version of the database. +var migrations = []migration{{ + name: "entity ids denormalization", +}, { + name: "base entities creation", +}, { + name: "read acl creation", +}, { + name: "write acl creation", +}, { + name: migrationAddSupportedSeries, + migrate: addSupportedSeries, +}, { + name: migrationAddDevelopment, + migrate: addDevelopment, +}, { + name: migrationAddDevelopmentACLs, + migrate: addDevelopmentACLs, +}} + +// migration holds a migration function with its corresponding name. +type migration struct { + name mongodoc.MigrationName + migrate func(StoreDatabase) error +} + +// Migrate starts the migration process using the given database. +func migrate(db StoreDatabase) error { + // Retrieve already executed migrations. + executed, err := getExecuted(db) + if err != nil { + return errgo.Mask(err) + } + + // Explicitly create the collection in case there are no migrations + // so that the tests that expect the migrations collection to exist + // will pass. We ignore the error because we'll get one if the + // collection already exists and there's no special type or value + // for that (and if it's a genuine error, we'll catch the problem later + // anyway). + db.Migrations().Create(&mgo.CollectionInfo{}) + // Execute required migrations. + for _, m := range migrations { + if executed[m.name] || m.migrate == nil { + logger.Debugf("skipping already executed migration: %s", m.name) + continue + } + logger.Infof("starting migration: %s", m.name) + if err := m.migrate(db); err != nil { + return errgo.Notef(err, "error executing migration: %s", m.name) + } + if err := setExecuted(db, m.name); err != nil { + return errgo.Mask(err) + } + logger.Infof("migration completed: %s", m.name) + } + return nil +} + +func getExecuted(db StoreDatabase) (map[mongodoc.MigrationName]bool, error) { + // Retrieve the already executed migration names. + executed := make(map[mongodoc.MigrationName]bool) + var doc mongodoc.Migration + if err := db.Migrations().Find(nil).Select(bson.D{{"executed", 1}}).One(&doc); err != nil { + if err == mgo.ErrNotFound { + return executed, nil + } + return nil, errgo.Notef(err, "cannot retrieve executed migrations") + } + + names := make(map[mongodoc.MigrationName]bool, len(migrations)) + for _, m := range migrations { + names[m.name] = true + } + for _, name := range doc.Executed { + name := mongodoc.MigrationName(name) + // Check that the already executed migrations are known. + if !names[name] { + return nil, errgo.Newf("found unknown migration %q; running old charm store code on newer charm store database?", name) + } + // Collect the name of the executed migration. + executed[name] = true + } + return executed, nil +} + +// addSupportedSeries adds the supported-series field +// to entities that don't have it. Note that it does not +// need to work for multi-series charms because support +// for those has not been implemented before this migration. 
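+//
+// For example (editorial illustration), a charm entity stored as
+// "~who/utopic/rails-47" with no supportedseries field gains:
+//
+//	supportedseries: ["utopic"]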
+func addSupportedSeries(db StoreDatabase) error { + entities := db.Entities() + var entity mongodoc.Entity + iter := entities.Find(bson.D{{ + // Use the supportedseries field to collect not migrated entities. + "supportedseries", bson.D{{"$exists", false}}, + }, { + "series", bson.D{{"$ne", "bundle"}}, + }}).Select(bson.D{{"_id", 1}}).Iter() + defer iter.Close() + + for iter.Next(&entity) { + logger.Infof("updating %s", entity.URL) + if err := entities.UpdateId(entity.URL, bson.D{{ + "$set", bson.D{ + {"supportedseries", []string{entity.URL.Series}}, + }, + }}); err != nil { + return errgo.Notef(err, "cannot denormalize entity id %s", entity.URL) + } + } + if err := iter.Close(); err != nil { + return errgo.Notef(err, "cannot iterate entities") + } + return nil +} + +// addDevelopment adds the Development field to all entities on which that +// field is not present. +func addDevelopment(db StoreDatabase) error { + logger.Infof("adding development field to all entities") + if _, err := db.Entities().UpdateAll(bson.D{{ + "development", bson.D{{"$exists", false}}, + }}, bson.D{{ + "$set", bson.D{{"development", false}}, + }}); err != nil { + return errgo.Notef(err, "cannot add development field to all entities") + } + return nil +} + +// addDevelopmentACLs sets up ACLs on base entities for development revisions. +func addDevelopmentACLs(db StoreDatabase) error { + logger.Infof("adding development ACLs to all base entities") + baseEntities := db.BaseEntities() + var baseEntity mongodoc.BaseEntity + iter := baseEntities.Find(bson.D{{ + "developmentacls", bson.D{{"$exists", false}}, + }}).Select(bson.D{{"_id", 1}, {"acls", 1}}).Iter() + defer iter.Close() + for iter.Next(&baseEntity) { + if err := baseEntities.UpdateId(baseEntity.URL, bson.D{{ + "$set", bson.D{{"developmentacls", baseEntity.ACLs}}, + }}); err != nil { + return errgo.Notef(err, "cannot add development ACLs to base entity id %s", baseEntity.URL) + } + } + if err := iter.Close(); err != nil { + return errgo.Notef(err, "cannot iterate base entities") + } + return nil +} + +func setExecuted(db StoreDatabase, name mongodoc.MigrationName) error { + if _, err := db.Migrations().Upsert(nil, bson.D{{ + "$addToSet", bson.D{{"executed", name}}, + }}); err != nil { + return errgo.Notef(err, "cannot add %s to executed migrations", name) + } + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/migrations_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,544 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
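+//
+// Editorial note: these tests simulate documents as they existed before
+// a given migration by tracking, per migration name, the exact sets of
+// entity and base-entity fields in use at that point; see the field maps
+// defined below.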
+
+package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+	"net/http"
+	"sync"
+
+	jujutesting "github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+)
+
+type migrationsSuite struct {
+	jujutesting.IsolatedMgoSuite
+	db       StoreDatabase
+	executed []mongodoc.MigrationName
+}
+
+var _ = gc.Suite(&migrationsSuite{})
+
+func (s *migrationsSuite) SetUpTest(c *gc.C) {
+	s.IsolatedMgoSuite.SetUpTest(c)
+	s.db = StoreDatabase{s.Session.DB("migration-testing")}
+	s.executed = nil
+}
+
+const (
+	beforeAllMigrations mongodoc.MigrationName = "start"
+	afterAllMigrations  mongodoc.MigrationName = "end"
+)
+
+var (
+	// migrationEntityFields holds the fields added to mongodoc.Entity,
+	// keyed by the migration step that added them.
+	migrationEntityFields = map[mongodoc.MigrationName][]string{
+		migrationAddSupportedSeries: {"supportedseries"},
+		migrationAddDevelopment:     {"development"},
+	}
+
+	// migrationBaseEntityFields holds the fields added to mongodoc.BaseEntity,
+	// keyed by the migration step that added them.
+	migrationBaseEntityFields = map[mongodoc.MigrationName][]string{
+		migrationAddDevelopmentACLs: {"developmentacls"},
+	}
+
+	// initialEntityFields holds all the mongodoc.Entity fields
+	// at the dawn of migration time.
+	initialEntityFields = []string{
+		"_id",
+		"baseurl",
+		"user",
+		"name",
+		"revision",
+		"series",
+		"blobhash",
+		"blobhash256",
+		"size",
+		"blobname",
+		"uploadtime",
+		"extrainfo",
+		"charmmeta",
+		"charmconfig",
+		"charmactions",
+		"charmprovidedinterfaces",
+		"charmrequiredinterfaces",
+		"bundledata",
+		"bundlereadme",
+		"bundlemachinecount",
+		"bundleunitcount",
+		"contents",
+		"promulgated-url",
+		"promulgated-revision",
+	}
+
+	// initialBaseEntityFields holds all the mongodoc.BaseEntity fields
+	// at the dawn of migration time.
+	initialBaseEntityFields = []string{
+		"_id",
+		"user",
+		"name",
+		"public",
+		"acls",
+		"promulgated",
+	}
+
+	// entityFields holds all the fields in mongodoc.Entity just
+	// before the named migration (the key) has been applied.
+	entityFields = make(map[mongodoc.MigrationName][]string)
+
+	// baseEntityFields holds all the fields in mongodoc.BaseEntity just
+	// before the named migration (the key) has been applied.
+	baseEntityFields = make(map[mongodoc.MigrationName][]string)
+
+	// postMigrationEntityFields holds all the fields in mongodoc.Entity just
+	// after the named migration (the key) has been applied.
+	postMigrationEntityFields = make(map[mongodoc.MigrationName][]string)
+
+	// postMigrationBaseEntityFields holds all the fields in
+	// mongodoc.BaseEntity just after the named migration (the key) has been
+	// applied.
+	postMigrationBaseEntityFields = make(map[mongodoc.MigrationName][]string)
+)
+
+func init() {
+	// Initialize entityFields and baseEntityFields using the information
+	// specified in migrationEntityFields and migrationBaseEntityFields.
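+	// (Editorial note: fields accumulate in migration order. For each
+	// migration, entityFields and baseEntityFields record the fields
+	// that existed just before it ran, while the postMigration maps
+	// record the fields present just after it completed.)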
+ allEntityFields := initialEntityFields + allBaseEntityFields := initialBaseEntityFields + entityFields[beforeAllMigrations] = allEntityFields + baseEntityFields[beforeAllMigrations] = allBaseEntityFields + postMigrationEntityFields[beforeAllMigrations] = allEntityFields + postMigrationBaseEntityFields[beforeAllMigrations] = allBaseEntityFields + for _, m := range migrations { + entityFields[m.name] = allEntityFields + allEntityFields = append(allEntityFields, migrationEntityFields[m.name]...) + postMigrationEntityFields[m.name] = allEntityFields + baseEntityFields[m.name] = allBaseEntityFields + allBaseEntityFields = append(allBaseEntityFields, migrationBaseEntityFields[m.name]...) + postMigrationBaseEntityFields[m.name] = allBaseEntityFields + } + entityFields[afterAllMigrations] = allEntityFields + baseEntityFields[afterAllMigrations] = allBaseEntityFields + postMigrationEntityFields[afterAllMigrations] = allEntityFields + postMigrationBaseEntityFields[afterAllMigrations] = allBaseEntityFields +} + +func (s *migrationsSuite) newServer(c *gc.C) error { + apiHandler := func(p *Pool, config ServerParams) HTTPCloseHandler { + return nopCloseHandler{http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {})} + } + srv, err := NewServer(s.db.Database, nil, serverParams, map[string]NewAPIHandlerFunc{ + "version1": apiHandler, + }) + if err == nil { + srv.Close() + } + return err +} + +// patchMigrations patches the charm store migration list with the given migrations. +func (s *migrationsSuite) patchMigrations(c *gc.C, ms []migration) { + original := migrations + s.AddCleanup(func(*gc.C) { + migrations = original + }) + migrations = ms +} + +// makeMigrations generates default migrations using the given names, and then +// patches the charm store migration list with the generated ones. +func (s *migrationsSuite) makeMigrations(c *gc.C, names ...mongodoc.MigrationName) { + ms := make([]migration, len(names)) + for i, name := range names { + name := name + ms[i] = migration{ + name: name, + migrate: func(StoreDatabase) error { + s.executed = append(s.executed, name) + return nil + }, + } + } + s.patchMigrations(c, ms) +} + +func (s *migrationsSuite) TestMigrate(c *gc.C) { + // Create migrations. + names := []mongodoc.MigrationName{"migr-1", "migr-2"} + s.makeMigrations(c, names...) + + // Start the server. + err := s.newServer(c) + c.Assert(err, gc.IsNil) + + // The two migrations have been correctly executed in order. + c.Assert(s.executed, jc.DeepEquals, names) + + // The migration document in the db reports that the execution is done. + s.checkExecuted(c, names...) + + // Restart the server again and check migrations this time are not run. + err = s.newServer(c) + c.Assert(err, gc.IsNil) + c.Assert(s.executed, jc.DeepEquals, names) + s.checkExecuted(c, names...) +} + +func (s *migrationsSuite) TestMigrateNoMigrations(c *gc.C) { + // Empty the list of migrations. + s.makeMigrations(c) + + // Start the server. + err := s.newServer(c) + c.Assert(err, gc.IsNil) + + // No migrations were executed. + c.Assert(s.executed, gc.HasLen, 0) + s.checkExecuted(c) +} + +func (s *migrationsSuite) TestMigrateNewMigration(c *gc.C) { + // Simulate two migrations were already run. + err := setExecuted(s.db, "migr-1") + c.Assert(err, gc.IsNil) + err = setExecuted(s.db, "migr-2") + c.Assert(err, gc.IsNil) + + // Create migrations. + s.makeMigrations(c, "migr-1", "migr-2", "migr-3") + + // Start the server. + err = s.newServer(c) + c.Assert(err, gc.IsNil) + + // Only one migration has been executed. 
+ c.Assert(s.executed, jc.DeepEquals, []mongodoc.MigrationName{"migr-3"}) + + // The migration document in the db reports that the execution is done. + s.checkExecuted(c, "migr-1", "migr-2", "migr-3") +} + +func (s *migrationsSuite) TestMigrateErrorUnknownMigration(c *gc.C) { + // Simulate that a migration was already run. + err := setExecuted(s.db, "migr-1") + c.Assert(err, gc.IsNil) + + // Create migrations, without including the already executed one. + s.makeMigrations(c, "migr-2", "migr-3") + + // Start the server. + err = s.newServer(c) + c.Assert(err, gc.ErrorMatches, `database migration failed: found unknown migration "migr-1"; running old charm store code on newer charm store database\?`) + + // No new migrations were executed. + c.Assert(s.executed, gc.HasLen, 0) + s.checkExecuted(c, "migr-1") +} + +func (s *migrationsSuite) TestMigrateErrorExecutingMigration(c *gc.C) { + ms := []migration{{ + name: "migr-1", + migrate: func(StoreDatabase) error { + return nil + }, + }, { + name: "migr-2", + migrate: func(StoreDatabase) error { + return errgo.New("bad wolf") + }, + }, { + name: "migr-3", + migrate: func(StoreDatabase) error { + return nil + }, + }} + s.patchMigrations(c, ms) + + // Start the server. + err := s.newServer(c) + c.Assert(err, gc.ErrorMatches, "database migration failed: error executing migration: migr-2: bad wolf") + + // Only one migration has been executed. + s.checkExecuted(c, "migr-1") +} + +func (s *migrationsSuite) TestMigrateMigrationNames(c *gc.C) { + names := make(map[mongodoc.MigrationName]bool, len(migrations)) + for _, m := range migrations { + c.Assert(names[m.name], jc.IsFalse, gc.Commentf("multiple migrations named %q", m.name)) + names[m.name] = true + } +} + +func (s *migrationsSuite) TestMigrateMigrationList(c *gc.C) { + // When adding migration, update the list below, but never remove existing + // migrations. + existing := []string{} + for i, name := range existing { + m := migrations[i] + c.Assert(m.name, gc.Equals, name) + } +} + +func (s *migrationsSuite) TestMigrateParallelMigration(c *gc.C) { + // This test uses real migrations to check they are idempotent and works + // well when done in parallel, for example when multiple charm store units + // are deployed together. + + // Prepare a database for the migration. + e1 := &mongodoc.Entity{ + URL: charm.MustParseURL("~charmers/trusty/django-42"), + PromulgatedURL: charm.MustParseURL("trusty/django-3"), + Size: 12, + } + denormalizeEntity(e1) + s.insertEntity(c, e1, beforeAllMigrations) + + e2 := &mongodoc.Entity{ + URL: charm.MustParseURL("~who/utopic/rails-47"), + Size: 13, + } + denormalizeEntity(e2) + s.insertEntity(c, e2, beforeAllMigrations) + + // Run the migrations in parallel. + var wg sync.WaitGroup + wg.Add(5) + errors := make(chan error, 5) + for i := 0; i < 5; i++ { + go func() { + errors <- s.newServer(c) + wg.Done() + }() + } + wg.Wait() + close(errors) + + // Check the server is correctly started in all the units. + for err := range errors { + c.Assert(err, gc.IsNil) + } + + // Ensure entities have been updated correctly by all the migrations. + // TODO when there are migrations, update e1 and e2 accordingly. 
+ s.checkCount(c, s.db.Entities(), 2) + s.checkEntity(c, e1, afterAllMigrations) + s.checkEntity(c, e2, afterAllMigrations) +} + +func (s *migrationsSuite) TestMigrateAddSupportedSeries(c *gc.C) { + s.patchMigrations(c, getMigrations(migrationAddSupportedSeries)) + + entities := []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/trusty/django-42"), + PromulgatedURL: charm.MustParseURL("trusty/django-3"), + Size: 12, + }, { + URL: charm.MustParseURL("~who/utopic/rails-47"), + Size: 13, + }, { + URL: charm.MustParseURL("~who/bundle/something-47"), + Size: 13, + }} + for _, e := range entities { + denormalizeEntity(e) + s.insertEntity(c, e, migrationAddSupportedSeries) + } + + // Start the server. + err := s.newServer(c) + c.Assert(err, gc.IsNil) + + // Ensure entities have been updated correctly. + s.checkCount(c, s.db.Entities(), len(entities)) + for _, e := range entities { + s.checkEntity(c, e, migrationAddSupportedSeries) + } +} + +func (s *migrationsSuite) TestMigrateAddDevelopment(c *gc.C) { + s.patchMigrations(c, getMigrations(migrationAddDevelopment)) + + // Populate the database with some entities. + entities := []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/trusty/django-42"), + PromulgatedURL: charm.MustParseURL("trusty/django-3"), + Size: 47, + }, { + URL: charm.MustParseURL("~who/utopic/rails-47"), + Size: 48, + }, { + URL: charm.MustParseURL("~who/bundle/solution-0"), + Size: 1, + }} + for _, e := range entities { + denormalizeEntity(e) + s.insertEntity(c, e, migrationAddDevelopment) + } + + // Start the server. + err := s.newServer(c) + c.Assert(err, gc.IsNil) + + // Ensure entities have been updated correctly. + s.checkCount(c, s.db.Entities(), len(entities)) + for _, e := range entities { + var rawEntity map[string]interface{} + err := s.db.Entities().FindId(e.URL).One(&rawEntity) + c.Assert(err, gc.IsNil) + v, ok := rawEntity["development"] + c.Assert(ok, jc.IsTrue, gc.Commentf("development field not present in entity %s", rawEntity["_id"])) + c.Assert(v, jc.IsFalse, gc.Commentf("development field unexpectedly not false in entity %s", rawEntity["_id"])) + } +} + +func (s *migrationsSuite) TestMigrateAddDevelopmentACLs(c *gc.C) { + s.patchMigrations(c, getMigrations(migrationAddDevelopmentACLs)) + + // Populate the database with some entities. + entities := []*mongodoc.BaseEntity{{ + URL: charm.MustParseURL("~charmers/django"), + Name: "django", + ACLs: mongodoc.ACL{ + Read: []string{"user", "group"}, + Write: []string{"user"}, + }, + }, { + URL: charm.MustParseURL("~who/rails"), + Name: "rails", + ACLs: mongodoc.ACL{ + Read: []string{"everyone"}, + Write: []string{}, + }, + }, { + URL: charm.MustParseURL("~who/mediawiki-scalable"), + Name: "mediawiki-scalable", + ACLs: mongodoc.ACL{ + Read: []string{"who"}, + Write: []string{"dalek"}, + }, + }} + for _, e := range entities { + s.insertBaseEntity(c, e, migrationAddDevelopmentACLs) + } + + // Start the server. + err := s.newServer(c) + c.Assert(err, gc.IsNil) + + // Ensure base entities have been updated correctly. 
+	s.checkCount(c, s.db.BaseEntities(), len(entities))
+	for _, e := range entities {
+		e.DevelopmentACLs = e.ACLs
+		s.checkBaseEntity(c, e, migrationAddDevelopmentACLs)
+	}
+}
+
+func (s *migrationsSuite) checkExecuted(c *gc.C, expected ...mongodoc.MigrationName) {
+	var obtained []mongodoc.MigrationName
+	var doc mongodoc.Migration
+	if err := s.db.Migrations().Find(nil).One(&doc); err != mgo.ErrNotFound {
+		c.Assert(err, gc.IsNil)
+		obtained = doc.Executed
+	}
+	c.Assert(obtained, jc.SameContents, expected)
+}
+
+func getMigrations(names ...mongodoc.MigrationName) (ms []migration) {
+	for _, name := range names {
+		for _, m := range migrations {
+			if m.name == name {
+				ms = append(ms, m)
+			}
+		}
+	}
+	return ms
+}
+
+func (s *migrationsSuite) checkCount(c *gc.C, coll *mgo.Collection, expectCount int) {
+	count, err := coll.Count()
+	c.Assert(err, gc.IsNil)
+	c.Assert(count, gc.Equals, expectCount)
+}
+
+// checkEntity checks that the entity stored in the database with the ID
+// expectEntity.URL is the same as expectEntity for all fields that exist
+// in the database following completion of the given migration.
+func (s *migrationsSuite) checkEntity(c *gc.C, expectEntity *mongodoc.Entity, name mongodoc.MigrationName) {
+	var entity mongodoc.Entity
+	err := s.db.Entities().FindId(expectEntity.URL).One(&entity)
+	c.Assert(err, gc.IsNil)
+	obtained := entityWithFields(c, &entity, postMigrationEntityFields[name])
+	expected := entityWithFields(c, expectEntity, postMigrationEntityFields[name])
+	c.Assert(obtained, jc.DeepEquals, expected)
+}
+
+// checkBaseEntity checks that the base entity stored in the database with
+// the ID expectEntity.URL is the same as expectEntity for all fields that
+// exist in the database following completion of the given migration.
+func (s *migrationsSuite) checkBaseEntity(c *gc.C, expectEntity *mongodoc.BaseEntity, name mongodoc.MigrationName) {
+	var entity mongodoc.BaseEntity
+	err := s.db.BaseEntities().FindId(expectEntity.URL).One(&entity)
+	c.Assert(err, gc.IsNil)
+	obtained := baseEntityWithFields(c, &entity, postMigrationBaseEntityFields[name])
+	expected := baseEntityWithFields(c, expectEntity, postMigrationBaseEntityFields[name])
+	c.Assert(obtained, jc.DeepEquals, expected)
+}
+
+// insertEntity inserts the given entity. The migration that the entity
+// is to be inserted for is specified in name; only fields that existed
+// prior to that migration will be inserted.
+func (s *migrationsSuite) insertEntity(c *gc.C, e *mongodoc.Entity, name mongodoc.MigrationName) {
+	err := s.db.Entities().Insert(entityWithFields(c, e, entityFields[name]))
+	c.Assert(err, gc.IsNil)
+}
+
+// insertBaseEntity inserts the given base entity. The migration that the
+// entity is to be inserted for is specified in name; only fields that existed
+// prior to that migration will be inserted.
+func (s *migrationsSuite) insertBaseEntity(c *gc.C, e *mongodoc.BaseEntity, name mongodoc.MigrationName) {
+	err := s.db.BaseEntities().Insert(baseEntityWithFields(c, e, baseEntityFields[name]))
+	c.Assert(err, gc.IsNil)
+}
+
+// entityWithFields creates a version of the specified mongodoc.Entity as
+// it would appear if it only contained the specified fields. This is to
+// simulate previous versions of documents in the database.
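+// For example (an illustrative sketch; the bson field names are
+// assumptions of the example):
+//
+//	raw := entityWithFields(c, e, []string{"_id", "baseurl", "name"})
+//
+// would return a map holding only those three fields of e, much as the
+// document would have looked before later fields were added.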
+func entityWithFields(c *gc.C, e *mongodoc.Entity, includeFields []string) map[string]interface{} {
+	data, err := bson.Marshal(e)
+	c.Assert(err, gc.IsNil)
+	return withFields(c, data, includeFields)
+}
+
+// baseEntityWithFields creates a version of the specified mongodoc.BaseEntity
+// as it would appear if it only contained the specified fields. This is to
+// simulate previous versions of documents in the database.
+func baseEntityWithFields(c *gc.C, e *mongodoc.BaseEntity, includeFields []string) map[string]interface{} {
+	data, err := bson.Marshal(e)
+	c.Assert(err, gc.IsNil)
+	return withFields(c, data, includeFields)
+}
+
+func withFields(c *gc.C, data []byte, includeFields []string) (rawEntity map[string]interface{}) {
+	err := bson.Unmarshal(data, &rawEntity)
+	c.Assert(err, gc.IsNil)
+loop:
+	for k := range rawEntity {
+		for _, inc := range includeFields {
+			if inc == k {
+				continue loop
+			}
+		}
+		delete(rawEntity, k)
+	}
+	return rawEntity
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/package_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+	"testing"
+
+	jujutesting "github.com/juju/testing"
+)
+
+func TestPackage(t *testing.T) {
+	jujutesting.MgoTestPackage(t, nil)
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,815 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/json"
+	"strings"
+	"time"
+
+	"github.com/juju/utils"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/series"
+)
+
+type SearchIndex struct {
+	*elasticsearch.Database
+	Index string
+}
+
+const typeName = "entity"
+
+// seriesBoost defines how much the results for each
+// series will be boosted. Series are currently ranked in
+// reverse order of LTS releases, followed by the latest
+// non-LTS release, followed by everything else.
+var seriesBoost = func() map[string]float64 {
+	m := make(map[string]float64)
+	for k, v := range series.Series {
+		if !v.SearchIndex {
+			continue
+		}
+		m[k] = v.SearchBoost
+	}
+	return m
+}()
+
+// SearchDoc is a mongodoc.Entity with additional fields useful for searching.
+// This is the document that is stored in the search index.
+type SearchDoc struct {
+	*mongodoc.Entity
+	TotalDownloads int64
+	ReadACLs       []string
+	Series         []string
+}
+
+// UpdateSearchAsync will update the search record for the entity
+// reference r in the background.
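+//
+// A minimal usage sketch (rurl stands for a previously resolved URL and
+// is an assumption of the example):
+//
+//	store.UpdateSearchAsync(rurl)
+//
+// The call returns immediately; any indexing failure is logged rather
+// than returned to the caller.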
+func (s *Store) UpdateSearchAsync(r *router.ResolvedURL) {
+	s.Go(func(s *Store) {
+		if err := s.UpdateSearch(r); err != nil {
+			logger.Errorf("cannot update search record for %v: %s", r, err)
+		}
+	})
+}
+
+// UpdateSearch updates the search record for the entity reference r.
+// The search index only includes the latest revision of each entity so
+// the latest revision of the charm specified by r will be indexed.
+func (s *Store) UpdateSearch(r *router.ResolvedURL) error {
+	if s.ES == nil || s.ES.Database == nil {
+		return nil
+	}
+	if r.Development || r.URL.Series != "" && !series.Series[r.URL.Series].SearchIndex {
+		return nil
+	}
+
+	query := s.DB.Entities().Find(bson.D{
+		{"user", r.URL.User},
+		{"name", r.URL.Name},
+		{"series", r.URL.Series},
+	}).Sort("-revision")
+	var entity mongodoc.Entity
+	if err := query.One(&entity); err != nil {
+		if err == mgo.ErrNotFound {
+			return errgo.WithCausef(nil, params.ErrNotFound, "entity not found %s", r)
+		}
+		return errgo.Notef(err, "cannot get %s", r)
+	}
+	baseEntity, err := s.FindBaseEntity(entity.BaseURL)
+	if err != nil {
+		return errgo.Notef(err, "cannot get %s", entity.BaseURL)
+	}
+	if err := s.updateSearchEntity(&entity, baseEntity); err != nil {
+		return errgo.Notef(err, "cannot update search record for %q", entity.URL)
+	}
+	return nil
+}
+
+// UpdateSearchBaseURL updates the search record for all entities with
+// the specified base URL. It must be called whenever the entry for the
+// given URL in the BaseEntities collection has changed.
+func (s *Store) UpdateSearchBaseURL(baseURL *charm.URL) error {
+	if s.ES == nil || s.ES.Database == nil {
+		return nil
+	}
+	if baseURL.Series != "" {
+		return errgo.New("base url cannot contain series")
+	}
+	if baseURL.Revision != -1 {
+		return errgo.New("base url cannot contain revision")
+	}
+	// From the entities with the specified base URL find the latest revision in
+	// each of the available series.
+	//
+	// Note: It is possible to return the complete entity here and save some
+	// database round trips. Unfortunately the version of mongoDB we support
+	// (2.4) would require every field to be enumerated in this query, which
+	// would make it too fragile.
+	iter := s.DB.Entities().Pipe([]bson.D{
+		{{"$match", bson.D{{"baseurl", baseURL}, {"development", false}}}},
+		{{"$sort", bson.D{{"revision", 1}}}},
+		{{"$group", bson.D{
+			{"_id", "$series"},
+			{"url", bson.D{{"$last", "$_id"}}},
+		}}},
+	}).Iter()
+	defer iter.Close()
+	var result struct {
+		URL *charm.URL
+	}
+	for iter.Next(&result) {
+		if result.URL.Series != "" && !series.Series[result.URL.Series].SearchIndex {
+			continue
+		}
+		if err := s.UpdateSearch(&router.ResolvedURL{URL: *result.URL, PromulgatedRevision: -1}); err != nil {
+			return errgo.Notef(err, "cannot update search record for %q", result.URL)
+		}
+	}
+	if err := iter.Close(); err != nil {
+		return errgo.Mask(err)
+	}
+	return nil
+}
+
+func (s *Store) updateSearchEntity(entity *mongodoc.Entity, baseEntity *mongodoc.BaseEntity) error {
+	doc, err := s.searchDocFromEntity(entity, baseEntity)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	if err := s.ES.update(doc); err != nil {
+		return errgo.Notef(err, "cannot update search index")
+	}
+	return nil
+}
+
+// UpdateSearchFields updates the search record for the entity reference r
+// with the updated values in fields.
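+//
+// For example, a caller refreshing the legacy download stats might
+// write (an illustrative sketch; rurl and stats are assumed values):
+//
+//	err := store.UpdateSearchFields(rurl, map[string]interface{}{
+//		"extrainfo.legacy-download-stats": stats,
+//	})
+//
+// Field keys other than the ones checked in the function body leave
+// the search index untouched.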
+func (s *Store) UpdateSearchFields(r *router.ResolvedURL, fields map[string]interface{}) error {
+	if s.ES == nil || s.ES.Database == nil {
+		return nil
+	}
+	var needUpdate bool
+	for k := range fields {
+		// Add any additional fields here that should update the search index.
+		if k == "extrainfo.legacy-download-stats" {
+			needUpdate = true
+		}
+	}
+	if !needUpdate {
+		return nil
+	}
+	if err := s.UpdateSearch(r); err != nil {
+		return errgo.Mask(err)
+	}
+	return nil
+}
+
+// searchDocFromEntity performs the processing required to convert a
+// mongodoc.Entity and the corresponding mongodoc.BaseEntity to a
+// SearchDoc for indexing.
+func (s *Store) searchDocFromEntity(e *mongodoc.Entity, be *mongodoc.BaseEntity) (*SearchDoc, error) {
+	doc := SearchDoc{Entity: e}
+	doc.ReadACLs = be.ACLs.Read
+	// There should only be one record for the promulgated entity, which
+	// should be the latest promulgated revision. In the case that the base
+	// entity is not promulgated assume that there is a later promulgated
+	// entity.
+	if !be.Promulgated {
+		doc.Entity.PromulgatedURL = nil
+		doc.Entity.PromulgatedRevision = -1
+	}
+	_, allRevisions, err := s.ArchiveDownloadCounts(EntityResolvedURL(e).PreferredURL(), false)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	doc.TotalDownloads = allRevisions.Total
+	if doc.Entity.Series == "bundle" {
+		doc.Series = []string{"bundle"}
+	} else {
+		doc.Series = doc.Entity.SupportedSeries
+	}
+	return &doc, nil
+}
+
+// update writes the given search document to elasticsearch, if
+// elasticsearch is configured, and removes any documents for previous
+// single-series charms that a multi-series charm document replaces.
+func (si *SearchIndex) update(doc *SearchDoc) error {
+	if si == nil || si.Database == nil {
+		return nil
+	}
+	err := si.PutDocumentVersionWithType(
+		si.Index,
+		typeName,
+		si.getID(doc.URL),
+		int64(doc.URL.Revision),
+		elasticsearch.ExternalGTE,
+		doc)
+	if err != nil && err != elasticsearch.ErrConflict {
+		return errgo.Mask(err)
+	}
+	if doc.Entity.URL.Series != "" {
+		return nil
+	}
+	// This document represents a multi-series charm. It might be
+	// replacing existing documents for previous single-series
+	// charms. Remove any documents that this replaces.
+	for _, series := range doc.Entity.SupportedSeries {
+		u := *doc.Entity.URL
+		u.Series = series
+		err := si.DeleteDocument(si.Index, typeName, si.getID(&u))
+		if err != nil && errgo.Cause(err) != elasticsearch.ErrNotFound {
+			u.Revision = -1
+			logger.Errorf("cannot remove old search document for %q: %s", u, err)
+		}
+	}
+	return nil
+}
+
+// getID returns an ID for the elasticsearch document based on the contents of the
+// mongoDB document. This is to allow elasticsearch documents to be replaced with
+// updated versions when charm data is changed.
+func (si *SearchIndex) getID(r *charm.URL) string {
+	ref := *r
+	ref.Revision = -1
+	b := sha1.Sum([]byte(ref.String()))
+	s := base64.URLEncoding.EncodeToString(b[:])
+	// Cut off any trailing = as there is no need for them and they will get URL escaped.
+	return strings.TrimRight(s, "=")
+}
+
+// search searches for matching entities in the configured elasticsearch index.
+// If there is no elasticsearch index configured then it will return an empty
+// SearchResult, as if no results were found.
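+//
+// A minimal sketch of a caller (the parameter values are assumptions
+// of the example):
+//
+//	res, err := si.search(SearchParams{
+//		Text:  "wordpress",
+//		Limit: 10,
+//	})
+//
+// res.Results then holds at most 10 resolved URLs ordered by relevance.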
+func (si *SearchIndex) search(sp SearchParams) (SearchResult, error) {
+	if si == nil || si.Database == nil {
+		return SearchResult{}, nil
+	}
+	q := createSearchDSL(sp)
+	q.Fields = append(q.Fields, "URL", "PromulgatedURL")
+	esr, err := si.Search(si.Index, typeName, q)
+	if err != nil {
+		return SearchResult{}, errgo.Mask(err)
+	}
+	r := SearchResult{
+		SearchTime: time.Duration(esr.Took) * time.Millisecond,
+		Total:      esr.Hits.Total,
+		Results:    make([]*router.ResolvedURL, 0, len(esr.Hits.Hits)),
+	}
+	for _, h := range esr.Hits.Hits {
+		urlStr := h.Fields.GetString("URL")
+		url, err := charm.ParseURL(urlStr)
+		if err != nil {
+			return SearchResult{}, errgo.Notef(err, "invalid URL in result %q", urlStr)
+		}
+		id := &router.ResolvedURL{
+			URL: *url,
+		}
+
+		if purlStr := h.Fields.GetString("PromulgatedURL"); purlStr != "" {
+			purl, err := charm.ParseURL(purlStr)
+			if err != nil {
+				return SearchResult{}, errgo.Notef(err, "invalid promulgated URL in result %q", purlStr)
+			}
+			id.PromulgatedRevision = purl.Revision
+		} else {
+			id.PromulgatedRevision = -1
+		}
+		r.Results = append(r.Results, id)
+	}
+	return r, nil
+}
+
+// GetSearchDocument retrieves the current search record for the charm
+// reference id.
+func (si *SearchIndex) GetSearchDocument(id *charm.URL) (*SearchDoc, error) {
+	if si == nil || si.Database == nil {
+		return &SearchDoc{}, nil
+	}
+	var s SearchDoc
+	err := si.GetDocument(si.Index, "entity", si.getID(id), &s)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve search document for %v", id)
+	}
+	return &s, nil
+}
+
+// version is a document that stores the structure information
+// in the elasticsearch database.
+type version struct {
+	Version int64
+	Index   string
+}
+
+const versionIndex = ".versions"
+const versionType = "version"
+
+// ensureIndexes makes sure that the required indexes exist and have the right
+// settings. If force is true then ensureIndexes will create new indexes irrespective
+// of the status of the current index.
+func (si *SearchIndex) ensureIndexes(force bool) error {
+	if si == nil || si.Database == nil {
+		return nil
+	}
+	old, dv, err := si.getCurrentVersion()
+	if err != nil {
+		return errgo.Notef(err, "cannot get current version")
+	}
+	if !force && old.Version >= esSettingsVersion {
+		return nil
+	}
+	index, err := si.newIndex()
+	if err != nil {
+		return errgo.Notef(err, "cannot create index")
+	}
+	new := version{
+		Version: esSettingsVersion,
+		Index:   index,
+	}
+	updated, err := si.updateVersion(new, dv)
+	if err != nil {
+		return errgo.Notef(err, "cannot update version")
+	}
+	if !updated {
+		// Update failed so delete the new index.
+		if err := si.DeleteIndex(index); err != nil {
+			return errgo.Notef(err, "cannot delete index")
+		}
+		return nil
+	}
+	// Update succeeded - update the aliases.
+	if err := si.Alias(index, si.Index); err != nil {
+		return errgo.Notef(err, "cannot create alias")
+	}
+	// Delete the old unused index.
+	if old.Index != "" {
+		if err := si.DeleteIndex(old.Index); err != nil {
+			return errgo.Notef(err, "cannot delete index")
+		}
+	}
+	return nil
+}
+
+// getCurrentVersion gets the version of the elasticsearch settings, if any,
+// that are deployed to elasticsearch.
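+//
+// Together with updateVersion this implements an optimistic concurrency
+// scheme: ensureIndexes reads the current version and its document
+// version here, creates a candidate index, then calls updateVersion
+// with the document version it read; if that compare-and-set fails,
+// another unit won the race and the candidate index is deleted.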
+func (si *SearchIndex) getCurrentVersion() (version, int64, error) {
+	var v version
+	d, err := si.GetESDocument(versionIndex, versionType, si.Index)
+	if err != nil && err != elasticsearch.ErrNotFound {
+		return version{}, 0, errgo.Notef(err, "cannot get settings version")
+	}
+	if d.Found {
+		if err := json.Unmarshal(d.Source, &v); err != nil {
+			return version{}, 0, errgo.Notef(err, "invalid version")
+		}
+	}
+	return v, d.Version, nil
+}
+
+// newIndex creates a new index with current elasticsearch settings.
+// The new index will have a randomized name based on si.Index.
+func (si *SearchIndex) newIndex() (string, error) {
+	uuid, err := utils.NewUUID()
+	if err != nil {
+		return "", errgo.Notef(err, "cannot create index name")
+	}
+	index := si.Index + "-" + uuid.String()
+	if err := si.PutIndex(index, esIndex); err != nil {
+		return "", errgo.Notef(err, "cannot set index settings")
+	}
+	if err := si.PutMapping(index, "entity", esMapping); err != nil {
+		return "", errgo.Notef(err, "cannot set index mapping")
+	}
+	return index, nil
+}
+
+// updateVersion attempts to atomically update the document specifying the
+// version of the elasticsearch settings. It returns true if the document
+// was updated. If the update could not be made atomically then it returns
+// false with a nil error; any other failure results in a non-nil error.
+func (si *SearchIndex) updateVersion(v version, dv int64) (bool, error) {
+	var err error
+	if dv == 0 {
+		err = si.CreateDocument(versionIndex, versionType, si.Index, v)
+	} else {
+		err = si.PutDocumentVersion(versionIndex, versionType, si.Index, dv, v)
+	}
+	if err != nil {
+		if errgo.Cause(err) == elasticsearch.ErrConflict {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// syncSearch populates the SearchIndex with all the data currently stored in
+// mongodb. If the SearchIndex is not configured then this method returns a nil error.
+func (s *Store) syncSearch() error {
+	if s.ES == nil || s.ES.Database == nil {
+		return nil
+	}
+	var result mongodoc.Entity
+	// Only get the IDs here, UpdateSearch will get the full document
+	// if it is in a series that is indexed.
+	iter := s.DB.Entities().Find(nil).Select(bson.M{"_id": 1, "promulgated-url": 1}).Iter()
+	defer iter.Close() // Make sure we always close on error.
+	for iter.Next(&result) {
+		rurl := EntityResolvedURL(&result)
+		if err := s.UpdateSearch(rurl); err != nil {
+			return errgo.Notef(err, "cannot index %s", rurl)
+		}
+	}
+	if err := iter.Close(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// SearchParams represents the search parameters used to search the store.
+type SearchParams struct {
+	// The text to use in the full text search query.
+	Text string
+	// If autocomplete is specified, the search will return only charms and
+	// bundles with a name that has text as a prefix.
+	AutoComplete bool
+	// Limit the search to items with attributes that match the specified filter value.
+	Filters map[string][]string
+	// Limit the number of returned items to the specified count.
+	Limit int
+	// Include the following metadata items in the search results.
+	Include []string
+	// Start the returned items at a specific offset.
+	Skip int
+	// ACL values to search in addition to everyone. ACL values may represent user names
+	// or group names.
+	Groups []string
+	// Admin searches will not filter on the ACL and will show results for all matching
+	// charms.
+	Admin bool
+	// Sort the returned items.
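+	// Fields are populated via ParseSortFields; for example a spec of
+	// "-downloads,name" sorts by download count descending, then by
+	// name ascending.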
+ sort []sortParam +} + +var allowedSortFields = map[string]bool{ + "name": true, + "owner": true, + "series": true, + "downloads": true, +} + +func (sp *SearchParams) ParseSortFields(f ...string) error { + for _, s := range f { + for _, s := range strings.Split(s, ",") { + var sort sortParam + if strings.HasPrefix(s, "-") { + sort.Order = sortDescending + s = s[1:] + } + if !allowedSortFields[s] { + return errgo.Newf("unrecognized sort parameter %q", s) + } + sort.Field = s + sp.sort = append(sp.sort, sort) + } + } + + return nil +} + +// sortOrder defines the order in which a field should be sorted. +type sortOrder int + +const ( + sortAscending sortOrder = iota + sortDescending +) + +// sortParam represents a field and direction on which results should be sorted. +type sortParam struct { + Field string + Order sortOrder +} + +// SearchResult represents the result of performing a search. +type SearchResult struct { + SearchTime time.Duration + Total int + Results []*router.ResolvedURL +} + +// ListResult represents the result of performing a list. +type ListResult struct { + Results []*router.ResolvedURL +} + +// queryFields provides a map of fields to weighting to use with the +// elasticsearch query. +func queryFields(sp SearchParams) map[string]float64 { + fields := map[string]float64{ + "URL.ngrams": 8, + "CharmMeta.Categories": 5, + "CharmMeta.Tags": 5, + "BundleData.Tags": 5, + "Series.ngrams": 5, + "CharmProvidedInterfaces": 3, + "CharmRequiredInterfaces": 3, + "CharmMeta.Description": 1, + "BundleReadMe": 1, + } + if sp.AutoComplete { + fields["CharmMeta.Name.ngrams"] = 10 + } else { + fields["CharmMeta.Name"] = 10 + } + return fields +} + +// encodeFields takes a map of field name to weight and builds a slice of strings +// representing those weighted fields for a MultiMatchQuery. +func encodeFields(fields map[string]float64) []string { + fs := make([]string, 0, len(fields)) + for k, v := range fields { + fs = append(fs, elasticsearch.BoostField(k, v)) + } + return fs +} + +// createSearchDSL builds an elasticsearch query from the query parameters. +// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl.html +func createSearchDSL(sp SearchParams) elasticsearch.QueryDSL { + qdsl := elasticsearch.QueryDSL{ + From: sp.Skip, + Size: sp.Limit, + } + + // Full text search + var q elasticsearch.Query + if sp.Text == "" { + q = elasticsearch.MatchAllQuery{} + } else { + q = elasticsearch.MultiMatchQuery{ + Query: sp.Text, + Fields: encodeFields(queryFields(sp)), + } + } + + // Boosting + f := []elasticsearch.Function{ + // TODO(mhilton) review this function in future if downloads get sufficiently + // large that the order becomes undesirable. + elasticsearch.FieldValueFactorFunction{ + Field: "TotalDownloads", + Factor: 0.000001, + Modifier: "ln2p", + }, + elasticsearch.BoostFactorFunction{ + Filter: promulgatedFilter("1"), + BoostFactor: 1.25, + }, + } + for k, v := range seriesBoost { + f = append(f, elasticsearch.BoostFactorFunction{ + Filter: seriesFilter(k), + BoostFactor: v, + }) + } + q = elasticsearch.FunctionScoreQuery{ + Query: q, + Functions: f, + } + + // Filters + qdsl.Query = elasticsearch.FilteredQuery{ + Query: q, + Filter: createFilters(sp.Filters, sp.Admin, sp.Groups), + } + + // Sorting + for _, s := range sp.sort { + qdsl.Sort = append(qdsl.Sort, createElasticSort(s)) + } + + return qdsl +} + +// createFilters converts the filters requested with the search API into +// filters in the elasticsearch query DSL. 
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search +// for details of how filters are specified in the API. For each key in f a +// filter is created that matches any one of the set of values specified for +// that key. The created filter will only match when at least one of the +// requested values matches for all of the requested keys. Any filter names +// that are not defined in the filters map will be silently skipped. +func createFilters(f map[string][]string, admin bool, groups []string) elasticsearch.Filter { + af := make(elasticsearch.AndFilter, 0, len(f)+1) + for k, vals := range f { + filter, ok := filters[k] + if !ok { + continue + } + of := make(elasticsearch.OrFilter, 0, len(vals)) + for _, v := range vals { + of = append(of, filter(v)) + } + af = append(af, of) + } + if admin { + return af + } + gf := make(elasticsearch.OrFilter, 0, len(groups)+1) + gf = append(gf, elasticsearch.TermFilter{ + Field: "ReadACLs", + Value: params.Everyone, + }) + for _, g := range groups { + gf = append(gf, elasticsearch.TermFilter{ + Field: "ReadACLs", + Value: g, + }) + } + af = append(af, gf) + return af +} + +// filters contains a mapping from a filter parameter in the API to a +// function that will generate an elasticsearch query DSL filter for the +// given value. +var filters = map[string]func(string) elasticsearch.Filter{ + "description": descriptionFilter, + "name": nameFilter, + "owner": ownerFilter, + "promulgated": promulgatedFilter, + "provides": termFilter("CharmProvidedInterfaces"), + "requires": termFilter("CharmRequiredInterfaces"), + "series": seriesFilter, + "summary": summaryFilter, + "tags": tagsFilter, + "type": typeFilter, +} + +// descriptionFilter generates a filter that will match against the +// description field of the charm data. +func descriptionFilter(value string) elasticsearch.Filter { + return elasticsearch.QueryFilter{ + Query: elasticsearch.MatchQuery{ + Field: "CharmMeta.Description", + Query: value, + Type: "phrase", + }, + } +} + +// nameFilter generates a filter that will match against the +// name of the charm or bundle. +func nameFilter(value string) elasticsearch.Filter { + return elasticsearch.QueryFilter{ + Query: elasticsearch.MatchQuery{ + Field: "Name", + Query: value, + Type: "phrase", + }, + } +} + +// ownerFilter generates a filter that will match against the +// owner taken from the URL. +func ownerFilter(value string) elasticsearch.Filter { + if value == "" { + return promulgatedFilter("1") + } + return elasticsearch.QueryFilter{ + Query: elasticsearch.MatchQuery{ + Field: "User", + Query: value, + Type: "phrase", + }, + } +} + +// promulgatedFilter generates a filter that will match against the +// existence of a promulgated URL. +func promulgatedFilter(value string) elasticsearch.Filter { + f := elasticsearch.ExistsFilter("PromulgatedURL") + if value == "1" { + return f + } + return elasticsearch.NotFilter{f} +} + +// seriesFilter generates a filter that will match against the +// series taken from the URL. +func seriesFilter(value string) elasticsearch.Filter { + return elasticsearch.QueryFilter{ + Query: elasticsearch.MatchQuery{ + Field: "Series", + Query: value, + Type: "phrase", + }, + } +} + +// summaryFilter generates a filter that will match against the +// summary field from the charm data. 
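+//
+// The generated filter wraps a phrase-type match query; rendered as
+// query DSL it is roughly equivalent to the following (an illustrative
+// rendering, not verbatim library output):
+//
+//	{"query": {"match": {"CharmMeta.Summary": {"query": value, "type": "phrase"}}}}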
+func summaryFilter(value string) elasticsearch.Filter {
+	return elasticsearch.QueryFilter{
+		Query: elasticsearch.MatchQuery{
+			Field: "CharmMeta.Summary",
+			Query: value,
+			Type:  "phrase",
+		},
+	}
+}
+
+// tagsFilter generates a filter that will match against the "tags" field
+// in the data. For charms this is the Categories field and for bundles this
+// is the Tags field.
+func tagsFilter(value string) elasticsearch.Filter {
+	tags := strings.Split(value, " ")
+	af := make(elasticsearch.AndFilter, 0, len(tags))
+	for _, t := range tags {
+		if t == "" {
+			continue
+		}
+		af = append(af, elasticsearch.OrFilter{
+			elasticsearch.TermFilter{
+				Field: "CharmMeta.Categories",
+				Value: t,
+			},
+			elasticsearch.TermFilter{
+				Field: "CharmMeta.Tags",
+				Value: t,
+			},
+			elasticsearch.TermFilter{
+				Field: "BundleData.Tags",
+				Value: t,
+			},
+		})
+	}
+	return af
+}
+
+// termFilter creates a function that generates a filter on the specified
+// document field.
+func termFilter(field string) func(string) elasticsearch.Filter {
+	return func(value string) elasticsearch.Filter {
+		terms := strings.Split(value, " ")
+		af := make(elasticsearch.AndFilter, 0, len(terms))
+		for _, t := range terms {
+			if t == "" {
+				continue
+			}
+			af = append(af, elasticsearch.TermFilter{
+				Field: field,
+				Value: t,
+			})
+		}
+		return af
+	}
+}
+
+// bundleFilter is a filter that matches against bundles, based on
+// the URL.
+var bundleFilter = seriesFilter("bundle")
+
+// typeFilter generates a filter that is used to match either only charms,
+// or only bundles.
+func typeFilter(value string) elasticsearch.Filter {
+	if value == "bundle" {
+		return bundleFilter
+	}
+	return elasticsearch.NotFilter{bundleFilter}
+}
+
+// sortESFields contains a mapping from API field names to the entity
+// fields to sort on.
+var sortESFields = map[string]string{
+	"name":      "Name",
+	"owner":     "User",
+	"series":    "Series",
+	"downloads": "TotalDownloads",
+}
+
+// createElasticSort creates an elasticsearch.Sort query parameter out of
+// a sortParam.
+func createElasticSort(s sortParam) elasticsearch.Sort {
+	sort := elasticsearch.Sort{
+		Field: sortESFields[s.Field],
+		Order: elasticsearch.Ascending,
+	}
+	if s.Order == sortDescending {
+		sort.Order = elasticsearch.Descending
+	}
+	return sort
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/search_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,950 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "encoding/json" + "sort" + "strings" + "sync" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" +) + +type StoreSearchSuite struct { + storetesting.IsolatedMgoESSuite + pool *Pool + store *Store + index SearchIndex +} + +var _ = gc.Suite(&StoreSearchSuite{}) + +func (s *StoreSearchSuite) SetUpTest(c *gc.C) { + s.IsolatedMgoESSuite.SetUpTest(c) + + // Temporarily set LegacyDownloadCountsEnabled to false, so that the real + // code path can be reached by tests in this suite. + // TODO (frankban): remove this block when removing the legacy counts + // logic. + original := LegacyDownloadCountsEnabled + LegacyDownloadCountsEnabled = false + s.AddCleanup(func(*gc.C) { + LegacyDownloadCountsEnabled = original + }) + + s.index = SearchIndex{s.ES, s.TestIndex} + s.ES.RefreshIndex(".versions") + pool, err := NewPool(s.Session.DB("foo"), &s.index, nil, ServerParams{}) + c.Assert(err, gc.IsNil) + s.pool = pool + s.store = pool.Store() + s.addCharmsToStore(c) + c.Assert(err, gc.IsNil) +} + +func (s *StoreSearchSuite) TearDownTest(c *gc.C) { + s.store.Close() + s.pool.Close() + s.IsolatedMgoESSuite.TearDownTest(c) +} + +var newResolvedURL = router.MustNewResolvedURL + +var exportTestCharms = map[string]*router.ResolvedURL{ + "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), + "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), + "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), +} + +var exportTestBundles = map[string]*router.ResolvedURL{ + "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), +} + +var charmDownloadCounts = map[string]int{ + "wordpress": 0, + "wordpress-simple": 1, + "mysql": 3, + "varnish": 5, +} + +func (s *StoreSearchSuite) TestSuccessfulExport(c *gc.C) { + s.store.pool.statsCache.EvictAll() + for name, ref := range exportTestCharms { + entity, err := s.store.FindEntity(ref) + c.Assert(err, gc.IsNil) + var actual json.RawMessage + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL), &actual) + c.Assert(err, gc.IsNil) + readACLs := []string{ref.URL.User, params.Everyone} + if ref.URL.Name == "riak" { + readACLs = []string{ref.URL.User} + } + doc := SearchDoc{ + Entity: entity, + TotalDownloads: int64(charmDownloadCounts[name]), + ReadACLs: readACLs, + Series: entity.SupportedSeries, + } + c.Assert(string(actual), jc.JSONEquals, doc) + } +} + +func (s *StoreSearchSuite) TestNoExportDeprecated(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("mysql") + url := newResolvedURL("cs:~charmers/saucy/mysql-4", -1) + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + + var entity *mongodoc.Entity + err = s.store.DB.Entities().FindId("cs:~openstack-charmers/trusty/mysql-7").One(&entity) + c.Assert(err, gc.IsNil) + present, err := s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) + c.Assert(err, gc.IsNil) + c.Assert(present, gc.Equals, true) + + err = s.store.DB.Entities().FindId("cs:~charmers/saucy/mysql-4").One(&entity) + c.Assert(err, 
gc.IsNil) + present, err = s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) + c.Assert(err, gc.IsNil) + c.Assert(present, gc.Equals, false) +} + +func (s *StoreSearchSuite) TestNoExportDevelopment(c *gc.C) { + rurl := newResolvedURL("cs:~charmers/development/trusty/mysql-42", -1) + err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("mysql")) + c.Assert(err, gc.IsNil) + + var entity *mongodoc.Entity + err = s.store.DB.Entities().FindId(rurl.URL.String()).One(&entity) + c.Assert(err, gc.IsNil) + present, err := s.store.ES.HasDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL)) + c.Assert(err, gc.IsNil) + c.Assert(present, gc.Equals, false) +} + +func (s *StoreSearchSuite) TestExportOnlyLatest(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~charmers/precise/wordpress-24", -1) + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + var expected, old *mongodoc.Entity + var actual json.RawMessage + err = s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-23").One(&old) + c.Assert(err, gc.IsNil) + err = s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-24").One(&expected) + c.Assert(err, gc.IsNil) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(old.URL), &actual) + c.Assert(err, gc.IsNil) + doc := SearchDoc{ + Entity: expected, + ReadACLs: []string{"charmers", params.Everyone}, + Series: expected.SupportedSeries, + } + c.Assert(string(actual), jc.JSONEquals, doc) +} + +func (s *StoreSearchSuite) TestExportMultiSeriesCharmsReplaceEarlierOnes(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~charmers/trusty/juju-gui-24", -1) + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + charmArchive = storetesting.Charms.CharmDir("multi-series") + url = newResolvedURL("cs:~charmers/juju-gui-25", -1) + err = s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + var expected, old *mongodoc.Entity + var actual json.RawMessage + err = s.store.DB.Entities().FindId("cs:~charmers/trusty/juju-gui-24").One(&old) + c.Assert(err, gc.IsNil) + err = s.store.DB.Entities().FindId("cs:~charmers/juju-gui-25").One(&expected) + c.Assert(err, gc.IsNil) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(old.URL), &actual) + c.Assert(errgo.Cause(err), gc.Equals, elasticsearch.ErrNotFound) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(expected.URL), &actual) + c.Assert(err, gc.IsNil) + doc := SearchDoc{ + Entity: expected, + ReadACLs: []string{"charmers"}, + Series: expected.SupportedSeries, + } + c.Assert(string(actual), jc.JSONEquals, doc) +} + +func (s *StoreSearchSuite) TestExportMultiSeriesCharmsDontReplaceEarlierOnesIfNotSupported(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~charmers/trusty/juju-gui-24", -1) + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + url = newResolvedURL("cs:~charmers/precise/juju-gui-24", -1) + err = s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + charmArchive = storetesting.Charms.CharmDir("multi-series") + url = newResolvedURL("cs:~charmers/juju-gui-25", -1) + err = s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + var expected, trusty, precise *mongodoc.Entity + var actual json.RawMessage + err = 
s.store.DB.Entities().FindId("cs:~charmers/precise/juju-gui-24").One(&precise) + c.Assert(err, gc.IsNil) + err = s.store.DB.Entities().FindId("cs:~charmers/trusty/juju-gui-24").One(&trusty) + c.Assert(err, gc.IsNil) + err = s.store.DB.Entities().FindId("cs:~charmers/juju-gui-25").One(&expected) + c.Assert(err, gc.IsNil) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(precise.URL), &actual) + c.Assert(err, gc.IsNil) + doc := SearchDoc{ + Entity: precise, + ReadACLs: []string{"charmers"}, + Series: precise.SupportedSeries, + } + c.Assert(string(actual), jc.JSONEquals, doc) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(trusty.URL), &actual) + c.Assert(errgo.Cause(err), gc.Equals, elasticsearch.ErrNotFound) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(expected.URL), &actual) + c.Assert(err, gc.IsNil) + doc = SearchDoc{ + Entity: expected, + ReadACLs: []string{"charmers"}, + Series: expected.SupportedSeries, + } + c.Assert(string(actual), jc.JSONEquals, doc) +} + +func (s *StoreSearchSuite) TestExportSearchDocument(c *gc.C) { + var entity *mongodoc.Entity + var actual json.RawMessage + err := s.store.DB.Entities().FindId("cs:~charmers/precise/wordpress-23").One(&entity) + c.Assert(err, gc.IsNil) + doc := SearchDoc{Entity: entity, TotalDownloads: 4000} + err = s.store.ES.update(&doc) + c.Assert(err, gc.IsNil) + err = s.store.ES.GetDocument(s.TestIndex, typeName, s.store.ES.getID(entity.URL), &actual) + c.Assert(err, gc.IsNil) + c.Assert(string(actual), jc.JSONEquals, doc) +} + +func (s *StoreSearchSuite) addCharmsToStore(c *gc.C) { + for name, url := range exportTestCharms { + charmArchive := storetesting.Charms.CharmDir(name) + cats := strings.Split(name, "-") + charmArchive.Meta().Categories = cats + tags := make([]string, len(cats)) + for i, s := range cats { + tags[i] = s + "TAG" + } + charmArchive.Meta().Tags = tags + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + for i := 0; i < charmDownloadCounts[name]; i++ { + err := s.store.IncrementDownloadCounts(url) + c.Assert(err, gc.IsNil) + } + if url.URL.Name == "riak" { + continue + } + err = s.store.SetPerms(&url.URL, "read", url.URL.User, params.Everyone) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearchBaseURL(baseURL(&url.URL)) + c.Assert(err, gc.IsNil) + } + for name, url := range exportTestBundles { + bundleArchive := storetesting.Charms.BundleDir(name) + bundleArchive.Data().Tags = strings.Split(name, "-") + err := s.store.AddBundleWithArchive(url, bundleArchive) + c.Assert(err, gc.IsNil) + for i := 0; i < charmDownloadCounts[name]; i++ { + err := s.store.IncrementDownloadCounts(url) + c.Assert(err, gc.IsNil) + } + err = s.store.SetPerms(&url.URL, "read", url.URL.User, params.Everyone) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearchBaseURL(baseURL(&url.URL)) + c.Assert(err, gc.IsNil) + } + s.store.pool.statsCache.EvictAll() + err := s.store.syncSearch() + c.Assert(err, gc.IsNil) +} + +var searchTests = []struct { + about string + sp SearchParams + results []*router.ResolvedURL + totalDiff int // len(results) + totalDiff = expected total +}{ + { + about: "basic text search", + sp: SearchParams{ + Text: "wordpress", + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "blank text search", + sp: SearchParams{ + Text: "", + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + 
exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "autocomplete search", + sp: SearchParams{ + Text: "word", + AutoComplete: true, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "description filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "description": {"blog"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "name filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "name": {"wordpress"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "owner filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "owner": {"foo"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + }, + }, { + about: "provides filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "provides": {"mysql"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "requires filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "requires": {"mysql"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "series filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "series": {"trusty"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "summary filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "summary": {"Database engine"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "tags filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "tags": {"wordpress"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "bundle type filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "type": {"bundle"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + }, + }, { + about: "charm type filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "type": {"charm"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "charm & bundle type filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "type": {"charm", "bundle"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "invalid filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "no such filter": {"foo"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "valid & invalid filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "no such filter": {"foo"}, + "type": {"charm"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "paginated 
search", + sp: SearchParams{ + Filters: map[string][]string{ + "name": {"mysql"}, + }, + Skip: 1, + }, + totalDiff: +1, + }, { + about: "additional groups", + sp: SearchParams{ + Groups: []string{"charmers"}, + }, + results: []*router.ResolvedURL{ + exportTestCharms["riak"], + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "admin search", + sp: SearchParams{ + Admin: true, + }, + results: []*router.ResolvedURL{ + exportTestCharms["riak"], + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "charm tags filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "tags": {"wordpressTAG"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "blank owner filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "owner": {""}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "promulgated search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "promulgated": {"1"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "not promulgated search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "promulgated": {"0"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + }, + }, { + about: "owner and promulgated filter search", + sp: SearchParams{ + Text: "", + Filters: map[string][]string{ + "promulgated": {"1"}, + "owner": {"openstack-charmers"}, + }, + }, + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, +} + +func (s *StoreSearchSuite) TestSearches(c *gc.C) { + s.store.ES.Database.RefreshIndex(s.TestIndex) + for i, test := range searchTests { + c.Logf("test %d: %s", i, test.about) + res, err := s.store.Search(test.sp) + c.Assert(err, gc.IsNil) + c.Logf("results: %v", res.Results) + sort.Sort(resolvedURLsByString(res.Results)) + sort.Sort(resolvedURLsByString(test.results)) + c.Check(res.Results, jc.DeepEquals, test.results) + c.Check(res.Total, gc.Equals, len(test.results)+test.totalDiff) + } +} + +type resolvedURLsByString []*router.ResolvedURL + +func (r resolvedURLsByString) Less(i, j int) bool { + return r[i].URL.String() < r[j].URL.String() +} + +func (r resolvedURLsByString) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (r resolvedURLsByString) Len() int { + return len(r) +} + +func (s *StoreSearchSuite) TestPaginatedSearch(c *gc.C) { + err := s.store.ES.Database.RefreshIndex(s.TestIndex) + c.Assert(err, gc.IsNil) + sp := SearchParams{ + Text: "wordpress", + Skip: 1, + } + res, err := s.store.Search(sp) + c.Assert(err, gc.IsNil) + c.Assert(res.Results, gc.HasLen, 1) + c.Assert(res.Total, gc.Equals, 2) +} + +func (s *StoreSearchSuite) TestLimitTestSearch(c *gc.C) { + err := s.store.ES.Database.RefreshIndex(s.TestIndex) + c.Assert(err, gc.IsNil) + sp := SearchParams{ + Text: "wordpress", + Limit: 1, + } + res, err := s.store.Search(sp) + c.Assert(err, gc.IsNil) + c.Assert(res.Results, gc.HasLen, 1) +} + +func (s *StoreSearchSuite) TestPromulgatedRank(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("varnish") + url := newResolvedURL("cs:~charmers/trusty/varnish-1", 1) + 
s.store.AddCharmWithArchive(url, charmArchive) + err := s.store.SetPerms(&url.URL, "read", url.URL.User, params.Everyone) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearchBaseURL(baseURL(&url.URL)) + c.Assert(err, gc.IsNil) + s.store.ES.Database.RefreshIndex(s.TestIndex) + sp := SearchParams{ + Filters: map[string][]string{ + "name": {"varnish"}, + }, + } + res, err := s.store.Search(sp) + c.Assert(err, gc.IsNil) + c.Logf("results: %s", res.Results) + c.Assert(res.Results, jc.DeepEquals, []*router.ResolvedURL{ + url, + exportTestCharms["varnish"], + }) +} + +func (s *StoreSearchSuite) TestSorting(c *gc.C) { + s.store.ES.Database.RefreshIndex(s.TestIndex) + tests := []struct { + about string + sortQuery string + results []*router.ResolvedURL + }{{ + about: "name ascending", + sortQuery: "name", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "name descending", + sortQuery: "-name", + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + exportTestCharms["wordpress"], + exportTestCharms["varnish"], + exportTestCharms["mysql"], + }, + }, { + about: "series ascending", + sortQuery: "series,name", + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "series descending", + sortQuery: "-series,name", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "owner ascending", + sortQuery: "owner,name", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + exportTestCharms["varnish"], + exportTestCharms["mysql"], + }, + }, { + about: "owner descending", + sortQuery: "-owner,name", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "downloads ascending", + sortQuery: "downloads", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "downloads descending", + sortQuery: "-downloads", + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + exportTestCharms["wordpress"], + }, + }} + for i, test := range tests { + c.Logf("test %d. 
%s", i, test.about) + var sp SearchParams + err := sp.ParseSortFields(test.sortQuery) + c.Assert(err, gc.IsNil) + res, err := s.store.Search(sp) + c.Assert(err, gc.IsNil) + c.Assert(res.Results, jc.DeepEquals, test.results) + c.Assert(res.Total, gc.Equals, len(test.results)) + } +} + +func (s *StoreSearchSuite) TestBoosting(c *gc.C) { + s.store.ES.Database.RefreshIndex(s.TestIndex) + var sp SearchParams + res, err := s.store.Search(sp) + c.Assert(err, gc.IsNil) + c.Assert(res.Results, gc.HasLen, 4) + c.Logf("results: %s", res.Results) + c.Assert(res.Results, jc.DeepEquals, []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + exportTestCharms["mysql"], + exportTestCharms["wordpress"], + exportTestCharms["varnish"], + }) +} + +func (s *StoreSearchSuite) TestEnsureIndex(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-ensure-index" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 0) + err = s.store.ES.ensureIndexes(false) + c.Assert(err, gc.Equals, nil) + indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 1) + index := indexes[0] + err = s.store.ES.ensureIndexes(false) + c.Assert(err, gc.Equals, nil) + indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 1) + c.Assert(indexes[0], gc.Equals, index) +} + +func (s *StoreSearchSuite) TestEnsureConcurrent(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-ensure-index-conc" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 0) + var wg sync.WaitGroup + wg.Add(1) + go func() { + err := s.store.ES.ensureIndexes(false) + c.Check(err, gc.Equals, nil) + wg.Done() + }() + err = s.store.ES.ensureIndexes(false) + c.Assert(err, gc.Equals, nil) + indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 1) + wg.Wait() +} + +func (s *StoreSearchSuite) TestEnsureIndexForce(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-ensure-index-force" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + indexes, err := s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 0) + err = s.store.ES.ensureIndexes(false) + c.Assert(err, gc.Equals, nil) + indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 1) + index := indexes[0] + err = s.store.ES.ensureIndexes(true) + c.Assert(err, gc.Equals, nil) + indexes, err = s.ES.ListIndexesForAlias(s.store.ES.Index) + c.Assert(err, gc.Equals, nil) + c.Assert(indexes, gc.HasLen, 1) + c.Assert(indexes[0], gc.Not(gc.Equals), index) +} + +func (s *StoreSearchSuite) TestGetCurrentVersionNoVersion(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-current-version" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + v, dv, err := s.store.ES.getCurrentVersion() + c.Assert(err, gc.Equals, nil) + c.Assert(v, gc.Equals, version{}) + c.Assert(dv, gc.Equals, int64(0)) +} + +func (s *StoreSearchSuite) TestGetCurrentVersionWithVersion(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-current-version" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + index, err := s.store.ES.newIndex() + 
c.Assert(err, gc.Equals, nil) + updated, err := s.store.ES.updateVersion(version{1, index}, 0) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, true) + v, dv, err := s.store.ES.getCurrentVersion() + c.Assert(err, gc.Equals, nil) + c.Assert(v, gc.Equals, version{1, index}) + c.Assert(dv, gc.Equals, int64(1)) +} + +func (s *StoreSearchSuite) TestUpdateVersionNew(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-update-version" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + index, err := s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err := s.store.ES.updateVersion(version{1, index}, 0) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, true) +} + +func (s *StoreSearchSuite) TestUpdateVersionUpdate(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-update-version" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + index, err := s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err := s.store.ES.updateVersion(version{1, index}, 0) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, true) + index, err = s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err = s.store.ES.updateVersion(version{2, index}, 1) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, true) +} + +func (s *StoreSearchSuite) TestUpdateCreateConflict(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-update-version" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + index, err := s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err := s.store.ES.updateVersion(version{1, index}, 0) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, true) + index, err = s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err = s.store.ES.updateVersion(version{1, index}, 0) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, false) +} + +func (s *StoreSearchSuite) TestUpdateConflict(c *gc.C) { + s.store.ES.Index = s.TestIndex + "-update-version" + defer s.ES.DeleteDocument(".versions", "version", s.store.ES.Index) + index, err := s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err := s.store.ES.updateVersion(version{1, index}, 0) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, true) + index, err = s.store.ES.newIndex() + c.Assert(err, gc.Equals, nil) + updated, err = s.store.ES.updateVersion(version{1, index}, 3) + c.Assert(err, gc.Equals, nil) + c.Assert(updated, gc.Equals, false) +} + +func (s *StoreSearchSuite) TestMultiSeriesCharmFiltersSeriesCorrectly(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("multi-series") + url := newResolvedURL("cs:~charmers/juju-gui-25", -1) + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", url.URL.User, params.Everyone) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(url) + c.Assert(err, gc.IsNil) + s.store.ES.Database.RefreshIndex(s.TestIndex) + filterTests := []struct { + series string + notFound bool + }{{ + series: "trusty", + }, { + series: "vivid", + }, { + series: "sauch", + notFound: true, + }} + for i, test := range filterTests { + c.Logf("%d. 
%s", i, test.series) + res, err := s.store.Search(SearchParams{ + Filters: map[string][]string{ + "name": []string{"juju-gui"}, + "series": []string{test.series}, + }, + }) + c.Assert(err, gc.IsNil) + if test.notFound { + c.Assert(res.Results, gc.HasLen, 0) + continue + } + c.Assert(res.Results, gc.HasLen, 1) + c.Assert(res.Results[0].URL.String(), gc.Equals, url.String()) + } +} + +func (s *StoreSearchSuite) TestMultiSeriesCharmSortsSeriesCorrectly(c *gc.C) { + charmArchive := storetesting.Charms.CharmDir("multi-series") + url := newResolvedURL("cs:~charmers/juju-gui-25", -1) + err := s.store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", url.URL.User, params.Everyone) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(url) + c.Assert(err, gc.IsNil) + s.store.ES.Database.RefreshIndex(s.TestIndex) + var sp SearchParams + sp.ParseSortFields("-series", "owner") + res, err := s.store.Search(sp) + c.Assert(err, gc.IsNil) + c.Assert(res.Results, jc.DeepEquals, []*router.ResolvedURL{ + newResolvedURL("cs:~charmers/juju-gui-25", -1), + newResolvedURL("cs:~foo/trusty/varnish-1", -1), + newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), + newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), + }) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,178 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// This is the internal version of the charmstore package. +// It exposes details to the various API packages +// that we do not wish to expose to the world at large. +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "net/http" + "strings" + "time" + + "gopkg.in/errgo.v1" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/mgo.v2" + "gopkg.in/natefinch/lumberjack.v2" + + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +// NewAPIHandlerFunc is a function that returns a new API handler that uses +// the given Store. +type NewAPIHandlerFunc func(*Pool, ServerParams) HTTPCloseHandler + +// HTTPCloseHandler represents a HTTP handler that +// must be closed after use. +type HTTPCloseHandler interface { + Close() + http.Handler +} + +// ServerParams holds configuration for a new internal API server. +type ServerParams struct { + // AuthUsername and AuthPassword hold the credentials + // used for HTTP basic authentication. + AuthUsername string + AuthPassword string + + // IdentityLocation holds the location of the third party authorization + // service to use when creating third party caveats, + // for example: http://api.jujucharms.com/identity/v1/discharger + // If it is empty, IdentityURL+"/v1/discharger" will be used. + IdentityLocation string + + // TermsLocation holds the location of the third party + // terms service to use when creating third party caveats. + TermsLocation string + + // PublicKeyLocator holds a public key store. + // It may be nil. 
+	PublicKeyLocator bakery.PublicKeyLocator
+
+	// IdentityAPIURL holds the URL of the identity manager,
+	// for example http://api.jujucharms.com/identity
+	IdentityAPIURL string
+
+	// AgentUsername and AgentKey hold the credentials used for agent
+	// authentication.
+	AgentUsername string
+	AgentKey      *bakery.KeyPair
+
+	// StatsCacheMaxAge is the maximum length of time between
+	// refreshes of entities in the stats cache.
+	StatsCacheMaxAge time.Duration
+
+	// SearchCacheMaxAge is the maximum length of time between
+	// refreshes of entities in the search cache.
+	SearchCacheMaxAge time.Duration
+
+	// MaxMgoSessions specifies a soft limit on the maximum
+	// number of mongo sessions used. Each concurrent
+	// HTTP request will use one session.
+	MaxMgoSessions int
+
+	// HTTPRequestWaitDuration holds the amount of time
+	// that an HTTP request will wait for a free connection
+	// when the MaxConcurrentHTTPRequests limit is reached.
+	HTTPRequestWaitDuration time.Duration
+
+	// AuditLogger optionally holds the logger which will be used to
+	// write audit log entries.
+	AuditLogger *lumberjack.Logger
+}
+
+// NewServer returns a handler that serves the given charm store API
+// versions using db to store the charm store data.
+// An optional elasticsearch configuration can be specified in si. If
+// elasticsearch is not being used then si can be set to nil.
+// The key of the versions map is the version name.
+// The handler configuration is provided to all version handlers.
+//
+// The returned Server should be closed after use.
+func NewServer(db *mgo.Database, si *SearchIndex, config ServerParams, versions map[string]NewAPIHandlerFunc) (*Server, error) {
+	if len(versions) == 0 {
+		return nil, errgo.Newf("charm store server must serve at least one version of the API")
+	}
+	config.IdentityLocation = strings.Trim(config.IdentityLocation, "/")
+	config.TermsLocation = strings.Trim(config.TermsLocation, "/")
+	config.IdentityAPIURL = strings.Trim(config.IdentityAPIURL, "/")
+	if config.IdentityLocation == "" && config.IdentityAPIURL != "" {
+		config.IdentityLocation = config.IdentityAPIURL + "/v1/discharger"
+	}
+	logger.Infof("identity discharge location: %s", config.IdentityLocation)
+	logger.Infof("identity API location: %s", config.IdentityAPIURL)
+	logger.Infof("terms discharge location: %s", config.TermsLocation)
+	bparams := bakery.NewServiceParams{
+		// TODO The location is attached to any macaroons that we
+		// mint. Currently we don't know the location of the current
+		// service. We could potentially provide a way to configure this,
+		// but it probably doesn't matter, as nothing currently uses
+		// the macaroon location for anything.
+		Location: "charmstore",
+		Locator:  config.PublicKeyLocator,
+	}
+	pool, err := NewPool(db, si, &bparams, config)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot make store")
+	}
+	store := pool.Store()
+	defer store.Close()
+	if err := migrate(store.DB); err != nil {
+		pool.Close()
+		return nil, errgo.Notef(err, "database migration failed")
+	}
+	store.Go(func(store *Store) {
+		if err := store.syncSearch(); err != nil {
+			logger.Errorf("Cannot populate elasticsearch: %v", err)
+		}
+	})
+	srv := &Server{
+		pool: pool,
+		mux:  router.NewServeMux(),
+	}
+	// Version independent API.
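+	// (Versioned handlers are mounted next: a key such as "v4" in the
+	// versions map is served below the "/v4/" prefix, which handle
+	// strips before the request reaches the handler.)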
+ handle(srv.mux, "/debug", newServiceDebugHandler(pool, config, srv.mux)) + for vers, newAPI := range versions { + h := newAPI(pool, config) + handle(srv.mux, "/"+vers, h) + srv.handlers = append(srv.handlers, h) + } + + return srv, nil +} + +type Server struct { + pool *Pool + mux *router.ServeMux + handlers []HTTPCloseHandler +} + +// ServeHTTP implements http.Handler.ServeHTTP. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.mux.ServeHTTP(w, req) +} + +// Close closes the server. It must be called when the server +// is finished with. +func (s *Server) Close() { + s.pool.Close() + for _, h := range s.handlers { + h.Close() + } + s.handlers = nil +} + +// Pool returns the Pool used by the server. +func (s *Server) Pool() *Pool { + return s.pool +} + +func handle(mux *router.ServeMux, path string, handler http.Handler) { + if path != "/" { + handler = http.StripPrefix(path, handler) + path += "/" + } + mux.Handle(path, handler) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/server_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,162 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "net/http" + + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" +) + +var serverParams = ServerParams{ + AuthUsername: "test-user", + AuthPassword: "test-password", +} + +type ServerSuite struct { + storetesting.IsolatedMgoESSuite +} + +var _ = gc.Suite(&ServerSuite{}) + +func (s *ServerSuite) TestNewServerWithNoVersions(c *gc.C) { + h, err := NewServer(s.Session.DB("foo"), nil, serverParams, nil) + c.Assert(err, gc.ErrorMatches, `charm store server must serve at least one version of the API`) + c.Assert(h, gc.IsNil) +} + +type versionResponse struct { + Version string + Path string +} + +func (s *ServerSuite) TestNewServerWithVersions(c *gc.C) { + db := s.Session.DB("foo") + serveVersion := func(vers string) NewAPIHandlerFunc { + return func(p *Pool, config ServerParams) HTTPCloseHandler { + return nopCloseHandler{ + router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return versionResponse{ + Version: vers, + Path: req.URL.Path, + }, nil + }), + } + } + } + + h, err := NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ + "version1": serveVersion("version1"), + }) + c.Assert(err, gc.IsNil) + defer h.Close() + assertServesVersion(c, h, "version1") + assertDoesNotServeVersion(c, h, "version2") + assertDoesNotServeVersion(c, h, "version3") + + h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ + "version1": serveVersion("version1"), + "version2": serveVersion("version2"), + }) + c.Assert(err, gc.IsNil) + defer h.Close() + assertServesVersion(c, h, "version1") + assertServesVersion(c, h, "version2") + assertDoesNotServeVersion(c, h, "version3") + + h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ + "version1": serveVersion("version1"), + "version2": serveVersion("version2"), + "version3": serveVersion("version3"), + }) + c.Assert(err, gc.IsNil) + defer h.Close() + assertServesVersion(c, h, "version1") + 
assertServesVersion(c, h, "version2") + assertServesVersion(c, h, "version3") + + h, err = NewServer(db, nil, serverParams, map[string]NewAPIHandlerFunc{ + "version1": serveVersion("version1"), + "": serveVersion(""), + }) + c.Assert(err, gc.IsNil) + defer h.Close() + assertServesVersion(c, h, "") + assertServesVersion(c, h, "version1") +} + +func (s *ServerSuite) TestNewServerWithConfig(c *gc.C) { + serveConfig := func(p *Pool, config ServerParams) HTTPCloseHandler { + return nopCloseHandler{ + router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return config, nil + }), + } + } + h, err := NewServer(s.Session.DB("foo"), nil, serverParams, map[string]NewAPIHandlerFunc{ + "version1": serveConfig, + }) + c.Assert(err, gc.IsNil) + defer h.Close() + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: h, + URL: "/version1/some/path", + ExpectBody: serverParams, + }) +} + +func (s *ServerSuite) TestNewServerWithElasticSearch(c *gc.C) { + serveConfig := func(p *Pool, config ServerParams) HTTPCloseHandler { + return nopCloseHandler{ + router.HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return config, nil + }), + } + } + h, err := NewServer(s.Session.DB("foo"), &SearchIndex{s.ES, s.TestIndex}, serverParams, + map[string]NewAPIHandlerFunc{ + "version1": serveConfig, + }) + c.Assert(err, gc.IsNil) + defer h.Close() + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: h, + URL: "/version1/some/path", + ExpectBody: serverParams, + }) +} + +func assertServesVersion(c *gc.C, h http.Handler, vers string) { + path := vers + if path != "" { + path = "/" + path + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: h, + URL: path + "/some/path", + ExpectBody: versionResponse{ + Version: vers, + Path: "/some/path", + }, + }) +} + +func assertDoesNotServeVersion(c *gc.C, h http.Handler, vers string) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: h, + URL: "/" + vers + "/some/path", + }) + c.Assert(rec.Code, gc.Equals, http.StatusNotFound) +} + +type nopCloseHandler struct { + http.Handler +} + +func (nopCloseHandler) Close() { +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,652 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "sync" + "time" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +type stats struct { + // Cache for statistics key words (two generations). + cacheMu sync.RWMutex + statsIdNew map[string]int + statsIdOld map[string]int + statsTokenNew map[int]string + statsTokenOld map[int]string +} + +// Note that changing the StatsGranularity constant +// will not change the stats time granularity - it +// is defined for external code clarity. + +// StatsGranularity holds the time granularity of statistics +// gathering. IncCounter(Async) calls within this duration +// may be aggregated. 
+const StatsGranularity = time.Minute
+
+// The stats mechanism uses the following MongoDB collections:
+//
+//	juju.stat.counters - Counters for statistics
+//	juju.stat.tokens   - Tokens used in statistics counter keys
+
+func (s StoreDatabase) StatCounters() *mgo.Collection {
+	return s.C("juju.stat.counters")
+}
+
+func (s StoreDatabase) StatTokens() *mgo.Collection {
+	return s.C("juju.stat.tokens")
+}
+
+// key returns the compound statistics identifier that represents key.
+// If write is true, the identifier will be created if necessary.
+// Identifiers have a form similar to "ab:c:def:", where each section is a
+// base-32 number that represents the respective word in key. This form
+// allows efficient indexing and searching for prefixes, while detaching
+// the key content and size from the actual words used in key.
+func (s *stats) key(db StoreDatabase, key []string, write bool) (string, error) {
+	if len(key) == 0 {
+		return "", errgo.New("store: empty statistics key")
+	}
+	tokens := db.StatTokens()
+	skey := make([]byte, 0, len(key)*4)
+	// The retry limit is mainly to prevent the loop below from running
+	// forever in edge cases, such as if the database is ever run in
+	// read-only mode. The logic should deterministically stop in normal
+	// scenarios.
+	var err error
+	for i, retry := 0, 30; i < len(key) && retry > 0; retry-- {
+		err = nil
+		id, found := s.tokenId(key[i])
+		if !found {
+			var t tokenId
+			err = tokens.Find(bson.D{{"t", key[i]}}).One(&t)
+			if err == mgo.ErrNotFound {
+				if !write {
+					return "", errgo.WithCausef(nil, params.ErrNotFound, "")
+				}
+				t.Id, err = tokens.Count()
+				if err != nil {
+					continue
+				}
+				t.Id++
+				t.Token = key[i]
+				err = tokens.Insert(&t)
+			}
+			if err != nil {
+				continue
+			}
+			s.cacheTokenId(t.Token, t.Id)
+			id = t.Id
+		}
+		skey = strconv.AppendInt(skey, int64(id), 32)
+		skey = append(skey, ':')
+		i++
+	}
+	if err != nil {
+		return "", err
+	}
+	return string(skey), nil
+}
+
+const statsTokenCacheSize = 1024
+
+type tokenId struct {
+	Id    int    `bson:"_id"`
+	Token string `bson:"t"`
+}
+
+// cacheTokenId adds the id for token into the cache.
+// The cache has two generations so that the least frequently used
+// tokens are evicted regularly.
+func (s *stats) cacheTokenId(token string, id int) {
+	s.cacheMu.Lock()
+	defer s.cacheMu.Unlock()
+	// Can't possibly be >, but reviews want it for defensiveness.
+	if len(s.statsIdNew) >= statsTokenCacheSize {
+		s.statsIdOld = s.statsIdNew
+		s.statsIdNew = nil
+		s.statsTokenOld = s.statsTokenNew
+		s.statsTokenNew = nil
+	}
+	if s.statsIdNew == nil {
+		s.statsIdNew = make(map[string]int, statsTokenCacheSize)
+		s.statsTokenNew = make(map[int]string, statsTokenCacheSize)
+	}
+	s.statsIdNew[token] = id
+	s.statsTokenNew[id] = token
+}
+
+// tokenId returns the id for token from the cache, if found.
+func (s *stats) tokenId(token string) (id int, found bool) {
+	s.cacheMu.RLock()
+	id, found = s.statsIdNew[token]
+	if found {
+		s.cacheMu.RUnlock()
+		return
+	}
+	id, found = s.statsIdOld[token]
+	s.cacheMu.RUnlock()
+	if found {
+		s.cacheTokenId(token, id)
+	}
+	return
+}
+
+// idToken returns the token for id from the cache, if found.
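+// Like tokenId, a hit in the old generation re-caches the entry in the
+// new generation, so tokens in active use survive a generation flip.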
+func (s *stats) idToken(id int) (token string, found bool) {
+	s.cacheMu.RLock()
+	token, found = s.statsTokenNew[id]
+	if found {
+		s.cacheMu.RUnlock()
+		return
+	}
+	token, found = s.statsTokenOld[id]
+	s.cacheMu.RUnlock()
+	if found {
+		s.cacheTokenId(token, id)
+	}
+	return
+}
+
+var counterEpoch = time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
+
+func timeToStamp(t time.Time) int32 {
+	return int32(t.Unix() - counterEpoch)
+}
+
+// IncCounterAsync increases by one the counter associated with the composed
+// key. The action is done in the background using a separate goroutine.
+func (s *Store) IncCounterAsync(key []string) {
+	s.Go(func(s *Store) {
+		if err := s.IncCounter(key); err != nil {
+			logger.Errorf("cannot increase stats counter for key %v: %v", key, err)
+		}
+	})
+}
+
+// IncCounter increases by one the counter associated with the composed key.
+func (s *Store) IncCounter(key []string) error {
+	return s.IncCounterAtTime(key, time.Now())
+}
+
+// IncCounterAtTime increases by one the counter associated with the composed
+// key, associating it with the given time.
+func (s *Store) IncCounterAtTime(key []string, t time.Time) error {
+	skey, err := s.stats.key(s.DB, key, true)
+	if err != nil {
+		return err
+	}
+
+	// Round to the start of the minute so we get one document per minute at most.
+	t = t.UTC().Add(-time.Duration(t.Second()) * time.Second)
+	counters := s.DB.StatCounters()
+	_, err = counters.Upsert(bson.D{{"k", skey}, {"t", timeToStamp(t)}}, bson.D{{"$inc", bson.D{{"c", 1}}}})
+	return err
+}
+
+// CounterRequest represents a request to aggregate counter values.
+type CounterRequest struct {
+	// Key and Prefix determine the counter keys to match.
+	// If Prefix is false, Key must match exactly. Otherwise, counters
+	// must begin with Key and have at least one more key token.
+	Key    []string
+	Prefix bool
+
+	// If List is true, matching counters are aggregated under their
+	// prefixes instead of being returned as a single overall sum.
+	//
+	// For example, given the following counts:
+	//
+	//	{"a", "b"}:      1,
+	//	{"a", "c"}:      3
+	//	{"a", "c", "d"}: 5
+	//	{"a", "c", "e"}: 7
+	//
+	// and assuming that Prefix is true, the following keys will
+	// present the respective results if List is true:
+	//
+	//	{"a"}      => {{"a", "b"}, 1, false},
+	//	              {{"a", "c"}, 3, false},
+	//	              {{"a", "c"}, 12, true}
+	//	{"a", "c"} => {{"a", "c", "d"}, 5, false},
+	//	              {{"a", "c", "e"}, 7, false}
+	//
+	// If List is false, the same key prefixes will present:
+	//
+	//	{"a"}      => {{"a"}, 16, true}
+	//	{"a", "c"} => {{"a", "c"}, 12, true}
+	//
+	List bool
+
+	// By defines the period covered by each aggregated data point.
+	// If unspecified, it defaults to ByAll, which aggregates all
+	// matching data points in a single entry.
+	By CounterRequestBy
+
+	// Start, if provided, changes the query so that only data points
+	// occurring at the given time or afterwards are considered.
+	Start time.Time
+
+	// Stop, if provided, changes the query so that only data points
+	// occurring at the given time or before are considered.
+	Stop time.Time
+}
+
+type CounterRequestBy int
+
+const (
+	ByAll CounterRequestBy = iota
+	ByDay
+	ByWeek
+)
+
+type Counter struct {
+	Key    []string
+	Prefix bool
+	Count  int64
+	Time   time.Time
+}
+
+// Counters aggregates and returns counter values according to the provided request.
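+//
+// A minimal usage sketch (the key shown is hypothetical):
+//
+//	counters, err := store.Counters(&CounterRequest{
+//		Key:    []string{"archive-download", "trusty", "wordpress"},
+//		Prefix: true,
+//		By:     ByWeek,
+//	})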
+func (s *Store) Counters(req *CounterRequest) ([]Counter, error) { + tokensColl := s.DB.StatTokens() + countersColl := s.DB.StatCounters() + + searchKey, err := s.stats.key(s.DB, req.Key, false) + if errgo.Cause(err) == params.ErrNotFound { + if !req.List { + return []Counter{{ + Key: req.Key, + Prefix: req.Prefix, + Count: 0, + }}, nil + } + return nil, nil + } + if err != nil { + return nil, errgo.Mask(err) + } + var regex string + if req.Prefix { + regex = "^" + searchKey + ".+" + } else { + regex = "^" + searchKey + "$" + } + + // This reduce function simply sums, for each emitted key, all the values found under it. + job := mgo.MapReduce{Reduce: "function(key, values) { return Array.sum(values); }"} + var emit string + switch req.By { + case ByDay: + emit = "emit(k+'@'+NumberInt(this.t/86400), this.c);" + case ByWeek: + emit = "emit(k+'@'+NumberInt(this.t/604800), this.c);" + default: + emit = "emit(k, this.c);" + } + if req.List && req.Prefix { + // For a search key "a:b:" matching a key "a:b:c:d:e:", this map function emits "a:b:c:*". + // For a search key "a:b:" matching a key "a:b:c:", it emits "a:b:c:". + // For a search key "a:b:" matching a key "a:b:", it emits "a:b:". + job.Scope = bson.D{{"searchKeyLen", len(searchKey)}} + job.Map = fmt.Sprintf(` + function() { + var k = this.k; + var i = k.indexOf(':', searchKeyLen)+1; + if (k.length > i) { k = k.substr(0, i)+'*'; } + %s + }`, emit) + } else { + // For a search key "a:b:" matching a key "a:b:c:d:e:", this map function emits "a:b:*". + // For a search key "a:b:" matching a key "a:b:c:", it also emits "a:b:*". + // For a search key "a:b:" matching a key "a:b:", it emits "a:b:". + emitKey := searchKey + if req.Prefix { + emitKey += "*" + } + job.Scope = bson.D{{"emitKey", emitKey}} + job.Map = fmt.Sprintf(` + function() { + var k = emitKey; + %s + }`, emit) + } + + var result []struct { + Key string `bson:"_id"` + Value int64 + } + var query, tquery bson.D + if !req.Start.IsZero() { + tquery = append(tquery, bson.DocElem{ + Name: "$gte", + Value: timeToStamp(req.Start), + }) + } + if !req.Stop.IsZero() { + tquery = append(tquery, bson.DocElem{ + Name: "$lte", + Value: timeToStamp(req.Stop), + }) + } + if len(tquery) == 0 { + query = bson.D{{"k", bson.D{{"$regex", regex}}}} + } else { + query = bson.D{{"k", bson.D{{"$regex", regex}}}, {"t", tquery}} + } + _, err = countersColl.Find(query).MapReduce(&job, &result) + if err != nil { + return nil, err + } + var counters []Counter + for i := range result { + key := result[i].Key + when := time.Time{} + if req.By != ByAll { + var stamp int64 + if at := strings.Index(key, "@"); at != -1 && len(key) > at+1 { + stamp, _ = strconv.ParseInt(key[at+1:], 10, 32) + key = key[:at] + } + if stamp == 0 { + return nil, errgo.Newf("internal error: bad aggregated key: %q", result[i].Key) + } + switch req.By { + case ByDay: + stamp = stamp * 86400 + case ByWeek: + // The +1 puts it at the end of the period. 
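+				// (For example, week bucket 0 covers stamps
+				// [0, 604800) and is reported at
+				// counterEpoch+604800 seconds, i.e. the end
+				// of that week.)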
+ stamp = (stamp + 1) * 604800 + } + when = time.Unix(counterEpoch+stamp, 0).In(time.UTC) + } + ids := strings.Split(key, ":") + tokens := make([]string, 0, len(ids)) + for i := 0; i < len(ids)-1; i++ { + if ids[i] == "*" { + continue + } + id, err := strconv.ParseInt(ids[i], 32, 32) + if err != nil { + return nil, errgo.Newf("store: invalid id: %q", ids[i]) + } + token, found := s.stats.idToken(int(id)) + if !found { + var t tokenId + err = tokensColl.FindId(id).One(&t) + if err == mgo.ErrNotFound { + return nil, errgo.Newf("store: internal error; token id not found: %d", id) + } + s.stats.cacheTokenId(t.Token, t.Id) + token = t.Token + } + tokens = append(tokens, token) + } + counter := Counter{ + Key: tokens, + Prefix: len(ids) > 0 && ids[len(ids)-1] == "*", + Count: result[i].Value, + Time: when, + } + counters = append(counters, counter) + } + if !req.List && len(counters) == 0 { + counters = []Counter{{Key: req.Key, Prefix: req.Prefix, Count: 0}} + } else if len(counters) > 1 { + sort.Sort(sortableCounters(counters)) + } + return counters, nil +} + +type sortableCounters []Counter + +func (s sortableCounters) Len() int { return len(s) } +func (s sortableCounters) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortableCounters) Less(i, j int) bool { + // Earlier times first. + if !s[i].Time.Equal(s[j].Time) { + return s[i].Time.Before(s[j].Time) + } + // Then larger counts first. + if s[i].Count != s[j].Count { + return s[j].Count < s[i].Count + } + // Then smaller/shorter keys first. + ki := s[i].Key + kj := s[j].Key + for n := range ki { + if n >= len(kj) { + return false + } + if ki[n] != kj[n] { + return ki[n] < kj[n] + } + } + if len(ki) < len(kj) { + return true + } + // Then full keys first. + return !s[i].Prefix && s[j].Prefix +} + +// EntityStatsKey returns a stats key for the given charm or bundle +// reference and the given kind. +// Entity stats keys are generated using the following schema: +// kind:series:name:user:revision +// where user can be empty (for promulgated charms/bundles) and revision is +// optional (e.g. when uploading an entity the revision is not specified). +// For instance, entities' stats can then be retrieved like the following: +// - kind:utopic:* -> all charms of a specific series; +// - kind:trusty:django:* -> all revisions and user variations of a charm; +// - kind:trusty:django::* -> all revisions of a promulgated charm; +// - kind:trusty:django::42 -> a specific promulgated charm; +// - kind:trusty:django:who:* -> all revisions of a user owned charm; +// - kind:trusty:django:who:42 -> a specific user owned charm; +// The above also applies to bundles (where the series is "bundle"). +func EntityStatsKey(url *charm.URL, kind string) []string { + key := []string{kind, url.Series, url.Name, url.User} + if url.Revision != -1 { + key = append(key, strconv.Itoa(url.Revision)) + } + return key +} + +// AggregatedCounts contains counts for a statistic aggregated over the +// lastDay, lastWeek, lastMonth and all time. +type AggregatedCounts struct { + LastDay, LastWeek, LastMonth, Total int64 +} + +// LegacyDownloadCountsEnabled represents whether aggregated download counts +// must be retrieved from the legacy infrastructure. In essence, if the value +// is true (enabled), aggregated counts are not calculated based on the data +// stored in the charm store stats; they are instead retrieved from the entity +// extra-info. 
For this reason, enabling it assumes that an external
+// program has updated the extra-info for the entity, specifically the
+// "legacy-download-stats" key.
+// TODO (frankban): this is a temporary hack, and can be removed once we have
+// a more consistent way to import the download counts from the legacy charm
+// store (charms) and from charmworld (bundles). To remove the legacy download
+// counts logic in the future, grep the code for "LegacyDownloadCountsEnabled"
+// and remove as required.
+var LegacyDownloadCountsEnabled = true
+
+// ArchiveDownloadCounts calculates the aggregated download counts for
+// a charm or bundle.
+func (s *Store) ArchiveDownloadCounts(id *charm.URL, refresh bool) (thisRevision, allRevisions AggregatedCounts, err error) {
+	// Retrieve the aggregated stats.
+	fetchId := *id
+	fetch := func() (interface{}, error) {
+		return s.statsCacheFetch(&fetchId)
+	}
+
+	var v interface{}
+	if refresh {
+		s.pool.statsCache.Evict(fetchId.String())
+	}
+	v, err = s.pool.statsCache.Get(fetchId.String(), fetch)
+
+	if err != nil {
+		return AggregatedCounts{}, AggregatedCounts{}, errgo.Mask(err)
+	}
+	thisRevision = v.(AggregatedCounts)
+
+	fetchId.Revision = -1
+	if refresh {
+		s.pool.statsCache.Evict(fetchId.String())
+	}
+	v, err = s.pool.statsCache.Get(fetchId.String(), fetch)
+
+	if err != nil {
+		return AggregatedCounts{}, AggregatedCounts{}, errgo.Mask(err)
+	}
+	allRevisions = v.(AggregatedCounts)
+	return
+}
+
+func (s *Store) statsCacheFetch(id *charm.URL) (interface{}, error) {
+	prefix := id.Revision == -1
+	kind := params.StatsArchiveDownload
+	if id.User == "" {
+		kind = params.StatsArchiveDownloadPromulgated
+	}
+	counts, err := s.aggregateStats(EntityStatsKey(id, kind), prefix)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot get aggregated count for %q", id)
+	}
+	if !LegacyDownloadCountsEnabled {
+		return counts, nil
+	}
+	// TODO (frankban): remove this code when removing the legacy counts logic.
+	legacy, err := s.legacyDownloadCounts(id)
+	if err != nil {
+		return nil, err
+	}
+	counts.LastDay += legacy.LastDay
+	counts.LastWeek += legacy.LastWeek
+	counts.LastMonth += legacy.LastMonth
+	counts.Total += legacy.Total
+	return counts, nil
+}
+
+// legacyDownloadCounts retrieves the aggregated stats from the entity
+// extra-info. This is used when LegacyDownloadCountsEnabled is true.
+// TODO (frankban): remove this method when removing the legacy counts logic.
+func (s *Store) legacyDownloadCounts(id *charm.URL) (AggregatedCounts, error) {
+	counts := AggregatedCounts{}
+	entities, err := s.FindEntities(id, "extrainfo")
+	if err != nil {
+		return counts, errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	if len(entities) == 0 {
+		return counts, errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
+	}
+	entity := entities[0]
+	data, ok := entity.ExtraInfo[params.LegacyDownloadStats]
+	if ok {
+		if err := json.Unmarshal(data, &counts.Total); err != nil {
+			return counts, errgo.Notef(err, "cannot unmarshal extra-info value")
+		}
+	}
+	return counts, nil
+}
+
+// aggregateStats returns the aggregated download counts for the given stats
+// key.
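+//
+// For example (illustrative only): with the ByDay granularity used
+// below, a download recorded three days ago contributes to LastWeek,
+// LastMonth and Total, but not to LastDay, while one recorded a year
+// ago contributes only to Total.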
+func (s *Store) aggregateStats(key []string, prefix bool) (AggregatedCounts, error) { + var counts AggregatedCounts + + req := CounterRequest{ + Key: key, + By: ByDay, + Prefix: prefix, + } + results, err := s.Counters(&req) + + if err != nil { + return counts, errgo.Notef(err, "cannot retrieve stats") + } + + today := time.Now() + lastDay := today.AddDate(0, 0, -1) + lastWeek := today.AddDate(0, 0, -7) + lastMonth := today.AddDate(0, -1, 0) + + // Aggregate the results. + for _, result := range results { + if result.Time.After(lastMonth) { + counts.LastMonth += result.Count + if result.Time.After(lastWeek) { + counts.LastWeek += result.Count + if result.Time.After(lastDay) { + counts.LastDay += result.Count + } + } + } + counts.Total += result.Count + } + return counts, nil +} + +// IncrementDownloadCountsAsync updates the download statistics for entity id in both +// the statistics database and the search database. The action is done in the +// background using a separate goroutine. +func (s *Store) IncrementDownloadCountsAsync(id *router.ResolvedURL) { + s.Go(func(s *Store) { + if err := s.IncrementDownloadCounts(id); err != nil { + logger.Errorf("cannot increase download counter for %v: %s", id, err) + } + }) +} + +// IncrementDownloadCounts updates the download statistics for entity id in both +// the statistics database and the search database. +func (s *Store) IncrementDownloadCounts(id *router.ResolvedURL) error { + return s.IncrementDownloadCountsAtTime(id, time.Now()) +} + +// IncrementDownloadCountsAtTime updates the download statistics for entity id in both +// the statistics database and the search database, associating it with the given time. +func (s *Store) IncrementDownloadCountsAtTime(id *router.ResolvedURL, t time.Time) error { + key := EntityStatsKey(&id.URL, params.StatsArchiveDownload) + if err := s.IncCounterAtTime(key, t); err != nil { + return errgo.Notef(err, "cannot increase stats counter for %v", key) + } + if id.PromulgatedRevision == -1 { + // Check that the id really is for an unpromulgated entity. + // This unfortunately adds an extra round trip to the database, + // but as incrementing statistics is performed asynchronously + // it will not be in the critical path. + entity, err := s.FindEntity(id, "promulgated-revision") + if err != nil { + return errgo.Notef(err, "cannot find entity %v", &id.URL) + } + id.PromulgatedRevision = entity.PromulgatedRevision + } + if id.PromulgatedRevision != -1 { + key := EntityStatsKey(id.PromulgatedURL(), params.StatsArchiveDownloadPromulgated) + if err := s.IncCounterAtTime(key, t); err != nil { + return errgo.Notef(err, "cannot increase stats counter for %v", key) + } + } + // TODO(mhilton) when this charmstore is being used by juju, find a more + // efficient way to update the download statistics for search. + if err := s.UpdateSearch(id); err != nil { + return errgo.Notef(err, "cannot update search record for %v", id) + } + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/stats_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,802 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+
+package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	jujutesting "github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting"
+)
+
+type StatsSuite struct {
+	jujutesting.IsolatedMgoSuite
+	store *charmstore.Store
+}
+
+var _ = gc.Suite(&StatsSuite{})
+
+func (s *StatsSuite) SetUpTest(c *gc.C) {
+	s.IsolatedMgoSuite.SetUpTest(c)
+	pool, err := charmstore.NewPool(s.Session.DB("foo"), nil, nil, charmstore.ServerParams{})
+	c.Assert(err, gc.IsNil)
+	s.store = pool.Store()
+	pool.Close()
+}
+
+func (s *StatsSuite) TearDownTest(c *gc.C) {
+	s.store.Close()
+	s.IsolatedMgoSuite.TearDownTest(c)
+}
+
+func (s *StatsSuite) TestSumCounters(c *gc.C) {
+	if !storetesting.MongoJSEnabled() {
+		c.Skip("MongoDB JavaScript not available")
+	}
+
+	req := charmstore.CounterRequest{Key: []string{"a"}}
+	cs, err := s.store.Counters(&req)
+	c.Assert(err, gc.IsNil)
+	c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Count: 0}})
+
+	for i := 0; i < 10; i++ {
+		err := s.store.IncCounter([]string{"a", "b", "c"})
+		c.Assert(err, gc.IsNil)
+	}
+	for i := 0; i < 7; i++ {
+		err := s.store.IncCounter([]string{"a", "b"})
+		c.Assert(err, gc.IsNil)
+	}
+	for i := 0; i < 3; i++ {
+		err := s.store.IncCounter([]string{"a", "z", "b"})
+		c.Assert(err, gc.IsNil)
+	}
+
+	tests := []struct {
+		key    []string
+		prefix bool
+		result int64
+	}{
+		{[]string{"a", "b", "c"}, false, 10},
+		{[]string{"a", "b"}, false, 7},
+		{[]string{"a", "z", "b"}, false, 3},
+		{[]string{"a", "b", "c"}, true, 0},
+		{[]string{"a", "b", "c", "d"}, false, 0},
+		{[]string{"a", "b"}, true, 10},
+		{[]string{"a"}, true, 20},
+		{[]string{"b"}, true, 0},
+	}
+
+	for _, t := range tests {
+		c.Logf("Test: %#v\n", t)
+		req = charmstore.CounterRequest{Key: t.key, Prefix: t.prefix}
+		cs, err := s.store.Counters(&req)
+		c.Assert(err, gc.IsNil)
+		c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: t.key, Prefix: t.prefix, Count: t.result}})
+	}
+
+	// High-level interface works. Now check that the data is
+	// stored correctly.
+	counters := s.store.DB.StatCounters()
+	docs1, err := counters.Count()
+	c.Assert(err, gc.IsNil)
+	if docs1 != 3 && docs1 != 4 {
+		c.Errorf("Expected 3 or 4 docs in counters collection, got %d", docs1)
+	}
+
+	// Hack times so that the next operation adds another document.
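+	// (Rewriting the timestamp of an existing counter document means the
+	// next IncCounter upsert cannot match it on (k, t) and must insert a
+	// new document.)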
+	err = counters.Update(nil, bson.D{{"$set", bson.D{{"t", 1}}}})
+	c.Check(err, gc.IsNil)
+
+	err = s.store.IncCounter([]string{"a", "b", "c"})
+	c.Assert(err, gc.IsNil)
+
+	docs2, err := counters.Count()
+	c.Assert(err, gc.IsNil)
+	c.Assert(docs2, gc.Equals, docs1+1)
+
+	req = charmstore.CounterRequest{Key: []string{"a", "b", "c"}}
+	cs, err = s.store.Counters(&req)
+	c.Assert(err, gc.IsNil)
+	c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Count: 11}})
+
+	req = charmstore.CounterRequest{Key: []string{"a"}, Prefix: true}
+	cs, err = s.store.Counters(&req)
+	c.Assert(err, gc.IsNil)
+	c.Assert(cs, gc.DeepEquals, []charmstore.Counter{{Key: req.Key, Prefix: true, Count: 21}})
+}
+
+func (s *StatsSuite) TestCountersReadOnlySum(c *gc.C) {
+	if !storetesting.MongoJSEnabled() {
+		c.Skip("MongoDB JavaScript not available")
+	}
+
+	// Summing up an unknown key shouldn't add the key to the database.
+	req := charmstore.CounterRequest{Key: []string{"a", "b", "c"}}
+	_, err := s.store.Counters(&req)
+	c.Assert(err, gc.IsNil)
+
+	// Check the tokens collection actually used by the store.
+	tokens := s.store.DB.StatTokens()
+	n, err := tokens.Count()
+	c.Assert(err, gc.IsNil)
+	c.Assert(n, gc.Equals, 0)
+}
+
+func (s *StatsSuite) TestCountersTokenCaching(c *gc.C) {
+	if !storetesting.MongoJSEnabled() {
+		c.Skip("MongoDB JavaScript not available")
+	}
+
+	assertSum := func(i int, want int64) {
+		req := charmstore.CounterRequest{Key: []string{strconv.Itoa(i)}}
+		cs, err := s.store.Counters(&req)
+		c.Assert(err, gc.IsNil)
+		c.Assert(cs[0].Count, gc.Equals, want)
+	}
+	assertSum(100000, 0)
+
+	const genSize = 1024
+
+	// All of these will be cached, as we have two generations
+	// of genSize entries each.
+	for i := 0; i < genSize*2; i++ {
+		err := s.store.IncCounter([]string{strconv.Itoa(i)})
+		c.Assert(err, gc.IsNil)
+	}
+
+	// Now go behind the scenes and corrupt all the tokens.
+	tokens := s.store.DB.StatTokens()
+	iter := tokens.Find(nil).Iter()
+	var t struct {
+		Id    int    "_id"
+		Token string "t"
+	}
+	for iter.Next(&t) {
+		err := tokens.UpdateId(t.Id, bson.M{"$set": bson.M{"t": "corrupted" + t.Token}})
+		c.Assert(err, gc.IsNil)
+	}
+	c.Assert(iter.Err(), gc.IsNil)
+
+	// We can consult the counters for the cached entries still.
+	// First, check that the newest generation is good.
+	for i := genSize; i < genSize*2; i++ {
+		assertSum(i, 1)
+	}
+
+	// Now, we can still access a single entry of the older generation,
+	// but this will cause the generations to flip and thus the rest
+	// of the old generation will go away as the top half of the
+	// entries is turned into the old generation.
+	assertSum(0, 1)
+
+	// Now we've lost access to the rest of the old generation.
+	for i := 1; i < genSize; i++ {
+		assertSum(i, 0)
+	}
+
+	// But we still have all of the top half available since it was
+	// moved into the old generation.
+ for i := genSize; i < genSize*2; i++ { + assertSum(i, 1) + } +} + +func (s *StatsSuite) TestCounterTokenUniqueness(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + var wg0, wg1 sync.WaitGroup + wg0.Add(10) + wg1.Add(10) + for i := 0; i < 10; i++ { + go func() { + wg0.Done() + wg0.Wait() + defer wg1.Done() + err := s.store.IncCounter([]string{"a"}) + c.Check(err, gc.IsNil) + }() + } + wg1.Wait() + + req := charmstore.CounterRequest{Key: []string{"a"}} + cs, err := s.store.Counters(&req) + c.Assert(err, gc.IsNil) + c.Assert(cs[0].Count, gc.Equals, int64(10)) +} + +func (s *StatsSuite) TestListCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + incs := [][]string{ + {"c", "b", "a"}, // Assign internal id c < id b < id a, to make sorting slightly trickier. + {"a"}, + {"a", "c"}, + {"a", "b"}, + {"a", "b", "c"}, + {"a", "b", "c"}, + {"a", "b", "e"}, + {"a", "b", "d"}, + {"a", "f", "g"}, + {"a", "f", "h"}, + {"a", "i"}, + {"a", "i", "j"}, + {"k", "l"}, + } + for _, key := range incs { + err := s.store.IncCounter(key) + c.Assert(err, gc.IsNil) + } + + tests := []struct { + prefix []string + result []charmstore.Counter + }{ + { + []string{"a"}, + []charmstore.Counter{ + {Key: []string{"a", "b"}, Prefix: true, Count: 4}, + {Key: []string{"a", "f"}, Prefix: true, Count: 2}, + {Key: []string{"a", "b"}, Prefix: false, Count: 1}, + {Key: []string{"a", "c"}, Prefix: false, Count: 1}, + {Key: []string{"a", "i"}, Prefix: false, Count: 1}, + {Key: []string{"a", "i"}, Prefix: true, Count: 1}, + }, + }, { + []string{"a", "b"}, + []charmstore.Counter{ + {Key: []string{"a", "b", "c"}, Prefix: false, Count: 2}, + {Key: []string{"a", "b", "d"}, Prefix: false, Count: 1}, + {Key: []string{"a", "b", "e"}, Prefix: false, Count: 1}, + }, + }, { + []string{"z"}, + []charmstore.Counter(nil), + }, + } + + // Use a different store to exercise cache filling. + pool, err := charmstore.NewPool(s.store.DB.Database, nil, nil, charmstore.ServerParams{}) + c.Assert(err, gc.IsNil) + st := pool.Store() + defer st.Close() + pool.Close() + + for i := range tests { + req := &charmstore.CounterRequest{Key: tests[i].prefix, Prefix: true, List: true} + result, err := st.Counters(req) + c.Assert(err, gc.IsNil) + c.Assert(result, gc.DeepEquals, tests[i].result) + } +} + +func (s *StatsSuite) TestListCountersBy(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + incs := []struct { + key []string + day int + }{ + {[]string{"a"}, 1}, + {[]string{"a"}, 1}, + {[]string{"b"}, 1}, + {[]string{"a", "b"}, 1}, + {[]string{"a", "c"}, 1}, + {[]string{"a"}, 3}, + {[]string{"a", "b"}, 3}, + {[]string{"b"}, 9}, + {[]string{"b"}, 9}, + {[]string{"a", "c", "d"}, 9}, + {[]string{"a", "c", "e"}, 9}, + {[]string{"a", "c", "f"}, 9}, + } + + day := func(i int) time.Time { + return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) + } + + for i, inc := range incs { + t := day(inc.day) + // Ensure each entry is unique by adding + // a sufficient increment for each test. 
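+		// (The added offsets stay well below a day, so the per-day
+		// buckets asserted below are unaffected.)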
+ t = t.Add(time.Duration(i) * charmstore.StatsGranularity) + + err := s.store.IncCounterAtTime(inc.key, t) + c.Assert(err, gc.IsNil) + } + + tests := []struct { + request charmstore.CounterRequest + result []charmstore.Counter + }{ + { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: false, + List: false, + By: charmstore.ByDay, + }, + []charmstore.Counter{ + {Key: []string{"a"}, Prefix: false, Count: 2, Time: day(1)}, + {Key: []string{"a"}, Prefix: false, Count: 1, Time: day(3)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + }, + []charmstore.Counter{ + {Key: []string{"a"}, Prefix: true, Count: 2, Time: day(1)}, + {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, + {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(9)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Start: day(2), + }, + []charmstore.Counter{ + {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, + {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(9)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Stop: day(4), + }, + []charmstore.Counter{ + {Key: []string{"a"}, Prefix: true, Count: 2, Time: day(1)}, + {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Start: day(3), + Stop: day(8), + }, + []charmstore.Counter{ + {Key: []string{"a"}, Prefix: true, Count: 1, Time: day(3)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: true, + By: charmstore.ByDay, + }, + []charmstore.Counter{ + {Key: []string{"a", "b"}, Prefix: false, Count: 1, Time: day(1)}, + {Key: []string{"a", "c"}, Prefix: false, Count: 1, Time: day(1)}, + {Key: []string{"a", "b"}, Prefix: false, Count: 1, Time: day(3)}, + {Key: []string{"a", "c"}, Prefix: true, Count: 3, Time: day(9)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByWeek, + }, + []charmstore.Counter{ + {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(6)}, + {Key: []string{"a"}, Prefix: true, Count: 3, Time: day(13)}, + }, + }, { + charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: true, + By: charmstore.ByWeek, + }, + []charmstore.Counter{ + {Key: []string{"a", "b"}, Prefix: false, Count: 2, Time: day(6)}, + {Key: []string{"a", "c"}, Prefix: false, Count: 1, Time: day(6)}, + {Key: []string{"a", "c"}, Prefix: true, Count: 3, Time: day(13)}, + }, + }, + } + + for _, test := range tests { + result, err := s.store.Counters(&test.request) + c.Assert(err, gc.IsNil) + c.Assert(result, gc.DeepEquals, test.result) + } +} + +type testStatsEntity struct { + id *router.ResolvedURL + lastDay int + lastWeek int + lastMonth int + total int + legacyTotal int +} + +var archiveDownloadCountsTests = []struct { + about string + charms []testStatsEntity + id *charm.URL + expectThisRevision charmstore.AggregatedCounts + expectAllRevisions charmstore.AggregatedCounts +}{{ + about: "single revision", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, + legacyTotal: 0, + }}, + id: charm.MustParseURL("~charmers/trusty/wordpress-0"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 
3, + LastMonth: 6, + Total: 10, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 10, + }, +}, { + about: "single revision with legacy count", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, + legacyTotal: 10, + }}, + id: charm.MustParseURL("~charmers/trusty/wordpress-0"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 20, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 20, + }, +}, { + about: "multiple revisions", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, + legacyTotal: 0, + }, { + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), + lastDay: 2, + lastWeek: 3, + lastMonth: 4, + total: 5, + legacyTotal: 0, + }}, + id: charm.MustParseURL("~charmers/trusty/wordpress-1"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 2, + LastWeek: 5, + LastMonth: 9, + Total: 14, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 3, + LastWeek: 8, + LastMonth: 15, + Total: 24, + }, +}, { + about: "multiple revisions with legacy count", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, + legacyTotal: 100, + }, { + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), + lastDay: 2, + lastWeek: 3, + lastMonth: 4, + total: 5, + legacyTotal: 100, + }}, + id: charm.MustParseURL("~charmers/trusty/wordpress-1"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 2, + LastWeek: 5, + LastMonth: 9, + Total: 114, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 3, + LastWeek: 8, + LastMonth: 15, + Total: 124, + }, +}, { + about: "promulgated revision", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, + legacyTotal: 0, + }}, + id: charm.MustParseURL("trusty/wordpress-0"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 10, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 10, + }, +}, { + about: "promulgated revision with legacy count", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 2, + lastMonth: 3, + total: 4, + legacyTotal: 10, + }}, + id: charm.MustParseURL("trusty/wordpress-0"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 20, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 3, + LastMonth: 6, + Total: 20, + }, +}, { + about: "promulgated revision with changed owner", + charms: []testStatsEntity{{ + id: charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-0"), + lastDay: 1, + lastWeek: 10, + lastMonth: 100, + total: 1000, + legacyTotal: 0, + }, { + id: charmstore.MustParseResolvedURL("~charmers/trusty/wordpress-1"), + lastDay: 2, + lastWeek: 20, + lastMonth: 200, + total: 2000, + legacyTotal: 0, + }, { + id: charmstore.MustParseResolvedURL("~wordpress-charmers/trusty/wordpress-0"), + lastDay: 3, + lastWeek: 30, + 
lastMonth: 300, + total: 3000, + legacyTotal: 0, + }, { + id: charmstore.MustParseResolvedURL("1 ~wordpress-charmers/trusty/wordpress-1"), + lastDay: 4, + lastWeek: 40, + lastMonth: 400, + total: 4000, + legacyTotal: 0, + }}, + id: charm.MustParseURL("trusty/wordpress-1"), + expectThisRevision: charmstore.AggregatedCounts{ + LastDay: 4, + LastWeek: 44, + LastMonth: 444, + Total: 4444, + }, + expectAllRevisions: charmstore.AggregatedCounts{ + LastDay: 5, + LastWeek: 55, + LastMonth: 555, + Total: 5555, + }, +}} + +func (s *StatsSuite) TestArchiveDownloadCounts(c *gc.C) { + s.PatchValue(&charmstore.LegacyDownloadCountsEnabled, true) + for i, test := range archiveDownloadCountsTests { + c.Logf("%d: %s", i, test.about) + // Clear everything + charmstore.StatsCacheEvictAll(s.store) + s.store.DB.Entities().RemoveAll(nil) + s.store.DB.StatCounters().RemoveAll(nil) + for _, charm := range test.charms { + ch := storetesting.Charms.CharmDir(charm.id.URL.Name) + err := s.store.AddCharmWithArchive(charm.id, ch) + c.Assert(err, gc.IsNil) + url := charm.id.URL + now := time.Now() + setDownloadCounts(c, s.store, &url, now, charm.lastDay) + setDownloadCounts(c, s.store, &url, now.Add(-2*24*time.Hour), charm.lastWeek) + setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) + setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) + if charm.id.PromulgatedRevision > -1 { + url.Revision = charm.id.PromulgatedRevision + url.User = "" + setDownloadCounts(c, s.store, &url, now, charm.lastDay) + setDownloadCounts(c, s.store, &url, now.Add(-2*24*time.Hour), charm.lastWeek) + setDownloadCounts(c, s.store, &url, now.Add(-10*24*time.Hour), charm.lastMonth) + setDownloadCounts(c, s.store, &url, now.Add(-100*24*time.Hour), charm.total) + } + extraInfo := map[string][]byte{ + params.LegacyDownloadStats: []byte(fmt.Sprintf("%d", charm.legacyTotal)), + } + err = s.store.UpdateEntity(charm.id, bson.D{{ + "$set", bson.D{{"extrainfo", extraInfo}}, + }}) + c.Assert(err, gc.IsNil) + } + thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(test.id, true) + c.Assert(err, gc.IsNil) + c.Assert(thisRevision, jc.DeepEquals, test.expectThisRevision) + c.Assert(allRevisions, jc.DeepEquals, test.expectAllRevisions) + } +} + +func setDownloadCounts(c *gc.C, s *charmstore.Store, id *charm.URL, t time.Time, n int) { + kind := params.StatsArchiveDownload + if id.User == "" { + kind = params.StatsArchiveDownloadPromulgated + } + key := charmstore.EntityStatsKey(id, kind) + for i := 0; i < n; i++ { + err := s.IncCounterAtTime(key, t) + c.Assert(err, gc.IsNil) + } +} + +func (s *StatsSuite) TestIncrementDownloadCounts(c *gc.C) { + ch := storetesting.Charms.CharmDir("wordpress") + id := charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-1") + err := s.store.AddCharmWithArchive(id, ch) + c.Assert(err, gc.IsNil) + err = s.store.IncrementDownloadCounts(id) + c.Assert(err, gc.IsNil) + expect := charmstore.AggregatedCounts{ + LastDay: 1, + LastWeek: 1, + LastMonth: 1, + Total: 1, + } + thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), true) + c.Assert(err, gc.IsNil) + c.Assert(thisRevision, jc.DeepEquals, expect) + c.Assert(allRevisions, jc.DeepEquals, expect) + thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("trusty/wordpress-0"), true) + c.Assert(err, gc.IsNil) + c.Assert(thisRevision, jc.DeepEquals, expect) + c.Assert(allRevisions, jc.DeepEquals, expect) +} + +func (s 
*StatsSuite) TestIncrementDownloadCountsOnPromulgatedMultiSeriesCharm(c *gc.C) {
+	ch := storetesting.Charms.CharmDir("multi-series")
+	id := charmstore.MustParseResolvedURL("0 ~charmers/wordpress-1")
+	err := s.store.AddCharmWithArchive(id, ch)
+	c.Assert(err, gc.IsNil)
+	err = s.store.IncrementDownloadCounts(id)
+	c.Assert(err, gc.IsNil)
+	expect := charmstore.AggregatedCounts{
+		LastDay:   1,
+		LastWeek:  1,
+		LastMonth: 1,
+		Total:     1,
+	}
+	thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/wordpress-1"), true)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expect)
+	c.Assert(allRevisions, jc.DeepEquals, expect)
+	thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("wordpress-0"), true)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expect)
+	c.Assert(allRevisions, jc.DeepEquals, expect)
+}
+
+func (s *StatsSuite) TestIncrementDownloadCountsOnIdWithPreferredSeries(c *gc.C) {
+	ch := storetesting.Charms.CharmDir("multi-series")
+	id := charmstore.MustParseResolvedURL("0 ~charmers/wordpress-1")
+	id.PreferredSeries = "trusty"
+	err := s.store.AddCharmWithArchive(id, ch)
+	c.Assert(err, gc.IsNil)
+	err = s.store.IncrementDownloadCounts(id)
+	c.Assert(err, gc.IsNil)
+	expect := charmstore.AggregatedCounts{
+		LastDay:   1,
+		LastWeek:  1,
+		LastMonth: 1,
+		Total:     1,
+	}
+	thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/wordpress-1"), true)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expect)
+	c.Assert(allRevisions, jc.DeepEquals, expect)
+	thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("wordpress-0"), true)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expect)
+	c.Assert(allRevisions, jc.DeepEquals, expect)
+}
+
+func (s *StatsSuite) TestIncrementDownloadCountsCaching(c *gc.C) {
+	ch := storetesting.Charms.CharmDir("wordpress")
+	id := charmstore.MustParseResolvedURL("0 ~charmers/trusty/wordpress-1")
+	err := s.store.AddCharmWithArchive(id, ch)
+	c.Assert(err, gc.IsNil)
+	err = s.store.IncrementDownloadCounts(id)
+	c.Assert(err, gc.IsNil)
+	expect := charmstore.AggregatedCounts{
+		LastDay:   1,
+		LastWeek:  1,
+		LastMonth: 1,
+		Total:     1,
+	}
+	expectAfter := charmstore.AggregatedCounts{
+		LastDay:   2,
+		LastWeek:  2,
+		LastMonth: 2,
+		Total:     2,
+	}
+	thisRevision, allRevisions, err := s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), false)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expect)
+	c.Assert(allRevisions, jc.DeepEquals, expect)
+	err = s.store.IncrementDownloadCounts(id)
+	c.Assert(err, gc.IsNil)
+	thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), false)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expect)
+	c.Assert(allRevisions, jc.DeepEquals, expect)
+	thisRevision, allRevisions, err = s.store.ArchiveDownloadCounts(charm.MustParseURL("~charmers/trusty/wordpress-1"), true)
+	c.Assert(err, gc.IsNil)
+	c.Assert(thisRevision, jc.DeepEquals, expectAfter)
+	c.Assert(allRevisions, jc.DeepEquals, expectAfter)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1618 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details. + +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "archive/zip" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "sync" + "time" + + "github.com/juju/loggo" + "github.com/juju/utils/parallel" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + "gopkg.in/natefinch/lumberjack.v2" + + "gopkg.in/juju/charmstore.v5-unstable/audit" + "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/cache" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +var logger = loggo.GetLogger("charmstore.internal.charmstore") + +var ( + errClosed = errgo.New("charm store has been closed") + ErrTooManySessions = errgo.New("too many mongo sessions in use") +) + +// Pool holds a connection to the underlying charm and blob +// data stores. Calling its Store method returns a new Store +// from the pool that can be used to process short-lived requests +// to access and modify the store. +type Pool struct { + db StoreDatabase + es *SearchIndex + bakeryParams *bakery.NewServiceParams + stats stats + run *parallel.Run + + // statsCache holds a cache of AggregatedCounts + // values, keyed by entity id. When the id has no + // revision, the counts apply to all revisions of the + // entity. + statsCache *cache.Cache + + config ServerParams + + // auditEncoder encodes messages to auditLogger. + auditEncoder *json.Encoder + auditLogger *lumberjack.Logger + + // reqStoreC is a buffered channel that contains allocated + // stores that are not currently in use. + reqStoreC chan *Store + + // mu guards the fields following it. + mu sync.Mutex + + // storeCount holds the number of stores currently allocated. + storeCount int + + // closed holds whether the handler has been closed. + closed bool +} + +// reqStoreCacheSize holds the maximum number of store +// instances to keep around cached when there is no +// limit specified by config.MaxMgoSessions. +const reqStoreCacheSize = 50 + +// maxAsyncGoroutines holds the maximum number +// of goroutines that will be started by Store.Go. +const maxAsyncGoroutines = 50 + +// NewPool returns a Pool that uses the given database +// and search index. If bakeryParams is not nil, +// the Bakery field in the resulting Store will be set +// to a new Service that stores macaroons in mongo. +// +// The pool must be closed (with the Close method) +// after use. +func NewPool(db *mgo.Database, si *SearchIndex, bakeryParams *bakery.NewServiceParams, config ServerParams) (*Pool, error) { + if config.StatsCacheMaxAge == 0 { + config.StatsCacheMaxAge = time.Hour + } + + p := &Pool{ + db: StoreDatabase{db}.copy(), + es: si, + statsCache: cache.New(config.StatsCacheMaxAge), + config: config, + run: parallel.NewRun(maxAsyncGoroutines), + auditLogger: config.AuditLogger, + } + if config.MaxMgoSessions > 0 { + p.reqStoreC = make(chan *Store, config.MaxMgoSessions) + } else { + p.reqStoreC = make(chan *Store, reqStoreCacheSize) + } + if bakeryParams != nil { + bp := *bakeryParams + // Fill out any bakery parameters explicitly here so + // that we use the same values when each Store is + // created. 
We don't fill out the bp.Store field though, as
+		// that needs to hold the correct mongo session which we
+		// only know when the Store is created from the Pool.
+		if bp.Key == nil {
+			var err error
+			bp.Key, err = bakery.GenerateKey()
+			if err != nil {
+				return nil, errgo.Notef(err, "cannot generate bakery key")
+			}
+		}
+		if bp.Locator == nil {
+			bp.Locator = bakery.PublicKeyLocatorMap(nil)
+		}
+		p.bakeryParams = &bp
+	}
+
+	if config.AuditLogger != nil {
+		p.auditLogger = config.AuditLogger
+		p.auditEncoder = json.NewEncoder(p.auditLogger)
+	}
+
+	store := p.Store()
+	defer store.Close()
+	if err := store.ensureIndexes(); err != nil {
+		return nil, errgo.Notef(err, "cannot ensure indexes")
+	}
+	if err := store.ES.ensureIndexes(false); err != nil {
+		return nil, errgo.Notef(err, "cannot ensure elasticsearch indexes")
+	}
+	return p, nil
+}
+
+// Close closes the pool. This must be called when the pool
+// is finished with.
+func (p *Pool) Close() {
+	p.mu.Lock()
+	if p.closed {
+		p.mu.Unlock()
+		return
+	}
+	p.closed = true
+	p.mu.Unlock()
+	p.run.Wait()
+	p.db.Close()
+	// Close all cached stores. Any used by
+	// outstanding requests will be closed when the
+	// requests complete.
+	for {
+		select {
+		case s := <-p.reqStoreC:
+			s.DB.Close()
+		default:
+			// The loop only exits through this branch, so close the
+			// audit logger here; closing it after the loop would be
+			// unreachable code.
+			if p.auditLogger != nil {
+				p.auditLogger.Close()
+			}
+			return
+		}
+	}
+}
+
+// RequestStore returns a store for a client request. It returns
+// an error with an ErrTooManySessions cause
+// if too many mongo sessions are in use.
+func (p *Pool) RequestStore() (*Store, error) {
+	store, err := p.requestStoreNB(false)
+	if store != nil {
+		return store, nil
+	}
+	if errgo.Cause(err) != ErrTooManySessions {
+		return nil, errgo.Mask(err)
+	}
+	// No handlers currently available - we've exceeded our concurrency limit
+	// so wait for a handler to become available.
+	select {
+	case store := <-p.reqStoreC:
+		return store, nil
+	case <-time.After(p.config.HTTPRequestWaitDuration):
+		return nil, errgo.Mask(err, errgo.Is(ErrTooManySessions))
+	}
+}
+
+// Store returns a Store that can be used to access the database.
+//
+// It must be closed (with the Close method) after use.
+func (p *Pool) Store() *Store {
+	store, _ := p.requestStoreNB(true)
+	return store
+}
+
+// requestStoreNB is like RequestStore except that it
+// does not block when a Store is not immediately
+// available, in which case it returns an error with
+// an ErrTooManySessions cause.
+//
+// If always is true, it will never return an error.
+func (p *Pool) requestStoreNB(always bool) (*Store, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.closed && !always {
+		return nil, errClosed
+	}
+	select {
+	case store := <-p.reqStoreC:
+		return store, nil
+	default:
+	}
+	if !always && p.config.MaxMgoSessions > 0 && p.storeCount >= p.config.MaxMgoSessions {
+		return nil, ErrTooManySessions
+	}
+	p.storeCount++
+	db := p.db.copy()
+	store := &Store{
+		DB:        db,
+		BlobStore: blobstore.New(db.Database, "entitystore"),
+		ES:        p.es,
+		stats:     &p.stats,
+		pool:      p,
+	}
+	if p.bakeryParams != nil {
+		store.Bakery = newBakery(db, *p.bakeryParams)
+	}
+	return store, nil
+}
+
+func newBakery(db StoreDatabase, bp bakery.NewServiceParams) *bakery.Service {
+	macStore, err := mgostorage.New(db.Macaroons())
+	if err != nil {
+		// Should never happen.
+ panic(errgo.Newf("unexpected error from mgostorage.New: %v", err)) + } + bp.Store = macStore + bsvc, err := bakery.NewService(bp) + if err != nil { + // This should never happen because the only reason bakery.NewService + // can fail is if it can't generate a key, and we have already made + // sure that the key is generated. + panic(errgo.Notef(err, "cannot make bakery service")) + } + return bsvc +} + +// Store holds a connection to the underlying charm and blob +// data stores that is appropriate for short term use. +type Store struct { + DB StoreDatabase + BlobStore *blobstore.Store + ES *SearchIndex + Bakery *bakery.Service + stats *stats + pool *Pool +} + +// Copy returns a new store with a lifetime +// independent of s. Use this method if you +// need to use a store in an independent goroutine. +// +// It must be closed (with the Close method) after use. +func (s *Store) Copy() *Store { + s1 := *s + s1.DB = s.DB.clone() + s1.BlobStore = blobstore.New(s1.DB.Database, "entitystore") + if s.Bakery != nil { + s1.Bakery = newBakery(s1.DB, *s.pool.bakeryParams) + } + + s.pool.mu.Lock() + s.pool.storeCount++ + s.pool.mu.Unlock() + + return &s1 +} + +// Close closes the store instance. +func (s *Store) Close() { + // Refresh the mongodb session so that the + // next time the Store is used, it will acquire + // a new connection from the pool as if the + // session had been copied. + s.DB.Session.Refresh() + + s.pool.mu.Lock() + defer s.pool.mu.Unlock() + if !s.pool.closed && (s.pool.config.MaxMgoSessions == 0 || s.pool.storeCount <= s.pool.config.MaxMgoSessions) { + // The pool isn't overloaded, so put the store + // back. Note that the default case should + // never happen when MaxMgoSessions > 0. + select { + case s.pool.reqStoreC <- s: + return + default: + // No space for handler - this may happen when + // the number of actual sessions has exceeded + // the requested maximum (for example when + // a request already at the limit uses another session, + // or when we are imposing no limit). + } + } + s.DB.Close() + s.pool.storeCount-- +} + +// SetReconnectTimeout sets the length of time that +// mongo requests will block waiting to reconnect +// to a disconnected mongo server. If it is zero, +// requests may block forever. +func (s *Store) SetReconnectTimeout(d time.Duration) { + s.DB.Session.SetSyncTimeout(d) +} + +// Go runs the given function in a new goroutine, +// passing it a copy of s, which will be closed +// after the function returns. +func (s *Store) Go(f func(*Store)) { + s = s.Copy() + s.pool.run.Do(func() error { + defer s.Close() + f(s) + return nil + }) +} + +// Pool returns the pool that the store originally +// came from. 
+func (s *Store) Pool() *Pool {
+	return s.pool
+}
+
+func (s *Store) ensureIndexes() error {
+	indexes := []struct {
+		c *mgo.Collection
+		i mgo.Index
+	}{{
+		s.DB.StatCounters(),
+		mgo.Index{Key: []string{"k", "t"}, Unique: true},
+	}, {
+		s.DB.StatTokens(),
+		mgo.Index{Key: []string{"t"}, Unique: true},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"baseurl"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"uploadtime"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"promulgated-url"}, Unique: true, Sparse: true},
+	}, {
+		s.DB.BaseEntities(),
+		mgo.Index{Key: []string{"public"}},
+	}, {
+		s.DB.Logs(),
+		mgo.Index{Key: []string{"urls"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"user"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"user", "name"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"user", "name", "series"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"series"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"blobhash256"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"_id", "name"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"charmrequiredinterfaces"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"charmprovidedinterfaces"}},
+	}, {
+		s.DB.Entities(),
+		mgo.Index{Key: []string{"bundlecharms"}},
+	}, {
+		s.DB.BaseEntities(),
+		mgo.Index{Key: []string{"name"}},
+	}}
+	for _, idx := range indexes {
+		err := idx.c.EnsureIndex(idx.i)
+		if err != nil {
+			return errgo.Notef(err, "cannot ensure index with keys %v on collection %s", idx.i.Key, idx.c.Name)
+		}
+	}
+	return nil
+}
+
+func (s *Store) putArchive(archive blobstore.ReadSeekCloser) (blobName, blobHash, blobHash256 string, size int64, err error) {
+	hash := blobstore.NewHash()
+	hash256 := sha256.New()
+	size, err = io.Copy(io.MultiWriter(hash, hash256), archive)
+	if err != nil {
+		return "", "", "", 0, errgo.Notef(err, "cannot copy archive")
+	}
+	if _, err = archive.Seek(0, 0); err != nil {
+		return "", "", "", 0, errgo.Notef(err, "cannot seek in archive")
+	}
+	blobHash = fmt.Sprintf("%x", hash.Sum(nil))
+	blobName = bson.NewObjectId().Hex()
+	if err = s.BlobStore.PutUnchallenged(archive, blobName, size, blobHash); err != nil {
+		return "", "", "", 0, errgo.Notef(err, "cannot put archive into blob store")
+	}
+	return blobName, blobHash, fmt.Sprintf("%x", hash256.Sum(nil)), size, nil
+}
+
+// AddCharmWithArchive is like AddCharm but
+// also adds the charm archive to the blob store.
+// This method is provided principally so that
+// tests can easily create content in the store.
+//
+// If url.PromulgatedRevision is not -1 then the charm will also be
+// available at the corresponding promulgated URL.
+func (s *Store) AddCharmWithArchive(url *router.ResolvedURL, ch charm.Charm) error {
+	blobName, blobHash, blobHash256, blobSize, err := s.uploadCharmOrBundle(ch)
+	if err != nil {
+		return errgo.Notef(err, "cannot upload charm")
+	}
+	return s.AddCharm(ch, AddParams{
+		URL:         url,
+		BlobName:    blobName,
+		BlobHash:    blobHash,
+		BlobHash256: blobHash256,
+		BlobSize:    blobSize,
+	})
+}
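+
+// For illustration, a test can seed the store in two lines using the
+// helper above; this is a sketch only, reusing the MustParseResolvedURLs
+// and storetesting helpers that appear in the test suite below:
+//
+//	url := MustParseResolvedURLs([]string{"23 cs:~charmers/precise/wordpress-23"})[0]
+//	err := store.AddCharmWithArchive(url, storetesting.Charms.CharmDir("wordpress"))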
+
+// AddBundleWithArchive is like AddBundle but
+// also adds the bundle archive to the blob store.
+// This method is provided principally so that
+// tests can easily create content in the store.
+//
+// If url.PromulgatedRevision is not -1 then the bundle will also be
+// available at the corresponding promulgated URL.
+func (s *Store) AddBundleWithArchive(url *router.ResolvedURL, b charm.Bundle) error {
+	blobName, blobHash, blobHash256, size, err := s.uploadCharmOrBundle(b)
+	if err != nil {
+		return errgo.Notef(err, "cannot upload bundle")
+	}
+	return s.AddBundle(b, AddParams{
+		URL:         url,
+		BlobName:    blobName,
+		BlobHash:    blobHash,
+		BlobHash256: blobHash256,
+		BlobSize:    size,
+	})
+}
+
+func (s *Store) uploadCharmOrBundle(c interface{}) (blobName, blobHash, blobHash256 string, size int64, err error) {
+	archive, err := getArchive(c)
+	if err != nil {
+		return "", "", "", 0, errgo.Notef(err, "cannot get archive")
+	}
+	defer archive.Close()
+	return s.putArchive(archive)
+}
+
+// AddAudit adds the given entry to the audit log.
+func (s *Store) AddAudit(entry audit.Entry) {
+	s.addAuditAtTime(entry, time.Now())
+}
+
+func (s *Store) addAuditAtTime(entry audit.Entry, t time.Time) {
+	if s.pool.auditEncoder == nil {
+		return
+	}
+	entry.Time = t
+	err := s.pool.auditEncoder.Encode(entry)
+	if err != nil {
+		logger.Errorf("Cannot write audit log entry: %v", err)
+	}
+}
+
+// AddParams holds parameters held in common between the
+// Store.AddCharm and Store.AddBundle methods.
+type AddParams struct {
+	// URL holds the id to be associated with the stored entity.
+	// If URL.PromulgatedRevision is not -1, the entity will
+	// be promulgated.
+	URL *router.ResolvedURL
+
+	// BlobName holds the name of the entity's archive blob.
+	BlobName string
+
+	// BlobHash holds the hash of the entity's archive blob.
+	BlobHash string
+
+	// BlobHash256 holds the sha256 hash of the entity's archive blob.
+	BlobHash256 string
+
+	// BlobSize holds the size of the entity's archive blob.
+	BlobSize int64
+
+	// Contents holds references to files inside the
+	// entity's archive blob.
+	Contents map[mongodoc.FileId]mongodoc.ZipFile
+}
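+
+// For illustration, AddCharmWithArchive and AddBundleWithArchive simply
+// wire putArchive's results into AddParams; a caller doing the same by
+// hand would look roughly like this (a sketch only):
+//
+//	blobName, blobHash, blobHash256, size, err := store.putArchive(archive)
+//	if err != nil {
+//		return errgo.Mask(err)
+//	}
+//	return store.AddCharm(ch, AddParams{
+//		URL:         rurl,
+//		BlobName:    blobName,
+//		BlobHash:    blobHash,
+//		BlobHash256: blobHash256,
+//		BlobSize:    size,
+//	})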
+
+// AddCharm adds a charm to the entities collection with the given
+// parameters. If p.URL cannot be used as a name for the charm then the
+// returned error will have the cause params.ErrEntityIdNotAllowed. If the charm
+// duplicates an existing charm then the returned error will have the
+// cause params.ErrDuplicateUpload.
+func (s *Store) AddCharm(c charm.Charm, p AddParams) (err error) {
+	// Strictly speaking this test is redundant, because a ResolvedURL should
+	// always be canonical, but check just in case anyway, as this is the
+	// final gateway before a potentially invalid url might be stored
+	// in the database.
+	id := p.URL.URL
+	if id.Series == "bundle" || id.User == "" || id.Revision == -1 {
+		return errgo.Newf("charm added with invalid id %v", &id)
+	}
+	logger.Infof("add charm url %s; prev %d; dev %v", &id, p.URL.PromulgatedRevision, p.URL.Development)
+	entity := &mongodoc.Entity{
+		URL:                     &id,
+		PromulgatedURL:          p.URL.PromulgatedURL(),
+		BlobHash:                p.BlobHash,
+		BlobHash256:             p.BlobHash256,
+		BlobName:                p.BlobName,
+		Size:                    p.BlobSize,
+		UploadTime:              time.Now(),
+		CharmMeta:               c.Meta(),
+		CharmConfig:             c.Config(),
+		CharmActions:            c.Actions(),
+		CharmProvidedInterfaces: interfacesForRelations(c.Meta().Provides),
+		CharmRequiredInterfaces: interfacesForRelations(c.Meta().Requires),
+		Contents:                p.Contents,
+		SupportedSeries:         c.Meta().Series,
+		Development:             p.URL.Development,
+	}
+	denormalizeEntity(entity)
+
+	// Check that we're not going to create a charm that duplicates
+	// the name of a bundle. This is racy, but it's the best we can
+	// do. Also check that there isn't an existing multi-series charm
+	// that would be replaced by this one.
+	entities, err := s.FindEntities(entity.BaseURL)
+	if err != nil {
+		return errgo.Notef(err, "cannot check for existing entities")
+	}
+	for _, entity := range entities {
+		if entity.URL.Series == "bundle" {
+			return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "charm name duplicates bundle name %v", entity.URL)
+		}
+		if id.Series != "" && entity.URL.Series == "" {
+			return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "charm name duplicates multi-series charm name %v", entity.URL)
+		}
+	}
+	if err := s.insertEntity(entity); err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
+	}
+	return nil
+}
+
+// denormalizeEntity sets all denormalized fields in e
+// from their associated canonical fields.
+//
+// It is the responsibility of the caller to set e.SupportedSeries
+// if the entity URL does not contain a series. If the entity
+// URL *does* contain a series, e.SupportedSeries will
+// be overwritten.
+//
+// This is exported for the purposes of tests that
+// need to insert entities directly into the database.
+func denormalizeEntity(e *mongodoc.Entity) {
+	e.BaseURL = baseURL(e.URL)
+	e.Name = e.URL.Name
+	e.User = e.URL.User
+	e.Revision = e.URL.Revision
+	e.Series = e.URL.Series
+	if e.URL.Series != "" {
+		if e.URL.Series == "bundle" {
+			e.SupportedSeries = nil
+		} else {
+			e.SupportedSeries = []string{e.URL.Series}
+		}
+	}
+	if e.PromulgatedURL == nil {
+		e.PromulgatedRevision = -1
+	} else {
+		e.PromulgatedRevision = e.PromulgatedURL.Revision
+	}
+}
+
+var everyonePerm = []string{params.Everyone}
+
+func (s *Store) insertEntity(entity *mongodoc.Entity) (err error) {
+	// Add the base entity to the database.
+	perms := []string{entity.User}
+	acls := mongodoc.ACL{
+		Read:  perms,
+		Write: perms,
+	}
+	baseEntity := &mongodoc.BaseEntity{
+		URL:             entity.BaseURL,
+		User:            entity.User,
+		Name:            entity.Name,
+		Public:          false,
+		ACLs:            acls,
+		DevelopmentACLs: acls,
+		Promulgated:     entity.PromulgatedURL != nil,
+	}
+	err = s.DB.BaseEntities().Insert(baseEntity)
+	if err != nil && !mgo.IsDup(err) {
+		return errgo.Notef(err, "cannot insert base entity")
+	}
+
+	// Add the entity to the database.
+	err = s.DB.Entities().Insert(entity)
+	if mgo.IsDup(err) {
+		return params.ErrDuplicateUpload
+	}
+	if err != nil {
+		return errgo.Notef(err, "cannot insert entity")
+	}
+	// Ensure that if anything fails after this, we delete
+	// the entity, otherwise we would be left in an internally
+	// inconsistent state.
+	defer func() {
+		if err != nil {
+			if err := s.DB.Entities().RemoveId(entity.URL); err != nil {
+				logger.Errorf("cannot remove entity after elastic search failure: %v", err)
+			}
+		}
+	}()
+	// Add entity to ElasticSearch.
+	if err := s.UpdateSearch(EntityResolvedURL(entity)); err != nil {
+		return errgo.Notef(err, "cannot index %s to ElasticSearch", entity.URL)
+	}
+	return nil
+}
+
+// FindEntity finds the entity in the store with the given URL,
+// which must be fully qualified. If any fields are specified,
+// only those fields will be populated in the returned entity.
+// If the given URL has no user then it is assumed to be a
+// promulgated entity.
+func (s *Store) FindEntity(url *router.ResolvedURL, fields ...string) (*mongodoc.Entity, error) {
+	entities, err := s.FindEntities(url.UserOwnedURL(), fields...)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	if len(entities) == 0 {
+		return nil, errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
+	}
+	// The URL is guaranteed to be fully qualified so we'll always
+	// get exactly one result.
+ return entities[0], nil +} + +// FindEntities finds all entities in the store matching the given URL. +// If any fields are specified, only those fields will be +// populated in the returned entities. If the given URL has no user then +// only promulgated entities will be queried. If the given URL channel does +// not represent an entity under development then only published entities +// will be queried. +func (s *Store) FindEntities(url *charm.URL, fields ...string) ([]*mongodoc.Entity, error) { + query := selectFields(s.EntitiesQuery(url), fields) + var docs []*mongodoc.Entity + err := query.All(&docs) + if err != nil { + return nil, errgo.Notef(err, "cannot find entities matching %s", url) + } + return docs, nil +} + +// FindBestEntity finds the entity that provides the preferred match to +// the given URL. If any fields are specified, only those fields will be +// populated in the returned entities. If the given URL has no user then +// only promulgated entities will be queried. +func (s *Store) FindBestEntity(url *charm.URL, fields ...string) (*mongodoc.Entity, error) { + if len(fields) > 0 { + // Make sure we have all the fields we need to make a decision. + fields = append(fields, "_id", "promulgated-url", "promulgated-revision", "series", "revision") + } + entities, err := s.FindEntities(url, fields...) + if err != nil { + return nil, errgo.Mask(err) + } + if len(entities) == 0 { + return nil, errgo.WithCausef(nil, params.ErrNotFound, "entity not found") + } + best := entities[0] + for _, e := range entities { + if seriesScore[e.Series] > seriesScore[best.Series] { + best = e + continue + } + if seriesScore[e.Series] < seriesScore[best.Series] { + continue + } + if url.User == "" { + if e.PromulgatedRevision > best.PromulgatedRevision { + best = e + continue + } + } else { + if e.Revision > best.Revision { + best = e + continue + } + } + } + return best, nil +} + +var seriesScore = map[string]int{ + "bundle": -1, + "lucid": 1000, + "precise": 1001, + "trusty": 1002, + "quantal": 1, + "raring": 2, + "saucy": 3, + "utopic": 4, + "vivid": 5, + "wily": 6, + // When we find a multi-series charm (no series) we + // will always choose it in preference to a series-specific + // charm + "": 5000, +} + +var seriesBundleOrEmpty = bson.D{{"$or", []bson.D{{{"series", "bundle"}}, {{"series", ""}}}}} + +// EntitiesQuery creates a mgo.Query object that can be used to find +// entities matching the given URL. If the given URL has no user then +// the produced query will only match promulgated entities. If the given URL +// channel is not "development" then the produced query will only match +// published entities. +func (s *Store) EntitiesQuery(url *charm.URL) *mgo.Query { + entities := s.DB.Entities() + query := make(bson.D, 1, 5) + query[0] = bson.DocElem{"name", url.Name} + if url.Channel != charm.DevelopmentChannel { + query = append(query, bson.DocElem{"development", false}) + } + if url.User == "" { + if url.Revision > -1 { + query = append(query, bson.DocElem{"promulgated-revision", url.Revision}) + } else { + query = append(query, bson.DocElem{"promulgated-revision", bson.D{{"$gt", -1}}}) + } + } else { + query = append(query, bson.DocElem{"user", url.User}) + if url.Revision > -1 { + query = append(query, bson.DocElem{"revision", url.Revision}) + } + } + if url.Series == "" { + if url.Revision > -1 { + // If we're specifying a revision we must be searching + // for a canonical URL, so search for a multi-series + // charm or a bundle. + query = append(query, seriesBundleOrEmpty...) 
+		}
+	} else if url.Series == "bundle" {
+		query = append(query, bson.DocElem{"series", "bundle"})
+	} else {
+		query = append(query, bson.DocElem{"supportedseries", url.Series})
+	}
+	return entities.Find(query)
+}
+
+// FindBaseEntity finds the base entity in the store using the given URL,
+// which can either represent a fully qualified entity or a base id.
+// If any fields are specified, only those fields will be populated in the
+// returned base entity.
+func (s *Store) FindBaseEntity(url *charm.URL, fields ...string) (*mongodoc.BaseEntity, error) {
+	var query *mgo.Query
+	if url.User == "" {
+		query = s.DB.BaseEntities().Find(bson.D{{"name", url.Name}, {"promulgated", 1}})
+	} else {
+		query = s.DB.BaseEntities().FindId(baseURL(url))
+	}
+	query = selectFields(query, fields)
+	var baseEntity mongodoc.BaseEntity
+	if err := query.One(&baseEntity); err != nil {
+		if err == mgo.ErrNotFound {
+			return nil, errgo.WithCausef(nil, params.ErrNotFound, "base entity not found")
+		}
+		return nil, errgo.Notef(err, "cannot find base entity %v", url)
+	}
+	return &baseEntity, nil
+}
+
+func selectFields(query *mgo.Query, fields []string) *mgo.Query {
+	if len(fields) > 0 {
+		sel := make(bson.D, len(fields))
+		for i, field := range fields {
+			sel[i] = bson.DocElem{field, 1}
+		}
+		query = query.Select(sel)
+	}
+	return query
+}
+
+// UpdateEntity applies the provided update to the entity described by url.
+func (s *Store) UpdateEntity(url *router.ResolvedURL, update interface{}) error {
+	if err := s.DB.Entities().Update(bson.D{{"_id", &url.URL}}, update); err != nil {
+		if err == mgo.ErrNotFound {
+			return errgo.WithCausef(err, params.ErrNotFound, "cannot update %q", url)
+		}
+		return errgo.Notef(err, "cannot update %q", url)
+	}
+	return nil
+}
+
+// UpdateBaseEntity applies the provided update to the base entity of url.
+func (s *Store) UpdateBaseEntity(url *router.ResolvedURL, update interface{}) error {
+	if err := s.DB.BaseEntities().Update(bson.D{{"_id", baseURL(&url.URL)}}, update); err != nil {
+		if err == mgo.ErrNotFound {
+			return errgo.WithCausef(err, params.ErrNotFound, "cannot update base entity for %q", url)
+		}
+		return errgo.Notef(err, "cannot update base entity for %q", url)
+	}
+	return nil
+}
+
+// SetDevelopment sets whether the entity corresponding to the given URL will
+// only be available in its development version (in essence, not published).
+func (s *Store) SetDevelopment(url *router.ResolvedURL, development bool) error {
+	if err := s.UpdateEntity(url, bson.D{{
+		"$set", bson.D{{"development", development}},
+	}}); err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	if !development {
+		// If the entity is published, update the search index.
+		rurl := *url
+		rurl.Development = development
+		if err := s.UpdateSearch(&rurl); err != nil {
+			return errgo.Notef(err, "cannot update search entities for %q", rurl)
+		}
+	}
+	return nil
+}
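+
+// For illustration, publishing a charm that was uploaded to the
+// development channel is a single call (a sketch; the id is hypothetical):
+//
+//	rurl := MustParseResolvedURLs([]string{"23 cs:~charmers/development/precise/wordpress-23"})[0]
+//	if err := store.SetDevelopment(rurl, false); err != nil {
+//		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+//	}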
+
+// SetPromulgated sets whether the base entity of url is promulgated. If
+// promulgate is true it also unsets promulgated on any other base
+// entity for entities with the same name. It also calculates the next
+// promulgated URL for the entities owned by the new owner and sets those
+// entities appropriately.
+//
+// Note: this code is known to have some unfortunate (but not dangerous)
+// race conditions. It is possible that if one or more promulgations
+// happen concurrently for the same entity name then more than one base
+// entity could end up promulgated. If this happens, uploads by either
+// user will get promulgated names, but those names will never clash.
+// The situation is easily remedied by setting the promulgated user for
+// the charm again, even to one of the users that is already promulgated.
+// It can also result in the latest promulgated revision of the charm
+// not being one created by the promulgated user; this is remedied when
+// a new charm is uploaded by the promulgated user. As promulgation is a
+// rare operation, the chances of this happening are considered slim.
+func (s *Store) SetPromulgated(url *router.ResolvedURL, promulgate bool) error {
+	baseEntities := s.DB.BaseEntities()
+	base := baseURL(&url.URL)
+	if !promulgate {
+		err := baseEntities.UpdateId(
+			base,
+			bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(false)}}}},
+		)
+		if err != nil {
+			if errgo.Cause(err) == mgo.ErrNotFound {
+				return errgo.WithCausef(nil, params.ErrNotFound, "base entity %q not found", base)
+			}
+			return errgo.Notef(err, "cannot unpromulgate base entity %q", base)
+		}
+		if err := s.UpdateSearchBaseURL(base); err != nil {
+			return errgo.Notef(err, "cannot update search entities for %q", base)
+		}
+		return nil
+	}
+
+	// Find any currently promulgated base entities for this charm name.
+	// Under normal circumstances there should be a maximum of one of these,
+	// but we should attempt to recover if there is an error condition.
+	iter := baseEntities.Find(
+		bson.D{
+			{"_id", bson.D{{"$ne", base}}},
+			{"name", base.Name},
+			{"promulgated", mongodoc.IntBool(true)},
+		},
+	).Iter()
+	defer iter.Close()
+	var baseEntity mongodoc.BaseEntity
+	for iter.Next(&baseEntity) {
+		err := baseEntities.UpdateId(
+			baseEntity.URL,
+			bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(false)}}}},
+		)
+		if err != nil {
+			return errgo.Notef(err, "cannot unpromulgate base entity %q", baseEntity.URL)
+		}
+		if err := s.UpdateSearchBaseURL(baseEntity.URL); err != nil {
+			return errgo.Notef(err, "cannot update search entities for %q", baseEntity.URL)
+		}
+	}
+	if err := iter.Close(); err != nil {
+		return errgo.Notef(err, "cannot close mgo iterator")
+	}
+
+	// Set the promulgated flag on the base entity.
+	err := s.DB.BaseEntities().UpdateId(base, bson.D{{"$set", bson.D{{"promulgated", mongodoc.IntBool(true)}}}})
+	if err != nil {
+		if errgo.Cause(err) == mgo.ErrNotFound {
+			return errgo.WithCausef(nil, params.ErrNotFound, "base entity %q not found", base)
+		}
+		return errgo.Notef(err, "cannot promulgate base entity %q", base)
+	}
+
+	type result struct {
+		Series   string `bson:"_id"`
+		Revision int
+	}
+
+	// Find the latest revision in each series of entities with the promulgated base URL.
+	var latestOwned []result
+	err = s.DB.Entities().Pipe([]bson.D{
+		{{"$match", bson.D{{"baseurl", base}}}},
+		{{"$group", bson.D{{"_id", "$series"}, {"revision", bson.D{{"$max", "$revision"}}}}}},
+	}).All(&latestOwned)
+	if err != nil {
+		return errgo.Notef(err, "cannot find latest revision for promulgated URL")
+	}
+
+	// Find the latest revision in each series of the promulgated entities
+	// with the same name as the base entity. Note that this works because:
+	// 1) promulgated URLs always have the same charm name as their
+	// non-promulgated counterparts.
+	// 2) bundles cannot have names that overlap with charms.
+	// Because of 1), we are sure that selecting on the entity name will
+	// select all entities with a matching promulgated URL name.
Because of
+	// 2) we are sure that we are only updating all charms or the single
+	// bundle entity.
+	latestPromulgated := make(map[string]int)
+	iter = s.DB.Entities().Pipe([]bson.D{
+		{{"$match", bson.D{{"name", base.Name}}}},
+		{{"$group", bson.D{{"_id", "$series"}, {"revision", bson.D{{"$max", "$promulgated-revision"}}}}}},
+	}).Iter()
+	var res result
+	for iter.Next(&res) {
+		latestPromulgated[res.Series] = res.Revision
+	}
+	if err := iter.Close(); err != nil {
+		return errgo.Notef(err, "cannot close mgo iterator")
+	}
+
+	// Update the newest entity in each series with a base URL that matches the newly promulgated
+	// base entity to have a promulgated URL, if it does not already have one.
+	for _, r := range latestOwned {
+		id := *base
+		id.Series = r.Series
+		id.Revision = r.Revision
+		pID := id
+		pID.User = ""
+		pID.Revision = latestPromulgated[r.Series] + 1
+		err := s.DB.Entities().Update(
+			bson.D{
+				{"_id", &id},
+				{"promulgated-revision", -1},
+			},
+			bson.D{
+				{"$set", bson.D{
+					{"promulgated-url", &pID},
+					{"promulgated-revision", pID.Revision},
+				}},
+			},
+		)
+		// If we get NotFound it is most likely because the latest owned
+		// revision is already promulgated, so carry on.
+		if err != nil && err != mgo.ErrNotFound {
+			return errgo.Notef(err, "cannot update promulgated URLs")
+		}
+	}
+
+	// Update the search record for the newest entity.
+	if err := s.UpdateSearchBaseURL(base); err != nil {
+		return errgo.Notef(err, "cannot update search entities for %q", base)
+	}
+	return nil
+}
+
+func interfacesForRelations(rels map[string]charm.Relation) []string {
+	// Eliminate duplicates by storing interface names into a map.
+	interfaces := make(map[string]bool)
+	for _, rel := range rels {
+		interfaces[rel.Interface] = true
+	}
+	result := make([]string, 0, len(interfaces))
+	for iface := range interfaces {
+		result = append(result, iface)
+	}
+	return result
+}
+
+func baseURL(url *charm.URL) *charm.URL {
+	newURL := *url
+	newURL.Revision = -1
+	newURL.Series = ""
+	newURL.Channel = ""
+	return &newURL
+}
+
+var errNotImplemented = errgo.Newf("not implemented")
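+
+// For illustration, baseURL strips the revision, series and channel, so
+// all of the following ids share the base id cs:~charmers/wordpress
+// (a worked example):
+//
+//	baseURL(charm.MustParseURL("cs:~charmers/precise/wordpress-23"))
+//	baseURL(charm.MustParseURL("cs:~charmers/development/trusty/wordpress-42"))
+//	baseURL(charm.MustParseURL("cs:~charmers/wordpress"))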
+
+// AddBundle adds a bundle to the entities collection with the given
+// parameters. If p.URL cannot be used as a name for the bundle then the
+// returned error will have the cause params.ErrEntityIdNotAllowed. If
+// the bundle duplicates an existing bundle then the returned error will
+// have the cause params.ErrDuplicateUpload.
+func (s *Store) AddBundle(b charm.Bundle, p AddParams) error {
+	// Strictly speaking this test is redundant, because a ResolvedURL should
+	// always be canonical, but check just in case anyway, as this is the
+	// final gateway before a potentially invalid url might be stored
+	// in the database.
+	if p.URL.URL.Series != "bundle" || p.URL.URL.User == "" || p.URL.URL.Revision == -1 {
+		return errgo.Newf("bundle added with invalid id %v", p.URL)
+	}
+	bundleData := b.Data()
+	urls, err := bundleCharms(bundleData)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	entity := &mongodoc.Entity{
+		URL:                &p.URL.URL,
+		BlobHash:           p.BlobHash,
+		BlobHash256:        p.BlobHash256,
+		BlobName:           p.BlobName,
+		Size:               p.BlobSize,
+		UploadTime:         time.Now(),
+		BundleData:         bundleData,
+		BundleUnitCount:    newInt(bundleUnitCount(bundleData)),
+		BundleMachineCount: newInt(bundleMachineCount(bundleData)),
+		BundleReadMe:       b.ReadMe(),
+		BundleCharms:       urls,
+		Contents:           p.Contents,
+		PromulgatedURL:     p.URL.PromulgatedURL(),
+		Development:        p.URL.Development,
+	}
+	denormalizeEntity(entity)
+
+	// Check that we're not going to create a bundle that duplicates
+	// the name of a charm. This is racy, but it's the best we can do.
+	entities, err := s.FindEntities(entity.BaseURL)
+	if err != nil {
+		return errgo.Notef(err, "cannot check for existing entities")
+	}
+	for _, entity := range entities {
+		if entity.URL.Series != "bundle" {
+			return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "bundle name duplicates charm name %s", entity.URL)
+		}
+	}
+	if err := s.insertEntity(entity); err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload))
+	}
+	return nil
+}
+
+// OpenBlob opens a blob given its entity id; it returns the blob's
+// data source, its size and its hash. It returns a params.ErrNotFound
+// error if the entity does not exist.
+func (s *Store) OpenBlob(id *router.ResolvedURL) (r blobstore.ReadSeekCloser, size int64, hash string, err error) {
+	blobName, hash, err := s.BlobNameAndHash(id)
+	if err != nil {
+		return nil, 0, "", errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	r, size, err = s.BlobStore.Open(blobName)
+	if err != nil {
+		return nil, 0, "", errgo.Notef(err, "cannot open archive data for %s", id)
+	}
+	return r, size, hash, nil
+}
+
+// BlobNameAndHash returns the name used to store the blob
+// for the entity with the given id, along with the blob's hash.
+// It returns a params.ErrNotFound error if the entity does not exist.
+func (s *Store) BlobNameAndHash(id *router.ResolvedURL) (name, hash string, err error) {
+	entity, err := s.FindEntity(id, "blobname", "blobhash")
+	if err != nil {
+		if errgo.Cause(err) == params.ErrNotFound {
+			return "", "", errgo.WithCausef(nil, params.ErrNotFound, "entity not found")
+		}
+		return "", "", errgo.Notef(err, "cannot get %s", id)
+	}
+	return entity.BlobName, entity.BlobHash, nil
+}
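+
+// For illustration, serving an entity's archive might look like this
+// (a sketch; w is an assumed io.Writer and error handling is
+// abbreviated):
+//
+//	r, size, hash, err := store.OpenBlob(rurl)
+//	if err != nil {
+//		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+//	}
+//	defer r.Close()
+//	// size and hash can be used for Content-Length and ETag headers.
+//	_, err = io.Copy(w, r)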
+
+// OpenCachedBlobFile opens a file from the given entity's archive blob.
+// The file is identified by the provided fileId. If the file has not
+// previously been opened on this entity, the isFile function will be
+// used to determine which file in the zip archive to use. The result
+// will be cached for the next time.
+//
+// When retrieving the entity, at least the BlobName and
+// Contents fields must be populated.
+func (s *Store) OpenCachedBlobFile(
+	entity *mongodoc.Entity,
+	fileId mongodoc.FileId,
+	isFile func(f *zip.File) bool,
+) (_ io.ReadCloser, err error) {
+	if entity.BlobName == "" {
+		// We'd like to check that the Contents field was populated
+		// here but we can't because it doesn't necessarily
+		// exist in the entity.
+		return nil, errgo.New("provided entity does not have required fields")
+	}
+	zipf, ok := entity.Contents[fileId]
+	if ok && !zipf.IsValid() {
+		return nil, errgo.WithCausef(nil, params.ErrNotFound, "")
+	}
+	blob, size, err := s.BlobStore.Open(entity.BlobName)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot open archive blob")
+	}
+	defer func() {
+		// When there's an error, we want to close
+		// the blob, otherwise we need to keep the blob
+		// open because it's used by the returned Reader.
+		if err != nil {
+			blob.Close()
+		}
+	}()
+	if !ok {
+		// We haven't already searched the archive for the file,
+		// so search it now.
+		zipf, err = s.findZipFile(blob, size, isFile)
+		if err != nil && errgo.Cause(err) != params.ErrNotFound {
+			return nil, errgo.Mask(err)
+		}
+	}
+	// We update the content entry regardless of whether we've
+	// found the file, so that the next time this method is called
+	// it can know that we've already looked.
+	err = s.DB.Entities().UpdateId(
+		entity.URL,
+		bson.D{{"$set",
+			bson.D{{"contents." + string(fileId), zipf}},
+		}},
+	)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot update %q", entity.URL)
+	}
+	if !zipf.IsValid() {
+		// We searched for the file and didn't find it.
+		return nil, errgo.WithCausef(nil, params.ErrNotFound, "")
+	}
+
+	// We know where the file is stored. Now serve it up.
+	r, err := ZipFileReader(blob, zipf)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot make zip file reader")
+	}
+	// We return a ReadCloser that reads from the newly created
+	// zip file reader, but when closed, will close the originally
+	// opened blob.
+	return struct {
+		io.Reader
+		io.Closer
+	}{r, blob}, nil
+}
+
+func (s *Store) findZipFile(blob io.ReadSeeker, size int64, isFile func(f *zip.File) bool) (mongodoc.ZipFile, error) {
+	zipReader, err := zip.NewReader(&readerAtSeeker{blob}, size)
+	if err != nil {
+		return mongodoc.ZipFile{}, errgo.Notef(err, "cannot read archive data")
+	}
+	for _, f := range zipReader.File {
+		if isFile(f) {
+			return NewZipFile(f)
+		}
+	}
+	return mongodoc.ZipFile{}, params.ErrNotFound
+}
+
+// SetPerms sets the permissions for the base entity with
+// the given id for "which" operations ("read" or "write")
+// to the given ACL. This is mostly provided for testing.
+func (s *Store) SetPerms(id *charm.URL, which string, acl ...string) error {
+	field := "acls"
+	if id.Channel == charm.DevelopmentChannel {
+		field = "developmentacls"
+	}
+	return s.DB.BaseEntities().UpdateId(baseURL(id), bson.D{{"$set",
+		bson.D{{field + "." + which, acl}},
+	}})
+}
+
+func newInt(x int) *int {
+	return &x
+}
+
+// bundleUnitCount returns the number of units created by the bundle.
+func bundleUnitCount(b *charm.BundleData) int {
+	count := 0
+	for _, service := range b.Services {
+		count += service.NumUnits
+	}
+	return count
+}
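+
+// For illustration, given the counting rules above and in
+// bundleMachineCount below, a bundle with two wordpress units placed on
+// "new" machines and one mysql unit with no explicit placement yields
+// three units and three machines (a sketch; the field names follow
+// charm.BundleData):
+//
+//	data := &charm.BundleData{
+//		Services: map[string]*charm.ServiceSpec{
+//			"wordpress": {Charm: "wordpress", NumUnits: 2, To: []string{"new"}},
+//			"mysql":     {Charm: "mysql", NumUnits: 1},
+//		},
+//	}
+//	bundleUnitCount(data)    // 3
+//	bundleMachineCount(data) // 3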
+
+// bundleMachineCount returns the number of machines
+// that will be created or used by the bundle.
+func bundleMachineCount(b *charm.BundleData) int {
+	count := len(b.Machines)
+	for _, service := range b.Services {
+		// The default placement is "new".
+		placement := &charm.UnitPlacement{
+			Machine: "new",
+		}
+		// Check for "new" placements, which mean a new machine
+		// must be added.
+		for _, location := range service.To {
+			var err error
+			placement, err = charm.ParsePlacement(location)
+			if err != nil {
+				// Ignore invalid placements - a bundle should always
+				// be verified before being added to the charm store,
+				// so this should never happen in practice.
+				continue
+			}
+			if placement.Machine == "new" {
+				count++
+			}
+		}
+		// If there are fewer elements in To than NumUnits, the last placement
+		// element is replicated. For this reason, if the last element is
+		// "new", we need to add more machines.
+		if placement != nil && placement.Machine == "new" {
+			count += service.NumUnits - len(service.To)
+		}
+	}
+	return count
+}
+
+// bundleCharms returns all the charm URLs used by a bundle,
+// without duplicates.
+func bundleCharms(data *charm.BundleData) ([]*charm.URL, error) {
+	// Use a map to de-duplicate the URL list: a bundle can include services
+	// deployed by the same charm.
+	urlMap := make(map[string]*charm.URL)
+	for _, service := range data.Services {
+		url, err := charm.ParseURL(service.Charm)
+		if err != nil {
+			return nil, errgo.Mask(err)
+		}
+		urlMap[url.String()] = url
+		// Also add the corresponding base URL.
+		base := baseURL(url)
+		urlMap[base.String()] = base
+	}
+	urls := make([]*charm.URL, 0, len(urlMap))
+	for _, url := range urlMap {
+		urls = append(urls, url)
+	}
+	return urls, nil
+}
+
+// MatchingInterfacesQuery returns a mongo query
+// that will find any charms that require any interfaces
+// in the required slice or provide any interfaces in the
+// provided slice.
+//
+// Development charms are never matched.
+// TODO do we actually want to match dev charms here?
+func (s *Store) MatchingInterfacesQuery(required, provided []string) *mgo.Query {
+	return s.DB.Entities().Find(bson.D{{
+		"development", false,
+	}, {
+		"$or", []bson.D{{{
+			"charmrequiredinterfaces", bson.D{{
+				"$elemMatch", bson.D{{
+					"$in", required,
+				}},
+			}},
+		}}, {{
+			"charmprovidedinterfaces", bson.D{{
+				"$elemMatch", bson.D{{
+					"$in", provided,
+				}},
+			}},
+		}}},
+	}})
+}
+
+// AddLog adds a log message to the database.
+func (s *Store) AddLog(data *json.RawMessage, logLevel mongodoc.LogLevel, logType mongodoc.LogType, urls []*charm.URL) error {
+	// Encode the JSON data.
+	b, err := json.Marshal(data)
+	if err != nil {
+		return errgo.Notef(err, "cannot marshal log data")
+	}
+
+	// Add the base URLs to the list of references associated with the log.
+	// Also remove duplicate URLs while maintaining the references' order.
+	var allUrls []*charm.URL
+	urlMap := make(map[string]bool)
+	for _, url := range urls {
+		urlStr := url.String()
+		if !urlMap[urlStr] {
+			urlMap[urlStr] = true
+			allUrls = append(allUrls, url)
+		}
+		base := baseURL(url)
+		urlStr = base.String()
+		if !urlMap[urlStr] {
+			urlMap[urlStr] = true
+			allUrls = append(allUrls, base)
+		}
+	}
+
+	// Add the log to the database.
+	log := &mongodoc.Log{
+		Data:  b,
+		Level: logLevel,
+		Type:  logType,
+		URLs:  allUrls,
+		Time:  time.Now(),
+	}
+	if err := s.DB.Logs().Insert(log); err != nil {
+		return errgo.Mask(err)
+	}
+	return nil
+}
+
+// StoreDatabase wraps an mgo.Database and adds a few convenience methods.
+type StoreDatabase struct {
+	*mgo.Database
+}
+
+// clone copies the StoreDatabase, cloning the underlying mgo session.
+func (s StoreDatabase) clone() StoreDatabase {
+	return StoreDatabase{
+		&mgo.Database{
+			Name:    s.Name,
+			Session: s.Session.Clone(),
+		},
+	}
+}
+
+// copy copies the StoreDatabase, copying the underlying mgo session.
+func (s StoreDatabase) copy() StoreDatabase {
+	return StoreDatabase{
+		&mgo.Database{
+			Name:    s.Name,
+			Session: s.Session.Copy(),
+		},
+	}
+}
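+
+// For illustration, copy always obtains a fresh session while clone
+// shares the parent session's resources where possible (see the mgo
+// Session.Copy and Session.Clone documentation); Store.Go builds on this
+// so that background work gets its own session lifetime (a sketch):
+//
+//	store.Go(func(s *Store) {
+//		if err := s.UpdateSearchBaseURL(base); err != nil {
+//			logger.Errorf("cannot update search record: %v", err)
+//		}
+//	})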
+
+// Close closes the store database's underlying session.
+func (s StoreDatabase) Close() {
+	s.Session.Close()
+}
+
+// Entities returns the mongo collection where entities are stored.
+func (s StoreDatabase) Entities() *mgo.Collection {
+	return s.C("entities")
+}
+
+// BaseEntities returns the mongo collection where base entities are stored.
+func (s StoreDatabase) BaseEntities() *mgo.Collection {
+	return s.C("base_entities")
+}
+
+// Logs returns the Mongo collection where charm store logs are stored.
+func (s StoreDatabase) Logs() *mgo.Collection {
+	return s.C("logs")
+}
+
+// Migrations returns the Mongo collection where the migration info is stored.
+func (s StoreDatabase) Migrations() *mgo.Collection {
+	return s.C("migrations")
+}
+
+func (s StoreDatabase) Macaroons() *mgo.Collection {
+	return s.C("macaroons")
+}
+
+// allCollections holds, for each collection used by the charm store, a
+// function that returns that collection.
+// The macaroons collection is omitted because it does
+// not exist until a macaroon is actually created.
+var allCollections = []func(StoreDatabase) *mgo.Collection{
+	StoreDatabase.StatCounters,
+	StoreDatabase.StatTokens,
+	StoreDatabase.Entities,
+	StoreDatabase.BaseEntities,
+	StoreDatabase.Logs,
+	StoreDatabase.Migrations,
+}
+
+// Collections returns a slice of all the collections used
+// by the charm store.
+func (s StoreDatabase) Collections() []*mgo.Collection {
+	cs := make([]*mgo.Collection, len(allCollections))
+	for i, f := range allCollections {
+		cs[i] = f(s)
+	}
+	return cs
+}
+
+type readerAtSeeker struct {
+	r io.ReadSeeker
+}
+
+func (r *readerAtSeeker) ReadAt(buf []byte, p int64) (int, error) {
+	if _, err := r.r.Seek(p, 0); err != nil {
+		return 0, errgo.Notef(err, "cannot seek")
+	}
+	// The io.ReaderAt contract requires that we read len(buf) bytes or
+	// return an error, so use io.ReadFull rather than a single Read,
+	// which may legitimately return a short read.
+	return io.ReadFull(r.r, buf)
+}
+
+// ReaderAtSeeker adapts r so that it can be used as
+// a ReaderAt. Note that, unlike some implementations
+// of ReaderAt, it is not OK to use concurrently.
+func ReaderAtSeeker(r io.ReadSeeker) io.ReaderAt {
+	return &readerAtSeeker{r}
+}
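+
+// For illustration, findZipFile above consumes this adapter to read a
+// zip archive from a blob opened as an io.ReadSeeker (a sketch):
+//
+//	zr, err := zip.NewReader(ReaderAtSeeker(blob), size)
+//	if err != nil {
+//		return errgo.Notef(err, "cannot read archive data")
+//	}
+//	for _, f := range zr.File {
+//		// inspect f.Name, open with f.Open(), etc.
+//	}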
+
+// Search searches the store for the given SearchParams.
+// It returns a SearchResult containing the results of the search.
+func (store *Store) Search(sp SearchParams) (SearchResult, error) {
+	result, err := store.ES.search(sp)
+	if err != nil {
+		return SearchResult{}, errgo.Mask(err)
+	}
+	return result, nil
+}
+
+var listFilters = map[string]string{
+	"name":        "name",
+	"owner":       "user",
+	"series":      "series",
+	"type":        "type",
+	"promulgated": "promulgated-revision",
+}
+
+func prepareList(sp SearchParams) (filters map[string]interface{}, sort bson.D, err error) {
+	if len(sp.Text) > 0 {
+		return nil, nil, errgo.New("text not allowed")
+	}
+	if sp.Limit > 0 {
+		return nil, nil, errgo.New("limit not allowed")
+	}
+	if sp.Skip > 0 {
+		return nil, nil, errgo.New("skip not allowed")
+	}
+	if sp.AutoComplete {
+		return nil, nil, errgo.New("autocomplete not allowed")
+	}
+
+	filters = make(map[string]interface{})
+	for k, v := range sp.Filters {
+		switch k {
+		case "name":
+			filters[k] = v[0]
+		case "owner":
+			filters["user"] = v[0]
+		case "series":
+			filters["series"] = v[0]
+		case "type":
+			if v[0] == "bundle" {
+				filters["series"] = "bundle"
+			} else {
+				filters["series"] = map[string]interface{}{"$ne": "bundle"}
+			}
+		case "promulgated":
+			if v[0] != "0" {
+				filters["promulgated-revision"] = map[string]interface{}{"$gte": 0}
+			} else {
+				filters["promulgated-revision"] = map[string]interface{}{"$lt": 0}
+			}
+		default:
+			return nil, nil, errgo.Newf("filter %q not allowed", k)
+		}
+	}
+
+	sort, err = createMongoSort(sp)
+	if err != nil {
+		return nil, nil, errgo.Newf("invalid parameters: %s", err)
+	}
+	return filters, sort, nil
+}
+
+// sortMongoFields contains a mapping from api fieldnames to the
+// entity fields to sort by.
+var sortMongoFields = map[string]string{
+	"name":   "name",
+	"owner":  "user",
+	"series": "series",
+}
+
+// createMongoSort creates sort query parameters for mongo out of
+// the sort parameter in sp.
+func createMongoSort(sp SearchParams) (bson.D, error) {
+	sort := make(bson.D, len(sp.sort))
+
+	for i, s := range sp.sort {
+		field := sortMongoFields[s.Field]
+		if field == "" {
+			return nil, errgo.Newf("sort %q not allowed", s.Field)
+		}
+		order := 1
+		if s.Order == sortDescending {
+			order = -1
+		}
+		sort[i] = bson.DocElem{field, order}
+	}
+	return sort, nil
+}
+
+// List lists entities in the store that match the given SearchParams.
+// It returns a ListResult containing the results of the list.
+func (store *Store) List(sp SearchParams) (ListResult, error) { + filters, sort, err := prepareList(sp) + if err != nil { + return ListResult{}, errgo.Mask(err) + } + q := []bson.M{{"$match": filters}} + q = append(q, bson.M{"$sort": bson.D{{"revision", 1}}}) + + d := bson.M{ + "_id": bson.M{ + "$concat": []interface{}{ + "$baseurl", + "$series", + bson.M{ + "$cond": []string{"$development", "true", "false"}, + }, + }, + }, + "promulgated-url": bson.M{"$last": "$promulgated-url"}, + "development": bson.M{"$last": "$development"}, + "name": bson.M{"$last": "$name"}, + "user": bson.M{"$last": "$user"}, + "series": bson.M{"$last": "$series"}, + "url": bson.M{"$last": "$_id"}, + } + group := bson.M{"$group": d} + q = append(q, group) + project := bson.M{ + "$project": bson.M{ + "_id": "$url", + "development": "$development", + "name": "$name", + "user": "$user", + "series": "$series", + "promulgated-url": "$promulgated-url", + }, + } + q = append(q, project) + if len(sort) == 0 { + q = append(q, bson.M{ + "$sort": bson.D{{"_id", 1}}, + }) + } else { + q = append(q, bson.M{"$sort": sort}) + } + + pipe := store.DB.Entities().Pipe(q) + r := ListResult{ + Results: make([]*router.ResolvedURL, 0), + } + var entity mongodoc.Entity + iter := pipe.Iter() + for iter.Next(&entity) { + r.Results = append(r.Results, EntityResolvedURL(&entity)) + } + if err := iter.Close(); err != nil { + return ListResult{}, errgo.Mask(err) + } + return r, nil +} + +// SynchroniseElasticsearch creates new indexes in elasticsearch +// and populates them with the current data from the mongodb database. +func (s *Store) SynchroniseElasticsearch() error { + if err := s.ES.ensureIndexes(true); err != nil { + return errgo.Notef(err, "cannot create indexes") + } + if err := s.syncSearch(); err != nil { + return errgo.Notef(err, "cannot synchronise indexes") + } + return nil +} + +// EntityResolvedURL returns the ResolvedURL for the entity. +// It requires PromulgatedURL and Development fields to have been +// filled out in the entity. +func EntityResolvedURL(e *mongodoc.Entity) *router.ResolvedURL { + rurl := &router.ResolvedURL{ + URL: *e.URL, + PromulgatedRevision: -1, + Development: e.Development, + } + if e.PromulgatedURL != nil { + rurl.PromulgatedRevision = e.PromulgatedURL.Revision + } + return rurl +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/store_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2837 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + +import ( + "archive/zip" + "bytes" + "crypto/sha256" + "crypto/sha512" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/mgo.v2/bson" + "gopkg.in/natefinch/lumberjack.v2" + + "gopkg.in/juju/charmstore.v5-unstable/audit" + "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" + "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" +) + +type StoreSuite struct { + storetesting.IsolatedMgoESSuite + index string +} + +var _ = gc.Suite(&StoreSuite{}) + +func (s *StoreSuite) checkAddCharm(c *gc.C, ch charm.Charm, addToES bool, url *router.ResolvedURL) { + var es *elasticsearch.Database + if addToES { + es = s.ES + } + store := s.newStore(c, true) + defer store.Close() + + // Add the charm to the store. + beforeAdding := time.Now() + err := store.AddCharmWithArchive(url, ch) + c.Assert(err, gc.IsNil) + afterAdding := time.Now() + + var doc *mongodoc.Entity + err = store.DB.Entities().FindId(&url.URL).One(&doc) + c.Assert(err, gc.IsNil) + + // Ensure the document was indexed in ElasticSearch, if an ES database was provided. + if es != nil { + var result SearchDoc + id := store.ES.getID(doc.URL) + err = store.ES.GetDocument(s.TestIndex, typeName, id, &result) + c.Assert(err, gc.IsNil) + exists, err := store.ES.HasDocument(s.TestIndex, typeName, id) + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) + if purl := url.PromulgatedURL(); purl != nil { + c.Assert(result.PromulgatedURL, jc.DeepEquals, purl) + } + } + // The entity doc has been correctly added to the mongo collection. + size, hash, hash256 := getSizeAndHashes(ch) + sort.Strings(doc.CharmProvidedInterfaces) + sort.Strings(doc.CharmRequiredInterfaces) + + // Check the upload time and then reset it to its zero value + // so that we can test the deterministic parts later. + c.Assert(doc.UploadTime, jc.TimeBetween(beforeAdding, afterAdding)) + + doc.UploadTime = time.Time{} + + blobName := doc.BlobName + c.Assert(blobName, gc.Matches, "[0-9a-z]+") + doc.BlobName = "" + + c.Assert(doc, jc.DeepEquals, denormalizedEntity(&mongodoc.Entity{ + URL: &url.URL, + BlobHash: hash, + BlobHash256: hash256, + Size: size, + CharmMeta: ch.Meta(), + CharmActions: ch.Actions(), + CharmConfig: ch.Config(), + CharmProvidedInterfaces: []string{"http", "logging", "monitoring"}, + CharmRequiredInterfaces: []string{"mysql", "varnish"}, + PromulgatedURL: url.PromulgatedURL(), + SupportedSeries: ch.Meta().Series, + Development: url.Development, + })) + + // The charm archive has been properly added to the blob store. 
+ r, obtainedSize, err := store.BlobStore.Open(blobName) + c.Assert(err, gc.IsNil) + defer r.Close() + c.Assert(obtainedSize, gc.Equals, size) + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + charmArchive, err := charm.ReadCharmArchiveBytes(data) + c.Assert(err, gc.IsNil) + c.Assert(charmArchive.Meta(), jc.DeepEquals, ch.Meta()) + c.Assert(charmArchive.Config(), jc.DeepEquals, ch.Config()) + c.Assert(charmArchive.Actions(), jc.DeepEquals, ch.Actions()) + c.Assert(charmArchive.Revision(), jc.DeepEquals, ch.Revision()) + + // Check that the base entity has been properly created. + assertBaseEntity(c, store, baseURL(&url.URL), url.PromulgatedRevision != -1) + + // Try inserting the charm again - it should fail because the charm is + // already there. + err = store.AddCharmWithArchive(url, ch) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrDuplicateUpload) +} + +func (s *StoreSuite) checkAddBundle(c *gc.C, bundle charm.Bundle, addToES bool, url *router.ResolvedURL) { + var es *elasticsearch.Database + + if addToES { + es = s.ES + } + store := s.newStore(c, true) + defer store.Close() + + // Add the bundle to the store. + beforeAdding := time.Now() + err := store.AddBundleWithArchive(url, bundle) + c.Assert(err, gc.IsNil) + afterAdding := time.Now() + + var doc *mongodoc.Entity + err = store.DB.Entities().FindId(&url.URL).One(&doc) + c.Assert(err, gc.IsNil) + sort.Sort(orderedURLs(doc.BundleCharms)) + + // Ensure the document was indexed in ElasticSearch, if an ES database was provided. + if es != nil { + var result SearchDoc + id := store.ES.getID(doc.URL) + err = store.ES.GetDocument(s.TestIndex, typeName, id, &result) + c.Assert(err, gc.IsNil) + exists, err := store.ES.HasDocument(s.TestIndex, typeName, id) + c.Assert(err, gc.IsNil) + c.Assert(exists, gc.Equals, true) + if purl := url.PromulgatedURL(); purl != nil { + c.Assert(result.PromulgatedURL, jc.DeepEquals, purl) + } + } + + // Check the upload time and then reset it to its zero value + // so that we can test the deterministic parts later. + c.Assert(doc.UploadTime, jc.TimeBetween(beforeAdding, afterAdding)) + doc.UploadTime = time.Time{} + + // The blob name is random, but we check that it's + // in the correct format, and non-empty. + blobName := doc.BlobName + c.Assert(blobName, gc.Matches, "[0-9a-z]+") + doc.BlobName = "" + + // The entity doc has been correctly added to the mongo collection. + size, hash, hash256 := getSizeAndHashes(bundle) + c.Assert(doc, jc.DeepEquals, denormalizedEntity(&mongodoc.Entity{ + URL: &url.URL, + BlobHash: hash, + BlobHash256: hash256, + Size: size, + BundleData: bundle.Data(), + BundleReadMe: bundle.ReadMe(), + BundleCharms: []*charm.URL{ + charm.MustParseURL("mysql"), + charm.MustParseURL("wordpress"), + }, + BundleMachineCount: newInt(2), + BundleUnitCount: newInt(2), + PromulgatedURL: url.PromulgatedURL(), + Development: url.Development, + })) + + // The bundle archive has been properly added to the blob store. + r, obtainedSize, err := store.BlobStore.Open(blobName) + c.Assert(err, gc.IsNil) + defer r.Close() + c.Assert(obtainedSize, gc.Equals, size) + data, err := ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + bundleArchive, err := charm.ReadBundleArchiveBytes(data) + c.Assert(err, gc.IsNil) + c.Assert(bundleArchive.Data(), jc.DeepEquals, bundle.Data()) + c.Assert(bundleArchive.ReadMe(), jc.DeepEquals, bundle.ReadMe()) + + // Check that the base entity has been properly created. 
+ assertBaseEntity(c, store, baseURL(&url.URL), url.PromulgatedRevision != -1) + + // Try inserting the bundle again - it should fail because the bundle is + // already there. + err = store.AddBundleWithArchive(url, bundle) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrDuplicateUpload) +} + +func assertBaseEntity(c *gc.C, store *Store, url *charm.URL, promulgated bool) { + baseEntity, err := store.FindBaseEntity(url) + c.Assert(err, gc.IsNil) + expectACLs := mongodoc.ACL{ + Read: []string{url.User}, + Write: []string{url.User}, + } + c.Assert(baseEntity, jc.DeepEquals, &mongodoc.BaseEntity{ + URL: url, + User: url.User, + Name: url.Name, + Public: false, + ACLs: expectACLs, + DevelopmentACLs: expectACLs, + Promulgated: mongodoc.IntBool(promulgated), + }) +} + +type orderedURLs []*charm.URL + +func (o orderedURLs) Less(i, j int) bool { + return o[i].String() < o[j].String() +} + +func (o orderedURLs) Swap(i, j int) { + o[i], o[j] = o[j], o[i] +} + +func (o orderedURLs) Len() int { + return len(o) +} + +var urlFindingTests = []struct { + inStore []string + expand string + expect []string +}{{ + inStore: []string{"23 cs:~charmers/precise/wordpress-23"}, + expand: "wordpress", + expect: []string{"23 cs:~charmers/precise/wordpress-23"}, +}, { + inStore: []string{"23 cs:~charmers/development/precise/wordpress-23"}, + expand: "wordpress", + expect: []string{}, +}, { + inStore: []string{"23 cs:~charmers/development/precise/wordpress-23"}, + expand: "development/wordpress", + expect: []string{"23 cs:~charmers/development/precise/wordpress-23"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25"}, + expand: "wordpress", + expect: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25"}, + expand: "~charmers/precise/wordpress-24", + expect: []string{"24 cs:~charmers/precise/wordpress-24"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25"}, + expand: "~charmers/development/precise/wordpress-25", + expect: []string{"25 cs:~charmers/development/precise/wordpress-25"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25"}, + expand: "~charmers/precise/wordpress-25", + expect: []string{}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25"}, + expand: "development/wordpress", + expect: []string{"25 cs:~charmers/development/precise/wordpress-25", "23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/precise/wordpress-24"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25"}, + expand: "precise/wordpress", + expect: []string{"23 cs:~charmers/precise/wordpress-23"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "25 cs:~charmers/development/precise/wordpress-25", "26 cs:~charmers/development/wily/wordpress-26"}, + expand: "development/precise/wordpress", + expect: []string{"25 cs:~charmers/development/precise/wordpress-25", "23 
cs:~charmers/precise/wordpress-23"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/varnish-434"}, + expand: "wordpress", + expect: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "23 cs:~charmers/trusty/wordpress-23", "24 cs:~charmers/trusty/wordpress-24"}, + expand: "wordpress-23", + expect: []string{}, +}, { + inStore: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, + expand: "~user/precise/wordpress", + expect: []string{"cs:~user/precise/wordpress-23"}, +}, { + inStore: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, + expand: "~user/wordpress", + expect: []string{"cs:~user/precise/wordpress-23", "cs:~user/trusty/wordpress-23"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/varnish-434"}, + expand: "precise/wordpress-23", + expect: []string{"23 cs:~charmers/precise/wordpress-23"}, +}, { + inStore: []string{"23 cs:~charmers/precise/wordpress-23", "24 cs:~charmers/trusty/wordpress-24", "434 cs:~charmers/foo/varnish-434"}, + expand: "arble", + expect: []string{}, +}, { + inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, + expand: "multi-series", + expect: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, +}, { + inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, + expand: "trusty/multi-series", + expect: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, +}, { + inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, + expand: "multi-series-24", + expect: []string{"24 cs:~charmers/multi-series-24"}, +}, { + inStore: []string{"23 cs:~charmers/multi-series-23", "24 cs:~charmers/multi-series-24"}, + expand: "trusty/multi-series-24", + expect: []string{"24 cs:~charmers/multi-series-24"}, +}, { + inStore: []string{"1 cs:~charmers/multi-series-23", "2 cs:~charmers/multi-series-24"}, + expand: "trusty/multi-series-1", + expect: []string{"1 cs:~charmers/multi-series-23"}, +}, { + inStore: []string{"1 cs:~charmers/multi-series-23", "2 cs:~charmers/multi-series-24"}, + expand: "multi-series-23", + expect: []string{}, +}, { + inStore: []string{"1 cs:~charmers/multi-series-23", "2 cs:~charmers/multi-series-24"}, + expand: "cs:~charmers/utopic/multi-series-23", + expect: []string{"1 cs:~charmers/multi-series-23"}, +}, { + inStore: []string{}, + expand: "precise/wordpress-23", + expect: []string{}, +}, { + inStore: []string{}, + expand: "development/precise/wordpress-23", + expect: []string{}, +}} + +func (s *StoreSuite) testURLFinding(c *gc.C, check func(store *Store, expand *charm.URL, expect []*router.ResolvedURL)) { + charms := make(map[string]*charm.CharmDir) + store := s.newStore(c, false) + defer store.Close() + for i, test := range urlFindingTests { + c.Logf("test %d: %q from %q", i, test.expand, test.inStore) + _, err := store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + urls := MustParseResolvedURLs(test.inStore) + for _, url := range urls { + name := url.URL.Name + if charms[name] == nil { + charms[name] = storetesting.Charms.CharmDir(name) + } + err := store.AddCharmWithArchive(url, charms[name]) + c.Assert(err, gc.IsNil) + } + check(store, charm.MustParseURL(test.expand), 
MustParseResolvedURLs(test.expect))
+ }
+}
+
+func (s *StoreSuite) TestRequestStore(c *gc.C) {
+ config := ServerParams{
+ HTTPRequestWaitDuration: time.Millisecond,
+ MaxMgoSessions: 1,
+ }
+ p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config)
+ c.Assert(err, gc.IsNil)
+ defer p.Close()
+
+ // Instances within the limit can be acquired
+ // instantly without error.
+ store, err := p.RequestStore()
+ c.Assert(err, gc.IsNil)
+ store.Close()
+
+ // Check that when we get another instance,
+ // we reuse the original.
+ store1, err := p.RequestStore()
+ c.Assert(err, gc.IsNil)
+ defer store1.Close()
+ c.Assert(store1, gc.Equals, store)
+
+ // If we try to exceed the limit, we'll wait for a while,
+ // then return an error.
+ t0 := time.Now()
+ store2, err := p.RequestStore()
+ c.Assert(err, gc.ErrorMatches, "too many mongo sessions in use")
+ c.Assert(errgo.Cause(err), gc.Equals, ErrTooManySessions)
+ c.Assert(store2, gc.IsNil)
+ if d := time.Since(t0); d < config.HTTPRequestWaitDuration {
+ c.Errorf("got wait of %v; want at least %v", d, config.HTTPRequestWaitDuration)
+ }
+}
+
+func (s *StoreSuite) TestRequestStoreSatisfiedWithinTimeout(c *gc.C) {
+ config := ServerParams{
+ HTTPRequestWaitDuration: 5 * time.Second,
+ MaxMgoSessions: 1,
+ }
+ p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config)
+ c.Assert(err, gc.IsNil)
+ defer p.Close()
+ store, err := p.RequestStore()
+ c.Assert(err, gc.IsNil)
+
+ // Start a goroutine that will close the Store after a short period.
+ go func() {
+ time.Sleep(time.Millisecond)
+ store.Close()
+ }()
+ store1, err := p.RequestStore()
+ c.Assert(err, gc.IsNil)
+ c.Assert(store1, gc.Equals, store)
+ store1.Close()
+}
+
+func (s *StoreSuite) TestRequestStoreLimitCanBeExceeded(c *gc.C) {
+ config := ServerParams{
+ HTTPRequestWaitDuration: 5 * time.Second,
+ MaxMgoSessions: 1,
+ }
+ p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config)
+ c.Assert(err, gc.IsNil)
+ defer p.Close()
+ store, err := p.RequestStore()
+ c.Assert(err, gc.IsNil)
+ defer store.Close()
+
+ store1 := store.Copy()
+ defer store1.Close()
+ c.Assert(store1.Pool(), gc.Equals, store.Pool())
+
+ store2 := p.Store()
+ defer store2.Close()
+ c.Assert(store2.Pool(), gc.Equals, store.Pool())
+}
+
+func (s *StoreSuite) TestRequestStoreFailsWhenPoolIsClosed(c *gc.C) {
+ config := ServerParams{
+ HTTPRequestWaitDuration: 5 * time.Second,
+ MaxMgoSessions: 1,
+ }
+ p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config)
+ c.Assert(err, gc.IsNil)
+ p.Close()
+ store, err := p.RequestStore()
+ c.Assert(err, gc.ErrorMatches, "charm store has been closed")
+ c.Assert(store, gc.IsNil)
+}
+
+func (s *StoreSuite) TestRequestStoreLimitMaintained(c *gc.C) {
+ config := ServerParams{
+ HTTPRequestWaitDuration: time.Millisecond,
+ MaxMgoSessions: 1,
+ }
+ p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config)
+ c.Assert(err, gc.IsNil)
+ defer p.Close()
+
+ // Acquire an instance.
+ store, err := p.RequestStore()
+ c.Assert(err, gc.IsNil)
+ defer store.Close()
+
+ // Acquire another instance, exceeding the limit,
+ // and put it back.
+ store1 := p.Store()
+ store1.Close()
+
+ // We should still be unable to acquire another
+ // store for a request because we're still
+ // at the request limit.
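+ // (The store acquired at the start of this test is still held
+ // open by its deferred Close, so the pool remains at its
+ // MaxMgoSessions limit even though store1 has been returned.)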
+ _, err = p.RequestStore()
+ c.Assert(errgo.Cause(err), gc.Equals, ErrTooManySessions)
+}
+
+func (s *StoreSuite) TestPoolDoubleClose(c *gc.C) {
+ p, err := NewPool(s.Session.DB("juju_test"), nil, nil, ServerParams{})
+ c.Assert(err, gc.IsNil)
+ p.Close()
+ p.Close()
+
+ // Close a third time to ensure that the lock has properly
+ // been released.
+ p.Close()
+}
+
+func (s *StoreSuite) TestFindEntities(c *gc.C) {
+ s.testURLFinding(c, func(store *Store, expand *charm.URL, expect []*router.ResolvedURL) {
+ // Check that FindEntities works when retrieving just the id and promulgated id.
+ gotEntities, err := store.FindEntities(expand, "_id", "promulgated-url")
+ c.Assert(err, gc.IsNil)
+ if expand.User == "" {
+ sort.Sort(entitiesByPromulgatedURL(gotEntities))
+ } else {
+ sort.Sort(entitiesByURL(gotEntities))
+ }
+ c.Assert(gotEntities, gc.HasLen, len(expect))
+ for i, url := range expect {
+ c.Assert(gotEntities[i], jc.DeepEquals, &mongodoc.Entity{
+ URL: &url.URL,
+ PromulgatedURL: url.PromulgatedURL(),
+ })
+ }
+
+ // Check that FindEntities works when retrieving all fields.
+ gotEntities, err = store.FindEntities(expand)
+ c.Assert(err, gc.IsNil)
+ if expand.User == "" {
+ sort.Sort(entitiesByPromulgatedURL(gotEntities))
+ } else {
+ sort.Sort(entitiesByURL(gotEntities))
+ }
+ c.Assert(gotEntities, gc.HasLen, len(expect))
+ for i, url := range expect {
+ var entity mongodoc.Entity
+ err := store.DB.Entities().FindId(&url.URL).One(&entity)
+ c.Assert(err, gc.IsNil)
+ c.Assert(gotEntities[i], jc.DeepEquals, &entity)
+ }
+ })
+}
+
+func (s *StoreSuite) TestFindEntity(c *gc.C) {
+ s.testURLFinding(c, func(store *Store, expand *charm.URL, expect []*router.ResolvedURL) {
+ if expand.Series == "" || expand.Revision == -1 || expand.User == "" {
+ return
+ }
+ rurl := &router.ResolvedURL{
+ URL: *expand.WithChannel(""),
+ PromulgatedRevision: -1,
+ Development: expand.Channel == charm.DevelopmentChannel,
+ }
+ entity, err := store.FindEntity(rurl, "_id", "promulgated-url", "development")
+ if len(expect) == 0 {
+ c.Assert(err, gc.ErrorMatches, "entity not found")
+ c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound)
+ return
+ }
+ c.Assert(err, gc.IsNil)
+ c.Assert(len(expect), gc.Equals, 1)
+ c.Assert(entity.BlobName, gc.Equals, "")
+ c.Assert(EntityResolvedURL(entity), jc.DeepEquals, expect[0])
+
+ // Check that it works when returning other fields too.
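+ // Fields that were not requested above were left at their zero
+ // values (BlobName was empty); requesting "blobname" here should
+ // populate it.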
+ entity, err = store.FindEntity(rurl, "blobname")
+ c.Assert(err, gc.IsNil)
+ c.Assert(entity.BlobName, gc.Not(gc.Equals), "")
+ })
+}
+
+var findBaseEntityTests = []struct {
+ about string
+ stored []string
+ url string
+ fields []string
+ expect *mongodoc.BaseEntity
+}{{
+ about: "entity found, base url, all fields",
+ stored: []string{"42 cs:~charmers/utopic/django-42"},
+ url: "django",
+ expect: &mongodoc.BaseEntity{
+ URL: charm.MustParseURL("~charmers/django"),
+ User: "charmers",
+ Name: "django",
+ Public: false,
+ Promulgated: true,
+ ACLs: mongodoc.ACL{
+ Read: []string{"charmers"},
+ Write: []string{"charmers"},
+ },
+ DevelopmentACLs: mongodoc.ACL{
+ Read: []string{"charmers"},
+ Write: []string{"charmers"},
+ },
+ },
+}, {
+ about: "entity found, fully qualified url, few fields",
+ stored: []string{"42 cs:~charmers/utopic/django-42", "~who/precise/django-47"},
+ url: "~who/precise/django-0",
+ fields: []string{"public", "user"},
+ expect: &mongodoc.BaseEntity{
+ URL: charm.MustParseURL("~who/django"),
+ User: "who",
+ Public: false,
+ },
+}, {
+ about: "entity found, partial url, only the ACLs",
+ stored: []string{"42 cs:~charmers/utopic/django-42", "~who/trusty/django-47"},
+ url: "~who/django-42",
+ fields: []string{"acls"},
+ expect: &mongodoc.BaseEntity{
+ URL: charm.MustParseURL("~who/django"),
+ ACLs: mongodoc.ACL{
+ Read: []string{"who"},
+ Write: []string{"who"},
+ },
+ },
+}, {
+ about: "entity not found, charm name",
+ stored: []string{"42 cs:~charmers/utopic/django-42", "~who/trusty/django-47"},
+ url: "rails",
+}, {
+ about: "entity not found, user",
+ stored: []string{"42 cs:~charmers/utopic/django-42", "~who/trusty/django-47"},
+ url: "~dalek/django",
+ fields: []string{"acls"},
+}}
+
+func (s *StoreSuite) TestFindBaseEntity(c *gc.C) {
+ ch := storetesting.Charms.CharmDir("wordpress")
+ store := s.newStore(c, false)
+ defer store.Close()
+ for i, test := range findBaseEntityTests {
+ c.Logf("test %d: %s", i, test.about)
+
+ // Add initial charms to the store.
+ for _, url := range MustParseResolvedURLs(test.stored) {
+ err := store.AddCharmWithArchive(url, ch)
+ c.Assert(err, gc.IsNil)
+ }
+
+ // Find the entity.
+ id := charm.MustParseURL(test.url)
+ baseEntity, err := store.FindBaseEntity(id, test.fields...)
+ if test.expect == nil {
+ // We don't expect the entity to be found.
+ c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound)
+ c.Assert(baseEntity, gc.IsNil)
+ } else {
+ c.Assert(err, gc.IsNil)
+ c.Assert(baseEntity, jc.DeepEquals, test.expect)
+ }
+
+ // Remove all the entities from the store.
+ _, err = store.DB.Entities().RemoveAll(nil)
+ c.Assert(err, gc.IsNil)
+ _, err = store.DB.BaseEntities().RemoveAll(nil)
+ c.Assert(err, gc.IsNil)
+ }
+}
+
+func (s *StoreSuite) TestAddCharmWithFailedESInsert(c *gc.C) {
+ // Make an ElasticSearch database with a non-existent address,
+ // so that the store will try to add the charm there and fail.
+ esdb := &elasticsearch.Database{
+ Addr: "0.1.2.3:0123",
+ }
+
+ store := s.newStore(c, false)
+ defer store.Close()
+ store.ES = &SearchIndex{esdb, "no-index"}
+
+ url := newResolvedURL("~charmers/precise/wordpress-12", -1)
+ err := store.AddCharmWithArchive(url, storetesting.Charms.CharmDir("wordpress"))
+ c.Assert(err, gc.ErrorMatches, "cannot index cs:~charmers/precise/wordpress-12 to ElasticSearch: .*")
+
+ // Check that the entity has been correctly removed.
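+ // The failed insert into the search index should have caused the
+ // newly added entity to be removed again, leaving the store
+ // unchanged.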
+ _, err = store.FindEntity(url) + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) +} + +func (s *StoreSuite) TestAddCharmsWithTheSameBaseEntity(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + // Add a charm to the database. + ch := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("~charmers/trusty/wordpress-12", 12) + err := store.AddCharmWithArchive(url, ch) + c.Assert(err, gc.IsNil) + + // Add a second charm to the database, sharing the same base URL. + err = store.AddCharmWithArchive(newResolvedURL("~charmers/utopic/wordpress-13", -1), ch) + c.Assert(err, gc.IsNil) + + // Ensure a single base entity has been created. + num, err := store.DB.BaseEntities().Count() + c.Assert(err, gc.IsNil) + c.Assert(num, gc.Equals, 1) +} + +type entitiesByURL []*mongodoc.Entity + +func (s entitiesByURL) Len() int { return len(s) } +func (s entitiesByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s entitiesByURL) Less(i, j int) bool { + return s[i].URL.String() < s[j].URL.String() +} + +type entitiesByPromulgatedURL []*mongodoc.Entity + +func (s entitiesByPromulgatedURL) Len() int { return len(s) } +func (s entitiesByPromulgatedURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s entitiesByPromulgatedURL) Less(i, j int) bool { + return s[i].PromulgatedURL.String() < s[j].PromulgatedURL.String() +} + +var bundleUnitCountTests = []struct { + about string + data *charm.BundleData + expectUnits int +}{{ + about: "empty bundle", + data: &charm.BundleData{}, +}, { + about: "no units", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:utopic/django-0", + NumUnits: 0, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-0", + NumUnits: 0, + }, + }, + }, +}, { + about: "a single unit", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 0, + }, + }, + }, + expectUnits: 1, +}, { + about: "multiple units", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:utopic/django-1", + NumUnits: 1, + }, + "haproxy": { + Charm: "cs:utopic/haproxy-2", + NumUnits: 2, + }, + "postgres": { + Charm: "cs:utopic/postgres-3", + NumUnits: 5, + }, + }, + }, + expectUnits: 8, +}} + +func (s *StoreSuite) TestBundleUnitCount(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + entities := store.DB.Entities() + for i, test := range bundleUnitCountTests { + c.Logf("test %d: %s", i, test.about) + url := newResolvedURL("cs:~charmers/bundle/django-0", -1) + url.URL.Revision = i + url.PromulgatedRevision = i + + // Add the bundle used for this test. + err := store.AddBundle(&testingBundle{ + data: test.data, + }, AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + + // Retrieve the bundle from the database. 
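+ // The unit count is expected to have been computed and stored in
+ // the entity document when the bundle was added.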
+ var doc mongodoc.Entity + err = entities.FindId(&url.URL).One(&doc) + c.Assert(err, gc.IsNil) + + c.Assert(*doc.BundleUnitCount, gc.Equals, test.expectUnits) + } +} + +var bundleMachineCountTests = []struct { + about string + data *charm.BundleData + expectMachines int +}{{ + about: "no machines", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:utopic/django-0", + NumUnits: 0, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-0", + NumUnits: 0, + }, + }, + }, +}, { + about: "a single machine (no placement)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 0, + }, + }, + }, + expectMachines: 1, +}, { + about: "a single machine (machine placement)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + To: []string{"1"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, + }, + }, + expectMachines: 1, +}, { + about: "a single machine (hulk smash)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + To: []string{"1"}, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 1, + To: []string{"1"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, + }, + }, + expectMachines: 1, +}, { + about: "a single machine (co-location)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 1, + To: []string{"django/0"}, + }, + }, + }, + expectMachines: 1, +}, { + about: "a single machine (containerization)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + To: []string{"1"}, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 1, + To: []string{"lxc:1"}, + }, + "postgres": { + Charm: "cs:utopic/postgres-3", + NumUnits: 2, + To: []string{"kvm:1"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, + }, + }, + expectMachines: 1, +}, { + about: "multiple machines (no placement)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:utopic/django-1", + NumUnits: 1, + }, + "haproxy": { + Charm: "cs:utopic/haproxy-2", + NumUnits: 2, + }, + "postgres": { + Charm: "cs:utopic/postgres-3", + NumUnits: 5, + }, + }, + }, + expectMachines: 1 + 2 + 5, +}, { + about: "multiple machines (machine placement)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:utopic/django-1", + NumUnits: 2, + To: []string{"1", "3"}, + }, + "haproxy": { + Charm: "cs:utopic/haproxy-2", + NumUnits: 1, + To: []string{"2"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, "2": nil, "3": nil, + }, + }, + expectMachines: 2 + 1, +}, { + about: "multiple machines (hulk smash)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + To: []string{"1"}, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 1, + To: []string{"2"}, + }, + "postgres": { + Charm: "cs:utopic/postgres-3", + NumUnits: 2, + To: []string{"1", "2"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, "2": nil, + }, + }, + 
expectMachines: 1 + 1 + 0, +}, { + about: "multiple machines (co-location)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 2, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 3, + To: []string{"django/0", "django/1", "new"}, + }, + }, + }, + expectMachines: 2 + 1, +}, { + about: "multiple machines (containerization)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 2, + To: []string{"1", "2"}, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 4, + To: []string{"lxc:1", "lxc:2", "lxc:3", "lxc:3"}, + }, + "postgres": { + Charm: "cs:utopic/postgres-3", + NumUnits: 1, + To: []string{"kvm:2"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, "2": nil, "3": nil, + }, + }, + expectMachines: 2 + 1 + 0, +}, { + about: "multiple machines (partial placement in a container)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + To: []string{"1"}, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 10, + To: []string{"lxc:1", "lxc:2"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, "2": nil, + }, + }, + expectMachines: 1 + 1, +}, { + about: "multiple machines (partial placement in a new machine)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 1, + To: []string{"1"}, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 10, + To: []string{"lxc:1", "1", "new"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, + }, + }, + expectMachines: 1 + 8, +}, { + about: "multiple machines (partial placement with new machines)", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "django": { + Charm: "cs:trusty/django-42", + NumUnits: 3, + }, + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 6, + To: []string{"new", "1", "lxc:1", "new"}, + }, + "postgres": { + Charm: "cs:utopic/postgres-3", + NumUnits: 10, + To: []string{"kvm:2", "lxc:django/1", "new", "new", "kvm:2"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, "2": nil, + }, + }, + expectMachines: 3 + 5 + 3, +}, { + about: "placement into container on new machine", + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "haproxy": { + Charm: "cs:trusty/haproxy-47", + NumUnits: 6, + To: []string{"lxc:new", "1", "lxc:1", "kvm:new"}, + }, + }, + Machines: map[string]*charm.MachineSpec{ + "1": nil, + }, + }, + expectMachines: 5, +}} + +func (s *StoreSuite) TestBundleMachineCount(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + entities := store.DB.Entities() + for i, test := range bundleMachineCountTests { + c.Logf("test %d: %s", i, test.about) + url := newResolvedURL("cs:~charmers/bundle/django-0", -1) + url.URL.Revision = i + url.PromulgatedRevision = i + err := test.data.Verify(nil, nil) + c.Assert(err, gc.IsNil) + // Add the bundle used for this test. + err = store.AddBundle(&testingBundle{ + data: test.data, + }, AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + + // Retrieve the bundle from the database. 
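+ // As with the unit count, the machine count is expected to have
+ // been computed from the placement directives when the bundle
+ // was added.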
+ var doc mongodoc.Entity + err = entities.FindId(&url.URL).One(&doc) + c.Assert(err, gc.IsNil) + + c.Assert(*doc.BundleMachineCount, gc.Equals, test.expectMachines) + } +} + +func urlStrings(urls []*charm.URL) []string { + urlStrs := make([]string, len(urls)) + for i, url := range urls { + urlStrs[i] = url.String() + } + return urlStrs +} + +// MustParseResolvedURL parses a resolved URL in string form, with +// the optional promulgated revision preceding the entity URL +// separated by a space. +func MustParseResolvedURL(urlStr string) *router.ResolvedURL { + s := strings.Fields(urlStr) + promRev := -1 + switch len(s) { + default: + panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) + case 2: + var err error + promRev, err = strconv.Atoi(s[0]) + if err != nil || promRev < 0 { + panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) + } + case 1: + } + url := charm.MustParseURL(s[len(s)-1]) + return &router.ResolvedURL{ + URL: *url.WithChannel(""), + PromulgatedRevision: promRev, + Development: url.Channel == charm.DevelopmentChannel, + } +} + +func MustParseResolvedURLs(urlStrs []string) []*router.ResolvedURL { + urls := make([]*router.ResolvedURL, len(urlStrs)) + for i, u := range urlStrs { + urls[i] = MustParseResolvedURL(u) + } + return urls +} + +func (s *StoreSuite) TestAddPromulgatedCharmDir(c *gc.C) { + charmDir := storetesting.Charms.CharmDir("wordpress") + s.checkAddCharm(c, charmDir, false, newResolvedURL("~charmers/precise/wordpress-1", 1)) +} + +func (s *StoreSuite) TestAddPromulgatedCharmArchive(c *gc.C) { + charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + s.checkAddCharm(c, charmArchive, false, newResolvedURL("~charmers/precise/wordpress-1", 1)) +} + +func (s *StoreSuite) TestAddUserOwnedCharmDir(c *gc.C) { + charmDir := storetesting.Charms.CharmDir("wordpress") + s.checkAddCharm(c, charmDir, false, newResolvedURL("~charmers/precise/wordpress-1", -1)) +} + +func (s *StoreSuite) TestAddUserOwnedCharmArchive(c *gc.C) { + charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + s.checkAddCharm(c, charmArchive, false, newResolvedURL("~charmers/precise/wordpress-1", -1)) +} + +func (s *StoreSuite) TestAddDevelopmentCharmArchive(c *gc.C) { + charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + url := newResolvedURL("~charmers/development/precise/wordpress-1", 1) + s.checkAddCharm(c, charmArchive, false, url) +} + +func (s *StoreSuite) TestAddBundleDir(c *gc.C) { + bundleDir := storetesting.Charms.BundleDir("wordpress-simple") + s.checkAddBundle(c, bundleDir, false, newResolvedURL("~charmers/bundle/wordpress-simple-2", 3)) +} + +func (s *StoreSuite) TestAddBundleArchive(c *gc.C) { + bundleArchive, err := charm.ReadBundleArchive( + storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), + ) + c.Assert(err, gc.IsNil) + s.checkAddBundle(c, bundleArchive, false, newResolvedURL("~charmers/bundle/wordpress-simple-2", 3)) +} + +func (s *StoreSuite) TestAddUserOwnedBundleDir(c *gc.C) { + bundleDir := storetesting.Charms.BundleDir("wordpress-simple") + s.checkAddBundle(c, bundleDir, false, newResolvedURL("~charmers/bundle/wordpress-simple-1", -1)) +} + +func (s *StoreSuite) TestAddUserOwnedBundleArchive(c *gc.C) { + bundleArchive, err := charm.ReadBundleArchive( + storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), + ) + c.Assert(err, gc.IsNil) + s.checkAddBundle(c, bundleArchive, false, newResolvedURL("~charmers/bundle/wordpress-simple-1", -1)) +} + +func (s *StoreSuite) 
TestAddDevelopmentBundleArchive(c *gc.C) { + bundleArchive, err := charm.ReadBundleArchive( + storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"), + ) + c.Assert(err, gc.IsNil) + url := newResolvedURL("~charmers/development/bundle/wordpress-simple-2", 3) + s.checkAddBundle(c, bundleArchive, false, url) +} + +func (s *StoreSuite) newStore(c *gc.C, withES bool) *Store { + var si *SearchIndex + if withES { + si = &SearchIndex{s.ES, s.TestIndex} + } + p, err := NewPool(s.Session.DB("juju_test"), si, &bakery.NewServiceParams{}, ServerParams{}) + c.Assert(err, gc.IsNil) + store := p.Store() + p.Close() + return store +} + +func (s *StoreSuite) TestAddCharmWithBundleSeries(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + err := store.AddCharm(ch, AddParams{ + URL: newResolvedURL("~charmers/bundle/wordpress-2", -1), + }) + c.Assert(err, gc.ErrorMatches, `charm added with invalid id cs:~charmers/bundle/wordpress-2`) +} + +func (s *StoreSuite) TestAddCharmWithMultiSeries(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + ch := storetesting.Charms.CharmArchive(c.MkDir(), "multi-series") + s.checkAddCharm(c, ch, false, newResolvedURL("~charmers/multi-series-1", 1)) + // Make sure it can be accessed with a number of names + e, err := store.FindEntity(newResolvedURL("~charmers/multi-series-1", 1)) + c.Assert(err, gc.IsNil) + c.Assert(e.URL.String(), gc.Equals, "cs:~charmers/multi-series-1") + e, err = store.FindEntity(newResolvedURL("~charmers/trusty/multi-series-1", 1)) + c.Assert(err, gc.IsNil) + c.Assert(e.URL.String(), gc.Equals, "cs:~charmers/multi-series-1") + e, err = store.FindEntity(newResolvedURL("~charmers/wily/multi-series-1", 1)) + c.Assert(err, gc.IsNil) + c.Assert(e.URL.String(), gc.Equals, "cs:~charmers/multi-series-1") + _, err = store.FindEntity(newResolvedURL("~charmers/precise/multi-series-1", 1)) + c.Assert(err, gc.ErrorMatches, "entity not found") + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) +} + +func (s *StoreSuite) TestAddCharmWithSeriesWhenThereIsAnExistingMultiSeriesVersion(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + ch := storetesting.Charms.CharmArchive(c.MkDir(), "multi-series") + err := store.AddCharm(ch, AddParams{ + URL: newResolvedURL("~charmers/multi-series-1", -1), + }) + c.Assert(err, gc.IsNil) + ch = storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + err = store.AddCharm(ch, AddParams{ + URL: newResolvedURL("~charmers/trusty/multi-series-2", -1), + }) + c.Assert(err, gc.ErrorMatches, `charm name duplicates multi-series charm name cs:~charmers/multi-series-1`) +} + +func (s *StoreSuite) TestAddCharmWithMultiSeriesToES(c *gc.C) { + store := s.newStore(c, true) + defer store.Close() + ch := storetesting.Charms.CharmArchive(c.MkDir(), "multi-series") + s.checkAddCharm(c, ch, true, newResolvedURL("~charmers/juju-gui-1", 1)) +} + +var addInvalidCharmURLTests = []string{ + "cs:precise/wordpress-2", // no user + "cs:~charmers/precise/wordpress", // no revision + "cs:~charmers/bundle/wordpress-2", // invalid series +} + +func (s *StoreSuite) TestAddInvalidCharmURL(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + for i, urlStr := range addInvalidCharmURLTests { + c.Logf("test %d: %s", i, urlStr) + err := store.AddCharm(ch, AddParams{ + URL: &router.ResolvedURL{ + URL: *charm.MustParseURL(urlStr), + PromulgatedRevision: -1, + }, + }) + 
c.Assert(err, gc.ErrorMatches, `charm added with invalid id .*`) + } +} + +var addInvalidBundleURLTests = []string{ + "cs:bundle/wordpress-2", // no user + "cs:~charmers/bundle/wordpress", // no revision + "cs:~charmers/wordpress-2", // no series + "cs:~charmers/precise/wordpress-3", // invalid series +} + +func (s *StoreSuite) TestAddBundleWithCharmSeries(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + b := storetesting.Charms.BundleDir("wordpress-simple") + for i, urlStr := range addInvalidBundleURLTests { + c.Logf("test %d: %s", i, urlStr) + err := store.AddBundle(b, AddParams{ + URL: &router.ResolvedURL{ + URL: *charm.MustParseURL(urlStr), + PromulgatedRevision: -1, + }, + }) + c.Assert(err, gc.ErrorMatches, `bundle added with invalid id .*`) + } +} + +func (s *StoreSuite) TestAddBundleDuplicatingCharm(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + ch := storetesting.Charms.CharmDir("wordpress") + err := store.AddCharmWithArchive(newResolvedURL("~charmers/precise/wordpress-2", -1), ch) + c.Assert(err, gc.IsNil) + + b := storetesting.Charms.BundleDir("wordpress-simple") + err = store.AddBundleWithArchive(newResolvedURL("~charmers/bundle/wordpress-5", -1), b) + c.Assert(err, gc.ErrorMatches, "bundle name duplicates charm name cs:~charmers/precise/wordpress-2") +} + +func (s *StoreSuite) TestAddCharmDuplicatingBundle(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + b := storetesting.Charms.BundleDir("wordpress-simple") + err := store.AddBundleWithArchive(newResolvedURL("~charmers/bundle/wordpress-2", -1), b) + c.Assert(err, gc.IsNil) + + ch := storetesting.Charms.CharmDir("wordpress") + err = store.AddCharmWithArchive(newResolvedURL("~charmers/precise/wordpress-5", -1), ch) + c.Assert(err, gc.ErrorMatches, "charm name duplicates bundle name cs:~charmers/bundle/wordpress-2") +} + +func (s *StoreSuite) TestOpenBlob(c *gc.C) { + charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + store := s.newStore(c, false) + defer store.Close() + url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) + err := store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + + f, err := os.Open(charmArchive.Path) + c.Assert(err, gc.IsNil) + defer f.Close() + expectHash := hashOfReader(c, f) + + r, size, hash, err := store.OpenBlob(url) + c.Assert(err, gc.IsNil) + defer r.Close() + + c.Assert(hashOfReader(c, r), gc.Equals, expectHash) + c.Assert(hash, gc.Equals, expectHash) + + info, err := f.Stat() + c.Assert(err, gc.IsNil) + c.Assert(size, gc.Equals, info.Size()) +} + +func (s *StoreSuite) TestBlobNameAndHash(c *gc.C) { + charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + + store := s.newStore(c, false) + defer store.Close() + url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) + err := store.AddCharmWithArchive(url, charmArchive) + c.Assert(err, gc.IsNil) + + f, err := os.Open(charmArchive.Path) + c.Assert(err, gc.IsNil) + defer f.Close() + expectHash := hashOfReader(c, f) + + name, hash, err := store.BlobNameAndHash(url) + c.Assert(err, gc.IsNil) + + r, _, err := store.BlobStore.Open(name) + c.Assert(err, gc.IsNil) + defer r.Close() + + c.Assert(hash, gc.Equals, expectHash) + c.Assert(hashOfReader(c, r), gc.Equals, expectHash) +} + +func (s *StoreSuite) TestAddLog(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + urls := []*charm.URL{ + charm.MustParseURL("cs:django"), + charm.MustParseURL("cs:rails"), + } + infoData := json.RawMessage([]byte(`"info 
data"`)) + errorData := json.RawMessage([]byte(`"error data"`)) + + // Add logs to the store. + beforeAdding := time.Now().Add(-time.Second) + err := store.AddLog(&infoData, mongodoc.InfoLevel, mongodoc.IngestionType, nil) + c.Assert(err, gc.IsNil) + err = store.AddLog(&errorData, mongodoc.ErrorLevel, mongodoc.IngestionType, urls) + c.Assert(err, gc.IsNil) + afterAdding := time.Now().Add(time.Second) + + // Retrieve the logs from the store. + var docs []mongodoc.Log + err = store.DB.Logs().Find(nil).Sort("_id").All(&docs) + c.Assert(err, gc.IsNil) + c.Assert(docs, gc.HasLen, 2) + + // The docs have been correctly added to the Mongo collection. + infoDoc, errorDoc := docs[0], docs[1] + c.Assert(infoDoc.Time, jc.TimeBetween(beforeAdding, afterAdding)) + c.Assert(errorDoc.Time, jc.TimeBetween(beforeAdding, afterAdding)) + infoDoc.Time = time.Time{} + errorDoc.Time = time.Time{} + c.Assert(infoDoc, jc.DeepEquals, mongodoc.Log{ + Data: []byte(infoData), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + URLs: nil, + }) + c.Assert(errorDoc, jc.DeepEquals, mongodoc.Log{ + Data: []byte(errorData), + Level: mongodoc.ErrorLevel, + Type: mongodoc.IngestionType, + URLs: urls, + }) +} + +func (s *StoreSuite) TestAddLogDataError(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + data := json.RawMessage([]byte("!")) + + // Try to add the invalid log message to the store. + err := store.AddLog(&data, mongodoc.InfoLevel, mongodoc.IngestionType, nil) + c.Assert(err, gc.ErrorMatches, "cannot marshal log data: json: error calling MarshalJSON .*") +} + +func (s *StoreSuite) TestAddLogBaseURLs(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + // Add the log to the store with associated URLs. + data := json.RawMessage([]byte(`"info data"`)) + err := store.AddLog(&data, mongodoc.WarningLevel, mongodoc.IngestionType, []*charm.URL{ + charm.MustParseURL("trusty/django-42"), + charm.MustParseURL("~who/utopic/wordpress"), + }) + c.Assert(err, gc.IsNil) + + // Retrieve the log from the store. + var doc mongodoc.Log + err = store.DB.Logs().Find(nil).One(&doc) + c.Assert(err, gc.IsNil) + + // The log includes the base URLs. + c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{ + charm.MustParseURL("trusty/django-42"), + charm.MustParseURL("django"), + charm.MustParseURL("~who/utopic/wordpress"), + charm.MustParseURL("~who/wordpress"), + }) +} + +func (s *StoreSuite) TestAddLogDuplicateURLs(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + // Add the log to the store with associated URLs. + data := json.RawMessage([]byte(`"info data"`)) + err := store.AddLog(&data, mongodoc.WarningLevel, mongodoc.IngestionType, []*charm.URL{ + charm.MustParseURL("trusty/django-42"), + charm.MustParseURL("django"), + charm.MustParseURL("trusty/django-42"), + charm.MustParseURL("django"), + }) + c.Assert(err, gc.IsNil) + + // Retrieve the log from the store. + var doc mongodoc.Log + err = store.DB.Logs().Find(nil).One(&doc) + c.Assert(err, gc.IsNil) + + // The log excludes duplicate URLs. + c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{ + charm.MustParseURL("trusty/django-42"), + charm.MustParseURL("django"), + }) +} + +func (s *StoreSuite) TestCollections(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + colls := store.DB.Collections() + names, err := store.DB.CollectionNames() + c.Assert(err, gc.IsNil) + // Some collections don't have indexes so they are created only when used. 
+ createdOnUse := map[string]bool{ + "migrations": true, + "macaroons": true, + } + // Check that all collections mentioned by Collections are actually created. + for _, coll := range colls { + found := false + for _, name := range names { + if name == coll.Name || createdOnUse[coll.Name] { + found = true + } + } + if !found { + c.Errorf("collection %q not created", coll.Name) + } + + } + // Check that all created collections are mentioned in Collections. + for _, name := range names { + if name == "system.indexes" || name == "managedStoredResources" { + continue + } + found := false + for _, coll := range colls { + if coll.Name == name { + found = true + } + } + if !found { + c.Errorf("extra collection %q found", name) + } + } +} + +func (s *StoreSuite) TestOpenCachedBlobFileWithInvalidEntity(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + wordpress := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) + err := store.AddCharmWithArchive(url, wordpress) + c.Assert(err, gc.IsNil) + + entity, err := store.FindEntity(url, "charmmeta") + c.Assert(err, gc.IsNil) + r, err := store.OpenCachedBlobFile(entity, "", nil) + c.Assert(err, gc.ErrorMatches, "provided entity does not have required fields") + c.Assert(r, gc.Equals, nil) +} + +func (s *StoreSuite) TestOpenCachedBlobFileWithFoundContent(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + wordpress := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) + err := store.AddCharmWithArchive(url, wordpress) + c.Assert(err, gc.IsNil) + + // Get our expected content. + data, err := ioutil.ReadFile(filepath.Join(wordpress.Path, "metadata.yaml")) + c.Assert(err, gc.IsNil) + expectContent := string(data) + + entity, err := store.FindEntity(url, "blobname", "contents") + c.Assert(err, gc.IsNil) + + // Check that, when we open the file for the first time, + // we see the expected content. + r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { + return path.Clean(f.Name) == "metadata.yaml" + }) + c.Assert(err, gc.IsNil) + defer r.Close() + data, err = ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, expectContent) + + // When retrieving the entity again, check that the Contents + // map has been set appropriately... + entity, err = store.FindEntity(url, "blobname", "contents") + c.Assert(err, gc.IsNil) + c.Assert(entity.Contents, gc.HasLen, 1) + c.Assert(entity.Contents[mongodoc.FileIcon].IsValid(), gc.Equals, true) + + // ... and that OpenCachedBlobFile still returns a reader with the + // same data, without making use of the isFile callback. 
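+ // The cached Contents entry recorded above should be sufficient
+ // to locate the file within the blob directly.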
+ r, err = store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { + c.Errorf("isFile called unexpectedly") + return false + }) + c.Assert(err, gc.IsNil) + defer r.Close() + data, err = ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, expectContent) +} + +func (s *StoreSuite) TestAddCharmWithUser(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + wordpress := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~who/precise/wordpress-23", -1) + err := store.AddCharmWithArchive(url, wordpress) + c.Assert(err, gc.IsNil) + assertBaseEntity(c, store, baseURL(&url.URL), false) +} + +func (s *StoreSuite) TestOpenCachedBlobFileWithNotFoundContent(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + wordpress := storetesting.Charms.CharmDir("wordpress") + url := newResolvedURL("cs:~charmers/precise/wordpress-23", 23) + err := store.AddCharmWithArchive(url, wordpress) + c.Assert(err, gc.IsNil) + + entity, err := store.FindEntity(url, "blobname", "contents") + c.Assert(err, gc.IsNil) + + // Check that, when we open the file for the first time, + // we get a NotFound error. + r, err := store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { + return false + }) + c.Assert(err, gc.ErrorMatches, "not found") + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) + c.Assert(r, gc.Equals, nil) + + // When retrieving the entity again, check that the Contents + // map has been set appropriately... + entity, err = store.FindEntity(url, "blobname", "contents") + c.Assert(err, gc.IsNil) + c.Assert(entity.Contents, gc.DeepEquals, map[mongodoc.FileId]mongodoc.ZipFile{ + mongodoc.FileIcon: {}, + }) + + // ... and that OpenCachedBlobFile still returns a NotFound + // error, without making use of the isFile callback. + r, err = store.OpenCachedBlobFile(entity, mongodoc.FileIcon, func(f *zip.File) bool { + c.Errorf("isFile called unexpectedly") + return false + }) + c.Assert(err, gc.ErrorMatches, "not found") + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) + c.Assert(r, gc.Equals, nil) +} + +func hashOfReader(c *gc.C, r io.Reader) string { + hash := sha512.New384() + _, err := io.Copy(hash, r) + c.Assert(err, gc.IsNil) + return fmt.Sprintf("%x", hash.Sum(nil)) +} + +func getSizeAndHashes(c interface{}) (int64, string, string) { + var r io.ReadWriter + var err error + switch c := c.(type) { + case archiverTo: + r = new(bytes.Buffer) + err = c.ArchiveTo(r) + case *charm.BundleArchive: + r, err = os.Open(c.Path) + case *charm.CharmArchive: + r, err = os.Open(c.Path) + default: + panic(fmt.Sprintf("unable to get size and hash for type %T", c)) + } + if err != nil { + panic(err) + } + hash := blobstore.NewHash() + hash256 := sha256.New() + size, err := io.Copy(io.MultiWriter(hash, hash256), r) + if err != nil { + panic(err) + } + return size, fmt.Sprintf("%x", hash.Sum(nil)), fmt.Sprintf("%x", hash256.Sum(nil)) +} + +// testingBundle implements charm.Bundle, allowing tests +// to create a bundle with custom data. +type testingBundle struct { + data *charm.BundleData +} + +func (b *testingBundle) Data() *charm.BundleData { + return b.data +} + +func (b *testingBundle) ReadMe() string { + // For the purposes of this implementation, the charm readme is not + // relevant. + return "" +} + +// Define fake blob attributes to be used in tests. 
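+// The hash is computed with blobstore.NewHash so that it has the
+// same format as the hashes the store itself computes (see
+// getSizeAndHashes above).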
+var fakeBlobSize, fakeBlobHash = func() (int64, string) {
+ b := []byte("fake content")
+ h := blobstore.NewHash()
+ h.Write(b)
+ return int64(len(b)), fmt.Sprintf("%x", h.Sum(nil))
+}()
+
+func (s *StoreSuite) TestSESPutDoesNotErrorWithNoESConfigured(c *gc.C) {
+ store := s.newStore(c, false)
+ defer store.Close()
+ err := store.UpdateSearch(nil)
+ c.Assert(err, gc.IsNil)
+}
+
+func (s *StoreSuite) TestAddCharmDirIndexed(c *gc.C) {
+ charmDir := storetesting.Charms.CharmDir("wordpress")
+ s.checkAddCharm(c, charmDir, true, newResolvedURL("cs:~charmers/precise/wordpress-2", -1))
+}
+
+func (s *StoreSuite) TestAddCharmArchiveIndexed(c *gc.C) {
+ charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")
+ s.checkAddCharm(c, charmArchive, true, newResolvedURL("cs:~charmers/precise/wordpress-2", -1))
+}
+
+func (s *StoreSuite) TestAddBundleDirIndexed(c *gc.C) {
+ bundleDir := storetesting.Charms.BundleDir("wordpress-simple")
+ s.checkAddBundle(c, bundleDir, true, newResolvedURL("cs:~charmers/bundle/baboom-2", -1))
+}
+
+func (s *StoreSuite) TestAddBundleArchiveIndexed(c *gc.C) {
+ bundleArchive, err := charm.ReadBundleArchive(
+ storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"),
+ )
+ c.Assert(err, gc.IsNil)
+ s.checkAddBundle(c, bundleArchive, true, newResolvedURL("cs:~charmers/bundle/baboom-2", -1))
+}
+
+func (s *StoreSuite) TestAddCharmDirIndexedAndPromulgated(c *gc.C) {
+ charmDir := storetesting.Charms.CharmDir("wordpress")
+ s.checkAddCharm(c, charmDir, true, newResolvedURL("cs:~charmers/precise/wordpress-2", 2))
+}
+
+func (s *StoreSuite) TestAddCharmArchiveIndexedAndPromulgated(c *gc.C) {
+ charmArchive := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")
+ s.checkAddCharm(c, charmArchive, true, newResolvedURL("cs:~charmers/precise/wordpress-2", 2))
+}
+
+func (s *StoreSuite) TestAddBundleDirIndexedAndPromulgated(c *gc.C) {
+ bundleDir := storetesting.Charms.BundleDir("wordpress-simple")
+ s.checkAddBundle(c, bundleDir, true, newResolvedURL("cs:~charmers/bundle/baboom-2", 2))
+}
+
+func (s *StoreSuite) TestAddBundleArchiveIndexedAndPromulgated(c *gc.C) {
+ bundleArchive, err := charm.ReadBundleArchive(
+ storetesting.Charms.BundleArchivePath(c.MkDir(), "wordpress-simple"),
+ )
+ c.Assert(err, gc.IsNil)
+ s.checkAddBundle(c, bundleArchive, true, newResolvedURL("cs:~charmers/bundle/baboom-2", 2))
+}
+
+var findBestEntityTests = []struct {
+ url string
+ expectURL string
+ expectErr string
+}{{
+ url: "~charmers/trusty/wordpress-10",
+ expectURL: "~charmers/trusty/wordpress-10",
+}, {
+ url: "~charmers/trusty/wordpress",
+ expectURL: "~charmers/trusty/wordpress-12",
+}, {
+ url: "trusty/wordpress-11",
+ expectURL: "~charmers/trusty/wordpress-11",
+}, {
+ url: "trusty/wordpress",
+ expectURL: "~mickey/trusty/wordpress-13",
+}, {
+ url: "wordpress",
+ expectURL: "~mickey/trusty/wordpress-13",
+}, {
+ url: "~mickey/wordpress-12",
+ expectErr: "entity not found",
+}, {
+ url: "~mickey/precise/wordpress",
+ expectURL: "~mickey/precise/wordpress-24",
+}, {
+ url: "mysql",
+ expectErr: "entity not found",
+}, {
+ url: "precise/wordpress",
+ expectURL: "~mickey/precise/wordpress-24",
+}, {
+ url: "~donald/bundle/wordpress-simple-0",
+ expectURL: "~donald/bundle/wordpress-simple-0",
+}, {
+ url: "~donald/bundle/wordpress-simple",
+ expectURL: "~donald/bundle/wordpress-simple-1",
+}, {
+ url: "~donald/wordpress-simple-0",
+ expectURL: "~donald/bundle/wordpress-simple-0",
+}, {
+ url: "bundle/wordpress-simple-0",
+ expectURL:
"~donald/bundle/wordpress-simple-1", +}, { + url: "bundle/wordpress-simple", + expectURL: "~donald/bundle/wordpress-simple-1", +}, { + url: "wordpress-simple", + expectURL: "~donald/bundle/wordpress-simple-1", +}, { + url: "~pluto/multi-series", + expectURL: "~pluto/wily/multi-series-1", +}} + +func (s *StoreSuite) TestFindBestEntity(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + entities := []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/trusty/wordpress-9"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-9"), + }, { + URL: charm.MustParseURL("~charmers/trusty/wordpress-10"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-10"), + }, { + URL: charm.MustParseURL("~charmers/trusty/wordpress-11"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-11"), + }, { + URL: charm.MustParseURL("~charmers/trusty/wordpress-12"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-12"), + }, { + URL: charm.MustParseURL("~mickey/precise/wordpress-12"), + }, { + URL: charm.MustParseURL("~mickey/trusty/wordpress-12"), + }, { + URL: charm.MustParseURL("~mickey/trusty/wordpress-13"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-13"), + }, { + URL: charm.MustParseURL("~mickey/precise/wordpress-24"), + PromulgatedURL: charm.MustParseURL("precise/wordpress-24"), + }, { + URL: charm.MustParseURL("~donald/bundle/wordpress-simple-0"), + }, { + URL: charm.MustParseURL("~donald/bundle/wordpress-simple-1"), + PromulgatedURL: charm.MustParseURL("bundle/wordpress-simple-0"), + }, { + URL: charm.MustParseURL("~pluto/utopic/multi-series-2"), + }, { + URL: charm.MustParseURL("~pluto/wily/multi-series-1"), + }} + for _, e := range entities { + err := store.DB.Entities().Insert(denormalizedEntity(e)) + c.Assert(err, gc.IsNil) + } + + for i, test := range findBestEntityTests { + c.Logf("test %d: %s", i, test.url) + entity, err := store.FindBestEntity(charm.MustParseURL(test.url)) + if test.expectErr != "" { + c.Assert(err, gc.ErrorMatches, test.expectErr) + } else { + c.Assert(err, gc.IsNil) + c.Assert(entity.URL.String(), gc.Equals, charm.MustParseURL(test.expectURL).String()) + } + } +} + +var matchingInterfacesQueryTests = []struct { + required []string + provided []string + expect []string +}{{ + provided: []string{"a"}, + expect: []string{ + "cs:~charmers/trusty/wordpress-1", + "cs:~charmers/trusty/wordpress-2", + }, +}, { + provided: []string{"a", "b", "d"}, + required: []string{"b", "c", "e"}, + expect: []string{ + "cs:~charmers/trusty/mysql-1", + "cs:~charmers/trusty/wordpress-1", + "cs:~charmers/trusty/wordpress-2", + }, +}, { + required: []string{"x"}, + expect: []string{ + "cs:~charmers/trusty/mysql-1", + "cs:~charmers/trusty/wordpress-2", + }, +}, { + expect: []string{}, +}} + +func (s *StoreSuite) TestMatchingInterfacesQuery(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + entities := []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/trusty/wordpress-1"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-1"), + CharmProvidedInterfaces: []string{"a", "b"}, + CharmRequiredInterfaces: []string{"b", "c"}, + }, { + URL: charm.MustParseURL("~charmers/trusty/wordpress-2"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-2"), + CharmProvidedInterfaces: []string{"a", "b"}, + CharmRequiredInterfaces: []string{"b", "c", "x"}, + }, { + // Note: development charm should never be found. 
+ URL: charm.MustParseURL("~charmers/trusty/wordpress-3"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-3"), + Development: true, + CharmProvidedInterfaces: []string{"a", "b"}, + CharmRequiredInterfaces: []string{"b", "c", "x"}, + }, { + URL: charm.MustParseURL("~charmers/trusty/mysql-1"), + PromulgatedURL: charm.MustParseURL("trusty/mysql-1"), + CharmProvidedInterfaces: []string{"d", "b"}, + CharmRequiredInterfaces: []string{"e", "x"}, + }} + for _, e := range entities { + err := store.DB.Entities().Insert(denormalizedEntity(e)) + c.Assert(err, gc.IsNil) + } + for i, test := range matchingInterfacesQueryTests { + c.Logf("test %d: req %v; prov %v", i, test.required, test.provided) + var entities []*mongodoc.Entity + err := store.MatchingInterfacesQuery(test.required, test.provided).All(&entities) + c.Assert(err, gc.IsNil) + var got []string + for _, e := range entities { + got = append(got, e.URL.String()) + } + sort.Strings(got) + c.Assert(got, jc.DeepEquals, test.expect) + } +} + +var findBestEntityWithMultiSeriesCharmsTests = []struct { + about string + entities []*mongodoc.Entity + url string + expectURL string +}{{ + about: "URL with series and revision can select multi-series charm", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + SupportedSeries: []string{"precise", "trusty"}, + }}, + url: "~charmers/trusty/wordpress-10", + expectURL: "~charmers/wordpress-10", +}, { + about: "URL with series and revision gives not found if series not supported", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + SupportedSeries: []string{"trusty"}, + }, { + URL: charm.MustParseURL("~bob/wordpress-12"), + SupportedSeries: []string{"quantal"}, + }}, + url: "~charmers/utopic/wordpress-10", +}, { + about: "URL with series and no revision prefers latest revision that supports that series", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + SupportedSeries: []string{"precise", "trusty"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-11"), + SupportedSeries: []string{"quantal"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-12"), + SupportedSeries: []string{"precise"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-13"), + SupportedSeries: []string{"trusty"}, + }, { + URL: charm.MustParseURL("~bob/wordpress-14"), + SupportedSeries: []string{"precise"}, + }}, + url: "~charmers/precise/wordpress", + expectURL: "~charmers/wordpress-12", +}, { + about: "URL with no series and revision resolves to the given exact entity", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + SupportedSeries: []string{"precise", "trusty"}, + }}, + url: "~charmers/wordpress-10", + expectURL: "~charmers/wordpress-10", +}, { + about: "URL with no series and revision will not find non-multi-series charm", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/precise/wordpress-10"), + }}, + url: "~charmers/wordpress-10", +}, { + about: "URL with no series and revision can find bundle", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/bundle/trundle-10"), + }}, + url: "~charmers/trundle-10", + expectURL: "~charmers/bundle/trundle-10", +}, { + about: "URL with no series and no revision finds latest multi-series charm", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-11"), + SupportedSeries: []string{"precise", "trusty"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-10"), + 
SupportedSeries: []string{"precise"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-12"), + SupportedSeries: []string{"precise"}, + }}, + url: "~charmers/wordpress", + expectURL: "~charmers/wordpress-12", +}, { + about: "promulgated URL with series, name and revision can select multi-series charm", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + PromulgatedURL: charm.MustParseURL("wordpress-2"), + SupportedSeries: []string{"precise", "trusty"}, + }}, + url: "precise/wordpress-2", + expectURL: "~charmers/wordpress-10", +}, { + about: "promulgated URL with series and no revision prefers latest promulgated revision that supports that series", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + PromulgatedURL: charm.MustParseURL("wordpress-1"), + SupportedSeries: []string{"precise", "trusty"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-11"), + PromulgatedURL: charm.MustParseURL("wordpress-2"), + SupportedSeries: []string{"quantal"}, + }, { + URL: charm.MustParseURL("~newcharmers/wordpress-1"), + PromulgatedURL: charm.MustParseURL("wordpress-3"), + SupportedSeries: []string{"precise"}, + }, { + URL: charm.MustParseURL("~newcharmers/wordpress-13"), + PromulgatedURL: charm.MustParseURL("wordpress-4"), + SupportedSeries: []string{"trusty"}, + }, { + URL: charm.MustParseURL("~bob/wordpress-14"), + SupportedSeries: []string{"precise"}, + }}, + url: "precise/wordpress", + expectURL: "~newcharmers/wordpress-1", +}, { + about: "promulgated URL with no series and revision resolves to the given exact entity", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + PromulgatedURL: charm.MustParseURL("wordpress-3"), + SupportedSeries: []string{"precise", "trusty"}, + }}, + url: "wordpress-3", + expectURL: "~charmers/wordpress-10", +}, { + about: "promulgated URL with no series and revision will not find non-multi-series charm", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/precise/wordpress-10"), + PromulgatedURL: charm.MustParseURL("precise/wordpress-3"), + }}, + url: "wordpress-3", +}, { + about: "promulgated URL with no series and revision can find bundle", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/bundle/trundle-10"), + PromulgatedURL: charm.MustParseURL("bundle/trundle-10"), + }}, + url: "trundle-10", + expectURL: "~charmers/bundle/trundle-10", +}, { + about: "promulgated URL with no series and no revision finds latest multi-series charm", + entities: []*mongodoc.Entity{{ + URL: charm.MustParseURL("~charmers/wordpress-10"), + PromulgatedURL: charm.MustParseURL("wordpress-1"), + SupportedSeries: []string{"precise", "trusty"}, + }, { + URL: charm.MustParseURL("~charmers/wordpress-11"), + PromulgatedURL: charm.MustParseURL("wordpress-2"), + SupportedSeries: []string{"quantal"}, + }, { + URL: charm.MustParseURL("~newcharmers/wordpress-1"), + PromulgatedURL: charm.MustParseURL("wordpress-3"), + SupportedSeries: []string{"precise"}, + }, { + URL: charm.MustParseURL("~newcharmers/wordpress-13"), + PromulgatedURL: charm.MustParseURL("wordpress-4"), + SupportedSeries: []string{"trusty"}, + }, { + URL: charm.MustParseURL("~bob/wordpress-14"), + SupportedSeries: []string{"precise"}, + }}, + url: "wordpress", + expectURL: "~newcharmers/wordpress-13", +}} + +func (s *StoreSuite) TestFindBestEntityWithMultiSeriesCharms(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + + for i, test := range 
findBestEntityWithMultiSeriesCharmsTests { + c.Logf("test %d: %s", i, test.about) + _, err := store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + for _, e := range test.entities { + err := store.DB.Entities().Insert(denormalizedEntity(e)) + c.Assert(err, gc.IsNil) + } + entity, err := store.FindBestEntity(charm.MustParseURL(test.url)) + if test.expectURL == "" { + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) + } else { + c.Assert(err, gc.IsNil) + c.Assert(entity.URL.String(), gc.Equals, charm.MustParseURL(test.expectURL).String()) + } + } +} + +var updateEntityTests = []struct { + url string + expectErr string +}{{ + url: "~charmers/trusty/wordpress-10", +}, { + url: "~charmers/precise/wordpress-10", + expectErr: `cannot update "cs:precise/wordpress-10": not found`, +}} + +func (s *StoreSuite) TestUpdateEntity(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + for i, test := range updateEntityTests { + c.Logf("test %d. %s", i, test.url) + url := newResolvedURL(test.url, 10) + _, err := store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + err = store.DB.Entities().Insert(denormalizedEntity(&mongodoc.Entity{ + URL: charm.MustParseURL("~charmers/trusty/wordpress-10"), + PromulgatedURL: charm.MustParseURL("trusty/wordpress-4"), + })) + c.Assert(err, gc.IsNil) + err = store.UpdateEntity(url, bson.D{{"$set", bson.D{{"extrainfo.test", []byte("PASS")}}}}) + if test.expectErr != "" { + c.Assert(err, gc.ErrorMatches, test.expectErr) + } else { + c.Assert(err, gc.IsNil) + entity, err := store.FindEntity(url) + c.Assert(err, gc.IsNil) + c.Assert(string(entity.ExtraInfo["test"]), gc.Equals, "PASS") + } + } +} + +var updateBaseEntityTests = []struct { + url string + expectErr string +}{{ + url: "~charmers/trusty/wordpress-10", +}, { + url: "~charmers/precise/mysql-10", + expectErr: `cannot update base entity for "cs:precise/mysql-10": not found`, +}} + +func (s *StoreSuite) TestUpdateBaseEntity(c *gc.C) { + store := s.newStore(c, false) + defer store.Close() + for i, test := range updateBaseEntityTests { + c.Logf("test %d. 
%s", i, test.url) + url := newResolvedURL(test.url, 10) + _, err := store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + err = store.DB.BaseEntities().Insert(&mongodoc.BaseEntity{ + URL: charm.MustParseURL("~charmers/wordpress"), + User: "charmers", + Name: "wordpress", + Promulgated: true, + }) + c.Assert(err, gc.IsNil) + err = store.UpdateBaseEntity(url, bson.D{{"$set", bson.D{{"acls", mongodoc.ACL{ + Read: []string{"test"}, + }}}}}) + if test.expectErr != "" { + c.Assert(err, gc.ErrorMatches, test.expectErr) + } else { + c.Assert(err, gc.IsNil) + baseEntity, err := store.FindBaseEntity(&url.URL) + c.Assert(err, gc.IsNil) + c.Assert(baseEntity.ACLs.Read, jc.DeepEquals, []string{"test"}) + } + } +} + +var promulgateTests = []struct { + about string + entities []*mongodoc.Entity + baseEntities []*mongodoc.BaseEntity + url string + promulgate bool + expectErr string + expectEntities []*mongodoc.Entity + expectBaseEntities []*mongodoc.BaseEntity +}{{ + about: "single charm not already promulgated", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", ""), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + }, + url: "~charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + }, +}, { + about: "multiple series not already promulgated", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", ""), + entity("~charmers/precise/wordpress-0", ""), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + }, + url: "~charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~charmers/precise/wordpress-0", "precise/wordpress-0"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + }, +}, { + about: "charm promulgated as different user", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test-charmers/trusty/wordpress-0", ""), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + baseEntity("~test-charmers/wordpress", false), + }, + url: "~test-charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-1"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + baseEntity("~test-charmers/wordpress", true), + }, +}, { + about: "single charm already promulgated", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + }, + url: "~charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + }, +}, { + about: "unrelated charms are unaffected", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", ""), + entity("~test-charmers/trusty/mysql-0", "trusty/mysql-0"), + }, + baseEntities: []*mongodoc.BaseEntity{ + 
baseEntity("~charmers/wordpress", false), + baseEntity("~test-charmers/mysql", true), + }, + url: "~charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test-charmers/trusty/mysql-0", "trusty/mysql-0"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + baseEntity("~test-charmers/mysql", true), + }, +}, { + about: "only one owner promulgated", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", ""), + entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + baseEntity("~test-charmers/wordpress", false), + baseEntity("~test2-charmers/wordpress", true), + }, + url: "~charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), + entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + baseEntity("~test-charmers/wordpress", false), + baseEntity("~test2-charmers/wordpress", false), + }, +}, { + about: "recovers from two promulgated base entities", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", ""), + entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test-charmers/trusty/wordpress-1", "trusty/wordpress-2"), + entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + baseEntity("~test-charmers/wordpress", true), + baseEntity("~test2-charmers/wordpress", true), + }, + url: "~test2-charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", ""), + entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-0"), + entity("~test-charmers/trusty/wordpress-1", "trusty/wordpress-2"), + entity("~test2-charmers/trusty/wordpress-0", "trusty/wordpress-1"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + baseEntity("~test-charmers/wordpress", false), + baseEntity("~test2-charmers/wordpress", true), + }, +}, { + about: "multiple series already promulgated", + entities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), + entity("~charmers/precise/wordpress-0", "precise/wordpress-1"), + entity("~test-charmers/trusty/wordpress-0", ""), + entity("~test-charmers/utopic/wordpress-0", ""), + }, + baseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", true), + baseEntity("~test-charmers/wordpress", false), + }, + url: "~test-charmers/trusty/wordpress-0", + promulgate: true, + expectEntities: []*mongodoc.Entity{ + entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"), + entity("~charmers/precise/wordpress-0", "precise/wordpress-1"), + entity("~test-charmers/trusty/wordpress-0", "trusty/wordpress-3"), + entity("~test-charmers/utopic/wordpress-0", "utopic/wordpress-0"), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + baseEntity("~charmers/wordpress", false), + baseEntity("~test-charmers/wordpress", true), + }, +}, { + about: "unpromulgate single promulgated charm ", + entities: 
[]*mongodoc.Entity{
+ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"),
+ },
+ baseEntities: []*mongodoc.BaseEntity{
+ baseEntity("~charmers/wordpress", true),
+ },
+ url: "~charmers/trusty/wordpress-0",
+ promulgate: false,
+ expectEntities: []*mongodoc.Entity{
+ entity("~charmers/trusty/wordpress-0", "trusty/wordpress-0"),
+ },
+ expectBaseEntities: []*mongodoc.BaseEntity{
+ baseEntity("~charmers/wordpress", false),
+ },
+}, {
+ about: "unpromulgate single unpromulgated charm",
+ entities: []*mongodoc.Entity{
+ entity("~charmers/trusty/wordpress-0", ""),
+ },
+ baseEntities: []*mongodoc.BaseEntity{
+ baseEntity("~charmers/wordpress", false),
+ },
+ url: "~charmers/trusty/wordpress-0",
+ promulgate: false,
+ expectEntities: []*mongodoc.Entity{
+ entity("~charmers/trusty/wordpress-0", ""),
+ },
+ expectBaseEntities: []*mongodoc.BaseEntity{
+ baseEntity("~charmers/wordpress", false),
+ },
+}}
+
+func (s *StoreSuite) TestSetPromulgated(c *gc.C) {
+ store := s.newStore(c, false)
+ defer store.Close()
+ for i, test := range promulgateTests {
+ c.Logf("test %d. %s", i, test.about)
+ url := newResolvedURL(test.url, -1)
+ _, err := store.DB.Entities().RemoveAll(nil)
+ c.Assert(err, gc.IsNil)
+ _, err = store.DB.BaseEntities().RemoveAll(nil)
+ c.Assert(err, gc.IsNil)
+ for _, entity := range test.entities {
+ err := store.DB.Entities().Insert(entity)
+ c.Assert(err, gc.IsNil)
+ }
+ for _, baseEntity := range test.baseEntities {
+ err := store.DB.BaseEntities().Insert(baseEntity)
+ c.Assert(err, gc.IsNil)
+ }
+ err = store.SetPromulgated(url, test.promulgate)
+ if test.expectErr != "" {
+ c.Assert(err, gc.ErrorMatches, test.expectErr)
+ continue
+ }
+ c.Assert(err, gc.IsNil)
+ n, err := store.DB.Entities().Count()
+ c.Assert(err, gc.IsNil)
+ c.Assert(n, gc.Equals, len(test.expectEntities))
+ n, err = store.DB.BaseEntities().Count()
+ c.Assert(err, gc.IsNil)
+ c.Assert(n, gc.Equals, len(test.expectBaseEntities))
+ for _, expectEntity := range test.expectEntities {
+ entity, err := store.FindEntity(EntityResolvedURL(expectEntity))
+ c.Assert(err, gc.IsNil)
+ c.Assert(entity, jc.DeepEquals, expectEntity)
+ }
+ for _, expectBaseEntity := range test.expectBaseEntities {
+ baseEntity, err := store.FindBaseEntity(expectBaseEntity.URL)
+ c.Assert(err, gc.IsNil)
+ c.Assert(baseEntity, jc.DeepEquals, expectBaseEntity)
+ }
+ }
+}
+
+func (s *StoreSuite) TestSetPromulgatedUpdateSearch(c *gc.C) {
+ store := s.newStore(c, true)
+ defer store.Close()
+
+ // Insert some entities in the store, ensuring there are several revisions of the same charm.
+ err := store.DB.Entities().Insert(entity("~charmers/trusty/wordpress-0", "trusty/wordpress-2"))
+ c.Assert(err, gc.IsNil)
+ err = store.DB.Entities().Insert(entity("~charmers/precise/wordpress-0", "precise/wordpress-1"))
+ c.Assert(err, gc.IsNil)
+ err = store.DB.Entities().Insert(entity("~openstack-charmers/trusty/wordpress-0", ""))
+ c.Assert(err, gc.IsNil)
+ err = store.DB.Entities().Insert(entity("~openstack-charmers/precise/wordpress-0", ""))
+ c.Assert(err, gc.IsNil)
+ err = store.DB.BaseEntities().Insert(baseEntity("~charmers/wordpress", true))
+ c.Assert(err, gc.IsNil)
+ err = store.DB.BaseEntities().Insert(baseEntity("~openstack-charmers/wordpress", false))
+ c.Assert(err, gc.IsNil)
+ url := newResolvedURL("~openstack-charmers/trusty/wordpress-0", -1)
+
+ // Change the promulgated wordpress version to openstack-charmers.
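+ // SetPromulgated allocates new promulgated revisions per series, one above
+ // the current maximum, so trusty becomes wordpress-3 and precise wordpress-2.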
+ err = store.SetPromulgated(url, true)
+ c.Assert(err, gc.IsNil)
+ err = store.ES.RefreshIndex(s.TestIndex)
+ c.Assert(err, gc.IsNil)
+ // Check that the search records contain the correct information.
+ var zdoc SearchDoc
+ doc := zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/trusty/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL, gc.IsNil)
+ c.Assert(doc.PromulgatedRevision, gc.Equals, -1)
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/precise/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL, gc.IsNil)
+ c.Assert(doc.PromulgatedRevision, gc.Equals, -1)
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/trusty/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL.String(), gc.Equals, "cs:trusty/wordpress-3")
+ c.Assert(doc.PromulgatedRevision, gc.Equals, 3)
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/precise/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL.String(), gc.Equals, "cs:precise/wordpress-2")
+ c.Assert(doc.PromulgatedRevision, gc.Equals, 2)
+
+ // Remove the promulgated flag from openstack-charmers, meaning wordpress is
+ // no longer promulgated.
+ err = store.SetPromulgated(url, false)
+ c.Assert(err, gc.IsNil)
+ err = store.ES.RefreshIndex(s.TestIndex)
+ c.Assert(err, gc.IsNil)
+ // Check that the search records contain the correct information.
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/trusty/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL, gc.IsNil)
+ c.Assert(doc.PromulgatedRevision, gc.Equals, -1)
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~charmers/precise/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL, gc.IsNil)
+ c.Assert(doc.PromulgatedRevision, gc.Equals, -1)
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/trusty/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL, gc.IsNil)
+ c.Assert(doc.PromulgatedRevision, gc.Equals, -1)
+ doc = zdoc
+ err = store.ES.GetDocument(s.TestIndex, typeName, store.ES.getID(charm.MustParseURL("~openstack-charmers/precise/wordpress-0")), &doc)
+ c.Assert(err, gc.IsNil)
+ c.Assert(doc.PromulgatedURL, gc.IsNil)
+ c.Assert(doc.PromulgatedRevision, gc.Equals, -1)
+}
+
+var setDevelopmentTests = []struct {
+ about string
+ existingDevelopment bool
+ development bool
+}{{
+ about: "keep entity under development",
+ existingDevelopment: true,
+ development: true,
+}, {
+ about: "publish an entity",
+ existingDevelopment: true,
+}, {
+ about: "unpublish an entity",
+ development: true,
+}, {
+ about: "keep entity published",
+}}
+
+func (s *StoreSuite) TestSetDevelopment(c *gc.C) {
+ store := s.newStore(c, true)
+ defer store.Close()
+
+ for i, test := range setDevelopmentTests {
+ c.Logf("test %d: %s", i, test.about)
+
+ // Insert the existing entity.
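+ // Each test case uses a distinct revision so that entities from
+ // earlier iterations do not collide.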
+ url := charm.MustParseURL("~who/wily/django")
+ url.Revision = i
+ if test.existingDevelopment {
+ url.Channel = charm.DevelopmentChannel
+ }
+ rurl := newResolvedURL(url.Path(), -1)
+ err := store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress"))
+ c.Assert(err, gc.IsNil)
+
+ // Set whether the entity is under development or published.
+ err = store.SetDevelopment(rurl, test.development)
+ c.Assert(err, gc.IsNil)
+
+ // Ensure the entity development flag has been correctly set.
+ rurl.Development = test.development
+ e, err := store.FindEntity(rurl, "development")
+ c.Assert(err, gc.IsNil)
+ c.Assert(e.Development, gc.Equals, test.development)
+
+ // Check that the entity can be found in the search index if published.
+ if !test.development {
+ found, err := store.ES.HasDocument(s.TestIndex, typeName, store.ES.getID(&rurl.URL))
+ c.Assert(err, gc.IsNil)
+ c.Assert(found, jc.IsTrue)
+ }
+ }
+}
+
+func (s *StoreSuite) TestSetDevelopmentErrorNotFound(c *gc.C) {
+ store := s.newStore(c, false)
+ defer store.Close()
+
+ err := store.SetDevelopment(newResolvedURL("~who/wily/no-such-42", -1), true)
+ c.Assert(err, gc.ErrorMatches, `cannot update "cs:~who/wily/no-such-42": not found`)
+ c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound)
+}
+
+var entityResolvedURLTests = []struct {
+ about string
+ entity *mongodoc.Entity
+ rurl *router.ResolvedURL
+}{{
+ about: "user owned, published",
+ entity: &mongodoc.Entity{
+ URL: charm.MustParseURL("~charmers/precise/wordpress-23"),
+ },
+ rurl: &router.ResolvedURL{
+ URL: *charm.MustParseURL("~charmers/precise/wordpress-23"),
+ PromulgatedRevision: -1,
+ },
+}, {
+ about: "promulgated, published",
+ entity: &mongodoc.Entity{
+ URL: charm.MustParseURL("~charmers/precise/wordpress-23"),
+ PromulgatedURL: charm.MustParseURL("precise/wordpress-4"),
+ },
+ rurl: &router.ResolvedURL{
+ URL: *charm.MustParseURL("~charmers/precise/wordpress-23"),
+ PromulgatedRevision: 4,
+ },
+}, {
+ about: "user owned, under development",
+ entity: &mongodoc.Entity{
+ URL: charm.MustParseURL("~charmers/trusty/wordpress-42"),
+ Development: true,
+ },
+ rurl: &router.ResolvedURL{
+ URL: *charm.MustParseURL("~charmers/trusty/wordpress-42"),
+ PromulgatedRevision: -1,
+ Development: true,
+ },
}, {
+ about: "promulgated, under development",
+ entity: &mongodoc.Entity{
+ URL: charm.MustParseURL("~charmers/wily/wordpress-42"),
+ PromulgatedURL: charm.MustParseURL("wily/wordpress-0"),
+ Development: true,
+ },
+ rurl: &router.ResolvedURL{
+ URL: *charm.MustParseURL("~charmers/wily/wordpress-42"),
+ PromulgatedRevision: 0,
+ Development: true,
+ },
+}}
+
+func (s *StoreSuite) TestEntityResolvedURL(c *gc.C) {
+ for i, test := range entityResolvedURLTests {
+ c.Logf("test %d: %s", i, test.about)
+ c.Assert(EntityResolvedURL(test.entity), gc.DeepEquals, test.rurl)
+ }
+}
+
+func (s *StoreSuite) TestCopyCopiesSessions(c *gc.C) {
+ store := s.newStore(c, false)
+
+ wordpress := storetesting.Charms.CharmDir("wordpress")
+ url := MustParseResolvedURL("23 cs:~charmers/precise/wordpress-23")
+ err := store.AddCharmWithArchive(url, wordpress)
+ c.Assert(err, gc.IsNil)
+
+ store1 := store.Copy()
+ defer store1.Close()
+
+ // Close the store we copied from. The copy should be unaffected.
+ store.Close()
+
+ entity, err := store1.FindEntity(url)
+ c.Assert(err, gc.IsNil)
+
+ // Also check the blob store, as it has its own session reference.
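+ // Opening the blob succeeds only if the copied session is still usable.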
+ r, _, err := store1.BlobStore.Open(entity.BlobName) + c.Assert(err, gc.IsNil) + r.Close() + + // Also check the macaroon storage as that also has its own session reference. + m, err := store1.Bakery.NewMacaroon("", nil, nil) + c.Assert(err, gc.IsNil) + c.Assert(m, gc.NotNil) +} + +func (s *StoreSuite) TestAddAudit(c *gc.C) { + filename := filepath.Join(c.MkDir(), "audit.log") + config := ServerParams{ + AuditLogger: &lumberjack.Logger{ + Filename: filename, + }, + } + + p, err := NewPool(s.Session.DB("juju_test"), nil, nil, config) + c.Assert(err, gc.IsNil) + defer p.Close() + + store := p.Store() + defer store.Close() + + entries := []audit.Entry{{ + User: "George Clooney", + Op: audit.OpSetPerm, + Entity: charm.MustParseURL("cs:mycharm"), + ACL: &audit.ACL{ + Read: []string{"eleven", "ocean"}, + Write: []string{"brad", "pitt"}, + }, + }, { + User: "Julia Roberts", + Op: audit.OpSetPerm, + }} + + now := time.Now() + for _, e := range entries { + store.addAuditAtTime(e, now) + } + data, err := ioutil.ReadFile(filename) + c.Assert(err, gc.IsNil) + + lines := strings.Split(strings.TrimSuffix(string(data), "\n"), "\n") + c.Assert(lines, gc.HasLen, len(entries)) + for i, e := range entries { + e.Time = now + c.Assert(lines[i], jc.JSONEquals, e) + } +} + +func (s *StoreSuite) TestAddAuditWithNoLumberjack(c *gc.C) { + p, err := NewPool(s.Session.DB("juju_test"), nil, nil, ServerParams{}) + c.Assert(err, gc.IsNil) + defer p.Close() + + store := p.Store() + defer store.Close() + + // Check that it does not panic. + store.AddAudit(audit.Entry{ + User: "George Clooney", + Op: audit.OpSetPerm, + Entity: charm.MustParseURL("cs:mycharm"), + ACL: &audit.ACL{ + Read: []string{"eleven", "ocean"}, + Write: []string{"brad", "pitt"}, + }, + }) +} + +func (s *StoreSuite) TestDenormalizeEntity(c *gc.C) { + e := &mongodoc.Entity{ + URL: charm.MustParseURL("~someone/utopic/acharm-45"), + } + denormalizeEntity(e) + c.Assert(e, jc.DeepEquals, &mongodoc.Entity{ + URL: charm.MustParseURL("~someone/utopic/acharm-45"), + BaseURL: charm.MustParseURL("~someone/acharm"), + User: "someone", + Name: "acharm", + Revision: 45, + Series: "utopic", + PromulgatedRevision: -1, + SupportedSeries: []string{"utopic"}, + }) +} + +func (s *StoreSuite) TestDenormalizePromulgatedEntity(c *gc.C) { + e := &mongodoc.Entity{ + URL: charm.MustParseURL("~someone/utopic/acharm-45"), + PromulgatedURL: charm.MustParseURL("utopic/acharm-5"), + } + denormalizeEntity(e) + c.Assert(e, jc.DeepEquals, &mongodoc.Entity{ + URL: charm.MustParseURL("~someone/utopic/acharm-45"), + BaseURL: charm.MustParseURL("~someone/acharm"), + User: "someone", + Name: "acharm", + Revision: 45, + Series: "utopic", + PromulgatedURL: charm.MustParseURL("utopic/acharm-5"), + PromulgatedRevision: 5, + SupportedSeries: []string{"utopic"}, + }) +} + +func (s *StoreSuite) TestDenormalizeBundleEntity(c *gc.C) { + e := &mongodoc.Entity{ + URL: charm.MustParseURL("~someone/bundle/acharm-45"), + } + denormalizeEntity(e) + c.Assert(e, jc.DeepEquals, &mongodoc.Entity{ + URL: charm.MustParseURL("~someone/bundle/acharm-45"), + BaseURL: charm.MustParseURL("~someone/acharm"), + User: "someone", + Name: "acharm", + Revision: 45, + Series: "bundle", + PromulgatedRevision: -1, + }) +} + +func entity(url, purl string) *mongodoc.Entity { + id := charm.MustParseURL(url) + var pid *charm.URL + if purl != "" { + pid = charm.MustParseURL(purl) + } + e := &mongodoc.Entity{ + URL: id, + PromulgatedURL: pid, + } + denormalizeEntity(e) + return e +} + +func baseEntity(url string, promulgated bool) 
*mongodoc.BaseEntity {
+ id := charm.MustParseURL(url)
+ return &mongodoc.BaseEntity{
+ URL: id,
+ Name: id.Name,
+ User: id.User,
+ Promulgated: mongodoc.IntBool(promulgated),
+ }
+}
+
+// denormalizedEntity is a convenience function that returns
+// a copy of e with its denormalized fields filled out.
+func denormalizedEntity(e *mongodoc.Entity) *mongodoc.Entity {
+ e1 := *e
+ denormalizeEntity(&e1)
+ return &e1
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,49 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmstore // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+ "archive/zip"
+ "compress/flate"
+ "io"
+
+ "gopkg.in/errgo.v1"
+
+ "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+)
+
+// ZipFileReader returns a reader that will read
+// content referred to by f within zipr, which should
+// refer to the contents of a zip file.
+func ZipFileReader(zipr io.ReadSeeker, f mongodoc.ZipFile) (io.Reader, error) {
+ if _, err := zipr.Seek(f.Offset, 0); err != nil {
+ return nil, errgo.Notef(err, "cannot seek to %d in zip content", f.Offset)
+ }
+ content := io.LimitReader(zipr, f.Size)
+ if !f.Compressed {
+ return content, nil
+ }
+ return flate.NewReader(content), nil
+}
+
+// NewZipFile returns a new mongodoc zip file
+// reference for the given zip file.
+func NewZipFile(f *zip.File) (mongodoc.ZipFile, error) {
+ offset, err := f.DataOffset()
+ if err != nil {
+ return mongodoc.ZipFile{}, errgo.Notef(err, "cannot determine data offset for %q", f.Name)
+ }
+ zf := mongodoc.ZipFile{
+ Offset: offset,
+ Size: int64(f.CompressedSize64),
+ }
+ switch f.Method {
+ case zip.Store:
+ case zip.Deflate:
+ zf.Compressed = true
+ default:
+ return mongodoc.ZipFile{}, errgo.Newf("unknown zip compression method for %q", f.Name)
+ }
+ return zf, nil
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/charmstore/zip_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,123 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+
+import (
+ "archive/zip"
+ "bytes"
+ "io"
+ "io/ioutil"
+ "strings"
+
+ jujutesting "github.com/juju/testing"
+ gc "gopkg.in/check.v1"
+ "gopkg.in/errgo.v1"
+
+ "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+ "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+)
+
+type zipSuite struct {
+ jujutesting.IsolationSuite
+ contents map[string]string
+}
+
+var _ = gc.Suite(&zipSuite{})
+
+func (s *zipSuite) SetUpSuite(c *gc.C) {
+ s.IsolationSuite.SetUpSuite(c)
+ s.contents = map[string]string{
+ "readme.md": "readme contents",
+ "uncompressed_readme.md": "readme contents",
+ "icon.svg": "icon contents",
+ "metadata.yaml": "metadata contents",
+ "empty": "",
+ "uncompressed_empty": "",
+ }
+}
+
+func (s *zipSuite) makeZipReader(c *gc.C, contents map[string]string) (io.ReadSeeker, []*zip.File) {
+ // Create a customized zip archive in memory.
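+ // Names carrying the "uncompressed_" prefix are written with zip.Store
+ // below, so both compression methods are exercised.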
+ var buf bytes.Buffer
+ w := zip.NewWriter(&buf)
+ for name, content := range contents {
+ header := &zip.FileHeader{
+ Name: name,
+ Method: zip.Deflate,
+ }
+ if strings.HasPrefix(name, "uncompressed_") {
+ header.Method = zip.Store
+ }
+ f, err := w.CreateHeader(header)
+ c.Assert(err, gc.IsNil)
+ _, err = f.Write([]byte(content))
+ c.Assert(err, gc.IsNil)
+ }
+ c.Assert(w.Close(), gc.IsNil)
+
+ // Retrieve the zip files in the archive.
+ zipReader := bytes.NewReader(buf.Bytes())
+ r, err := zip.NewReader(zipReader, int64(buf.Len()))
+ c.Assert(err, gc.IsNil)
+ c.Assert(r.File, gc.HasLen, len(contents))
+ return zipReader, r.File
+}
+
+func (s *zipSuite) TestZipFileReader(c *gc.C) {
+ zipReader, files := s.makeZipReader(c, s.contents)
+
+ // Check that a ZipFile created from each file in the archive
+ // can be read correctly.
+ for i, f := range files {
+ c.Logf("test %d: %s", i, f.Name)
+ zf, err := charmstore.NewZipFile(f)
+ c.Assert(err, gc.IsNil)
+ zfr, err := charmstore.ZipFileReader(zipReader, zf)
+ c.Assert(err, gc.IsNil)
+ content, err := ioutil.ReadAll(zfr)
+ c.Assert(err, gc.IsNil)
+ c.Assert(string(content), gc.Equals, s.contents[f.Name])
+ }
+}
+
+func (s *zipSuite) TestZipFileReaderWithErrorOnSeek(c *gc.C) {
+ er := &seekErrorReader{}
+ r, err := charmstore.ZipFileReader(er, mongodoc.ZipFile{})
+ c.Assert(err, gc.ErrorMatches, "cannot seek to 0 in zip content: foiled!")
+ c.Assert(r, gc.Equals, nil)
+}
+
+type seekErrorReader struct {
+ io.Reader
+}
+
+func (r *seekErrorReader) Seek(offset int64, whence int) (int64, error) {
+ return 0, errgo.New("foiled!")
+}
+
+func (s *zipSuite) TestNewZipFile(c *gc.C) {
+ _, files := s.makeZipReader(c, s.contents)
+
+ // Check that we can create a new ZipFile from
+ // each zip file in the archive.
+ for i, f := range files {
+ c.Logf("test %d: %s", i, f.Name)
+ zf, err := charmstore.NewZipFile(f)
+ c.Assert(err, gc.IsNil)
+ offset, err := f.DataOffset()
+ c.Assert(err, gc.IsNil)
+
+ c.Assert(zf.Offset, gc.Equals, offset)
+ c.Assert(zf.Size, gc.Equals, int64(f.CompressedSize64))
+ c.Assert(zf.Compressed, gc.Equals, !strings.HasPrefix(f.Name, "uncompressed_"))
+ }
+}
+
+func (s *zipSuite) TestNewZipFileWithCompressionMethodError(c *gc.C) {
+ _, files := s.makeZipReader(c, map[string]string{"foo": "contents"})
+ f := files[0]
+ f.Method = 99
+ _, err := charmstore.NewZipFile(f)
+ c.Assert(err, gc.ErrorMatches, `unknown zip compression method for "foo"`)
+}
=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/debug'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/debug/handler.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,22 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// The debug package holds various functions that may
+// be used for debugging but should not be included
+// in production code.
+package debug // import "gopkg.in/juju/charmstore.v5-unstable/internal/debug"
+
+import (
+ "log"
+ "net/http"
+)
+
+// Handler returns a new handler that wraps h
+// and logs the given message along with the request
+// URL and headers every time a request is handled.
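+//
+// For example, to log the requests arriving at some existing handler h:
+//
+// http.Handle("/", Handler("incoming", h))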
+func Handler(msg string, h http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ log.Printf("%s got request at URL %q; headers %q", msg, req.URL, req.Header)
+ h.ServeHTTP(w, req)
+ })
+}
=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/identity'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/client.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/client.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/client.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,97 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package identity implements a client for an identity server.
+package identity // import "gopkg.in/juju/charmstore.v5-unstable/internal/identity"
+
+import (
+ "net/http"
+
+ "github.com/juju/loggo"
+ "gopkg.in/errgo.v1"
+ "gopkg.in/macaroon-bakery.v1/httpbakery"
+
+ "gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+var logger = loggo.GetLogger("charmstore.internal.identity")
+
+// Params provides the parameters to be passed when creating a new
+// client.
+type Params struct {
+ URL string
+ Client *httpbakery.Client
+}
+
+// Client provides a client that can be used to query an identity server.
+type Client struct {
+ p *Params
+}
+
+// NewClient creates a new Client.
+func NewClient(p *Params) *Client {
+ return &Client{
+ p: p,
+ }
+}
+
+// endpoint adds the endpoint to the identity URL.
+func (c *Client) endpoint(ep string) string {
+ return c.p.URL + ep
+}
+
+// get performs an HTTP GET using c.p.Client.
+func (c *Client) get(path string) (*http.Response, error) {
+ u := c.endpoint(path)
+ req, err := http.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, errgo.Notef(err, "cannot create request for %q", u)
+ }
+ resp, err := c.p.Client.Do(req)
+ if err != nil {
+ return nil, errgo.Notef(err, "cannot GET %q", u)
+ }
+ return resp, nil
+}
+
+// GetJSON performs a JSON request on the identity server at the
+// specified path. The returned value is unmarshalled to v.
+func (c *Client) GetJSON(path string, v interface{}) error {
+ resp, err := c.get(path)
+ if err != nil {
+ return errgo.Notef(err, "cannot GET %q", path)
+ }
+ return router.UnmarshalJSONResponse(resp, v, getError)
+}
+
+// GroupsForUser gets the list of groups to which the specified user
+// belongs.
+func (c *Client) GroupsForUser(username string) ([]string, error) {
+ var groups []string
+ if err := c.GetJSON("/v1/u/"+username+"/groups", &groups); err != nil {
+ return nil, errgo.Notef(err, "cannot get groups for %s", username)
+ }
+ return groups, nil
+}
+
+// idmError is the error that might be returned by the identity server.
+type idmError struct {
+ Message string `json:"message,omitempty"`
+ Code string `json:"code,omitempty"`
+}
+
+func (e idmError) Error() string {
+ return e.Message
+}
+
+// getError tries to retrieve the error from a failed query. If the
+// response does not contain an error, the status line is used to create
+// an error.
+func getError(r *http.Response) error { + var ierr idmError + if err := router.UnmarshalJSONResponse(r, &ierr, nil); err != nil { + logger.Errorf("could not unmarshal error: %s", err) + return errgo.Newf("bad status %q", r.Status) + } + return ierr +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/client_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/client_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package identity_test + +import ( + "encoding/json" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + "gopkg.in/juju/charmstore.v5-unstable/internal/identity" +) + +type clientSuite struct { + idM *idM + client *identity.Client +} + +var _ = gc.Suite(&clientSuite{}) + +func (s *clientSuite) SetUpSuite(c *gc.C) { + s.idM = newIdM(c) +} + +func (s *clientSuite) TearDownSuite(c *gc.C) { + s.idM.Close() +} + +func (s *clientSuite) SetUpTest(c *gc.C) { + s.client = identity.NewClient(&identity.Params{ + URL: s.idM.URL, + Client: httpbakery.NewClient(), + }) +} + +var getJSONTests = []struct { + about string + path string + expectBody interface{} + expectError string +}{{ + about: "GET", + path: "/test", + expectBody: map[string]string{ + "method": "GET", + }, +}, { + about: "GET bad URL", + path: "/%fg", + expectError: `cannot GET "/%fg": cannot create request for ".*/%fg": parse .*/%fg: invalid URL escape "%fg"`, +}, { + about: "GET bad request", + path: "5/test", + expectError: `cannot GET "5/test": cannot GET ".*5/test": .*`, +}, { + about: "GET error", + path: `/test?s=500&b=%7B%22message%22%3A%22an+error%22%7D`, + expectError: `an error`, +}, { + about: "GET unparsable content type", + path: `/test?ct=bad+content+type`, + expectError: `cannot parse content type: mime: expected slash after first token`, +}, { + about: "GET unexpected content type", + path: `/test?ct=application/xml`, + expectError: `unexpected content type "application/xml"`, +}, { + about: "GET unmarshal error", + path: `/test?b=tru`, + expectError: `cannot unmarshal response: invalid character ' ' in literal true \(expecting 'e'\)`, +}, { + about: "GET error cannot unmarshal", + path: `/test?b=fals&s=502`, + expectError: `bad status "502 Bad Gateway"`, +}} + +func (s *clientSuite) TestGetJSON(c *gc.C) { + for i, test := range getJSONTests { + c.Logf("%d. %s", i, test.about) + var v json.RawMessage + err := s.client.GetJSON(test.path, &v) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(string(v), jc.JSONEquals, test.expectBody) + } +} + +var groupsForUserTests = []struct { + user string + expectGroups []string + expectError string +}{{ + user: "user1", + expectGroups: []string{"g1", "g2"}, +}, { + user: "user2", + expectGroups: []string{}, +}, { + user: "user3", + expectError: "cannot get groups for user3: /v1/u/user3/groups not found", +}} + +func (s *clientSuite) TestGroupsForUser(c *gc.C) { + for i, test := range groupsForUserTests { + c.Logf("%d. 
%s", i, test.user) + groups, err := s.client.GroupsForUser(test.user) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(groups, jc.DeepEquals, test.expectGroups) + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/export_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package identity // import "gopkg.in/juju/charmstore.v5-unstable/internal/identity" + +type ( + IdmError idmError +) === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/idm_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/idm_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/idm_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,107 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package identity_test + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strconv" + + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/identity" +) + +type discharge struct { + id string + c chan error +} + +// idM is a mock identity manager that can be used to test the client. +type idM struct { + *httptest.Server + *http.ServeMux +} + +func newIdM(c *gc.C) *idM { + i := &idM{ + ServeMux: http.NewServeMux(), + } + i.Server = httptest.NewServer(i) + i.Handle("/", http.HandlerFunc(i.notFound)) + i.Handle("/test", http.HandlerFunc(i.serveTest)) + i.Handle("/v1/u/user1/groups", i.serveGroups("g1", "g2")) + i.Handle("/v1/u/user2/groups", i.serveGroups()) + return i +} + +func (i *idM) notFound(w http.ResponseWriter, req *http.Request) { + i.error(w, http.StatusNotFound, "not found", "%s not found", req.URL.Path) +} + +// serveTest serves a /test endpoint that can return a number of things +// depending on the query parameters: +// ct = Content-Type to use (application/json) +// s = Status code to use (200) +// b = body content ({"method": method used}) +func (i *idM) serveTest(w http.ResponseWriter, req *http.Request) { + req.ParseForm() + if req.Form.Get("ct") != "" { + w.Header().Set("Content-Type", req.Form.Get("ct")) + } else { + w.Header().Set("Content-Type", "application/json") + } + if req.Form.Get("s") != "" { + s, err := strconv.Atoi(req.Form.Get("s")) + if err != nil { + i.error(w, http.StatusBadRequest, "ERROR", "cannot read status: %s", err) + return + } + w.WriteHeader(s) + } + if req.Form.Get("b") != "" { + w.Write([]byte(req.Form.Get("b"))) + } else { + data := map[string]interface{}{ + "method": req.Method, + } + resp, err := json.Marshal(data) + if err != nil { + i.error(w, http.StatusInternalServerError, "ERROR", "cannot marshal response: %s", err) + return + } + w.Write(resp) + } +} + +func (i *idM) write(w http.ResponseWriter, v interface{}) { + body, err := json.Marshal(v) + if err != nil { + i.error(w, http.StatusInternalServerError, "ERROR", "cannot marshal response: %s", err) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write(body) +} + +func (i *idM) error(w http.ResponseWriter, status int, code, format string, a ...interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(status) + body, err := 
json.Marshal(&identity.IdmError{
+ Message: fmt.Sprintf(format, a...),
+ Code: code,
+ })
+ if err != nil {
+ panic(err)
+ }
+ w.Write(body)
+}
+
+func (i *idM) serveGroups(groups ...string) http.HandlerFunc {
+ return func(w http.ResponseWriter, _ *http.Request) {
+ i.write(w, groups)
+ }
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/package_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/identity/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package identity_test
+
+import (
+ "testing"
+
+ gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+ gc.TestingT(t)
+}
=== added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy'
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,329 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// The legacy package implements the legacy API, as follows:
+//
+// /charm-info
+//
+// A GET call to `/charm-info` returns info about one or more charms, including
+// each charm's canonical URL, revision, SHA256 checksum and VCS revision digest.
+// The returned info is in JSON format.
+// For instance a request to `/charm-info?charms=cs:trusty/juju-gui` returns the
+// following response:
+//
+// {"cs:trusty/juju-gui": {
+// "canonical-url": "cs:trusty/juju-gui",
+// "revision": 3,
+// "sha256": "a15c77f3f92a0fb7b61e9...",
+// "digest": "jeff.pihach@canonical.com-20140612210347-6cc9su1jqjkhbi84"
+// }}
+//
+// /charm-event:
+//
+// A GET call to `/charm-event` returns info about an event that occurred in the
+// life of the specified charm(s). Currently two types of events are logged:
+// "published" (a charm has been published and is available in the store) and
+// "publish-error" (an error occurred while importing the charm).
+// E.g. a call to `/charm-event?charms=cs:trusty/juju-gui` generates the following
+// JSON response:
+//
+// {"cs:trusty/juju-gui": {
+// "kind": "published",
+// "revision": 3,
+// "digest": "jeff.pihach@canonical.com-20140612210347-6cc9su1jqjkhbi84",
+// "time": "2014-06-16T14:41:19Z"
+// }}
+//
+// /charm/
+//
+// The `charm` API provides the ability to download a charm as a Zip archive,
+// given the charm identifier. For instance, it is possible to download the Juju
+// GUI charm by performing a GET call to `/charm/trusty/juju-gui-42`. Both the
+// revision and OS series can be omitted, e.g. `/charm/juju-gui` will download the
+// latest revision of the Juju GUI charm that supports the most recent Ubuntu LTS
+// series.
+//
+// /stats/counter/
+//
+// Stats can be retrieved by calling `/stats/counter/{key}` where key is a query
+// that specifies the counter stats to calculate and return.
+//
+// For instance, a call to `/stats/counter/charm-bundle:*` returns the number of
+// times a charm has been downloaded from the store. To get the same value for
+// a specific charm, it is possible to filter the results by passing the charm
+// series and name, e.g. `/stats/counter/charm-bundle:trusty:juju-gui`.
+//
+// The results can be grouped by specifying the `by` query (possible values are
+// `day` and `week`), and time delimited using the `start` and `stop` queries.
+//
+// It is also possible to list the results by passing `list=1`. For example, a GET
+// call to `/stats/counter/charm-bundle:trusty:*?by=day&list=1` returns an
+// aggregated count of trusty charm downloads, grouped by charm and day, similar
+// to the following:
+//
+// charm-bundle:trusty:juju-gui 2014-06-17 5
+// charm-bundle:trusty:mysql 2014-06-17 1
+package legacy // import "gopkg.in/juju/charmstore.v5-unstable/internal/legacy"
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "gopkg.in/errgo.v1"
+ "gopkg.in/juju/charm.v6-unstable"
+ "gopkg.in/juju/charmrepo.v2-unstable"
+ "gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+
+ "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+ "gopkg.in/juju/charmstore.v5-unstable/internal/mempool"
+ "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+ "gopkg.in/juju/charmstore.v5-unstable/internal/router"
+ "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+)
+
+type Handler struct {
+ v4 v4.Handler
+}
+
+type reqHandler struct {
+ v4 v4.ReqHandler
+ mux *http.ServeMux
+ store *charmstore.Store
+}
+
+// reqHandlerPool holds a cache of ReqHandlers to save
+// on allocation time.
+var reqHandlerPool = mempool.Pool{
+ New: func() interface{} {
+ return newReqHandler()
+ },
+}
+
+func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams) charmstore.HTTPCloseHandler {
+ return &Handler{
+ v4: v4.New(pool, config),
+ }
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ req.ParseForm()
+ rh, err := h.newReqHandler()
+ if err != nil {
+ router.WriteError(w, err)
+ return
+ }
+ defer rh.close()
+ rh.mux.ServeHTTP(w, req)
+}
+
+func (h *Handler) Close() {
+}
+
+func (h *Handler) newReqHandler() (*reqHandler, error) {
+ v4h, err := h.v4.NewReqHandler()
+ if err != nil {
+ return nil, errgo.Mask(err, errgo.Is(charmstore.ErrTooManySessions))
+ }
+ rh := reqHandlerPool.Get().(*reqHandler)
+ rh.v4 = v4h
+ rh.store = v4h.Store
+ return rh, nil
+}
+
+// newReqHandler returns a new instance of the legacy API handler.
+// The returned value has a zero v4 field.
+func newReqHandler() *reqHandler {
+ h := &reqHandler{
+ mux: http.NewServeMux(),
+ }
+ h.handle("/charm-info", router.HandleJSON(h.serveCharmInfo))
+ h.handle("/charm/", router.HandleErrors(h.serveCharm))
+ h.handle("/charm-event", router.HandleJSON(h.serveCharmEvent))
+ return h
+}
+
+func (h *reqHandler) handle(path string, handler http.Handler) {
+ prefix := strings.TrimSuffix(path, "/")
+ h.mux.Handle(path, http.StripPrefix(prefix, handler))
+}
+
+func (h *reqHandler) close() {
+ h.v4.Close()
+ h.v4 = v4.ReqHandler{}
+ reqHandlerPool.Put(h)
+}
+
+func (h *reqHandler) serveCharm(w http.ResponseWriter, req *http.Request) error {
+ if req.Method != "GET" && req.Method != "HEAD" {
+ return params.ErrMethodNotAllowed
+ }
+ curl, err := charm.ParseURL(strings.TrimPrefix(req.URL.Path, "/"))
+ if err != nil {
+ return errgo.WithCausef(err, params.ErrNotFound, "")
+ }
+ return h.v4.Router.Handlers().Id["archive"](curl, w, req)
+}
+
+// charmStatsKey returns a stats key for the given charm reference and kind.
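+// For example, given kind params.StatsCharmInfo and the reference
+// cs:~who/trusty/wordpress, the key is
+// [params.StatsCharmInfo, "trusty", "wordpress", "who"]; the user element
+// is omitted for promulgated references.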
+func charmStatsKey(url *charm.URL, kind string) []string { + if url.User == "" { + return []string{kind, url.Series, url.Name} + } + return []string{kind, url.Series, url.Name, url.User} +} + +var errNotFound = fmt.Errorf("entry not found") + +func (h *reqHandler) serveCharmInfo(_ http.Header, req *http.Request) (interface{}, error) { + response := make(map[string]*charmrepo.InfoResponse) + for _, url := range req.Form["charms"] { + c := &charmrepo.InfoResponse{} + response[url] = c + curl, err := charm.ParseURL(url) + if err != nil { + err = errNotFound + } + var entity *mongodoc.Entity + if err == nil { + entity, err = h.store.FindBestEntity(curl) + if errgo.Cause(err) == params.ErrNotFound { + // The old API actually returned "entry not found" + // on *any* error, but it seems reasonable to be + // a little more descriptive for other errors. + err = errNotFound + } + } + var rurl *router.ResolvedURL + if err == nil { + rurl = charmstore.EntityResolvedURL(entity) + if h.v4.AuthorizeEntity(rurl, req) != nil { + // The charm is unauthorized and there's no way to + // authorize it as part of the legacy API so we + // just treat it as a not-found error. + err = errNotFound + } + } + if err == nil && entity.BlobHash256 == "" { + // Lazily calculate SHA256 so that we don't burden + // non-legacy code with that task. + // TODO frankban: remove this lazy calculation after the cshash256 + // command is run in the production db. At that point, entities + // always have their blobhash256 field populated, and there is no + // need for this lazy evaluation anymore. + entity.BlobHash256, err = h.store.UpdateEntitySHA256(rurl) + } + // Prepare the response part for this charm. + if err == nil { + curl = entity.PreferredURL(curl.User == "") + c.CanonicalURL = curl.String() + c.Revision = curl.Revision + c.Sha256 = entity.BlobHash256 + c.Digest, err = entityBzrDigest(entity) + if err != nil { + c.Errors = append(c.Errors, err.Error()) + } + if v4.StatsEnabled(req) { + h.store.IncCounterAsync(charmStatsKey(curl, params.StatsCharmInfo)) + } + } else { + c.Errors = append(c.Errors, err.Error()) + if curl != nil && v4.StatsEnabled(req) { + h.store.IncCounterAsync(charmStatsKey(curl, params.StatsCharmMissing)) + } + } + } + return response, nil +} + +// serveCharmEvent returns events related to the charms specified in the +// "charms" query. In this implementation, the only supported event is +// "published", required by the "juju publish" command. +func (h *reqHandler) serveCharmEvent(_ http.Header, req *http.Request) (interface{}, error) { + response := make(map[string]*charmrepo.EventResponse) + for _, url := range req.Form["charms"] { + c := &charmrepo.EventResponse{} + + // Ignore the digest part of the request. + if i := strings.Index(url, "@"); i != -1 { + url = url[:i] + } + // We intentionally do not implement the long_keys query parameter that + // the legacy charm store supported, as "juju publish" does not use it. + response[url] = c + + // Validate the charm URL. + id, err := charm.ParseURL(url) + if err != nil { + c.Errors = []string{"invalid charm URL: " + err.Error()} + continue + } + if id.Revision != -1 { + c.Errors = []string{"got charm URL with revision: " + id.String()} + continue + } + + // Retrieve the charm. 
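+ // Only the fields needed to build the event response are fetched.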
+ entity, err := h.store.FindBestEntity(id, "_id", "uploadtime", "extrainfo") + if err != nil { + if errgo.Cause(err) == params.ErrNotFound { + // The old API actually returned "entry not found" + // on *any* error, but it seems reasonable to be + // a little more descriptive for other errors. + err = errNotFound + } + c.Errors = []string{err.Error()} + continue + } + + // Retrieve the entity Bazaar digest. + c.Digest, err = entityBzrDigest(entity) + if err != nil { + c.Errors = []string{err.Error()} + } else if c.Digest == "" { + // There are two possible reasons why an entity is found without a + // digest: + // 1) the entity has been recently added in the ingestion process, + // but the extra-info has not been sent yet by "charmload"; + // 2) there was an error while ingesting the entity. + // If the entity has been recently published, we assume case 1), + // and therefore we return a not found error, forcing + // "juju publish" to keep retrying and possibly succeed later. + // Otherwise, we return an error so that "juju publish" exits with + // an error and avoids an infinite loop. + if time.Since(entity.UploadTime).Minutes() < 2 { + c.Errors = []string{errNotFound.Error()} + } else { + c.Errors = []string{"digest not found: this can be due to an error while ingesting the entity"} + } + continue + } + + // Prepare the response part for this charm. + c.Kind = "published" + if id.User == "" { + c.Revision = entity.PromulgatedRevision + } else { + c.Revision = entity.Revision + } + c.Time = entity.UploadTime.UTC().Format(time.RFC3339) + if v4.StatsEnabled(req) { + h.store.IncCounterAsync(charmStatsKey(id, params.StatsCharmEvent)) + } + } + return response, nil +} + +func entityBzrDigest(entity *mongodoc.Entity) (string, error) { + value, found := entity.ExtraInfo[params.BzrDigestKey] + if !found { + return "", nil + } + var digest string + if err := json.Unmarshal(value, &digest); err != nil { + return "", errgo.Notef(err, "cannot unmarshal digest") + } + return digest, nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/api_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,638 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package legacy_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "time" + + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats" +) + +var serverParams = charmstore.ServerParams{ + AuthUsername: "test-user", + AuthPassword: "test-password", +} + +type APISuite struct { + jujutesting.IsolatedMgoSuite + srv *charmstore.Server + store *charmstore.Store +} + +var _ = gc.Suite(&APISuite{}) + +func (s *APISuite) SetUpTest(c *gc.C) { + s.IsolatedMgoSuite.SetUpTest(c) + s.srv, s.store = newServer(c, s.Session, serverParams) +} + +func (s *APISuite) TearDownTest(c *gc.C) { + s.store.Close() + s.store.Pool().Close() + s.srv.Close() + s.IsolatedMgoSuite.TearDownTest(c) +} + +func newServer(c *gc.C, session *mgo.Session, config charmstore.ServerParams) (*charmstore.Server, *charmstore.Store) { + db := session.DB("charmstore") + pool, err := charmstore.NewPool(db, nil, nil, config) + c.Assert(err, gc.IsNil) + srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"": legacy.NewAPIHandler}) + c.Assert(err, gc.IsNil) + return srv, pool.Store() +} + +func (s *APISuite) TestCharmArchive(c *gc.C) { + _, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") + archiveBytes, err := ioutil.ReadFile(wordpress.Path) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: "/charm/precise/wordpress-0", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) + c.Assert(rec.Header().Get("Content-Length"), gc.Equals, fmt.Sprint(len(rec.Body.Bytes()))) + + // Test with unresolved URL. + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: "/charm/wordpress", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) + c.Assert(rec.Header().Get("Content-Length"), gc.Equals, fmt.Sprint(len(rec.Body.Bytes()))) + + // Check that the HTTP range logic is plugged in OK. If this + // is working, we assume that the whole thing is working OK, + // as net/http is well-tested. 
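+ // Range bounds are inclusive, so bytes=10-100 should yield the 91 bytes
+ // archiveBytes[10:101].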
+ rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: "/charm/precise/wordpress-0", + Header: http.Header{"Range": {"bytes=10-100"}}, + }) + c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes())) + c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes[10:101]) +} + +func (s *APISuite) TestPostNotAllowed(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Method: "POST", + URL: "/charm/precise/wordpress", + ExpectStatus: http.StatusMethodNotAllowed, + ExpectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: params.ErrMethodNotAllowed.Error(), + }, + }) +} + +func (s *APISuite) TestCharmArchiveUnresolvedURL(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm/wordpress", + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:wordpress"`, + }, + }) +} + +func (s *APISuite) TestCharmInfoNotFound(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-info?charms=cs:precise/something-23", + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.InfoResponse{ + "cs:precise/something-23": { + Errors: []string{"entry not found"}, + }, + }, + }) +} + +func (s *APISuite) TestServeCharmInfo(c *gc.C) { + wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-1") + hashSum := fileSHA256(c, wordpress.Path) + digest, err := json.Marshal("who@canonical.com-bzr-digest") + c.Assert(err, gc.IsNil) + + tests := []struct { + about string + url string + extrainfo map[string][]byte + canonical string + sha string + digest string + revision int + err string + }{{ + about: "full charm URL with digest extra info", + url: wordpressURL.String(), + extrainfo: map[string][]byte{ + params.BzrDigestKey: digest, + }, + canonical: "cs:precise/wordpress-1", + sha: hashSum, + digest: "who@canonical.com-bzr-digest", + revision: 1, + }, { + about: "full charm URL without digest extra info", + url: wordpressURL.String(), + canonical: "cs:precise/wordpress-1", + sha: hashSum, + revision: 1, + }, { + about: "partial charm URL with digest extra info", + url: "cs:wordpress", + extrainfo: map[string][]byte{ + params.BzrDigestKey: digest, + }, + canonical: "cs:precise/wordpress-1", + sha: hashSum, + digest: "who@canonical.com-bzr-digest", + revision: 1, + }, { + about: "partial charm URL without extra info", + url: "cs:wordpress", + canonical: "cs:precise/wordpress-1", + sha: hashSum, + revision: 1, + }, { + about: "invalid digest extra info", + url: "cs:wordpress", + extrainfo: map[string][]byte{ + params.BzrDigestKey: []byte("[]"), + }, + canonical: "cs:precise/wordpress-1", + sha: hashSum, + revision: 1, + err: `cannot unmarshal digest: json: cannot unmarshal array into Go value of type string`, + }, { + about: "charm not found", + url: "cs:precise/non-existent", + err: "entry not found", + }, { + about: "invalid charm URL", + url: "cs:/bad", + err: `entry not found`, + }, { + about: "invalid charm schema", + url: "gopher:archie-server", + err: `entry not found`, + }, { + about: "invalid URL", + url: "/charm-info?charms=cs:not-found", + err: "entry not found", + }} + + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + err = s.store.UpdateEntity(wordpressURL, bson.D{{ + "$set", bson.D{{"extrainfo", 
test.extrainfo}}, + }}) + c.Assert(err, gc.IsNil) + expectInfo := charmrepo.InfoResponse{ + CanonicalURL: test.canonical, + Sha256: test.sha, + Revision: test.revision, + Digest: test.digest, + } + if test.err != "" { + expectInfo.Errors = []string{test.err} + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-info?charms=" + test.url, + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.InfoResponse{ + test.url: expectInfo, + }, + }) + } +} + +func (s *APISuite) TestCharmInfoCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + // Add two charms to the database, a promulgated one and a user owned one. + s.addPublicCharm(c, "wordpress", "cs:utopic/wordpress-42") + s.addPublicCharm(c, "wordpress", "cs:~who/trusty/wordpress-47") + + requestInfo := func(id string, times int) { + for i := 0; i < times; i++ { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: "/charm-info?charms=" + id, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + } + } + + // Request charm info several times for the promulgated charm, + // the user owned one and a missing charm. + requestInfo("utopic/wordpress-42", 4) + requestInfo("~who/trusty/wordpress-47", 3) + requestInfo("precise/django-0", 2) + + // The charm-info count for the promulgated charm has been updated. + key := []string{params.StatsCharmInfo, "utopic", "wordpress"} + stats.CheckCounterSum(c, s.store, key, false, 4) + + // The charm-info count for the user owned charm has been updated. + key = []string{params.StatsCharmInfo, "trusty", "wordpress", "who"} + stats.CheckCounterSum(c, s.store, key, false, 3) + + // The charm-missing count for the missing charm has been updated. + key = []string{params.StatsCharmMissing, "precise", "django"} + stats.CheckCounterSum(c, s.store, key, false, 2) + + // The charm-info count for the missing charm is still zero. 
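+ // (Requests for missing charms increment charm-missing, not charm-info.)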
+ key = []string{params.StatsCharmInfo, "precise", "django"} + stats.CheckCounterSum(c, s.store, key, false, 0) +} + +func (s *APISuite) TestAPIInfoWithGatedCharm(c *gc.C) { + wordpressURL, _ := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") + s.store.SetPerms(&wordpressURL.URL, "read", "bob") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-info?charms=" + wordpressURL.URL.String(), + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.InfoResponse{ + wordpressURL.URL.String(): { + Errors: []string{"entry not found"}, + }, + }, + }) +} + +func fileSHA256(c *gc.C, path string) string { + f, err := os.Open(path) + c.Assert(err, gc.IsNil) + hash := sha256.New() + _, err = io.Copy(hash, f) + c.Assert(err, gc.IsNil) + return fmt.Sprintf("%x", hash.Sum(nil)) +} + +func (s *APISuite) TestCharmPackageGet(c *gc.C) { + wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") + archiveBytes, err := ioutil.ReadFile(wordpress.Path) + c.Assert(err, gc.IsNil) + + srv := httptest.NewServer(s.srv) + defer srv.Close() + + s.PatchValue(&charmrepo.CacheDir, c.MkDir()) + s.PatchValue(&charmrepo.LegacyStore.BaseURL, srv.URL) + + ch, err := charmrepo.LegacyStore.Get(&wordpressURL.URL) + c.Assert(err, gc.IsNil) + chArchive := ch.(*charm.CharmArchive) + + data, err := ioutil.ReadFile(chArchive.Path) + c.Assert(err, gc.IsNil) + c.Assert(data, gc.DeepEquals, archiveBytes) +} + +func (s *APISuite) TestCharmPackageCharmInfo(c *gc.C) { + wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", "cs:precise/wordpress-0") + wordpressSHA256 := fileSHA256(c, wordpress.Path) + mysqlURL, mySQL := s.addPublicCharm(c, "wordpress", "cs:precise/mysql-2") + mysqlSHA256 := fileSHA256(c, mySQL.Path) + notFoundURL := charm.MustParseURL("cs:precise/not-found-3") + + srv := httptest.NewServer(s.srv) + defer srv.Close() + s.PatchValue(&charmrepo.LegacyStore.BaseURL, srv.URL) + + resp, err := charmrepo.LegacyStore.Info(wordpressURL.PreferredURL(), mysqlURL.PreferredURL(), notFoundURL) + c.Assert(err, gc.IsNil) + c.Assert(resp, gc.HasLen, 3) + c.Assert(resp, jc.DeepEquals, []*charmrepo.InfoResponse{{ + CanonicalURL: wordpressURL.String(), + Sha256: wordpressSHA256, + }, { + CanonicalURL: mysqlURL.String(), + Sha256: mysqlSHA256, + Revision: 2, + }, { + Errors: []string{"charm not found: " + notFoundURL.String()}, + }}) +} + +func (s *APISuite) TestSHA256Laziness(c *gc.C) { + // TODO frankban: remove this test after updating entities in the + // production db with their SHA256 hash value. Entities are updated by + // running the cshash256 command. + id, ch := s.addPublicCharm(c, "wordpress", "cs:~who/precise/wordpress-0") + url := id.String() + sum256 := fileSHA256(c, ch.Path) + + hashtesting.CheckSHA256Laziness(c, s.store, &id.URL, func() { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-info?charms=" + url, + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.InfoResponse{ + url: { + CanonicalURL: url, + Sha256: sum256, + Revision: 0, + }, + }, + }) + }) +} + +var serverStatusTests = []struct { + path string + code int +}{ + {"/charm-info/any", 404}, + {"/charm/bad-url", 404}, + {"/charm/bad-series/wordpress", 404}, +} + +func (s *APISuite) TestServerStatus(c *gc.C) { + // TODO(rog) add tests from old TestServerStatus tests + // when we implement charm-info. 
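+ // For now only the not-found responses listed above are exercised.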
+ for i, test := range serverStatusTests { + c.Logf("test %d: %s", i, test.path) + resp := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: test.path, + }) + c.Assert(resp.Code, gc.Equals, test.code, gc.Commentf("body: %s", resp.Body)) + } +} + +func (s *APISuite) addPublicCharm(c *gc.C, charmName, curl string) (*router.ResolvedURL, *charm.CharmArchive) { + rurl := &router.ResolvedURL{ + URL: *charm.MustParseURL(curl), + PromulgatedRevision: -1, + } + if rurl.URL.User == "" { + rurl.URL.User = "charmers" + rurl.PromulgatedRevision = rurl.URL.Revision + } + archive := storetesting.Charms.CharmArchive(c.MkDir(), charmName) + err := s.store.AddCharmWithArchive(rurl, archive) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + return rurl, archive +} + +var serveCharmEventErrorsTests = []struct { + about string + url string + responseUrl string + err string +}{{ + about: "invalid charm URL", + url: "no-such:charm", + err: `invalid charm URL: charm or bundle URL has invalid schema: "no-such:charm"`, +}, { + about: "revision specified", + url: "cs:utopic/django-42", + err: "got charm URL with revision: cs:utopic/django-42", +}, { + about: "charm not found", + url: "cs:trusty/django", + err: "entry not found", +}, { + about: "ignoring digest", + url: "precise/django-47@a-bzr-digest", + responseUrl: "precise/django-47", + err: "got charm URL with revision: cs:precise/django-47", +}} + +func (s *APISuite) TestServeCharmEventErrors(c *gc.C) { + for i, test := range serveCharmEventErrorsTests { + c.Logf("test %d: %s", i, test.about) + if test.responseUrl == "" { + test.responseUrl = test.url + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-event?charms=" + test.url, + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.EventResponse{ + test.responseUrl: { + Errors: []string{test.err}, + }, + }, + }) + } +} + +func (s *APISuite) TestServeCharmEvent(c *gc.C) { + // Add three charms to the charm store. + mysqlUrl, _ := s.addPublicCharm(c, "mysql", "cs:trusty/mysql-2") + riakUrl, _ := s.addPublicCharm(c, "riak", "cs:utopic/riak-3") + + // Update the mysql charm with a valid digest extra-info. + s.addExtraInfoDigest(c, mysqlUrl, "who@canonical.com-bzr-digest") + + // Update the riak charm with an invalid digest extra-info. + err := s.store.UpdateEntity(riakUrl, bson.D{{ + "$set", bson.D{{"extrainfo", map[string][]byte{ + params.BzrDigestKey: []byte(":"), + }}}, + }}) + c.Assert(err, gc.IsNil) + + // Retrieve the entities. 
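	// (FindEntity returns the stored entity document, whose Revision
	// and UploadTime fields drive the expected charm-event responses
	// built below.)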
+ mysql, err := s.store.FindEntity(mysqlUrl) + c.Assert(err, gc.IsNil) + riak, err := s.store.FindEntity(riakUrl) + c.Assert(err, gc.IsNil) + + tests := []struct { + about string + query string + expect map[string]*charmrepo.EventResponse + }{{ + about: "valid digest", + query: "?charms=cs:trusty/mysql", + expect: map[string]*charmrepo.EventResponse{ + "cs:trusty/mysql": { + Kind: "published", + Revision: mysql.Revision, + Time: mysql.UploadTime.UTC().Format(time.RFC3339), + Digest: "who@canonical.com-bzr-digest", + }, + }, + }, { + about: "invalid digest", + query: "?charms=cs:utopic/riak", + expect: map[string]*charmrepo.EventResponse{ + "cs:utopic/riak": { + Kind: "published", + Revision: riak.Revision, + Time: riak.UploadTime.UTC().Format(time.RFC3339), + Errors: []string{"cannot unmarshal digest: invalid character ':' looking for beginning of value"}, + }, + }, + }, { + about: "partial charm URL", + query: "?charms=cs:mysql", + expect: map[string]*charmrepo.EventResponse{ + "cs:mysql": { + Kind: "published", + Revision: mysql.Revision, + Time: mysql.UploadTime.UTC().Format(time.RFC3339), + Digest: "who@canonical.com-bzr-digest", + }, + }, + }, { + about: "digest in request", + query: "?charms=cs:trusty/mysql@my-digest", + expect: map[string]*charmrepo.EventResponse{ + "cs:trusty/mysql": { + Kind: "published", + Revision: mysql.Revision, + Time: mysql.UploadTime.UTC().Format(time.RFC3339), + Digest: "who@canonical.com-bzr-digest", + }, + }, + }, { + about: "multiple charms", + query: "?charms=cs:mysql&charms=utopic/riak", + expect: map[string]*charmrepo.EventResponse{ + "cs:mysql": { + Kind: "published", + Revision: mysql.Revision, + Time: mysql.UploadTime.UTC().Format(time.RFC3339), + Digest: "who@canonical.com-bzr-digest", + }, + "utopic/riak": { + Kind: "published", + Revision: riak.Revision, + Time: riak.UploadTime.UTC().Format(time.RFC3339), + Errors: []string{"cannot unmarshal digest: invalid character ':' looking for beginning of value"}, + }, + }, + }} + + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-event" + test.query, + ExpectStatus: http.StatusOK, + ExpectBody: test.expect, + }) + } +} + +func (s *APISuite) TestServeCharmEventDigestNotFound(c *gc.C) { + // Add a charm without a Bazaar digest. + url, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-42") + + // Pretend the entity has been uploaded right now, and assume the test does + // not take more than two minutes to run. + s.updateUploadTime(c, url, time.Now()) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-event?charms=cs:trusty/wordpress", + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.EventResponse{ + "cs:trusty/wordpress": { + Errors: []string{"entry not found"}, + }, + }, + }) + + // Now change the entity upload time to be more than 2 minutes ago. + s.updateUploadTime(c, url, time.Now().Add(-121*time.Second)) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-event?charms=cs:trusty/wordpress", + ExpectStatus: http.StatusOK, + ExpectBody: map[string]charmrepo.EventResponse{ + "cs:trusty/wordpress": { + Errors: []string{"digest not found: this can be due to an error while ingesting the entity"}, + }, + }, + }) +} + +func (s *APISuite) TestServeCharmEventLastRevision(c *gc.C) { + // Add two revisions of the same charm. 
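	// (An unrevisioned id such as "wordpress" resolves to the most
	// recently uploaded revision, so the response below is expected to
	// report revision 2.)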
+ url1, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-1") + url2, _ := s.addPublicCharm(c, "wordpress", "cs:trusty/wordpress-2") + + // Update the resulting entities with Bazaar digests. + s.addExtraInfoDigest(c, url1, "digest-1") + s.addExtraInfoDigest(c, url2, "digest-2") + + // Retrieve the most recent revision of the entity. + entity, err := s.store.FindEntity(url2) + c.Assert(err, gc.IsNil) + + // Ensure the last revision is correctly returned. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: "/charm-event?charms=wordpress", + ExpectStatus: http.StatusOK, + ExpectBody: map[string]*charmrepo.EventResponse{ + "wordpress": { + Kind: "published", + Revision: 2, + Time: entity.UploadTime.UTC().Format(time.RFC3339), + Digest: "digest-2", + }, + }, + }) +} + +func (s *APISuite) addExtraInfoDigest(c *gc.C, id *router.ResolvedURL, digest string) { + b, err := json.Marshal(digest) + c.Assert(err, gc.IsNil) + err = s.store.UpdateEntity(id, bson.D{{ + "$set", bson.D{{"extrainfo", map[string][]byte{ + params.BzrDigestKey: b, + }}}, + }}) + c.Assert(err, gc.IsNil) +} + +func (s *APISuite) updateUploadTime(c *gc.C, id *router.ResolvedURL, uploadTime time.Time) { + err := s.store.UpdateEntity(id, bson.D{{ + "$set", bson.D{{"uploadtime", uploadTime}}, + }}) + c.Assert(err, gc.IsNil) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/package_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/legacy/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package legacy_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/legacy" + +import ( + "testing" + + jujutesting "github.com/juju/testing" +) + +func TestPackage(t *testing.T) { + jujutesting.MgoTestPackage(t, nil) +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/LICENSE' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/pool.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/pool.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/pool.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,80 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mempool implements a version of sync.Pool +// as supported in Go versions later than 1.2. +// +// TODO use sync.Pool when we can use Go 1.3 or later. +package mempool + +import "sync" + +// A Pool is a set of temporary objects that may be individually saved and +// retrieved. +// +// Any item stored in the Pool may be removed automatically at any time without +// notification. If the Pool holds the only reference when this happens, the +// item might be deallocated. +// +// A Pool is safe for use by multiple goroutines simultaneously. +// +// Pool's purpose is to cache allocated but unused items for later reuse, +// relieving pressure on the garbage collector. That is, it makes it easy to +// build efficient, thread-safe free lists. However, it is not suitable for all +// free lists. +// +// An appropriate use of a Pool is to manage a group of temporary items +// silently shared among and potentially reused by concurrent independent +// clients of a package. Pool provides a way to amortize allocation overhead +// across many clients. +// +// An example of good use of a Pool is in the fmt package, which maintains a +// dynamically-sized store of temporary output buffers. The store scales under +// load (when many goroutines are actively printing) and shrinks when +// quiescent. +// +// On the other hand, a free list maintained as part of a short-lived object is +// not a suitable use for a Pool, since the overhead does not amortize well in +// that scenario. It is more efficient to have such objects implement their own +// free list. +// +type Pool struct { + mu sync.Mutex + values []interface{} + + // New optionally specifies a function to generate + // a value when Get would otherwise return nil. + // It may not be changed concurrently with calls to Get. + New func() interface{} +} + +// Put adds x to the pool. +func (p *Pool) Put(x interface{}) { + p.mu.Lock() + p.values = append(p.values, x) + p.mu.Unlock() +} + +// Get selects an arbitrary item from the Pool, removes it from the +// Pool, and returns it to the caller. +// Get may choose to ignore the pool and treat it as empty. +// Callers should not assume any relation between values passed to Put and +// the values returned by Get. +// +// If Get would otherwise return nil and p.New is non-nil, Get returns +// the result of calling p.New. 
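// A minimal usage sketch (hypothetical, pooling bytes.Buffer values):
//
//	p := Pool{New: func() interface{} { return new(bytes.Buffer) }}
//	buf := p.Get().(*bytes.Buffer)
//	buf.Reset()
//	// ... use buf ...
//	p.Put(buf)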
+func (p *Pool) Get() interface{} { + p.mu.Lock() + if n := len(p.values); n > 0 { + v := p.values[n-1] + p.values = p.values[0 : n-1] + p.mu.Unlock() + return v + } + p.mu.Unlock() + if p.New == nil { + return nil + } + return p.New() +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/pool_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/pool_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/mempool/pool_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,89 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Pool is no-op under race detector, so all these tests do not work. +// +build !race + +package mempool + +import ( + "testing" +) + +// Note: the tests in this file are taken directly from +// $GOROOT/src/sync/pool_test.go, with the +// exception of all the GC-driven behaviour +// which has been removed. + +func TestPool(t *testing.T) { + var p Pool + if p.Get() != nil { + t.Fatal("expected empty") + } + p.Put("a") + p.Put("b") + if g := p.Get(); g != "b" { + t.Fatalf("got %#v; want a", g) + } + if g := p.Get(); g != "a" { + t.Fatalf("got %#v; want b", g) + } + if g := p.Get(); g != nil { + t.Fatalf("got %#v; want nil", g) + } + + p.Put("c") +} + +func TestPoolNew(t *testing.T) { + i := 0 + p := Pool{ + New: func() interface{} { + i++ + return i + }, + } + if v := p.Get(); v != 1 { + t.Fatalf("got %v; want 1", v) + } + if v := p.Get(); v != 2 { + t.Fatalf("got %v; want 2", v) + } + p.Put(42) + if v := p.Get(); v != 42 { + t.Fatalf("got %v; want 42", v) + } + if v := p.Get(); v != 3 { + t.Fatalf("got %v; want 3", v) + } +} + +func TestPoolStress(t *testing.T) { + const P = 10 + N := int(1e6) + if testing.Short() { + N /= 100 + } + var p Pool + done := make(chan bool) + for i := 0; i < P; i++ { + go func() { + var v interface{} = 0 + for j := 0; j < N; j++ { + if v == nil { + v = 0 + } + p.Put(v) + v = p.Get() + if v != nil && v.(int) != 0 { + t.Fatalf("expect 0, got %v", v) + } + } + done <- true + }() + } + for i := 0; i < P; i++ { + <-done + } +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,300 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package mongodoc // import "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + +import ( + "time" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/mgo.v2/bson" +) + +// Entity holds the in-database representation of charm or bundle's +// document in the charms collection. It holds information +// on one specific revision and series of the charm or bundle - see +// also BaseEntity. +// +// We ensure that there is always a single BaseEntity for any +// set of entities which share the same base URL. +type Entity struct { + // URL holds the fully specified URL of the charm or bundle. + // e.g. cs:precise/wordpress-34, cs:~user/trusty/foo-2 + URL *charm.URL `bson:"_id"` + + // BaseURL holds the reference URL of the charm or bundle + // (this omits the series and revision from URL) + // e.g. 
cs:wordpress, cs:~user/foo + BaseURL *charm.URL + + // User holds the user part of the entity URL (for instance, "joe"). + User string + + // Name holds the name of the entity (for instance "wordpress"). + Name string + + // Revision holds the entity revision (it cannot be -1/unset). + Revision int + + // Series holds the entity series (for instance "trusty" or "bundle"). + // For multi-series charms, this will be empty. + Series string + + // SupportedSeries holds the series supported by a charm. + // For non-multi-series charms, this is a single element slice + // containing the value in Series. + SupportedSeries []string + + // BlobHash holds the hash checksum of the blob, in hexadecimal format, + // as created by blobstore.NewHash. + BlobHash string + + // BlobHash256 holds the SHA256 hash checksum of the blob, + // in hexadecimal format. This is only used by the legacy + // API, and is calculated lazily the first time it is required. + BlobHash256 string + + // Size holds the size of the archive blob. + // TODO(rog) rename this to BlobSize. + Size int64 + + // BlobName holds the name that the archive blob is given in the blob store. + BlobName string + + UploadTime time.Time + + // ExtraInfo holds arbitrary extra metadata associated with + // the entity. The byte slices hold JSON-encoded data. + ExtraInfo map[string][]byte `bson:",omitempty" json:",omitempty"` + + // TODO(rog) verify that all these types marshal to the expected + // JSON form. + CharmMeta *charm.Meta + CharmConfig *charm.Config + CharmActions *charm.Actions + + // CharmProvidedInterfaces holds all the relation + // interfaces provided by the charm + CharmProvidedInterfaces []string + + // CharmRequiredInterfaces is similar to CharmProvidedInterfaces + // for required interfaces. + CharmRequiredInterfaces []string + + BundleData *charm.BundleData + BundleReadMe string + + // BundleCharms includes all the charm URLs referenced + // by the bundle, including base URLs where they are + // not already included. + BundleCharms []*charm.URL + + // BundleMachineCount counts the machines used or created + // by the bundle. It is nil for charms. + BundleMachineCount *int + + // BundleUnitCount counts the units created by the bundle. + // It is nil for charms. + BundleUnitCount *int + + // TODO Add fields denormalized for search purposes + // and search ranking field(s). + + // Contents holds entries for frequently accessed + // entries in the file's blob. Storing this avoids + // the need to linearly read the zip file's manifest + // every time we access one of these files. + Contents map[FileId]ZipFile `json:",omitempty" bson:",omitempty"` + + // PromulgatedURL holds the promulgated URL of the entity. If the entity + // is not promulgated this should be set to nil. + PromulgatedURL *charm.URL `json:",omitempty" bson:"promulgated-url,omitempty"` + + // PromulgatedRevision holds the revision number from the promulgated URL. + // If the entity is not promulgated this should be set to -1. + PromulgatedRevision int `bson:"promulgated-revision"` + + // Development holds whether the entity is in development or published. + // A development entity can only be referred to using URLs including the + // "development" channel. + Development bool +} + +// PreferredURL returns the preferred way to refer to this entity. If +// the entity has a promulgated URL and usePromulgated is true then the +// promulgated URL will be used, otherwise the standard URL is used. +// +// The returned URL may be modified freely. 
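// For example, mirroring the cases exercised in doc_test.go:
//
//	e := &Entity{
//		URL:            charm.MustParseURL("~dmr/trusty/c-1"),
//		PromulgatedURL: charm.MustParseURL("trusty/c-2"),
//	}
//	e.PreferredURL(false).String() // "cs:~dmr/trusty/c-1"
//	e.PreferredURL(true).String()  // "cs:trusty/c-2"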
+func (e *Entity) PreferredURL(usePromulgated bool) *charm.URL {
+	var u charm.URL
+	if usePromulgated && e.PromulgatedURL != nil {
+		u = *e.PromulgatedURL
+	} else {
+		u = *e.URL
+	}
+	if e.Development {
+		u.Channel = charm.DevelopmentChannel
+	}
+	return &u
+}
+
+// BaseEntity holds metadata for a charm or bundle
+// independent of any specific uploaded revision or series.
+type BaseEntity struct {
+	// URL holds the reference URL of a charm or bundle
+	// regardless of its revision, series or promulgation status
+	// (this omits the revision and series from URL).
+	// e.g., cs:~user/collection/foo
+	URL *charm.URL `bson:"_id"`
+
+	// User holds the user part of the entity URL (for instance, "joe").
+	User string
+
+	// Name holds the name of the entity (for instance "wordpress").
+	Name string
+
+	// Public specifies whether the charm or bundle
+	// is available to all users. If this is true, the ACLs will
+	// be ignored when reading a charm.
+	Public bool
+
+	// ACLs holds permission information relevant to the base entity.
+	// The permissions apply to all revisions.
+	ACLs ACL
+
+	// DevelopmentACLs is similar to ACLs but applies to all development
+	// revisions.
+	DevelopmentACLs ACL
+
+	// Promulgated specifies whether the charm or bundle should be
+	// promulgated.
+	Promulgated IntBool
+
+	// CommonInfo holds arbitrary common extra metadata associated with
+	// the base entity. Those data apply to all revisions.
+	// The byte slices hold JSON-encoded data.
+	CommonInfo map[string][]byte `bson:",omitempty" json:",omitempty"`
+}
+
+// ACL holds lists of users and groups that are
+// allowed to perform specific actions.
+type ACL struct {
+	// Read holds users and groups that are allowed to read the charm
+	// or bundle.
+	Read []string
+	// Write holds users and groups that are allowed to upload/modify the charm
+	// or bundle.
+	Write []string
+}
+
+type FileId string
+
+const (
+	FileReadMe FileId = "readme"
+	FileIcon   FileId = "icon"
+)
+
+// ZipFile refers to a specific file in the uploaded archive blob.
+type ZipFile struct {
+	// Compressed specifies whether the file is compressed or not.
+	Compressed bool
+
+	// Offset holds the offset into the zip archive of the start of
+	// the file's data.
+	Offset int64
+
+	// Size holds the size of the file before decompression.
+	Size int64
+}
+
+// IsValid reports whether f is a valid (non-zero) reference to
+// a zip file.
+func (f ZipFile) IsValid() bool {
+	// Note that no valid zip files can start at offset zero,
+	// because that's where the zip header lives.
+	return f != ZipFile{}
+}
+
+// Log holds the in-database representation of a log message sent to the charm
+// store.
+type Log struct {
+	// Data holds the JSON-encoded log message.
+	Data []byte
+
+	// Level holds the log level: whether the log is a warning, an error, etc.
+	Level LogLevel
+
+	// Type holds the log type.
+	Type LogType
+
+	// URLs holds a slice of entity URLs associated with the log message.
+	URLs []*charm.URL
+
+	// Time holds the time of the log.
+	Time time.Time
+}
+
+// LogLevel holds the level associated with a log.
+type LogLevel int
+
+// When introducing a new log level, do the following:
+// 1) add the new level as a constant below;
+// 2) add the new level in params as a string for HTTP requests/responses;
+// 3) include the new level in the mongodocLogLevels and paramsLogLevels maps
+// in internal/v4.
+const (
+	_ LogLevel = iota
+	InfoLevel
+	WarningLevel
+	ErrorLevel
+)
+
+// LogType holds the type of the log.
+type LogType int + +// When introducing a new log type, do the following: +// 1) add the new type as a constant below; +// 2) add the new type in params as a string for HTTP requests/responses; +// 3) include the new type in the mongodocLogTypes and paramsLogTypes maps +// in internal/v4. +const ( + _ LogType = iota + IngestionType + LegacyStatisticsType +) + +type MigrationName string + +// Migration holds information about the database migration. +type Migration struct { + // Executed holds the migration names for migrations already executed. + Executed []MigrationName +} + +// IntBool is a bool that will be represented internally in the database as 1 for +// true and -1 for false. +type IntBool bool + +func (b IntBool) GetBSON() (interface{}, error) { + if b { + return 1, nil + } + return -1, nil +} + +func (b *IntBool) SetBSON(raw bson.Raw) error { + var x int + if err := raw.Unmarshal(&x); err != nil { + return errgo.Notef(err, "cannot unmarshal value") + } + switch x { + case 1: + *b = IntBool(true) + case -1: + *b = IntBool(false) + default: + return errgo.Newf("invalid value %d", x) + } + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc/doc_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,108 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package mongodoc_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + +import ( + "testing" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +type DocSuite struct{} + +var _ = gc.Suite(&DocSuite{}) + +func (s *DocSuite) TestIntBoolGetBSON(c *gc.C) { + test := bson.D{{"true", mongodoc.IntBool(true)}, {"false", mongodoc.IntBool(false)}} + b, err := bson.Marshal(test) + c.Assert(err, gc.IsNil) + result := make(map[string]int, 2) + err = bson.Unmarshal(b, &result) + c.Assert(err, gc.IsNil) + c.Assert(result["true"], gc.Equals, 1) + c.Assert(result["false"], gc.Equals, -1) +} + +func (s *DocSuite) TestIntBoolSetBSON(c *gc.C) { + test := bson.D{{"true", 1}, {"false", -1}} + b, err := bson.Marshal(test) + c.Assert(err, gc.IsNil) + var result map[string]mongodoc.IntBool + err = bson.Unmarshal(b, &result) + c.Assert(err, gc.IsNil) + c.Assert(result, jc.DeepEquals, map[string]mongodoc.IntBool{"true": true, "false": false}) +} + +func (s *DocSuite) TestIntBoolSetBSONIncorrectType(c *gc.C) { + test := bson.D{{"test", "true"}} + b, err := bson.Marshal(test) + c.Assert(err, gc.IsNil) + var result map[string]mongodoc.IntBool + err = bson.Unmarshal(b, &result) + c.Assert(err, gc.ErrorMatches, "cannot unmarshal value: BSON kind 0x02 isn't compatible with type int") +} + +func (s *DocSuite) TestIntBoolSetBSONInvalidValue(c *gc.C) { + test := bson.D{{"test", 2}} + b, err := bson.Marshal(test) + c.Assert(err, gc.IsNil) + var result map[string]mongodoc.IntBool + err = bson.Unmarshal(b, &result) + c.Assert(err, gc.ErrorMatches, `invalid value 2`) +} + +var preferredURLTests = []struct { + entity *mongodoc.Entity + usePromulgated bool + expectURLFalse string + expectURLTrue string +}{{ + entity: &mongodoc.Entity{ + URL: charm.MustParseURL("~ken/trusty/b-1"), + }, + expectURLFalse: 
"cs:~ken/trusty/b-1", + expectURLTrue: "cs:~ken/trusty/b-1", +}, { + entity: &mongodoc.Entity{ + URL: charm.MustParseURL("~dmr/trusty/c-1"), + PromulgatedURL: charm.MustParseURL("trusty/c-2"), + }, + expectURLFalse: "cs:~dmr/trusty/c-1", + expectURLTrue: "cs:trusty/c-2", +}, { + entity: &mongodoc.Entity{ + URL: charm.MustParseURL("~dmr/trusty/c-1"), + PromulgatedURL: charm.MustParseURL("trusty/c-2"), + Development: true, + }, + expectURLFalse: "cs:~dmr/development/trusty/c-1", + expectURLTrue: "cs:development/trusty/c-2", +}, { + entity: &mongodoc.Entity{ + URL: charm.MustParseURL("~dmr/trusty/c-1"), + Development: true, + }, + expectURLFalse: "cs:~dmr/development/trusty/c-1", + expectURLTrue: "cs:~dmr/development/trusty/c-1", +}} + +func (s *DocSuite) TestPreferredURL(c *gc.C) { + for i, test := range preferredURLTests { + c.Logf("test %d: %#v", i, test.entity) + c.Assert(test.entity.PreferredURL(false).String(), gc.Equals, test.expectURLFalse) + c.Assert(test.entity.PreferredURL(true).String(), gc.Equals, test.expectURLTrue) + // Ensure no aliasing + test.entity.PreferredURL(false).Series = "foo" + c.Assert(test.entity.PreferredURL(false).Series, gc.Not(gc.Equals), "foo") + } +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/fieldinclude.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/fieldinclude.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/fieldinclude.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,188 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "encoding/json" + "net/http" + "net/url" + + "gopkg.in/errgo.v1" + + "gopkg.in/juju/charmstore.v5-unstable/audit" +) + +// A FieldQueryFunc is used to retrieve a metadata document for the given URL, +// selecting only those fields specified in keys of the given selector. +type FieldQueryFunc func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) + +// FieldUpdater records field changes made by a FieldUpdateFunc. +type FieldUpdater struct { + fields map[string]interface{} + entries []audit.Entry + search bool +} + +// UpdateField requests that the provided field is updated with +// the given value. +func (u *FieldUpdater) UpdateField(fieldName string, val interface{}, entry *audit.Entry) { + u.fields[fieldName] = val + if entry != nil { + u.entries = append(u.entries, *entry) + } +} + +// UpdateSearch requests that search records are updated. +func (u *FieldUpdater) UpdateSearch() { + u.search = true +} + +// A FieldUpdateFunc is used to update a metadata document for the +// given id. For each field in fields, it should set that field to +// its corresponding value in the metadata document. +type FieldUpdateFunc func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error + +// A FieldUpdateSearchFunc is used to update a search document for the +// given id. For each field in fields, it should set that field to +// its corresponding value in the search document. +type FieldUpdateSearchFunc func(id *ResolvedURL, fields map[string]interface{}) error + +// A FieldGetFunc returns some data from the given document. The +// document will have been returned from an earlier call to the +// associated QueryFunc. 
+type FieldGetFunc func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)
+
+// A FieldPutFunc uses the given FieldUpdater to record the fields to be set
+// in the metadata document for the given id. The path holds the metadata path
+// after the initial prefix has been removed.
+type FieldPutFunc func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error
+
+// FieldIncludeHandlerParams specifies the parameters for FieldIncludeHandler.
+type FieldIncludeHandlerParams struct {
+	// Key is used to group together similar FieldIncludeHandlers
+	// (the same query should be generated for any given key).
+	Key interface{}
+
+	// Query is used to retrieve the document from the database for
+	// GET requests. The fields passed to the query will be the
+	// union of all fields found in all the handlers in the bulk
+	// request.
+	Query FieldQueryFunc
+
+	// Fields specifies which fields are required by the given handler.
+	Fields []string
+
+	// HandleGet actually returns the data from the document retrieved
+	// by Query, for GET requests.
+	HandleGet FieldGetFunc
+
+	// HandlePut generates update operations for a PUT
+	// operation.
+	HandlePut FieldPutFunc
+
+	// Update is used to update the document in the database for
+	// PUT requests.
+	Update FieldUpdateFunc
+
+	// UpdateSearch is used to update the document in the search
+	// database for PUT requests.
+	UpdateSearch FieldUpdateSearchFunc
+}
+
+type fieldIncludeHandler struct {
+	p FieldIncludeHandlerParams
+}
+
+// FieldIncludeHandler returns a BulkIncludeHandler that will perform
+// only a single database query for several requests. See FieldIncludeHandlerParams
+// for more detail.
+//
+// See ../v4/api.go for an example of its use.
+func FieldIncludeHandler(p FieldIncludeHandlerParams) BulkIncludeHandler {
+	return &fieldIncludeHandler{p}
+}
+
+func (h *fieldIncludeHandler) Key() interface{} {
+	return h.p.Key
+}
+
+func (h *fieldIncludeHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error {
+	updater := &FieldUpdater{
+		fields:  make(map[string]interface{}),
+		entries: make([]audit.Entry, 0),
+	}
+	var errs []error
+	errCount := 0
+	setError := func(i int, err error) {
+		if errs == nil {
+			errs = make([]error, len(hs))
+		}
+		if errs[i] == nil {
+			errs[i] = err
+			errCount++
+		}
+	}
+	for i, h := range hs {
+		h := h.(*fieldIncludeHandler)
+		if h.p.HandlePut == nil {
+			setError(i, errgo.New("PUT not supported"))
+			continue
+		}
+		if err := h.p.HandlePut(id, paths[i], values[i], updater, req); err != nil {
+			setError(i, errgo.Mask(err, errgo.Any))
+		}
+	}
+	if errCount == len(hs) {
+		// Every HandlePut request has returned an error, so there
+		// is no need to call Update.
+		return errs
+	}
+	if err := h.p.Update(id, updater.fields, updater.entries); err != nil {
+		for i := range hs {
+			setError(i, err)
+		}
+	}
+	if updater.search {
+		if err := h.p.UpdateSearch(id, updater.fields); err != nil {
+			for i := range hs {
+				setError(i, err)
+			}
+		}
+	}
+	return errs
+}
+
+func (h *fieldIncludeHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) {
+	funcs := make([]FieldGetFunc, len(hs))
+	selector := make(map[string]int)
+	// Extract the handler functions and union all the fields.
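	// (For example, two handlers whose Fields are {"name"} and
	// {"name", "size"} produce the single selector
	// map[string]int{"name": 1, "size": 1}, so one database query
	// serves both includes.)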
+ for i, h := range hs { + h := h.(*fieldIncludeHandler) + funcs[i] = h.p.HandleGet + for _, field := range h.p.Fields { + selector[field] = 1 + } + } + // Make the single query. + doc, err := h.p.Query(id, selector, req) + if err != nil { + // Note: preserve error cause from handlers. + return nil, errgo.Mask(err, errgo.Any) + } + + // Call all the handlers with the resulting query document. + results := make([]interface{}, len(hs)) + for i, f := range funcs { + var err error + results[i], err = f(doc, id, paths[i], flags, req) + if err != nil { + // TODO correlate error with handler (perhaps return + // an error that identifies the slice position of the handler that + // failed). + // Note: preserve error cause from handlers. + return nil, errgo.Mask(err, errgo.Any) + } + } + return results, nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/package_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,14 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package router_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,858 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// The router package implements an HTTP request router for charm store +// HTTP requests. +package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "reflect" + "sort" + "strings" + "sync" + + "github.com/juju/httprequest" + "github.com/juju/utils/parallel" + "gopkg.in/errgo.v1" + charm "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + "gopkg.in/juju/charmstore.v5-unstable/internal/series" +) + +// Implementation note on error handling: +// +// We use errgo.Any only when necessary, so that we can see at a glance +// which are the possible places that could be returning an error with a +// Cause (the only kind of error that can end up setting an HTTP status +// code) + +// BulkIncludeHandler represents a metadata handler that can +// handle multiple metadata "include" requests in a single batch. +// +// For simple metadata handlers that cannot be +// efficiently combined, see SingleIncludeHandler. +// +// All handlers may assume that http.Request.ParseForm +// has been called to parse the URL form values. +type BulkIncludeHandler interface { + // Key returns a value that will be used to group handlers + // together in preparation for a call to HandleGet or HandlePut. + // The key should be comparable for equality. + // Please do not return NaN. That would be silly, OK? + Key() interface{} + + // HandleGet returns the results of invoking all the given handlers + // on the given charm or bundle id. Each result is held in + // the respective element of the returned slice. 
+	//
+	// All of the handlers' Keys will be equal to the receiving handler's
+	// Key.
+	//
+	// Each item in paths holds the remaining metadata path
+	// for the handler in the corresponding position
+	// in hs after the prefix in Handlers.Meta has been stripped,
+	// and flags holds all the URL query values.
+	//
+	// TODO(rog) document indexed errors.
+	HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error)
+
+	// HandlePut invokes a PUT request on all the given handlers on
+	// the given charm or bundle id. If there is an error, the
+	// returned errors slice should contain one element for each element
+	// in paths. The error for handler hs[i] should be returned in errors[i].
+	// If there is no error, an empty slice should be returned.
+	//
+	// Each item in paths holds the remaining metadata path
+	// for the handler in the corresponding position
+	// in hs after the prefix in Handlers.Meta has been stripped,
+	// and flags holds all the URL query values.
+	HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error
+}
+
+// IdHandler handles a charm store request rooted at the given id.
+// The request path (req.URL.Path) holds the URL path after
+// the id has been stripped off.
+type IdHandler func(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error
+
+// Handlers specifies how HTTP requests will be routed
+// by the router. All errors returned by the handlers will
+// be processed by WriteError with their Cause left intact.
+// This means that, for example, if they return an error
+// with a Cause that is params.ErrNotFound, the HTTP
+// status code will reflect that (assuming the error has
+// not been absorbed by the bulk metadata logic).
+type Handlers struct {
+	// Global holds handlers for paths not matched by Meta or Id.
+	// The map key is the path; the value is the handler that will
+	// be used to handle that path.
+	//
+	// Path matching is by longest prefix - the same as
+	// http.ServeMux.
+	//
+	// Note that, unlike http.ServeMux, the prefix is stripped
+	// from the URL path before the handler is invoked,
+	// matching the behaviour of the other handlers.
+	Global map[string]http.Handler
+
+	// Id holds handlers for paths which correspond to a single
+	// charm or bundle id other than the meta path. The map key
+	// holds the first element of the path, which may end in a
+	// trailing slash (/) to indicate that longer paths are allowed
+	// too.
+	Id map[string]IdHandler
+
+	// Meta holds metadata handlers for paths under the meta
+	// endpoint. The map key holds the first element of the path,
+	// which may end in a trailing slash (/) to indicate that longer
+	// paths are allowed too.
+	Meta map[string]BulkIncludeHandler
+}
+
+// Router represents a charm store HTTP request router.
+type Router struct {
+	// Context holds context that the router was created with.
+	Context Context
+
+	handlers *Handlers
+	handler  http.Handler
+}
+
+// ResolvedURL represents a URL that has been resolved by resolveURL.
+type ResolvedURL struct {
+	// URL holds the canonical URL for the entity, as used as a key into
+	// the Entities collection. URL.User should always be non-empty
+	// and URL.Revision should never be -1. URL.Series will only be empty
+	// if the URL refers to a multi-series charm.
+	URL charm.URL
+
+	// PreferredSeries holds the series to return in PreferredURL
+	// if URL itself contains no series.
+	PreferredSeries string
+
+	// PromulgatedRevision holds the revision of the promulgated version of the
+	// charm or -1 if the corresponding entity is not promulgated.
+	PromulgatedRevision int
+
+	// Development holds whether the original entity URL included the
+	// "development" channel.
+	Development bool
+}
+
+// MustNewResolvedURL returns a new ResolvedURL by parsing
+// the entity URL in urlStr. The promulgatedRev parameter
+// specifies the value of PromulgatedRevision in the returned
+// value.
+//
+// This function panics if urlStr cannot be parsed as a charm.URL
+// or if it is not fully specified, including user and revision.
+func MustNewResolvedURL(urlStr string, promulgatedRev int) *ResolvedURL {
+	url := charm.MustParseURL(urlStr)
+	if url.User == "" || url.Revision == -1 {
+		panic(fmt.Errorf("incomplete url %v", urlStr))
+	}
+	return &ResolvedURL{
+		URL:                 *url.WithChannel(""),
+		PromulgatedRevision: promulgatedRev,
+		Development:         url.Channel == charm.DevelopmentChannel,
+	}
+}
+
+// UserOwnedURL returns the non-promulgated URL for the given id.
+// The returned *charm.URL may be modified freely.
+func (id *ResolvedURL) UserOwnedURL() *charm.URL {
+	u := id.URL
+	if id.Development {
+		u.Channel = charm.DevelopmentChannel
+	}
+	return &u
+}
+
+// PreferredURL returns the promulgated URL for the given id if there is
+// one, otherwise it returns the non-promulgated URL. The returned
+// *charm.URL may be modified freely.
+//
+// If id.PreferredSeries is non-empty, the returned charm URL
+// will always have a non-empty series.
+func (id *ResolvedURL) PreferredURL() *charm.URL {
+	u := id.UserOwnedURL()
+	if u.Series == "" && id.PreferredSeries != "" {
+		u.Series = id.PreferredSeries
+	}
+	if id.PromulgatedRevision == -1 {
+		return u
+	}
+	u.User = ""
+	u.Revision = id.PromulgatedRevision
+	return u
+}
+
+// PromulgatedURL returns the promulgated URL for id if there
+// is one, or nil otherwise.
+func (id *ResolvedURL) PromulgatedURL() *charm.URL {
+	if id.PromulgatedRevision == -1 {
+		return nil
+	}
+	u := id.UserOwnedURL()
+	u.User = ""
+	u.Revision = id.PromulgatedRevision
+	return u
+}
+
+func (id *ResolvedURL) GoString() string {
+	// Make the URL member visible as a string
+	// rather than as a set of members.
+	var gid = struct {
+		URL                 string
+		PreferredSeries     string
+		PromulgatedRevision int
+		Development         bool
+	}{
+		URL:                 id.URL.String(),
+		PreferredSeries:     id.PreferredSeries,
+		PromulgatedRevision: id.PromulgatedRevision,
+		Development:         id.Development,
+	}
+	return fmt.Sprintf("%#v", gid)
+}
+
+// String returns the preferred string representation of u.
+// It prefers to use the promulgated URL when there is one.
+func (u *ResolvedURL) String() string {
+	return u.PreferredURL().String()
+}
+
+// Context provides contextual information for a router.
+type Context interface {
+	// ResolveURL will be called to resolve ids in
+	// router paths - it should return the fully
+	// resolved URL corresponding to the given id.
+	// If the entity referred to by the URL does not
+	// exist, it should return an error with a params.ErrNotFound
+	// cause.
+	ResolveURL(id *charm.URL) (*ResolvedURL, error)
+
+	// The AuthorizeEntity function will be called to authorize requests
+	// to any BulkIncludeHandlers. All other handlers are expected
+	// to handle their own authorization.
+	AuthorizeEntity(id *ResolvedURL, req *http.Request) error
+}
+
+// New returns a charm store router that will route requests to
+// the given handlers and retrieve metadata from the given database.
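// A sketch of a typical construction (handler names hypothetical):
//
//	r := router.New(&router.Handlers{
//		Global: map[string]http.Handler{"debug/info": infoHandler},
//		Id:     map[string]router.IdHandler{"archive": serveArchive},
//		Meta:   map[string]router.BulkIncludeHandler{"charm-metadata": metaHandler},
//	}, ctxt)
//	http.Handle("/v4/", http.StripPrefix("/v4", r))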
+// +// The Context argument provides additional context to the +// router. Any errors returned by the context methods will +// have their cause preserved when creating the error return +// as for the handlers. +func New( + handlers *Handlers, + ctxt Context, +) *Router { + r := &Router{ + handlers: handlers, + Context: ctxt, + } + mux := NewServeMux() + mux.Handle("/meta/", http.StripPrefix("/meta", HandleErrors(r.serveBulkMeta))) + for path, handler := range r.handlers.Global { + path = "/" + path + prefix := strings.TrimSuffix(path, "/") + mux.Handle(path, http.StripPrefix(prefix, handler)) + } + mux.Handle("/", HandleErrors(r.serveIds)) + r.handler = mux + return r +} + +// ServeHTTP implements http.Handler.ServeHTTP. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Allow cross-domain access from anywhere, including AJAX + // requests. An AJAX request will add an X-Requested-With: + // XMLHttpRequest header, which is a non-standard header, and + // hence will require a pre-flight request, so we need to + // specify that that header is allowed, and we also need to + // implement the OPTIONS method so that the pre-flight request + // can work. + // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS + header := w.Header() + header.Set("Access-Control-Allow-Origin", "*") + header.Set("Access-Control-Allow-Headers", "Bakery-Protocol-Version, Macaroons, X-Requested-With") + header.Set("Access-Control-Allow-Credentials", "true") + header.Set("Access-Control-Cache-Max-Age", "600") + header.Set("Access-Control-Allow-Methods", "DELETE,GET,HEAD,PUT,POST,OPTIONS") + header.Set("Access-Control-Expose-Headers", "WWW-Authenticate") + + if req.Method == "OPTIONS" { + // We cheat here and say that all methods are allowed, + // even though any individual endpoint will allow + // only a subset of these. This means we can avoid + // putting OPTIONS handling in every endpoint, + // and it shouldn't actually matter in practice. + header.Set("Allow", "DELETE,GET,HEAD,PUT,POST") + header.Set("Access-Control-Allow-Origin", req.Header.Get("Origin")) + return + } + if err := req.ParseForm(); err != nil { + WriteError(w, errgo.Notef(err, "cannot parse form")) + return + } + r.handler.ServeHTTP(w, req) +} + +// Handlers returns the set of handlers that the router was created with. +// This should not be changed. +func (r *Router) Handlers() *Handlers { + return r.handlers +} + +// serveIds serves requests that may be rooted at a charm or bundle id. +func (r *Router) serveIds(w http.ResponseWriter, req *http.Request) error { + // We can ignore a trailing / because we do not return any + // relative URLs. If we start to return relative URL redirects, + // we will need to redirect non-slash-terminated URLs + // to slash-terminated URLs. + // http://cdivilly.wordpress.com/2014/03/11/why-trailing-slashes-on-uris-are-important/ + path := strings.TrimSuffix(req.URL.Path, "/") + url, path, err := splitId(path) + if err != nil { + return errgo.WithCausef(err, params.ErrNotFound, "") + } + key, path := handlerKey(path) + if key == "" { + return errgo.WithCausef(nil, params.ErrNotFound, "") + } + handler := r.handlers.Id[key] + if handler != nil { + req.URL.Path = path + err := handler(url, w, req) + // Note: preserve error cause from handlers. + return errgo.Mask(err, errgo.Any) + } + if key != "meta/" && key != "meta" { + return errgo.WithCausef(nil, params.ErrNotFound, params.ErrNotFound.Error()) + } + // Always resolve the entity id for meta requests. 
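	// (Resolution produces a fully specified ResolvedURL: for instance,
	// a request for "wordpress" might resolve to
	// cs:~charmers/trusty/wordpress-5, with PromulgatedRevision
	// recording any promulgated revision.)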
+ rurl, err := r.Context.ResolveURL(url) + if err != nil { + // Note: preserve error cause from resolveURL. + return errgo.Mask(err, errgo.Any) + } + req.URL.Path = path + return r.serveMeta(rurl, w, req) +} + +func idHandlerNeedsResolveURL(req *http.Request) bool { + return req.Method != "POST" && req.Method != "PUT" +} + +// handlerKey returns a key that can be used to look up a handler at the +// given path, and the remaining path elements. If there is no possible +// key, the returned key is empty. +func handlerKey(path string) (key, rest string) { + path = strings.TrimPrefix(path, "/") + key, i := splitPath(path, 0) + if key == "" { + // TODO what *should* we get if we GET just an id? + return "", rest + } + if i < len(path)-1 { + // There are more elements, so include the / character + // that terminates the element. + return path[0 : i+1], path[i:] + } + return key, "" +} + +func (r *Router) serveMeta(id *ResolvedURL, w http.ResponseWriter, req *http.Request) error { + switch req.Method { + case "GET", "HEAD": + resp, err := r.serveMetaGet(id, req) + if err != nil { + // Note: preserve error causes from meta handlers. + return errgo.Mask(err, errgo.Any) + } + httprequest.WriteJSON(w, http.StatusOK, resp) + return nil + case "PUT": + // Put requests don't return any data unless there's + // an error. + return r.serveMetaPut(id, req) + } + return params.ErrMethodNotAllowed +} + +func (r *Router) serveMetaGet(id *ResolvedURL, req *http.Request) (interface{}, error) { + // TODO: consider whether we might want the capability to + // have different permissions for different meta endpoints. + if err := r.Context.AuthorizeEntity(id, req); err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + key, path := handlerKey(req.URL.Path) + if key == "" { + // GET id/meta + // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmeta + return r.metaNames(), nil + } + if key == "any" { + // GET id/meta/any?[include=meta[&include=meta...]] + // https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaany + includes := req.Form["include"] + // If there are no includes, we have no handlers to generate + // a "not found" error when the id doesn't exist, so we need + // to check explicitly. + if len(includes) == 0 { + return params.MetaAnyResponse{Id: id.PreferredURL()}, nil + } + meta, err := r.GetMetadata(id, includes, req) + if err != nil { + // Note: preserve error cause from handlers. + return nil, errgo.Mask(err, errgo.Any) + } + return params.MetaAnyResponse{ + Id: id.PreferredURL(), + Meta: meta, + }, nil + } + if handler := r.handlers.Meta[key]; handler != nil { + results, err := handler.HandleGet([]BulkIncludeHandler{handler}, id, []string{path}, req.Form, req) + if err != nil { + // Note: preserve error cause from handlers. 
+ return nil, errgo.Mask(err, errgo.Any) + } + result := results[0] + if isNull(result) { + return nil, params.ErrMetadataNotFound + } + return results[0], nil + } + return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown metadata %q", strings.TrimPrefix(req.URL.Path, "/")) +} + +const jsonContentType = "application/json" + +func unmarshalJSONBody(req *http.Request, val interface{}) error { + if ct := req.Header.Get("Content-Type"); ct != jsonContentType { + return errgo.WithCausef(nil, params.ErrBadRequest, "unexpected Content-Type %q; expected %q", ct, jsonContentType) + } + dec := json.NewDecoder(req.Body) + if err := dec.Decode(val); err != nil { + return errgo.Notef(err, "cannot unmarshal body") + } + return nil +} + +// serveMetaPut serves a PUT request to the metadata for the given id. +// The metadata to be put is in the request body. +// PUT /$id/meta/... +func (r *Router) serveMetaPut(id *ResolvedURL, req *http.Request) error { + if err := r.Context.AuthorizeEntity(id, req); err != nil { + return errgo.Mask(err, errgo.Any) + } + var body json.RawMessage + if err := unmarshalJSONBody(req, &body); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrBadRequest)) + } + return r.serveMetaPutBody(id, req, &body) +} + +// serveMetaPutBody serves a PUT request to the metadata for the given id. +// The metadata to be put is in body. +// This method is used both for individual metadata PUTs and +// also bulk metadata PUTs. +func (r *Router) serveMetaPutBody(id *ResolvedURL, req *http.Request, body *json.RawMessage) error { + key, path := handlerKey(req.URL.Path) + if key == "" { + return params.ErrForbidden + } + if key == "any" { + // PUT id/meta/any + var bodyMeta struct { + Meta map[string]*json.RawMessage + } + if err := json.Unmarshal(*body, &bodyMeta); err != nil { + return errgo.Notef(err, "cannot unmarshal body") + } + if err := r.PutMetadata(id, bodyMeta.Meta, req); err != nil { + return errgo.Mask(err, errgo.Any) + } + return nil + } + if handler := r.handlers.Meta[key]; handler != nil { + errs := handler.HandlePut( + []BulkIncludeHandler{handler}, + id, + []string{path}, + []*json.RawMessage{body}, + req, + ) + if len(errs) > 0 && errs[0] != nil { + // Note: preserve error cause from handlers. + return errgo.Mask(errs[0], errgo.Any) + } + return nil + } + return errgo.WithCausef(nil, params.ErrNotFound, "") +} + +// isNull reports whether the given value will encode to +// a null JSON value. +func isNull(val interface{}) bool { + if val == nil { + return true + } + v := reflect.ValueOf(val) + if kind := v.Kind(); kind != reflect.Map && kind != reflect.Ptr && kind != reflect.Slice { + return false + } + return v.IsNil() +} + +// metaNames returns a slice of all the metadata endpoint names. +func (r *Router) metaNames() []string { + names := make([]string, 0, len(r.handlers.Meta)) + for name := range r.handlers.Meta { + // Ensure that we don't generate duplicate entries + // when there's an entry for both "x" and "x/". + trimmed := strings.TrimSuffix(name, "/") + if trimmed != name && r.handlers.Meta[trimmed] != nil { + continue + } + names = append(names, trimmed) + } + sort.Strings(names) + return names +} + +// serveBulkMeta serves bulk metadata requests (requests to /meta/...). +func (r *Router) serveBulkMeta(w http.ResponseWriter, req *http.Request) error { + switch req.Method { + case "GET", "HEAD": + // A bare meta returns all endpoints. 
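		// (e.g. a JSON list such as ["archive-size", "charm-metadata", ...],
		// built by metaNames from the registered Meta handlers.)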
+ // See https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata + if req.URL.Path == "/" || req.URL.Path == "" { + httprequest.WriteJSON(w, http.StatusOK, r.metaNames()) + return nil + } + resp, err := r.serveBulkMetaGet(req) + if err != nil { + return errgo.Mask(err, errgo.Any) + } + httprequest.WriteJSON(w, http.StatusOK, resp) + return nil + case "PUT": + return r.serveBulkMetaPut(req) + default: + return params.ErrMethodNotAllowed + } +} + +// serveBulkMetaGet serves the "bulk" metadata retrieval endpoint +// that can return information on several ids at once. +// +// GET meta/$endpoint?id=$id0[&id=$id1...][$otherflags] +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-metaendpoint +func (r *Router) serveBulkMetaGet(req *http.Request) (interface{}, error) { + ids := req.Form["id"] + if len(ids) == 0 { + return nil, errgo.WithCausef(nil, params.ErrBadRequest, "no ids specified in meta request") + } + delete(req.Form, "id") + ignoreAuth, err := ParseBool(req.Form.Get("ignore-auth")) + if err != nil { + return nil, errgo.WithCausef(err, params.ErrBadRequest, "") + } + delete(req.Form, "ignore-auth") + result := make(map[string]interface{}) + for _, id := range ids { + url, err := charm.ParseURL(id) + if err != nil { + return nil, errgo.WithCausef(err, params.ErrBadRequest, "") + } + rurl, err := r.Context.ResolveURL(url) + if err != nil { + if errgo.Cause(err) == params.ErrNotFound { + // URLs not found will be omitted from the result. + // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata + continue + } + // Note: preserve error cause from resolveURL. + return nil, errgo.Mask(err, errgo.Any) + } + meta, err := r.serveMetaGet(rurl, req) + if cause := errgo.Cause(err); cause == params.ErrNotFound || cause == params.ErrMetadataNotFound || (ignoreAuth && isAuthorizationError(cause)) { + // The relevant data does not exist, or it is not public and client + // asked not to authorize. + // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata + continue + } + if err != nil { + return nil, errgo.Mask(err) + } + result[id] = meta + } + return result, nil +} + +// ParseBool returns the boolean value represented by the string. +// It accepts "1" or "0". Any other value returns an error. +func ParseBool(value string) (bool, error) { + switch value { + case "0", "": + return false, nil + case "1": + return true, nil + } + return false, errgo.Newf(`unexpected bool value %q (must be "0" or "1")`, value) +} + +// isAuthorizationError reports whether the given error cause is an +// authorization error. +func isAuthorizationError(cause error) bool { + if cause == params.ErrUnauthorized { + return true + } + _, ok := cause.(*httpbakery.Error) + return ok +} + +// serveBulkMetaPut serves a bulk PUT request to several ids. 
+// PUT /meta/$endpoint
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-metaendpoint
+func (r *Router) serveBulkMetaPut(req *http.Request) error {
+	if len(req.Form["id"]) > 0 {
+		return fmt.Errorf("ids may not be specified in meta PUT request")
+	}
+	var ids map[string]*json.RawMessage
+	if err := unmarshalJSONBody(req, &ids); err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrBadRequest))
+	}
+	var multiErr multiError
+	for id, val := range ids {
+		if err := r.serveBulkMetaPutOne(req, id, val); err != nil {
+			if multiErr == nil {
+				multiErr = make(multiError)
+			}
+			multiErr[id] = errgo.Mask(err, errgo.Any)
+		}
+	}
+	if len(multiErr) != 0 {
+		return multiErr
+	}
+	return nil
+}
+
+// serveBulkMetaPutOne serves a PUT to a single id as part of a bulk PUT
+// request. It's in a separate function to make the error handling easier.
+func (r *Router) serveBulkMetaPutOne(req *http.Request, id string, val *json.RawMessage) error {
+	url, err := charm.ParseURL(id)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	rurl, err := r.Context.ResolveURL(url)
+	if err != nil {
+		// Note: preserve error cause from resolveURL.
+		return errgo.Mask(err, errgo.Any)
+	}
+	if err := r.Context.AuthorizeEntity(rurl, req); err != nil {
+		return errgo.Mask(err, errgo.Any)
+	}
+	if err := r.serveMetaPutBody(rurl, req, val); err != nil {
+		return errgo.Mask(err, errgo.Any)
+	}
+	return nil
+}
+
+// maxMetadataConcurrency specifies the maximum number
+// of goroutines started to service a given GetMetadata request.
+// 5 is enough to more than cover the number of metadata
+// group handlers in the current API.
+const maxMetadataConcurrency = 5
+
+// GetMetadata retrieves metadata for the given charm or bundle id,
+// including information as specified by the includes slice.
+func (r *Router) GetMetadata(id *ResolvedURL, includes []string, req *http.Request) (map[string]interface{}, error) {
+	groups := make(map[interface{}][]BulkIncludeHandler)
+	includesByGroup := make(map[interface{}][]string)
+	for _, include := range includes {
+		// Get the key that lets us choose the include handler.
+		includeKey, _ := handlerKey(include)
+		handler := r.handlers.Meta[includeKey]
+		if handler == nil {
+			return nil, errgo.Newf("unrecognized metadata name %q", include)
+		}
+
+		// Get the key that lets us group this handler into the
+		// correct bulk group.
+		key := handler.Key()
+		groups[key] = append(groups[key], handler)
+		includesByGroup[key] = append(includesByGroup[key], include)
+	}
+	results := make(map[string]interface{})
+	// TODO when the number of groups is 1 (a common case), using
+	// parallel.NewRun actually slows things down by creating a
+	// goroutine. We could optimise it so that it doesn't create a
+	// goroutine in that case.
+	run := parallel.NewRun(maxMetadataConcurrency)
+	var mu sync.Mutex
+	for _, g := range groups {
+		g := g
+		run.Do(func() error {
+			// We know that we must have at least one element in the
+			// slice here. We could use any member of the slice to
+			// actually handle the request, so arbitrarily choose
+			// g[0]. Note that g[0].Key() is equal to g[i].Key() for
+			// every i in the slice.
+			groupIncludes := includesByGroup[g[0].Key()]
+
+			// Paths contains all the path elements after
+			// the handler key has been stripped off.
+			// TODO(rog) BUG shouldn't this be len(groupIncludes) ?
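+			// In practice len(g) always equals len(groupIncludes):
+			// the loop above appends exactly one handler and one
+			// include per key, so the two slices grow in lockstep.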
+ paths := make([]string, len(g)) + for i, include := range groupIncludes { + _, paths[i] = handlerKey(include) + } + groupResults, err := g[0].HandleGet(g, id, paths, nil, req) + if err != nil { + // TODO(rog) if it's a BulkError, attach + // the original include path to error (the BulkError + // should contain the index of the failed one). + return errgo.Mask(err, errgo.Any) + } + mu.Lock() + for i, result := range groupResults { + // Omit nil results from map. Note: omit statically typed + // nil results too to make it easy for handlers to return + // possibly nil data with a static type. + // https://github.com/juju/charmstore/blob/v4/docs/API.md#bulk-requests-and-missing-metadata + if !isNull(result) { + results[groupIncludes[i]] = result + } + } + mu.Unlock() + return nil + }) + } + if err := run.Wait(); err != nil { + // We could have got multiple errors, but we'll only return one of them. + return nil, errgo.Mask(err.(parallel.Errors)[0], errgo.Any) + } + return results, nil +} + +// PutMetadata puts metadata for the given id. Each key in data holds +// the name of a metadata endpoint; its associated value +// holds the value to be written. +func (r *Router) PutMetadata(id *ResolvedURL, data map[string]*json.RawMessage, req *http.Request) error { + groups := make(map[interface{}][]BulkIncludeHandler) + valuesByGroup := make(map[interface{}][]*json.RawMessage) + pathsByGroup := make(map[interface{}][]string) + for path, body := range data { + // Get the key that lets us choose the meta handler. + metaKey, _ := handlerKey(path) + handler := r.handlers.Meta[metaKey] + if handler == nil { + return errgo.Newf("unrecognized metadata name %q", path) + } + + // Get the key that lets us group this handler into the + // correct bulk group. + key := handler.Key() + groups[key] = append(groups[key], handler) + valuesByGroup[key] = append(valuesByGroup[key], body) + + // Paths contains all the path elements after + // the handler key has been stripped off. + pathsByGroup[key] = append(pathsByGroup[key], path) + } + var multiErr multiError + for _, g := range groups { + // We know that we must have at least one element in the + // slice here. We could use any member of the slice to + // actually handle the request, so arbitrarily choose + // g[0]. Note that g[0].Key() is equal to g[i].Key() for + // every i in the slice. + key := g[0].Key() + + paths := pathsByGroup[key] + // The paths passed to the handler contain all the path elements + // after the handler key has been stripped off. + strippedPaths := make([]string, len(paths)) + for i, path := range paths { + _, strippedPaths[i] = handlerKey(path) + } + + errs := g[0].HandlePut(g, id, strippedPaths, valuesByGroup[key], req) + if len(errs) > 0 { + if multiErr == nil { + multiErr = make(multiError) + } + if len(errs) != len(paths) { + return fmt.Errorf("unexpected error count; expected %d, got %q", len(paths), errs) + } + for i, err := range errs { + if err != nil { + multiErr[paths[i]] = err + } + } + } + } + if len(multiErr) != 0 { + return multiErr + } + return nil +} + +// splitPath returns the first path element +// after path[i:] and the start of the next +// element. +// +// For example, splitPath("/foo/bar/bzr", 4) returns ("bar", 8). 
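+// When path[i] is not a slash, the element starts at i itself;
+// for example, splitPath("foo/bar", 0) returns ("foo", 3).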
+func splitPath(path string, i int) (elem string, nextIndex int) { + if i < len(path) && path[i] == '/' { + i++ + } + j := strings.Index(path[i:], "/") + if j == -1 { + return path[i:], len(path) + } + j += i + return path[i:j], j +} + +// splitId splits the given URL path into a charm or bundle +// URL and the rest of the path. +func splitId(path string) (url *charm.URL, rest string, err error) { + path = strings.TrimPrefix(path, "/") + part, i := splitPath(path, 0) + + // Skip ~. + if strings.HasPrefix(part, "~") { + part, i = splitPath(path, i) + } + + // Skip channel. + if charm.Channel(part) == charm.DevelopmentChannel { + part, i = splitPath(path, i) + } + + // Skip series. + if _, ok := series.Series[part]; ok { + part, i = splitPath(path, i) + } + + // part should now contain the charm name, + // and path[0:i] should contain the entire + // charm id. + urlStr := strings.TrimSuffix(path[0:i], "/") + url, err = charm.ParseURL(urlStr) + if err != nil { + return nil, "", errgo.Mask(err) + } + return url, path[i:], nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/router_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2651 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "sort" + "strings" + "sync" + "sync/atomic" + + "github.com/juju/httprequest" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/httpbakery" + + "gopkg.in/juju/charmstore.v5-unstable/audit" +) + +type RouterSuite struct { + jujutesting.IsolationSuite +} + +var _ = gc.Suite(&RouterSuite{}) + +var newResolvedURL = MustNewResolvedURL + +var routerGetTests = []struct { + about string + handlers Handlers + urlStr string + expectStatus int + expectBody interface{} + expectQueryCount int32 + resolveURL func(*charm.URL) (*ResolvedURL, error) + authorize func(*ResolvedURL, *http.Request) error + exists func(*ResolvedURL, *http.Request) (bool, error) +}{{ + about: "global handler", + handlers: Handlers{ + Global: map[string]http.Handler{ + "foo": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return ReqInfo{ + Method: req.Method, + Path: req.URL.Path, + Form: req.Form, + }, nil + }), + }, + }, + urlStr: "/foo", + expectStatus: http.StatusOK, + expectBody: ReqInfo{ + Method: "GET", + Path: "", + }, +}, { + about: "global handler with sub-path and flags", + handlers: Handlers{ + Global: map[string]http.Handler{ + "foo/bar/": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return ReqInfo{ + Method: req.Method, + Path: req.URL.Path, + Form: req.Form, + }, nil + }), + }, + }, + urlStr: "/foo/bar/a/b?a=1&b=two", + expectStatus: http.StatusOK, + expectBody: ReqInfo{ + Path: "/a/b", + Method: "GET", + Form: url.Values{ + "a": {"1"}, + "b": {"two"}, + }, + }, +}, { + about: "invalid form", + urlStr: "/foo?a=%", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `cannot parse form: invalid 
URL escape "%"`, + }, +}, { + about: "id handler", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/precise/wordpress-34/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:precise/wordpress-34", + }, +}, { + about: "development id handler", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/development/trusty/wordpress-34/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:development/trusty/wordpress-34", + }, +}, { + about: "id handler with invalid channel", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/bad-wolf/trusty/wordpress-34/foo", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "not found", + }, +}, { + about: "windows id handler", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/win81/visualstudio-2012/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:win81/visualstudio-2012", + }, +}, { + about: "windows development id handler", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/development/win81/visualstudio-2012/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:development/win81/visualstudio-2012", + }, +}, { + about: "wily id handler", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/wily/wordpress-34/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:wily/wordpress-34", + }, +}, { + about: "id handler with no series in id", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/wordpress-34/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:wordpress-34", + }, +}, { + about: "id handler with no revision in id", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/precise/wordpress/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:precise/wordpress", + }, +}, { + about: "id handler with channel and name only", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/development/wordpress/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:development/wordpress", + }, +}, { + about: "id handler with extra path", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo/": testIdHandler, + }, + }, + urlStr: "/precise/wordpress-34/foo/blah/arble", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:precise/wordpress-34", + Path: "/blah/arble", + }, +}, { + about: "id handler with allowed extra path but none given", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo/": testIdHandler, + }, + }, + urlStr: "/precise/wordpress-34/foo", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "not found", + }, +}, { + about: "id handler with unwanted extra path", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/precise/wordpress-34/foo/blah", + expectStatus: http.StatusNotFound, + 
expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "not found", + }, +}, { + about: "id handler with user", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/~joe/precise/wordpress-34/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:~joe/precise/wordpress-34", + }, +}, { + about: "wily handler with user", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/~joe/wily/wordpress-34/foo", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:~joe/wily/wordpress-34", + }, +}, { + about: "id handler with user and extra path", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo/": testIdHandler, + }, + }, + urlStr: "/~joe/precise/wordpress-34/foo/blah/arble", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:~joe/precise/wordpress-34", + Path: "/blah/arble", + }, +}, { + about: "development id handler with user and extra path", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo/": testIdHandler, + }, + }, + urlStr: "/~joe/development/precise/wordpress-34/foo/blah/arble", + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:~joe/development/precise/wordpress-34", + Path: "/blah/arble", + }, +}, { + about: "id handler with user, invalid channel and extra path", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo/": testIdHandler, + }, + }, + urlStr: "/~joe/bad-wolf/precise/wordpress-34/foo/blah/arble", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "not found", + }, +}, { + about: "id handler that returns an error", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo/": errorIdHandler, + }, + }, + urlStr: "/~joe/precise/wordpress-34/foo/blah/arble", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "errorIdHandler error", + }, +}, { + about: "id handler that returns a not-found error", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": func(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { + return params.ErrNotFound + }, + }, + }, + urlStr: "/~joe/precise/wordpress-34/foo", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Message: "not found", + Code: params.ErrNotFound, + }, +}, { + about: "id handler that returns some other kind of coded error", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": func(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { + return errgo.WithCausef(nil, params.ErrorCode("foo"), "a message") + }, + }, + }, + urlStr: "/~joe/precise/wordpress-34/foo", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "a message", + Code: "foo", + }, +}, { + about: "id with unspecified series and revision, not resolved", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/~joe/wordpress/foo", + resolveURL: resolveTo("precise", 34), + expectStatus: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "GET", + CharmURL: "cs:~joe/wordpress", + }, +}, { + about: "id with error on resolving", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/wordpress/meta", + resolveURL: resolveURLError(errgo.New("resolve URL error")), + expectStatus: http.StatusInternalServerError, + expectBody: 
params.Error{ + Message: "resolve URL error", + }, +}, { + about: "id with error on resolving that has a Cause", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/wordpress/meta", + resolveURL: resolveURLError(params.ErrNotFound), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Message: "not found", + Code: params.ErrNotFound, + }, +}, { + about: "meta list", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar": testMetaHandler(1), + "bar/": testMetaHandler(2), + "foo/": testMetaHandler(3), + "baz": testMetaHandler(4), + }, + }, + urlStr: "/precise/wordpress-42/meta", + expectStatus: http.StatusOK, + expectBody: []string{"bar", "baz", "foo"}, +}, { + about: "meta list at root", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar": testMetaHandler(1), + "bar/": testMetaHandler(2), + "foo/": testMetaHandler(3), + "baz": testMetaHandler(4), + }, + }, + urlStr: "/meta", + expectStatus: http.StatusOK, + expectBody: []string{"bar", "baz", "foo"}, +}, { + about: "meta list at root with trailing /", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar": testMetaHandler(1), + "bar/": testMetaHandler(2), + "foo/": testMetaHandler(3), + "baz": testMetaHandler(4), + }, + }, + urlStr: "/meta/", + expectStatus: http.StatusOK, + expectBody: []string{"bar", "baz", "foo"}, +}, { + about: "meta handler", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/precise/wordpress-42/meta/foo", + expectStatus: http.StatusOK, + expectBody: &metaHandlerTestResp{ + CharmURL: "cs:precise/wordpress-42", + }, +}, { + about: "meta handler with development channel", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/development/precise/wordpress/meta/foo", + expectStatus: http.StatusOK, + expectBody: &metaHandlerTestResp{ + CharmURL: "cs:development/precise/wordpress-0", + }, +}, { + about: "meta handler with additional elements", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo/": testMetaHandler(0), + }, + }, + urlStr: "/precise/wordpress-42/meta/foo/bar/baz", + expectStatus: http.StatusOK, + expectBody: metaHandlerTestResp{ + CharmURL: "cs:precise/wordpress-42", + Path: "/bar/baz", + }, +}, { + about: "meta handler with params", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/precise/wordpress-42/meta/foo?one=a&two=b&one=c", + expectStatus: http.StatusOK, + expectBody: metaHandlerTestResp{ + CharmURL: "cs:precise/wordpress-42", + Flags: url.Values{ + "one": {"a", "c"}, + "two": {"b"}, + }, + }, +}, { + about: "meta handler that's not found", + urlStr: "/precise/wordpress-42/meta/foo", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `unknown metadata "foo"`, + }, +}, { + about: "meta sub-handler that's not found", + urlStr: "/precise/wordpress-42/meta/foo/bar", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `unknown metadata "foo/bar"`, + }, +}, { + about: "meta handler with nil data", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": constMetaHandler(nil), + }, + }, + urlStr: "/precise/wordpress-42/meta/foo", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: 
params.ErrMetadataNotFound, + Message: "metadata not found", + }, +}, { + about: "meta handler with typed nil data", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": constMetaHandler((*struct{})(nil)), + }, + }, + urlStr: "/precise/wordpress-42/meta/foo", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrMetadataNotFound, + Message: "metadata not found", + }, +}, { + about: "meta handler with field selector", + urlStr: "/precise/wordpress-42/meta/foo", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": fieldSelectHandler("handler1", 0, "field1", "field2"), + }, + }, + expectStatus: http.StatusOK, + expectQueryCount: 1, + expectBody: fieldSelectHandleGetInfo{ + HandlerId: "handler1", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, "field2": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + }, +}, { + about: "meta handler returning error with code", + urlStr: "/precise/wordpress-42/meta/foo", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": errorMetaHandler(errgo.WithCausef(nil, params.ErrorCode("arble"), "a message")), + }, + }, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Code: "arble", + Message: "a message", + }, +}, { + about: "unauthorized meta handler", + urlStr: "/precise/wordpress-42/meta/foo", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + authorize: neverAuthorize, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "bad wolf", + }, +}, { + about: "meta/any, no includes, id exists", + urlStr: "/precise/wordpress-42/meta/any", + expectStatus: http.StatusOK, + expectBody: params.MetaAnyResponse{ + Id: charm.MustParseURL("cs:precise/wordpress-42"), + }, +}, { + about: "meta/any, no includes, id does not exist", + urlStr: "/precise/wordpress/meta/any", + resolveURL: resolveURLError(params.ErrNotFound), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "not found", + }, +}, { + about: "meta/any, some includes all using same key", + urlStr: "/precise/wordpress-42/meta/any?include=field1-1&include=field2&include=field1-2", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "field1-1": fieldSelectHandler("handler1", 0, "field1"), + "field2": fieldSelectHandler("handler2", 0, "field2"), + "field1-2": fieldSelectHandler("handler3", 0, "field1"), + }, + }, + expectQueryCount: 1, + expectStatus: http.StatusOK, + expectBody: params.MetaAnyResponse{ + Id: charm.MustParseURL("cs:precise/wordpress-42"), + Meta: map[string]interface{}{ + "field1-1": fieldSelectHandleGetInfo{ + HandlerId: "handler1", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, "field2": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + }, + "field2": fieldSelectHandleGetInfo{ + HandlerId: "handler2", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, "field2": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + }, + "field1-2": fieldSelectHandleGetInfo{ + HandlerId: "handler3", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, 
"field2": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + }, + }, + }, +}, { + about: "meta/any, includes with additional path elements", + urlStr: "/precise/wordpress-42/meta/any?include=item1/foo&include=item2/bar&include=item1", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "item1/": fieldSelectHandler("handler1", 0, "field1"), + "item2/": fieldSelectHandler("handler2", 0, "field2"), + "item1": fieldSelectHandler("handler3", 0, "field3"), + }, + }, + expectQueryCount: 1, + expectStatus: http.StatusOK, + expectBody: params.MetaAnyResponse{ + Id: charm.MustParseURL("cs:precise/wordpress-42"), + Meta: map[string]interface{}{ + "item1/foo": fieldSelectHandleGetInfo{ + HandlerId: "handler1", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Path: "/foo", + }, + "item2/bar": fieldSelectHandleGetInfo{ + HandlerId: "handler2", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Path: "/bar", + }, + "item1": fieldSelectHandleGetInfo{ + HandlerId: "handler3", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + Selector: map[string]int{"field1": 1, "field2": 1, "field3": 1}, + }, + Id: newResolvedURL("cs:~charmers/precise/wordpress-42", 42), + }, + }, + }, +}, { + about: "meta/any, nil metadata omitted", + urlStr: "/precise/wordpress-42/meta/any?include=ok&include=nil", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "ok": testMetaHandler(0), + "nil": constMetaHandler(nil), + "typednil": constMetaHandler((*struct{})(nil)), + }, + }, + expectStatus: http.StatusOK, + expectBody: params.MetaAnyResponse{ + Id: charm.MustParseURL("cs:precise/wordpress-42"), + Meta: map[string]interface{}{ + "ok": metaHandlerTestResp{ + CharmURL: "cs:precise/wordpress-42", + }, + }, + }, +}, { + about: "meta/any, handler returns error with cause", + urlStr: "/precise/wordpress-42/meta/any?include=error", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "error": errorMetaHandler(errgo.WithCausef(nil, params.ErrorCode("foo"), "a message")), + }, + }, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Code: "foo", + Message: "a message", + }, +}, { + about: "bulk meta handler, single id", + urlStr: "/meta/foo?id=precise/wordpress-42", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]metaHandlerTestResp{ + "precise/wordpress-42": { + CharmURL: "cs:precise/wordpress-42", + }, + }, +}, { + about: "bulk meta handler, single development id", + urlStr: "/meta/foo?id=~user/development/wily/wordpress-42", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]metaHandlerTestResp{ + "~user/development/wily/wordpress-42": { + CharmURL: "cs:~user/development/wily/wordpress-42", + }, + }, +}, { + about: "bulk meta handler, single id with invalid channel", + urlStr: "/meta/foo?id=~user/bad-wolf/wily/wordpress-42", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + expectStatus: 
http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `bad request: charm or bundle URL has invalid form: "~user/bad-wolf/wily/wordpress-42"`, + }, +}, { + about: "bulk meta handler, several ids", + urlStr: "/meta/foo?id=precise/wordpress-42&id=utopic/foo-32&id=development/django", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]metaHandlerTestResp{ + "precise/wordpress-42": { + CharmURL: "cs:precise/wordpress-42", + }, + "utopic/foo-32": { + CharmURL: "cs:utopic/foo-32", + }, + "development/django": { + CharmURL: "cs:development/precise/django-0", + }, + }, +}, { + about: "bulk meta/any handler, several ids", + urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&id=development/django-47&include=foo&include=bar/something", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar/": testMetaHandler(1), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]params.MetaAnyResponse{ + "precise/wordpress-42": { + Id: charm.MustParseURL("cs:precise/wordpress-42"), + Meta: map[string]interface{}{ + "foo": metaHandlerTestResp{ + CharmURL: "cs:precise/wordpress-42", + }, + "bar/something": metaHandlerTestResp{ + CharmURL: "cs:precise/wordpress-42", + Path: "/something", + }, + }, + }, + "utopic/foo-32": { + Id: charm.MustParseURL("cs:utopic/foo-32"), + Meta: map[string]interface{}{ + "foo": metaHandlerTestResp{ + CharmURL: "cs:utopic/foo-32", + }, + "bar/something": metaHandlerTestResp{ + CharmURL: "cs:utopic/foo-32", + Path: "/something", + }, + }, + }, + "development/django-47": { + Id: charm.MustParseURL("cs:development/precise/django-47"), + Meta: map[string]interface{}{ + "foo": metaHandlerTestResp{ + CharmURL: "cs:development/precise/django-47", + }, + "bar/something": metaHandlerTestResp{ + CharmURL: "cs:development/precise/django-47", + Path: "/something", + }, + }, + }, + }, +}, { + about: "bulk meta/any handler, several ids, invalid channel", + urlStr: "/meta/any?id=precise/wordpress-42&id=staging/trusty/django&include=foo&include=bar/something", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar/": testMetaHandler(1), + }, + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `bad request: charm or bundle URL has invalid form: "staging/trusty/django"`, + }, +}, { + about: "bulk meta/any handler, discharge required", + urlStr: "/meta/any?id=precise/wordpress-42&include=foo", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + authorize: dischargeRequiredAuthorize, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "discharge required", + }, +}, { + about: "bulk meta/any handler, discharge required, ignore authorization", + urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + authorize: dischargeRequiredAuthorize, + expectStatus: http.StatusOK, + expectBody: map[string]params.MetaAnyResponse{}, +}, { + about: "bulk meta/any handler, some unauthorized, ignore authorization", + urlStr: "/meta/any?id=precise/wordpress-42&id=utopic/foo-32&include=foo&ignore-auth=1", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + authorize: 
dischargeRequiredAuthorize, + expectStatus: http.StatusOK, + expectBody: map[string]params.MetaAnyResponse{ + "utopic/foo-32": { + Id: charm.MustParseURL("cs:utopic/foo-32"), + Meta: map[string]interface{}{ + "foo": metaHandlerTestResp{ + CharmURL: "cs:utopic/foo-32", + }, + }, + }, + }, +}, { + about: "bulk meta/any handler, unauthorized", + urlStr: "/meta/any?id=precise/wordpress-42&include=foo", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + authorize: neverAuthorize, + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "bad wolf", + }, +}, { + about: "bulk meta/any handler, unauthorized, ignore authorization", + urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=1", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + authorize: neverAuthorize, + expectStatus: http.StatusOK, + expectBody: map[string]params.MetaAnyResponse{}, +}, { + about: "bulk meta/any handler, invalid ignore-auth flag", + urlStr: "/meta/any?id=precise/wordpress-42&include=foo&ignore-auth=meh", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `bad request: unexpected bool value "meh" (must be "0" or "1")`, + }, +}, { + about: "bulk meta handler with unresolved id", + urlStr: "/meta/foo/bar?id=wordpress", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo/": testMetaHandler(0), + }, + }, + resolveURL: resolveTo("precise", 100), + expectStatus: http.StatusOK, + expectBody: map[string]metaHandlerTestResp{ + "wordpress": { + CharmURL: "cs:precise/wordpress-100", + Path: "/bar", + }, + }, +}, { + about: "bulk meta handler with extra flags", + urlStr: "/meta/foo/bar?id=wordpress&arble=bletch&z=w&z=p", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo/": testMetaHandler(0), + }, + }, + resolveURL: resolveTo("precise", 100), + expectStatus: http.StatusOK, + expectBody: map[string]metaHandlerTestResp{ + "wordpress": { + CharmURL: "cs:precise/wordpress-100", + Path: "/bar", + Flags: url.Values{ + "arble": {"bletch"}, + "z": {"w", "p"}, + }, + }, + }, +}, { + about: "bulk meta handler with no ids", + urlStr: "/meta/foo/bar", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo/": testMetaHandler(0), + }, + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "no ids specified in meta request", + }, +}, { + about: "bulk meta handler with unresolvable id", + urlStr: "/meta/foo?id=unresolved&id=~foo/precise/wordpress-23", + resolveURL: func(url *charm.URL) (*ResolvedURL, error) { + if url.Name == "unresolved" { + return nil, params.ErrNotFound + } + return &ResolvedURL{URL: *url, PromulgatedRevision: 99}, nil + }, + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]metaHandlerTestResp{ + "~foo/precise/wordpress-23": { + CharmURL: "cs:precise/wordpress-99", + }, + }, +}, { + about: "bulk meta handler with id resolution error", + urlStr: "/meta/foo?id=resolveerror&id=precise/wordpress-23", + resolveURL: func(url *charm.URL) (*ResolvedURL, error) { + if url.Name == "resolveerror" { + return nil, errgo.Newf("an error") + } + return &ResolvedURL{URL: *url}, nil + }, + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + expectStatus: http.StatusInternalServerError, + 
expectBody: params.Error{ + Message: "an error", + }, +}, { + about: "bulk meta handler with some nil data", + urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": selectiveIdHandler(map[string]interface{}{ + "cs:bundle/something-24": "bundlefoo", + }), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]string{ + "bundle/something-24": "bundlefoo", + }, +}, { + about: "bulk meta handler with entity not found", + urlStr: "/meta/foo?id=bundle/something-24&id=precise/wordpress-23", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": SingleIncludeHandler(func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + if id.URL.Revision == 23 { + return nil, errgo.WithCausef(nil, params.ErrNotFound, "") + } + return "something", nil + }), + }, + }, + expectStatus: http.StatusOK, + expectBody: map[string]string{ + "bundle/something-24": "something", + }, +}, { + about: "meta request with invalid entity reference", + urlStr: "/robots.txt/meta/any", + handlers: Handlers{}, + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `not found: URL has invalid charm or bundle name: "robots.txt"`, + }, +}, { + about: "bulk meta handler, invalid id", + urlStr: "/meta/foo?id=robots.txt", + handlers: Handlers{}, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `bad request: URL has invalid charm or bundle name: "robots.txt"`, + }, +}} + +// resolveTo returns a URL resolver that resolves +// unspecified series and revision to the given series +// and revision. +func resolveTo(series string, revision int) func(*charm.URL) (*ResolvedURL, error) { + return func(url *charm.URL) (*ResolvedURL, error) { + var rurl ResolvedURL + rurl.URL = *url + if url.Series == "" { + rurl.URL.Series = series + } + if url.Revision == -1 { + rurl.URL.Revision = revision + } + if url.User == "" { + rurl.URL.User = "charmers" + rurl.PromulgatedRevision = revision + } + return &rurl, nil + } +} + +func resolveURLError(err error) func(*charm.URL) (*ResolvedURL, error) { + return func(*charm.URL) (*ResolvedURL, error) { + return nil, err + } +} + +func alwaysResolveURL(u *charm.URL) (*ResolvedURL, error) { + u1 := *u + if u1.Series == "" { + u1.Series = "precise" + } + if u1.Revision == -1 { + u1.Revision = 0 + } + promRev := -1 + if u1.User == "" { + u1.User = "charmers" + promRev = u1.Revision + } + return newResolvedURL(u1.String(), promRev), nil +} + +func (s *RouterSuite) TestRouterGet(c *gc.C) { + for i, test := range routerGetTests { + c.Logf("test %d: %s", i, test.about) + ctxt := alwaysContext + if test.resolveURL != nil { + ctxt.resolveURL = test.resolveURL + } + if test.authorize != nil { + ctxt.authorizeURL = test.authorize + } + router := New(&test.handlers, ctxt) + // Note that fieldSelectHandler increments queryCount each time + // a query is made. 
+ queryCount = 0 + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: router, + URL: test.urlStr, + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + c.Assert(queryCount, gc.Equals, test.expectQueryCount) + } +} + +type funcContext struct { + resolveURL func(id *charm.URL) (*ResolvedURL, error) + authorizeURL func(id *ResolvedURL, req *http.Request) error +} + +func (ctxt funcContext) ResolveURL(id *charm.URL) (*ResolvedURL, error) { + return ctxt.resolveURL(id) +} + +func (ctxt funcContext) AuthorizeEntity(id *ResolvedURL, req *http.Request) error { + return ctxt.authorizeURL(id, req) +} + +var parseBoolTests = []struct { + value string + result bool + err bool +}{{ + value: "0", +}, { + value: "", +}, { + value: "1", + result: true, +}, { + value: "invalid", + err: true, +}} + +func (s *RouterSuite) TestParseBool(c *gc.C) { + for i, test := range parseBoolTests { + c.Logf("test %d: %s", i, test.value) + result, err := ParseBool(test.value) + c.Assert(result, gc.Equals, test.result) + if test.err { + c.Assert(err, gc.ErrorMatches, "unexpected bool value .*") + continue + } + c.Assert(err, jc.ErrorIsNil) + } +} + +var alwaysContext = funcContext{ + resolveURL: alwaysResolveURL, + authorizeURL: alwaysAuthorize, +} + +func (s *RouterSuite) TestCORSHeaders(c *gc.C) { + h := New(&Handlers{ + Global: map[string]http.Handler{ + "foo": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {}), + }, + }, alwaysContext) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: h, + URL: "/foo", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "*") + c.Assert(rec.Header().Get("Access-Control-Cache-Max-Age"), gc.Equals, "600") + c.Assert(rec.Header().Get("Access-Control-Allow-Headers"), gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With") + c.Assert(rec.Header().Get("Access-Control-Allow-Methods"), gc.Equals, "DELETE,GET,HEAD,PUT,POST,OPTIONS") + c.Assert(rec.Header().Get("Access-Control-Expose-Headers"), gc.Equals, "WWW-Authenticate") +} + +func (s *RouterSuite) TestHTTPRequestPassedThroughToMeta(c *gc.C) { + testReq, err := http.NewRequest("GET", "/wordpress/meta/foo", nil) + c.Assert(err, gc.IsNil) + doneQuery := false + query := func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { + if req != testReq { + return nil, fmt.Errorf("unexpected request found in Query") + } + doneQuery = true + return 0, nil + } + doneGet := false + handleGet := func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + if req != testReq { + return nil, fmt.Errorf("unexpected request found in HandleGet") + } + doneGet = true + return 0, nil + } + donePut := false + handlePut := func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + if req != testReq { + return fmt.Errorf("unexpected request found in HandlePut") + } + donePut = true + return nil + } + update := func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error { + return nil + } + h := New(&Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + Query: query, + Fields: []string{"foo"}, + HandleGet: handleGet, + HandlePut: handlePut, + Update: update, + }), + }, + }, alwaysContext) + resp := httptest.NewRecorder() + h.ServeHTTP(resp, testReq) + c.Assert(resp.Code, gc.Equals, 
http.StatusOK, gc.Commentf("response body: %s", resp.Body)) + c.Assert(doneGet, jc.IsTrue) + c.Assert(doneQuery, jc.IsTrue) + + testReq, err = http.NewRequest("PUT", "/wordpress/meta/foo", strings.NewReader(`"hello"`)) + testReq.Header.Set("Content-Type", "application/json") + c.Assert(err, gc.IsNil) + resp = httptest.NewRecorder() + h.ServeHTTP(resp, testReq) + c.Assert(resp.Code, gc.Equals, http.StatusOK, gc.Commentf("response body: %s", resp.Body)) + c.Assert(donePut, jc.IsTrue) +} + +func (s *RouterSuite) TestOptionsHTTPMethod(c *gc.C) { + h := New(&Handlers{}, alwaysContext) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: h, + Method: "OPTIONS", + URL: "/foo", + Header: http.Header{"Origin": []string{"https://1.2.42.47"}}, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + header := rec.Header() + c.Assert(header.Get("Access-Control-Allow-Origin"), gc.Equals, "https://1.2.42.47") + c.Assert(header.Get("Access-Control-Cache-Max-Age"), gc.Equals, "600") + c.Assert(header.Get("Access-Control-Allow-Headers"), gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With") + c.Assert(header.Get("Access-Control-Allow-Methods"), gc.Equals, "DELETE,GET,HEAD,PUT,POST,OPTIONS") + c.Assert(header.Get("Allow"), gc.Equals, "DELETE,GET,HEAD,PUT,POST") +} + +var routerPutTests = []struct { + about string + handlers Handlers + urlStr string + body interface{} + expectCode int + expectBody interface{} + expectRecordedCalls []interface{} + resolveURL func(*charm.URL) (*ResolvedURL, error) +}{{ + about: "global handler", + handlers: Handlers{ + Global: map[string]http.Handler{ + "foo": HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return ReqInfo{ + Method: req.Method, + Path: req.URL.Path, + Form: req.Form, + }, nil + }), + }, + }, + urlStr: "/foo", + expectCode: http.StatusOK, + expectBody: ReqInfo{ + Method: "PUT", + Path: "", + }, +}, { + about: "id handler", + handlers: Handlers{ + Id: map[string]IdHandler{ + "foo": testIdHandler, + }, + }, + urlStr: "/precise/wordpress-34/foo", + expectCode: http.StatusOK, + expectBody: idHandlerTestResp{ + Method: "PUT", + CharmURL: "cs:precise/wordpress-34", + }, +}, { + about: "meta handler", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/precise/wordpress-42/meta/foo", + expectCode: http.StatusOK, + body: "hello", + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 1, + Id: "cs:precise/wordpress-42", + Paths: []string{""}, + Values: []interface{}{"hello"}, + }, + }, +}, { + about: "meta/any", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar": testMetaHandler(1), + }, + }, + urlStr: "/precise/wordpress-42/meta/any", + body: params.MetaAnyResponse{ + Meta: map[string]interface{}{ + "foo": "foo-value", + "bar": map[string]interface{}{ + "bar-value1": 234.0, + "bar-value2": "whee", + }, + }, + }, + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 2, + Id: "cs:precise/wordpress-42", + Paths: []string{"", ""}, + Values: []interface{}{ + "foo-value", + map[string]interface{}{ + "bar-value1": 234.0, + "bar-value2": "whee", + }, + }, + }, + }, +}, { + about: "meta/any with extra paths", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo/": testMetaHandler(0), + "bar": testMetaHandler(1), + }, + }, + urlStr: "/precise/wordpress-42/meta/any", + body: params.MetaAnyResponse{ + Meta: map[string]interface{}{ + "foo/one": 
"foo-value-one", + "foo/two": "foo-value-two", + "bar": 1234.0, + }, + }, + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 3, + Id: "cs:precise/wordpress-42", + Paths: []string{"/one", "/two", ""}, + Values: []interface{}{ + "foo-value-one", + "foo-value-two", + 1234.0, + }, + }, + }, +}, { + about: "bulk meta", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/meta/foo", + body: map[string]string{ + "precise/wordpress-42": "forty two", + "precise/foo-134": "blah", + }, + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 1, + Id: "cs:precise/foo-134", + Paths: []string{""}, + Values: []interface{}{"blah"}, + }, + metaHandlerTestPutParams{ + NumHandlers: 1, + Id: "cs:precise/wordpress-42", + Paths: []string{""}, + Values: []interface{}{"forty two"}, + }, + }, +}, { + about: "bulk meta any", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + "bar": testMetaHandler(1), + "baz/": testMetaHandler(2), + }, + }, + urlStr: "/meta/any", + body: map[string]params.MetaAnyResponse{ + "precise/wordpress-42": { + Meta: map[string]interface{}{ + "foo": "foo-wordpress-val", + "bar": "bar-wordpress-val", + }, + }, + "precise/mysql-134": { + Meta: map[string]interface{}{ + "foo": "foo-mysql-val", + "baz/blah": "baz/blah-mysql-val", + "baz/ppp": "baz/ppp-mysql-val", + }, + }, + "development/trusty/django-47": { + Meta: map[string]interface{}{ + "foo": "foo-django-val", + }, + }, + }, + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 1, + Id: "cs:development/trusty/django-47", + Paths: []string{""}, + Values: []interface{}{"foo-django-val"}, + }, + metaHandlerTestPutParams{ + NumHandlers: 3, + Id: "cs:precise/mysql-134", + Paths: []string{"", "/blah", "/ppp"}, + Values: []interface{}{"foo-mysql-val", "baz/blah-mysql-val", "baz/ppp-mysql-val"}, + }, + metaHandlerTestPutParams{ + NumHandlers: 2, + Id: "cs:precise/wordpress-42", + Paths: []string{"", ""}, + Values: []interface{}{"foo-wordpress-val", "bar-wordpress-val"}, + }, + }, +}, { + about: "field include handler with bulk meta any", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": fieldSelectHandler("handler1", 0, "field1", "field2"), + "bar": fieldSelectHandler("handler2", 0, "field3", "field4"), + "baz/": fieldSelectHandler("handler3", 1, "field5"), + }, + }, + urlStr: "/meta/any", + body: map[string]params.MetaAnyResponse{ + "precise/mysql-123": { + Meta: map[string]interface{}{ + "foo": "foo-mysql-val", + "baz/blah": "baz/blah-mysql-val", + "baz/ppp": "baz/ppp-mysql-val", + }, + }, + "precise/wordpress-42": { + Meta: map[string]interface{}{ + "foo": "foo-wordpress-val", + "bar": "bar-wordpress-val", + }, + }, + }, + expectRecordedCalls: []interface{}{ + fieldSelectHandleUpdateInfo{ + Id: "cs:precise/mysql-123", + Fields: map[string]fieldSelectHandlePutInfo{ + "field1": { + Id: "cs:precise/mysql-123", + Value: "foo-mysql-val", + }, + "field2": { + Id: "cs:precise/mysql-123", + Value: "foo-mysql-val", + }, + }, + }, + fieldSelectHandleUpdateInfo{ + Id: "cs:precise/mysql-123", + Fields: map[string]fieldSelectHandlePutInfo{ + "field5/blah": { + Id: "cs:precise/mysql-123", + Value: "baz/blah-mysql-val", + }, + "field5/ppp": { + Id: "cs:precise/mysql-123", + Value: "baz/ppp-mysql-val", + }, + }, + }, + fieldSelectHandleUpdateInfo{ + Id: "cs:precise/wordpress-42", + Fields: map[string]fieldSelectHandlePutInfo{ + "field1": { + Id: 
"cs:precise/wordpress-42", + Value: "foo-wordpress-val", + }, + "field2": { + Id: "cs:precise/wordpress-42", + Value: "foo-wordpress-val", + }, + "field3": { + Id: "cs:precise/wordpress-42", + Value: "bar-wordpress-val", + }, + "field4": { + Id: "cs:precise/wordpress-42", + Value: "bar-wordpress-val", + }, + }, + }, + }, +}, { + about: "field include handler with no HandlePut", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + }), + }, + }, + urlStr: "/precise/wordpress-23/meta/foo", + body: "something", + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "PUT not supported", + }, +}, { + about: "field include handler when HandlePut returns an error", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return errgo.WithCausef(nil, params.ErrNotFound, "message") + }, + }), + }, + }, + urlStr: "/precise/wordpress-23/meta/foo", + body: "something", + expectCode: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "message", + }, +}, { + about: "meta put to field include handler with several errors", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return errgo.WithCausef(nil, params.ErrNotFound, "foo error") + }, + Update: nopUpdate, + }), + "bar": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return errgo.New("bar error") + }, + Update: nopUpdate, + }), + "baz": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return nil + }, + Update: nopUpdate, + }), + }, + }, + urlStr: "/precise/wordpress-23/meta/any", + body: params.MetaAnyResponse{ + Meta: map[string]interface{}{ + "foo": "one", + "bar": "two", + "baz": "three", + }, + }, + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Code: params.ErrMultipleErrors, + Message: "multiple (2) errors", + Info: map[string]*params.Error{ + "foo": { + Code: params.ErrNotFound, + Message: "foo error", + }, + "bar": { + Message: "bar error", + }, + }, + }, +}, { + about: "meta/any put with update error", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo/": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + if path == "/bad" { + return fmt.Errorf("foo/bad error") + } + return nil + }, + Update: func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error { + return params.ErrBadRequest + }, + }), + "bar": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 1, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return fmt.Errorf("bar error") + }, + }), + }, + }, + urlStr: "/precise/wordpress-23/meta/any", + body: params.MetaAnyResponse{ + Meta: map[string]interface{}{ + "foo/one": "one", + "foo/two": "two", 
+ "foo/bad": "bad", + "bar": "bar", + }, + }, + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Code: params.ErrMultipleErrors, + Message: "multiple (4) errors", + Info: map[string]*params.Error{ + // All endpoints that share the same bulk key should + // get the same error, as the update pertains to all of them, + // but endpoints for which the HandlePut failed will + // not be included in that. + "foo/one": { + Code: params.ErrBadRequest, + Message: "bad request", + }, + "foo/two": { + Code: params.ErrBadRequest, + Message: "bad request", + }, + "foo/bad": { + Message: "foo/bad error", + }, + "bar": { + Message: "bar error", + }, + }, + }, +}, { + about: "bulk meta/any put with several errors", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return nil + }, + Update: nopUpdate, + }), + "bar": FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: 0, + HandlePut: func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + return errgo.WithCausef(nil, params.ErrNotFound, "bar error") + }, + Update: nopUpdate, + }), + }, + }, + resolveURL: func(id *charm.URL) (*ResolvedURL, error) { + if id.Name == "bad" { + return nil, params.ErrBadRequest + } + return &ResolvedURL{URL: *id}, nil + }, + urlStr: "/meta/any", + body: map[string]params.MetaAnyResponse{ + "precise/mysql-123": { + Meta: map[string]interface{}{ + "foo": "fooval", + "bar": "barval", + }, + }, + "bad": { + Meta: map[string]interface{}{ + "foo": "foo-wordpress-val", + "bar": "bar-wordpress-val", + }, + }, + }, + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Code: params.ErrMultipleErrors, + Message: "multiple (2) errors", + Info: map[string]*params.Error{ + "precise/mysql-123": { + Code: params.ErrMultipleErrors, + Message: "multiple (1) errors", + Info: map[string]*params.Error{ + "bar": { + Code: params.ErrNotFound, + Message: "bar error", + }, + }, + }, + "bad": { + Message: "bad request", + Code: params.ErrBadRequest, + }, + }, + }, +}, { + about: "meta put with unresolved URL", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/wordpress/meta/foo", + resolveURL: resolveTo("series", 245), + expectCode: http.StatusOK, + body: "hello", + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 1, + Id: "cs:series/wordpress-245", + Paths: []string{""}, + Values: []interface{}{"hello"}, + }, + }, +}, { + about: "bulk put with unresolved URL", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/meta/foo", + resolveURL: resolveTo("series", 245), + expectCode: http.StatusOK, + body: map[string]string{ + "wordpress": "hello", + }, + expectRecordedCalls: []interface{}{ + metaHandlerTestPutParams{ + NumHandlers: 1, + Id: "cs:series/wordpress-245", + Paths: []string{""}, + Values: []interface{}{"hello"}, + }, + }, +}, { + about: "bulk put with ids specified in URL", + handlers: Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + }, + urlStr: "/meta/foo?id=wordpress", + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "ids may not be specified in meta PUT request", + }, +}} + +func nopUpdate(id *ResolvedURL, fields 
map[string]interface{}, entries []audit.Entry) error { + return nil +} + +func (s *RouterSuite) TestRouterPut(c *gc.C) { + for i, test := range routerPutTests { + c.Logf("test %d: %s", i, test.about) + ResetRecordedCalls() + resolve := alwaysResolveURL + if test.resolveURL != nil { + resolve = test.resolveURL + } + bodyVal, err := json.Marshal(test.body) + c.Assert(err, gc.IsNil) + ctxt := alwaysContext + ctxt.resolveURL = resolve + router := New(&test.handlers, ctxt) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: router, + URL: test.urlStr, + Body: bytes.NewReader(bodyVal), + Method: "PUT", + Header: map[string][]string{ + "Content-Type": {"application/json"}, + }, + ExpectStatus: test.expectCode, + ExpectBody: test.expectBody, + }) + c.Assert(RecordedCalls(), jc.DeepEquals, test.expectRecordedCalls) + } +} + +var routerPutWithInvalidContentTests = []struct { + about string + urlStr string + contentType string + body string + expectCode int + expectBody interface{} +}{{ + about: "invalid content type with meta", + urlStr: "/precise/wordpress-23/meta/foo", + contentType: "foo/bar", + expectCode: http.StatusBadRequest, + expectBody: params.Error{ + Message: `unexpected Content-Type "foo/bar"; expected "application/json"`, + Code: params.ErrBadRequest, + }, +}, { + about: "invalid content type with bulk meta", + urlStr: "/meta/foo", + contentType: "foo/bar", + expectCode: http.StatusBadRequest, + expectBody: params.Error{ + Message: `unexpected Content-Type "foo/bar"; expected "application/json"`, + Code: params.ErrBadRequest, + }, +}, { + about: "bad JSON with meta", + urlStr: "/precise/wordpress-23/meta/foo", + contentType: "application/json", + body: `"foo`, + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `cannot unmarshal body: unexpected EOF`, + }, +}, { + about: "bad JSON with bulk meta", + urlStr: "/meta/foo", + contentType: "application/json", + body: `"foo`, + expectCode: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `cannot unmarshal body: unexpected EOF`, + }, +}} + +func (s *RouterSuite) TestRouterPutWithInvalidContent(c *gc.C) { + for i, test := range routerPutWithInvalidContentTests { + c.Logf("test %d: %s", i, test.about) + handlers := &Handlers{ + Meta: map[string]BulkIncludeHandler{ + "foo": testMetaHandler(0), + }, + } + router := New(handlers, alwaysContext) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: router, + URL: test.urlStr, + Body: strings.NewReader(test.body), + Method: "PUT", + Header: map[string][]string{ + "Content-Type": {test.contentType}, + }, + ExpectStatus: test.expectCode, + ExpectBody: test.expectBody, + }) + } +} + +func alwaysExists(id *ResolvedURL, req *http.Request) (bool, error) { + return true, nil +} + +func alwaysAuthorize(id *ResolvedURL, req *http.Request) error { + return nil +} + +func neverAuthorize(id *ResolvedURL, req *http.Request) error { + return errgo.WithCausef(nil, params.ErrUnauthorized, "bad wolf") +} + +func dischargeRequiredAuthorize(id *ResolvedURL, req *http.Request) error { + if id.String() == "cs:utopic/foo-32" { + return nil + } + return httpbakery.NewDischargeRequiredError(nil, "/", errgo.New("discharge required")) +} + +var getMetadataTests = []struct { + id *ResolvedURL + includes []string + expectResult map[string]interface{} + expectError string +}{{ + id: newResolvedURL("~charmers/precise/wordpress-34", 34), + includes: []string{}, + expectResult: map[string]interface{}{}, +}, { + id: 
newResolvedURL("~rog/precise/wordpress-2", -1), + includes: []string{"item1", "item2", "test"}, + expectResult: map[string]interface{}{ + "item1": fieldSelectHandleGetInfo{ + HandlerId: "handler1", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), + Selector: map[string]int{"item1": 1, "item2": 1}, + }, + Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), + }, + "item2": fieldSelectHandleGetInfo{ + HandlerId: "handler2", + Doc: fieldSelectQueryInfo{ + Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), + Selector: map[string]int{"item1": 1, "item2": 1}, + }, + Id: newResolvedURL("cs:~rog/precise/wordpress-2", -1), + }, + "test": &metaHandlerTestResp{ + CharmURL: "cs:~rog/precise/wordpress-2", + }, + }, +}, { + id: newResolvedURL("~rog/precise/wordpress-2", -1), + includes: []string{"mistaek"}, + expectError: `unrecognized metadata name "mistaek"`, +}} + +func (s *RouterSuite) TestGetMetadata(c *gc.C) { + for i, test := range getMetadataTests { + c.Logf("test %d: %q", i, test.includes) + router := New(&Handlers{ + Meta: map[string]BulkIncludeHandler{ + "item1": fieldSelectHandler("handler1", 0, "item1"), + "item2": fieldSelectHandler("handler2", 0, "item2"), + "test": testMetaHandler(0), + }, + }, alwaysContext) + result, err := router.GetMetadata(test.id, test.includes, nil) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(result, gc.IsNil) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(result, jc.DeepEquals, test.expectResult) + } +} + +var splitIdTests = []struct { + path string + expectURL string + expectError string +}{{ + path: "precise/wordpress-23", + expectURL: "cs:precise/wordpress-23", +}, { + path: "~user/precise/wordpress-23", + expectURL: "cs:~user/precise/wordpress-23", +}, { + path: "wordpress", + expectURL: "cs:wordpress", +}, { + path: "~user/wordpress", + expectURL: "cs:~user/wordpress", +}, { + path: "development/wordpress", + expectURL: "cs:development/wordpress", +}, { + path: "~user/development/wordpress", + expectURL: "cs:~user/development/wordpress", +}, { + path: "", + expectError: `URL has invalid charm or bundle name: ""`, +}, { + path: "~foo-bar-/wordpress", + expectError: `charm or bundle URL has invalid user name: "~foo-bar-/wordpress"`, +}} + +func (s *RouterSuite) TestSplitId(c *gc.C) { + for i, test := range splitIdTests { + c.Logf("test %d: %s", i, test.path) + url, rest, err := splitId(test.path) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(url, gc.IsNil) + c.Assert(rest, gc.Equals, "") + continue + } + c.Assert(err, gc.Equals, nil) + c.Assert(url.String(), gc.Equals, test.expectURL) + c.Assert(rest, gc.Equals, "") + + url, rest, err = splitId(test.path + "/some/more") + c.Assert(err, gc.Equals, nil) + c.Assert(url.String(), gc.Equals, test.expectURL) + c.Assert(rest, gc.Equals, "/some/more") + } +} + +var handlerKeyTests = []struct { + path string + expectKey string + expectRest string +}{{ + path: "/foo/bar", + expectKey: "foo/", + expectRest: "/bar", +}, { + path: "/foo", + expectKey: "foo", + expectRest: "", +}, { + path: "/foo/bar/baz", + expectKey: "foo/", + expectRest: "/bar/baz", +}, { + path: "/foo/", + expectKey: "foo", + expectRest: "", +}, { + path: "foo/", + expectKey: "foo", + expectRest: "", +}} + +func (s *RouterSuite) TestHandlerKey(c *gc.C) { + for i, test := range handlerKeyTests { + c.Logf("test %d: %s", i, test.path) + key, rest := handlerKey(test.path) + c.Assert(key, gc.Equals, test.expectKey) + 
c.Assert(rest, gc.Equals, test.expectRest) + } +} + +var splitPathTests = []struct { + path string + index int + expectElem string + expectRest string +}{{ + path: "/foo/bar", + expectElem: "foo", + expectRest: "/bar", +}, { + path: "foo/bar", + expectElem: "foo", + expectRest: "/bar", +}, { + path: "foo/", + expectElem: "foo", + expectRest: "/", +}, { + path: "/foo/bar/baz", + expectElem: "foo", + expectRest: "/bar/baz", +}, { + path: "/foo", + expectElem: "foo", + expectRest: "", +}, { + path: "/foo/bar/baz", + index: 4, + expectElem: "bar", + expectRest: "/baz", +}} + +func (s *RouterSuite) TestSplitPath(c *gc.C) { + for i, test := range splitPathTests { + c.Logf("test %d: %s", i, test.path) + elem, index := splitPath(test.path, test.index) + c.Assert(elem, gc.Equals, test.expectElem) + c.Assert(index, jc.LessThan, len(test.path)+1) + c.Assert(test.path[index:], gc.Equals, test.expectRest) + } +} + +func (s *RouterSuite) TestWriteJSON(c *gc.C) { + rec := httptest.NewRecorder() + type Number struct { + N int + } + err := httprequest.WriteJSON(rec, http.StatusTeapot, Number{1234}) + c.Assert(err, gc.IsNil) + c.Assert(rec.Code, gc.Equals, http.StatusTeapot) + c.Assert(rec.Body.String(), gc.Equals, `{"N":1234}`) + c.Assert(rec.Header().Get("content-type"), gc.Equals, "application/json") +} + +func (s *RouterSuite) TestWriteError(c *gc.C) { + rec := httptest.NewRecorder() + WriteError(rec, errgo.Newf("an error")) + var errResp params.Error + err := json.Unmarshal(rec.Body.Bytes(), &errResp) + c.Assert(err, gc.IsNil) + c.Assert(errResp, gc.DeepEquals, params.Error{Message: "an error"}) + c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) + + rec = httptest.NewRecorder() + errResp0 := params.Error{ + Message: "a message", + Code: "some code", + } + WriteError(rec, &errResp0) + var errResp1 params.Error + err = json.Unmarshal(rec.Body.Bytes(), &errResp1) + c.Assert(err, gc.IsNil) + c.Assert(errResp1, gc.DeepEquals, errResp0) + c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) +} + +func (s *RouterSuite) TestServeMux(c *gc.C) { + mux := NewServeMux() + mux.Handle("/data", HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return Foo{"hello"}, nil + })) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: mux, + URL: "/data", + ExpectBody: Foo{"hello"}, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: mux, + URL: "/foo", + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Message: `no handler for "/foo"`, + Code: params.ErrNotFound, + }, + }) +} + +var handlerTests = []struct { + about string + handler http.Handler + urlStr string + expectStatus int + expectBody interface{} +}{{ + about: "handleErrors, normal error", + handler: HandleErrors(func(http.ResponseWriter, *http.Request) error { + return errgo.Newf("an error") + }), + urlStr: "", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "an error", + }, +}, { + about: "handleErrors, error with code", + handler: HandleErrors(func(http.ResponseWriter, *http.Request) error { + return &params.Error{ + Message: "something went wrong", + Code: "snafu", + } + }), + urlStr: "", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "something went wrong", + Code: "snafu", + }, +}, { + about: "handleErrors, no error", + handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { + w.WriteHeader(http.StatusTeapot) + return nil + }), + expectStatus: http.StatusTeapot,
+}, { + about: "handleErrors, params error", + handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { + return params.ErrMetadataNotFound + }), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Message: "metadata not found", + Code: params.ErrMetadataNotFound, + }, +}, { + about: "handleErrors, wrapped params error", + handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { + err := params.ErrMetadataNotFound + return errgo.NoteMask(err, "annotation", errgo.Is(params.ErrMetadataNotFound)) + }), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Message: "annotation: metadata not found", + Code: params.ErrMetadataNotFound, + }, +}, { + about: "handleErrors: error - bad request", + handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { + return params.ErrBadRequest + }), + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Message: "bad request", + Code: params.ErrBadRequest, + }, +}, { + about: "handleErrors: error - forbidden", + handler: HandleErrors(func(w http.ResponseWriter, req *http.Request) error { + return params.ErrForbidden + }), + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Message: "forbidden", + Code: params.ErrForbidden, + }, +}, { + about: "handleJSON, normal case", + handler: HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return Foo{"hello"}, nil + }), + expectStatus: http.StatusOK, + expectBody: Foo{"hello"}, +}, { + about: "handleJSON, error case", + handler: HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { + return nil, errgo.Newf("an error") + }), + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: "an error", + }, +}, { + about: "NotFoundHandler", + handler: NotFoundHandler(), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Message: "not found", + Code: params.ErrNotFound, + }, +}} + +type Foo struct { + S string +} + +type ReqInfo struct { + Path string + Method string + Form url.Values `json:",omitempty"` +} + +func (s *RouterSuite) TestHandlers(c *gc.C) { + for i, test := range handlerTests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: test.handler, + URL: "", + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +var resolvedURLTests = []struct { + rurl *ResolvedURL + expectUserOwnedURL *charm.URL + expectPreferredURL *charm.URL + expectPromulgatedURL *charm.URL +}{{ + rurl: MustNewResolvedURL("~charmers/precise/wordpress-23", 4), + expectUserOwnedURL: charm.MustParseURL("~charmers/precise/wordpress-23"), + expectPreferredURL: charm.MustParseURL("precise/wordpress-4"), + expectPromulgatedURL: charm.MustParseURL("precise/wordpress-4"), +}, { + rurl: MustNewResolvedURL("~who/development/trusty/wordpress-42", -1), + expectUserOwnedURL: charm.MustParseURL("~who/development/trusty/wordpress-42"), + expectPreferredURL: charm.MustParseURL("~who/development/trusty/wordpress-42"), +}, { + rurl: MustNewResolvedURL("~charmers/precise/wordpress-23", -1), + expectUserOwnedURL: charm.MustParseURL("~charmers/precise/wordpress-23"), + expectPreferredURL: charm.MustParseURL("~charmers/precise/wordpress-23"), +}, { + rurl: MustNewResolvedURL("~charmers/development/trusty/wordpress-42", 0), + expectUserOwnedURL: charm.MustParseURL("~charmers/development/trusty/wordpress-42"), + expectPreferredURL: charm.MustParseURL("development/trusty/wordpress-0"), + 
expectPromulgatedURL: charm.MustParseURL("development/trusty/wordpress-0"), +}, { + rurl: withPreferredSeries(MustNewResolvedURL("~charmers/wordpress-42", 0), "trusty"), + expectUserOwnedURL: charm.MustParseURL("~charmers/wordpress-42"), + expectPreferredURL: charm.MustParseURL("trusty/wordpress-0"), + expectPromulgatedURL: charm.MustParseURL("wordpress-0"), +}, { + rurl: withPreferredSeries(MustNewResolvedURL("~charmers/wordpress-42", -1), "trusty"), + expectUserOwnedURL: charm.MustParseURL("~charmers/wordpress-42"), + expectPreferredURL: charm.MustParseURL("~charmers/trusty/wordpress-42"), +}} + +func withPreferredSeries(r *ResolvedURL, series string) *ResolvedURL { + r.PreferredSeries = series + return r +} + +func (*RouterSuite) TestResolvedURL(c *gc.C) { + testMethod := func(name string, rurl *ResolvedURL, m func() *charm.URL, expect *charm.URL) { + c.Logf("- method %s", name) + u := m() + c.Assert(u, jc.DeepEquals, expect) + // Ensure it's not aliased. + c.Assert(u, gc.Not(gc.Equals), &rurl.URL) + } + for i, test := range resolvedURLTests { + c.Logf("test %d: %#v", i, test.rurl) + testMethod("UserOwnedURL", test.rurl, test.rurl.UserOwnedURL, test.expectUserOwnedURL) + testMethod("PromulgatedURL", test.rurl, test.rurl.PromulgatedURL, test.expectPromulgatedURL) + + testMethod("PreferredURL", test.rurl, test.rurl.PreferredURL, test.expectPreferredURL) + } +} + +func errorIdHandler(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { + return errgo.Newf("errorIdHandler error") +} + +type idHandlerTestResp struct { + Method string + CharmURL string + Path string +} + +func testIdHandler(charmId *charm.URL, w http.ResponseWriter, req *http.Request) error { + httprequest.WriteJSON(w, http.StatusOK, idHandlerTestResp{ + CharmURL: charmId.String(), + Path: req.URL.Path, + Method: req.Method, + }) + return nil +} + +type metaHandlerTestResp struct { + CharmURL string + Path string + Flags url.Values +} + +var testMetaGetHandler = SingleIncludeHandler( + func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + if len(flags) == 0 { + flags = nil + } + return &metaHandlerTestResp{ + CharmURL: id.String(), + Path: path, + Flags: flags, + }, nil + }, +) + +type testMetaHandler int + +func (testMetaHandler) Key() interface{} { + type testMetaHandlerKey struct{} + return testMetaHandlerKey{} +} + +func (testMetaHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { + results := make([]interface{}, len(hs)) + for i, h := range hs { + _ = h.(testMetaHandler) + if len(flags) == 0 { + flags = nil + } + results[i] = &metaHandlerTestResp{ + CharmURL: id.String(), + Path: paths[i], + Flags: flags, + } + } + return results, nil +} + +type metaHandlerTestPutParams struct { + Id string + NumHandlers int + Paths []string + Values []interface{} +} + +func (testMetaHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, rawValues []*json.RawMessage, req *http.Request) []error { + // Handlers are provided in arbitrary order, + // so we order them (and their associated paths + // and values) to enable easier testing. + keys := make(sort.StringSlice, len(hs)) + for i, h := range hs { + // Sort by handler primary, path secondary. 
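+ // (For example, testMetaHandler(0) with path "foo" yields + // the composite key "0.foo".)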
+ keys[i] = fmt.Sprintf("%d.%s", int(h.(testMetaHandler)), paths[i]) + } + sort.Sort(groupSort{ + key: keys, + other: []swapper{ + sort.StringSlice(paths), + swapFunc(func(i, j int) { + rawValues[i], rawValues[j] = rawValues[j], rawValues[i] + }), + }, + }) + + values := make([]interface{}, len(rawValues)) + for i, val := range rawValues { + err := json.Unmarshal(*val, &values[i]) + if err != nil { + panic(err) + } + } + RecordCall(metaHandlerTestPutParams{ + NumHandlers: len(hs), + Id: id.String(), + Paths: paths, + Values: values, + }) + return nil +} + +// constMetaHandler returns a handler that always returns the given +// value. +func constMetaHandler(val interface{}) BulkIncludeHandler { + return SingleIncludeHandler( + func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return val, nil + }, + ) +} + +func errorMetaHandler(err error) BulkIncludeHandler { + return SingleIncludeHandler( + func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return nil, err + }, + ) +} + +type fieldSelectQueryInfo struct { + Id *ResolvedURL + Selector map[string]int +} + +type fieldSelectHandleGetInfo struct { + HandlerId string + Doc fieldSelectQueryInfo + Id *ResolvedURL + Path string + Flags url.Values +} + +type fieldSelectHandleUpdateInfo struct { + Id string + Fields map[string]fieldSelectHandlePutInfo +} + +type fieldSelectHandlePutInfo struct { + Id string + Path string + Value interface{} +} + +var queryCount int32 + +var ( + callRecordsMutex sync.Mutex + callRecords byJSON +) + +// RecordCall adds a value that can be retrieved later with +// RecordedCalls. +// +// This is used to check the parameters passed to +// handlers that do not return results. +func RecordCall(x interface{}) { + callRecordsMutex.Lock() + defer callRecordsMutex.Unlock() + callRecords = append(callRecords, x) +} + +// ResetRecordedCalls clears the call records. +func ResetRecordedCalls() { + callRecordsMutex.Lock() + defer callRecordsMutex.Unlock() + callRecords = nil +} + +// RecordedCalls returns the values passed to RecordCall, +// ordered by their JSON serialization. +func RecordedCalls() []interface{} { + callRecordsMutex.Lock() + defer callRecordsMutex.Unlock() + + sort.Sort(callRecords) + return callRecords +} + +// byJSON implements sort.Interface, ordering its +// elements lexicographically by marshaled JSON +// representation. +type byJSON []interface{} + +func (b byJSON) Less(i, j int) bool { + idata, err := json.Marshal(b[i]) + if err != nil { + panic(err) + } + jdata, err := json.Marshal(b[j]) + if err != nil { + panic(err) + } + return bytes.Compare(idata, jdata) < 0 +} + +func (b byJSON) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byJSON) Len() int { + return len(b) +} + +// fieldSelectHandler returns a BulkIncludeHandler that returns +// information about the call for testing purposes. +// When the GET handler is invoked, it returns a fieldSelectHandleGetInfo value +// with the given handlerId. Key holds the grouping key, +// and fields holds the fields to select. +// +// When the PUT handler is invoked, RecordCall is called with +// a fieldSelectHandleUpdateInfo value holding the parameters that were +// provided.
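+// +// A minimal sketch of how the tests in this file register it (the +// values here are the ones used in TestGetMetadata above): +// +// handlers := Handlers{ +// Meta: map[string]BulkIncludeHandler{ +// "item1": fieldSelectHandler("handler1", 0, "item1"), +// }, +// }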
+func fieldSelectHandler(handlerId string, key interface{}, fields ...string) BulkIncludeHandler { + query := func(id *ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { + atomic.AddInt32(&queryCount, 1) + return fieldSelectQueryInfo{ + Id: id, + Selector: selector, + }, nil + } + handleGet := func(doc interface{}, id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + if len(flags) == 0 { + flags = nil + } + return fieldSelectHandleGetInfo{ + HandlerId: handlerId, + Doc: doc.(fieldSelectQueryInfo), + Id: id, + Path: path, + Flags: flags, + }, nil + } + + handlePut := func(id *ResolvedURL, path string, val *json.RawMessage, updater *FieldUpdater, req *http.Request) error { + var vali interface{} + err := json.Unmarshal(*val, &vali) + if err != nil { + panic(err) + } + for _, field := range fields { + updater.UpdateField(field+path, fieldSelectHandlePutInfo{ + Id: id.String(), + Value: vali, + }, nil) + } + return nil + } + + update := func(id *ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error { + // We record information about how update and handlePut have + // been called by calling RecordCall with the above + // parameters. The fields will have been created by + // handlePut, and therefore are known to contain + // fieldSelectHandlePutInfo values. We convert the + // values to static types so that it is more obvious + // what the values in fieldSelectHandleUpdateInfo.Fields + // contain. + infoFields := make(map[string]fieldSelectHandlePutInfo) + for name, val := range fields { + infoFields[name] = val.(fieldSelectHandlePutInfo) + } + RecordCall(fieldSelectHandleUpdateInfo{ + Id: id.String(), + Fields: infoFields, + }) + return nil + } + + return FieldIncludeHandler(FieldIncludeHandlerParams{ + Key: key, + Query: query, + Fields: fields, + HandleGet: handleGet, + HandlePut: handlePut, + Update: update, + }) +} + +// selectiveIdHandler handles metadata by returning the +// data found in the map for the requested id. +func selectiveIdHandler(m map[string]interface{}) BulkIncludeHandler { + return SingleIncludeHandler(func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return m[id.String()], nil + }) +} + +type swapper interface { + Swap(i, j int) +} + +type swapFunc func(i, j int) + +func (f swapFunc) Swap(i, j int) { + f(i, j) +} + +// groupSort is an implementation of sort.Interface +// that keeps a set of secondary values sorted according +// to the same criteria as key. +type groupSort struct { + key sort.Interface + other []swapper +} + +func (g groupSort) Less(i, j int) bool { + return g.key.Less(i, j) +} + +func (g groupSort) Swap(i, j int) { + g.key.Swap(i, j) + for _, o := range g.other { + o.Swap(i, j) + } +} + +func (g groupSort) Len() int { + return g.key.Len() +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/singleinclude.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/singleinclude.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/singleinclude.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,52 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "encoding/json" + "net/http" + "net/url" + + "gopkg.in/errgo.v1" +) + +var _ BulkIncludeHandler = SingleIncludeHandler(nil) + +// SingleIncludeHandler implements BulkIncludeHandler for a non-batching +// metadata retrieval function that can perform a GET only. +type SingleIncludeHandler func(id *ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) + +// Key implements BulkIncludeHandler.Key. +func (h SingleIncludeHandler) Key() interface{} { + // Use a local type so that we are guaranteed that nothing + // other than SingleIncludeHandler can generate that key. + type singleMetaHandlerKey struct{} + return singleMetaHandlerKey{} +} + +// HandleGet implements BulkIncludeHandler.HandleGet. +func (h SingleIncludeHandler) HandleGet(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, flags url.Values, req *http.Request) ([]interface{}, error) { + results := make([]interface{}, len(hs)) + for i, h := range hs { + h := h.(SingleIncludeHandler) + result, err := h(id, paths[i], flags, req) + if err != nil { + // TODO(rog) include index of failed handler. + return nil, errgo.Mask(err, errgo.Any) + } + results[i] = result + } + return results, nil +} + +var errPutNotImplemented = errgo.New("PUT not implemented") + +// HandlePut implements BulkIncludeHandler.HandlePut. +func (h SingleIncludeHandler) HandlePut(hs []BulkIncludeHandler, id *ResolvedURL, paths []string, values []*json.RawMessage, req *http.Request) []error { + errs := make([]error, len(hs)) + for i := range hs { + errs[i] = errPutNotImplemented + } + return errs +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,239 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package router // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "mime" + "net/http" + "strings" + + "github.com/juju/httprequest" + "github.com/juju/loggo" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var logger = loggo.GetLogger("charmstore.internal.router") + +// WriteError can be used to write an error response. +var WriteError = errorToResp.WriteError + +// JSONHandler represents a handler that returns a JSON value. +// The provided header can be used to set response headers. +type JSONHandler func(http.Header, *http.Request) (interface{}, error) + +// ErrorHandler represents a handler that can return an error. +type ErrorHandler func(http.ResponseWriter, *http.Request) error + +// HandleJSON converts from a JSONHandler function to an http.Handler. +func HandleJSON(h JSONHandler) http.Handler { + // We can't use errorToResp.HandleJSON directly because + // we still use old-style handlers in charmstore, so we + // insert shim functions to do the conversion. + handleJSON := errorToResp.HandleJSON( + func(p httprequest.Params) (interface{}, error) { + return h(p.Response.Header(), p.Request) + }, + ) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + handleJSON(w, req, nil) + }) +} + +// HandleErrors converts from an ErrorHandler function to an http.Handler.
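+// +// A minimal sketch of its use (mirroring the handlerTests table in the +// router tests): +// +// h := HandleErrors(func(w http.ResponseWriter, req *http.Request) error { +// return errgo.Newf("an error") +// }) +// +// A request served by h then gets HTTP status 500 and a params.Error +// body with Message "an error".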
+func HandleErrors(h ErrorHandler) http.Handler { + // We can't use errorToResp.HandleErrors directly because + // we still use old-style handlers in charmstore, so we + // insert shim functions to do the conversion. + handleErrors := errorToResp.HandleErrors( + func(p httprequest.Params) error { + return h(p.Response, p.Request) + }, + ) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + handleErrors(w, req, nil) + }) +} + +var errorToResp httprequest.ErrorMapper = func(err error) (int, interface{}) { + status, body := errorToResp1(err) + logger.Infof("error response %d; %s", status, errgo.Details(err)) + return status, body +} + +func errorToResp1(err error) (int, interface{}) { + // Allow bakery errors to be returned as the bakery would + // like them, so that httpbakery.Client.Do will work. + if err, ok := errgo.Cause(err).(*httpbakery.Error); ok { + return httpbakery.ErrorToResponse(err) + } + errorBody := errorResponseBody(err) + status := http.StatusInternalServerError + switch errorBody.Code { + case params.ErrNotFound, params.ErrMetadataNotFound: + status = http.StatusNotFound + case params.ErrBadRequest, params.ErrInvalidEntity: + status = http.StatusBadRequest + case params.ErrForbidden, params.ErrEntityIdNotAllowed: + status = http.StatusForbidden + case params.ErrUnauthorized: + status = http.StatusUnauthorized + case params.ErrMethodNotAllowed: + // TODO(rog) from RFC 2616, section 14.7: An Allow header + // field MUST be present in a 405 (Method Not Allowed) + // response. + // Perhaps we should not ever return StatusMethodNotAllowed. + status = http.StatusMethodNotAllowed + case params.ErrServiceUnavailable: + status = http.StatusServiceUnavailable + } + return status, errorBody +} + +// errorResponseBody returns an appropriate error +// response body for the provided error. +func errorResponseBody(err error) *params.Error { + errResp := &params.Error{ + Message: err.Error(), + } + cause := errgo.Cause(err) + if coder, ok := cause.(errorCoder); ok { + errResp.Code = coder.ErrorCode() + } + if infoer, ok := cause.(errorInfoer); ok { + errResp.Info = infoer.ErrorInfo() + } + return errResp +} + +type errorInfoer interface { + ErrorInfo() map[string]*params.Error +} + +type errorCoder interface { + ErrorCode() params.ErrorCode +} + +// multiError holds multiple errors. +type multiError map[string]error + +func (err multiError) Error() string { + return fmt.Sprintf("multiple (%d) errors", len(err)) +} + +func (err multiError) ErrorCode() params.ErrorCode { + return params.ErrMultipleErrors +} + +func (err multiError) ErrorInfo() map[string]*params.Error { + m := make(map[string]*params.Error) + for key, err := range err { + m[key] = errorResponseBody(err) + } + return m +} + +// NotFoundHandler is like http.NotFoundHandler except it +// returns a JSON error response. +func NotFoundHandler() http.Handler { + return HandleErrors(func(w http.ResponseWriter, req *http.Request) error { + return errgo.WithCausef(nil, params.ErrNotFound, params.ErrNotFound.Error()) + }) +} + +// NewServeMux returns a new ServeMux. +func NewServeMux() *ServeMux { + return &ServeMux{http.NewServeMux()} +} + +// ServeMux is like http.ServeMux but returns +// JSON errors when pages are not found.
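+// +// A sketch of typical use (the "/data" handler here is illustrative): +// +// mux := NewServeMux() +// mux.Handle("/data", HandleJSON(func(_ http.Header, req *http.Request) (interface{}, error) { +// return "hello", nil +// })) +// +// Requests for unregistered paths then receive a params.Error with code +// params.ErrNotFound rather than http.ServeMux's plain-text 404.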
+type ServeMux struct { + *http.ServeMux +} + +func (mux *ServeMux) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.RequestURI == "*" { + mux.ServeMux.ServeHTTP(w, req) + return + } + h, pattern := mux.Handler(req) + if pattern == "" { + WriteError(w, errgo.WithCausef(nil, params.ErrNotFound, "no handler for %q", req.URL.Path)) + return + } + h.ServeHTTP(w, req) +} + +// RelativeURLPath returns a relative URL path that is lexically equivalent to +// targPath when interpreted by url.URL.ResolveReference. +// On success, the returned path will always be relative to basePath, even if basePath +// and targPath share no elements. An error is returned if targPath can't +// be made relative to basePath (for example when either basePath +// or targPath is non-absolute). +func RelativeURLPath(basePath, targPath string) (string, error) { + if !strings.HasPrefix(basePath, "/") { + return "", errgo.Newf("non-absolute base URL") + } + if !strings.HasPrefix(targPath, "/") { + return "", errgo.Newf("non-absolute target URL") + } + baseParts := strings.Split(basePath, "/") + targParts := strings.Split(targPath, "/") + + // For the purposes of dotdot, the last element of + // each path is irrelevant. We save the last part + // of the target path for later. + lastElem := targParts[len(targParts)-1] + baseParts = baseParts[0 : len(baseParts)-1] + targParts = targParts[0 : len(targParts)-1] + + // Find the common prefix between the two paths: + var i int + for ; i < len(baseParts); i++ { + if i >= len(targParts) || baseParts[i] != targParts[i] { + break + } + } + dotdotCount := len(baseParts) - i + targOnly := targParts[i:] + result := make([]string, 0, dotdotCount+len(targOnly)+1) + for i := 0; i < dotdotCount; i++ { + result = append(result, "..") + } + result = append(result, targOnly...) + result = append(result, lastElem) + return strings.Join(result, "/"), nil +} + +// TODO(mhilton) This is not an ideal place for UnmarshalJSONResponse, +// maybe it should be in httprequest somewhere? + +// UnmarshalJSONResponse unmarshals resp.Body into v. If errorF is not +// nil and resp.StatusCode indicates an error has occurred (>= 400) then +// the result of calling errorF with resp is returned. +func UnmarshalJSONResponse(resp *http.Response, v interface{}, errorF func(*http.Response) error) error { + if errorF != nil && resp.StatusCode >= http.StatusBadRequest { + return errgo.Mask(errorF(resp), errgo.Any) + } + mt, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return errgo.Notef(err, "cannot parse content type") + } + if mt != "application/json" { + return errgo.Newf("unexpected content type %q", mt) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return errgo.Notef(err, "cannot read response body") + } + if err := json.Unmarshal(body, v); err != nil { + return errgo.Notef(err, "cannot unmarshal response") + } + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/router/util_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,267 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package router_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/router" + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/url" + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +type utilSuite struct{} + +var _ = gc.Suite(&utilSuite{}) +var relativeURLTests = []struct { + base string + target string + expect string + expectError string +}{{ + expectError: "non-absolute base URL", +}, { + base: "/foo", + expectError: "non-absolute target URL", +}, { + base: "foo", + expectError: "non-absolute base URL", +}, { + base: "/foo", + target: "foo", + expectError: "non-absolute target URL", +}, { + base: "/foo", + target: "/bar", + expect: "bar", +}, { + base: "/foo/", + target: "/bar", + expect: "../bar", +}, { + base: "/foo/", + target: "/bar/", + expect: "../bar/", +}, { + base: "/foo/bar", + target: "/bar/", + expect: "../bar/", +}, { + base: "/foo/bar/", + target: "/bar/", + expect: "../../bar/", +}, { + base: "/foo/bar/baz", + target: "/foo/targ", + expect: "../targ", +}, { + base: "/foo/bar/baz/frob", + target: "/foo/bar/one/two/", + expect: "../one/two/", +}, { + base: "/foo/bar/baz/", + target: "/foo/targ", + expect: "../../targ", +}, { + base: "/foo/bar/baz/frob/", + target: "/foo/bar/one/two/", + expect: "../../one/two/", +}, { + base: "/foo/bar", + target: "/foot/bar", + expect: "../foot/bar", +}, { + base: "/foo/bar/baz/frob", + target: "/foo/bar", + expect: "../../bar", +}, { + base: "/foo/bar/baz/frob/", + target: "/foo/bar", + expect: "../../../bar", +}, { + base: "/foo/bar/baz/frob/", + target: "/foo/bar/", + expect: "../../", +}, { + base: "/foo/bar/baz", + target: "/foo/bar/other", + expect: "other", +}, { + base: "/foo/bar/", + target: "/foo/bar/", + expect: "", +}, { + base: "/foo/bar", + target: "/foo/bar", + expect: "bar", +}, { + base: "/foo/bar/", + target: "/foo/bar/", + expect: "", +}} + +func (*utilSuite) TestRelativeURL(c *gc.C) { + for i, test := range relativeURLTests { + c.Logf("test %d: %q %q", i, test.base, test.target) + // Sanity check the test itself. 
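+ // If expect is correct, resolving it against base must + // round-trip back to the original target path.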
+ if test.expectError == "" { + baseURL := &url.URL{Path: test.base} + expectURL := &url.URL{Path: test.expect} + targetURL := baseURL.ResolveReference(expectURL) + c.Check(targetURL.Path, gc.Equals, test.target, gc.Commentf("resolve reference failure")) + } + + result, err := router.RelativeURLPath(test.base, test.target) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(result, gc.Equals, "") + } else { + c.Assert(err, gc.IsNil) + c.Check(result, gc.Equals, test.expect) + } + } +} + +type errorReader struct { + err error +} + +func (e errorReader) Read([]byte) (int, error) { + return 0, e.err +} + +var unmarshalJSONResponseTests = []struct { + about string + resp *http.Response + errorF func(*http.Response) error + expectValue interface{} + expectError string + expectErrorCause error +}{{ + about: "unmarshal object", + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), + }, + errorF: func(*http.Response) error { + return errors.New("unexpected error") + }, + expectValue: "OK", +}, { + about: "error response with function", + resp: &http.Response{ + StatusCode: http.StatusBadRequest, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), + }, + errorF: func(*http.Response) error { + return errors.New("expected error") + }, + expectError: "expected error", +}, { + about: "error response without function", + resp: &http.Response{ + StatusCode: http.StatusInternalServerError, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), + }, + expectValue: "OK", +}, { + about: "unparsable content type", + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": {"application/"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), + }, + errorF: func(*http.Response) error { + return errors.New("expected error") + }, + expectError: "cannot parse content type: mime: expected token after slash", +}, { + about: "wrong content type", + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": {"text/plain"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), + }, + errorF: func(*http.Response) error { + return errors.New("expected error") + }, + expectError: `unexpected content type "text/plain"`, +}, { + about: "read error", + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: ioutil.NopCloser(errorReader{errors.New("read error")}), + }, + errorF: func(*http.Response) error { + return errors.New("unexpected error") + }, + expectError: `cannot read response body: read error`, +}, { + about: "read error", + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK`)), + }, + errorF: func(*http.Response) error { + return errors.New("unexpected error") + }, + expectError: `cannot unmarshal response: unexpected end of JSON input`, +}, { + about: "error with cause", + resp: &http.Response{ + StatusCode: http.StatusBadRequest, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: ioutil.NopCloser(strings.NewReader(`"OK"`)), + }, + errorF: func(*http.Response) error { + return errgo.WithCausef(nil, errors.New("expected error"), "an error 
message") + }, + expectError: "an error message", + expectErrorCause: errors.New("expected error"), +}} + +func (*utilSuite) TestUnmarshalJSONObject(c *gc.C) { + for i, test := range unmarshalJSONResponseTests { + c.Logf("%d. %s", i, test.about) + var v json.RawMessage + err := router.UnmarshalJSONResponse(test.resp, &v, test.errorF) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + if test.expectErrorCause != nil { + c.Assert(errgo.Cause(err), jc.DeepEquals, test.expectErrorCause) + } + continue + } + c.Assert(err, gc.IsNil) + c.Assert(string(v), jc.JSONEquals, test.expectValue) + } +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/series' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/series/series.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/series/series.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/series/series.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,66 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// Package series holds information about series supported in the +// charmstore. +package series // import "gopkg.in/juju/charmstore.v5-unstable/internal/series" + +// Distribution represents a distribution supported by the charmstore. +// Every series will belong to a distribution. +type Distribution string + +const ( + Ubuntu Distribution = "ubuntu" + CentOS Distribution = "centos" + Windows Distribution = "windows" +) + +// SeriesInfo contains the information the charmstore knows about a +// series name. +type SeriesInfo struct { + // CharmSeries holds whether this series name is for charms. + CharmSeries bool + + // Distribution holds the Distribution this series belongs to. + Distribution Distribution + + // SearchIndex holds wether charms in this series should be added + // to the search index. + SearchIndex bool + + // SearchBoost contains the relative boost given to charms in + // this series when searching. 
+ SearchBoost float64 +} + +// Series contains the data charmstore knows about series names. +var Series = map[string]SeriesInfo{ + // Bundle + "bundle": SeriesInfo{false, "", true, 1.1255}, + + // Ubuntu + "oneiric": SeriesInfo{true, Ubuntu, false, 0}, + "precise": SeriesInfo{true, Ubuntu, true, 1.1125}, + "quantal": SeriesInfo{true, Ubuntu, false, 0}, + "raring": SeriesInfo{true, Ubuntu, false, 0}, + "saucy": SeriesInfo{true, Ubuntu, false, 0}, + "trusty": SeriesInfo{true, Ubuntu, true, 1.125}, + "utopic": SeriesInfo{true, Ubuntu, false, 0}, + "vivid": SeriesInfo{true, Ubuntu, true, 1.101}, + "wily": SeriesInfo{true, Ubuntu, true, 1.102}, + + // Windows + "win2012hvr2": SeriesInfo{true, Windows, true, 1.1}, + "win2012hv": SeriesInfo{true, Windows, true, 1.1}, + "win2012r2": SeriesInfo{true, Windows, true, 1.1}, + "win2012": SeriesInfo{true, Windows, true, 1.1}, + "win7": SeriesInfo{true, Windows, true, 1.1}, + "win8": SeriesInfo{true, Windows, true, 1.1}, + "win81": SeriesInfo{true, Windows, true, 1.1}, + "win10": SeriesInfo{true, Windows, true, 1.1}, + "win2016": SeriesInfo{true, Windows, true, 1.1}, + "win2016nano": SeriesInfo{true, Windows, true, 1.1}, + + // CentOS + "centos7": SeriesInfo{true, CentOS, true, 1.1}, +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/README.md' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/bundle.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/bad/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +# This bundle has a bad relation, which will cause it to fail +# its verification.
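+# ("foo:db" in the relations below names a service that is not +# declared in the services section.)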
+services: + wordpress: + charm: wordpress + num_units: 1 + mysql: + charm: mysql + num_units: 1 +relations: + - ["foo:db", "mysql:server"] === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack/README.md' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,46 @@ +OpenStack Bundle for Juju +========================= + +Overview +-------- + +This bundle deploys a reference OpenStack architecture including all core projects: + + - OpenStack Compute + - OpenStack Networking (using Open vSwitch plugin) + - OpenStack Block Storage (backed with Ceph storage) + - OpenStack Image + - OpenStack Object Storage + - OpenStack Identity + - OpenStack Dashboard + - OpenStack Telemetry + - OpenStack Orchestration + +The charm configuration is an opinionated set for deploying OpenStack for testing on Cloud environments which support nested KVM. Instance types also need to have ephemeral storage (these block devices are used for Ceph and Swift storage). + +The Ubuntu Server Team use this bundle for testing OpenStack-on-OpenStack. + +Usage +----- + +Once deployed, the cloud can be accessed either using the OpenStack command line tools or using the OpenStack Dashboard: + + http://<openstack-dashboard-ip>/horizon + +The charms configure the 'admin' user with a password of 'openstack' by default. + +The OpenStack cloud deployed is completely clean; the charms don't attempt to configure networking or upload images. Read the OpenStack User Guide on how to configure your cloud for use: + + http://docs.openstack.org/user-guide/content/ + +Niggles +------- + +The neutron-gateway service requires a service unit with two network interfaces to provide full functionality; this part of OpenStack provides L3 routing between tenant networks and the rest of the world. It's possible to do this when testing on OpenStack by adding a second network interface to the neutron-gateway service: + + nova interface-attach --net-id <net-id> <server-id> + juju set neutron-gateway ext-port=eth1 + +Note that you will need to be running this bundle on an OpenStack cloud that supports MAC address learning of some description; this includes using OpenStack Havana with the Neutron Open vSwitch plugin. + +For actual OpenStack deployments, this service would reside on a physical server with network ports attached to both the internal network (for communication with nova-compute service units) and the external network (for inbound/outbound network access to/from instances within the cloud).
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack/bundle.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/openstack/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,202 @@ +series: precise +services: + mysql: + charm: cs:precise/mysql + constraints: mem=1G + options: + dataset-size: 50% + rabbitmq-server: + charm: cs:precise/rabbitmq-server + constraints: mem=1G + ceph: + charm: cs:precise/ceph + num_units: 3 + constraints: mem=1G + options: + monitor-count: 3 + fsid: 6547bd3e-1397-11e2-82e5-53567c8d32dc + monitor-secret: AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== + osd-devices: /dev/vdb + osd-reformat: "yes" + ephemeral-unmount: /mnt + keystone: + charm: cs:precise/keystone + constraints: mem=1G + options: + admin-password: openstack + admin-token: ubuntutesting + openstack-dashboard: + charm: cs:precise/openstack-dashboard + constraints: mem=1G + nova-compute: + charm: cs:precise/nova-compute + num_units: 3 + constraints: mem=4G + options: + config-flags: "auto_assign_floating_ip=False" + enable-live-migration: False + virt-type: kvm + nova-cloud-controller: + charm: cs:precise/nova-cloud-controller + constraints: mem=1G + options: + network-manager: Neutron + quantum-security-groups: "yes" + neutron-gateway: + charm: cs:precise/quantum-gateway + constraints: mem=1G + cinder: + charm: cs:precise/cinder + options: + block-device: "None" + constraints: mem=1G + glance: + charm: cs:precise/glance + constraints: mem=1G + swift-proxy: + charm: cs:precise/swift-proxy + constraints: mem=1G + options: + zone-assignment: manual + replicas: 3 + use-https: 'no' + swift-hash: fdfef9d4-8b06-11e2-8ac0-531c923c8fae + swift-storage-z1: + charm: cs:precise/swift-storage + constraints: mem=1G + options: + zone: 1 + block-device: vdb + overwrite: "true" + swift-storage-z2: + charm: cs:precise/swift-storage + constraints: mem=1G + options: + zone: 2 + block-device: vdb + overwrite: "true" + swift-storage-z3: + charm: cs:precise/swift-storage + constraints: mem=1G + options: + zone: 3 + block-device: vdb + overwrite: "true" + ceilometer: + charm: cs:precise/ceilometer + constraints: mem=1G + ceilometer-agent: + charm: cs:precise/ceilometer-agent + mongodb: + charm: cs:precise/mongodb + constraints: mem=1G + heat: + charm: cs:precise/heat + constraints: mem=1G + ntp: + charm: cs:precise/ntp +relations: + - - keystone:shared-db + - mysql:shared-db + - - nova-cloud-controller:shared-db + - mysql:shared-db + - - nova-cloud-controller:amqp + - rabbitmq-server:amqp + - - nova-cloud-controller:image-service + - glance:image-service + - - nova-cloud-controller:identity-service + - keystone:identity-service + - - nova-compute:cloud-compute + - nova-cloud-controller:cloud-compute + - - nova-compute:shared-db + - mysql:shared-db + - - nova-compute:amqp + - rabbitmq-server:amqp + - - nova-compute:image-service + - glance:image-service + - - nova-compute:ceph + - ceph:client + - - glance:shared-db + - mysql:shared-db + - - glance:identity-service + - keystone:identity-service + - - glance:ceph + - ceph:client + - - glance:image-service + - cinder:image-service + - - cinder:shared-db + - mysql:shared-db + - - cinder:amqp + - rabbitmq-server:amqp + - - cinder:cinder-volume-service + - nova-cloud-controller:cinder-volume-service + - - cinder:identity-service + - keystone:identity-service + - - 
cinder:ceph + - ceph:client + - - neutron-gateway:shared-db + - mysql:shared-db + - - neutron-gateway:amqp + - rabbitmq-server:amqp + - - neutron-gateway:quantum-network-service + - nova-cloud-controller:quantum-network-service + - - openstack-dashboard:identity-service + - keystone:identity-service + - - swift-proxy:identity-service + - keystone:identity-service + - - swift-proxy:swift-storage + - swift-storage-z1:swift-storage + - - swift-proxy:swift-storage + - swift-storage-z2:swift-storage + - - swift-proxy:swift-storage + - swift-storage-z3:swift-storage + - - ceilometer:identity-service + - keystone:identity-service + - - ceilometer:amqp + - rabbitmq-server:amqp + - - ceilometer:shared-db + - mongodb:database + - - ceilometer-agent:nova-ceilometer + - nova-compute:nova-ceilometer + - - ceilometer-agent:ceilometer-service + - ceilometer:ceilometer-service + - - heat:identity-service + - keystone:identity-service + - - heat:shared-db + - mysql:shared-db + - - heat:amqp + - rabbitmq-server:amqp + - - ntp:juju-info + - nova-compute:juju-info + - - ntp:juju-info + - nova-cloud-controller:juju-info + - - ntp:juju-info + - neutron-gateway:juju-info + - - ntp:juju-info + - ceph:juju-info + - - ntp:juju-info + - cinder:juju-info + - - ntp:juju-info + - keystone:juju-info + - - ntp:juju-info + - glance:juju-info + - - ntp:juju-info + - swift-proxy:juju-info + - - ntp:juju-info + - swift-storage-z1:juju-info + - - ntp:juju-info + - swift-storage-z2:juju-info + - - ntp:juju-info + - swift-storage-z3:juju-info + - - ntp:juju-info + - ceilometer:juju-info + - - ntp:juju-info + - mongodb:juju-info + - - ntp:juju-info + - rabbitmq-server:juju-info + - - ntp:juju-info + - mysql:juju-info + - - ntp:juju-info + - openstack-dashboard:juju-info + - - ntp:juju-info + - heat:juju-info === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple/README.md' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple/bundle.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-simple/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,9 @@ +services: + wordpress: + charm: wordpress + num_units: 1 + mysql: + charm: mysql + num_units: 1 +relations: + - ["wordpress:db", "mysql:server"] === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging/README.md' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +A dummy bundle === added file 
'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging/bundle.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging/bundle.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/bundle/wordpress-with-logging/bundle.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ +services: + wordpress: + charm: wordpress + num_units: 1 + mysql: + charm: mysql + num_units: 1 + logging: + charm: logging +relations: + - ["wordpress:db", "mysql:server"] + - ["wordpress:juju-info", "logging:info"] + - ["mysql:juju-info", "logging:info"] === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-broken' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joined' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/bar-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metrics' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metrics 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/collect-metrics 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changed' --- 
src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/config-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-broken' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joined' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/foo-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/install' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/meter-status-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdata' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdata 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/otherdata 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +some text === added file 
'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-broken' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-broken 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-broken 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-changed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departed' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departed 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-departed 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joined' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joined 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/self-relation-joined 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/start' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/start 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/start 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stop' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stop 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/stop 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuff' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuff 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/subdir/stuff 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +non hook related stuff === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charm' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charm 1970-01-01 
00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/hooks/upgrade-charm 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/sh +echo $0 === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +name: all-hooks +summary: "That's a dummy charm with hook scripts for all types of hooks." +description: "This is a longer description." +provides: + foo: + interface: phony +requires: + bar: + interface: fake +peers: + self: + interface: dummy === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/all-hooks/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.dir' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.dir/ignored' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.ignored' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.ignored 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/.ignored 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +# \ No newline at end of file === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/category/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +name: categories +summary: "Sample charm with a category" +description: | + That's a boring charm that has a category.
+categories: ["database"] +tags: ["openstack", "storage"] \ No newline at end of file === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.dir' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.dir/ignored' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.ignored' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.ignored 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/.ignored 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +# \ No newline at end of file === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/actions.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/actions.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/actions.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +snapshot: + description: Take a snapshot of the database. + params: + outfile: + description: The file to write out to. + type: string + default: foo.bz2 === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/build' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/build/ignored' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/config.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +options: + title: {default: My Title, description: A descriptive title used for the service., type: string} + outlook: {description: No default outlook., type: string} + username: {default: admin001, description: The name of the initial account (given admin permissions)., type: string} + skill-level: {description: A number indicating skill., type: int} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/empty' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/empty/.gitkeep' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/hooks/install' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/hooks/install 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/hooks/install 2016-03-22 15:18:22 +0000 @@ -0,0 +1,2 @@ +#!/bin/bash +echo "Done!" 
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: dummy +summary: "That's a dummy charm." +description: | + This is a longer description which + potentially contains multiple lines. === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/src' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/src/hello.c' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/src/hello.c 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/dummy/src/hello.c 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +#include <stdio.h> + +main() +{ + printf ("Hello World!\n"); + return 0; +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +name: logging +summary: "Subordinate logging test charm" +description: | + This is a longer description which + potentially contains multiple lines.
+subordinate: true +provides: + logging-client: + interface: logging +requires: + logging-directory: + interface: logging + scope: container + info: + interface: juju-info + scope: container === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/logging/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-bad-combination/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +name: multi-series +summary: multi-series test charm +description: Test charm that supports a number of series +series: + - trusty + - utopic + - vivid + - wily + - win10 +provides: + url: + interface: http + limit: + optional: false + logging-dir: + interface: logging + scope: container + monitoring-port: + interface: monitoring + scope: container +requires: + db: + interface: mysql + limit: 1 + optional: false + cache: + interface: varnish + limit: 2 + optional: true === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series-unknown/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +name: multi-series +summary: multi-series test charm +description: Test charm that supports a number of series +series: + - trusty + - utopic + - vivid + - wily + - nosuchseries +provides: + url: + interface: http + limit: + optional: false + logging-dir: + interface: logging + scope: container + monitoring-port: + interface: monitoring + scope: container +requires: + db: + interface: mysql + limit: 1 + optional: false + cache: + interface: varnish + limit: 2 + optional: true === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/actions' === added file 
'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/actions/.gitkeep' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,28 @@ +name: multi-series +summary: multi-series test charm +description: Test charm that supports a number of series +series: + - trusty + - utopic + - vivid + - wily +provides: + url: + interface: http + limit: + optional: false + logging-dir: + interface: logging + scope: container + monitoring-port: + interface: monitoring + scope: container +requires: + db: + interface: mysql + limit: 1 + optional: false + cache: + interface: varnish + limit: 2 + optional: true === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/multi-series/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: mysql +summary: "Database engine" +description: "A pretty popular database" +provides: + server: mysql === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/mysql/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +name: riak +summary: "K/V storage engine" +description: "Scalable K/V Store in Erlang with Clocks :-)" +provides: + endpoint: + interface: http + admin: + interface: http +peers: + ring: + interface: riak === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/revision' --- 
src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/riak/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +7 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/terms/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,6 @@ +name: terms +summary: "Sample charm with terms and conditions" +description: | + That's a boring charm that requires certain terms. +tags: ["openstack", "storage"] +terms: ["terms-1/1", "terms-2/5"] === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/metadata.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,5 @@ +name: varnish +summary: "Database engine" +description: "Another popular database" +provides: + webcache: varnish === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/varnish/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +1 \ No newline at end of file === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress' === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/actions' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/actions/.gitkeep' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/config.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/config.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/config.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +options: + blog-title: {default: My Title, description: A descriptive title used for the blog., type: string} + === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/hooks' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/hooks/.gitkeep' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/metadata.yaml' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/metadata.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/metadata.yaml 
2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +name: wordpress +summary: "Blog engine" +description: "A pretty popular blog engine" +provides: + url: + interface: http + limit: + optional: false + logging-dir: + interface: logging + scope: container + monitoring-port: + interface: monitoring + scope: container +requires: + db: + interface: mysql + limit: 1 + optional: false + cache: + interface: varnish + limit: 2 + optional: true === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/revision' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/revision 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm-repo/quantal/wordpress/revision 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +3 \ No newline at end of file === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/charm.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + +import ( + "gopkg.in/juju/charmrepo.v2-unstable/testing" +) + +var Charms = testing.NewRepo("charm-repo", "quantal") === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/elasticsearch.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/elasticsearch.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/elasticsearch.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,76 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + +import ( + "os" + "time" + + "github.com/juju/utils" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" +) + +// ElasticSearchSuite defines a test suite that connects to an +// elastic-search server. The address of the server depends on the value +// of the JUJU_TEST_ELASTICSEARCH environment variable, which can be +// "none" (do not start or connect to a server) or host:port holding the +// address and port of the server to connect to. If +// JUJU_TEST_ELASTICSEARCH is not specified then localhost:9200 will be +// used. +type ElasticSearchSuite struct { + ES *elasticsearch.Database + indexes []string + TestIndex string +} + +var jujuTestElasticSearch = os.Getenv("JUJU_TEST_ELASTICSEARCH") + +func (s *ElasticSearchSuite) SetUpSuite(c *gc.C) { + serverAddr := jujuTestElasticSearch + switch serverAddr { + case "none": + c.Skip("elasticsearch disabled") + case "": + serverAddr = ":9200" + } + s.ES = &elasticsearch.Database{serverAddr} +} + +func (s *ElasticSearchSuite) TearDownSuite(c *gc.C) { +} + +func (s *ElasticSearchSuite) SetUpTest(c *gc.C) { + s.TestIndex = s.NewIndex(c) +} + +func (s *ElasticSearchSuite) TearDownTest(c *gc.C) { + for _, index := range s.indexes { + s.ES.DeleteIndex(index + "*") + s.ES.DeleteDocument(".versions", "version", index) + } + s.indexes = nil +} + +// NewIndex creates a new index name and ensures that it will be cleaned up at the end of the test.
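+// A test that needs an extra index beyond s.TestIndex can, for example (a sketch): +// +//	index := s.NewIndex(c) +// +// and rely on TearDownTest to delete it.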
+func (s *ElasticSearchSuite) NewIndex(c *gc.C) string { + uuid, err := utils.NewUUID() + c.Assert(err, gc.IsNil) + id := time.Now().Format("20060102") + uuid.String() + s.indexes = append(s.indexes, id) + return id +} + +// LoadESConfig loads a canned test configuration into the specified index. +func (s *ElasticSearchSuite) LoadESConfig(index string, settings, mapping interface{}) error { + if err := s.ES.PutIndex(index, settings); err != nil { + return err + } + if err := s.ES.PutMapping(index, "entity", mapping); err != nil { + return err + } + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/entities.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/entities.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/entities.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,139 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + +import ( + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/mgo.v2" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" +) + +// EntityBuilder provides a convenient way to describe a mongodoc.Entity +// for tests that is correctly formed and contains the desired +// information. +type EntityBuilder struct { + entity *mongodoc.Entity +} + +// NewEntity creates a new EntityBuilder for the provided URL. +func NewEntity(url string) EntityBuilder { + URL := charm.MustParseURL(url) + return EntityBuilder{ + entity: &mongodoc.Entity{ + URL: URL, + Name: URL.Name, + Series: URL.Series, + Revision: URL.Revision, + User: URL.User, + BaseURL: baseURL(URL), + PromulgatedRevision: -1, + }, + } +} + +func copyURL(id *charm.URL) *charm.URL { + if id == nil { + return nil + } + id1 := *id + return &id1 +} + +func (b EntityBuilder) copy() EntityBuilder { + e := *b.entity + e.PromulgatedURL = copyURL(e.PromulgatedURL) + e.URL = copyURL(e.URL) + e.BaseURL = copyURL(e.BaseURL) + return EntityBuilder{&e} +} + +// WithPromulgatedURL sets the PromulgatedURL and PromulgatedRevision of the +// entity being built. +func (b EntityBuilder) WithPromulgatedURL(url string) EntityBuilder { + b = b.copy() + if url == "" { + b.entity.PromulgatedURL = nil + b.entity.PromulgatedRevision = -1 + } else { + b.entity.PromulgatedURL = charm.MustParseURL(url) + b.entity.PromulgatedRevision = b.entity.PromulgatedURL.Revision + } + return b +} + +// Build creates a mongodoc.Entity from the EntityBuilder. +func (b EntityBuilder) Build() *mongodoc.Entity { + return b.copy().entity +} + +// AssertEntity checks that db contains an entity that matches expect. +func AssertEntity(c *gc.C, db *mgo.Collection, expect *mongodoc.Entity) { + var entity mongodoc.Entity + err := db.FindId(expect.URL).One(&entity) + c.Assert(err, gc.IsNil) + c.Assert(&entity, jc.DeepEquals, expect) +} + +// BaseEntityBuilder provides a convenient way to describe a +// mongodoc.BaseEntity for tests that is correctly formed and contains the +// desired information. +type BaseEntityBuilder struct { + baseEntity *mongodoc.BaseEntity +} + +// NewBaseEntity creates a new BaseEntityBuilder for the provided URL.
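+// A typical use chains the builder methods defined below, for example (a sketch): +// +//	baseEntity := NewBaseEntity("cs:~bob/wordpress").WithPromulgated(true).Build()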
+func NewBaseEntity(url string) BaseEntityBuilder { + URL := charm.MustParseURL(url) + return BaseEntityBuilder{ + baseEntity: &mongodoc.BaseEntity{ + URL: URL, + Name: URL.Name, + User: URL.User, + }, + } +} + +func (b BaseEntityBuilder) copy() BaseEntityBuilder { + e := *b.baseEntity + e.URL = copyURL(e.URL) + return BaseEntityBuilder{&e} +} + +// WithPromulgated sets the promulgated flag on the BaseEntity. +func (b BaseEntityBuilder) WithPromulgated(promulgated bool) BaseEntityBuilder { + b = b.copy() + b.baseEntity.Promulgated = mongodoc.IntBool(promulgated) + return b +} + +// WithACLs sets the non-development ACLs field on the BaseEntity. +func (b BaseEntityBuilder) WithACLs(acls mongodoc.ACL) BaseEntityBuilder { + b = b.copy() + b.baseEntity.ACLs = acls + return b +} + +// Build creates a mongodoc.BaseEntity from the BaseEntityBuilder. +func (b BaseEntityBuilder) Build() *mongodoc.BaseEntity { + return b.copy().baseEntity +} + +// AssertBaseEntity checks that db contains a base entity that matches expect. +func AssertBaseEntity(c *gc.C, db *mgo.Collection, expect *mongodoc.BaseEntity) { + var baseEntity mongodoc.BaseEntity + err := db.FindId(expect.URL).One(&baseEntity) + c.Assert(err, gc.IsNil) + c.Assert(&baseEntity, jc.DeepEquals, expect) +} + +func baseURL(url *charm.URL) *charm.URL { + baseURL := *url + baseURL.Series = "" + baseURL.Revision = -1 + baseURL.Channel = "" + return &baseURL +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/flag.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/flag.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/flag.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,25 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + +import ( + "flag" + "os" + + jujutesting "github.com/juju/testing" +) + +var noTestMongoJs *bool = flag.Bool("notest-mongojs", false, "Disable MongoDB tests that require JavaScript") + +func init() { + if os.Getenv("JUJU_NOTEST_MONGOJS") == "1" || jujutesting.MgoServer.WithoutV8 { + *noTestMongoJs = true + } +} + +// MongoJSEnabled reports whether testing code should run tests +// that rely on JavaScript inside MongoDB. +func MongoJSEnabled() bool { + return !*noTestMongoJs +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting/hash.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting/hash.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting/hash.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,60 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +// TODO frankban: remove this package after updating entities in the production +// db with their SHA256 hash value. Entities are updated by running the +// cshash256 command. 
+ +package hashtesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting" + +import ( + "time" + + jujutesting "github.com/juju/testing" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +func CheckSHA256Laziness(c *gc.C, store *charmstore.Store, id *charm.URL, check func()) { + updated := make(chan struct{}, 1) + + // Patch charmstore.UpdateEntitySHA256 so that we can know whether it has + // been called or not. + original := charmstore.UpdateEntitySHA256 + restore := jujutesting.PatchValue( + &charmstore.UpdateEntitySHA256, + func(store *charmstore.Store, id *router.ResolvedURL, sum256 string) { + original(store, id, sum256) + updated <- struct{}{} + }) + defer restore() + + // Update the entity removing the SHA256 hash. + store.DB.Entities().UpdateId(id, bson.D{{ + "$set", bson.D{{"blobhash256", ""}}, + }}) + + // Run the code under test. + check() + + // Ensure the db is updated asynchronously. + select { + case <-updated: + case <-time.After(5 * time.Second): + c.Fatalf("timed out waiting for update") + } + + // Run the code under test again. + check() + + // We should not update the SHA256 the second time. + select { + case <-updated: + c.Fatalf("update called twice") + case <-time.After(10 * time.Millisecond): + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/json.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/json.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/json.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,26 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + +import ( + "bytes" + "encoding/json" + "io" +) + +// MustMarshalJSON marshals the specified value using json.Marshal and +// returns the corresponding byte slice. If there is an error marshalling +// the value then MustMarshalJSON will panic. +func MustMarshalJSON(v interface{}) []byte { + data, err := json.Marshal(v) + if err != nil { + panic(err) + } + return data +} + +// JSONReader creates an io.Reader which can read the Marshalled value of v. +func JSONReader(v interface{}) io.Reader { + return bytes.NewReader(MustMarshalJSON(v)) +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats/stats.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats/stats.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats/stats.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,54 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package stats // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats" + +import ( + "time" + + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" +) + +// CheckCounterSum checks that statistics are properly collected. +// It retries a few times as they are generally collected in the background.
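+// A typical call from a test looks like the following sketch (the key layout +// depends on the statistics recorded by the code under test): +// +//	stats.CheckCounterSum(c, store, []string{"archive-download", "utopic", "wordpress"}, false, 1)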
+func CheckCounterSum(c *gc.C, store *charmstore.Store, key []string, prefix bool, expected int64) { + var sum int64 + for retry := 0; retry < 10; retry++ { + time.Sleep(100 * time.Millisecond) + req := charmstore.CounterRequest{ + Key: key, + Prefix: prefix, + } + cs, err := store.Counters(&req) + c.Assert(err, gc.IsNil) + if sum = cs[0].Count; sum == expected { + if expected == 0 && retry < 2 { + continue // Wait a bit to make sure. + } + return + } + } + c.Errorf("counter sum for %#v is %d, want %d", key, sum, expected) +} + +// CheckSearchTotalDownloads checks that the search index is properly updated. +// It retries a few times as they are generally updated in the background. +func CheckSearchTotalDownloads(c *gc.C, store *charmstore.Store, id *charm.URL, expected int64) { + var doc *charmstore.SearchDoc + for retry := 0; retry < 10; retry++ { + var err error + time.Sleep(100 * time.Millisecond) + doc, err = store.ES.GetSearchDocument(id) + c.Assert(err, gc.IsNil) + if doc.TotalDownloads == expected { + if expected == 0 && retry < 2 { + continue // Wait a bit to make sure. + } + return + } + } + c.Errorf("total downloads for %#v is %d, want %d", id, doc.TotalDownloads, expected) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/suite.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/suite.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/suite.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,34 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package storetesting // import "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + +import ( + jujutesting "github.com/juju/testing" + gc "gopkg.in/check.v1" +) + +type IsolatedMgoESSuite struct { + jujutesting.IsolatedMgoSuite + ElasticSearchSuite +} + +func (s *IsolatedMgoESSuite) SetUpSuite(c *gc.C) { + s.IsolatedMgoSuite.SetUpSuite(c) + s.ElasticSearchSuite.SetUpSuite(c) +} + +func (s *IsolatedMgoESSuite) TearDownSuite(c *gc.C) { + s.ElasticSearchSuite.TearDownSuite(c) + s.IsolatedMgoSuite.TearDownSuite(c) +} + +func (s *IsolatedMgoESSuite) SetUpTest(c *gc.C) { + s.IsolatedMgoSuite.SetUpTest(c) + s.ElasticSearchSuite.SetUpTest(c) +} + +func (s *IsolatedMgoESSuite) TearDownTest(c *gc.C) { + s.ElasticSearchSuite.TearDownTest(c) + s.IsolatedMgoSuite.TearDownTest(c) +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,275 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "net/http" + "net/url" + + "github.com/juju/httprequest" + "github.com/juju/loggo" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/mempool" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/v5" +) + +var logger = loggo.GetLogger("charmstore.internal.v4") + +const ( + PromulgatorsGroup = v5.PromulgatorsGroup + UsernameAttr = v5.UsernameAttr + DelegatableMacaroonExpiry = v5.DelegatableMacaroonExpiry + DefaultIcon = v5.DefaultIcon + ArchiveCachePublicMaxAge = v5.ArchiveCachePublicMaxAge +) + +// reqHandlerPool holds a cache of ReqHandlers to save +// on allocation time. When a handler is done with, +// it is put back into the pool. +var reqHandlerPool = mempool.Pool{ + New: func() interface{} { + return newReqHandler() + }, +} + +type Handler struct { + *v5.Handler +} + +type ReqHandler struct { + *v5.ReqHandler +} + +func New(pool *charmstore.Pool, config charmstore.ServerParams) Handler { + return Handler{ + Handler: v5.New(pool, config), + } +} + +func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // When requests in this handler use router.RelativeURL, we want + // the "absolute path" there to be interpreted relative to the + // root of this handler, not the absolute root of the web server, + // which may be arbitrarily many levels up. + req.RequestURI = req.URL.Path + + rh, err := h.NewReqHandler() + if err != nil { + router.WriteError(w, err) + return + } + defer rh.Close() + rh.ServeHTTP(w, req) +} + +func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams) charmstore.HTTPCloseHandler { + return New(pool, config) +} + +// NewReqHandler fetches a new instance of ReqHandler +// from h.Pool and returns it. The ReqHandler must +// be closed when finished with. +func (h *Handler) NewReqHandler() (ReqHandler, error) { + store, err := h.Pool.RequestStore() + if err != nil { + if errgo.Cause(err) == charmstore.ErrTooManySessions { + return ReqHandler{}, errgo.WithCausef(err, params.ErrServiceUnavailable, "") + } + return ReqHandler{}, errgo.Mask(err) + } + rh := reqHandlerPool.Get().(ReqHandler) + rh.Handler = h.Handler + rh.Store = store + return rh, nil +} + +func newReqHandler() ReqHandler { + h := ReqHandler{ + ReqHandler: new(v5.ReqHandler), + } + resolveId := h.ResolvedIdHandler + authId := h.AuthIdHandler + handlers := v5.RouterHandlers(h.ReqHandler) + handlers.Meta["charm-related"] = h.EntityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces") + handlers.Meta["revision-info"] = router.SingleIncludeHandler(h.metaRevisionInfo) + handlers.Id["expand-id"] = resolveId(authId(h.serveExpandId)) + + h.Router = router.New(handlers, h) + return h +} + +// ResolveURL implements router.Context.ResolveURL, +// ensuring that any resulting ResolvedURL always +// has a non-empty PreferredSeries field. +func (h ReqHandler) ResolveURL(url *charm.URL) (*router.ResolvedURL, error) { + return resolveURL(h.Store, url) +} + +// resolveURL implements URL resolving for the ReqHandler. +// It's defined as a separate function so it can be more +// easily unit-tested.
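+// For example, a sketch of a call from a test: +// +//	rurl, err := resolveURL(store, charm.MustParseURL("cs:wordpress")) +// +// When the URL names no series, the returned ResolvedURL takes its +// PreferredSeries from the entity's first supported series.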
+func resolveURL(store *charmstore.Store, url *charm.URL) (*router.ResolvedURL, error) { + entity, err := store.FindBestEntity(url, "_id", "promulgated-revision", "supportedseries") + if err != nil && errgo.Cause(err) != params.ErrNotFound { + return nil, errgo.Mask(err) + } + if errgo.Cause(err) == params.ErrNotFound { + return nil, noMatchingURLError(url) + } + rurl := &router.ResolvedURL{ + URL: *entity.URL, + PromulgatedRevision: -1, + Development: url.Channel == charm.DevelopmentChannel, + } + if url.User == "" { + rurl.PromulgatedRevision = entity.PromulgatedRevision + } + if rurl.URL.Series != "" { + return rurl, nil + } + if url.Series != "" { + rurl.PreferredSeries = url.Series + return rurl, nil + } + if len(entity.SupportedSeries) == 0 { + return nil, errgo.Newf("entity %q has no supported series", &rurl.URL) + } + rurl.PreferredSeries = entity.SupportedSeries[0] + return rurl, nil +} + +// Close closes the ReqHandler. This should always be called when the +// ReqHandler is done with. +func (h ReqHandler) Close() { + h.Store.Close() + h.Reset() + reqHandlerPool.Put(h) +} + +// StatsEnabled reports whether statistics should be gathered for +// the given HTTP request. +func StatsEnabled(req *http.Request) bool { + return v5.StatsEnabled(req) +} + +func noMatchingURLError(url *charm.URL) error { + return errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %q", url) +} + +// GET id/meta/revision-info +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info +func (h *ReqHandler) metaRevisionInfo(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + searchURL := id.PreferredURL() + searchURL.Revision = -1 + + q := h.Store.EntitiesQuery(searchURL) + if id.PromulgatedRevision != -1 { + q = q.Sort("-promulgated-revision") + } else { + q = q.Sort("-revision") + } + var docs []*mongodoc.Entity + if err := q.Select(bson.D{{"_id", 1}, {"promulgated-url", 1}, {"supportedseries", 1}, {"development", 1}}).All(&docs); err != nil { + return "", errgo.Notef(err, "cannot get ids") + } + + if len(docs) == 0 { + return "", errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) + } + specifiedSeries := id.URL.Series + if specifiedSeries == "" { + specifiedSeries = id.PreferredSeries + } + var response params.RevisionInfoResponse + expandMultiSeries(docs, func(series string, doc *mongodoc.Entity) error { + if specifiedSeries != series { + return nil + } + url := doc.PreferredURL(id.PromulgatedRevision != -1) + url.Series = series + response.Revisions = append(response.Revisions, url) + return nil + }) + return &response, nil +} + +// GET id/expand-id +// https://docs.google.com/a/canonical.com/document/d/1TgRA7jW_mmXoKH3JiwBbtPvQu7WiM6XMrz1wSrhTMXw/edit#bookmark=id.4xdnvxphb2si +func (h *ReqHandler) serveExpandId(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { + baseURL := id.PreferredURL() + baseURL.Revision = -1 + baseURL.Series = "" + + // baseURL now represents the base URL of the given id; + // it will be a promulgated URL iff the original URL was + // specified without a user, which will cause EntitiesQuery + // to return entities that match appropriately. + + // Retrieve all the entities with the same base URL. + // Note that we don't do any permission checking of the returned URLs. + // This is because we know that the user is allowed to read at + // least the resolved URL passed into serveExpandId. 
+ // If this does not specify "development", then no development + // revisions will be chosen, so the single ACL already checked + // is sufficient. If it *does* specify "development", then we assume + // that the development ACLs are more restrictive than the + // non-development ACLs, and given that, we can allow all + // the URLs. + q := h.Store.EntitiesQuery(baseURL).Select(bson.D{{"_id", 1}, {"promulgated-url", 1}, {"development", 1}, {"supportedseries", 1}}) + if id.PromulgatedRevision != -1 { + q = q.Sort("-series", "-promulgated-revision") + } else { + q = q.Sort("-series", "-revision") + } + var docs []*mongodoc.Entity + err := q.All(&docs) + if err != nil && errgo.Cause(err) != mgo.ErrNotFound { + return errgo.Mask(err) + } + + // Collect all the expanded identifiers for each entity. + response := make([]params.ExpandedId, 0, len(docs)) + expandMultiSeries(docs, func(series string, doc *mongodoc.Entity) error { + url := doc.PreferredURL(id.PromulgatedRevision != -1) + url.Series = series + response = append(response, params.ExpandedId{Id: url.String()}) + return nil + }) + + // Write the response in JSON format. + return httprequest.WriteJSON(w, http.StatusOK, response) +} + +// expandMultiSeries calls the provided append function once for every +// supported series of each entry in the given entities slice. Each call +// receives the series in question and a pointer to the entity. This +// function will only return an error if the append function returns an +// error; such an error will be returned without masking the cause. +// +// Note that the SupportedSeries field of the entities must have +// been populated for this to work. +func expandMultiSeries(entities []*mongodoc.Entity, append func(series string, doc *mongodoc.Entity) error) error { + // TODO(rog) make this concurrent. + for _, entity := range entities { + if entity.URL.Series != "" { + if err := append(entity.URL.Series, entity); err != nil { + return errgo.Mask(err, errgo.Any) + } + continue + } + for _, series := range entity.SupportedSeries { + if err := append(series, entity); err != nil { + return errgo.Mask(err, errgo.Any) + } + } + } + return nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/api_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3577 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
+ +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "archive/zip" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/elasticsearch" + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v4" +) + +var testPublicKey = bakery.PublicKey{ + bakery.Key{ + 0xf6, 0xfb, 0xcf, 0x67, 0x8c, 0x5a, 0xb6, 0x52, + 0xa9, 0x23, 0x4d, 0x7e, 0x01, 0xf5, 0x0a, 0x25, + 0xc4, 0x63, 0x69, 0x54, 0x42, 0x62, 0xaf, 0x62, + 0xbe, 0x40, 0x6a, 0x0b, 0xe2, 0x9a, 0xb0, 0x5f, + }, +} + +const ( + testUsername = "test-user" + testPassword = "test-password" +) + +var es *elasticsearch.Database = &elasticsearch.Database{"localhost:9200"} +var si *charmstore.SearchIndex = &charmstore.SearchIndex{ + Database: es, + Index: "cs", +} + +type APISuite struct { + commonSuite +} + +func (s *APISuite) SetUpSuite(c *gc.C) { + s.enableIdentity = true + s.commonSuite.SetUpSuite(c) +} + +var newResolvedURL = router.MustNewResolvedURL + +func newResolvedURLWithPreferredSeries(urlStr string, promulgatedRev int, series string) *router.ResolvedURL { + rurl := newResolvedURL(urlStr, promulgatedRev) + rurl.PreferredSeries = series + return rurl +} + +var _ = gc.Suite(&APISuite{}) + +// patchLegacyDownloadCountsEnabled sets LegacyDownloadCountsEnabled to the +// given value for the duration of the test. +// TODO (frankban): remove this function when removing the legacy counts logic. +func patchLegacyDownloadCountsEnabled(addCleanup func(jujutesting.CleanupFunc), value bool) { + original := charmstore.LegacyDownloadCountsEnabled + charmstore.LegacyDownloadCountsEnabled = value + addCleanup(func(*gc.C) { + charmstore.LegacyDownloadCountsEnabled = original + }) +} + +type metaEndpointExpectedValueGetter func(*charmstore.Store, *router.ResolvedURL) (interface{}, error) + +type metaEndpoint struct { + // name names the meta endpoint. + name string + + // exclusive specifies whether the endpoint is + // valid for charms only (charmOnly), bundles only (bundleOnly) + // or both (zero). + exclusive int + + // get returns the expected data for the endpoint. + get metaEndpointExpectedValueGetter + + // checkURL holds one URL to sanity check data against. + checkURL *router.ResolvedURL + + // assertCheckData holds a function that will be used to check that + // the get function returns sane data for checkURL.
+ assertCheckData func(c *gc.C, data interface{}) +} + +const ( + charmOnly = iota + 1 + bundleOnly +) + +var metaEndpoints = []metaEndpoint{{ + name: "charm-config", + exclusive: charmOnly, + get: entityFieldGetter("CharmConfig"), + checkURL: newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*charm.Config).Options["blog-title"].Default, gc.Equals, "My Title") + }, +}, { + name: "charm-metadata", + exclusive: charmOnly, + get: entityFieldGetter("CharmMeta"), + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*charm.Meta).Summary, gc.Equals, "Blog engine") + }, +}, { + name: "bundle-metadata", + exclusive: bundleOnly, + get: entityFieldGetter("BundleData"), + checkURL: newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*charm.BundleData).Services["wordpress"].Charm, gc.Equals, "wordpress") + }, +}, { + name: "bundle-unit-count", + exclusive: bundleOnly, + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.BundleData == nil { + return nil + } + return params.BundleCount{*entity.BundleUnitCount} + }), + checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(params.BundleCount).Count, gc.Equals, 2) + }, +}, { + name: "bundle-machine-count", + exclusive: bundleOnly, + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.BundleData == nil { + return nil + } + return params.BundleCount{*entity.BundleMachineCount} + }), + checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(params.BundleCount).Count, gc.Equals, 2) + }, +}, { + name: "charm-actions", + exclusive: charmOnly, + get: entityFieldGetter("CharmActions"), + checkURL: newResolvedURL("~charmers/precise/dummy-10", 10), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*charm.Actions).ActionSpecs["snapshot"].Description, gc.Equals, "Take a snapshot of the database.") + }, +}, { + name: "archive-size", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + return &params.ArchiveSizeResponse{ + Size: entity.Size, + } + }), + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: entitySizeChecker, +}, { + name: "hash", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + return &params.HashResponse{ + Sum: entity.BlobHash, + } + }), + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*params.HashResponse).Sum, gc.Not(gc.Equals), "") + }, +}, { + name: "hash256", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + return &params.HashResponse{ + Sum: entity.BlobHash256, + } + }), + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.(*params.HashResponse).Sum, gc.Not(gc.Equals), "") + }, +}, { + name: "manifest", + get: zipGetter(func(r *zip.Reader) interface{} { + var manifest []params.ManifestFile + for _, file := range r.File { + if strings.HasSuffix(file.Name, "/") { + continue + } + manifest = append(manifest, params.ManifestFile{ + Name: file.Name, + Size: int64(file.UncompressedSize64), + }) + } + return manifest + }), + checkURL:
newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.([]params.ManifestFile), gc.Not(gc.HasLen), 0) + }, +}, { + name: "archive-upload-time", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + return ¶ms.ArchiveUploadTimeResponse{ + UploadTime: entity.UploadTime.UTC(), + } + }), + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + response := data.(*params.ArchiveUploadTimeResponse) + c.Assert(response.UploadTime, gc.Not(jc.Satisfies), time.Time.IsZero) + c.Assert(response.UploadTime.Location(), gc.Equals, time.UTC) + }, +}, { + name: "revision-info", + get: func(store *charmstore.Store, id *router.ResolvedURL) (interface{}, error) { + ref := &id.URL + if id.PromulgatedRevision != -1 { + ref = id.PreferredURL() + } + return params.RevisionInfoResponse{ + []*charm.URL{ref}, + }, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-99", 99), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.DeepEquals, params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:precise/wordpress-99"), + }}) + }, +}, { + name: "charm-related", + exclusive: charmOnly, + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + // The charms we use for those tests are not related each other. + // Charm relations are independently tested in relations_test.go. + if url.URL.Series == "bundle" { + return nil, nil + } + return ¶ms.RelatedResponse{}, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.FitsTypeOf, (*params.RelatedResponse)(nil)) + }, +}, { + name: "bundles-containing", + exclusive: charmOnly, + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + // The charms we use for those tests are not included in any bundle. + // Charm/bundle relations are tested in relations_test.go. + if url.URL.Series == "bundle" { + return nil, nil + } + return []*params.MetaAnyResponse{}, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.FitsTypeOf, []*params.MetaAnyResponse(nil)) + }, +}, { + name: "stats", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + // The entities used for those tests were never downloaded. 
+ return &params.StatsResponse{ + ArchiveDownloadCount: 0, + }, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.FitsTypeOf, (*params.StatsResponse)(nil)) + }, +}, { + name: "extra-info", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return map[string]string{ + "key": "value " + url.URL.String(), + }, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.DeepEquals, map[string]string{ + "key": "value cs:~charmers/precise/wordpress-23", + }) + }, +}, { + name: "extra-info/key", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return "value " + url.URL.String(), nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, "value cs:~charmers/precise/wordpress-23") + }, +}, { + name: "common-info", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return map[string]string{ + "key": "value " + url.URL.String(), + }, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.DeepEquals, map[string]string{ + "key": "value cs:~charmers/precise/wordpress-23", + }) + }, +}, { + name: "common-info/key", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return "value " + url.URL.String(), nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, "value cs:~charmers/precise/wordpress-23") + }, +}, { + name: "perm", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + e, err := store.FindBaseEntity(&url.URL) + if err != nil { + return nil, err + } + return params.PermResponse{ + Read: e.ACLs.Read, + Write: e.ACLs.Write, + }, nil + }, + checkURL: newResolvedURL("~bob/utopic/wordpress-2", -1), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.DeepEquals, params.PermResponse{ + Read: []string{params.Everyone, "bob"}, + Write: []string{"bob"}, + }) + }, +}, { + name: "perm/read", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + e, err := store.FindBaseEntity(&url.URL) + if err != nil { + return nil, err + } + return e.ACLs.Read, nil + }, + checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.DeepEquals, []string{params.Everyone, "bob"}) + }, +}, { + name: "tags", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.URL.Series == "bundle" { + return params.TagsResponse{entity.BundleData.Tags} + } + if len(entity.CharmMeta.Tags) > 0 { + return params.TagsResponse{entity.CharmMeta.Tags} + } + return params.TagsResponse{entity.CharmMeta.Categories} + }), + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, jc.DeepEquals, params.TagsResponse{ + Tags: []string{"openstack", "storage"}, + }) + }, +}, { + name: "id-user", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdUserResponse{url.PreferredURL().User}, nil + }, + checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), 
+ assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdUserResponse{"bob"}) + }, +}, { + name: "id-series", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdSeriesResponse{url.URL.Series}, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdSeriesResponse{"utopic"}) + }, +}, { + name: "id-name", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdNameResponse{url.URL.Name}, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdNameResponse{"category"}) + }, +}, { + name: "id-revision", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdRevisionResponse{url.PreferredURL().Revision}, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdRevisionResponse{2}) + }, +}, { + name: "id", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + id := url.PreferredURL() + return params.IdResponse{ + Id: id, + User: id.User, + Series: id.Series, + Name: id.Name, + Revision: id.Revision, + }, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, jc.DeepEquals, params.IdResponse{ + Id: charm.MustParseURL("cs:utopic/category-2"), + User: "", + Series: "utopic", + Name: "category", + Revision: 2, + }) + }, +}, { + name: "promulgated", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + e, err := store.FindBaseEntity(&url.URL) + if err != nil { + return nil, err + } + return params.PromulgatedResponse{ + Promulgated: bool(e.Promulgated), + }, nil + }, + checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.PromulgatedResponse{Promulgated: false}) + }, +}, { + name: "supported-series", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.URL.Series == "bundle" { + return nil + } + return params.SupportedSeriesResponse{ + SupportedSeries: entity.SupportedSeries, + } + }), + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, jc.DeepEquals, params.SupportedSeriesResponse{ + SupportedSeries: []string{"utopic"}, + }) + }, +}} + +// TestEndpointGet tries to ensure that the endpoint +// test data getters correspond with reality. +func (s *APISuite) TestEndpointGet(c *gc.C) { + s.addTestEntities(c) + for i, ep := range metaEndpoints { + c.Logf("test %d: %s\n", i, ep.name) + data, err := ep.get(s.store, ep.checkURL) + c.Assert(err, gc.IsNil) + ep.assertCheckData(c, data) + } +} + +func (s *APISuite) TestAllMetaEndpointsTested(c *gc.C) { + // Make sure that we're testing all the metadata + // endpoints that we need to. 
+ s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta"), + }) + c.Logf("meta response body: %s", rec.Body) + var list []string + err := json.Unmarshal(rec.Body.Bytes(), &list) + c.Assert(err, gc.IsNil) + + listNames := make(map[string]bool) + for _, name := range list { + c.Assert(listNames[name], gc.Equals, false, gc.Commentf("name %s", name)) + listNames[name] = true + } + + testNames := make(map[string]bool) + for _, test := range metaEndpoints { + if strings.Contains(test.name, "/") { + continue + } + testNames[test.name] = true + } + c.Assert(testNames, jc.DeepEquals, listNames) +} + +var testEntities = []*router.ResolvedURL{ + // A stock charm. + newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + // A stock bundle. + newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), + // A charm with some actions. + newResolvedURL("cs:~charmers/precise/dummy-10", 10), + // A charm with some tags. + newResolvedURL("cs:~charmers/utopic/category-2", 2), + // A charm with a different user. + newResolvedURL("cs:~bob/utopic/wordpress-2", -1), +} + +func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { + for _, e := range testEntities { + if e.URL.Series == "bundle" { + s.addPublicBundle(c, e.URL.Name, e) + } else { + s.addPublicCharm(c, e.URL.Name, e) + } + // Associate some extra-info data with the entity. + key := e.URL.Path() + "/meta/extra-info/key" + commonkey := e.URL.Path() + "/meta/common-info/key" + s.assertPut(c, key, "value "+e.URL.String()) + s.assertPut(c, commonkey, "value "+e.URL.String()) + } + return testEntities +} + +func (s *APISuite) TestMetaEndpointsSingle(c *gc.C) { + urls := s.addTestEntities(c) + for i, ep := range metaEndpoints { + c.Logf("test %d. %s", i, ep.name) + tested := false + for _, url := range urls { + charmId := strings.TrimPrefix(url.String(), "cs:") + path := charmId + "/meta/" + ep.name + expectData, err := ep.get(s.store, url) + c.Assert(err, gc.IsNil) + c.Logf(" expected data for %q: %#v", url, expectData) + if isNull(expectData) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Message: params.ErrMetadataNotFound.Error(), + Code: params.ErrMetadataNotFound, + }, + }) + continue + } + tested = true + c.Logf(" path %q: %#v", url, path) + s.assertGet(c, path, expectData) + } + if !tested { + c.Errorf("endpoint %q is null for all endpoints, so is not properly tested", ep.name) + } + } +} + +func (s *APISuite) TestMetaPerm(c *gc.C) { + // Create a charm store server that will use the test third party for + // its third party caveat. 
+ s.discharge = dischargeForUser("bob") + + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1)) + s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + s.assertGet(c, "development/wordpress/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + e, err := s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.ACLs.Read, gc.DeepEquals, []string{params.Everyone, "charmers"}) + + // Change the published read perms to only include a specific user and the + // published write perms to include an "admin" user. + s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob"}) + s.assertPut(c, "precise/wordpress-23/meta/perm/write", []string{"admin"}) + + // Check that the perms have changed for all revisions and series. + for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { + c.Logf("id %d: %q", i, u) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL(u + "/meta/perm"), + ExpectBody: params.PermResponse{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }, + }) + // The development perms should be unchanged. + s.assertGet(c, "development/"+u+"/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + } + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + + // Try restoring everyone's read permission on the published charm, and + // adding write permissions to bob for the development charm. + s.assertPut(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) + s.assertPut(c, "development/wordpress/meta/perm/write", []string{"bob", "admin"}) + s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ + Read: []string{"bob", params.Everyone}, + Write: []string{"admin"}, + }) + s.assertGet(c, "development/wordpress/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"bob", "admin"}, + }) + s.assertGet(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) + s.assertGet(c, "development/wordpress/meta/perm/read", []string{params.Everyone, "charmers"}) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsTrue) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob", params.Everyone}, + Write: []string{"admin"}, + }) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"bob", "admin"}, + }) + + // Try deleting all development permissions. 
+ s.assertPut(c, "development/wordpress/meta/perm/read", []string{}) + s.assertPut(c, "development/wordpress/meta/perm/write", []string{}) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL("development/wordpress/meta/perm"), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "bob"`, + }, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{}) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsTrue) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{}) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob", params.Everyone}, + Write: []string{"admin"}, + }) + + // Try deleting all published permissions. + s.assertPut(c, "wordpress/meta/perm/read", []string{}) + s.assertPut(c, "wordpress/meta/perm/write", []string{}) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL("wordpress/meta/perm"), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "bob"`, + }, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{}) + c.Assert(e.ACLs.Read, gc.DeepEquals, []string{}) + + // Try setting all published permissions in one request. + s.assertPut(c, "wordpress/meta/perm", params.PermRequest{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }) + + // Try setting all development permissions in one request. + s.assertPut(c, "development/wordpress/meta/perm", params.PermRequest{ + Read: []string{"who", params.Everyone}, + Write: []string{"who"}, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"who", params.Everyone}, + Write: []string{"who"}, + }) + + // Try only read permissions to published meta/perm endpoint. 
+ var readRequest = struct { + Read []string + }{Read: []string{"joe"}} + s.assertPut(c, "wordpress/meta/perm", readRequest) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"joe"}, + Write: []string{}, + }) +} + +func (s *APISuite) TestMetaPermPutUnauthorized(c *gc.C) { + id := "precise/wordpress-23" + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.noMacaroonSrv, + URL: storeURL("~charmers/" + id + "/meta/perm/read"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: strings.NewReader(`["some-user"]`), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "authentication failed: missing HTTP auth header", + }, + }) +} + +func (s *APISuite) TestExtraInfo(c *gc.C) { + id := "precise/wordpress-23" + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) + s.checkInfo(c, "extra-info", id) + s.checkInfo(c, "common-info", id) +} + +func (s *APISuite) checkInfo(c *gc.C, path string, id string) { + // Add one value and check that it's there. + s.assertPut(c, id+"/meta/"+path+"/foo", "fooval") + s.assertGet(c, id+"/meta/"+path+"/foo", "fooval") + s.assertGet(c, id+"/meta/"+path, map[string]string{ + "foo": "fooval", + }) + + // Add another value and check that both values are there. + s.assertPut(c, id+"/meta/"+path+"/bar", "barval") + s.assertGet(c, id+"/meta/"+path+"/bar", "barval") + s.assertGet(c, id+"/meta/"+path, map[string]string{ + "foo": "fooval", + "bar": "barval", + }) + + // Overwrite a value and check that it's changed. + s.assertPut(c, id+"/meta/"+path+"/foo", "fooval2") + s.assertGet(c, id+"/meta/"+path+"/foo", "fooval2") + s.assertGet(c, id+"/meta/"+path+"", map[string]string{ + "foo": "fooval2", + "bar": "barval", + }) + + // Write several values at once. + s.assertPut(c, id+"/meta/any", params.MetaAnyResponse{ + Meta: map[string]interface{}{ + path: map[string]string{ + "foo": "fooval3", + "baz": "bazval", + }, + path + "/frob": []int{1, 4, 6}, + }, + }) + s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ + "foo": "fooval3", + "baz": "bazval", + "bar": "barval", + "frob": []int{1, 4, 6}, + }) + + // Delete a single value. + s.assertPut(c, id+"/meta/"+path+"/foo", nil) + s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ + "baz": "bazval", + "bar": "barval", + "frob": []int{1, 4, 6}, + }) + + // Delete a value and add some values at the same time. 
+ s.assertPut(c, id+"/meta/any", params.MetaAnyResponse{ + Meta: map[string]interface{}{ + path: map[string]interface{}{ + "baz": nil, + "bar": nil, + "dazzle": "x", + "fizzle": "y", + }, + }, + }) + s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ + "frob": []int{1, 4, 6}, + "dazzle": "x", + "fizzle": "y", + }) +} + +var extraInfoBadPutRequestsTests = []struct { + about string + key string + body interface{} + contentType string + expectStatus int + expectBody params.Error +}{{ + about: "key with extra element", + key: "foo/bar", + body: "hello", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "key with a dot", + key: "foo.bar", + body: "hello", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "key with a dollar", + key: "foo$bar", + body: "hello", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with extra element", + key: "", + body: map[string]string{ + "foo/bar": "value", + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with dot", + key: "", + body: map[string]string{ + ".bar": "value", + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with dollar", + key: "", + body: map[string]string{ + "$bar": "value", + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with bad map", + key: "", + body: "bad", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `cannot unmarshal $1 body: json: cannot unmarshal string into Go value of type map[string]*json.RawMessage`, + }, +}} + +func (s *APISuite) TestExtraInfoBadPutRequests(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + path := "precise/wordpress-23/meta/" + for i, test := range extraInfoBadPutRequestsTests { + c.Logf("test %d: %s", i, test.about) + contentType := test.contentType + if contentType == "" { + contentType = "application/json" + } + extraBodyMessage := strings.Replace(test.expectBody.Message, "$1", "extra-info", -1) + commonBodyMessage := strings.Replace(test.expectBody.Message, "$1", "common-info", -1) + test.expectBody.Message = extraBodyMessage + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path + "extra-info/" + test.key), + Method: "PUT", + Header: http.Header{ + "Content-Type": {contentType}, + }, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(mustMarshalJSON(test.body)), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + test.expectBody.Message = commonBodyMessage + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path + "common-info/" + test.key), + Method: "PUT", + Header: http.Header{ + "Content-Type": {contentType}, + }, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(mustMarshalJSON(test.body)), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *APISuite) TestExtraInfoPutUnauthorized(c 
*gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/extra-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusProxyAuthRequired, + ExpectBody: dischargeRequiredBody, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/extra-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + "Bakery-Protocol-Version": {"1"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusUnauthorized, + ExpectHeader: http.Header{ + "WWW-Authenticate": {"Macaroon"}, + }, + ExpectBody: dischargeRequiredBody, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/common-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusProxyAuthRequired, + ExpectBody: dischargeRequiredBody, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/common-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + "Bakery-Protocol-Version": {"1"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusUnauthorized, + ExpectHeader: http.Header{ + "WWW-Authenticate": {"Macaroon"}, + }, + ExpectBody: dischargeRequiredBody, + }) +} + +func (s *APISuite) TestCommonInfo(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1)) + + s.assertPut(c, "wordpress/meta/common-info/key", "something") + + s.assertGet(c, "wordpress/meta/common-info", map[string]string{ + "key": "something", + }) + for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { + c.Logf("id %d: %q", i, u) + s.assertGet(c, u+"/meta/common-info", map[string]string{ + "key": "something", + }) + e, err := s.store.FindBaseEntity(charm.MustParseURL(u)) + c.Assert(err, gc.IsNil) + c.Assert(e.CommonInfo, gc.DeepEquals, map[string][]byte{ + "key": []byte("\"something\""), + }) + } +} + +func isNull(v interface{}) bool { + data, err := json.Marshal(v) + if err != nil { + panic(err) + } + return string(data) == "null" +} + +func (s *APISuite) TestMetaEndpointsAny(c *gc.C) { + rurls := s.addTestEntities(c) + // We check the meta endpoint for both promulgated and non-promulgated + // versions of each URL. 
+ urls := make([]*router.ResolvedURL, 0, len(rurls)*2) + for _, rurl := range rurls { + urls = append(urls, rurl) + if rurl.PromulgatedRevision != -1 { + rurl1 := *rurl + rurl1.PromulgatedRevision = -1 + urls = append(urls, &rurl1) + } + } + for _, url := range urls { + charmId := strings.TrimPrefix(url.String(), "cs:") + var flags []string + expectData := params.MetaAnyResponse{ + Id: url.PreferredURL(), + Meta: make(map[string]interface{}), + } + for _, ep := range metaEndpoints { + flags = append(flags, "include="+ep.name) + isBundle := url.URL.Series == "bundle" + if ep.exclusive != 0 && isBundle != (ep.exclusive == bundleOnly) { + // endpoint not relevant. + continue + } + val, err := ep.get(s.store, url) + c.Assert(err, gc.IsNil) + if val != nil { + expectData.Meta[ep.name] = val + } + } + s.assertGet(c, charmId+"/meta/any?"+strings.Join(flags, "&"), expectData) + } +} + +func (s *APISuite) TestMetaAnyWithNoIncludesAndNoEntity(c *gc.C) { + wordpressURL, _ := s.addPublicCharm( + c, + "wordpress", + newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + ) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-1/meta/any"), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:precise/wordpress-1"`, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("meta/any?id=precise/wordpress-23&id=precise/wordpress-1"), + ExpectStatus: http.StatusOK, + ExpectBody: map[string]interface{}{ + "precise/wordpress-23": params.MetaAnyResponse{ + Id: wordpressURL.PreferredURL(), + }, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/any"), + ExpectStatus: http.StatusOK, + ExpectBody: params.MetaAnyResponse{ + Id: wordpressURL.PreferredURL(), + }, + }) +} + +// In this test we rely on the charm.v2 testing repo package and +// the dummy charm that has actions included. +func (s *APISuite) TestMetaCharmActions(c *gc.C) { + url, dummy := s.addPublicCharm(c, "dummy", newResolvedURL("cs:~charmers/precise/dummy-10", 10)) + s.assertGet(c, "precise/dummy-10/meta/charm-actions", dummy.Actions()) + s.assertGet(c, "precise/dummy-10/meta/any?include=charm-actions", + params.MetaAnyResponse{ + Id: url.PreferredURL(), + Meta: map[string]interface{}{ + "charm-actions": dummy.Actions(), + }, + }, + ) +} + +func (s *APISuite) TestBulkMeta(c *gc.C) { + // We choose an arbitrary set of ids and metadata here, just to smoke-test + // whether the meta/any logic is hooked up correctly. + // Detailed tests for this feature are in the router package. + + _, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + _, mysql := s.addPublicCharm(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) + s.assertGet(c, + "meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10", + map[string]*charm.Meta{ + "precise/wordpress-23": wordpress.Meta(), + "precise/mysql-10": mysql.Meta(), + }, + ) +} + +func (s *APISuite) TestBulkMetaAny(c *gc.C) { + // We choose an arbitrary set of metadata here, just to smoke-test + // whether the meta/any logic is hooked up correctly. + // Detailed tests for this feature are in the router package. 
+ + wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + mysqlURL, mysql := s.addPublicCharm(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) + s.assertGet(c, + "meta/any?include=charm-metadata&include=charm-config&id=precise/wordpress-23&id=precise/mysql-10", + map[string]params.MetaAnyResponse{ + "precise/wordpress-23": { + Id: wordpressURL.PreferredURL(), + Meta: map[string]interface{}{ + "charm-config": wordpress.Config(), + "charm-metadata": wordpress.Meta(), + }, + }, + "precise/mysql-10": { + Id: mysqlURL.PreferredURL(), + Meta: map[string]interface{}{ + "charm-config": mysql.Config(), + "charm-metadata": mysql.Meta(), + }, + }, + }, + ) +} + +var metaCharmTagsTests = []struct { + about string + tags []string + categories []string + expectTags []string +}{{ + about: "tags only", + tags: []string{"foo", "bar"}, + expectTags: []string{"foo", "bar"}, +}, { + about: "categories only", + categories: []string{"foo", "bar"}, + expectTags: []string{"foo", "bar"}, +}, { + about: "tags and categories", + categories: []string{"foo", "bar"}, + tags: []string{"tag1", "tag2"}, + expectTags: []string{"tag1", "tag2"}, +}, { + about: "no tags or categories", +}} + +func (s *APISuite) TestMetaCharmTags(c *gc.C) { + url := newResolvedURL("~charmers/precise/wordpress-0", -1) + for i, test := range metaCharmTagsTests { + c.Logf("%d: %s", i, test.about) + wordpress := storetesting.Charms.CharmDir("wordpress") + meta := wordpress.Meta() + meta.Tags, meta.Categories = test.tags, test.categories + url.URL.Revision = i + err := s.store.AddCharm(&testMetaCharm{ + meta: meta, + Charm: wordpress, + }, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.URL.Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{test.expectTags}, + }) + } +} + +func (s *APISuite) TestPromulgatedMetaCharmTags(c *gc.C) { + url := newResolvedURL("~charmers/precise/wordpress-0", 0) + for i, test := range metaCharmTagsTests { + c.Logf("%d: %s", i, test.about) + wordpress := storetesting.Charms.CharmDir("wordpress") + meta := wordpress.Meta() + meta.Tags, meta.Categories = test.tags, test.categories + url.URL.Revision = i + url.PromulgatedRevision = i + err := s.store.AddCharm(&testMetaCharm{ + meta: meta, + Charm: wordpress, + }, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.PromulgatedURL().Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{test.expectTags}, + }) + } +} + +func (s *APISuite) TestBundleTags(c *gc.C) { + b := storetesting.Charms.BundleDir("wordpress-simple") + url := newResolvedURL("~charmers/bundle/wordpress-2", -1) + data := b.Data() + data.Tags = []string{"foo", "bar"} + err := s.store.AddBundle(&testingBundle{data}, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = 
s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.URL.Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, + }) +} + +func (s *APISuite) TestPromulgatedBundleTags(c *gc.C) { + b := storetesting.Charms.BundleDir("wordpress-simple") + url := newResolvedURL("~charmers/bundle/wordpress-2", 2) + data := b.Data() + data.Tags = []string{"foo", "bar"} + err := s.store.AddBundle(&testingBundle{data}, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.PromulgatedURL().Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, + }) +} + +type testMetaCharm struct { + meta *charm.Meta + charm.Charm +} + +func (c *testMetaCharm) Meta() *charm.Meta { + return c.meta +} + +func (s *APISuite) TestIdsAreResolved(c *gc.C) { + // This is just testing that ResolveURL is actually + // passed to the router. Given how Router is + // defined, and the ResolveURL tests, this should + // be sufficient to "join the dots". + _, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + s.assertGet(c, "wordpress/meta/charm-metadata", wordpress.Meta()) +} + +func (s *APISuite) TestMetaCharmNotFound(c *gc.C) { + for i, ep := range metaEndpoints { + c.Logf("test %d: %s", i, ep.name) + expected := params.Error{ + Message: `no matching charm or bundle for "cs:precise/wordpress-23"`, + Code: params.ErrNotFound, + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/" + ep.name), + ExpectStatus: http.StatusNotFound, + ExpectBody: expected, + }) + expected.Message = `no matching charm or bundle for "cs:wordpress"` + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("wordpress/meta/" + ep.name), + ExpectStatus: http.StatusNotFound, + ExpectBody: expected, + }) + } +} + +var resolveURLTests = []struct { + url string + expect *router.ResolvedURL + notFound bool +}{{ + url: "wordpress", + expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", 25), +}, { + url: "development/wordpress", + expect: newResolvedURL("cs:~charmers/development/trusty/wordpress-25", 25), +}, { + url: "precise/wordpress", + expect: newResolvedURL("cs:~charmers/precise/wordpress-24", 24), +}, { + url: "development/precise/wordpress", + expect: newResolvedURL("cs:~charmers/development/precise/wordpress-24", 24), +}, { + url: "utopic/bigdata", + expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), +}, { + url: "development/utopic/bigdata", + expect: newResolvedURL("cs:~charmers/development/utopic/bigdata-10", 10), +}, { + url: "~charmers/precise/wordpress", + expect: newResolvedURL("cs:~charmers/precise/wordpress-24", -1), +}, { + url: "~charmers/development/precise/wordpress", + expect: newResolvedURL("cs:~charmers/development/precise/wordpress-24", -1), +}, { + url: "~charmers/precise/wordpress-99", + notFound: true, +}, { + url: "~charmers/development/precise/wordpress-99", + notFound: true, +}, { + url: "~charmers/wordpress", + expect: 
newResolvedURL("cs:~charmers/trusty/wordpress-25", -1), +}, { + url: "~charmers/development/wordpress", + expect: newResolvedURL("cs:~charmers/development/trusty/wordpress-25", -1), +}, { + url: "~charmers/wordpress-24", + notFound: true, +}, { + url: "~charmers/development/wordpress-24", + notFound: true, +}, { + url: "~bob/wordpress", + expect: newResolvedURL("cs:~bob/trusty/wordpress-1", -1), +}, { + url: "~bob/development/wordpress", + expect: newResolvedURL("cs:~bob/development/trusty/wordpress-1", -1), +}, { + url: "~bob/precise/wordpress", + expect: newResolvedURL("cs:~bob/precise/wordpress-2", -1), +}, { + url: "~bob/development/precise/wordpress", + expect: newResolvedURL("cs:~bob/development/precise/wordpress-2", -1), +}, { + url: "bigdata", + expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), +}, { + url: "development/bigdata", + expect: newResolvedURL("cs:~charmers/development/utopic/bigdata-10", 10), +}, { + url: "wordpress-24", + notFound: true, +}, { + url: "development/wordpress-24", + notFound: true, +}, { + url: "bundlelovin", + expect: newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), +}, { + url: "development/bundlelovin", + expect: newResolvedURL("cs:~charmers/development/bundle/bundlelovin-10", 10), +}, { + url: "wordpress-26", + notFound: true, +}, { + url: "development/wordpress-26", + notFound: true, +}, { + url: "foo", + notFound: true, +}, { + url: "development/foo", + notFound: true, +}, { + url: "trusty/bigdata", + notFound: true, +}, { + url: "development/trusty/bigdata", + notFound: true, +}, { + url: "~bob/wily/django-47", + notFound: true, +}, { + url: "~bob/django", + notFound: true, +}, { + url: "wily/django", + notFound: true, +}, { + url: "django", + notFound: true, +}, { + url: "~bob/development/wily/django-47", + expect: newResolvedURL("cs:~bob/development/wily/django-47", -1), +}, { + url: "~bob/development/wily/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", -1), +}, { + url: "~bob/development/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", -1), +}, { + url: "development/wily/django-27", + expect: newResolvedURL("cs:~bob/development/wily/django-47", 27), +}, { + url: "development/wily/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", 27), +}, { + url: "development/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", 27), +}, { + url: "~bob/trusty/haproxy-0", + notFound: true, +}, { + url: "~bob/haproxy", + notFound: true, +}, { + url: "trusty/haproxy", + notFound: true, +}, { + url: "haproxy", + notFound: true, +}, { + url: "~bob/development/trusty/haproxy-0", + expect: newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1), +}, { + url: "~bob/development/trusty/haproxy", + expect: newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1), +}, { + url: "~bob/development/haproxy", + expect: newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1), +}, { + url: "~bob/development/trusty/haproxy-1", + notFound: true, +}, { + url: "development/trusty/haproxy-27", + notFound: true, +}, { + url: "development/trusty/haproxy", + notFound: true, +}, { + url: "development/haproxy", + notFound: true, +}, { + // V4 SPECIFIC + url: "~bob/multi-series", + expect: newResolvedURLWithPreferredSeries("cs:~bob/multi-series-0", -1, "trusty"), +}, { + // V4 SPECIFIC + url: "~bob/utopic/multi-series", + expect: newResolvedURLWithPreferredSeries("cs:~bob/multi-series-0", -1, "utopic"), +}} + +func (s *APISuite) TestResolveURL(c *gc.C) { + s.addPublicCharm(c, 
"wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-25", 25)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-10", 10)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/saucy/bigdata-99", 99)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/bigdata-10", 10)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/trusty/wordpress-1", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/wordpress-2", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/other-2", -1)) + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10)) + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-10", 10)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/development/wily/django-47", 27)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~bob/multi-series-0", -1)) + + for i, test := range resolveURLTests { + c.Logf("test %d: %s", i, test.url) + url := charm.MustParseURL(test.url) + rurl, err := v4.ResolveURL(s.store, url) + if test.notFound { + c.Assert(errgo.Cause(err), gc.Equals, params.ErrNotFound) + c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for ".*"`) + c.Assert(rurl, gc.IsNil) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(rurl, jc.DeepEquals, test.expect) + } +} + +var serveExpandIdTests = []struct { + about string + url string + expect []params.ExpandedId + err string +}{{ + about: "fully qualified URL", + url: "~charmers/trusty/wordpress-47", + expect: []params.ExpandedId{ + // V4 SPECIFIC + {Id: "cs:~charmers/utopic/wordpress-42"}, + {Id: "cs:~charmers/trusty/wordpress-47"}, + {Id: "cs:~charmers/trusty/wordpress-5"}, + {Id: "cs:~charmers/utopic/wordpress-5"}, + {Id: "cs:~charmers/vivid/wordpress-5"}, + {Id: "cs:~charmers/wily/wordpress-5"}, + }, +}, { + about: "fully qualified development URL", + url: "~charmers/development/trusty/wordpress-47", + expect: []params.ExpandedId{ + // V4 SPECIFIC + {Id: "cs:~charmers/utopic/wordpress-42"}, + {Id: "cs:~charmers/development/trusty/wordpress-48"}, + {Id: "cs:~charmers/trusty/wordpress-47"}, + {Id: "cs:~charmers/development/trusty/wordpress-7"}, + {Id: "cs:~charmers/development/utopic/wordpress-7"}, + {Id: "cs:~charmers/development/vivid/wordpress-7"}, + {Id: "cs:~charmers/development/wily/wordpress-7"}, + {Id: "cs:~charmers/development/trusty/wordpress-6"}, + {Id: "cs:~charmers/development/utopic/wordpress-6"}, + {Id: "cs:~charmers/development/vivid/wordpress-6"}, + {Id: "cs:~charmers/development/wily/wordpress-6"}, + {Id: "cs:~charmers/trusty/wordpress-5"}, + {Id: "cs:~charmers/utopic/wordpress-5"}, + {Id: "cs:~charmers/vivid/wordpress-5"}, + {Id: "cs:~charmers/wily/wordpress-5"}, + }, +}, { + about: "promulgated URL", + url: "trusty/wordpress-47", + expect: []params.ExpandedId{ + // V4 SPECIFIC + {Id: "cs:utopic/wordpress-42"}, + {Id: "cs:trusty/wordpress-47"}, + {Id: "cs:trusty/wordpress-49"}, + {Id: "cs:utopic/wordpress-49"}, + {Id: "cs:vivid/wordpress-49"}, + {Id: "cs:wily/wordpress-49"}, + }, +}, { + about: "development promulgated URL", + url: 
"development/trusty/wordpress-48", + expect: []params.ExpandedId{ + // V4 SPECIFIC + {Id: "cs:utopic/wordpress-42"}, + {Id: "cs:development/trusty/wordpress-48"}, + {Id: "cs:trusty/wordpress-47"}, + {Id: "cs:development/trusty/wordpress-51"}, + {Id: "cs:development/utopic/wordpress-51"}, + {Id: "cs:development/vivid/wordpress-51"}, + {Id: "cs:development/wily/wordpress-51"}, + {Id: "cs:development/trusty/wordpress-50"}, + {Id: "cs:development/utopic/wordpress-50"}, + {Id: "cs:development/vivid/wordpress-50"}, + {Id: "cs:development/wily/wordpress-50"}, + {Id: "cs:trusty/wordpress-49"}, + {Id: "cs:utopic/wordpress-49"}, + {Id: "cs:vivid/wordpress-49"}, + {Id: "cs:wily/wordpress-49"}, + }, +}, { + about: "non-promulgated charm", + url: "~bob/precise/builder", + expect: []params.ExpandedId{ + {Id: "cs:~bob/precise/builder-5"}, + }, +}, { + about: "non-promulgated charm with development URL", + url: "~bob/development/precise/builder", + expect: []params.ExpandedId{ + {Id: "cs:~bob/development/precise/builder-6"}, + {Id: "cs:~bob/precise/builder-5"}, + }, +}, { + about: "partial URL", + url: "haproxy", + expect: []params.ExpandedId{ + {Id: "cs:trusty/haproxy-1"}, + {Id: "cs:precise/haproxy-1"}, + }, +}, { + about: "revision with series matches bundles (and multi-series charms) only", + url: "mongo-0", + expect: []params.ExpandedId{ + {Id: "cs:bundle/mongo-0"}, + }, +}, { + about: "single result", + url: "bundle/mongo-0", + expect: []params.ExpandedId{ + {Id: "cs:bundle/mongo-0"}, + }, +}, { + about: "fully qualified URL with no entities found", + url: "~charmers/precise/no-such-42", + err: `no matching charm or bundle for "cs:~charmers/precise/no-such-42"`, +}, { + about: "partial URL with no entities found", + url: "no-such", + err: `no matching charm or bundle for "cs:no-such"`, +}} + +func (s *APISuite) TestServeExpandId(c *gc.C) { + // Add a bunch of entities in the database. + // Note that expand-id only cares about entity identifiers, + // so it is ok to reuse the same charm for all the entities. 
+ s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-42", 42)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/development/trusty/wordpress-48", 48)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/wordpress-5", 49)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/development/wordpress-6", 50)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/development/wordpress-7", 51)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/haproxy-1", 1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/haproxy-1", 1)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/builder-5", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/development/precise/builder-6", -1)) + + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/mongo-0", 0)) + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-0", 0)) + + for i, test := range serveExpandIdTests { + c.Logf("test %d: %s", i, test.about) + storeURL := storeURL(test.url + "/expand-id") + var expectStatus int + var expectBody interface{} + if test.err == "" { + expectStatus = http.StatusOK + expectBody = test.expect + } else { + expectStatus = http.StatusNotFound + expectBody = params.Error{ + Code: params.ErrNotFound, + Message: test.err, + } + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) + } +} + +var serveMetaRevisionInfoTests = []struct { + about string + url string + expect params.RevisionInfoResponse + err string +}{{ + about: "fully qualified url", + url: "trusty/wordpress-42", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:trusty/wordpress-43"), + charm.MustParseURL("cs:trusty/wordpress-42"), + charm.MustParseURL("cs:trusty/wordpress-41"), + charm.MustParseURL("cs:trusty/wordpress-9"), + }, + }, +}, { + about: "partial url uses a default series", + url: "wordpress", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:trusty/wordpress-43"), + charm.MustParseURL("cs:trusty/wordpress-42"), + charm.MustParseURL("cs:trusty/wordpress-41"), + charm.MustParseURL("cs:trusty/wordpress-9"), + }, + }, +}, { + about: "non-promulgated URL gives non-promulgated revisions (~charmers)", + url: "~charmers/trusty/cinder", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:~charmers/trusty/cinder-6"), + charm.MustParseURL("cs:~charmers/trusty/cinder-5"), + charm.MustParseURL("cs:~charmers/trusty/cinder-4"), + charm.MustParseURL("cs:~charmers/trusty/cinder-3"), + charm.MustParseURL("cs:~charmers/trusty/cinder-2"), + charm.MustParseURL("cs:~charmers/trusty/cinder-1"), + charm.MustParseURL("cs:~charmers/trusty/cinder-0"), + }, + }, +}, { + about: "non-promulgated URL gives non-promulgated revisions (~openstack-charmers)", + url: "~openstack-charmers/trusty/cinder", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-1"), + charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-0"), + }, + }, +}, { + about: "promulgated URL gives promulgated revisions", + url: "trusty/cinder", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:trusty/cinder-5"), + 
charm.MustParseURL("cs:trusty/cinder-4"), + charm.MustParseURL("cs:trusty/cinder-3"), + charm.MustParseURL("cs:trusty/cinder-2"), + charm.MustParseURL("cs:trusty/cinder-1"), + charm.MustParseURL("cs:trusty/cinder-0"), + }, + }, +}, { + about: "multi-series charm expands to all revisions of that charm", + url: "multi-series", + expect: params.RevisionInfoResponse{ + // V4 SPECIFIC + []*charm.URL{ + charm.MustParseURL("cs:trusty/multi-series-41"), + charm.MustParseURL("cs:trusty/multi-series-40"), + }, + }, +}, { + about: "multi-series charm with series specified", + url: "trusty/multi-series", + expect: params.RevisionInfoResponse{ + // V4 SPECIFIC + []*charm.URL{ + charm.MustParseURL("cs:trusty/multi-series-41"), + charm.MustParseURL("cs:trusty/multi-series-40"), + }, + }, +}, { + about: "multi-series charm with non-promulgated URL", + url: "~charmers/multi-series", + expect: params.RevisionInfoResponse{ + // V4 SPECIFIC + []*charm.URL{ + charm.MustParseURL("cs:~charmers/trusty/multi-series-2"), + charm.MustParseURL("cs:~charmers/trusty/multi-series-1"), + }, + }, +}, { + about: "multi-series charm with non-promulgated URL and series specified", + url: "~charmers/utopic/multi-series", + expect: params.RevisionInfoResponse{ + // V4 SPECIFIC + []*charm.URL{ + charm.MustParseURL("cs:~charmers/utopic/multi-series-2"), + charm.MustParseURL("cs:~charmers/utopic/multi-series-1"), + }, + }, +}, { + about: "mixed multi/single series charm, latest rev", + url: "mixed", + expect: params.RevisionInfoResponse{ + // V4 SPECIFIC + []*charm.URL{ + charm.MustParseURL("cs:trusty/mixed-43"), + charm.MustParseURL("cs:trusty/mixed-42"), + charm.MustParseURL("cs:trusty/mixed-41"), + charm.MustParseURL("cs:trusty/mixed-40"), + }, + }, +}, { + about: "mixed multi/single series charm with series", + url: "trusty/mixed-40", + expect: params.RevisionInfoResponse{ + // V4 SPECIFIC + []*charm.URL{ + charm.MustParseURL("cs:trusty/mixed-43"), + charm.MustParseURL("cs:trusty/mixed-42"), + charm.MustParseURL("cs:trusty/mixed-41"), + charm.MustParseURL("cs:trusty/mixed-40"), + }, + }, +}, { + about: "no entities found", + url: "precise/no-such-33", + err: `no matching charm or bundle for "cs:precise/no-such-33"`, +}} + +func (s *APISuite) TestServeMetaRevisionInfo(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-41", 41)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-42", 42)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-41", 41)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-42", 42)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-43", 43)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-9", 9)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-42", 42)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-0", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-1", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-2", 0)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-3", 1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-0", 2)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-1", 3)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-4", -1)) + 
s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-5", 4)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-6", 5)) + + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-1", 40)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-2", 41)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-1", 40)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-2", 41)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/mixed-3", 42)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/mixed-4", 43)) + + for i, test := range serveMetaRevisionInfoTests { + c.Logf("test %d: %s", i, test.about) + storeURL := storeURL(test.url + "/meta/revision-info") + var expectStatus int + var expectBody interface{} + if test.err == "" { + expectStatus = http.StatusOK + expectBody = test.expect + } else { + expectStatus = http.StatusNotFound + expectBody = params.Error{ + Code: params.ErrNotFound, + Message: test.err, + } + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) + } +} + +var metaStatsTests = []struct { + // about describes the test. + about string + // url is the entity id to use when making the meta/stats request. + url string + // downloads maps entity ids to a numeric key/value pair where the key is + // the number of days in the past when the entity was downloaded and the + // value is the number of downloads performed that day. + downloads map[string]map[int]int + // expectResponse is the expected response from the meta/stats endpoint. + expectResponse params.StatsResponse +}{{ + about: "no downloads", + url: "trusty/mysql-0", + downloads: map[string]map[int]int{"trusty/mysql-0": {}}, +}, { + about: "single download", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {0: 1}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 1, + ArchiveDownload: params.StatsCount{ + Total: 1, + Day: 1, + Week: 1, + Month: 1, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 1, + Day: 1, + Week: 1, + Month: 1, + }, + }, +}, { + about: "single download a long time ago", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {100: 1}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 1, + ArchiveDownload: params.StatsCount{ + Total: 1, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 1, + }, + }, +}, { + about: "some downloads this month", + url: "utopic/wordpress-47", + downloads: map[string]map[int]int{ + "utopic/wordpress-47": {20: 2, 25: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 2 + 5, + ArchiveDownload: params.StatsCount{ + Total: 2 + 5, + Month: 2 + 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 2 + 5, + Month: 2 + 5, + }, + }, +}, { + about: "multiple recent downloads", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {100: 1, 12: 3, 8: 5, 4: 10, 2: 1, 0: 3}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 1 + 3 + 5 + 10 + 1 + 3, + ArchiveDownload: params.StatsCount{ + Total: 1 + 3 + 5 + 10 + 1 + 3, + Day: 3, + Week: 10 + 1 + 3, + Month: 3 + 5 + 10 + 1 + 3, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 1 + 3 + 5 + 10 + 1 + 3, + Day: 3, + Week: 10 + 1 + 3, + 
Month: 3 + 5 + 10 + 1 + 3, + }, + }, +}, { + about: "sparse downloads", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {200: 3, 27: 4, 3: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 3 + 4 + 5, + ArchiveDownload: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + }, +}, { + about: "bundle downloads", + url: "bundle/django-simple-2", + downloads: map[string]map[int]int{ + "bundle/django-simple-2": {200: 3, 27: 4, 3: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 3 + 4 + 5, + ArchiveDownload: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + }, +}, { + about: "different charms", + url: "trusty/rails-47", + downloads: map[string]map[int]int{ + "utopic/rails-47": {200: 3, 27: 4, 3: 5}, + "trusty/rails-47": {20: 2, 6: 10}, + "trusty/mysql-0": {200: 1, 14: 2, 1: 7}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 2 + 10, + ArchiveDownload: params.StatsCount{ + Total: 2 + 10, + Week: 10, + Month: 2 + 10, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 2 + 10, + Week: 10, + Month: 2 + 10, + }, + }, +}, { + about: "different revisions of the same charm", + url: "precise/rails-1", + downloads: map[string]map[int]int{ + "precise/rails-0": {300: 1, 200: 2}, + "precise/rails-1": {100: 5, 10: 3, 2: 7}, + "precise/rails-2": {6: 10, 0: 9}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 5 + 3 + 7, + ArchiveDownload: params.StatsCount{ + Total: 5 + 3 + 7, + Week: 7, + Month: 3 + 7, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: (1 + 2) + (5 + 3 + 7) + (10 + 9), + Day: 0 + 0 + 9, + Week: 0 + 7 + (10 + 9), + Month: 0 + (3 + 7) + (10 + 9), + }, + }, +}, { + about: "downloads only in an old revision", + url: "trusty/wordpress-2", + downloads: map[string]map[int]int{ + "precise/wordpress-2": {2: 2, 0: 1}, + "trusty/wordpress-0": {100: 10}, + "trusty/wordpress-2": {}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 10, + }, + }, +}, { + about: "downloads only in a newer revision", + url: "utopic/wordpress-0", + downloads: map[string]map[int]int{ + "utopic/wordpress-0": {}, + "utopic/wordpress-1": {31: 7, 10: 1, 3: 2, 0: 1}, + "utopic/wordpress-2": {6: 9, 0: 2}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: (7 + 1 + 2 + 1) + (9 + 2), + Day: 1 + 2, + Week: (2 + 1) + (9 + 2), + Month: (1 + 2 + 1) + (9 + 2), + }, + }, +}, { + about: "non-promulgated charms", + url: "~who/utopic/django-0", + downloads: map[string]map[int]int{ + "utopic/django-0": {100: 1, 10: 2, 1: 3, 0: 4}, + "~who/utopic/django-0": {2: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 5, + ArchiveDownload: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, + }, +}} + +func (s *APISuite) TestMetaStats(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + // TODO (frankban): remove this call when removing the legacy counts logic. 
+ patchLegacyDownloadCountsEnabled(s.AddCleanup, false) + + today := time.Now() + for i, test := range metaStatsTests { + c.Logf("test %d: %s", i, test.about) + + for id, downloadsPerDay := range test.downloads { + url := &router.ResolvedURL{ + URL: *charm.MustParseURL(id), + PromulgatedRevision: -1, + } + if url.URL.User == "" { + url.URL.User = "charmers" + url.PromulgatedRevision = url.URL.Revision + } + + // Add the required entities to the database. + if url.URL.Series == "bundle" { + s.addPublicBundle(c, "wordpress-simple", url) + } else { + s.addPublicCharm(c, "wordpress", url) + } + + // Simulate the entity was downloaded at the specified dates. + for daysAgo, downloads := range downloadsPerDay { + date := today.AddDate(0, 0, -daysAgo) + key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, url.URL.User, strconv.Itoa(url.URL.Revision)} + for i := 0; i < downloads; i++ { + err := s.store.IncCounterAtTime(key, date) + c.Assert(err, gc.IsNil) + } + if url.PromulgatedRevision > -1 { + key := []string{params.StatsArchiveDownloadPromulgated, url.URL.Series, url.URL.Name, "", strconv.Itoa(url.PromulgatedRevision)} + for i := 0; i < downloads; i++ { + err := s.store.IncCounterAtTime(key, date) + c.Assert(err, gc.IsNil) + } + } + } + } + // Ensure the meta/stats response reports the correct downloads count. + s.assertGet(c, test.url+"/meta/stats", test.expectResponse) + + // Clean up the collections. + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.StatCounters().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var metaStatsWithLegacyDownloadCountsTests = []struct { + about string + count string + expectValue int64 + expectError string +}{{ + about: "no extra-info", +}, { + about: "zero downloads", + count: "0", +}, { + about: "some downloads", + count: "47", + expectValue: 47, +}, { + about: "invalid value", + count: "invalid", + expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", +}} + +// Tests meta/stats with LegacyDownloadCountsEnabled set to true. +// TODO (frankban): remove this test case when removing the legacy counts +// logic. +func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { + patchLegacyDownloadCountsEnabled(s.AddCleanup, true) + id, _ := s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) + url := storeURL("utopic/wordpress-42/meta/stats") + + for i, test := range metaStatsWithLegacyDownloadCountsTests { + c.Logf("test %d: %s", i, test.about) + + // Update the entity extra info if required. + if test.count != "" { + extraInfo := map[string][]byte{ + params.LegacyDownloadStats: []byte(test.count), + } + err := s.store.UpdateEntity(id, bson.D{{ + "$set", bson.D{{"extrainfo", extraInfo}}, + }}) + c.Assert(err, gc.IsNil) + } + + var expectBody interface{} + var expectStatus int + if test.expectError == "" { + // Ensure the downloads count is correctly returned. + expectBody = params.StatsResponse{ + ArchiveDownloadCount: test.expectValue, + ArchiveDownload: params.StatsCount{ + Total: test.expectValue, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: test.expectValue, + }, + } + expectStatus = http.StatusOK + } else { + // Ensure an error is returned. + expectBody = params.Error{ + Message: test.expectError, + } + expectStatus = http.StatusInternalServerError + } + + // Perform the request. 
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler:      s.srv,
+			URL:          url,
+			ExpectStatus: expectStatus,
+			ExpectBody:   expectBody,
+		})
+	}
+}
+
+type publishSpec struct {
+	id   *router.ResolvedURL
+	time string
+	acl  []string
+}
+
+func (p publishSpec) published() params.Published {
+	t, err := time.Parse("2006-01-02 15:04", p.time)
+	if err != nil {
+		panic(err)
+	}
+	return params.Published{&p.id.URL, t}
+}
+
+var publishedCharms = []publishSpec{{
+	id:   newResolvedURL("cs:~charmers/precise/wordpress-1", 1),
+	time: "5432-10-12 00:00",
+}, {
+	id:   newResolvedURL("cs:~charmers/precise/mysql-1", 1),
+	time: "5432-10-12 13:00",
+}, {
+	id:   newResolvedURL("cs:~charmers/precise/wordpress-2", 2),
+	time: "5432-10-12 23:59",
+}, {
+	id:   newResolvedURL("cs:~charmers/precise/mysql-2", 2),
+	time: "5432-10-13 00:00",
+}, {
+	id:   newResolvedURL("cs:~charmers/precise/mysql-5", 5),
+	time: "5432-10-13 10:00",
+}, {
+	id:   newResolvedURL("cs:~charmers/precise/wordpress-3", 3),
+	time: "5432-10-14 01:00",
+}, {
+	id:   newResolvedURL("cs:~charmers/precise/django-0", -1),
+	time: "5432-10-14 02:00",
+	acl:  []string{"charmers"},
+}}
+
+var changesPublishedTests = []struct {
+	args string
+	// expect holds indexes into publishedCharms
+	// of the entries expected to be returned by changes/published.
+	expect []int
+}{{
+	args:   "",
+	expect: []int{5, 4, 3, 2, 1, 0},
+}, {
+	args:   "?start=5432-10-13",
+	expect: []int{5, 4, 3},
+}, {
+	args:   "?stop=5432-10-13",
+	expect: []int{4, 3, 2, 1, 0},
+}, {
+	args:   "?start=5432-10-13&stop=5432-10-13",
+	expect: []int{4, 3},
+}, {
+	args:   "?start=5432-10-12&stop=5432-10-13",
+	expect: []int{4, 3, 2, 1, 0},
+}, {
+	args:   "?start=5432-10-13&stop=5432-10-12",
+	expect: []int{},
+}, {
+	args:   "?limit=3",
+	expect: []int{5, 4, 3},
+}, {
+	args:   "?start=5432-10-12&stop=5432-10-13&limit=2",
+	expect: []int{4, 3},
+}}
+
+func (s *APISuite) TestChangesPublished(c *gc.C) {
+	s.publishCharmsAtKnownTimes(c, publishedCharms)
+	for i, test := range changesPublishedTests {
+		c.Logf("test %d: %q", i, test.args)
+		expect := make([]params.Published, len(test.expect))
+		for j, index := range test.expect {
+			expect[j] = publishedCharms[index].published()
+		}
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler:    s.srv,
+			URL:        storeURL("changes/published") + test.args,
+			ExpectBody: expect,
+		})
+	}
+}
+
+func (s *APISuite) TestChangesPublishedAdmin(c *gc.C) {
+	s.publishCharmsAtKnownTimes(c, publishedCharms)
+	expect := make([]params.Published, len(publishedCharms))
+	for i := range expect {
+		expect[i] = publishedCharms[len(publishedCharms)-(i+1)].published()
+	}
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:    s.srv,
+		Username:   testUsername,
+		Password:   testPassword,
+		URL:        storeURL("changes/published"),
+		ExpectBody: expect,
+	})
+}
+
+var changesPublishedErrorsTests = []struct {
+	args   string
+	expect params.Error
+	status int
+}{{
+	args: "?limit=0",
+	expect: params.Error{
+		Code:    params.ErrBadRequest,
+		Message: "invalid 'limit' value",
+	},
+	status: http.StatusBadRequest,
+}, {
+	args: "?limit=-1",
+	expect: params.Error{
+		Code:    params.ErrBadRequest,
+		Message: "invalid 'limit' value",
+	},
+	status: http.StatusBadRequest,
+}, {
+	args: "?limit=-9999",
+	expect: params.Error{
+		Code:    params.ErrBadRequest,
+		Message: "invalid 'limit' value",
+	},
+	status: http.StatusBadRequest,
+}, {
+	args: "?start=baddate",
+	expect: params.Error{
+		Code:    params.ErrBadRequest,
+		Message: `invalid 'start' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`,
+	},
+	status: http.StatusBadRequest,
+}, {
+	args: "?stop=baddate",
+	expect: params.Error{
+		Code:    params.ErrBadRequest,
+		Message: `invalid 'stop' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`,
+	},
+	status: http.StatusBadRequest,
+}}
+
+func (s *APISuite) TestChangesPublishedErrors(c *gc.C) {
+	s.publishCharmsAtKnownTimes(c, publishedCharms)
+	for i, test := range changesPublishedErrorsTests {
+		c.Logf("test %d: %q", i, test.args)
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler:      s.srv,
+			URL:          storeURL("changes/published") + test.args,
+			ExpectStatus: test.status,
+			ExpectBody:   test.expect,
+		})
+	}
+}
+
+// publishCharmsAtKnownTimes populates the store with
+// a range of charms with known timestamps.
+func (s *APISuite) publishCharmsAtKnownTimes(c *gc.C, charms []publishSpec) {
+	for _, ch := range charms {
+		id, _ := s.addPublicCharm(c, "wordpress", ch.id)
+		t := ch.published().PublishTime
+		err := s.store.UpdateEntity(id, bson.D{{"$set", bson.D{{"uploadtime", t}}}})
+		c.Assert(err, gc.IsNil)
+		if len(ch.acl) > 0 {
+			err := s.store.SetPerms(&id.URL, "read", ch.acl...)
+			c.Assert(err, gc.IsNil)
+		}
+	}
+}
+
+var debugPprofTests = []struct {
+	path  string
+	match string
+}{{
+	path:  "debug/pprof/",
+	match: `(?s).*profiles:.*heap.*`,
+}, {
+	path:  "debug/pprof/goroutine?debug=2",
+	match: "(?s)goroutine [0-9]+.*",
+}, {
+	path:  "debug/pprof/cmdline",
+	match: ".+charmstore.+",
+}}
+
+func (s *APISuite) TestDebugPprof(c *gc.C) {
+	for i, test := range debugPprofTests {
+		c.Logf("test %d: %s", i, test.path)
+
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			Header:  basicAuthHeader(testUsername, testPassword),
+			URL:     storeURL(test.path),
+		})
+		c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String()))
+		c.Assert(rec.Body.String(), gc.Matches, test.match)
+	}
+}
+
+func (s *APISuite) TestDebugPprofFailsWithoutAuth(c *gc.C) {
+	for i, test := range debugPprofTests {
+		c.Logf("test %d: %s", i, test.path)
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler:      s.srv,
+			URL:          storeURL(test.path),
+			ExpectStatus: http.StatusProxyAuthRequired,
+			ExpectBody:   dischargeRequiredBody,
+		})
+	}
+}
+
+func (s *APISuite) TestHash256Laziness(c *gc.C) {
+	// TODO frankban: remove this test after updating entities in the
+	// production db with their SHA256 hash value. Entities are updated by
+	// running the cshash256 command.
+	id, _ := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~who/precise/wordpress-0", -1))
+
+	// Retrieve the SHA256 hash.
+	entity, err := s.store.FindEntity(id, "blobhash256")
+	c.Assert(err, gc.IsNil)
+	c.Assert(entity.BlobHash256, gc.Not(gc.Equals), "")
+
+	hashtesting.CheckSHA256Laziness(c, s.store, &id.URL, func() {
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler:      s.srv,
+			URL:          storeURL(id.URL.Path() + "/meta/hash256"),
+			ExpectStatus: http.StatusOK,
+			ExpectBody: params.HashResponse{
+				Sum: entity.BlobHash256,
+			},
+		})
+	})
+}
+
+func basicAuthHeader(username, password string) http.Header {
+	// It's a pity we have to jump through this hoop.
+ req := &http.Request{ + Header: make(http.Header), + } + req.SetBasicAuth(username, password) + return req.Header +} + +func entityFieldGetter(fieldName string) metaEndpointExpectedValueGetter { + return entityGetter(func(entity *mongodoc.Entity) interface{} { + field := reflect.ValueOf(entity).Elem().FieldByName(fieldName) + if !field.IsValid() { + panic(errgo.Newf("entity has no field %q", fieldName)) + } + return field.Interface() + }) +} + +func entityGetter(get func(*mongodoc.Entity) interface{}) metaEndpointExpectedValueGetter { + return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + doc, err := store.FindEntity(url) + if err != nil { + return nil, errgo.Mask(err) + } + return get(doc), nil + } +} + +func zipGetter(get func(*zip.Reader) interface{}) metaEndpointExpectedValueGetter { + return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + doc, err := store.FindEntity(url, "blobname") + if err != nil { + return nil, errgo.Mask(err) + } + blob, size, err := store.BlobStore.Open(doc.BlobName) + if err != nil { + return nil, errgo.Mask(err) + } + defer blob.Close() + content, err := ioutil.ReadAll(blob) + if err != nil { + return nil, errgo.Mask(err) + } + r, err := zip.NewReader(bytes.NewReader(content), size) + if err != nil { + return nil, errgo.Mask(err) + } + return get(r), nil + } +} + +func entitySizeChecker(c *gc.C, data interface{}) { + response := data.(*params.ArchiveSizeResponse) + c.Assert(response.Size, gc.Not(gc.Equals), int64(0)) +} + +func (s *APISuite) addPublicCharm(c *gc.C, charmName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { + ch := storetesting.Charms.CharmDir(charmName) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + return rurl, ch +} + +func (s *APISuite) addPublicBundle(c *gc.C, bundleName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Bundle) { + bundle := storetesting.Charms.BundleDir(bundleName) + err := s.store.AddBundleWithArchive(rurl, bundle) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + return rurl, bundle +} + +func (s *APISuite) assertPutNonAdmin(c *gc.C, url string, val interface{}) { + s.assertPut0(c, url, val, false) +} + +func (s *APISuite) assertPut(c *gc.C, url string, val interface{}) { + s.assertPut0(c, url, val, true) +} + +func (s *APISuite) assertPut0(c *gc.C, url string, val interface{}, asAdmin bool) { + body, err := json.Marshal(val) + c.Assert(err, gc.IsNil) + p := httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url), + Method: "PUT", + Do: bakeryDo(nil), + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: bytes.NewReader(body), + } + if asAdmin { + p.Username = testUsername + p.Password = testPassword + } + rec := httptesting.DoRequest(c, p) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) + c.Assert(rec.Body.String(), gc.HasLen, 0) +} + +func (s *APISuite) assertGet(c *gc.C, url string, expectVal interface{}) { + httptesting.AssertJSONCall(c, 
httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL(url), + ExpectBody: expectVal, + }) +} + +func (s *APISuite) addLog(c *gc.C, log *mongodoc.Log) { + err := s.store.DB.Logs().Insert(log) + c.Assert(err, gc.Equals, nil) +} + +func mustMarshalJSON(val interface{}) string { + data, err := json.Marshal(val) + if err != nil { + panic(fmt.Errorf("cannot marshal %#v: %v", val, err)) + } + return string(data) +} + +func (s *APISuite) TestMacaroon(c *gc.C) { + var checkedCaveats []string + var mu sync.Mutex + var dischargeError error + s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { + mu.Lock() + defer mu.Unlock() + checkedCaveats = append(checkedCaveats, cond+" "+arg) + return []checkers.Caveat{checkers.DeclaredCaveat("username", "who")}, dischargeError + } + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("macaroon"), + Method: "GET", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) + var m macaroon.Macaroon + err := json.Unmarshal(rec.Body.Bytes(), &m) + c.Assert(err, gc.IsNil) + c.Assert(m.Location(), gc.Equals, "charmstore") + client := httpbakery.NewClient() + ms, err := client.DischargeAll(&m) + c.Assert(err, gc.IsNil) + sort.Strings(checkedCaveats) + c.Assert(checkedCaveats, jc.DeepEquals, []string{ + "is-authenticated-user ", + }) + macaroonCookie, err := httpbakery.NewCookie(ms) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("log"), + Do: bakeryDo(nil), + Cookies: []*http.Cookie{macaroonCookie}, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "who"`, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.noMacaroonSrv, + URL: storeURL("log"), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + }, + }) +} + +func (s *APISuite) TestWhoAmIFailWithNoMacaroon(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.noMacaroonSrv, + URL: storeURL("whoami"), + Do: bakeryDo(nil), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "authentication failed: missing HTTP auth header", + }, + }) +} + +func (s *APISuite) TestWhoAmIReturnsNameAndGroups(c *gc.C) { + s.discharge = dischargeForUser("who") + s.idM.groups = map[string][]string{ + "who": {"foo", "bar"}, + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("whoami"), + Do: bakeryDo(nil), + ExpectStatus: http.StatusOK, + ExpectBody: params.WhoAmIResponse{ + User: "who", + Groups: []string{"foo", "bar"}, + }, + }) +} + +var promulgateTests = []struct { + about string + entities []*mongodoc.Entity + baseEntities []*mongodoc.BaseEntity + id string + useHTTPDo bool + method string + caveats []checkers.Caveat + groups map[string][]string + body io.Reader + username string + password string + expectStatus int + expectBody interface{} + expectEntities []*mongodoc.Entity + expectBaseEntities []*mongodoc.BaseEntity + expectPromulgate bool + expectUser string +}{{ + about: "unpromulgate base entity", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: 
[]*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + expectUser: "admin", +}, { + about: "promulgate base entity", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(mongodoc.ACL{ + Write: []string{v4.PromulgatorsGroup}, + }).WithPromulgated(true).Build(), + }, + expectPromulgate: true, + expectUser: "admin", +}, { + about: "unpromulgate base entity not found", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/mysql", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/mysql"`, + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "promulgate base entity not found", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/mysql", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/mysql"`, + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, +}, { + about: "bad method", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: 
storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + username: testUsername, + password: testPassword, + method: "POST", + expectStatus: http.StatusMethodNotAllowed, + expectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "POST not allowed", + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "bad JSON", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: bytes.NewReader([]byte("tru")), + username: testUsername, + password: testPassword, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad request: invalid character ' ' in literal true (expecting 'e')", + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "unpromulgate base entity with macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v4.UsernameAttr, v4.PromulgatorsGroup), + }, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + expectUser: v4.PromulgatorsGroup, +}, { + about: "promulgate base entity with macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v4.UsernameAttr, v4.PromulgatorsGroup), + }, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(mongodoc.ACL{ + Write: []string{v4.PromulgatorsGroup}, + }).WithPromulgated(true).Build(), + }, + expectPromulgate: true, + expectUser: v4.PromulgatorsGroup, +}, { + about: "promulgate base entity with group macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: 
storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), + }, + groups: map[string][]string{ + "bob": {v4.PromulgatorsGroup, "yellow"}, + }, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(mongodoc.ACL{ + Write: []string{v4.PromulgatorsGroup}, + }).WithPromulgated(true).Build(), + }, + expectPromulgate: true, + expectUser: "bob", +}, { + about: "no authorisation", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + useHTTPDo: true, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + expectStatus: http.StatusProxyAuthRequired, + expectBody: dischargeRequiredBody, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "promulgate base entity with unauthorized user macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), + }, + groups: map[string][]string{ + "bob": {"yellow"}, + }, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Message: `unauthorized: access denied for user "bob"`, + Code: params.ErrUnauthorized, + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, +}} + +func (s *APISuite) TestPromulgate(c *gc.C) { + for i, test := range promulgateTests { + c.Logf("%d. 
%s\n", i, test.about) + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + for _, e := range test.entities { + err := s.store.DB.Entities().Insert(e) + c.Assert(err, gc.IsNil) + } + for _, e := range test.baseEntities { + err := s.store.DB.BaseEntities().Insert(e) + c.Assert(err, gc.IsNil) + } + if test.method == "" { + test.method = "PUT" + } + + client := httpbakery.NewHTTPClient() + s.discharge = func(_, _ string) ([]checkers.Caveat, error) { + return test.caveats, nil + } + s.idM.groups = test.groups + p := httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.id + "/promulgate"), + Method: test.method, + Body: test.body, + Header: http.Header{"Content-Type": {"application/json"}}, + Username: test.username, + Password: test.password, + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + } + if !test.useHTTPDo { + p.Do = bakeryDo(client) + } + httptesting.AssertJSONCall(c, p) + n, err := s.store.DB.Entities().Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, len(test.expectEntities)) + for _, e := range test.expectEntities { + storetesting.AssertEntity(c, s.store.DB.Entities(), e) + } + n, err = s.store.DB.BaseEntities().Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, len(test.expectBaseEntities)) + for _, e := range test.expectBaseEntities { + storetesting.AssertBaseEntity(c, s.store.DB.BaseEntities(), e) + } + } +} + +func (s *APISuite) TestEndpointRequiringBaseEntityWithPromulgatedId(c *gc.C) { + // Add a promulgated charm. + url := newResolvedURL("~charmers/precise/wordpress-23", 23) + s.addPublicCharm(c, "wordpress", url) + + // Unpromulgate the base entity + err := s.store.SetPromulgated(url, false) + c.Assert(err, gc.IsNil) + + // Check that we can still enquire about the promulgation status + // of the entity when using its promulgated URL. 
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/promulgated"), + ExpectBody: params.PromulgatedResponse{ + Promulgated: false, + }, + }) +} + +var publishTests = []struct { + about string + db []*router.ResolvedURL + id string + publish bool + expectDB []*router.ResolvedURL + expectBody params.PublishResponse +}{{ + about: "publish: one development charm present, fully qualified id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", -1), + }, + id: "~who/wily/django-42", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + }, +}, { + about: "publish: one development charm present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 47), + }, + id: "wily/django-47", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", 47), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + PromulgatedId: charm.MustParseURL("wily/django-47"), + }, +}, { + about: "publish: one development charm present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", -1), + }, + id: "~who/wily/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + }, +}, { + about: "publish: one development charm present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 47), + }, + id: "django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", 47), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + PromulgatedId: charm.MustParseURL("wily/django-47"), + }, +}, { + about: "publish: one published charm present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-1", -1), + }, + id: "~who/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-1", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-1"), + }, +}, { + about: "publish: one published charm present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-2", 0), + }, + id: "wily/django-0", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-2", 0), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-2"), + PromulgatedId: charm.MustParseURL("wily/django-0"), + }, +}, { + about: "publish: multiple development charms present, fully qualified id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + newResolvedURL("~who/development/trusty/django-1", -1), + }, + id: "~who/wily/django-1", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + newResolvedURL("~who/development/trusty/django-1", -1), + }, + 
expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-1"), + }, +}, { + about: "publish: multiple development charms present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/development/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + id: "wily/django-10", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/development/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + PromulgatedId: charm.MustParseURL("wily/django-10"), + }, +}, { + about: "publish: multiple development charms present, fully qualified id, promulgated, last one published", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/development/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + id: "wily/django-12", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-44"), + PromulgatedId: charm.MustParseURL("wily/django-12"), + }, +}, { + about: "publish: multiple development charms present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/trusty/django-42", -1), + newResolvedURL("~who/development/trusty/django-47", -1), + }, + id: "~who/wily/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/wily/django-1", -1), + newResolvedURL("~who/development/trusty/django-42", -1), + newResolvedURL("~who/development/trusty/django-47", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-1"), + }, +}, { + about: "publish: multiple development charms present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", 0), + newResolvedURL("~who/development/wily/django-1", 1), + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/development/trusty/django-47", 11), + }, + id: "django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", 0), + newResolvedURL("~who/development/wily/django-1", 1), + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/trusty/django-47"), + PromulgatedId: charm.MustParseURL("trusty/django-11"), + }, +}, { + about: "publish: multiple published charms present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + }, + id: 
"~who/wily/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/wily/django-2", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-2"), + }, +}, { + about: "publish: multiple published charms present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/trusty/django-48", 12), + newResolvedURL("~who/development/trusty/django-49", 13), + }, + id: "django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/trusty/django-48", 12), + newResolvedURL("~who/trusty/django-49", 13), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/trusty/django-49"), + PromulgatedId: charm.MustParseURL("trusty/django-13"), + }, +}, { + about: "unpublish: one published charm present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-1", -1), + }, + id: "~who/django", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-1", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/wily/django-1"), + }, +}, { + about: "unpublish: one published charm present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-2", 0), + }, + id: "wily/django-0", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-2", 0), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/wily/django-2"), + PromulgatedId: charm.MustParseURL("development/wily/django-0"), + }, +}, { + about: "unpublish: multiple published charms present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + }, + id: "~who/wily/django", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/wily/django-0"), + }, +}, { + about: "unpublish: multiple published charms present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/trusty/django-48", 12), + newResolvedURL("~who/development/trusty/django-49", 13), + }, + id: "django", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/development/trusty/django-48", 12), + newResolvedURL("~who/development/trusty/django-49", 13), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/trusty/django-48"), + PromulgatedId: charm.MustParseURL("development/trusty/django-12"), + }, +}} + +func (s *APISuite) TestPublish(c *gc.C) { + for i, test := range publishTests { + c.Logf("test %d: %s", i, test.about) + + // Add the initial entities to the database. 
+ for _, rurl := range test.db { + s.addPublicCharm(c, "wordpress", rurl) + } + + // Build the proper request body. + body := mustMarshalJSON(params.PublishRequest{ + Published: test.publish, + }) + + // Check that the request/response process works as expected. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.id + "/publish"), + Method: "PUT", + Header: http.Header{"Content-Type": {"application/json"}}, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(body), + ExpectStatus: http.StatusOK, + ExpectBody: test.expectBody, + }) + + // Check that the database now includes the expected entities. + for _, rurl := range test.expectDB { + e, err := s.store.FindEntity(rurl) + c.Assert(err, gc.IsNil) + c.Assert(charmstore.EntityResolvedURL(e), jc.DeepEquals, rurl) + } + + // Remove all entities from the database. + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var publishErrorsTests = []struct { + about string + method string + id string + contentType string + body string + expectStatus int + expectBody params.Error +}{{ + about: "get method not allowed", + method: "GET", + id: "~who/wily/django-42", + expectStatus: http.StatusMethodNotAllowed, + expectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "GET not allowed", + }, +}, { + about: "post method not allowed", + method: "POST", + id: "~who/wily/django-42", + expectStatus: http.StatusMethodNotAllowed, + expectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "POST not allowed", + }, +}, { + about: "invalid channel", + method: "PUT", + id: "~who/development/wily/django-42", + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Code: params.ErrForbidden, + Message: `can only set publish on published URL, "cs:~who/development/wily/django-42" provided`, + }, +}, { + about: "unexpected content type", + method: "PUT", + id: "~who/wily/django-42", + contentType: "text/invalid", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `cannot unmarshal publish request body: cannot unmarshal into field: unexpected content type text/invalid; want application/json; content: "{\"Published\":true}"`, + }, +}, { + about: "invalid body", + method: "PUT", + id: "~who/wily/django-42", + body: "bad wolf", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "cannot unmarshal publish request body: cannot unmarshal into field: cannot unmarshal request body: invalid character 'b' looking for beginning of value", + }, +}, { + about: "entity to be published not found", + method: "PUT", + id: "~who/wily/django-42", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~who/development/wily/django-42"`, + }, +}, { + about: "entity to be unpublished not found", + method: "PUT", + id: "~who/wily/django-42", + body: mustMarshalJSON(params.PublishRequest{ + Published: false, + }), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~who/wily/django-42"`, + }, +}} + +func (s *APISuite) TestPublishErrors(c *gc.C) { + for i, test := range publishErrorsTests { + c.Logf("test %d: %s", i, test.about) + contentType := test.contentType + if contentType == "" { + 
contentType = "application/json" + } + body := test.body + if body == "" { + body = mustMarshalJSON(params.PublishRequest{ + Published: true, + }) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.id + "/publish"), + Method: test.method, + Header: http.Header{"Content-Type": {contentType}}, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(body), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *APISuite) TestTooManyConcurrentRequests(c *gc.C) { + // We don't have any control over the number of concurrent + // connections allowed by s.srv, so we make our own + // server here with custom config. + config := charmstore.ServerParams{ + MaxMgoSessions: 1, + } + db := s.Session.DB("charmstore") + srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler}) + c.Assert(err, gc.IsNil) + defer srv.Close() + + // Get a store from the pool so that we'll be + // at the concurrent request limit. + store := srv.Pool().Store() + defer store.Close() + + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: srv, + Do: bakeryDo(nil), + URL: storeURL("debug/status"), + ExpectStatus: http.StatusServiceUnavailable, + ExpectBody: params.Error{ + Message: "service unavailable: too many mongo sessions in use", + Code: params.ErrServiceUnavailable, + }, + }) +} + +// dischargeRequiredBody returns a httptesting.BodyAsserter that checks +// that the response body contains a discharge required error holding a macaroon +// with a third-party caveat addressed to expectedEntityLocation. +var dischargeRequiredBody httptesting.BodyAsserter = func(c *gc.C, body json.RawMessage) { + var response httpbakery.Error + err := json.Unmarshal(body, &response) + c.Assert(err, gc.IsNil) + c.Assert(response.Code, gc.Equals, httpbakery.ErrDischargeRequired) + c.Assert(response.Message, gc.Equals, "verification failed: no macaroon cookies in request") + c.Assert(response.Info.Macaroon, gc.NotNil) + for _, cav := range response.Info.Macaroon.Caveats() { + if cav.Location != "" { + return + } + } + c.Fatalf("no third party caveat found in response macaroon; caveats %#v", response.Info.Macaroon.Caveats()) +} + +func (s *APISuite) TestSetAuthCookie(c *gc.C) { + m, err := macaroon.New([]byte("key"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + ms := macaroon.Slice{m} + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("set-auth-cookie"), + Method: "PUT", + Header: http.Header{"Origin": []string{"https://1.2.3.4"}}, + JSONBody: params.SetAuthCookie{ + Macaroons: ms, + }, + }) + // The request is successful. + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // The response includes the CORS header for the specific request. + c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "https://1.2.3.4") + + // The response includes the macaroons cookie. 
+	resp := http.Response{Header: rec.Header()}
+	cookies := resp.Cookies()
+	c.Assert(len(cookies), gc.Equals, 1)
+	expected, err := httpbakery.NewCookie(ms)
+	c.Assert(err, jc.ErrorIsNil)
+	expected.Path = "/"
+	c.Assert(cookies[0].Value, gc.Equals, expected.Value)
+}
+
+func (s *APISuite) TestSetAuthCookieBodyError(c *gc.C) {
+	m, err := macaroon.New([]byte("key"), "id", "location")
+	c.Assert(err, jc.ErrorIsNil)
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:      s.srv,
+		URL:          storeURL("set-auth-cookie"),
+		Method:       "PUT",
+		JSONBody:     macaroon.Slice{m},
+		ExpectStatus: http.StatusInternalServerError,
+		ExpectBody: params.Error{
+			Message: "cannot unmarshal macaroons: json: cannot unmarshal array into Go value of type params.SetAuthCookie",
+		},
+	})
+}
+
+func (s *APISuite) TestSetAuthCookieMethodError(c *gc.C) {
+	m, err := macaroon.New([]byte("key"), "id", "location")
+	c.Assert(err, jc.ErrorIsNil)
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:      s.srv,
+		URL:          storeURL("set-auth-cookie"),
+		Method:       "POST",
+		JSONBody:     macaroon.Slice{m},
+		ExpectStatus: http.StatusMethodNotAllowed,
+		ExpectBody: params.Error{
+			Code:    params.ErrMethodNotAllowed,
+			Message: "POST not allowed",
+		},
+	})
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/archive_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,1575 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+
+import (
+	"archive/zip"
+	"bytes"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+)
+
+type ArchiveSuite struct {
+	commonSuite
+}
+
+var _ = gc.Suite(&ArchiveSuite{})
+
+func (s *ArchiveSuite) TestGet(c *gc.C) {
+	id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+	wordpress := s.assertUploadCharm(c, "POST", id, "wordpress")
+	err := s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	archiveBytes, err := ioutil.ReadFile(wordpress.Path)
+	c.Assert(err, gc.IsNil)
+
+	archiveUrl := storeURL("~charmers/precise/wordpress-0/archive")
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     archiveUrl,
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes)
+	c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes))
+	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0")
+ assertCacheControl(c, rec.Header(), true) + + // Check that the HTTP range logic is plugged in OK. If this + // is working, we assume that the whole thing is working OK, + // as net/http is well-tested. + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: archiveUrl, + Header: http.Header{"Range": {"bytes=10-100"}}, + }) + c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes())) + c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes[10:101]) + c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0") + assertCacheControl(c, rec.Header(), true) + + // The development version of the entity can also be retrieved. + err = s.store.SetPerms(id.URL.WithChannel(charm.DevelopmentChannel), "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("~charmers/development/precise/wordpress-0/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) + c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/development/precise/wordpress-0") +} + +func (s *ArchiveSuite) TestGetDevelopment(c *gc.C) { + id := newResolvedURL("cs:~charmers/development/trusty/wordpress-0", -1) + wordpress := s.assertUploadCharm(c, "POST", id, "wordpress") + url := id.PreferredURL() + err := s.store.SetPerms(url, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + + archiveBytes, err := ioutil.ReadFile(wordpress.Path) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("~charmers/development/trusty/wordpress-0/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes) + c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes)) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/development/trusty/wordpress-0") + + // It is not possible to use the published URL to retrieve the archive, + err = s.store.SetPerms(url.WithChannel(""), "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("~charmers/trusty/wordpress-0/archive"), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/trusty/wordpress-0"`, + }, + }) +} + +func (s *ArchiveSuite) TestGetWithPartialId(c *gc.C) { + id := newResolvedURL("cs:~charmers/utopic/wordpress-42", -1) + err := s.store.AddCharmWithArchive( + id, + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("~charmers/wordpress/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + // The complete entity id can be retrieved from the response header. 
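+	// (This lets clients discover which revision a partial id resolved to.)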
+ c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.URL.String()) +} + +func (s *ArchiveSuite) TestGetPromulgatedWithPartialId(c *gc.C) { + id := newResolvedURL("cs:~charmers/utopic/wordpress-42", 42) + err := s.store.AddCharmWithArchive( + id, + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("wordpress/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + // The complete entity id can be retrieved from the response header. + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.PromulgatedURL().String()) +} + +func (s *ArchiveSuite) TestGetCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + for i, id := range []*router.ResolvedURL{ + newResolvedURL("~who/utopic/mysql-42", 42), + } { + c.Logf("test %d: %s", i, id) + + // Add a charm to the database (including the archive). + err := s.store.AddCharmWithArchive(id, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + + // Download the charm archive using the API. + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(id.URL.Path() + "/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // Check that the downloads count for the entity has been updated. + key := []string{params.StatsArchiveDownload, "utopic", "mysql", id.URL.User, "42"} + stats.CheckCounterSum(c, s.store, key, false, 1) + // Check that the promulgated download count for the entity has also been updated + key = []string{params.StatsArchiveDownloadPromulgated, "utopic", "mysql", "", "42"} + stats.CheckCounterSum(c, s.store, key, false, 1) + } +} + +func (s *ArchiveSuite) TestGetCountersDisabled(c *gc.C) { + url := newResolvedURL("~charmers/utopic/mysql-42", 42) + // Add a charm to the database (including the archive). + err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + // Download the charm archive using the API, passing stats=0. + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url.URL.Path() + "/archive?stats=0"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // Check that the downloads count for the entity has not been updated. 
+ key := []string{params.StatsArchiveDownload, "utopic", "mysql", "", "42"} + stats.CheckCounterSum(c, s.store, key, false, 0) +} + +var archivePostErrorsTests = []struct { + about string + path string + noContentLength bool + expectStatus int + expectMessage string + expectCode params.ErrorCode +}{{ + about: "revision specified", + path: "~charmers/precise/wordpress-23/archive", + expectStatus: http.StatusBadRequest, + expectMessage: "revision specified, but should not be specified", + expectCode: params.ErrBadRequest, +}, { + about: "no hash given", + path: "~charmers/precise/wordpress/archive", + expectStatus: http.StatusBadRequest, + expectMessage: "hash parameter not specified", + expectCode: params.ErrBadRequest, +}, { + about: "no content length", + path: "~charmers/precise/wordpress/archive?hash=1234563", + noContentLength: true, + expectStatus: http.StatusBadRequest, + expectMessage: "Content-Length not specified", + expectCode: params.ErrBadRequest, +}, { + about: "invalid channel", + path: "~charmers/bad-wolf/trusty/wordpress/archive", + expectStatus: http.StatusNotFound, + expectMessage: "not found", + expectCode: params.ErrNotFound, +}} + +func (s *ArchiveSuite) TestPostErrors(c *gc.C) { + type exoticReader struct { + io.Reader + } + for i, test := range archivePostErrorsTests { + c.Logf("test %d: %s", i, test.about) + var body io.Reader = strings.NewReader("bogus") + if test.noContentLength { + // net/http will automatically add a Content-Length header + // if it sees *strings.Reader, but not if it's a type it doesn't + // know about. + body = exoticReader{body} + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: body, + Username: testUsername, + Password: testPassword, + ExpectStatus: test.expectStatus, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: test.expectCode, + }, + }) + } +} + +func (s *ArchiveSuite) TestConcurrentUploads(c *gc.C) { + wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + f, err := os.Open(wordpress.Path) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + _, err = io.Copy(&buf, f) + c.Assert(err, gc.IsNil) + + hash, _ := hashOf(bytes.NewReader(buf.Bytes())) + + srv := httptest.NewServer(s.srv) + defer srv.Close() + + // Our strategy for testing concurrent uploads is as follows: We + // repeat uploading a bunch of simultaneous uploads to the same + // charm. Each upload should either succeed, or fail with an + // ErrDuplicateUpload error. We make sure that all replies are + // like this, and that at least one duplicate upload error is + // found, so that we know we've tested that error path. + + errorBodies := make(chan io.ReadCloser) + + // upload performs one upload of the testing charm. + // It sends the response body on the errorBodies channel when + // it finds an error response. 
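+	// Successful uploads just close the response body and return.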
+ upload := func() { + c.Logf("uploading") + body := bytes.NewReader(buf.Bytes()) + url := srv.URL + storeURL("~charmers/precise/wordpress/archive?hash="+hash) + req, err := http.NewRequest("POST", url, body) + c.Assert(err, gc.IsNil) + req.Header.Set("Content-Type", "application/zip") + req.SetBasicAuth(testUsername, testPassword) + resp, err := http.DefaultClient.Do(req) + if !c.Check(err, gc.IsNil) { + return + } + if resp.StatusCode == http.StatusOK { + resp.Body.Close() + return + } + errorBodies <- resp.Body + } + + // The try loop continues concurrently uploading + // charms until it is told to stop (by closing the try + // channel). It then signals that it has terminated + // by closing errorBodies. + try := make(chan struct{}) + go func(try chan struct{}) { + for _ = range try { + var wg sync.WaitGroup + for p := 0; p < 5; p++ { + wg.Add(1) + go func() { + upload() + wg.Done() + }() + } + wg.Wait() + } + close(errorBodies) + }(try) + + // We continue the loop until we have found an + // error (or the maximum iteration count has + // been exceeded). + foundError := false + count := 0 +loop: + for { + select { + case body, ok := <-errorBodies: + if !ok { + // The try loop has terminated, + // so we need to stop too. + break loop + } + dec := json.NewDecoder(body) + var errResp params.Error + err := dec.Decode(&errResp) + body.Close() + c.Assert(err, gc.IsNil) + c.Assert(errResp, jc.DeepEquals, params.Error{ + Message: "duplicate upload", + Code: params.ErrDuplicateUpload, + }) + // We've found the error we're looking for, + // so we signal to the try loop that it can stop. + // We will process any outstanding error bodies, + // before seeing errorBodies closed and exiting + // the loop. + foundError = true + if try != nil { + close(try) + try = nil + } + case try <- struct{}{}: + // In cases we've seen, the actual maximum value of + // count is 1, but let's allow for serious scheduler vagaries. + if count++; count > 200 { + c.Fatalf("200 tries with no duplicate error") + } + } + } + if !foundError { + c.Errorf("no duplicate-upload errors found") + } +} + +func (s *ArchiveSuite) TestPostCharm(c *gc.C) { + // A charm that did not exist before should get revision 0. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") + + // Subsequent charm uploads should increment the revision by 1. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-1", -1), "mysql") + + // Subsequent development charm uploads should increment the revision by 1. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/precise/wordpress-2", -1), "wordpress") + + // Retrieving the published version returns the last non-development charm. + err := s.store.SetPerms(charm.MustParseURL("~charmers/wordpress"), "read", params.Everyone) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("~charmers/wordpress/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-1") +} + +func (s *ArchiveSuite) TestPostCurrentVersion(c *gc.C) { + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/precise/wordpress-0", -1), "wordpress") + + // Subsequent charm uploads should not increment the revision by 1. 
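+ // Uploading content identical to the current revision should
+ // resolve to the existing id rather than creating a new revision.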
+ s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/precise/wordpress-0", -1), "wordpress") +} + +func (s *ArchiveSuite) TestPostDevelopmentPromulgated(c *gc.C) { + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/development/trusty/wordpress-0", 0), "wordpress") + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/trusty/wordpress-1", 1), "mysql") + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/trusty/wordpress-1", 1), "mysql") + + // The promulgated charm can be accessed via its development URL. + err := s.store.SetPerms(charm.MustParseURL("~charmers/development/wordpress"), "read", params.Everyone) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("development/wordpress/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:development/trusty/wordpress-1") + + // The promulgated charm cannot be retrieved using the published URL. + err = s.store.SetPerms(charm.MustParseURL("~charmers/wordpress"), "read", params.Everyone) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("wordpress/archive"), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:wordpress"`, + }, + }) +} + +var uploadAndPublishTests = []struct { + about string + existing string + upload string + expectId string + expectDevelopment bool +}{{ + about: "upload same development entity", + existing: "~who/development/django-0", + upload: "~who/development/django", + expectId: "~who/development/django-0", + expectDevelopment: true, +}, { + about: "upload same published entity", + existing: "~who/django-0", + upload: "~who/django", + expectId: "~who/django-0", +}, { + about: "existing development, upload published", + existing: "~who/development/django-0", + upload: "~who/django", + expectId: "~who/django-0", +}, { + about: "existing published, upload development", + existing: "~who/django-0", + upload: "~who/development/django", + expectId: "~who/development/django-0", +}} + +func (s *ArchiveSuite) TestUploadAndPublish(c *gc.C) { + for i, test := range uploadAndPublishTests { + c.Logf("%d. %s", i, test.about) + + // Upload the pre-existing entity. + rurl := newResolvedURL(test.existing, -1) + s.assertUploadCharm(c, "POST", rurl, "multi-series") + + // Upload the same charm again, using the upload URL. + body, hash, size := archiveInfo(c, "multi-series") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.upload + "/archive?hash=" + hash), + Method: "POST", + ContentLength: size, + Header: http.Header{"Content-Type": {"application/zip"}}, + Body: body, + Username: testUsername, + Password: testPassword, + ExpectBody: params.ArchiveUploadResponse{ + Id: charm.MustParseURL(test.expectId), + }, + }) + + // Check the development flag of the entity. + entity, err := s.store.FindEntity(rurl, "development") + c.Assert(err, gc.IsNil) + c.Assert(entity.Development, gc.Equals, test.expectDevelopment) + + // Remove all entities from the store. + _, err = s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +func (s *ArchiveSuite) TestPostMultiSeriesCharm(c *gc.C) { + // A charm that did not exist before should get revision 0. 
+ s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series") +} + +func (s *ArchiveSuite) TestPostMultiSeriesDevelopmentCharm(c *gc.C) { + // A charm that did not exist before should get revision 0. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/juju-gui-0", -1), "multi-series") +} + +var charmPostErrorTests = []struct { + about string + url *charm.URL + charm string + expectStatus int + expectBody interface{} +}{{ + about: "no series", + url: charm.MustParseURL("~charmers/juju-gui-0"), + charm: "wordpress", + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Message: "series not specified in url or charm metadata", + Code: params.ErrEntityIdNotAllowed, + }, +}, { + about: "url series not in metadata", + url: charm.MustParseURL("~charmers/precise/juju-gui-0"), + charm: "multi-series", + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Message: `"precise" series not listed in charm metadata`, + Code: params.ErrEntityIdNotAllowed, + }, +}, { + about: "bad combination of series", + url: charm.MustParseURL("~charmers/juju-gui-0"), + charm: "multi-series-bad-combination", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Message: `cannot mix series from ubuntu and windows in single charm`, + Code: params.ErrInvalidEntity, + }, +}, { + about: "unknown series", + url: charm.MustParseURL("~charmers/juju-gui-0"), + charm: "multi-series-unknown", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Message: `unrecognised series "nosuchseries" in metadata`, + Code: params.ErrInvalidEntity, + }, +}} + +func (s *ArchiveSuite) TestCharmPostError(c *gc.C) { + for i, test := range charmPostErrorTests { + c.Logf("%d. %s", i, test.about) + s.assertUploadCharmError( + c, + "POST", + test.url, + nil, + test.charm, + test.expectStatus, + test.expectBody, + ) + } +} + +func (s *ArchiveSuite) TestPostMultiSeriesCharmRevisionAfterAllSingleSeriesOnes(c *gc.C) { + // Create some single series versions of the charm + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", -1), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", -1), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", -1), "mysql") + + // Check that the new multi-series revision takes the a revision + // number larger than the largest of all the single series + // revisions. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", -1), "multi-series") +} + +func (s *ArchiveSuite) TestPostMultiSeriesPromulgatedRevisionAfterAllSingleSeriesOnes(c *gc.C) { + // Create some single series versions of the charm + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", 0), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", 9), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", 33), "mysql") + + // Check that the new multi-series promulgated revision takes the + // a revision number larger than the largest of all the single + // series revisions. 
+ s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", 34), "multi-series") +} + +func (s *ArchiveSuite) TestPostSingleSeriesCharmWhenMultiSeriesVersionExists(c *gc.C) { + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series") + + s.assertUploadCharmError( + c, + "POST", + charm.MustParseURL("~charmers/saucy/juju-gui-0"), + nil, + "wordpress", + http.StatusForbidden, + params.Error{ + Message: "charm name duplicates multi-series charm name cs:~charmers/juju-gui-0", + Code: params.ErrEntityIdNotAllowed, + }, + ) +} + +func (s *ArchiveSuite) TestPutCharm(c *gc.C) { + s.assertUploadCharm( + c, + "PUT", + newResolvedURL("~charmers/precise/wordpress-3", 3), + "wordpress", + ) + + s.assertUploadCharm( + c, + "PUT", + newResolvedURL("~charmers/precise/wordpress-1", -1), + "wordpress", + ) + + // Check that we get a duplicate-upload error if we try to + // upload to the same revision again. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-3"), + nil, + "mysql", + http.StatusInternalServerError, + params.Error{ + Message: "duplicate upload", + Code: params.ErrDuplicateUpload, + }, + ) + + // Check we get an error if promulgated url already uploaded. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-4"), + charm.MustParseURL("precise/wordpress-3"), + "wordpress", + http.StatusInternalServerError, + params.Error{ + Message: "duplicate upload", + Code: params.ErrDuplicateUpload, + }, + ) + + // Check we get an error if promulgated url has user. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-4"), + charm.MustParseURL("~charmers/precise/wordpress-4"), + "mysql", + http.StatusBadRequest, + params.Error{ + Message: "promulgated URL cannot have a user", + Code: params.ErrBadRequest, + }, + ) + + // Check we get an error if promulgated url has different name. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-4"), + charm.MustParseURL("precise/mysql-4"), + "mysql", + http.StatusBadRequest, + params.Error{ + Message: "promulgated URL has incorrect charm name", + Code: params.ErrBadRequest, + }, + ) +} + +func (s *ArchiveSuite) TestPostBundle(c *gc.C) { + // Upload the required charms. + err := s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/mysql-42", 42), + storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/wordpress-47", 47), + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) + c.Assert(err, gc.IsNil) + err = s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/logging-1", 1), + storetesting.Charms.CharmArchive(c.MkDir(), "logging")) + c.Assert(err, gc.IsNil) + + // A bundle that did not exist before should get revision 0. + s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-0", -1), "wordpress-simple") + + // Subsequent bundle uploads should increment the + // revision by 1. + s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") + + // Uploading the same archive twice should not increment the revision... + s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") + + // ... but uploading an archive used by a previous revision should. 
+ s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-2", -1), "wordpress-simple") +} + +func (s *ArchiveSuite) TestPostHashMismatch(c *gc.C) { + content := []byte("some content") + hash, _ := hashOf(bytes.NewReader(content)) + + // Corrupt the content. + copy(content, "bogus") + path := fmt.Sprintf("~charmers/precise/wordpress/archive?hash=%s", hash) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: bytes.NewReader(content), + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: "cannot put archive blob: hash mismatch", + }, + }) +} + +func invalidZip() io.ReadSeeker { + return strings.NewReader("invalid zip content") +} + +func (s *ArchiveSuite) TestPostInvalidCharmZip(c *gc.C) { + s.assertCannotUpload(c, "~charmers/precise/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read charm archive: zip: not a valid zip file") +} + +func (s *ArchiveSuite) TestPostInvalidBundleZip(c *gc.C) { + s.assertCannotUpload(c, "~charmers/bundle/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read bundle archive: zip: not a valid zip file") +} + +var postInvalidCharmMetadataTests = []struct { + about string + spec charmtesting.CharmSpec + expectError string +}{{ + about: "bad provider relation name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +provides: + relation-name: + interface: baz +`, + }, + expectError: "relation relation-name has almost certainly not been changed from the template", +}, { + about: "bad provider interface name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +provides: + baz: + interface: interface-name +`, + }, + expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", +}, { + about: "bad requirer relation name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +requires: + relation-name: + interface: baz +`, + }, + expectError: "relation relation-name has almost certainly not been changed from the template", +}, { + about: "bad requirer interface name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +requires: + baz: + interface: interface-name +`, + }, + expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", +}, { + about: "bad peer relation name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +peers: + relation-name: + interface: baz +`, + }, + expectError: "relation relation-name has almost certainly not been changed from the template", +}, { + about: "bad peer interface name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +peers: + baz: + interface: interface-name +`, + }, + expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", +}} + +func (s *ArchiveSuite) TestPostInvalidCharmMetadata(c *gc.C) { + for i, test := range postInvalidCharmMetadataTests { + c.Logf("test %d: %s", i, test.about) + ch := charmtesting.NewCharm(c, test.spec) + r := bytes.NewReader(ch.ArchiveBytes()) + s.assertCannotUpload(c, "~charmers/trusty/wordpress", r, http.StatusBadRequest, 
params.ErrInvalidEntity, test.expectError) + } +} + +func (s *ArchiveSuite) TestPostInvalidBundleData(c *gc.C) { + path := storetesting.Charms.BundleArchivePath(c.MkDir(), "bad") + f, err := os.Open(path) + c.Assert(err, gc.IsNil) + defer f.Close() + // Here we exercise both bundle internal verification (bad relation) and + // validation with respect to charms (wordpress and mysql are missing). + expectErr := `bundle verification failed: [` + + `"relation [\"foo:db\" \"mysql:server\"] refers to service \"foo\" not defined in this bundle",` + + `"service \"mysql\" refers to non-existent charm \"mysql\"",` + + `"service \"wordpress\" refers to non-existent charm \"wordpress\""]` + s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, http.StatusBadRequest, params.ErrInvalidEntity, expectErr) +} + +func (s *ArchiveSuite) TestPostCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") + + // Check that the upload count for the entity has been updated. + key := []string{params.StatsArchiveUpload, "precise", "wordpress", "charmers"} + stats.CheckCounterSum(c, s.store, key, false, 1) +} + +func (s *ArchiveSuite) TestPostFailureCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + hash, _ := hashOf(invalidZip()) + doPost := func(url string, expectCode int) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url), + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: invalidZip(), + Username: testUsername, + Password: testPassword, + }) + c.Assert(rec.Code, gc.Equals, expectCode, gc.Commentf("body: %s", rec.Body.Bytes())) + } + + // Send a first invalid request (revision specified). + doPost("~charmers/utopic/wordpress-42/archive", http.StatusBadRequest) + // Send a second invalid request (no hash). + doPost("~charmers/utopic/wordpress/archive", http.StatusBadRequest) + // Send a third invalid request (invalid zip). + doPost("~charmers/utopic/wordpress/archive?hash="+hash, http.StatusBadRequest) + + // Check that the failed upload count for the entity has been updated. 
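+ // All three failed requests above target ~charmers/utopic/wordpress,
+ // so the failed-upload counter for that entity should have been
+ // incremented three times.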
+ key := []string{params.StatsArchiveFailedUpload, "utopic", "wordpress", "charmers"} + stats.CheckCounterSum(c, s.store, key, false, 3) +} + +func (s *ArchiveSuite) TestPostErrorReadsFully(c *gc.C) { + h := s.handler(c) + defer h.Close() + + b := bytes.NewBuffer([]byte("test body")) + r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b) + c.Assert(err, gc.IsNil) + r.Header.Set("Content-Type", "application/zip") + r.SetBasicAuth(testUsername, testPassword) + rec := httptest.NewRecorder() + h.ServeHTTP(rec, r) + c.Assert(rec.Code, gc.Equals, http.StatusBadRequest) + c.Assert(b.Len(), gc.Equals, 0) +} + +func (s *ArchiveSuite) TestPostAuthErrorReadsFully(c *gc.C) { + h := s.handler(c) + defer h.Close() + b := bytes.NewBuffer([]byte("test body")) + r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b) + c.Assert(err, gc.IsNil) + r.Header.Set("Content-Type", "application/zip") + rec := httptest.NewRecorder() + h.ServeHTTP(rec, r) + c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) + c.Assert(b.Len(), gc.Equals, 0) +} + +func (s *ArchiveSuite) TestUploadOfCurrentCharmReadsFully(c *gc.C) { + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") + + ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + f, err := os.Open(ch.Path) + c.Assert(err, gc.IsNil) + defer f.Close() + + // Calculate blob hashes. + hash := blobstore.NewHash() + _, err = io.Copy(hash, f) + c.Assert(err, gc.IsNil) + hashSum := fmt.Sprintf("%x", hash.Sum(nil)) + + // Simulate upload of current version + h := s.handler(c) + defer h.Close() + b := bytes.NewBuffer([]byte("test body")) + r, err := http.NewRequest("POST", "/~charmers/precise/wordpress/archive?hash="+hashSum, b) + c.Assert(err, gc.IsNil) + r.Header.Set("Content-Type", "application/zip") + r.SetBasicAuth(testUsername, testPassword) + rec := httptest.NewRecorder() + h.ServeHTTP(rec, r) + httptesting.AssertJSONResponse( + c, + rec, + http.StatusOK, + params.ArchiveUploadResponse{ + Id: charm.MustParseURL("~charmers/precise/wordpress-0"), + }, + ) + c.Assert(b.Len(), gc.Equals, 0) +} + +func (s *ArchiveSuite) assertCannotUpload(c *gc.C, id string, content io.ReadSeeker, httpStatus int, errorCode params.ErrorCode, errorMessage string) { + hash, size := hashOf(content) + _, err := content.Seek(0, 0) + c.Assert(err, gc.IsNil) + + path := fmt.Sprintf("%s/archive?hash=%s", id, hash) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "POST", + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: content, + Username: testUsername, + Password: testPassword, + ExpectStatus: httpStatus, + ExpectBody: params.Error{ + Message: errorMessage, + Code: errorCode, + }, + }) + + // TODO(rog) check that the uploaded blob has been deleted, + // by checking that no new blobs have been added to the blob store. +} + +// assertUploadCharm uploads the testing charm with the given name +// through the API. The URL must hold the expected revision +// that the charm will be given when uploaded. +func (s *ArchiveSuite) assertUploadCharm(c *gc.C, method string, url *router.ResolvedURL, charmName string) *charm.CharmArchive { + ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName) + id, size := s.assertUpload(c, method, url, ch.Path) + if url.URL.Series == "" { + // V4 SPECIFIC: + // We're uploading a multi-series charm, but we always + // return charm ids with a series. 
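+ // The reported series is the first one listed in the charm's
+ // metadata, so e.g. a multi-series id cs:~charmers/juju-gui-0
+ // comes back with that series filled in.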
+ id.Series = ch.Meta().Series[0]
+ }
+ s.assertEntityInfo(c, entityInfo{
+ Id: id,
+ Meta: entityMetaInfo{
+ ArchiveSize: &params.ArchiveSizeResponse{Size: size},
+ CharmMeta: ch.Meta(),
+ CharmConfig: ch.Config(),
+ CharmActions: ch.Actions(),
+ },
+ })
+ return ch
+}
+
+// assertUploadBundle uploads the testing bundle with the given name
+// through the API. The URL must hold the expected revision
+// that the bundle will be given when uploaded.
+func (s *ArchiveSuite) assertUploadBundle(c *gc.C, method string, url *router.ResolvedURL, bundleName string) {
+ path := storetesting.Charms.BundleArchivePath(c.MkDir(), bundleName)
+ b, err := charm.ReadBundleArchive(path)
+ c.Assert(err, gc.IsNil)
+ id, size := s.assertUpload(c, method, url, path)
+ s.assertEntityInfo(c, entityInfo{
+ Id: id,
+ Meta: entityMetaInfo{
+ ArchiveSize: &params.ArchiveSizeResponse{Size: size},
+ BundleMeta: b.Data(),
+ },
+ },
+ )
+}
+
+func (s *ArchiveSuite) assertUpload(c *gc.C, method string, url *router.ResolvedURL, fileName string) (id *charm.URL, size int64) {
+ f, err := os.Open(fileName)
+ c.Assert(err, gc.IsNil)
+ defer f.Close()
+
+ // Calculate blob hashes.
+ hash := blobstore.NewHash()
+ hash256 := sha256.New()
+ size, err = io.Copy(io.MultiWriter(hash, hash256), f)
+ c.Assert(err, gc.IsNil)
+ hashSum := fmt.Sprintf("%x", hash.Sum(nil))
+ hash256Sum := fmt.Sprintf("%x", hash256.Sum(nil))
+ _, err = f.Seek(0, 0)
+ c.Assert(err, gc.IsNil)
+
+ uploadURL := url.UserOwnedURL()
+ if method == "POST" {
+ uploadURL.Revision = -1
+ }
+
+ path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum)
+ expectId := uploadURL.WithRevision(url.URL.Revision)
+ expectedPromulgatedId := url.PromulgatedURL()
+ if expectedPromulgatedId != nil {
+ path += fmt.Sprintf("&promulgated=%s", expectedPromulgatedId.String())
+ }
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+ Handler: s.srv,
+ URL: storeURL(path),
+ Method: method,
+ ContentLength: size,
+ Header: http.Header{
+ "Content-Type": {"application/zip"},
+ },
+ Body: f,
+ Username: testUsername,
+ Password: testPassword,
+ ExpectBody: params.ArchiveUploadResponse{
+ Id: expectId,
+ PromulgatedId: expectedPromulgatedId,
+ },
+ })
+
+ var entity mongodoc.Entity
+ err = s.store.DB.Entities().FindId(expectId.WithChannel("")).One(&entity)
+ c.Assert(err, gc.IsNil)
+ c.Assert(entity.BlobHash, gc.Equals, hashSum)
+ c.Assert(entity.BlobHash256, gc.Equals, hash256Sum)
+ c.Assert(entity.PromulgatedURL, gc.DeepEquals, expectedPromulgatedId)
+ c.Assert(entity.Development, gc.Equals, url.Development)
+ // Test that the expected entry has been created
+ // in the blob store.
+ r, _, err := s.store.BlobStore.Open(entity.BlobName)
+ c.Assert(err, gc.IsNil)
+ r.Close()
+
+ return expectId, size
+}
+
+// assertUploadCharmError attempts to upload the testing charm with the
+// given name through the API, checking that the attempt fails with the
+// specified error. The URL must hold the expected revision that the
+// charm will be given when uploaded.
+func (s *ArchiveSuite) assertUploadCharmError(c *gc.C, method string, url, purl *charm.URL, charmName string, expectStatus int, expectBody interface{}) {
+ ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName)
+ s.assertUploadError(c, method, url, purl, ch.Path, expectStatus, expectBody)
+}
+
+// assertUploadError asserts that we get an error when uploading
+// the contents of the given file to the given URL and promulgated URL.
+// The reason this method does not take a *router.ResolvedURL +// is so that we can test what happens when an inconsistent promulgated URL +// is passed in. +func (s *ArchiveSuite) assertUploadError(c *gc.C, method string, url, purl *charm.URL, fileName string, expectStatus int, expectBody interface{}) { + f, err := os.Open(fileName) + c.Assert(err, gc.IsNil) + defer f.Close() + + // Calculate blob hashes. + hash := blobstore.NewHash() + size, err := io.Copy(hash, f) + c.Assert(err, gc.IsNil) + hashSum := fmt.Sprintf("%x", hash.Sum(nil)) + _, err = f.Seek(0, 0) + c.Assert(err, gc.IsNil) + + uploadURL := *url + if method == "POST" { + uploadURL.Revision = -1 + } + + path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) + if purl != nil { + path += fmt.Sprintf("&promulgated=%s", purl.String()) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: method, + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: f, + Username: testUsername, + Password: testPassword, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) +} + +var archiveFileErrorsTests = []struct { + about string + path string + expectStatus int + expectMessage string + expectCode params.ErrorCode +}{{ + about: "entity not found", + path: "~charmers/trusty/no-such-42/archive/icon.svg", + expectStatus: http.StatusNotFound, + expectMessage: `no matching charm or bundle for "cs:~charmers/trusty/no-such-42"`, + expectCode: params.ErrNotFound, +}, { + about: "directory listing", + path: "~charmers/utopic/wordpress-0/archive/hooks", + expectStatus: http.StatusForbidden, + expectMessage: "directory listing not allowed", + expectCode: params.ErrForbidden, +}, { + about: "file not found", + path: "~charmers/utopic/wordpress-0/archive/no-such", + expectStatus: http.StatusNotFound, + expectMessage: `file "no-such" not found in the archive`, + expectCode: params.ErrNotFound, +}} + +func (s *ArchiveSuite) TestArchiveFileErrors(c *gc.C) { + wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + url := newResolvedURL("cs:~charmers/utopic/wordpress-0", 0) + err := s.store.AddCharmWithArchive(url, wordpress) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + for i, test := range archiveFileErrorsTests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "GET", + ExpectStatus: test.expectStatus, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: test.expectCode, + }, + }) + } +} + +func (s *ArchiveSuite) TestArchiveFileGet(c *gc.C) { + ch := storetesting.Charms.CharmArchive(c.MkDir(), "all-hooks") + id := newResolvedURL("cs:~charmers/utopic/all-hooks-0", 0) + err := s.store.AddCharmWithArchive(id, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + zipFile, err := zip.OpenReader(ch.Path) + c.Assert(err, gc.IsNil) + defer zipFile.Close() + + // Check a file in the root directory. + s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/metadata.yaml") + // Check a file in a subdirectory. 
+ s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/hooks/install") +} + +// assertArchiveFileContents checks that the response returned by the +// serveArchiveFile endpoint is correct for the given archive and URL path. +func (s *ArchiveSuite) assertArchiveFileContents(c *gc.C, zipFile *zip.ReadCloser, path string) { + // For example: trusty/django/archive/hooks/install -> hooks/install. + filePath := strings.SplitN(path, "/archive/", 2)[1] + + // Retrieve the expected bytes. + var expectBytes []byte + for _, file := range zipFile.File { + if file.Name == filePath { + r, err := file.Open() + c.Assert(err, gc.IsNil) + defer r.Close() + expectBytes, err = ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + break + } + } + c.Assert(expectBytes, gc.Not(gc.HasLen), 0) + + // Make the request. + url := storeURL(path) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: url, + }) + + // Ensure the response is what we expect. + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, expectBytes) + headers := rec.Header() + c.Assert(headers.Get("Content-Length"), gc.Equals, strconv.Itoa(len(expectBytes))) + // We only have text files in the charm repository used for tests. + c.Assert(headers.Get("Content-Type"), gc.Equals, "text/plain; charset=utf-8") + assertCacheControl(c, rec.Header(), true) +} + +func (s *ArchiveSuite) TestDelete(c *gc.C) { + // Add a charm to the database (including the archive). + id := "~charmers/utopic/mysql-42" + url := newResolvedURL(id, -1) + err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + + // Retrieve the corresponding entity. + var entity mongodoc.Entity + err = s.store.DB.Entities().FindId(&url.URL).Select(bson.D{{"blobname", 1}}).One(&entity) + c.Assert(err, gc.IsNil) + + // Delete the charm using the API. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(id + "/archive"), + Method: "DELETE", + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusOK, + }) + + // The entity has been deleted. + count, err := s.store.DB.Entities().FindId(url).Count() + c.Assert(err, gc.IsNil) + c.Assert(count, gc.Equals, 0) + + // The blob has been deleted. + _, _, err = s.store.BlobStore.Open(entity.BlobName) + c.Assert(err, gc.ErrorMatches, "resource.*not found") +} + +func (s *ArchiveSuite) TestDeleteSpecificCharm(c *gc.C) { + // Add a couple of charms to the database. + for _, id := range []string{"~charmers/trusty/mysql-42", "~charmers/utopic/mysql-42", "~charmers/utopic/mysql-47"} { + err := s.store.AddCharmWithArchive( + newResolvedURL(id, -1), + storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + } + + // Delete the second charm using the API. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("~charmers/utopic/mysql-42/archive"), + Method: "DELETE", + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusOK, + }) + + // The other two charms are still present in the database. 
+ urls := []*charm.URL{
+ charm.MustParseURL("~charmers/trusty/mysql-42"),
+ charm.MustParseURL("~charmers/utopic/mysql-47"),
+ }
+ count, err := s.store.DB.Entities().Find(bson.D{{
+ "_id", bson.D{{"$in", urls}},
+ }}).Count()
+ c.Assert(err, gc.IsNil)
+ c.Assert(count, gc.Equals, 2)
+}
+
+func (s *ArchiveSuite) TestDeleteNotFound(c *gc.C) {
+ // Try to delete a non-existent charm using the API.
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+ Handler: s.srv,
+ URL: storeURL("~charmers/utopic/no-such-0/archive"),
+ Method: "DELETE",
+ Username: testUsername,
+ Password: testPassword,
+ ExpectStatus: http.StatusNotFound,
+ ExpectBody: params.Error{
+ Message: `no matching charm or bundle for "cs:~charmers/utopic/no-such-0"`,
+ Code: params.ErrNotFound,
+ },
+ })
+}
+
+func (s *ArchiveSuite) TestDeleteError(c *gc.C) {
+ // Add a charm to the database (not including the archive).
+ id := "~charmers/utopic/mysql-42"
+ url := newResolvedURL(id, -1)
+ err := s.store.AddCharm(storetesting.Charms.CharmArchive(c.MkDir(), "mysql"),
+ charmstore.AddParams{
+ URL: url,
+ BlobName: "no-such-name",
+ BlobHash: fakeBlobHash,
+ BlobSize: fakeBlobSize,
+ })
+ c.Assert(err, gc.IsNil)
+
+ // Try to delete the charm using the API.
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+ Handler: s.srv,
+ URL: storeURL(id + "/archive"),
+ Method: "DELETE",
+ Username: testUsername,
+ Password: testPassword,
+ ExpectStatus: http.StatusInternalServerError,
+ ExpectBody: params.Error{
+ Message: `cannot remove blob no-such-name: resource at path "global/no-such-name" not found`,
+ },
+ })
+}
+
+func (s *ArchiveSuite) TestDeleteCounters(c *gc.C) {
+ if !storetesting.MongoJSEnabled() {
+ c.Skip("MongoDB JavaScript not available")
+ }
+
+ // Add a charm to the database (including the archive).
+ id := "~charmers/utopic/mysql-42"
+ err := s.store.AddCharmWithArchive(
+ newResolvedURL(id, -1),
+ storetesting.Charms.CharmArchive(c.MkDir(), "mysql"))
+ c.Assert(err, gc.IsNil)
+
+ // Delete the charm using the API.
+ rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+ Handler: s.srv,
+ Method: "DELETE",
+ URL: storeURL(id + "/archive"),
+ Username: testUsername,
+ Password: testPassword,
+ })
+ c.Assert(rec.Code, gc.Equals, http.StatusOK)
+
+ // Check that the delete count for the entity has been updated.
+ key := []string{params.StatsArchiveDelete, "utopic", "mysql", "charmers", "42"} + stats.CheckCounterSum(c, s.store, key, false, 1) +} + +func (s *ArchiveSuite) TestPostAuthErrors(c *gc.C) { + checkAuthErrors(c, s.srv, "POST", "~charmers/utopic/django/archive") +} + +func (s *ArchiveSuite) TestDeleteAuthErrors(c *gc.C) { + err := s.store.AddCharmWithArchive( + newResolvedURL("~charmers/utopic/django-42", 42), + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress"), + ) + c.Assert(err, gc.IsNil) + checkAuthErrors(c, s.srv, "DELETE", "utopic/django-42/archive") +} + +var archiveAuthErrorsTests = []struct { + about string + header http.Header + username string + password string + expectMessage string +}{{ + about: "no credentials", + expectMessage: "authentication failed: missing HTTP auth header", +}, { + about: "invalid encoding", + header: http.Header{ + "Authorization": {"Basic not-a-valid-base64"}, + }, + expectMessage: "authentication failed: invalid HTTP auth encoding", +}, { + about: "invalid header", + header: http.Header{ + "Authorization": {"Basic " + base64.StdEncoding.EncodeToString([]byte("invalid"))}, + }, + expectMessage: "authentication failed: invalid HTTP auth contents", +}, { + about: "invalid credentials", + username: "no-such", + password: "exterminate!", + expectMessage: "invalid user name or password", +}} + +func checkAuthErrors(c *gc.C, handler http.Handler, method, url string) { + archiveURL := storeURL(url) + for i, test := range archiveAuthErrorsTests { + c.Logf("test %d: %s", i, test.about) + if test.header == nil { + test.header = http.Header{} + } + if method == "POST" { + test.header.Add("Content-Type", "application/zip") + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: handler, + URL: archiveURL, + Method: method, + Header: test.header, + Username: test.username, + Password: test.password, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: params.ErrUnauthorized, + }, + }) + } +} + +// entityInfo holds all the information we want to find +// out about a charm or bundle uploaded to the store. 
+type entityInfo struct { + Id *charm.URL + Meta entityMetaInfo +} + +type entityMetaInfo struct { + ArchiveSize *params.ArchiveSizeResponse `json:"archive-size,omitempty"` + CharmMeta *charm.Meta `json:"charm-metadata,omitempty"` + CharmConfig *charm.Config `json:"charm-config,omitempty"` + CharmActions *charm.Actions `json:"charm-actions,omitempty"` + BundleMeta *charm.BundleData `json:"bundle-metadata,omitempty"` +} + +func (s *ArchiveSuite) assertEntityInfo(c *gc.C, expect entityInfo) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL( + expect.Id.Path() + "/meta/any" + + "?include=archive-size" + + "&include=charm-metadata" + + "&include=charm-config" + + "&include=charm-actions" + + "&include=bundle-metadata", + ), + Username: testUsername, + Password: testPassword, + ExpectBody: expect, + }) +} + +func (s *ArchiveSuite) TestArchiveFileGetHasCORSHeaders(c *gc.C) { + id := "~charmers/precise/wordpress-0" + s.assertUploadCharm(c, "POST", newResolvedURL(id, -1), "wordpress") + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(fmt.Sprintf("%s/archive/metadata.yaml", id)), + }) + headers := rec.Header() + c.Assert(len(headers["Access-Control-Allow-Origin"]), gc.Equals, 1) + c.Assert(len(headers["Access-Control-Allow-Headers"]), gc.Equals, 1) + c.Assert(headers["Access-Control-Allow-Origin"][0], gc.Equals, "*") + c.Assert(headers["Access-Control-Cache-Max-Age"][0], gc.Equals, "600") + c.Assert(headers["Access-Control-Allow-Headers"][0], gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With") +} + +func hashOfBytes(data []byte) string { + hash := blobstore.NewHash() + hash.Write(data) + return fmt.Sprintf("%x", hash.Sum(nil)) +} + +func hashOf(r io.Reader) (hashSum string, size int64) { + hash := blobstore.NewHash() + n, err := io.Copy(hash, r) + if err != nil { + panic(err) + } + return fmt.Sprintf("%x", hash.Sum(nil)), n +} + +// assertCacheControl asserts that the cache control headers are +// appropriately set. The isPublic parameter specifies +// whether the id in the request represents a public charm or bundle. +func assertCacheControl(c *gc.C, h http.Header, isPublic bool) { + if isPublic { + seconds := v4.ArchiveCachePublicMaxAge / time.Second + c.Assert(h.Get("Cache-Control"), gc.Equals, fmt.Sprintf("public, max-age=%d", seconds)) + } else { + c.Assert(h.Get("Cache-Control"), gc.Equals, "no-cache, must-revalidate") + } +} + +type ArchiveSearchSuite struct { + commonSuite +} + +var _ = gc.Suite(&ArchiveSearchSuite{}) + +func (s *ArchiveSearchSuite) SetUpSuite(c *gc.C) { + s.enableES = true + s.commonSuite.SetUpSuite(c) +} + +func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { + s.commonSuite.SetUpTest(c) + // TODO (frankban): remove this call when removing the legacy counts logic. + patchLegacyDownloadCountsEnabled(s.AddCleanup, false) +} + +func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + for i, id := range []string{"~charmers/wily/mysql-42", "~who/wily/mysql-42"} { + c.Logf("test %d: %s", i, id) + url := newResolvedURL(id, -1) + + // Add a charm to the database (including the archive). + err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + // Download the charm archive using the API. 
+ rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(id + "/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // Check that the search record for the entity has been updated. + stats.CheckSearchTotalDownloads(c, s.store, &url.URL, 1) + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/auth_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,989 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v4" +) + +func (s *commonSuite) AssertEndpointAuth(c *gc.C, p httptesting.JSONCallParams) { + s.testNonMacaroonAuth(c, p) + s.testMacaroonAuth(c, p) +} + +func (s *commonSuite) testNonMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { + p.Handler = s.noMacaroonSrv + // Check that the request succeeds when provided with the + // correct credentials. + p.Username = "test-user" + p.Password = "test-password" + httptesting.AssertJSONCall(c, p) + + // Check that auth fails with no creds provided. + p.Username = "" + p.Password = "" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) + + // Check that auth fails with the wrong username provided. + p.Username = "wrong" + p.Password = "test-password" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "invalid user name or password", + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) + + // Check that auth fails with the wrong password provided. + p.Username = "test-user" + p.Password = "test-password-wrong" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "invalid user name or password", + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) +} + +func (s *commonSuite) testMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { + // Make a test third party caveat discharger. + var checkedCaveats []string + var mu sync.Mutex + var dischargeError error + s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { + mu.Lock() + defer mu.Unlock() + checkedCaveats = append(checkedCaveats, cond+" "+arg) + if dischargeError != nil { + return nil, dischargeError + } + return []checkers.Caveat{ + checkers.DeclaredCaveat("username", "bob"), + }, nil + } + p.Handler = s.srv + + client := httpbakery.NewHTTPClient() + cookieJar := &cookieJar{CookieJar: client.Jar} + client.Jar = cookieJar + p.Do = bakeryDo(client) + + // Check that the call succeeds with simple auth. 
+ c.Log("simple auth sucess") + p.Username = "test-user" + p.Password = "test-password" + httptesting.AssertJSONCall(c, p) + c.Assert(checkedCaveats, gc.HasLen, 0) + c.Assert(cookieJar.cookieURLs, gc.HasLen, 0) + + // Check that the call gives us the correct + // "authentication denied response" without simple auth + // and uses the third party checker + // and that a cookie is stored at the correct location. + // TODO when we allow admin access via macaroon creds, + // change this test to expect success. + c.Log("macaroon unauthorized error") + p.Username, p.Password = "", "" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: `unauthorized: access denied for user "bob"`, + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) + sort.Strings(checkedCaveats) + c.Assert(checkedCaveats, jc.DeepEquals, []string{ + "is-authenticated-user ", + }) + checkedCaveats = nil + c.Assert(cookieJar.cookieURLs, gc.DeepEquals, []string{"http://somehost/"}) + + // Check that the call fails with incorrect simple auth info. + c.Log("simple auth error") + p.Password = "bad-password" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + } + + // Check that it fails when the discharger refuses the discharge. + c.Log("macaroon discharge error") + client = httpbakery.NewHTTPClient() + dischargeError = fmt.Errorf("go away") + p.Do = bakeryDo(client) // clear cookies + p.Password = "" + p.Username = "" + p.ExpectError = `cannot get discharge from "https://[^"]*": third party refused discharge: cannot discharge: go away` + httptesting.AssertJSONCall(c, p) +} + +type cookieJar struct { + cookieURLs []string + http.CookieJar +} + +func (j *cookieJar) SetCookies(url *url.URL, cookies []*http.Cookie) { + url1 := *url + url1.Host = "somehost" + for _, cookie := range cookies { + if cookie.Path != "" { + url1.Path = cookie.Path + } + } + j.cookieURLs = append(j.cookieURLs, url1.String()) + j.CookieJar.SetCookies(url, cookies) +} + +func noInteraction(*url.URL) error { + return fmt.Errorf("unexpected interaction required") +} + +// dischargedAuthCookie retrieves and discharges an authentication macaroon cookie. It adds the provided +// first-party caveats before discharging the macaroon. +func dischargedAuthCookie(c *gc.C, srv http.Handler, caveats ...string) *http.Cookie { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: srv, + URL: storeURL("macaroon"), + Method: "GET", + }) + var m macaroon.Macaroon + err := json.Unmarshal(rec.Body.Bytes(), &m) + c.Assert(err, gc.IsNil) + for _, cav := range caveats { + err := m.AddFirstPartyCaveat(cav) + c.Assert(err, gc.IsNil) + } + client := httpbakery.NewClient() + ms, err := client.DischargeAll(&m) + c.Assert(err, gc.IsNil) + macaroonCookie, err := httpbakery.NewCookie(ms) + c.Assert(err, gc.IsNil) + return macaroonCookie +} + +type authSuite struct { + commonSuite +} + +var _ = gc.Suite(&authSuite{}) + +func (s *authSuite) SetUpSuite(c *gc.C) { + s.enableIdentity = true + s.commonSuite.SetUpSuite(c) +} + +var readAuthorizationTests = []struct { + // about holds the test description. + about string + // username holds the authenticated user name returned by the discharger. + // If empty, an anonymous user is returned. + username string + // groups holds group names the user is member of, as returned by the + // discharger. 
+ groups []string + // readPerm stores a list of users with read permissions. + readPerm []string + // expectStatus is the expected HTTP response status. + // Defaults to 200 status OK. + expectStatus int + // expectBody holds the expected body of the HTTP response. If nil, + // the body is not checked and the response is assumed to be ok. + expectBody interface{} +}{{ + about: "anonymous users are authorized", + readPerm: []string{params.Everyone}, +}, { + about: "everyone is authorized", + username: "dalek", + readPerm: []string{params.Everyone}, +}, { + about: "everyone and a specific user", + username: "dalek", + readPerm: []string{params.Everyone, "janeway"}, +}, { + about: "specific user authorized", + username: "who", + readPerm: []string{"who"}, +}, { + about: "multiple specific users authorized", + username: "picard", + readPerm: []string{"kirk", "picard", "sisko"}, +}, { + about: "nobody authorized", + username: "picard", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "access denied for user", + username: "kirk", + readPerm: []string{"picard", "sisko"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "everyone is authorized (user is member of groups)", + username: "dalek", + groups: []string{"group1", "group2"}, + readPerm: []string{params.Everyone}, +}, { + about: "everyone and a specific group", + username: "dalek", + groups: []string{"group2", "group3"}, + readPerm: []string{params.Everyone, "group1"}, +}, { + about: "specific group authorized", + username: "who", + groups: []string{"group1", "group42", "group2"}, + readPerm: []string{"group42"}, +}, { + about: "multiple specific groups authorized", + username: "picard", + groups: []string{"group2"}, + readPerm: []string{"kirk", "group0", "group2"}, +}, { + about: "no group authorized", + username: "picard", + groups: []string{"group1", "group2"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "access denied for group", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + readPerm: []string{"picard", "sisko", "group42", "group47"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}} + +func dischargeForUser(username string) func(_, _ string) ([]checkers.Caveat, error) { + return func(_, _ string) ([]checkers.Caveat, error) { + return []checkers.Caveat{ + checkers.DeclaredCaveat(v4.UsernameAttr, username), + }, nil + } +} + +func (s *authSuite) TestReadAuthorization(c *gc.C) { + for i, test := range readAuthorizationTests { + c.Logf("test %d: %s", i, test.about) + + s.discharge = dischargeForUser(test.username) + s.idM.groups = map[string][]string{ + test.username: test.groups, + } + + // Add a charm to the store, used for testing. + rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) + err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) + c.Assert(err, gc.IsNil) + + // Change the ACLs for the testing charm + // (both published and development versions). + err = s.store.SetPerms(&rurl.URL, "read", test.readPerm...) 
+ c.Assert(err, gc.IsNil)
+ err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read", test.readPerm...)
+ c.Assert(err, gc.IsNil)
+
+ // Define a helper function used to send requests and check responses.
+ makeRequest := func(path string, expectStatus int, expectBody interface{}) {
+ rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+ Handler: s.srv,
+ Do: bakeryDo(nil),
+ URL: storeURL(path),
+ })
+ if expectStatus == 0 {
+ expectStatus = http.StatusOK
+ }
+ c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body))
+ if expectBody != nil {
+ c.Assert(rec.Body.String(), jc.JSONEquals, expectBody)
+ }
+ }
+
+ // Perform meta and id requests.
+ makeRequest("~charmers/wordpress/meta/archive-size", test.expectStatus, test.expectBody)
+ makeRequest("~charmers/wordpress/expand-id", test.expectStatus, test.expectBody)
+
+ // Perform meta and id requests to the development channel.
+ makeRequest("~charmers/development/wordpress/meta/archive-size", test.expectStatus, test.expectBody)
+ makeRequest("~charmers/development/wordpress/expand-id", test.expectStatus, test.expectBody)
+
+ // Remove permissions for the published charm.
+ err = s.store.SetPerms(&rurl.URL, "read")
+ c.Assert(err, gc.IsNil)
+
+ // Check that now accessing the published charm is not allowed,
+ // but accessing the development charm still works as expected.
+ makeRequest("~charmers/wordpress/meta/archive-size", http.StatusUnauthorized, nil)
+ makeRequest("~charmers/development/wordpress/meta/archive-size", test.expectStatus, test.expectBody)
+
+ // Remove permissions for the development charm as well.
+ err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read")
+ c.Assert(err, gc.IsNil)
+
+ // Check that now accessing the development charm is also denied.
+ makeRequest("~charmers/development/wordpress/meta/archive-size", http.StatusUnauthorized, nil)
+
+ // Remove all entities from the store.
+ _, err = s.store.DB.Entities().RemoveAll(nil)
+ c.Assert(err, gc.IsNil)
+ }
+}
+
+var writeAuthorizationTests = []struct {
+ // about holds the test description.
+ about string
+ // username holds the authenticated user name returned by the discharger.
+ // If empty, an anonymous user is returned.
+ username string
+ // groups holds group names the user is a member of, as returned by the
+ // discharger.
+ groups []string
+ // writePerm stores a list of users with write permissions.
+ writePerm []string
+ // expectStatus is the expected HTTP response status.
+ // Defaults to 200 status OK.
+ expectStatus int
+ // expectBody holds the expected body of the HTTP response. If nil,
+ // the body is not checked and the response is assumed to be ok.
+ expectBody interface{} +}{{ + about: "anonymous users are not authorized", + writePerm: []string{"who"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "specific user authorized to write", + username: "dalek", + writePerm: []string{"dalek"}, +}, { + about: "multiple users authorized", + username: "sisko", + writePerm: []string{"kirk", "picard", "sisko"}, +}, { + about: "no users authorized", + username: "who", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "who"`, + }, +}, { + about: "specific user unauthorized", + username: "kirk", + writePerm: []string{"picard", "sisko", "janeway"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "access granted for group", + username: "picard", + groups: []string{"group1", "group2"}, + writePerm: []string{"group2"}, +}, { + about: "multiple groups authorized", + username: "picard", + groups: []string{"group1", "group2"}, + writePerm: []string{"kirk", "group0", "group1", "group2"}, +}, { + about: "no group authorized", + username: "picard", + groups: []string{"group1", "group2"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "access denied for group", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + writePerm: []string{"picard", "sisko", "group42", "group47"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}} + +func (s *authSuite) TestWriteAuthorization(c *gc.C) { + for i, test := range writeAuthorizationTests { + c.Logf("test %d: %s", i, test.about) + + s.discharge = dischargeForUser(test.username) + s.idM.groups = map[string][]string{ + test.username: test.groups, + } + + // Add a charm to the store, used for testing. + rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) + err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) + c.Assert(err, gc.IsNil) + + // Change the ACLs for the testing charm. + // (both published and development versions). + err = s.store.SetPerms(&rurl.URL, "write", test.writePerm...) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "write", test.writePerm...) + c.Assert(err, gc.IsNil) + + makeRequest := func(path string, expectStatus int, expectBody interface{}) { + client := httpbakery.NewHTTPClient() + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Do: bakeryDo(client), + URL: storeURL(path), + Method: "PUT", + Header: http.Header{"Content-Type": {"application/json"}}, + Body: strings.NewReader("42"), + }) + if expectStatus == 0 { + expectStatus = http.StatusOK + } + c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) + if expectBody != nil { + c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) + } + } + + // Perform a meta PUT request to the published and development URLs. 
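+ // Both PUT a JSON value to the entity's extra-info/key endpoint,
+ // which requires write access.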
+ makeRequest("~charmers/wordpress/meta/extra-info/key", test.expectStatus, test.expectBody) + makeRequest("~charmers/development/wordpress/meta/extra-info/key", test.expectStatus, test.expectBody) + + // Remove permissions to write on the published entity. + err = s.store.SetPerms(&rurl.URL, "write") + c.Assert(err, gc.IsNil) + + // Check that now writing to the published charm is not allowed, + // but accessing the development charm still works as expected. + makeRequest("~charmers/wordpress/meta/extra-info/key", http.StatusUnauthorized, nil) + makeRequest("~charmers/development/wordpress/meta/extra-info/key", test.expectStatus, test.expectBody) + + // Remove write permissions for the development charm as well. + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "write") + c.Assert(err, gc.IsNil) + + // Check that now modifying the development charm is also denied. + makeRequest("~charmers/development/wordpress/meta/extra-info/key", http.StatusUnauthorized, nil) + + // Remove all entities from the store. + _, err = s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var uploadEntityAuthorizationTests = []struct { + // about holds the test description. + about string + // username holds the authenticated user name returned by the discharger. + // If empty, an anonymous user is returned. + username string + // groups holds group names the user is member of, as returned by the + // discharger. + groups []string + // id holds the id of the entity to be uploaded. + id string + // promulgated holds whether the corresponding promulgated entity must be + // already present in the charm store before performing the upload. + promulgated bool + // developmentWriteAcls can be used to set customized write ACLs for the + // development entity before performing the upload. If empty, default ACLs + // are used. + developmentWriteAcls []string + // writeAcls can be used to set customized write ACLs for the published + // entity before performing the upload. If empty, default ACLs are used. + writeAcls []string + // expectStatus is the expected HTTP response status. + // Defaults to 200 status OK. + expectStatus int + // expectBody holds the expected body of the HTTP response. If nil, + // the body is not checked and the response is assumed to be ok. 
+ expectBody interface{} +}{{ + about: "user owned entity", + username: "who", + id: "~who/utopic/django", +}, { + about: "user owned development entity", + username: "who", + id: "~who/development/utopic/django", +}, { + about: "group owned entity", + username: "dalek", + groups: []string{"group1", "group2"}, + id: "~group1/utopic/django", +}, { + about: "group owned development entity", + username: "dalek", + groups: []string{"group1", "group2"}, + id: "~group1/development/utopic/django", +}, { + about: "specific group", + username: "dalek", + groups: []string{"group42"}, + id: "~group42/utopic/django", +}, { + about: "promulgated entity", + username: "sisko", + groups: []string{"charmers", "group2"}, + id: "~charmers/utopic/django", + promulgated: true, +}, { + about: "promulgated entity in development", + username: "sisko", + groups: []string{"group1", "charmers"}, + id: "~charmers/development/utopic/django", + promulgated: true, +}, { + about: "unauthorized: promulgated entity", + username: "sisko", + groups: []string{"group1", "group2"}, + id: "~charmers/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "sisko"`, + }, +}, { + about: "unauthorized: promulgated entity in development", + username: "sisko", + groups: []string{"group1", "group2"}, + id: "~charmers/development/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "sisko"`, + }, +}, { + about: "unauthorized: anonymous user", + id: "~who/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: anonymous user, development entity", + id: "~who/development/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: anonymous user and promulgated entity", + id: "~charmers/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: anonymous user and promulgated entity in development", + id: "~charmers/development/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: user does not match", + username: "kirk", + id: "~picard/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: user does not match for a development entity", + username: "kirk", + id: "~picard/development/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: group does not match", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + id: "~group0/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: 
`unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: group does not match for a development entity", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + id: "~group0/development/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: specific group and promulgated entity", + username: "janeway", + groups: []string{"group1"}, + id: "~charmers/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "janeway"`, + }, +}, { + about: "unauthorized: specific group and promulgated entity in development", + username: "janeway", + groups: []string{"group1"}, + id: "~charmers/development/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "janeway"`, + }, +}, { + about: "unauthorized: published entity no development permissions", + username: "picard", + id: "~picard/wily/django", + developmentWriteAcls: []string{"group2"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "unauthorized: published entity no published permissions", + username: "picard", + id: "~picard/wily/django", + writeAcls: []string{"kirk"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}} + +func (s *authSuite) TestUploadEntityAuthorization(c *gc.C) { + for i, test := range uploadEntityAuthorizationTests { + c.Logf("test %d: %s", i, test.about) + + s.discharge = dischargeForUser(test.username) + s.idM.groups = map[string][]string{ + test.username: test.groups, + } + + // Prepare the expected status. + expectStatus := test.expectStatus + if expectStatus == 0 { + expectStatus = http.StatusOK + } + + // Add a pre-existing entity if required. + if test.promulgated || len(test.developmentWriteAcls) != 0 || len(test.writeAcls) != 0 { + id := charm.MustParseURL(test.id).WithRevision(0) + revision := -1 + if test.promulgated { + revision = 1 + } + rurl := newResolvedURL(id.String(), revision) + s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + if len(test.developmentWriteAcls) != 0 { + s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "write", test.developmentWriteAcls...) + } + if len(test.writeAcls) != 0 { + s.store.SetPerms(&rurl.URL, "write", test.writeAcls...) + } + } + + // Try to upload the entity. + body, hash, size := archiveInfo(c, "wordpress") + defer body.Close() + client := httpbakery.NewHTTPClient() + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Do: bakeryDo(client), + URL: storeURL(test.id + "/archive?hash=" + hash), + Method: "POST", + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: body, + }) + c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) + if test.expectBody != nil { + c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) + } + + // Remove all entities from the store. 
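+		// Both the entities and base entities collections are cleared,
+		// so that each test case starts from a clean store.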
+		_, err := s.store.DB.Entities().RemoveAll(nil)
+		c.Assert(err, gc.IsNil)
+		_, err = s.store.DB.BaseEntities().RemoveAll(nil)
+		c.Assert(err, gc.IsNil)
+	}
+}
+
+type readSeekCloser interface {
+	io.ReadCloser
+	io.Seeker
+}
+
+// archiveInfo prepares a zip archive of an entity and returns a reader for
+// the archive, its blob hash and size.
+func archiveInfo(c *gc.C, name string) (r readSeekCloser, hashSum string, size int64) {
+	ch := storetesting.Charms.CharmArchive(c.MkDir(), name)
+	f, err := os.Open(ch.Path)
+	c.Assert(err, gc.IsNil)
+	hash, size := hashOf(f)
+	_, err = f.Seek(0, 0)
+	c.Assert(err, gc.IsNil)
+	return f, hash, size
+}
+
+var isEntityCaveatTests = []struct {
+	url         string
+	expectError string
+}{{
+	url: "~charmers/utopic/wordpress-42/archive",
+}, {
+	url: "~charmers/utopic/wordpress-42/meta/hash",
+}, {
+	url: "wordpress/archive",
+}, {
+	url: "wordpress/meta/hash",
+}, {
+	url: "utopic/wordpress-10/archive",
+}, {
+	url: "utopic/wordpress-10/meta/hash",
+}, {
+	url:         "~charmers/utopic/wordpress-41/archive",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`,
+}, {
+	url:         "~charmers/utopic/wordpress-41/meta/hash",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`,
+}, {
+	url:         "utopic/wordpress-9/archive",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`,
+}, {
+	url:         "utopic/wordpress-9/meta/hash",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`,
+}, {
+	url:         "log",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation does not involve any of the allowed entities cs:~charmers/utopic/wordpress-42`,
+}}
+
+func (s *authSuite) TestIsEntityCaveat(c *gc.C) {
+	s.discharge = func(_, _ string) ([]checkers.Caveat, error) {
+		return []checkers.Caveat{{
+			Condition: "is-entity cs:~charmers/utopic/wordpress-42",
+		},
+			checkers.DeclaredCaveat(v4.UsernameAttr, "bob"),
+		}, nil
+	}
+
+	// Add a charm to the store, used for testing.
+	err := s.store.AddCharmWithArchive(
+		newResolvedURL("~charmers/utopic/wordpress-41", 9),
+		storetesting.Charms.CharmDir("wordpress"))
+	c.Assert(err, gc.IsNil)
+	err = s.store.AddCharmWithArchive(
+		newResolvedURL("~charmers/utopic/wordpress-42", 10),
+		storetesting.Charms.CharmDir("wordpress"))
+	c.Assert(err, gc.IsNil)
+	// Change the ACLs for the testing charm.
+	err = s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "read", "bob")
+	c.Assert(err, gc.IsNil)
+
+	for i, test := range isEntityCaveatTests {
+		c.Logf("test %d: %s", i, test.url)
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			Do:      bakeryDo(nil),
+			URL:     storeURL(test.url),
+			Method:  "GET",
+		})
+		if test.expectError != "" {
+			c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized)
+			var respErr httpbakery.Error
+			err := json.Unmarshal(rec.Body.Bytes(), &respErr)
+			c.Assert(err, gc.IsNil)
+			c.Assert(respErr.Message, gc.Matches, test.expectError)
+			continue
+		}
+		c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes()))
+	}
+}
+
+func (s *authSuite) TestDelegatableMacaroon(c *gc.C) {
+	// Create a new server with a third party discharger.
+	s.discharge = dischargeForUser("bob")
+
+	// First check that we get a macaraq error when using a vanilla http do
+	// request with both bakery protocol versions.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("delegatable-macaroon"),
+		Header:  http.Header{"Bakery-Protocol-Version": {"1"}},
+		ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) {
+			// Allow any body - the next check will check that it's a valid macaroon.
+		}),
+		ExpectStatus: http.StatusUnauthorized,
+	})
+
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("delegatable-macaroon"),
+		ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) {
+			// Allow any body - the next check will check that it's a valid macaroon.
+		}),
+		ExpectStatus: http.StatusProxyAuthRequired,
+	})
+
+	client := httpbakery.NewHTTPClient()
+
+	now := time.Now()
+	var gotBody json.RawMessage
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("delegatable-macaroon"),
+		ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) {
+			gotBody = m
+		}),
+		Do:           bakeryDo(client),
+		ExpectStatus: http.StatusOK,
+	})
+
+	c.Assert(gotBody, gc.NotNil)
+	var m macaroon.Macaroon
+	err := json.Unmarshal(gotBody, &m)
+	c.Assert(err, gc.IsNil)
+
+	caveats := m.Caveats()
+	foundExpiry := false
+	for _, cav := range caveats {
+		cond, arg, err := checkers.ParseCaveat(cav.Id)
+		c.Assert(err, gc.IsNil)
+		switch cond {
+		case checkers.CondTimeBefore:
+			t, err := time.Parse(time.RFC3339Nano, arg)
+			c.Assert(err, gc.IsNil)
+			c.Assert(t, jc.TimeBetween(now.Add(v4.DelegatableMacaroonExpiry), now.Add(v4.DelegatableMacaroonExpiry+time.Second)))
+			foundExpiry = true
+		}
+	}
+	c.Assert(foundExpiry, jc.IsTrue)
+
+	// Now check that we can use the obtained macaroon to do stuff
+	// as the declared user.
+
+	err = s.store.AddCharmWithArchive(
+		newResolvedURL("~charmers/utopic/wordpress-41", 9),
+		storetesting.Charms.CharmDir("wordpress"))
+	c.Assert(err, gc.IsNil)
+	// Change the ACLs for the testing charm.
+	err = s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "read", "bob")
+	c.Assert(err, gc.IsNil)
+
+	// First check that we require authorization to access the charm.
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("~charmers/utopic/wordpress/meta/id-name"),
+		Method:  "GET",
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusProxyAuthRequired)
+
+	// Then check that the request succeeds if we provide the delegatable
+	// macaroon.
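+	// The macaroon is attached to the client's cookie jar, emulating a
+	// client that was handed the delegatable macaroon out of band.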
+
+	client = httpbakery.NewHTTPClient()
+	u, err := url.Parse("http://127.0.0.1")
+	c.Assert(err, gc.IsNil)
+	err = httpbakery.SetCookie(client.Jar, u, macaroon.Slice{&m})
+	c.Assert(err, gc.IsNil)
+
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("~charmers/utopic/wordpress/meta/id-name"),
+		ExpectBody: params.IdNameResponse{
+			Name: "wordpress",
+		},
+
+		ExpectStatus: http.StatusOK,
+		Do:           bakeryDo(client),
+	})
+}
+
+func (s *authSuite) TestDelegatableMacaroonWithBasicAuth(c *gc.C) {
+	// Check that a delegatable macaroon cannot be obtained when the
+	// request is authenticated with the admin basic-auth credentials.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:  s.srv,
+		Username: testUsername,
+		Password: testPassword,
+		URL:      storeURL("delegatable-macaroon"),
+		ExpectBody: params.Error{
+			Code:    params.ErrForbidden,
+			Message: "delegatable macaroon is not obtainable using admin credentials",
+		},
+		ExpectStatus: http.StatusForbidden,
+	})
+}
+
+type errorTransport string
+
+func (e errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	return nil, errgo.New(string(e))
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/common_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/common_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/common_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,276 @@
+package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+
+import (
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"time"
+
+	"github.com/juju/loggo"
+	jujutesting "github.com/juju/testing"
+	"github.com/julienschmidt/httprouter"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/bakerytest"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/mgo.v2"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+)
+
+var mgoLogger = loggo.GetLogger("mgo")
+
+func init() {
+	mgo.SetLogger(mgoLog{})
+}
+
+type mgoLog struct{}
+
+func (mgoLog) Output(calldepth int, s string) error {
+	mgoLogger.LogCallf(calldepth+1, loggo.INFO, "%s", s)
+	return nil
+}
+
+type commonSuite struct {
+	jujutesting.IsolatedMgoSuite
+
+	// srv holds the store HTTP handler.
+	srv *charmstore.Server
+
+	// srvParams holds the parameters that the
+	// srv handler was started with.
+	srvParams charmstore.ServerParams
+
+	// noMacaroonSrv holds the store HTTP handler
+	// for an instance of the store without identity
+	// enabled. If enableIdentity is false, this is
+	// the same as srv.
+	noMacaroonSrv *charmstore.Server
+
+	// noMacaroonSrvParams holds the parameters that the
+	// noMacaroonSrv handler was started with.
+	noMacaroonSrvParams charmstore.ServerParams
+
+	// store holds an instance of *charmstore.Store
+	// that can be used to access the charmstore database
+	// directly.
+	store *charmstore.Store
+
+	// esSuite is set only when enableES is set to true.
+	esSuite *storetesting.ElasticSearchSuite
+
+	// discharge holds the function that will be used
+	// to check third party caveats by the mock
+	// discharger. This will be ignored if enableIdentity was
+	// not true before commonSuite.SetUpTest is invoked.
+	//
+	// It may be set by tests to influence the behavior of the
+	// discharger.
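+	// For example, tests that want requests to be authenticated as a
+	// particular user can set:
+	//	s.discharge = dischargeForUser("bob")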
+	discharge func(cav, arg string) ([]checkers.Caveat, error)
+
+	discharger *bakerytest.Discharger
+	idM        *idM
+	idMServer  *httptest.Server
+
+	// The following fields may be set before
+	// SetUpSuite is invoked on commonSuite
+	// and influence how the suite sets itself up.
+
+	// enableIdentity holds whether the charmstore server
+	// will be started with a configured identity service.
+	enableIdentity bool
+
+	// enableES holds whether the charmstore server will be
+	// started with Elastic Search enabled.
+	enableES bool
+
+	// maxMgoSessions specifies the value that will be given
+	// to config.MaxMgoSessions when calling charmstore.NewServer.
+	maxMgoSessions int
+}
+
+func (s *commonSuite) SetUpSuite(c *gc.C) {
+	s.IsolatedMgoSuite.SetUpSuite(c)
+	if s.enableES {
+		s.esSuite = new(storetesting.ElasticSearchSuite)
+		s.esSuite.SetUpSuite(c)
+	}
+}
+
+func (s *commonSuite) TearDownSuite(c *gc.C) {
+	if s.esSuite != nil {
+		s.esSuite.TearDownSuite(c)
+	}
+}
+
+func (s *commonSuite) SetUpTest(c *gc.C) {
+	s.IsolatedMgoSuite.SetUpTest(c)
+	if s.esSuite != nil {
+		s.esSuite.SetUpTest(c)
+	}
+	if s.enableIdentity {
+		s.idM = newIdM()
+		s.idMServer = httptest.NewServer(s.idM)
+	}
+	s.startServer(c)
+}
+
+func (s *commonSuite) TearDownTest(c *gc.C) {
+	s.store.Pool().Close()
+	s.store.Close()
+	s.srv.Close()
+	s.noMacaroonSrv.Close()
+	if s.esSuite != nil {
+		s.esSuite.TearDownTest(c)
+	}
+	if s.discharger != nil {
+		s.discharger.Close()
+		s.idMServer.Close()
+	}
+	s.IsolatedMgoSuite.TearDownTest(c)
+}
+
+// startServer creates a new charmstore server.
+func (s *commonSuite) startServer(c *gc.C) {
+	config := charmstore.ServerParams{
+		AuthUsername:     testUsername,
+		AuthPassword:     testPassword,
+		StatsCacheMaxAge: time.Nanosecond,
+		MaxMgoSessions:   s.maxMgoSessions,
+	}
+	keyring := bakery.NewPublicKeyRing()
+	if s.enableIdentity {
+		s.discharge = func(_, _ string) ([]checkers.Caveat, error) {
+			return nil, errgo.New("no discharge")
+		}
+		// Assign the discharger to the suite so that TearDownTest
+		// can detect it and close it along with the idM server.
+		s.discharger = bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) {
+			return s.discharge(cond, arg)
+		})
+		config.IdentityLocation = s.discharger.Location()
+		config.IdentityAPIURL = s.idMServer.URL
+		pk, err := httpbakery.PublicKeyForLocation(http.DefaultClient, s.discharger.Location())
+		c.Assert(err, gc.IsNil)
+		err = keyring.AddPublicKeyForLocation(s.discharger.Location(), true, pk)
+		c.Assert(err, gc.IsNil)
+	}
+	config.PublicKeyLocator = keyring
+	var si *charmstore.SearchIndex
+	if s.enableES {
+		si = &charmstore.SearchIndex{
+			Database: s.esSuite.ES,
+			Index:    s.esSuite.TestIndex,
+		}
+	}
+	db := s.Session.DB("charmstore")
+	var err error
+	s.srv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler})
+	c.Assert(err, gc.IsNil)
+	s.srvParams = config
+
+	if s.enableIdentity {
+		config.IdentityLocation = ""
+		config.PublicKeyLocator = nil
+		config.IdentityAPIURL = ""
+		s.noMacaroonSrv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v4.NewAPIHandler})
+		c.Assert(err, gc.IsNil)
+	} else {
+		s.noMacaroonSrv = s.srv
+	}
+	s.noMacaroonSrvParams = config
+	s.store = s.srv.Pool().Store()
+}
+
+// handler returns a request handler that can be
+// used to invoke private methods. The caller
+// is responsible for calling Put on the returned handler.
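+// A typical call looks like:
+//	rh := s.handler(c)
+//	defer rh.Put()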
+func (s *commonSuite) handler(c *gc.C) v4.ReqHandler {
+	h := v4.New(s.store.Pool(), s.srvParams)
+	defer h.Close()
+	rh, err := h.NewReqHandler()
+	c.Assert(err, gc.IsNil)
+	// It would be nice if we could call s.AddCleanup here
+	// to call rh.Put when the test has completed, but
+	// unfortunately CleanupSuite.TearDownTest runs
+	// after MgoSuite.TearDownTest, so that's not an option.
+	return rh
+}
+
+func storeURL(path string) string {
+	return "/v4/" + path
+}
+
+func bakeryDo(client *http.Client) func(*http.Request) (*http.Response, error) {
+	if client == nil {
+		client = httpbakery.NewHTTPClient()
+	}
+	bclient := httpbakery.NewClient()
+	bclient.Client = client
+	return func(req *http.Request) (*http.Response, error) {
+		if req.Body != nil {
+			body := req.Body.(io.ReadSeeker)
+			req.Body = nil
+			return bclient.DoWithBody(req, body)
+		}
+		return bclient.Do(req)
+	}
+}
+
+type idM struct {
+	// groups may be set to determine the mapping
+	// from user to groups for that user.
+	groups map[string][]string
+
+	// body may be set to cause serveGroups to return
+	// an arbitrary HTTP response body.
+	body string
+
+	// contentType holds the Content-Type header value
+	// to use when body is not empty.
+	contentType string
+
+	// status may be set to indicate the HTTP status code
+	// when body is not empty.
+	status int
+
+	router *httprouter.Router
+}
+
+func newIdM() *idM {
+	idM := &idM{
+		groups: make(map[string][]string),
+		router: httprouter.New(),
+	}
+	idM.router.GET("/v1/u/:user/groups", idM.serveGroups)
+	idM.router.GET("/v1/u/:user/idpgroups", idM.serveGroups)
+	return idM
+}
+
+func (idM *idM) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	idM.router.ServeHTTP(w, req)
+}
+
+func (idM *idM) serveGroups(w http.ResponseWriter, req *http.Request, p httprouter.Params) {
+	if idM.body != "" {
+		if idM.contentType != "" {
+			w.Header().Set("Content-Type", idM.contentType)
+		}
+		if idM.status != 0 {
+			w.WriteHeader(idM.status)
+		}
+		w.Write([]byte(idM.body))
+		return
+	}
+	u := p.ByName("user")
+	if u == "" {
+		panic("no user")
+	}
+	w.Header().Set("Content-Type", "application/json")
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(idM.groups[u]); err != nil {
+		panic(err)
+	}
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/content_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/content_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/content_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,482 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "path/filepath" + "sort" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/juju/xml" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v4" +) + +var serveDiagramErrorsTests = []struct { + about string + url string + expectStatus int + expectBody interface{} +}{{ + about: "entity not found", + url: "~charmers/bundle/foo-23/diagram.svg", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/bundle/foo-23"`, + }, +}, { + about: "diagram for a charm", + url: "~charmers/wordpress/diagram.svg", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "diagrams not supported for charms", + }, +}} + +func (s *APISuite) TestServeDiagramErrors(c *gc.C) { + id := newResolvedURL("cs:~charmers/trusty/wordpress-42", 42) + s.addPublicCharm(c, "wordpress", id) + id = newResolvedURL("cs:~charmers/bundle/nopositionbundle-42", 42) + s.addPublicBundle(c, "wordpress-simple", id) + + for i, test := range serveDiagramErrorsTests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.url), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *APISuite) TestServeDiagram(c *gc.C) { + bundle := &testingBundle{ + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "wordpress", + Annotations: map[string]string{ + "gui-x": "100", + "gui-y": "200", + }, + }, + "mysql": { + Charm: "utopic/mysql-23", + Annotations: map[string]string{ + "gui-x": "200", + "gui-y": "200", + }, + }, + }, + }, + } + + url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) + err := s.store.AddBundle(bundle, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("bundle/wordpressbundle/diagram.svg"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) + c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") + assertCacheControl(c, rec.Header(), true) + + // Check that the output contains valid XML with an SVG tag, + // but don't check the details of the output so that this test doesn't + // break every time the jujusvg presentation changes. + // Also check that we get an image for each service containing the charm + // icon link. + assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ + "svg element": isStartElementWithName("svg"), + "wordpress icon": isStartElementWithAttr("image", "href", "../../wordpress/icon.svg"), + "mysql icon": isStartElementWithAttr("image", "href", "../../utopic/mysql-23/icon.svg"), + }) + + // Do the same check again, but with the short form of the id; + // the relative links should change accordingly. 
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("wordpressbundle/diagram.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes()))
+
+	// Check that the output contains valid XML with an SVG tag,
+	// but don't check the details of the output so that this test doesn't
+	// break every time the jujusvg presentation changes.
+	// Also check that we get an image for each service containing the charm
+	// icon link.
+	assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{
+		"svg element":    isStartElementWithName("svg"),
+		"wordpress icon": isStartElementWithAttr("image", "href", "../wordpress/icon.svg"),
+		"mysql icon":     isStartElementWithAttr("image", "href", "../utopic/mysql-23/icon.svg"),
+	})
+}
+
+func (s *APISuite) TestServeDiagramNoPosition(c *gc.C) {
+	bundle := &testingBundle{
+		data: &charm.BundleData{
+			Services: map[string]*charm.ServiceSpec{
+				"wordpress": {
+					Charm: "wordpress",
+				},
+				"mysql": {
+					Charm: "utopic/mysql-23",
+					Annotations: map[string]string{
+						"gui-x": "200",
+						"gui-y": "200",
+					},
+				},
+			},
+		},
+	}
+
+	url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42)
+	err := s.store.AddBundle(bundle, charmstore.AddParams{
+		URL:      url,
+		BlobName: "blobName",
+		BlobHash: fakeBlobHash,
+		BlobSize: fakeBlobSize,
+	})
+	c.Assert(err, gc.IsNil)
+	err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("bundle/wordpressbundle/diagram.svg"),
+	})
+	// Check that the request succeeds and has the expected content type.
+	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes()))
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+}
+
+var serveReadMeTests = []struct {
+	name           string
+	expectNotFound bool
+}{{
+	name: "README.md",
+}, {
+	name: "README.rst",
+}, {
+	name: "readme",
+}, {
+	name: "README",
+}, {
+	name: "ReadMe.Txt",
+}, {
+	name: "README.ex",
+}, {
+	name:           "",
+	expectNotFound: true,
+}, {
+	name:           "readme-youtube-subscribe.html",
+	expectNotFound: true,
+}, {
+	name:           "readme Dutch.txt",
+	expectNotFound: true,
+}, {
+	name:           "README.debugging",
+	expectNotFound: true,
+}}
+
+func (s *APISuite) TestServeReadMe(c *gc.C) {
+	url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+	for i, test := range serveReadMeTests {
+		c.Logf("test %d: %s", i, test.name)
+		wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress")
+		content := fmt.Sprintf("some content %d", i)
+		if test.name != "" {
+			err := ioutil.WriteFile(filepath.Join(wordpress.Path, test.name), []byte(content), 0666)
+			c.Assert(err, gc.IsNil)
+		}
+
+		url.URL.Revision = i
+		err := s.store.AddCharmWithArchive(url, wordpress)
+		c.Assert(err, gc.IsNil)
+		err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+		c.Assert(err, gc.IsNil)
+
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			URL:     storeURL(url.URL.Path() + "/readme"),
+		})
+		if test.expectNotFound {
+			c.Assert(rec.Code, gc.Equals, http.StatusNotFound)
+			c.Assert(rec.Body.String(), jc.JSONEquals, params.Error{
+				Code:    params.ErrNotFound,
+				Message: "not found",
+			})
+		} else {
+			c.Assert(rec.Code, gc.Equals, http.StatusOK)
+			c.Assert(rec.Body.String(), gc.DeepEquals, content)
+			assertCacheControl(c, rec.Header(), true)
+		}
+	}
+}
+
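+// charmWithExtraFile returns a clone of the named test charm with the
+// given extra file written into the charm directory.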
+func charmWithExtraFile(c *gc.C, name, file, content string) *charm.CharmDir {
+	ch := storetesting.Charms.ClonedDir(c.MkDir(), name)
+	err := ioutil.WriteFile(filepath.Join(ch.Path, file), []byte(content), 0666)
+	c.Assert(err, gc.IsNil)
+	return ch
+}
+
+func (s *APISuite) TestServeIcon(c *gc.C) {
+	content := `<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100">an icon, really</svg>`
+	expected := `<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100" viewBox="0 0 100 100">an icon, really</svg>`
+	wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content)
+
+	url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+	err := s.store.AddCharmWithArchive(url, wordpress)
+	c.Assert(err, gc.IsNil)
+	err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(url.URL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, expected)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+	assertCacheControl(c, rec.Header(), true)
+
+	// Test with revision -1.
+	noRevURL := url.URL
+	noRevURL.Revision = -1
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(noRevURL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, expected)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+	assertCacheControl(c, rec.Header(), true)
+
+	// Reload the charm with an icon that already has a viewBox.
+	wordpress = storetesting.Charms.ClonedDir(c.MkDir(), "wordpress")
+	err = ioutil.WriteFile(filepath.Join(wordpress.Path, "icon.svg"), []byte(expected), 0666)
+	c.Assert(err, gc.IsNil)
+
+	url.URL.Revision++
+	err = s.store.AddCharmWithArchive(url, wordpress)
+	c.Assert(err, gc.IsNil)
+
+	// Check that we still get the expected SVG.
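+	// The icon already declares a viewBox, so the server should serve
+	// it unchanged.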
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(url.URL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, expected)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+}
+
+func (s *APISuite) TestServeBundleIcon(c *gc.C) {
+	s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/something-32", 32))
+
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:      s.srv,
+		URL:          storeURL("~charmers/bundle/something-32/icon.svg"),
+		ExpectStatus: http.StatusNotFound,
+		ExpectBody: params.Error{
+			Code:    params.ErrNotFound,
+			Message: "icons not supported for bundles",
+		},
+	})
+}
+
+func (s *APISuite) TestServeDefaultIcon(c *gc.C) {
+	wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress")
+
+	url := newResolvedURL("cs:~charmers/precise/wordpress-0", 0)
+	err := s.store.AddCharmWithArchive(url, wordpress)
+	c.Assert(err, gc.IsNil)
+	err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(url.URL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, v4.DefaultIcon)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+	assertCacheControl(c, rec.Header(), true)
+}
+
+func (s *APISuite) TestServeDefaultIconForBadXML(c *gc.C) {
+	for i, content := range []string{
+		"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44",
+		// Technically this XML is not bad - we just can't parse it because
+		// it's got internally defined character entities. Nonetheless, we treat
+		// it as "bad" for the time being.
+		cloudfoundrySVG,
+	} {
+		wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content)
+
+		url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+		url.URL.Revision = i
+		err := s.store.AddCharmWithArchive(url, wordpress)
+		c.Assert(err, gc.IsNil)
+		err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+		c.Assert(err, gc.IsNil)
+
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			URL:     storeURL(url.URL.Path() + "/icon.svg"),
+		})
+		c.Assert(rec.Code, gc.Equals, http.StatusOK)
+		c.Assert(rec.Body.String(), gc.Equals, v4.DefaultIcon)
+		c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+		assertCacheControl(c, rec.Header(), true)
+	}
+}
+
+// assertXMLEqual asserts that the XML contained in the
+// two slices is equal, without caring about namespace
+// declarations or attribute ordering.
+func assertXMLEqual(c *gc.C, body []byte, expect []byte) {
+	decBody := xml.NewDecoder(bytes.NewReader(body))
+	decExpect := xml.NewDecoder(bytes.NewReader(expect))
+	for i := 0; ; i++ {
+		tok0, err0 := decBody.Token()
+		tok1, err1 := decExpect.Token()
+		if err1 != nil {
+			c.Assert(err0, gc.NotNil)
+			c.Assert(err0.Error(), gc.Equals, err1.Error())
+			break
+		}
+		ok, err := tokenEqual(tok0, tok1)
+		if !ok {
+			c.Logf("got %#v", tok0)
+			c.Logf("want %#v", tok1)
+			c.Fatalf("mismatch at token %d: %v", i, err)
+		}
+	}
+}
+
+func tokenEqual(tok0, tok1 xml.Token) (bool, error) {
+	tok0 = canonicalXMLToken(tok0)
+	tok1 = canonicalXMLToken(tok1)
+	return jc.DeepEqual(tok0, tok1)
+}
+
+func canonicalXMLToken(tok xml.Token) xml.Token {
+	start, ok := tok.(xml.StartElement)
+	if !ok {
+		return tok
+	}
+	// Remove all namespace-defining attributes.
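+	// A namespace declaration appears either as xmlns="..." (Local
+	// "xmlns" with an empty Space) or as xmlns:foo="..." (Space
+	// "xmlns"), hence the two conditions below.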
+	j := 0
+	for _, attr := range start.Attr {
+		if attr.Name.Local == "xmlns" && attr.Name.Space == "" ||
+			attr.Name.Space == "xmlns" {
+			continue
+		}
+		start.Attr[j] = attr
+		j++
+	}
+	start.Attr = start.Attr[0:j]
+	sort.Sort(attrByName(start.Attr))
+	return start
+}
+
+type attrByName []xml.Attr
+
+func (a attrByName) Len() int      { return len(a) }
+func (a attrByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a attrByName) Less(i, j int) bool {
+	if a[i].Name.Space != a[j].Name.Space {
+		return a[i].Name.Space < a[j].Name.Space
+	}
+	return a[i].Name.Local < a[j].Name.Local
+}
+
+// assertXMLContains asserts that the XML in body is well formed, and
+// contains at least one token that satisfies each of the functions in need.
+func assertXMLContains(c *gc.C, body []byte, need map[string]func(xml.Token) bool) {
+	dec := xml.NewDecoder(bytes.NewReader(body))
+	for {
+		tok, err := dec.Token()
+		if err == io.EOF {
+			break
+		}
+		c.Assert(err, gc.IsNil)
+		for what, f := range need {
+			if f(tok) {
+				delete(need, what)
+			}
+		}
+	}
+	c.Assert(need, gc.HasLen, 0, gc.Commentf("body:\n%s", body))
+}
+
+func isStartElementWithName(name string) func(xml.Token) bool {
+	return func(tok xml.Token) bool {
+		startElem, ok := tok.(xml.StartElement)
+		return ok && startElem.Name.Local == name
+	}
+}
+
+func isStartElementWithAttr(name, attr, val string) func(xml.Token) bool {
+	return func(tok xml.Token) bool {
+		startElem, ok := tok.(xml.StartElement)
+		if !ok {
+			return false
+		}
+		for _, a := range startElem.Attr {
+			if a.Name.Local == attr && a.Value == val {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+const cloudfoundrySVG = `<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" [
+	<!ENTITY ns_extend "http://ns.adobe.com/Extensibility/1.0/">
+	<!ENTITY ns_ai "http://ns.adobe.com/AdobeIllustrator/10.0/">
+	<!ENTITY ns_graphs "http://ns.adobe.com/Graphs/1.0/">
+	<!ENTITY ns_vars "http://ns.adobe.com/Variables/1.0/">
+	<!ENTITY ns_imrep "http://ns.adobe.com/ImageReplacement/1.0/">
+	<!ENTITY ns_sfw "http://ns.adobe.com/SaveForWeb/1.0/">
+	<!ENTITY ns_custom "http://ns.adobe.com/GenericCustomNamespace/1.0/">
+	<!ENTITY ns_adobe_xpath "http://ns.adobe.com/XPath/1.0/">
+]>
+<svg version="1.1" xmlns:x="&ns_extend;" xmlns:i="&ns_ai;" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
+content omitted
+</svg>
+`

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/defaulticon_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/defaulticon_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/defaulticon_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,22 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+
+import (
+	"strings"
+
+	gc "gopkg.in/check.v1"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+)
+
+type iconSuite struct{}
+
+var _ = gc.Suite(&iconSuite{})
+
+func (s *iconSuite) TestValidXML(c *gc.C) {
+	// The XML declaration must be included in the first line of the icon.
+	hasXMLPrefix := strings.HasPrefix(v4.DefaultIcon, "<?xml")
+	c.Assert(hasXMLPrefix, gc.Equals, true)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/log_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/log_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/log_test.go	2016-03-22 15:18:22 +0000
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"time"
+
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+)
+
+type logSuite struct {
+	commonSuite
+}
+
+var _ = gc.Suite(&logSuite{})
+
+func rawMessage(msg string) json.RawMessage {
+	message, err := json.Marshal(msg)
+	if err != nil {
+		panic(err)
+	}
+	return json.RawMessage(message)
+}
+
+var getLogsErrorsTests = []struct {
+	about         string
+	querystring   string
+	expectStatus  int
+	expectMessage string
+	expectCode    params.ErrorCode
+}{{
+	about:         "invalid limit (negative number)",
+	querystring:   "?limit=-100",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid limit value: value must be >= 1",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid limit (zero value)",
+	querystring:   "?limit=0",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid limit value: value must be >= 1",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid limit (not a number)",
+	querystring:   "?limit=foo",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid limit value: value must be a number",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid offset (negative number)",
+	querystring:   "?skip=-100",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid skip value: value must be >= 0",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid offset (not a number)",
+	querystring:   "?skip=bar",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid skip value: value must be a number",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid id",
+	querystring:   "?id=no-such:reference",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: `invalid id value: charm or bundle URL has invalid schema: "no-such:reference"`,
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid log level",
+	querystring:   "?level=bar",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid log level value",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid log type",
+	querystring:   "?type=no-such",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid log type value",
+	expectCode:    params.ErrBadRequest,
+}}
+
+func (s *logSuite) TestGetLogsErrors(c *gc.C) {
+	for i, test := range getLogsErrorsTests {
+		c.Logf("test %d: %s", i, test.about)
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler:      s.srv,
+			URL:          storeURL("log" + test.querystring),
+			Username:     testUsername,
+			Password:     testPassword,
+			ExpectStatus: test.expectStatus,
+			ExpectBody: params.Error{
+				Message: test.expectMessage,
+				Code:    test.expectCode,
+			},
+		})
+	}
+}
+
+func (s *logSuite) TestGetLogsErrorInvalidLog(c *gc.C) {
+	// Add a non-parsable log message to the db directly.
+	err := s.store.DB.Logs().Insert(mongodoc.Log{
+		Data:  []byte("!"),
+		Level: mongodoc.InfoLevel,
+		Type:  mongodoc.IngestionType,
+		Time:  time.Now(),
+	})
+	c.Assert(err, gc.IsNil)
+	// The log is just ignored.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:      s.srv,
+		URL:          storeURL("log"),
+		Username:     testUsername,
+		Password:     testPassword,
+		ExpectStatus: http.StatusOK,
+		ExpectBody:   []params.LogResponse{},
+	})
+}
+
+func (s *logSuite) TestPostLogs(c *gc.C) {
+	// Prepare the request body.
+	body := makeByteLogs(rawMessage("info data"), params.InfoLevel, params.IngestionType, []*charm.URL{
+		charm.MustParseURL("trusty/django"),
+		charm.MustParseURL("utopic/rails"),
+	})
+
+	// Send the request.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:  s.srv,
+		URL:      storeURL("log"),
+		Method:   "POST",
+		Username: testUsername,
+		Password: testPassword,
+		Header: http.Header{
+			"Content-Type": {"application/json"},
+		},
+		Body:         bytes.NewReader(body),
+		ExpectStatus: http.StatusOK,
+	})
+
+	// Ensure the log message has been added to the database.
+	var doc mongodoc.Log
+	err := s.store.DB.Logs().Find(nil).One(&doc)
+	c.Assert(err, gc.IsNil)
+	c.Assert(string(doc.Data), gc.Equals, `"info data"`)
+	c.Assert(doc.Level, gc.Equals, mongodoc.InfoLevel)
+	c.Assert(doc.Type, gc.Equals, mongodoc.IngestionType)
+	c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{
+		charm.MustParseURL("trusty/django"),
+		charm.MustParseURL("django"),
+		charm.MustParseURL("utopic/rails"),
+		charm.MustParseURL("rails"),
+	})
+}
+
+func (s *logSuite) TestPostLogsMultipleEntries(c *gc.C) {
+	// Prepare the request body.
+	infoData := rawMessage("info data")
+	warningData := rawMessage("warning data")
+	logs := []params.Log{{
+		Data:  &infoData,
+		Level: params.InfoLevel,
+		Type:  params.IngestionType,
+	}, {
+		Data:  &warningData,
+		Level: params.WarningLevel,
+		Type:  params.IngestionType,
+	}}
+	body, err := json.Marshal(logs)
+	c.Assert(err, gc.IsNil)
+
+	// Send the request.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:  s.srv,
+		URL:      storeURL("log"),
+		Method:   "POST",
+		Username: testUsername,
+		Password: testPassword,
+		Header: http.Header{
+			"Content-Type": {"application/json"},
+		},
+		Body:         bytes.NewReader(body),
+		ExpectStatus: http.StatusOK,
+	})
+
+	// Ensure the log messages have been added to the database.
+	var docs []mongodoc.Log
+	err = s.store.DB.Logs().Find(nil).Sort("id").All(&docs)
+	c.Assert(err, gc.IsNil)
+	c.Assert(docs, gc.HasLen, 2)
+	c.Assert(string(docs[0].Data), gc.Equals, string(infoData))
+	c.Assert(docs[0].Level, gc.Equals, mongodoc.InfoLevel)
+	c.Assert(string(docs[1].Data), gc.Equals, string(warningData))
+	c.Assert(docs[1].Level, gc.Equals, mongodoc.WarningLevel)
+}
+
+var postLogsErrorsTests = []struct {
+	about         string
+	contentType   string
+	body          []byte
+	expectStatus  int
+	expectMessage string
+	expectCode    params.ErrorCode
+}{{
+	about:         "invalid content type",
+	contentType:   "application/zip",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: `unexpected Content-Type "application/zip"; expected 'application/json'`,
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid body",
+	body:          []byte("!"),
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "cannot unmarshal body: invalid character '!' looking for beginning of value",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid log level",
+	body:          makeByteLogs(rawMessage("message"), params.LogLevel(42), params.IngestionType, nil),
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid log level",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid log type",
+	body:          makeByteLogs(rawMessage("message"), params.WarningLevel, params.LogType(42), nil),
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid log type",
+	expectCode:    params.ErrBadRequest,
+}}
+
+func (s *logSuite) TestPostLogsErrors(c *gc.C) {
+	url := storeURL("log")
+	for i, test := range postLogsErrorsTests {
+		c.Logf("test %d: %s", i, test.about)
+		if test.contentType == "" {
+			test.contentType = "application/json"
+		}
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler: s.srv,
+			URL:     url,
+			Method:  "POST",
+			Header: http.Header{
+				"Content-Type": {test.contentType},
+			},
+			Body:         bytes.NewReader(test.body),
+			Username:     testUsername,
+			Password:     testPassword,
+			ExpectStatus: test.expectStatus,
+			ExpectBody: params.Error{
+				Message: test.expectMessage,
+				Code:    test.expectCode,
+			},
+		})
+	}
+}
+
+func (s *logSuite) TestGetLogsUnauthorizedError(c *gc.C) {
+	s.AssertEndpointAuth(c, httptesting.JSONCallParams{
+		URL:          storeURL("log"),
+		ExpectStatus: http.StatusOK,
+		ExpectBody:   []params.LogResponse{},
+	})
+}
+
+func (s *logSuite) TestPostLogsUnauthorizedError(c *gc.C) {
+	// Check that posting logs without authentication fails.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.noMacaroonSrv,
+		URL:     storeURL("log"),
+		Method:  "POST",
+		Header: http.Header{
+			"Content-Type": {"application/json"},
+		},
+		ExpectStatus: http.StatusUnauthorized,
+		ExpectBody: params.Error{
+			Message: "authentication failed: missing HTTP auth header",
+			Code:    params.ErrUnauthorized,
+		},
+	})
+}
+
+func makeByteLogs(data json.RawMessage, logLevel params.LogLevel, logType params.LogType, urls []*charm.URL) []byte {
+	logs := []params.Log{{
+		Data:  &data,
+		Level: logLevel,
+		Type:  logType,
+		URLs:  urls,
+	}}
+	b, err := json.Marshal(logs)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/package_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+
+import (
+	"testing"
+
+	jujutesting "github.com/juju/testing"
+)
+
+func TestPackage(t *testing.T) {
+	jujutesting.MgoTestPackage(t, nil)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,170 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v4 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+
+import (
+	"net/http"
+	"net/url"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+// GET id/meta/charm-related[?include=meta[&include=meta…]]
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related
+func (h *ReqHandler) metaCharmRelated(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+	if id.URL.Series == "bundle" {
+		return nil, nil
+	}
+	// If the charm does not define any relations we can just return without
+	// hitting the db.
+	if len(entity.CharmProvidedInterfaces)+len(entity.CharmRequiredInterfaces) == 0 {
+		return &params.RelatedResponse{}, nil
+	}
+	q := h.Store.MatchingInterfacesQuery(entity.CharmProvidedInterfaces, entity.CharmRequiredInterfaces)
+
+	fields := bson.D{
+		{"_id", 1},
+		{"supportedseries", 1},
+		{"development", 1},
+		{"charmrequiredinterfaces", 1},
+		{"charmprovidedinterfaces", 1},
+		{"promulgated-url", 1},
+		{"promulgated-revision", 1},
+	}
+
+	var entities []*mongodoc.Entity
+	if err := q.Select(fields).Sort("_id").All(&entities); err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the related charms")
+	}
+
+	// If no entities are found there is no need to process the results
+	// any further.
+	if len(entities) == 0 {
+		return &params.RelatedResponse{}, nil
+	}
+
+	// Build the results by grouping entities based on their relations' roles
+	// and interfaces.
+	includes := flags["include"]
+	requires, err := h.getRelatedCharmsResponse(entity.CharmProvidedInterfaces, entities, func(e *mongodoc.Entity) []string {
+		return e.CharmRequiredInterfaces
+	}, includes, req)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the charm requires")
+	}
+	provides, err := h.getRelatedCharmsResponse(entity.CharmRequiredInterfaces, entities, func(e *mongodoc.Entity) []string {
+		return e.CharmProvidedInterfaces
+	}, includes, req)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the charm provides")
+	}
+
+	// Return the response.
+	return &params.RelatedResponse{
+		Requires: requires,
+		Provides: provides,
+	}, nil
+}
+
+type entityRelatedInterfacesGetter func(*mongodoc.Entity) []string
+
+// getRelatedCharmsResponse returns a response mapping interfaces to related
+// charms. For instance:
+//	map[string][]params.MetaAnyResponse{
+//		"http": []params.MetaAnyResponse{
+//			{Id: "cs:utopic/django-42", Meta: ...},
+//			{Id: "cs:trusty/wordpress-47", Meta: ...},
+//		},
+//		"memcache": []params.MetaAnyResponse{
+//			{Id: "cs:utopic/memcached-0", Meta: ...},
+//		},
+//	}
+func (h *ReqHandler) getRelatedCharmsResponse(
+	ifaces []string,
+	entities []*mongodoc.Entity,
+	getInterfaces entityRelatedInterfacesGetter,
+	includes []string,
+	req *http.Request,
+) (map[string][]params.MetaAnyResponse, error) {
+	results := make(map[string][]params.MetaAnyResponse, len(ifaces))
+	for _, iface := range ifaces {
+		responses, err := h.getRelatedIfaceResponses(iface, entities, getInterfaces, includes, req)
+		if err != nil {
+			return nil, err
+		}
+		if len(responses) > 0 {
+			results[iface] = responses
+		}
+	}
+	return results, nil
+}
+
+func (h *ReqHandler) getRelatedIfaceResponses(
+	iface string,
+	entities []*mongodoc.Entity,
+	getInterfaces entityRelatedInterfacesGetter,
+	includes []string,
+	req *http.Request,
+) ([]params.MetaAnyResponse, error) {
+	// Build a list of responses including only entities which are related
+	// to the given interface.
+	usesInterface := func(e *mongodoc.Entity) bool {
+		for _, entityIface := range getInterfaces(e) {
+			if entityIface == iface {
+				return true
+			}
+		}
+		return false
+	}
+
+	resp, err := h.getMetadataForEntities(entities, includes, req, usesInterface)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	return resp, nil
+}
+
+func (h *ReqHandler) getMetadataForEntities(entities []*mongodoc.Entity, includes []string, req *http.Request, includeEntity func(*mongodoc.Entity) bool) ([]params.MetaAnyResponse, error) {
+	response := make([]params.MetaAnyResponse, 0, len(entities))
+	err := expandMultiSeries(entities, func(series string, e *mongodoc.Entity) error {
+		if includeEntity != nil && !includeEntity(e) {
+			return nil
+		}
+		meta, err := h.getMetadataForEntity(e, includes, req)
+		if err == errMetadataUnauthorized {
+			return nil
+		}
+		if err != nil {
+			return errgo.Mask(err)
+		}
+		id := e.PreferredURL(true)
+		id.Series = series
+		response = append(response, params.MetaAnyResponse{
+			Id:   id,
+			Meta: meta,
+		})
+		return nil
+	})
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	return response, nil
+}
+
+var errMetadataUnauthorized = errgo.Newf("metadata unauthorized")
+
+func (h *ReqHandler) getMetadataForEntity(e *mongodoc.Entity, includes []string, req *http.Request) (map[string]interface{}, error) {
+	rurl := charmstore.EntityResolvedURL(e)
+	// Ignore entities that aren't readable by the current user.
+	if err := h.AuthorizeEntity(rurl, req); err != nil {
+		return nil, errMetadataUnauthorized
+	}
+	return h.Router.GetMetadata(rurl, includes, req)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/relations_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,935 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "encoding/json" + "fmt" + "net/http" + "sort" + "strconv" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/blobstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +// Define fake blob attributes to be used in tests. +var fakeBlobSize, fakeBlobHash = func() (int64, string) { + b := []byte("fake content") + h := blobstore.NewHash() + h.Write(b) + return int64(len(b)), fmt.Sprintf("%x", h.Sum(nil)) +}() + +type RelationsSuite struct { + commonSuite +} + +var _ = gc.Suite(&RelationsSuite{}) + +// metaCharmRelatedCharms defines a bunch of charms to be used in +// the relation tests. +var metaCharmRelatedCharms = map[string]charm.Charm{ + "0 ~charmers/utopic/wordpress-0": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "website": { + Name: "website", + Role: "provider", + Interface: "http", + }, + }, + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + "nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + "42 ~charmers/utopic/memcached-42": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "1 ~charmers/precise/nfs-1": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + "47 ~charmers/trusty/haproxy-47": &relationTestingCharm{ + requires: map[string]charm.Relation{ + "reverseproxy": { + Name: "reverseproxy", + Role: "requirer", + Interface: "http", + }, + }, + }, + "48 ~charmers/precise/haproxy-48": &relationTestingCharm{ + requires: map[string]charm.Relation{ + "reverseproxy": { + Name: "reverseproxy", + Role: "requirer", + Interface: "http", + }, + }, + }, + // development charms should not be included in any results. + "49 ~charmers/development/precise/haproxy-49": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "reverseproxy": { + Name: "reverseproxy", + Role: "requirer", + Interface: "http", + }, + }, + }, + "1 ~charmers/multi-series-20": &relationTestingCharm{ + supportedSeries: []string{"precise", "trusty", "utopic"}, + requires: map[string]charm.Relation{ + "reverseproxy": { + Name: "reverseproxy", + Role: "requirer", + Interface: "http", + }, + }, + }, +} + +var metaCharmRelatedTests = []struct { + // Description of the test. + about string + // Charms to be stored in the store before the test is run. + charms map[string]charm.Charm + // The id of the charm for which related charms are returned. + id string + // The querystring to append to the resulting charmstore URL. + querystring string + // The expected response body. 
+ expectBody params.RelatedResponse +}{{ + about: "provides and requires", + charms: metaCharmRelatedCharms, + id: "utopic/wordpress-0", + // V4 SPECIFIC + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/memcached-42"), + }}, + "mount": {{ + Id: charm.MustParseURL("precise/nfs-1"), + }}, + }, + Requires: map[string][]params.MetaAnyResponse{ + "http": {{ + Id: charm.MustParseURL("precise/multi-series-1"), + }, { + Id: charm.MustParseURL("trusty/multi-series-1"), + }, { + Id: charm.MustParseURL("utopic/multi-series-1"), + }, { + Id: charm.MustParseURL("precise/haproxy-48"), + }, { + Id: charm.MustParseURL("trusty/haproxy-47"), + }}, + }, + }, +}, { + about: "only provides", + charms: metaCharmRelatedCharms, + id: "trusty/haproxy-47", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "http": {{ + Id: charm.MustParseURL("utopic/wordpress-0"), + }}, + }, + }, +}, { + about: "only requires", + charms: metaCharmRelatedCharms, + id: "utopic/memcached-42", + expectBody: params.RelatedResponse{ + Requires: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/wordpress-0"), + }}, + }, + }, +}, { + about: "no relations found", + charms: map[string]charm.Charm{ + "0 ~charmers/utopic/wordpress-0": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "website": { + Name: "website", + Role: "provider", + Interface: "http", + }, + }, + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + "nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + }, + id: "utopic/wordpress-0", +}, { + about: "no relations defined", + charms: map[string]charm.Charm{ + "42 ~charmers/utopic/django-42": &relationTestingCharm{}, + }, + id: "utopic/django-42", +}, { + about: "multiple revisions of the same related charm", + charms: map[string]charm.Charm{ + "0 ~charmers/trusty/wordpress-0": &relationTestingCharm{ + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + }, + }, + "1 ~charmers/utopic/memcached-1": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "2 ~charmers/utopic/memcached-2": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "3 ~charmers/utopic/memcached-3": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + }, + id: "trusty/wordpress-0", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/memcached-1"), + }, { + Id: charm.MustParseURL("utopic/memcached-2"), + }, { + Id: charm.MustParseURL("utopic/memcached-3"), + }}, + }, + }, +}, { + about: "reference ordering", + charms: map[string]charm.Charm{ + "0 ~charmers/trusty/wordpress-0": &relationTestingCharm{ + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + "nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + "1 ~charmers/utopic/memcached-1": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + 
Interface: "memcache", + }, + }, + }, + "2 ~charmers/utopic/memcached-2": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "90 ~charmers/utopic/redis-90": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "47 ~charmers/trusty/nfs-47": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + "42 ~charmers/precise/nfs-42": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + "47 ~charmers/precise/nfs-47": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + }, + id: "trusty/wordpress-0", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/memcached-1"), + }, { + Id: charm.MustParseURL("utopic/memcached-2"), + }, { + Id: charm.MustParseURL("utopic/redis-90"), + }}, + "mount": {{ + Id: charm.MustParseURL("precise/nfs-42"), + }, { + Id: charm.MustParseURL("precise/nfs-47"), + }, { + Id: charm.MustParseURL("trusty/nfs-47"), + }}, + }, + }, +}, { + about: "includes", + charms: metaCharmRelatedCharms, + id: "precise/nfs-1", + querystring: "?include=archive-size&include=charm-metadata", + expectBody: params.RelatedResponse{ + Requires: map[string][]params.MetaAnyResponse{ + "mount": {{ + Id: charm.MustParseURL("utopic/wordpress-0"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "charm-metadata": &charm.Meta{ + Provides: map[string]charm.Relation{ + "website": { + Name: "website", + Role: "provider", + Interface: "http", + }, + }, + Requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + "nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + }, + }}, + }, + }, +}} + +func (s *RelationsSuite) addCharms(c *gc.C, charms map[string]charm.Charm) { + for id, ch := range charms { + url := mustParseResolvedURL(id) + // The blob-related info is not used in these tests. + // The related charms are retrieved from the entities collection, + // without accessing the blob store. + err := s.store.AddCharm(ch, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil, gc.Commentf("id %q", id)) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + } +} + +func (s *RelationsSuite) TestMetaCharmRelated(c *gc.C) { + for i, test := range metaCharmRelatedTests { + c.Logf("test %d: %s", i, test.about) + s.addCharms(c, test.charms) + storeURL := storeURL(test.id + "/meta/charm-related" + test.querystring) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: http.StatusOK, + ExpectBody: test.expectBody, + }) + // Clean up the entities in the store.
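+ // Each test case adds its own charms via addCharms, so the + // entities collection is emptied between cases to keep them + // independent of each other.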
+ _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +func (s *RelationsSuite) TestMetaCharmRelatedIncludeError(c *gc.C) { + s.addCharms(c, metaCharmRelatedCharms) + storeURL := storeURL("utopic/wordpress-0/meta/charm-related?include=no-such") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: `cannot retrieve the charm requires: unrecognized metadata name "no-such"`, + }, + }) +} + +// relationTestingCharm implements charm.Charm, and it is used for testing +// charm relations. +type relationTestingCharm struct { + supportedSeries []string + provides map[string]charm.Relation + requires map[string]charm.Relation +} + +func (ch *relationTestingCharm) Meta() *charm.Meta { + // The only metadata we are interested in is the relation data. + return &charm.Meta{ + Series: ch.supportedSeries, + Provides: ch.provides, + Requires: ch.requires, + } +} + +func (ch *relationTestingCharm) Config() *charm.Config { + // For the purposes of this implementation, the charm configuration is not + // relevant. + return nil +} + +func (ch *relationTestingCharm) Metrics() *charm.Metrics { + return nil +} + +func (ch *relationTestingCharm) Actions() *charm.Actions { + // For the purposes of this implementation, the charm actions are not + // relevant. + return nil +} + +func (ch *relationTestingCharm) Revision() int { + // For the purposes of this implementation, the charm revision is not + // relevant. + return 0 +} + +// metaBundlesContainingBundles defines a bunch of bundles to be used in +// the bundles-containing tests. +var metaBundlesContainingBundles = map[string]charm.Bundle{ + "0 ~charmers/bundle/wordpress-simple-0": relationTestingBundle([]string{ + "cs:utopic/wordpress-42", + "cs:utopic/mysql-0", + }), + "1 ~charmers/bundle/wordpress-simple-1": relationTestingBundle([]string{ + "cs:utopic/wordpress-47", + "cs:utopic/mysql-1", + }), + "1 ~charmers/bundle/wordpress-complex-1": relationTestingBundle([]string{ + "cs:utopic/wordpress-42", + "cs:utopic/wordpress-47", + "cs:trusty/mysql-0", + "cs:trusty/mysql-1", + "cs:trusty/memcached-2", + }), + "42 ~charmers/bundle/django-generic-42": relationTestingBundle([]string{ + "django", + "django", + "mysql-1", + "trusty/memcached", + }), + "0 ~charmers/bundle/useless-0": relationTestingBundle([]string{ + "cs:utopic/wordpress-42", + "precise/mediawiki-10", + }), + "46 ~charmers/bundle/mediawiki-simple-46": relationTestingBundle([]string{ + "precise/mediawiki-0", + }), + "47 ~charmers/bundle/mediawiki-simple-47": relationTestingBundle([]string{ + "precise/mediawiki-0", + "mysql", + }), + "48 ~charmers/bundle/mediawiki-simple-48": relationTestingBundle([]string{ + "precise/mediawiki-0", + }), + "~bob/bundle/bobthebundle-2": relationTestingBundle([]string{ + "precise/mediawiki-0", + }), +} + +var metaBundlesContainingTests = []struct { + // Description of the test. + about string + // The id of the charm for which related bundles are returned. + id string + // The querystring to append to the resulting charmstore URL. + querystring string + // The expected status code of the response. + expectStatus int + // The expected response body.
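+ // This is either a []*params.MetaAnyResponse on success or a + // params.Error on failure, hence the interface{} type.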
+ expectBody interface{} +}{{ + about: "specific charm present in several bundles", + id: "utopic/wordpress-42", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/useless-0"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "specific charm present in one bundle", + id: "trusty/memcached-2", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }}, +}, { + about: "specific charm not present in any bundle", + id: "trusty/django-42", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{}, +}, { + about: "specific charm with includes", + id: "trusty/mysql-1", + querystring: "?include=archive-size&include=bundle-metadata", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), + }, + }}, +}, { + about: "partial charm id", + id: "mysql", // The test will add cs:utopic/mysql-0. + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "any series set to true", + id: "trusty/mysql-0", + querystring: "?any-series=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "any series and all-results set to true", + id: "trusty/mysql-0", + querystring: "?any-series=1&all-results=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + // This result is included even if the latest wordpress-simple does not + // contain the mysql-0 charm. 
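+ // (all-results=1 also matches older bundle revisions: + // wordpress-simple-0 includes cs:utopic/mysql-0, whereas + // wordpress-simple-1 only includes cs:utopic/mysql-1.)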
+ Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "invalid any series", + id: "utopic/mysql-0", + querystring: "?any-series=true", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid value for any-series: unexpected bool value "true" (must be "0" or "1")`, + }, +}, { + about: "any revision set to true", + id: "trusty/memcached-99", + querystring: "?any-revision=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/django-generic-42"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }}, +}, { + about: "invalid any revision", + id: "trusty/memcached-99", + querystring: "?any-revision=why-not", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid value for any-revision: unexpected bool value "why-not" (must be "0" or "1")`, + }, +}, { + about: "all-results set to true", + id: "precise/mediawiki-0", + expectStatus: http.StatusOK, + querystring: "?all-results=1", + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/mediawiki-simple-48"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-47"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-46"), + }, { + Id: charm.MustParseURL("~bob/bundle/bobthebundle-2"), + }}, +}, { + about: "all-results set to false", + id: "precise/mediawiki-0", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/mediawiki-simple-48"), + }, { + Id: charm.MustParseURL("~bob/bundle/bobthebundle-2"), + }}, +}, { + about: "invalid all-results", + id: "trusty/memcached-99", + querystring: "?all-results=yes!", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid value for all-results: unexpected bool value "yes!" 
(must be "0" or "1")`, + }, +}, { + about: "any series and revision, all results", + id: "saucy/mysql-99", + querystring: "?any-series=1&any-revision=1&all-results=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/django-generic-42"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-47"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "any series, any revision", + id: "saucy/mysql-99", + querystring: "?any-series=1&any-revision=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/django-generic-42"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-47"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-1"), + }}, +}, { + about: "any series and revision, last results", + id: "saucy/mediawiki", + querystring: "?any-series=1&any-revision=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/mediawiki-simple-48"), + }, { + Id: charm.MustParseURL("bundle/useless-0"), + }, { + Id: charm.MustParseURL("~bob/bundle/bobthebundle-2"), + }}, +}, { + about: "any series and revision with includes", + id: "saucy/wordpress-99", + querystring: "?any-series=1&any-revision=1&include=archive-size&include=bundle-metadata", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/useless-0"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["0 ~charmers/bundle/useless-0"].Data(), + }, + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), + }, + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-1"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-simple-1"].Data(), + }, + }}, +}, { + about: "include-error", + id: "utopic/wordpress-42", + querystring: "?include=no-such", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `unrecognized metadata name "no-such"`, + }, +}} + +func (s *RelationsSuite) TestMetaBundlesContaining(c *gc.C) { + // Add the bundles used for testing to the database. + for id, b := range metaBundlesContainingBundles { + url := mustParseResolvedURL(id) + // The blob-related info is not used in these tests. + // The charm-bundle relations are retrieved from the entities + // collection, without accessing the blob store. + err := s.store.AddBundle(b, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + } + + for i, test := range metaBundlesContainingTests { + c.Logf("test %d: %s", i, test.about) + + // Expand the URL if required before adding the charm to the database, + // so that at least one matching charm can be resolved.
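+ // Missing fields are defaulted below: the series to utopic and + // the revision to 0; a missing user defaults to charmers, in + // which case the entity is also promulgated.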
+ rurl := &router.ResolvedURL{ + URL: *charm.MustParseURL(test.id), + PromulgatedRevision: -1, + } + if rurl.URL.Series == "" { + rurl.URL.Series = "utopic" + } + if rurl.URL.Revision == -1 { + rurl.URL.Revision = 0 + } + if rurl.URL.User == "" { + rurl.URL.User = "charmers" + rurl.PromulgatedRevision = rurl.URL.Revision + } + // Add the charm we need bundle info on to the database. + err := s.store.AddCharm(&relationTestingCharm{}, charmstore.AddParams{ + URL: rurl, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + + // Perform the request and ensure the response is what we expect. + storeURL := storeURL(test.id + "/meta/bundles-containing" + test.querystring) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: test.expectStatus, + ExpectBody: sameMetaAnyResponses(test.expectBody), + }) + + // Clean up the charm entity in the store. + err = s.store.DB.Entities().Remove(bson.D{{"_id", &rurl.URL}}) + c.Assert(err, gc.IsNil) + } +} + +func (s *RelationsSuite) TestMetaBundlesContainingBundleACL(c *gc.C) { + // Add the bundles used for testing to the database. + for id, b := range metaBundlesContainingBundles { + url := mustParseResolvedURL(id) + // The blob-related info is not used in these tests. + // The charm-bundle relations are retrieved from the entities + // collection, without accessing the blob store. + err := s.store.AddBundle(b, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + if url.URL.Name == "useless" { + // The useless bundle is not available for "everyone". + err = s.store.SetPerms(&url.URL, "read", url.URL.User) + c.Assert(err, gc.IsNil) + continue + } + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + } + rurl := mustParseResolvedURL("42 ~charmers/utopic/wordpress-42") + // Add the charm we need bundle info on to the database. + err := s.store.AddCharm(&relationTestingCharm{}, charmstore.AddParams{ + URL: rurl, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + + // Perform the request and ensure that the useless bundle isn't listed. + storeURL := storeURL("utopic/wordpress-42/meta/bundles-containing") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectBody: sameMetaAnyResponses([]*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}), + }) +} + +// sameMetaAnyResponses returns a BodyAsserter that checks whether the meta/any response +// matches the expected one, even if the results appear in a different order.
+func sameMetaAnyResponses(expect interface{}) httptesting.BodyAsserter { + return func(c *gc.C, m json.RawMessage) { + expectMeta, ok := expect.([]*params.MetaAnyResponse) + if !ok { + c.Assert(string(m), jc.JSONEquals, expect) + return + } + var got []*params.MetaAnyResponse + err := json.Unmarshal(m, &got) + c.Assert(err, gc.IsNil) + sort.Sort(metaAnyResponseById(got)) + sort.Sort(metaAnyResponseById(expectMeta)) + data, err := json.Marshal(got) + c.Assert(err, gc.IsNil) + c.Assert(string(data), jc.JSONEquals, expect) + } +} + +// relationTestingBundle returns a bundle for use in relation +// testing. The urls parameter holds a list of charm references +// to be included in the bundle. +// For each URL, a corresponding service is automatically created. +func relationTestingBundle(urls []string) charm.Bundle { + services := make(map[string]*charm.ServiceSpec, len(urls)) + for i, url := range urls { + service := &charm.ServiceSpec{ + Charm: url, + NumUnits: 1, + } + services[fmt.Sprintf("service-%d", i)] = service + } + return &testingBundle{ + data: &charm.BundleData{ + Services: services, + }, + } +} + +// testingBundle is a bundle implementation that +// returns bundle metadata held in the data field. +type testingBundle struct { + data *charm.BundleData +} + +func (b *testingBundle) Data() *charm.BundleData { + return b.data +} + +func (b *testingBundle) ReadMe() string { + // For the purposes of this implementation, the charm readme is not + // relevant. + return "" +} + +type metaAnyResponseById []*params.MetaAnyResponse + +func (s metaAnyResponseById) Len() int { return len(s) } +func (s metaAnyResponseById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s metaAnyResponseById) Less(i, j int) bool { + return s[i].Id.String() < s[j].Id.String() +} + +// mustParseResolvedURL parses a resolved URL in string form, with +// the optional promulgated revision preceding the entity URL +// separated by a space. +func mustParseResolvedURL(urlStr string) *router.ResolvedURL { + s := strings.Fields(urlStr) + promRev := -1 + switch len(s) { + default: + panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) + case 2: + var err error + promRev, err = strconv.Atoi(s[0]) + if err != nil || promRev < 0 { + panic(fmt.Errorf("invalid resolved URL string %q", urlStr)) + } + case 1: + } + url := charm.MustParseURL(s[len(s)-1]) + return &router.ResolvedURL{ + URL: *url.WithChannel(""), + PromulgatedRevision: promRev, + Development: url.Channel == charm.DevelopmentChannel, + } +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/search_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,727 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
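+// The tests in this file exercise the v4 search endpoint: filtering, +// sorting, pagination, metadata inclusion and access control.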
+ +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "bytes" + "encoding/json" + "net/http" + "sort" + "strings" + + "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v4" +) + +type SearchSuite struct { + commonSuite +} + +var _ = gc.Suite(&SearchSuite{}) + +var exportTestCharms = map[string]*router.ResolvedURL{ + "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), + "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), + "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), +} + +var exportTestBundles = map[string]*router.ResolvedURL{ + "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), +} + +func (s *SearchSuite) SetUpSuite(c *gc.C) { + s.enableES = true + s.enableIdentity = true + s.commonSuite.SetUpSuite(c) +} + +func (s *SearchSuite) SetUpTest(c *gc.C) { + s.commonSuite.SetUpTest(c) + s.addCharmsToStore(c) + // hide the riak charm + err := s.store.DB.BaseEntities().UpdateId( + charm.MustParseURL("cs:~charmers/riak"), + bson.D{{"$set", map[string]mongodoc.ACL{ + "acls": { + Read: []string{"charmers", "test-user"}, + }, + }}}, + ) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(newResolvedURL("~charmers/trusty/riak-0", 0)) + c.Assert(err, gc.IsNil) + err = s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) + c.Assert(err, gc.IsNil) +} + +func (s *SearchSuite) addCharmsToStore(c *gc.C) { + for name, id := range exportTestCharms { + err := s.store.AddCharmWithArchive(id, getCharm(name)) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(id) + c.Assert(err, gc.IsNil) + } + for name, id := range exportTestBundles { + err := s.store.AddBundleWithArchive(id, getBundle(name)) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(id) + c.Assert(err, gc.IsNil) + } +} + +func getCharm(name string) *charm.CharmDir { + ca := storetesting.Charms.CharmDir(name) + ca.Meta().Categories = append(strings.Split(name, "-"), "bar") + return ca +} + +func getBundle(name string) *charm.BundleDir { + ba := storetesting.Charms.BundleDir(name) + ba.Data().Tags = append(strings.Split(name, "-"), "baz") + return ba +} + +func (s *SearchSuite) TestSuccessfulSearches(c *gc.C) { + tests := []struct { + about string + query string + results []*router.ResolvedURL + }{{ + about: "bare search", + query: "", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "text search", + query: "text=wordpress", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "autocomplete search", + query: "text=word&autocomplete=1", + 
results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "blank text search", + query: "text=", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "description filter search", + query: "description=database", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "name filter search", + query: "name=mysql", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "owner filter search", + query: "owner=foo", + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + }, + }, { + about: "provides filter search", + query: "provides=mysql", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "requires filter search", + query: "requires=mysql", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "series filter search", + query: "series=trusty", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "summary filter search", + query: "summary=database", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "tags filter search", + query: "tags=wordpress", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "type filter search", + query: "type=bundle", + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + }, + }, { + about: "multiple type filter search", + query: "type=bundle&type=charm", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "provides multiple interfaces filter search", + query: "provides=monitoring+http", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "requires multiple interfaces filter search", + query: "requires=mysql+varnish", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "multiple tags filter search", + query: "tags=mysql+bar", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "blank owner", + query: "owner=", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "paginated search", + query: "name=mysql&skip=1", + }, { + about: "promulgated", + query: "promulgated=1", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "not promulgated", + query: "promulgated=0", + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + }, + }, { + about: "promulgated with owner", + query: "promulgated=1&owner=openstack-charmers", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }} + for i, test := range tests { + c.Logf("test %d. %s", i, test.about) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?" 
+ test.query), + }) + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, len(test.results)) + c.Logf("results: %s", rec.Body.Bytes()) + assertResultSet(c, sr, test.results) + } +} + +func (s *SearchSuite) TestPaginatedSearch(c *gc.C) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?text=wordpress&skip=1"), + }) + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, 1) + c.Assert(sr.Total, gc.Equals, 2) +} + +func (s *SearchSuite) TestMetadataFields(c *gc.C) { + tests := []struct { + about string + query string + meta map[string]interface{} + }{{ + about: "archive-size", + query: "name=mysql&include=archive-size", + meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{438}, + }, + }, { + about: "bundle-metadata", + query: "name=wordpress-simple&type=bundle&include=bundle-metadata", + meta: map[string]interface{}{ + "bundle-metadata": getBundle("wordpress-simple").Data(), + }, + }, { + about: "bundle-machine-count", + query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", + meta: map[string]interface{}{ + "bundle-machine-count": params.BundleCount{2}, + }, + }, { + about: "bundle-unit-count", + query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", + meta: map[string]interface{}{ + "bundle-unit-count": params.BundleCount{2}, + }, + }, { + about: "charm-actions", + query: "name=wordpress&type=charm&include=charm-actions", + meta: map[string]interface{}{ + "charm-actions": getCharm("wordpress").Actions(), + }, + }, { + about: "charm-config", + query: "name=wordpress&type=charm&include=charm-config", + meta: map[string]interface{}{ + "charm-config": getCharm("wordpress").Config(), + }, + }, { + about: "charm-related", + query: "name=wordpress&type=charm&include=charm-related", + meta: map[string]interface{}{ + "charm-related": params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "mysql": { + { + Id: exportTestCharms["mysql"].PreferredURL(), + }, + }, + "varnish": { + { + Id: exportTestCharms["varnish"].PreferredURL(), + }, + }, + }, + }, + }, + }, { + about: "multiple values", + query: "name=wordpress&type=charm&include=charm-related&include=charm-config", + meta: map[string]interface{}{ + "charm-related": params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "mysql": { + { + Id: exportTestCharms["mysql"].PreferredURL(), + }, + }, + "varnish": { + { + Id: exportTestCharms["varnish"].PreferredURL(), + }, + }, + }, + }, + "charm-config": getCharm("wordpress").Config(), + }, + }} + for i, test := range tests { + c.Logf("test %d. %s", i, test.about) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?" 
+ test.query), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + var sr struct { + Results []struct { + Meta json.RawMessage + } + } + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, 1) + c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta) + } +} + +func (s *SearchSuite) TestSearchError(c *gc.C) { + err := s.esSuite.ES.DeleteIndex(s.esSuite.TestIndex) + c.Assert(err, gc.Equals, nil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?name=wordpress"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError) + var resp params.Error + err = json.Unmarshal(rec.Body.Bytes(), &resp) + c.Assert(err, gc.IsNil) + c.Assert(resp.Code, gc.Equals, params.ErrorCode("")) + c.Assert(resp.Message, gc.Matches, "error performing search: search failed: .*") +} + +func (s *SearchSuite) TestSearchIncludeError(c *gc.C) { + // Perform a search for all charms, including the + // manifest, which will try to retrieve all charm + // blobs. + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?type=charm&include=manifest"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + var resp params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &resp) + c.Assert(err, gc.IsNil) + // cs:riak will not be found because it is not visible to "everyone". + c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-1) + + // Now remove one of the blobs. The search should still + // work, but return one result fewer. + blobName, _, err := s.store.BlobNameAndHash(newResolvedURL("~charmers/precise/wordpress-23", 23)) + c.Assert(err, gc.IsNil) + err = s.store.BlobStore.Remove(blobName) + c.Assert(err, gc.IsNil) + + // Now search again - we should get one result fewer + // (and the error will be logged). + + // Register a logger so that we can check the logging output. + // It will be automatically removed later because IsolatedMgoESSuite + // uses LoggingSuite. + var tw loggo.TestWriter + err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG) + c.Assert(err, gc.IsNil) + + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?type=charm&include=manifest"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + resp = params.SearchResponse{} + err = json.Unmarshal(rec.Body.Bytes(), &resp) + c.Assert(err, gc.IsNil) + // cs:riak will not be found because it is not visible to "everyone". + // cs:wordpress will not be found because it has no manifest.
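+ // That leaves 2 results: the 4 test charms minus riak (not + // readable) and wordpress (blob removed above).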
+ c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-2) + + c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"}) +} + +func (s *SearchSuite) TestSorting(c *gc.C) { + tests := []struct { + about string + query string + results []*router.ResolvedURL + }{{ + about: "name ascending", + query: "sort=name", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "name descending", + query: "sort=-name", + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + exportTestCharms["wordpress"], + exportTestCharms["varnish"], + exportTestCharms["mysql"], + }, + }, { + about: "series ascending", + query: "sort=series,name", + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "series descending", + query: "sort=-series&sort=name", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "owner ascending", + query: "sort=owner,name", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + exportTestCharms["varnish"], + exportTestCharms["mysql"], + }, + }, { + about: "owner descending", + query: "sort=-owner&sort=name", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }} + for i, test := range tests { + c.Logf("test %d. %s", i, test.about) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?" + test.query), + }) + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + // Not using assertResultSet(c, sr, test.results) as it sorts internally. + c.Assert(sr.Results, gc.HasLen, len(test.results), gc.Commentf("expected %#v", test.results)) + c.Logf("results: %s", rec.Body.Bytes()) + for i := range test.results { + c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d", i)) + } + } +} + +func (s *SearchSuite) TestSortUnsupportedField(c *gc.C) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?sort=foo"), + }) + var e params.Error + err := json.Unmarshal(rec.Body.Bytes(), &e) + c.Assert(err, gc.IsNil) + c.Assert(e.Code, gc.Equals, params.ErrBadRequest) + c.Assert(e.Message, gc.Equals, "invalid sort field: unrecognized sort parameter \"foo\"") +} + +func (s *SearchSuite) TestDownloadsBoost(c *gc.C) { + // The charms added below differ only in their download counts, so + // the search results should come back most-downloaded first: + // varnish (8), wordpress (1), mysql (0). + // TODO (frankban): remove this call when removing the legacy counts logic.
+ patchLegacyDownloadCountsEnabled(s.AddCleanup, false) + charmDownloads := map[string]int{ + "mysql": 0, + "wordpress": 1, + "varnish": 8, + } + for n, cnt := range charmDownloads { + url := newResolvedURL("cs:~downloads-test/trusty/x-1", -1) + url.URL.Name = n + err := s.store.AddCharmWithArchive(url, getCharm(n)) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(url) + c.Assert(err, gc.IsNil) + for i := 0; i < cnt; i++ { + err := s.store.IncrementDownloadCounts(url) + c.Assert(err, gc.IsNil) + } + } + err := s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?owner=downloads-test"), + }) + var sr params.SearchResponse + err = json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, 3) + c.Assert(sr.Results[0].Id.Name, gc.Equals, "varnish") + c.Assert(sr.Results[1].Id.Name, gc.Equals, "wordpress") + c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") +} + +// TODO(mhilton) remove this test when removing legacy counts logic. +func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { + patchLegacyDownloadCountsEnabled(s.AddCleanup, true) + doc, err := s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) + c.Assert(err, gc.IsNil) + c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) + s.assertPut(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) + doc, err = s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) + c.Assert(err, gc.IsNil) + c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) +} + +func (s *SearchSuite) assertPut(c *gc.C, url string, val interface{}) { + body, err := json.Marshal(val) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Username: testUsername, + Password: testPassword, + Body: bytes.NewReader(body), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("headers: %v, body: %s", rec.HeaderMap, rec.Body.String())) + c.Assert(rec.Body.String(), gc.HasLen, 0) +} + +func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Username: testUsername, + Password: testPassword, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + expected := []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["wordpress"], + exportTestCharms["riak"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + } + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + assertResultSet(c, sr, expected) +} + +func (s *SearchSuite) TestSearchWithUserMacaroon(c *gc.C) { + m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ + checkers.DeclaredCaveat("username", "test-user"), + }) + c.Assert(err, gc.IsNil) + macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Cookies: []*http.Cookie{macaroonCookie}, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + expected := []*router.ResolvedURL{ + 
exportTestCharms["mysql"], + exportTestCharms["wordpress"], + // riak is included because the macaroon declares username + // "test-user", which is in the riak read ACL set in SetUpTest. + exportTestCharms["riak"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + } + var sr params.SearchResponse + err = json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + assertResultSet(c, sr, expected) +} + +func (s *SearchSuite) TestSearchWithUserInGroups(c *gc.C) { + m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ + checkers.DeclaredCaveat(v4.UsernameAttr, "bob"), + }) + c.Assert(err, gc.IsNil) + macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + c.Assert(err, gc.IsNil) + s.idM.groups = map[string][]string{ + "bob": {"test-user", "test-user2"}, + } + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Cookies: []*http.Cookie{macaroonCookie}, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + expected := []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["wordpress"], + exportTestCharms["riak"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + } + var sr params.SearchResponse + err = json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + assertResultSet(c, sr, expected) +} + +func (s *SearchSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) { + m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ + checkers.DeclaredCaveat("username", "test-user"), + }) + c.Assert(err, gc.IsNil) + macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Cookies: []*http.Cookie{macaroonCookie}, + Username: testUsername, + Password: "bad-password", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + expected := []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["wordpress"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + } + var sr params.SearchResponse + err = json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + assertResultSet(c, sr, expected) +} + +func assertResultSet(c *gc.C, sr params.SearchResponse, expected []*router.ResolvedURL) { + sort.Sort(searchResultById(sr.Results)) + sort.Sort(resolvedURLByPreferredURL(expected)) + c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", expected)) + for i := range expected { + c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d", i)) + } +} + +type searchResultById []params.EntityResult + +func (s searchResultById) Len() int { return len(s) } +func (s searchResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s searchResultById) Less(i, j int) bool { + return s[i].Id.String() < s[j].Id.String() +} + +type resolvedURLByPreferredURL []*router.ResolvedURL + +func (s resolvedURLByPreferredURL) Len() int { return len(s) } +func (s resolvedURLByPreferredURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s resolvedURLByPreferredURL) Less(i, j int) bool { + return s[i].PreferredURL().String() < s[j].PreferredURL().String() +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/stats_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/stats_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/stats_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,654 @@ +// Copyright 2012 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details.
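+// The tests in this file exercise the stats/counter and stats/update +// endpoints.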
+ +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + "time" + + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v4" +) + +type StatsSuite struct { + commonSuite +} + +var _ = gc.Suite(&StatsSuite{}) + +func (s *StatsSuite) TestServerStatsStatus(c *gc.C) { + tests := []struct { + path string + status int + message string + code params.ErrorCode + }{{ + path: "stats/counter/", + status: http.StatusForbidden, + message: "forbidden", + code: params.ErrForbidden, + }, { + path: "stats/counter/*", + status: http.StatusForbidden, + message: "unknown key", + code: params.ErrForbidden, + }, { + path: "stats/counter/any/", + status: http.StatusNotFound, + message: "invalid key", + code: params.ErrNotFound, + }, { + path: "stats/", + status: http.StatusNotFound, + message: "not found", + code: params.ErrNotFound, + }, { + path: "stats/any", + status: http.StatusNotFound, + message: "not found", + code: params.ErrNotFound, + }, { + path: "stats/counter/any?by=fortnight", + status: http.StatusBadRequest, + message: `invalid 'by' value "fortnight"`, + code: params.ErrBadRequest, + }, { + path: "stats/counter/any?start=tomorrow", + status: http.StatusBadRequest, + message: `invalid 'start' value "tomorrow": parsing time "tomorrow" as "2006-01-02": cannot parse "tomorrow" as "2006"`, + code: params.ErrBadRequest, + }, { + path: "stats/counter/any?stop=3", + status: http.StatusBadRequest, + message: `invalid 'stop' value "3": parsing time "3" as "2006-01-02": cannot parse "3" as "2006"`, + code: params.ErrBadRequest, + }} + for i, test := range tests { + c.Logf("test %d. %s", i, test.path) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + ExpectStatus: test.status, + ExpectBody: params.Error{ + Message: test.message, + Code: test.code, + }, + }) + } +} + +func (s *StatsSuite) TestServerStatsUpdate(c *gc.C) { + ref := charm.MustParseURL("~charmers/precise/wordpress-23") + tests := []struct { + path string + status int + body params.StatsUpdateRequest + expectBody map[string]interface{} + previousMonth bool + }{{ + path: "stats/update", + status: http.StatusOK, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/wordpress"), + }}}, + }, { + path: "stats/update", + status: http.StatusOK, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: ref, + }}, + }, + }, { + path: "stats/update", + status: http.StatusOK, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now().AddDate(0, -1, 0), + CharmReference: ref, + }}, + }, + previousMonth: true, + }} + + ch := storetesting.Charms.CharmDir("wordpress") + rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + + var countsBefore, countsAfter charmstore.AggregatedCounts + for i, test := range tests { + c.Logf("test %d. 
%s", i, test.path) + + _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "PUT", + Username: testUsername, + Password: testPassword, + JSONBody: test.body, + }) + + c.Assert(rec.Code, gc.Equals, test.status) + + _, countsAfter, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) + if test.previousMonth { + c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(0)) + } else { + c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) + } + } +} + +func (s *StatsSuite) TestServerStatsArchiveDownloadOnPromulgatedEntity(c *gc.C) { + ref := charm.MustParseURL("~charmers/precise/wordpress-23") + path := "/stats/counter/archive-download:*" + + ch := storetesting.Charms.CharmDir("wordpress") + rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + s.store.SetPromulgated(rurl, true) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "GET", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.String(), gc.Equals, `[{"Count":0}]`) + + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("stats/update"), + Method: "PUT", + Username: testUsername, + Password: testPassword, + JSONBody: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: ref, + }}}, + }) + + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "GET", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.String(), gc.Equals, `[{"Count":1}]`) +} + +func (s *StatsSuite) TestServerStatsUpdateErrors(c *gc.C) { + ref := charm.MustParseURL("~charmers/precise/wordpress-23") + tests := []struct { + path string + status int + body params.StatsUpdateRequest + expectMessage string + expectCode params.ErrorCode + partialUpdate bool + }{{ + path: "stats/update", + status: http.StatusInternalServerError, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), + }}, + }, + expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for "cs:~charmers/precise/unknown-23"`, + }, { + path: "stats/update", + status: http.StatusInternalServerError, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), + }, { + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), + }}, + }, + expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for "cs:~charmers/precise/unknown-23"`, + partialUpdate: true, + }} + + ch := storetesting.Charms.CharmDir("wordpress") + rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, 
rurl.URL.User) + c.Assert(err, gc.IsNil) + + for i, test := range tests { + c.Logf("test %d. %s", i, test.path) + var countsBefore, countsAfter charmstore.AggregatedCounts + if test.partialUpdate { + _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "PUT", + Username: testUsername, + Password: testPassword, + JSONBody: test.body, + ExpectStatus: test.status, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: test.expectCode, + }, + }) + if test.partialUpdate { + _, countsAfter, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) + c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) + } + } +} + +func (s *StatsSuite) TestServerStatsUpdateNonAdmin(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("stats/update"), + Method: "PUT", + JSONBody: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), + }}, + }, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: &params.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("stats/update"), + Method: "PUT", + Username: "brad", + Password: "pitt", + JSONBody: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), + }}, + }, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: &params.Error{ + Message: "invalid user name or password", + Code: params.ErrUnauthorized, + }, + }) +} + +func (s *StatsSuite) TestStatsCounter(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + for _, key := range [][]string{{"a", "b"}, {"a", "b"}, {"a", "c"}, {"a"}} { + err := s.store.IncCounter(key) + c.Assert(err, gc.IsNil) + } + + var all []interface{} + err := s.store.DB.StatCounters().Find(nil).All(&all) + c.Assert(err, gc.IsNil) + data, err := json.Marshal(all) + c.Assert(err, gc.IsNil) + c.Logf("%s", data) + + // A bare key matches only exact increments; a trailing ":*" + // aggregates all increments made below that prefix. + expected := map[string]int64{ + "a:b": 2, + "a:b:*": 0, + "a:*": 3, + "a": 1, + "a:b:c": 0, + } + + for counter, n := range expected { + c.Logf("test %q", counter) + url := storeURL("stats/counter/" + counter) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectBody: []params.Statistic{{ + Count: n, + }}, + }) + } +} + +func (s *StatsSuite) TestStatsCounterList(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + incs := [][]string{ + {"a"}, + {"a", "b"}, + {"a", "b", "c"}, + {"a", "b", "c"}, + {"a", "b", "d"}, + {"a", "b", "e"}, + {"a", "f", "g"}, + {"a", "f", "h"}, + {"a", "i"}, + {"j", "k"}, + } + for _, key := range incs { + err := s.store.IncCounter(key) + c.Assert(err, gc.IsNil) + } + + tests := []struct { + key string + result []params.Statistic + }{{ + key: "a", + result: []params.Statistic{{ + Key: "a", + Count: 1, + }}, + }, { + key: "a:*", + result: []params.Statistic{{ + Key: "a:b:*", + Count: 4, + }, { + Key: "a:f:*", + Count: 2, + }, { + Key: "a:b", + Count: 1, + }, { + Key: "a:i", + Count: 1, + }}, + }, {
key: "a:b:*", + result: []params.Statistic{{ + Key: "a:b:c", + Count: 2, + }, { + Key: "a:b:d", + Count: 1, + }, { + Key: "a:b:e", + Count: 1, + }}, + }, { + key: "a:*", + result: []params.Statistic{{ + Key: "a:b:*", + Count: 4, + }, { + Key: "a:f:*", + Count: 2, + }, { + Key: "a:b", + Count: 1, + }, { + Key: "a:i", + Count: 1, + }}, + }} + + for i, test := range tests { + c.Logf("test %d: %s", i, test.key) + url := storeURL("stats/counter/" + test.key + "?list=1") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectBody: test.result, + }) + } +} + +func (s *StatsSuite) TestStatsCounterBy(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + incs := []struct { + key []string + day int + }{ + {[]string{"a"}, 1}, + {[]string{"a"}, 1}, + {[]string{"b"}, 1}, + {[]string{"a", "b"}, 1}, + {[]string{"a", "c"}, 1}, + {[]string{"a"}, 3}, + {[]string{"a", "b"}, 3}, + {[]string{"b"}, 9}, + {[]string{"b"}, 9}, + {[]string{"a", "c", "d"}, 9}, + {[]string{"a", "c", "e"}, 9}, + {[]string{"a", "c", "f"}, 9}, + } + + day := func(i int) time.Time { + return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) + } + + for i, inc := range incs { + t := day(inc.day) + // Ensure each entry is unique by adding + // a sufficient increment for each test. + t = t.Add(time.Duration(i) * charmstore.StatsGranularity) + + err := s.store.IncCounterAtTime(inc.key, t) + c.Assert(err, gc.IsNil) + } + + tests := []struct { + request charmstore.CounterRequest + result []params.Statistic + }{{ + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: false, + List: false, + By: charmstore.ByDay, + }, + result: []params.Statistic{{ + Date: "2012-05-01", + Count: 2, + }, { + Date: "2012-05-03", + Count: 1, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + }, + result: []params.Statistic{{ + Date: "2012-05-01", + Count: 2, + }, { + Date: "2012-05-03", + Count: 1, + }, { + Date: "2012-05-09", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Start: time.Date(2012, 5, 2, 0, 0, 0, 0, time.UTC), + }, + result: []params.Statistic{{ + Date: "2012-05-03", + Count: 1, + }, { + Date: "2012-05-09", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Stop: time.Date(2012, 5, 4, 0, 0, 0, 0, time.UTC), + }, + result: []params.Statistic{{ + Date: "2012-05-01", + Count: 2, + }, { + Date: "2012-05-03", + Count: 1, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Start: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), + Stop: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), + }, + result: []params.Statistic{{ + Date: "2012-05-03", + Count: 1, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: true, + By: charmstore.ByDay, + }, + result: []params.Statistic{{ + Key: "a:b", + Date: "2012-05-01", + Count: 1, + }, { + Key: "a:c", + Date: "2012-05-01", + Count: 1, + }, { + Key: "a:b", + Date: "2012-05-03", + Count: 1, + }, { + Key: "a:c:*", + Date: "2012-05-09", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByWeek, + }, + result: []params.Statistic{{ + Date: "2012-05-06", + Count: 3, + }, { 
+ Date: "2012-05-13", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: true, + By: charmstore.ByWeek, + }, + result: []params.Statistic{{ + Key: "a:b", + Date: "2012-05-06", + Count: 2, + }, { + Key: "a:c", + Date: "2012-05-06", + Count: 1, + }, { + Key: "a:c:*", + Date: "2012-05-13", + Count: 3, + }}, + }} + + for i, test := range tests { + flags := make(url.Values) + url := storeURL("stats/counter/" + strings.Join(test.request.Key, ":")) + if test.request.Prefix { + url += ":*" + } + if test.request.List { + flags.Set("list", "1") + } + if !test.request.Start.IsZero() { + flags.Set("start", test.request.Start.Format("2006-01-02")) + } + if !test.request.Stop.IsZero() { + flags.Set("stop", test.request.Stop.Format("2006-01-02")) + } + switch test.request.By { + case charmstore.ByDay: + flags.Set("by", "day") + case charmstore.ByWeek: + flags.Set("by", "week") + } + if len(flags) > 0 { + url += "?" + flags.Encode() + } + c.Logf("test %d: %s", i, url) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectBody: test.result, + }) + } +} + +func (s *StatsSuite) TestStatsEnabled(c *gc.C) { + statsEnabled := func(url string) bool { + req, _ := http.NewRequest("GET", url, nil) + return v4.StatsEnabled(req) + } + c.Assert(statsEnabled("http://foo.com"), gc.Equals, true) + c.Assert(statsEnabled("http://foo.com?stats=1"), gc.Equals, true) + c.Assert(statsEnabled("http://foo.com?stats=0"), gc.Equals, false) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/status_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/status_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v4/status_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,281 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package v4_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v4" + +import ( + "encoding/json" + "net/http" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/juju/utils/debugstatus" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +var zeroTimeStr = time.Time{}.Format(time.RFC3339) + +func (s *APISuite) TestStatus(c *gc.C) { + for _, id := range []*router.ResolvedURL{ + newResolvedURL("cs:~charmers/precise/wordpress-2", 2), + newResolvedURL("cs:~charmers/precise/wordpress-3", 3), + newResolvedURL("cs:~foo/precise/arble-9", -1), + newResolvedURL("cs:~bar/utopic/arble-10", -1), + newResolvedURL("cs:~charmers/bundle/oflaughs-3", 3), + newResolvedURL("cs:~bar/bundle/oflaughs-4", -1), + } { + if id.URL.Series == "bundle" { + s.addPublicBundle(c, "wordpress-simple", id) + } else { + s.addPublicCharm(c, "wordpress", id) + } + } + now := time.Now() + s.PatchValue(&debugstatus.StartTime, now) + start := now.Add(-2 * time.Hour) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"ingestion started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: start, + }) + end := now.Add(-1 * time.Hour) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"ingestion completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: end, + }) + statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart, + }) + statisticsEnd := now.Add(-30 * time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsEnd, + }) + s.AssertDebugStatus(c, true, map[string]params.DebugStatus{ + "mongo_connected": { + Name: "MongoDB is connected", + Value: "Connected", + Passed: true, + }, + "mongo_collections": { + Name: "MongoDB collections", + Value: "All required collections exist", + Passed: true, + }, + "elasticsearch": { + Name: "Elastic search is running", + Value: "Elastic search is not configured", + Passed: true, + }, + "entities": { + Name: "Entities in charm store", + Value: "4 charms; 2 bundles; 3 promulgated", + Passed: true, + }, + "base_entities": { + Name: "Base entities in charm store", + Value: "count: 5", + Passed: true, + }, + "server_started": { + Name: "Server started", + Value: now.String(), + Passed: true, + }, + "ingestion": { + Name: "Ingestion", + Value: "started: " + start.Format(time.RFC3339) + ", completed: " + end.Format(time.RFC3339), + Passed: true, + }, + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339), + Passed: true, + }, + }) +} + +func (s *APISuite) TestStatusWithoutCorrectCollections(c *gc.C) { + s.store.DB.Entities().DropCollection() + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "mongo_collections": { + Name: "MongoDB collections", + Value: "Missing collections: [" + s.store.DB.Entities().Name + "]", + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusWithoutIngestion(c *gc.C) { + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "ingestion": { + Name: 
"Ingestion", + Value: "started: " + zeroTimeStr + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusIngestionStarted(c *gc.C) { + now := time.Now() + start := now.Add(-1 * time.Hour) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"ingestion started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: start, + }) + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "ingestion": { + Name: "Ingestion", + Value: "started: " + start.Format(time.RFC3339) + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusWithoutLegacyStatistics(c *gc.C) { + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + zeroTimeStr + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusLegacyStatisticsStarted(c *gc.C) { + now := time.Now() + statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart, + }) + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusLegacyStatisticsMultipleLogs(c *gc.C) { + now := time.Now() + statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart.Add(-1 * time.Hour), + }) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart, + }) + statisticsEnd := now.Add(-30 * time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsEnd.Add(-1 * time.Hour), + }) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsEnd, + }) + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339), + Passed: true, + }, + }) +} + +func (s *APISuite) TestStatusBaseEntitiesError(c *gc.C) { + // Add a base entity without any corresponding entities. + entity := &mongodoc.BaseEntity{ + URL: charm.MustParseURL("django"), + Name: "django", + } + err := s.store.DB.BaseEntities().Insert(entity) + c.Assert(err, gc.IsNil) + + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "base_entities": { + Name: "Base entities in charm store", + Value: "count: 1", + Passed: false, + }, + }) +} + +// AssertDebugStatus asserts that the current /debug/status endpoint +// matches the given status, ignoring status duration. +// If complete is true, it fails if the results contain +// keys not mentioned in status. 
+func (s *APISuite) AssertDebugStatus(c *gc.C, complete bool, status map[string]params.DebugStatus) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("debug/status"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes())) + c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json") + var gotStatus map[string]params.DebugStatus + err := json.Unmarshal(rec.Body.Bytes(), &gotStatus) + c.Assert(err, gc.IsNil) + for key, r := range gotStatus { + if _, found := status[key]; !complete && !found { + delete(gotStatus, key) + continue + } + r.Duration = 0 + gotStatus[key] = r + } + c.Assert(gotStatus, jc.DeepEquals, status) +} + +type statusWithElasticSearchSuite struct { + commonSuite +} + +var _ = gc.Suite(&statusWithElasticSearchSuite{}) + +func (s *statusWithElasticSearchSuite) SetUpSuite(c *gc.C) { + s.enableES = true + s.commonSuite.SetUpSuite(c) +} + +func (s *statusWithElasticSearchSuite) TestStatusWithElasticSearch(c *gc.C) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("debug/status"), + }) + var results map[string]params.DebugStatus + err := json.Unmarshal(rec.Body.Bytes(), &results) + c.Assert(err, gc.IsNil) + c.Assert(results["elasticsearch"].Name, gc.Equals, "Elastic search is running") + c.Assert(results["elasticsearch"].Value, jc.Contains, "cluster_name:") +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1382 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "archive/zip" + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/juju/httprequest" + "github.com/juju/loggo" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/audit" + "gopkg.in/juju/charmstore.v5-unstable/internal/agent" + "gopkg.in/juju/charmstore.v5-unstable/internal/cache" + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/identity" + "gopkg.in/juju/charmstore.v5-unstable/internal/mempool" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +var logger = loggo.GetLogger("charmstore.internal.v5") + +// reqHandlerPool holds a cache of ReqHandlers to save +// on allocation time. When a handler is done with, +// it is put back into the pool. +var reqHandlerPool = mempool.Pool{ + New: func() interface{} { + return newReqHandler() + }, +} + +type Handler struct { + // Pool holds the store pool that the handler was created + // with. + Pool *charmstore.Pool + + config charmstore.ServerParams + locator bakery.PublicKeyLocator + identityClient *identity.Client + + // searchCache is a cache of search results keyed on the query + // parameters of the search. 
It should only be used for searches + // from unauthenticated users. + searchCache *cache.Cache +} + +// ReqHandler holds the context for a single HTTP request. +// It uses an independent mgo session from the handler +// used by other requests. +type ReqHandler struct { + // Router holds the router that the ReqHandler will use + // to route HTTP requests. This is usually set by + // Handler.NewReqHandler to the result of RouterHandlers. + Router *router.Router + + // Handler holds the Handler that the ReqHandler + // is derived from. + Handler *Handler + + // Store holds the charmstore Store instance + // for the request. + Store *charmstore.Store + + // auth holds the results of any authorization that + // has been done on this request. + auth authorization +} + +const ( + DelegatableMacaroonExpiry = time.Minute + reqHandlerCacheSize = 50 +) + +func New(pool *charmstore.Pool, config charmstore.ServerParams) *Handler { + h := &Handler{ + Pool: pool, + config: config, + searchCache: cache.New(config.SearchCacheMaxAge), + locator: config.PublicKeyLocator, + identityClient: identity.NewClient(&identity.Params{ + URL: config.IdentityAPIURL, + Client: agent.NewClient(config.AgentUsername, config.AgentKey), + }), + } + return h +} + +// Close closes the Handler. +func (h *Handler) Close() { +} + +// NewReqHandler returns an instance of a *ReqHandler +// suitable for handling an HTTP request. After use, the ReqHandler.Close +// method should be called to close it. +// +// If no handlers are available, it returns an error with +// a charmstore.ErrTooManySessions cause. +func (h *Handler) NewReqHandler() (*ReqHandler, error) { + store, err := h.Pool.RequestStore() + if err != nil { + if errgo.Cause(err) == charmstore.ErrTooManySessions { + return nil, errgo.WithCausef(err, params.ErrServiceUnavailable, "") + } + return nil, errgo.Mask(err) + } + rh := reqHandlerPool.Get().(*ReqHandler) + rh.Handler = h + rh.Store = store + return rh, nil +} + +// RouterHandlers returns router handlers that will route requests to +// the given ReqHandler. This is provided so that different API versions +// can override selected parts of the handlers to serve their own API +// while still using ReqHandler to serve the majority of the API. 
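+//
+// For example, another API version built on this package could reuse
+// these handlers while overriding a single endpoint (a hedged sketch
+// only; serveCustomSearch is a hypothetical handler, not part of this
+// code):
+//
+//	handlers := v5.RouterHandlers(h)
+//	handlers.Global["search"] = router.HandleJSON(serveCustomSearch)
+//	h.Router = router.New(handlers, h)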
+func RouterHandlers(h *ReqHandler) *router.Handlers { + resolveId := h.ResolvedIdHandler + authId := h.AuthIdHandler + return &router.Handlers{ + Global: map[string]http.Handler{ + "changes/published": router.HandleJSON(h.serveChangesPublished), + "debug": http.HandlerFunc(h.serveDebug), + "debug/pprof/": newPprofHandler(h), + "debug/status": router.HandleJSON(h.serveDebugStatus), + "list": router.HandleJSON(h.serveList), + "log": router.HandleErrors(h.serveLog), + "search": router.HandleJSON(h.serveSearch), + "search/interesting": http.HandlerFunc(h.serveSearchInteresting), + "set-auth-cookie": router.HandleErrors(h.serveSetAuthCookie), + "stats/": router.NotFoundHandler(), + "stats/counter/": router.HandleJSON(h.serveStatsCounter), + "stats/update": router.HandleErrors(h.serveStatsUpdate), + "macaroon": router.HandleJSON(h.serveMacaroon), + "delegatable-macaroon": router.HandleJSON(h.serveDelegatableMacaroon), + "whoami": router.HandleJSON(h.serveWhoAmI), + }, + Id: map[string]router.IdHandler{ + "archive": h.serveArchive, + "archive/": resolveId(authId(h.serveArchiveFile)), + "diagram.svg": resolveId(authId(h.serveDiagram)), + "expand-id": resolveId(authId(h.serveExpandId)), + "icon.svg": resolveId(authId(h.serveIcon)), + "promulgate": resolveId(h.serveAdminPromulgate), + "publish": h.servePublish, + "readme": resolveId(authId(h.serveReadMe)), + "resources": resolveId(authId(h.serveResources)), + }, + Meta: map[string]router.BulkIncludeHandler{ + "archive-size": h.EntityHandler(h.metaArchiveSize, "size"), + "archive-upload-time": h.EntityHandler(h.metaArchiveUploadTime, "uploadtime"), + "bundle-machine-count": h.EntityHandler(h.metaBundleMachineCount, "bundlemachinecount"), + "bundle-metadata": h.EntityHandler(h.metaBundleMetadata, "bundledata"), + "bundles-containing": h.EntityHandler(h.metaBundlesContaining), + "bundle-unit-count": h.EntityHandler(h.metaBundleUnitCount, "bundleunitcount"), + "charm-actions": h.EntityHandler(h.metaCharmActions, "charmactions"), + "charm-config": h.EntityHandler(h.metaCharmConfig, "charmconfig"), + "charm-metadata": h.EntityHandler(h.metaCharmMetadata, "charmmeta"), + "charm-related": h.EntityHandler(h.metaCharmRelated, "charmprovidedinterfaces", "charmrequiredinterfaces"), + "extra-info": h.puttableEntityHandler( + h.metaExtraInfo, + h.putMetaExtraInfo, + "extrainfo", + ), + "extra-info/": h.puttableEntityHandler( + h.metaExtraInfoWithKey, + h.putMetaExtraInfoWithKey, + "extrainfo", + ), + "common-info": h.puttableBaseEntityHandler( + h.metaCommonInfo, + h.putMetaCommonInfo, + "commoninfo", + ), + "common-info/": h.puttableBaseEntityHandler( + h.metaCommonInfoWithKey, + h.putMetaCommonInfoWithKey, + "commoninfo", + ), + "hash": h.EntityHandler(h.metaHash, "blobhash"), + "hash256": h.EntityHandler(h.metaHash256, "blobhash256"), + "id": h.EntityHandler(h.metaId, "_id"), + "id-name": h.EntityHandler(h.metaIdName, "_id"), + "id-user": h.EntityHandler(h.metaIdUser, "_id"), + "id-revision": h.EntityHandler(h.metaIdRevision, "_id"), + "id-series": h.EntityHandler(h.metaIdSeries, "_id"), + "manifest": h.EntityHandler(h.metaManifest, "blobname"), + "perm": h.puttableBaseEntityHandler(h.metaPerm, h.putMetaPerm, "acls", "developmentacls"), + "perm/": h.puttableBaseEntityHandler(h.metaPermWithKey, h.putMetaPermWithKey, "acls", "developmentacls"), + "promulgated": h.baseEntityHandler(h.metaPromulgated, "promulgated"), + "revision-info": router.SingleIncludeHandler(h.metaRevisionInfo), + "stats": h.EntityHandler(h.metaStats), + "supported-series": 
h.EntityHandler(h.metaSupportedSeries, "supportedseries"),
+ "tags": h.EntityHandler(h.metaTags, "charmmeta", "bundledata"),
+
+ // endpoints not yet implemented:
+ // "color": router.SingleIncludeHandler(h.metaColor),
+ },
+ }
+}
+
+// newReqHandler returns a new instance of the v5 API handler.
+// The returned value has nil handler and store fields.
+func newReqHandler() *ReqHandler {
+ var h ReqHandler
+ h.Router = router.New(RouterHandlers(&h), &h)
+ return &h
+}
+
+// ServeHTTP implements http.Handler by first retrieving a
+// request-specific instance of ReqHandler and
+// calling ServeHTTP on that.
+func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ // When requests in this handler use router.RelativeURL, we want
+ // the "absolute path" there to be interpreted relative to the
+ // root of this handler, not the absolute root of the web server,
+ // which may be arbitrarily many levels up.
+ req.RequestURI = req.URL.Path
+
+ rh, err := h.NewReqHandler()
+ if err != nil {
+ router.WriteError(w, err)
+ return
+ }
+ defer rh.Close()
+ rh.ServeHTTP(w, req)
+}
+
+// ServeHTTP implements http.Handler by calling h.Router.ServeHTTP.
+func (h *ReqHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ h.Router.ServeHTTP(w, req)
+}
+
+// NewAPIHandler returns a new Handler as an http Handler.
+// It is defined for the convenience of callers that require a
+// charmstore.NewAPIHandlerFunc.
+func NewAPIHandler(pool *charmstore.Pool, config charmstore.ServerParams) charmstore.HTTPCloseHandler {
+ return New(pool, config)
+}
+
+// Close closes the ReqHandler. This should always be called when the
+// ReqHandler is done with.
+func (h *ReqHandler) Close() {
+ h.Store.Close()
+ h.Reset()
+ reqHandlerPool.Put(h)
+}
+
+// Reset resets the request-specific fields of the ReqHandler
+// so that it's suitable for putting back into a pool for reuse.
+func (h *ReqHandler) Reset() {
+ h.Store = nil
+ h.Handler = nil
+ h.auth = authorization{}
+}
+
+// ResolveURL implements router.Context.ResolveURL.
+func (h ReqHandler) ResolveURL(url *charm.URL) (*router.ResolvedURL, error) {
+ return resolveURL(h.Store, url)
+}
+
+// resolveURL implements URL resolving for the ReqHandler.
+// It's defined as a separate function so it can be more
+// easily unit-tested.
+func resolveURL(store *charmstore.Store, url *charm.URL) (*router.ResolvedURL, error) {
+ entity, err := store.FindBestEntity(url, "_id", "promulgated-revision")
+ if err != nil && errgo.Cause(err) != params.ErrNotFound {
+ return nil, errgo.Mask(err)
+ }
+ if errgo.Cause(err) == params.ErrNotFound {
+ return nil, noMatchingURLError(url)
+ }
+ rurl := &router.ResolvedURL{
+ URL: *entity.URL,
+ PromulgatedRevision: -1,
+ Development: url.Channel == charm.DevelopmentChannel,
+ }
+ if url.User == "" {
+ rurl.PromulgatedRevision = entity.PromulgatedRevision
+ }
+ return rurl, nil
+}
+
+func noMatchingURLError(url *charm.URL) error {
+ return errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %q", url)
+}
+
+type EntityHandlerFunc func(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)
+
+type baseEntityHandlerFunc func(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error)
+
+// EntityHandler returns a Handler that calls f with a *mongodoc.Entity that
+// contains at least the given fields. It allows only GET requests.
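+//
+// For example, the route table above registers
+//
+//	"charm-metadata": h.EntityHandler(h.metaCharmMetadata, "charmmeta"),
+//
+// so that metaCharmMetadata is only ever called with an entity whose
+// charm metadata field has already been fetched from mongo.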
+func (h *ReqHandler) EntityHandler(f EntityHandlerFunc, fields ...string) router.BulkIncludeHandler {
+ return h.puttableEntityHandler(f, nil, fields...)
+}
+
+func (h *ReqHandler) puttableEntityHandler(get EntityHandlerFunc, handlePut router.FieldPutFunc, fields ...string) router.BulkIncludeHandler {
+ handleGet := func(doc interface{}, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ edoc := doc.(*mongodoc.Entity)
+ val, err := get(edoc, id, path, flags, req)
+ return val, errgo.Mask(err, errgo.Any)
+ }
+ type entityHandlerKey struct{}
+ return router.FieldIncludeHandler(router.FieldIncludeHandlerParams{
+ Key: entityHandlerKey{},
+ Query: h.entityQuery,
+ Fields: fields,
+ HandleGet: handleGet,
+ HandlePut: handlePut,
+ Update: h.updateEntity,
+ UpdateSearch: h.updateSearch,
+ })
+}
+
+// baseEntityHandler returns a Handler that calls f with a *mongodoc.BaseEntity that
+// contains at least the given fields. It allows only GET requests.
+func (h *ReqHandler) baseEntityHandler(f baseEntityHandlerFunc, fields ...string) router.BulkIncludeHandler {
+ return h.puttableBaseEntityHandler(f, nil, fields...)
+}
+
+func (h *ReqHandler) puttableBaseEntityHandler(get baseEntityHandlerFunc, handlePut router.FieldPutFunc, fields ...string) router.BulkIncludeHandler {
+ handleGet := func(doc interface{}, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ edoc := doc.(*mongodoc.BaseEntity)
+ val, err := get(edoc, id, path, flags, req)
+ return val, errgo.Mask(err, errgo.Any)
+ }
+ type baseEntityHandlerKey struct{}
+ return router.FieldIncludeHandler(router.FieldIncludeHandlerParams{
+ Key: baseEntityHandlerKey{},
+ Query: h.baseEntityQuery,
+ Fields: fields,
+ HandleGet: handleGet,
+ HandlePut: handlePut,
+ Update: h.updateBaseEntity,
+ UpdateSearch: h.updateSearchBase,
+ })
+}
+
+func (h *ReqHandler) processEntries(entries []audit.Entry) {
+ for _, e := range entries {
+ h.addAudit(e)
+ }
+}
+
+func (h *ReqHandler) updateBaseEntity(id *router.ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error {
+ if err := h.Store.UpdateBaseEntity(id, entityUpdateOp(fields)); err != nil {
+ return errgo.Notef(err, "cannot update base entity %q", id)
+ }
+ h.processEntries(entries)
+ return nil
+}
+
+func (h *ReqHandler) updateEntity(id *router.ResolvedURL, fields map[string]interface{}, entries []audit.Entry) error {
+ err := h.Store.UpdateEntity(id, entityUpdateOp(fields))
+ if err != nil {
+ return errgo.Notef(err, "cannot update %q", &id.URL)
+ }
+ err = h.Store.UpdateSearchFields(id, fields)
+ if err != nil {
+ return errgo.Notef(err, "cannot update %q", &id.URL)
+ }
+ h.processEntries(entries)
+ return nil
+}
+
+// entityUpdateOp returns a mongo update operation that
+// sets the given fields. Any nil fields will be unset.
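+//
+// For example (field names and values here are illustrative only):
+//
+//	entityUpdateOp(map[string]interface{}{
+//		"extrainfo.foo": rawBytes,
+//		"extrainfo.bar": nil,
+//	})
+//
+// returns an update document equivalent to
+//
+//	{"$set": {"extrainfo.foo": rawBytes}, "$unset": {"extrainfo.bar": null}}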
+func entityUpdateOp(fields map[string]interface{}) bson.D { + setFields := make(bson.D, 0, len(fields)) + var unsetFields bson.D + for name, val := range fields { + if val != nil { + setFields = append(setFields, bson.DocElem{name, val}) + } else { + unsetFields = append(unsetFields, bson.DocElem{name, val}) + } + } + op := make(bson.D, 0, 2) + if len(setFields) > 0 { + op = append(op, bson.DocElem{"$set", setFields}) + } + if len(unsetFields) > 0 { + op = append(op, bson.DocElem{"$unset", unsetFields}) + } + return op +} + +func (h *ReqHandler) updateSearch(id *router.ResolvedURL, fields map[string]interface{}) error { + return h.Store.UpdateSearch(id) +} + +// updateSearchBase updates the search records for all entities with +// the same base URL as the given id. +func (h *ReqHandler) updateSearchBase(id *router.ResolvedURL, fields map[string]interface{}) error { + baseURL := id.URL + baseURL.Series = "" + baseURL.Revision = -1 + if err := h.Store.UpdateSearchBaseURL(&baseURL); err != nil { + return errgo.Mask(err) + } + return nil +} + +func (h *ReqHandler) baseEntityQuery(id *router.ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { + fields := make([]string, 0, len(selector)) + for k, v := range selector { + if v == 0 { + continue + } + fields = append(fields, k) + } + val, err := h.Store.FindBaseEntity(&id.URL, fields...) + if errgo.Cause(err) == params.ErrNotFound { + return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) + } + if err != nil { + return nil, errgo.Mask(err) + } + return val, nil +} + +func (h *ReqHandler) entityQuery(id *router.ResolvedURL, selector map[string]int, req *http.Request) (interface{}, error) { + val, err := h.Store.FindEntity(id, fieldsFromSelector(selector)...) 
+ if errgo.Cause(err) == params.ErrNotFound { + return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id) + } + if err != nil { + return nil, errgo.Mask(err) + } + return val, nil +} + +func fieldsFromSelector(selector map[string]int) []string { + fields := make([]string, 0, len(selector)) + for k, v := range selector { + if v == 0 { + continue + } + fields = append(fields, k) + } + return fields +} + +var errNotImplemented = errgo.Newf("method not implemented") + +// GET /debug +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-debug +func (h *ReqHandler) serveDebug(w http.ResponseWriter, req *http.Request) { + router.WriteError(w, errNotImplemented) +} + +// POST id/resources/name.stream +// https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idresourcesnamestream +// +// GET id/resources/name.stream[-revision]/arch/filename +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idresourcesnamestream-revisionarchfilename +// +// PUT id/resources/[~user/]series/name.stream-revision/arch?sha256=hash +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idresourcesuserseriesnamestream-revisionarchsha256hash +func (h *ReqHandler) serveResources(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { + return errNotImplemented +} + +// GET id/expand-id +// https://docs.google.com/a/canonical.com/document/d/1TgRA7jW_mmXoKH3JiwBbtPvQu7WiM6XMrz1wSrhTMXw/edit#bookmark=id.4xdnvxphb2si +func (h *ReqHandler) serveExpandId(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { + baseURL := id.PreferredURL() + baseURL.Revision = -1 + baseURL.Series = "" + + // baseURL now represents the base URL of the given id; + // it will be a promulgated URL iff the original URL was + // specified without a user, which will cause EntitiesQuery + // to return entities that match appropriately. + + // Retrieve all the entities with the same base URL. + // Note that we don't do any permission checking of the returned URLs. + // This is because we know that the user is allowed to read at + // least the resolved URL passed into serveExpandId. + // If this does not specify "development", then no development + // revisions will be chosen, so the single ACL already checked + // is sufficient. If it *does* specify "development", then we assume + // that the development ACLs are more restrictive than the + // non-development ACLs, and given that, we can allow all + // the URLs. + q := h.Store.EntitiesQuery(baseURL).Select(bson.D{{"_id", 1}, {"promulgated-url", 1}, {"development", 1}}) + if id.PromulgatedRevision != -1 { + q = q.Sort("-series", "-promulgated-revision") + } else { + q = q.Sort("-series", "-revision") + } + var docs []*mongodoc.Entity + err := q.All(&docs) + if err != nil && errgo.Cause(err) != mgo.ErrNotFound { + return errgo.Mask(err) + } + + // Collect all the expanded identifiers for each entity. + response := make([]params.ExpandedId, 0, len(docs)) + for _, doc := range docs { + url := doc.PreferredURL(id.PromulgatedRevision != -1) + response = append(response, params.ExpandedId{Id: url.String()}) + } + + // Write the response in JSON format. + return httprequest.WriteJSON(w, http.StatusOK, response) +} + +func badRequestf(underlying error, f string, a ...interface{}) error { + err := errgo.WithCausef(underlying, params.ErrBadRequest, f, a...) 
+ err.(*errgo.Err).SetLocation(1) + return err +} + +// GET id/meta/charm-metadata +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-metadata +func (h *ReqHandler) metaCharmMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return entity.CharmMeta, nil +} + +// GET id/meta/bundle-metadata +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-metadata +func (h *ReqHandler) metaBundleMetadata(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return entity.BundleData, nil +} + +// GET id/meta/bundle-unit-count +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-unit-count +func (h *ReqHandler) metaBundleUnitCount(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return bundleCount(entity.BundleUnitCount), nil +} + +// GET id/meta/bundle-machine-count +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundle-machine-count +func (h *ReqHandler) metaBundleMachineCount(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return bundleCount(entity.BundleMachineCount), nil +} + +func bundleCount(x *int) interface{} { + if x == nil { + return nil + } + return params.BundleCount{ + Count: *x, + } +} + +// GET id/meta/manifest +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetamanifest +func (h *ReqHandler) metaManifest(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + r, size, err := h.Store.BlobStore.Open(entity.BlobName) + if err != nil { + return nil, errgo.Notef(err, "cannot open archive data for %s", id) + } + defer r.Close() + zipReader, err := zip.NewReader(charmstore.ReaderAtSeeker(r), size) + if err != nil { + return nil, errgo.Notef(err, "cannot read archive data for %s", id) + } + // Collect the files. 
+ manifest := make([]params.ManifestFile, 0, len(zipReader.File))
+ for _, file := range zipReader.File {
+ fileInfo := file.FileInfo()
+ if fileInfo.IsDir() {
+ continue
+ }
+ manifest = append(manifest, params.ManifestFile{
+ Name: file.Name,
+ Size: fileInfo.Size(),
+ })
+ }
+ return manifest, nil
+}
+
+// GET id/meta/charm-actions
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-actions
+func (h *ReqHandler) metaCharmActions(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return entity.CharmActions, nil
+}
+
+// GET id/meta/charm-config
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-config
+func (h *ReqHandler) metaCharmConfig(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return entity.CharmConfig, nil
+}
+
+// GET id/meta/color
+func (h *ReqHandler) metaColor(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return nil, errNotImplemented
+}
+
+// GET id/meta/archive-size
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-size
+func (h *ReqHandler) metaArchiveSize(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return &params.ArchiveSizeResponse{
+ Size: entity.Size,
+ }, nil
+}
+
+// GET id/meta/hash
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash
+func (h *ReqHandler) metaHash(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return &params.HashResponse{
+ Sum: entity.BlobHash,
+ }, nil
+}
+
+// GET id/meta/hash256
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetahash256
+func (h *ReqHandler) metaHash256(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ // TODO frankban: remove this lazy calculation after the cshash256
+ // command is run in the production db. At that point, entities
+ // always have their blobhash256 field populated, and there is no
+ // need for this lazy evaluation anymore.
+ if entity.BlobHash256 == "" {
+ var err error
+ if entity.BlobHash256, err = h.Store.UpdateEntitySHA256(id); err != nil {
+ return nil, errgo.Notef(err, "cannot retrieve the SHA256 hash for entity %s", entity.URL)
+ }
+ }
+ return &params.HashResponse{
+ Sum: entity.BlobHash256,
+ }, nil
+}
+
+// GET id/meta/tags
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetatags
+func (h *ReqHandler) metaTags(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ var tags []string
+ switch {
+ case id.URL.Series == "bundle":
+ tags = entity.BundleData.Tags
+ case len(entity.CharmMeta.Tags) > 0:
+ // TODO only return whitelisted tags.
+ tags = entity.CharmMeta.Tags
+ default:
+ tags = entity.CharmMeta.Categories
+ }
+ return params.TagsResponse{
+ Tags: tags,
+ }, nil
+}
+
+// GET id/meta/stats/
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetastats
+func (h *ReqHandler) metaStats(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+
+ // Retrieve the aggregated downloads count for the specific revision.
+ refresh, err := router.ParseBool(flags.Get("refresh"))
+ if err != nil {
+ return nil, badRequestf(err, "invalid refresh parameter")
+ }
+ counts, countsAllRevisions, err := h.Store.ArchiveDownloadCounts(id.PreferredURL(), refresh)
+ if err != nil {
+ return nil, errgo.Mask(err)
+ }
+ // Return the response.
+ return &params.StatsResponse{
+ ArchiveDownloadCount: counts.Total,
+ ArchiveDownload: params.StatsCount{
+ Total: counts.Total,
+ Day: counts.LastDay,
+ Week: counts.LastWeek,
+ Month: counts.LastMonth,
+ },
+ ArchiveDownloadAllRevisions: params.StatsCount{
+ Total: countsAllRevisions.Total,
+ Day: countsAllRevisions.LastDay,
+ Week: countsAllRevisions.LastWeek,
+ Month: countsAllRevisions.LastMonth,
+ },
+ }, nil
+}
+
+// GET id/meta/revision-info
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetarevision-info
+func (h *ReqHandler) metaRevisionInfo(id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ searchURL := id.PreferredURL()
+ searchURL.Revision = -1
+
+ q := h.Store.EntitiesQuery(searchURL)
+ if id.PromulgatedRevision != -1 {
+ q = q.Sort("-promulgated-revision")
+ } else {
+ q = q.Sort("-revision")
+ }
+ var docs []*mongodoc.Entity
+ if err := q.Select(bson.D{{"_id", 1}, {"promulgated-url", 1}}).All(&docs); err != nil {
+ return nil, errgo.Notef(err, "cannot get ids")
+ }
+
+ if len(docs) == 0 {
+ return nil, errgo.WithCausef(nil, params.ErrNotFound, "no matching charm or bundle for %s", id)
+ }
+ var response params.RevisionInfoResponse
+ for _, doc := range docs {
+ if id.PromulgatedRevision != -1 {
+ response.Revisions = append(response.Revisions, doc.PromulgatedURL)
+ } else {
+ response.Revisions = append(response.Revisions, doc.URL)
+ }
+ }
+
+ // Return the response; the router marshals it as JSON.
+ return &response, nil
+}
+
+// GET id/meta/id-user
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-user
+func (h *ReqHandler) metaIdUser(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return params.IdUserResponse{
+ User: id.PreferredURL().User,
+ }, nil
+}
+
+// GET id/meta/id-series
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-series
+func (h *ReqHandler) metaIdSeries(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return params.IdSeriesResponse{
+ Series: id.PreferredURL().Series,
+ }, nil
+}
+
+// GET id/meta/id
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid
+func (h *ReqHandler) metaId(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ u := id.PreferredURL()
+ return params.IdResponse{
+ Id: u,
+ User: u.User,
+ Series: u.Series,
+ Name: u.Name,
+ Revision: u.Revision,
+ }, nil
+}
+
+// GET id/meta/id-name
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-name
+func (h *ReqHandler) metaIdName(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return params.IdNameResponse{
+ Name: id.URL.Name,
+ }, nil
+}
+
+// GET id/meta/id-revision
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaid-revision
+func (h *ReqHandler) metaIdRevision(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ return params.IdRevisionResponse{
+ Revision: id.PreferredURL().Revision,
+ }, nil
+}
+
+// GET id/meta/supported-series
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetasupported-series
+func (h *ReqHandler) metaSupportedSeries(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ if entity.URL.Series == "bundle" {
+ return nil, nil
+ }
+ return &params.SupportedSeriesResponse{
+ SupportedSeries: entity.SupportedSeries,
+ }, nil
+}
+
+// GET id/meta/extra-info
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaextra-info
+func (h *ReqHandler) metaExtraInfo(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+ // The extra-info is stored in mongo as simple byte
+ // slices, so convert the values to json.RawMessages
+ // so that the client will see the original JSON.
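+ // (If the raw []byte values were marshalled directly, encoding/json
+ // would base64-encode them as strings; wrapping each value in a
+ // json.RawMessage, as below, preserves the stored JSON verbatim.)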
+ m := make(map[string]*json.RawMessage) + for key, val := range entity.ExtraInfo { + jmsg := json.RawMessage(val) + m[key] = &jmsg + } + return m, nil +} + +// GET id/meta/extra-info/key +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaextra-infokey +func (h *ReqHandler) metaExtraInfoWithKey(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + path = strings.TrimPrefix(path, "/") + var data json.RawMessage = entity.ExtraInfo[path] + if len(data) == 0 { + return nil, nil + } + return &data, nil +} + +// PUT id/meta/extra-info +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaextra-info +func (h *ReqHandler) putMetaExtraInfo(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { + var fields map[string]*json.RawMessage + if err := json.Unmarshal(*val, &fields); err != nil { + return errgo.Notef(err, "cannot unmarshal extra-info body") + } + // Check all the fields are OK before adding any fields to be updated. + for key := range fields { + if err := checkExtraInfoKey(key, "extra-info"); err != nil { + return err + } + } + for key, val := range fields { + if val == nil { + updater.UpdateField("extrainfo."+key, nil, nil) + } else { + updater.UpdateField("extrainfo."+key, *val, nil) + } + } + return nil +} + +var nullBytes = []byte("null") + +// PUT id/meta/extra-info/key +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetaextra-infokey +func (h *ReqHandler) putMetaExtraInfoWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { + key := strings.TrimPrefix(path, "/") + if err := checkExtraInfoKey(key, "extra-info"); err != nil { + return err + } + // If the user puts null, we treat that as if they want to + // delete the field. + if val == nil || bytes.Equal(*val, nullBytes) { + updater.UpdateField("extrainfo."+key, nil, nil) + } else { + updater.UpdateField("extrainfo."+key, *val, nil) + } + return nil +} + +// GET id/meta/common-info +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacommon-info +func (h *ReqHandler) metaCommonInfo(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + // The common-info is stored in mongo as simple byte + // slices, so convert the values to json.RawMessages + // so that the client will see the original JSON. 
+ m := make(map[string]*json.RawMessage) + for key, val := range entity.CommonInfo { + jmsg := json.RawMessage(val) + m[key] = &jmsg + } + return m, nil +} + +// GET id/meta/common-info/key +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacommon-infokey +func (h *ReqHandler) metaCommonInfoWithKey(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + path = strings.TrimPrefix(path, "/") + var data json.RawMessage = entity.CommonInfo[path] + if len(data) == 0 { + return nil, nil + } + return &data, nil +} + +// PUT id/meta/common-info +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetacommon-info +func (h *ReqHandler) putMetaCommonInfo(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { + var fields map[string]*json.RawMessage + if err := json.Unmarshal(*val, &fields); err != nil { + return errgo.Notef(err, "cannot unmarshal common-info body") + } + // Check all the fields are OK before adding any fields to be updated. + for key := range fields { + if err := checkExtraInfoKey(key, "common-info"); err != nil { + return err + } + } + for key, val := range fields { + if val == nil { + updater.UpdateField("commoninfo."+key, nil, nil) + } else { + updater.UpdateField("commoninfo."+key, *val, nil) + } + } + return nil +} + +// PUT id/meta/common-info/key +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetacommon-infokey +func (h *ReqHandler) putMetaCommonInfoWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { + key := strings.TrimPrefix(path, "/") + if err := checkExtraInfoKey(key, "common-info"); err != nil { + return err + } + // If the user puts null, we treat that as if they want to + // delete the field. 
+ if val == nil || bytes.Equal(*val, nullBytes) { + updater.UpdateField("commoninfo."+key, nil, nil) + } else { + updater.UpdateField("commoninfo."+key, *val, nil) + } + return nil +} + +func checkExtraInfoKey(key string, field string) error { + if strings.ContainsAny(key, "./$") { + return errgo.WithCausef(nil, params.ErrBadRequest, "bad key for "+field) + } + return nil +} + +// GET id/meta/perm +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaperm +func (h *ReqHandler) metaPerm(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + acls := entity.ACLs + if id.Development { + acls = entity.DevelopmentACLs + } + return params.PermResponse{ + Read: acls.Read, + Write: acls.Write, + }, nil +} + +// PUT id/meta/perm +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmeta +func (h *ReqHandler) putMetaPerm(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { + var perms params.PermRequest + if err := json.Unmarshal(*val, &perms); err != nil { + return errgo.Mask(err) + } + field := "acls" + if id.Development { + field = "developmentacls" + } else { + isPublic := false + for _, p := range perms.Read { + if p == params.Everyone { + isPublic = true + break + } + } + updater.UpdateField("public", isPublic, nil) + } + updater.UpdateField(field+".read", perms.Read, &audit.Entry{ + Op: audit.OpSetPerm, + Entity: &id.URL, + ACL: &audit.ACL{ + Read: perms.Read, + }, + }) + updater.UpdateField(field+".write", perms.Write, &audit.Entry{ + Op: audit.OpSetPerm, + Entity: &id.URL, + ACL: &audit.ACL{ + Write: perms.Write, + }, + }) + updater.UpdateSearch() + return nil +} + +// GET id/meta/promulgated +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapromulgated +func (h *ReqHandler) metaPromulgated(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return params.PromulgatedResponse{ + Promulgated: bool(entity.Promulgated), + }, nil +} + +// GET id/meta/perm/key +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetapermkey +func (h *ReqHandler) metaPermWithKey(entity *mongodoc.BaseEntity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + acls := entity.ACLs + if id.Development { + acls = entity.DevelopmentACLs + } + switch path { + case "/read": + return acls.Read, nil + case "/write": + return acls.Write, nil + } + return nil, errgo.WithCausef(nil, params.ErrNotFound, "unknown permission") +} + +// PUT id/meta/perm/key +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idmetapermkey +func (h *ReqHandler) putMetaPermWithKey(id *router.ResolvedURL, path string, val *json.RawMessage, updater *router.FieldUpdater, req *http.Request) error { + var perms []string + if err := json.Unmarshal(*val, &perms); err != nil { + return errgo.Mask(err) + } + field := "acls" + if id.Development { + field = "developmentacls" + } + switch path { + case "/read": + updater.UpdateField(field+".read", perms, &audit.Entry{ + Op: audit.OpSetPerm, + Entity: &id.URL, + ACL: &audit.ACL{ + Read: perms, + }, + }) + if !id.Development { + isPublic := false + for _, p := range perms { + if p == params.Everyone { + isPublic = true + break + } + } + updater.UpdateField("public", isPublic, nil) + } + updater.UpdateSearch() + return nil + case "/write": + updater.UpdateField(field+".write", perms, 
&audit.Entry{ + Op: audit.OpSetPerm, + Entity: &id.URL, + ACL: &audit.ACL{ + Write: perms, + }, + }) + return nil + } + return errgo.WithCausef(nil, params.ErrNotFound, "unknown permission") +} + +// GET id/meta/archive-upload-time +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetaarchive-upload-time +func (h *ReqHandler) metaArchiveUploadTime(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) { + return ¶ms.ArchiveUploadTimeResponse{ + UploadTime: entity.UploadTime.UTC(), + }, nil +} + +// GET changes/published[?limit=$count][&from=$fromdate][&to=$todate] +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-changespublished +func (h *ReqHandler) serveChangesPublished(_ http.Header, r *http.Request) (interface{}, error) { + start, stop, err := parseDateRange(r.Form) + if err != nil { + return nil, errgo.Mask(err, errgo.Is(params.ErrBadRequest)) + } + limit := -1 + if limitStr := r.Form.Get("limit"); limitStr != "" { + limit, err = strconv.Atoi(limitStr) + if err != nil || limit <= 0 { + return nil, badRequestf(nil, "invalid 'limit' value") + } + } + var tquery bson.D + if !start.IsZero() { + tquery = make(bson.D, 0, 2) + tquery = append(tquery, bson.DocElem{ + Name: "$gte", + Value: start, + }) + } + if !stop.IsZero() { + tquery = append(tquery, bson.DocElem{ + Name: "$lte", + Value: stop, + }) + } + var findQuery bson.D + if len(tquery) > 0 { + findQuery = bson.D{{"uploadtime", tquery}} + } + query := h.Store.DB.Entities(). + Find(findQuery). + Sort("-uploadtime"). + Select(bson.D{{"_id", 1}, {"uploadtime", 1}}) + + results := []params.Published{} + var count int + var entity mongodoc.Entity + iter := query.Iter() + for iter.Next(&entity) { + // Ignore entities that aren't readable by the current user. + if err := h.AuthorizeEntity(charmstore.EntityResolvedURL(&entity), r); err != nil { + continue + } + results = append(results, params.Published{ + Id: entity.URL, + PublishTime: entity.UploadTime.UTC(), + }) + count++ + if limit > 0 && limit <= count { + break + } + } + if err := iter.Close(); err != nil { + return nil, errgo.Mask(err) + } + return results, nil +} + +// GET /macaroon +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-macaroon +func (h *ReqHandler) serveMacaroon(_ http.Header, _ *http.Request) (interface{}, error) { + return h.newMacaroon() +} + +// GET /delegatable-macaroon +// See https://github.com/juju/charmstore/blob/v4/docs/API.md#get-delegatable-macaroon +func (h *ReqHandler) serveDelegatableMacaroon(_ http.Header, req *http.Request) (interface{}, error) { + values, err := url.ParseQuery(req.URL.RawQuery) + if err != nil { + return nil, errgo.Mask(err) + } + entityIds := values["id"] + // No entity ids, so we provide a macaroon that's good for any entity that the + // user can access, as long as that entity doesn't have terms and conditions. + if len(entityIds) == 0 { + auth, err := h.authorize(req, []string{params.Everyone}, true, nil) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + if auth.Username == "" { + return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials") + } + // TODO propagate expiry time from macaroons in request. 
+ m, err := h.Store.Bakery.NewMacaroon("", nil, []checkers.Caveat{
+ checkers.DeclaredCaveat(UsernameAttr, auth.Username),
+ checkers.TimeBeforeCaveat(time.Now().Add(DelegatableMacaroonExpiry)),
+ checkers.DenyCaveat(opAccessCharmWithTerms),
+ })
+ if err != nil {
+ return nil, errgo.Mask(err)
+ }
+ return m, nil
+ }
+ resolvedURLs := make([]*router.ResolvedURL, len(entityIds))
+ for i, id := range entityIds {
+ charmRef, err := charm.ParseURL(id)
+ if err != nil {
+ return nil, errgo.WithCausef(err, params.ErrBadRequest, `bad "id" parameter`)
+ }
+ resolvedURL, err := h.ResolveURL(charmRef)
+ if err != nil {
+ return nil, errgo.Mask(err)
+ }
+ resolvedURLs[i] = resolvedURL
+ }
+
+ // Note that we require authorization even though we allow
+ // anyone to obtain a delegatable macaroon. This means
+ // that we will be able to add the declared caveats to
+ // the returned macaroon.
+ auth, err := h.authorizeEntityAndTerms(req, resolvedURLs)
+ if err != nil {
+ return nil, errgo.Mask(err, errgo.Any)
+ }
+ if auth.Username == "" {
+ return nil, errgo.WithCausef(nil, params.ErrForbidden, "delegatable macaroon is not obtainable using admin credentials")
+ }
+
+ resolvedURLstrings := make([]string, len(resolvedURLs))
+ for i, resolvedURL := range resolvedURLs {
+ resolvedURLstrings[i] = resolvedURL.URL.String()
+ }
+
+ // TODO propagate expiry time from macaroons in request.
+ m, err := h.Store.Bakery.NewMacaroon("", nil, []checkers.Caveat{
+ checkers.DeclaredCaveat(UsernameAttr, auth.Username),
+ checkers.TimeBeforeCaveat(time.Now().Add(DelegatableMacaroonExpiry)),
+ checkers.Caveat{Condition: "is-entity " + strings.Join(resolvedURLstrings, " ")},
+ })
+ if err != nil {
+ return nil, errgo.Mask(err)
+ }
+ return m, nil
+}
+
+// GET /whoami
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#whoami
+func (h *ReqHandler) serveWhoAmI(_ http.Header, req *http.Request) (interface{}, error) {
+ auth, err := h.authorize(req, []string{params.Everyone}, true, nil)
+ if err != nil {
+ return nil, errgo.Mask(err, errgo.Any)
+ }
+ if auth.Admin {
+ return nil, errgo.WithCausef(nil, params.ErrForbidden, "admin credentials used")
+ }
+ groups, err := h.groupsForUser(auth.Username)
+ if err != nil {
+ return nil, errgo.Mask(err, errgo.Any)
+ }
+ return params.WhoAmIResponse{
+ User: auth.Username,
+ Groups: groups,
+ }, nil
+}
+
+// PUT id/promulgate
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpromulgate
+func (h *ReqHandler) serveAdminPromulgate(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
+ if _, err := h.authorize(req, []string{PromulgatorsGroup}, false, id); err != nil {
+ return errgo.Mask(err, errgo.Any)
+ }
+ if req.Method != "PUT" {
+ return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method)
+ }
+ data, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ return errgo.Mask(err)
+ }
+ var promulgate params.PromulgateRequest
+ if err := json.Unmarshal(data, &promulgate); err != nil {
+ return errgo.WithCausef(err, params.ErrBadRequest, "")
+ }
+ if err := h.Store.SetPromulgated(id, promulgate.Promulgated); err != nil {
+ return errgo.Mask(err, errgo.Any)
+ }
+
+ if promulgate.Promulgated {
+ // Set write permissions for the non-development entity to promulgators
+ // only, so that the user cannot just publish newer promulgated
+ // versions of the charm or bundle. Promulgators are responsible for
+ // reviewing and publishing subsequent revisions of this entity.
+ if err := h.updateBaseEntity(id, map[string]interface{}{
+ "acls.write": []string{PromulgatorsGroup},
+ }, nil); err != nil {
+ return errgo.Notef(err, "cannot set permissions for %q", id)
+ }
+ }
+
+ // Build an audit entry for this promulgation.
+ e := audit.Entry{
+ Entity: &id.URL,
+ }
+ if promulgate.Promulgated {
+ e.Op = audit.OpPromulgate
+ } else {
+ e.Op = audit.OpUnpromulgate
+ }
+ h.addAudit(e)
+
+ return nil
+}
+
+// PUT id/publish
+// See https://github.com/juju/charmstore/blob/v4/docs/API.md#put-idpublish
+func (h *ReqHandler) servePublish(id *charm.URL, w http.ResponseWriter, req *http.Request) error {
+ // Perform basic validation of the request.
+ if req.Method != "PUT" {
+ return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method)
+ }
+ if id.Channel != "" {
+ return errgo.WithCausef(nil, params.ErrForbidden, "can only set publish on published URL, %q provided", id)
+ }
+
+ // Retrieve the requested action from the request body.
+ var publish struct {
+ params.PublishRequest `httprequest:",body"`
+ }
+ if err := httprequest.Unmarshal(httprequest.Params{Request: req}, &publish); err != nil {
+ return errgo.WithCausef(err, params.ErrBadRequest, "cannot unmarshal publish request body")
+ }
+
+ // Retrieve the resolved URL for the entity to update. It will refer
+ // to the entity under development if the action is to publish a charm or
+ // bundle, or to the published one otherwise.
+ url := *id
+ if publish.Published {
+ url = *id.WithChannel(charm.DevelopmentChannel)
+ }
+ rurl, err := h.Router.Context.ResolveURL(&url)
+ if err != nil {
+ return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+ }
+
+ // Authorize the operation: users must have write permissions on the
+ // published charm or bundle.
+ prurl := *rurl
+ prurl.Development = false
+ if err := h.AuthorizeEntity(&prurl, req); err != nil {
+ return errgo.Mask(err, errgo.Any)
+ }
+
+ // Update the entity.
+ if err := h.Store.SetDevelopment(rurl, !publish.Published); err != nil {
+ return errgo.NoteMask(err, "cannot publish or unpublish charm or bundle", errgo.Is(params.ErrNotFound))
+ }
+
+ // Return information on the updated charm or bundle.
+ rurl.Development = !publish.Published
+ return httprequest.WriteJSON(w, http.StatusOK, &params.PublishResponse{
+ Id: rurl.UserOwnedURL(),
+ PromulgatedId: rurl.PromulgatedURL(),
+ })
+}
+
+// serveSetAuthCookie sets the provided macaroon slice as a cookie on the
+// client.
+func (h *ReqHandler) serveSetAuthCookie(w http.ResponseWriter, req *http.Request) error {
+ // Allow cross-domain requests for the origin of this specific request so
+ // that cookies can be set even if the request is xhr.
+ w.Header().Set("Access-Control-Allow-Origin", req.Header.Get("Origin"))
+ if req.Method != "PUT" {
+ return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method)
+ }
+ var p params.SetAuthCookie
+ decoder := json.NewDecoder(req.Body)
+ if err := decoder.Decode(&p); err != nil {
+ return errgo.Notef(err, "cannot unmarshal macaroons")
+ }
+ cookie, err := httpbakery.NewCookie(p.Macaroons)
+ if err != nil {
+ return errgo.Notef(err, "cannot create macaroons cookie")
+ }
+ cookie.Path = "/"
+ cookie.Name = "macaroon-ui"
+ http.SetCookie(w, cookie)
+ return nil
+}
+
+// ResolvedIdHandler represents an HTTP handler that is invoked
+// on a resolved entity id.
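+//
+// Handlers of this type are composed with the resolving and authorizing
+// wrappers; for example, the route table above registers
+//
+//	"readme": resolveId(authId(h.serveReadMe)),
+//
+// where resolveId is h.ResolvedIdHandler and authId is h.AuthIdHandler.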
+type ResolvedIdHandler func(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error
+
+// AuthIdHandler returns a ResolvedIdHandler that uses h.Router.Context.AuthorizeEntity to
+// check that the client is authorized to perform the HTTP request method before
+// invoking f.
+//
+// Note that it only accesses h.Router.Context when the returned
+// handler is called.
+func (h *ReqHandler) AuthIdHandler(f ResolvedIdHandler) ResolvedIdHandler {
+	return func(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
+		if err := h.Router.Context.AuthorizeEntity(id, req); err != nil {
+			return errgo.Mask(err, errgo.Any)
+		}
+		if err := f(id, w, req); err != nil {
+			return errgo.Mask(err, errgo.Any)
+		}
+		return nil
+	}
+}
+
+// ResolvedIdHandler returns an id handler that uses h.Router.Context.ResolveURL
+// to resolve any entity ids before calling f with the resolved id.
+//
+// Note that it only accesses h.Router.Context when the returned
+// handler is called.
+func (h *ReqHandler) ResolvedIdHandler(f ResolvedIdHandler) router.IdHandler {
+	return func(id *charm.URL, w http.ResponseWriter, req *http.Request) error {
+		rid, err := h.Router.Context.ResolveURL(id)
+		if err != nil {
+			return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+		}
+		return f(rid, w, req)
+	}
+}
+
+var testAddAuditCallback func(e audit.Entry)
+
+// addAudit delegates an audit entry to the store to record an audit log after
+// it has correctly set the user doing the action.
+func (h *ReqHandler) addAudit(e audit.Entry) {
+	if h.auth.Username == "" && !h.auth.Admin {
+		panic("No auth set in ReqHandler")
+	}
+	e.User = h.auth.Username
+	if h.auth.Admin && e.User == "" {
+		e.User = "admin"
+	}
+	h.Store.AddAudit(e)
+	if testAddAuditCallback != nil {
+		testAddAuditCallback(e)
+	}
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/api_test.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,3612 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"archive/zip"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	jujutesting "github.com/juju/testing"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/macaroon.v1"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/audit"
+	"gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/hashtesting"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+)
+
+var testPublicKey = bakery.PublicKey{
+	bakery.Key{
+		0xf6, 0xfb, 0xcf, 0x67, 0x8c, 0x5a, 0xb6, 0x52,
+		0xa9, 0x23, 0x4d, 0x7e, 0x01, 0xf5, 0x0a, 0x25,
+		0xc4, 0x63, 0x69, 0x54, 0x42, 0x62, 0xaf, 0x62,
+		0xbe, 0x40, 0x6a, 0x0b, 0xe2, 0x9a, 0xb0, 0x5f,
+	},
+}
+
+const (
+	testUsername = "test-user"
+	testPassword = "test-password"
+)
+
+var es *elasticsearch.Database = &elasticsearch.Database{"localhost:9200"}
+var si *charmstore.SearchIndex = &charmstore.SearchIndex{
+	Database: es,
+	Index:    "cs",
+}
+
+type APISuite struct {
+	commonSuite
+}
+
+func (s *APISuite) SetUpSuite(c *gc.C) {
+	s.enableIdentity = true
+	s.commonSuite.SetUpSuite(c)
+}
+
+var newResolvedURL = router.MustNewResolvedURL
+
+var _ = gc.Suite(&APISuite{})
+
+// patchLegacyDownloadCountsEnabled sets LegacyDownloadCountsEnabled to the
+// given value for the duration of the test.
+// TODO (frankban): remove this function when removing the legacy counts logic.
+func patchLegacyDownloadCountsEnabled(addCleanup func(jujutesting.CleanupFunc), value bool) {
+	original := charmstore.LegacyDownloadCountsEnabled
+	charmstore.LegacyDownloadCountsEnabled = value
+	addCleanup(func(*gc.C) {
+		charmstore.LegacyDownloadCountsEnabled = original
+	})
+}
+
+type metaEndpointExpectedValueGetter func(*charmstore.Store, *router.ResolvedURL) (interface{}, error)
+
+type metaEndpoint struct {
+	// name names the meta endpoint.
+	name string
+
+	// exclusive specifies whether the endpoint is
+	// valid for charms only (charmOnly), bundles only (bundleOnly)
+	// or both (zero).
+	exclusive int
+
+	// get returns the expected data for the endpoint.
+	get metaEndpointExpectedValueGetter
+
+	// checkURL holds one URL to sanity check data against.
+	checkURL *router.ResolvedURL
+
+	// assertCheckData holds a function that will be used to check that
+	// the get function returns sane data for checkURL.
+	assertCheckData func(c *gc.C, data interface{})
+}
+
+const (
+	charmOnly = iota + 1
+	bundleOnly
+)
+
+var metaEndpoints = []metaEndpoint{{
+	name:      "charm-config",
+	exclusive: charmOnly,
+	get:       entityFieldGetter("CharmConfig"),
+	checkURL:  newResolvedURL("cs:~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(*charm.Config).Options["blog-title"].Default, gc.Equals, "My Title")
+	},
+}, {
+	name:      "charm-metadata",
+	exclusive: charmOnly,
+	get:       entityFieldGetter("CharmMeta"),
+	checkURL:  newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(*charm.Meta).Summary, gc.Equals, "Blog engine")
+	},
+}, {
+	name:      "bundle-metadata",
+	exclusive: bundleOnly,
+	get:       entityFieldGetter("BundleData"),
+	checkURL:  newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(*charm.BundleData).Services["wordpress"].Charm, gc.Equals, "wordpress")
+	},
+}, {
+	name:      "bundle-unit-count",
+	exclusive: bundleOnly,
+	get: entityGetter(func(entity *mongodoc.Entity) interface{} {
+		if entity.BundleData == nil {
+			return nil
+		}
+		return params.BundleCount{*entity.BundleUnitCount}
+	}),
+	checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(params.BundleCount).Count, gc.Equals, 2)
+	},
+}, {
+	name:      "bundle-machine-count",
+	exclusive: bundleOnly,
+	get: entityGetter(func(entity *mongodoc.Entity) interface{} {
+		if entity.BundleData == nil {
+			return nil
+		}
+		return params.BundleCount{*entity.BundleMachineCount}
+	}),
+	checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(params.BundleCount).Count, gc.Equals, 2)
+	},
+}, {
+	name:      "charm-actions",
+	exclusive: charmOnly,
+	get:       entityFieldGetter("CharmActions"),
+	checkURL:  newResolvedURL("~charmers/precise/dummy-10", 10),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(*charm.Actions).ActionSpecs["snapshot"].Description, gc.Equals, "Take a snapshot of the database.")
+	},
+}, {
+	name: "archive-size",
+	get: entityGetter(func(entity *mongodoc.Entity) interface{} {
+		return &params.ArchiveSizeResponse{
+			Size: entity.Size,
+		}
+	}),
+	checkURL:        newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: entitySizeChecker,
+}, {
+	name: "hash",
+	get: entityGetter(func(entity *mongodoc.Entity) interface{} {
+		return &params.HashResponse{
+			Sum: entity.BlobHash,
+		}
+	}),
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(*params.HashResponse).Sum, gc.Not(gc.Equals), "")
+	},
+}, {
+	name: "hash256",
+	get: entityGetter(func(entity *mongodoc.Entity) interface{} {
+		return &params.HashResponse{
+			Sum: entity.BlobHash256,
+		}
+	}),
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data.(*params.HashResponse).Sum, gc.Not(gc.Equals), "")
+	},
+}, {
+	name: "manifest",
+	get: zipGetter(func(r *zip.Reader) interface{} {
+		var manifest []params.ManifestFile
+		for _, file := range r.File {
+			if strings.HasSuffix(file.Name, "/") {
+				continue
+			}
+			manifest = append(manifest, params.ManifestFile{
+				Name: file.Name,
+				Size: int64(file.UncompressedSize64),
+			})
+		}
+		return manifest
+	}),
+	checkURL: newResolvedURL("~charmers/bundle/wordpress-simple-42", 42),
newResolvedURL("~charmers/bundle/wordpress-simple-42", 42), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data.([]params.ManifestFile), gc.Not(gc.HasLen), 0) + }, +}, { + name: "archive-upload-time", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + return ¶ms.ArchiveUploadTimeResponse{ + UploadTime: entity.UploadTime.UTC(), + } + }), + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + response := data.(*params.ArchiveUploadTimeResponse) + c.Assert(response.UploadTime, gc.Not(jc.Satisfies), time.Time.IsZero) + c.Assert(response.UploadTime.Location(), gc.Equals, time.UTC) + }, +}, { + name: "revision-info", + get: func(store *charmstore.Store, id *router.ResolvedURL) (interface{}, error) { + ref := &id.URL + if id.PromulgatedRevision != -1 { + ref = id.PreferredURL() + } + return params.RevisionInfoResponse{ + []*charm.URL{ref}, + }, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-99", 99), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.DeepEquals, params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:precise/wordpress-99"), + }}) + }, +}, { + name: "charm-related", + exclusive: charmOnly, + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + // The charms we use for those tests are not related each other. + // Charm relations are independently tested in relations_test.go. + if url.URL.Series == "bundle" { + return nil, nil + } + return ¶ms.RelatedResponse{}, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.FitsTypeOf, (*params.RelatedResponse)(nil)) + }, +}, { + name: "bundles-containing", + exclusive: charmOnly, + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + // The charms we use for those tests are not included in any bundle. + // Charm/bundle relations are tested in relations_test.go. + if url.URL.Series == "bundle" { + return nil, nil + } + return []*params.MetaAnyResponse{}, nil + }, + checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.FitsTypeOf, []*params.MetaAnyResponse(nil)) + }, +}, { + name: "stats", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + // The entities used for those tests were never downloaded. 
+		return &params.StatsResponse{
+			ArchiveDownloadCount: 0,
+		}, nil
+	},
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.FitsTypeOf, (*params.StatsResponse)(nil))
+	},
+}, {
+	name: "extra-info",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		return map[string]string{
+			"key": "value " + url.URL.String(),
+		}, nil
+	},
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.DeepEquals, map[string]string{
+			"key": "value cs:~charmers/precise/wordpress-23",
+		})
+	},
+}, {
+	name: "extra-info/key",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		return "value " + url.URL.String(), nil
+	},
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.Equals, "value cs:~charmers/precise/wordpress-23")
+	},
+}, {
+	name: "common-info",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		return map[string]string{
+			"key": "value " + url.URL.String(),
+		}, nil
+	},
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.DeepEquals, map[string]string{
+			"key": "value cs:~charmers/precise/wordpress-23",
+		})
+	},
+}, {
+	name: "common-info/key",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		return "value " + url.URL.String(), nil
+	},
+	checkURL: newResolvedURL("~charmers/precise/wordpress-23", 23),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.Equals, "value cs:~charmers/precise/wordpress-23")
+	},
+}, {
+	name: "perm",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		e, err := store.FindBaseEntity(&url.URL)
+		if err != nil {
+			return nil, err
+		}
+		return params.PermResponse{
+			Read:  e.ACLs.Read,
+			Write: e.ACLs.Write,
+		}, nil
+	},
+	checkURL: newResolvedURL("~bob/utopic/wordpress-2", -1),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.DeepEquals, params.PermResponse{
+			Read:  []string{params.Everyone, "bob"},
+			Write: []string{"bob"},
+		})
+	},
+}, {
+	name: "perm/read",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		e, err := store.FindBaseEntity(&url.URL)
+		if err != nil {
+			return nil, err
+		}
+		return e.ACLs.Read, nil
+	},
+	checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, gc.DeepEquals, []string{params.Everyone, "bob"})
+	},
+}, {
+	name: "tags",
+	get: entityGetter(func(entity *mongodoc.Entity) interface{} {
+		if entity.URL.Series == "bundle" {
+			return params.TagsResponse{entity.BundleData.Tags}
+		}
+		if len(entity.CharmMeta.Tags) > 0 {
+			return params.TagsResponse{entity.CharmMeta.Tags}
+		}
+		return params.TagsResponse{entity.CharmMeta.Categories}
+	}),
+	checkURL: newResolvedURL("~charmers/utopic/category-2", 2),
+	assertCheckData: func(c *gc.C, data interface{}) {
+		c.Assert(data, jc.DeepEquals, params.TagsResponse{
+			Tags: []string{"openstack", "storage"},
+		})
+	},
+}, {
+	name: "id-user",
+	get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) {
+		return params.IdUserResponse{url.PreferredURL().User}, nil
+	},
+	checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1),
+ assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdUserResponse{"bob"}) + }, +}, { + name: "id-series", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdSeriesResponse{url.URL.Series}, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdSeriesResponse{"utopic"}) + }, +}, { + name: "id-name", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdNameResponse{url.URL.Name}, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdNameResponse{"category"}) + }, +}, { + name: "id-revision", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + return params.IdRevisionResponse{url.PreferredURL().Revision}, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.IdRevisionResponse{2}) + }, +}, { + name: "id", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + id := url.PreferredURL() + return params.IdResponse{ + Id: id, + User: id.User, + Series: id.Series, + Name: id.Name, + Revision: id.Revision, + }, nil + }, + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, jc.DeepEquals, params.IdResponse{ + Id: charm.MustParseURL("cs:utopic/category-2"), + User: "", + Series: "utopic", + Name: "category", + Revision: 2, + }) + }, +}, { + name: "promulgated", + get: func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + e, err := store.FindBaseEntity(&url.URL) + if err != nil { + return nil, err + } + return params.PromulgatedResponse{ + Promulgated: bool(e.Promulgated), + }, nil + }, + checkURL: newResolvedURL("cs:~bob/utopic/wordpress-2", -1), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, gc.Equals, params.PromulgatedResponse{Promulgated: false}) + }, +}, { + name: "supported-series", + get: entityGetter(func(entity *mongodoc.Entity) interface{} { + if entity.URL.Series == "bundle" { + return nil + } + return params.SupportedSeriesResponse{ + SupportedSeries: entity.SupportedSeries, + } + }), + checkURL: newResolvedURL("~charmers/utopic/category-2", 2), + assertCheckData: func(c *gc.C, data interface{}) { + c.Assert(data, jc.DeepEquals, params.SupportedSeriesResponse{ + SupportedSeries: []string{"utopic"}, + }) + }, +}} + +// TestEndpointGet tries to ensure that the endpoint +// test data getters correspond with reality. +func (s *APISuite) TestEndpointGet(c *gc.C) { + s.addTestEntities(c) + for i, ep := range metaEndpoints { + c.Logf("test %d: %s\n", i, ep.name) + data, err := ep.get(s.store, ep.checkURL) + c.Assert(err, gc.IsNil) + ep.assertCheckData(c, data) + } +} + +func (s *APISuite) TestAllMetaEndpointsTested(c *gc.C) { + // Make sure that we're testing all the metadata + // endpoints that we need to. 
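+	// We do that by comparing the endpoint names returned by a GET of the
+	// bare .../meta path with the names in the metaEndpoints table above.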
+ s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta"), + }) + c.Logf("meta response body: %s", rec.Body) + var list []string + err := json.Unmarshal(rec.Body.Bytes(), &list) + c.Assert(err, gc.IsNil) + + listNames := make(map[string]bool) + for _, name := range list { + c.Assert(listNames[name], gc.Equals, false, gc.Commentf("name %s", name)) + listNames[name] = true + } + + testNames := make(map[string]bool) + for _, test := range metaEndpoints { + if strings.Contains(test.name, "/") { + continue + } + testNames[test.name] = true + } + c.Assert(testNames, jc.DeepEquals, listNames) +} + +var testEntities = []*router.ResolvedURL{ + // A stock charm. + newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + // A stock bundle. + newResolvedURL("cs:~charmers/bundle/wordpress-simple-42", 42), + // A charm with some actions. + newResolvedURL("cs:~charmers/precise/dummy-10", 10), + // A charm with some tags. + newResolvedURL("cs:~charmers/utopic/category-2", 2), + // A charm with a different user. + newResolvedURL("cs:~bob/utopic/wordpress-2", -1), +} + +func (s *APISuite) addTestEntities(c *gc.C) []*router.ResolvedURL { + for _, e := range testEntities { + if e.URL.Series == "bundle" { + s.addPublicBundle(c, e.URL.Name, e) + } else { + s.addPublicCharm(c, e.URL.Name, e) + } + // Associate some extra-info data with the entity. + key := e.URL.Path() + "/meta/extra-info/key" + commonkey := e.URL.Path() + "/meta/common-info/key" + s.assertPut(c, key, "value "+e.URL.String()) + s.assertPut(c, commonkey, "value "+e.URL.String()) + } + return testEntities +} + +func (s *APISuite) TestMetaEndpointsSingle(c *gc.C) { + urls := s.addTestEntities(c) + for i, ep := range metaEndpoints { + c.Logf("test %d. 
%s", i, ep.name) + tested := false + for _, url := range urls { + charmId := strings.TrimPrefix(url.String(), "cs:") + path := charmId + "/meta/" + ep.name + expectData, err := ep.get(s.store, url) + c.Assert(err, gc.IsNil) + c.Logf(" expected data for %q: %#v", url, expectData) + if isNull(expectData) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Message: params.ErrMetadataNotFound.Error(), + Code: params.ErrMetadataNotFound, + }, + }) + continue + } + tested = true + c.Logf(" path %q: %#v", url, path) + s.assertGet(c, path, expectData) + } + if !tested { + c.Errorf("endpoint %q is null for all endpoints, so is not properly tested", ep.name) + } + } +} + +func (s *APISuite) TestMetaPermAudit(c *gc.C) { + var calledEntities []audit.Entry + s.PatchValue(v5.TestAddAuditCallback, func(e audit.Entry) { + calledEntities = append(calledEntities, e) + }) + s.discharge = dischargeForUser("bob") + + url := newResolvedURL("~bob/precise/wordpress-23", 23) + s.addPublicCharm(c, "wordpress", url) + s.assertPutNonAdmin(c, "precise/wordpress-23/meta/perm/read", []string{"charlie"}) + c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{{ + User: "bob", + Op: audit.OpSetPerm, + ACL: &audit.ACL{ + Read: []string{"charlie"}, + }, + Entity: charm.MustParseURL("~bob/precise/wordpress-23"), + }}) + calledEntities = []audit.Entry{} + + s.assertPut(c, "precise/wordpress-23/meta/perm/write", []string{"bob", "foo"}) + c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{{ + User: "admin", + Op: audit.OpSetPerm, + ACL: &audit.ACL{ + Write: []string{"bob", "foo"}, + }, + Entity: charm.MustParseURL("~bob/precise/wordpress-23"), + }}) + calledEntities = []audit.Entry{} + + s.assertPutNonAdmin(c, "precise/wordpress-23/meta/perm", params.PermRequest{ + Read: []string{"a"}, + Write: []string{"b", "c"}, + }) + c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{{ + User: "bob", + Op: audit.OpSetPerm, + ACL: &audit.ACL{ + Read: []string{"a"}, + }, + Entity: charm.MustParseURL("~bob/precise/wordpress-23"), + }, { + User: "bob", + Op: audit.OpSetPerm, + ACL: &audit.ACL{ + Write: []string{"b", "c"}, + }, + Entity: charm.MustParseURL("~bob/precise/wordpress-23"), + }}) +} + +func (s *APISuite) TestMetaPerm(c *gc.C) { + // Create a charm store server that will use the test third party for + // its third party caveat. + s.discharge = dischargeForUser("bob") + + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1)) + s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + s.assertGet(c, "development/wordpress/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + e, err := s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.ACLs.Read, gc.DeepEquals, []string{params.Everyone, "charmers"}) + + // Change the published read perms to only include a specific user and the + // published write perms to include an "admin" user. 
+ s.assertPut(c, "precise/wordpress-23/meta/perm/read", []string{"bob"}) + s.assertPut(c, "precise/wordpress-23/meta/perm/write", []string{"admin"}) + + // Check that the perms have changed for all revisions and series. + for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { + c.Logf("id %d: %q", i, u) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL(u + "/meta/perm"), + ExpectBody: params.PermResponse{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }, + }) + // The development perms did not mutate. + s.assertGet(c, "development/"+u+"/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + } + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"charmers"}, + }) + + // Try restoring everyone's read permission on the published charm, and + // adding write permissions to bob for the development charm. + s.assertPut(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) + s.assertPut(c, "development/wordpress/meta/perm/write", []string{"bob", "admin"}) + s.assertGet(c, "wordpress/meta/perm", params.PermResponse{ + Read: []string{"bob", params.Everyone}, + Write: []string{"admin"}, + }) + s.assertGet(c, "development/wordpress/meta/perm", params.PermResponse{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"bob", "admin"}, + }) + s.assertGet(c, "wordpress/meta/perm/read", []string{"bob", params.Everyone}) + s.assertGet(c, "development/wordpress/meta/perm/read", []string{params.Everyone, "charmers"}) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsTrue) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob", params.Everyone}, + Write: []string{"admin"}, + }) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{params.Everyone, "charmers"}, + Write: []string{"bob", "admin"}, + }) + + // Try deleting all development permissions. + s.assertPut(c, "development/wordpress/meta/perm/read", []string{}) + s.assertPut(c, "development/wordpress/meta/perm/write", []string{}) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL("development/wordpress/meta/perm"), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "bob"`, + }, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{}) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsTrue) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{}) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob", params.Everyone}, + Write: []string{"admin"}, + }) + + // Try deleting all published permissions. 
+ s.assertPut(c, "wordpress/meta/perm/read", []string{}) + s.assertPut(c, "wordpress/meta/perm/write", []string{}) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL("wordpress/meta/perm"), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "bob"`, + }, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{}) + c.Assert(e.ACLs.Read, gc.DeepEquals, []string{}) + + // Try setting all published permissions in one request. + s.assertPut(c, "wordpress/meta/perm", params.PermRequest{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"bob"}, + Write: []string{"admin"}, + }) + + // Try setting all development permissions in one request. + s.assertPut(c, "development/wordpress/meta/perm", params.PermRequest{ + Read: []string{"who", params.Everyone}, + Write: []string{"who"}, + }) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.DevelopmentACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"who", params.Everyone}, + Write: []string{"who"}, + }) + + // Try only read permissions to published meta/perm endpoint. + var readRequest = struct { + Read []string + }{Read: []string{"joe"}} + s.assertPut(c, "wordpress/meta/perm", readRequest) + e, err = s.store.FindBaseEntity(charm.MustParseURL("precise/wordpress-23")) + c.Assert(err, gc.IsNil) + c.Assert(e.Public, jc.IsFalse) + c.Assert(e.ACLs, jc.DeepEquals, mongodoc.ACL{ + Read: []string{"joe"}, + Write: []string{}, + }) +} + +func (s *APISuite) TestMetaPermPutUnauthorized(c *gc.C) { + id := "precise/wordpress-23" + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.noMacaroonSrv, + URL: storeURL("~charmers/" + id + "/meta/perm/read"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: strings.NewReader(`["some-user"]`), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "authentication failed: missing HTTP auth header", + }, + }) +} + +func (s *APISuite) TestExtraInfo(c *gc.C) { + id := "precise/wordpress-23" + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/"+id, 23)) + s.checkInfo(c, "extra-info", id) + s.checkInfo(c, "common-info", id) +} + +func (s *APISuite) checkInfo(c *gc.C, path string, id string) { + // Add one value and check that it's there. + s.assertPut(c, id+"/meta/"+path+"/foo", "fooval") + s.assertGet(c, id+"/meta/"+path+"/foo", "fooval") + s.assertGet(c, id+"/meta/"+path, map[string]string{ + "foo": "fooval", + }) + + // Add another value and check that both values are there. + s.assertPut(c, id+"/meta/"+path+"/bar", "barval") + s.assertGet(c, id+"/meta/"+path+"/bar", "barval") + s.assertGet(c, id+"/meta/"+path, map[string]string{ + "foo": "fooval", + "bar": "barval", + }) + + // Overwrite a value and check that it's changed. 
+ s.assertPut(c, id+"/meta/"+path+"/foo", "fooval2") + s.assertGet(c, id+"/meta/"+path+"/foo", "fooval2") + s.assertGet(c, id+"/meta/"+path+"", map[string]string{ + "foo": "fooval2", + "bar": "barval", + }) + + // Write several values at once. + s.assertPut(c, id+"/meta/any", params.MetaAnyResponse{ + Meta: map[string]interface{}{ + path: map[string]string{ + "foo": "fooval3", + "baz": "bazval", + }, + path + "/frob": []int{1, 4, 6}, + }, + }) + s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ + "foo": "fooval3", + "baz": "bazval", + "bar": "barval", + "frob": []int{1, 4, 6}, + }) + + // Delete a single value. + s.assertPut(c, id+"/meta/"+path+"/foo", nil) + s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ + "baz": "bazval", + "bar": "barval", + "frob": []int{1, 4, 6}, + }) + + // Delete a value and add some values at the same time. + s.assertPut(c, id+"/meta/any", params.MetaAnyResponse{ + Meta: map[string]interface{}{ + path: map[string]interface{}{ + "baz": nil, + "bar": nil, + "dazzle": "x", + "fizzle": "y", + }, + }, + }) + s.assertGet(c, id+"/meta/"+path, map[string]interface{}{ + "frob": []int{1, 4, 6}, + "dazzle": "x", + "fizzle": "y", + }) +} + +var extraInfoBadPutRequestsTests = []struct { + about string + key string + body interface{} + contentType string + expectStatus int + expectBody params.Error +}{{ + about: "key with extra element", + key: "foo/bar", + body: "hello", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "key with a dot", + key: "foo.bar", + body: "hello", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "key with a dollar", + key: "foo$bar", + body: "hello", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with extra element", + key: "", + body: map[string]string{ + "foo/bar": "value", + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with dot", + key: "", + body: map[string]string{ + ".bar": "value", + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with dollar", + key: "", + body: map[string]string{ + "$bar": "value", + }, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad key for $1", + }, +}, { + about: "multi key with bad map", + key: "", + body: "bad", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `cannot unmarshal $1 body: json: cannot unmarshal string into Go value of type map[string]*json.RawMessage`, + }, +}} + +func (s *APISuite) TestExtraInfoBadPutRequests(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + path := "precise/wordpress-23/meta/" + for i, test := range extraInfoBadPutRequestsTests { + c.Logf("test %d: %s", i, test.about) + contentType := test.contentType + if contentType == "" { + contentType = "application/json" + } + extraBodyMessage := strings.Replace(test.expectBody.Message, "$1", "extra-info", -1) + commonBodyMessage := strings.Replace(test.expectBody.Message, "$1", "common-info", -1) + test.expectBody.Message = extraBodyMessage + 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path + "extra-info/" + test.key), + Method: "PUT", + Header: http.Header{ + "Content-Type": {contentType}, + }, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(mustMarshalJSON(test.body)), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + test.expectBody.Message = commonBodyMessage + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path + "common-info/" + test.key), + Method: "PUT", + Header: http.Header{ + "Content-Type": {contentType}, + }, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(mustMarshalJSON(test.body)), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *APISuite) TestExtraInfoPutUnauthorized(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/extra-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusProxyAuthRequired, + ExpectBody: dischargeRequiredBody, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/extra-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + "Bakery-Protocol-Version": {"1"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusUnauthorized, + ExpectHeader: http.Header{ + "WWW-Authenticate": {"Macaroon"}, + }, + ExpectBody: dischargeRequiredBody, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/common-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusProxyAuthRequired, + ExpectBody: dischargeRequiredBody, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/common-info"), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + "Bakery-Protocol-Version": {"1"}, + }, + Body: strings.NewReader(mustMarshalJSON(map[string]string{ + "bar": "value", + })), + ExpectStatus: http.StatusUnauthorized, + ExpectHeader: http.Header{ + "WWW-Authenticate": {"Macaroon"}, + }, + ExpectBody: dischargeRequiredBody, + }) +} + +func (s *APISuite) TestCommonInfo(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-23", 23)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/precise/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/trusty/wordpress-1", 1)) + + s.assertPut(c, "wordpress/meta/common-info/key", "something") + + s.assertGet(c, "wordpress/meta/common-info", map[string]string{ + "key": "something", + }) + for i, u := range []string{"precise/wordpress-23", "precise/wordpress-24", "trusty/wordpress-1"} { + c.Logf("id %d: %q", i, u) + s.assertGet(c, u+"/meta/common-info", map[string]string{ + "key": "something", + }) + e, err := s.store.FindBaseEntity(charm.MustParseURL(u)) + c.Assert(err, gc.IsNil) + c.Assert(e.CommonInfo, 
gc.DeepEquals, map[string][]byte{ + "key": []byte("\"something\""), + }) + } +} + +func isNull(v interface{}) bool { + data, err := json.Marshal(v) + if err != nil { + panic(err) + } + return string(data) == "null" +} + +func (s *APISuite) TestMetaEndpointsAny(c *gc.C) { + rurls := s.addTestEntities(c) + // We check the meta endpoint for both promulgated and non-promulgated + // versions of each URL. + urls := make([]*router.ResolvedURL, 0, len(rurls)*2) + for _, rurl := range rurls { + urls = append(urls, rurl) + if rurl.PromulgatedRevision != -1 { + rurl1 := *rurl + rurl1.PromulgatedRevision = -1 + urls = append(urls, &rurl1) + } + } + for _, url := range urls { + charmId := strings.TrimPrefix(url.String(), "cs:") + var flags []string + expectData := params.MetaAnyResponse{ + Id: url.PreferredURL(), + Meta: make(map[string]interface{}), + } + for _, ep := range metaEndpoints { + flags = append(flags, "include="+ep.name) + isBundle := url.URL.Series == "bundle" + if ep.exclusive != 0 && isBundle != (ep.exclusive == bundleOnly) { + // endpoint not relevant. + continue + } + val, err := ep.get(s.store, url) + c.Assert(err, gc.IsNil) + if val != nil { + expectData.Meta[ep.name] = val + } + } + s.assertGet(c, charmId+"/meta/any?"+strings.Join(flags, "&"), expectData) + } +} + +func (s *APISuite) TestMetaAnyWithNoIncludesAndNoEntity(c *gc.C) { + wordpressURL, _ := s.addPublicCharm( + c, + "wordpress", + newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + ) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-1/meta/any"), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:precise/wordpress-1"`, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("meta/any?id=precise/wordpress-23&id=precise/wordpress-1"), + ExpectStatus: http.StatusOK, + ExpectBody: map[string]interface{}{ + "precise/wordpress-23": params.MetaAnyResponse{ + Id: wordpressURL.PreferredURL(), + }, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/any"), + ExpectStatus: http.StatusOK, + ExpectBody: params.MetaAnyResponse{ + Id: wordpressURL.PreferredURL(), + }, + }) +} + +// In this test we rely on the charm.v2 testing repo package and +// dummy charm that has actions included. +func (s *APISuite) TestMetaCharmActions(c *gc.C) { + url, dummy := s.addPublicCharm(c, "dummy", newResolvedURL("cs:~charmers/precise/dummy-10", 10)) + s.assertGet(c, "precise/dummy-10/meta/charm-actions", dummy.Actions()) + s.assertGet(c, "precise/dummy-10/meta/any?include=charm-actions", + params.MetaAnyResponse{ + Id: url.PreferredURL(), + Meta: map[string]interface{}{ + "charm-actions": dummy.Actions(), + }, + }, + ) +} + +func (s *APISuite) TestBulkMeta(c *gc.C) { + // We choose an arbitrary set of ids and metadata here, just to smoke-test + // whether the meta/any logic is hooked up correctly. + // Detailed tests for this feature are in the router package. 
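+	// A bulk request batches ids as repeated query parameters, e.g.
+	// (illustrative):
+	//
+	//	GET meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10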
+ + _, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + _, mysql := s.addPublicCharm(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) + s.assertGet(c, + "meta/charm-metadata?id=precise/wordpress-23&id=precise/mysql-10", + map[string]*charm.Meta{ + "precise/wordpress-23": wordpress.Meta(), + "precise/mysql-10": mysql.Meta(), + }, + ) +} + +func (s *APISuite) TestBulkMetaAny(c *gc.C) { + // We choose an arbitrary set of metadata here, just to smoke-test + // whether the meta/any logic is hooked up correctly. + // Detailed tests for this feature are in the router package. + + wordpressURL, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + mysqlURL, mysql := s.addPublicCharm(c, "mysql", newResolvedURL("cs:~charmers/precise/mysql-10", 10)) + s.assertGet(c, + "meta/any?include=charm-metadata&include=charm-config&id=precise/wordpress-23&id=precise/mysql-10", + map[string]params.MetaAnyResponse{ + "precise/wordpress-23": { + Id: wordpressURL.PreferredURL(), + Meta: map[string]interface{}{ + "charm-config": wordpress.Config(), + "charm-metadata": wordpress.Meta(), + }, + }, + "precise/mysql-10": { + Id: mysqlURL.PreferredURL(), + Meta: map[string]interface{}{ + "charm-config": mysql.Config(), + "charm-metadata": mysql.Meta(), + }, + }, + }, + ) +} + +var metaCharmTagsTests = []struct { + about string + tags []string + categories []string + expectTags []string +}{{ + about: "tags only", + tags: []string{"foo", "bar"}, + expectTags: []string{"foo", "bar"}, +}, { + about: "categories only", + categories: []string{"foo", "bar"}, + expectTags: []string{"foo", "bar"}, +}, { + about: "tags and categories", + categories: []string{"foo", "bar"}, + tags: []string{"tag1", "tag2"}, + expectTags: []string{"tag1", "tag2"}, +}, { + about: "no tags or categories", +}} + +func (s *APISuite) TestMetaCharmTags(c *gc.C) { + url := newResolvedURL("~charmers/precise/wordpress-0", -1) + for i, test := range metaCharmTagsTests { + c.Logf("%d: %s", i, test.about) + wordpress := storetesting.Charms.CharmDir("wordpress") + meta := wordpress.Meta() + meta.Tags, meta.Categories = test.tags, test.categories + url.URL.Revision = i + err := s.store.AddCharm(&testMetaCharm{ + meta: meta, + Charm: wordpress, + }, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.URL.Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{test.expectTags}, + }) + } +} + +func (s *APISuite) TestPromulgatedMetaCharmTags(c *gc.C) { + url := newResolvedURL("~charmers/precise/wordpress-0", 0) + for i, test := range metaCharmTagsTests { + c.Logf("%d: %s", i, test.about) + wordpress := storetesting.Charms.CharmDir("wordpress") + meta := wordpress.Meta() + meta.Tags, meta.Categories = test.tags, test.categories + url.URL.Revision = i + url.PromulgatedRevision = i + err := s.store.AddCharm(&testMetaCharm{ + meta: meta, + Charm: wordpress, + }, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + 
httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.PromulgatedURL().Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{test.expectTags}, + }) + } +} + +func (s *APISuite) TestBundleTags(c *gc.C) { + b := storetesting.Charms.BundleDir("wordpress-simple") + url := newResolvedURL("~charmers/bundle/wordpress-2", -1) + data := b.Data() + data.Tags = []string{"foo", "bar"} + err := s.store.AddBundle(&testingBundle{data}, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.URL.Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, + }) +} + +func (s *APISuite) TestPromulgatedBundleTags(c *gc.C) { + b := storetesting.Charms.BundleDir("wordpress-simple") + url := newResolvedURL("~charmers/bundle/wordpress-2", 2) + data := b.Data() + data.Tags = []string{"foo", "bar"} + err := s.store.AddBundle(&testingBundle{data}, charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(url.PromulgatedURL().Path() + "/meta/tags"), + ExpectStatus: http.StatusOK, + ExpectBody: params.TagsResponse{[]string{"foo", "bar"}}, + }) +} + +type testMetaCharm struct { + meta *charm.Meta + charm.Charm +} + +func (c *testMetaCharm) Meta() *charm.Meta { + return c.meta +} + +func (s *APISuite) TestIdsAreResolved(c *gc.C) { + // This is just testing that ResolveURL is actually + // passed to the router. Given how Router is + // defined, and the ResolveURL tests, this should + // be sufficient to "join the dots". 
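+	// The partial id "wordpress" in the GET below must resolve to the
+	// fully qualified cs:~charmers/precise/wordpress-23 added here.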
+ _, wordpress := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + s.assertGet(c, "wordpress/meta/charm-metadata", wordpress.Meta()) +} + +func (s *APISuite) TestMetaCharmNotFound(c *gc.C) { + for i, ep := range metaEndpoints { + c.Logf("test %d: %s", i, ep.name) + expected := params.Error{ + Message: `no matching charm or bundle for "cs:precise/wordpress-23"`, + Code: params.ErrNotFound, + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/" + ep.name), + ExpectStatus: http.StatusNotFound, + ExpectBody: expected, + }) + expected.Message = `no matching charm or bundle for "cs:wordpress"` + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("wordpress/meta/" + ep.name), + ExpectStatus: http.StatusNotFound, + ExpectBody: expected, + }) + } +} + +var resolveURLTests = []struct { + url string + expect *router.ResolvedURL + notFound bool +}{{ + url: "wordpress", + expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", 25), +}, { + url: "development/wordpress", + expect: newResolvedURL("cs:~charmers/development/trusty/wordpress-25", 25), +}, { + url: "precise/wordpress", + expect: newResolvedURL("cs:~charmers/precise/wordpress-24", 24), +}, { + url: "development/precise/wordpress", + expect: newResolvedURL("cs:~charmers/development/precise/wordpress-24", 24), +}, { + url: "utopic/bigdata", + expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), +}, { + url: "development/utopic/bigdata", + expect: newResolvedURL("cs:~charmers/development/utopic/bigdata-10", 10), +}, { + url: "~charmers/precise/wordpress", + expect: newResolvedURL("cs:~charmers/precise/wordpress-24", -1), +}, { + url: "~charmers/development/precise/wordpress", + expect: newResolvedURL("cs:~charmers/development/precise/wordpress-24", -1), +}, { + url: "~charmers/precise/wordpress-99", + notFound: true, +}, { + url: "~charmers/development/precise/wordpress-99", + notFound: true, +}, { + url: "~charmers/wordpress", + expect: newResolvedURL("cs:~charmers/trusty/wordpress-25", -1), +}, { + url: "~charmers/development/wordpress", + expect: newResolvedURL("cs:~charmers/development/trusty/wordpress-25", -1), +}, { + url: "~charmers/wordpress-24", + notFound: true, +}, { + url: "~charmers/development/wordpress-24", + notFound: true, +}, { + url: "~bob/wordpress", + expect: newResolvedURL("cs:~bob/trusty/wordpress-1", -1), +}, { + url: "~bob/development/wordpress", + expect: newResolvedURL("cs:~bob/development/trusty/wordpress-1", -1), +}, { + url: "~bob/precise/wordpress", + expect: newResolvedURL("cs:~bob/precise/wordpress-2", -1), +}, { + url: "~bob/development/precise/wordpress", + expect: newResolvedURL("cs:~bob/development/precise/wordpress-2", -1), +}, { + url: "bigdata", + expect: newResolvedURL("cs:~charmers/utopic/bigdata-10", 10), +}, { + url: "development/bigdata", + expect: newResolvedURL("cs:~charmers/development/utopic/bigdata-10", 10), +}, { + url: "wordpress-24", + notFound: true, +}, { + url: "development/wordpress-24", + notFound: true, +}, { + url: "bundlelovin", + expect: newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10), +}, { + url: "development/bundlelovin", + expect: newResolvedURL("cs:~charmers/development/bundle/bundlelovin-10", 10), +}, { + url: "wordpress-26", + notFound: true, +}, { + url: "development/wordpress-26", + notFound: true, +}, { + url: "foo", + notFound: true, +}, { + url: "development/foo", + notFound: true, +}, { + url: 
"trusty/bigdata", + notFound: true, +}, { + url: "development/trusty/bigdata", + notFound: true, +}, { + url: "~bob/wily/django-47", + notFound: true, +}, { + url: "~bob/django", + notFound: true, +}, { + url: "wily/django", + notFound: true, +}, { + url: "django", + notFound: true, +}, { + url: "~bob/development/wily/django-47", + expect: newResolvedURL("cs:~bob/development/wily/django-47", -1), +}, { + url: "~bob/development/wily/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", -1), +}, { + url: "~bob/development/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", -1), +}, { + url: "development/wily/django-27", + expect: newResolvedURL("cs:~bob/development/wily/django-47", 27), +}, { + url: "development/wily/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", 27), +}, { + url: "development/django", + expect: newResolvedURL("cs:~bob/development/wily/django-47", 27), +}, { + url: "~bob/trusty/haproxy-0", + notFound: true, +}, { + url: "~bob/haproxy", + notFound: true, +}, { + url: "trusty/haproxy", + notFound: true, +}, { + url: "haproxy", + notFound: true, +}, { + url: "~bob/development/trusty/haproxy-0", + expect: newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1), +}, { + url: "~bob/development/trusty/haproxy", + expect: newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1), +}, { + url: "~bob/development/haproxy", + expect: newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1), +}, { + url: "~bob/development/trusty/haproxy-1", + notFound: true, +}, { + url: "development/trusty/haproxy-27", + notFound: true, +}, { + url: "development/trusty/haproxy", + notFound: true, +}, { + url: "development/haproxy", + notFound: true, +}, { + url: "~bob/multi-series", + expect: newResolvedURL("cs:~bob/multi-series-0", -1), +}, { + url: "~bob/utopic/multi-series", + expect: newResolvedURL("cs:~bob/multi-series-0", -1), +}} + +func (s *APISuite) TestResolveURL(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-23", 23)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-24", 24)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-25", 25)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-10", 10)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/saucy/bigdata-99", 99)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/bigdata-10", 10)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/trusty/wordpress-1", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/wordpress-2", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/other-2", -1)) + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/bundlelovin-10", 10)) + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-10", 10)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/development/wily/django-47", 27)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/development/trusty/haproxy-0", -1)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~bob/multi-series-0", -1)) + + for i, test := range resolveURLTests { + c.Logf("test %d: %s", i, test.url) + url := charm.MustParseURL(test.url) + rurl, err := v5.ResolveURL(s.store, url) + if test.notFound { + c.Assert(errgo.Cause(err), 
gc.Equals, params.ErrNotFound) + c.Assert(err, gc.ErrorMatches, `no matching charm or bundle for ".*"`) + c.Assert(rurl, gc.IsNil) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(rurl, jc.DeepEquals, test.expect) + } +} + +var serveExpandIdTests = []struct { + about string + url string + expect []params.ExpandedId + err string +}{{ + about: "fully qualified URL", + url: "~charmers/trusty/wordpress-47", + expect: []params.ExpandedId{ + {Id: "cs:~charmers/utopic/wordpress-42"}, + {Id: "cs:~charmers/trusty/wordpress-47"}, + {Id: "cs:~charmers/wordpress-5"}, + }, +}, { + about: "fully qualified development URL", + url: "~charmers/development/trusty/wordpress-47", + expect: []params.ExpandedId{ + {Id: "cs:~charmers/utopic/wordpress-42"}, + {Id: "cs:~charmers/development/trusty/wordpress-48"}, + {Id: "cs:~charmers/trusty/wordpress-47"}, + {Id: "cs:~charmers/development/wordpress-7"}, + {Id: "cs:~charmers/development/wordpress-6"}, + {Id: "cs:~charmers/wordpress-5"}, + }, +}, { + about: "promulgated URL", + url: "trusty/wordpress-47", + expect: []params.ExpandedId{ + {Id: "cs:utopic/wordpress-42"}, + {Id: "cs:trusty/wordpress-47"}, + {Id: "cs:wordpress-49"}, + }, +}, { + about: "development promulgated URL", + url: "development/trusty/wordpress-48", + expect: []params.ExpandedId{ + {Id: "cs:utopic/wordpress-42"}, + {Id: "cs:development/trusty/wordpress-48"}, + {Id: "cs:trusty/wordpress-47"}, + {Id: "cs:development/wordpress-51"}, + {Id: "cs:development/wordpress-50"}, + {Id: "cs:wordpress-49"}, + }, +}, { + about: "non-promulgated charm", + url: "~bob/precise/builder", + expect: []params.ExpandedId{ + {Id: "cs:~bob/precise/builder-5"}, + }, +}, { + about: "non-promulgated charm with development URL", + url: "~bob/development/precise/builder", + expect: []params.ExpandedId{ + {Id: "cs:~bob/development/precise/builder-6"}, + {Id: "cs:~bob/precise/builder-5"}, + }, +}, { + about: "partial URL", + url: "haproxy", + expect: []params.ExpandedId{ + {Id: "cs:trusty/haproxy-1"}, + {Id: "cs:precise/haproxy-1"}, + }, +}, { + about: "revision with series matches bundles (and multi-series charms) only", + url: "mongo-0", + expect: []params.ExpandedId{ + {Id: "cs:bundle/mongo-0"}, + }, +}, { + about: "single result", + url: "bundle/mongo-0", + expect: []params.ExpandedId{ + {Id: "cs:bundle/mongo-0"}, + }, +}, { + about: "fully qualified URL with no entities found", + url: "~charmers/precise/no-such-42", + err: `no matching charm or bundle for "cs:~charmers/precise/no-such-42"`, +}, { + about: "partial URL with no entities found", + url: "no-such", + err: `no matching charm or bundle for "cs:no-such"`, +}} + +func (s *APISuite) TestServeExpandId(c *gc.C) { + // Add a bunch of entities in the database. + // Note that expand-id only cares about entity identifiers, + // so it is ok to reuse the same charm for all the entities. 
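+	// The ids below deliberately cover promulgated, non-promulgated,
+	// development and multi-series entities, matching the cases in
+	// serveExpandIdTests above.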
+ s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/utopic/wordpress-42", 42)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-47", 47)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/development/trusty/wordpress-48", 48)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/wordpress-5", 49)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/development/wordpress-6", 50)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/development/wordpress-7", 51)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/haproxy-1", 1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/haproxy-1", 1)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/precise/builder-5", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~bob/development/precise/builder-6", -1)) + + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/mongo-0", 0)) + s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/wordpress-simple-0", 0)) + + for i, test := range serveExpandIdTests { + c.Logf("test %d: %s", i, test.about) + storeURL := storeURL(test.url + "/expand-id") + var expectStatus int + var expectBody interface{} + if test.err == "" { + expectStatus = http.StatusOK + expectBody = test.expect + } else { + expectStatus = http.StatusNotFound + expectBody = params.Error{ + Code: params.ErrNotFound, + Message: test.err, + } + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) + } +} + +var serveMetaRevisionInfoTests = []struct { + about string + url string + expect params.RevisionInfoResponse + err string +}{{ + about: "fully qualified url", + url: "trusty/wordpress-42", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:trusty/wordpress-43"), + charm.MustParseURL("cs:trusty/wordpress-42"), + charm.MustParseURL("cs:trusty/wordpress-41"), + charm.MustParseURL("cs:trusty/wordpress-9"), + }, + }, +}, { + about: "partial url uses a default series", + url: "wordpress", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:trusty/wordpress-43"), + charm.MustParseURL("cs:trusty/wordpress-42"), + charm.MustParseURL("cs:trusty/wordpress-41"), + charm.MustParseURL("cs:trusty/wordpress-9"), + }, + }, +}, { + about: "non-promulgated URL gives non-promulgated revisions (~charmers)", + url: "~charmers/trusty/cinder", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:~charmers/trusty/cinder-6"), + charm.MustParseURL("cs:~charmers/trusty/cinder-5"), + charm.MustParseURL("cs:~charmers/trusty/cinder-4"), + charm.MustParseURL("cs:~charmers/trusty/cinder-3"), + charm.MustParseURL("cs:~charmers/trusty/cinder-2"), + charm.MustParseURL("cs:~charmers/trusty/cinder-1"), + charm.MustParseURL("cs:~charmers/trusty/cinder-0"), + }, + }, +}, { + about: "non-promulgated URL gives non-promulgated revisions (~openstack-charmers)", + url: "~openstack-charmers/trusty/cinder", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-1"), + charm.MustParseURL("cs:~openstack-charmers/trusty/cinder-0"), + }, + }, +}, { + about: "promulgated URL gives promulgated revisions", + url: "trusty/cinder", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:trusty/cinder-5"), + 
charm.MustParseURL("cs:trusty/cinder-4"), + charm.MustParseURL("cs:trusty/cinder-3"), + charm.MustParseURL("cs:trusty/cinder-2"), + charm.MustParseURL("cs:trusty/cinder-1"), + charm.MustParseURL("cs:trusty/cinder-0"), + }, + }, +}, { + about: "multi-series charm expands to all revisions of that charm", + url: "multi-series", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:multi-series-41"), + charm.MustParseURL("cs:multi-series-40"), + }, + }, +}, { + about: "multi-series charm with series specified", + url: "trusty/multi-series", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:multi-series-41"), + charm.MustParseURL("cs:multi-series-40"), + }, + }, +}, { + about: "multi-series charm with non-promulgated URL", + url: "~charmers/multi-series", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:~charmers/multi-series-2"), + charm.MustParseURL("cs:~charmers/multi-series-1"), + }, + }, +}, { + about: "multi-series charm with non-promulgated URL and series specified", + url: "~charmers/utopic/multi-series", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:~charmers/multi-series-2"), + charm.MustParseURL("cs:~charmers/multi-series-1"), + }, + }, +}, { + about: "mixed multi/single series charm, latest rev", + url: "mixed", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:mixed-43"), + charm.MustParseURL("cs:mixed-42"), + charm.MustParseURL("cs:trusty/mixed-41"), + charm.MustParseURL("cs:trusty/mixed-40"), + }, + }, +}, { + about: "mixed multi/single series charm with series", + url: "trusty/mixed-40", + expect: params.RevisionInfoResponse{ + []*charm.URL{ + charm.MustParseURL("cs:mixed-43"), + charm.MustParseURL("cs:mixed-42"), + charm.MustParseURL("cs:trusty/mixed-41"), + charm.MustParseURL("cs:trusty/mixed-40"), + }, + }, +}, { + about: "no entities found", + url: "precise/no-such-33", + err: `no matching charm or bundle for "cs:precise/no-such-33"`, +}} + +func (s *APISuite) TestServeMetaRevisionInfo(c *gc.C) { + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-41", 41)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mysql-42", 42)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-41", 41)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/precise/wordpress-42", 42)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-43", 43)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-9", 9)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/wordpress-42", 42)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-0", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-1", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-2", 0)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-3", 1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-0", 2)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~openstack-charmers/trusty/cinder-1", 3)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-4", -1)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-5", 4)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/cinder-6", 5)) + + s.addPublicCharm(c, 
"multi-series", newResolvedURL("cs:~charmers/multi-series-1", 40)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/multi-series-2", 41)) + + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-1", 40)) + s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~charmers/trusty/mixed-2", 41)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/mixed-3", 42)) + s.addPublicCharm(c, "multi-series", newResolvedURL("cs:~charmers/mixed-4", 43)) + + for i, test := range serveMetaRevisionInfoTests { + c.Logf("test %d: %s", i, test.about) + storeURL := storeURL(test.url + "/meta/revision-info") + var expectStatus int + var expectBody interface{} + if test.err == "" { + expectStatus = http.StatusOK + expectBody = test.expect + } else { + expectStatus = http.StatusNotFound + expectBody = params.Error{ + Code: params.ErrNotFound, + Message: test.err, + } + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) + } +} + +var metaStatsTests = []struct { + // about describes the test. + about string + // url is the entity id to use when making the meta/stats request. + url string + // downloads maps entity ids to a numeric key/value pair where the key is + // the number of days in the past when the entity was downloaded and the + // value is the number of downloads performed that day. + downloads map[string]map[int]int + // expectResponse is the expected response from the meta/stats endpoint. + expectResponse params.StatsResponse +}{{ + about: "no downloads", + url: "trusty/mysql-0", + downloads: map[string]map[int]int{"trusty/mysql-0": {}}, +}, { + about: "single download", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {0: 1}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 1, + ArchiveDownload: params.StatsCount{ + Total: 1, + Day: 1, + Week: 1, + Month: 1, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 1, + Day: 1, + Week: 1, + Month: 1, + }, + }, +}, { + about: "single download a long time ago", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {100: 1}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 1, + ArchiveDownload: params.StatsCount{ + Total: 1, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 1, + }, + }, +}, { + about: "some downloads this month", + url: "utopic/wordpress-47", + downloads: map[string]map[int]int{ + "utopic/wordpress-47": {20: 2, 25: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 2 + 5, + ArchiveDownload: params.StatsCount{ + Total: 2 + 5, + Month: 2 + 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 2 + 5, + Month: 2 + 5, + }, + }, +}, { + about: "multiple recent downloads", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {100: 1, 12: 3, 8: 5, 4: 10, 2: 1, 0: 3}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 1 + 3 + 5 + 10 + 1 + 3, + ArchiveDownload: params.StatsCount{ + Total: 1 + 3 + 5 + 10 + 1 + 3, + Day: 3, + Week: 10 + 1 + 3, + Month: 3 + 5 + 10 + 1 + 3, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 1 + 3 + 5 + 10 + 1 + 3, + Day: 3, + Week: 10 + 1 + 3, + Month: 3 + 5 + 10 + 1 + 3, + }, + }, +}, { + about: "sparse downloads", + url: "utopic/django-42", + downloads: map[string]map[int]int{ + "utopic/django-42": {200: 3, 27: 4, 3: 5}, + }, + 
expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 3 + 4 + 5, + ArchiveDownload: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + }, +}, { + about: "bundle downloads", + url: "bundle/django-simple-2", + downloads: map[string]map[int]int{ + "bundle/django-simple-2": {200: 3, 27: 4, 3: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 3 + 4 + 5, + ArchiveDownload: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 3 + 4 + 5, + Week: 5, + Month: 4 + 5, + }, + }, +}, { + about: "different charms", + url: "trusty/rails-47", + downloads: map[string]map[int]int{ + "utopic/rails-47": {200: 3, 27: 4, 3: 5}, + "trusty/rails-47": {20: 2, 6: 10}, + "trusty/mysql-0": {200: 1, 14: 2, 1: 7}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 2 + 10, + ArchiveDownload: params.StatsCount{ + Total: 2 + 10, + Week: 10, + Month: 2 + 10, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 2 + 10, + Week: 10, + Month: 2 + 10, + }, + }, +}, { + about: "different revisions of the same charm", + url: "precise/rails-1", + downloads: map[string]map[int]int{ + "precise/rails-0": {300: 1, 200: 2}, + "precise/rails-1": {100: 5, 10: 3, 2: 7}, + "precise/rails-2": {6: 10, 0: 9}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 5 + 3 + 7, + ArchiveDownload: params.StatsCount{ + Total: 5 + 3 + 7, + Week: 7, + Month: 3 + 7, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: (1 + 2) + (5 + 3 + 7) + (10 + 9), + Day: 0 + 0 + 9, + Week: 0 + 7 + (10 + 9), + Month: 0 + (3 + 7) + (10 + 9), + }, + }, +}, { + about: "downloads only in an old revision", + url: "trusty/wordpress-2", + downloads: map[string]map[int]int{ + "precise/wordpress-2": {2: 2, 0: 1}, + "trusty/wordpress-0": {100: 10}, + "trusty/wordpress-2": {}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 10, + }, + }, +}, { + about: "downloads only in newer revision", + url: "utopic/wordpress-0", + downloads: map[string]map[int]int{ + "utopic/wordpress-0": {}, + "utopic/wordpress-1": {31: 7, 10: 1, 3: 2, 0: 1}, + "utopic/wordpress-2": {6: 9, 0: 2}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: (7 + 1 + 2 + 1) + (9 + 2), + Day: 1 + 2, + Week: (2 + 1) + (9 + 2), + Month: (1 + 2 + 1) + (9 + 2), + }, + }, +}, { + about: "non promulgated charms", + url: "~who/utopic/django-0", + downloads: map[string]map[int]int{ + "utopic/django-0": {100: 1, 10: 2, 1: 3, 0: 4}, + "~who/utopic/django-0": {2: 5}, + }, + expectResponse: params.StatsResponse{ + ArchiveDownloadCount: 5, + ArchiveDownload: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: 5, + Week: 5, + Month: 5, + }, + }, +}} + +func (s *APISuite) TestMetaStats(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + // TODO (frankban): remove this call when removing the legacy counts logic. 
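+ // Legacy counts are disabled here so that the assertions below exercise + // only the stat-counter based download figures; the legacy path is + // covered separately by TestMetaStatsWithLegacyDownloadCounts.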
+ patchLegacyDownloadCountsEnabled(s.AddCleanup, false) + + today := time.Now() + for i, test := range metaStatsTests { + c.Logf("test %d: %s", i, test.about) + + for id, downloadsPerDay := range test.downloads { + url := &router.ResolvedURL{ + URL: *charm.MustParseURL(id), + PromulgatedRevision: -1, + } + if url.URL.User == "" { + url.URL.User = "charmers" + url.PromulgatedRevision = url.URL.Revision + } + + // Add the required entities to the database. + if url.URL.Series == "bundle" { + s.addPublicBundle(c, "wordpress-simple", url) + } else { + s.addPublicCharm(c, "wordpress", url) + } + + // Simulate the entity was downloaded at the specified dates. + for daysAgo, downloads := range downloadsPerDay { + date := today.AddDate(0, 0, -daysAgo) + key := []string{params.StatsArchiveDownload, url.URL.Series, url.URL.Name, url.URL.User, strconv.Itoa(url.URL.Revision)} + for i := 0; i < downloads; i++ { + err := s.store.IncCounterAtTime(key, date) + c.Assert(err, gc.IsNil) + } + if url.PromulgatedRevision > -1 { + key := []string{params.StatsArchiveDownloadPromulgated, url.URL.Series, url.URL.Name, "", strconv.Itoa(url.PromulgatedRevision)} + for i := 0; i < downloads; i++ { + err := s.store.IncCounterAtTime(key, date) + c.Assert(err, gc.IsNil) + } + } + } + } + // Ensure the meta/stats response reports the correct downloads count. + s.assertGet(c, test.url+"/meta/stats", test.expectResponse) + + // Clean up the collections. + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.StatCounters().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var metaStatsWithLegacyDownloadCountsTests = []struct { + about string + count string + expectValue int64 + expectError string +}{{ + about: "no extra-info", +}, { + about: "zero downloads", + count: "0", +}, { + about: "some downloads", + count: "47", + expectValue: 47, +}, { + about: "invalid value", + count: "invalid", + expectError: "cannot unmarshal extra-info value: invalid character 'i' looking for beginning of value", +}} + +// Tests meta/stats with LegacyDownloadCountsEnabled set to true. +// TODO (frankban): remove this test case when removing the legacy counts +// logic. +func (s *APISuite) TestMetaStatsWithLegacyDownloadCounts(c *gc.C) { + patchLegacyDownloadCountsEnabled(s.AddCleanup, true) + id, _ := s.addPublicCharm(c, "wordpress", newResolvedURL("~charmers/utopic/wordpress-42", 42)) + url := storeURL("utopic/wordpress-42/meta/stats") + + for i, test := range metaStatsWithLegacyDownloadCountsTests { + c.Logf("test %d: %s", i, test.about) + + // Update the entity extra info if required. + if test.count != "" { + extraInfo := map[string][]byte{ + params.LegacyDownloadStats: []byte(test.count), + } + err := s.store.UpdateEntity(id, bson.D{{ + "$set", bson.D{{"extrainfo", extraInfo}}, + }}) + c.Assert(err, gc.IsNil) + } + + var expectBody interface{} + var expectStatus int + if test.expectError == "" { + // Ensure the downloads count is correctly returned. + expectBody = params.StatsResponse{ + ArchiveDownloadCount: test.expectValue, + ArchiveDownload: params.StatsCount{ + Total: test.expectValue, + }, + ArchiveDownloadAllRevisions: params.StatsCount{ + Total: test.expectValue, + }, + } + expectStatus = http.StatusOK + } else { + // Ensure an error is returned. + expectBody = params.Error{ + Message: test.expectError, + } + expectStatus = http.StatusInternalServerError + } + + // Perform the request. 
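+ // Note that the error case is reported as a 500 because the failure + // happens while unmarshalling the stored extra-info value, not while + // parsing the request itself.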
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) + } +} + +type publishSpec struct { + id *router.ResolvedURL + time string + acl []string +} + +func (p publishSpec) published() params.Published { + t, err := time.Parse("2006-01-02 15:04", p.time) + if err != nil { + panic(err) + } + return params.Published{&p.id.URL, t} +} + +var publishedCharms = []publishSpec{{ + id: newResolvedURL("cs:~charmers/precise/wordpress-1", 1), + time: "5432-10-12 00:00", +}, { + id: newResolvedURL("cs:~charmers/precise/mysql-1", 1), + time: "5432-10-12 13:00", +}, { + id: newResolvedURL("cs:~charmers/precise/wordpress-2", 2), + time: "5432-10-12 23:59", +}, { + id: newResolvedURL("cs:~charmers/precise/mysql-2", 2), + time: "5432-10-13 00:00", +}, { + id: newResolvedURL("cs:~charmers/precise/mysql-5", 5), + time: "5432-10-13 10:00", +}, { + id: newResolvedURL("cs:~charmers/precise/wordpress-3", 3), + time: "5432-10-14 01:00", +}, { + id: newResolvedURL("cs:~charmers/precise/django-0", -1), + time: "5432-10-14 02:00", + acl: []string{"charmers"}, +}} + +var changesPublishedTests = []struct { + args string + // expect holds indexes into publishedCharms + // of the expected indexes returned by charms/published + expect []int +}{{ + args: "", + expect: []int{5, 4, 3, 2, 1, 0}, +}, { + args: "?start=5432-10-13", + expect: []int{5, 4, 3}, +}, { + args: "?stop=5432-10-13", + expect: []int{4, 3, 2, 1, 0}, +}, { + args: "?start=5432-10-13&stop=5432-10-13", + expect: []int{4, 3}, +}, { + args: "?start=5432-10-12&stop=5432-10-13", + expect: []int{4, 3, 2, 1, 0}, +}, { + args: "?start=5432-10-13&stop=5432-10-12", + expect: []int{}, +}, { + args: "?limit=3", + expect: []int{5, 4, 3}, +}, { + args: "?start=5432-10-12&stop=5432-10-13&limit=2", + expect: []int{4, 3}, +}} + +func (s *APISuite) TestChangesPublished(c *gc.C) { + s.publishCharmsAtKnownTimes(c, publishedCharms) + for i, test := range changesPublishedTests { + c.Logf("test %d: %q", i, test.args) + expect := make([]params.Published, len(test.expect)) + for j, index := range test.expect { + expect[j] = publishedCharms[index].published() + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("changes/published") + test.args, + ExpectBody: expect, + }) + } +} + +func (s *APISuite) TestChangesPublishedAdmin(c *gc.C) { + s.publishCharmsAtKnownTimes(c, publishedCharms) + expect := make([]params.Published, len(publishedCharms)) + for i := range expect { + expect[i] = publishedCharms[len(publishedCharms)-(i+1)].published() + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Username: testUsername, + Password: testPassword, + URL: storeURL("changes/published"), + ExpectBody: expect, + }) +} + +var changesPublishedErrorsTests = []struct { + args string + expect params.Error + status int +}{{ + args: "?limit=0", + expect: params.Error{ + Code: params.ErrBadRequest, + Message: "invalid 'limit' value", + }, + status: http.StatusBadRequest, +}, { + args: "?limit=-1", + expect: params.Error{ + Code: params.ErrBadRequest, + Message: "invalid 'limit' value", + }, + status: http.StatusBadRequest, +}, { + args: "?limit=-9999", + expect: params.Error{ + Code: params.ErrBadRequest, + Message: "invalid 'limit' value", + }, + status: http.StatusBadRequest, +}, { + args: "?start=baddate", + expect: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid 'start' value "baddate": parsing time "baddate" as 
"2006-01-02": cannot parse "baddate" as "2006"`, + }, + status: http.StatusBadRequest, +}, { + args: "?stop=baddate", + expect: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid 'stop' value "baddate": parsing time "baddate" as "2006-01-02": cannot parse "baddate" as "2006"`, + }, + status: http.StatusBadRequest, +}} + +func (s *APISuite) TestChangesPublishedErrors(c *gc.C) { + s.publishCharmsAtKnownTimes(c, publishedCharms) + for i, test := range changesPublishedErrorsTests { + c.Logf("test %d: %q", i, test.args) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("changes/published") + test.args, + ExpectStatus: test.status, + ExpectBody: test.expect, + }) + } +} + +// publishCharmsAtKnownTimes populates the store with +// a range of charms with known time stamps. +func (s *APISuite) publishCharmsAtKnownTimes(c *gc.C, charms []publishSpec) { + for _, ch := range publishedCharms { + id, _ := s.addPublicCharm(c, "wordpress", ch.id) + t := ch.published().PublishTime + err := s.store.UpdateEntity(id, bson.D{{"$set", bson.D{{"uploadtime", t}}}}) + c.Assert(err, gc.IsNil) + if len(ch.acl) > 0 { + err := s.store.SetPerms(&id.URL, "read", ch.acl...) + c.Assert(err, gc.IsNil) + } + } +} + +var debugPprofTests = []struct { + path string + match string +}{{ + path: "debug/pprof/", + match: `(?s).*profiles:.*heap.*`, +}, { + path: "debug/pprof/goroutine?debug=2", + match: "(?s)goroutine [0-9]+.*", +}, { + path: "debug/pprof/cmdline", + match: ".+charmstore.+", +}} + +func (s *APISuite) TestDebugPprof(c *gc.C) { + for i, test := range debugPprofTests { + c.Logf("test %d: %s", i, test.path) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Header: basicAuthHeader(testUsername, testPassword), + URL: storeURL(test.path), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) + c.Assert(rec.Body.String(), gc.Matches, test.match) + } +} + +func (s *APISuite) TestDebugPprofFailsWithoutAuth(c *gc.C) { + for i, test := range debugPprofTests { + c.Logf("test %d: %s", i, test.path) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + ExpectStatus: http.StatusProxyAuthRequired, + ExpectBody: dischargeRequiredBody, + }) + } +} + +func (s *APISuite) TestHash256Laziness(c *gc.C) { + // TODO frankban: remove this test after updating entities in the + // production db with their SHA256 hash value. Entities are updated by + // running the cshash256 command. + id, _ := s.addPublicCharm(c, "wordpress", newResolvedURL("cs:~who/precise/wordpress-0", -1)) + + // Retrieve the SHA256 hash. + entity, err := s.store.FindEntity(id, "blobhash256") + c.Assert(err, gc.IsNil) + c.Assert(entity.BlobHash256, gc.Not(gc.Equals), "") + + hashtesting.CheckSHA256Laziness(c, s.store, &id.URL, func() { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(id.URL.Path() + "/meta/hash256"), + ExpectStatus: http.StatusOK, + ExpectBody: params.HashResponse{ + Sum: entity.BlobHash256, + }, + }) + }) +} + +func basicAuthHeader(username, password string) http.Header { + // It's a pity we have to jump through this hoop. 
+ req := &http.Request{ + Header: make(http.Header), + } + req.SetBasicAuth(username, password) + return req.Header +} + +func entityFieldGetter(fieldName string) metaEndpointExpectedValueGetter { + return entityGetter(func(entity *mongodoc.Entity) interface{} { + field := reflect.ValueOf(entity).Elem().FieldByName(fieldName) + if !field.IsValid() { + panic(errgo.Newf("entity has no field %q", fieldName)) + } + return field.Interface() + }) +} + +func entityGetter(get func(*mongodoc.Entity) interface{}) metaEndpointExpectedValueGetter { + return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + doc, err := store.FindEntity(url) + if err != nil { + return nil, errgo.Mask(err) + } + return get(doc), nil + } +} + +func zipGetter(get func(*zip.Reader) interface{}) metaEndpointExpectedValueGetter { + return func(store *charmstore.Store, url *router.ResolvedURL) (interface{}, error) { + doc, err := store.FindEntity(url, "blobname") + if err != nil { + return nil, errgo.Mask(err) + } + blob, size, err := store.BlobStore.Open(doc.BlobName) + if err != nil { + return nil, errgo.Mask(err) + } + defer blob.Close() + content, err := ioutil.ReadAll(blob) + if err != nil { + return nil, errgo.Mask(err) + } + r, err := zip.NewReader(bytes.NewReader(content), size) + if err != nil { + return nil, errgo.Mask(err) + } + return get(r), nil + } +} + +func entitySizeChecker(c *gc.C, data interface{}) { + response := data.(*params.ArchiveSizeResponse) + c.Assert(response.Size, gc.Not(gc.Equals), int64(0)) +} + +func (s *APISuite) addPublicCharm(c *gc.C, charmName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Charm) { + ch := storetesting.Charms.CharmDir(charmName) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + return rurl, ch +} + +func (s *APISuite) addPublicBundle(c *gc.C, bundleName string, rurl *router.ResolvedURL) (*router.ResolvedURL, charm.Bundle) { + bundle := storetesting.Charms.BundleDir(bundleName) + err := s.store.AddBundleWithArchive(rurl, bundle) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + return rurl, bundle +} + +func (s *APISuite) assertPutNonAdmin(c *gc.C, url string, val interface{}) { + s.assertPut0(c, url, val, false) +} + +func (s *APISuite) assertPut(c *gc.C, url string, val interface{}) { + s.assertPut0(c, url, val, true) +} + +func (s *APISuite) assertPut0(c *gc.C, url string, val interface{}, asAdmin bool) { + body, err := json.Marshal(val) + c.Assert(err, gc.IsNil) + p := httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url), + Method: "PUT", + Do: bakeryDo(nil), + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: bytes.NewReader(body), + } + if asAdmin { + p.Username = testUsername + p.Password = testPassword + } + rec := httptesting.DoRequest(c, p) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) + c.Assert(rec.Body.String(), gc.HasLen, 0) +} + +func (s *APISuite) assertGet(c *gc.C, url string, expectVal interface{}) { + httptesting.AssertJSONCall(c, 
httptesting.JSONCallParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL(url), + ExpectBody: expectVal, + }) +} + +func (s *APISuite) addLog(c *gc.C, log *mongodoc.Log) { + err := s.store.DB.Logs().Insert(log) + c.Assert(err, gc.Equals, nil) +} + +func mustMarshalJSON(val interface{}) string { + data, err := json.Marshal(val) + if err != nil { + panic(fmt.Errorf("cannot marshal %#v: %v", val, err)) + } + return string(data) +} + +func (s *APISuite) TestMacaroon(c *gc.C) { + var checkedCaveats []string + var mu sync.Mutex + var dischargeError error + s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) { + mu.Lock() + defer mu.Unlock() + checkedCaveats = append(checkedCaveats, cond+" "+arg) + return []checkers.Caveat{checkers.DeclaredCaveat("username", "who")}, dischargeError + } + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("macaroon"), + Method: "GET", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.String())) + var m macaroon.Macaroon + err := json.Unmarshal(rec.Body.Bytes(), &m) + c.Assert(err, gc.IsNil) + c.Assert(m.Location(), gc.Equals, "charmstore") + client := httpbakery.NewClient() + ms, err := client.DischargeAll(&m) + c.Assert(err, gc.IsNil) + sort.Strings(checkedCaveats) + c.Assert(checkedCaveats, jc.DeepEquals, []string{ + "is-authenticated-user ", + }) + macaroonCookie, err := httpbakery.NewCookie(ms) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("log"), + Do: bakeryDo(nil), + Cookies: []*http.Cookie{macaroonCookie}, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "who"`, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.noMacaroonSrv, + URL: storeURL("log"), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + }, + }) +} + +func (s *APISuite) TestWhoAmIFailWithNoMacaroon(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.noMacaroonSrv, + URL: storeURL("whoami"), + Do: bakeryDo(nil), + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "authentication failed: missing HTTP auth header", + }, + }) +} + +func (s *APISuite) TestWhoAmIReturnsNameAndGroups(c *gc.C) { + s.discharge = dischargeForUser("who") + s.idM.groups = map[string][]string{ + "who": {"foo", "bar"}, + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("whoami"), + Do: bakeryDo(nil), + ExpectStatus: http.StatusOK, + ExpectBody: params.WhoAmIResponse{ + User: "who", + Groups: []string{"foo", "bar"}, + }, + }) +} + +var promulgateTests = []struct { + about string + entities []*mongodoc.Entity + baseEntities []*mongodoc.BaseEntity + id string + useHTTPDo bool + method string + caveats []checkers.Caveat + groups map[string][]string + body io.Reader + username string + password string + expectStatus int + expectBody interface{} + expectEntities []*mongodoc.Entity + expectBaseEntities []*mongodoc.BaseEntity + expectPromulgate bool + expectUser string +}{{ + about: "unpromulgate base entity", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: 
[]*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + expectUser: "admin", +}, { + about: "promulgate base entity", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(mongodoc.ACL{ + Write: []string{v5.PromulgatorsGroup}, + }).WithPromulgated(true).Build(), + }, + expectPromulgate: true, + expectUser: "admin", +}, { + about: "unpromulgate base entity not found", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/mysql", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/mysql"`, + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "promulgate base entity not found", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/mysql", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + username: testUsername, + password: testPassword, + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/mysql"`, + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, +}, { + about: "bad method", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: 
storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + username: testUsername, + password: testPassword, + method: "POST", + expectStatus: http.StatusMethodNotAllowed, + expectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "POST not allowed", + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "bad JSON", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: bytes.NewReader([]byte("tru")), + username: testUsername, + password: testPassword, + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "bad request: invalid character ' ' in literal true (expecting 'e')", + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "unpromulgate base entity with macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v5.UsernameAttr, v5.PromulgatorsGroup), + }, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + expectUser: v5.PromulgatorsGroup, +}, { + about: "promulgate base entity with macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v5.UsernameAttr, v5.PromulgatorsGroup), + }, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(mongodoc.ACL{ + Write: []string{v5.PromulgatorsGroup}, + }).WithPromulgated(true).Build(), + }, + expectPromulgate: true, + expectUser: v5.PromulgatorsGroup, +}, { + about: "promulgate base entity with group macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: 
storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v5.UsernameAttr, "bob"), + }, + groups: map[string][]string{ + "bob": {v5.PromulgatorsGroup, "yellow"}, + }, + expectStatus: http.StatusOK, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithACLs(mongodoc.ACL{ + Write: []string{v5.PromulgatorsGroup}, + }).WithPromulgated(true).Build(), + }, + expectPromulgate: true, + expectUser: "bob", +}, { + about: "no authorisation", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, + useHTTPDo: true, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: false}), + expectStatus: http.StatusProxyAuthRequired, + expectBody: dischargeRequiredBody, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").WithPromulgatedURL("trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").WithPromulgated(true).Build(), + }, +}, { + about: "promulgate base entity with unauthorized user macaroon", + entities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + baseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, + id: "~charmers/wordpress", + body: storetesting.JSONReader(params.PromulgateRequest{Promulgated: true}), + caveats: []checkers.Caveat{ + checkers.DeclaredCaveat(v5.UsernameAttr, "bob"), + }, + groups: map[string][]string{ + "bob": {"yellow"}, + }, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Message: `unauthorized: access denied for user "bob"`, + Code: params.ErrUnauthorized, + }, + expectEntities: []*mongodoc.Entity{ + storetesting.NewEntity("~charmers/trusty/wordpress-0").Build(), + }, + expectBaseEntities: []*mongodoc.BaseEntity{ + storetesting.NewBaseEntity("~charmers/wordpress").Build(), + }, +}} + +func (s *APISuite) TestPromulgate(c *gc.C) { + for i, test := range promulgateTests { + c.Logf("%d. 
%s\n", i, test.about) + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + for _, e := range test.entities { + err := s.store.DB.Entities().Insert(e) + c.Assert(err, gc.IsNil) + } + for _, e := range test.baseEntities { + err := s.store.DB.BaseEntities().Insert(e) + c.Assert(err, gc.IsNil) + } + if test.method == "" { + test.method = "PUT" + } + + var calledEntities []audit.Entry + s.PatchValue(v5.TestAddAuditCallback, func(e audit.Entry) { + calledEntities = append(calledEntities, e) + }) + + client := httpbakery.NewHTTPClient() + s.discharge = func(_, _ string) ([]checkers.Caveat, error) { + return test.caveats, nil + } + s.idM.groups = test.groups + p := httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.id + "/promulgate"), + Method: test.method, + Body: test.body, + Header: http.Header{"Content-Type": {"application/json"}}, + Username: test.username, + Password: test.password, + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + } + if !test.useHTTPDo { + p.Do = bakeryDo(client) + } + httptesting.AssertJSONCall(c, p) + n, err := s.store.DB.Entities().Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, len(test.expectEntities)) + for _, e := range test.expectEntities { + storetesting.AssertEntity(c, s.store.DB.Entities(), e) + } + n, err = s.store.DB.BaseEntities().Count() + c.Assert(err, gc.IsNil) + c.Assert(n, gc.Equals, len(test.expectBaseEntities)) + for _, e := range test.expectBaseEntities { + storetesting.AssertBaseEntity(c, s.store.DB.BaseEntities(), e) + } + + if test.expectStatus == http.StatusOK { + ref := charm.MustParseURL(test.id) + ref.Series = "trusty" + ref.Revision = 0 + + e := audit.Entry{ + User: test.expectUser, + Op: audit.OpUnpromulgate, + Entity: ref, + } + if test.expectPromulgate { + e.Op = audit.OpPromulgate + } + c.Assert(calledEntities, jc.DeepEquals, []audit.Entry{e}) + } else { + c.Assert(len(calledEntities), gc.Equals, 0) + } + calledEntities = nil + } +} + +func (s *APISuite) TestEndpointRequiringBaseEntityWithPromulgatedId(c *gc.C) { + // Add a promulgated charm. + url := newResolvedURL("~charmers/precise/wordpress-23", 23) + s.addPublicCharm(c, "wordpress", url) + + // Unpromulgate the base entity + err := s.store.SetPromulgated(url, false) + c.Assert(err, gc.IsNil) + + // Check that we can still enquire about the promulgation status + // of the entity when using its promulgated URL. 
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("precise/wordpress-23/meta/promulgated"), + ExpectBody: params.PromulgatedResponse{ + Promulgated: false, + }, + }) +} + +var publishTests = []struct { + about string + db []*router.ResolvedURL + id string + publish bool + expectDB []*router.ResolvedURL + expectBody params.PublishResponse +}{{ + about: "publish: one development charm present, fully qualified id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", -1), + }, + id: "~who/wily/django-42", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + }, +}, { + about: "publish: one development charm present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 47), + }, + id: "wily/django-47", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", 47), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + PromulgatedId: charm.MustParseURL("wily/django-47"), + }, +}, { + about: "publish: one development charm present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", -1), + }, + id: "~who/wily/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + }, +}, { + about: "publish: one development charm present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 47), + }, + id: "django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", 47), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + PromulgatedId: charm.MustParseURL("wily/django-47"), + }, +}, { + about: "publish: one published charm present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-1", -1), + }, + id: "~who/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-1", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-1"), + }, +}, { + about: "publish: one published charm present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-2", 0), + }, + id: "wily/django-0", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-2", 0), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-2"), + PromulgatedId: charm.MustParseURL("wily/django-0"), + }, +}, { + about: "publish: multiple development charms present, fully qualified id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + newResolvedURL("~who/development/trusty/django-1", -1), + }, + id: "~who/wily/django-1", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + newResolvedURL("~who/development/trusty/django-1", -1), + }, + 
expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-1"), + }, +}, { + about: "publish: multiple development charms present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/development/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + id: "wily/django-10", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/development/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-42"), + PromulgatedId: charm.MustParseURL("wily/django-10"), + }, +}, { + about: "publish: multiple development charms present, fully qualified id, promulgated, last one published", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/development/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + id: "wily/django-12", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-42", 10), + newResolvedURL("~who/development/wily/django-43", 11), + newResolvedURL("~who/wily/django-44", 12), + newResolvedURL("~who/development/wily/rails-100", 10), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-44"), + PromulgatedId: charm.MustParseURL("wily/django-12"), + }, +}, { + about: "publish: multiple development charms present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/trusty/django-42", -1), + newResolvedURL("~who/development/trusty/django-47", -1), + }, + id: "~who/wily/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/wily/django-1", -1), + newResolvedURL("~who/development/trusty/django-42", -1), + newResolvedURL("~who/development/trusty/django-47", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-1"), + }, +}, { + about: "publish: multiple development charms present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", 0), + newResolvedURL("~who/development/wily/django-1", 1), + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/development/trusty/django-47", 11), + }, + id: "django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", 0), + newResolvedURL("~who/development/wily/django-1", 1), + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/trusty/django-47"), + PromulgatedId: charm.MustParseURL("trusty/django-11"), + }, +}, { + about: "publish: multiple published charms present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + }, + id: 
"~who/wily/django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/wily/django-2", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/wily/django-2"), + }, +}, { + about: "publish: multiple published charms present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/trusty/django-48", 12), + newResolvedURL("~who/development/trusty/django-49", 13), + }, + id: "django", + publish: true, + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/trusty/django-48", 12), + newResolvedURL("~who/trusty/django-49", 13), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/trusty/django-49"), + PromulgatedId: charm.MustParseURL("trusty/django-13"), + }, +}, { + about: "unpublish: one published charm present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-1", -1), + }, + id: "~who/django", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-1", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/wily/django-1"), + }, +}, { + about: "unpublish: one published charm present, fully qualified id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-2", 0), + }, + id: "wily/django-0", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-2", 0), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/wily/django-2"), + PromulgatedId: charm.MustParseURL("development/wily/django-0"), + }, +}, { + about: "unpublish: multiple published charms present, partial id, not promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + }, + id: "~who/wily/django", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/wily/django-0", -1), + newResolvedURL("~who/development/wily/django-1", -1), + newResolvedURL("~who/development/wily/django-2", -1), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/wily/django-0"), + }, +}, { + about: "unpublish: multiple published charms present, partial id, promulgated", + db: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/trusty/django-48", 12), + newResolvedURL("~who/development/trusty/django-49", 13), + }, + id: "django", + expectDB: []*router.ResolvedURL{ + newResolvedURL("~who/development/trusty/django-42", 10), + newResolvedURL("~who/trusty/django-47", 11), + newResolvedURL("~who/development/trusty/django-48", 12), + newResolvedURL("~who/development/trusty/django-49", 13), + }, + expectBody: params.PublishResponse{ + Id: charm.MustParseURL("~who/development/trusty/django-48"), + PromulgatedId: charm.MustParseURL("development/trusty/django-12"), + }, +}} + +func (s *APISuite) TestPublish(c *gc.C) { + for i, test := range publishTests { + c.Logf("test %d: %s", i, test.about) + + // Add the initial entities to the database. 
+ for _, rurl := range test.db { + s.addPublicCharm(c, "wordpress", rurl) + } + + // Build the proper request body. + body := mustMarshalJSON(params.PublishRequest{ + Published: test.publish, + }) + + // Check that the request/response process works as expected. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.id + "/publish"), + Method: "PUT", + Header: http.Header{"Content-Type": {"application/json"}}, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(body), + ExpectStatus: http.StatusOK, + ExpectBody: test.expectBody, + }) + + // Check that the database now includes the expected entities. + for _, rurl := range test.expectDB { + e, err := s.store.FindEntity(rurl) + c.Assert(err, gc.IsNil) + c.Assert(charmstore.EntityResolvedURL(e), jc.DeepEquals, rurl) + } + + // Remove all entities from the database. + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var publishErrorsTests = []struct { + about string + method string + id string + contentType string + body string + expectStatus int + expectBody params.Error +}{{ + about: "get method not allowed", + method: "GET", + id: "~who/wily/django-42", + expectStatus: http.StatusMethodNotAllowed, + expectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "GET not allowed", + }, +}, { + about: "post method not allowed", + method: "POST", + id: "~who/wily/django-42", + expectStatus: http.StatusMethodNotAllowed, + expectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "POST not allowed", + }, +}, { + about: "invalid channel", + method: "PUT", + id: "~who/development/wily/django-42", + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Code: params.ErrForbidden, + Message: `can only set publish on published URL, "cs:~who/development/wily/django-42" provided`, + }, +}, { + about: "unexpected content type", + method: "PUT", + id: "~who/wily/django-42", + contentType: "text/invalid", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `cannot unmarshal publish request body: cannot unmarshal into field: unexpected content type text/invalid; want application/json; content: "{\"Published\":true}"`, + }, +}, { + about: "invalid body", + method: "PUT", + id: "~who/wily/django-42", + body: "bad wolf", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: "cannot unmarshal publish request body: cannot unmarshal into field: cannot unmarshal request body: invalid character 'b' looking for beginning of value", + }, +}, { + about: "entity to be published not found", + method: "PUT", + id: "~who/wily/django-42", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~who/development/wily/django-42"`, + }, +}, { + about: "entity to be unpublished not found", + method: "PUT", + id: "~who/wily/django-42", + body: mustMarshalJSON(params.PublishRequest{ + Published: false, + }), + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~who/wily/django-42"`, + }, +}} + +func (s *APISuite) TestPublishErrors(c *gc.C) { + for i, test := range publishErrorsTests { + c.Logf("test %d: %s", i, test.about) + contentType := test.contentType + if contentType == "" { + 
contentType = "application/json" + } + body := test.body + if body == "" { + body = mustMarshalJSON(params.PublishRequest{ + Published: true, + }) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.id + "/publish"), + Method: test.method, + Header: http.Header{"Content-Type": {contentType}}, + Username: testUsername, + Password: testPassword, + Body: strings.NewReader(body), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *APISuite) TestTooManyConcurrentRequests(c *gc.C) { + // We don't have any control over the number of concurrent + // connections allowed by s.srv, so we make our own + // server here with custom config. + config := charmstore.ServerParams{ + MaxMgoSessions: 1, + } + db := s.Session.DB("charmstore") + srv, err := charmstore.NewServer(db, nil, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v5.NewAPIHandler}) + c.Assert(err, gc.IsNil) + defer srv.Close() + + // Get a store from the pool so that we'll be + // at the concurrent request limit. + store := srv.Pool().Store() + defer store.Close() + + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: srv, + Do: bakeryDo(nil), + URL: storeURL("debug/status"), + ExpectStatus: http.StatusServiceUnavailable, + ExpectBody: params.Error{ + Message: "service unavailable: too many mongo sessions in use", + Code: params.ErrServiceUnavailable, + }, + }) +} + +// dischargeRequiredBody returns a httptesting.BodyAsserter that checks +// that the response body contains a discharge required error holding a macaroon +// with a third-party caveat addressed to expectedEntityLocation. +var dischargeRequiredBody httptesting.BodyAsserter = func(c *gc.C, body json.RawMessage) { + var response httpbakery.Error + err := json.Unmarshal(body, &response) + c.Assert(err, gc.IsNil) + c.Assert(response.Code, gc.Equals, httpbakery.ErrDischargeRequired) + c.Assert(response.Message, gc.Equals, "verification failed: no macaroon cookies in request") + c.Assert(response.Info.Macaroon, gc.NotNil) + for _, cav := range response.Info.Macaroon.Caveats() { + if cav.Location != "" { + return + } + } + c.Fatalf("no third party caveat found in response macaroon; caveats %#v", response.Info.Macaroon.Caveats()) +} + +func (s *APISuite) TestSetAuthCookie(c *gc.C) { + m, err := macaroon.New([]byte("key"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + ms := macaroon.Slice{m} + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("set-auth-cookie"), + Method: "PUT", + Header: http.Header{"Origin": []string{"https://1.2.3.4"}}, + JSONBody: params.SetAuthCookie{ + Macaroons: ms, + }, + }) + // The request is successful. + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // The response includes the CORS header for the specific request. + c.Assert(rec.Header().Get("Access-Control-Allow-Origin"), gc.Equals, "https://1.2.3.4") + + // The response includes the macaroons cookie. 
+ resp := http.Response{Header: rec.Header()} + cookies := resp.Cookies() + c.Assert(len(cookies), gc.Equals, 1) + expected, err := httpbakery.NewCookie(ms) + c.Assert(err, jc.ErrorIsNil) + expected.Path = "/" + c.Assert(cookies[0].Value, gc.Equals, expected.Value) +} + +func (s *APISuite) TestSetAuthCookieBodyError(c *gc.C) { + m, err := macaroon.New([]byte("key"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("set-auth-cookie"), + Method: "PUT", + JSONBody: macaroon.Slice{m}, + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: "cannot unmarshal macaroons: json: cannot unmarshal array into Go value of type params.SetAuthCookie", + }, + }) +} + +func (s *APISuite) TestSetAuthCookieMethodError(c *gc.C) { + m, err := macaroon.New([]byte("key"), "id", "location") + c.Assert(err, jc.ErrorIsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("set-auth-cookie"), + Method: "POST", + JSONBody: macaroon.Slice{m}, + ExpectStatus: http.StatusMethodNotAllowed, + ExpectBody: params.Error{ + Code: params.ErrMethodNotAllowed, + Message: "POST not allowed", + }, + }) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,675 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "archive/zip" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/juju/httprequest" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/series" +) + +// GET id/archive +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchive +// +// POST id/archive?hash=sha384hash +// https://github.com/juju/charmstore/blob/v4/docs/API.md#post-idarchive +// +// DELETE id/archive +// https://github.com/juju/charmstore/blob/v4/docs/API.md#delete-idarchive +// +// PUT id/archive?hash=sha384hash +// This is like POST except that it puts the archive to a known revision +// rather than choosing a new one. As this feature is to support legacy +// ingestion methods, and will be removed in the future, it has no entry +// in the specification. +func (h *ReqHandler) serveArchive(id *charm.URL, w http.ResponseWriter, req *http.Request) error { + resolveId := h.ResolvedIdHandler + authId := h.AuthIdHandler + switch req.Method { + case "DELETE": + return resolveId(authId(h.serveDeleteArchive))(id, w, req) + case "GET": + return resolveId(h.serveGetArchive)(id, w, req) + case "POST", "PUT": + // Make sure we consume the full request body before responding. + // + // It seems a shame to require that the whole, possibly large, archive + // be uploaded if we already know that the request is going to + // fail, but it is necessary to prevent some failures.
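+ // (If the handler returned without draining the body, the client + // could see its connection closed before it managed to read the + // error response.)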
+		//
+		// TODO: investigate using 100-Continue statuses to prevent
+		// unnecessary uploads.
+		defer io.Copy(ioutil.Discard, req.Body)
+		if err := h.authorizeUpload(id, req); err != nil {
+			return errgo.Mask(err, errgo.Any)
+		}
+		if req.Method == "POST" {
+			return h.servePostArchive(id, w, req)
+		}
+		return h.servePutArchive(id, w, req)
+	}
+	return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", req.Method)
+}
+
+func (h *ReqHandler) authorizeUpload(id *charm.URL, req *http.Request) error {
+	if id.User == "" {
+		return badRequestf(nil, "user not specified in entity upload URL %q", id)
+	}
+	baseEntity, err := h.Store.FindBaseEntity(id, "acls", "developmentacls")
+	// Note that we pass a nil entity URL to authorizeWithPerms, because
+	// we haven't got a resolved URL at this point. At some point in the
+	// future we may want to allow is-entity first-party caveats when
+	// uploading, at which point we will need to rethink this a little.
+	if err == nil {
+		if err := h.authorizeWithPerms(req, baseEntity.DevelopmentACLs.Read, baseEntity.DevelopmentACLs.Write, nil); err != nil {
+			return errgo.Mask(err, errgo.Any)
+		}
+		// If uploading a published entity, also check that the user has
+		// publishing permissions.
+		if id.Channel == "" {
+			if err := h.authorizeWithPerms(req, baseEntity.ACLs.Read, baseEntity.ACLs.Write, nil); err != nil {
+				return errgo.Mask(err, errgo.Any)
+			}
+		}
+		return nil
+	}
+	if errgo.Cause(err) != params.ErrNotFound {
+		return errgo.Notef(err, "cannot retrieve entity %q for authorization", id)
+	}
+	// The base entity does not currently exist, so we default to
+	// assuming write permissions for the entity user.
+	if err := h.authorizeWithPerms(req, nil, []string{id.User}, nil); err != nil {
+		return errgo.Mask(err, errgo.Any)
+	}
+	return nil
+}
+
+func (h *ReqHandler) serveGetArchive(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
+	_, err := h.authorizeEntityAndTerms(req, []*router.ResolvedURL{id})
+	if err != nil {
+		return errgo.Mask(err, errgo.Any)
+	}
+	r, size, hash, err := h.Store.OpenBlob(id)
+	if err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	defer r.Close()
+	header := w.Header()
+	setArchiveCacheControl(w.Header(), h.isPublic(id.URL))
+	header.Set(params.ContentHashHeader, hash)
+	header.Set(params.EntityIdHeader, id.String())
+
+	if StatsEnabled(req) {
+		h.Store.IncrementDownloadCountsAsync(id)
+	}
+	// TODO(rog) should we set connection=close here?
+	// See https://codereview.appspot.com/5958045
+	serveContent(w, req, size, r)
+	return nil
+}
+
+func (h *ReqHandler) serveDeleteArchive(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
+	// Retrieve the entity blob name from the database.
+	blobName, _, err := h.Store.BlobNameAndHash(id)
+	if err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	// Remove the entity.
+	if err := h.Store.DB.Entities().RemoveId(&id.URL); err != nil {
+		return errgo.Notef(err, "cannot remove %s", id)
+	}
+	// Remove the reference to the archive from the blob store.
+	if err := h.Store.BlobStore.Remove(blobName); err != nil {
+		return errgo.Notef(err, "cannot remove blob %s", blobName)
+	}
+	h.Store.IncCounterAsync(charmstore.EntityStatsKey(&id.URL, params.StatsArchiveDelete))
+	return nil
+}
+
+func (h *ReqHandler) updateStatsArchiveUpload(id *charm.URL, err *error) {
+	// Upload stats don't include revision: it is assumed that each
+	// entity revision is only uploaded once.
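+	// For example, a successful upload of cs:~charmers/precise/wordpress-3
+	// is counted under a key like
+	//
+	//	[]string{params.StatsArchiveUpload, "precise", "wordpress", "charmers"}
+	//
+	// with the revision cleared below so that uploads of all revisions
+	// aggregate under the same counter.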
+	id.Revision = -1
+	kind := params.StatsArchiveUpload
+	if *err != nil {
+		kind = params.StatsArchiveFailedUpload
+	}
+	h.Store.IncCounterAsync(charmstore.EntityStatsKey(id, kind))
+}
+
+func (h *ReqHandler) servePostArchive(id *charm.URL, w http.ResponseWriter, req *http.Request) (err error) {
+	defer h.updateStatsArchiveUpload(id, &err)
+
+	if id.Revision != -1 {
+		return badRequestf(nil, "revision specified, but should not be specified")
+	}
+	if id.User == "" {
+		return badRequestf(nil, "user not specified")
+	}
+	hash := req.Form.Get("hash")
+	if hash == "" {
+		return badRequestf(nil, "hash parameter not specified")
+	}
+	if req.ContentLength == -1 {
+		return badRequestf(nil, "Content-Length not specified")
+	}
+
+	oldURL, oldHash, err := h.latestRevisionInfo(id)
+	if err != nil && errgo.Cause(err) != params.ErrNotFound {
+		return errgo.Notef(err, "cannot get hash of latest revision")
+	}
+	if oldHash == hash {
+		// The hash matches the hash of the latest revision, so there
+		// is no need to upload anything. When uploading to a published
+		// URL and the latest revision is a development entity, we need
+		// to actually publish the existing entity. Note that at this
+		// point the user is already known to have the required
+		// permissions.
+		underDevelopment := id.Channel == charm.DevelopmentChannel
+		if oldURL.Development && !underDevelopment {
+			if err := h.Store.SetDevelopment(oldURL, false); err != nil {
+				return errgo.NoteMask(err, "cannot publish charm or bundle", errgo.Is(params.ErrNotFound))
+			}
+		}
+		oldURL.Development = underDevelopment
+		return httprequest.WriteJSON(w, http.StatusOK, &params.ArchiveUploadResponse{
+			Id:            oldURL.UserOwnedURL(),
+			PromulgatedId: oldURL.PromulgatedURL(),
+		})
+	}
+	rid := &router.ResolvedURL{
+		URL:         *id.WithChannel(""),
+		Development: id.Channel == charm.DevelopmentChannel,
+	}
+	// Choose the next revision number for the upload.
+	if oldURL == nil {
+		rid.URL.Revision = 0
+	} else {
+		rid.URL.Revision = oldURL.URL.Revision + 1
+	}
+	rid.PromulgatedRevision, err = h.getNewPromulgatedRevision(id)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+
+	if err := h.addBlobAndEntity(rid, req.Body, hash, req.ContentLength); err != nil {
+		return errgo.Mask(err,
+			errgo.Is(params.ErrDuplicateUpload),
+			errgo.Is(params.ErrEntityIdNotAllowed),
+			errgo.Is(params.ErrInvalidEntity),
+		)
+	}
+	return httprequest.WriteJSON(w, http.StatusOK, &params.ArchiveUploadResponse{
+		Id:            rid.UserOwnedURL(),
+		PromulgatedId: rid.PromulgatedURL(),
+	})
+}
+
+func (h *ReqHandler) servePutArchive(id *charm.URL, w http.ResponseWriter, req *http.Request) (err error) {
+	defer h.updateStatsArchiveUpload(id, &err)
+	if id.Series == "" {
+		return badRequestf(nil, "series not specified")
+	}
+	if id.Revision == -1 {
+		return badRequestf(nil, "revision not specified")
+	}
+	if id.User == "" {
+		return badRequestf(nil, "user not specified")
+	}
+	hash := req.Form.Get("hash")
+	if hash == "" {
+		return badRequestf(nil, "hash parameter not specified")
+	}
+	if req.ContentLength == -1 {
+		return badRequestf(nil, "Content-Length not specified")
+	}
+	rid := &router.ResolvedURL{
+		URL:                 *id.WithChannel(""),
+		PromulgatedRevision: -1,
+		Development:         id.Channel == charm.DevelopmentChannel,
+	}
+	// Get the PromulgatedURL from the request parameters. When ingesting,
+	// entities might not be added in order and the promulgated revision
+	// might not match the non-promulgated revision, so the full
+	// promulgated URL needs to be specified.
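+	// For example (hypothetical ids), ingesting a charm that was
+	// promulgated as revision 5 might use:
+	//
+	//	PUT ~charmers/precise/wordpress-12/archive?hash=...&promulgated=precise/wordpress-5
+	//
+	// supplying both revision numbers explicitly.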
+	promulgatedURL := req.Form.Get("promulgated")
+	var pid *charm.URL
+	if promulgatedURL != "" {
+		pid, err = charm.ParseURL(promulgatedURL)
+		if err != nil {
+			return badRequestf(err, "cannot parse promulgated url")
+		}
+		if pid.User != "" {
+			return badRequestf(nil, "promulgated URL cannot have a user")
+		}
+		if pid.Name != id.Name {
+			return badRequestf(nil, "promulgated URL has incorrect charm name")
+		}
+		if pid.Series != id.Series {
+			return badRequestf(nil, "promulgated URL has incorrect series")
+		}
+		if pid.Revision == -1 {
+			return badRequestf(nil, "promulgated URL has no revision")
+		}
+		rid.PromulgatedRevision = pid.Revision
+	}
+	if err := h.addBlobAndEntity(rid, req.Body, hash, req.ContentLength); err != nil {
+		return errgo.Mask(err,
+			errgo.Is(params.ErrDuplicateUpload),
+			errgo.Is(params.ErrEntityIdNotAllowed),
+			errgo.Is(params.ErrInvalidEntity),
+		)
+	}
+	return httprequest.WriteJSON(w, http.StatusOK, &params.ArchiveUploadResponse{
+		Id:            rid.UserOwnedURL(),
+		PromulgatedId: rid.PromulgatedURL(),
+	})
+}
+
+// addBlobAndEntity streams the contents of the given body
+// to the blob store and adds an entity record for it.
+// The hash and contentLength parameters hold
+// the content hash and the content length respectively.
+func (h *ReqHandler) addBlobAndEntity(id *router.ResolvedURL, body io.Reader, hash string, contentLength int64) (err error) {
+	name := bson.NewObjectId().Hex()
+
+	// Calculate the SHA256 hash while uploading the blob in the blob store.
+	hash256 := sha256.New()
+	body = io.TeeReader(body, hash256)
+
+	// Upload the actual blob, and make sure that it is removed
+	// if we fail later.
+	err = h.Store.BlobStore.PutUnchallenged(body, name, contentLength, hash)
+	if err != nil {
+		return errgo.Notef(err, "cannot put archive blob")
+	}
+	r, _, err := h.Store.BlobStore.Open(name)
+	if err != nil {
+		return errgo.Notef(err, "cannot open newly created blob")
+	}
+	defer r.Close()
+	defer func() {
+		if err != nil {
+			h.Store.BlobStore.Remove(name)
+			// TODO(rog) log if remove fails.
+		}
+	}()
+
+	// Add the entity entry to the charm store.
+	sum256 := fmt.Sprintf("%x", hash256.Sum(nil))
+	if err = h.addEntity(id, r, name, hash, sum256, contentLength); err != nil {
+		return errgo.Mask(err,
+			errgo.Is(params.ErrDuplicateUpload),
+			errgo.Is(params.ErrEntityIdNotAllowed),
+			errgo.Is(params.ErrInvalidEntity),
+		)
+	}
+	return nil
+}
+
+// addEntity adds the entity represented by the contents
+// of the given reader, associating it with the given id.
+func (h *ReqHandler) addEntity(id *router.ResolvedURL, r io.ReadSeeker, blobName, hash, hash256 string, contentLength int64) error {
+	readerAt := charmstore.ReaderAtSeeker(r)
+	p := charmstore.AddParams{
+		URL:         id,
+		BlobName:    blobName,
+		BlobHash:    hash,
+		BlobHash256: hash256,
+		BlobSize:    contentLength,
+	}
+	if id.URL.Series == "bundle" {
+		b, err := charm.ReadBundleArchiveFromReader(readerAt, contentLength)
+		if err != nil {
+			return readError(err, "cannot read bundle archive")
+		}
+		bundleData := b.Data()
+		charms, err := h.bundleCharms(bundleData.RequiredCharms())
+		if err != nil {
+			return errgo.Notef(err, "cannot retrieve bundle charms")
+		}
+		if err := bundleData.VerifyWithCharms(verifyConstraints, verifyStorage, charms); err != nil {
+			// TODO frankban: use multiError (defined in internal/router).
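+			// verificationError (defined below) flattens the individual
+			// charm.VerificationError messages into a sorted JSON list,
+			// so clients see errors of roughly this shape:
+			//
+			//	bundle verification failed: ["service \"mysql\" refers to non-existent charm \"mysql\""]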
+ return errgo.NoteMask(verificationError(err), "bundle verification failed", errgo.Is(params.ErrInvalidEntity)) + } + if err := h.Store.AddBundle(b, p); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed)) + } + return nil + } + ch, err := charm.ReadCharmArchiveFromReader(readerAt, contentLength) + if err != nil { + return readError(err, "cannot read charm archive") + } + if err := checkCharmIsValid(ch); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity)) + } + if err := checkIdAllowed(id, ch); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrEntityIdNotAllowed)) + } + if err := h.Store.AddCharm(ch, p); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrDuplicateUpload), errgo.Is(params.ErrEntityIdNotAllowed)) + } + return nil +} + +func checkCharmIsValid(ch charm.Charm) error { + m := ch.Meta() + for _, rels := range []map[string]charm.Relation{m.Provides, m.Requires, m.Peers} { + if err := checkRelationsAreValid(rels); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity)) + } + } + if err := checkConsistentSeries(m.Series); err != nil { + return errgo.Mask(err, errgo.Is(params.ErrInvalidEntity)) + } + return nil +} + +func checkRelationsAreValid(rels map[string]charm.Relation) error { + for _, rel := range rels { + if rel.Name == "relation-name" { + return errgo.WithCausef(nil, params.ErrInvalidEntity, "relation %s has almost certainly not been changed from the template", rel.Name) + } + if rel.Interface == "interface-name" { + return errgo.WithCausef(nil, params.ErrInvalidEntity, "interface %s in relation %s has almost certainly not been changed from the template", rel.Interface, rel.Name) + } + } + return nil +} + +// checkConsistentSeries ensures that all of the series listed in the +// charm metadata come from the same distribution. If an error is +// returned it will have a cause of params.ErrInvalidEntity. +func checkConsistentSeries(metadataSeries []string) error { + var dist series.Distribution + for _, s := range metadataSeries { + d := series.Series[s].Distribution + if d == "" { + return errgo.WithCausef(nil, params.ErrInvalidEntity, "unrecognised series %q in metadata", s) + } + if dist == "" { + dist = d + } else if dist != d { + return errgo.WithCausef(nil, params.ErrInvalidEntity, "cannot mix series from %s and %s in single charm", dist, d) + } + } + return nil +} + +// checkIdAllowed ensures that the given id may be used for the provided +// charm. If an error is returned it will have a cause of +// params.ErrEntityIdNotAllowed. +func checkIdAllowed(id *router.ResolvedURL, ch charm.Charm) error { + m := ch.Meta() + if id.URL.Series == "" && len(m.Series) == 0 { + return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "series not specified in url or charm metadata") + } else if id.URL.Series == "" || len(m.Series) == 0 { + return nil + } + // if we get here we have series in both the id and metadata, ensure they agree. 
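+	// For example, a charm whose metadata lists series trusty and utopic
+	// may be uploaded as cs:~charmers/trusty/foo or cs:~charmers/utopic/foo,
+	// but uploading it as cs:~charmers/precise/foo fails with
+	// params.ErrEntityIdNotAllowed.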
+	for _, s := range m.Series {
+		if s == id.URL.Series {
+			return nil
+		}
+	}
+	return errgo.WithCausef(nil, params.ErrEntityIdNotAllowed, "%q series not listed in charm metadata", id.URL.Series)
+}
+
+func (h *ReqHandler) latestRevisionInfo(id *charm.URL) (*router.ResolvedURL, string, error) {
+	entities, err := h.Store.FindEntities(id.WithChannel(charm.DevelopmentChannel), "_id", "blobhash", "promulgated-url", "development")
+	if err != nil {
+		return nil, "", errgo.Mask(err)
+	}
+	if len(entities) == 0 {
+		return nil, "", params.ErrNotFound
+	}
+	latest := entities[0]
+	for _, entity := range entities {
+		if entity.URL.Revision > latest.URL.Revision {
+			latest = entity
+		}
+	}
+	return charmstore.EntityResolvedURL(latest), latest.BlobHash, nil
+}
+
+func verifyConstraints(s string) error {
+	// TODO(rog) provide some actual constraints checking here.
+	return nil
+}
+
+func verifyStorage(s string) error {
+	// TODO(frankban) provide some actual storage checking here.
+	return nil
+}
+
+// GET id/archive/path
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idarchivepath
+func (h *ReqHandler) serveArchiveFile(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
+	r, size, _, err := h.Store.OpenBlob(id)
+	if err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	defer r.Close()
+	zipReader, err := zip.NewReader(charmstore.ReaderAtSeeker(r), size)
+	if err != nil {
+		return errgo.Notef(err, "cannot read archive data for %s", id)
+	}
+
+	// Retrieve the requested file from the zip archive.
+	filePath := strings.TrimPrefix(path.Clean(req.URL.Path), "/")
+	for _, file := range zipReader.File {
+		if path.Clean(file.Name) != filePath {
+			continue
+		}
+		// The file is found.
+		fileInfo := file.FileInfo()
+		if fileInfo.IsDir() {
+			return errgo.WithCausef(nil, params.ErrForbidden, "directory listing not allowed")
+		}
+		content, err := file.Open()
+		if err != nil {
+			return errgo.Notef(err, "unable to read file %q", filePath)
+		}
+		defer content.Close()
+		// Send the response to the client.
+		ctype := mime.TypeByExtension(filepath.Ext(filePath))
+		if ctype != "" {
+			w.Header().Set("Content-Type", ctype)
+		}
+		w.Header().Set("Content-Length", strconv.FormatInt(fileInfo.Size(), 10))
+		setArchiveCacheControl(w.Header(), h.isPublic(id.URL))
+		w.WriteHeader(http.StatusOK)
+		io.Copy(w, content)
+		return nil
+	}
+	return errgo.WithCausef(nil, params.ErrNotFound, "file %q not found in the archive", filePath)
+}
+
+func (h *ReqHandler) isPublic(id charm.URL) bool {
+	baseEntity, err := h.Store.FindBaseEntity(&id, "acls")
+	if err == nil {
+		for _, p := range baseEntity.ACLs.Read {
+			if p == params.Everyone {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func (h *ReqHandler) bundleCharms(ids []string) (map[string]charm.Charm, error) {
+	numIds := len(ids)
+	urls := make([]*charm.URL, 0, numIds)
+	idKeys := make([]string, 0, numIds)
+	// TODO resolve ids concurrently.
+	for _, id := range ids {
+		url, err := charm.ParseURL(id)
+		if err != nil {
+			// Ignore this error. This will be caught in the bundle
+			// verification process (see bundleData.VerifyWithCharms) and will
+			// be returned to the user along with other bundle errors.
+			continue
+		}
+		e, err := h.Store.FindBestEntity(url)
+		if err != nil {
+			if errgo.Cause(err) == params.ErrNotFound {
+				// Ignore this error too, for the same reasons
+				// described above.
+				continue
+			}
+			return nil, err
+		}
+		urls = append(urls, e.URL)
+		idKeys = append(idKeys, id)
+	}
+	var entities []mongodoc.Entity
+	if err := h.Store.DB.Entities().
+		Find(bson.D{{"_id", bson.D{{"$in", urls}}}}).
+		All(&entities); err != nil {
+		return nil, err
+	}
+
+	entityCharms := make(map[charm.URL]charm.Charm, len(entities))
+	for i, entity := range entities {
+		entityCharms[*entity.URL] = &entityCharm{entities[i]}
+	}
+	charms := make(map[string]charm.Charm, len(urls))
+	for i, url := range urls {
+		if ch, ok := entityCharms[*url]; ok {
+			charms[idKeys[i]] = ch
+		}
+	}
+	return charms, nil
+}
+
+// entityCharm implements charm.Charm.
+type entityCharm struct {
+	mongodoc.Entity
+}
+
+func (e *entityCharm) Meta() *charm.Meta {
+	return e.CharmMeta
+}
+
+func (e *entityCharm) Metrics() *charm.Metrics {
+	return nil
+}
+
+func (e *entityCharm) Config() *charm.Config {
+	return e.CharmConfig
+}
+
+func (e *entityCharm) Actions() *charm.Actions {
+	return e.CharmActions
+}
+
+func (e *entityCharm) Revision() int {
+	return e.URL.Revision
+}
+
+// verificationError returns an error whose string representation is a list of
+// all the verification error messages stored in err, in JSON format.
+// Note that err must be a *charm.VerificationError.
+func verificationError(err error) error {
+	verr, ok := err.(*charm.VerificationError)
+	if !ok {
+		return err
+	}
+	messages := make([]string, len(verr.Errors))
+	for i, err := range verr.Errors {
+		messages[i] = err.Error()
+	}
+	sort.Strings(messages)
+	encodedMessages, err := json.Marshal(messages)
+	if err != nil {
+		// This should never happen.
+		return err
+	}
+	return errgo.WithCausef(nil, params.ErrInvalidEntity, string(encodedMessages))
+}
+
+// ArchiveCachePublicMaxAge specifies the cache expiry duration for items
+// returned from the archive where the id refers to a public entity.
+const ArchiveCachePublicMaxAge = 1 * time.Hour
+
+// setArchiveCacheControl sets cache control headers
+// in a response to an archive-derived endpoint.
+// The isPublic parameter specifies whether the entity
+// is public and its responses may therefore be cached
+// by anyone.
+func setArchiveCacheControl(h http.Header, isPublic bool) {
+	if isPublic {
+		seconds := int(ArchiveCachePublicMaxAge / time.Second)
+		h.Set("Cache-Control", "public, max-age="+strconv.Itoa(seconds))
+	} else {
+		h.Set("Cache-Control", "no-cache, must-revalidate")
+	}
+}
+
+// getNewPromulgatedRevision returns the promulgated revision
+// to give to a newly uploaded charm with the given id.
+// It returns -1 if the charm is not promulgated.
+func (h *ReqHandler) getNewPromulgatedRevision(id *charm.URL) (int, error) {
+	baseEntity, err := h.Store.FindBaseEntity(id, "promulgated")
+	if err != nil && errgo.Cause(err) != params.ErrNotFound {
+		return 0, errgo.Mask(err)
+	}
+	if baseEntity == nil || !baseEntity.Promulgated {
+		return -1, nil
+	}
+	query := h.Store.EntitiesQuery(&charm.URL{
+		Series:   id.Series,
+		Name:     id.Name,
+		Channel:  id.Channel,
+		Revision: -1,
+	})
+	var entity mongodoc.Entity
+	err = query.Sort("-promulgated-revision").Select(bson.D{{"promulgated-revision", 1}}).One(&entity)
+	if err == mgo.ErrNotFound {
+		return 0, nil
+	}
+	if err != nil {
+		return 0, errgo.Mask(err)
+	}
+	return entity.PromulgatedRevision + 1, nil
+}
+
+// readError creates an appropriate error for errors in reading an
+// uploaded archive. If the archive could not be read because the data
+// uploaded is invalid then an error with a cause of
+// params.ErrInvalidEntity will be returned.
+// The given message will be added as context.
+func readError(err error, msg string) error {
+	switch errgo.Cause(err) {
+	case zip.ErrFormat, zip.ErrAlgorithm, zip.ErrChecksum:
+		return errgo.WithCausef(err, params.ErrInvalidEntity, msg)
+	}
+	return errgo.Notef(err, msg)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/archive_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1867 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"archive/zip"
+	"bytes"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/juju/errgo"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	charmtesting "gopkg.in/juju/charmrepo.v2-unstable/testing"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting/stats"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+)
+
+type commonArchiveSuite struct {
+	commonSuite
+}
+
+type ArchiveSuite struct {
+	commonArchiveSuite
+}
+
+var _ = gc.Suite(&ArchiveSuite{})
+
+func (s *ArchiveSuite) TestGetCharmWithTerms(c *gc.C) {
+	client := httpbakery.NewHTTPClient()
+
+	id := newResolvedURL("cs:~charmers/precise/terms-0", -1)
+	s.assertUploadCharm(c, "POST", id, "terms")
+	err := s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	archiveUrl := storeURL("~charmers/precise/terms-0/archive")
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     archiveUrl,
+		Do:      bakeryDo(client),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized)
+}
+
+func (s *ArchiveSuite) TestGet(c *gc.C) {
+	id := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+	wordpress := s.assertUploadCharm(c, "POST", id, "wordpress")
+	err := s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	archiveBytes, err := ioutil.ReadFile(wordpress.Path)
+	c.Assert(err, gc.IsNil)
+
+	rec := s.assertArchiveDownload(
+		c,
+		"~charmers/precise/wordpress-0",
+		nil,
+		archiveBytes,
+	)
+	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0")
+	assertCacheControl(c, rec.Header(), true)
+
+	// Check that the HTTP range logic is plugged in OK. If this
+	// is working, we assume that the whole thing is working OK,
+	// as net/http is well-tested.
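+	// A reminder of the HTTP semantics exercised here: the header
+	//
+	//	Range: bytes=10-100
+	//
+	// is inclusive at both ends, so the expected 206 response body is
+	// 100-10+1 = 91 bytes, matching archiveBytes[10:101] below.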
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("~charmers/precise/wordpress-0/archive"),
+		Header:  http.Header{"Range": {"bytes=10-100"}},
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusPartialContent, gc.Commentf("body: %q", rec.Body.Bytes()))
+	c.Assert(rec.Body.Bytes(), gc.HasLen, 100-10+1)
+	c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes[10:101])
+	c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes))
+	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-0")
+	assertCacheControl(c, rec.Header(), true)
+
+	// The development version of the entity can also be retrieved.
+	err = s.store.SetPerms(id.URL.WithChannel(charm.DevelopmentChannel), "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("~charmers/development/precise/wordpress-0/archive"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes)
+	c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes))
+	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/development/precise/wordpress-0")
+}
+
+func (s *ArchiveSuite) TestGetDevelopment(c *gc.C) {
+	id := newResolvedURL("cs:~charmers/development/trusty/wordpress-0", -1)
+	wordpress := s.assertUploadCharm(c, "POST", id, "wordpress")
+	url := id.PreferredURL()
+	err := s.store.SetPerms(url, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	archiveBytes, err := ioutil.ReadFile(wordpress.Path)
+	c.Assert(err, gc.IsNil)
+
+	rec := s.assertArchiveDownload(
+		c,
+		"~charmers/development/trusty/wordpress-0",
+		nil,
+		archiveBytes,
+	)
+	c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/development/trusty/wordpress-0")
+
+	// It is not possible to use the published URL to retrieve the archive:
+	// the entity has only been uploaded to the development channel.
+	err = s.store.SetPerms(url.WithChannel(""), "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:      s.srv,
+		URL:          storeURL("~charmers/trusty/wordpress-0/archive"),
+		ExpectStatus: http.StatusNotFound,
+		ExpectBody: params.Error{
+			Code:    params.ErrNotFound,
+			Message: `no matching charm or bundle for "cs:~charmers/trusty/wordpress-0"`,
+		},
+	})
+}
+
+func (s *ArchiveSuite) TestGetWithPartialId(c *gc.C) {
+	id := newResolvedURL("cs:~charmers/utopic/wordpress-42", -1)
+	err := s.store.AddCharmWithArchive(
+		id,
+		storetesting.Charms.CharmArchive(c.MkDir(), "wordpress"))
+	c.Assert(err, gc.IsNil)
+	err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	rec := s.assertArchiveDownload(
+		c,
+		"~charmers/wordpress",
+		nil,
+		nil,
+	)
+	// The complete entity id can be retrieved from the response header.
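+	// Here the partial id "~charmers/wordpress" resolves to the latest
+	// matching revision, so the header reads "cs:~charmers/utopic/wordpress-42".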
+ c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.URL.String()) +} + +func (s *ArchiveSuite) TestGetPromulgatedWithPartialId(c *gc.C) { + id := newResolvedURL("cs:~charmers/utopic/wordpress-42", 42) + err := s.store.AddCharmWithArchive( + id, + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + rec := s.assertArchiveDownload( + c, + "wordpress", + nil, + nil, + ) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, id.PromulgatedURL().String()) +} + +func (s *ArchiveSuite) TestGetCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + for i, id := range []*router.ResolvedURL{ + newResolvedURL("~who/utopic/mysql-42", 42), + } { + c.Logf("test %d: %s", i, id) + + // Add a charm to the database (including the archive). + err := s.store.AddCharmWithArchive(id, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + + // Download the charm archive using the API. + s.assertArchiveDownload( + c, + id.URL.Path(), + nil, + nil, + ) + + // Check that the downloads count for the entity has been updated. + key := []string{params.StatsArchiveDownload, "utopic", "mysql", id.URL.User, "42"} + stats.CheckCounterSum(c, s.store, key, false, 1) + // Check that the promulgated download count for the entity has also been updated + key = []string{params.StatsArchiveDownloadPromulgated, "utopic", "mysql", "", "42"} + stats.CheckCounterSum(c, s.store, key, false, 1) + } +} + +func (s *ArchiveSuite) TestGetCountersDisabled(c *gc.C) { + url := newResolvedURL("~charmers/utopic/mysql-42", 42) + // Add a charm to the database (including the archive). + err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + // Download the charm archive using the API, passing stats=0. + s.assertArchiveDownload( + c, + "", + &httptesting.DoRequestParams{URL: storeURL(url.URL.Path() + "/archive?stats=0")}, + nil, + ) + + // Check that the downloads count for the entity has not been updated. 
+	key := []string{params.StatsArchiveDownload, "utopic", "mysql", "", "42"}
+	stats.CheckCounterSum(c, s.store, key, false, 0)
+}
+
+var archivePostErrorsTests = []struct {
+	about           string
+	path            string
+	noContentLength bool
+	expectStatus    int
+	expectMessage   string
+	expectCode      params.ErrorCode
}{{
+	about:         "revision specified",
+	path:          "~charmers/precise/wordpress-23/archive",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "revision specified, but should not be specified",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "no hash given",
+	path:          "~charmers/precise/wordpress/archive",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "hash parameter not specified",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:           "no content length",
+	path:            "~charmers/precise/wordpress/archive?hash=1234563",
+	noContentLength: true,
+	expectStatus:    http.StatusBadRequest,
+	expectMessage:   "Content-Length not specified",
+	expectCode:      params.ErrBadRequest,
+}, {
+	about:         "invalid channel",
+	path:          "~charmers/bad-wolf/trusty/wordpress/archive",
+	expectStatus:  http.StatusNotFound,
+	expectMessage: "not found",
+	expectCode:    params.ErrNotFound,
+}}
+
+func (s *ArchiveSuite) TestPostErrors(c *gc.C) {
+	type exoticReader struct {
+		io.Reader
+	}
+	for i, test := range archivePostErrorsTests {
+		c.Logf("test %d: %s", i, test.about)
+		var body io.Reader = strings.NewReader("bogus")
+		if test.noContentLength {
+			// net/http will automatically add a Content-Length header
+			// if it sees *strings.Reader, but not if it's a type it doesn't
+			// know about.
+			body = exoticReader{body}
+		}
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler: s.srv,
+			URL:     storeURL(test.path),
+			Method:  "POST",
+			Header: http.Header{
+				"Content-Type": {"application/zip"},
+			},
+			Body:         body,
+			Username:     testUsername,
+			Password:     testPassword,
+			ExpectStatus: test.expectStatus,
+			ExpectBody: params.Error{
+				Message: test.expectMessage,
+				Code:    test.expectCode,
+			},
+		})
+	}
+}
+
+func (s *ArchiveSuite) TestConcurrentUploads(c *gc.C) {
+	wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")
+	f, err := os.Open(wordpress.Path)
+	c.Assert(err, gc.IsNil)
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, f)
+	c.Assert(err, gc.IsNil)
+
+	hash, _ := hashOf(bytes.NewReader(buf.Bytes()))
+
+	srv := httptest.NewServer(s.srv)
+	defer srv.Close()
+
+	// Our strategy for testing concurrent uploads is as follows: we
+	// repeatedly issue a batch of simultaneous uploads of the same
+	// charm. Each upload should either succeed, or fail with an
+	// ErrDuplicateUpload error. We make sure that all replies are
+	// like this, and that at least one duplicate upload error is
+	// found, so that we know we've tested that error path.
+
+	errorBodies := make(chan io.ReadCloser)
+
+	// upload performs one upload of the testing charm.
+	// It sends the response body on the errorBodies channel when
+	// it finds an error response.
+ upload := func() { + c.Logf("uploading") + body := bytes.NewReader(buf.Bytes()) + url := srv.URL + storeURL("~charmers/precise/wordpress/archive?hash="+hash) + req, err := http.NewRequest("POST", url, body) + c.Assert(err, gc.IsNil) + req.Header.Set("Content-Type", "application/zip") + req.SetBasicAuth(testUsername, testPassword) + resp, err := http.DefaultClient.Do(req) + if !c.Check(err, gc.IsNil) { + return + } + if resp.StatusCode == http.StatusOK { + resp.Body.Close() + return + } + errorBodies <- resp.Body + } + + // The try loop continues concurrently uploading + // charms until it is told to stop (by closing the try + // channel). It then signals that it has terminated + // by closing errorBodies. + try := make(chan struct{}) + go func(try chan struct{}) { + for _ = range try { + var wg sync.WaitGroup + for p := 0; p < 5; p++ { + wg.Add(1) + go func() { + upload() + wg.Done() + }() + } + wg.Wait() + } + close(errorBodies) + }(try) + + // We continue the loop until we have found an + // error (or the maximum iteration count has + // been exceeded). + foundError := false + count := 0 +loop: + for { + select { + case body, ok := <-errorBodies: + if !ok { + // The try loop has terminated, + // so we need to stop too. + break loop + } + dec := json.NewDecoder(body) + var errResp params.Error + err := dec.Decode(&errResp) + body.Close() + c.Assert(err, gc.IsNil) + c.Assert(errResp, jc.DeepEquals, params.Error{ + Message: "duplicate upload", + Code: params.ErrDuplicateUpload, + }) + // We've found the error we're looking for, + // so we signal to the try loop that it can stop. + // We will process any outstanding error bodies, + // before seeing errorBodies closed and exiting + // the loop. + foundError = true + if try != nil { + close(try) + try = nil + } + case try <- struct{}{}: + // In cases we've seen, the actual maximum value of + // count is 1, but let's allow for serious scheduler vagaries. + if count++; count > 200 { + c.Fatalf("200 tries with no duplicate error") + } + } + } + if !foundError { + c.Errorf("no duplicate-upload errors found") + } +} + +func (s *ArchiveSuite) TestPostCharm(c *gc.C) { + // A charm that did not exist before should get revision 0. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") + + // Subsequent charm uploads should increment the revision by 1. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-1", -1), "mysql") + + // Subsequent development charm uploads should increment the revision by 1. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/precise/wordpress-2", -1), "wordpress") + + // Retrieving the published version returns the last non-development charm. + err := s.store.SetPerms(charm.MustParseURL("~charmers/wordpress"), "read", params.Everyone) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("~charmers/wordpress/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:~charmers/precise/wordpress-1") +} + +func (s *ArchiveSuite) TestPostCurrentVersion(c *gc.C) { + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/precise/wordpress-0", -1), "wordpress") + + // Subsequent charm uploads should not increment the revision by 1. 
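+	// (This works because the uploaded blob's hash matches the hash of
+	// the latest stored revision, so servePostArchive's oldHash == hash
+	// branch returns the existing id rather than allocating a new
+	// revision.)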
+ s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/precise/wordpress-0", -1), "wordpress") +} + +func (s *ArchiveSuite) TestPostDevelopmentPromulgated(c *gc.C) { + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/development/trusty/wordpress-0", 0), "wordpress") + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/trusty/wordpress-1", 1), "mysql") + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/trusty/wordpress-1", 1), "mysql") + + // The promulgated charm can be accessed via its development URL. + err := s.store.SetPerms(charm.MustParseURL("~charmers/development/wordpress"), "read", params.Everyone) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("development/wordpress/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Header().Get(params.EntityIdHeader), gc.Equals, "cs:development/trusty/wordpress-1") + + // The promulgated charm cannot be retrieved using the published URL. + err = s.store.SetPerms(charm.MustParseURL("~charmers/wordpress"), "read", params.Everyone) + c.Assert(err, gc.IsNil) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("wordpress/archive"), + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:wordpress"`, + }, + }) +} + +var uploadAndPublishTests = []struct { + about string + existing string + upload string + expectId string + expectDevelopment bool +}{{ + about: "upload same development entity", + existing: "~who/development/django-0", + upload: "~who/development/django", + expectId: "~who/development/django-0", + expectDevelopment: true, +}, { + about: "upload same published entity", + existing: "~who/django-0", + upload: "~who/django", + expectId: "~who/django-0", +}, { + about: "existing development, upload published", + existing: "~who/development/django-0", + upload: "~who/django", + expectId: "~who/django-0", +}, { + about: "existing published, upload development", + existing: "~who/django-0", + upload: "~who/development/django", + expectId: "~who/development/django-0", +}} + +func (s *ArchiveSuite) TestUploadAndPublish(c *gc.C) { + for i, test := range uploadAndPublishTests { + c.Logf("%d. %s", i, test.about) + + // Upload the pre-existing entity. + rurl := newResolvedURL(test.existing, -1) + s.assertUploadCharm(c, "POST", rurl, "multi-series") + + // Upload the same charm again, using the upload URL. + body, hash, size := archiveInfo(c, "multi-series") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.upload + "/archive?hash=" + hash), + Method: "POST", + ContentLength: size, + Header: http.Header{"Content-Type": {"application/zip"}}, + Body: body, + Username: testUsername, + Password: testPassword, + ExpectBody: params.ArchiveUploadResponse{ + Id: charm.MustParseURL(test.expectId), + }, + }) + + // Check the development flag of the entity. + entity, err := s.store.FindEntity(rurl, "development") + c.Assert(err, gc.IsNil) + c.Assert(entity.Development, gc.Equals, test.expectDevelopment) + + // Remove all entities from the store. + _, err = s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +func (s *ArchiveSuite) TestPostMultiSeriesCharm(c *gc.C) { + // A charm that did not exist before should get revision 0. 
+ s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series") +} + +func (s *ArchiveSuite) TestPostMultiSeriesDevelopmentCharm(c *gc.C) { + // A charm that did not exist before should get revision 0. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/development/juju-gui-0", -1), "multi-series") +} + +var charmPostErrorTests = []struct { + about string + url *charm.URL + charm string + expectStatus int + expectBody interface{} +}{{ + about: "no series", + url: charm.MustParseURL("~charmers/juju-gui-0"), + charm: "wordpress", + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Message: "series not specified in url or charm metadata", + Code: params.ErrEntityIdNotAllowed, + }, +}, { + about: "url series not in metadata", + url: charm.MustParseURL("~charmers/precise/juju-gui-0"), + charm: "multi-series", + expectStatus: http.StatusForbidden, + expectBody: params.Error{ + Message: `"precise" series not listed in charm metadata`, + Code: params.ErrEntityIdNotAllowed, + }, +}, { + about: "bad combination of series", + url: charm.MustParseURL("~charmers/juju-gui-0"), + charm: "multi-series-bad-combination", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Message: `cannot mix series from ubuntu and windows in single charm`, + Code: params.ErrInvalidEntity, + }, +}, { + about: "unknown series", + url: charm.MustParseURL("~charmers/juju-gui-0"), + charm: "multi-series-unknown", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Message: `unrecognised series "nosuchseries" in metadata`, + Code: params.ErrInvalidEntity, + }, +}} + +func (s *ArchiveSuite) TestCharmPostError(c *gc.C) { + for i, test := range charmPostErrorTests { + c.Logf("%d. %s", i, test.about) + s.assertUploadCharmError( + c, + "POST", + test.url, + nil, + test.charm, + test.expectStatus, + test.expectBody, + ) + } +} + +func (s *ArchiveSuite) TestPostMultiSeriesCharmRevisionAfterAllSingleSeriesOnes(c *gc.C) { + // Create some single series versions of the charm + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", -1), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", -1), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", -1), "mysql") + + // Check that the new multi-series revision takes the a revision + // number larger than the largest of all the single series + // revisions. + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", -1), "multi-series") +} + +func (s *ArchiveSuite) TestPostMultiSeriesPromulgatedRevisionAfterAllSingleSeriesOnes(c *gc.C) { + // Create some single series versions of the charm + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/vivid/juju-gui-1", 0), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/trusty/juju-gui-12", 9), "mysql") + s.assertUploadCharm(c, "PUT", newResolvedURL("~charmers/precise/juju-gui-44", 33), "mysql") + + // Check that the new multi-series promulgated revision takes the + // a revision number larger than the largest of all the single + // series revisions. 
+ s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-45", 34), "multi-series") +} + +func (s *ArchiveSuite) TestPostSingleSeriesCharmWhenMultiSeriesVersionExists(c *gc.C) { + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/juju-gui-0", -1), "multi-series") + + s.assertUploadCharmError( + c, + "POST", + charm.MustParseURL("~charmers/saucy/juju-gui-0"), + nil, + "wordpress", + http.StatusForbidden, + params.Error{ + Message: "charm name duplicates multi-series charm name cs:~charmers/juju-gui-0", + Code: params.ErrEntityIdNotAllowed, + }, + ) +} + +func (s *ArchiveSuite) TestPutCharm(c *gc.C) { + s.assertUploadCharm( + c, + "PUT", + newResolvedURL("~charmers/precise/wordpress-3", 3), + "wordpress", + ) + + s.assertUploadCharm( + c, + "PUT", + newResolvedURL("~charmers/precise/wordpress-1", -1), + "wordpress", + ) + + // Check that we get a duplicate-upload error if we try to + // upload to the same revision again. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-3"), + nil, + "mysql", + http.StatusInternalServerError, + params.Error{ + Message: "duplicate upload", + Code: params.ErrDuplicateUpload, + }, + ) + + // Check we get an error if promulgated url already uploaded. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-4"), + charm.MustParseURL("precise/wordpress-3"), + "wordpress", + http.StatusInternalServerError, + params.Error{ + Message: "duplicate upload", + Code: params.ErrDuplicateUpload, + }, + ) + + // Check we get an error if promulgated url has user. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-4"), + charm.MustParseURL("~charmers/precise/wordpress-4"), + "mysql", + http.StatusBadRequest, + params.Error{ + Message: "promulgated URL cannot have a user", + Code: params.ErrBadRequest, + }, + ) + + // Check we get an error if promulgated url has different name. + s.assertUploadCharmError( + c, + "PUT", + charm.MustParseURL("~charmers/precise/wordpress-4"), + charm.MustParseURL("precise/mysql-4"), + "mysql", + http.StatusBadRequest, + params.Error{ + Message: "promulgated URL has incorrect charm name", + Code: params.ErrBadRequest, + }, + ) +} + +func (s *ArchiveSuite) TestPostBundle(c *gc.C) { + // Upload the required charms. + err := s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/mysql-42", 42), + storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/wordpress-47", 47), + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress")) + c.Assert(err, gc.IsNil) + err = s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/logging-1", 1), + storetesting.Charms.CharmArchive(c.MkDir(), "logging")) + c.Assert(err, gc.IsNil) + + // A bundle that did not exist before should get revision 0. + s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-0", -1), "wordpress-simple") + + // Subsequent bundle uploads should increment the + // revision by 1. + s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") + + // Uploading the same archive twice should not increment the revision... + s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-1", -1), "wordpress-with-logging") + + // ... but uploading an archive used by a previous revision should. 
+ s.assertUploadBundle(c, "POST", newResolvedURL("~charmers/bundle/wordpress-simple-2", -1), "wordpress-simple") +} + +func (s *ArchiveSuite) TestPostHashMismatch(c *gc.C) { + content := []byte("some content") + hash, _ := hashOf(bytes.NewReader(content)) + + // Corrupt the content. + copy(content, "bogus") + path := fmt.Sprintf("~charmers/precise/wordpress/archive?hash=%s", hash) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: bytes.NewReader(content), + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: "cannot put archive blob: hash mismatch", + }, + }) +} + +func invalidZip() io.ReadSeeker { + return strings.NewReader("invalid zip content") +} + +func (s *ArchiveSuite) TestPostInvalidCharmZip(c *gc.C) { + s.assertCannotUpload(c, "~charmers/precise/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read charm archive: zip: not a valid zip file") +} + +func (s *ArchiveSuite) TestPostInvalidBundleZip(c *gc.C) { + s.assertCannotUpload(c, "~charmers/bundle/wordpress", invalidZip(), http.StatusBadRequest, params.ErrInvalidEntity, "cannot read bundle archive: zip: not a valid zip file") +} + +var postInvalidCharmMetadataTests = []struct { + about string + spec charmtesting.CharmSpec + expectError string +}{{ + about: "bad provider relation name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +provides: + relation-name: + interface: baz +`, + }, + expectError: "relation relation-name has almost certainly not been changed from the template", +}, { + about: "bad provider interface name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +provides: + baz: + interface: interface-name +`, + }, + expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", +}, { + about: "bad requirer relation name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +requires: + relation-name: + interface: baz +`, + }, + expectError: "relation relation-name has almost certainly not been changed from the template", +}, { + about: "bad requirer interface name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +requires: + baz: + interface: interface-name +`, + }, + expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", +}, { + about: "bad peer relation name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +peers: + relation-name: + interface: baz +`, + }, + expectError: "relation relation-name has almost certainly not been changed from the template", +}, { + about: "bad peer interface name", + spec: charmtesting.CharmSpec{ + Meta: ` +name: foo +summary: bar +description: d +peers: + baz: + interface: interface-name +`, + }, + expectError: "interface interface-name in relation baz has almost certainly not been changed from the template", +}} + +func (s *ArchiveSuite) TestPostInvalidCharmMetadata(c *gc.C) { + for i, test := range postInvalidCharmMetadataTests { + c.Logf("test %d: %s", i, test.about) + ch := charmtesting.NewCharm(c, test.spec) + r := bytes.NewReader(ch.ArchiveBytes()) + s.assertCannotUpload(c, "~charmers/trusty/wordpress", r, http.StatusBadRequest, 
params.ErrInvalidEntity, test.expectError) + } +} + +func (s *ArchiveSuite) TestPostInvalidBundleData(c *gc.C) { + path := storetesting.Charms.BundleArchivePath(c.MkDir(), "bad") + f, err := os.Open(path) + c.Assert(err, gc.IsNil) + defer f.Close() + // Here we exercise both bundle internal verification (bad relation) and + // validation with respect to charms (wordpress and mysql are missing). + expectErr := `bundle verification failed: [` + + `"relation [\"foo:db\" \"mysql:server\"] refers to service \"foo\" not defined in this bundle",` + + `"service \"mysql\" refers to non-existent charm \"mysql\"",` + + `"service \"wordpress\" refers to non-existent charm \"wordpress\""]` + s.assertCannotUpload(c, "~charmers/bundle/wordpress", f, http.StatusBadRequest, params.ErrInvalidEntity, expectErr) +} + +func (s *ArchiveSuite) TestPostCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") + + // Check that the upload count for the entity has been updated. + key := []string{params.StatsArchiveUpload, "precise", "wordpress", "charmers"} + stats.CheckCounterSum(c, s.store, key, false, 1) +} + +func (s *ArchiveSuite) TestPostFailureCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + hash, _ := hashOf(invalidZip()) + doPost := func(url string, expectCode int) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url), + Method: "POST", + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: invalidZip(), + Username: testUsername, + Password: testPassword, + }) + c.Assert(rec.Code, gc.Equals, expectCode, gc.Commentf("body: %s", rec.Body.Bytes())) + } + + // Send a first invalid request (revision specified). + doPost("~charmers/utopic/wordpress-42/archive", http.StatusBadRequest) + // Send a second invalid request (no hash). + doPost("~charmers/utopic/wordpress/archive", http.StatusBadRequest) + // Send a third invalid request (invalid zip). + doPost("~charmers/utopic/wordpress/archive?hash="+hash, http.StatusBadRequest) + + // Check that the failed upload count for the entity has been updated. 
+ key := []string{params.StatsArchiveFailedUpload, "utopic", "wordpress", "charmers"} + stats.CheckCounterSum(c, s.store, key, false, 3) +} + +func (s *ArchiveSuite) TestPostErrorReadsFully(c *gc.C) { + h := s.handler(c) + defer h.Close() + + b := bytes.NewBuffer([]byte("test body")) + r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b) + c.Assert(err, gc.IsNil) + r.Header.Set("Content-Type", "application/zip") + r.SetBasicAuth(testUsername, testPassword) + rec := httptest.NewRecorder() + h.ServeHTTP(rec, r) + c.Assert(rec.Code, gc.Equals, http.StatusBadRequest) + c.Assert(b.Len(), gc.Equals, 0) +} + +func (s *ArchiveSuite) TestPostAuthErrorReadsFully(c *gc.C) { + h := s.handler(c) + defer h.Close() + b := bytes.NewBuffer([]byte("test body")) + r, err := http.NewRequest("POST", "/~charmers/trusty/wordpress/archive", b) + c.Assert(err, gc.IsNil) + r.Header.Set("Content-Type", "application/zip") + rec := httptest.NewRecorder() + h.ServeHTTP(rec, r) + c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized) + c.Assert(b.Len(), gc.Equals, 0) +} + +func (s *ArchiveSuite) TestUploadOfCurrentCharmReadsFully(c *gc.C) { + s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress") + + ch := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + f, err := os.Open(ch.Path) + c.Assert(err, gc.IsNil) + defer f.Close() + + // Calculate blob hashes. + hash := blobstore.NewHash() + _, err = io.Copy(hash, f) + c.Assert(err, gc.IsNil) + hashSum := fmt.Sprintf("%x", hash.Sum(nil)) + + // Simulate upload of current version + h := s.handler(c) + defer h.Close() + b := bytes.NewBuffer([]byte("test body")) + r, err := http.NewRequest("POST", "/~charmers/precise/wordpress/archive?hash="+hashSum, b) + c.Assert(err, gc.IsNil) + r.Header.Set("Content-Type", "application/zip") + r.SetBasicAuth(testUsername, testPassword) + rec := httptest.NewRecorder() + h.ServeHTTP(rec, r) + httptesting.AssertJSONResponse( + c, + rec, + http.StatusOK, + params.ArchiveUploadResponse{ + Id: charm.MustParseURL("~charmers/precise/wordpress-0"), + }, + ) + c.Assert(b.Len(), gc.Equals, 0) +} + +func (s *ArchiveSuite) assertCannotUpload(c *gc.C, id string, content io.ReadSeeker, httpStatus int, errorCode params.ErrorCode, errorMessage string) { + hash, size := hashOf(content) + _, err := content.Seek(0, 0) + c.Assert(err, gc.IsNil) + + path := fmt.Sprintf("%s/archive?hash=%s", id, hash) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "POST", + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: content, + Username: testUsername, + Password: testPassword, + ExpectStatus: httpStatus, + ExpectBody: params.Error{ + Message: errorMessage, + Code: errorCode, + }, + }) + + // TODO(rog) check that the uploaded blob has been deleted, + // by checking that no new blobs have been added to the blob store. +} + +// assertUploadCharm uploads the testing charm with the given name +// through the API. The URL must hold the expected revision +// that the charm will be given when uploaded. 
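+// A typical call, taken from the tests above, looks like:
+//
+//	s.assertUploadCharm(c, "POST", newResolvedURL("~charmers/precise/wordpress-0", -1), "wordpress")
+//
+// which uploads the local "wordpress" test charm and expects it to be
+// assigned revision 0.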
+func (s *commonArchiveSuite) assertUploadCharm(c *gc.C, method string, url *router.ResolvedURL, charmName string) *charm.CharmArchive { + ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName) + id, size := s.assertUpload(c, method, url, ch.Path) + s.assertEntityInfo(c, entityInfo{ + Id: id, + Meta: entityMetaInfo{ + ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, + CharmMeta: ch.Meta(), + CharmConfig: ch.Config(), + CharmActions: ch.Actions(), + }, + }) + return ch +} + +// assertUploadBundle uploads the testing bundle with the given name +// through the API. The URL must hold the expected revision +// that the bundle will be given when uploaded. +func (s *commonArchiveSuite) assertUploadBundle(c *gc.C, method string, url *router.ResolvedURL, bundleName string) { + path := storetesting.Charms.BundleArchivePath(c.MkDir(), bundleName) + b, err := charm.ReadBundleArchive(path) + c.Assert(err, gc.IsNil) + id, size := s.assertUpload(c, method, url, path) + s.assertEntityInfo(c, entityInfo{ + Id: id, + Meta: entityMetaInfo{ + ArchiveSize: ¶ms.ArchiveSizeResponse{Size: size}, + BundleMeta: b.Data(), + }, + }, + ) +} + +func (s *commonArchiveSuite) assertUpload(c *gc.C, method string, url *router.ResolvedURL, fileName string) (id *charm.URL, size int64) { + f, err := os.Open(fileName) + c.Assert(err, gc.IsNil) + defer f.Close() + + // Calculate blob hashes. + hash := blobstore.NewHash() + hash256 := sha256.New() + size, err = io.Copy(io.MultiWriter(hash, hash256), f) + c.Assert(err, gc.IsNil) + hashSum := fmt.Sprintf("%x", hash.Sum(nil)) + hash256Sum := fmt.Sprintf("%x", hash256.Sum(nil)) + _, err = f.Seek(0, 0) + c.Assert(err, gc.IsNil) + + uploadURL := url.UserOwnedURL() + if method == "POST" { + uploadURL.Revision = -1 + } + + path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) + expectId := uploadURL.WithRevision(url.URL.Revision) + expectedPromulgatedId := url.PromulgatedURL() + if expectedPromulgatedId != nil { + path += fmt.Sprintf("&promulgated=%s", expectedPromulgatedId.String()) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: method, + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: f, + Username: testUsername, + Password: testPassword, + ExpectBody: params.ArchiveUploadResponse{ + Id: expectId, + PromulgatedId: expectedPromulgatedId, + }, + }) + + var entity mongodoc.Entity + err = s.store.DB.Entities().FindId(expectId.WithChannel("")).One(&entity) + c.Assert(err, gc.IsNil) + c.Assert(entity.BlobHash, gc.Equals, hashSum) + c.Assert(entity.BlobHash256, gc.Equals, hash256Sum) + c.Assert(entity.PromulgatedURL, gc.DeepEquals, expectedPromulgatedId) + c.Assert(entity.Development, gc.Equals, url.Development) + // Test that the expected entry has been created + // in the blob store. + r, _, err := s.store.BlobStore.Open(entity.BlobName) + c.Assert(err, gc.IsNil) + r.Close() + + return expectId, size +} + +// assertUploadCharmError attempts to upload the testing charm with the +// given name through the API, checking that the attempt fails with the +// specified error. The URL must hold the expected revision that the +// charm will be given when uploaded. 
+func (s *ArchiveSuite) assertUploadCharmError(c *gc.C, method string, url, purl *charm.URL, charmName string, expectStatus int, expectBody interface{}) { + ch := storetesting.Charms.CharmArchive(c.MkDir(), charmName) + s.assertUploadError(c, method, url, purl, ch.Path, expectStatus, expectBody) +} + +// assertUploadError asserts that we get an error when uploading +// the contents of the given file to the given url and promulgated URL. +// The reason this method does not take a *router.ResolvedURL +// is so that we can test what happens when an inconsistent promulgated URL +// is passed in. +func (s *ArchiveSuite) assertUploadError(c *gc.C, method string, url, purl *charm.URL, fileName string, expectStatus int, expectBody interface{}) { + f, err := os.Open(fileName) + c.Assert(err, gc.IsNil) + defer f.Close() + + // Calculate blob hashes. + hash := blobstore.NewHash() + size, err := io.Copy(hash, f) + c.Assert(err, gc.IsNil) + hashSum := fmt.Sprintf("%x", hash.Sum(nil)) + _, err = f.Seek(0, 0) + c.Assert(err, gc.IsNil) + + uploadURL := *url + if method == "POST" { + uploadURL.Revision = -1 + } + + path := fmt.Sprintf("%s/archive?hash=%s", uploadURL.Path(), hashSum) + if purl != nil { + path += fmt.Sprintf("&promulgated=%s", purl.String()) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(path), + Method: method, + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: f, + Username: testUsername, + Password: testPassword, + ExpectStatus: expectStatus, + ExpectBody: expectBody, + }) +} + +var archiveFileErrorsTests = []struct { + about string + path string + expectStatus int + expectMessage string + expectCode params.ErrorCode +}{{ + about: "entity not found", + path: "~charmers/trusty/no-such-42/archive/icon.svg", + expectStatus: http.StatusNotFound, + expectMessage: `no matching charm or bundle for "cs:~charmers/trusty/no-such-42"`, + expectCode: params.ErrNotFound, +}, { + about: "directory listing", + path: "~charmers/utopic/wordpress-0/archive/hooks", + expectStatus: http.StatusForbidden, + expectMessage: "directory listing not allowed", + expectCode: params.ErrForbidden, +}, { + about: "file not found", + path: "~charmers/utopic/wordpress-0/archive/no-such", + expectStatus: http.StatusNotFound, + expectMessage: `file "no-such" not found in the archive`, + expectCode: params.ErrNotFound, +}} + +func (s *ArchiveSuite) TestArchiveFileErrors(c *gc.C) { + wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + url := newResolvedURL("cs:~charmers/utopic/wordpress-0", 0) + err := s.store.AddCharmWithArchive(url, wordpress) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + for i, test := range archiveFileErrorsTests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "GET", + ExpectStatus: test.expectStatus, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: test.expectCode, + }, + }) + } +} + +func (s *ArchiveSuite) TestArchiveFileGet(c *gc.C) { + ch := storetesting.Charms.CharmArchive(c.MkDir(), "all-hooks") + id := newResolvedURL("cs:~charmers/utopic/all-hooks-0", 0) + err := s.store.AddCharmWithArchive(id, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + zipFile, err := zip.OpenReader(ch.Path) + c.Assert(err, 
gc.IsNil) + defer zipFile.Close() + + // Check a file in the root directory. + s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/metadata.yaml") + // Check a file in a subdirectory. + s.assertArchiveFileContents(c, zipFile, "~charmers/utopic/all-hooks-0/archive/hooks/install") +} + +// assertArchiveFileContents checks that the response returned by the +// serveArchiveFile endpoint is correct for the given archive and URL path. +func (s *ArchiveSuite) assertArchiveFileContents(c *gc.C, zipFile *zip.ReadCloser, path string) { + // For example: trusty/django/archive/hooks/install -> hooks/install. + filePath := strings.SplitN(path, "/archive/", 2)[1] + + // Retrieve the expected bytes. + var expectBytes []byte + for _, file := range zipFile.File { + if file.Name == filePath { + r, err := file.Open() + c.Assert(err, gc.IsNil) + defer r.Close() + expectBytes, err = ioutil.ReadAll(r) + c.Assert(err, gc.IsNil) + break + } + } + c.Assert(expectBytes, gc.Not(gc.HasLen), 0) + + // Make the request. + url := storeURL(path) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: url, + }) + + // Ensure the response is what we expect. + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.Bytes(), gc.DeepEquals, expectBytes) + headers := rec.Header() + c.Assert(headers.Get("Content-Length"), gc.Equals, strconv.Itoa(len(expectBytes))) + // We only have text files in the charm repository used for tests. + c.Assert(headers.Get("Content-Type"), gc.Equals, "text/plain; charset=utf-8") + assertCacheControl(c, rec.Header(), true) +} + +func (s *ArchiveSuite) TestBundleCharms(c *gc.C) { + // Populate the store with some testing charms. + mysql := storetesting.Charms.CharmArchive(c.MkDir(), "mysql") + err := s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/saucy/mysql-0", 0), + mysql, + ) + c.Assert(err, gc.IsNil) + riak := storetesting.Charms.CharmArchive(c.MkDir(), "riak") + err = s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/trusty/riak-42", 42), + riak, + ) + c.Assert(err, gc.IsNil) + wordpress := storetesting.Charms.CharmArchive(c.MkDir(), "wordpress") + err = s.store.AddCharmWithArchive( + newResolvedURL("cs:~charmers/utopic/wordpress-47", 47), + wordpress, + ) + c.Assert(err, gc.IsNil) + + // Retrieve the base handler so that we can invoke the + // bundleCharms method on it. + handler := s.handler(c) + defer handler.Close() + + tests := []struct { + about string + ids []string + charms map[string]charm.Charm + }{{ + about: "no ids", + }, { + about: "fully qualified ids", + ids: []string{ + "cs:~charmers/saucy/mysql-0", + "cs:~charmers/trusty/riak-42", + "cs:~charmers/utopic/wordpress-47", + }, + charms: map[string]charm.Charm{ + "cs:~charmers/saucy/mysql-0": mysql, + "cs:~charmers/trusty/riak-42": riak, + "cs:~charmers/utopic/wordpress-47": wordpress, + }, + }, { + about: "partial ids", + ids: []string{"~charmers/utopic/wordpress", "~charmers/riak"}, + charms: map[string]charm.Charm{ + "~charmers/riak": riak, + "~charmers/utopic/wordpress": wordpress, + }, + }, { + about: "charm not found", + ids: []string{"utopic/no-such", "~charmers/mysql"}, + charms: map[string]charm.Charm{ + "~charmers/mysql": mysql, + }, + }, { + about: "no charms found", + ids: []string{ + "cs:~charmers/saucy/mysql-99", // Revision not present. + "cs:~charmers/precise/riak-42", // Series not present. + "cs:~charmers/utopic/django-47", // Name not present. 
+ }, + }, { + about: "repeated charms", + ids: []string{ + "cs:~charmers/saucy/mysql", + "cs:~charmers/trusty/riak-42", + "~charmers/mysql", + }, + charms: map[string]charm.Charm{ + "cs:~charmers/saucy/mysql": mysql, + "cs:~charmers/trusty/riak-42": riak, + "~charmers/mysql": mysql, + }, + }} + + // Run the tests. + for i, test := range tests { + c.Logf("test %d: %s", i, test.about) + charms, err := v5.BundleCharms(handler, test.ids) + c.Assert(err, gc.IsNil) + // Ensure the charms returned are what we expect. + c.Assert(charms, gc.HasLen, len(test.charms)) + for i, ch := range charms { + expectCharm := test.charms[i] + c.Assert(ch.Meta(), jc.DeepEquals, expectCharm.Meta()) + c.Assert(ch.Config(), jc.DeepEquals, expectCharm.Config()) + c.Assert(ch.Actions(), jc.DeepEquals, expectCharm.Actions()) + // Since the charm archive and the charm entity have a slightly + // different concept of what a revision is, and since the revision + // is not used for bundle validation, we can safely avoid checking + // the charm revision. + } + } +} + +func (s *ArchiveSuite) TestDelete(c *gc.C) { + // Add a charm to the database (including the archive). + id := "~charmers/utopic/mysql-42" + url := newResolvedURL(id, -1) + err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + + // Retrieve the corresponding entity. + var entity mongodoc.Entity + err = s.store.DB.Entities().FindId(&url.URL).Select(bson.D{{"blobname", 1}}).One(&entity) + c.Assert(err, gc.IsNil) + + // Delete the charm using the API. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(id + "/archive"), + Method: "DELETE", + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusOK, + }) + + // The entity has been deleted. + count, err := s.store.DB.Entities().FindId(url).Count() + c.Assert(err, gc.IsNil) + c.Assert(count, gc.Equals, 0) + + // The blob has been deleted. + _, _, err = s.store.BlobStore.Open(entity.BlobName) + c.Assert(err, gc.ErrorMatches, "resource.*not found") +} + +func (s *ArchiveSuite) TestDeleteSpecificCharm(c *gc.C) { + // Add a couple of charms to the database. + for _, id := range []string{"~charmers/trusty/mysql-42", "~charmers/utopic/mysql-42", "~charmers/utopic/mysql-47"} { + err := s.store.AddCharmWithArchive( + newResolvedURL(id, -1), + storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + } + + // Delete the second charm using the API. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("~charmers/utopic/mysql-42/archive"), + Method: "DELETE", + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusOK, + }) + + // The other two charms are still present in the database. + urls := []*charm.URL{ + charm.MustParseURL("~charmers/trusty/mysql-42"), + charm.MustParseURL("~charmers/utopic/mysql-47"), + } + count, err := s.store.DB.Entities().Find(bson.D{{ + "_id", bson.D{{"$in", urls}}, + }}).Count() + c.Assert(err, gc.IsNil) + c.Assert(count, gc.Equals, 2) +} + +func (s *ArchiveSuite) TestDeleteNotFound(c *gc.C) { + // Try to delete a non existing charm using the API. 
+ httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("~charmers/utopic/no-such-0/archive"), + Method: "DELETE", + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Message: `no matching charm or bundle for "cs:~charmers/utopic/no-such-0"`, + Code: params.ErrNotFound, + }, + }) +} + +func (s *ArchiveSuite) TestDeleteError(c *gc.C) { + // Add a charm to the database (not including the archive). + id := "~charmers/utopic/mysql-42" + url := newResolvedURL(id, -1) + err := s.store.AddCharm(storetesting.Charms.CharmArchive(c.MkDir(), "mysql"), + charmstore.AddParams{ + URL: url, + BlobName: "no-such-name", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + + // Try to delete the charm using the API. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(id + "/archive"), + Method: "DELETE", + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: `cannot remove blob no-such-name: resource at path "global/no-such-name" not found`, + }, + }) +} + +func (s *ArchiveSuite) TestDeleteCounters(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + // Add a charm to the database (including the archive). + id := "~charmers/utopic/mysql-42" + err := s.store.AddCharmWithArchive( + newResolvedURL(id, -1), + storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + + // Delete the charm using the API. + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Method: "DELETE", + URL: storeURL(id + "/archive"), + Username: testUsername, + Password: testPassword, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // Check that the delete count for the entity has been updated. 
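+	// Note: the counter key layout below (kind, series, name, user and,
+	// optionally, revision) mirrors the other stats checks in this file;
+	// this is an observation from those calls rather than a statement
+	// about the stats package API.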
+ key := []string{params.StatsArchiveDelete, "utopic", "mysql", "charmers", "42"} + stats.CheckCounterSum(c, s.store, key, false, 1) +} + +func (s *ArchiveSuite) TestPostAuthErrors(c *gc.C) { + checkAuthErrors(c, s.srv, "POST", "~charmers/utopic/django/archive") +} + +func (s *ArchiveSuite) TestDeleteAuthErrors(c *gc.C) { + err := s.store.AddCharmWithArchive( + newResolvedURL("~charmers/utopic/django-42", 42), + storetesting.Charms.CharmArchive(c.MkDir(), "wordpress"), + ) + c.Assert(err, gc.IsNil) + checkAuthErrors(c, s.srv, "DELETE", "utopic/django-42/archive") +} + +var archiveAuthErrorsTests = []struct { + about string + header http.Header + username string + password string + expectMessage string +}{{ + about: "no credentials", + expectMessage: "authentication failed: missing HTTP auth header", +}, { + about: "invalid encoding", + header: http.Header{ + "Authorization": {"Basic not-a-valid-base64"}, + }, + expectMessage: "authentication failed: invalid HTTP auth encoding", +}, { + about: "invalid header", + header: http.Header{ + "Authorization": {"Basic " + base64.StdEncoding.EncodeToString([]byte("invalid"))}, + }, + expectMessage: "authentication failed: invalid HTTP auth contents", +}, { + about: "invalid credentials", + username: "no-such", + password: "exterminate!", + expectMessage: "invalid user name or password", +}} + +func checkAuthErrors(c *gc.C, handler http.Handler, method, url string) { + archiveURL := storeURL(url) + for i, test := range archiveAuthErrorsTests { + c.Logf("test %d: %s", i, test.about) + if test.header == nil { + test.header = http.Header{} + } + if method == "POST" { + test.header.Add("Content-Type", "application/zip") + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: handler, + URL: archiveURL, + Method: method, + Header: test.header, + Username: test.username, + Password: test.password, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: params.ErrUnauthorized, + }, + }) + } +} + +// entityInfo holds all the information we want to find +// out about a charm or bundle uploaded to the store. 
+type entityInfo struct { + Id *charm.URL + Meta entityMetaInfo +} + +type entityMetaInfo struct { + ArchiveSize *params.ArchiveSizeResponse `json:"archive-size,omitempty"` + CharmMeta *charm.Meta `json:"charm-metadata,omitempty"` + CharmConfig *charm.Config `json:"charm-config,omitempty"` + CharmActions *charm.Actions `json:"charm-actions,omitempty"` + BundleMeta *charm.BundleData `json:"bundle-metadata,omitempty"` +} + +func (s *commonArchiveSuite) assertEntityInfo(c *gc.C, expect entityInfo) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL( + expect.Id.Path() + "/meta/any" + + "?include=archive-size" + + "&include=charm-metadata" + + "&include=charm-config" + + "&include=charm-actions" + + "&include=bundle-metadata", + ), + Username: testUsername, + Password: testPassword, + ExpectBody: expect, + }) +} + +func (s *ArchiveSuite) TestArchiveFileGetHasCORSHeaders(c *gc.C) { + id := "~charmers/precise/wordpress-0" + s.assertUploadCharm(c, "POST", newResolvedURL(id, -1), "wordpress") + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(fmt.Sprintf("%s/archive/metadata.yaml", id)), + }) + headers := rec.Header() + c.Assert(len(headers["Access-Control-Allow-Origin"]), gc.Equals, 1) + c.Assert(len(headers["Access-Control-Allow-Headers"]), gc.Equals, 1) + c.Assert(headers["Access-Control-Allow-Origin"][0], gc.Equals, "*") + c.Assert(headers["Access-Control-Cache-Max-Age"][0], gc.Equals, "600") + c.Assert(headers["Access-Control-Allow-Headers"][0], gc.Equals, "Bakery-Protocol-Version, Macaroons, X-Requested-With") +} + +var getNewPromulgatedRevisionTests = []struct { + about string + id *charm.URL + expectRev int +}{{ + about: "no base entity", + id: charm.MustParseURL("cs:~mmouse/trusty/mysql-14"), + expectRev: -1, +}, { + about: "not promulgated", + id: charm.MustParseURL("cs:~dduck/trusty/mysql-14"), + expectRev: -1, +}, { + about: "not yet promulgated", + id: charm.MustParseURL("cs:~goofy/trusty/mysql-14"), + expectRev: 0, +}, { + about: "existing promulgated", + id: charm.MustParseURL("cs:~pluto/trusty/mariadb-14"), + expectRev: 4, +}, { + about: "previous promulgated by different user", + id: charm.MustParseURL("cs:~tom/trusty/sed-1"), + expectRev: 5, +}, { + about: "many previous promulgated revisions", + id: charm.MustParseURL("cs:~tom/trusty/awk-5"), + expectRev: 5, +}} + +func (s *ArchiveSuite) TestGetNewPromulgatedRevision(c *gc.C) { + charms := []string{ + "cs:~dduck/trusty/mysql-14", + "14 cs:~goofy/precise/mysql-14", + "3 cs:~pluto/trusty/mariadb-5", + "0 cs:~tom/trusty/sed-0", + "cs:~jerry/trusty/sed-2", + "4 cs:~jerry/trusty/sed-3", + "0 cs:~tom/trusty/awk-0", + "1 cs:~tom/trusty/awk-1", + "2 cs:~tom/trusty/awk-2", + "3 cs:~tom/trusty/awk-3", + "4 cs:~tom/trusty/awk-4", + } + for _, ch := range charms { + url := mustParseResolvedURL(ch) + err := s.store.AddCharm(&relationTestingCharm{}, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + } + handler := s.handler(c) + defer handler.Close() + for i, test := range getNewPromulgatedRevisionTests { + c.Logf("%d. 
%s", i, test.about) + rev, err := v5.GetNewPromulgatedRevision(handler, test.id) + c.Assert(err, gc.IsNil) + c.Assert(rev, gc.Equals, test.expectRev) + } +} + +func hashOfBytes(data []byte) string { + hash := blobstore.NewHash() + hash.Write(data) + return fmt.Sprintf("%x", hash.Sum(nil)) +} + +func hashOf(r io.Reader) (hashSum string, size int64) { + hash := blobstore.NewHash() + n, err := io.Copy(hash, r) + if err != nil { + panic(err) + } + return fmt.Sprintf("%x", hash.Sum(nil)), n +} + +// assertCacheControl asserts that the cache control headers are +// appropriately set. The isPublic parameter specifies +// whether the id in the request represents a public charm or bundle. +func assertCacheControl(c *gc.C, h http.Header, isPublic bool) { + if isPublic { + seconds := v5.ArchiveCachePublicMaxAge / time.Second + c.Assert(h.Get("Cache-Control"), gc.Equals, fmt.Sprintf("public, max-age=%d", seconds)) + } else { + c.Assert(h.Get("Cache-Control"), gc.Equals, "no-cache, must-revalidate") + } +} + +type ArchiveSearchSuite struct { + commonSuite +} + +var _ = gc.Suite(&ArchiveSearchSuite{}) + +func (s *ArchiveSearchSuite) SetUpSuite(c *gc.C) { + s.enableES = true + s.commonSuite.SetUpSuite(c) +} + +func (s *ArchiveSearchSuite) SetUpTest(c *gc.C) { + s.commonSuite.SetUpTest(c) + // TODO (frankban): remove this call when removing the legacy counts logic. + patchLegacyDownloadCountsEnabled(s.AddCleanup, false) +} + +func (s *ArchiveSearchSuite) TestGetSearchUpdate(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + for i, id := range []string{"~charmers/wily/mysql-42", "~who/wily/mysql-42"} { + c.Logf("test %d: %s", i, id) + url := newResolvedURL(id, -1) + + // Add a charm to the database (including the archive). + err := s.store.AddCharmWithArchive(url, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + // Download the charm archive using the API. + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(id + "/archive"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + // Check that the search record for the entity has been updated. 
+	stats.CheckSearchTotalDownloads(c, s.store, &url.URL, 1)
+	}
+}
+
+type ArchiveSuiteWithTerms struct {
+	commonArchiveSuite
+}
+
+var _ = gc.Suite(&ArchiveSuiteWithTerms{})
+
+func (s *ArchiveSuiteWithTerms) SetUpSuite(c *gc.C) {
+	s.commonSuite.SetUpSuite(c)
+	s.enableTerms = true
+	s.enableIdentity = true
+}
+
+func (s *ArchiveSuiteWithTerms) SetUpTest(c *gc.C) {
+	s.commonSuite.SetUpTest(c)
+	s.discharge = dischargeForUser("bob")
+}
+
+func (s *ArchiveSuiteWithTerms) TestGetUserHasAgreedToTermsAndConditions(c *gc.C) {
+	termsDischargeAccessed := false
+	s.dischargeTerms = func(cond, args string) ([]checkers.Caveat, error) {
+		termsDischargeAccessed = true
+		if cond != "has-agreed" || args != "terms-1/1 terms-2/5" {
+			c.Logf("terms %#v", args)
+			return nil, errgo.New("discharge error")
+		}
+		return nil, nil
+	}
+
+	client := httpbakery.NewHTTPClient()
+
+	id := newResolvedURL("cs:~charmers/precise/terms-0", -1)
+	s.assertUploadCharm(c, "POST", id, "terms")
+	err := s.store.SetPerms(&id.URL, "read", "bob")
+	c.Assert(err, gc.IsNil)
+
+	s.assertArchiveDownload(
+		c,
+		"~charmers/precise/terms-0",
+		&httptesting.DoRequestParams{
+			Do: bakeryDo(client),
+		},
+		nil,
+	)
+
+	c.Assert(termsDischargeAccessed, gc.Equals, true)
+}
+
+func (s *ArchiveSuiteWithTerms) TestGetUserHasNotAgreedToTerms(c *gc.C) {
+	s.dischargeTerms = func(_, _ string) ([]checkers.Caveat, error) {
+		return nil, errgo.New("user has not agreed to specified terms and conditions")
+	}
+	client := httpbakery.NewHTTPClient()
+
+	id := newResolvedURL("cs:~charmers/precise/terms-0", -1)
+	s.assertUploadCharm(c, "POST", id, "terms")
+	err := s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	archiveURL := storeURL("~charmers/precise/terms-0/archive")
+	httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler:     s.srv,
+		URL:         archiveURL,
+		Do:          bakeryDo(client),
+		ExpectError: ".*third party refused discharge: cannot discharge: user has not agreed to specified terms and conditions",
+	})
+}
+
+func (s *ArchiveSuiteWithTerms) TestGetIgnoringTermsWithBasicAuth(c *gc.C) {
+	s.dischargeTerms = func(_, _ string) ([]checkers.Caveat, error) {
+		return nil, errgo.New("user has not agreed to specified terms and conditions")
+	}
+
+	id := newResolvedURL("cs:~charmers/precise/terms-0", -1)
+	terms := s.assertUploadCharm(c, "POST", id, "terms")
+	err := s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	archiveBytes, err := ioutil.ReadFile(terms.Path)
+	c.Assert(err, gc.IsNil)
+
+	s.assertArchiveDownload(
+		c,
+		"~charmers/precise/terms-0",
+		&httptesting.DoRequestParams{
+			Header: basicAuthHeader(testUsername, testPassword),
+		},
+		archiveBytes,
+	)
+}
+
+func (s *commonSuite) assertArchiveDownload(c *gc.C, id string, extraParams *httptesting.DoRequestParams, archiveBytes []byte) *httptest.ResponseRecorder {
+	doParams := httptesting.DoRequestParams{}
+	if extraParams != nil {
+		doParams = *extraParams
+	}
+	doParams.Handler = s.srv
+	if doParams.URL == "" {
+		doParams.URL = storeURL(id + "/archive")
+	}
+	rec := httptesting.DoRequest(c, doParams)
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+
+	if archiveBytes != nil {
+		c.Assert(rec.Body.Bytes(), gc.DeepEquals, archiveBytes)
+		c.Assert(rec.Header().Get(params.ContentHashHeader), gc.Equals, hashOfBytes(archiveBytes))
+	}
+	return rec
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,394 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"encoding/base64"
+	"net/http"
+	"strings"
+	"time"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/macaroon.v1"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+const (
+	// PromulgatorsGroup is the name of the group whose members are
+	// allowed to promulgate entities.
+	PromulgatorsGroup = "charmers"
+	// opAccessCharmWithTerms indicates an operation of accessing the archive of
+	// a charm that requires agreement to certain terms and conditions.
+	opAccessCharmWithTerms = "op-get-with-terms"
+	// opOther indicates all other operations.
+	// This operation should not be added as part of a macaroon caveat.
+	opOther               = "op-other"
+	defaultMacaroonExpiry = 24 * time.Hour
+)
+
+// authorize checks that the current user is authorized based on the provided
+// ACL and optional entity. If an authenticated user is required, authorize
+// tries to retrieve the current user in the following ways:
+// - by checking that the HTTP basic auth credentials in the request's
+//   headers match the superuser credentials stored in the API handler;
+// - by checking that there is a valid macaroon in the request's cookies.
+// A params.ErrUnauthorized error is returned if superuser credentials fail;
+// otherwise a macaroon is minted and an httpbakery discharge-required
+// error is returned holding the macaroon.
+//
+// This method also sets h.auth to the returned authorization info.
+func (h *ReqHandler) authorize(req *http.Request, acl []string, alwaysAuth bool, entityId *router.ResolvedURL) (authorization, error) {
+	logger.Infof(
+		"authorize, auth location %q, acl %q, path: %q, method: %q, entity: %#v",
+		h.Handler.config.IdentityLocation,
+		acl,
+		req.URL.Path,
+		req.Method,
+		entityId)
+
+	if !alwaysAuth {
+		// No need to authenticate if the ACL is open to everyone.
+		for _, name := range acl {
+			if name == params.Everyone {
+				return authorization{}, nil
+			}
+		}
+	}
+	entities := []*router.ResolvedURL{}
+	if entityId != nil {
+		entities = append(entities, entityId)
+	}
+
+	auth, verr := h.checkRequest(req, entities, opOther)
+	if verr == nil {
+		if err := h.checkACLMembership(auth, acl); err != nil {
+			return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "")
+		}
+		h.auth = auth
+		return auth, nil
+	}
+	if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok {
+		return authorization{}, errgo.Mask(verr, errgo.Is(params.ErrUnauthorized))
+	}
+
+	// Macaroon verification failed: mint a new macaroon.
+	// We need to deny access for opAccessCharmWithTerms operations because they
+	// may require more specific checks that terms and conditions have been
+	// satisfied.
+	m, err := h.newMacaroon(checkers.DenyCaveat(opAccessCharmWithTerms))
+	if err != nil {
+		return authorization{}, errgo.Notef(err, "cannot mint macaroon")
+	}
+
+	// Request that this macaroon be supplied for all requests
+	// to the whole handler.
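+	// For context, clients are expected to satisfy the discharge-required
+	// error transparently. A minimal client-side sketch (the same pattern
+	// the tests in this package use via bakeryDo):
+	//
+	//	client := httpbakery.NewClient()
+	//	resp, err := client.Do(req) // acquires discharges and retries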
+	// TODO use a relative URL here: router.RelativeURLPath(req.RequestURI, "/")
+	cookiePath := "/"
+	return authorization{}, httpbakery.NewDischargeRequiredErrorForRequest(m, cookiePath, verr, req)
+}
+
+// authorizeEntityAndTerms is similar to the authorize method, but
+// in addition it checks whether the entity metadata specifies
+// any terms and conditions that the user needs to agree to. If so,
+// it will require the user to agree to those terms and conditions
+// by adding a third party caveat addressed to the terms service
+// requiring the user to have agreed to the specified terms.
+func (h *ReqHandler) authorizeEntityAndTerms(req *http.Request, entityIds []*router.ResolvedURL) (authorization, error) {
+	logger.Infof(
+		"authorize entity and terms, auth location %q, terms location %q, path: %q, method: %q, entities: %#v",
+		h.Handler.config.IdentityLocation,
+		h.Handler.config.TermsLocation,
+		req.URL.Path,
+		req.Method,
+		entityIds)
+
+	if len(entityIds) == 0 {
+		return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "entity id not specified")
+	}
+
+	public, acls, requiredTerms, err := h.entityAuthInfo(entityIds)
+	if err != nil {
+		return authorization{}, errgo.Mask(err)
+	}
+
+	// If all entities are open to everyone and none of them defines any
+	// terms, then no authorization is required.
+	if public {
+		return authorization{}, nil
+	}
+
+	if len(requiredTerms) > 0 && h.Handler.config.TermsLocation == "" {
+		return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "charmstore not configured to serve charms with terms and conditions")
+	}
+
+	operation := opOther
+	if len(requiredTerms) > 0 {
+		operation = opAccessCharmWithTerms
+	}
+
+	auth, verr := h.checkRequest(req, entityIds, operation)
+	if verr == nil {
+		for _, acl := range acls {
+			if err := h.checkACLMembership(auth, acl); err != nil {
+				return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "")
+			}
+		}
+		h.auth = auth
+		return auth, nil
+	}
+	if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok {
+		return authorization{}, errgo.Mask(verr, errgo.Is(params.ErrUnauthorized))
+	}
+
+	caveats := []checkers.Caveat{}
+	if len(requiredTerms) > 0 {
+		terms := []string{}
+		for term := range requiredTerms {
+			terms = append(terms, term)
+		}
+		caveats = append(caveats,
+			checkers.Caveat{
+				Location:  h.Handler.config.TermsLocation,
+				Condition: "has-agreed " + strings.Join(terms, " "),
+			},
+		)
+	}
+
+	// Macaroon verification failed: mint a new macaroon.
+	m, err := h.newMacaroon(caveats...)
+	if err != nil {
+		return authorization{}, errgo.Notef(err, "cannot mint macaroon")
+	}
+
+	// Request that this macaroon be supplied for all requests
+	// to the whole handler.
+	// TODO use a relative URL here: router.RelativeURLPath(req.RequestURI, "/")
+	cookiePath := "/"
+	return authorization{}, httpbakery.NewDischargeRequiredErrorForRequest(m, cookiePath, verr, req)
+}
+
+// entityAuthInfo returns authorization information for the entities with the
+// given ids. If public is true, no authorization is required; otherwise acls
+// holds an entry with the read ACL for each given id,
+// and requiredTerms holds entries for all required terms.
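+// For example, a single charm with no terms whose read ACL includes
+// params.Everyone yields public == true, while any charm metadata that
+// declares terms forces public to false and adds entries to requiredTerms.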
+func (h *ReqHandler) entityAuthInfo(entityIds []*router.ResolvedURL) (public bool, acls [][]string, requiredTerms map[string]bool, err error) {
+	acls = make([][]string, len(entityIds))
+	requiredTerms = make(map[string]bool)
+	public = true
+	for i, entityId := range entityIds {
+		entity, err := h.Store.FindEntity(entityId)
+		if err != nil {
+			return false, nil, nil, errgo.Mask(err, errgo.Is(params.ErrNotFound))
+		}
+		baseEntity, err := h.Store.FindBaseEntity(&entityId.URL, "acls", "developmentacls")
+		if err != nil {
+			return false, nil, nil, errgo.Mask(err, errgo.Is(params.ErrNotFound))
+		}
+
+		acls[i] = baseEntity.ACLs.Read
+		if entityId.Development {
+			acls[i] = baseEntity.DevelopmentACLs.Read
+		}
+
+		if entity.CharmMeta == nil || len(entity.CharmMeta.Terms) == 0 {
+			// No need to authenticate if the ACL is open to everyone.
+			publicCharm := false
+			for _, name := range acls[i] {
+				if name == params.Everyone {
+					publicCharm = true
+					break
+				}
+			}
+			public = public && publicCharm
+		} else {
+			public = false
+			for _, term := range entity.CharmMeta.Terms {
+				requiredTerms[term] = true
+			}
+		}
+	}
+
+	return public, acls, requiredTerms, nil
+}
+
+// checkRequest checks for any authorization tokens in the request and returns any
+// found as an authorization. If no suitable credentials are found, or an error occurs,
+// then a zero-valued authorization is returned.
+// It also checks any first party caveats. If entity ids are provided, they will
+// be used to check any "is-entity" first party caveat.
+// In addition it adds a checker that checks whether the operation specified
+// by the operation parameter is allowed.
+func (h *ReqHandler) checkRequest(req *http.Request, entityIds []*router.ResolvedURL, operation string) (authorization, error) {
+	user, passwd, err := parseCredentials(req)
+	if err == nil {
+		if user != h.Handler.config.AuthUsername || passwd != h.Handler.config.AuthPassword {
+			return authorization{}, errgo.WithCausef(nil, params.ErrUnauthorized, "invalid user name or password")
+		}
+		return authorization{Admin: true}, nil
+	}
+	bk := h.Store.Bakery
+	if errgo.Cause(err) != errNoCreds || bk == nil || h.Handler.config.IdentityLocation == "" {
+		return authorization{}, errgo.WithCausef(err, params.ErrUnauthorized, "authentication failed")
+	}
+
+	attrMap, err := httpbakery.CheckRequest(bk, req, nil, checkers.New(
+		checkers.CheckerFunc{
+			Condition_: "is-entity",
+			Check_: func(_, args string) error {
+				return areAllowedEntities(entityIds, args)
+			},
+		},
+		checkers.OperationChecker(operation),
+	))
+	if err != nil {
+		return authorization{}, errgo.Mask(err, errgo.Any)
+	}
+	return authorization{
+		Admin:    false,
+		Username: attrMap[UsernameAttr],
+	}, nil
+}
+
+// areAllowedEntities checks if all entityIds are in the allowedEntities list (space
+// separated).
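+// For example, given allowedEntities "cs:~charmers/utopic/wordpress-42",
+// an operation on cs:~charmers/utopic/wordpress-42 (or on its promulgated
+// counterpart, if it has one) is allowed, while an operation on
+// cs:~charmers/utopic/wordpress-41 is rejected.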
+func areAllowedEntities(entityIds []*router.ResolvedURL, allowedEntities string) error {
+	allowedEntitiesMap := make(map[string]bool)
+	for _, curl := range strings.Fields(allowedEntities) {
+		allowedEntitiesMap[curl] = true
+	}
+	if len(entityIds) == 0 {
+		return errgo.Newf("operation does not involve any of the allowed entities %v", allowedEntities)
+	}
+
+	for _, entityId := range entityIds {
+		if allowedEntitiesMap[entityId.URL.String()] {
+			continue
+		}
+		purl := entityId.PromulgatedURL()
+		if purl != nil {
+			if allowedEntitiesMap[purl.String()] {
+				continue
+			}
+		}
+		return errgo.Newf("operation on entity %v not allowed", entityId)
+	}
+	return nil
+}
+
+// AuthorizeEntity checks that the given HTTP request
+// can access the entity with the given id.
+func (h *ReqHandler) AuthorizeEntity(id *router.ResolvedURL, req *http.Request) error {
+	baseEntity, err := h.Store.FindBaseEntity(&id.URL, "acls", "developmentacls")
+	if err != nil {
+		if errgo.Cause(err) == params.ErrNotFound {
+			return errgo.WithCausef(nil, params.ErrNotFound, "entity %q not found", id)
+		}
+		return errgo.Notef(err, "cannot retrieve entity %q for authorization", id)
+	}
+	acls := baseEntity.ACLs
+	if id.Development {
+		acls = baseEntity.DevelopmentACLs
+	}
+	return h.authorizeWithPerms(req, acls.Read, acls.Write, id)
+}
+
+func (h *ReqHandler) authorizeWithPerms(req *http.Request, read, write []string, entityId *router.ResolvedURL) error {
+	var acl []string
+	switch req.Method {
+	case "DELETE", "PATCH", "POST", "PUT":
+		acl = write
+	default:
+		acl = read
+	}
+	_, err := h.authorize(req, acl, false, entityId)
+	return err
+}
+
+// UsernameAttr is the name of the macaroon attribute declaring the
+// authenticated user name.
+const UsernameAttr = "username"
+
+// authorization contains authorization information extracted from an HTTP request.
+// The zero value for an authorization contains no privileges.
+type authorization struct {
+	Admin    bool
+	Username string
+}
+
+func (h *ReqHandler) groupsForUser(username string) ([]string, error) {
+	if h.Handler.config.IdentityAPIURL == "" {
+		logger.Debugf("IdentityAPIURL not configured, not retrieving groups for %s", username)
+		return nil, nil
+	}
+	// TODO cache groups for a user
+	return h.Handler.identityClient.GroupsForUser(username)
+}
+
+func (h *ReqHandler) checkACLMembership(auth authorization, acl []string) error {
+	if auth.Admin {
+		return nil
+	}
+	if auth.Username == "" {
+		return errgo.New("no username declared")
+	}
+	// First check if access is granted without querying for groups.
+	for _, name := range acl {
+		if name == auth.Username || name == params.Everyone {
+			return nil
+		}
+	}
+	groups, err := h.groupsForUser(auth.Username)
+	if err != nil {
+		logger.Errorf("cannot get groups for %q: %v", auth.Username, err)
+		return errgo.Newf("access denied for user %q", auth.Username)
+	}
+	for _, name := range acl {
+		for _, g := range groups {
+			if g == name {
+				return nil
+			}
+		}
+	}
+	return errgo.Newf("access denied for user %q", auth.Username)
+}
+
+func (h *ReqHandler) newMacaroon(caveats ...checkers.Caveat) (*macaroon.Macaroon, error) {
+	caveats = append(caveats,
+		checkers.NeedDeclaredCaveat(
+			checkers.Caveat{
+				Location:  h.Handler.config.IdentityLocation,
+				Condition: "is-authenticated-user",
+			},
+			UsernameAttr,
+		),
+		checkers.TimeBeforeCaveat(time.Now().Add(defaultMacaroonExpiry)),
+	)
+	// TODO generate different caveats depending on the requested operation
+	// and whether there's a charm id or not.
+	// Mint an appropriate macaroon and send it back to the client.
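+	// The minted macaroon therefore always carries a third-party
+	// "is-authenticated-user" caveat addressed to the identity service
+	// and a first-party expiry caveat of the form
+	//
+	//	time-before <RFC3339 timestamp>
+	//
+	// in addition to any caveats supplied by the caller.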
+ return h.Store.Bakery.NewMacaroon("", nil, caveats) +} + +var errNoCreds = errgo.New("missing HTTP auth header") + +// parseCredentials parses the given request and returns the HTTP basic auth +// credentials included in its header. +func parseCredentials(req *http.Request) (username, password string, err error) { + auth := req.Header.Get("Authorization") + if auth == "" { + return "", "", errNoCreds + } + parts := strings.Fields(auth) + if len(parts) != 2 || parts[0] != "Basic" { + return "", "", errgo.New("invalid HTTP auth header") + } + // Challenge is a base64-encoded "tag:pass" string. + // See RFC 2617, Section 2. + challenge, err := base64.StdEncoding.DecodeString(parts[1]) + if err != nil { + return "", "", errgo.New("invalid HTTP auth encoding") + } + tokens := strings.SplitN(string(challenge), ":", 2) + if len(tokens) != 2 { + return "", "", errgo.New("invalid HTTP auth contents") + } + return tokens[0], tokens[1], nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/auth_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1049 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v5" +) + +func (s *commonSuite) AssertEndpointAuth(c *gc.C, p httptesting.JSONCallParams) { + s.testNonMacaroonAuth(c, p) + s.testMacaroonAuth(c, p) +} + +func (s *commonSuite) testNonMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { + p.Handler = s.noMacaroonSrv + // Check that the request succeeds when provided with the + // correct credentials. + p.Username = "test-user" + p.Password = "test-password" + httptesting.AssertJSONCall(c, p) + + // Check that auth fails with no creds provided. + p.Username = "" + p.Password = "" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) + + // Check that auth fails with the wrong username provided. + p.Username = "wrong" + p.Password = "test-password" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "invalid user name or password", + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) + + // Check that auth fails with the wrong password provided. + p.Username = "test-user" + p.Password = "test-password-wrong" + p.ExpectStatus = http.StatusUnauthorized + p.ExpectBody = params.Error{ + Message: "invalid user name or password", + Code: params.ErrUnauthorized, + } + httptesting.AssertJSONCall(c, p) +} + +func (s *commonSuite) testMacaroonAuth(c *gc.C, p httptesting.JSONCallParams) { + // Make a test third party caveat discharger. 
+	var checkedCaveats []string
+	var mu sync.Mutex
+	var dischargeError error
+	s.discharge = func(cond string, arg string) ([]checkers.Caveat, error) {
+		mu.Lock()
+		defer mu.Unlock()
+		checkedCaveats = append(checkedCaveats, cond+" "+arg)
+		if dischargeError != nil {
+			return nil, dischargeError
+		}
+		return []checkers.Caveat{
+			checkers.DeclaredCaveat("username", "bob"),
+		}, nil
+	}
+	p.Handler = s.srv
+
+	client := httpbakery.NewHTTPClient()
+	cookieJar := &cookieJar{CookieJar: client.Jar}
+	client.Jar = cookieJar
+	p.Do = bakeryDo(client)
+
+	// Check that the call succeeds with simple auth.
+	c.Log("simple auth success")
+	p.Username = "test-user"
+	p.Password = "test-password"
+	httptesting.AssertJSONCall(c, p)
+	c.Assert(checkedCaveats, gc.HasLen, 0)
+	c.Assert(cookieJar.cookieURLs, gc.HasLen, 0)
+
+	// Check that the call gives us the correct
+	// "authentication denied response" without simple auth
+	// and uses the third party checker
+	// and that a cookie is stored at the correct location.
+	// TODO when we allow admin access via macaroon creds,
+	// change this test to expect success.
+	c.Log("macaroon unauthorized error")
+	p.Username, p.Password = "", ""
+	p.ExpectStatus = http.StatusUnauthorized
+	p.ExpectBody = params.Error{
+		Message: `unauthorized: access denied for user "bob"`,
+		Code:    params.ErrUnauthorized,
+	}
+	httptesting.AssertJSONCall(c, p)
+	sort.Strings(checkedCaveats)
+	c.Assert(checkedCaveats, jc.DeepEquals, []string{
+		"is-authenticated-user ",
+	})
+	checkedCaveats = nil
+	c.Assert(cookieJar.cookieURLs, gc.DeepEquals, []string{"http://somehost/"})
+
+	// Check that the call fails with incorrect simple auth info.
+	c.Log("simple auth error")
+	p.Password = "bad-password"
+	p.ExpectStatus = http.StatusUnauthorized
+	p.ExpectBody = params.Error{
+		Message: "authentication failed: missing HTTP auth header",
+		Code:    params.ErrUnauthorized,
+	}
+
+	// Check that it fails when the discharger refuses the discharge.
+	c.Log("macaroon discharge error")
+	client = httpbakery.NewHTTPClient()
+	dischargeError = fmt.Errorf("go away")
+	p.Do = bakeryDo(client) // clear cookies
+	p.Password = ""
+	p.Username = ""
+	p.ExpectError = `cannot get discharge from "https://[^"]*": third party refused discharge: cannot discharge: go away`
+	httptesting.AssertJSONCall(c, p)
+}
+
+type cookieJar struct {
+	cookieURLs []string
+	http.CookieJar
+}
+
+func (j *cookieJar) SetCookies(url *url.URL, cookies []*http.Cookie) {
+	url1 := *url
+	url1.Host = "somehost"
+	for _, cookie := range cookies {
+		if cookie.Path != "" {
+			url1.Path = cookie.Path
+		}
+	}
+	j.cookieURLs = append(j.cookieURLs, url1.String())
+	j.CookieJar.SetCookies(url, cookies)
+}
+
+func noInteraction(*url.URL) error {
+	return fmt.Errorf("unexpected interaction required")
+}
+
+// dischargedAuthCookie retrieves and discharges an authentication macaroon cookie. It adds the provided
+// first-party caveats before discharging the macaroon.
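+// A typical use (the caveat string here is illustrative):
+//
+//	cookie := dischargedAuthCookie(c, s.srv, "is-entity cs:~charmers/utopic/wordpress-42")
+//	req.AddCookie(cookie)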
+func dischargedAuthCookie(c *gc.C, srv http.Handler, caveats ...string) *http.Cookie { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: srv, + URL: storeURL("macaroon"), + Method: "GET", + }) + var m macaroon.Macaroon + err := json.Unmarshal(rec.Body.Bytes(), &m) + c.Assert(err, gc.IsNil) + for _, cav := range caveats { + err := m.AddFirstPartyCaveat(cav) + c.Assert(err, gc.IsNil) + } + client := httpbakery.NewClient() + ms, err := client.DischargeAll(&m) + c.Assert(err, gc.IsNil) + macaroonCookie, err := httpbakery.NewCookie(ms) + c.Assert(err, gc.IsNil) + return macaroonCookie +} + +type authSuite struct { + commonSuite +} + +var _ = gc.Suite(&authSuite{}) + +func (s *authSuite) SetUpSuite(c *gc.C) { + s.enableIdentity = true + s.commonSuite.SetUpSuite(c) +} + +var readAuthorizationTests = []struct { + // about holds the test description. + about string + // username holds the authenticated user name returned by the discharger. + // If empty, an anonymous user is returned. + username string + // groups holds group names the user is member of, as returned by the + // discharger. + groups []string + // readPerm stores a list of users with read permissions. + readPerm []string + // expectStatus is the expected HTTP response status. + // Defaults to 200 status OK. + expectStatus int + // expectBody holds the expected body of the HTTP response. If nil, + // the body is not checked and the response is assumed to be ok. + expectBody interface{} +}{{ + about: "anonymous users are authorized", + readPerm: []string{params.Everyone}, +}, { + about: "everyone is authorized", + username: "dalek", + readPerm: []string{params.Everyone}, +}, { + about: "everyone and a specific user", + username: "dalek", + readPerm: []string{params.Everyone, "janeway"}, +}, { + about: "specific user authorized", + username: "who", + readPerm: []string{"who"}, +}, { + about: "multiple specific users authorized", + username: "picard", + readPerm: []string{"kirk", "picard", "sisko"}, +}, { + about: "nobody authorized", + username: "picard", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "access denied for user", + username: "kirk", + readPerm: []string{"picard", "sisko"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "everyone is authorized (user is member of groups)", + username: "dalek", + groups: []string{"group1", "group2"}, + readPerm: []string{params.Everyone}, +}, { + about: "everyone and a specific group", + username: "dalek", + groups: []string{"group2", "group3"}, + readPerm: []string{params.Everyone, "group1"}, +}, { + about: "specific group authorized", + username: "who", + groups: []string{"group1", "group42", "group2"}, + readPerm: []string{"group42"}, +}, { + about: "multiple specific groups authorized", + username: "picard", + groups: []string{"group2"}, + readPerm: []string{"kirk", "group0", "group2"}, +}, { + about: "no group authorized", + username: "picard", + groups: []string{"group1", "group2"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "access denied for group", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + readPerm: []string{"picard", "sisko", 
"group42", "group47"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}} + +func dischargeForUser(username string) func(_, _ string) ([]checkers.Caveat, error) { + return func(_, _ string) ([]checkers.Caveat, error) { + return []checkers.Caveat{ + checkers.DeclaredCaveat(v5.UsernameAttr, username), + }, nil + } +} + +func (s *authSuite) TestReadAuthorization(c *gc.C) { + for i, test := range readAuthorizationTests { + c.Logf("test %d: %s", i, test.about) + + s.discharge = dischargeForUser(test.username) + s.idM.groups = map[string][]string{ + test.username: test.groups, + } + + // Add a charm to the store, used for testing. + rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) + err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) + c.Assert(err, gc.IsNil) + + // Change the ACLs for the testing charm + // (both published and development versions). + err = s.store.SetPerms(&rurl.URL, "read", test.readPerm...) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read", test.readPerm...) + c.Assert(err, gc.IsNil) + + // Define an helper function used to send requests and check responses. + makeRequest := func(path string, expectStatus int, expectBody interface{}) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Do: bakeryDo(nil), + URL: storeURL(path), + }) + if expectStatus == 0 { + expectStatus = http.StatusOK + } + c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) + if expectBody != nil { + c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) + } + } + + // Perform meta and id requests. + makeRequest("~charmers/wordpress/meta/archive-size", test.expectStatus, test.expectBody) + makeRequest("~charmers/wordpress/expand-id", test.expectStatus, test.expectBody) + + // Perform meta and id requests to the development channel. + makeRequest("~charmers/development/wordpress/meta/archive-size", test.expectStatus, test.expectBody) + makeRequest("~charmers/development/wordpress/expand-id", test.expectStatus, test.expectBody) + + // Remove permissions for the published charm. + err = s.store.SetPerms(&rurl.URL, "read") + c.Assert(err, gc.IsNil) + + // Check that now accessing the published charm is not allowed, + // but accessing the development charm still works as expected. + makeRequest("~charmers/wordpress/meta/archive-size", http.StatusUnauthorized, nil) + makeRequest("~charmers/development/wordpress/meta/archive-size", test.expectStatus, test.expectBody) + + // Remove permissions for the development charm as well. + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "read") + c.Assert(err, gc.IsNil) + + // Check that now accessing the development charm is also denied. + makeRequest("~charmers/development/wordpress/meta/archive-size", http.StatusUnauthorized, nil) + + // Remove all entities from the store. + _, err = s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var writeAuthorizationTests = []struct { + // about holds the test description. + about string + // username holds the authenticated user name returned by the discharger. + // If empty, an anonymous user is returned. + username string + // groups holds group names the user is member of, as returned by the + // discharger. + groups []string + // writePerm stores a list of users with write permissions. 
+ writePerm []string + // expectStatus is the expected HTTP response status. + // Defaults to 200 status OK. + expectStatus int + // expectBody holds the expected body of the HTTP response. If nil, + // the body is not checked and the response is assumed to be ok. + expectBody interface{} +}{{ + about: "anonymous users are not authorized", + writePerm: []string{"who"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "specific user authorized to write", + username: "dalek", + writePerm: []string{"dalek"}, +}, { + about: "multiple users authorized", + username: "sisko", + writePerm: []string{"kirk", "picard", "sisko"}, +}, { + about: "no users authorized", + username: "who", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "who"`, + }, +}, { + about: "specific user unauthorized", + username: "kirk", + writePerm: []string{"picard", "sisko", "janeway"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "access granted for group", + username: "picard", + groups: []string{"group1", "group2"}, + writePerm: []string{"group2"}, +}, { + about: "multiple groups authorized", + username: "picard", + groups: []string{"group1", "group2"}, + writePerm: []string{"kirk", "group0", "group1", "group2"}, +}, { + about: "no group authorized", + username: "picard", + groups: []string{"group1", "group2"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "access denied for group", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + writePerm: []string{"picard", "sisko", "group42", "group47"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}} + +func (s *authSuite) TestWriteAuthorization(c *gc.C) { + for i, test := range writeAuthorizationTests { + c.Logf("test %d: %s", i, test.about) + + s.discharge = dischargeForUser(test.username) + s.idM.groups = map[string][]string{ + test.username: test.groups, + } + + // Add a charm to the store, used for testing. + rurl := newResolvedURL("~charmers/utopic/wordpress-42", -1) + err := s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmDir("wordpress")) + c.Assert(err, gc.IsNil) + + // Change the ACLs for the testing charm. + // (both published and development versions). + err = s.store.SetPerms(&rurl.URL, "write", test.writePerm...) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "write", test.writePerm...) 
+ c.Assert(err, gc.IsNil) + + makeRequest := func(path string, expectStatus int, expectBody interface{}) { + client := httpbakery.NewHTTPClient() + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Do: bakeryDo(client), + URL: storeURL(path), + Method: "PUT", + Header: http.Header{"Content-Type": {"application/json"}}, + Body: strings.NewReader("42"), + }) + if expectStatus == 0 { + expectStatus = http.StatusOK + } + c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) + if expectBody != nil { + c.Assert(rec.Body.String(), jc.JSONEquals, expectBody) + } + } + + // Perform a meta PUT request to the published and development URLs. + makeRequest("~charmers/wordpress/meta/extra-info/key", test.expectStatus, test.expectBody) + makeRequest("~charmers/development/wordpress/meta/extra-info/key", test.expectStatus, test.expectBody) + + // Remove permissions to write on the published entity. + err = s.store.SetPerms(&rurl.URL, "write") + c.Assert(err, gc.IsNil) + + // Check that now writing to the published charm is not allowed, + // but accessing the development charm still works as expected. + makeRequest("~charmers/wordpress/meta/extra-info/key", http.StatusUnauthorized, nil) + makeRequest("~charmers/development/wordpress/meta/extra-info/key", test.expectStatus, test.expectBody) + + // Remove write permissions for the development charm as well. + err = s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "write") + c.Assert(err, gc.IsNil) + + // Check that now modifying the development charm is also denied. + makeRequest("~charmers/development/wordpress/meta/extra-info/key", http.StatusUnauthorized, nil) + + // Remove all entities from the store. + _, err = s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +var uploadEntityAuthorizationTests = []struct { + // about holds the test description. + about string + // username holds the authenticated user name returned by the discharger. + // If empty, an anonymous user is returned. + username string + // groups holds group names the user is member of, as returned by the + // discharger. + groups []string + // id holds the id of the entity to be uploaded. + id string + // promulgated holds whether the corresponding promulgated entity must be + // already present in the charm store before performing the upload. + promulgated bool + // developmentWriteAcls can be used to set customized write ACLs for the + // development entity before performing the upload. If empty, default ACLs + // are used. + developmentWriteAcls []string + // writeAcls can be used to set customized write ACLs for the published + // entity before performing the upload. If empty, default ACLs are used. + writeAcls []string + // expectStatus is the expected HTTP response status. + // Defaults to 200 status OK. + expectStatus int + // expectBody holds the expected body of the HTTP response. If nil, + // the body is not checked and the response is assumed to be ok. 
+ expectBody interface{} +}{{ + about: "user owned entity", + username: "who", + id: "~who/utopic/django", +}, { + about: "user owned development entity", + username: "who", + id: "~who/development/utopic/django", +}, { + about: "group owned entity", + username: "dalek", + groups: []string{"group1", "group2"}, + id: "~group1/utopic/django", +}, { + about: "group owned development entity", + username: "dalek", + groups: []string{"group1", "group2"}, + id: "~group1/development/utopic/django", +}, { + about: "specific group", + username: "dalek", + groups: []string{"group42"}, + id: "~group42/utopic/django", +}, { + about: "promulgated entity", + username: "sisko", + groups: []string{"charmers", "group2"}, + id: "~charmers/utopic/django", + promulgated: true, +}, { + about: "promulgated entity in development", + username: "sisko", + groups: []string{"group1", "charmers"}, + id: "~charmers/development/utopic/django", + promulgated: true, +}, { + about: "unauthorized: promulgated entity", + username: "sisko", + groups: []string{"group1", "group2"}, + id: "~charmers/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "sisko"`, + }, +}, { + about: "unauthorized: promulgated entity in development", + username: "sisko", + groups: []string{"group1", "group2"}, + id: "~charmers/development/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "sisko"`, + }, +}, { + about: "unauthorized: anonymous user", + id: "~who/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: anonymous user, development entity", + id: "~who/development/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: anonymous user and promulgated entity", + id: "~charmers/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: anonymous user and promulgated entity in development", + id: "~charmers/development/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: "unauthorized: no username declared", + }, +}, { + about: "unauthorized: user does not match", + username: "kirk", + id: "~picard/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: user does not match for a development entity", + username: "kirk", + id: "~picard/development/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: group does not match", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + id: "~group0/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: 
`unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: group does not match for a development entity", + username: "kirk", + groups: []string{"group1", "group2", "group3"}, + id: "~group0/development/utopic/django", + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "kirk"`, + }, +}, { + about: "unauthorized: specific group and promulgated entity", + username: "janeway", + groups: []string{"group1"}, + id: "~charmers/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "janeway"`, + }, +}, { + about: "unauthorized: specific group and promulgated entity in development", + username: "janeway", + groups: []string{"group1"}, + id: "~charmers/development/utopic/django", + promulgated: true, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "janeway"`, + }, +}, { + about: "unauthorized: published entity no development permissions", + username: "picard", + id: "~picard/wily/django", + developmentWriteAcls: []string{"group2"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}, { + about: "unauthorized: published entity no published permissions", + username: "picard", + id: "~picard/wily/django", + writeAcls: []string{"kirk"}, + expectStatus: http.StatusUnauthorized, + expectBody: params.Error{ + Code: params.ErrUnauthorized, + Message: `unauthorized: access denied for user "picard"`, + }, +}} + +func (s *authSuite) TestUploadEntityAuthorization(c *gc.C) { + for i, test := range uploadEntityAuthorizationTests { + c.Logf("test %d: %s", i, test.about) + + s.discharge = dischargeForUser(test.username) + s.idM.groups = map[string][]string{ + test.username: test.groups, + } + + // Prepare the expected status. + expectStatus := test.expectStatus + if expectStatus == 0 { + expectStatus = http.StatusOK + } + + // Add a pre-existing entity if required. + if test.promulgated || len(test.developmentWriteAcls) != 0 || len(test.writeAcls) != 0 { + id := charm.MustParseURL(test.id).WithRevision(0) + revision := -1 + if test.promulgated { + revision = 1 + } + rurl := newResolvedURL(id.String(), revision) + s.store.AddCharmWithArchive(rurl, storetesting.Charms.CharmArchive(c.MkDir(), "mysql")) + if len(test.developmentWriteAcls) != 0 { + s.store.SetPerms(rurl.URL.WithChannel(charm.DevelopmentChannel), "write", test.developmentWriteAcls...) + } + if len(test.writeAcls) != 0 { + s.store.SetPerms(&rurl.URL, "write", test.writeAcls...) + } + } + + // Try to upload the entity. + body, hash, size := archiveInfo(c, "wordpress") + defer body.Close() + client := httpbakery.NewHTTPClient() + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + Do: bakeryDo(client), + URL: storeURL(test.id + "/archive?hash=" + hash), + Method: "POST", + ContentLength: size, + Header: http.Header{ + "Content-Type": {"application/zip"}, + }, + Body: body, + }) + c.Assert(rec.Code, gc.Equals, expectStatus, gc.Commentf("body: %s", rec.Body)) + if test.expectBody != nil { + c.Assert(rec.Body.String(), jc.JSONEquals, test.expectBody) + } + + // Remove all entities from the store. 
+		_, err := s.store.DB.Entities().RemoveAll(nil)
+		c.Assert(err, gc.IsNil)
+		_, err = s.store.DB.BaseEntities().RemoveAll(nil)
+		c.Assert(err, gc.IsNil)
+	}
+}
+
+type readSeekCloser interface {
+	io.ReadCloser
+	io.Seeker
+}
+
+// archiveInfo prepares a zip archive of an entity and returns a reader for the
+// archive, its blob hash and size.
+func archiveInfo(c *gc.C, name string) (r readSeekCloser, hashSum string, size int64) {
+	ch := storetesting.Charms.CharmArchive(c.MkDir(), name)
+	f, err := os.Open(ch.Path)
+	c.Assert(err, gc.IsNil)
+	hash, size := hashOf(f)
+	_, err = f.Seek(0, 0)
+	c.Assert(err, gc.IsNil)
+	return f, hash, size
+}
+
+var isEntityCaveatTests = []struct {
+	url         string
+	expectError string
+}{{
+	url: "~charmers/utopic/wordpress-42/archive",
+}, {
+	url: "~charmers/utopic/wordpress-42/meta/hash",
+}, {
+	url: "wordpress/archive",
+}, {
+	url: "wordpress/meta/hash",
+}, {
+	url: "utopic/wordpress-10/archive",
+}, {
+	url: "utopic/wordpress-10/meta/hash",
+}, {
+	url:         "~charmers/utopic/wordpress-41/archive",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`,
+}, {
+	url:         "~charmers/utopic/wordpress-41/meta/hash",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:~charmers/utopic/wordpress-41 not allowed`,
+}, {
+	url:         "utopic/wordpress-9/archive",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`,
+}, {
+	url:         "utopic/wordpress-9/meta/hash",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation on entity cs:utopic/wordpress-9 not allowed`,
+}, {
+	url:         "log",
+	expectError: `verification failed: caveat "is-entity cs:~charmers/utopic/wordpress-42" not satisfied: operation does not involve any of the allowed entities cs:~charmers/utopic/wordpress-42`,
+}}
+
+func (s *authSuite) TestIsEntityCaveat(c *gc.C) {
+	s.discharge = func(_, _ string) ([]checkers.Caveat, error) {
+		return []checkers.Caveat{{
+			Condition: "is-entity cs:~charmers/utopic/wordpress-42",
+		},
+			checkers.DeclaredCaveat(v5.UsernameAttr, "bob"),
+		}, nil
+	}
+
+	// Add a charm to the store, used for testing.
+	err := s.store.AddCharmWithArchive(
+		newResolvedURL("~charmers/utopic/wordpress-41", 9),
+		storetesting.Charms.CharmDir("wordpress"))
+	c.Assert(err, gc.IsNil)
+	err = s.store.AddCharmWithArchive(
+		newResolvedURL("~charmers/utopic/wordpress-42", 10),
+		storetesting.Charms.CharmDir("wordpress"))
+	c.Assert(err, gc.IsNil)
+	// Change the ACLs for the testing charm.
+	err = s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "read", "bob")
+	c.Assert(err, gc.IsNil)
+
+	for i, test := range isEntityCaveatTests {
+		c.Logf("test %d: %s", i, test.url)
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			Do:      bakeryDo(nil),
+			URL:     storeURL(test.url),
+			Method:  "GET",
+		})
+		if test.expectError != "" {
+			c.Assert(rec.Code, gc.Equals, http.StatusUnauthorized)
+			var respErr httpbakery.Error
+			err := json.Unmarshal(rec.Body.Bytes(), &respErr)
+			c.Assert(err, gc.IsNil)
+			c.Assert(respErr.Message, gc.Matches, test.expectError)
+			continue
+		}
+		c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes()))
+	}
+}
+
+func (s *authSuite) TestDelegatableMacaroon(c *gc.C) {
+	// Create a new server with a third party discharger.
+	s.discharge = dischargeForUser("bob")
+
+	// First check that we get a macaraq error when using a vanilla http do
+	// request with both bakery protocol versions.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("delegatable-macaroon"),
+		Header:  http.Header{"Bakery-Protocol-Version": {"1"}},
+		ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) {
+			// Allow any body - the next check will check that it's a valid macaroon.
+		}),
+		ExpectStatus: http.StatusUnauthorized,
+	})
+
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("delegatable-macaroon"),
+		ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) {
+			// Allow any body - the next check will check that it's a valid macaroon.
+		}),
+		ExpectStatus: http.StatusProxyAuthRequired,
+	})
+
+	client := httpbakery.NewHTTPClient()
+
+	now := time.Now()
+	var gotBody json.RawMessage
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.srv,
+		URL:     storeURL("delegatable-macaroon"),
+		ExpectBody: httptesting.BodyAsserter(func(c *gc.C, m json.RawMessage) {
+			gotBody = m
+		}),
+		Do:           bakeryDo(client),
+		ExpectStatus: http.StatusOK,
+	})
+
+	c.Assert(gotBody, gc.NotNil)
+	var m macaroon.Macaroon
+	err := json.Unmarshal(gotBody, &m)
+	c.Assert(err, gc.IsNil)
+
+	caveats := m.Caveats()
+	foundExpiry := false
+	for _, cav := range caveats {
+		cond, arg, err := checkers.ParseCaveat(cav.Id)
+		c.Assert(err, gc.IsNil)
+		switch cond {
+		case checkers.CondTimeBefore:
+			t, err := time.Parse(time.RFC3339Nano, arg)
+			c.Assert(err, gc.IsNil)
+			c.Assert(t, jc.TimeBetween(now.Add(v5.DelegatableMacaroonExpiry), now.Add(v5.DelegatableMacaroonExpiry+time.Second)))
+			foundExpiry = true
+		}
+	}
+	c.Assert(foundExpiry, jc.IsTrue)
+
+	// Now check that we can use the obtained macaroon to do stuff
+	// as the declared user.
+
+	err = s.store.AddCharmWithArchive(
+		newResolvedURL("~charmers/utopic/wordpress-41", 9),
+		storetesting.Charms.CharmDir("wordpress"))
+	c.Assert(err, gc.IsNil)
+	// Change the ACLs for the testing charm.
+	err = s.store.SetPerms(charm.MustParseURL("cs:~charmers/wordpress"), "read", "bob")
+	c.Assert(err, gc.IsNil)
+
+	// First check that we require authorization to access the charm.
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("~charmers/utopic/wordpress/meta/id-name"),
+		Method:  "GET",
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusProxyAuthRequired)
+
+	// Then check that the request succeeds if we provide the delegatable
+	// macaroon.
+ + client = httpbakery.NewHTTPClient() + u, err := url.Parse("http://127.0.0.1") + c.Assert(err, gc.IsNil) + err = httpbakery.SetCookie(client.Jar, u, macaroon.Slice{&m}) + c.Assert(err, gc.IsNil) + + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("~charmers/utopic/wordpress/meta/id-name"), + ExpectBody: params.IdNameResponse{ + Name: "wordpress", + }, + + ExpectStatus: http.StatusOK, + Do: bakeryDo(client), + }) +} + +func (s *authSuite) TestDelegatableMacaroonWithBasicAuth(c *gc.C) { + // First check that we get a macaraq error when using a vanilla http do + // request. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + Username: testUsername, + Password: testPassword, + URL: storeURL("delegatable-macaroon"), + ExpectBody: params.Error{ + Code: params.ErrForbidden, + Message: "delegatable macaroon is not obtainable using admin credentials", + }, + ExpectStatus: http.StatusForbidden, + }) +} + +func (s *authSuite) TestGroupsForUserSuccess(c *gc.C) { + h := s.handler(c) + defer h.Close() + s.idM.groups = map[string][]string{ + "bob": {"one", "two"}, + } + groups, err := v5.GroupsForUser(h, "bob") + c.Assert(err, gc.IsNil) + c.Assert(groups, jc.DeepEquals, []string{"one", "two"}) +} + +func (s *authSuite) TestGroupsForUserWithNoIdentity(c *gc.C) { + h := s.handler(c) + defer h.Close() + groups, err := v5.GroupsForUser(h, "someone") + c.Assert(err, gc.IsNil) + c.Assert(groups, gc.HasLen, 0) +} + +func (s *authSuite) TestGroupsForUserWithInvalidIdentityURL(c *gc.C) { + s.PatchValue(&s.srvParams.IdentityAPIURL, ":::::") + h := s.handler(c) + defer h.Close() + groups, err := v5.GroupsForUser(h, "someone") + c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: cannot GET \"/v1/u/someone/groups\": cannot create request for \":::::/v1/u/someone/groups\": parse :::::/v1/u/someone/groups: missing protocol scheme`) + c.Assert(groups, gc.HasLen, 0) +} + +func (s *authSuite) TestGroupsForUserWithInvalidBody(c *gc.C) { + h := s.handler(c) + defer h.Close() + s.idM.body = "bad" + s.idM.contentType = "application/json" + groups, err := v5.GroupsForUser(h, "someone") + c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: cannot unmarshal response: invalid character 'b' looking for beginning of value`) + c.Assert(groups, gc.HasLen, 0) +} + +func (s *authSuite) TestGroupsForUserWithErrorResponse(c *gc.C) { + h := s.handler(c) + defer h.Close() + s.idM.body = `{"message":"some error","code":"some code"}` + s.idM.status = http.StatusUnauthorized + s.idM.contentType = "application/json" + groups, err := v5.GroupsForUser(h, "someone") + c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: some error`) + c.Assert(groups, gc.HasLen, 0) +} + +func (s *authSuite) TestGroupsForUserWithBadErrorResponse(c *gc.C) { + h := s.handler(c) + defer h.Close() + s.idM.body = `{"message":"some error"` + s.idM.status = http.StatusUnauthorized + s.idM.contentType = "application/json" + groups, err := v5.GroupsForUser(h, "someone") + c.Assert(err, gc.ErrorMatches, `cannot get groups for someone: bad status "401 Unauthorized"`) + c.Assert(groups, gc.HasLen, 0) +} + +type errorTransport string + +func (e errorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, errgo.New(string(e)) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/common_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/common_test.go 1970-01-01 00:00:00 +0000 +++ 
src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/common_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,296 @@
+package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"time"
+
+	"github.com/juju/loggo"
+	jujutesting "github.com/juju/testing"
+	"github.com/julienschmidt/httprouter"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/bakerytest"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/mgo.v2"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/storetesting"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+)
+
+var mgoLogger = loggo.GetLogger("mgo")
+
+func init() {
+	mgo.SetLogger(mgoLog{})
+}
+
+type mgoLog struct{}
+
+func (mgoLog) Output(calldepth int, s string) error {
+	mgoLogger.LogCallf(calldepth+1, loggo.INFO, "%s", s)
+	return nil
+}
+
+type commonSuite struct {
+	jujutesting.IsolatedMgoSuite
+
+	// srv holds the store HTTP handler.
+	srv *charmstore.Server
+
+	// srvParams holds the parameters that the
+	// srv handler was started with.
+	srvParams charmstore.ServerParams
+
+	// noMacaroonSrv holds the store HTTP handler
+	// for an instance of the store without identity
+	// enabled. If enableIdentity is false, this is
+	// the same as srv.
+	noMacaroonSrv *charmstore.Server
+
+	// noMacaroonSrvParams holds the parameters that the
+	// noMacaroonSrv handler was started with.
+	noMacaroonSrvParams charmstore.ServerParams
+
+	// store holds an instance of *charmstore.Store
+	// that can be used to access the charmstore database
+	// directly.
+	store *charmstore.Store
+
+	// esSuite is set only when enableES is set to true.
+	esSuite *storetesting.ElasticSearchSuite
+
+	// discharge holds the function that will be used
+	// to check third party caveats by the mock
+	// discharger. This will be ignored if enableIdentity was
+	// not true before commonSuite.SetUpTest is invoked.
+	//
+	// It may be set by tests to influence the behavior of the
+	// discharger.
+	discharge func(cav, arg string) ([]checkers.Caveat, error)
+
+	discharger *bakerytest.Discharger
+	idM        *idM
+	idMServer  *httptest.Server
+
+	dischargeTerms  func(cav, arg string) ([]checkers.Caveat, error)
+	termsDischarger *bakerytest.Discharger
+	enableTerms     bool
+
+	// The following fields may be set before
+	// SetUpSuite is invoked on commonSuite
+	// and influence how the suite sets itself up.
+
+	// enableIdentity holds whether the charmstore server
+	// will be started with a configured identity service.
+	enableIdentity bool
+
+	// enableES holds whether the charmstore server will be
+	// started with Elastic Search enabled.
+	enableES bool
+
+	// maxMgoSessions specifies the value that will be given
+	// to config.MaxMgoSessions when calling charmstore.NewServer.
+ maxMgoSessions int +} + +func (s *commonSuite) SetUpSuite(c *gc.C) { + s.IsolatedMgoSuite.SetUpSuite(c) + if s.enableES { + s.esSuite = new(storetesting.ElasticSearchSuite) + s.esSuite.SetUpSuite(c) + } +} + +func (s *commonSuite) TearDownSuite(c *gc.C) { + if s.esSuite != nil { + s.esSuite.TearDownSuite(c) + } +} + +func (s *commonSuite) SetUpTest(c *gc.C) { + s.IsolatedMgoSuite.SetUpTest(c) + if s.esSuite != nil { + s.esSuite.SetUpTest(c) + } + if s.enableIdentity { + s.idM = newIdM() + s.idMServer = httptest.NewServer(s.idM) + } + s.startServer(c) +} + +func (s *commonSuite) TearDownTest(c *gc.C) { + s.store.Pool().Close() + s.store.Close() + s.srv.Close() + s.noMacaroonSrv.Close() + if s.esSuite != nil { + s.esSuite.TearDownTest(c) + } + if s.discharger != nil { + s.discharger.Close() + s.idMServer.Close() + } + if s.termsDischarger != nil { + s.termsDischarger.Close() + } + s.IsolatedMgoSuite.TearDownTest(c) +} + +// startServer creates a new charmstore server. +func (s *commonSuite) startServer(c *gc.C) { + config := charmstore.ServerParams{ + AuthUsername: testUsername, + AuthPassword: testPassword, + StatsCacheMaxAge: time.Nanosecond, + MaxMgoSessions: s.maxMgoSessions, + } + keyring := bakery.NewPublicKeyRing() + if s.enableIdentity { + s.discharge = func(_, _ string) ([]checkers.Caveat, error) { + return nil, errgo.New("no discharge") + } + discharger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) { + return s.discharge(cond, arg) + }) + config.IdentityLocation = discharger.Location() + config.IdentityAPIURL = s.idMServer.URL + pk, err := httpbakery.PublicKeyForLocation(http.DefaultClient, discharger.Location()) + c.Assert(err, gc.IsNil) + err = keyring.AddPublicKeyForLocation(discharger.Location(), true, pk) + c.Assert(err, gc.IsNil) + } + if s.enableTerms { + s.dischargeTerms = func(_, _ string) ([]checkers.Caveat, error) { + return nil, errgo.New("no discharge") + } + termsDischarger := bakerytest.NewDischarger(nil, func(_ *http.Request, cond string, arg string) ([]checkers.Caveat, error) { + return s.dischargeTerms(cond, arg) + }) + config.TermsLocation = termsDischarger.Location() + pk, err := httpbakery.PublicKeyForLocation(http.DefaultClient, termsDischarger.Location()) + c.Assert(err, gc.IsNil) + err = keyring.AddPublicKeyForLocation(termsDischarger.Location(), true, pk) + c.Assert(err, gc.IsNil) + } + config.PublicKeyLocator = keyring + var si *charmstore.SearchIndex + if s.enableES { + si = &charmstore.SearchIndex{ + Database: s.esSuite.ES, + Index: s.esSuite.TestIndex, + } + } + db := s.Session.DB("charmstore") + var err error + s.srv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v5.NewAPIHandler}) + c.Assert(err, gc.IsNil) + s.srvParams = config + + if s.enableIdentity { + config.IdentityLocation = "" + config.PublicKeyLocator = nil + config.IdentityAPIURL = "" + s.noMacaroonSrv, err = charmstore.NewServer(db, si, config, map[string]charmstore.NewAPIHandlerFunc{"v4": v5.NewAPIHandler}) + c.Assert(err, gc.IsNil) + } else { + s.noMacaroonSrv = s.srv + } + s.noMacaroonSrvParams = config + s.store = s.srv.Pool().Store() +} + +// handler returns a request handler that can be +// used to invoke private methods. The caller +// is responsible for calling Put on the returned handler. 
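+//
+// A typical use, following the tests above, is:
+//
+//	h := s.handler(c)
+//	defer h.Close()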
+func (s *commonSuite) handler(c *gc.C) *v5.ReqHandler {
+	h := v5.New(s.store.Pool(), s.srvParams)
+	defer h.Close()
+	rh, err := h.NewReqHandler()
+	c.Assert(err, gc.IsNil)
+	// It would be nice if we could call s.AddCleanup here
+	// to call rh.Put when the test has completed, but
+	// unfortunately CleanupSuite.TearDownTest runs
+	// after MgoSuite.TearDownTest, so that's not an option.
+	return rh
+}
+
+func storeURL(path string) string {
+	return "/v4/" + path
+}
+
+// bakeryDo returns a Do function, suitable for httptesting.DoRequestParams,
+// that sends requests through an httpbakery client so that macaroon
+// discharges are handled transparently. If client is nil, a new
+// httpbakery HTTP client is used.
+func bakeryDo(client *http.Client) func(*http.Request) (*http.Response, error) {
+	if client == nil {
+		client = httpbakery.NewHTTPClient()
+	}
+	bclient := httpbakery.NewClient()
+	bclient.Client = client
+	return func(req *http.Request) (*http.Response, error) {
+		if req.Body != nil {
+			body := req.Body.(io.ReadSeeker)
+			req.Body = nil
+			return bclient.DoWithBody(req, body)
+		}
+		return bclient.Do(req)
+	}
+}
+
+type idM struct {
+	// groups may be set to determine the mapping
+	// from user to groups for that user.
+	groups map[string][]string
+
+	// body may be set to cause serveGroups to return
+	// an arbitrary HTTP response body.
+	body string
+
+	// contentType is the content type to use when body is not "".
+	contentType string
+
+	// status may be set to indicate the HTTP status code
+	// when body is not empty.
+	status int
+
+	router *httprouter.Router
+}
+
+func newIdM() *idM {
+	idM := &idM{
+		groups: make(map[string][]string),
+		router: httprouter.New(),
+	}
+	idM.router.GET("/v1/u/:user/groups", idM.serveGroups)
+	idM.router.GET("/v1/u/:user/idpgroups", idM.serveGroups)
+	return idM
+}
+
+func (idM *idM) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	idM.router.ServeHTTP(w, req)
+}
+
+func (idM *idM) serveGroups(w http.ResponseWriter, req *http.Request, p httprouter.Params) {
+	if idM.body != "" {
+		if idM.contentType != "" {
+			w.Header().Set("Content-Type", idM.contentType)
+		}
+		if idM.status != 0 {
+			w.WriteHeader(idM.status)
+		}
+		w.Write([]byte(idM.body))
+		return
+	}
+	u := p.ByName("user")
+	if u == "" {
+		panic("no user")
+	}
+	w.Header().Set("Content-Type", "application/json")
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(idM.groups[u]); err != nil {
+		panic(err)
+	}
+}
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,236 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "archive/zip" + "bytes" + "fmt" + "io" + "net/http" + "path" + "strings" + + "github.com/juju/xml" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/juju/jujusvg.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +// GET id/diagram.svg +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-iddiagramsvg +func (h *ReqHandler) serveDiagram(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { + if id.URL.Series != "bundle" { + return errgo.WithCausef(nil, params.ErrNotFound, "diagrams not supported for charms") + } + entity, err := h.Store.FindEntity(id, "bundledata") + if err != nil { + return errgo.Mask(err, errgo.Is(params.ErrNotFound)) + } + + var urlErr error + // TODO consider what happens when a charm's SVG does not exist. + canvas, err := jujusvg.NewFromBundle(entity.BundleData, func(id *charm.URL) string { + // TODO change jujusvg so that the iconURL function can + // return an error. + absPath := "/" + id.Path() + "/icon.svg" + p, err := router.RelativeURLPath(req.RequestURI, absPath) + if err != nil { + urlErr = errgo.Notef(err, "cannot make relative URL from %q and %q", req.RequestURI, absPath) + } + return p + }, nil) + if err != nil { + return errgo.Notef(err, "cannot create canvas") + } + if urlErr != nil { + return urlErr + } + setArchiveCacheControl(w.Header(), h.isPublic(id.URL)) + w.Header().Set("Content-Type", "image/svg+xml") + canvas.Marshal(w) + return nil +} + +// These are all forms of README files +// actually observed in charms in the wild. +var allowedReadMe = map[string]bool{ + "readme": true, + "readme.md": true, + "readme.rst": true, + "readme.ex": true, + "readme.markdown": true, + "readme.txt": true, +} + +// GET id/readme +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idreadme +func (h *ReqHandler) serveReadMe(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error { + entity, err := h.Store.FindEntity(id, "_id", "contents", "blobname") + if err != nil { + return errgo.NoteMask(err, "cannot get README", errgo.Is(params.ErrNotFound)) + } + isReadMeFile := func(f *zip.File) bool { + name := strings.ToLower(path.Clean(f.Name)) + // This is the same condition currently used by the GUI. + // TODO propagate likely content type from file extension. 
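+		// For example, "README.md" and "docs/../ReadMe.TXT" (which cleans
+		// to "readme.txt") are matched, while a name such as
+		// "readme-notes.txt" is not in allowedReadMe and is rejected.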
+		return allowedReadMe[name]
+	}
+	r, err := h.Store.OpenCachedBlobFile(entity, mongodoc.FileReadMe, isReadMeFile)
+	if err != nil {
+		return errgo.Mask(err, errgo.Is(params.ErrNotFound))
+	}
+	defer r.Close()
+	setArchiveCacheControl(w.Header(), h.isPublic(id.URL))
+	io.Copy(w, r)
+	return nil
+}
+
+// GET id/icon.svg
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idiconsvg
+func (h *ReqHandler) serveIcon(id *router.ResolvedURL, w http.ResponseWriter, req *http.Request) error {
+	if id.URL.Series == "bundle" {
+		return errgo.WithCausef(nil, params.ErrNotFound, "icons not supported for bundles")
+	}
+	entity, err := h.Store.FindEntity(id, "_id", "contents", "blobname")
+	if err != nil {
+		return errgo.NoteMask(err, "cannot get icon", errgo.Is(params.ErrNotFound))
+	}
+	isIconFile := func(f *zip.File) bool {
+		return path.Clean(f.Name) == "icon.svg"
+	}
+	r, err := h.Store.OpenCachedBlobFile(entity, mongodoc.FileIcon, isIconFile)
+	if err != nil {
+		logger.Errorf("cannot open icon.svg file for %v: %v", id, err)
+		if errgo.Cause(err) != params.ErrNotFound {
+			return errgo.Mask(err)
+		}
+		setArchiveCacheControl(w.Header(), h.isPublic(id.URL))
+		w.Header().Set("Content-Type", "image/svg+xml")
+		io.Copy(w, strings.NewReader(DefaultIcon))
+		return nil
+	}
+	defer r.Close()
+	w.Header().Set("Content-Type", "image/svg+xml")
+	setArchiveCacheControl(w.Header(), h.isPublic(id.URL))
+	if err := processIcon(w, r); err != nil {
+		if errgo.Cause(err) == errProbablyNotXML {
+			logger.Errorf("cannot process icon.svg from %s: %v", id, err)
+			io.Copy(w, strings.NewReader(DefaultIcon))
+			return nil
+		}
+		return errgo.Mask(err)
+	}
+	return nil
+}
+
+var errProbablyNotXML = errgo.New("probably not XML")
+
+const svgNamespace = "http://www.w3.org/2000/svg"
+
+// processIcon reads an icon SVG from r and writes
+// it to w, making any changes that need to be made.
+// Currently it adds a viewBox attribute to the <svg>
+// element if necessary.
+// If there is an error processing the XML before
+// the first token has been written, it returns an error
+// with errProbablyNotXML as the cause.
+func processIcon(w io.Writer, r io.Reader) error {
+	// Arrange to save all the content that we find up
+	// until the first <svg> element. Then we'll stitch it
+	// back together again for the actual processing.
+	var saved bytes.Buffer
+	dec := xml.NewDecoder(io.TeeReader(r, &saved))
+	dec.DefaultSpace = svgNamespace
+	found, changed := false, false
+	for !found {
+		tok, err := dec.Token()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return errgo.WithCausef(err, errProbablyNotXML, "")
+		}
+		_, found, changed = ensureViewbox(tok)
+	}
+	if !found {
+		return errgo.WithCausef(nil, errProbablyNotXML, "no <svg> element found")
+	}
+	// Stitch the input back together again so we can
+	// write the output without buffering it in memory.
+	r = io.MultiReader(&saved, r)
+	if !found || !changed {
+		_, err := io.Copy(w, r)
+		return err
+	}
+	return processNaive(w, r)
+}
+
+// processNaive is like processIcon but processes all of the
+// XML elements. It does not return errProbablyNotXML
+// on error because it may have written arbitrary XML
+// to w, at which point writing an alternative response would
+// be unwise.
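+//
+// Both processIcon and processNaive delegate the actual attribute fix to
+// ensureViewbox below: for example, an svg start element carrying
+// width="100" and height="100" but no viewBox attribute gains a
+// viewBox="0 0 100 100" attribute.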
+func processNaive(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + dec.DefaultSpace = svgNamespace + enc := xml.NewEncoder(w) + found := false + for { + tok, err := dec.Token() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("failed to read token: %v", err) + } + if !found { + tok, found, _ = ensureViewbox(tok) + } + if err := enc.EncodeToken(tok); err != nil { + return fmt.Errorf("cannot encode token %#v: %v", tok, err) + } + } + if err := enc.Flush(); err != nil { + return fmt.Errorf("cannot flush output: %v", err) + } + return nil +} + +func ensureViewbox(tok0 xml.Token) (_ xml.Token, found, changed bool) { + tok, ok := tok0.(xml.StartElement) + if !ok || tok.Name.Space != svgNamespace || tok.Name.Local != "svg" { + return tok0, false, false + } + var width, height string + for _, attr := range tok.Attr { + if attr.Name.Space != "" { + continue + } + switch attr.Name.Local { + case "width": + width = attr.Value + case "height": + height = attr.Value + case "viewBox": + return tok, true, false + } + } + if width == "" || height == "" { + // Width and/or height have not been specified, + // so leave viewbox unspecified too. + return tok, true, false + } + tok.Attr = append(tok.Attr, xml.Attr{ + Name: xml.Name{ + Local: "viewBox", + }, + Value: fmt.Sprintf("0 0 %s %s", width, height), + }) + return tok, true, true +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/content_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,505 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "path/filepath" + "sort" + "strings" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/juju/xml" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v5" +) + +var serveDiagramErrorsTests = []struct { + about string + url string + expectStatus int + expectBody interface{} +}{{ + about: "entity not found", + url: "~charmers/bundle/foo-23/diagram.svg", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: `no matching charm or bundle for "cs:~charmers/bundle/foo-23"`, + }, +}, { + about: "diagram for a charm", + url: "~charmers/wordpress/diagram.svg", + expectStatus: http.StatusNotFound, + expectBody: params.Error{ + Code: params.ErrNotFound, + Message: "diagrams not supported for charms", + }, +}} + +func (s *APISuite) TestServeDiagramErrors(c *gc.C) { + id := newResolvedURL("cs:~charmers/trusty/wordpress-42", 42) + s.addPublicCharm(c, "wordpress", id) + id = newResolvedURL("cs:~charmers/bundle/nopositionbundle-42", 42) + s.addPublicBundle(c, "wordpress-simple", id) + + for i, test := range serveDiagramErrorsTests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.url), + ExpectStatus: test.expectStatus, + ExpectBody: test.expectBody, + }) + } +} + +func (s *APISuite) TestServeDiagram(c 
*gc.C) { + bundle := &testingBundle{ + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "wordpress", + Annotations: map[string]string{ + "gui-x": "100", + "gui-y": "200", + }, + }, + "mysql": { + Charm: "utopic/mysql-23", + Annotations: map[string]string{ + "gui-x": "200", + "gui-y": "200", + }, + }, + }, + }, + } + + url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) + err := s.store.AddBundle(bundle, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("bundle/wordpressbundle/diagram.svg"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) + c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml") + assertCacheControl(c, rec.Header(), true) + + // Check that the output contains valid XML with an SVG tag, + // but don't check the details of the output so that this test doesn't + // break every time the jujusvg presentation changes. + // Also check that we get an image for each service containing the charm + // icon link. + assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ + "svg element": isStartElementWithName("svg"), + "wordpress icon": isStartElementWithAttr("image", "href", "../../wordpress/icon.svg"), + "mysql icon": isStartElementWithAttr("image", "href", "../../utopic/mysql-23/icon.svg"), + }) + + // Do the same check again, but with the short form of the id; + // the relative links should change accordingly. + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("wordpressbundle/diagram.svg"), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes())) + + // Check that the output contains valid XML with an SVG tag, + // but don't check the details of the output so that this test doesn't + // break every time the jujusvg presentation changes. + // Also check that we get an image for each service containing the charm + // icon link. + assertXMLContains(c, rec.Body.Bytes(), map[string]func(xml.Token) bool{ + "svg element": isStartElementWithName("svg"), + "wordpress icon": isStartElementWithAttr("image", "href", "../wordpress/icon.svg"), + "mysql icon": isStartElementWithAttr("image", "href", "../utopic/mysql-23/icon.svg"), + }) +} + +func (s *APISuite) TestServeDiagramNoPosition(c *gc.C) { + bundle := &testingBundle{ + data: &charm.BundleData{ + Services: map[string]*charm.ServiceSpec{ + "wordpress": { + Charm: "wordpress", + }, + "mysql": { + Charm: "utopic/mysql-23", + Annotations: map[string]string{ + "gui-x": "200", + "gui-y": "200", + }, + }, + }, + }, + } + + url := newResolvedURL("cs:~charmers/bundle/wordpressbundle-42", 42) + err := s.store.AddBundle(bundle, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("bundle/wordpressbundle/diagram.svg"), + }) + // Check that the request succeeds and has the expected content type. 
+	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %q", rec.Body.Bytes()))
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+}
+
+var serveReadMeTests = []struct {
+	name           string
+	expectNotFound bool
+}{{
+	name: "README.md",
+}, {
+	name: "README.rst",
+}, {
+	name: "readme",
+}, {
+	name: "README",
+}, {
+	name: "ReadMe.Txt",
+}, {
+	name: "README.ex",
+}, {
+	name:           "",
+	expectNotFound: true,
+}, {
+	name:           "readme-youtube-subscribe.html",
+	expectNotFound: true,
+}, {
+	name:           "readme Dutch.txt",
+	expectNotFound: true,
+}, {
+	name:           "README.debugging",
+	expectNotFound: true,
+}}
+
+func (s *APISuite) TestServeReadMe(c *gc.C) {
+	url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+	for i, test := range serveReadMeTests {
+		c.Logf("test %d: %s", i, test.name)
+		wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress")
+		content := fmt.Sprintf("some content %d", i)
+		if test.name != "" {
+			err := ioutil.WriteFile(filepath.Join(wordpress.Path, test.name), []byte(content), 0666)
+			c.Assert(err, gc.IsNil)
+		}
+
+		url.URL.Revision = i
+		err := s.store.AddCharmWithArchive(url, wordpress)
+		c.Assert(err, gc.IsNil)
+		err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+		c.Assert(err, gc.IsNil)
+
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			URL:     storeURL(url.URL.Path() + "/readme"),
+		})
+		if test.expectNotFound {
+			c.Assert(rec.Code, gc.Equals, http.StatusNotFound)
+			c.Assert(rec.Body.String(), jc.JSONEquals, params.Error{
+				Code:    params.ErrNotFound,
+				Message: "not found",
+			})
+		} else {
+			c.Assert(rec.Code, gc.Equals, http.StatusOK)
+			c.Assert(rec.Body.String(), gc.DeepEquals, content)
+			assertCacheControl(c, rec.Header(), true)
+		}
+	}
+}
+
+func charmWithExtraFile(c *gc.C, name, file, content string) *charm.CharmDir {
+	ch := storetesting.Charms.ClonedDir(c.MkDir(), name)
+	err := ioutil.WriteFile(filepath.Join(ch.Path, file), []byte(content), 0666)
+	c.Assert(err, gc.IsNil)
+	return ch
+}
+
+func (s *APISuite) TestServeIcon(c *gc.C) {
+	content := `an icon, really`
+	expected := `an icon, really`
+	wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content)
+
+	url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+	err := s.store.AddCharmWithArchive(url, wordpress)
+	c.Assert(err, gc.IsNil)
+	err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(url.URL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, expected)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+	assertCacheControl(c, rec.Header(), true)
+
+	// Test with revision -1
+	noRevURL := url.URL
+	noRevURL.Revision = -1
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(noRevURL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, expected)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+	assertCacheControl(c, rec.Header(), true)
+
+	// Reload the charm with an icon that already has viewBox.
+	wordpress = storetesting.Charms.ClonedDir(c.MkDir(), "wordpress")
+	err = ioutil.WriteFile(filepath.Join(wordpress.Path, "icon.svg"), []byte(expected), 0666)
+	c.Assert(err, gc.IsNil)
+
+	url.URL.Revision++
+	err = s.store.AddCharmWithArchive(url, wordpress)
+	c.Assert(err, gc.IsNil)
+
+	// Check that we still get expected svg.
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(url.URL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, expected)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+}
+
+func (s *APISuite) TestServeBundleIcon(c *gc.C) {
+	s.addPublicBundle(c, "wordpress-simple", newResolvedURL("cs:~charmers/bundle/something-32", 32))
+
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:      s.srv,
+		URL:          storeURL("~charmers/bundle/something-32/icon.svg"),
+		ExpectStatus: http.StatusNotFound,
+		ExpectBody: params.Error{
+			Code:    params.ErrNotFound,
+			Message: "icons not supported for bundles",
+		},
+	})
+}
+
+func (s *APISuite) TestServeDefaultIcon(c *gc.C) {
+	wordpress := storetesting.Charms.ClonedDir(c.MkDir(), "wordpress")
+
+	url := newResolvedURL("cs:~charmers/precise/wordpress-0", 0)
+	err := s.store.AddCharmWithArchive(url, wordpress)
+	c.Assert(err, gc.IsNil)
+	err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+	c.Assert(err, gc.IsNil)
+
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL(url.URL.Path() + "/icon.svg"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	c.Assert(rec.Body.String(), gc.Equals, v5.DefaultIcon)
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+	assertCacheControl(c, rec.Header(), true)
+}
+
+func (s *APISuite) TestServeDefaultIconForBadXML(c *gc.C) {
+
+	for i, content := range []string{
+		"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44",
+		// Technically this XML is not bad - we just can't parse it because
+		// it's got internally defined character entities. Nonetheless, we treat
+		// it as "bad" for the time being.
+		cloudfoundrySVG,
+	} {
+		wordpress := charmWithExtraFile(c, "wordpress", "icon.svg", content)
+
+		url := newResolvedURL("cs:~charmers/precise/wordpress-0", -1)
+		url.URL.Revision = i
+		err := s.store.AddCharmWithArchive(url, wordpress)
+		c.Assert(err, gc.IsNil)
+		err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User)
+		c.Assert(err, gc.IsNil)
+
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			URL:     storeURL(url.URL.Path() + "/icon.svg"),
+		})
+		c.Assert(rec.Code, gc.Equals, http.StatusOK)
+		c.Assert(rec.Body.String(), gc.Equals, v5.DefaultIcon)
+		c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "image/svg+xml")
+		assertCacheControl(c, rec.Header(), true)
+	}
+}
+
+func (s *APISuite) TestProcessIconWorksOnDefaultIcon(c *gc.C) {
+	var buf bytes.Buffer
+	err := v5.ProcessIcon(&buf, strings.NewReader(v5.DefaultIcon))
+	c.Assert(err, gc.IsNil)
+	assertXMLEqual(c, buf.Bytes(), []byte(v5.DefaultIcon))
+}
+
+func (s *APISuite) TestProcessIconDoesNotQuoteNewlines(c *gc.C) {
+	// Note: this is important because Chrome does not like
+	// to see escaped newlines ("&#xA;") before the opening <svg> tag.
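+	// processIcon copies the raw input through without re-encoding when
+	// no viewBox change is needed; re-encoding the tokens would escape
+	// the newlines as "&#xA;" sequences.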
+	icon := `
+
+
+`
+	var buf bytes.Buffer
+	err := v5.ProcessIcon(&buf, strings.NewReader(icon))
+	c.Assert(err, gc.IsNil)
+	if strings.Contains(buf.String(), "&#x") {
+		c.Errorf("newlines were quoted in processed icon output")
+	}
+}
+
+// assertXMLEqual asserts that the XML contained in the
+// two slices is equal, without caring about namespace
+// declarations or attribute ordering.
+func assertXMLEqual(c *gc.C, body []byte, expect []byte) {
+	decBody := xml.NewDecoder(bytes.NewReader(body))
+	decExpect := xml.NewDecoder(bytes.NewReader(expect))
+	for i := 0; ; i++ {
+		tok0, err0 := decBody.Token()
+		tok1, err1 := decExpect.Token()
+		if err1 != nil {
+			c.Assert(err0, gc.NotNil)
+			c.Assert(err0.Error(), gc.Equals, err1.Error())
+			break
+		}
+		ok, err := tokenEqual(tok0, tok1)
+		if !ok {
+			c.Logf("got %#v", tok0)
+			c.Logf("want %#v", tok1)
+			c.Fatalf("mismatch at token %d: %v", i, err)
+		}
+	}
+}
+
+func tokenEqual(tok0, tok1 xml.Token) (bool, error) {
+	tok0 = canonicalXMLToken(tok0)
+	tok1 = canonicalXMLToken(tok1)
+	return jc.DeepEqual(tok0, tok1)
+}
+
+func canonicalXMLToken(tok xml.Token) xml.Token {
+	start, ok := tok.(xml.StartElement)
+	if !ok {
+		return tok
+	}
+	// Remove all namespace-defining attributes.
+	j := 0
+	for _, attr := range start.Attr {
+		if attr.Name.Local == "xmlns" && attr.Name.Space == "" ||
+			attr.Name.Space == "xmlns" {
+			continue
+		}
+		start.Attr[j] = attr
+		j++
+	}
+	start.Attr = start.Attr[0:j]
+	sort.Sort(attrByName(start.Attr))
+	return start
+}
+
+type attrByName []xml.Attr
+
+func (a attrByName) Len() int      { return len(a) }
+func (a attrByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a attrByName) Less(i, j int) bool {
+	if a[i].Name.Space != a[j].Name.Space {
+		return a[i].Name.Space < a[j].Name.Space
+	}
+	return a[i].Name.Local < a[j].Name.Local
+}
+
+// assertXMLContains asserts that the XML in body is well formed, and
+// contains at least one token that satisfies each of the functions in need.
+func assertXMLContains(c *gc.C, body []byte, need map[string]func(xml.Token) bool) {
+	dec := xml.NewDecoder(bytes.NewReader(body))
+	for {
+		tok, err := dec.Token()
+		if err == io.EOF {
+			break
+		}
+		c.Assert(err, gc.IsNil)
+		for what, f := range need {
+			if f(tok) {
+				delete(need, what)
+			}
+		}
+	}
+	c.Assert(need, gc.HasLen, 0, gc.Commentf("body:\n%s", body))
+}
+
+func isStartElementWithName(name string) func(xml.Token) bool {
+	return func(tok xml.Token) bool {
+		startElem, ok := tok.(xml.StartElement)
+		return ok && startElem.Name.Local == name
+	}
+}
+
+func isStartElementWithAttr(name, attr, val string) func(xml.Token) bool {
+	return func(tok xml.Token) bool {
+		startElem, ok := tok.(xml.StartElement)
+		// Require the element name to match as well as the attribute,
+		// so that the name parameter is actually honoured.
+		if !ok || startElem.Name.Local != name {
+			return false
+		}
+		for _, a := range startElem.Attr {
+			if a.Name.Local == attr && a.Value == val {
+				return true
+			}
+		}
+		return false
+	}
+}
+
+const cloudfoundrySVG = `
+
+
+
+
+
+
+
+
+
+]>
+
+content omitted
+
+`
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,281 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+// DefaultIcon holds the default charm icon SVG content.
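+// It is served in place of a charm's own icon by serveIcon when the
+// charm archive has no icon.svg entry or its icon cannot be processed
+// as XML (see content.go).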
+const DefaultIcon = defaultIcon + +const defaultIcon = ` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + +` === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/defaulticon_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "strings" + + gc "gopkg.in/check.v1" + + "gopkg.in/juju/charmstore.v5-unstable/internal/v5" +) + +type iconSuite struct{} + +var _ = gc.Suite(&iconSuite{}) + +func (s *iconSuite) TestValidXML(c *gc.C) { + // The XML declaration must be included in the first line of the icon. + hasXMLPrefix := strings.HasPrefix(v5.DefaultIcon, "= %d", minValue) + } + return value, nil +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/log_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/log_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/log_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,539 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. + +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "bytes" + "encoding/json" + "net/http" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" +) + +type logSuite struct { + commonSuite +} + +var _ = gc.Suite(&logSuite{}) + +func (s *logSuite) SetUpSuite(c *gc.C) { + s.enableIdentity = true + s.commonSuite.SetUpSuite(c) +} + +var logResponses = map[string]*params.LogResponse{ + "info1": { + Data: rawMessage("info data 1"), + Level: params.InfoLevel, + Type: params.IngestionType, + URLs: nil, + }, + "error1": { + Data: rawMessage("error data 1"), + Level: params.ErrorLevel, + Type: params.IngestionType, + URLs: nil, + }, + "info2": { + Data: rawMessage("info data 2"), + Level: params.InfoLevel, + Type: params.IngestionType, + URLs: []*charm.URL{ + charm.MustParseURL("precise/django"), + charm.MustParseURL("django"), + charm.MustParseURL("rails"), + }, + }, + "warning1": { + Data: rawMessage("warning data 1"), + Level: params.WarningLevel, + Type: params.IngestionType, + URLs: nil, + }, + "error2": { + Data: rawMessage("error data 2"), + Level: params.ErrorLevel, + Type: params.IngestionType, + URLs: []*charm.URL{ + charm.MustParseURL("hadoop"), + }, + }, + "info3": { + Data: rawMessage("info data 3"), + Level: params.InfoLevel, + Type: params.IngestionType, + URLs: []*charm.URL{ + charm.MustParseURL("trusty/django"), + charm.MustParseURL("django"), + charm.MustParseURL("utopic/hadoop"), + charm.MustParseURL("hadoop"), + }, + }, + "error3": { + Data: rawMessage("error data 3"), + Level: params.ErrorLevel, + Type: params.IngestionType, + URLs: []*charm.URL{ + charm.MustParseURL("utopic/hadoop"), + charm.MustParseURL("hadoop"), + charm.MustParseURL("precise/django"), + charm.MustParseURL("django"), + }, + }, + "stats": { + Data: rawMessage("statistics info data"), + Level: params.InfoLevel, + 
Type: params.LegacyStatisticsType, + URLs: nil, + }, +} + +var getLogsTests = []struct { + about string + querystring string + expectBody []*params.LogResponse +}{{ + about: "retrieve logs", + expectBody: []*params.LogResponse{ + logResponses["stats"], + logResponses["error3"], + logResponses["info3"], + logResponses["error2"], + logResponses["warning1"], + logResponses["info2"], + logResponses["error1"], + logResponses["info1"], + }, +}, { + about: "use limit", + querystring: "?limit=2", + expectBody: []*params.LogResponse{ + logResponses["stats"], + logResponses["error3"], + }, +}, { + about: "use offset", + querystring: "?skip=3", + expectBody: []*params.LogResponse{ + logResponses["error2"], + logResponses["warning1"], + logResponses["info2"], + logResponses["error1"], + logResponses["info1"], + }, +}, { + about: "zero offset", + querystring: "?skip=0", + expectBody: []*params.LogResponse{ + logResponses["stats"], + logResponses["error3"], + logResponses["info3"], + logResponses["error2"], + logResponses["warning1"], + logResponses["info2"], + logResponses["error1"], + logResponses["info1"], + }, +}, { + about: "use both limit and offset", + querystring: "?limit=3&skip=1", + expectBody: []*params.LogResponse{ + logResponses["error3"], + logResponses["info3"], + logResponses["error2"], + }, +}, { + about: "filter by level", + querystring: "?level=info", + expectBody: []*params.LogResponse{ + logResponses["stats"], + logResponses["info3"], + logResponses["info2"], + logResponses["info1"], + }, +}, { + about: "filter by type", + querystring: "?type=ingestion", + expectBody: []*params.LogResponse{ + logResponses["error3"], + logResponses["info3"], + logResponses["error2"], + logResponses["warning1"], + logResponses["info2"], + logResponses["error1"], + logResponses["info1"], + }, +}, { + about: "filter by level with a limit", + querystring: "?level=error&limit=2", + expectBody: []*params.LogResponse{ + logResponses["error3"], + logResponses["error2"], + }, +}, { + about: "filter by id", + querystring: "?id=precise/django", + expectBody: []*params.LogResponse{ + logResponses["error3"], + logResponses["info2"], + }, +}, { + about: "multiple query", + querystring: "?id=utopic/hadoop&limit=1&level=error", + expectBody: []*params.LogResponse{ + logResponses["error3"], + }, +}, { + about: "empty response offset", + querystring: "?id=utopic/hadoop&skip=10", +}, { + about: "empty response id not found", + querystring: "?id=utopic/mysql", +}, { + about: "empty response level", + querystring: "?id=trusty/rails&level=error", +}, { + about: "filter by type - legacyStatistics", + querystring: "?type=legacyStatistics", + expectBody: []*params.LogResponse{ + logResponses["stats"], + }, +}} + +var paramsLogLevels = map[params.LogLevel]mongodoc.LogLevel{ + params.InfoLevel: mongodoc.InfoLevel, + params.WarningLevel: mongodoc.WarningLevel, + params.ErrorLevel: mongodoc.ErrorLevel, +} + +// paramsLogTypes maps API params log types to internal mongodoc ones. +var paramsLogTypes = map[params.LogType]mongodoc.LogType{ + params.IngestionType: mongodoc.IngestionType, + params.LegacyStatisticsType: mongodoc.LegacyStatisticsType, +} + +func (s *logSuite) TestGetLogs(c *gc.C) { + // Add logs to the database. 
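+	// The log endpoint returns the most recently added entries first,
+	// so the expected responses in getLogsTests above appear in reverse
+	// insertion order.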
+ beforeAdding := time.Now().Add(-time.Second) + for _, key := range []string{"info1", "error1", "info2", "warning1", "error2", "info3", "error3", "stats"} { + resp := logResponses[key] + err := s.store.AddLog(&resp.Data, paramsLogLevels[resp.Level], paramsLogTypes[resp.Type], resp.URLs) + c.Assert(err, gc.IsNil) + } + afterAdding := time.Now().Add(time.Second) + + // Run the tests. + for i, test := range getLogsTests { + c.Logf("test %d: %s", i, test.about) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("log" + test.querystring), + Username: testUsername, + Password: testPassword, + }) + + // Ensure the response is what we expect. + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json") + + // Decode the response. + var logs []*params.LogResponse + decoder := json.NewDecoder(rec.Body) + err := decoder.Decode(&logs) + c.Assert(err, gc.IsNil) + + // Check and then reset the response time so that the whole body + // can be more easily compared later. + for _, log := range logs { + c.Assert(log.Time, jc.TimeBetween(beforeAdding, afterAdding)) + log.Time = time.Time{} + } + + // Ensure the response includes the expected logs. + c.Assert(logs, jc.DeepEquals, test.expectBody) + } +} + +func rawMessage(msg string) json.RawMessage { + message, err := json.Marshal(msg) + if err != nil { + panic(err) + } + return json.RawMessage(message) +} + +var getLogsErrorsTests = []struct { + about string + querystring string + expectStatus int + expectMessage string + expectCode params.ErrorCode +}{{ + about: "invalid limit (negative number)", + querystring: "?limit=-100", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid limit value: value must be >= 1", + expectCode: params.ErrBadRequest, +}, { + about: "invalid limit (zero value)", + querystring: "?limit=0", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid limit value: value must be >= 1", + expectCode: params.ErrBadRequest, +}, { + about: "invalid limit (not a number)", + querystring: "?limit=foo", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid limit value: value must be a number", + expectCode: params.ErrBadRequest, +}, { + about: "invalid offset (negative number)", + querystring: "?skip=-100", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid skip value: value must be >= 0", + expectCode: params.ErrBadRequest, +}, { + about: "invalid offset (not a number)", + querystring: "?skip=bar", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid skip value: value must be a number", + expectCode: params.ErrBadRequest, +}, { + about: "invalid id", + querystring: "?id=no-such:reference", + expectStatus: http.StatusBadRequest, + expectMessage: `invalid id value: charm or bundle URL has invalid schema: "no-such:reference"`, + expectCode: params.ErrBadRequest, +}, { + about: "invalid log level", + querystring: "?level=bar", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid log level value", + expectCode: params.ErrBadRequest, +}, { + about: "invalid log type", + querystring: "?type=no-such", + expectStatus: http.StatusBadRequest, + expectMessage: "invalid log type value", + expectCode: params.ErrBadRequest, +}} + +func (s *logSuite) TestGetLogsErrors(c *gc.C) { + for i, test := range getLogsErrorsTests { + c.Logf("test %d: %s", i, test.about) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("log" + test.querystring), + 
Username: testUsername, + Password: testPassword, + ExpectStatus: test.expectStatus, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: test.expectCode, + }, + }) + } +} + +func (s *logSuite) TestGetLogsErrorInvalidLog(c *gc.C) { + // Add a non-parsable log message to the db directly. + err := s.store.DB.Logs().Insert(mongodoc.Log{ + Data: []byte("!"), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: time.Now(), + }) + c.Assert(err, gc.IsNil) + // The log is just ignored. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("log"), + Username: testUsername, + Password: testPassword, + ExpectStatus: http.StatusOK, + ExpectBody: []params.LogResponse{}, + }) +} + +func (s *logSuite) TestPostLogs(c *gc.C) { + // Prepare the request body. + body := makeByteLogs(rawMessage("info data"), params.InfoLevel, params.IngestionType, []*charm.URL{ + charm.MustParseURL("trusty/django"), + charm.MustParseURL("utopic/rails"), + }) + + // Send the request. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("log"), + Method: "POST", + Username: testUsername, + Password: testPassword, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: bytes.NewReader(body), + ExpectStatus: http.StatusOK, + }) + + // Ensure the log message has been added to the database. + var doc mongodoc.Log + err := s.store.DB.Logs().Find(nil).One(&doc) + c.Assert(err, gc.IsNil) + c.Assert(string(doc.Data), gc.Equals, `"info data"`) + c.Assert(doc.Level, gc.Equals, mongodoc.InfoLevel) + c.Assert(doc.Type, gc.Equals, mongodoc.IngestionType) + c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{ + charm.MustParseURL("trusty/django"), + charm.MustParseURL("django"), + charm.MustParseURL("utopic/rails"), + charm.MustParseURL("rails"), + }) +} + +func (s *logSuite) TestPostLogsMultipleEntries(c *gc.C) { + // Prepare the request body. + infoData := rawMessage("info data") + warningData := rawMessage("warning data") + logs := []params.Log{{ + Data: &infoData, + Level: params.InfoLevel, + Type: params.IngestionType, + }, { + Data: &warningData, + Level: params.WarningLevel, + Type: params.IngestionType, + }} + body, err := json.Marshal(logs) + c.Assert(err, gc.IsNil) + + // Send the request. + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("log"), + Method: "POST", + Username: testUsername, + Password: testPassword, + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Body: bytes.NewReader(body), + ExpectStatus: http.StatusOK, + }) + + // Ensure the log messages has been added to the database. 
+
+func (s *logSuite) TestPostLogs(c *gc.C) {
+	// Prepare the request body.
+	body := makeByteLogs(rawMessage("info data"), params.InfoLevel, params.IngestionType, []*charm.URL{
+		charm.MustParseURL("trusty/django"),
+		charm.MustParseURL("utopic/rails"),
+	})
+
+	// Send the request.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:  s.srv,
+		URL:      storeURL("log"),
+		Method:   "POST",
+		Username: testUsername,
+		Password: testPassword,
+		Header: http.Header{
+			"Content-Type": {"application/json"},
+		},
+		Body:         bytes.NewReader(body),
+		ExpectStatus: http.StatusOK,
+	})
+
+	// Ensure the log message has been added to the database.
+	var doc mongodoc.Log
+	err := s.store.DB.Logs().Find(nil).One(&doc)
+	c.Assert(err, gc.IsNil)
+	c.Assert(string(doc.Data), gc.Equals, `"info data"`)
+	c.Assert(doc.Level, gc.Equals, mongodoc.InfoLevel)
+	c.Assert(doc.Type, gc.Equals, mongodoc.IngestionType)
+	c.Assert(doc.URLs, jc.DeepEquals, []*charm.URL{
+		charm.MustParseURL("trusty/django"),
+		charm.MustParseURL("django"),
+		charm.MustParseURL("utopic/rails"),
+		charm.MustParseURL("rails"),
+	})
+}
+
+func (s *logSuite) TestPostLogsMultipleEntries(c *gc.C) {
+	// Prepare the request body.
+	infoData := rawMessage("info data")
+	warningData := rawMessage("warning data")
+	logs := []params.Log{{
+		Data:  &infoData,
+		Level: params.InfoLevel,
+		Type:  params.IngestionType,
+	}, {
+		Data:  &warningData,
+		Level: params.WarningLevel,
+		Type:  params.IngestionType,
+	}}
+	body, err := json.Marshal(logs)
+	c.Assert(err, gc.IsNil)
+
+	// Send the request.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler:  s.srv,
+		URL:      storeURL("log"),
+		Method:   "POST",
+		Username: testUsername,
+		Password: testPassword,
+		Header: http.Header{
+			"Content-Type": {"application/json"},
+		},
+		Body:         bytes.NewReader(body),
+		ExpectStatus: http.StatusOK,
+	})
+
+	// Ensure the log messages have been added to the database.
+	var docs []mongodoc.Log
+	err = s.store.DB.Logs().Find(nil).Sort("id").All(&docs)
+	c.Assert(err, gc.IsNil)
+	c.Assert(docs, gc.HasLen, 2)
+	c.Assert(string(docs[0].Data), gc.Equals, string(infoData))
+	c.Assert(docs[0].Level, gc.Equals, mongodoc.InfoLevel)
+	c.Assert(string(docs[1].Data), gc.Equals, string(warningData))
+	c.Assert(docs[1].Level, gc.Equals, mongodoc.WarningLevel)
+}
+
+var postLogsErrorsTests = []struct {
+	about         string
+	contentType   string
+	body          []byte
+	expectStatus  int
+	expectMessage string
+	expectCode    params.ErrorCode
+}{{
+	about:         "invalid content type",
+	contentType:   "application/zip",
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: `unexpected Content-Type "application/zip"; expected 'application/json'`,
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid body",
+	body:          []byte("!"),
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "cannot unmarshal body: invalid character '!' looking for beginning of value",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid log level",
+	body:          makeByteLogs(rawMessage("message"), params.LogLevel(42), params.IngestionType, nil),
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid log level",
+	expectCode:    params.ErrBadRequest,
+}, {
+	about:         "invalid log type",
+	body:          makeByteLogs(rawMessage("message"), params.WarningLevel, params.LogType(42), nil),
+	expectStatus:  http.StatusBadRequest,
+	expectMessage: "invalid log type",
+	expectCode:    params.ErrBadRequest,
+}}
+
+func (s *logSuite) TestPostLogsErrors(c *gc.C) {
+	url := storeURL("log")
+	for i, test := range postLogsErrorsTests {
+		c.Logf("test %d: %s", i, test.about)
+		if test.contentType == "" {
+			test.contentType = "application/json"
+		}
+		httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+			Handler: s.srv,
+			URL:     url,
+			Method:  "POST",
+			Header: http.Header{
+				"Content-Type": {test.contentType},
+			},
+			Body:         bytes.NewReader(test.body),
+			Username:     testUsername,
+			Password:     testPassword,
+			ExpectStatus: test.expectStatus,
+			ExpectBody: params.Error{
+				Message: test.expectMessage,
+				Code:    test.expectCode,
+			},
+		})
+	}
+}
+
+func (s *logSuite) TestGetLogsUnauthorizedError(c *gc.C) {
+	s.AssertEndpointAuth(c, httptesting.JSONCallParams{
+		URL:          storeURL("log"),
+		ExpectStatus: http.StatusOK,
+		ExpectBody:   []params.LogResponse{},
+	})
+}
+
+func (s *logSuite) TestPostLogsUnauthorizedError(c *gc.C) {
+	// Send a POST request without authentication credentials.
+	httptesting.AssertJSONCall(c, httptesting.JSONCallParams{
+		Handler: s.noMacaroonSrv,
+		URL:     storeURL("log"),
+		Method:  "POST",
+		Header: http.Header{
+			"Content-Type": {"application/json"},
+		},
+		ExpectStatus: http.StatusUnauthorized,
+		ExpectBody: params.Error{
+			Message: "authentication failed: missing HTTP auth header",
+			Code:    params.ErrUnauthorized,
+		},
+	})
+}
+
+func makeByteLogs(data json.RawMessage, logLevel params.LogLevel, logType params.LogType, urls []*charm.URL) []byte {
+	logs := []params.Log{{
+		Data:  &data,
+		Level: logLevel,
+		Type:  logType,
+		URLs:  urls,
+	}}
+	b, err := json.Marshal(logs)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
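makeByteLogs above shows the body format the server accepts; the sketch below assembles and sends the same kind of payload from a standalone client (serverURL, user and password are placeholders, and the optional URLs field is omitted for brevity): a JSON array of params.Log values whose Data field carries raw JSON.

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"

    	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
    )

    // postLog sends a single ingestion log entry to a charmstore server.
    func postLog(serverURL, user, password string) error {
    	data := json.RawMessage(`"ingestion started"`)
    	body, err := json.Marshal([]params.Log{{
    		Data:  &data,
    		Level: params.InfoLevel,
    		Type:  params.IngestionType,
    	}})
    	if err != nil {
    		return err
    	}
    	req, err := http.NewRequest("POST", serverURL+"/log", bytes.NewReader(body))
    	if err != nil {
    		return err
    	}
    	req.Header.Set("Content-Type", "application/json")
    	req.SetBasicAuth(user, password)
    	resp, err := http.DefaultClient.Do(req)
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode != http.StatusOK {
    		return fmt.Errorf("unexpected response status %q", resp.Status)
    	}
    	return nil
    }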
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/package_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"testing"
+
+	jujutesting "github.com/juju/testing"
+)
+
+func TestPackage(t *testing.T) {
+	jujutesting.MgoTestPackage(t, nil)
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/pprof.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/pprof.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/pprof.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,76 @@
+package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"net/http"
+	runtimepprof "runtime/pprof"
+	"strings"
+	"text/template"
+
+	"github.com/juju/httpprof"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+type pprofHandler struct {
+	mux  *http.ServeMux
+	auth authorizer
+}
+
+type authorizer interface {
+	authorize(req *http.Request, acl []string, alwaysAuth bool, entityId *router.ResolvedURL) (authorization, error)
+}
+
+func newPprofHandler(auth authorizer) http.Handler {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/cmdline", pprof.Cmdline)
+	mux.HandleFunc("/profile", pprof.Profile)
+	mux.HandleFunc("/symbol", pprof.Symbol)
+	mux.HandleFunc("/", pprofIndex)
+	return &pprofHandler{
+		mux:  mux,
+		auth: auth,
+	}
+}
+
+func (h *pprofHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if _, err := h.auth.authorize(req, nil, true, nil); err != nil {
+		router.WriteError(w, err)
+		return
+	}
+	h.mux.ServeHTTP(w, req)
+}
+
+// pprofIndex is copied from pprof.Index with minor modifications
+// to make it work using a relative path.
+func pprofIndex(w http.ResponseWriter, req *http.Request) {
+	if req.URL.Path == "/" {
+		profiles := runtimepprof.Profiles()
+		if err := indexTmpl.Execute(w, profiles); err != nil {
+			logger.Errorf("cannot execute pprof template: %v", err)
+		}
+		return
+	}
+	name := strings.TrimPrefix(req.URL.Path, "/")
+	pprof.Handler(name).ServeHTTP(w, req)
+}
+
+var indexTmpl = template.Must(template.New("index").Parse(`<html>
+<head>
+<title>pprof</title>
+</head>
+<body>
+pprof<br>
+<br>
+profiles:<br>
+<table>
+{{range .}}
+<tr><td align=right>{{.Count}}<td><a href="{{.Name}}?debug=1">{{.Name}}</a>
+{{end}}
+</table>
+<br>
+<a href="goroutine?debug=2">full goroutine stack dump</a><br>
+</body>
+</html>
+`))

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,294 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"net/http"
+	"net/url"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+// GET id/meta/charm-related[?include=meta[&include=meta…]]
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetacharm-related
+func (h *ReqHandler) metaCharmRelated(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+	if id.URL.Series == "bundle" {
+		return nil, nil
+	}
+	// If the charm does not define any relations, we can just return
+	// without hitting the db.
+	if len(entity.CharmProvidedInterfaces)+len(entity.CharmRequiredInterfaces) == 0 {
+		return &params.RelatedResponse{}, nil
+	}
+	q := h.Store.MatchingInterfacesQuery(entity.CharmProvidedInterfaces, entity.CharmRequiredInterfaces)
+
+	fields := bson.D{
+		{"_id", 1},
+		{"supportedseries", 1},
+		{"development", 1},
+		{"charmrequiredinterfaces", 1},
+		{"charmprovidedinterfaces", 1},
+		{"promulgated-url", 1},
+		{"promulgated-revision", 1},
+	}
+
+	var entities []*mongodoc.Entity
+	if err := q.Select(fields).Sort("_id").All(&entities); err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the related charms")
+	}
+
+	// If no entities are found, there is no need to process the
+	// results any further.
+	if len(entities) == 0 {
+		return &params.RelatedResponse{}, nil
+	}
+
+	// Build the results, by grouping entities based on their relations' roles
+	// and interfaces.
+	includes := flags["include"]
+	requires, err := h.getRelatedCharmsResponse(entity.CharmProvidedInterfaces, entities, func(e *mongodoc.Entity) []string {
+		return e.CharmRequiredInterfaces
+	}, includes, req)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the charm requires")
+	}
+	provides, err := h.getRelatedCharmsResponse(entity.CharmRequiredInterfaces, entities, func(e *mongodoc.Entity) []string {
+		return e.CharmProvidedInterfaces
+	}, includes, req)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the charm provides")
+	}
+
+	// Return the response.
+	return &params.RelatedResponse{
+		Requires: requires,
+		Provides: provides,
+	}, nil
+}
+
+type entityRelatedInterfacesGetter func(*mongodoc.Entity) []string
+
+// getRelatedCharmsResponse returns a response mapping interfaces to related
+// charms.
For instance:
+//	map[string][]params.MetaAnyResponse{
+//		"http": []params.MetaAnyResponse{
+//			{Id: "cs:utopic/django-42", Meta: ...},
+//			{Id: "cs:trusty/wordpress-47", Meta: ...},
+//		},
+//		"memcache": []params.MetaAnyResponse{
+//			{Id: "cs:utopic/memcached-0", Meta: ...},
+//		},
+//	}
+func (h *ReqHandler) getRelatedCharmsResponse(
+	ifaces []string,
+	entities []*mongodoc.Entity,
+	getInterfaces entityRelatedInterfacesGetter,
+	includes []string,
+	req *http.Request,
+) (map[string][]params.MetaAnyResponse, error) {
+	results := make(map[string][]params.MetaAnyResponse, len(ifaces))
+	for _, iface := range ifaces {
+		responses, err := h.getRelatedIfaceResponses(iface, entities, getInterfaces, includes, req)
+		if err != nil {
+			return nil, err
+		}
+		if len(responses) > 0 {
+			results[iface] = responses
+		}
+	}
+	return results, nil
+}
+
+func (h *ReqHandler) getRelatedIfaceResponses(
+	iface string,
+	entities []*mongodoc.Entity,
+	getInterfaces entityRelatedInterfacesGetter,
+	includes []string,
+	req *http.Request,
+) ([]params.MetaAnyResponse, error) {
+	// Build a list of responses including only entities which are related
+	// to the given interface.
+	usesInterface := func(e *mongodoc.Entity) bool {
+		for _, entityIface := range getInterfaces(e) {
+			if entityIface == iface {
+				return true
+			}
+		}
+		return false
+	}
+	resp, err := h.getMetadataForEntities(entities, includes, req, usesInterface)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	return resp, nil
+}
+
+// GET id/meta/bundles-containing[?include=meta[&include=meta…]][&any-series=1][&any-revision=1][&all-results=1]
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-idmetabundles-containing
+func (h *ReqHandler) metaBundlesContaining(entity *mongodoc.Entity, id *router.ResolvedURL, path string, flags url.Values, req *http.Request) (interface{}, error) {
+	if id.URL.Series == "bundle" {
+		return nil, nil
+	}
+
+	// Validate the URL query values.
+	anySeries, err := router.ParseBool(flags.Get("any-series"))
+	if err != nil {
+		return nil, badRequestf(err, "invalid value for any-series")
+	}
+	anyRevision, err := router.ParseBool(flags.Get("any-revision"))
+	if err != nil {
+		return nil, badRequestf(err, "invalid value for any-revision")
+	}
+	allResults, err := router.ParseBool(flags.Get("all-results"))
+	if err != nil {
+		return nil, badRequestf(err, "invalid value for all-results")
+	}
+
+	// Mutate the reference so that it represents a base URL if required.
+	prefURL := id.PreferredURL()
+	searchId := *prefURL
+	if anySeries || anyRevision {
+		searchId.Revision = -1
+		searchId.Series = ""
+	}
+
+	// Retrieve the bundles containing the resulting charm id.
+	var entities []*mongodoc.Entity
+	if err := h.Store.DB.Entities().
+		Find(bson.D{{"bundlecharms", &searchId}}).
+		Select(bson.D{{"_id", 1}, {"bundlecharms", 1}, {"promulgated-url", 1}}).
+		All(&entities); err != nil {
+		return nil, errgo.Notef(err, "cannot retrieve the related bundles")
+	}
+
+	// Further filter the entities if required, by only including latest
+	// bundle revisions and/or excluding specific charm series or revisions.
+
+	// Filter entities so that only the entities that actually
+	// match the desired search criteria remain.
+	filterEntities(&entities, func(e *mongodoc.Entity) bool {
+		if anySeries == anyRevision {
+			// If neither anySeries nor anyRevision is true, then
+			// the search is exact and therefore e must be
+			// matched.
+			// If both anySeries and anyRevision are true, then
+			// the base entity that we are searching for is exactly
+			// what we want to search for, therefore e must be matched.
+			return true
+		}
+		for _, charmId := range e.BundleCharms {
+			if charmId.Name == prefURL.Name &&
+				charmId.User == prefURL.User &&
+				(anySeries || charmId.Series == prefURL.Series) &&
+				(anyRevision || charmId.Revision == prefURL.Revision) {
+				return true
+			}
+		}
+		return false
+	})
+
+	var latest map[charm.URL]int
+	if !allResults {
+		// Include only the latest revision of any bundle.
+		// This is made somewhat tricky by the fact that
+		// each bundle can have two URLs, its canonical
+		// URL (with user) and its promulgated URL.
+		//
+		// We want to maximise the URL revision regardless of
+		// whether the URL is promulgated or not, so we build
+		// a map holding the latest revision for both
+		// promulgated and non-promulgated revisions
+		// and then include entities that have the latest
+		// revision for either.
+		latest = make(map[charm.URL]int)
+
+		// updateLatest updates the entry for u without its
+		// revision if u's revision is greater than the
+		// existing entry.
+		updateLatest := func(u *charm.URL) {
+			u1 := *u
+			u1.Revision = -1
+			if rev, ok := latest[u1]; !ok || rev < u.Revision {
+				latest[u1] = u.Revision
+			}
+		}
+		for _, e := range entities {
+			updateLatest(e.URL)
+			if e.PromulgatedURL != nil {
+				updateLatest(e.PromulgatedURL)
+			}
+		}
+		filterEntities(&entities, func(e *mongodoc.Entity) bool {
+			if e.PromulgatedURL != nil {
+				u := *e.PromulgatedURL
+				u.Revision = -1
+				if latest[u] == e.PromulgatedURL.Revision {
+					return true
+				}
+			}
+			u := *e.URL
+			u.Revision = -1
+			return latest[u] == e.URL.Revision
+		})
+	}
+	resp, err := h.getMetadataForEntities(entities, flags["include"], req, nil)
+	if err != nil {
+		return nil, errgo.Mask(err)
+	}
+	return resp, nil
+}
+
+func (h *ReqHandler) getMetadataForEntities(entities []*mongodoc.Entity, includes []string, req *http.Request, includeEntity func(*mongodoc.Entity) bool) ([]params.MetaAnyResponse, error) {
+	// TODO(rog) make this concurrent.
+	response := make([]params.MetaAnyResponse, 0, len(entities))
+	for _, e := range entities {
+		if includeEntity != nil && !includeEntity(e) {
+			continue
+		}
+		meta, err := h.getMetadataForEntity(e, includes, req)
+		if err == errMetadataUnauthorized {
+			continue
+		}
+		if err != nil {
+			return nil, errgo.Mask(err)
+		}
+		response = append(response, params.MetaAnyResponse{
+			Id:   e.PreferredURL(true),
+			Meta: meta,
+		})
+	}
+	return response, nil
+}
+
+var errMetadataUnauthorized = errgo.Newf("metadata unauthorized")
+
+func (h *ReqHandler) getMetadataForEntity(e *mongodoc.Entity, includes []string, req *http.Request) (map[string]interface{}, error) {
+	rurl := charmstore.EntityResolvedURL(e)
+	// Ignore entities that aren't readable by the current user.
+	if err := h.AuthorizeEntity(rurl, req); err != nil {
+		return nil, errMetadataUnauthorized
+	}
+	return h.Router.GetMetadata(rurl, includes, req)
+}
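The map bookkeeping above is subtle: every revision of one entity must share a single map key, which is achieved by clearing the revision on the key URL. Here is a self-contained sketch of the same idiom; the charm URLs are made-up examples, and only the charm.v6-unstable package already imported by this file is assumed.

    package main

    import (
    	"fmt"

    	"gopkg.in/juju/charm.v6-unstable"
    )

    // baseURL returns u with its revision dropped, so that every
    // revision of the same entity maps to the same key.
    func baseURL(u *charm.URL) charm.URL {
    	u1 := *u
    	u1.Revision = -1
    	return u1
    }

    func main() {
    	latest := make(map[charm.URL]int)
    	for _, s := range []string{
    		"cs:trusty/mysql-0",
    		"cs:trusty/mysql-2",
    		"cs:trusty/mysql-1",
    		"cs:precise/nfs-47",
    	} {
    		u := charm.MustParseURL(s)
    		// Keep the highest revision seen for each base URL.
    		if rev, ok := latest[baseURL(u)]; !ok || rev < u.Revision {
    			latest[baseURL(u)] = u.Revision
    		}
    	}
    	for k, rev := range latest {
    		k.Revision = rev
    		fmt.Println(k.String()) // cs:trusty/mysql-2 and cs:precise/nfs-47
    	}
    }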
+// filterEntities deletes all entities from *entities for which
+// the given predicate returns false.
+func filterEntities(entities *[]*mongodoc.Entity, predicate func(*mongodoc.Entity) bool) {
+	entities1 := *entities
+	j := 0
+	for _, e := range entities1 {
+		if predicate(e) {
+			entities1[j] = e
+			j++
+		}
+	}
+	*entities = entities1[0:j]
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/relations_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,974 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/juju/charm.v6-unstable"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+	"gopkg.in/mgo.v2/bson"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/blobstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+// Define fake blob attributes to be used in tests.
+var fakeBlobSize, fakeBlobHash = func() (int64, string) {
+	b := []byte("fake content")
+	h := blobstore.NewHash()
+	h.Write(b)
+	return int64(len(b)), fmt.Sprintf("%x", h.Sum(nil))
+}()
+
+type RelationsSuite struct {
+	commonSuite
+}
+
+var _ = gc.Suite(&RelationsSuite{})
+
+// metaCharmRelatedCharms defines a bunch of charms to be used in
+// the relation tests.
+var metaCharmRelatedCharms = map[string]charm.Charm{
+	"0 ~charmers/utopic/wordpress-0": &relationTestingCharm{
+		provides: map[string]charm.Relation{
+			"website": {
+				Name:      "website",
+				Role:      "provider",
+				Interface: "http",
+			},
+		},
+		requires: map[string]charm.Relation{
+			"cache": {
+				Name:      "cache",
+				Role:      "requirer",
+				Interface: "memcache",
+			},
+			"nfs": {
+				Name:      "nfs",
+				Role:      "requirer",
+				Interface: "mount",
+			},
+		},
+	},
+	"42 ~charmers/utopic/memcached-42": &relationTestingCharm{
+		provides: map[string]charm.Relation{
+			"cache": {
+				Name:      "cache",
+				Role:      "provider",
+				Interface: "memcache",
+			},
+		},
+	},
+	"1 ~charmers/precise/nfs-1": &relationTestingCharm{
+		provides: map[string]charm.Relation{
+			"nfs": {
+				Name:      "nfs",
+				Role:      "provider",
+				Interface: "mount",
+			},
+		},
+	},
+	"47 ~charmers/trusty/haproxy-47": &relationTestingCharm{
+		requires: map[string]charm.Relation{
+			"reverseproxy": {
+				Name:      "reverseproxy",
+				Role:      "requirer",
+				Interface: "http",
+			},
+		},
+	},
+	"48 ~charmers/precise/haproxy-48": &relationTestingCharm{
+		requires: map[string]charm.Relation{
+			"reverseproxy": {
+				Name:      "reverseproxy",
+				Role:      "requirer",
+				Interface: "http",
+			},
+		},
+	},
+	// development charms should not be included in any results.
+	"49 ~charmers/development/precise/haproxy-49": &relationTestingCharm{
+		provides: map[string]charm.Relation{
+			"reverseproxy": {
+				Name:      "reverseproxy",
+				Role:      "requirer",
+				Interface: "http",
+			},
+		},
+	},
+	"1 ~charmers/multi-series-20": &relationTestingCharm{
+		supportedSeries: []string{"precise", "trusty", "utopic"},
+		requires: map[string]charm.Relation{
+			"reverseproxy": {
+				Name:      "reverseproxy",
+				Role:      "requirer",
+				Interface: "http",
+			},
+		},
+	},
+}
+
+var metaCharmRelatedTests = []struct {
+	// Description of the test.
+	about string
+
+	// Charms to be stored in the store before the test is run.
+ charms map[string]charm.Charm + + // readACLs holds ACLs for charms that should be given + // non-public permissions, indexed by URL string + readACLs map[string][]string + + // The id of the charm for which related charms are returned. + id string + + // The querystring to append to the resulting charmstore URL. + querystring string + + // The expected response body. + expectBody params.RelatedResponse +}{{ + about: "provides and requires", + charms: metaCharmRelatedCharms, + id: "utopic/wordpress-0", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/memcached-42"), + }}, + "mount": {{ + Id: charm.MustParseURL("precise/nfs-1"), + }}, + }, + Requires: map[string][]params.MetaAnyResponse{ + "http": {{ + Id: charm.MustParseURL("multi-series-1"), + }, { + Id: charm.MustParseURL("precise/haproxy-48"), + }, { + Id: charm.MustParseURL("trusty/haproxy-47"), + }}, + }, + }, +}, { + about: "only provides", + charms: metaCharmRelatedCharms, + id: "trusty/haproxy-47", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "http": {{ + Id: charm.MustParseURL("utopic/wordpress-0"), + }}, + }, + }, +}, { + about: "only requires", + charms: metaCharmRelatedCharms, + id: "utopic/memcached-42", + expectBody: params.RelatedResponse{ + Requires: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/wordpress-0"), + }}, + }, + }, +}, { + about: "no relations found", + charms: map[string]charm.Charm{ + "0 ~charmers/utopic/wordpress-0": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "website": { + Name: "website", + Role: "provider", + Interface: "http", + }, + }, + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + "nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + }, + id: "utopic/wordpress-0", +}, { + about: "no relations defined", + charms: map[string]charm.Charm{ + "42 ~charmers/utopic/django-42": &relationTestingCharm{}, + }, + id: "utopic/django-42", +}, { + about: "multiple revisions of the same related charm", + charms: map[string]charm.Charm{ + "0 ~charmers/trusty/wordpress-0": &relationTestingCharm{ + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + }, + }, + "1 ~charmers/utopic/memcached-1": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "2 ~charmers/utopic/memcached-2": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "3 ~charmers/utopic/memcached-3": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + }, + id: "trusty/wordpress-0", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/memcached-1"), + }, { + Id: charm.MustParseURL("utopic/memcached-2"), + }, { + Id: charm.MustParseURL("utopic/memcached-3"), + }}, + }, + }, +}, { + about: "reference ordering", + charms: map[string]charm.Charm{ + "0 ~charmers/trusty/wordpress-0": &relationTestingCharm{ + requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + 
"nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + "1 ~charmers/utopic/memcached-1": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "2 ~charmers/utopic/memcached-2": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "90 ~charmers/utopic/redis-90": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "provider", + Interface: "memcache", + }, + }, + }, + "47 ~charmers/trusty/nfs-47": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + "42 ~charmers/precise/nfs-42": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + "47 ~charmers/precise/nfs-47": &relationTestingCharm{ + provides: map[string]charm.Relation{ + "nfs": { + Name: "nfs", + Role: "provider", + Interface: "mount", + }, + }, + }, + }, + id: "trusty/wordpress-0", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "memcache": {{ + Id: charm.MustParseURL("utopic/memcached-1"), + }, { + Id: charm.MustParseURL("utopic/memcached-2"), + }, { + Id: charm.MustParseURL("utopic/redis-90"), + }}, + "mount": {{ + Id: charm.MustParseURL("precise/nfs-42"), + }, { + Id: charm.MustParseURL("precise/nfs-47"), + }, { + Id: charm.MustParseURL("trusty/nfs-47"), + }}, + }, + }, +}, { + about: "includes", + charms: metaCharmRelatedCharms, + id: "precise/nfs-1", + querystring: "?include=archive-size&include=charm-metadata", + expectBody: params.RelatedResponse{ + Requires: map[string][]params.MetaAnyResponse{ + "mount": {{ + Id: charm.MustParseURL("utopic/wordpress-0"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "charm-metadata": &charm.Meta{ + Provides: map[string]charm.Relation{ + "website": { + Name: "website", + Role: "provider", + Interface: "http", + }, + }, + Requires: map[string]charm.Relation{ + "cache": { + Name: "cache", + Role: "requirer", + Interface: "memcache", + }, + "nfs": { + Name: "nfs", + Role: "requirer", + Interface: "mount", + }, + }, + }, + }, + }}, + }, + }, +}, { + about: "don't show charms if you don't have perms for 'em", + charms: metaCharmRelatedCharms, + readACLs: map[string][]string{ + "~charmers/memcached": []string{"noone"}, + }, + id: "utopic/wordpress-0", + expectBody: params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "mount": {{ + Id: charm.MustParseURL("precise/nfs-1"), + }}, + }, + Requires: map[string][]params.MetaAnyResponse{ + "http": {{ + Id: charm.MustParseURL("multi-series-1"), + }, { + Id: charm.MustParseURL("precise/haproxy-48"), + }, { + Id: charm.MustParseURL("trusty/haproxy-47"), + }}, + }, + }, +}} + +func (s *RelationsSuite) addCharms(c *gc.C, charms map[string]charm.Charm) { + for id, ch := range charms { + url := mustParseResolvedURL(id) + // The blob related info are not used in these tests. + // The related charms are retrieved from the entities collection, + // without accessing the blob store. 
+ err := s.store.AddCharm(ch, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil, gc.Commentf("id %q", id)) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + if url.Development { + err = s.store.SetPerms(url.UserOwnedURL(), "read", params.Everyone, url.URL.User) + } + } +} + +func (s *RelationsSuite) setPerms(c *gc.C, readACLs map[string][]string) { + for url, acl := range readACLs { + err := s.store.SetPerms(charm.MustParseURL(url), "read", acl...) + c.Assert(err, gc.IsNil) + } +} + +func (s *RelationsSuite) TestMetaCharmRelated(c *gc.C) { + for i, test := range metaCharmRelatedTests { + c.Logf("test %d: %s", i, test.about) + s.addCharms(c, test.charms) + s.setPerms(c, test.readACLs) + storeURL := storeURL(test.id + "/meta/charm-related" + test.querystring) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: http.StatusOK, + ExpectBody: test.expectBody, + }) + // Clean up the entities in the store. + _, err := s.store.DB.Entities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + _, err = s.store.DB.BaseEntities().RemoveAll(nil) + c.Assert(err, gc.IsNil) + } +} + +func (s *RelationsSuite) TestMetaCharmRelatedIncludeError(c *gc.C) { + s.addCharms(c, metaCharmRelatedCharms) + storeURL := storeURL("utopic/wordpress-0/meta/charm-related?include=no-such") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: `cannot retrieve the charm requires: unrecognized metadata name "no-such"`, + }, + }) +} + +// relationTestingCharm implements charm.Charm, and it is used for testing +// charm relations. +type relationTestingCharm struct { + supportedSeries []string + provides map[string]charm.Relation + requires map[string]charm.Relation +} + +func (ch *relationTestingCharm) Meta() *charm.Meta { + // The only metadata we are interested in is the relation data. + return &charm.Meta{ + Series: ch.supportedSeries, + Provides: ch.provides, + Requires: ch.requires, + } +} + +func (ch *relationTestingCharm) Config() *charm.Config { + // For the purposes of this implementation, the charm configuration is not + // relevant. + return nil +} + +func (e *relationTestingCharm) Metrics() *charm.Metrics { + return nil +} + +func (ch *relationTestingCharm) Actions() *charm.Actions { + // For the purposes of this implementation, the charm actions are not + // relevant. + return nil +} + +func (ch *relationTestingCharm) Revision() int { + // For the purposes of this implementation, the charm revision is not + // relevant. + return 0 +} + +// metaBundlesContainingBundles defines a bunch of bundles to be used in +// the bundles-containing tests. 
+var metaBundlesContainingBundles = map[string]charm.Bundle{ + "0 ~charmers/bundle/wordpress-simple-0": relationTestingBundle([]string{ + "cs:utopic/wordpress-42", + "cs:utopic/mysql-0", + }), + "1 ~charmers/bundle/wordpress-simple-1": relationTestingBundle([]string{ + "cs:utopic/wordpress-47", + "cs:utopic/mysql-1", + }), + "1 ~charmers/bundle/wordpress-complex-1": relationTestingBundle([]string{ + "cs:utopic/wordpress-42", + "cs:utopic/wordpress-47", + "cs:trusty/mysql-0", + "cs:trusty/mysql-1", + "cs:trusty/memcached-2", + }), + "42 ~charmers/bundle/django-generic-42": relationTestingBundle([]string{ + "django", + "django", + "mysql-1", + "trusty/memcached", + }), + "0 ~charmers/bundle/useless-0": relationTestingBundle([]string{ + "cs:utopic/wordpress-42", + "precise/mediawiki-10", + }), + "46 ~charmers/bundle/mediawiki-simple-46": relationTestingBundle([]string{ + "precise/mediawiki-0", + }), + "47 ~charmers/bundle/mediawiki-simple-47": relationTestingBundle([]string{ + "precise/mediawiki-0", + "mysql", + }), + "48 ~charmers/bundle/mediawiki-simple-48": relationTestingBundle([]string{ + "precise/mediawiki-0", + }), + "~bob/bundle/bobthebundle-2": relationTestingBundle([]string{ + "precise/mediawiki-0", + }), +} + +var metaBundlesContainingTests = []struct { + // Description of the test. + about string + // The id of the charm for which related bundles are returned. + id string + // The querystring to append to the resulting charmstore URL. + querystring string + // The expected status code of the response. + expectStatus int + // The expected response body. + expectBody interface{} +}{{ + about: "specific charm present in several bundles", + id: "utopic/wordpress-42", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/useless-0"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "specific charm present in one bundle", + id: "trusty/memcached-2", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }}, +}, { + about: "specific charm not present in any bundle", + id: "trusty/django-42", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{}, +}, { + about: "specific charm with includes", + id: "trusty/mysql-1", + querystring: "?include=archive-size&include=bundle-metadata", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), + }, + }}, +}, { + about: "partial charm id", + id: "mysql", // The test will add cs:utopic/mysql-0. 
+ expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "any series set to true", + id: "trusty/mysql-0", + querystring: "?any-series=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "any series and all-results set to true", + id: "trusty/mysql-0", + querystring: "?any-series=1&all-results=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + // This result is included even if the latest wordpress-simple does not + // contain the mysql-0 charm. + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "invalid any series", + id: "utopic/mysql-0", + querystring: "?any-series=true", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid value for any-series: unexpected bool value "true" (must be "0" or "1")`, + }, +}, { + about: "any revision set to true", + id: "trusty/memcached-99", + querystring: "?any-revision=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/django-generic-42"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }}, +}, { + about: "invalid any revision", + id: "trusty/memcached-99", + querystring: "?any-revision=why-not", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid value for any-revision: unexpected bool value "why-not" (must be "0" or "1")`, + }, +}, { + about: "all-results set to true", + id: "precise/mediawiki-0", + expectStatus: http.StatusOK, + querystring: "?all-results=1", + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/mediawiki-simple-48"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-47"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-46"), + }, { + Id: charm.MustParseURL("~bob/bundle/bobthebundle-2"), + }}, +}, { + about: "all-results set to false", + id: "precise/mediawiki-0", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/mediawiki-simple-48"), + }, { + Id: charm.MustParseURL("~bob/bundle/bobthebundle-2"), + }}, +}, { + about: "invalid all-results", + id: "trusty/memcached-99", + querystring: "?all-results=yes!", + expectStatus: http.StatusBadRequest, + expectBody: params.Error{ + Code: params.ErrBadRequest, + Message: `invalid value for all-results: unexpected bool value "yes!" 
(must be "0" or "1")`, + }, +}, { + about: "any series and revision, all results", + id: "saucy/mysql-99", + querystring: "?any-series=1&any-revision=1&all-results=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/django-generic-42"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-47"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}, +}, { + about: "any series, any revision", + id: "saucy/mysql-99", + querystring: "?any-series=1&any-revision=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/django-generic-42"), + }, { + Id: charm.MustParseURL("bundle/mediawiki-simple-47"), + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-1"), + }}, +}, { + about: "any series and revision, last results", + id: "saucy/mediawiki", + querystring: "?any-series=1&any-revision=1", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/mediawiki-simple-48"), + }, { + Id: charm.MustParseURL("bundle/useless-0"), + }, { + Id: charm.MustParseURL("~bob/bundle/bobthebundle-2"), + }}, +}, { + about: "any series and revision with includes", + id: "saucy/wordpress-99", + querystring: "?any-series=1&any-revision=1&include=archive-size&include=bundle-metadata", + expectStatus: http.StatusOK, + expectBody: []*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/useless-0"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["0 ~charmers/bundle/useless-0"].Data(), + }, + }, { + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-complex-1"].Data(), + }, + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-1"), + Meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{Size: fakeBlobSize}, + "bundle-metadata": metaBundlesContainingBundles["1 ~charmers/bundle/wordpress-simple-1"].Data(), + }, + }}, +}, { + about: "include-error", + id: "utopic/wordpress-42", + querystring: "?include=no-such", + expectStatus: http.StatusInternalServerError, + expectBody: params.Error{ + Message: `unrecognized metadata name "no-such"`, + }, +}} + +func (s *RelationsSuite) TestMetaBundlesContaining(c *gc.C) { + // Add the bundles used for testing to the database. + for id, b := range metaBundlesContainingBundles { + url := mustParseResolvedURL(id) + // The blob related info are not used in these tests. + // The charm-bundle relations are retrieved from the entities + // collection, without accessing the blob store. + err := s.store.AddBundle(b, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + } + + for i, test := range metaBundlesContainingTests { + c.Logf("test %d: %s", i, test.about) + + // Expand the URL if required before adding the charm to the database, + // so that at least one matching charm can be resolved. 
+ rurl := &router.ResolvedURL{ + URL: *charm.MustParseURL(test.id), + PromulgatedRevision: -1, + } + if rurl.URL.Series == "" { + rurl.URL.Series = "utopic" + } + if rurl.URL.Revision == -1 { + rurl.URL.Revision = 0 + } + if rurl.URL.User == "" { + rurl.URL.User = "charmers" + rurl.PromulgatedRevision = rurl.URL.Revision + } + // Add the charm we need bundle info on to the database. + err := s.store.AddCharm(&relationTestingCharm{}, charmstore.AddParams{ + URL: rurl, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + + // Perform the request and ensure the response is what we expect. + storeURL := storeURL(test.id + "/meta/bundles-containing" + test.querystring) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectStatus: test.expectStatus, + ExpectBody: sameMetaAnyResponses(test.expectBody), + }) + + // Clean up the charm entity in the store. + err = s.store.DB.Entities().Remove(bson.D{{"_id", &rurl.URL}}) + c.Assert(err, gc.IsNil) + } +} + +func (s *RelationsSuite) TestMetaBundlesContainingBundleACL(c *gc.C) { + // Add the bundles used for testing to the database. + for id, b := range metaBundlesContainingBundles { + url := mustParseResolvedURL(id) + // The blob related info are not used in these tests. + // The charm-bundle relations are retrieved from the entities + // collection, without accessing the blob store. + err := s.store.AddBundle(b, charmstore.AddParams{ + URL: url, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + if url.URL.Name == "useless" { + // The useless bundle is not available for "everyone". + err = s.store.SetPerms(&url.URL, "read", url.URL.User) + c.Assert(err, gc.IsNil) + continue + } + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + } + rurl := mustParseResolvedURL("42 ~charmers/utopic/wordpress-42") + // Add the charm we need bundle info on to the database. + err := s.store.AddCharm(&relationTestingCharm{}, charmstore.AddParams{ + URL: rurl, + BlobName: "blobName", + BlobHash: fakeBlobHash, + BlobSize: fakeBlobSize, + }) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + + // Perform the request and ensure that the useless bundle isn't listed. + storeURL := storeURL("utopic/wordpress-42/meta/bundles-containing") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL, + ExpectBody: sameMetaAnyResponses([]*params.MetaAnyResponse{{ + Id: charm.MustParseURL("bundle/wordpress-complex-1"), + }, { + Id: charm.MustParseURL("bundle/wordpress-simple-0"), + }}), + }) +} + +// sameMetaAnyResponses returns a BodyAsserter that checks whether the meta/any response +// matches the expected one, even if the results appear in a different order. 
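+// An editorial illustration, not part of the original source: the
+// bundle ACL test above exercises it as
+//
+//	ExpectBody: sameMetaAnyResponses([]*params.MetaAnyResponse{{
+//		Id: charm.MustParseURL("bundle/wordpress-complex-1"),
+//	}, {
+//		Id: charm.MustParseURL("bundle/wordpress-simple-0"),
+//	}}),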
+func sameMetaAnyResponses(expect interface{}) httptesting.BodyAsserter {
+	return func(c *gc.C, m json.RawMessage) {
+		expectMeta, ok := expect.([]*params.MetaAnyResponse)
+		if !ok {
+			c.Assert(string(m), jc.JSONEquals, expect)
+			return
+		}
+		var got []*params.MetaAnyResponse
+		err := json.Unmarshal(m, &got)
+		c.Assert(err, gc.IsNil)
+		sort.Sort(metaAnyResponseById(got))
+		sort.Sort(metaAnyResponseById(expectMeta))
+		data, err := json.Marshal(got)
+		c.Assert(err, gc.IsNil)
+		c.Assert(string(data), jc.JSONEquals, expect)
+	}
+}
+
+// relationTestingBundle returns a bundle for use in relation
+// testing. The urls parameter holds a list of charm references
+// to be included in the bundle.
+// For each URL, a corresponding service is automatically created.
+func relationTestingBundle(urls []string) charm.Bundle {
+	services := make(map[string]*charm.ServiceSpec, len(urls))
+	for i, url := range urls {
+		service := &charm.ServiceSpec{
+			Charm:    url,
+			NumUnits: 1,
+		}
+		services[fmt.Sprintf("service-%d", i)] = service
+	}
+	return &testingBundle{
+		data: &charm.BundleData{
+			Services: services,
+		},
+	}
+}
+
+// testingBundle is a bundle implementation that
+// returns bundle metadata held in the data field.
+type testingBundle struct {
+	data *charm.BundleData
+}
+
+func (b *testingBundle) Data() *charm.BundleData {
+	return b.data
+}
+
+func (b *testingBundle) ReadMe() string {
+	// For the purposes of this implementation, the charm readme is not
+	// relevant.
+	return ""
+}
+
+type metaAnyResponseById []*params.MetaAnyResponse
+
+func (s metaAnyResponseById) Len() int      { return len(s) }
+func (s metaAnyResponseById) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s metaAnyResponseById) Less(i, j int) bool {
+	return s[i].Id.String() < s[j].Id.String()
+}
+
+// mustParseResolvedURL parses a resolved URL in string form, with
+// the optional promulgated revision preceding the entity URL
+// separated by a space.
+func mustParseResolvedURL(urlStr string) *router.ResolvedURL {
+	s := strings.Fields(urlStr)
+	promRev := -1
+	switch len(s) {
+	default:
+		panic(fmt.Errorf("invalid resolved URL string %q", urlStr))
+	case 2:
+		var err error
+		promRev, err = strconv.Atoi(s[0])
+		if err != nil || promRev < 0 {
+			panic(fmt.Errorf("invalid resolved URL string %q", urlStr))
+		}
+	case 1:
+	}
+	url := charm.MustParseURL(s[len(s)-1])
+	return &router.ResolvedURL{
+		URL:                 *url.WithChannel(""),
+		PromulgatedRevision: promRev,
+		Development:         url.Channel == charm.DevelopmentChannel,
+	}
+}
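To make the two accepted input forms concrete, a short illustrative snippet (the URLs are arbitrary examples; the commented results follow from the parser above):

    // With a leading promulgated revision:
    rurl := mustParseResolvedURL("42 ~charmers/utopic/wordpress-42")
    // rurl.URL refers to cs:~charmers/utopic/wordpress-42 and
    // rurl.PromulgatedRevision is 42.

    // Without one, the entity is not promulgated:
    rurl = mustParseResolvedURL("~bob/trusty/thing-2")
    // rurl.PromulgatedRevision is -1.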
=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,172 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"net/http"
+	"strconv"
+	"sync/atomic"
+
+	"github.com/juju/utils/parallel"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/router"
+)
+
+const maxConcurrency = 20
+
+// GET search[?text=text][&autocomplete=1][&filter=value…][&limit=limit][&include=meta][&skip=count][&sort=field[+dir]]
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-search
+func (h *ReqHandler) serveSearch(_ http.Header, req *http.Request) (interface{}, error) {
+	sp, err := parseSearchParams(req)
+	if err != nil {
+		return "", err
+	}
+	auth, err := h.checkRequest(req, nil, opOther)
+	if err != nil {
+		logger.Infof("authorization failed on search request, granting no privileges: %v", err)
+	}
+	sp.Admin = auth.Admin
+	if auth.Username != "" {
+		sp.Groups = append(sp.Groups, auth.Username)
+		groups, err := h.groupsForUser(auth.Username)
+		if err != nil {
+			logger.Infof("cannot get groups for user %q, assuming no groups: %v", auth.Username, err)
+		}
+		sp.Groups = append(sp.Groups, groups...)
+	}
+	return h.doSearch(sp, req)
+}
+
+// doSearch performs the search specified by SearchParams. If sp
+// specifies that additional metadata needs to be added to the results,
+// then it is added.
+func (h *ReqHandler) doSearch(sp charmstore.SearchParams, req *http.Request) (interface{}, error) {
+	// perform query
+	results, err := h.Store.Search(sp)
+	if err != nil {
+		return nil, errgo.Notef(err, "error performing search")
+	}
+	return params.SearchResponse{
+		SearchTime: results.SearchTime,
+		Total:      results.Total,
+		Results:    h.addMetaData(results.Results, sp.Include, req),
+	}, nil
+}
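addMetaData below fans metadata lookups out across goroutines and then compacts the result slice. The pattern is worth seeing in isolation; this reduced, self-contained sketch invents its own workload and assumes only the github.com/juju/utils/parallel package already imported by this file.

    package main

    import (
    	"fmt"
    	"sync/atomic"

    	"github.com/juju/utils/parallel"
    )

    func main() {
    	items := []int{1, 2, 3, 4, 5}
    	results := make([]string, len(items))
    	var missing int32
    	// Run at most two tasks at a time, mirroring maxConcurrency above.
    	run := parallel.NewRun(2)
    	for i, n := range items {
    		i, n := i, n // capture loop variables for the closure
    		run.Do(func() error {
    			if n%2 == 0 {
    				// Simulate a failed lookup: count it and move on
    				// rather than failing the whole batch, as
    				// addMetaData does.
    				atomic.AddInt32(&missing, 1)
    				return nil
    			}
    			results[i] = fmt.Sprintf("item-%d", n)
    			return nil
    		})
    	}
    	// The closures never return an error, so Wait's result can be ignored.
    	run.Wait()
    	if missing > 0 {
    		// Shuffle the surviving results down to fill the gaps.
    		j := 0
    		for _, r := range results {
    			if r != "" {
    				results[j] = r
    				j++
    			}
    		}
    		results = results[:j]
    	}
    	fmt.Println(results) // [item-1 item-3 item-5]
    }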
+
+// addMetaData adds the metadata requested by the include list to the
+// search results.
+func (h *ReqHandler) addMetaData(results []*router.ResolvedURL, include []string, req *http.Request) []params.EntityResult {
+	entities := make([]params.EntityResult, len(results))
+	run := parallel.NewRun(maxConcurrency)
+	var missing int32
+	for i, ref := range results {
+		i, ref := i, ref
+		run.Do(func() error {
+			meta, err := h.Router.GetMetadata(ref, include, req)
+			if err != nil {
+				// Unfortunately it is possible to get errors here due to
+				// internal inconsistency, so rather than throwing away
+				// all the search results, we just log the error and move on.
+				logger.Errorf("cannot retrieve metadata for %v: %v", ref, err)
+				atomic.AddInt32(&missing, 1)
+				return nil
+			}
+			entities[i] = params.EntityResult{
+				Id:   ref.PreferredURL(),
+				Meta: meta,
+			}
+			return nil
+		})
+	}
+	// We never return an error from the Do function above, so no need to
+	// check the error here.
+	run.Wait()
+	if missing == 0 {
+		return entities
+	}
+	// We're missing some results - shuffle all the results down to
+	// fill the gaps.
+	j := 0
+	for _, result := range entities {
+		if result.Id != nil {
+			entities[j] = result
+			j++
+		}
+	}
+
+	return entities[0:j]
+}
+
+// GET search/interesting[?limit=limit][&include=meta]
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-searchinteresting
+func (h *ReqHandler) serveSearchInteresting(w http.ResponseWriter, req *http.Request) {
+	router.WriteError(w, errNotImplemented)
+}
+
+// parseSearchParams extracts the search parameters from the request.
+func parseSearchParams(req *http.Request) (charmstore.SearchParams, error) {
+	sp := charmstore.SearchParams{}
+	var err error
+	for k, v := range req.Form {
+		switch k {
+		case "text":
+			sp.Text = v[0]
+		case "autocomplete":
+			sp.AutoComplete, err = router.ParseBool(v[0])
+			if err != nil {
+				return charmstore.SearchParams{}, badRequestf(err, "invalid autocomplete parameter")
+			}
+		case "limit":
+			sp.Limit, err = strconv.Atoi(v[0])
+			if err != nil {
+				return charmstore.SearchParams{}, badRequestf(err, "invalid limit parameter: could not parse integer")
+			}
+			if sp.Limit < 1 {
+				return charmstore.SearchParams{}, badRequestf(nil, "invalid limit parameter: expected integer greater than zero")
+			}
+		case "include":
+			for _, s := range v {
+				if s != "" {
+					sp.Include = append(sp.Include, s)
+				}
+			}
+		case "description", "name", "owner", "provides", "requires", "series", "summary", "tags", "type":
+			if sp.Filters == nil {
+				sp.Filters = make(map[string][]string)
+			}
+			sp.Filters[k] = v
+		case "promulgated":
+			promulgated, err := router.ParseBool(v[0])
+			if err != nil {
+				return charmstore.SearchParams{}, badRequestf(err, "invalid promulgated filter parameter")
+			}
+			if sp.Filters == nil {
+				sp.Filters = make(map[string][]string)
+			}
+			if promulgated {
+				sp.Filters[k] = []string{"1"}
+			} else {
+				sp.Filters[k] = []string{"0"}
+			}
+		case "skip":
+			sp.Skip, err = strconv.Atoi(v[0])
+			if err != nil {
+				return charmstore.SearchParams{}, badRequestf(err, "invalid skip parameter: could not parse integer")
+			}
+			if sp.Skip < 0 {
+				return charmstore.SearchParams{}, badRequestf(nil, "invalid skip parameter: expected non-negative integer")
+			}
+		case "sort":
+			err = sp.ParseSortFields(v...)
+			if err != nil {
+				return charmstore.SearchParams{}, badRequestf(err, "invalid sort field")
+			}
+		default:
+			return charmstore.SearchParams{}, badRequestf(nil, "invalid parameter: %s", k)
+		}
+	}
+	return sp, nil
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/search_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,908 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "sort" + "strings" + + "github.com/juju/loggo" + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon.v1" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v5" +) + +type SearchSuite struct { + commonSuite +} + +var _ = gc.Suite(&SearchSuite{}) + +var exportTestCharms = map[string]*router.ResolvedURL{ + "wordpress": newResolvedURL("cs:~charmers/precise/wordpress-23", 23), + "mysql": newResolvedURL("cs:~openstack-charmers/trusty/mysql-7", 7), + "varnish": newResolvedURL("cs:~foo/trusty/varnish-1", -1), + "riak": newResolvedURL("cs:~charmers/trusty/riak-67", 67), +} + +var exportTestBundles = map[string]*router.ResolvedURL{ + "wordpress-simple": newResolvedURL("cs:~charmers/bundle/wordpress-simple-4", 4), +} + +func (s *SearchSuite) SetUpSuite(c *gc.C) { + s.enableES = true + s.enableIdentity = true + s.commonSuite.SetUpSuite(c) +} + +func (s *SearchSuite) SetUpTest(c *gc.C) { + s.commonSuite.SetUpTest(c) + s.addCharmsToStore(c) + // hide the riak charm + err := s.store.DB.BaseEntities().UpdateId( + charm.MustParseURL("cs:~charmers/riak"), + bson.D{{"$set", map[string]mongodoc.ACL{ + "acls": { + Read: []string{"charmers", "test-user"}, + }, + }}}, + ) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(newResolvedURL("~charmers/trusty/riak-0", 0)) + c.Assert(err, gc.IsNil) + err = s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) + c.Assert(err, gc.IsNil) +} + +func (s *SearchSuite) addCharmsToStore(c *gc.C) { + for name, id := range exportTestCharms { + err := s.store.AddCharmWithArchive(id, getCharm(name)) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(id) + c.Assert(err, gc.IsNil) + } + for name, id := range exportTestBundles { + err := s.store.AddBundleWithArchive(id, getBundle(name)) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&id.URL, "read", params.Everyone, id.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(id) + c.Assert(err, gc.IsNil) + } +} + +func getCharm(name string) *charm.CharmDir { + ca := storetesting.Charms.CharmDir(name) + ca.Meta().Categories = append(strings.Split(name, "-"), "bar") + return ca +} + +func getBundle(name string) *charm.BundleDir { + ba := storetesting.Charms.BundleDir(name) + ba.Data().Tags = append(strings.Split(name, "-"), "baz") + return ba +} + +func (s *SearchSuite) TestParseSearchParams(c *gc.C) { + tests := []struct { + about string + query string + expectParams charmstore.SearchParams + expectError string + }{{ + about: "bare search", + query: "", + }, { + about: "text search", + query: "text=test", + expectParams: charmstore.SearchParams{ + Text: "test", + }, + }, { + about: "autocomplete", + query: "autocomplete=1", + expectParams: charmstore.SearchParams{ + AutoComplete: true, + }, + }, { + about: "invalid autocomplete", + query: "autocomplete=true", + 
expectError: `invalid autocomplete parameter: unexpected bool value "true" (must be "0" or "1")`, + }, { + about: "limit", + query: "limit=20", + expectParams: charmstore.SearchParams{ + Limit: 20, + }, + }, { + about: "invalid limit", + query: "limit=twenty", + expectError: `invalid limit parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, + }, { + about: "limit too low", + query: "limit=-1", + expectError: "invalid limit parameter: expected integer greater than zero", + }, { + about: "include", + query: "include=archive-size", + expectParams: charmstore.SearchParams{ + Include: []string{"archive-size"}, + }, + }, { + about: "include many", + query: "include=archive-size&include=bundle-data", + expectParams: charmstore.SearchParams{ + Include: []string{"archive-size", "bundle-data"}, + }, + }, { + about: "include many with blanks", + query: "include=archive-size&include=&include=bundle-data", + expectParams: charmstore.SearchParams{ + Include: []string{"archive-size", "bundle-data"}, + }, + }, { + about: "description filter", + query: "description=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "description": {"text"}, + }, + }, + }, { + about: "name filter", + query: "name=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "name": {"text"}, + }, + }, + }, { + about: "owner filter", + query: "owner=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "owner": {"text"}, + }, + }, + }, { + about: "provides filter", + query: "provides=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "provides": {"text"}, + }, + }, + }, { + about: "requires filter", + query: "requires=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "requires": {"text"}, + }, + }, + }, { + about: "series filter", + query: "series=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "series": {"text"}, + }, + }, + }, { + about: "tags filter", + query: "tags=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "tags": {"text"}, + }, + }, + }, { + about: "type filter", + query: "type=text", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "type": {"text"}, + }, + }, + }, { + about: "many filters", + query: "name=name&owner=owner&series=series1&series=series2", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "name": {"name"}, + "owner": {"owner"}, + "series": {"series1", "series2"}, + }, + }, + }, { + about: "bad parameter", + query: "a=b", + expectError: "invalid parameter: a", + }, { + about: "skip", + query: "skip=20", + expectParams: charmstore.SearchParams{ + Skip: 20, + }, + }, { + about: "invalid skip", + query: "skip=twenty", + expectError: `invalid skip parameter: could not parse integer: strconv.ParseInt: parsing "twenty": invalid syntax`, + }, { + about: "skip too low", + query: "skip=-1", + expectError: "invalid skip parameter: expected non-negative integer", + }, { + about: "promulgated filter", + query: "promulgated=1", + expectParams: charmstore.SearchParams{ + Filters: map[string][]string{ + "promulgated": {"1"}, + }, + }, + }, { + about: "promulgated filter - bad", + query: "promulgated=bad", + expectError: `invalid promulgated filter parameter: unexpected bool value "bad" (must be "0" or "1")`, + }} + for i, test := range tests { + c.Logf("test %d. 
%s", i, test.about) + var req http.Request + var err error + req.Form, err = url.ParseQuery(test.query) + c.Assert(err, gc.IsNil) + sp, err := v5.ParseSearchParams(&req) + if test.expectError != "" { + c.Assert(err, gc.Not(gc.IsNil)) + c.Assert(err.Error(), gc.Equals, test.expectError) + } else { + c.Assert(err, gc.IsNil) + } + c.Assert(sp, jc.DeepEquals, test.expectParams) + } +} + +func (s *SearchSuite) TestSuccessfulSearches(c *gc.C) { + tests := []struct { + about string + query string + results []*router.ResolvedURL + }{{ + about: "bare search", + query: "", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "text search", + query: "text=wordpress", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "autocomplete search", + query: "text=word&autocomplete=1", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "blank text search", + query: "text=", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "description filter search", + query: "description=database", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "name filter search", + query: "name=mysql", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "owner filter search", + query: "owner=foo", + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + }, + }, { + about: "provides filter search", + query: "provides=mysql", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "requires filter search", + query: "requires=mysql", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "series filter search", + query: "series=trusty", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "summary filter search", + query: "summary=database", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["varnish"], + }, + }, { + about: "tags filter search", + query: "tags=wordpress", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "type filter search", + query: "type=bundle", + results: []*router.ResolvedURL{ + exportTestBundles["wordpress-simple"], + }, + }, { + about: "multiple type filter search", + query: "type=bundle&type=charm", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "provides multiple interfaces filter search", + query: "provides=monitoring+http", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "requires multiple interfaces filter search", + query: "requires=mysql+varnish", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + }, + }, { + about: "multiple tags filter search", + query: "tags=mysql+bar", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }, { + about: "blank owner", + query: "owner=", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], 
+ exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "paginated search", + query: "name=mysql&skip=1", + }, { + about: "promulgated", + query: "promulgated=1", + results: []*router.ResolvedURL{ + exportTestCharms["wordpress"], + exportTestCharms["mysql"], + exportTestBundles["wordpress-simple"], + }, + }, { + about: "not promulgated", + query: "promulgated=0", + results: []*router.ResolvedURL{ + exportTestCharms["varnish"], + }, + }, { + about: "promulgated with owner", + query: "promulgated=1&owner=openstack-charmers", + results: []*router.ResolvedURL{ + exportTestCharms["mysql"], + }, + }} + for i, test := range tests { + c.Logf("test %d. %s", i, test.about) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?" + test.query), + }) + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, len(test.results)) + c.Logf("results: %s", rec.Body.Bytes()) + assertResultSet(c, sr, test.results) + } +} + +func (s *SearchSuite) TestPaginatedSearch(c *gc.C) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?text=wordpress&skip=1"), + }) + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, 1) + c.Assert(sr.Total, gc.Equals, 2) +} + +func (s *SearchSuite) TestMetadataFields(c *gc.C) { + tests := []struct { + about string + query string + meta map[string]interface{} + }{{ + about: "archive-size", + query: "name=mysql&include=archive-size", + meta: map[string]interface{}{ + "archive-size": params.ArchiveSizeResponse{438}, + }, + }, { + about: "bundle-metadata", + query: "name=wordpress-simple&type=bundle&include=bundle-metadata", + meta: map[string]interface{}{ + "bundle-metadata": getBundle("wordpress-simple").Data(), + }, + }, { + about: "bundle-machine-count", + query: "name=wordpress-simple&type=bundle&include=bundle-machine-count", + meta: map[string]interface{}{ + "bundle-machine-count": params.BundleCount{2}, + }, + }, { + about: "bundle-unit-count", + query: "name=wordpress-simple&type=bundle&include=bundle-unit-count", + meta: map[string]interface{}{ + "bundle-unit-count": params.BundleCount{2}, + }, + }, { + about: "charm-actions", + query: "name=wordpress&type=charm&include=charm-actions", + meta: map[string]interface{}{ + "charm-actions": getCharm("wordpress").Actions(), + }, + }, { + about: "charm-config", + query: "name=wordpress&type=charm&include=charm-config", + meta: map[string]interface{}{ + "charm-config": getCharm("wordpress").Config(), + }, + }, { + about: "charm-related", + query: "name=wordpress&type=charm&include=charm-related", + meta: map[string]interface{}{ + "charm-related": params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "mysql": { + { + Id: exportTestCharms["mysql"].PreferredURL(), + }, + }, + "varnish": { + { + Id: exportTestCharms["varnish"].PreferredURL(), + }, + }, + }, + }, + }, + }, { + about: "multiple values", + query: "name=wordpress&type=charm&include=charm-related&include=charm-config", + meta: map[string]interface{}{ + "charm-related": params.RelatedResponse{ + Provides: map[string][]params.MetaAnyResponse{ + "mysql": { + { + Id: exportTestCharms["mysql"].PreferredURL(), + }, + }, + "varnish": { + { + Id: exportTestCharms["varnish"].PreferredURL(), + }, + }, + }, + }, + "charm-config": getCharm("wordpress").Config(), + }, + }} + 
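+	// Each case above matches exactly one entity; "include" may be
+	// given several times (as in the "multiple values" case) and each
+	// included name is expected to appear as a key in the result's
+	// Meta map.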
for i, test := range tests {
+		c.Logf("test %d. %s", i, test.about)
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			URL:     storeURL("search?" + test.query),
+		})
+		c.Assert(rec.Code, gc.Equals, http.StatusOK)
+		var sr struct {
+			Results []struct {
+				Meta json.RawMessage
+			}
+		}
+		err := json.Unmarshal(rec.Body.Bytes(), &sr)
+		c.Assert(err, gc.IsNil)
+		c.Assert(sr.Results, gc.HasLen, 1)
+		c.Assert(string(sr.Results[0].Meta), jc.JSONEquals, test.meta)
+	}
+}
+
+func (s *SearchSuite) TestSearchError(c *gc.C) {
+	err := s.esSuite.ES.DeleteIndex(s.esSuite.TestIndex)
+	c.Assert(err, gc.Equals, nil)
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("search?name=wordpress"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusInternalServerError)
+	var resp params.Error
+	err = json.Unmarshal(rec.Body.Bytes(), &resp)
+	c.Assert(err, gc.IsNil)
+	c.Assert(resp.Code, gc.Equals, params.ErrorCode(""))
+	c.Assert(resp.Message, gc.Matches, "error performing search: search failed: .*")
+}
+
+func (s *SearchSuite) TestSearchIncludeError(c *gc.C) {
+	// Perform a search for all charms, including the
+	// manifest, which will try to retrieve all charm
+	// blobs.
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("search?type=charm&include=manifest"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	var resp params.SearchResponse
+	err := json.Unmarshal(rec.Body.Bytes(), &resp)
+	c.Assert(err, gc.IsNil)
+	// cs:riak will not be found because it is not visible to "everyone".
+	c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-1)
+
+	// Now remove one of the blobs. The search should still
+	// work, but return one fewer result.
+	blobName, _, err := s.store.BlobNameAndHash(newResolvedURL("~charmers/precise/wordpress-23", 23))
+	c.Assert(err, gc.IsNil)
+	err = s.store.BlobStore.Remove(blobName)
+	c.Assert(err, gc.IsNil)
+
+	// Now search again - we should get one fewer result
+	// (and the error will be logged).
+
+	// Register a logger so that we can check the logging output.
+	// It will be automatically removed later because IsolatedMgoESSuite
+	// uses LoggingSuite.
+	var tw loggo.TestWriter
+	err = loggo.RegisterWriter("test-log", &tw, loggo.DEBUG)
+	c.Assert(err, gc.IsNil)
+
+	rec = httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("search?type=charm&include=manifest"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	resp = params.SearchResponse{}
+	err = json.Unmarshal(rec.Body.Bytes(), &resp)
+	c.Assert(err, gc.IsNil)
+	// cs:riak will not be found because it is not visible to "everyone".
+	// cs:wordpress will not be found because it has no manifest.
+	c.Assert(resp.Results, gc.HasLen, len(exportTestCharms)-2)
+
+	c.Assert(tw.Log(), jc.LogMatches, []string{"cannot retrieve metadata for cs:precise/wordpress-23: cannot open archive data for cs:precise/wordpress-23: .*"})
+}
+
+func (s *SearchSuite) TestSorting(c *gc.C) {
+	tests := []struct {
+		about   string
+		query   string
+		results []*router.ResolvedURL
+	}{{
+		about: "name ascending",
+		query: "sort=name",
+		results: []*router.ResolvedURL{
+			exportTestCharms["mysql"],
+			exportTestCharms["varnish"],
+			exportTestCharms["wordpress"],
+			exportTestBundles["wordpress-simple"],
+		},
+	}, {
+		about: "name descending",
+		query: "sort=-name",
+		results: []*router.ResolvedURL{
+			exportTestBundles["wordpress-simple"],
+			exportTestCharms["wordpress"],
+			exportTestCharms["varnish"],
+			exportTestCharms["mysql"],
+		},
+	}, {
+		about: "series ascending",
+		query: "sort=series,name",
+		results: []*router.ResolvedURL{
+			exportTestBundles["wordpress-simple"],
+			exportTestCharms["wordpress"],
+			exportTestCharms["mysql"],
+			exportTestCharms["varnish"],
+		},
+	}, {
+		about: "series descending",
+		query: "sort=-series&sort=name",
+		results: []*router.ResolvedURL{
+			exportTestCharms["mysql"],
+			exportTestCharms["varnish"],
+			exportTestCharms["wordpress"],
+			exportTestBundles["wordpress-simple"],
+		},
+	}, {
+		about: "owner ascending",
+		query: "sort=owner,name",
+		results: []*router.ResolvedURL{
+			exportTestCharms["wordpress"],
+			exportTestBundles["wordpress-simple"],
+			exportTestCharms["varnish"],
+			exportTestCharms["mysql"],
+		},
+	}, {
+		about: "owner descending",
+		query: "sort=-owner&sort=name",
+		results: []*router.ResolvedURL{
+			exportTestCharms["mysql"],
+			exportTestCharms["varnish"],
+			exportTestCharms["wordpress"],
+			exportTestBundles["wordpress-simple"],
+		},
+	}}
+	for i, test := range tests {
+		c.Logf("test %d. %s", i, test.about)
+		rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+			Handler: s.srv,
+			URL:     storeURL("search?" + test.query),
+		})
+		var sr params.SearchResponse
+		err := json.Unmarshal(rec.Body.Bytes(), &sr)
+		c.Assert(err, gc.IsNil)
+		// Not using assertResultSet(c, sr, test.results) here, as it sorts the results internally.
+		c.Assert(sr.Results, gc.HasLen, len(test.results), gc.Commentf("expected %#v", test.results))
+		c.Logf("results: %s", rec.Body.Bytes())
+		for i := range test.results {
+			c.Assert(sr.Results[i].Id.String(), gc.Equals, test.results[i].PreferredURL().String(), gc.Commentf("element %d", i))
+		}
+	}
+}
+
+func (s *SearchSuite) TestSortUnsupportedField(c *gc.C) {
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("search?sort=foo"),
+	})
+	var e params.Error
+	err := json.Unmarshal(rec.Body.Bytes(), &e)
+	c.Assert(err, gc.IsNil)
+	c.Assert(e.Code, gc.Equals, params.ErrBadRequest)
+	c.Assert(e.Message, gc.Equals, "invalid sort field: unrecognized sort parameter \"foo\"")
+}
+
+func (s *SearchSuite) TestDownloadsBoost(c *gc.C) {
+	// TODO (frankban): remove this call when removing the legacy counts logic.
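+	// The charms below are given different download counts so that the
+	// search ranking is expected to order them by popularity:
+	// varnish (8 downloads) first, then wordpress (1), then mysql (0).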
+ patchLegacyDownloadCountsEnabled(s.AddCleanup, false) + charmDownloads := map[string]int{ + "mysql": 0, + "wordpress": 1, + "varnish": 8, + } + for n, cnt := range charmDownloads { + url := newResolvedURL("cs:~downloads-test/trusty/x-1", -1) + url.URL.Name = n + err := s.store.AddCharmWithArchive(url, getCharm(n)) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&url.URL, "read", params.Everyone, url.URL.User) + c.Assert(err, gc.IsNil) + err = s.store.UpdateSearch(url) + c.Assert(err, gc.IsNil) + for i := 0; i < cnt; i++ { + err := s.store.IncrementDownloadCounts(url) + c.Assert(err, gc.IsNil) + } + } + err := s.esSuite.ES.RefreshIndex(s.esSuite.TestIndex) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search?owner=downloads-test"), + }) + var sr params.SearchResponse + err = json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + c.Assert(sr.Results, gc.HasLen, 3) + c.Assert(sr.Results[0].Id.Name, gc.Equals, "varnish") + c.Assert(sr.Results[1].Id.Name, gc.Equals, "wordpress") + c.Assert(sr.Results[2].Id.Name, gc.Equals, "mysql") +} + +// TODO(mhilton) remove this test when removing legacy counts logic. +func (s *SearchSuite) TestLegacyStatsUpdatesSearch(c *gc.C) { + patchLegacyDownloadCountsEnabled(s.AddCleanup, true) + doc, err := s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) + c.Assert(err, gc.IsNil) + c.Assert(doc.TotalDownloads, gc.Equals, int64(0)) + s.assertPut(c, "~openstack-charmers/trusty/mysql-7/meta/extra-info/"+params.LegacyDownloadStats, 57) + doc, err = s.store.ES.GetSearchDocument(charm.MustParseURL("~openstack-charmers/trusty/mysql-7")) + c.Assert(err, gc.IsNil) + c.Assert(doc.TotalDownloads, gc.Equals, int64(57)) +} + +func (s *SearchSuite) assertPut(c *gc.C, url string, val interface{}) { + body, err := json.Marshal(val) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(url), + Method: "PUT", + Header: http.Header{ + "Content-Type": {"application/json"}, + }, + Username: testUsername, + Password: testPassword, + Body: bytes.NewReader(body), + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("headers: %v, body: %s", rec.HeaderMap, rec.Body.String())) + c.Assert(rec.Body.String(), gc.HasLen, 0) +} + +func (s *SearchSuite) TestSearchWithAdminCredentials(c *gc.C) { + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Username: testUsername, + Password: testPassword, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + expected := []*router.ResolvedURL{ + exportTestCharms["mysql"], + exportTestCharms["wordpress"], + exportTestCharms["riak"], + exportTestCharms["varnish"], + exportTestBundles["wordpress-simple"], + } + var sr params.SearchResponse + err := json.Unmarshal(rec.Body.Bytes(), &sr) + c.Assert(err, gc.IsNil) + assertResultSet(c, sr, expected) +} + +func (s *SearchSuite) TestSearchWithUserMacaroon(c *gc.C) { + m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{ + checkers.DeclaredCaveat("username", "test-user"), + }) + c.Assert(err, gc.IsNil) + macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + c.Assert(err, gc.IsNil) + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("search"), + Cookies: []*http.Cookie{macaroonCookie}, + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + expected := []*router.ResolvedURL{ + 
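+		// riak is expected in the results here: the declared "test-user"
+		// identity may read it, unlike the anonymous searches above.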
exportTestCharms["mysql"],
+		exportTestCharms["wordpress"],
+		exportTestCharms["riak"],
+		exportTestCharms["varnish"],
+		exportTestBundles["wordpress-simple"],
+	}
+	var sr params.SearchResponse
+	err = json.Unmarshal(rec.Body.Bytes(), &sr)
+	c.Assert(err, gc.IsNil)
+	assertResultSet(c, sr, expected)
+}
+
+func (s *SearchSuite) TestSearchWithUserInGroups(c *gc.C) {
+	m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{
+		checkers.DeclaredCaveat(v5.UsernameAttr, "bob"),
+	})
+	c.Assert(err, gc.IsNil)
+	macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m})
+	c.Assert(err, gc.IsNil)
+	s.idM.groups = map[string][]string{
+		"bob": {"test-user", "test-user2"},
+	}
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("search"),
+		Cookies: []*http.Cookie{macaroonCookie},
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	expected := []*router.ResolvedURL{
+		exportTestCharms["mysql"],
+		exportTestCharms["wordpress"],
+		exportTestCharms["riak"],
+		exportTestCharms["varnish"],
+		exportTestBundles["wordpress-simple"],
+	}
+	var sr params.SearchResponse
+	err = json.Unmarshal(rec.Body.Bytes(), &sr)
+	c.Assert(err, gc.IsNil)
+	assertResultSet(c, sr, expected)
+}
+
+func (s *SearchSuite) TestSearchWithBadAdminCredentialsAndACookie(c *gc.C) {
+	m, err := s.store.Bakery.NewMacaroon("", nil, []checkers.Caveat{
+		checkers.DeclaredCaveat("username", "test-user"),
+	})
+	c.Assert(err, gc.IsNil)
+	macaroonCookie, err := httpbakery.NewCookie(macaroon.Slice{m})
+	c.Assert(err, gc.IsNil)
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler:  s.srv,
+		URL:      storeURL("search"),
+		Cookies:  []*http.Cookie{macaroonCookie},
+		Username: testUsername,
+		Password: "bad-password",
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK)
+	expected := []*router.ResolvedURL{
+		exportTestCharms["mysql"],
+		exportTestCharms["wordpress"],
+		exportTestCharms["varnish"],
+		exportTestBundles["wordpress-simple"],
+	}
+	var sr params.SearchResponse
+	err = json.Unmarshal(rec.Body.Bytes(), &sr)
+	c.Assert(err, gc.IsNil)
+	assertResultSet(c, sr, expected)
+}
+
+func assertResultSet(c *gc.C, sr params.SearchResponse, expected []*router.ResolvedURL) {
+	sort.Sort(searchResultById(sr.Results))
+	sort.Sort(resolvedURLByPreferredURL(expected))
+	c.Assert(sr.Results, gc.HasLen, len(expected), gc.Commentf("expected %#v", expected))
+	for i := range expected {
+		c.Assert(sr.Results[i].Id.String(), gc.Equals, expected[i].PreferredURL().String(), gc.Commentf("element %d", i))
+	}
+}
+
+type searchResultById []params.EntityResult
+
+func (s searchResultById) Len() int      { return len(s) }
+func (s searchResultById) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s searchResultById) Less(i, j int) bool {
+	return s[i].Id.String() < s[j].Id.String()
+}
+
+type resolvedURLByPreferredURL []*router.ResolvedURL
+
+func (s resolvedURLByPreferredURL) Len() int      { return len(s) }
+func (s resolvedURLByPreferredURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s resolvedURLByPreferredURL) Less(i, j int) bool {
+	return s[i].PreferredURL().String() < s[j].PreferredURL().String()
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,169 @@
+// Copyright 2012 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/charmrepo.v2-unstable/csclient/params"
+
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+)
+
+const dateFormat = "2006-01-02"
+
+// parseDateRange parses a date range as specified in an http
+// request. The returned times will be zero if not specified.
+func parseDateRange(form url.Values) (start, stop time.Time, err error) {
+	if v := form.Get("start"); v != "" {
+		var err error
+		start, err = time.Parse(dateFormat, v)
+		if err != nil {
+			return time.Time{}, time.Time{}, badRequestf(err, "invalid 'start' value %q", v)
+		}
+	}
+	if v := form.Get("stop"); v != "" {
+		var err error
+		stop, err = time.Parse(dateFormat, v)
+		if err != nil {
+			return time.Time{}, time.Time{}, badRequestf(err, "invalid 'stop' value %q", v)
+		}
+		// Cover all timestamps within the stop day.
+		stop = stop.Add(24*time.Hour - 1*time.Second)
+	}
+	return
+}
+
+// GET stats/counter/key[:key]...[?by=unit][&start=date][&stop=date][&list=1]
+// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-statscounter
+func (h *ReqHandler) serveStatsCounter(_ http.Header, r *http.Request) (interface{}, error) {
+	base := strings.TrimPrefix(r.URL.Path, "/")
+	if strings.Index(base, "/") > 0 {
+		return nil, errgo.WithCausef(nil, params.ErrNotFound, "invalid key")
+	}
+	if base == "" {
+		return nil, params.ErrForbidden
+	}
+	var by charmstore.CounterRequestBy
+	switch v := r.Form.Get("by"); v {
+	case "":
+		by = charmstore.ByAll
+	case "day":
+		by = charmstore.ByDay
+	case "week":
+		by = charmstore.ByWeek
+	default:
+		return nil, badRequestf(nil, "invalid 'by' value %q", v)
+	}
+	req := charmstore.CounterRequest{
+		Key:  strings.Split(base, ":"),
+		List: r.Form.Get("list") == "1",
+		By:   by,
+	}
+	var err error
+	req.Start, req.Stop, err = parseDateRange(r.Form)
+	if err != nil {
+		return nil, errgo.Mask(err, errgo.Is(params.ErrBadRequest))
+	}
+	if req.Key[len(req.Key)-1] == "*" {
+		req.Prefix = true
+		req.Key = req.Key[:len(req.Key)-1]
+		if len(req.Key) == 0 {
+			return nil, errgo.WithCausef(nil, params.ErrForbidden, "unknown key")
+		}
+	}
+	entries, err := h.Store.Counters(&req)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot query counters")
+	}
+
+	var buf []byte
+	var items []params.Statistic
+	for i := range entries {
+		entry := &entries[i]
+		buf = buf[:0]
+		if req.List {
+			for j := range entry.Key {
+				buf = append(buf, entry.Key[j]...)
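+				// Join the elements with ':' separators: for example a
+				// counter entry with the (hypothetical) key
+				// ["archive-download", "utopic", "wordpress"] is rendered
+				// as "archive-download:utopic:wordpress"; when the entry is
+				// a prefix match the trailing separator becomes '*' below.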
+ buf = append(buf, ':') + } + if entry.Prefix { + buf = append(buf, '*') + } else { + buf = buf[:len(buf)-1] + } + } + stat := params.Statistic{ + Key: string(buf), + Count: entry.Count, + } + if !entry.Time.IsZero() { + stat.Date = entry.Time.Format("2006-01-02") + } + items = append(items, stat) + } + + return items, nil +} + +// PUT stats/update +// https://github.com/juju/charmstore/blob/v4/docs/API.md#put-statsupdate +func (h *ReqHandler) serveStatsUpdate(w http.ResponseWriter, r *http.Request) error { + if _, err := h.authorize(r, nil, true, nil); err != nil { + return err + } + if r.Method != "PUT" { + return errgo.WithCausef(nil, params.ErrMethodNotAllowed, "%s not allowed", r.Method) + } + + var req params.StatsUpdateRequest + if ct := r.Header.Get("Content-Type"); ct != "application/json" { + return errgo.WithCausef(nil, params.ErrBadRequest, "unexpected Content-Type %q; expected %q", ct, "application/json") + } + + dec := json.NewDecoder(r.Body) + if err := dec.Decode(&req); err != nil { + return errgo.Notef(err, "cannot unmarshal body") + } + + errors := make([]error, 0) + for _, entry := range req.Entries { + rid, err := h.Router.Context.ResolveURL(entry.CharmReference) + if err != nil { + errors = append(errors, errgo.Notef(err, "cannot find entity for url %s", entry.CharmReference)) + continue + } + + logger.Infof("Increase download stats for id: %s at time: %s", rid, entry.Timestamp) + + if err := h.Store.IncrementDownloadCountsAtTime(rid, entry.Timestamp); err != nil { + errors = append(errors, err) + continue + } + } + + if len(errors) != 0 { + logger.Infof("Errors detected during /stats/update processing: %v", errors) + if len(errors) > 1 { + return errgo.Newf("%s (and %d more errors)", errors[0], len(errors)-1) + } + return errors[0] + } + + return nil +} + +// StatsEnabled reports whether statistics should be gathered for +// the given HTTP request. +func StatsEnabled(req *http.Request) bool { + // It's fine to parse the form more than once, and it avoids + // bugs from not parsing it. + req.ParseForm() + return req.Form.Get("stats") != "0" +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/stats_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,654 @@ +// Copyright 2012 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + "time" + + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/charmstore" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" + "gopkg.in/juju/charmstore.v5-unstable/internal/v5" +) + +type StatsSuite struct { + commonSuite +} + +var _ = gc.Suite(&StatsSuite{}) + +func (s *StatsSuite) TestServerStatsStatus(c *gc.C) { + tests := []struct { + path string + status int + message string + code params.ErrorCode + }{{ + path: "stats/counter/", + status: http.StatusForbidden, + message: "forbidden", + code: params.ErrForbidden, + }, { + path: "stats/counter/*", + status: http.StatusForbidden, + message: "unknown key", + code: params.ErrForbidden, + }, { + path: "stats/counter/any/", + status: http.StatusNotFound, + message: "invalid key", + code: params.ErrNotFound, + }, { + path: "stats/", + status: http.StatusNotFound, + message: "not found", + code: params.ErrNotFound, + }, { + path: "stats/any", + status: http.StatusNotFound, + message: "not found", + code: params.ErrNotFound, + }, { + path: "stats/counter/any?by=fortnight", + status: http.StatusBadRequest, + message: `invalid 'by' value "fortnight"`, + code: params.ErrBadRequest, + }, { + path: "stats/counter/any?start=tomorrow", + status: http.StatusBadRequest, + message: `invalid 'start' value "tomorrow": parsing time "tomorrow" as "2006-01-02": cannot parse "tomorrow" as "2006"`, + code: params.ErrBadRequest, + }, { + path: "stats/counter/any?stop=3", + status: http.StatusBadRequest, + message: `invalid 'stop' value "3": parsing time "3" as "2006-01-02": cannot parse "3" as "2006"`, + code: params.ErrBadRequest, + }} + for i, test := range tests { + c.Logf("test %d. %s", i, test.path) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + ExpectStatus: test.status, + ExpectBody: params.Error{ + Message: test.message, + Code: test.code, + }, + }) + } +} + +func (s *StatsSuite) TestServerStatsUpdate(c *gc.C) { + ref := charm.MustParseURL("~charmers/precise/wordpress-23") + tests := []struct { + path string + status int + body params.StatsUpdateRequest + expectBody map[string]interface{} + previousMonth bool + }{{ + path: "stats/update", + status: http.StatusOK, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/wordpress"), + }}}, + }, { + path: "stats/update", + status: http.StatusOK, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: ref, + }}, + }, + }, { + path: "stats/update", + status: http.StatusOK, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now().AddDate(0, -1, 0), + CharmReference: ref, + }}, + }, + previousMonth: true, + }} + + ch := storetesting.Charms.CharmDir("wordpress") + rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + + var countsBefore, countsAfter charmstore.AggregatedCounts + for i, test := range tests { + c.Logf("test %d. 
%s", i, test.path) + + _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "PUT", + Username: testUsername, + Password: testPassword, + JSONBody: test.body, + }) + + c.Assert(rec.Code, gc.Equals, test.status) + + _, countsAfter, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) + if test.previousMonth { + c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(0)) + } else { + c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) + } + } +} + +func (s *StatsSuite) TestServerStatsArchiveDownloadOnPromulgatedEntity(c *gc.C) { + ref := charm.MustParseURL("~charmers/precise/wordpress-23") + path := "/stats/counter/archive-download:*" + + ch := storetesting.Charms.CharmDir("wordpress") + rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, rurl.URL.User) + c.Assert(err, gc.IsNil) + s.store.SetPromulgated(rurl, true) + + rec := httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "GET", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.String(), gc.Equals, `[{"Count":0}]`) + + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL("stats/update"), + Method: "PUT", + Username: testUsername, + Password: testPassword, + JSONBody: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: ref, + }}}, + }) + + c.Assert(rec.Code, gc.Equals, http.StatusOK) + + rec = httptesting.DoRequest(c, httptesting.DoRequestParams{ + Handler: s.srv, + URL: storeURL(path), + Method: "GET", + }) + c.Assert(rec.Code, gc.Equals, http.StatusOK) + c.Assert(rec.Body.String(), gc.Equals, `[{"Count":1}]`) +} + +func (s *StatsSuite) TestServerStatsUpdateErrors(c *gc.C) { + ref := charm.MustParseURL("~charmers/precise/wordpress-23") + tests := []struct { + path string + status int + body params.StatsUpdateRequest + expectMessage string + expectCode params.ErrorCode + partialUpdate bool + }{{ + path: "stats/update", + status: http.StatusInternalServerError, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), + }}, + }, + expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for "cs:~charmers/precise/unknown-23"`, + }, { + path: "stats/update", + status: http.StatusInternalServerError, + body: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/unknown-23"), + }, { + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), + }}, + }, + expectMessage: `cannot find entity for url cs:~charmers/precise/unknown-23: no matching charm or bundle for "cs:~charmers/precise/unknown-23"`, + partialUpdate: true, + }} + + ch := storetesting.Charms.CharmDir("wordpress") + rurl := newResolvedURL("~charmers/precise/wordpress-23", 23) + err := s.store.AddCharmWithArchive(rurl, ch) + c.Assert(err, gc.IsNil) + err = s.store.SetPerms(&rurl.URL, "read", params.Everyone, 
rurl.URL.User) + c.Assert(err, gc.IsNil) + + for i, test := range tests { + c.Logf("test %d. %s", i, test.path) + var countsBefore, countsAfter charmstore.AggregatedCounts + if test.partialUpdate { + _, countsBefore, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + } + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL(test.path), + Method: "PUT", + Username: testUsername, + Password: testPassword, + JSONBody: test.body, + ExpectStatus: test.status, + ExpectBody: params.Error{ + Message: test.expectMessage, + Code: test.expectCode, + }, + }) + if test.partialUpdate { + _, countsAfter, err = s.store.ArchiveDownloadCounts(ref, true) + c.Assert(err, gc.IsNil) + c.Assert(countsAfter.Total-countsBefore.Total, gc.Equals, int64(1)) + c.Assert(countsAfter.LastDay-countsBefore.LastDay, gc.Equals, int64(1)) + } + } +} + +func (s *StatsSuite) TestServerStatsUpdateNonAdmin(c *gc.C) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("stats/update"), + Method: "PUT", + JSONBody: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), + }}, + }, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: ¶ms.Error{ + Message: "authentication failed: missing HTTP auth header", + Code: params.ErrUnauthorized, + }, + }) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: storeURL("stats/update"), + Method: "PUT", + Username: "brad", + Password: "pitt", + JSONBody: params.StatsUpdateRequest{ + Entries: []params.StatsUpdateEntry{{ + Timestamp: time.Now(), + CharmReference: charm.MustParseURL("~charmers/precise/wordpress-23"), + }}, + }, + ExpectStatus: http.StatusUnauthorized, + ExpectBody: ¶ms.Error{ + Message: "invalid user name or password", + Code: params.ErrUnauthorized, + }, + }) +} + +func (s *StatsSuite) TestStatsCounter(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + for _, key := range [][]string{{"a", "b"}, {"a", "b"}, {"a", "c"}, {"a"}} { + err := s.store.IncCounter(key) + c.Assert(err, gc.IsNil) + } + + var all []interface{} + err := s.store.DB.StatCounters().Find(nil).All(&all) + c.Assert(err, gc.IsNil) + data, err := json.Marshal(all) + c.Assert(err, gc.IsNil) + c.Logf("%s", data) + + expected := map[string]int64{ + "a:b": 2, + "a:b:*": 0, + "a:*": 3, + "a": 1, + "a:b:c": 0, + } + + for counter, n := range expected { + c.Logf("test %q", counter) + url := storeURL("stats/counter/" + counter) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectBody: []params.Statistic{{ + Count: n, + }}, + }) + } +} + +func (s *StatsSuite) TestStatsCounterList(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + incs := [][]string{ + {"a"}, + {"a", "b"}, + {"a", "b", "c"}, + {"a", "b", "c"}, + {"a", "b", "d"}, + {"a", "b", "e"}, + {"a", "f", "g"}, + {"a", "f", "h"}, + {"a", "i"}, + {"j", "k"}, + } + for _, key := range incs { + err := s.store.IncCounter(key) + c.Assert(err, gc.IsNil) + } + + tests := []struct { + key string + result []params.Statistic + }{{ + key: "a", + result: []params.Statistic{{ + Key: "a", + Count: 1, + }}, + }, { + key: "a:*", + result: []params.Statistic{{ + Key: "a:b:*", + Count: 4, + }, { + Key: "a:f:*", + Count: 2, + }, { + Key: "a:b", + Count: 1, + }, { + Key: "a:i", + Count: 1, + }}, + }, { + 
key: "a:b:*", + result: []params.Statistic{{ + Key: "a:b:c", + Count: 2, + }, { + Key: "a:b:d", + Count: 1, + }, { + Key: "a:b:e", + Count: 1, + }}, + }, { + key: "a:*", + result: []params.Statistic{{ + Key: "a:b:*", + Count: 4, + }, { + Key: "a:f:*", + Count: 2, + }, { + Key: "a:b", + Count: 1, + }, { + Key: "a:i", + Count: 1, + }}, + }} + + for i, test := range tests { + c.Logf("test %d: %s", i, test.key) + url := storeURL("stats/counter/" + test.key + "?list=1") + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectBody: test.result, + }) + } +} + +func (s *StatsSuite) TestStatsCounterBy(c *gc.C) { + if !storetesting.MongoJSEnabled() { + c.Skip("MongoDB JavaScript not available") + } + + incs := []struct { + key []string + day int + }{ + {[]string{"a"}, 1}, + {[]string{"a"}, 1}, + {[]string{"b"}, 1}, + {[]string{"a", "b"}, 1}, + {[]string{"a", "c"}, 1}, + {[]string{"a"}, 3}, + {[]string{"a", "b"}, 3}, + {[]string{"b"}, 9}, + {[]string{"b"}, 9}, + {[]string{"a", "c", "d"}, 9}, + {[]string{"a", "c", "e"}, 9}, + {[]string{"a", "c", "f"}, 9}, + } + + day := func(i int) time.Time { + return time.Date(2012, time.May, i, 0, 0, 0, 0, time.UTC) + } + + for i, inc := range incs { + t := day(inc.day) + // Ensure each entry is unique by adding + // a sufficient increment for each test. + t = t.Add(time.Duration(i) * charmstore.StatsGranularity) + + err := s.store.IncCounterAtTime(inc.key, t) + c.Assert(err, gc.IsNil) + } + + tests := []struct { + request charmstore.CounterRequest + result []params.Statistic + }{{ + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: false, + List: false, + By: charmstore.ByDay, + }, + result: []params.Statistic{{ + Date: "2012-05-01", + Count: 2, + }, { + Date: "2012-05-03", + Count: 1, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + }, + result: []params.Statistic{{ + Date: "2012-05-01", + Count: 2, + }, { + Date: "2012-05-03", + Count: 1, + }, { + Date: "2012-05-09", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Start: time.Date(2012, 5, 2, 0, 0, 0, 0, time.UTC), + }, + result: []params.Statistic{{ + Date: "2012-05-03", + Count: 1, + }, { + Date: "2012-05-09", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Stop: time.Date(2012, 5, 4, 0, 0, 0, 0, time.UTC), + }, + result: []params.Statistic{{ + Date: "2012-05-01", + Count: 2, + }, { + Date: "2012-05-03", + Count: 1, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByDay, + Start: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), + Stop: time.Date(2012, 5, 3, 0, 0, 0, 0, time.UTC), + }, + result: []params.Statistic{{ + Date: "2012-05-03", + Count: 1, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: true, + By: charmstore.ByDay, + }, + result: []params.Statistic{{ + Key: "a:b", + Date: "2012-05-01", + Count: 1, + }, { + Key: "a:c", + Date: "2012-05-01", + Count: 1, + }, { + Key: "a:b", + Date: "2012-05-03", + Count: 1, + }, { + Key: "a:c:*", + Date: "2012-05-09", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: false, + By: charmstore.ByWeek, + }, + result: []params.Statistic{{ + Date: "2012-05-06", + Count: 3, + }, { 
+ Date: "2012-05-13", + Count: 3, + }}, + }, { + request: charmstore.CounterRequest{ + Key: []string{"a"}, + Prefix: true, + List: true, + By: charmstore.ByWeek, + }, + result: []params.Statistic{{ + Key: "a:b", + Date: "2012-05-06", + Count: 2, + }, { + Key: "a:c", + Date: "2012-05-06", + Count: 1, + }, { + Key: "a:c:*", + Date: "2012-05-13", + Count: 3, + }}, + }} + + for i, test := range tests { + flags := make(url.Values) + url := storeURL("stats/counter/" + strings.Join(test.request.Key, ":")) + if test.request.Prefix { + url += ":*" + } + if test.request.List { + flags.Set("list", "1") + } + if !test.request.Start.IsZero() { + flags.Set("start", test.request.Start.Format("2006-01-02")) + } + if !test.request.Stop.IsZero() { + flags.Set("stop", test.request.Stop.Format("2006-01-02")) + } + switch test.request.By { + case charmstore.ByDay: + flags.Set("by", "day") + case charmstore.ByWeek: + flags.Set("by", "week") + } + if len(flags) > 0 { + url += "?" + flags.Encode() + } + c.Logf("test %d: %s", i, url) + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: s.srv, + URL: url, + ExpectBody: test.result, + }) + } +} + +func (s *StatsSuite) TestStatsEnabled(c *gc.C) { + statsEnabled := func(url string) bool { + req, _ := http.NewRequest("GET", url, nil) + return v5.StatsEnabled(req) + } + c.Assert(statsEnabled("http://foo.com"), gc.Equals, true) + c.Assert(statsEnabled("http://foo.com?stats=1"), gc.Equals, true) + c.Assert(statsEnabled("http://foo.com?stats=0"), gc.Equals, false) +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,155 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package v5 // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/juju/utils/debugstatus" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + "gopkg.in/mgo.v2/bson" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" +) + +// GET /debug/status +// https://github.com/juju/charmstore/blob/v4/docs/API.md#get-debugstatus +func (h *ReqHandler) serveDebugStatus(_ http.Header, req *http.Request) (interface{}, error) { + h.Store.SetReconnectTimeout(500 * time.Millisecond) + return debugstatus.Check( + debugstatus.ServerStartTime, + debugstatus.Connection(h.Store.DB.Session), + debugstatus.MongoCollections(h.Store.DB), + h.checkElasticSearch, + h.checkEntities, + h.checkBaseEntities, + h.checkLogs( + "ingestion", "Ingestion", + mongodoc.IngestionType, + params.IngestionStart, params.IngestionComplete, + ), + h.checkLogs( + "legacy_statistics", "Legacy Statistics Load", + mongodoc.LegacyStatisticsType, + params.LegacyStatisticsImportStart, params.LegacyStatisticsImportComplete, + ), + ), nil +} + +func (h *ReqHandler) checkElasticSearch() (key string, result debugstatus.CheckResult) { + key = "elasticsearch" + result.Name = "Elastic search is running" + if h.Store.ES == nil || h.Store.ES.Database == nil { + result.Value = "Elastic search is not configured" + result.Passed = true + return key, result + } + health, err := h.Store.ES.Health() + if err != nil { + result.Value = "Connection issues to Elastic Search: " + err.Error() + return key, result + } + result.Value = health.String() + result.Passed = health.Status == "green" + return key, result +} + +func (h *ReqHandler) checkEntities() (key string, result debugstatus.CheckResult) { + result.Name = "Entities in charm store" + charms, err := h.Store.DB.Entities().Find(bson.D{{"series", bson.D{{"$ne", "bundle"}}}}).Count() + if err != nil { + result.Value = "Cannot count charms for consistency check: " + err.Error() + return "entities", result + } + bundles, err := h.Store.DB.Entities().Find(bson.D{{"series", "bundle"}}).Count() + if err != nil { + result.Value = "Cannot count bundles for consistency check: " + err.Error() + return "entities", result + } + promulgated, err := h.Store.DB.Entities().Find(bson.D{{"promulgated-url", bson.D{{"$exists", true}}}}).Count() + if err != nil { + result.Value = "Cannot count promulgated for consistency check: " + err.Error() + return "entities", result + } + result.Value = fmt.Sprintf("%d charms; %d bundles; %d promulgated", charms, bundles, promulgated) + result.Passed = true + return "entities", result +} + +func (h *ReqHandler) checkBaseEntities() (key string, result debugstatus.CheckResult) { + resultKey := "base_entities" + result.Name = "Base entities in charm store" + + // Retrieve the number of base entities. + baseNum, err := h.Store.DB.BaseEntities().Count() + if err != nil { + result.Value = "Cannot count base entities: " + err.Error() + return resultKey, result + } + + // Retrieve the number of entities. 
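+	// The check below passes only when there are at least as many
+	// entities as base entities: every base entity is expected to have
+	// at least one entity (revision) associated with it.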
+ num, err := h.Store.DB.Entities().Count() + if err != nil { + result.Value = "Cannot count entities for consistency check: " + err.Error() + return resultKey, result + } + + result.Value = fmt.Sprintf("count: %d", baseNum) + result.Passed = num >= baseNum + return resultKey, result +} + +func (h *ReqHandler) checkLogs( + resultKey, resultName string, + logType mongodoc.LogType, + startPrefix, endPrefix string, +) debugstatus.CheckerFunc { + return func() (key string, result debugstatus.CheckResult) { + result.Name = resultName + start, end, err := h.findTimesInLogs(logType, startPrefix, endPrefix) + if err != nil { + result.Value = err.Error() + return resultKey, result + } + result.Value = fmt.Sprintf("started: %s, completed: %s", start.Format(time.RFC3339), end.Format(time.RFC3339)) + result.Passed = !(start.IsZero() || end.IsZero()) + return resultKey, result + } +} + +// findTimesInLogs goes through logs in reverse order finding when the start and +// end messages were last added. +func (h *ReqHandler) findTimesInLogs(logType mongodoc.LogType, startPrefix, endPrefix string) (start, end time.Time, err error) { + var log mongodoc.Log + iter := h.Store.DB.Logs(). + Find(bson.D{ + {"level", mongodoc.InfoLevel}, + {"type", logType}, + }).Sort("-time", "-id").Iter() + for iter.Next(&log) { + var msg string + if err := json.Unmarshal(log.Data, &msg); err != nil { + // an error here probably means the log isn't in the form we are looking for. + continue + } + if start.IsZero() && strings.HasPrefix(msg, startPrefix) { + start = log.Time + } + if end.IsZero() && strings.HasPrefix(msg, endPrefix) { + end = log.Time + } + if !start.IsZero() && !end.IsZero() { + break + } + } + if err = iter.Close(); err != nil { + return time.Time{}, time.Time{}, errgo.Notef(err, "Cannot query logs") + } + return +} === added file 'src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status_test.go' --- src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/internal/v5/status_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,281 @@ +// Copyright 2014 Canonical Ltd. +// Licensed under the AGPLv3, see LICENCE file for details. 
+ +package v5_test // import "gopkg.in/juju/charmstore.v5-unstable/internal/v5" + +import ( + "encoding/json" + "net/http" + "time" + + jc "github.com/juju/testing/checkers" + "github.com/juju/testing/httptesting" + "github.com/juju/utils/debugstatus" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable/internal/mongodoc" + "gopkg.in/juju/charmstore.v5-unstable/internal/router" +) + +var zeroTimeStr = time.Time{}.Format(time.RFC3339) + +func (s *APISuite) TestStatus(c *gc.C) { + for _, id := range []*router.ResolvedURL{ + newResolvedURL("cs:~charmers/precise/wordpress-2", 2), + newResolvedURL("cs:~charmers/precise/wordpress-3", 3), + newResolvedURL("cs:~foo/precise/arble-9", -1), + newResolvedURL("cs:~bar/utopic/arble-10", -1), + newResolvedURL("cs:~charmers/bundle/oflaughs-3", 3), + newResolvedURL("cs:~bar/bundle/oflaughs-4", -1), + } { + if id.URL.Series == "bundle" { + s.addPublicBundle(c, "wordpress-simple", id) + } else { + s.addPublicCharm(c, "wordpress", id) + } + } + now := time.Now() + s.PatchValue(&debugstatus.StartTime, now) + start := now.Add(-2 * time.Hour) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"ingestion started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: start, + }) + end := now.Add(-1 * time.Hour) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"ingestion completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: end, + }) + statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart, + }) + statisticsEnd := now.Add(-30 * time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsEnd, + }) + s.AssertDebugStatus(c, true, map[string]params.DebugStatus{ + "mongo_connected": { + Name: "MongoDB is connected", + Value: "Connected", + Passed: true, + }, + "mongo_collections": { + Name: "MongoDB collections", + Value: "All required collections exist", + Passed: true, + }, + "elasticsearch": { + Name: "Elastic search is running", + Value: "Elastic search is not configured", + Passed: true, + }, + "entities": { + Name: "Entities in charm store", + Value: "4 charms; 2 bundles; 3 promulgated", + Passed: true, + }, + "base_entities": { + Name: "Base entities in charm store", + Value: "count: 5", + Passed: true, + }, + "server_started": { + Name: "Server started", + Value: now.String(), + Passed: true, + }, + "ingestion": { + Name: "Ingestion", + Value: "started: " + start.Format(time.RFC3339) + ", completed: " + end.Format(time.RFC3339), + Passed: true, + }, + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339), + Passed: true, + }, + }) +} + +func (s *APISuite) TestStatusWithoutCorrectCollections(c *gc.C) { + s.store.DB.Entities().DropCollection() + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "mongo_collections": { + Name: "MongoDB collections", + Value: "Missing collections: [" + s.store.DB.Entities().Name + "]", + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusWithoutIngestion(c *gc.C) { + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "ingestion": { + Name: 
"Ingestion", + Value: "started: " + zeroTimeStr + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusIngestionStarted(c *gc.C) { + now := time.Now() + start := now.Add(-1 * time.Hour) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"ingestion started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.IngestionType, + Time: start, + }) + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "ingestion": { + Name: "Ingestion", + Value: "started: " + start.Format(time.RFC3339) + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusWithoutLegacyStatistics(c *gc.C) { + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + zeroTimeStr + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusLegacyStatisticsStarted(c *gc.C) { + now := time.Now() + statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart, + }) + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + zeroTimeStr, + Passed: false, + }, + }) +} + +func (s *APISuite) TestStatusLegacyStatisticsMultipleLogs(c *gc.C) { + now := time.Now() + statisticsStart := now.Add(-1*time.Hour - 30*time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart.Add(-1 * time.Hour), + }) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import started"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsStart, + }) + statisticsEnd := now.Add(-30 * time.Minute) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsEnd.Add(-1 * time.Hour), + }) + s.addLog(c, &mongodoc.Log{ + Data: []byte(`"legacy statistics import completed"`), + Level: mongodoc.InfoLevel, + Type: mongodoc.LegacyStatisticsType, + Time: statisticsEnd, + }) + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "legacy_statistics": { + Name: "Legacy Statistics Load", + Value: "started: " + statisticsStart.Format(time.RFC3339) + ", completed: " + statisticsEnd.Format(time.RFC3339), + Passed: true, + }, + }) +} + +func (s *APISuite) TestStatusBaseEntitiesError(c *gc.C) { + // Add a base entity without any corresponding entities. + entity := &mongodoc.BaseEntity{ + URL: charm.MustParseURL("django"), + Name: "django", + } + err := s.store.DB.BaseEntities().Insert(entity) + c.Assert(err, gc.IsNil) + + s.AssertDebugStatus(c, false, map[string]params.DebugStatus{ + "base_entities": { + Name: "Base entities in charm store", + Value: "count: 1", + Passed: false, + }, + }) +} + +// AssertDebugStatus asserts that the current /debug/status endpoint +// matches the given status, ignoring status duration. +// If complete is true, it fails if the results contain +// keys not mentioned in status. 
+func (s *APISuite) AssertDebugStatus(c *gc.C, complete bool, status map[string]params.DebugStatus) {
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("debug/status"),
+	})
+	c.Assert(rec.Code, gc.Equals, http.StatusOK, gc.Commentf("body: %s", rec.Body.Bytes()))
+	c.Assert(rec.Header().Get("Content-Type"), gc.Equals, "application/json")
+	var gotStatus map[string]params.DebugStatus
+	err := json.Unmarshal(rec.Body.Bytes(), &gotStatus)
+	c.Assert(err, gc.IsNil)
+	for key, r := range gotStatus {
+		if _, found := status[key]; !complete && !found {
+			delete(gotStatus, key)
+			continue
+		}
+		r.Duration = 0
+		gotStatus[key] = r
+	}
+	c.Assert(gotStatus, jc.DeepEquals, status)
+}
+
+type statusWithElasticSearchSuite struct {
+	commonSuite
+}
+
+var _ = gc.Suite(&statusWithElasticSearchSuite{})
+
+func (s *statusWithElasticSearchSuite) SetUpSuite(c *gc.C) {
+	s.enableES = true
+	s.commonSuite.SetUpSuite(c)
+}
+
+func (s *statusWithElasticSearchSuite) TestStatusWithElasticSearch(c *gc.C) {
+	rec := httptesting.DoRequest(c, httptesting.DoRequestParams{
+		Handler: s.srv,
+		URL:     storeURL("debug/status"),
+	})
+	var results map[string]params.DebugStatus
+	err := json.Unmarshal(rec.Body.Bytes(), &results)
+	c.Assert(err, gc.IsNil)
+	c.Assert(results["elasticsearch"].Name, gc.Equals, "Elastic search is running")
+	c.Assert(results["elasticsearch"].Value, jc.Contains, "cluster_name:")
+}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/server.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/server.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/server.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,127 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package charmstore // import "gopkg.in/juju/charmstore.v5-unstable"
+
+import (
+	"fmt"
+	"net/http"
+	"sort"
+	"time"
+
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/mgo.v2"
+	"gopkg.in/natefinch/lumberjack.v2"
+
+	"gopkg.in/juju/charmstore.v5-unstable/elasticsearch"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/charmstore"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/legacy"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v4"
+	"gopkg.in/juju/charmstore.v5-unstable/internal/v5"
+)
+
+// Versions of the API that can be served.
+const (
+	Legacy = ""
+	V4     = "v4"
+	V5     = "v5"
+)
+
+var versions = map[string]charmstore.NewAPIHandlerFunc{
+	Legacy: legacy.NewAPIHandler,
+	V4:     v4.NewAPIHandler,
+	V5:     v5.NewAPIHandler,
+}
+
+// HTTPCloseHandler represents an HTTP handler that
+// must be closed after use.
+type HTTPCloseHandler interface {
+	Close()
+	http.Handler
+}
+
+// Versions returns all known API version strings in alphabetical order.
+func Versions() []string {
+	vs := make([]string, 0, len(versions))
+	for v := range versions {
+		vs = append(vs, v)
+	}
+	sort.Strings(vs)
+	return vs
+}
+
+// ServerParams holds configuration for a new API server.
+type ServerParams struct {
+	// AuthUsername and AuthPassword hold the credentials
+	// used for HTTP basic authentication.
+	AuthUsername string
+	AuthPassword string
+
+	// IdentityLocation holds the location of the third party authorization
+	// service to use when creating third party caveats,
+	// for example: http://api.jujucharms.com/identity/v1/discharger
+	// If it is empty, IdentityAPIURL+"/v1/discharger" will be used.
+	IdentityLocation string
+
+	// TermsLocation holds the location of the
+	// terms service, which knows about user agreements to
+	// Terms and Conditions required by the charm.
+	TermsLocation string
+
+	// PublicKeyLocator holds a public key store.
+	// It may be nil.
+	PublicKeyLocator bakery.PublicKeyLocator
+
+	// IdentityAPIURL holds the URL of the identity manager,
+	// for example http://api.jujucharms.com/identity
+	IdentityAPIURL string
+
+	// AgentUsername and AgentKey hold the credentials used for agent
+	// authentication.
+	AgentUsername string
+	AgentKey      *bakery.KeyPair
+
+	// StatsCacheMaxAge is the maximum length of time between
+	// refreshes of entities in the stats cache.
+	StatsCacheMaxAge time.Duration
+
+	// SearchCacheMaxAge is the maximum length of time between
+	// refreshes of entities in the search cache.
+	SearchCacheMaxAge time.Duration
+
+	// MaxMgoSessions specifies a soft limit on the maximum
+	// number of mongo sessions used. Each concurrent
+	// HTTP request will use one session.
+	MaxMgoSessions int
+
+	// HTTPRequestWaitDuration holds the amount of time
+	// that an HTTP request will wait for a free connection
+	// when the MaxMgoSessions limit is reached.
+	HTTPRequestWaitDuration time.Duration
+
+	// AuditLogger optionally holds the logger which will be used to
+	// write audit log entries.
+	AuditLogger *lumberjack.Logger
+}
+
+// NewServer returns a new handler that handles charm store requests and stores
+// its data in the given database. The handler will serve the specified
+// versions of the API using the given configuration.
+func NewServer(db *mgo.Database, es *elasticsearch.Database, idx string, config ServerParams, serveVersions ...string) (HTTPCloseHandler, error) {
+	newAPIs := make(map[string]charmstore.NewAPIHandlerFunc)
+	for _, vers := range serveVersions {
+		newAPI := versions[vers]
+		if newAPI == nil {
+			return nil, fmt.Errorf("unknown version %q", vers)
+		}
+		newAPIs[vers] = newAPI
+	}
+	var si *charmstore.SearchIndex
+	if es != nil {
+		si = &charmstore.SearchIndex{
+			Database: es,
+			Index:    idx,
+		}
+	}
+	return charmstore.NewServer(db, si, charmstore.ServerParams(config), newAPIs)
}

=== added file 'src/gopkg.in/juju/charmstore.v5-unstable/server_test.go'
--- src/gopkg.in/juju/charmstore.v5-unstable/server_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/charmstore.v5-unstable/server_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,125 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+ +package charmstore_test // import "gopkg.in/juju/charmstore.v5-unstable" + +import ( + "fmt" + "net/http" + "testing" + + jujutesting "github.com/juju/testing" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/charmrepo.v2-unstable/csclient/params" + + "gopkg.in/juju/charmstore.v5-unstable" + "gopkg.in/juju/charmstore.v5-unstable/internal/storetesting" +) + +// These tests are copied (almost) verbatim from internal/charmstore/server_test.go + +func TestPackage(t *testing.T) { + jujutesting.MgoTestPackage(t, nil) +} + +type ServerSuite struct { + jujutesting.IsolatedMgoSuite + config charmstore.ServerParams +} + +var _ = gc.Suite(&ServerSuite{}) + +func (s *ServerSuite) SetUpSuite(c *gc.C) { + s.IsolatedMgoSuite.SetUpSuite(c) + s.config = charmstore.ServerParams{ + AuthUsername: "test-user", + AuthPassword: "test-password", + } +} + +func (s *ServerSuite) TestNewServerWithNoVersions(c *gc.C) { + h, err := charmstore.NewServer(s.Session.DB("foo"), nil, "", s.config) + c.Assert(err, gc.ErrorMatches, `charm store server must serve at least one version of the API`) + c.Assert(h, gc.IsNil) +} + +func (s *ServerSuite) TestNewServerWithUnregisteredVersion(c *gc.C) { + h, err := charmstore.NewServer(s.Session.DB("foo"), nil, "", s.config, "wrong") + c.Assert(err, gc.ErrorMatches, `unknown version "wrong"`) + c.Assert(h, gc.IsNil) +} + +type versionResponse struct { + Version string + Path string +} + +func (s *ServerSuite) TestVersions(c *gc.C) { + c.Assert(charmstore.Versions(), gc.DeepEquals, []string{"", "v4", "v5"}) +} + +func (s *ServerSuite) TestNewServerWithVersions(c *gc.C) { + db := s.Session.DB("foo") + + h, err := charmstore.NewServer(db, nil, "", s.config, charmstore.V4) + c.Assert(err, gc.IsNil) + defer h.Close() + + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: h, + URL: "/v4/debug", + ExpectStatus: http.StatusInternalServerError, + ExpectBody: params.Error{ + Message: "method not implemented", + }, + }) + assertDoesNotServeVersion(c, h, "v3") +} + +func assertServesVersion(c *gc.C, h http.Handler, vers string) { + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: h, + URL: "/" + vers + "/some/path", + ExpectBody: versionResponse{ + Version: vers, + Path: "/some/path", + }, + }) +} + +func assertDoesNotServeVersion(c *gc.C, h http.Handler, vers string) { + url := "/" + vers + "/debug" + httptesting.AssertJSONCall(c, httptesting.JSONCallParams{ + Handler: h, + URL: url, + ExpectStatus: http.StatusNotFound, + ExpectBody: params.Error{ + Message: fmt.Sprintf("no handler for %q", url), + Code: params.ErrNotFound, + }, + }) +} + +type ServerESSuite struct { + storetesting.IsolatedMgoESSuite + config charmstore.ServerParams +} + +var _ = gc.Suite(&ServerESSuite{}) + +func (s *ServerESSuite) SetUpSuite(c *gc.C) { + s.IsolatedMgoESSuite.SetUpSuite(c) + s.config = charmstore.ServerParams{ + AuthUsername: "test-user", + AuthPassword: "test-password", + } +} + +func (s *ServerESSuite) TestNewServerWithElasticsearch(c *gc.C) { + db := s.Session.DB("foo") + + srv, err := charmstore.NewServer(db, s.ES, s.TestIndex, s.config, charmstore.V4) + c.Assert(err, gc.IsNil) + srv.Close() +} === added directory 'src/gopkg.in/juju/charmstore.v5-unstable/version' === added file 'src/gopkg.in/juju/charmstore.v5-unstable/version/version.go' --- src/gopkg.in/juju/charmstore.v5-unstable/version/version.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/charmstore.v5-unstable/version/version.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ 
+// Copyright 2014 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+package version // import "gopkg.in/juju/charmstore.v5-unstable/version"
+
+type Version struct {
+	GitCommit string
+	Version   string
+}
+
+var VersionInfo = unknownVersion
+
+var unknownVersion = Version{
+	GitCommit: "unknown git commit",
+	Version:   "unknown version",
+}
=== modified file 'src/gopkg.in/juju/environschema.v1/fields.go'
--- src/gopkg.in/juju/environschema.v1/fields.go	2015-10-23 18:28:45 +0000
+++ src/gopkg.in/juju/environschema.v1/fields.go	2016-03-22 15:18:22 +0000
@@ -71,6 +71,20 @@
 	Values []interface{} `json:"values,omitempty"`
 }
 
+// Checker returns a checker that can be used to coerce values into the
+// type of the attribute. Specifically, string is always supported for
+// any checker type.
+func (attr Attr) Checker() (schema.Checker, error) {
+	checker := checkers[attr.Type]
+	if checker == nil {
+		return nil, fmt.Errorf("invalid type %q", attr.Type)
+	}
+	if len(attr.Values) == 0 {
+		return checker, nil
+	}
+	return oneOfValues(checker, attr.Values)
+}
+
 // Group describes the grouping of attributes.
 type Group string
 
@@ -98,18 +112,27 @@
 type FieldType string
 
 // The following constants are the possible type values.
+// The "canonical Go type" is the type that will be
+// the result of a successful Coerce call.
 const (
+	// Tstring represents a string type. Its canonical Go type is string.
 	Tstring FieldType = "string"
-	Tbool   FieldType = "bool"
-	Tint    FieldType = "int"
-	Tattrs  FieldType = "attrs"
+
+	// Tbool represents a boolean type. Its canonical Go type is bool.
+	Tbool FieldType = "bool"
+
+	// Tint represents an integer type. Its canonical Go type is int.
+	Tint FieldType = "int"
+
+	// Tattrs represents an attribute map. Its canonical Go type is map[string]string.
+	Tattrs FieldType = "attrs"
 )
 
 var checkers = map[FieldType]schema.Checker{
 	Tstring: schema.String(),
 	Tbool:   schema.Bool(),
 	Tint:    schema.ForceInt(),
-	Tattrs:  attrsC{},
+	Tattrs:  attrsChecker{},
 }
 
 // Alternative possibilities to ValidationSchema to bear in mind for
@@ -129,21 +152,14 @@
 	defaults := make(schema.Defaults)
 	for name, attr := range s {
 		path := []string{name}
-		checker := checkers[attr.Type]
-		if checker == nil {
-			return nil, nil, fmt.Errorf("%sinvalid type %q", pathPrefix(path), attr.Type)
-		}
-		if attr.Values != nil {
-			var err error
-			checker, err = oneOfValues(checker, attr.Values, path)
-			if err != nil {
-				return nil, nil, err
-			}
-		}
-		fields[name] = checker
+		checker, err := attr.Checker()
+		if err != nil {
+			return nil, nil, errors.Annotatef(err, "%s", mkPath(path))
+		}
 		if !attr.Mandatory {
 			defaults[name] = schema.Omit
 		}
+		fields[name] = checker
 	}
 	return fields, defaults, nil
 }
 
@@ -151,28 +167,28 @@
 // oneOfValues returns a checker that coerces its value
 // using the supplied checker, then checks that the
 // resulting value is equal to one of the given values.
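To make the new Attr.Checker entry point concrete, a small hedged sketch of coercing raw values through it, including the enumerated-values path handled by oneOfValues; the attribute and its values here are invented for illustration:

	package main

	import (
		"fmt"

		"gopkg.in/juju/environschema.v1"
	)

	func main() {
		attr := environschema.Attr{
			Type:   environschema.Tint,
			Values: []interface{}{80, 443}, // enumerated values, coerced with the int checker
		}
		checker, err := attr.Checker()
		if err != nil {
			panic(err)
		}
		// String input is accepted for any attribute type.
		v, err := checker.Coerce("443", nil)
		fmt.Println(v, err) // 443 <nil>
		_, err = checker.Coerce("8080", nil)
		fmt.Println(err) // error: not one of the enumerated values
	}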
-func oneOfValues(checker schema.Checker, values []interface{}, path []string) (schema.Checker, error) { +func oneOfValues(checker schema.Checker, values []interface{}) (schema.Checker, error) { cvalues := make([]interface{}, len(values)) for i, v := range values { cv, err := checker.Coerce(v, nil) if err != nil { - return nil, fmt.Errorf("%sinvalid enumerated value: %v", pathPrefix(path), err) + return nil, fmt.Errorf("invalid enumerated value: %v", err) } cvalues[i] = cv } - return oneOfValuesC{ + return oneOfValuesChecker{ vals: cvalues, checker: checker, }, nil } -type oneOfValuesC struct { +type oneOfValuesChecker struct { vals []interface{} checker schema.Checker } // Coerce implements schema.Checker.Coerce. -func (c oneOfValuesC) Coerce(v interface{}, path []string) (interface{}, error) { +func (c oneOfValuesChecker) Coerce(v interface{}, path []string) (interface{}, error) { v, err := c.checker.Coerce(v, path) if err != nil { return v, err @@ -185,14 +201,14 @@ return nil, fmt.Errorf("%sexpected one of %v, got %#v", pathPrefix(path), c.vals, v) } -type attrsC struct{} +type attrsChecker struct{} var ( attrMapChecker = schema.Map(schema.String(), schema.String()) attrSliceChecker = schema.List(schema.String()) ) -func (c attrsC) Coerce(v interface{}, path []string) (interface{}, error) { +func (c attrsChecker) Coerce(v interface{}, path []string) (interface{}, error) { // TODO consider allowing only the map variant. switch reflect.TypeOf(v).Kind() { case reflect.String: @@ -240,13 +256,23 @@ // the concatenation of the path elements. If path // starts with a ".", the dot is omitted. func pathPrefix(path []string) string { + if p := mkPath(path); p != "" { + return p + ": " + } + return "" +} + +// mkPath returns a string holding +// the concatenation of the path elements. +// If path starts with a ".", the dot is omitted. +func mkPath(path []string) string { if len(path) == 0 { return "" } if path[0] == "." { - return strings.Join(path[1:], "") + ": " + return strings.Join(path[1:], "") } - return strings.Join(path, "") + ": " + return strings.Join(path, "") } // ExampleYAML returns the fields formatted as a YAML === added directory 'src/gopkg.in/juju/environschema.v1/form' === added directory 'src/gopkg.in/juju/environschema.v1/form/cmd' === added directory 'src/gopkg.in/juju/environschema.v1/form/cmd/formtest' === added file 'src/gopkg.in/juju/environschema.v1/form/cmd/formtest/main.go' --- src/gopkg.in/juju/environschema.v1/form/cmd/formtest/main.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/environschema.v1/form/cmd/formtest/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,73 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "os" + + "gopkg.in/juju/environschema.v1" + "gopkg.in/juju/environschema.v1/form" +) + +var showDescriptions = flag.Bool("v", false, "show descriptions") + +func main() { + flag.Parse() + + f := form.IOFiller{ + ShowDescriptions: *showDescriptions, + } + fmt.Println(`formtest: +This is a simple interactive test program for environschema forms. +Expect the prompts to be as follows: + +e-mail [user@example.com]: +name: +password: +PIN [****]: + +The entered values will be displayed at the end. 
+`)
+	os.Setenv("PIN", "1234")
+	os.Setenv("EMAIL", "user@example.com")
+	r, err := f.Fill(form.Form{
+		Title: "Test Form",
+		Fields: environschema.Fields{
+			"name": environschema.Attr{
+				Description: "Your full name.",
+				Type:        environschema.Tstring,
+				Mandatory:   true,
+			},
+			"email": environschema.Attr{
+				Description: "Your email address.",
+				Type:        environschema.Tstring,
+				EnvVar:      "EMAIL",
+			},
+			"password": environschema.Attr{
+				Description: "Your very secret password.",
+				Type:        environschema.Tstring,
+				Secret:      true,
+				Mandatory:   true,
+			},
+			"pin": environschema.Attr{
+				Description: "Some PIN that you have probably forgotten.",
+				Type:        environschema.Tint,
+				EnvVar:      "PIN",
+				Secret:      true,
+			},
+		}})
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	b, err := json.MarshalIndent(r, "", "\t")
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	fmt.Println(string(b))
+}
=== added file 'src/gopkg.in/juju/environschema.v1/form/form.go'
--- src/gopkg.in/juju/environschema.v1/form/form.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/environschema.v1/form/form.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,304 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package form provides ways to create and process forms based on
+// environschema schemas.
+//
+// The API exposed by this package is not currently subject
+// to the environschema.v1 API compatibility guarantees.
+package form
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/juju/schema"
+	"golang.org/x/crypto/ssh/terminal"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/environschema.v1"
+)
+
+// Form describes a form based on a schema.
+type Form struct {
+	// Title holds the title of the form, giving contextual
+	// information for the fields.
+	Title string
+
+	// Fields holds the fields that make up the body of the form.
+	Fields environschema.Fields
+}
+
+// Filler represents an object that can fill out a Form. The form to
+// fill out is described in f. The returned value should be compatible
+// with the schema defined in f.Fields.
+type Filler interface {
+	Fill(f Form) (map[string]interface{}, error)
+}
+
+// SortedFields returns the given fields sorted first by group name.
+// Those in the same group are sorted so that secret fields come after
+// non-secret ones; finally the fields are sorted by name.
+func SortedFields(fields environschema.Fields) []NamedAttr {
+	fs := make(namedAttrSlice, 0, len(fields))
+	for k, v := range fields {
+		fs = append(fs, NamedAttr{
+			Name: k,
+			Attr: v,
+		})
+	}
+	sort.Sort(fs)
+	return fs
+}
+
+// NamedAttr associates a name with an environschema.Attr.
+type NamedAttr struct {
+	Name string
+	environschema.Attr
+}
+
+type namedAttrSlice []NamedAttr
+
+func (s namedAttrSlice) Len() int {
+	return len(s)
+}
+
+func (s namedAttrSlice) Less(i, j int) bool {
+	a1 := &s[i]
+	a2 := &s[j]
+	if a1.Group != a2.Group {
+		return a1.Group < a2.Group
+	}
+	if a1.Secret != a2.Secret {
+		return a2.Secret
+	}
+	return a1.Name < a2.Name
+}
+
+func (s namedAttrSlice) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// IOFiller is a Filler based around an io.Reader and io.Writer.
+type IOFiller struct {
+	// In is used to read responses from the user. If this is nil,
+	// then os.Stdin will be used.
+	In io.Reader
+
+	// Out is used to write prompts and information to the user. If
+	// this is nil, then os.Stdout will be used.
+	Out io.Writer
+
+	// MaxTries is the number of times to attempt to get a valid
+	// response when prompting. If this is 0 then the default of 3
+	// attempts will be used.
+	MaxTries int
+
+	// ShowDescriptions holds whether attribute descriptions
+	// should be printed as well as the attribute names.
+	ShowDescriptions bool
+
+	// GetDefault returns the default value for the given attribute,
+	// which must have been coerced using the given checker.
+	// If there is no default, it should return (nil, "", nil).
+	//
+	// The display return value holds the string to use
+	// to describe the value of the default. If it's empty,
+	// fmt.Sprint(val) will be used.
+	//
+	// If GetDefault returns an error, it will be printed as a warning.
+	//
+	// If GetDefault is nil, DefaultFromEnv will be used.
+	GetDefault func(attr NamedAttr, checker schema.Checker) (val interface{}, display string, err error)
+}
+
+// Fill implements Filler.Fill by writing the field information to
+// f.Out, then reading input from f.In. If f.In is a terminal and the
+// attribute is secret, echo will be disabled.
+//
+// Fill processes fields by first sorting them and then prompting for
+// the value of each one in turn.
+//
+// The fields are sorted first by group name. Those in the same group
+// are sorted so that secret fields come after non-secret ones; finally
+// the fields are sorted by name.
+//
+// Each field will be prompted for, then the returned value will be
+// validated against the field's type. If the returned value does not
+// validate correctly it will be prompted again up to MaxTries before
+// giving up.
+func (f IOFiller) Fill(form Form) (map[string]interface{}, error) {
+	if len(form.Fields) == 0 {
+		return map[string]interface{}{}, nil
+	}
+	if f.MaxTries == 0 {
+		f.MaxTries = 3
+	}
+	if f.In == nil {
+		f.In = os.Stdin
+	}
+	if f.Out == nil {
+		f.Out = os.Stdout
+	}
+	if f.GetDefault == nil {
+		f.GetDefault = DefaultFromEnv
+	}
+	fields := SortedFields(form.Fields)
+	values := make(map[string]interface{}, len(fields))
+	checkers := make([]schema.Checker, len(fields))
+	allMandatory := true
+	for i, field := range fields {
+		checker, err := field.Checker()
+		if err != nil {
+			return nil, errgo.Notef(err, "invalid field %s", field.Name)
+		}
+		checkers[i] = checker
+		allMandatory = allMandatory && field.Mandatory
+	}
+	if form.Title != "" {
+		f.printf("%s\n", form.Title)
+	}
+	if allMandatory {
+		f.printf("Press return to select a default value.\n")
+	} else {
+		f.printf("Press return to select a default value, or enter - to omit an entry.\n")
+	}
+	for i, field := range fields {
+		v, err := f.promptLoop(field, checkers[i], allMandatory)
+		if err != nil {
+			return nil, errgo.Notef(err, "cannot complete form")
+		}
+		if v != nil {
+			values[field.Name] = v
+		}
+	}
+	return values, nil
+}
+
+func (f IOFiller) promptLoop(attr NamedAttr, checker schema.Checker, allMandatory bool) (interface{}, error) {
+	if f.ShowDescriptions && attr.Description != "" {
+		f.printf("\n%s\n", strings.TrimSpace(attr.Description))
+	}
+	defVal, defDisplay, err := f.GetDefault(attr, checker)
+	if err != nil {
+		f.printf("Warning: invalid default value: %v\n", err)
+	}
+	if defVal != nil && defDisplay == "" {
+		defDisplay = fmt.Sprint(defVal)
+	}
+	for i := 0; i < f.MaxTries; i++ {
+		vStr, err := f.prompt(attr, checker, defDisplay)
+		if err != nil {
+			return nil, errgo.Mask(err)
+		}
+		if vStr == "" {
+			// An empty value has been entered, signifying
+			// that the user has chosen the default value.
+ // If there is no default and the attribute is mandatory, + // we treat it as a potentially valid value and + // coerce it below. + if defVal != nil { + return defVal, nil + } + if !attr.Mandatory { + // No value entered but the attribute is not mandatory. + return nil, nil + } + } else if vStr == "-" && !allMandatory { + // The user has entered a hyphen to cause + // the attribute to be omitted. + if attr.Mandatory { + f.printf("Cannot omit %s because it is mandatory.\n", attr.Name) + continue + } + f.printf("Value %s omitted.\n", attr.Name) + return nil, nil + } + v, err := checker.Coerce(vStr, nil) + if err == nil { + return v, nil + } + f.printf("Invalid input: %v\n", err) + } + return nil, errgo.New("too many invalid inputs") +} + +func (f IOFiller) printf(format string, a ...interface{}) { + fmt.Fprintf(f.Out, format, a...) +} + +func (f IOFiller) prompt(attr NamedAttr, checker schema.Checker, def string) (string, error) { + prompt := attr.Name + if def != "" { + if attr.Secret { + def = strings.Repeat("*", len(def)) + } + prompt = fmt.Sprintf("%s [%s]", attr.Name, def) + } + f.printf("%s: ", prompt) + input, err := readLine(f.Out, f.In, attr.Secret) + if err != nil { + return "", errgo.Notef(err, "cannot read input") + } + return input, nil +} + +func readLine(w io.Writer, r io.Reader, secret bool) (string, error) { + if f, ok := r.(*os.File); ok && secret && terminal.IsTerminal(int(f.Fd())) { + defer w.Write([]byte{'\n'}) + line, err := terminal.ReadPassword(int(f.Fd())) + return string(line), err + } + var input []byte + for { + var buf [1]byte + n, err := r.Read(buf[:]) + if n == 1 { + if buf[0] == '\n' { + break + } + input = append(input, buf[0]) + } + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return "", errgo.Mask(err) + } + } + return strings.TrimRight(string(input), "\r"), nil +} + +// DefaultFromEnv returns any default value found in the environment for +// the given attribute. +// +// The environment variables specified in attr will be checked in order +// and the first non-empty value found is coerced using the given +// checker and returned. +func DefaultFromEnv(attr NamedAttr, checker schema.Checker) (val interface{}, _ string, err error) { + val, envVar := defaultFromEnv(attr) + if val == "" { + return nil, "", nil + } + v, err := checker.Coerce(val, nil) + if err != nil { + return nil, "", errgo.Notef(err, "cannot convert $%s", envVar) + } + return v, "", nil +} + +func defaultFromEnv(attr NamedAttr) (val, envVar string) { + if attr.EnvVar != "" { + if val := os.Getenv(attr.EnvVar); val != "" { + return val, attr.EnvVar + } + } + for _, envVar := range attr.EnvVars { + if val := os.Getenv(envVar); val != "" { + return val, envVar + } + } + return "", "" +} === added file 'src/gopkg.in/juju/environschema.v1/form/form_test.go' --- src/gopkg.in/juju/environschema.v1/form/form_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/environschema.v1/form/form_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,923 @@ +// Copyright 2015 Canonical Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
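Before the tests, a brief illustrative sketch of driving IOFiller programmatically with in-memory I/O, in the same spirit as the formtest command above. The field names and input values are invented; bytes.Buffer stands in for a terminal, and the prompting order follows SortedFields (same group, so sorted by name):

	package main

	import (
		"bytes"
		"fmt"
		"strings"

		"gopkg.in/juju/environschema.v1"
		"gopkg.in/juju/environschema.v1/form"
	)

	func main() {
		var out bytes.Buffer
		f := form.IOFiller{
			// "host" is prompted before "port" (same group, sorted by name).
			In:  strings.NewReader("example.com\n8080\n"),
			Out: &out,
		}
		vals, err := f.Fill(form.Form{
			Title: "Example",
			Fields: environschema.Fields{
				"host": environschema.Attr{Type: environschema.Tstring, Mandatory: true},
				"port": environschema.Attr{Type: environschema.Tint},
			},
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(vals["host"], vals["port"]) // example.com 8080
	}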
+ +package form_test + +import ( + "bytes" + "strings" + + "github.com/juju/schema" + "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "gopkg.in/juju/environschema.v1" + "gopkg.in/juju/environschema.v1/form" +) + +type formSuite struct { + testing.OsEnvSuite +} + +var _ = gc.Suite(&formSuite{}) + +var _ form.Filler = form.IOFiller{} + +var ioFillerTests = []struct { + about string + form form.Form + filler form.IOFiller + environment map[string]string + expectIO string + expectResult map[string]interface{} + expectError string +}{{ + about: "no fields, no interaction", + form: form.Form{ + Title: "something", + }, + expectIO: "", + expectResult: map[string]interface{}{}, +}, { + about: "single field no default", + form: form.Form{ + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |A: »B + `, + expectResult: map[string]interface{}{ + "A": "B", + }, +}, { + about: "single field with default", + form: form.Form{ + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + EnvVar: "A", + }, + }, + }, + environment: map[string]string{ + "A": "C", + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |A [C]: »B + `, + expectResult: map[string]interface{}{ + "A": "B", + }, +}, { + about: "single field with default no input", + form: form.Form{ + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + EnvVar: "A", + }, + }, + }, + environment: map[string]string{ + "A": "C", + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |A [C]: » + `, + expectResult: map[string]interface{}{ + "A": "C", + }, +}, { + about: "secret single field with default no input", + form: form.Form{ + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + EnvVar: "A", + Secret: true, + }, + }, + }, + environment: map[string]string{ + "A": "password", + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |A [********]: » + `, + expectResult: map[string]interface{}{ + "A": "password", + }, +}, { + about: "windows line endings", + form: form.Form{ + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |A: »B` + "\r" + ` + `, + expectResult: map[string]interface{}{ + "A": "B", + }, +}, { + about: "with title", + form: form.Form{ + Title: "Test Title", + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + }, + }, + }, + expectIO: ` + |Test Title + |Press return to select a default value, or enter - to omit an entry. + |A: »hello + `, + expectResult: map[string]interface{}{ + "A": "hello", + }, +}, { + about: "title with prompts", + form: form.Form{ + Title: "Test Title", + Fields: environschema.Fields{ + "A": environschema.Attr{ + Type: environschema.Tstring, + Description: "A description", + }, + }, + }, + expectIO: ` + |Test Title + |Press return to select a default value, or enter - to omit an entry. 
+ |A: »B + `, + expectResult: map[string]interface{}{ + "A": "B", + }, +}, { + about: "correct ordering", + form: form.Form{ + Fields: environschema.Fields{ + "a1": environschema.Attr{ + Group: "A", + Description: "z a1 description", + Type: environschema.Tstring, + }, + "c1": environschema.Attr{ + Group: "A", + Description: "c1 description", + Type: environschema.Tstring, + }, + "b1": environschema.Attr{ + Group: "A", + Description: "b1 description", + Type: environschema.Tstring, + Secret: true, + }, + "a2": environschema.Attr{ + Group: "B", + Description: "a2 description", + Type: environschema.Tstring, + }, + "c2": environschema.Attr{ + Group: "B", + Description: "c2 description", + Type: environschema.Tstring, + }, + "b2": environschema.Attr{ + Group: "B", + Description: "b2 description", + Type: environschema.Tstring, + Secret: true, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a1: »a1 + |c1: »c1 + |b1: »b1 + |a2: »a2 + |c2: »c2 + |b2: »b2 + `, + expectResult: map[string]interface{}{ + "a1": "a1", + "b1": "b1", + "c1": "c1", + "a2": "a2", + "b2": "b2", + "c2": "c2", + }, +}, { + about: "string type", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tstring, + }, + "b": environschema.Attr{ + Description: "b description", + Type: environschema.Tstring, + Mandatory: true, + }, + "c": environschema.Attr{ + Description: "c description", + Type: environschema.Tstring, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a: » + |b: » + |c: »something + `, + expectResult: map[string]interface{}{ + "b": "", + "c": "something", + }, +}, { + about: "bool type", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tbool, + }, + "b": environschema.Attr{ + Description: "b description", + Type: environschema.Tbool, + }, + "c": environschema.Attr{ + Description: "c description", + Type: environschema.Tbool, + }, + "d": environschema.Attr{ + Description: "d description", + Type: environschema.Tbool, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a: »true + |b: »false + |c: »1 + |d: »0 + `, + expectResult: map[string]interface{}{ + "a": true, + "b": false, + "c": true, + "d": false, + }, +}, { + about: "int type", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + }, + "b": environschema.Attr{ + Description: "b description", + Type: environschema.Tint, + }, + "c": environschema.Attr{ + Description: "c description", + Type: environschema.Tint, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a: »0 + |b: »-1000000 + |c: »1000000 + `, + expectResult: map[string]interface{}{ + "a": 0, + "b": -1000000, + "c": 1000000, + }, +}, { + about: "attrs type", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tattrs, + }, + "b": environschema.Attr{ + Description: "b description", + Type: environschema.Tattrs, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. 
+ |a: »x=y z= foo=bar + |b: » + `, + expectResult: map[string]interface{}{ + "a": map[string]string{ + "x": "y", + "foo": "bar", + "z": "", + }, + }, +}, { + about: "don't mention hyphen if all entries are mandatory", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + Mandatory: true, + }, + "b": environschema.Attr{ + Description: "b description", + Type: environschema.Tstring, + Mandatory: true, + }, + }, + }, + expectIO: ` + |Press return to select a default value. + |a: »12 + |b: »- + `, + expectResult: map[string]interface{}{ + "a": 12, + "b": "-", + }, +}, { + about: "too many bad responses", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + Mandatory: true, + }, + }, + }, + expectIO: ` + |Press return to select a default value. + |a: »one + |Invalid input: expected number, got string("one") + |a: » + |Invalid input: expected number, got string("") + |a: »three + |Invalid input: expected number, got string("three") + `, + expectError: `cannot complete form: too many invalid inputs`, +}, { + about: "too many bad responses with maxtries=1", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + }, + }, + }, + filler: form.IOFiller{ + MaxTries: 1, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a: »one + |Invalid input: expected number, got string("one") + `, + expectError: `cannot complete form: too many invalid inputs`, +}, { + about: "bad then good input", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a: »one + |Invalid input: expected number, got string("one") + |a: »two + |Invalid input: expected number, got string("two") + |a: »3 + `, + expectResult: map[string]interface{}{ + "a": 3, + }, +}, { + about: "empty value entered for optional attribute with no default", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tstring, + }, + "b": environschema.Attr{ + Description: "b description", + Type: environschema.Tint, + }, + "c": environschema.Attr{ + Description: "c description", + Type: environschema.Tbool, + }, + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. 
+	|a: »
+	|b: »
+	|c: »
+	`,
+	expectResult: map[string]interface{}{},
+}, {
+	about: "unsupported type",
+	form: form.Form{
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: "a description",
+				Type:        "bogus",
+			},
+		},
+	},
+	expectError: `invalid field a: invalid type "bogus"`,
+}, {
+	about: "no interaction is done if any field has an invalid type",
+	form: form.Form{
+		Title: "some title",
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: "a description",
+				Type:        environschema.Tstring,
+			},
+			"b": environschema.Attr{
+				Description: "b description",
+				Type:        "bogus",
+			},
+		},
+	},
+	expectError: `invalid field b: invalid type "bogus"`,
}, {
+	about: "invalid default value is ignored",
+	environment: map[string]string{
+		"a": "three",
+	},
+	form: form.Form{
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: "a description",
+				Type:        environschema.Tint,
+				EnvVars:     []string{"a"},
+			},
+		},
+	},
+	expectIO: `
+	|Press return to select a default value, or enter - to omit an entry.
+	|Warning: invalid default value: cannot convert $a: expected number, got string("three")
+	|a: »99
+	`,
+	expectResult: map[string]interface{}{
+		"a": 99,
+	},
+}, {
+	about: "entering a hyphen causes an optional value to be omitted",
+	environment: map[string]string{
+		"a": "29",
+	},
+	form: form.Form{
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: "a description",
+				Type:        environschema.Tint,
+				EnvVar:      "a",
+			},
+		},
+	},
+	expectIO: `
+	|Press return to select a default value, or enter - to omit an entry.
+	|a [29]: »-
+	|Value a omitted.
+	`,
+	expectResult: map[string]interface{}{},
+}, {
+	about: "entering a hyphen causes a mandatory value to fail when there are other optional values",
+	form: form.Form{
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: "a description",
+				Type:        environschema.Tint,
+				Mandatory:   true,
+			},
+			"b": environschema.Attr{
+				Description: "b description",
+				Type:        environschema.Tint,
+			},
+		},
+	},
+	expectIO: `
+	|Press return to select a default value, or enter - to omit an entry.
+	|a: »-
+	|Cannot omit a because it is mandatory.
+	|a: »123
+	|b: »99
+	`,
+	expectResult: map[string]interface{}{
+		"a": 123,
+		"b": 99,
+	},
+}, {
+	about: "descriptions can be enabled with ShowDescriptions",
+	form: form.Form{
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: " The a attribute\nis pretty boring.\n\n",
+				Type:        environschema.Tstring,
+				Mandatory:   true,
+			},
+			"b": environschema.Attr{
+				Type: environschema.Tint,
+			},
+		},
+	},
+	filler: form.IOFiller{
+		ShowDescriptions: true,
+	},
+	expectIO: `
+	|Press return to select a default value, or enter - to omit an entry.
+	|
+	|The a attribute
+	|is pretty boring.
+	|a: »-
+	|Cannot omit a because it is mandatory.
+	|a: »value
+	|b: »99
+	`,
+	expectResult: map[string]interface{}{
+		"a": "value",
+		"b": 99,
+	},
}, {
+	about: "custom GetDefault value success",
+	form: form.Form{
+		Fields: environschema.Fields{
+			"a": environschema.Attr{
+				Description: "a description",
+				Type:        environschema.Tstring,
+			},
+		},
+	},
+	filler: form.IOFiller{
+		GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) {
+			return "hello", "", nil
+		},
+	},
+	expectIO: `
+	|Press return to select a default value, or enter - to omit an entry.
+ |a [hello]: » + `, + expectResult: map[string]interface{}{ + "a": "hello", + }, +}, { + about: "custom GetDefault value error", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tstring, + }, + }, + }, + filler: form.IOFiller{ + GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { + return nil, "", errgo.New("some error") + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |Warning: invalid default value: some error + |a: »value + `, + expectResult: map[string]interface{}{ + "a": "value", + }, +}, { + about: "custom GetDefault value with custom display", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + }, + }, + }, + filler: form.IOFiller{ + GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { + return 99, "ninety-nine", nil + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a [ninety-nine]: » + `, + expectResult: map[string]interface{}{ + "a": 99, + }, +}, { + about: "custom GetDefault value with empty display and non-string type", + form: form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tint, + }, + }, + }, + filler: form.IOFiller{ + GetDefault: func(attr form.NamedAttr, checker schema.Checker) (interface{}, string, error) { + return 99, "", nil + }, + }, + expectIO: ` + |Press return to select a default value, or enter - to omit an entry. + |a [99]: » + `, + expectResult: map[string]interface{}{ + "a": 99, + }, +}} + +func (s *formSuite) TestIOFiller(c *gc.C) { + for i, test := range ioFillerTests { + func() { + c.Logf("%d. %s", i, test.about) + for k, v := range test.environment { + defer testing.PatchEnvironment(k, v)() + } + ioChecker := newInteractionChecker(c, "»", strings.TrimPrefix(unbeautify(test.expectIO), "\n")) + ioFiller := test.filler + ioFiller.In = ioChecker + ioFiller.Out = ioChecker + result, err := ioFiller.Fill(test.form) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(result, gc.IsNil) + } else { + ioChecker.Close() + c.Assert(err, gc.IsNil) + c.Assert(result, jc.DeepEquals, test.expectResult) + } + }() + } +} + +func (s *formSuite) TestIOFillerReadError(c *gc.C) { + r := errorReader{} + var out bytes.Buffer + ioFiller := form.IOFiller{ + In: r, + Out: &out, + } + result, err := ioFiller.Fill(form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tstring, + }, + }, + }) + c.Check(out.String(), gc.Equals, "Press return to select a default value, or enter - to omit an entry.\na: ") + c.Assert(err, gc.ErrorMatches, `cannot complete form: cannot read input: some read error`) + c.Assert(result, gc.IsNil) + // Verify that the cause is masked. Maybe it shouldn't + // be, but test the code as it is. 
+ c.Assert(errgo.Cause(err), gc.Not(gc.Equals), errRead) +} + +func (s *formSuite) TestIOFillerUnexpectedEOF(c *gc.C) { + r := strings.NewReader("a") + var out bytes.Buffer + ioFiller := form.IOFiller{ + In: r, + Out: &out, + } + result, err := ioFiller.Fill(form.Form{ + Fields: environschema.Fields{ + "a": environschema.Attr{ + Description: "a description", + Type: environschema.Tstring, + }, + }, + }) + c.Check(out.String(), gc.Equals, "Press return to select a default value, or enter - to omit an entry.\na: ") + c.Assert(err, gc.ErrorMatches, `cannot complete form: cannot read input: unexpected EOF`) + c.Assert(result, gc.IsNil) +} + +func (s *formSuite) TestSortedFields(c *gc.C) { + fields := environschema.Fields{ + "a1": environschema.Attr{ + Group: "A", + Description: "a1 description", + Type: environschema.Tstring, + }, + "c1": environschema.Attr{ + Group: "A", + Description: "c1 description", + Type: environschema.Tstring, + }, + "b1": environschema.Attr{ + Group: "A", + Description: "b1 description", + Type: environschema.Tstring, + Secret: true, + }, + "a2": environschema.Attr{ + Group: "B", + Description: "a2 description", + Type: environschema.Tstring, + }, + "c2": environschema.Attr{ + Group: "B", + Description: "c2 description", + Type: environschema.Tstring, + }, + "b2": environschema.Attr{ + Group: "B", + Description: "b2 description", + Type: environschema.Tstring, + Secret: true, + }, + } + c.Assert(form.SortedFields(fields), jc.DeepEquals, []form.NamedAttr{{ + Name: "a1", + Attr: environschema.Attr{ + Group: "A", + Description: "a1 description", + Type: environschema.Tstring, + }}, { + Name: "c1", + Attr: environschema.Attr{ + Group: "A", + Description: "c1 description", + Type: environschema.Tstring, + }}, { + Name: "b1", + Attr: environschema.Attr{ + Group: "A", + Description: "b1 description", + Type: environschema.Tstring, + Secret: true, + }}, { + Name: "a2", + Attr: environschema.Attr{ + Group: "B", + Description: "a2 description", + Type: environschema.Tstring, + }}, { + Name: "c2", + Attr: environschema.Attr{ + Group: "B", + Description: "c2 description", + Type: environschema.Tstring, + }}, { + Name: "b2", + Attr: environschema.Attr{ + Group: "B", + Description: "b2 description", + Type: environschema.Tstring, + Secret: true, + }, + }}) +} + +var errRead = errgo.New("some read error") + +type errorReader struct{} + +func (r errorReader) Read([]byte) (int, error) { + return 0, errRead +} + +var defaultFromEnvTests = []struct { + about string + environment map[string]string + attr environschema.Attr + expect interface{} + expectError string +}{{ + about: "no envvars", + attr: environschema.Attr{ + EnvVar: "A", + Type: environschema.Tstring, + }, +}, { + about: "matching envvar", + environment: map[string]string{ + "A": "B", + }, + attr: environschema.Attr{ + EnvVar: "A", + Type: environschema.Tstring, + }, + expect: "B", +}, { + about: "matching envvars", + environment: map[string]string{ + "B": "C", + }, + attr: environschema.Attr{ + EnvVar: "A", + Type: environschema.Tstring, + EnvVars: []string{"B"}, + }, + expect: "C", +}, { + about: "envvar takes priority", + environment: map[string]string{ + "A": "1", + "B": "2", + }, + attr: environschema.Attr{ + EnvVar: "A", + Type: environschema.Tstring, + EnvVars: []string{"B"}, + }, + expect: "1", +}, { + about: "cannot coerce", + environment: map[string]string{ + "A": "B", + }, + attr: environschema.Attr{ + EnvVar: "A", + Type: environschema.Tint, + }, + expectError: `cannot convert \$A: expected number, got 
string\("B"\)`, +}} + +func (s *formSuite) TestDefaultFromEnv(c *gc.C) { + for i, test := range defaultFromEnvTests { + c.Logf("%d. %s", i, test.about) + func() { + for k, v := range test.environment { + defer testing.PatchEnvironment(k, v)() + } + checker, err := test.attr.Checker() + c.Assert(err, gc.IsNil) + result, display, err := form.DefaultFromEnv(form.NamedAttr{ + Name: "ignored", + Attr: test.attr, + }, checker) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + c.Assert(display, gc.Equals, "") + c.Assert(result, gc.Equals, nil) + return + } + c.Assert(err, gc.IsNil) + c.Assert(display, gc.Equals, "") + c.Assert(result, gc.Equals, test.expect) + }() + } +} + +// indentReplacer deletes tabs and | beautifier characters. +var indentReplacer = strings.NewReplacer("\t", "", "|", "") + +// unbeautify strips the leading tabs and | characters that +// we use to make the tests look nicer. +func unbeautify(s string) string { + return indentReplacer.Replace(s) +} === added file 'src/gopkg.in/juju/environschema.v1/form/interaction_test.go' --- src/gopkg.in/juju/environschema.v1/form/interaction_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/environschema.v1/form/interaction_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,187 @@ +package form_test + +import ( + "fmt" + "strconv" + "strings" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" +) + +// newInteractionChecker returns a object that can be used to check a sequence of +// IO interactions. Expected input from the user is marked with the +// given user input marker (for example a distinctive unicode character +// that will not occur in the rest of the text) and runs to the end of a +// line. +// +// The returned interactionChecker is an io.ReadWriteCloser that checks that read +// and write corresponds to the expected action in the sequence. +// +// After all interaction is done, the interactionChecker should be closed to +// check that no more interactions are expected. +// +// Any failures will result in c.Fatalf being called. +// +// For example given the interactionChecker created with: +// +// checker := newInteractionChecker(c, "»", `What is your name: »Bob +// And your age: »148 +// You're very old, Bob! 
+// `)
+//
+// The following code will pass the checker:
+//
+// fmt.Fprintf(checker, "What is your name: ")
+// buf := make([]byte, 100)
+// n, _ := checker.Read(buf)
+// name := strings.TrimSpace(string(buf[0:n]))
+// fmt.Fprintf(checker, "And your age: ")
+// n, _ = checker.Read(buf)
+// age, err := strconv.Atoi(strings.TrimSpace(string(buf[0:n])))
+// c.Assert(err, gc.IsNil)
+// if age > 90 {
+// 	fmt.Fprintf(checker, "You're very old, %s!\n", name)
+// }
+// checker.Close()
+func newInteractionChecker(c *gc.C, userInputMarker, text string) *interactionChecker {
+	var ios []ioInteraction
+	for {
+		i := strings.Index(text, userInputMarker)
+		foundInput := i >= 0
+		if i == -1 {
+			i = len(text)
+		}
+		if i > 0 {
+			ios = append(ios, ioInteraction{
+				isInput: false,
+				data:    text[0:i],
+			})
+			text = text[i:]
+		}
+		if !foundInput {
+			break
+		}
+		text = text[len(userInputMarker):]
+		endLine := strings.Index(text, "\n")
+		if endLine == -1 {
+			c.Errorf("no newline found after expected input %q", text)
+		}
+		ios = append(ios, ioInteraction{
+			isInput: true,
+			data:    text[0 : endLine+1],
+		})
+		text = text[endLine+1:]
+	}
+	return &interactionChecker{
+		c:   c,
+		ios: ios,
+	}
+}
+
+type ioInteraction struct {
+	isInput bool
+	data    string
+}
+
+type interactionChecker struct {
+	c   *gc.C
+	ios []ioInteraction
+}
+
+// Read implements io.Reader by producing the next user
+// input data from the interactionChecker. It raises a fatal error if
+// the currently expected action is not a read.
+func (c *interactionChecker) Read(buf []byte) (int, error) {
+	if len(c.ios) == 0 {
+		c.c.Fatalf("got read when expecting interaction to have finished")
+	}
+	io := &c.ios[0]
+	if !io.isInput {
+		c.c.Fatalf("got read when expecting write %q", io.data)
+	}
+	n := copy(buf, io.data)
+	io.data = io.data[n:]
+	if len(io.data) == 0 {
+		c.ios = c.ios[1:]
+	}
+	return n, nil
+}
+
+// Write implements io.Writer by checking that the written
+// data corresponds with the next expected text
+// to be written.
+func (c *interactionChecker) Write(buf []byte) (int, error) {
+	if len(c.ios) == 0 {
+		c.c.Fatalf("got write %q when expecting interaction to have finished", buf)
+	}
+	io := &c.ios[0]
+	if io.isInput {
+		c.c.Fatalf("got write %q when expecting read %q", buf, io.data)
+	}
+	if len(buf) > len(io.data) {
+		c.c.Fatalf("write too long; got %q want %q", buf, io.data)
+	}
+	checkData := io.data[0:len(buf)]
+	if string(buf) != checkData {
+		c.c.Fatalf("unexpected write got %q want %q", buf, io.data)
+	}
+	io.data = io.data[len(buf):]
+	if len(io.data) == 0 {
+		c.ios = c.ios[1:]
+	}
+	return len(buf), nil
+}
+
+// Close implements io.Closer by checking that all expected interactions
+// have been completed.
+func (c *interactionChecker) Close() error {
+	if len(c.ios) == 0 {
+		return nil
+	}
+	io := &c.ios[0]
+	what := "write"
+	if io.isInput {
+		what = "read"
+	}
+	c.c.Fatalf("filler terminated too early; expected %s %q", what, io.data)
+	return nil
+}
+
+type interactionCheckerSuite struct{}
+
+var _ = gc.Suite(&interactionCheckerSuite{})
+
+func (*interactionCheckerSuite) TestNewIOChecker(c *gc.C) {
+	checker := newInteractionChecker(c, "»", `What is your name: »Bob
+And your age: »148
+You're very old, Bob!
+`)
+	c.Assert(checker.ios, jc.DeepEquals, []ioInteraction{{
+		data: "What is your name: ",
+	}, {
+		isInput: true,
+		data:    "Bob\n",
+	}, {
+		data: "And your age: ",
+	}, {
+		isInput: true,
+		data:    "148\n",
+	}, {
+		data: "You're very old, Bob!\n",
+	}})
+	fmt.Fprintf(checker, "What is your name: ")
+	buf := make([]byte, 100)
+	n, _ := checker.Read(buf)
+	name := strings.TrimSpace(string(buf[0:n]))
+	fmt.Fprintf(checker, "And your age: ")
+	n, _ = checker.Read(buf)
+	age, err := strconv.Atoi(strings.TrimSpace(string(buf[0:n])))
+	c.Assert(err, gc.IsNil)
+	if age > 90 {
+		fmt.Fprintf(checker, "You're very old, %s!\n", name)
+	}
+	checker.Close()
+
+	c.Assert(checker.ios, gc.HasLen, 0)
+}
=== added file 'src/gopkg.in/juju/environschema.v1/form/package_test.go'
--- src/gopkg.in/juju/environschema.v1/form/package_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/environschema.v1/form/package_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,14 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package form_test
+
+import (
+	"testing"
+
+	gc "gopkg.in/check.v1"
+)
+
+func TestPackage(t *testing.T) {
+	gc.TestingT(t)
+}
=== added file 'src/gopkg.in/juju/environschema.v1/sample.go'
--- src/gopkg.in/juju/environschema.v1/sample.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/environschema.v1/sample.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,217 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package environschema
+
+import (
+	"bytes"
+	"fmt"
+	"go/doc"
+	"io"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode"
+
+	"gopkg.in/yaml.v2"
+)
+
+// SampleYAML writes YAML output to w, indented by indent spaces,
+// holding the attributes in attrs with descriptions found
+// in the given fields. An entry for any attribute in fields not
+// in attrs will be generated but commented out.
+func SampleYAML(w io.Writer, indent int, attrs map[string]interface{}, fields Fields) error {
+	indentStr := strings.Repeat(" ", indent)
+	orderedFields := make(fieldsByGroup, 0, len(fields))
+	for name, f := range fields {
+		orderedFields = append(orderedFields, attrWithName{
+			name: name,
+			Attr: f,
+		})
+	}
+	sort.Sort(orderedFields)
+	for i, f := range orderedFields {
+		if i > 0 {
+			w.Write(nl)
+		}
+		writeSampleDescription(w, f.Attr, indentStr+"# ")
+		val, ok := attrs[f.name]
+		if ok {
+			fmt.Fprintf(w, "%s:", f.name)
+			indentVal(w, val, indentStr)
+		} else {
+			if f.Example != nil {
+				val = f.Example
+			} else {
+				val = sampleValue(f.Type)
+			}
+			fmt.Fprintf(w, "# %s:", f.name)
+			indentVal(w, val, indentStr+"# ")
+		}
+	}
+	return nil
+}
+
+const textWidth = 80
+
+var (
+	space = []byte(" ")
+	nl    = []byte("\n")
+)
+
+// writeSampleDescription writes the given attribute to w
+// prefixed by the given indentation string.
+func writeSampleDescription(w io.Writer, f Attr, indent string) {
+	previousText := false
+
+	// section marks the start of a new section of the comment;
+	// sections are separated with empty lines.
+	section := func() {
+		if previousText {
+			fmt.Fprintf(w, "%s\n", strings.TrimRightFunc(indent, unicode.IsSpace))
+		}
+		previousText = true
+	}
+
+	descr := strings.TrimSpace(f.Description)
+	if descr != "" {
+		section()
+		doc.ToText(w, descr, indent, " ", textWidth-len(indent))
+	}
+	vars := make([]string, 0, len(f.EnvVars)+1)
+	if f.EnvVar != "" {
+		vars = append(vars, "$"+f.EnvVar)
+	}
+	for _, v := range f.EnvVars {
+		vars = append(vars, "$"+v)
+	}
+	if len(vars) > 0 {
+		section()
+		fmt.Fprintf(w, "%sDefault value taken from %s.\n", indent, wordyList(vars))
+	}
+	attrText := ""
+	switch {
+	case f.Secret && f.Immutable:
+		attrText = "immutable and considered secret"
+	case f.Secret:
+		attrText = "considered secret"
+	case f.Immutable:
+		attrText = "immutable"
+	}
+	if attrText != "" {
+		section()
+		fmt.Fprintf(w, "%sThis attribute is %s.\n", indent, attrText)
+	}
+	section()
+}
+
+// emptyLine writes an empty line prefixed with the given
+// indent, ensuring that it doesn't have any trailing white space.
+func emptyLine(w io.Writer, indent string) {
+	fmt.Fprintf(w, "%s\n", strings.TrimRightFunc(indent, unicode.IsSpace))
+}
+
+// wordyList formats the given slice in the form "x, y or z".
+func wordyList(words []string) string {
+	if len(words) == 0 {
+		return ""
+	}
+	if len(words) == 1 {
+		return words[0]
+	}
+	return strings.Join(words[0:len(words)-1], ", ") + " or " + words[len(words)-1]
+}
+
+var groupPriority = map[Group]int{
+	ProviderGroup: 3,
+	AccountGroup:  2,
+	EnvironGroup:  1,
+}
+
+type attrWithName struct {
+	name string
+	Attr
+}
+
+type fieldsByGroup []attrWithName
+
+func (f fieldsByGroup) Len() int {
+	return len(f)
+}
+
+func (f fieldsByGroup) Swap(i0, i1 int) {
+	f[i0], f[i1] = f[i1], f[i0]
+}
+
+func (f fieldsByGroup) Less(i0, i1 int) bool {
+	f0, f1 := &f[i0], &f[i1]
+	pri0, pri1 := groupPriority[f0.Group], groupPriority[f1.Group]
+	if pri0 != pri1 {
+		return pri0 > pri1
+	}
+	return f0.name < f1.name
+}
+
+// indentVal writes the given value x to w as YAML, prefixing
+// the second and subsequent lines with the given indent.
+func indentVal(w io.Writer, x interface{}, indentStr string) {
+	data, err := yaml.Marshal(x)
+	if err != nil {
+		panic(fmt.Errorf("cannot marshal YAML: %v", err))
+	}
+	if len(data) == 0 {
+		panic("YAML cannot marshal to empty string")
+	}
+	indent := []byte(indentStr + " ")
+	if canUseSameLine(x) {
+		w.Write(space)
+	} else {
+		w.Write(nl)
+		w.Write(indent)
+	}
+	data = bytes.TrimSuffix(data, nl)
+	lines := bytes.Split(data, nl)
+	for i, line := range lines {
+		if i > 0 {
+			w.Write(indent)
+		}
+		w.Write(line)
+		w.Write(nl)
+	}
+}
+
+func canUseSameLine(x interface{}) bool {
+	if x == nil {
+		return true
+	}
+	v := reflect.ValueOf(x)
+	switch v.Kind() {
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Slice:
+		return v.Len() == 0
+	}
+	return true
+}
+
+func yamlQuote(s string) string {
+	data, _ := yaml.Marshal(s)
+	return strings.TrimSpace(string(data))
+}
+
+func sampleValue(t FieldType) interface{} {
+	switch t {
+	case Tstring:
+		return ""
+	case Tbool:
+		return false
+	case Tint:
+		return 0
+	case Tattrs:
+		return map[string]string{
+			"example": "value",
+		}
+	default:
+		panic(fmt.Errorf("unknown schema type %q", t))
+	}
+}
=== added file 'src/gopkg.in/juju/environschema.v1/sample_test.go'
--- src/gopkg.in/juju/environschema.v1/sample_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/environschema.v1/sample_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,328 @@
+// Copyright 2015 Canonical Ltd.
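As a hedged illustration of SampleYAML's intended output shape: attributes present in attrs are emitted as real YAML entries, while the rest appear commented out, using Example (or a type's zero value) as the sample. The field names, env var and values below are invented:

	package main

	import (
		"os"

		"gopkg.in/juju/environschema.v1"
	)

	func main() {
		fields := environschema.Fields{
			"region": environschema.Attr{
				Type:        environschema.Tstring,
				Description: "The region to deploy to.",
				EnvVar:      "REGION", // mentioned as the default's source
			},
			"port": environschema.Attr{
				Type:    environschema.Tint,
				Example: 8080, // used for the commented-out sample entry
			},
		}
		// "region" is present in attrs, so it is emitted as a real entry;
		// "port" is absent, so it appears commented out with its Example value.
		attrs := map[string]interface{}{"region": "us-east-1"}
		if err := environschema.SampleYAML(os.Stdout, 0, attrs, fields); err != nil {
			panic(err)
		}
	}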
+// Licensed under the LGPLv3, see LICENCE file for details. + +package environschema_test + +import ( + "bytes" + "strings" + + gc "gopkg.in/check.v1" + + "gopkg.in/juju/environschema.v1" +) + +type sampleSuite struct{} + +var _ = gc.Suite(&sampleSuite{}) + +var sampleYAMLTests = []struct { + about string + indent int + attrs map[string]interface{} + fields environschema.Fields + expect string +}{{ + about: "simple values, all attributes specified", attrs: map[string]interface{}{ + "foo": "foovalue", + "bar": 1243, + "baz": false, + "attrs": map[string]string{ + "arble": "bletch", + "hello": "goodbye", + }, + }, + fields: environschema.Fields{ + "foo": { + Type: environschema.Tstring, + Description: "foo is a string.", + }, + "bar": { + Type: environschema.Tint, + Description: "bar is a number.\nWith a long description that contains newlines. And quite a bit more text that will be folded because it is longer than 80 characters.", + }, + "baz": { + Type: environschema.Tbool, + Description: "baz is a bool.", + }, + "attrs": { + Type: environschema.Tattrs, + Description: "attrs is an attribute list", + }, + }, + expect: ` + |# attrs is an attribute list + |# + |attrs: + | arble: bletch + | hello: goodbye + | + |# bar is a number. With a long description that contains newlines. And quite a + |# bit more text that will be folded because it is longer than 80 characters. + |# + |bar: 1243 + | + |# baz is a bool. + |# + |baz: false + | + |# foo is a string. + |# + |foo: foovalue + `, +}, { + about: "when a value is not specified, it's commented out", + attrs: map[string]interface{}{ + "foo": "foovalue", + }, + fields: environschema.Fields{ + "foo": { + Type: environschema.Tstring, + Description: "foo is a string.", + }, + "bar": { + Type: environschema.Tint, + Description: "bar is a number.", + Example: 1243, + }, + }, + expect: ` + |# bar is a number. + |# + |# bar: 1243 + | + |# foo is a string. + |# + |foo: foovalue + `, +}, { + about: "environment variables are mentioned as defaults", + attrs: map[string]interface{}{ + "bar": 1324, + "baz": true, + "foo": "foovalue", + }, + fields: environschema.Fields{ + "bar": { + Type: environschema.Tint, + Description: "bar is a number.", + EnvVars: []string{"BAR_VAL", "ALT_BAR_VAL"}, + }, + "baz": { + Type: environschema.Tbool, + Description: "baz is a bool.", + EnvVar: "BAZ_VAL", + EnvVars: []string{"ALT_BAZ_VAL", "ALT2_BAZ_VAL"}, + }, + "foo": { + Type: environschema.Tstring, + Description: "foo is a string.", + EnvVar: "FOO_VAL", + }, + }, + expect: ` + |# bar is a number. + |# + |# Default value taken from $BAR_VAL or $ALT_BAR_VAL. + |# + |bar: 1324 + | + |# baz is a bool. + |# + |# Default value taken from $BAZ_VAL, $ALT_BAZ_VAL or $ALT2_BAZ_VAL. + |# + |baz: true + | + |# foo is a string. + |# + |# Default value taken from $FOO_VAL. 
+ |# + |foo: foovalue + `, +}, { + about: "sorted by attribute group (provider, account, environ, other), then alphabetically", + fields: environschema.Fields{ + "baz": { + Type: environschema.Tbool, + Description: "baz is a bool.", + Group: environschema.ProviderGroup, + }, + "zaphod": { + Type: environschema.Tstring, + Group: environschema.ProviderGroup, + }, + "bar": { + Type: environschema.Tint, + Description: "bar is a number.", + Group: environschema.AccountGroup, + }, + "foo": { + Type: environschema.Tstring, + Description: "foo is a string.", + Group: environschema.AccountGroup, + }, + "alpha": { + Type: environschema.Tstring, + Group: environschema.EnvironGroup, + }, + "bravo": { + Type: environschema.Tstring, + Group: environschema.EnvironGroup, + }, + "charlie": { + Type: environschema.Tstring, + Group: "unknown", + }, + "delta": { + Type: environschema.Tstring, + Group: "unknown", + }, + }, + expect: ` + |# baz is a bool. + |# + |# baz: false + | + |# zaphod: "" + | + |# bar is a number. + |# + |# bar: 0 + | + |# foo is a string. + |# + |# foo: "" + | + |# alpha: "" + | + |# bravo: "" + | + |# charlie: "" + | + |# delta: "" +`, +}, { + about: "example value is used when possible; zero value otherwise", + fields: environschema.Fields{ + "intval-with-example": { + Type: environschema.Tint, + Example: 999, + }, + "intval": { + Type: environschema.Tint, + }, + "boolval": { + Type: environschema.Tbool, + }, + "attrsval": { + Type: environschema.Tattrs, + }, + }, + expect: ` + |# attrsval: + |# example: value + | + |# boolval: false + | + |# intval: 0 + | + |# intval-with-example: 999 + `, +}, { + about: "secret values are marked as secret/immutable", + fields: environschema.Fields{ + "a": { + Type: environschema.Tbool, + Description: "With a description", + Secret: true, + }, + "b": { + Type: environschema.Tstring, + Secret: true, + }, + "c": { + Type: environschema.Tstring, + Secret: true, + Description: "With a description", + EnvVar: "VAR", + }, + "d": { + Type: environschema.Tstring, + Immutable: true, + }, + "e": { + Type: environschema.Tstring, + Immutable: true, + Secret: true, + }, + }, + expect: ` + |# With a description + |# + |# This attribute is considered secret. + |# + |# a: false + | + |# This attribute is considered secret. + |# + |# b: "" + | + |# With a description + |# + |# Default value taken from $VAR. + |# + |# This attribute is considered secret. + |# + |# c: "" + | + |# This attribute is immutable. + |# + |# d: "" + | + |# This attribute is immutable and considered secret. + |# + |# e: "" + `, +}} + +func (*sampleSuite) TestSampleYAML(c *gc.C) { + for i, test := range sampleYAMLTests { + c.Logf("test %d. %s\n", i, test.about) + var buf bytes.Buffer + err := environschema.SampleYAML(&buf, 0, test.attrs, test.fields) + c.Assert(err, gc.IsNil) + diff(c, buf.String(), unbeautify(test.expect[1:])) + } +} + +// indentReplacer deletes tabs and | beautifier characters. +var indentReplacer = strings.NewReplacer("\t", "", "|", "") + +// unbeautify strips the leading tabs and | characters that +// we use to make the tests look nicer. +func unbeautify(s string) string { + return indentReplacer.Replace(s) +} + +func diff(c *gc.C, have, want string) { + // Final sanity check in case the below logic is flawed. 
+ defer c.Check(have, gc.Equals, want) + + haveLines := strings.Split(have, "\n") + wantLines := strings.Split(want, "\n") + + for i, wantLine := range wantLines { + if i >= len(haveLines) { + c.Errorf("have too few lines from line %d, %s", i+1, wantLine) + return + } + haveLine := haveLines[i] + if !c.Check(haveLine, gc.Equals, wantLine, gc.Commentf("line %d", i+1)) { + return + } + } + if len(haveLines) > len(wantLines) { + c.Errorf("have too many lines from line %d, %s", len(wantLines), haveLines[len(wantLines)]) + return + } +} === added directory 'src/gopkg.in/juju/jujusvg.v1' === added file 'src/gopkg.in/juju/jujusvg.v1/.gitignore' --- src/gopkg.in/juju/jujusvg.v1/.gitignore 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test === added file 'src/gopkg.in/juju/jujusvg.v1/LICENSE' --- src/gopkg.in/juju/jujusvg.v1/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,191 @@ +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. 
+ + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
=== added file 'src/gopkg.in/juju/jujusvg.v1/Makefile'
--- src/gopkg.in/juju/jujusvg.v1/Makefile	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/jujusvg.v1/Makefile	2016-03-22 15:18:22 +0000
@@ -0,0 +1,55 @@
+ifndef GOPATH
+	$(warning You need to set up a GOPATH.)
+endif
+
+PROJECT := gopkg.in/juju/jujusvg.v1
+PROJECT_DIR := $(shell go list -e -f '{{.Dir}}' $(PROJECT))
+
+help:
+	@echo "Available targets:"
+	@echo "  deps - fetch all dependencies"
+	@echo "  build - build the project"
+	@echo "  check - run tests"
+	@echo "  install - install the library in your GOPATH"
+	@echo "  clean - clean the project"
+
+# Start of GOPATH-dependent targets. Some targets only make sense -
+# and will only work - when this tree is found on the GOPATH.
+ifeq ($(CURDIR),$(PROJECT_DIR))
+
+deps:
+	go get -v -t $(PROJECT)/...
+
+build:
+	go build $(PROJECT)/...
+
+check:
+	go test $(PROJECT)/...
+
+install:
+	go install $(INSTALL_FLAGS) -v $(PROJECT)/...
+
+clean:
+	go clean $(PROJECT)/...
+
+else
+
+deps:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+build:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+check:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+install:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+clean:
+	$(error Cannot $@; $(CURDIR) is not on GOPATH)
+
+endif
+# End of GOPATH-dependent targets.
+
+.PHONY: help deps build check install clean
=== added file 'src/gopkg.in/juju/jujusvg.v1/README.md'
--- src/gopkg.in/juju/jujusvg.v1/README.md	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/jujusvg.v1/README.md	2016-03-22 15:18:22 +0000
@@ -0,0 +1,71 @@
+jujusvg
+=======
+
+A library for generating SVGs from Juju bundles and environments.
+
+Installation
+------------
+
+To start using jujusvg, first ensure you have a valid Go environment, then run
+the following:
+
+    go get gopkg.in/juju/jujusvg.v1
+
+Dependencies
+------------
+
+The project uses godeps (https://launchpad.net/godeps) to manage Go
+dependencies. To install this, run:
+
+    go get launchpad.net/godeps
+
+After installing it, you can update the dependencies to the revisions specified
+in the `dependencies.tsv` file with the following:
+
+    make deps
+
+Use `make create-deps` to update the dependencies file.
+
+Usage
+-----
+
+Given a Juju bundle, you can convert it to an SVG programmatically. This
+generates a simple SVG representation of a bundle or bundles that can then be
+included in a webpage as a visualization.
+
+For an example of how to use this library, please see `examples/generatesvg.go`.
+You can run this example like so:
+
+    go run generatesvg.go bundle.yaml > bundle.svg
+
+The examples directory also includes several sample bundles that you can play
+around with, or you can use the [Juju GUI](https://demo.jujucharms.com) to
+generate your own bundles.
+
+Design-related assets
+---------------------
+
+Some assets are based on assets provided by the design team. These
+assets are specified in the defs section of the generated SVG, and can thus
+be found in the Canvas.definition() method. These assets are, except where
+indicated, embedded in a Go file assigned to an exported variable, so that they
+may be used like so:
+
+```go
+import (
+	"io"
+
+	"gopkg.in/juju/jujusvg.v1/assets"
+)
+
+// ...
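+// canvas is assumed here to be an *svg.SVG from github.com/ajstarks/svgo
+// (as used by Canvas.Marshal), and assets.AssetToWrite stands for any of
+// the exported asset variables, e.g. assets.ServiceModule.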
+ +io.WriteString(canvas.Writer, assets.AssetToWrite) +``` + +Current assets in use: + +* The service block +* ~~The relation health indicator~~ *this is defined by hand, but a similar path + may be taken in the future if the asset should change* === added directory 'src/gopkg.in/juju/jujusvg.v1/assets' === added file 'src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg' --- src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/assets/relation-icon-healthy.svg 2016-03-22 15:18:22 +0000 @@ -0,0 +1,13 @@ + + + + + + + + + + + === added file 'src/gopkg.in/juju/jujusvg.v1/assets/service_module.go' --- src/gopkg.in/juju/jujusvg.v1/assets/service_module.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/assets/service_module.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,45 @@ +package assets + +// This is the SVG for the service module block used in the bundle diagram. +// Note that there MUST NOT be anything (processing instructions, xml +// declarations, or directives) before the tag. +var ServiceModule = ` + + + + + + + + + + + + + + + + + + + +` === added file 'src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg' --- src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/assets/service_module.svg 2016-03-22 15:18:22 +0000 @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + === added file 'src/gopkg.in/juju/jujusvg.v1/canvas.go' --- src/gopkg.in/juju/jujusvg.v1/canvas.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/canvas.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,303 @@ +package jujusvg + +import ( + "bytes" + "fmt" + "image" + "io" + "math" + + svg "github.com/ajstarks/svgo" + + "gopkg.in/juju/jujusvg.v1/assets" +) + +const ( + iconSize = 96 + serviceBlockSize = 189 + healthCircleRadius = 10 + relationLineWidth = 2 + maxInt = int(^uint(0) >> 1) + minInt = -(maxInt - 1) + maxHeight = 450 + maxWidth = 1000 + + fontColor = "#505050" + relationColor = "#38B44A" +) + +// Canvas holds the parsed form of a bundle or environment. +type Canvas struct { + services []*service + relations []*serviceRelation + iconsRendered map[string]bool + iconIds map[string]string +} + +// service represents a service deployed to an environment and contains the +// point of the top-left corner of the icon, icon URL, and additional metadata. +type service struct { + name string + charmPath string + iconUrl string + iconSrc []byte + point image.Point +} + +// serviceRelation represents a relation created between two services. +type serviceRelation struct { + serviceA *service + serviceB *service +} + +// line represents a line segment with two endpoints. +type line struct { + p0, p1 image.Point +} + +// definition creates any necessary defs that can be used later in the SVG. +func (s *service) definition(canvas *svg.SVG, iconsRendered map[string]bool, iconIds map[string]string) error { + if len(s.iconSrc) == 0 || iconsRendered[s.charmPath] { + return nil + } + iconsRendered[s.charmPath] = true + iconIds[s.charmPath] = fmt.Sprintf("icon-%d", len(iconsRendered)) + + // Temporary solution: + iconBuf := bytes.NewBuffer(s.iconSrc) + return processIcon(iconBuf, canvas.Writer, iconIds[s.charmPath]) +} + +// usage creates any necessary tags for actually using the service in the SVG. 
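+// It emits a <use> of the shared #serviceBlock definition, then the charm
+// icon (via <use> when icon data was fetched, or as a remote <image>
+// otherwise), and finally the service name as a text label.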
+func (s *service) usage(canvas *svg.SVG, iconIds map[string]string) { + canvas.Use( + s.point.X, + s.point.Y, + "#serviceBlock", + fmt.Sprintf(`id=%q`, s.name)) + if len(s.iconSrc) > 0 { + canvas.Use( + s.point.X+serviceBlockSize/2-iconSize/2, + s.point.Y+serviceBlockSize/2-iconSize/2, + "#"+iconIds[s.charmPath], + fmt.Sprintf(`width="%d" height="%d"`, iconSize, iconSize), + ) + } else { + canvas.Image( + s.point.X+serviceBlockSize/2-iconSize/2, + s.point.Y+serviceBlockSize/2-iconSize/2, + iconSize, + iconSize, + s.iconUrl, + ) + } + canvas.Textlines( + s.point.X+serviceBlockSize/2, + s.point.Y+serviceBlockSize/6, + []string{s.name}, + serviceBlockSize/10, + 0, + "#505050", + "middle") +} + +// definition creates any necessary defs that can be used later in the SVG. +func (r *serviceRelation) definition(canvas *svg.SVG) { +} + +// usage creates any necessary tags for actually using the relation in the SVG. +func (r *serviceRelation) usage(canvas *svg.SVG) { + l := r.shortestRelation() + canvas.Line( + l.p0.X, + l.p0.Y, + l.p1.X, + l.p1.Y, + fmt.Sprintf(`stroke=%q`, relationColor), + fmt.Sprintf(`stroke-width="%dpx"`, relationLineWidth), + fmt.Sprintf(`stroke-dasharray=%q`, strokeDashArray(l)), + ) + mid := l.p0.Add(l.p1).Div(2).Sub(point(healthCircleRadius, healthCircleRadius)) + canvas.Use(mid.X, mid.Y, "#healthCircle") +} + +// shortestRelation finds the shortest line between two services, assuming +// that each service can be connected on one of four cardinal points only. +func (r *serviceRelation) shortestRelation() line { + aConnectors, bConnectors := r.serviceA.cardinalPoints(), r.serviceB.cardinalPoints() + shortestDistance := float64(maxInt) + shortestPair := line{ + p0: r.serviceA.point, + p1: r.serviceB.point, + } + for _, pointA := range aConnectors { + for _, pointB := range bConnectors { + ab := line{p0: pointA, p1: pointB} + distance := ab.length() + if distance < shortestDistance { + shortestDistance = distance + shortestPair = ab + } + } + } + return shortestPair +} + +// cardinalPoints generates the points for each of the four cardinal points +// of each service. +func (s *service) cardinalPoints() []image.Point { + return []image.Point{ + point(s.point.X+serviceBlockSize/2, s.point.Y), + point(s.point.X, s.point.Y+serviceBlockSize/2), + point(s.point.X+serviceBlockSize/2, s.point.Y+serviceBlockSize), + point(s.point.X+serviceBlockSize, s.point.Y+serviceBlockSize/2), + } +} + +// strokeDashArray generates the stroke-dasharray attribute content so that +// the relation health indicator is placed in an empty space. +func strokeDashArray(l line) string { + return fmt.Sprintf("%.2f, %d", l.length()/2-healthCircleRadius, healthCircleRadius*2) +} + +// length calculates the length of a line. +func (l *line) length() float64 { + dp := l.p0.Sub(l.p1) + return math.Sqrt(square(float64(dp.X)) + square(float64(dp.Y))) +} + +// addService adds a new service to the canvas. +func (c *Canvas) addService(s *service) { + c.services = append(c.services, s) +} + +// addRelation adds a new relation to the canvas. +func (c *Canvas) addRelation(r *serviceRelation) { + c.relations = append(c.relations, r) +} + +// layout adjusts all items so that they are positioned appropriately, +// and returns the overall size of the canvas. 
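+// All services are translated so that the minimum occupied coordinate
+// becomes the origin; the returned width and height are the dimensions
+// of the resulting bounding box plus one serviceBlockSize.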
+func (c *Canvas) layout() (int, int) { + minWidth := maxInt + minHeight := maxInt + maxWidth := minInt + maxHeight := minInt + + for _, service := range c.services { + if service.point.X < minWidth { + minWidth = service.point.X + } + if service.point.Y < minHeight { + minHeight = service.point.Y + } + if service.point.X > maxWidth { + maxWidth = service.point.X + } + if service.point.Y > maxHeight { + maxHeight = service.point.Y + } + } + for _, service := range c.services { + service.point = service.point.Sub(point(minWidth, minHeight)) + } + return abs(maxWidth-minWidth) + serviceBlockSize, + abs(maxHeight-minHeight) + serviceBlockSize +} + +func (c *Canvas) definition(canvas *svg.SVG) { + canvas.Def() + defer canvas.DefEnd() + + // Service block. + canvas.Group(`id="serviceBlock"`, + `transform="scale(0.8)"`) + io.WriteString(canvas.Writer, assets.ServiceModule) + canvas.Gend() // Gid + + // Relation health circle. + canvas.Gid("healthCircle") + canvas.Circle( + healthCircleRadius, + healthCircleRadius, + healthCircleRadius, + fmt.Sprintf("stroke:%s;fill:none;stroke-width:%dpx", relationColor, relationLineWidth), + ) + canvas.Circle( + healthCircleRadius, + healthCircleRadius, + healthCircleRadius/2, + fmt.Sprintf("fill:%s", relationColor), + ) + canvas.Gend() + + // Service and relation specific defs. + for _, relation := range c.relations { + relation.definition(canvas) + } + for _, service := range c.services { + service.definition(canvas, c.iconsRendered, c.iconIds) + } +} + +func (c *Canvas) relationsGroup(canvas *svg.SVG) { + canvas.Gid("relations") + defer canvas.Gend() + for _, relation := range c.relations { + relation.usage(canvas) + } +} + +func (c *Canvas) servicesGroup(canvas *svg.SVG) { + canvas.Gid("services") + defer canvas.Gend() + for _, service := range c.services { + service.usage(canvas, c.iconIds) + } +} + +// Marshal renders the SVG to the given io.Writer. +func (c *Canvas) Marshal(w io.Writer) { + // Initialize maps for service icons, which are used both in definition + // and use methods for services. + c.iconsRendered = make(map[string]bool) + c.iconIds = make(map[string]string) + + // TODO check write errors and return an error from + // Marshal if the write fails. The svg package does not + // itself check or return write errors; a possible work-around + // is to wrap the writer in a custom writer that panics + // on error, and catch the panic here. + width, height := c.layout() + + canvas := svg.New(w) + canvas.Start( + width, + height, + fmt.Sprintf(`style="font-family:Ubuntu, sans-serif;" viewBox="0 0 %d %d"`, + width, height), + ) + defer canvas.End() + c.definition(canvas) + c.relationsGroup(canvas) + c.servicesGroup(canvas) +} + +// abs returns the absolute value of a number. +func abs(x int) int { + if x < 0 { + return -x + } else { + return x + } +} + +// square multiplies a number by itself. +func square(x float64) float64 { + return x * x +} + +// point generates an image.Point given its coordinates. 
+func point(x, y int) image.Point { + return image.Point{x, y} +} === added file 'src/gopkg.in/juju/jujusvg.v1/canvas_test.go' --- src/gopkg.in/juju/jujusvg.v1/canvas_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/canvas_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,259 @@ +package jujusvg + +import ( + "bytes" + "encoding/xml" + "image" + "io" + + "github.com/ajstarks/svgo" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/juju/jujusvg.v1/assets" +) + +type CanvasSuite struct{} + +var _ = gc.Suite(&CanvasSuite{}) + +func (s *CanvasSuite) TestServiceRender(c *gc.C) { + // Ensure that the Service's definition and usage methods output the + // proper SVG elements. + var tests = []struct { + about string + service service + expected string + }{ + { + about: "Service without iconSrc, no def created", + service: service{ + name: "foo", + point: image.Point{ + X: 0, + Y: 0, + }, + iconUrl: "foo", + }, + expected: ` + + +foo + +`, + }, + { + about: "Service with iconSrc", + service: service{ + name: "bar", + charmPath: "bar", + point: image.Point{ + X: 0, + Y: 0, + }, + iconSrc: []byte("bar"), + }, + expected: `bar + + +bar + +`, + }, + { + about: "Service with already def'd icon", + service: service{ + name: "baz", + charmPath: "bar", + point: image.Point{ + X: 0, + Y: 0, + }, + iconSrc: []byte("bar"), + }, + expected: ` + + +baz + +`, + }, + } + // Maintain our list of rendered icons outside the loop. + iconsRendered := make(map[string]bool) + iconIds := make(map[string]string) + for _, test := range tests { + var buf bytes.Buffer + svg := svg.New(&buf) + test.service.definition(svg, iconsRendered, iconIds) + test.service.usage(svg, iconIds) + c.Assert(buf.String(), gc.Equals, test.expected) + } +} + +func (s *CanvasSuite) TestRelationRender(c *gc.C) { + // Ensure that the Relation's definition and usage methods output the + // proper SVG elements. + var buf bytes.Buffer + svg := svg.New(&buf) + relation := serviceRelation{ + serviceA: &service{ + point: image.Point{ + X: 0, + Y: 0, + }, + }, + serviceB: &service{ + point: image.Point{ + X: 100, + Y: 100, + }, + }, + } + relation.definition(svg) + relation.usage(svg) + c.Assert(buf.String(), gc.Equals, + ` + +`) +} + +func (s *CanvasSuite) TestLayout(c *gc.C) { + // Ensure that the SVG is sized exactly around the positioned services. + canvas := Canvas{} + canvas.addService(&service{ + point: image.Point{ + X: 0, + Y: 0, + }, + }) + canvas.addService(&service{ + point: image.Point{ + X: 100, + Y: 100, + }, + }) + width, height := canvas.layout() + c.Assert(width, gc.Equals, 289) + c.Assert(height, gc.Equals, 289) + canvas.addService(&service{ + point: image.Point{ + X: -100, + Y: -100, + }, + }) + canvas.addService(&service{ + point: image.Point{ + X: -100, + Y: 100, + }, + }) + canvas.addService(&service{ + point: image.Point{ + X: 200, + Y: -100, + }, + }) + width, height = canvas.layout() + c.Assert(width, gc.Equals, 489) + c.Assert(height, gc.Equals, 389) +} + +func (s *CanvasSuite) TestMarshal(c *gc.C) { + // Ensure that the internal representation of the canvas can be marshalled + // to SVG. 
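+	// Note that assertXMLEqual compares XML token streams, so
+	// insignificant whitespace in the marshalled output is ignored.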
+ var buf bytes.Buffer + canvas := Canvas{} + serviceA := &service{ + name: "service-a", + charmPath: "trusty/svc-a", + point: image.Point{ + X: 0, + Y: 0, + }, + iconSrc: []byte(` + + + `), + } + serviceB := &service{ + name: "service-b", + point: image.Point{ + X: 100, + Y: 100, + }, + } + canvas.addService(serviceA) + canvas.addService(serviceB) + canvas.addRelation(&serviceRelation{ + serviceA: serviceA, + serviceB: serviceB, + }) + canvas.Marshal(&buf) + c.Logf("%s", buf) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + +`+assets.ServiceModule+` + + + + + + + + + + + + + + + + + +service-a + + + + +service-b + + + +`)) +} + +func assertXMLEqual(c *gc.C, obtained, expected []byte) { + toksObtained := xmlTokens(c, obtained) + toksExpected := xmlTokens(c, expected) + c.Assert(toksObtained, jc.DeepEquals, toksExpected) +} + +func xmlTokens(c *gc.C, data []byte) []xml.Token { + dec := xml.NewDecoder(bytes.NewReader(data)) + var toks []xml.Token + for { + tok, err := dec.Token() + if err == io.EOF { + return toks + } + c.Assert(err, gc.IsNil) + + if cdata, ok := tok.(xml.CharData); ok { + // It's char data - trim all white space and ignore it + // if it's all blank. + cdata = bytes.TrimSpace(cdata) + if len(cdata) == 0 { + continue + } + tok = cdata + } + toks = append(toks, xml.CopyToken(tok)) + } +} === added file 'src/gopkg.in/juju/jujusvg.v1/dependencies.tsv' --- src/gopkg.in/juju/jujusvg.v1/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,19 @@ +github.com/ajstarks/svgo git 89e3ac64b5b3e403a5e7c35ea4f98d45db7b4518 2014-10-04T21:11:59Z +github.com/juju/errors git 1b5e39b83d1835fa480e0c2ddefb040ee82d58b3 2015-09-16T12:56:42Z +github.com/juju/gojsonpointer git afe8b77aa08f272b49e01b82de78510c11f61500 2015-02-04T19:46:29Z +github.com/juju/gojsonreference git f0d24ac5ee330baa21721cdff56d45e4ee42628e 2015-02-04T19:46:33Z +github.com/juju/gojsonschema git e1ad140384f254c82f89450d9a7c8dd38a632838 2015-03-12T17:00:16Z +github.com/juju/loggo git 8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/names git e287fe4ae0dbda220cace3ed0e35cda4796c1aa3 2015-10-22T17:21:35Z +github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z +github.com/juju/testing git ad6f815f49f8209a27a3b7efb6d44876493e5939 2015-10-12T16:09:06Z +github.com/juju/utils git f2db28cef935aba0a7207254fa5dba273e649d0e 2015-11-09T11:51:43Z +github.com/juju/xml git eb759a627588d35166bc505fceb51b88500e291e 2015-04-13T13:11:21Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z +gopkg.in/errgo.v1 git 66cb46252b94c1f3d65646f54ee8043ab38d766c 2015-10-07T15:31:57Z +gopkg.in/juju/charm.v6-unstable git a3d228ef5292531219d17d47679b260580fba1a8 2015-11-19T07:39:58Z +gopkg.in/mgo.v2 git 4d04138ffef2791c479c0c8bbffc30b34081b8d9 2015-10-26T16:34:53Z +gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z +gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z +launchpad.net/tomb bzr gustavo@niemeyer.net-20140529072043-hzcrlnl3ygvg914q 18 === added file 'src/gopkg.in/juju/jujusvg.v1/doc.go' --- src/gopkg.in/juju/jujusvg.v1/doc.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/doc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,8 @@ +// Copyright 2014 Canonical, Ltd. +// Licensed under the LGPLv3, see LICENCE file for details. 
+ +// jujusvg generates SVG representations of various Juju artifacts, such as +// charm bundles or live environments. +// +// For more information, please refer to the README file in this directory. +package jujusvg === added directory 'src/gopkg.in/juju/jujusvg.v1/examples' === added file 'src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml' --- src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/examples/charmworld-missing-placement.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,29 @@ +services: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + annotations: + "gui-x": "940.5" + "gui-y": "388.7698359714502" + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + annotations: + "gui-x": "813.5" + "gui-y": "112.23016402854975" + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise === added file 'src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml' --- src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/examples/charmworld-no-placement.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,23 @@ +services: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise === added file 'src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml' --- src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/examples/charmworld.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,32 @@ +services: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + annotations: + "gui-x": "940.5" + "gui-y": "388.7698359714502" + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + annotations: + "gui-x": "490.5" + "gui-y": "369.7698359714502" + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + annotations: + "gui-x": "813.5" + "gui-y": "112.23016402854975" + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise === added file 'src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go' --- src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/examples/generatesvg.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,56 @@ +package main + +// This is a demo application that uses the jujusvg library to build a bundle SVG +// from a given bundle.yaml file. 
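+//
+// Usage (the generated SVG is written to stdout):
+//
+//	go run generatesvg.go bundle.yaml > bundle.svg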
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+
+	"gopkg.in/juju/charm.v6-unstable"
+
+	// Import the jujusvg library and the juju charm library
+	"gopkg.in/juju/jujusvg.v1"
+)
+
+// iconURL takes a reference to a charm and returns the URL for that charm's icon.
+// In this case, we're using the api.jujucharms.com API to provide the icon's URL.
+func iconURL(ref *charm.URL) string {
+	return "https://api.jujucharms.com/charmstore/v4/" + ref.Path() + "/icon.svg"
+}
+
+func main() {
+	if len(os.Args) != 2 {
+		log.Fatalf("Please provide the name of a bundle file as the first argument")
+	}
+
+	// First, we need to read our bundle data into a []byte.
+	bundleData, err := ioutil.ReadFile(os.Args[1])
+	if err != nil {
+		log.Fatalf("Error reading bundle: %s\n", err)
+	}
+
+	// Next, generate a charm.BundleData from the byte slice by passing it to
+	// charm.ReadBundleData. This gives us an in-memory representation of the
+	// bundle that we can pass to jujusvg.
+	bundle, err := charm.ReadBundleData(strings.NewReader(string(bundleData)))
+	if err != nil {
+		log.Fatalf("Error parsing bundle: %s\n", err)
+	}
+
+	fetcher := &jujusvg.HTTPFetcher{
+		IconURL: iconURL,
+	}
+	// Next, build a canvas of the bundle. This is a simplified representation
+	// of the bundle that contains just the position information and charm icon
+	// URLs necessary to build the SVG rendering of the bundle.
+	canvas, err := jujusvg.NewFromBundle(bundle, iconURL, fetcher)
+	if err != nil {
+		log.Fatalf("Error generating canvas: %s\n", err)
+	}
+
+	// Finally, marshal that canvas as SVG to os.Stdout; this will print the SVG data
+	// required to generate an image of the bundle.
+	canvas.Marshal(os.Stdout)
+}
=== added file 'src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml'
--- src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/jujusvg.v1/examples/kubernetes-bundle.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,44 @@
+services:
+  "kubernetes-master":
+    charm: cs:~kubernetes/trusty/kubernetes-master-5
+    annotations:
+      "gui-x": "600"
+      "gui-y": "0"
+    expose: true
+  docker:
+    charm: cs:trusty/docker-2
+    num_units: 2
+    annotations:
+      "gui-x": "0"
+      "gui-y": "0"
+  flannel-docker:
+    charm: cs:trusty/flannel-docker-5
+    annotations:
+      "gui-x": "0"
+      "gui-y": "300"
+  kubernetes:
+    charm: cs:~kubernetes/trusty/kubernetes-5
+    annotations:
+      "gui-x": "300"
+      "gui-y": "300"
+  etcd:
+    charm: cs:~kubernetes/trusty/etcd-2
+    annotations:
+      "gui-x": "300"
+      "gui-y": "0"
+relations:
+  - - "flannel-docker:network"
+    - "docker:network"
+  - - "flannel-docker:docker-host"
+    - "docker:juju-info"
+  - - "flannel-docker:db"
+    - "etcd:client"
+  - - "kubernetes:docker-host"
+    - "docker:juju-info"
+  - - "etcd:client"
+    - "kubernetes:etcd"
+  - - "etcd:client"
+    - "kubernetes-master:etcd"
+  - - "kubernetes-master:minions-api"
+    - "kubernetes:api"
+series: trusty
=== added file 'src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml'
--- src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/juju/jujusvg.v1/examples/mediawiki-scalable.yaml	2016-03-22 15:18:22 +0000
@@ -0,0 +1,114 @@
+services:
+  haproxy:
+    charm: cs:precise/haproxy-35
+    num_units: 1
+    options:
+      default_log: global
+      default_mode: http
+      default_options: httplog, dontlognull
+      default_retries: 3
+      default_timeouts: queue 20000, client 50000, connect 5000, server 50000
+      enable_monitoring: false
+      global_debug: false
+      global_group: haproxy
+      global_log: 127.0.0.1 local0,
127.0.0.1 local1 notice + global_maxconn: 4096 + global_quiet: false + global_spread_checks: 0 + global_user: haproxy + monitoring_allowed_cidr: 127.0.0.1/32 + monitoring_password: changeme + monitoring_port: 10000 + monitoring_stats_refresh: 3 + monitoring_username: haproxy + nagios_context: juju + package_status: install + services: "- service_name: haproxy_service\n service_host: \"0.0.0.0\"\n service_port: + 80\n service_options: [balance leastconn]\n server_options: maxconn 100\n" + sysctl: "" + annotations: + gui-x: "619" + gui-y: "-406" + mediawiki: + charm: cs:precise/mediawiki-10 + num_units: 1 + options: + debug: false + name: Please set name of wiki + skin: vector + annotations: + gui-x: "618" + gui-y: "-128" + memcached: + charm: cs:precise/memcached-7 + num_units: 1 + options: + connection-limit: 1024 + disable-auto-cleanup: "no" + disable-cas: "no" + disable-large-pages: "no" + extra-options: "" + factor: 1.25 + min-item-size: -1 + nagios_context: juju + request-limit: -1 + size: 768 + slab-page-size: -1 + tcp-port: 11211 + threads: -1 + udp-port: 0 + annotations: + gui-x: "926" + gui-y: "-125" + mysql: + charm: cs:precise/mysql-28 + num_units: 1 + options: + binlog-format: MIXED + block-size: 5 + dataset-size: 80% + flavor: distro + ha-bindiface: eth0 + ha-mcastport: 5411 + max-connections: -1 + preferred-storage-engine: InnoDB + query-cache-size: -1 + query-cache-type: "OFF" + rbd-name: mysql1 + tuning-level: safest + vip_cidr: 24 + vip_iface: eth0 + annotations: + gui-x: "926" + gui-y: "123" + mysql-slave: + charm: cs:precise/mysql-28 + num_units: 1 + options: + binlog-format: MIXED + block-size: 5 + dataset-size: 80% + flavor: distro + ha-bindiface: eth0 + ha-mcastport: 5411 + max-connections: -1 + preferred-storage-engine: InnoDB + query-cache-size: -1 + query-cache-type: "OFF" + rbd-name: mysql1 + tuning-level: safest + vip_cidr: 24 + vip_iface: eth0 + annotations: + gui-x: "619" + gui-y: "124" +series: precise +relations: +- - mediawiki:cache + - memcached:cache +- - haproxy:reverseproxy + - mediawiki:website +- - mysql-slave:slave + - mysql:master +- - mediawiki:slave + - mysql-slave:db === added file 'src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml' --- src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/examples/openstack.yaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,268 @@ +services: + mongodb: + charm: "cs:precise/mongodb-36" + num_units: 1 + constraints: mem=1G + annotations: + "gui-x": "639.4860908103093" + "gui-y": "636.380460366218" + ceilometer: + charm: "cs:precise/ceilometer-22" + num_units: 1 + constraints: mem=1G + annotations: + "gui-x": "350.1477364318532" + "gui-y": "922.7442622575415" + ceph: + charm: "cs:precise/ceph-27" + num_units: 3 + options: + "ephemeral-unmount": /mnt + fsid: "6547bd3e-1397-11e2-82e5-53567c8d32dc" + "monitor-secret": AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ== + "osd-reformat": "yes" + constraints: mem=1G + annotations: + "gui-x": "-20.643093692706657" + "gui-y": "51.01664830551104" + cinder: + charm: "cs:precise/cinder-27" + num_units: 1 + options: + "block-device": None + annotations: + "gui-x": "194.05177718665982" + "gui-y": "266.145686795207" + keystone: + charm: "cs:precise/keystone-38" + num_units: 1 + options: + "admin-password": openstack + "admin-token": ubuntutesting + constraints: mem=1G + annotations: + "gui-x": "-4.963964429345083" + "gui-y": "468.638517006843" + heat: + charm: "cs:precise/heat-4" + num_units: 1 + constraints: mem=1G + annotations: + 
"gui-x": "94.05177718665982" + "gui-y": "748.5507197770728" + "ceilometer-agent": + charm: "cs:precise/ceilometer-agent-19" + num_units: 0 + annotations: + "gui-x": "309.1965280494909" + "gui-y": "552.5094886865305" + mysql: + charm: "cs:precise/mysql-48" + num_units: 1 + options: + "dataset-size": "50%" + constraints: mem=1G + annotations: + "gui-x": "644.9534429914997" + "gui-y": "909.841064015354" + "nova-cloud-controller": + charm: "cs:precise/nova-cloud-controller-43" + num_units: 1 + options: + "network-manager": Neutron + "quantum-security-groups": "yes" + constraints: mem=1G + annotations: + "gui-x": "1534.2916447820653" + "gui-y": "336.380460366218" + "neutron-gateway": + charm: "cs:precise/quantum-gateway-21" + num_units: 1 + constraints: mem=1G + annotations: + "gui-x": "1051.4050421125935" + "gui-y": "95.17794387528511" + "nova-compute": + charm: "cs:precise/nova-compute-35" + num_units: 3 + options: + "config-flags": auto_assign_floating_ip=False + constraints: mem=4G + annotations: + "gui-x": "407.14071845761987" + "gui-y": "-100.96660846127398" + ntp: + charm: "cs:precise/ntp-3" + num_units: 0 + annotations: + "gui-x": "1404.9864510314626" + "gui-y": "-79.0155986051837" + glance: + charm: "cs:precise/glance-34" + num_units: 1 + constraints: mem=1G + annotations: + "gui-x": "829.8086616110905" + "gui-y": "-123.44393373014978" + "openstack-dashboard": + charm: "cs:precise/openstack-dashboard-20" + num_units: 1 + constraints: mem=1G + annotations: + "gui-x": "998.0178961164997" + "gui-y": "850.0023188981665" + "rabbitmq-server": + charm: "cs:precise/rabbitmq-server-33" + num_units: 1 + constraints: mem=1G + annotations: + "gui-x": "1078.6630377180622" + "gui-y": "495.1636348161353" + "swift-storage-z1": + charm: "cs:precise/swift-storage-22" + num_units: 1 + options: + "block-device": vdb + overwrite: "true" + constraints: mem=1G + annotations: + "gui-x": "1285.1146978743122" + "gui-y": "808.0668330583228" + "swift-storage-z2": + charm: "cs:precise/swift-storage-22" + num_units: 1 + options: + "block-device": vdb + overwrite: "true" + zone: 2 + constraints: mem=1G + annotations: + "gui-x": "1620.5985845930622" + "gui-y": "688.7120356950415" + "swift-storage-z3": + charm: "cs:precise/swift-storage-22" + num_units: 1 + options: + "block-device": vdb + overwrite: "true" + zone: 3 + constraints: mem=1G + annotations: + "gui-x": "1791.5662359602497" + "gui-y": "337.0991511735572" + "swift-proxy": + charm: "cs:precise/swift-proxy-34" + num_units: 1 + options: + "swift-hash": "fdfef9d4-8b06-11e2-8ac0-531c923c8fae" + "use-https": "no" + constraints: mem=1G + annotations: + "gui-x": "1791.5663580305622" + "gui-y": "53.22814134689702" +relations: + - - "ntp:juju-info" + - "heat:juju-info" + - - "ntp:juju-info" + - "openstack-dashboard:juju-info" + - - "ntp:juju-info" + - "mysql:juju-info" + - - "ntp:juju-info" + - "rabbitmq-server:juju-info" + - - "ntp:juju-info" + - "mongodb:juju-info" + - - "ntp:juju-info" + - "ceilometer:juju-info" + - - "ntp:juju-info" + - "swift-storage-z3:juju-info" + - - "ntp:juju-info" + - "swift-storage-z2:juju-info" + - - "ntp:juju-info" + - "swift-storage-z1:juju-info" + - - "ntp:juju-info" + - "swift-proxy:juju-info" + - - "ntp:juju-info" + - "glance:juju-info" + - - "ntp:juju-info" + - "keystone:juju-info" + - - "keystone:shared-db" + - "mysql:shared-db" + - - "nova-cloud-controller:shared-db" + - "mysql:shared-db" + - - "nova-cloud-controller:amqp" + - "rabbitmq-server:amqp" + - - "nova-cloud-controller:image-service" + - "glance:image-service" + - - 
"nova-cloud-controller:identity-service" + - "keystone:identity-service" + - - "nova-cloud-controller:cloud-compute" + - "nova-compute:cloud-compute" + - - "nova-compute:shared-db" + - "mysql:shared-db" + - - "nova-compute:amqp" + - "rabbitmq-server:amqp" + - - "nova-compute:image-service" + - "glance:image-service" + - - "nova-compute:ceph" + - "ceph:client" + - - "glance:shared-db" + - "mysql:shared-db" + - - "glance:identity-service" + - "keystone:identity-service" + - - "glance:ceph" + - "ceph:client" + - - "cinder:image-service" + - "glance:image-service" + - - "cinder:shared-db" + - "mysql:shared-db" + - - "cinder:amqp" + - "rabbitmq-server:amqp" + - - "nova-cloud-controller:cinder-volume-service" + - "cinder:cinder-volume-service" + - - "cinder:identity-service" + - "keystone:identity-service" + - - "cinder:ceph" + - "ceph:client" + - - "neutron-gateway:shared-db" + - "mysql:shared-db" + - - "neutron-gateway:amqp" + - "rabbitmq-server:amqp" + - - "nova-cloud-controller:quantum-network-service" + - "neutron-gateway:quantum-network-service" + - - "openstack-dashboard:identity-service" + - "keystone:identity-service" + - - "swift-proxy:identity-service" + - "keystone:identity-service" + - - "swift-proxy:swift-storage" + - "swift-storage-z1:swift-storage" + - - "swift-proxy:swift-storage" + - "swift-storage-z2:swift-storage" + - - "swift-proxy:swift-storage" + - "swift-storage-z3:swift-storage" + - - "ceilometer:identity-service" + - "keystone:identity-service" + - - "ceilometer:amqp" + - "rabbitmq-server:amqp" + - - "ceilometer:shared-db" + - "mongodb:database" + - - "nova-compute:nova-ceilometer" + - "ceilometer-agent:nova-ceilometer" + - - "ceilometer-agent:ceilometer-service" + - "ceilometer:ceilometer-service" + - - "heat:identity-service" + - "keystone:identity-service" + - - "heat:shared-db" + - "mysql:shared-db" + - - "heat:amqp" + - "rabbitmq-server:amqp" + - - "ntp:juju-info" + - "nova-compute:juju-info" + - - "ntp:juju-info" + - "nova-cloud-controller:juju-info" + - - "ntp:juju-info" + - "neutron-gateway:juju-info" + - - "ntp:juju-info" + - "ceph:juju-info" + - - "ntp:juju-info" + - "cinder:juju-info" +series: precise === added file 'src/gopkg.in/juju/jujusvg.v1/hull.go' --- src/gopkg.in/juju/jujusvg.v1/hull.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/hull.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +package jujusvg + +import ( + "image" + "math" + "sort" +) + +// getPointOutside returns a point that is outside the hull of existing placed +// vertices so that an object can be placed on the canvas without overlapping +// others. +func getPointOutside(vertices []image.Point, padding image.Point) image.Point { + // Shortcut some easy solutions. + switch len(vertices) { + case 0: + return image.Point{0, 0} + case 1: + return image.Point{ + vertices[0].X + padding.X, + vertices[0].Y + padding.Y, + } + case 2: + return image.Point{ + int(math.Max(float64(vertices[0].X), float64(vertices[1].X))) + padding.X, + int(math.Max(float64(vertices[0].Y), float64(vertices[1].Y))) + padding.Y, + } + } + hull := convexHull(vertices) + // Find point that is the furthest to the right on the hull. 
+ var rightmost image.Point + maxDistance := 0.0 + for _, vertex := range hull { + fromOrigin := line{p0: vertex, p1: image.Point{0, 0}} + distance := fromOrigin.length() + if math.Abs(distance) > maxDistance { + maxDistance = math.Abs(distance) + rightmost = vertex + } + } + return image.Point{ + rightmost.X + padding.X, + rightmost.Y + padding.Y, + } +} + +// vertexSet implements sort.Interface for image.Point, sorting first by X, then +// by Y +type vertexSet []image.Point + +func (vs vertexSet) Len() int { return len(vs) } +func (vs vertexSet) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs vertexSet) Less(i, j int) bool { + if vs[i].X == vs[j].X { + return vs[i].Y < vs[j].Y + } + return vs[i].X < vs[j].X +} + +// convexHull takes a list of vertices and returns the set of vertices which +// make up the convex hull encapsulating all vertices on a plane. +func convexHull(vertices []image.Point) []image.Point { + // Simple cases can be shortcutted. + if len(vertices) == 0 { + return []image.Point{ + {0, 0}, + } + } + // For our purposes, we can assume that three vertices form a hull. + if len(vertices) < 4 { + return vertices + } + + sort.Sort(vertexSet(vertices)) + var lower, upper []image.Point + for _, vertex := range vertices { + for len(lower) >= 2 && cross(lower[len(lower)-2], lower[len(lower)-1], vertex) <= 0 { + lower = lower[:len(lower)-1] + } + lower = append(lower, vertex) + } + + for _, vertex := range reverse(vertices) { + for len(upper) >= 2 && cross(upper[len(upper)-2], upper[len(upper)-1], vertex) <= 0 { + upper = upper[:len(upper)-1] + } + upper = append(upper, vertex) + } + return append(lower[:len(lower)-1], upper[:len(upper)-1]...) +} + +// cross finds the 2D cross-product of OA and OB vectors. +// Returns a positive value if OAB makes a counter-clockwise turn, a negative +// value if OAB makes a clockwise turn, and zero if the points are collinear. +func cross(o, a, b image.Point) int { + return (a.X-o.X)*(b.Y-o.Y) - (a.Y-o.Y)*(b.X-o.X) +} + +// reverse reverses a slice of Points for use in finding the upper hull. 
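+// The slice is reversed in place and also returned for convenience.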
+func reverse(vertices []image.Point) []image.Point { + for i := 0; i < len(vertices)/2; i++ { + opp := len(vertices) - (i + 1) + vertices[i], vertices[opp] = vertices[opp], vertices[i] + } + return vertices +} === added file 'src/gopkg.in/juju/jujusvg.v1/hull_test.go' --- src/gopkg.in/juju/jujusvg.v1/hull_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/hull_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,77 @@ +package jujusvg + +import ( + "image" + + gc "gopkg.in/check.v1" +) + +type HullSuite struct{} + +var _ = gc.Suite(&HullSuite{}) + +func (s *HullSuite) TestGetPointOutside(c *gc.C) { + var tests = []struct { + about string + vertices []image.Point + expected image.Point + }{ + { + about: "zero vertices", + vertices: []image.Point{}, + expected: image.Point{0, 0}, + }, + { + about: "one vertex", + vertices: []image.Point{{0, 0}}, + expected: image.Point{10, 10}, + }, + { + about: "two vertices", + vertices: []image.Point{{0, 0}, {10, 10}}, + expected: image.Point{20, 20}, + }, + { + about: "three vertices (convexHull fall through)", + vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}}, + expected: image.Point{10, 20}, + }, + { + about: "four vertices", + vertices: []image.Point{{0, 0}, {0, 10}, {10, 0}, {10, 10}}, + expected: image.Point{20, 20}, + }, + } + for _, test := range tests { + c.Log(test.about) + c.Assert(getPointOutside(test.vertices, image.Point{10, 10}), gc.Equals, test.expected) + } +} + +func (s *HullSuite) TestConvexHull(c *gc.C) { + // Zero vertices + vertices := []image.Point{} + c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{{0, 0}}) + + // Identities + vertices = []image.Point{{1, 1}} + c.Assert(convexHull(vertices), gc.DeepEquals, vertices) + + vertices = []image.Point{{1, 1}, {2, 2}} + c.Assert(convexHull(vertices), gc.DeepEquals, vertices) + + vertices = []image.Point{{1, 1}, {2, 2}, {1, 2}} + c.Assert(convexHull(vertices), gc.DeepEquals, vertices) + + // > 3 vertices + vertices = []image.Point{} + for i := 0; i < 100; i++ { + vertices = append(vertices, image.Point{i / 10, i % 10}) + } + c.Assert(convexHull(vertices), gc.DeepEquals, []image.Point{ + {0, 0}, + {9, 0}, + {9, 9}, + {0, 9}, + }) +} === added file 'src/gopkg.in/juju/jujusvg.v1/iconfetcher.go' --- src/gopkg.in/juju/jujusvg.v1/iconfetcher.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/iconfetcher.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,139 @@ +package jujusvg + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "sync" + + "github.com/juju/utils/parallel" + "github.com/juju/xml" + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// An IconFetcher provides functionality for retrieving icons for the charms +// within a given bundle. The FetchIcons function accepts a bundle, and +// returns a map from charm paths to icon data. +type IconFetcher interface { + FetchIcons(*charm.BundleData) (map[string][]byte, error) +} + +// LinkFetcher fetches icons as links so that they are included within the SVG +// as remote resources using SVG tags. +type LinkFetcher struct { + // IconURL returns the URL of the entity for embedding + IconURL func(*charm.URL) string +} + +// FetchIcons generates the svg image tags given an appropriate URL, generating +// tags only for unique icons. +func (l *LinkFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) { + // Maintain a list of icons that have already been fetched. + alreadyFetched := make(map[string]bool) + + // Build the map of icons. 
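+	// Keys are charm paths (charm.URL.Path()), e.g. "precise/mongodb-21",
+	// matching the charmPath used when the icons are rendered.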
+	icons := make(map[string][]byte)
+	for _, serviceData := range b.Services {
+		charmId, err := charm.ParseURL(serviceData.Charm)
+		if err != nil {
+			return nil, errgo.Notef(err, "cannot parse charm %q", serviceData.Charm)
+		}
+		path := charmId.Path()
+
+		// Don't duplicate icons in the map.
+		if !alreadyFetched[path] {
+			alreadyFetched[path] = true
+			icons[path] = []byte(fmt.Sprintf(`
+
+
+`, escapeString(l.IconURL(charmId))))
+		}
+	}
+	return icons, nil
+}
+
+// escapeString wraps xml.EscapeText to make it more string-friendly.
+func escapeString(s string) string {
+	var buf bytes.Buffer
+	xml.EscapeText(&buf, []byte(s))
+	return buf.String()
+}
+
+// HTTPFetcher is an implementation of IconFetcher which retrieves charm
+// icons from the web using the URL generated by IconURL for each charm. The
+// HTTP client used may be overridden by supplying an http.Client; otherwise
+// http.DefaultClient is used. The icons may optionally be fetched
+// concurrently.
+type HTTPFetcher struct {
+	// Concurrency specifies the number of goroutines to use when fetching
+	// icons. If it is not positive, 10 will be used. Setting this to 1
+	// makes this method nominally synchronous.
+	Concurrency int
+
+	// IconURL returns the URL from which to fetch the given entity's icon SVG.
+	IconURL func(*charm.URL) string
+
+	// Client specifies what HTTP client to use; if it is not provided,
+	// http.DefaultClient will be used.
+	Client *http.Client
+}
+
+// FetchIcons retrieves icon SVGs over HTTP, fetching up to Concurrency
+// icons at a time.
+func (h *HTTPFetcher) FetchIcons(b *charm.BundleData) (map[string][]byte, error) {
+	client := http.DefaultClient
+	if h.Client != nil {
+		client = h.Client
+	}
+	concurrency := h.Concurrency
+	if concurrency <= 0 {
+		concurrency = 10
+	}
+	var iconsMu sync.Mutex // Guards icons.
+	icons := make(map[string][]byte)
+	alreadyFetched := make(map[string]bool)
+	run := parallel.NewRun(concurrency)
+	for _, serviceData := range b.Services {
+		charmId, err := charm.ParseURL(serviceData.Charm)
+		if err != nil {
+			return nil, errgo.Notef(err, "cannot parse charm %q", serviceData.Charm)
+		}
+		path := charmId.Path()
+		if alreadyFetched[path] {
+			continue
+		}
+		alreadyFetched[path] = true
+		run.Do(func() error {
+			icon, err := h.fetchIcon(h.IconURL(charmId), client)
+			if err != nil {
+				return err
+			}
+			iconsMu.Lock()
+			defer iconsMu.Unlock()
+			icons[path] = icon
+			return nil
+		})
+	}
+	if err := run.Wait(); err != nil {
+		return nil, err
+	}
+	return icons, nil
+}
+
+// fetchIcon retrieves a single icon SVG over HTTP.
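+// A response with any status other than 200 OK is reported as an error
+// rather than being treated as icon data.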
+func (h *HTTPFetcher) fetchIcon(url string, client *http.Client) ([]byte, error) { + resp, err := client.Get(url) + if err != nil { + return nil, errgo.Notef(err, "HTTP error fetching %s: %v", url, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errgo.Newf("cannot retrieve icon from %s: %s", url, resp.Status) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errgo.Notef(err, "could not read icon data from url %s", url) + } + return body, nil +} === added file 'src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go' --- src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/iconfetcher_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,119 @@ +package jujusvg + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +type IconFetcherSuite struct{} + +var _ = gc.Suite(&IconFetcherSuite{}) + +func (s *IconFetcherSuite) TestLinkFetchIcons(c *gc.C) { + tests := map[string][]byte{ + "~charming-devs/precise/elasticsearch-2": []byte(` + + + `), + "~juju-jitsu/precise/charmworld-58": []byte(` + + + `), + "precise/mongodb-21": []byte(` + + + `), + } + iconURL := func(ref *charm.URL) string { + return "/" + ref.Path() + ".svg" + } + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + fetcher := LinkFetcher{ + IconURL: iconURL, + } + iconMap, err := fetcher.FetchIcons(b) + c.Assert(err, gc.IsNil) + for charm, link := range tests { + assertXMLEqual(c, []byte(iconMap[charm]), []byte(link)) + } +} + +func (s *IconFetcherSuite) TestHTTPFetchIcons(c *gc.C) { + fetchCount := 0 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fetchCount++ + fmt.Fprintln(w, fmt.Sprintf("%s", r.URL.Path)) + })) + defer ts.Close() + + tsIconURL := func(ref *charm.URL) string { + return ts.URL + "/" + ref.Path() + ".svg" + } + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + // Only one copy of precise/mongodb-21 + b.Services["duplicateService"] = &charm.ServiceSpec{ + Charm: "cs:precise/mongodb-21", + NumUnits: 1, + } + fetcher := HTTPFetcher{ + Concurrency: 1, + IconURL: tsIconURL, + } + iconMap, err := fetcher.FetchIcons(b) + c.Assert(err, gc.IsNil) + c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ + "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), + "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), + "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), + }) + + fetcher.Concurrency = 10 + iconMap, err = fetcher.FetchIcons(b) + c.Assert(err, gc.IsNil) + c.Assert(iconMap, gc.DeepEquals, map[string][]byte{ + "~charming-devs/precise/elasticsearch-2": []byte("/~charming-devs/precise/elasticsearch-2.svg\n"), + "~juju-jitsu/precise/charmworld-58": []byte("/~juju-jitsu/precise/charmworld-58.svg\n"), + "precise/mongodb-21": []byte("/precise/mongodb-21.svg\n"), + }) + c.Assert(fetchCount, gc.Equals, 6) +} + +func (s *IconFetcherSuite) TestHTTPBadIconURL(c *gc.C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "bad-wolf", http.StatusForbidden) + return + })) + defer ts.Close() + + tsIconURL := func(ref *charm.URL) string { + return ts.URL + "/" + ref.Path() + ".svg" + } + + b, 
err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + fetcher := HTTPFetcher{ + Concurrency: 1, + IconURL: tsIconURL, + } + iconMap, err := fetcher.FetchIcons(b) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) + c.Assert(iconMap, gc.IsNil) + + fetcher.Concurrency = 10 + iconMap, err = fetcher.FetchIcons(b) + c.Assert(err, gc.ErrorMatches, fmt.Sprintf("cannot retrieve icon from %s.+\\.svg: 403 Forbidden.*", ts.URL)) + c.Assert(iconMap, gc.IsNil) +} === added file 'src/gopkg.in/juju/jujusvg.v1/jujusvg.go' --- src/gopkg.in/juju/jujusvg.v1/jujusvg.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/jujusvg.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,99 @@ +package jujusvg // import "gopkg.in/juju/jujusvg.v1" + +import ( + "image" + "math" + "sort" + "strconv" + "strings" + + "gopkg.in/errgo.v1" + "gopkg.in/juju/charm.v6-unstable" +) + +// NewFromBundle returns a new Canvas that can be used +// to generate a graphical representation of the given bundle +// data. The iconURL function is used to generate a URL +// that refers to an SVG for the supplied charm URL. +// If fetcher is non-nil, it will be used to fetch icon +// contents for any icons embedded within the charm, +// allowing the generated bundle to be self-contained. If fetcher +// is nil, a default fetcher which refers to icons by their +// URLs as svg tags will be used. +func NewFromBundle(b *charm.BundleData, iconURL func(*charm.URL) string, fetcher IconFetcher) (*Canvas, error) { + if fetcher == nil { + fetcher = &LinkFetcher{ + IconURL: iconURL, + } + } + iconMap, err := fetcher.FetchIcons(b) + if err != nil { + return nil, err + } + + var canvas Canvas + + // Verify the bundle to make sure that all the invariants + // that we depend on below actually hold true. + if err := b.Verify(nil, nil); err != nil { + return nil, errgo.Notef(err, "cannot verify bundle") + } + // Go through all services in alphabetical order so that + // we get consistent results. + serviceNames := make([]string, 0, len(b.Services)) + for name := range b.Services { + serviceNames = append(serviceNames, name) + } + sort.Strings(serviceNames) + services := make(map[string]*service) + servicesNeedingPlacement := make(map[string]bool) + for _, name := range serviceNames { + serviceData := b.Services[name] + x, xerr := strconv.ParseFloat(serviceData.Annotations["gui-x"], 64) + y, yerr := strconv.ParseFloat(serviceData.Annotations["gui-y"], 64) + if xerr != nil || yerr != nil { + if serviceData.Annotations["gui-x"] == "" && serviceData.Annotations["gui-y"] == "" { + servicesNeedingPlacement[name] = true + x = 0 + y = 0 + } else { + return nil, errgo.Newf("service %q does not have a valid position", name) + } + } + charmID, err := charm.ParseURL(serviceData.Charm) + if err != nil { + // cannot actually happen, as we've verified it. 
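+			// We still return the error rather than swallowing it, in
+			// case that assumption is ever violated.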
+ return nil, errgo.Notef(err, "cannot parse charm %q", serviceData.Charm) + } + icon := iconMap[charmID.Path()] + svc := &service{ + name: name, + charmPath: charmID.Path(), + point: image.Point{int(x), int(y)}, + iconUrl: iconURL(charmID), + iconSrc: icon, + } + services[name] = svc + } + padding := image.Point{int(math.Floor(serviceBlockSize * 1.5)), int(math.Floor(serviceBlockSize * 0.5))} + for name := range servicesNeedingPlacement { + vertices := []image.Point{} + for n, svc := range services { + if !servicesNeedingPlacement[n] { + vertices = append(vertices, svc.point) + } + } + services[name].point = getPointOutside(vertices, padding) + servicesNeedingPlacement[name] = false + } + for _, name := range serviceNames { + canvas.addService(services[name]) + } + for _, relation := range b.Relations { + canvas.addRelation(&serviceRelation{ + serviceA: services[strings.Split(relation[0], ":")[0]], + serviceB: services[strings.Split(relation[1], ":")[0]], + }) + } + return &canvas, nil +} === added file 'src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go' --- src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/jujusvg_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,395 @@ +package jujusvg + +import ( + "bytes" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + gc "gopkg.in/check.v1" + "gopkg.in/juju/charm.v6-unstable" + + "gopkg.in/juju/jujusvg.v1/assets" +) + +func Test(t *testing.T) { gc.TestingT(t) } + +type newSuite struct{} + +var _ = gc.Suite(&newSuite{}) + +var bundle = ` +services: + mongodb: + charm: "cs:precise/mongodb-21" + num_units: 1 + annotations: + "gui-x": "940.5" + "gui-y": "388.7698359714502" + constraints: "mem=2G cpu-cores=1" + elasticsearch: + charm: "cs:~charming-devs/precise/elasticsearch-2" + num_units: 1 + annotations: + "gui-x": "490.5" + "gui-y": "369.7698359714502" + constraints: "mem=2G cpu-cores=1" + charmworld: + charm: "cs:~juju-jitsu/precise/charmworld-58" + num_units: 1 + expose: true + annotations: + "gui-x": "813.5" + "gui-y": "112.23016402854975" + options: + charm_import_limit: -1 + source: "lp:~bac/charmworld/ingest-local-charms" + revno: 511 +relations: + - - "charmworld:essearch" + - "elasticsearch:essearch" + - - "charmworld:database" + - "mongodb:database" +series: precise +` + +func iconURL(ref *charm.URL) string { + return "http://0.1.2.3/" + ref.Path() + ".svg" +} + +type emptyFetcher struct{} + +func (f *emptyFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { + return nil, nil +} + +type errFetcher string + +func (f *errFetcher) FetchIcons(*charm.BundleData) (map[string][]byte, error) { + return nil, fmt.Errorf("%s", *f) +} + +func (s *newSuite) TestNewFromBundle(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + +`+assets.ServiceModule+` + + + + + + + + + + + + + + + + + + + + + + + + + +charmworld + + + + +elasticsearch + + + + +mongodb + + + +`)) +} + +func (s *newSuite) TestNewFromBundleWithUnplacedService(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + b.Services["charmworld"].Annotations["gui-x"] = "" + b.Services["charmworld"].Annotations["gui-y"] = "" + + cvs, 
err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +charmworld + + + + +elasticsearch + + + + +mongodb + + + +`)) +} + +func (s *newSuite) TestWithFetcher(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + cvs, err := NewFromBundle(b, iconURL, new(emptyFetcher)) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + +`+assets.ServiceModule+` + + + + + + + + + + + + + + + + +charmworld + + + + +elasticsearch + + + + +mongodb + + + +`)) +} + +func (s *newSuite) TestDefaultHTTPFetcher(c *gc.C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "") + })) + defer ts.Close() + + tsIconUrl := func(ref *charm.URL) string { + return ts.URL + "/" + ref.Path() + ".svg" + } + + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + cvs, err := NewFromBundle(b, tsIconUrl, &HTTPFetcher{IconURL: tsIconUrl}) + c.Assert(err, gc.IsNil) + + var buf bytes.Buffer + cvs.Marshal(&buf) + c.Logf("%s", buf.String()) + assertXMLEqual(c, buf.Bytes(), []byte(` + + + + +`+assets.ServiceModule+` + + + + + + + + + + + + + + + + + + + +charmworld + + + + +elasticsearch + + + + +mongodb + + + +`)) + +} + +func (s *newSuite) TestFetcherError(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + err = b.Verify(nil, nil) + c.Assert(err, gc.IsNil) + + ef := errFetcher("bad-wolf") + _, err = NewFromBundle(b, iconURL, &ef) + c.Assert(err, gc.ErrorMatches, "bad-wolf") +} + +func (s *newSuite) TestWithBadBundle(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + b.Relations[0][0] = "evil-unknown-service" + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.ErrorMatches, "cannot verify bundle: .*") + c.Assert(cvs, gc.IsNil) +} + +func (s *newSuite) TestWithBadPosition(c *gc.C) { + b, err := charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + + b.Services["charmworld"].Annotations["gui-x"] = "bad" + cvs, err := NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.ErrorMatches, `service "charmworld" does not have a valid position`) + c.Assert(cvs, gc.IsNil) + + b, err = charm.ReadBundleData(strings.NewReader(bundle)) + c.Assert(err, gc.IsNil) + + b.Services["charmworld"].Annotations["gui-y"] = "bad" + cvs, err = NewFromBundle(b, iconURL, nil) + c.Assert(err, gc.ErrorMatches, `service "charmworld" does not have a valid position`) + c.Assert(cvs, gc.IsNil) +} === added file 'src/gopkg.in/juju/jujusvg.v1/svg.go' --- src/gopkg.in/juju/jujusvg.v1/svg.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/svg.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,98 @@ +package jujusvg + +import ( + "io" + + "github.com/juju/xml" + "gopkg.in/errgo.v1" +) + +const svgNamespace = "http://www.w3.org/2000/svg" + +// Process an icon SVG file from a reader, removing anything surrounding +// the tags, which would be invalid in this context (such as +// decls, directives, etc), writing out to a writer. In +// addition, loosely check that the icon is a valid SVG file. 
The id +// argument provides a unique identifier for the icon SVG so that it can +// be referenced within the bundle diagram. If an id attribute on the SVG +// tag already exists, it will be replaced with this argument. +func processIcon(r io.Reader, w io.Writer, id string) error { + dec := xml.NewDecoder(r) + dec.DefaultSpace = svgNamespace + + enc := xml.NewEncoder(w) + + svgStartFound := false + svgEndFound := false + depth := 0 + for depth < 1 { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return errgo.Notef(err, "cannot get token") + } + tag, ok := tok.(xml.StartElement) + if ok && tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { + svgStartFound = true + depth++ + tag.Attr = setXMLAttr(tag.Attr, xml.Name{ + Local: "id", + }, id) + if err := enc.EncodeToken(tag); err != nil { + return errgo.Notef(err, "cannot encode token %#v", tag) + } + } + } + for depth > 0 { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return errgo.Notef(err, "cannot get token") + } + switch tag := tok.(type) { + case xml.StartElement: + if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { + depth++ + } + case xml.EndElement: + if tag.Name.Space == svgNamespace && tag.Name.Local == "svg" { + depth-- + if depth == 0 { + svgEndFound = true + } + } + } + if err := enc.EncodeToken(tok); err != nil { + return errgo.Notef(err, "cannot encode token %#v", tok) + } + } + + if !svgStartFound || !svgEndFound { + return errgo.Newf("icon does not appear to be a valid SVG") + } + + if err := enc.Flush(); err != nil { + return err + } + + return nil +} + +// setXMLAttr returns the given attributes with the given attribute name set to +// val, adding an attribute if necessary. +func setXMLAttr(attrs []xml.Attr, name xml.Name, val string) []xml.Attr { + for i := range attrs { + if attrs[i].Name == name { + attrs[i].Value = val + return attrs + } + } + return append(attrs, xml.Attr{ + Name: name, + Value: val, + }) +} === added file 'src/gopkg.in/juju/jujusvg.v1/svg_test.go' --- src/gopkg.in/juju/jujusvg.v1/svg_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/juju/jujusvg.v1/svg_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,217 @@ +package jujusvg + +import ( + "bytes" + "fmt" + + "github.com/juju/xml" + gc "gopkg.in/check.v1" +) + +type SVGSuite struct{} + +var _ = gc.Suite(&SVGSuite{}) + +func (s *SVGSuite) TestProcessIcon(c *gc.C) { + tests := []struct { + about string + icon string + expected string + err string + }{ + { + about: "Nothing stripped", + icon: ` + + + + `, + expected: ` + + + `, + }, + { + about: "SVG inside an SVG", + icon: ` + + + + + + + `, + expected: ` + + + + + + `, + }, + { + about: "ProcInst at start stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "Directive at start stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "ProcInst at end stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "Directive at end stripped", + icon: ` + + + + + `, + expected: ` + + + `, + }, + { + about: "ProcInsts/Directives inside svg left in place", + icon: ` + + + + + + `, + expected: ` + + + + + `, + }, + { + about: "Not an SVG", + icon: ` + + bad-wolf + + `, + err: "icon does not appear to be a valid SVG", + }, + } + for i, test := range tests { + in := bytes.NewBuffer([]byte(test.icon)) + out := bytes.Buffer{} + err := processIcon(in, &out, fmt.Sprintf("test-%d", i)) + if test.err != "" { + c.Assert(err, gc.ErrorMatches, test.err) + } else { + c.Assert(err, gc.IsNil) 
+ assertXMLEqual(c, out.Bytes(), []byte(test.expected)) + } + } +} + +func (s *SVGSuite) TestSetXMLAttr(c *gc.C) { + // Attribute is added. + expected := []xml.Attr{ + { + Name: xml.Name{ + Local: "id", + }, + Value: "foo", + }, + } + + result := setXMLAttr([]xml.Attr{}, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) + + // Attribute is changed. + result = setXMLAttr([]xml.Attr{ + { + Name: xml.Name{ + Local: "id", + }, + Value: "bar", + }, + }, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) + + // Attribute is changed, existing attributes unchanged. + expected = []xml.Attr{ + { + Name: xml.Name{ + Local: "class", + }, + Value: "bar", + }, + { + Name: xml.Name{ + Local: "id", + }, + Value: "foo", + }, + } + result = setXMLAttr([]xml.Attr{ + { + Name: xml.Name{ + Local: "class", + }, + Value: "bar", + }, + { + Name: xml.Name{ + Local: "id", + }, + Value: "bar", + }, + }, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) + + // Attribute is added, existing attributes unchanged. + result = setXMLAttr([]xml.Attr{ + { + Name: xml.Name{ + Local: "class", + }, + Value: "bar", + }, + }, xml.Name{ + Local: "id", + }, "foo") + c.Assert(result, gc.DeepEquals, expected) +} === removed directory 'src/gopkg.in/macaroon-bakery.v0' === removed file 'src/gopkg.in/macaroon-bakery.v0/.gitignore' --- src/gopkg.in/macaroon-bakery.v0/.gitignore 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/.gitignore 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -*.test === removed file 'src/gopkg.in/macaroon-bakery.v0/LICENSE' --- src/gopkg.in/macaroon-bakery.v0/LICENSE 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/LICENSE 1970-01-01 00:00:00 +0000 @@ -1,187 +0,0 @@ -Copyright © 2014, Roger Peppe, Canonical Inc. - -This software is licensed under the LGPLv3, included below. - -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. - -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. - - - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. 
- - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. 
- - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. - - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. 
- - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. === removed file 'src/gopkg.in/macaroon-bakery.v0/README.md' --- src/gopkg.in/macaroon-bakery.v0/README.md 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/README.md 1970-01-01 00:00:00 +0000 @@ -1,10 +0,0 @@ -# The macaroon bakery - -This repository is a companion to http://github.com/go-macaroon . -It holds higher level operations for building systems with macaroons. - -For documentation, see: - -- http://godoc.org/gopkg.in/macaroon-bakery.v0/bakery -- http://godoc.org/gopkg.in/macaroon-bakery.v0/httpbakery -- http://godoc.org/gopkg.in/macaroon-bakery.v0/bakery/checkers === removed file 'src/gopkg.in/macaroon-bakery.v0/TODO' --- src/gopkg.in/macaroon-bakery.v0/TODO 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/TODO 1970-01-01 00:00:00 +0000 @@ -1,7 +0,0 @@ -all: - - when API is stable, move to gopkg.in/macaroon.v1 - -macaroon: - - - change all signature calculations to correspond exactly - with libmacaroons. === removed directory 'src/gopkg.in/macaroon-bakery.v0/bakery' === removed directory 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers' === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers/checkers.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/checkers/checkers.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/checkers/checkers.go 1970-01-01 00:00:00 +0000 @@ -1,237 +0,0 @@ -// The checkers package provides some standard first-party -// caveat checkers and some primitives for combining them. -package checkers - -import ( - "fmt" - "net" - "strings" - - "gopkg.in/errgo.v1" -) - -// Constants for all the standard caveat conditions. -// First and third party caveat conditions are both defined here, -// even though notionally they exist in separate name spaces. -const ( - CondDeclared = "declared" - CondTimeBefore = "time-before" - CondClientIPAddr = "client-ip-addr" - CondError = "error" - CondNeedDeclared = "need-declared" -) - -// ErrCaveatNotRecognized is the cause of errors returned -// from caveat checkers when the caveat was not -// recognized. -var ErrCaveatNotRecognized = errgo.New("caveat not recognized") - -// Caveat represents a condition that must be true for a check to -// complete successfully. If Location is non-empty, the caveat must be -// discharged by a third party at the given location. -// This differs from macaroon.Caveat in that the condition -// is not encrypted. -type Caveat struct { - Location string - Condition string -} - -// Checker is implemented by types that can check caveats. -type Checker interface { - // Condition returns the identifier of the condition - // to be checked - the Check method will be used - // to check caveats with this identifier. - // - // It may return an empty string, in which case - // it will be used to check any condition - Condition() string - - // Check checks that the given caveat holds true. - // The condition and arg are as returned - // from ParseCaveat. - // - // For a checker with an empty condition, a - // return of bakery.ErrCaveatNotRecognised from - // this method indicates that the condition was - // not recognized. - Check(cond, arg string) error -} - -// New returns a new MultiChecker that uses all the -// provided Checkers to check caveats. 
If several checkers return the -// same condition identifier, all of them will be used. -// -// The cause of any error returned by a checker will be preserved. -// -// Note that because the returned checker implements Checker -// as well as bakery.FirstPartyChecker, calls to New can be nested. -// For example, a checker can be easily added to an existing -// MultiChecker, by doing: -// -// checker := checkers.New(old, another) -func New(checkers ...Checker) *MultiChecker { - return &MultiChecker{ - checkers: checkers, - } -} - -// MultiChecker implements bakery.FirstPartyChecker -// and Checker for a collection of checkers. -type MultiChecker struct { - // TODO it may be faster to initialize a map, but we'd - // be paying the price of creating and initializing - // the map vs a few linear scans through a probably-small - // slice. Let's wait for some real-world numbers. - checkers []Checker -} - -var errBadCaveat = errgo.Newf("bad caveat") - -// Check implements Checker.Check. -func (c *MultiChecker) Check(cond, arg string) error { - // Always check for the error caveat so that we're - // sure to get a nice error message even when there - // are no other checkers. This also prevents someone - // from inadvertently overriding the error condition. - if cond == CondError { - return errBadCaveat - } - checked := false - for _, c := range c.checkers { - checkerCond := c.Condition() - if checkerCond != "" && checkerCond != cond { - continue - } - if err := c.Check(cond, arg); err != nil { - if checkerCond == "" && errgo.Cause(err) == ErrCaveatNotRecognized { - continue - } - return errgo.Mask(err, errgo.Any) - } - checked = true - } - if !checked { - return ErrCaveatNotRecognized - } - return nil -} - -// Condition implements Checker.Condition. -func (c *MultiChecker) Condition() string { - return "" -} - -// CheckFirstPartyCaveat implements bakery.FirstPartyChecker.CheckFirstPartyCaveat. -func (c *MultiChecker) CheckFirstPartyCaveat(cav string) error { - cond, arg, err := ParseCaveat(cav) - if err != nil { - // If we can't parse it, perhaps it's in some other format, - // return a not-recognised error. - return errgo.WithCausef(err, ErrCaveatNotRecognized, "cannot parse caveat %q", cav) - } - if err := c.Check(cond, arg); err != nil { - return errgo.NoteMask(err, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any) - } - return nil -} - -// TODO add multiChecker.CheckThirdPartyCaveat ? -// i.e. make this stuff reusable for 3rd party caveats too. - -func firstParty(cond, arg string) Caveat { - return Caveat{ - Condition: cond + " " + arg, - } -} - -// CheckerFunc implements Checker for a function. -type CheckerFunc struct { - // Condition_ holds the condition that the checker - // implements. - Condition_ string - - // Check_ holds the function to call to make the check. - Check_ func(cond, arg string) error -} - -// Condition implements Checker.Condition. -func (f CheckerFunc) Condition() string { - return f.Condition_ -} - -// Check implements Checker.Check -func (f CheckerFunc) Check(cond, arg string) error { - return f.Check_(cond, arg) -} - -// Map is a checker where the various checkers -// are specified as entries in a map, one for each -// condition. -// The cond argument passed to the function -// is always the same as its corresponding key -// in the map. -type Map map[string]func(cond string, arg string) error - -// Condition implements Checker.Condition. 
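To make the composition rules above concrete, here is a minimal, self-contained sketch (not part of this diff) against the v0 checkers API shown in this file; the "operation" condition and its argument values are invented for illustration:

    package main

    import (
        "fmt"

        "gopkg.in/errgo.v1"
        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
    )

    func main() {
        checker := checkers.New(
            checkers.Map{
                // Called only for caveats whose condition is "operation".
                "operation": func(cond, arg string) error {
                    if arg != "read" {
                        return errgo.Newf("operation %q not allowed", arg)
                    }
                    return nil
                },
            },
        )
        fmt.Println(checker.CheckFirstPartyCaveat("operation read"))  // <nil>
        fmt.Println(checker.CheckFirstPartyCaveat("operation write")) // caveat "operation write" not satisfied: operation "write" not allowed
    }

Because MultiChecker itself implements Checker, the result of one New call can be passed to another, which is how the nested cases in the tests further down are built.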
-func (m Map) Condition() string { - return "" -} - -// Check implements Checker.Check -func (m Map) Check(cond, arg string) error { - f, ok := m[cond] - if !ok { - return ErrCaveatNotRecognized - } - if err := f(cond, arg); err != nil { - return errgo.Mask(err, errgo.Any) - } - return nil -} - -// ParseCaveat parses a caveat into an identifier, identifying the -// checker that should be used, and the argument to the checker (the -// rest of the string). -// -// The identifier is taken from all the characters before the first -// space character. -func ParseCaveat(cav string) (cond, arg string, err error) { - if cav == "" { - return "", "", fmt.Errorf("empty caveat") - } - i := strings.IndexByte(cav, ' ') - if i < 0 { - return cav, "", nil - } - if i == 0 { - return "", "", fmt.Errorf("caveat starts with space character") - } - return cav[0:i], cav[i+1:], nil -} - -// ClientIPAddrCaveat returns a caveat that will check whether the -// client's IP address is as provided. -// Note that the checkers package provides no specific -// implementation of the checker for this - that is -// left to external transport-specific packages. -func ClientIPAddrCaveat(addr net.IP) Caveat { - if len(addr) != net.IPv4len && len(addr) != net.IPv6len { - return ErrorCaveatf("bad IP address %d", []byte(addr)) - } - return firstParty(CondClientIPAddr, addr.String()) -} - -// ErrorCaveatf returns a caveat that will never be satisfied, holding -// the given fmt.Sprintf formatted text as the text of the caveat. -// -// This should only be used for highly unusual conditions that are never -// expected to happen in practice, such as a malformed key that is -// conventionally passed as a constant. It's not a panic but you should -// only use it in cases where a panic might possibly be appropriate. -// -// This mechanism means that caveats can be created without error -// checking and a later systematic check at a higher level (in the -// bakery package) can produce an error instead. -func ErrorCaveatf(f string, a ...interface{}) Caveat { - return firstParty(CondError, fmt.Sprintf(f, a...)) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers/checkers_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/checkers/checkers_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/checkers/checkers_test.go 1970-01-01 00:00:00 +0000 @@ -1,420 +0,0 @@ -package checkers_test - -import ( - "fmt" - "net" - "time" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -type CheckersSuite struct{} - -var _ = gc.Suite(&CheckersSuite{}) - -// Freeze time for the tests. 
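The grammar that ParseCaveat (above) implements is small enough to pin down in a sketch (not part of this diff; the caveat strings are invented): the identifier runs up to the first space, and the remainder, if any, is the argument.

    package main

    import (
        "fmt"

        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
    )

    func main() {
        cond, arg, err := checkers.ParseCaveat("declared username bob")
        fmt.Println(cond, arg, err) // "declared", "username bob", <nil>

        cond, arg, err = checkers.ParseCaveat("time-before")
        fmt.Println(cond, arg, err) // "time-before", "", <nil>: no argument

        _, _, err = checkers.ParseCaveat(" hello")
        fmt.Println(err) // caveat starts with space character
    }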
-var now = func() time.Time { - now, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.123Z") - if err != nil { - panic(err) - } - *checkers.TimeNow = func() time.Time { - return now - } - return now -}() - -type checkTest struct { - caveat string - expectError string - expectCause func(err error) bool -} - -var isCaveatNotRecognized = errgo.Is(checkers.ErrCaveatNotRecognized) - -var checkerTests = []struct { - about string - checker bakery.FirstPartyChecker - checks []checkTest -}{{ - about: "empty MultiChecker", - checker: checkers.New(), - checks: []checkTest{{ - caveat: "something", - expectError: `caveat "something" not satisfied: caveat not recognized`, - expectCause: isCaveatNotRecognized, - }, { - caveat: "", - expectError: `cannot parse caveat "": empty caveat`, - expectCause: isCaveatNotRecognized, - }, { - caveat: " hello", - expectError: `cannot parse caveat " hello": caveat starts with space character`, - expectCause: isCaveatNotRecognized, - }}, -}, { - about: "MultiChecker with some values", - checker: checkers.New( - argChecker("a", "aval"), - argChecker("b", "bval"), - ), - checks: []checkTest{{ - caveat: "a aval", - }, { - caveat: "b bval", - }, { - caveat: "a wrong", - expectError: `caveat "a wrong" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }}, -}, { - about: "MultiChecker with several of the same condition", - checker: checkers.New( - argChecker("a", "aval"), - argChecker("a", "bval"), - ), - checks: []checkTest{{ - caveat: "a aval", - expectError: `caveat "a aval" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }, { - caveat: "a bval", - expectError: `caveat "a bval" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }}, -}, { - about: "nested MultiChecker", - checker: checkers.New( - argChecker("a", "aval"), - argChecker("b", "bval"), - checkers.New( - argChecker("c", "cval"), - checkers.New( - argChecker("d", "dval"), - ), - argChecker("e", "eval"), - ), - ), - checks: []checkTest{{ - caveat: "a aval", - }, { - caveat: "b bval", - }, { - caveat: "c cval", - }, { - caveat: "d dval", - }, { - caveat: "e eval", - }, { - caveat: "a wrong", - expectError: `caveat "a wrong" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }, { - caveat: "c wrong", - expectError: `caveat "c wrong" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }, { - caveat: "d wrong", - expectError: `caveat "d wrong" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }, { - caveat: "f something", - expectError: `caveat "f something" not satisfied: caveat not recognized`, - expectCause: isCaveatNotRecognized, - }}, -}, { - about: "Map with no items", - checker: checkers.New( - checkers.Map{}, - ), - checks: []checkTest{{ - caveat: "a aval", - expectError: `caveat "a aval" not satisfied: caveat not recognized`, - expectCause: isCaveatNotRecognized, - }}, -}, { - about: "Map with some values", - checker: checkers.New( - checkers.Map{ - "a": argChecker("a", "aval").Check, - "b": argChecker("b", "bval").Check, - }, - ), - checks: []checkTest{{ - caveat: "a aval", - }, { - caveat: "b bval", - }, { - caveat: "a wrong", - expectError: `caveat "a wrong" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }, { - caveat: "b wrong", - expectError: `caveat "b wrong" not satisfied: wrong arg`, - expectCause: errgo.Is(errWrongArg), - }}, -}, { - about: "time within limit", - checker: checkers.New( - checkers.TimeBefore, - ), - checks: []checkTest{{ - caveat: 
checkers.TimeBeforeCaveat(now.Add(1)).Condition, - }, { - caveat: checkers.TimeBeforeCaveat(now).Condition, - expectError: `caveat "time-before 2006-01-02T15:04:05.123Z" not satisfied: macaroon has expired`, - }, { - caveat: checkers.TimeBeforeCaveat(now.Add(-1)).Condition, - expectError: `caveat "time-before 2006-01-02T15:04:05.122999999Z" not satisfied: macaroon has expired`, - }, { - caveat: `time-before bad-date`, - expectError: `caveat "time-before bad-date" not satisfied: parsing time "bad-date" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "bad-date" as "2006"`, - }, { - caveat: checkers.TimeBeforeCaveat(now).Condition + " ", - expectError: `caveat "time-before 2006-01-02T15:04:05.123Z " not satisfied: parsing time "2006-01-02T15:04:05.123Z ": extra text: `, - }}, -}, { - about: "declared, no entries", - checker: checkers.New(checkers.Declared{}), - checks: []checkTest{{ - caveat: checkers.DeclaredCaveat("a", "aval").Condition, - expectError: `caveat "declared a aval" not satisfied: got a=null, expected "aval"`, - }, { - caveat: checkers.CondDeclared, - expectError: `caveat "declared" not satisfied: declared caveat has no value`, - }}, -}, { - about: "declared, some entries", - checker: checkers.New(checkers.Declared{ - "a": "aval", - "b": "bval", - "spc": " a b", - }), - checks: []checkTest{{ - caveat: checkers.DeclaredCaveat("a", "aval").Condition, - }, { - caveat: checkers.DeclaredCaveat("b", "bval").Condition, - }, { - caveat: checkers.DeclaredCaveat("spc", " a b").Condition, - }, { - caveat: checkers.DeclaredCaveat("a", "bval").Condition, - expectError: `caveat "declared a bval" not satisfied: got a="aval", expected "bval"`, - }, { - caveat: checkers.DeclaredCaveat("a", " aval").Condition, - expectError: `caveat "declared a aval" not satisfied: got a="aval", expected " aval"`, - }, { - caveat: checkers.DeclaredCaveat("spc", "a b").Condition, - expectError: `caveat "declared spc a b" not satisfied: got spc=" a b", expected "a b"`, - }, { - caveat: checkers.DeclaredCaveat("", "a b").Condition, - expectError: `caveat "error invalid caveat 'declared' key \\"\\"" not satisfied: bad caveat`, - }, { - caveat: checkers.DeclaredCaveat("a b", "a b").Condition, - expectError: `caveat "error invalid caveat 'declared' key \\"a b\\"" not satisfied: bad caveat`, - }}, -}, { - about: "error caveat", - checker: checkers.New(), - checks: []checkTest{{ - caveat: checkers.ErrorCaveatf("").Condition, - expectError: `caveat "error " not satisfied: bad caveat`, - }, { - caveat: checkers.ErrorCaveatf("something %d", 134).Condition, - expectError: `caveat "error something 134" not satisfied: bad caveat`, - }}, -}, { - about: "error caveat overrides other", - checker: checkers.New(argChecker("error", "something")), - checks: []checkTest{{ - caveat: checkers.ErrorCaveatf("something").Condition, - expectError: `caveat "error something" not satisfied: bad caveat`, - }}, -}} - -var errWrongArg = errgo.New("wrong arg") - -func argChecker(expectCond, checkArg string) checkers.Checker { - return checkers.CheckerFunc{ - Condition_: expectCond, - Check_: func(cond, arg string) error { - if cond != expectCond { - panic(fmt.Errorf("got condition %q want %q", cond, expectCond)) - } - if arg != checkArg { - return errWrongArg - } - return nil - }, - } -} - -func (s *CheckersSuite) TestCheckers(c *gc.C) { - for i, test := range checkerTests { - c.Logf("test %d: %s", i, test.about) - for j, check := range test.checks { - c.Logf("\tcheck %d", j) - err := test.checker.CheckFirstPartyCaveat(check.caveat) - if 
check.expectError != "" { - c.Assert(err, gc.ErrorMatches, check.expectError) - if check.expectCause == nil { - check.expectCause = errgo.Any - } - c.Assert(check.expectCause(errgo.Cause(err)), gc.Equals, true) - } else { - c.Assert(err, gc.IsNil) - } - } - } -} - -func (s *CheckersSuite) TestClientIPAddrCaveat(c *gc.C) { - cav := checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}) - c.Assert(cav, gc.Equals, checkers.Caveat{ - Condition: "client-ip-addr 127.0.0.1", - }) - cav = checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")) - c.Assert(cav, gc.Equals, checkers.Caveat{ - Condition: "client-ip-addr 2001:4860:0:2001::68", - }) - cav = checkers.ClientIPAddrCaveat(nil) - c.Assert(cav, gc.Equals, checkers.Caveat{ - Condition: "error bad IP address []", - }) - cav = checkers.ClientIPAddrCaveat(net.IP{123, 3}) - c.Assert(cav, gc.Equals, checkers.Caveat{ - Condition: "error bad IP address [123 3]", - }) -} - -var inferDeclaredTests = []struct { - about string - caveats [][]checkers.Caveat - expect checkers.Declared -}{{ - about: "no macaroons", - expect: checkers.Declared{}, -}, { - about: "single macaroon with one declaration", - caveats: [][]checkers.Caveat{{{ - Condition: "declared foo bar", - }}}, - expect: checkers.Declared{ - "foo": "bar", - }, -}, { - about: "only one argument to declared", - caveats: [][]checkers.Caveat{{{ - Condition: "declared foo", - }}}, - expect: checkers.Declared{}, -}, { - about: "spaces in value", - caveats: [][]checkers.Caveat{{{ - Condition: "declared foo bar bloggs", - }}}, - expect: checkers.Declared{ - "foo": "bar bloggs", - }, -}, { - about: "attribute with declared prefix", - caveats: [][]checkers.Caveat{{{ - Condition: "declaredccf foo", - }}}, - expect: checkers.Declared{}, -}, { - about: "several macaroons with different declares", - caveats: [][]checkers.Caveat{{ - checkers.DeclaredCaveat("a", "aval"), - checkers.DeclaredCaveat("b", "bval"), - }, { - checkers.DeclaredCaveat("c", "cval"), - checkers.DeclaredCaveat("d", "dval"), - }}, - expect: checkers.Declared{ - "a": "aval", - "b": "bval", - "c": "cval", - "d": "dval", - }, -}, { - about: "duplicate values", - caveats: [][]checkers.Caveat{{ - checkers.DeclaredCaveat("a", "aval"), - checkers.DeclaredCaveat("a", "aval"), - checkers.DeclaredCaveat("b", "bval"), - }, { - checkers.DeclaredCaveat("a", "aval"), - checkers.DeclaredCaveat("b", "bval"), - checkers.DeclaredCaveat("c", "cval"), - checkers.DeclaredCaveat("d", "dval"), - }}, - expect: checkers.Declared{ - "a": "aval", - "b": "bval", - "c": "cval", - "d": "dval", - }, -}, { - about: "conflicting values", - caveats: [][]checkers.Caveat{{ - checkers.DeclaredCaveat("a", "aval"), - checkers.DeclaredCaveat("a", "conflict"), - checkers.DeclaredCaveat("b", "bval"), - }, { - checkers.DeclaredCaveat("a", "conflict"), - checkers.DeclaredCaveat("b", "another conflict"), - checkers.DeclaredCaveat("c", "cval"), - checkers.DeclaredCaveat("d", "dval"), - }}, - expect: checkers.Declared{ - "c": "cval", - "d": "dval", - }, -}, { - about: "third party caveats ignored", - caveats: [][]checkers.Caveat{{{ - Condition: "declared a no conflict", - Location: "location", - }, - checkers.DeclaredCaveat("a", "aval"), - }}, - expect: checkers.Declared{ - "a": "aval", - }, -}, { - about: "unparseable caveats ignored", - caveats: [][]checkers.Caveat{{{ - Condition: " bad", - }, - checkers.DeclaredCaveat("a", "aval"), - }}, - expect: checkers.Declared{ - "a": "aval", - }, -}} - -func (*CheckersSuite) TestInferDeclared(c *gc.C) { - for i, test := range 
inferDeclaredTests { - c.Logf("test %d: %s", i, test.about) - ms := make(macaroon.Slice, len(test.caveats)) - for i, caveats := range test.caveats { - m, err := macaroon.New(nil, fmt.Sprint(i), "") - c.Assert(err, gc.IsNil) - for _, cav := range caveats { - if cav.Location == "" { - m.AddFirstPartyCaveat(cav.Condition) - } else { - m.AddThirdPartyCaveat(nil, cav.Condition, cav.Location) - } - } - ms[i] = m - } - c.Assert(checkers.InferDeclared(ms), jc.DeepEquals, test.expect) - } -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers/declared.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/checkers/declared.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/checkers/declared.go 1970-01-01 00:00:00 +0000 @@ -1,110 +0,0 @@ -package checkers - -import ( - "strings" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" -) - -// DeclaredCaveat returns a "declared" caveat asserting that the given key is -// set to the given value. If a macaroon has exactly one first party -// caveat asserting the value of a particular key, then InferDeclared -// will be able to infer the value, and then DeclaredChecker will allow -// the declared value if it has the value specified here. -// -// If the key is empty or contains a space, DeclaredCaveat -// will return an error caveat. -func DeclaredCaveat(key string, value string) Caveat { - if strings.Contains(key, " ") || key == "" { - return ErrorCaveatf("invalid caveat 'declared' key %q", key) - } - return firstParty(CondDeclared, key+" "+value) -} - -// NeedDeclaredCaveat returns a third party caveat that -// wraps the provided third party caveat and requires -// that the third party must add "declared" caveats for -// all the named keys. -func NeedDeclaredCaveat(cav Caveat, keys ...string) Caveat { - if cav.Location == "" { - return ErrorCaveatf("need-declared caveat is not third-party") - } - return Caveat{ - Location: cav.Location, - Condition: CondNeedDeclared + " " + strings.Join(keys, ",") + " " + cav.Condition, - } -} - -// Declared implements a checker that will -// check that any "declared" caveats have a matching -// key for their value in the map. -type Declared map[string]string - -// Condition implements Checker.Condition. -func (c Declared) Condition() string { - return CondDeclared -} - -// Check implements Checker.Check by checking that the given -// argument holds a key in the map with a matching value. -func (c Declared) Check(_, arg string) error { - // Note that we don't need to check the condition argument - // here because it has been specified explicitly in the - // return from the Condition method. - parts := strings.SplitN(arg, " ", 2) - if len(parts) != 2 { - return errgo.Newf("declared caveat has no value") - } - val, ok := c[parts[0]] - if !ok { - return errgo.Newf("got %s=null, expected %q", parts[0], parts[1]) - } - if val != parts[1] { - return errgo.Newf("got %s=%q, expected %q", parts[0], val, parts[1]) - } - return nil -} - -// InferDeclared retrieves any declared information from -// the given macaroons and returns it as a key-value map. -// -// Information is declared with a first party caveat as created -// by DeclaredCaveat. -// -// If there are two caveats that declare the same key with -// different values, the information is omitted from the map. -// When the caveats are later checked, this will cause the -// check to fail. 
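Before the implementation of InferDeclared below, a short sketch (not part of this diff; the keys and values are invented) of the behaviour the comment above describes, including how a conflicting declaration drops its key from the result:

    package main

    import (
        "fmt"

        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
        "gopkg.in/macaroon.v1"
    )

    func main() {
        m, err := macaroon.New(nil, "an-id", "a-location")
        if err != nil {
            panic(err)
        }
        m.AddFirstPartyCaveat(checkers.DeclaredCaveat("username", "bob").Condition)
        // Two different values declared for the same key conflict,
        // so "domain" is omitted from the result.
        m.AddFirstPartyCaveat(checkers.DeclaredCaveat("domain", "one").Condition)
        m.AddFirstPartyCaveat(checkers.DeclaredCaveat("domain", "two").Condition)

        fmt.Println(checkers.InferDeclared(macaroon.Slice{m})) // map[username:bob]
    }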
-func InferDeclared(ms macaroon.Slice) Declared { - var conflicts []string - info := make(Declared) - for _, m := range ms { - for _, cav := range m.Caveats() { - if cav.Location != "" { - continue - } - name, rest, err := ParseCaveat(cav.Id) - if err != nil { - continue - } - if name != CondDeclared { - continue - } - parts := strings.SplitN(rest, " ", 2) - if len(parts) != 2 { - continue - } - key, val := parts[0], parts[1] - if oldVal, ok := info[key]; ok && oldVal != val { - conflicts = append(conflicts, key) - continue - } - info[key] = val - } - } - for _, key := range conflicts { - delete(info, key) - } - return info -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers/export_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/checkers/export_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/checkers/export_test.go 1970-01-01 00:00:00 +0000 @@ -1,3 +0,0 @@ -package checkers - -var TimeNow = &timeNow === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers/package_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/checkers/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/checkers/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package checkers_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/checkers/time.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/checkers/time.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/checkers/time.go 1970-01-01 00:00:00 +0000 @@ -1,32 +0,0 @@ -package checkers - -import ( - "fmt" - "time" - - "gopkg.in/errgo.v1" -) - -var timeNow = time.Now - -// TimeBefore is a checker that checks caveats -// as created by TimeBeforeCaveat. -var TimeBefore = CheckerFunc{ - Condition_: CondTimeBefore, - Check_: func(_, cav string) error { - t, err := time.Parse(time.RFC3339Nano, cav) - if err != nil { - return errgo.Mask(err) - } - if !timeNow().Before(t) { - return fmt.Errorf("macaroon has expired") - } - return nil - }, -} - -// TimeBeforeCaveat returns a caveat that specifies that -// the time that it is checked should be before t. -func TimeBeforeCaveat(t time.Time) Caveat { - return firstParty(CondTimeBefore, t.UTC().Format(time.RFC3339Nano)) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/codec.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/codec.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/codec.go 1970-01-01 00:00:00 +0000 @@ -1,143 +0,0 @@ -package bakery - -import ( - "bytes" - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - - "golang.org/x/crypto/nacl/box" - - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -type caveatIdRecord struct { - RootKey []byte - Condition string -} - -// caveatId defines the format of a third party caveat id. -type caveatId struct { - ThirdPartyPublicKey *PublicKey - FirstPartyPublicKey *PublicKey - Nonce []byte - Id string -} - -// boxEncoder encodes caveat ids confidentially to a third-party service using -// authenticated public key encryption compatible with NaCl box. -type boxEncoder struct { - locator PublicKeyLocator - key *KeyPair -} - -// newBoxEncoder creates a new boxEncoder with the given public key pair and -// third-party public key locator function. 
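newBoxEncoder (below) and its decoder counterpart wrap golang.org/x/crypto/nacl/box; a standalone sketch (not part of this diff; the JSON payload is an illustrative stand-in for a caveatIdRecord) of the same seal/open round trip:

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/nacl/box"
    )

    func main() {
        // Key pairs for the first party (the macaroon minter) and the
        // third party (the discharger).
        fpPub, fpPriv, err := box.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }
        tpPub, tpPriv, err := box.GenerateKey(rand.Reader)
        if err != nil {
            panic(err)
        }

        var nonce [24]byte
        if _, err := rand.Read(nonce[:]); err != nil {
            panic(err)
        }

        record := []byte(`{"RootKey":"c2VjcmV0","Condition":"example-condition"}`)

        // Seal to the third party's public key with the first party's
        // private key, as encodeCaveatId does with the caveat record.
        sealed := box.Seal(nil, record, &nonce, tpPub, fpPriv)

        // The third party opens it the other way around, as
        // decodeCaveatId does.
        plain, ok := box.Open(nil, sealed, &nonce, fpPub, tpPriv)
        fmt.Println(ok, string(plain)) // true, then the record
    }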
-func newBoxEncoder(locator PublicKeyLocator, key *KeyPair) *boxEncoder { - return &boxEncoder{ - key: key, - locator: locator, - } -} - -func (enc *boxEncoder) encodeCaveatId(cav checkers.Caveat, rootKey []byte) (string, error) { - if cav.Location == "" { - return "", fmt.Errorf("cannot make caveat id for first party caveat") - } - thirdPartyPub, err := enc.locator.PublicKeyForLocation(cav.Location) - if err != nil { - return "", err - } - id, err := enc.newCaveatId(cav, rootKey, thirdPartyPub) - if err != nil { - return "", err - } - data, err := json.Marshal(id) - if err != nil { - return "", fmt.Errorf("cannot marshal %#v: %v", id, err) - } - return base64.StdEncoding.EncodeToString(data), nil -} - -func (enc *boxEncoder) newCaveatId(cav checkers.Caveat, rootKey []byte, thirdPartyPub *PublicKey) (*caveatId, error) { - var nonce [NonceLen]byte - if _, err := rand.Read(nonce[:]); err != nil { - return nil, fmt.Errorf("cannot generate random number for nonce: %v", err) - } - plain := caveatIdRecord{ - RootKey: rootKey, - Condition: cav.Condition, - } - plainData, err := json.Marshal(&plain) - if err != nil { - return nil, fmt.Errorf("cannot marshal %#v: %v", &plain, err) - } - sealed := box.Seal(nil, plainData, &nonce, thirdPartyPub.boxKey(), enc.key.Private.boxKey()) - return &caveatId{ - ThirdPartyPublicKey: thirdPartyPub, - FirstPartyPublicKey: &enc.key.Public, - Nonce: nonce[:], - Id: base64.StdEncoding.EncodeToString(sealed), - }, nil -} - -// boxDecoder decodes caveat ids for third-party service that were encoded to -// the third-party with authenticated public key encryption compatible with -// NaCl box. -type boxDecoder struct { - key *KeyPair -} - -// newBoxDecoder creates a new BoxDecoder using the given key pair. -func newBoxDecoder(key *KeyPair) *boxDecoder { - return &boxDecoder{ - key: key, - } -} - -func (d *boxDecoder) decodeCaveatId(id string) (rootKey []byte, condition string, err error) { - data, err := base64.StdEncoding.DecodeString(id) - if err != nil { - return nil, "", fmt.Errorf("cannot base64-decode caveat id: %v", err) - } - var tpid caveatId - if err := json.Unmarshal(data, &tpid); err != nil { - return nil, "", fmt.Errorf("cannot unmarshal caveat id %q: %v", data, err) - } - var recordData []byte - - recordData, err = d.encryptedCaveatId(tpid) - if err != nil { - return nil, "", err - } - var record caveatIdRecord - if err := json.Unmarshal(recordData, &record); err != nil { - return nil, "", fmt.Errorf("cannot decode third party caveat record: %v", err) - } - return record.RootKey, record.Condition, nil -} - -func (d *boxDecoder) encryptedCaveatId(id caveatId) ([]byte, error) { - if d.key == nil { - return nil, fmt.Errorf("no public key for caveat id decryption") - } - if !bytes.Equal(d.key.Public.Key[:], id.ThirdPartyPublicKey.Key[:]) { - return nil, fmt.Errorf("public key mismatch") - } - var nonce [NonceLen]byte - if len(id.Nonce) != len(nonce) { - return nil, fmt.Errorf("bad nonce length") - } - copy(nonce[:], id.Nonce) - - sealed, err := base64.StdEncoding.DecodeString(id.Id) - if err != nil { - return nil, fmt.Errorf("cannot base64-decode encrypted caveat id: %v", err) - } - out, ok := box.Open(nil, sealed, &nonce, id.FirstPartyPublicKey.boxKey(), d.key.Private.boxKey()) - if !ok { - return nil, fmt.Errorf("decryption of public-key encrypted caveat id %#v failed", id) - } - return out, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/discharge.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/discharge.go 2015-09-22 15:27:01 +0000 +++ 
src/gopkg.in/macaroon-bakery.v0/bakery/discharge.go 1970-01-01 00:00:00 +0000 @@ -1,45 +0,0 @@ -package bakery - -import ( - "fmt" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" -) - -// DischargeAll gathers discharge macaroons for all the third party caveats -// in m (and any subsequent caveats required by those) using getDischarge to -// acquire each discharge macaroon. -// It returns a slice with m as the first element, followed by -// all the discharge macaroons. All the discharge macaroons -// will be bound to the primary macaroon. -func DischargeAll( - m *macaroon.Macaroon, - getDischarge func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error), -) (macaroon.Slice, error) { - sig := m.Signature() - discharges := macaroon.Slice{m} - var need []macaroon.Caveat - addCaveats := func(m *macaroon.Macaroon) { - for _, cav := range m.Caveats() { - if cav.Location == "" { - continue - } - need = append(need, cav) - } - } - addCaveats(m) - firstPartyLocation := m.Location() - for len(need) > 0 { - cav := need[0] - need = need[1:] - dm, err := getDischarge(firstPartyLocation, cav) - if err != nil { - return nil, errgo.NoteMask(err, fmt.Sprintf("cannot get discharge from %q", cav.Location), errgo.Any) - } - dm.Bind(sig) - discharges = append(discharges, dm) - addCaveats(dm) - } - return discharges, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/discharge_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/discharge_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/discharge_test.go 1970-01-01 00:00:00 +0000 @@ -1,69 +0,0 @@ -package bakery_test - -import ( - "fmt" - - gc "gopkg.in/check.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" -) - -type DischargeSuite struct{} - -var _ = gc.Suite(&DischargeSuite{}) - -func alwaysOK(string) error { - return nil -} - -func (*DischargeSuite) TestDischargeAllNoDischarges(c *gc.C) { - rootKey := []byte("root key") - m, err := macaroon.New(rootKey, "id0", "loc0") - c.Assert(err, gc.IsNil) - getDischarge := func(string, macaroon.Caveat) (*macaroon.Macaroon, error) { - c.Errorf("getDischarge called unexpectedly") - return nil, fmt.Errorf("nothing") - } - ms, err := bakery.DischargeAll(m, getDischarge) - c.Assert(err, gc.IsNil) - c.Assert(ms, gc.HasLen, 1) - c.Assert(ms[0], gc.Equals, m) - - err = m.Verify(rootKey, alwaysOK, nil) - c.Assert(err, gc.IsNil) -} - -func (*DischargeSuite) TestDischargeAllManyDischarges(c *gc.C) { - rootKey := []byte("root key") - m0, err := macaroon.New(rootKey, "id0", "location0") - c.Assert(err, gc.IsNil) - totalRequired := 40 - id := 1 - addCaveats := func(m *macaroon.Macaroon) { - for i := 0; i < 2; i++ { - if totalRequired == 0 { - break - } - cid := fmt.Sprint("id", id) - err := m.AddThirdPartyCaveat([]byte("root key "+cid), cid, "somewhere") - c.Assert(err, gc.IsNil) - id++ - totalRequired-- - } - } - addCaveats(m0) - getDischarge := func(loc string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - c.Assert(loc, gc.Equals, "location0") - m, err := macaroon.New([]byte("root key "+cav.Id), cav.Id, "") - c.Assert(err, gc.IsNil) - addCaveats(m) - return m, nil - } - ms, err := bakery.DischargeAll(m0, getDischarge) - c.Assert(err, gc.IsNil) - c.Assert(ms, gc.HasLen, 41) - - err = ms[0].Verify(rootKey, alwaysOK, ms[1:]) - c.Assert(err, gc.IsNil) -} === removed directory 'src/gopkg.in/macaroon-bakery.v0/bakery/example' === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/example/authservice.go' --- 
src/gopkg.in/macaroon-bakery.v0/bakery/example/authservice.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/example/authservice.go 1970-01-01 00:00:00 +0000 @@ -1,43 +0,0 @@ -package main - -import ( - "net/http" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -// authService implements an authorization service, -// that can discharge third-party caveats added -// to other macaroons. -func authService(endpoint string, key *bakery.KeyPair) (http.Handler, error) { - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: endpoint, - Key: key, - Locator: bakery.NewPublicKeyRing(), - }) - if err != nil { - return nil, err - } - mux := http.NewServeMux() - httpbakery.AddDischargeHandler(mux, "/", svc, thirdPartyChecker) - return mux, nil -} - -// thirdPartyChecker is used to check third party caveats added by other -// services. The HTTP request is that of the client - it is attempting -// to gather a discharge macaroon. -// -// Note how this function can return additional first- and third-party -// caveats which will be added to the original macaroon's caveats. -func thirdPartyChecker(req *http.Request, cavId, condition string) ([]checkers.Caveat, error) { - if condition != "access-allowed" { - return nil, checkers.ErrCaveatNotRecognized - } - // TODO check that the HTTP request has cookies that prove - // something about the client. - return []checkers.Caveat{ - httpbakery.SameClientIPAddrCaveat(req), - }, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/example/client.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/example/client.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/example/client.go 1970-01-01 00:00:00 +0000 @@ -1,44 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - - "gopkg.in/errgo.v1" - - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -// client represents a client of the target service. -// In this simple example, it just tries a GET -// request, which will fail unless the client -// has the required authorization. -func clientRequest(httpClient *http.Client, serverEndpoint string) (string, error) { - req, err := http.NewRequest("GET", serverEndpoint, nil) - if err != nil { - return "", errgo.Notef(err, "cannot make new HTTP request") - } - // The Do function implements the mechanics - // of actually gathering discharge macaroons - // when required, and retrying the request - // when necessary. 
- - visitWebPage := func(url *url.URL) error { - fmt.Printf("please visit this web page:\n") - fmt.Printf("\t%s\n", url) - return nil - } - resp, err := httpbakery.Do(httpClient, req, visitWebPage) - if err != nil { - return "", errgo.NoteMask(err, "GET failed", errgo.Any) - } - defer resp.Body.Close() - // TODO(rog) unmarshal error - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("cannot read response: %v", err) - } - return string(data), nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/example/example_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/example/example_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/example/example_test.go 1970-01-01 00:00:00 +0000 @@ -1,63 +0,0 @@ -package main - -import ( - "net/http" - "testing" - - gc "gopkg.in/check.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} - -type exampleSuite struct { - authEndpoint string - authPublicKey *bakery.PublicKey -} - -var _ = gc.Suite(&exampleSuite{}) - -func (s *exampleSuite) SetUpSuite(c *gc.C) { - key, err := bakery.GenerateKey() - c.Assert(err, gc.IsNil) - s.authPublicKey = &key.Public - s.authEndpoint, err = serve(func(endpoint string) (http.Handler, error) { - return authService(endpoint, key) - }) - c.Assert(err, gc.IsNil) -} - -func (s *exampleSuite) TestExample(c *gc.C) { - httpClient := httpbakery.NewHTTPClient() - serverEndpoint, err := serve(func(endpoint string) (http.Handler, error) { - return targetService(endpoint, s.authEndpoint, s.authPublicKey) - }) - c.Assert(err, gc.IsNil) - c.Logf("gold request") - resp, err := clientRequest(httpClient, serverEndpoint+"/gold") - c.Assert(err, gc.IsNil) - c.Assert(resp, gc.Equals, "all is golden") - - c.Logf("silver request") - resp, err = clientRequest(httpClient, serverEndpoint+"/silver") - c.Assert(err, gc.IsNil) - c.Assert(resp, gc.Equals, "every cloud has a silver lining") -} - -func (s *exampleSuite) BenchmarkExample(c *gc.C) { - httpClient := httpbakery.NewHTTPClient() - serverEndpoint, err := serve(func(endpoint string) (http.Handler, error) { - return targetService(endpoint, s.authEndpoint, s.authPublicKey) - }) - c.Assert(err, gc.IsNil) - c.ResetTimer() - for i := 0; i < c.N; i++ { - resp, err := clientRequest(httpClient, serverEndpoint) - c.Assert(err, gc.IsNil) - c.Assert(resp, gc.Equals, "hello, world\n") - } -} === removed directory 'src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice' === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice/idservice.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice/idservice.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice/idservice.go 1970-01-01 00:00:00 +0000 @@ -1,461 +0,0 @@ -package idservice - -import ( - "fmt" - "html/template" - "log" - "net/http" - - "github.com/juju/utils/jsonhttp" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakery/example/meeting" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -var ( - handleJSON = jsonhttp.HandleJSON(errorToResponse) -) - -const ( - cookieUser = "username" -) - -// handler implements http.Handler to serve the name space -// provided by the id service. -type handler struct { - svc *bakery.Service - place *place - users map[string]*UserInfo -} - -// UserInfo holds information about a user. 
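Before the UserInfo and Params types are declared below, a compressed sketch of wiring the identity service from the outside; the account, password, and address are illustrative.

package main

import (
	"log"
	"net/http"

	"gopkg.in/macaroon-bakery.v0/bakery"
	"gopkg.in/macaroon-bakery.v0/bakery/example/idservice"
)

func main() {
	key, err := bakery.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	handler, err := idservice.New(idservice.Params{
		Service: bakery.NewServiceParams{
			Location: "http://localhost:8081", // illustrative
			Store:    bakery.NewMemStorage(),
			Key:      key,
			Locator:  bakery.NewPublicKeyRing(),
		},
		Users: map[string]*idservice.UserInfo{
			// Illustrative account; Groups gates member-of-group caveats.
			"alice": {Password: "hunter2", Groups: map[string]bool{"admin": true}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8081", handler))
}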
-type UserInfo struct { - Password string - Groups map[string]bool -} - -// Params holds parameters for New. -type Params struct { - Service bakery.NewServiceParams - Users map[string]*UserInfo -} - -// New returns a new handler that services an identity-providing -// service. This acts as a login service and can discharge third-party caveats -// for users. -func New(p Params) (http.Handler, error) { - svc, err := bakery.NewService(p.Service) - if err != nil { - return nil, err - } - h := &handler{ - svc: svc, - users: p.Users, - place: &place{meeting.New()}, - } - mux := http.NewServeMux() - httpbakery.AddDischargeHandler(mux, "/", svc, h.checkThirdPartyCaveat) - mux.Handle("/user/", handleJSON(h.userHandler)) - mux.HandleFunc("/login", h.loginHandler) - mux.Handle("/question", handleJSON(h.questionHandler)) - mux.Handle("/wait", handleJSON(h.waitHandler)) - mux.HandleFunc("/loginattempt", h.loginAttemptHandler) - return mux, nil -} - -// userHandler handles requests to add new users, change user details, etc. -// It is only accessible to users that are members of the admin group. -func (h *handler) userHandler(_ http.Header, req *http.Request) (interface{}, error) { - ctxt := h.newContext(req, "change-user") - if _, err := httpbakery.CheckRequest(h.svc, req, nil, ctxt); err != nil { - // TODO do this only if the error cause is *bakery.VerificationError - // We issue a macaroon with a third-party caveat targetting - // the id service itself. This means that the flow for self-created - // macaroons is just the same as for any other service. - // Theoretically, we could just redirect the user to the - // login page, but that would require a different flow - // and it's not clear that it would be an advantage. - m, err := h.svc.NewMacaroon("", nil, []checkers.Caveat{{ - Location: h.svc.Location(), - Condition: "member-of-group admin", - }, { - Condition: "operation change-user", - }}) - if err != nil { - return nil, errgo.Notef(err, "cannot mint new macaroon") - } - return nil, &httpbakery.Error{ - Message: err.Error(), - Code: httpbakery.ErrDischargeRequired, - Info: &httpbakery.ErrorInfo{ - Macaroon: m, - }, - } - } - // PUT /user/$user - create new user - // PUT /user/$user/group-membership - change group membership of user - return nil, errgo.New("not implemented yet") -} - -type loginPageParams struct { - WaitId string -} - -var loginPage = template.Must(template.New("").Parse(` - - -
-<form action="/loginattempt" method="POST">
-User name: <input type="text" name="user"><br>
-Password: <input type="password" name="password"><br>
-<input type="hidden" name="waitid" value="{{.WaitId}}">
-<button type="submit">Log in</button>
-</form>
- - -`)) - -// loginHandler serves up a login page for the user to interact with, -// having been redirected there as part of a macaroon discharge requirement. -// This is a proxy for any third-party authorization service. -func (h *handler) loginHandler(w http.ResponseWriter, req *http.Request) { - req.ParseForm() - waitId := req.Form.Get("waitid") - if waitId == "" { - http.Error(w, "wait id not found in form", http.StatusBadRequest) - return - } - err := loginPage.Execute(w, loginPageParams{ - WaitId: waitId, - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -// loginAttemptHandler is invoked when a user clicks on the "Log in" -// button on the login page. It checks the credentials and then -// completes the rendezvous, allowing the original wait -// request to complete. -func (h *handler) loginAttemptHandler(w http.ResponseWriter, req *http.Request) { - log.Printf("login attempt %s", req.URL) - req.ParseForm() - waitId := req.Form.Get("waitid") - if waitId == "" { - http.Error(w, "wait id not found in form", http.StatusBadRequest) - return - } - user := req.Form.Get("user") - info, ok := h.users[user] - if !ok { - http.Error(w, fmt.Sprintf("user %q not found", user), http.StatusUnauthorized) - return - } - if req.Form.Get("password") != info.Password { - http.Error(w, "bad password", http.StatusUnauthorized) - return - } - - // User and password match; we can allow the user - // to have a macaroon that they can use later to prove - // to us that they have logged in. We also add a cookie - // to hold the logged in user name. - m, err := h.svc.NewMacaroon("", nil, []checkers.Caveat{{ - Condition: "user-is " + user, - }}) - // TODO(rog) when this fails, we should complete the rendezvous - // to cause the wait request to complete with an appropriate error. - if err != nil { - http.Error(w, "cannot mint macaroon: "+err.Error(), http.StatusInternalServerError) - return - } - cookie, err := httpbakery.NewCookie(macaroon.Slice{m}) - if err != nil { - http.Error(w, "cannot make cookie: "+err.Error(), http.StatusInternalServerError) - return - } - http.SetCookie(w, cookie) - http.SetCookie(w, &http.Cookie{ - Path: "/", - Name: cookieUser, - Value: user, - }) - h.place.Done(waitId, &loginInfo{ - User: user, - }) -} - -// checkThirdPartyCaveat is called by the httpbakery discharge handler. -func (h *handler) checkThirdPartyCaveat(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { - return h.newContext(req, "").CheckThirdPartyCaveat(cavId, cav) -} - -// newContext returns a new caveat-checking context -// for the client making the given request. -func (h *handler) newContext(req *http.Request, operation string) *context { - // Determine the current logged-in user, if any. - var username string - for _, c := range req.Cookies() { - if c.Name == cookieUser { - // TODO could potentially allow several concurrent - // logins - caveats asking about current user privilege - // could be satisfied if any of the user names had that - // privilege. - username = c.Value - break - } - } - if username == "" { - log.Printf("not logged in") - } else { - log.Printf("logged in as %q", username) - } - return &context{ - handler: h, - req: req, - svc: h.svc, - declaredUser: username, - operation: operation, - } -} - -// needLogin returns an error suitable for returning -// from a discharge request that can only be satisfied -// if the user logs in. 
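needLogin (below) returns an interaction-required error carrying a visit URL and a wait URL. A client that does not use httpbakery.Do would handle it roughly as in this sketch, which assumes the URLs are absolute and uses the WaitResponse type defined later in this file.

package example

import (
	"encoding/json"
	"fmt"
	"net/http"

	"gopkg.in/macaroon.v1"

	"gopkg.in/macaroon-bakery.v0/bakery/example/idservice"
	"gopkg.in/macaroon-bakery.v0/httpbakery"
)

// waitForDischarge sketches what httpbakery.Do automates: show the user
// the visit URL, then block on the wait URL until the login completes and
// a discharge macaroon comes back.
func waitForDischarge(client *http.Client, info *httpbakery.ErrorInfo) (*macaroon.Macaroon, error) {
	fmt.Println("please visit:", info.VisitURL)
	resp, err := client.Get(info.WaitURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var wr idservice.WaitResponse
	if err := json.NewDecoder(resp.Body).Decode(&wr); err != nil {
		return nil, err
	}
	return wr.Macaroon, nil
}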
-func (h *handler) needLogin(cavId string, caveat string, why string) error { - // TODO(rog) If the user is already logged in (username != ""), - // we should perhaps just return an error here. - log.Printf("login required") - waitId, err := h.place.NewRendezvous(&thirdPartyCaveatInfo{ - CaveatId: cavId, - Caveat: caveat, - }) - if err != nil { - return fmt.Errorf("cannot make rendezvous: %v", err) - } - log.Printf("returning redirect error") - return &httpbakery.Error{ - Message: why, - Code: httpbakery.ErrInteractionRequired, - Info: &httpbakery.ErrorInfo{ - VisitURL: "/login?waitid=" + waitId, - WaitURL: "/wait?waitid=" + waitId, - }, - } -} - -// waitHandler serves an HTTP endpoint that waits until a macaroon -// has been discharged, and returns the discharge macaroon. -func (h *handler) waitHandler(_ http.Header, req *http.Request) (interface{}, error) { - req.ParseForm() - waitId := req.Form.Get("waitid") - if waitId == "" { - return nil, fmt.Errorf("wait id parameter not found") - } - caveat, login, err := h.place.Wait(waitId) - if err != nil { - return nil, fmt.Errorf("cannot wait: %v", err) - } - if login.User == "" { - return nil, fmt.Errorf("login failed") - } - // Create a context to verify the third party caveat. - // Note that because the information in login has been - // supplied directly by our own code, we can assume - // that it can be trusted, so we set verifiedUser to true. - ctxt := &context{ - handler: h, - req: req, - svc: h.svc, - declaredUser: login.User, - verifiedUser: true, - } - // Now that we've verified the user, we can check again to see - // if we can discharge the original caveat. - macaroon, err := h.svc.Discharge(ctxt, caveat.CaveatId) - if err != nil { - return nil, errgo.Mask(err) - } - return WaitResponse{ - Macaroon: macaroon, - }, nil -} - -func (h *handler) questionHandler(_ http.Header, req *http.Request) (interface{}, error) { - return nil, errgo.New("question unimplemented") - // TODO - // req.ParseForm() - // - // macStr := req.Form.Get("macaroons") - // if macStr == "" { - // return nil, fmt.Errorf("macaroon parameter not found") - // } - // var macaroons []*macaroon.Macaroon - // err := json.Unmarshal([]byte(macStr), &macaroons) - // if err != nil { - // return nil, fmt.Errorf("cannot unmarshal macaroon: %v", err) - // } - // if len(macaroons) == 0 { - // return nil, fmt.Errorf("no macaroons found") - // } - // q := req.Form.Get("q") - // if q == "" { - // return nil, fmt.Errorf("q parameter not found") - // } - // user := req.Form.Get("user") - // if user == "" { - // return nil, fmt.Errorf("user parameter not found") - // } - // ctxt := &context{ - // declaredUser: user, - // operation: "question " + q, - // } - // breq := h.svc.NewRequest(req, ctxt) - // for _, m := range macaroons { - // breq.AddClientMacaroon(m) - // } - // err := breq.Check() - // return nil, err -} - -// WaitResponse holds the response from the wait endpoint. -type WaitResponse struct { - Macaroon *macaroon.Macaroon -} - -// context represents the context in which a caveat -// will be checked. -type context struct { - // handler refers to the idservice handler. - handler *handler - - // declaredUser holds the user name that we want to use for - // checking authorization caveats. - declaredUser string - - // verifiedUser is true when the declared user has been verified - // directly (by the user login) - verifiedUser bool - - // operation holds the current operation, if any. - operation string - - svc *bakery.Service - - // req holds the current client's HTTP request. 
- req *http.Request -} - -func (ctxt *context) Condition() string { - return "" -} - -func (ctxt *context) Check(cond, arg string) error { - switch cond { - case "user-is": - if arg != ctxt.declaredUser { - return fmt.Errorf("not logged in as %q", arg) - } - return nil - case "operation": - if ctxt.operation != "" && arg == ctxt.operation { - return nil - } - return errgo.Newf("operation mismatch") - default: - return checkers.ErrCaveatNotRecognized - } -} - -func (ctxt *context) CheckThirdPartyCaveat(cavId, cav string) ([]checkers.Caveat, error) { - h := ctxt.handler - log.Printf("checking third party caveat %q", cav) - op, rest, err := checkers.ParseCaveat(cav) - if err != nil { - return nil, fmt.Errorf("cannot parse caveat %q: %v", cav, err) - } - switch op { - case "can-speak-for": - // TODO(rog) We ignore the currently logged in user here, - // but perhaps it would be better to let the user be in control - // of which user they're currently "declared" as, rather than - // getting privileges of users we currently have macaroons for. - checkErr := ctxt.canSpeakFor(rest) - if checkErr == nil { - return ctxt.firstPartyCaveats(), nil - } - return nil, h.needLogin(cavId, cav, checkErr.Error()) - case "member-of-group": - // The third-party caveat is asking if the currently logged in - // user is a member of a particular group. - // We can find the currently logged in user by checking - // the username cookie (which doesn't provide any power, but - // indicates which user name to check) - if ctxt.declaredUser == "" { - return nil, h.needLogin(cavId, cav, "not logged in") - } - if err := ctxt.canSpeakFor(ctxt.declaredUser); err != nil { - return nil, errgo.Notef(err, "cannot speak for declared user %q", ctxt.declaredUser) - } - info, ok := h.users[ctxt.declaredUser] - if !ok { - return nil, errgo.Newf("user %q not found", ctxt.declaredUser) - } - group := rest - if !info.Groups[group] { - return nil, errgo.Newf("not privileged enough") - } - return ctxt.firstPartyCaveats(), nil - default: - return nil, checkers.ErrCaveatNotRecognized - } -} - -// canSpeakFor checks whether the client sending -// the given request can speak for the given user. -// We do that by declaring that user and checking -// whether the supplied macaroons in the request -// verify OK. -func (ctxt *context) canSpeakFor(user string) error { - if user == ctxt.declaredUser && ctxt.verifiedUser { - // The context is a direct result of logging in. - // No need to check macaroons. - return nil - } - ctxt1 := *ctxt - ctxt1.declaredUser = user - _, err := httpbakery.CheckRequest(ctxt.svc, ctxt.req, nil, &ctxt1) - if err != nil { - log.Printf("client cannot speak for %q: %v", user, err) - } else { - log.Printf("client can speak for %q", user) - } - return err -} - -// firstPartyCaveats returns first-party caveats suitable -// for adding to a third-party caveat discharge macaroon -// within the receiving context. -func (ctxt *context) firstPartyCaveats() []checkers.Caveat { - // TODO return caveat specifying that ip-addr is - // the same as that given in ctxt.req.RemoteAddr - // and other 1st party caveats, potentially. 
- return nil -} - -func errorToResponse(err error) (int, interface{}) { - cause := errgo.Cause(err) - if cause, ok := cause.(*httpbakery.Error); ok { - err1 := *cause - err1.Message = err.Error() - return http.StatusInternalServerError, &err1 - } - return http.StatusInternalServerError, &httpbakery.Error{ - Message: err.Error(), - } -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice/idservice_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice/idservice_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/example/idservice/idservice_test.go 1970-01-01 00:00:00 +0000 @@ -1,195 +0,0 @@ -package idservice_test - -import ( - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "regexp" - "time" - - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/example/idservice" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -type suite struct { - authEndpoint string - authPublicKey *bakery.PublicKey - httpClient *http.Client -} - -var _ = gc.Suite(&suite{}) - -func (s *suite) SetUpSuite(c *gc.C) { - key, err := bakery.GenerateKey() - c.Assert(err, gc.IsNil) - s.authPublicKey = &key.Public - s.authEndpoint = serve(c, func(endpoint string) (http.Handler, error) { - return idservice.New(idservice.Params{ - Users: map[string]*idservice.UserInfo{ - "rog": { - Password: "password", - }, - "root": { - Password: "superman", - Groups: map[string]bool{ - "target-service-users": true, - }, - }, - }, - Service: bakery.NewServiceParams{ - Location: endpoint, - Store: bakery.NewMemStorage(), - Key: key, - Locator: bakery.NewPublicKeyRing(), - }, - }) - }) - c.Logf("auth endpoint at %s", s.authEndpoint) -} - -func (s *suite) SetUpTest(c *gc.C) { - s.httpClient = httpbakery.NewHTTPClient() -} - -func (s *suite) TestIdService(c *gc.C) { - serverEndpoint := serve(c, func(endpoint string) (http.Handler, error) { - return targetService(endpoint, s.authEndpoint, s.authPublicKey) - }) - c.Logf("target service endpoint at %s", serverEndpoint) - visitDone := make(chan struct{}) - visitWebPage := func(u *url.URL) error { - go func() { - err := s.scrapeLoginPage(u) - c.Logf("scrape returned %v", err) - c.Check(err, gc.IsNil) - visitDone <- struct{}{} - }() - return nil - } - resp, err := s.clientRequest(serverEndpoint+"/gold", visitWebPage) - c.Assert(err, gc.IsNil) - c.Assert(resp, gc.Equals, "all is golden") - select { - case <-visitDone: - case <-time.After(5 * time.Second): - c.Fatalf("visit never done") - } - - // Try again. We shouldn't need to interact this time. - resp, err = s.clientRequest(serverEndpoint+"/silver", noVisit) - c.Assert(err, gc.IsNil) - c.Assert(resp, gc.Equals, "every cloud has a silver lining") -} - -func noVisit(*url.URL) error { - return errgo.New("should not be visiting") -} - -func serve(c *gc.C, newHandler func(string) (http.Handler, error)) (endpointURL string) { - listener, err := net.Listen("tcp", "localhost:0") - c.Assert(err, gc.IsNil) - - endpointURL = "http://" + listener.Addr().String() - handler, err := newHandler(endpointURL) - c.Assert(err, gc.IsNil) - - go http.Serve(listener, handler) - return endpointURL -} - -// client represents a client of the target service. In this simple -// example, it just tries a GET request, which will fail unless the -// client has the required authorization. 
-func (s *suite) clientRequest(serverEndpoint string, visitWebPage func(*url.URL) error) (string, error) { - req, err := http.NewRequest("GET", serverEndpoint, nil) - if err != nil { - return "", errgo.Notef(err, "cannot make new HTTP request") - } - - // The Do function implements the mechanics - // of actually gathering discharge macaroons - // when required, and retrying the request - // when necessary. - resp, err := httpbakery.Do(s.httpClient, req, visitWebPage) - if err != nil { - return "", errgo.NoteMask(err, "GET failed", errgo.Any) - } - defer resp.Body.Close() - // TODO(rog) unmarshal error - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", fmt.Errorf("cannot read response: %v", err) - } - return string(data), nil -} - -// Patterns to search for the relevant information in the login page. -// Alternatives to this might be (in likely ascending order of complexity): -// - use the template itself as the pattern. -// - parse the html with encoding/xml -// - parse the html with code.google.com/p/go.net/html -var ( - actionPat = regexp.MustCompile(`
n { - found = &k.key - n = len(k.url.Path) - } - } - if found == nil { - return nil, ErrNotFound - } - return found, nil -} - -func (r *publicKeyRecord) match(url *url.URL) bool { - if url == nil { - return false - } - if url.Host != r.url.Host { - return false - } - if !r.prefix { - return url.Path == r.url.Path - } - pattern := r.url.Path - n := len(pattern) - if pattern[n-1] != '/' { - return pattern == url.Path - } - return len(url.Path) >= n && url.Path[0:n] == pattern -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/keys_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/keys_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/keys_test.go 1970-01-01 00:00:00 +0000 @@ -1,244 +0,0 @@ -package bakery_test - -import ( - "encoding/base64" - "encoding/json" - - jc "github.com/juju/testing/checkers" - gc "gopkg.in/check.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" -) - -type KeysSuite struct{} - -var _ = gc.Suite(&KeysSuite{}) - -var testKey = newTestKey(0) - -func (*KeysSuite) TestMarshalBinary(c *gc.C) { - data, err := testKey.MarshalBinary() - c.Assert(err, gc.IsNil) - c.Assert(data, jc.DeepEquals, []byte(testKey[:])) - - var key1 bakery.Key - err = key1.UnmarshalBinary(data) - c.Assert(err, gc.IsNil) - c.Assert(key1, gc.DeepEquals, testKey) -} - -func (*KeysSuite) TestMarshalText(c *gc.C) { - data, err := testKey.MarshalText() - c.Assert(err, gc.IsNil) - c.Assert(string(data), gc.Equals, base64.StdEncoding.EncodeToString([]byte(testKey[:]))) - - var key1 bakery.Key - err = key1.UnmarshalText(data) - c.Assert(err, gc.IsNil) - c.Assert(key1, gc.Equals, testKey) -} - -func (*KeysSuite) TestKeyPairMarshalJSON(c *gc.C) { - kp := bakery.KeyPair{ - Public: bakery.PublicKey{testKey}, - Private: bakery.PrivateKey{testKey}, - } - kp.Private.Key[0] = 99 - data, err := json.Marshal(kp) - c.Assert(err, gc.IsNil) - var x interface{} - err = json.Unmarshal(data, &x) - c.Assert(err, gc.IsNil) - - // Check that the fields have marshaled as strings. 
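The longest-prefix lookup implemented by match above behaves as in this minimal sketch (host and paths illustrative). Note that the URL scheme is deliberately ignored, as the http-vs-https test case below confirms.

package main

import (
	"fmt"
	"log"

	"gopkg.in/macaroon-bakery.v0/bakery"
)

func main() {
	key, err := bakery.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	kr := bakery.NewPublicKeyRing()
	// Register the key for everything under /discharger/ on this host.
	if err := kr.AddPublicKeyForLocation("http://example.com/discharger/", true, &key.Public); err != nil {
		log.Fatal(err)
	}
	// The longest matching registered prefix wins; the scheme and any
	// path beyond the prefix are ignored.
	pk, err := kr.PublicKeyForLocation("https://example.com/discharger/sub/path")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*pk == key.Public) // true
}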
- c.Assert(x.(map[string]interface{})["private"], gc.FitsTypeOf, "") - c.Assert(x.(map[string]interface{})["public"], gc.FitsTypeOf, "") - - var kp1 bakery.KeyPair - err = json.Unmarshal(data, &kp1) - c.Assert(err, gc.IsNil) - c.Assert(kp1, jc.DeepEquals, kp) -} - -func newTestKey(n byte) bakery.Key { - var k bakery.Key - for i := range k { - k[i] = n + byte(i) - } - return k -} - -type addPublicKeyArgs struct { - loc string - prefix bool - key bakery.Key -} - -var publicKeyRingTests = []struct { - about string - add []addPublicKeyArgs - loc string - expectKey bakery.Key - expectNotFound bool -}{{ - about: "empty keyring", - add: []addPublicKeyArgs{}, - loc: "something", - expectNotFound: true, -}, { - about: "single non-prefix key", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x", - key: testKey, - }}, - loc: "http://foo.com/x", - expectKey: testKey, -}, { - about: "single prefix key", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x", - key: testKey, - prefix: true, - }}, - loc: "http://foo.com/x", - expectKey: testKey, -}, { - about: "pattern longer than url", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x", - key: testKey, - prefix: true, - }}, - loc: "http://foo.com/", - expectNotFound: true, -}, { - about: "pattern not ending in /", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x", - key: testKey, - prefix: true, - }}, - loc: "http://foo.com/x/y", - expectNotFound: true, -}, { - about: "mismatched host", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x", - key: testKey, - prefix: true, - }}, - loc: "http://bar.com/x/y", - expectNotFound: true, -}, { - about: "http vs https", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x", - key: testKey, - }}, - loc: "https://foo.com/x", - expectKey: testKey, -}, { - about: "naked pattern url with prefix", - add: []addPublicKeyArgs{{ - loc: "http://foo.com", - key: testKey, - prefix: true, - }}, - loc: "http://foo.com/arble", - expectKey: testKey, -}, { - about: "naked pattern url with prefix with naked match url", - add: []addPublicKeyArgs{{ - loc: "http://foo.com", - key: testKey, - prefix: true, - }}, - loc: "http://foo.com", - expectKey: testKey, -}, { - about: "naked pattern url, no prefix", - add: []addPublicKeyArgs{{ - loc: "http://foo.com", - key: testKey, - }}, - loc: "http://foo.com", - expectKey: testKey, -}, { - about: "naked pattern url, no prefix, match with no slash", - add: []addPublicKeyArgs{{ - loc: "http://foo.com", - key: testKey, - }}, - loc: "http://foo.com/", - expectKey: testKey, -}, { - about: "port mismatch", - add: []addPublicKeyArgs{{ - loc: "http://foo.com:8080/x", - key: testKey, - }}, - loc: "https://foo.com/x", - expectNotFound: true, -}, { - about: "url longer than pattern", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x/", - key: testKey, - prefix: true, - }}, - loc: "https://foo.com/x/y/z", - expectKey: testKey, -}, { - about: "longer match preferred", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/x/", - key: newTestKey(0), - prefix: true, - }, { - loc: "http://foo.com/x/y/", - key: newTestKey(1), - prefix: true, - }}, - loc: "https://foo.com/x/y/z", - expectKey: newTestKey(1), -}, { - about: "longer match preferred, with other matches", - add: []addPublicKeyArgs{{ - loc: "http://foo.com/foo/arble", - key: newTestKey(0), - prefix: true, - }, { - loc: "http://foo.com/foo/arble/blah/", - key: newTestKey(1), - prefix: true, - }, { - loc: "http://foo.com/foo/", - key: newTestKey(2), - prefix: true, - }, { - loc: "http://foo.com/foobieblahbletcharbl", - key: newTestKey(3), - 
prefix: true, - }}, - loc: "https://foo.com/foo/arble/blah/x", - expectKey: newTestKey(1), -}} - -func (*KeysSuite) TestPublicKeyRing(c *gc.C) { - for i, test := range publicKeyRingTests { - c.Logf("test %d: %s", i, test.about) - kr := bakery.NewPublicKeyRing() - for _, add := range test.add { - err := kr.AddPublicKeyForLocation(add.loc, add.prefix, &bakery.PublicKey{add.key}) - c.Assert(err, gc.IsNil) - } - key, err := kr.PublicKeyForLocation(test.loc) - if test.expectNotFound { - c.Assert(err, gc.Equals, bakery.ErrNotFound) - c.Assert(key, gc.IsNil) - continue - } - c.Assert(err, gc.IsNil) - c.Assert(*key, gc.Equals, bakery.PublicKey{test.expectKey}) - } -} === removed directory 'src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage' === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/package_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package mgostorage_test - -import ( - "testing" - - jujutesting "github.com/juju/testing" -) - -func TestPackage(t *testing.T) { - jujutesting.MgoTestPackage(t, nil) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/storage.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/storage.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/storage.go 1970-01-01 00:00:00 +0000 @@ -1,102 +0,0 @@ -// Package mgostorage provides an implementation of the -// bakery Storage interface that uses MongoDB to store -// items. -package mgostorage - -import ( - "gopkg.in/errgo.v1" - "gopkg.in/mgo.v2" - "gopkg.in/mgo.v2/bson" - - "gopkg.in/macaroon-bakery.v0/bakery" -) - -// New returns an implementation of Storage -// that stores all items in MongoDB. -func New(c *mgo.Collection) (bakery.Storage, error) { - m := mgoStorage{ - col: c, - } - err := m.setUpCollection() - if err != nil { - return nil, errgo.Mask(err) - } - return &m, nil -} - -type mgoStorage struct { - col *mgo.Collection -} - -type storageDoc struct { - Location string `bson:"loc"` - Item string `bson:"item"` -} - -func (s *mgoStorage) setUpCollection() error { - collection := s.collection() - defer collection.Close() - err := collection.EnsureIndex(mgo.Index{Key: []string{"loc"}, Unique: true}) - if err != nil { - return errgo.Notef(err, "failed to ensure an index on loc exists") - } - return nil -} - -// collection returns the collection with a copied mgo session. -// It must be closed when done with. -func (m *mgoStorage) collection() collectionCloser { - c := m.col.Database.Session.Copy().DB(m.col.Database.Name).C(m.col.Name) - return collectionCloser{c} -} - -type collectionCloser struct { - *mgo.Collection -} - -func (c collectionCloser) Close() { - c.Collection.Database.Session.Close() -} - -// Put implements bakery.Storage.Put. -func (s mgoStorage) Put(location, item string) error { - i := storageDoc{Location: location, Item: item} - - collection := s.collection() - defer collection.Close() - - _, err := collection.Upsert(bson.M{"loc": location}, i) - if err != nil { - return errgo.Notef(err, "cannot store item for location %q", location) - } - return nil -} - -// Get implements bakery.Storage.Get. 
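A sketch of plugging this store into a bakery service, assuming a reachable MongoDB at the illustrative address; root keys minted by the service then survive process restarts.

package main

import (
	"log"

	"gopkg.in/mgo.v2"

	"gopkg.in/macaroon-bakery.v0/bakery"
	"gopkg.in/macaroon-bakery.v0/bakery/mgostorage"
)

func main() {
	session, err := mgo.Dial("localhost:27017") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	store, err := mgostorage.New(session.DB("bakerydb").C("macaroons"))
	if err != nil {
		log.Fatal(err)
	}
	svc, err := bakery.NewService(bakery.NewServiceParams{
		Location: "http://example.com",
		Store:    store, // root keys now persist in MongoDB
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}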
-func (s mgoStorage) Get(location string) (string, error) { - collection := s.collection() - defer collection.Close() - - var i storageDoc - err := collection.Find(bson.M{"loc": location}).One(&i) - if err != nil { - if err == mgo.ErrNotFound { - return "", bakery.ErrNotFound - } - return "", errgo.Notef(err, "cannot get %q", location) - } - - return i.Item, nil -} - -// Del implements bakery.Storage.Del. -func (s mgoStorage) Del(location string) error { - collection := s.collection() - defer collection.Close() - - err := collection.Remove(bson.M{"loc": location}) - if err != nil { - return errgo.Notef(err, "cannot remove %q", location) - } - return nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/storage_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/storage_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/mgostorage/storage_test.go 1970-01-01 00:00:00 +0000 @@ -1,132 +0,0 @@ -package mgostorage_test - -import ( - "errors" - "fmt" - - "github.com/juju/testing" - gc "gopkg.in/check.v1" - "gopkg.in/macaroon.v1" - "gopkg.in/mgo.v2" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakery/mgostorage" -) - -type StorageSuite struct { - testing.MgoSuite - session *mgo.Session - store bakery.Storage -} - -var _ = gc.Suite(&StorageSuite{}) - -func (s *StorageSuite) SetUpTest(c *gc.C) { - s.MgoSuite.SetUpTest(c) - s.session = testing.MgoServer.MustDial() - - store, err := mgostorage.New(s.session.DB("test").C("items")) - c.Assert(err, gc.IsNil) - - s.store = store -} - -func (s *StorageSuite) TearDownTest(c *gc.C) { - s.session.Close() - s.MgoSuite.TearDownTest(c) -} - -func (s *StorageSuite) TestMgoStorage(c *gc.C) { - err := s.store.Put("foo", "bar") - c.Assert(err, gc.IsNil) - item, err := s.store.Get("foo") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.Equals, "bar") - - err = s.store.Put("bletch", "blat") - c.Assert(err, gc.IsNil) - item, err = s.store.Get("bletch") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.Equals, "blat") - - item, err = s.store.Get("nothing") - c.Assert(err, gc.Equals, bakery.ErrNotFound) - c.Assert(item, gc.Equals, "") - - err = s.store.Del("bletch") - c.Assert(err, gc.IsNil) - - item, err = s.store.Get("bletch") - c.Assert(err, gc.Equals, bakery.ErrNotFound) - c.Assert(item, gc.Equals, "") -} - -func (s *StorageSuite) TestMgoStorageUpsert(c *gc.C) { - err := s.store.Put("foo", "bar") - c.Assert(err, gc.IsNil) - item, err := s.store.Get("foo") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.Equals, "bar") - - err = s.store.Put("foo", "ba-ba") - c.Assert(err, gc.IsNil) - item, err = s.store.Get("foo") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.Equals, "ba-ba") - -} - -func (s *StorageSuite) TestConcurrentMgoStorage(c *gc.C) { - done := make(chan struct{}) - for i := 0; i < 3; i++ { - i := i - go func() { - k := fmt.Sprint(i) - err := s.store.Put(k, k) - c.Check(err, gc.IsNil) - v, err := s.store.Get(k) - c.Check(v, gc.Equals, k) - err = s.store.Del(k) - c.Check(err, gc.IsNil) - done <- struct{}{} - }() - } - for i := 0; i < 3; i++ { - <-done - } -} - -type testChecker struct{} - -func (tc *testChecker) CheckFirstPartyCaveat(caveat string) error { - if caveat != "is-authorised bob" { - return errors.New("not bob") - } - return nil -} - -func (s *StorageSuite) TestCreateMacaroon(c *gc.C) { - keypair, err := bakery.GenerateKey() - c.Assert(err, gc.IsNil) - - params := bakery.NewServiceParams{Location: "local", Store: s.store, Key: 
keypair} - service, err := bakery.NewService(params) - c.Assert(err, gc.IsNil) - c.Assert(service, gc.NotNil) - - m, err := service.NewMacaroon( - "123", - []byte("abc"), - []checkers.Caveat{checkers.Caveat{Location: "", Condition: "is-authorised bob"}}, - ) - c.Assert(err, gc.IsNil) - c.Assert(m, gc.NotNil) - - item, err := s.store.Get("123") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.DeepEquals, `{"RootKey":"YWJj"}`) - - err = service.Check(macaroon.Slice{m}, &testChecker{}) - c.Assert(err, gc.IsNil) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/package_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package bakery_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/service.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/service.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/service.go 1970-01-01 00:00:00 +0000 @@ -1,364 +0,0 @@ -// The bakery package layers on top of the macaroon package, providing -// a transport and storage-agnostic way of using macaroons to assert -// client capabilities. -// -package bakery - -import ( - "crypto/rand" - "fmt" - "log" - "strings" - - "github.com/juju/loggo" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -var logger = loggo.GetLogger("bakery") - -// Service represents a service which can use macaroons -// to check authorization. -type Service struct { - location string - store storage - checker FirstPartyChecker - encoder *boxEncoder - key *KeyPair -} - -// NewServiceParams holds the parameters for a NewService call. -type NewServiceParams struct { - // Location will be set as the location of any macaroons - // minted by the service. - Location string - - // Store will be used to store macaroon - // information locally. If it is nil, - // an in-memory storage will be used. - Store Storage - - // Key is the public key pair used by the service for - // third-party caveat encryption. - // It may be nil, in which case a new key pair - // will be generated. - Key *KeyPair - - // Locator provides public keys for third-party services by location when - // adding a third-party caveat. - // It may be nil, in which case, no third-party caveats can be created. - Locator PublicKeyLocator -} - -// NewService returns a new service that can mint new -// macaroons and store their associated root keys. -func NewService(p NewServiceParams) (*Service, error) { - if p.Store == nil { - p.Store = NewMemStorage() - } - svc := &Service{ - location: p.Location, - store: storage{p.Store}, - } - - var err error - if p.Key == nil { - p.Key, err = GenerateKey() - if err != nil { - return nil, err - } - } - if p.Locator == nil { - p.Locator = PublicKeyLocatorMap(nil) - } - svc.key = p.Key - svc.encoder = newBoxEncoder(p.Locator, p.Key) - return svc, nil -} - -// Store returns the store used by the service. -func (svc *Service) Store() Storage { - return svc.store.store -} - -// Location returns the service's configured macaroon location. -func (svc *Service) Location() string { - return svc.location -} - -// PublicKey returns the service's public key. 
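A sketch of how two services share keys through the map-based locator, as the tests below do with their newService helper; locations are illustrative.

package main

import (
	"log"

	"gopkg.in/macaroon-bakery.v0/bakery"
)

func main() {
	locator := make(bakery.PublicKeyLocatorMap)

	asKey, err := bakery.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// The authorization service publishes its public key under its location.
	as, err := bakery.NewService(bakery.NewServiceParams{
		Location: "as-loc",
		Key:      asKey,
		Locator:  locator,
	})
	if err != nil {
		log.Fatal(err)
	}
	locator["as-loc"] = as.PublicKey()

	// ts can now encrypt third-party caveat ids addressed to "as-loc".
	ts, err := bakery.NewService(bakery.NewServiceParams{
		Location: "ts-loc",
		Locator:  locator,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = ts
}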
-func (svc *Service) PublicKey() *PublicKey { - return &svc.key.Public -} - -// Check checks that the given macaroons verify -// correctly using the provided checker to check -// first party caveats. The primary macaroon is in ms[0]; the discharges -// fill the rest of the slice. -// -// If there is a verification error, it returns a VerificationError that -// describes the error (other errors might be returned in other -// circumstances). -func (svc *Service) Check(ms macaroon.Slice, checker FirstPartyChecker) error { - if len(ms) == 0 { - return &VerificationError{ - Reason: fmt.Errorf("no macaroons in slice"), - } - } - item, err := svc.store.Get(ms[0].Id()) - if err != nil { - if errgo.Cause(err) == ErrNotFound { - // If the macaroon was not found, it is probably - // because it's been removed after time-expiry, - // so return a verification error. - return &VerificationError{ - Reason: errgo.New("macaroon not found in storage"), - } - } - return errgo.Notef(err, "cannot get macaroon") - } - err = ms[0].Verify(item.RootKey, checker.CheckFirstPartyCaveat, ms[1:]) - if err != nil { - return &VerificationError{ - Reason: err, - } - } - return nil -} - -// CheckAny checks that the given slice of slices contains at least -// one macaroon minted by the given service, using checker to check -// any first party caveats. It returns an error with a -// *bakery.VerificationError cause if the macaroon verification failed. -// -// The assert map holds any required attributes of "declared" attributes, -// overriding any inferences made from the macaroons themselves. -// It has a similar effect to adding a checkers.DeclaredCaveat -// for each key and value, but the error message will be more -// useful. -// -// It adds all the standard caveat checkers to the given checker. -// -// It returns any attributes declared in the successfully validated request. -func (svc *Service) CheckAny(mss []macaroon.Slice, assert map[string]string, checker checkers.Checker) (map[string]string, error) { - // TODO perhaps return a slice of attribute maps, one - // for each successfully validated macaroon slice? - var err error - for _, ms := range mss { - declared := checkers.InferDeclared(ms) - for key, val := range assert { - declared[key] = val - } - err = svc.Check(ms, checkers.New(declared, checker)) - if err == nil { - return declared, nil - } - } - // Return an arbitrary error from the macaroons provided. - // TODO return all errors. - return nil, errgo.Mask(err, isVerificationError) -} - -func isVerificationError(err error) bool { - _, ok := err.(*VerificationError) - return ok -} - -// NewMacaroon mints a new macaroon with the given id and caveats. -// If the id is empty, a random id will be used. -// If rootKey is nil, a random root key will be used. -// The macaroon will be stored in the service's storage. 
-func (svc *Service) NewMacaroon(id string, rootKey []byte, caveats []checkers.Caveat) (*macaroon.Macaroon, error) { - if rootKey == nil { - newRootKey, err := randomBytes(24) - if err != nil { - return nil, fmt.Errorf("cannot generate root key for new macaroon: %v", err) - } - rootKey = newRootKey - } - if id == "" { - idBytes, err := randomBytes(24) - if err != nil { - return nil, fmt.Errorf("cannot generate id for new macaroon: %v", err) - } - id = fmt.Sprintf("%x", idBytes) - } - m, err := macaroon.New(rootKey, id, svc.location) - if err != nil { - return nil, fmt.Errorf("cannot bake macaroon: %v", err) - } - - // TODO look at the caveats for expiry time and associate - // that with the storage item so that the storage can - // garbage collect it at an appropriate time. - if err := svc.store.Put(m.Id(), &storageItem{ - RootKey: rootKey, - }); err != nil { - return nil, fmt.Errorf("cannot save macaroon to store: %v", err) - } - for _, cav := range caveats { - if err := svc.AddCaveat(m, cav); err != nil { - if err := svc.store.store.Del(m.Id()); err != nil { - log.Printf("failed to remove macaroon from storage: %v", err) - } - return nil, err - } - } - return m, nil -} - -// AddCaveat adds a caveat to the given macaroon. -// -// If it's a third-party caveat, it uses the service's caveat-id encoder -// to create the id of the new caveat. -func (svc *Service) AddCaveat(m *macaroon.Macaroon, cav checkers.Caveat) error { - logger.Infof("Service.AddCaveat id %q; cav %#v", m.Id(), cav) - if cav.Location == "" { - m.AddFirstPartyCaveat(cav.Condition) - return nil - } - rootKey, err := randomBytes(24) - if err != nil { - return fmt.Errorf("cannot generate third party secret: %v", err) - } - id, err := svc.encoder.encodeCaveatId(cav, rootKey) - if err != nil { - return fmt.Errorf("cannot create third party caveat id at %q: %v", cav.Location, err) - } - if err := m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil { - return fmt.Errorf("cannot add third party caveat: %v", err) - } - return nil -} - -// Discharge creates a macaroon that discharges the third party caveat with the -// given id. The id should have been created earlier by a Service. The -// condition implicit in the id is checked for validity using checker, and -// then if valid, a new macaroon is minted which discharges the caveat, and can -// eventually be associated with a client request using AddClientMacaroon. -func (svc *Service) Discharge(checker ThirdPartyChecker, id string) (*macaroon.Macaroon, error) { - decoder := newBoxDecoder(svc.encoder.key) - - logger.Infof("server attempting to discharge %q", id) - rootKey, condition, err := decoder.decodeCaveatId(id) - if err != nil { - return nil, fmt.Errorf("discharger cannot decode caveat id: %v", err) - } - // Note that we don't check the error - we allow the - // third party checker to see even caveats that we can't - // understand. 
- cond, arg, _ := checkers.ParseCaveat(condition) - var caveats []checkers.Caveat - if cond == checkers.CondNeedDeclared { - caveats, err = checkNeedDeclared(id, arg, checker) - } else { - caveats, err = checker.CheckThirdPartyCaveat(id, condition) - } - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - return svc.NewMacaroon(id, rootKey, caveats) -} - -func checkNeedDeclared(caveatId, arg string, checker ThirdPartyChecker) ([]checkers.Caveat, error) { - i := strings.Index(arg, " ") - if i <= 0 { - return nil, errgo.Newf("need-declared caveat requires an argument, got %q", arg) - } - needDeclared := strings.Split(arg[0:i], ",") - for _, d := range needDeclared { - if d == "" { - return nil, errgo.New("need-declared caveat with empty required attribute") - } - } - if len(needDeclared) == 0 { - return nil, fmt.Errorf("need-declared caveat with no required attributes") - } - caveats, err := checker.CheckThirdPartyCaveat(caveatId, arg[i+1:]) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - declared := make(map[string]bool) - for _, cav := range caveats { - if cav.Location != "" { - continue - } - // Note that we ignore the error. We allow the service to - // generate caveats that we don't understand here. - cond, arg, _ := checkers.ParseCaveat(cav.Condition) - if cond != checkers.CondDeclared { - continue - } - parts := strings.SplitN(arg, " ", 2) - if len(parts) != 2 { - return nil, errgo.Newf("declared caveat has no value") - } - declared[parts[0]] = true - } - // Add empty declarations for everything mentioned in need-declared - // that was not actually declared. - for _, d := range needDeclared { - if !declared[d] { - caveats = append(caveats, checkers.DeclaredCaveat(d, "")) - } - } - return caveats, nil -} - -func randomBytes(n int) ([]byte, error) { - b := make([]byte, n) - _, err := rand.Read(b) - if err != nil { - return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) - } - return b, nil -} - -type VerificationError struct { - Reason error -} - -func (e *VerificationError) Error() string { - return fmt.Sprintf("verification failed: %v", e.Reason) -} - -// TODO(rog) consider possible options for checkers: -// - first and third party checkers could be merged, but -// then there would have to be a runtime check -// that when used to check first-party caveats, the -// checker does not return third-party caveats. - -// ThirdPartyChecker holds a function that checks third party caveats -// for validity. If the caveat is valid, it returns a nil error and -// optionally a slice of extra caveats that will be added to the -// discharge macaroon. The caveatId parameter holds the still-encoded id -// of the caveat. -// -// If the caveat kind was not recognised, the checker should return an -// error with a ErrCaveatNotRecognized cause. -type ThirdPartyChecker interface { - CheckThirdPartyCaveat(caveatId, caveat string) ([]checkers.Caveat, error) -} - -type ThirdPartyCheckerFunc func(caveatId, caveat string) ([]checkers.Caveat, error) - -func (c ThirdPartyCheckerFunc) CheckThirdPartyCaveat(caveatId, caveat string) ([]checkers.Caveat, error) { - return c(caveatId, caveat) -} - -// FirstPartyChecker holds a function that checks first party caveats -// for validity. -// -// If the caveat kind was not recognised, the checker should return -// ErrCaveatNotRecognized. 
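The FirstPartyCheckerFunc adaptor declared just below makes ad-hoc checkers cheap to write. A minimal mint-and-check round trip, with an illustrative caveat condition:

package main

import (
	"fmt"
	"log"

	"gopkg.in/macaroon.v1"

	"gopkg.in/macaroon-bakery.v0/bakery"
	"gopkg.in/macaroon-bakery.v0/bakery/checkers"
)

func main() {
	svc, err := bakery.NewService(bakery.NewServiceParams{Location: "loc"})
	if err != nil {
		log.Fatal(err)
	}
	// Mint a macaroon with a single first-party caveat.
	m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{Condition: "access-allowed"}})
	if err != nil {
		log.Fatal(err)
	}
	checker := bakery.FirstPartyCheckerFunc(func(caveat string) error {
		if caveat != "access-allowed" {
			return checkers.ErrCaveatNotRecognized
		}
		return nil
	})
	fmt.Println(svc.Check(macaroon.Slice{m}, checker)) // prints <nil> on success
}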
-type FirstPartyChecker interface { - CheckFirstPartyCaveat(caveat string) error -} - -type FirstPartyCheckerFunc func(caveat string) error - -func (c FirstPartyCheckerFunc) CheckFirstPartyCaveat(caveat string) error { - return c(caveat) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/service_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/service_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/service_test.go 1970-01-01 00:00:00 +0000 @@ -1,334 +0,0 @@ -package bakery_test - -import ( - "encoding/json" - "fmt" - - gc "gopkg.in/check.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -type ServiceSuite struct{} - -var _ = gc.Suite(&ServiceSuite{}) - -// TestSingleServiceFirstParty creates a single service -// with a macaroon with one first party caveat. -// It creates a request with this macaroon and checks that the service -// can verify this macaroon as valid. -func (s *ServiceSuite) TestSingleServiceFirstParty(c *gc.C) { - p := bakery.NewServiceParams{ - Location: "loc", - Store: nil, - Key: nil, - Locator: nil, - } - service, err := bakery.NewService(p) - c.Assert(err, gc.IsNil) - - primary, err := service.NewMacaroon("", nil, nil) - c.Assert(err, gc.IsNil) - c.Assert(primary.Location(), gc.Equals, "loc") - cav := checkers.Caveat{ - Location: "", - Condition: "something", - } - err = service.AddCaveat(primary, cav) - c.Assert(err, gc.IsNil) - - err = service.Check(macaroon.Slice{primary}, strcmpChecker("something")) - c.Assert(err, gc.IsNil) -} - -// TestMacaroonPaperFig6 implements an example flow as described in the macaroons paper: -// http://theory.stanford.edu/~ataly/Papers/macaroons.pdf -// There are three services, ts, fs, as: -// ts is a storage service which has deligated authority to a forum service fs. -// The forum service wants to require its users to be logged into to an authentication service as. -// -// The client obtains a macaroon from fs (minted by ts, with a third party caveat addressed to as). -// The client obtains a discharge macaroon from as to satisfy this caveat. -// The target service verifies the original macaroon it delegated to fs -// No direct contact between as and ts is required -func (s *ServiceSuite) TestMacaroonPaperFig6(c *gc.C) { - locator := make(bakery.PublicKeyLocatorMap) - as := newService(c, "as-loc", locator) - ts := newService(c, "ts-loc", locator) - fs := newService(c, "fs-loc", locator) - - // ts creates a macaroon. - tsMacaroon, err := ts.NewMacaroon("", nil, nil) - c.Assert(err, gc.IsNil) - - // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. 
- err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) - c.Assert(err, gc.IsNil) - - // client asks for a discharge macaroon for each third party caveat - d, err := bakery.DischargeAll(tsMacaroon, func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - c.Assert(firstPartyLocation, gc.Equals, "ts-loc") - c.Assert(cav.Location, gc.Equals, "as-loc") - mac, err := as.Discharge(strcmpChecker("user==bob"), cav.Id) - c.Assert(err, gc.IsNil) - return mac, nil - }) - c.Assert(err, gc.IsNil) - - err = ts.Check(d, strcmpChecker("")) - c.Assert(err, gc.IsNil) -} - -func macStr(m *macaroon.Macaroon) string { - data, err := json.MarshalIndent(m, "\t", "\t") - if err != nil { - panic(err) - } - return string(data) -} - -// TestMacaroonPaperFig6FailsWithoutDischarges runs a similar test as TestMacaroonPaperFig6 -// without the client discharging the third party caveats. -func (s *ServiceSuite) TestMacaroonPaperFig6FailsWithoutDischarges(c *gc.C) { - locator := make(bakery.PublicKeyLocatorMap) - ts := newService(c, "ts-loc", locator) - fs := newService(c, "fs-loc", locator) - _ = newService(c, "as-loc", locator) - - // ts creates a macaroon. - tsMacaroon, err := ts.NewMacaroon("", nil, nil) - c.Assert(err, gc.IsNil) - - // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. - err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) - c.Assert(err, gc.IsNil) - - // client makes request to ts - err = ts.Check(macaroon.Slice{tsMacaroon}, strcmpChecker("")) - c.Assert(err, gc.ErrorMatches, `verification failed: cannot find discharge macaroon for caveat ".*"`) -} - -// TestMacaroonPaperFig6FailsWithBindingOnTamperedSignature runs a similar test as TestMacaroonPaperFig6 -// with the discharge macaroon binding being done on a tampered signature. -func (s *ServiceSuite) TestMacaroonPaperFig6FailsWithBindingOnTamperedSignature(c *gc.C) { - locator := make(bakery.PublicKeyLocatorMap) - as := newService(c, "as-loc", locator) - ts := newService(c, "ts-loc", locator) - fs := newService(c, "fs-loc", locator) - - // ts creates a macaroon. - tsMacaroon, err := ts.NewMacaroon("", nil, nil) - c.Assert(err, gc.IsNil) - - // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. - err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) - c.Assert(err, gc.IsNil) - - // client asks for a discharge macaroon for each third party caveat - d, err := bakery.DischargeAll(tsMacaroon, func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - c.Assert(firstPartyLocation, gc.Equals, "ts-loc") - c.Assert(cav.Location, gc.Equals, "as-loc") - mac, err := as.Discharge(strcmpChecker("user==bob"), cav.Id) - c.Assert(err, gc.IsNil) - return mac, nil - }) - c.Assert(err, gc.IsNil) - - // client has all the discharge macaroons. For each discharge macaroon bind it to our tsMacaroon - // and add it to our request. - for _, dm := range d[1:] { - dm.Bind([]byte("tampered-signature")) // Bind against an incorrect signature. - } - - // client makes request to ts. 
- err = ts.Check(d, strcmpChecker("")) - c.Assert(err, gc.ErrorMatches, "verification failed: signature mismatch after caveat verification") -} - -func (s *ServiceSuite) TestNeedDeclared(c *gc.C) { - locator := make(bakery.PublicKeyLocatorMap) - firstParty := newService(c, "first", locator) - thirdParty := newService(c, "third", locator) - - // firstParty mints a macaroon with a third-party caveat addressed - // to thirdParty with a need-declared caveat. - m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{ - checkers.NeedDeclaredCaveat(checkers.Caveat{ - Location: "third", - Condition: "something", - }, "foo", "bar"), - }) - c.Assert(err, gc.IsNil) - - // The client asks for a discharge macaroon for each third party caveat. - d, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - return thirdParty.Discharge(strcmpChecker("something"), cav.Id) - }) - c.Assert(err, gc.IsNil) - - // The required declared attributes should have been added - // to the discharge macaroons. - declared := checkers.InferDeclared(d) - c.Assert(declared, gc.DeepEquals, checkers.Declared{ - "foo": "", - "bar": "", - }) - - // Make sure the macaroons actually check out correctly - // when provided with the declared checker. - err = firstParty.Check(d, checkers.New(declared)) - c.Assert(err, gc.IsNil) - - // Try again when the third party does add a required declaration. - - // The client asks for a discharge macaroon for each third party caveat. - d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - checker := thirdPartyCheckerWithCaveats{ - checkers.DeclaredCaveat("foo", "a"), - checkers.DeclaredCaveat("arble", "b"), - } - return thirdParty.Discharge(checker, cav.Id) - }) - c.Assert(err, gc.IsNil) - - // One attribute should have been added, the other was already there. - declared = checkers.InferDeclared(d) - c.Assert(declared, gc.DeepEquals, checkers.Declared{ - "foo": "a", - "bar": "", - "arble": "b", - }) - - err = firstParty.Check(d, checkers.New(declared)) - c.Assert(err, gc.IsNil) - - // Try again, but this time pretend a client is sneakily trying - // to add another "declared" attribute to alter the declarations. - d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - checker := thirdPartyCheckerWithCaveats{ - checkers.DeclaredCaveat("foo", "a"), - checkers.DeclaredCaveat("arble", "b"), - } - m, err := thirdParty.Discharge(checker, cav.Id) - c.Assert(err, gc.IsNil) - - // Sneaky client adds a first party caveat. - m.AddFirstPartyCaveat(checkers.DeclaredCaveat("foo", "c").Condition) - return m, nil - }) - c.Assert(err, gc.IsNil) - - declared = checkers.InferDeclared(d) - c.Assert(declared, gc.DeepEquals, checkers.Declared{ - "bar": "", - "arble": "b", - }) - - err = firstParty.Check(d, checkers.New(declared)) - c.Assert(err, gc.ErrorMatches, `verification failed: caveat "declared foo a" not satisfied: got foo=null, expected "a"`) -} - -func (s *ServiceSuite) TestDischargeTwoNeedDeclared(c *gc.C) { - locator := make(bakery.PublicKeyLocatorMap) - firstParty := newService(c, "first", locator) - thirdParty := newService(c, "third", locator) - - // firstParty mints a macaroon with two third party caveats - // with overlapping attributes. 
- m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{ - checkers.NeedDeclaredCaveat(checkers.Caveat{ - Location: "third", - Condition: "x", - }, "foo", "bar"), - checkers.NeedDeclaredCaveat(checkers.Caveat{ - Location: "third", - Condition: "y", - }, "bar", "baz"), - }) - c.Assert(err, gc.IsNil) - - // The client asks for a discharge macaroon for each third party caveat. - // Since no declarations are added by the discharger, - d, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - return thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) { - return nil, nil - }), cav.Id) - }) - c.Assert(err, gc.IsNil) - declared := checkers.InferDeclared(d) - c.Assert(declared, gc.DeepEquals, checkers.Declared{ - "foo": "", - "bar": "", - "baz": "", - }) - err = firstParty.Check(d, checkers.New(declared)) - c.Assert(err, gc.IsNil) - - // If they return conflicting values, the discharge fails. - // The client asks for a discharge macaroon for each third party caveat. - // Since no declarations are added by the discharger, - d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { - return thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) { - switch caveat { - case "x": - return []checkers.Caveat{ - checkers.DeclaredCaveat("foo", "fooval1"), - }, nil - case "y": - return []checkers.Caveat{ - checkers.DeclaredCaveat("foo", "fooval2"), - checkers.DeclaredCaveat("baz", "bazval"), - }, nil - } - return nil, fmt.Errorf("no matched") - }), cav.Id) - }) - c.Assert(err, gc.IsNil) - declared = checkers.InferDeclared(d) - c.Assert(declared, gc.DeepEquals, checkers.Declared{ - "bar": "", - "baz": "bazval", - }) - err = firstParty.Check(d, checkers.New(declared)) - c.Assert(err, gc.ErrorMatches, `verification failed: caveat "declared foo fooval1" not satisfied: got foo=null, expected "fooval1"`) -} - -func newService(c *gc.C, location string, locator bakery.PublicKeyLocatorMap) *bakery.Service { - keyPair, err := bakery.GenerateKey() - c.Assert(err, gc.IsNil) - - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: location, - Key: keyPair, - Locator: locator, - }) - c.Assert(err, gc.IsNil) - if locator != nil { - locator[location] = &keyPair.Public - } - return svc -} - -type strcmpChecker string - -func (c strcmpChecker) CheckFirstPartyCaveat(caveat string) error { - if caveat != string(c) { - return fmt.Errorf("%v doesn't match %s", caveat, c) - } - return nil -} - -func (c strcmpChecker) CheckThirdPartyCaveat(caveatId string, caveat string) ([]checkers.Caveat, error) { - if caveat != string(c) { - return nil, fmt.Errorf("%v doesn't match %s", caveat, c) - } - return nil, nil -} - -type thirdPartyCheckerWithCaveats []checkers.Caveat - -func (c thirdPartyCheckerWithCaveats) CheckThirdPartyCaveat(caveatId string, caveat string) ([]checkers.Caveat, error) { - return c, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/storage.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/storage.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/storage.go 1970-01-01 00:00:00 +0000 @@ -1,100 +0,0 @@ -package bakery - -import ( - "encoding/json" - "errors" - "fmt" - "sync" -) - -// Storage defines storage for macaroons. -// Calling its methods concurrently is allowed. -type Storage interface { - // Put stores the item at the given location, overwriting - // any item that might already be there. 
- // TODO(rog) would it be better to lose the overwrite - // semantics? - Put(location string, item string) error - - // Get retrieves an item from the given location. - // If the item is not there, it returns ErrNotFound. - Get(location string) (item string, err error) - - // Del deletes the item from the given location. - Del(location string) error -} - -var ErrNotFound = errors.New("item not found") - -// NewMemStorage returns an implementation of Storage -// that stores all items in memory. -func NewMemStorage() Storage { - return &memStorage{ - values: make(map[string]string), - } -} - -type memStorage struct { - mu sync.Mutex - values map[string]string -} - -func (s *memStorage) Put(location, item string) error { - logger.Infof("storage.Put[%q] %q", location, item) - s.mu.Lock() - defer s.mu.Unlock() - s.values[location] = item - return nil -} - -func (s *memStorage) Get(location string) (string, error) { - s.mu.Lock() - defer s.mu.Unlock() - item, ok := s.values[location] - if !ok { - logger.Infof("storage.Get[%q] -> not found", location) - return "", ErrNotFound - } - logger.Infof("storage.Get[%q] -> %q", location, item) - return item, nil -} - -func (s *memStorage) Del(location string) error { - s.mu.Lock() - defer s.mu.Unlock() - delete(s.values, location) - return nil -} - -// storageItem is the format used to store items in -// the store. -type storageItem struct { - RootKey []byte -} - -// storage is a thin wrapper around Storage that -// converts to and from StorageItems in its -// Put and Get methods. -type storage struct { - store Storage -} - -func (s storage) Get(location string) (*storageItem, error) { - itemStr, err := s.store.Get(location) - if err != nil { - return nil, err - } - var item storageItem - if err := json.Unmarshal([]byte(itemStr), &item); err != nil { - return nil, fmt.Errorf("badly formatted item in store: %v", err) - } - return &item, nil -} - -func (s storage) Put(location string, item *storageItem) error { - data, err := json.Marshal(item) - if err != nil { - panic(fmt.Errorf("cannot marshal storage item: %v", err)) - } - return s.store.Put(location, string(data)) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakery/storage_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakery/storage_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakery/storage_test.go 1970-01-01 00:00:00 +0000 @@ -1,62 +0,0 @@ -package bakery_test - -import ( - "fmt" - - gc "gopkg.in/check.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" -) - -type StorageSuite struct{} - -var _ = gc.Suite(&StorageSuite{}) - -func (*StorageSuite) TestMemStorage(c *gc.C) { - store := bakery.NewMemStorage() - err := store.Put("foo", "bar") - c.Assert(err, gc.IsNil) - item, err := store.Get("foo") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.Equals, "bar") - - err = store.Put("bletch", "blat") - c.Assert(err, gc.IsNil) - item, err = store.Get("bletch") - c.Assert(err, gc.IsNil) - c.Assert(item, gc.Equals, "blat") - - item, err = store.Get("nothing") - c.Assert(err, gc.Equals, bakery.ErrNotFound) - c.Assert(item, gc.Equals, "") - - err = store.Del("bletch") - c.Assert(err, gc.IsNil) - - item, err = store.Get("bletch") - c.Assert(err, gc.Equals, bakery.ErrNotFound) - c.Assert(item, gc.Equals, "") -} - -func (*StorageSuite) TestConcurrentMemStorage(c *gc.C) { - // If locking is not done right, this test will - // definitely trigger the race detector. 
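
Editor's note: the Storage interface above is the extension point for persisting root keys, so back ends other than NewMemStorage are easy to supply. The following file-backed variant is hypothetical (not part of the package, names invented here); a real version would also need to sanitize location before using it as a file name.

    package filestorage

    import (
        "io/ioutil"
        "os"
        "path/filepath"

        "gopkg.in/macaroon-bakery.v0/bakery"
    )

    // fileStorage stores each item in its own file under dir,
    // implementing bakery.Storage.
    type fileStorage struct {
        dir string
    }

    func New(dir string) bakery.Storage {
        return fileStorage{dir: dir}
    }

    func (s fileStorage) path(location string) string {
        // Caution: location is used as a file name unchecked here.
        return filepath.Join(s.dir, location)
    }

    func (s fileStorage) Put(location, item string) error {
        return ioutil.WriteFile(s.path(location), []byte(item), 0600)
    }

    func (s fileStorage) Get(location string) (string, error) {
        data, err := ioutil.ReadFile(s.path(location))
        if os.IsNotExist(err) {
            // Map "no such file" to the interface's sentinel error.
            return "", bakery.ErrNotFound
        }
        if err != nil {
            return "", err
        }
        return string(data), nil
    }

    func (s fileStorage) Del(location string) error {
        return os.Remove(s.path(location))
    }
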
- done := make(chan struct{}) - store := bakery.NewMemStorage() - for i := 0; i < 3; i++ { - i := i - go func() { - k := fmt.Sprint(i) - err := store.Put(k, k) - c.Check(err, gc.IsNil) - v, err := store.Get(k) - c.Check(v, gc.Equals, k) - err = store.Del(k) - c.Check(err, gc.IsNil) - done <- struct{}{} - }() - } - for i := 0; i < 3; i++ { - <-done - } -} === removed directory 'src/gopkg.in/macaroon-bakery.v0/bakerytest' === removed file 'src/gopkg.in/macaroon-bakery.v0/bakerytest/bakerytest.go' --- src/gopkg.in/macaroon-bakery.v0/bakerytest/bakerytest.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakerytest/bakerytest.go 1970-01-01 00:00:00 +0000 @@ -1,76 +0,0 @@ -// Package bakerytest provides test helper functions for -// the bakery. -package bakerytest - -import ( - "net/http" - "net/http/httptest" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -// Discharger is a third-party caveat discharger suitable -// for testing. It listens on a local network port for -// discharge requests. It should be shut down by calling -// Close when done with. -type Discharger struct { - Service *bakery.Service - - server *httptest.Server -} - -// NewDischarger returns a new third party caveat discharger -// which uses the given function to check caveats. -// The cond and arg arguments to the function are as returned -// by checkers.ParseCaveat. -// -// If locator is non-nil, it will be used to find public keys -// for any third party caveats returned by the checker. -func NewDischarger( - locator bakery.PublicKeyLocator, - checker func(req *http.Request, cond, arg string) ([]checkers.Caveat, error), -) *Discharger { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: server.URL, - Locator: locator, - }) - if err != nil { - panic(err) - } - checker1 := func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { - cond, arg, err := checkers.ParseCaveat(cav) - if err != nil { - return nil, err - } - return checker(req, cond, arg) - } - httpbakery.AddDischargeHandler(mux, "/", svc, checker1) - return &Discharger{ - Service: svc, - server: server, - } -} - -// Close shuts down the server. -func (d *Discharger) Close() { - d.server.Close() -} - -// Location returns the location of the discharger, suitable -// for setting as the location in a third party caveat. -// This will be the URL of the server. -func (d *Discharger) Location() string { - return d.Service.Location() -} - -// PublicKeyForLocation implements bakery.PublicKeyLocator. 
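
Editor's note: the Discharger defined above is typically wired into a test as the key locator of the service under test. A minimal sketch using the standard testing package (the gocheck-based tests below do the same with more assertions):

    package mypkg_test

    import (
        "net/http"
        "testing"

        "gopkg.in/macaroon-bakery.v0/bakery"
        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
        "gopkg.in/macaroon-bakery.v0/bakerytest"
    )

    func TestWithDischarger(t *testing.T) {
        // A discharger that approves every caveat unconditionally.
        d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) {
            return nil, nil
        })
        defer d.Close()

        // The Discharger doubles as a bakery.PublicKeyLocator, so the
        // service under test can encrypt caveats addressed to it.
        svc, err := bakery.NewService(bakery.NewServiceParams{
            Location: "here",
            Locator:  d,
        })
        if err != nil {
            t.Fatal(err)
        }
        if _, err := svc.NewMacaroon("", nil, []checkers.Caveat{{
            Location:  d.Location(),
            Condition: "something",
        }}); err != nil {
            t.Fatal(err)
        }
    }
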
-func (d *Discharger) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) { - if loc == d.Location() { - return d.Service.PublicKey(), nil - } - return nil, bakery.ErrNotFound -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakerytest/bakerytest_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakerytest/bakerytest_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/bakerytest/bakerytest_test.go 1970-01-01 00:00:00 +0000 @@ -1,109 +0,0 @@ -package bakerytest_test - -import ( - "fmt" - "net/http" - "net/url" - - gc "gopkg.in/check.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakerytest" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -type suite struct { - httpClient *http.Client -} - -func (s *suite) SetUpTest(c *gc.C) { - s.httpClient = httpbakery.NewHTTPClient() -} - -var _ = gc.Suite(&suite{}) - -func noCaveatChecker(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - return nil, nil -} - -func (s *suite) TestDischargerSimple(c *gc.C) { - d := bakerytest.NewDischarger(nil, noCaveatChecker) - defer d.Close() - - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: "here", - Locator: d, - }) - c.Assert(err, gc.IsNil) - m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ - Location: d.Location(), - Condition: "something", - }}) - c.Assert(err, gc.IsNil) - ms, err := httpbakery.DischargeAll(m, s.httpClient, noInteraction) - c.Assert(err, gc.IsNil) - c.Assert(ms, gc.HasLen, 2) - - err = svc.Check(ms, failChecker) - c.Assert(err, gc.IsNil) -} - -var failChecker = bakery.FirstPartyCheckerFunc(func(s string) error { - return fmt.Errorf("fail %s", s) -}) - -func (s *suite) TestDischargerTwoLevels(c *gc.C) { - d1checker := func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - if cond != "xtrue" { - return nil, fmt.Errorf("caveat refused") - } - return nil, nil - } - d1 := bakerytest.NewDischarger(nil, d1checker) - defer d1.Close() - d2checker := func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - return []checkers.Caveat{{ - Location: d1.Location(), - Condition: "x" + cond, - }}, nil - } - d2 := bakerytest.NewDischarger(d1, d2checker) - defer d2.Close() - locator := bakery.PublicKeyLocatorMap{ - d1.Location(): d1.Service.PublicKey(), - d2.Location(): d2.Service.PublicKey(), - } - c.Logf("map: %s", locator) - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: "here", - Locator: locator, - }) - c.Assert(err, gc.IsNil) - m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ - Location: d2.Location(), - Condition: "true", - }}) - c.Assert(err, gc.IsNil) - - ms, err := httpbakery.DischargeAll(m, s.httpClient, noInteraction) - c.Assert(err, gc.IsNil) - c.Assert(ms, gc.HasLen, 3) - - err = svc.Check(ms, failChecker) - c.Assert(err, gc.IsNil) - - err = svc.AddCaveat(m, checkers.Caveat{ - Location: d2.Location(), - Condition: "nope", - }) - c.Assert(err, gc.IsNil) - - ms, err = httpbakery.DischargeAll(m, s.httpClient, noInteraction) - c.Assert(err, gc.ErrorMatches, `cannot get discharge from "http://[^"]*": third party refused discharge: cannot discharge: caveat refused`) - c.Assert(ms, gc.HasLen, 0) -} - -func noInteraction(*url.URL) error { - return fmt.Errorf("unexpected interaction required") -} === removed file 'src/gopkg.in/macaroon-bakery.v0/bakerytest/package_test.go' --- src/gopkg.in/macaroon-bakery.v0/bakerytest/package_test.go 2015-09-22 15:27:01 +0000 +++ 
src/gopkg.in/macaroon-bakery.v0/bakerytest/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package bakerytest_test - -import ( - "testing" - - gc "gopkg.in/check.v1" -) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === removed directory 'src/gopkg.in/macaroon-bakery.v0/cmd' === removed directory 'src/gopkg.in/macaroon-bakery.v0/cmd/bakery-keygen' === removed file 'src/gopkg.in/macaroon-bakery.v0/cmd/bakery-keygen/main.go' --- src/gopkg.in/macaroon-bakery.v0/cmd/bakery-keygen/main.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/cmd/bakery-keygen/main.go 1970-01-01 00:00:00 +0000 @@ -1,22 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - - "gopkg.in/macaroon-bakery.v0/bakery" -) - -func main() { - kp, err := bakery.GenerateKey() - if err != nil { - fmt.Fprintf(os.Stderr, "cannot generate key: %s\n", err) - os.Exit(1) - } - b, err := json.MarshalIndent(kp, "", "\t") - if err != nil { - panic(err) - } - fmt.Printf("%s\n", b) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/dependencies.tsv' --- src/gopkg.in/macaroon-bakery.v0/dependencies.tsv 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/dependencies.tsv 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -github.com/juju/errors git 036046bfdccf6f576e2e5dec7f7878597bcaebe7 2015-02-11T20:59:49Z -github.com/juju/loggo git dc8e19f7c70a62a59c69c40f85b8df09ff20742c 2014-11-17T04:05:26Z -github.com/juju/testing git b93c38205ac2e3817b7b1d9d60e89e69c99a6362 2015-03-16T13:08:43Z -github.com/juju/utils git a90aa2e02b9e7fe354ab816e05b1e0a77f27242d 2015-02-23T16:02:32Z -golang.org/x/crypto git 4ed45ec682102c643324fae5dff8dab085b6c300 2015-01-12T22:01:33Z -golang.org/x/net git 7dbad50ab5b31073856416cdcfeb2796d682f844 2015-03-20T03:46:21Z -gopkg.in/check.v1 git 64131543e7896d5bcc6bd5a76287eb75ea96c673 2014-10-24T13:38:53Z -gopkg.in/errgo.v1 git 81357a83344ddd9f7772884874e5622c2a3da21c 2014-10-13T17:33:38Z -gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z -gopkg.in/mgo.v2 git c6a7dce14133ccac2dcac3793f1d6e2ef048503a 2015-01-24T11:37:54Z -gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z === removed directory 'src/gopkg.in/macaroon-bakery.v0/httpbakery' === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/browser.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/browser.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/browser.go 1970-01-01 00:00:00 +0000 @@ -1,61 +0,0 @@ -package httpbakery - -import ( - "fmt" - "net/url" - "os" - "os/exec" - "runtime" - "strings" -) - -var browser = map[string]string{ - "linux": "sensible-browser", - "darwin": "open", - "freebsd": "xdg-open", - "netbsd": "xdg-open", - "openbsd": "xdg-open", -} - -// OpenWebBrowser opens a web browser at the -// given URL. If the OS is not recognised, the URL -// is just printed to standard output. -func OpenWebBrowser(url *url.URL) error { - var args []string - if runtime.GOOS == "windows" { - // Windows is special because the start command is - // built into cmd.exe and hence requires the argument - // to be quoted. - args = []string{"cmd", "/c", "start", winCmdQuote.Replace(url.String())} - } else if b := browser[runtime.GOOS]; b != "" { - args = []string{b, url.String()} - } - if args != nil { - fmt.Fprintf(os.Stderr, "Opening an authorization web page in your browser.\n") - fmt.Fprintf(os.Stderr, "If it does not open, please open this URL:\n%s\n", url) - cmd := exec.Command(args[0], args[1:]...) 
- cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Start() - go cmd.Wait() - } else { - fmt.Fprintf(os.Stderr, "Please open this URL in your browser to authorize:\n%s\n", url) - } - return nil -} - -// winCmdQuote can quote metacharacters special to the Windows -// cmd.exe command interpreter. It does that by inserting -// a '^' character before each metacharacter. Note that -// most of these cannot actually be produced by URL.String, -// but we include them for completeness. -var winCmdQuote = strings.NewReplacer( - "&", "^&", - "%", "^%", - "(", "^(", - ")", "^)", - "^", "^^", - "<", "^<", - ">", "^>", - "|", "^|", -) === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/checkers.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/checkers.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/checkers.go 1970-01-01 00:00:00 +0000 @@ -1,70 +0,0 @@ -package httpbakery - -import ( - "net" - "net/http" - - "gopkg.in/errgo.v1" - - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -type httpContext struct { - req *http.Request -} - -// Checkers implements the standard HTTP-request checkers. -// It does not include the "declared" checker, as that -// must be added for each individual set of macaroons -// that are checked. -func Checkers(req *http.Request) checkers.Checker { - c := httpContext{req} - return checkers.Map{ - checkers.CondClientIPAddr: c.clientIPAddr, - } -} - -// clientIPAddr implements the IP client address checker -// for an HTTP request. -func (c httpContext) clientIPAddr(_, addr string) error { - ip := net.ParseIP(addr) - if ip == nil { - return errgo.Newf("cannot parse IP address in caveat") - } - if c.req.RemoteAddr == "" { - return errgo.Newf("client has no remote address") - } - reqIP, err := requestIPAddr(c.req) - if err != nil { - return errgo.Mask(err) - } - if !reqIP.Equal(ip) { - return errgo.Newf("client IP address mismatch, got %s", reqIP) - } - return nil -} - -// SameClientIPAddrCaveat returns a caveat that will check that -// the remote IP address is the same as that in the given HTTP request. 
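
Editor's note: a quick sketch of how the client-ip-addr checker above is driven, using the same calls that the table-driven tests below exercise:

    package main

    import (
        "fmt"
        "net"
        "net/http"

        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
        "gopkg.in/macaroon-bakery.v0/httpbakery"
    )

    func main() {
        req := &http.Request{RemoteAddr: "127.0.0.1:1234"}
        checker := checkers.New(httpbakery.Checkers(req))

        // Satisfied: the caveat names the request's own address.
        ok := checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1})
        fmt.Println(checker.CheckFirstPartyCaveat(ok.Condition)) // <nil>

        // Not satisfied: a different address.
        bad := checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 2})
        fmt.Println(checker.CheckFirstPartyCaveat(bad.Condition)) // mismatch error
    }
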
-func SameClientIPAddrCaveat(req *http.Request) checkers.Caveat { - if req.RemoteAddr == "" { - return checkers.ErrorCaveatf("client has no remote IP address") - } - ip, err := requestIPAddr(req) - if err != nil { - return checkers.ErrorCaveatf("%v", err) - } - return checkers.ClientIPAddrCaveat(ip) -} - -func requestIPAddr(req *http.Request) (net.IP, error) { - reqHost, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - return nil, errgo.Newf("cannot parse host port in remote address: %v", err) - } - ip := net.ParseIP(reqHost) - if ip == nil { - return nil, errgo.Newf("invalid IP address in remote address %q", req.RemoteAddr) - } - return ip, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/checkers_test.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/checkers_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/checkers_test.go 1970-01-01 00:00:00 +0000 @@ -1,148 +0,0 @@ -package httpbakery_test - -import ( - "net" - "net/http" - - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -type CheckersSuite struct{} - -var _ = gc.Suite(&CheckersSuite{}) - -type checkTest struct { - caveat string - expectError string - expectCause func(err error) bool -} - -var isCaveatNotRecognized = errgo.Is(checkers.ErrCaveatNotRecognized) - -var checkerTests = []struct { - about string - checker checkers.Checker - checks []checkTest -}{{ - about: "no host name declared", - checker: checkers.New(httpbakery.Checkers(&http.Request{})), - checks: []checkTest{{ - caveat: checkers.ClientIPAddrCaveat(net.IP{0, 0, 0, 0}).Condition, - expectError: `caveat "client-ip-addr 0.0.0.0" not satisfied: client has no remote address`, - }, { - caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}).Condition, - expectError: `caveat "client-ip-addr 127.0.0.1" not satisfied: client has no remote address`, - }, { - caveat: "client-ip-addr badip", - expectError: `caveat "client-ip-addr badip" not satisfied: cannot parse IP address in caveat`, - }}, -}, { - about: "IPv4 host name declared", - checker: checkers.New(httpbakery.Checkers(&http.Request{ - RemoteAddr: "127.0.0.1:1234", - })), - checks: []checkTest{{ - caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}).Condition, - }, { - caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}.To16()).Condition, - }, { - caveat: "client-ip-addr ::ffff:7f00:1", - }, { - caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 2}).Condition, - expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, - }, { - caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")).Condition, - expectError: `caveat "client-ip-addr 2001:4860:0:2001::68" not satisfied: client IP address mismatch, got 127.0.0.1`, - }}, -}, { - about: "IPv6 host name declared", - checker: checkers.New(httpbakery.Checkers(&http.Request{ - RemoteAddr: "[2001:4860:0:2001::68]:1234", - })), - checks: []checkTest{{ - caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")).Condition, - }, { - caveat: "client-ip-addr 2001:4860:0:2001:0::68", - }, { - caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::69")).Condition, - expectError: `caveat "client-ip-addr 2001:4860:0:2001::69" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, - }, { - caveat: checkers.ClientIPAddrCaveat(net.ParseIP("127.0.0.1")).Condition, - expectError: `caveat "client-ip-addr 127.0.0.1" not 
satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, - }}, -}, { - about: "same client address, ipv4 request address", - checker: checkers.New(httpbakery.Checkers(&http.Request{ - RemoteAddr: "127.0.0.1:1324", - })), - checks: []checkTest{{ - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "127.0.0.1:1234", - }).Condition, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "[::ffff:7f00:1]:1235", - }).Condition, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "127.0.0.2:1234", - }).Condition, - expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "[::ffff:7f00:2]:1235", - }).Condition, - expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{}).Condition, - expectError: `caveat "error client has no remote IP address" not satisfied: bad caveat`, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "bad", - }).Condition, - expectError: `caveat "error cannot parse host port in remote address: missing port in address bad" not satisfied: bad caveat`, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "bad:56", - }).Condition, - expectError: `caveat "error invalid IP address in remote address \\"bad:56\\"" not satisfied: bad caveat`, - }}, -}, { - about: "same client address, ipv6 request address", - checker: checkers.New(httpbakery.Checkers(&http.Request{ - RemoteAddr: "[2001:4860:0:2001:0::68]:1235", - })), - checks: []checkTest{{ - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "[2001:4860:0:2001:0::68]:1234", - }).Condition, - }, { - caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ - RemoteAddr: "127.0.0.2:1234", - }).Condition, - expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, - }}, -}} - -func (s *CheckersSuite) TestCheckers(c *gc.C) { - for i, test := range checkerTests { - c.Logf("test %d: %s", i, test.about) - for j, check := range test.checks { - c.Logf("\tcheck %d", j) - err := checkers.New(test.checker).CheckFirstPartyCaveat(check.caveat) - if check.expectError != "" { - c.Assert(err, gc.ErrorMatches, check.expectError) - if check.expectCause == nil { - check.expectCause = errgo.Any - } - c.Assert(check.expectCause(errgo.Cause(err)), gc.Equals, true) - } else { - c.Assert(err, gc.IsNil) - } - } - } -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/client.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/client.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/client.go 1970-01-01 00:00:00 +0000 @@ -1,565 +0,0 @@ -package httpbakery - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - - "github.com/juju/loggo" - "golang.org/x/net/publicsuffix" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -var logger = loggo.GetLogger("httpbakery") - -// DischargeError represents the error when a third party discharge -// is refused by a server. -type DischargeError struct { - // Reason holds the underlying remote error that caused the - // discharge to fail. 
- Reason *Error
-}
-
-func (e *DischargeError) Error() string {
- return fmt.Sprintf("third party refused discharge: %v", e.Reason)
-}
-
-// IsDischargeError reports whether err is a *DischargeError.
-func IsDischargeError(err error) bool {
- _, ok := err.(*DischargeError)
- return ok
-}
-
-// InteractionError wraps an error returned by a call to visitWebPage.
-type InteractionError struct {
- // Reason holds the actual error returned from visitWebPage.
- Reason error
-}
-
-func (e *InteractionError) Error() string {
- return fmt.Sprintf("cannot start interactive session: %v", e.Reason)
-}
-
-// IsInteractionError reports whether err is a *InteractionError.
-func IsInteractionError(err error) bool {
- _, ok := err.(*InteractionError)
- return ok
-}
-
-// NewHTTPClient returns an http.Client that ensures
-// that headers are sent to the server even when the
-// server redirects a GET request. The returned client
-// also contains an empty in-memory cookie jar.
-//
-// See https://github.com/golang/go/issues/4677
-func NewHTTPClient() *http.Client {
- c := *http.DefaultClient
- c.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- if len(via) >= 10 {
- return fmt.Errorf("too many redirects")
- }
- if len(via) == 0 {
- return nil
- }
- for attr, val := range via[0].Header {
- if _, ok := req.Header[attr]; !ok {
- req.Header[attr] = val
- }
- }
- return nil
- }
- jar, err := cookiejar.New(&cookiejar.Options{
- PublicSuffixList: publicsuffix.List,
- })
- if err != nil {
- panic(err)
- }
- c.Jar = &cookieLogger{jar}
- return &c
-}
-
-// WaitResponse holds the body of a response returned
-// from a WaitURL (see the ErrorInfo type).
-type WaitResponse struct {
- Macaroon *macaroon.Macaroon
-}
-
-// Do makes an HTTP request with the given client.
-// If the request fails with a discharge-required error,
-// any required discharge macaroons will be acquired,
-// and the request will be repeated with those attached.
-//
-// If the required discharges were refused by a third
-// party, an error with a *DischargeError cause will be returned.
-//
-// Note that because the request may be retried, no
-// body may be provided in the http request (otherwise
-// the contents will be lost when retrying). For requests
-// with a body (for example PUT or POST methods),
-// use DoWithBody instead.
-//
-// If the client.Jar field is non-nil, the macaroons will be
-// stored there and made available to subsequent requests.
-//
-// If interaction with the user is required, the visitWebPage
-// function is called with a URL to be opened in a
-// web browser. If visitWebPage returns an error,
-// an error with a *InteractionError cause will be returned.
-// See OpenWebBrowser for a possible implementation
-// of visitWebPage.
-func Do(client *http.Client, req *http.Request, visitWebPage func(url *url.URL) error) (*http.Response, error) {
- if req.Body != nil {
- return nil, fmt.Errorf("body unexpectedly provided in request - use DoWithBody")
- }
- return DoWithBody(client, req, noBody, visitWebPage)
-}
-
-func noBody() (io.ReadCloser, error) {
- return nil, nil
-}
-
-// DoWithBody is like Do except that the given getBody function is
-// called to obtain the body for the HTTP request. Any returned body
-// will be closed after each request is made.
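
Editor's note: in practice the Do/DoWithBody pair described above is driven as in the sketch below. The example.com URLs are invented, and OpenWebBrowser (from browser.go above) is used to handle any interaction-required error.

    package main

    import (
        "io/ioutil"
        "log"
        "net/http"
        "strings"

        "gopkg.in/macaroon-bakery.v0/httpbakery"
    )

    func main() {
        client := httpbakery.NewHTTPClient()

        // GET: no body, so plain Do is fine. A discharge-required
        // response is handled transparently and the request retried.
        req, err := http.NewRequest("GET", "https://example.com/protected", nil)
        if err != nil {
            log.Fatal(err)
        }
        resp, err := httpbakery.Do(client, req, httpbakery.OpenWebBrowser)
        if err != nil {
            log.Fatal(err)
        }
        body, _ := ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        log.Printf("got %d bytes", len(body))

        // POST: the body must be replayable for the retry, so it is
        // supplied as a getter via SeekerBody, not set on the request.
        post, err := http.NewRequest("POST", "https://example.com/things", nil)
        if err != nil {
            log.Fatal(err)
        }
        post.Header.Set("Content-Type", "application/json")
        getBody := httpbakery.SeekerBody(strings.NewReader(`{"name":"x"}`))
        resp, err = httpbakery.DoWithBody(client, post, getBody, httpbakery.OpenWebBrowser)
        if err != nil {
            log.Fatal(err)
        }
        resp.Body.Close()
    }
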
-func DoWithBody(client *http.Client, req *http.Request, getBody BodyGetter, visitWebPage func(url *url.URL) error) (*http.Response, error) { - // Add a temporary cookie jar (without mutating the original - // client) if there isn't one available. - if client.Jar == nil { - client1 := *client - jar, err := cookiejar.New(&cookiejar.Options{ - PublicSuffixList: publicsuffix.List, - }) - if err != nil { - return nil, errgo.Notef(err, "cannot make cookie jar") - } - client1.Jar = jar - client = &client1 - } - ctxt := &clientContext{ - client: client, - visitWebPage: visitWebPage, - } - return ctxt.do(req, getBody) -} - -// DischargeAll attempts to acquire discharge macaroons for all the -// third party caveats in m, and returns a slice containing all of them -// bound to m. -// -// If the discharge fails because a third party refuses to discharge a -// caveat, the returned error will have a cause of type *DischargeError. -// If the discharge fails because visitWebPage returns an error, -// the returned error will have a cause of *InteractionError. -// -// The returned macaroon slice will not be stored in the client cookie -// jar (see SetCookie if you need to do that). -func DischargeAll(m *macaroon.Macaroon, client *http.Client, visitWebPage func(url *url.URL) error) (macaroon.Slice, error) { - ctxt := &clientContext{ - client: client, - visitWebPage: visitWebPage, - } - return bakery.DischargeAll(m, ctxt.obtainThirdPartyDischarge) -} - -// PublicKeyForLocation returns the public key from a macaroon -// discharge server running at the given location URL. -// Note that this is insecure if an http: URL scheme is used. -func PublicKeyForLocation(client *http.Client, url string) (*bakery.PublicKey, error) { - url = url + "/publickey" - resp, err := client.Get(url) - if err != nil { - return nil, errgo.Notef(err, "cannot get public key from %q", url) - } - if resp.StatusCode != http.StatusOK { - return nil, errgo.Newf("cannot get public key from %q: got status %s", url, resp.Status) - } - defer resp.Body.Close() - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, errgo.Notef(err, "failed to read response body from %q", url) - } - var pubkey struct { - PublicKey *bakery.PublicKey - } - err = json.Unmarshal(data, &pubkey) - if err != nil { - return nil, errgo.Notef(err, "failed to decode response from %q", url) - } - return pubkey.PublicKey, nil -} - -type clientContext struct { - client *http.Client - visitWebPage func(*url.URL) error -} - -// relativeURL returns newPath relative to an original URL. 
-func relativeURL(base, new string) (*url.URL, error) { - if new == "" { - return nil, errgo.Newf("empty URL") - } - baseURL, err := url.Parse(base) - if err != nil { - return nil, errgo.Notef(err, "cannot parse URL") - } - newURL, err := url.Parse(new) - if err != nil { - return nil, errgo.Notef(err, "cannot parse URL") - } - return baseURL.ResolveReference(newURL), nil -} - -func (ctxt *clientContext) do(req *http.Request, getBody BodyGetter) (*http.Response, error) { - logger.Debugf("client do %s %s {", req.Method, req.URL) - resp, err := ctxt.do1(req, getBody) - logger.Debugf("} -> error %#v", err) - return resp, err -} - -func (ctxt *clientContext) do1(req *http.Request, getBody BodyGetter) (*http.Response, error) { - if err := ctxt.setRequestBody(req, getBody); err != nil { - return nil, errgo.Mask(err) - } - httpResp, err := ctxt.client.Do(req) - if err != nil { - return nil, err - } - if httpResp.StatusCode != http.StatusProxyAuthRequired { - return httpResp, nil - } - if httpResp.Header.Get("Content-Type") != "application/json" { - return httpResp, nil - } - defer httpResp.Body.Close() - - var resp Error - if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil { - return nil, errgo.Notef(err, "cannot unmarshal error response") - } - if resp.Code != ErrDischargeRequired { - return nil, errgo.NoteMask(&resp, fmt.Sprintf("%s %s failed", req.Method, req.URL), errgo.Any) - } - if resp.Info == nil || resp.Info.Macaroon == nil { - return nil, errgo.New("no macaroon found in response") - } - mac := resp.Info.Macaroon - macaroons, err := bakery.DischargeAll(mac, ctxt.obtainThirdPartyDischarge) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - cookieURL := req.URL - if path := resp.Info.MacaroonPath; path != "" { - relURL, err := parseURLPath(path) - if err != nil { - logger.Warningf("ignoring invalid path in discharge-required response: %v", err) - } else { - cookieURL = req.URL.ResolveReference(relURL) - } - } - if err := SetCookie(ctxt.client.Jar, cookieURL, macaroons); err != nil { - return nil, errgo.Notef(err, "cannot set cookie") - } - if err := ctxt.setRequestBody(req, getBody); err != nil { - return nil, errgo.Mask(err) - } - // Try again with our newly acquired discharge macaroons - hresp, err := ctxt.client.Do(req) - if err != nil { - return nil, errgo.Mask(err, errgo.Any) - } - return hresp, nil -} - -func parseURLPath(path string) (*url.URL, error) { - u, err := url.Parse(path) - if err != nil { - return nil, errgo.Mask(err) - } - if u.Scheme != "" || - u.Opaque != "" || - u.User != nil || - u.Host != "" || - u.RawQuery != "" || - u.Fragment != "" { - return nil, errgo.Newf("URL path %q is not clean", path) - } - return u, nil -} - -func (ctxt *clientContext) setRequestBody(req *http.Request, getBody BodyGetter) error { - body, err := getBody() - if err != nil { - return errgo.Notef(err, "cannot get request body") - } - req.Body = body - return nil -} - -// NewCookie takes a slice of macaroons and returns them -// encoded as a cookie. The slice should contain a single primary -// macaroon in its first element, and any discharges after that. 
-func NewCookie(ms macaroon.Slice) (*http.Cookie, error) {
- if len(ms) == 0 {
- return nil, errgo.New("no macaroons in cookie")
- }
- data, err := json.Marshal(ms)
- if err != nil {
- return nil, errgo.Notef(err, "cannot marshal macaroons")
- }
- return &http.Cookie{
- Name: fmt.Sprintf("macaroon-%x", ms[0].Signature()),
- Value: base64.StdEncoding.EncodeToString(data),
- // TODO(rog) other fields, particularly expiry time.
- }, nil
-}
-
-// SetCookie sets a cookie for the given URL on the given cookie jar
-// that will hold the given macaroon slice. The macaroon slice should
-// contain a single primary macaroon in its first element, and any
-// discharges after that.
-func SetCookie(jar http.CookieJar, url *url.URL, ms macaroon.Slice) error {
- cookie, err := NewCookie(ms)
- if err != nil {
- return errgo.Mask(err)
- }
- // TODO verify that setting this for the URL makes it available
- // to all paths under that URL.
- jar.SetCookies(url, []*http.Cookie{cookie})
- return nil
-}
-
-func (ctxt *clientContext) addCookie(req *http.Request, ms macaroon.Slice) error {
- cookies, err := NewCookie(ms)
- if err != nil {
- return errgo.Mask(err)
- }
- // TODO should we set it for the URL only, or the host.
- // Can we set cookies such that they'll always get sent to any
- // URL on the given host?
- ctxt.client.Jar.SetCookies(req.URL, []*http.Cookie{cookies})
- return nil
-}
-
-func appendURLElem(u, elem string) string {
- if strings.HasSuffix(u, "/") {
- return u + elem
- }
- return u + "/" + elem
-}
-
-func (ctxt *clientContext) obtainThirdPartyDischarge(originalLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) {
- var resp dischargeResponse
- loc := appendURLElem(cav.Location, "discharge")
- err := postFormJSON(
- loc,
- url.Values{
- "id": {cav.Id},
- "location": {originalLocation},
- },
- &resp,
- ctxt.postForm,
- )
- if err == nil {
- return resp.Macaroon, nil
- }
- cause, ok := errgo.Cause(err).(*Error)
- if !ok {
- return nil, errgo.Notef(err, "cannot acquire discharge")
- }
- if cause.Code != ErrInteractionRequired {
- return nil, &DischargeError{
- Reason: cause,
- }
- }
- if cause.Info == nil {
- return nil, errgo.Notef(err, "interaction-required response with no info")
- }
- m, err := ctxt.interact(loc, cause.Info.VisitURL, cause.Info.WaitURL)
- if err != nil {
- return nil, errgo.Mask(err, IsDischargeError, IsInteractionError)
- }
- return m, nil
-}
-
-// interact gathers a macaroon by directing the user to interact
-// with a web page.
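
Editor's note: when a macaroon slice has been obtained out of band, SetCookie above is how it gets attached to a client. A sketch; the URL, id and root key are placeholders:

    package main

    import (
        "log"
        "net/url"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v0/httpbakery"
    )

    func main() {
        client := httpbakery.NewHTTPClient() // comes with a cookie jar
        m, err := macaroon.New([]byte("example root key"), "example-id", "https://example.com/")
        if err != nil {
            log.Fatal(err)
        }
        u, err := url.Parse("https://example.com/")
        if err != nil {
            log.Fatal(err)
        }
        // The slice holds the primary macaroon first; discharges,
        // if any, follow it.
        if err := httpbakery.SetCookie(client.Jar, u, macaroon.Slice{m}); err != nil {
            log.Fatal(err)
        }
        // Subsequent requests through client now carry the macaroon.
        if resp, err := client.Get("https://example.com/protected"); err == nil {
            resp.Body.Close()
        }
    }
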
-func (ctxt *clientContext) interact(location, visitURLStr, waitURLStr string) (*macaroon.Macaroon, error) {
- visitURL, err := relativeURL(location, visitURLStr)
- if err != nil {
- return nil, errgo.Notef(err, "cannot make relative visit URL")
- }
- waitURL, err := relativeURL(location, waitURLStr)
- if err != nil {
- return nil, errgo.Notef(err, "cannot make relative wait URL")
- }
- if err := ctxt.visitWebPage(visitURL); err != nil {
- return nil, &InteractionError{
- Reason: err,
- }
- }
- waitResp, err := ctxt.client.Get(waitURL.String())
- if err != nil {
- return nil, errgo.Notef(err, "cannot get %q", waitURL)
- }
- defer waitResp.Body.Close()
- if waitResp.StatusCode != http.StatusOK {
- var resp Error
- if err := json.NewDecoder(waitResp.Body).Decode(&resp); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal wait error response")
- }
- return nil, errgo.WithCausef(nil, &DischargeError{
- Reason: &resp,
- }, "failed to acquire macaroon after waiting")
- }
- var resp WaitResponse
- if err := json.NewDecoder(waitResp.Body).Decode(&resp); err != nil {
- return nil, errgo.Notef(err, "cannot unmarshal wait response")
- }
- if resp.Macaroon == nil {
- return nil, errgo.New("no macaroon found in wait response")
- }
- return resp.Macaroon, nil
-}
-
-func (ctxt *clientContext) postForm(url string, data url.Values) (*http.Response, error) {
- getBody := SeekerBody(strings.NewReader(data.Encode()))
- return ctxt.post(url, "application/x-www-form-urlencoded", getBody)
-}
-
-// SeekerBody returns a body getter function suitable for
-// passing to DoWithBody that always returns the given reader,
-// first seeking to its start.
-func SeekerBody(r io.ReadSeeker) BodyGetter {
- rc := ioutil.NopCloser(r)
- return func() (io.ReadCloser, error) {
- if _, err := r.Seek(0, 0); err != nil {
- return nil, errgo.Notef(err, "cannot seek")
- }
- return rc, nil
- }
-}
-
-// BodyGetter is the type of a function used to supply (and
-// re-supply, on retry) a request body to DoWithBody.
-type BodyGetter func() (io.ReadCloser, error)
-
-func (ctxt *clientContext) post(url string, bodyType string, getBody BodyGetter) (resp *http.Response, err error) {
- req, err := http.NewRequest("POST", url, nil)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- // TODO(rog) see http.shouldRedirectPost
- return ctxt.do(req, getBody)
-}
-
-// postFormJSON does an HTTP POST request to the given url with the given
-// values and unmarshals the response into the value pointed to by resp.
-// It uses the given postForm function to actually make the POST request.
-func postFormJSON(url string, vals url.Values, resp interface{}, postForm func(url string, vals url.Values) (*http.Response, error)) error {
- logger.Debugf("postFormJSON to %s; vals: %#v", url, vals)
- httpResp, err := postForm(url, vals)
- if err != nil {
- return errgo.NoteMask(err, fmt.Sprintf("cannot http POST to %q", url), errgo.Any)
- }
- defer httpResp.Body.Close()
- data, err := ioutil.ReadAll(httpResp.Body)
- if err != nil {
- return errgo.Notef(err, "failed to read body from %q", url)
- }
- if httpResp.StatusCode != http.StatusOK {
- var errResp Error
- if err := json.Unmarshal(data, &errResp); err != nil {
- // TODO better error here
- return errgo.Notef(err, "POST %q failed with status %q; cannot parse body %q", url, httpResp.Status, data)
- }
- return &errResp
- }
- if err := json.Unmarshal(data, resp); err != nil {
- return errgo.Notef(err, "cannot unmarshal response from %q", url)
- }
- return nil
-}
-
-// RequestMacaroons returns any collections of macaroons from the cookies
By convention, each slice will contain a primary -// macaroon followed by its discharges. -func RequestMacaroons(req *http.Request) []macaroon.Slice { - var mss []macaroon.Slice - for _, cookie := range req.Cookies() { - if !strings.HasPrefix(cookie.Name, "macaroon-") { - continue - } - data, err := base64.StdEncoding.DecodeString(cookie.Value) - if err != nil { - logger.Errorf("cannot base64-decode cookie; ignoring: %v", err) - continue - } - var ms macaroon.Slice - if err := json.Unmarshal(data, &ms); err != nil { - logger.Errorf("cannot unmarshal macaroons from cookie; ignoring: %v", err) - continue - } - mss = append(mss, ms) - } - return mss -} - -func isVerificationError(err error) bool { - _, ok := err.(*bakery.VerificationError) - return ok -} - -// CheckRequest checks that the given http request contains at least one -// valid macaroon minted by the given service, using checker to check -// any first party caveats. It returns an error with a -// *bakery.VerificationError cause if the macaroon verification failed. -// -// The assert map holds any required attributes of "declared" attributes, -// overriding any inferences made from the macaroons themselves. -// It has a similar effect to adding a checkers.DeclaredCaveat -// for each key and value, but the error message will be more -// useful. -// -// It adds all the standard caveat checkers to the given checker. -// -// It returns any attributes declared in the successfully validated request. -func CheckRequest(svc *bakery.Service, req *http.Request, assert map[string]string, checker checkers.Checker) (map[string]string, error) { - mss := RequestMacaroons(req) - if len(mss) == 0 { - return nil, &bakery.VerificationError{ - Reason: errgo.Newf("no macaroon cookies in request"), - } - } - checker = checkers.New( - checker, - Checkers(req), - checkers.TimeBefore, - ) - attrs, err := svc.CheckAny(mss, assert, checker) - if err != nil { - return nil, errgo.Mask(err, isVerificationError) - } - return attrs, nil -} - -type cookieLogger struct { - http.CookieJar -} - -func (j *cookieLogger) SetCookies(u *url.URL, cookies []*http.Cookie) { - logger.Debugf("%p setting %d cookies for %s", j.CookieJar, len(cookies), u) - for i, c := range cookies { - logger.Debugf("\t%d. path %s; name %s", i, c.Path, c.Name) - } - j.CookieJar.SetCookies(u, cookies) -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/client_test.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/client_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/client_test.go 1970-01-01 00:00:00 +0000 @@ -1,426 +0,0 @@ -package httpbakery_test - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "strings" - - jujutesting "github.com/juju/testing" - "github.com/juju/utils/jsonhttp" - gc "gopkg.in/check.v1" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" - "gopkg.in/macaroon-bakery.v0/bakerytest" - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -type ClientSuite struct { - jujutesting.LoggingSuite -} - -var _ = gc.Suite(&ClientSuite{}) - -// TestSingleServiceFirstParty creates a single service -// with a macaroon with one first party caveat. -// It creates a request with this macaroon and checks that the service -// can verify this macaroon as valid. -func (s *ClientSuite) TestSingleServiceFirstParty(c *gc.C) { - // Create a target service. 
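
Editor's note: putting CheckRequest together with WriteDischargeRequiredError (defined in error.go, further below) gives the usual server-side shape. A sketch: the discharger URL is invented, and the service fetches the discharger's public key with PublicKeyForLocation so it can encrypt the third-party caveat.

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "gopkg.in/macaroon-bakery.v0/bakery"
        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
        "gopkg.in/macaroon-bakery.v0/httpbakery"
    )

    func main() {
        dischargerURL := "https://discharger.example.com"
        key, err := httpbakery.PublicKeyForLocation(httpbakery.NewHTTPClient(), dischargerURL)
        if err != nil {
            log.Fatal(err)
        }
        locator := bakery.PublicKeyLocatorMap{dischargerURL: key}
        svc, err := bakery.NewService(bakery.NewServiceParams{
            Location: "my-server",
            Locator:  locator,
        })
        if err != nil {
            log.Fatal(err)
        }
        http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
            if _, err := httpbakery.CheckRequest(svc, req, nil, checkers.New()); err != nil {
                // No valid macaroon in the request: mint one carrying
                // a third-party caveat and ask the client to discharge.
                m, merr := svc.NewMacaroon("", nil, []checkers.Caveat{{
                    Location:  dischargerURL,
                    Condition: "is-authenticated",
                }})
                if merr != nil {
                    http.Error(w, merr.Error(), http.StatusInternalServerError)
                    return
                }
                httpbakery.WriteDischargeRequiredError(w, m, "/", err)
                return
            }
            fmt.Fprintln(w, "ok")
        })
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
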
- svc := newService("loc", nil) - // No discharge required, so pass "unknown" for the third party - // caveat discharger location so we know that we don't try - // to discharge the location. - ts := httptest.NewServer(serverHandler(svc, "unknown", nil)) - defer ts.Close() - - // Mint a macaroon for the target service. - serverMacaroon, err := svc.NewMacaroon("", nil, nil) - c.Assert(err, gc.IsNil) - c.Assert(serverMacaroon.Location(), gc.Equals, "loc") - err = svc.AddCaveat(serverMacaroon, checkers.Caveat{ - Condition: "is something", - }) - c.Assert(err, gc.IsNil) - - // Create a client request. - req, err := http.NewRequest("GET", ts.URL, nil) - c.Assert(err, gc.IsNil) - client := clientRequestWithCookies(c, ts.URL, macaroon.Slice{serverMacaroon}) - // Somehow the client has accquired the macaroon. Add it to the cookiejar in our request. - - // Make the request to the server. - resp, err := client.Do(req) - c.Assert(err, gc.IsNil) - defer resp.Body.Close() - assertResponse(c, resp, "done") -} - -func (s *ClientSuite) TestRepeatedRequestWithBody(c *gc.C) { - d := bakerytest.NewDischarger(nil, noCaveatChecker) - defer d.Close() - - // Create a target service. - svc := newService("loc", d) - - ts := httptest.NewServer(serverHandler(svc, d.Location(), nil)) - defer ts.Close() - - // Create a client request. - req, err := http.NewRequest("POST", ts.URL, nil) - c.Assert(err, gc.IsNil) - - // Make the request to the server. - - // First try with a body in the request, which should be denied - // because we must use DoWithBody. - req.Body = ioutil.NopCloser(strings.NewReader("postbody")) - resp, err := httpbakery.Do(httpbakery.NewHTTPClient(), req, noVisit) - c.Assert(err, gc.ErrorMatches, "body unexpectedly provided in request - use DoWithBody") - c.Assert(resp, gc.IsNil) - - // Then try with no authorization, so make sure that httpbakery.Do - // really will retry the request. - - req.Body = nil - - bodyText := "postbody" - bodyReader := &readCounter{ReadSeeker: strings.NewReader(bodyText)} - - resp, err = httpbakery.DoWithBody(httpbakery.NewHTTPClient(), req, httpbakery.SeekerBody(bodyReader), noVisit) - c.Assert(err, gc.IsNil) - defer resp.Body.Close() - assertResponse(c, resp, "done postbody") - - // Sanity check that the body really was read twice and hence - // that we are checking the logic we intend to check. - c.Assert(bodyReader.byteCount, gc.Equals, len(bodyText)*2) -} - -func (s *ClientSuite) TestDischargeServerWithMacaraqOnDischarge(c *gc.C) { - locator := bakery.NewPublicKeyRing() - - var called [3]int - - // create the services from leaf discharger to primary - // service so that each one can know the location - // to discharge at. 
- key2, h2 := newHTTPDischarger(locator, func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { - called[2]++ - if cav != "is-ok" { - return nil, fmt.Errorf("unrecognized caveat at srv2") - } - return nil, nil - }) - srv2 := httptest.NewServer(h2) - locator.AddPublicKeyForLocation(srv2.URL, true, key2) - - key1, h1 := newHTTPDischarger(locator, func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { - called[1]++ - if _, err := httpbakery.CheckRequest(svc, req, nil, checkers.New()); err != nil { - return nil, newDischargeRequiredError(svc, srv2.URL, nil, err) - } - if cav != "is-ok" { - return nil, fmt.Errorf("unrecognized caveat at srv1") - } - return nil, nil - }) - srv1 := httptest.NewServer(h1) - locator.AddPublicKeyForLocation(srv1.URL, true, key1) - - svc0 := newService("loc", locator) - srv0 := httptest.NewServer(serverHandler(svc0, srv1.URL, nil)) - - // Make a client request. - client := httpbakery.NewHTTPClient() - req, err := http.NewRequest("GET", srv0.URL, nil) - c.Assert(err, gc.IsNil) - resp, err := httpbakery.Do(client, req, noVisit) - c.Assert(err, gc.IsNil) - defer resp.Body.Close() - assertResponse(c, resp, "done") - - c.Assert(called, gc.DeepEquals, [3]int{0, 2, 1}) -} - -func newHTTPDischarger(locator bakery.PublicKeyLocator, checker func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) (*bakery.PublicKey, http.Handler) { - svc := newService("loc", locator) - mux := http.NewServeMux() - httpbakery.AddDischargeHandler(mux, "/", svc, func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { - return checker(svc, req, cavId, cav) - }) - return svc.PublicKey(), mux -} - -func (s *ClientSuite) TestMacaroonCookiePath(c *gc.C) { - svc := newService("loc", nil) - - cookiePath := "" - ts := httptest.NewServer(serverHandler(svc, "", func() string { - return cookiePath - })) - defer ts.Close() - - var client *http.Client - doRequest := func() { - req, err := http.NewRequest("GET", ts.URL+"/foo/bar/", nil) - c.Assert(err, gc.IsNil) - client = httpbakery.NewHTTPClient() - resp, err := httpbakery.Do(client, req, noVisit) - c.Assert(err, gc.IsNil) - defer resp.Body.Close() - assertResponse(c, resp, "done") - } - assertCookieCount := func(path string, n int) { - u, err := url.Parse(ts.URL + path) - c.Assert(err, gc.IsNil) - c.Logf("client jar %p", client.Jar) - c.Assert(client.Jar.Cookies(u), gc.HasLen, n) - } - cookiePath = "" - c.Logf("- cookie path %q", cookiePath) - doRequest() - assertCookieCount("", 0) - assertCookieCount("/foo", 0) - assertCookieCount("/foo", 0) - assertCookieCount("/foo/", 0) - assertCookieCount("/foo/bar/", 1) - assertCookieCount("/foo/bar/baz", 1) - - cookiePath = "/foo/" - c.Logf("- cookie path %q", cookiePath) - doRequest() - assertCookieCount("", 0) - assertCookieCount("/foo", 1) - assertCookieCount("/foo/", 1) - assertCookieCount("/foo/bar/", 1) - assertCookieCount("/foo/bar/baz", 1) - - cookiePath = "../" - c.Logf("- cookie path %q", cookiePath) - doRequest() - assertCookieCount("", 0) - assertCookieCount("/bar", 0) - assertCookieCount("/foo", 1) - assertCookieCount("/foo/", 1) - assertCookieCount("/foo/bar/", 1) - assertCookieCount("/foo/bar/baz", 1) -} - -func (s *ClientSuite) TestPublicKey(c *gc.C) { - d := bakerytest.NewDischarger(nil, noCaveatChecker) - defer d.Close() - client := httpbakery.NewHTTPClient() - publicKey, err := httpbakery.PublicKeyForLocation(client, d.Location()) - c.Assert(err, gc.IsNil) - expectedKey := 
d.Service.PublicKey() - c.Assert(publicKey, gc.DeepEquals, expectedKey) -} - -func (s *ClientSuite) TestPublicKeyWrongURL(c *gc.C) { - client := httpbakery.NewHTTPClient() - _, err := httpbakery.PublicKeyForLocation(client, "http://localhost:0") - c.Assert(err, gc.ErrorMatches, - `cannot get public key from "http://localhost:0/publickey": Get http://localhost:0/publickey: dial tcp 127.0.0.1:0: connection refused`) -} - -func (s *ClientSuite) TestPublicKeyReturnsInvalidJson(c *gc.C) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "BADJSON") - })) - defer ts.Close() - client := httpbakery.NewHTTPClient() - _, err := httpbakery.PublicKeyForLocation(client, ts.URL) - c.Assert(err, gc.ErrorMatches, - fmt.Sprintf(`failed to decode response from "%s/publickey": invalid character 'B' looking for beginning of value`, ts.URL)) -} - -func (s *ClientSuite) TestPublicKeyReturnsStatusInternalServerError(c *gc.C) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusInternalServerError) - })) - defer ts.Close() - client := httpbakery.NewHTTPClient() - _, err := httpbakery.PublicKeyForLocation(client, ts.URL) - c.Assert(err, gc.ErrorMatches, - fmt.Sprintf(`cannot get public key from "%s/publickey": got status 500 Internal Server Error`, ts.URL)) -} - -func (s *ClientSuite) TestThirdPartyDischargeRefused(c *gc.C) { - d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - return nil, errgo.New("boo! cond " + cond) - }) - defer d.Close() - - // Create a target service. - svc := newService("loc", d) - - ts := httptest.NewServer(serverHandler(svc, d.Location(), nil)) - defer ts.Close() - - // Create a client request. - req, err := http.NewRequest("GET", ts.URL, nil) - c.Assert(err, gc.IsNil) - - client := httpbakery.NewHTTPClient() - - // Make the request to the server. - resp, err := httpbakery.Do(client, req, noVisit) - c.Assert(errgo.Cause(err), gc.FitsTypeOf, (*httpbakery.DischargeError)(nil)) - c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": third party refused discharge: cannot discharge: boo! cond is-ok`) - c.Assert(resp, gc.IsNil) -} - -func (s *ClientSuite) TestDischargeWithInteractionRequiredError(c *gc.C) { - d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - return nil, &httpbakery.Error{ - Code: httpbakery.ErrInteractionRequired, - Message: "interaction required", - Info: &httpbakery.ErrorInfo{ - VisitURL: "http://0.1.2.3/", - WaitURL: "http://0.1.2.3/", - }, - } - }) - defer d.Close() - - // Create a target service. - svc := newService("loc", d) - - ts := httptest.NewServer(serverHandler(svc, d.Location(), nil)) - defer ts.Close() - - // Create a client request. - req, err := http.NewRequest("GET", ts.URL, nil) - c.Assert(err, gc.IsNil) - - client := httpbakery.NewHTTPClient() - - errCannotVisit := errgo.New("cannot visit") - // Make the request to the server. 
- resp, err := httpbakery.Do(client, req, func(*url.URL) error { - return errCannotVisit - }) - c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": cannot start interactive session: cannot visit`) - c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) - ierr, ok := errgo.Cause(err).(*httpbakery.InteractionError) - c.Assert(ok, gc.Equals, true) - c.Assert(ierr.Reason, gc.Equals, errCannotVisit) - c.Assert(resp, gc.IsNil) -} - -// assertResponse asserts that the given response is OK and contains -// the expected body text. -func assertResponse(c *gc.C, resp *http.Response, expectBody string) { - c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) - body, err := ioutil.ReadAll(resp.Body) - c.Assert(err, gc.IsNil) - c.Assert(string(body), gc.DeepEquals, expectBody) -} - -func noVisit(*url.URL) error { - return fmt.Errorf("should not be visiting") -} - -type readCounter struct { - io.ReadSeeker - byteCount int -} - -func (r *readCounter) Read(buf []byte) (int, error) { - n, err := r.ReadSeeker.Read(buf) - r.byteCount += n - return n, err -} - -func newService(location string, locator bakery.PublicKeyLocator) *bakery.Service { - svc, err := bakery.NewService(bakery.NewServiceParams{ - Location: location, - Locator: locator, - }) - if err != nil { - panic(err) - } - return svc -} - -func clientRequestWithCookies(c *gc.C, u string, macaroons macaroon.Slice) *http.Client { - client := httpbakery.NewHTTPClient() - url, err := url.Parse(u) - c.Assert(err, gc.IsNil) - err = httpbakery.SetCookie(client.Jar, url, macaroons) - c.Assert(err, gc.IsNil) - return client -} - -// serverHandler returns an HTTP handler that checks macaroon authorization -// and, if that succeeds, writes the string "done" and echos anything in the -// request body. -// It recognises the single first party caveat "is something". -func serverHandler(service *bakery.Service, authLocation string, cookiePath func() string) http.Handler { - handleErrors := jsonhttp.HandleErrors(httpbakery.ErrorToResponse) - return handleErrors(func(w http.ResponseWriter, req *http.Request) error { - if _, checkErr := httpbakery.CheckRequest(service, req, nil, isChecker("something")); checkErr != nil { - return newDischargeRequiredError(service, authLocation, cookiePath, checkErr) - } - fmt.Fprintf(w, "done") - data, err := ioutil.ReadAll(req.Body) - if err != nil { - panic(fmt.Errorf("cannot read body: %v", err)) - } - if len(data) > 0 { - fmt.Fprintf(w, " %s", data) - } - return nil - }) -} - -// newDischargeRequiredError returns a discharge-required error -// holding a newly minted macaroon -// referencing the original check error checkErr. -// If authLocation is non-empty, the issued macaroon -// will contain an "is-ok" third party caveat addressed to that location. -// -// If cookiePath is not nil, it will be called to find the cookie path to -// put in the response. 
-func newDischargeRequiredError(svc *bakery.Service, authLocation string, cookiePath func() string, checkErr error) error { - var caveats []checkers.Caveat - if authLocation != "" { - caveats = []checkers.Caveat{{ - Location: authLocation, - Condition: "is-ok", - }} - } - m, err := svc.NewMacaroon("", nil, caveats) - if err != nil { - panic(fmt.Errorf("cannot make new macaroon: %v", err)) - } - path := "" - if cookiePath != nil { - path = cookiePath() - } - return httpbakery.NewDischargeRequiredError(m, path, checkErr) -} - -type isChecker string - -func (isChecker) Condition() string { - return "is" -} - -func (c isChecker) Check(_, arg string) error { - if arg != string(c) { - return fmt.Errorf("%v doesn't match %s", arg, c) - } - return nil -} - -func noCaveatChecker(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { - return nil, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/discharge.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/discharge.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/discharge.go 1970-01-01 00:00:00 +0000 @@ -1,184 +0,0 @@ -package httpbakery - -import ( - "crypto/rand" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "path" - - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/bakery" - "gopkg.in/macaroon-bakery.v0/bakery/checkers" -) - -type dischargeHandler struct { - svc *bakery.Service - checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) -} - -// AddDischargeHandler adds handlers to the given -// ServeMux to serve third party caveat discharges -// using the given service. -// -// The handlers are added under the given rootPath, -// which must be non-empty. -// -// The check function is used to check whether a client making the given -// request should be allowed a discharge for the given caveat. If it -// does not return an error, the caveat will be discharged, with any -// returned caveats also added to the discharge macaroon. -// If it returns an error with a *Error cause, the error will be marshaled -// and sent back to the client. -// -// The name space served by DischargeHandler is as follows. -// All parameters can be provided either as URL attributes -// or form attributes. The result is always formatted as a JSON -// object. -// -// On failure, all endpoints return an error described by -// the Error type. -// -// POST /discharge -// params: -// id: id of macaroon to discharge -// location: location of original macaroon (optional (?)) -// ?? flow=redirect|newwindow -// result on success (http.StatusOK): -// { -// Macaroon *macaroon.Macaroon -// } -// -// POST /create -// params: -// condition: caveat condition to discharge -// rootkey: root key of discharge caveat -// result: -// { -// CaveatID: string -// } -// -// GET /publickey -// result: -// public key of service -// expiry time of key -func AddDischargeHandler(mux *http.ServeMux, rootPath string, svc *bakery.Service, checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) { - d := &dischargeHandler{ - svc: svc, - checker: checker, - } - mux.Handle(path.Join(rootPath, "discharge"), handleJSON(d.serveDischarge)) - mux.Handle(path.Join(rootPath, "create"), handleJSON(d.serveCreate)) - // TODO(rog) is there a case for making public key caveat signing - // optional? 
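
Editor's note: standing up the discharge endpoints described above takes only a ServeMux and a checker. A sketch: the location and port are invented, and this checker approves every caveat, which a real discharger must not do.

    package main

    import (
        "log"
        "net/http"

        "gopkg.in/macaroon-bakery.v0/bakery"
        "gopkg.in/macaroon-bakery.v0/bakery/checkers"
        "gopkg.in/macaroon-bakery.v0/httpbakery"
    )

    func main() {
        svc, err := bakery.NewService(bakery.NewServiceParams{
            Location: "https://discharger.example.com",
        })
        if err != nil {
            log.Fatal(err)
        }
        mux := http.NewServeMux()
        // Serves POST /discharge, POST /create and GET /publickey.
        httpbakery.AddDischargeHandler(mux, "/", svc, func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) {
            // A real checker would inspect cav (and possibly req)
            // and refuse any discharge it cannot vouch for.
            return nil, nil
        })
        log.Fatal(http.ListenAndServe(":8081", mux))
    }
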
- mux.Handle(path.Join(rootPath, "publickey"), handleJSON(d.servePublicKey)) -} - -type dischargeResponse struct { - Macaroon *macaroon.Macaroon `json:",omitempty"` -} - -func (d *dischargeHandler) serveDischarge(h http.Header, req *http.Request) (interface{}, error) { - r, err := d.serveDischarge1(h, req) - if err != nil { - logger.Debugf("serveDischarge -> error %#v", err) - } else { - logger.Debugf("serveDischarge -> %#v", r) - } - return r, err -} - -func (d *dischargeHandler) serveDischarge1(h http.Header, req *http.Request) (interface{}, error) { - logger.Debugf("dischargeHandler.serveDischarge {") - defer logger.Debugf("}") - if req.Method != "POST" { - // TODO http.StatusMethodNotAllowed) - return nil, badRequestErrorf("method not allowed") - } - req.ParseForm() - id := req.Form.Get("id") - if id == "" { - return nil, badRequestErrorf("id attribute is empty") - } - checker := func(cavId, cav string) ([]checkers.Caveat, error) { - return d.checker(req, cavId, cav) - } - - // TODO(rog) pass location into discharge - // location := req.Form.Get("location") - - var resp dischargeResponse - m, err := d.svc.Discharge(bakery.ThirdPartyCheckerFunc(checker), id) - if err != nil { - return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any) - } - resp.Macaroon = m - return &resp, nil -} - -type thirdPartyCaveatIdRecord struct { - RootKey []byte - Condition string -} - -type caveatIdResponse struct { - CaveatId string - Error string -} - -func (d *dischargeHandler) serveCreate(h http.Header, req *http.Request) (interface{}, error) { - req.ParseForm() - condition := req.Form.Get("condition") - rootKeyStr := req.Form.Get("root-key") - - if len(condition) == 0 { - return nil, badRequestErrorf("empty value for condition") - } - if len(rootKeyStr) == 0 { - return nil, badRequestErrorf("empty value for root key") - } - rootKey, err := base64.StdEncoding.DecodeString(rootKeyStr) - if err != nil { - return nil, badRequestErrorf("cannot base64-decode root key: %v", err) - } - // TODO(rog) what about expiry times? - idBytes, err := randomBytes(24) - if err != nil { - return nil, fmt.Errorf("cannot generate random key: %v", err) - } - id := fmt.Sprintf("%x", idBytes) - recordBytes, err := json.Marshal(thirdPartyCaveatIdRecord{ - Condition: condition, - RootKey: rootKey, - }) - if err != nil { - return nil, fmt.Errorf("cannot marshal caveat id record: %v", err) - } - err = d.svc.Store().Put(id, string(recordBytes)) - if err != nil { - return nil, fmt.Errorf("cannot store caveat id record: %v", err) - } - return caveatIdResponse{ - CaveatId: id, - }, nil -} - -type publicKeyResponse struct { - PublicKey *bakery.PublicKey -} - -func (d *dischargeHandler) servePublicKey(h http.Header, r *http.Request) (interface{}, error) { - return publicKeyResponse{d.svc.PublicKey()}, nil -} - -func randomBytes(n int) ([]byte, error) { - b := make([]byte, n) - _, err := rand.Read(b) - if err != nil { - return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) - } - return b, nil -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/error.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/error.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/error.go 1970-01-01 00:00:00 +0000 @@ -1,162 +0,0 @@ -package httpbakery - -import ( - "net/http" - - "github.com/juju/utils/jsonhttp" - "gopkg.in/errgo.v1" - "gopkg.in/macaroon.v1" -) - -// ErrorCode holds an error code that classifies -// an error returned from a bakery HTTP handler. 
-type ErrorCode string - -func (e ErrorCode) Error() string { - return string(e) -} - -func (e ErrorCode) ErrorCode() ErrorCode { - return e -} - -const ( - ErrBadRequest = ErrorCode("bad request") - ErrDischargeRequired = ErrorCode("macaroon discharge required") - ErrInteractionRequired = ErrorCode("interaction required") -) - -var ( - handleJSON = jsonhttp.HandleJSON(ErrorToResponse) - handleErrors = jsonhttp.HandleErrors(ErrorToResponse) - writeError = jsonhttp.WriteError(ErrorToResponse) -) - -// Error holds the type of a response from an httpbakery HTTP request, -// marshaled as JSON. -type Error struct { - Code ErrorCode `json:",omitempty"` - Message string `json:",omitempty"` - Info *ErrorInfo `json:",omitempty"` -} - -// ErrorInfo holds additional information provided -// by an error. -type ErrorInfo struct { - // Macaroon may hold a macaroon that, when - // discharged, may allow access to a service. - // This field is associated with the ErrDischargeRequired - // error code. - Macaroon *macaroon.Macaroon `json:",omitempty"` - - // MacaroonPath holds the URL path to be associated - // with the macaroon. The macaroon is potentially - // valid for all URLs under the given path. - // If it is empty, the macaroon will be associated with - // the original URL from which the error was returned. - MacaroonPath string `json:",omitempty"` - - // VisitURL and WaitURL are associated with the - // ErrInteractionRequired error code. - - // VisitURL holds a URL that the client should visit - // in a web browser to authenticate themselves. - VisitURL string `json:",omitempty"` - - // WaitURL holds a URL that the client should visit - // to acquire the discharge macaroon. A GET on - // this URL will block until the client has authenticated, - // and then it will return the discharge macaroon. - WaitURL string `json:",omitempty"` -} - -func (e *Error) Error() string { - return e.Message -} - -func (e *Error) ErrorCode() ErrorCode { - return e.Code -} - -// ErrorInfo returns additional information -// about the error. -// TODO return interface{} here? -func (e *Error) ErrorInfo() *ErrorInfo { - return e.Info -} - -// ErrorToResponse returns the HTTP status and an error body to be -// marshaled as JSON for the given error. This allows a third party -// package to integrate bakery errors into their error responses when -// they encounter an error with a *bakery.Error cause. -func ErrorToResponse(err error) (int, interface{}) { - errorBody := errorResponseBody(err) - status := http.StatusInternalServerError - switch errorBody.Code { - case ErrBadRequest: - status = http.StatusBadRequest - case ErrDischargeRequired, ErrInteractionRequired: - status = http.StatusProxyAuthRequired - } - return status, errorBody -} - -type errorInfoer interface { - ErrorInfo() *ErrorInfo -} - -type errorCoder interface { - ErrorCode() ErrorCode -} - -// errorResponse returns an appropriate error -// response for the provided error. -func errorResponseBody(err error) *Error { - errResp := &Error{ - Message: err.Error(), - } - cause := errgo.Cause(err) - if coder, ok := cause.(errorCoder); ok { - errResp.Code = coder.ErrorCode() - } - if infoer, ok := cause.(errorInfoer); ok { - errResp.Info = infoer.ErrorInfo() - } - return errResp -} - -func badRequestErrorf(f string, a ...interface{}) error { - return errgo.WithCausef(nil, ErrBadRequest, f, a...) 
-} - -// WriteDischargeRequiredError creates an error using -// NewDischargeRequiredError and writes it to the given response writer, -// indicating that the client should discharge the macaroon to allow the -// original request to be accepted. -func WriteDischargeRequiredError(w http.ResponseWriter, m *macaroon.Macaroon, path string, originalErr error) { - writeError(w, NewDischargeRequiredError(m, path, originalErr)) -} - -// NewDischargeRequiredError returns an error of type *Error -// that reports the given original error and includes the -// given macaroon. -// -// The returned macaroon will be -// declared as valid for the given URL path and may -// be relative. When the client stores the discharged -// macaroon as a cookie this will be the path associated -// with the cookie. See ErrorInfo.MacaroonPath for -// more information. -func NewDischargeRequiredError(m *macaroon.Macaroon, path string, originalErr error) error { - if originalErr == nil { - originalErr = ErrDischargeRequired - } - return &Error{ - Message: originalErr.Error(), - Code: ErrDischargeRequired, - Info: &ErrorInfo{ - Macaroon: m, - MacaroonPath: path, - }, - } -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/error_test.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/error_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/error_test.go 1970-01-01 00:00:00 +0000 @@ -1,71 +0,0 @@ -package httpbakery_test - -import ( - "errors" - "net/http" - "net/http/httptest" - - "github.com/juju/testing/httptesting" - gc "gopkg.in/check.v1" - "gopkg.in/macaroon.v1" - - "gopkg.in/macaroon-bakery.v0/httpbakery" -) - -type ErrorSuite struct{} - -var _ = gc.Suite(&ErrorSuite{}) - -func (s *ErrorSuite) TestWriteDischargeRequiredError(c *gc.C) { - m, err := macaroon.New([]byte("secret"), "id", "a location") - c.Assert(err, gc.IsNil) - tests := []struct { - about string - path string - err error - expectedResponse httpbakery.Error - }{{ - about: `write discharge required with "an error" but no path`, - path: "", - err: errors.New("an error"), - expectedResponse: httpbakery.Error{ - Code: httpbakery.ErrDischargeRequired, - Message: "an error", - Info: &httpbakery.ErrorInfo{ - Macaroon: m, - }, - }, - }, { - about: `write discharge required with "an error" but and set a path`, - path: "http://foobar:1234", - err: errors.New("an error"), - expectedResponse: httpbakery.Error{ - Code: httpbakery.ErrDischargeRequired, - Message: "an error", - Info: &httpbakery.ErrorInfo{ - Macaroon: m, - MacaroonPath: "http://foobar:1234", - }, - }, - }, { - about: `write discharge required with nil error but set a path`, - path: "http://foobar:1234", - err: nil, - expectedResponse: httpbakery.Error{ - Code: httpbakery.ErrDischargeRequired, - Message: httpbakery.ErrDischargeRequired.Error(), - Info: &httpbakery.ErrorInfo{ - Macaroon: m, - MacaroonPath: "http://foobar:1234", - }, - }, - }, - } - - for i, t := range tests { - c.Logf("Running test %d %s", i, t.about) - response := httptest.NewRecorder() - httpbakery.WriteDischargeRequiredError(response, m, t.path, t.err) - httptesting.AssertJSONResponse(c, response, http.StatusProxyAuthRequired, t.expectedResponse) - } -} === removed file 'src/gopkg.in/macaroon-bakery.v0/httpbakery/package_test.go' --- src/gopkg.in/macaroon-bakery.v0/httpbakery/package_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/macaroon-bakery.v0/httpbakery/package_test.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -package httpbakery_test - -import ( - "testing" - - gc "gopkg.in/check.v1" 
-) - -func TestPackage(t *testing.T) { - gc.TestingT(t) -} === added directory 'src/gopkg.in/macaroon-bakery.v1' === added file 'src/gopkg.in/macaroon-bakery.v1/.gitignore' --- src/gopkg.in/macaroon-bakery.v1/.gitignore 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/.gitignore 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1 @@ +*.test === added file 'src/gopkg.in/macaroon-bakery.v1/LICENSE' --- src/gopkg.in/macaroon-bakery.v1/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,187 @@ +Copyright © 2014, Roger Peppe, Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. 
Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. 
(If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. === added file 'src/gopkg.in/macaroon-bakery.v1/README.md' --- src/gopkg.in/macaroon-bakery.v1/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,10 @@ +# The macaroon bakery + +This repository is a companion to http://github.com/go-macaroon . +It holds higher level operations for building systems with macaroons. + +For documentation, see: + +- http://godoc.org/gopkg.in/macaroon-bakery.v1/bakery +- http://godoc.org/gopkg.in/macaroon-bakery.v1/httpbakery +- http://godoc.org/gopkg.in/macaroon-bakery.v1/bakery/checkers === added file 'src/gopkg.in/macaroon-bakery.v1/TODO' --- src/gopkg.in/macaroon-bakery.v1/TODO 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/TODO 2016-03-22 15:18:22 +0000 @@ -0,0 +1,7 @@ +all: + - when API is stable, move to gopkg.in/macaroon.v1 + +macaroon: + + - change all signature calculations to correspond exactly + with libmacaroons. 
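The bakery and checkers packages added below carry those higher-level operations. As a rough sketch of how they fit together (not part of the upstream files; every name is taken from code added in this diff, and the zero-valued NewServiceParams is the same shortcut the tests below use), a service mints a macaroon with a first-party caveat and later checks it:

    package main

    import (
        "fmt"
        "time"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
    )

    func main() {
        // A zero NewServiceParams works, as in the tests added below;
        // Location is illustrative.
        svc, err := bakery.NewService(bakery.NewServiceParams{Location: "example"})
        if err != nil {
            panic(err)
        }
        // Mint a macaroon that expires in one minute.
        m, err := svc.NewMacaroon("", nil, []checkers.Caveat{
            checkers.TimeBeforeCaveat(time.Now().Add(time.Minute)),
        })
        if err != nil {
            panic(err)
        }
        // Check it, supplying a checker for the time-before condition.
        err = svc.Check(macaroon.Slice{m}, checkers.New(checkers.TimeBefore))
        fmt.Println("check:", err) // nil until the minute is up
    }
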
=== added directory 'src/gopkg.in/macaroon-bakery.v1/bakery' === added directory 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers' === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,309 @@ +// The checkers package provides some standard first-party +// caveat checkers and some primitives for combining them. +package checkers + +import ( + "fmt" + "net" + "strings" + + "gopkg.in/errgo.v1" +) + +// Constants for all the standard caveat conditions. +// First and third party caveat conditions are both defined here, +// even though notionally they exist in separate name spaces. +const ( + CondDeclared = "declared" + CondTimeBefore = "time-before" + CondClientIPAddr = "client-ip-addr" + CondClientOrigin = "origin" + CondError = "error" + CondNeedDeclared = "need-declared" + CondAllow = "allow" + CondDeny = "deny" +) + +// ErrCaveatNotRecognized is the cause of errors returned +// from caveat checkers when the caveat was not +// recognized. +var ErrCaveatNotRecognized = errgo.New("caveat not recognized") + +// Caveat represents a condition that must be true for a check to +// complete successfully. If Location is non-empty, the caveat must be +// discharged by a third party at the given location. +// This differs from macaroon.Caveat in that the condition +// is not encrypted. +type Caveat struct { + Location string + Condition string +} + +// Checker is implemented by types that can check caveats. +type Checker interface { + // Condition returns the identifier of the condition + // to be checked - the Check method will be used + // to check caveats with this identifier. + // + // It may return an empty string, in which case + // it will be used to check any condition + Condition() string + + // Check checks that the given caveat holds true. + // The condition and arg are as returned + // from ParseCaveat. + // + // For a checker with an empty condition, a + // return of bakery.ErrCaveatNotRecognised from + // this method indicates that the condition was + // not recognized. + Check(cond, arg string) error +} + +// New returns a new MultiChecker that uses all the +// provided Checkers to check caveats. If several checkers return the +// same condition identifier, all of them will be used. +// +// The cause of any error returned by a checker will be preserved. +// +// Note that because the returned checker implements Checker +// as well as bakery.FirstPartyChecker, calls to New can be nested. +// For example, a checker can be easily added to an existing +// MultiChecker, by doing: +// +// checker := checkers.New(old, another) +func New(checkers ...Checker) *MultiChecker { + return &MultiChecker{ + checkers: checkers, + } +} + +// MultiChecker implements bakery.FirstPartyChecker +// and Checker for a collection of checkers. +type MultiChecker struct { + // TODO it may be faster to initialize a map, but we'd + // be paying the price of creating and initializing + // the map vs a few linear scans through a probably-small + // slice. Let's wait for some real-world numbers. + checkers []Checker +} + +var errBadCaveat = errgo.Newf("bad caveat") + +// Check implements Checker.Check. +func (c *MultiChecker) Check(cond, arg string) error { + // Always check for the error caveat so that we're + // sure to get a nice error message even when there + // are no other checkers. 
This also prevents someone + // from inadvertently overriding the error condition. + if cond == CondError { + return errBadCaveat + } + checked := false + for _, c := range c.checkers { + checkerCond := c.Condition() + if checkerCond != "" && checkerCond != cond { + continue + } + if err := c.Check(cond, arg); err != nil { + if checkerCond == "" && errgo.Cause(err) == ErrCaveatNotRecognized { + continue + } + return errgo.Mask(err, errgo.Any) + } + checked = true + } + if !checked { + return ErrCaveatNotRecognized + } + return nil +} + +// Condition implements Checker.Condition. +func (c *MultiChecker) Condition() string { + return "" +} + +// CheckFirstPartyCaveat implements bakery.FirstPartyChecker.CheckFirstPartyCaveat. +func (c *MultiChecker) CheckFirstPartyCaveat(cav string) error { + cond, arg, err := ParseCaveat(cav) + if err != nil { + // If we can't parse it, perhaps it's in some other format, + // return a not-recognised error. + return errgo.WithCausef(err, ErrCaveatNotRecognized, "cannot parse caveat %q", cav) + } + if err := c.Check(cond, arg); err != nil { + return errgo.NoteMask(err, fmt.Sprintf("caveat %q not satisfied", cav), errgo.Any) + } + return nil +} + +// TODO add multiChecker.CheckThirdPartyCaveat ? +// i.e. make this stuff reusable for 3rd party caveats too. + +func firstParty(cond, arg string) Caveat { + return Caveat{ + Condition: cond + " " + arg, + } +} + +// CheckerFunc implements Checker for a function. +type CheckerFunc struct { + // Condition_ holds the condition that the checker + // implements. + Condition_ string + + // Check_ holds the function to call to make the check. + Check_ func(cond, arg string) error +} + +// Condition implements Checker.Condition. +func (f CheckerFunc) Condition() string { + return f.Condition_ +} + +// Check implements Checker.Check +func (f CheckerFunc) Check(cond, arg string) error { + return f.Check_(cond, arg) +} + +// Map is a checker where the various checkers +// are specified as entries in a map, one for each +// condition. +// The cond argument passed to the function +// is always the same as its corresponding key +// in the map. +type Map map[string]func(cond string, arg string) error + +// Condition implements Checker.Condition. +func (m Map) Condition() string { + return "" +} + +// Check implements Checker.Check +func (m Map) Check(cond, arg string) error { + f, ok := m[cond] + if !ok { + return ErrCaveatNotRecognized + } + if err := f(cond, arg); err != nil { + return errgo.Mask(err, errgo.Any) + } + return nil +} + +// ParseCaveat parses a caveat into an identifier, identifying the +// checker that should be used, and the argument to the checker (the +// rest of the string). +// +// The identifier is taken from all the characters before the first +// space character. +func ParseCaveat(cav string) (cond, arg string, err error) { + if cav == "" { + return "", "", fmt.Errorf("empty caveat") + } + i := strings.IndexByte(cav, ' ') + if i < 0 { + return cav, "", nil + } + if i == 0 { + return "", "", fmt.Errorf("caveat starts with space character") + } + return cav[0:i], cav[i+1:], nil +} + +// ClientIPAddrCaveat returns a caveat that will check whether the +// client's IP address is as provided. +// Note that the checkers package provides no specific +// implementation of the checker for this - that is +// left to external transport-specific packages. 
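Since checking is left to transport code, a transport package that knows the client's address might build the checker with the CheckerFunc type from this file. A hypothetical sketch (clientIPChecker and its error messages are invented here; only CheckerFunc and CondClientIPAddr come from the file above, and the net and fmt imports are assumed):

    // clientIPChecker returns a first-party checker for the
    // client-ip-addr condition, bound to one known client address.
    func clientIPChecker(clientIP net.IP) checkers.Checker {
        return checkers.CheckerFunc{
            Condition_: checkers.CondClientIPAddr,
            Check_: func(_, arg string) error {
                ip := net.ParseIP(arg)
                if ip == nil {
                    return fmt.Errorf("cannot parse IP address %q", arg)
                }
                if !ip.Equal(clientIP) {
                    return fmt.Errorf("client IP address mismatch")
                }
                return nil
            },
        }
    }
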
+func ClientIPAddrCaveat(addr net.IP) Caveat { + if len(addr) != net.IPv4len && len(addr) != net.IPv6len { + return ErrorCaveatf("bad IP address %d", []byte(addr)) + } + return firstParty(CondClientIPAddr, addr.String()) +} + +// ClientOriginCaveat returns a caveat that will check whether the +// client's Origin header in its HTTP request is as provided. +func ClientOriginCaveat(origin string) Caveat { + return firstParty(CondClientOrigin, origin) +} + +// ErrorCaveatf returns a caveat that will never be satisfied, holding +// the given fmt.Sprintf formatted text as the text of the caveat. +// +// This should only be used for highly unusual conditions that are never +// expected to happen in practice, such as a malformed key that is +// conventionally passed as a constant. It's not a panic but you should +// only use it in cases where a panic might possibly be appropriate. +// +// This mechanism means that caveats can be created without error +// checking and a later systematic check at a higher level (in the +// bakery package) can produce an error instead. +func ErrorCaveatf(f string, a ...interface{}) Caveat { + return firstParty(CondError, fmt.Sprintf(f, a...)) +} + +// AllowCaveat returns a caveat that will deny attempts to use the +// macaroon to perform any operation other than those listed. Operations +// must not contain a space. +func AllowCaveat(op ...string) Caveat { + if len(op) == 0 { + return ErrorCaveatf("no operations allowed") + } + return operationCaveat(CondAllow, op) +} + +// DenyCaveat returns a caveat that will deny attempts to use the +// macaroon to perform any of the listed operations. Operations +// must not contain a space. +func DenyCaveat(op ...string) Caveat { + return operationCaveat(CondDeny, op) +} + +// operationCaveat is a helper for AllowCaveat and DenyCaveat. It checks +// that all operation names are valid before createing the caveat. +func operationCaveat(cond string, op []string) Caveat { + for _, o := range op { + if strings.IndexByte(o, ' ') != -1 { + return ErrorCaveatf("invalid operation name %q", o) + } + } + return firstParty(cond, strings.Join(op, " ")) +} + +// OperationChecker checks any allow or deny caveats ensuring they do not +// prohibit the named operation. +type OperationChecker string + +// Condition implements Checker.Condition. +func (OperationChecker) Condition() string { + return "" +} + +// Check implements Checker.Check. +func (o OperationChecker) Check(cond, arg string) error { + var expect bool + switch cond { + case CondAllow: + expect = true + fallthrough + case CondDeny: + var found bool + for _, op := range strings.Fields(arg) { + if string(o) == op { + found = true + break + } + } + if found == expect { + return nil + } + return fmt.Errorf("%s not allowed", o) + default: + return ErrCaveatNotRecognized + } +} + +var _ Checker = OperationChecker("") === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/checkers_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,505 @@ +package checkers_test + +import ( + "fmt" + "net" + "time" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +type CheckersSuite struct{} + +var _ = gc.Suite(&CheckersSuite{}) + +// Freeze time for the tests. 
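Before the frozen-time fixture, a quick sketch of how the allow/deny pieces defined above compose (illustrative operation names, not from the upstream files):

    // checkOp returns nil because "read" is in the allowed set; an
    // OperationChecker("delete") would instead get "delete not
    // allowed", and a deny caveat inverts the test.
    func checkOp() error {
        cav := checkers.AllowCaveat("read", "write") // condition "allow read write"
        cond, arg, err := checkers.ParseCaveat(cav.Condition)
        if err != nil {
            return err
        }
        return checkers.OperationChecker("read").Check(cond, arg)
    }
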
+var now = func() time.Time { + now, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.123Z") + if err != nil { + panic(err) + } + *checkers.TimeNow = func() time.Time { + return now + } + return now +}() + +type checkTest struct { + caveat string + expectError string + expectCause func(err error) bool +} + +var isCaveatNotRecognized = errgo.Is(checkers.ErrCaveatNotRecognized) + +var checkerTests = []struct { + about string + checker bakery.FirstPartyChecker + checks []checkTest +}{{ + about: "empty MultiChecker", + checker: checkers.New(), + checks: []checkTest{{ + caveat: "something", + expectError: `caveat "something" not satisfied: caveat not recognized`, + expectCause: isCaveatNotRecognized, + }, { + caveat: "", + expectError: `cannot parse caveat "": empty caveat`, + expectCause: isCaveatNotRecognized, + }, { + caveat: " hello", + expectError: `cannot parse caveat " hello": caveat starts with space character`, + expectCause: isCaveatNotRecognized, + }}, +}, { + about: "MultiChecker with some values", + checker: checkers.New( + argChecker("a", "aval"), + argChecker("b", "bval"), + ), + checks: []checkTest{{ + caveat: "a aval", + }, { + caveat: "b bval", + }, { + caveat: "a wrong", + expectError: `caveat "a wrong" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }}, +}, { + about: "MultiChecker with several of the same condition", + checker: checkers.New( + argChecker("a", "aval"), + argChecker("a", "bval"), + ), + checks: []checkTest{{ + caveat: "a aval", + expectError: `caveat "a aval" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }, { + caveat: "a bval", + expectError: `caveat "a bval" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }}, +}, { + about: "nested MultiChecker", + checker: checkers.New( + argChecker("a", "aval"), + argChecker("b", "bval"), + checkers.New( + argChecker("c", "cval"), + checkers.New( + argChecker("d", "dval"), + ), + argChecker("e", "eval"), + ), + ), + checks: []checkTest{{ + caveat: "a aval", + }, { + caveat: "b bval", + }, { + caveat: "c cval", + }, { + caveat: "d dval", + }, { + caveat: "e eval", + }, { + caveat: "a wrong", + expectError: `caveat "a wrong" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }, { + caveat: "c wrong", + expectError: `caveat "c wrong" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }, { + caveat: "d wrong", + expectError: `caveat "d wrong" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }, { + caveat: "f something", + expectError: `caveat "f something" not satisfied: caveat not recognized`, + expectCause: isCaveatNotRecognized, + }}, +}, { + about: "Map with no items", + checker: checkers.New( + checkers.Map{}, + ), + checks: []checkTest{{ + caveat: "a aval", + expectError: `caveat "a aval" not satisfied: caveat not recognized`, + expectCause: isCaveatNotRecognized, + }}, +}, { + about: "Map with some values", + checker: checkers.New( + checkers.Map{ + "a": argChecker("a", "aval").Check, + "b": argChecker("b", "bval").Check, + }, + ), + checks: []checkTest{{ + caveat: "a aval", + }, { + caveat: "b bval", + }, { + caveat: "a wrong", + expectError: `caveat "a wrong" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }, { + caveat: "b wrong", + expectError: `caveat "b wrong" not satisfied: wrong arg`, + expectCause: errgo.Is(errWrongArg), + }}, +}, { + about: "time within limit", + checker: checkers.New( + checkers.TimeBefore, + ), + checks: []checkTest{{ + caveat: 
checkers.TimeBeforeCaveat(now.Add(1)).Condition, + }, { + caveat: checkers.TimeBeforeCaveat(now).Condition, + expectError: `caveat "time-before 2006-01-02T15:04:05.123Z" not satisfied: macaroon has expired`, + }, { + caveat: checkers.TimeBeforeCaveat(now.Add(-1)).Condition, + expectError: `caveat "time-before 2006-01-02T15:04:05.122999999Z" not satisfied: macaroon has expired`, + }, { + caveat: `time-before bad-date`, + expectError: `caveat "time-before bad-date" not satisfied: parsing time "bad-date" as "2006-01-02T15:04:05.999999999Z07:00": cannot parse "bad-date" as "2006"`, + }, { + caveat: checkers.TimeBeforeCaveat(now).Condition + " ", + expectError: `caveat "time-before 2006-01-02T15:04:05.123Z " not satisfied: parsing time "2006-01-02T15:04:05.123Z ": extra text: `, + }}, +}, { + about: "declared, no entries", + checker: checkers.New(checkers.Declared{}), + checks: []checkTest{{ + caveat: checkers.DeclaredCaveat("a", "aval").Condition, + expectError: `caveat "declared a aval" not satisfied: got a=null, expected "aval"`, + }, { + caveat: checkers.CondDeclared, + expectError: `caveat "declared" not satisfied: declared caveat has no value`, + }}, +}, { + about: "declared, some entries", + checker: checkers.New(checkers.Declared{ + "a": "aval", + "b": "bval", + "spc": " a b", + }), + checks: []checkTest{{ + caveat: checkers.DeclaredCaveat("a", "aval").Condition, + }, { + caveat: checkers.DeclaredCaveat("b", "bval").Condition, + }, { + caveat: checkers.DeclaredCaveat("spc", " a b").Condition, + }, { + caveat: checkers.DeclaredCaveat("a", "bval").Condition, + expectError: `caveat "declared a bval" not satisfied: got a="aval", expected "bval"`, + }, { + caveat: checkers.DeclaredCaveat("a", " aval").Condition, + expectError: `caveat "declared a aval" not satisfied: got a="aval", expected " aval"`, + }, { + caveat: checkers.DeclaredCaveat("spc", "a b").Condition, + expectError: `caveat "declared spc a b" not satisfied: got spc=" a b", expected "a b"`, + }, { + caveat: checkers.DeclaredCaveat("", "a b").Condition, + expectError: `caveat "error invalid caveat 'declared' key \\"\\"" not satisfied: bad caveat`, + }, { + caveat: checkers.DeclaredCaveat("a b", "a b").Condition, + expectError: `caveat "error invalid caveat 'declared' key \\"a b\\"" not satisfied: bad caveat`, + }}, +}, { + about: "error caveat", + checker: checkers.New(), + checks: []checkTest{{ + caveat: checkers.ErrorCaveatf("").Condition, + expectError: `caveat "error " not satisfied: bad caveat`, + }, { + caveat: checkers.ErrorCaveatf("something %d", 134).Condition, + expectError: `caveat "error something 134" not satisfied: bad caveat`, + }}, +}, { + about: "error caveat overrides other", + checker: checkers.New(argChecker("error", "something")), + checks: []checkTest{{ + caveat: checkers.ErrorCaveatf("something").Condition, + expectError: `caveat "error something" not satisfied: bad caveat`, + }}, +}} + +var errWrongArg = errgo.New("wrong arg") + +func argChecker(expectCond, checkArg string) checkers.Checker { + return checkers.CheckerFunc{ + Condition_: expectCond, + Check_: func(cond, arg string) error { + if cond != expectCond { + panic(fmt.Errorf("got condition %q want %q", cond, expectCond)) + } + if arg != checkArg { + return errWrongArg + } + return nil + }, + } +} + +func (s *CheckersSuite) TestCheckers(c *gc.C) { + for i, test := range checkerTests { + c.Logf("test %d: %s", i, test.about) + for j, check := range test.checks { + c.Logf("\tcheck %d", j) + err := test.checker.CheckFirstPartyCaveat(check.caveat) + if 
check.expectError != "" { + c.Assert(err, gc.ErrorMatches, check.expectError) + if check.expectCause == nil { + check.expectCause = errgo.Any + } + c.Assert(check.expectCause(errgo.Cause(err)), gc.Equals, true) + } else { + c.Assert(err, gc.IsNil) + } + } + } +} + +func (s *CheckersSuite) TestClientIPAddrCaveat(c *gc.C) { + cav := checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}) + c.Assert(cav, gc.Equals, checkers.Caveat{ + Condition: "client-ip-addr 127.0.0.1", + }) + cav = checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")) + c.Assert(cav, gc.Equals, checkers.Caveat{ + Condition: "client-ip-addr 2001:4860:0:2001::68", + }) + cav = checkers.ClientIPAddrCaveat(nil) + c.Assert(cav, gc.Equals, checkers.Caveat{ + Condition: "error bad IP address []", + }) + cav = checkers.ClientIPAddrCaveat(net.IP{123, 3}) + c.Assert(cav, gc.Equals, checkers.Caveat{ + Condition: "error bad IP address [123 3]", + }) +} + +func (s *CheckersSuite) TestClientOriginCaveat(c *gc.C) { + cav := checkers.ClientOriginCaveat("") + c.Assert(cav, gc.Equals, checkers.Caveat{ + Condition: "origin ", + }) + cav = checkers.ClientOriginCaveat("somewhere") + c.Assert(cav, gc.Equals, checkers.Caveat{ + Condition: "origin somewhere", + }) +} + +var inferDeclaredTests = []struct { + about string + caveats [][]checkers.Caveat + expect checkers.Declared +}{{ + about: "no macaroons", + expect: checkers.Declared{}, +}, { + about: "single macaroon with one declaration", + caveats: [][]checkers.Caveat{{{ + Condition: "declared foo bar", + }}}, + expect: checkers.Declared{ + "foo": "bar", + }, +}, { + about: "only one argument to declared", + caveats: [][]checkers.Caveat{{{ + Condition: "declared foo", + }}}, + expect: checkers.Declared{}, +}, { + about: "spaces in value", + caveats: [][]checkers.Caveat{{{ + Condition: "declared foo bar bloggs", + }}}, + expect: checkers.Declared{ + "foo": "bar bloggs", + }, +}, { + about: "attribute with declared prefix", + caveats: [][]checkers.Caveat{{{ + Condition: "declaredccf foo", + }}}, + expect: checkers.Declared{}, +}, { + about: "several macaroons with different declares", + caveats: [][]checkers.Caveat{{ + checkers.DeclaredCaveat("a", "aval"), + checkers.DeclaredCaveat("b", "bval"), + }, { + checkers.DeclaredCaveat("c", "cval"), + checkers.DeclaredCaveat("d", "dval"), + }}, + expect: checkers.Declared{ + "a": "aval", + "b": "bval", + "c": "cval", + "d": "dval", + }, +}, { + about: "duplicate values", + caveats: [][]checkers.Caveat{{ + checkers.DeclaredCaveat("a", "aval"), + checkers.DeclaredCaveat("a", "aval"), + checkers.DeclaredCaveat("b", "bval"), + }, { + checkers.DeclaredCaveat("a", "aval"), + checkers.DeclaredCaveat("b", "bval"), + checkers.DeclaredCaveat("c", "cval"), + checkers.DeclaredCaveat("d", "dval"), + }}, + expect: checkers.Declared{ + "a": "aval", + "b": "bval", + "c": "cval", + "d": "dval", + }, +}, { + about: "conflicting values", + caveats: [][]checkers.Caveat{{ + checkers.DeclaredCaveat("a", "aval"), + checkers.DeclaredCaveat("a", "conflict"), + checkers.DeclaredCaveat("b", "bval"), + }, { + checkers.DeclaredCaveat("a", "conflict"), + checkers.DeclaredCaveat("b", "another conflict"), + checkers.DeclaredCaveat("c", "cval"), + checkers.DeclaredCaveat("d", "dval"), + }}, + expect: checkers.Declared{ + "c": "cval", + "d": "dval", + }, +}, { + about: "third party caveats ignored", + caveats: [][]checkers.Caveat{{{ + Condition: "declared a no conflict", + Location: "location", + }, + checkers.DeclaredCaveat("a", "aval"), + }}, + expect: checkers.Declared{ + "a": 
"aval", + }, +}, { + about: "unparseable caveats ignored", + caveats: [][]checkers.Caveat{{{ + Condition: " bad", + }, + checkers.DeclaredCaveat("a", "aval"), + }}, + expect: checkers.Declared{ + "a": "aval", + }, +}} + +func (*CheckersSuite) TestInferDeclared(c *gc.C) { + for i, test := range inferDeclaredTests { + c.Logf("test %d: %s", i, test.about) + ms := make(macaroon.Slice, len(test.caveats)) + for i, caveats := range test.caveats { + m, err := macaroon.New(nil, fmt.Sprint(i), "") + c.Assert(err, gc.IsNil) + for _, cav := range caveats { + if cav.Location == "" { + m.AddFirstPartyCaveat(cav.Condition) + } else { + m.AddThirdPartyCaveat(nil, cav.Condition, cav.Location) + } + } + ms[i] = m + } + c.Assert(checkers.InferDeclared(ms), jc.DeepEquals, test.expect) + } +} + +var operationCheckerTests = []struct { + about string + caveat checkers.Caveat + oc checkers.OperationChecker + expectError string +}{{ + about: "allowed operation", + caveat: checkers.AllowCaveat("op1", "op2", "op3"), + oc: checkers.OperationChecker("op1"), +}, { + about: "not denied oc", + caveat: checkers.DenyCaveat("op1", "op2", "op3"), + oc: checkers.OperationChecker("op4"), +}, { + about: "not allowed oc", + caveat: checkers.AllowCaveat("op1", "op2", "op3"), + oc: checkers.OperationChecker("op4"), + expectError: "op4 not allowed", +}, { + about: "denied oc", + caveat: checkers.DenyCaveat("op1", "op2", "op3"), + oc: checkers.OperationChecker("op1"), + expectError: "op1 not allowed", +}, { + about: "unrecognised caveat", + caveat: checkers.ErrorCaveatf("unrecognized"), + oc: checkers.OperationChecker("op1"), + expectError: "caveat not recognized", +}, { + about: "empty deny caveat", + caveat: checkers.DenyCaveat(), + oc: checkers.OperationChecker("op1"), +}} + +func (*CheckersSuite) TestOperationChecker(c *gc.C) { + for i, test := range operationCheckerTests { + c.Logf("%d: %s", i, test.about) + cond, arg, err := checkers.ParseCaveat(test.caveat.Condition) + c.Assert(err, gc.IsNil) + c.Assert(test.oc.Condition(), gc.Equals, "") + err = test.oc.Check(cond, arg) + if test.expectError == "" { + c.Assert(err, gc.IsNil) + continue + } + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} + +var operationErrorCaveatTests = []struct { + about string + caveat checkers.Caveat + expectCondition string +}{{ + about: "empty allow", + caveat: checkers.AllowCaveat(), + expectCondition: "error no operations allowed", +}, { + about: "allow: invalid operation name", + caveat: checkers.AllowCaveat("op1", "operation number 2"), + expectCondition: `error invalid operation name "operation number 2"`, +}, { + about: "deny: invalid operation name", + caveat: checkers.DenyCaveat("op1", "operation number 2"), + expectCondition: `error invalid operation name "operation number 2"`, +}} + +func (*CheckersSuite) TestOperationErrorCaveatTest(c *gc.C) { + for i, test := range operationErrorCaveatTests { + c.Logf("%d: %s", i, test.about) + c.Assert(test.caveat.Condition, gc.Matches, test.expectCondition) + } +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/declared.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/declared.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/declared.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,110 @@ +package checkers + +import ( + "strings" + + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" +) + +// DeclaredCaveat returns a "declared" caveat asserting that the given key is +// set to the given value. 
If a macaroon has exactly one first party +// caveat asserting the value of a particular key, then InferDeclared +// will be able to infer the value, and then DeclaredChecker will allow +// the declared value if it has the value specified here. +// +// If the key is empty or contains a space, DeclaredCaveat +// will return an error caveat. +func DeclaredCaveat(key string, value string) Caveat { + if strings.Contains(key, " ") || key == "" { + return ErrorCaveatf("invalid caveat 'declared' key %q", key) + } + return firstParty(CondDeclared, key+" "+value) +} + +// NeedDeclaredCaveat returns a third party caveat that +// wraps the provided third party caveat and requires +// that the third party must add "declared" caveats for +// all the named keys. +func NeedDeclaredCaveat(cav Caveat, keys ...string) Caveat { + if cav.Location == "" { + return ErrorCaveatf("need-declared caveat is not third-party") + } + return Caveat{ + Location: cav.Location, + Condition: CondNeedDeclared + " " + strings.Join(keys, ",") + " " + cav.Condition, + } +} + +// Declared implements a checker that will +// check that any "declared" caveats have a matching +// key for their value in the map. +type Declared map[string]string + +// Condition implements Checker.Condition. +func (c Declared) Condition() string { + return CondDeclared +} + +// Check implements Checker.Check by checking that the given +// argument holds a key in the map with a matching value. +func (c Declared) Check(_, arg string) error { + // Note that we don't need to check the condition argument + // here because it has been specified explicitly in the + // return from the Condition method. + parts := strings.SplitN(arg, " ", 2) + if len(parts) != 2 { + return errgo.Newf("declared caveat has no value") + } + val, ok := c[parts[0]] + if !ok { + return errgo.Newf("got %s=null, expected %q", parts[0], parts[1]) + } + if val != parts[1] { + return errgo.Newf("got %s=%q, expected %q", parts[0], val, parts[1]) + } + return nil +} + +// InferDeclared retrieves any declared information from +// the given macaroons and returns it as a key-value map. +// +// Information is declared with a first party caveat as created +// by DeclaredCaveat. +// +// If there are two caveats that declare the same key with +// different values, the information is omitted from the map. +// When the caveats are later checked, this will cause the +// check to fail. 
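A sketch of that round trip (a hypothetical helper, assuming a *bakery.Service built as in the earlier sketch; "username" and "bob" are illustrative values):

    // checkDeclared mints a macaroon carrying a declared caveat,
    // infers the declaration back out, and uses it in the check.
    func checkDeclared(svc *bakery.Service) error {
        m, err := svc.NewMacaroon("", nil, []checkers.Caveat{
            checkers.DeclaredCaveat("username", "bob"),
        })
        if err != nil {
            return err
        }
        ms := macaroon.Slice{m}

        // InferDeclared collects the declared values...
        declared := checkers.InferDeclared(ms) // Declared{"username": "bob"}

        // ...and, because Declared is itself a Checker, they feed
        // straight back into the first-party check.
        return svc.Check(ms, checkers.New(declared))
    }
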
+func InferDeclared(ms macaroon.Slice) Declared { + var conflicts []string + info := make(Declared) + for _, m := range ms { + for _, cav := range m.Caveats() { + if cav.Location != "" { + continue + } + name, rest, err := ParseCaveat(cav.Id) + if err != nil { + continue + } + if name != CondDeclared { + continue + } + parts := strings.SplitN(rest, " ", 2) + if len(parts) != 2 { + continue + } + key, val := parts[0], parts[1] + if oldVal, ok := info[key]; ok && oldVal != val { + conflicts = append(conflicts, key) + continue + } + info[key] = val + } + } + for _, key := range conflicts { + delete(info, key) + } + return info +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/export_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,3 @@ +package checkers + +var TimeNow = &timeNow === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package checkers_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,72 @@ +package checkers + +import ( + "fmt" + "strings" + "time" + + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" +) + +var timeNow = time.Now + +// TimeBefore is a checker that checks caveats +// as created by TimeBeforeCaveat. +var TimeBefore = CheckerFunc{ + Condition_: CondTimeBefore, + Check_: func(_, cav string) error { + t, err := time.Parse(time.RFC3339Nano, cav) + if err != nil { + return errgo.Mask(err) + } + if !timeNow().Before(t) { + return fmt.Errorf("macaroon has expired") + } + return nil + }, +} + +// TimeBeforeCaveat returns a caveat that specifies that +// the time that it is checked should be before t. +func TimeBeforeCaveat(t time.Time) Caveat { + return firstParty(CondTimeBefore, t.UTC().Format(time.RFC3339Nano)) +} + +// ExpiryTime returns the minimum time of any time-before caveats found +// in the given slice and whether there were any such caveats found. +func ExpiryTime(cavs []macaroon.Caveat) (time.Time, bool) { + var t time.Time + var expires bool + for _, cav := range cavs { + if !strings.HasPrefix(cav.Id, CondTimeBefore) { + continue + } + et, err := time.Parse(CondTimeBefore+" "+time.RFC3339Nano, cav.Id) + if err != nil { + continue + } + if !expires || et.Before(t) { + t = et + expires = true + } + } + return t, expires +} + +// MacaroonsExpiryTime returns the minimum time of any time-before +// caveats found in the given macaroons and whether there were +// any such caveats found. 
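For instance (a sketch with illustrative id, location, and durations; names from this file and from gopkg.in/macaroon.v1):

    // earliestExpiry shows that across all time-before caveats the
    // earliest deadline wins.
    func earliestExpiry() (time.Time, bool) {
        m, err := macaroon.New(nil, "an-id", "a-location")
        if err != nil {
            panic(err)
        }
        m.AddFirstPartyCaveat(checkers.TimeBeforeCaveat(time.Now().Add(time.Hour)).Condition)
        m.AddFirstPartyCaveat(checkers.TimeBeforeCaveat(time.Now().Add(time.Minute)).Condition)

        // Returns roughly now+1m (rendered in UTC) and true; with no
        // time-before caveats it would return the zero time and false.
        return checkers.MacaroonsExpiryTime(macaroon.Slice{m})
    }
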
+func MacaroonsExpiryTime(ms macaroon.Slice) (time.Time, bool) { + var t time.Time + var expires bool + for _, m := range ms { + if et, ex := ExpiryTime(m.Caveats()); ex { + if !expires || et.Before(t) { + t = et + expires = true + } + } + } + return t, expires +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/checkers/time_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,170 @@ +package checkers_test + +import ( + "time" + + gc "gopkg.in/check.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +type timeSuite struct{} + +var _ = gc.Suite(&timeSuite{}) + +var t1 = time.Now() +var t2 = t1.Add(1 * time.Hour) +var t3 = t2.Add(1 * time.Hour) + +var expireTimeTests = []struct { + about string + caveats []macaroon.Caveat + expectTime time.Time + expectExpires bool +}{{ + about: "nil caveats", +}, { + about: "empty caveats", + caveats: []macaroon.Caveat{}, +}, { + about: "single time-before caveat", + caveats: []macaroon.Caveat{ + macaroon.Caveat{ + Id: checkers.TimeBeforeCaveat(t1).Condition, + }, + }, + expectTime: t1, + expectExpires: true, +}, { + about: "single deny caveat", + caveats: []macaroon.Caveat{ + macaroon.Caveat{ + Id: checkers.DenyCaveat("abc").Condition, + }, + }, +}, { + about: "multiple time-before caveat", + caveats: []macaroon.Caveat{ + macaroon.Caveat{ + Id: checkers.TimeBeforeCaveat(t2).Condition, + }, + macaroon.Caveat{ + Id: checkers.TimeBeforeCaveat(t1).Condition, + }, + }, + expectTime: t1, + expectExpires: true, +}, { + about: "mixed caveats", + caveats: []macaroon.Caveat{ + macaroon.Caveat{ + Id: checkers.TimeBeforeCaveat(t1).Condition, + }, + macaroon.Caveat{ + Id: checkers.AllowCaveat("abc").Condition, + }, + macaroon.Caveat{ + Id: checkers.TimeBeforeCaveat(t2).Condition, + }, + macaroon.Caveat{ + Id: checkers.DenyCaveat("def").Condition, + }, + }, + expectTime: t1, + expectExpires: true, +}, { + about: "invalid time-before caveat", + caveats: []macaroon.Caveat{ + macaroon.Caveat{ + Id: checkers.CondTimeBefore + " tomorrow", + }, + }, +}} + +func (s *timeSuite) TestExpireTime(c *gc.C) { + for i, test := range expireTimeTests { + c.Logf("%d. 
%s", i, test.about) + t, expires := checkers.ExpiryTime(test.caveats) + c.Assert(t.Equal(test.expectTime), gc.Equals, true, gc.Commentf("obtained: %s, expected: %s", t, test.expectTime)) + c.Assert(expires, gc.Equals, test.expectExpires) + } +} + +var macaroonsExpireTimeTests = []struct { + about string + macaroons macaroon.Slice + expectTime time.Time + expectExpires bool +}{{ + about: "nil macaroons", +}, { + about: "empty macaroons", + macaroons: macaroon.Slice{}, +}, { + about: "single macaroon without caveats", + macaroons: macaroon.Slice{ + mustNewMacaroon(), + }, +}, { + about: "multiple macaroon without caveats", + macaroons: macaroon.Slice{ + mustNewMacaroon(), + mustNewMacaroon(), + }, +}, { + about: "single macaroon with time-before caveat", + macaroons: macaroon.Slice{ + mustNewMacaroon( + checkers.TimeBeforeCaveat(t1).Condition, + ), + }, + expectTime: t1, + expectExpires: true, +}, { + about: "single macaroon with multiple time-before caveats", + macaroons: macaroon.Slice{ + mustNewMacaroon( + checkers.TimeBeforeCaveat(t2).Condition, + checkers.TimeBeforeCaveat(t1).Condition, + ), + }, + expectTime: t1, + expectExpires: true, +}, { + about: "multiple macaroons with multiple time-before caveats", + macaroons: macaroon.Slice{ + mustNewMacaroon( + checkers.TimeBeforeCaveat(t3).Condition, + checkers.TimeBeforeCaveat(t2).Condition, + ), + mustNewMacaroon( + checkers.TimeBeforeCaveat(t3).Condition, + checkers.TimeBeforeCaveat(t1).Condition, + ), + }, + expectTime: t1, + expectExpires: true, +}} + +func (s *timeSuite) TestMacaroonsExpireTime(c *gc.C) { + for i, test := range macaroonsExpireTimeTests { + c.Logf("%d. %s", i, test.about) + t, expires := checkers.MacaroonsExpiryTime(test.macaroons) + c.Assert(t.Equal(test.expectTime), gc.Equals, true, gc.Commentf("obtained: %s, expected: %s", t, test.expectTime)) + c.Assert(expires, gc.Equals, test.expectExpires) + } +} + +func mustNewMacaroon(cavs ...string) *macaroon.Macaroon { + m, err := macaroon.New(nil, "", "") + if err != nil { + panic(err) + } + for _, cav := range cavs { + if err := m.AddFirstPartyCaveat(cav); err != nil { + panic(err) + } + } + return m +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/codec.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/codec.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/codec.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,131 @@ +package bakery + +import ( + "bytes" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + + "golang.org/x/crypto/nacl/box" +) + +type caveatIdRecord struct { + RootKey []byte + Condition string +} + +// caveatId defines the format of a third party caveat id. +type caveatId struct { + ThirdPartyPublicKey *PublicKey + FirstPartyPublicKey *PublicKey + Nonce []byte + Id string +} + +// boxEncoder encodes caveat ids confidentially to a third-party service using +// authenticated public key encryption compatible with NaCl box. +type boxEncoder struct { + key *KeyPair +} + +// newBoxEncoder creates a new boxEncoder that uses the given public key pair. 
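The encoder and decoder below wrap NaCl box; the underlying seal/open round trip can be pictured with this standalone sketch (key-generation errors elided for brevity; the record literal is an illustrative stand-in for the JSON-marshaled caveatIdRecord above):

    package main

    import (
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/nacl/box"
    )

    func main() {
        // First party (caveat author) and third party (discharger) key pairs.
        firstPub, firstPriv, _ := box.GenerateKey(rand.Reader)
        thirdPub, thirdPriv, _ := box.GenerateKey(rand.Reader)

        var nonce [24]byte // NonceLen in the bakery package
        if _, err := rand.Read(nonce[:]); err != nil {
            panic(err)
        }
        record := []byte(`{"RootKey":"c2VjcmV0","Condition":"is-ok"}`)

        // Sealed with the third party's public key and the first
        // party's private key...
        sealed := box.Seal(nil, record, &nonce, thirdPub, firstPriv)

        // ...and opened by the third party with the mirror-image keys.
        plain, ok := box.Open(nil, sealed, &nonce, firstPub, thirdPriv)
        fmt.Println(ok, string(plain))
    }
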
+func newBoxEncoder(key *KeyPair) *boxEncoder { + return &boxEncoder{ + key: key, + } +} + +func (enc *boxEncoder) encodeCaveatId(condition string, rootKey []byte, thirdPartyPub *PublicKey) (string, error) { + id, err := enc.newCaveatId(condition, rootKey, thirdPartyPub) + if err != nil { + return "", err + } + data, err := json.Marshal(id) + if err != nil { + return "", fmt.Errorf("cannot marshal %#v: %v", id, err) + } + return base64.StdEncoding.EncodeToString(data), nil +} + +func (enc *boxEncoder) newCaveatId(condition string, rootKey []byte, thirdPartyPub *PublicKey) (*caveatId, error) { + var nonce [NonceLen]byte + if _, err := rand.Read(nonce[:]); err != nil { + return nil, fmt.Errorf("cannot generate random number for nonce: %v", err) + } + plain := caveatIdRecord{ + RootKey: rootKey, + Condition: condition, + } + plainData, err := json.Marshal(&plain) + if err != nil { + return nil, fmt.Errorf("cannot marshal %#v: %v", &plain, err) + } + sealed := box.Seal(nil, plainData, &nonce, thirdPartyPub.boxKey(), enc.key.Private.boxKey()) + return &caveatId{ + ThirdPartyPublicKey: thirdPartyPub, + FirstPartyPublicKey: &enc.key.Public, + Nonce: nonce[:], + Id: base64.StdEncoding.EncodeToString(sealed), + }, nil +} + +// boxDecoder decodes caveat ids for third-party service that were encoded to +// the third-party with authenticated public key encryption compatible with +// NaCl box. +type boxDecoder struct { + key *KeyPair +} + +// newBoxDecoder creates a new BoxDecoder using the given key pair. +func newBoxDecoder(key *KeyPair) *boxDecoder { + return &boxDecoder{ + key: key, + } +} + +func (d *boxDecoder) decodeCaveatId(id string) (rootKey []byte, condition string, err error) { + data, err := base64.StdEncoding.DecodeString(id) + if err != nil { + return nil, "", fmt.Errorf("cannot base64-decode caveat id: %v", err) + } + var tpid caveatId + if err := json.Unmarshal(data, &tpid); err != nil { + return nil, "", fmt.Errorf("cannot unmarshal caveat id %q: %v", data, err) + } + var recordData []byte + + recordData, err = d.encryptedCaveatId(tpid) + if err != nil { + return nil, "", err + } + var record caveatIdRecord + if err := json.Unmarshal(recordData, &record); err != nil { + return nil, "", fmt.Errorf("cannot decode third party caveat record: %v", err) + } + return record.RootKey, record.Condition, nil +} + +func (d *boxDecoder) encryptedCaveatId(id caveatId) ([]byte, error) { + if d.key == nil { + return nil, fmt.Errorf("no public key for caveat id decryption") + } + if !bytes.Equal(d.key.Public.Key[:], id.ThirdPartyPublicKey.Key[:]) { + return nil, fmt.Errorf("public key mismatch") + } + var nonce [NonceLen]byte + if len(id.Nonce) != len(nonce) { + return nil, fmt.Errorf("bad nonce length") + } + copy(nonce[:], id.Nonce) + + sealed, err := base64.StdEncoding.DecodeString(id.Id) + if err != nil { + return nil, fmt.Errorf("cannot base64-decode encrypted caveat id: %v", err) + } + out, ok := box.Open(nil, sealed, &nonce, id.FirstPartyPublicKey.boxKey(), d.key.Private.boxKey()) + if !ok { + return nil, fmt.Errorf("decryption of public-key encrypted caveat id %#v failed", id) + } + return out, nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/discharge.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/discharge.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/discharge.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,76 @@ +package bakery + +import ( + "fmt" + + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +// 
DischargeAll gathers discharge macaroons for all the third party +// caveats in m (and any subsequent caveats required by those) using +// getDischarge to acquire each discharge macaroon. It returns a slice +// with m as the first element, followed by all the discharge macaroons. +// All the discharge macaroons will be bound to the primary macaroon. +func DischargeAll( + m *macaroon.Macaroon, + getDischarge func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error), +) (macaroon.Slice, error) { + return DischargeAllWithKey(m, getDischarge, nil) +} + +// DischargeAllWithKey is like DischargeAll except that the localKey +// parameter may optionally hold the key of the client, in which case it +// will be used to discharge any third party caveats with the special +// location "local". In this case, the caveat itself must be "true". This +// can be used be a server to ask a client to prove ownership of the +// private key. +// +// When localKey is nil, DischargeAllWithKey is exactly the same as +// DischargeAll. +func DischargeAllWithKey( + m *macaroon.Macaroon, + getDischarge func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error), + localKey *KeyPair, +) (macaroon.Slice, error) { + sig := m.Signature() + discharges := macaroon.Slice{m} + var need []macaroon.Caveat + addCaveats := func(m *macaroon.Macaroon) { + for _, cav := range m.Caveats() { + if cav.Location == "" { + continue + } + need = append(need, cav) + } + } + addCaveats(m) + firstPartyLocation := m.Location() + for len(need) > 0 { + cav := need[0] + need = need[1:] + var dm *macaroon.Macaroon + var err error + if localKey != nil && cav.Location == "local" { + dm, _, err = Discharge(localKey, localDischargeChecker, cav.Id) + } else { + dm, err = getDischarge(firstPartyLocation, cav) + } + if err != nil { + return nil, errgo.NoteMask(err, fmt.Sprintf("cannot get discharge from %q", cav.Location), errgo.Any) + } + dm.Bind(sig) + discharges = append(discharges, dm) + addCaveats(dm) + } + return discharges, nil +} + +var localDischargeChecker = ThirdPartyCheckerFunc(func(caveatId, caveat string) ([]checkers.Caveat, error) { + if caveat != "true" { + return nil, checkers.ErrCaveatNotRecognized + } + return nil, nil +}) === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/discharge_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/discharge_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/discharge_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,92 @@ +package bakery_test + +import ( + "fmt" + + gc "gopkg.in/check.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +type DischargeSuite struct{} + +var _ = gc.Suite(&DischargeSuite{}) + +func alwaysOK(string) error { + return nil +} + +func (*DischargeSuite) TestDischargeAllNoDischarges(c *gc.C) { + rootKey := []byte("root key") + m, err := macaroon.New(rootKey, "id0", "loc0") + c.Assert(err, gc.IsNil) + ms, err := bakery.DischargeAll(m, noDischarge(c)) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 1) + c.Assert(ms[0], gc.Equals, m) + + err = m.Verify(rootKey, alwaysOK, nil) + c.Assert(err, gc.IsNil) +} + +func (*DischargeSuite) TestDischargeAllManyDischarges(c *gc.C) { + rootKey := []byte("root key") + m0, err := macaroon.New(rootKey, "id0", "location0") + c.Assert(err, gc.IsNil) + totalRequired := 40 + id := 1 + addCaveats := func(m *macaroon.Macaroon) { + for i := 0; i < 2; i++ { + if totalRequired == 0 { + break + } + cid := 
fmt.Sprint("id", id) + err := m.AddThirdPartyCaveat([]byte("root key "+cid), cid, "somewhere") + c.Assert(err, gc.IsNil) + id++ + totalRequired-- + } + } + addCaveats(m0) + getDischarge := func(loc string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + c.Assert(loc, gc.Equals, "location0") + m, err := macaroon.New([]byte("root key "+cav.Id), cav.Id, "") + c.Assert(err, gc.IsNil) + addCaveats(m) + return m, nil + } + ms, err := bakery.DischargeAll(m0, getDischarge) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 41) + + err = ms[0].Verify(rootKey, alwaysOK, ms[1:]) + c.Assert(err, gc.IsNil) +} + +func (*DischargeSuite) TestDischargeAllLocalDischarge(c *gc.C) { + svc, err := bakery.NewService(bakery.NewServiceParams{}) + c.Assert(err, gc.IsNil) + + clientKey, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{ + bakery.LocalThirdPartyCaveat(&clientKey.Public), + }) + c.Assert(err, gc.IsNil) + + ms, err := bakery.DischargeAllWithKey(m, noDischarge(c), clientKey) + c.Assert(err, gc.IsNil) + + err = svc.Check(ms, checkers.New()) + c.Assert(err, gc.IsNil) +} + +func noDischarge(c *gc.C) func(string, macaroon.Caveat) (*macaroon.Macaroon, error) { + return func(string, macaroon.Caveat) (*macaroon.Macaroon, error) { + c.Errorf("getDischarge called unexpectedly") + return nil, fmt.Errorf("nothing") + } +} === added directory 'src/gopkg.in/macaroon-bakery.v1/bakery/example' === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/example/authservice.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/example/authservice.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/example/authservice.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,43 @@ +package main + +import ( + "net/http" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +// authService implements an authorization service, +// that can discharge third-party caveats added +// to other macaroons. +func authService(endpoint string, key *bakery.KeyPair) (http.Handler, error) { + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: endpoint, + Key: key, + Locator: bakery.NewPublicKeyRing(), + }) + if err != nil { + return nil, err + } + mux := http.NewServeMux() + httpbakery.AddDischargeHandler(mux, "/", svc, thirdPartyChecker) + return mux, nil +} + +// thirdPartyChecker is used to check third party caveats added by other +// services. The HTTP request is that of the client - it is attempting +// to gather a discharge macaroon. +// +// Note how this function can return additional first- and third-party +// caveats which will be added to the original macaroon's caveats. +func thirdPartyChecker(req *http.Request, cavId, condition string) ([]checkers.Caveat, error) { + if condition != "access-allowed" { + return nil, checkers.ErrCaveatNotRecognized + } + // TODO check that the HTTP request has cookies that prove + // something about the client. + return []checkers.Caveat{ + httpbakery.SameClientIPAddrCaveat(req), + }, nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/example/client.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/example/client.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/example/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + "io/ioutil" + "net/http" + + "gopkg.in/errgo.v1" + + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +// client represents a client of the target service. 
+// In this simple example, it just tries a GET +// request, which will fail unless the client +// has the required authorization. +func clientRequest(client *httpbakery.Client, serverEndpoint string) (string, error) { + // The Do function implements the mechanics + // of actually gathering discharge macaroons + // when required, and retrying the request + // when necessary. + req, err := http.NewRequest("GET", serverEndpoint, nil) + if err != nil { + return "", errgo.Notef(err, "cannot make new HTTP request") + } + resp, err := client.Do(req) + if err != nil { + return "", errgo.NoteMask(err, "GET failed", errgo.Any) + } + defer resp.Body.Close() + // TODO(rog) unmarshal error + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("cannot read response: %v", err) + } + return string(data), nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/example/example_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/example/example_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/example/example_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,76 @@ +package main + +import ( + "fmt" + "net/http" + "net/url" + "testing" + + gc "gopkg.in/check.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} + +type exampleSuite struct { + authEndpoint string + authPublicKey *bakery.PublicKey +} + +var _ = gc.Suite(&exampleSuite{}) + +func (s *exampleSuite) SetUpSuite(c *gc.C) { + key, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + s.authPublicKey = &key.Public + s.authEndpoint, err = serve(func(endpoint string) (http.Handler, error) { + return authService(endpoint, key) + }) + c.Assert(err, gc.IsNil) +} + +func (s *exampleSuite) TestExample(c *gc.C) { + client := newClient() + serverEndpoint, err := serve(func(endpoint string) (http.Handler, error) { + return targetService(endpoint, s.authEndpoint, s.authPublicKey) + }) + c.Assert(err, gc.IsNil) + c.Logf("gold request") + resp, err := clientRequest(client, serverEndpoint+"/gold") + c.Assert(err, gc.IsNil) + c.Assert(resp, gc.Equals, "all is golden") + + c.Logf("silver request") + resp, err = clientRequest(client, serverEndpoint+"/silver") + c.Assert(err, gc.IsNil) + c.Assert(resp, gc.Equals, "every cloud has a silver lining") +} + +func (s *exampleSuite) BenchmarkExample(c *gc.C) { + client := newClient() + serverEndpoint, err := serve(func(endpoint string) (http.Handler, error) { + return targetService(endpoint, s.authEndpoint, s.authPublicKey) + }) + c.Assert(err, gc.IsNil) + c.ResetTimer() + for i := 0; i < c.N; i++ { + resp, err := clientRequest(client, serverEndpoint) + c.Assert(err, gc.IsNil) + c.Assert(resp, gc.Equals, "hello, world\n") + } +} + +func newClient() *httpbakery.Client { + return &httpbakery.Client{ + Client: httpbakery.NewHTTPClient(), + VisitWebPage: func(url *url.URL) error { + fmt.Printf("please visit this web page:\n") + fmt.Printf("\t%s\n", url) + return nil + }, + } +} === added directory 'src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice' === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,463 @@ +package idservice + +import ( + "fmt" + "html/template" + "log" + "net/http" + + "github.com/juju/httprequest" + 
"github.com/julienschmidt/httprouter" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakery/example/meeting" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var ( + handleJSON = httprequest.ErrorMapper(errorToResponse).HandleJSON +) + +const ( + cookieUser = "username" +) + +// handler implements http.Handler to serve the name space +// provided by the id service. +type handler struct { + svc *bakery.Service + place *place + users map[string]*UserInfo +} + +// UserInfo holds information about a user. +type UserInfo struct { + Password string + Groups map[string]bool +} + +// Params holds parameters for New. +type Params struct { + Service bakery.NewServiceParams + Users map[string]*UserInfo +} + +// New returns a new handler that services an identity-providing +// service. This acts as a login service and can discharge third-party caveats +// for users. +func New(p Params) (http.Handler, error) { + svc, err := bakery.NewService(p.Service) + if err != nil { + return nil, err + } + h := &handler{ + svc: svc, + users: p.Users, + place: &place{meeting.New()}, + } + mux := http.NewServeMux() + httpbakery.AddDischargeHandler(mux, "/", svc, h.checkThirdPartyCaveat) + mux.Handle("/user/", mkHandler(handleJSON(h.userHandler))) + mux.HandleFunc("/login", h.loginHandler) + mux.Handle("/question", mkHandler(handleJSON(h.questionHandler))) + mux.Handle("/wait", mkHandler(handleJSON(h.waitHandler))) + mux.HandleFunc("/loginattempt", h.loginAttemptHandler) + return mux, nil +} + +// userHandler handles requests to add new users, change user details, etc. +// It is only accessible to users that are members of the admin group. +func (h *handler) userHandler(p httprequest.Params) (interface{}, error) { + ctxt := h.newContext(p.Request, "change-user") + if _, err := httpbakery.CheckRequest(h.svc, p.Request, nil, ctxt); err != nil { + // TODO do this only if the error cause is *bakery.VerificationError + // We issue a macaroon with a third-party caveat targetting + // the id service itself. This means that the flow for self-created + // macaroons is just the same as for any other service. + // Theoretically, we could just redirect the user to the + // login page, but that would p.Requestuire a different flow + // and it's not clear that it would be an advantage. + m, err := h.svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: h.svc.Location(), + Condition: "member-of-group admin", + }, { + Condition: "operation change-user", + }}) + if err != nil { + return nil, errgo.Notef(err, "cannot mint new macaroon") + } + return nil, &httpbakery.Error{ + Message: err.Error(), + Code: httpbakery.ErrDischargeRequired, + Info: &httpbakery.ErrorInfo{ + Macaroon: m, + }, + } + } + // PUT /user/$user - create new user + // PUT /user/$user/group-membership - change group membership of user + return nil, errgo.New("not implemented yet") +} + +type loginPageParams struct { + WaitId string +} + +var loginPage = template.Must(template.New("").Parse(` + + + +User name: +

+<input type="text" name="user">
+<input type="hidden" name="waitid" value="{{.WaitId}}"/>
+Password: <input type="password" name="password">
+<input type="submit" value="Log in">
+</form>
+</body>
+</html>

+ + +`)) + +// loginHandler serves up a login page for the user to interact with, +// having been redirected there as part of a macaroon discharge requirement. +// This is a proxy for any third-party authorization service. +func (h *handler) loginHandler(w http.ResponseWriter, req *http.Request) { + req.ParseForm() + waitId := req.Form.Get("waitid") + if waitId == "" { + http.Error(w, "wait id not found in form", http.StatusBadRequest) + return + } + err := loginPage.Execute(w, loginPageParams{ + WaitId: waitId, + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } +} + +// loginAttemptHandler is invoked when a user clicks on the "Log in" +// button on the login page. It checks the credentials and then +// completes the rendezvous, allowing the original wait +// request to complete. +func (h *handler) loginAttemptHandler(w http.ResponseWriter, req *http.Request) { + log.Printf("login attempt %s", req.URL) + req.ParseForm() + waitId := req.Form.Get("waitid") + if waitId == "" { + http.Error(w, "wait id not found in form", http.StatusBadRequest) + return + } + user := req.Form.Get("user") + info, ok := h.users[user] + if !ok { + http.Error(w, fmt.Sprintf("user %q not found", user), http.StatusUnauthorized) + return + } + if req.Form.Get("password") != info.Password { + http.Error(w, "bad password", http.StatusUnauthorized) + return + } + + // User and password match; we can allow the user + // to have a macaroon that they can use later to prove + // to us that they have logged in. We also add a cookie + // to hold the logged in user name. + m, err := h.svc.NewMacaroon("", nil, []checkers.Caveat{{ + Condition: "user-is " + user, + }}) + // TODO(rog) when this fails, we should complete the rendezvous + // to cause the wait request to complete with an appropriate error. + if err != nil { + http.Error(w, "cannot mint macaroon: "+err.Error(), http.StatusInternalServerError) + return + } + cookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + if err != nil { + http.Error(w, "cannot make cookie: "+err.Error(), http.StatusInternalServerError) + return + } + http.SetCookie(w, cookie) + http.SetCookie(w, &http.Cookie{ + Path: "/", + Name: cookieUser, + Value: user, + }) + h.place.Done(waitId, &loginInfo{ + User: user, + }) +} + +// checkThirdPartyCaveat is called by the httpbakery discharge handler. +func (h *handler) checkThirdPartyCaveat(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { + return h.newContext(req, "").CheckThirdPartyCaveat(cavId, cav) +} + +// newContext returns a new caveat-checking context +// for the client making the given request. +func (h *handler) newContext(req *http.Request, operation string) *context { + // Determine the current logged-in user, if any. + var username string + for _, c := range req.Cookies() { + if c.Name == cookieUser { + // TODO could potentially allow several concurrent + // logins - caveats asking about current user privilege + // could be satisfied if any of the user names had that + // privilege. + username = c.Value + break + } + } + if username == "" { + log.Printf("not logged in") + } else { + log.Printf("logged in as %q", username) + } + return &context{ + handler: h, + req: req, + svc: h.svc, + declaredUser: username, + operation: operation, + } +} + +// needLogin returns an error suitable for returning +// from a discharge request that can only be satisfied +// if the user logs in. 
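The login flow above hands the freshly minted macaroon back to the browser as a cookie. A minimal sketch of just that minting-and-cookie step, assuming only the v1 APIs used in this tree (the "user-is" condition and "idservice" location are illustrative names, not fixed ones):

    package main

    import (
        "log"
        "net/http"
        "net/http/httptest"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
        "gopkg.in/macaroon-bakery.v1/httpbakery"
    )

    func main() {
        svc, err := bakery.NewService(bakery.NewServiceParams{Location: "idservice"})
        if err != nil {
            log.Fatal(err)
        }
        // Mint a macaroon asserting that its holder logged in as "alice",
        // as loginAttemptHandler does after checking the password.
        m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{
            Condition: "user-is alice",
        }})
        if err != nil {
            log.Fatal(err)
        }
        // Encode the macaroon as a cookie for later requests.
        cookie, err := httpbakery.NewCookie(macaroon.Slice{m})
        if err != nil {
            log.Fatal(err)
        }
        w := httptest.NewRecorder()
        http.SetCookie(w, cookie)
        log.Printf("Set-Cookie: %s", w.Header().Get("Set-Cookie"))
    }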
+func (h *handler) needLogin(cavId string, caveat string, why error, req *http.Request) error { + // TODO(rog) If the user is already logged in (username != ""), + // we should perhaps just return an error here. + log.Printf("login required") + waitId, err := h.place.NewRendezvous(&thirdPartyCaveatInfo{ + CaveatId: cavId, + Caveat: caveat, + }) + if err != nil { + return fmt.Errorf("cannot make rendezvous: %v", err) + } + log.Printf("returning redirect error") + visitURL := "/login?waitid=" + waitId + waitURL := "/wait?waitid=" + waitId + return httpbakery.NewInteractionRequiredError(visitURL, waitURL, why, req) +} + +// waitHandler serves an HTTP endpoint that waits until a macaroon +// has been discharged, and returns the discharge macaroon. +func (h *handler) waitHandler(p httprequest.Params) (interface{}, error) { + p.Request.ParseForm() + waitId := p.Request.Form.Get("waitid") + if waitId == "" { + return nil, fmt.Errorf("wait id parameter not found") + } + caveat, login, err := h.place.Wait(waitId) + if err != nil { + return nil, fmt.Errorf("cannot wait: %v", err) + } + if login.User == "" { + return nil, fmt.Errorf("login failed") + } + // Create a context to verify the third party caveat. + // Note that because the information in login has been + // supplied directly by our own code, we can assume + // that it can be trusted, so we set verifiedUser to true. + ctxt := &context{ + handler: h, + req: p.Request, + svc: h.svc, + declaredUser: login.User, + verifiedUser: true, + } + // Now that we've verified the user, we can check again to see + // if we can discharge the original caveat. + macaroon, err := h.svc.Discharge(ctxt, caveat.CaveatId) + if err != nil { + return nil, errgo.Mask(err) + } + return WaitResponse{ + Macaroon: macaroon, + }, nil +} + +func (h *handler) questionHandler(_ httprequest.Params) (interface{}, error) { + return nil, errgo.New("question unimplemented") + // TODO + // req.ParseForm() + // + // macStr := req.Form.Get("macaroons") + // if macStr == "" { + // return nil, fmt.Errorf("macaroon parameter not found") + // } + // var macaroons []*macaroon.Macaroon + // err := json.Unmarshal([]byte(macStr), &macaroons) + // if err != nil { + // return nil, fmt.Errorf("cannot unmarshal macaroon: %v", err) + // } + // if len(macaroons) == 0 { + // return nil, fmt.Errorf("no macaroons found") + // } + // q := req.Form.Get("q") + // if q == "" { + // return nil, fmt.Errorf("q parameter not found") + // } + // user := req.Form.Get("user") + // if user == "" { + // return nil, fmt.Errorf("user parameter not found") + // } + // ctxt := &context{ + // declaredUser: user, + // operation: "question " + q, + // } + // breq := h.svc.NewRequest(req, ctxt) + // for _, m := range macaroons { + // breq.AddClientMacaroon(m) + // } + // err := breq.Check() + // return nil, err +} + +// WaitResponse holds the response from the wait endpoint. +type WaitResponse struct { + Macaroon *macaroon.Macaroon +} + +// context represents the context in which a caveat +// will be checked. +type context struct { + // handler refers to the idservice handler. + handler *handler + + // declaredUser holds the user name that we want to use for + // checking authorization caveats. + declaredUser string + + // verifiedUser is true when the declared user has been verified + // directly (by the user login) + verifiedUser bool + + // operation holds the current operation, if any. + operation string + + svc *bakery.Service + + // req holds the current client's HTTP request. 
+ req *http.Request +} + +func (ctxt *context) Condition() string { + return "" +} + +func (ctxt *context) Check(cond, arg string) error { + switch cond { + case "user-is": + if arg != ctxt.declaredUser { + return fmt.Errorf("not logged in as %q", arg) + } + return nil + case "operation": + if ctxt.operation != "" && arg == ctxt.operation { + return nil + } + return errgo.Newf("operation mismatch") + default: + return checkers.ErrCaveatNotRecognized + } +} + +func (ctxt *context) CheckThirdPartyCaveat(cavId, cav string) ([]checkers.Caveat, error) { + h := ctxt.handler + log.Printf("checking third party caveat %q", cav) + op, rest, err := checkers.ParseCaveat(cav) + if err != nil { + return nil, fmt.Errorf("cannot parse caveat %q: %v", cav, err) + } + switch op { + case "can-speak-for": + // TODO(rog) We ignore the currently logged in user here, + // but perhaps it would be better to let the user be in control + // of which user they're currently "declared" as, rather than + // getting privileges of users we currently have macaroons for. + checkErr := ctxt.canSpeakFor(rest) + if checkErr == nil { + return ctxt.firstPartyCaveats(), nil + } + return nil, h.needLogin(cavId, cav, checkErr, ctxt.req) + case "member-of-group": + // The third-party caveat is asking if the currently logged in + // user is a member of a particular group. + // We can find the currently logged in user by checking + // the username cookie (which doesn't provide any power, but + // indicates which user name to check) + if ctxt.declaredUser == "" { + return nil, h.needLogin(cavId, cav, errgo.New("not logged in"), ctxt.req) + } + if err := ctxt.canSpeakFor(ctxt.declaredUser); err != nil { + return nil, errgo.Notef(err, "cannot speak for declared user %q", ctxt.declaredUser) + } + info, ok := h.users[ctxt.declaredUser] + if !ok { + return nil, errgo.Newf("user %q not found", ctxt.declaredUser) + } + group := rest + if !info.Groups[group] { + return nil, errgo.Newf("not privileged enough") + } + return ctxt.firstPartyCaveats(), nil + default: + return nil, checkers.ErrCaveatNotRecognized + } +} + +// canSpeakFor checks whether the client sending +// the given request can speak for the given user. +// We do that by declaring that user and checking +// whether the supplied macaroons in the request +// verify OK. +func (ctxt *context) canSpeakFor(user string) error { + if user == ctxt.declaredUser && ctxt.verifiedUser { + // The context is a direct result of logging in. + // No need to check macaroons. + return nil + } + ctxt1 := *ctxt + ctxt1.declaredUser = user + _, err := httpbakery.CheckRequest(ctxt.svc, ctxt.req, nil, &ctxt1) + if err != nil { + log.Printf("client cannot speak for %q: %v", user, err) + } else { + log.Printf("client can speak for %q", user) + } + return err +} + +// firstPartyCaveats returns first-party caveats suitable +// for adding to a third-party caveat discharge macaroon +// within the receiving context. +func (ctxt *context) firstPartyCaveats() []checkers.Caveat { + // TODO return caveat specifying that ip-addr is + // the same as that given in ctxt.req.RemoteAddr + // and other 1st party caveats, potentially. 
+ return nil +} + +func errorToResponse(err error) (int, interface{}) { + cause := errgo.Cause(err) + if cause, ok := cause.(*httpbakery.Error); ok { + err1 := *cause + err1.Message = err.Error() + return http.StatusInternalServerError, &err1 + } + return http.StatusInternalServerError, &httpbakery.Error{ + Message: err.Error(), + } +} + +func mkHandler(h httprouter.Handle) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h(w, req, nil) + }) +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/example/idservice/idservice_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,192 @@ +package idservice_test + +import ( + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "regexp" + "time" + + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/example/idservice" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +type suite struct { + authEndpoint string + authPublicKey *bakery.PublicKey + client *httpbakery.Client +} + +var _ = gc.Suite(&suite{}) + +func (s *suite) SetUpSuite(c *gc.C) { + key, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + s.authPublicKey = &key.Public + s.authEndpoint = serve(c, func(endpoint string) (http.Handler, error) { + return idservice.New(idservice.Params{ + Users: map[string]*idservice.UserInfo{ + "rog": { + Password: "password", + }, + "root": { + Password: "superman", + Groups: map[string]bool{ + "target-service-users": true, + }, + }, + }, + Service: bakery.NewServiceParams{ + Location: endpoint, + Store: bakery.NewMemStorage(), + Key: key, + Locator: bakery.NewPublicKeyRing(), + }, + }) + }) + c.Logf("auth endpoint at %s", s.authEndpoint) +} + +func (s *suite) SetUpTest(c *gc.C) { + s.client = httpbakery.NewClient() +} + +func (s *suite) TestIdService(c *gc.C) { + serverEndpoint := serve(c, func(endpoint string) (http.Handler, error) { + return targetService(endpoint, s.authEndpoint, s.authPublicKey) + }) + c.Logf("target service endpoint at %s", serverEndpoint) + visitDone := make(chan struct{}) + s.client.VisitWebPage = func(u *url.URL) error { + go func() { + err := s.scrapeLoginPage(u) + c.Logf("scrape returned %v", err) + c.Check(err, gc.IsNil) + visitDone <- struct{}{} + }() + return nil + } + resp, err := s.clientRequest(serverEndpoint + "/gold") + c.Assert(err, gc.IsNil) + c.Assert(resp, gc.Equals, "all is golden") + select { + case <-visitDone: + case <-time.After(5 * time.Second): + c.Fatalf("visit never done") + } + + // Try again. We shouldn't need to interact this time. + s.client.VisitWebPage = nil + resp, err = s.clientRequest(serverEndpoint + "/silver") + c.Assert(err, gc.IsNil) + c.Assert(resp, gc.Equals, "every cloud has a silver lining") +} + +func serve(c *gc.C, newHandler func(string) (http.Handler, error)) (endpointURL string) { + listener, err := net.Listen("tcp", "localhost:0") + c.Assert(err, gc.IsNil) + + endpointURL = "http://" + listener.Addr().String() + handler, err := newHandler(endpointURL) + c.Assert(err, gc.IsNil) + + go http.Serve(listener, handler) + return endpointURL +} + +// client represents a client of the target service. In this simple +// example, it just tries a GET request, which will fail unless the +// client has the required authorization. 
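The test above drives the interactive flow by overriding the client's VisitWebPage callback; a real client follows the same pattern outside tests. A sketch, where the endpoint URL is hypothetical:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"
        "net/http"
        "net/url"

        "gopkg.in/macaroon-bakery.v1/httpbakery"
    )

    func main() {
        client := httpbakery.NewClient()
        // Called when a discharger requires interaction; a real client
        // would open the URL in the user's browser.
        client.VisitWebPage = func(u *url.URL) error {
            fmt.Printf("please visit %s to log in\n", u)
            return nil
        }
        // Hypothetical protected endpoint.
        req, err := http.NewRequest("GET", "http://localhost:8080/gold", nil)
        if err != nil {
            log.Fatal(err)
        }
        // Do gathers any required discharge macaroons and retries
        // the request with them attached.
        resp, err := client.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(body))
    }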
+func (s *suite) clientRequest(serverEndpoint string) (string, error) { + req, err := http.NewRequest("GET", serverEndpoint, nil) + if err != nil { + return "", errgo.Notef(err, "cannot make new HTTP request") + } + + // The Do function implements the mechanics + // of actually gathering discharge macaroons + // when required, and retrying the request + // when necessary. + resp, err := s.client.Do(req) + if err != nil { + return "", errgo.NoteMask(err, "GET failed", errgo.Any) + } + defer resp.Body.Close() + // TODO(rog) unmarshal error + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("cannot read response: %v", err) + } + return string(data), nil +} + +// Patterns to search for the relevant information in the login page. +// Alternatives to this might be (in likely ascending order of complexity): +// - use the template itself as the pattern. +// - parse the html with encoding/xml +// - parse the html with code.google.com/p/go.net/html +var ( + actionPat = regexp.MustCompile(`
n { + found = &k.key + n = len(k.url.Path) + } + } + if found == nil { + return nil, ErrNotFound + } + return found, nil +} + +func (r *publicKeyRecord) match(url *url.URL) bool { + if url == nil { + return false + } + if url.Host != r.url.Host { + return false + } + if !r.prefix { + return url.Path == r.url.Path + } + pattern := r.url.Path + n := len(pattern) + if pattern[n-1] != '/' { + return pattern == url.Path + } + return len(url.Path) >= n && url.Path[0:n] == pattern +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/keys_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/keys_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/keys_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,244 @@ +package bakery_test + +import ( + "encoding/base64" + "encoding/json" + + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" +) + +type KeysSuite struct{} + +var _ = gc.Suite(&KeysSuite{}) + +var testKey = newTestKey(0) + +func (*KeysSuite) TestMarshalBinary(c *gc.C) { + data, err := testKey.MarshalBinary() + c.Assert(err, gc.IsNil) + c.Assert(data, jc.DeepEquals, []byte(testKey[:])) + + var key1 bakery.Key + err = key1.UnmarshalBinary(data) + c.Assert(err, gc.IsNil) + c.Assert(key1, gc.DeepEquals, testKey) +} + +func (*KeysSuite) TestMarshalText(c *gc.C) { + data, err := testKey.MarshalText() + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, base64.StdEncoding.EncodeToString([]byte(testKey[:]))) + + var key1 bakery.Key + err = key1.UnmarshalText(data) + c.Assert(err, gc.IsNil) + c.Assert(key1, gc.Equals, testKey) +} + +func (*KeysSuite) TestKeyPairMarshalJSON(c *gc.C) { + kp := bakery.KeyPair{ + Public: bakery.PublicKey{testKey}, + Private: bakery.PrivateKey{testKey}, + } + kp.Private.Key[0] = 99 + data, err := json.Marshal(kp) + c.Assert(err, gc.IsNil) + var x interface{} + err = json.Unmarshal(data, &x) + c.Assert(err, gc.IsNil) + + // Check that the fields have marshaled as strings. 
+ c.Assert(x.(map[string]interface{})["private"], gc.FitsTypeOf, "") + c.Assert(x.(map[string]interface{})["public"], gc.FitsTypeOf, "") + + var kp1 bakery.KeyPair + err = json.Unmarshal(data, &kp1) + c.Assert(err, gc.IsNil) + c.Assert(kp1, jc.DeepEquals, kp) +} + +func newTestKey(n byte) bakery.Key { + var k bakery.Key + for i := range k { + k[i] = n + byte(i) + } + return k +} + +type addPublicKeyArgs struct { + loc string + prefix bool + key bakery.Key +} + +var publicKeyRingTests = []struct { + about string + add []addPublicKeyArgs + loc string + expectKey bakery.Key + expectNotFound bool +}{{ + about: "empty keyring", + add: []addPublicKeyArgs{}, + loc: "something", + expectNotFound: true, +}, { + about: "single non-prefix key", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x", + key: testKey, + }}, + loc: "http://foo.com/x", + expectKey: testKey, +}, { + about: "single prefix key", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x", + key: testKey, + prefix: true, + }}, + loc: "http://foo.com/x", + expectKey: testKey, +}, { + about: "pattern longer than url", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x", + key: testKey, + prefix: true, + }}, + loc: "http://foo.com/", + expectNotFound: true, +}, { + about: "pattern not ending in /", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x", + key: testKey, + prefix: true, + }}, + loc: "http://foo.com/x/y", + expectNotFound: true, +}, { + about: "mismatched host", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x", + key: testKey, + prefix: true, + }}, + loc: "http://bar.com/x/y", + expectNotFound: true, +}, { + about: "http vs https", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x", + key: testKey, + }}, + loc: "https://foo.com/x", + expectKey: testKey, +}, { + about: "naked pattern url with prefix", + add: []addPublicKeyArgs{{ + loc: "http://foo.com", + key: testKey, + prefix: true, + }}, + loc: "http://foo.com/arble", + expectKey: testKey, +}, { + about: "naked pattern url with prefix with naked match url", + add: []addPublicKeyArgs{{ + loc: "http://foo.com", + key: testKey, + prefix: true, + }}, + loc: "http://foo.com", + expectKey: testKey, +}, { + about: "naked pattern url, no prefix", + add: []addPublicKeyArgs{{ + loc: "http://foo.com", + key: testKey, + }}, + loc: "http://foo.com", + expectKey: testKey, +}, { + about: "naked pattern url, no prefix, match with no slash", + add: []addPublicKeyArgs{{ + loc: "http://foo.com", + key: testKey, + }}, + loc: "http://foo.com/", + expectKey: testKey, +}, { + about: "port mismatch", + add: []addPublicKeyArgs{{ + loc: "http://foo.com:8080/x", + key: testKey, + }}, + loc: "https://foo.com/x", + expectNotFound: true, +}, { + about: "url longer than pattern", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x/", + key: testKey, + prefix: true, + }}, + loc: "https://foo.com/x/y/z", + expectKey: testKey, +}, { + about: "longer match preferred", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/x/", + key: newTestKey(0), + prefix: true, + }, { + loc: "http://foo.com/x/y/", + key: newTestKey(1), + prefix: true, + }}, + loc: "https://foo.com/x/y/z", + expectKey: newTestKey(1), +}, { + about: "longer match preferred, with other matches", + add: []addPublicKeyArgs{{ + loc: "http://foo.com/foo/arble", + key: newTestKey(0), + prefix: true, + }, { + loc: "http://foo.com/foo/arble/blah/", + key: newTestKey(1), + prefix: true, + }, { + loc: "http://foo.com/foo/", + key: newTestKey(2), + prefix: true, + }, { + loc: "http://foo.com/foobieblahbletcharbl", + key: newTestKey(3), + 
prefix: true, + }}, + loc: "https://foo.com/foo/arble/blah/x", + expectKey: newTestKey(1), +}} + +func (*KeysSuite) TestPublicKeyRing(c *gc.C) { + for i, test := range publicKeyRingTests { + c.Logf("test %d: %s", i, test.about) + kr := bakery.NewPublicKeyRing() + for _, add := range test.add { + err := kr.AddPublicKeyForLocation(add.loc, add.prefix, &bakery.PublicKey{add.key}) + c.Assert(err, gc.IsNil) + } + key, err := kr.PublicKeyForLocation(test.loc) + if test.expectNotFound { + c.Assert(err, gc.Equals, bakery.ErrNotFound) + c.Assert(key, gc.IsNil) + continue + } + c.Assert(err, gc.IsNil) + c.Assert(*key, gc.Equals, bakery.PublicKey{test.expectKey}) + } +} === added directory 'src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage' === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package mgostorage_test + +import ( + "testing" + + jujutesting "github.com/juju/testing" +) + +func TestPackage(t *testing.T) { + jujutesting.MgoTestPackage(t, nil) +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,78 @@ +// Package mgostorage provides an implementation of the +// bakery Storage interface that uses MongoDB to store +// items. +package mgostorage + +import ( + "gopkg.in/errgo.v1" + "gopkg.in/mgo.v2" + + "gopkg.in/macaroon-bakery.v1/bakery" +) + +// New returns an implementation of Storage +// that stores all items in MongoDB. +// It never returns an error (the error return +// is for backward compatibility with a previous +// version that could return an error). +// +// Note that the caller is responsible for closing +// the mgo session associated with the collection. +func New(c *mgo.Collection) (bakery.Storage, error) { + return mgoStorage{ + col: c, + }, nil +} + +type mgoStorage struct { + col *mgo.Collection +} + +type storageDoc struct { + Location string `bson:"_id"` + Item string `bson:"item"` + + // OldLocation is set for backward compatibility reasons - the + // original version of the code used "loc" as a unique index + // so we need to maintain the uniqueness otherwise + // inserts will fail. + // TODO remove this when moving to bakery.v2. + OldLocation string `bson:"loc"` +} + +// Put implements bakery.Storage.Put. +func (s mgoStorage) Put(location, item string) error { + i := storageDoc{ + Location: location, + OldLocation: location, + Item: item, + } + _, err := s.col.UpsertId(location, i) + if err != nil { + return errgo.Notef(err, "cannot store item for location %q", location) + } + return nil +} + +// Get implements bakery.Storage.Get. +func (s mgoStorage) Get(location string) (string, error) { + var i storageDoc + err := s.col.FindId(location).One(&i) + if err != nil { + if err == mgo.ErrNotFound { + return "", bakery.ErrNotFound + } + return "", errgo.Notef(err, "cannot get %q", location) + } + + return i.Item, nil +} + +// Del implements bakery.Storage.Del. 
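To use this store in place of the default in-memory one, pass it as the Store parameter when creating a service. A sketch, assuming a reachable MongoDB server; the address, database and collection names here are hypothetical:

    package main

    import (
        "log"

        "gopkg.in/mgo.v2"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/mgostorage"
    )

    func main() {
        session, err := mgo.Dial("localhost:27017") // hypothetical server address
        if err != nil {
            log.Fatal(err)
        }
        defer session.Close()

        store, err := mgostorage.New(session.DB("bakerydb").C("macaroons"))
        if err != nil {
            log.Fatal(err)
        }
        svc, err := bakery.NewService(bakery.NewServiceParams{
            Location: "my-service",
            Store:    store,
        })
        if err != nil {
            log.Fatal(err)
        }
        // Root keys for macaroons minted by svc now live in MongoDB,
        // so they survive process restarts.
        m, err := svc.NewMacaroon("", nil, nil)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("minted macaroon with id %q", m.Id())
    }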
+func (s mgoStorage) Del(location string) error { + err := s.col.RemoveId(location) + if err != nil { + return errgo.Notef(err, "cannot remove %q", location) + } + return nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/mgostorage/storage_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,132 @@ +package mgostorage_test + +import ( + "errors" + "fmt" + + "github.com/juju/testing" + gc "gopkg.in/check.v1" + "gopkg.in/macaroon.v1" + "gopkg.in/mgo.v2" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakery/mgostorage" +) + +type StorageSuite struct { + testing.MgoSuite + session *mgo.Session + store bakery.Storage +} + +var _ = gc.Suite(&StorageSuite{}) + +func (s *StorageSuite) SetUpTest(c *gc.C) { + s.MgoSuite.SetUpTest(c) + s.session = testing.MgoServer.MustDial() + + store, err := mgostorage.New(s.session.DB("test").C("items")) + c.Assert(err, gc.IsNil) + + s.store = store +} + +func (s *StorageSuite) TearDownTest(c *gc.C) { + s.session.Close() + s.MgoSuite.TearDownTest(c) +} + +func (s *StorageSuite) TestMgoStorage(c *gc.C) { + err := s.store.Put("foo", "bar") + c.Assert(err, gc.IsNil) + item, err := s.store.Get("foo") + c.Assert(err, gc.IsNil) + c.Assert(item, gc.Equals, "bar") + + err = s.store.Put("bletch", "blat") + c.Assert(err, gc.IsNil) + item, err = s.store.Get("bletch") + c.Assert(err, gc.IsNil) + c.Assert(item, gc.Equals, "blat") + + item, err = s.store.Get("nothing") + c.Assert(err, gc.Equals, bakery.ErrNotFound) + c.Assert(item, gc.Equals, "") + + err = s.store.Del("bletch") + c.Assert(err, gc.IsNil) + + item, err = s.store.Get("bletch") + c.Assert(err, gc.Equals, bakery.ErrNotFound) + c.Assert(item, gc.Equals, "") +} + +func (s *StorageSuite) TestMgoStorageUpsert(c *gc.C) { + err := s.store.Put("foo", "bar") + c.Assert(err, gc.IsNil) + item, err := s.store.Get("foo") + c.Assert(err, gc.IsNil) + c.Assert(item, gc.Equals, "bar") + + err = s.store.Put("foo", "ba-ba") + c.Assert(err, gc.IsNil) + item, err = s.store.Get("foo") + c.Assert(err, gc.IsNil) + c.Assert(item, gc.Equals, "ba-ba") + +} + +func (s *StorageSuite) TestConcurrentMgoStorage(c *gc.C) { + done := make(chan struct{}) + for i := 0; i < 3; i++ { + i := i + go func() { + k := fmt.Sprint(i) + err := s.store.Put(k, k) + c.Check(err, gc.IsNil) + v, err := s.store.Get(k) + c.Check(v, gc.Equals, k) + err = s.store.Del(k) + c.Check(err, gc.IsNil) + done <- struct{}{} + }() + } + for i := 0; i < 3; i++ { + <-done + } +} + +type testChecker struct{} + +func (tc *testChecker) CheckFirstPartyCaveat(caveat string) error { + if caveat != "is-authorised bob" { + return errors.New("not bob") + } + return nil +} + +func (s *StorageSuite) TestCreateMacaroon(c *gc.C) { + keypair, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + + params := bakery.NewServiceParams{Location: "local", Store: s.store, Key: keypair} + service, err := bakery.NewService(params) + c.Assert(err, gc.IsNil) + c.Assert(service, gc.NotNil) + + m, err := service.NewMacaroon( + "123", + []byte("abc"), + []checkers.Caveat{checkers.Caveat{Location: "", Condition: "is-authorised bob"}}, + ) + c.Assert(err, gc.IsNil) + c.Assert(m, gc.NotNil) + + item, err := s.store.Get("123") + c.Assert(err, gc.IsNil) + c.Assert(item, gc.DeepEquals, `{"RootKey":"YWJj"}`) + + err = service.Check(macaroon.Slice{m}, &testChecker{}) + 
c.Assert(err, gc.IsNil) +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package bakery_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakery/service.go' --- src/gopkg.in/macaroon-bakery.v1/bakery/service.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakery/service.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,439 @@ +// The bakery package layers on top of the macaroon package, providing +// a transport and storage-agnostic way of using macaroons to assert +// client capabilities. +// +package bakery + +import ( + "crypto/rand" + "fmt" + "strings" + + "github.com/juju/loggo" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +var logger = loggo.GetLogger("bakery") + +// Service represents a service which can use macaroons +// to check authorization. +type Service struct { + location string + store storage + checker FirstPartyChecker + encoder *boxEncoder + key *KeyPair + locator PublicKeyLocator +} + +// NewServiceParams holds the parameters for a NewService call. +type NewServiceParams struct { + // Location will be set as the location of any macaroons + // minted by the service. + Location string + + // Store will be used to store macaroon + // information locally. If it is nil, + // an in-memory storage will be used. + Store Storage + + // Key is the public key pair used by the service for + // third-party caveat encryption. + // It may be nil, in which case a new key pair + // will be generated. + Key *KeyPair + + // Locator provides public keys for third-party services by location when + // adding a third-party caveat. + // It may be nil, in which case, no third-party caveats can be created. + Locator PublicKeyLocator +} + +// NewService returns a new service that can mint new +// macaroons and store their associated root keys. +func NewService(p NewServiceParams) (*Service, error) { + if p.Store == nil { + p.Store = NewMemStorage() + } + svc := &Service{ + location: p.Location, + store: storage{p.Store}, + locator: p.Locator, + } + + var err error + if p.Key == nil { + p.Key, err = GenerateKey() + if err != nil { + return nil, err + } + } + if svc.locator == nil { + svc.locator = PublicKeyLocatorMap(nil) + } + svc.key = p.Key + svc.encoder = newBoxEncoder(p.Key) + return svc, nil +} + +// Store returns the store used by the service. +func (svc *Service) Store() Storage { + return svc.store.store +} + +// Location returns the service's configured macaroon location. +func (svc *Service) Location() string { + return svc.location +} + +// PublicKey returns the service's public key. +func (svc *Service) PublicKey() *PublicKey { + return &svc.key.Public +} + +// Check checks that the given macaroons verify +// correctly using the provided checker to check +// first party caveats. The primary macaroon is in ms[0]; the discharges +// fill the rest of the slice. +// +// If there is a verification error, it returns a VerificationError that +// describes the error (other errors might be returned in other +// circumstances). 
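A minimal round trip through Check, sketched with only the constructs defined in this file: mint a macaroon carrying one first-party caveat and verify it with a FirstPartyCheckerFunc. The "access-allowed" condition is illustrative:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
    )

    func main() {
        svc, err := bakery.NewService(bakery.NewServiceParams{Location: "target"})
        if err != nil {
            log.Fatal(err)
        }
        m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{
            Condition: "access-allowed",
        }})
        if err != nil {
            log.Fatal(err)
        }
        check := bakery.FirstPartyCheckerFunc(func(caveat string) error {
            if caveat != "access-allowed" {
                return fmt.Errorf("caveat %q not satisfied", caveat)
            }
            return nil
        })
        // The primary macaroon goes in ms[0]; there are no discharges here.
        if err := svc.Check(macaroon.Slice{m}, check); err != nil {
            log.Fatalf("verification failed: %v", err)
        }
        log.Println("verified")
    }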
+func (svc *Service) Check(ms macaroon.Slice, checker FirstPartyChecker) error { + if len(ms) == 0 { + return &VerificationError{ + Reason: fmt.Errorf("no macaroons in slice"), + } + } + item, err := svc.store.Get(ms[0].Id()) + if err != nil { + if errgo.Cause(err) == ErrNotFound { + // If the macaroon was not found, it is probably + // because it's been removed after time-expiry, + // so return a verification error. + return &VerificationError{ + Reason: errgo.New("macaroon not found in storage"), + } + } + return errgo.Notef(err, "cannot get macaroon") + } + err = ms[0].Verify(item.RootKey, checker.CheckFirstPartyCaveat, ms[1:]) + if err != nil { + return &VerificationError{ + Reason: err, + } + } + return nil +} + +// CheckAnyM is like CheckAny except that on success it also returns +// the set of macaroons that was successfully checked. +// The "M" suffix is for backward compatibility reasons - in a +// later bakery version, the signature of CheckRequest will be +// changed to return the macaroon slice and CheckAnyM will be +// removed. +func (svc *Service) CheckAnyM(mss []macaroon.Slice, assert map[string]string, checker checkers.Checker) (map[string]string, macaroon.Slice, error) { + if len(mss) == 0 { + return nil, nil, &VerificationError{ + Reason: errgo.Newf("no macaroons"), + } + } + // TODO perhaps return a slice of attribute maps, one + // for each successfully validated macaroon slice? + var err error + for _, ms := range mss { + declared := checkers.InferDeclared(ms) + for key, val := range assert { + declared[key] = val + } + err = svc.Check(ms, checkers.New(declared, checker)) + if err == nil { + return declared, ms, nil + } + } + // Return an arbitrary error from the macaroons provided. + // TODO return all errors. + return nil, nil, errgo.Mask(err, isVerificationError) +} + +// CheckAny checks that the given slice of slices contains at least +// one macaroon minted by the given service, using checker to check +// any first party caveats. It returns an error with a +// *bakery.VerificationError cause if the macaroon verification failed. +// +// The assert map holds any required attributes of "declared" attributes, +// overriding any inferences made from the macaroons themselves. +// It has a similar effect to adding a checkers.DeclaredCaveat +// for each key and value, but the error message will be more +// useful. +// +// It adds all the standard caveat checkers to the given checker. +// +// It returns any attributes declared in the successfully validated request. +func (svc *Service) CheckAny(mss []macaroon.Slice, assert map[string]string, checker checkers.Checker) (map[string]string, error) { + attrs, _, err := svc.CheckAnyM(mss, assert, checker) + return attrs, err +} + +func isVerificationError(err error) bool { + _, ok := err.(*VerificationError) + return ok +} + +// NewMacaroon mints a new macaroon with the given id and caveats. +// If the id is empty, a random id will be used. +// If rootKey is nil, a random root key will be used. +// The macaroon will be stored in the service's storage. +// TODO swap the first two arguments so that they're +// in the same order as macaroon.New. 
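The interplay between declared caveats and CheckAny can be seen in a short sketch: a "declared" caveat added at mint time is inferred back by InferDeclared inside CheckAnyM and returned in the attribute map. The attribute name is illustrative:

    package main

    import (
        "log"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
    )

    func main() {
        svc, err := bakery.NewService(bakery.NewServiceParams{Location: "target"})
        if err != nil {
            log.Fatal(err)
        }
        // DeclaredCaveat adds a first-party "declared username alice" caveat.
        m, err := svc.NewMacaroon("", nil, []checkers.Caveat{
            checkers.DeclaredCaveat("username", "alice"),
        })
        if err != nil {
            log.Fatal(err)
        }
        // CheckAny infers the declared attributes from the macaroons,
        // verifies them, and hands the attributes back.
        attrs, err := svc.CheckAny([]macaroon.Slice{{m}}, nil, checkers.New())
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("authenticated as %q", attrs["username"])
    }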
+func (svc *Service) NewMacaroon(id string, rootKey []byte, caveats []checkers.Caveat) (*macaroon.Macaroon, error) { + if rootKey == nil { + newRootKey, err := randomBytes(24) + if err != nil { + return nil, fmt.Errorf("cannot generate root key for new macaroon: %v", err) + } + rootKey = newRootKey + } + if id == "" { + idBytes, err := randomBytes(24) + if err != nil { + return nil, fmt.Errorf("cannot generate id for new macaroon: %v", err) + } + id = fmt.Sprintf("%x", idBytes) + } + m, err := macaroon.New(rootKey, id, svc.location) + if err != nil { + return nil, fmt.Errorf("cannot bake macaroon: %v", err) + } + for _, cav := range caveats { + if err := svc.AddCaveat(m, cav); err != nil { + return nil, errgo.Notef(err, "cannot add caveat") + } + } + // TODO look at the caveats for expiry time and associate + // that with the storage item so that the storage can + // garbage collect it at an appropriate time. + if err := svc.store.Put(m.Id(), &storageItem{ + RootKey: rootKey, + }); err != nil { + return nil, fmt.Errorf("cannot save macaroon to store: %v", err) + } + return m, nil +} + +// LocalThirdPartyCaveat returns a third-party caveat that, when added +// to a macaroon with AddCaveat, results in a caveat +// with the location "local", encrypted with the given public key. +// This can be automatically discharged by DischargeAllWithKey. +func LocalThirdPartyCaveat(key *PublicKey) checkers.Caveat { + return checkers.Caveat{ + Location: "local " + key.String(), + } +} + +// AddCaveat adds a caveat to the given macaroon. +// +// If it's a third-party caveat, it uses the service's caveat-id encoder +// to create the id of the new caveat. +// +// As a special case, if the caveat's Location field has the prefix +// "local " the caveat is added as a client self-discharge caveat +// using the public key base64-encoded in the rest of the location. +// In this case, the Condition field must be empty. The +// resulting third-party caveat will encode the condition "true" +// encrypted with that public key. See LocalThirdPartyCaveat +// for a way of creating such caveats. 
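Putting NewMacaroon, AddCaveat and DischargeAll together gives the complete third-party flow. A sketch, assuming (as the conversion from nil in NewService suggests) that bakery.PublicKeyLocatorMap is an ordinary map keyed by location; the locations are illustrative:

    package main

    import (
        "log"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
    )

    func main() {
        // Assumed to be a plain map from location to public key.
        locator := make(bakery.PublicKeyLocatorMap)

        ts, err := bakery.NewService(bakery.NewServiceParams{Location: "ts-loc", Locator: locator})
        if err != nil {
            log.Fatal(err)
        }
        as, err := bakery.NewService(bakery.NewServiceParams{Location: "as-loc", Locator: locator})
        if err != nil {
            log.Fatal(err)
        }
        locator["as-loc"] = as.PublicKey()

        m, err := ts.NewMacaroon("", nil, nil)
        if err != nil {
            log.Fatal(err)
        }
        // Encrypts a caveat id to as's public key, found via the locator.
        if err := ts.AddCaveat(m, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}); err != nil {
            log.Fatal(err)
        }

        // The client gathers a discharge for each third-party caveat.
        ms, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) {
            return as.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) {
                if caveat != "user==bob" {
                    return nil, checkers.ErrCaveatNotRecognized
                }
                return nil, nil
            }), cav.Id)
        })
        if err != nil {
            log.Fatal(err)
        }
        if err := ts.Check(ms, checkers.New()); err != nil {
            log.Fatal(err)
        }
        log.Println("all caveats discharged and verified")
    }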
+func (svc *Service) AddCaveat(m *macaroon.Macaroon, cav checkers.Caveat) error { + logger.Infof("Service.AddCaveat id %q; cav %#v", m.Id(), cav) + if cav.Location == "" { + m.AddFirstPartyCaveat(cav.Condition) + return nil + } + var thirdPartyPub *PublicKey + if strings.HasPrefix(cav.Location, "local ") { + var key PublicKey + if err := key.UnmarshalText([]byte(cav.Location[len("local "):])); err != nil { + return errgo.Notef(err, "cannot unmarshal client's public key in local third-party caveat") + } + thirdPartyPub = &key + cav.Location = "local" + if cav.Condition != "" { + return errgo.New("cannot specify caveat condition in local third-party caveat") + } + cav.Condition = "true" + } else { + var err error + thirdPartyPub, err = svc.locator.PublicKeyForLocation(cav.Location) + if err != nil { + return errgo.Notef(err, "cannot find public key for location %q", cav.Location) + } + } + rootKey, err := randomBytes(24) + if err != nil { + return errgo.Notef(err, "cannot generate third party secret") + } + id, err := svc.encoder.encodeCaveatId(cav.Condition, rootKey, thirdPartyPub) + if err != nil { + return errgo.Notef(err, "cannot create third party caveat id at %q", cav.Location) + } + if err := m.AddThirdPartyCaveat(rootKey, id, cav.Location); err != nil { + return errgo.Notef(err, "cannot add third party caveat") + } + return nil +} + +// Discharge creates a macaroon that discharges the third party caveat with the +// given id that should have been created earlier using key.Public. The +// condition implicit in the id is checked for validity using checker. If +// it is valid, a new macaroon is returned which discharges the caveat +// along with any caveats returned from the checker. +func Discharge(key *KeyPair, checker ThirdPartyChecker, id string) (*macaroon.Macaroon, []checkers.Caveat, error) { + decoder := newBoxDecoder(key) + + logger.Infof("server attempting to discharge %q", id) + rootKey, condition, err := decoder.decodeCaveatId(id) + if err != nil { + return nil, nil, errgo.Notef(err, "discharger cannot decode caveat id") + } + // Note that we don't check the error - we allow the + // third party checker to see even caveats that we can't + // understand. + cond, arg, _ := checkers.ParseCaveat(condition) + var caveats []checkers.Caveat + if cond == checkers.CondNeedDeclared { + caveats, err = checkNeedDeclared(id, arg, checker) + } else { + caveats, err = checker.CheckThirdPartyCaveat(id, condition) + } + if err != nil { + return nil, nil, errgo.Mask(err, errgo.Any) + } + // Note that the discharge macaroon does not need to + // be stored persistently. Indeed, it would be a problem if + // we did, because then the macaroon could potentially be used + // for normal authorization with the third party. + m, err := macaroon.New(rootKey, id, "") + if err != nil { + return nil, nil, errgo.Mask(err) + } + return m, caveats, nil +} + +// Discharge calls Discharge with the service's key and uses the service +// to add any returned caveats to the discharge macaroon. 
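The "local" special case above pairs with DischargeAllWithKey: the service adds a caveat encrypted to the client's own public key, and the client discharges it without contacting any third party, proving possession of the private key. A sketch using LocalThirdPartyCaveat, mirroring the local-discharge test earlier in this tree:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/macaroon.v1"

        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
    )

    func main() {
        svc, err := bakery.NewService(bakery.NewServiceParams{Location: "server"})
        if err != nil {
            log.Fatal(err)
        }
        clientKey, err := bakery.GenerateKey()
        if err != nil {
            log.Fatal(err)
        }
        // The caveat is encrypted to the client's public key and given
        // the special location "local".
        m, err := svc.NewMacaroon("", nil, []checkers.Caveat{
            bakery.LocalThirdPartyCaveat(&clientKey.Public),
        })
        if err != nil {
            log.Fatal(err)
        }
        // The getDischarge callback must never fire; only the local
        // key is needed.
        ms, err := bakery.DischargeAllWithKey(m, func(string, macaroon.Caveat) (*macaroon.Macaroon, error) {
            return nil, fmt.Errorf("no external discharges expected")
        }, clientKey)
        if err != nil {
            log.Fatal(err)
        }
        if err := svc.Check(ms, checkers.New()); err != nil {
            log.Fatal(err)
        }
        log.Println("client proved ownership of its key")
    }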
+func (svc *Service) Discharge(checker ThirdPartyChecker, id string) (*macaroon.Macaroon, error) { + m, caveats, err := Discharge(svc.encoder.key, checker, id) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + for _, cav := range caveats { + if err := svc.AddCaveat(m, cav); err != nil { + return nil, errgo.Notef(err, "cannot add caveat") + } + } + return m, nil +} + +func checkNeedDeclared(caveatId, arg string, checker ThirdPartyChecker) ([]checkers.Caveat, error) { + i := strings.Index(arg, " ") + if i <= 0 { + return nil, errgo.Newf("need-declared caveat requires an argument, got %q", arg) + } + needDeclared := strings.Split(arg[0:i], ",") + for _, d := range needDeclared { + if d == "" { + return nil, errgo.New("need-declared caveat with empty required attribute") + } + } + if len(needDeclared) == 0 { + return nil, fmt.Errorf("need-declared caveat with no required attributes") + } + caveats, err := checker.CheckThirdPartyCaveat(caveatId, arg[i+1:]) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + declared := make(map[string]bool) + for _, cav := range caveats { + if cav.Location != "" { + continue + } + // Note that we ignore the error. We allow the service to + // generate caveats that we don't understand here. + cond, arg, _ := checkers.ParseCaveat(cav.Condition) + if cond != checkers.CondDeclared { + continue + } + parts := strings.SplitN(arg, " ", 2) + if len(parts) != 2 { + return nil, errgo.Newf("declared caveat has no value") + } + declared[parts[0]] = true + } + // Add empty declarations for everything mentioned in need-declared + // that was not actually declared. + for _, d := range needDeclared { + if !declared[d] { + caveats = append(caveats, checkers.DeclaredCaveat(d, "")) + } + } + return caveats, nil +} + +func randomBytes(n int) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) + } + return b, nil +} + +type VerificationError struct { + Reason error +} + +func (e *VerificationError) Error() string { + return fmt.Sprintf("verification failed: %v", e.Reason) +} + +// TODO(rog) consider possible options for checkers: +// - first and third party checkers could be merged, but +// then there would have to be a runtime check +// that when used to check first-party caveats, the +// checker does not return third-party caveats. + +// ThirdPartyChecker holds a function that checks third party caveats +// for validity. If the caveat is valid, it returns a nil error and +// optionally a slice of extra caveats that will be added to the +// discharge macaroon. The caveatId parameter holds the still-encoded id +// of the caveat. +// +// If the caveat kind was not recognised, the checker should return an +// error with a ErrCaveatNotRecognized cause. +type ThirdPartyChecker interface { + CheckThirdPartyCaveat(caveatId, caveat string) ([]checkers.Caveat, error) +} + +type ThirdPartyCheckerFunc func(caveatId, caveat string) ([]checkers.Caveat, error) + +func (c ThirdPartyCheckerFunc) CheckThirdPartyCaveat(caveatId, caveat string) ([]checkers.Caveat, error) { + return c(caveatId, caveat) +} + +// FirstPartyChecker holds a function that checks first party caveats +// for validity. +// +// If the caveat kind was not recognised, the checker should return +// ErrCaveatNotRecognized. 
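In practice a ThirdPartyChecker is usually just a function wrapped in ThirdPartyCheckerFunc. A sketch of one that discharges a single condition and stamps the discharge with a declaration that verifiers can later read back via checkers.InferDeclared; the condition and attribute names are illustrative:

    package example

    import (
        "gopkg.in/macaroon-bakery.v1/bakery"
        "gopkg.in/macaroon-bakery.v1/bakery/checkers"
    )

    // allowBob discharges only the condition "user==bob" and attaches
    // a declaration recording the user name to the discharge macaroon.
    var allowBob = bakery.ThirdPartyCheckerFunc(func(caveatId, caveat string) ([]checkers.Caveat, error) {
        if caveat != "user==bob" {
            // Unrecognised conditions must be rejected with this cause.
            return nil, checkers.ErrCaveatNotRecognized
        }
        return []checkers.Caveat{
            checkers.DeclaredCaveat("username", "bob"),
        }, nil
    })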
+type FirstPartyChecker interface {
+	CheckFirstPartyCaveat(caveat string) error
+}
+
+type FirstPartyCheckerFunc func(caveat string) error
+
+func (c FirstPartyCheckerFunc) CheckFirstPartyCaveat(caveat string) error {
+	return c(caveat)
+}

=== added file 'src/gopkg.in/macaroon-bakery.v1/bakery/service_test.go'
--- src/gopkg.in/macaroon-bakery.v1/bakery/service_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/bakery/service_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,464 @@
+package bakery_test
+
+import (
+	"encoding/json"
+	"fmt"
+
+	gc "gopkg.in/check.v1"
+	"gopkg.in/macaroon.v1"
+
+	jc "github.com/juju/testing/checkers"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+)
+
+type ServiceSuite struct{}
+
+var _ = gc.Suite(&ServiceSuite{})
+
+// TestSingleServiceFirstParty creates a single service
+// with a macaroon with one first party caveat.
+// It creates a request with this macaroon and checks that the service
+// can verify this macaroon as valid.
+func (s *ServiceSuite) TestSingleServiceFirstParty(c *gc.C) {
+	p := bakery.NewServiceParams{
+		Location: "loc",
+		Store:    nil,
+		Key:      nil,
+		Locator:  nil,
+	}
+	service, err := bakery.NewService(p)
+	c.Assert(err, gc.IsNil)
+
+	primary, err := service.NewMacaroon("", nil, nil)
+	c.Assert(err, gc.IsNil)
+	c.Assert(primary.Location(), gc.Equals, "loc")
+	cav := checkers.Caveat{
+		Location:  "",
+		Condition: "something",
+	}
+	err = service.AddCaveat(primary, cav)
+	c.Assert(err, gc.IsNil)
+
+	err = service.Check(macaroon.Slice{primary}, strcmpChecker("something"))
+	c.Assert(err, gc.IsNil)
+}
+
+// TestMacaroonPaperFig6 implements an example flow as described in the macaroons paper:
+// http://theory.stanford.edu/~ataly/Papers/macaroons.pdf
+// There are three services, ts, fs, as:
+// ts is a storage service which has delegated authority to a forum service fs.
+// The forum service wants to require its users to be logged in to an authentication service as.
+//
+// The client obtains a macaroon from fs (minted by ts, with a third party caveat addressed to as).
+// The client obtains a discharge macaroon from as to satisfy this caveat.
+// The target service verifies the original macaroon it delegated to fs.
+// No direct contact between as and ts is required.
+func (s *ServiceSuite) TestMacaroonPaperFig6(c *gc.C) {
+	locator := make(bakery.PublicKeyLocatorMap)
+	as := newService(c, "as-loc", locator)
+	ts := newService(c, "ts-loc", locator)
+	fs := newService(c, "fs-loc", locator)
+
+	// ts creates a macaroon.
+	tsMacaroon, err := ts.NewMacaroon("", nil, nil)
+	c.Assert(err, gc.IsNil)
+
+	// ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as.
+ err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) + c.Assert(err, gc.IsNil) + + // client asks for a discharge macaroon for each third party caveat + d, err := bakery.DischargeAll(tsMacaroon, func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + c.Assert(firstPartyLocation, gc.Equals, "ts-loc") + c.Assert(cav.Location, gc.Equals, "as-loc") + mac, err := as.Discharge(strcmpChecker("user==bob"), cav.Id) + c.Assert(err, gc.IsNil) + return mac, nil + }) + c.Assert(err, gc.IsNil) + + err = ts.Check(d, strcmpChecker("")) + c.Assert(err, gc.IsNil) +} + +func macStr(m *macaroon.Macaroon) string { + data, err := json.MarshalIndent(m, "\t", "\t") + if err != nil { + panic(err) + } + return string(data) +} + +// TestMacaroonPaperFig6FailsWithoutDischarges runs a similar test as TestMacaroonPaperFig6 +// without the client discharging the third party caveats. +func (s *ServiceSuite) TestMacaroonPaperFig6FailsWithoutDischarges(c *gc.C) { + locator := make(bakery.PublicKeyLocatorMap) + ts := newService(c, "ts-loc", locator) + fs := newService(c, "fs-loc", locator) + _ = newService(c, "as-loc", locator) + + // ts creates a macaroon. + tsMacaroon, err := ts.NewMacaroon("", nil, nil) + c.Assert(err, gc.IsNil) + + // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. + err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) + c.Assert(err, gc.IsNil) + + // client makes request to ts + err = ts.Check(macaroon.Slice{tsMacaroon}, strcmpChecker("")) + c.Assert(err, gc.ErrorMatches, `verification failed: cannot find discharge macaroon for caveat ".*"`) +} + +// TestMacaroonPaperFig6FailsWithBindingOnTamperedSignature runs a similar test as TestMacaroonPaperFig6 +// with the discharge macaroon binding being done on a tampered signature. +func (s *ServiceSuite) TestMacaroonPaperFig6FailsWithBindingOnTamperedSignature(c *gc.C) { + locator := make(bakery.PublicKeyLocatorMap) + as := newService(c, "as-loc", locator) + ts := newService(c, "ts-loc", locator) + fs := newService(c, "fs-loc", locator) + + // ts creates a macaroon. + tsMacaroon, err := ts.NewMacaroon("", nil, nil) + c.Assert(err, gc.IsNil) + + // ts somehow sends the macaroon to fs which adds a third party caveat to be discharged by as. + err = fs.AddCaveat(tsMacaroon, checkers.Caveat{Location: "as-loc", Condition: "user==bob"}) + c.Assert(err, gc.IsNil) + + // client asks for a discharge macaroon for each third party caveat + d, err := bakery.DischargeAll(tsMacaroon, func(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + c.Assert(firstPartyLocation, gc.Equals, "ts-loc") + c.Assert(cav.Location, gc.Equals, "as-loc") + mac, err := as.Discharge(strcmpChecker("user==bob"), cav.Id) + c.Assert(err, gc.IsNil) + return mac, nil + }) + c.Assert(err, gc.IsNil) + + // client has all the discharge macaroons. For each discharge macaroon bind it to our tsMacaroon + // and add it to our request. + for _, dm := range d[1:] { + dm.Bind([]byte("tampered-signature")) // Bind against an incorrect signature. + } + + // client makes request to ts. 
+ err = ts.Check(d, strcmpChecker("")) + c.Assert(err, gc.ErrorMatches, "verification failed: signature mismatch after caveat verification") +} + +func (s *ServiceSuite) TestNeedDeclared(c *gc.C) { + locator := make(bakery.PublicKeyLocatorMap) + firstParty := newService(c, "first", locator) + thirdParty := newService(c, "third", locator) + + // firstParty mints a macaroon with a third-party caveat addressed + // to thirdParty with a need-declared caveat. + m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{ + checkers.NeedDeclaredCaveat(checkers.Caveat{ + Location: "third", + Condition: "something", + }, "foo", "bar"), + }) + c.Assert(err, gc.IsNil) + + // The client asks for a discharge macaroon for each third party caveat. + d, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + return thirdParty.Discharge(strcmpChecker("something"), cav.Id) + }) + c.Assert(err, gc.IsNil) + + // The required declared attributes should have been added + // to the discharge macaroons. + declared := checkers.InferDeclared(d) + c.Assert(declared, gc.DeepEquals, checkers.Declared{ + "foo": "", + "bar": "", + }) + + // Make sure the macaroons actually check out correctly + // when provided with the declared checker. + err = firstParty.Check(d, checkers.New(declared)) + c.Assert(err, gc.IsNil) + + // Try again when the third party does add a required declaration. + + // The client asks for a discharge macaroon for each third party caveat. + d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + checker := thirdPartyCheckerWithCaveats{ + checkers.DeclaredCaveat("foo", "a"), + checkers.DeclaredCaveat("arble", "b"), + } + return thirdParty.Discharge(checker, cav.Id) + }) + c.Assert(err, gc.IsNil) + + // One attribute should have been added, the other was already there. + declared = checkers.InferDeclared(d) + c.Assert(declared, gc.DeepEquals, checkers.Declared{ + "foo": "a", + "bar": "", + "arble": "b", + }) + + err = firstParty.Check(d, checkers.New(declared)) + c.Assert(err, gc.IsNil) + + // Try again, but this time pretend a client is sneakily trying + // to add another "declared" attribute to alter the declarations. + d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + checker := thirdPartyCheckerWithCaveats{ + checkers.DeclaredCaveat("foo", "a"), + checkers.DeclaredCaveat("arble", "b"), + } + m, err := thirdParty.Discharge(checker, cav.Id) + c.Assert(err, gc.IsNil) + + // Sneaky client adds a first party caveat. + m.AddFirstPartyCaveat(checkers.DeclaredCaveat("foo", "c").Condition) + return m, nil + }) + c.Assert(err, gc.IsNil) + + declared = checkers.InferDeclared(d) + c.Assert(declared, gc.DeepEquals, checkers.Declared{ + "bar": "", + "arble": "b", + }) + + err = firstParty.Check(d, checkers.New(declared)) + c.Assert(err, gc.ErrorMatches, `verification failed: caveat "declared foo a" not satisfied: got foo=null, expected "a"`) +} + +func (s *ServiceSuite) TestDischargeTwoNeedDeclared(c *gc.C) { + locator := make(bakery.PublicKeyLocatorMap) + firstParty := newService(c, "first", locator) + thirdParty := newService(c, "third", locator) + + // firstParty mints a macaroon with two third party caveats + // with overlapping attributes. 
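+	// (Both caveats below ask for a declaration of "bar", so the test
+	// exercises how overlapping need-declared attributes are merged.)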
+	m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{
+		checkers.NeedDeclaredCaveat(checkers.Caveat{
+			Location:  "third",
+			Condition: "x",
+		}, "foo", "bar"),
+		checkers.NeedDeclaredCaveat(checkers.Caveat{
+			Location:  "third",
+			Condition: "y",
+		}, "bar", "baz"),
+	})
+	c.Assert(err, gc.IsNil)
+
+	// The client asks for a discharge macaroon for each third party caveat.
+	// Since no declarations are added by the discharger, all the
+	// need-declared attributes are inferred as empty.
+	d, err := bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) {
+		return thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) {
+			return nil, nil
+		}), cav.Id)
+	})
+	c.Assert(err, gc.IsNil)
+	declared := checkers.InferDeclared(d)
+	c.Assert(declared, gc.DeepEquals, checkers.Declared{
+		"foo": "",
+		"bar": "",
+		"baz": "",
+	})
+	err = firstParty.Check(d, checkers.New(declared))
+	c.Assert(err, gc.IsNil)
+
+	// If the dischargers return conflicting declared values, verification
+	// fails. The client asks for a discharge macaroon for each third party
+	// caveat; this time the discharger adds conflicting declarations for "foo".
+	d, err = bakery.DischargeAll(m, func(_ string, cav macaroon.Caveat) (*macaroon.Macaroon, error) {
+		return thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) {
+			switch caveat {
+			case "x":
+				return []checkers.Caveat{
+					checkers.DeclaredCaveat("foo", "fooval1"),
+				}, nil
+			case "y":
+				return []checkers.Caveat{
+					checkers.DeclaredCaveat("foo", "fooval2"),
+					checkers.DeclaredCaveat("baz", "bazval"),
+				}, nil
+			}
+			return nil, fmt.Errorf("not matched")
+		}), cav.Id)
+	})
+	c.Assert(err, gc.IsNil)
+	declared = checkers.InferDeclared(d)
+	c.Assert(declared, gc.DeepEquals, checkers.Declared{
+		"bar": "",
+		"baz": "bazval",
+	})
+	err = firstParty.Check(d, checkers.New(declared))
+	c.Assert(err, gc.ErrorMatches, `verification failed: caveat "declared foo fooval1" not satisfied: got foo=null, expected "fooval1"`)
+}
+
+func (s *ServiceSuite) TestDischargeMacaroonCannotBeUsedAsNormalMacaroon(c *gc.C) {
+	locator := make(bakery.PublicKeyLocatorMap)
+	firstParty := newService(c, "first", locator)
+	thirdParty := newService(c, "third", locator)
+
+	// First party mints a macaroon with a third party caveat.
+	m, err := firstParty.NewMacaroon("", nil, []checkers.Caveat{{
+		Location:  "third",
+		Condition: "true",
+	}})
+	c.Assert(err, gc.IsNil)
+
+	// Acquire the discharge macaroon, but don't bind it to the original.
+	d, err := thirdParty.Discharge(bakery.ThirdPartyCheckerFunc(func(_, caveat string) ([]checkers.Caveat, error) {
+		return nil, nil
+	}), m.Caveats()[0].Id)
+	c.Assert(err, gc.IsNil)
+
+	// Make sure it cannot be used as a normal macaroon in the third party.
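+	// The third party service never stored a root key for the discharge
+	// macaroon in its own store, so checking it as a primary macaroon
+	// fails, as the error below shows.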
+	err = thirdParty.Check(macaroon.Slice{d}, checkers.New())
+	c.Assert(err, gc.ErrorMatches, `verification failed: macaroon not found in storage`)
+}
+
+func (*ServiceSuite) TestCheckAny(c *gc.C) {
+	svc := newService(c, "somewhere", nil)
+	newMacaroons := func(id string, caveats ...checkers.Caveat) macaroon.Slice {
+		m, err := svc.NewMacaroon(id, nil, caveats)
+		c.Assert(err, gc.IsNil)
+		return macaroon.Slice{m}
+	}
+	tests := []struct {
+		about          string
+		macaroons      []macaroon.Slice
+		assert         map[string]string
+		checker        checkers.Checker
+		expectDeclared map[string]string
+		expectId       string
+		expectError    string
+	}{{
+		about:       "no macaroons",
+		expectError: "verification failed: no macaroons",
+	}, {
+		about: "one macaroon, no caveats",
+		macaroons: []macaroon.Slice{
+			newMacaroons("x"),
+		},
+		expectId: "x",
+	}, {
+		about: "one macaroon, one unrecognized caveat",
+		macaroons: []macaroon.Slice{
+			newMacaroons("x", checkers.Caveat{
+				Condition: "bad",
+			}),
+		},
+		expectError: `verification failed: caveat "bad" not satisfied: caveat not recognized`,
+	}, {
+		about: "two macaroons, only one ok",
+		macaroons: []macaroon.Slice{
+			newMacaroons("x", checkers.Caveat{
+				Condition: "bad",
+			}),
+			newMacaroons("y"),
+		},
+		expectId: "y",
+	}, {
+		about: "macaroon with declared caveats",
+		macaroons: []macaroon.Slice{
+			newMacaroons("x",
+				checkers.DeclaredCaveat("key1", "value1"),
+				checkers.DeclaredCaveat("key2", "value2"),
+			),
+		},
+		expectDeclared: map[string]string{
+			"key1": "value1",
+			"key2": "value2",
+		},
+		expectId: "x",
+	}, {
+		about: "macaroon with declared values and asserted keys with wrong value",
+		macaroons: []macaroon.Slice{
+			newMacaroons("x",
+				checkers.DeclaredCaveat("key1", "value1"),
+				checkers.DeclaredCaveat("key2", "value2"),
+			),
+		},
+		assert: map[string]string{
+			"key1": "valuex",
+		},
+		expectId:    "x",
+		expectError: `verification failed: caveat "declared key1 value1" not satisfied: got key1="valuex", expected "value1"`,
+	}, {
+		about: "macaroon with declared values and asserted keys with correct value",
+		macaroons: []macaroon.Slice{
+			newMacaroons("x",
+				checkers.DeclaredCaveat("key1", "value1"),
+				checkers.DeclaredCaveat("key2", "value2"),
+			),
+		},
+		assert: map[string]string{
+			"key1": "value1",
+		},
+		expectDeclared: map[string]string{
+			"key1": "value1",
+			"key2": "value2",
+		},
+		expectId: "x",
+	}}
+	for i, test := range tests {
+		c.Logf("test %d: %s", i, test.about)
+		if test.expectDeclared == nil {
+			test.expectDeclared = make(map[string]string)
+		}
+		if test.checker == nil {
+			test.checker = checkers.New()
+		}
+
+		decl, ms, err := svc.CheckAnyM(test.macaroons, test.assert, test.checker)
+		if test.expectError != "" {
+			c.Assert(err, gc.ErrorMatches, test.expectError)
+			c.Assert(decl, gc.HasLen, 0)
+			c.Assert(ms, gc.IsNil)
+			continue
+		}
+		c.Assert(err, gc.IsNil)
+		c.Assert(decl, jc.DeepEquals, test.expectDeclared)
+		c.Assert(ms[0].Id(), gc.Equals, test.expectId)
+	}
+}
+
+func newService(c *gc.C, location string, locator bakery.PublicKeyLocatorMap) *bakery.Service {
+	keyPair, err := bakery.GenerateKey()
+	c.Assert(err, gc.IsNil)
+
+	svc, err := bakery.NewService(bakery.NewServiceParams{
+		Location: location,
+		Key:      keyPair,
+		Locator:  locator,
+	})
+	c.Assert(err, gc.IsNil)
+	if locator != nil {
+		locator[location] = &keyPair.Public
+	}
+	return svc
+}
+
+type strcmpChecker string
+
+func (c strcmpChecker) CheckFirstPartyCaveat(caveat string) error {
+	if caveat != string(c) {
+		return fmt.Errorf("%v doesn't match %s", caveat, c)
+	}
+	return nil
+}
+
+func 
(c strcmpChecker) CheckThirdPartyCaveat(caveatId string, caveat string) ([]checkers.Caveat, error) {
+	if caveat != string(c) {
+		return nil, fmt.Errorf("%v doesn't match %s", caveat, c)
+	}
+	return nil, nil
+}
+
+type thirdPartyCheckerWithCaveats []checkers.Caveat
+
+func (c thirdPartyCheckerWithCaveats) CheckThirdPartyCaveat(caveatId string, caveat string) ([]checkers.Caveat, error) {
+	return c, nil
+}
=== added file 'src/gopkg.in/macaroon-bakery.v1/bakery/storage.go'
--- src/gopkg.in/macaroon-bakery.v1/bakery/storage.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/bakery/storage.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,97 @@
+package bakery
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"sync"
+)
+
+// Storage defines storage for macaroons.
+// Calling its methods concurrently is allowed.
+type Storage interface {
+	// Put stores the item at the given location, overwriting
+	// any item that might already be there.
+	// TODO(rog) would it be better to lose the overwrite
+	// semantics?
+	Put(location string, item string) error
+
+	// Get retrieves an item from the given location.
+	// If the item is not there, it returns ErrNotFound.
+	Get(location string) (item string, err error)
+
+	// Del deletes the item from the given location.
+	Del(location string) error
+}
+
+// ErrNotFound is the error returned by Storage.Get when
+// no item is found at the requested location.
+var ErrNotFound = errors.New("item not found")
+
+// NewMemStorage returns an implementation of Storage
+// that stores all items in memory.
+func NewMemStorage() Storage {
+	return &memStorage{
+		values: make(map[string]string),
+	}
+}
+
+type memStorage struct {
+	mu     sync.Mutex
+	values map[string]string
+}
+
+func (s *memStorage) Put(location, item string) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.values[location] = item
+	return nil
+}
+
+func (s *memStorage) Get(location string) (string, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	item, ok := s.values[location]
+	if !ok {
+		return "", ErrNotFound
+	}
+	return item, nil
+}
+
+func (s *memStorage) Del(location string) error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	delete(s.values, location)
+	return nil
+}
+
+// storageItem is the format used to store items in
+// the store.
+type storageItem struct {
+	RootKey []byte
+}
+
+// storage is a thin wrapper around Storage that
+// converts to and from storageItem values in its
+// Put and Get methods.
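+//
+// For illustration, an item stored for a root key "secret" is the
+// JSON encoding of storageItem, with the []byte field base64-encoded:
+//
+//	{"RootKey":"c2VjcmV0"}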
+type storage struct {
+	store Storage
+}
+
+func (s storage) Get(location string) (*storageItem, error) {
+	itemStr, err := s.store.Get(location)
+	if err != nil {
+		return nil, err
+	}
+	var item storageItem
+	if err := json.Unmarshal([]byte(itemStr), &item); err != nil {
+		return nil, fmt.Errorf("badly formatted item in store: %v", err)
+	}
+	return &item, nil
+}
+
+func (s storage) Put(location string, item *storageItem) error {
+	data, err := json.Marshal(item)
+	if err != nil {
+		panic(fmt.Errorf("cannot marshal storage item: %v", err))
+	}
+	return s.store.Put(location, string(data))
+}
=== added file 'src/gopkg.in/macaroon-bakery.v1/bakery/storage_test.go'
--- src/gopkg.in/macaroon-bakery.v1/bakery/storage_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/bakery/storage_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,62 @@
+package bakery_test
+
+import (
+	"fmt"
+
+	gc "gopkg.in/check.v1"
+
+	"gopkg.in/macaroon-bakery.v1/bakery"
+)
+
+type StorageSuite struct{}
+
+var _ = gc.Suite(&StorageSuite{})
+
+func (*StorageSuite) TestMemStorage(c *gc.C) {
+	store := bakery.NewMemStorage()
+	err := store.Put("foo", "bar")
+	c.Assert(err, gc.IsNil)
+	item, err := store.Get("foo")
+	c.Assert(err, gc.IsNil)
+	c.Assert(item, gc.Equals, "bar")
+
+	err = store.Put("bletch", "blat")
+	c.Assert(err, gc.IsNil)
+	item, err = store.Get("bletch")
+	c.Assert(err, gc.IsNil)
+	c.Assert(item, gc.Equals, "blat")
+
+	item, err = store.Get("nothing")
+	c.Assert(err, gc.Equals, bakery.ErrNotFound)
+	c.Assert(item, gc.Equals, "")
+
+	err = store.Del("bletch")
+	c.Assert(err, gc.IsNil)
+
+	item, err = store.Get("bletch")
+	c.Assert(err, gc.Equals, bakery.ErrNotFound)
+	c.Assert(item, gc.Equals, "")
+}
+
+func (*StorageSuite) TestConcurrentMemStorage(c *gc.C) {
+	// If locking is not done right, this test will
+	// definitely trigger the race detector.
+	done := make(chan struct{})
+	store := bakery.NewMemStorage()
+	for i := 0; i < 3; i++ {
+		i := i
+		go func() {
+			k := fmt.Sprint(i)
+			err := store.Put(k, k)
+			c.Check(err, gc.IsNil)
+			v, err := store.Get(k)
+			c.Check(err, gc.IsNil)
+			c.Check(v, gc.Equals, k)
+			err = store.Del(k)
+			c.Check(err, gc.IsNil)
+			done <- struct{}{}
+		}()
+	}
+	for i := 0; i < 3; i++ {
+		<-done
+	}
+}
=== added directory 'src/gopkg.in/macaroon-bakery.v1/bakerytest'
=== added file 'src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest.go'
--- src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,318 @@
+// Package bakerytest provides test helper functions for
+// the bakery.
+package bakerytest
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/juju/httprequest"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+)
+
+// Discharger is a third-party caveat discharger suitable
+// for testing. It listens on a local network port for
+// discharge requests. It should be shut down by calling
+// Close when done.
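+//
+// A minimal usage sketch (the no-op checker shown here is an
+// example, not part of the API):
+//
+//	d := NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) {
+//		return nil, nil
+//	})
+//	defer d.Close()
+//	// Use d.Location() as the location of third party caveats.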
+type Discharger struct {
+	Service *bakery.Service
+
+	server *httptest.Server
+}
+
+var skipVerify struct {
+	mu            sync.Mutex
+	refCount      int
+	oldSkipVerify bool
+}
+
+func startSkipVerify() {
+	v := &skipVerify
+	v.mu.Lock()
+	defer v.mu.Unlock()
+	if v.refCount++; v.refCount > 1 {
+		return
+	}
+	transport, ok := http.DefaultTransport.(*http.Transport)
+	if !ok {
+		return
+	}
+	if transport.TLSClientConfig != nil {
+		v.oldSkipVerify = transport.TLSClientConfig.InsecureSkipVerify
+		transport.TLSClientConfig.InsecureSkipVerify = true
+	} else {
+		v.oldSkipVerify = false
+		transport.TLSClientConfig = &tls.Config{
+			InsecureSkipVerify: true,
+		}
+	}
+}
+
+func stopSkipVerify() {
+	v := &skipVerify
+	v.mu.Lock()
+	defer v.mu.Unlock()
+	if v.refCount--; v.refCount > 0 {
+		return
+	}
+	transport, ok := http.DefaultTransport.(*http.Transport)
+	if !ok {
+		return
+	}
+	// Technically this doesn't return us to the original state,
+	// as TLSClientConfig may have been nil before but won't
+	// be now, but that should be equivalent.
+	transport.TLSClientConfig.InsecureSkipVerify = v.oldSkipVerify
+}
+
+// NewDischarger returns a new third party caveat discharger
+// which uses the given function to check caveats.
+// The cond and arg arguments to the function are as returned
+// by checkers.ParseCaveat.
+//
+// If locator is non-nil, it will be used to find public keys
+// for any third party caveats returned by the checker.
+//
+// Calling this function has the side-effect of setting
+// InsecureSkipVerify in http.DefaultTransport.TLSClientConfig
+// until all the dischargers are closed.
+func NewDischarger(
+	locator bakery.PublicKeyLocator,
+	checker func(req *http.Request, cond, arg string) ([]checkers.Caveat, error),
+) *Discharger {
+	mux := http.NewServeMux()
+	server := httptest.NewTLSServer(mux)
+	svc, err := bakery.NewService(bakery.NewServiceParams{
+		Location: server.URL,
+		Locator:  locator,
+	})
+	if err != nil {
+		panic(err)
+	}
+	checker1 := func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) {
+		cond, arg, err := checkers.ParseCaveat(cav)
+		if err != nil {
+			return nil, err
+		}
+		return checker(req, cond, arg)
+	}
+	httpbakery.AddDischargeHandler(mux, "/", svc, checker1)
+	startSkipVerify()
+	return &Discharger{
+		Service: svc,
+		server:  server,
+	}
+}
+
+// Close shuts down the server. It may be called more than
+// once on the same discharger.
+func (d *Discharger) Close() {
+	if d.server == nil {
+		return
+	}
+	d.server.Close()
+	stopSkipVerify()
+	d.server = nil
+}
+
+// Location returns the location of the discharger, suitable
+// for setting as the location in a third party caveat.
+// This will be the URL of the server.
+func (d *Discharger) Location() string {
+	return d.Service.Location()
+}
+
+// PublicKeyForLocation implements bakery.PublicKeyLocator.
+func (d *Discharger) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) {
+	if loc == d.Location() {
+		return d.Service.PublicKey(), nil
+	}
+	return nil, bakery.ErrNotFound
+}
+
+type dischargeResult struct {
+	err  error
+	cavs []checkers.Caveat
+}
+
+type discharge struct {
+	cavId string
+	c     chan dischargeResult
+}
+
+// InteractiveDischarger is a Discharger that always requires interaction to
+// complete the discharge.
+type InteractiveDischarger struct {
+	Discharger
+	Mux *http.ServeMux
+
+	// mu protects the following fields.
+	mu      sync.Mutex
+	waiting map[string]discharge
+	id      int
+}
+
+// NewInteractiveDischarger returns a new InteractiveDischarger. 
The
+// InteractiveDischarger will serve the following endpoints by default:
+//
+//	/discharge - always causes interaction to be required.
+//	/publickey - gets the bakery public key.
+//	/visit - delegates to visitHandler.
+//	/wait - blocks waiting for the interaction to complete.
+//
+// Additional endpoints may be added to Mux as necessary.
+//
+// The /discharge endpoint generates an error with the code
+// httpbakery.ErrInteractionRequired. The visitURL and waitURL will
+// point to the /visit and /wait endpoints of the InteractiveDischarger
+// respectively. These URLs will also carry context information in query
+// parameters; any handlers should be careful to preserve this context
+// information between calls. The easiest way to do this is to always use
+// the URL method when generating new URLs.
+//
+// The /visit endpoint is handled by the provided visitHandler. This
+// handler performs the required interactions and should result in the
+// FinishInteraction method being called. This handler may process the
+// interaction in a number of steps, possibly using additional handlers,
+// so long as FinishInteraction is called when no further interaction is
+// required.
+//
+// The /wait endpoint blocks until FinishInteraction has been called by
+// the corresponding /visit endpoint, or another endpoint triggered by
+// visitHandler.
+//
+// If locator is non-nil, it will be used to find public keys
+// for any third party caveats returned by the checker.
+//
+// Calling this function has the side-effect of setting
+// InsecureSkipVerify in http.DefaultTransport.TLSClientConfig
+// until all the dischargers are closed.
+//
+// The returned InteractiveDischarger must be closed when finished with.
+func NewInteractiveDischarger(locator bakery.PublicKeyLocator, visitHandler http.Handler) *InteractiveDischarger {
+	d := &InteractiveDischarger{
+		Mux:     http.NewServeMux(),
+		waiting: map[string]discharge{},
+	}
+	d.Mux.Handle("/visit", visitHandler)
+	d.Mux.Handle("/wait", http.HandlerFunc(d.wait))
+	server := httptest.NewTLSServer(d.Mux)
+	svc, err := bakery.NewService(bakery.NewServiceParams{
+		Location: server.URL,
+		Locator:  locator,
+	})
+	if err != nil {
+		panic(err)
+	}
+	httpbakery.AddDischargeHandler(d.Mux, "/", svc, d.checker)
+	startSkipVerify()
+	d.Discharger = Discharger{
+		Service: svc,
+		server:  server,
+	}
+	return d
+}
+
+func (d *InteractiveDischarger) checker(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) {
+	d.mu.Lock()
+	id := fmt.Sprintf("%d", d.id)
+	d.id++
+	d.waiting[id] = discharge{cavId, make(chan dischargeResult, 1)}
+	d.mu.Unlock()
+	prefix := strings.TrimSuffix(req.URL.String(), "/discharge")
+	visitURL := fmt.Sprintf("%s/visit?waitid=%s", prefix, id)
+	waitURL := fmt.Sprintf("%s/wait?waitid=%s", prefix, id)
+	return nil, httpbakery.NewInteractionRequiredError(visitURL, waitURL, nil, req)
+}
+
+func (d *InteractiveDischarger) wait(w http.ResponseWriter, r *http.Request) {
+	r.ParseForm()
+	d.mu.Lock()
+	discharge, ok := d.waiting[r.Form.Get("waitid")]
+	d.mu.Unlock()
+	if !ok {
+		code, body := httpbakery.ErrorToResponse(errgo.Newf("invalid waitid %q", r.Form.Get("waitid")))
+		httprequest.WriteJSON(w, code, body)
+		return
+	}
+	defer func() {
+		d.mu.Lock()
+		delete(d.waiting, r.Form.Get("waitid"))
+		d.mu.Unlock()
+	}()
+	var err error
+	var cavs []checkers.Caveat
+	select {
+	case res := <-discharge.c:
+		err = res.err
		cavs = res.cavs
+	case <-time.After(5 * time.Minute):
+		code, body := httpbakery.ErrorToResponse(errgo.New("timeout 
waiting for interaction to complete")) + httprequest.WriteJSON(w, code, body) + return + } + if err != nil { + code, body := httpbakery.ErrorToResponse(err) + httprequest.WriteJSON(w, code, body) + return + } + m, err := d.Service.Discharge( + bakery.ThirdPartyCheckerFunc( + func(cavId, caveat string) ([]checkers.Caveat, error) { + return cavs, nil + }, + ), + discharge.cavId, + ) + if err != nil { + code, body := httpbakery.ErrorToResponse(err) + httprequest.WriteJSON(w, code, body) + return + } + httprequest.WriteJSON( + w, + http.StatusOK, + httpbakery.WaitResponse{ + Macaroon: m, + }, + ) +} + +// FinishInteraction signals to the InteractiveDischarger that a +// particular interaction is complete. It causes any waiting requests to +// return. If err is not nil then it will be returned by the +// corresponding /wait request. +func (d *InteractiveDischarger) FinishInteraction(w http.ResponseWriter, r *http.Request, cavs []checkers.Caveat, err error) { + r.ParseForm() + d.mu.Lock() + discharge, ok := d.waiting[r.Form.Get("waitid")] + d.mu.Unlock() + if !ok { + code, body := httpbakery.ErrorToResponse(errgo.Newf("invalid waitid %q", r.Form.Get("waitid"))) + httprequest.WriteJSON(w, code, body) + return + } + select { + case discharge.c <- dischargeResult{err: err, cavs: cavs}: + default: + panic("cannot finish interaction " + r.Form.Get("waitid")) + } + return +} + +// URL returns a URL addressed to the given path in the discharger that +// contains any discharger context information found in the given +// request. Use this to generate intermediate URLs before calling +// FinishInteraction. +func (d *InteractiveDischarger) URL(path string, r *http.Request) string { + r.ParseForm() + return d.Location() + path + "?waitid=" + r.Form.Get("waitid") +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakerytest/bakerytest_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,252 @@ +package bakerytest_test + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "sync" + + "github.com/juju/httprequest" + gc "gopkg.in/check.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +type suite struct { + client *httpbakery.Client +} + +func (s *suite) SetUpTest(c *gc.C) { + s.client = httpbakery.NewClient() +} + +var _ = gc.Suite(&suite{}) + +func noCaveatChecker(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return nil, nil +} + +func (s *suite) TestDischargerSimple(c *gc.C) { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "here", + Locator: d, + }) + c.Assert(err, gc.IsNil) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: d.Location(), + Condition: "something", + }}) + c.Assert(err, gc.IsNil) + ms, err := s.client.DischargeAll(m) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 2) + + err = svc.Check(ms, failChecker) + c.Assert(err, gc.IsNil) +} + +var failChecker = bakery.FirstPartyCheckerFunc(func(s string) error { + return fmt.Errorf("fail %s", s) +}) + +func (s *suite) TestDischargerTwoLevels(c *gc.C) { + d1checker := func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + if cond != "xtrue" { + return nil, fmt.Errorf("caveat refused") + } + return nil, nil + } + d1 := 
bakerytest.NewDischarger(nil, d1checker) + defer d1.Close() + d2checker := func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return []checkers.Caveat{{ + Location: d1.Location(), + Condition: "x" + cond, + }}, nil + } + d2 := bakerytest.NewDischarger(d1, d2checker) + defer d2.Close() + locator := bakery.PublicKeyLocatorMap{ + d1.Location(): d1.Service.PublicKey(), + d2.Location(): d2.Service.PublicKey(), + } + c.Logf("map: %s", locator) + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "here", + Locator: locator, + }) + c.Assert(err, gc.IsNil) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: d2.Location(), + Condition: "true", + }}) + c.Assert(err, gc.IsNil) + + ms, err := s.client.DischargeAll(m) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 3) + + err = svc.Check(ms, failChecker) + c.Assert(err, gc.IsNil) + + err = svc.AddCaveat(m, checkers.Caveat{ + Location: d2.Location(), + Condition: "nope", + }) + c.Assert(err, gc.IsNil) + + ms, err = s.client.DischargeAll(m) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://[^"]*": third party refused discharge: cannot discharge: caveat refused`) + c.Assert(ms, gc.HasLen, 0) +} + +func (s *suite) TestInsecureSkipVerifyRestoration(c *gc.C) { + d1 := bakerytest.NewDischarger(nil, noCaveatChecker) + d2 := bakerytest.NewDischarger(nil, noCaveatChecker) + d2.Close() + c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, true) + d1.Close() + c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, false) + + // When InsecureSkipVerify is already true, it should not + // be restored to false. + http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = true + d3 := bakerytest.NewDischarger(nil, noCaveatChecker) + d3.Close() + + c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, true) +} + +func (s *suite) TestConcurrentDischargers(c *gc.C) { + var wg sync.WaitGroup + for i := 0; i < 5; i++ { + wg.Add(1) + go func() { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + d.Close() + wg.Done() + }() + } + wg.Wait() + c.Assert(http.DefaultTransport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, gc.Equals, false) +} + +func (s *suite) TestInteractiveDischarger(c *gc.C) { + var d *bakerytest.InteractiveDischarger + d = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + d.FinishInteraction(w, r, []checkers.Caveat{ + checkers.Caveat{ + Condition: "test pass", + }, + }, nil) + }, + )) + defer d.Close() + + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "here", + Locator: d, + }) + c.Assert(err, gc.IsNil) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: d.Location(), + Condition: "something", + }}) + c.Assert(err, gc.IsNil) + client := httpbakery.NewClient() + client.VisitWebPage = func(u *url.URL) error { + var c httprequest.Client + return c.Get(u.String(), nil) + } + ms, err := client.DischargeAll(m) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 2) + + var r recordingChecker + err = svc.Check(ms, &r) + c.Assert(err, gc.IsNil) + c.Assert(r.caveats, gc.HasLen, 1) + c.Assert(r.caveats[0], gc.Equals, "test pass") +} + +func (s *suite) TestLoginDischargerError(c *gc.C) { + var d *bakerytest.InteractiveDischarger + d = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + 
d.FinishInteraction(w, r, nil, errors.New("test error")) + }, + )) + defer d.Close() + + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "here", + Locator: d, + }) + c.Assert(err, gc.IsNil) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: d.Location(), + Condition: "something", + }}) + c.Assert(err, gc.IsNil) + client := httpbakery.NewClient() + client.VisitWebPage = func(u *url.URL) error { + c.Logf("visiting %s", u) + var c httprequest.Client + return c.Get(u.String(), nil) + } + _, err = client.DischargeAll(m) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": failed to acquire macaroon after waiting: third party refused discharge: test error`) +} + +func (s *suite) TestInteractiveDischargerURL(c *gc.C) { + var d *bakerytest.InteractiveDischarger + d = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, d.URL("/redirect", r), http.StatusFound) + }, + )) + defer d.Close() + d.Mux.Handle("/redirect", http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + d.FinishInteraction(w, r, nil, nil) + }, + )) + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: "here", + Locator: d, + }) + c.Assert(err, gc.IsNil) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: d.Location(), + Condition: "something", + }}) + c.Assert(err, gc.IsNil) + client := httpbakery.NewClient() + client.VisitWebPage = func(u *url.URL) error { + var c httprequest.Client + return c.Get(u.String(), nil) + } + ms, err := client.DischargeAll(m) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 2) + + err = svc.Check(ms, failChecker) + c.Assert(err, gc.IsNil) +} + +type recordingChecker struct { + caveats []string +} + +func (c *recordingChecker) CheckFirstPartyCaveat(caveat string) error { + c.caveats = append(c.caveats, caveat) + return nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/bakerytest/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/bakerytest/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/bakerytest/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package bakerytest_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added directory 'src/gopkg.in/macaroon-bakery.v1/cmd' === added directory 'src/gopkg.in/macaroon-bakery.v1/cmd/bakery-keygen' === added file 'src/gopkg.in/macaroon-bakery.v1/cmd/bakery-keygen/main.go' --- src/gopkg.in/macaroon-bakery.v1/cmd/bakery-keygen/main.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/cmd/bakery-keygen/main.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,22 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + + "gopkg.in/macaroon-bakery.v1/bakery" +) + +func main() { + kp, err := bakery.GenerateKey() + if err != nil { + fmt.Fprintf(os.Stderr, "cannot generate key: %s\n", err) + os.Exit(1) + } + b, err := json.MarshalIndent(kp, "", "\t") + if err != nil { + panic(err) + } + fmt.Printf("%s\n", b) +} === added file 'src/gopkg.in/macaroon-bakery.v1/dependencies.tsv' --- src/gopkg.in/macaroon-bakery.v1/dependencies.tsv 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/dependencies.tsv 2016-03-22 15:18:22 +0000 @@ -0,0 +1,16 @@ +github.com/juju/errors git 4567a5e69fd3130ca0d89f69478e7ac025b67452 2015-03-27T19:24:31Z +github.com/juju/httprequest git abb29cbb15079888950f7b9d73f77c4e4ac89042 2015-09-16T09:23:22Z +github.com/juju/loggo git 
8477fc936adf0e382d680310047ca27e128a309a 2015-05-27T03:58:39Z +github.com/juju/schema git afe1151cb49d1d7ed3c75592dfc6f38703f2e988 2015-08-07T07:58:08Z +github.com/juju/testing git 6e944d606b6efca96146fbda3e6a27f73313d867 2015-10-02T11:09:45Z +github.com/juju/utils git c9d1b9e09eebb41c3bb2d762ea16f4d8abed6ce5 2015-09-02T22:36:51Z +github.com/julienschmidt/httprouter git 109e267447e95ad1bb48b758e40dd7453eb7b039 2015-09-05T17:25:33Z +golang.org/x/crypto git aedad9a179ec1ea11b7064c57cbc6dc30d7724ec 2015-08-30T18:06:42Z +golang.org/x/net git ea47fc708ee3e20177f3ca3716217c4ab75942cb 2015-08-29T23:03:18Z +gopkg.in/check.v1 git b3d3430320d4260e5fea99841af984b3badcea63 2015-06-26T10:50:28Z +gopkg.in/errgo.v1 git 15098963088579c1cd9eb1a7da285831e548390b 2015-07-07T18:34:45Z +gopkg.in/juju/environschema.v1 git cfc85b8479122af20b9b5b5ac6a69d5248914e56 2015-10-09T10:58:43Z +gopkg.in/macaroon.v1 git ab3940c6c16510a850e1c2dd628b919f0f3f1464 2015-01-21T11:42:31Z +gopkg.in/mgo.v2 git f4923a569136442e900b8cf5c1a706c0a8b0883c 2015-08-21T15:31:23Z +gopkg.in/yaml.v1 git 9f9df34309c04878acc86042b16630b0f696e1de 2014-09-24T16:16:07Z +gopkg.in/yaml.v2 git 7ad95dd0798a40da1ccdff6dff35fd177b5edf40 2015-06-24T10:29:02Z === added directory 'src/gopkg.in/macaroon-bakery.v1/httpbakery' === added directory 'src/gopkg.in/macaroon-bakery.v1/httpbakery/agent' === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,226 @@ +// Package agent enables non-interactive (agent) login using macaroons. +// To enable agent authorization with a given httpbakery.Client c against +// a given third party discharge server URL u: +// +// SetUpAuth(c, u, agentUsername) +// +package agent + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "mime" + "net/http" + "net/url" + + "github.com/juju/loggo" + "gopkg.in/errgo.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +var logger = loggo.GetLogger("httpbakery.agent") + +/* +PROTOCOL + +An agent login works as follows: + + Agent Login Service + | | + | GET visitURL with agent cookie | + |----------------------------------->| + | | + | Macaroon with local third-party | + | caveat | + |<-----------------------------------| + | | + | GET visitURL with agent cookie & | + | discharged macaroon | + |----------------------------------->| + | | + | Agent login response | + |<-----------------------------------| + | | + +The agent cookie is a cookie named "agent-login" holding a base64 +encoded JSON object described by the agentLogin struct. + +A local third-party caveat is a third party caveat with the location +set to "local" and the caveat encrypted with the public key declared +in the agent cookie. The httpbakery.Client automatically discharges +the local third-party caveat. + +On success the response is a JSON object described by agentResponse +with the AgentLogin field set to true. + +If an error occurs then the response should be a JSON object that +unmarshals to an httpbakery.Error. +*/ + +const cookieName = "agent-login" + +// agentLogin defines the structure of an agent login cookie. It is also +// returned in a successful agent login attempt to help indicate that an +// agent login has occurred. 
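+//
+// For illustration only, a decoded cookie value looks like this
+// (the username and key here are made-up placeholders):
+//
+//	{"username": "agent-1", "public_key": "<base64-encoded public key>"}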
+type agentLogin struct {
+	Username  string             `json:"username"`
+	PublicKey *bakery.PublicKey  `json:"public_key"`
+}
+
+// agentResponse contains the response to an agent login attempt.
+type agentResponse struct {
+	AgentLogin bool `json:"agent_login"`
+}
+
+// ErrNoAgentLoginCookie is the error returned when the expected
+// agent login cookie has not been found.
+var ErrNoAgentLoginCookie = errgo.New("no agent-login cookie found")
+
+// LoginCookie returns details of the agent login cookie
+// from the given request. If no agent-login cookie is found,
+// it returns an ErrNoAgentLoginCookie error.
+func LoginCookie(req *http.Request) (username string, key *bakery.PublicKey, err error) {
+	c, err := req.Cookie(cookieName)
+	if err != nil {
+		return "", nil, ErrNoAgentLoginCookie
+	}
+	b, err := base64.StdEncoding.DecodeString(c.Value)
+	if err != nil {
+		return "", nil, errgo.Notef(err, "cannot decode cookie value")
+	}
+	var al agentLogin
+	if err := json.Unmarshal(b, &al); err != nil {
+		return "", nil, errgo.Notef(err, "cannot unmarshal agent login")
+	}
+	if al.Username == "" {
+		return "", nil, errgo.Newf("agent login has no user name")
+	}
+	if al.PublicKey == nil {
+		return "", nil, errgo.Newf("agent login has no public key")
+	}
+	return al.Username, al.PublicKey, nil
+}
+
+// SetUpAuth configures agent authentication on c. A cookie is created in
+// c's cookie jar containing credentials derived from the username and
+// c.Key. c.VisitWebPage is set to VisitWebPage(c). The returned error is
+// non-nil only if c.Key is nil.
+func SetUpAuth(c *httpbakery.Client, u *url.URL, username string) error {
+	if c.Key == nil {
+		return errgo.New("cannot set-up authentication: client key not configured")
+	}
+	SetCookie(c.Jar, u, username, &c.Key.Public)
+	c.VisitWebPage = VisitWebPage(c)
+	return nil
+}
+
+// SetCookie creates a cookie in jar which is suitable for performing agent
+// logins to u.
+//
+// If using SetUpAuth, it should not be necessary to use
+// this function.
+func SetCookie(jar http.CookieJar, u *url.URL, username string, pk *bakery.PublicKey) {
+	al := agentLogin{
+		Username:  username,
+		PublicKey: pk,
+	}
+	b, err := json.Marshal(al)
+	if err != nil {
+		// This shouldn't happen as the agentLogin type has to be marshalable.
+		panic(errgo.Notef(err, "cannot marshal cookie"))
+	}
+	v := base64.StdEncoding.EncodeToString(b)
+	jar.SetCookies(u, []*http.Cookie{{
+		Name:  cookieName,
+		Value: v,
+	}})
+}
+
+// VisitWebPage creates a function that can be used with
+// httpbakery.Client.VisitWebPage. The function uses c to access the
+// visit URL. If no agent-login cookie has been configured for u an error
+// with the cause of ErrNoAgentLoginCookie will be returned. If the login
+// fails the returned error will be of type *httpbakery.Error. If the
+// response from the visitURL cannot be interpreted the error will be of
+// type *UnexpectedResponseError.
+//
+// If using SetUpAuth, it should not be necessary to use
+// this function.
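+//
+// A typical setup, mirroring the package example (key and u are
+// assumed to be supplied by the caller):
+//
+//	client := httpbakery.NewClient()
+//	client.Key = key
+//	SetCookie(client.Jar, u, "agent-username", &client.Key.Public)
+//	client.VisitWebPage = VisitWebPage(client)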
+func VisitWebPage(c *httpbakery.Client) func(u *url.URL) error { + return func(u *url.URL) error { + err := ErrNoAgentLoginCookie + for _, c := range c.Jar.Cookies(u) { + if c.Name == cookieName { + err = nil + break + } + } + if err != nil { + return errgo.WithCausef(err, http.ErrNoCookie, "cannot perform agent login") + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return errgo.Notef(err, "cannot create request") + } + resp, err := c.Do(req) + if err != nil { + return errgo.Notef(err, "cannot perform request") + } + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + logger.Errorf("cannot read response body: %s", err) + b = []byte{} + } + mt, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + logger.Warningf("cannot parse response content type: %s", err) + mt = "" + } + if mt != "application/json" { + uerr := (*UnexpectedResponseError)(resp) + uerr.Body = ioutil.NopCloser(bytes.NewReader(b)) + return uerr + } + if resp.StatusCode != http.StatusOK { + var herr httpbakery.Error + err := json.Unmarshal(b, &herr) + if err == nil && herr.Message != "" { + return &herr + } + if err != nil { + logger.Warningf("cannot unmarshal error response: %s", err) + } + uerr := (*UnexpectedResponseError)(resp) + uerr.Body = ioutil.NopCloser(bytes.NewReader(b)) + return uerr + } + var ar agentResponse + err = json.Unmarshal(b, &ar) + if err == nil && ar.AgentLogin { + return nil + } + if err != nil { + logger.Warningf("cannot unmarshal response: %s", err) + } + uerr := (*UnexpectedResponseError)(resp) + uerr.Body = ioutil.NopCloser(bytes.NewReader(b)) + return uerr + } +} + +// UnexpectedResponseError is the error returned when a response is +// received that cannot be interpreted. 
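+//
+// The value is the failed *http.Response; the functions in this package
+// that return it replace its Body with a re-readable copy of the bytes
+// already consumed, so the response can still be examined.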
+type UnexpectedResponseError http.Response
+
+func (u *UnexpectedResponseError) Error() string {
+	return fmt.Sprintf(
+		"unexpected response to non-interactive web page visit %s (content type %s)",
+		u.Request.URL.String(),
+		u.Header.Get("Content-Type"))
+}
=== added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent_test.go'
--- src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/agent_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,239 @@
+package agent_test
+
+import (
+	"encoding/base64"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/macaroon-bakery.v1/httpbakery/agent"
+)
+
+type agentSuite struct {
+	bakery       *bakery.Service
+	dischargeKey *bakery.PublicKey
+	discharger   *Discharger
+	server       *httptest.Server
+}
+
+var _ = gc.Suite(&agentSuite{})
+
+func (s *agentSuite) SetUpSuite(c *gc.C) {
+	key, err := bakery.GenerateKey()
+	c.Assert(err, gc.IsNil)
+	s.dischargeKey = &key.Public
+	bak, err := bakery.NewService(bakery.NewServiceParams{
+		Key: key,
+	})
+	c.Assert(err, gc.IsNil)
+	s.discharger = &Discharger{
+		Bakery: bak,
+	}
+	s.server = s.discharger.Serve()
+	s.bakery, err = bakery.NewService(bakery.NewServiceParams{
+		Locator: bakery.PublicKeyLocatorMap{
+			s.discharger.URL: &key.Public,
+		},
+	})
+	c.Assert(err, gc.IsNil)
+}
+
+func (s *agentSuite) TearDownSuite(c *gc.C) {
+	s.server.Close()
+}
+
+var agentLoginTests = []struct {
+	about        string
+	loginHandler func(*Discharger, http.ResponseWriter, *http.Request)
+	expectError  string
+}{{
+	about: "success",
+}, {
+	about: "error response",
+	loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) {
+		d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{
+			Code:    "bad request",
+			Message: "test error",
+		})
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: test error`,
+}, {
+	about: "unexpected response",
+	loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) {
+		w.Write([]byte("OK"))
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: unexpected response to non-interactive web page visit .* \(content type text/plain; charset=utf-8\)`,
+}, {
+	about: "unexpected error response",
+	loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) {
+		d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{})
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: unexpected response to non-interactive web page visit .* \(content type application/json\)`,
+}, {
+	about: "incorrect JSON",
+	loginHandler: func(d *Discharger, w http.ResponseWriter, _ *http.Request) {
+		d.WriteJSON(w, http.StatusOK, httpbakery.Error{
+			Code:    "bad request",
+			Message: "test error",
+		})
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: unexpected response to non-interactive web page visit .* \(content type application/json\)`,
+}}
+
+func (s *agentSuite) TestAgentLogin(c *gc.C) {
+	u, err := url.Parse(s.discharger.URL)
+	c.Assert(err, gc.IsNil)
+	for i, test := range agentLoginTests {
+		c.Logf("%d. 
%s", i, test.about) + s.discharger.LoginHandler = test.loginHandler + client := httpbakery.NewClient() + client.Key, err = bakery.GenerateKey() + c.Assert(err, gc.IsNil) + err = agent.SetUpAuth(client, u, "test-user") + c.Assert(err, gc.IsNil) + m, err := s.bakery.NewMacaroon("", nil, []checkers.Caveat{{ + Location: s.discharger.URL, + Condition: "test condition", + }}) + c.Assert(err, gc.IsNil) + ms, err := client.DischargeAll(m) + if test.expectError != "" { + c.Assert(err, gc.ErrorMatches, test.expectError) + continue + } + c.Assert(err, gc.IsNil) + err = s.bakery.Check(ms, bakery.FirstPartyCheckerFunc( + func(caveat string) error { + return nil + }, + )) + c.Assert(err, gc.IsNil) + } +} + +func (s *agentSuite) TestSetUpAuthError(c *gc.C) { + client := httpbakery.NewClient() + err := agent.SetUpAuth(client, nil, "test-user") + c.Assert(err, gc.ErrorMatches, "cannot set-up authentication: client key not configured") +} + +func (s *agentSuite) TestNoCookieError(c *gc.C) { + client := httpbakery.NewClient() + client.VisitWebPage = agent.VisitWebPage(client) + m, err := s.bakery.NewMacaroon("", nil, []checkers.Caveat{{ + Location: s.discharger.URL, + Condition: "test condition", + }}) + c.Assert(err, gc.IsNil) + _, err = client.DischargeAll(m) + c.Assert(err, gc.ErrorMatches, "cannot get discharge from .*: cannot start interactive session: cannot perform agent login: no agent-login cookie found") + ierr := errgo.Cause(err).(*httpbakery.InteractionError) + c.Assert(errgo.Cause(ierr.Reason), gc.Equals, http.ErrNoCookie) +} + +func (s *agentSuite) TestLoginCookie(c *gc.C) { + key, err := bakery.GenerateKey() + c.Assert(err, gc.IsNil) + + tests := []struct { + about string + setCookie func(*httpbakery.Client, *url.URL) + expectUser string + expectKey *bakery.PublicKey + expectError string + expectCause error + }{{ + about: "success", + setCookie: func(client *httpbakery.Client, u *url.URL) { + agent.SetUpAuth(client, u, "bob") + }, + expectUser: "bob", + expectKey: &key.Public, + }, { + about: "no cookie", + setCookie: func(client *httpbakery.Client, u *url.URL) {}, + expectError: "no agent-login cookie found", + expectCause: agent.ErrNoAgentLoginCookie, + }, { + about: "invalid base64 encoding", + setCookie: func(client *httpbakery.Client, u *url.URL) { + client.Jar.SetCookies(u, []*http.Cookie{{ + Name: "agent-login", + Value: "x", + }}) + }, + expectError: "cannot decode cookie value: illegal base64 data at input byte 0", + }, { + about: "invalid JSON", + setCookie: func(client *httpbakery.Client, u *url.URL) { + client.Jar.SetCookies(u, []*http.Cookie{{ + Name: "agent-login", + Value: base64.StdEncoding.EncodeToString([]byte("}")), + }}) + }, + expectError: "cannot unmarshal agent login: invalid character '}' looking for beginning of value", + }, { + about: "no username", + setCookie: func(client *httpbakery.Client, u *url.URL) { + agent.SetCookie(client.Jar, u, "", &key.Public) + }, + expectError: "agent login has no user name", + }, { + about: "no public key", + setCookie: func(client *httpbakery.Client, u *url.URL) { + agent.SetCookie(client.Jar, u, "hello", nil) + }, + expectError: "agent login has no public key", + }} + var ( + foundUser string + foundKey *bakery.PublicKey + foundErr error + ) + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + foundUser, foundKey, foundErr = agent.LoginCookie(req) + })) + defer srv.Close() + + srvURL, err := url.Parse(srv.URL) + c.Assert(err, gc.IsNil) + + for i, test := range tests { + c.Logf("test %d: %s", 
i, test.about)
+
+		client := httpbakery.NewClient()
+		client.Key = key
+		test.setCookie(client, srvURL)
+
+		req, err := http.NewRequest("GET", srv.URL, nil)
+		c.Assert(err, gc.IsNil)
+		resp, err := client.Do(req)
+		c.Assert(err, gc.IsNil)
+		c.Assert(resp.StatusCode, gc.Equals, http.StatusOK)
+		if test.expectError != "" {
+			c.Assert(foundErr, gc.ErrorMatches, test.expectError)
+			if test.expectCause != nil {
+				c.Assert(errgo.Cause(foundErr), gc.Equals, test.expectCause)
+			}
+			continue
+		}
+		c.Assert(foundUser, gc.Equals, test.expectUser)
+		c.Assert(foundKey, gc.DeepEquals, test.expectKey)
+	}
+}
+
+func ExampleVisitWebPage() {
+	var key *bakery.KeyPair
+	var u *url.URL
+
+	client := httpbakery.NewClient()
+	client.Key = key
+	agent.SetCookie(client.Jar, u, "agent-username", &client.Key.Public)
+	client.VisitWebPage = agent.VisitWebPage(client)
+}
=== added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/discharge_test.go'
--- src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/discharge_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/discharge_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,181 @@
+package agent_test
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"strconv"
+	"sync"
+
+	"gopkg.in/errgo.v1"
+	"gopkg.in/macaroon.v1"
+
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+	"gopkg.in/macaroon-bakery.v1/httpbakery/agent"
+)
+
+type discharge struct {
+	cavId string
+	c     chan error
+}
+
+type Discharger struct {
+	Bakery       *bakery.Service
+	URL          string
+	LoginHandler func(*Discharger, http.ResponseWriter, *http.Request)
+
+	mu      sync.Mutex
+	waiting []discharge
+}
+
+func (d *Discharger) ServeMux() *http.ServeMux {
+	mux := http.NewServeMux()
+	httpbakery.AddDischargeHandler(mux, "/", d.Bakery, d.checker)
+	mux.Handle("/login", http.HandlerFunc(d.login))
+	mux.Handle("/wait", http.HandlerFunc(d.wait))
+	mux.Handle("/", http.HandlerFunc(d.notfound))
+	return mux
+}
+
+func (d *Discharger) Serve() *httptest.Server {
+	s := httptest.NewServer(d.ServeMux())
+	d.URL = s.URL
+	return s
+}
+
+func (d *Discharger) WriteJSON(w http.ResponseWriter, status int, v interface{}) error {
+	body, err := json.Marshal(v)
+	if err != nil {
+		return errgo.Notef(err, "cannot marshal v")
+	}
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(status)
+	if _, err := w.Write(body); err != nil {
+		return errgo.Notef(err, "cannot write response")
+	}
+	return nil
+}
+
+func (d *Discharger) GetAgentLogin(r *http.Request) (*agent.AgentLogin, error) {
+	c, err := r.Cookie("agent-login")
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot find cookie")
+	}
+	b, err := base64.StdEncoding.DecodeString(c.Value)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot decode cookie")
+	}
+	var al agent.AgentLogin
+	if err := json.Unmarshal(b, &al); err != nil {
+		return nil, errgo.Notef(err, "cannot unmarshal cookie")
+	}
+	return &al, nil
+}
+
+func (d *Discharger) FinishWait(w http.ResponseWriter, r *http.Request, err error) {
+	r.ParseForm()
+	// Use a distinct variable for the parse error so that the err
+	// argument is not clobbered before it is sent on the channel.
+	id, perr := strconv.Atoi(r.Form.Get("waitid"))
+	if perr != nil {
+		d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{
+			Message: fmt.Sprintf("cannot read waitid: %s", perr),
+		})
+		return
+	}
+	d.waiting[id].c <- err
+}
+
+func (d *Discharger) checker(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) {
+	d.mu.Lock()
+	id := len(d.waiting)
+	d.waiting = 
append(d.waiting, discharge{cavId, make(chan error, 1)}) + d.mu.Unlock() + return nil, &httpbakery.Error{ + Code: httpbakery.ErrInteractionRequired, + Message: "test interaction", + Info: &httpbakery.ErrorInfo{ + VisitURL: fmt.Sprintf("%s/login?waitid=%d", d.URL, id), + WaitURL: fmt.Sprintf("%s/wait?waitid=%d", d.URL, id), + }, + } +} + +func (d *Discharger) login(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if d.LoginHandler != nil { + d.LoginHandler(d, w, r) + return + } + al, err := d.GetAgentLogin(r) + if err != nil { + d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{ + Message: fmt.Sprintf("cannot read agent login: %s", err), + }) + return + } + _, err = httpbakery.CheckRequest(d.Bakery, r, nil, nil) + if err == nil { + d.FinishWait(w, r, nil) + d.WriteJSON(w, http.StatusOK, agent.AgentResponse{ + AgentLogin: true, + }) + return + } + m, err := d.Bakery.NewMacaroon("", nil, []checkers.Caveat{ + bakery.LocalThirdPartyCaveat(al.PublicKey), + }) + if err != nil { + d.WriteJSON(w, http.StatusInternalServerError, httpbakery.Error{ + Message: fmt.Sprintf("cannot create macaroon: %s", err), + }) + return + } + httpbakery.WriteDischargeRequiredError(w, m, "", nil) +} + +func (d *Discharger) wait(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + id, err := strconv.Atoi(r.Form.Get("waitid")) + if err != nil { + d.WriteJSON(w, http.StatusBadRequest, httpbakery.Error{ + Message: fmt.Sprintf("cannot read waitid: %s", err), + }) + return + } + err = <-d.waiting[id].c + if err != nil { + d.WriteJSON(w, http.StatusForbidden, err) + return + } + m, err := d.Bakery.Discharge( + bakery.ThirdPartyCheckerFunc( + func(cavId, caveat string) ([]checkers.Caveat, error) { + return nil, nil + }, + ), + d.waiting[id].cavId, + ) + if err != nil { + d.WriteJSON(w, http.StatusForbidden, err) + return + } + d.WriteJSON( + w, + http.StatusOK, + struct { + Macaroon *macaroon.Macaroon + }{ + Macaroon: m, + }, + ) +} + +func (d *Discharger) notfound(w http.ResponseWriter, r *http.Request) { + d.WriteJSON(w, http.StatusNotFound, httpbakery.Error{ + Message: fmt.Sprintf("cannot find %s", r.URL.String()), + }) +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/export_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/export_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/export_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,4 @@ +package agent + +type AgentLogin agentLogin +type AgentResponse agentResponse === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/agent/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package agent_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/browser.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/browser.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/browser.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,61 @@ +package httpbakery + +import ( + "fmt" + "net/url" + "os" + "os/exec" + "runtime" + "strings" +) + +var browser = map[string]string{ + "linux": "sensible-browser", + "darwin": "open", + "freebsd": "xdg-open", + "netbsd": "xdg-open", + "openbsd": "xdg-open", +} + +// OpenWebBrowser opens a web browser at the +// given URL. 
If the OS is not recognised, the URL
+// is just printed to standard error.
+func OpenWebBrowser(url *url.URL) error {
+	var args []string
+	if runtime.GOOS == "windows" {
+		// Windows is special because the start command is
+		// built into cmd.exe and hence requires the argument
+		// to be quoted.
+		args = []string{"cmd", "/c", "start", winCmdQuote.Replace(url.String())}
+	} else if b := browser[runtime.GOOS]; b != "" {
+		args = []string{b, url.String()}
+	}
+	if args != nil {
+		fmt.Fprintf(os.Stderr, "Opening an authorization web page in your browser.\n")
+		fmt.Fprintf(os.Stderr, "If it does not open, please open this URL:\n%s\n", url)
+		cmd := exec.Command(args[0], args[1:]...)
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+		cmd.Start()
+		go cmd.Wait()
+	} else {
+		fmt.Fprintf(os.Stderr, "Please open this URL in your browser to authorize:\n%s\n", url)
+	}
+	return nil
+}
+
+// winCmdQuote can quote metacharacters special to the Windows
+// cmd.exe command interpreter. It does that by inserting
+// a '^' character before each metacharacter. Note that
+// most of these cannot actually be produced by URL.String,
+// but we include them for completeness.
+var winCmdQuote = strings.NewReplacer(
+	"&", "^&",
+	"%", "^%",
+	"(", "^(",
+	")", "^)",
+	"^", "^^",
+	"<", "^<",
+	">", "^>",
+	"|", "^|",
+)
=== added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers.go'
--- src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,80 @@
+package httpbakery
+
+import (
+	"net"
+	"net/http"
+
+	"gopkg.in/errgo.v1"
+
+	"gopkg.in/macaroon-bakery.v1/bakery/checkers"
+)
+
+type httpContext struct {
+	req *http.Request
+}
+
+// Checkers implements the standard HTTP-request checkers.
+// It does not include the "declared" checker, as that
+// must be added for each individual set of macaroons
+// that are checked.
+func Checkers(req *http.Request) checkers.Checker {
+	c := httpContext{req}
+	return checkers.Map{
+		checkers.CondClientIPAddr: c.clientIPAddr,
+		checkers.CondClientOrigin: c.clientOrigin,
+	}
+}
+
+// clientIPAddr implements the IP client address checker
+// for an HTTP request.
+func (c httpContext) clientIPAddr(_, addr string) error {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return errgo.Newf("cannot parse IP address in caveat")
+	}
+	if c.req.RemoteAddr == "" {
+		return errgo.Newf("client has no remote address")
+	}
+	reqIP, err := requestIPAddr(c.req)
+	if err != nil {
+		return errgo.Mask(err)
+	}
+	if !reqIP.Equal(ip) {
+		return errgo.Newf("client IP address mismatch, got %s", reqIP)
+	}
+	return nil
+}
+
+// clientOrigin implements the Origin header checker
+// for an HTTP request.
+func (c httpContext) clientOrigin(_, origin string) error {
+	if reqOrigin := c.req.Header.Get("Origin"); reqOrigin != origin {
+		return errgo.Newf("request has invalid Origin header; got %q", reqOrigin)
+	}
+	return nil
+}
+
+// SameClientIPAddrCaveat returns a caveat that will check that
+// the remote IP address is the same as that in the given HTTP request.
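+//
+// For example, for a request with RemoteAddr "127.0.0.1:1234" the
+// returned caveat has the condition:
+//
+//	client-ip-addr 127.0.0.1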
+func SameClientIPAddrCaveat(req *http.Request) checkers.Caveat { + if req.RemoteAddr == "" { + return checkers.ErrorCaveatf("client has no remote IP address") + } + ip, err := requestIPAddr(req) + if err != nil { + return checkers.ErrorCaveatf("%v", err) + } + return checkers.ClientIPAddrCaveat(ip) +} + +func requestIPAddr(req *http.Request) (net.IP, error) { + reqHost, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + return nil, errgo.Newf("cannot parse host port in remote address: %v", err) + } + ip := net.ParseIP(reqHost) + if ip == nil { + return nil, errgo.Newf("invalid IP address in remote address %q", req.RemoteAddr) + } + return ip, nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/checkers_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,170 @@ +package httpbakery_test + +import ( + "net" + "net/http" + + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +type CheckersSuite struct{} + +var _ = gc.Suite(&CheckersSuite{}) + +type checkTest struct { + caveat string + expectError string + expectCause func(err error) bool +} + +var isCaveatNotRecognized = errgo.Is(checkers.ErrCaveatNotRecognized) + +var checkerTests = []struct { + about string + checker checkers.Checker + checks []checkTest +}{{ + about: "no host name declared", + checker: checkers.New(httpbakery.Checkers(&http.Request{})), + checks: []checkTest{{ + caveat: checkers.ClientIPAddrCaveat(net.IP{0, 0, 0, 0}).Condition, + expectError: `caveat "client-ip-addr 0.0.0.0" not satisfied: client has no remote address`, + }, { + caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}).Condition, + expectError: `caveat "client-ip-addr 127.0.0.1" not satisfied: client has no remote address`, + }, { + caveat: "client-ip-addr badip", + expectError: `caveat "client-ip-addr badip" not satisfied: cannot parse IP address in caveat`, + }}, +}, { + about: "IPv4 host name declared", + checker: checkers.New(httpbakery.Checkers(&http.Request{ + RemoteAddr: "127.0.0.1:1234", + })), + checks: []checkTest{{ + caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}).Condition, + }, { + caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 1}.To16()).Condition, + }, { + caveat: "client-ip-addr ::ffff:7f00:1", + }, { + caveat: checkers.ClientIPAddrCaveat(net.IP{127, 0, 0, 2}).Condition, + expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, + }, { + caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")).Condition, + expectError: `caveat "client-ip-addr 2001:4860:0:2001::68" not satisfied: client IP address mismatch, got 127.0.0.1`, + }}, +}, { + about: "IPv6 host name declared", + checker: checkers.New(httpbakery.Checkers(&http.Request{ + RemoteAddr: "[2001:4860:0:2001::68]:1234", + })), + checks: []checkTest{{ + caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::68")).Condition, + }, { + caveat: "client-ip-addr 2001:4860:0:2001:0::68", + }, { + caveat: checkers.ClientIPAddrCaveat(net.ParseIP("2001:4860:0:2001::69")).Condition, + expectError: `caveat "client-ip-addr 2001:4860:0:2001::69" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, + }, { + caveat: checkers.ClientIPAddrCaveat(net.ParseIP("127.0.0.1")).Condition, + expectError: `caveat "client-ip-addr 127.0.0.1" not 
satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, + }}, +}, { + about: "same client address, ipv4 request address", + checker: checkers.New(httpbakery.Checkers(&http.Request{ + RemoteAddr: "127.0.0.1:1324", + })), + checks: []checkTest{{ + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "127.0.0.1:1234", + }).Condition, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "[::ffff:7f00:1]:1235", + }).Condition, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "127.0.0.2:1234", + }).Condition, + expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "[::ffff:7f00:2]:1235", + }).Condition, + expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 127.0.0.1`, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{}).Condition, + expectError: `caveat "error client has no remote IP address" not satisfied: bad caveat`, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "bad", + }).Condition, + expectError: `caveat "error cannot parse host port in remote address: missing port in address bad" not satisfied: bad caveat`, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "bad:56", + }).Condition, + expectError: `caveat "error invalid IP address in remote address \\"bad:56\\"" not satisfied: bad caveat`, + }}, +}, { + about: "same client address, ipv6 request address", + checker: checkers.New(httpbakery.Checkers(&http.Request{ + RemoteAddr: "[2001:4860:0:2001:0::68]:1235", + })), + checks: []checkTest{{ + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "[2001:4860:0:2001:0::68]:1234", + }).Condition, + }, { + caveat: httpbakery.SameClientIPAddrCaveat(&http.Request{ + RemoteAddr: "127.0.0.2:1234", + }).Condition, + expectError: `caveat "client-ip-addr 127.0.0.2" not satisfied: client IP address mismatch, got 2001:4860:0:2001::68`, + }}, +}, { + about: "request with no origin", + checker: checkers.New(httpbakery.Checkers(&http.Request{})), + checks: []checkTest{{ + caveat: checkers.ClientOriginCaveat("").Condition, + }, { + caveat: checkers.ClientOriginCaveat("somewhere").Condition, + expectError: `caveat "origin somewhere" not satisfied: request has invalid Origin header; got ""`, + }}, +}, { + about: "request with origin", + checker: checkers.New(httpbakery.Checkers(&http.Request{ + Header: http.Header{ + "Origin": {"somewhere"}, + }, + })), + checks: []checkTest{{ + caveat: checkers.ClientOriginCaveat("").Condition, + expectError: `caveat "origin " not satisfied: request has invalid Origin header; got "somewhere"`, + }, { + caveat: checkers.ClientOriginCaveat("somewhere").Condition, + }}, +}} + +func (s *CheckersSuite) TestCheckers(c *gc.C) { + for i, test := range checkerTests { + c.Logf("test %d: %s", i, test.about) + for j, check := range test.checks { + c.Logf("\tcheck %d", j) + err := checkers.New(test.checker).CheckFirstPartyCaveat(check.caveat) + if check.expectError != "" { + c.Assert(err, gc.ErrorMatches, check.expectError) + if check.expectCause == nil { + check.expectCause = errgo.Any + } + c.Assert(check.expectCause(errgo.Cause(err)), gc.Equals, true) + } else { + c.Assert(err, gc.IsNil) + } + } + } +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/client.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/client.go 
1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/client.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,689 @@ +package httpbakery + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" + "sync" + + "github.com/juju/loggo" + "golang.org/x/net/publicsuffix" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +var logger = loggo.GetLogger("httpbakery") + +// DischargeError represents the error when a third party discharge +// is refused by a server. +type DischargeError struct { + // Reason holds the underlying remote error that caused the + // discharge to fail. + Reason *Error +} + +func (e *DischargeError) Error() string { + return fmt.Sprintf("third party refused discharge: %v", e.Reason) +} + +// IsDischargeError reports whether err is a *DischargeError. +func IsDischargeError(err error) bool { + _, ok := err.(*DischargeError) + return ok +} + +// InteractionError wraps an error returned by a call to visitWebPage. +type InteractionError struct { + // Reason holds the actual error returned from visitWebPage. + Reason error +} + +func (e *InteractionError) Error() string { + return fmt.Sprintf("cannot start interactive session: %v", e.Reason) +} + +// IsInteractionError reports whether err is an *InteractionError. +func IsInteractionError(err error) bool { + _, ok := err.(*InteractionError) + return ok +} + +// WaitResponse holds the type that should be returned +// by an HTTP response made to a WaitURL +// (See the ErrorInfo type). +type WaitResponse struct { + Macaroon *macaroon.Macaroon +} + +// NewHTTPClient returns an http.Client that ensures +// that headers are sent to the server even when the +// server redirects a GET request. The returned client +// also contains an empty in-memory cookie jar. +// +// See https://github.com/golang/go/issues/4677 +func NewHTTPClient() *http.Client { + c := *http.DefaultClient + c.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return fmt.Errorf("too many redirects") + } + if len(via) == 0 { + return nil + } + for attr, val := range via[0].Header { + if _, ok := req.Header[attr]; !ok { + req.Header[attr] = val + } + } + return nil + } + jar, err := cookiejar.New(&cookiejar.Options{ + PublicSuffixList: publicsuffix.List, + }) + if err != nil { + panic(err) + } + c.Jar = &cookieLogger{jar} + return &c +} + +// Client holds the context for making HTTP requests +// that automatically acquire and discharge macaroons. +type Client struct { + // Client holds the HTTP client to use. It should have a cookie + // jar configured, and when redirecting it should preserve the + // headers (see NewHTTPClient). + *http.Client + + // VisitWebPage is called when the authorization process + // requires user interaction, and should cause the given URL to + // be opened in a web browser. If this is nil, no interaction + // will be allowed. + VisitWebPage func(*url.URL) error + + // Key holds the client's key. If set, the client will try to + // discharge third party caveats with the special location + // "local" by using this key. See bakery.DischargeAllWithKey and + // bakery.LocalThirdPartyCaveat for more information. + Key *bakery.KeyPair + + // DischargeAcquirer holds the object that will be used to obtain + // third-party discharges. If nil, the Client itself will be used.
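+ // +	// (Illustrative sketch, not part of the original file: a custom +	// acquirer can wrap the client's own implementation, e.g. +	// +	//	type loggingAcquirer struct{ c *Client } +	// +	//	func (a loggingAcquirer) AcquireDischarge(loc string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { +	//		m, err := a.c.AcquireDischarge(loc, cav) +	//		logger.Debugf("discharge from %q: err=%v", cav.Location, err) +	//		return m, err +	//	} +	// +	// where loggingAcquirer is hypothetical.)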
+ DischargeAcquirer DischargeAcquirer +} + +// DischargeAcquirer can be implemented by clients that want to customize the +// discharge-acquisition process used by a Client. +type DischargeAcquirer interface { + // AcquireDischarge should return a discharge macaroon for the given third + // party caveat. The firstPartyLocation holds the location of the original + // macaroon. + AcquireDischarge(firstPartyLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) +} + +// NewClient returns a new Client containing an HTTP client +// created with NewHTTPClient and leaves all other fields zero. +func NewClient() *Client { + return &Client{ + Client: NewHTTPClient(), + } +} + +// Do sends the given HTTP request and returns its response. If the +// request fails with a discharge-required error, any required discharge +// macaroons will be acquired, and the request will be repeated with +// those attached. Do may add headers to req.Header. +// +// If the required discharges were refused by a third party, an error +// with a *DischargeError cause will be returned. +// +// Note that because the request may be retried, no body may be provided +// in the request, otherwise the contents will be lost when retrying. +// For requests with a body (for example PUT or POST methods), use +// DoWithBody instead. +// +// If interaction is required by the user, the visitWebPage function is +// called with a URL to be opened in a web browser. If visitWebPage +// returns an error, an error with a *InteractionError cause will be +// returned. See OpenWebBrowser for a possible implementation of +// visitWebPage. +func (c *Client) Do(req *http.Request) (*http.Response, error) { + if req.Body != nil { + return nil, fmt.Errorf("body unexpectedly provided in request - use DoWithBody") + } + return c.DoWithBody(req, nil) +} + +// DischargeAll attempts to acquire discharge macaroons for all the +// third party caveats in m, and returns a slice containing all +// of them bound to m. +// +// If the discharge fails because a third party refuses to discharge a +// caveat, the returned error will have a cause of type *DischargeError. +// If the discharge fails because visitWebPage returns an error, +// the returned error will have a cause of *InteractionError. +// +// The returned macaroon slice will not be stored in the client +// cookie jar (see SetCookie if you need to do that). +func (c *Client) DischargeAll(m *macaroon.Macaroon) (macaroon.Slice, error) { + return bakery.DischargeAllWithKey(m, c.dischargeAcquirer().AcquireDischarge, c.Key) +} + +func (c *Client) dischargeAcquirer() DischargeAcquirer { + if c.DischargeAcquirer != nil { + return c.DischargeAcquirer + } + return c +} + +// relativeURL returns the new URL resolved relative to the base URL. +func relativeURL(base, new string) (*url.URL, error) { + if new == "" { + return nil, errgo.Newf("empty URL") + } + baseURL, err := url.Parse(base) + if err != nil { + return nil, errgo.Notef(err, "cannot parse URL") + } + newURL, err := url.Parse(new) + if err != nil { + return nil, errgo.Notef(err, "cannot parse URL") + } + return baseURL.ResolveReference(newURL), nil +} + +// DoWithBody is like Do except that the given body is used for the body +// of the HTTP request, and reset to its start by seeking if the request +// is retried. It is an error if req.Body is non-nil. +// +// Note that, unlike the request body passed to http.NewRequest, +// the body will not be closed even if it implements io.Closer. +// +// DoWithBody may add headers to req.Header.
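+// +// A minimal usage sketch (serverURL is assumed; the request is created +// with a nil body and the payload is passed separately so that it can +// be replayed on retry): +// +//	body := strings.NewReader("payload") +//	req, err := http.NewRequest("PUT", serverURL, nil) +//	// check err +//	resp, err := client.DoWithBody(req, body)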
+func (c *Client) DoWithBody(req *http.Request, body io.ReadSeeker) (*http.Response, error) { + return c.DoWithBodyAndCustomError(req, body, nil) +} + +// DoWithBodyAndCustomError is like DoWithBody except it allows a client +// to specify a custom error function, getError, which is called on the +// HTTP response and may return a non-nil error if the response holds an +// error. If the cause of the returned error is a *Error value and its +// code is ErrDischargeRequired, the macaroon in its Info field will be +// discharged and the request will be repeated with the discharged +// macaroon. If getError returns nil, it should leave the response body +// unchanged. +// +// If getError is nil, DefaultGetError will be used. +// +// This method can be useful when dealing with APIs that +// return their errors in a format incompatible with Error, but the +// need for it should be avoided when creating new APIs, +// as it makes the endpoints less amenable to generic tools. +func (c *Client) DoWithBodyAndCustomError(req *http.Request, body io.ReadSeeker, getError func(resp *http.Response) error) (*http.Response, error) { + logger.Debugf("client do %s %s {", req.Method, req.URL) + resp, err := c.doWithBody(req, body, getError) + logger.Debugf("} -> error %#v", err) + return resp, err +} + +func (c *Client) doWithBody(req *http.Request, body io.ReadSeeker, getError func(resp *http.Response) error) (*http.Response, error) { + if getError == nil { + getError = DefaultGetError + } + if req.Body != nil { + return nil, errgo.New("body unexpectedly supplied in Request struct") + } + if c.Client.Jar == nil { + return nil, errgo.New("no cookie jar supplied in HTTP client") + } + if err := c.setRequestBody(req, body); err != nil { + return nil, errgo.Mask(err) + } + req.Header.Set(BakeryProtocolHeader, fmt.Sprint(latestVersion)) + httpResp, err := c.Client.Do(req) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + err = getError(httpResp) + if err == nil { + return httpResp, nil + } + httpResp.Body.Close() + + if err := c.HandleError(req.URL, err); err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + + if err := c.setRequestBody(req, body); err != nil { + return nil, errgo.Mask(err) + } + // Try again with our newly acquired discharge macaroons. + hresp, err := c.Client.Do(req) + if err != nil { + return nil, errgo.Mask(err, errgo.Any) + } + return hresp, nil +} + +// HandleError tries to resolve the given error, which should be a +// response to the given URL, by discharging any macaroon contained in +// it. That is, if the error cause is an *Error and its code is +// ErrDischargeRequired, then it will try to discharge +// err.Info.Macaroon. If the discharge succeeds, the discharged macaroon +// will be saved to the client's cookie jar and HandleError will return +// nil. +// +// For any other kind of error, the original error will be returned.
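+// +// For illustration (reqURL and doErr are assumed to come from an +// earlier failed request): +// +//	if err := client.HandleError(reqURL, doErr); err != nil { +//		return err +//	} +//	// the discharged macaroon is now in the cookie jar, +//	// so the request can be retried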
+func (c *Client) HandleError(reqURL *url.URL, err error) error { + respErr, ok := errgo.Cause(err).(*Error) + if !ok { + return err + } + if respErr.Code != ErrDischargeRequired { + return respErr + } + if respErr.Info == nil || respErr.Info.Macaroon == nil { + return errgo.New("no macaroon found in discharge-required response") + } + mac := respErr.Info.Macaroon + macaroons, err := bakery.DischargeAllWithKey(mac, c.dischargeAcquirer().AcquireDischarge, c.Key) + if err != nil { + return errgo.Mask(err, errgo.Any) + } + var cookiePath string + if path := respErr.Info.MacaroonPath; path != "" { + relURL, err := parseURLPath(path) + if err != nil { + logger.Warningf("ignoring invalid path in discharge-required response: %v", err) + } else { + cookiePath = reqURL.ResolveReference(relURL).Path + } + } + cookie, err := NewCookie(macaroons) + if err != nil { + return errgo.Notef(err, "cannot make cookie") + } + cookie.Path = cookiePath + if name := respErr.Info.CookieNameSuffix; name != "" { + cookie.Name = "macaroon-" + name + } + c.Jar.SetCookies(reqURL, []*http.Cookie{cookie}) + return nil +} + +// DefaultGetError is the default error unmarshaler used by Client.DoWithBody. +func DefaultGetError(httpResp *http.Response) error { + if httpResp.StatusCode != http.StatusProxyAuthRequired && httpResp.StatusCode != http.StatusUnauthorized { + return nil + } + // Check for the new protocol discharge error. + if httpResp.StatusCode == http.StatusUnauthorized && httpResp.Header.Get("WWW-Authenticate") != "Macaroon" { + return nil + } + if httpResp.Header.Get("Content-Type") != "application/json" { + return nil + } + var resp Error + if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil { + return fmt.Errorf("cannot unmarshal error response: %v", err) + } + return &resp +} + +func parseURLPath(path string) (*url.URL, error) { + u, err := url.Parse(path) + if err != nil { + return nil, errgo.Mask(err) + } + if u.Scheme != "" || + u.Opaque != "" || + u.User != nil || + u.Host != "" || + u.RawQuery != "" || + u.Fragment != "" { + return nil, errgo.Newf("URL path %q is not clean", path) + } + return u, nil +} + +func (c *Client) setRequestBody(req *http.Request, body io.ReadSeeker) error { + if body == nil { + return nil + } + if req.Body != nil { + req.Body.Close() + if _, err := body.Seek(0, 0); err != nil { + return errgo.Notef(err, "cannot seek to start of request body") + } + } + // Always replace the body with a new readStopper so that + // the old request cannot interfere with the new request's reader. + req.Body = &readStopper{ + r: body, + } + return nil +} + +var errClosed = errgo.New("reader has been closed") + +// readStopper works around an issue with the net/http +// package (see http://golang.org/issue/12796). +// Because the first HTTP request might not have finished +// reading from its body when it returns, we need to +// ensure that the second request does not race on Read, +// so this type implements a Reader that prevents all Read +// calls to the underlying Reader after Close has been called. +type readStopper struct { + mu sync.Mutex + r io.ReadSeeker +} + +func (r *readStopper) Read(buf []byte) (int, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.r == nil { + return 0, errClosed + } + return r.r.Read(buf) +} + +func (r *readStopper) Close() error { + r.mu.Lock() + r.r = nil + r.mu.Unlock() + return nil +} + +// NewCookie takes a slice of macaroons and returns them +// encoded as a cookie. 
The slice should contain a single primary +// macaroon in its first element, and any discharges after that. +func NewCookie(ms macaroon.Slice) (*http.Cookie, error) { + if len(ms) == 0 { + return nil, errgo.New("no macaroons in cookie") + } + data, err := json.Marshal(ms) + if err != nil { + return nil, errgo.Notef(err, "cannot marshal macaroons") + } + cookie := &http.Cookie{ + Name: fmt.Sprintf("macaroon-%x", ms[0].Signature()), + Value: base64.StdEncoding.EncodeToString(data), + } + cookie.Expires, _ = checkers.MacaroonsExpiryTime(ms) + // TODO(rog) other fields. + return cookie, nil +} + +// SetCookie sets a cookie for the given URL on the given cookie jar +// that holds the given macaroon slice. The macaroon slice should +// contain a single primary macaroon in its first element, and any +// discharges after that. +func SetCookie(jar http.CookieJar, url *url.URL, ms macaroon.Slice) error { + cookie, err := NewCookie(ms) + if err != nil { + return errgo.Mask(err) + } + // TODO verify that setting this for the URL makes it available + // to all paths under that URL. + jar.SetCookies(url, []*http.Cookie{cookie}) + return nil +} + +// MacaroonsForURL returns any macaroons associated with the +// given URL in the given cookie jar. +func MacaroonsForURL(jar http.CookieJar, u *url.URL) []macaroon.Slice { + return cookiesToMacaroons(jar.Cookies(u)) +} + +func appendURLElem(u, elem string) string { + if strings.HasSuffix(u, "/") { + return u + elem + } + return u + "/" + elem +} + +// AcquireDischarge implements DischargeAcquirer by requesting a discharge +// macaroon from the caveat location as an HTTP URL. +func (c *Client) AcquireDischarge(originalLocation string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + var resp dischargeResponse + loc := appendURLElem(cav.Location, "discharge") + err := postFormJSON( + loc, + url.Values{ + "id": {cav.Id}, + "location": {originalLocation}, + }, + &resp, + c.postForm, + ) + if err == nil { + return resp.Macaroon, nil + } + cause, ok := errgo.Cause(err).(*Error) + if !ok { + return nil, errgo.NoteMask(err, "cannot acquire discharge", IsInteractionError) + } + if cause.Code != ErrInteractionRequired { + return nil, &DischargeError{ + Reason: cause, + } + } + if cause.Info == nil { + return nil, errgo.Notef(err, "interaction-required response with no info") + } + m, err := c.interact(loc, cause.Info.VisitURL, cause.Info.WaitURL) + if err != nil { + return nil, errgo.Mask(err, IsDischargeError, IsInteractionError) + } + return m, nil +} + +// interact gathers a macaroon by directing the user to interact +// with a web page.
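+// The visit/wait protocol implemented below is: open the visit URL via +// c.VisitWebPage, then block on a GET of the wait URL, which returns +// the discharge macaroon once the user has authenticated.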
+func (c *Client) interact(location, visitURLStr, waitURLStr string) (*macaroon.Macaroon, error) { + visitURL, err := relativeURL(location, visitURLStr) + if err != nil { + return nil, errgo.Notef(err, "cannot make relative visit URL") + } + waitURL, err := relativeURL(location, waitURLStr) + if err != nil { + return nil, errgo.Notef(err, "cannot make relative wait URL") + } + if c.VisitWebPage == nil { + return nil, &InteractionError{ + Reason: errgo.New("interaction required but not possible"), + } + } + if err := c.VisitWebPage(visitURL); err != nil { + return nil, &InteractionError{ + Reason: err, + } + } + waitResp, err := c.Client.Get(waitURL.String()) + if err != nil { + return nil, errgo.Notef(err, "cannot get %q", waitURL) + } + defer waitResp.Body.Close() + if waitResp.StatusCode != http.StatusOK { + var resp Error + if err := json.NewDecoder(waitResp.Body).Decode(&resp); err != nil { + return nil, errgo.Notef(err, "cannot unmarshal wait error response") + } + dischargeErr := &DischargeError{ + Reason: &resp, + } + return nil, errgo.NoteMask(dischargeErr, "failed to acquire macaroon after waiting", errgo.Any) + } + var resp WaitResponse + if err := json.NewDecoder(waitResp.Body).Decode(&resp); err != nil { + return nil, errgo.Notef(err, "cannot unmarshal wait response") + } + if resp.Macaroon == nil { + return nil, errgo.New("no macaroon found in wait response") + } + return resp.Macaroon, nil +} + +func (c *Client) postForm(url string, data url.Values) (*http.Response, error) { + return c.post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +func (c *Client) post(url string, bodyType string, body io.ReadSeeker) (resp *http.Response, err error) { + req, err := http.NewRequest("POST", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + // TODO(rog) see http.shouldRedirectPost + return c.DoWithBody(req, body) +} + +// postFormJSON does an HTTP POST request to the given url with the given +// values and unmarshals the response into the value pointed to by resp. +// It uses the given postForm function to actually make the POST request. +func postFormJSON(url string, vals url.Values, resp interface{}, postForm func(url string, vals url.Values) (*http.Response, error)) error { + logger.Debugf("postFormJSON to %s; vals: %#v", url, vals) + httpResp, err := postForm(url, vals) + if err != nil { + return errgo.NoteMask(err, fmt.Sprintf("cannot http POST to %q", url), errgo.Any) + } + defer httpResp.Body.Close() + data, err := ioutil.ReadAll(httpResp.Body) + if err != nil { + return errgo.Notef(err, "failed to read body from %q", url) + } + if httpResp.StatusCode != http.StatusOK { + var errResp Error + if err := json.Unmarshal(data, &errResp); err != nil { + // TODO better error here + return errgo.Notef(err, "POST %q failed with status %q; cannot parse body %q", url, httpResp.Status, data) + } + return &errResp + } + if err := json.Unmarshal(data, resp); err != nil { + return errgo.Notef(err, "cannot unmarshal response from %q", url) + } + return nil +} + +// MacaroonsHeader is the key of the HTTP header that can be used to provide a +// macaroon for request authorization. +const MacaroonsHeader = "Macaroons" + +// RequestMacaroons returns any collections of macaroons from the header and +// cookies found in the request. By convention, each slice will contain a +// primary macaroon followed by its discharges.
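+// +// For illustration, a client that cannot use cookies can supply a +// macaroon slice ms in the header directly, mirroring the cookie +// encoding (a sketch; error handling omitted): +// +//	data, err := json.Marshal(ms) +//	req.Header.Set(httpbakery.MacaroonsHeader, +//		base64.StdEncoding.EncodeToString(data))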
+func RequestMacaroons(req *http.Request) []macaroon.Slice { + mss := cookiesToMacaroons(req.Cookies()) + for _, h := range req.Header[MacaroonsHeader] { + ms, err := decodeMacaroonSlice(h) + if err != nil { + logger.Errorf("cannot retrieve macaroons from header: %v", err) + } else { + mss = append(mss, ms) + } + } + return mss +} + +// cookiesToMacaroons returns a slice of any macaroons found +// in the given slice of cookies. +func cookiesToMacaroons(cookies []*http.Cookie) []macaroon.Slice { + var mss []macaroon.Slice + for _, cookie := range cookies { + if !strings.HasPrefix(cookie.Name, "macaroon-") { + continue + } + ms, err := decodeMacaroonSlice(cookie.Value) + if err != nil { + logger.Errorf("cannot retrieve macaroons from cookie: %v", err) + continue + } + mss = append(mss, ms) + } + return mss +} + +// decodeMacaroonSlice decodes a base64-JSON-encoded slice of macaroons from +// the given string. +func decodeMacaroonSlice(value string) (macaroon.Slice, error) { + data, err := base64.StdEncoding.DecodeString(value) + if err != nil { + return nil, errgo.NoteMask(err, "cannot base64-decode macaroons") + } + var ms macaroon.Slice + if err := json.Unmarshal(data, &ms); err != nil { + return nil, errgo.NoteMask(err, "cannot unmarshal macaroons") + } + return ms, nil +} + +func isVerificationError(err error) bool { + _, ok := err.(*bakery.VerificationError) + return ok +} + +// CheckRequest checks that the given http request contains at least one +// valid macaroon minted by the given service, using checker to check +// any first party caveats. It returns an error with a +// *bakery.VerificationError cause if the macaroon verification failed. +// +// The assert map holds any required attributes of "declared" attributes, +// overriding any inferences made from the macaroons themselves. +// It has a similar effect to adding a checkers.DeclaredCaveat +// for each key and value, but the error message will be more +// useful. +// +// It adds all the standard caveat checkers to the given checker. +// +// It returns any attributes declared in the successfully validated request. +func CheckRequest(svc *bakery.Service, req *http.Request, assert map[string]string, checker checkers.Checker) (map[string]string, error) { + attrs, _, err := CheckRequestM(svc, req, assert, checker) + return attrs, err +} + +// CheckRequestM is like CheckRequest except that on success it also returns +// the set of macaroons that was successfully checked. +// The "M" suffix is for backward compatibility reasons - in a +// later bakery version, the signature of CheckRequest will be +// changed to return the macaroon slice and CheckRequestM will be +// removed. +func CheckRequestM(svc *bakery.Service, req *http.Request, assert map[string]string, checker checkers.Checker) (map[string]string, macaroon.Slice, error) { + mss := RequestMacaroons(req) + if len(mss) == 0 { + return nil, nil, &bakery.VerificationError{ + Reason: errgo.Newf("no macaroon cookies in request"), + } + } + checker = checkers.New( + checker, + Checkers(req), + checkers.TimeBefore, + ) + attrs, ms, err := svc.CheckAnyM(mss, assert, checker) + if err != nil { + return nil, nil, errgo.Mask(err, isVerificationError) + } + return attrs, ms, nil +} + +type cookieLogger struct { + http.CookieJar +} + +func (j *cookieLogger) SetCookies(u *url.URL, cookies []*http.Cookie) { + logger.Debugf("%p setting %d cookies for %s", j.CookieJar, len(cookies), u) + for i, c := range cookies { + logger.Debugf("\t%d. 
path %s; name %s", i, c.Path, c.Name) + } + j.CookieJar.SetCookies(u, cookies) +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/client_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/client_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/client_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1059 @@ +package httpbakery_test + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/cookiejar" + "net/http/httptest" + "net/url" + "sort" + "strings" + "sync" + "time" + + "github.com/juju/httprequest" + jujutesting "github.com/juju/testing" + jc "github.com/juju/testing/checkers" + gc "gopkg.in/check.v1" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" + "gopkg.in/macaroon-bakery.v1/httpbakery" +) + +type ClientSuite struct { + jujutesting.LoggingSuite +} + +var _ = gc.Suite(&ClientSuite{}) + +// TestSingleServiceFirstParty creates a single service +// with a macaroon with one first party caveat. +// It creates a request with this macaroon and checks that the service +// can verify this macaroon as valid. +func (s *ClientSuite) TestSingleServiceFirstParty(c *gc.C) { + // Create a target service. + svc := newService("loc", nil) + // No discharge required, so pass "unknown" for the third party + // caveat discharger location so we know that we don't try + // to discharge the location. + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: "unknown", + })) + defer ts.Close() + + // Mint a macaroon for the target service. + serverMacaroon, err := svc.NewMacaroon("", nil, nil) + c.Assert(err, gc.IsNil) + c.Assert(serverMacaroon.Location(), gc.Equals, "loc") + err = svc.AddCaveat(serverMacaroon, checkers.Caveat{ + Condition: "is something", + }) + c.Assert(err, gc.IsNil) + + // Create a client request. + req, err := http.NewRequest("GET", ts.URL, nil) + c.Assert(err, gc.IsNil) + client := clientRequestWithCookies(c, ts.URL, macaroon.Slice{serverMacaroon}) + // Somehow the client has accquired the macaroon. Add it to the cookiejar in our request. + + // Make the request to the server. + resp, err := client.Do(req) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + assertResponse(c, resp, "done") +} + +func (s *ClientSuite) TestSingleServiceFirstPartyWithHeader(c *gc.C) { + // Create a target service. + svc := newService("loc", nil) + // No discharge required, so pass "unknown" for the third party + // caveat discharger location so we know that we don't try + // to discharge the location. + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: "unknown", + })) + defer ts.Close() + + // Mint a macaroon for the target service. + serverMacaroon, err := svc.NewMacaroon("", nil, nil) + c.Assert(err, gc.IsNil) + c.Assert(serverMacaroon.Location(), gc.Equals, "loc") + err = svc.AddCaveat(serverMacaroon, checkers.Caveat{ + Condition: "is something", + }) + c.Assert(err, gc.IsNil) + + // Serialize the macaroon slice. + data, err := json.Marshal(macaroon.Slice{serverMacaroon}) + c.Assert(err, gc.IsNil) + value := base64.StdEncoding.EncodeToString(data) + + // Create a client request. + req, err := http.NewRequest("GET", ts.URL, nil) + c.Assert(err, gc.IsNil) + req.Header.Set(httpbakery.MacaroonsHeader, value) + client := httpbakery.NewHTTPClient() + + // Make the request to the server. 
+ resp, err := client.Do(req) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + assertResponse(c, resp, "done") +} + +func (s *ClientSuite) TestRepeatedRequestWithBody(c *gc.C) { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + + // Create a target service. + svc := newService("loc", d) + + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: d.Location(), + })) + defer ts.Close() + + // Create a client request. + req, err := http.NewRequest("POST", ts.URL, nil) + c.Assert(err, gc.IsNil) + + // Make the request to the server. + + // First try with a body in the request, which should be denied + // because we must use DoWithBody. + req.Body = ioutil.NopCloser(strings.NewReader("postbody")) + resp, err := httpbakery.NewClient().Do(req) + c.Assert(err, gc.ErrorMatches, "body unexpectedly provided in request - use DoWithBody") + c.Assert(resp, gc.IsNil) + + // Then try with no authorization, so make sure that httpbakery.Do + // really will retry the request. + + req.Body = nil + + bodyText := "postbody" + bodyReader := &readCounter{ReadSeeker: strings.NewReader(bodyText)} + + resp, err = httpbakery.NewClient().DoWithBody(req, bodyReader) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + assertResponse(c, resp, "done postbody") + + // Sanity check that the body really was read twice and hence + // that we are checking the logic we intend to check. + c.Assert(bodyReader.byteCount, gc.Equals, len(bodyText)*2) +} + +func (s ClientSuite) TestWithLargeBody(c *gc.C) { + // This test is designed to fail when run with the race + // checker enabled and when go issue #12796 + // is not fixed. + + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + + // Create a target service. + svc := newService("loc", d) + + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: d.Location(), + })) + defer ts.Close() + + // Create a client request. + req, err := http.NewRequest("POST", ts.URL+"/no-body", nil) + c.Assert(err, gc.IsNil) + + resp, err := httpbakery.NewClient().DoWithBody(req, &largeReader{total: 3 * 1024 * 1024}) + c.Assert(err, gc.IsNil) + c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) +} + +// largeReader implements a reader that produces up to total bytes +// in 1 byte reads. +type largeReader struct { + total int + n int +} + +func (r *largeReader) Read(buf []byte) (int, error) { + if r.n >= r.total { + return 0, io.EOF + } + r.n++ + return copy(buf, []byte("a")), nil +} + +func (r *largeReader) Seek(offset int64, whence int) (int64, error) { + if offset != 0 || whence != 0 { + panic("unexpected seek") + } + r.n = 0 + return 0, nil +} + +func (s *ClientSuite) TestDoWithBodyFailsWithBodyInRequest(c *gc.C) { + body := strings.NewReader("foo") + // Create a client request. + req, err := http.NewRequest("POST", "http://0.1.2.3/", body) + c.Assert(err, gc.IsNil) + _, err = httpbakery.NewClient().DoWithBody(req, body) + c.Assert(err, gc.ErrorMatches, "body unexpectedly supplied in Request struct") +} + +func (s *ClientSuite) TestDischargeServerWithMacaraqOnDischarge(c *gc.C) { + locator := bakery.NewPublicKeyRing() + + var called [3]int + + // create the services from leaf discharger to primary + // service so that each one can know the location + // to discharge at. 
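+ // (Sketch of the chain under test: the client hits srv0, whose + // macaroon has a third-party caveat addressed to srv1; srv1's + // discharge in turn requires a further discharge from srv2.)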
+ key2, h2 := newHTTPDischarger(locator, func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { + called[2]++ + if cav != "is-ok" { + return nil, fmt.Errorf("unrecognized caveat at srv2") + } + return nil, nil + }) + srv2 := httptest.NewServer(h2) + locator.AddPublicKeyForLocation(srv2.URL, true, key2) + + key1, h1 := newHTTPDischarger(locator, func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { + called[1]++ + if _, err := httpbakery.CheckRequest(svc, req, nil, checkers.New()); err != nil { + return nil, newDischargeRequiredError(serverHandlerParams{ + service: svc, + authLocation: srv2.URL, + }, err, req) + } + if cav != "is-ok" { + return nil, fmt.Errorf("unrecognized caveat at srv1") + } + return nil, nil + }) + srv1 := httptest.NewServer(h1) + locator.AddPublicKeyForLocation(srv1.URL, true, key1) + + svc0 := newService("loc", locator) + srv0 := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc0, + authLocation: srv1.URL, + })) + + // Make a client request. + client := httpbakery.NewClient() + req, err := http.NewRequest("GET", srv0.URL, nil) + c.Assert(err, gc.IsNil) + resp, err := client.Do(req) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + assertResponse(c, resp, "done") + + c.Assert(called, gc.DeepEquals, [3]int{0, 2, 1}) +} + +func (s *ClientSuite) TestVersion0Generates407Status(c *gc.C) { + m, err := macaroon.New([]byte("root key"), "id", "location") + c.Assert(err, gc.IsNil) + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "", errgo.New("foo"), req) + })) + defer srv.Close() + resp, err := http.Get(srv.URL) + c.Assert(err, gc.IsNil) + c.Assert(resp.StatusCode, gc.Equals, http.StatusProxyAuthRequired) +} + +func (s *ClientSuite) TestVersion1Generates401Status(c *gc.C) { + m, err := macaroon.New([]byte("root key"), "id", "location") + c.Assert(err, gc.IsNil) + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + httpbakery.WriteDischargeRequiredErrorForRequest(w, m, "", errgo.New("foo"), req) + })) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL, nil) + c.Assert(err, gc.IsNil) + req.Header.Set(httpbakery.BakeryProtocolHeader, "1") + resp, err := http.DefaultClient.Do(req) + c.Assert(err, gc.IsNil) + c.Assert(resp.StatusCode, gc.Equals, http.StatusUnauthorized) + c.Assert(resp.Header.Get("WWW-Authenticate"), gc.Equals, "Macaroon") +} + +func newHTTPDischarger(locator bakery.PublicKeyLocator, checker func(svc *bakery.Service, req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) (*bakery.PublicKey, http.Handler) { + svc := newService("loc", locator) + mux := http.NewServeMux() + httpbakery.AddDischargeHandler(mux, "/", svc, func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) { + return checker(svc, req, cavId, cav) + }) + return svc.PublicKey(), mux +} + +func (s *ClientSuite) TestDischargeAcquirer(c *gc.C) { + rootKey := []byte("secret") + m, err := macaroon.New(rootKey, "", "here") + c.Assert(err, gc.IsNil) + + dischargeRootKey := []byte("shared root key") + thirdPartyCaveatId := "3rd party caveat" + err = m.AddThirdPartyCaveat(dischargeRootKey, thirdPartyCaveatId, "there") + c.Assert(err, gc.IsNil) + + dm, err := macaroon.New(dischargeRootKey, thirdPartyCaveatId, "there") + c.Assert(err, gc.IsNil) + + ta := &testAcquirer{dischargeMacaroon: dm} + cl := httpbakery.NewClient() + 
cl.DischargeAcquirer = ta + + ms, err := cl.DischargeAll(m) + c.Assert(err, gc.IsNil) + c.Assert(ms, gc.HasLen, 2) + + c.Assert(ta.acquireLocation, gc.Equals, "here") // should be first-party location + c.Assert(ta.acquireCaveat.Id, gc.Equals, thirdPartyCaveatId) + expectCaveat := "must foo" + var lastCaveat string + err = ms[0].Verify(rootKey, func(s string) error { + if s != expectCaveat { + return errgo.Newf(`expected %q, got %q`, expectCaveat, s) + } + lastCaveat = s + return nil + }, ms[1:]) + c.Assert(err, gc.IsNil) + c.Assert(lastCaveat, gc.Equals, expectCaveat) +} + +type testAcquirer struct { + dischargeMacaroon *macaroon.Macaroon + + acquireLocation string + acquireCaveat macaroon.Caveat +} + +// AcquireDischarge implements httpbakery.DischargeAcquirer. +func (ta *testAcquirer) AcquireDischarge(loc string, cav macaroon.Caveat) (*macaroon.Macaroon, error) { + ta.acquireLocation = loc + ta.acquireCaveat = cav + err := ta.dischargeMacaroon.AddFirstPartyCaveat("must foo") + if err != nil { + return nil, err + } + return ta.dischargeMacaroon, nil +} + +// onceOnlyChecker returns a third-party checker that accepts any given +// caveat id once only. +func onceOnlyChecker() func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + checked := make(map[string]bool) + var mu sync.Mutex + return func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + mu.Lock() + defer mu.Unlock() + id := cond + " " + arg + if checked[id] { + return nil, errgo.Newf("caveat %q fails second time", id) + } + checked[id] = true + return nil, nil + } +} + +func (s *ClientSuite) TestMacaroonCookieName(c *gc.C) { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + + svc := newService("loc", nil) + + // We arrange things so that although we use the same client + // (with the same cookie jar), the macaroon verification only + // succeeds once, so the client always fetches a new macaroon. + + caveatSeq := 0 + checked := make(map[string]bool) + cookieName := "" + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + mutateError: func(e *httpbakery.Error) { + e.Info.CookieNameSuffix = cookieName + e.Info.MacaroonPath = "/" + }, + checker: checkers.CheckerFunc{ + Condition_: "once", + Check_: func(_, arg string) error { + if checked[arg] { + return errgo.Newf("caveat %q has already been checked once", arg) + } + checked[arg] = true + return nil + }, + }, + caveats: func() []checkers.Caveat { + caveatSeq++ + return []checkers.Caveat{{ + Condition: fmt.Sprintf("once %d", caveatSeq), + }} + }, + })) + defer ts.Close() + + client := httpbakery.NewClient() + doRequest := func() { + req, err := http.NewRequest("GET", ts.URL+"/foo/bar/", nil) + c.Assert(err, gc.IsNil) + resp, err := client.Do(req) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + assertResponse(c, resp, "done") + } + assertCookieNames := func(names ...string) { + u, err := url.Parse(ts.URL) + c.Assert(err, gc.IsNil) + sort.Strings(names) + var gotNames []string + for _, c := range client.Jar.Cookies(u) { + gotNames = append(gotNames, c.Name) + } + sort.Strings(gotNames) + c.Assert(gotNames, jc.DeepEquals, names) + } + cookieName = "foo" + doRequest() + assertCookieNames("macaroon-foo") + + // Another request with the same cookie name should + // overwrite the old cookie. + doRequest() + assertCookieNames("macaroon-foo") + + // A subsequent request with a different cookie name + // should create a new cookie, but the old one will still + // be around. 
+ cookieName = "bar" + doRequest() + assertCookieNames("macaroon-foo", "macaroon-bar") +} + +func (s *ClientSuite) TestMacaroonCookiePath(c *gc.C) { + svc := newService("loc", nil) + + cookiePath := "" + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + mutateError: func(e *httpbakery.Error) { + e.Info.MacaroonPath = cookiePath + }, + })) + defer ts.Close() + + var client *httpbakery.Client + doRequest := func() { + req, err := http.NewRequest("GET", ts.URL+"/foo/bar/", nil) + c.Assert(err, gc.IsNil) + client = httpbakery.NewClient() + resp, err := client.Do(req) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + assertResponse(c, resp, "done") + } + assertCookieCount := func(path string, n int) { + u, err := url.Parse(ts.URL + path) + c.Assert(err, gc.IsNil) + c.Assert(client.Jar.Cookies(u), gc.HasLen, n) + } + cookiePath = "" + c.Logf("- cookie path %q", cookiePath) + doRequest() + assertCookieCount("", 0) + assertCookieCount("/foo", 0) + assertCookieCount("/foo", 0) + assertCookieCount("/foo/", 0) + assertCookieCount("/foo/bar/", 1) + assertCookieCount("/foo/bar/baz", 1) + + cookiePath = "/foo/" + c.Logf("- cookie path %q", cookiePath) + doRequest() + assertCookieCount("", 0) + assertCookieCount("/foo", 0) + assertCookieCount("/foo/", 1) + assertCookieCount("/foo/bar/", 1) + assertCookieCount("/foo/bar/baz", 1) + + cookiePath = "/foo" + c.Logf("- cookie path %q", cookiePath) + doRequest() + assertCookieCount("", 0) + assertCookieCount("/bar", 0) + assertCookieCount("/foo", 1) + assertCookieCount("/foo/", 1) + assertCookieCount("/foo/bar/", 1) + assertCookieCount("/foo/bar/baz", 1) + + cookiePath = "../" + c.Logf("- cookie path %q", cookiePath) + doRequest() + assertCookieCount("", 0) + assertCookieCount("/bar", 0) + assertCookieCount("/foo", 0) + assertCookieCount("/foo/", 1) + assertCookieCount("/foo/bar/", 1) + assertCookieCount("/foo/bar/baz", 1) + + cookiePath = "../bar" + c.Logf("- cookie path %q", cookiePath) + doRequest() + assertCookieCount("", 0) + assertCookieCount("/bar", 0) + assertCookieCount("/foo", 0) + assertCookieCount("/foo/", 0) + assertCookieCount("/foo/bar/", 1) + assertCookieCount("/foo/bar/baz", 1) + assertCookieCount("/foo/baz", 0) + assertCookieCount("/foo/baz/", 0) + assertCookieCount("/foo/baz/bar", 0) + + cookiePath = "/" + c.Logf("- cookie path %q", cookiePath) + doRequest() + assertCookieCount("", 1) + assertCookieCount("/bar", 1) + assertCookieCount("/foo", 1) + assertCookieCount("/foo/", 1) + assertCookieCount("/foo/bar/", 1) + assertCookieCount("/foo/bar/baz", 1) +} + +func (s *ClientSuite) TestThirdPartyDischargeRefused(c *gc.C) { + d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return nil, errgo.New("boo! cond " + cond) + }) + defer d.Close() + + // Create a target service. + svc := newService("loc", d) + + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: d.Location(), + })) + defer ts.Close() + + // Create a client request. + req, err := http.NewRequest("GET", ts.URL, nil) + c.Assert(err, gc.IsNil) + + client := httpbakery.NewClient() + + // Make the request to the server. + resp, err := client.Do(req) + c.Assert(errgo.Cause(err), gc.FitsTypeOf, (*httpbakery.DischargeError)(nil)) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from ".*": third party refused discharge: cannot discharge: boo! 
cond is-ok`) + c.Assert(resp, gc.IsNil) +} + +func (s *ClientSuite) TestDischargeWithInteractionRequiredError(c *gc.C) { + d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return nil, &httpbakery.Error{ + Code: httpbakery.ErrInteractionRequired, + Message: "interaction required", + Info: &httpbakery.ErrorInfo{ + VisitURL: "http://0.1.2.3/", + WaitURL: "http://0.1.2.3/", + }, + } + }) + defer d.Close() + + // Create a target service. + svc := newService("loc", d) + + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: d.Location(), + })) + defer ts.Close() + + // Create a client request. + req, err := http.NewRequest("GET", ts.URL, nil) + c.Assert(err, gc.IsNil) + + errCannotVisit := errgo.New("cannot visit") + client := httpbakery.NewClient() + client.VisitWebPage = func(*url.URL) error { + return errCannotVisit + } + + // Make the request to the server. + resp, err := client.Do(req) + c.Assert(err, gc.ErrorMatches, `cannot get discharge from "https://.*": cannot start interactive session: cannot visit`) + c.Assert(httpbakery.IsInteractionError(errgo.Cause(err)), gc.Equals, true) + ierr, ok := errgo.Cause(err).(*httpbakery.InteractionError) + c.Assert(ok, gc.Equals, true) + c.Assert(ierr.Reason, gc.Equals, errCannotVisit) + c.Assert(resp, gc.IsNil) +} + +var dischargeWithVisitURLErrorTests = []struct { + about string + respond func(http.ResponseWriter) + expectError string +}{{ + about: "error message", + respond: func(w http.ResponseWriter) { + httprequest.ErrorMapper(httpbakery.ErrorToResponse).WriteError(w, fmt.Errorf("an error")) + }, + expectError: `cannot get discharge from ".*": failed to acquire macaroon after waiting: third party refused discharge: an error`, +}, { + about: "non-JSON error", + respond: func(w http.ResponseWriter) { + w.Write([]byte("bad response")) + }, + // TODO fix this unhelpful error message + expectError: `cannot get discharge from ".*": cannot unmarshal wait response: invalid character 'b' looking for beginning of value`, +}} + +func (s *ClientSuite) TestDischargeWithVisitURLError(c *gc.C) { + visitor := newVisitHandler(nil) + visitSrv := httptest.NewServer(visitor) + defer visitSrv.Close() + + d := bakerytest.NewDischarger(nil, func(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return nil, &httpbakery.Error{ + Code: httpbakery.ErrInteractionRequired, + Message: "interaction required", + Info: &httpbakery.ErrorInfo{ + VisitURL: visitSrv.URL + "/visit", + WaitURL: visitSrv.URL + "/wait", + }, + } + }) + defer d.Close() + + // Create a target service. + svc := newService("loc", d) + ts := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: d.Location(), + })) + defer ts.Close() + + for i, test := range dischargeWithVisitURLErrorTests { + c.Logf("test %d: %s", i, test.about) + visitor.respond = test.respond + + client := httpbakery.NewClient() + client.VisitWebPage = func(u *url.URL) error { + resp, err := http.Get(u.String()) + if err != nil { + return err + } + resp.Body.Close() + return nil + } + + // Create a client request. + req, err := http.NewRequest("GET", ts.URL, nil) + c.Assert(err, gc.IsNil) + + // Make the request to the server. + _, err = client.Do(req) + c.Assert(err, gc.ErrorMatches, test.expectError) + } +} + +func (s *ClientSuite) TestMacaroonsForURL(c *gc.C) { + // Create a target service. 
+ svc := newService("loc", nil) + + m1, err := svc.NewMacaroon("id1", []byte("key1"), nil) + c.Assert(err, gc.IsNil) + m2, err := svc.NewMacaroon("id2", []byte("key2"), nil) + c.Assert(err, gc.IsNil) + + u1 := mustParseURL("http://0.1.2.3/") + u2 := mustParseURL("http://0.1.2.3/x/") + + // Create some cookies with different cookie paths. + jar, err := cookiejar.New(nil) + c.Assert(err, gc.IsNil) + httpbakery.SetCookie(jar, u1, macaroon.Slice{m1}) + httpbakery.SetCookie(jar, u2, macaroon.Slice{m2}) + jar.SetCookies(u1, []*http.Cookie{{ + Name: "foo", + Path: "/", + Value: "ignored", + }, { + Name: "bar", + Path: "/x/", + Value: "ignored", + }}) + + // Check that MacaroonsForURL behaves correctly + // with both single and multiple cookies. + + mss := httpbakery.MacaroonsForURL(jar, u1) + c.Assert(mss, gc.HasLen, 1) + c.Assert(mss[0], gc.HasLen, 1) + c.Assert(mss[0][0].Id(), gc.Equals, "id1") + + mss = httpbakery.MacaroonsForURL(jar, u2) + + checked := make(map[string]int) + for _, ms := range mss { + checked[ms[0].Id()]++ + err := svc.Check(ms, checkers.New()) + c.Assert(err, gc.IsNil) + } + c.Assert(checked, jc.DeepEquals, map[string]int{ + "id1": 1, + "id2": 1, + }) +} + +func (s *ClientSuite) TestDoWithBodyAndCustomError(c *gc.C) { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + + // Create a target service. + svc := newService("loc", d) + + type customError struct { + CustomError *httpbakery.Error + } + callCount := 0 + handler := func(w http.ResponseWriter, req *http.Request) { + callCount++ + if _, checkErr := httpbakery.CheckRequest(svc, req, nil, checkers.New()); checkErr != nil { + httprequest.WriteJSON(w, http.StatusTeapot, customError{ + CustomError: newDischargeRequiredError(serverHandlerParams{ + service: svc, + authLocation: d.Location(), + }, checkErr, req).(*httpbakery.Error), + }) + return + } + fmt.Fprintf(w, "hello there") + } + srv := httptest.NewServer(http.HandlerFunc(handler)) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL, nil) + c.Assert(err, gc.IsNil) + + // First check that a normal request fails. + resp, err := httpbakery.NewClient().Do(req) + c.Assert(err, gc.IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, gc.Equals, http.StatusTeapot) + c.Assert(callCount, gc.Equals, 1) + callCount = 0 + + // Then check that a request with a custom error getter succeeds. + errorGetter := func(resp *http.Response) error { + if resp.StatusCode != http.StatusTeapot { + return nil + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + panic(err) + } + var respErr customError + if err := json.Unmarshal(data, &respErr); err != nil { + panic(err) + } + return respErr.CustomError + } + + resp, err = httpbakery.NewClient().DoWithBodyAndCustomError(req, nil, errorGetter) + c.Assert(err, gc.IsNil) + + data, err := ioutil.ReadAll(resp.Body) + c.Assert(err, gc.IsNil) + c.Assert(string(data), gc.Equals, "hello there") + c.Assert(callCount, gc.Equals, 2) +} + +func (s *ClientSuite) TestHandleError(c *gc.C) { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + + // Create a target service. 
+ svc := newService("loc", d) + + srv := httptest.NewServer(serverHandler(serverHandlerParams{ + service: svc, + authLocation: "unknown", + mutateError: nil, + })) + defer srv.Close() + + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: d.Location(), + Condition: "something", + }}) + c.Assert(err, gc.IsNil) + + u, err := url.Parse(srv.URL + "/bar") + c.Assert(err, gc.IsNil) + + respErr := &httpbakery.Error{ + Message: "an error", + Code: httpbakery.ErrDischargeRequired, + Info: &httpbakery.ErrorInfo{ + Macaroon: m, + MacaroonPath: "/foo", + }, + } + client := httpbakery.NewClient() + err = client.HandleError(u, respErr) + c.Assert(err, gc.Equals, nil) + // No cookies at the original location. + c.Assert(client.Client.Jar.Cookies(u), gc.HasLen, 0) + + u.Path = "/foo" + cookies := client.Client.Jar.Cookies(u) + c.Assert(cookies, gc.HasLen, 1) + + // Check that we can actually make a request + // with the newly acquired macaroon cookies. + + req, err := http.NewRequest("GET", srv.URL+"/foo", nil) + c.Assert(err, gc.IsNil) + + resp, err := client.Do(req) + c.Assert(err, gc.IsNil) + resp.Body.Close() + c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) +} + +func (s *ClientSuite) TestHandleErrorDifferentError(c *gc.C) { + berr := &httpbakery.Error{ + Message: "an error", + Code: "another code", + } + client := httpbakery.NewClient() + err := client.HandleError(&url.URL{}, berr) + c.Assert(err, gc.Equals, berr) +} + +func (s *ClientSuite) TestNewCookieExpires(c *gc.C) { + t := time.Now().Add(24 * time.Hour) + svc := newService("loc", nil) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{ + checkers.TimeBeforeCaveat(t), + }) + c.Assert(err, gc.IsNil) + cookie, err := httpbakery.NewCookie(macaroon.Slice{m}) + c.Assert(err, gc.IsNil) + c.Assert(cookie.Expires.Equal(t), gc.Equals, true, gc.Commentf("obtained: %s, expected: %s", cookie.Expires, t)) +} + +func mustParseURL(s string) *url.URL { + u, err := url.Parse(s) + if err != nil { + panic(err) + } + return u +} + +type visitHandler struct { + mux *http.ServeMux + rendez chan struct{} + respond func(w http.ResponseWriter) +} + +func newVisitHandler(respond func(http.ResponseWriter)) *visitHandler { + h := &visitHandler{ + rendez: make(chan struct{}, 1), + respond: respond, + mux: http.NewServeMux(), + } + h.mux.HandleFunc("/visit", h.serveVisit) + h.mux.HandleFunc("/wait", h.serveWait) + return h +} + +func (h *visitHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + h.mux.ServeHTTP(w, req) +} + +func (h *visitHandler) serveVisit(w http.ResponseWriter, req *http.Request) { + h.rendez <- struct{}{} +} + +func (h *visitHandler) serveWait(w http.ResponseWriter, req *http.Request) { + <-h.rendez + h.respond(w) +} + +// assertResponse asserts that the given response is OK and contains +// the expected body text. 
+func assertResponse(c *gc.C, resp *http.Response, expectBody string) { + c.Assert(resp.StatusCode, gc.Equals, http.StatusOK) + body, err := ioutil.ReadAll(resp.Body) + c.Assert(err, gc.IsNil) + c.Assert(string(body), gc.DeepEquals, expectBody) +} + +func noVisit(*url.URL) error { + return fmt.Errorf("should not be visiting") +} + +type readCounter struct { + io.ReadSeeker + byteCount int +} + +func (r *readCounter) Read(buf []byte) (int, error) { + n, err := r.ReadSeeker.Read(buf) + r.byteCount += n + return n, err +} + +func newService(location string, locator bakery.PublicKeyLocator) *bakery.Service { + svc, err := bakery.NewService(bakery.NewServiceParams{ + Location: location, + Locator: locator, + }) + if err != nil { + panic(err) + } + return svc +} + +func clientRequestWithCookies(c *gc.C, u string, macaroons macaroon.Slice) *http.Client { + client := httpbakery.NewHTTPClient() + url, err := url.Parse(u) + c.Assert(err, gc.IsNil) + err = httpbakery.SetCookie(client.Jar, url, macaroons) + c.Assert(err, gc.IsNil) + return client +} + +var handleErrors = httprequest.ErrorMapper(httpbakery.ErrorToResponse).HandleErrors + +type serverHandlerParams struct { + // service holds the service that will be used to check incoming + // requests. + service *bakery.Service + + // checker is used to check first party caveats in macaroons. + // If it is nil, isChecker("something") will be used. + checker checkers.Checker + + // authLocation holds the location of any 3rd party authorizer. + // If this is non-empty, a 3rd party caveat will be added + // addressed to this location. + authLocation string + + // When authLocation is non-empty and thirdPartyCondition + // is non-nil, it will be called to determine the condition + // to address to the third party. + thirdPartyCondition func() string + + // mutateError, if non-nil, will be called with any + // discharge-required error before responding + // to the client. + mutateError func(*httpbakery.Error) + + // If caveats is non-nil, it is called to get caveats to + // add to the returned macaroon. + caveats func() []checkers.Caveat +} + +// serverHandler returns an HTTP handler that checks macaroon authorization +// and, if that succeeds, writes the string "done" and echoes anything in the +// request body. +// It recognises the single first party caveat "is something". +func serverHandler(hp serverHandlerParams) http.Handler { + if hp.checker == nil { + hp.checker = isChecker("something") + } + h := handleErrors(func(p httprequest.Params) error { + if _, checkErr := httpbakery.CheckRequest(hp.service, p.Request, nil, hp.checker); checkErr != nil { + return newDischargeRequiredError(hp, checkErr, p.Request) + } + fmt.Fprintf(p.Response, "done") + // Special case: the no-body path doesn't return the body. + if p.Request.URL.Path == "/no-body" { + return nil + } + data, err := ioutil.ReadAll(p.Request.Body) + if err != nil { + panic(fmt.Errorf("cannot read body: %v", err)) + } + if len(data) > 0 { + fmt.Fprintf(p.Response, " %s", data) + } + return nil + }) + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h(w, req, nil) + }) +} + +// newDischargeRequiredError returns a discharge-required error holding +// a newly minted macaroon referencing the original check error +// checkErr. If hp.authLocation is non-empty, the issued macaroon will +// contain an "is-ok" third party caveat addressed to that location.
+// +// If req is non-nil, it will be used to pass to NewDischargeRequiredErrorForRequest, +// otherwise the old protocol (triggered by NewDischargeRequiredError) will be used. +func newDischargeRequiredError(hp serverHandlerParams, checkErr error, req *http.Request) error { + var caveats []checkers.Caveat + if hp.authLocation != "" { + caveats = []checkers.Caveat{{ + Location: hp.authLocation, + Condition: "is-ok", + }} + } + if hp.caveats != nil { + caveats = append(caveats, hp.caveats()...) + } + m, err := hp.service.NewMacaroon("", nil, caveats) + if err != nil { + panic(fmt.Errorf("cannot make new macaroon: %v", err)) + } + if req != nil { + err = httpbakery.NewDischargeRequiredErrorForRequest(m, "", checkErr, req) + } else { + err = httpbakery.NewDischargeRequiredError(m, "", checkErr) + } + if hp.mutateError != nil { + hp.mutateError(err.(*httpbakery.Error)) + } + return err +} + +type isChecker string + +func (isChecker) Condition() string { + return "is" +} + +func (c isChecker) Check(_, arg string) error { + if arg != string(c) { + return fmt.Errorf("%v doesn't match %s", arg, c) + } + return nil +} + +func noCaveatChecker(_ *http.Request, cond, arg string) ([]checkers.Caveat, error) { + return nil, nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/discharge.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/discharge.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/discharge.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,133 @@ +package httpbakery + +import ( + "crypto/rand" + "fmt" + "net/http" + "path" + + "github.com/juju/httprequest" + "github.com/julienschmidt/httprouter" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" +) + +type dischargeHandler struct { + svc *bakery.Service + checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error) +} + +// AddDischargeHandler adds handlers to the given +// ServeMux to serve third party caveat discharges +// using the given service. +// +// The handlers are added under the given rootPath, +// which must be non-empty. +// +// The check function is used to check whether a client making the given +// request should be allowed a discharge for the given caveat. If it +// does not return an error, the caveat will be discharged, with any +// returned caveats also added to the discharge macaroon. +// If it returns an error with a *Error cause, the error will be marshaled +// and sent back to the client. +// +// The name space served by DischargeHandler is as follows. +// All parameters can be provided either as URL attributes +// or form attributes. The result is always formatted as a JSON +// object. +// +// On failure, all endpoints return an error described by +// the Error type. +// +// POST /discharge +// params: +// id: id of macaroon to discharge +// location: location of original macaroon (optional (?)) +// ?? flow=redirect|newwindow +// result on success (http.StatusOK): +// { +// Macaroon *macaroon.Macaroon +// } +// +// GET /publickey +// result: +// public key of service +// expiry time of key +func AddDischargeHandler(mux *http.ServeMux, rootPath string, svc *bakery.Service, checker func(req *http.Request, cavId, cav string) ([]checkers.Caveat, error)) { + d := &dischargeHandler{ + svc: svc, + checker: checker, + } + mux.Handle(path.Join(rootPath, "discharge"), mkHandler(handleJSON(d.serveDischarge))) + // TODO(rog) is there a case for making public key caveat signing + // optional? 
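+	// For illustration, a client might invoke the discharge endpoint
+	// registered above roughly like this (a sketch; serverURL and
+	// caveatId are hypothetical values):
+	//
+	//	resp, err := http.PostForm(serverURL+rootPath+"/discharge",
+	//		url.Values{"id": {caveatId}})
+	//
+	// with a successful response unmarshaling into dischargeResponse.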
+ mux.Handle(path.Join(rootPath, "publickey"), mkHandler(handleJSON(d.servePublicKey))) +} + +type dischargeResponse struct { + Macaroon *macaroon.Macaroon `json:",omitempty"` +} + +func (d *dischargeHandler) serveDischarge(p httprequest.Params) (interface{}, error) { + r, err := d.serveDischarge1(p) + if err != nil { + logger.Debugf("serveDischarge -> error %#v", err) + } else { + logger.Debugf("serveDischarge -> %#v", r) + } + return r, err +} + +func (d *dischargeHandler) serveDischarge1(p httprequest.Params) (interface{}, error) { + logger.Debugf("dischargeHandler.serveDischarge {") + defer logger.Debugf("}") + if p.Request.Method != "POST" { + // TODO http.StatusMethodNotAllowed) + return nil, badRequestErrorf("method not allowed") + } + p.Request.ParseForm() + id := p.Request.Form.Get("id") + if id == "" { + return nil, badRequestErrorf("id attribute is empty") + } + checker := func(cavId, cav string) ([]checkers.Caveat, error) { + return d.checker(p.Request, cavId, cav) + } + + // TODO(rog) pass location into discharge + // location := p.Request.Form.Get("location") + + var resp dischargeResponse + m, err := d.svc.Discharge(bakery.ThirdPartyCheckerFunc(checker), id) + if err != nil { + return nil, errgo.NoteMask(err, "cannot discharge", errgo.Any) + } + resp.Macaroon = m + return &resp, nil +} + +type publicKeyResponse struct { + PublicKey *bakery.PublicKey +} + +func (d *dischargeHandler) servePublicKey(httprequest.Params) (interface{}, error) { + return publicKeyResponse{d.svc.PublicKey()}, nil +} + +func randomBytes(n int) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, fmt.Errorf("cannot generate %d random bytes: %v", n, err) + } + return b, nil +} + +func mkHandler(h httprouter.Handle) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + h(w, req, nil) + }) +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/error.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,293 @@ +package httpbakery + +import ( + "net/http" + "strconv" + + "github.com/juju/httprequest" + "gopkg.in/errgo.v1" + "gopkg.in/macaroon.v1" +) + +// ErrorCode holds an error code that classifies +// an error returned from a bakery HTTP handler. +type ErrorCode string + +func (e ErrorCode) Error() string { + return string(e) +} + +func (e ErrorCode) ErrorCode() ErrorCode { + return e +} + +const ( + ErrBadRequest = ErrorCode("bad request") + ErrDischargeRequired = ErrorCode("macaroon discharge required") + ErrInteractionRequired = ErrorCode("interaction required") +) + +var ( + errorMapper httprequest.ErrorMapper = ErrorToResponse + handleJSON = errorMapper.HandleJSON + writeError = errorMapper.WriteError +) + +// Error holds the type of a response from an httpbakery HTTP request, +// marshaled as JSON. +// +// Note: Do not construct Error values with ErrDischargeRequired or +// ErrInteractionRequired codes directly - use the +// NewDischargeRequiredErrorForRequest or NewInteractionRequiredError +// functions instead. +type Error struct { + Code ErrorCode `json:",omitempty"` + Message string `json:",omitempty"` + Info *ErrorInfo `json:",omitempty"` + + // version holds the protocol version that was used + // to create the error (see NewDischargeRequiredErrorWithVersion). + version version +} + +// version represents a version of the bakery protocol. 
It is used
+// to determine the kind of response to send when there is a
+// discharge-required error.
type version int
+
+const (
+	version0      version = 0
+	version1      version = 1
+	latestVersion         = version1
+)
+
+// ErrorInfo holds additional information provided
+// by an error.
+type ErrorInfo struct {
+	// Macaroon may hold a macaroon that, when
+	// discharged, may allow access to a service.
+	// This field is associated with the ErrDischargeRequired
+	// error code.
+	Macaroon *macaroon.Macaroon `json:",omitempty"`
+
+	// MacaroonPath holds the URL path to be associated
+	// with the macaroon. The macaroon is potentially
+	// valid for all URLs under the given path.
+	// If it is empty, the macaroon will be associated with
+	// the original URL from which the error was returned.
+	MacaroonPath string `json:",omitempty"`
+
+	// CookieNameSuffix holds the desired cookie name suffix to be
+	// associated with the macaroon. The actual name used will be
+	// ("macaroon-" + CookieName). Clients may ignore this field -
+	// older clients will always use ("macaroon-" +
+	// macaroon.Signature() in hex).
+	CookieNameSuffix string `json:",omitempty"`
+
+	// VisitURL and WaitURL are associated with the
+	// ErrInteractionRequired error code.
+
+	// VisitURL holds a URL that the client should visit
+	// in a web browser to authenticate themselves.
+	VisitURL string `json:",omitempty"`
+
+	// WaitURL holds a URL that the client should visit
+	// to acquire the discharge macaroon. A GET on
+	// this URL will block until the client has authenticated,
+	// and then it will return the discharge macaroon.
+	WaitURL string `json:",omitempty"`
+}
+
+func (e *Error) Error() string {
+	return e.Message
+}
+
+func (e *Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// ErrorInfo returns additional information
+// about the error.
+// TODO return interface{} here?
+func (e *Error) ErrorInfo() *ErrorInfo {
+	return e.Info
+}
+
+// ErrorToResponse returns the HTTP status and an error body to be
+// marshaled as JSON for the given error. This allows a third party
+// package to integrate bakery errors into their error responses when
+// they encounter an error with a *httpbakery.Error cause.
+func ErrorToResponse(err error) (int, interface{}) {
+	errorBody := errorResponseBody(err)
+	var body interface{} = errorBody
+	status := http.StatusInternalServerError
+	switch errorBody.Code {
+	case ErrBadRequest:
+		status = http.StatusBadRequest
+	case ErrDischargeRequired, ErrInteractionRequired:
+		switch errorBody.version {
+		case version0:
+			status = http.StatusProxyAuthRequired
+		case version1:
+			status = http.StatusUnauthorized
+			body = httprequest.CustomHeader{
+				Body:          body,
+				SetHeaderFunc: setAuthenticateHeader,
+			}
+		default:
+			panic("out of range version number")
+		}
+	}
+	return status, body
+}
+
+func setAuthenticateHeader(h http.Header) {
+	h.Set("WWW-Authenticate", "Macaroon")
+}
+
+type errorInfoer interface {
+	ErrorInfo() *ErrorInfo
+}
+
+type errorCoder interface {
+	ErrorCode() ErrorCode
+}
+
+// errorResponseBody returns an appropriate error
+// response body for the provided error.
+func errorResponseBody(err error) *Error {
+	var errResp Error
+	cause := errgo.Cause(err)
+	if cause, ok := cause.(*Error); ok {
+		// It's an Error already. Preserve the wrapped
+		// error message but copy everything else.
+		errResp = *cause
+		errResp.Message = err.Error()
+		return &errResp
+	}
+	// It's not an *Error. Preserve as much info as
+	// we can find.
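+	// For example, an error value whose cause implements the
+	// errorCoder interface above will have its code carried through
+	// (a sketch; myError is hypothetical):
+	//
+	//	type myError struct{}
+	//
+	//	func (myError) Error() string        { return "boom" }
+	//	func (myError) ErrorCode() ErrorCode { return ErrBadRequest }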
+	errResp.Message = err.Error()
+	if coder, ok := cause.(errorCoder); ok {
+		errResp.Code = coder.ErrorCode()
+	}
+	if infoer, ok := cause.(errorInfoer); ok {
+		errResp.Info = infoer.ErrorInfo()
+	}
+	return &errResp
+}
+
+func badRequestErrorf(f string, a ...interface{}) error {
+	return errgo.WithCausef(nil, ErrBadRequest, f, a...)
+}
+
+// WriteDischargeRequiredError creates an error using
+// NewDischargeRequiredError and writes it to the given response writer,
+// indicating that the client should discharge the macaroon to allow the
+// original request to be accepted.
+func WriteDischargeRequiredError(w http.ResponseWriter, m *macaroon.Macaroon, path string, originalErr error) {
+	writeError(w, NewDischargeRequiredError(m, path, originalErr))
+}
+
+// WriteDischargeRequiredErrorForRequest is like WriteDischargeRequiredError
+// but uses the given request to determine the protocol version appropriate
+// for the client.
+//
+// This function should always be used in preference to
+// WriteDischargeRequiredError, because it enables
+// in-browser macaroon discharge.
+func WriteDischargeRequiredErrorForRequest(w http.ResponseWriter, m *macaroon.Macaroon, path string, originalErr error, req *http.Request) {
+	writeError(w, NewDischargeRequiredErrorForRequest(m, path, originalErr, req))
+}
+
+// NewDischargeRequiredError returns an error of type *Error that
+// reports the given original error and includes the given macaroon.
+//
+// The returned macaroon will be declared as valid for the given URL
+// path, which may be relative. When the client stores the discharged
+// macaroon as a cookie this will be the path associated with the
+// cookie. See ErrorInfo.MacaroonPath for more information.
+func NewDischargeRequiredError(m *macaroon.Macaroon, path string, originalErr error) error {
+	return newDischargeRequiredErrorWithVersion(m, path, originalErr, version0)
+}
+
+// NewInteractionRequiredError returns an error of type *Error
+// that requests an interaction from the client in response
+// to the given request. The originalErr value describes the original
+// error - if it is nil, a default message will be provided.
+//
+// See Error.ErrorInfo for more details of visitURL and waitURL.
+//
+// This function should be used in preference to creating the Error value
+// directly, as it sets the bakery protocol version correctly in the error.
+func NewInteractionRequiredError(visitURL, waitURL string, originalErr error, req *http.Request) error {
+	if originalErr == nil {
+		originalErr = ErrInteractionRequired
+	}
+	return &Error{
+		Message: originalErr.Error(),
+		version: versionFromRequest(req),
+		Code:    ErrInteractionRequired,
+		Info: &ErrorInfo{
+			VisitURL: visitURL,
+			WaitURL:  waitURL,
+		},
+	}
+}
+
+// NewDischargeRequiredErrorForRequest is like NewDischargeRequiredError
+// except that it determines the client's bakery protocol version from
+// the request and returns an error response appropriate for that.
+//
+// This function should always be used in preference to
+// NewDischargeRequiredError, because it enables in-browser macaroon
+// discharge.
+//
+// To request a particular cookie name:
+//
+//	err := NewDischargeRequiredErrorForRequest(...)
+//	err.(*httpbakery.Error).Info.CookieNameSuffix = cookieName
+func NewDischargeRequiredErrorForRequest(m *macaroon.Macaroon, path string, originalErr error, req *http.Request) error {
+	v := versionFromRequest(req)
+	return newDischargeRequiredErrorWithVersion(m, path, originalErr, v)
+}
+
+// newDischargeRequiredErrorWithVersion is the internal version of NewDischargeRequiredErrorForRequest.
+func newDischargeRequiredErrorWithVersion(m *macaroon.Macaroon, path string, originalErr error, v version) error {
+	if originalErr == nil {
+		originalErr = ErrDischargeRequired
+	}
+	return &Error{
+		Message: originalErr.Error(),
+		version: v,
+		Code:    ErrDischargeRequired,
+		Info: &ErrorInfo{
+			Macaroon:     m,
+			MacaroonPath: path,
+		},
+	}
+}
+
+// BakeryProtocolHeader is the header that HTTP clients should set
+// to indicate the bakery protocol version they understand. If it is 0
+// or missing, a discharge-required error response will be returned with
+// HTTP status 407; if it is 1, the response will have status 401 with
+// the WWW-Authenticate header set to "Macaroon".
+const BakeryProtocolHeader = "Bakery-Protocol-Version"
+
+// versionFromRequest determines the bakery protocol version from a client
+// request. If the protocol cannot be determined, or is invalid,
+// the original version of the protocol is used.
+func versionFromRequest(req *http.Request) version {
+	vs := req.Header.Get(BakeryProtocolHeader)
+	if vs == "" {
+		// No header - use backward compatibility mode.
+		return version0
+	}
+	v, err := strconv.Atoi(vs)
+	if err != nil || version(v) < 0 || version(v) > latestVersion {
+		// Badly formed header - use backward compatibility mode.
+		return version0
+	}
+	return version(v)
+}
=== added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go'
--- src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/httpbakery/error_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,122 @@
+package httpbakery_test
+
+import (
+	"encoding/json"
+	"errors"
+	"net/http"
+	"net/http/httptest"
+
+	"github.com/juju/httprequest"
+	jc "github.com/juju/testing/checkers"
+	"github.com/juju/testing/httptesting"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/macaroon.v1"
+
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+)
+
+type ErrorSuite struct{}
+
+var _ = gc.Suite(&ErrorSuite{})
+
+func (s *ErrorSuite) TestWriteDischargeRequiredError(c *gc.C) {
+	m, err := macaroon.New([]byte("secret"), "id", "a location")
+	c.Assert(err, gc.IsNil)
+	tests := []struct {
+		about            string
+		path             string
+		err              error
+		expectedResponse httpbakery.Error
+	}{{
+		about: `write discharge required with "an error" but no path`,
+		path:  "",
+		err:   errors.New("an error"),
+		expectedResponse: httpbakery.Error{
+			Code:    httpbakery.ErrDischargeRequired,
+			Message: "an error",
+			Info: &httpbakery.ErrorInfo{
+				Macaroon: m,
+			},
+		},
+	}, {
+		about: `write discharge required with "an error" and set a path`,
+		path:  "http://foobar:1234",
+		err:   errors.New("an error"),
+		expectedResponse: httpbakery.Error{
+			Code:    httpbakery.ErrDischargeRequired,
+			Message: "an error",
+			Info: &httpbakery.ErrorInfo{
+				Macaroon:     m,
+				MacaroonPath: "http://foobar:1234",
+			},
+		},
+	}, {
+		about: `write discharge required with nil error but set a path`,
+		path:  "http://foobar:1234",
+		err:   nil,
+		expectedResponse: httpbakery.Error{
+			Code:    httpbakery.ErrDischargeRequired,
+			Message: httpbakery.ErrDischargeRequired.Error(),
+			Info: &httpbakery.ErrorInfo{
+				Macaroon:     m,
+				MacaroonPath: "http://foobar:1234",
+			},
+		},
+	},
+	}
+
+	for i, t := range tests {
+		c.Logf("Running test %d %s", i, t.about)
+		response := httptest.NewRecorder()
+		httpbakery.WriteDischargeRequiredError(response, m, t.path, t.err)
+		httptesting.AssertJSONResponse(c, response, http.StatusProxyAuthRequired, t.expectedResponse)
+	}
+}
+
+func (s *ErrorSuite) TestNewInteractionRequiredError(c *gc.C) {
+	// With a request with no version header, the response
+	// should be 407.
+	req, err := http.NewRequest("GET", "/", nil)
+	c.Assert(err, gc.IsNil)
+
+	err = httpbakery.NewInteractionRequiredError("/visit", "/wait", nil, req)
+	code, resp := httpbakery.ErrorToResponse(err)
+	c.Assert(code, gc.Equals, http.StatusProxyAuthRequired)
+
+	data, err := json.Marshal(resp)
+	c.Assert(err, gc.IsNil)
+
+	c.Assert(string(data), jc.JSONEquals, &httpbakery.Error{
+		Code:    httpbakery.ErrInteractionRequired,
+		Message: httpbakery.ErrInteractionRequired.Error(),
+		Info: &httpbakery.ErrorInfo{
+			VisitURL: "/visit",
+			WaitURL:  "/wait",
+		},
+	})
+
+	// With a request with a version 1 header, the response
+	// should be 401.
+	req.Header.Set("Bakery-Protocol-Version", "1")
+
+	err = httpbakery.NewInteractionRequiredError("/visit", "/wait", nil, req)
+	code, resp = httpbakery.ErrorToResponse(err)
+	c.Assert(code, gc.Equals, http.StatusUnauthorized)
+
+	h := make(http.Header)
+	resp.(httprequest.HeaderSetter).SetHeader(h)
+	c.Assert(h.Get("WWW-Authenticate"), gc.Equals, "Macaroon")
+
+	data, err = json.Marshal(resp)
+	c.Assert(err, gc.IsNil)
+
+	c.Assert(string(data), jc.JSONEquals, &httpbakery.Error{
+		Code:    httpbakery.ErrInteractionRequired,
+		Message: httpbakery.ErrInteractionRequired.Error(),
+		Info: &httpbakery.ErrorInfo{
+			VisitURL: "/visit",
+			WaitURL:  "/wait",
+		},
+	})
+}
=== added directory 'src/gopkg.in/macaroon-bakery.v1/httpbakery/form'
=== added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form.go'
--- src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,177 @@
+// Package form enables interactive login without using a web browser.
+package form
+
+import (
+	"net/http"
+	"net/url"
+
+	"github.com/juju/httprequest"
+	"golang.org/x/net/publicsuffix"
+	"gopkg.in/errgo.v1"
+	"gopkg.in/juju/environschema.v1"
+	"gopkg.in/juju/environschema.v1/form"
+
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+)
+
+/*
+PROTOCOL
+
+A form login works as follows:
+
+       Client                              Login Service
+          |                                      |
+          | GET visitURL with                    |
+          | "Accept: application/json"           |
+          |------------------------------------->|
+          |                                      |
+          |   Login Methods (including "form")   |
+          |<-------------------------------------|
+          |                                      |
+          | GET "form" URL                       |
+          |------------------------------------->|
+          |                                      |
+          |                    Schema definition |
+          |<-------------------------------------|
+          |                                      |
+  +-------------+                                |
+  |    Client   |                                |
+  | Interaction |                                |
+  +-------------+                                |
+          |                                      |
+          | POST data to "form" URL              |
+          |------------------------------------->|
+          |                                      |
+          |                  Form login response |
+          |<-------------------------------------|
+          |                                      |
+
+The schema is provided as an environschema.Fields object. It is the
+client's responsibility to interpret the schema and present it to the
+user.
+*/
+
+// SetUpAuth configures form authentication on c. The VisitWebPage field
+// in c will be set to a function that will attempt form-based
+// authentication using f to perform the interaction with the user and
+// fall back to using the current value of VisitWebPage if form-based
+// authentication is not supported.
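+//
+// A typical use looks like this (a sketch; myFiller is a caller-provided
+// form.Filler implementation):
+//
+//	client := httpbakery.NewClient()
+//	form.SetUpAuth(client, myFiller)
+//	ms, err := client.DischargeAll(m)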
+func SetUpAuth(c *httpbakery.Client, f form.Filler) {
+	c.VisitWebPage = VisitWebPage(c, f, c.VisitWebPage)
+}
+
+// VisitWebPage creates a function suitable for use with
+// httpbakery.Client.VisitWebPage. The new function downloads the schema
+// from the specified server and calls f.Fill. The map returned by f.Fill
+// should match the schema specified, but this is not verified before
+// sending to the server. Any errors returned by f.Fill or fallback will
+// not have their cause masked.
+//
+// If the new function detects that form login is not supported by the
+// server and fallback is not nil then fallback will be called to perform
+// the visit.
+func VisitWebPage(d httprequest.Doer, f form.Filler, fallback func(u *url.URL) error) func(u *url.URL) error {
+	v := webPageVisitor{
+		client: &httprequest.Client{
+			Doer: d,
+		},
+		filler:   f,
+		fallback: fallback,
+	}
+	return v.visitWebPage
+}
+
+// webPageVisitor contains the state required by visitWebPage.
+type webPageVisitor struct {
+	client   *httprequest.Client
+	filler   form.Filler
+	fallback func(u *url.URL) error
+}
+
+// loginMethods contains the response expected from the login URL. It
+// only checks for the "form" method as that is the only one that can be
+// handled.
+type loginMethods struct {
+	Form string `json:"form"`
+}
+
+// SchemaRequest is a request for a form schema.
+type SchemaRequest struct {
+	httprequest.Route `httprequest:"GET"`
+}
+
+// SchemaResponse contains the message expected in response to the schema
+// request.
+type SchemaResponse struct {
+	Schema environschema.Fields `json:"schema"`
+}
+
+// LoginRequest is a request to perform a login using the provided form.
+type LoginRequest struct {
+	httprequest.Route `httprequest:"POST"`
+	Body              LoginBody `httprequest:",body"`
+}
+
+// LoginBody holds the body of a form login request.
+type LoginBody struct {
+	Form map[string]interface{} `json:"form"`
+}
+
+// visitWebPage performs the actual visit request. It attempts to
+// determine that form login is supported and then download the form
+// schema. It calls v.filler.Fill using the downloaded schema and then
+// submits the returned form. Any error produced by v.filler.Fill will
+// not have its cause masked.
+func (v webPageVisitor) visitWebPage(u *url.URL) error { + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return errgo.Notef(err, "cannot create request") + } + req.Header.Set("Accept", "application/json") + var lm loginMethods + if err := v.client.Do(req, nil, &lm); err != nil { + if v.fallback != nil { + if err := v.fallback(u); err != nil { + return errgo.Mask(err, errgo.Any) + } + return nil + } + return errgo.Notef(err, "cannot get login methods") + } + if lm.Form == "" { + if v.fallback != nil { + if err := v.fallback(u); err != nil { + return errgo.Mask(err, errgo.Any) + } + return nil + } + return errgo.Newf("form login not supported") + } + var s SchemaResponse + if err := v.client.CallURL(lm.Form, &SchemaRequest{}, &s); err != nil { + return errgo.Notef(err, "cannot get schema") + } + if len(s.Schema) == 0 { + return errgo.Newf("invalid schema: no fields found") + } + host, err := publicsuffix.EffectiveTLDPlusOne(u.Host) + if err != nil { + host = u.Host + } + form, err := v.filler.Fill(form.Form{ + Title: "Log in to " + host, + Fields: s.Schema, + }) + if err != nil { + return errgo.NoteMask(err, "cannot handle form", errgo.Any) + } + lr := LoginRequest{ + Body: LoginBody{ + Form: form, + }, + } + if err := v.client.CallURL(lm.Form, &lr, nil); err != nil { + return errgo.Notef(err, "cannot submit form") + } + return nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/form/form_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,336 @@ +package form_test + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/juju/httprequest" + "github.com/juju/testing/httptesting" + gc "gopkg.in/check.v1" + "gopkg.in/juju/environschema.v1" + esform "gopkg.in/juju/environschema.v1/form" + + "gopkg.in/macaroon-bakery.v1/bakery" + "gopkg.in/macaroon-bakery.v1/bakery/checkers" + "gopkg.in/macaroon-bakery.v1/bakerytest" + "gopkg.in/macaroon-bakery.v1/httpbakery" + "gopkg.in/macaroon-bakery.v1/httpbakery/form" +) + +type formSuite struct{} + +var _ = gc.Suite(&formSuite{}) + +var formLoginTests = []struct { + about string + opts dischargeOptions + filler fillerFunc + fallback func(*url.URL) error + expectError string +}{{ + about: "complete login", +}, { + about: "login error", + opts: dischargeOptions{ + loginError: true, + }, + expectError: `cannot get discharge from ".*": cannot start interactive session: cannot get login methods: GET .*: httprequest: test error`, +}, { + about: "login methods not supported", + opts: dischargeOptions{ + ignoreAccept: true, + }, + expectError: `cannot get discharge from ".*": cannot start interactive session: cannot get login methods: GET .*: unexpected content type text/plain; want application/json; content: OK`, +}, { + about: "form login method not supported", + opts: dischargeOptions{ + formUnsupported: true, + }, + expectError: `cannot get discharge from ".*": cannot start interactive session: form login not supported`, +}, { + about: "error getting schema", + opts: dischargeOptions{ + getError: true, + }, + expectError: `cannot get discharge from ".*": cannot start interactive session: cannot get schema: GET .*: httprequest: test error`, +}, { + about: "error submitting form", + opts: dischargeOptions{ + postError: true, + }, + expectError: `cannot get discharge from ".*": cannot start interactive session: cannot submit form: POST .*: httprequest: test error`, +}, { + 
about: "no schema",
+	opts: dischargeOptions{
+		emptySchema: true,
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: invalid schema: no fields found`,
+}, {
+	about: "filler error",
+	filler: func(esform.Form) (map[string]interface{}, error) {
+		return nil, testError
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: cannot handle form: test error`,
+}, {
+	about: "login methods fallback success",
+	opts: dischargeOptions{
+		ignoreAccept: true,
+	},
+	fallback: func(u *url.URL) error {
+		resp, err := http.Get(u.String() + "&fallback=OK")
+		if err == nil {
+			resp.Body.Close()
+		}
+		return err
+	},
+}, {
+	about: "login methods fallback failure",
+	opts: dischargeOptions{
+		ignoreAccept: true,
+	},
+	fallback: func(u *url.URL) error {
+		return testError
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: test error`,
+}, {
+	about: "form not supported fallback success",
+	opts: dischargeOptions{
+		formUnsupported: true,
+	},
+	fallback: func(u *url.URL) error {
+		resp, err := http.Get(u.String() + "&fallback=OK")
+		if err == nil {
+			resp.Body.Close()
+		}
+		return err
+	},
+}, {
+	about: "form not supported fallback failure",
+	opts: dischargeOptions{
+		formUnsupported: true,
+	},
+	fallback: func(u *url.URL) error {
+		return testError
+	},
+	expectError: `cannot get discharge from ".*": cannot start interactive session: test error`,
+}}
+
+func (s *formSuite) TestFormLogin(c *gc.C) {
+	d := &formDischarger{}
+	d.discharger = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc(d.login))
+	defer d.discharger.Close()
+	d.discharger.Mux.Handle("/form", http.HandlerFunc(d.form))
+	svc, err := bakery.NewService(bakery.NewServiceParams{
+		Locator: d.discharger,
+	})
+	c.Assert(err, gc.IsNil)
+	for i, test := range formLoginTests {
+		c.Logf("%d. %s", i, test.about)
+		d.dischargeOptions = test.opts
+		m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{
+			Location:  d.discharger.Location(),
+			Condition: "test condition",
+		}})
+		c.Assert(err, gc.Equals, nil)
+		client := httpbakery.NewClient()
+		h := defaultFiller
+		if test.filler != nil {
+			h = test.filler
+		}
+		client.VisitWebPage = test.fallback
+		form.SetUpAuth(client, h)
+
+		ms, err := client.DischargeAll(m)
+		if test.expectError != "" {
+			c.Assert(err, gc.ErrorMatches, test.expectError)
+			continue
+		}
+		c.Assert(err, gc.IsNil)
+		c.Assert(len(ms), gc.Equals, 2)
+	}
+}
+
+func (s *formSuite) TestFormLoginNewRequestError(c *gc.C) {
+	client := httpbakery.NewClient()
+	form.SetUpAuth(client, defaultFiller)
+	u := url.URL{
+		Scheme: ":",
+	}
+	err := client.VisitWebPage(&u)
+	c.Assert(err, gc.ErrorMatches, "cannot create request: parse :://: missing protocol scheme")
+}
+
+var formTitleTests = []struct {
+	host   string
+	expect string
+}{{
+	host:   "xyz.com",
+	expect: "Log in to xyz.com",
+}, {
+	host:   "abc.xyz.com",
+	expect: "Log in to xyz.com",
+}, {
+	host:   "com",
+	expect: "Log in to com",
+}}
+
+func (s *formSuite) TestFormTitle(c *gc.C) {
+	d := &formDischarger{}
+	d.discharger = bakerytest.NewInteractiveDischarger(nil, http.HandlerFunc(d.login))
+	defer d.discharger.Close()
+	d.discharger.Mux.Handle("/form", http.HandlerFunc(d.form))
+	svc, err := bakery.NewService(bakery.NewServiceParams{
+		Locator: testLocator{
+			loc:     d.discharger.Location(),
+			locator: d.discharger,
+		},
+	})
+	c.Assert(err, gc.IsNil)
+	for i, test := range formTitleTests {
+		c.Logf("%d. 
%s", i, test.host) + m, err := svc.NewMacaroon("", nil, []checkers.Caveat{{ + Location: "https://" + test.host, + Condition: "test condition", + }}) + c.Assert(err, gc.Equals, nil) + client := httpbakery.NewClient() + client.Client.Transport = httptesting.URLRewritingTransport{ + MatchPrefix: "https://" + test.host, + Replace: d.discharger.Location(), + RoundTripper: http.DefaultTransport, + } + f := new(titleTestFiller) + form.SetUpAuth(client, f) + + ms, err := client.DischargeAll(m) + c.Assert(err, gc.IsNil) + c.Assert(len(ms), gc.Equals, 2) + c.Assert(f.title, gc.Equals, test.expect) + } +} + +type dischargeOptions struct { + ignoreAccept bool + loginError bool + formUnsupported bool + getError bool + postError bool + emptySchema bool +} + +type formDischarger struct { + discharger *bakerytest.InteractiveDischarger + dischargeOptions +} + +func (d *formDischarger) login(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if r.Form.Get("fallback") != "" { + d.discharger.FinishInteraction(w, r, nil, nil) + return + } + if d.ignoreAccept { + w.Write([]byte("OK")) + return + } + if r.Header.Get("Accept") != "application/json" { + d.errorf(w, r, "bad accept header %q", r.Header.Get("Accept")) + } + if d.loginError { + httprequest.WriteJSON(w, http.StatusInternalServerError, testError) + d.discharger.FinishInteraction(w, r, nil, testError) + return + } + methods := map[string]string{} + if !d.formUnsupported { + r.ParseForm() + methods["form"] = d.discharger.URL("/form", r) + } + httprequest.WriteJSON(w, http.StatusOK, methods) +} + +func (d *formDischarger) form(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + if r.Form.Get("waitid") == "" { + d.errorf(w, r, "no waitid") + return + } + if r.Method == "GET" { + if d.getError { + httprequest.WriteJSON(w, http.StatusInternalServerError, testError) + d.discharger.FinishInteraction(w, r, nil, testError) + return + } + var sr form.SchemaResponse + if !d.emptySchema { + sr.Schema = environschema.Fields{ + "username": environschema.Attr{ + Type: environschema.Tstring, + }, + "password": environschema.Attr{ + Type: environschema.Tstring, + Secret: true, + }, + } + } + httprequest.WriteJSON(w, http.StatusOK, sr) + return + } + if r.Method != "POST" { + d.errorf(w, r, "bad method %q", r.Method) + return + } + if d.postError { + httprequest.WriteJSON(w, http.StatusInternalServerError, testError) + d.discharger.FinishInteraction(w, r, nil, testError) + return + } + var lr form.LoginRequest + err := httprequest.Unmarshal(httprequest.Params{Request: r}, &lr) + if err != nil { + d.errorf(w, r, "bad login request: %s", err) + return + } + d.discharger.FinishInteraction(w, r, nil, nil) +} + +func (d *formDischarger) errorf(w http.ResponseWriter, r *http.Request, s string, p ...interface{}) { + err := &httpbakery.Error{ + Code: httpbakery.ErrBadRequest, + Message: fmt.Sprintf(s, p...), + } + d.discharger.FinishInteraction(w, r, nil, err) +} + +var testError = &httpbakery.Error{ + Message: "test error", +} + +type fillerFunc func(esform.Form) (map[string]interface{}, error) + +func (f fillerFunc) Fill(form esform.Form) (map[string]interface{}, error) { + return f(form) +} + +var defaultFiller = fillerFunc(func(esform.Form) (map[string]interface{}, error) { + return map[string]interface{}{"test": 1}, nil +}) + +type testLocator struct { + loc string + locator bakery.PublicKeyLocator +} + +func (l testLocator) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) { + return l.locator.PublicKeyForLocation(l.loc) +} + +type titleTestFiller 
struct { + title string +} + +func (f *titleTestFiller) Fill(form esform.Form) (map[string]interface{}, error) { + f.title = form.Title + return map[string]interface{}{"test": 1}, nil +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/form/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/form/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/form/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package form_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,105 @@ +package httpbakery + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "gopkg.in/errgo.v1" + + "gopkg.in/macaroon-bakery.v1/bakery" +) + +// NewPublicKeyRing returns a new public keyring that uses +// the given client to find public keys and uses the +// given cache as a backing. If cache is nil, a new +// cache will be created. If client is nil, http.DefaultClient will +// be used. +func NewPublicKeyRing(client *http.Client, cache *bakery.PublicKeyRing) *PublicKeyRing { + if cache == nil { + cache = bakery.NewPublicKeyRing() + } + if client == nil { + client = http.DefaultClient + } + return &PublicKeyRing{ + client: client, + cache: cache, + } +} + +// PublicKeyRing represents a public keyring that can interrogate +// remote services for their public keys. By default it refuses +// to use insecure URLs. +type PublicKeyRing struct { + client *http.Client + allowInsecure bool + cache *bakery.PublicKeyRing +} + +// AllowInsecure allows insecure URLs. This can be useful +// for testing purposes. +func (kr *PublicKeyRing) AllowInsecure() { + kr.allowInsecure = true +} + +// PublicKeyForLocation implements bakery.PublicKeyLocator +// by first looking in the backing cache and, if that fails, +// making an HTTP request to the public key associated +// with the given discharge location. +func (kr *PublicKeyRing) PublicKeyForLocation(loc string) (*bakery.PublicKey, error) { + u, err := url.Parse(loc) + if err != nil { + return nil, errgo.Notef(err, "invalid discharge URL %q", loc) + } + if u.Scheme != "https" && !kr.allowInsecure { + return nil, errgo.Newf("untrusted discharge URL %q", loc) + } + k, err := kr.cache.PublicKeyForLocation(loc) + if err == nil { + return k, nil + } + k, err = PublicKeyForLocation(kr.client, loc) + if err != nil { + return nil, errgo.Mask(err) + } + if err := kr.cache.AddPublicKeyForLocation(loc, true, k); err != nil { + // Cannot happen in practice as it will only fail if + // loc is an invalid URL which we have already checked. + return nil, errgo.Notef(err, "cannot cache discharger URL %q", loc) + } + return k, nil +} + +// PublicKeyForLocation returns the public key from a macaroon +// discharge server running at the given location URL. +// Note that this is insecure if an http: URL scheme is used. +// If client is nil, http.DefaultClient will be used. 
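+//
+// For example (a sketch; the discharger URL is hypothetical):
+//
+//	key, err := PublicKeyForLocation(nil, "https://discharger.example.com")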
+func PublicKeyForLocation(client *http.Client, url string) (*bakery.PublicKey, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+	url = strings.TrimSuffix(url, "/") + "/publickey"
+	resp, err := client.Get(url)
+	if err != nil {
+		return nil, errgo.Notef(err, "cannot get public key from %q", url)
+	}
+	// Close the body on all return paths, including the error status
+	// case below, so the connection is not leaked.
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return nil, errgo.Newf("cannot get public key from %q: got status %s", url, resp.Status)
+	}
+	data, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, errgo.Notef(err, "failed to read response body from %q", url)
+	}
+	var pubkey struct {
+		PublicKey *bakery.PublicKey
+	}
+	err = json.Unmarshal(data, &pubkey)
+	if err != nil {
+		return nil, errgo.Notef(err, "failed to decode response from %q", url)
+	}
+	return pubkey.PublicKey, nil
+}
=== added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring_test.go'
--- src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/macaroon-bakery.v1/httpbakery/keyring_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,136 @@
+package httpbakery_test
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/http/httputil"
+	"net/url"
+
+	jujutesting "github.com/juju/testing"
+	gc "gopkg.in/check.v1"
+	"gopkg.in/errgo.v1"
+
+	"gopkg.in/macaroon-bakery.v1/bakery"
+	"gopkg.in/macaroon-bakery.v1/bakerytest"
+	"gopkg.in/macaroon-bakery.v1/httpbakery"
+)
+
+type KeyringSuite struct {
+	jujutesting.LoggingSuite
+}
+
+var _ = gc.Suite(&KeyringSuite{})
+
+func (s *KeyringSuite) TestCachePrepopulated(c *gc.C) {
+	cache := bakery.NewPublicKeyRing()
+	key, err := bakery.GenerateKey()
+	c.Assert(err, gc.IsNil)
+	cache.AddPublicKeyForLocation("https://0.1.2.3/", true, &key.Public)
+	kr := httpbakery.NewPublicKeyRing(nil, cache)
+	pk, err := kr.PublicKeyForLocation("https://0.1.2.3/")
+	c.Assert(err, gc.IsNil)
+	c.Assert(*pk, gc.Equals, key.Public)
+}
+
+func (s *KeyringSuite) TestCacheMiss(c *gc.C) {
+	d := bakerytest.NewDischarger(nil, nil)
+	defer d.Close()
+	kr := httpbakery.NewPublicKeyRing(nil, nil)
+
+	expectPublicKey := d.Service.PublicKey()
+	pk, err := kr.PublicKeyForLocation(d.Location())
+	c.Assert(err, gc.IsNil)
+	c.Assert(*pk, gc.Equals, *expectPublicKey)
+
+	// Close down the service and make sure that
+	// the key is cached.
+	d.Close()
+
+	pk, err = kr.PublicKeyForLocation(d.Location())
+	c.Assert(err, gc.IsNil)
+	c.Assert(*pk, gc.Equals, *expectPublicKey)
+}
+
+func (s *KeyringSuite) TestInsecureURL(c *gc.C) {
+	// Set up a discharger with a non-HTTPS access point.
+	d := bakerytest.NewDischarger(nil, nil)
+	defer d.Close()
+	httpsDischargeURL, err := url.Parse(d.Location())
+	c.Assert(err, gc.IsNil)
+
+	srv := httptest.NewServer(httputil.NewSingleHostReverseProxy(httpsDischargeURL))
+	defer srv.Close()
+
+	// Check that we are refused because it's an insecure URL.
+	kr := httpbakery.NewPublicKeyRing(nil, nil)
+	pk, err := kr.PublicKeyForLocation(srv.URL)
+	c.Assert(err, gc.ErrorMatches, `untrusted discharge URL "http://.*"`)
+	c.Assert(pk, gc.IsNil)
+
+	// Check that it does work when we've enabled AllowInsecure.
+ kr.AllowInsecure() + pk, err = kr.PublicKeyForLocation(srv.URL) + c.Assert(err, gc.IsNil) + c.Assert(*pk, gc.Equals, *d.Service.PublicKey()) +} + +func (s *KeyringSuite) TestCustomHTTPClient(c *gc.C) { + client := &http.Client{ + Transport: errorTransport{}, + } + kr := httpbakery.NewPublicKeyRing(client, nil) + pk, err := kr.PublicKeyForLocation("https://0.1.2.3/") + c.Assert(err, gc.ErrorMatches, `cannot get public key from "https://0.1.2.3/publickey": Get https://0.1.2.3/publickey: custom round trip error`) + c.Assert(pk, gc.IsNil) +} + +func (s *KeyringSuite) TestPublicKey(c *gc.C) { + d := bakerytest.NewDischarger(nil, noCaveatChecker) + defer d.Close() + client := httpbakery.NewHTTPClient() + publicKey, err := httpbakery.PublicKeyForLocation(client, d.Location()) + c.Assert(err, gc.IsNil) + expectedKey := d.Service.PublicKey() + c.Assert(publicKey, gc.DeepEquals, expectedKey) + + // Check that it works with client==nil. + publicKey, err = httpbakery.PublicKeyForLocation(nil, d.Location()) + c.Assert(err, gc.IsNil) + c.Assert(publicKey, gc.DeepEquals, expectedKey) +} + +func (s *KeyringSuite) TestPublicKeyWrongURL(c *gc.C) { + client := httpbakery.NewHTTPClient() + _, err := httpbakery.PublicKeyForLocation(client, "http://localhost:0") + c.Assert(err, gc.ErrorMatches, + `cannot get public key from "http://localhost:0/publickey": Get http://localhost:0/publickey: dial tcp 127.0.0.1:0: .*connection refused`) +} + +func (s *KeyringSuite) TestPublicKeyReturnsInvalidJSON(c *gc.C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "BADJSON") + })) + defer ts.Close() + client := httpbakery.NewHTTPClient() + _, err := httpbakery.PublicKeyForLocation(client, ts.URL) + c.Assert(err, gc.ErrorMatches, + fmt.Sprintf(`failed to decode response from "%s/publickey": invalid character 'B' looking for beginning of value`, ts.URL)) +} + +func (s *KeyringSuite) TestPublicKeyReturnsStatusInternalServerError(c *gc.C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + client := httpbakery.NewHTTPClient() + _, err := httpbakery.PublicKeyForLocation(client, ts.URL) + c.Assert(err, gc.ErrorMatches, + fmt.Sprintf(`cannot get public key from "%s/publickey": got status 500 Internal Server Error`, ts.URL)) +} + +type errorTransport struct{} + +func (errorTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return nil, errgo.New("custom round trip error") +} === added file 'src/gopkg.in/macaroon-bakery.v1/httpbakery/package_test.go' --- src/gopkg.in/macaroon-bakery.v1/httpbakery/package_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/macaroon-bakery.v1/httpbakery/package_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,11 @@ +package httpbakery_test + +import ( + "testing" + + gc "gopkg.in/check.v1" +) + +func TestPackage(t *testing.T) { + gc.TestingT(t) +} === modified file 'src/gopkg.in/mgo.v2/bson/bson.go' --- src/gopkg.in/mgo.v2/bson/bson.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/bson/bson.go 2016-03-22 15:18:22 +0000 @@ -189,15 +189,25 @@ // objectIdCounter is atomically incremented when generating a new ObjectId // using NewObjectId() function. It's used as a counter part of an id. -var objectIdCounter uint32 = 0 +var objectIdCounter uint32 = readRandomUint32() + +// readRandomUint32 returns a random objectIdCounter. 
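+// Seeding the counter with a random value (rather than zero) makes it
+// unlikely that two processes started within the same second on the
+// same machine will generate colliding ObjectIds.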
+func readRandomUint32() uint32 { + var b [4]byte + _, err := io.ReadFull(rand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("cannot read random object id: %v", err)) + } + return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) +} + // machineId stores machine id generated once and used in subsequent calls // to NewObjectId function. var machineId = readMachineId() -// readMachineId generates machine id and puts it into the machineId global -// variable. If this function fails to get the hostname, it will cause -// a runtime error. +// readMachineId generates and returns a machine id. +// If this function fails to get the hostname it will cause a runtime error. func readMachineId() []byte { var sum [3]byte id := sum[:] @@ -421,7 +431,8 @@ } // Marshal serializes the in value, which may be a map or a struct value. -// In the case of struct values, only exported fields will be serialized. +// In the case of struct values, only exported fields will be serialized, +// and the order of serialized fields will match that of the struct itself. // The lowercased field name is used as the key for each exported field, // but this behavior may be changed using the respective field tag. // The tag may also contain flags to tweak the marshalling behavior for === modified file 'src/gopkg.in/mgo.v2/bson/bson_test.go' --- src/gopkg.in/mgo.v2/bson/bson_test.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/bson/bson_test.go 2016-03-22 15:18:22 +0000 @@ -29,15 +29,18 @@ import ( "encoding/binary" + "encoding/hex" "encoding/json" "errors" "net/url" "reflect" + "strings" "testing" "time" . "gopkg.in/check.v1" "gopkg.in/mgo.v2/bson" + "gopkg.in/yaml.v2" ) func TestAll(t *testing.T) { @@ -582,8 +585,12 @@ "Can't marshal complex128 in a BSON document"}, {&structWithDupKeys{}, "Duplicated key 'name' in struct bson_test.structWithDupKeys"}, - {bson.Raw{0x0A, []byte{}}, - "Attempted to unmarshal Raw kind 10 as a document"}, + {bson.Raw{0xA, []byte{}}, + "Attempted to marshal Raw kind 10 as a document"}, + {bson.Raw{0x3, []byte{}}, + "Attempted to marshal empty Raw document"}, + {bson.M{"w": bson.Raw{0x3, []byte{}}}, + "Attempted to marshal empty Raw document"}, {&inlineCantPtr{&struct{ A, B int }{1, 2}}, "Option ,inline needs a struct value or map field"}, {&inlineDupName{1, struct{ A, B int }{2, 3}}, @@ -635,6 +642,10 @@ {123, "\x10name\x00\x08\x00\x00\x00", "Unmarshal needs a map or a pointer to a struct."}, + + {nil, + "\x08\x62\x00\x02", + "encoded boolean must be 1 or 0, found 2"}, } func (s *S) TestUnmarshalErrorItems(c *C) { @@ -687,7 +698,7 @@ } var corruptedData = []string{ - "\x04\x00\x00\x00\x00", // Shorter than minimum + "\x04\x00\x00\x00\x00", // Document shorter than minimum "\x06\x00\x00\x00\x00", // Not enough data "\x05\x00\x00", // Broken length "\x05\x00\x00\x00\xff", // Corrupted termination @@ -704,6 +715,15 @@ // String with corrupted end. wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"), + + // String with negative length (issue #116). + "\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00", + + // String with zero length (must include trailing '\x00') + "\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00", + + // Binary with negative length. 
+ "\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00", } func (s *S) TestUnmarshalMapDocumentTooShort(c *C) { @@ -979,6 +999,9 @@ type condStruct struct { V struct{ A []int } ",omitempty" } +type condRaw struct { + V bson.Raw ",omitempty" +} type shortInt struct { V int64 ",minsize" @@ -1232,6 +1255,9 @@ {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}}, {&condStruct{struct{ A []int }{}}, bson.M{}}, + {&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}}, + {&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}}, + {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}}, {&namedCondStr{}, map[string]string{}}, @@ -1254,6 +1280,9 @@ {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}}, {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}}, + // []byte <=> Binary + {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}}, + // []byte <=> MyBytes {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}}, {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}}, @@ -1324,6 +1353,9 @@ {&struct { V struct{ v time.Time } ",omitempty" }{}, map[string]interface{}{}}, + + // Attempt to marshal slice into RawD (issue #120). + {bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}}, } func testCrossPair(c *C, dump interface{}, load interface{}) { @@ -1535,6 +1567,62 @@ } } +type specTest struct { + Description string + Documents []struct { + Decoded map[string]interface{} + Encoded string + DecodeOnly bool `yaml:"decodeOnly"` + Error interface{} + } +} + +func (s *S) TestSpecTests(c *C) { + for _, data := range specTests { + var test specTest + err := yaml.Unmarshal([]byte(data), &test) + c.Assert(err, IsNil) + + c.Logf("Running spec test set %q", test.Description) + + for _, doc := range test.Documents { + if doc.Error != nil { + continue + } + c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded) + var decoded map[string]interface{} + encoded, err := hex.DecodeString(doc.Encoded) + c.Assert(err, IsNil) + err = bson.Unmarshal(encoded, &decoded) + c.Assert(err, IsNil) + c.Assert(decoded, DeepEquals, doc.Decoded) + } + + for _, doc := range test.Documents { + if doc.DecodeOnly || doc.Error != nil { + continue + } + c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded) + encoded, err := bson.Marshal(doc.Decoded) + c.Assert(err, IsNil) + c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded) + } + + for _, doc := range test.Documents { + if doc.Error == nil { + continue + } + c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error) + var decoded map[string]interface{} + encoded, err := hex.DecodeString(doc.Encoded) + c.Assert(err, IsNil) + err = bson.Unmarshal(encoded, &decoded) + c.Assert(err, NotNil) + c.Logf("Failed with: %v", err) + } + } +} + // -------------------------------------------------------------------------- // Some simple benchmarks. 
=== modified file 'src/gopkg.in/mgo.v2/bson/decode.go' --- src/gopkg.in/mgo.v2/bson/decode.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/bson/decode.go 2016-03-22 15:18:22 +0000 @@ -325,6 +325,10 @@ func (d *decoder) readSliceDoc(t reflect.Type) interface{} { tmp := make([]reflect.Value, 0, 8) elemType := t.Elem() + if elemType == typeRawDocElem { + d.dropElem(0x04) + return reflect.Zero(t).Interface() + } end := int(d.readInt32()) end += d.i - 4 @@ -437,7 +441,7 @@ start := d.i - if kind == '\x03' { + if kind == 0x03 { // Delegate unmarshaling of documents. outt := out.Type() outk := out.Kind() @@ -723,6 +727,12 @@ out.Set(reflect.ValueOf(u).Elem()) return true } + if outt == typeBinary { + if b, ok := in.([]byte); ok { + out.Set(reflect.ValueOf(Binary{Data: b})) + return true + } + } } return false @@ -776,10 +786,14 @@ } func (d *decoder) readBool() bool { - if d.readByte() == 1 { + b := d.readByte() + if b == 0 { + return false + } + if b == 1 { return true } - return false + panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) } func (d *decoder) readFloat64() float64 { @@ -816,9 +830,12 @@ } func (d *decoder) readBytes(length int32) []byte { + if length < 0 { + corrupted() + } start := d.i d.i += int(length) - if d.i > len(d.in) { + if d.i < start || d.i > len(d.in) { corrupted() } return d.in[start : start+int(length)] === modified file 'src/gopkg.in/mgo.v2/bson/encode.go' --- src/gopkg.in/mgo.v2/bson/encode.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/bson/encode.go 2016-03-22 15:18:22 +0000 @@ -101,7 +101,10 @@ if v.Type() == typeRaw { raw := v.Interface().(Raw) if raw.Kind != 0x03 && raw.Kind != 0x00 { - panic("Attempted to unmarshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") + panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") + } + if len(raw.Data) == 0 { + panic("Attempted to marshal empty Raw document") } e.addBytes(raw.Data...) return @@ -389,6 +392,9 @@ if kind == 0x00 { kind = 0x03 } + if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F { + panic("Attempted to marshal empty Raw document") + } e.addElemName(kind, name) e.addBytes(s.Data...) === added directory 'src/gopkg.in/mgo.v2/bson/specdata' === added file 'src/gopkg.in/mgo.v2/bson/specdata/update.sh' --- src/gopkg.in/mgo.v2/bson/specdata/update.sh 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/mgo.v2/bson/specdata/update.sh 2016-03-22 15:18:22 +0000 @@ -0,0 +1,27 @@ +#!/bin/sh + +set -e + +if [ ! 
-d specifications ]; then
+	git clone -b bson git@github.com:jyemin/specifications
+fi
+
+TESTFILE="../specdata_test.go"
+
+cat <<END > $TESTFILE
+package bson_test
+
+var specTests = []string{
+END
+
+for file in specifications/source/bson/tests/*.yml; do
+	(
+		echo '`'
+		cat $file
+		echo -n '`,'
+	) >> $TESTFILE
+done
+
+echo '}' >> $TESTFILE
+
+gofmt -w $TESTFILE
=== added file 'src/gopkg.in/mgo.v2/bson/specdata_test.go'
--- src/gopkg.in/mgo.v2/bson/specdata_test.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/mgo.v2/bson/specdata_test.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,241 @@
+package bson_test
+
+var specTests = []string{
+	`
+---
+description: "Array type"
+documents:
+    -
+        decoded:
+            a : []
+        encoded: 0D000000046100050000000000
+    -
+        decoded:
+            a: [10]
+        encoded: 140000000461000C0000001030000A0000000000
+    -
+        # Decode an array that uses an empty string as the key
+        decodeOnly : true
+        decoded:
+            a: [10]
+        encoded: 130000000461000B00000010000A0000000000
+    -
+        # Decode an array that uses a non-numeric string as the key
+        decodeOnly : true
+        decoded:
+            a: [10]
+        encoded: 150000000461000D000000106162000A0000000000
+
+
+`, `
+---
+description: "Boolean type"
+documents:
+    -
+        encoded: "090000000862000100"
+        decoded: { "b" : true }
+    -
+        encoded: "090000000862000000"
+        decoded: { "b" : false }
+
+
+	`, `
+---
+description: "Corrupted BSON"
+documents:
+    -
+        encoded: "09000000016600"
+        error: "truncated double"
+    -
+        encoded: "09000000026600"
+        error: "truncated string"
+    -
+        encoded: "09000000036600"
+        error: "truncated document"
+    -
+        encoded: "09000000046600"
+        error: "truncated array"
+    -
+        encoded: "09000000056600"
+        error: "truncated binary"
+    -
+        encoded: "09000000076600"
+        error: "truncated objectid"
+    -
+        encoded: "09000000086600"
+        error: "truncated boolean"
+    -
+        encoded: "09000000096600"
+        error: "truncated date"
+    -
+        encoded: "090000000b6600"
+        error: "truncated regex"
+    -
+        encoded: "090000000c6600"
+        error: "truncated db pointer"
+    -
+        encoded: "0C0000000d6600"
+        error: "truncated javascript"
+    -
+        encoded: "0C0000000e6600"
+        error: "truncated symbol"
+    -
+        encoded: "0C0000000f6600"
+        error: "truncated javascript with scope"
+    -
+        encoded: "0C000000106600"
+        error: "truncated int32"
+    -
+        encoded: "0C000000116600"
+        error: "truncated timestamp"
+    -
+        encoded: "0C000000126600"
+        error: "truncated int64"
+    -
+        encoded: "0400000000"
+        error: basic
+    -
+        encoded: "0500000001"
+        error: basic
+    -
+        encoded: "05000000"
+        error: basic
+    -
+        encoded: "0700000002610078563412"
+        error: basic
+    -
+        encoded: "090000001061000500"
+        error: basic
+    -
+        encoded: "00000000000000000000"
+        error: basic
+    -
+        encoded: "1300000002666f6f00040000006261720000"
+        error: "basic"
+    -
+        encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
+        error: basic
+    -
+        encoded: "1500000003666f6f000c0000000862617200010000"
+        error: basic
+    -
+        encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
+        error: basic
+    -
+        encoded: "1000000002610004000000616263ff00"
+        error: string is not null-terminated
+    -
+        encoded: "0c0000000200000000000000"
+        error: bad_string_length
+    -
+        encoded: "120000000200ffffffff666f6f6261720000"
+        error: bad_string_length
+    -
+        encoded: "0c0000000e00000000000000"
+        error: bad_string_length
+    -
+        encoded: "120000000e00ffffffff666f6f6261720000"
+        error: bad_string_length
+    -
+        encoded: "180000000c00fa5bd841d6585d9900"
+        error: ""
+    -
+        encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
+        error: bad_string_length
+    -
+        encoded: "0c0000000d00000000000000"
+ error: bad_string_length + - + encoded: "0c0000000d00ffffffff0000" + error: bad_string_length + - + encoded: "1c0000000f001500000000000000000c000000020001000000000000" + error: bad_string_length + - + encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000" + error: bad_string_length + - + encoded: "1c0000000f001500000001000000000c000000020000000000000000" + error: bad_string_length + - + encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000" + error: bad_string_length + - + encoded: "0E00000008616263646566676869707172737475" + error: "Run-on CString" + - + encoded: "0100000000" + error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)" + - + encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000" + error: "One object, but with object size listed smaller than it is in the data" + - + encoded: "05000000" + error: "One object, missing the EOO at the end" + - + encoded: "0500000001" + error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01" + - + encoded: "05000000ff" + error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff" + - + encoded: "0500000070" + error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70" + - + encoded: "07000000000000" + error: "Invalid BSON type low range" + - + encoded: "07000000800000" + error: "Invalid BSON type high range" + - + encoded: "090000000862000200" + error: "Invalid boolean value of 2" + - + encoded: "09000000086200ff00" + error: "Invalid boolean value of -1" + `, ` +--- +description: "Int32 type" +documents: + - + decoded: + i: -2147483648 + encoded: 0C0000001069000000008000 + - + decoded: + i: 2147483647 + encoded: 0C000000106900FFFFFF7F00 + - + decoded: + i: -1 + encoded: 0C000000106900FFFFFFFF00 + - + decoded: + i: 0 + encoded: 0C0000001069000000000000 + - + decoded: + i: 1 + encoded: 0C0000001069000100000000 + +`, ` +--- +description: "String type" +documents: + - + decoded: + s : "" + encoded: 0D000000027300010000000000 + - + decoded: + s: "a" + encoded: 0E00000002730002000000610000 + - + decoded: + s: "This is a string" + encoded: 1D0000000273001100000054686973206973206120737472696E670000 + - + decoded: + s: "κόσμε" + encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000 +`} === modified file 'src/gopkg.in/mgo.v2/bulk.go' --- src/gopkg.in/mgo.v2/bulk.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/bulk.go 2016-03-22 15:18:22 +0000 @@ -1,10 +1,14 @@ package mgo +import ( + "bytes" + + "gopkg.in/mgo.v2/bson" +) + // Bulk represents an operation that can be prepared with several // orthogonal changes before being delivered to the server. // -// WARNING: This API is still experimental. -// // Relevant documentation: // // http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api @@ -12,19 +16,37 @@ type Bulk struct { c *Collection ordered bool - inserts []interface{} -} + actions []bulkAction +} + +type bulkOp int + +const ( + bulkInsert bulkOp = iota + 1 + bulkUpdate + bulkUpdateAll +) + +type bulkAction struct { + op bulkOp + docs []interface{} +} + +type bulkUpdateOp []interface{} // BulkError holds an error returned from running a Bulk operation. // // TODO: This is private for the moment, until we understand exactly how // to report these multi-errors in a useful and convenient way. type bulkError struct { - err error + errs []error } // BulkResult holds the results for a bulk operation. 
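+//
+// A typical sequence producing a BulkResult looks like this (a sketch;
+// coll, doc1 and doc2 are hypothetical):
+//
+//	bulk := coll.Bulk()
+//	bulk.Insert(doc1, doc2)
+//	bulk.UpdateAll(bson.M{"n": 1}, bson.M{"$inc": bson.M{"n": 1}})
+//	result, err := bulk.Run()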
type BulkResult struct { + Matched int + Modified int // Available only for MongoDB 2.6+ + // Be conservative while we understand exactly how to report these // results in a useful and convenient way, and also how to emulate // them with prior servers. @@ -32,7 +54,29 @@ } func (e *bulkError) Error() string { - return e.err.Error() + if len(e.errs) == 0 { + return "invalid bulkError instance: no errors" + } + if len(e.errs) == 1 { + return e.errs[0].Error() + } + msgs := make(map[string]bool) + for _, err := range e.errs { + msgs[err.Error()] = true + } + if len(msgs) == 1 { + for msg := range msgs { + return msg + } + } + var buf bytes.Buffer + buf.WriteString("multiple errors in bulk operation:\n") + for msg := range msgs { + buf.WriteString(" - ") + buf.WriteString(msg) + buf.WriteByte('\n') + } + return buf.String() } // Bulk returns a value to prepare the execution of a bulk operation. @@ -52,20 +96,163 @@ b.ordered = false } +func (b *Bulk) action(op bulkOp) *bulkAction { + if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { + return &b.actions[len(b.actions)-1] + } + if !b.ordered { + for i := range b.actions { + if b.actions[i].op == op { + return &b.actions[i] + } + } + } + b.actions = append(b.actions, bulkAction{op: op}) + return &b.actions[len(b.actions)-1] +} + // Insert queues up the provided documents for insertion. func (b *Bulk) Insert(docs ...interface{}) { - b.inserts = append(b.inserts, docs...) + action := b.action(bulkInsert) + action.docs = append(action.docs, docs...) +} + +// Update queues up the provided pairs of updating instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair matches exactly one document for updating at most. +func (b *Bulk) Update(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.Update requires an even number of parameters") + } + action := b.action(bulkUpdate) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + }) + } +} + +// UpdateAll queues up the provided pairs of updating instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair updates all documents matching the selector. +func (b *Bulk) UpdateAll(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.UpdateAll requires an even number of parameters") + } + action := b.action(bulkUpdate) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + Flags: 2, + Multi: true, + }) + } +} + +// Upsert queues up the provided pairs of upserting instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair matches exactly one document for updating at most. 
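The flat pair convention above is easiest to see in use. Below is a minimal sketch of queuing updates and reading the result; the dial address and the mydb.mycoll collection mirror the test fixtures and are assumptions, not requirements:

	package main

	import (
		"fmt"

		"gopkg.in/mgo.v2"
		"gopkg.in/mgo.v2/bson"
	)

	func main() {
		// The address mirrors the test fixtures and is an assumption.
		session, err := mgo.Dial("localhost:40001")
		if err != nil {
			panic(err)
		}
		defer session.Close()
		coll := session.DB("mydb").C("mycoll")

		bulk := coll.Bulk()
		// Pairs are flattened: selector, change, selector, change, ...
		bulk.Update(
			bson.M{"n": 1}, bson.M{"$set": bson.M{"n": 10}},
			bson.M{"n": 2}, bson.M{"$set": bson.M{"n": 20}},
		)
		// UpdateAll changes every document matching its selector.
		bulk.UpdateAll(bson.M{"n": bson.M{"$gte": 10}}, bson.M{"$inc": bson.M{"seen": 1}})

		result, err := bulk.Run()
		if err != nil {
			panic(err)
		}
		// Modified is only reported by MongoDB 2.6+ servers.
		fmt.Println(result.Matched, result.Modified)
	}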
+func (b *Bulk) Upsert(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.Update requires an even number of parameters") + } + action := b.action(bulkUpdate) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + Flags: 1, + Upsert: true, + }) + } } // Run runs all the operations queued up. +// +// If an error is reported on an unordered bulk operation, the error value may +// be an aggregation of all issues observed. As an exception to that, Insert +// operations running on MongoDB versions prior to 2.6 will report the last +// error only due to a limitation in the wire protocol. func (b *Bulk) Run() (*BulkResult, error) { - op := &insertOp{b.c.FullName, b.inserts, 0} + var result BulkResult + var berr bulkError + var failed bool + for i := range b.actions { + action := &b.actions[i] + var ok bool + switch action.op { + case bulkInsert: + ok = b.runInsert(action, &result, &berr) + case bulkUpdate: + ok = b.runUpdate(action, &result, &berr) + default: + panic("unknown bulk operation") + } + if !ok { + failed = true + if b.ordered { + break + } + } + } + if failed { + return nil, &berr + } + return &result, nil +} + +func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *bulkError) bool { + op := &insertOp{b.c.FullName, action.docs, 0} if !b.ordered { op.flags = 1 // ContinueOnError } - _, err := b.c.writeQuery(op) - if err != nil { - return nil, &bulkError{err} - } - return &BulkResult{}, nil + lerr, err := b.c.writeOp(op, b.ordered) + return b.checkSuccess(berr, lerr, err) +} + +func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *bulkError) bool { + ok := true + for _, op := range action.docs { + lerr, err := b.c.writeOp(op, b.ordered) + if !b.checkSuccess(berr, lerr, err) { + ok = false + if b.ordered { + break + } + } + result.Matched += lerr.N + result.Modified += lerr.modified + } + return ok +} + +func (b *Bulk) checkSuccess(berr *bulkError, lerr *LastError, err error) bool { + if lerr != nil && len(lerr.errors) > 0 { + berr.errs = append(berr.errs, lerr.errors...) + return false + } else if err != nil { + berr.errs = append(berr.errs, err) + return false + } + return true } === modified file 'src/gopkg.in/mgo.v2/bulk_test.go' --- src/gopkg.in/mgo.v2/bulk_test.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/bulk_test.go 2016-03-22 15:18:22 +0000 @@ -1,6 +1,6 @@ // mgo - MongoDB driver for Go // -// Copyright (c) 2010-2014 - Gustavo Niemeyer +// Copyright (c) 2010-2015 - Gustavo Niemeyer // // All rights reserved. // @@ -61,6 +61,7 @@ bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3}) _, err = bulk.Run() c.Assert(err, ErrorMatches, ".*duplicate key.*") + c.Assert(mgo.IsDup(err), Equals, true) type doc struct { N int `_id` @@ -129,3 +130,215 @@ c.Assert(err, IsNil) c.Assert(res.Id, Equals, 1500) } + +func (s *S) TestBulkError(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // If it's just the same string multiple times, join it into a single message. 
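+	// (That joining also keeps mgo.IsDup meaningful for bulk runs:
+	// IsDup unwraps the aggregated error and reports true only when
+	// every underlying error is a duplicate-key failure, which the
+	// assertions below rely on.)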
+ bulk := coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}) + _, err = bulk.Run() + c.Assert(err, ErrorMatches, ".*duplicate key.*") + c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key") + c.Assert(mgo.IsDup(err), Equals, true) + + // With matching errors but different messages, present them all. + bulk = coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"}) + _, err = bulk.Run() + if s.versionAtLeast(2, 6) { + c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$") + c.Assert(err, ErrorMatches, "(?s).*dupone.*") + c.Assert(err, ErrorMatches, "(?s).*duptwo.*") + } else { + // Wire protocol query doesn't return all errors. + c.Assert(err, ErrorMatches, ".*duplicate.*") + } + c.Assert(mgo.IsDup(err), Equals, true) + + // With mixed errors, present them all. + bulk = coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"_id": 1}, M{"_id": []int{2}}) + _, err = bulk.Run() + if s.versionAtLeast(2, 6) { + c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$") + } else { + // Wire protocol query doesn't return all errors. + c.Assert(err, ErrorMatches, ".*array.*") + } + c.Assert(mgo.IsDup(err), Equals, false) +} + +func (s *S) TestBulkUpdate(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}}) + bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}}) + bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. + bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r.Matched, Equals, 4) + if s.versionAtLeast(2, 6) { + c.Assert(r.Modified, Equals, 3) + } + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}}) +} + +func (s *S) TestBulkUpdateError(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Update( + M{"n": 1}, M{"$set": M{"n": 10}}, + M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, + M{"n": 3}, M{"$set": M{"n": 30}}, + ) + r, err := bulk.Run() + c.Assert(err, ErrorMatches, ".*_id.*") + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}}) +} + +func (s *S) TestBulkUpdateErrorUnordered(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Unordered() + bulk.Update( + M{"n": 1}, M{"$set": M{"n": 10}}, + M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}}, + M{"n": 3}, M{"$set": M{"n": 30}}, + ) + r, err := bulk.Run() + c.Assert(err, ErrorMatches, ".*_id.*") + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{2}, {10}, 
{30}}) +} + +func (s *S) TestBulkUpdateAll(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}}) + bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) + bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match. + bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r.Matched, Equals, 6) + if s.versionAtLeast(2, 6) { + c.Assert(r.Modified, Equals, 5) + } + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}}) +} + +func (s *S) TestBulkMixedUnordered(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + // Abuse undefined behavior to ensure the desired implementation is in place. + bulk := coll.Bulk() + bulk.Unordered() + bulk.Insert(M{"n": 1}) + bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}}) + bulk.Insert(M{"n": 2}) + bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}}) + bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}}) + bulk.Insert(M{"n": 3}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r.Matched, Equals, 3) + if s.versionAtLeast(2, 6) { + c.Assert(r.Modified, Equals, 3) + } + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}}) +} + +func (s *S) TestBulkUpsert(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}) + c.Assert(err, IsNil) + + bulk := coll.Bulk() + bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}}) + bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}}) + r, err := bulk.Run() + c.Assert(err, IsNil) + c.Assert(r, FitsTypeOf, &mgo.BulkResult{}) + + type doc struct{ N int } + var res []doc + err = coll.Find(nil).Sort("n").All(&res) + c.Assert(err, IsNil) + c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}}) +} === modified file 'src/gopkg.in/mgo.v2/cluster.go' --- src/gopkg.in/mgo.v2/cluster.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/cluster.go 2016-03-22 15:18:22 +0000 @@ -30,6 +30,8 @@ "errors" "fmt" "net" + "strconv" + "strings" "sync" "time" @@ -408,14 +410,59 @@ } func resolveAddr(addr string) (*net.TCPAddr, error) { - // This hack allows having a timeout on resolution. - conn, err := net.DialTimeout("udp", addr, 10*time.Second) - if err != nil { + // Simple cases that do not need actual resolution. Works with IPv4 and v6. + if host, port, err := net.SplitHostPort(addr); err == nil { + if port, _ := strconv.Atoi(port); port > 0 { + zone := "" + if i := strings.LastIndex(host, "%"); i >= 0 { + zone = host[i+1:] + host = host[:i] + } + ip := net.ParseIP(host) + if ip != nil { + return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil + } + } + } + + // Attempt to resolve IPv4 and v6 concurrently. + addrChan := make(chan *net.TCPAddr, 2) + for _, network := range []string{"udp4", "udp6"} { + network := network + go func() { + // The unfortunate UDP dialing hack allows having a timeout on address resolution. 
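+			// Dialing UDP sends no packets, so the cost here is purely
+			// the name lookup and the timeout effectively bounds DNS
+			// resolution. The channel's capacity of two lets the slower
+			// lookup finish later without leaking this goroutine.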
+ conn, err := net.DialTimeout(network, addr, 10*time.Second) + if err != nil { + addrChan <- nil + } else { + addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) + conn.Close() + } + }() + } + + // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. + tcpaddr := <-addrChan + if tcpaddr == nil || len(tcpaddr.IP) != 4 { + var timeout <-chan time.Time + if tcpaddr != nil { + // Don't wait too long if an IPv6 address is known. + timeout = time.After(50 * time.Millisecond) + } + select { + case <-timeout: + case tcpaddr2 := <-addrChan: + if tcpaddr == nil || tcpaddr2 != nil { + // It's an IPv4 address or the only known address. Use it. + tcpaddr = tcpaddr2 + } + } + } + + if tcpaddr == nil { log("SYNC Failed to resolve server address: ", addr) return nil, errors.New("failed to resolve server address: " + addr) } - tcpaddr := (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) - conn.Close() if tcpaddr.String() != addr { debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) } @@ -512,8 +559,8 @@ } cluster.Lock() - ml := cluster.masters.Len() - logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", ml, cluster.servers.Len()-ml) + mastersLen := cluster.masters.Len() + logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) // Update dynamic seeds, but only if we have any good servers. Otherwise, // leave them alone for better chances of a successful sync in the future. @@ -531,17 +578,17 @@ // AcquireSocket returns a socket to a server in the cluster. If slaveOk is // true, it will attempt to return a socket to a slave server. If it is // false, the socket will necessarily be to a master server. -func (cluster *mongoCluster) AcquireSocket(slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { +func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) { var started time.Time var syncCount uint warnedLimit := false for { cluster.RLock() for { - ml := cluster.masters.Len() - sl := cluster.servers.Len() - debugf("Cluster has %d known masters and %d known slaves.", ml, sl-ml) - if ml > 0 || slaveOk && sl > 0 { + mastersLen := cluster.masters.Len() + slavesLen := cluster.servers.Len() - mastersLen + debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) + if !(slaveOk && mode == Secondary) && mastersLen > 0 || slaveOk && slavesLen > 0 { break } if started.IsZero() { @@ -561,9 +608,9 @@ var server *mongoServer if slaveOk { - server = cluster.servers.BestFit(serverTags) + server = cluster.servers.BestFit(mode, serverTags) } else { - server = cluster.masters.BestFit(nil) + server = cluster.masters.BestFit(mode, nil) } cluster.RUnlock() === modified file 'src/gopkg.in/mgo.v2/cluster_test.go' --- src/gopkg.in/mgo.v2/cluster_test.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/cluster_test.go 2016-03-22 15:18:22 +0000 @@ -158,7 +158,7 @@ c.Assert(stats.ReceivedDocs, Equals, 1) } -func (s *S) TestSetModeStrong(c *C) { +func (s *S) TestModeStrong(c *C) { session, err := mgo.Dial("localhost:40012") c.Assert(err, IsNil) defer session.Close() @@ -195,7 +195,7 @@ c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestSetModeMonotonic(c *C) { +func (s *S) TestModeMonotonic(c *C) { // Must necessarily connect to a slave, otherwise the // master 
connection will be available first. session, err := mgo.Dial("localhost:40012") @@ -206,20 +206,19 @@ c.Assert(session.Mode(), Equals, mgo.Monotonic) - result := M{} + var result struct{ IsMaster bool } cmd := session.DB("admin").C("$cmd") err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, false) + c.Assert(result.IsMaster, Equals, false) coll := session.DB("mydb").C("mycoll") err = coll.Insert(M{"a": 1}) c.Assert(err, IsNil) - result = M{} err = cmd.Find(M{"ismaster": 1}).One(&result) c.Assert(err, IsNil) - c.Assert(result["ismaster"], Equals, true) + c.Assert(result.IsMaster, Equals, true) // Wait since the sync also uses sockets. for len(session.LiveServers()) != 3 { @@ -238,7 +237,7 @@ c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestSetModeMonotonicAfterStrong(c *C) { +func (s *S) TestModeMonotonicAfterStrong(c *C) { // Test that a strong session shifting to a monotonic // one preserves the socket untouched. @@ -271,7 +270,7 @@ c.Assert(result["ismaster"], Equals, true) } -func (s *S) TestSetModeStrongAfterMonotonic(c *C) { +func (s *S) TestModeStrongAfterMonotonic(c *C) { // Test that shifting from Monotonic to Strong while // using a slave socket will keep the socket reserved // until the master socket is necessary, so that no @@ -311,7 +310,7 @@ c.Assert(result["ismaster"], Equals, true) } -func (s *S) TestSetModeMonotonicWriteOnIteration(c *C) { +func (s *S) TestModeMonotonicWriteOnIteration(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") @@ -356,7 +355,7 @@ c.Assert(i, Equals, len(ns)) } -func (s *S) TestSetModeEventual(c *C) { +func (s *S) TestModeEventual(c *C) { // Must necessarily connect to a slave, otherwise the // master connection will be available first. session, err := mgo.Dial("localhost:40012") @@ -393,7 +392,7 @@ c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestSetModeEventualAfterStrong(c *C) { +func (s *S) TestModeEventualAfterStrong(c *C) { // Test that a strong session shifting to an eventual // one preserves the socket untouched. 
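The renames above follow the new read-preference modes. Selecting one is a single call on the session; a minimal sketch, assuming a replica set reachable on the fixture address used by these tests:

	package main

	import (
		"fmt"

		"gopkg.in/mgo.v2"
	)

	func main() {
		session, err := mgo.Dial("localhost:40011") // fixture address, an assumption
		if err != nil {
			panic(err)
		}
		defer session.Close()

		// Prefer a secondary for reads, falling back to the primary
		// when none is available; true refreshes any reserved sockets
		// so the new mode takes effect immediately.
		session.SetMode(mgo.SecondaryPreferred, true)

		var status struct{ Host string }
		if err := session.Run("serverStatus", &status); err != nil {
			panic(err)
		}
		fmt.Println("served by:", status.Host)
	}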
@@ -431,7 +430,7 @@ c.Assert(stats.SocketsInUse, Equals, 0) } -func (s *S) TestPrimaryShutdownStrong(c *C) { +func (s *S) TestModeStrongFallover(c *C) { if *fast { c.Skip("-fast") } @@ -472,7 +471,7 @@ c.Assert(err, IsNil) } -func (s *S) TestPrimaryHiccup(c *C) { +func (s *S) TestModePrimaryHiccup(c *C) { if *fast { c.Skip("-fast") } @@ -523,7 +522,7 @@ c.Assert(err, IsNil) } -func (s *S) TestPrimaryShutdownMonotonic(c *C) { +func (s *S) TestModeMonotonicFallover(c *C) { if *fast { c.Skip("-fast") } @@ -566,7 +565,7 @@ c.Assert(result.Host, Not(Equals), host) } -func (s *S) TestPrimaryShutdownMonotonicWithSlave(c *C) { +func (s *S) TestModeMonotonicWithSlaveFallover(c *C) { if *fast { c.Skip("-fast") } @@ -645,7 +644,7 @@ c.Assert(ssresult.Host, Not(Equals), master) } -func (s *S) TestPrimaryShutdownEventual(c *C) { +func (s *S) TestModeEventualFallover(c *C) { if *fast { c.Skip("-fast") } @@ -682,6 +681,192 @@ c.Assert(result.Host, Not(Equals), master) } +func (s *S) TestModeSecondaryJustPrimary(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Secondary, true) + + err = session.Ping() + c.Assert(err, ErrorMatches, "no reachable servers") +} + +func (s *S) TestModeSecondaryPreferredJustPrimary(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.SecondaryPreferred, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) +} + +func (s *S) TestModeSecondaryPreferredFallover(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + // Ensure secondaries are available for being picked up. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for cluster sync to finish...") + time.Sleep(5e8) + } + + session.SetMode(mgo.SecondaryPreferred, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Not(Equals), "rs1a") + secondary := result.Host + + // Should connect to the primary when needed. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // Wait a bit for this to be synchronized to slaves. + time.Sleep(3 * time.Second) + + // Kill the primary. + s.Stop("localhost:40011") + + // It can still talk to the selected secondary. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(result.Host, Equals, secondary) + + // But cannot speak to the primary until reset. + coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, Equals, io.EOF) + + session.Refresh() + + // Can still talk to a secondary. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Not(Equals), "rs1a") + + s.StartAll() + + // Should now be able to talk to the primary again. 
+ coll = session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) +} + +func (s *S) TestModePrimaryPreferredFallover(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.PrimaryPreferred, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Equals, "rs1a") + + // Kill the primary. + s.Stop("localhost:40011") + + // Should now fail as there was a primary socket in use already. + err = session.Run("serverStatus", result) + c.Assert(err, Equals, io.EOF) + + // Refresh so the reserved primary socket goes away. + session.Refresh() + + // Should be able to talk to the secondary. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + + s.StartAll() + + // Should wait for the new primary to become available. + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + // And should use the new primary in general, as it is preferred. + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Equals, "rs1a") +} + +func (s *S) TestModePrimaryFallover(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetSyncTimeout(3 * time.Second) + + session.SetMode(mgo.Primary, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Equals, "rs1a") + + // Kill the primary. + s.Stop("localhost:40011") + + session.Refresh() + + err = session.Ping() + c.Assert(err, ErrorMatches, "no reachable servers") +} + +func (s *S) TestModeSecondary(c *C) { + if *fast { + c.Skip("-fast") + } + + session, err := mgo.Dial("localhost:40011") + c.Assert(err, IsNil) + defer session.Close() + + session.SetMode(mgo.Secondary, true) + + result := &struct{ Host string }{} + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(supvName(result.Host), Not(Equals), "rs1a") + secondary := result.Host + + coll := session.DB("mydb").C("mycoll") + err = coll.Insert(M{"a": 1}) + c.Assert(err, IsNil) + + err = session.Run("serverStatus", result) + c.Assert(err, IsNil) + c.Assert(result.Host, Equals, secondary) +} + func (s *S) TestPreserveSocketCountOnSync(c *C) { if *fast { c.Skip("-fast") @@ -1123,10 +1308,8 @@ opc23b, err := getOpCounters("localhost:40023") c.Assert(err, IsNil) - masterPort := master[strings.Index(master, ":")+1:] - var masterDelta, slaveDelta int - switch masterPort { + switch hostPort(master) { case "40021": masterDelta = opc21b.Query - opc21a.Query slaveDelta = (opc22b.Query - opc22a.Query) + (opc23b.Query - opc23a.Query) @@ -1176,10 +1359,23 @@ slaveAddr := result.Me defer func() { + config := map[string]string{ + "40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`, + "40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: "b"}}`, + "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`, + } master.Refresh() - master.Run(bson.D{{"$eval", `rs.add("` + slaveAddr + `")`}}, nil) + master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil) master.Close() slave.Close() + + // Ensure suite syncs up with the changes before next test. 
+ s.Stop(":40201") + s.StartAll() + time.Sleep(8 * time.Second) + // TODO Find a better way to find out when mongos is fully aware that all + // servers are up. Without that follow up tests that depend on mongos will + // break due to their expectation of things being in a working state. }() c.Logf("========== Removing slave: %s ==========", slaveAddr) @@ -1460,7 +1656,7 @@ } func (s *S) TestNearestSecondary(c *C) { - defer mgo.HackPingDelay(3 * time.Second)() + defer mgo.HackPingDelay(300 * time.Millisecond)() rs1a := "127.0.0.1:40011" rs1b := "127.0.0.1:40012" @@ -1522,6 +1718,68 @@ } } +func (s *S) TestNearestServer(c *C) { + defer mgo.HackPingDelay(300 * time.Millisecond)() + + rs1a := "127.0.0.1:40011" + rs1b := "127.0.0.1:40012" + rs1c := "127.0.0.1:40013" + + session, err := mgo.Dial(rs1a) + c.Assert(err, IsNil) + defer session.Close() + + s.Freeze(rs1a) + s.Freeze(rs1b) + + // Extra delay to ensure the first two servers get penalized. + time.Sleep(500 * time.Millisecond) + + // Release them. + s.Thaw(rs1a) + s.Thaw(rs1b) + + // Wait for everyone to come up. + for len(session.LiveServers()) != 3 { + c.Log("Waiting for all servers to be alive...") + time.Sleep(100 * time.Millisecond) + } + + session.SetMode(mgo.Nearest, true) + var result struct{ Host string } + + // See which server picks the line, several times to avoid chance. + for i := 0; i < 10; i++ { + session.Refresh() + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, hostPort(rs1c)) + } + + if *fast { + // Don't hold back for several seconds. + return + } + + // Now hold the two secondaries for long enough to penalize them. + s.Freeze(rs1b) + s.Freeze(rs1c) + time.Sleep(5 * time.Second) + s.Thaw(rs1b) + s.Thaw(rs1c) + + // Wait for the ping to be processed. + time.Sleep(500 * time.Millisecond) + + // Repeating the test should now pick the primary server consistently. + for i := 0; i < 10; i++ { + session.Refresh() + err = session.Run("serverStatus", &result) + c.Assert(err, IsNil) + c.Assert(hostPort(result.Host), Equals, hostPort(rs1a)) + } +} + func (s *S) TestConnectCloseConcurrency(c *C) { restore := mgo.HackPingDelay(500 * time.Millisecond) defer restore() === modified file 'src/gopkg.in/mgo.v2/doc.go' --- src/gopkg.in/mgo.v2/doc.go 2014-08-20 15:00:12 +0000 +++ src/gopkg.in/mgo.v2/doc.go 2016-03-22 15:18:22 +0000 @@ -20,7 +20,7 @@ // // New sessions are typically created by calling session.Copy on the // initial session obtained at dial time. These new sessions will share -// the same cluster information and connection cache, and may be easily +// the same cluster information and connection pool, and may be easily // handed into other methods and functions for organizing logic. // Every session created must have its Close method called at the end // of its life time, so its resources may be put back in the pool or === modified file 'src/gopkg.in/mgo.v2/gridfs.go' --- src/gopkg.in/mgo.v2/gridfs.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/gridfs.go 2016-03-22 15:18:22 +0000 @@ -692,7 +692,7 @@ // Read reads into b the next available data from the file and // returns the number of bytes written and an error in case // something wrong happened. At the end of the file, n will -// be zero and err will be set to os.EOF. +// be zero and err will be set to io.EOF. // // The parameters and behavior of this function turn the file // into an io.Reader. 
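The gridfs.go hunk above corrects the Read documentation to point at io.EOF. Draining a file accordingly looks like the sketch below; the dial address, GridFS prefix, and file name are illustrative assumptions:

	package main

	import (
		"fmt"
		"io"

		"gopkg.in/mgo.v2"
	)

	func main() {
		session, err := mgo.Dial("localhost:40001")
		if err != nil {
			panic(err)
		}
		defer session.Close()

		file, err := session.DB("mydb").GridFS("fs").Open("myfile.txt")
		if err != nil {
			panic(err)
		}
		defer file.Close()

		buf := make([]byte, 4096)
		total := 0
		for {
			n, err := file.Read(buf)
			total += n
			if err == io.EOF {
				break // end of file, per the corrected documentation
			}
			if err != nil {
				panic(err)
			}
		}
		fmt.Println("read", total, "bytes")
	}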
=== modified file 'src/gopkg.in/mgo.v2/server.go' --- src/gopkg.in/mgo.v2/server.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/server.go 2016-03-22 15:18:22 +0000 @@ -84,9 +84,8 @@ sync: sync, dial: dial, info: &defaultServerInfo, + pingValue: time.Hour, // Push it back before an actual ping. } - // Once so the server gets a ping value, then loop in background. - server.pinger(false) go server.pinger(true) return server } @@ -274,7 +273,7 @@ return false } -var pingDelay = 5 * time.Second +var pingDelay = 15 * time.Second func (server *mongoServer) pinger(loop bool) { var delay time.Duration @@ -297,7 +296,7 @@ time.Sleep(delay) } op := op - socket, _, err := server.AcquireSocket(0, 3*delay) + socket, _, err := server.AcquireSocket(0, delay) if err == nil { start := time.Now() _, _ = socket.SimpleQuery(&op) @@ -400,7 +399,7 @@ // BestFit returns the best guess of what would be the most interesting // server to perform operations on at this point in time. -func (servers *mongoServers) BestFit(serverTags []bson.D) *mongoServer { +func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer { var best *mongoServer for _, next := range servers.slice { if best == nil { @@ -417,9 +416,9 @@ switch { case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags): // Must have requested tags. - case next.info.Master != best.info.Master: - // Prefer slaves. - swap = best.info.Master + case next.info.Master != best.info.Master && mode != Nearest: + // Prefer slaves, unless the mode is PrimaryPreferred. + swap = (mode == PrimaryPreferred) != best.info.Master case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond: // Prefer nearest server. swap = next.pingValue < best.pingValue === modified file 'src/gopkg.in/mgo.v2/session.go' --- src/gopkg.in/mgo.v2/session.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/mgo.v2/session.go 2016-03-22 15:18:22 +0000 @@ -44,24 +44,41 @@ "gopkg.in/mgo.v2/bson" ) -type mode int +type Mode int const ( - Eventual mode = 0 - Monotonic mode = 1 - Strong mode = 2 + // Relevant documentation on read preference modes: + // + // http://docs.mongodb.org/manual/reference/read-preference/ + // + Primary Mode = 2 // Default mode. All operations read from the current replica set primary. + PrimaryPreferred Mode = 3 // Read from the primary if available. Read from the secondary otherwise. + Secondary Mode = 4 // Read from one of the nearest secondary members of the replica set. + SecondaryPreferred Mode = 5 // Read from one of the nearest secondaries if available. Read from primary otherwise. + Nearest Mode = 6 // Read from one of the nearest members, irrespective of it being primary or secondary. + + // Read preference modes are specific to mgo: + Eventual Mode = 0 // Same as Nearest, but may change servers between reads. + Monotonic Mode = 1 // Same as SecondaryPreferred before first write. Same as Primary after first write. + Strong Mode = 2 // Same as Primary. ) // When changing the Session type, check if newSession and copySession // need to be updated too. +// Session represents a communication session with the database. +// +// All Session methods are concurrency-safe and may be called from multiple +// goroutines. In all session modes but Eventual, using the session from +// multiple goroutines will cause them to share the same underlying socket. +// See the documentation on Session.SetMode for more details. 
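The mode constants above combine with tag-based server selection. A minimal sketch, assuming a replica set whose members carry the illustrative tags shown:

	package main

	import (
		"fmt"

		"gopkg.in/mgo.v2"
		"gopkg.in/mgo.v2/bson"
	)

	func main() {
		session, err := mgo.Dial("localhost:40011") // fixture address, an assumption
		if err != nil {
			panic(err)
		}
		defer session.Close()

		// Nearest considers primaries and secondaries alike and picks
		// the lowest observed ping time; SelectServers restricts the
		// candidates to servers matching at least one tag set.
		session.SetMode(mgo.Nearest, true)
		session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}})

		var status struct{ Host string }
		if err := session.Run("serverStatus", &status); err != nil {
			panic(err)
		}
		fmt.Println("served by:", status.Host)
	}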
type Session struct { m sync.RWMutex cluster_ *mongoCluster slaveSocket *mongoSocket masterSocket *mongoSocket slaveOk bool - consistency mode + consistency Mode queryConfig query safeOp *queryOp syncTimeout time.Duration @@ -354,6 +371,8 @@ Dial func(addr net.Addr) (net.Conn, error) } +// mgo.v3: Drop DialInfo.Dial. + // ServerAddr represents the address for establishing a connection to an // individual MongoDB server. type ServerAddr struct { @@ -481,7 +500,7 @@ return info, nil } -func newSession(consistency mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { +func newSession(consistency Mode, cluster *mongoCluster, timeout time.Duration) (session *Session) { cluster.Acquire() session = &Session{ cluster_: cluster, @@ -963,12 +982,14 @@ DropDups bool "dropDups,omitempty" Background bool ",omitempty" Sparse bool ",omitempty" - Bits, Min, Max int ",omitempty" + Bits int ",omitempty" + Min, Max float64 ",omitempty" BucketSize float64 "bucketSize,omitempty" ExpireAfter int "expireAfterSeconds,omitempty" Weights bson.D ",omitempty" DefaultLanguage string "default_language,omitempty" LanguageOverride string "language_override,omitempty" + TextIndexVersion int "textIndexVersion,omitempty" } type Index struct { @@ -982,13 +1003,21 @@ // documents with indexed time.Time older than the provided delta. ExpireAfter time.Duration - // Name holds the stored index name. On creation this field is ignored and the index name - // is automatically computed by EnsureIndex based on the index key + // Name holds the stored index name. On creation if this field is unset it is + // computed by EnsureIndex based on the index key. Name string // Properties for spatial indexes. - Bits, Min, Max int - BucketSize float64 + // + // Min and Max were improperly typed as int when they should have been + // floats. To preserve backwards compatibility they are still typed as + // int and the following two fields enable reading and writing the same + // fields as float numbers. In mgo.v3, these fields will be dropped and + // Min/Max will become floats. + Min, Max int + Minf, Maxf float64 + BucketSize float64 + Bits int // Properties for text indexes. DefaultLanguage string @@ -1001,6 +1030,8 @@ Weights map[string]int } +// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats. + type indexKeyInfo struct { name string key bson.D @@ -1182,8 +1213,8 @@ Background: index.Background, Sparse: index.Sparse, Bits: index.Bits, - Min: index.Min, - Max: index.Max, + Min: index.Minf, + Max: index.Maxf, BucketSize: index.BucketSize, ExpireAfter: int(index.ExpireAfter / time.Second), Weights: keyInfo.weights, @@ -1191,6 +1222,15 @@ LanguageOverride: index.LanguageOverride, } + if spec.Min == 0 && spec.Max == 0 { + spec.Min = float64(index.Min) + spec.Max = float64(index.Max) + } + + if index.Name != "" { + spec.Name = index.Name + } + NextField: for name, weight := range index.Weights { for i, elem := range spec.Weights { @@ -1220,17 +1260,15 @@ return err } -// DropIndex removes the index with key from the collection. +// DropIndex drops the index with the provided key from the c collection. // -// The key value determines which fields compose the index. The index ordering -// will be ascending by default. To obtain an index with a descending order, -// the field name should be prefixed by a dash (e.g. []string{"-time"}). +// See EnsureIndex for details on the accepted key variants. 
// // For example: // -// err := collection.DropIndex("lastname", "firstname") +// err1 := collection.DropIndex("firstField", "-secondField") +// err2 := collection.DropIndex("customIndexName") // -// See the EnsureIndex method for more details on indexes. func (c *Collection) DropIndex(key ...string) error { keyInfo, err := parseIndexKey(key) if err != nil { @@ -1260,6 +1298,58 @@ return nil } +// DropIndexName removes the index with the provided index name. +// +// For example: +// +// err := collection.DropIndex("customIndexName") +// +func (c *Collection) DropIndexName(name string) error { + session := c.Database.Session + + session = session.Clone() + defer session.Close() + session.SetMode(Strong, false) + + c = c.With(session) + + indexes, err := c.Indexes() + if err != nil { + return err + } + + var index Index + for _, idx := range indexes { + if idx.Name == name { + index = idx + break + } + } + + if index.Name != "" { + keyInfo, err := parseIndexKey(index.Key) + if err != nil { + return err + } + + cacheKey := c.FullName + "\x00" + keyInfo.name + session.cluster().CacheIndex(cacheKey, false) + } + + result := struct { + ErrMsg string + Ok bool + }{} + err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result) + if err != nil { + return err + } + if !result.Ok { + return errors.New(result.ErrMsg) + } + return nil +} + // Indexes returns a list of all indexes for the collection. // // For example, this snippet would drop all available indexes: @@ -1329,15 +1419,36 @@ } func indexFromSpec(spec indexSpec) Index { - return Index{ - Name: spec.Name, - Key: simpleIndexKey(spec.Key), - Unique: spec.Unique, - DropDups: spec.DropDups, - Background: spec.Background, - Sparse: spec.Sparse, - ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, - } + index := Index{ + Name: spec.Name, + Key: simpleIndexKey(spec.Key), + Unique: spec.Unique, + DropDups: spec.DropDups, + Background: spec.Background, + Sparse: spec.Sparse, + Minf: spec.Min, + Maxf: spec.Max, + Bits: spec.Bits, + BucketSize: spec.BucketSize, + DefaultLanguage: spec.DefaultLanguage, + LanguageOverride: spec.LanguageOverride, + ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second, + } + if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max { + index.Min = int(spec.Min) + index.Max = int(spec.Max) + } + if spec.TextIndexVersion > 0 { + index.Key = make([]string, len(spec.Weights)) + index.Weights = make(map[string]int) + for i, elem := range spec.Weights { + index.Key[i] = "$text:" + elem.Name + if w, ok := elem.Value.(int); ok { + index.Weights[elem.Name] = w + } + } + } + return index } type indexSlice []Index @@ -1489,7 +1600,7 @@ // Shifting between Monotonic and Strong modes will keep a previously // reserved connection for the session unless refresh is true or the // connection is unsuitable (to a secondary server in a Strong session). -func (s *Session) SetMode(consistency mode, refresh bool) { +func (s *Session) SetMode(consistency Mode, refresh bool) { s.m.Lock() debugf("Session %p: setting mode %d with refresh=%v (master=%p, slave=%p)", s, consistency, refresh, s.masterSocket, s.slaveSocket) s.consistency = consistency @@ -1505,7 +1616,7 @@ } // Mode returns the current consistency mode for the session. 
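The DropIndexName addition above pairs with the now-honored Index.Name field: an index created under a custom name can only be dropped by that name. A minimal sketch, where the address and names are illustrative assumptions:

	package main

	import "gopkg.in/mgo.v2"

	func main() {
		session, err := mgo.Dial("localhost:40001") // fixture address, an assumption
		if err != nil {
			panic(err)
		}
		defer session.Close()
		coll := session.DB("mydb").C("mycoll")

		// An explicit Name overrides the auto-computed key-based name,
		// so the index must later be dropped by that name.
		err = coll.EnsureIndex(mgo.Index{
			Key:  []string{"lastname", "firstname"},
			Name: "people",
		})
		if err != nil {
			panic(err)
		}
		if err := coll.DropIndexName("people"); err != nil {
			panic(err)
		}
	}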
-func (s *Session) Mode() mode { +func (s *Session) Mode() Mode { s.m.RLock() mode := s.consistency s.m.RUnlock() @@ -1800,7 +1911,7 @@ // used for reading operations to those with both tag "disk" set to // "ssd" and tag "rack" set to 1: // -// session.SelectSlaves(bson.D{{"disk", "ssd"}, {"rack", 1}}) +// session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}}) // // Multiple sets of tags may be provided, in which case the used server // must match all tags within any one set. @@ -2161,6 +2272,8 @@ return p } +// mgo.v3: Use a single user-visible error type. + type LastError struct { Err string Code, N, Waited int @@ -2168,6 +2281,9 @@ WTimeout bool UpdatedExisting bool `bson:"updatedExisting"` UpsertedId interface{} `bson:"upserted"` + + modified int + errors []error } func (err *LastError) Error() string { @@ -2204,6 +2320,13 @@ return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ") case *QueryError: return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 + case *bulkError: + for _, ee := range e.errs { + if !IsDup(ee) { + return false + } + } + return true } return false } @@ -2213,7 +2336,7 @@ // happens while inserting the provided documents, the returned error will // be of type *LastError. func (c *Collection) Insert(docs ...interface{}) error { - _, err := c.writeQuery(&insertOp{c.FullName, docs, 0}) + _, err := c.writeOp(&insertOp{c.FullName, docs, 0}, true) return err } @@ -2229,7 +2352,15 @@ // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Update(selector interface{}, update interface{}) error { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 0}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil && !lerr.UpdatedExisting { return ErrNotFound } @@ -2265,7 +2396,17 @@ // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 2}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + Flags: 2, + Multi: true, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{Updated: lerr.N} } @@ -2286,7 +2427,17 @@ // http://www.mongodb.org/display/DOCS/Atomic+Operations // func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) { - lerr, err := c.writeQuery(&updateOp{c.FullName, selector, update, 1}) + if selector == nil { + selector = bson.D{} + } + op := updateOp{ + Collection: c.FullName, + Selector: selector, + Update: update, + Flags: 1, + Upsert: true, + } + lerr, err := c.writeOp(&op, true) if err == nil && lerr != nil { info = &ChangeInfo{} if lerr.UpdatedExisting { @@ -2318,7 +2469,7 @@ // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) Remove(selector interface{}) error { - lerr, err := c.writeQuery(&deleteOp{c.FullName, selector, 1}) + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1}, true) if err == nil && lerr != nil && lerr.N == 0 { return ErrNotFound } @@ -2344,7 +2495,7 @@ // http://www.mongodb.org/display/DOCS/Removing // func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) { - lerr, err := 
c.writeQuery(&deleteOp{c.FullName, selector, 0}) + lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0}, true) if err == nil && lerr != nil { info = &ChangeInfo{Removed: lerr.N} } @@ -2789,7 +2940,7 @@ } defer socket.Release() - op.flags |= session.slaveOkFlag() + session.prepareQuery(&op) op.limit = -1 data, err := socket.SimpleQuery(&op) @@ -2829,7 +2980,7 @@ op.collection = db.Name + ".$cmd" // Query.One: - op.flags |= session.slaveOkFlag() + session.prepareQuery(&op) op.limit = -1 data, err := socket.SimpleQuery(&op) @@ -3022,8 +3173,9 @@ iter.op.limit = op.limit iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - op.flags |= session.slaveOkFlag() socket, err := session.acquireSocket(true) if err != nil { @@ -3103,8 +3255,9 @@ iter.op.limit = op.limit iter.op.replyFunc = iter.replyFunc() iter.docsToReceive++ + session.prepareQuery(&op) op.replyFunc = iter.op.replyFunc - op.flags |= flagTailable | flagAwaitData | session.slaveOkFlag() + op.flags |= flagTailable | flagAwaitData socket, err := session.acquireSocket(true) if err != nil { @@ -3123,10 +3276,11 @@ return iter } -func (s *Session) slaveOkFlag() (flag queryOpFlags) { +func (s *Session) prepareQuery(op *queryOp) { s.m.RLock() + op.mode = s.consistency if s.slaveOk { - flag = flagSlaveOk + op.flags |= flagSlaveOk } s.m.RUnlock() return @@ -3909,18 +4063,20 @@ // Read-only lock to check for previously reserved socket. s.m.RLock() + // If there is a slave socket reserved and its use is acceptable, take it as long + // as there isn't a master socket which would be preferred by the read preference mode. + if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { + socket := s.slaveSocket + socket.Acquire() + s.m.RUnlock() + return socket, nil + } if s.masterSocket != nil { socket := s.masterSocket socket.Acquire() s.m.RUnlock() return socket, nil } - if s.slaveSocket != nil && s.slaveOk && slaveOk { - socket := s.slaveSocket - socket.Acquire() - s.m.RUnlock() - return socket, nil - } s.m.RUnlock() // No go. We may have to request a new socket and change the session, @@ -3928,17 +4084,17 @@ s.m.Lock() defer s.m.Unlock() + if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) { + s.slaveSocket.Acquire() + return s.slaveSocket, nil + } if s.masterSocket != nil { s.masterSocket.Acquire() return s.masterSocket, nil } - if s.slaveSocket != nil && s.slaveOk && slaveOk { - s.slaveSocket.Acquire() - return s.slaveSocket, nil - } // Still not good. We need a new socket. 
- sock, err := s.cluster().AcquireSocket(slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) + sock, err := s.cluster().AcquireSocket(s.consistency, slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit) if err != nil { return nil, err } @@ -4041,27 +4197,36 @@ Index int Id interface{} `_id` } - Errors []struct { - Ok bool - Index int - Code int - N int - ErrMsg string - } `bson:"writeErrors"` - ConcernError struct { - Code int - ErrMsg string - } `bson:"writeConcernError"` -} - -// writeQuery runs the given modifying operation, potentially followed up + ConcernError writeConcernError `bson:"writeConcernError"` + Errors []writeCmdError `bson:"writeErrors"` +} + +type writeConcernError struct { + Code int + ErrMsg string +} + +type writeCmdError struct { + Index int + Code int + ErrMsg string +} + +func (r *writeCmdResult) QueryErrors() []error { + var errs []error + for _, err := range r.Errors { + errs = append(errs, &QueryError{Code: err.Code, Message: err.ErrMsg}) + } + return errs +} + +// writeOp runs the given modifying operation, potentially followed up // by a getLastError command in case the session is in safe mode. The // LastError result is made available in lerr, and if lerr.Err is set it // will also be returned as err. -func (c *Collection) writeQuery(op interface{}) (lerr *LastError, err error) { +func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err error) { s := c.Database.Session - dbname := c.Database.Name - socket, err := s.acquireSocket(dbname == "local") + socket, err := s.acquireSocket(c.Database.Name == "local") if err != nil { return nil, err } @@ -4071,11 +4236,10 @@ safeOp := s.safeOp s.m.RUnlock() - // TODO Enable this path for wire version 2 as well. - if socket.ServerInfo().MaxWireVersion >= 3 { + if socket.ServerInfo().MaxWireVersion >= 2 { // Servers with a more recent write protocol benefit from write commands. if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 { - var firstErr error + var errors []error // Maximum batch size is 1000. Must split out in separate operations for compatibility. all := op.documents for i := 0; i < len(all); i += 1000 { @@ -4084,22 +4248,40 @@ l = len(all) } op.documents = all[i:l] - _, err := c.writeCommand(socket, safeOp, op) + lerr, err := c.writeOpCommand(socket, safeOp, op, ordered) if err != nil { - if op.flags&1 != 0 { - if firstErr == nil { - firstErr = err - } - } else { - return nil, err + errors = append(errors, lerr.errors...) + if op.flags&1 == 0 { + return &LastError{errors: errors}, err } } } - return nil, firstErr - } - return c.writeCommand(socket, safeOp, op) + if len(errors) == 0 { + return nil, nil + } + return &LastError{errors: errors}, errors[0] + } + return c.writeOpCommand(socket, safeOp, op, ordered) + } else if updateOps, ok := op.(bulkUpdateOp); ok { + var errors []error + for _, updateOp := range updateOps { + lerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered) + if err != nil { + errors = append(errors, lerr.errors...) 
+ if ordered { + return &LastError{errors: errors}, err + } + } + } + if len(errors) == 0 { + return nil, nil + } + return &LastError{errors: errors}, errors[0] } + return c.writeOpQuery(socket, safeOp, op, ordered) +} +func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { if safeOp == nil { return nil, socket.Query(op) } @@ -4109,7 +4291,7 @@ var replyErr error mutex.Lock() query := *safeOp // Copy the data. - query.collection = dbname + ".$cmd" + query.collection = c.Database.Name + ".$cmd" query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { replyData = docData replyErr = err @@ -4139,7 +4321,7 @@ return result, nil } -func (c *Collection) writeCommand(socket *mongoSocket, safeOp *queryOp, op interface{}) (lerr *LastError, err error) { +func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) { var writeConcern interface{} if safeOp == nil { writeConcern = bson.D{{"w", 0}} @@ -4159,15 +4341,19 @@ } case *updateOp: // http://docs.mongodb.org/manual/reference/command/update - selector := op.selector - if selector == nil { - selector = bson.D{} + cmd = bson.D{ + {"update", c.Name}, + {"updates", []interface{}{op}}, + {"writeConcern", writeConcern}, + {"ordered", ordered}, } + case bulkUpdateOp: + // http://docs.mongodb.org/manual/reference/command/update cmd = bson.D{ {"update", c.Name}, - {"updates", []bson.D{{{"q", selector}, {"u", op.update}, {"upsert", op.flags&1 != 0}, {"multi", op.flags&2 != 0}}}}, + {"updates", op}, {"writeConcern", writeConcern}, - //{"ordered", }, + {"ordered", ordered}, } case *deleteOp: // http://docs.mongodb.org/manual/reference/command/delete @@ -4188,18 +4374,19 @@ debugf("Write command result: %#v (err=%v)", result, err) lerr = &LastError{ UpdatedExisting: result.N > 0 && len(result.Upserted) == 0, - N: result.N, + N: result.N, + + modified: result.NModified, + errors: result.QueryErrors(), } if len(result.Upserted) > 0 { lerr.UpsertedId = result.Upserted[0].Id } if len(result.Errors) > 0 { e := result.Errors[0] - if !e.Ok { - lerr.Code = e.Code - lerr.Err = e.ErrMsg - err = lerr - } + lerr.Code = e.Code + lerr.Err = e.ErrMsg + err = lerr } else if result.ConcernError.Code != 0 { e := result.ConcernError lerr.Code = e.Code === modified file 'src/gopkg.in/mgo.v2/session_test.go' --- src/gopkg.in/mgo.v2/session_test.go 2015-09-22 15:27:01 +0000 +++ src/gopkg.in/mgo.v2/session_test.go 2016-03-22 15:18:22 +0000 @@ -30,7 +30,6 @@ "flag" "fmt" "math" - "reflect" "runtime" "sort" "strconv" @@ -84,6 +83,16 @@ c.Assert(stats.ReceivedOps, Equals, 1) } +func (s *S) TestDialIPAddress(c *C) { + session, err := mgo.Dial("127.0.0.1:40001") + c.Assert(err, IsNil) + defer session.Close() + + session, err = mgo.Dial("[::1%]:40001") + c.Assert(err, IsNil) + defer session.Close() +} + func (s *S) TestURLSingle(c *C) { session, err := mgo.Dial("mongodb://localhost:40001/") c.Assert(err, IsNil) @@ -277,10 +286,7 @@ names, err := session.DatabaseNames() c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db1", "db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db1", "db2", "local"}) - } + c.Assert(filterDBs(names), DeepEquals, []string{"db1", "db2"}) // Try to exercise cursor logic. 2.8.0-rc3 still ignores this. 
session.SetBatch(2) @@ -688,20 +694,30 @@ names, err := session.DatabaseNames() c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string{"db2"}) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"db2", "local"}) - } + c.Assert(filterDBs(names), DeepEquals, []string{"db2"}) err = db2.DropDatabase() c.Assert(err, IsNil) names, err = session.DatabaseNames() c.Assert(err, IsNil) - if !reflect.DeepEqual(names, []string(nil)) { - // 2.4+ has "local" as well. - c.Assert(names, DeepEquals, []string{"local"}) - } + c.Assert(filterDBs(names), DeepEquals, []string{}) +} + +func filterDBs(dbs []string) []string { + var i int + for _, name := range dbs { + switch name { + case "admin", "local": + default: + dbs[i] = name + i++ + } + } + if len(dbs) == 0 { + return []string{} + } + return dbs[:i] } func (s *S) TestDropCollection(c *C) { @@ -1689,7 +1705,7 @@ // 1*QUERY for nonce + 1*GET_MORE_OP on Next + 1*GET_MORE_OP on Next after sleep + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 4) } else { c.Assert(stats.SentOps, Equals, 5) @@ -1786,7 +1802,7 @@ // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) @@ -1882,7 +1898,7 @@ // 1*QUERY_OP for nonce + 1*GET_MORE_OP on Next + // 1*INSERT_OP + 1*QUERY_OP for getLastError on insert of 47 stats := mgo.GetStats() - if s.versionAtLeast(3, 0) { // TODO Will be 2.6 when write commands are enabled for it. + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 3) } else { c.Assert(stats.SentOps, Equals, 4) @@ -2273,6 +2289,10 @@ err = coll.EnsureIndex(mgo.Index{ Key: []string{"$text:a", "$text:b"}, }) + msg := "text search not enabled" + if err != nil && strings.Contains(err.Error(), msg) { + c.Skip(msg) + } c.Assert(err, IsNil) err = coll.Insert(M{ @@ -2507,8 +2527,7 @@ // It must have sent two operations (INSERT_OP + getLastError QUERY_OP) stats := mgo.GetStats() - // TODO Will be 2.6 when write commands are enabled for it. 
- if s.versionAtLeast(3, 0) { + if s.versionAtLeast(2, 6) { c.Assert(stats.SentOps, Equals, 1) } else { c.Assert(stats.SentOps, Equals, 2) @@ -2637,8 +2656,8 @@ "name": "loc_old_2d", "key": M{"loc_old": "2d"}, "ns": "mydb.mycoll", - "min": -500, - "max": 500, + "min": -500.0, + "max": 500.0, "bits": 32, }, }, { @@ -2652,8 +2671,25 @@ "name": "loc_2d", "key": M{"loc": "2d"}, "ns": "mydb.mycoll", - "min": -500, - "max": 500, + "min": -500.0, + "max": 500.0, + "bits": 32, + }, +}, { + mgo.Index{ + Key: []string{"$2d:loc"}, + Minf: -500.1, + Maxf: 500.1, + Min: 1, // Should be ignored + Max: 2, + Bits: 32, + }, + M{ + "name": "loc_2d", + "key": M{"loc": "2d"}, + "ns": "mydb.mycoll", + "min": -500.1, + "max": 500.1, "bits": 32, }, }, { @@ -2709,6 +2745,16 @@ "language_override": "language", "textIndexVersion": 2, }, +}, { + mgo.Index{ + Key: []string{"cn"}, + Name: "CustomName", + }, + M{ + "name": "CustomName", + "key": M{"cn": 1}, + "ns": "mydb.mycoll", + }, }} func (s *S) TestEnsureIndex(c *C) { @@ -2720,16 +2766,20 @@ idxs := session.DB("mydb").C("system.indexes") for _, test := range indexTests { - if !s.versionAtLeast(2, 4) && test.expected["weights"] != nil { - // No text indexes until 2.4. + err = coll.EnsureIndex(test.index) + msg := "text search not enabled" + if err != nil && strings.Contains(err.Error(), msg) { continue } - - err = coll.EnsureIndex(test.index) c.Assert(err, IsNil) + expectedName := test.index.Name + if expectedName == "" { + expectedName, _ = test.expected["name"].(string) + } + obtained := M{} - err = idxs.Find(M{"name": test.expected["name"]}).One(obtained) + err = idxs.Find(M{"name": expectedName}).One(obtained) c.Assert(err, IsNil) delete(obtained, "v") @@ -2737,12 +2787,57 @@ if s.versionAtLeast(2, 7) { // Was deprecated in 2.6, and not being reported by 2.7+. delete(test.expected, "dropDups") + test.index.DropDups = false } c.Assert(obtained, DeepEquals, test.expected) - err = coll.DropIndex(test.index.Key...) + // The result of Indexes must match closely what was used to create the index. + indexes, err := coll.Indexes() c.Assert(err, IsNil) + c.Assert(indexes, HasLen, 2) + gotIndex := indexes[0] + if gotIndex.Name == "_id_" { + gotIndex = indexes[1] + } + wantIndex := test.index + if wantIndex.Name == "" { + wantIndex.Name = gotIndex.Name + } + if strings.HasPrefix(wantIndex.Key[0], "@") { + wantIndex.Key[0] = "$2d:" + wantIndex.Key[0][1:] + } + if wantIndex.Minf == 0 && wantIndex.Maxf == 0 { + wantIndex.Minf = float64(wantIndex.Min) + wantIndex.Maxf = float64(wantIndex.Max) + } else { + wantIndex.Min = gotIndex.Min + wantIndex.Max = gotIndex.Max + } + if wantIndex.DefaultLanguage == "" { + wantIndex.DefaultLanguage = gotIndex.DefaultLanguage + } + if wantIndex.LanguageOverride == "" { + wantIndex.LanguageOverride = gotIndex.LanguageOverride + } + for name, _ := range gotIndex.Weights { + if _, ok := wantIndex.Weights[name]; !ok { + if wantIndex.Weights == nil { + wantIndex.Weights = make(map[string]int) + } + wantIndex.Weights[name] = 1 + } + } + c.Assert(gotIndex, DeepEquals, wantIndex) + + // Drop created index by key or by name if a custom name was used. + if test.index.Name == "" { + err = coll.DropIndex(test.index.Key...) 
+ c.Assert(err, IsNil) + } else { + err = coll.DropIndexName(test.index.Name) + c.Assert(err, IsNil) + } } } @@ -2842,21 +2937,54 @@ c.Assert(err, IsNil) sysidx := session.DB("mydb").C("system.indexes") - dummy := &struct{}{} - - err = sysidx.Find(M{"name": "a_1"}).One(dummy) - c.Assert(err, IsNil) - - err = sysidx.Find(M{"name": "b_1"}).One(dummy) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.DropIndex("a") - c.Assert(err, IsNil) - - err = sysidx.Find(M{"name": "a_1"}).One(dummy) - c.Assert(err, Equals, mgo.ErrNotFound) - - err = coll.DropIndex("a") + + err = sysidx.Find(M{"name": "a_1"}).One(nil) + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "b_1"}).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndex("a") + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "a_1"}).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndex("a") + c.Assert(err, ErrorMatches, "index not found.*") +} + +func (s *S) TestEnsureIndexDropIndexName(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + err = coll.EnsureIndex(mgo.Index{Key: []string{"b"}, Name: "a"}) + c.Assert(err, IsNil) + + err = coll.DropIndexName("a") + c.Assert(err, IsNil) + + sysidx := session.DB("mydb").C("system.indexes") + + err = sysidx.Find(M{"name": "a_1"}).One(nil) + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "a"}).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndexName("a_1") + c.Assert(err, IsNil) + + err = sysidx.Find(M{"name": "a_1"}).One(nil) + c.Assert(err, Equals, mgo.ErrNotFound) + + err = coll.DropIndexName("a_1") c.Assert(err, ErrorMatches, "index not found.*") } @@ -2938,6 +3066,50 @@ c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"}) } +func (s *S) TestEnsureIndexNameCaching(c *C) { + session, err := mgo.Dial("localhost:40001") + c.Assert(err, IsNil) + defer session.Close() + + coll := session.DB("mydb").C("mycoll") + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + mgo.ResetStats() + + // Second EnsureIndex should be cached and do nothing. + err = coll.EnsureIndexKey("a") + c.Assert(err, IsNil) + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + stats := mgo.GetStats() + c.Assert(stats.SentOps, Equals, 0) + + // Resetting the cache should make it contact the server again. + session.ResetIndexCache() + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + stats = mgo.GetStats() + c.Assert(stats.SentOps > 0, Equals, true) + + // Dropping the index should also drop the cached index key. 
+ err = coll.DropIndexName("custom") + c.Assert(err, IsNil) + + mgo.ResetStats() + + err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"}) + c.Assert(err, IsNil) + + stats = mgo.GetStats() + c.Assert(stats.SentOps > 0, Equals, true) +} + func (s *S) TestEnsureIndexEvalGetIndexes(c *C) { session, err := mgo.Dial("localhost:40001") c.Assert(err, IsNil) === modified file 'src/gopkg.in/mgo.v2/socket.go' --- src/gopkg.in/mgo.v2/socket.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/socket.go 2016-03-22 15:18:22 +0000 @@ -28,6 +28,7 @@ import ( "errors" + "fmt" "net" "sync" "time" @@ -74,6 +75,7 @@ flags queryOpFlags replyFunc replyFunc + mode Mode options queryWrapper hasOptions bool serverTags []bson.D @@ -92,9 +94,30 @@ } func (op *queryOp) finalQuery(socket *mongoSocket) interface{} { - if op.flags&flagSlaveOk != 0 && len(op.serverTags) > 0 && socket.ServerInfo().Mongos { + if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos { + var modeName string + switch op.mode { + case Strong: + modeName = "primary" + case Monotonic, Eventual: + modeName = "secondaryPreferred" + case PrimaryPreferred: + modeName = "primaryPreferred" + case Secondary: + modeName = "secondary" + case SecondaryPreferred: + modeName = "secondaryPreferred" + case Nearest: + modeName = "nearest" + default: + panic(fmt.Sprintf("unsupported read mode: %d", op.mode)) + } op.hasOptions = true - op.options.ReadPreference = bson.D{{"mode", "secondaryPreferred"}, {"tags", op.serverTags}} + op.options.ReadPreference = make(bson.D, 0, 2) + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName}) + if len(op.serverTags) > 0 { + op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags}) + } } if op.hasOptions { if op.query == nil { @@ -130,10 +153,12 @@ } type updateOp struct { - collection string // "database.collection" - selector interface{} - update interface{} - flags uint32 + Collection string `bson:"-"` // "database.collection" + Selector interface{} `bson:"q"` + Update interface{} `bson:"u"` + Flags uint32 `bson:"-"` + Multi bool `bson:"multi,omitempty"` + Upsert bool `bson:"upsert,omitempty"` } type deleteOp struct { @@ -370,15 +395,15 @@ case *updateOp: buf = addHeader(buf, 2001) buf = addInt32(buf, 0) // Reserved - buf = addCString(buf, op.collection) - buf = addInt32(buf, int32(op.flags)) - debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.selector) - buf, err = addBSON(buf, op.selector) + buf = addCString(buf, op.Collection) + buf = addInt32(buf, int32(op.Flags)) + debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector) + buf, err = addBSON(buf, op.Selector) if err != nil { return err } - debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.update) - buf, err = addBSON(buf, op.update) + debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update) + buf, err = addBSON(buf, op.Update) if err != nil { return err } === modified file 'src/gopkg.in/mgo.v2/suite_test.go' --- src/gopkg.in/mgo.v2/suite_test.go 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/suite_test.go 2016-03-22 15:18:22 +0000 @@ -103,6 +103,9 @@ func (s *S) TearDownTest(c *C) { if s.stopped { + s.Stop(":40201") + s.Stop(":40202") + s.Stop(":40203") s.StartAll() } for _, host := range s.frozen { @@ -180,13 +183,15 @@ } func (s *S) StartAll() { - // Restart any stopped nodes. 
- run("cd _testdb && supervisorctl start all") - err := run("cd testdb && mongo --nodb wait.js") - if err != nil { - panic(err) + if s.stopped { + // Restart any stopped nodes. + run("cd _testdb && supervisorctl start all") + err := run("cd testdb && mongo --nodb wait.js") + if err != nil { + panic(err) + } + s.stopped = false } - s.stopped = false } func run(command string) error { === modified file 'src/gopkg.in/mgo.v2/testdb/client.pem' --- src/gopkg.in/mgo.v2/testdb/client.pem 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/testdb/client.pem 2016-03-22 15:18:22 +0000 @@ -1,44 +1,57 @@ +To regenerate the key: + + openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key + cat server.key server.crt > server.pem + openssl genrsa -out client.key 2048 + openssl req -key client.key -new -out client.req + openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt + cat client.key client.crt > client.pem + -----BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAwE2sl8YeTTSetwo9kykJ5mCZ/FtfPtn/0X4nOlTM2Qc/uWzA -sjSYoSV4UkuOiWjKQQH2EDeXaltshOo7F0oCY5ozVeQe+phe987iKTvLtf7NoXJD -KqNqR4Kb4ylbCrEky7+Xvw6yrrqw8qgWy+9VsrilR3q8LsETE9SBMtfp3BUaaNQp -peNm+iAhx3uZSv3mdzSLFSA/o61kAyG0scLExYDjo/7xyMNQoloLvNmx4Io160+y -lOz077/qqU620tmuDLRz1QdxK/bptmXTnsBCRxl+U8nzbwVZgWFENhXplbcN+SjN -LhdnvTiU2qFhgZmc7ZtCKdPIpx3W6pH9bx7kTwIDAQABAoIBAQCOQygyo8NY9FuS -J8ZDrvF+9+oS8fm1QorpDT2x/ngI+j7fSyAG9bgQRusLXpAVAWvWyb+iYa3nZbkT -X0DVys+XpcTifr+YPc7L3sYbIPxkKBsxm5kq2vfN7Uart7V9ZG1HOfblxdbUQpKT -AVzUA7vPWqATEC5VHEqjuerWlTqRr9YLZE/nkE7ICLISqdl4WDYfUYJwoXWfYkXQ -Lfl5Qh2leyri9S3urvDrhnURTQ1lM182IbTRA+9rUiFzsRW+9U4HPY7Ao2Itp8dr -GRP4rcq4TP+NcF0Ky64cNfKXCWmwqTBRFYAlTD6gwjN/s2BzvWD/2nlnc0DYAXrB -TgFCPk7xAoGBAOwuHICwwTxtzrdWjuRGU3RxL4eLEXedtL8yon/yPci3e+8eploX -1Fp0rEK2gIGDp/X8DiOtrKXih8XPusCwE/I3EvjHdI0RylLZXTPOp1Ei21dXRsiV -YxcF+d5s11q5tJtF+5ISUeIz2iSc9Z2LBnb8JDK1jcCRa5Q212q3ZWW5AoGBANBw -9CoMbxINLG1q0NvOOSwMKDk2OB+9JbQ5lwF4ijZl2I6qRoOCzQ3lBs0Qv/AeBjNR -SerDs2+eWnIBUbgSdiqcOKnXAI/Qbl1IkVFYV/2g9m6dgu1fNWNBv8NIYDHCLfDx -W3fpO5JMf+iE5XC4XqCfSBIME2yxPSGQjal6tB5HAoGAddYDzolhv/6hVoPPQ0F7 -PeuC5UOTcXSzy3k97kw0W0KAiStnoCengYIYuChKMVQ4ptgdTdvG+fTt/NnJuX2g -Vgb4ZjtNgVzQ70kX4VNH04lqmkcnP8iY6dHHexwezls9KwNdouGVDSEFw6K0QOgu -T4s5nDtNADkNzaMXE11xL7ECgYBoML3rstFmTY1ymB0Uck3jtaP5jR+axdpt7weL -Zax4qooILhcXL6++DUhMAt5ecTOaPTzci7xKw/Xj3MLzZs8IV5R/WQhf2sj/+gEh -jy5UijwEaNmEO74dAkWPoMLsvGpocMzO8JeldnXNTXi+0noCgfvtgXnIMAQlnfMh -z0LviwKBgQCg5KR9JC4iuKses7Kfv2YelcO8vOZkRzBu3NdRWMsiJQC+qfetgd57 -RjRjlRWd1WCHJ5Kmx3hkUaZZOrX5knqfsRW3Nl0I74xgWl7Bli2eSJ9VWl59bcd6 -DqphhY7/gcW+QZlhXpnqbf0W8jB2gPhTYERyCBoS9LfhZWZu/11wuQ== +MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7 +wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ +r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ +Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI +KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5 +Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu +La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq +KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv +bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f +Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA +Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp +QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo +DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl 
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
 -----END RSA PRIVATE KEY-----
 -----BEGIN CERTIFICATE-----
-MIICyTCCAjKgAwIBAgIBATANBgkqhkiG9w0BAQUFADBcMQswCQYDVQQGEwJHTzEM
-MAoGA1UECBMDTUdPMQwwCgYDVQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UE
-CxMGU2VydmVyMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcNMTQwOTI0MTQwMzUzWhcN
-MTUwOTI0MTQwMzUzWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECBMDTUdPMQwwCgYD
-VQQHEwNNR08xDDAKBgNVBAoTA01HTzEPMA0GA1UECxMGQ2xpZW50MRIwEAYDVQQD
-Ewlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDATayX
-xh5NNJ63Cj2TKQnmYJn8W18+2f/Rfic6VMzZBz+5bMCyNJihJXhSS46JaMpBAfYQ
-N5dqW2yE6jsXSgJjmjNV5B76mF73zuIpO8u1/s2hckMqo2pHgpvjKVsKsSTLv5e/
-DrKuurDyqBbL71WyuKVHerwuwRMT1IEy1+ncFRpo1Cml42b6ICHHe5lK/eZ3NIsV
-ID+jrWQDIbSxwsTFgOOj/vHIw1CiWgu82bHgijXrT7KU7PTvv+qpTrbS2a4MtHPV
-B3Er9um2ZdOewEJHGX5TyfNvBVmBYUQ2FemVtw35KM0uF2e9OJTaoWGBmZztm0Ip
-08inHdbqkf1vHuRPAgMBAAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMCMA0GCSqG
-SIb3DQEBBQUAA4GBAJZD7idSIRzhGlJYARPKWnX2CxD4VVB0F5cH5Mlc2YnoUSU/
-rKuPZFuOYND3awKqez6K3rNb3+tQmNitmoOT8ImmX1uJKBo5w9tuo4B2MmLQcPMk
-3fhPePuQCjtlArSmKVrNTrYPkyB9NwKS6q0+FzseFTw9ZJUIKiO9sSjMe+HP
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
 -----END CERTIFICATE-----
+
=== modified file 'src/gopkg.in/mgo.v2/testdb/dropall.js'
--- src/gopkg.in/mgo.v2/testdb/dropall.js 2015-06-05 17:40:37 +0000
+++ src/gopkg.in/mgo.v2/testdb/dropall.js 2016-03-22 15:18:22 +0000
@@ -60,7 +60,7 @@
 }
 
 function notMaster(result) {
-    return typeof result.errmsg != "undefined" && result.errmsg.indexOf("not master") >= 0
+    return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found") >= 0)
 }
 
 // vim:ts=4:sw=4:et
=== modified file 'src/gopkg.in/mgo.v2/testdb/init.js'
--- 
src/gopkg.in/mgo.v2/testdb/init.js 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/testdb/init.js 2016-03-22 15:18:22 +0000 @@ -79,14 +79,21 @@ function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 + var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 + if (m.state == 1) { + primary = 1 + } } } } + if (primary == 0) { + count = 0 + } return count } @@ -96,7 +103,6 @@ var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a) print("Replica sets have", count, "healthy nodes.") if (count == totalRSMembers) { - sleep(2000) configShards() configAuth() quit(0) === modified file 'src/gopkg.in/mgo.v2/testdb/server.pem' --- src/gopkg.in/mgo.v2/testdb/server.pem 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/testdb/server.pem 2016-03-22 15:18:22 +0000 @@ -1,33 +1,50 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB +Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk +mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi +xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb +YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R +ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs +uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9 +wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu +MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi +wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby +yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk +eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3 +ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC +tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB +xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6 +MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9 +Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3 +IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q +Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl +QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z +GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do +4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1 +ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7 +1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt +9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk +SruEA1+5bfBRMW0P+h7Qfe4= +-----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIIC+DCCAmGgAwIBAgIJAJ5pBAq2HXAsMA0GCSqGSIb3DQEBBQUAMFwxCzAJBgNV -BAYTAkdPMQwwCgYDVQQIEwNNR08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdP -MQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNDA5MjQx -MzUxMTBaFw0xNTA5MjQxMzUxMTBaMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNN -R08xDDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIx -EjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA -pQ5wO2L23xMI4PzpVt/Ftvez82IvA9amwr3fUd7RjlYwiFsFeMnG24a4CUoOeKF0 -fpQWc9rmCs0EeP5ofZ2otOsfxoVWXZAZWdgauuwlYB6EeFaAMH3fxVH3IiH+21RR -q2w9sH/s4fqh5stavUfyPdVmCcb8NW0jD8jlqniJL0kCAwEAAaOBwTCBvjAdBgNV -HQ4EFgQUjyVWGMHBrmPDGwCY5VusHsKIpzIwgY4GA1UdIwSBhjCBg4AUjyVWGMHB -rmPDGwCY5VusHsKIpzKhYKReMFwxCzAJBgNVBAYTAkdPMQwwCgYDVQQIEwNNR08x -DDAKBgNVBAcTA01HTzEMMAoGA1UEChMDTUdPMQ8wDQYDVQQLEwZTZXJ2ZXIxEjAQ 
-BgNVBAMTCWxvY2FsaG9zdIIJAJ5pBAq2HXAsMAwGA1UdEwQFMAMBAf8wDQYJKoZI -hvcNAQEFBQADgYEAa65TgDKp3SRUDNAILSuQOCEbenWh/DMPL4vTVgo/Dxd4emoO -7i8/4HMTa0XeYIVbAsxO+dqtxqt32IcV7DurmQozdUZ7q0ueJRXon6APnCN0IqPC -sF71w63xXfpmnvTAfQXi7x6TUAyAQ2nScHExAjzc000DF1dO/6+nIINqNQE= +MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV +BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP +MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw +ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM +A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl +cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm +6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK +IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5 +GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji +fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP +JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd +OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu +2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG +TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw +nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s +UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C +W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL +yQ== -----END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIICWwIBAAKBgQClDnA7YvbfEwjg/OlW38W297PzYi8D1qbCvd9R3tGOVjCIWwV4 -ycbbhrgJSg54oXR+lBZz2uYKzQR4/mh9nai06x/GhVZdkBlZ2Bq67CVgHoR4VoAw -fd/FUfciIf7bVFGrbD2wf+zh+qHmy1q9R/I91WYJxvw1bSMPyOWqeIkvSQIDAQAB -AoGABA9S22MXx2zkbwRJiQWAC3wURQxJM8L33xpkf9MHPIUKNJBolgwAhC3QIQpd -SMJP5z0lQDxGJEXesksvrsdN+vsgbleRfQsAIcY/rEhr9h8m6auM08f+69oIX32o -aTOWJJRofjbgzE5c/RijqhIaYGdq54a0EE9mAaODwZoa2/ECQQDRGrIRI5L3pdRA -yifDKNjvAFOk6TbdGe+J9zHFw4F7bA2In/b+rno9vrj+EanOevD8LRLzeFshzXrG -WQFzZ69/AkEAyhLSY7WNiQTeJWCwXawVnoSl5AMSRYFA/A2sEUokfORR5BS7gqvL -mmEKmvslnZp5qlMtM4AyrW2OaoGvE6sFNwJACB3xK5kl61cUli9Cu+CqCx0IIi6r -YonPMpvV4sdkD1ZycAtFmz1KoXr102b8IHfFQwS855aUcwt26Jwr4j70IQJAXv9+ -PTXq9hF9xiCwiTkPaNh/jLQM8PQU8uoSjIZIpRZJkWpVxNay/z7D15xeULuAmxxD -UcThDjtFCrkw75Qk/QJAFfcM+5r31R1RrBGM1QPKwDqkFTGsFKnMWuS/pXyLTTOv -I+In9ZJyA/R5zKeJZjM7xtZs0ANU9HpOpgespq6CvA== ------END RSA PRIVATE KEY----- === modified file 'src/gopkg.in/mgo.v2/testdb/supervisord.conf' --- src/gopkg.in/mgo.v2/testdb/supervisord.conf 2015-06-05 17:40:37 +0000 +++ src/gopkg.in/mgo.v2/testdb/supervisord.conf 2016-03-22 15:18:22 +0000 @@ -14,13 +14,13 @@ supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface [program:db1] -command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1 --port 40001 +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db1 --bind_ip=127.0.0.1,::1 --port 40001 --ipv6 [program:db2] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --dbpath %(here)s/db2 --bind_ip=127.0.0.1 --port 40002 --auth [program:db3] -command = mongod -nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile 
%(here)s/server.pem +command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --dbpath %(here)s/db3 --bind_ip=127.0.0.1 --port 40003 --auth --sslMode preferSSL --sslCAFile %(here)s/server.pem --sslPEMKeyFile %(here)s/server.pem [program:rs1a] command = mongod --nohttpinterface --noprealloc --nojournal --smallfiles --nssize=1 --oplogSize=1 --shardsvr --replSet rs1 --dbpath %(here)s/rs1a --bind_ip=127.0.0.1 --port 40011 === modified file 'src/gopkg.in/mgo.v2/testdb/wait.js' --- src/gopkg.in/mgo.v2/testdb/wait.js 2014-08-20 15:00:12 +0000 +++ src/gopkg.in/mgo.v2/testdb/wait.js 2016-03-22 15:18:22 +0000 @@ -32,14 +32,21 @@ function countHealthy(rs) { var status = rs.runCommand({replSetGetStatus: 1}) var count = 0 + var primary = 0 if (typeof status.members != "undefined") { for (var i = 0; i != status.members.length; i++) { var m = status.members[i] if (m.health == 1 && (m.state == 1 || m.state == 2)) { count += 1 + if (m.state == 1) { + primary = 1 + } } } } + if (primary == 0) { + count = 0 + } return count } @@ -56,3 +63,5 @@ print("Replica sets didn't sync up properly.") quit(12) + +// vim:ts=4:sw=4:et === added directory 'src/gopkg.in/yaml.v2' === added file 'src/gopkg.in/yaml.v2/LICENSE' --- src/gopkg.in/yaml.v2/LICENSE 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/LICENSE 2016-03-22 15:18:22 +0000 @@ -0,0 +1,188 @@ + +Copyright (c) 2011-2014 - Canonical Inc. + +This software is licensed under the LGPLv3, included below. + +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. 
+Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. 
+ + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. 
=== added file 'src/gopkg.in/yaml.v2/LICENSE.libyaml' --- src/gopkg.in/yaml.v2/LICENSE.libyaml 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/LICENSE.libyaml 2016-03-22 15:18:22 +0000 @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. === added file 'src/gopkg.in/yaml.v2/README.md' --- src/gopkg.in/yaml.v2/README.md 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/README.md 2016-03-22 15:18:22 +0000 @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! 
+b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + === added file 'src/gopkg.in/yaml.v2/apic.go' --- src/gopkg.in/yaml.v2/apic.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/apic.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// === added file 'src/gopkg.in/yaml.v2/decode.go' --- src/gopkg.in/yaml.v2/decode.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/decode.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+type parser struct {
+	parser yaml_parser_t
+	event  yaml_event_t
+	doc    *node
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+
+	yaml_parser_set_input_string(&p.parser, b)
+
+	p.skip()
+	if p.event.typ != yaml_STREAM_START_EVENT {
+		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return &p
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+	if p.event.typ != yaml_NO_EVENT {
+		if p.event.typ == yaml_STREAM_END_EVENT {
+			failf("attempted to go past the end of stream; corrupted value?")
+		}
+		yaml_event_delete(&p.event)
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	switch p.event.typ {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+	}
+	panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.skip()
+	n.children = append(n.children, p.parse())
+	if p.event.typ != yaml_DOCUMENT_END_EVENT {
+		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.skip()
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+ +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, 
zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if s, ok := resolved.(string); ok && out.CanAddr() {
+		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+			err := u.UnmarshalText([]byte(s))
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			good = true
+		} else if resolved != nil {
+			out.SetString(n.value)
+			good = true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		good = true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				good = true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) 
+ continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} === added file 'src/gopkg.in/yaml.v2/decode_test.go' --- src/gopkg.in/yaml.v2/decode_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/decode_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,966 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. + { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. 
+ + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, 
+ }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. + { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + // Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. 
+ { + "a: []", + &struct{ A []int }{[]int{}}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, 
NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 
10
+  label: center/big
+
+mergeOne:
+  # Merge one map
+  << : *CENTER
+  "r": 10
+  label: center/big
+
+mergeMultiple:
+  # Merge multiple maps
+  << : [ *CENTER, *BIG ]
+  label: center/big
+
+override:
+  # Override
+  << : [ *BIG, *LEFT, *SMALL ]
+  "x": 1
+  label: center/big
+
+shortTag:
+  # Explicit short merge tag
+  !!merge "<<" : [ *CENTER, *BIG ]
+  label: center/big
+
+longTag:
+  # Explicit merge long tag
+  !<tag:yaml.org,2002:merge> "<<" : [ *CENTER, *BIG ]
+  label: center/big
+
+inlineMap:
+  # Inlined map
+  << : {"x": 1, "y": 2, "r": 10}
+  label: center/big
+
+inlineSequenceMap:
+  # Inlined map in sequence
+  << : [ *CENTER, {"r": 10} ]
+  label: center/big
+`
+
+func (s *S) TestMerge(c *C) {
+	var want = map[interface{}]interface{}{
+		"x":     1,
+		"y":     2,
+		"r":     10,
+		"label": "center/big",
+	}
+
+	var m map[interface{}]interface{}
+	err := yaml.Unmarshal([]byte(mergeTests), &m)
+	c.Assert(err, IsNil)
+	for name, test := range m {
+		if name == "anchors" {
+			continue
+		}
+		c.Assert(test, DeepEquals, want, Commentf("test %q failed", name))
+	}
+}
+
+func (s *S) TestMergeStruct(c *C) {
+	type Data struct {
+		X, Y, R int
+		Label   string
+	}
+	want := Data{1, 2, 10, "center/big"}
+
+	var m map[string]Data
+	err := yaml.Unmarshal([]byte(mergeTests), &m)
+	c.Assert(err, IsNil)
+	for name, test := range m {
+		if name == "anchors" {
+			continue
+		}
+		c.Assert(test, Equals, want, Commentf("test %q failed", name))
+	}
+}
+
+var unmarshalNullTests = []func() interface{}{
+	func() interface{} { var v interface{}; v = "v"; return &v },
+	func() interface{} { var s = "s"; return &s },
+	func() interface{} { var s = "s"; sptr := &s; return &sptr },
+	func() interface{} { var i = 1; return &i },
+	func() interface{} { var i = 1; iptr := &i; return &iptr },
+	func() interface{} { m := map[string]int{"s": 1}; return &m },
+	func() interface{} { m := map[string]int{"s": 1}; return m },
+}
+
+func (s *S) TestUnmarshalNull(c *C) {
+	for _, test := range unmarshalNullTests {
+		item := test()
+		zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface()
+		err := yaml.Unmarshal([]byte("null"), item)
+		c.Assert(err, IsNil)
+		if reflect.TypeOf(item).Kind() == reflect.Map {
+			c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface())
+		} else {
+			c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero)
+		}
+	}
+}
+
+func (s *S) TestUnmarshalSliceOnPreset(c *C) {
+	// Issue #48.
+	v := struct{ A []int }{[]int{1}}
+	yaml.Unmarshal([]byte("a: [2]"), &v)
+	c.Assert(v.A, DeepEquals, []int{2})
+}
+
+//var data []byte
+//func init() {
+//	var err error
+//	data, err = ioutil.ReadFile("/tmp/file.yaml")
+//	if err != nil {
+//		panic(err)
+//	}
+//}
+//
+//func (s *S) BenchmarkUnmarshal(c *C) {
+//	var err error
+//	for i := 0; i < c.N; i++ {
+//		var v map[string]interface{}
+//		err = yaml.Unmarshal(data, &v)
+//	}
+//	if err != nil {
+//		panic(err)
+//	}
+//}
+//
+//func (s *S) BenchmarkMarshal(c *C) {
+//	var v map[string]interface{}
+//	yaml.Unmarshal(data, &v)
+//	c.ResetTimer()
+//	for i := 0; i < c.N; i++ {
+//		yaml.Marshal(&v)
+//	}
+//}

=== added file 'src/gopkg.in/yaml.v2/emitterc.go'
--- src/gopkg.in/yaml.v2/emitterc.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/yaml.v2/emitterc.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// Flush the buffer if needed.
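+// Note: the buffer_pos+5 checks here and in put/put_break/write below
+// appear to leave slack for the widest UTF-8 sequence (4 bytes) plus a
+// one-byte line break, so a single call never has to split a character
+// across a flush.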
+func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
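+// Each yaml_EMIT_*_STATE corresponds to one emit_* helper below;
+// compound constructs push their continuation onto emitter.states and
+// pop it when the matching END event arrives.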
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
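+// A tag that matched a %TAG directive is written as handle+suffix;
+// otherwise it is written in verbatim form, "!<suffix>".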
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
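+// Anchors and aliases must be non-empty and contain alphanumeric characters only.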
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
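+// Clears any anchor, tag, and scalar data left over from the previous event before re-analyzing.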
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} === added file 'src/gopkg.in/yaml.v2/encode.go' --- src/gopkg.in/yaml.v2/encode.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/encode.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
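+	// Note that emit failures for DOCUMENT-END and STREAM-END events are ignored below.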
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag 
string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} === added file 'src/gopkg.in/yaml.v2/encode_test.go' --- src/gopkg.in/yaml.v2/encode_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/encode_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . 
"gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. 
+ { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. + { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). 
+ { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == 
nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, "\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} === added file 'src/gopkg.in/yaml.v2/parserc.go' --- src/gopkg.in/yaml.v2/parserc.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/parserc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
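+// Both helpers below record the problem and its mark on the parser and return false so callers can bail out directly.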
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
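+// An empty scalar stands in for a node that the grammar allows but the input
+// omits, e.g. the missing value in "key:" or the missing entry after a lone
+// '-'. The resulting event spans zero characters at the given mark.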
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} === added file 'src/gopkg.in/yaml.v2/readerc.go' --- src/gopkg.in/yaml.v2/readerc.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/readerc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,391 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
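+// Only the three byte-order marks below are recognized by
+// yaml_parser_determine_encoding; a stream without a BOM is treated as UTF-8.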
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we have enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
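+	// Note: len(parser.buffer) is the number of decoded bytes currently in
+	// use. The slice is grown to full capacity here so the decoding loop
+	// below can write at buffer[buffer_len:], and it is cut back to the
+	// real length just before this function returns.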
+ parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). 
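+			// (As a concrete instance of the surrogate-pair
+			// formulas given below: U+1F600 has
+			//     U' = 0x1F600 - 0x10000    = 0xF600
+			//     W1 = 0xD800 + (U' >> 10)  = 0xD83D
+			//     W2 = 0xDC00 + (U' & 0x3FF) = 0xDE00
+			// so it is transmitted as the pair 0xD83D 0xDE00.)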
+ // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + } + buffer_len += width + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} === added file 'src/gopkg.in/yaml.v2/resolve.go' --- src/gopkg.in/yaml.v2/resolve.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/resolve.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. 
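+		// For example, "yes" and "on" resolve to !!bool true, "~" and
+		// "null" to !!null, "3.14" to !!float, and "0x1F" to !!int 31
+		// (via the 'D'/'S' hint cases below).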
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}

=== added file 'src/gopkg.in/yaml.v2/scannerc.go'
--- src/gopkg.in/yaml.v2/scannerc.go 1970-01-01 00:00:00 +0000
+++ src/gopkg.in/yaml.v2/scannerc.go 2016-03-22 15:18:22 +0000
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
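+//
+// As a rough sketch (in terms of the internal helpers defined below), a
+// consumer of the Scanner pulls one token at a time:
+//
+//      var token yaml_token_t
+//      for yaml_parser_scan(parser, &token) {
+//          if token.typ == yaml_STREAM_END_TOKEN {
+//              break
+//          }
+//          // ... dispatch on token.typ ...
+//      }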
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !      !foo
+//      %TAG    !yaml! tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However YAML has some
+// syntax peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require starting a new block collection on a new line.
+// If the current line contains only '-', '?', and ':' indicators, a new block
+// collection may start at the current line. The following examples illustrate
+// this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ? complex key
+//        : complex value
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("complex key")
+//      VALUE
+//      SCALAR("complex value")
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Collections in a mapping:
+//
+//      ? a sequence
+//      : - item 1
+//        - item 2
+//      ? a mapping
+//      : key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
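+// The error details are recorded on the parser itself (context, problem, and
+// their marks); every fetch/scan routine below propagates the false return up
+// to yaml_parser_scan.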
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning. Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters. 4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' 
&& is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' 
|| parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
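+		// (A required key is one sitting at the indentation column in
+		// the block context, e.g. the "a" in "a: 1"; dropping it before
+		// its ':' has been seen cannot be recovered from.)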
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
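+	// (Unrolling to column -1 force-closes every open block collection,
+	// emitting one BLOCK-END token per indentation level.)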
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
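+// FLOW-ENTRY is the ',' separator inside '[...]' and '{...}'; note that a
+// simple key may legally follow it, so simple_key_allowed is turned back on.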
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
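+		// For a line such as "a: 1", the scalar "a" was only recorded as a
+		// potential simple key when it was scanned; no KEY token was emitted
+		// at that point. The ':' seen now confirms it, so the KEY token is
+		// inserted retroactively at the queue position saved in token_number.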
+ token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//    after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//	%YAML    1.1    # a comment \n
+//	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//	%TAG    !yaml!  tag:yaml.org,2002:  \n
+//	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
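+		// For a directive such as
+		//
+		//	%TAG !e! tag:example.com,2000:app/
+		//
+		// handle now holds "!e!" and prefix holds "tag:example.com,2000:app/".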
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//	%YAML    1.1    # a comment \n
+//	 ^^^^
+//	%TAG    !yaml!  tag:yaml.org,2002:  \n
+//	 ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//	%YAML    1.1    # a comment \n
+//	      ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. 
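+		// A URI escape is '%' followed by two hex digits; for example, the
+		// pair "%C3%A9" carries the two-octet UTF-8 encoding of 'é'. The
+		// sequence must be complete: w counts the octets still expected.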
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
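+		// (While *indent is still 0 the scalar's indentation is not yet
+		// known: spaces are consumed up to any column, and max_indent
+		// records the deepest column seen, which becomes the detected
+		// indentation below.)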
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
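+				// For example, in the double-quoted scalar "a\tb\u00e9" the
+				// '\t' escape appends a single TAB byte via the switch below,
+				// while '\u00e9' sets code_length to 4 and is decoded into
+				// UTF-8 further down.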
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) 
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
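+	// (If the scalar ended after consuming line breaks, the scanner now
+	// sits at the start of a fresh line, where a new simple key may begin.)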
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} === added file 'src/gopkg.in/yaml.v2/sorter.go' --- src/gopkg.in/yaml.v2/sorter.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/sorter.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} === added file 'src/gopkg.in/yaml.v2/suite_test.go' --- src/gopkg.in/yaml.v2/suite_test.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/suite_test.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . "gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) === added file 'src/gopkg.in/yaml.v2/writerc.go' --- src/gopkg.in/yaml.v2/writerc.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/writerc.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. 
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. + if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} === added file 'src/gopkg.in/yaml.v2/yaml.go' --- src/gopkg.in/yaml.v2/yaml.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/yaml.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. 
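+//
+// For example, a type that accepts either a short scalar form or a full
+// mapping form might implement the interface as follows (a sketch; the
+// Addr type and its Host field are illustrative, not part of this
+// package):
+//
+//	func (a *Addr) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//		var host string
+//		if err := unmarshal(&host); err == nil {
+//			// The node was a plain scalar; treat it as the host name.
+//			a.Host = host
+//			return nil
+//		}
+//		// Otherwise decode the full mapping form; the named type avoids
+//		// recursing back into this method.
+//		type plain Addr
+//		return unmarshal((*plain)(a))
+//	}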
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	var t T
+//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//	omitempty	Only include the field if it's not set to the zero
+//			value for the type or to empty slices or maps.
+//			Does not apply to zero valued structs.
+//
+//	flow		Marshal using a flow style (useful for structs,
+//			sequences and maps).
+//
+//	inline		Inline the field, which must be a struct or a map,
+//			causing all of its fields or keys to be processed as
+//			if they were part of the outer struct. For maps, keys
+//			must not conflict with the yaml keys of other struct
+//			fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int "a,omitempty"
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
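+						// (The resulting index chain is walked like
+						// reflect.Value.FieldByIndex: e.g. Inline == []int{0, 2}
+						// addresses field 2 of the struct stored in field 0 of
+						// the outer struct.)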
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} === added file 'src/gopkg.in/yaml.v2/yamlh.go' --- src/gopkg.in/yaml.v2/yamlh.go 1970-01-01 00:00:00 +0000 +++ src/gopkg.in/yaml.v2/yamlh.go 2016-03-22 15:18:22 +0000 @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
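+
+    // As an informal note, the first character of a stream carries the
+    // zero mark {index: 0, line: 0, column: 0}; all three fields count
+    // from zero (an inference from how the reader advances them, not
+    // original libyaml documentation).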
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+    // Let the emitter choose the style.
+    yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+    yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+    yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+    yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+    yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+    yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+    // Let the emitter choose the style.
+    yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+    yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+    yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+    // Let the emitter choose the style.
+    yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+    yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+    yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+    // An empty token.
+    yaml_NO_TOKEN yaml_token_type_t = iota
+
+    yaml_STREAM_START_TOKEN // A STREAM-START token.
+    yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+    yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+    yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+    yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+    yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+    yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+    yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+    yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+    yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+    yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+    yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+    yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+    yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+    yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+    yaml_KEY_TOKEN         // A KEY token.
+    yaml_VALUE_TOKEN       // A VALUE token.
+
+    yaml_ALIAS_TOKEN  // An ALIAS token.
+    yaml_ANCHOR_TOKEN // An ANCHOR token.
+    yaml_TAG_TOKEN    // A TAG token.
+    yaml_SCALAR_TOKEN // A SCALAR token.
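+
+    // As an informal illustration (not from the original source), scanning
+    // the document "a: 1" yields roughly this token sequence:
+    //
+    //     STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR("a"),
+    //     VALUE, SCALAR("1"), BLOCK-END, STREAM-END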
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). 
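+    //
+    // For instance, in the sketch below the SCALAR event for "value"
+    // carries the anchor "a", and the later ALIAS event carries the
+    // same name:
+    //
+    //     key1: &a value
+    //     key2: *a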
+    anchor []byte
+
+    // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+    tag []byte
+
+    // The scalar value (for yaml_SCALAR_EVENT).
+    value []byte
+
+    // Is the document start/end indicator implicit, or the tag optional?
+    // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+    implicit bool
+
+    // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+    quoted_implicit bool
+
+    // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+    style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+    yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+    yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+    yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+    yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+    yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+    yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+    yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+    yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+    // Not in original libyaml.
+    yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+    yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+    yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+    yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+    yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+    // An empty node.
+    yaml_NO_NODE yaml_node_type_t = iota
+
+    yaml_SCALAR_NODE   // A scalar node.
+    yaml_SEQUENCE_NODE // A sequence node.
+    yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+    key   int // The key of the element.
+    value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+    typ yaml_node_type_t // The node type.
+    tag []byte           // The node tag.
+
+    // The node data.
+
+    // The scalar parameters (for yaml_SCALAR_NODE).
+    scalar struct {
+        value  []byte              // The scalar value.
+        length int                 // The length of the scalar value.
+        style  yaml_scalar_style_t // The scalar style.
+    }
+
+    // The sequence parameters (for yaml_SEQUENCE_NODE).
+    sequence struct {
+        items_data []yaml_node_item_t    // The stack of sequence items.
+        style      yaml_sequence_style_t // The sequence style.
+    }
+
+    // The mapping parameters (for yaml_MAPPING_NODE).
+    mapping struct {
+        pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+        pairs_start *yaml_node_pair_t    // The beginning of the stack.
+        pairs_end   *yaml_node_pair_t    // The end of the stack.
+        pairs_top   *yaml_node_pair_t    // The top of the stack.
+        style       yaml_mapping_style_t // The mapping style.
+    }
+
+    start_mark yaml_mark_t // The beginning of the node.
+    end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
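+//
+// As a rough illustration (an inference from the node types above, not
+// original documentation), composing the document
+//
+//     - 1
+//     - 2
+//
+// fills nodes with one yaml_SEQUENCE_NODE whose items reference two
+// yaml_SCALAR_NODE entries tagged yaml_INT_TAG.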
+type yaml_document_t struct {
+
+    // The document nodes.
+    nodes []yaml_node_t
+
+    // The version directive.
+    version_directive *yaml_version_directive_t
+
+    // The list of tag directives.
+    tag_directives_data  []yaml_tag_directive_t
+    tag_directives_start int // The beginning of the tag directives list.
+    tag_directives_end   int // The end of the tag directives list.
+
+    start_implicit int // Is the document start indicator implicit?
+    end_implicit   int // Is the document end indicator implicit?
+
+    // The start/end of the document.
+    start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer. The
+// number of written bytes should be set to the size_read variable.
+//
+// [in,out] data        A pointer to application data specified by
+//                      yaml_parser_set_input().
+// [out]    buffer      The buffer to write the data from the source.
+// [in]     size        The size of the buffer.
+// [out]    size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+    possible     bool        // Is a simple key possible?
+    required     bool        // Is a simple key required?
+    token_number int         // The number of the token.
+    mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+    yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+    yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+    yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+    yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+    yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+    yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+    yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+    yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+    yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+    yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+    yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+    yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+    yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+    yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+    yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+    yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
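+
+    // An informal example (not part of the original source): parsing the
+    // single document "a: 1" steps through roughly STREAM_START ->
+    // IMPLICIT_DOCUMENT_START -> BLOCK_NODE -> BLOCK_MAPPING_FIRST_KEY ->
+    // BLOCK_MAPPING_VALUE -> BLOCK_MAPPING_KEY -> DOCUMENT_END -> END.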
+    yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+    switch ps {
+    case yaml_PARSE_STREAM_START_STATE:
+        return "yaml_PARSE_STREAM_START_STATE"
+    case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+        return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+    case yaml_PARSE_DOCUMENT_START_STATE:
+        return "yaml_PARSE_DOCUMENT_START_STATE"
+    case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+        return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+    case yaml_PARSE_DOCUMENT_END_STATE:
+        return "yaml_PARSE_DOCUMENT_END_STATE"
+    case yaml_PARSE_BLOCK_NODE_STATE:
+        return "yaml_PARSE_BLOCK_NODE_STATE"
+    case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+        return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+    case yaml_PARSE_FLOW_NODE_STATE:
+        return "yaml_PARSE_FLOW_NODE_STATE"
+    case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+        return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+    case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+        return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+    case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+        return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+    case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+        return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+    case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+        return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+    case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+        return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+    case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+    case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+    case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+    case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+    case yaml_PARSE_END_STATE:
+        return "yaml_PARSE_END_STATE"
+    }
+    return ""
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+    anchor []byte      // The anchor.
+    index  int         // The node id.
+    mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+    // Error handling
+
+    error yaml_error_type_t // Error type.
+
+    problem string // Error description.
+
+    // The byte about which the problem occurred.
+    problem_offset int
+    problem_value  int
+    problem_mark   yaml_mark_t
+
+    // The error context.
+    context      string
+    context_mark yaml_mark_t
+
+    // Reader stuff
+
+    read_handler yaml_read_handler_t // Read handler.
+
+    input_file io.Reader // File input data.
+    input      []byte    // String input data.
+    input_pos  int
+
+    eof bool // EOF flag.
+
+    buffer     []byte // The working buffer.
+    buffer_pos int    // The current position of the buffer.
+
+    unread int // The number of unread characters in the buffer.
+
+    raw_buffer     []byte // The raw buffer.
+    raw_buffer_pos int    // The current position of the raw buffer.
+
+    encoding yaml_encoding_t // The input encoding.
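+
+    // A hedged sketch of a read handler over the input field above
+    // (illustrative only; the package installs its own handlers
+    // elsewhere):
+    //
+    //     func string_read_handler(p *yaml_parser_t, buf []byte) (int, error) {
+    //         if p.input_pos == len(p.input) {
+    //             return 0, io.EOF
+    //         }
+    //         n := copy(buf, p.input[p.input_pos:])
+    //         p.input_pos += n
+    //         return n, nil
+    //     }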
+
+    offset int         // The offset of the current position (in bytes).
+    mark   yaml_mark_t // The mark of the current position.
+
+    // Scanner stuff
+
+    stream_start_produced bool // Have we started to scan the input stream?
+    stream_end_produced   bool // Have we reached the end of the input stream?
+
+    flow_level int // The number of unclosed '[' and '{' indicators.
+
+    tokens          []yaml_token_t // The tokens queue.
+    tokens_head     int            // The head of the tokens queue.
+    tokens_parsed   int            // The number of tokens fetched from the queue.
+    token_available bool           // Does the tokens queue contain a token ready for dequeueing?
+
+    indent  int   // The current indentation level.
+    indents []int // The indentation levels stack.
+
+    simple_key_allowed bool                // May a simple key occur at the current position?
+    simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+    // Parser stuff
+
+    state          yaml_parser_state_t    // The current parser state.
+    states         []yaml_parser_state_t  // The parser states stack.
+    marks          []yaml_mark_t          // The stack of marks.
+    tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+    // Dumper stuff
+
+    aliases []yaml_alias_data_t // The alias data.
+
+    document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write @a size bytes of the
+// @a buffer to the output.
+//
+// @param[in,out]   data        A pointer to application data specified by
+//                              yaml_emitter_set_output().
+// @param[in]       buffer      The buffer with bytes to be written.
+// @param[in]       size        The size of the buffer.
+//
+// @returns On success, the handler should return @c 1. If the handler failed,
+// the returned value should be @c 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+    // Expect STREAM-START.
+    yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+    yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+    yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+    yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+    yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+    yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+    yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+    yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+    yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+    yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+    yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+    yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+    yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+    yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+    yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+    yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+    yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+    yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
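+//
+// A minimal sketch of a write handler that feeds an io.Writer
+// (illustrative; the package's real handlers live elsewhere):
+//
+//     func writer_write_handler(e *yaml_emitter_t, buf []byte) error {
+//         _, err := e.output_file.Write(buf)
+//         return err
+//     }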
+type yaml_emitter_t struct {
+
+    // Error handling
+
+    error   yaml_error_type_t // Error type.
+    problem string            // Error description.
+
+    // Writer stuff
+
+    write_handler yaml_write_handler_t // Write handler.
+
+    output_buffer *[]byte   // String output data.
+    output_file   io.Writer // File output data.
+
+    buffer     []byte // The working buffer.
+    buffer_pos int    // The current position of the buffer.
+
+    raw_buffer     []byte // The raw buffer.
+    raw_buffer_pos int    // The current position of the raw buffer.
+
+    encoding yaml_encoding_t // The stream encoding.
+
+    // Emitter stuff
+
+    canonical   bool         // If the output is in the canonical style?
+    best_indent int          // The number of indentation spaces.
+    best_width  int          // The preferred width of the output lines.
+    unicode     bool         // Allow unescaped non-ASCII characters?
+    line_break  yaml_break_t // The preferred line break.
+
+    state  yaml_emitter_state_t   // The current emitter state.
+    states []yaml_emitter_state_t // The stack of states.
+
+    events      []yaml_event_t // The event queue.
+    events_head int            // The head of the event queue.
+
+    indents []int // The stack of indentation levels.
+
+    tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+    indent int // The current indentation level.
+
+    flow_level int // The current flow level.
+
+    root_context       bool // Is it the document root context?
+    sequence_context   bool // Is it a sequence context?
+    mapping_context    bool // Is it a mapping context?
+    simple_key_context bool // Is it a simple mapping key context?
+
+    line       int  // The current line.
+    column     int  // The current column.
+    whitespace bool // If the last character was a whitespace?
+    indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+    open_ended bool // If an explicit document end is required?
+
+    // Anchor analysis.
+    anchor_data struct {
+        anchor []byte // The anchor value.
+        alias  bool   // Is it an alias?
+    }
+
+    // Tag analysis.
+    tag_data struct {
+        handle []byte // The tag handle.
+        suffix []byte // The tag suffix.
+    }
+
+    // Scalar analysis.
+    scalar_data struct {
+        value                 []byte              // The scalar value.
+        multiline             bool                // Does the scalar contain line breaks?
+        flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+        block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+        single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+        block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+        style                 yaml_scalar_style_t // The output style.
+    }
+
+    // Dumper stuff
+
+    opened bool // If the stream was already opened?
+    closed bool // If the stream was already closed?
+
+    // The information associated with the document nodes.
+    anchors *struct {
+        references int  // The number of references.
+        anchor     int  // The anchor id.
+        serialized bool // If the node has been emitted?
+    }
+
+    last_anchor_id int // The last assigned anchor id.
+
+    document *yaml_document_t // The currently emitted document.
+}

=== added file 'src/gopkg.in/yaml.v2/yamlprivateh.go'
--- src/gopkg.in/yaml.v2/yamlprivateh.go	1970-01-01 00:00:00 +0000
+++ src/gopkg.in/yaml.v2/yamlprivateh.go	2016-03-22 15:18:22 +0000
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+    // The size of the input raw buffer.
+    input_raw_buffer_size = 512
+
+    // The size of the input buffer.
+    // It should be possible to decode the whole raw buffer.
+    input_buffer_size = input_raw_buffer_size * 3
+
+    // The size of the output buffer.
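+    // (A quick check of the sizing arithmetic in this block: the input
+    // buffer above works out to 512*3 = 1536 bytes, leaving room for
+    // encoding conversion to expand the raw input; the raw output buffer
+    // below works out to 128*2+2 = 258 bytes, i.e. room for every UTF-8
+    // byte re-encoded as UTF-16 plus a two-byte BOM.)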
+ output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
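+// For example (an illustrative note), is_breakz(b, i) is true at '\n',
+// at the two-byte NEL sequence 0xC2 0x85, and at a 0x00 terminator,
+// but false at a plain space or tab.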
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} === removed directory 'src/launchpad.net/gomaasapi' === removed file 'src/launchpad.net/gomaasapi/COPYING' --- src/launchpad.net/gomaasapi/COPYING 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/COPYING 1970-01-01 00:00:00 +0000 @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. 
- - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. 
You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. 
In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year> <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program> Copyright (C) <year> <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
=== removed file 'src/launchpad.net/gomaasapi/COPYING.LESSER'
--- src/launchpad.net/gomaasapi/COPYING.LESSER	2013-04-24 22:34:47 +0000
+++ src/launchpad.net/gomaasapi/COPYING.LESSER	1970-01-01 00:00:00 +0000
@@ -1,165 +0,0 @@
-                   GNU LESSER GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
-  This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
-  0. Additional Definitions.
-
-  As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
-  "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
- - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. 
- - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
=== removed file 'src/launchpad.net/gomaasapi/LICENSE'
--- src/launchpad.net/gomaasapi/LICENSE	2014-08-20 15:00:12 +0000
+++ src/launchpad.net/gomaasapi/LICENSE	1970-01-01 00:00:00 +0000
@@ -1,15 +0,0 @@
-Gomaasapi - Go bindings for talking to MAAS
-
-Copyright 2012-2014, Canonical Ltd.
-
-This program is free software: you can redistribute it and/or modify it under
-the terms of the GNU Lesser General Public License as published by the Free
-Software Foundation, either version 3 of the License, or (at your option) any
-later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
-
-See both COPYING and COPYING.LESSER for the full terms of the GNU Lesser
-General Public License.
\ No newline at end of file
=== removed file 'src/launchpad.net/gomaasapi/Makefile'
--- src/launchpad.net/gomaasapi/Makefile	2013-04-24 22:34:47 +0000
+++ src/launchpad.net/gomaasapi/Makefile	1970-01-01 00:00:00 +0000
@@ -1,26 +0,0 @@
-# Build, and run tests.
-check: examples
-	go test ./...
-
-example_source := $(wildcard example/*.go)
-example_binaries := $(patsubst %.go,%,$(example_source))
-
-# Clean up binaries.
-clean:
-	$(RM) $(example_binaries)
-
-# Reformat the source files to match our layout standards.
-format:
-	gofmt -w .
-
-# Invoke gofmt's "simplify" option to streamline the source code.
-simplify:
-	gofmt -w -s .
-
-# Build the examples (we have no tests for them).
-examples: $(example_binaries)
-
-%: %.go
-	go build -o $@ $<
-
-.PHONY: check clean format examples simplify
=== removed file 'src/launchpad.net/gomaasapi/README'
--- src/launchpad.net/gomaasapi/README	2013-04-24 22:34:47 +0000
+++ src/launchpad.net/gomaasapi/README	1970-01-01 00:00:00 +0000
@@ -1,12 +0,0 @@
-.. -*- mode: rst -*-
-
-******************************
-MAAS API client library for Go
-******************************
-
-This library serves as a minimal client for communicating with the MAAS web
-API in Go programs.
-
-For more information see the `project homepage`_.
-
-.. _project homepage: https://launchpad.net/gomaasapi
=== removed file 'src/launchpad.net/gomaasapi/client.go'
--- src/launchpad.net/gomaasapi/client.go	2015-03-26 15:54:39 +0000
+++ src/launchpad.net/gomaasapi/client.go	1970-01-01 00:00:00 +0000
@@ -1,303 +0,0 @@
-// Copyright 2013 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-package gomaasapi
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"mime/multipart"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-)
-
-const (
-	// Number of retries performed when the server returns a 503
-	// response with a 'Retry-after' header. A request will be issued
-	// at most NumberOfRetries + 1 times.
-	NumberOfRetries = 4
-
-	RetryAfterHeaderName = "Retry-After"
-)
-
-// Client represents a way of communicating with a MAAS API instance.
-// It is stateless, so it can have concurrent requests in progress.
-type Client struct {
-	APIURL *url.URL
-	Signer OAuthSigner
-}
-
-// ServerError is an http error (or at least, a non-2xx result) received from
-// the server. It contains the numerical HTTP status code as well as an error
-// string and the response's headers.
-type ServerError struct {
-	error
-	StatusCode int
-	Header     http.Header
-}
-
-// readAndClose reads and closes the given ReadCloser.
-//
-// Trying to read from a nil stream simply returns nil, no error.
-func readAndClose(stream io.ReadCloser) ([]byte, error) {
-	if stream == nil {
-		return nil, nil
-	}
-	defer stream.Close()
-	return ioutil.ReadAll(stream)
-}
-
-// dispatchRequest sends a request to the server, and interprets the response.
-// Client-side errors will return an empty response and a non-nil error. For
-// server-side errors however (i.e. responses with a non 2XX status code), the
-// returned error will be ServerError and the returned body will reflect the
-// server's response. If the server returns a 503 response with a 'Retry-after'
-// header, the request will be transparently retried.
-func (client Client) dispatchRequest(request *http.Request) ([]byte, error) {
-	// First, store the request's body into a []byte to be able to restore it
-	// after each request.
-	bodyContent, err := readAndClose(request.Body)
-	if err != nil {
-		return nil, err
-	}
-	for retry := 0; retry < NumberOfRetries; retry++ {
-		// Restore body before issuing request.
-		newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
-		request.Body = newBody
-		body, err := client.dispatchSingleRequest(request)
-		// If this is a 503 response with a non-void "Retry-After" header: wait
-		// as instructed and retry the request.
-		if err != nil {
-			serverError, ok := err.(ServerError)
-			if ok && serverError.StatusCode == http.StatusServiceUnavailable {
-				retry_time_int, errConv := strconv.Atoi(serverError.Header.Get(RetryAfterHeaderName))
-				if errConv == nil {
-					select {
-					case <-time.After(time.Duration(retry_time_int) * time.Second):
-					}
-					continue
-				}
-			}
-		}
-		return body, err
-	}
-	// Restore body before issuing request.
-	newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
-	request.Body = newBody
-	return client.dispatchSingleRequest(request)
-}
-
-func (client Client) dispatchSingleRequest(request *http.Request) ([]byte, error) {
-	client.Signer.OAuthSign(request)
-	httpClient := http.Client{}
-	// See https://code.google.com/p/go/issues/detail?id=4677
-	// We need to force the connection to close each time so that we don't
-	// hit the above Go bug.
-	request.Close = true
-	response, err := httpClient.Do(request)
-	if err != nil {
-		return nil, err
-	}
-	body, err := readAndClose(response.Body)
-	if err != nil {
-		return nil, err
-	}
-	if response.StatusCode < 200 || response.StatusCode > 299 {
-		msg := fmt.Errorf("gomaasapi: got error back from server: %v (%v)", response.Status, string(body))
-		return body, ServerError{error: msg, StatusCode: response.StatusCode, Header: response.Header}
-	}
-	return body, nil
-}
-
-// GetURL returns the URL to a given resource on the API, based on its URI.
-// The resource URI may be absolute or relative; either way the result is a
-// full absolute URL including the network part.
-func (client Client) GetURL(uri *url.URL) *url.URL {
-	return client.APIURL.ResolveReference(uri)
-}
-
-// Get performs an HTTP "GET" to the API. This may be either an API method
-// invocation (if you pass its name in "operation") or plain resource
-// retrieval (if you leave "operation" blank).
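The Retry-After handling that dispatchRequest implements can be reproduced with nothing but the standard library. The sketch below is illustrative rather than part of gomaasapi; doWithRetry, its parameters, and the example URL are hypothetical names chosen for this sketch.

    package main

    import (
        "bytes"
        "fmt"
        "io/ioutil"
        "net/http"
        "strconv"
        "time"
    )

    // doWithRetry re-issues a request while the server answers 503 with a
    // parseable Retry-After header, for at most maxRetries extra attempts.
    // The request body is restored from a saved copy before every attempt,
    // just as dispatchRequest does above.
    func doWithRetry(c *http.Client, req *http.Request, body []byte, maxRetries int) (*http.Response, error) {
        for attempt := 0; ; attempt++ {
            req.Body = ioutil.NopCloser(bytes.NewReader(body))
            resp, err := c.Do(req)
            if err != nil {
                return nil, err
            }
            if resp.StatusCode != http.StatusServiceUnavailable || attempt >= maxRetries {
                return resp, nil
            }
            secs, convErr := strconv.Atoi(resp.Header.Get("Retry-After"))
            if convErr != nil {
                // No usable Retry-After header: hand the response back as-is.
                return resp, nil
            }
            resp.Body.Close()
            time.Sleep(time.Duration(secs) * time.Second)
        }
    }

    func main() {
        req, err := http.NewRequest("GET", "http://example.com/", nil)
        if err != nil {
            panic(err)
        }
        resp, err := doWithRetry(http.DefaultClient, req, nil, 4)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }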
-func (client Client) Get(uri *url.URL, operation string, parameters url.Values) ([]byte, error) {
-	if parameters == nil {
-		parameters = make(url.Values)
-	}
-	opParameter := parameters.Get("op")
-	if opParameter != "" {
-		msg := fmt.Errorf("reserved parameter 'op' passed (with value '%s')", opParameter)
-		return nil, msg
-	}
-	if operation != "" {
-		parameters.Set("op", operation)
-	}
-	queryUrl := client.GetURL(uri)
-	queryUrl.RawQuery = parameters.Encode()
-	request, err := http.NewRequest("GET", queryUrl.String(), nil)
-	if err != nil {
-		return nil, err
-	}
-	return client.dispatchRequest(request)
-}
-
-// writeMultiPartFiles writes the given files as parts of a multipart message
-// using the given writer.
-func writeMultiPartFiles(writer *multipart.Writer, files map[string][]byte) error {
-	for fileName, fileContent := range files {
-		fw, err := writer.CreateFormFile(fileName, fileName)
-		if err != nil {
-			return err
-		}
-		if _, err := io.Copy(fw, bytes.NewBuffer(fileContent)); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// writeMultiPartParams writes the given parameters as parts of a multipart
-// message using the given writer.
-func writeMultiPartParams(writer *multipart.Writer, parameters url.Values) error {
-	for key, values := range parameters {
-		for _, value := range values {
-			fw, err := writer.CreateFormField(key)
-			if err != nil {
-				return err
-			}
-			buffer := bytes.NewBufferString(value)
-			if _, err := io.Copy(fw, buffer); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// nonIdempotentRequestFiles implements the common functionality of PUT and
-// POST requests (but not GET or DELETE requests) when uploading files is
-// needed.
-func (client Client) nonIdempotentRequestFiles(method string, uri *url.URL, parameters url.Values, files map[string][]byte) ([]byte, error) {
-	buf := new(bytes.Buffer)
-	writer := multipart.NewWriter(buf)
-	err := writeMultiPartFiles(writer, files)
-	if err != nil {
-		return nil, err
-	}
-	err = writeMultiPartParams(writer, parameters)
-	if err != nil {
-		return nil, err
-	}
-	writer.Close()
-	url := client.GetURL(uri)
-	request, err := http.NewRequest(method, url.String(), buf)
-	if err != nil {
-		return nil, err
-	}
-	request.Header.Set("Content-Type", writer.FormDataContentType())
-	return client.dispatchRequest(request)
-}
-
-// nonIdempotentRequest implements the common functionality of PUT and POST
-// requests (but not GET or DELETE requests).
-func (client Client) nonIdempotentRequest(method string, uri *url.URL, parameters url.Values) ([]byte, error) {
-	url := client.GetURL(uri)
-	request, err := http.NewRequest(method, url.String(), strings.NewReader(string(parameters.Encode())))
-	if err != nil {
-		return nil, err
-	}
-	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-	return client.dispatchRequest(request)
-}
-
-// Post performs an HTTP "POST" to the API. This may be either an API method
-// invocation (if you pass its name in "operation") or plain resource
-// retrieval (if you leave "operation" blank).
-func (client Client) Post(uri *url.URL, operation string, parameters url.Values, files map[string][]byte) ([]byte, error) {
-	queryParams := url.Values{"op": {operation}}
-	uri.RawQuery = queryParams.Encode()
-	if files != nil {
-		return client.nonIdempotentRequestFiles("POST", uri, parameters, files)
-	}
-	return client.nonIdempotentRequest("POST", uri, parameters)
-}
-
-// Put updates an object on the API, using an HTTP "PUT" request.
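The multipart encoding used by the helpers above follows the stock mime/multipart pattern: write the file parts, then the plain form fields, then Close the writer so the terminating boundary is emitted before the body is sent. A self-contained sketch of that pattern; buildMultipart is a hypothetical name for this illustration, not gomaasapi API.

    package main

    import (
        "bytes"
        "fmt"
        "mime/multipart"
        "net/url"
    )

    // buildMultipart assembles file parts and ordinary form fields into one
    // multipart body and returns the body plus its Content-Type (which
    // carries the boundary).
    func buildMultipart(files map[string][]byte, params url.Values) (*bytes.Buffer, string, error) {
        buf := new(bytes.Buffer)
        w := multipart.NewWriter(buf)
        for name, content := range files {
            part, err := w.CreateFormFile(name, name)
            if err != nil {
                return nil, "", err
            }
            if _, err := part.Write(content); err != nil {
                return nil, "", err
            }
        }
        for key, values := range params {
            for _, v := range values {
                if err := w.WriteField(key, v); err != nil {
                    return nil, "", err
                }
            }
        }
        // Close writes the terminating boundary; without it the body is invalid.
        if err := w.Close(); err != nil {
            return nil, "", err
        }
        return buf, w.FormDataContentType(), nil
    }

    func main() {
        body, ctype, err := buildMultipart(
            map[string][]byte{"testfile": []byte("content")},
            url.Values{"filename": {"testfile"}},
        )
        if err != nil {
            panic(err)
        }
        fmt.Println(ctype, body.Len())
    }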
-func (client Client) Put(uri *url.URL, parameters url.Values) ([]byte, error) {
-	return client.nonIdempotentRequest("PUT", uri, parameters)
-}
-
-// Delete deletes an object on the API, using an HTTP "DELETE" request.
-func (client Client) Delete(uri *url.URL) error {
-	url := client.GetURL(uri)
-	request, err := http.NewRequest("DELETE", url.String(), strings.NewReader(""))
-	if err != nil {
-		return err
-	}
-	_, err = client.dispatchRequest(request)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// Anonymous "signature method" implementation.
-type anonSigner struct{}
-
-func (signer anonSigner) OAuthSign(request *http.Request) error {
-	return nil
-}
-
-// anonSigner implements the OAuthSigner interface.
-var _ OAuthSigner = anonSigner{}
-
-func composeAPIURL(BaseURL string, apiVersion string) (*url.URL, error) {
-	baseurl := EnsureTrailingSlash(BaseURL)
-	apiurl := fmt.Sprintf("%sapi/%s/", baseurl, apiVersion)
-	return url.Parse(apiurl)
-}
-
-// NewAnonymousClient creates a client that issues anonymous requests.
-// BaseURL should refer to the root of the MAAS server path, e.g.
-// http://my.maas.server.example.com/MAAS/
-// apiVersion should contain the version of the MAAS API that you want to use.
-func NewAnonymousClient(BaseURL string, apiVersion string) (*Client, error) {
-	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
-	if err != nil {
-		return nil, err
-	}
-	return &Client{Signer: &anonSigner{}, APIURL: parsedBaseURL}, nil
-}
-
-// NewAuthenticatedClient parses the given MAAS API key into the individual
-// OAuth tokens and creates a Client that will use these tokens to sign the
-// requests it issues.
-// BaseURL should refer to the root of the MAAS server path, e.g.
-// http://my.maas.server.example.com/MAAS/
-// apiVersion should contain the version of the MAAS API that you want to use.
-func NewAuthenticatedClient(BaseURL string, apiKey string, apiVersion string) (*Client, error) {
-	elements := strings.Split(apiKey, ":")
-	if len(elements) != 3 {
-		errString := "invalid API key %q; expected \"<consumer>:<token>:<secret>\""
-		return nil, fmt.Errorf(errString, apiKey)
-	}
-	token := &OAuthToken{
-		ConsumerKey: elements[0],
-		// The consumer secret is the empty string in MAAS' authentication.
-		ConsumerSecret: "",
-		TokenKey:       elements[1],
-		TokenSecret:    elements[2],
-	}
-	signer, err := NewPlainTestOAuthSigner(token, "MAAS API")
-	if err != nil {
-		return nil, err
-	}
-	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
-	if err != nil {
-		return nil, err
-	}
-	return &Client{Signer: signer, APIURL: parsedBaseURL}, nil
-}
=== removed file 'src/launchpad.net/gomaasapi/client_test.go'
--- src/launchpad.net/gomaasapi/client_test.go	2015-03-26 15:54:39 +0000
+++ src/launchpad.net/gomaasapi/client_test.go	1970-01-01 00:00:00 +0000
@@ -1,311 +0,0 @@
-// Copyright 2013 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-package gomaasapi
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	. "launchpad.net/gocheck"
-	"net/http"
-	"net/url"
-	"strings"
-)
-
-type ClientSuite struct{}
-
-var _ = Suite(&ClientSuite{})
-
-func (*ClientSuite) TestReadAndCloseReturnsEmptyStringForNil(c *C) {
-	data, err := readAndClose(nil)
-	c.Assert(err, IsNil)
-	c.Check(string(data), Equals, "")
-}
-
-func (*ClientSuite) TestReadAndCloseReturnsContents(c *C) {
-	content := "Stream contents."
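composeAPIURL and the API-key handling above reduce to two small rules: the API URL is <base>/api/<version>/ with a guaranteed trailing slash, and a MAAS API key is three colon-separated OAuth elements. A standalone sketch of both rules; ensureTrailingSlash is a hypothetical stand-in for the package's EnsureTrailingSlash helper.

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // ensureTrailingSlash appends "/" only when it is missing.
    func ensureTrailingSlash(s string) string {
        if strings.HasSuffix(s, "/") {
            return s
        }
        return s + "/"
    }

    func main() {
        // A MAAS API key is three colon-separated OAuth elements.
        key := "consumerKey:tokenKey:tokenSecret"
        parts := strings.Split(key, ":")
        fmt.Println(len(parts) == 3) // true

        // The API URL is <base>/api/<version>/ with a trailing slash.
        base := ensureTrailingSlash("http://example.com/MAAS")
        apiURL, err := url.Parse(fmt.Sprintf("%sapi/%s/", base, "1.0"))
        if err != nil {
            panic(err)
        }
        fmt.Println(apiURL) // http://example.com/MAAS/api/1.0/
    }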
- stream := ioutil.NopCloser(strings.NewReader(content)) - - data, err := readAndClose(stream) - c.Assert(err, IsNil) - - c.Check(string(data), Equals, content) -} - -func (suite *ClientSuite) TestClientdispatchRequestReturnsServerError(c *C) { - URI := "/some/url/?param1=test" - expectedResult := "expected:result" - server := newSingleServingServer(URI, expectedResult, http.StatusBadRequest) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - request, err := http.NewRequest("GET", server.URL+URI, nil) - - result, err := client.dispatchRequest(request) - - expectedErrorString := fmt.Sprintf("gomaasapi: got error back from server: 400 Bad Request (%v)", expectedResult) - c.Check(err.Error(), Equals, expectedErrorString) - c.Check(err.(ServerError).StatusCode, Equals, 400) - c.Check(string(result), Equals, expectedResult) -} - -func (suite *ClientSuite) TestClientdispatchRequestRetries503(c *C) { - URI := "/some/url/?param1=test" - server := newFlakyServer(URI, 503, NumberOfRetries) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - content := "content" - request, err := http.NewRequest("GET", server.URL+URI, ioutil.NopCloser(strings.NewReader(content))) - - _, err = client.dispatchRequest(request) - - c.Check(err, IsNil) - c.Check(*server.nbRequests, Equals, NumberOfRetries+1) - expectedRequestsContent := make([][]byte, NumberOfRetries+1) - for i := 0; i < NumberOfRetries+1; i++ { - expectedRequestsContent[i] = []byte(content) - } - c.Check(*server.requests, DeepEquals, expectedRequestsContent) -} - -func (suite *ClientSuite) TestClientdispatchRequestDoesntRetry200(c *C) { - URI := "/some/url/?param1=test" - server := newFlakyServer(URI, 200, 10) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - - request, err := http.NewRequest("GET", server.URL+URI, nil) - - _, err = client.dispatchRequest(request) - - c.Check(err, IsNil) - c.Check(*server.nbRequests, Equals, 1) -} - -func (suite *ClientSuite) TestClientdispatchRequestRetriesIsLimited(c *C) { - URI := "/some/url/?param1=test" - // Make the server return 503 responses NumberOfRetries + 1 times. - server := newFlakyServer(URI, 503, NumberOfRetries+1) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - request, err := http.NewRequest("GET", server.URL+URI, nil) - - _, err = client.dispatchRequest(request) - - c.Check(*server.nbRequests, Equals, NumberOfRetries+1) - c.Check(err.(ServerError).StatusCode, Equals, 503) -} - -func (suite *ClientSuite) TestClientDispatchRequestReturnsNonServerError(c *C) { - client, err := NewAnonymousClient("/foo", "1.0") - c.Assert(err, IsNil) - // Create a bad request that will fail to dispatch. - request, err := http.NewRequest("GET", "/", nil) - c.Assert(err, IsNil) - - result, err := client.dispatchRequest(request) - - // This type of failure is an error, but not a ServerError. - c.Check(err, NotNil) - c.Check(err, Not(FitsTypeOf), ServerError{}) - // For this kind of error, result is guaranteed to be nil. 
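The fixtures these tests lean on, newSingleServingServer and newFlakyServer, are defined elsewhere in the package's test code. A rough, simplified sketch of the idea using net/http/httptest; newCannedServer is a hypothetical stand-in that does not record requests or limit itself to a single response the way the real helpers do.

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "net/http/httptest"
    )

    // newCannedServer starts a test server that always replies with the
    // given status code and body.
    func newCannedServer(status int, body string) *httptest.Server {
        return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(status)
            fmt.Fprint(w, body)
        }))
    }

    func main() {
        server := newCannedServer(http.StatusBadRequest, "expected:result")
        defer server.Close()

        resp, err := http.Get(server.URL + "/some/url/?param1=test")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        data, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(resp.StatusCode, string(data)) // 400 expected:result
    }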
- c.Check(result, IsNil) -} - -func (suite *ClientSuite) TestClientdispatchRequestSignsRequest(c *C) { - URI := "/some/url/?param1=test" - expectedResult := "expected:result" - server := newSingleServingServer(URI, expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAuthenticatedClient(server.URL, "the:api:key", "1.0") - c.Assert(err, IsNil) - request, err := http.NewRequest("GET", server.URL+URI, nil) - c.Assert(err, IsNil) - - result, err := client.dispatchRequest(request) - - c.Check(err, IsNil) - c.Check(string(result), Equals, expectedResult) - c.Check((*server.requestHeader)["Authorization"][0], Matches, "^OAuth .*") -} - -func (suite *ClientSuite) TestClientGetFormatsGetParameters(c *C) { - URI, err := url.Parse("/some/url") - c.Assert(err, IsNil) - expectedResult := "expected:result" - params := url.Values{"test": {"123"}} - fullURI := URI.String() + "?test=123" - server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - - result, err := client.Get(URI, "", params) - - c.Check(err, IsNil) - c.Check(string(result), Equals, expectedResult) -} - -func (suite *ClientSuite) TestClientGetFormatsOperationAsGetParameter(c *C) { - URI, err := url.Parse("/some/url") - c.Assert(err, IsNil) - expectedResult := "expected:result" - fullURI := URI.String() + "?op=list" - server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - - result, err := client.Get(URI, "list", nil) - - c.Check(err, IsNil) - c.Check(string(result), Equals, expectedResult) -} - -func (suite *ClientSuite) TestClientPostSendsRequestWithParams(c *C) { - URI, err := url.Parse("/some/url") - c.Check(err, IsNil) - expectedResult := "expected:result" - fullURI := URI.String() + "?op=list" - params := url.Values{"test": {"123"}} - server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Check(err, IsNil) - - result, err := client.Post(URI, "list", params, nil) - - c.Check(err, IsNil) - c.Check(string(result), Equals, expectedResult) - postedValues, err := url.ParseQuery(*server.requestContent) - c.Check(err, IsNil) - expectedPostedValues, err := url.ParseQuery("test=123") - c.Check(err, IsNil) - c.Check(postedValues, DeepEquals, expectedPostedValues) -} - -// extractFileContent extracts from the request built using 'requestContent', -// 'requestHeader' and 'requestURL', the file named 'filename'. -func extractFileContent(requestContent string, requestHeader *http.Header, requestURL string, filename string) ([]byte, error) { - // Recreate the request from server.requestContent to use the parsing - // utility from the http package (http.Request.FormFile). 
- request, err := http.NewRequest("POST", requestURL, bytes.NewBufferString(requestContent)) - if err != nil { - return nil, err - } - request.Header.Set("Content-Type", requestHeader.Get("Content-Type")) - file, _, err := request.FormFile("testfile") - if err != nil { - return nil, err - } - fileContent, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - return fileContent, nil -} - -func (suite *ClientSuite) TestClientPostSendsMultipartRequest(c *C) { - URI, err := url.Parse("/some/url") - c.Assert(err, IsNil) - expectedResult := "expected:result" - fullURI := URI.String() + "?op=add" - server := newSingleServingServer(fullURI, expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - fileContent := []byte("content") - files := map[string][]byte{"testfile": fileContent} - - result, err := client.Post(URI, "add", nil, files) - - c.Check(err, IsNil) - c.Check(string(result), Equals, expectedResult) - receivedFileContent, err := extractFileContent(*server.requestContent, server.requestHeader, fullURI, "testfile") - c.Assert(err, IsNil) - c.Check(receivedFileContent, DeepEquals, fileContent) -} - -func (suite *ClientSuite) TestClientPutSendsRequest(c *C) { - URI, err := url.Parse("/some/url") - c.Assert(err, IsNil) - expectedResult := "expected:result" - params := url.Values{"test": {"123"}} - server := newSingleServingServer(URI.String(), expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - - result, err := client.Put(URI, params) - - c.Check(err, IsNil) - c.Check(string(result), Equals, expectedResult) - c.Check(*server.requestContent, Equals, "test=123") -} - -func (suite *ClientSuite) TestClientDeleteSendsRequest(c *C) { - URI, err := url.Parse("/some/url") - c.Assert(err, IsNil) - expectedResult := "expected:result" - server := newSingleServingServer(URI.String(), expectedResult, http.StatusOK) - defer server.Close() - client, err := NewAnonymousClient(server.URL, "1.0") - c.Assert(err, IsNil) - - err = client.Delete(URI) - - c.Check(err, IsNil) -} - -func (suite *ClientSuite) TestNewAnonymousClientEnsuresTrailingSlash(c *C) { - client, err := NewAnonymousClient("http://example.com/", "1.0") - c.Check(err, IsNil) - expectedURL, err := url.Parse("http://example.com/api/1.0/") - c.Assert(err, IsNil) - c.Check(client.APIURL, DeepEquals, expectedURL) -} - -func (suite *ClientSuite) TestNewAuthenticatedClientEnsuresTrailingSlash(c *C) { - client, err := NewAuthenticatedClient("http://example.com/", "a:b:c", "1.0") - c.Check(err, IsNil) - expectedURL, err := url.Parse("http://example.com/api/1.0/") - c.Assert(err, IsNil) - c.Check(client.APIURL, DeepEquals, expectedURL) -} - -func (suite *ClientSuite) TestNewAuthenticatedClientParsesApiKey(c *C) { - // NewAuthenticatedClient returns a plainTextOAuthSigneri configured - // to use the given API key. 
- consumerKey := "consumerKey" - tokenKey := "tokenKey" - tokenSecret := "tokenSecret" - keyElements := []string{consumerKey, tokenKey, tokenSecret} - apiKey := strings.Join(keyElements, ":") - - client, err := NewAuthenticatedClient("http://example.com/", apiKey, "1.0") - - c.Check(err, IsNil) - signer := client.Signer.(*plainTextOAuthSigner) - c.Check(signer.token.ConsumerKey, Equals, consumerKey) - c.Check(signer.token.TokenKey, Equals, tokenKey) - c.Check(signer.token.TokenSecret, Equals, tokenSecret) -} - -func (suite *ClientSuite) TestNewAuthenticatedClientFailsIfInvalidKey(c *C) { - client, err := NewAuthenticatedClient("", "invalid-key", "1.0") - - c.Check(err, ErrorMatches, "invalid API key.*") - c.Check(client, IsNil) - -} - -func (suite *ClientSuite) TestcomposeAPIURLReturnsURL(c *C) { - apiurl, err := composeAPIURL("http://example.com/MAAS", "1.0") - c.Assert(err, IsNil) - expectedURL, err := url.Parse("http://example.com/MAAS/api/1.0/") - c.Check(expectedURL, DeepEquals, apiurl) -} === removed file 'src/launchpad.net/gomaasapi/enum.go' --- src/launchpad.net/gomaasapi/enum.go 2015-09-22 15:27:01 +0000 +++ src/launchpad.net/gomaasapi/enum.go 1970-01-01 00:00:00 +0000 @@ -1,57 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -const ( - // NodeStatus* values represent the vocabulary of a Node‘s possible statuses. - - // The node has been created and has a system ID assigned to it. - NodeStatusDeclared = "0" - - //Testing and other commissioning steps are taking place. - NodeStatusCommissioning = "1" - - // Smoke or burn-in testing has a found a problem. - NodeStatusFailedTests = "2" - - // The node can’t be contacted. - NodeStatusMissing = "3" - - // The node is in the general pool ready to be deployed. - NodeStatusReady = "4" - - // The node is ready for named deployment. - NodeStatusReserved = "5" - - // The node is powering a service from a charm or is ready for use with a fresh Ubuntu install. - NodeStatusDeployed = "6" - - // The node has been removed from service manually until an admin overrides the retirement. - NodeStatusRetired = "7" - - // The node is broken: a step in the node lifecyle failed. More details - // can be found in the node's event log. - NodeStatusBroken = "8" - - // The node is being installed. - NodeStatusDeploying = "9" - - // The node has been allocated to a user and is ready for deployment. - NodeStatusAllocated = "10" - - // The deployment of the node failed. - NodeStatusFailedDeployment = "11" - - // The node is powering down after a release request. - NodeStatusReleasing = "12" - - // The releasing of the node failed. - NodeStatusFailedReleasing = "13" - - // The node is erasing its disks. - NodeStatusDiskErasing = "14" - - // The node failed to erase its disks. - NodeStatusFailedDiskErasing = "15" -) === removed directory 'src/launchpad.net/gomaasapi/example' === removed file 'src/launchpad.net/gomaasapi/example/live_example.go' --- src/launchpad.net/gomaasapi/example/live_example.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/example/live_example.go 1970-01-01 00:00:00 +0000 @@ -1,170 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -/* -This is an example on how the Go library gomaasapi can be used to interact with -a real MAAS server. 
-Note that this is provided only as an example and that real code should probably do something more sensible with errors than ignoring them or panicking.
-*/
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"launchpad.net/gomaasapi"
-	"net/url"
-)
-
-var apiKey string
-var apiURL string
-var apiVersion string
-
-func getParams() {
-	fmt.Println("Warning: this will create a node on the MAAS server; it should be deleted at the end of the run but if something goes wrong, that test node might be left over. You've been warned.")
-	fmt.Print("Enter API key: ")
-	_, err := fmt.Scanf("%s", &apiKey)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Print("Enter API URL: ")
-	_, err = fmt.Scanf("%s", &apiURL)
-	if err != nil {
-		panic(err)
-	}
-
-	fmt.Print("Enter API version: ")
-	_, err = fmt.Scanf("%s", &apiVersion)
-	if err != nil {
-		panic(err)
-	}
-}
-
-func checkError(err error) {
-	if err != nil {
-		panic(err)
-	}
-}
-
-func main() {
-	getParams()
-
-	// Create API server endpoint.
-	authClient, err := gomaasapi.NewAuthenticatedClient(apiURL, apiKey, apiVersion)
-	checkError(err)
-	maas := gomaasapi.NewMAAS(*authClient)
-
-	// Exercise the API.
-	ManipulateNodes(maas)
-	ManipulateFiles(maas)
-
-	fmt.Println("All done.")
-}
-
-// ManipulateFiles exercises the /api/1.0/files/ API endpoint. More precisely,
-// it uploads a file and then fetches it, making sure the received content
-// is the same as the one that was sent.
-func ManipulateFiles(maas *gomaasapi.MAASObject) {
-	files := maas.GetSubObject("files")
-	fileContent := []byte("test file content")
-	fileName := "filename"
-	filesToUpload := map[string][]byte{"file": fileContent}
-
-	// Upload a file.
-	fmt.Println("Uploading a file...")
-	_, err := files.CallPostFiles("add", url.Values{"filename": {fileName}}, filesToUpload)
-	checkError(err)
-	fmt.Println("File sent.")
-
-	// Fetch the file.
-	fmt.Println("Fetching the file...")
-	fileResult, err := files.CallGet("get", url.Values{"filename": {fileName}})
-	checkError(err)
-	receivedFileContent, err := fileResult.GetBytes()
-	checkError(err)
-	if bytes.Compare(receivedFileContent, fileContent) != 0 {
-		panic("Received content differs from the content sent!")
-	}
-	fmt.Println("Got file.")
-
-	// Fetch list of files.
-	listFiles, err := files.CallGet("list", url.Values{})
-	checkError(err)
-	listFilesArray, err := listFiles.GetArray()
-	checkError(err)
-	fmt.Printf("We've got %v file(s)\n", len(listFilesArray))
-
-	// Delete the file.
-	fmt.Println("Deleting the file...")
-	fileObject, err := listFilesArray[0].GetMAASObject()
-	checkError(err)
-	errDelete := fileObject.Delete()
-	checkError(errDelete)
-
-	// Count the files.
-	listFiles, err = files.CallGet("list", url.Values{})
-	checkError(err)
-	listFilesArray, err = listFiles.GetArray()
-	checkError(err)
-	fmt.Printf("We've got %v file(s)\n", len(listFilesArray))
-}
-
-// ManipulateNodes exercises the /api/1.0/nodes/ API endpoint. More precisely,
-// it lists the existing nodes, creates a new node, updates it and then
-// deletes it.
-func ManipulateNodes(maas *gomaasapi.MAASObject) {
-	nodeListing := maas.GetSubObject("nodes")
-
-	// List nodes.
- fmt.Println("Fetching list of nodes...") - listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) - checkError(err) - listNodes, err := listNodeObjects.GetArray() - checkError(err) - fmt.Printf("Got list of %v nodes\n", len(listNodes)) - for index, nodeObj := range listNodes { - node, err := nodeObj.GetMAASObject() - checkError(err) - hostname, err := node.GetField("hostname") - checkError(err) - fmt.Printf("Node #%d is named '%v' (%v)\n", index, hostname, node.URL()) - } - - // Create a node. - fmt.Println("Creating a new node...") - params := url.Values{"architecture": {"i386/generic"}, "mac_addresses": {"AA:BB:CC:DD:EE:FF"}} - newNodeObj, err := nodeListing.CallPost("new", params) - checkError(err) - newNode, err := newNodeObj.GetMAASObject() - checkError(err) - newNodeName, err := newNode.GetField("hostname") - checkError(err) - fmt.Printf("New node created: %s (%s)\n", newNodeName, newNode.URL()) - - // Update the new node. - fmt.Println("Updating the new node...") - updateParams := url.Values{"hostname": {"mynewname"}} - newNodeObj2, err := newNode.Update(updateParams) - checkError(err) - newNodeName2, err := newNodeObj2.GetField("hostname") - checkError(err) - fmt.Printf("New node updated, now named: %s\n", newNodeName2) - - // Count the nodes. - listNodeObjects2, err := nodeListing.CallGet("list", url.Values{}) - checkError(err) - listNodes2, err := listNodeObjects2.GetArray() - checkError(err) - fmt.Printf("We've got %v nodes\n", len(listNodes2)) - - // Delete the new node. - fmt.Println("Deleting the new node...") - errDelete := newNode.Delete() - checkError(errDelete) - - // Count the nodes. - listNodeObjects3, err := nodeListing.CallGet("list", url.Values{}) - checkError(err) - listNodes3, err := listNodeObjects3.GetArray() - checkError(err) - fmt.Printf("We've got %v nodes\n", len(listNodes3)) -} === removed file 'src/launchpad.net/gomaasapi/gomaasapi.go' --- src/launchpad.net/gomaasapi/gomaasapi.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/gomaasapi.go 1970-01-01 00:00:00 +0000 @@ -1,4 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi === removed file 'src/launchpad.net/gomaasapi/gomaasapi_test.go' --- src/launchpad.net/gomaasapi/gomaasapi_test.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/gomaasapi_test.go 1970-01-01 00:00:00 +0000 @@ -1,18 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - . "launchpad.net/gocheck" - "testing" -) - -func Test(t *testing.T) { - TestingT(t) -} - -type GomaasapiTestSuite struct { -} - -var _ = Suite(&GomaasapiTestSuite{}) === removed file 'src/launchpad.net/gomaasapi/jsonobject.go' --- src/launchpad.net/gomaasapi/jsonobject.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/jsonobject.go 1970-01-01 00:00:00 +0000 @@ -1,206 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - "encoding/json" - "errors" - "fmt" -) - -// JSONObject is a wrapper around a JSON structure which provides -// methods to extract data from that structure. -// A JSONObject provides a simple structure consisting of the data types -// defined in JSON: string, number, object, list, and bool. 
To get the
-// value you want out of a JSONObject, you must know (or figure out) which
-// kind of value you have, and then call the appropriate Get*() method to
-// get at it. Reading an item as the wrong type will return an error.
-// For instance, if your JSONObject consists of a number, call GetFloat64()
-// to get the value as a float64. If it's a list, call GetArray() to get
-// a slice of JSONObjects. To read any given item from the slice, you'll
-// need to "Get" that as the right type as well.
-// There is one exception: a MAASObject is really a special kind of map,
-// so you can read it as either.
-// Reading a null item is also an error. So before you try obj.Get*(),
-// first check obj.IsNil().
-type JSONObject struct {
-	// Parsed value. May actually be any of the types a JSONObject can
-	// wrap, except raw bytes. If the object can only be interpreted
-	// as raw bytes, this will be nil.
-	value interface{}
-	// Raw bytes, if this object was parsed directly from an API response.
-	// Is nil for sub-objects found within other objects. An object that
-	// was parsed directly from a response can be both raw bytes and some
-	// other value at the same time.
-	// For example, "[]" looks like a JSON list, so you can read it as an
-	// array. But it may also be the raw contents of a file that just
-	// happens to look like JSON, and so you can read it as raw bytes as
-	// well.
-	bytes []byte
-	// Client for further communication with the API.
-	client Client
-	// Is this a JSON null?
-	isNull bool
-}
-
-// Our JSON processor distinguishes a MAASObject from a jsonMap by the fact
-// that it contains a key "resource_uri". (A regular map might contain the
-// same key through sheer coincidence, but never mind: you can still treat it
-// as a jsonMap and never notice the difference.)
-const resourceURI = "resource_uri"
-
-// maasify turns a completely untyped json.Unmarshal result into a JSONObject
-// (with the appropriate implementation of course). This function is
-// recursive. Maps and arrays are deep-copied, with each individual value
-// being converted to a JSONObject type.
-func maasify(client Client, value interface{}) JSONObject {
-	if value == nil {
-		return JSONObject{isNull: true}
-	}
-	switch value.(type) {
-	case string, float64, bool:
-		return JSONObject{value: value}
-	case map[string]interface{}:
-		original := value.(map[string]interface{})
-		result := make(map[string]JSONObject, len(original))
-		for key, value := range original {
-			result[key] = maasify(client, value)
-		}
-		return JSONObject{value: result, client: client}
-	case []interface{}:
-		original := value.([]interface{})
-		result := make([]JSONObject, len(original))
-		for index, value := range original {
-			result[index] = maasify(client, value)
-		}
-		return JSONObject{value: result}
-	}
-	msg := fmt.Sprintf("Unknown JSON type, can't be converted to JSONObject: %v", value)
-	panic(msg)
-}
-
-// Parse a JSON blob into a JSONObject.
-func Parse(client Client, input []byte) (JSONObject, error) {
-	var obj JSONObject
-	if input == nil {
-		panic(errors.New("Parse() called with nil input"))
-	}
-	var parsed interface{}
-	err := json.Unmarshal(input, &parsed)
-	if err == nil {
-		obj = maasify(client, parsed)
-		obj.bytes = input
-	} else {
-		switch err.(type) {
-		case *json.InvalidUTF8Error:
-		case *json.SyntaxError:
-			// This isn't JSON. Treat it as raw binary data.
- default: - return obj, err - } - obj = JSONObject{value: nil, client: client, bytes: input} - } - return obj, nil -} - -// Return error value for failed type conversion. -func failConversion(wantedType string, obj JSONObject) error { - msg := fmt.Sprintf("Requested %v, got %T.", wantedType, obj.value) - return errors.New(msg) -} - -// MarshalJSON tells the standard json package how to serialize a JSONObject -// back to JSON. -func (obj JSONObject) MarshalJSON() ([]byte, error) { - if obj.IsNil() { - return json.Marshal(nil) - } - return json.Marshal(obj.value) -} - -// With MarshalJSON, JSONObject implements json.Marshaler. -var _ json.Marshaler = (*JSONObject)(nil) - -// IsNil tells you whether a JSONObject is a JSON "null." -// There is one irregularity. If the original JSON blob was actually raw -// data, not JSON, then its IsNil will return false because the object -// contains the binary data as a non-nil value. But, if the original JSON -// blob consisted of a null, then IsNil returns true even though you can -// still retrieve binary data from it. -func (obj JSONObject) IsNil() bool { - if obj.value != nil { - return false - } - if obj.bytes == nil { - return true - } - // This may be a JSON null. We can't expect every JSON null to look - // the same; there may be leading or trailing space. - return obj.isNull -} - -// GetString retrieves the object's value as a string. If the value wasn't -// a JSON string, that's an error. -func (obj JSONObject) GetString() (value string, err error) { - value, ok := obj.value.(string) - if !ok { - err = failConversion("string", obj) - } - return -} - -// GetFloat64 retrieves the object's value as a float64. If the value wasn't -// a JSON number, that's an error. -func (obj JSONObject) GetFloat64() (value float64, err error) { - value, ok := obj.value.(float64) - if !ok { - err = failConversion("float64", obj) - } - return -} - -// GetMap retrieves the object's value as a map. If the value wasn't a JSON -// object, that's an error. -func (obj JSONObject) GetMap() (value map[string]JSONObject, err error) { - value, ok := obj.value.(map[string]JSONObject) - if !ok { - err = failConversion("map", obj) - } - return -} - -// GetArray retrieves the object's value as an array. If the value wasn't a -// JSON list, that's an error. -func (obj JSONObject) GetArray() (value []JSONObject, err error) { - value, ok := obj.value.([]JSONObject) - if !ok { - err = failConversion("array", obj) - } - return -} - -// GetBool retrieves the object's value as a bool. If the value wasn't a JSON -// bool, that's an error. -func (obj JSONObject) GetBool() (value bool, err error) { - value, ok := obj.value.(bool) - if !ok { - err = failConversion("bool", obj) - } - return -} - -// GetBytes retrieves the object's value as raw bytes. A JSONObject that was -// parsed from the original input (as opposed to one that's embedded in -// another JSONObject) can contain both the raw bytes and the parsed JSON -// value, but either can be the case without the other. -// If this object wasn't parsed directly from the original input, that's an -// error. -// If the object was parsed from an original input that just said "null", then -// IsNil will return true but the raw bytes are still available from GetBytes. 
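The recursion in maasify above comes down to a type switch over the handful of dynamic types encoding/json can produce, with map[string]interface{} values containing a "resource_uri" key treated as MAAS objects. A freestanding sketch of that dispatch; describe is a hypothetical helper for this illustration, not the package's API.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // describe reports the JSON "kind" of an untyped json.Unmarshal result,
    // inspecting the same dynamic types maasify switches over.
    func describe(v interface{}) string {
        switch val := v.(type) {
        case nil:
            return "null"
        case string:
            return "string"
        case float64:
            return "number" // encoding/json decodes every JSON number as float64
        case bool:
            return "bool"
        case map[string]interface{}:
            // A MAAS object is recognised by its "resource_uri" key.
            if _, ok := val["resource_uri"]; ok {
                return "maas-object"
            }
            return "object"
        case []interface{}:
            return fmt.Sprintf("array of %d", len(val))
        }
        return "unknown"
    }

    func main() {
        var parsed interface{}
        if err := json.Unmarshal([]byte(`{"resource_uri": "/foo", "size": 3}`), &parsed); err != nil {
            panic(err)
        }
        fmt.Println(describe(parsed)) // maas-object
    }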
-func (obj JSONObject) GetBytes() ([]byte, error) { - if obj.bytes == nil { - return nil, failConversion("bytes", obj) - } - return obj.bytes, nil -} === removed file 'src/launchpad.net/gomaasapi/jsonobject_test.go' --- src/launchpad.net/gomaasapi/jsonobject_test.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/jsonobject_test.go 1970-01-01 00:00:00 +0000 @@ -1,462 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - "encoding/json" - "fmt" - . "launchpad.net/gocheck" -) - -type JSONObjectSuite struct { -} - -var _ = Suite(&JSONObjectSuite{}) - -// maasify() converts nil. -func (suite *JSONObjectSuite) TestMaasifyConvertsNil(c *C) { - c.Check(maasify(Client{}, nil).IsNil(), Equals, true) -} - -// maasify() converts strings. -func (suite *JSONObjectSuite) TestMaasifyConvertsString(c *C) { - const text = "Hello" - out, err := maasify(Client{}, text).GetString() - c.Assert(err, IsNil) - c.Check(out, Equals, text) -} - -// maasify() converts float64 numbers. -func (suite *JSONObjectSuite) TestMaasifyConvertsNumber(c *C) { - const number = 3.1415926535 - num, err := maasify(Client{}, number).GetFloat64() - c.Assert(err, IsNil) - c.Check(num, Equals, number) -} - -// maasify() converts array slices. -func (suite *JSONObjectSuite) TestMaasifyConvertsArray(c *C) { - original := []interface{}{3.0, 2.0, 1.0} - output, err := maasify(Client{}, original).GetArray() - c.Assert(err, IsNil) - c.Check(len(output), Equals, len(original)) -} - -// When maasify() converts an array slice, the result contains JSONObjects. -func (suite *JSONObjectSuite) TestMaasifyArrayContainsJSONObjects(c *C) { - arr, err := maasify(Client{}, []interface{}{9.9}).GetArray() - c.Assert(err, IsNil) - var _ JSONObject = arr[0] - entry, err := arr[0].GetFloat64() - c.Assert(err, IsNil) - c.Check(entry, Equals, 9.9) -} - -// maasify() converts maps. -func (suite *JSONObjectSuite) TestMaasifyConvertsMap(c *C) { - original := map[string]interface{}{"1": "one", "2": "two", "3": "three"} - output, err := maasify(Client{}, original).GetMap() - c.Assert(err, IsNil) - c.Check(len(output), Equals, len(original)) -} - -// When maasify() converts a map, the result contains JSONObjects. -func (suite *JSONObjectSuite) TestMaasifyMapContainsJSONObjects(c *C) { - jsonobj := maasify(Client{}, map[string]interface{}{"key": "value"}) - mp, err := jsonobj.GetMap() - var _ JSONObject = mp["key"] - c.Assert(err, IsNil) - entry, err := mp["key"].GetString() - c.Check(entry, Equals, "value") -} - -// maasify() converts MAAS objects. -func (suite *JSONObjectSuite) TestMaasifyConvertsMAASObject(c *C) { - original := map[string]interface{}{ - "resource_uri": "http://example.com/foo", - "size": "3", - } - obj, err := maasify(Client{}, original).GetMAASObject() - c.Assert(err, IsNil) - c.Check(len(obj.GetMap()), Equals, len(original)) - size, err := obj.GetMap()["size"].GetString() - c.Assert(err, IsNil) - c.Check(size, Equals, "3") -} - -// maasify() passes its client to a MAASObject it creates. -func (suite *JSONObjectSuite) TestMaasifyPassesClientToMAASObject(c *C) { - client := Client{} - original := map[string]interface{}{"resource_uri": "/foo"} - output, err := maasify(client, original).GetMAASObject() - c.Assert(err, IsNil) - c.Check(output.client, Equals, client) -} - -// maasify() passes its client into an array of MAASObjects it creates. 
-func (suite *JSONObjectSuite) TestMaasifyPassesClientIntoArray(c *C) { - client := Client{} - obj := map[string]interface{}{"resource_uri": "/foo"} - list := []interface{}{obj} - jsonobj, err := maasify(client, list).GetArray() - c.Assert(err, IsNil) - out, err := jsonobj[0].GetMAASObject() - c.Assert(err, IsNil) - c.Check(out.client, Equals, client) -} - -// maasify() passes its client into a map of MAASObjects it creates. -func (suite *JSONObjectSuite) TestMaasifyPassesClientIntoMap(c *C) { - client := Client{} - obj := map[string]interface{}{"resource_uri": "/foo"} - mp := map[string]interface{}{"key": obj} - jsonobj, err := maasify(client, mp).GetMap() - c.Assert(err, IsNil) - out, err := jsonobj["key"].GetMAASObject() - c.Assert(err, IsNil) - c.Check(out.client, Equals, client) -} - -// maasify() passes its client all the way down into any MAASObjects in the -// object structure it creates. -func (suite *JSONObjectSuite) TestMaasifyPassesClientAllTheWay(c *C) { - client := Client{} - obj := map[string]interface{}{"resource_uri": "/foo"} - mp := map[string]interface{}{"key": obj} - list := []interface{}{mp} - jsonobj, err := maasify(client, list).GetArray() - c.Assert(err, IsNil) - outerMap, err := jsonobj[0].GetMap() - c.Assert(err, IsNil) - out, err := outerMap["key"].GetMAASObject() - c.Assert(err, IsNil) - c.Check(out.client, Equals, client) -} - -// maasify() converts Booleans. -func (suite *JSONObjectSuite) TestMaasifyConvertsBool(c *C) { - t, err := maasify(Client{}, true).GetBool() - c.Assert(err, IsNil) - f, err := maasify(Client{}, false).GetBool() - c.Assert(err, IsNil) - c.Check(t, Equals, true) - c.Check(f, Equals, false) -} - -// Parse takes you from a JSON blob to a JSONObject. -func (suite *JSONObjectSuite) TestParseMaasifiesJSONBlob(c *C) { - blob := []byte("[12]") - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - - arr, err := obj.GetArray() - c.Assert(err, IsNil) - out, err := arr[0].GetFloat64() - c.Assert(err, IsNil) - c.Check(out, Equals, 12.0) -} - -func (suite *JSONObjectSuite) TestParseKeepsBinaryOriginal(c *C) { - blob := []byte(`"Hi"`) - - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - - text, err := obj.GetString() - c.Assert(err, IsNil) - c.Check(text, Equals, "Hi") - binary, err := obj.GetBytes() - c.Assert(err, IsNil) - c.Check(binary, DeepEquals, blob) -} - -func (suite *JSONObjectSuite) TestParseTreatsInvalidJSONAsBinary(c *C) { - blob := []byte("?x]}y![{z") - - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - - c.Check(obj.IsNil(), Equals, false) - c.Check(obj.value, IsNil) - binary, err := obj.GetBytes() - c.Assert(err, IsNil) - c.Check(binary, DeepEquals, blob) -} - -func (suite *JSONObjectSuite) TestParseTreatsInvalidUTF8AsBinary(c *C) { - // Arbitrary data that is definitely not UTF-8. 
- blob := []byte{220, 8, 129} - - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - - c.Check(obj.IsNil(), Equals, false) - c.Check(obj.value, IsNil) - binary, err := obj.GetBytes() - c.Assert(err, IsNil) - c.Check(binary, DeepEquals, blob) -} - -func (suite *JSONObjectSuite) TestParseTreatsEmptyJSONAsBinary(c *C) { - blob := []byte{} - - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - - c.Check(obj.IsNil(), Equals, false) - data, err := obj.GetBytes() - c.Assert(err, IsNil) - c.Check(data, DeepEquals, blob) -} - -func (suite *JSONObjectSuite) TestParsePanicsOnNilJSON(c *C) { - defer func() { - failure := recover() - c.Assert(failure, NotNil) - c.Check(failure.(error).Error(), Matches, ".*nil input") - }() - Parse(Client{}, nil) -} - -func (suite *JSONObjectSuite) TestParseNullProducesIsNil(c *C) { - blob := []byte("null") - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - c.Check(obj.IsNil(), Equals, true) -} - -func (suite *JSONObjectSuite) TestParseNonNullProducesNonIsNil(c *C) { - blob := []byte("1") - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - c.Check(obj.IsNil(), Equals, false) -} - -func (suite *JSONObjectSuite) TestParseSpacedNullProducesIsNil(c *C) { - blob := []byte(" null ") - obj, err := Parse(Client{}, blob) - c.Assert(err, IsNil) - c.Check(obj.IsNil(), Equals, true) -} - -// String-type JSONObjects convert only to string. -func (suite *JSONObjectSuite) TestConversionsString(c *C) { - obj := maasify(Client{}, "Test string") - - value, err := obj.GetString() - c.Check(err, IsNil) - c.Check(value, Equals, "Test string") - - _, err = obj.GetFloat64() - c.Check(err, NotNil) - _, err = obj.GetMap() - c.Check(err, NotNil) - _, err = obj.GetMAASObject() - c.Check(err, NotNil) - _, err = obj.GetArray() - c.Check(err, NotNil) - _, err = obj.GetBool() - c.Check(err, NotNil) -} - -// Number-type JSONObjects convert only to float64. -func (suite *JSONObjectSuite) TestConversionsFloat64(c *C) { - obj := maasify(Client{}, 1.1) - - value, err := obj.GetFloat64() - c.Check(err, IsNil) - c.Check(value, Equals, 1.1) - - _, err = obj.GetString() - c.Check(err, NotNil) - _, err = obj.GetMap() - c.Check(err, NotNil) - _, err = obj.GetMAASObject() - c.Check(err, NotNil) - _, err = obj.GetArray() - c.Check(err, NotNil) - _, err = obj.GetBool() - c.Check(err, NotNil) -} - -// Map-type JSONObjects convert only to map. -func (suite *JSONObjectSuite) TestConversionsMap(c *C) { - obj := maasify(Client{}, map[string]interface{}{"x": "y"}) - - value, err := obj.GetMap() - c.Check(err, IsNil) - text, err := value["x"].GetString() - c.Check(err, IsNil) - c.Check(text, Equals, "y") - - _, err = obj.GetString() - c.Check(err, NotNil) - _, err = obj.GetFloat64() - c.Check(err, NotNil) - _, err = obj.GetMAASObject() - c.Check(err, NotNil) - _, err = obj.GetArray() - c.Check(err, NotNil) - _, err = obj.GetBool() - c.Check(err, NotNil) -} - -// Array-type JSONObjects convert only to array. -func (suite *JSONObjectSuite) TestConversionsArray(c *C) { - obj := maasify(Client{}, []interface{}{"item"}) - - value, err := obj.GetArray() - c.Check(err, IsNil) - text, err := value[0].GetString() - c.Check(err, IsNil) - c.Check(text, Equals, "item") - - _, err = obj.GetString() - c.Check(err, NotNil) - _, err = obj.GetFloat64() - c.Check(err, NotNil) - _, err = obj.GetMap() - c.Check(err, NotNil) - _, err = obj.GetMAASObject() - c.Check(err, NotNil) - _, err = obj.GetBool() - c.Check(err, NotNil) -} - -// Boolean-type JSONObjects convert only to bool. 
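The null and binary cases above deserve restating, because they are the API's two degenerate outcomes: a parsed JSON null answers true to IsNil yet still carries its raw bytes, while input that fails to decode yields a non-nil JSONObject whose only meaningful getter is GetBytes. Roughly:

    // Sketch: the two degenerate Parse outcomes.
    nullObj, _ := Parse(Client{}, []byte(" null "))
    fmt.Println(nullObj.IsNil())  // true
    raw, _ := nullObj.GetBytes()  // the original bytes remain available

    binObj, _ := Parse(Client{}, []byte("?x]}y![{z")) // not valid JSON
    fmt.Println(binObj.IsNil())   // false: kept as raw binary
    raw, _ = binObj.GetBytes()
    _ = raw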
-func (suite *JSONObjectSuite) TestConversionsBool(c *C) { - obj := maasify(Client{}, false) - - value, err := obj.GetBool() - c.Check(err, IsNil) - c.Check(value, Equals, false) - - _, err = obj.GetString() - c.Check(err, NotNil) - _, err = obj.GetFloat64() - c.Check(err, NotNil) - _, err = obj.GetMap() - c.Check(err, NotNil) - _, err = obj.GetMAASObject() - c.Check(err, NotNil) - _, err = obj.GetArray() - c.Check(err, NotNil) -} - -func (suite *JSONObjectSuite) TestNilSerializesToJSON(c *C) { - output, err := json.Marshal(maasify(Client{}, nil)) - c.Assert(err, IsNil) - c.Check(output, DeepEquals, []byte("null")) -} - -func (suite *JSONObjectSuite) TestEmptyStringSerializesToJSON(c *C) { - output, err := json.Marshal(maasify(Client{}, "")) - c.Assert(err, IsNil) - c.Check(string(output), Equals, `""`) -} - -func (suite *JSONObjectSuite) TestStringSerializesToJSON(c *C) { - text := "Text wrapped in JSON" - output, err := json.Marshal(maasify(Client{}, text)) - c.Assert(err, IsNil) - c.Check(output, DeepEquals, []byte(fmt.Sprintf(`"%s"`, text))) -} - -func (suite *JSONObjectSuite) TestStringIsEscapedInJSON(c *C) { - text := `\"Quote,\" \\backslash, and \'apostrophe\'.` - output, err := json.Marshal(maasify(Client{}, text)) - c.Assert(err, IsNil) - var deserialized string - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized, Equals, text) -} - -func (suite *JSONObjectSuite) TestFloat64SerializesToJSON(c *C) { - number := 3.1415926535 - output, err := json.Marshal(maasify(Client{}, number)) - c.Assert(err, IsNil) - var deserialized float64 - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized, Equals, number) -} - -func (suite *JSONObjectSuite) TestEmptyMapSerializesToJSON(c *C) { - mp := map[string]interface{}{} - output, err := json.Marshal(maasify(Client{}, mp)) - c.Assert(err, IsNil) - var deserialized interface{} - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized.(map[string]interface{}), DeepEquals, mp) -} - -func (suite *JSONObjectSuite) TestMapSerializesToJSON(c *C) { - // Sample data: counting in Japanese. - mp := map[string]interface{}{"one": "ichi", "two": "nii", "three": "san"} - output, err := json.Marshal(maasify(Client{}, mp)) - c.Assert(err, IsNil) - var deserialized interface{} - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized.(map[string]interface{}), DeepEquals, mp) -} - -func (suite *JSONObjectSuite) TestEmptyArraySerializesToJSON(c *C) { - arr := []interface{}{} - output, err := json.Marshal(maasify(Client{}, arr)) - c.Assert(err, IsNil) - var deserialized interface{} - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - // The deserialized value is a slice, and it contains no elements. - // Can't do a regular comparison here because at least in the current - // json implementation, an empty list deserializes as a nil slice, - // not as an empty slice! - // (It doesn't work that way for maps though, for some reason). 
- c.Check(len(deserialized.([]interface{})), Equals, len(arr)) -} - -func (suite *JSONObjectSuite) TestArrayOfStringsSerializesToJSON(c *C) { - value := "item" - output, err := json.Marshal(maasify(Client{}, []interface{}{value})) - c.Assert(err, IsNil) - var deserialized []string - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized, DeepEquals, []string{value}) -} - -func (suite *JSONObjectSuite) TestArrayOfNumbersSerializesToJSON(c *C) { - value := 9.0 - output, err := json.Marshal(maasify(Client{}, []interface{}{value})) - c.Assert(err, IsNil) - var deserialized []float64 - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized, DeepEquals, []float64{value}) -} - -func (suite *JSONObjectSuite) TestArrayPreservesOrderInJSON(c *C) { - // Sample data: counting in Korean. - arr := []interface{}{"jong", "il", "ee", "sam"} - output, err := json.Marshal(maasify(Client{}, arr)) - c.Assert(err, IsNil) - - var deserialized []interface{} - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized, DeepEquals, arr) -} - -func (suite *JSONObjectSuite) TestBoolSerializesToJSON(c *C) { - f, err := json.Marshal(maasify(Client{}, false)) - c.Assert(err, IsNil) - t, err := json.Marshal(maasify(Client{}, true)) - c.Assert(err, IsNil) - - c.Check(f, DeepEquals, []byte("false")) - c.Check(t, DeepEquals, []byte("true")) -} === removed file 'src/launchpad.net/gomaasapi/maas.go' --- src/launchpad.net/gomaasapi/maas.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/maas.go 1970-01-01 00:00:00 +0000 @@ -1,11 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -// NewMAAS returns an interface to the MAAS API as a *MAASObject. -func NewMAAS(client Client) *MAASObject { - attrs := map[string]interface{}{resourceURI: client.APIURL.String()} - obj := newJSONMAASObject(attrs, client) - return &obj -} === removed file 'src/launchpad.net/gomaasapi/maas_test.go' --- src/launchpad.net/gomaasapi/maas_test.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/maas_test.go 1970-01-01 00:00:00 +0000 @@ -1,22 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - . "launchpad.net/gocheck" - "net/url" -) - -type MAASSuite struct{} - -var _ = Suite(&MAASSuite{}) - -func (suite *MAASSuite) TestNewMAASUsesBaseURLFromClient(c *C) { - baseURLString := "https://server.com:888/" - baseURL, _ := url.Parse(baseURLString) - client := Client{APIURL: baseURL} - maas := NewMAAS(client) - URL := maas.URL() - c.Check(URL, DeepEquals, baseURL) -} === removed file 'src/launchpad.net/gomaasapi/maasobject.go' --- src/launchpad.net/gomaasapi/maasobject.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/maasobject.go 1970-01-01 00:00:00 +0000 @@ -1,197 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - "encoding/json" - "errors" - "fmt" - "net/url" -) - -// MAASObject represents a MAAS object as returned by the MAAS API, such as a -// Node or a Tag. -// You can extract a MAASObject out of a JSONObject using -// JSONObject.GetMAASObject. A MAAS API call will usually return either a -// MAASObject or a list of MAASObjects. 
The list itself would be wrapped in -// a JSONObject, so if an API call returns a list of objects "l," you first -// obtain the array using l.GetArray(). Then, for each item "i" in the array, -// obtain the matching MAASObject using i.GetMAASObject(). -type MAASObject struct { - values map[string]JSONObject - client Client - uri *url.URL -} - -// newJSONMAASObject creates a new MAAS object. It will panic if the given map -// does not contain a valid URL for the 'resource_uri' key. -func newJSONMAASObject(jmap map[string]interface{}, client Client) MAASObject { - obj, err := maasify(client, jmap).GetMAASObject() - if err != nil { - panic(err) - } - return obj -} - -// MarshalJSON tells the standard json package how to serialize a MAASObject. -func (obj MAASObject) MarshalJSON() ([]byte, error) { - return json.Marshal(obj.GetMap()) -} - -// With MarshalJSON, MAASObject implements json.Marshaler. -var _ json.Marshaler = (*MAASObject)(nil) - -func marshalNode(node MAASObject) string { - res, _ := json.Marshal(node) - return string(res) - -} - -var noResourceURI = errors.New("not a MAAS object: no 'resource_uri' key") - -// extractURI obtains the "resource_uri" string from a JSONObject map. -func extractURI(attrs map[string]JSONObject) (*url.URL, error) { - uriEntry, ok := attrs[resourceURI] - if !ok { - return nil, noResourceURI - } - uri, err := uriEntry.GetString() - if err != nil { - return nil, fmt.Errorf("invalid resource_uri: %v", uri) - } - resourceURL, err := url.Parse(uri) - if err != nil { - return nil, fmt.Errorf("resource_uri does not contain a valid URL: %v", uri) - } - return resourceURL, nil -} - -// JSONObject getter for a MAAS object. From a decoding perspective, a -// MAASObject is just like a map except it contains a key "resource_uri", and -// it keeps track of the Client you got it from so that you can invoke API -// methods directly on their MAAS objects. -func (obj JSONObject) GetMAASObject() (MAASObject, error) { - attrs, err := obj.GetMap() - if err != nil { - return MAASObject{}, err - } - uri, err := extractURI(attrs) - if err != nil { - return MAASObject{}, err - } - return MAASObject{values: attrs, client: obj.client, uri: uri}, nil -} - -// GetField extracts a string field from this MAAS object. -func (obj MAASObject) GetField(name string) (string, error) { - return obj.values[name].GetString() -} - -// URI is the resource URI for this MAAS object. It is an absolute path, but -// without a network part. -func (obj MAASObject) URI() *url.URL { - // Duplicate the URL. - uri, err := url.Parse(obj.uri.String()) - if err != nil { - panic(err) - } - return uri -} - -// URL returns a full absolute URL (including network part) for this MAAS -// object on the API. -func (obj MAASObject) URL() *url.URL { - return obj.client.GetURL(obj.URI()) -} - -// GetMap returns all of the object's attributes in the form of a map. -func (obj MAASObject) GetMap() map[string]JSONObject { - return obj.values -} - -// GetSubObject returns a new MAASObject representing the API resource found -// at a given sub-path of the current object's resource URI. -func (obj MAASObject) GetSubObject(name string) MAASObject { - uri := obj.URI() - newURL := url.URL{Path: name} - resUrl := uri.ResolveReference(&newURL) - resUrl.Path = EnsureTrailingSlash(resUrl.Path) - input := map[string]interface{}{resourceURI: resUrl.String()} - return newJSONMAASObject(input, obj.client) -} - -var NotImplemented = errors.New("Not implemented") - -// Get retrieves a fresh copy of this MAAS object from the API. 
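GetSubObject above is the navigation primitive: a relative name is resolved against the object's resource URI and given a trailing slash, while an absolute path replaces the path outright. A hypothetical traversal, assuming client is a configured gomaasapi Client:

    // Sketch: walking from the API root to the node listing resource.
    maas := NewMAAS(client)                   // *MAASObject for the API root
    nodeListing := maas.GetSubObject("nodes") // .../api/<version>/nodes/
    fmt.Println(nodeListing.URL())            // absolute URL, host included
    fmt.Println(nodeListing.URI())            // path-only form of the same URI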
-func (obj MAASObject) Get() (MAASObject, error) { - uri := obj.URI() - result, err := obj.client.Get(uri, "", url.Values{}) - if err != nil { - return MAASObject{}, err - } - jsonObj, err := Parse(obj.client, result) - if err != nil { - return MAASObject{}, err - } - return jsonObj.GetMAASObject() -} - -// Post overwrites this object's existing value on the API with those given -// in "params." It returns the object's new value as received from the API. -func (obj MAASObject) Post(params url.Values) (JSONObject, error) { - uri := obj.URI() - result, err := obj.client.Post(uri, "", params, nil) - if err != nil { - return JSONObject{}, err - } - return Parse(obj.client, result) -} - -// Update modifies this object on the API, based on the values given in -// "params." It returns the object's new value as received from the API. -func (obj MAASObject) Update(params url.Values) (MAASObject, error) { - uri := obj.URI() - result, err := obj.client.Put(uri, params) - if err != nil { - return MAASObject{}, err - } - jsonObj, err := Parse(obj.client, result) - if err != nil { - return MAASObject{}, err - } - return jsonObj.GetMAASObject() -} - -// Delete removes this object on the API. -func (obj MAASObject) Delete() error { - uri := obj.URI() - return obj.client.Delete(uri) -} - -// CallGet invokes an idempotent API method on this object. -func (obj MAASObject) CallGet(operation string, params url.Values) (JSONObject, error) { - uri := obj.URI() - result, err := obj.client.Get(uri, operation, params) - if err != nil { - return JSONObject{}, err - } - return Parse(obj.client, result) -} - -// CallPost invokes a non-idempotent API method on this object. -func (obj MAASObject) CallPost(operation string, params url.Values) (JSONObject, error) { - return obj.CallPostFiles(operation, params, nil) -} - -// CallPostFiles invokes a non-idempotent API method on this object. It is -// similar to CallPost but has an extra parameter, 'files', which should -// contain the files that will be uploaded to the API. -func (obj MAASObject) CallPostFiles(operation string, params url.Values, files map[string][]byte) (JSONObject, error) { - uri := obj.URI() - result, err := obj.client.Post(uri, operation, params, files) - if err != nil { - return JSONObject{}, err - } - return Parse(obj.client, result) -} === removed file 'src/launchpad.net/gomaasapi/maasobject_test.go' --- src/launchpad.net/gomaasapi/maasobject_test.go 2014-01-17 09:58:40 +0000 +++ src/launchpad.net/gomaasapi/maasobject_test.go 1970-01-01 00:00:00 +0000 @@ -1,205 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - "encoding/json" - "fmt" - . "launchpad.net/gocheck" - "math/rand" - "net/url" -) - -type MAASObjectSuite struct{} - -var _ = Suite(&MAASObjectSuite{}) - -func makeFakeResourceURI() string { - return "http://example.com/" + fmt.Sprint(rand.Int31()) -} - -// JSONObjects containing MAAS objects convert only to map or to MAASObject. 
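The read/write surface above maps one-to-one onto HTTP verbs: Get and CallGet issue GETs, Post, CallPost and CallPostFiles issue POSTs, Update PUTs, and Delete DELETEs, each parsing the server's reply back into package types. A hedged sketch, where node is an already-obtained MAASObject and net/url is imported:

    // Sketch: driving a MAAS object through an operation and a refresh.
    result, err := node.CallPost("start", url.Values{}) // non-idempotent op
    if err == nil {
        _, _ = result.GetMAASObject()
    }
    fresh, _ := node.Get() // fetch the current server-side state
    _ = fresh
    _ = node.Delete()      // and finally remove the resource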
-func (suite *MAASObjectSuite) TestConversionsMAASObject(c *C) { - input := map[string]interface{}{resourceURI: "someplace"} - obj := maasify(Client{}, input) - - mp, err := obj.GetMap() - c.Check(err, IsNil) - text, err := mp[resourceURI].GetString() - c.Check(err, IsNil) - c.Check(text, Equals, "someplace") - - var maasobj MAASObject - maasobj, err = obj.GetMAASObject() - c.Assert(err, IsNil) - c.Check(maasobj, NotNil) - - _, err = obj.GetString() - c.Check(err, NotNil) - _, err = obj.GetFloat64() - c.Check(err, NotNil) - _, err = obj.GetArray() - c.Check(err, NotNil) - _, err = obj.GetBool() - c.Check(err, NotNil) -} - -func (suite *MAASObjectSuite) TestNewJSONMAASObjectPanicsIfNoResourceURI(c *C) { - defer func() { - recoveredError := recover() - c.Check(recoveredError, NotNil) - msg := recoveredError.(error).Error() - c.Check(msg, Matches, ".*no 'resource_uri' key.*") - }() - - input := map[string]interface{}{"test": "test"} - newJSONMAASObject(input, Client{}) -} - -func (suite *MAASObjectSuite) TestNewJSONMAASObjectPanicsIfResourceURINotString(c *C) { - defer func() { - recoveredError := recover() - c.Check(recoveredError, NotNil) - msg := recoveredError.(error).Error() - c.Check(msg, Matches, ".*invalid resource_uri.*") - }() - - input := map[string]interface{}{resourceURI: 77.77} - newJSONMAASObject(input, Client{}) -} - -func (suite *MAASObjectSuite) TestNewJSONMAASObjectPanicsIfResourceURINotURL(c *C) { - defer func() { - recoveredError := recover() - c.Check(recoveredError, NotNil) - msg := recoveredError.(error).Error() - c.Check(msg, Matches, ".*resource_uri.*valid URL.*") - }() - - input := map[string]interface{}{resourceURI: "%z"} - newJSONMAASObject(input, Client{}) -} - -func (suite *MAASObjectSuite) TestNewJSONMAASObjectSetsUpURI(c *C) { - URI, err := url.Parse("http://example.com/a/resource") - c.Assert(err, IsNil) - attrs := map[string]interface{}{resourceURI: URI.String()} - obj := newJSONMAASObject(attrs, Client{}) - c.Check(obj.uri, DeepEquals, URI) -} - -func (suite *MAASObjectSuite) TestURL(c *C) { - baseURL, err := url.Parse("http://example.com/") - c.Assert(err, IsNil) - uri := "http://example.com/a/resource" - resourceURL, err := url.Parse(uri) - c.Assert(err, IsNil) - input := map[string]interface{}{resourceURI: uri} - client := Client{APIURL: baseURL} - obj := newJSONMAASObject(input, client) - - URL := obj.URL() - - c.Check(URL, DeepEquals, resourceURL) -} - -// makeFakeMAASObject creates a MAASObject for some imaginary resource. -// There is no actual HTTP service or resource attached. -// serviceURL is the base URL of the service, and resourceURI is the path for -// the object, relative to serviceURL. -func makeFakeMAASObject(serviceURL, resourcePath string) MAASObject { - baseURL, err := url.Parse(serviceURL) - if err != nil { - panic(fmt.Errorf("creation of fake object failed: %v", err)) - } - uri := serviceURL + resourcePath - input := map[string]interface{}{resourceURI: uri} - client := Client{APIURL: baseURL} - return newJSONMAASObject(input, client) -} - -// Passing GetSubObject a relative path effectively concatenates that path to -// the original object's resource URI. -func (suite *MAASObjectSuite) TestGetSubObjectRelative(c *C) { - obj := makeFakeMAASObject("http://example.com/", "a/resource/") - - subObj := obj.GetSubObject("test") - subURL := subObj.URL() - - // uri ends with a slash and subName starts with one, but the two paths - // should be concatenated as "http://example.com/a/resource/test/". 
- expectedSubURL, err := url.Parse("http://example.com/a/resource/test/") - c.Assert(err, IsNil) - c.Check(subURL, DeepEquals, expectedSubURL) -} - -// Passing GetSubObject an absolute path effectively substitutes that path for -// the path component in the original object's resource URI. -func (suite *MAASObjectSuite) TestGetSubObjectAbsolute(c *C) { - obj := makeFakeMAASObject("http://example.com/", "a/resource/") - - subObj := obj.GetSubObject("/b/test") - subURL := subObj.URL() - - expectedSubURL, err := url.Parse("http://example.com/b/test/") - c.Assert(err, IsNil) - c.Check(subURL, DeepEquals, expectedSubURL) -} - -// An absolute path passed to GetSubObject is rooted at the server root, not -// at the service root. So every absolute resource URI must repeat the part -// of the path that leads to the service root. This does not double that part -// of the URI. -func (suite *MAASObjectSuite) TestGetSubObjectAbsoluteDoesNotDoubleServiceRoot(c *C) { - obj := makeFakeMAASObject("http://example.com/service", "a/resource/") - - subObj := obj.GetSubObject("/service/test") - subURL := subObj.URL() - - // The "/service" part is not repeated; it must be included. - expectedSubURL, err := url.Parse("http://example.com/service/test/") - c.Assert(err, IsNil) - c.Check(subURL, DeepEquals, expectedSubURL) -} - -// The argument to GetSubObject is a relative path, not a URL. So it won't -// take a query part. The special characters that mark a query are escaped -// so they are recognized as parts of the path. -func (suite *MAASObjectSuite) TestGetSubObjectTakesPathNotURL(c *C) { - obj := makeFakeMAASObject("http://example.com/", "x/") - - subObj := obj.GetSubObject("/y?z") - - c.Check(subObj.URL().String(), Equals, "http://example.com/y%3Fz/") -} - -func (suite *MAASObjectSuite) TestGetField(c *C) { - uri := "http://example.com/a/resource" - fieldName := "field name" - fieldValue := "a value" - input := map[string]interface{}{ - resourceURI: uri, fieldName: fieldValue, - } - obj := newJSONMAASObject(input, Client{}) - value, err := obj.GetField(fieldName) - c.Check(err, IsNil) - c.Check(value, Equals, fieldValue) -} - -func (suite *MAASObjectSuite) TestSerializesToJSON(c *C) { - attrs := map[string]interface{}{ - resourceURI: "http://maas.example.com/", - "counter": 5.0, - "active": true, - "macs": map[string]interface{}{"eth0": "AA:BB:CC:DD:EE:FF"}, - } - obj := maasify(Client{}, attrs) - output, err := json.Marshal(obj) - c.Assert(err, IsNil) - - var deserialized map[string]interface{} - err = json.Unmarshal(output, &deserialized) - c.Assert(err, IsNil) - c.Check(deserialized, DeepEquals, attrs) -} === removed file 'src/launchpad.net/gomaasapi/oauth.go' --- src/launchpad.net/gomaasapi/oauth.go 2014-09-11 18:13:12 +0000 +++ src/launchpad.net/gomaasapi/oauth.go 1970-01-01 00:00:00 +0000 @@ -1,80 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). 
- -package gomaasapi - -import ( - "crypto/rand" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// Not a true uuidgen, but at least creates same length random -func generateNonce() (string, error) { - randBytes := make([]byte, 16) - _, err := rand.Read(randBytes) - if err != nil { - return "", err - } - return fmt.Sprintf("%16x", randBytes), nil -} - -func generateTimestamp() string { - return strconv.Itoa(int(time.Now().Unix())) -} - -type OAuthSigner interface { - OAuthSign(request *http.Request) error -} - -type OAuthToken struct { - ConsumerKey string - ConsumerSecret string - TokenKey string - TokenSecret string -} - -// Trick to ensure *plainTextOAuthSigner implements the OAuthSigner interface. -var _ OAuthSigner = (*plainTextOAuthSigner)(nil) - -type plainTextOAuthSigner struct { - token *OAuthToken - realm string -} - -func NewPlainTestOAuthSigner(token *OAuthToken, realm string) (OAuthSigner, error) { - return &plainTextOAuthSigner{token, realm}, nil -} - -// OAuthSignPLAINTEXT signs the provided request using the OAuth PLAINTEXT -// method: http://oauth.net/core/1.0/#anchor22. -func (signer plainTextOAuthSigner) OAuthSign(request *http.Request) error { - - signature := signer.token.ConsumerSecret + `&` + signer.token.TokenSecret - nonce, err := generateNonce() - if err != nil { - return err - } - authData := map[string]string{ - "realm": signer.realm, - "oauth_consumer_key": signer.token.ConsumerKey, - "oauth_token": signer.token.TokenKey, - "oauth_signature_method": "PLAINTEXT", - "oauth_signature": signature, - "oauth_timestamp": generateTimestamp(), - "oauth_nonce": nonce, - "oauth_version": "1.0", - } - // Build OAuth header. - var authHeader []string - for key, value := range authData { - authHeader = append(authHeader, fmt.Sprintf(`%s="%s"`, key, url.QueryEscape(value))) - } - strHeader := "OAuth " + strings.Join(authHeader, ", ") - request.Header.Add("Authorization", strHeader) - return nil -} === removed directory 'src/launchpad.net/gomaasapi/templates' === removed file 'src/launchpad.net/gomaasapi/templates/source.go' --- src/launchpad.net/gomaasapi/templates/source.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/templates/source.go 1970-01-01 00:00:00 +0000 @@ -1,4 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi === removed file 'src/launchpad.net/gomaasapi/templates/source_test.go' --- src/launchpad.net/gomaasapi/templates/source_test.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/templates/source_test.go 1970-01-01 00:00:00 +0000 @@ -1,17 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - . "launchpad.net/gocheck" -) - -type MyTestSuite struct{} - -var _ = Suite(&MyTestSuite{}) - -// TODO: Replace with real test functions. Give them real names. -func (suite *MyTestSuite) TestXXX(c *C) { - c.Check(2+2, Equals, 4) -} === removed file 'src/launchpad.net/gomaasapi/testing.go' --- src/launchpad.net/gomaasapi/testing.go 2015-03-26 15:54:39 +0000 +++ src/launchpad.net/gomaasapi/testing.go 1970-01-01 00:00:00 +0000 @@ -1,82 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). 
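The signer above implements only the PLAINTEXT method: the signature is simply ConsumerSecret + "&" + TokenSecret, sent alongside a fresh nonce and timestamp in the Authorization header. Usage is brief (note the constructor really is spelled NewPlainTestOAuthSigner in this code; the credentials below are placeholders):

    // Sketch: signing a request with the PLAINTEXT OAuth method.
    token := &OAuthToken{
        ConsumerKey: "ck", ConsumerSecret: "cs", // placeholder credentials
        TokenKey: "tk", TokenSecret: "ts",
    }
    signer, _ := NewPlainTestOAuthSigner(token, "example-realm")
    req, _ := http.NewRequest("GET", "http://maas.example.com/api/1.0/nodes/", nil)
    _ = signer.OAuthSign(req) // adds the "Authorization: OAuth ..." header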
- -package gomaasapi - -import ( - "fmt" - "net/http" - "net/http/httptest" -) - -type singleServingServer struct { - *httptest.Server - requestContent *string - requestHeader *http.Header -} - -// newSingleServingServer creates a single-serving test http server which will -// return only one response as defined by the passed arguments. -func newSingleServingServer(uri string, response string, code int) *singleServingServer { - var requestContent string - var requestHeader http.Header - var requested bool - handler := func(writer http.ResponseWriter, request *http.Request) { - if requested { - http.Error(writer, "Already requested", http.StatusServiceUnavailable) - } - res, err := readAndClose(request.Body) - if err != nil { - panic(err) - } - requestContent = string(res) - requestHeader = request.Header - if request.URL.String() != uri { - errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String()) - http.Error(writer, errorMsg, http.StatusNotFound) - } else { - writer.WriteHeader(code) - fmt.Fprint(writer, response) - } - requested = true - } - server := httptest.NewServer(http.HandlerFunc(handler)) - return &singleServingServer{server, &requestContent, &requestHeader} -} - -type flakyServer struct { - *httptest.Server - nbRequests *int - requests *[][]byte -} - -// newFlakyServer creates a "flaky" test http server which will -// return `nbFlakyResponses` responses with the given code and then a 200 response. -func newFlakyServer(uri string, code int, nbFlakyResponses int) *flakyServer { - nbRequests := 0 - requests := make([][]byte, nbFlakyResponses+1) - handler := func(writer http.ResponseWriter, request *http.Request) { - nbRequests += 1 - body, err := readAndClose(request.Body) - if err != nil { - panic(err) - } - requests[nbRequests-1] = body - if request.URL.String() != uri { - errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String()) - http.Error(writer, errorMsg, http.StatusNotFound) - } else if nbRequests <= nbFlakyResponses { - if code == http.StatusServiceUnavailable { - writer.Header().Set("Retry-After", "0") - } - writer.WriteHeader(code) - fmt.Fprint(writer, "flaky") - } else { - writer.WriteHeader(http.StatusOK) - fmt.Fprint(writer, "ok") - } - - } - server := httptest.NewServer(http.HandlerFunc(handler)) - return &flakyServer{server, &nbRequests, &requests} -} === removed file 'src/launchpad.net/gomaasapi/testservice.go' --- src/launchpad.net/gomaasapi/testservice.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gomaasapi/testservice.go 1970-01-01 00:00:00 +0000 @@ -1,1550 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - "bufio" - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "mime/multipart" - "net" - "net/http" - "net/http/httptest" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - "text/template" - "time" - - "gopkg.in/mgo.v2/bson" -) - -// TestMAASObject is a fake MAAS server MAASObject. -type TestMAASObject struct { - MAASObject - TestServer *TestServer -} - -// checkError is a shorthand helper that panics if err is not nil. -func checkError(err error) { - if err != nil { - panic(err) - } -} - -// NewTestMAAS returns a TestMAASObject that implements the MAASObject -// interface and thus can be used as a test object instead of the one returned -// by gomaasapi.NewMAAS(). 
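Both fakes above earn their keep in the client tests: the single-serving server records exactly one request (body and headers) and refuses a second, while the flaky server fails a fixed number of times before succeeding, which is what exercises retry handling. For instance:

    // Sketch: a server that returns 503 twice, then 200 "ok".
    server := newFlakyServer("/api/1.0/nodes/", http.StatusServiceUnavailable, 2)
    defer server.Close()
    resp, _ := http.Get(server.URL + "/api/1.0/nodes/")
    fmt.Println(resp.StatusCode)    // 503 here; the third call would get 200
    fmt.Println(*server.nbRequests) // 1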
-func NewTestMAAS(version string) *TestMAASObject { - server := NewTestServer(version) - authClient, err := NewAnonymousClient(server.URL, version) - checkError(err) - maas := NewMAAS(*authClient) - return &TestMAASObject{*maas, server} -} - -// Close shuts down the test server. -func (testMAASObject *TestMAASObject) Close() { - testMAASObject.TestServer.Close() -} - -// A TestServer is an HTTP server listening on a system-chosen port on the -// local loopback interface, which simulates the behavior of a MAAS server. -// It is intendend for use in end-to-end HTTP tests using the gomaasapi -// library. -type TestServer struct { - *httptest.Server - serveMux *http.ServeMux - client Client - nodes map[string]MAASObject - ownedNodes map[string]bool - // mapping system_id -> list of operations performed. - nodeOperations map[string][]string - // list of operations performed at the /nodes/ level. - nodesOperations []string - // mapping system_id -> list of Values passed when performing - // operations - nodeOperationRequestValues map[string][]url.Values - // list of Values passed when performing operations at the - // /nodes/ level. - nodesOperationRequestValues []url.Values - files map[string]MAASObject - networks map[string]MAASObject - networksPerNode map[string][]string - ipAddressesPerNetwork map[string][]string - version string - macAddressesPerNetwork map[string]map[string]JSONObject - nodeDetails map[string]string - zones map[string]JSONObject - // bootImages is a map of nodegroup UUIDs to boot-image objects. - bootImages map[string][]JSONObject - // nodegroupsInterfaces is a map of nodegroup UUIDs to interface - // objects. - nodegroupsInterfaces map[string][]JSONObject - - // versionJSON is the response to the /version/ endpoint listing the - // capabilities of the MAAS server. - versionJSON string - - // devices is a map of device UUIDs to devices. - devices map[string]*device -} - -type device struct { - IPAddresses []string - SystemId string - MACAddress string - Parent string - Hostname string - - // Not part of the device definition but used by the template. 
- APIVersion string -} - -func getNodesEndpoint(version string) string { - return fmt.Sprintf("/api/%s/nodes/", version) -} - -func getNodeURL(version, systemId string) string { - return fmt.Sprintf("/api/%s/nodes/%s/", version, systemId) -} - -func getNodeURLRE(version string) *regexp.Regexp { - reString := fmt.Sprintf("^/api/%s/nodes/([^/]*)/$", regexp.QuoteMeta(version)) - return regexp.MustCompile(reString) -} - -func getDevicesEndpoint(version string) string { - return fmt.Sprintf("/api/%s/devices/", version) -} - -func getDeviceURL(version, systemId string) string { - return fmt.Sprintf("/api/%s/devices/%s/", version, systemId) -} - -func getDeviceURLRE(version string) *regexp.Regexp { - reString := fmt.Sprintf("^/api/%s/devices/([^/]*)/$", regexp.QuoteMeta(version)) - return regexp.MustCompile(reString) -} - -func getFilesEndpoint(version string) string { - return fmt.Sprintf("/api/%s/files/", version) -} - -func getFileURL(version, filename string) string { - // Uses URL object so filename is correctly percent-escaped - url := url.URL{} - url.Path = fmt.Sprintf("/api/%s/files/%s/", version, filename) - return url.String() -} - -func getFileURLRE(version string) *regexp.Regexp { - reString := fmt.Sprintf("^/api/%s/files/(.*)/$", regexp.QuoteMeta(version)) - return regexp.MustCompile(reString) -} - -func getNetworksEndpoint(version string) string { - return fmt.Sprintf("/api/%s/networks/", version) -} - -func getNetworkURL(version, name string) string { - return fmt.Sprintf("/api/%s/networks/%s/", version, name) -} - -func getNetworkURLRE(version string) *regexp.Regexp { - reString := fmt.Sprintf("^/api/%s/networks/(.*)/$", regexp.QuoteMeta(version)) - return regexp.MustCompile(reString) -} - -func getIPAddressesEndpoint(version string) string { - return fmt.Sprintf("/api/%s/ipaddresses/", version) -} - -func getMACAddressURL(version, systemId, macAddress string) string { - return fmt.Sprintf("/api/%s/nodes/%s/macs/%s/", version, systemId, url.QueryEscape(macAddress)) -} - -func getVersionURL(version string) string { - return fmt.Sprintf("/api/%s/version/", version) -} - -func getNodegroupsEndpoint(version string) string { - return fmt.Sprintf("/api/%s/nodegroups/", version) -} - -func getNodegroupURL(version, uuid string) string { - return fmt.Sprintf("/api/%s/nodegroups/%s/", version, uuid) -} - -func getNodegroupsInterfacesURLRE(version string) *regexp.Regexp { - reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/interfaces/$", regexp.QuoteMeta(version)) - return regexp.MustCompile(reString) -} - -func getBootimagesURLRE(version string) *regexp.Regexp { - reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/boot-images/$", regexp.QuoteMeta(version)) - return regexp.MustCompile(reString) -} - -func getZonesEndpoint(version string) string { - return fmt.Sprintf("/api/%s/zones/", version) -} - -// Clear clears all the fake data stored and recorded by the test server -// (nodes, recorded operations, etc.). 
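Every path helper above derives from the version string, so a server constructed for version "1.0" serves the whole fake API under /api/1.0/. The usual lifecycle goes through NewTestMAAS, shown earlier in this diff:

    // Sketch: standing up and tearing down the fake MAAS.
    maas := NewTestMAAS("1.0")
    defer maas.Close()
    server := maas.TestServer
    fmt.Println(getNodesEndpoint("1.0"))     // "/api/1.0/nodes/"
    fmt.Println(getNodeURL("1.0", "node-1")) // "/api/1.0/nodes/node-1/"
    _ = server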
-func (server *TestServer) Clear() { - server.nodes = make(map[string]MAASObject) - server.ownedNodes = make(map[string]bool) - server.nodesOperations = make([]string, 0) - server.nodeOperations = make(map[string][]string) - server.nodesOperationRequestValues = make([]url.Values, 0) - server.nodeOperationRequestValues = make(map[string][]url.Values) - server.files = make(map[string]MAASObject) - server.networks = make(map[string]MAASObject) - server.networksPerNode = make(map[string][]string) - server.ipAddressesPerNetwork = make(map[string][]string) - server.macAddressesPerNetwork = make(map[string]map[string]JSONObject) - server.nodeDetails = make(map[string]string) - server.bootImages = make(map[string][]JSONObject) - server.nodegroupsInterfaces = make(map[string][]JSONObject) - server.zones = make(map[string]JSONObject) - server.versionJSON = `{"capabilities": ["networks-management","static-ipaddresses"]}` - server.devices = make(map[string]*device) -} - -// SetVersionJSON sets the JSON response (capabilities) returned from the -// /version/ endpoint. -func (server *TestServer) SetVersionJSON(json string) { - server.versionJSON = json -} - -// NodesOperations returns the list of operations performed at the /nodes/ -// level. -func (server *TestServer) NodesOperations() []string { - return server.nodesOperations -} - -// NodeOperations returns the map containing the list of the operations -// performed for each node. -func (server *TestServer) NodeOperations() map[string][]string { - return server.nodeOperations -} - -// NodesOperationRequestValues returns the list of url.Values extracted -// from the request used when performing operations at the /nodes/ level. -func (server *TestServer) NodesOperationRequestValues() []url.Values { - return server.nodesOperationRequestValues -} - -// NodeOperationRequestValues returns the map containing the list of the -// url.Values extracted from the request used when performing operations -// on nodes. 
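These accessors are the server's audit trail: every operation performed at the /nodes/ level or against a single node is recorded together with the url.Values it carried, so a test can assert on what the client actually sent. For example:

    // Sketch: asserting on recorded operations after exercising a client.
    server.NewNode(`{"system_id": "node-1"}`)
    // ... run the code under test against server.URL ...
    ops := server.NodeOperations()["node-1"] // e.g. []string{"start"}
    sent := server.NodeOperationRequestValues()["node-1"]
    _, _ = ops, sent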
-func (server *TestServer) NodeOperationRequestValues() map[string][]url.Values { - return server.nodeOperationRequestValues -} - -func parseRequestValues(request *http.Request) url.Values { - var requestValues url.Values - if request.Header.Get("Content-Type") == "application/x-www-form-urlencoded" { - if request.PostForm == nil { - if err := request.ParseForm(); err != nil { - panic(err) - } - } - requestValues = request.PostForm - } - return requestValues -} - -func (server *TestServer) addNodesOperation(operation string, request *http.Request) url.Values { - requestValues := parseRequestValues(request) - server.nodesOperations = append(server.nodesOperations, operation) - server.nodesOperationRequestValues = append(server.nodesOperationRequestValues, requestValues) - return requestValues -} - -func (server *TestServer) addNodeOperation(systemId, operation string, request *http.Request) url.Values { - operations, present := server.nodeOperations[systemId] - operationRequestValues, present2 := server.nodeOperationRequestValues[systemId] - if present != present2 { - panic("inconsistent state: nodeOperations and nodeOperationRequestValues don't have the same keys.") - } - requestValues := parseRequestValues(request) - if !present { - operations = []string{operation} - operationRequestValues = []url.Values{requestValues} - } else { - operations = append(operations, operation) - operationRequestValues = append(operationRequestValues, requestValues) - } - server.nodeOperations[systemId] = operations - server.nodeOperationRequestValues[systemId] = operationRequestValues - return requestValues -} - -// NewNode creates a MAAS node. The provided string should be a valid json -// string representing a map and contain a string value for the key -// 'system_id'. e.g. `{"system_id": "mysystemid"}`. -// If one of these conditions is not met, NewNode panics. -func (server *TestServer) NewNode(jsonText string) MAASObject { - var attrs map[string]interface{} - err := json.Unmarshal([]byte(jsonText), &attrs) - checkError(err) - systemIdEntry, hasSystemId := attrs["system_id"] - if !hasSystemId { - panic("The given map json string does not contain a 'system_id' value.") - } - systemId := systemIdEntry.(string) - attrs[resourceURI] = getNodeURL(server.version, systemId) - if _, hasStatus := attrs["status"]; !hasStatus { - attrs["status"] = NodeStatusDeployed - } - obj := newJSONMAASObject(attrs, server.client) - server.nodes[systemId] = obj - return obj -} - -// Nodes returns a map associating all the nodes' system ids with the nodes' -// objects. -func (server *TestServer) Nodes() map[string]MAASObject { - return server.nodes -} - -// OwnedNodes returns a map whose keys represent the nodes that are currently -// allocated. -func (server *TestServer) OwnedNodes() map[string]bool { - return server.ownedNodes -} - -// NewFile creates a file in the test MAAS server. -func (server *TestServer) NewFile(filename string, filecontent []byte) MAASObject { - attrs := make(map[string]interface{}) - attrs[resourceURI] = getFileURL(server.version, filename) - base64Content := base64.StdEncoding.EncodeToString(filecontent) - attrs["content"] = base64Content - attrs["filename"] = filename - - // Allocate an arbitrary URL here. It would be nice if the caller - // could do this, but that would change the API and require many - // changes. 
- escapedName := url.QueryEscape(filename) - attrs["anon_resource_uri"] = "/maas/1.0/files/?op=get_by_key&key=" + escapedName + "_key" - - obj := newJSONMAASObject(attrs, server.client) - server.files[filename] = obj - return obj -} - -func (server *TestServer) Files() map[string]MAASObject { - return server.files -} - -// ChangeNode updates a node with the given key/value. -func (server *TestServer) ChangeNode(systemId, key, value string) { - node, found := server.nodes[systemId] - if !found { - panic("No node with such 'system_id'.") - } - node.GetMap()[key] = maasify(server.client, value) -} - -// NewIPAddress creates a new static IP address reservation for the -// given network and ipAddress. -func (server *TestServer) NewIPAddress(ipAddress, network string) { - if _, found := server.networks[network]; !found { - panic("No such network: " + network) - } - ips, found := server.ipAddressesPerNetwork[network] - if found { - ips = append(ips, ipAddress) - } else { - ips = []string{ipAddress} - } - server.ipAddressesPerNetwork[network] = ips -} - -// RemoveIPAddress removes the given existing ipAddress and returns -// whether it was actually removed. -func (server *TestServer) RemoveIPAddress(ipAddress string) bool { - for network, ips := range server.ipAddressesPerNetwork { - for i, ip := range ips { - if ip == ipAddress { - ips = append(ips[:i], ips[i+1:]...) - server.ipAddressesPerNetwork[network] = ips - return true - } - } - } - return false -} - -// IPAddresses returns the map with network names as keys and slices -// of IP addresses belonging to each network as values. -func (server *TestServer) IPAddresses() map[string][]string { - return server.ipAddressesPerNetwork -} - -// NewNetwork creates a network in the test MAAS server -func (server *TestServer) NewNetwork(jsonText string) MAASObject { - var attrs map[string]interface{} - err := json.Unmarshal([]byte(jsonText), &attrs) - checkError(err) - nameEntry, hasName := attrs["name"] - _, hasIP := attrs["ip"] - _, hasNetmask := attrs["netmask"] - if !hasName || !hasIP || !hasNetmask { - panic("The given map json string does not contain a 'name', 'ip', or 'netmask' value.") - } - // TODO(gz): Sanity checking done on other fields - name := nameEntry.(string) - attrs[resourceURI] = getNetworkURL(server.version, name) - obj := newJSONMAASObject(attrs, server.client) - server.networks[name] = obj - return obj -} - -// NewNodegroupInterface adds a nodegroup-interface, for the specified -// nodegroup, in the test MAAS server. 
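Seeding follows one convention throughout: the factory methods take a JSON string, panic on malformed input or missing required keys, and register the result under its natural key (system_id, filename, network name). A typical setup, with made-up values:

    // Sketch: seeding the fake MAAS before a test.
    server.NewNode(`{"system_id": "node-1", "hostname": "host-1"}`)
    server.NewNetwork(`{"name": "net-0", "ip": "10.0.0.0", "netmask": "255.255.255.0"}`)
    server.NewIPAddress("10.0.0.5", "net-0")   // network must already exist
    server.ChangeNode("node-1", "status", "6") // overwrite one attribute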
-func (server *TestServer) NewNodegroupInterface(uuid, jsonText string) JSONObject { - _, ok := server.bootImages[uuid] - if !ok { - panic("no nodegroup with the given UUID") - } - var attrs map[string]interface{} - err := json.Unmarshal([]byte(jsonText), &attrs) - checkError(err) - requiredMembers := []string{"ip_range_high", "ip_range_low", "broadcast_ip", "static_ip_range_low", "static_ip_range_high", "name", "ip", "subnet_mask", "management", "interface"} - for _, member := range requiredMembers { - _, hasMember := attrs[member] - if !hasMember { - panic(fmt.Sprintf("The given map json string does not contain a required %q", member)) - } - } - obj := maasify(server.client, attrs) - server.nodegroupsInterfaces[uuid] = append(server.nodegroupsInterfaces[uuid], obj) - return obj -} - -func (server *TestServer) ConnectNodeToNetwork(systemId, name string) { - _, hasNode := server.nodes[systemId] - if !hasNode { - panic("no node with the given system id") - } - _, hasNetwork := server.networks[name] - if !hasNetwork { - panic("no network with the given name") - } - networkNames, _ := server.networksPerNode[systemId] - server.networksPerNode[systemId] = append(networkNames, name) -} - -func (server *TestServer) ConnectNodeToNetworkWithMACAddress(systemId, networkName, macAddress string) { - node, hasNode := server.nodes[systemId] - if !hasNode { - panic("no node with the given system id") - } - if _, hasNetwork := server.networks[networkName]; !hasNetwork { - panic("no network with the given name") - } - networkNames, _ := server.networksPerNode[systemId] - server.networksPerNode[systemId] = append(networkNames, networkName) - attrs := make(map[string]interface{}) - attrs[resourceURI] = getMACAddressURL(server.version, systemId, macAddress) - attrs["mac_address"] = macAddress - array := []JSONObject{} - if set, ok := node.GetMap()["macaddress_set"]; ok { - var err error - array, err = set.GetArray() - if err != nil { - panic(err) - } - } - array = append(array, maasify(server.client, attrs)) - node.GetMap()["macaddress_set"] = JSONObject{value: array, client: server.client} - if _, ok := server.macAddressesPerNetwork[networkName]; !ok { - server.macAddressesPerNetwork[networkName] = map[string]JSONObject{} - } - server.macAddressesPerNetwork[networkName][systemId] = maasify(server.client, attrs) -} - -// AddBootImage adds a boot-image object to the specified nodegroup. -func (server *TestServer) AddBootImage(nodegroupUUID string, jsonText string) { - var attrs map[string]interface{} - err := json.Unmarshal([]byte(jsonText), &attrs) - checkError(err) - if _, ok := attrs["architecture"]; !ok { - panic("The boot-image json string does not contain an 'architecture' value.") - } - if _, ok := attrs["release"]; !ok { - panic("The boot-image json string does not contain a 'release' value.") - } - obj := maasify(server.client, attrs) - server.bootImages[nodegroupUUID] = append(server.bootImages[nodegroupUUID], obj) -} - -// AddZone adds a physical zone to the server. -func (server *TestServer) AddZone(name, description string) { - attrs := map[string]interface{}{ - "name": name, - "description": description, - } - obj := maasify(server.client, attrs) - server.zones[name] = obj -} - -// NewTestServer starts and returns a new MAAS test server. The caller should call Close when finished, to shut it down. 
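Note the ordering these panics enforce: AddBootImage must run before NewNodegroupInterface will accept a nodegroup UUID, and both the node and the network must exist before ConnectNodeToNetwork links them. Setup code therefore tends to read top-down:

    // Sketch: the ordering the panics above enforce.
    server.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "trusty"}`)
    server.NewNodegroupInterface("uuid-1", `{
        "ip_range_low": "10.0.0.2", "ip_range_high": "10.0.0.254",
        "static_ip_range_low": "10.0.0.10", "static_ip_range_high": "10.0.0.99",
        "broadcast_ip": "10.0.0.255", "name": "eth0", "ip": "10.0.0.1",
        "subnet_mask": "255.255.255.0", "management": 2, "interface": "eth0"}`)
    server.ConnectNodeToNetwork("node-1", "net-0") // both must already exist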
-func NewTestServer(version string) *TestServer { - server := &TestServer{version: version} - - serveMux := http.NewServeMux() - devicesURL := getDevicesEndpoint(server.version) - // Register handler for '/api//devices/*'. - serveMux.HandleFunc(devicesURL, func(w http.ResponseWriter, r *http.Request) { - devicesHandler(server, w, r) - }) - nodesURL := getNodesEndpoint(server.version) - // Register handler for '/api//nodes/*'. - serveMux.HandleFunc(nodesURL, func(w http.ResponseWriter, r *http.Request) { - nodesHandler(server, w, r) - }) - filesURL := getFilesEndpoint(server.version) - // Register handler for '/api//files/*'. - serveMux.HandleFunc(filesURL, func(w http.ResponseWriter, r *http.Request) { - filesHandler(server, w, r) - }) - networksURL := getNetworksEndpoint(server.version) - // Register handler for '/api//networks/'. - serveMux.HandleFunc(networksURL, func(w http.ResponseWriter, r *http.Request) { - networksHandler(server, w, r) - }) - ipAddressesURL := getIPAddressesEndpoint(server.version) - // Register handler for '/api//ipaddresses/'. - serveMux.HandleFunc(ipAddressesURL, func(w http.ResponseWriter, r *http.Request) { - ipAddressesHandler(server, w, r) - }) - versionURL := getVersionURL(server.version) - // Register handler for '/api//version/'. - serveMux.HandleFunc(versionURL, func(w http.ResponseWriter, r *http.Request) { - versionHandler(server, w, r) - }) - // Register handler for '/api//nodegroups/*'. - nodegroupsURL := getNodegroupsEndpoint(server.version) - serveMux.HandleFunc(nodegroupsURL, func(w http.ResponseWriter, r *http.Request) { - nodegroupsHandler(server, w, r) - }) - - // Register handler for '/api//zones/*'. - zonesURL := getZonesEndpoint(server.version) - serveMux.HandleFunc(zonesURL, func(w http.ResponseWriter, r *http.Request) { - zonesHandler(server, w, r) - }) - - newServer := httptest.NewServer(serveMux) - client, err := NewAnonymousClient(newServer.URL, "1.0") - checkError(err) - server.Server = newServer - server.serveMux = serveMux - server.client = *client - server.Clear() - return server -} - -// devicesHandler handles requests for '/api//devices/*'. -func devicesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { - values, err := url.ParseQuery(r.URL.RawQuery) - checkError(err) - op := values.Get("op") - deviceURLRE := getDeviceURLRE(server.version) - deviceURLMatch := deviceURLRE.FindStringSubmatch(r.URL.Path) - devicesURL := getDevicesEndpoint(server.version) - switch { - case r.URL.Path == devicesURL: - devicesTopLevelHandler(server, w, r, op) - case deviceURLMatch != nil: - // Request for a single device. - deviceHandler(server, w, r, deviceURLMatch[1], op) - default: - // Default handler: not found. - http.NotFoundHandler().ServeHTTP(w, r) - } -} - -// devicesTopLevelHandler handles a request for /api//devices/ -// (with no device id following as part of the path). -func devicesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) { - switch { - case r.Method == "GET" && op == "list": - // Device listing operation. - deviceListingHandler(server, w, r) - case r.Method == "POST" && op == "new": - newDeviceHandler(server, w, r) - default: - w.WriteHeader(http.StatusBadRequest) - } -} - -func macMatches(device *device, macs []string, hasMac bool) bool { - if !hasMac { - return true - } - return contains(macs, device.MACAddress) -} - -// deviceListingHandler handles requests for '/devices/'. 
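Dispatch is uniform across resources: the mux routes on the endpoint prefix, a regular expression distinguishes collection from single-object paths, and the op query parameter selects the operation. For the devices endpoint that works out to, roughly:

    // Routing summary for /api/<version>/devices/ (as implemented above):
    //   GET   /api/1.0/devices/?op=list            -> deviceListingHandler
    //   POST  /api/1.0/devices/?op=new             -> newDeviceHandler
    //   any   /api/1.0/devices/<system_id>/?op=... -> deviceHandler
    //   other collection-level requests -> 400; unknown paths -> 404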
-func deviceListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { - values, err := url.ParseQuery(r.URL.RawQuery) - checkError(err) - // TODO(mfoord): support filtering by hostname and id - macs, hasMac := values["mac_address"] - var matchedDevices []string - for _, device := range server.devices { - if macMatches(device, macs, hasMac) { - matchedDevices = append(matchedDevices, renderDevice(device)) - } - } - json := fmt.Sprintf("[%v]", strings.Join(matchedDevices, ", ")) - - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, json) -} - -var templateFuncs = template.FuncMap{ - "quotedList": func(items []string) string { - var pieces []string - for _, item := range items { - pieces = append(pieces, fmt.Sprintf("%q", item)) - } - return strings.Join(pieces, ", ") - }, -} - -const ( - // The json template for generating new devices. - // TODO(mfoord): set resource_uri in MAC addresses - deviceTemplate = `{ - "macaddress_set": [ - { - "mac_address": "{{.MACAddress}}" - } - ], - "zone": { - "resource_uri": "/MAAS/api/{{.APIVersion}}/zones/default/", - "name": "default", - "description": "" - }, - "parent": "{{.Parent}}", - "ip_addresses": [{{.IPAddresses | quotedList }}], - "hostname": "{{.Hostname}}", - "tag_names": [], - "owner": "maas-admin", - "system_id": "{{.SystemId}}", - "resource_uri": "/MAAS/api/{{.APIVersion}}/devices/{{.SystemId}}/" -}` -) - -func renderDevice(device *device) string { - t := template.New("Device template") - t = t.Funcs(templateFuncs) - t, err := t.Parse(deviceTemplate) - checkError(err) - var buf bytes.Buffer - err = t.Execute(&buf, device) - checkError(err) - return buf.String() -} - -func getValue(values url.Values, value string) (string, bool) { - result, hasResult := values[value] - if !hasResult || len(result) != 1 || result[0] == "" { - return "", false - } - return result[0], true -} - -// newDeviceHandler creates, stores and returns new devices. -func newDeviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { - err := r.ParseForm() - checkError(err) - values := r.PostForm - - // TODO(mfood): generate a "proper" uuid for the system Id. - uuid, err := generateNonce() - checkError(err) - systemId := fmt.Sprintf("node-%v", uuid) - // At least one MAC address must be specified. - // TODO(mfoord) we only support a single MAC in the test server. - mac, hasMac := getValue(values, "mac_addresses") - - // hostname and parent are optional. - // TODO(mfoord): we require both to be set in the test server. - hostname, hasHostname := getValue(values, "hostname") - parent, hasParent := getValue(values, "parent") - if !hasHostname || !hasMac || !hasParent { - w.WriteHeader(http.StatusBadRequest) - return - } - - device := &device{ - MACAddress: mac, - APIVersion: server.version, - Parent: parent, - Hostname: hostname, - SystemId: systemId, - } - - deviceJSON := renderDevice(device) - server.devices[systemId] = device - - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, deviceJSON) - return -} - -// deviceHandler handles requests for '/api//devices//'. 
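newDeviceHandler above requires all three of mac_addresses, hostname and parent, a deliberate tightening relative to the real API that the TODO comments call out. Exercising it directly over HTTP might look like this (server is a running TestServer):

    // Sketch: creating a device against the fake server.
    resp, err := http.PostForm(server.URL+"/api/1.0/devices/?op=new", url.Values{
        "mac_addresses": {"aa:bb:cc:dd:ee:ff"},
        "hostname":      {"device-1"},
        "parent":        {"node-1"},
    })
    // On success err is nil, resp.StatusCode is 200, and the body is the
    // device JSON rendered from deviceTemplate.
    _, _ = resp, err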
-func deviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) { - device, ok := server.devices[systemId] - if !ok { - http.NotFoundHandler().ServeHTTP(w, r) - return - } - if r.Method == "GET" { - deviceJSON := renderDevice(device) - if operation == "" { - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, deviceJSON) - return - } else { - w.WriteHeader(http.StatusBadRequest) - return - } - } - if r.Method == "POST" { - if operation == "claim_sticky_ip_address" { - err := r.ParseForm() - checkError(err) - values := r.PostForm - // TODO(mfoord): support optional mac_address parameter - // TODO(mfoord): requested_address should be optional - // and we should generate one if it isn't provided. - address, hasAddress := getValue(values, "requested_address") - if !hasAddress { - w.WriteHeader(http.StatusBadRequest) - return - } - checkError(err) - device.IPAddresses = append(device.IPAddresses, address) - deviceJSON := renderDevice(device) - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, deviceJSON) - return - } else { - w.WriteHeader(http.StatusBadRequest) - return - } - } else if r.Method == "DELETE" { - delete(server.devices, systemId) - w.WriteHeader(http.StatusNoContent) - return - - } - - // TODO(mfoord): support PUT method for updating device - http.NotFoundHandler().ServeHTTP(w, r) -} - -// nodesHandler handles requests for '/api//nodes/*'. -func nodesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { - values, err := url.ParseQuery(r.URL.RawQuery) - checkError(err) - op := values.Get("op") - nodeURLRE := getNodeURLRE(server.version) - nodeURLMatch := nodeURLRE.FindStringSubmatch(r.URL.Path) - nodesURL := getNodesEndpoint(server.version) - switch { - case r.URL.Path == nodesURL: - nodesTopLevelHandler(server, w, r, op) - case nodeURLMatch != nil: - // Request for a single node. - nodeHandler(server, w, r, nodeURLMatch[1], op) - default: - // Default handler: not found. - http.NotFoundHandler().ServeHTTP(w, r) - } -} - -// nodeHandler handles requests for '/api//nodes//'. -func nodeHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) { - node, ok := server.nodes[systemId] - if !ok { - http.NotFoundHandler().ServeHTTP(w, r) - return - } - if r.Method == "GET" { - if operation == "" { - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, marshalNode(node)) - return - } else if operation == "details" { - nodeDetailsHandler(server, w, r, systemId) - return - } else { - w.WriteHeader(http.StatusBadRequest) - return - } - } - if r.Method == "POST" { - // The only operations supported are "start", "stop" and "release". - if operation == "start" || operation == "stop" || operation == "release" { - // Record operation on node. - server.addNodeOperation(systemId, operation, r) - - if operation == "release" { - delete(server.OwnedNodes(), systemId) - } - - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, marshalNode(node)) - return - } else { - w.WriteHeader(http.StatusBadRequest) - return - } - } - if r.Method == "DELETE" { - delete(server.nodes, systemId) - w.WriteHeader(http.StatusOK) - return - } - http.NotFoundHandler().ServeHTTP(w, r) -} - -func contains(slice []string, val string) bool { - for _, item := range slice { - if item == val { - return true - } - } - return false -} - -// nodeListingHandler handles requests for '/nodes/'. 
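A single node therefore answers exactly four request shapes, with everything else rejected as 400 or 404:

    // Request surface of /api/<version>/nodes/<system_id>/ (per nodeHandler):
    //   GET                           -> the node's JSON
    //   GET    ?op=details            -> bson-encoded lshw/lldp details
    //   POST   ?op=start|stop|release -> records the op; release also un-owns
    //   DELETE                        -> removes the node from the server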
-// nodeListingHandler handles requests for '/nodes/'.
-func nodeListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	ids, hasId := values["id"]
-	var convertedNodes = []map[string]JSONObject{}
-	for systemId, node := range server.nodes {
-		if !hasId || contains(ids, systemId) {
-			convertedNodes = append(convertedNodes, node.GetMap())
-		}
-	}
-	res, err := json.Marshal(convertedNodes)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// nodeDeploymentStatusHandler handles requests for '/nodes/?op=deployment_status'.
-func nodeDeploymentStatusHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	nodes := values["nodes"]
-	var nodeStatus = make(map[string]interface{})
-	for _, systemId := range nodes {
-		node := server.nodes[systemId]
-		field, err := node.GetField("status")
-		if err != nil {
-			continue
-		}
-		switch field {
-		case NodeStatusDeployed:
-			nodeStatus[systemId] = "Deployed"
-		case NodeStatusFailedDeployment:
-			nodeStatus[systemId] = "Failed deployment"
-		default:
-			nodeStatus[systemId] = "Not in Deployment"
-		}
-	}
-	obj := maasify(server.client, nodeStatus)
-	res, err := json.Marshal(obj)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// findFreeNode looks for a node that is currently available, and
-// matches the specified filter.
-func findFreeNode(server *TestServer, filter url.Values) *MAASObject {
-	for systemID, node := range server.Nodes() {
-		_, present := server.OwnedNodes()[systemID]
-		if !present {
-			var agentName, nodeName, zoneName, mem, cpuCores, arch string
-			for k := range filter {
-				switch k {
-				case "agent_name":
-					agentName = filter.Get(k)
-				case "name":
-					nodeName = filter.Get(k)
-				case "zone":
-					zoneName = filter.Get(k)
-				case "mem":
-					mem = filter.Get(k)
-				case "arch":
-					arch = filter.Get(k)
-				case "cpu-cores":
-					cpuCores = filter.Get(k)
-				}
-			}
-			if nodeName != "" && !matchField(node, "hostname", nodeName) {
-				continue
-			}
-			if zoneName != "" && !matchField(node, "zone", zoneName) {
-				continue
-			}
-			if mem != "" && !matchNumericField(node, "memory", mem) {
-				continue
-			}
-			if arch != "" && !matchArchitecture(node, "architecture", arch) {
-				continue
-			}
-			if cpuCores != "" && !matchNumericField(node, "cpu_count", cpuCores) {
-				continue
-			}
-			if agentName != "" {
-				agentNameObj := maasify(server.client, agentName)
-				node.GetMap()["agent_name"] = agentNameObj
-			} else {
-				delete(node.GetMap(), "agent_name")
-			}
-			return &node
-		}
-	}
-	return nil
-}
-
-func matchArchitecture(node MAASObject, k, v string) bool {
-	field, err := node.GetField(k)
-	if err != nil {
-		return false
-	}
-	baseArch := strings.Split(field, "/")
-	return v == baseArch[0]
-}
-
-func matchNumericField(node MAASObject, k, v string) bool {
-	field, ok := node.GetMap()[k]
-	if !ok {
-		return false
-	}
-	nodeVal, err := field.GetFloat64()
-	if err != nil {
-		return false
-	}
-	constraintVal, err := strconv.ParseFloat(v, 64)
-	if err != nil {
-		return false
-	}
-	return constraintVal <= nodeVal
-}
-
-func matchField(node MAASObject, k, v string) bool {
-	field, err := node.GetField(k)
-	if err != nil {
-		return false
-	}
-	return field == v
-}
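The matchers behind findFreeNode above encode two rules worth making explicit: numeric constraints such as "mem" and "cpu-cores" are lower bounds (the node's value must be at least the requested one), and an architecture constraint matches only the base part before the first "/". A self-contained sketch of just those rules, with plain values standing in for MAASObject fields:

    package main

    import (
    	"fmt"
    	"strconv"
    	"strings"
    )

    // matchNumeric reports whether nodeVal satisfies the constraint as a
    // minimum; an unparsable constraint never matches.
    func matchNumeric(nodeVal float64, constraint string) bool {
    	want, err := strconv.ParseFloat(constraint, 64)
    	if err != nil {
    		return false
    	}
    	return want <= nodeVal
    }

    // matchArch compares only the base architecture before the first "/".
    func matchArch(nodeArch, constraint string) bool {
    	return strings.Split(nodeArch, "/")[0] == constraint
    }

    func main() {
    	fmt.Println(matchNumeric(4096, "2048"))          // true: at least 2048
    	fmt.Println(matchNumeric(1024, "2048"))          // false: too small
    	fmt.Println(matchArch("amd64/generic", "amd64")) // true
    }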
-// nodesAcquireHandler simulates acquiring a node.
-func nodesAcquireHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	requestValues := server.addNodesOperation("acquire", r)
-	node := findFreeNode(server, requestValues)
-	if node == nil {
-		w.WriteHeader(http.StatusConflict)
-	} else {
-		systemId, err := node.GetField("system_id")
-		checkError(err)
-		server.OwnedNodes()[systemId] = true
-		res, err := json.Marshal(node)
-		checkError(err)
-		// Record operation.
-		server.addNodeOperation(systemId, "acquire", r)
-		w.WriteHeader(http.StatusOK)
-		fmt.Fprint(w, string(res))
-	}
-}
-
-// nodesReleaseHandler simulates releasing multiple nodes.
-func nodesReleaseHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	server.addNodesOperation("release", r)
-	values := server.NodesOperationRequestValues()
-	systemIds := values[len(values)-1]["nodes"]
-	var unknown []string
-	for _, systemId := range systemIds {
-		if _, ok := server.Nodes()[systemId]; !ok {
-			unknown = append(unknown, systemId)
-		}
-	}
-	if len(unknown) > 0 {
-		w.WriteHeader(http.StatusBadRequest)
-		fmt.Fprintf(w, "Unknown node(s): %s.", strings.Join(unknown, ", "))
-		return
-	}
-	var releasedNodes = []map[string]JSONObject{}
-	for _, systemId := range systemIds {
-		if _, ok := server.OwnedNodes()[systemId]; !ok {
-			continue
-		}
-		delete(server.OwnedNodes(), systemId)
-		node := server.Nodes()[systemId]
-		releasedNodes = append(releasedNodes, node.GetMap())
-	}
-	res, err := json.Marshal(releasedNodes)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// nodesTopLevelHandler handles a request for /api/<version>/nodes/
-// (with no node id following as part of the path).
-func nodesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
-	switch {
-	case r.Method == "GET" && op == "list":
-		// Node listing operation.
-		nodeListingHandler(server, w, r)
-	case r.Method == "GET" && op == "deployment_status":
-		// Node deployment_status operation.
-		nodeDeploymentStatusHandler(server, w, r)
-	case r.Method == "POST" && op == "acquire":
-		nodesAcquireHandler(server, w, r)
-	case r.Method == "POST" && op == "release":
-		nodesReleaseHandler(server, w, r)
-	default:
-		w.WriteHeader(http.StatusBadRequest)
-	}
-}
-
-// AddNodeDetails stores node details, expected in XML format.
-func (server *TestServer) AddNodeDetails(systemId, xmlText string) {
-	_, hasNode := server.nodes[systemId]
-	if !hasNode {
-		panic("no node with the given system id")
-	}
-	server.nodeDetails[systemId] = xmlText
-}
-
-const lldpXML = `
-<?xml version="1.0" encoding="UTF-8"?>
-<lldp/>`
-
-// nodeDetailsHandler handles requests for
-// '/api/<version>/nodes/<system_id>/?op=details'.
-func nodeDetailsHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string) {
-	attrs := make(map[string]interface{})
-	attrs["lldp"] = lldpXML
-	xmlText := server.nodeDetails[systemId]
-	attrs["lshw"] = []byte(xmlText)
-	res, err := bson.Marshal(attrs)
-	checkError(err)
-	w.Header().Set("Content-Type", "application/bson")
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// filesHandler handles requests for '/api/<version>/files/*'.
-func filesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	op := values.Get("op")
-	fileURLRE := getFileURLRE(server.version)
-	fileURLMatch := fileURLRE.FindStringSubmatch(r.URL.Path)
-	fileListingURL := getFilesEndpoint(server.version)
-	switch {
-	case r.Method == "GET" && op == "list" && r.URL.Path == fileListingURL:
-		// File listing operation.
-		fileListingHandler(server, w, r)
-	case op == "get" && r.Method == "GET" && r.URL.Path == fileListingURL:
-		getFileHandler(server, w, r)
-	case op == "add" && r.Method == "POST" && r.URL.Path == fileListingURL:
-		addFileHandler(server, w, r)
-	case fileURLMatch != nil:
-		// Request for a single file.
-		fileHandler(server, w, r, fileURLMatch[1], op)
-	default:
-		// Default handler: not found.
-		http.NotFoundHandler().ServeHTTP(w, r)
-	}
-}
-
-// listFilenames returns the names of those uploaded files whose names start
-// with the given prefix, sorted lexicographically.
-func listFilenames(server *TestServer, prefix string) []string {
-	var filenames = make([]string, 0)
-	for filename := range server.files {
-		if strings.HasPrefix(filename, prefix) {
-			filenames = append(filenames, filename)
-		}
-	}
-	sort.Strings(filenames)
-	return filenames
-}
-
-// stripContent copies a map of attributes representing an uploaded file,
-// but with the "content" attribute removed.
-func stripContent(original map[string]JSONObject) map[string]JSONObject {
-	newMap := make(map[string]JSONObject, len(original)-1)
-	for key, value := range original {
-		if key != "content" {
-			newMap[key] = value
-		}
-	}
-	return newMap
-}
-
-// fileListingHandler handles requests for '/api/<version>/files/?op=list'.
-func fileListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	prefix := values.Get("prefix")
-	filenames := listFilenames(server, prefix)
-
-	// Build a sorted list of the files as map[string]JSONObject objects.
-	convertedFiles := make([]map[string]JSONObject, 0)
-	for _, filename := range filenames {
-		// The "content" attribute is not in the listing.
-		fileMap := stripContent(server.files[filename].GetMap())
-		convertedFiles = append(convertedFiles, fileMap)
-	}
-	res, err := json.Marshal(convertedFiles)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// fileHandler handles requests for '/api/<version>/files/<filename>/'.
-func fileHandler(server *TestServer, w http.ResponseWriter, r *http.Request, filename string, operation string) {
-	switch {
-	case r.Method == "DELETE":
-		delete(server.files, filename)
-		w.WriteHeader(http.StatusOK)
-	case r.Method == "GET":
-		// Retrieve a file's information (including content) as a JSON
-		// object.
-		file, ok := server.files[filename]
-		if !ok {
-			http.NotFoundHandler().ServeHTTP(w, r)
-			return
-		}
-		jsonText, err := json.Marshal(file)
-		if err != nil {
-			panic(err)
-		}
-		w.WriteHeader(http.StatusOK)
-		w.Write(jsonText)
-	default:
-		// Default handler: not found.
-		http.NotFoundHandler().ServeHTTP(w, r)
-	}
-}
-
-// InternalError replies to the request with an HTTP 500 internal error.
-func InternalError(w http.ResponseWriter, r *http.Request, err error) {
-	http.Error(w, err.Error(), http.StatusInternalServerError)
-}
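fileListingHandler combines two small idioms from above: listFilenames filters by prefix and sorts so listings are deterministic, and stripContent shallow-copies the attribute map minus the bulky "content" key so listings stay small. A self-contained sketch of that filter-and-strip pipeline over plain string maps (JSONObject replaced by string values for brevity):

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    // listNames returns the names of files starting with prefix, sorted.
    func listNames(files map[string]map[string]string, prefix string) []string {
    	names := make([]string, 0)
    	for name := range files {
    		if strings.HasPrefix(name, prefix) {
    			names = append(names, name)
    		}
    	}
    	sort.Strings(names)
    	return names
    }

    // strip copies attrs without the "content" key; the original is untouched.
    func strip(attrs map[string]string) map[string]string {
    	out := make(map[string]string, len(attrs)-1)
    	for k, v := range attrs {
    		if k != "content" {
    			out[k] = v
    		}
    	}
    	return out
    }

    func main() {
    	files := map[string]map[string]string{
    		"prefix-b": {"filename": "prefix-b", "content": "...base64..."},
    		"prefix-a": {"filename": "prefix-a", "content": "...base64..."},
    		"other":    {"filename": "other", "content": "...base64..."},
    	}
    	for _, name := range listNames(files, "prefix-") {
    		fmt.Println(strip(files[name])) // no "content" key; names sorted
    	}
    }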
-// getFileHandler handles requests for
-// '/api/<version>/files/?op=get&filename=filename'.
-func getFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	filename := values.Get("filename")
-	file, found := server.files[filename]
-	if !found {
-		http.NotFoundHandler().ServeHTTP(w, r)
-		return
-	}
-	base64Content, err := file.GetField("content")
-	if err != nil {
-		InternalError(w, r, err)
-		return
-	}
-	content, err := base64.StdEncoding.DecodeString(base64Content)
-	if err != nil {
-		InternalError(w, r, err)
-		return
-	}
-	w.Write(content)
-}
-
-func readMultipart(upload *multipart.FileHeader) ([]byte, error) {
-	file, err := upload.Open()
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
-	reader := bufio.NewReader(file)
-	return ioutil.ReadAll(reader)
-}
-
-// addFileHandler handles requests for '/api/<version>/files/?op=add&filename=filename'.
-func addFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	err := r.ParseMultipartForm(10000000)
-	checkError(err)
-
-	filename := r.Form.Get("filename")
-	if filename == "" {
-		panic("upload has no filename")
-	}
-
-	uploads := r.MultipartForm.File
-	if len(uploads) != 1 {
-		panic("the payload should contain one file and one file only")
-	}
-	var upload *multipart.FileHeader
-	for _, uploadContent := range uploads {
-		upload = uploadContent[0]
-	}
-	content, err := readMultipart(upload)
-	checkError(err)
-	server.NewFile(filename, content)
-	w.WriteHeader(http.StatusOK)
-}
-
-// networkListConnectedMACSHandler handles requests for
-// '/api/<version>/networks/<network_name>/?op=list_connected_macs'.
-func networkListConnectedMACSHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	networkURLRE := getNetworkURLRE(server.version)
-	networkURLREMatch := networkURLRE.FindStringSubmatch(r.URL.Path)
-	if networkURLREMatch == nil {
-		http.NotFoundHandler().ServeHTTP(w, r)
-		return
-	}
-	networkName := networkURLREMatch[1]
-	convertedMacAddresses := []map[string]JSONObject{}
-	if macAddresses, ok := server.macAddressesPerNetwork[networkName]; ok {
-		for _, macAddress := range macAddresses {
-			m, err := macAddress.GetMap()
-			checkError(err)
-			convertedMacAddresses = append(convertedMacAddresses, m)
-		}
-	}
-	res, err := json.Marshal(convertedMacAddresses)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// networksHandler handles requests for '/api/<version>/networks/?node=system_id'.
-func networksHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	if r.Method != "GET" {
-		panic("only networks GET operation implemented")
-	}
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	op := values.Get("op")
-	systemId := values.Get("node")
-	if op == "list_connected_macs" {
-		networkListConnectedMACSHandler(server, w, r)
-		return
-	}
-	if op != "" {
-		panic("only list_connected_macs and default operations implemented")
-	}
-	if systemId == "" {
-		panic("network missing associated node system id")
-	}
-	networks := []MAASObject{}
-	if networkNames, hasNetworks := server.networksPerNode[systemId]; hasNetworks {
-		networks = make([]MAASObject, len(networkNames))
-		for i, networkName := range networkNames {
-			networks[i] = server.networks[networkName]
-		}
-	}
-	res, err := json.Marshal(networks)
-	checkError(err)
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// ipAddressesHandler handles requests for '/api/<version>/ipaddresses/'.
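addFileHandler above expects exactly one multipart file part plus a "filename" form field; the client side of that contract is a multipart/form-data POST (the tests further down build the same request in uploadTo). A minimal sketch of such a client; the URL, form-field name, and helper name here are illustrative:

    package main

    import (
    	"bytes"
    	"fmt"
    	"mime/multipart"
    	"net/http"
    )

    func upload(url, filename string, content []byte) (*http.Response, error) {
    	buf := new(bytes.Buffer)
    	w := multipart.NewWriter(buf)
    	part, err := w.CreateFormFile("file", filename)
    	if err != nil {
    		return nil, err
    	}
    	if _, err := part.Write(content); err != nil {
    		return nil, err
    	}
    	// Close writes the terminating boundary; without it the server's
    	// ParseMultipartForm sees a truncated body.
    	if err := w.Close(); err != nil {
    		return nil, err
    	}
    	req, err := http.NewRequest("POST", url, buf)
    	if err != nil {
    		return nil, err
    	}
    	req.Header.Set("Content-Type", w.FormDataContentType())
    	return http.DefaultClient.Do(req)
    }

    func main() {
    	resp, err := upload("http://localhost:8080/api/1.0/files/?op=add&filename=foo", "foo", []byte("hello"))
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(resp.Status)
    }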
-func ipAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { - err := r.ParseForm() - checkError(err) - values := r.Form - op := values.Get("op") - - switch r.Method { - case "GET": - if op != "" { - panic("expected empty op for GET, got " + op) - } - listIPAddressesHandler(server, w, r) - return - case "POST": - switch op { - case "reserve": - reserveIPAddressHandler(server, w, r, values.Get("network"), values.Get("requested_address")) - return - case "release": - releaseIPAddressHandler(server, w, r, values.Get("ip")) - return - default: - panic("expected op=release|reserve for POST, got " + op) - } - } - http.NotFoundHandler().ServeHTTP(w, r) -} - -func marshalIPAddress(server *TestServer, ipAddress string) (JSONObject, error) { - jsonTemplate := `{"alloc_type": 4, "ip": %q, "resource_uri": %q, "created": %q}` - uri := getIPAddressesEndpoint(server.version) - now := time.Now().UTC().Format(time.RFC3339) - bytes := []byte(fmt.Sprintf(jsonTemplate, ipAddress, uri, now)) - return Parse(server.client, bytes) -} - -func badRequestError(w http.ResponseWriter, err error) { - w.WriteHeader(http.StatusBadRequest) - fmt.Fprint(w, err.Error()) -} - -func listIPAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) { - results := []MAASObject{} - for _, ips := range server.IPAddresses() { - for _, ip := range ips { - jsonObj, err := marshalIPAddress(server, ip) - if err != nil { - badRequestError(w, err) - return - } - maasObj, err := jsonObj.GetMAASObject() - if err != nil { - badRequestError(w, err) - return - } - results = append(results, maasObj) - } - } - res, err := json.Marshal(results) - checkError(err) - w.Header().Set("Content-Type", "application/json; charset=utf-8") - w.WriteHeader(http.StatusOK) - fmt.Fprint(w, string(res)) -} - -func reserveIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, network, reqAddress string) { - _, ipNet, err := net.ParseCIDR(network) - if err != nil { - badRequestError(w, fmt.Errorf("Invalid network parameter %s", network)) - return - } - if reqAddress != "" { - // Validate "requested_address" parameter. - reqIP := net.ParseIP(reqAddress) - if reqIP == nil { - badRequestError(w, fmt.Errorf("failed to detect a valid IP address from u'%s'", reqAddress)) - return - } - if !ipNet.Contains(reqIP) { - badRequestError(w, fmt.Errorf("%s is not inside the range %s", reqAddress, ipNet.String())) - return - } - } - // Find the network name matching the parsed CIDR. - foundNetworkName := "" - for netName, netObj := range server.networks { - // Get the "ip" and "netmask" attributes of the network. - netIP, err := netObj.GetField("ip") - checkError(err) - netMask, err := netObj.GetField("netmask") - checkError(err) - - // Convert the netmask string to net.IPMask. - parts := strings.Split(netMask, ".") - ipMask := make(net.IPMask, len(parts)) - for i, part := range parts { - intPart, err := strconv.Atoi(part) - checkError(err) - ipMask[i] = byte(intPart) - } - netNet := &net.IPNet{IP: net.ParseIP(netIP), Mask: ipMask} - if netNet.String() == network { - // Exact match found. - foundNetworkName = netName - break - } - } - if foundNetworkName == "" { - badRequestError(w, fmt.Errorf("No network found matching %s", network)) - return - } - ips, found := server.ipAddressesPerNetwork[foundNetworkName] - if !found { - // This will be the first address. - ips = []string{} - } - reservedIP := "" - if reqAddress != "" { - // Use what the user provided. 
NOTE: Because this is testing
-		// code, no duplicates check is done.
-		reservedIP = reqAddress
-	} else {
-		// Generate an IP in the network range by incrementing the
-		// last byte of the network's IP.
-		firstIP := ipNet.IP
-		firstIP[len(firstIP)-1] += byte(len(ips) + 1)
-		reservedIP = firstIP.String()
-	}
-	ips = append(ips, reservedIP)
-	server.ipAddressesPerNetwork[foundNetworkName] = ips
-	jsonObj, err := marshalIPAddress(server, reservedIP)
-	checkError(err)
-	maasObj, err := jsonObj.GetMAASObject()
-	checkError(err)
-	res, err := json.Marshal(maasObj)
-	checkError(err)
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-func releaseIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, ip string) {
-	if netIP := net.ParseIP(ip); netIP == nil {
-		http.NotFoundHandler().ServeHTTP(w, r)
-		return
-	}
-	if server.RemoveIPAddress(ip) {
-		w.WriteHeader(http.StatusOK)
-		return
-	}
-	http.NotFoundHandler().ServeHTTP(w, r)
-}
-
-// versionHandler handles requests for '/api/<version>/version/'.
-func versionHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	if r.Method != "GET" {
-		panic("only version GET operation implemented")
-	}
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, server.versionJSON)
-}
-
-// nodegroupsHandler handles requests for '/api/<version>/nodegroups/*'.
-func nodegroupsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	values, err := url.ParseQuery(r.URL.RawQuery)
-	checkError(err)
-	op := values.Get("op")
-	bootimagesURLRE := getBootimagesURLRE(server.version)
-	bootimagesURLMatch := bootimagesURLRE.FindStringSubmatch(r.URL.Path)
-	nodegroupsInterfacesURLRE := getNodegroupsInterfacesURLRE(server.version)
-	nodegroupsInterfacesURLMatch := nodegroupsInterfacesURLRE.FindStringSubmatch(r.URL.Path)
-	nodegroupsURL := getNodegroupsEndpoint(server.version)
-	switch {
-	case r.URL.Path == nodegroupsURL:
-		nodegroupsTopLevelHandler(server, w, r, op)
-	case bootimagesURLMatch != nil:
-		bootimagesHandler(server, w, r, bootimagesURLMatch[1], op)
-	case nodegroupsInterfacesURLMatch != nil:
-		nodegroupsInterfacesHandler(server, w, r, nodegroupsInterfacesURLMatch[1], op)
-	default:
-		// Default handler: not found.
-		http.NotFoundHandler().ServeHTTP(w, r)
-	}
-}
-
-// nodegroupsTopLevelHandler handles requests for '/api/<version>/nodegroups/'.
-func nodegroupsTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
-	if r.Method != "GET" || op != "list" {
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	nodegroups := []JSONObject{}
-	for uuid := range server.bootImages {
-		attrs := map[string]interface{}{
-			"uuid":      uuid,
-			resourceURI: getNodegroupURL(server.version, uuid),
-		}
-		obj := maasify(server.client, attrs)
-		nodegroups = append(nodegroups, obj)
-	}
-
-	res, err := json.Marshal(nodegroups)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
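One subtlety in the address-generation branch of reserveIPAddressHandler above: it increments the last byte of ipNet.IP in place, and since net.ParseCIDR returns a slice, that write also mutates the parsed network (harmless here because ipNet is rebuilt on every request, but worth copying in general code). A sketch of the same allocation step with an explicit copy, under that reading; nextIP is a name local to this sketch:

    package main

    import (
    	"fmt"
    	"net"
    )

    // nextIP returns the network address offset by n in the last byte,
    // copying first so the caller's net.IPNet is left untouched.
    func nextIP(ipNet *net.IPNet, n int) net.IP {
    	ip := append(net.IP(nil), ipNet.IP...)
    	ip[len(ip)-1] += byte(n)
    	return ip
    }

    func main() {
    	_, ipNet, err := net.ParseCIDR("0.1.2.0/24")
    	if err != nil {
    		panic(err)
    	}
    	// With one address already reserved, the next grant is offset
    	// len(ips)+1 = 2, matching the "0.1.2.2" expectation in
    	// TestReserveIPAddress further down.
    	fmt.Println(nextIP(ipNet, 2)) // 0.1.2.2
    	fmt.Println(ipNet.IP)         // still 0.1.2.0
    }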
-// bootimagesHandler handles requests for '/api/<version>/nodegroups/<uuid>/boot-images/'.
-func bootimagesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
-	if r.Method != "GET" {
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	bootImages, ok := server.bootImages[nodegroupUUID]
-	if !ok {
-		http.NotFoundHandler().ServeHTTP(w, r)
-		return
-	}
-
-	res, err := json.Marshal(bootImages)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// nodegroupsInterfacesHandler handles requests for
-// '/api/<version>/nodegroups/<uuid>/interfaces/'.
-func nodegroupsInterfacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
-	if r.Method != "GET" {
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-	_, ok := server.bootImages[nodegroupUUID]
-	if !ok {
-		http.NotFoundHandler().ServeHTTP(w, r)
-		return
-	}
-
-	interfaces, ok := server.nodegroupsInterfaces[nodegroupUUID]
-	if !ok {
-		// We already checked the nodegroup exists, so return an empty list.
-		interfaces = []JSONObject{}
-	}
-	res, err := json.Marshal(interfaces)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}
-
-// zonesHandler handles requests for '/api/<version>/zones/'.
-func zonesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
-	if r.Method != "GET" {
-		w.WriteHeader(http.StatusBadRequest)
-		return
-	}
-
-	if len(server.zones) == 0 {
-		// Until a zone is registered, behave as if the endpoint
-		// does not exist. This way we can simulate older MAAS
-		// servers that do not support zones.
-		http.NotFoundHandler().ServeHTTP(w, r)
-		return
-	}
-
-	zones := make([]JSONObject, 0, len(server.zones))
-	for _, zone := range server.zones {
-		zones = append(zones, zone)
-	}
-	res, err := json.Marshal(zones)
-	checkError(err)
-	w.WriteHeader(http.StatusOK)
-	fmt.Fprint(w, string(res))
-}

=== removed file 'src/launchpad.net/gomaasapi/testservice_test.go'
--- src/launchpad.net/gomaasapi/testservice_test.go	2015-10-23 18:28:45 +0000
+++ src/launchpad.net/gomaasapi/testservice_test.go	1970-01-01 00:00:00 +0000
@@ -1,1432 +0,0 @@
-// Copyright 2013 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-package gomaasapi
-
-import (
-	"bytes"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"mime/multipart"
-	"net/http"
-	"net/url"
-	"sort"
-	"strings"
-
-	"gopkg.in/mgo.v2/bson"
-	.
"launchpad.net/gocheck" -) - -type TestServerSuite struct { - server *TestServer -} - -var _ = Suite(&TestServerSuite{}) - -func (suite *TestServerSuite) SetUpTest(c *C) { - server := NewTestServer("1.0") - suite.server = server -} - -func (suite *TestServerSuite) TearDownTest(c *C) { - suite.server.Close() -} - -func (suite *TestServerSuite) TestNewTestServerReturnsTestServer(c *C) { - handler := func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusAccepted) - } - suite.server.serveMux.HandleFunc("/test/", handler) - resp, err := http.Get(suite.server.Server.URL + "/test/") - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusAccepted) -} - -func (suite *TestServerSuite) TestGetResourceURI(c *C) { - c.Check(getNodeURL("0.1", "test"), Equals, "/api/0.1/nodes/test/") -} - -func (suite *TestServerSuite) TestSetVersionJSON(c *C) { - capabilities := `{"capabilities": ["networks-management","static-ipaddresses", "devices-management"]}` - suite.server.SetVersionJSON(capabilities) - - url := fmt.Sprintf("/api/%s/version/", suite.server.version) - resp, err := http.Get(suite.server.Server.URL + url) - c.Assert(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - c.Assert(string(content), Equals, capabilities) -} - -func (suite *TestServerSuite) createDevice(c *C, mac, hostname, parent string) string { - devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" - values := url.Values{} - values.Add("mac_addresses", mac) - values.Add("hostname", hostname) - values.Add("parent", parent) - result := suite.post(c, devicesURL, values) - resultMap, err := result.GetMap() - c.Assert(err, IsNil) - systemId, err := resultMap["system_id"].GetString() - c.Assert(err, IsNil) - return systemId -} - -func getString(c *C, object map[string]JSONObject, key string) string { - value, err := object[key].GetString() - c.Assert(err, IsNil) - return value -} - -func (suite *TestServerSuite) post(c *C, url string, values url.Values) JSONObject { - resp, err := http.Post(suite.server.Server.URL+url, "application/x-www-form-urlencoded", strings.NewReader(values.Encode())) - c.Assert(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - result, err := Parse(suite.server.client, content) - c.Assert(err, IsNil) - return result -} - -func (suite *TestServerSuite) get(c *C, url string) JSONObject { - resp, err := http.Get(suite.server.Server.URL + url) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusOK) - - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - - result, err := Parse(suite.server.client, content) - c.Assert(err, IsNil) - return result -} - -func checkDevice(c *C, device map[string]JSONObject, mac, hostname, parent string) { - macArray, err := device["macaddress_set"].GetArray() - c.Assert(err, IsNil) - c.Assert(macArray, HasLen, 1) - macMap, err := macArray[0].GetMap() - c.Assert(err, IsNil) - - actualMac := getString(c, macMap, "mac_address") - c.Assert(actualMac, Equals, mac) - - actualParent := getString(c, device, "parent") - c.Assert(actualParent, Equals, parent) - actualHostname := getString(c, device, "hostname") - c.Assert(actualHostname, Equals, hostname) -} - -func (suite *TestServerSuite) TestNewDeviceRequiredParameters(c *C) { - devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" - values := url.Values{} - values.Add("mac_addresses", 
"foo") - values.Add("hostname", "bar") - post := func(values url.Values) int { - resp, err := http.Post(suite.server.Server.URL+devicesURL, "application/x-www-form-urlencoded", strings.NewReader(values.Encode())) - c.Assert(err, IsNil) - return resp.StatusCode - } - c.Check(post(values), Equals, http.StatusBadRequest) - values.Del("hostname") - values.Add("parent", "baz") - c.Check(post(values), Equals, http.StatusBadRequest) - values.Del("mac_addresses") - values.Add("hostname", "bam") - c.Check(post(values), Equals, http.StatusBadRequest) -} - -func (suite *TestServerSuite) TestNewDevice(c *C) { - devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=new" - - values := url.Values{} - values.Add("mac_addresses", "foo") - values.Add("hostname", "bar") - values.Add("parent", "baz") - result := suite.post(c, devicesURL, values) - - resultMap, err := result.GetMap() - c.Assert(err, IsNil) - - macArray, err := resultMap["macaddress_set"].GetArray() - c.Assert(err, IsNil) - c.Assert(macArray, HasLen, 1) - macMap, err := macArray[0].GetMap() - c.Assert(err, IsNil) - - mac := getString(c, macMap, "mac_address") - c.Assert(mac, Equals, "foo") - - parent := getString(c, resultMap, "parent") - c.Assert(parent, Equals, "baz") - hostname := getString(c, resultMap, "hostname") - c.Assert(hostname, Equals, "bar") - - addresses, err := resultMap["ip_addresses"].GetArray() - c.Assert(err, IsNil) - c.Assert(addresses, HasLen, 0) - - systemId := getString(c, resultMap, "system_id") - resourceURI := getString(c, resultMap, "resource_uri") - c.Assert(resourceURI, Equals, fmt.Sprintf("/MAAS/api/%v/devices/%v/", suite.server.version, systemId)) -} - -func (suite *TestServerSuite) TestGetDevice(c *C) { - systemId := suite.createDevice(c, "foo", "bar", "baz") - deviceURL := fmt.Sprintf("/api/%v/devices/%v/", suite.server.version, systemId) - - result := suite.get(c, deviceURL) - resultMap, err := result.GetMap() - c.Assert(err, IsNil) - checkDevice(c, resultMap, "foo", "bar", "baz") - actualId, err := resultMap["system_id"].GetString() - c.Assert(actualId, Equals, systemId) -} - -func (suite *TestServerSuite) TestDevicesList(c *C) { - firstId := suite.createDevice(c, "foo", "bar", "baz") - c.Assert(firstId, Not(Equals), "") - secondId := suite.createDevice(c, "bam", "bing", "bong") - c.Assert(secondId, Not(Equals), "") - - devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + "?op=list" - result := suite.get(c, devicesURL) - - devicesArray, err := result.GetArray() - c.Assert(err, IsNil) - c.Assert(devicesArray, HasLen, 2) - - for _, device := range devicesArray { - deviceMap, err := device.GetMap() - c.Assert(err, IsNil) - systemId, err := deviceMap["system_id"].GetString() - c.Assert(err, IsNil) - switch systemId { - case firstId: - checkDevice(c, deviceMap, "foo", "bar", "baz") - case secondId: - checkDevice(c, deviceMap, "bam", "bing", "bong") - default: - c.Fatalf("unknown system id %q", systemId) - } - } -} - -func (suite *TestServerSuite) TestDevicesListMacFiltering(c *C) { - firstId := suite.createDevice(c, "foo", "bar", "baz") - c.Assert(firstId, Not(Equals), "") - secondId := suite.createDevice(c, "bam", "bing", "bong") - c.Assert(secondId, Not(Equals), "") - - op := fmt.Sprintf("?op=list&mac_address=%v", "foo") - devicesURL := fmt.Sprintf("/api/%s/devices/", suite.server.version) + op - result := suite.get(c, devicesURL) - - devicesArray, err := result.GetArray() - c.Assert(err, IsNil) - c.Assert(devicesArray, HasLen, 1) - deviceMap, err := devicesArray[0].GetMap() 
- c.Assert(err, IsNil) - checkDevice(c, deviceMap, "foo", "bar", "baz") -} - -func (suite *TestServerSuite) TestDeviceClaimStickyIPRequiresAddress(c *C) { - systemId := suite.createDevice(c, "foo", "bar", "baz") - op := "?op=claim_sticky_ip_address" - deviceURL := fmt.Sprintf("/api/%s/devices/%s/%s", suite.server.version, systemId, op) - values := url.Values{} - resp, err := http.Post(suite.server.Server.URL+deviceURL, "application/x-www-form-urlencoded", strings.NewReader(values.Encode())) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) -} - -func (suite *TestServerSuite) TestDeviceClaimStickyIP(c *C) { - systemId := suite.createDevice(c, "foo", "bar", "baz") - op := "?op=claim_sticky_ip_address" - deviceURL := fmt.Sprintf("/api/%s/devices/%s/", suite.server.version, systemId) - values := url.Values{} - values.Add("requested_address", "127.0.0.1") - result := suite.post(c, deviceURL+op, values) - resultMap, err := result.GetMap() - c.Assert(err, IsNil) - - addresses, err := resultMap["ip_addresses"].GetArray() - c.Assert(err, IsNil) - c.Assert(addresses, HasLen, 1) - address, err := addresses[0].GetString() - c.Assert(err, IsNil) - c.Assert(address, Equals, "127.0.0.1") -} - -func (suite *TestServerSuite) TestDeleteDevice(c *C) { - systemId := suite.createDevice(c, "foo", "bar", "baz") - deviceURL := fmt.Sprintf("/api/%s/devices/%s/", suite.server.version, systemId) - req, err := http.NewRequest("DELETE", suite.server.Server.URL+deviceURL, nil) - c.Assert(err, IsNil) - resp, err := http.DefaultClient.Do(req) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusNoContent) - - resp, err = http.Get(suite.server.Server.URL + deviceURL) - c.Assert(err, IsNil) - c.Assert(resp.StatusCode, Equals, http.StatusNotFound) -} - -func (suite *TestServerSuite) TestInvalidOperationOnNodesIsBadRequest(c *C) { - badURL := getNodesEndpoint(suite.server.version) + "?op=procrastinate" - - response, err := http.Get(suite.server.Server.URL + badURL) - c.Assert(err, IsNil) - - c.Check(response.StatusCode, Equals, http.StatusBadRequest) -} - -func (suite *TestServerSuite) TestHandlesNodeListingUnknownPath(c *C) { - invalidPath := fmt.Sprintf("/api/%s/nodes/invalid/path/", suite.server.version) - resp, err := http.Get(suite.server.Server.URL + invalidPath) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusNotFound) -} - -func (suite *TestServerSuite) TestHandlesNodegroupsInterfacesListingUnknownNodegroup(c *C) { - invalidPath := fmt.Sprintf("/api/%s/nodegroups/unknown/interfaces/", suite.server.version) - resp, err := http.Get(suite.server.Server.URL + invalidPath) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusNotFound) -} - -func (suite *TestServerSuite) TestNewNode(c *C) { - input := `{"system_id": "mysystemid"}` - - newNode := suite.server.NewNode(input) - - c.Check(len(suite.server.nodes), Equals, 1) - c.Check(suite.server.nodes["mysystemid"], DeepEquals, newNode) -} - -func (suite *TestServerSuite) TestNodesReturnsNodes(c *C) { - input := `{"system_id": "mysystemid"}` - newNode := suite.server.NewNode(input) - - nodesMap := suite.server.Nodes() - - c.Check(len(nodesMap), Equals, 1) - c.Check(nodesMap["mysystemid"], DeepEquals, newNode) -} - -func (suite *TestServerSuite) TestChangeNode(c *C) { - input := `{"system_id": "mysystemid"}` - suite.server.NewNode(input) - suite.server.ChangeNode("mysystemid", "newfield", "newvalue") - - node, _ := suite.server.nodes["mysystemid"] - field, err := node.GetField("newfield") - 
c.Assert(err, IsNil) - c.Check(field, Equals, "newvalue") -} - -func (suite *TestServerSuite) TestClearClearsData(c *C) { - input := `{"system_id": "mysystemid"}` - suite.server.NewNode(input) - suite.server.addNodeOperation("mysystemid", "start", &http.Request{}) - - suite.server.Clear() - - c.Check(len(suite.server.nodes), Equals, 0) - c.Check(len(suite.server.nodeOperations), Equals, 0) - c.Check(len(suite.server.nodeOperationRequestValues), Equals, 0) -} - -func (suite *TestServerSuite) TestAddNodeOperationPopulatesOperations(c *C) { - input := `{"system_id": "mysystemid"}` - suite.server.NewNode(input) - - suite.server.addNodeOperation("mysystemid", "start", &http.Request{}) - suite.server.addNodeOperation("mysystemid", "stop", &http.Request{}) - - nodeOperations := suite.server.NodeOperations() - operations := nodeOperations["mysystemid"] - c.Check(operations, DeepEquals, []string{"start", "stop"}) -} - -func (suite *TestServerSuite) TestAddNodeOperationPopulatesOperationRequestValues(c *C) { - input := `{"system_id": "mysystemid"}` - suite.server.NewNode(input) - reader := strings.NewReader("key=value") - request, err := http.NewRequest("POST", "http://example.com/", reader) - request.Header.Set("Content-Type", "application/x-www-form-urlencoded") - c.Assert(err, IsNil) - - suite.server.addNodeOperation("mysystemid", "start", request) - - values := suite.server.NodeOperationRequestValues() - value := values["mysystemid"] - c.Check(len(value), Equals, 1) - c.Check(value[0], DeepEquals, url.Values{"key": []string{"value"}}) -} - -func (suite *TestServerSuite) TestNewNodeRequiresJSONString(c *C) { - input := `invalid:json` - defer func() { - recoveredError := recover().(*json.SyntaxError) - c.Check(recoveredError, NotNil) - c.Check(recoveredError.Error(), Matches, ".*invalid character.*") - }() - suite.server.NewNode(input) -} - -func (suite *TestServerSuite) TestNewNodeRequiresSystemIdKey(c *C) { - input := `{"test": "test"}` - defer func() { - recoveredError := recover() - c.Check(recoveredError, NotNil) - c.Check(recoveredError, Matches, ".*does not contain a 'system_id' value.") - }() - suite.server.NewNode(input) -} - -func (suite *TestServerSuite) TestHandlesNodeRequestNotFound(c *C) { - getURI := fmt.Sprintf("/api/%s/nodes/test/", suite.server.version) - resp, err := http.Get(suite.server.Server.URL + getURI) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusNotFound) -} - -func (suite *TestServerSuite) TestHandlesNodeUnknownOperation(c *C) { - input := `{"system_id": "mysystemid"}` - suite.server.NewNode(input) - postURI := fmt.Sprintf("/api/%s/nodes/mysystemid/?op=unknown/", suite.server.version) - respStart, err := http.Post(suite.server.Server.URL+postURI, "", nil) - - c.Check(err, IsNil) - c.Check(respStart.StatusCode, Equals, http.StatusBadRequest) -} - -func (suite *TestServerSuite) TestHandlesNodeDelete(c *C) { - input := `{"system_id": "mysystemid"}` - suite.server.NewNode(input) - deleteURI := fmt.Sprintf("/api/%s/nodes/mysystemid/?op=mysystemid", suite.server.version) - req, err := http.NewRequest("DELETE", suite.server.Server.URL+deleteURI, nil) - var client http.Client - resp, err := client.Do(req) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - c.Check(len(suite.server.nodes), Equals, 0) -} - -func uploadTo(url, fileName string, fileContent []byte) (*http.Response, error) { - buf := new(bytes.Buffer) - w := multipart.NewWriter(buf) - fw, err := w.CreateFormFile(fileName, fileName) - if err != nil { - panic(err) - } - 
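	// io.Copy below streams the payload into the form-file part created by
	// CreateFormFile; the writer's Close call that follows writes the
	// terminating multipart boundary, which must happen before the request
	// body is read or the server sees a truncated form.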
io.Copy(fw, bytes.NewBuffer(fileContent)) - w.Close() - req, err := http.NewRequest("POST", url, buf) - if err != nil { - panic(err) - } - req.Header.Set("Content-Type", w.FormDataContentType()) - client := &http.Client{} - return client.Do(req) -} - -func (suite *TestServerSuite) TestHandlesUploadFile(c *C) { - fileContent := []byte("test file content") - postURL := suite.server.Server.URL + fmt.Sprintf("/api/%s/files/?op=add&filename=filename", suite.server.version) - - resp, err := uploadTo(postURL, "upload", fileContent) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - c.Check(len(suite.server.files), Equals, 1) - file, ok := suite.server.files["filename"] - c.Assert(ok, Equals, true) - field, err := file.GetField("content") - c.Assert(err, IsNil) - c.Check(field, Equals, base64.StdEncoding.EncodeToString(fileContent)) -} - -func (suite *TestServerSuite) TestNewFileEscapesName(c *C) { - obj := suite.server.NewFile("aa?bb", []byte("bytes")) - resourceURI := obj.URI() - c.Check(strings.Contains(resourceURI.String(), "aa?bb"), Equals, false) - c.Check(strings.Contains(resourceURI.Path, "aa?bb"), Equals, true) - anonURI, err := obj.GetField("anon_resource_uri") - c.Assert(err, IsNil) - c.Check(strings.Contains(anonURI, "aa?bb"), Equals, false) - c.Check(strings.Contains(anonURI, url.QueryEscape("aa?bb")), Equals, true) -} - -func (suite *TestServerSuite) TestHandlesFile(c *C) { - const filename = "my-file" - const fileContent = "test file content" - file := suite.server.NewFile(filename, []byte(fileContent)) - getURI := fmt.Sprintf("/api/%s/files/%s/", suite.server.version, filename) - fileURI, err := file.GetField("anon_resource_uri") - c.Assert(err, IsNil) - - resp, err := http.Get(suite.server.Server.URL + getURI) - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - var obj map[string]interface{} - err = json.Unmarshal(content, &obj) - c.Assert(err, IsNil) - anon_url, ok := obj["anon_resource_uri"] - c.Check(ok, Equals, true) - c.Check(anon_url.(string), Equals, fileURI) - base64Content, ok := obj["content"] - c.Check(ok, Equals, true) - decodedContent, err := base64.StdEncoding.DecodeString(base64Content.(string)) - c.Assert(err, IsNil) - c.Check(string(decodedContent), Equals, fileContent) -} - -func (suite *TestServerSuite) TestHandlesGetFile(c *C) { - fileContent := []byte("test file content") - fileName := "filename" - suite.server.NewFile(fileName, fileContent) - getURI := fmt.Sprintf("/api/%s/files/?op=get&filename=filename", suite.server.version) - - resp, err := http.Get(suite.server.Server.URL + getURI) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - content, err := readAndClose(resp.Body) - c.Check(err, IsNil) - c.Check(string(content), Equals, string(fileContent)) - c.Check(content, DeepEquals, fileContent) -} - -func (suite *TestServerSuite) TestHandlesListReturnsSortedFilenames(c *C) { - fileName1 := "filename1" - suite.server.NewFile(fileName1, []byte("test file content")) - fileName2 := "filename2" - suite.server.NewFile(fileName2, []byte("test file content")) - getURI := fmt.Sprintf("/api/%s/files/?op=list", suite.server.version) - - resp, err := http.Get(suite.server.Server.URL + getURI) - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - var files []map[string]string - err = json.Unmarshal(content, &files) - c.Assert(err, IsNil) - 
c.Check(len(files), Equals, 2) - c.Check(files[0]["filename"], Equals, fileName1) - c.Check(files[1]["filename"], Equals, fileName2) -} - -func (suite *TestServerSuite) TestHandlesListFiltersFiles(c *C) { - fileName1 := "filename1" - suite.server.NewFile(fileName1, []byte("test file content")) - fileName2 := "prefixFilename" - suite.server.NewFile(fileName2, []byte("test file content")) - getURI := fmt.Sprintf("/api/%s/files/?op=list&prefix=prefix", suite.server.version) - - resp, err := http.Get(suite.server.Server.URL + getURI) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - var files []map[string]string - err = json.Unmarshal(content, &files) - c.Assert(err, IsNil) - c.Check(len(files), Equals, 1) - c.Check(files[0]["filename"], Equals, fileName2) -} - -func (suite *TestServerSuite) TestHandlesListOmitsContent(c *C) { - const filename = "myfile" - fileContent := []byte("test file content") - suite.server.NewFile(filename, fileContent) - getURI := fmt.Sprintf("/api/%s/files/?op=list", suite.server.version) - - resp, err := http.Get(suite.server.Server.URL + getURI) - c.Assert(err, IsNil) - - content, err := readAndClose(resp.Body) - c.Assert(err, IsNil) - var files []map[string]string - err = json.Unmarshal(content, &files) - - // The resulting dict does not have a "content" entry. - file := files[0] - _, ok := file["content"] - c.Check(ok, Equals, false) - - // But the original as stored in the test service still has it. - contentAfter, err := suite.server.files[filename].GetField("content") - c.Assert(err, IsNil) - bytes, err := base64.StdEncoding.DecodeString(contentAfter) - c.Assert(err, IsNil) - c.Check(string(bytes), Equals, string(fileContent)) -} - -func (suite *TestServerSuite) TestDeleteFile(c *C) { - fileName1 := "filename1" - suite.server.NewFile(fileName1, []byte("test file content")) - deleteURI := fmt.Sprintf("/api/%s/files/filename1/", suite.server.version) - - req, err := http.NewRequest("DELETE", suite.server.Server.URL+deleteURI, nil) - c.Check(err, IsNil) - var client http.Client - resp, err := client.Do(req) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusOK) - c.Check(suite.server.Files(), DeepEquals, map[string]MAASObject{}) -} - -func (suite *TestServerSuite) TestListZonesNotSupported(c *C) { - // Older versions of MAAS do not support zones. We simulate - // this behaviour by returning 404 if no zones are defined. - zonesURL := getZonesEndpoint(suite.server.version) - resp, err := http.Get(suite.server.Server.URL + zonesURL) - - c.Check(err, IsNil) - c.Check(resp.StatusCode, Equals, http.StatusNotFound) -} - -// TestMAASObjectSuite validates that the object created by -// NewTestMAAS can be used by the gomaasapi library as if it were a real -// MAAS server. 
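Both TestServerSuite above and TestMAASObjectSuite below follow the standard gocheck layout: a plain struct registered with Suite at init time, fixture hooks discovered by name, and assertions made through checker values such as Equals, IsNil, and DeepEquals. A minimal, self-contained sketch of that layout, assuming only the dot-imported launchpad.net/gocheck package; the suite and field names are illustrative:

    package example_test

    import (
    	"testing"

    	. "launchpad.net/gocheck"
    )

    // Hook gocheck's runner into the standard "go test" machinery.
    func Test(t *testing.T) { TestingT(t) }

    type MySuite struct {
    	fixture string
    }

    // Registration happens at init time via a package-level variable.
    var _ = Suite(&MySuite{})

    // SetUpTest runs before every Test* method on the suite.
    func (s *MySuite) SetUpTest(c *C) {
    	s.fixture = "fresh"
    }

    func (s *MySuite) TestFixture(c *C) {
    	c.Check(s.fixture, Equals, "fresh")
    }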
-type TestMAASObjectSuite struct { - TestMAASObject *TestMAASObject -} - -var _ = Suite(&TestMAASObjectSuite{}) - -func (s *TestMAASObjectSuite) SetUpSuite(c *C) { - s.TestMAASObject = NewTestMAAS("1.0") -} - -func (s *TestMAASObjectSuite) TearDownSuite(c *C) { - s.TestMAASObject.Close() -} - -func (s *TestMAASObjectSuite) TearDownTest(c *C) { - s.TestMAASObject.TestServer.Clear() -} - -func (suite *TestMAASObjectSuite) TestListNodes(c *C) { - input := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(input) - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - - listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) - - c.Check(err, IsNil) - listNodes, err := listNodeObjects.GetArray() - c.Assert(err, IsNil) - c.Check(len(listNodes), Equals, 1) - node, err := listNodes[0].GetMAASObject() - c.Assert(err, IsNil) - systemId, err := node.GetField("system_id") - c.Assert(err, IsNil) - c.Check(systemId, Equals, "mysystemid") - resourceURI, _ := node.GetField(resourceURI) - apiVersion := suite.TestMAASObject.TestServer.version - expectedResourceURI := fmt.Sprintf("/api/%s/nodes/mysystemid/", apiVersion) - c.Check(resourceURI, Equals, expectedResourceURI) -} - -func (suite *TestMAASObjectSuite) TestListNodesNoNodes(c *C) { - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) - c.Check(err, IsNil) - - listNodes, err := listNodeObjects.GetArray() - - c.Check(err, IsNil) - c.Check(listNodes, DeepEquals, []JSONObject{}) -} - -func (suite *TestMAASObjectSuite) TestListNodesSelectedNodes(c *C) { - input := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(input) - input2 := `{"system_id": "mysystemid2"}` - suite.TestMAASObject.TestServer.NewNode(input2) - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - - listNodeObjects, err := nodeListing.CallGet("list", url.Values{"id": {"mysystemid2"}}) - - c.Check(err, IsNil) - listNodes, err := listNodeObjects.GetArray() - c.Check(err, IsNil) - c.Check(len(listNodes), Equals, 1) - node, _ := listNodes[0].GetMAASObject() - systemId, _ := node.GetField("system_id") - c.Check(systemId, Equals, "mysystemid2") -} - -func (suite *TestMAASObjectSuite) TestDeleteNode(c *C) { - input := `{"system_id": "mysystemid"}` - node := suite.TestMAASObject.TestServer.NewNode(input) - - err := node.Delete() - - c.Check(err, IsNil) - c.Check(suite.TestMAASObject.TestServer.Nodes(), DeepEquals, map[string]MAASObject{}) -} - -func (suite *TestMAASObjectSuite) TestOperationsOnNode(c *C) { - input := `{"system_id": "mysystemid"}` - node := suite.TestMAASObject.TestServer.NewNode(input) - operations := []string{"start", "stop", "release"} - for _, operation := range operations { - _, err := node.CallPost(operation, url.Values{}) - c.Check(err, IsNil) - } -} - -func (suite *TestMAASObjectSuite) TestOperationsOnNodeGetRecorded(c *C) { - input := `{"system_id": "mysystemid"}` - node := suite.TestMAASObject.TestServer.NewNode(input) - - _, err := node.CallPost("start", url.Values{}) - - c.Check(err, IsNil) - nodeOperations := suite.TestMAASObject.TestServer.NodeOperations() - operations := nodeOperations["mysystemid"] - c.Check(operations, DeepEquals, []string{"start"}) -} - -func (suite *TestMAASObjectSuite) TestAcquireOperationGetsRecorded(c *C) { - input := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(input) - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - params := url.Values{"key": []string{"value"}} - - 
jsonResponse, err := nodesObj.CallPost("acquire", params) - c.Assert(err, IsNil) - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - systemId, err := acquiredNode.GetField("system_id") - c.Assert(err, IsNil) - - // The 'acquire' operation has been recorded. - nodeOperations := suite.TestMAASObject.TestServer.NodeOperations() - operations := nodeOperations[systemId] - c.Check(operations, DeepEquals, []string{"acquire"}) - - // The parameters used to 'acquire' the node have been recorded as well. - values := suite.TestMAASObject.TestServer.NodeOperationRequestValues() - value := values[systemId] - c.Check(len(value), Equals, 1) - c.Check(value[0], DeepEquals, params) -} - -func (suite *TestMAASObjectSuite) TestNodesRelease(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "mysystemid1"}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "mysystemid2"}`) - suite.TestMAASObject.TestServer.OwnedNodes()["mysystemid2"] = true - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - params := url.Values{"nodes": []string{"mysystemid1", "mysystemid2"}} - - // release should only release mysystemid2, as it is the only one allocated. - jsonResponse, err := nodesObj.CallPost("release", params) - c.Assert(err, IsNil) - releasedNodes, err := jsonResponse.GetArray() - c.Assert(err, IsNil) - c.Assert(releasedNodes, HasLen, 1) - releasedNode, err := releasedNodes[0].GetMAASObject() - c.Assert(err, IsNil) - systemId, err := releasedNode.GetField("system_id") - c.Assert(err, IsNil) - c.Assert(systemId, Equals, "mysystemid2") - - // The 'release' operation has been recorded. - nodesOperations := suite.TestMAASObject.TestServer.NodesOperations() - c.Check(nodesOperations, DeepEquals, []string{"release"}) - nodesOperationRequestValues := suite.TestMAASObject.TestServer.NodesOperationRequestValues() - expectedValues := make(url.Values) - expectedValues.Add("nodes", "mysystemid1") - expectedValues.Add("nodes", "mysystemid2") - c.Check(nodesOperationRequestValues, DeepEquals, []url.Values{expectedValues}) -} - -func (suite *TestMAASObjectSuite) TestNodesReleaseUnknown(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "mysystemid"}`) - suite.TestMAASObject.TestServer.OwnedNodes()["mysystemid"] = true - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - params := url.Values{"nodes": []string{"mysystemid", "what"}} - - // if there are any unknown nodes, none are released. - _, err := nodesObj.CallPost("release", params) - c.Assert(err, ErrorMatches, `gomaasapi: got error back from server: 400 Bad Request \(Unknown node\(s\): what.\)`) - c.Assert(suite.TestMAASObject.TestServer.OwnedNodes()["mysystemid"], Equals, true) -} - -func (suite *TestMAASObjectSuite) TestUploadFile(c *C) { - const filename = "myfile.txt" - const fileContent = "uploaded contents" - files := suite.TestMAASObject.GetSubObject("files") - params := url.Values{"filename": {filename}} - filesMap := map[string][]byte{"file": []byte(fileContent)} - - // Upload a file. - _, err := files.CallPostFiles("add", params, filesMap) - c.Assert(err, IsNil) - - // The file can now be downloaded. 
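	// CallGet with op=get exercises getFileHandler, which writes the raw
	// decoded bytes; a plain GET on the file object itself would instead
	// return JSON metadata with base64-encoded content.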
- downloadedFile, err := files.CallGet("get", params) - c.Assert(err, IsNil) - bytes, err := downloadedFile.GetBytes() - c.Assert(err, IsNil) - c.Check(string(bytes), Equals, fileContent) -} - -func (suite *TestMAASObjectSuite) TestFileNamesMayContainSlashes(c *C) { - const filename = "filename/with/slashes/in/it" - const fileContent = "file contents" - files := suite.TestMAASObject.GetSubObject("files") - params := url.Values{"filename": {filename}} - filesMap := map[string][]byte{"file": []byte(fileContent)} - - _, err := files.CallPostFiles("add", params, filesMap) - c.Assert(err, IsNil) - - file, err := files.GetSubObject(filename).Get() - c.Assert(err, IsNil) - field, err := file.GetField("content") - c.Assert(err, IsNil) - c.Check(field, Equals, base64.StdEncoding.EncodeToString([]byte(fileContent))) -} - -func (suite *TestMAASObjectSuite) TestAcquireNodeGrabsAvailableNode(c *C) { - input := `{"system_id": "nodeid"}` - suite.TestMAASObject.TestServer.NewNode(input) - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - - jsonResponse, err := nodesObj.CallPost("acquire", nil) - c.Assert(err, IsNil) - - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - systemID, err := acquiredNode.GetField("system_id") - c.Assert(err, IsNil) - c.Check(systemID, Equals, "nodeid") - _, owned := suite.TestMAASObject.TestServer.OwnedNodes()[systemID] - c.Check(owned, Equals, true) -} - -func (suite *TestMAASObjectSuite) TestAcquireNodeNeedsANode(c *C) { - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - _, err := nodesObj.CallPost("acquire", nil) - c.Check(err.(ServerError).StatusCode, Equals, http.StatusConflict) -} - -func (suite *TestMAASObjectSuite) TestAcquireNodeIgnoresOwnedNodes(c *C) { - input := `{"system_id": "nodeid"}` - suite.TestMAASObject.TestServer.NewNode(input) - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - // Ensure that the one node in the MAAS is not available. 
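	// The first acquire marks "nodeid" as owned; findFreeNode skips owned
	// nodes, so the second acquire below must fail with 409 Conflict.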
- _, err := nodesObj.CallPost("acquire", nil) - c.Assert(err, IsNil) - - _, err = nodesObj.CallPost("acquire", nil) - c.Check(err.(ServerError).StatusCode, Equals, http.StatusConflict) -} - -func (suite *TestMAASObjectSuite) TestReleaseNodeReleasesAcquiredNode(c *C) { - input := `{"system_id": "nodeid"}` - suite.TestMAASObject.TestServer.NewNode(input) - nodesObj := suite.TestMAASObject.GetSubObject("nodes/") - jsonResponse, err := nodesObj.CallPost("acquire", nil) - c.Assert(err, IsNil) - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - systemID, err := acquiredNode.GetField("system_id") - c.Assert(err, IsNil) - nodeObj := nodesObj.GetSubObject(systemID) - - _, err = nodeObj.CallPost("release", nil) - c.Assert(err, IsNil) - _, owned := suite.TestMAASObject.TestServer.OwnedNodes()[systemID] - c.Check(owned, Equals, false) -} - -func (suite *TestMAASObjectSuite) TestGetNetworks(c *C) { - nodeJSON := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(nodeJSON) - networkJSON := `{"name": "mynetworkname", "ip": "0.1.2.0", "netmask": "255.255.255.0"}` - suite.TestMAASObject.TestServer.NewNetwork(networkJSON) - suite.TestMAASObject.TestServer.ConnectNodeToNetwork("mysystemid", "mynetworkname") - - networkMethod := suite.TestMAASObject.GetSubObject("networks") - params := url.Values{"node": []string{"mysystemid"}} - listNetworkObjects, err := networkMethod.CallGet("", params) - c.Assert(err, IsNil) - - networkJSONArray, err := listNetworkObjects.GetArray() - c.Assert(err, IsNil) - c.Check(networkJSONArray, HasLen, 1) - - listNetworks, err := networkJSONArray[0].GetMAASObject() - c.Assert(err, IsNil) - - networkName, err := listNetworks.GetField("name") - c.Assert(err, IsNil) - ip, err := listNetworks.GetField("ip") - c.Assert(err, IsNil) - netmask, err := listNetworks.GetField("netmask") - c.Assert(err, IsNil) - c.Check(networkName, Equals, "mynetworkname") - c.Check(ip, Equals, "0.1.2.0") - c.Check(netmask, Equals, "255.255.255.0") -} - -func (suite *TestMAASObjectSuite) TestGetNetworksNone(c *C) { - nodeJSON := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(nodeJSON) - - networkMethod := suite.TestMAASObject.GetSubObject("networks") - params := url.Values{"node": []string{"mysystemid"}} - listNetworkObjects, err := networkMethod.CallGet("", params) - c.Assert(err, IsNil) - - networkJSONArray, err := listNetworkObjects.GetArray() - c.Assert(err, IsNil) - c.Check(networkJSONArray, HasLen, 0) -} - -func (suite *TestMAASObjectSuite) TestListNodesWithNetworks(c *C) { - nodeJSON := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(nodeJSON) - networkJSON := `{"name": "mynetworkname", "ip": "0.1.2.0", "netmask": "255.255.255.0"}` - suite.TestMAASObject.TestServer.NewNetwork(networkJSON) - suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("mysystemid", "mynetworkname", "aa:bb:cc:dd:ee:ff") - - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - listNodeObjects, err := nodeListing.CallGet("list", url.Values{}) - c.Assert(err, IsNil) - - listNodes, err := listNodeObjects.GetArray() - c.Assert(err, IsNil) - c.Check(listNodes, HasLen, 1) - - node, err := listNodes[0].GetMAASObject() - c.Assert(err, IsNil) - systemId, err := node.GetField("system_id") - c.Assert(err, IsNil) - c.Check(systemId, Equals, "mysystemid") - - gotResourceURI, err := node.GetField(resourceURI) - c.Assert(err, IsNil) - apiVersion := suite.TestMAASObject.TestServer.version - expectedResourceURI := 
fmt.Sprintf("/api/%s/nodes/mysystemid/", apiVersion) - c.Check(gotResourceURI, Equals, expectedResourceURI) - - macAddressSet, err := node.GetMap()["macaddress_set"].GetArray() - c.Assert(err, IsNil) - c.Check(macAddressSet, HasLen, 1) - - macAddress, err := macAddressSet[0].GetMap() - c.Assert(err, IsNil) - macAddressString, err := macAddress["mac_address"].GetString() - c.Check(macAddressString, Equals, "aa:bb:cc:dd:ee:ff") - - gotResourceURI, err = macAddress[resourceURI].GetString() - c.Assert(err, IsNil) - expectedResourceURI = fmt.Sprintf("/api/%s/nodes/mysystemid/macs/%s/", apiVersion, url.QueryEscape("aa:bb:cc:dd:ee:ff")) - c.Check(gotResourceURI, Equals, expectedResourceURI) -} - -func (suite *TestMAASObjectSuite) TestListNetworkConnectedMACAddresses(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "node_1"}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "node_2"}`) - suite.TestMAASObject.TestServer.NewNetwork( - `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, - ) - suite.TestMAASObject.TestServer.NewNetwork( - `{"name": "net_2", "ip": "0.2.2.0", "netmask": "255.255.255.0"}`, - ) - suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_2", "net_2", "aa:bb:cc:dd:ee:22") - suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_1", "net_1", "aa:bb:cc:dd:ee:11") - suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_2", "net_1", "aa:bb:cc:dd:ee:21") - suite.TestMAASObject.TestServer.ConnectNodeToNetworkWithMACAddress("node_1", "net_2", "aa:bb:cc:dd:ee:12") - - nodeListing := suite.TestMAASObject.GetSubObject("networks").GetSubObject("net_1") - listNodeObjects, err := nodeListing.CallGet("list_connected_macs", url.Values{}) - c.Assert(err, IsNil) - - listNodes, err := listNodeObjects.GetArray() - c.Assert(err, IsNil) - c.Check(listNodes, HasLen, 2) - - node, err := listNodes[0].GetMAASObject() - c.Assert(err, IsNil) - macAddress, err := node.GetField("mac_address") - c.Assert(err, IsNil) - c.Check(macAddress == "aa:bb:cc:dd:ee:11" || macAddress == "aa:bb:cc:dd:ee:21", Equals, true) - node1_idx := 0 - if macAddress == "aa:bb:cc:dd:ee:21" { - node1_idx = 1 - } - - node, err = listNodes[node1_idx].GetMAASObject() - c.Assert(err, IsNil) - macAddress, err = node.GetField("mac_address") - c.Assert(err, IsNil) - c.Check(macAddress, Equals, "aa:bb:cc:dd:ee:11") - nodeResourceURI, err := node.GetField(resourceURI) - c.Assert(err, IsNil) - apiVersion := suite.TestMAASObject.TestServer.version - expectedResourceURI := fmt.Sprintf("/api/%s/nodes/node_1/macs/%s/", apiVersion, url.QueryEscape("aa:bb:cc:dd:ee:11")) - c.Check(nodeResourceURI, Equals, expectedResourceURI) - - node, err = listNodes[1-node1_idx].GetMAASObject() - c.Assert(err, IsNil) - macAddress, err = node.GetField("mac_address") - c.Assert(err, IsNil) - c.Check(macAddress, Equals, "aa:bb:cc:dd:ee:21") - nodeResourceURI, err = node.GetField(resourceURI) - c.Assert(err, IsNil) - expectedResourceURI = fmt.Sprintf("/api/%s/nodes/node_2/macs/%s/", apiVersion, url.QueryEscape("aa:bb:cc:dd:ee:21")) - c.Check(nodeResourceURI, Equals, expectedResourceURI) -} - -func (suite *TestMAASObjectSuite) TestGetVersion(c *C) { - networkMethod := suite.TestMAASObject.GetSubObject("version") - params := url.Values{"node": []string{"mysystemid"}} - versionObject, err := networkMethod.CallGet("", params) - c.Assert(err, IsNil) - - versionMap, err := versionObject.GetMap() - c.Assert(err, IsNil) - jsonArray, ok := versionMap["capabilities"] - c.Check(ok, 
Equals, true) - capArray, err := jsonArray.GetArray() - for _, capJSONName := range capArray { - capName, err := capJSONName.GetString() - c.Assert(err, IsNil) - switch capName { - case "networks-management": - case "static-ipaddresses": - default: - c.Fatalf("unknown capability %q", capName) - } - } -} - -func (suite *TestMAASObjectSuite) assertIPAmong(c *C, jsonObjIP JSONObject, expectIPs ...string) { - apiVersion := suite.TestMAASObject.TestServer.version - expectedURI := getIPAddressesEndpoint(apiVersion) - - maasObj, err := jsonObjIP.GetMAASObject() - c.Assert(err, IsNil) - attrs := maasObj.GetMap() - uri, err := attrs["resource_uri"].GetString() - c.Assert(err, IsNil) - c.Assert(uri, Equals, expectedURI) - allocType, err := attrs["alloc_type"].GetFloat64() - c.Assert(err, IsNil) - c.Assert(allocType, Equals, 4.0) - created, err := attrs["created"].GetString() - c.Assert(err, IsNil) - c.Assert(created, Not(Equals), "") - ip, err := attrs["ip"].GetString() - c.Assert(err, IsNil) - if !contains(expectIPs, ip) { - c.Fatalf("expected IP in %v, got %q", expectIPs, ip) - } -} - -func (suite *TestMAASObjectSuite) TestListIPAddresses(c *C) { - ipAddresses := suite.TestMAASObject.GetSubObject("ipaddresses") - - // First try without any networks and IPs. - listIPObjects, err := ipAddresses.CallGet("", url.Values{}) - c.Assert(err, IsNil) - items, err := listIPObjects.GetArray() - c.Assert(err, IsNil) - c.Assert(items, HasLen, 0) - - // Add two networks and some addresses to each one. - suite.TestMAASObject.TestServer.NewNetwork( - `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, - ) - suite.TestMAASObject.TestServer.NewNetwork( - `{"name": "net_2", "ip": "0.2.2.0", "netmask": "255.255.255.0"}`, - ) - suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.3", "net_1") - suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.4", "net_1") - suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.5", "net_1") - suite.TestMAASObject.TestServer.NewIPAddress("0.2.2.3", "net_2") - suite.TestMAASObject.TestServer.NewIPAddress("0.2.2.4", "net_2") - - // List all addresses and verify the needed response fields are set. - listIPObjects, err = ipAddresses.CallGet("", url.Values{}) - c.Assert(err, IsNil) - items, err = listIPObjects.GetArray() - c.Assert(err, IsNil) - c.Assert(items, HasLen, 5) - - for _, ipObj := range items { - suite.assertIPAmong( - c, ipObj, - "0.1.2.3", "0.1.2.4", "0.1.2.5", "0.2.2.3", "0.2.2.4", - ) - } - - // Remove all net_1 IPs. - removed := suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.3") - c.Assert(removed, Equals, true) - removed = suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.4") - c.Assert(removed, Equals, true) - removed = suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.5") - c.Assert(removed, Equals, true) - // Remove the last IP twice, should be OK and return false. - removed = suite.TestMAASObject.TestServer.RemoveIPAddress("0.1.2.5") - c.Assert(removed, Equals, false) - - // List again. 
- listIPObjects, err = ipAddresses.CallGet("", url.Values{}) - c.Assert(err, IsNil) - items, err = listIPObjects.GetArray() - c.Assert(err, IsNil) - c.Assert(items, HasLen, 2) - for _, ipObj := range items { - suite.assertIPAmong( - c, ipObj, - "0.2.2.3", "0.2.2.4", - ) - } -} - -func (suite *TestMAASObjectSuite) TestReserveIPAddress(c *C) { - suite.TestMAASObject.TestServer.NewNetwork( - `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, - ) - ipAddresses := suite.TestMAASObject.GetSubObject("ipaddresses") - // First try "reserve" with requested_address set. - params := url.Values{"network": []string{"0.1.2.0/24"}, "requested_address": []string{"0.1.2.42"}} - res, err := ipAddresses.CallPost("reserve", params) - c.Assert(err, IsNil) - suite.assertIPAmong(c, res, "0.1.2.42") - - // Now try "reserve" without requested_address. - delete(params, "requested_address") - res, err = ipAddresses.CallPost("reserve", params) - c.Assert(err, IsNil) - suite.assertIPAmong(c, res, "0.1.2.2") -} - -func (suite *TestMAASObjectSuite) TestReleaseIPAddress(c *C) { - suite.TestMAASObject.TestServer.NewNetwork( - `{"name": "net_1", "ip": "0.1.2.0", "netmask": "255.255.255.0"}`, - ) - suite.TestMAASObject.TestServer.NewIPAddress("0.1.2.3", "net_1") - ipAddresses := suite.TestMAASObject.GetSubObject("ipaddresses") - - // Try with non-existing address - should return 404. - params := url.Values{"ip": []string{"0.2.2.1"}} - _, err := ipAddresses.CallPost("release", params) - c.Assert(err, ErrorMatches, `(\n|.)*404 Not Found(\n|.)*`) - - // Now with existing one - all OK. - params = url.Values{"ip": []string{"0.1.2.3"}} - _, err = ipAddresses.CallPost("release", params) - c.Assert(err, IsNil) - - // Ensure it got removed. - c.Assert(suite.TestMAASObject.TestServer.ipAddressesPerNetwork["net_1"], HasLen, 0) - - // Try again, should return 404. 
- _, err = ipAddresses.CallPost("release", params) - c.Assert(err, ErrorMatches, `(\n|.)*404 Not Found(\n|.)*`) -} - -const nodeDetailsXML = ` -<?xml version="1.0" encoding="UTF-8"?> -<node> - <description>Computer</description> -</node> -` - -func (suite *TestMAASObjectSuite) TestNodeDetails(c *C) { - nodeJSON := `{"system_id": "mysystemid"}` - suite.TestMAASObject.TestServer.NewNode(nodeJSON) - suite.TestMAASObject.TestServer.AddNodeDetails("mysystemid", nodeDetailsXML) - - obj := suite.TestMAASObject.GetSubObject("nodes").GetSubObject("mysystemid") - uri := obj.URI() - result, err := obj.client.Get(uri, "details", nil) - c.Assert(err, IsNil) - - bsonObj := map[string]interface{}{} - err = bson.Unmarshal(result, &bsonObj) - c.Assert(err, IsNil) - - _, ok := bsonObj["lldp"] - c.Check(ok, Equals, true) - gotXMLText, ok := bsonObj["lshw"] - c.Check(ok, Equals, true) - c.Check(string(gotXMLText.([]byte)), Equals, string(nodeDetailsXML)) -} - -func (suite *TestMAASObjectSuite) TestListNodegroups(c *C) { - suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) - suite.TestMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "precise"}`) - - nodegroupListing := suite.TestMAASObject.GetSubObject("nodegroups") - result, err := nodegroupListing.CallGet("list", nil) - c.Assert(err, IsNil) - - nodegroups, err := result.GetArray() - c.Assert(err, IsNil) - c.Check(nodegroups, HasLen, 2) - - for _, obj := range nodegroups { - nodegroup, err := obj.GetMAASObject() - c.Assert(err, IsNil) - uuid, err := nodegroup.GetField("uuid") - c.Assert(err, IsNil) - - nodegroupResourceURI, err := nodegroup.GetField(resourceURI) - c.Assert(err, IsNil) - apiVersion := suite.TestMAASObject.TestServer.version - expectedResourceURI := fmt.Sprintf("/api/%s/nodegroups/%s/", apiVersion, uuid) - c.Check(nodegroupResourceURI, Equals, expectedResourceURI) - } -} - -func (suite *TestMAASObjectSuite) TestListNodegroupsEmptyList(c *C) { - nodegroupListing := suite.TestMAASObject.GetSubObject("nodegroups") - result, err := nodegroupListing.CallGet("list", nil) - c.Assert(err, IsNil) - - nodegroups, err := result.GetArray() - c.Assert(err, IsNil) - c.Check(nodegroups, HasLen, 0) -} - -func (suite *TestMAASObjectSuite) TestListNodegroupInterfaces(c *C) { - suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) - jsonText := `{ - "ip_range_high": "172.16.0.128", - "ip_range_low": "172.16.0.2", - "broadcast_ip": "172.16.0.255", - "static_ip_range_low": "172.16.0.129", - "name": "eth0", - "ip": "172.16.0.2", - "subnet_mask": "255.255.255.0", - "management": 2, - "static_ip_range_high": "172.16.0.255", - "interface": "eth0" - }` - - suite.TestMAASObject.TestServer.NewNodegroupInterface("uuid-0", jsonText) - nodegroupsInterfacesListing := suite.TestMAASObject.GetSubObject("nodegroups").GetSubObject("uuid-0").GetSubObject("interfaces") - result, err := nodegroupsInterfacesListing.CallGet("list", nil) - c.Assert(err, IsNil) - - nodegroupsInterfaces, err := result.GetArray() - c.Assert(err, IsNil) - c.Check(nodegroupsInterfaces, HasLen, 1) - - nodegroupsInterface, err := nodegroupsInterfaces[0].GetMap() - c.Assert(err, IsNil) - - checkMember := func(member, expectedValue string) { - value, err := nodegroupsInterface[member].GetString() - c.Assert(err, IsNil) - c.Assert(value, Equals, expectedValue) - } - checkMember("ip_range_high", "172.16.0.128") - checkMember("ip_range_low", "172.16.0.2") - checkMember("broadcast_ip", "172.16.0.255") - checkMember("static_ip_range_low", "172.16.0.129") -
checkMember("static_ip_range_high", "172.16.0.255") - checkMember("name", "eth0") - checkMember("ip", "172.16.0.2") - checkMember("subnet_mask", "255.255.255.0") - checkMember("interface", "eth0") - - value, err := nodegroupsInterface["management"].GetFloat64() - c.Assert(err, IsNil) - c.Assert(value, Equals, 2.0) -} - -func (suite *TestMAASObjectSuite) TestListNodegroupsInterfacesEmptyList(c *C) { - suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) - nodegroupsInterfacesListing := suite.TestMAASObject.GetSubObject("nodegroups").GetSubObject("uuid-0").GetSubObject("interfaces") - result, err := nodegroupsInterfacesListing.CallGet("list", nil) - c.Assert(err, IsNil) - - interfaces, err := result.GetArray() - c.Assert(err, IsNil) - c.Check(interfaces, HasLen, 0) -} - -func (suite *TestMAASObjectSuite) TestListBootImages(c *C) { - suite.TestMAASObject.TestServer.AddBootImage("uuid-0", `{"architecture": "arm64", "release": "trusty"}`) - suite.TestMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "amd64", "release": "precise"}`) - suite.TestMAASObject.TestServer.AddBootImage("uuid-1", `{"architecture": "ppc64el", "release": "precise"}`) - - bootImageListing := suite.TestMAASObject.GetSubObject("nodegroups").GetSubObject("uuid-1").GetSubObject("boot-images") - result, err := bootImageListing.CallGet("", nil) - c.Assert(err, IsNil) - - bootImageObjects, err := result.GetArray() - c.Assert(err, IsNil) - c.Check(bootImageObjects, HasLen, 2) - - expectedBootImages := []string{"amd64.precise", "ppc64el.precise"} - bootImages := make([]string, len(bootImageObjects)) - for i, obj := range bootImageObjects { - bootimage, err := obj.GetMap() - c.Assert(err, IsNil) - architecture, err := bootimage["architecture"].GetString() - c.Assert(err, IsNil) - release, err := bootimage["release"].GetString() - c.Assert(err, IsNil) - bootImages[i] = fmt.Sprintf("%s.%s", architecture, release) - } - sort.Strings(bootImages) - c.Assert(bootImages, DeepEquals, expectedBootImages) -} - -func (suite *TestMAASObjectSuite) TestListZones(c *C) { - expected := map[string]string{ - "zone0": "zone0 is very nice", - "zone1": "zone1 is much nicer than zone0", - } - for name, desc := range expected { - suite.TestMAASObject.TestServer.AddZone(name, desc) - } - - result, err := suite.TestMAASObject.GetSubObject("zones").CallGet("", nil) - c.Assert(err, IsNil) - c.Assert(result, NotNil) - - list, err := result.GetArray() - c.Assert(err, IsNil) - c.Assert(list, HasLen, len(expected)) - - m := make(map[string]string) - for _, item := range list { - itemMap, err := item.GetMap() - c.Assert(err, IsNil) - name, err := itemMap["name"].GetString() - c.Assert(err, IsNil) - desc, err := itemMap["description"].GetString() - c.Assert(err, IsNil) - m[name] = desc - } - c.Assert(m, DeepEquals, expected) -} - -func (suite *TestMAASObjectSuite) TestAcquireNodeZone(c *C) { - suite.TestMAASObject.TestServer.AddZone("z0", "rox") - suite.TestMAASObject.TestServer.AddZone("z1", "sux") - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "zone": "z0"}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "zone": "z1"}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n2", "zone": "z1"}`) - nodesObj := suite.TestMAASObject.GetSubObject("nodes") - - acquire := func(zone string) (string, string, error) { - var params url.Values - if zone != "" { - params = url.Values{"zone": []string{zone}} - } - jsonResponse, err := nodesObj.CallPost("acquire", params) - if 
err != nil { - return "", "", err - } - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - systemId, err := acquiredNode.GetField("system_id") - c.Assert(err, IsNil) - assignedZone, err := acquiredNode.GetField("zone") - c.Assert(err, IsNil) - if zone != "" { - c.Assert(assignedZone, Equals, zone) - } - return systemId, assignedZone, nil - } - - id, _, err := acquire("z0") - c.Assert(err, IsNil) - c.Assert(id, Equals, "n0") - id, _, err = acquire("z0") - c.Assert(err.(ServerError).StatusCode, Equals, http.StatusConflict) - - id, zone, err := acquire("") - c.Assert(err, IsNil) - c.Assert(id, Not(Equals), "n0") - c.Assert(zone, Equals, "z1") -} - -func (suite *TestMAASObjectSuite) TestAcquireFilterMemory(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "memory": 1024}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "memory": 2048}`) - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - jsonResponse, err := nodeListing.CallPost("acquire", url.Values{"mem": []string{"2048"}}) - c.Assert(err, IsNil) - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - mem, err := acquiredNode.GetMap()["memory"].GetFloat64() - c.Assert(err, IsNil) - c.Assert(mem, Equals, float64(2048)) -} - -func (suite *TestMAASObjectSuite) TestAcquireFilterCpuCores(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "cpu_count": 1}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "cpu_count": 2}`) - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - jsonResponse, err := nodeListing.CallPost("acquire", url.Values{"cpu-cores": []string{"2"}}) - c.Assert(err, IsNil) - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - cpucount, err := acquiredNode.GetMap()["cpu_count"].GetFloat64() - c.Assert(err, IsNil) - c.Assert(cpucount, Equals, float64(2)) -} - -func (suite *TestMAASObjectSuite) TestAcquireFilterArch(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "architecture": "amd64"}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "architecture": "arm/generic"}`) - nodeListing := suite.TestMAASObject.GetSubObject("nodes") - jsonResponse, err := nodeListing.CallPost("acquire", url.Values{"arch": []string{"arm"}}) - c.Assert(err, IsNil) - acquiredNode, err := jsonResponse.GetMAASObject() - c.Assert(err, IsNil) - arch, _ := acquiredNode.GetField("architecture") - c.Assert(arch, Equals, "arm/generic") -} - -func (suite *TestMAASObjectSuite) TestDeploymentStatus(c *C) { - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n0", "status": "6"}`) - suite.TestMAASObject.TestServer.NewNode(`{"system_id": "n1", "status": "1"}`) - nodes := suite.TestMAASObject.GetSubObject("nodes") - jsonResponse, err := nodes.CallGet("deployment_status", url.Values{"nodes": []string{"n0", "n1"}}) - c.Assert(err, IsNil) - deploymentStatus, err := jsonResponse.GetMap() - c.Assert(err, IsNil) - c.Assert(deploymentStatus, HasLen, 2) - expectedStatus := map[string]string{ - "n0": "Deployed", "n1": "Not in Deployment", - } - for systemId, status := range expectedStatus { - nodeStatus, err := deploymentStatus[systemId].GetString() - c.Assert(err, IsNil) - c.Assert(nodeStatus, Equals, status) - } -} === removed file 'src/launchpad.net/gomaasapi/util.go' --- src/launchpad.net/gomaasapi/util.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/util.go 1970-01-01 00:00:00 +0000 @@ -1,27 +0,0 @@ -// Copyright 2013 Canonical Ltd. 
This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - "strings" -) - -// JoinURLs joins a base URL and a subpath together. -// Regardless of whether baseURL ends in a trailing slash (or even multiple -// trailing slashes), or whether there are any leading slashes at the beginning -// of path, the two will always be joined together by a single slash. -func JoinURLs(baseURL, path string) string { - return strings.TrimRight(baseURL, "/") + "/" + strings.TrimLeft(path, "/") -} - -// EnsureTrailingSlash appends a slash at the end of the given string unless -// there already is one. -// This is used to create the kind of normalized URLs that Django expects. -// (to avoid Django's redirection when a URL does not end with a slash.) -func EnsureTrailingSlash(URL string) string { - if strings.HasSuffix(URL, "/") { - return URL - } - return URL + "/" -} === removed file 'src/launchpad.net/gomaasapi/util_test.go' --- src/launchpad.net/gomaasapi/util_test.go 2013-04-24 22:34:47 +0000 +++ src/launchpad.net/gomaasapi/util_test.go 1970-01-01 00:00:00 +0000 @@ -1,32 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gomaasapi - -import ( - . "launchpad.net/gocheck" -) - -func (suite *GomaasapiTestSuite) TestJoinURLsAppendsPathToBaseURL(c *C) { - c.Check(JoinURLs("http://example.com/", "foo"), Equals, "http://example.com/foo") -} - -func (suite *GomaasapiTestSuite) TestJoinURLsAddsSlashIfNeeded(c *C) { - c.Check(JoinURLs("http://example.com/foo", "bar"), Equals, "http://example.com/foo/bar") -} - -func (suite *GomaasapiTestSuite) TestJoinURLsNormalizesDoubleSlash(c *C) { - c.Check(JoinURLs("http://example.com/base/", "/szot"), Equals, "http://example.com/base/szot") -} - -func (suite *GomaasapiTestSuite) TestEnsureTrailingSlashAppendsSlashIfMissing(c *C) { - c.Check(EnsureTrailingSlash("test"), Equals, "test/") -} - -func (suite *GomaasapiTestSuite) TestEnsureTrailingSlashDoesNotAppendIfPresent(c *C) { - c.Check(EnsureTrailingSlash("test/"), Equals, "test/") -} - -func (suite *GomaasapiTestSuite) TestEnsureTrailingSlashReturnsSlashIfEmpty(c *C) { - c.Check(EnsureTrailingSlash(""), Equals, "/") -} === removed directory 'src/launchpad.net/gwacl' === removed file 'src/launchpad.net/gwacl/.bzrignore' --- src/launchpad.net/gwacl/.bzrignore 2013-07-11 17:18:27 +0000 +++ src/launchpad.net/gwacl/.bzrignore 1970-01-01 00:00:00 +0000 @@ -1,1 +0,0 @@ -./example/*/run === removed file 'src/launchpad.net/gwacl/COPYING' --- src/launchpad.net/gwacl/COPYING 2013-07-23 08:51:44 +0000 +++ src/launchpad.net/gwacl/COPYING 1970-01-01 00:00:00 +0000 @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. - - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below.
- - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. 
- - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
=== removed file 'src/launchpad.net/gwacl/HACKING.txt' --- src/launchpad.net/gwacl/HACKING.txt 2013-08-20 16:02:16 +0000 +++ src/launchpad.net/gwacl/HACKING.txt 1970-01-01 00:00:00 +0000 @@ -1,143 +0,0 @@ -================================================== -Hacking in GWACL (Go Windows Azure Client Library) -================================================== - -(this doc is a huge WIP) - -Submitting changes ------------------- - -`GWACL`_ is hosted on Launchpad using `Bazaar`_. Submitting a change -requires you to create a merge proposal against the trunk branch; a -core committer will then review the branch. Once the branch is accepted, -it will be landed by the reviewer. - -All branch submissions must be formatted using ``make format``. They must -also have a successful test run with ``make check``. New features must -always be accompanied by new tests. - -.. _GWACL: https://launchpad.net/gwacl -.. _Bazaar: http://bazaar.canonical.com/ - - -Overview of Azure ------------------ - -Computing services -^^^^^^^^^^^^^^^^^^ - -Azure was originally designed as SaaS for Microsoft Windows and later amended -to allow individual virtual machines to run up with Linux distributions. -Some remnants of this SaaS architecture remain today, and understanding them -is crucial to understanding how GWACL works when spinning up virtual -instances. - -There are three main components to any virtual instance: - - * A hosted service - * A deployment - * A role instance - -Hosted services are the "top level" of a virtual resource. Each one -contains up to two deployments, and has its own DNS entry and firewall -settings (known as "endpoints" in Azure). The name of the service forms the -DNS entry as "<name>.cloudapp.net". - -Deployments are Azure's abstraction of whether something is running on its -staging or production environment. They are only exposed in the API and not -the web management UI. Deployments contain one or more role instances. - -Role instances are virtual machines. Many instances may exist in a deployment -(and hence hosted service) but if there is more than one they are intended to -be running components from the same application and they will all share the -same DNS entry and open endpoints. Thus, a hosted service exposes a single -application on the internet and may be composed of multiple role instances -for load balancing and differing components. - -For this reason, if you want several separate applications, you must create a -separate service, deployment and role instance for each. - -Networking across hosted services -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Each service can only see as much of each other service as any public observer -does; however, it's possible to place them in a private network so they are -effectively on a shared LAN segment with no firewall. - -In Azure this is called a "virtual network". The virtual network must be -defined before any services that use it are created, and then associated at -service creation time. The virtual network can be assigned any valid -networking range which is then private to all the virtual instances defined to -use it. - -Storage services -^^^^^^^^^^^^^^^^ - -Azure supports data storage which is accessed separately to role instances -and hosted services. This is organised into several components: - - * A storage account - * Containers within an account - * Blobs inside containers - -A storage account can be created via the web management UI or API. Its name -forms the DNS entry for that storage as "<account>.blob.core.windows.net".
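- -For illustration, gwacl's endpoint helpers (see ``endpoints.go``) derive -exactly this DNS entry from the account name. A minimal sketch, assuming -gwacl is importable as ``launchpad.net/gwacl``:: - - package main - - import ( - "fmt" - - "launchpad.net/gwacl" - ) - - func main() { - // GetEndpoint maps an Azure location to its base API endpoint; - // BlobStorageAPI prefixes the "blob" subdomain and the account name. - endpoint := gwacl.GetEndpoint("West US") - fmt.Println(endpoint.BlobStorageAPI("myaccount")) - // Prints: https://myaccount.blob.core.windows.net/ - }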
- -A container forms the next, and only, level of indirection under the account. -You cannot have a container under a container. Containers control the -default privacy of files therein. - -Blobs are the actual files in the storage. They can be of two main types: - - * Block blobs - * Page blobs - -Block blobs are used for sequential access and are optimised for streaming. -Page blobs are used for random access and allow access to ranges of bytes in a -blob. - -The full URL to a file in a storage account looks like: - - https://<account>.blob.core.windows.net/<container>/<blob> - -The http version of the same URL would work too, but is prone to spurious -authentication failures when accessed through a proxy. Therefore gwacl accesses -the storage API through https; this may become configurable later if there is -demand. - - -RESTful API access ------------------- - -There are two API endpoints for Azure, the management API and the storage API. -Each also uses its own authentication method: - - * x509 certificates for the management API - * HMAC signed request for the storage API - -The GWACL code hides most of this complexity from you; it just requires the -certificate data for the management API access, and the storage key for storage -API access. - -The storage key can be fetched either from the management UI or a management -API call. - -Generating x509 certificates is explained in the :doc:`README` - - -GWACL's API philosophy ----------------------- - -API functions in the library should take a single struct parameter, which -itself contains one or more parameters. Existing functions that do not follow -this rule are historic and should not be copied. - -This brings several advantages:: - - 1. Keyword parameters improve callsite readability. - 2. It allows for parameters to be defaulted if not supplied. - 3. It's much easier to change the API later without breaking existing - code; it just needs re-compiling in the case where you add new, - optional, parameters. - - === removed file 'src/launchpad.net/gwacl/LICENSE' --- src/launchpad.net/gwacl/LICENSE 2013-08-20 16:02:16 +0000 +++ src/launchpad.net/gwacl/LICENSE 1970-01-01 00:00:00 +0000 @@ -1,16 +0,0 @@ -GWACL - Go API for talking to Windows Azure. - -Copyright 2012-2013, Canonical Ltd. - -Except where explicitly noted, all files herein are part of GWACL. - -GWACL is free software: you can redistribute it and/or modify it under the -terms of the GNU Lesser General Public License as published by the Free -Software Foundation, either version 3 of the License, or (at your option) any -later version. - -This program is distributed in the hope that it will be useful, but WITHOUT ANY -WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. - -See COPYING for the full terms of the GNU Lesser General Public License. === removed file 'src/launchpad.net/gwacl/Makefile' --- src/launchpad.net/gwacl/Makefile 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/Makefile 1970-01-01 00:00:00 +0000 @@ -1,35 +0,0 @@ -# Build, and run tests. Be careful of the ordering of flags here; get -# it wrong and things can disappear into the void without warning. -# Yes, both -v and -gocheck.v=true are needed. -check: examples - go test -v ./... -gocheck.v=true - -debug-test: - go test -c -gcflags "-N -l" - gdb gwacl.test - $(RM) gwacl.test - -all_source := $(shell find . -name '*.go' !
-name '*_test.go') - -example_source := $(wildcard example/*/run.go) -example_binaries := $(patsubst %.go,%,$(example_source)) - -# Clean up binaries. -clean: - $(RM) $(example_binaries) - -# Reformat source files. -format: - gofmt -w -l . - -# Reformat and simplify source files. -simplify: - gofmt -w -l -s . - -# Build the examples (we have no tests for them). -examples: $(example_binaries) - -%: %.go $(all_source) - go build -o $@ $< - -.PHONY: check clean format examples debug-test === removed file 'src/launchpad.net/gwacl/README' --- src/launchpad.net/gwacl/README 2013-08-20 16:02:16 +0000 +++ src/launchpad.net/gwacl/README 1970-01-01 00:00:00 +0000 @@ -1,54 +0,0 @@ -=========================================== -GWACL - The Go Windows Azure Client Library -=========================================== - - -How to generate an x509 key to talk to Azure --------------------------------------------- - -Azure requires that API clients use an x509 certificate to authenticate to the -management API. Create the certificate with:: - - openssl req -config /usr/share/ssl-cert/ssleay.cnf -x509 -nodes \ - -days 3650 -newkey rsa:2048 -keyout azure.pem -out azure.pem - -Note the use of ``ssleay.cnf``. It just contains some crappy defaults so you -don't get prompted for certificate data. You can leave it out if you want, -but then you'll need to enter country, organisation, etc. - -Azure wants you to upload a ``.cer`` file (which is in DER format). Here's -how you can extract a ``.cer`` file from the ``.pem``:: - - openssl x509 -inform pem -in azure.pem -outform der -out azure.cer - -You can now upload ``azure.cer`` to Azure as a management certificate. - - -Using the key in GWACL ----------------------- - -GWACL requires the key in the .pem file, so make sure you keep that file -around. The .cer file can be deleted as you won't need it again, and it's easy -to regenerate if you want to re-upload it. - - -Example programs ----------------- - -Storage -^^^^^^^ - -The storage example is a stand-alone tool which allows the user to manipulate -a storage account:: - - go run example/storage/run.go --help - -Management -^^^^^^^^^^ - -The management example is a piece of code that starts up a new role instance, -optionally pauses so you can play with it, and then shuts everything down -again. It is intended to be useful for testing the library itself, but also -serves as an example of how to use the GWACL API:: - - go run example/management/run.go -cert <certfile> -subscriptionid <subscriptionid> [-pause] === removed directory 'src/launchpad.net/gwacl/dedent' === removed file 'src/launchpad.net/gwacl/dedent/dedent.go' --- src/launchpad.net/gwacl/dedent/dedent.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/dedent/dedent.go 1970-01-01 00:00:00 +0000 @@ -1,68 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package dedent - -import ( - "regexp" - "strings" -) - -const emptyString = "" - -var reLine = regexp.MustCompile(`(?m-s)^.*$`) - -// Split the given text into lines. -func splitLines(text string) []string { - return reLine.FindAllString(text, -1) } - [sic] wait
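-// For example, the lines {"  one", "    two"} share the margin "  " (two -// spaces), while {"\tone", "  two"} mix a tab and spaces and so have no -// common margin; lines that are empty or all whitespace are ignored when -// computing the margin.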
-func calculateMargin(lines []string) string { - var margin string - var first bool = true - for _, line := range lines { - indent := reLeadingWhitespace.FindString(line) - switch { - case len(indent) == len(line): - // The line is either empty or whitespace and will be ignored for - // the purposes of calculating the margin. - case first: - // This is the first line with an indent, so start from here. - margin = indent - first = false - case strings.HasPrefix(indent, margin): - // This line's indent is longer or equal to the margin. The - // current margin remains unalterered. - case strings.HasPrefix(margin, indent): - // This line's indent is compatible with the margin but shorter - // (strictly it could be equal, however that condition is handled - // earlier in this switch). The current indent becomes the margin. - margin = indent - default: - // There is no common margin so stop scanning. - return emptyString - } - } - return margin -} - -// Remove a prefix from each line, if present. -func trimPrefix(lines []string, prefix string) { - trim := len(prefix) - for i, line := range lines { - if strings.HasPrefix(line, prefix) { - lines[i] = line[trim:] - } - } -} - -func Dedent(text string) string { - lines := splitLines(text) - trimPrefix(lines, calculateMargin(lines)) - return strings.Join(lines, "\n") -} === removed file 'src/launchpad.net/gwacl/dedent/dedent_test.go' --- src/launchpad.net/gwacl/dedent/dedent_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/dedent/dedent_test.go 1970-01-01 00:00:00 +0000 @@ -1,94 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package dedent - -import ( - . "launchpad.net/gocheck" - "testing" -) - -type dedentSuite struct{} - -var _ = Suite(&dedentSuite{}) - -// Dedent() does nothing with the empty string. -func (suite *dedentSuite) TestEmptyString(c *C) { - input := "" - expected := input - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() does nothing to a single line without an indent. -func (suite *dedentSuite) TestSingleLine(c *C) { - input := "This is a single line." - expected := input - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() removes all leading whitespace from single lines. -func (suite *dedentSuite) TestSingleLineWithIndent(c *C) { - input := " This is a single line." - expected := "This is a single line." - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() does nothing when none of the lines are indented. -func (suite *dedentSuite) TestLines(c *C) { - input := "One\nTwo\n" - expected := input - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() does nothing when *any* line is not indented. -func (suite *dedentSuite) TestLinesWithSomeIndents(c *C) { - input := "One\n Two\n" - expected := input - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() removes the common leading indent from each line. -func (suite *dedentSuite) TestLinesWithIndents(c *C) { - input := " One\n Two\n" - expected := "One\n Two\n" - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() ignores all-whitespace lines for the purposes of margin -// calculation. However, the margin *is* trimmed from these lines, if they -// begin with it.
-func (suite *dedentSuite) TestLinesWithEmptyLine(c *C) { - input := " One\n \n Three\n" - expected := "One\n \nThree\n" - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() ignores blank lines for the purposes of margin calculation, -// including the first line. -func (suite *dedentSuite) TestLinesWithEmptyFirstLine(c *C) { - input := "\n Two\n Three\n" - expected := "\nTwo\nThree\n" - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Dedent() treats spaces and tabs as completely different; no number of -// spaces is equivalent to a tab. -func (suite *dedentSuite) TestLinesWithTabsAndSpaces(c *C) { - input := "\tOne\n Two\n" - expected := input - observed := Dedent(input) - c.Check(observed, Equals, expected) -} - -// Master loader for all tests. -func Test(t *testing.T) { - TestingT(t) -} === removed file 'src/launchpad.net/gwacl/deletedisk.go' --- src/launchpad.net/gwacl/deletedisk.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/deletedisk.go 1970-01-01 00:00:00 +0000 @@ -1,65 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -// A poller object used to delete a disk. -// -// It takes an indeterminate time for a disk previously attached to a -// deleted VM to become "not in use" and thus be available for deletion. -// When we receive the "disk is still attached" error, we try again every -// 10 seconds until it succeeds, with a timeout of 30 minutes). -// This bug might be related to: -// http://social.msdn.microsoft.com/Forums/en-US/WAVirtualMachinesforWindows/thread/4394c75d-59ff-4634-8212-2ad71bf6fbd5/ -// -// Once this bug is fixed in Windows Azure, this file and the related tests -// can safely be removed, and ManagementAPI._DeleteDisk() can replace the -// current implementation of ManagementAPI.DeleteDisk() (which uses this -// poller). - -package gwacl - -import ( - "fmt" - "regexp" - "time" -) - -var deleteDiskTimeout = 30 * time.Minute -var deleteDiskInterval = 10 * time.Second - -type diskDeletePoller struct { - api *ManagementAPI - diskName string - deleteBlob bool -} - -var _ poller = &diskDeletePoller{} - -func (poller diskDeletePoller) poll() (*x509Response, error) { - return nil, poller.api._DeleteDisk(poller.diskName, poller.deleteBlob) -} - -// isInUseError returns whether or not the given string is of the "disk in use" -// type. -// Here is a real-world example of the error in question: -// "BadRequest - A disk with name gwacldiske5w7lkj is currently in use -// by virtual machine gwaclrolemvo1yab running within hosted service -// gwacl623yosxtppsa9577xy5, deployment gwaclmachinewes4n64f. (http -// code 400: Bad Request)" -func isInUseError(errString string, diskName string) bool { - pattern := fmt.Sprintf("BadRequest - A disk with name %s is currently in use by virtual machine.*", regexp.QuoteMeta(diskName)) - reg := regexp.MustCompile(pattern) - return reg.MatchString(errString) -} - -func (poller diskDeletePoller) isDone(response *x509Response, pollerErr error) (bool, error) { - if pollerErr == nil { - return true, nil - } - if isInUseError(pollerErr.Error(), poller.diskName) { - // The error is of the "disk in use" type: continue polling. - return false, nil - } - // The error is *not* of the "disk in use" type: stop polling and return - // the error. 
- return true, pollerErr -} === removed file 'src/launchpad.net/gwacl/deletedisk_test.go' --- src/launchpad.net/gwacl/deletedisk_test.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/deletedisk_test.go 1970-01-01 00:00:00 +0000 @@ -1,104 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - "net/http" - "time" - - . "launchpad.net/gocheck" -) - -type deleteDiskSuite struct{} - -var _ = Suite(&deleteDiskSuite{}) - -// Real-world error messages and names. -const ( - diskInUseErrorTemplate = "BadRequest - A disk with name %s is currently in use by virtual machine gwaclrolemvo1yab running within hosted service gwacl623yosxtppsa9577xy5, deployment gwaclmachinewes4n64f. (http code 400: Bad Request)" - diskName = "gwacldiske5w7lkj" - diskDoesNotExistError = "DELETE request failed: ResourceNotFound - The disk with the specified name does not exist. (http code 404: Not Found)" -) - -func (suite *deleteDiskSuite) TestIsInUseError(c *C) { - var testValues = []struct { - errorString string - diskName string - expectedResult bool - }{ - {fmt.Sprintf(diskInUseErrorTemplate, diskName), diskName, true}, - {fmt.Sprintf(diskInUseErrorTemplate, diskName), "another-disk", false}, - {"unknown error", diskName, false}, - {diskDoesNotExistError, diskName, false}, - } - for _, test := range testValues { - c.Check(isInUseError(test.errorString, test.diskName), Equals, test.expectedResult) - } -} - -func (suite *deleteDiskSuite) TestIsDoneReturnsTrueIfNilError(c *C) { - poller := diskDeletePoller{nil, "", false} - randomResponse := x509Response{StatusCode: http.StatusAccepted} - done, err := poller.isDone(&randomResponse, nil) - c.Check(done, Equals, true) - c.Check(err, IsNil) -} - -func (suite *deleteDiskSuite) TestIsDoneReturnsFalseIfDiskInUseError(c *C) { - diskName := "gwacldiske5w7lkj" - diskInUseError := fmt.Errorf(diskInUseErrorTemplate, diskName) - poller := diskDeletePoller{nil, diskName, false} - done, err := poller.isDone(nil, diskInUseError) - c.Check(done, Equals, false) - c.Check(err, IsNil) -} - -func (suite *deleteDiskSuite) TestIsDoneReturnsTrueIfAnotherError(c *C) { - anotherError := fmt.Errorf("Unknown error") - poller := diskDeletePoller{nil, "disk-name", false} - done, err := poller.isDone(nil, anotherError) - c.Check(done, Equals, true) - c.Check(err, Equals, anotherError) -} - -func (suite *deleteDiskSuite) TestPollCallsDeleteDisk(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - diskName := "gwacldiske5w7lkj" - poller := diskDeletePoller{api, diskName, false} - - response, err := poller.poll() - - c.Assert(response, IsNil) - c.Assert(err, IsNil) - expectedURL := api.session.composeURL("services/disks/" + diskName) - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, nil, "DELETE") -} - -func (suite *deleteDiskSuite) TestManagementAPIDeleteDiskPolls(c *C) { - firstResponse := DispatcherResponse{ - response: &x509Response{}, - errorObject: fmt.Errorf(diskInUseErrorTemplate, diskName)} - secondResponse := DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusOK}, - errorObject: nil} - responses := []DispatcherResponse{firstResponse, secondResponse} - rigPreparedResponseDispatcher(responses) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - api := makeAPI(c) - diskName := "gwacldiske5w7lkj" - poller := diskDeletePoller{api, diskName, false} - - 
response, err := performPolling(poller, time.Nanosecond, time.Minute) - - c.Assert(response, IsNil) - c.Assert(err, IsNil) - expectedURL := api.session.composeURL("services/disks/" + diskName) - c.Check(len(recordedRequests), Equals, 2) - checkRequest(c, recordedRequests[0], expectedURL, baseAPIVersion, nil, "DELETE") - checkRequest(c, recordedRequests[1], expectedURL, baseAPIVersion, nil, "DELETE") -} === removed file 'src/launchpad.net/gwacl/endpoints.go' --- src/launchpad.net/gwacl/endpoints.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/endpoints.go 1970-01-01 00:00:00 +0000 @@ -1,60 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - "net/url" - "strings" -) - -// APIEndpoint describes the base URL for accessing Windows Azure's APIs. - -// -// Azure will have subdomains on this URL's domain, such as blob.<domain> for -// storage, with further sub-domains for storage accounts; management.<domain> -// for the management API; and possibly more such as queue.<domain>, -// table.<domain>. APIEndpoint defines methods to obtain these URLs. -type APIEndpoint string - -// GetEndpoint returns the API endpoint for the given location. This is -// hard-coded, so some guesswork may be involved. -func GetEndpoint(location string) APIEndpoint { - if strings.Contains(location, "China") { - // Mainland China is a special case. It has its own endpoint. - return "https://core.chinacloudapi.cn/" - } - - // The rest of the world shares a single endpoint. - return "https://core.windows.net/" -} - -// prefixHost prefixes the hostname part of a URL with a subdomain. For -// example, prefixHost("foo", "http://example.com") becomes -// "http://foo.example.com". -// -// The URL must be well-formed, and contain a hostname. -func prefixHost(host, originalURL string) string { - parsedURL, err := url.Parse(originalURL) - if err != nil { - panic(fmt.Errorf("failed to parse URL %s - %v", originalURL, err)) - } - if parsedURL.Host == "" { - panic(fmt.Errorf("no hostname in URL '%s'", originalURL)) - } - // Escape manually. Strangely, turning a url.URL into a string does not - // do this for you. - parsedURL.Host = url.QueryEscape(host) + "." + parsedURL.Host - return parsedURL.String() -} - -// ManagementAPI returns the URL for the endpoint's management API. -func (endpoint APIEndpoint) ManagementAPI() string { - return prefixHost("management", string(endpoint)) -} - -// BlobStorageAPI returns a URL for the endpoint's blob storage API, for -// requests on the given account. -func (endpoint APIEndpoint) BlobStorageAPI(account string) string { - return prefixHost(account, prefixHost("blob", string(endpoint))) -} === removed file 'src/launchpad.net/gwacl/endpoints_test.go' --- src/launchpad.net/gwacl/endpoints_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/endpoints_test.go 1970-01-01 00:00:00 +0000 @@ -1,122 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - .
"launchpad.net/gocheck" - "net/url" -) - -type endpointsSuite struct{} - -var _ = Suite(&endpointsSuite{}) - -func (*endpointsSuite) TestGetEndpointReturnsEndpointsForKnownRegions(c *C) { - internationalLocations := []string{ - "West Europe", - "East Asia", - "East US 2", - "Southeast Asia", - "East US", - "Central US", - "West US", - "North Europe", - } - internationalEndpoint := APIEndpoint("https://core.windows.net/") - - for _, location := range internationalLocations { - c.Check(GetEndpoint(location), Equals, internationalEndpoint) - } - - // The mainland-China locations have a different endpoint. - // (Actually the East Asia data centre is said to be in Hong Kong, but it - // acts as international). - mainlandChinaLocations := []string{ - "China East", - "China North", - } - mainlandChinaEndpoint := APIEndpoint("https://core.chinacloudapi.cn/") - for _, location := range mainlandChinaLocations { - c.Check(GetEndpoint(location), Equals, mainlandChinaEndpoint) - } -} - -func (*endpointsSuite) TestGetEndpointMakesGoodGuessesForUknownRegions(c *C) { - c.Check( - GetEndpoint("South San Marino Highlands"), - Equals, - GetEndpoint("West US")) - c.Check( - GetEndpoint("Central China West"), - Equals, - GetEndpoint("China East")) -} - -func (*endpointsSuite) TestPrefixHostPrefixesSubdomain(c *C) { - c.Check( - prefixHost("foo", "http://example.com"), - Equals, - "http://foo.example.com") -} - -func (*endpointsSuite) TestPrefixHostPreservesOtherURLComponents(c *C) { - c.Check( - prefixHost("foo", "http://example.com/"), - Equals, - "http://foo.example.com/") - c.Check( - prefixHost("foo", "nntp://example.com"), - Equals, - "nntp://foo.example.com") - c.Check( - prefixHost("foo", "http://user@example.com"), - Equals, - "http://user@foo.example.com") - c.Check( - prefixHost("foo", "http://example.com:999"), - Equals, - "http://foo.example.com:999") - c.Check( - prefixHost("foo", "http://example.com/path"), - Equals, - "http://foo.example.com/path") -} - -func (*endpointsSuite) TestPrefixHostEscapes(c *C) { - host := "5%=1/20?" - c.Check( - prefixHost(host, "http://example.com"), - Equals, - fmt.Sprintf("http://%s.example.com", url.QueryEscape(host))) -} - -func (*endpointsSuite) TestManagementAPICombinesWithGetEndpoint(c *C) { - c.Check( - GetEndpoint("West US").ManagementAPI(), - Equals, - "https://management.core.windows.net/") - c.Check( - GetEndpoint("China East").ManagementAPI(), - Equals, - "https://management.core.chinacloudapi.cn/") -} - -func (*endpointsSuite) TestBlobStorageAPIIncludesAccountName(c *C) { - c.Check( - APIEndpoint("http://example.com").BlobStorageAPI("myaccount"), - Equals, - "http://myaccount.blob.example.com") -} - -func (*endpointsSuite) TestBlobStorageAPICombinesWithGetEndpoint(c *C) { - c.Check( - GetEndpoint("West US").BlobStorageAPI("account"), - Equals, - "https://account.blob.core.windows.net/") - c.Check( - GetEndpoint("China East").BlobStorageAPI("account"), - Equals, - "https://account.blob.core.chinacloudapi.cn/") -} === removed directory 'src/launchpad.net/gwacl/example' === removed directory 'src/launchpad.net/gwacl/example/management' === removed file 'src/launchpad.net/gwacl/example/management/run.go' --- src/launchpad.net/gwacl/example/management/run.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/example/management/run.go 1970-01-01 00:00:00 +0000 @@ -1,283 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). 
- -/* -This is an example of how the Azure Go library can be used to interact with -the Windows Azure Service. -Note that this is provided only as an example and that real code should -probably do something more sensible with errors than ignoring them or panicking. -*/ -package main - -import ( - "encoding/base64" - "flag" - "fmt" - "launchpad.net/gwacl" - . "launchpad.net/gwacl/logging" - "math/rand" - "os" - "time" -) - -var certFile string -var subscriptionID string -var pause bool -var location string - -func getParams() error { - flag.StringVar(&certFile, "cert", "", "Name of your management certificate file (in PEM format).") - flag.StringVar(&subscriptionID, "subscriptionid", "", "Your Azure subscription ID.") - flag.BoolVar(&pause, "pause", false, "Wait for user input after the VM is brought up (useful for further testing)") - flag.StringVar(&location, "location", "North Europe", "Azure cloud location, e.g. 'West US' or 'China East'") - - flag.Parse() - - if certFile == "" { - return fmt.Errorf("No .pem certificate specified. Use the -cert option.") - } - if subscriptionID == "" { - return fmt.Errorf("No subscription ID specified. Use the -subscriptionid option.") - } - return nil -} - -func checkError(err error) { - if err != nil { - panic(err) - } -} - -// makeRandomIdentifier creates an arbitrary identifier of the given length, -// consisting of only ASCII digits and lower-case ASCII letters. -// The identifier will start with the given prefix. The prefix must be no -// longer than the specified length, or there'll be trouble. -func makeRandomIdentifier(prefix string, length int) string { - // Only digits and lower-case ASCII letters are accepted. - const chars = "abcdefghijklmnopqrstuvwxyz0123456789" - - if len(prefix) > length { - panic(fmt.Errorf("prefix '%s' is more than the requested %d characters long", prefix, length)) - } - - id := prefix - for len(id) < length { - id += string(chars[rand.Intn(len(chars))]) - } - return id -} - -func main() { - rand.Seed(int64(time.Now().Nanosecond())) - - err := getParams() - if err != nil { - Info(err) - os.Exit(1) - } - - api, err := gwacl.NewManagementAPI(subscriptionID, certFile, location) - checkError(err) - - ExerciseHostedServicesAPI(api) - - Info("All done.") -} - -func ExerciseHostedServicesAPI(api *gwacl.ManagementAPI) { - var err error - location := "West US" - release := "13.04" - - affinityGroupName := gwacl.MakeRandomHostname("affinitygroup") - Info("Creating an affinity group...") - cag := gwacl.NewCreateAffinityGroup(affinityGroupName, "affinity-label", "affinity-description", location) - err = api.CreateAffinityGroup(&gwacl.CreateAffinityGroupRequest{ - CreateAffinityGroup: cag}) - checkError(err) - Infof("Created affinity group %s\n", affinityGroupName) - - defer func() { - Infof("Deleting affinity group %s\n", affinityGroupName) - err := api.DeleteAffinityGroup(&gwacl.DeleteAffinityGroupRequest{ - Name: affinityGroupName}) - checkError(err) - Infof("Done deleting affinity group %s\n", affinityGroupName) - }() - - virtualNetworkName := gwacl.MakeRandomVirtualNetworkName("virtual-net-") - Infof("Creating virtual network %s...\n", virtualNetworkName) - virtualNetwork := gwacl.VirtualNetworkSite{ - Name: virtualNetworkName, - AffinityGroup: affinityGroupName, - AddressSpacePrefixes: []string{ - "10.0.0.0/8", - }, - } - err = api.AddVirtualNetworkSite(&virtualNetwork) - checkError(err) - Info("Done creating virtual network") - - defer func() { - Infof("Deleting virtual network %s...\n", virtualNetworkName) - err :=
api.RemoveVirtualNetworkSite(virtualNetworkName) - checkError(err) - Infof("Done deleting virtual network %s\n", virtualNetworkName) - }() - - networkConfig, err := api.GetNetworkConfiguration() - checkError(err) - if networkConfig == nil { - Info("No network configuration is set") - } else { - xml, err := networkConfig.Serialize() - checkError(err) - Info(xml) - } - - Infof("Getting OS Image for release '%s' and location '%s'...\n", release, location) - images, err := api.ListOSImages() - checkError(err) - image, err := images.GetLatestUbuntuImage(release, location) - checkError(err) - sourceImageName := image.Name - Infof("Got image named '%s'\n", sourceImageName) - Info("Done getting OS Image\n") - - hostServiceName := gwacl.MakeRandomHostedServiceName("gwacl") - Infof("Creating a hosted service: '%s'...\n", hostServiceName) - createHostedService := gwacl.NewCreateHostedServiceWithLocation(hostServiceName, "testLabel", location) - createHostedService.AffinityGroup = affinityGroupName - err = api.AddHostedService(createHostedService) - checkError(err) - Info("Done creating a hosted service\n") - - defer func() { - Info("Destroying hosted service...") - // FIXME: Check error - api.DestroyHostedService(&gwacl.DestroyHostedServiceRequest{ - ServiceName: hostServiceName}) - Info("Done destroying hosted service\n") - }() - - Info("Listing hosted services...") - hostedServices, err := api.ListHostedServices() - checkError(err) - Infof("Got %d hosted service(s)\n", len(hostedServices)) - if len(hostedServices) > 0 { - hostedService := hostedServices[0] - detailedHostedService, err := api.GetHostedServiceProperties(hostedService.ServiceName, true) - checkError(err) - Infof("Hosted service '%s' contains %d deployments\n", hostedService.ServiceName, len(detailedHostedService.Deployments)) - // Do the same again with ListAllDeployments. - deployments, err := api.ListAllDeployments(&gwacl.ListAllDeploymentsRequest{ServiceName: hostedService.ServiceName}) - checkError(err) - if len(deployments) != len(detailedHostedService.Deployments) { - Errorf( - "Mismatch in reported deployments: %d != %d", - len(deployments), len(detailedHostedService.Deployments)) - } - } - Info("Done listing hosted services\n") - - Info("Adding VM deployment...") - hostname := gwacl.MakeRandomHostname("gwaclhost") - // Random passwords are no use to man nor beast here if you want to - // test with your instance, so we'll use a fixed one. It's not really a - // security hazard in such a short-lived private instance. 
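-// The user data below is passed base64-encoded (hence EncodeToString); the -// trailing "false" argument to NewLinuxProvisioningConfigurationSet is -// presumably DisableSshPasswordAuthentication, i.e. SSH password logins -// stay enabled so the fixed credentials below work.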
- password := "Ubuntu123" - username := "ubuntu" - vhdName := gwacl.MakeRandomDiskName("gwacldisk") - userdata := base64.StdEncoding.EncodeToString([]byte("TEST_USER_DATA")) - linuxConfigurationSet := gwacl.NewLinuxProvisioningConfigurationSet( - hostname, username, password, userdata, "false") - inputendpoint := gwacl.InputEndpoint{LocalPort: 22, Name: "sshport", Port: 22, Protocol: "TCP"} - networkConfigurationSet := gwacl.NewNetworkConfigurationSet([]gwacl.InputEndpoint{inputendpoint}, nil) - - storageAccount := makeRandomIdentifier("gwacl", 24) - storageLabel := makeRandomIdentifier("gwacl", 64) - Infof("Requesting storage account with name '%s' and label '%s'...\n", storageAccount, storageLabel) - cssi := gwacl.NewCreateStorageServiceInputWithLocation(storageAccount, storageLabel, location, "false") - err = api.AddStorageAccount(cssi) - checkError(err) - Info("Done requesting storage account\n") - - defer func() { - Infof("Deleting storage account %s...\n", storageAccount) - // FIXME: Check error - api.DeleteStorageAccount(storageAccount) - Info("Done deleting storage account\n") - }() - - mediaLink := gwacl.CreateVirtualHardDiskMediaLink(storageAccount, fmt.Sprintf("vhds/%s.vhd", vhdName)) - diskName := makeRandomIdentifier("gwacldisk", 16) - diskLabel := makeRandomIdentifier("gwacl", 64) - vhd := gwacl.NewOSVirtualHardDisk("", diskLabel, diskName, mediaLink, sourceImageName, "Linux") - roleName := gwacl.MakeRandomRoleName("gwaclrole") - role := gwacl.NewRole("ExtraSmall", roleName, vhd, - []gwacl.ConfigurationSet{*linuxConfigurationSet, *networkConfigurationSet}) - machineName := makeRandomIdentifier("gwaclmachine", 20) - deployment := gwacl.NewDeploymentForCreateVMDeployment( - machineName, "Production", machineName, []gwacl.Role{*role}, virtualNetworkName) - err = api.AddDeployment(deployment, hostServiceName) - checkError(err) - Info("Done adding VM deployment\n") - - Info("Starting VM...") - err = api.StartRole(&gwacl.StartRoleRequest{hostServiceName, deployment.Name, role.RoleName}) - checkError(err) - Info("Done starting VM\n") - - Info("Listing VM...") - instances, err := api.ListInstances(&gwacl.ListInstancesRequest{hostServiceName}) - checkError(err) - Infof("Got %d instance(s)\n", len(instances)) - Info("Done listing VM\n") - - Info("Getting deployment info...") - request := &gwacl.GetDeploymentRequest{ServiceName: hostServiceName, DeploymentName: machineName} - deploy, err := api.GetDeployment(request) - checkError(err) - fqdn, err := deploy.GetFQDN() - checkError(err) - Info("Got deployment info\n") - - Info("Adding role input endpoint...") - endpoint := gwacl.InputEndpoint{ - Name: gwacl.MakeRandomHostname("endpoint-"), - Port: 1080, - LocalPort: 80, - Protocol: "TCP", - } - err = api.AddRoleEndpoints(&gwacl.AddRoleEndpointsRequest{ - ServiceName: hostServiceName, - DeploymentName: deployment.Name, - RoleName: role.RoleName, - InputEndpoints: []gwacl.InputEndpoint{endpoint}, - }) - checkError(err) - Info("Added role input endpoint\n") - - defer func() { - Info("Removing role input endpoint...") - err := api.RemoveRoleEndpoints(&gwacl.RemoveRoleEndpointsRequest{ - ServiceName: hostServiceName, - DeploymentName: deployment.Name, - RoleName: role.RoleName, - InputEndpoints: []gwacl.InputEndpoint{endpoint}, - }) - checkError(err) - Info("Removed role input endpoint\n") - }() - - if pause { - var wait string - fmt.Println("host:", fqdn) - fmt.Println("username:", username) - fmt.Println("password:", password) - fmt.Println("") - fmt.Println("Pausing so you can play with the 
newly-created VM")
-        fmt.Println("To clear up, type something and press enter to continue")
-        fmt.Scan(&wait)
-    }
-}
=== removed directory 'src/launchpad.net/gwacl/example/storage'
=== removed file 'src/launchpad.net/gwacl/example/storage/run.go'
--- src/launchpad.net/gwacl/example/storage/run.go	2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/example/storage/run.go	1970-01-01 00:00:00 +0000
@@ -1,396 +0,0 @@
-// Copyright 2013 Canonical Ltd.  This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-// This is an example of how to use GWACL to interact with the Azure storage
-// services API.
-//
-// Note that it is provided "as-is" and contains very little error handling.
-// Real code should handle errors.
-
-package main
-
-import (
-    "flag"
-    "fmt"
-    "io/ioutil"
-    "launchpad.net/gwacl"
-    "os"
-    "strings"
-)
-
-func badOperationError() error {
-    return fmt.Errorf("Must specify one of %v", operationNames)
-}
-
-// operation is something you can instruct this program to do, by specifying
-// its name on the command line.
-type operation struct {
-    // name is the operation name as used on the command line.
-    name string
-    // description holds a description of what the operation does.
-    description string
-    // example illustrates how the operation is used.
-    example string
-    // requiredArgs lists the command-line options that are required for this
-    // operation.
-    requiredArgs []string
-    // validate is an optional callback to perform more detailed checking on
-    // the operation's arguments.
-    validate func() error
-    // implementation is a function that performs the operation.  If it
-    // fails, it just panics.
-    implementation func(gwacl.StorageContext)
-}
-
-// operations defines what operations are available to be invoked from the
-// command line.
-var operations = []operation{
-    {
-        name:           "listcontainers",
-        description:    "Show existing storage containers",
-        example:        "listcontainers",
-        implementation: listcontainers,
-    },
-    {
-        name:           "list",
-        description:    "List files in a container",
-        example:        "-container=<container> list",
-        requiredArgs:   []string{"container"},
-        implementation: list,
-    },
-    {
-        name:         "containeracl",
-        description:  "Set access on a container",
-        example:      "-container=<container> -acl <container|blob|private> containeracl",
-        requiredArgs: []string{"container", "key", "acl"},
-        validate: func() error {
-            if acl != "container" && acl != "blob" && acl != "private" {
-                return fmt.Errorf(
-                    "Usage: containeracl -container=<container> -acl <container|blob|private>")
-            }
-            return nil
-        },
-        implementation: containeracl,
-    },
-    {
-        name:           "getblob",
-        description:    "Get a file from a container (it's returned on stdout)",
-        example:        "-container=<container> -filename=<filename> getblob",
-        requiredArgs:   []string{"container", "filename"},
-        implementation: getblob,
-    },
-    {
-        name:           "addblock",
-        description:    "Upload a file to a block blob",
-        example:        "-container=<container> -filename=<filename> addblock",
-        requiredArgs:   []string{"key", "container", "filename"},
-        implementation: addblock,
-    },
-    {
-        name:           "deleteblob",
-        description:    "Delete a blob",
-        example:        "-container=<container> -filename=<filename> deleteblob",
-        requiredArgs:   []string{"key", "container", "filename"},
-        implementation: deleteblob,
-    },
-    {
-        name:        "putblob",
-        description: "Create an empty page blob",
-        example: "-container=<container> -blobname=<blobname> -size=<size> " +
-            "-blobtype=\"page\" putblob",
-        requiredArgs:   []string{"key", "blobname", "blobtype", "container", "size"},
-        implementation: putblob,
-    },
-    {
-        name: "putpage",
-        description: "Upload a file to a page blob's page.  The range must start " +
-            "on a 512-byte boundary and end one byte before one, e.g. -pagerange=0-511",
-        example: "-container=<container> -blobname=<blobname> -pagerange=<pagerange> " +
-            "-filename=<filename> putpage",
-        requiredArgs:   []string{"key", "blobname", "container", "pagerange", "filename"},
-        implementation: putpage,
-    },
-}
-
-// operationsByName maps each operation name to an operation struct that
-// describes it.
-var operationsByName map[string]operation
-
-// operationNames lists just the names of the operations, in the order in
-// which they are listed in "operations".
-var operationNames []string
-
-func init() {
-    operationsByName = make(map[string]operation, len(operations))
-    for _, op := range operations {
-        operationsByName[op.name] = op
-    }
-
-    operationNames = make([]string, len(operations))
-    for index, op := range operations {
-        operationNames[index] = op.name
-    }
-}
-
-// Variables set by command-line options.
-var (
-    help      bool
-    account   string
-    location  string
-    key       string
-    filename  string
-    container string
-    prefix    string
-    blobname  string
-    blobtype  string
-    size      int
-    pagerange string
-    acl       string
-)
-
-// argumentGiven returns whether the named argument was specified on the
-// command line.
-func argumentGiven(name string) bool {
-    // This is stupid.  There must be a way to ask the flag module directly!
-    switch name {
-    case "account":
-        return account != ""
-    case "location":
-        return location != ""
-    case "key":
-        return key != ""
-    case "container":
-        return container != ""
-    case "filename":
-        return filename != ""
-    case "prefix":
-        return prefix != ""
-    case "blobname":
-        return blobname != ""
-    case "blobtype":
-        return blobtype != ""
-    case "size":
-        return size != 0
-    case "pagerange":
-        return pagerange != ""
-    case "acl":
-        return acl != ""
-    }
-    panic(fmt.Errorf("internal error: unknown command-line option: %s", name))
-}
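The "This is stupid" comment above does have an answer in the standard library: flag.Visit invokes its callback only for flags that were actually set on the command line, so no per-variable zero-value comparisons are needed. A minimal sketch of that alternative (not part of the original example):

    // argumentGiven reports whether the named flag was set explicitly.
    // flag.Visit only visits flags that appeared on the command line, so
    // this also works for flags whose value happens to equal the default.
    // Call only after flag.Parse().
    func argumentGiven(name string) bool {
        given := false
        flag.Visit(func(f *flag.Flag) {
            if f.Name == name {
                given = true
            }
        })
        return given
    }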
-func getParams() (string, error) {
-    flag.BoolVar(&help, "h", false, "Show usage and exit")
-
-    flag.StringVar(&account, "account", "", "Storage account name")
-    flag.StringVar(&location, "location", "", "Azure location, e.g. \"West US\", \"China East\", or \"North Europe\"")
-    flag.StringVar(&key, "key", "", "A valid storage account key (base64 encoded), defaults to the empty string (i.e. anonymous access)")
-    flag.StringVar(&container, "container", "", "Name of the container to use")
-    flag.StringVar(&filename, "filename", "", "File containing blob/page to upload/download")
-    flag.StringVar(&prefix, "prefix", "", "Prefix to match when listing blobs")
-    flag.StringVar(&blobname, "blobname", "", "Name of blob in container")
-    flag.StringVar(&blobtype, "blobtype", "", "Type of blob, 'page' or 'block'")
-    flag.IntVar(&size, "size", 0, "Size of blob to create for a page 'putblob'")
-    flag.StringVar(&pagerange, "pagerange", "", "When uploading to a page blob, this specifies which byte range of the blob to write.  Use the format 'start-end', e.g. -pagerange 1024-2048")
-    flag.StringVar(&acl, "acl", "", "When using 'containeracl', specify an ACL type")
-    flag.Parse()
-
-    if help {
-        return "", nil
-    }
-
-    opName := flag.Arg(0)
-    if opName == "" {
-        return "", fmt.Errorf("No operation specified")
-    }
-
-    requiredArgs := []string{"account", "location"}
-    for _, arg := range requiredArgs {
-        if !argumentGiven(arg) {
-            return "", fmt.Errorf("Must supply %q parameter.", arg)
-        }
-    }
-
-    if len(flag.Args()) != 1 {
-        return "", badOperationError()
-    }
-
-    op, isDefined := operationsByName[opName]
-    if !isDefined {
-        return "", badOperationError()
-    }
-
-    for _, arg := range op.requiredArgs {
-        if !argumentGiven(arg) {
-            return "", fmt.Errorf("%q requires these options: %v", op.name, op.requiredArgs)
-        }
-    }
-
-    if op.validate != nil {
-        err := op.validate()
-        if err != nil {
-            return "", err
-        }
-    }
-
-    return op.name, nil
-}
-
-func Usage() {
-    fmt.Fprintf(
-        os.Stderr,
-        "Usage:\n    %s [args] <%s>\n",
-        os.Args[0],
-        strings.Join(operationNames, "|"))
-    flag.PrintDefaults()
-
-    fmt.Fprintf(os.Stderr, `
-    This is an example of how to interact with the Azure storage service.
-    It is not a complete example, but it does give a useful way to do some
-    basic operations.
-
-    The -account parameter must always be supplied; -key must also be supplied
-    for operations that change things (get both from the Azure web UI),
-    otherwise anonymous access is used.  Additionally there are the following
-    command invocation parameters:
-    `)
-
-    for _, op := range operations {
-        fmt.Fprintf(os.Stderr, "\n    %s:\n        %s\n", op.description, op.example)
-    }
-}
-
-func dumpError(err error) {
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "ERROR: ")
-        fmt.Fprintf(os.Stderr, "%s\n", err)
-    }
-}
-
-func listcontainers(storage gwacl.StorageContext) {
-    res, e := storage.ListAllContainers()
-    if e != nil {
-        dumpError(e)
-        return
-    }
-    for _, c := range res.Containers {
-        // TODO: embellish with the other container data
-        fmt.Println(c.Name)
-    }
-}
-
-func containeracl(storage gwacl.StorageContext) {
-    err := storage.SetContainerACL(&gwacl.SetContainerACLRequest{
-        Container: container,
-        Access:    acl,
-    })
-    dumpError(err)
-}
-
-func list(storage gwacl.StorageContext) {
-    request := &gwacl.ListBlobsRequest{
-        Container: container, Prefix: prefix}
-    res, err := storage.ListAllBlobs(request)
-    if err != nil {
-        dumpError(err)
-        return
-    }
-    for _, b := range res.Blobs {
-        fmt.Printf("%s, %s, %s\n", b.ContentLength, b.LastModified, b.Name)
-    }
-}
-
-func addblock(storage gwacl.StorageContext) {
-    var err error
-    file, err := os.Open(filename)
-    if err != nil {
-        dumpError(err)
-        return
-    }
-    defer file.Close()
-
-    err = storage.UploadBlockBlob(container, filename, file)
-    if err != nil {
-        dumpError(err)
-        return
-    }
-}
-
-func deleteblob(storage gwacl.StorageContext) {
-    err := storage.DeleteBlob(container, filename)
-    dumpError(err)
-}
-
-func getblob(storage gwacl.StorageContext) {
-    var err error
-    file, err := storage.GetBlob(container, filename)
-    if err != nil {
-        dumpError(err)
-        return
-    }
-    data, err := ioutil.ReadAll(file)
-    if err != nil {
-        dumpError(err)
-        return
-    }
-    os.Stdout.Write(data)
-}
-
-func putblob(storage gwacl.StorageContext) {
-    err := storage.PutBlob(&gwacl.PutBlobRequest{
-        Container: container,
-        BlobType:  blobtype,
-        Filename:  blobname,
-        Size:      size,
-    })
-    dumpError(err)
-}
-
-func putpage(storage gwacl.StorageContext) {
-    var err error
-    file, err := os.Open(filename)
-    if err != nil {
-        dumpError(err)
-        return
-    }
-    defer file.Close()
-
-    var start, end int
-    fmt.Sscanf(pagerange, "%d-%d", &start, &end)
-
-    err = storage.PutPage(&gwacl.PutPageRequest{
-        Container:  container,
-        Filename:   blobname,
-        StartRange: start,
-        EndRange:   end,
-        Data:       file,
-    })
-    if err != nil {
-        dumpError(err)
-        return
-    }
-}
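putpage above passes the parsed -pagerange straight to PutPage, although its help text says the range must align to 512-byte pages. A hypothetical helper (not part of GWACL) sketching the omitted check:

    // validatePageRange enforces the putpage alignment rule: the range
    // must start on a 512-byte page boundary and end on the last byte
    // of a page, e.g. 0-511 or 512-1023.
    func validatePageRange(start, end int) error {
        if start < 0 || end < start || start%512 != 0 || (end+1)%512 != 0 {
            return fmt.Errorf("invalid page range %d-%d: not aligned to 512-byte pages", start, end)
        }
        return nil
    }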
-func main() {
-    flag.Usage = Usage
-    var err error
-    op, err := getParams()
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "%s\n", err.Error())
-        fmt.Fprintf(os.Stderr, "Use -h for help with using this program.\n")
-        os.Exit(1)
-    }
-    if help {
-        Usage()
-        os.Exit(0)
-    }
-
-    storage := gwacl.StorageContext{
-        Account:       account,
-        Key:           key,
-        AzureEndpoint: gwacl.GetEndpoint(location),
-    }
-
-    perform := operationsByName[op].implementation
-    perform(storage)
-}
=== removed directory 'src/launchpad.net/gwacl/fork'
=== removed file 'src/launchpad.net/gwacl/fork/LICENSE'
--- src/launchpad.net/gwacl/fork/LICENSE	2013-07-23 08:51:44 +0000
+++ src/launchpad.net/gwacl/fork/LICENSE	1970-01-01 00:00:00 +0000
@@ -1,27 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=== removed file 'src/launchpad.net/gwacl/fork/README'
--- src/launchpad.net/gwacl/fork/README	2013-07-23 08:51:44 +0000
+++ src/launchpad.net/gwacl/fork/README	1970-01-01 00:00:00 +0000
@@ -1,11 +0,0 @@
-This directory contains a fork of Go's standard libraries net/http and
-crypto/tls.
-
-This fork is required to support the TLS renegotiation which is triggered by
-the Windows Azure server when establishing an https connection.  TLS
-renegotiation is currently not supported by Go's standard library.
-
-The fork is based on go version 2:1.0.2-2.
-The library crypto/tls is patched to support TLS renegotiation (see the patch
-file "go-tls-renegotiation.patch").
-The library net/http is patched to use the forked version of crypto/tls.
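Because the fork mirrors the standard package layout and, as the client.go below suggests, keeps the standard API, consumers would opt in to the patched TLS stack by changing only an import path. A sketch under that assumption (the URL is illustrative only):

    package main

    import (
        // Drop-in for "net/http"; its TLS layer tolerates the
        // renegotiation requested by the Azure servers.
        "launchpad.net/gwacl/fork/http"
    )

    func main() {
        resp, err := http.Get("https://management.core.windows.net/")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
    }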
=== removed file 'src/launchpad.net/gwacl/fork/go-tls-renegotiation.patch' --- src/launchpad.net/gwacl/fork/go-tls-renegotiation.patch 2013-07-23 08:51:44 +0000 +++ src/launchpad.net/gwacl/fork/go-tls-renegotiation.patch 1970-01-01 00:00:00 +0000 @@ -1,113 +0,0 @@ -diff -r c242bbf5fa8c src/pkg/crypto/tls/common.go ---- a/src/pkg/crypto/tls/common.go Wed Jul 17 14:03:27 2013 -0400 -+++ b/src/pkg/crypto/tls/common.go Thu Jul 18 13:45:43 2013 -0400 -@@ -44,6 +44,7 @@ - - // TLS handshake message types. - const ( -+ typeHelloRequest uint8 = 0 - typeClientHello uint8 = 1 - typeServerHello uint8 = 2 - typeNewSessionTicket uint8 = 4 -diff -r c242bbf5fa8c src/pkg/crypto/tls/conn.go ---- a/src/pkg/crypto/tls/conn.go Wed Jul 17 14:03:27 2013 -0400 -+++ b/src/pkg/crypto/tls/conn.go Thu Jul 18 13:45:43 2013 -0400 -@@ -146,6 +146,9 @@ - hc.mac = hc.nextMac - hc.nextCipher = nil - hc.nextMac = nil -+ for i := range hc.seq { -+ hc.seq[i] = 0 -+ } - return nil - } - -@@ -478,7 +481,7 @@ - func (c *Conn) readRecord(want recordType) error { - // Caller must be in sync with connection: - // handshake data if handshake not yet completed, -- // else application data. (We don't support renegotiation.) -+ // else application data. - switch want { - default: - return c.sendAlert(alertInternalError) -@@ -611,7 +614,7 @@ - - case recordTypeHandshake: - // TODO(rsc): Should at least pick off connection close. -- if typ != want { -+ if typ != want && !c.isClient { - return c.sendAlert(alertNoRenegotiation) - } - c.hand.Write(data) -@@ -741,6 +744,8 @@ - data = c.hand.Next(4 + n) - var m handshakeMessage - switch data[0] { -+ case typeHelloRequest: -+ m = new(helloRequestMsg) - case typeClientHello: - m = new(clientHelloMsg) - case typeServerHello: -@@ -825,6 +830,25 @@ - return n + m, c.setError(err) - } - -+func (c *Conn) handleRenegotiation() error { -+ c.handshakeComplete = false -+ if !c.isClient { -+ panic("renegotiation should only happen for a client") -+ } -+ -+ msg, err := c.readHandshake() -+ if err != nil { -+ return err -+ } -+ _, ok := msg.(*helloRequestMsg) -+ if !ok { -+ c.sendAlert(alertUnexpectedMessage) -+ return alertUnexpectedMessage -+ } -+ -+ return c.Handshake() -+} -+ - // Read can be made to time out and return a net.Error with Timeout() == true - // after a fixed time limit; see SetDeadline and SetReadDeadline. - func (c *Conn) Read(b []byte) (n int, err error) { -@@ -844,6 +868,14 @@ - // Soft error, like EAGAIN - return 0, err - } -+ if c.hand.Len() > 0 { -+ // We received handshake bytes, indicating the start of -+ // a renegotiation. 
-+ if err := c.handleRenegotiation(); err != nil { -+ return 0, err -+ } -+ continue -+ } - } - if err := c.error(); err != nil { - return 0, err -diff -r c242bbf5fa8c src/pkg/crypto/tls/handshake_messages.go ---- a/src/pkg/crypto/tls/handshake_messages.go Wed Jul 17 14:03:27 2013 -0400 -+++ b/src/pkg/crypto/tls/handshake_messages.go Thu Jul 18 13:45:43 2013 -0400 -@@ -1243,6 +1243,17 @@ - return true - } - -+type helloRequestMsg struct { -+} -+ -+func (*helloRequestMsg) marshal() []byte { -+ return []byte{typeHelloRequest, 0, 0, 0} -+} -+ -+func (*helloRequestMsg) unmarshal(data []byte) bool { -+ return len(data) == 4 -+} -+ - func eqUint16s(x, y []uint16) bool { - if len(x) != len(y) { - return false === removed directory 'src/launchpad.net/gwacl/fork/http' === removed file 'src/launchpad.net/gwacl/fork/http/chunked.go' --- src/launchpad.net/gwacl/fork/http/chunked.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/chunked.go 1970-01-01 00:00:00 +0000 @@ -1,170 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The wire protocol for HTTP's "chunked" Transfer-Encoding. - -// This code is duplicated in httputil/chunked.go. -// Please make any changes in both files. - -package http - -import ( - "bufio" - "bytes" - "errors" - "io" - "strconv" -) - -const maxLineLength = 4096 // assumed <= bufio.defaultBufSize - -var ErrLineTooLong = errors.New("header line too long") - -// newChunkedReader returns a new chunkedReader that translates the data read from r -// out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. -// -// newChunkedReader is not needed by normal applications. The http package -// automatically decodes chunking when reading response bodies. -func newChunkedReader(r io.Reader) io.Reader { - br, ok := r.(*bufio.Reader) - if !ok { - br = bufio.NewReader(r) - } - return &chunkedReader{r: br} -} - -type chunkedReader struct { - r *bufio.Reader - n uint64 // unread bytes in chunk - err error -} - -func (cr *chunkedReader) beginChunk() { - // chunk-size CRLF - var line string - line, cr.err = readLine(cr.r) - if cr.err != nil { - return - } - cr.n, cr.err = strconv.ParseUint(line, 16, 64) - if cr.err != nil { - return - } - if cr.n == 0 { - cr.err = io.EOF - } -} - -func (cr *chunkedReader) Read(b []uint8) (n int, err error) { - if cr.err != nil { - return 0, cr.err - } - if cr.n == 0 { - cr.beginChunk() - if cr.err != nil { - return 0, cr.err - } - } - if uint64(len(b)) > cr.n { - b = b[0:cr.n] - } - n, cr.err = cr.r.Read(b) - cr.n -= uint64(n) - if cr.n == 0 && cr.err == nil { - // end of chunk (CRLF) - b := make([]byte, 2) - if _, cr.err = io.ReadFull(cr.r, b); cr.err == nil { - if b[0] != '\r' || b[1] != '\n' { - cr.err = errors.New("malformed chunked encoding") - } - } - } - return n, cr.err -} - -// Read a line of bytes (up to \n) from b. -// Give up if the line exceeds maxLineLength. -// The returned bytes are a pointer into storage in -// the bufio, so they are only valid until the next bufio read. -func readLineBytes(b *bufio.Reader) (p []byte, err error) { - if p, err = b.ReadSlice('\n'); err != nil { - // We always know when EOF is coming. - // If the caller asked for a line, there should be a line. 
-		if err == io.EOF {
-			err = io.ErrUnexpectedEOF
-		} else if err == bufio.ErrBufferFull {
-			err = ErrLineTooLong
-		}
-		return nil, err
-	}
-	if len(p) >= maxLineLength {
-		return nil, ErrLineTooLong
-	}
-
-	// Chop off trailing white space.
-	p = bytes.TrimRight(p, " \r\t\n")
-
-	return p, nil
-}
-
-// readLineBytes, but convert the bytes into a string.
-func readLine(b *bufio.Reader) (s string, err error) {
-	p, e := readLineBytes(b)
-	if e != nil {
-		return "", e
-	}
-	return string(p), nil
-}
-
-// newChunkedWriter returns a new chunkedWriter that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned chunkedWriter
-// sends the final 0-length chunk that marks the end of the stream.
-//
-// newChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using newChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func newChunkedWriter(w io.Writer) io.WriteCloser {
-	return &chunkedWriter{w}
-}
-
-// Writing to chunkedWriter translates to writing in HTTP chunked Transfer
-// Encoding wire format to the underlying Wire writer.
-type chunkedWriter struct {
-	Wire io.Writer
-}
-
-// Write the contents of data as one chunk to Wire.
-// Note that the corresponding chunk-writing procedure in Conn.Write has
-// a bug since it does not check the success of io.WriteString.
-func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
-
-	// Don't send 0-length data. It looks like EOF for chunked encoding.
-	if len(data) == 0 {
-		return 0, nil
-	}
-
-	head := strconv.FormatInt(int64(len(data)), 16) + "\r\n"
-
-	if _, err = io.WriteString(cw.Wire, head); err != nil {
-		return 0, err
-	}
-	if n, err = cw.Wire.Write(data); err != nil {
-		return
-	}
-	if n != len(data) {
-		err = io.ErrShortWrite
-		return
-	}
-	_, err = io.WriteString(cw.Wire, "\r\n")
-
-	return
-}
-
-func (cw *chunkedWriter) Close() error {
-	_, err := io.WriteString(cw.Wire, "0\r\n")
-	return err
-}
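To make the wire format concrete: chunkedWriter frames every Write as a hexadecimal length, CRLF, the payload, and a trailing CRLF, and Close emits the terminating zero-length chunk. A hypothetical in-package test (with bytes, io, and testing imported) sketching the framing:

    func TestChunkedFraming(t *testing.T) {
        var buf bytes.Buffer
        w := newChunkedWriter(&buf)
        io.WriteString(w, "hello")
        io.WriteString(w, "world!")
        w.Close()

        // "5" and "6" are the chunk lengths in hex; "0" ends the stream.
        want := "5\r\nhello\r\n6\r\nworld!\r\n0\r\n"
        if got := buf.String(); got != want {
            t.Fatalf("got %q, want %q", got, want)
        }
    }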
=== removed file 'src/launchpad.net/gwacl/fork/http/client.go'
--- src/launchpad.net/gwacl/fork/http/client.go	2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/fork/http/client.go	1970-01-01 00:00:00 +0000
@@ -1,339 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// HTTP client. See RFC 2616.
-//
-// This is the high-level Client interface.
-// The low-level implementation is in transport.go.
-
-package http
-
-import (
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"io"
-	"net/url"
-	"strings"
-)
-
-// A Client is an HTTP client. Its zero value (DefaultClient) is a usable client
-// that uses DefaultTransport.
-//
-// The Client's Transport typically has internal state (cached
-// TCP connections), so Clients should be reused instead of created as
-// needed. Clients are safe for concurrent use by multiple goroutines.
-type Client struct {
-	// Transport specifies the mechanism by which individual
-	// HTTP requests are made.
-	// If nil, DefaultTransport is used.
-	Transport RoundTripper
-
-	// CheckRedirect specifies the policy for handling redirects.
-	// If CheckRedirect is not nil, the client calls it before
-	// following an HTTP redirect. The arguments req and via
-	// are the upcoming request and the requests made already,
-	// oldest first. If CheckRedirect returns an error, the client
-	// returns that error instead of issuing the Request req.
-	//
-	// If CheckRedirect is nil, the Client uses its default policy,
-	// which is to stop after 10 consecutive redirects.
-	CheckRedirect func(req *Request, via []*Request) error
-
-	// Jar specifies the cookie jar.
-	// If Jar is nil, cookies are not sent in requests and ignored
-	// in responses.
-	Jar CookieJar
-}
-
-// DefaultClient is the default Client and is used by Get, Head, and Post.
-var DefaultClient = &Client{}
-
-// RoundTripper is an interface representing the ability to execute a
-// single HTTP transaction, obtaining the Response for a given Request.
-//
-// A RoundTripper must be safe for concurrent use by multiple
-// goroutines.
-type RoundTripper interface {
-	// RoundTrip executes a single HTTP transaction, returning
-	// the Response for the request req.  RoundTrip should not
-	// attempt to interpret the response.  In particular,
-	// RoundTrip must return err == nil if it obtained a response,
-	// regardless of the response's HTTP status code.  A non-nil
-	// err should be reserved for failure to obtain a response.
-	// Similarly, RoundTrip should not attempt to handle
-	// higher-level protocol details such as redirects,
-	// authentication, or cookies.
-	//
-	// RoundTrip should not modify the request, except for
-	// consuming the Body.  The request's URL and Header fields
-	// are guaranteed to be initialized.
-	RoundTrip(*Request) (*Response, error)
-}
-
-// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
-// return true if the string includes a port.
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
-
-// Used in Send to implement io.ReadCloser by bundling together the
-// bufio.Reader through which we read the response, and the underlying
-// network connection.
-type readClose struct {
-	io.Reader
-	io.Closer
-}
-
-// Do sends an HTTP request and returns an HTTP response, following
-// policy (e.g. redirects, cookies, auth) as configured on the client.
-//
-// A non-nil response always contains a non-nil resp.Body.
-//
-// Callers should close resp.Body when done reading from it. If
-// resp.Body is not closed, the Client's underlying RoundTripper
-// (typically Transport) may not be able to re-use a persistent TCP
-// connection to the server for a subsequent "keep-alive" request.
-//
-// Generally Get, Post, or PostForm will be used instead of Do.
-func (c *Client) Do(req *Request) (resp *Response, err error) {
-	if req.Method == "GET" || req.Method == "HEAD" {
-		return c.doFollowingRedirects(req)
-	}
-	return send(req, c.Transport)
-}
-
-// send issues an HTTP request. Caller should close resp.Body when done reading from it.
-func send(req *Request, t RoundTripper) (resp *Response, err error) {
-	if t == nil {
-		t = DefaultTransport
-		if t == nil {
-			err = errors.New("http: no Client.Transport or DefaultTransport")
-			return
-		}
-	}
-
-	if req.URL == nil {
-		return nil, errors.New("http: nil Request.URL")
-	}
-
-	if req.RequestURI != "" {
-		return nil, errors.New("http: Request.RequestURI can't be set in client requests.")
-	}
-
-	// Most of the callers of send (Get, Post, et al) don't need
-	// Headers, leaving it uninitialized.  We guarantee to the
-	// Transport that this has been initialized, though.
- if req.Header == nil { - req.Header = make(Header) - } - - if u := req.URL.User; u != nil { - req.Header.Set("Authorization", "Basic "+base64.URLEncoding.EncodeToString([]byte(u.String()))) - } - return t.RoundTrip(req) -} - -// True if the specified HTTP status code is one for which the Get utility should -// automatically redirect. -func shouldRedirect(statusCode int) bool { - switch statusCode { - case StatusMovedPermanently, StatusFound, StatusSeeOther, StatusTemporaryRedirect: - return true - } - return false -} - -// Get issues a GET to the specified URL. If the response is one of the following -// redirect codes, Get follows the redirect, up to a maximum of 10 redirects: -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Caller should close r.Body when done reading from it. -// -// Get is a wrapper around DefaultClient.Get. -func Get(url string) (r *Response, err error) { - return DefaultClient.Get(url) -} - -// Get issues a GET to the specified URL. If the response is one of the -// following redirect codes, Get follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Caller should close r.Body when done reading from it. -func (c *Client) Get(url string) (r *Response, err error) { - req, err := NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return c.doFollowingRedirects(req) -} - -func (c *Client) doFollowingRedirects(ireq *Request) (r *Response, err error) { - // TODO: if/when we add cookie support, the redirected request shouldn't - // necessarily supply the same cookies as the original. - var base *url.URL - redirectChecker := c.CheckRedirect - if redirectChecker == nil { - redirectChecker = defaultCheckRedirect - } - var via []*Request - - if ireq.URL == nil { - return nil, errors.New("http: nil Request.URL") - } - - jar := c.Jar - if jar == nil { - jar = blackHoleJar{} - } - - req := ireq - urlStr := "" // next relative or absolute URL to fetch (after first request) - for redirect := 0; ; redirect++ { - if redirect != 0 { - req = new(Request) - req.Method = ireq.Method - req.Header = make(Header) - req.URL, err = base.Parse(urlStr) - if err != nil { - break - } - if len(via) > 0 { - // Add the Referer header. - lastReq := via[len(via)-1] - if lastReq.URL.Scheme != "https" { - req.Header.Set("Referer", lastReq.URL.String()) - } - - err = redirectChecker(req, via) - if err != nil { - break - } - } - } - - for _, cookie := range jar.Cookies(req.URL) { - req.AddCookie(cookie) - } - urlStr = req.URL.String() - if r, err = send(req, c.Transport); err != nil { - break - } - if c := r.Cookies(); len(c) > 0 { - jar.SetCookies(req.URL, c) - } - - if shouldRedirect(r.StatusCode) { - r.Body.Close() - if urlStr = r.Header.Get("Location"); urlStr == "" { - err = errors.New(fmt.Sprintf("%d response missing Location header", r.StatusCode)) - break - } - base = req.URL - via = append(via, req) - continue - } - return - } - - method := ireq.Method - err = &url.Error{ - Op: method[0:1] + strings.ToLower(method[1:]), - URL: urlStr, - Err: err, - } - return -} - -func defaultCheckRedirect(req *Request, via []*Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - return nil -} - -// Post issues a POST to the specified URL. -// -// Caller should close r.Body when done reading from it. 
-// -// Post is a wrapper around DefaultClient.Post -func Post(url string, bodyType string, body io.Reader) (r *Response, err error) { - return DefaultClient.Post(url, bodyType, body) -} - -// Post issues a POST to the specified URL. -// -// Caller should close r.Body when done reading from it. -func (c *Client) Post(url string, bodyType string, body io.Reader) (r *Response, err error) { - req, err := NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - if c.Jar != nil { - for _, cookie := range c.Jar.Cookies(req.URL) { - req.AddCookie(cookie) - } - } - r, err = send(req, c.Transport) - if err == nil && c.Jar != nil { - c.Jar.SetCookies(req.URL, r.Cookies()) - } - return r, err -} - -// PostForm issues a POST to the specified URL, -// with data's keys and values urlencoded as the request body. -// -// Caller should close r.Body when done reading from it. -// -// PostForm is a wrapper around DefaultClient.PostForm -func PostForm(url string, data url.Values) (r *Response, err error) { - return DefaultClient.PostForm(url, data) -} - -// PostForm issues a POST to the specified URL, -// with data's keys and values urlencoded as the request body. -// -// Caller should close r.Body when done reading from it. -func (c *Client) PostForm(url string, data url.Values) (r *Response, err error) { - return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} - -// Head issues a HEAD to the specified URL. If the response is one of the -// following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -// -// Head is a wrapper around DefaultClient.Head -func Head(url string) (r *Response, err error) { - return DefaultClient.Head(url) -} - -// Head issues a HEAD to the specified URL. If the response is one of the -// following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function. -// -// 301 (Moved Permanently) -// 302 (Found) -// 303 (See Other) -// 307 (Temporary Redirect) -func (c *Client) Head(url string) (r *Response, err error) { - req, err := NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return c.doFollowingRedirects(req) -} === removed file 'src/launchpad.net/gwacl/fork/http/cookie.go' --- src/launchpad.net/gwacl/fork/http/cookie.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/cookie.go 1970-01-01 00:00:00 +0000 @@ -1,267 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "fmt" - "strconv" - "strings" - "time" -) - -// This implementation is done according to RFC 6265: -// -// http://tools.ietf.org/html/rfc6265 - -// A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an -// HTTP response or the Cookie header of an HTTP request. -type Cookie struct { - Name string - Value string - Path string - Domain string - Expires time.Time - RawExpires string - - // MaxAge=0 means no 'Max-Age' attribute specified. 
- // MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0' - // MaxAge>0 means Max-Age attribute present and given in seconds - MaxAge int - Secure bool - HttpOnly bool - Raw string - Unparsed []string // Raw text of unparsed attribute-value pairs -} - -// readSetCookies parses all "Set-Cookie" values from -// the header h and returns the successfully parsed Cookies. -func readSetCookies(h Header) []*Cookie { - cookies := []*Cookie{} - for _, line := range h["Set-Cookie"] { - parts := strings.Split(strings.TrimSpace(line), ";") - if len(parts) == 1 && parts[0] == "" { - continue - } - parts[0] = strings.TrimSpace(parts[0]) - j := strings.Index(parts[0], "=") - if j < 0 { - continue - } - name, value := parts[0][:j], parts[0][j+1:] - if !isCookieNameValid(name) { - continue - } - value, success := parseCookieValue(value) - if !success { - continue - } - c := &Cookie{ - Name: name, - Value: value, - Raw: line, - } - for i := 1; i < len(parts); i++ { - parts[i] = strings.TrimSpace(parts[i]) - if len(parts[i]) == 0 { - continue - } - - attr, val := parts[i], "" - if j := strings.Index(attr, "="); j >= 0 { - attr, val = attr[:j], attr[j+1:] - } - lowerAttr := strings.ToLower(attr) - parseCookieValueFn := parseCookieValue - if lowerAttr == "expires" { - parseCookieValueFn = parseCookieExpiresValue - } - val, success = parseCookieValueFn(val) - if !success { - c.Unparsed = append(c.Unparsed, parts[i]) - continue - } - switch lowerAttr { - case "secure": - c.Secure = true - continue - case "httponly": - c.HttpOnly = true - continue - case "domain": - c.Domain = val - // TODO: Add domain parsing - continue - case "max-age": - secs, err := strconv.Atoi(val) - if err != nil || secs != 0 && val[0] == '0' { - break - } - if secs <= 0 { - c.MaxAge = -1 - } else { - c.MaxAge = secs - } - continue - case "expires": - c.RawExpires = val - exptime, err := time.Parse(time.RFC1123, val) - if err != nil { - exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", val) - if err != nil { - c.Expires = time.Time{} - break - } - } - c.Expires = exptime.UTC() - continue - case "path": - c.Path = val - // TODO: Add path parsing - continue - } - c.Unparsed = append(c.Unparsed, parts[i]) - } - cookies = append(cookies, c) - } - return cookies -} - -// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. -func SetCookie(w ResponseWriter, cookie *Cookie) { - w.Header().Add("Set-Cookie", cookie.String()) -} - -// String returns the serialization of the cookie for use in a Cookie -// header (if only Name and Value are set) or a Set-Cookie response -// header (if other fields are set). -func (c *Cookie) String() string { - var b bytes.Buffer - fmt.Fprintf(&b, "%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value)) - if len(c.Path) > 0 { - fmt.Fprintf(&b, "; Path=%s", sanitizeValue(c.Path)) - } - if len(c.Domain) > 0 { - fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(c.Domain)) - } - if c.Expires.Unix() > 0 { - fmt.Fprintf(&b, "; Expires=%s", c.Expires.UTC().Format(time.RFC1123)) - } - if c.MaxAge > 0 { - fmt.Fprintf(&b, "; Max-Age=%d", c.MaxAge) - } else if c.MaxAge < 0 { - fmt.Fprintf(&b, "; Max-Age=0") - } - if c.HttpOnly { - fmt.Fprintf(&b, "; HttpOnly") - } - if c.Secure { - fmt.Fprintf(&b, "; Secure") - } - return b.String() -} - -// readCookies parses all "Cookie" values from the header h and -// returns the successfully parsed Cookies. 
-// -// if filter isn't empty, only cookies of that name are returned -func readCookies(h Header, filter string) []*Cookie { - cookies := []*Cookie{} - lines, ok := h["Cookie"] - if !ok { - return cookies - } - - for _, line := range lines { - parts := strings.Split(strings.TrimSpace(line), ";") - if len(parts) == 1 && parts[0] == "" { - continue - } - // Per-line attributes - parsedPairs := 0 - for i := 0; i < len(parts); i++ { - parts[i] = strings.TrimSpace(parts[i]) - if len(parts[i]) == 0 { - continue - } - name, val := parts[i], "" - if j := strings.Index(name, "="); j >= 0 { - name, val = name[:j], name[j+1:] - } - if !isCookieNameValid(name) { - continue - } - if filter != "" && filter != name { - continue - } - val, success := parseCookieValue(val) - if !success { - continue - } - cookies = append(cookies, &Cookie{Name: name, Value: val}) - parsedPairs++ - } - } - return cookies -} - -var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-") - -func sanitizeName(n string) string { - return cookieNameSanitizer.Replace(n) -} - -var cookieValueSanitizer = strings.NewReplacer("\n", " ", "\r", " ", ";", " ") - -func sanitizeValue(v string) string { - return cookieValueSanitizer.Replace(v) -} - -func unquoteCookieValue(v string) string { - if len(v) > 1 && v[0] == '"' && v[len(v)-1] == '"' { - return v[1 : len(v)-1] - } - return v -} - -func isCookieByte(c byte) bool { - switch { - case c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a, - 0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e: - return true - } - return false -} - -func isCookieExpiresByte(c byte) (ok bool) { - return isCookieByte(c) || c == ',' || c == ' ' -} - -func parseCookieValue(raw string) (string, bool) { - return parseCookieValueUsing(raw, isCookieByte) -} - -func parseCookieExpiresValue(raw string) (string, bool) { - return parseCookieValueUsing(raw, isCookieExpiresByte) -} - -func parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) { - raw = unquoteCookieValue(raw) - for i := 0; i < len(raw); i++ { - if !validByte(raw[i]) { - return "", false - } - } - return raw, true -} - -func isCookieNameValid(raw string) bool { - for _, c := range raw { - if !isToken(byte(c)) { - return false - } - } - return true -} === removed file 'src/launchpad.net/gwacl/fork/http/doc.go' --- src/launchpad.net/gwacl/fork/http/doc.go 2013-07-23 08:51:44 +0000 +++ src/launchpad.net/gwacl/fork/http/doc.go 1970-01-01 00:00:00 +0000 @@ -1,80 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package http provides HTTP client and server implementations. - -Get, Head, Post, and PostForm make HTTP requests: - - resp, err := http.Get("http://example.com/") - ... - resp, err := http.Post("http://example.com/upload", "image/jpeg", &buf) - ... - resp, err := http.PostForm("http://example.com/form", - url.Values{"key": {"Value"}, "id": {"123"}}) - -The client must close the response body when finished with it: - - resp, err := http.Get("http://example.com/") - if err != nil { - // handle error - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - // ... - -For control over HTTP client headers, redirect policy, and other -settings, create a Client: - - client := &http.Client{ - CheckRedirect: redirectPolicyFunc, - } - - resp, err := client.Get("http://example.com") - // ... - - req, err := http.NewRequest("GET", "http://example.com", nil) - // ... 
- req.Header.Add("If-None-Match", `W/"wyzzy"`) - resp, err := client.Do(req) - // ... - -For control over proxies, TLS configuration, keep-alives, -compression, and other settings, create a Transport: - - tr := &http.Transport{ - TLSClientConfig: &tls.Config{RootCAs: pool}, - DisableCompression: true, - } - client := &http.Client{Transport: tr} - resp, err := client.Get("https://example.com") - -Clients and Transports are safe for concurrent use by multiple -goroutines and for efficiency should only be created once and re-used. - -ListenAndServe starts an HTTP server with a given address and handler. -The handler is usually nil, which means to use DefaultServeMux. -Handle and HandleFunc add handlers to DefaultServeMux: - - http.Handle("/foo", fooHandler) - - http.HandleFunc("/bar", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path)) - }) - - log.Fatal(http.ListenAndServe(":8080", nil)) - -More control over the server's behavior is available by creating a -custom Server: - - s := &http.Server{ - Addr: ":8080", - Handler: myHandler, - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, - } - log.Fatal(s.ListenAndServe()) -*/ -package http === removed file 'src/launchpad.net/gwacl/fork/http/filetransport.go' --- src/launchpad.net/gwacl/fork/http/filetransport.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/filetransport.go 1970-01-01 00:00:00 +0000 @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "fmt" - "io" -) - -// fileTransport implements RoundTripper for the 'file' protocol. -type fileTransport struct { - fh fileHandler -} - -// NewFileTransport returns a new RoundTripper, serving the provided -// FileSystem. The returned RoundTripper ignores the URL host in its -// incoming requests, as well as most other properties of the -// request. -// -// The typical use case for NewFileTransport is to register the "file" -// protocol with a Transport, as in: -// -// t := &http.Transport{} -// t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) -// c := &http.Client{Transport: t} -// res, err := c.Get("file:///etc/passwd") -// ... -func NewFileTransport(fs FileSystem) RoundTripper { - return fileTransport{fileHandler{fs}} -} - -func (t fileTransport) RoundTrip(req *Request) (resp *Response, err error) { - // We start ServeHTTP in a goroutine, which may take a long - // time if the file is large. The newPopulateResponseWriter - // call returns a channel which either ServeHTTP or finish() - // sends our *Response on, once the *Response itself has been - // populated (even if the body itself is still being - // written to the res.Body, a pipe) - rw, resc := newPopulateResponseWriter() - go func() { - t.fh.ServeHTTP(rw, req) - rw.finish() - }() - return <-resc, nil -} - -func newPopulateResponseWriter() (*populateResponse, <-chan *Response) { - pr, pw := io.Pipe() - rw := &populateResponse{ - ch: make(chan *Response), - pw: pw, - res: &Response{ - Proto: "HTTP/1.0", - ProtoMajor: 1, - Header: make(Header), - Close: true, - Body: pr, - }, - } - return rw, rw.ch -} - -// populateResponse is a ResponseWriter that populates the *Response -// in res, and writes its body to a pipe connected to the response -// body. Once writes begin or finish() is called, the response is sent -// on ch. 
-type populateResponse struct { - res *Response - ch chan *Response - wroteHeader bool - hasContent bool - sentResponse bool - pw *io.PipeWriter -} - -func (pr *populateResponse) finish() { - if !pr.wroteHeader { - pr.WriteHeader(500) - } - if !pr.sentResponse { - pr.sendResponse() - } - pr.pw.Close() -} - -func (pr *populateResponse) sendResponse() { - if pr.sentResponse { - return - } - pr.sentResponse = true - - if pr.hasContent { - pr.res.ContentLength = -1 - } - pr.ch <- pr.res -} - -func (pr *populateResponse) Header() Header { - return pr.res.Header -} - -func (pr *populateResponse) WriteHeader(code int) { - if pr.wroteHeader { - return - } - pr.wroteHeader = true - - pr.res.StatusCode = code - pr.res.Status = fmt.Sprintf("%d %s", code, StatusText(code)) -} - -func (pr *populateResponse) Write(p []byte) (n int, err error) { - if !pr.wroteHeader { - pr.WriteHeader(StatusOK) - } - pr.hasContent = true - if !pr.sentResponse { - pr.sendResponse() - } - return pr.pw.Write(p) -} === removed file 'src/launchpad.net/gwacl/fork/http/fs.go' --- src/launchpad.net/gwacl/fork/http/fs.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/fs.go 1970-01-01 00:00:00 +0000 @@ -1,367 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP file system request handler - -package http - -import ( - "errors" - "fmt" - "io" - "mime" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "time" -) - -// A Dir implements http.FileSystem using the native file -// system restricted to a specific directory tree. -// -// An empty Dir is treated as ".". -type Dir string - -func (d Dir) Open(name string) (File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d) - if dir == "" { - dir = "." - } - f, err := os.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -// A FileSystem implements access to a collection of named files. -// The elements in a file path are separated by slash ('/', U+002F) -// characters, regardless of host operating system convention. -type FileSystem interface { - Open(name string) (File, error) -} - -// A File is returned by a FileSystem's Open method and can be -// served by the FileServer implementation. -type File interface { - Close() error - Stat() (os.FileInfo, error) - Readdir(count int) ([]os.FileInfo, error) - Read([]byte) (int, error) - Seek(offset int64, whence int) (int64, error) -} - -func dirList(w ResponseWriter, f File) { - w.Header().Set("Content-Type", "text/html; charset=utf-8") - fmt.Fprintf(w, "
<pre>\n")
-	for {
-		dirs, err := f.Readdir(100)
-		if err != nil || len(dirs) == 0 {
-			break
-		}
-		for _, d := range dirs {
-			name := d.Name()
-			if d.IsDir() {
-				name += "/"
-			}
-			// TODO htmlescape
-			fmt.Fprintf(w, "<a href=\"%s\">%s</a>\n", name, name)
-		}
-	}
-	fmt.Fprintf(w, "</pre>
\n") -} - -// ServeContent replies to the request using the content in the -// provided ReadSeeker. The main benefit of ServeContent over io.Copy -// is that it handles Range requests properly, sets the MIME type, and -// handles If-Modified-Since requests. -// -// If the response's Content-Type header is not set, ServeContent -// first tries to deduce the type from name's file extension and, -// if that fails, falls back to reading the first block of the content -// and passing it to DetectContentType. -// The name is otherwise unused; in particular it can be empty and is -// never sent in the response. -// -// If modtime is not the zero time, ServeContent includes it in a -// Last-Modified header in the response. If the request includes an -// If-Modified-Since header, ServeContent uses modtime to decide -// whether the content needs to be sent at all. -// -// The content's Seek method must work: ServeContent uses -// a seek to the end of the content to determine its size. -// -// Note that *os.File implements the io.ReadSeeker interface. -func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) { - size, err := content.Seek(0, os.SEEK_END) - if err != nil { - Error(w, "seeker can't seek", StatusInternalServerError) - return - } - _, err = content.Seek(0, os.SEEK_SET) - if err != nil { - Error(w, "seeker can't seek", StatusInternalServerError) - return - } - serveContent(w, req, name, modtime, size, content) -} - -// if name is empty, filename is unknown. (used for mime type, before sniffing) -// if modtime.IsZero(), modtime is unknown. -// content must be seeked to the beginning of the file. -func serveContent(w ResponseWriter, r *Request, name string, modtime time.Time, size int64, content io.ReadSeeker) { - if checkLastModified(w, r, modtime) { - return - } - - code := StatusOK - - // If Content-Type isn't set, use the file's extension to find it. - if w.Header().Get("Content-Type") == "" { - ctype := mime.TypeByExtension(filepath.Ext(name)) - if ctype == "" { - // read a chunk to decide between utf-8 text and binary - var buf [1024]byte - n, _ := io.ReadFull(content, buf[:]) - b := buf[:n] - ctype = DetectContentType(b) - _, err := content.Seek(0, os.SEEK_SET) // rewind to output whole file - if err != nil { - Error(w, "seeker can't seek", StatusInternalServerError) - return - } - } - w.Header().Set("Content-Type", ctype) - } - - // handle Content-Range header. - // TODO(adg): handle multiple ranges - sendSize := size - if size >= 0 { - ranges, err := parseRange(r.Header.Get("Range"), size) - if err == nil && len(ranges) > 1 { - err = errors.New("multiple ranges not supported") - } - if err != nil { - Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) - return - } - if len(ranges) == 1 { - ra := ranges[0] - if _, err := content.Seek(ra.start, os.SEEK_SET); err != nil { - Error(w, err.Error(), StatusRequestedRangeNotSatisfiable) - return - } - sendSize = ra.length - code = StatusPartialContent - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", ra.start, ra.start+ra.length-1, size)) - } - - w.Header().Set("Accept-Ranges", "bytes") - if w.Header().Get("Content-Encoding") == "" { - w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10)) - } - } - - w.WriteHeader(code) - - if r.Method != "HEAD" { - if sendSize == -1 { - io.Copy(w, content) - } else { - io.CopyN(w, content, sendSize) - } - } -} - -// modtime is the modification time of the resource to be served, or IsZero(). 
-// return value is whether this request is now complete. -func checkLastModified(w ResponseWriter, r *Request, modtime time.Time) bool { - if modtime.IsZero() { - return false - } - - // The Date-Modified header truncates sub-second precision, so - // use mtime < t+1s instead of mtime <= t to check for unmodified. - if t, err := time.Parse(TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) { - w.WriteHeader(StatusNotModified) - return true - } - w.Header().Set("Last-Modified", modtime.UTC().Format(TimeFormat)) - return false -} - -// name is '/'-separated, not filepath.Separator. -func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirect bool) { - const indexPage = "/index.html" - - // redirect .../index.html to .../ - // can't use Redirect() because that would make the path absolute, - // which would be a problem running under StripPrefix - if strings.HasSuffix(r.URL.Path, indexPage) { - localRedirect(w, r, "./") - return - } - - f, err := fs.Open(name) - if err != nil { - // TODO expose actual error? - NotFound(w, r) - return - } - defer f.Close() - - d, err1 := f.Stat() - if err1 != nil { - // TODO expose actual error? - NotFound(w, r) - return - } - - if redirect { - // redirect to canonical path: / at end of directory url - // r.URL.Path always begins with / - url := r.URL.Path - if d.IsDir() { - if url[len(url)-1] != '/' { - localRedirect(w, r, path.Base(url)+"/") - return - } - } else { - if url[len(url)-1] == '/' { - localRedirect(w, r, "../"+path.Base(url)) - return - } - } - } - - // use contents of index.html for directory, if present - if d.IsDir() { - if checkLastModified(w, r, d.ModTime()) { - return - } - index := name + indexPage - ff, err := fs.Open(index) - if err == nil { - defer ff.Close() - dd, err := ff.Stat() - if err == nil { - name = index - d = dd - f = ff - } - } - } - - if d.IsDir() { - dirList(w, f) - return - } - - serveContent(w, r, d.Name(), d.ModTime(), d.Size(), f) -} - -// localRedirect gives a Moved Permanently response. -// It does not convert relative paths to absolute paths like Redirect does. -func localRedirect(w ResponseWriter, r *Request, newPath string) { - if q := r.URL.RawQuery; q != "" { - newPath += "?" + q - } - w.Header().Set("Location", newPath) - w.WriteHeader(StatusMovedPermanently) -} - -// ServeFile replies to the request with the contents of the named file or directory. -func ServeFile(w ResponseWriter, r *Request, name string) { - dir, file := filepath.Split(name) - serveFile(w, r, Dir(dir), file, false) -} - -type fileHandler struct { - root FileSystem -} - -// FileServer returns a handler that serves HTTP requests -// with the contents of the file system rooted at root. -// -// To use the operating system's file system implementation, -// use http.Dir: -// -// http.Handle("/", http.FileServer(http.Dir("/tmp"))) -func FileServer(root FileSystem) Handler { - return &fileHandler{root} -} - -func (f *fileHandler) ServeHTTP(w ResponseWriter, r *Request) { - upath := r.URL.Path - if !strings.HasPrefix(upath, "/") { - upath = "/" + upath - r.URL.Path = upath - } - serveFile(w, r, f.root, path.Clean(upath), true) -} - -// httpRange specifies the byte range to be sent to the client. -type httpRange struct { - start, length int64 -} - -// parseRange parses a Range header string as per RFC 2616. 
-func parseRange(s string, size int64) ([]httpRange, error) { - if s == "" { - return nil, nil // header not present - } - const b = "bytes=" - if !strings.HasPrefix(s, b) { - return nil, errors.New("invalid range") - } - var ranges []httpRange - for _, ra := range strings.Split(s[len(b):], ",") { - i := strings.Index(ra, "-") - if i < 0 { - return nil, errors.New("invalid range") - } - start, end := ra[:i], ra[i+1:] - var r httpRange - if start == "" { - // If no start is specified, end specifies the - // range start relative to the end of the file. - i, err := strconv.ParseInt(end, 10, 64) - if err != nil { - return nil, errors.New("invalid range") - } - if i > size { - i = size - } - r.start = size - i - r.length = size - r.start - } else { - i, err := strconv.ParseInt(start, 10, 64) - if err != nil || i > size || i < 0 { - return nil, errors.New("invalid range") - } - r.start = i - if end == "" { - // If no end is specified, range extends to end of the file. - r.length = size - r.start - } else { - i, err := strconv.ParseInt(end, 10, 64) - if err != nil || r.start > i { - return nil, errors.New("invalid range") - } - if i >= size { - i = size - 1 - } - r.length = i - r.start + 1 - } - } - ranges = append(ranges, r) - } - return ranges, nil -} === removed file 'src/launchpad.net/gwacl/fork/http/header.go' --- src/launchpad.net/gwacl/fork/http/header.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/header.go 1970-01-01 00:00:00 +0000 @@ -1,78 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "fmt" - "io" - "net/textproto" - "sort" - "strings" -) - -// A Header represents the key-value pairs in an HTTP header. -type Header map[string][]string - -// Add adds the key, value pair to the header. -// It appends to any existing values associated with key. -func (h Header) Add(key, value string) { - textproto.MIMEHeader(h).Add(key, value) -} - -// Set sets the header entries associated with key to -// the single element value. It replaces any existing -// values associated with key. -func (h Header) Set(key, value string) { - textproto.MIMEHeader(h).Set(key, value) -} - -// Get gets the first value associated with the given key. -// If there are no values associated with the key, Get returns "". -// To access multiple values of a key, access the map directly -// with CanonicalHeaderKey. -func (h Header) Get(key string) string { - return textproto.MIMEHeader(h).Get(key) -} - -// Del deletes the values associated with key. -func (h Header) Del(key string) { - textproto.MIMEHeader(h).Del(key) -} - -// Write writes a header in wire format. -func (h Header) Write(w io.Writer) error { - return h.WriteSubset(w, nil) -} - -var headerNewlineToSpace = strings.NewReplacer("\n", " ", "\r", " ") - -// WriteSubset writes a header in wire format. -// If exclude is not nil, keys where exclude[key] == true are not written. -func (h Header) WriteSubset(w io.Writer, exclude map[string]bool) error { - keys := make([]string, 0, len(h)) - for k := range h { - if exclude == nil || !exclude[k] { - keys = append(keys, k) - } - } - sort.Strings(keys) - for _, k := range keys { - for _, v := range h[k] { - v = headerNewlineToSpace.Replace(v) - v = strings.TrimSpace(v) - if _, err := fmt.Fprintf(w, "%s: %s\r\n", k, v); err != nil { - return err - } - } - } - return nil -} - -// CanonicalHeaderKey returns the canonical format of the -// header key s. 
The canonicalization converts the first
-// letter and any letter following a hyphen to upper case;
-// the rest are converted to lowercase. For example, the
-// canonical key for "accept-encoding" is "Accept-Encoding".
-func CanonicalHeaderKey(s string) string { return textproto.CanonicalMIMEHeaderKey(s) }
=== removed file 'src/launchpad.net/gwacl/fork/http/jar.go'
--- src/launchpad.net/gwacl/fork/http/jar.go	2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/fork/http/jar.go	1970-01-01 00:00:00 +0000
@@ -1,30 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
-    "net/url"
-)
-
-// A CookieJar manages storage and use of cookies in HTTP requests.
-//
-// Implementations of CookieJar must be safe for concurrent use by multiple
-// goroutines.
-type CookieJar interface {
-    // SetCookies handles the receipt of the cookies in a reply for the
-    // given URL. It may or may not choose to save the cookies, depending
-    // on the jar's policy and implementation.
-    SetCookies(u *url.URL, cookies []*Cookie)
-
-    // Cookies returns the cookies to send in a request for the given URL.
-    // It is up to the implementation to honor the standard cookie use
-    // restrictions such as in RFC 6265.
-    Cookies(u *url.URL) []*Cookie
-}
-
-type blackHoleJar struct{}
-
-func (blackHoleJar) SetCookies(u *url.URL, cookies []*Cookie) {}
-func (blackHoleJar) Cookies(u *url.URL) []*Cookie { return nil }
=== removed file 'src/launchpad.net/gwacl/fork/http/lex.go'
--- src/launchpad.net/gwacl/fork/http/lex.go	2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/fork/http/lex.go	1970-01-01 00:00:00 +0000
@@ -1,136 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-// This file deals with lexical matters of HTTP
-
-func isSeparator(c byte) bool {
-    switch c {
-    case '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', ' ', '\t':
-        return true
-    }
-    return false
-}
-
-func isCtl(c byte) bool { return (0 <= c && c <= 31) || c == 127 }
-
-func isChar(c byte) bool { return 0 <= c && c <= 127 }
-
-func isAnyText(c byte) bool { return !isCtl(c) }
-
-func isQdText(c byte) bool { return isAnyText(c) && c != '"' }
-
-func isToken(c byte) bool { return isChar(c) && !isCtl(c) && !isSeparator(c) }
-
-// Valid escaped sequences are not specified in RFC 2616, so for now, we assume
-// that they coincide with the common sense ones used by Go. Malformed
-// characters should probably not be treated as errors by a robust (forgiving)
-// parser, so we replace them with the '?' character.
-func httpUnquotePair(b byte) byte {
-    // skip the first byte, which should always be '\'
-    switch b {
-    case 'a':
-        return '\a'
-    case 'b':
-        return '\b'
-    case 'f':
-        return '\f'
-    case 'n':
-        return '\n'
-    case 'r':
-        return '\r'
-    case 't':
-        return '\t'
-    case 'v':
-        return '\v'
-    case '\\':
-        return '\\'
-    case '\'':
-        return '\''
-    case '"':
-        return '"'
-    }
-    return '?'
-}
-
-// raw must begin with a valid quoted string. Only the first quoted string is
-// parsed and is unquoted in result. eaten is the number of bytes parsed, or -1
-// upon failure.
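
Stepping back to CanonicalHeaderKey above, which simply delegates to net/textproto: the canonicalization rule is easy to confirm against the standard library. A small sketch; httpUnquote's implementation continues below:

  package main

  import (
      "fmt"
      "net/http"
  )

  func main() {
      // The first letter and any letter after a hyphen are upper-cased,
      // the rest lower-cased.
      fmt.Println(http.CanonicalHeaderKey("accept-encoding")) // Accept-Encoding
      fmt.Println(http.CanonicalHeaderKey("x-forwarded-FOR")) // X-Forwarded-For

      // Direct map access needs the canonical form; Get canonicalizes for you.
      h := http.Header{"Accept-Encoding": {"gzip"}}
      fmt.Println(h["Accept-Encoding"], h.Get("accept-encoding")) // [gzip] gzip
  }
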
-func httpUnquote(raw []byte) (eaten int, result string) { - buf := make([]byte, len(raw)) - if raw[0] != '"' { - return -1, "" - } - eaten = 1 - j := 0 // # of bytes written in buf - for i := 1; i < len(raw); i++ { - switch b := raw[i]; b { - case '"': - eaten++ - buf = buf[0:j] - return i + 1, string(buf) - case '\\': - if len(raw) < i+2 { - return -1, "" - } - buf[j] = httpUnquotePair(raw[i+1]) - eaten += 2 - j++ - i++ - default: - if isQdText(b) { - buf[j] = b - } else { - buf[j] = '?' - } - eaten++ - j++ - } - } - return -1, "" -} - -// This is a best effort parse, so errors are not returned, instead not all of -// the input string might be parsed. result is always non-nil. -func httpSplitFieldValue(fv string) (eaten int, result []string) { - result = make([]string, 0, len(fv)) - raw := []byte(fv) - i := 0 - chunk := "" - for i < len(raw) { - b := raw[i] - switch { - case b == '"': - eaten, unq := httpUnquote(raw[i:]) - if eaten < 0 { - return i, result - } else { - i += eaten - chunk += unq - } - case isSeparator(b): - if chunk != "" { - result = result[0 : len(result)+1] - result[len(result)-1] = chunk - chunk = "" - } - i++ - case isToken(b): - chunk += string(b) - i++ - case b == '\n' || b == '\r': - i++ - default: - chunk += "?" - i++ - } - } - if chunk != "" { - result = result[0 : len(result)+1] - result[len(result)-1] = chunk - chunk = "" - } - return i, result -} === removed file 'src/launchpad.net/gwacl/fork/http/request.go' --- src/launchpad.net/gwacl/fork/http/request.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/request.go 1970-01-01 00:00:00 +0000 @@ -1,743 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP Request reading and parsing. - -package http - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "mime" - "mime/multipart" - "net/textproto" - "net/url" - "strings" -) - -const ( - maxValueLength = 4096 - maxHeaderLines = 1024 - chunkSize = 4 << 10 // 4 KB chunks - defaultMaxMemory = 32 << 20 // 32 MB -) - -// ErrMissingFile is returned by FormFile when the provided file field name -// is either not present in the request or not a file field. -var ErrMissingFile = errors.New("http: no such file") - -// HTTP request parsing errors. -type ProtocolError struct { - ErrorString string -} - -func (err *ProtocolError) Error() string { return err.ErrorString } - -var ( - ErrHeaderTooLong = &ProtocolError{"header too long"} - ErrShortBody = &ProtocolError{"entity body too short"} - ErrNotSupported = &ProtocolError{"feature not supported"} - ErrUnexpectedTrailer = &ProtocolError{"trailer header without chunked transfer encoding"} - ErrMissingContentLength = &ProtocolError{"missing ContentLength in HEAD response"} - ErrNotMultipart = &ProtocolError{"request Content-Type isn't multipart/form-data"} - ErrMissingBoundary = &ProtocolError{"no multipart boundary param Content-Type"} -) - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// Headers that Request.Write handles itself and should be skipped. 
-var reqWriteExcludeHeader = map[string]bool{ - "Host": true, // not in Header map anyway - "User-Agent": true, - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// A Request represents an HTTP request received by a server -// or to be sent by a client. -type Request struct { - Method string // GET, POST, PUT, etc. - URL *url.URL - - // The protocol version for incoming requests. - // Outgoing requests always use HTTP/1.1. - Proto string // "HTTP/1.0" - ProtoMajor int // 1 - ProtoMinor int // 0 - - // A header maps request lines to their values. - // If the header says - // - // accept-encoding: gzip, deflate - // Accept-Language: en-us - // Connection: keep-alive - // - // then - // - // Header = map[string][]string{ - // "Accept-Encoding": {"gzip, deflate"}, - // "Accept-Language": {"en-us"}, - // "Connection": {"keep-alive"}, - // } - // - // HTTP defines that header names are case-insensitive. - // The request parser implements this by canonicalizing the - // name, making the first character and any characters - // following a hyphen uppercase and the rest lowercase. - Header Header - - // The message body. - Body io.ReadCloser - - // ContentLength records the length of the associated content. - // The value -1 indicates that the length is unknown. - // Values >= 0 indicate that the given number of bytes may - // be read from Body. - // For outgoing requests, a value of 0 means unknown if Body is not nil. - ContentLength int64 - - // TransferEncoding lists the transfer encodings from outermost to - // innermost. An empty list denotes the "identity" encoding. - // TransferEncoding can usually be ignored; chunked encoding is - // automatically added and removed as necessary when sending and - // receiving requests. - TransferEncoding []string - - // Close indicates whether to close the connection after - // replying to this request. - Close bool - - // The host on which the URL is sought. - // Per RFC 2616, this is either the value of the Host: header - // or the host name given in the URL itself. - Host string - - // Form contains the parsed form data, including both the URL - // field's query parameters and the POST or PUT form data. - // This field is only available after ParseForm is called. - // The HTTP client ignores Form and uses Body instead. - Form url.Values - - // MultipartForm is the parsed multipart form, including file uploads. - // This field is only available after ParseMultipartForm is called. - // The HTTP client ignores MultipartForm and uses Body instead. - MultipartForm *multipart.Form - - // Trailer maps trailer keys to values. Like for Header, if the - // response has multiple trailer lines with the same key, they will be - // concatenated, delimited by commas. - // For server requests, Trailer is only populated after Body has been - // closed or fully consumed. - // Trailer support is only partially complete. - Trailer Header - - // RemoteAddr allows HTTP servers and other software to record - // the network address that sent the request, usually for - // logging. This field is not filled in by ReadRequest and - // has no defined format. The HTTP server in this package - // sets RemoteAddr to an "IP:port" address before invoking a - // handler. - // This field is ignored by the HTTP client. - RemoteAddr string - - // RequestURI is the unmodified Request-URI of the - // Request-Line (RFC 2616, Section 5.1) as sent by the client - // to a server. Usually the URL field should be used instead. 
-    // It is an error to set this field in an HTTP client request.
-    RequestURI string
-
-    // TLS allows HTTP servers and other software to record
-    // information about the TLS connection on which the request
-    // was received. This field is not filled in by ReadRequest.
-    // The HTTP server in this package sets the field for
-    // TLS-enabled connections before invoking a handler;
-    // otherwise it leaves the field nil.
-    // This field is ignored by the HTTP client.
-    TLS *tls.ConnectionState
-}
-
-// ProtoAtLeast returns whether the HTTP protocol used
-// in the request is at least major.minor.
-func (r *Request) ProtoAtLeast(major, minor int) bool {
-    return r.ProtoMajor > major ||
-        r.ProtoMajor == major && r.ProtoMinor >= minor
-}
-
-// UserAgent returns the client's User-Agent, if sent in the request.
-func (r *Request) UserAgent() string {
-    return r.Header.Get("User-Agent")
-}
-
-// Cookies parses and returns the HTTP cookies sent with the request.
-func (r *Request) Cookies() []*Cookie {
-    return readCookies(r.Header, "")
-}
-
-var ErrNoCookie = errors.New("http: named cookie not present")
-
-// Cookie returns the named cookie provided in the request or
-// ErrNoCookie if not found.
-func (r *Request) Cookie(name string) (*Cookie, error) {
-    for _, c := range readCookies(r.Header, name) {
-        return c, nil
-    }
-    return nil, ErrNoCookie
-}
-
-// AddCookie adds a cookie to the request. Per RFC 6265 section 5.4,
-// AddCookie does not attach more than one Cookie header field. That
-// means all cookies, if any, are written into the same line,
-// separated by semicolon.
-func (r *Request) AddCookie(c *Cookie) {
-    s := fmt.Sprintf("%s=%s", sanitizeName(c.Name), sanitizeValue(c.Value))
-    if c := r.Header.Get("Cookie"); c != "" {
-        r.Header.Set("Cookie", c+"; "+s)
-    } else {
-        r.Header.Set("Cookie", s)
-    }
-}
-
-// Referer returns the referring URL, if sent in the request.
-//
-// Referer is misspelled as in the request itself, a mistake from the
-// earliest days of HTTP. This value can also be fetched from the
-// Header map as Header["Referer"]; the benefit of making it available
-// as a method is that the compiler can diagnose programs that use the
-// alternate (correct English) spelling req.Referrer() but cannot
-// diagnose programs that use Header["Referrer"].
-func (r *Request) Referer() string {
-    return r.Header.Get("Referer")
-}
-
-// multipartByReader is a sentinel value.
-// Its presence in Request.MultipartForm indicates that parsing of the request
-// body has been handed off to a MultipartReader instead of ParseMultipartForm.
-var multipartByReader = &multipart.Form{
-    Value: make(map[string][]string),
-    File:  make(map[string][]*multipart.FileHeader),
-}
-
-// MultipartReader returns a MIME multipart reader if this is a
-// multipart/form-data POST request, else returns nil and an error.
-// Use this function instead of ParseMultipartForm to
-// process the request body as a stream.
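
The cookie helpers above behave identically in the standard net/http from which this fork was taken. A short sketch of AddCookie folding every cookie into the single Cookie header line that RFC 6265 requires, and of Cookie reading one back; MultipartReader's implementation follows below:

  package main

  import (
      "fmt"
      "net/http"
  )

  func main() {
      req, _ := http.NewRequest("GET", "http://example.com/", nil)
      req.AddCookie(&http.Cookie{Name: "session", Value: "abc123"})
      req.AddCookie(&http.Cookie{Name: "theme", Value: "dark"})

      // Both cookies share one header line, separated by semicolon.
      fmt.Println(req.Header.Get("Cookie")) // session=abc123; theme=dark

      // Cookie retrieves a named cookie, or ErrNoCookie if absent.
      c, err := req.Cookie("theme")
      fmt.Println(c.Value, err) // dark <nil>
  }
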
-func (r *Request) MultipartReader() (*multipart.Reader, error) { - if r.MultipartForm == multipartByReader { - return nil, errors.New("http: MultipartReader called twice") - } - if r.MultipartForm != nil { - return nil, errors.New("http: multipart handled by ParseMultipartForm") - } - r.MultipartForm = multipartByReader - return r.multipartReader() -} - -func (r *Request) multipartReader() (*multipart.Reader, error) { - v := r.Header.Get("Content-Type") - if v == "" { - return nil, ErrNotMultipart - } - d, params, err := mime.ParseMediaType(v) - if err != nil || d != "multipart/form-data" { - return nil, ErrNotMultipart - } - boundary, ok := params["boundary"] - if !ok { - return nil, ErrMissingBoundary - } - return multipart.NewReader(r.Body, boundary), nil -} - -// Return value if nonempty, def otherwise. -func valueOrDefault(value, def string) string { - if value != "" { - return value - } - return def -} - -const defaultUserAgent = "Go http package" - -// Write writes an HTTP/1.1 request -- header and body -- in wire format. -// This method consults the following fields of the request: -// Host -// URL -// Method (defaults to "GET") -// Header -// ContentLength -// TransferEncoding -// Body -// -// If Body is present, Content-Length is <= 0 and TransferEncoding -// hasn't been set to "identity", Write adds "Transfer-Encoding: -// chunked" to the header. Body is closed after it is sent. -func (r *Request) Write(w io.Writer) error { - return r.write(w, false, nil) -} - -// WriteProxy is like Write but writes the request in the form -// expected by an HTTP proxy. In particular, WriteProxy writes the -// initial Request-URI line of the request with an absolute URI, per -// section 5.1.2 of RFC 2616, including the scheme and host. -// In either case, WriteProxy also writes a Host header, using -// either r.Host or r.URL.Host. -func (r *Request) WriteProxy(w io.Writer) error { - return r.write(w, true, nil) -} - -// extraHeaders may be nil -func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error { - host := req.Host - if host == "" { - if req.URL == nil { - return errors.New("http: Request.Write on Request with no Host or URL set") - } - host = req.URL.Host - } - - ruri := req.URL.RequestURI() - if usingProxy && req.URL.Scheme != "" && req.URL.Opaque == "" { - ruri = req.URL.Scheme + "://" + host + ruri - } else if req.Method == "CONNECT" && req.URL.Path == "" { - // CONNECT requests normally give just the host and port, not a full URL. - ruri = host - } - // TODO(bradfitz): escape at least newlines in ruri? - - bw := bufio.NewWriter(w) - fmt.Fprintf(bw, "%s %s HTTP/1.1\r\n", valueOrDefault(req.Method, "GET"), ruri) - - // Header lines - fmt.Fprintf(bw, "Host: %s\r\n", host) - - // Use the defaultUserAgent unless the Header contains one, which - // may be blank to not send the header. - userAgent := defaultUserAgent - if req.Header != nil { - if ua := req.Header["User-Agent"]; len(ua) > 0 { - userAgent = ua[0] - } - } - if userAgent != "" { - fmt.Fprintf(bw, "User-Agent: %s\r\n", userAgent) - } - - // Process Body,ContentLength,Close,Trailer - tw, err := newTransferWriter(req) - if err != nil { - return err - } - err = tw.WriteHeader(bw) - if err != nil { - return err - } - - // TODO: split long values? 
(If so, should share code with Conn.Write) - err = req.Header.WriteSubset(bw, reqWriteExcludeHeader) - if err != nil { - return err - } - - if extraHeaders != nil { - err = extraHeaders.Write(bw) - if err != nil { - return err - } - } - - io.WriteString(bw, "\r\n") - - // Write body and trailer - err = tw.WriteBody(bw) - if err != nil { - return err - } - - return bw.Flush() -} - -// Convert decimal at s[i:len(s)] to integer, -// returning value, string position where the digits stopped, -// and whether there was a valid number (digits, not too big). -func atoi(s string, i int) (n, i1 int, ok bool) { - const Big = 1000000 - if i >= len(s) || s[i] < '0' || s[i] > '9' { - return 0, 0, false - } - n = 0 - for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { - n = n*10 + int(s[i]-'0') - if n > Big { - return 0, 0, false - } - } - return n, i, true -} - -// ParseHTTPVersion parses a HTTP version string. -// "HTTP/1.0" returns (1, 0, true). -func ParseHTTPVersion(vers string) (major, minor int, ok bool) { - if len(vers) < 5 || vers[0:5] != "HTTP/" { - return 0, 0, false - } - major, i, ok := atoi(vers, 5) - if !ok || i >= len(vers) || vers[i] != '.' { - return 0, 0, false - } - minor, i, ok = atoi(vers, i+1) - if !ok || i != len(vers) { - return 0, 0, false - } - return major, minor, true -} - -// NewRequest returns a new Request given a method, URL, and optional body. -func NewRequest(method, urlStr string, body io.Reader) (*Request, error) { - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - rc, ok := body.(io.ReadCloser) - if !ok && body != nil { - rc = ioutil.NopCloser(body) - } - req := &Request{ - Method: method, - URL: u, - Proto: "HTTP/1.1", - ProtoMajor: 1, - ProtoMinor: 1, - Header: make(Header), - Body: rc, - Host: u.Host, - } - if body != nil { - switch v := body.(type) { - case *strings.Reader: - req.ContentLength = int64(v.Len()) - case *bytes.Buffer: - req.ContentLength = int64(v.Len()) - } - } - - return req, nil -} - -// SetBasicAuth sets the request's Authorization header to use HTTP -// Basic Authentication with the provided username and password. -// -// With HTTP Basic Authentication the provided username and password -// are not encrypted. -func (r *Request) SetBasicAuth(username, password string) { - s := username + ":" + password - r.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(s))) -} - -// ReadRequest reads and parses a request from b. -func ReadRequest(b *bufio.Reader) (req *Request, err error) { - - tp := textproto.NewReader(b) - req = new(Request) - - // First line: GET /index.html HTTP/1.0 - var s string - if s, err = tp.ReadLine(); err != nil { - return nil, err - } - defer func() { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - }() - - var f []string - if f = strings.SplitN(s, " ", 3); len(f) < 3 { - return nil, &badStringError{"malformed HTTP request", s} - } - req.Method, req.RequestURI, req.Proto = f[0], f[1], f[2] - rawurl := req.RequestURI - var ok bool - if req.ProtoMajor, req.ProtoMinor, ok = ParseHTTPVersion(req.Proto); !ok { - return nil, &badStringError{"malformed HTTP version", req.Proto} - } - - // CONNECT requests are used two different ways, and neither uses a full URL: - // The standard use is to tunnel HTTPS through an HTTP proxy. - // It looks like "CONNECT www.google.com:443 HTTP/1.1", and the parameter is - // just the authority section of a URL. This information should go in req.URL.Host. 
- // - // The net/rpc package also uses CONNECT, but there the parameter is a path - // that starts with a slash. It can be parsed with the regular URL parser, - // and the path will end up in req.URL.Path, where it needs to be in order for - // RPC to work. - justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/") - if justAuthority { - rawurl = "http://" + rawurl - } - - if req.URL, err = url.ParseRequestURI(rawurl); err != nil { - return nil, err - } - - if justAuthority { - // Strip the bogus "http://" back off. - req.URL.Scheme = "" - } - - // Subsequent lines: Key: value. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - return nil, err - } - req.Header = Header(mimeHeader) - - // RFC2616: Must treat - // GET /index.html HTTP/1.1 - // Host: www.google.com - // and - // GET http://www.google.com/index.html HTTP/1.1 - // Host: doesntmatter - // the same. In the second case, any Host line is ignored. - req.Host = req.URL.Host - if req.Host == "" { - req.Host = req.Header.Get("Host") - } - req.Header.Del("Host") - - fixPragmaCacheControl(req.Header) - - // TODO: Parse specific header values: - // Accept - // Accept-Encoding - // Accept-Language - // Authorization - // Cache-Control - // Connection - // Date - // Expect - // From - // If-Match - // If-Modified-Since - // If-None-Match - // If-Range - // If-Unmodified-Since - // Max-Forwards - // Proxy-Authorization - // Referer [sic] - // TE (transfer-codings) - // Trailer - // Transfer-Encoding - // Upgrade - // User-Agent - // Via - // Warning - - err = readTransfer(req, b) - if err != nil { - return nil, err - } - - return req, nil -} - -// MaxBytesReader is similar to io.LimitReader but is intended for -// limiting the size of incoming request bodies. In contrast to -// io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a -// non-EOF error for a Read beyond the limit, and Closes the -// underlying reader when its Close method is called. -// -// MaxBytesReader prevents clients from accidentally or maliciously -// sending a large request and wasting server resources. -func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser { - return &maxBytesReader{w: w, r: r, n: n} -} - -type maxBytesReader struct { - w ResponseWriter - r io.ReadCloser // underlying reader - n int64 // max bytes remaining - stopped bool -} - -func (l *maxBytesReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - if !l.stopped { - l.stopped = true - if res, ok := l.w.(*response); ok { - res.requestTooLarge() - } - } - return 0, errors.New("http: request body too large") - } - if int64(len(p)) > l.n { - p = p[:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - return -} - -func (l *maxBytesReader) Close() error { - return l.r.Close() -} - -// ParseForm parses the raw query from the URL. -// -// For POST or PUT requests, it also parses the request body as a form. -// If the request Body's size has not already been limited by MaxBytesReader, -// the size is capped at 10MB. -// -// ParseMultipartForm calls ParseForm automatically. -// It is idempotent. 
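
ReadRequest above accepts any wire-format request through a bufio.Reader; the sketch below exercises the standard library's identically named function. Note how the Host header is promoted to req.Host and deleted from the map, per the RFC 2616 rule quoted in the code. ParseForm's implementation follows below:

  package main

  import (
      "bufio"
      "fmt"
      "net/http"
      "strings"
  )

  func main() {
      raw := "GET /index.html HTTP/1.1\r\n" +
          "Host: www.example.com\r\n" +
          "Accept-Encoding: gzip\r\n" +
          "\r\n"
      req, err := http.ReadRequest(bufio.NewReader(strings.NewReader(raw)))
      if err != nil {
          panic(err)
      }
      fmt.Println(req.Method, req.URL.Path, req.Proto) // GET /index.html HTTP/1.1
      fmt.Println(req.Host)                            // www.example.com
      fmt.Println(req.Header.Get("Host") == "")        // true: promoted to req.Host
  }
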
-func (r *Request) ParseForm() (err error) { - if r.Form != nil { - return - } - if r.URL != nil { - r.Form, err = url.ParseQuery(r.URL.RawQuery) - } - if r.Method == "POST" || r.Method == "PUT" { - if r.Body == nil { - return errors.New("missing form body") - } - ct := r.Header.Get("Content-Type") - ct, _, err = mime.ParseMediaType(ct) - switch { - case ct == "application/x-www-form-urlencoded": - var reader io.Reader = r.Body - maxFormSize := int64(1<<63 - 1) - if _, ok := r.Body.(*maxBytesReader); !ok { - maxFormSize = int64(10 << 20) // 10 MB is a lot of text. - reader = io.LimitReader(r.Body, maxFormSize+1) - } - b, e := ioutil.ReadAll(reader) - if e != nil { - if err == nil { - err = e - } - break - } - if int64(len(b)) > maxFormSize { - return errors.New("http: POST too large") - } - var newValues url.Values - newValues, e = url.ParseQuery(string(b)) - if err == nil { - err = e - } - if r.Form == nil { - r.Form = make(url.Values) - } - // Copy values into r.Form. TODO: make this smoother. - for k, vs := range newValues { - for _, value := range vs { - r.Form.Add(k, value) - } - } - case ct == "multipart/form-data": - // handled by ParseMultipartForm (which is calling us, or should be) - // TODO(bradfitz): there are too many possible - // orders to call too many functions here. - // Clean this up and write more tests. - // request_test.go contains the start of this, - // in TestRequestMultipartCallOrder. - } - } - return err -} - -// ParseMultipartForm parses a request body as multipart/form-data. -// The whole request body is parsed and up to a total of maxMemory bytes of -// its file parts are stored in memory, with the remainder stored on -// disk in temporary files. -// ParseMultipartForm calls ParseForm if necessary. -// After one call to ParseMultipartForm, subsequent calls have no effect. -func (r *Request) ParseMultipartForm(maxMemory int64) error { - if r.MultipartForm == multipartByReader { - return errors.New("http: multipart handled by MultipartReader") - } - if r.Form == nil { - err := r.ParseForm() - if err != nil { - return err - } - } - if r.MultipartForm != nil { - return nil - } - - mr, err := r.multipartReader() - if err == ErrNotMultipart { - return nil - } else if err != nil { - return err - } - - f, err := mr.ReadForm(maxMemory) - if err != nil { - return err - } - for k, v := range f.Value { - r.Form[k] = append(r.Form[k], v...) - } - r.MultipartForm = f - - return nil -} - -// FormValue returns the first value for the named component of the query. -// FormValue calls ParseMultipartForm and ParseForm if necessary. -func (r *Request) FormValue(key string) string { - if r.Form == nil { - r.ParseMultipartForm(defaultMaxMemory) - } - if vs := r.Form[key]; len(vs) > 0 { - return vs[0] - } - return "" -} - -// FormFile returns the first file for the provided form key. -// FormFile calls ParseMultipartForm and ParseForm if necessary. 
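
A minimal sketch of the ParseForm/FormValue flow just defined, using the standard equivalents: the URL query and an x-www-form-urlencoded POST body are merged into req.Form. FormFile, for multipart uploads, follows below:

  package main

  import (
      "fmt"
      "net/http"
      "strings"
  )

  func main() {
      body := strings.NewReader("name=gwacl&lang=go")
      req, _ := http.NewRequest("POST", "http://example.com/submit?debug=1", body)
      req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

      if err := req.ParseForm(); err != nil { // parses query string and body
          panic(err)
      }
      fmt.Println(req.FormValue("name"))  // gwacl (from the body)
      fmt.Println(req.FormValue("debug")) // 1 (from the query string)
      fmt.Println(req.Form["lang"])       // [go]
  }
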
-func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { - if r.MultipartForm == multipartByReader { - return nil, nil, errors.New("http: multipart handled by MultipartReader") - } - if r.MultipartForm == nil { - err := r.ParseMultipartForm(defaultMaxMemory) - if err != nil { - return nil, nil, err - } - } - if r.MultipartForm != nil && r.MultipartForm.File != nil { - if fhs := r.MultipartForm.File[key]; len(fhs) > 0 { - f, err := fhs[0].Open() - return f, fhs[0], err - } - } - return nil, nil, ErrMissingFile -} - -func (r *Request) expectsContinue() bool { - return strings.ToLower(r.Header.Get("Expect")) == "100-continue" -} - -func (r *Request) wantsHttp10KeepAlive() bool { - if r.ProtoMajor != 1 || r.ProtoMinor != 0 { - return false - } - return strings.Contains(strings.ToLower(r.Header.Get("Connection")), "keep-alive") -} === removed file 'src/launchpad.net/gwacl/fork/http/response.go' --- src/launchpad.net/gwacl/fork/http/response.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/response.go 1970-01-01 00:00:00 +0000 @@ -1,239 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP Response reading and parsing. - -package http - -import ( - "bufio" - "errors" - "io" - "net/textproto" - "net/url" - "strconv" - "strings" -) - -var respExcludeHeader = map[string]bool{ - "Content-Length": true, - "Transfer-Encoding": true, - "Trailer": true, -} - -// Response represents the response from an HTTP request. -// -type Response struct { - Status string // e.g. "200 OK" - StatusCode int // e.g. 200 - Proto string // e.g. "HTTP/1.0" - ProtoMajor int // e.g. 1 - ProtoMinor int // e.g. 0 - - // Header maps header keys to values. If the response had multiple - // headers with the same key, they will be concatenated, with comma - // delimiters. (Section 4.2 of RFC 2616 requires that multiple headers - // be semantically equivalent to a comma-delimited sequence.) Values - // duplicated by other fields in this struct (e.g., ContentLength) are - // omitted from Header. - // - // Keys in the map are canonicalized (see CanonicalHeaderKey). - Header Header - - // Body represents the response body. - // - // The http Client and Transport guarantee that Body is always - // non-nil, even on responses without a body or responses with - // a zero-lengthed body. - Body io.ReadCloser - - // ContentLength records the length of the associated content. The - // value -1 indicates that the length is unknown. Unless RequestMethod - // is "HEAD", values >= 0 indicate that the given number of bytes may - // be read from Body. - ContentLength int64 - - // Contains transfer encodings from outer-most to inner-most. Value is - // nil, means that "identity" encoding is used. - TransferEncoding []string - - // Close records whether the header directed that the connection be - // closed after reading Body. The value is advice for clients: neither - // ReadResponse nor Response.Write ever closes a connection. - Close bool - - // Trailer maps trailer keys to values, in the same - // format as the header. - Trailer Header - - // The Request that was sent to obtain this Response. - // Request's Body is nil (having already been consumed). - // This is only populated for Client requests. - Request *Request -} - -// Cookies parses and returns the cookies set in the Set-Cookie headers. 
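
The Response type defined in this hunk is normally produced by ReadResponse, shown next; the standard-library version accepts a nil request for a plain GET, which keeps a sketch self-contained:

  package main

  import (
      "bufio"
      "fmt"
      "io/ioutil"
      "net/http"
      "strings"
  )

  func main() {
      raw := "HTTP/1.1 200 OK\r\n" +
          "Content-Length: 5\r\n" +
          "Content-Type: text/plain\r\n" +
          "\r\n" +
          "hello"
      resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
      if err != nil {
          panic(err)
      }
      defer resp.Body.Close()
      body, _ := ioutil.ReadAll(resp.Body)
      fmt.Println(resp.Status, resp.ContentLength, string(body)) // 200 OK 5 hello
  }
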
-func (r *Response) Cookies() []*Cookie { - return readSetCookies(r.Header) -} - -var ErrNoLocation = errors.New("http: no Location header in response") - -// Location returns the URL of the response's "Location" header, -// if present. Relative redirects are resolved relative to -// the Response's Request. ErrNoLocation is returned if no -// Location header is present. -func (r *Response) Location() (*url.URL, error) { - lv := r.Header.Get("Location") - if lv == "" { - return nil, ErrNoLocation - } - if r.Request != nil && r.Request.URL != nil { - return r.Request.URL.Parse(lv) - } - return url.Parse(lv) -} - -// ReadResponse reads and returns an HTTP response from r. The -// req parameter specifies the Request that corresponds to -// this Response. Clients must call resp.Body.Close when finished -// reading resp.Body. After that call, clients can inspect -// resp.Trailer to find key/value pairs included in the response -// trailer. -func ReadResponse(r *bufio.Reader, req *Request) (resp *Response, err error) { - - tp := textproto.NewReader(r) - resp = new(Response) - - resp.Request = req - resp.Request.Method = strings.ToUpper(resp.Request.Method) - - // Parse the first line of the response. - line, err := tp.ReadLine() - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return nil, err - } - f := strings.SplitN(line, " ", 3) - if len(f) < 2 { - return nil, &badStringError{"malformed HTTP response", line} - } - reasonPhrase := "" - if len(f) > 2 { - reasonPhrase = f[2] - } - resp.Status = f[1] + " " + reasonPhrase - resp.StatusCode, err = strconv.Atoi(f[1]) - if err != nil { - return nil, &badStringError{"malformed HTTP status code", f[1]} - } - - resp.Proto = f[0] - var ok bool - if resp.ProtoMajor, resp.ProtoMinor, ok = ParseHTTPVersion(resp.Proto); !ok { - return nil, &badStringError{"malformed HTTP version", resp.Proto} - } - - // Parse the response headers. - mimeHeader, err := tp.ReadMIMEHeader() - if err != nil { - return nil, err - } - resp.Header = Header(mimeHeader) - - fixPragmaCacheControl(resp.Header) - - err = readTransfer(resp, r) - if err != nil { - return nil, err - } - - return resp, nil -} - -// RFC2616: Should treat -// Pragma: no-cache -// like -// Cache-Control: no-cache -func fixPragmaCacheControl(header Header) { - if hp, ok := header["Pragma"]; ok && len(hp) > 0 && hp[0] == "no-cache" { - if _, presentcc := header["Cache-Control"]; !presentcc { - header["Cache-Control"] = []string{"no-cache"} - } - } -} - -// ProtoAtLeast returns whether the HTTP protocol used -// in the response is at least major.minor. -func (r *Response) ProtoAtLeast(major, minor int) bool { - return r.ProtoMajor > major || - r.ProtoMajor == major && r.ProtoMinor >= minor -} - -// Writes the response (header, body and trailer) in wire format. 
This method -// consults the following fields of the response: -// -// StatusCode -// ProtoMajor -// ProtoMinor -// RequestMethod -// TransferEncoding -// Trailer -// Body -// ContentLength -// Header, values for non-canonical keys will have unpredictable behavior -// -func (r *Response) Write(w io.Writer) error { - - // RequestMethod should be upper-case - if r.Request != nil { - r.Request.Method = strings.ToUpper(r.Request.Method) - } - - // Status line - text := r.Status - if text == "" { - var ok bool - text, ok = statusText[r.StatusCode] - if !ok { - text = "status code " + strconv.Itoa(r.StatusCode) - } - } - protoMajor, protoMinor := strconv.Itoa(r.ProtoMajor), strconv.Itoa(r.ProtoMinor) - statusCode := strconv.Itoa(r.StatusCode) + " " - if strings.HasPrefix(text, statusCode) { - text = text[len(statusCode):] - } - io.WriteString(w, "HTTP/"+protoMajor+"."+protoMinor+" "+statusCode+text+"\r\n") - - // Process Body,ContentLength,Close,Trailer - tw, err := newTransferWriter(r) - if err != nil { - return err - } - err = tw.WriteHeader(w) - if err != nil { - return err - } - - // Rest of header - err = r.Header.WriteSubset(w, respExcludeHeader) - if err != nil { - return err - } - - // End-of-header - io.WriteString(w, "\r\n") - - // Write body and trailer - err = tw.WriteBody(w) - if err != nil { - return err - } - - // Success - return nil -} === removed file 'src/launchpad.net/gwacl/fork/http/server.go' --- src/launchpad.net/gwacl/fork/http/server.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/server.go 1970-01-01 00:00:00 +0000 @@ -1,1234 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP server. See RFC 2616. - -// TODO(rsc): -// logging - -package http - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "net/url" - "path" - "runtime/debug" - "strconv" - "strings" - "sync" - "time" -) - -// Errors introduced by the HTTP server. -var ( - ErrWriteAfterFlush = errors.New("Conn.Write called after Flush") - ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") - ErrHijacked = errors.New("Conn has been hijacked") - ErrContentLength = errors.New("Conn.Write wrote more than the declared Content-Length") -) - -// Objects implementing the Handler interface can be -// registered to serve a particular path or subtree -// in the HTTP server. -// -// ServeHTTP should write reply headers and data to the ResponseWriter -// and then return. Returning signals that the request is finished -// and that the HTTP server can move on to the next request on -// the connection. -type Handler interface { - ServeHTTP(ResponseWriter, *Request) -} - -// A ResponseWriter interface is used by an HTTP handler to -// construct an HTTP response. -type ResponseWriter interface { - // Header returns the header map that will be sent by WriteHeader. - // Changing the header after a call to WriteHeader (or Write) has - // no effect. - Header() Header - - // Write writes the data to the connection as part of an HTTP reply. - // If WriteHeader has not yet been called, Write calls WriteHeader(http.StatusOK) - // before writing the data. If the Header does not contain a - // Content-Type line, Write adds a Content-Type set to the result of passing - // the initial 512 bytes of written data to DetectContentType. 
-    Write([]byte) (int, error)
-
-    // WriteHeader sends an HTTP response header with status code.
-    // If WriteHeader is not called explicitly, the first call to Write
-    // will trigger an implicit WriteHeader(http.StatusOK).
-    // Thus explicit calls to WriteHeader are mainly used to
-    // send error codes.
-    WriteHeader(int)
-}
-
-// The Flusher interface is implemented by ResponseWriters that allow
-// an HTTP handler to flush buffered data to the client.
-//
-// Note that even for ResponseWriters that support Flush,
-// if the client is connected through an HTTP proxy,
-// the buffered data may not reach the client until the response
-// completes.
-type Flusher interface {
-    // Flush sends any buffered data to the client.
-    Flush()
-}
-
-// The Hijacker interface is implemented by ResponseWriters that allow
-// an HTTP handler to take over the connection.
-type Hijacker interface {
-    // Hijack lets the caller take over the connection.
-    // After a call to Hijack(), the HTTP server library
-    // will not do anything else with the connection.
-    // It becomes the caller's responsibility to manage
-    // and close the connection.
-    Hijack() (net.Conn, *bufio.ReadWriter, error)
-}
-
-// A conn represents the server side of an HTTP connection.
-type conn struct {
-    remoteAddr string               // network address of remote side
-    server     *Server              // the Server on which the connection arrived
-    rwc        net.Conn             // i/o connection
-    lr         *io.LimitedReader    // io.LimitReader(rwc)
-    buf        *bufio.ReadWriter    // buffered(lr,rwc), reading from bufio->limitReader->rwc
-    hijacked   bool                 // connection has been hijacked by handler
-    tlsState   *tls.ConnectionState // or nil when not using TLS
-    body       []byte
-}
-
-// A response represents the server side of an HTTP response.
-type response struct {
-    conn          *conn
-    req           *Request // request for this response
-    chunking      bool     // using chunked transfer encoding for reply body
-    wroteHeader   bool     // reply header has been written
-    wroteContinue bool     // 100 Continue response was written
-    header        Header   // reply header parameters
-    written       int64    // number of bytes written in body
-    contentLength int64    // explicitly-declared Content-Length; or -1
-    status        int      // status code passed to WriteHeader
-    needSniff     bool     // need to sniff to find Content-Type
-
-    // close connection after this reply. set on request and
-    // updated after response from handler if there's a
-    // "Connection: keep-alive" response header and a
-    // Content-Length.
-    closeAfterReply bool
-
-    // requestBodyLimitHit is set by requestTooLarge when
-    // maxBytesReader hits its max size. It is checked in
-    // WriteHeader, to make sure we don't consume the
-    // remaining request body to try to advance to the next HTTP
-    // request. Instead, when this is set, we stop doing
-    // subsequent requests on this connection and stop reading
-    // input from it.
-    requestBodyLimitHit bool
-}
-
-// requestTooLarge is called by maxBytesReader when too much input has
-// been read from the client.
-func (w *response) requestTooLarge() {
-    w.closeAfterReply = true
-    w.requestBodyLimitHit = true
-    if !w.wroteHeader {
-        w.Header().Set("Connection", "close")
-    }
-}
-
-type writerOnly struct {
-    io.Writer
-}
-
-func (w *response) ReadFrom(src io.Reader) (n int64, err error) {
-    // Call WriteHeader before checking w.chunking if it hasn't
-    // been called yet, since WriteHeader is what sets w.chunking.
- if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - if !w.chunking && w.bodyAllowed() && !w.needSniff { - w.Flush() - if rf, ok := w.conn.rwc.(io.ReaderFrom); ok { - n, err = rf.ReadFrom(src) - w.written += n - return - } - } - // Fall back to default io.Copy implementation. - // Use wrapper to hide w.ReadFrom from io.Copy. - return io.Copy(writerOnly{w}, src) -} - -// noLimit is an effective infinite upper bound for io.LimitedReader -const noLimit int64 = (1 << 63) - 1 - -// Create new connection from rwc. -func (srv *Server) newConn(rwc net.Conn) (c *conn, err error) { - c = new(conn) - c.remoteAddr = rwc.RemoteAddr().String() - c.server = srv - c.rwc = rwc - c.body = make([]byte, sniffLen) - c.lr = io.LimitReader(rwc, noLimit).(*io.LimitedReader) - br := bufio.NewReader(c.lr) - bw := bufio.NewWriter(rwc) - c.buf = bufio.NewReadWriter(br, bw) - return c, nil -} - -// DefaultMaxHeaderBytes is the maximum permitted size of the headers -// in an HTTP request. -// This can be overridden by setting Server.MaxHeaderBytes. -const DefaultMaxHeaderBytes = 1 << 20 // 1 MB - -func (srv *Server) maxHeaderBytes() int { - if srv.MaxHeaderBytes > 0 { - return srv.MaxHeaderBytes - } - return DefaultMaxHeaderBytes -} - -// wrapper around io.ReaderCloser which on first read, sends an -// HTTP/1.1 100 Continue header -type expectContinueReader struct { - resp *response - readCloser io.ReadCloser - closed bool -} - -func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { - if ecr.closed { - return 0, errors.New("http: Read after Close on request Body") - } - if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked { - ecr.resp.wroteContinue = true - io.WriteString(ecr.resp.conn.buf, "HTTP/1.1 100 Continue\r\n\r\n") - ecr.resp.conn.buf.Flush() - } - return ecr.readCloser.Read(p) -} - -func (ecr *expectContinueReader) Close() error { - ecr.closed = true - return ecr.readCloser.Close() -} - -// TimeFormat is the time format to use with -// time.Parse and time.Time.Format when parsing -// or generating times in HTTP headers. -// It is like time.RFC1123 but hard codes GMT as the time zone. -const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" - -var errTooLarge = errors.New("http: request too large") - -// Read next request from connection. -func (c *conn) readRequest() (w *response, err error) { - if c.hijacked { - return nil, ErrHijacked - } - c.lr.N = int64(c.server.maxHeaderBytes()) + 4096 /* bufio slop */ - var req *Request - if req, err = ReadRequest(c.buf.Reader); err != nil { - if c.lr.N == 0 { - return nil, errTooLarge - } - return nil, err - } - c.lr.N = noLimit - - req.RemoteAddr = c.remoteAddr - req.TLS = c.tlsState - - w = new(response) - w.conn = c - w.req = req - w.header = make(Header) - w.contentLength = -1 - c.body = c.body[:0] - return w, nil -} - -func (w *response) Header() Header { - return w.header -} - -// maxPostHandlerReadBytes is the max number of Request.Body bytes not -// consumed by a handler that the server will read from the client -// in order to keep a connection alive. If there are more bytes than -// this then the server to be paranoid instead sends a "Connection: -// close" response. -// -// This number is approximately what a typical machine's TCP buffer -// size is anyway. 
(if we have the bytes on the machine, we might as
-// well read them)
-const maxPostHandlerReadBytes = 256 << 10
-
-func (w *response) WriteHeader(code int) {
-    if w.conn.hijacked {
-        log.Print("http: response.WriteHeader on hijacked connection")
-        return
-    }
-    if w.wroteHeader {
-        log.Print("http: multiple response.WriteHeader calls")
-        return
-    }
-    w.wroteHeader = true
-    w.status = code
-
-    // Check for an explicit (and valid) Content-Length header.
-    var hasCL bool
-    var contentLength int64
-    if clenStr := w.header.Get("Content-Length"); clenStr != "" {
-        var err error
-        contentLength, err = strconv.ParseInt(clenStr, 10, 64)
-        if err == nil {
-            hasCL = true
-        } else {
-            log.Printf("http: invalid Content-Length of %q sent", clenStr)
-            w.header.Del("Content-Length")
-        }
-    }
-
-    if w.req.wantsHttp10KeepAlive() && (w.req.Method == "HEAD" || hasCL) {
-        _, connectionHeaderSet := w.header["Connection"]
-        if !connectionHeaderSet {
-            w.header.Set("Connection", "keep-alive")
-        }
-    } else if !w.req.ProtoAtLeast(1, 1) {
-        // Client did not ask to keep connection alive.
-        w.closeAfterReply = true
-    }
-
-    if w.header.Get("Connection") == "close" {
-        w.closeAfterReply = true
-    }
-
-    // Per RFC 2616, we should consume the request body before
-    // replying, if the handler hasn't already done so. But we
-    // don't want to do an unbounded amount of reading here for
-    // DoS reasons, so we only try up to a threshold.
-    if w.req.ContentLength != 0 && !w.closeAfterReply {
-        ecr, isExpecter := w.req.Body.(*expectContinueReader)
-        if !isExpecter || ecr.resp.wroteContinue {
-            n, _ := io.CopyN(ioutil.Discard, w.req.Body, maxPostHandlerReadBytes+1)
-            if n >= maxPostHandlerReadBytes {
-                w.requestTooLarge()
-                w.header.Set("Connection", "close")
-            } else {
-                w.req.Body.Close()
-            }
-        }
-    }
-
-    if code == StatusNotModified {
-        // Must not have body.
-        for _, header := range []string{"Content-Type", "Content-Length", "Transfer-Encoding"} {
-            if w.header.Get(header) != "" {
-                // TODO: return an error if WriteHeader gets a return parameter
-                // or set a flag on w to make future Writes() write an error page?
-                // for now just log and drop the header.
-                log.Printf("http: StatusNotModified response with header %q defined", header)
-                w.header.Del(header)
-            }
-        }
-    } else {
-        // If no content type, apply sniffing algorithm to body.
-        if w.header.Get("Content-Type") == "" && w.req.Method != "HEAD" {
-            w.needSniff = true
-        }
-    }
-
-    if _, ok := w.header["Date"]; !ok {
-        w.Header().Set("Date", time.Now().UTC().Format(TimeFormat))
-    }
-
-    te := w.header.Get("Transfer-Encoding")
-    hasTE := te != ""
-    if hasCL && hasTE && te != "identity" {
-        // TODO: return an error if WriteHeader gets a return parameter
-        // For now just ignore the Content-Length.
-        log.Printf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d",
-            te, contentLength)
-        w.header.Del("Content-Length")
-        hasCL = false
-    }
-
-    if w.req.Method == "HEAD" || code == StatusNotModified {
-        // do nothing
-    } else if hasCL {
-        w.contentLength = contentLength
-        w.header.Del("Transfer-Encoding")
-    } else if w.req.ProtoAtLeast(1, 1) {
-        // HTTP/1.1 or greater: use chunked transfer encoding
-        // to avoid closing the connection at EOF.
-        // TODO: this blows away any custom or stacked Transfer-Encoding they
-        // might have set. Deal with that as need arises once we have a valid
-        // use case.
- w.chunking = true - w.header.Set("Transfer-Encoding", "chunked") - } else { - // HTTP version < 1.1: cannot do chunked transfer - // encoding and we don't know the Content-Length so - // signal EOF by closing connection. - w.closeAfterReply = true - w.header.Del("Transfer-Encoding") // in case already set - } - - // Cannot use Content-Length with non-identity Transfer-Encoding. - if w.chunking { - w.header.Del("Content-Length") - } - if !w.req.ProtoAtLeast(1, 0) { - return - } - proto := "HTTP/1.0" - if w.req.ProtoAtLeast(1, 1) { - proto = "HTTP/1.1" - } - codestring := strconv.Itoa(code) - text, ok := statusText[code] - if !ok { - text = "status code " + codestring - } - io.WriteString(w.conn.buf, proto+" "+codestring+" "+text+"\r\n") - w.header.Write(w.conn.buf) - - // If we need to sniff the body, leave the header open. - // Otherwise, end it here. - if !w.needSniff { - io.WriteString(w.conn.buf, "\r\n") - } -} - -// sniff uses the first block of written data, -// stored in w.conn.body, to decide the Content-Type -// for the HTTP body. -func (w *response) sniff() { - if !w.needSniff { - return - } - w.needSniff = false - - data := w.conn.body - fmt.Fprintf(w.conn.buf, "Content-Type: %s\r\n\r\n", DetectContentType(data)) - - if len(data) == 0 { - return - } - if w.chunking { - fmt.Fprintf(w.conn.buf, "%x\r\n", len(data)) - } - _, err := w.conn.buf.Write(data) - if w.chunking && err == nil { - io.WriteString(w.conn.buf, "\r\n") - } -} - -// bodyAllowed returns true if a Write is allowed for this response type. -// It's illegal to call this before the header has been flushed. -func (w *response) bodyAllowed() bool { - if !w.wroteHeader { - panic("") - } - return w.status != StatusNotModified && w.req.Method != "HEAD" -} - -func (w *response) Write(data []byte) (n int, err error) { - if w.conn.hijacked { - log.Print("http: response.Write on hijacked connection") - return 0, ErrHijacked - } - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - if len(data) == 0 { - return 0, nil - } - if !w.bodyAllowed() { - return 0, ErrBodyNotAllowed - } - - w.written += int64(len(data)) // ignoring errors, for errorKludge - if w.contentLength != -1 && w.written > w.contentLength { - return 0, ErrContentLength - } - - var m int - if w.needSniff { - // We need to sniff the beginning of the output to - // determine the content type. Accumulate the - // initial writes in w.conn.body. - // Cap m so that append won't allocate. - m = cap(w.conn.body) - len(w.conn.body) - if m > len(data) { - m = len(data) - } - w.conn.body = append(w.conn.body, data[:m]...) - data = data[m:] - if len(data) == 0 { - // Copied everything into the buffer. - // Wait for next write. - return m, nil - } - - // Filled the buffer; more data remains. - // Sniff the content (flushes the buffer) - // and then proceed with the remainder - // of the data as a normal Write. - // Calling sniff clears needSniff. - w.sniff() - } - - // TODO(rsc): if chunking happened after the buffering, - // then there would be fewer chunk headers. - // On the other hand, it would make hijacking more difficult. 
- if w.chunking { - fmt.Fprintf(w.conn.buf, "%x\r\n", len(data)) // TODO(rsc): use strconv not fmt - } - n, err = w.conn.buf.Write(data) - if err == nil && w.chunking { - if n != len(data) { - err = io.ErrShortWrite - } - if err == nil { - io.WriteString(w.conn.buf, "\r\n") - } - } - - return m + n, err -} - -func (w *response) finishRequest() { - // If this was an HTTP/1.0 request with keep-alive and we sent a Content-Length - // back, we can make this a keep-alive response ... - if w.req.wantsHttp10KeepAlive() { - sentLength := w.header.Get("Content-Length") != "" - if sentLength && w.header.Get("Connection") == "keep-alive" { - w.closeAfterReply = false - } - } - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - if w.needSniff { - w.sniff() - } - if w.chunking { - io.WriteString(w.conn.buf, "0\r\n") - // trailer key/value pairs, followed by blank line - io.WriteString(w.conn.buf, "\r\n") - } - w.conn.buf.Flush() - // Close the body, unless we're about to close the whole TCP connection - // anyway. - if !w.closeAfterReply { - w.req.Body.Close() - } - if w.req.MultipartForm != nil { - w.req.MultipartForm.RemoveAll() - } - - if w.contentLength != -1 && w.contentLength != w.written { - // Did not write enough. Avoid getting out of sync. - w.closeAfterReply = true - } -} - -func (w *response) Flush() { - if !w.wroteHeader { - w.WriteHeader(StatusOK) - } - w.sniff() - w.conn.buf.Flush() -} - -// Close the connection. -func (c *conn) close() { - if c.buf != nil { - c.buf.Flush() - c.buf = nil - } - if c.rwc != nil { - c.rwc.Close() - c.rwc = nil - } -} - -// Serve a new connection. -func (c *conn) serve() { - defer func() { - err := recover() - if err == nil { - return - } - - var buf bytes.Buffer - fmt.Fprintf(&buf, "http: panic serving %v: %v\n", c.remoteAddr, err) - buf.Write(debug.Stack()) - log.Print(buf.String()) - - if c.rwc != nil { // may be nil if connection hijacked - c.rwc.Close() - } - }() - - if tlsConn, ok := c.rwc.(*tls.Conn); ok { - if err := tlsConn.Handshake(); err != nil { - c.close() - return - } - c.tlsState = new(tls.ConnectionState) - *c.tlsState = tlsConn.ConnectionState() - } - - for { - w, err := c.readRequest() - if err != nil { - msg := "400 Bad Request" - if err == errTooLarge { - // Their HTTP client may or may not be - // able to read this if we're - // responding to them and hanging up - // while they're still writing their - // request. Undefined behavior. - msg = "413 Request Entity Too Large" - } else if err == io.EOF { - break // Don't reply - } else if neterr, ok := err.(net.Error); ok && neterr.Timeout() { - break // Don't reply - } - fmt.Fprintf(c.rwc, "HTTP/1.1 %s\r\n\r\n", msg) - break - } - - // Expect 100 Continue support - req := w.req - if req.expectsContinue() { - if req.ProtoAtLeast(1, 1) { - // Wrap the Body reader with one that replies on the connection - req.Body = &expectContinueReader{readCloser: req.Body, resp: w} - } - if req.ContentLength == 0 { - w.Header().Set("Connection", "close") - w.WriteHeader(StatusBadRequest) - w.finishRequest() - break - } - req.Header.Del("Expect") - } else if req.Header.Get("Expect") != "" { - // TODO(bradfitz): let ServeHTTP handlers handle - // requests with non-standard expectation[s]? Seems - // theoretical at best, and doesn't fit into the - // current ServeHTTP model anyway. We'd need to - // make the ResponseWriter an optional - // "ExpectReplier" interface or something. 
- // - // For now we'll just obey RFC 2616 14.20 which says - // "If a server receives a request containing an - // Expect field that includes an expectation- - // extension that it does not support, it MUST - // respond with a 417 (Expectation Failed) status." - w.Header().Set("Connection", "close") - w.WriteHeader(StatusExpectationFailed) - w.finishRequest() - break - } - - handler := c.server.Handler - if handler == nil { - handler = DefaultServeMux - } - - // HTTP cannot have multiple simultaneous active requests.[*] - // Until the server replies to this request, it can't read another, - // so we might as well run the handler in this goroutine. - // [*] Not strictly true: HTTP pipelining. We could let them all process - // in parallel even if their responses need to be serialized. - handler.ServeHTTP(w, w.req) - if c.hijacked { - return - } - w.finishRequest() - if w.closeAfterReply { - break - } - } - c.close() -} - -// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter -// and a Hijacker. -func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { - if w.conn.hijacked { - return nil, nil, ErrHijacked - } - w.conn.hijacked = true - rwc = w.conn.rwc - buf = w.conn.buf - w.conn.rwc = nil - w.conn.buf = nil - return -} - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as HTTP handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Request) - -// ServeHTTP calls f(w, r). -func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { - f(w, r) -} - -// Helper handlers - -// Error replies to the request with the specified error message and HTTP code. -func Error(w ResponseWriter, error string, code int) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(code) - fmt.Fprintln(w, error) -} - -// NotFound replies to the request with an HTTP 404 not found error. -func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } - -// NotFoundHandler returns a simple request handler -// that replies to each request with a ``404 page not found'' reply. -func NotFoundHandler() Handler { return HandlerFunc(NotFound) } - -// StripPrefix returns a handler that serves HTTP requests -// by removing the given prefix from the request URL's Path -// and invoking the handler h. StripPrefix handles a -// request for a path that doesn't begin with prefix by -// replying with an HTTP 404 not found error. -func StripPrefix(prefix string, h Handler) Handler { - return HandlerFunc(func(w ResponseWriter, r *Request) { - if !strings.HasPrefix(r.URL.Path, prefix) { - NotFound(w, r) - return - } - r.URL.Path = r.URL.Path[len(prefix):] - h.ServeHTTP(w, r) - }) -} - -// Redirect replies to the request with a redirect to url, -// which may be a path relative to the request path. -func Redirect(w ResponseWriter, r *Request, urlStr string, code int) { - if u, err := url.Parse(urlStr); err == nil { - // If url was relative, make absolute by - // combining with request path. - // The browser would probably do this for us, - // but doing it ourselves is more reliable. - - // NOTE(rsc): RFC 2616 says that the Location - // line must be an absolute URI, like - // "http://www.google.com/redirect/", - // not a path like "/redirect/". 
-    // Unfortunately, we don't know what to
-    // put in the host name section to get the
-    // client to connect to us again, so we can't
-    // know the right absolute URI to send back.
-    // Because of this problem, no one pays attention
-    // to the RFC; they all send back just a new path.
-    // So do we.
-        oldpath := r.URL.Path
-        if oldpath == "" { // should not happen, but avoid a crash if it does
-            oldpath = "/"
-        }
-        if u.Scheme == "" {
-            // no leading http://server
-            if urlStr == "" || urlStr[0] != '/' {
-                // make relative path absolute
-                olddir, _ := path.Split(oldpath)
-                urlStr = olddir + urlStr
-            }
-
-            var query string
-            if i := strings.Index(urlStr, "?"); i != -1 {
-                urlStr, query = urlStr[:i], urlStr[i:]
-            }
-
-            // clean up but preserve trailing slash
-            trailing := urlStr[len(urlStr)-1] == '/'
-            urlStr = path.Clean(urlStr)
-            if trailing && urlStr[len(urlStr)-1] != '/' {
-                urlStr += "/"
-            }
-            urlStr += query
-        }
-    }
-
-    w.Header().Set("Location", urlStr)
-    w.WriteHeader(code)
-
-    // RFC2616 recommends that a short note "SHOULD" be included in the
-    // response because older user agents may not understand 301/307.
-    // Shouldn't send the response for POST or HEAD; that leaves GET.
-    if r.Method == "GET" {
-        note := "<a href=\"" + htmlEscape(urlStr) + "\">" + statusText[code] + "</a>.\n"
-        fmt.Fprintln(w, note)
-    }
-}
-
-var htmlReplacer = strings.NewReplacer(
-    "&", "&amp;",
-    "<", "&lt;",
-    ">", "&gt;",
-    // "&#34;" is shorter than "&quot;".
-    `"`, "&#34;",
-    // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5.
-    "'", "&#39;",
-)
-
-func htmlEscape(s string) string {
-    return htmlReplacer.Replace(s)
-}
-
-// Redirect to a fixed URL
-type redirectHandler struct {
-    url  string
-    code int
-}
-
-func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) {
-    Redirect(w, r, rh.url, rh.code)
-}
-
-// RedirectHandler returns a request handler that redirects
-// each request it receives to the given url using the given
-// status code.
-func RedirectHandler(url string, code int) Handler {
-    return &redirectHandler{url, code}
-}
-
-// ServeMux is an HTTP request multiplexer.
-// It matches the URL of each incoming request against a list of registered
-// patterns and calls the handler for the pattern that
-// most closely matches the URL.
-//
-// Patterns name fixed, rooted paths, like "/favicon.ico",
-// or rooted subtrees, like "/images/" (note the trailing slash).
-// Longer patterns take precedence over shorter ones, so that
-// if there are handlers registered for both "/images/"
-// and "/images/thumbnails/", the latter handler will be
-// called for paths beginning "/images/thumbnails/" and the
-// former will receive requests for any other paths in the
-// "/images/" subtree.
-//
-// Patterns may optionally begin with a host name, restricting matches to
-// URLs on that host only. Host-specific patterns take precedence over
-// general patterns, so that a handler might register for the two patterns
-// "/codesearch" and "codesearch.google.com/" without also taking over
-// requests for "http://www.google.com/".
-//
-// ServeMux also takes care of sanitizing the URL request path,
-// redirecting any request containing . or .. elements to an
-// equivalent .- and ..-free URL.
-type ServeMux struct {
-    mu sync.RWMutex
-    m  map[string]muxEntry
-}
-
-type muxEntry struct {
-    explicit bool
-    h        Handler
-}
-
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return &ServeMux{m: make(map[string]muxEntry)} }
-
-// DefaultServeMux is the default ServeMux used by Serve.
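
The longest-pattern rule documented above is easy to confirm with the standard ServeMux and net/http/httptest, registering the two example patterns from the comment:

  package main

  import (
      "fmt"
      "net/http"
      "net/http/httptest"
  )

  func main() {
      mux := http.NewServeMux()
      mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
          fmt.Fprint(w, "images")
      })
      mux.HandleFunc("/images/thumbnails/", func(w http.ResponseWriter, r *http.Request) {
          fmt.Fprint(w, "thumbnails")
      })
      for _, p := range []string{"/images/logo.png", "/images/thumbnails/t1.png"} {
          rec := httptest.NewRecorder()
          mux.ServeHTTP(rec, httptest.NewRequest("GET", p, nil))
          fmt.Printf("%s -> %s\n", p, rec.Body.String())
      }
      // /images/logo.png -> images          (shorter pattern)
      // /images/thumbnails/t1.png -> thumbnails (longest pattern wins)
  }
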
-var DefaultServeMux = NewServeMux() - -// Does path match pattern? -func pathMatch(pattern, path string) bool { - if len(pattern) == 0 { - // should not happen - return false - } - n := len(pattern) - if pattern[n-1] != '/' { - return pattern == path - } - return len(path) >= n && path[0:n] == pattern -} - -// Return the canonical path for p, eliminating . and .. elements. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// Find a handler on a handler map given a path string -// Most-specific (longest) pattern wins -func (mux *ServeMux) match(path string) Handler { - var h Handler - var n = 0 - for k, v := range mux.m { - if !pathMatch(k, path) { - continue - } - if h == nil || len(k) > n { - n = len(k) - h = v.h - } - } - return h -} - -// handler returns the handler to use for the request r. -func (mux *ServeMux) handler(r *Request) Handler { - mux.mu.RLock() - defer mux.mu.RUnlock() - - // Host-specific pattern takes precedence over generic ones - h := mux.match(r.Host + r.URL.Path) - if h == nil { - h = mux.match(r.URL.Path) - } - if h == nil { - h = NotFoundHandler() - } - return h -} - -// ServeHTTP dispatches the request to the handler whose -// pattern most closely matches the request URL. -func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(r.URL.Path); p != r.URL.Path { - w.Header().Set("Location", p) - w.WriteHeader(StatusMovedPermanently) - return - } - mux.handler(r).ServeHTTP(w, r) -} - -// Handle registers the handler for the given pattern. -// If a handler already exists for pattern, Handle panics. -func (mux *ServeMux) Handle(pattern string, handler Handler) { - mux.mu.Lock() - defer mux.mu.Unlock() - - if pattern == "" { - panic("http: invalid pattern " + pattern) - } - if handler == nil { - panic("http: nil handler") - } - if mux.m[pattern].explicit { - panic("http: multiple registrations for " + pattern) - } - - mux.m[pattern] = muxEntry{explicit: true, h: handler} - - // Helpful behavior: - // If pattern is /tree/, insert an implicit permanent redirect for /tree. - // It can be overridden by an explicit registration. - n := len(pattern) - if n > 0 && pattern[n-1] == '/' && !mux.m[pattern[0:n-1]].explicit { - mux.m[pattern[0:n-1]] = muxEntry{h: RedirectHandler(pattern, StatusMovedPermanently)} - } -} - -// HandleFunc registers the handler function for the given pattern. -func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// Handle registers the handler for the given pattern -// in the DefaultServeMux. -// The documentation for ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleFunc registers the handler function for the given pattern -// in the DefaultServeMux. -// The documentation for ServeMux explains how patterns are matched. -func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { - DefaultServeMux.HandleFunc(pattern, handler) -} - -// Serve accepts incoming HTTP connections on the listener l, -// creating a new service thread for each. The service threads -// read requests and then call handler to reply to them. 
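Handle's "helpful behavior" above, where registering /tree/ also installs a permanent redirect from /tree, can be checked without a network; a sketch using net/http/httptest:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/tree/", func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprintln(w, "in the tree")
        })

        // The implicit registration answers 301 with Location: /tree/.
        req := httptest.NewRequest("GET", "/tree", nil)
        rec := httptest.NewRecorder()
        mux.ServeHTTP(rec, req)
        fmt.Println(rec.Code, rec.Header().Get("Location"))
    }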
-// Handler is typically nil, in which case the DefaultServeMux is used. -func Serve(l net.Listener, handler Handler) error { - srv := &Server{Handler: handler} - return srv.Serve(l) -} - -// A Server defines parameters for running an HTTP server. -type Server struct { - Addr string // TCP address to listen on, ":http" if empty - Handler Handler // handler to invoke, http.DefaultServeMux if nil - ReadTimeout time.Duration // maximum duration before timing out read of the request - WriteTimeout time.Duration // maximum duration before timing out write of the response - MaxHeaderBytes int // maximum size of request headers, DefaultMaxHeaderBytes if 0 - TLSConfig *tls.Config // optional TLS config, used by ListenAndServeTLS -} - -// ListenAndServe listens on the TCP network address srv.Addr and then -// calls Serve to handle requests on incoming connections. If -// srv.Addr is blank, ":http" is used. -func (srv *Server) ListenAndServe() error { - addr := srv.Addr - if addr == "" { - addr = ":http" - } - l, e := net.Listen("tcp", addr) - if e != nil { - return e - } - return srv.Serve(l) -} - -// Serve accepts incoming connections on the Listener l, creating a -// new service thread for each. The service threads read requests and -// then call srv.Handler to reply to them. -func (srv *Server) Serve(l net.Listener) error { - defer l.Close() - var tempDelay time.Duration // how long to sleep on accept failure - for { - rw, e := l.Accept() - if e != nil { - if ne, ok := e.(net.Error); ok && ne.Temporary() { - if tempDelay == 0 { - tempDelay = 5 * time.Millisecond - } else { - tempDelay *= 2 - } - if max := 1 * time.Second; tempDelay > max { - tempDelay = max - } - log.Printf("http: Accept error: %v; retrying in %v", e, tempDelay) - time.Sleep(tempDelay) - continue - } - return e - } - tempDelay = 0 - if srv.ReadTimeout != 0 { - rw.SetReadDeadline(time.Now().Add(srv.ReadTimeout)) - } - if srv.WriteTimeout != 0 { - rw.SetWriteDeadline(time.Now().Add(srv.WriteTimeout)) - } - c, err := srv.newConn(rw) - if err != nil { - continue - } - go c.serve() - } - panic("not reached") -} - -// ListenAndServe listens on the TCP network address addr -// and then calls Serve with handler to handle requests -// on incoming connections. Handler is typically nil, -// in which case the DefaultServeMux is used. -// -// A trivial example server is: -// -// package main -// -// import ( -// "io" -// "net/http" -// "log" -// ) -// -// // hello world, the web server -// func HelloServer(w http.ResponseWriter, req *http.Request) { -// io.WriteString(w, "hello, world!\n") -// } -// -// func main() { -// http.HandleFunc("/hello", HelloServer) -// err := http.ListenAndServe(":12345", nil) -// if err != nil { -// log.Fatal("ListenAndServe: ", err) -// } -// } -func ListenAndServe(addr string, handler Handler) error { - server := &Server{Addr: addr, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts identically to ListenAndServe, except that it -// expects HTTPS connections. Additionally, files containing a certificate and -// matching private key for the server must be provided. If the certificate -// is signed by a certificate authority, the certFile should be the concatenation -// of the server's certificate followed by the CA's certificate. 
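Serve's accept loop above retries temporary Accept errors with exponential backoff (5ms doubling to a 1s cap) instead of spinning, for example when file descriptors run out. The same pattern works for any listener; a sketch under the same net.Error semantics this code relies on (ne.Temporary() matches this code's era and is deprecated in current Go):

    package main

    import (
        "log"
        "net"
        "time"
    )

    func acceptLoop(l net.Listener, handle func(net.Conn)) error {
        var delay time.Duration // sleep length after an accept failure
        for {
            c, err := l.Accept()
            if err != nil {
                if ne, ok := err.(net.Error); ok && ne.Temporary() {
                    if delay == 0 {
                        delay = 5 * time.Millisecond
                    } else {
                        delay *= 2
                    }
                    if delay > time.Second {
                        delay = time.Second
                    }
                    log.Printf("accept error: %v; retrying in %v", err, delay)
                    time.Sleep(delay)
                    continue
                }
                return err
            }
            delay = 0
            go handle(c)
        }
    }

    func main() {
        l, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            log.Fatal(err)
        }
        log.Fatal(acceptLoop(l, func(c net.Conn) { c.Close() }))
    }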
-// -// A trivial example server is: -// -// import ( -// "log" -// "net/http" -// ) -// -// func handler(w http.ResponseWriter, req *http.Request) { -// w.Header().Set("Content-Type", "text/plain") -// w.Write([]byte("This is an example server.\n")) -// } -// -// func main() { -// http.HandleFunc("/", handler) -// log.Printf("About to listen on 10443. Go to https://127.0.0.1:10443/") -// err := http.ListenAndServeTLS(":10443", "cert.pem", "key.pem", nil) -// if err != nil { -// log.Fatal(err) -// } -// } -// -// One can use generate_cert.go in crypto/tls to generate cert.pem and key.pem. -func ListenAndServeTLS(addr string, certFile string, keyFile string, handler Handler) error { - server := &Server{Addr: addr, Handler: handler} - return server.ListenAndServeTLS(certFile, keyFile) -} - -// ListenAndServeTLS listens on the TCP network address srv.Addr and -// then calls Serve to handle requests on incoming TLS connections. -// -// Filenames containing a certificate and matching private key for -// the server must be provided. If the certificate is signed by a -// certificate authority, the certFile should be the concatenation -// of the server's certificate followed by the CA's certificate. -// -// If srv.Addr is blank, ":https" is used. -func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { - addr := srv.Addr - if addr == "" { - addr = ":https" - } - config := &tls.Config{} - if srv.TLSConfig != nil { - *config = *srv.TLSConfig - } - if config.NextProtos == nil { - config.NextProtos = []string{"http/1.1"} - } - - var err error - config.Certificates = make([]tls.Certificate, 1) - config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - conn, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - tlsListener := tls.NewListener(conn, config) - return srv.Serve(tlsListener) -} - -// TimeoutHandler returns a Handler that runs h with the given time limit. -// -// The new Handler calls h.ServeHTTP to handle each request, but if a -// call runs for more than ns nanoseconds, the handler responds with -// a 503 Service Unavailable error and the given message in its body. -// (If msg is empty, a suitable default message will be sent.) -// After such a timeout, writes by h to its ResponseWriter will return -// ErrHandlerTimeout. -func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { - f := func() <-chan time.Time { - return time.After(dt) - } - return &timeoutHandler{h, f, msg} -} - -// ErrHandlerTimeout is returned on ResponseWriter Write calls -// in handlers which have timed out. -var ErrHandlerTimeout = errors.New("http: Handler timeout") - -type timeoutHandler struct { - handler Handler - timeout func() <-chan time.Time // returns channel producing a timeout - body string -} - -func (h *timeoutHandler) errorBody() string { - if h.body != "" { - return h.body - } - return "Timeout
" -} - -func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { - done := make(chan bool) - tw := &timeoutWriter{w: w} - go func() { - h.handler.ServeHTTP(tw, r) - done <- true - }() - select { - case <-done: - return - case <-h.timeout(): - tw.mu.Lock() - defer tw.mu.Unlock() - if !tw.wroteHeader { - tw.w.WriteHeader(StatusServiceUnavailable) - tw.w.Write([]byte(h.errorBody())) - } - tw.timedOut = true - } -} - -type timeoutWriter struct { - w ResponseWriter - - mu sync.Mutex - timedOut bool - wroteHeader bool -} - -func (tw *timeoutWriter) Header() Header { - return tw.w.Header() -} - -func (tw *timeoutWriter) Write(p []byte) (int, error) { - tw.mu.Lock() - timedOut := tw.timedOut - tw.mu.Unlock() - if timedOut { - return 0, ErrHandlerTimeout - } - return tw.w.Write(p) -} - -func (tw *timeoutWriter) WriteHeader(code int) { - tw.mu.Lock() - if tw.timedOut || tw.wroteHeader { - tw.mu.Unlock() - return - } - tw.wroteHeader = true - tw.mu.Unlock() - tw.w.WriteHeader(code) -} === removed file 'src/launchpad.net/gwacl/fork/http/sniff.go' --- src/launchpad.net/gwacl/fork/http/sniff.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/sniff.go 1970-01-01 00:00:00 +0000 @@ -1,214 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bytes" - "encoding/binary" -) - -// The algorithm uses at most sniffLen bytes to make its decision. -const sniffLen = 512 - -// DetectContentType implements the algorithm described -// at http://mimesniff.spec.whatwg.org/ to determine the -// Content-Type of the given data. It considers at most the -// first 512 bytes of data. DetectContentType always returns -// a valid MIME type: if it cannot determine a more specific one, it -// returns "application/octet-stream". -func DetectContentType(data []byte) string { - if len(data) > sniffLen { - data = data[:sniffLen] - } - - // Index of the first non-whitespace byte in data. - firstNonWS := 0 - for ; firstNonWS < len(data) && isWS(data[firstNonWS]); firstNonWS++ { - } - - for _, sig := range sniffSignatures { - if ct := sig.match(data, firstNonWS); ct != "" { - return ct - } - } - - return "application/octet-stream" // fallback -} - -func isWS(b byte) bool { - return bytes.IndexByte([]byte("\t\n\x0C\r "), b) != -1 -} - -type sniffSig interface { - // match returns the MIME type of the data, or "" if unknown. - match(data []byte, firstNonWS int) string -} - -// Data matching the table in section 6. -var sniffSignatures = []sniffSig{ - htmlSig("' { - return "" - } - return "text/html; charset=utf-8" -} - -type mp4Sig int - -func (mp4Sig) match(data []byte, firstNonWS int) string { - // c.f. section 6.1. - if len(data) < 8 { - return "" - } - boxSize := int(binary.BigEndian.Uint32(data[:4])) - if boxSize%4 != 0 || len(data) < boxSize { - return "" - } - if !bytes.Equal(data[4:8], []byte("ftyp")) { - return "" - } - for st := 8; st < boxSize; st += 4 { - if st == 12 { - // minor version number - continue - } - seg := string(data[st : st+3]) - switch seg { - case "mp4", "iso", "M4V", "M4P", "M4B": - return "video/mp4" - /* The remainder are not in the spec. - case "M4A": - return "audio/mp4" - case "3gp": - return "video/3gpp" - case "jp2": - return "image/jp2" // JPEG 2000 - */ - } - } - return "" -} - -type textSig int - -func (textSig) match(data []byte, firstNonWS int) string { - // c.f. section 5, step 4. 
- for _, b := range data[firstNonWS:] { - switch { - case 0x00 <= b && b <= 0x08, - b == 0x0B, - 0x0E <= b && b <= 0x1A, - 0x1C <= b && b <= 0x1F: - return "" - } - } - return "text/plain; charset=utf-8" -} === removed file 'src/launchpad.net/gwacl/fork/http/status.go' --- src/launchpad.net/gwacl/fork/http/status.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/status.go 1970-01-01 00:00:00 +0000 @@ -1,108 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -// HTTP status codes, defined in RFC 2616. -const ( - StatusContinue = 100 - StatusSwitchingProtocols = 101 - - StatusOK = 200 - StatusCreated = 201 - StatusAccepted = 202 - StatusNonAuthoritativeInfo = 203 - StatusNoContent = 204 - StatusResetContent = 205 - StatusPartialContent = 206 - - StatusMultipleChoices = 300 - StatusMovedPermanently = 301 - StatusFound = 302 - StatusSeeOther = 303 - StatusNotModified = 304 - StatusUseProxy = 305 - StatusTemporaryRedirect = 307 - - StatusBadRequest = 400 - StatusUnauthorized = 401 - StatusPaymentRequired = 402 - StatusForbidden = 403 - StatusNotFound = 404 - StatusMethodNotAllowed = 405 - StatusNotAcceptable = 406 - StatusProxyAuthRequired = 407 - StatusRequestTimeout = 408 - StatusConflict = 409 - StatusGone = 410 - StatusLengthRequired = 411 - StatusPreconditionFailed = 412 - StatusRequestEntityTooLarge = 413 - StatusRequestURITooLong = 414 - StatusUnsupportedMediaType = 415 - StatusRequestedRangeNotSatisfiable = 416 - StatusExpectationFailed = 417 - StatusTeapot = 418 - - StatusInternalServerError = 500 - StatusNotImplemented = 501 - StatusBadGateway = 502 - StatusServiceUnavailable = 503 - StatusGatewayTimeout = 504 - StatusHTTPVersionNotSupported = 505 -) - -var statusText = map[int]string{ - StatusContinue: "Continue", - StatusSwitchingProtocols: "Switching Protocols", - - StatusOK: "OK", - StatusCreated: "Created", - StatusAccepted: "Accepted", - StatusNonAuthoritativeInfo: "Non-Authoritative Information", - StatusNoContent: "No Content", - StatusResetContent: "Reset Content", - StatusPartialContent: "Partial Content", - - StatusMultipleChoices: "Multiple Choices", - StatusMovedPermanently: "Moved Permanently", - StatusFound: "Found", - StatusSeeOther: "See Other", - StatusNotModified: "Not Modified", - StatusUseProxy: "Use Proxy", - StatusTemporaryRedirect: "Temporary Redirect", - - StatusBadRequest: "Bad Request", - StatusUnauthorized: "Unauthorized", - StatusPaymentRequired: "Payment Required", - StatusForbidden: "Forbidden", - StatusNotFound: "Not Found", - StatusMethodNotAllowed: "Method Not Allowed", - StatusNotAcceptable: "Not Acceptable", - StatusProxyAuthRequired: "Proxy Authentication Required", - StatusRequestTimeout: "Request Timeout", - StatusConflict: "Conflict", - StatusGone: "Gone", - StatusLengthRequired: "Length Required", - StatusPreconditionFailed: "Precondition Failed", - StatusRequestEntityTooLarge: "Request Entity Too Large", - StatusRequestURITooLong: "Request URI Too Long", - StatusUnsupportedMediaType: "Unsupported Media Type", - StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", - StatusExpectationFailed: "Expectation Failed", - StatusTeapot: "I'm a teapot", - - StatusInternalServerError: "Internal Server Error", - StatusNotImplemented: "Not Implemented", - StatusBadGateway: "Bad Gateway", - StatusServiceUnavailable: "Service Unavailable", - StatusGatewayTimeout: "Gateway Timeout", - 
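DetectContentType, removed with sniff.go above, behaves the same way in the standard net/http; a quick demonstration of the signature table and the octet-stream fallback:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        fmt.Println(http.DetectContentType([]byte("<html><body>hi</body></html>")))
        // text/html; charset=utf-8
        fmt.Println(http.DetectContentType([]byte{0x89, 'P', 'N', 'G', '\r', '\n', 0x1a, '\n'}))
        // image/png
        fmt.Println(http.DetectContentType([]byte{0x00, 0x01}))
        // application/octet-stream (the fallback)
    }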
StatusHTTPVersionNotSupported: "HTTP Version Not Supported", -} - -// StatusText returns a text for the HTTP status code. It returns the empty -// string if the code is unknown. -func StatusText(code int) string { - return statusText[code] -} === removed file 'src/launchpad.net/gwacl/fork/http/transfer.go' --- src/launchpad.net/gwacl/fork/http/transfer.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/transfer.go 1970-01-01 00:00:00 +0000 @@ -1,632 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "net/textproto" - "strconv" - "strings" -) - -// transferWriter inspects the fields of a user-supplied Request or Response, -// sanitizes them without changing the user object and provides methods for -// writing the respective header, body and trailer in wire format. -type transferWriter struct { - Method string - Body io.Reader - BodyCloser io.Closer - ResponseToHEAD bool - ContentLength int64 // -1 means unknown, 0 means exactly none - Close bool - TransferEncoding []string - Trailer Header -} - -func newTransferWriter(r interface{}) (t *transferWriter, err error) { - t = &transferWriter{} - - // Extract relevant fields - atLeastHTTP11 := false - switch rr := r.(type) { - case *Request: - if rr.ContentLength != 0 && rr.Body == nil { - return nil, fmt.Errorf("http: Request.ContentLength=%d with nil Body", rr.ContentLength) - } - t.Method = rr.Method - t.Body = rr.Body - t.BodyCloser = rr.Body - t.ContentLength = rr.ContentLength - t.Close = rr.Close - t.TransferEncoding = rr.TransferEncoding - t.Trailer = rr.Trailer - atLeastHTTP11 = rr.ProtoAtLeast(1, 1) - if t.Body != nil && len(t.TransferEncoding) == 0 && atLeastHTTP11 { - if t.ContentLength == 0 { - // Test to see if it's actually zero or just unset. - var buf [1]byte - n, _ := io.ReadFull(t.Body, buf[:]) - if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stich the Body back together again, re-attaching our - // consumed byte. - t.ContentLength = -1 - t.Body = io.MultiReader(bytes.NewBuffer(buf[:]), t.Body) - } else { - // Body is actually empty. 
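newTransferWriter's zero-length probe above is a generally useful trick: read one byte to distinguish an unset ContentLength from a genuinely empty Body, then stitch the byte back on with io.MultiReader. A standalone sketch of the same technique:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "strings"
    )

    // probeEmpty reports whether r yields no data; when it does yield data,
    // the returned reader replays the probed byte followed by the rest.
    func probeEmpty(r io.Reader) (io.Reader, bool) {
        var buf [1]byte
        if n, _ := io.ReadFull(r, buf[:]); n == 0 {
            return r, true
        }
        return io.MultiReader(bytes.NewReader(buf[:]), r), false
    }

    func main() {
        r, empty := probeEmpty(strings.NewReader("payload"))
        fmt.Println(empty) // false
        b, _ := io.ReadAll(r)
        fmt.Println(string(b)) // payload
    }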
- t.Body = nil - t.BodyCloser = nil - } - } - if t.ContentLength < 0 { - t.TransferEncoding = []string{"chunked"} - } - } - case *Response: - if rr.Request != nil { - t.Method = rr.Request.Method - } - t.Body = rr.Body - t.BodyCloser = rr.Body - t.ContentLength = rr.ContentLength - t.Close = rr.Close - t.TransferEncoding = rr.TransferEncoding - t.Trailer = rr.Trailer - atLeastHTTP11 = rr.ProtoAtLeast(1, 1) - t.ResponseToHEAD = noBodyExpected(t.Method) - } - - // Sanitize Body,ContentLength,TransferEncoding - if t.ResponseToHEAD { - t.Body = nil - t.TransferEncoding = nil - // ContentLength is expected to hold Content-Length - if t.ContentLength < 0 { - return nil, ErrMissingContentLength - } - } else { - if !atLeastHTTP11 || t.Body == nil { - t.TransferEncoding = nil - } - if chunked(t.TransferEncoding) { - t.ContentLength = -1 - } else if t.Body == nil { // no chunking, no body - t.ContentLength = 0 - } - } - - // Sanitize Trailer - if !chunked(t.TransferEncoding) { - t.Trailer = nil - } - - return t, nil -} - -func noBodyExpected(requestMethod string) bool { - return requestMethod == "HEAD" -} - -func (t *transferWriter) shouldSendContentLength() bool { - if chunked(t.TransferEncoding) { - return false - } - if t.ContentLength > 0 { - return true - } - if t.ResponseToHEAD { - return true - } - // Many servers expect a Content-Length for these methods - if t.Method == "POST" || t.Method == "PUT" { - return true - } - if t.ContentLength == 0 && isIdentity(t.TransferEncoding) { - return true - } - - return false -} - -func (t *transferWriter) WriteHeader(w io.Writer) (err error) { - if t.Close { - _, err = io.WriteString(w, "Connection: close\r\n") - if err != nil { - return - } - } - - // Write Content-Length and/or Transfer-Encoding whose values are a - // function of the sanitized field triple (Body, ContentLength, - // TransferEncoding) - if t.shouldSendContentLength() { - io.WriteString(w, "Content-Length: ") - _, err = io.WriteString(w, strconv.FormatInt(t.ContentLength, 10)+"\r\n") - if err != nil { - return - } - } else if chunked(t.TransferEncoding) { - _, err = io.WriteString(w, "Transfer-Encoding: chunked\r\n") - if err != nil { - return - } - } - - // Write Trailer header - if t.Trailer != nil { - // TODO: At some point, there should be a generic mechanism for - // writing long headers, using HTTP line splitting - io.WriteString(w, "Trailer: ") - needComma := false - for k := range t.Trailer { - k = CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return &badStringError{"invalid Trailer key", k} - } - if needComma { - io.WriteString(w, ",") - } - io.WriteString(w, k) - needComma = true - } - _, err = io.WriteString(w, "\r\n") - } - - return -} - -func (t *transferWriter) WriteBody(w io.Writer) (err error) { - var ncopy int64 - - // Write body - if t.Body != nil { - if chunked(t.TransferEncoding) { - cw := newChunkedWriter(w) - _, err = io.Copy(cw, t.Body) - if err == nil { - err = cw.Close() - } - } else if t.ContentLength == -1 { - ncopy, err = io.Copy(w, t.Body) - } else { - ncopy, err = io.Copy(w, io.LimitReader(t.Body, t.ContentLength)) - nextra, err := io.Copy(ioutil.Discard, t.Body) - if err != nil { - return err - } - ncopy += nextra - } - if err != nil { - return err - } - if err = t.BodyCloser.Close(); err != nil { - return err - } - } - - if t.ContentLength != -1 && t.ContentLength != ncopy { - return fmt.Errorf("http: Request.ContentLength=%d with Body length %d", - t.ContentLength, ncopy) - } - - // TODO(petar): Place 
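WriteBody above delegates chunked framing to a chunked writer; the standard library exposes the same framing via net/http/httputil. A sketch of the wire format it produces (each chunk is a hex size, CRLF, data, CRLF, with a zero-length chunk at the end):

    package main

    import (
        "fmt"
        "net/http/httputil"
        "os"
    )

    func main() {
        cw := httputil.NewChunkedWriter(os.Stdout)
        fmt.Fprint(cw, "hello, ")
        fmt.Fprint(cw, "world")
        cw.Close() // emits the terminating "0\r\n"
        // A full message would follow with optional trailers and a final CRLF.
        os.Stdout.WriteString("\r\n")
    }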
trailer writer code here. - if chunked(t.TransferEncoding) { - // Last chunk, empty trailer - _, err = io.WriteString(w, "\r\n") - } - - return -} - -type transferReader struct { - // Input - Header Header - StatusCode int - RequestMethod string - ProtoMajor int - ProtoMinor int - // Output - Body io.ReadCloser - ContentLength int64 - TransferEncoding []string - Close bool - Trailer Header -} - -// bodyAllowedForStatus returns whether a given response status code -// permits a body. See RFC2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -// msg is *Request or *Response. -func readTransfer(msg interface{}, r *bufio.Reader) (err error) { - t := &transferReader{} - - // Unify input - isResponse := false - switch rr := msg.(type) { - case *Response: - t.Header = rr.Header - t.StatusCode = rr.StatusCode - t.RequestMethod = rr.Request.Method - t.ProtoMajor = rr.ProtoMajor - t.ProtoMinor = rr.ProtoMinor - t.Close = shouldClose(t.ProtoMajor, t.ProtoMinor, t.Header) - isResponse = true - case *Request: - t.Header = rr.Header - t.ProtoMajor = rr.ProtoMajor - t.ProtoMinor = rr.ProtoMinor - // Transfer semantics for Requests are exactly like those for - // Responses with status code 200, responding to a GET method - t.StatusCode = 200 - t.RequestMethod = "GET" - default: - panic("unexpected type") - } - - // Default to HTTP/1.1 - if t.ProtoMajor == 0 && t.ProtoMinor == 0 { - t.ProtoMajor, t.ProtoMinor = 1, 1 - } - - // Transfer encoding, content length - t.TransferEncoding, err = fixTransferEncoding(t.RequestMethod, t.Header) - if err != nil { - return err - } - - t.ContentLength, err = fixLength(isResponse, t.StatusCode, t.RequestMethod, t.Header, t.TransferEncoding) - if err != nil { - return err - } - - // Trailer - t.Trailer, err = fixTrailer(t.Header, t.TransferEncoding) - if err != nil { - return err - } - - // If there is no Content-Length or chunked Transfer-Encoding on a *Response - // and the status is not 1xx, 204 or 304, then the body is unbounded. - // See RFC2616, section 4.4. - switch msg.(type) { - case *Response: - if t.ContentLength == -1 && - !chunked(t.TransferEncoding) && - bodyAllowedForStatus(t.StatusCode) { - // Unbounded body. - t.Close = true - } - } - - // Prepare body reader. ContentLength < 0 means chunked encoding - // or close connection when finished, since multipart is not supported yet - switch { - case chunked(t.TransferEncoding): - t.Body = &body{Reader: newChunkedReader(r), hdr: msg, r: r, closing: t.Close} - case t.ContentLength >= 0: - // TODO: limit the Content-Length. This is an easy DoS vector. - t.Body = &body{Reader: io.LimitReader(r, t.ContentLength), closing: t.Close} - default: - // t.ContentLength < 0, i.e. "Content-Length" not mentioned in header - if t.Close { - // Close semantics (i.e. HTTP/1.0) - t.Body = &body{Reader: r, closing: t.Close} - } else { - // Persistent connection (i.e. 
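bodyAllowedForStatus above encodes the RFC 2616 section 4.4 rule that 1xx, 204, and 304 responses never carry a body; a standalone restatement for reference:

    package main

    import "fmt"

    func bodyAllowed(status int) bool {
        switch {
        case status >= 100 && status <= 199:
            return false
        case status == 204, status == 304:
            return false
        }
        return true
    }

    func main() {
        for _, s := range []int{100, 200, 204, 304, 404} {
            fmt.Println(s, bodyAllowed(s))
        }
    }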
HTTP/1.1) - t.Body = &body{Reader: io.LimitReader(r, 0), closing: t.Close} - } - } - - // Unify output - switch rr := msg.(type) { - case *Request: - rr.Body = t.Body - rr.ContentLength = t.ContentLength - rr.TransferEncoding = t.TransferEncoding - rr.Close = t.Close - rr.Trailer = t.Trailer - case *Response: - rr.Body = t.Body - rr.ContentLength = t.ContentLength - rr.TransferEncoding = t.TransferEncoding - rr.Close = t.Close - rr.Trailer = t.Trailer - } - - return nil -} - -// Checks whether chunked is part of the encodings stack -func chunked(te []string) bool { return len(te) > 0 && te[0] == "chunked" } - -// Checks whether the encoding is explicitly "identity". -func isIdentity(te []string) bool { return len(te) == 1 && te[0] == "identity" } - -// Sanitize transfer encoding -func fixTransferEncoding(requestMethod string, header Header) ([]string, error) { - raw, present := header["Transfer-Encoding"] - if !present { - return nil, nil - } - - delete(header, "Transfer-Encoding") - - // Head responses have no bodies, so the transfer encoding - // should be ignored. - if requestMethod == "HEAD" { - return nil, nil - } - - encodings := strings.Split(raw[0], ",") - te := make([]string, 0, len(encodings)) - // TODO: Even though we only support "identity" and "chunked" - // encodings, the loop below is designed with foresight. One - // invariant that must be maintained is that, if present, - // chunked encoding must always come first. - for _, encoding := range encodings { - encoding = strings.ToLower(strings.TrimSpace(encoding)) - // "identity" encoding is not recorded - if encoding == "identity" { - break - } - if encoding != "chunked" { - return nil, &badStringError{"unsupported transfer encoding", encoding} - } - te = te[0 : len(te)+1] - te[len(te)-1] = encoding - } - if len(te) > 1 { - return nil, &badStringError{"too many transfer encodings", strings.Join(te, ",")} - } - if len(te) > 0 { - // Chunked encoding trumps Content-Length. See RFC 2616 - // Section 4.4. Currently len(te) > 0 implies chunked - // encoding. - delete(header, "Content-Length") - return te, nil - } - - return nil, nil -} - -// Determine the expected body length, using RFC 2616 Section 4.4. This -// function is not a method, because ultimately it should be shared by -// ReadResponse and ReadRequest. -func fixLength(isResponse bool, status int, requestMethod string, header Header, te []string) (int64, error) { - - // Logic based on response type or status - if noBodyExpected(requestMethod) { - return 0, nil - } - if status/100 == 1 { - return 0, nil - } - switch status { - case 204, 304: - return 0, nil - } - - // Logic based on Transfer-Encoding - if chunked(te) { - return -1, nil - } - - // Logic based on Content-Length - cl := strings.TrimSpace(header.Get("Content-Length")) - if cl != "" { - n, err := strconv.ParseInt(cl, 10, 64) - if err != nil || n < 0 { - return -1, &badStringError{"bad Content-Length", cl} - } - return n, nil - } else { - header.Del("Content-Length") - } - - if !isResponse && requestMethod == "GET" { - // RFC 2616 doesn't explicitly permit nor forbid an - // entity-body on a GET request so we permit one if - // declared, but we default to 0 here (not -1 below) - // if there's no mention of a body. - return 0, nil - } - - // Logic based on media type. The purpose of the following code is just - // to detect whether the unsupported "multipart/byteranges" is being - // used. A proper Content-Type parser is needed in the future. 
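fixLength above accepts a Content-Length only as a non-negative decimal, and chunked encoding deletes the header outright since it takes precedence. A sketch of just the header-parsing rule:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseContentLength mirrors fixLength's validation: empty means
    // unknown (-1); anything else must parse as a non-negative int64.
    func parseContentLength(cl string) (int64, error) {
        cl = strings.TrimSpace(cl)
        if cl == "" {
            return -1, nil
        }
        n, err := strconv.ParseInt(cl, 10, 64)
        if err != nil || n < 0 {
            return 0, fmt.Errorf("bad Content-Length %q", cl)
        }
        return n, nil
    }

    func main() {
        fmt.Println(parseContentLength(" 42 "))
        fmt.Println(parseContentLength("-1"))
    }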
- if strings.Contains(strings.ToLower(header.Get("Content-Type")), "multipart/byteranges") { - return -1, ErrNotSupported - } - - // Body-EOF logic based on other methods (like closing, or chunked coding) - return -1, nil -} - -// Determine whether to hang up after sending a request and body, or -// receiving a response and body -// 'header' is the request headers -func shouldClose(major, minor int, header Header) bool { - if major < 1 { - return true - } else if major == 1 && minor == 0 { - if !strings.Contains(strings.ToLower(header.Get("Connection")), "keep-alive") { - return true - } - return false - } else { - // TODO: Should split on commas, toss surrounding white space, - // and check each field. - if strings.ToLower(header.Get("Connection")) == "close" { - header.Del("Connection") - return true - } - } - return false -} - -// Parse the trailer header -func fixTrailer(header Header, te []string) (Header, error) { - raw := header.Get("Trailer") - if raw == "" { - return nil, nil - } - - header.Del("Trailer") - trailer := make(Header) - keys := strings.Split(raw, ",") - for _, key := range keys { - key = CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - return nil, &badStringError{"bad trailer key", key} - } - trailer.Del(key) - } - if len(trailer) == 0 { - return nil, nil - } - if !chunked(te) { - // Trailer and no chunking - return nil, ErrUnexpectedTrailer - } - return trailer, nil -} - -// body turns a Reader into a ReadCloser. -// Close ensures that the body has been fully read -// and then reads the trailer if necessary. -type body struct { - io.Reader - hdr interface{} // non-nil (Response or Request) value means read trailer - r *bufio.Reader // underlying wire-format reader for the trailer - closing bool // is the connection to be closed after reading body? - closed bool - - res *response // response writer for server requests, else nil -} - -// ErrBodyReadAfterClose is returned when reading a Request Body after -// the body has been closed. This typically happens when the body is -// read after an HTTP Handler calls WriteHeader or Write on its -// ResponseWriter. -var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed request Body") - -func (b *body) Read(p []byte) (n int, err error) { - if b.closed { - return 0, ErrBodyReadAfterClose - } - n, err = b.Reader.Read(p) - - // Read the final trailer once we hit EOF. - if err == io.EOF && b.hdr != nil { - if e := b.readTrailer(); e != nil { - err = e - } - b.hdr = nil - } - return n, err -} - -var ( - singleCRLF = []byte("\r\n") - doubleCRLF = []byte("\r\n\r\n") -) - -func seeUpcomingDoubleCRLF(r *bufio.Reader) bool { - for peekSize := 4; ; peekSize++ { - // This loop stops when Peek returns an error, - // which it does when r's buffer has been filled. - buf, err := r.Peek(peekSize) - if bytes.HasSuffix(buf, doubleCRLF) { - return true - } - if err != nil { - break - } - } - return false -} - -func (b *body) readTrailer() error { - // The common case, since nobody uses trailers. - buf, _ := b.r.Peek(2) - if bytes.Equal(buf, singleCRLF) { - b.r.ReadByte() - b.r.ReadByte() - return nil - } - - // Make sure there's a header terminator coming up, to prevent - // a DoS with an unbounded size Trailer. It's not easy to - // slip in a LimitReader here, as textproto.NewReader requires - // a concrete *bufio.Reader. Also, we can't get all the way - // back up to our conn's LimitedReader that *might* be backing - // this bufio.Reader. 
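shouldClose above captures the keep-alive defaults: HTTP/1.0 closes unless the peer asked for keep-alive, HTTP/1.1 stays open unless told to close. A simplified restatement (the original does substring matching on the Connection header; exact comparison here is an assumption for brevity):

    package main

    import "fmt"

    func closeAfter(major, minor int, connection string) bool {
        if major < 1 {
            return true
        }
        if major == 1 && minor == 0 {
            return connection != "keep-alive"
        }
        return connection == "close"
    }

    func main() {
        fmt.Println(closeAfter(1, 0, ""))           // true: HTTP/1.0 default
        fmt.Println(closeAfter(1, 0, "keep-alive")) // false
        fmt.Println(closeAfter(1, 1, ""))           // false: HTTP/1.1 default
        fmt.Println(closeAfter(1, 1, "close"))      // true
    }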
Instead, a hack: we iteratively Peek up - // to the bufio.Reader's max size, looking for a double CRLF. - // This limits the trailer to the underlying buffer size, typically 4kB. - if !seeUpcomingDoubleCRLF(b.r) { - return errors.New("http: suspiciously long trailer after chunked body") - } - - hdr, err := textproto.NewReader(b.r).ReadMIMEHeader() - if err != nil { - return err - } - switch rr := b.hdr.(type) { - case *Request: - rr.Trailer = Header(hdr) - case *Response: - rr.Trailer = Header(hdr) - } - return nil -} - -func (b *body) Close() error { - if b.closed { - return nil - } - defer func() { - b.closed = true - }() - if b.hdr == nil && b.closing { - // no trailer and closing the connection next. - // no point in reading to EOF. - return nil - } - - // In a server request, don't continue reading from the client - // if we've already hit the maximum body size set by the - // handler. If this is set, that also means the TCP connection - // is about to be closed, so getting to the next HTTP request - // in the stream is not necessary. - if b.res != nil && b.res.requestBodyLimitHit { - return nil - } - - // Fully consume the body, which will also lead to us reading - // the trailer headers after the body, if present. - if _, err := io.Copy(ioutil.Discard, b); err != nil { - return err - } - return nil -} === removed file 'src/launchpad.net/gwacl/fork/http/transport.go' --- src/launchpad.net/gwacl/fork/http/transport.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/transport.go 1970-01-01 00:00:00 +0000 @@ -1,757 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// HTTP client implementation. See RFC 2616. -// -// This is the low-level Transport implementation of RoundTripper. -// The high-level interface is in client.go. - -package http - -import ( - "bufio" - "compress/gzip" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "launchpad.net/gwacl/fork/tls" - "log" - "net" - "net/url" - "os" - "strings" - "sync" -) - -// DefaultTransport is the default implementation of Transport and is -// used by DefaultClient. It establishes a new network connection for -// each call to Do and uses HTTP proxies as directed by the -// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy) -// environment variables. -var DefaultTransport RoundTripper = &Transport{Proxy: ProxyFromEnvironment} - -// DefaultMaxIdleConnsPerHost is the default value of Transport's -// MaxIdleConnsPerHost. -const DefaultMaxIdleConnsPerHost = 2 - -// Transport is an implementation of RoundTripper that supports http, -// https, and http proxies (for either http or https with CONNECT). -// Transport can also cache connections for future re-use. -type Transport struct { - lk sync.Mutex - idleConn map[string][]*persistConn - altProto map[string]RoundTripper // nil or map of URI scheme => RoundTripper - - // TODO: tunable on global max cached connections - // TODO: tunable on timeout on cached connections - // TODO: optional pipelining - - // Proxy specifies a function to return a proxy for a given - // Request. If the function returns a non-nil error, the - // request is aborted with the provided error. - // If Proxy is nil or returns a nil *URL, no proxy is used. - Proxy func(*Request) (*url.URL, error) - - // Dial specifies the dial function for creating TCP - // connections. - // If Dial is nil, net.Dial is used. 
- Dial func(net, addr string) (c net.Conn, err error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - DisableKeepAlives bool - DisableCompression bool - - // MaxIdleConnsPerHost, if non-zero, controls the maximum idle - // (keep-alive) to keep to keep per-host. If zero, - // DefaultMaxIdleConnsPerHost is used. - MaxIdleConnsPerHost int -} - -// ProxyFromEnvironment returns the URL of the proxy to use for a -// given request, as indicated by the environment variables -// $HTTP_PROXY and $NO_PROXY (or $http_proxy and $no_proxy). -// An error is returned if the proxy environment is invalid. -// A nil URL and nil error are returned if no proxy is defined in the -// environment, or a proxy should not be used for the given request. -func ProxyFromEnvironment(req *Request) (*url.URL, error) { - proxy := getenvEitherCase("HTTP_PROXY") - if proxy == "" { - return nil, nil - } - if !useProxy(canonicalAddr(req.URL)) { - return nil, nil - } - proxyURL, err := url.Parse(proxy) - if err != nil || proxyURL.Scheme == "" { - if u, err := url.Parse("http://" + proxy); err == nil { - proxyURL = u - err = nil - } - } - if err != nil { - return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) - } - return proxyURL, nil -} - -// ProxyURL returns a proxy function (for use in a Transport) -// that always returns the same URL. -func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) { - return func(*Request) (*url.URL, error) { - return fixedURL, nil - } -} - -// transportRequest is a wrapper around a *Request that adds -// optional extra headers to write. -type transportRequest struct { - *Request // original request, not to be mutated - extra Header // extra headers to write, or nil -} - -func (tr *transportRequest) extraHeaders() Header { - if tr.extra == nil { - tr.extra = make(Header) - } - return tr.extra -} - -// RoundTrip implements the RoundTripper interface. -func (t *Transport) RoundTrip(req *Request) (resp *Response, err error) { - if req.URL == nil { - return nil, errors.New("http: nil Request.URL") - } - if req.Header == nil { - return nil, errors.New("http: nil Request.Header") - } - if req.URL.Scheme != "http" && req.URL.Scheme != "https" { - t.lk.Lock() - var rt RoundTripper - if t.altProto != nil { - rt = t.altProto[req.URL.Scheme] - } - t.lk.Unlock() - if rt == nil { - return nil, &badStringError{"unsupported protocol scheme", req.URL.Scheme} - } - return rt.RoundTrip(req) - } - treq := &transportRequest{Request: req} - cm, err := t.connectMethodForRequest(treq) - if err != nil { - return nil, err - } - - // Get the cached or newly-created connection to either the - // host (for http or https), the http proxy, or the http proxy - // pre-CONNECTed to https server. In any case, we'll be ready - // to send it requests. - pconn, err := t.getConn(cm) - if err != nil { - return nil, err - } - - return pconn.roundTrip(treq) -} - -// RegisterProtocol registers a new protocol with scheme. -// The Transport will pass requests using the given scheme to rt. -// It is rt's responsibility to simulate HTTP request semantics. -// -// RegisterProtocol can be used by other packages to provide -// implementations of protocol schemes like "ftp" or "file". 
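ProxyURL above, also present in the standard net/http, pins every request to one proxy and bypasses the environment lookup; a minimal client sketch with a hypothetical proxy address:

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
    )

    func main() {
        proxy, err := url.Parse("http://proxy.example.com:3128")
        if err != nil {
            panic(err)
        }
        client := &http.Client{
            Transport: &http.Transport{Proxy: http.ProxyURL(proxy)},
        }
        resp, err := client.Get("http://example.com/")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        fmt.Println(resp.Status)
    }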
-func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { - if scheme == "http" || scheme == "https" { - panic("protocol " + scheme + " already registered") - } - t.lk.Lock() - defer t.lk.Unlock() - if t.altProto == nil { - t.altProto = make(map[string]RoundTripper) - } - if _, exists := t.altProto[scheme]; exists { - panic("protocol " + scheme + " already registered") - } - t.altProto[scheme] = rt -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle in -// a "keep-alive" state. It does not interrupt any connections currently -// in use. -func (t *Transport) CloseIdleConnections() { - t.lk.Lock() - defer t.lk.Unlock() - if t.idleConn == nil { - return - } - for _, conns := range t.idleConn { - for _, pconn := range conns { - pconn.close() - } - } - t.idleConn = make(map[string][]*persistConn) -} - -// -// Private implementation past this point. -// - -func getenvEitherCase(k string) string { - if v := os.Getenv(strings.ToUpper(k)); v != "" { - return v - } - return os.Getenv(strings.ToLower(k)) -} - -func (t *Transport) connectMethodForRequest(treq *transportRequest) (*connectMethod, error) { - cm := &connectMethod{ - targetScheme: treq.URL.Scheme, - targetAddr: canonicalAddr(treq.URL), - } - if t.Proxy != nil { - var err error - cm.proxyURL, err = t.Proxy(treq.Request) - if err != nil { - return nil, err - } - } - return cm, nil -} - -// proxyAuth returns the Proxy-Authorization header to set -// on requests, if applicable. -func (cm *connectMethod) proxyAuth() string { - if cm.proxyURL == nil { - return "" - } - if u := cm.proxyURL.User; u != nil { - return "Basic " + base64.URLEncoding.EncodeToString([]byte(u.String())) - } - return "" -} - -// putIdleConn adds pconn to the list of idle persistent connections awaiting -// a new request. -// If pconn is no longer needed or not in a good state, putIdleConn -// returns false. -func (t *Transport) putIdleConn(pconn *persistConn) bool { - t.lk.Lock() - defer t.lk.Unlock() - if t.DisableKeepAlives || t.MaxIdleConnsPerHost < 0 { - pconn.close() - return false - } - if pconn.isBroken() { - return false - } - key := pconn.cacheKey - max := t.MaxIdleConnsPerHost - if max == 0 { - max = DefaultMaxIdleConnsPerHost - } - if len(t.idleConn[key]) >= max { - pconn.close() - return false - } - t.idleConn[key] = append(t.idleConn[key], pconn) - return true -} - -func (t *Transport) getIdleConn(cm *connectMethod) (pconn *persistConn) { - t.lk.Lock() - defer t.lk.Unlock() - if t.idleConn == nil { - t.idleConn = make(map[string][]*persistConn) - } - key := cm.String() - for { - pconns, ok := t.idleConn[key] - if !ok { - return nil - } - if len(pconns) == 1 { - pconn = pconns[0] - delete(t.idleConn, key) - } else { - // 2 or more cached connections; pop last - // TODO: queue? - pconn = pconns[len(pconns)-1] - t.idleConn[key] = pconns[0 : len(pconns)-1] - } - if !pconn.isBroken() { - return - } - } - return -} - -func (t *Transport) dial(network, addr string) (c net.Conn, err error) { - if t.Dial != nil { - return t.Dial(network, addr) - } - return net.Dial(network, addr) -} - -// getConn dials and creates a new persistConn to the target as -// specified in the connectMethod. This includes doing a proxy CONNECT -// and/or setting up TLS. If this doesn't return an error, the persistConn -// is ready to write requests to. 
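RegisterProtocol above lets a package route an entire URL scheme to its own RoundTripper; the standard Transport keeps the method. A sketch with a made-up "echo" scheme that answers every request itself:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "os"
        "strings"
    )

    type echoTripper struct{}

    func (echoTripper) RoundTrip(req *http.Request) (*http.Response, error) {
        return &http.Response{
            Status:     "200 OK",
            StatusCode: 200,
            Proto:      "HTTP/1.1",
            ProtoMajor: 1,
            ProtoMinor: 1,
            Header:     make(http.Header),
            Body:       io.NopCloser(strings.NewReader(req.URL.String() + "\n")),
            Request:    req,
        }, nil
    }

    func main() {
        t := &http.Transport{}
        t.RegisterProtocol("echo", echoTripper{})
        client := &http.Client{Transport: t}
        resp, err := client.Get("echo://anything/at/all")
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        io.Copy(os.Stdout, resp.Body)
    }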
-func (t *Transport) getConn(cm *connectMethod) (*persistConn, error) { - if pc := t.getIdleConn(cm); pc != nil { - return pc, nil - } - - conn, err := t.dial("tcp", cm.addr()) - if err != nil { - if cm.proxyURL != nil { - err = fmt.Errorf("http: error connecting to proxy %s: %v", cm.proxyURL, err) - } - return nil, err - } - - pa := cm.proxyAuth() - - pconn := &persistConn{ - t: t, - cacheKey: cm.String(), - conn: conn, - reqch: make(chan requestAndChan, 50), - } - - switch { - case cm.proxyURL == nil: - // Do nothing. - case cm.targetScheme == "http": - pconn.isProxy = true - if pa != "" { - pconn.mutateHeaderFunc = func(h Header) { - h.Set("Proxy-Authorization", pa) - } - } - case cm.targetScheme == "https": - connectReq := &Request{ - Method: "CONNECT", - URL: &url.URL{Opaque: cm.targetAddr}, - Host: cm.targetAddr, - Header: make(Header), - } - if pa != "" { - connectReq.Header.Set("Proxy-Authorization", pa) - } - connectReq.Write(conn) - - // Read response. - // Okay to use and discard buffered reader here, because - // TLS server will not speak until spoken to. - br := bufio.NewReader(conn) - resp, err := ReadResponse(br, connectReq) - if err != nil { - conn.Close() - return nil, err - } - if resp.StatusCode != 200 { - f := strings.SplitN(resp.Status, " ", 2) - conn.Close() - return nil, errors.New(f[1]) - } - } - - if cm.targetScheme == "https" { - // Initiate TLS and check remote host name against certificate. - conn = tls.Client(conn, t.TLSClientConfig) - if err = conn.(*tls.Conn).Handshake(); err != nil { - return nil, err - } - if t.TLSClientConfig == nil || !t.TLSClientConfig.InsecureSkipVerify { - if err = conn.(*tls.Conn).VerifyHostname(cm.tlsHost()); err != nil { - return nil, err - } - } - pconn.conn = conn - } - - pconn.br = bufio.NewReader(pconn.conn) - pconn.bw = bufio.NewWriter(pconn.conn) - go pconn.readLoop() - return pconn, nil -} - -// useProxy returns true if requests to addr should use a proxy, -// according to the NO_PROXY or no_proxy environment variable. -// addr is always a canonicalAddr with a host and port. -func useProxy(addr string) bool { - if len(addr) == 0 { - return true - } - host, _, err := net.SplitHostPort(addr) - if err != nil { - return false - } - if host == "localhost" { - return false - } - if ip := net.ParseIP(host); ip != nil { - if ip.IsLoopback() { - return false - } - } - - no_proxy := getenvEitherCase("NO_PROXY") - if no_proxy == "*" { - return false - } - - addr = strings.ToLower(strings.TrimSpace(addr)) - if hasPort(addr) { - addr = addr[:strings.LastIndex(addr, ":")] - } - - for _, p := range strings.Split(no_proxy, ",") { - p = strings.ToLower(strings.TrimSpace(p)) - if len(p) == 0 { - continue - } - if hasPort(p) { - p = p[:strings.LastIndex(p, ":")] - } - if addr == p || (p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:])) { - return false - } - } - return true -} - -// connectMethod is the map key (in its String form) for keeping persistent -// TCP connections alive for subsequent HTTP requests. -// -// A connect method may be of the following types: -// -// Cache key form Description -// ----------------- ------------------------- -// ||http|foo.com http directly to server, no proxy -// ||https|foo.com https directly to server, no proxy -// http://proxy.com|https|foo.com http to proxy, then CONNECT to foo.com -// http://proxy.com|http http to proxy, http to anywhere after that -// -// Note: no support to https to the proxy yet. 
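useProxy above implements NO_PROXY semantics: "*" disables proxying entirely, loopback hosts always connect directly, and a leading dot matches any subdomain. A trimmed standalone version (the host is assumed already lower-cased with the port stripped):

    package main

    import (
        "fmt"
        "strings"
    )

    // bypassProxy is the inverse of useProxy: true means connect directly.
    func bypassProxy(host, noProxy string) bool {
        if host == "localhost" || noProxy == "*" {
            return true
        }
        for _, p := range strings.Split(noProxy, ",") {
            p = strings.ToLower(strings.TrimSpace(p))
            if p == "" {
                continue
            }
            if host == p || (p[0] == '.' && (strings.HasSuffix(host, p) || host == p[1:])) {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(bypassProxy("internal.example.com", ".example.com")) // true
        fmt.Println(bypassProxy("example.com", ".example.com"))          // true
        fmt.Println(bypassProxy("other.org", ".example.com"))            // false
    }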
-// -type connectMethod struct { - proxyURL *url.URL // nil for no proxy, else full proxy URL - targetScheme string // "http" or "https" - targetAddr string // Not used if proxy + http targetScheme (4th example in table) -} - -func (ck *connectMethod) String() string { - proxyStr := "" - targetAddr := ck.targetAddr - if ck.proxyURL != nil { - proxyStr = ck.proxyURL.String() - if ck.targetScheme == "http" { - targetAddr = "" - } - } - return strings.Join([]string{proxyStr, ck.targetScheme, targetAddr}, "|") -} - -// addr returns the first hop "host:port" to which we need to TCP connect. -func (cm *connectMethod) addr() string { - if cm.proxyURL != nil { - return canonicalAddr(cm.proxyURL) - } - return cm.targetAddr -} - -// tlsHost returns the host name to match against the peer's -// TLS certificate. -func (cm *connectMethod) tlsHost() string { - h := cm.targetAddr - if hasPort(h) { - h = h[:strings.LastIndex(h, ":")] - } - return h -} - -// persistConn wraps a connection, usually a persistent one -// (but may be used for non-keep-alive requests as well) -type persistConn struct { - t *Transport - cacheKey string // its connectMethod.String() - conn net.Conn - br *bufio.Reader // from conn - bw *bufio.Writer // to conn - reqch chan requestAndChan // written by roundTrip(); read by readLoop() - isProxy bool - - // mutateHeaderFunc is an optional func to modify extra - // headers on each outbound request before it's written. (the - // original Request given to RoundTrip is not modified) - mutateHeaderFunc func(Header) - - lk sync.Mutex // guards numExpectedResponses and broken - numExpectedResponses int - broken bool // an error has happened on this connection; marked broken so it's not reused. -} - -func (pc *persistConn) isBroken() bool { - pc.lk.Lock() - defer pc.lk.Unlock() - return pc.broken -} - -var remoteSideClosedFunc func(error) bool // or nil to use default - -func remoteSideClosed(err error) bool { - if err == io.EOF { - return true - } - if remoteSideClosedFunc != nil { - return remoteSideClosedFunc(err) - } - return false -} - -func (pc *persistConn) readLoop() { - alive := true - var lastbody io.ReadCloser // last response body, if any, read on this connection - - for alive { - pb, err := pc.br.Peek(1) - - pc.lk.Lock() - if pc.numExpectedResponses == 0 { - pc.closeLocked() - pc.lk.Unlock() - if len(pb) > 0 { - log.Printf("Unsolicited response received on idle HTTP channel starting with %q; err=%v", - string(pb), err) - } - return - } - pc.lk.Unlock() - - rc := <-pc.reqch - - // Advance past the previous response's body, if the - // caller hasn't done so. 
- if lastbody != nil { - lastbody.Close() // assumed idempotent - lastbody = nil - } - resp, err := ReadResponse(pc.br, rc.req) - - if err != nil { - pc.close() - } else { - hasBody := rc.req.Method != "HEAD" && resp.ContentLength != 0 - if rc.addedGzip && hasBody && resp.Header.Get("Content-Encoding") == "gzip" { - resp.Header.Del("Content-Encoding") - resp.Header.Del("Content-Length") - resp.ContentLength = -1 - gzReader, zerr := gzip.NewReader(resp.Body) - if zerr != nil { - pc.close() - err = zerr - } else { - resp.Body = &readFirstCloseBoth{&discardOnCloseReadCloser{gzReader}, resp.Body} - } - } - resp.Body = &bodyEOFSignal{body: resp.Body} - } - - if err != nil || resp.Close || rc.req.Close { - alive = false - } - - hasBody := resp != nil && resp.ContentLength != 0 - var waitForBodyRead chan bool - if alive { - if hasBody { - lastbody = resp.Body - waitForBodyRead = make(chan bool) - resp.Body.(*bodyEOFSignal).fn = func() { - if !pc.t.putIdleConn(pc) { - alive = false - } - waitForBodyRead <- true - } - } else { - // When there's no response body, we immediately - // reuse the TCP connection (putIdleConn), but - // we need to prevent ClientConn.Read from - // closing the Response.Body on the next - // loop, otherwise it might close the body - // before the client code has had a chance to - // read it (even though it'll just be 0, EOF). - lastbody = nil - - if !pc.t.putIdleConn(pc) { - alive = false - } - } - } - - rc.ch <- responseAndError{resp, err} - - // Wait for the just-returned response body to be fully consumed - // before we race and peek on the underlying bufio reader. - if waitForBodyRead != nil { - <-waitForBodyRead - } else if !alive { - // If waitForBodyRead is nil, and we're not alive, we - // must close the connection before we leave the loop. - pc.close() - } - } -} - -type responseAndError struct { - res *Response - err error -} - -type requestAndChan struct { - req *Request - ch chan responseAndError - - // did the Transport (as opposed to the client code) add an - // Accept-Encoding gzip header? only if it we set it do - // we transparently decode the gzip. - addedGzip bool -} - -func (pc *persistConn) roundTrip(req *transportRequest) (resp *Response, err error) { - if pc.mutateHeaderFunc != nil { - pc.mutateHeaderFunc(req.extraHeaders()) - } - - // Ask for a compressed version if the caller didn't set their - // own value for Accept-Encoding. We only attempted to - // uncompress the gzip stream if we were the layer that - // requested it. - requestedGzip := false - if !pc.t.DisableCompression && req.Header.Get("Accept-Encoding") == "" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. 
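The readLoop/roundTrip pair above only un-gzips a response when the Transport itself added Accept-Encoding: gzip; a caller that sets the header explicitly must decode by hand. A sketch of that caller-side contract (the URL is hypothetical):

    package main

    import (
        "compress/gzip"
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest("GET", "http://example.com/", nil)
        if err != nil {
            panic(err)
        }
        // Setting Accept-Encoding ourselves disables transparent decoding.
        req.Header.Set("Accept-Encoding", "gzip")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println(err)
            return
        }
        defer resp.Body.Close()
        var body io.Reader = resp.Body
        if resp.Header.Get("Content-Encoding") == "gzip" {
            gz, err := gzip.NewReader(resp.Body)
            if err != nil {
                fmt.Println(err)
                return
            }
            defer gz.Close()
            body = gz
        }
        n, _ := io.Copy(io.Discard, body)
        fmt.Println("decoded bytes:", n)
    }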
- // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - requestedGzip = true - req.extraHeaders().Set("Accept-Encoding", "gzip") - } - - pc.lk.Lock() - pc.numExpectedResponses++ - pc.lk.Unlock() - - err = req.Request.write(pc.bw, pc.isProxy, req.extra) - if err != nil { - pc.close() - return - } - pc.bw.Flush() - - ch := make(chan responseAndError, 1) - pc.reqch <- requestAndChan{req.Request, ch, requestedGzip} - re := <-ch - pc.lk.Lock() - pc.numExpectedResponses-- - pc.lk.Unlock() - - return re.res, re.err -} - -func (pc *persistConn) close() { - pc.lk.Lock() - defer pc.lk.Unlock() - pc.closeLocked() -} - -func (pc *persistConn) closeLocked() { - pc.broken = true - pc.conn.Close() - pc.mutateHeaderFunc = nil -} - -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} - -func responseIsKeepAlive(res *Response) bool { - // TODO: implement. for now just always shutting down the connection. - return false -} - -// bodyEOFSignal wraps a ReadCloser but runs fn (if non-nil) at most -// once, right before the final Read() or Close() call returns, but after -// EOF has been seen. -type bodyEOFSignal struct { - body io.ReadCloser - fn func() - isClosed bool -} - -func (es *bodyEOFSignal) Read(p []byte) (n int, err error) { - n, err = es.body.Read(p) - if es.isClosed && n > 0 { - panic("http: unexpected bodyEOFSignal Read after Close; see issue 1725") - } - if err == io.EOF && es.fn != nil { - es.fn() - es.fn = nil - } - return -} - -func (es *bodyEOFSignal) Close() (err error) { - if es.isClosed { - return nil - } - es.isClosed = true - err = es.body.Close() - if err == nil && es.fn != nil { - es.fn() - es.fn = nil - } - return -} - -type readFirstCloseBoth struct { - io.ReadCloser - io.Closer -} - -func (r *readFirstCloseBoth) Close() error { - if err := r.ReadCloser.Close(); err != nil { - r.Closer.Close() - return err - } - if err := r.Closer.Close(); err != nil { - return err - } - return nil -} - -// discardOnCloseReadCloser consumes all its input on Close. -type discardOnCloseReadCloser struct { - io.ReadCloser -} - -func (d *discardOnCloseReadCloser) Close() error { - io.Copy(ioutil.Discard, d.ReadCloser) // ignore errors; likely invalid or already closed - return d.ReadCloser.Close() -} === removed file 'src/launchpad.net/gwacl/fork/http/triv.go' --- src/launchpad.net/gwacl/fork/http/triv.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/http/triv.go 1970-01-01 00:00:00 +0000 @@ -1,141 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -package main - -import ( - "bytes" - "expvar" - "flag" - "fmt" - "io" - "log" - "net/http" - "os" - "os/exec" - "strconv" - "sync" -) - -// hello world, the web server -var helloRequests = expvar.NewInt("hello-requests") - -func HelloServer(w http.ResponseWriter, req *http.Request) { - helloRequests.Add(1) - io.WriteString(w, "hello, world!\n") -} - -// Simple counter server. POSTing to it will set the value. -type Counter struct { - mu sync.Mutex // protects n - n int -} - -// This makes Counter satisfy the expvar.Var interface, so we can export -// it directly. 
-func (ctr *Counter) String() string { - ctr.mu.Lock() - defer ctr.mu.Unlock() - return fmt.Sprintf("%d", ctr.n) -} - -func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) { - ctr.mu.Lock() - defer ctr.mu.Unlock() - switch req.Method { - case "GET": - ctr.n++ - case "POST": - buf := new(bytes.Buffer) - io.Copy(buf, req.Body) - body := buf.String() - if n, err := strconv.Atoi(body); err != nil { - fmt.Fprintf(w, "bad POST: %v\nbody: [%v]\n", err, body) - } else { - ctr.n = n - fmt.Fprint(w, "counter reset\n") - } - } - fmt.Fprintf(w, "counter = %d\n", ctr.n) -} - -// simple flag server -var booleanflag = flag.Bool("boolean", true, "another flag for testing") - -func FlagServer(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprint(w, "Flags:\n") - flag.VisitAll(func(f *flag.Flag) { - if f.Value.String() != f.DefValue { - fmt.Fprintf(w, "%s = %s [default = %s]\n", f.Name, f.Value.String(), f.DefValue) - } else { - fmt.Fprintf(w, "%s = %s\n", f.Name, f.Value.String()) - } - }) -} - -// simple argument server -func ArgServer(w http.ResponseWriter, req *http.Request) { - for _, s := range os.Args { - fmt.Fprint(w, s, " ") - } -} - -// a channel (just for the fun of it) -type Chan chan int - -func ChanCreate() Chan { - c := make(Chan) - go func(c Chan) { - for x := 0; ; x++ { - c <- x - } - }(c) - return c -} - -func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) { - io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch)) -} - -// exec a program, redirecting output -func DateServer(rw http.ResponseWriter, req *http.Request) { - rw.Header().Set("Content-Type", "text/plain; charset=utf-8") - - date, err := exec.Command("/bin/date").Output() - if err != nil { - http.Error(rw, err.Error(), 500) - return - } - rw.Write(date) -} - -func Logger(w http.ResponseWriter, req *http.Request) { - log.Print(req.URL) - http.Error(w, "oops", 404) -} - -var webroot = flag.String("root", os.Getenv("HOME"), "web root directory") - -func main() { - flag.Parse() - - // The counter is published as a variable directly. - ctr := new(Counter) - expvar.Publish("counter", ctr) - http.Handle("/counter", ctr) - http.Handle("/", http.HandlerFunc(Logger)) - http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(*webroot)))) - http.Handle("/chan", ChanCreate()) - http.HandleFunc("/flags", FlagServer) - http.HandleFunc("/args", ArgServer) - http.HandleFunc("/go/hello", HelloServer) - http.HandleFunc("/date", DateServer) - err := http.ListenAndServe(":12345", nil) - if err != nil { - log.Panicln("ListenAndServe:", err) - } -} === removed directory 'src/launchpad.net/gwacl/fork/tls' === removed file 'src/launchpad.net/gwacl/fork/tls/alert.go' --- src/launchpad.net/gwacl/fork/tls/alert.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/alert.go 1970-01-01 00:00:00 +0000 @@ -1,77 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tls - -import "strconv" - -type alert uint8 - -const ( - // alert level - alertLevelWarning = 1 - alertLevelError = 2 -) - -const ( - alertCloseNotify alert = 0 - alertUnexpectedMessage alert = 10 - alertBadRecordMAC alert = 20 - alertDecryptionFailed alert = 21 - alertRecordOverflow alert = 22 - alertDecompressionFailure alert = 30 - alertHandshakeFailure alert = 40 - alertBadCertificate alert = 42 - alertUnsupportedCertificate alert = 43 - alertCertificateRevoked alert = 44 - alertCertificateExpired alert = 45 - alertCertificateUnknown alert = 46 - alertIllegalParameter alert = 47 - alertUnknownCA alert = 48 - alertAccessDenied alert = 49 - alertDecodeError alert = 50 - alertDecryptError alert = 51 - alertProtocolVersion alert = 70 - alertInsufficientSecurity alert = 71 - alertInternalError alert = 80 - alertUserCanceled alert = 90 - alertNoRenegotiation alert = 100 -) - -var alertText = map[alert]string{ - alertCloseNotify: "close notify", - alertUnexpectedMessage: "unexpected message", - alertBadRecordMAC: "bad record MAC", - alertDecryptionFailed: "decryption failed", - alertRecordOverflow: "record overflow", - alertDecompressionFailure: "decompression failure", - alertHandshakeFailure: "handshake failure", - alertBadCertificate: "bad certificate", - alertUnsupportedCertificate: "unsupported certificate", - alertCertificateRevoked: "revoked certificate", - alertCertificateExpired: "expired certificate", - alertCertificateUnknown: "unknown certificate", - alertIllegalParameter: "illegal parameter", - alertUnknownCA: "unknown certificate authority", - alertAccessDenied: "access denied", - alertDecodeError: "error decoding message", - alertDecryptError: "error decrypting message", - alertProtocolVersion: "protocol version not supported", - alertInsufficientSecurity: "insufficient security level", - alertInternalError: "internal error", - alertUserCanceled: "user canceled", - alertNoRenegotiation: "no renegotiation", -} - -func (e alert) String() string { - s, ok := alertText[e] - if ok { - return s - } - return "alert(" + strconv.Itoa(int(e)) + ")" -} - -func (e alert) Error() string { - return e.String() -} === removed file 'src/launchpad.net/gwacl/fork/tls/cipher_suites.go' --- src/launchpad.net/gwacl/fork/tls/cipher_suites.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/cipher_suites.go 1970-01-01 00:00:00 +0000 @@ -1,188 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/hmac" - "crypto/rc4" - "crypto/sha1" - "crypto/x509" - "hash" -) - -// a keyAgreement implements the client and server side of a TLS key agreement -// protocol by generating and processing key exchange messages. -type keyAgreement interface { - // On the server side, the first two methods are called in order. - - // In the case that the key agreement protocol doesn't use a - // ServerKeyExchange message, generateServerKeyExchange can return nil, - // nil. - generateServerKeyExchange(*Config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error) - processClientKeyExchange(*Config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error) - - // On the client side, the next two methods are called in order. - - // This method may not be called if the server doesn't send a - // ServerKeyExchange message. 
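The alert type above is a table-driven Stringer: known values map to text, unknown ones fall back to a numeric form. A trimmed, runnable sketch of the same pattern (only two table entries kept):

package main

import (
	"fmt"
	"strconv"
)

type alert uint8

// A trimmed copy of the lookup table; unknown values fall back to a
// numeric "alert(N)" form instead of failing.
var alertText = map[alert]string{
	0:  "close notify",
	10: "unexpected message",
}

func (e alert) String() string {
	if s, ok := alertText[e]; ok {
		return s
	}
	return "alert(" + strconv.Itoa(int(e)) + ")"
}

func main() {
	fmt.Println(alert(10)) // unexpected message
	fmt.Println(alert(47)) // alert(47), not in the trimmed table
}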
- processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error - generateClientKeyExchange(*Config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) -} - -// A cipherSuite is a specific combination of key agreement, cipher and MAC -// function. All cipher suites currently assume RSA key agreement. -type cipherSuite struct { - id uint16 - // the lengths, in bytes, of the key material needed for each component. - keyLen int - macLen int - ivLen int - ka func() keyAgreement - // If elliptic is set, a server will only consider this ciphersuite if - // the ClientHello indicated that the client supports an elliptic curve - // and point format that we can handle. - elliptic bool - cipher func(key, iv []byte, isRead bool) interface{} - mac func(version uint16, macKey []byte) macFunction -} - -var cipherSuites = []*cipherSuite{ - {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, false, cipherRC4, macSHA1}, - {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, false, cipher3DES, macSHA1}, - {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, false, cipherAES, macSHA1}, - {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, true, cipherRC4, macSHA1}, - {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, true, cipher3DES, macSHA1}, - {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, true, cipherAES, macSHA1}, -} - -func cipherRC4(key, iv []byte, isRead bool) interface{} { - cipher, _ := rc4.NewCipher(key) - return cipher -} - -func cipher3DES(key, iv []byte, isRead bool) interface{} { - block, _ := des.NewTripleDESCipher(key) - if isRead { - return cipher.NewCBCDecrypter(block, iv) - } - return cipher.NewCBCEncrypter(block, iv) -} - -func cipherAES(key, iv []byte, isRead bool) interface{} { - block, _ := aes.NewCipher(key) - if isRead { - return cipher.NewCBCDecrypter(block, iv) - } - return cipher.NewCBCEncrypter(block, iv) -} - -// macSHA1 returns a macFunction for the given protocol version. 
-func macSHA1(version uint16, key []byte) macFunction { - if version == versionSSL30 { - mac := ssl30MAC{ - h: sha1.New(), - key: make([]byte, len(key)), - } - copy(mac.key, key) - return mac - } - return tls10MAC{hmac.New(sha1.New, key)} -} - -type macFunction interface { - Size() int - MAC(digestBuf, seq, data []byte) []byte -} - -// ssl30MAC implements the SSLv3 MAC function, as defined in -// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 5.2.3.1 -type ssl30MAC struct { - h hash.Hash - key []byte -} - -func (s ssl30MAC) Size() int { - return s.h.Size() -} - -var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36} - -var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c} - -func (s ssl30MAC) MAC(digestBuf, seq, record []byte) []byte { - padLength := 48 - if s.h.Size() == 20 { - padLength = 40 - } - - s.h.Reset() - s.h.Write(s.key) - s.h.Write(ssl30Pad1[:padLength]) - s.h.Write(seq) - s.h.Write(record[:1]) - s.h.Write(record[3:5]) - s.h.Write(record[recordHeaderLen:]) - digestBuf = s.h.Sum(digestBuf[:0]) - - s.h.Reset() - s.h.Write(s.key) - s.h.Write(ssl30Pad2[:padLength]) - s.h.Write(digestBuf) - return s.h.Sum(digestBuf[:0]) -} - -// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, section 6.2.3. -type tls10MAC struct { - h hash.Hash -} - -func (s tls10MAC) Size() int { - return s.h.Size() -} - -func (s tls10MAC) MAC(digestBuf, seq, record []byte) []byte { - s.h.Reset() - s.h.Write(seq) - s.h.Write(record) - return s.h.Sum(digestBuf[:0]) -} - -func rsaKA() keyAgreement { - return rsaKeyAgreement{} -} - -func ecdheRSAKA() keyAgreement { - return new(ecdheRSAKeyAgreement) -} - -// mutualCipherSuite returns a cipherSuite given a list of supported -// ciphersuites and the id requested by the peer. -func mutualCipherSuite(have []uint16, want uint16) *cipherSuite { - for _, id := range have { - if id == want { - for _, suite := range cipherSuites { - if suite.id == want { - return suite - } - } - return nil - } - } - return nil -} - -// A list of the possible cipher suite ids. Taken from -// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml -const ( - TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 - TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a - TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f - TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011 - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012 - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013 -) === removed file 'src/launchpad.net/gwacl/fork/tls/common.go' --- src/launchpad.net/gwacl/fork/tls/common.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/common.go 1970-01-01 00:00:00 +0000 @@ -1,322 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
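mutualCipherSuite above honors the peer's requested suite id only if it appears in our configured list. A self-contained sketch of that selection rule, reusing the same suite ids (the helper name is hypothetical):

package main

import "fmt"

const (
	TLS_RSA_WITH_RC4_128_SHA           uint16 = 0x0005
	TLS_RSA_WITH_AES_128_CBC_SHA       uint16 = 0x002f
	TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
)

// mutualSuiteID reduces mutualCipherSuite to its core rule: accept the
// wanted id only when it is present in the supported list.
func mutualSuiteID(have []uint16, want uint16) (uint16, bool) {
	for _, id := range have {
		if id == want {
			return id, true
		}
	}
	return 0, false
}

func main() {
	have := []uint16{TLS_RSA_WITH_RC4_128_SHA, TLS_RSA_WITH_AES_128_CBC_SHA}
	fmt.Println(mutualSuiteID(have, TLS_RSA_WITH_AES_128_CBC_SHA))       // 47 true
	fmt.Println(mutualSuiteID(have, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)) // 0 false
}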
- -package tls - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "io" - "strings" - "sync" - "time" -) - -const ( - maxPlaintext = 16384 // maximum plaintext payload length - maxCiphertext = 16384 + 2048 // maximum ciphertext payload length - recordHeaderLen = 5 // record header length - maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB) - - versionSSL30 = 0x0300 - versionTLS10 = 0x0301 - - minVersion = versionSSL30 - maxVersion = versionTLS10 -) - -// TLS record types. -type recordType uint8 - -const ( - recordTypeChangeCipherSpec recordType = 20 - recordTypeAlert recordType = 21 - recordTypeHandshake recordType = 22 - recordTypeApplicationData recordType = 23 -) - -// TLS handshake message types. -const ( - typeHelloRequest uint8 = 0 - typeClientHello uint8 = 1 - typeServerHello uint8 = 2 - typeCertificate uint8 = 11 - typeServerKeyExchange uint8 = 12 - typeCertificateRequest uint8 = 13 - typeServerHelloDone uint8 = 14 - typeCertificateVerify uint8 = 15 - typeClientKeyExchange uint8 = 16 - typeFinished uint8 = 20 - typeCertificateStatus uint8 = 22 - typeNextProtocol uint8 = 67 // Not IANA assigned -) - -// TLS compression types. -const ( - compressionNone uint8 = 0 -) - -// TLS extension numbers -var ( - extensionServerName uint16 = 0 - extensionStatusRequest uint16 = 5 - extensionSupportedCurves uint16 = 10 - extensionSupportedPoints uint16 = 11 - extensionNextProtoNeg uint16 = 13172 // not IANA assigned -) - -// TLS Elliptic Curves -// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 -var ( - curveP256 uint16 = 23 - curveP384 uint16 = 24 - curveP521 uint16 = 25 -) - -// TLS Elliptic Curve Point Formats -// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-9 -var ( - pointFormatUncompressed uint8 = 0 -) - -// TLS CertificateStatusType (RFC 3546) -const ( - statusTypeOCSP uint8 = 1 -) - -// Certificate types (for certificateRequestMsg) -const ( - certTypeRSASign = 1 // A certificate containing an RSA key - certTypeDSSSign = 2 // A certificate containing a DSA key - certTypeRSAFixedDH = 3 // A certificate containing a static DH key - certTypeDSSFixedDH = 4 // A certificate containing a static DH key - // Rest of these are reserved by the TLS spec -) - -// ConnectionState records basic TLS details about the connection. -type ConnectionState struct { - HandshakeComplete bool - CipherSuite uint16 - NegotiatedProtocol string - NegotiatedProtocolIsMutual bool - - // ServerName contains the server name indicated by the client, if any. - // (Only valid for server connections.) - ServerName string - - // the certificate chain that was presented by the other side - PeerCertificates []*x509.Certificate - // the verified certificate chains built from PeerCertificates. - VerifiedChains [][]*x509.Certificate -} - -// ClientAuthType declares the policy the server will follow for -// TLS Client Authentication. -type ClientAuthType int - -const ( - NoClientCert ClientAuthType = iota - RequestClientCert - RequireAnyClientCert - VerifyClientCertIfGiven - RequireAndVerifyClientCert -) - -// A Config structure is used to configure a TLS client or server. After one -// has been passed to a TLS function it must not be modified. -type Config struct { - // Rand provides the source of entropy for nonces and RSA blinding. - // If Rand is nil, TLS uses the cryptographic random reader in package - // crypto/rand. - Rand io.Reader - - // Time returns the current time as the number of seconds since the epoch. 
- // If Time is nil, TLS uses time.Now. - Time func() time.Time - - // Certificates contains one or more certificate chains - // to present to the other side of the connection. - // Server configurations must include at least one certificate. - Certificates []Certificate - - // NameToCertificate maps from a certificate name to an element of - // Certificates. Note that a certificate name can be of the form - // '*.example.com' and so doesn't have to be a domain name as such. - // See Config.BuildNameToCertificate - // The nil value causes the first element of Certificates to be used - // for all connections. - NameToCertificate map[string]*Certificate - - // RootCAs defines the set of root certificate authorities - // that clients use when verifying server certificates. - // If RootCAs is nil, TLS uses the host's root CA set. - RootCAs *x509.CertPool - - // NextProtos is a list of supported, application level protocols. - NextProtos []string - - // ServerName is included in the client's handshake to support virtual - // hosting. - ServerName string - - // ClientAuth determines the server's policy for - // TLS Client Authentication. The default is NoClientCert. - ClientAuth ClientAuthType - - // ClientCAs defines the set of root certificate authorities - // that servers use if required to verify a client certificate - // by the policy in ClientAuth. - ClientCAs *x509.CertPool - - // InsecureSkipVerify controls whether a client verifies the - // server's certificate chain and host name. - // If InsecureSkipVerify is true, TLS accepts any certificate - // presented by the server and any host name in that certificate. - // In this mode, TLS is susceptible to man-in-the-middle attacks. - // This should be used only for testing. - InsecureSkipVerify bool - - // CipherSuites is a list of supported cipher suites. If CipherSuites - // is nil, TLS uses a list of suites supported by the implementation. - CipherSuites []uint16 -} - -func (c *Config) rand() io.Reader { - r := c.Rand - if r == nil { - return rand.Reader - } - return r -} - -func (c *Config) time() time.Time { - t := c.Time - if t == nil { - t = time.Now - } - return t() -} - -func (c *Config) cipherSuites() []uint16 { - s := c.CipherSuites - if s == nil { - s = defaultCipherSuites() - } - return s -} - -// getCertificateForName returns the best certificate for the given name, -// defaulting to the first element of c.Certificates if there are no good -// options. -func (c *Config) getCertificateForName(name string) *Certificate { - if len(c.Certificates) == 1 || c.NameToCertificate == nil { - // There's only one choice, so no point doing any work. - return &c.Certificates[0] - } - - name = strings.ToLower(name) - for len(name) > 0 && name[len(name)-1] == '.' { - name = name[:len(name)-1] - } - - if cert, ok := c.NameToCertificate[name]; ok { - return cert - } - - // try replacing labels in the name with wildcards until we get a - // match. - labels := strings.Split(name, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if cert, ok := c.NameToCertificate[candidate]; ok { - return cert - } - } - - // If nothing matches, return the first certificate. - return &c.Certificates[0] -} - -// BuildNameToCertificate parses c.Certificates and builds c.NameToCertificate -// from the CommonName and SubjectAlternateName fields of each of the leaf -// certificates. 
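The wildcard fallback in getCertificateForName above rewrites labels cumulatively: once a label becomes "*" it stays that way for later candidates. A sketch listing the names it would try, in order (the helper name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// wildcardCandidates lists the lookups getCertificateForName performs:
// each label is replaced by "*" in turn, without restoring earlier ones.
func wildcardCandidates(name string) []string {
	labels := strings.Split(strings.ToLower(name), ".")
	out := make([]string, 0, len(labels))
	for i := range labels {
		labels[i] = "*"
		out = append(out, strings.Join(labels, "."))
	}
	return out
}

func main() {
	fmt.Println(wildcardCandidates("a.b.example.com"))
	// [*.b.example.com *.*.example.com *.*.*.com *.*.*.*]
}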
-func (c *Config) BuildNameToCertificate() { - c.NameToCertificate = make(map[string]*Certificate) - for i := range c.Certificates { - cert := &c.Certificates[i] - x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - continue - } - if len(x509Cert.Subject.CommonName) > 0 { - c.NameToCertificate[x509Cert.Subject.CommonName] = cert - } - for _, san := range x509Cert.DNSNames { - c.NameToCertificate[san] = cert - } - } -} - -// A Certificate is a chain of one or more certificates, leaf first. -type Certificate struct { - Certificate [][]byte - PrivateKey crypto.PrivateKey // supported types: *rsa.PrivateKey - // OCSPStaple contains an optional OCSP response which will be served - // to clients that request it. - OCSPStaple []byte - // Leaf is the parsed form of the leaf certificate, which may be - // initialized using x509.ParseCertificate to reduce per-handshake - // processing for TLS clients doing client authentication. If nil, the - // leaf certificate will be parsed as needed. - Leaf *x509.Certificate -} - -// A TLS record. -type record struct { - contentType recordType - major, minor uint8 - payload []byte -} - -type handshakeMessage interface { - marshal() []byte - unmarshal([]byte) bool -} - -// mutualVersion returns the protocol version to use given the advertised -// version of the peer. -func mutualVersion(vers uint16) (uint16, bool) { - if vers < minVersion { - return 0, false - } - if vers > maxVersion { - vers = maxVersion - } - return vers, true -} - -var emptyConfig Config - -func defaultConfig() *Config { - return &emptyConfig -} - -var ( - once sync.Once - varDefaultCipherSuites []uint16 -) - -func defaultCipherSuites() []uint16 { - once.Do(initDefaultCipherSuites) - return varDefaultCipherSuites -} - -func initDefaultCipherSuites() { - varDefaultCipherSuites = make([]uint16, len(cipherSuites)) - for i, suite := range cipherSuites { - varDefaultCipherSuites[i] = suite.id - } -} === removed file 'src/launchpad.net/gwacl/fork/tls/conn.go' --- src/launchpad.net/gwacl/fork/tls/conn.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/conn.go 1970-01-01 00:00:00 +0000 @@ -1,886 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TLS low level connection and record layer - -package tls - -import ( - "bytes" - "crypto/cipher" - "crypto/subtle" - "crypto/x509" - "errors" - "io" - "net" - "sync" - "time" -) - -// A Conn represents a secured connection. -// It implements the net.Conn interface. -type Conn struct { - // constant - conn net.Conn - isClient bool - - // constant after handshake; protected by handshakeMutex - handshakeMutex sync.Mutex // handshakeMutex < in.Mutex, out.Mutex, errMutex - vers uint16 // TLS version - haveVers bool // version has been negotiated - config *Config // configuration passed to constructor - handshakeComplete bool - cipherSuite uint16 - ocspResponse []byte // stapled OCSP response - peerCertificates []*x509.Certificate - // verifiedChains contains the certificate chains that we built, as - // opposed to the ones presented by the server. - verifiedChains [][]*x509.Certificate - // serverName contains the server name indicated by the client, if any. 
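mutualVersion above clamps a too-new peer version down to maxVersion and rejects only versions below SSL 3.0. A runnable sketch with the same constants:

package main

import "fmt"

const (
	minVersion = 0x0300 // SSL 3.0
	maxVersion = 0x0301 // TLS 1.0
)

// mutualVersion mirrors the function above: clamp high versions, reject low.
func mutualVersion(vers uint16) (uint16, bool) {
	if vers < minVersion {
		return 0, false
	}
	if vers > maxVersion {
		vers = maxVersion
	}
	return vers, true
}

func main() {
	fmt.Println(mutualVersion(0x0303)) // 769 true (TLS 1.2 clamped to TLS 1.0)
	fmt.Println(mutualVersion(0x0200)) // 0 false (below SSL 3.0)
}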
- serverName string
-
- clientProtocol string
- clientProtocolFallback bool
-
- // first permanent error
- errMutex sync.Mutex
- err error
-
- // input/output
- in, out halfConn // in.Mutex < out.Mutex
- rawInput *block // raw input, right off the wire
- input *block // application data waiting to be read
- hand bytes.Buffer // handshake data waiting to be read
-
- tmp [16]byte
-}
-
-func (c *Conn) setError(err error) error {
- c.errMutex.Lock()
- defer c.errMutex.Unlock()
-
- if c.err == nil {
- c.err = err
- }
- return err
-}
-
-func (c *Conn) error() error {
- c.errMutex.Lock()
- defer c.errMutex.Unlock()
-
- return c.err
-}
-
-// Access to net.Conn methods.
-// Cannot just embed net.Conn because that would
-// export the struct field too.
-
-// LocalAddr returns the local network address.
-func (c *Conn) LocalAddr() net.Addr {
- return c.conn.LocalAddr()
-}
-
-// RemoteAddr returns the remote network address.
-func (c *Conn) RemoteAddr() net.Addr {
- return c.conn.RemoteAddr()
-}
-
-// SetDeadline sets the read and write deadlines associated with the connection.
-// A zero value for t means Read and Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetDeadline(t time.Time) error {
- return c.conn.SetDeadline(t)
-}
-
-// SetReadDeadline sets the read deadline on the underlying connection.
-// A zero value for t means Read will not time out.
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return c.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline sets the write deadline on the underlying connection.
-// A zero value for t means Write will not time out.
-// After a Write has timed out, the TLS state is corrupt and all future writes will return the same error.
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return c.conn.SetWriteDeadline(t)
-}
-
-// A halfConn represents one direction of the record layer
-// connection, either sending or receiving.
-type halfConn struct {
- sync.Mutex
- version uint16 // protocol version
- cipher interface{} // cipher algorithm
- mac macFunction
- seq [8]byte // 64-bit sequence number
- bfree *block // list of free blocks
-
- nextCipher interface{} // next encryption state
- nextMac macFunction // next MAC algorithm
-
- // used to save allocating a new buffer for each MAC.
- inDigestBuf, outDigestBuf []byte
-}
-
-// prepareCipherSpec sets the encryption and MAC states
-// that a subsequent changeCipherSpec will use.
-func (hc *halfConn) prepareCipherSpec(version uint16, cipher interface{}, mac macFunction) {
- hc.version = version
- hc.nextCipher = cipher
- hc.nextMac = mac
-}
-
-// changeCipherSpec changes the encryption and MAC states
-// to the ones previously passed to prepareCipherSpec.
-func (hc *halfConn) changeCipherSpec() error {
- if hc.nextCipher == nil {
- return alertInternalError
- }
- hc.cipher = hc.nextCipher
- hc.mac = hc.nextMac
- hc.nextCipher = nil
- hc.nextMac = nil
- for i := range hc.seq {
- hc.seq[i] = 0
- }
- return nil
-}
-
-// incSeq increments the sequence number.
-func (hc *halfConn) incSeq() {
- for i := 7; i >= 0; i-- {
- hc.seq[i]++
- if hc.seq[i] != 0 {
- return
- }
- }
-
- // Not allowed to let sequence number wrap.
- // Instead, must renegotiate before it does.
- // Not likely enough to bother.
- panic("TLS: sequence number wraparound")
-}
-
-// resetSeq resets the sequence number to zero.
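incSeq above treats seq as a 64-bit big-endian counter, carrying from the last byte leftward and panicking on wraparound rather than ever reusing a sequence number. A runnable sketch of the same loop:

package main

import "fmt"

// incSeq increments a big-endian 64-bit counter held in an 8-byte array,
// exactly as the method above does.
func incSeq(seq *[8]byte) {
	for i := 7; i >= 0; i-- {
		seq[i]++
		if seq[i] != 0 {
			return // no carry needed
		}
	}
	panic("sequence number wraparound")
}

func main() {
	seq := [8]byte{0, 0, 0, 0, 0, 0, 0, 255}
	incSeq(&seq)
	fmt.Println(seq) // [0 0 0 0 0 0 1 0], the carry propagated one byte left
}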
-func (hc *halfConn) resetSeq() { - for i := range hc.seq { - hc.seq[i] = 0 - } -} - -// removePadding returns an unpadded slice, in constant time, which is a prefix -// of the input. It also returns a byte which is equal to 255 if the padding -// was valid and 0 otherwise. See RFC 2246, section 6.2.3.2 -func removePadding(payload []byte) ([]byte, byte) { - if len(payload) < 1 { - return payload, 0 - } - - paddingLen := payload[len(payload)-1] - t := uint(len(payload)-1) - uint(paddingLen) - // if len(payload) >= (paddingLen - 1) then the MSB of t is zero - good := byte(int32(^t) >> 31) - - toCheck := 255 // the maximum possible padding length - // The length of the padded data is public, so we can use an if here - if toCheck+1 > len(payload) { - toCheck = len(payload) - 1 - } - - for i := 0; i < toCheck; i++ { - t := uint(paddingLen) - uint(i) - // if i <= paddingLen then the MSB of t is zero - mask := byte(int32(^t) >> 31) - b := payload[len(payload)-1-i] - good &^= mask&paddingLen ^ mask&b - } - - // We AND together the bits of good and replicate the result across - // all the bits. - good &= good << 4 - good &= good << 2 - good &= good << 1 - good = uint8(int8(good) >> 7) - - toRemove := good&paddingLen + 1 - return payload[:len(payload)-int(toRemove)], good -} - -// removePaddingSSL30 is a replacement for removePadding in the case that the -// protocol version is SSLv3. In this version, the contents of the padding -// are random and cannot be checked. -func removePaddingSSL30(payload []byte) ([]byte, byte) { - if len(payload) < 1 { - return payload, 0 - } - - paddingLen := int(payload[len(payload)-1]) + 1 - if paddingLen > len(payload) { - return payload, 0 - } - - return payload[:len(payload)-paddingLen], 255 -} - -func roundUp(a, b int) int { - return a + (b-a%b)%b -} - -// decrypt checks and strips the mac and decrypts the data in b. -func (hc *halfConn) decrypt(b *block) (bool, alert) { - // pull out payload - payload := b.data[recordHeaderLen:] - - macSize := 0 - if hc.mac != nil { - macSize = hc.mac.Size() - } - - paddingGood := byte(255) - - // decrypt - if hc.cipher != nil { - switch c := hc.cipher.(type) { - case cipher.Stream: - c.XORKeyStream(payload, payload) - case cipher.BlockMode: - blockSize := c.BlockSize() - - if len(payload)%blockSize != 0 || len(payload) < roundUp(macSize+1, blockSize) { - return false, alertBadRecordMAC - } - - c.CryptBlocks(payload, payload) - if hc.version == versionSSL30 { - payload, paddingGood = removePaddingSSL30(payload) - } else { - payload, paddingGood = removePadding(payload) - } - b.resize(recordHeaderLen + len(payload)) - - // note that we still have a timing side-channel in the - // MAC check, below. An attacker can align the record - // so that a correct padding will cause one less hash - // block to be calculated. Then they can iteratively - // decrypt a record by breaking each byte. See - // "Password Interception in a SSL/TLS Channel", Brice - // Canvel et al. - // - // However, our behavior matches OpenSSL, so we leak - // only as much as they do. 
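// A worked illustration of removePadding above (this example is not part of
// the original file): for payload "data" followed by {2, 2, 2}, paddingLen
// is 2, the trailing paddingLen+1 bytes all equal 2, so the result is
// ([]byte("data"), 255); a single mismatched padding byte drives good to 0
// without ever branching on secret data.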
- default: - panic("unknown cipher type") - } - } - - // check, strip mac - if hc.mac != nil { - if len(payload) < macSize { - return false, alertBadRecordMAC - } - - // strip mac off payload, b.data - n := len(payload) - macSize - b.data[3] = byte(n >> 8) - b.data[4] = byte(n) - b.resize(recordHeaderLen + n) - remoteMAC := payload[n:] - localMAC := hc.mac.MAC(hc.inDigestBuf, hc.seq[0:], b.data) - hc.incSeq() - - if subtle.ConstantTimeCompare(localMAC, remoteMAC) != 1 || paddingGood != 255 { - return false, alertBadRecordMAC - } - hc.inDigestBuf = localMAC - } - - return true, 0 -} - -// padToBlockSize calculates the needed padding block, if any, for a payload. -// On exit, prefix aliases payload and extends to the end of the last full -// block of payload. finalBlock is a fresh slice which contains the contents of -// any suffix of payload as well as the needed padding to make finalBlock a -// full block. -func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) { - overrun := len(payload) % blockSize - paddingLen := blockSize - overrun - prefix = payload[:len(payload)-overrun] - finalBlock = make([]byte, blockSize) - copy(finalBlock, payload[len(payload)-overrun:]) - for i := overrun; i < blockSize; i++ { - finalBlock[i] = byte(paddingLen - 1) - } - return -} - -// encrypt encrypts and macs the data in b. -func (hc *halfConn) encrypt(b *block) (bool, alert) { - // mac - if hc.mac != nil { - mac := hc.mac.MAC(hc.outDigestBuf, hc.seq[0:], b.data) - hc.incSeq() - - n := len(b.data) - b.resize(n + len(mac)) - copy(b.data[n:], mac) - hc.outDigestBuf = mac - } - - payload := b.data[recordHeaderLen:] - - // encrypt - if hc.cipher != nil { - switch c := hc.cipher.(type) { - case cipher.Stream: - c.XORKeyStream(payload, payload) - case cipher.BlockMode: - prefix, finalBlock := padToBlockSize(payload, c.BlockSize()) - b.resize(recordHeaderLen + len(prefix) + len(finalBlock)) - c.CryptBlocks(b.data[recordHeaderLen:], prefix) - c.CryptBlocks(b.data[recordHeaderLen+len(prefix):], finalBlock) - default: - panic("unknown cipher type") - } - } - - // update length to include MAC and any block padding needed. - n := len(b.data) - recordHeaderLen - b.data[3] = byte(n >> 8) - b.data[4] = byte(n) - - return true, 0 -} - -// A block is a simple data buffer. -type block struct { - data []byte - off int // index for Read - link *block -} - -// resize resizes block to be n bytes, growing if necessary. -func (b *block) resize(n int) { - if n > cap(b.data) { - b.reserve(n) - } - b.data = b.data[0:n] -} - -// reserve makes sure that block contains a capacity of at least n bytes. -func (b *block) reserve(n int) { - if cap(b.data) >= n { - return - } - m := cap(b.data) - if m == 0 { - m = 1024 - } - for m < n { - m *= 2 - } - data := make([]byte, len(b.data), m) - copy(data, b.data) - b.data = data -} - -// readFromUntil reads from r into b until b contains at least n bytes -// or else returns an error. -func (b *block) readFromUntil(r io.Reader, n int) error { - // quick case - if len(b.data) >= n { - return nil - } - - // read until have enough. - b.reserve(n) - for { - m, err := r.Read(b.data[len(b.data):cap(b.data)]) - b.data = b.data[0 : len(b.data)+m] - if len(b.data) >= n { - break - } - if err != nil { - return err - } - } - return nil -} - -func (b *block) Read(p []byte) (n int, err error) { - n = copy(p, b.data[b.off:]) - b.off += n - return -} - -// newBlock allocates a new block, from hc's free list if possible. 
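padToBlockSize above produces TLS-style CBC padding: paddingLen bytes complete the final block, each holding paddingLen-1. A runnable sketch of the same function:

package main

import "fmt"

// padToBlockSize splits payload into the full blocks (prefix) and a fresh
// final block holding the leftover bytes plus padding, as above.
func padToBlockSize(payload []byte, blockSize int) (prefix, finalBlock []byte) {
	overrun := len(payload) % blockSize
	paddingLen := blockSize - overrun
	prefix = payload[:len(payload)-overrun]
	finalBlock = make([]byte, blockSize)
	copy(finalBlock, payload[len(payload)-overrun:])
	for i := overrun; i < blockSize; i++ {
		finalBlock[i] = byte(paddingLen - 1)
	}
	return
}

func main() {
	prefix, final := padToBlockSize([]byte("abcdefgh12"), 8)
	fmt.Printf("%q %v\n", prefix, final)
	// "abcdefgh" [49 50 5 5 5 5 5 5]: 6 padding bytes, each paddingLen-1 = 5
}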
-func (hc *halfConn) newBlock() *block { - b := hc.bfree - if b == nil { - return new(block) - } - hc.bfree = b.link - b.link = nil - b.resize(0) - return b -} - -// freeBlock returns a block to hc's free list. -// The protocol is such that each side only has a block or two on -// its free list at a time, so there's no need to worry about -// trimming the list, etc. -func (hc *halfConn) freeBlock(b *block) { - b.link = hc.bfree - hc.bfree = b -} - -// splitBlock splits a block after the first n bytes, -// returning a block with those n bytes and a -// block with the remainder. the latter may be nil. -func (hc *halfConn) splitBlock(b *block, n int) (*block, *block) { - if len(b.data) <= n { - return b, nil - } - bb := hc.newBlock() - bb.resize(len(b.data) - n) - copy(bb.data, b.data[n:]) - b.data = b.data[0:n] - return b, bb -} - -// readRecord reads the next TLS record from the connection -// and updates the record layer state. -// c.in.Mutex <= L; c.input == nil. -func (c *Conn) readRecord(want recordType) error { - // Caller must be in sync with connection: - // handshake data if handshake not yet completed, - // else application data. - switch want { - default: - return c.sendAlert(alertInternalError) - case recordTypeHandshake, recordTypeChangeCipherSpec: - if c.handshakeComplete { - return c.sendAlert(alertInternalError) - } - case recordTypeApplicationData: - if !c.handshakeComplete { - return c.sendAlert(alertInternalError) - } - } - -Again: - if c.rawInput == nil { - c.rawInput = c.in.newBlock() - } - b := c.rawInput - - // Read header, payload. - if err := b.readFromUntil(c.conn, recordHeaderLen); err != nil { - // RFC suggests that EOF without an alertCloseNotify is - // an error, but popular web sites seem to do this, - // so we can't make it an error. - // if err == io.EOF { - // err = io.ErrUnexpectedEOF - // } - if e, ok := err.(net.Error); !ok || !e.Temporary() { - c.setError(err) - } - return err - } - typ := recordType(b.data[0]) - vers := uint16(b.data[1])<<8 | uint16(b.data[2]) - n := int(b.data[3])<<8 | int(b.data[4]) - if c.haveVers && vers != c.vers { - return c.sendAlert(alertProtocolVersion) - } - if n > maxCiphertext { - return c.sendAlert(alertRecordOverflow) - } - if !c.haveVers { - // First message, be extra suspicious: - // this might not be a TLS client. - // Bail out before reading a full 'body', if possible. - // The current max version is 3.1. - // If the version is >= 16.0, it's probably not real. - // Similarly, a clientHello message encodes in - // well under a kilobyte. If the length is >= 12 kB, - // it's probably not real. - if (typ != recordTypeAlert && typ != want) || vers >= 0x1000 || n >= 0x3000 { - return c.sendAlert(alertUnexpectedMessage) - } - } - if err := b.readFromUntil(c.conn, recordHeaderLen+n); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if e, ok := err.(net.Error); !ok || !e.Temporary() { - c.setError(err) - } - return err - } - - // Process message. 
- b, c.rawInput = c.in.splitBlock(b, recordHeaderLen+n) - b.off = recordHeaderLen - if ok, err := c.in.decrypt(b); !ok { - return c.sendAlert(err) - } - data := b.data[b.off:] - if len(data) > maxPlaintext { - c.sendAlert(alertRecordOverflow) - c.in.freeBlock(b) - return c.error() - } - - switch typ { - default: - c.sendAlert(alertUnexpectedMessage) - - case recordTypeAlert: - if len(data) != 2 { - c.sendAlert(alertUnexpectedMessage) - break - } - if alert(data[1]) == alertCloseNotify { - c.setError(io.EOF) - break - } - switch data[0] { - case alertLevelWarning: - // drop on the floor - c.in.freeBlock(b) - goto Again - case alertLevelError: - c.setError(&net.OpError{Op: "remote error", Err: alert(data[1])}) - default: - c.sendAlert(alertUnexpectedMessage) - } - - case recordTypeChangeCipherSpec: - if typ != want || len(data) != 1 || data[0] != 1 { - c.sendAlert(alertUnexpectedMessage) - break - } - err := c.in.changeCipherSpec() - if err != nil { - c.sendAlert(err.(alert)) - } - - case recordTypeApplicationData: - if typ != want { - c.sendAlert(alertUnexpectedMessage) - break - } - c.input = b - b = nil - - case recordTypeHandshake: - // TODO(rsc): Should at least pick off connection close. - if typ != want && !c.isClient { - return c.sendAlert(alertNoRenegotiation) - } - c.hand.Write(data) - } - - if b != nil { - c.in.freeBlock(b) - } - return c.error() -} - -// sendAlert sends a TLS alert message. -// c.out.Mutex <= L. -func (c *Conn) sendAlertLocked(err alert) error { - c.tmp[0] = alertLevelError - if err == alertNoRenegotiation { - c.tmp[0] = alertLevelWarning - } - c.tmp[1] = byte(err) - c.writeRecord(recordTypeAlert, c.tmp[0:2]) - // closeNotify is a special case in that it isn't an error: - if err != alertCloseNotify { - return c.setError(&net.OpError{Op: "local error", Err: err}) - } - return nil -} - -// sendAlert sends a TLS alert message. -// L < c.out.Mutex. -func (c *Conn) sendAlert(err alert) error { - c.out.Lock() - defer c.out.Unlock() - return c.sendAlertLocked(err) -} - -// writeRecord writes a TLS record with the given type and payload -// to the connection and updates the record layer state. -// c.out.Mutex <= L. -func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) { - b := c.out.newBlock() - for len(data) > 0 { - m := len(data) - if m > maxPlaintext { - m = maxPlaintext - } - b.resize(recordHeaderLen + m) - b.data[0] = byte(typ) - vers := c.vers - if vers == 0 { - vers = maxVersion - } - b.data[1] = byte(vers >> 8) - b.data[2] = byte(vers) - b.data[3] = byte(m >> 8) - b.data[4] = byte(m) - copy(b.data[recordHeaderLen:], data) - c.out.encrypt(b) - _, err = c.conn.Write(b.data) - if err != nil { - break - } - n += m - data = data[m:] - } - c.out.freeBlock(b) - - if typ == recordTypeChangeCipherSpec { - err = c.out.changeCipherSpec() - if err != nil { - // Cannot call sendAlert directly, - // because we already hold c.out.Mutex. - c.tmp[0] = alertLevelError - c.tmp[1] = byte(err.(alert)) - c.writeRecord(recordTypeAlert, c.tmp[0:2]) - c.err = &net.OpError{Op: "local error", Err: err} - return n, c.err - } - } - return -} - -// readHandshake reads the next handshake message from -// the record layer. -// c.in.Mutex < L; c.out.Mutex < L. 
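writeRecord above emits a 5-byte header per record: content type, a 2-byte version, and a 2-byte big-endian length, followed by at most maxPlaintext bytes of payload. A tiny sketch of just that header layout (the helper name is hypothetical):

package main

import "fmt"

// recordHeader assembles the same five bytes writeRecord writes into
// b.data[0:5]: type, version (big-endian), length (big-endian).
func recordHeader(typ byte, vers, n uint16) [5]byte {
	return [5]byte{typ, byte(vers >> 8), byte(vers), byte(n >> 8), byte(n)}
}

func main() {
	fmt.Println(recordHeader(22, 0x0301, 512)) // [22 3 1 2 0]: handshake, TLS 1.0, 512 bytes
}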
-func (c *Conn) readHandshake() (interface{}, error) { - for c.hand.Len() < 4 { - if c.err != nil { - return nil, c.err - } - if err := c.readRecord(recordTypeHandshake); err != nil { - return nil, err - } - } - - data := c.hand.Bytes() - n := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - if n > maxHandshake { - c.sendAlert(alertInternalError) - return nil, c.err - } - for c.hand.Len() < 4+n { - if c.err != nil { - return nil, c.err - } - if err := c.readRecord(recordTypeHandshake); err != nil { - return nil, err - } - } - data = c.hand.Next(4 + n) - var m handshakeMessage - switch data[0] { - case typeHelloRequest: - m = new(helloRequestMsg) - case typeClientHello: - m = new(clientHelloMsg) - case typeServerHello: - m = new(serverHelloMsg) - case typeCertificate: - m = new(certificateMsg) - case typeCertificateRequest: - m = new(certificateRequestMsg) - case typeCertificateStatus: - m = new(certificateStatusMsg) - case typeServerKeyExchange: - m = new(serverKeyExchangeMsg) - case typeServerHelloDone: - m = new(serverHelloDoneMsg) - case typeClientKeyExchange: - m = new(clientKeyExchangeMsg) - case typeCertificateVerify: - m = new(certificateVerifyMsg) - case typeNextProtocol: - m = new(nextProtoMsg) - case typeFinished: - m = new(finishedMsg) - default: - c.sendAlert(alertUnexpectedMessage) - return nil, alertUnexpectedMessage - } - - // The handshake message unmarshallers - // expect to be able to keep references to data, - // so pass in a fresh copy that won't be overwritten. - data = append([]byte(nil), data...) - - if !m.unmarshal(data) { - c.sendAlert(alertUnexpectedMessage) - return nil, alertUnexpectedMessage - } - return m, nil -} - -// Write writes data to the connection. -func (c *Conn) Write(b []byte) (int, error) { - if c.err != nil { - return 0, c.err - } - - if c.err = c.Handshake(); c.err != nil { - return 0, c.err - } - - c.out.Lock() - defer c.out.Unlock() - - if !c.handshakeComplete { - return 0, alertInternalError - } - - var n int - n, c.err = c.writeRecord(recordTypeApplicationData, b) - return n, c.err -} - -func (c *Conn) handleRenegotiation() error { - c.handshakeComplete = false - if !c.isClient { - panic("renegotiation should only happen for a client") - } - - msg, err := c.readHandshake() - if err != nil { - return err - } - _, ok := msg.(*helloRequestMsg) - if !ok { - c.sendAlert(alertUnexpectedMessage) - return alertUnexpectedMessage - } - - return c.Handshake() -} - -// Read can be made to time out and return a net.Error with Timeout() == true -// after a fixed time limit; see SetDeadline and SetReadDeadline. -func (c *Conn) Read(b []byte) (n int, err error) { - if err = c.Handshake(); err != nil { - return - } - - c.in.Lock() - defer c.in.Unlock() - - for c.input == nil && c.err == nil { - if err := c.readRecord(recordTypeApplicationData); err != nil { - // Soft error, like EAGAIN - return 0, err - } - if c.hand.Len() > 0 { - // We received handshake bytes, indicating the start of - // a renegotiation. - if err := c.handleRenegotiation(); err != nil { - return 0, err - } - continue - } - } - if c.err != nil { - return 0, c.err - } - n, err = c.input.Read(b) - if c.input.off >= len(c.input.data) { - c.in.freeBlock(c.input) - c.input = nil - } - return n, nil -} - -// Close closes the connection. 
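readHandshake above frames each handshake message with a one-byte type and a 24-bit big-endian length, buffering records until at least 4+n bytes are available. A minimal sketch of the length decoding (the helper name is hypothetical):

package main

import "fmt"

// handshakeLen decodes the 24-bit big-endian length from bytes 1-3 of a
// handshake header, as readHandshake does above.
func handshakeLen(hdr []byte) int {
	return int(hdr[1])<<16 | int(hdr[2])<<8 | int(hdr[3])
}

func main() {
	fmt.Println(handshakeLen([]byte{1, 0x00, 0x01, 0x2c})) // 300
}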
-func (c *Conn) Close() error { - var alertErr error - - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if c.handshakeComplete { - alertErr = c.sendAlert(alertCloseNotify) - } - - if err := c.conn.Close(); err != nil { - return err - } - return alertErr -} - -// Handshake runs the client or server handshake -// protocol if it has not yet been run. -// Most uses of this package need not call Handshake -// explicitly: the first Read or Write will call it automatically. -func (c *Conn) Handshake() error { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if err := c.error(); err != nil { - return err - } - if c.handshakeComplete { - return nil - } - if c.isClient { - return c.clientHandshake() - } - return c.serverHandshake() -} - -// ConnectionState returns basic TLS details about the connection. -func (c *Conn) ConnectionState() ConnectionState { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - var state ConnectionState - state.HandshakeComplete = c.handshakeComplete - if c.handshakeComplete { - state.NegotiatedProtocol = c.clientProtocol - state.NegotiatedProtocolIsMutual = !c.clientProtocolFallback - state.CipherSuite = c.cipherSuite - state.PeerCertificates = c.peerCertificates - state.VerifiedChains = c.verifiedChains - state.ServerName = c.serverName - } - - return state -} - -// OCSPResponse returns the stapled OCSP response from the TLS server, if -// any. (Only valid for client connections.) -func (c *Conn) OCSPResponse() []byte { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - - return c.ocspResponse -} - -// VerifyHostname checks that the peer certificate chain is valid for -// connecting to host. If so, it returns nil; if not, it returns an error -// describing the problem. -func (c *Conn) VerifyHostname(host string) error { - c.handshakeMutex.Lock() - defer c.handshakeMutex.Unlock() - if !c.isClient { - return errors.New("VerifyHostname called on TLS server connection") - } - if !c.handshakeComplete { - return errors.New("TLS handshake has not yet been performed") - } - return c.peerCertificates[0].VerifyHostname(host) -} === removed file 'src/launchpad.net/gwacl/fork/tls/generate_cert.go' --- src/launchpad.net/gwacl/fork/tls/generate_cert.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/generate_cert.go 1970-01-01 00:00:00 +0000 @@ -1,74 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate a self-signed X.509 certificate for a TLS server. Outputs to -// 'cert.pem' and 'key.pem' and will overwrite existing files. - -package main - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "flag" - "log" - "math/big" - "os" - "time" -) - -var hostName *string = flag.String("host", "127.0.0.1", "Hostname to generate a certificate for") - -func main() { - flag.Parse() - - priv, err := rsa.GenerateKey(rand.Reader, 1024) - if err != nil { - log.Fatalf("failed to generate private key: %s", err) - return - } - - now := time.Now() - - template := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), - Subject: pkix.Name{ - CommonName: *hostName, - Organization: []string{"Acme Co"}, - }, - NotBefore: now.Add(-5 * time.Minute).UTC(), - NotAfter: now.AddDate(1, 0, 0).UTC(), // valid for 1 year. 
- - SubjectKeyId: []byte{1, 2, 3, 4}, - KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - log.Fatalf("Failed to create certificate: %s", err) - return - } - - certOut, err := os.Create("cert.pem") - if err != nil { - log.Fatalf("failed to open cert.pem for writing: %s", err) - return - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - log.Print("written cert.pem\n") - - keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - log.Print("failed to open key.pem for writing:", err) - return - } - pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) - keyOut.Close() - log.Print("written key.pem\n") -} === removed file 'src/launchpad.net/gwacl/fork/tls/handshake_client.go' --- src/launchpad.net/gwacl/fork/tls/handshake_client.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/handshake_client.go 1970-01-01 00:00:00 +0000 @@ -1,347 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "bytes" - "crypto" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "errors" - "io" - "strconv" -) - -func (c *Conn) clientHandshake() error { - finishedHash := newFinishedHash(versionTLS10) - - if c.config == nil { - c.config = defaultConfig() - } - - hello := &clientHelloMsg{ - vers: maxVersion, - cipherSuites: c.config.cipherSuites(), - compressionMethods: []uint8{compressionNone}, - random: make([]byte, 32), - ocspStapling: true, - serverName: c.config.ServerName, - supportedCurves: []uint16{curveP256, curveP384, curveP521}, - supportedPoints: []uint8{pointFormatUncompressed}, - nextProtoNeg: len(c.config.NextProtos) > 0, - } - - t := uint32(c.config.time().Unix()) - hello.random[0] = byte(t >> 24) - hello.random[1] = byte(t >> 16) - hello.random[2] = byte(t >> 8) - hello.random[3] = byte(t) - _, err := io.ReadFull(c.config.rand(), hello.random[4:]) - if err != nil { - c.sendAlert(alertInternalError) - return errors.New("short read from Rand") - } - - finishedHash.Write(hello.marshal()) - c.writeRecord(recordTypeHandshake, hello.marshal()) - - msg, err := c.readHandshake() - if err != nil { - return err - } - serverHello, ok := msg.(*serverHelloMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(serverHello.marshal()) - - vers, ok := mutualVersion(serverHello.vers) - if !ok || vers < versionTLS10 { - // TLS 1.0 is the minimum version supported as a client. 
- return c.sendAlert(alertProtocolVersion) - } - c.vers = vers - c.haveVers = true - - if serverHello.compressionMethod != compressionNone { - return c.sendAlert(alertUnexpectedMessage) - } - - if !hello.nextProtoNeg && serverHello.nextProtoNeg { - c.sendAlert(alertHandshakeFailure) - return errors.New("server advertised unrequested NPN") - } - - suite := mutualCipherSuite(c.config.cipherSuites(), serverHello.cipherSuite) - if suite == nil { - return c.sendAlert(alertHandshakeFailure) - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - certMsg, ok := msg.(*certificateMsg) - if !ok || len(certMsg.certificates) == 0 { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(certMsg.marshal()) - - certs := make([]*x509.Certificate, len(certMsg.certificates)) - for i, asn1Data := range certMsg.certificates { - cert, err := x509.ParseCertificate(asn1Data) - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("failed to parse certificate from server: " + err.Error()) - } - certs[i] = cert - } - - if !c.config.InsecureSkipVerify { - opts := x509.VerifyOptions{ - Roots: c.config.RootCAs, - CurrentTime: c.config.time(), - DNSName: c.config.ServerName, - Intermediates: x509.NewCertPool(), - } - - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - c.verifiedChains, err = certs[0].Verify(opts) - if err != nil { - c.sendAlert(alertBadCertificate) - return err - } - } - - if _, ok := certs[0].PublicKey.(*rsa.PublicKey); !ok { - return c.sendAlert(alertUnsupportedCertificate) - } - - c.peerCertificates = certs - - if serverHello.ocspStapling { - msg, err = c.readHandshake() - if err != nil { - return err - } - cs, ok := msg.(*certificateStatusMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(cs.marshal()) - - if cs.statusType == statusTypeOCSP { - c.ocspResponse = cs.response - } - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - - keyAgreement := suite.ka() - - skx, ok := msg.(*serverKeyExchangeMsg) - if ok { - finishedHash.Write(skx.marshal()) - err = keyAgreement.processServerKeyExchange(c.config, hello, serverHello, certs[0], skx) - if err != nil { - c.sendAlert(alertUnexpectedMessage) - return err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - var certToSend *Certificate - var certRequested bool - certReq, ok := msg.(*certificateRequestMsg) - if ok { - certRequested = true - - // RFC 4346 on the certificateAuthorities field: - // A list of the distinguished names of acceptable certificate - // authorities. These distinguished names may specify a desired - // distinguished name for a root CA or for a subordinate CA; - // thus, this message can be used to describe both known roots - // and a desired authorization space. If the - // certificate_authorities list is empty then the client MAY - // send any certificate of the appropriate - // ClientCertificateType, unless there is some external - // arrangement to the contrary. 
- - finishedHash.Write(certReq.marshal()) - - // For now, we only know how to sign challenges with RSA - rsaAvail := false - for _, certType := range certReq.certificateTypes { - if certType == certTypeRSASign { - rsaAvail = true - break - } - } - - // We need to search our list of client certs for one - // where SignatureAlgorithm is RSA and the Issuer is in - // certReq.certificateAuthorities - findCert: - for i, cert := range c.config.Certificates { - if !rsaAvail { - continue - } - - leaf := cert.Leaf - if leaf == nil { - if leaf, err = x509.ParseCertificate(cert.Certificate[0]); err != nil { - c.sendAlert(alertInternalError) - return errors.New("tls: failed to parse client certificate #" + strconv.Itoa(i) + ": " + err.Error()) - } - } - - if leaf.PublicKeyAlgorithm != x509.RSA { - continue - } - - if len(certReq.certificateAuthorities) == 0 { - // they gave us an empty list, so just take the - // first RSA cert from c.config.Certificates - certToSend = &cert - break - } - - for _, ca := range certReq.certificateAuthorities { - if bytes.Equal(leaf.RawIssuer, ca) { - certToSend = &cert - break findCert - } - } - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - shd, ok := msg.(*serverHelloDoneMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(shd.marshal()) - - // If the server requested a certificate then we have to send a - // Certificate message, even if it's empty because we don't have a - // certificate to send. - if certRequested { - certMsg = new(certificateMsg) - if certToSend != nil { - certMsg.certificates = certToSend.Certificate - } - finishedHash.Write(certMsg.marshal()) - c.writeRecord(recordTypeHandshake, certMsg.marshal()) - } - - preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hello, certs[0]) - if err != nil { - c.sendAlert(alertInternalError) - return err - } - if ckx != nil { - finishedHash.Write(ckx.marshal()) - c.writeRecord(recordTypeHandshake, ckx.marshal()) - } - - if certToSend != nil { - certVerify := new(certificateVerifyMsg) - digest := make([]byte, 0, 36) - digest = finishedHash.serverMD5.Sum(digest) - digest = finishedHash.serverSHA1.Sum(digest) - signed, err := rsa.SignPKCS1v15(c.config.rand(), c.config.Certificates[0].PrivateKey.(*rsa.PrivateKey), crypto.MD5SHA1, digest) - if err != nil { - return c.sendAlert(alertInternalError) - } - certVerify.signature = signed - - finishedHash.Write(certVerify.marshal()) - c.writeRecord(recordTypeHandshake, certVerify.marshal()) - } - - masterSecret, clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := - keysFromPreMasterSecret(c.vers, preMasterSecret, hello.random, serverHello.random, suite.macLen, suite.keyLen, suite.ivLen) - - clientCipher := suite.cipher(clientKey, clientIV, false /* not for reading */) - clientHash := suite.mac(c.vers, clientMAC) - c.out.prepareCipherSpec(c.vers, clientCipher, clientHash) - c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - - if serverHello.nextProtoNeg { - nextProto := new(nextProtoMsg) - proto, fallback := mutualProtocol(c.config.NextProtos, serverHello.nextProtos) - nextProto.proto = proto - c.clientProtocol = proto - c.clientProtocolFallback = fallback - - finishedHash.Write(nextProto.marshal()) - c.writeRecord(recordTypeHandshake, nextProto.marshal()) - } - - finished := new(finishedMsg) - finished.verifyData = finishedHash.clientSum(masterSecret) - finishedHash.Write(finished.marshal()) - c.writeRecord(recordTypeHandshake, finished.marshal()) - - serverCipher := 
suite.cipher(serverKey, serverIV, true /* for reading */) - serverHash := suite.mac(c.vers, serverMAC) - c.in.prepareCipherSpec(c.vers, serverCipher, serverHash) - c.readRecord(recordTypeChangeCipherSpec) - if c.err != nil { - return c.err - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - serverFinished, ok := msg.(*finishedMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - verify := finishedHash.serverSum(masterSecret) - if len(verify) != len(serverFinished.verifyData) || - subtle.ConstantTimeCompare(verify, serverFinished.verifyData) != 1 { - return c.sendAlert(alertHandshakeFailure) - } - - c.handshakeComplete = true - c.cipherSuite = suite.id - return nil -} - -// mutualProtocol finds the mutual Next Protocol Negotiation protocol given the -// set of client and server supported protocols. The set of client supported -// protocols must not be empty. It returns the resulting protocol and flag -// indicating if the fallback case was reached. -func mutualProtocol(clientProtos, serverProtos []string) (string, bool) { - for _, s := range serverProtos { - for _, c := range clientProtos { - if s == c { - return s, false - } - } - } - - return clientProtos[0], true -} === removed file 'src/launchpad.net/gwacl/fork/tls/handshake_messages.go' --- src/launchpad.net/gwacl/fork/tls/handshake_messages.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/handshake_messages.go 1970-01-01 00:00:00 +0000 @@ -1,1078 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import "bytes" - -type clientHelloMsg struct { - raw []byte - vers uint16 - random []byte - sessionId []byte - cipherSuites []uint16 - compressionMethods []uint8 - nextProtoNeg bool - serverName string - ocspStapling bool - supportedCurves []uint16 - supportedPoints []uint8 -} - -func (m *clientHelloMsg) equal(i interface{}) bool { - m1, ok := i.(*clientHelloMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.vers == m1.vers && - bytes.Equal(m.random, m1.random) && - bytes.Equal(m.sessionId, m1.sessionId) && - eqUint16s(m.cipherSuites, m1.cipherSuites) && - bytes.Equal(m.compressionMethods, m1.compressionMethods) && - m.nextProtoNeg == m1.nextProtoNeg && - m.serverName == m1.serverName && - m.ocspStapling == m1.ocspStapling && - eqUint16s(m.supportedCurves, m1.supportedCurves) && - bytes.Equal(m.supportedPoints, m1.supportedPoints) -} - -func (m *clientHelloMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - length := 2 + 32 + 1 + len(m.sessionId) + 2 + len(m.cipherSuites)*2 + 1 + len(m.compressionMethods) - numExtensions := 0 - extensionsLength := 0 - if m.nextProtoNeg { - numExtensions++ - } - if m.ocspStapling { - extensionsLength += 1 + 2 + 2 - numExtensions++ - } - if len(m.serverName) > 0 { - extensionsLength += 5 + len(m.serverName) - numExtensions++ - } - if len(m.supportedCurves) > 0 { - extensionsLength += 2 + 2*len(m.supportedCurves) - numExtensions++ - } - if len(m.supportedPoints) > 0 { - extensionsLength += 1 + len(m.supportedPoints) - numExtensions++ - } - if numExtensions > 0 { - extensionsLength += 4 * numExtensions - length += 2 + extensionsLength - } - - x := make([]byte, 4+length) - x[0] = typeClientHello - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(m.vers >> 8) - x[5] = uint8(m.vers) - copy(x[6:38], m.random) - x[38] = uint8(len(m.sessionId)) 
- copy(x[39:39+len(m.sessionId)], m.sessionId) - y := x[39+len(m.sessionId):] - y[0] = uint8(len(m.cipherSuites) >> 7) - y[1] = uint8(len(m.cipherSuites) << 1) - for i, suite := range m.cipherSuites { - y[2+i*2] = uint8(suite >> 8) - y[3+i*2] = uint8(suite) - } - z := y[2+len(m.cipherSuites)*2:] - z[0] = uint8(len(m.compressionMethods)) - copy(z[1:], m.compressionMethods) - - z = z[1+len(m.compressionMethods):] - if numExtensions > 0 { - z[0] = byte(extensionsLength >> 8) - z[1] = byte(extensionsLength) - z = z[2:] - } - if m.nextProtoNeg { - z[0] = byte(extensionNextProtoNeg >> 8) - z[1] = byte(extensionNextProtoNeg) - // The length is always 0 - z = z[4:] - } - if len(m.serverName) > 0 { - z[0] = byte(extensionServerName >> 8) - z[1] = byte(extensionServerName) - l := len(m.serverName) + 5 - z[2] = byte(l >> 8) - z[3] = byte(l) - z = z[4:] - - // RFC 3546, section 3.1 - // - // struct { - // NameType name_type; - // select (name_type) { - // case host_name: HostName; - // } name; - // } ServerName; - // - // enum { - // host_name(0), (255) - // } NameType; - // - // opaque HostName<1..2^16-1>; - // - // struct { - // ServerName server_name_list<1..2^16-1> - // } ServerNameList; - - z[0] = byte((len(m.serverName) + 3) >> 8) - z[1] = byte(len(m.serverName) + 3) - z[3] = byte(len(m.serverName) >> 8) - z[4] = byte(len(m.serverName)) - copy(z[5:], []byte(m.serverName)) - z = z[l:] - } - if m.ocspStapling { - // RFC 4366, section 3.6 - z[0] = byte(extensionStatusRequest >> 8) - z[1] = byte(extensionStatusRequest) - z[2] = 0 - z[3] = 5 - z[4] = 1 // OCSP type - // Two zero valued uint16s for the two lengths. - z = z[9:] - } - if len(m.supportedCurves) > 0 { - // http://tools.ietf.org/html/rfc4492#section-5.5.1 - z[0] = byte(extensionSupportedCurves >> 8) - z[1] = byte(extensionSupportedCurves) - l := 2 + 2*len(m.supportedCurves) - z[2] = byte(l >> 8) - z[3] = byte(l) - l -= 2 - z[4] = byte(l >> 8) - z[5] = byte(l) - z = z[6:] - for _, curve := range m.supportedCurves { - z[0] = byte(curve >> 8) - z[1] = byte(curve) - z = z[2:] - } - } - if len(m.supportedPoints) > 0 { - // http://tools.ietf.org/html/rfc4492#section-5.5.2 - z[0] = byte(extensionSupportedPoints >> 8) - z[1] = byte(extensionSupportedPoints) - l := 1 + len(m.supportedPoints) - z[2] = byte(l >> 8) - z[3] = byte(l) - l-- - z[4] = byte(l) - z = z[5:] - for _, pointFormat := range m.supportedPoints { - z[0] = byte(pointFormat) - z = z[1:] - } - } - - m.raw = x - - return x -} - -func (m *clientHelloMsg) unmarshal(data []byte) bool { - if len(data) < 42 { - return false - } - m.raw = data - m.vers = uint16(data[4])<<8 | uint16(data[5]) - m.random = data[6:38] - sessionIdLen := int(data[38]) - if sessionIdLen > 32 || len(data) < 39+sessionIdLen { - return false - } - m.sessionId = data[39 : 39+sessionIdLen] - data = data[39+sessionIdLen:] - if len(data) < 2 { - return false - } - // cipherSuiteLen is the number of bytes of cipher suite numbers. Since - // they are uint16s, the number must be even. 
- cipherSuiteLen := int(data[0])<<8 | int(data[1]) - if cipherSuiteLen%2 == 1 || len(data) < 2+cipherSuiteLen { - return false - } - numCipherSuites := cipherSuiteLen / 2 - m.cipherSuites = make([]uint16, numCipherSuites) - for i := 0; i < numCipherSuites; i++ { - m.cipherSuites[i] = uint16(data[2+2*i])<<8 | uint16(data[3+2*i]) - } - data = data[2+cipherSuiteLen:] - if len(data) < 1 { - return false - } - compressionMethodsLen := int(data[0]) - if len(data) < 1+compressionMethodsLen { - return false - } - m.compressionMethods = data[1 : 1+compressionMethodsLen] - - data = data[1+compressionMethodsLen:] - - m.nextProtoNeg = false - m.serverName = "" - m.ocspStapling = false - - if len(data) == 0 { - // ClientHello is optionally followed by extension data - return true - } - if len(data) < 2 { - return false - } - - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if extensionsLength != len(data) { - return false - } - - for len(data) != 0 { - if len(data) < 4 { - return false - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { - return false - } - - switch extension { - case extensionServerName: - if length < 2 { - return false - } - numNames := int(data[0])<<8 | int(data[1]) - d := data[2:] - for i := 0; i < numNames; i++ { - if len(d) < 3 { - return false - } - nameType := d[0] - nameLen := int(d[1])<<8 | int(d[2]) - d = d[3:] - if len(d) < nameLen { - return false - } - if nameType == 0 { - m.serverName = string(d[0:nameLen]) - break - } - d = d[nameLen:] - } - case extensionNextProtoNeg: - if length > 0 { - return false - } - m.nextProtoNeg = true - case extensionStatusRequest: - m.ocspStapling = length > 0 && data[0] == statusTypeOCSP - case extensionSupportedCurves: - // http://tools.ietf.org/html/rfc4492#section-5.5.1 - if length < 2 { - return false - } - l := int(data[0])<<8 | int(data[1]) - if l%2 == 1 || length != l+2 { - return false - } - numCurves := l / 2 - m.supportedCurves = make([]uint16, numCurves) - d := data[2:] - for i := 0; i < numCurves; i++ { - m.supportedCurves[i] = uint16(d[0])<<8 | uint16(d[1]) - d = d[2:] - } - case extensionSupportedPoints: - // http://tools.ietf.org/html/rfc4492#section-5.5.2 - if length < 1 { - return false - } - l := int(data[0]) - if length != l+1 { - return false - } - m.supportedPoints = make([]uint8, l) - copy(m.supportedPoints, data[1:]) - } - data = data[length:] - } - - return true -} - -type serverHelloMsg struct { - raw []byte - vers uint16 - random []byte - sessionId []byte - cipherSuite uint16 - compressionMethod uint8 - nextProtoNeg bool - nextProtos []string - ocspStapling bool -} - -func (m *serverHelloMsg) equal(i interface{}) bool { - m1, ok := i.(*serverHelloMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.vers == m1.vers && - bytes.Equal(m.random, m1.random) && - bytes.Equal(m.sessionId, m1.sessionId) && - m.cipherSuite == m1.cipherSuite && - m.compressionMethod == m1.compressionMethod && - m.nextProtoNeg == m1.nextProtoNeg && - eqStrings(m.nextProtos, m1.nextProtos) && - m.ocspStapling == m1.ocspStapling -} - -func (m *serverHelloMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - length := 38 + len(m.sessionId) - numExtensions := 0 - extensionsLength := 0 - - nextProtoLen := 0 - if m.nextProtoNeg { - numExtensions++ - for _, v := range m.nextProtos { - nextProtoLen += len(v) - } - nextProtoLen += len(m.nextProtos) - extensionsLength += nextProtoLen - } - if 
m.ocspStapling { - numExtensions++ - } - if numExtensions > 0 { - extensionsLength += 4 * numExtensions - length += 2 + extensionsLength - } - - x := make([]byte, 4+length) - x[0] = typeServerHello - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(m.vers >> 8) - x[5] = uint8(m.vers) - copy(x[6:38], m.random) - x[38] = uint8(len(m.sessionId)) - copy(x[39:39+len(m.sessionId)], m.sessionId) - z := x[39+len(m.sessionId):] - z[0] = uint8(m.cipherSuite >> 8) - z[1] = uint8(m.cipherSuite) - z[2] = uint8(m.compressionMethod) - - z = z[3:] - if numExtensions > 0 { - z[0] = byte(extensionsLength >> 8) - z[1] = byte(extensionsLength) - z = z[2:] - } - if m.nextProtoNeg { - z[0] = byte(extensionNextProtoNeg >> 8) - z[1] = byte(extensionNextProtoNeg) - z[2] = byte(nextProtoLen >> 8) - z[3] = byte(nextProtoLen) - z = z[4:] - - for _, v := range m.nextProtos { - l := len(v) - if l > 255 { - l = 255 - } - z[0] = byte(l) - copy(z[1:], []byte(v[0:l])) - z = z[1+l:] - } - } - if m.ocspStapling { - z[0] = byte(extensionStatusRequest >> 8) - z[1] = byte(extensionStatusRequest) - z = z[4:] - } - - m.raw = x - - return x -} - -func (m *serverHelloMsg) unmarshal(data []byte) bool { - if len(data) < 42 { - return false - } - m.raw = data - m.vers = uint16(data[4])<<8 | uint16(data[5]) - m.random = data[6:38] - sessionIdLen := int(data[38]) - if sessionIdLen > 32 || len(data) < 39+sessionIdLen { - return false - } - m.sessionId = data[39 : 39+sessionIdLen] - data = data[39+sessionIdLen:] - if len(data) < 3 { - return false - } - m.cipherSuite = uint16(data[0])<<8 | uint16(data[1]) - m.compressionMethod = data[2] - data = data[3:] - - m.nextProtoNeg = false - m.nextProtos = nil - m.ocspStapling = false - - if len(data) == 0 { - // ServerHello is optionally followed by extension data - return true - } - if len(data) < 2 { - return false - } - - extensionsLength := int(data[0])<<8 | int(data[1]) - data = data[2:] - if len(data) != extensionsLength { - return false - } - - for len(data) != 0 { - if len(data) < 4 { - return false - } - extension := uint16(data[0])<<8 | uint16(data[1]) - length := int(data[2])<<8 | int(data[3]) - data = data[4:] - if len(data) < length { - return false - } - - switch extension { - case extensionNextProtoNeg: - m.nextProtoNeg = true - d := data - for len(d) > 0 { - l := int(d[0]) - d = d[1:] - if l == 0 || l > len(d) { - return false - } - m.nextProtos = append(m.nextProtos, string(d[0:l])) - d = d[l:] - } - case extensionStatusRequest: - if length > 0 { - return false - } - m.ocspStapling = true - } - data = data[length:] - } - - return true -} - -type certificateMsg struct { - raw []byte - certificates [][]byte -} - -func (m *certificateMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - eqByteSlices(m.certificates, m1.certificates) -} - -func (m *certificateMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - var i int - for _, slice := range m.certificates { - i += len(slice) - } - - length := 3 + 3*len(m.certificates) + i - x = make([]byte, 4+length) - x[0] = typeCertificate - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - certificateOctets := length - 3 - x[4] = uint8(certificateOctets >> 16) - x[5] = uint8(certificateOctets >> 8) - x[6] = uint8(certificateOctets) - - y := x[7:] - for _, slice := range m.certificates { - y[0] = uint8(len(slice) >> 16) - y[1] = uint8(len(slice) >> 8) - y[2] = 
uint8(len(slice)) - copy(y[3:], slice) - y = y[3+len(slice):] - } - - m.raw = x - return -} - -func (m *certificateMsg) unmarshal(data []byte) bool { - if len(data) < 7 { - return false - } - - m.raw = data - certsLen := uint32(data[4])<<16 | uint32(data[5])<<8 | uint32(data[6]) - if uint32(len(data)) != certsLen+7 { - return false - } - - numCerts := 0 - d := data[7:] - for certsLen > 0 { - if len(d) < 4 { - return false - } - certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) - if uint32(len(d)) < 3+certLen { - return false - } - d = d[3+certLen:] - certsLen -= 3 + certLen - numCerts++ - } - - m.certificates = make([][]byte, numCerts) - d = data[7:] - for i := 0; i < numCerts; i++ { - certLen := uint32(d[0])<<16 | uint32(d[1])<<8 | uint32(d[2]) - m.certificates[i] = d[3 : 3+certLen] - d = d[3+certLen:] - } - - return true -} - -type serverKeyExchangeMsg struct { - raw []byte - key []byte -} - -func (m *serverKeyExchangeMsg) equal(i interface{}) bool { - m1, ok := i.(*serverKeyExchangeMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.key, m1.key) -} - -func (m *serverKeyExchangeMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - length := len(m.key) - x := make([]byte, length+4) - x[0] = typeServerKeyExchange - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - copy(x[4:], m.key) - - m.raw = x - return x -} - -func (m *serverKeyExchangeMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - m.key = data[4:] - return true -} - -type certificateStatusMsg struct { - raw []byte - statusType uint8 - response []byte -} - -func (m *certificateStatusMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateStatusMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.statusType == m1.statusType && - bytes.Equal(m.response, m1.response) -} - -func (m *certificateStatusMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - - var x []byte - if m.statusType == statusTypeOCSP { - x = make([]byte, 4+4+len(m.response)) - x[0] = typeCertificateStatus - l := len(m.response) + 4 - x[1] = byte(l >> 16) - x[2] = byte(l >> 8) - x[3] = byte(l) - x[4] = statusTypeOCSP - - l -= 4 - x[5] = byte(l >> 16) - x[6] = byte(l >> 8) - x[7] = byte(l) - copy(x[8:], m.response) - } else { - x = []byte{typeCertificateStatus, 0, 0, 1, m.statusType} - } - - m.raw = x - return x -} - -func (m *certificateStatusMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 5 { - return false - } - m.statusType = data[4] - - m.response = nil - if m.statusType == statusTypeOCSP { - if len(data) < 8 { - return false - } - respLen := uint32(data[5])<<16 | uint32(data[6])<<8 | uint32(data[7]) - if uint32(len(data)) != 4+4+respLen { - return false - } - m.response = data[8:] - } - return true -} - -type serverHelloDoneMsg struct{} - -func (m *serverHelloDoneMsg) equal(i interface{}) bool { - _, ok := i.(*serverHelloDoneMsg) - return ok -} - -func (m *serverHelloDoneMsg) marshal() []byte { - x := make([]byte, 4) - x[0] = typeServerHelloDone - return x -} - -func (m *serverHelloDoneMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} - -type clientKeyExchangeMsg struct { - raw []byte - ciphertext []byte -} - -func (m *clientKeyExchangeMsg) equal(i interface{}) bool { - m1, ok := i.(*clientKeyExchangeMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.ciphertext, m1.ciphertext) -} - -func (m *clientKeyExchangeMsg) 
marshal() []byte { - if m.raw != nil { - return m.raw - } - length := len(m.ciphertext) - x := make([]byte, length+4) - x[0] = typeClientKeyExchange - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - copy(x[4:], m.ciphertext) - - m.raw = x - return x -} - -func (m *clientKeyExchangeMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - l := int(data[1])<<16 | int(data[2])<<8 | int(data[3]) - if l != len(data)-4 { - return false - } - m.ciphertext = data[4:] - return true -} - -type finishedMsg struct { - raw []byte - verifyData []byte -} - -func (m *finishedMsg) equal(i interface{}) bool { - m1, ok := i.(*finishedMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.verifyData, m1.verifyData) -} - -func (m *finishedMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - x = make([]byte, 4+len(m.verifyData)) - x[0] = typeFinished - x[3] = byte(len(m.verifyData)) - copy(x[4:], m.verifyData) - m.raw = x - return -} - -func (m *finishedMsg) unmarshal(data []byte) bool { - m.raw = data - if len(data) < 4 { - return false - } - m.verifyData = data[4:] - return true -} - -type nextProtoMsg struct { - raw []byte - proto string -} - -func (m *nextProtoMsg) equal(i interface{}) bool { - m1, ok := i.(*nextProtoMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - m.proto == m1.proto -} - -func (m *nextProtoMsg) marshal() []byte { - if m.raw != nil { - return m.raw - } - l := len(m.proto) - if l > 255 { - l = 255 - } - - padding := 32 - (l+2)%32 - length := l + padding + 2 - x := make([]byte, length+4) - x[0] = typeNextProtocol - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - y := x[4:] - y[0] = byte(l) - copy(y[1:], []byte(m.proto[0:l])) - y = y[1+l:] - y[0] = byte(padding) - - m.raw = x - - return x -} - -func (m *nextProtoMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 5 { - return false - } - data = data[4:] - protoLen := int(data[0]) - data = data[1:] - if len(data) < protoLen { - return false - } - m.proto = string(data[0:protoLen]) - data = data[protoLen:] - - if len(data) < 1 { - return false - } - paddingLen := int(data[0]) - data = data[1:] - if len(data) != paddingLen { - return false - } - - return true -} - -type certificateRequestMsg struct { - raw []byte - certificateTypes []byte - certificateAuthorities [][]byte -} - -func (m *certificateRequestMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateRequestMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.certificateTypes, m1.certificateTypes) && - eqByteSlices(m.certificateAuthorities, m1.certificateAuthorities) -} - -func (m *certificateRequestMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See http://tools.ietf.org/html/rfc4346#section-7.4.4 - length := 1 + len(m.certificateTypes) + 2 - casLength := 0 - for _, ca := range m.certificateAuthorities { - casLength += 2 + len(ca) - } - length += casLength - - x = make([]byte, 4+length) - x[0] = typeCertificateRequest - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - - x[4] = uint8(len(m.certificateTypes)) - - copy(x[5:], m.certificateTypes) - y := x[5+len(m.certificateTypes):] - y[0] = uint8(casLength >> 8) - y[1] = uint8(casLength) - y = y[2:] - for _, ca := range m.certificateAuthorities { - y[0] = uint8(len(ca) >> 8) - y[1] = uint8(len(ca)) - y = y[2:] - copy(y, ca) - y = y[len(ca):] - 
} - - m.raw = x - return -} - -func (m *certificateRequestMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 5 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - numCertTypes := int(data[4]) - data = data[5:] - if numCertTypes == 0 || len(data) <= numCertTypes { - return false - } - - m.certificateTypes = make([]byte, numCertTypes) - if copy(m.certificateTypes, data) != numCertTypes { - return false - } - - data = data[numCertTypes:] - - if len(data) < 2 { - return false - } - casLength := uint16(data[0])<<8 | uint16(data[1]) - data = data[2:] - if len(data) < int(casLength) { - return false - } - cas := make([]byte, casLength) - copy(cas, data) - data = data[casLength:] - - m.certificateAuthorities = nil - for len(cas) > 0 { - if len(cas) < 2 { - return false - } - caLen := uint16(cas[0])<<8 | uint16(cas[1]) - cas = cas[2:] - - if len(cas) < int(caLen) { - return false - } - - m.certificateAuthorities = append(m.certificateAuthorities, cas[:caLen]) - cas = cas[caLen:] - } - if len(data) > 0 { - return false - } - - return true -} - -type certificateVerifyMsg struct { - raw []byte - signature []byte -} - -func (m *certificateVerifyMsg) equal(i interface{}) bool { - m1, ok := i.(*certificateVerifyMsg) - if !ok { - return false - } - - return bytes.Equal(m.raw, m1.raw) && - bytes.Equal(m.signature, m1.signature) -} - -func (m *certificateVerifyMsg) marshal() (x []byte) { - if m.raw != nil { - return m.raw - } - - // See http://tools.ietf.org/html/rfc4346#section-7.4.8 - siglength := len(m.signature) - length := 2 + siglength - x = make([]byte, 4+length) - x[0] = typeCertificateVerify - x[1] = uint8(length >> 16) - x[2] = uint8(length >> 8) - x[3] = uint8(length) - x[4] = uint8(siglength >> 8) - x[5] = uint8(siglength) - copy(x[6:], m.signature) - - m.raw = x - - return -} - -func (m *certificateVerifyMsg) unmarshal(data []byte) bool { - m.raw = data - - if len(data) < 6 { - return false - } - - length := uint32(data[1])<<16 | uint32(data[2])<<8 | uint32(data[3]) - if uint32(len(data))-4 != length { - return false - } - - siglength := int(data[4])<<8 + int(data[5]) - if len(data)-6 != siglength { - return false - } - - m.signature = data[6:] - - return true -} - -type helloRequestMsg struct { -} - -func (*helloRequestMsg) marshal() []byte { - return []byte{typeHelloRequest, 0, 0, 0} -} - -func (*helloRequestMsg) unmarshal(data []byte) bool { - return len(data) == 4 -} - -func eqUint16s(x, y []uint16) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqStrings(x, y []string) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if y[i] != v { - return false - } - } - return true -} - -func eqByteSlices(x, y [][]byte) bool { - if len(x) != len(y) { - return false - } - for i, v := range x { - if !bytes.Equal(v, y[i]) { - return false - } - } - return true -} === removed file 'src/launchpad.net/gwacl/fork/tls/handshake_server.go' --- src/launchpad.net/gwacl/fork/tls/handshake_server.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/handshake_server.go 1970-01-01 00:00:00 +0000 @@ -1,352 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package tls - -import ( - "crypto" - "crypto/rsa" - "crypto/subtle" - "crypto/x509" - "errors" - "io" -) - -func (c *Conn) serverHandshake() error { - config := c.config - msg, err := c.readHandshake() - if err != nil { - return err - } - clientHello, ok := msg.(*clientHelloMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - vers, ok := mutualVersion(clientHello.vers) - if !ok { - return c.sendAlert(alertProtocolVersion) - } - c.vers = vers - c.haveVers = true - - finishedHash := newFinishedHash(vers) - finishedHash.Write(clientHello.marshal()) - - hello := new(serverHelloMsg) - - supportedCurve := false -Curves: - for _, curve := range clientHello.supportedCurves { - switch curve { - case curveP256, curveP384, curveP521: - supportedCurve = true - break Curves - } - } - - supportedPointFormat := false - for _, pointFormat := range clientHello.supportedPoints { - if pointFormat == pointFormatUncompressed { - supportedPointFormat = true - break - } - } - - ellipticOk := supportedCurve && supportedPointFormat - - var suite *cipherSuite -FindCipherSuite: - for _, id := range clientHello.cipherSuites { - for _, supported := range config.cipherSuites() { - if id == supported { - var candidate *cipherSuite - - for _, s := range cipherSuites { - if s.id == id { - candidate = s - break - } - } - if candidate == nil { - continue - } - // Don't select a ciphersuite which we can't - // support for this client. - if candidate.elliptic && !ellipticOk { - continue - } - suite = candidate - break FindCipherSuite - } - } - } - - foundCompression := false - // We only support null compression, so check that the client offered it. - for _, compression := range clientHello.compressionMethods { - if compression == compressionNone { - foundCompression = true - break - } - } - - if suite == nil || !foundCompression { - return c.sendAlert(alertHandshakeFailure) - } - - hello.vers = vers - hello.cipherSuite = suite.id - t := uint32(config.time().Unix()) - hello.random = make([]byte, 32) - hello.random[0] = byte(t >> 24) - hello.random[1] = byte(t >> 16) - hello.random[2] = byte(t >> 8) - hello.random[3] = byte(t) - _, err = io.ReadFull(config.rand(), hello.random[4:]) - if err != nil { - return c.sendAlert(alertInternalError) - } - hello.compressionMethod = compressionNone - if clientHello.nextProtoNeg { - hello.nextProtoNeg = true - hello.nextProtos = config.NextProtos - } - - if len(config.Certificates) == 0 { - return c.sendAlert(alertInternalError) - } - cert := &config.Certificates[0] - if len(clientHello.serverName) > 0 { - c.serverName = clientHello.serverName - cert = config.getCertificateForName(clientHello.serverName) - } - - if clientHello.ocspStapling && len(cert.OCSPStaple) > 0 { - hello.ocspStapling = true - } - - finishedHash.Write(hello.marshal()) - c.writeRecord(recordTypeHandshake, hello.marshal()) - - certMsg := new(certificateMsg) - certMsg.certificates = cert.Certificate - finishedHash.Write(certMsg.marshal()) - c.writeRecord(recordTypeHandshake, certMsg.marshal()) - - if hello.ocspStapling { - certStatus := new(certificateStatusMsg) - certStatus.statusType = statusTypeOCSP - certStatus.response = cert.OCSPStaple - finishedHash.Write(certStatus.marshal()) - c.writeRecord(recordTypeHandshake, certStatus.marshal()) - } - - keyAgreement := suite.ka() - skx, err := keyAgreement.generateServerKeyExchange(config, cert, clientHello, hello) - if err != nil { - c.sendAlert(alertHandshakeFailure) - return err - } - if skx != nil { - finishedHash.Write(skx.marshal()) - 
c.writeRecord(recordTypeHandshake, skx.marshal()) - } - - if config.ClientAuth >= RequestClientCert { - // Request a client certificate - certReq := new(certificateRequestMsg) - certReq.certificateTypes = []byte{certTypeRSASign} - - // An empty list of certificateAuthorities signals to - // the client that it may send any certificate in response - // to our request. When we know the CAs we trust, then - // we can send them down, so that the client can choose - // an appropriate certificate to give to us. - if config.ClientCAs != nil { - certReq.certificateAuthorities = config.ClientCAs.Subjects() - } - finishedHash.Write(certReq.marshal()) - c.writeRecord(recordTypeHandshake, certReq.marshal()) - } - - helloDone := new(serverHelloDoneMsg) - finishedHash.Write(helloDone.marshal()) - c.writeRecord(recordTypeHandshake, helloDone.marshal()) - - var pub *rsa.PublicKey // public key for client auth, if any - - msg, err = c.readHandshake() - if err != nil { - return err - } - - // If we requested a client certificate, then the client must send a - // certificate message, even if it's empty. - if config.ClientAuth >= RequestClientCert { - if certMsg, ok = msg.(*certificateMsg); !ok { - return c.sendAlert(alertHandshakeFailure) - } - finishedHash.Write(certMsg.marshal()) - - if len(certMsg.certificates) == 0 { - // The client didn't actually send a certificate - switch config.ClientAuth { - case RequireAnyClientCert, RequireAndVerifyClientCert: - c.sendAlert(alertBadCertificate) - return errors.New("tls: client didn't provide a certificate") - } - } - - certs := make([]*x509.Certificate, len(certMsg.certificates)) - for i, asn1Data := range certMsg.certificates { - if certs[i], err = x509.ParseCertificate(asn1Data); err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("tls: failed to parse client certificate: " + err.Error()) - } - } - - if c.config.ClientAuth >= VerifyClientCertIfGiven && len(certs) > 0 { - opts := x509.VerifyOptions{ - Roots: c.config.ClientCAs, - CurrentTime: c.config.time(), - Intermediates: x509.NewCertPool(), - } - - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - chains, err := certs[0].Verify(opts) - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("tls: failed to verify client's certificate: " + err.Error()) - } - - ok := false - for _, ku := range certs[0].ExtKeyUsage { - if ku == x509.ExtKeyUsageClientAuth { - ok = true - break - } - } - if !ok { - c.sendAlert(alertHandshakeFailure) - return errors.New("tls: client's certificate's extended key usage doesn't permit it to be used for client authentication") - } - - c.verifiedChains = chains - } - - if len(certs) > 0 { - if pub, ok = certs[0].PublicKey.(*rsa.PublicKey); !ok { - return c.sendAlert(alertUnsupportedCertificate) - } - c.peerCertificates = certs - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - } - - // Get client key exchange - ckx, ok := msg.(*clientKeyExchangeMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(ckx.marshal()) - - // If we received a client cert in response to our certificate request message, - // the client will send us a certificateVerifyMsg immediately after the - // clientKeyExchangeMsg. This message is a MD5SHA1 digest of all preceding - // handshake-layer messages that is signed using the private key corresponding - // to the client's certificate. 
This allows us to verify that the client is in - // possession of the private key of the certificate. - if len(c.peerCertificates) > 0 { - msg, err = c.readHandshake() - if err != nil { - return err - } - certVerify, ok := msg.(*certificateVerifyMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - digest := make([]byte, 0, 36) - digest = finishedHash.serverMD5.Sum(digest) - digest = finishedHash.serverSHA1.Sum(digest) - err = rsa.VerifyPKCS1v15(pub, crypto.MD5SHA1, digest, certVerify.signature) - if err != nil { - c.sendAlert(alertBadCertificate) - return errors.New("could not validate signature of connection nonces: " + err.Error()) - } - - finishedHash.Write(certVerify.marshal()) - } - - preMasterSecret, err := keyAgreement.processClientKeyExchange(config, cert, ckx, c.vers) - if err != nil { - c.sendAlert(alertHandshakeFailure) - return err - } - - masterSecret, clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV := - keysFromPreMasterSecret(c.vers, preMasterSecret, clientHello.random, hello.random, suite.macLen, suite.keyLen, suite.ivLen) - - clientCipher := suite.cipher(clientKey, clientIV, true /* for reading */) - clientHash := suite.mac(c.vers, clientMAC) - c.in.prepareCipherSpec(c.vers, clientCipher, clientHash) - c.readRecord(recordTypeChangeCipherSpec) - if err := c.error(); err != nil { - return err - } - - if hello.nextProtoNeg { - msg, err = c.readHandshake() - if err != nil { - return err - } - nextProto, ok := msg.(*nextProtoMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - finishedHash.Write(nextProto.marshal()) - c.clientProtocol = nextProto.proto - } - - msg, err = c.readHandshake() - if err != nil { - return err - } - clientFinished, ok := msg.(*finishedMsg) - if !ok { - return c.sendAlert(alertUnexpectedMessage) - } - - verify := finishedHash.clientSum(masterSecret) - if len(verify) != len(clientFinished.verifyData) || - subtle.ConstantTimeCompare(verify, clientFinished.verifyData) != 1 { - return c.sendAlert(alertHandshakeFailure) - } - - finishedHash.Write(clientFinished.marshal()) - - serverCipher := suite.cipher(serverKey, serverIV, false /* not for reading */) - serverHash := suite.mac(c.vers, serverMAC) - c.out.prepareCipherSpec(c.vers, serverCipher, serverHash) - c.writeRecord(recordTypeChangeCipherSpec, []byte{1}) - - finished := new(finishedMsg) - finished.verifyData = finishedHash.serverSum(masterSecret) - c.writeRecord(recordTypeHandshake, finished.marshal()) - - c.handshakeComplete = true - c.cipherSuite = suite.id - - return nil -} === removed file 'src/launchpad.net/gwacl/fork/tls/key_agreement.go' --- src/launchpad.net/gwacl/fork/tls/key_agreement.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/key_agreement.go 1970-01-01 00:00:00 +0000 @@ -1,253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "errors" - "io" - "math/big" -) - -// rsaKeyAgreement implements the standard TLS key agreement where the client -// encrypts the pre-master secret to the server's public key. 
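As the methods that follow show, the 48-byte pre-master secret carries the client's offered protocol version in its first two bytes and 46 random bytes after them, and it travels encrypted to the server's RSA public key. A condensed sketch of the client side, with vers and serverPub assumed to be in scope and error handling elided:

    preMasterSecret := make([]byte, 48)
    preMasterSecret[0] = byte(vers >> 8) // offered protocol version
    preMasterSecret[1] = byte(vers)
    io.ReadFull(rand.Reader, preMasterSecret[2:]) // 46 random bytes
    encrypted, _ := rsa.EncryptPKCS1v15(rand.Reader, serverPub, preMasterSecret)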
-type rsaKeyAgreement struct{} - -func (ka rsaKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { - return nil, nil -} - -func (ka rsaKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { - preMasterSecret := make([]byte, 48) - _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) - if err != nil { - return nil, err - } - - if len(ckx.ciphertext) < 2 { - return nil, errors.New("bad ClientKeyExchange") - } - - ciphertext := ckx.ciphertext - if version != versionSSL30 { - ciphertextLen := int(ckx.ciphertext[0])<<8 | int(ckx.ciphertext[1]) - if ciphertextLen != len(ckx.ciphertext)-2 { - return nil, errors.New("bad ClientKeyExchange") - } - ciphertext = ckx.ciphertext[2:] - } - - err = rsa.DecryptPKCS1v15SessionKey(config.rand(), cert.PrivateKey.(*rsa.PrivateKey), ciphertext, preMasterSecret) - if err != nil { - return nil, err - } - // We don't check the version number in the premaster secret. For one, - // by checking it, we would leak information about the validity of the - // encrypted pre-master secret. Secondly, it provides only a small - // benefit against a downgrade attack and some implementations send the - // wrong version anyway. See the discussion at the end of section - // 7.4.7.1 of RFC 4346. - return preMasterSecret, nil -} - -func (ka rsaKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { - return errors.New("unexpected ServerKeyExchange") -} - -func (ka rsaKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { - preMasterSecret := make([]byte, 48) - preMasterSecret[0] = byte(clientHello.vers >> 8) - preMasterSecret[1] = byte(clientHello.vers) - _, err := io.ReadFull(config.rand(), preMasterSecret[2:]) - if err != nil { - return nil, nil, err - } - - encrypted, err := rsa.EncryptPKCS1v15(config.rand(), cert.PublicKey.(*rsa.PublicKey), preMasterSecret) - if err != nil { - return nil, nil, err - } - ckx := new(clientKeyExchangeMsg) - ckx.ciphertext = make([]byte, len(encrypted)+2) - ckx.ciphertext[0] = byte(len(encrypted) >> 8) - ckx.ciphertext[1] = byte(len(encrypted)) - copy(ckx.ciphertext[2:], encrypted) - return preMasterSecret, ckx, nil -} - -// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the -// concatenation of an MD5 and SHA1 hash. -func md5SHA1Hash(slices ...[]byte) []byte { - md5sha1 := make([]byte, md5.Size+sha1.Size) - hmd5 := md5.New() - for _, slice := range slices { - hmd5.Write(slice) - } - copy(md5sha1, hmd5.Sum(nil)) - - hsha1 := sha1.New() - for _, slice := range slices { - hsha1.Write(slice) - } - copy(md5sha1[md5.Size:], hsha1.Sum(nil)) - return md5sha1 -} - -// ecdheRSAKeyAgreement implements a TLS key agreement where the server -// generates an ephemeral EC public/private key pair and signs it. The -// pre-master secret is then calculated using ECDH.
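Both peers then derive the pre-master secret as the X coordinate of the ECDH shared point, left-padded with zeros to the byte length of the curve; this is the computation the process/generate methods below perform on each side:

    // ECDH shared-secret sketch: peerX, peerY are the remote public point
    // and priv is the local scalar.
    x, _ := curve.ScalarMult(peerX, peerY, priv)
    preMasterSecret := make([]byte, (curve.Params().BitSize+7)>>3)
    xBytes := x.Bytes()
    copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes)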
-type ecdheRSAKeyAgreement struct { - privateKey []byte - curve elliptic.Curve - x, y *big.Int -} - -func (ka *ecdheRSAKeyAgreement) generateServerKeyExchange(config *Config, cert *Certificate, clientHello *clientHelloMsg, hello *serverHelloMsg) (*serverKeyExchangeMsg, error) { - var curveid uint16 - -Curve: - for _, c := range clientHello.supportedCurves { - switch c { - case curveP256: - ka.curve = elliptic.P256() - curveid = c - break Curve - case curveP384: - ka.curve = elliptic.P384() - curveid = c - break Curve - case curveP521: - ka.curve = elliptic.P521() - curveid = c - break Curve - } - } - - if curveid == 0 { - return nil, errors.New("tls: no supported elliptic curves offered") - } - - var x, y *big.Int - var err error - ka.privateKey, x, y, err = elliptic.GenerateKey(ka.curve, config.rand()) - if err != nil { - return nil, err - } - ecdhePublic := elliptic.Marshal(ka.curve, x, y) - - // http://tools.ietf.org/html/rfc4492#section-5.4 - serverECDHParams := make([]byte, 1+2+1+len(ecdhePublic)) - serverECDHParams[0] = 3 // named curve - serverECDHParams[1] = byte(curveid >> 8) - serverECDHParams[2] = byte(curveid) - serverECDHParams[3] = byte(len(ecdhePublic)) - copy(serverECDHParams[4:], ecdhePublic) - - md5sha1 := md5SHA1Hash(clientHello.random, hello.random, serverECDHParams) - sig, err := rsa.SignPKCS1v15(config.rand(), cert.PrivateKey.(*rsa.PrivateKey), crypto.MD5SHA1, md5sha1) - if err != nil { - return nil, errors.New("failed to sign ECDHE parameters: " + err.Error()) - } - - skx := new(serverKeyExchangeMsg) - skx.key = make([]byte, len(serverECDHParams)+2+len(sig)) - copy(skx.key, serverECDHParams) - k := skx.key[len(serverECDHParams):] - k[0] = byte(len(sig) >> 8) - k[1] = byte(len(sig)) - copy(k[2:], sig) - - return skx, nil -} - -func (ka *ecdheRSAKeyAgreement) processClientKeyExchange(config *Config, cert *Certificate, ckx *clientKeyExchangeMsg, version uint16) ([]byte, error) { - if len(ckx.ciphertext) == 0 || int(ckx.ciphertext[0]) != len(ckx.ciphertext)-1 { - return nil, errors.New("bad ClientKeyExchange") - } - x, y := elliptic.Unmarshal(ka.curve, ckx.ciphertext[1:]) - if x == nil { - return nil, errors.New("bad ClientKeyExchange") - } - x, _ = ka.curve.ScalarMult(x, y, ka.privateKey) - preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) - xBytes := x.Bytes() - copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) - - return preMasterSecret, nil -} - -var errServerKeyExchange = errors.New("invalid ServerKeyExchange") - -func (ka *ecdheRSAKeyAgreement) processServerKeyExchange(config *Config, clientHello *clientHelloMsg, serverHello *serverHelloMsg, cert *x509.Certificate, skx *serverKeyExchangeMsg) error { - if len(skx.key) < 4 { - return errServerKeyExchange - } - if skx.key[0] != 3 { // named curve - return errors.New("server selected unsupported curve") - } - curveid := uint16(skx.key[1])<<8 | uint16(skx.key[2]) - - switch curveid { - case curveP256: - ka.curve = elliptic.P256() - case curveP384: - ka.curve = elliptic.P384() - case curveP521: - ka.curve = elliptic.P521() - default: - return errors.New("server selected unsupported curve") - } - - publicLen := int(skx.key[3]) - if publicLen+4 > len(skx.key) { - return errServerKeyExchange - } - ka.x, ka.y = elliptic.Unmarshal(ka.curve, skx.key[4:4+publicLen]) - if ka.x == nil { - return errServerKeyExchange - } - serverECDHParams := skx.key[:4+publicLen] - - sig := skx.key[4+publicLen:] - if len(sig) < 2 { - return errServerKeyExchange - } - sigLen := int(sig[0])<<8 | int(sig[1]) - if 
sigLen+2 != len(sig) { - return errServerKeyExchange - } - sig = sig[2:] - - md5sha1 := md5SHA1Hash(clientHello.random, serverHello.random, serverECDHParams) - return rsa.VerifyPKCS1v15(cert.PublicKey.(*rsa.PublicKey), crypto.MD5SHA1, md5sha1, sig) -} - -func (ka *ecdheRSAKeyAgreement) generateClientKeyExchange(config *Config, clientHello *clientHelloMsg, cert *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error) { - if ka.curve == nil { - return nil, nil, errors.New("missing ServerKeyExchange message") - } - priv, mx, my, err := elliptic.GenerateKey(ka.curve, config.rand()) - if err != nil { - return nil, nil, err - } - x, _ := ka.curve.ScalarMult(ka.x, ka.y, priv) - preMasterSecret := make([]byte, (ka.curve.Params().BitSize+7)>>3) - xBytes := x.Bytes() - copy(preMasterSecret[len(preMasterSecret)-len(xBytes):], xBytes) - - serialized := elliptic.Marshal(ka.curve, mx, my) - - ckx := new(clientKeyExchangeMsg) - ckx.ciphertext = make([]byte, 1+len(serialized)) - ckx.ciphertext[0] = byte(len(serialized)) - copy(ckx.ciphertext[1:], serialized) - - return preMasterSecret, ckx, nil -} === removed file 'src/launchpad.net/gwacl/fork/tls/parse-gnutls-cli-debug-log.py' --- src/launchpad.net/gwacl/fork/tls/parse-gnutls-cli-debug-log.py 2013-07-23 08:51:44 +0000 +++ src/launchpad.net/gwacl/fork/tls/parse-gnutls-cli-debug-log.py 1970-01-01 00:00:00 +0000 @@ -1,57 +0,0 @@ -# Copyright 2010 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -# This code is used to parse the debug log from gnutls-cli and generate a -# script of the handshake. This script is included in handshake_server_test.go. -# See the comments there for details. - -import sys - -blocks = [] - -READ = 1 -WRITE = 2 - -currentBlockType = 0 -currentBlock = [] -for line in sys.stdin.readlines(): - line = line[:-1] - if line.startswith("|<7>| WRITE: "): - if currentBlockType != WRITE: - if len(currentBlock) > 0: - blocks.append(currentBlock) - currentBlock = [] - currentBlockType = WRITE - elif line.startswith("|<7>| READ: "): - if currentBlockType != READ: - if len(currentBlock) > 0: - blocks.append(currentBlock) - currentBlock = [] - currentBlockType = READ - elif line.startswith("|<7>| 0"): - line = line[13:] - line = line.strip() - bs = line.split() - for b in bs: - currentBlock.append(int(b, 16)) - elif line.startswith("|<7>| RB-PEEK: Read 1 bytes"): - currentBlock = currentBlock[:-1] - -if len(currentBlock) > 0: - blocks.append(currentBlock) - -for block in blocks: - sys.stdout.write("\t{\n") - - i = 0 - for b in block: - if i % 8 == 0: - sys.stdout.write("\t\t") - sys.stdout.write("0x%02x," % b) - if i % 8 == 7: - sys.stdout.write("\n") - else: - sys.stdout.write(" ") - i += 1 - sys.stdout.write("\n\t},\n\n") === removed file 'src/launchpad.net/gwacl/fork/tls/prf.go' --- src/launchpad.net/gwacl/fork/tls/prf.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/prf.go 1970-01-01 00:00:00 +0000 @@ -1,235 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tls - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Split a premaster secret in two as specified in RFC 4346, section 5. 
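Each half is ceil(n/2) bytes long, so for odd-length secrets the two halves deliberately share the middle byte. A worked example against the function below:

    s1, s2 := splitPreMasterSecret([]byte{1, 2, 3, 4, 5})
    // s1 == [1 2 3], s2 == [3 4 5]; the middle byte appears in both halves.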
-func splitPreMasterSecret(secret []byte) (s1, s2 []byte) { - s1 = secret[0 : (len(secret)+1)/2] - s2 = secret[len(secret)/2:] - return -} - -// pHash implements the P_hash function, as defined in RFC 4346, section 5. -func pHash(result, secret, seed []byte, hash func() hash.Hash) { - h := hmac.New(hash, secret) - h.Write(seed) - a := h.Sum(nil) - - j := 0 - for j < len(result) { - h.Reset() - h.Write(a) - h.Write(seed) - b := h.Sum(nil) - todo := len(b) - if j+todo > len(result) { - todo = len(result) - j - } - copy(result[j:j+todo], b) - j += todo - - h.Reset() - h.Write(a) - a = h.Sum(nil) - } -} - -// pRF10 implements the TLS 1.0 pseudo-random function, as defined in RFC 2246, section 5. -func pRF10(result, secret, label, seed []byte) { - hashSHA1 := sha1.New - hashMD5 := md5.New - - labelAndSeed := make([]byte, len(label)+len(seed)) - copy(labelAndSeed, label) - copy(labelAndSeed[len(label):], seed) - - s1, s2 := splitPreMasterSecret(secret) - pHash(result, s1, labelAndSeed, hashMD5) - result2 := make([]byte, len(result)) - pHash(result2, s2, labelAndSeed, hashSHA1) - - for i, b := range result2 { - result[i] ^= b - } -} - -// pRF30 implements the SSL 3.0 pseudo-random function, as defined in -// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 6. -func pRF30(result, secret, label, seed []byte) { - hashSHA1 := sha1.New() - hashMD5 := md5.New() - - done := 0 - i := 0 - // RFC5246 section 6.3 says that the largest PRF output needed is 128 - // bytes. Since no more ciphersuites will be added to SSLv3, this will - // remain true. Each iteration gives us 16 bytes so 10 iterations will - // be sufficient. - var b [11]byte - for done < len(result) { - for j := 0; j <= i; j++ { - b[j] = 'A' + byte(i) - } - - hashSHA1.Reset() - hashSHA1.Write(b[:i+1]) - hashSHA1.Write(secret) - hashSHA1.Write(seed) - digest := hashSHA1.Sum(nil) - - hashMD5.Reset() - hashMD5.Write(secret) - hashMD5.Write(digest) - - done += copy(result[done:], hashMD5.Sum(nil)) - i++ - } -} - -const ( - tlsRandomLength = 32 // Length of a random nonce in TLS 1.1. - masterSecretLength = 48 // Length of a master secret in TLS 1.1. - finishedVerifyLength = 12 // Length of verify_data in a Finished message. -) - -var masterSecretLabel = []byte("master secret") -var keyExpansionLabel = []byte("key expansion") -var clientFinishedLabel = []byte("client finished") -var serverFinishedLabel = []byte("server finished") - -// keysFromPreMasterSecret generates the connection keys from the pre master -// secret, given the lengths of the MAC key, cipher key and IV, as defined in -// RFC 2246, section 6.3. 
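As a concrete check on the sizes, assume a hypothetical AES-128-CBC/SHA-1 suite, i.e. macLen=20, keyLen=16, ivLen=16. The function below then expands the master secret into n = 2*20 + 2*16 + 2*16 = 104 bytes of key material and slices it in this fixed order:

    key_block (104 bytes) = clientMAC(20) | serverMAC(20)
                          | clientKey(16) | serverKey(16)
                          | clientIV(16)  | serverIV(16)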
-func keysFromPreMasterSecret(version uint16, preMasterSecret, clientRandom, serverRandom []byte, macLen, keyLen, ivLen int) (masterSecret, clientMAC, serverMAC, clientKey, serverKey, clientIV, serverIV []byte) { - prf := pRF10 - if version == versionSSL30 { - prf = pRF30 - } - - var seed [tlsRandomLength * 2]byte - copy(seed[0:len(clientRandom)], clientRandom) - copy(seed[len(clientRandom):], serverRandom) - masterSecret = make([]byte, masterSecretLength) - prf(masterSecret, preMasterSecret, masterSecretLabel, seed[0:]) - - copy(seed[0:len(clientRandom)], serverRandom) - copy(seed[len(serverRandom):], clientRandom) - - n := 2*macLen + 2*keyLen + 2*ivLen - keyMaterial := make([]byte, n) - prf(keyMaterial, masterSecret, keyExpansionLabel, seed[0:]) - clientMAC = keyMaterial[:macLen] - keyMaterial = keyMaterial[macLen:] - serverMAC = keyMaterial[:macLen] - keyMaterial = keyMaterial[macLen:] - clientKey = keyMaterial[:keyLen] - keyMaterial = keyMaterial[keyLen:] - serverKey = keyMaterial[:keyLen] - keyMaterial = keyMaterial[keyLen:] - clientIV = keyMaterial[:ivLen] - keyMaterial = keyMaterial[ivLen:] - serverIV = keyMaterial[:ivLen] - return -} - -func newFinishedHash(version uint16) finishedHash { - return finishedHash{md5.New(), sha1.New(), md5.New(), sha1.New(), version} -} - -// A finishedHash calculates the hash of a set of handshake messages suitable -// for including in a Finished message. -type finishedHash struct { - clientMD5 hash.Hash - clientSHA1 hash.Hash - serverMD5 hash.Hash - serverSHA1 hash.Hash - version uint16 -} - -func (h finishedHash) Write(msg []byte) (n int, err error) { - h.clientMD5.Write(msg) - h.clientSHA1.Write(msg) - h.serverMD5.Write(msg) - h.serverSHA1.Write(msg) - return len(msg), nil -} - -// finishedSum10 calculates the contents of the verify_data member of a TLSv1 -// Finished message given the MD5 and SHA1 hashes of a set of handshake -// messages. -func finishedSum10(md5, sha1, label, masterSecret []byte) []byte { - seed := make([]byte, len(md5)+len(sha1)) - copy(seed, md5) - copy(seed[len(md5):], sha1) - out := make([]byte, finishedVerifyLength) - pRF10(out, masterSecret, label, seed) - return out -} - -// finishedSum30 calculates the contents of the verify_data member of a SSLv3 -// Finished message given the MD5 and SHA1 hashes of a set of handshake -// messages. -func finishedSum30(md5, sha1 hash.Hash, masterSecret []byte, magic [4]byte) []byte { - md5.Write(magic[:]) - md5.Write(masterSecret) - md5.Write(ssl30Pad1[:]) - md5Digest := md5.Sum(nil) - - md5.Reset() - md5.Write(masterSecret) - md5.Write(ssl30Pad2[:]) - md5.Write(md5Digest) - md5Digest = md5.Sum(nil) - - sha1.Write(magic[:]) - sha1.Write(masterSecret) - sha1.Write(ssl30Pad1[:40]) - sha1Digest := sha1.Sum(nil) - - sha1.Reset() - sha1.Write(masterSecret) - sha1.Write(ssl30Pad2[:40]) - sha1.Write(sha1Digest) - sha1Digest = sha1.Sum(nil) - - ret := make([]byte, len(md5Digest)+len(sha1Digest)) - copy(ret, md5Digest) - copy(ret[len(md5Digest):], sha1Digest) - return ret -} - -var ssl3ClientFinishedMagic = [4]byte{0x43, 0x4c, 0x4e, 0x54} -var ssl3ServerFinishedMagic = [4]byte{0x53, 0x52, 0x56, 0x52} - -// clientSum returns the contents of the verify_data member of a client's -// Finished message. 
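For TLS 1.0 both sums reduce to the same 12-byte construction and differ only in label (SSL 3.0 instead takes the finishedSum30 path above):

    verify_data = PRF(master_secret, "client finished",
                      MD5(handshake_messages) || SHA1(handshake_messages))[:12]
    // serverSum is identical, with the "server finished" label.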
-func (h finishedHash) clientSum(masterSecret []byte) []byte { - if h.version == versionSSL30 { - return finishedSum30(h.clientMD5, h.clientSHA1, masterSecret, ssl3ClientFinishedMagic) - } - - md5 := h.clientMD5.Sum(nil) - sha1 := h.clientSHA1.Sum(nil) - return finishedSum10(md5, sha1, clientFinishedLabel, masterSecret) -} - -// serverSum returns the contents of the verify_data member of a server's -// Finished message. -func (h finishedHash) serverSum(masterSecret []byte) []byte { - if h.version == versionSSL30 { - return finishedSum30(h.serverMD5, h.serverSHA1, masterSecret, ssl3ServerFinishedMagic) - } - - md5 := h.serverMD5.Sum(nil) - sha1 := h.serverSHA1.Sum(nil) - return finishedSum10(md5, sha1, serverFinishedLabel, masterSecret) -} === removed file 'src/launchpad.net/gwacl/fork/tls/tls.go' --- src/launchpad.net/gwacl/fork/tls/tls.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/fork/tls/tls.go 1970-01-01 00:00:00 +0000 @@ -1,187 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tls partially implements TLS 1.0, as specified in RFC 2246. -package tls - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "io/ioutil" - "net" - "strings" -) - -// Server returns a new TLS server side connection -// using conn as the underlying transport. -// The configuration config must be non-nil and must have -// at least one certificate. -func Server(conn net.Conn, config *Config) *Conn { - return &Conn{conn: conn, config: config} -} - -// Client returns a new TLS client side connection -// using conn as the underlying transport. -// Client interprets a nil configuration as equivalent to -// the zero configuration; see the documentation of Config -// for the defaults. -func Client(conn net.Conn, config *Config) *Conn { - return &Conn{conn: conn, config: config, isClient: true} -} - -// A listener implements a network listener (net.Listener) for TLS connections. -type listener struct { - net.Listener - config *Config -} - -// Accept waits for and returns the next incoming TLS connection. -// The returned connection c is a *tls.Conn. -func (l *listener) Accept() (c net.Conn, err error) { - c, err = l.Listener.Accept() - if err != nil { - return - } - c = Server(c, l.config) - return -} - -// NewListener creates a Listener which accepts connections from an inner -// Listener and wraps each connection with Server. -// The configuration config must be non-nil and must have -// at least one certificate. -func NewListener(inner net.Listener, config *Config) net.Listener { - l := new(listener) - l.Listener = inner - l.config = config - return l -} - -// Listen creates a TLS listener accepting connections on the -// given network address using net.Listen. -// The configuration config must be non-nil and must have -// at least one certificate. -func Listen(network, laddr string, config *Config) (net.Listener, error) { - if config == nil || len(config.Certificates) == 0 { - return nil, errors.New("tls.Listen: no certificates in configuration") - } - l, err := net.Listen(network, laddr) - if err != nil { - return nil, err - } - return NewListener(l, config), nil -} - -// Dial connects to the given network address using net.Dial -// and then initiates a TLS handshake, returning the resulting -// TLS connection. -// Dial interprets a nil configuration as equivalent to -// the zero configuration; see the documentation of Config -// for the defaults. 
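Use of the fork mirrored the standard crypto/tls package; a minimal dial looks like the sketch below, with the address purely illustrative:

    // ServerName is inferred from the address when the Config leaves it empty.
    conn, err := Dial("tcp", "example.com:443", &Config{})
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()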
-func Dial(network, addr string, config *Config) (*Conn, error) { - raddr := addr - c, err := net.Dial(network, raddr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(raddr, ":") - if colonPos == -1 { - colonPos = len(raddr) - } - hostname := raddr[:colonPos] - - if config == nil { - config = defaultConfig() - } - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - conn := Client(c, config) - if err = conn.Handshake(); err != nil { - c.Close() - return nil, err - } - return conn, nil -} - -// LoadX509KeyPair reads and parses a public/private key pair from a pair of -// files. The files must contain PEM encoded data. -func LoadX509KeyPair(certFile, keyFile string) (cert Certificate, err error) { - certPEMBlock, err := ioutil.ReadFile(certFile) - if err != nil { - return - } - keyPEMBlock, err := ioutil.ReadFile(keyFile) - if err != nil { - return - } - return X509KeyPair(certPEMBlock, keyPEMBlock) -} - -// X509KeyPair parses a public/private key pair from a pair of -// PEM encoded data. -func X509KeyPair(certPEMBlock, keyPEMBlock []byte) (cert Certificate, err error) { - var certDERBlock *pem.Block - for { - certDERBlock, certPEMBlock = pem.Decode(certPEMBlock) - if certDERBlock == nil { - break - } - if certDERBlock.Type == "CERTIFICATE" { - cert.Certificate = append(cert.Certificate, certDERBlock.Bytes) - } - } - - if len(cert.Certificate) == 0 { - err = errors.New("crypto/tls: failed to parse certificate PEM data") - return - } - - keyDERBlock, _ := pem.Decode(keyPEMBlock) - if keyDERBlock == nil { - err = errors.New("crypto/tls: failed to parse key PEM data") - return - } - - // OpenSSL 0.9.8 generates PKCS#1 private keys by default, while - // OpenSSL 1.0.0 generates PKCS#8 keys. We try both. - var key *rsa.PrivateKey - if key, err = x509.ParsePKCS1PrivateKey(keyDERBlock.Bytes); err != nil { - var privKey interface{} - if privKey, err = x509.ParsePKCS8PrivateKey(keyDERBlock.Bytes); err != nil { - err = errors.New("crypto/tls: failed to parse key: " + err.Error()) - return - } - - var ok bool - if key, ok = privKey.(*rsa.PrivateKey); !ok { - err = errors.New("crypto/tls: found non-RSA private key in PKCS#8 wrapping") - return - } - } - - cert.PrivateKey = key - - // We don't need to parse the public key for TLS, but we do so anyway - // to check that it looks sane and matches the private key. - x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) - if err != nil { - return - } - - if x509Cert.PublicKeyAlgorithm != x509.RSA || x509Cert.PublicKey.(*rsa.PublicKey).N.Cmp(key.PublicKey.N) != 0 { - err = errors.New("crypto/tls: private key does not match public key") - return - } - - return -} === removed file 'src/launchpad.net/gwacl/helpers_apiobjects_test.go' --- src/launchpad.net/gwacl/helpers_apiobjects_test.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/helpers_apiobjects_test.go 1970-01-01 00:00:00 +0000 @@ -1,194 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). -// -// Test helpers to fake objects that go into, or come out of, the Azure API.
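These helpers follow a fill-in-the-blanks pattern: fields left at their zero value receive random data, while fields the caller presets are preserved, so a test pins down only the values it asserts on. For example:

    // Port is kept as 80; every other unset field gets a random value.
    endpoint := populateEndpoint(&InputEndpoint{Port: 80})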
- -package gwacl - -func populateEndpoint(endpoint *InputEndpoint) *InputEndpoint { - if endpoint.LoadBalancedEndpointSetName == "" { - endpoint.LoadBalancedEndpointSetName = MakeRandomString(10) - } - if endpoint.LocalPort == 0 { - endpoint.LocalPort = int(MakeRandomPort()) - } - if endpoint.Name == "" { - endpoint.Name = MakeRandomString(10) - } - if endpoint.Port == 0 { - endpoint.Port = int(MakeRandomPort()) - } - if endpoint.LoadBalancerProbe == nil { - endpoint.LoadBalancerProbe = &LoadBalancerProbe{} - } - if endpoint.LoadBalancerProbe.Path == "" { - endpoint.LoadBalancerProbe.Path = MakeRandomString(10) - } - if endpoint.LoadBalancerProbe.Port == 0 { - endpoint.LoadBalancerProbe.Port = int(MakeRandomPort()) - } - if endpoint.LoadBalancerProbe.Protocol == "" { - endpoint.LoadBalancerProbe.Protocol = MakeRandomString(10) - } - if endpoint.Protocol == "" { - endpoint.Protocol = MakeRandomString(10) - } - if endpoint.VIP == "" { - endpoint.VIP = MakeRandomString(10) - } - return endpoint -} - -func makeLinuxProvisioningConfiguration() *ConfigurationSet { - hostname := MakeRandomString(10) - username := MakeRandomString(10) - password := MakeRandomString(10) - customdata := MakeRandomString(10) - disableSSH := BoolToString(MakeRandomBool()) - return NewLinuxProvisioningConfigurationSet(hostname, username, password, customdata, disableSSH) -} - -func makeWindowsProvisioningConfiguration() *ConfigurationSet { - ComputerName := MakeRandomString(10) - Password := MakeRandomString(10) - AdminUsername := MakeRandomString(10) - TimeZone := MakeRandomString(10) - AdditionalUnattendContent := MakeRandomString(10) - CustomData := MakeRandomString(10) - EnableAutomaticUpdates := BoolToString(MakeRandomBool()) - - StoreLocation := MakeRandomString(10) - StoreName := MakeRandomString(10) - Thumbprint := MakeRandomString(10) - CertSettings := []CertificateSetting{CertificateSetting{StoreLocation, StoreName, Thumbprint}} - - WinRMListener := &WinRMListener{WinRMProtocolHTTP, Thumbprint} - - return NewWindowsProvisioningConfigurationSet(ComputerName, Password, EnableAutomaticUpdates, TimeZone, CertSettings, WinRMListener, AdminUsername, AdditionalUnattendContent, CustomData) -} - -func makeOSVirtualHardDisk() *OSVirtualHardDisk { - HostCaching := BoolToString(MakeRandomBool()) - DiskLabel := MakeRandomString(10) - DiskName := MakeRandomString(10) - MediaLink := MakeRandomString(10) - SourceImageName := MakeRandomString(10) - - return &OSVirtualHardDisk{ - HostCaching: HostCaching, - DiskLabel: DiskLabel, - DiskName: DiskName, - MediaLink: MediaLink, - SourceImageName: SourceImageName} -} - -func makeLinuxRole() *Role { - RoleSize := "ExtraSmall" - RoleName := MakeRandomString(10) - RoleType := "PersistentVMRole" - config := makeLinuxProvisioningConfiguration() - configset := []ConfigurationSet{*config} - - return &Role{ - RoleSize: RoleSize, - RoleName: RoleName, - RoleType: RoleType, - ConfigurationSets: configset} -} - -func makeWindowsRole() *Role { - RoleSize := "ExtraSmall" - RoleName := MakeRandomString(10) - RoleType := "PersistentVMRole" - config := makeWindowsProvisioningConfiguration() - configset := []ConfigurationSet{*config} - provisionGuestAgent := BoolToString(MakeRandomBool()) - resourceReference := makeWindowsResourceExtensionReference() - - return &Role{ - RoleSize: RoleSize, - RoleName: RoleName, - RoleType: RoleType, - ConfigurationSets: configset, - ProvisionGuestAgent: provisionGuestAgent, - ResourceExtensionReferences: &[]ResourceExtensionReference{*resourceReference}, - } -} - 
-func makeWindowsResourceExtensionReference() *ResourceExtensionReference { - refName := MakeRandomString(10) - publisher := MakeRandomString(10) - name := MakeRandomString(10) - version := MakeRandomString(10) - state := MakeRandomString(10) - - param1 := makeWindowsResourceExtensionParameter(true) - param2 := makeWindowsResourceExtensionParameter(false) - - return NewResourceExtensionReference(refName, publisher, name, version, state, - []ResourceExtensionParameter{*param1, *param2}) -} - -func makeWindowsResourceExtensionParameter(private bool) *ResourceExtensionParameter { - key := MakeRandomString(10) - value := MakeRandomString(10) - var paramType ResourceExtensionParameterType - if private { - paramType = ResourceExtensionParameterTypePrivate - } else { - paramType = ResourceExtensionParameterTypePublic - } - return NewResourceExtensionParameter(key, value, paramType) -} - -func makeDnsServer() *DnsServer { - name := MakeRandomString(10) - address := MakeRandomString(10) - - return &DnsServer{ - Name: name, - Address: address} -} - -func makeDeployment() *Deployment { - Name := MakeRandomString(10) - DeploymentSlot := "Staging" - Label := MakeRandomString(10) - VirtualNetworkName := MakeRandomString(10) - role := makeLinuxRole() - RoleList := []Role{*role} - Dns := []DnsServer{*makeDnsServer()} - - return &Deployment{ - XMLNS: XMLNS, - XMLNS_I: XMLNS_I, - Name: Name, - DeploymentSlot: DeploymentSlot, - Label: Label, - RoleList: RoleList, - VirtualNetworkName: VirtualNetworkName, - DNS: Dns, - } -} - -func makeCreateStorageServiceInput() *CreateStorageServiceInput { - ServiceName := MakeRandomString(10) - Description := MakeRandomString(10) - Label := MakeRandomString(10) - AffinityGroup := MakeRandomString(10) - Location := MakeRandomString(10) - GeoReplicationEnabled := BoolToString(MakeRandomBool()) - ExtendedProperties := []ExtendedProperty{{ - Name: MakeRandomString(10), - Value: MakeRandomString(10)}} - - return &CreateStorageServiceInput{ - XMLNS: XMLNS, - ServiceName: ServiceName, - Description: Description, - Label: Label, - AffinityGroup: AffinityGroup, - Location: Location, - GeoReplicationEnabled: GeoReplicationEnabled, - ExtendedProperties: ExtendedProperties} -} === removed file 'src/launchpad.net/gwacl/helpers_factory_test.go' --- src/launchpad.net/gwacl/helpers_factory_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/helpers_factory_test.go 1970-01-01 00:00:00 +0000 @@ -1,47 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). -// -// Factories for various types of objects that tests need to create. - -package gwacl - -// This should be refactored at some point, it does not belong in here. -// Perhaps we can add it to gocheck, or start a testtools-like project. -const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890abcdefghijklmnopqrstuvwxyz" - -// MakeRandomString returns an arbitrary string of alphanumerical characters. -// TODO: This isn't really a random string, more of a random identifier. -func MakeRandomString(length int) string { - return string(MakeRandomByteSlice(length)) -} - -// MakeRandomString returns a slice of arbitrary bytes, all corresponding to -// alphanumerical characters in ASCII. -// TODO: This isn't really very random. Good tests need zero and "high" values. 
-func MakeRandomByteSlice(length int) []byte { - dest := make([]byte, length) - for i := range dest { - num := random.Intn(len(chars)) - randChar := chars[num] - dest[i] = randChar - } - return dest -} - -// MakeRandomBool returns an arbitrary bool value (true or false). -func MakeRandomBool() bool { - v := random.Intn(2) - if v == 0 { - return false - } - return true -} - -// MakeRandomPort returns a port number between 1 and 65535 inclusive. -func MakeRandomPort() uint16 { - port := uint16(random.Intn(1 << 16)) - if port == 0 { - return MakeRandomPort() - } - return port -} === removed file 'src/launchpad.net/gwacl/helpers_http_test.go' --- src/launchpad.net/gwacl/helpers_http_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/helpers_http_test.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). -// -// Test helpers for dealing with http requests through the http package. - -package gwacl - -import ( - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// TestTransport is used as an http.Client.Transport for testing. It records -// the latest request, and returns a predetermined Response and error. -type TestTransport struct { - Request *http.Request - Response *http.Response - Error error -} - -// TestTransport implements the http.RoundTripper interface. -var _ http.RoundTripper = &TestTransport{} - -func (t *TestTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - t.Request = req - return t.Response, t.Error -} - -// makeFakeCreatedResponse returns an HTTP response with the Created status. -func makeFakeCreatedResponse() *http.Response { - return &http.Response{ - Status: fmt.Sprintf("%d", http.StatusCreated), - StatusCode: http.StatusCreated, - Body: Empty, - } -} - -// makeResponseBody creates an http response body containing the given string. -// Use this to initialize an http.Response.Body with a given string, without -// having to care about the type details. -func makeResponseBody(content string) io.ReadCloser { - return ioutil.NopCloser(strings.NewReader(content)) -} - -// Convenience factory to create a StorageContext with a random name and -// random base64-encoded key. -func makeStorageContext(transport http.RoundTripper) *StorageContext { - context := &StorageContext{ - Account: MakeRandomString(10), - Key: base64.StdEncoding.EncodeToString(MakeRandomByteSlice(10)), - AzureEndpoint: APIEndpoint("http://" + MakeRandomString(5) + ".example.com/"), - } - context.client = &http.Client{Transport: transport} - return context -} === removed file 'src/launchpad.net/gwacl/helpers_misc_test.go' --- src/launchpad.net/gwacl/helpers_misc_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/helpers_misc_test.go 1970-01-01 00:00:00 +0000 @@ -1,32 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "encoding/base64" - "fmt" - "io" -) - -// b64 is shorthand for base64-encoding a string. -func b64(s string) string { - return base64.StdEncoding.EncodeToString([]byte(s)) -} - -// A Reader and ReadCloser that EOFs immediately. -var Empty io.ReadCloser = makeResponseBody("") - -// BoolToString represents a boolean value as a string ("true" or "false"). 
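// A sketch of how the TestTransport defined in helpers_http_test.go above is
// used: rig a canned response, point an http.Client at it, and inspect the
// recorded request afterwards. Illustrative only; assumes net/http and fmt
// are imported.
func exampleTestTransport() {
    transport := &TestTransport{
        Response: makeFakeCreatedResponse(),
    }
    client := &http.Client{Transport: transport}
    _, _ = client.Post("http://example.com/services", "application/xml", nil)
    // The transport captured the request for later assertions.
    fmt.Println(transport.Request.URL.Path) // "/services"
}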
-func BoolToString(v bool) string {
-    return fmt.Sprintf("%t", v)
-}
-
-// StringToBool parses a string containing a boolean (case-insensitive).
-func StringToBool(v string) (b bool) {
-    items, err := fmt.Sscanf(v, "%t", &b)
-    if err != nil || items != 1 {
-        panic(fmt.Errorf("can't convert '%s' to a bool", v))
-    }
-    return
-}

=== removed file 'src/launchpad.net/gwacl/httperror.go'
--- src/launchpad.net/gwacl/httperror.go 2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/httperror.go 1970-01-01 00:00:00 +0000
@@ -1,129 +0,0 @@
-package gwacl
-
-import (
-    "encoding/xml"
-    "errors"
-    "fmt"
-    "net/http"
-)
-
-// HTTPError is an extended version of the standard "error" interface. It
-// adds an HTTP status code.
-type HTTPError interface {
-    error
-    StatusCode() int
-}
-
-// HTTPStatus is an HTTP status code.
-type HTTPStatus int
-
-// StatusCode returns the HTTP status code as an int.
-func (s HTTPStatus) StatusCode() int {
-    return int(s)
-}
-
-// AzureError is an HTTPError returned by the Azure API. It contains an
-// error message, and an Azure-defined error code.
-type AzureError struct {
-    error      `xml:"-"`
-    HTTPStatus `xml:"-"`
-    Code       string `xml:"Code"`
-    Message    string `xml:"Message"`
-}
-
-// *AzureError implements HTTPError.
-var _ HTTPError = &AzureError{}
-
-func (e *AzureError) Error() string {
-    description := e.error.Error()
-    status := e.StatusCode()
-    name := http.StatusText(status)
-    return fmt.Sprintf("%s: %s - %s (http code %d: %s)", description, e.Code, e.Message, status, name)
-}
-
-// ServerError is a generic HTTPError, without any further helpful information
-// from the server that we can count on.
-type ServerError struct {
-    error
-    HTTPStatus
-}
-
-// *ServerError implements HTTPError.
-var _ HTTPError = &ServerError{}
-
-func (e *ServerError) Error() string {
-    description := e.error.Error()
-    status := e.StatusCode()
-    name := http.StatusText(status)
-    return fmt.Sprintf("%s (%d: %s)", description, status, name)
-}
-
-// newHTTPError returns the appropriate HTTPError implementation for a given
-// HTTP response.
-// It takes a status code and response body, rather than just a standard
-// http.Response object.
-func newHTTPError(status int, body []byte, description string) HTTPError {
-    httpStatus := HTTPStatus(status)
-    baseErr := errors.New(description)
-    azureError := AzureError{error: baseErr, HTTPStatus: httpStatus}
-    err := xml.Unmarshal(body, &azureError)
-    if err != nil {
-        // It's OK if the response body wasn't actually XML... That just means
-        // it wasn't a proper AzureError. We have another error type for that.
-        return &ServerError{error: baseErr, HTTPStatus: httpStatus}
-    }
-    return &azureError
-}
-
-// newAzureErrorFromOperation composes an HTTPError based on an Operation
-// struct, i.e. the result of an asynchronous operation.
-func newAzureErrorFromOperation(outcome *Operation) *AzureError {
-    if outcome.Status != FailedOperationStatus {
-        msg := fmt.Errorf("interpreting Azure %s as an asynchronous failure", outcome.Status)
-        panic(msg)
-    }
-    return &AzureError{
-        error:      errors.New("asynchronous operation failed"),
-        HTTPStatus: HTTPStatus(outcome.HTTPStatusCode),
-        Code:       outcome.ErrorCode,
-        Message:    outcome.ErrorMessage,
-    }
-}
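// A caller-side sketch of the dispatch newHTTPError performs: a body that
// unmarshals as Azure's error XML comes back as *AzureError, anything else
// as *ServerError. Illustrative only; assumes fmt is imported.
func describeHTTPError(status int, body []byte) string {
    err := newHTTPError(status, body, "request failed")
    switch err.(type) {
    case *AzureError:
        return fmt.Sprintf("Azure fault: %v", err)
    default:
        return fmt.Sprintf("plain HTTP fault: %v", err)
    }
}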
-// extendError returns an error whose description is the concatenation of
-// the given message plus the error string from the original error.
-// It preserves the value of the error types it knows about (currently
-// ServerError and AzureError).
-//
-// The main purpose of this function is to offer a unified way to
-// extend the information present in errors while still not losing the
-// additional information present on specific errors that gwacl knows how
-// to extend in a more meaningful way.
-func extendError(err error, message string) error {
-    switch err := err.(type) {
-    case *ServerError:
-        extendedError := *err
-        extendedError.error = fmt.Errorf(message+"%v", err.error)
-        return &extendedError
-    case *AzureError:
-        extendedError := *err
-        extendedError.error = fmt.Errorf(message+"%v", err.error)
-        return &extendedError
-    default:
-        return fmt.Errorf(message+"%v", err)
-    }
-    // This code cannot be reached but Go insists on having a return or a panic
-    // statement at the end of this function. Sigh.
-    panic("invalid extendError state!")
-}
-
-// IsNotFoundError returns whether or not the given error is an error (as
-// returned by a gwacl method) which corresponds to a 'Not Found' error
-// returned by Windows Azure.
-func IsNotFoundError(err error) bool {
-    httpError, ok := err.(HTTPError)
-    if ok {
-        return httpError.StatusCode() == http.StatusNotFound
-    }
-    return false
-}

=== removed file 'src/launchpad.net/gwacl/httperror_test.go'
--- src/launchpad.net/gwacl/httperror_test.go 2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/httperror_test.go 1970-01-01 00:00:00 +0000
@@ -1,148 +0,0 @@
-package gwacl
-
-import (
-    "errors"
-    "fmt"
-    . "launchpad.net/gocheck"
-    "net/http"
-)
-
-type httpErrorSuite struct{}
-
-var _ = Suite(&httpErrorSuite{})
-
-func (suite *httpErrorSuite) TestNewHTTPErrorParsesAzureError(c *C) {
-    description := "upload failed"
-    status := 415
-    code := "CannotUpload"
-    message := "Unknown data format"
-    xml := fmt.Sprintf(`
-        <Error><Code>%s</Code>
-        <Message>%s</Message></Error>`,
-        code, message)
-
-    httpErr := newHTTPError(status, []byte(xml), description)
-
-    azureErr, ok := httpErr.(*AzureError)
-    c.Assert(ok, Equals, true)
-    c.Check(azureErr.StatusCode(), Equals, status)
-    c.Check(azureErr.Code, Equals, code)
-    c.Check(azureErr.Message, Equals, message)
-    c.Check(httpErr, ErrorMatches, ".*"+description+".*")
-    c.Check(httpErr, ErrorMatches, ".*415: Unsupported Media Type.*")
-}
-
-func (suite *httpErrorSuite) TestNewHTTPErrorResortsToServerError(c *C) {
-    description := "could not talk to server"
-    status := 505
-
-    httpErr := newHTTPError(status, []byte{}, description)
-
-    _, ok := httpErr.(*ServerError)
-    c.Assert(ok, Equals, true)
-    c.Check(httpErr.StatusCode(), Equals, status)
-    c.Check(httpErr, ErrorMatches, ".*505: HTTP Version Not Supported.*")
-    c.Check(httpErr, ErrorMatches, ".*"+description+".*")
-}
-
-func (suite *httpErrorSuite) TestAzureErrorComposesError(c *C) {
-    description := "something failed"
-    status := 410
-    httpErr := AzureError{
-        error:      errors.New(description),
-        HTTPStatus: HTTPStatus(status),
-        Code:       "MissingError",
-        Message:    "Your object has disappeared",
-    }
-    c.Check(httpErr.Error(), Equals, "something failed: MissingError - Your object has disappeared (http code 410: Gone)")
-}
-
-func (suite *httpErrorSuite) TestServerErrorComposesError(c *C) {
-    description := "something failed"
-    status := 501
-    httpErr := ServerError{
-        error:      errors.New(description),
-        HTTPStatus: HTTPStatus(status),
-    }
-    c.Check(httpErr.Error(), Equals, "something failed (501: Not Implemented)")
-}
-
-func (suite *httpErrorSuite) TestNewAzureErrorFromOperation(c *C) {
-    status := http.StatusConflict
-    code := MakeRandomString(7)
-    message := MakeRandomString(20)
-    // Test body copied from Azure documentation, mutatis mutandis.
-    body := fmt.Sprintf(`
-        <Operation xmlns="http://schemas.microsoft.com/windowsazure"
-            xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
-          <ID>%s</ID>
-          <Status>Failed</Status>
-          <HttpStatusCode>%d</HttpStatusCode>
-          <Error>
-            <Code>%s</Code>
-            <Message>%s</Message>
-          </Error>
-        </Operation>
-        `,
-        MakeRandomString(5), status, code, message)
-    operation := Operation{}
-    operation.Deserialize([]byte(body))
-
-    err := newAzureErrorFromOperation(&operation)
-    c.Check(err.HTTPStatus, Equals, HTTPStatus(status))
-    c.Check(err.Code, Equals, code)
-    c.Check(err.Message, Equals, message)
-}
-
-func (suite *httpErrorSuite) TestExtendErrorExtendsGenericError(c *C) {
-    errorString := "an-error"
-    error := fmt.Errorf(errorString)
-    additionalErrorMsg := "additional message"
-    newError := extendError(error, additionalErrorMsg)
-    c.Check(newError.Error(), Equals, fmt.Sprintf("%s%s", additionalErrorMsg, error.Error()))
-}
-
-func (suite *httpErrorSuite) TestExtendErrorExtendsServerError(c *C) {
-    err := &ServerError{
-        error:      errors.New("could not talk to server"),
-        HTTPStatus: HTTPStatus(http.StatusGatewayTimeout),
-    }
-    additionalErrorMsg := "additional message: "
-    newError := extendError(err, additionalErrorMsg)
-    newServerError, ok := newError.(*ServerError)
-    c.Assert(ok, Equals, true)
-    c.Check(newError.Error(), Equals, additionalErrorMsg+err.Error())
-    c.Check(newServerError.HTTPStatus, Equals, err.HTTPStatus)
-}
-
-func (suite *httpErrorSuite) TestExtendErrorExtendsAzureError(c *C) {
-    err := &AzureError{
-        error:      errors.New("could not talk to server"),
-        HTTPStatus: HTTPStatus(http.StatusGatewayTimeout),
-    }
-    additionalErrorMsg := "additional message: "
-    newError := extendError(err, additionalErrorMsg)
-    newAzureError, ok := newError.(*AzureError)
-    c.Assert(ok, Equals, true)
-    c.Check(newError.Error(), Equals, additionalErrorMsg+err.Error())
-    c.Check(newAzureError.HTTPStatus, Equals, err.HTTPStatus)
-}
-
-func (suite *httpErrorSuite) TestIsNotFound(c *C) {
-    var testValues = []struct {
-        err            error
-        expectedResult bool
-    }{
-        {fmt.Errorf("generic error"), false},
-        {&AzureError{HTTPStatus: HTTPStatus(http.StatusOK)}, false},
-        {&AzureError{HTTPStatus: HTTPStatus(http.StatusNotFound)}, true},
-        {&AzureError{HTTPStatus: HTTPStatus(http.StatusInternalServerError)}, false},
-        {&ServerError{HTTPStatus: HTTPStatus(http.StatusOK)}, false},
-        {&ServerError{HTTPStatus: HTTPStatus(http.StatusNotFound)}, true},
-        {&ServerError{HTTPStatus: HTTPStatus(http.StatusInternalServerError)}, false},
-    }
-    for _, test := range testValues {
-        c.Check(IsNotFoundError(test.err), Equals, test.expectedResult)
-    }
-}

=== removed directory 'src/launchpad.net/gwacl/logging'
=== removed file 'src/launchpad.net/gwacl/logging/logging.go'
--- src/launchpad.net/gwacl/logging/logging.go 2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/logging/logging.go 1970-01-01 00:00:00 +0000
@@ -1,83 +0,0 @@
-// Copyright 2013 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-package logging
-
-import (
-    "log"
-    "os"
-)
-
-const (
-    DEBUG = 10 * (iota + 1)
-    INFO
-    WARN
-    ERROR
-)
-
-var level = WARN
-
-func init() {
-    setLevel(os.Getenv("LOGLEVEL"))
-}
-
-func setLevel(levelName string) {
-    switch levelName {
-    case "DEBUG":
-        level = DEBUG
-    case "INFO":
-        level = INFO
-    case "WARN":
-        level = WARN
-    case "ERROR":
-        level = ERROR
-    }
-}
-
-func Debug(args ...interface{}) {
-    if level <= DEBUG {
-        log.Println(args...)
-    }
-}
-
-func Debugf(format string, args ...interface{}) {
-    if level <= DEBUG {
-        log.Printf(format, args...)
-    }
-}
-
-func Info(args ...interface{}) {
-    if level <= INFO {
-        log.Println(args...)
- } -} - -func Infof(format string, args ...interface{}) { - if level <= INFO { - log.Printf(format, args...) - } -} - -func Warn(args ...interface{}) { - if level <= WARN { - log.Println(args...) - } -} - -func Warnf(format string, args ...interface{}) { - if level <= WARN { - log.Printf(format, args...) - } -} - -func Error(args ...interface{}) { - if level <= ERROR { - log.Println(args...) - } -} - -func Errorf(format string, args ...interface{}) { - if level <= ERROR { - log.Printf(format, args...) - } -} === removed file 'src/launchpad.net/gwacl/logging/logging_test.go' --- src/launchpad.net/gwacl/logging/logging_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/logging/logging_test.go 1970-01-01 00:00:00 +0000 @@ -1,45 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package logging - -import ( - . "launchpad.net/gocheck" - "testing" -) - -var originalLevel = level - -func restoreLevel() { - level = originalLevel -} - -type testLogging struct{} - -var _ = Suite(&testLogging{}) - -func (suite *testLogging) TestSetLevel(c *C) { - defer restoreLevel() - // The names of the logging constants are recognised arguments to - // setLevel(). - level = -1 - setLevel("DEBUG") - c.Check(level, Equals, DEBUG) - setLevel("INFO") - c.Check(level, Equals, INFO) - setLevel("WARN") - c.Check(level, Equals, WARN) - setLevel("ERROR") - c.Check(level, Equals, ERROR) - // Unrecognised arguments are ignored. - level = -1 - setLevel("FOOBAR") - c.Check(level, Equals, -1) - setLevel("123") - c.Check(level, Equals, -1) -} - -// Master loader for all tests. -func Test(t *testing.T) { - TestingT(t) -} === removed file 'src/launchpad.net/gwacl/management.go' --- src/launchpad.net/gwacl/management.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/management.go 1970-01-01 00:00:00 +0000 @@ -1,430 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - "sort" - "strings" -) - -type ListInstancesRequest struct { - ServiceName string -} - -// ListInstances returns a slice of all instances for all deployments for the -// given hosted service name. -func (api *ManagementAPI) ListInstances(request *ListInstancesRequest) ([]RoleInstance, error) { - instances := []RoleInstance{} - properties, err := api.GetHostedServiceProperties(request.ServiceName, true) - if err != nil { - return nil, err - } - for _, deployment := range properties.Deployments { - instances = append(instances, deployment.RoleInstanceList...) - } - return instances, nil -} - -// ListAllDeploymentsRequest is a parameter object for ListAllDeployments. -type ListAllDeploymentsRequest struct { - // ServiceName restricts the listing to the given service. - ServiceName string -} - -// ListAllDeployments returns a slice containing all deployments that match -// the request. -func (api *ManagementAPI) ListAllDeployments(request *ListAllDeploymentsRequest) ([]Deployment, error) { - properties, err := api.GetHostedServiceProperties(request.ServiceName, true) - if err != nil { - return nil, err - } - return properties.Deployments, nil -} - -// ListDeploymentsRequest is a parameter object for ListDeployments. -type ListDeploymentsRequest struct { - // ServiceName restricts the listing to the given service. 
- ServiceName string - // DeploymentNames is a set (its value type is ignored) that restricts the - // listing to those deployments which it contains. - DeploymentNames []string -} - -// ListDeployments returns a slice containing specific deployments, insofar -// as they match the request. -func (api *ManagementAPI) ListDeployments(request *ListDeploymentsRequest) ([]Deployment, error) { - properties, err := api.GetHostedServiceProperties(request.ServiceName, true) - if err != nil { - return nil, err - } - // Filter the deployment list according to the given names. - filter := make(map[string]bool) - for _, name := range request.DeploymentNames { - filter[name] = true - } - deployments := []Deployment{} - for _, deployment := range properties.Deployments { - if _, ok := filter[deployment.Name]; ok { - deployments = append(deployments, deployment) - } - } - return deployments, nil -} - -type ListSpecificHostedServicesRequest struct { - ServiceNames []string -} - -// ListSpecificHostedServices returns a slice containing specific -// HostedServiceDescriptor objects, insofar as they match the request. -func (api *ManagementAPI) ListSpecificHostedServices(request *ListSpecificHostedServicesRequest) ([]HostedServiceDescriptor, error) { - allServices, err := api.ListHostedServices() - if err != nil { - return nil, err - } - // Filter the service list according to the given names. - filter := make(map[string]bool) - for _, name := range request.ServiceNames { - filter[name] = true - } - services := []HostedServiceDescriptor{} - for _, service := range allServices { - if _, ok := filter[service.ServiceName]; ok { - services = append(services, service) - } - } - return services, nil -} - -type ListPrefixedHostedServicesRequest struct { - ServiceNamePrefix string -} - -// ListPrefixedHostedServices returns a slice containing specific -// HostedServiceDescriptor objects, insofar as they match the request. -func (api *ManagementAPI) ListPrefixedHostedServices(request *ListPrefixedHostedServicesRequest) ([]HostedServiceDescriptor, error) { - services, err := api.ListHostedServices() - if err != nil { - return nil, err - } - resServices := []HostedServiceDescriptor{} - for _, service := range services { - if strings.HasPrefix(service.ServiceName, request.ServiceNamePrefix) { - resServices = append(resServices, service) - } - } - services = resServices - return services, nil -} - -type DestroyDeploymentRequest struct { - ServiceName string - DeploymentName string -} - -// DestroyDeployment brings down all resources within a deployment - running -// instances, disks, etc. - and deletes the deployment itself. -func (api *ManagementAPI) DestroyDeployment(request *DestroyDeploymentRequest) error { - deployment, err := api.GetDeployment(&GetDeploymentRequest{ - ServiceName: request.ServiceName, - DeploymentName: request.DeploymentName, - }) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - // 1. Get the list of the VM disks. - diskNameMap := make(map[string]bool) - for _, role := range deployment.RoleList { - diskNameMap[role.OSVirtualHardDisk.DiskName] = true - } - // 2. Delete deployment. This will delete all the role instances inside - // this deployment as a side effect. - err = api.DeleteDeployment(request.ServiceName, request.DeploymentName) - if err != nil && !IsNotFoundError(err) { - return err - } - // Sort the disk names to aid testing. 
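-    // (Go randomises map iteration order, so iterating over diskNameMap
-    // directly would delete the disks in a nondeterministic order.)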
- diskNames := []string{} - for diskName := range diskNameMap { - diskNames = append(diskNames, diskName) - } - sort.Strings(diskNames) - // 3. Delete the disks. - for _, diskName := range diskNames { - err = api.DeleteDisk(&DeleteDiskRequest{ - DiskName: diskName, - DeleteBlob: true}) - if err != nil && !IsNotFoundError(err) { - return err - } - } - // Done. - return nil -} - -type DestroyHostedServiceRequest struct { - ServiceName string -} - -// DestroyHostedService destroys all of the hosted service's contained -// deployments then deletes the hosted service itself. -func (api *ManagementAPI) DestroyHostedService(request *DestroyHostedServiceRequest) error { - // 1. Get properties. - properties, err := api.GetHostedServiceProperties(request.ServiceName, true) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - // 2. Delete deployments. - for _, deployment := range properties.Deployments { - err := api.DestroyDeployment(&DestroyDeploymentRequest{ - ServiceName: request.ServiceName, - DeploymentName: deployment.Name, - }) - if err != nil { - return err - } - } - // 3. Delete service. - err = api.DeleteHostedService(request.ServiceName) - if err != nil && !IsNotFoundError(err) { - return err - } - // Done. - return nil -} - -func (api *ManagementAPI) AddVirtualNetworkSite(site *VirtualNetworkSite) error { - // Obtain the current network config, which we will then modify. - networkConfig, err := api.GetNetworkConfiguration() - if err != nil { - return err - } - if networkConfig == nil { - // There's no config yet. - networkConfig = &NetworkConfiguration{XMLNS: XMLNS_NC} - } - if networkConfig.VirtualNetworkSites == nil { - networkConfig.VirtualNetworkSites = &[]VirtualNetworkSite{} - } - // Check to see if this network already exists. - for _, existingSite := range *networkConfig.VirtualNetworkSites { - if existingSite.Name == site.Name { - // Network already defined. - return fmt.Errorf("could not add virtual network: %q already exists", site.Name) - } - } - // Add the network to the configuration. - virtualNetworkSites := append(*networkConfig.VirtualNetworkSites, *site) - networkConfig.VirtualNetworkSites = &virtualNetworkSites - // Put it back up to Azure. There's a race here... - return api.SetNetworkConfiguration(networkConfig) -} - -func (api *ManagementAPI) RemoveVirtualNetworkSite(siteName string) error { - // Obtain the current network config, which we will then modify. - networkConfig, err := api.GetNetworkConfiguration() - if err != nil { - return err - } - if networkConfig == nil || networkConfig.VirtualNetworkSites == nil { - // There's no config, nothing to do. - return nil - } - // Remove all references to the specified virtual network site name. - virtualNetworkSites := []VirtualNetworkSite{} - for _, existingSite := range *networkConfig.VirtualNetworkSites { - if existingSite.Name != siteName { - virtualNetworkSites = append(virtualNetworkSites, existingSite) - } - } - if len(virtualNetworkSites) < len(*networkConfig.VirtualNetworkSites) { - // Put it back up to Azure. There's a race here... - networkConfig.VirtualNetworkSites = &virtualNetworkSites - return api.SetNetworkConfiguration(networkConfig) - } - return nil -} - -type ListRoleEndpointsRequest struct { - ServiceName string - DeploymentName string - RoleName string -} - -// ListRoleEndpoints lists the open endpoints for the named service/deployment/role name. 
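// A usage sketch for the endpoint calls below, illustrative only: the
// subscription ID, certificate file and location are placeholders, and
// NewManagementAPI is defined in management_base.go. Assumes fmt is imported.
func printOpenPorts(service, deployment, role string) error {
    api, err := NewManagementAPI("subscription-id", "azure.pem", "West US")
    if err != nil {
        return err
    }
    endpoints, err := api.ListRoleEndpoints(&ListRoleEndpointsRequest{
        ServiceName:    service,
        DeploymentName: deployment,
        RoleName:       role,
    })
    if err != nil {
        return err
    }
    for _, endpoint := range endpoints {
        fmt.Printf("%s: %d/%s\n", endpoint.Name, endpoint.Port, endpoint.Protocol)
    }
    return nil
}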
-func (api *ManagementAPI) ListRoleEndpoints(request *ListRoleEndpointsRequest) ([]InputEndpoint, error) {
-    var err error
-    vmRole, err := api.GetRole(&GetRoleRequest{
-        ServiceName:    request.ServiceName,
-        DeploymentName: request.DeploymentName,
-        RoleName:       request.RoleName})
-
-    if err != nil {
-        return nil, err
-    }
-
-    for i, configSet := range vmRole.ConfigurationSets {
-        if configSet.ConfigurationSetType == CONFIG_SET_NETWORK {
-            endpointsP := vmRole.ConfigurationSets[i].InputEndpoints
-            if endpointsP != nil {
-                return *endpointsP, nil
-            }
-        }
-    }
-    return []InputEndpoint{}, nil
-}
-
-type AddRoleEndpointsRequest struct {
-    ServiceName    string
-    DeploymentName string
-    RoleName       string
-    InputEndpoints []InputEndpoint
-}
-
-// AddRoleEndpoints appends the supplied endpoints to the existing endpoints
-// for the named service/deployment/role. Note that the Azure API
-// leaves this open to a race condition between fetching and updating the role.
-func (api *ManagementAPI) AddRoleEndpoints(request *AddRoleEndpointsRequest) error {
-    var err error
-    vmRole, err := api.GetRole(&GetRoleRequest{
-        ServiceName:    request.ServiceName,
-        DeploymentName: request.DeploymentName,
-        RoleName:       request.RoleName})
-
-    if err != nil {
-        return err
-    }
-
-    for i, configSet := range vmRole.ConfigurationSets {
-        // TODO: Is NetworkConfiguration always present?
-        if configSet.ConfigurationSetType == CONFIG_SET_NETWORK {
-            endpointsP := vmRole.ConfigurationSets[i].InputEndpoints
-            if endpointsP == nil {
-                // No endpoints set at all, initialise it to be empty.
-                vmRole.ConfigurationSets[i].InputEndpoints = &[]InputEndpoint{}
-            }
-            // Append to existing endpoints.
-            // TODO: Check clashing endpoint. LocalPort/Name/Port unique?
-            endpoints := append(
-                *vmRole.ConfigurationSets[i].InputEndpoints,
-                request.InputEndpoints...)
-            vmRole.ConfigurationSets[i].InputEndpoints = &endpoints
-
-            break // Only one NetworkConfiguration so exit loop now.
-        }
-    }
-
-    // Enjoy this race condition.
-    err = api.UpdateRole(&UpdateRoleRequest{
-        ServiceName:      request.ServiceName,
-        DeploymentName:   request.DeploymentName,
-        RoleName:         request.RoleName,
-        PersistentVMRole: vmRole})
-    if err != nil {
-        return err
-    }
-    return nil
-}
-
-// CompareInputEndpoints attempts to compare two InputEndpoint objects in a
-// way that's congruent with how Windows Azure considers them. The name is
-// always ignored, as is LoadBalancerProbe. When LoadBalancedEndpointSetName
-// is set (not the empty string), all the fields - LocalPort, Port, Protocol,
-// VIP and LoadBalancedEndpointSetName - are used for the comparison. When
-// LoadBalancedEndpointSetName is the empty string, all except LocalPort -
-// effectively Port, Protocol and VIP - are used for comparison.
-func CompareInputEndpoints(a, b *InputEndpoint) bool {
-    if a.LoadBalancedEndpointSetName == "" {
-        return a.Port == b.Port && a.Protocol == b.Protocol && a.VIP == b.VIP
-    } else {
-        return (a.LoadBalancedEndpointSetName == b.LoadBalancedEndpointSetName &&
-            a.LocalPort == b.LocalPort && a.Port == b.Port &&
-            a.Protocol == b.Protocol && a.VIP == b.VIP)
-    }
-}
-
-type RemoveRoleEndpointsRequest struct {
-    ServiceName    string
-    DeploymentName string
-    RoleName       string
-    InputEndpoints []InputEndpoint
-}
-
-// RemoveRoleEndpoints attempts to remove the given endpoints from the
-// specified role. It uses `CompareInputEndpoints` to determine when there's a
-// match between the given endpoint and one already configured.
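// An illustration of the comparison rules described above (values invented):
// with no LoadBalancedEndpointSetName set, only Port, Protocol and VIP take
// part in the comparison, so differing names and local ports do not matter.
func exampleEndpointComparison() bool {
    a := InputEndpoint{Name: "ssh", Port: 22, Protocol: "TCP"}
    b := InputEndpoint{Name: "ssh-alt", Port: 22, Protocol: "TCP", LocalPort: 2222}
    return CompareInputEndpoints(&a, &b) // true
}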
-func (api *ManagementAPI) RemoveRoleEndpoints(request *RemoveRoleEndpointsRequest) error { - filterRequest := filterRoleEndpointsRequest{ - ServiceName: request.ServiceName, - DeploymentName: request.DeploymentName, - RoleName: request.RoleName, - Filter: func(a *InputEndpoint) bool { - for _, b := range request.InputEndpoints { - if CompareInputEndpoints(a, &b) { - return false - } - } - return true - }, - } - return api.filterRoleEndpoints(&filterRequest) -} - -// Returns true to keep the endpoint defined in the role's configuration, -// false to remove it. It is also welcome to mutate endpoints; they are passed -// by reference. -type inputEndpointFilter func(*InputEndpoint) bool - -type filterRoleEndpointsRequest struct { - ServiceName string - DeploymentName string - RoleName string - Filter inputEndpointFilter -} - -// filterRoleEndpoints is a general role endpoint filtering function. It is -// private because it is only a support function for RemoveRoleEndpoints, and -// is not tested directly. -func (api *ManagementAPI) filterRoleEndpoints(request *filterRoleEndpointsRequest) error { - role, err := api.GetRole(&GetRoleRequest{ - ServiceName: request.ServiceName, - DeploymentName: request.DeploymentName, - RoleName: request.RoleName, - }) - if err != nil { - return err - } - for index, configSet := range role.ConfigurationSets { - if configSet.ConfigurationSetType == CONFIG_SET_NETWORK { - if configSet.InputEndpoints != nil { - endpoints := []InputEndpoint{} - for _, existingEndpoint := range *configSet.InputEndpoints { - if request.Filter(&existingEndpoint) { - endpoints = append(endpoints, existingEndpoint) - } - } - if len(endpoints) == 0 { - configSet.InputEndpoints = nil - } else { - configSet.InputEndpoints = &endpoints - } - } - } - // Update the role; implicit copying is a nuisance. - role.ConfigurationSets[index] = configSet - } - return api.UpdateRole(&UpdateRoleRequest{ - ServiceName: request.ServiceName, - DeploymentName: request.DeploymentName, - RoleName: request.RoleName, - PersistentVMRole: role, - }) -} === removed file 'src/launchpad.net/gwacl/management_base.go' --- src/launchpad.net/gwacl/management_base.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/management_base.go 1970-01-01 00:00:00 +0000 @@ -1,737 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "encoding/xml" - "fmt" - "net/http" - "path" - "strings" - "time" - - "launchpad.net/gwacl/fork/tls" -) - -// baseAPIVersion is the minimum API version used for all API calls. -// -// Note: all methods have been audited for use with this API version, -// so do not change this without having done so again. -const baseAPIVersion = "2015-04-01" - -// Note: each API call is required to include a version string in the request header. -// These may often be the same string, but need to be kept as strings rather than being -// pulled out and replaced with a constant, each API call may be individually changed. - -type ManagementAPI struct { - session *x509Session - // The interval used when polling the server. - // Set this to 0 to prevent polling from happening. When polling is - // disabled, the API methods return immediately after the request has - // been issued so the caller's code will have to manually deal with the - // possibility that the triggered asynchronous operation might still - // not be completed. 
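-    // The constructors below initialise this to DefaultPollerInterval.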
- PollerInterval time.Duration - - // The duration after which the polling is terminated. - PollerTimeout time.Duration -} - -// The default interval used when polling the server to get the status of a -// running operation. -const DefaultPollerInterval = 10 * time.Second - -// The default duration after which the polling is terminated. -const DefaultPollerTimeout = 20 * time.Minute - -// NewManagementAPIWithRetryPolicy creates an object used to interact with -// Windows Azure's API. Certificate data is provided through a file path; -// the file must contain the private key, then the certificate, both in PEM format. -// http://msdn.microsoft.com/en-us/library/windowsazure/ff800682.aspx -func NewManagementAPIWithRetryPolicy(subscriptionId, certFile, location string, policy RetryPolicy) (*ManagementAPI, error) { - session, err := newX509Session(subscriptionId, certFile, location, policy) - if err != nil { - return nil, err - } - api := ManagementAPI{session, DefaultPollerInterval, DefaultPollerTimeout} - return &api, nil -} - -// NewManagementAPI creates an object used to interact with Windows Azure's API. -// Certificate data is provided through a file path; the file must contain the -// private key, then the certificate, both in PEM format. -// http://msdn.microsoft.com/en-us/library/windowsazure/ff800682.aspx -func NewManagementAPI(subscriptionId, certFile, location string) (*ManagementAPI, error) { - return NewManagementAPIWithRetryPolicy(subscriptionId, certFile, location, NoRetryPolicy) -} - -// NewManagementAPICertDataWithRetryPolicy creates an object used to interact with -// Windows Azure's API. Certificate and private key data are provided through input -// byte arrays, each containing data in PEM format. -// http://msdn.microsoft.com/en-us/library/windowsazure/ff800682.aspx -func NewManagementAPICertDataWithRetryPolicy(subscriptionId string, cert, key []byte, location string, policy RetryPolicy) (*ManagementAPI, error) { - session, err := newX509SessionCertData(subscriptionId, cert, key, location, policy) - if err != nil { - return nil, err - } - api := ManagementAPI{session, DefaultPollerInterval, DefaultPollerTimeout} - return &api, nil -} - -// NewManagementAPICertData creates an object used to interact with Windows Azure's API. -// Certificate and private key data are provided through input byte arrays, -// each containing data in PEM format. -// http://msdn.microsoft.com/en-us/library/windowsazure/ff800682.aspx -func NewManagementAPICertData(subscriptionId string, cert, key []byte, location string) (*ManagementAPI, error) { - return NewManagementAPICertDataWithRetryPolicy(subscriptionId, cert, key, location, NoRetryPolicy) -} - -// NewManagementAPICertsWithRetryPolicy creates an object used to interact with -// Windows Azure's API. -// http://msdn.microsoft.com/en-us/library/windowsazure/ff800682.aspx -func NewManagementAPICertsWithRetryPolicy(subscriptionId string, certs []tls.Certificate, location string, policy RetryPolicy) (*ManagementAPI, error) { - session, err := newX509SessionCerts(subscriptionId, certs, location, policy) - if err != nil { - return nil, err - } - api := ManagementAPI{session, DefaultPollerInterval, DefaultPollerTimeout} - return &api, nil -} - -// NewManagementAPICerts creates an object used to interact with Windows Azure's API. 
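-// Unlike the CertData variants above, the certificates are supplied already
-// parsed, as a slice of tls.Certificate values.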
-// http://msdn.microsoft.com/en-us/library/windowsazure/ff800682.aspx -func NewManagementAPICerts(subscriptionId string, certs []tls.Certificate, location string) (*ManagementAPI, error) { - return NewManagementAPICertsWithRetryPolicy(subscriptionId, certs, location, NoRetryPolicy) -} - -var operationIDHeaderName = http.CanonicalHeaderKey("x-ms-request-id") - -// getOperationID extracts the Windows Azure operation ID from the headers -// of the given x509Response. -func getOperationID(response *x509Response) (string, error) { - header := response.Header[operationIDHeaderName] - if header != nil && len(header) != 0 { - return header[0], nil - } - err := fmt.Errorf("no operation header (%v) found in response", operationIDHeaderName) - return "", err -} - -func (api *ManagementAPI) GetRetryPolicy() RetryPolicy { - return api.session.retryPolicy -} - -// blockUntilCompleted blocks and polls for completion of an Azure operation. -// The "response" parameter is the result of the request that started the -// operation. If the response says that the operation is running -// asynchronously, this function will block and poll until the operation is -// finished. On the other hand, if the response was a failure or a synchronous -// result, the function returns immediately. -func (api *ManagementAPI) blockUntilCompleted(response *x509Response) error { - switch response.StatusCode { - case http.StatusAccepted: - // Asynchronous. Fall out of the switch and start blocking. - case http.StatusOK, http.StatusCreated, http.StatusNoContent: - // Simple success. Sometimes it happens; enjoy it. - return nil - default: - // Request failed, synchronously. - return newHTTPError(response.StatusCode, response.Body, "request failed") - } - - if api.PollerInterval == 0 { - // Polling has been disabled for test purposes. Return immediately. - return nil - } - operationID, err := getOperationID(response) - if err != nil { - return fmt.Errorf("could not interpret asynchronous response: %v", err) - } - poller := newOperationPoller(api, operationID) - operation, err := performOperationPolling(poller, api.PollerInterval, api.PollerTimeout) - if err != nil { - return err - } - if operation.Status != SucceededOperationStatus { - return newAzureErrorFromOperation(operation) - } - return nil -} - -// ListLocations lists the Azure data centre locations, and details -// about each location (e.g. role sizes available). -func (api *ManagementAPI) ListLocations() ([]Location, error) { - var locations struct { - Locations []Location `xml:"Location"` - } - response, err := api.session.get("locations", "2014-05-01") - if err != nil { - return nil, err - } - err = xml.Unmarshal(response.Body, &locations) - return locations.Locations, err -} - -// ListOSImages retrieves the list of available operating system disk images -// from the Azure management API. -// Images are returned in the order in which Azure lists them. -// http://msdn.microsoft.com/en-us/library/windowsazure/jj157191.aspx -func (api *ManagementAPI) ListOSImages() (*Images, error) { - response, err := api.session.get("services/images", baseAPIVersion) - if err != nil { - return nil, err - } - images := Images{} - err = xml.Unmarshal(response.Body, &images) - return &images, err -} - -// ListHostedServices loads a list of HostedServiceDescriptor objects from the -// Azure management API. -// HostedServiceDescriptor objects contains a small subset of the fields present in -// HostedService objects. 
-// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460781.aspx -func (api *ManagementAPI) ListHostedServices() ([]HostedServiceDescriptor, error) { - res, err := api.session.get("services/hostedservices", baseAPIVersion) - if err != nil { - return nil, err - } - hostedServices := HostedServiceDescriptorList{} - err = xml.Unmarshal(res.Body, &hostedServices) - return hostedServices.HostedServices, err -} - -// UpdateHostedService updates the provided values on the named service. -// Use NewUpdateHostedService() to create an UpdateHostedService params object. -// See http://msdn.microsoft.com/en-us/library/windowsazure/gg441303.aspx -func (api *ManagementAPI) UpdateHostedService(serviceName string, params *UpdateHostedService) error { - var err error - checkPathComponents(serviceName) - URI := "services/hostedservices/" + serviceName - body, err := params.Serialize() - if err != nil { - return err - } - response, err := api.session.put(URI, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -// GetHostedServiceProperties loads a HostedService object from the Azure -// management API. -// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460806.aspx -func (api *ManagementAPI) GetHostedServiceProperties( - serviceName string, embedDetail bool) (*HostedService, error) { - checkPathComponents(serviceName) - URI := "services/hostedservices/" + serviceName + "?embed-detail=" - switch embedDetail { - case true: - URI += "true" - case false: - URI += "false" - } - res, err := api.session.get(URI, baseAPIVersion) - if err != nil { - return nil, err - } - hostedService := HostedService{} - err = xml.Unmarshal(res.Body, &hostedService) - return &hostedService, err -} - -// AddHostedService adds a hosted service. -// This is an asynchronous operation on Azure, but this call blocks until the -// operation is completed. -// This is actually called CreateHostedService in the Azure documentation. -// See http://msdn.microsoft.com/en-us/library/windowsazure/gg441304.aspx -func (api *ManagementAPI) AddHostedService(definition *CreateHostedService) error { - URI := "services/hostedservices" - body, err := marshalXML(definition) - if err != nil { - return err - } - response, err := api.session.post(URI, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -// CheckHostedServiceNameAvailability looks to see if the supplied name is -// acceptable to use as a cloud service name. It returns nil if it is available -// or an error containing the reason if it is not. Names may not be acceptable -// based on reserved words, trademarks and profanity. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj154116.aspx -func (api *ManagementAPI) CheckHostedServiceNameAvailability(name string) error { - var err error - response, err := api.session.get( - "services/hostedservices/operations/isavailable/"+name, baseAPIVersion) - if err != nil { - return err - } - - availability := &AvailabilityResponse{} - err = availability.Deserialize(response.Body) - if err != nil { - return err - } - if strings.ToLower(availability.Result) == "true" { - return nil - } - return fmt.Errorf(availability.Reason) -} - -// DeleteHostedService deletes the named hosted service. 
-// See http://msdn.microsoft.com/en-us/library/windowsazure/gg441305.aspx -func (api *ManagementAPI) DeleteHostedService(serviceName string) error { - url := "services/hostedservices/" + serviceName - url = addURLQueryParams(url, "comp", "media") - response, err := api.session.delete(url, baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - return api.blockUntilCompleted(response) -} - -// AddDeployment adds a virtual machine deployment. -// This is an asynchronous operation on Azure, but this call blocks until the -// operation is completed. -// This is actually called CreateDeployment in the Azure documentation. -// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460813.aspx -func (api *ManagementAPI) AddDeployment(definition *Deployment, serviceName string) error { - checkPathComponents(serviceName) - URI := "services/hostedservices/" + serviceName + "/deployments" - body, err := marshalXML(definition) - if err != nil { - return err - } - response, err := api.session.post(URI, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -// DeleteDeployment deletes the named deployment from the named hosted service. -// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460815.aspx -func (api *ManagementAPI) DeleteDeployment(serviceName string, deploymentName string) error { - path := "services/hostedservices/" + serviceName + "/deployments/" + deploymentName - response, err := api.session.delete(path, baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - return api.blockUntilCompleted(response) -} - -type GetDeploymentRequest struct { - ServiceName string - DeploymentName string -} - -// GetDeployment returns a Deployment object for the named hosted service and -// deployment name. -// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460804.aspx -func (api *ManagementAPI) GetDeployment(request *GetDeploymentRequest) (*Deployment, error) { - checkPathComponents(request.ServiceName) - checkPathComponents(request.DeploymentName) - path := "services/hostedservices/" + request.ServiceName + "/deployments/" + request.DeploymentName - response, err := api.session.get(path, baseAPIVersion) - if err != nil { - return nil, err - } - deployment := Deployment{} - err = deployment.Deserialize(response.Body) - if err != nil { - return nil, err - } - return &deployment, nil -} - -// AddStorageAccount starts the creation of a storage account. This is -// called a storage service in the Azure API, but nomenclature seems to -// have changed. -// This is an asynchronous operation on Azure, but the call blocks until the -// operation is completed. -// This is actually called CreateStorageAccount in the Azure documentation. -// See http://msdn.microsoft.com/en-us/library/windowsazure/hh264518.aspx -func (api *ManagementAPI) AddStorageAccount(definition *CreateStorageServiceInput) error { - uri := "services/storageservices" - body, err := marshalXML(definition) - if err != nil { - return err - } - response, err := api.session.post(uri, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -// DeleteStorageAccount deletes a storage account. 
-// See http://msdn.microsoft.com/en-us/library/windowsazure/hh264517.aspx -func (api *ManagementAPI) DeleteStorageAccount(storageAccountName string) error { - response, err := api.session.delete("services/storageservices/"+storageAccountName, baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - return api.blockUntilCompleted(response) -} - -// GetStorageAccountKeys retrieves a storage account's primary and secondary -// access keys from the Azure service. -// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460785.aspx -func (api *ManagementAPI) GetStorageAccountKeys(accountName string) (*StorageAccountKeys, error) { - url := "services/storageservices/" + accountName + "/keys" - res, err := api.session.get(url, baseAPIVersion) - if err != nil { - return nil, err - } - keys := StorageAccountKeys{} - err = keys.Deserialize(res.Body) - if err != nil { - return nil, err - } - return &keys, nil -} - -type DeleteDiskRequest struct { - DiskName string // Name of the disk to delete. - DeleteBlob bool // Whether to delete the associated blob storage. -} - -// DeleteDisk deletes the named OS/data disk. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157200.aspx -func (api *ManagementAPI) DeleteDisk(request *DeleteDiskRequest) error { - // Use the disk deletion poller to work around a bug in Windows Azure. - // See the documentation in the file deletedisk.go for details. - poller := diskDeletePoller{api, request.DiskName, request.DeleteBlob} - _, err := performPolling(poller, deleteDiskInterval, deleteDiskTimeout) - return err -} - -func (api *ManagementAPI) _DeleteDisk(diskName string, deleteBlob bool) error { - url := "services/disks/" + diskName - if deleteBlob { - url = addURLQueryParams(url, "comp", "media") - } - response, err := api.session.delete(url, baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - return api.blockUntilCompleted(response) -} - -type AddDataDiskRequest struct { - ServiceName string - DeploymentName string - RoleName string - DataVirtualHardDisk DataVirtualHardDisk -} - -// AddDataDisk requests the addition of a data disk to a VM role. -// See https://msdn.microsoft.com/en-us/library/azure/jj157199.aspx -func (api *ManagementAPI) AddDataDisk(req *AddDataDiskRequest) error { - url := path.Join( - "services", "hostedservices", req.ServiceName, - "deployments", req.DeploymentName, - "roles", req.RoleName, - "DataDisks", - ) - disk := req.DataVirtualHardDisk - disk.XMLNS = XMLNS - disk.XMLNS_I = XMLNS_I - body, err := marshalXML(&disk) - if err != nil { - return err - } - response, err := api.session.post(url, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -// ListDisks requests a list of disks in the account's image repository. -// See https://msdn.microsoft.com/en-us/library/azure/jj157176.aspx -func (api *ManagementAPI) ListDisks() ([]Disk, error) { - var disks struct { - Disks []Disk `xml:"Disk"` - } - response, err := api.session.get("services/disks", baseAPIVersion) - if err != nil { - return nil, err - } - err = xml.Unmarshal(response.Body, &disks) - return disks.Disks, err -} - -// Perform an operation on the specified role (as defined by serviceName, -// deploymentName and roleName) This is an asynchronous operation on Azure, but -// the call blocks until the operation is completed. 
-func (api *ManagementAPI) performRoleOperation(serviceName, deploymentName, roleName, apiVersion string, operation *RoleOperation) error { - checkPathComponents(serviceName, deploymentName, roleName) - URI := "services/hostedservices/" + serviceName + "/deployments/" + deploymentName + "/roleinstances/" + roleName + "/Operations" - body, err := marshalXML(operation) - if err != nil { - return err - } - response, err := api.session.post(URI, apiVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -type performRoleOperationRequest struct { - ServiceName string - DeploymentName string - RoleName string -} - -type StartRoleRequest performRoleOperationRequest - -// StartRole starts the named Role. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157189.aspx -func (api *ManagementAPI) StartRole(request *StartRoleRequest) error { - return api.performRoleOperation( - request.ServiceName, request.DeploymentName, request.RoleName, - baseAPIVersion, startRoleOperation) -} - -type RestartRoleRequest performRoleOperationRequest - -// RestartRole restarts the named Role. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157197.aspx -func (api *ManagementAPI) RestartRole(request *RestartRoleRequest) error { - return api.performRoleOperation( - request.ServiceName, request.DeploymentName, request.RoleName, - baseAPIVersion, restartRoleOperation) -} - -type ShutdownRoleRequest performRoleOperationRequest - -// ShutdownRole shuts down the named Role. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157195.aspx -func (api *ManagementAPI) ShutdownRole(request *ShutdownRoleRequest) error { - return api.performRoleOperation( - request.ServiceName, request.DeploymentName, request.RoleName, - baseAPIVersion, shutdownRoleOperation) -} - -type GetRoleRequest performRoleOperationRequest - -// GetRole requests the role data for the specified role. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157193.aspx -func (api *ManagementAPI) GetRole(request *GetRoleRequest) (*PersistentVMRole, error) { - checkPathComponents( - request.ServiceName, request.DeploymentName, request.RoleName) - url := ("services/hostedservices/" + request.ServiceName + - "/deployments/" + request.DeploymentName + "/roles/" + - request.RoleName) - response, err := api.session.get(url, baseAPIVersion) - if err != nil { - return nil, err - } - role := PersistentVMRole{} - err = role.Deserialize(response.Body) - if err != nil { - return nil, err - } - return &role, nil -} - -type UpdateRoleRequest struct { - // It would be nice to inherit ServiceName, DeploymentName and RoleName - // from performRoleOperationRequest... alas, struct embedding is too - // clunky, so copy-n-paste it is. My kingdom for a macro! - ServiceName string - DeploymentName string - RoleName string - PersistentVMRole *PersistentVMRole -} - -// UpdateRole pushes a PersistentVMRole document back up to Azure for the -// specified role. 
See -// http://msdn.microsoft.com/en-us/library/windowsazure/jj157187.aspx -func (api *ManagementAPI) UpdateRole(request *UpdateRoleRequest) error { - checkPathComponents( - request.ServiceName, request.DeploymentName, request.RoleName) - url := ("services/hostedservices/" + request.ServiceName + - "/deployments/" + request.DeploymentName + "/roles/" + - request.RoleName) - role, err := request.PersistentVMRole.Serialize() - if err != nil { - return err - } - response, err := api.session.put(url, baseAPIVersion, []byte(role), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -type DeleteRoleRequest struct { - ServiceName string - DeploymentName string - RoleName string - DeleteMedia bool -} - -// DeleteRole deletes a named Role from within the specified Cloud Service -// and Deployment. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157187.aspx -func (api *ManagementAPI) DeleteRole(request *DeleteRoleRequest) error { - checkPathComponents(request.ServiceName, request.DeploymentName, request.RoleName) - url := ("services/hostedservices/" + request.ServiceName + - "/deployments/" + request.DeploymentName + "/roles/" + request.RoleName) - if request.DeleteMedia { - url = addURLQueryParams(url, "comp", "media") - } - response, err := api.session.delete(url, baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - return api.blockUntilCompleted(response) -} - -type AddRoleRequest struct { - ServiceName string - DeploymentName string - PersistentVMRole *PersistentVMRole -} - -// AddRole creates a new Role within the specified Cloud Service -// and Deployment. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157187.aspx -func (api *ManagementAPI) AddRole(request *AddRoleRequest) error { - checkPathComponents(request.ServiceName, request.DeploymentName) - url := ("services/hostedservices/" + request.ServiceName + - "/deployments/" + request.DeploymentName + "/roles") - role, err := request.PersistentVMRole.Serialize() - if err != nil { - return err - } - response, err := api.session.post(url, baseAPIVersion, []byte(role), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -type CreateAffinityGroupRequest struct { - CreateAffinityGroup *CreateAffinityGroup -} - -// CreateAffinityGroup sends a request to make a new affinity group. -// See http://msdn.microsoft.com/en-us/library/windowsazure/gg715317.aspx -func (api *ManagementAPI) CreateAffinityGroup(request *CreateAffinityGroupRequest) error { - var err error - url := "affinitygroups" - body, err := request.CreateAffinityGroup.Serialize() - if err != nil { - return err - } - response, err := api.session.post(url, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -type UpdateAffinityGroupRequest struct { - Name string - UpdateAffinityGroup *UpdateAffinityGroup -} - -// UpdateAffinityGroup sends a request to update the named affinity group. 
-// See http://msdn.microsoft.com/en-us/library/windowsazure/gg715316.aspx -func (api *ManagementAPI) UpdateAffinityGroup(request *UpdateAffinityGroupRequest) error { - var err error - checkPathComponents(request.Name) - url := "affinitygroups/" + request.Name - body, err := request.UpdateAffinityGroup.Serialize() - if err != nil { - return err - } - response, err := api.session.put(url, baseAPIVersion, []byte(body), "application/xml") - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} - -type DeleteAffinityGroupRequest struct { - Name string -} - -// DeleteAffinityGroup requests a deletion of the named affinity group. -// See http://msdn.microsoft.com/en-us/library/windowsazure/gg715314.aspx -func (api *ManagementAPI) DeleteAffinityGroup(request *DeleteAffinityGroupRequest) error { - checkPathComponents(request.Name) - url := "affinitygroups/" + request.Name - response, err := api.session.delete(url, baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil - } - return err - } - return api.blockUntilCompleted(response) -} - -// GetNetworkConfiguration gets the network configuration for this -// subscription. If there is no network configuration the configuration will -// be nil. -// See http://msdn.microsoft.com/en-us/library/windowsazure/jj157196.aspx -func (api *ManagementAPI) GetNetworkConfiguration() (*NetworkConfiguration, error) { - response, err := api.session.get("services/networking/media", baseAPIVersion) - if err != nil { - if IsNotFoundError(err) { - return nil, nil - } - return nil, err - } - networkConfig := NetworkConfiguration{} - err = networkConfig.Deserialize(response.Body) - if err != nil { - return nil, err - } - return &networkConfig, nil -} - -// SetNetworkConfiguration sets the network configuration for this -// subscription. See: -// http://msdn.microsoft.com/en-us/library/windowsazure/jj157181.aspx -func (api *ManagementAPI) SetNetworkConfiguration(cfg *NetworkConfiguration) error { - var err error - body, err := cfg.Serialize() - if err != nil { - return err - } - response, err := api.session.put( - "services/networking/media", baseAPIVersion, []byte(body), - "text/plain", - ) - if err != nil { - return err - } - return api.blockUntilCompleted(response) -} === removed file 'src/launchpad.net/gwacl/management_base_test.go' --- src/launchpad.net/gwacl/management_base_test.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/management_base_test.go 1970-01-01 00:00:00 +0000 @@ -1,1542 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "bytes" - "encoding/base64" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net/url" - "strings" - "time" - - . "launchpad.net/gocheck" - "launchpad.net/gwacl/dedent" - "launchpad.net/gwacl/fork/http" -) - -type managementBaseAPISuite struct { - x509DispatcherFixture - oldPollerInterval time.Duration -} - -var _ = Suite(&managementBaseAPISuite{}) - -func makeX509ResponseWithOperationHeader(operationID string) *x509Response { - header := http.Header{} - header.Set(operationIDHeaderName, operationID) - response := x509Response{ - StatusCode: http.StatusAccepted, - Header: header, - } - return &response -} - -func makeAPI(c *C) *ManagementAPI { - subscriptionId := "subscriptionId" - api, err := NewManagementAPI(subscriptionId, "", "West US") - c.Assert(err, IsNil) - // Polling is disabled by default. 
- api.PollerInterval = 0 - return api -} - -func (suite *managementBaseAPISuite) TestGetOperationIDExtractsHeader(c *C) { - operationID := "operationID" - response := makeX509ResponseWithOperationHeader(operationID) - - returnedOperationID, err := getOperationID(response) - c.Assert(err, IsNil) - c.Check(returnedOperationID, Equals, operationID) -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedSucceedsOnSyncSuccess(c *C) { - // Set of expected error returns for success statuses. - // (They all map to nil. It makes it easier further down to report - // unexpected errors in a helpful way). - expectedErrors := map[int]error{ - http.StatusOK: nil, - http.StatusCreated: nil, - http.StatusNoContent: nil, - } - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - - receivedErrors := make(map[int]error) - for status := range expectedErrors { - err := api.blockUntilCompleted(&x509Response{StatusCode: status}) - receivedErrors[status] = err - } - - c.Check(receivedErrors, DeepEquals, expectedErrors) -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedReturnsHTTPErrorOnSyncFailure(c *C) { - // Set of failure statuses, and whether they're supposed to return - // HTTPError. - // (They all map to true. It makes it easier further down to report - // failures in a helpful way). - expectedErrors := map[int]bool{ - http.StatusBadRequest: true, - http.StatusUnauthorized: true, - http.StatusForbidden: true, - http.StatusNotFound: true, - http.StatusConflict: true, - http.StatusInternalServerError: true, - http.StatusNotImplemented: true, - } - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - - receivedErrors := make(map[int]bool) - for status := range expectedErrors { - err := api.blockUntilCompleted(&x509Response{StatusCode: status}) - _, ok := err.(HTTPError) - receivedErrors[status] = ok - } - - c.Check(receivedErrors, DeepEquals, expectedErrors) -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedPollsOnAsyncOperation(c *C) { - const operationID = "async-operation-id" - // We set the dispatcher up for failure, and prove that - // blockUntilCompleted() makes a polling request. 
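In miniature, the asynchronous contract these tests exercise: a 202 response carries the operation ID in a header, the client extracts it with getOperationID, and blockUntilCompleted then polls the operations endpoint with that ID. A sketch using the helpers defined above:

func exampleOperationID(c *C) {
    // A 202 Accepted whose header names the asynchronous operation.
    response := makeX509ResponseWithOperationHeader("example-operation-id")
    operationID, err := getOperationID(response)
    c.Assert(err, IsNil)
    c.Check(operationID, Equals, "example-operation-id")
}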
- failure := errors.New("Simulated failure") - rigFailingDispatcher(failure) - requests := make([]*X509Request, 0) - rigRecordingDispatcher(&requests) - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - accepted := makeX509ResponseWithOperationHeader(operationID) - - err := api.blockUntilCompleted(accepted) - - c.Check(err, Equals, failure) - c.Assert(len(requests), Equals, 1) - requestURL := requests[0].URL - urlParts := strings.Split(requestURL, "/") - polledOperationID := urlParts[len(urlParts)-1] - c.Check(polledOperationID, Equals, operationID) -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedErrorsIfAsyncOperationFails(c *C) { - response := DispatcherResponse{ - response: &x509Response{ - Body: []byte(fmt.Sprintf(operationXMLTemplate, "Failed")), - StatusCode: http.StatusOK, - }, - errorObject: nil} - responses := []DispatcherResponse{response} - rigPreparedResponseDispatcher(responses) - operationID := "operationID" - operationResponse := makeX509ResponseWithOperationHeader(operationID) - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - - err := api.blockUntilCompleted(operationResponse) - c.Check(err, ErrorMatches, ".*asynchronous operation failed.*") -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedErrorsOnInvalidXML(c *C) { - response := DispatcherResponse{ - response: &x509Response{ - Body: []byte(">invalidXML<"), - StatusCode: http.StatusOK, - }, - errorObject: nil} - responses := []DispatcherResponse{response} - rigPreparedResponseDispatcher(responses) - operationID := "operationID" - operationResponse := makeX509ResponseWithOperationHeader(operationID) - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - - err := api.blockUntilCompleted(operationResponse) - c.Check(err, FitsTypeOf, new(xml.SyntaxError)) -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedErrorsIfPollingFails(c *C) { - response := DispatcherResponse{ - response: &x509Response{}, - errorObject: fmt.Errorf("unexpected error")} - responses := []DispatcherResponse{response} - rigPreparedResponseDispatcher(responses) - operationID := "operationID" - operationResponse := makeX509ResponseWithOperationHeader(operationID) - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - - err := api.blockUntilCompleted(operationResponse) - c.Check(err, ErrorMatches, ".*unexpected error.*") -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedErrorsCanTimeout(c *C) { - response := &x509Response{ - Body: []byte(fmt.Sprintf(operationXMLTemplate, InProgressOperationStatus)), - StatusCode: http.StatusOK, - } - rigFixedResponseDispatcher(response) - operationID := "operationID" - operationResponse := makeX509ResponseWithOperationHeader(operationID) - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - api.PollerTimeout = 2 * time.Nanosecond - - err := api.blockUntilCompleted(operationResponse) - c.Check(err, ErrorMatches, ".*polling timed out.*") -} - -func (suite *managementBaseAPISuite) TestBlockUntilCompletedSucceedsIfAsyncOperationSucceeds(c *C) { - response := DispatcherResponse{ - response: &x509Response{ - Body: []byte(fmt.Sprintf(operationXMLTemplate, "Succeeded")), - StatusCode: http.StatusOK, - }, - errorObject: nil} - responses := []DispatcherResponse{response} - rigPreparedResponseDispatcher(responses) - operationID := "operationID" - operationResponse := makeX509ResponseWithOperationHeader(operationID) - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - - err := api.blockUntilCompleted(operationResponse) - 
c.Assert(err, IsNil) -} - -var testKey = []byte(dedent.Dedent(` - -----BEGIN PRIVATE KEY----- - MIIBCgIBADANBgkqhkiG9w0BAQEFAASB9TCB8gIBAAIxAKQGQxP1i0VfCWn4KmMP - taUFn8sMBKjP/9vHnUYdZRvvmoJCA1C6arBUDp8s2DNX+QIDAQABAjBLRqhwN4dU - LfqHDKJ/Vg1aD8u3Buv4gYRBxdFR5PveyqHSt5eJ4g/x/4ndsvr2OqUCGQDNfNlD - zxHCiEAwZZAPaAkn8jDkFupTljcCGQDMWCujiVZ1NNuBD/N32Yt8P9JDiNzZa08C - GBW7VXLxbExpgnhb1V97vjQmTfthXQjYAwIYSTEjoFXm4+Bk5xuBh2IidgSeGZaC - FFY9AhkAsteo31cyQw2xJ80SWrmsIw+ps7Cvt5W9 - -----END PRIVATE KEY----- - `[1:])) -var testCert = []byte(dedent.Dedent(` - -----BEGIN CERTIFICATE----- - MIIBDzCByqADAgECAgkAgIBb3+lSwzEwDQYJKoZIhvcNAQEFBQAwFTETMBEGA1UE - AxQKQEhvc3ROYW1lQDAeFw0xMzA3MTkxNjA1NTRaFw0yMzA3MTcxNjA1NTRaMBUx - EzARBgNVBAMUCkBIb3N0TmFtZUAwTDANBgkqhkiG9w0BAQEFAAM7ADA4AjEApAZD - E/WLRV8JafgqYw+1pQWfywwEqM//28edRh1lG++agkIDULpqsFQOnyzYM1f5AgMB - AAGjDTALMAkGA1UdEwQCMAAwDQYJKoZIhvcNAQEFBQADMQABKfn08tKfzzqMMD2w - PI2fs3bw5bRH8tmGjrsJeEdp9crCBS8I3hKcxCkTTRTowdY= - -----END CERTIFICATE----- - `[1:])) -var testCertKey = bytes.Join([][]byte{testKey, testCert}, []byte{}) - -func (suite *managementBaseAPISuite) TestNewManagementAPI(c *C) { - subscriptionId := "subscriptionId" - certDir := c.MkDir() - certFile := certDir + "/cert.pem" - err := ioutil.WriteFile(certFile, testCertKey, 0600) - c.Assert(err, IsNil) - - api, err := NewManagementAPI(subscriptionId, certFile, "West US") - c.Assert(err, IsNil) - - c.Assert(api.session.subscriptionId, DeepEquals, subscriptionId) - c.Assert(api.session.retryPolicy, DeepEquals, NoRetryPolicy) -} - -func (suite *managementBaseAPISuite) TestNewManagementAPIWithRetryPolicy(c *C) { - subscriptionId := "subscriptionId" - certDir := c.MkDir() - certFile := certDir + "/cert.pem" - err := ioutil.WriteFile(certFile, testCertKey, 0600) - c.Assert(err, IsNil) - retryPolicy := RetryPolicy{NbRetries: 5, HttpStatusCodes: []int{409}, Delay: time.Minute} - - api, err := NewManagementAPIWithRetryPolicy(subscriptionId, certFile, "West US", retryPolicy) - c.Assert(err, IsNil) - - c.Assert(api.session.subscriptionId, DeepEquals, subscriptionId) - c.Assert(api.session.retryPolicy, DeepEquals, retryPolicy) - c.Assert(api.GetRetryPolicy(), DeepEquals, retryPolicy) -} - -func (suite *managementBaseAPISuite) TestNewManagementAPICertData(c *C) { - subscriptionId := "subscriptionId" - - api, err := NewManagementAPICertData(subscriptionId, testCert, testKey, "West US") - c.Assert(err, IsNil) - - c.Assert(api.session.subscriptionId, DeepEquals, subscriptionId) - c.Assert(api.session.retryPolicy, DeepEquals, NoRetryPolicy) -} - -func (suite *managementBaseAPISuite) TestNewManagementAPICertDataWithRetryPolicy(c *C) { - subscriptionId := "subscriptionId" - retryPolicy := RetryPolicy{NbRetries: 5, HttpStatusCodes: []int{409}, Delay: time.Minute} - - api, err := NewManagementAPICertDataWithRetryPolicy(subscriptionId, testCert, testKey, "West US", retryPolicy) - c.Assert(err, IsNil) - - c.Assert(api.session.subscriptionId, DeepEquals, subscriptionId) - c.Assert(api.session.retryPolicy, DeepEquals, retryPolicy) - c.Assert(api.GetRetryPolicy(), DeepEquals, retryPolicy) -} - -func (suite *managementBaseAPISuite) TestNewManagementAPISetsDefaultPollerInterval(c *C) { - api, err := NewManagementAPI("subscriptionId", "", "West US") - c.Assert(err, IsNil) - - c.Assert(api.PollerInterval, Equals, DefaultPollerInterval) -} - -func (suite *managementBaseAPISuite) TestNewManagementAPISetsDefaultPollerTimeout(c *C) { - api, err := NewManagementAPI("subscriptionId", "", "West US") - c.Assert(err, IsNil) - - 
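A sketch of the same construction path for callers, assuming in-memory PEM material like testCert and testKey above; the subscription ID, location, retry policy, and poller values are illustrative only:

func exampleNewAPI() (*ManagementAPI, error) {
    retryPolicy := RetryPolicy{NbRetries: 5, HttpStatusCodes: []int{409}, Delay: time.Minute}
    api, err := NewManagementAPICertDataWithRetryPolicy(
        "subscription-id", testCert, testKey, "West US", retryPolicy)
    if err != nil {
        return nil, err
    }
    // Both knobs default to DefaultPollerInterval and DefaultPollerTimeout
    // (asserted by the tests here) and may be overridden per client.
    api.PollerInterval = 5 * time.Second
    api.PollerTimeout = 10 * time.Minute
    return api, nil
}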
c.Assert(api.PollerTimeout, Equals, DefaultPollerTimeout) -} - -// setUpDispatcher sets up a request dispatcher that: -// - records requests -// - returns empty responses -func (suite *managementBaseAPISuite) setUpDispatcher() *[]*X509Request { - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte{}, - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - return &recordedRequests -} - -// checkOneRequest asserts that the given slice contains one request, with the -// given characteristics. -func checkOneRequest(c *C, recordedRequests *[]*X509Request, URL, version string, payload []byte, Method string) { - requests := *recordedRequests - c.Assert(len(requests), Equals, 1) - request := requests[0] - checkRequest(c, request, URL, version, payload, Method) -} - -func checkRequest(c *C, request *X509Request, URL, version string, payload []byte, Method string) { - c.Check(request.URL, Equals, URL) - c.Check( - strings.TrimSpace(string(request.Payload)), - Equals, - strings.TrimSpace(string(payload))) - c.Check(request.Method, Equals, Method) - c.Check(request.APIVersion, Equals, version) -} - -func (suite *managementBaseAPISuite) TestGetOperationFailsIfNoHeader(c *C) { - response := x509Response{ - StatusCode: http.StatusOK, - } - - _, err := getOperationID(&response) - c.Check(err, ErrorMatches, ".*no operation header.*") -} - -func (suite *managementBaseAPISuite) TestListOSImagesRequestsListing(c *C) { - api := makeAPI(c) - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusOK, Body: []byte("")}) - requests := make([]*X509Request, 0) - rigRecordingDispatcher(&requests) - - _, err := api.ListOSImages() - c.Assert(err, IsNil) - - c.Assert(len(requests), Equals, 1) - c.Check(requests[0].URL, Equals, api.session.composeURL("services/images")) -} - -func (suite *managementBaseAPISuite) TestListOSImagesReturnsImages(c *C) { - expectedImage := OSImage{ - LogicalSizeInGB: 199.0, - Label: MakeRandomString(10), - MediaLink: "http://storage.example.com/" + MakeRandomString(10), - Name: MakeRandomString(10), - OS: "Linux", - EULA: "http://eula.example.com/" + MakeRandomString(10), - Description: MakeRandomString(10), - RecommendedVMSize: "Medium", - } - body := fmt.Sprintf(` - - - %f - - %s - %s - %s - %s - %s - %s - - - `, - expectedImage.LogicalSizeInGB, expectedImage.Label, - expectedImage.MediaLink, expectedImage.Name, expectedImage.OS, - expectedImage.EULA, expectedImage.Description, - expectedImage.RecommendedVMSize) - api := makeAPI(c) - rigFixedResponseDispatcher(&x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - }) - - listing, err := api.ListOSImages() - c.Assert(err, IsNil) - - c.Assert(len(listing.Images), Equals, 1) - c.Check(listing.Images[0], DeepEquals, expectedImage) -} - -func (suite *managementBaseAPISuite) TestListOSImagesPreservesOrdering(c *C) { - imageNames := []string{ - MakeRandomString(5), - MakeRandomString(5), - MakeRandomString(5), - } - body := fmt.Sprintf(` - - %s - %s - %s - - `, - imageNames[0], imageNames[1], imageNames[2]) - api := makeAPI(c) - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusOK, Body: []byte(body)}) - - listing, err := api.ListOSImages() - c.Assert(err, IsNil) - - c.Assert(len(listing.Images), Equals, 3) - receivedNames := make([]string, 3) - for index := range listing.Images { - receivedNames[index] = listing.Images[index].Name - } - c.Check(receivedNames, DeepEquals, imageNames) -} - -func (suite 
*managementBaseAPISuite) TestListHostedServices(c *C) { - api := makeAPI(c) - url := MakeRandomString(10) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(makeHostedServiceDescriptorList(url)), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - descriptors, err := api.ListHostedServices() - - c.Assert(err, IsNil) - expectedURL := defaultManagement + api.session.subscriptionId + "/services/hostedservices" - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, nil, "GET") - c.Assert(descriptors[0].URL, Equals, url) -} - -func (suite *managementBaseAPISuite) TestUpdateHostedService(c *C) { - api := makeAPI(c) - randomLabel := MakeRandomString(10) - randomDescription := MakeRandomString(10) - property := ExtendedProperty{ - Name: "property-name", - Value: "property-value", - } - base64Label := base64.StdEncoding.EncodeToString([]byte(randomLabel)) - requestPayload := []byte(makeUpdateHostedService(base64Label, randomDescription, property)) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := MakeRandomString(10) - properties := []ExtendedProperty{ - property, - } - update := NewUpdateHostedService(randomLabel, randomDescription, properties) - err := api.UpdateHostedService(serviceName, update) - - c.Assert(err, IsNil) - expectedURL := defaultManagement + api.session.subscriptionId + "/services/hostedservices/" + serviceName - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, requestPayload, "PUT") -} - -func assertGetHostedServicePropertiesRequest(c *C, api *ManagementAPI, serviceName string, embedDetail bool, httpRequest *X509Request) { - var query string - if embedDetail { - query = "embed-detail=true" - } else { - query = "embed-detail=false" - } - expectedURL := fmt.Sprintf("%s%s/services/hostedservices/%s?%s", defaultManagement, - api.session.subscriptionId, serviceName, query) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, nil, "GET") -} - -func (suite *managementBaseAPISuite) TestGetHostedServiceProperties_withoutDetails(c *C) { - api := makeAPI(c) - body := ` - - - hosted-service-url - hosted-service-name - - description - name-of-affinity-group - location-of-service - - current-status-of-service - creation-date-of-service - last-modification-date-of-service - - - name-of-property - value-of-property - - - - - ` - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := "serviceName" - - properties, err := api.GetHostedServiceProperties(serviceName, false) - c.Assert(err, IsNil) - c.Check(recordedRequests, HasLen, 1) - assertGetHostedServicePropertiesRequest(c, api, serviceName, false, recordedRequests[0]) - - c.Check(properties.URL, Equals, "hosted-service-url") - c.Check(properties.ServiceName, Equals, "hosted-service-name") - c.Check(properties.Description, Equals, "description") - // Details were explicitly not requested, so this is empty. 
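A caller-side sketch of the embed-detail switch exercised by this pair of tests, with a placeholder service name: false returns only the top-level properties, while true also populates Deployments:

func exampleServiceProperties(api *ManagementAPI) error {
    // Shallow: embed-detail=false, so no deployments are embedded.
    summary, err := api.GetHostedServiceProperties("example-service", false)
    if err != nil {
        return err
    }
    _ = summary.Description
    // Deep: embed-detail=true fills in the Deployments slice.
    detail, err := api.GetHostedServiceProperties("example-service", true)
    if err != nil {
        return err
    }
    for _, deployment := range detail.Deployments {
        _ = deployment.Name
    }
    return nil
}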
- c.Check(len(properties.Deployments), Equals, 0) -} - -func (suite *managementBaseAPISuite) TestGetHostedServiceProperties_withDetails(c *C) { - api := makeAPI(c) - body := ` - - - hosted-service-url - hosted-service-name - - description-of-service - name-of-affinity-group - location-of-service - - current-status-of-service - creation-date-of-service - last-modification-date-of-service - - - name-of-property - value-of-property - - - - - - name-of-deployment - - - - ` - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := "serviceName" - - properties, err := api.GetHostedServiceProperties(serviceName, true) - c.Assert(err, IsNil) - c.Check(recordedRequests, HasLen, 1) - assertGetHostedServicePropertiesRequest(c, api, serviceName, true, recordedRequests[0]) - - c.Check(properties.URL, Equals, "hosted-service-url") - c.Check(properties.ServiceName, Equals, "hosted-service-name") - c.Check(properties.Description, Equals, "description-of-service") - c.Check(len(properties.Deployments), Equals, 1) - c.Check(properties.Deployments[0].Name, Equals, "name-of-deployment") -} - -func (suite *managementBaseAPISuite) TestAddHostedService(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - createHostedService := NewCreateHostedServiceWithLocation("testName", "testLabel", "East US") - err := api.AddHostedService(createHostedService) - c.Assert(err, IsNil) - expectedURL := defaultManagement + api.session.subscriptionId + "/services/hostedservices" - expectedPayload, err := marshalXML(createHostedService) - c.Assert(err, IsNil) - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, expectedPayload, "POST") -} - -func makeAvailabilityResponse(result, reason string) string { - return fmt.Sprintf(` - - - %s - %s - `, result, reason) -} - -func (*managementBaseAPISuite) TestAddHostedServiceWithOKName(c *C) { - api := makeAPI(c) - body := makeAvailabilityResponse("true", "") - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := "service-name" - err := api.CheckHostedServiceNameAvailability(serviceName) - - c.Assert(err, IsNil) - expectedURL := (defaultManagement + api.session.subscriptionId + - "/services/hostedservices/operations/isavailable/" + serviceName) - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, nil, "GET") -} - -func (*managementBaseAPISuite) TestAddHostedServiceWithBadName(c *C) { - api := makeAPI(c) - reason := "This is a false test response" - body := makeAvailabilityResponse("false", reason) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := "service-name" - err := api.CheckHostedServiceNameAvailability(serviceName) - - c.Assert(err, ErrorMatches, reason) - c.Check(recordedRequests, HasLen, 1) - expectedURL := (defaultManagement + api.session.subscriptionId + - "/services/hostedservices/operations/isavailable/" + serviceName) - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, nil, "GET") -} - -func (*managementBaseAPISuite) 
TestAddHostedServiceWithServerError(c *C) { - api := makeAPI(c) - fixedResponse := x509Response{ - StatusCode: http.StatusBadRequest, - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := "service-name" - err := api.CheckHostedServiceNameAvailability(serviceName) - - c.Assert(err, ErrorMatches, ".*Bad Request.*") -} - -func (*managementBaseAPISuite) TestAddHostedServiceWithBadXML(c *C) { - api := makeAPI(c) - body := ` - - foo - unclosed tag - ` - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - serviceName := "service-name" - err := api.CheckHostedServiceNameAvailability(serviceName) - - c.Assert(err, ErrorMatches, ".*XML syntax error.*") -} - -func assertDeleteHostedServiceRequest(c *C, api *ManagementAPI, serviceName string, httpRequest *X509Request) { - expectedURL := fmt.Sprintf("%s%s/services/hostedservices/%s?comp=media", defaultManagement, - api.session.subscriptionId, serviceName) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, nil, "DELETE") -} - -func (suite *managementBaseAPISuite) TestDeleteHostedService(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - hostedServiceName := "testName" - err := api.DeleteHostedService(hostedServiceName) - c.Assert(err, IsNil) - c.Check(*recordedRequests, HasLen, 1) - assertDeleteHostedServiceRequest(c, api, hostedServiceName, (*recordedRequests)[0]) -} - -func (suite *managementBaseAPISuite) TestDeleteHostedServiceWhenServiceDoesNotExist(c *C) { - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusNotFound}) - err := makeAPI(c).DeleteHostedService("hosted-service-name") - c.Assert(err, IsNil) -} - -func (suite *managementBaseAPISuite) TestAddDeployment(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - serviceName := "serviceName" - configurationSet := NewLinuxProvisioningConfigurationSet("testHostname12345", "test", "test123#@!", "user-data", "false") - vhd := NewOSVirtualHardDisk("hostCaching", "diskLabel", "diskName", "http://mediaLink", "sourceImageName", "os") - role := NewLinuxRole("ExtraSmall", "test-role-123", vhd, []ConfigurationSet{*configurationSet}) - deployment := NewDeploymentForCreateVMDeployment("test-machine-name", "Staging", "testLabel", []Role{*role}, "testNetwork") - err := api.AddDeployment(deployment, serviceName) - - c.Assert(err, IsNil) - expectedURL := defaultManagement + api.session.subscriptionId + "/services/hostedservices/" + serviceName + "/deployments" - expectedPayload, err := marshalXML(deployment) - c.Assert(err, IsNil) - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, expectedPayload, "POST") -} - -func assertDeleteDeploymentRequest(c *C, api *ManagementAPI, hostedServiceName, deploymentName string, httpRequest *X509Request) { - expectedURL := fmt.Sprintf( - "%s%s/services/hostedservices/%s/deployments/%s", defaultManagement, - api.session.subscriptionId, hostedServiceName, deploymentName) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, nil, "DELETE") -} - -func (suite *managementBaseAPISuite) TestDeleteDeployment(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - hostedServiceName := "testHosterServiceName" - deploymentName := "testDeploymentName" - err := 
api.DeleteDeployment(hostedServiceName, deploymentName)
-    c.Assert(err, IsNil)
-    c.Assert(*recordedRequests, HasLen, 1)
-    assertDeleteDeploymentRequest(c, api, hostedServiceName, deploymentName, (*recordedRequests)[0])
-}
-
-func (suite *managementBaseAPISuite) TestDeleteDeploymentWhenDeploymentDoesNotExist(c *C) {
-    rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusNotFound})
-    err := makeAPI(c).DeleteDeployment("hosted-service-name", "deployment-name")
-    c.Assert(err, IsNil)
-}
-
-var getDeploymentResponse = `
-<?xml version="1.0" encoding="utf-8"?>
-<Deployment xmlns="http://schemas.microsoft.com/windowsazure" xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
-  <Name>gwaclmachinekjn8minr</Name>
-  <DeploymentSlot>Staging</DeploymentSlot>
-  <PrivateID>53b117c3126a4f1b8b23bc36c2c94dd1</PrivateID>
-  <Status>Running</Status>
-  <Label></Label>
-  <Url>http://53b117c3126a4f1b8b23bc36c2c94dd1.cloudapp.net/</Url>
-  <Configuration>PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53
-My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTU
-xTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZp
-Y2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJnd2
-FjbHJvbGVoYXVxODFyIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQo8
-L1NlcnZpY2VDb25maWd1cmF0aW9uPg==</Configuration>
-  <RoleInstanceList>
-    <RoleInstance>
-      <RoleName>gwaclrolehauq81r</RoleName>
-      <InstanceName>gwaclrolehauq81r</InstanceName>
-      <InstanceStatus>ReadyRole</InstanceStatus>
-      <InstanceUpgradeDomain>0</InstanceUpgradeDomain>
-      <InstanceFaultDomain>0</InstanceFaultDomain>
-      <InstanceSize>ExtraSmall</InstanceSize>
-      <InstanceStateDetails/>
-      <IpAddress>10.241.158.13</IpAddress>
-      <PowerState>Started</PowerState>
-      <HostName>gwaclhostsnx7m1co57n</HostName>
-      <RemoteAccessCertificateThumbprint>68db67cd8a6047a6cf6da0f92a7ee67d</RemoteAccessCertificateThumbprint>
-    </RoleInstance>
-  </RoleInstanceList>
-  <UpgradeDomainCount>1</UpgradeDomainCount>
-  <RoleList>
-    <Role i:type="PersistentVMRole">
-      <RoleName>gwaclrolehauq81r</RoleName>
-      <OsVersion/>
-      <RoleType>PersistentVMRole</RoleType>
-      <ConfigurationSets>
-        <ConfigurationSet i:type="NetworkConfigurationSet">
-          <ConfigurationSetType>NetworkConfiguration</ConfigurationSetType>
-          <SubnetNames/>
-        </ConfigurationSet>
-      </ConfigurationSets>
-      <OSVirtualHardDisk>
-        <HostCaching>ReadWrite</HostCaching>
-        <DiskLabel>gwaclauonntmontirrz9rgltt8d5f4evtjeagbcx7kf8umqhs3t421m21t798ebw</DiskLabel>
-        <DiskName>gwacldiskdvmvahc</DiskName>
-        <MediaLink>http://gwacl3133mh3fs9jck6yk0dh.blob.core.windows.net/vhds/gwacldisk79vobmh.vhd</MediaLink>
-        <SourceImageName>b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_2-LTS-amd64-server-20130624-en-us-30GB</SourceImageName>
-        <OS>Linux</OS>
-      </OSVirtualHardDisk>
-      <RoleSize>ExtraSmall</RoleSize>
-    </Role>
-  </RoleList>
-  <SdkVersion/>
-  <Locked>false</Locked>
-  <RollbackAllowed>false</RollbackAllowed>
-  <CreatedTime>2013-06-25T14:35:22Z</CreatedTime>
-  <LastModifiedTime>2013-06-25T14:48:54Z</LastModifiedTime>
-  <ExtendedProperties/>
-  <PersistentVMDowntime>
-    <StartTime>2013-05-08T22:00:00Z</StartTime>
-    <EndTime>2013-05-10T06:00:00Z</EndTime>
-    <Status>PersistentVMUpdateCompleted</Status>
-  </PersistentVMDowntime>
-  <VirtualIPs>
-    <VirtualIP>
-      <Address>137.117.72.69</Address>
-      <IsDnsProgrammed>true</IsDnsProgrammed>
-      <Name>__PseudoBackEndContractVip</Name>
-    </VirtualIP>
-  </VirtualIPs>
-</Deployment>
-`
-
-func assertGetDeploymentRequest(c *C, api *ManagementAPI, request *GetDeploymentRequest, httpRequest *X509Request) {
-    expectedURL := fmt.Sprintf(
-        "%s%s/services/hostedservices/%s/deployments/%s", defaultManagement,
-        api.session.subscriptionId, request.ServiceName, request.DeploymentName)
-    checkRequest(c, httpRequest, expectedURL, baseAPIVersion, nil, "GET")
-}
-
-func (suite *managementBaseAPISuite) TestGetDeployment(c *C) {
-    api := makeAPI(c)
-    fixedResponse := x509Response{
-        StatusCode: http.StatusOK,
-        Body:       []byte(getDeploymentResponse),
-    }
-    rigFixedResponseDispatcher(&fixedResponse)
-    recordedRequests := make([]*X509Request, 0)
-    rigRecordingDispatcher(&recordedRequests)
-
-    serviceName := "serviceName"
-    deploymentName := "gwaclmachinekjn8minr"
-
-    request := &GetDeploymentRequest{ServiceName: serviceName, DeploymentName: deploymentName}
-    deployment, err := api.GetDeployment(request)
-    c.Assert(err, IsNil)
-    c.Assert(recordedRequests, HasLen, 1)
-    assertGetDeploymentRequest(c, api, request, recordedRequests[0])
-    c.Check(deployment.Name, Equals, deploymentName)
-}
-
-func (suite *managementBaseAPISuite) TestAddStorageAccount(c *C) {
-    api := makeAPI(c)
-    header := http.Header{}
-    header.Set("X-Ms-Request-Id", "operationID")
-    fixedResponse := x509Response{
-        StatusCode: http.StatusAccepted,
-        Header:     header,
-    }
-    rigFixedResponseDispatcher(&fixedResponse)
-    recordedRequests := make([]*X509Request, 0)
-    rigRecordingDispatcher(&recordedRequests)
-    cssi := NewCreateStorageServiceInputWithLocation("name", "label", "East US", "false")
-
-    err := api.AddStorageAccount(cssi)
-    c.Assert(err, IsNil)
-
-    expectedURL := defaultManagement + api.session.subscriptionId + "/services/storageservices"
-    expectedPayload, err := marshalXML(cssi)
-    c.Assert(err, IsNil)
-    checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, expectedPayload, "POST")
-}
-
-func (suite *managementBaseAPISuite) TestDeleteStorageAccount(c *C) {
-    const accountName = "myaccount"
-    api := makeAPI(c)
-    accountURL := api.session.composeURL("services/storageservices/" + accountName)
-    recordedRequests := setUpDispatcher("operationID")
-
-    err := api.DeleteStorageAccount(accountName)
-    c.Assert(err, IsNil)
-
-    checkOneRequest(c, recordedRequests, accountURL, baseAPIVersion, nil, "DELETE")
-}
-
-func (suite *managementBaseAPISuite) TestDeleteStorageAccountWhenAccountDoesNotExist(c *C) {
-    rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusNotFound})
-    err := makeAPI(c).DeleteStorageAccount("account-name")
-    c.Assert(err, IsNil)
-}
-
-func (suite *managementBaseAPISuite) TestGetStorageAccountKeys(c *C) {
-    const (
-        accountName  = "accountname"
-        primaryKey   = "primarykey"
-        secondaryKey = "secondarykey"
-    )
-    api := makeAPI(c)
-    url := api.session.composeURL("services/storageservices/" + accountName)
-    body := fmt.Sprintf(
-        `<StorageService xmlns="http://schemas.microsoft.com/windowsazure">
-          <Url>%s</Url>
-          <StorageServiceKeys>
-            <Primary>%s</Primary>
-            <Secondary>%s</Secondary>
-          </StorageServiceKeys>
-        </StorageService>`,
-        url, primaryKey, secondaryKey)
-    rigFixedResponseDispatcher(&x509Response{
-        StatusCode: http.StatusOK,
-        Body:       []byte(body),
-    })
-
-    keys, err := api.GetStorageAccountKeys("account")
-    c.Assert(err, IsNil)
-
-    c.Check(keys.Primary, Equals, primaryKey)
-    c.Check(keys.Secondary, Equals, secondaryKey)
-    c.Check(keys.URL, Equals, url)
-}
-
-func assertDeleteDiskRequest(c *C, api *ManagementAPI, diskName string, httpRequest *X509Request, deleteBlob bool) {
-    expectedURL := fmt.Sprintf("%s%s/services/disks/%s", defaultManagement,
-        api.session.subscriptionId, diskName)
-    if deleteBlob {
-        expectedURL += "?comp=media"
-    }
-    checkRequest(c,
httpRequest, expectedURL, baseAPIVersion, nil, "DELETE") -} - -func (suite *managementBaseAPISuite) TestDeleteDisk(c *C) { - // The current implementation of DeleteDisk() works around a bug in - // Windows Azure by polling the server. See the documentation in the file - // deletedisk.go for details. - // Change the polling interval to speed up the tests: - deleteDiskInterval = time.Nanosecond - api := makeAPI(c) - diskName := "diskName" - for _, deleteBlob := range [...]bool{false, true} { - recordedRequests := setUpDispatcher("operationID") - err := api.DeleteDisk(&DeleteDiskRequest{ - DiskName: diskName, - DeleteBlob: deleteBlob, - }) - c.Assert(err, IsNil) - c.Assert(*recordedRequests, HasLen, 1) - assertDeleteDiskRequest(c, api, diskName, (*recordedRequests)[0], deleteBlob) - } -} - -func (suite *managementBaseAPISuite) TestDeleteDiskWhenDiskDoesNotExist(c *C) { - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusNotFound}) - err := makeAPI(c).DeleteDisk(&DeleteDiskRequest{DiskName: "disk-name"}) - c.Assert(err, IsNil) -} - -func (suite *managementBaseAPISuite) TestDeleteDiskWithDeleteBlob(c *C) { - // Setting deleteBlob=true should append comp=media as a url query value. - deleteDiskInterval = time.Nanosecond - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - diskName := "diskName" - - err := api.DeleteDisk(&DeleteDiskRequest{ - DiskName: diskName, DeleteBlob: true}) - - c.Assert(err, IsNil) - originalURL := (*recordedRequests)[0].URL - parsedURL, err := url.Parse(originalURL) - c.Assert(err, IsNil) - values := parsedURL.Query() - c.Assert(err, IsNil) - c.Check(values["comp"], DeepEquals, []string{"media"}) -} - -func (suite *managementBaseAPISuite) TestAddDataDisk(c *C) { - api := makeAPI(c) - serviceName := "serviceName" - deploymentName := "deploymentName" - roleName := "roleName" - - recordedRequests := setUpDispatcher("operationID") - err := api.AddDataDisk(&AddDataDiskRequest{ - ServiceName: serviceName, - DeploymentName: deploymentName, - RoleName: roleName, - DataVirtualHardDisk: DataVirtualHardDisk{ - DiskLabel: "abcdef", - LUN: 31, - LogicalDiskSizeInGB: 42, - MediaLink: "http://example.blob.core.windows.net/disks/mydisk.vhd", - }, - }) - c.Assert(err, IsNil) - c.Assert(*recordedRequests, HasLen, 1) - - expectedURL := fmt.Sprintf( - "%s%s/services/hostedservices/%s/deployments/%s/roles/%s/DataDisks", - defaultManagement, - api.session.subscriptionId, - serviceName, - deploymentName, - roleName, - ) - - expectedPayload := []byte(strings.TrimSpace(` - - abcdef - 31 - 42 - http://example.blob.core.windows.net/disks/mydisk.vhd - - `)) - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, expectedPayload, "POST") -} - -func (suite *managementBaseAPISuite) TestListDisks(c *C) { - api := makeAPI(c) - - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(strings.TrimSpace(` - - - name-of-affinity-group - - name-of-cloud-service - name-of-deployment - name-of-virtual-machine - - operating-system-type - geo-location-of-disk - 123 - uri-of-vhd - name-of-disk - name-of-source-image - creation-time-of-disk - IO-Type - - -`)), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - disks, err := api.ListDisks() - c.Assert(err, IsNil) - c.Assert(disks, HasLen, 1) - - expectedURL := fmt.Sprintf("%s%s/services/disks", defaultManagement, api.session.subscriptionId) - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, 
nil, "GET") - - c.Assert(disks[0], DeepEquals, Disk{ - AffinityGroup: "name-of-affinity-group", - AttachedTo: &DiskAttachment{ - HostedServiceName: "name-of-cloud-service", - DeploymentName: "name-of-deployment", - RoleName: "name-of-virtual-machine", - }, - OS: "operating-system-type", - Location: "geo-location-of-disk", - LogicalSizeInGB: 123, - MediaLink: "uri-of-vhd", - Name: "name-of-disk", - SourceImageName: "name-of-source-image", - CreatedTime: "creation-time-of-disk", - IOType: "IO-Type", - }) -} - -func (suite *managementBaseAPISuite) TestPerformNodeOperation(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - serviceName := "serviceName" - deploymentName := "deploymentName" - roleName := "roleName" - operation := newRoleOperation("RandomOperation") - version := "test-version" - err := api.performRoleOperation(serviceName, deploymentName, roleName, version, operation) - - c.Assert(err, IsNil) - expectedURL := defaultManagement + api.session.subscriptionId + "/services/hostedservices/" + serviceName + "/deployments/" + deploymentName + "/roleinstances/" + roleName + "/Operations" - expectedPayload, err := marshalXML(operation) - c.Assert(err, IsNil) - checkOneRequest(c, recordedRequests, expectedURL, version, expectedPayload, "POST") -} - -func (suite *managementBaseAPISuite) TestStartRole(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - request := &StartRoleRequest{"serviceName", "deploymentName", "roleName"} - err := api.StartRole(request) - c.Assert(err, IsNil) - expectedURL := (defaultManagement + api.session.subscriptionId + "/services/hostedservices/" + - request.ServiceName + "/deployments/" + request.DeploymentName + "/roleinstances/" + - request.RoleName + "/Operations") - expectedPayload, err := marshalXML(startRoleOperation) - c.Assert(err, IsNil) - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, expectedPayload, "POST") -} - -func (suite *managementBaseAPISuite) TestRestartRole(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - request := &RestartRoleRequest{"serviceName", "deploymentName", "roleName"} - err := api.RestartRole(request) - c.Assert(err, IsNil) - expectedURL := (defaultManagement + api.session.subscriptionId + "/services/hostedservices/" + - request.ServiceName + "/deployments/" + request.DeploymentName + "/roleinstances/" + - request.RoleName + "/Operations") - expectedPayload, err := marshalXML(restartRoleOperation) - c.Assert(err, IsNil) - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, expectedPayload, "POST") -} - -func assertShutdownRoleRequest(c *C, api *ManagementAPI, request *ShutdownRoleRequest, httpRequest *X509Request) { - expectedURL := fmt.Sprintf( - "%s%s/services/hostedservices/%s/deployments/%s/roleinstances/%s/Operations", - defaultManagement, api.session.subscriptionId, request.ServiceName, - request.DeploymentName, request.RoleName) - expectedPayload, err := marshalXML(shutdownRoleOperation) - c.Assert(err, IsNil) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, expectedPayload, "POST") -} - -func (suite *managementBaseAPISuite) TestShutdownRole(c *C) { - api := makeAPI(c) - recordedRequests := setUpDispatcher("operationID") - request := &ShutdownRoleRequest{"serviceName", "deploymentName", "roleName"} - err := api.ShutdownRole(request) - c.Assert(err, IsNil) - c.Assert(*recordedRequests, HasLen, 1) - assertShutdownRoleRequest(c, api, request, (*recordedRequests)[0]) -} - -func 
assertGetRoleRequest(c *C, api *ManagementAPI, httpRequest *X509Request, serviceName, deploymentName, roleName string) { - expectedURL := (defaultManagement + api.session.subscriptionId + - "/services/hostedservices/" + - serviceName + "/deployments/" + deploymentName + "/roles/" + roleName) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, nil, "GET") -} - -func (suite *managementBaseAPISuite) TestGetRole(c *C) { - api := makeAPI(c) - request := &GetRoleRequest{"serviceName", "deploymentName", "roleName"} - - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(makePersistentVMRole("rolename")), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - role, err := api.GetRole(request) - c.Assert(err, IsNil) - - assertGetRoleRequest( - c, api, recordedRequests[0], request.ServiceName, - request.DeploymentName, request.RoleName) - c.Check(role.RoleName, Equals, "rolename") -} - -func assertUpdateRoleRequest(c *C, api *ManagementAPI, httpRequest *X509Request, serviceName, deploymentName, roleName, expectedXML string) { - expectedURL := (defaultManagement + api.session.subscriptionId + - "/services/hostedservices/" + - serviceName + "/deployments/" + deploymentName + "/roles/" + roleName) - checkRequest( - c, httpRequest, expectedURL, baseAPIVersion, []byte(expectedXML), "PUT") - c.Assert(httpRequest.ContentType, Equals, "application/xml") -} - -func (suite *managementBaseAPISuite) TestUpdateRole(c *C) { - api := makeAPI(c) - request := &UpdateRoleRequest{ - ServiceName: "serviceName", - DeploymentName: "deploymentName", - RoleName: "roleName", - PersistentVMRole: &PersistentVMRole{ - RoleName: "newRoleNamePerhaps", - }, - } - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusOK}) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - err := api.UpdateRole(request) - c.Assert(err, IsNil) - - expectedXML, err := request.PersistentVMRole.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, recordedRequests[0], request.ServiceName, - request.DeploymentName, request.RoleName, expectedXML) -} - -func (suite *managementBaseAPISuite) TestUpdateRoleBlocksUntilComplete(c *C) { - api := makeAPI(c) - api.PollerInterval = time.Nanosecond - request := &UpdateRoleRequest{ - ServiceName: "serviceName", - DeploymentName: "deploymentName", - RoleName: "roleName", - PersistentVMRole: &PersistentVMRole{ - RoleName: "newRoleNamePerhaps", - }, - } - responses := []DispatcherResponse{ - // First response is 202 with an X-MS-Request-ID header. - {makeX509ResponseWithOperationHeader("foobar"), nil}, - // Second response is XML to say that the request above has completed. - { - &x509Response{ - Body: []byte(fmt.Sprintf(operationXMLTemplate, "Succeeded")), - StatusCode: http.StatusOK, - }, - nil, - }, - } - recordedRequests := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&recordedRequests, responses) - - err := api.UpdateRole(request) - c.Assert(err, IsNil) - - c.Assert(recordedRequests, HasLen, 2) - expectedXML, err := request.PersistentVMRole.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, recordedRequests[0], request.ServiceName, - request.DeploymentName, request.RoleName, expectedXML) - // Second request is to get status of operation "foobar". 
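Together, GetRole and UpdateRole support the usual read-modify-write cycle that these two tests verify piecewise. A minimal sketch, assuming (as the tests do) that the value GetRole returns is the *PersistentVMRole that UpdateRoleRequest carries; all names are placeholders:

func exampleRenameRole(api *ManagementAPI) error {
    role, err := api.GetRole(&GetRoleRequest{"example-service", "example-deployment", "example-role"})
    if err != nil {
        return err
    }
    // Any field of the fetched role may be edited before the PUT.
    role.RoleName = "example-role-renamed"
    return api.UpdateRole(&UpdateRoleRequest{
        ServiceName:      "example-service",
        DeploymentName:   "example-deployment",
        RoleName:         "example-role",
        PersistentVMRole: role,
    })
}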
- c.Check(recordedRequests[1].Method, Equals, "GET") - c.Check(recordedRequests[1].URL, Matches, ".*/operations/foobar") -} - -func assertDeleteRoleRequest(c *C, api *ManagementAPI, httpRequest *X509Request, serviceName, deploymentName, roleName string, deleteMedia bool) { - expectedURL := (defaultManagement + api.session.subscriptionId + - "/services/hostedservices/" + - serviceName + "/deployments/" + deploymentName + "/roles/" + roleName) - if deleteMedia { - expectedURL += "?comp=media" - } - checkRequest( - c, httpRequest, expectedURL, baseAPIVersion, nil, "DELETE") -} - -func (suite *managementBaseAPISuite) TestDeleteRole(c *C) { - suite.testDeleteRole(c, false) -} - -func (suite *managementBaseAPISuite) TestDeleteRoleDeleteMedia(c *C) { - suite.testDeleteRole(c, true) -} - -func (suite *managementBaseAPISuite) testDeleteRole(c *C, deleteMedia bool) { - api := makeAPI(c) - request := &DeleteRoleRequest{ - ServiceName: "serviceName", - DeploymentName: "deploymentName", - RoleName: "roleName", - DeleteMedia: deleteMedia, - } - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusOK}) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - err := api.DeleteRole(request) - c.Assert(err, IsNil) - assertDeleteRoleRequest( - c, api, recordedRequests[0], request.ServiceName, - request.DeploymentName, request.RoleName, deleteMedia) -} - -func assertAddRoleRequest(c *C, api *ManagementAPI, httpRequest *X509Request, serviceName, deploymentName, expectedXML string) { - expectedURL := (defaultManagement + api.session.subscriptionId + - "/services/hostedservices/" + - serviceName + "/deployments/" + deploymentName + "/roles") - checkRequest( - c, httpRequest, expectedURL, baseAPIVersion, []byte(expectedXML), "POST") - c.Assert(httpRequest.ContentType, Equals, "application/xml") -} - -func (suite *managementBaseAPISuite) TestAddRole(c *C) { - api := makeAPI(c) - request := &AddRoleRequest{ - ServiceName: "serviceName", - DeploymentName: "deploymentName", - PersistentVMRole: &PersistentVMRole{ - RoleName: "newRoleNamePerhaps", - }, - } - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusOK}) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - err := api.AddRole(request) - c.Assert(err, IsNil) - - expectedXML, err := request.PersistentVMRole.Serialize() - c.Assert(err, IsNil) - assertAddRoleRequest( - c, api, recordedRequests[0], request.ServiceName, - request.DeploymentName, expectedXML) -} - -func (suite *managementBaseAPISuite) TestCreateAffinityGroup(c *C) { - api := makeAPI(c) - cag := NewCreateAffinityGroup( - "name", "label", "description", "location") - request := CreateAffinityGroupRequest{ - CreateAffinityGroup: cag} - fixedResponse := x509Response{ - StatusCode: http.StatusCreated} - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - err := api.CreateAffinityGroup(&request) - c.Assert(err, IsNil) - - expectedURL := defaultManagement + api.session.subscriptionId + "/affinitygroups" - expectedBody, _ := cag.Serialize() - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, []byte(expectedBody), "POST") -} - -func (suite *managementBaseAPISuite) TestUpdateAffinityGroup(c *C) { - api := makeAPI(c) - uag := NewUpdateAffinityGroup("label", "description") - request := UpdateAffinityGroupRequest{ - Name: "groupname", - UpdateAffinityGroup: uag} - fixedResponse := x509Response{ - StatusCode: 
http.StatusCreated} - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - err := api.UpdateAffinityGroup(&request) - c.Assert(err, IsNil) - - expectedURL := (defaultManagement + api.session.subscriptionId + - "/affinitygroups/" + request.Name) - expectedBody, _ := uag.Serialize() - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, []byte(expectedBody), "PUT") -} - -func (suite *managementBaseAPISuite) TestDeleteAffinityGroup(c *C) { - api := makeAPI(c) - request := DeleteAffinityGroupRequest{ - Name: "groupname"} - fixedResponse := x509Response{ - StatusCode: http.StatusCreated} - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - err := api.DeleteAffinityGroup(&request) - c.Assert(err, IsNil) - - expectedURL := (defaultManagement + api.session.subscriptionId + - "/affinitygroups/" + request.Name) - checkOneRequest(c, &recordedRequests, expectedURL, baseAPIVersion, nil, "DELETE") -} - -func (suite *managementBaseAPISuite) TestDeleteAffinityGroupWhenGroupDoesNotExist(c *C) { - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusNotFound}) - request := DeleteAffinityGroupRequest{Name: "groupname"} - err := makeAPI(c).DeleteAffinityGroup(&request) - c.Assert(err, IsNil) -} - -func makeNetworkConfiguration() *NetworkConfiguration { - return &NetworkConfiguration{ - XMLNS: XMLNS_NC, - DNS: &[]VirtualNetDnsServer{ - { - Name: "dns-server-name", - IPAddress: "IPV4-address-of-the-server", - }, - }, - LocalNetworkSites: &[]LocalNetworkSite{ - { - Name: "local-site-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - VPNGatewayAddress: "IPV4-address-of-the-vpn-gateway", - }, - }, - VirtualNetworkSites: &[]VirtualNetworkSite{ - { - Name: "virtual-network-name", - AffinityGroup: "affinity-group-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - Subnets: &[]Subnet{ - { - Name: "subnet-name", - AddressPrefix: "CIDR-identifier", - }, - }, - DnsServersRef: &[]DnsServerRef{ - { - Name: "primary-DNS-name", - }, - }, - Gateway: &Gateway{ - Profile: "Small", - VPNClientAddressPoolPrefixes: []string{ - "CIDR-identifier", - }, - LocalNetworkSiteRef: LocalNetworkSiteRef{ - Name: "local-site-name", - Connection: LocalNetworkSiteRefConnection{ - Type: "connection-type", - }, - }, - }, - }, - }, - } -} - -func assertGetNetworkConfigurationRequest(c *C, api *ManagementAPI, httpRequest *X509Request) { - expectedURL := fmt.Sprintf( - "%s%s/services/networking/media", defaultManagement, - api.session.subscriptionId) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, nil, "GET") -} - -func (suite *managementBaseAPISuite) TestGetNetworkConfiguration(c *C) { - expected := makeNetworkConfiguration() - expectedXML, err := expected.Serialize() - c.Assert(err, IsNil) - - rigFixedResponseDispatcher(&x509Response{ - StatusCode: http.StatusOK, Body: []byte(expectedXML)}) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - api := makeAPI(c) - observed, err := api.GetNetworkConfiguration() - c.Assert(err, IsNil) - c.Assert(recordedRequests, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, recordedRequests[0]) - c.Assert(observed, DeepEquals, expected) -} - -func (suite *managementBaseAPISuite) TestGetNetworkConfigurationNotFound(c *C) { - rigFixedResponseDispatcher(&x509Response{ - StatusCode: http.StatusNotFound}) - recordedRequests := 
make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - api := makeAPI(c) - observed, err := api.GetNetworkConfiguration() - c.Assert(observed, IsNil) - c.Assert(err, IsNil) - c.Assert(recordedRequests, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, recordedRequests[0]) -} - -func assertSetNetworkConfigurationRequest(c *C, api *ManagementAPI, body []byte, httpRequest *X509Request) { - expectedURL := fmt.Sprintf( - "%s%s/services/networking/media", defaultManagement, - api.session.subscriptionId) - checkRequest(c, httpRequest, expectedURL, baseAPIVersion, body, "PUT") - // Azure chokes when the content type is text/xml or similar. - c.Assert(httpRequest.ContentType, Equals, "text/plain") -} - -func (suite *managementBaseAPISuite) TestSetNetworkConfiguration(c *C) { - api := makeAPI(c) - fixedResponse := x509Response{StatusCode: http.StatusOK} - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - request := makeNetworkConfiguration() - requestXML, err := request.Serialize() - c.Assert(err, IsNil) - requestPayload := []byte(requestXML) - err = api.SetNetworkConfiguration(request) - - c.Assert(err, IsNil) - c.Assert(recordedRequests, HasLen, 1) - assertSetNetworkConfigurationRequest(c, api, requestPayload, recordedRequests[0]) -} - -func (suite *managementBaseAPISuite) TestListLocationsRequestsListing(c *C) { - api := makeAPI(c) - rigFixedResponseDispatcher(&x509Response{StatusCode: http.StatusOK, Body: []byte("")}) - requests := make([]*X509Request, 0) - rigRecordingDispatcher(&requests) - - _, err := api.ListLocations() - c.Assert(err, IsNil) - - c.Assert(len(requests), Equals, 1) - c.Check(requests[0].URL, Equals, api.session.composeURL("locations")) -} - -func (suite *managementBaseAPISuite) TestListLocationsReturnsLocations(c *C) { - expectedLocations := []Location{{ - Name: "West US", - }, { - Name: "East US", - ComputeCapabilities: &ComputeCapabilities{ - VirtualMachineRoleSizes: []string{"ExtraSmall", "ExtraLarge"}, - WebWorkerRoleSizes: []string{"Small", "Large"}, - }, - }} - body := ` - - - West US - - - East US - - - ExtraSmall - ExtraLarge - - - Small - Large - - - - -` - api := makeAPI(c) - rigFixedResponseDispatcher(&x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - }) - - locations, err := api.ListLocations() - c.Assert(err, IsNil) - - c.Assert(len(locations), Equals, 2) - c.Assert(locations, DeepEquals, expectedLocations) -} === removed file 'src/launchpad.net/gwacl/management_test.go' --- src/launchpad.net/gwacl/management_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/management_test.go 1970-01-01 00:00:00 +0000 @@ -1,1405 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - . "launchpad.net/gocheck" - "net/http" -) - -type managementAPISuite struct{} - -var _ = Suite(&managementAPISuite{}) - -// makeNamedRoleInstances creates an array of RoleInstance objects, each with -// the respective given name. -func makeNamedRoleInstances(names ...string) []RoleInstance { - instances := make([]RoleInstance, 0) - for _, name := range names { - instances = append(instances, RoleInstance{RoleName: name}) - } - return instances -} - -// makeNamedDeployments creates an array of Deployment objects, each with -// the respective given name. 
-func makeNamedDeployments(names ...string) []Deployment { - deployments := make([]Deployment, 0) - for _, name := range names { - deployments = append(deployments, Deployment{Name: name}) - } - return deployments -} - -// makeHostedService creates a HostedService with the given deployments. -func makeHostedService(deployments []Deployment) HostedService { - desc := HostedServiceDescriptor{ServiceName: "S1"} - return HostedService{ - HostedServiceDescriptor: desc, - Deployments: deployments, - } -} - -// makeOKXMLResponse creates a DispatcherResponse with status code OK, and -// an XML-serialized version of the given object. -// The response is wrapped in a slice because that's slightly easier for -// the callers. -func makeOKXMLResponse(c *C, bodyObject AzureObject) []DispatcherResponse { - body, err := bodyObject.Serialize() - c.Assert(err, IsNil) - return []DispatcherResponse{ - { - response: &x509Response{ - StatusCode: http.StatusOK, - Body: []byte(body), - }, - }, - } -} - -// TestListInstances goes through the happy path for ListInstances. -func (suite *managementAPISuite) TestListInstances(c *C) { - service := makeHostedService( - []Deployment{ - {RoleInstanceList: makeNamedRoleInstances("one", "two")}, - {RoleInstanceList: makeNamedRoleInstances("three", "four")}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, makeOKXMLResponse(c, service)) - - // Exercise ListInstances. - api := makeAPI(c) - request := &ListInstancesRequest{ServiceName: service.ServiceName} - instances, err := api.ListInstances(request) - c.Assert(err, IsNil) - - // We get the expected instances back. - c.Check(instances, DeepEquals, []RoleInstance{ - service.Deployments[0].RoleInstanceList[0], - service.Deployments[0].RoleInstanceList[1], - service.Deployments[1].RoleInstanceList[0], - service.Deployments[1].RoleInstanceList[1], - }) - - // The only request is for the service's properties - c.Assert(record, Not(HasLen), 0) - expectedURL := fmt.Sprintf( - "%ssubscriptionId/services/hostedservices/%s?embed-detail=true", - defaultManagement, service.ServiceName) - c.Check(record[0].URL, Equals, expectedURL) - c.Check(record[0].Method, Equals, "GET") -} - -func (suite *managementAPISuite) TestListInstancesFailsGettingDetails(c *C) { - rigPreparedResponseDispatcher( - []DispatcherResponse{{response: &x509Response{StatusCode: http.StatusNotFound}}}, - ) - api := makeAPI(c) - request := &ListInstancesRequest{ServiceName: "SomeService"} - instances, err := api.ListInstances(request) - c.Check(instances, IsNil) - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "GET request failed (404: Not Found)") -} - -// TestListAllDeployments goes through the happy path for ListAllDeployments. -func (suite *managementAPISuite) TestListAllDeployments(c *C) { - service := makeHostedService(makeNamedDeployments("one", "two")) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, makeOKXMLResponse(c, service)) - - // Exercise ListDeployments. - api := makeAPI(c) - request := &ListAllDeploymentsRequest{ServiceName: service.ServiceName} - deployments, err := api.ListAllDeployments(request) - c.Assert(err, IsNil) - - // We get the complete set of deployments back. - c.Check(deployments, DeepEquals, service.Deployments) - - // Only one request to the API is made. - c.Assert(record, HasLen, 1) -} - -// TestListDeployments tests ListDeployments, including filtering by name. 
-func (suite *managementAPISuite) TestListDeployments(c *C) { - service := makeHostedService(makeNamedDeployments("Arthur", "Bobby")) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, makeOKXMLResponse(c, service)) - - // Exercise ListDeployments. - api := makeAPI(c) - request := &ListDeploymentsRequest{ - ServiceName: service.ServiceName, - DeploymentNames: []string{"Arthur"}, - } - deployments, err := api.ListDeployments(request) - c.Assert(err, IsNil) - - // Only the first deployment - named "Arthur" - is returned. - c.Check(deployments, DeepEquals, []Deployment{service.Deployments[0]}) - // Only one request to the API is made. - c.Assert(record, HasLen, 1) -} - -func (suite *managementAPISuite) TestListDeploymentsWithoutNamesReturnsNothing(c *C) { - service := makeHostedService(makeNamedDeployments("One", "Two")) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, makeOKXMLResponse(c, service)) - // Exercise ListDeployments. - api := makeAPI(c) - request := &ListDeploymentsRequest{ - ServiceName: service.ServiceName, - DeploymentNames: []string{}, - } - deployments, err := api.ListDeployments(request) - c.Assert(err, IsNil) - - // No deployments are returned. - c.Check(deployments, HasLen, 0) -} - -func makeHostedServiceDescriptor() *HostedServiceDescriptor { - url := MakeRandomString(10) - serviceName := MakeRandomString(10) - return &HostedServiceDescriptor{ServiceName: serviceName, URL: url} -} - -func (suite *managementAPISuite) TestListSpecificHostedServices(c *C) { - service1 := makeHostedServiceDescriptor() - service2 := makeHostedServiceDescriptor() - list := HostedServiceDescriptorList{HostedServices: []HostedServiceDescriptor{*service1, *service2}} - XML, err := list.Serialize() - c.Assert(err, IsNil) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(XML), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - api := makeAPI(c) - request := &ListSpecificHostedServicesRequest{ - ServiceNames: []string{service1.ServiceName}, - } - descriptors, err := api.ListSpecificHostedServices(request) - - // Only the first service is returned. - c.Check(descriptors, DeepEquals, []HostedServiceDescriptor{*service1}) - // Only one request to the API is made. - c.Assert(recordedRequests, HasLen, 1) -} - -func (suite *managementAPISuite) TestListPrefixedHostedServices(c *C) { - prefix := "prefix" - service1 := &HostedServiceDescriptor{ServiceName: prefix + "service1"} - service2 := makeHostedServiceDescriptor() - list := HostedServiceDescriptorList{HostedServices: []HostedServiceDescriptor{*service1, *service2}} - XML, err := list.Serialize() - c.Assert(err, IsNil) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(XML), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - api := makeAPI(c) - request := &ListPrefixedHostedServicesRequest{ - ServiceNamePrefix: prefix, - } - descriptors, err := api.ListPrefixedHostedServices(request) - - // Only the first service is returned. - c.Check(descriptors, DeepEquals, []HostedServiceDescriptor{*service1}) - // Only one request to the API is made. 
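Both filters are applied client-side to the result of a single hosted-services listing, which is why each test asserts exactly one API request. A caller-side sketch with placeholder names:

func exampleFilteredListings(api *ManagementAPI) error {
    byName, err := api.ListSpecificHostedServices(&ListSpecificHostedServicesRequest{
        ServiceNames: []string{"service-one", "service-two"},
    })
    if err != nil {
        return err
    }
    byPrefix, err := api.ListPrefixedHostedServices(&ListPrefixedHostedServicesRequest{
        ServiceNamePrefix: "example-",
    })
    if err != nil {
        return err
    }
    // Both calls return []HostedServiceDescriptor.
    _ = append(byName, byPrefix...)
    return nil
}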
- c.Assert(recordedRequests, HasLen, 1) -} - -func (suite *managementAPISuite) TestListSpecificHostedServicesWithoutNamesReturnsNothing(c *C) { - service1 := makeHostedServiceDescriptor() - service2 := makeHostedServiceDescriptor() - list := HostedServiceDescriptorList{HostedServices: []HostedServiceDescriptor{*service1, *service2}} - XML, err := list.Serialize() - c.Assert(err, IsNil) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte(XML), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - api := makeAPI(c) - request := &ListSpecificHostedServicesRequest{ - ServiceNames: []string{}, - } - descriptors, err := api.ListSpecificHostedServices(request) - - c.Check(descriptors, DeepEquals, []HostedServiceDescriptor{}) - // Only one request to the API is made. - c.Assert(recordedRequests, HasLen, 1) -} - -var exampleOkayResponse = DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusOK}, -} - -var exampleFailResponse = DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusInternalServerError}, -} - -var exampleNotFoundResponse = DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusNotFound}, -} - -type suiteDestroyDeployment struct{} - -var _ = Suite(&suiteDestroyDeployment{}) - -func (suite *suiteDestroyDeployment) makeExampleDeployment() *Deployment { - return &Deployment{ - RoleInstanceList: makeNamedRoleInstances("one", "two"), - RoleList: []Role{ - {OSVirtualHardDisk: &OSVirtualHardDisk{DiskName: "disk1"}}, - {OSVirtualHardDisk: &OSVirtualHardDisk{DiskName: "disk2"}}, - }, - } -} - -func (suite *suiteDestroyDeployment) TestHappyPath(c *C) { - var responses []DispatcherResponse - // Prepare. - exampleDeployment := suite.makeExampleDeployment() - responses = append(responses, makeOKXMLResponse(c, exampleDeployment)...) - // For deleting the deployment. - responses = append(responses, exampleOkayResponse) - // For deleting disks. - responses = append(responses, exampleOkayResponse, exampleOkayResponse, exampleOkayResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyDeployment. - api := makeAPI(c) - request := &DestroyDeploymentRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - } - err := api.DestroyDeployment(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 4) - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, request.DeploymentName}, record[0]) - assertDeleteDeploymentRequest(c, api, request.ServiceName, - request.DeploymentName, record[1]) - assertDeleteDiskRequest(c, api, "disk1", record[2], true) - assertDeleteDiskRequest(c, api, "disk2", record[3], true) -} - -func (suite *suiteDestroyDeployment) TestOkayWhenDeploymentNotFound(c *C) { - var responses []DispatcherResponse - // Prepare. - responses = []DispatcherResponse{exampleNotFoundResponse} - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyDeployment. - api := makeAPI(c) - request := &DestroyDeploymentRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - } - err := api.DestroyDeployment(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 1) -} - -func (suite *suiteDestroyDeployment) TestOkayWhenAssetsNotFound(c *C) { - var responses []DispatcherResponse - // Prepare. 
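For orientation across these cases, the call under test as a caller would make it, with placeholder names. Per the assertions in TestHappyPath, DestroyDeployment fetches the deployment, deletes it, then deletes each of its disks with comp=media, and treats a 404 on any step as success:

func exampleDestroyDeployment(api *ManagementAPI) error {
    return api.DestroyDeployment(&DestroyDeploymentRequest{
        ServiceName:    "example-service",
        DeploymentName: "example-deployment",
    })
}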
- exampleDeployment := suite.makeExampleDeployment() - responses = append(responses, makeOKXMLResponse(c, exampleDeployment)...) - // For deleting the deployment. - responses = append(responses, exampleNotFoundResponse) - // For deleting the disks. - responses = append(responses, exampleNotFoundResponse, exampleNotFoundResponse, exampleNotFoundResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyDeployment. - api := makeAPI(c) - request := &DestroyDeploymentRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - } - err := api.DestroyDeployment(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 4) - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, request.DeploymentName}, record[0]) - assertDeleteDeploymentRequest(c, api, request.ServiceName, - request.DeploymentName, record[1]) - assertDeleteDiskRequest(c, api, "disk1", record[2], true) - assertDeleteDiskRequest(c, api, "disk2", record[3], true) -} - -func (suite *suiteDestroyDeployment) TestFailsGettingDeployment(c *C) { - var responses []DispatcherResponse - // Prepare. - responses = []DispatcherResponse{exampleFailResponse} - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyDeployment. - api := makeAPI(c) - request := &DestroyDeploymentRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - } - err := api.DestroyDeployment(request) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "GET request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 1) -} - -func (suite *suiteDestroyDeployment) TestFailsDeletingDisk(c *C) { - var responses []DispatcherResponse - // Prepare. - exampleDeployment := suite.makeExampleDeployment() - responses = append(responses, makeOKXMLResponse(c, exampleDeployment)...) - // For deleting disks. - responses = append(responses, exampleFailResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyDeployment. - api := makeAPI(c) - request := &DestroyDeploymentRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - } - err := api.DestroyDeployment(request) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "DELETE request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 2) -} - -func (suite *suiteDestroyDeployment) TestFailsDeletingDeployment(c *C) { - var responses []DispatcherResponse - // Prepare. - exampleDeployment := suite.makeExampleDeployment() - responses = append(responses, makeOKXMLResponse(c, exampleDeployment)...) - // For deleting disks. - responses = append(responses, exampleOkayResponse, exampleOkayResponse) - // For other requests. - responses = append(responses, exampleFailResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyDeployment. 
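// A minimal sketch of the request sequence these tests assert DestroyDeployment
// makes; it is inferred from the assert helpers above (assertGetDeploymentRequest,
// assertDeleteDeploymentRequest, assertDeleteDiskRequest), not copied from the
// implementation, and the method names and signatures are illustrative. The real
// code treats a 404 on any DELETE as success; error handling is elided here:
//
//	// Fetch the deployment to learn which disks it uses, delete the
//	// deployment, then delete each role's OS disk.
//	dep, err := api.GetDeployment(&GetDeploymentRequest{serviceName, deploymentName})
//	if err != nil {
//		return err
//	}
//	if err := api.DeleteDeployment(serviceName, deploymentName); err != nil {
//		return err
//	}
//	for _, role := range dep.RoleList {
//		if err := api.DeleteDisk(role.OSVirtualHardDisk.DiskName); err != nil {
//			return err
//		}
//	}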
- api := makeAPI(c) - request := &DestroyDeploymentRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - } - err := api.DestroyDeployment(request) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "DELETE request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 4) -} - -type suiteDestroyHostedService struct{} - -var _ = Suite(&suiteDestroyHostedService{}) - -func (suite *suiteDestroyHostedService) makeExampleHostedService(deploymentNames ...string) *HostedService { - var exampleHostedService = &HostedService{} - for _, deploymentName := range deploymentNames { - exampleHostedService.Deployments = append( - exampleHostedService.Deployments, - Deployment{Name: deploymentName}) - } - return exampleHostedService -} - -func (suite *suiteDestroyHostedService) TestHappyPath(c *C) { - var responses []DispatcherResponse - // DestroyHostedService first gets the hosted service properties. - exampleHostedService := suite.makeExampleHostedService("one", "two") - responses = append(responses, makeOKXMLResponse(c, exampleHostedService)...) - // It calls DestroyDeployment, which first gets the deployment's - // properties, deletes any assets contained therein (none in this case) - // then deletes the deployment. - for _, deployment := range exampleHostedService.Deployments { - responses = append(responses, makeOKXMLResponse(c, &deployment)...) - responses = append(responses, exampleOkayResponse) - } - // For deleting the service itself. - responses = append(responses, exampleOkayResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ - ServiceName: "service-name", - } - err := api.DestroyHostedService(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 6) - // The first request is for the hosted service. - assertGetHostedServicePropertiesRequest(c, api, request.ServiceName, true, record[0]) - // The second and third requests fetch then delete deployment "one"; see - // DestroyDeployment for an explanation of this behaviour. - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, "one"}, record[1]) - assertDeleteDeploymentRequest(c, api, request.ServiceName, "one", - record[2]) - // The fourth and fifth requests are a repeat of the previous two, but for - // deployment "two". - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, "two"}, record[3]) - assertDeleteDeploymentRequest(c, api, request.ServiceName, "two", - record[4]) - // The last request deletes the hosted service. - assertDeleteHostedServiceRequest(c, api, request.ServiceName, record[5]) -} - -func (suite *suiteDestroyHostedService) TestOkayWhenHostedServiceNotFound(c *C) { - var responses []DispatcherResponse - // Prepare. - responses = []DispatcherResponse{exampleNotFoundResponse} - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ServiceName: "service-name"} - err := api.DestroyHostedService(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 1) -} - -func (suite *suiteDestroyHostedService) TestOkayWhenDeploymentsNotFound(c *C) { - var responses []DispatcherResponse - // Prepare. - exampleHostedService := suite.makeExampleHostedService("one", "two") - responses = append(responses, makeOKXMLResponse(c, exampleHostedService)...)
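// One level up, DestroyHostedService composes the same teardown (again a
// sketch inferred from this suite's assertions; method names and signatures
// are assumptions): GET the hosted service, run DestroyDeployment for each
// deployment it lists, then DELETE the service itself; a 404 at any step is
// treated as "already deleted" rather than as an error.
//
//	svc, err := api.GetHostedServiceProperties(serviceName, true)
//	if err != nil {
//		return err // the real code maps 404 to success
//	}
//	for _, d := range svc.Deployments {
//		if err := api.DestroyDeployment(&DestroyDeploymentRequest{
//			ServiceName: serviceName, DeploymentName: d.Name,
//		}); err != nil {
//			return err
//		}
//	}
//	return api.DeleteHostedService(serviceName)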
- // Someone else has destroyed the deployments in the meantime. - responses = append(responses, exampleNotFoundResponse, exampleNotFoundResponse) - // Success deleting the hosted service. - responses = append(responses, exampleOkayResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ServiceName: "service-name"} - err := api.DestroyHostedService(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 4) - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, "one"}, record[1]) - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, "two"}, record[2]) - assertDeleteHostedServiceRequest(c, api, request.ServiceName, record[3]) -} - -func (suite *suiteDestroyHostedService) TestOkayWhenHostedServiceNotFoundWhenDeleting(c *C) { - var responses []DispatcherResponse - // Prepare. - exampleHostedService := suite.makeExampleHostedService("one", "two") - responses = append(responses, makeOKXMLResponse(c, exampleHostedService)...) - // Someone else has destroyed the deployments in the meantime. - responses = append(responses, exampleNotFoundResponse, exampleNotFoundResponse) - // Someone else has destroyed the hosted service in the meantime. - responses = append(responses, exampleNotFoundResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ServiceName: "service-name"} - err := api.DestroyHostedService(request) - c.Assert(err, IsNil) - c.Check(record, HasLen, 4) - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, "one"}, record[1]) - assertGetDeploymentRequest(c, api, &GetDeploymentRequest{ - request.ServiceName, "two"}, record[2]) - assertDeleteHostedServiceRequest(c, api, request.ServiceName, record[3]) -} - -func (suite *suiteDestroyHostedService) TestFailsGettingHostedService(c *C) { - var responses []DispatcherResponse - // Prepare. - responses = []DispatcherResponse{exampleFailResponse} - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ServiceName: "service-name"} - err := api.DestroyHostedService(request) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "GET request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 1) -} - -func (suite *suiteDestroyHostedService) TestFailsDestroyingDeployment(c *C) { - var responses []DispatcherResponse - // Prepare. - exampleHostedService := suite.makeExampleHostedService("one", "two") - responses = append(responses, makeOKXMLResponse(c, exampleHostedService)...) - responses = append(responses, exampleFailResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ServiceName: "service-name"} - err := api.DestroyHostedService(request) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "GET request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 2) -} - -func (suite *suiteDestroyHostedService) TestFailsDeletingHostedService(c *C) { - var responses []DispatcherResponse - // Prepare. 
- exampleHostedService := suite.makeExampleHostedService("one", "two") - responses = append(responses, makeOKXMLResponse(c, exampleHostedService)...) - // Deployments not found, but that's okay. - responses = append(responses, exampleNotFoundResponse, exampleNotFoundResponse) - // When deleting hosted service. - responses = append(responses, exampleFailResponse) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - // Exercise DestroyHostedService. - api := makeAPI(c) - request := &DestroyHostedServiceRequest{ServiceName: "service-name"} - err := api.DestroyHostedService(request) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "DELETE request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 4) -} - -type suiteAddVirtualNetworkSite struct{} - -var _ = Suite(&suiteAddVirtualNetworkSite{}) - -func (suite *suiteAddVirtualNetworkSite) TestWhenConfigCannotBeFetched(c *C) { - responses := []DispatcherResponse{ - {response: &x509Response{StatusCode: http.StatusInternalServerError}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - err := api.AddVirtualNetworkSite(nil) - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "GET request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, record[0]) -} - -func (suite *suiteAddVirtualNetworkSite) TestWhenConfigDoesNotExist(c *C) { - responses := []DispatcherResponse{ - // No configuration found. - {response: &x509Response{StatusCode: http.StatusNotFound}}, - // Accept upload of new configuration. - {response: &x509Response{StatusCode: http.StatusOK}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - virtualNetwork := &VirtualNetworkSite{Name: MakeRandomVirtualNetworkName("test-")} - - err := api.AddVirtualNetworkSite(virtualNetwork) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - assertGetNetworkConfigurationRequest(c, api, record[0]) - expected := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{*virtualNetwork}, - } - expectedBody, err := expected.Serialize() - c.Assert(err, IsNil) - assertSetNetworkConfigurationRequest(c, api, []byte(expectedBody), record[1]) -} - -func (suite *suiteAddVirtualNetworkSite) TestWhenNoPreexistingVirtualNetworkSites(c *C) { - // Prepare a basic, empty, configuration. - existingConfig := &NetworkConfiguration{XMLNS: XMLNS_NC} - responses := makeOKXMLResponse(c, existingConfig) - responses = append(responses, DispatcherResponse{ - // Accept upload of new configuration. - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - virtualNetwork := &VirtualNetworkSite{Name: MakeRandomVirtualNetworkName("test-")} - - err := api.AddVirtualNetworkSite(virtualNetwork) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - assertGetNetworkConfigurationRequest(c, api, record[0]) - expected := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{*virtualNetwork}, - } - expectedBody, err := expected.Serialize() - c.Assert(err, IsNil) - assertSetNetworkConfigurationRequest(c, api, []byte(expectedBody), record[1]) -} - -func (suite *suiteAddVirtualNetworkSite) TestWhenPreexistingVirtualNetworkSites(c *C) { - // Prepare a configuration with a single virtual network. 
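// A minimal sketch of the read-modify-write cycle AddVirtualNetworkSite is
// expected to perform, inferred from the assert helpers in this suite
// (assertGetNetworkConfigurationRequest / assertSetNetworkConfigurationRequest);
// the method names are assumptions, not the original implementation:
//
//	cfg, err := api.GetNetworkConfiguration() // a 404 means "no config yet"
//	if err != nil {
//		return err
//	}
//	var sites []VirtualNetworkSite
//	if cfg != nil && cfg.VirtualNetworkSites != nil {
//		sites = *cfg.VirtualNetworkSites // a duplicate name is rejected first
//	}
//	sites = append(sites, *newSite)
//	return api.SetNetworkConfiguration(&NetworkConfiguration{
//		XMLNS: XMLNS_NC, VirtualNetworkSites: &sites})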
- existingConfig := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{ - {Name: MakeRandomVirtualNetworkName("test-")}, - }, - } - responses := makeOKXMLResponse(c, existingConfig) - responses = append(responses, DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - virtualNetwork := &VirtualNetworkSite{Name: MakeRandomVirtualNetworkName("test-")} - - err := api.AddVirtualNetworkSite(virtualNetwork) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - assertGetNetworkConfigurationRequest(c, api, record[0]) - expectedSites := append( - *existingConfig.VirtualNetworkSites, *virtualNetwork) - expected := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &expectedSites, - } - expectedBody, err := expected.Serialize() - c.Assert(err, IsNil) - assertSetNetworkConfigurationRequest(c, api, []byte(expectedBody), record[1]) -} - -func (suite *suiteAddVirtualNetworkSite) TestWhenPreexistingVirtualNetworkSiteWithSameName(c *C) { - // Prepare a configuration with a single virtual network. - existingConfig := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{ - {Name: "virtual-network-bob"}, - }, - } - responses := makeOKXMLResponse(c, existingConfig) - rigPreparedResponseDispatcher(responses) - api := makeAPI(c) - virtualNetwork := &VirtualNetworkSite{Name: "virtual-network-bob"} - - err := api.AddVirtualNetworkSite(virtualNetwork) - - c.Check(err, ErrorMatches, "could not add virtual network: \"virtual-network-bob\" already exists") -} - -func (suite *suiteAddVirtualNetworkSite) TestWhenConfigCannotBePushed(c *C) { - responses := []DispatcherResponse{ - // No configuration found. - {response: &x509Response{StatusCode: http.StatusNotFound}}, - // Cannot accept upload of new configuration. 
- {response: &x509Response{StatusCode: http.StatusInternalServerError}}, - } - rigPreparedResponseDispatcher(responses) - virtualNetwork := &VirtualNetworkSite{Name: MakeRandomVirtualNetworkName("test-")} - - err := makeAPI(c).AddVirtualNetworkSite(virtualNetwork) - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "PUT request failed [(]500: Internal Server Error[)]") -} - -type suiteRemoveVirtualNetworkSite struct{} - -var _ = Suite(&suiteRemoveVirtualNetworkSite{}) - -func (suite *suiteRemoveVirtualNetworkSite) TestWhenConfigCannotBeFetched(c *C) { - responses := []DispatcherResponse{ - {response: &x509Response{StatusCode: http.StatusInternalServerError}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - err := api.RemoveVirtualNetworkSite("virtual-network-o-doom") - - c.Check(err, ErrorMatches, "GET request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, record[0]) -} - -func (suite *suiteRemoveVirtualNetworkSite) TestWhenConfigDoesNotExist(c *C) { - responses := []DispatcherResponse{ - {response: &x509Response{StatusCode: http.StatusNotFound}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - err := api.RemoveVirtualNetworkSite("virtual-network-in-my-eyes") - - c.Assert(err, IsNil) - c.Check(record, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, record[0]) -} - -func (suite *suiteRemoveVirtualNetworkSite) TestWhenNoPreexistingVirtualNetworkSites(c *C) { - // Prepare a basic, empty, configuration. - existingConfig := &NetworkConfiguration{XMLNS: XMLNS_NC} - responses := makeOKXMLResponse(c, existingConfig) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - err := api.RemoveVirtualNetworkSite("virtual-network-on-my-shoes") - - c.Assert(err, IsNil) - c.Check(record, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, record[0]) -} - -func (suite *suiteRemoveVirtualNetworkSite) TestWhenPreexistingVirtualNetworkSites(c *C) { - // Prepare a configuration with a single virtual network. - existingConfig := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{ - {Name: MakeRandomVirtualNetworkName("test-")}, - }, - } - responses := makeOKXMLResponse(c, existingConfig) - responses = append(responses, DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - virtualNetwork := &VirtualNetworkSite{Name: MakeRandomVirtualNetworkName("test-")} - - err := api.RemoveVirtualNetworkSite(virtualNetwork.Name) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 1) - assertGetNetworkConfigurationRequest(c, api, record[0]) - // It didn't do anything, so no upload. -} - -func (suite *suiteRemoveVirtualNetworkSite) TestWhenPreexistingVirtualNetworkSiteWithSameName(c *C) { - // Prepare a configuration with a single virtual network. 
- existingConfig := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{ - {Name: "virtual-network-bob"}, - }, - } - responses := makeOKXMLResponse(c, existingConfig) - responses = append(responses, DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - virtualNetwork := &VirtualNetworkSite{Name: "virtual-network-bob"} - - err := api.RemoveVirtualNetworkSite(virtualNetwork.Name) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - assertGetNetworkConfigurationRequest(c, api, record[0]) - expected := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{}, - } - expectedBody, err := expected.Serialize() - c.Assert(err, IsNil) - assertSetNetworkConfigurationRequest(c, api, []byte(expectedBody), record[1]) -} - -func (suite *suiteRemoveVirtualNetworkSite) TestWhenConfigCannotBePushed(c *C) { - existingConfig := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{ - {Name: "virtual-network-all-over"}, - }, - } - responses := makeOKXMLResponse(c, existingConfig) - responses = append(responses, DispatcherResponse{ - response: &x509Response{StatusCode: http.StatusInternalServerError}, - }) - rigPreparedResponseDispatcher(responses) - - err := makeAPI(c).RemoveVirtualNetworkSite("virtual-network-all-over") - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "PUT request failed [(]500: Internal Server Error[)]") -} - -type suiteListRoleEndpoints struct{} - -var _ = Suite(&suiteListRoleEndpoints{}) - -func (suite *suiteListRoleEndpoints) TestWhenNoExistingEndpoints(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &ListRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo"} - endpoints, err := api.ListRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Assert(endpoints, DeepEquals, []InputEndpoint{}) - c.Check(record, HasLen, 1) - // Check GetRole was performed. - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) -} - -func (suite *suiteListRoleEndpoints) TestWhenExistingEndpoints(c *C) { - var err error - endpoints := &[]InputEndpoint{ - { - LocalPort: 123, - Name: "test123", - Port: 1123, - }, - { - LocalPort: 456, - Name: "test456", - Port: 4456, - }} - - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: endpoints, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &ListRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo"} - listedEndpoints, err := api.ListRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Assert(listedEndpoints, DeepEquals, *endpoints) - c.Check(record, HasLen, 1) - // Check GetRole was performed. 
- assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) -} - -func (suite *suiteListRoleEndpoints) TestWhenGetRoleFails(c *C) { - responses := []DispatcherResponse{ - // No role found. - {response: &x509Response{StatusCode: http.StatusNotFound}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &ListRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo"} - _, err := api.ListRoleEndpoints(request) - - c.Check(err, ErrorMatches, "GET request failed [(]404: Not Found[)]") - c.Check(record, HasLen, 1) - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) -} - -type suiteAddRoleEndpoints struct{} - -var _ = Suite(&suiteAddRoleEndpoints{}) - -func (suite *suiteAddRoleEndpoints) TestWhenNoPreexistingEndpoints(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Accept upload of new endpoints - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - endpoints := []InputEndpoint{ - { - LocalPort: 999, - Name: "test999", - Port: 1999, - }, - } - - request := &AddRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo", - InputEndpoints: endpoints} - err = api.AddRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - // Check GetRole was performed. - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) - // Check UpdateRole was performed. - existingRole.ConfigurationSets[0].InputEndpoints = &endpoints - expectedXML, err := existingRole.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, record[1], request.ServiceName, request.DeploymentName, - request.RoleName, expectedXML) -} - -func (suite *suiteAddRoleEndpoints) TestWhenPreexistingEndpoints(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - { - LocalPort: 123, - Name: "test123", - Port: 1123, - }, - }, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Accept upload of new endpoints - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - endpoints := []InputEndpoint{ - { - LocalPort: 999, - Name: "test999", - Port: 1999, - }, - } - - request := &AddRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo", - InputEndpoints: endpoints} - err = api.AddRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - // Check GetRole was performed. - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) - // Check UpdateRole was performed. - allEndpoints := append( - *existingRole.ConfigurationSets[0].InputEndpoints, endpoints...) 
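// The append above and the assignment below mirror the merge that
// AddRoleEndpoints itself must perform on the CONFIG_SET_NETWORK
// configuration set before the UpdateRole PUT. A minimal sketch of that
// merge, using the types from this diff (the helper name is illustrative,
// not from the original sources):
//
//	func mergeEndpoints(role *PersistentVMRole, extra []InputEndpoint) {
//		cs := &role.ConfigurationSets[0] // the CONFIG_SET_NETWORK set
//		if cs.InputEndpoints == nil {
//			cs.InputEndpoints = &extra
//			return
//		}
//		merged := append(*cs.InputEndpoints, extra...)
//		cs.InputEndpoints = &merged
//	}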
- existingRole.ConfigurationSets[0].InputEndpoints = &allEndpoints - expectedXML, err := existingRole.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, record[1], request.ServiceName, request.DeploymentName, - request.RoleName, expectedXML) -} - -func (suite *suiteAddRoleEndpoints) TestWhenGetRoleFails(c *C) { - responses := []DispatcherResponse{ - // No role found. - {response: &x509Response{StatusCode: http.StatusNotFound}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &AddRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo"} - err := api.AddRoleEndpoints(request) - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "GET request failed [(]404: Not Found[)]") - c.Check(record, HasLen, 1) - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) -} - -func (suite *suiteAddRoleEndpoints) TestWhenUpdateFails(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Cannot accept upload of new role endpoint - response: &x509Response{StatusCode: http.StatusInternalServerError}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - endpoints := []InputEndpoint{ - { - LocalPort: 999, - Name: "test999", - Port: 1999, - }, - } - - request := &AddRoleEndpointsRequest{ - ServiceName: "foo", - DeploymentName: "foo", - RoleName: "foo", - InputEndpoints: endpoints} - err = api.AddRoleEndpoints(request) - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "PUT request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 2) -} - -type suiteRemoveRoleEndpoints struct{} - -var _ = Suite(&suiteRemoveRoleEndpoints{}) - -func (suite *suiteRemoveRoleEndpoints) TestWhenNoPreexistingEndpoints(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Accept upload of new endpoints - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - endpoints := []InputEndpoint{ - { - LocalPort: 999, - Name: "test999", - Port: 1999, - }, - } - - request := &RemoveRoleEndpointsRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - RoleName: "role-name", - InputEndpoints: endpoints, - } - err = api.RemoveRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - // Check GetRole was performed. - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) - // Check UpdateRole was performed. 
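// For the removal direction, a sketch of the filtering these assertions
// imply, assuming RemoveRoleEndpoints matches endpoints with
// CompareInputEndpoints (tested at the end of this file); the helper is
// illustrative, not the original code:
//
//	func filterEndpoints(existing, remove []InputEndpoint) *[]InputEndpoint {
//		var kept []InputEndpoint
//		for i := range existing {
//			matched := false
//			for j := range remove {
//				if CompareInputEndpoints(&existing[i], &remove[j]) {
//					matched = true
//					break
//				}
//			}
//			if !matched {
//				kept = append(kept, existing[i])
//			}
//		}
//		if len(kept) == 0 {
//			return nil // nil, not an empty slice; see TestWhenAllEndpointsAreRemoved
//		}
//		return &kept
//	}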
- expectedXML, err := existingRole.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, record[1], request.ServiceName, request.DeploymentName, - request.RoleName, expectedXML) -} - -func (suite *suiteRemoveRoleEndpoints) TestWhenEndpointIsDefined(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - { - LocalPort: 123, - Name: "test123", - Port: 1123, - }, - { - LocalPort: 456, - Name: "test456", - Port: 4456, - }, - }, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Accept upload of new endpoints - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &RemoveRoleEndpointsRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - RoleName: "role-name", - // Remove the first of the existing endpoints. - InputEndpoints: (*existingRole.ConfigurationSets[0].InputEndpoints)[:1], - } - err = api.RemoveRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - // Check GetRole was performed. - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) - // Check UpdateRole was performed. - expected := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - (*existingRole.ConfigurationSets[0].InputEndpoints)[1], - }, - }, - }, - } - expectedXML, err := expected.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, record[1], request.ServiceName, request.DeploymentName, - request.RoleName, expectedXML) -} - -func (suite *suiteRemoveRoleEndpoints) TestWhenAllEndpointsAreRemoved(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - { - LocalPort: 123, - Name: "test123", - Port: 1123, - }, - }, - }, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Accept upload of new endpoints - response: &x509Response{StatusCode: http.StatusOK}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &RemoveRoleEndpointsRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - RoleName: "role-name", - // Remove all of the existing endpoints. - InputEndpoints: *existingRole.ConfigurationSets[0].InputEndpoints, - } - err = api.RemoveRoleEndpoints(request) - - c.Assert(err, IsNil) - c.Check(record, HasLen, 2) - // Check GetRole was performed. - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) - // Check UpdateRole was performed. - expected := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - // InputEndpoints is nil, not the empty slice. 
- InputEndpoints: nil, - }, - }, - } - expectedXML, err := expected.Serialize() - c.Assert(err, IsNil) - assertUpdateRoleRequest( - c, api, record[1], request.ServiceName, request.DeploymentName, - request.RoleName, expectedXML) -} - -func (suite *suiteRemoveRoleEndpoints) TestWhenGetRoleFails(c *C) { - responses := []DispatcherResponse{ - // No role found. - {response: &x509Response{StatusCode: http.StatusNotFound}}, - } - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &RemoveRoleEndpointsRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - RoleName: "role-name", - } - err := api.RemoveRoleEndpoints(request) - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "GET request failed [(]404: Not Found[)]") - c.Check(record, HasLen, 1) - assertGetRoleRequest( - c, api, record[0], request.ServiceName, request.DeploymentName, - request.RoleName) -} - -func (suite *suiteRemoveRoleEndpoints) TestWhenUpdateFails(c *C) { - var err error - existingRole := &PersistentVMRole{ - ConfigurationSets: []ConfigurationSet{ - {ConfigurationSetType: CONFIG_SET_NETWORK}, - }, - } - responses := makeOKXMLResponse(c, existingRole) - responses = append(responses, DispatcherResponse{ - // Cannot accept upload of new role endpoint - response: &x509Response{StatusCode: http.StatusInternalServerError}, - }) - record := []*X509Request{} - rigRecordingPreparedResponseDispatcher(&record, responses) - api := makeAPI(c) - - request := &RemoveRoleEndpointsRequest{ - ServiceName: "service-name", - DeploymentName: "deployment-name", - RoleName: "role-name", - } - err = api.RemoveRoleEndpoints(request) - - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, "PUT request failed [(]500: Internal Server Error[)]") - c.Check(record, HasLen, 2) -} - -type suiteCompareInputEndpoints struct{} - -var _ = Suite(&suiteCompareInputEndpoints{}) - -func (suite *suiteCompareInputEndpoints) TestEqualWhenEmpty(c *C) { - a := &InputEndpoint{} - b := &InputEndpoint{} - c.Assert(CompareInputEndpoints(a, b), Equals, true) -} - -func (suite *suiteCompareInputEndpoints) TestEquality(c *C) { - checkComparison := func(a, b InputEndpoint, expected bool) { - c.Check(CompareInputEndpoints(&a, &b), Equals, expected) - } - // Name has no influence on comparison. - checkComparison( - InputEndpoint{Name: "foo"}, - InputEndpoint{Name: "bar"}, - true) - // LoadBalancerProbe has no influence on comparison. - checkComparison( - InputEndpoint{ - LoadBalancerProbe: &LoadBalancerProbe{Path: "foo"}, - }, - InputEndpoint{ - LoadBalancerProbe: &LoadBalancerProbe{Path: "bar"}, - }, - true, - ) - // Port influences comparisons. - checkComparison( - InputEndpoint{Port: 1234}, - InputEndpoint{Port: 1234}, - true) - checkComparison( - InputEndpoint{Port: 1234}, - InputEndpoint{Port: 5678}, - false) - // Protocol influences comparisons. - checkComparison( - InputEndpoint{Protocol: "TCP"}, - InputEndpoint{Protocol: "TCP"}, - true) - checkComparison( - InputEndpoint{Protocol: "TCP"}, - InputEndpoint{Protocol: "UDP"}, - false) - // VIP influences comparisons. - checkComparison( - InputEndpoint{VIP: "1.2.3.4"}, - InputEndpoint{VIP: "1.2.3.4"}, - true) - checkComparison( - InputEndpoint{VIP: "1.2.3.4"}, - InputEndpoint{VIP: "5.6.7.8"}, - false) - // LoadBalancedEndpointSetName influences comparisons. 
- checkComparison( - InputEndpoint{LoadBalancedEndpointSetName: "foo"}, - InputEndpoint{LoadBalancedEndpointSetName: "foo"}, - true) - checkComparison( - InputEndpoint{LoadBalancedEndpointSetName: "foo"}, - InputEndpoint{LoadBalancedEndpointSetName: "bar"}, - false) - // LocalPort influences comparisons only when LoadBalancedEndpointSetName - // is not the empty string. - checkComparison( - InputEndpoint{LocalPort: 1234}, - InputEndpoint{LocalPort: 5678}, - true) - checkComparison( - InputEndpoint{LoadBalancedEndpointSetName: "foo", LocalPort: 1234}, - InputEndpoint{LoadBalancedEndpointSetName: "foo", LocalPort: 5678}, - false) -} === removed file 'src/launchpad.net/gwacl/master_test.go' --- src/launchpad.net/gwacl/master_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/master_test.go 1970-01-01 00:00:00 +0000 @@ -1,14 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - . "launchpad.net/gocheck" - "testing" -) - -// Master loader for all tests. -func Test(t *testing.T) { - TestingT(t) -} === removed file 'src/launchpad.net/gwacl/names.go' --- src/launchpad.net/gwacl/names.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/names.go 1970-01-01 00:00:00 +0000 @@ -1,151 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" -) - -// pickOne returns a random choice of one of the characters in chars. -func pickOne(chars string) string { - index := random.Intn(len(chars)) - return string(chars[index]) -} - -const ( - upperCaseLetters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - letters = "abcdefghijklmnopqrstuvwxyz" - digits = "0123456789" -) - -// makeRandomIdentifier creates an arbitrary identifier of the given length, -// consisting of only ASCII digits and lower-case ASCII letters. -// The identifier will start with the given prefix. The prefix must be no -// longer than the specified length, or there'll be trouble. - -func makeRandomIdentifier(prefix string, length int) string { - // Only digits and lower-case ASCII letters are accepted. - const ( - chars = letters + digits - ) - - if len(prefix) > length { - panic(fmt.Errorf("prefix '%s' is more than the requested %d characters long", prefix, length)) - } - - if len(prefix)+5 > length { - panic(fmt.Errorf( - "prefix '%s' is too long; space is needed for at least 5 random characters, only %d given", - prefix, length-len(prefix))) - } - - if len(prefix) == 0 { - // No prefix. Still have to start with a letter, so pick one. - prefix = pickOne(letters) - } - - id := prefix - for len(id) < length { - id += pickOne(chars) - } - return id -} - -const ( - // We don't know of any documentation on how long a hosted-service name can - // be, but this is the maximum length that worked in experiments. - HostedServiceNameMaxiumSize = 63 - // The number of random characters used when generating random Hosted - // Service names. - HostedServiceNameRandomChars = 10 - // The maximum length allowed for a Hosted Service name prefix (as passed - // to MakeRandomHostedServiceName().) - HostedServiceNameMaximumPrefixSize = HostedServiceNameMaxiumSize - HostedServiceNameRandomChars -) - -// MakeRandomHostedServiceName generates a pseudo-random name for a hosted -// service, with the given prefix. 
-// -// The prefix must be as short as possible, be entirely in ASCII, start with -// a lower-case letter, and contain only lower-case letters and digits after -// that. -func MakeRandomHostedServiceName(prefix string) string { - // We don't know of any documentation on how long a hosted-service name can - // be, but this is the maximum length that worked in experiments. - size := len(prefix) + HostedServiceNameRandomChars - if size > HostedServiceNameMaxiumSize { - panic(fmt.Errorf("prefix '%s' is too long; it can be at most %d characters", prefix, HostedServiceNameMaximumPrefixSize)) - } - - return makeRandomIdentifier(prefix, size) -} - -// MakeRandomHostname generates a pseudo-random hostname for a virtual machine, -// with the given prefix. -// -// The prefix must be as short as possible, be entirely in ASCII, start with -// a lower-case letter, and contain only lower-case letters and digits after -// that. -func MakeRandomHostname(prefix string) string { - // Azure documentation says the hostname can be between 1 and 64 - // letters long, but in practice we found it didn't work with anything - // over 55 letters long. - return makeRandomIdentifier(prefix, 55) -} - -// MakeRandomDiskName generates a pseudo-random disk name for a virtual machine -// with the given prefix. -// -// The prefix must be as short as possible, be entirely in ASCII, start with -// a lower-case letter, and contain only lower-case letters and digits after -// that. -func MakeRandomDiskName(prefix string) string { - // Azure documentation does not say what the maximum size of a disk name - // is. Testing indicates that 50 works. - return makeRandomIdentifier(prefix, 50) -} - -// MakeRandomRoleName generates a pseudo-random role name for a virtual machine -// with the given prefix. -// -// The prefix must be as short as possible, be entirely in ASCII, start with -// a lower-case letter, and contain only lower-case letters and digits after -// that. -func MakeRandomRoleName(prefix string) string { - // Azure documentation does not say what the maximum size of a role name - // is. Testing indicates that 50 works. - return makeRandomIdentifier(prefix, 50) -} - -// MakeRandomVirtualNetworkName generates a pseudo-random name for a virtual -// network with the given prefix. -// -// The prefix must be as short as possible, be entirely in ASCII, start with -// a lower-case letter, and contain only lower-case letters and digits after -// that. -func MakeRandomVirtualNetworkName(prefix string) string { - return makeRandomIdentifier(prefix, 20) -} - -const ( - // Valid passwords must be 6-72 characters long. - passwordSize = 50 -) - -// MakeRandomPassword generates a pseudo-random password for a Linux Virtual -// Machine. -func MakeRandomPassword() string { - const chars = letters + digits + upperCaseLetters - - upperCaseLetter := pickOne(upperCaseLetters) - letter := pickOne(letters) - digit := pickOne(digits) - // Make sure the password has at least one lower-case letter, one - // upper-case letter and a digit to meet Azure's password complexity - // requirements. - password := letter + upperCaseLetter + digit - for len(password) < passwordSize { - password += pickOne(chars) - } - return password -} === removed file 'src/launchpad.net/gwacl/names_test.go' --- src/launchpad.net/gwacl/names_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/names_test.go 1970-01-01 00:00:00 +0000 @@ -1,117 +0,0 @@ -// Copyright 2013 Canonical Ltd. 
This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - . "launchpad.net/gocheck" - "strings" -) - -type namesSuite struct{} - -var _ = Suite(&namesSuite{}) - -func (*namesSuite) TestPickOneReturnsOneCharacter(c *C) { - c.Check(len(pickOne("abcd")), Equals, 1) -} - -func (*namesSuite) TestMakeRandomIdentifierObeysLength(c *C) { - length := 6 + random.Intn(50) - c.Check(len(makeRandomIdentifier("x", length)), Equals, length) -} - -// makeRandomIdentifier ensures that there are at least 5 random characters in -// an identifier. -func (*namesSuite) TestMakeRandomIdentifierEnsuresSomeRandomness(c *C) { - c.Check(makeRandomIdentifier("1234-", 10), Matches, "1234-[a-z0-9]{5}") - c.Check( - func() { makeRandomIdentifier("12345-", 10) }, PanicMatches, - "prefix '12345-' is too long; space is needed for at least 5 random characters, only 4 given") -} - -func (*namesSuite) TestMakeRandomIdentifierRandomizes(c *C) { - // There is a minute chance that this will start failing just because - // the randomizer repeats a pattern of results. If so, seed it. - c.Check( - makeRandomIdentifier("x", 100), - Not(Equals), - makeRandomIdentifier("x", 100)) -} - -func (*namesSuite) TestMakeRandomIdentifierPicksDifferentCharacters(c *C) { - // There is a minute chance that this will start failing just because - // the randomizer repeats a pattern of results. If so, seed it. - chars := make(map[rune]bool) - for _, chr := range makeRandomIdentifier("", 100) { - chars[chr] = true - } - c.Check(len(chars), Not(Equals), 1) -} - -func (*namesSuite) TestMakeRandomIdentifierUsesPrefix(c *C) { - c.Check(makeRandomIdentifier("prefix", 11), Matches, "prefix.*") -} - -func (*namesSuite) TestMakeRandomIdentifierUsesOnlyAcceptedCharacters(c *C) { - c.Check(makeRandomIdentifier("", 100), Matches, "[0-9a-z]*") -} - -func (*namesSuite) TestMakeRandomIdentifierAcceptsEmptyPrefix(c *C) { - // In particular, the first character must still be a letter. 
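// For instance (an illustrative output only, not from the original sources),
// makeRandomIdentifier("", 5) might yield "ak3x9": a lower-case letter first,
// then any mix of lower-case letters and digits, which is what the check
// below verifies.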
- c.Check(makeRandomIdentifier("", 5), Matches, "[a-z].*") -} - -func (*namesSuite) TestMakeRandomDiskName(c *C) { - c.Check(MakeRandomDiskName(""), Not(HasLen), 0) -} - -func (*namesSuite) TestMakeRandomRoleName(c *C) { - c.Check(MakeRandomRoleName(""), Not(HasLen), 0) -} - -func (*namesSuite) TestMakeRandomVirtualNetworkName(c *C) { - c.Check(MakeRandomVirtualNetworkName(""), Not(HasLen), 0) -} - -func (*namesSuite) TestMakeRandomHostedServiceName(c *C) { - c.Check(MakeRandomHostedServiceName(""), Not(HasLen), 0) -} - -func (*namesSuite) TestMakeRandomHostedUsesALimitedNumberOfRandomChars(c *C) { - prefix := "prefix" - expectedSize := len(prefix) + HostedServiceNameRandomChars - c.Check(MakeRandomHostedServiceName(prefix), HasLen, expectedSize) -} - -func (*namesSuite) TestMakeRandomHostedRejectsLongPrefix(c *C) { - tooLongPrefix := makeRandomIdentifier("", HostedServiceNameMaximumPrefixSize+1) - c.Check( - func() { MakeRandomHostedServiceName(tooLongPrefix) }, PanicMatches, - ".*is too long.*") -} - -func (*namesSuite) TestMakeRandomHostedAcceptsLongestPrefix(c *C) { - prefix := makeRandomIdentifier("", HostedServiceNameMaximumPrefixSize) - c.Check(MakeRandomHostedServiceName(prefix), HasLen, HostedServiceNameMaxiumSize) -} - -func assertIsAzureValidPassword(c *C, password string) { - c.Check(password, HasLen, passwordSize) - if !strings.ContainsAny(password, upperCaseLetters) { - c.Errorf("Password %v does not contain a single upper-case letter!", password) - } - if !strings.ContainsAny(password, letters) { - c.Errorf("Password %v does not contain a single lower-case letter!", password) - } - if !strings.ContainsAny(password, digits) { - c.Errorf("Password %v does not contain a single digit!", password) - } -} - -func (*namesSuite) TestMakeRandomPassword(c *C) { - for index := 0; index < 100; index += 1 { - password := MakeRandomPassword() - assertIsAzureValidPassword(c, password) - } -} === removed file 'src/launchpad.net/gwacl/poller.go' --- src/launchpad.net/gwacl/poller.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/poller.go 1970-01-01 00:00:00 +0000 @@ -1,122 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - "time" -) - -// Generic poller interface/methods. - -// A poller exposes two methods to query a remote server and decide when -// the response given by the server means that the polling is finished. -type poller interface { - poll() (*x509Response, error) - isDone(*x509Response, error) (bool, error) -} - -// performPolling calls the poll() method of the given 'poller' object every -// 'interval' until poller.isDone() returns true. -func performPolling(poller poller, interval time.Duration, timeout time.Duration) (*x509Response, error) { - timeoutChannel := time.After(timeout) - ticker := time.Tick(interval) - // Function to do a single poll, checking for timeout. The bool returned - // indicates if polling is finished, one way or another. - poll := func() (bool, *x509Response, error) { - // This may need to tolerate some transient failures, such as network - // failures that may go away after a few retries. 
- select { - case <-timeoutChannel: - return true, nil, fmt.Errorf("polling timed out waiting for an asynchronous operation") - default: - response, pollerErr := poller.poll() - done, err := poller.isDone(response, pollerErr) - if err != nil { - return true, nil, err - } - if done { - return true, response, nil - } - } - return false, nil, nil - } - // Do an initial poll. - done, response, err := poll() - if done { - return response, err - } - // Poll every interval. - for _ = range ticker { - done, response, err := poll() - if done { - return response, err - } - } - // This code cannot be reached but Go insists on having a return or a panic - // statement at the end of this method. Sigh. - panic("invalid poller state!") -} - -// Operation poller structs/methods. - -// performOperationPolling calls performPolling on the given arguments and converts -// the returned object into an *Operation. -func performOperationPolling(poller poller, interval time.Duration, timeout time.Duration) (*Operation, error) { - response, err := performPolling(poller, interval, timeout) - if err != nil { - return nil, err - } - operation := Operation{} - err = operation.Deserialize(response.Body) - return &operation, err } - -// operationPoller is an object implementing the poller interface, used to -// poll the Windows Azure server until the operation referenced by the given -// operationID is completed. -type operationPoller struct { - api *ManagementAPI - operationID string -} - -var _ poller = &operationPoller{} - -// poll issues a blocking request to Microsoft Azure to fetch the information -// related to the operation associated with the poller. -// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460783.aspx -func (poller operationPoller) poll() (*x509Response, error) { - URI := "operations/" + poller.operationID - return poller.api.session.get(URI, baseAPIVersion) -} - -// isDone returns true if the given response has a status code indicating -// success and if the returned XML response corresponds to a valid Operation -// with a status indicating that the operation is completed. -func (poller operationPoller) isDone(response *x509Response, pollerError error) (bool, error) { - // TODO: Add a timeout so that polling won't continue forever if the - // server cannot be reached. - if pollerError != nil { - return true, pollerError - } - if response.StatusCode >= 200 && response.StatusCode < 300 { - operation := Operation{} - err := operation.Deserialize(response.Body) - if err != nil { - return false, err - } - status := operation.Status - done := (status != "" && status != InProgressOperationStatus) - return done, nil - } - return false, nil -} === removed file 'src/launchpad.net/gwacl/poller_test.go' --- src/launchpad.net/gwacl/poller_test.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/poller_test.go 1970-01-01 00:00:00 +0000 @@ -1,218 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "encoding/xml" - "fmt" - "net/http" - "time" - - . 
"launchpad.net/gocheck" - "launchpad.net/gwacl/dedent" -) - -type pollerSuite struct{} - -var _ = Suite(&pollerSuite{}) - -func (suite *pollerSuite) makeAPI(c *C) *ManagementAPI { - subscriptionId := "subscriptionId" - subscriptionId = subscriptionId - api, err := NewManagementAPI(subscriptionId, "", "West US") - c.Assert(err, IsNil) - return api -} - -// testPoller is a struct which implements the Poller interface. It records -// the number of calls to testPoller.Poll(). -type testPoller struct { - recordedPollCalls int - notDoneCalls int - errorCalls int -} - -var testPollerResponse = x509Response{} -var testPollerError = fmt.Errorf("Test error") - -// newTestPoller return a pointer to a testPoller object that will return -// false when IsDone() will be called 'notDoneCalls' number of times and that -// will error when Poll() will be called 'errorCalls' number of times. -func newTestPoller(notDoneCalls int, errorCalls int) *testPoller { - return &testPoller{0, notDoneCalls, errorCalls} -} - -func (poller *testPoller) poll() (*x509Response, error) { - if poller.errorCalls > 0 { - poller.errorCalls -= 1 - return nil, testPollerError - } - poller.recordedPollCalls += 1 - return &testPollerResponse, nil -} - -func (poller *testPoller) isDone(response *x509Response, pollerError error) (bool, error) { - if pollerError != nil { - return true, pollerError - } - if poller.notDoneCalls == 0 { - return true, nil - } - poller.notDoneCalls = poller.notDoneCalls - 1 - return false, nil -} - -func (suite *pollerSuite) TestperformPollingPollsOnceImmediately(c *C) { - poller := newTestPoller(0, 0) - interval := time.Second * 10 - start := time.Now() - response, err := performPolling(poller, interval, interval*2) - c.Assert(err, Equals, nil) - c.Assert(time.Since(start) < interval, Equals, true) - c.Assert(response, DeepEquals, &testPollerResponse) -} - -func (suite *pollerSuite) TestperformPollingReturnsError(c *C) { - poller := newTestPoller(0, 1) - response, err := performPolling(poller, time.Nanosecond, time.Minute) - - c.Assert(err, Equals, testPollerError) - c.Assert(response, IsNil) -} - -func (suite *pollerSuite) TestperformPollingTimesout(c *C) { - poller := newTestPoller(10, 0) - response, err := performPolling(poller, time.Millisecond, 5*time.Millisecond) - - c.Assert(response, IsNil) - c.Check(err, ErrorMatches, ".*polling timed out waiting for an asynchronous operation.*") -} - -func (suite *pollerSuite) TestperformPollingRetries(c *C) { - poller := newTestPoller(2, 0) - response, err := performPolling(poller, time.Nanosecond, time.Minute) - - c.Assert(err, IsNil) - c.Assert(response, DeepEquals, &testPollerResponse) - // Poll() has been called 3 times: two calls for which IsDone() returned - // false and one for which IsDone() return true. 
- c.Assert(poller.recordedPollCalls, Equals, 3) -} - -func (suite *pollerSuite) TestnewOperationPoller(c *C) { - api := suite.makeAPI(c) - operationID := "operationID" - - poller := newOperationPoller(api, operationID) - - operationPollerInstance := poller.(operationPoller) - c.Check(operationPollerInstance.api, Equals, api) - c.Check(operationPollerInstance.operationID, Equals, operationID) -} - -func (suite *pollerSuite) TestOperationPollerPoll(c *C) { - api := suite.makeAPI(c) - operationID := "operationID" - poller := newOperationPoller(api, operationID) - recordedRequests := setUpDispatcher("operationID") - - _, err := poller.poll() - - c.Assert(err, IsNil) - expectedURL := defaultManagement + api.session.subscriptionId + "/operations/" + operationID - checkOneRequest(c, recordedRequests, expectedURL, baseAPIVersion, nil, "GET") -} - -var operationXMLTemplate = dedent.Dedent(` - <Operation xmlns="http://schemas.microsoft.com/windowsazure"> - <ID>bogus-request-id</ID> - <Status>%s</Status> - </Operation> -`) - -func (suite *pollerSuite) TestOperationPollerIsDoneReturnsTrueIfOperationDone(c *C) { - poller := newOperationPoller(suite.makeAPI(c), "operationID") - operationStatuses := []string{"Succeeded", "Failed"} - for _, operationStatus := range operationStatuses { - body := fmt.Sprintf(operationXMLTemplate, operationStatus) - response := x509Response{ - Body: []byte(body), - StatusCode: http.StatusOK, - } - - isDone, err := poller.isDone(&response, nil) - c.Assert(err, IsNil) - c.Assert(isDone, Equals, true) - } -} - -func (suite *pollerSuite) TestOperationPollerIsDoneReturnsFalse(c *C) { - poller := newOperationPoller(suite.makeAPI(c), "operationID") - notDoneResponses := []x509Response{ - // 'InProgress' response. - { - Body: []byte(fmt.Sprintf(operationXMLTemplate, "InProgress")), - StatusCode: http.StatusOK, - }, - // Error statuses. - {StatusCode: http.StatusNotFound}, - {StatusCode: http.StatusBadRequest}, - {StatusCode: http.StatusInternalServerError}, - } - for _, response := range notDoneResponses { - isDone, _ := poller.isDone(&response, nil) - c.Assert(isDone, Equals, false) - } -} - -func (suite *pollerSuite) TestOperationPollerIsDoneReturnsXMLParsingError(c *C) { - poller := newOperationPoller(suite.makeAPI(c), "operationID") - // Invalid XML content. - response := x509Response{ - Body: []byte("<invalid XML"), - StatusCode: http.StatusOK, - } - - isDone, err := poller.isDone(&response, nil) - - c.Check(isDone, Equals, false) - c.Check(err, NotNil) -} === removed file 'src/launchpad.net/gwacl/retry_policy.go' --- src/launchpad.net/gwacl/retry_policy.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/retry_policy.go 1970-01-01 00:00:00 +0000 @@ -1,115 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "net/http" - "time" - - forkedHttp "launchpad.net/gwacl/fork/http" -) - -// A RetryPolicy describes how to retry failed HTTP requests: how many times, -// for which HTTP status codes, and how long to wait between attempts. -type RetryPolicy struct { - // The number of times a failing request will be retried. - NbRetries int - // The HTTP status codes for which a request should be retried. - HttpStatusCodes []int - // How long to wait between attempts. - Delay time.Duration -} - -// NoRetryPolicy is the default policy: it performs no retries. -var NoRetryPolicy = RetryPolicy{} - -// isRetryCode returns whether the given HTTP status code is one for which -// requests should be retried under this policy. -func (policy RetryPolicy) isRetryCode(httpStatusCode int) bool { - for _, code := range policy.HttpStatusCodes { - if code == httpStatusCode { - return true - } - } - return false -} - -// A retryHelper tracks how many retries are left for a request under a given -// retry policy. -type retryHelper struct { - // The number of retries left. - retriesLeft int - - // The policy in force. - policy *RetryPolicy -} - -// getRetryHelper returns a helper object used to enforce the retry policy. -func (policy RetryPolicy) getRetryHelper() *retryHelper { - return &retryHelper{retriesLeft: policy.NbRetries, policy: &policy} -} - -// shouldRetry returns whether a request which received the given HTTP status -// code should be retried, counting down the number of retries left. -func (ret *retryHelper) shouldRetry(httpStatusCode int) bool { - if ret.retriesLeft > 0 && ret.policy.isRetryCode(httpStatusCode) { - ret.retriesLeft-- - return true - } - return false -} - -// A retrier is a struct used to repeat a request as governed by a retry -// policy. retrier is usually created using RetryPolicy.getRetrier(). -type retrier struct { - *retryHelper - - // The client used to perform requests. - client *http.Client -} - -func (ret *retrier) RetryRequest(request *http.Request) (*http.Response, error) { - for { - response, err := ret.client.Do(request) - if err != nil { - return nil, err - } - if !ret.shouldRetry(response.StatusCode) { - return response, nil - } - time.Sleep(ret.policy.Delay) - } -} - -// getRetrier returns a `retrier` object used to enforce the retry policy. -func (policy RetryPolicy) getRetrier(client *http.Client) *retrier { - helper := policy.getRetryHelper() - return &retrier{retryHelper: helper, client: client} -} - -// A forkedHttpRetrier is a struct used to repeat a request as governed by a -// retry policy. forkedHttpRetrier is usually created using -// RetryPolicy.getForkedHttpRetrier(). It's the same as the `retrier` struct -// except it deals with the forked version of the http package. -type forkedHttpRetrier struct { - *retryHelper - - // The client used to perform requests. 
-    client *forkedHttp.Client
-}
-
-func (ret *forkedHttpRetrier) RetryRequest(request *forkedHttp.Request) (*forkedHttp.Response, error) {
-    for {
-        response, err := ret.client.Do(request)
-        if err != nil {
-            return nil, err
-        }
-        if !ret.shouldRetry(response.StatusCode) {
-            return response, nil
-        }
-        time.Sleep(ret.policy.Delay)
-    }
-}
-
-// getForkedHttpRetrier returns a `forkedHttpRetrier` object used to enforce
-// the retry policy.
-func (policy RetryPolicy) getForkedHttpRetrier(client *forkedHttp.Client) *forkedHttpRetrier {
-    helper := policy.getRetryHelper()
-    return &forkedHttpRetrier{retryHelper: helper, client: client}
-}

=== removed file 'src/launchpad.net/gwacl/retry_policy_test.go'
--- src/launchpad.net/gwacl/retry_policy_test.go 2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/retry_policy_test.go 1970-01-01 00:00:00 +0000
@@ -1,150 +0,0 @@
-// Copyright 2013 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-package gwacl
-
-import (
-    "fmt"
-    . "launchpad.net/gocheck"
-    forkedHttp "launchpad.net/gwacl/fork/http"
-    "net/http"
-    "time"
-)
-
-type retryPolicySuite struct{}
-
-var _ = Suite(&retryPolicySuite{})
-
-func (*retryPolicySuite) TestNoRetryPolicyDoesNotRetry(c *C) {
-    c.Check(NoRetryPolicy.NbRetries, Equals, 0)
-}
-
-func (*retryPolicySuite) TestDefaultPolicyIsNoRetryPolicy(c *C) {
-    c.Check(NoRetryPolicy, DeepEquals, RetryPolicy{})
-}
-
-func (*retryPolicySuite) TestIsRetryCodeChecksStatusCode(c *C) {
-    c.Check(
-        RetryPolicy{HttpStatusCodes: []int{http.StatusConflict}}.isRetryCode(http.StatusConflict),
-        Equals, true)
-    c.Check(
-        RetryPolicy{HttpStatusCodes: []int{}}.isRetryCode(http.StatusOK),
-        Equals, false)
-    c.Check(
-        RetryPolicy{HttpStatusCodes: []int{http.StatusConflict}}.isRetryCode(http.StatusOK),
-        Equals, false)
-
-}
-
-func (*retryPolicySuite) TestGetRetryHelperReturnsHelper(c *C) {
-    policy := RetryPolicy{NbRetries: 7, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Minute}
-    helper := policy.getRetryHelper()
-    c.Check(*helper.policy, DeepEquals, policy)
-    c.Check(helper.retriesLeft, Equals, policy.NbRetries)
-}
-
-type retryHelperSuite struct{}
-
-var _ = Suite(&retryHelperSuite{})
-
-func (*retryHelperSuite) TestShouldRetryExhaustsRetries(c *C) {
-    nbTries := 3
-    policy := RetryPolicy{NbRetries: nbTries, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond}
-    helper := policy.getRetryHelper()
-    retries := []bool{}
-    for i := 0; i < nbTries+1; i++ {
-        retries = append(retries, helper.shouldRetry(http.StatusConflict))
-    }
-    expectedRetries := []bool{true, true, true, false}
-    c.Check(retries, DeepEquals, expectedRetries)
-}
-
-func (*retryHelperSuite) TestShouldRetryReturnsFalseIfCodeNotInHttpStatusCodes(c *C) {
-    policy := RetryPolicy{NbRetries: 10, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond}
-    helper := policy.getRetryHelper()
-    c.Check(helper.shouldRetry(http.StatusOK), Equals, false)
-}
-
-type retrierSuite struct{}
-
-var _ = Suite(&retrierSuite{})
-
-func (*retrierSuite) TestGetRetrier(c *C) {
-    client := &http.Client{}
-    policy := RetryPolicy{NbRetries: 10, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond}
-    retrier := policy.getRetrier(client)
-    c.Check(*retrier.policy, DeepEquals, policy)
-    c.Check(retrier.client, DeepEquals, client)
-}
-
-func (*retrierSuite) TestRetryRequest(c *C) {
-    nbTries := 3
-    transport := &MockingTransport{}
-    client := &http.Client{Transport: transport}
-    for i := 0; i < nbTries; i++ {
-        response := makeHttpResponse(http.StatusConflict, "")
-        transport.AddExchange(response, nil)
-    }
-    response := makeHttpResponse(http.StatusOK, "")
-    transport.AddExchange(response, nil)
-
-    policy := RetryPolicy{NbRetries: nbTries, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond}
-    retrier := policy.getRetrier(client)
-    req, err := http.NewRequest("GET", "http://example.com/", nil)
-    c.Assert(err, IsNil)
-
-    resp, err := retrier.RetryRequest(req)
-    c.Assert(err, IsNil)
-
-    c.Check(resp.StatusCode, Equals, http.StatusOK)
-    c.Check(transport.ExchangeCount, Equals, nbTries+1)
-}
-
-func (*retrierSuite) TestRetryRequestBailsOutWhenError(c *C) {
-    nbTries := 3
-    transport := &MockingTransport{}
-    client := &http.Client{Transport: transport}
-    transport.AddExchange(nil, fmt.Errorf("request error"))
-
-    policy := RetryPolicy{NbRetries: nbTries, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond}
-    retrier := policy.getRetrier(client)
-    req, err := http.NewRequest("GET", "http://example.com/", nil)
-    c.Assert(err, IsNil)
-
-    _, err = retrier.RetryRequest(req)
-    c.Check(err, ErrorMatches, ".*request error.*")
-
-    c.Check(transport.ExchangeCount, Equals, 1)
-}
-
-type forkedHttpRetrierSuite struct{}
-
-var _ = Suite(&forkedHttpRetrierSuite{})
-
-func (*forkedHttpRetrierSuite) TestGetRetrier(c *C) {
-    client := &forkedHttp.Client{}
-    policy := RetryPolicy{NbRetries: 10, HttpStatusCodes: []int{forkedHttp.StatusConflict}, Delay: time.Nanosecond}
-    retrier := policy.getForkedHttpRetrier(client)
-    c.Check(*retrier.policy, DeepEquals, policy)
-    c.Check(retrier.client, DeepEquals, client)
-}
-
-func (*forkedHttpRetrierSuite) TestRetryRequest(c *C) {
-    nbTries := 3
-    nbRequests := nbTries + 1
-    client := &forkedHttp.Client{}
-    httpRequests := make(chan *Request, nbRequests)
-    server := makeRecordingHTTPServer(httpRequests, http.StatusConflict, nil, nil)
-    defer server.Close()
-
-    policy := RetryPolicy{NbRetries: nbTries, HttpStatusCodes: []int{forkedHttp.StatusConflict}, Delay: time.Nanosecond}
-    retrier := policy.getForkedHttpRetrier(client)
-    req, err := forkedHttp.NewRequest("GET", server.URL, nil)
-    c.Assert(err, IsNil)
-
-    resp, err := retrier.RetryRequest(req)
-    c.Assert(err, IsNil)
-
-    c.Check(resp.StatusCode, Equals, forkedHttp.StatusConflict)
-    c.Check(len(httpRequests), Equals, nbTries+1)
-}

=== removed file 'src/launchpad.net/gwacl/rolesizes.go'
--- src/launchpad.net/gwacl/rolesizes.go 2015-01-08 15:17:37 +0000
+++ src/launchpad.net/gwacl/rolesizes.go 1970-01-01 00:00:00 +0000
@@ -1,748 +0,0 @@
-// Copyright 2013-2014 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-// Define the role sizes available in Azure.
-
-package gwacl
-
-import (
-    "fmt"
-)
-
-// RoleSize is a representation of the machine specs available in the Azure
-// documentation here:
-// http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx
-//
-// Pricing from here:
-// http://azure.microsoft.com/en-us/pricing/details/virtual-machines
-//
-// Detailed specifications here:
-// http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx
-//
-// Our specifications may be inaccurate or out of date. When in doubt, check!
-//
-// The Disk Space values are only the maximum permitted; actual space is
-// determined by the OS image being used.
-//
-// Sizes and costs last updated 2014-06-23.
-type RoleSize struct {
-    // Name is the name that Azure assigns to role sizes.
-    Name string
-
-    // Alias is an alternative name for the role size. This may be empty.
-    //
-    // Alias exists because the canonical role size names are inconsistent.
-    // There are currently three different formats for "standard" role sizes:
-    // e.g. ExtraSmall, A6, Standard_D1.
-    Alias string
-
-    CpuCores      uint64
-    Mem           uint64 // In MB
-    OSDiskSpace   uint64 // In MB
-    TempDiskSpace uint64 // In MB
-    MaxDataDisks  uint64 // 1TB each
-}
-
-// decicentsPerHour is the unit of cost we store for RoleSizeCost.
-type decicentsPerHour uint64
-
-const (
-    // MB is the unit in which we specify sizes, so it's 1.
-    // But please include it anyway, so that units are always explicit.
-    MB = 1
-    GB = 1024 * MB
-    TB = 1024 * GB
-)
-
-// Basic tier roles.
-var basicRoleSizes = []RoleSize{{ // A0..A4: general purpose
-    Name:          "Basic_A0",
-    CpuCores:      1, // shared
-    Mem:           768 * MB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 20 * GB,
-    MaxDataDisks:  1,
-}, {
-    Name:          "Basic_A1",
-    CpuCores:      1,
-    Mem:           1.75 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 40 * GB,
-    MaxDataDisks:  2,
-}, {
-    Name:          "Basic_A2",
-    CpuCores:      2,
-    Mem:           3.5 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 60 * GB,
-    MaxDataDisks:  4,
-}, {
-    Name:          "Basic_A3",
-    CpuCores:      4,
-    Mem:           7 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 120 * GB,
-    MaxDataDisks:  8,
-}, {
-    Name:          "Basic_A4",
-    CpuCores:      8,
-    Mem:           14 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 240 * GB,
-    MaxDataDisks:  16,
-}}
-
-// Standard tier roles.
-var standardRoleSizes = []RoleSize{{ // A0..A4: general purpose
-    Name:          "ExtraSmall",
-    Alias:         "A0",
-    CpuCores:      1, // shared
-    Mem:           768 * MB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 20 * GB,
-    MaxDataDisks:  1,
-}, {
-    Name:          "Small",
-    Alias:         "A1",
-    CpuCores:      1,
-    Mem:           1.75 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 70 * GB,
-    MaxDataDisks:  2,
-}, {
-    Name:          "Medium",
-    Alias:         "A2",
-    CpuCores:      2,
-    Mem:           3.5 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 135 * GB,
-    MaxDataDisks:  4,
-}, {
-    Name:          "Large",
-    Alias:         "A3",
-    CpuCores:      4,
-    Mem:           7 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 285 * GB,
-    MaxDataDisks:  8,
-}, {
-    Name:          "ExtraLarge",
-    Alias:         "A4",
-    CpuCores:      8,
-    Mem:           14 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 605 * GB,
-    MaxDataDisks:  16,
-}, { // A5..A7: memory intensive
-    Name:          "A5",
-    CpuCores:      2,
-    Mem:           14 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 135 * GB,
-    MaxDataDisks:  4,
-}, {
-    Name:          "A6",
-    CpuCores:      4,
-    Mem:           28 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 285 * GB,
-    MaxDataDisks:  8,
-}, {
-    Name:          "A7",
-    CpuCores:      8,
-    Mem:           56 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 605 * GB,
-    MaxDataDisks:  16,
-}, { // A8..A9: compute intensive / fast network & infiniband support
-    Name:          "A8",
-    CpuCores:      8,
-    Mem:           56 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 382 * GB,
-    MaxDataDisks:  16,
-}, {
-    Name:          "A9",
-    CpuCores:      16,
-    Mem:           112 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 382 * GB,
-    MaxDataDisks:  16,
-}, { // D Series - Xeon v3 and SSD local
-    Name:          "Standard_D1",
-    Alias:         "D1",
-    CpuCores:      1,
-    Mem:           3.5 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 50 * GB,
-    MaxDataDisks:  2,
-}, {
-    Name:          "Standard_D2",
-    Alias:         "D2",
-    CpuCores:      2,
-    Mem:           7 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 100 * GB,
-    MaxDataDisks:  4,
-}, {
-    Name:          "Standard_D3",
-    Alias:         "D3",
-    CpuCores:      4,
-    Mem:           14 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 200 * GB,
-    MaxDataDisks:  8,
-}, {
-    Name:          "Standard_D4",
-    Alias:         "D4",
-    CpuCores:      8,
-    Mem:           28 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 400 * GB,
-    MaxDataDisks:  16,
-}, {
-    Name:          "Standard_D11",
-    Alias:         "D11",
-    CpuCores:      2,
-    Mem:           14 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 100 * GB,
-    MaxDataDisks:  4,
-}, {
-    Name:          "Standard_D12",
-    Alias:         "D12",
-    CpuCores:      4,
-    Mem:           28 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 200 * GB,
-    MaxDataDisks:  8,
-}, {
-    Name:          "Standard_D13",
-    Alias:         "D13",
-    CpuCores:      8,
-    Mem:           56 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 400 * GB,
-    MaxDataDisks:  16,
-}, {
-    Name:          "Standard_D14",
-    Alias:         "D14",
-    CpuCores:      16,
-    Mem:           112 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 800 * GB,
-    MaxDataDisks:  16,
-}, { // G Series Large Compute + SSD Local
-    Name:          "Standard_G1",
-    Alias:         "G1",
-    CpuCores:      2,
-    Mem:           28 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 406 * GB,
-    MaxDataDisks:  4,
-}, {
-    Name:          "Standard_G2",
-    Alias:         "G2",
-    CpuCores:      4,
-    Mem:           56 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 812 * GB,
-    MaxDataDisks:  8,
-}, {
-    Name:          "Standard_G3",
-    Alias:         "G3",
-    CpuCores:      8,
-    Mem:           112 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 1630 * GB,
-    MaxDataDisks:  16,
-}, {
-    Name:          "Standard_G4",
-    Alias:         "G4",
-    CpuCores:      16,
-    Mem:           224 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 3250 * GB,
-    MaxDataDisks:  16,
-}, {
-    Name:          "Standard_G5",
-    Alias:         "G5",
-    CpuCores:      32,
-    Mem:           448 * GB,
-    OSDiskSpace:   127 * GB,
-    TempDiskSpace: 6500 * GB,
-    MaxDataDisks:  16,
-}}
-
-// RoleSizes describes all known role sizes.
-var RoleSizes = append(append([]RoleSize{}, basicRoleSizes...), standardRoleSizes...)
-
-var allRegionRoleCosts = map[string]map[string]decicentsPerHour{
-    "East US": {
-        "Basic_A0": 18,
-        "Basic_A1": 44,
-        "Basic_A2": 88,
-        "Basic_A3": 176,
-        "Basic_A4": 352,
-        "ExtraSmall": 20,
-        "Small": 60,
-        "Medium": 120,
-        "Large": 240,
-        "ExtraLarge": 480,
-        "A5": 250,
-        "A6": 500,
-        "A7": 1000,
-        "A8": 1970,
-        "A9": 4470,
-        "Standard_D1": 94,
-        "Standard_D2": 188,
-        "Standard_D3": 376,
-        "Standard_D4": 752,
-        "Standard_D11": 238,
-        "Standard_D12": 476,
-        "Standard_D13": 857,
-        "Standard_D14": 1542,
-        "Standard_G1": 8940, // Estimate, price is not public
-        "Standard_G2": 8950, // Estimate, price is not public
-        "Standard_G3": 8960, // Estimate, price is not public
-        "Standard_G4": 8970, // Estimate, price is not public
-        "Standard_G5": 8980, // Estimate, price is not public
-    },
-    "East US 2": {
-        "Basic_A0": 18,
-        "Basic_A1": 44,
-        "Basic_A2": 88,
-        "Basic_A3": 176,
-        "Basic_A4": 352,
-        "ExtraSmall": 20,
-        "Small": 60,
-        "Medium": 120,
-        "Large": 240,
-        "ExtraLarge": 480,
-        "A5": 220,
-        "A6": 440,
-        "A7": 880,
-        "A8": 1970,
-        "A9": 4470,
-        "Standard_D1": 85,
-        "Standard_D2": 170,
-        "Standard_D3": 340,
-        "Standard_D4": 680,
-        "Standard_D11": 214,
-        "Standard_D12": 428,
-        "Standard_D13": 770,
-        "Standard_D14": 1387,
-        "Standard_G1": 8940, // Estimate, price is not public
-        "Standard_G2": 8950, // Estimate, price is not public
-        "Standard_G3": 8960, // Estimate, price is not public
-        "Standard_G4": 8970, // Estimate, price is not public
-        "Standard_G5": 8980, // Estimate, price is not public
-    },
-    "West US": {
-        "Basic_A0": 18,
-        "Basic_A1": 47,
-        "Basic_A2": 94,
-        "Basic_A3": 188,
-        "Basic_A4": 376,
-        "ExtraSmall": 20,
-        "Small": 60,
-        "Medium": 120,
-        "Large": 240,
-        "ExtraLarge": 480,
-        "A5": 250,
-        "A6": 500,
-        "A7": 1000,
-        "A8": 1970,
-        "A9": 4470,
-        "Standard_D1": 94,
-        "Standard_D2": 188,
-        "Standard_D3": 376,
-        "Standard_D4": 752,
-        "Standard_D11": 238,
-        "Standard_D12": 476,
-        "Standard_D13": 857,
-        "Standard_D14": 1542,
-        "Standard_G1": 8940,
// Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - - "Central US": { - "Basic_A0": 18, - "Basic_A1": 60, - "Basic_A2": 94, - "Basic_A3": 188, - "Basic_A4": 376, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 250, - "A6": 500, - "A7": 1000, - "A8": 1970, - "A9": 4470, - "Standard_D1": 94, - "Standard_D2": 188, - "Standard_D3": 376, - "Standard_D4": 752, - "Standard_D11": 238, - "Standard_D12": 476, - "Standard_D13": 857, - "Standard_D14": 1542, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "North Central US": { - "Basic_A0": 18, - "Basic_A1": 47, - "Basic_A2": 94, - "Basic_A3": 188, - "Basic_A4": 376, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 250, - "A6": 500, - "A7": 1000, - "A8": 1970, - "A9": 4470, - "Standard_D1": 94, - "Standard_D2": 188, - "Standard_D3": 376, - "Standard_D4": 752, - "Standard_D11": 238, - "Standard_D12": 476, - "Standard_D13": 857, - "Standard_D14": 1542, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "South Central US": { - "Basic_A0": 18, - "Basic_A1": 44, - "Basic_A2": 88, - "Basic_A3": 176, - "Basic_A4": 352, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 220, - "A6": 440, - "A7": 880, - "A8": 1970, - "A9": 4470, - "Standard_D1": 85, - "Standard_D2": 170, - "Standard_D3": 340, - "Standard_D4": 680, - "Standard_D11": 214, - "Standard_D12": 428, - "Standard_D13": 770, - "Standard_D14": 1387, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "North Europe": { - "Basic_A0": 18, - "Basic_A1": 47, - "Basic_A2": 94, - "Basic_A3": 188, - "Basic_A4": 376, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 248, - "A6": 496, - "A7": 992, - "A8": 1970, - "A9": 4470, - "Standard_D1": 94, - "Standard_D2": 188, - "Standard_D3": 376, - "Standard_D4": 752, - "Standard_D11": 238, - "Standard_D12": 476, - "Standard_D13": 857, - "Standard_D14": 1542, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "West Europe": { - "Basic_A0": 18, - "Basic_A1": 51, - "Basic_A2": 102, - "Basic_A3": 204, - "Basic_A4": 408, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 270, - "A6": 540, - "A7": 1080, - "A8": 1970, - "A9": 4470, - "Standard_D1": 115, - "Standard_D2": 230, - "Standard_D3": 460, - 
"Standard_D4": 920, - "Standard_D11": 273, - "Standard_D12": 546, - "Standard_D13": 983, - "Standard_D14": 1769, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "Southeast Asia": { - "Basic_A0": 18, - "Basic_A1": 58, - "Basic_A2": 116, - "Basic_A3": 232, - "Basic_A4": 464, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 270, - "A6": 540, - "A7": 1080, - "A8": 1970, - "A9": 4470, - "Standard_D1": 120, - "Standard_D2": 240, - "Standard_D3": 480, - "Standard_D4": 960, - "Standard_D11": 256, - "Standard_D12": 512, - "Standard_D13": 922, - "Standard_D14": 1659, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "East Asia": { - "Basic_A0": 18, - "Basic_A1": 58, - "Basic_A2": 116, - "Basic_A3": 232, - "Basic_A4": 464, - "ExtraSmall": 20, - "Small": 60, - "Medium": 120, - "Large": 240, - "ExtraLarge": 480, - "A5": 294, - "A6": 588, - "A7": 1176, - "A8": 1970, - "A9": 4470, - "Standard_D1": 138, - "Standard_D2": 276, - "Standard_D3": 552, - "Standard_D4": 1104, - "Standard_D11": 295, - "Standard_D12": 590, - "Standard_D13": 1062, - "Standard_D14": 1912, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "Japan East": { - "Basic_A0": 18, - "Basic_A1": 69, - "Basic_A2": 138, - "Basic_A3": 276, - "Basic_A4": 552, - "ExtraSmall": 27, - "Small": 81, - "Medium": 162, - "Large": 324, - "ExtraLarge": 648, - "A5": 281, - "A6": 562, - "A7": 1124, - "A8": 2325, - "A9": 5275, - "Standard_D1": 142, - "Standard_D2": 284, - "Standard_D3": 568, - "Standard_D4": 1136, - "Standard_D11": 295, - "Standard_D12": 590, - "Standard_D13": 1062, - "Standard_D14": 1912, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "Japan West": { - "Basic_A0": 19, - "Basic_A1": 61, - "Basic_A2": 122, - "Basic_A3": 244, - "Basic_A4": 488, - "ExtraSmall": 21, - "Small": 73, - "Medium": 146, - "Large": 292, - "ExtraLarge": 584, - "A5": 258, - "A6": 516, - "A7": 1032, - "A8": 2088, - "A9": 4738, - "Standard_D1": 123, - "Standard_D2": 246, - "Standard_D3": 492, - "Standard_D4": 984, - "Standard_D11": 256, - "Standard_D12": 512, - "Standard_D13": 922, - "Standard_D14": 1659, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "Brazil South": { - "Basic_A0": 22, - "Basic_A1": 58, - "Basic_A2": 116, - "Basic_A3": 232, - "Basic_A4": 464, - "ExtraSmall": 24, - "Small": 80, - "Medium": 160, - "Large": 320, - "ExtraLarge": 640, - "A5": 
291, - "A6": 582, - "A7": 1164, - "A8": 2403, - "A9": 5453, - "Standard_D1": 116, - "Standard_D2": 232, - "Standard_D3": 464, - "Standard_D4": 928, - "Standard_D11": 290, - "Standard_D12": 580, - "Standard_D13": 1044, - "Standard_D14": 1879, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "Australia East": { - "Basic_A0": 24, - "Basic_A1": 58, - "Basic_A2": 116, - "Basic_A3": 232, - "Basic_A4": 464, - "ExtraSmall": 29, - "Small": 71, - "Medium": 142, - "Large": 284, - "ExtraLarge": 568, - "A5": 278, - "A6": 556, - "A7": 1112, - "A8": 2224, - "A9": 4448, - "Standard_D1": 120, - "Standard_D2": 239, - "Standard_D3": 478, - "Standard_D4": 956, - "Standard_D11": 256, - "Standard_D12": 512, - "Standard_D13": 922, - "Standard_D14": 1660, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, - "Australia Southeast": { - "Basic_A0": 24, - "Basic_A1": 58, - "Basic_A2": 116, - "Basic_A3": 232, - "Basic_A4": 464, - "ExtraSmall": 29, - "Small": 71, - "Medium": 142, - "Large": 284, - "ExtraLarge": 568, - "A5": 278, - "A6": 556, - "A7": 1112, - "A8": 2224, - "A9": 4448, - "Standard_D1": 120, - "Standard_D2": 239, - "Standard_D3": 478, - "Standard_D4": 956, - "Standard_D11": 256, - "Standard_D12": 512, - "Standard_D13": 922, - "Standard_D14": 1660, - "Standard_G1": 8940, // Estimate, price is not public - "Standard_G2": 8950, // Estimate, price is not public - "Standard_G3": 8960, // Estimate, price is not public - "Standard_G4": 8970, // Estimate, price is not public - "Standard_G5": 8980, // Estimate, price is not public - }, -} - -// RoleSizeCost returns the cost associated with the given role size and region. -func RoleSizeCost(region string, roleSize string) (decicentsPerHour uint64, err error) { - costs, ok := allRegionRoleCosts[region] - if !ok { - return 0, fmt.Errorf("no cost data for region %q", region) - } - cost, ok := costs[roleSize] - if ok { - return uint64(cost), nil - } - return 0, fmt.Errorf( - "no cost data for role size %q in region %q", - roleSize, region, - ) -} === removed file 'src/launchpad.net/gwacl/rolesizes_test.go' --- src/launchpad.net/gwacl/rolesizes_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/rolesizes_test.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - . 
"launchpad.net/gocheck" -) - -type rolesizeSuite struct{} - -var _ = Suite(&rolesizeSuite{}) - -var knownRegions = []string{ - "East US", - "East US 2", - "West US", - "Central US", - "North Central US", - "South Central US", - "North Europe", - "West Europe", - "Southeast Asia", - "East Asia", - "Japan East", - "Japan West", - "Brazil South", - "Australia Southeast", - "Australia East", -} - -var knownSizes = []string{ - "Basic_A0", "Basic_A1", "Basic_A2", "Basic_A3", "Basic_A4", - "ExtraSmall", "Small", "Medium", "Large", "ExtraLarge", - "A5", "A6", "A7", "A8", "A9", "Standard_D1", "Standard_D2", - "Standard_D3", "Standard_D4", "Standard_D11", "Standard_D12", - "Standard_D13", "Standard_D14", "Standard_G1", "Standard_G2", - "Standard_G3", "Standard_G4", "Standard_G5", -} - -func (suite *rolesizeSuite) TestRoleCostKnownRegions(c *C) { - for _, region := range knownRegions { - for _, roleSize := range knownSizes { - cost, err := RoleSizeCost(region, roleSize) - c.Check(err, IsNil) - c.Check(cost, Not(Equals), uint64(0)) - } - } -} - -func (suite *rolesizeSuite) TestRoleCostUnknownRegion(c *C) { - _, err := RoleSizeCost("Eastasia", "A0") - c.Assert(err, ErrorMatches, `no cost data for region "Eastasia"`) -} - -func (suite *rolesizeSuite) TestRoleCostUnknownRoleSize(c *C) { - _, err := RoleSizeCost("East US", "A10") - c.Assert(err, ErrorMatches, `no cost data for role size "A10" in region "East US"`) -} === removed file 'src/launchpad.net/gwacl/shared_signature.go' --- src/launchpad.net/gwacl/shared_signature.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/shared_signature.go 1970-01-01 00:00:00 +0000 @@ -1,80 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - "net/url" - "time" -) - -// sharedSignatureParams is a object which encapsulate all the parameters -// required to delegate access to a Windows Azure object using the -// "Shared Access Signature" mechanism. -type sharedSignatureParams struct { - permission string - signedStart string - signedExpiry string - path string - accountName string - signedIdentifier string - signedVersion string - signedRessource string - accountKey string -} - -// composeSharedSignature composes the "Shared Access Signature" as described -// in the paragraph "Constructing the Signature String" in -// http://msdn.microsoft.com/en-us/library/windowsazure/dn140255.aspx -func (params *sharedSignatureParams) composeSharedSignature() (string, error) { - // Compose the string to sign. - canonicalizedResource := fmt.Sprintf("/%s%s", params.accountName, params.path) - stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", - params.permission, params.signedStart, params.signedExpiry, canonicalizedResource, params.signedIdentifier, params.signedVersion) - // Create the signature. - signature, err := sign(params.accountKey, stringToSign) - if err != nil { - return "", err - } - return signature, nil -} - -// composeAccessQueryValues returns the values that correspond to the query -// string used to build a Shared Access Signature URI as described in -// http://msdn.microsoft.com/en-us/library/windowsazure/dn140255.aspx -func (params *sharedSignatureParams) composeAccessQueryValues() (url.Values, error) { - signature, err := params.composeSharedSignature() - if err != nil { - return nil, err - } - // Compose the "Shared Access Signature" query string. 
- values := url.Values{} - values.Set("sv", params.signedVersion) - values.Set("se", params.signedExpiry) - values.Set("sr", params.signedRessource) - values.Set("sp", params.permission) - values.Set("sig", signature) - return values, nil -} - -// getReadBlobAccessValues returns the values that correspond to the query -// string used to build a Shared Access Signature URI. The signature grants -// read access to the given blob. -func getReadBlobAccessValues(container, filename, accountName, accountKey string, expires time.Time) (url.Values, error) { - expiryDateString := expires.UTC().Format(time.RFC3339) - - path := fmt.Sprintf("/%s/%s", container, filename) - signatureParams := &sharedSignatureParams{ - permission: "r", - signedExpiry: expiryDateString, - signedStart: "", - path: path, - accountName: accountName, - signedIdentifier: "", - signedVersion: "2012-02-12", - signedRessource: "b", - accountKey: accountKey, - } - return signatureParams.composeAccessQueryValues() -} === removed file 'src/launchpad.net/gwacl/shared_signature_test.go' --- src/launchpad.net/gwacl/shared_signature_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/shared_signature_test.go 1970-01-01 00:00:00 +0000 @@ -1,73 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "encoding/base64" - . "launchpad.net/gocheck" - "time" -) - -type sharedSignatureSuite struct{} - -var _ = Suite(&sharedSignatureSuite{}) - -func (*sharedSignatureSuite) TestComposeSharedSignature(c *C) { - params := &sharedSignatureParams{ - permission: "r", - signedExpiry: "2015-02-12", - path: "/path/to/file", - accountName: "name", - signedIdentifier: "identifier", - signedVersion: "2012-02-12", - signedRessource: "/the/ressource", - accountKey: base64.StdEncoding.EncodeToString([]byte("key")), - } - - signature, err := params.composeSharedSignature() - c.Assert(err, IsNil) - c.Check(signature, Equals, "C/COJt8UagHJR2LBT1129bhDChtgfLGFqfZ0YQpBdF0=") -} - -func (*sharedSignatureSuite) TestComposeAccessQueryValues(c *C) { - params := &sharedSignatureParams{ - permission: "r", - signedExpiry: "2015-02-12", - path: "/path/to/file", - accountName: "name", - signedIdentifier: "identifier", - signedVersion: "2012-02-12", - signedRessource: "/the/ressource", - accountKey: base64.StdEncoding.EncodeToString([]byte("key")), - } - - values, err := params.composeAccessQueryValues() - c.Assert(err, IsNil) - - c.Check(values.Get("sv"), Equals, params.signedVersion) - c.Check(values.Get("se"), Equals, params.signedExpiry) - c.Check(values.Get("sr"), Equals, params.signedRessource) - c.Check(values.Get("sp"), Equals, params.permission) - c.Check(values.Get("sig"), Not(HasLen), 0) -} - -func (*sharedSignatureSuite) TestGetReadBlobAccessValues(c *C) { - container := "container" - filename := "/path/to/file" - accountName := "name" - accountKey := base64.StdEncoding.EncodeToString([]byte("key")) - expires, err := time.Parse("Monday, 02-Jan-06 15:04:05 MST", time.RFC850) - c.Assert(err, IsNil) - - values, err := getReadBlobAccessValues(container, filename, accountName, accountKey, expires) - c.Assert(err, IsNil) - - c.Check(values.Get("sv"), Equals, "2012-02-12") - expiryDateString := values.Get("se") - expectedExpiryDateString := expires.UTC().Format(time.RFC3339) - c.Check(expiryDateString, Equals, expectedExpiryDateString) - c.Check(values.Get("sr"), Equals, "b") - c.Check(values.Get("sp"), Equals, "r") - 
c.Check(values.Get("sig"), Equals, "HK7xmxiUY/vBNkaIWoJkIcv27g/+QFjwKVgO/I3yWmI=") -} === removed file 'src/launchpad.net/gwacl/storage.go' --- src/launchpad.net/gwacl/storage.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/storage.go 1970-01-01 00:00:00 +0000 @@ -1,222 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -// This file contains higher level operations necessary to work with the Azure -// file storage API. - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - . "launchpad.net/gwacl/logging" - "strconv" - "strings" -) - -// UploadBlockBlob uses PutBlock and PutBlockList API operations to upload -// arbitrarily large files, 1MB at a time. -func (context *StorageContext) UploadBlockBlob( - container, filename string, data io.Reader) error { - - buffer := make([]byte, 1024*1024) // 1MB buffer - blockList := &BlockList{} - - // Upload the file in chunks. - for blockNum := int64(0); ; blockNum++ { - blockSize, err := data.Read(buffer) - if err == io.EOF { - break - } - if err != nil { - return err - } - block := bytes.NewReader(buffer[:blockSize]) - blockID := strconv.FormatInt(blockNum, 36) - // Block IDs must be a consistent length, so pad it out. - blockID = fmt.Sprintf("%030s", blockID) - Debugf("Uploading block %d (size=%d, id=%s).\n", - blockNum, blockSize, blockID) - err = context.PutBlock(container, filename, blockID, block) - if err != nil { - return err - } - blockList.Add(BlockListLatest, blockID) - } - - // Commit those blocks by writing the block list. - Debugf("Committing %d blocks.\n", len(blockList.Items)) - return context.PutBlockList(container, filename, blockList) -} - -// ListAllBlobs requests from the API a list of blobs in a container. -func (context *StorageContext) ListAllBlobs(request *ListBlobsRequest) (*BlobEnumerationResults, error) { - blobs := make([]Blob, 0) - var batch *BlobEnumerationResults - - // Request the initial result, using the empty marker. Then, for as long - // as the result has a nonempty NextMarker, request the next batch using - // that marker. - // This loop is very similar to the one in ListAllContainers(). - for marker, nextMarker := "", "x"; nextMarker != ""; marker = nextMarker { - var err error - // Don't use := here or you'll shadow variables from the function's - // outer scopes. - request.Marker = marker - batch, err = context.ListBlobs(request) - if err != nil { - return nil, err - } - // The response may contain a NextMarker field, to let us request a - // subsequent batch of results. The XML parser won't trim whitespace out - // of the marker tag, so we do that here. - nextMarker = strings.TrimSpace(batch.NextMarker) - blobs = append(blobs, batch.Blobs...) - } - - // There's more in a BlobsEnumerationResults than just the blobs. - // Return the latest batch, but give it the full cumulative blobs list - // instead of just the last batch. - // To the caller, this will look like they made one call to Azure's - // List Blobs method, but batch size was unlimited. - batch.Blobs = blobs - return batch, nil -} - -type DeleteAllBlobsRequest struct { - Container string - // Other params possible, add later. -} - -// RemoveAllBlobs requests a deletion of all the blobs in a container. -// The blobs are not deleted immediately, so when this call returns they -// may still be present for a while. 
-func (context *StorageContext) DeleteAllBlobs(request *DeleteAllBlobsRequest) error { - blobs, err := context.ListAllBlobs(&ListBlobsRequest{ - Container: request.Container}) - if err != nil { - return err - } - - for _, blob := range blobs.Blobs { - err := context.DeleteBlob(request.Container, blob.Name) - if err != nil { - return err - } - } - - return nil -} - -// ListAllContainers requests from the storage service a list of containers -// in the storage account. -func (context *StorageContext) ListAllContainers() (*ContainerEnumerationResults, error) { - containers := make([]Container, 0) - var batch *ContainerEnumerationResults - - // Request the initial result, using the empty marker. Then, for as long - // as the result has a nonempty NextMarker, request the next batch using - // that marker. - for marker, nextMarker := "", "x"; nextMarker != ""; marker = nextMarker { - var err error - // Don't use := here or you'll shadow variables from the function's - // outer scopes. - request := &ListContainersRequest{Marker: marker} - batch, err = context.ListContainers(request) - if err != nil { - return nil, err - } - // The response may contain a NextMarker field, to let us request a - // subsequent batch of results. The XML parser won't trim whitespace out - // of the marker tag, so we do that here. - nextMarker = strings.TrimSpace(batch.NextMarker) - containers = append(containers, batch.Containers...) - } - - // There's more in a ContainerEnumerationResults than just the containers. - // Return the latest batch, but give it the full cumulative containers list - // instead of just the last batch. - // To the caller, this will look like they made one call to Azure's - // List Containers method, but batch size was unlimited. - batch.Containers = containers - return batch, nil -} - -type CreateVHDRequest struct { - Container string // Container name in the storage account - Filename string // Specify the filename in which to store the VHD - FilesystemData io.Reader // A formatted filesystem, e.g. iso9660. - Size int // How many bytes from the Filesystem data to upload. *Must* be a multiple of 512. -} - -// CreateInstanceDataVHD will take the supplied filesystem data and create an -// Azure VHD in a page blob out of it. The data cannot be bigger than -// gwacl.VHD_SIZE-512. This is intended to be used as a way of passing -// arbitrary data to a new instance - create a disk here and then attach it to -// the new instance. -func (context *StorageContext) CreateInstanceDataVHD(req *CreateVHDRequest) error { - // We need several steps: - // 1. Create an empty page blob of exactly VHD_SIZE bytes (see - // vhd_footer.go) - // 2. Upload VHD_FOOTER to the last page of the blob. - // 3. Upload the supplied FilesystemData from the start of the blob. - - var err error - - if req.Size%512 != 0 { - return fmt.Errorf("Size must be a multiple of 512") - } - if req.Size > VHD_SIZE-512 { - // Protect against writing over the VHD footer. - return fmt.Errorf("Size cannot be bigger than %d", VHD_SIZE-512) - } - - // Step 1. - err = context.PutBlob(&PutBlobRequest{ - Container: req.Container, - BlobType: "page", - Filename: req.Filename, - Size: VHD_SIZE, - }) - - if err != nil { - return err - } - - // Step 2. - data, err := base64.StdEncoding.DecodeString(VHD_FOOTER) - if err != nil { - // This really shouldn't ever happen since there's a test to make sure - // it can be decoded. 
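
ListAllBlobs and ListAllContainers above share one pattern: start with an empty marker, feed each response's NextMarker back into the next request, and stop when it comes back empty. Reduced to a sketch with hypothetical types (the "x" sentinel forces the first pass, as in the loops above):

    package sketch

    import "strings"

    // batch is one page of results plus the marker for the next page.
    type batch struct {
        Items      []string
        NextMarker string
    }

    // listAll accumulates every page by re-requesting with each NextMarker.
    func listAll(list func(marker string) (*batch, error)) ([]string, error) {
        var items []string
        for marker, next := "", "x"; next != ""; marker = next {
            b, err := list(marker)
            if err != nil {
                return nil, err
            }
            next = strings.TrimSpace(b.NextMarker)
            items = append(items, b.Items...)
        }
        return items, nil
    }
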
- panic(err) - } - dataReader := bytes.NewReader(data) - err = context.PutPage(&PutPageRequest{ - Container: req.Container, - Filename: req.Filename, - StartRange: VHD_SIZE - 512, // last page of the blob - EndRange: VHD_SIZE - 1, - Data: dataReader, - }) - - if err != nil { - return err - } - - // Step 3. - err = context.PutPage(&PutPageRequest{ - Container: req.Container, - Filename: req.Filename, - StartRange: 0, - EndRange: req.Size - 1, - Data: req.FilesystemData, - }) - - if err != nil { - return err - } - - return nil -} === removed file 'src/launchpad.net/gwacl/storage_base.go' --- src/launchpad.net/gwacl/storage_base.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/storage_base.go 1970-01-01 00:00:00 +0000 @@ -1,755 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -// This file contains the operations necessary to work with the Azure -// file storage API. For more details, see -// http://msdn.microsoft.com/en-us/library/windowsazure/dd179355.aspx - -// TODO Improve function documentation: the Go documentation convention is for -// function documentation to start out with the name of the function. This may -// have special significance for godoc. - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strings" - "time" -) - -const defaultStorageAPIVersion = "2014-02-14" - -var headersToSign = []string{ - "Content-Encoding", - "Content-Language", - "Content-Length", - "Content-MD5", - "Content-Type", - "Date", - "If-Modified-Since", - "If-Match", - "If-None-Match", - "If-Unmodified-Since", - "Range", -} - -func init() { - // See https://code.google.com/p/go/issues/detail?id=4677 - // We need to force the connection to close each time so that we don't - // hit the above Go bug. - roundTripper := http.DefaultClient.Transport - if transport, ok := roundTripper.(*http.Transport); ok { - transport.DisableKeepAlives = true - } - http.DefaultTransport.(*http.Transport).DisableKeepAlives = true -} - -// sign returns the base64-encoded HMAC-SHA256 signature of the given string -// using the given base64-encoded key. -func sign(accountKey, signable string) (string, error) { - // Allegedly, this is already UTF8 encoded. - decodedKey, err := base64.StdEncoding.DecodeString(accountKey) - if err != nil { - return "", fmt.Errorf("invalid account key: %s", err) - } - hash := hmac.New(sha256.New, decodedKey) - _, err = hash.Write([]byte(signable)) - if err != nil { - return "", fmt.Errorf("failed to write hash: %s", err) - } - var hashed []byte - hashed = hash.Sum(hashed) - b64Hashed := base64.StdEncoding.EncodeToString(hashed) - return b64Hashed, nil -} - -// Calculate the value required for an Authorization header. -func composeAuthHeader(req *http.Request, accountName, accountKey string) (string, error) { - signable := composeStringToSign(req, accountName) - - b64Hashed, err := sign(accountKey, signable) - if err != nil { - return "", err - } - return fmt.Sprintf("SharedKey %s:%s", accountName, b64Hashed), nil -} - -// Calculate the string that needs to be HMAC signed. It is comprised of -// the headers in headersToSign, x-ms-* headers and the URI params. -func composeStringToSign(req *http.Request, accountName string) string { - // TODO: whitespace should be normalised in value strings. 
-    return fmt.Sprintf(
-        "%s\n%s%s%s", req.Method, composeHeaders(req),
-        composeCanonicalizedHeaders(req),
-        composeCanonicalizedResource(req, accountName))
-}

-// toLowerKeys lower-cases all map keys. If two keys differ only by case,
-// their values are concatenated under the single lower-cased key.
-func toLowerKeys(values url.Values) map[string][]string {
-    m := make(map[string][]string)
-    for k, v := range values {
-        k = strings.ToLower(k)
-        m[k] = append(m[k], v...)
-    }
-    for _, v := range m {
-        sort.Strings(v)
-    }
-    return m
-}
-
-// Encode the URI params as required by the API. They are lower-cased,
-// sorted and formatted as param:value,value,...\nparam:value...
-func encodeParams(values map[string][]string) string {
-    var keys []string
-    values = toLowerKeys(values)
-    for k := range values {
-        keys = append(keys, k)
-    }
-    sort.Strings(keys)
-    var result []string
-    for _, v := range keys {
-        result = append(result, fmt.Sprintf("%v:%s", v, strings.Join(values[v], ",")))
-    }
-    return strings.Join(result, "\n")
-}
-
-// Calculate the headers required in the string to sign.
-func composeHeaders(req *http.Request) string {
-    var result []string
-    for _, headerName := range headersToSign {
-        result = append(result, req.Header.Get(headerName)+"\n")
-    }
-    return strings.Join(result, "")
-}
-
-// Calculate the x-ms-* headers, encode as for encodeParams.
-func composeCanonicalizedHeaders(req *http.Request) string {
-    var results []string
-    for headerName, values := range req.Header {
-        headerName = strings.ToLower(headerName)
-        if strings.HasPrefix(headerName, "x-ms-") {
-            results = append(results, fmt.Sprintf("%v:%s\n", headerName, strings.Join(values, ",")))
-        }
-    }
-    sort.Strings(results)
-    return strings.Join(results, "")
-}
-
-// Calculate the URI params and encode them in the string.
-// See http://msdn.microsoft.com/en-us/library/windowsazure/dd179428.aspx
-// for details of this encoding.
-func composeCanonicalizedResource(req *http.Request, accountName string) string {
-    path := req.URL.Path
-    if !strings.HasPrefix(path, "/") {
-        path = "/" + path
-    }
-
-    values := req.URL.Query()
-    valuesLower := toLowerKeys(values)
-    paramString := encodeParams(valuesLower)
-
-    result := "/" + accountName + path
-    if paramString != "" {
-        result += "\n" + paramString
-    }
-
-    return result
-}
-
-// Take the passed msVersion string and add it to the request headers.
-func addVersionHeader(req *http.Request, msVersion string) {
-    req.Header.Set("x-ms-version", msVersion)
-}
-
-// Calculate the content length for the request payload and add it as the
-// Content-Length header.
-func addContentHeaders(req *http.Request) {
-    if req.Body == nil {
-        if req.Method == "PUT" || req.Method == "POST" {
-            // This cannot be set for a GET, likewise it *must* be set for
-            // PUT and POST.
-            req.Header.Set("Content-Length", "0")
-        }
-        return
-    }
-    reqdata, err := ioutil.ReadAll(req.Body)
-    if err != nil {
-        panic(fmt.Errorf("Unable to read request body: %s", err))
-    }
-    // Replace the request's data because we just destroyed it by reading it.
-    req.Body = ioutil.NopCloser(bytes.NewReader(reqdata))
-    req.Header.Set("Content-Length", fmt.Sprintf("%d", len(reqdata)))
-    // Stop Go's http lib from chunking the data because Azure will return
-    // an authorization error if it's chunked.
-    req.ContentLength = int64(len(reqdata))
-}
-
-// Add a Date: header in RFC1123 format.
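
encodeParams and toLowerKeys above canonicalize query parameters for signing: keys lower-cased and sorted, values sorted and comma-joined, one key:value line per key. A compact stand-alone rendering of that encoding, for illustration only:

    package sketch

    import (
        "fmt"
        "net/url"
        "sort"
        "strings"
    )

    // canonicalizeParams reproduces the encoding above: keys lower-cased,
    // values sorted, one "key:v1,v2" line per key, keys in sorted order.
    func canonicalizeParams(values url.Values) string {
        lower := map[string][]string{}
        for k, v := range values {
            k = strings.ToLower(k)
            lower[k] = append(lower[k], v...)
        }
        var keys []string
        for k := range lower {
            sort.Strings(lower[k])
            keys = append(keys, k)
        }
        sort.Strings(keys)
        var lines []string
        for _, k := range keys {
            lines = append(lines, fmt.Sprintf("%s:%s", k, strings.Join(lower[k], ",")))
        }
        return strings.Join(lines, "\n")
    }

    // canonicalizeParams(url.Values{"Comp": {"list"}, "restype": {"container"}})
    // yields "comp:list\nrestype:container".
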
-func addDateHeader(req *http.Request) { - now := time.Now().UTC().Format(time.RFC1123) - // The Azure API requires "GMT" and not "UTC". - now = strings.Replace(now, "UTC", "GMT", 1) - req.Header.Set("Date", now) -} - -// signRequest adds the Authorization: header to a Request. -// Don't make any further changes to the request before sending it, or the -// signature will not be valid. -func (context *StorageContext) signRequest(req *http.Request) error { - // Only sign the request if the key is not empty. - if context.Key != "" { - header, err := composeAuthHeader(req, context.Account, context.Key) - if err != nil { - return err - } - req.Header.Set("Authorization", header) - } - return nil -} - -// StorageContext keeps track of the mandatory parameters required to send a -// request to the storage services API. It also has an HTTP Client to allow -// overriding for custom behaviour, during testing for example. -type StorageContext struct { - // Account is a storage account name. - Account string - - // Key authenticates the storage account. Access will be anonymous if this - // is left empty. - Key string - - // AzureEndpoint specifies a base service endpoint URL for the Azure APIs. - // This field is required. - AzureEndpoint APIEndpoint - - client *http.Client - - RetryPolicy RetryPolicy -} - -// getClient is used when sending a request. If a custom client is specified -// in context.client it is returned, otherwise net.http.DefaultClient is -// returned. -func (context *StorageContext) getClient() *http.Client { - if context.client == nil { - return http.DefaultClient - } - return context.client -} - -// Any object that deserializes XML must meet this interface. -type Deserializer interface { - Deserialize([]byte) error -} - -// requestParams is a Parameter Object for performRequest(). -type requestParams struct { - Method string // HTTP method, e.g. "GET" or "PUT". - URL string // Resource locator, e.g. "http://example.com/my/resource". - Body io.Reader // Optional request body. - APIVersion string // Expected Azure API version, e.g. "2014-02-14". - ExtraHeaders http.Header // Optional extra request headers. - Result Deserializer // Optional object to parse API response into. - ExpectedStatus HTTPStatus // Expected response status, e.g. http.StatusOK. -} - -// Check performs a basic sanity check on the request. This will only catch -// a few superficial problems that you can spot at compile time, to save a -// debugging cycle for the most basic mistakes. -func (params *requestParams) Check() { - const panicPrefix = "invalid request: " - if params.Method == "" { - panic(errors.New(panicPrefix + "HTTP method not specified")) - } - if params.URL == "" { - panic(errors.New(panicPrefix + "URL not specified")) - } - if params.APIVersion == "" { - panic(errors.New(panicPrefix + "API version not specified")) - } - if params.ExpectedStatus == 0 { - panic(errors.New(panicPrefix + "expected HTTP status not specified")) - } - methods := map[string]bool{"GET": true, "PUT": true, "POST": true, "DELETE": true} - if _, ok := methods[params.Method]; !ok { - panic(fmt.Errorf(panicPrefix+"unsupported HTTP method '%s'", params.Method)) - } -} - -// performRequest issues an HTTP request to Azure. -// -// It returns the response body contents and the response headers. 
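
Any Result object handed to performRequest must satisfy the Deserializer interface defined above; in practice an implementation just delegates to encoding/xml. A sketch with a hypothetical result type (gwacl's real structs, e.g. ContainerEnumerationResults, follow the same pattern but their exact fields are not shown here):

    package sketch

    import "encoding/xml"

    // containerList is a hypothetical result type for a List Containers call.
    type containerList struct {
        XMLName    xml.Name `xml:"EnumerationResults"`
        Containers []string `xml:"Containers>Container>Name"`
    }

    // Deserialize satisfies the Deserializer interface used by performRequest.
    func (c *containerList) Deserialize(data []byte) error {
        return xml.Unmarshal(data, c)
    }
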
-func (context *StorageContext) performRequest(params requestParams) ([]byte, http.Header, error) { - params.Check() - req, err := http.NewRequest(params.Method, params.URL, params.Body) - if err != nil { - return nil, nil, err - } - // net/http has no way of adding headers en-masse, hence this abomination. - for header, values := range params.ExtraHeaders { - for _, value := range values { - req.Header.Add(header, value) - } - } - addVersionHeader(req, params.APIVersion) - addDateHeader(req) - addContentHeaders(req) - if err := context.signRequest(req); err != nil { - return nil, nil, err - } - return context.send(req, params.Result, params.ExpectedStatus) -} - -// Send a request to the storage service and process the response. -// The "res" parameter is typically an XML struct that will deserialize the -// raw XML into the struct data. The http Response object's body is returned. -// -// If the response's HTTP status code is not the same as "expectedStatus" -// then an HTTPError will be returned as the error. -func (context *StorageContext) send(req *http.Request, res Deserializer, expectedStatus HTTPStatus) ([]byte, http.Header, error) { - client := context.getClient() - - retrier := context.RetryPolicy.getRetrier(client) - resp, err := retrier.RetryRequest(req) - if err != nil { - return nil, nil, err - } - - body, err := readAndClose(resp.Body) - if err != nil { - return nil, nil, fmt.Errorf("failed to read response data: %v", err) - } - - if resp.StatusCode != int(expectedStatus) { - msg := newHTTPError(resp.StatusCode, body, "Azure request failed") - return body, resp.Header, msg - } - - // If the caller didn't supply an object to deserialize the message into - // then just return. - if res == nil { - return body, resp.Header, nil - } - - // TODO: Also deserialize response headers into the "res" object. - err = res.Deserialize(body) - if err != nil { - msg := fmt.Errorf("Failed to deserialize data: %s", err) - return body, resp.Header, msg - } - - return body, resp.Header, nil -} - -// getAccountURL returns the base URL for the context's storage account. -// (The result ends in a slash.) -func (context *StorageContext) getAccountURL() string { - if context.AzureEndpoint == APIEndpoint("") { - panic(errors.New("no AzureEndpoint specified in gwacl.StorageContext")) - } - return context.AzureEndpoint.BlobStorageAPI(context.Account) -} - -// getContainerURL returns the URL for a given storage container. -// (The result does not end in a slash.) -func (context *StorageContext) getContainerURL(container string) string { - return strings.TrimRight(context.getAccountURL(), "/") + "/" + url.QueryEscape(container) -} - -// GetFileURL returns the URL for a given file in the given container. -// (The result does not end in a slash.) -func (context *StorageContext) GetFileURL(container, filename string) string { - return context.getContainerURL(container) + "/" + url.QueryEscape(filename) -} - -// GetAnonymousFileURL returns an anonymously-accessible URL for a given file -// in the given container. 
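
send() above turns any status code other than the caller's ExpectedStatus into an error carrying the response body. The shape of that check as a sketch; gwacl wraps the failure in its richer HTTPError type, while a plain error is used here for brevity:

    package sketch

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    // checkStatus reads the body and converts an unexpected status code
    // into an error, mirroring the logic in send() above.
    func checkStatus(resp *http.Response, expected int) ([]byte, error) {
        body, err := ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        if err != nil {
            return nil, fmt.Errorf("failed to read response data: %v", err)
        }
        if resp.StatusCode != expected {
            return body, fmt.Errorf("Azure request failed (%d): %s", resp.StatusCode, body)
        }
        return body, nil
    }
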
-func (context *StorageContext) GetAnonymousFileURL(container, filename string, expires time.Time) (string, error) {
-    url := context.GetFileURL(container, filename)
-    values, err := getReadBlobAccessValues(container, filename, context.Account, context.Key, expires)
-    if err != nil {
-        return "", err
-    }
-    return fmt.Sprintf("%s?%s", url, values.Encode()), nil
-}
-
-type ListContainersRequest struct {
-    Marker string
-}
-
-// ListContainers calls the "List Containers" operation on the storage
-// API, and returns a single batch of results.
-// The marker argument should be empty for a new List Containers request. For
-// subsequent calls to get additional batches of the same result, pass the
-// NextMarker from the previous call's result.
-func (context *StorageContext) ListContainers(request *ListContainersRequest) (*ContainerEnumerationResults, error) {
-    uri := addURLQueryParams(context.getAccountURL(), "comp", "list")
-    if request.Marker != "" {
-        uri = addURLQueryParams(uri, "marker", request.Marker)
-    }
-    containers := ContainerEnumerationResults{}
-    _, _, err := context.performRequest(requestParams{
-        Method:         "GET",
-        URL:            uri,
-        APIVersion:     defaultStorageAPIVersion,
-        Result:         &containers,
-        ExpectedStatus: http.StatusOK,
-    })
-    if err != nil {
-        msg := "request for containers list failed: "
-        return nil, extendError(err, msg)
-    }
-    return &containers, nil
-}
-
-type ListBlobsRequest struct {
-    Container string
-    Marker    string
-    Prefix    string
-}
-
-// ListBlobs calls the "List Blobs" operation on the storage API, and returns
-// a single batch of results.
-// The request.Marker argument should be empty for a new List Blobs request.
-// For subsequent calls to get additional batches of the same result, pass the
-// NextMarker from the previous call's result.
-func (context *StorageContext) ListBlobs(request *ListBlobsRequest) (*BlobEnumerationResults, error) {
-    uri := addURLQueryParams(
-        context.getContainerURL(request.Container),
-        "restype", "container",
-        "comp", "list")
-    if request.Marker != "" {
-        uri = addURLQueryParams(uri, "marker", request.Marker)
-    }
-    if request.Prefix != "" {
-        uri = addURLQueryParams(uri, "prefix", request.Prefix)
-    }
-    blobs := BlobEnumerationResults{}
-    _, _, err := context.performRequest(requestParams{
-        Method:         "GET",
-        URL:            uri,
-        APIVersion:     defaultStorageAPIVersion,
-        Result:         &blobs,
-        ExpectedStatus: http.StatusOK,
-    })
-    if err != nil {
-        msg := "request for blobs list failed: "
-        return nil, extendError(err, msg)
-    }
-    return &blobs, err
-}
-
-// Send a request to the storage service to create a new container. If the
-// request fails, error is non-nil.
-func (context *StorageContext) CreateContainer(container string) error {
-    uri := addURLQueryParams(
-        context.getContainerURL(container),
-        "restype", "container")
-    _, _, err := context.performRequest(requestParams{
-        Method:         "PUT",
-        URL:            uri,
-        APIVersion:     defaultStorageAPIVersion,
-        ExpectedStatus: http.StatusCreated,
-    })
-    if err != nil {
-        msg := fmt.Sprintf("failed to create container %s: ", container)
-        return extendError(err, msg)
-    }
-    return nil
-}
-
-// Send a request to the storage service to delete a container. It will also
-// delete all the blobs inside it.
-func (context *StorageContext) DeleteContainer(container string) error { - uri := addURLQueryParams( - context.getContainerURL(container), - "restype", "container") - _, _, err := context.performRequest(requestParams{ - Method: "DELETE", - URL: uri, - APIVersion: defaultStorageAPIVersion, - ExpectedStatus: http.StatusAccepted, - }) - if err != nil { - // If the error is an Azure 404 error, return silently: the container - // does not exist. - if IsNotFoundError(err) { - return nil - } - msg := fmt.Sprintf("failed to delete container %s: ", container) - return extendError(err, msg) - } - return nil -} - -// Send a request to the storage service to retrieve a container's properties. -// Also doubles as a handy way to see if a container exists. -func (context *StorageContext) GetContainerProperties(container string) (*Properties, error) { - uri := addURLQueryParams( - context.getContainerURL(container), - "restype", "container") - params := requestParams{ - Method: "GET", - URL: uri, - APIVersion: defaultStorageAPIVersion, - ExpectedStatus: http.StatusOK, - } - _, headers, err := context.performRequest(params) - if err != nil { - msg := fmt.Sprintf("failed to find container %s: ", container) - return nil, extendError(err, msg) - } - - props := &Properties{ - LastModified: headers.Get(http.CanonicalHeaderKey("Last-Modified")), - ETag: headers.Get(http.CanonicalHeaderKey("ETag")), - LeaseStatus: headers.Get(http.CanonicalHeaderKey("X-Ms-Lease-Status")), - LeaseState: headers.Get(http.CanonicalHeaderKey("X-Ms-Lease-State")), - LeaseDuration: headers.Get(http.CanonicalHeaderKey("X-Ms-Lease-Duration")), - } - - return props, nil -} - -type PutBlobRequest struct { - Container string // Container name in the storage account - BlobType string // Pass "page" or "block" - Filename string // Filename for the new blob - Size int // Size for the new blob. Only required for page blobs. -} - -// Send a request to create a space to upload a blob. Note that this does not -// do the uploading, it just makes an empty file. -func (context *StorageContext) PutBlob(req *PutBlobRequest) error { - var blobType string - switch req.BlobType { - case "page": - blobType = "PageBlob" - if req.Size == 0 { - return fmt.Errorf("Must supply a size for a page blob") - } - if req.Size%512 != 0 { - return fmt.Errorf("Size must be a multiple of 512 bytes") - } - case "block": - blobType = "BlockBlob" - default: - panic("blockType must be 'page' or 'block'") - } - - extraHeaders := http.Header{} - extraHeaders.Add("x-ms-blob-type", blobType) - if req.BlobType == "page" { - size := fmt.Sprintf("%d", req.Size) - extraHeaders.Add("x-ms-blob-content-length", size) - } - - _, _, err := context.performRequest(requestParams{ - Method: "PUT", - URL: context.GetFileURL(req.Container, req.Filename), - APIVersion: defaultStorageAPIVersion, - ExtraHeaders: extraHeaders, - ExpectedStatus: http.StatusCreated, - }) - if err != nil { - msg := fmt.Sprintf("failed to create blob %s: ", req.Filename) - return extendError(err, msg) - } - return nil -} - -type PutPageRequest struct { - Container string // Container name in the storage account - Filename string // The blob's file name - StartRange int // Must be modulo 512, or an error is returned. - EndRange int // Must be (modulo 512)-1, or an error is returned. - Data io.Reader // The data to upload to the page. -} - -// Send a request to add a range of data into a page blob. 
-// See http://msdn.microsoft.com/en-us/library/windowsazure/ee691975.aspx -func (context *StorageContext) PutPage(req *PutPageRequest) error { - validStart := (req.StartRange % 512) == 0 - validEnd := (req.EndRange % 512) == 511 - if !(validStart && validEnd) { - return fmt.Errorf( - "StartRange must be a multiple of 512, EndRange must be one less than a multiple of 512") - } - uri := addURLQueryParams( - context.GetFileURL(req.Container, req.Filename), - "comp", "page") - - extraHeaders := http.Header{} - - rangeData := fmt.Sprintf("bytes=%d-%d", req.StartRange, req.EndRange) - extraHeaders.Add("x-ms-range", rangeData) - extraHeaders.Add("x-ms-page-write", "update") - - _, _, err := context.performRequest(requestParams{ - Method: "PUT", - URL: uri, - Body: req.Data, - APIVersion: defaultStorageAPIVersion, - ExtraHeaders: extraHeaders, - ExpectedStatus: http.StatusCreated, - }) - if err != nil { - msg := fmt.Sprintf("failed to put page for file %s: ", req.Filename) - return extendError(err, msg) - } - return nil -} - -// Send a request to fetch the list of blocks that have been uploaded as part -// of a block blob. -func (context *StorageContext) GetBlockList(container, filename string) (*GetBlockList, error) { - uri := addURLQueryParams( - context.GetFileURL(container, filename), - "comp", "blocklist", - "blocklisttype", "all") - bl := GetBlockList{} - _, _, err := context.performRequest(requestParams{ - Method: "GET", - URL: uri, - APIVersion: defaultStorageAPIVersion, - Result: &bl, - ExpectedStatus: http.StatusOK, - }) - if err != nil { - msg := fmt.Sprintf("request for block list in file %s failed: ", filename) - return nil, extendError(err, msg) - } - return &bl, nil -} - -// Send a request to create a new block. The request payload contains the -// data block to upload. -func (context *StorageContext) PutBlock(container, filename, id string, data io.Reader) error { - base64ID := base64.StdEncoding.EncodeToString([]byte(id)) - uri := addURLQueryParams( - context.GetFileURL(container, filename), - "comp", "block", - "blockid", base64ID) - _, _, err := context.performRequest(requestParams{ - Method: "PUT", - URL: uri, - Body: data, - APIVersion: defaultStorageAPIVersion, - ExpectedStatus: http.StatusCreated, - }) - if err != nil { - msg := fmt.Sprintf("failed to put block %s for file %s: ", id, filename) - return extendError(err, msg) - } - return nil -} - -// Send a request to piece together blocks into a list that specifies a blob. -func (context *StorageContext) PutBlockList(container, filename string, blocklist *BlockList) error { - uri := addURLQueryParams( - context.GetFileURL(container, filename), - "comp", "blocklist") - data, err := blocklist.Serialize() - if err != nil { - return err - } - dataReader := bytes.NewReader(data) - - _, _, err = context.performRequest(requestParams{ - Method: "PUT", - URL: uri, - Body: dataReader, - APIVersion: defaultStorageAPIVersion, - ExpectedStatus: http.StatusCreated, - }) - if err != nil { - msg := fmt.Sprintf("failed to put blocklist for file %s: ", filename) - return extendError(err, msg) - } - return nil -} - -// Delete the specified blob from the given container. Deleting a non-existent -// blob will return without an error.
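PutBlock and PutBlockList together form the usual two-phase block-blob upload: stage the chunks, then commit them in order. A hedged sketch of that flow (the chunk size is an arbitrary choice here; BlockList.Add and BlockListLatest appear in the tests further down this diff):

package main

import (
	"bytes"
	"fmt"

	"launchpad.net/gwacl"
)

// uploadInChunks stages fixed-size blocks and then commits the list.
func uploadInChunks(context *gwacl.StorageContext, container, filename string, data []byte) error {
	const chunkSize = 1024 * 1024 // arbitrary for this sketch
	blocklist := &gwacl.BlockList{}
	for i, offset := 0, 0; offset < len(data); i, offset = i+1, offset+chunkSize {
		end := offset + chunkSize
		if end > len(data) {
			end = len(data)
		}
		id := fmt.Sprintf("%030d", i) // fixed-width IDs, as in the tests below
		if err := context.PutBlock(container, filename, id, bytes.NewReader(data[offset:end])); err != nil {
			return err
		}
		blocklist.Add(gwacl.BlockListLatest, id)
	}
	return context.PutBlockList(container, filename, blocklist)
}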
-func (context *StorageContext) DeleteBlob(container, filename string) error { - _, _, err := context.performRequest(requestParams{ - Method: "DELETE", - URL: context.GetFileURL(container, filename), - APIVersion: defaultStorageAPIVersion, - ExpectedStatus: http.StatusAccepted, - }) - if err != nil { - // If the error is an Azure 404 error, return silently: the blob does - // not exist. - if IsNotFoundError(err) { - return nil - } - msg := fmt.Sprintf("failed to delete blob %s: ", filename) - return extendError(err, msg) - } - return nil -} - -// Get the specified blob from the given container. -func (context *StorageContext) GetBlob(container, filename string) (io.ReadCloser, error) { - body, _, err := context.performRequest(requestParams{ - Method: "GET", - URL: context.GetFileURL(container, filename), - APIVersion: defaultStorageAPIVersion, - ExpectedStatus: http.StatusOK, - }) - if err != nil { - msg := fmt.Sprintf("failed to get blob %q: ", filename) - return nil, extendError(err, msg) - } - return ioutil.NopCloser(bytes.NewBuffer(body)), nil -} - -type SetContainerACLRequest struct { - Container string // Container name in the storage account - Access string // "container", "blob", or "private" -} - -// SetContainerACL sets the specified container's access rights. -// See http://msdn.microsoft.com/en-us/library/windowsazure/dd179391.aspx -func (context *StorageContext) SetContainerACL(req *SetContainerACLRequest) error { - uri := addURLQueryParams( - context.getContainerURL(req.Container), - "restype", "container", - "comp", "acl") - - extraHeaders := http.Header{} - switch req.Access { - case "container", "blob": - extraHeaders.Add("x-ms-blob-public-access", req.Access) - case "private": - // Don't add a header, Azure resets to private if it's omitted. - default: - panic("Access must be one of 'container', 'blob' or 'private'") - } - - _, _, err := context.performRequest(requestParams{ - Method: "PUT", - URL: uri, - APIVersion: defaultStorageAPIVersion, - ExtraHeaders: extraHeaders, - ExpectedStatus: http.StatusOK, - }) - - if err != nil { - msg := fmt.Sprintf("failed to set ACL for container %s: ", req.Container) - return extendError(err, msg) - } - return nil -} === removed file 'src/launchpad.net/gwacl/storage_base_test.go' --- src/launchpad.net/gwacl/storage_base_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/storage_base_test.go 1970-01-01 00:00:00 +0000 @@ -1,1467 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "io/ioutil" - . 
"launchpad.net/gocheck" - "launchpad.net/gwacl/dedent" - "net/http" - "net/url" - "strings" - "time" -) - -type testComposeHeaders struct{} - -var _ = Suite(&testComposeHeaders{}) - -func makeHttpResponse(status int, body string) *http.Response { - return &http.Response{ - Status: fmt.Sprintf("%d", status), - StatusCode: status, - Body: makeResponseBody(body), - } -} - -func (suite *testComposeHeaders) TestNoHeaders(c *C) { - req, err := http.NewRequest("GET", "http://example.com", nil) - c.Assert(err, IsNil) - - observed := composeHeaders(req) - expected := "\n\n\n\n\n\n\n\n\n\n\n" - - c.Assert(observed, Equals, expected) -} - -func (suite *testComposeHeaders) TestCreatesHeaders(c *C) { - req, err := http.NewRequest("GET", "http://example.com", nil) - c.Assert(err, IsNil) - - var items []string - for i, headerName := range headersToSign { - v := fmt.Sprintf("%d", i) - req.Header.Set(headerName, v) - items = append(items, v+"\n") - } - expected := strings.Join(items, "") - - observed := composeHeaders(req) - c.Assert(observed, Equals, expected) -} - -func (suite *testComposeHeaders) TestCanonicalizedHeaders(c *C) { - req, err := http.NewRequest("GET", "http://example.com", nil) - c.Assert(err, IsNil) - req.Header.Set("x-ms-why", "aye") - req.Header.Set("x-ms-foo", "bar") - req.Header.Set("invalid", "blah") - - expected := "x-ms-foo:bar\nx-ms-why:aye\n" - observed := composeCanonicalizedHeaders(req) - c.Check(observed, Equals, expected) -} - -type TestRetryRequests struct{} - -var _ = Suite(&TestRetryRequests{}) - -func (suite *TestRetryRequests) TestRequestIsRetried(c *C) { - transport := &MockingTransport{} - body := []byte("data") - transport.AddExchange(&http.Response{StatusCode: http.StatusConflict, Body: Empty}, nil) - transport.AddExchange(&http.Response{StatusCode: http.StatusConflict, Body: Empty}, nil) - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: makeResponseBody(string(body))}, nil) - retryPolicy := RetryPolicy{NbRetries: 3, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond} - context := makeStorageContext(transport) - context.RetryPolicy = retryPolicy - req, err := http.NewRequest("GET", "http://example.com", nil) - c.Assert(err, IsNil) - - resBody, _, err := context.send(req, nil, http.StatusOK) - c.Assert(err, IsNil) - c.Assert(transport.ExchangeCount, Equals, 3) - c.Check(resBody, DeepEquals, body) -} - -type TestComposeCanonicalizedResource struct{} - -var _ = Suite(&TestComposeCanonicalizedResource{}) - -func (suite *TestComposeCanonicalizedResource) TestPrependsSlash(c *C) { - req, err := http.NewRequest("GET", "http://example.com", nil) - c.Assert(err, IsNil) - path := MakeRandomString(10) - req.URL.Path = path - accountName := MakeRandomString(10) - observed := composeCanonicalizedResource(req, accountName) - expected := "/" + accountName + "/" + path - c.Assert(observed, Equals, expected) -} - -func (suite *TestComposeCanonicalizedResource) TestCreatesResource(c *C) { - path := MakeRandomString(5) - req, err := http.NewRequest("GET", fmt.Sprintf("http://example.com/%s", path), nil) - c.Assert(err, IsNil) - - accountName := MakeRandomString(10) - observed := composeCanonicalizedResource(req, accountName) - expected := "/" + accountName + "/" + path - c.Assert(observed, Equals, expected) -} - -func (suite *TestComposeCanonicalizedResource) TestQueryParams(c *C) { - req, err := http.NewRequest( - "GET", "http://example.com/?Kevin=Perry&foo=bar", nil) - c.Assert(err, IsNil) - - accountName := MakeRandomString(10) - observed := 
composeCanonicalizedResource(req, accountName) - expected := "/" + accountName + "/\n" + "foo:bar" + "\n" + "kevin:Perry" - c.Assert(observed, Equals, expected) -} - -func (suite *TestComposeCanonicalizedResource) TestToLowerKeys(c *C) { - values := url.Values{ - "foo": []string{"bar", "baz"}, - "alpha": []string{"gamma", "delta"}, - "quux": []string{"flobble"}, - "BIG": []string{"Big", "Data"}, - "big": []string{"Big", "Little"}, - } - - observed := toLowerKeys(values) - expected := map[string][]string{ - "foo": {"bar", "baz"}, - "alpha": {"delta", "gamma"}, - "quux": {"flobble"}, - "big": {"Big", "Big", "Data", "Little"}, - } - - c.Check(observed, DeepEquals, expected) -} - -func (suite *TestComposeCanonicalizedResource) TestEncodeParams(c *C) { - input := map[string][]string{ - "foo": {"bar", "baz"}, - "alpha": {"delta", "gamma"}, - "quux": {"flobble"}, - "big": {"Big", "Big", "Data", "Little"}, - } - - observed := encodeParams(input) - expected := ("alpha:delta,gamma\nbig:Big,Big,Data,Little\n" + - "foo:bar,baz\nquux:flobble") - c.Assert(observed, Equals, expected) -} - -func (suite *TestComposeCanonicalizedResource) TestEncodeParamsEmpty(c *C) { - input := map[string][]string{} - observed := encodeParams(input) - expected := "" - c.Assert(observed, Equals, expected) -} - -type TestComposeStringToSign struct{} - -var _ = Suite(&TestComposeStringToSign{}) - -func (suite *TestComposeStringToSign) TestFullRequest(c *C) { - req, err := http.NewRequest( - "GET", "http://example.com/mypath?Kevin=Perry&foo=bar", nil) - c.Assert(err, IsNil) - for i, headerName := range headersToSign { - req.Header.Set(headerName, fmt.Sprintf("%v", i)) - } - req.Header.Set("x-ms-testing", "foo") - expected := "GET\n0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n10\nx-ms-testing:foo\n/myaccount/mypath\nfoo:bar\nkevin:Perry" - observed := composeStringToSign(req, "myaccount") - c.Assert(observed, Equals, expected) -} - -type TestSign struct{} - -var _ = Suite(&TestSign{}) - -func (suite *TestSign) TestSign(c *C) { - key := base64.StdEncoding.EncodeToString([]byte("dummykey")) - signable := "a-string-to-sign" - - observed, err := sign(key, signable) - c.Assert(err, IsNil) - expected := "5j1DSsm07IEh3u9JQQd0KPwtM6pEGChzrAF7Zf/LxLc=" - c.Assert(observed, Equals, expected) -} - -type TestComposeAuthHeader struct{} - -var _ = Suite(&TestComposeAuthHeader{}) - -func (suite *TestComposeAuthHeader) TestCreatesHeaderString(c *C) { - req, err := http.NewRequest( - "GET", "http://example.com/mypath?Kevin=Perry&foo=bar", nil) - c.Assert(err, IsNil) - - key := base64.StdEncoding.EncodeToString([]byte("dummykey")) - - observed, err := composeAuthHeader(req, "myname", key) - c.Assert(err, IsNil) - expected := "SharedKey myname:Xf9hWQ99mM0IyEOL6rNeAUdTQlixVqiYnt2TpLCCpY0=" - c.Assert(observed, Equals, expected) -} - -type TestSignRequest struct{} - -var _ = Suite(&TestSignRequest{}) - -func (suite *TestSignRequest) TestAddsHeaderToRequest(c *C) { - req, err := http.NewRequest( - "GET", "http://example.com/mypath?Kevin=Perry&foo=bar", nil) - c.Assert(err, IsNil) - c.Assert(req.Header.Get("Authorization"), Equals, "") - - key := base64.StdEncoding.EncodeToString([]byte("dummykey")) - context := StorageContext{client: nil, Account: "myname", Key: key} - context.signRequest(req) - - expected := "SharedKey myname:Xf9hWQ99mM0IyEOL6rNeAUdTQlixVqiYnt2TpLCCpY0=" - c.Assert(req.Header.Get("Authorization"), Equals, expected) -} - -func (suite *TestSignRequest) TestDoesNotAddHeaderIfEmptyKey(c *C) { - req, err := http.NewRequest( - "GET", 
"http://example.com/mypath?Kevin=Perry&foo=bar", nil) - c.Assert(err, IsNil) - c.Assert(req.Header.Get("Authorization"), Equals, "") - - context := StorageContext{client: nil, Account: "myname", Key: ""} - context.signRequest(req) - - c.Assert(req.Header.Get("Authorization"), Equals, "") -} - -type TestRequestHeaders struct{} - -var _ = Suite(&TestRequestHeaders{}) - -func (suite *TestRequestHeaders) TestAddsVersionHeaderToRequest(c *C) { - req, err := http.NewRequest("GET", "http://example.com/", nil) - c.Assert(err, IsNil) - addVersionHeader(req, "2012-02-12") - c.Assert(req.Header.Get("x-ms-version"), Equals, "2012-02-12") -} - -func (suite *TestRequestHeaders) TestContentHeader(c *C) { - data := "test data" - req, err := http.NewRequest("PUT", "http://example.com/", strings.NewReader(data)) - c.Assert(err, IsNil) - addContentHeaders(req) - c.Assert( - req.Header.Get("Content-Length"), Equals, fmt.Sprintf("%d", len(data))) - - // Ensure that reading the request data didn't destroy it. - reqdata, _ := ioutil.ReadAll(req.Body) - c.Assert(data, Equals, string(reqdata)) -} - -func (suite *TestRequestHeaders) TestLengthHeaderNotSetForGET(c *C) { - req, err := http.NewRequest("GET", "http://example.com/", nil) - c.Assert(err, IsNil) - addContentHeaders(req) - _, lengthPresent := req.Header[http.CanonicalHeaderKey("Content-Length")] - c.Assert(lengthPresent, Equals, false) -} - -func (suite *TestRequestHeaders) TestContentHeaderWithNoBody(c *C) { - req, err := http.NewRequest("PUT", "http://example.com/", nil) - c.Assert(err, IsNil) - addContentHeaders(req) - _, md5Present := req.Header[http.CanonicalHeaderKey("Content-MD5")] - c.Check(md5Present, Equals, false) - content := req.Header.Get("Content-Length") - c.Check(content, Equals, "0") -} - -func (suite *TestRequestHeaders) TestDateHeader(c *C) { - req, err := http.NewRequest("GET", "http://example.com/", nil) - c.Assert(err, IsNil) - c.Assert(req.Header.Get("Date"), Equals, "") - addDateHeader(req) - observed := req.Header.Get("Date") - observedTime, err := time.Parse(time.RFC1123, observed) - c.Assert(err, IsNil) - difference := time.Now().UTC().Sub(observedTime) - if difference.Minutes() > 1.0 { - c.FailNow() - } -} - -type TestStorageContext struct{} - -var _ = Suite(&TestStorageContext{}) - -// makeNastyURLUnfriendlyString returns a string that really needs escaping -// before it can be included in a URL. 
-func makeNastyURLUnfriendlyString() string { - return MakeRandomString(3) + "?&" + MakeRandomString(3) + "$%" -} - -func (suite *TestStorageContext) TestGetAccountURLCombinesAccountAndEndpoint(c *C) { - context := StorageContext{ - Account: "myaccount", - AzureEndpoint: "http://example.com", - } - c.Check( - context.getAccountURL(), - Equals, - "http://myaccount.blob.example.com") -} - -func (suite *TestStorageContext) TestGetAccountURLEscapesHostname(c *C) { - account := makeNastyURLUnfriendlyString() - context := StorageContext{ - Account: account, - AzureEndpoint: "http://example.com", - } - c.Check( - context.getAccountURL(), - Equals, - "http://"+url.QueryEscape(account)+".blob.example.com") -} - -func (*TestStorageContext) TestGetAccountURLRequiresEndpoint(c *C) { - context := StorageContext{Account: "myaccount"} - c.Check( - context.getAccountURL, - Panics, - errors.New("no AzureEndpoint specified in gwacl.StorageContext")) -} - -func (suite *TestStorageContext) TestGetContainerURLAddsContainer(c *C) { - account := makeNastyURLUnfriendlyString() - container := makeNastyURLUnfriendlyString() - context := StorageContext{ - Account: account, - AzureEndpoint: "http://example.com/", - } - c.Check( - context.getContainerURL(container), - Equals, - "http://"+url.QueryEscape(account)+".blob.example.com/"+url.QueryEscape(container)) -} - -func (suite *TestStorageContext) TestGetContainerURLAddsSlashIfNeeded(c *C) { - context := StorageContext{ - Account: "account", - AzureEndpoint: "http://example.com", - } - c.Check( - context.getContainerURL("container"), - Equals, - "http://account.blob.example.com/container") -} - -func (suite *TestStorageContext) TestGetFileURL(c *C) { - account := makeNastyURLUnfriendlyString() - container := makeNastyURLUnfriendlyString() - file := makeNastyURLUnfriendlyString() - context := StorageContext{ - Account: account, - AzureEndpoint: "http://example.com/", - } - c.Check( - context.GetFileURL(container, file), - Equals, - "http://"+url.QueryEscape(account)+".blob.example.com/"+url.QueryEscape(container)+"/"+url.QueryEscape(file)) -} - -func (suite *TestStorageContext) TestGetSignedFileURL(c *C) { - account := "account" - container := "container" - file := "/a/file" - key := base64.StdEncoding.EncodeToString([]byte("dummykey")) - context := StorageContext{ - Account: account, - Key: key, - AzureEndpoint: "http://example.com/", - } - expires := time.Now() - - signedURL, err := context.GetAnonymousFileURL(container, file, expires) - c.Assert(err, IsNil) - // The only difference with the non-anon URL is the query string. 
- parsed, err := url.Parse(signedURL) - c.Assert(err, IsNil) - fileURL, err := url.Parse(context.GetFileURL(container, file)) - c.Assert(err, IsNil) - c.Check(parsed.Scheme, Equals, fileURL.Scheme) - c.Check(parsed.Host, Equals, fileURL.Host) - c.Check(parsed.Path, Equals, fileURL.Path) - - values, err := url.ParseQuery(parsed.RawQuery) - c.Assert(err, IsNil) - signature := values.Get("sig") - readValues, err := getReadBlobAccessValues(container, file, account, key, expires) - c.Assert(err, IsNil) - expectedSignature := readValues.Get("sig") - c.Check(signature, Equals, expectedSignature) -} - -func (suite *TestStorageContext) TestGetClientReturnsDefaultClient(c *C) { - context := &StorageContext{client: nil} - c.Assert(context.getClient(), Equals, http.DefaultClient) -} - -func (suite *TestStorageContext) TestGetClientReturnsSpecifiedClient(c *C) { - context := &StorageContext{client: &http.Client{}} - c.Assert(context.getClient(), Not(Equals), http.DefaultClient) - c.Assert(context.getClient(), Equals, context.client) -} - -type TestListContainers struct{} - -var _ = Suite(&TestListContainers{}) - -// The ListContainers Storage API call returns a ContainerEnumerationResults -// struct on success. -func (suite *TestListContainers) Test(c *C) { - responseBody := ` - - - prefix-value - marker-value - max-results-value - - - name-value - url-value - - date/time-value - etag-value - lease-status-value - lease-state-value - lease-duration-value - - - metadata-value - - - - - ` - response := makeHttpResponse(http.StatusOK, responseBody) - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - request := &ListContainersRequest{Marker: ""} - results, err := context.ListContainers(request) - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Equals, fmt.Sprintf( - "http://%s.blob.example.com/?comp=list", context.Account)) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Assert(results, NotNil) - c.Assert(results.Containers[0].Name, Equals, "name-value") -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestListContainers) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - request := &ListContainersRequest{Marker: ""} - _, err := context.ListContainers(request) - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestListContainers) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - request := &ListContainersRequest{Marker: ""} - _, err := context.ListContainers(request) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -func (suite *TestListContainers) TestListContainersBatchPassesMarker(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // Call ListContainers. This will fail because of the empty - // response, but no matter. We only care about the request. 
- request := &ListContainersRequest{Marker: "thismarkerhere"} - _, err := context.ListContainers(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - c.Check(values["marker"], DeepEquals, []string{"thismarkerhere"}) -} - -func (suite *TestListContainers) TestListContainersBatchDoesNotPassEmptyMarker(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // The error is OK. We only care about the request. - request := &ListContainersRequest{Marker: ""} - _, err := context.ListContainers(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - marker, present := values["marker"] - c.Check(present, Equals, false) - c.Check(marker, DeepEquals, []string(nil)) -} - -func (suite *TestListContainers) TestListContainersBatchEscapesMarker(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // The error is OK. We only care about the request. - request := &ListContainersRequest{Marker: "x&y"} - _, err := context.ListContainers(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - c.Check(values["marker"], DeepEquals, []string{"x&y"}) -} - -type TestListBlobs struct{} - -var _ = Suite(&TestListBlobs{}) - -// The ListBlobs Storage API call returns a BlobEnumerationResults struct on -// success. -func (suite *TestListBlobs) Test(c *C) { - responseBody := ` - - - prefix - marker - maxresults - delimiter - - - blob-name - snapshot-date-time - blob-address - - last-modified - etag - size-in-bytes - blob-content-type - - - - - sequence-number - blobtype - leasestatus - leasestate - leasesduration - id - copystatus - copysource - copyprogress - copycompletiontime - copydesc - - - metadataname1 - metadataname2 - - - - blob-prefix - - - - ` - response := makeHttpResponse(http.StatusOK, responseBody) - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - - request := &ListBlobsRequest{Container: "container"} - results, err := context.ListBlobs(request) - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Matches, context.getContainerURL("container")+"?.*") - c.Check(transport.Request.URL.Query(), DeepEquals, url.Values{ - "restype": {"container"}, - "comp": {"list"}, - }) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Assert(results, NotNil) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestListBlobs) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - - request := &ListBlobsRequest{Container: "container"} - _, err := context.ListBlobs(request) - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. 
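The error contract these tests pin down gives callers a uniform pattern: transport failures surface as plain errors, HTTP-level failures as *ServerError values, and 404s can be detected with IsNotFoundError. A caller-side sketch:

package main

import (
	"log"

	"launchpad.net/gwacl"
)

// handleStorageError shows the assertions the tests above encode, seen
// from the caller's point of view.
func handleStorageError(err error) {
	if err == nil {
		return
	}
	if serverError, ok := err.(*gwacl.ServerError); ok {
		log.Printf("Azure returned HTTP %d", serverError.HTTPStatus.StatusCode())
	}
	if gwacl.IsNotFoundError(err) {
		log.Print("resource does not exist; often safe to ignore")
	}
}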
-func (suite *TestListBlobs) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - request := &ListBlobsRequest{Container: "container"} - _, err := context.ListBlobs(request) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -func (suite *TestListBlobs) TestListBlobsPassesMarker(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // Call ListBlobs. This will fail because of the empty - // response, but no matter. We only care about the request. - request := &ListBlobsRequest{Container: "mycontainer", Marker: "thismarkerhere"} - _, err := context.ListBlobs(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - c.Check(values["marker"], DeepEquals, []string{"thismarkerhere"}) -} - -func (suite *TestListBlobs) TestListBlobsDoesNotPassEmptyMarker(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // The error is OK. We only care about the request. - request := &ListBlobsRequest{Container: "mycontainer"} - _, err := context.ListBlobs(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - marker, present := values["marker"] - c.Check(present, Equals, false) - c.Check(marker, DeepEquals, []string(nil)) -} - -func (suite *TestListBlobs) TestListBlobsPassesPrefix(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // Call ListBlobs. This will fail because of the empty - // response, but no matter. We only care about the request. - request := &ListBlobsRequest{Container: "mycontainer", Prefix: "thisprefixhere"} - _, err := context.ListBlobs(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - c.Check(values["prefix"], DeepEquals, []string{"thisprefixhere"}) -} - -func (suite *TestListBlobs) TestListBlobsDoesNotPassEmptyPrefix(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{StatusCode: http.StatusOK, Body: Empty}, nil) - context := makeStorageContext(transport) - - // The error is OK. We only care about the request. 
- request := &ListBlobsRequest{Container: "mycontainer"} - _, err := context.ListBlobs(request) - c.Assert(err, ErrorMatches, ".*Failed to deserialize data.*") - c.Assert(transport.ExchangeCount, Equals, 1) - - query := transport.Exchanges[0].Request.URL.RawQuery - values, err := url.ParseQuery(query) - c.Assert(err, IsNil) - prefix, present := values["prefix"] - c.Check(present, Equals, false) - c.Check(prefix, DeepEquals, []string(nil)) -} - -type TestCreateContainer struct{} - -var _ = Suite(&TestCreateContainer{}) - -// The CreateContainer Storage API call returns without error when the -// container has been created successfully. -func (suite *TestCreateContainer) Test(c *C) { - response := makeHttpResponse(http.StatusCreated, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - containerName := MakeRandomString(10) - err := context.CreateContainer(containerName) - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Equals, fmt.Sprintf( - "http://%s.blob.example.com/%s?restype=container", - context.Account, containerName)) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestCreateContainer) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - err := context.CreateContainer("container") - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. -func (suite *TestCreateContainer) TestErrorResponse(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.CreateContainer("container") - c.Assert(err, NotNil) -} - -// A response with an unexpected status code is treated as an error. -func (suite *TestCreateContainer) TestNotCreatedResponse(c *C) { - response := makeHttpResponse(http.StatusOK, "") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.CreateContainer("container") - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestCreateContainer) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.CreateContainer("container") - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -type TestDeleteContainer struct{} - -var _ = Suite(&TestDeleteContainer{}) - -// The DeleteContainer Storage API call returns without error when the -// container has been deleted successfully.
-func (suite *TestDeleteContainer) Test(c *C) { - response := makeHttpResponse(http.StatusAccepted, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - containerName := MakeRandomString(10) - err := context.DeleteContainer(containerName) - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Equals, fmt.Sprintf( - "http://%s.blob.example.com/%s?restype=container", - context.Account, containerName)) - c.Check(transport.Request.Method, Equals, "DELETE") - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestDeleteContainer) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - err := context.DeleteContainer("container") - c.Assert(err, ErrorMatches, ".*canned-error.*") -} - -// A response with an unexpected status code is treated as an error. -func (suite *TestDeleteContainer) TestNotCreatedResponse(c *C) { - response := makeHttpResponse(http.StatusOK, "") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.DeleteContainer("container") - c.Assert(err, ErrorMatches, ".*Azure request failed.*") -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestDeleteContainer) TestServerError(c *C) { - response := makeHttpResponse(http.StatusMethodNotAllowed, "not allowed") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.DeleteContainer("container") - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusMethodNotAllowed) -} - -func (suite *TestDeleteContainer) TestDeleteNotExistentContainerDoesNotFail(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.DeleteContainer("container") - c.Assert(err, IsNil) -} - -type TestGetContainerProperties struct{} - -var _ = Suite(&TestGetContainerProperties{}) - -// The GetContainerProperties Storage API call returns without error when the -// container exists.
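The implementation earlier in this diff notes that GetContainerProperties "doubles as a handy way to see if a container exists"; combined with IsNotFoundError that gives a cheap existence probe. A sketch:

package main

import "launchpad.net/gwacl"

// containerExists distinguishes "absent" from "broken" using the
// ServerError/IsNotFoundError contract tested throughout this file.
func containerExists(context *gwacl.StorageContext, name string) (bool, error) {
	_, err := context.GetContainerProperties(name)
	switch {
	case err == nil:
		return true, nil
	case gwacl.IsNotFoundError(err):
		return false, nil
	default:
		return false, err // transport error or other HTTP failure
	}
}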
-func (suite *TestGetContainerProperties) Test(c *C) { - header := make(http.Header) - header.Add("Last-Modified", "last-modified") - header.Add("ETag", "etag") - header.Add("X-Ms-Lease-Status", "status") - header.Add("X-Ms-Lease-State", "state") - header.Add("X-Ms-Lease-Duration", "duration") - response := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusOK), - StatusCode: http.StatusOK, - Body: makeResponseBody(""), - Header: header, - } - - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - containerName := MakeRandomString(10) - props, err := context.GetContainerProperties(containerName) - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Equals, fmt.Sprintf( - "http://%s.blob.example.com/%s?restype=container", - context.Account, containerName)) - c.Check(transport.Request.Method, Equals, "GET") - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - - c.Check(props.LastModified, Equals, "last-modified") - c.Check(props.ETag, Equals, "etag") - c.Check(props.LeaseStatus, Equals, "status") - c.Check(props.LeaseState, Equals, "state") - c.Check(props.LeaseDuration, Equals, "duration") -} - -func (suite *TestGetContainerProperties) TestWithoutAllHeaders(c *C) { - response := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusOK), - StatusCode: http.StatusOK, - Body: makeResponseBody(""), - } - - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - containerName := MakeRandomString(10) - props, err := context.GetContainerProperties(containerName) - c.Assert(err, IsNil) - - c.Check(props.LastModified, Equals, "") - c.Check(props.ETag, Equals, "") - c.Check(props.LeaseStatus, Equals, "") - c.Check(props.LeaseState, Equals, "") - c.Check(props.LeaseDuration, Equals, "") -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestGetContainerProperties) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - _, err := context.GetContainerProperties("container") - c.Assert(err, ErrorMatches, ".*canned-error.*") -} - -// Server-side errors are propagated back to the caller. -func (suite *TestGetContainerProperties) TestErrorResponse(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - _, err := context.GetContainerProperties("container") - c.Assert(err, ErrorMatches, ".*Not Found.*") -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestGetContainerProperties) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - _, err := context.GetContainerProperties("container") - serverError, ok := err.(*ServerError) - c.Assert(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -type TestPutPage struct{} - -var _ = Suite(&TestPutPage{}) - -// Basic happy path testing. 
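Before the PutPage tests, it may help to see the two calls a page-blob write takes end to end, with the 512-byte alignment rules PutBlob and PutPage enforce earlier in this diff (container and file names here are illustrative):

package main

import (
	"bytes"

	"launchpad.net/gwacl"
)

func writeFirstPage(context *gwacl.StorageContext, page []byte) error {
	// Size must be a non-zero multiple of 512, per PutBlob's checks.
	err := context.PutBlob(&gwacl.PutBlobRequest{
		Container: "container", BlobType: "page",
		Filename: "disk.vhd", Size: 512,
	})
	if err != nil {
		return err
	}
	// StartRange%512 == 0 and EndRange%512 == 511, per PutPage's checks;
	// page is assumed to hold exactly 512 bytes.
	return context.PutPage(&gwacl.PutPageRequest{
		Container: "container", Filename: "disk.vhd",
		StartRange: 0, EndRange: 511,
		Data: bytes.NewReader(page),
	})
}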
-func (suite *TestPutPage) TestHappyPath(c *C) { - response := makeHttpResponse(http.StatusCreated, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - randomData := MakeRandomByteSlice(10) - dataReader := bytes.NewReader(randomData) - - err := context.PutPage(&PutPageRequest{ - Container: "container", Filename: "filename", StartRange: 0, - EndRange: 511, Data: dataReader}) - c.Assert(err, IsNil) - - // Ensure that container was set right. - c.Check(transport.Request.URL.String(), Matches, context.GetFileURL("container", "filename")+"?.*") - // Ensure that the Authorization header is set. - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - // Check the range is set. - c.Check(transport.Request.Header.Get("x-ms-range"), Equals, "bytes=0-511") - // Check special page write header. - c.Check(transport.Request.Header.Get("x-ms-page-write"), Equals, "update") - // "?comp=page" should be part of the URL. - c.Check(transport.Request.URL.Query(), DeepEquals, url.Values{ - "comp": {"page"}, - }) - // Check the data payload. - data, err := ioutil.ReadAll(transport.Request.Body) - c.Assert(err, IsNil) - c.Check(data, DeepEquals, randomData) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestPutPage) TestError(c *C) { - cannedError := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: cannedError}) - err := context.PutPage(&PutPageRequest{ - Container: "container", Filename: "filename", StartRange: 0, - EndRange: 511, Data: nil}) - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. -func (suite *TestPutPage) TestErrorResponse(c *C) { - responseBody := "Frotzedfailed to put blob" - response := makeHttpResponse(102, responseBody) - context := makeStorageContext(&TestTransport{Response: response}) - err := context.PutPage(&PutPageRequest{ - Container: "container", Filename: "filename", StartRange: 0, - EndRange: 511, Data: nil}) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*102.*") - c.Check(err, ErrorMatches, ".*Frotzed.*") - c.Check(err, ErrorMatches, ".*failed to put blob.*") -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestPutPage) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.PutPage(&PutPageRequest{ - Container: "container", Filename: "filename", StartRange: 0, - EndRange: 511, Data: nil}) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -// Range values outside the limits should get rejected. -func (suite *TestPutPage) TestRangeLimits(c *C) { - context := makeStorageContext(&TestTransport{}) - err := context.PutPage(&PutPageRequest{ - StartRange: 513, EndRange: 555}) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*StartRange must be a multiple of 512, EndRange must be one less than a multiple of 512.*") -} - -type TestPutBlob struct{} - -var _ = Suite(&TestPutBlob{}) - -// Test basic PutBlob happy path functionality. 
-func (suite *TestPutBlob) TestPutBlockBlob(c *C) { - response := makeHttpResponse(http.StatusCreated, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "block", Filename: "blobname"}) - c.Assert(err, IsNil) - // Ensure that container was set right. - c.Check(transport.Request.URL.String(), Equals, context.GetFileURL("container", "blobname")) - // Ensure that the Authorization header is set. - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - // The blob type should be a block. - c.Check(transport.Request.Header.Get("x-ms-blob-type"), Equals, "BlockBlob") -} - -// PutBlob should set x-ms-blob-type to PageBlob for Page Blobs. -func (suite *TestPutBlob) TestPutPageBlob(c *C) { - response := makeHttpResponse(http.StatusCreated, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "page", Filename: "blobname", - Size: 512}) - c.Assert(err, IsNil) - c.Check(transport.Request.Header.Get("x-ms-blob-type"), Equals, "PageBlob") - c.Check(transport.Request.Header.Get("x-ms-blob-content-length"), Equals, "512") -} - -// PutBlob for a page should return an error when Size is not specified. -func (suite *TestPutBlob) TestPutPageBlobWithSizeOmitted(c *C) { - context := makeStorageContext(&TestTransport{}) - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "page", Filename: "blob"}) - c.Assert(err, ErrorMatches, "Must supply a size for a page blob") -} - -// PutBlob for a page should return an error when Size is not a multiple -// of 512 bytes. -func (suite *TestPutBlob) TestPutPageBlobWithInvalidSize(c *C) { - context := makeStorageContext(&TestTransport{}) - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "page", Filename: "blob", - Size: 1015}) - c.Assert(err, ErrorMatches, "Size must be a multiple of 512 bytes") -} - -// Passing a BlobType other than page or block results in a panic. -func (suite *TestPutBlob) TestBlobType(c *C) { - defer func() { - err := recover() - c.Assert(err, Equals, "BlobType must be 'page' or 'block'") - }() - context := makeStorageContext(&TestTransport{}) - context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "invalid-blob-type", - Filename: "blobname"}) - c.Assert("This should have panicked", Equals, "But it didn't.") -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestPutBlob) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "block", Filename: "blobname"}) - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. -func (suite *TestPutBlob) TestErrorResponse(c *C) { - responseBody := "Frotzedfailed to put blob" - response := makeHttpResponse(102, responseBody) - context := makeStorageContext(&TestTransport{Response: response}) - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "block", Filename: "blobname"}) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*102.*") - c.Check(err, ErrorMatches, ".*Frotzed.*") - c.Check(err, ErrorMatches, ".*failed to put blob.*") -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects.
-func (suite *TestPutBlob) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.PutBlob(&PutBlobRequest{ - Container: "container", BlobType: "block", Filename: "blobname"}) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -type TestPutBlock struct{} - -var _ = Suite(&TestPutBlock{}) - -func (suite *TestPutBlock) Test(c *C) { - response := makeHttpResponse(http.StatusCreated, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - blockid := "\x1b\xea\xf7Mv\xb5\xddH\xebm" - randomData := MakeRandomByteSlice(10) - dataReader := bytes.NewReader(randomData) - err := context.PutBlock("container", "blobname", blockid, dataReader) - c.Assert(err, IsNil) - - // The blockid should have been base64 encoded and url escaped. - base64ID := base64.StdEncoding.EncodeToString([]byte(blockid)) - c.Check(transport.Request.URL.String(), Matches, context.GetFileURL("container", "blobname")+"?.*") - c.Check(transport.Request.URL.Query(), DeepEquals, url.Values{ - "comp": {"block"}, - "blockid": {base64ID}, - }) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - - data, err := ioutil.ReadAll(transport.Request.Body) - c.Assert(err, IsNil) - c.Check(data, DeepEquals, randomData) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestPutBlock) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - dataReader := bytes.NewReader(MakeRandomByteSlice(10)) - err := context.PutBlock("container", "blobname", "blockid", dataReader) - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. -func (suite *TestPutBlock) TestErrorResponse(c *C) { - responseBody := "Frotzedfailed to put block" - response := makeHttpResponse(102, responseBody) - context := makeStorageContext(&TestTransport{Response: response}) - dataReader := bytes.NewReader(MakeRandomByteSlice(10)) - err := context.PutBlock("container", "blobname", "blockid", dataReader) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*102.*") - c.Check(err, ErrorMatches, ".*Frotzed.*") - c.Check(err, ErrorMatches, ".*failed to put block.*") -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. 
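One constraint worth calling out around PutBlock: the blob service requires every block ID within a blob to have the same length once encoded, which is why the tests in this diff use zero-padded, fixed-width IDs. A hypothetical helper:

package main

import "fmt"

// blockID yields fixed-width IDs; PutBlock base64-encodes them for the
// blockid query parameter, as shown in the implementation above.
func blockID(n int) string {
	return fmt.Sprintf("%030d", n)
}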
-func (suite *TestPutBlock) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - dataReader := bytes.NewReader(MakeRandomByteSlice(10)) - err := context.PutBlock("container", "blobname", "blockid", dataReader) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -type TestPutBlockList struct{} - -var _ = Suite(&TestPutBlockList{}) - -func (suite *TestPutBlockList) Test(c *C) { - response := makeHttpResponse(http.StatusCreated, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - blocklist := &BlockList{} - blocklist.Add(BlockListLatest, "b1") - blocklist.Add(BlockListLatest, "b2") - err := context.PutBlockList("container", "blobname", blocklist) - c.Assert(err, IsNil) - - c.Check(transport.Request.Method, Equals, "PUT") - c.Check(transport.Request.URL.String(), Equals, fmt.Sprintf( - "http://%s.blob.example.com/container/blobname?comp=blocklist", - context.Account)) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - - data, err := ioutil.ReadAll(transport.Request.Body) - c.Assert(err, IsNil) - expected := dedent.Dedent(` - - YjE= - YjI= - `) - c.Check(strings.TrimSpace(string(data)), Equals, strings.TrimSpace(expected)) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestPutBlockList) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - blocklist := &BlockList{} - err := context.PutBlockList("container", "blobname", blocklist) - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. -func (suite *TestPutBlockList) TestErrorResponse(c *C) { - responseBody := "Frotzedfailed to put blocklist" - response := makeHttpResponse(102, responseBody) - context := makeStorageContext(&TestTransport{Response: response}) - blocklist := &BlockList{} - err := context.PutBlockList("container", "blobname", blocklist) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*102.*") - c.Check(err, ErrorMatches, ".*Frotzed.*") - c.Check(err, ErrorMatches, ".*failed to put blocklist.*") -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestPutBlockList) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - blocklist := &BlockList{} - err := context.PutBlockList("container", "blobname", blocklist) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -type TestGetBlockList struct{} - -var _ = Suite(&TestGetBlockList{}) - -// The GetBlockList Storage API call returns a GetBlockList struct on -// success. 
-func (suite *TestGetBlockList) Test(c *C) { - responseBody := ` - - - - - BlockId001 - 4194304 - - - - - BlockId002 - 1024 - - - ` - - response := makeHttpResponse(http.StatusOK, responseBody) - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - results, err := context.GetBlockList("container", "myfilename") - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Matches, context.GetFileURL("container", "myfilename")+"?.*") - c.Check(transport.Request.URL.Query(), DeepEquals, url.Values{ - "comp": {"blocklist"}, - "blocklisttype": {"all"}, - }) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Assert(results, NotNil) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestGetBlockList) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - _, err := context.GetBlockList("container", "myfilename") - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestGetBlockList) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - _, err := context.GetBlockList("container", "blobname") - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) -} - -type TestDeleteBlob struct{} - -var _ = Suite(&TestDeleteBlob{}) - -func (suite *TestDeleteBlob) Test(c *C) { - response := makeHttpResponse(http.StatusAccepted, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - err := context.DeleteBlob("container", "blobname") - c.Assert(err, IsNil) - - c.Check(transport.Request.Method, Equals, "DELETE") - c.Check(transport.Request.URL.String(), Equals, context.GetFileURL("container", "blobname")) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Check(transport.Request.Body, IsNil) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestDeleteBlob) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - err := context.DeleteBlob("container", "blobname") - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestDeleteBlob) TestServerError(c *C) { - // We're not using http.StatusNotFound for the test here because - // 404 errors are handled in a special way by DeleteBlob(). See the test - // TestDeleteNotExistentBlobDoesNotFail. - response := makeHttpResponse(http.StatusMethodNotAllowed, "not allowed") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.DeleteBlob("container", "blobname") - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusMethodNotAllowed) -} - -func (suite *TestDeleteBlob) TestDeleteNotExistentBlobDoesNotFail(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.DeleteBlob("container", "blobname") - c.Assert(err, IsNil) -} - -// Server-side errors are propagated back to the caller. 
-func (suite *TestDeleteBlob) TestErrorResponse(c *C) { - responseBody := "Frotzedfailed to delete blob" - response := makeHttpResponse(146, responseBody) - context := makeStorageContext(&TestTransport{Response: response}) - err := context.DeleteBlob("container", "blobname") - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*146.*") - c.Check(err, ErrorMatches, ".*Frotzed.*") - c.Check(err, ErrorMatches, ".*failed to delete blob.*") -} - -type TestGetBlob struct{} - -var _ = Suite(&TestGetBlob{}) - -func (suite *TestGetBlob) Test(c *C) { - responseBody := "blob-in-a-can" - response := makeHttpResponse(http.StatusOK, responseBody) - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - reader, err := context.GetBlob("container", "blobname") - c.Assert(err, IsNil) - c.Assert(reader, NotNil) - defer reader.Close() - - c.Check(transport.Request.Method, Equals, "GET") - c.Check(transport.Request.URL.String(), Equals, context.GetFileURL("container", "blobname")) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - - data, err := ioutil.ReadAll(reader) - c.Assert(err, IsNil) - c.Check(string(data), Equals, responseBody) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *TestGetBlob) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - reader, err := context.GetBlob("container", "blobname") - c.Check(reader, IsNil) - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. -func (suite *TestGetBlob) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - reader, err := context.GetBlob("container", "blobname") - c.Check(reader, IsNil) - c.Assert(err, NotNil) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) - c.Check(IsNotFoundError(err), Equals, true) -} - -// Server-side errors are propagated back to the caller. 
-func (suite *TestGetBlob) TestErrorResponse(c *C) { - response := &http.Response{ - Status: "246 Frotzed", - StatusCode: 246, - Body: makeResponseBody("Frotzedfailed to get blob"), - } - context := makeStorageContext(&TestTransport{Response: response}) - reader, err := context.GetBlob("container", "blobname") - c.Check(reader, IsNil) - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*246.*") - c.Check(err, ErrorMatches, ".*Frotzed.*") - c.Check(err, ErrorMatches, ".*failed to get blob.*") -} - -type TestSetContainerACL struct{} - -var _ = Suite(&TestSetContainerACL{}) - -func (suite *TestSetContainerACL) TestHappyPath(c *C) { - response := makeHttpResponse(http.StatusOK, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - err := context.SetContainerACL(&SetContainerACLRequest{ - Container: "mycontainer", Access: "container"}) - - c.Assert(err, IsNil) - c.Check(transport.Request.Method, Equals, "PUT") - c.Check(transport.Request.URL.String(), Matches, - fmt.Sprintf( - "http://%s.blob.example.com/mycontainer?.*", context.Account)) - c.Check(transport.Request.URL.Query(), DeepEquals, url.Values{ - "comp": {"acl"}, - "restype": {"container"}, - }) - - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Check(transport.Request.Header.Get("x-ms-blob-public-access"), Equals, "container") -} - -func (suite *TestSetContainerACL) TestAcceptsBlobAccess(c *C) { - response := makeHttpResponse(http.StatusOK, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - err := context.SetContainerACL(&SetContainerACLRequest{ - Container: "mycontainer", Access: "blob"}) - c.Assert(err, IsNil) - c.Check(transport.Request.Header.Get("x-ms-blob-public-access"), Equals, "blob") -} - -func (suite *TestSetContainerACL) TestAccessHeaderOmittedWhenPrivate(c *C) { - response := makeHttpResponse(http.StatusOK, "") - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - err := context.SetContainerACL(&SetContainerACLRequest{ - Container: "mycontainer", Access: "private"}) - - c.Assert(err, IsNil) - c.Check(transport.Request.Header.Get("x-ms-blob-public-access"), Equals, "") -} - -func (suite *TestSetContainerACL) TestInvalidAccessTypePanics(c *C) { - defer func() { - err := recover() - c.Assert(err, Equals, "Access must be one of 'container', 'blob' or 'private'") - }() - context := makeStorageContext(&TestTransport{}) - context.SetContainerACL(&SetContainerACLRequest{ - Container: "mycontainer", Access: "thisisnotvalid"}) - c.Assert("This test failed", Equals, "because there was no panic") -} - -func (suite *TestSetContainerACL) TestClientSideError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - err := context.SetContainerACL(&SetContainerACLRequest{ - Container: "mycontainer", Access: "private"}) - c.Assert(err, NotNil) -} - -// Azure HTTP errors (for instance 404 responses) are propagated back to -// the caller as ServerError objects. 
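The ACL tests that follow cover the three access levels SetContainerACL accepts. For contrast, a sketch of the two ways a blob can be exposed: a public container ACL versus a signed, time-limited URL (container and file names here are illustrative):

package main

import (
	"time"

	"launchpad.net/gwacl"
)

func expose(context *gwacl.StorageContext) (string, error) {
	// Option 1: make every blob in the container publicly readable.
	err := context.SetContainerACL(&gwacl.SetContainerACLRequest{
		Container: "public-logs", Access: "blob",
	})
	if err != nil {
		return "", err
	}
	// Option 2: keep the container private and hand out an expiring
	// signed URL, via GetAnonymousFileURL from earlier in this diff.
	return context.GetAnonymousFileURL("private-data", "report.txt",
		time.Now().Add(time.Hour))
}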
-func (suite *TestSetContainerACL) TestServerError(c *C) { - response := makeHttpResponse(http.StatusNotFound, "not found") - context := makeStorageContext(&TestTransport{Response: response}) - err := context.SetContainerACL(&SetContainerACLRequest{ - Container: "mycontainer", Access: "private"}) - c.Assert(err, NotNil) - serverError, ok := err.(*ServerError) - c.Check(ok, Equals, true) - c.Check(serverError.HTTPStatus.StatusCode(), Equals, http.StatusNotFound) - c.Check(IsNotFoundError(err), Equals, true) -} === removed file 'src/launchpad.net/gwacl/storage_test.go' --- src/launchpad.net/gwacl/storage_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/storage_test.go 1970-01-01 00:00:00 +0000 @@ -1,507 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "bytes" - "encoding/base64" - "fmt" - "io/ioutil" - . "launchpad.net/gocheck" - "net/http" - "net/url" - "strings" -) - -type testUploadBlockBlob struct{} - -var _ = Suite(&testUploadBlockBlob{}) - -func (suite *testUploadBlockBlob) TestSmallFile(c *C) { - transport := &MockingTransport{} - context := makeStorageContext(transport) - // UploadBlockBlob uses PutBlock to upload the data. - transport.AddExchange(makeFakeCreatedResponse(), nil) - // UploadBlockBlob then sends the list of blocks with PutBlockList. - transport.AddExchange(makeFakeCreatedResponse(), nil) - // Upload a random blob of data. - data := uploadRandomBlob(c, context, 10, "MyContainer", "MyFile") - // There were two exchanges. - c.Assert(transport.ExchangeCount, Equals, 2) - // The first request is a Put Block with the block data. - fileURL := context.GetFileURL("MyContainer", "MyFile") - assertBlockSent(c, context, data, b64("000000000000000000000000000000"), transport.Exchanges[0], fileURL) - // The second request is Put Block List to commit the block above. - assertBlockListSent(c, context, []string{b64("000000000000000000000000000000")}, transport.Exchanges[1], fileURL) -} - -func (suite *testUploadBlockBlob) TestLargeFile(c *C) { - transport := &MockingTransport{} - context := makeStorageContext(transport) - // UploadBlockBlob uses PutBlock twice to upload the data. - transport.AddExchange(makeFakeCreatedResponse(), nil) - transport.AddExchange(makeFakeCreatedResponse(), nil) - // UploadBlockBlob then sends the list of blocks with PutBlockList. - transport.AddExchange(makeFakeCreatedResponse(), nil) - // Upload a large random blob of data. - data := uploadRandomBlob(c, context, 1348*1024, "MyContainer", "MyFile") - // There were three exchanges. - c.Assert(transport.ExchangeCount, Equals, 3) - // The first two requests are Put Block with chunks of the block data. The - // weird looking block IDs are base64 encodings of the strings "0" and "1". - fileURL := context.GetFileURL("MyContainer", "MyFile") - assertBlockSent(c, context, data[:1024*1024], b64("000000000000000000000000000000"), transport.Exchanges[0], fileURL) - assertBlockSent(c, context, data[1024*1024:], b64("000000000000000000000000000001"), transport.Exchanges[1], fileURL) - // The second request is Put Block List to commit the block above. 
- assertBlockListSent(c, context, []string{b64("000000000000000000000000000000"), b64("000000000000000000000000000001")}, transport.Exchanges[2], fileURL) -} - -func uploadRandomBlob(c *C, context *StorageContext, size int, container, filename string) []byte { - data := MakeRandomByteSlice(size) - err := context.UploadBlockBlob( - container, filename, bytes.NewReader(data)) - c.Assert(err, IsNil) - return data -} - -func assertBlockSent( - c *C, context *StorageContext, data []byte, blockID string, exchange *MockingTransportExchange, fileURL string) { - c.Check(exchange.Request.URL.String(), Matches, fileURL+"?.*") - c.Check(exchange.Request.URL.Query(), DeepEquals, url.Values{ - "comp": {"block"}, - "blockid": {blockID}, - }) - body, err := ioutil.ReadAll(exchange.Request.Body) - c.Assert(err, IsNil) - // DeepEquals is painfully slow when comparing larger structures, so we - // compare the expected (data) and observed (body) slices directly. - c.Assert(len(body), Equals, len(data)) - for i := range body { - // c.Assert also noticably slows things down; this condition is an - // optimisation of the c.Assert call contained within. - if body[i] != data[i] { - c.Assert(body[i], Equals, data[i]) - } - } -} - -func assertBlockListSent( - c *C, context *StorageContext, blockIDs []string, exchange *MockingTransportExchange, fileURL string) { - c.Check(exchange.Request.URL.String(), Matches, fileURL+"?.*") - c.Check(exchange.Request.URL.Query(), DeepEquals, url.Values{ - "comp": {"blocklist"}, - }) - body, err := ioutil.ReadAll(exchange.Request.Body) - c.Check(err, IsNil) - expected := "\n" - for _, blockID := range blockIDs { - expected += " " + blockID + "\n" - } - expected += "" - c.Check(strings.TrimSpace(string(body)), Equals, strings.TrimSpace(expected)) -} - -type testListAllBlobs struct{} - -var _ = Suite(&testListAllBlobs{}) - -// The ListAllBlobs Storage API call returns a BlobEnumerationResults struct -// on success. -func (suite *testListAllBlobs) Test(c *C) { - responseBody := ` - - - prefix - marker - maxresults - delimiter - - - blob-name - snapshot-date-time - blob-address - - last-modified - etag - size-in-bytes - blob-content-type - - - - - sequence-number - blobtype - leasestatus - leasestate - leasesduration - id - copystatus - copysource - copyprogress - copycompletiontime - copydesc - - - metadataname1 - metadataname2 - - - - blob-prefix - - - - ` - response := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusOK), - StatusCode: http.StatusOK, - Body: makeResponseBody(responseBody), - } - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - request := &ListBlobsRequest{Container: "container"} - results, err := context.ListAllBlobs(request) - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Matches, context.getContainerURL("container")+"?.*") - c.Check(transport.Request.URL.Query(), DeepEquals, url.Values{ - "restype": {"container"}, - "comp": {"list"}, - }) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Assert(results, NotNil) -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *testListAllBlobs) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - request := &ListBlobsRequest{Container: "container"} - _, err := context.ListAllBlobs(request) - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. 
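The upload tests above verify a two-phase protocol: the data is cut into 1MiB Put Block requests whose block IDs are base64-encoded, zero-padded 30-digit counters, then a single Put Block List commits them in order. A hedged sketch of the chunking arithmetic; splitBlocks and its inline PUT comment are stand-ins, not gwacl's uploader:

    package main

    import (
        "bytes"
        "encoding/base64"
        "fmt"
        "io"
    )

    const blockSize = 1024 * 1024 // the 1MiB chunk size the tests exercise

    // blockID mirrors the IDs asserted above: a 30-digit zero-padded
    // counter, base64-encoded. Azure requires only that IDs within a blob
    // are unique and equal-length before encoding.
    func blockID(n int) string {
        return base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%030d", n)))
    }

    // splitBlocks walks r in blockSize chunks and returns the IDs a final
    // Put Block List would commit; the PUT itself is elided.
    func splitBlocks(r io.Reader) ([]string, error) {
        var ids []string
        buf := make([]byte, blockSize)
        for i := 0; ; i++ {
            n, err := io.ReadFull(r, buf)
            if n > 0 {
                // A real uploader PUTs buf[:n] with comp=block and
                // blockid=blockID(i) here.
                ids = append(ids, blockID(i))
            }
            if err == io.EOF || err == io.ErrUnexpectedEOF {
                return ids, nil
            }
            if err != nil {
                return nil, err
            }
        }
    }

    func main() {
        data := bytes.Repeat([]byte{1}, 1348*1024) // the TestLargeFile size
        ids, _ := splitBlocks(bytes.NewReader(data))
        fmt.Println(len(ids), ids[0]) // 2 blocks; the first ID is b64 of 30 zeros
    }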
-func (suite *testListAllBlobs) TestErrorResponse(c *C) { - response := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusNotFound), - StatusCode: http.StatusNotFound, - } - context := makeStorageContext(&TestTransport{Response: response}) - request := &ListBlobsRequest{Container: "container"} - _, err := context.ListAllBlobs(request) - c.Assert(err, NotNil) -} - -// ListAllBlobs combines multiple batches of output. -func (suite *testListAllBlobs) TestBatchedResult(c *C) { - firstBlob := "blob1" - lastBlob := "blob2" - marker := "moreplease" - firstBatch := http.Response{ - StatusCode: http.StatusOK, - Body: makeResponseBody(fmt.Sprintf(` - - - - %s - - - %s - - `, firstBlob, marker)), - } - lastBatch := http.Response{ - StatusCode: http.StatusOK, - Body: makeResponseBody(fmt.Sprintf(` - - - - %s - - - - `, lastBlob)), - } - transport := &MockingTransport{} - transport.AddExchange(&firstBatch, nil) - transport.AddExchange(&lastBatch, nil) - context := makeStorageContext(transport) - - request := &ListBlobsRequest{Container: "mycontainer"} - blobs, err := context.ListAllBlobs(request) - c.Assert(err, IsNil) - - c.Check(len(blobs.Blobs), Equals, 2) - c.Check(blobs.Blobs[0].Name, Equals, firstBlob) - c.Check(blobs.Blobs[1].Name, Equals, lastBlob) -} - -type testListAllContainers struct{} - -var _ = Suite(&testListAllContainers{}) - -// The ListAllContainers Storage API call returns a ContainerEnumerationResults -// struct on success. -func (suite *testListAllContainers) Test(c *C) { - responseBody := ` - - - prefix-value - marker-value - max-results-value - - - name-value - url-value - - date/time-value - etag-value - lease-status-value - lease-state-value - lease-duration-value - - - metadata-value - - - - - ` - response := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusOK), - StatusCode: http.StatusOK, - Body: makeResponseBody(responseBody), - } - transport := &TestTransport{Response: response} - context := makeStorageContext(transport) - context.AzureEndpoint = "http://example.com/" - results, err := context.ListAllContainers() - c.Assert(err, IsNil) - c.Check(transport.Request.URL.String(), Equals, fmt.Sprintf( - "http://%s.blob.example.com/?comp=list", context.Account)) - c.Check(transport.Request.Header.Get("Authorization"), Not(Equals), "") - c.Assert(results, NotNil) - c.Assert(results.Containers[0].Name, Equals, "name-value") -} - -// Client-side errors from the HTTP client are propagated back to the caller. -func (suite *testListAllContainers) TestError(c *C) { - error := fmt.Errorf("canned-error") - context := makeStorageContext(&TestTransport{Error: error}) - _, err := context.ListAllContainers() - c.Assert(err, NotNil) -} - -// Server-side errors are propagated back to the caller. -func (suite *testListAllContainers) TestErrorResponse(c *C) { - response := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusNotFound), - StatusCode: http.StatusNotFound, - } - context := makeStorageContext(&TestTransport{Response: response}) - _, err := context.ListAllContainers() - c.Assert(err, NotNil) -} - -// ListAllContainers combines multiple batches of output. 
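ListAllBlobs and ListAllContainers follow the same batching protocol: request a page, append its items, and repeat with the returned continuation marker until the service sends an empty one. A generic sketch of that loop, with fetch standing in for the real API call:

    package main

    import "fmt"

    // page is one batch of a listing; NextMarker mirrors the continuation
    // token in Azure's enumeration responses.
    type page struct {
        Items      []string
        NextMarker string
    }

    // listAll keeps fetching with the returned marker until the service
    // sends an empty one, the loop the batching tests above verify.
    func listAll(fetch func(marker string) (page, error)) ([]string, error) {
        var all []string
        marker := ""
        for {
            p, err := fetch(marker)
            if err != nil {
                return nil, err
            }
            all = append(all, p.Items...)
            if p.NextMarker == "" {
                return all, nil
            }
            marker = p.NextMarker
        }
    }

    func main() {
        batches := map[string]page{
            "":           {Items: []string{"blob1"}, NextMarker: "moreplease"},
            "moreplease": {Items: []string{"blob2"}},
        }
        all, _ := listAll(func(marker string) (page, error) {
            return batches[marker], nil
        })
        fmt.Println(all) // [blob1 blob2]
    }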
-func (suite *testListAllContainers) TestBatchedResult(c *C) { - firstContainer := "container1" - lastContainer := "container2" - marker := "moreplease" - firstBatch := http.Response{ - StatusCode: http.StatusOK, - Body: makeResponseBody(fmt.Sprintf(` - - - - %s - container-address - - - %s - - `, firstContainer, marker)), - } - lastBatch := http.Response{ - StatusCode: http.StatusOK, - Body: makeResponseBody(fmt.Sprintf(` - - - - %s - container-address - - - - `, lastContainer)), - } - transport := &MockingTransport{} - transport.AddExchange(&firstBatch, nil) - transport.AddExchange(&lastBatch, nil) - context := makeStorageContext(transport) - - containers, err := context.ListAllContainers() - c.Assert(err, IsNil) - - c.Check(len(containers.Containers), Equals, 2) - c.Check(containers.Containers[0].Name, Equals, firstContainer) - c.Check(containers.Containers[1].Name, Equals, lastContainer) -} - -type testDeleteAllBlobs struct{} - -var _ = Suite(&testDeleteAllBlobs{}) - -func (s *testDeleteAllBlobs) makeListingResponse() *http.Response { - return &http.Response{ - Status: fmt.Sprintf("%d", http.StatusOK), - StatusCode: http.StatusOK, - Body: makeResponseBody(` - - - prefix - marker - maxresults - delimiter - - - blob-name - - - blob-name2 - - - - `), - } -} - -func (s *testDeleteAllBlobs) TestHappyPath(c *C) { - listResponse := s.makeListingResponse() - deleteResponse := &http.Response{ - Status: fmt.Sprintf("%d", http.StatusAccepted), - StatusCode: http.StatusAccepted, - } - - transport := &MockingTransport{} - transport.AddExchange(listResponse, nil) - transport.AddExchange(deleteResponse, nil) - transport.AddExchange(deleteResponse, nil) - context := makeStorageContext(transport) - - err := context.DeleteAllBlobs(&DeleteAllBlobsRequest{Container: "container"}) - c.Assert(err, IsNil) - c.Assert(transport.ExchangeCount, Equals, 3) - - // Check the ListAllBlobs exchange. - c.Check( - transport.Exchanges[0].Request.URL.String(), - Matches, context.getContainerURL("container")+"[?].*") - c.Check(transport.Exchanges[0].Request.URL.Query(), - DeepEquals, url.Values{ - "restype": {"container"}, - "comp": {"list"}, - }) - - // Check the DeleteBlob exchanges. 
- c.Check( - transport.Exchanges[1].Request.URL.String(), - Equals, context.GetFileURL("container", "blob-name")) - c.Check(transport.Exchanges[1].Request.Method, Equals, "DELETE") - - c.Check( - transport.Exchanges[2].Request.URL.String(), - Equals, context.GetFileURL("container", "blob-name2")) - c.Check(transport.Exchanges[2].Request.Method, Equals, "DELETE") -} - -func (s *testDeleteAllBlobs) TestErrorsListing(c *C) { - transport := &MockingTransport{} - transport.AddExchange(&http.Response{ - Status: fmt.Sprintf("%d", http.StatusNotFound), - StatusCode: http.StatusNotFound}, nil) - context := makeStorageContext(transport) - err := context.DeleteAllBlobs(&DeleteAllBlobsRequest{Container: "c"}) - c.Assert(err, ErrorMatches, `request for blobs list failed: Azure request failed \(404: Not Found\)`) -} - -func (s *testDeleteAllBlobs) TestErrorsDeleting(c *C) { - transport := &MockingTransport{} - listResponse := s.makeListingResponse() - transport.AddExchange(listResponse, nil) - transport.AddExchange(&http.Response{ - Status: fmt.Sprintf("%d", http.StatusBadRequest), - StatusCode: http.StatusBadRequest}, nil) - context := makeStorageContext(transport) - err := context.DeleteAllBlobs(&DeleteAllBlobsRequest{Container: "c"}) - c.Assert(err, ErrorMatches, `failed to delete blob blob-name: Azure request failed \(400: Bad Request\)`) -} - -type testCreateInstanceDataVHD struct{} - -var _ = Suite(&testCreateInstanceDataVHD{}) - -func (s *testCreateInstanceDataVHD) TestHappyPath(c *C) { - response := http.Response{ - Status: fmt.Sprintf("%d", http.StatusOK), - StatusCode: http.StatusCreated, - } - transport := &MockingTransport{} - transport.AddExchange(&response, nil) // putblob response - transport.AddExchange(&response, nil) // first putpage - transport.AddExchange(&response, nil) // second putpage - context := makeStorageContext(transport) - - randomData := MakeRandomByteSlice(512) - dataReader := bytes.NewReader(randomData) - - var err error - - err = context.CreateInstanceDataVHD(&CreateVHDRequest{ - Container: "container", Filename: "filename", - FilesystemData: dataReader, Size: 512}) - c.Assert(err, IsNil) - - // Check the PutBlob exchange. - c.Check( - transport.Exchanges[0].Request.Header.Get("x-ms-blob-type"), - Equals, "PageBlob") - expectedSize := fmt.Sprintf("%d", VHD_SIZE) - c.Check( - transport.Exchanges[0].Request.Header.Get("x-ms-blob-content-length"), - Equals, expectedSize) - - // Check the PutPage for the footer exchange. - data, err := ioutil.ReadAll(transport.Exchanges[1].Request.Body) - c.Assert(err, IsNil) - expectedData, err := base64.StdEncoding.DecodeString(VHD_FOOTER) - c.Assert(err, IsNil) - c.Check(data, DeepEquals, expectedData) - expectedRange := fmt.Sprintf("bytes=%d-%d", VHD_SIZE-512, VHD_SIZE-1) - c.Check( - transport.Exchanges[1].Request.Header.Get("x-ms-range"), - Equals, expectedRange) - - // Check the PutPage for the data exchange. 
- data, err = ioutil.ReadAll(transport.Exchanges[2].Request.Body) - c.Assert(err, IsNil) - c.Check(data, DeepEquals, randomData) - - c.Check( - transport.Exchanges[2].Request.Header.Get("x-ms-range"), - Equals, "bytes=0-511") -} - -func (s *testCreateInstanceDataVHD) TestSizeConstraints(c *C) { - var err error - context := makeStorageContext(&TestTransport{}) - - err = context.CreateInstanceDataVHD(&CreateVHDRequest{Size: 10}) - c.Check(err, ErrorMatches, "Size must be a multiple of 512") - - err = context.CreateInstanceDataVHD(&CreateVHDRequest{ - Size: VHD_SIZE}) - errString := fmt.Sprintf("Size cannot be bigger than %d", VHD_SIZE-512) - c.Check(err, ErrorMatches, errString) -} === removed file 'src/launchpad.net/gwacl/testhelpers_x509dispatch.go' --- src/launchpad.net/gwacl/testhelpers_x509dispatch.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/testhelpers_x509dispatch.go 1970-01-01 00:00:00 +0000 @@ -1,87 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). -// -// Helpers for testing with x509 requests. These help inject fake responses -// into the x509 request dispatcher. - -package gwacl - -import ( - "launchpad.net/gwacl/fork/http" -) - -// rigRecordingDispatcher sets up a request dispatcher that records incoming -// requests by appending them to *record. It returns the result of whatever -// dispatcher was already active. -// If you also want the dispatcher to return a particular result, rig it for -// that result first (using one of the other rig...Dispatcher functions) and -// then chain the recording dispatcher in front of it. -func rigRecordingDispatcher(record *[]*X509Request) { - previousDispatcher := _X509Dispatcher - _X509Dispatcher = func(session *x509Session, request *X509Request) (*x509Response, error) { - *record = append(*record, request) - return previousDispatcher(session, request) - } -} - -// rigFixedResponseDispatcher sets up a request dispatcher that always returns -// a prepared response. -func rigFixedResponseDispatcher(response *x509Response) { - _X509Dispatcher = func(*x509Session, *X509Request) (*x509Response, error) { - return response, nil - } -} - -// rigFailingDispatcher sets up a request dispatcher that returns a given -// error. -func rigFailingDispatcher(failure error) { - _X509Dispatcher = func(*x509Session, *X509Request) (*x509Response, error) { - return nil, failure - } -} - -type DispatcherResponse struct { - response *x509Response - errorObject error -} - -// rigPreparedResponseDispatcher sets up a request dispatcher that returns, -// for each consecutive request, the next of a series of prepared responses. -func rigPreparedResponseDispatcher(responses []DispatcherResponse) { - index := 0 - _X509Dispatcher = func(*x509Session, *X509Request) (*x509Response, error) { - response := responses[index] - index += 1 - return response.response, response.errorObject - } -} - -// rigRecordingPreparedResponseDispatcher sets up a request dispatcher that -// returns, for each consecutive request, the next of a series of prepared -// responses, and records each request. 
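The CreateInstanceDataVHD exchanges above pin down the page-blob arithmetic: the blob is created with an x-ms-blob-content-length of VHD_SIZE, the filesystem data occupies pages starting at byte zero, and the 512-byte footer lands in the final page. A sketch of the x-ms-range values those tests expect:

    package main

    import "fmt"

    // vhdSize mirrors the VHD_SIZE constant from vhd_footer.go below.
    const vhdSize = 20972032

    // pageRange formats the x-ms-range header for a page write; page blob
    // writes must be 512-byte aligned.
    func pageRange(offset, length int) string {
        return fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)
    }

    func main() {
        fmt.Println(pageRange(0, 512))           // bytes=0-511, the data page
        fmt.Println(pageRange(vhdSize-512, 512)) // bytes=20971520-20972031, the footer
    }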
-func rigRecordingPreparedResponseDispatcher(record *[]*X509Request, responses []DispatcherResponse) { - index := 0 - _X509Dispatcher = func(session *x509Session, request *X509Request) (*x509Response, error) { - *record = append(*record, request) - response := responses[index] - index += 1 - return response.response, response.errorObject - } -} - -// setUpDispatcher sets up a request dispatcher that: -// - records requests -// - returns empty responses -func setUpDispatcher(operationID string) *[]*X509Request { - header := http.Header{} - header.Set("X-Ms-Request-Id", operationID) - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte{}, - Header: header, - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - return &recordedRequests -} === removed file 'src/launchpad.net/gwacl/testing.go' --- src/launchpad.net/gwacl/testing.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/testing.go 1970-01-01 00:00:00 +0000 @@ -1,77 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "net/http" -) - -// NewTestStorageContext returns a StorageContext object built using -// the given *http.Client object. It's meant to be used in the tests -// of other applications using gwacl to create a StorageContext that will -// interact with a fake client object. -func NewTestStorageContext(client *http.Client) *StorageContext { - storageContext := &StorageContext{} - storageContext.client = client - storageContext.AzureEndpoint = "http://127.0.0.1/" - return storageContext -} - -// PatchManagementAPIResponses patches gwacl's ManagementAPI objects so that -// they can be used in tests. Calling PatchManagementAPIResponses will make -// the ManagementAPI objects talk to a fake http server instead of talking to -// the Azure server and get the pre-canned responses from a fake http server. -// Use the returned X509Requests to inspect the issued requests. -// It's meant to be used in the tests of other applications using gwacl's -// ManagementAPI objects. -func PatchManagementAPIResponses(responses []DispatcherResponse) *[]*X509Request { - rigPreparedResponseDispatcher(responses) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - return &recordedRequests -} - -// NewDispatcherResponse creates a DispatcherResponse that can then be used by -// PatchManagementAPIResponses to simulate responses from the Azure server. -// It's meant to be used in the tests of other applications using gwacl's -// ManagementAPI objects. -func NewDispatcherResponse(body []byte, statusCode int, errorObject error) DispatcherResponse { - return DispatcherResponse{ - response: &x509Response{ - Body: body, - StatusCode: statusCode, - }, - errorObject: errorObject} -} - -// MockingTransport is used as an http.Client.Transport for testing. It -// records the sequence of requests, and returns a predetermined sequence of -// Responses and errors. -type MockingTransport struct { - Exchanges []*MockingTransportExchange - ExchangeCount int -} - -// MockingTransport implements the http.RoundTripper interface. 
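The rig* helpers above all exploit the same seam: gwacl dispatches management requests through the package-level _X509Dispatcher function variable, so tests can swap in fakes and chain a recorder in front of a canned response. A self-contained sketch of that pattern with simplified request and response types:

    package main

    import "fmt"

    type request struct{ URL string }
    type response struct{ Body string }

    // dispatch is the injection seam, mirroring _X509Dispatcher above.
    var dispatch = realDispatch

    func realDispatch(r *request) (*response, error) {
        return nil, fmt.Errorf("would hit the network for %s", r.URL)
    }

    // rigFixedResponse makes every dispatch return one canned response.
    func rigFixedResponse(resp *response) {
        dispatch = func(*request) (*response, error) { return resp, nil }
    }

    // rigRecording chains a recorder in front of the current dispatcher.
    func rigRecording(record *[]*request) {
        previous := dispatch
        dispatch = func(r *request) (*response, error) {
            *record = append(*record, r)
            return previous(r)
        }
    }

    func main() {
        rigFixedResponse(&response{Body: "canned"})
        var seen []*request
        rigRecording(&seen)
        resp, _ := dispatch(&request{URL: "services/images"})
        fmt.Println(resp.Body, len(seen)) // canned 1
    }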
-var _ http.RoundTripper = &MockingTransport{} - -func (t *MockingTransport) AddExchange(response *http.Response, error error) { - exchange := MockingTransportExchange{Response: response, Error: error} - t.Exchanges = append(t.Exchanges, &exchange) -} - -func (t *MockingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - exchange := t.Exchanges[t.ExchangeCount] - t.ExchangeCount += 1 - exchange.Request = req - return exchange.Response, exchange.Error -} - -// MockingTransportExchange is a recording of a request and a response over -// HTTP. -type MockingTransportExchange struct { - Request *http.Request - Response *http.Response - Error error -} === removed file 'src/launchpad.net/gwacl/testing_test.go' --- src/launchpad.net/gwacl/testing_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/testing_test.go 1970-01-01 00:00:00 +0000 @@ -1,58 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "fmt" - . "launchpad.net/gocheck" - "net/http" -) - -type testTesting struct{} - -var _ = Suite(&testTesting{}) - -func (*testTesting) TestNewTestStorageContextCreatesCompleteContext(c *C) { - client := &http.Client{Transport: &TestTransport{}} - context := NewTestStorageContext(client) - context.Account = "myaccount" - - c.Check(context.Account, Equals, "myaccount") - c.Check(context.getAccountURL(), Matches, ".*myaccount.*") -} - -func (*testTesting) TestNewTestStorageContextWorksWithTransport(c *C) { - errorMessage := "canned-error" - error := fmt.Errorf(errorMessage) - transport := &TestTransport{Error: error} - client := &http.Client{Transport: transport} - context := NewTestStorageContext(client) - request := &ListContainersRequest{Marker: ""} - _, err := context.ListContainers(request) - c.Check(err, ErrorMatches, ".*"+errorMessage+".*") -} - -func (*testTesting) TestNewDispatcherResponse(c *C) { - body := []byte("test body") - statusCode := http.StatusOK - errorObject := fmt.Errorf("canned-error") - dispatcherResponse := NewDispatcherResponse(body, statusCode, errorObject) - c.Check(dispatcherResponse.errorObject, Equals, errorObject) - c.Check(dispatcherResponse.response.Body, DeepEquals, body) - c.Check(dispatcherResponse.response.StatusCode, Equals, statusCode) -} - -func (*testTesting) TestPatchManagementAPIResponses(c *C) { - response := NewDispatcherResponse([]byte(""), http.StatusOK, nil) - responses := []DispatcherResponse{response, response} - requests := PatchManagementAPIResponses(responses) - api := makeAPI(c) - _, err := api.ListOSImages() - c.Assert(err, IsNil) - _, err = api.ListOSImages() - c.Assert(err, IsNil) - c.Assert(len(*requests), Equals, 2) - c.Check((*requests)[0].URL, Equals, api.session.composeURL("services/images")) - c.Check((*requests)[1].URL, Equals, api.session.composeURL("services/images")) -} === removed file 'src/launchpad.net/gwacl/utils.go' --- src/launchpad.net/gwacl/utils.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/utils.go 1970-01-01 00:00:00 +0000 @@ -1,59 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). 
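MockingTransport works because http.Client delegates every request to its Transport; any http.RoundTripper test double can be injected the same way, which is what NewTestStorageContext above accepts from callers. A minimal usage sketch with a hypothetical single-response transport:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
        "strings"
    )

    // cannedTransport is a hypothetical single-response double; anything
    // satisfying http.RoundTripper can be dropped into an *http.Client.
    type cannedTransport struct {
        resp *http.Response
        err  error
    }

    func (t *cannedTransport) RoundTrip(*http.Request) (*http.Response, error) {
        return t.resp, t.err
    }

    func main() {
        resp := &http.Response{
            StatusCode: http.StatusOK,
            Body:       ioutil.NopCloser(strings.NewReader("blob-in-a-can")),
        }
        client := &http.Client{Transport: &cannedTransport{resp: resp}}
        response, err := client.Get("http://unused.invalid/")
        if err != nil {
            panic(err)
        }
        defer response.Body.Close()
        body, _ := ioutil.ReadAll(response.Body)
        fmt.Println(string(body)) // blob-in-a-can
    }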
- -package gwacl - -import ( - "fmt" - "io" - "io/ioutil" - "net/url" -) - -// checkPathComponents checks that none of the passed components contains any -// special characters, where special means "needs percent-encoding in a URI", -// does not contain any forward slashes, and is not the string "..". -func checkPathComponents(components ...string) { - for _, component := range components { - if component != url.QueryEscape(component) { - panic(fmt.Errorf("'%s' contains URI special characters", component)) - } - if component == ".." { - panic(fmt.Errorf("'..' is not allowed")) - } - } -} - -// readAndClose reads and closes the given ReadCloser. -// -// Trying to read from a nil simply returns nil, no error. -func readAndClose(stream io.ReadCloser) ([]byte, error) { - if stream == nil { - return nil, nil - } - defer stream.Close() - return ioutil.ReadAll(stream) -} - -// addURLQueryParams adds query parameters to a URL (and escapes as needed). -// Parameters are URL, [key, value, [key, value, [...]]]. -// The original URL must be correct, i.e. it should parse without error. -func addURLQueryParams(originalURL string, params ...string) string { - if len(params)%2 != 0 { - panic(fmt.Errorf("got %d parameter argument(s), instead of matched key/value pairs", len(params))) - } - parsedURL, err := url.Parse(originalURL) - if err != nil { - panic(err) - } - query := parsedURL.Query() - - for index := 0; index < len(params); index += 2 { - key := params[index] - value := params[index+1] - query.Add(key, value) - } - - parsedURL.RawQuery = query.Encode() - return parsedURL.String() -} === removed file 'src/launchpad.net/gwacl/utils_test.go' --- src/launchpad.net/gwacl/utils_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/utils_test.go 1970-01-01 00:00:00 +0000 @@ -1,133 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "io" - "io/ioutil" - . "launchpad.net/gocheck" - "net/url" - "strings" -) - -type UtilsSuite struct{} - -var _ = Suite(&UtilsSuite{}) - -func (suite *UtilsSuite) TestCheckPathComponents(c *C) { - checkPathComponents("fred", "bob", "123", "a..b") // All okay. - c.Check( - func() { checkPathComponents("foo^bar") }, - PanicMatches, "'foo\\^bar' contains URI special characters") - c.Check( - func() { checkPathComponents("foo/bar") }, - PanicMatches, "'foo/bar' contains URI special characters") - c.Check( - func() { checkPathComponents("..") }, - PanicMatches, "'[.][.]' is not allowed") -} - -func (*UtilsSuite) TestReadAndCloseReturnsEmptyStringForNil(c *C) { - data, err := readAndClose(nil) - c.Assert(err, IsNil) - c.Check(string(data), Equals, "") -} - -func (*UtilsSuite) TestReadAndCloseReturnsContents(c *C) { - content := "Stream contents." - stream := ioutil.NopCloser(strings.NewReader(content)) - - data, err := readAndClose(stream) - c.Assert(err, IsNil) - - c.Check(string(data), Equals, content) -} - -// fakeStream is a very simple fake implementation of io.ReadCloser. It -// acts like an empty stream, but it tracks whether it's been closed yet. 
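addURLQueryParams above leans on net/url for all of its escaping; the core of it is parse, Values.Add, re-encode. A condensed, standalone illustration of the behaviour TestEscapesParams and TestAddsToExistingParams verify:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        // Parse, add to the existing query, re-encode; url.Values.Encode
        // percent-escapes keys and values on the way out.
        parsed, err := url.Parse("http://example.com?a=one")
        if err != nil {
            panic(err)
        }
        query := parsed.Query()
        query.Add("key&key", "value%value")
        parsed.RawQuery = query.Encode()
        fmt.Println(parsed.String())
        // http://example.com?a=one&key%26key=value%25value
    }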
-type fakeStream struct { - closed bool -} - -func (stream *fakeStream) Read([]byte) (int, error) { - if stream.closed { - panic("Read() from closed fakeStream") - } - return 0, io.EOF -} - -func (stream *fakeStream) Close() error { - stream.closed = true - return nil -} - -func (*UtilsSuite) TestReadAndCloseCloses(c *C) { - stream := &fakeStream{} - - _, err := readAndClose(stream) - c.Assert(err, IsNil) - - c.Check(stream.closed, Equals, true) -} - -type TestAddURLQueryParams struct{} - -var _ = Suite(&TestAddURLQueryParams{}) - -func (*TestAddURLQueryParams) TestUsesBaseURL(c *C) { - baseURL := "http://example.com" - - extendedURL := addURLQueryParams(baseURL, "key", "value") - - parsedURL, err := url.Parse(extendedURL) - c.Assert(err, IsNil) - c.Check(parsedURL.Scheme, Equals, "http") - c.Check(parsedURL.Host, Equals, "example.com") -} - -func (suite *TestAddURLQueryParams) TestEscapesParams(c *C) { - key := "key&key" - value := "value%value" - - uri := addURLQueryParams("http://example.com", key, value) - - parsedURL, err := url.Parse(uri) - c.Assert(err, IsNil) - c.Check(parsedURL.Query()[key], DeepEquals, []string{value}) -} - -func (suite *TestAddURLQueryParams) TestAddsToExistingParams(c *C) { - uri := addURLQueryParams("http://example.com?a=one", "b", "two") - - parsedURL, err := url.Parse(uri) - c.Assert(err, IsNil) - c.Check(parsedURL.Query(), DeepEquals, url.Values{ - "a": {"one"}, - "b": {"two"}, - }) -} - -func (suite *TestAddURLQueryParams) TestAppendsRepeatedParams(c *C) { - uri := addURLQueryParams("http://example.com?foo=bar", "foo", "bar") - c.Check(uri, Equals, "http://example.com?foo=bar&foo=bar") -} - -func (suite *TestAddURLQueryParams) TestAddsMultipleParams(c *C) { - uri := addURLQueryParams("http://example.com", "one", "un", "two", "deux") - parsedURL, err := url.Parse(uri) - c.Assert(err, IsNil) - c.Check(parsedURL.Query(), DeepEquals, url.Values{ - "one": {"un"}, - "two": {"deux"}, - }) -} - -func (suite *TestAddURLQueryParams) TestRejectsOddNumberOfParams(c *C) { - defer func() { - err := recover() - c.Check(err, ErrorMatches, ".*got 1 parameter.*") - }() - addURLQueryParams("http://example.com", "key") - c.Assert("This should have panicked", Equals, "But it didn't.") -} === removed file 'src/launchpad.net/gwacl/vhd_footer.go' --- src/launchpad.net/gwacl/vhd_footer.go 2013-07-11 17:18:27 +0000 +++ src/launchpad.net/gwacl/vhd_footer.go 1970-01-01 00:00:00 +0000 @@ -1,33 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -// This is a pre-defined, base64-encoded 512-byte footer for a Virtual Hard -// Disk (VHD) file. The footer sets the size of the VHD to a fixed 20Mb -// (20972032 bytes) and is intended to be uploaded as the last page in a blob -// of that size. -// -// The end use of this is to have a quick way of defining a fixed-size VHD -// that can be attached to a VM instance. The rest of the file can be -// sparse-filled as necessary with a filesystem to create a final, valid, -// mountable disk. -// -// In case you were wondering *why* you would want to do this, it's the only -// way of making additional data available to a new instance at boot time. -// -// If you want to generate a new one of these (if you need a new size for -// example), the easiest way is to use VirtualBox to define a new one, and -// then do 'tail -c 512 | base64' on that file. 
- -const VHD_SIZE = 20972032 // This is 20Mib + 512 bytes -const VHD_FOOTER = ` -Y29uZWN0aXgAAAACAAEAAP//////////GVKuuHZib3gABAACV2kyawAAAAABQAAAAAAAAAFAAAAC -WgQRAAAAAv//5y4OEjVapHY7QpuodZNf77j6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA -AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=` === removed file 'src/launchpad.net/gwacl/vhd_footer_test.go' --- src/launchpad.net/gwacl/vhd_footer_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/vhd_footer_test.go 1970-01-01 00:00:00 +0000 @@ -1,20 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "encoding/base64" - . "launchpad.net/gocheck" -) - -type testVHDFooter struct{} - -var _ = Suite(&testVHDFooter{}) - -func (s *testVHDFooter) TestValidDecode(c *C) { - var err error - decoded, err := base64.StdEncoding.DecodeString(VHD_FOOTER) - c.Assert(err, IsNil) - c.Assert(512, Equals, len(decoded)) -} === removed file 'src/launchpad.net/gwacl/x509dispatcher.go' --- src/launchpad.net/gwacl/x509dispatcher.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/x509dispatcher.go 1970-01-01 00:00:00 +0000 @@ -1,156 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "bytes" - "fmt" - "io" - "launchpad.net/gwacl/fork/http" - . "launchpad.net/gwacl/logging" - "net/url" -) - -type X509Request struct { - APIVersion string - Method string - URL string - Payload []byte - ContentType string -} - -// newX509RequestGET initializes an X509Request for a GET. You may still need -// to set further values. -func newX509RequestGET(url, apiVersion string) *X509Request { - return &X509Request{ - Method: "GET", - URL: url, - APIVersion: apiVersion, - } -} - -// newX509RequestPOST initializes an X509Request for a POST. You may still -// need to set further values. -func newX509RequestPOST(url, apiVersion string, payload []byte, contentType string) *X509Request { - return &X509Request{ - Method: "POST", - URL: url, - APIVersion: apiVersion, - Payload: payload, - ContentType: contentType, - } -} - -// newX509RequestDELETE initializes an X509Request for a DELETE. -func newX509RequestDELETE(url, apiVersion string) *X509Request { - return &X509Request{ - Method: "DELETE", - URL: url, - APIVersion: apiVersion, - } -} - -// newX509RequestPUT initializes an X509Request for a PUT. You may still -// need to set further values. -func newX509RequestPUT(url, apiVersion string, payload []byte, contentType string) *X509Request { - return &X509Request{ - Method: "PUT", - URL: url, - APIVersion: apiVersion, - Payload: payload, - ContentType: contentType, - } -} - -type x509Response struct { - StatusCode int - // TODO: What exactly do we get back? How will we know its encoding? - Body []byte - Header http.Header -} - -// rewindCloser returns an io.ReadCloser that seeks back -// to the beginning of the reader upon Close being called. 
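The footer test above only asserts that VHD_FOOTER decodes to 512 bytes. A slightly stronger sanity check, shown here on a truncated copy of the constant, is that the decoded bytes open with the "conectix" cookie that marks every VHD footer:

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    // footerPrefix is the first 32 base64 characters of VHD_FOOTER above;
    // decoding the full constant yields all 512 footer bytes.
    const footerPrefix = "Y29uZWN0aXgAAAACAAEAAP//////////"

    func main() {
        decoded, err := base64.StdEncoding.DecodeString(footerPrefix)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%q\n", decoded[:8]) // "conectix", the VHD footer cookie
    }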
-type rewindCloser struct { - io.ReadSeeker -} - -func (c rewindCloser) Close() error { - _, err := c.Seek(0, 0) - return err -} - -func performX509Request(session *x509Session, request *X509Request) (*x509Response, error) { - response := &x509Response{} - - Debugf("Request: %s %s", request.Method, request.URL) - if len(request.Payload) > 0 { - Debugf("Request body:\n%s", request.Payload) - } - - bodyReader := rewindCloser{bytes.NewReader(request.Payload)} - httpRequest, err := http.NewRequest(request.Method, request.URL, bodyReader) - if err != nil { - return nil, err - } - httpRequest.ContentLength = int64(len(request.Payload)) - httpRequest.Header.Set("Content-Type", request.ContentType) - httpRequest.Header.Set("x-ms-version", request.APIVersion) - retrier := session.retryPolicy.getForkedHttpRetrier(session.client) - httpResponse, err := handleRequestRedirects(retrier, httpRequest) - if err != nil { - return nil, err - } - - response.StatusCode = httpResponse.StatusCode - response.Body, err = readAndClose(httpResponse.Body) - if err != nil { - return nil, err - } - response.Header = httpResponse.Header - - Debugf("Response: %d %s", response.StatusCode, http.StatusText(response.StatusCode)) - if response.Header != nil { - buf := bytes.Buffer{} - response.Header.Write(&buf) - Debugf("Response headers:\n%s", buf.String()) - } - if len(response.Body) > 0 { - Debugf("Response body:\n%s", response.Body) - } - - return response, nil -} - -func handleRequestRedirects(retrier *forkedHttpRetrier, request *http.Request) (*http.Response, error) { - const maxRedirects = 10 - // Handle temporary redirects. - redirects := -1 - for { - redirects++ - if redirects >= maxRedirects { - return nil, fmt.Errorf("stopped after %d redirects", redirects) - } - response, err := retrier.RetryRequest(request) - // For GET and HEAD, we cause the request execution - // to return httpRedirectErr if a temporary redirect - // is returned, and then deal with it here. - if err, ok := err.(*url.Error); ok && err.Err == httpRedirectErr { - request.URL, err.Err = url.Parse(err.URL) - if err.Err != nil { - return nil, err - } - continue - } - // For other methods, we must check the response code. - if err == nil && response.StatusCode == http.StatusTemporaryRedirect { - request.URL, err = response.Location() - if err != nil { - return nil, err - } - continue - } - return response, err - } -} === removed file 'src/launchpad.net/gwacl/x509dispatcher_test.go' --- src/launchpad.net/gwacl/x509dispatcher_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/x509dispatcher_test.go 1970-01-01 00:00:00 +0000 @@ -1,265 +0,0 @@ -package gwacl - -import ( - "io/ioutil" - . "launchpad.net/gocheck" - "net/http" - "net/http/httptest" - "time" -) - -type x509DispatcherSuite struct{} - -var _ = Suite(&x509DispatcherSuite{}) - -type Request struct { - *http.Request - BodyContent []byte -} - -// makeRecordingHTTPServer creates an http server (don't forget to Close() it when done) -// that serves at the given base URL, copies incoming requests into the given -// channel, and finally returns the given status code. If body is not nil, it -// will be returned as the request body. -func makeRecordingHTTPServer(requests chan *Request, status int, body []byte, headers http.Header) *httptest.Server { - var server *httptest.Server - returnRequest := func(w http.ResponseWriter, r *http.Request) { - // Capture all the request body content for later inspection. 
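handleRequestRedirects above bounds the loop at ten hops and re-issues the request at each new Location. A stripped-down sketch of that control flow, with do standing in for the retrier and a plain sentinel error in place of the url.Error unwrapping the real code performs:

    package main

    import (
        "errors"
        "fmt"
    )

    // errRedirect stands in for the url.Error the real retrier surfaces
    // on a temporary redirect.
    var errRedirect = errors.New("redirect")

    // followRedirects re-issues a request at each new location, giving up
    // after a fixed number of hops, like handleRequestRedirects above.
    func followRedirects(do func(url string) (string, error), url string) (string, error) {
        const maxRedirects = 10
        for redirects := 0; redirects < maxRedirects; redirects++ {
            result, err := do(url)
            if err == errRedirect {
                url = result // the Location of the redirect response
                continue
            }
            return result, err
        }
        return "", fmt.Errorf("stopped after %d redirects", maxRedirects)
    }

    func main() {
        hops := 0
        do := func(url string) (string, error) {
            if hops < 2 {
                hops++
                return url + "/next", errRedirect
            }
            return "final:" + url, nil
        }
        result, err := followRedirects(do, "http://example.com/foo")
        fmt.Println(result, err) // final:http://example.com/foo/next/next <nil>
    }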
- requestBody, err := ioutil.ReadAll(r.Body) - if err != nil { - panic(err) - } - requests <- &Request{r, requestBody} - // Set a default Location so we can test redirect loops easily. - w.Header().Set("Location", server.URL+r.URL.Path) - for header, values := range headers { - for _, value := range values { - w.Header().Set(header, value) - } - } - w.WriteHeader(status) - if body != nil { - w.Write(body) - } - } - serveMux := http.NewServeMux() - serveMux.HandleFunc("/", returnRequest) - server = httptest.NewServer(serveMux) - return server -} - -func (*x509DispatcherSuite) TestGetRequestDoesHTTPGET(c *C) { - httpRequests := make(chan *Request, 1) - server := makeRecordingHTTPServer(httpRequests, http.StatusOK, nil, nil) - defer server.Close() - // No real certificate needed since we're testing on http, not https. - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - request := newX509RequestGET(server.URL+path, version) - - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - httpRequest := <-httpRequests - c.Check(httpRequest.Method, Equals, "GET") - c.Check(httpRequest.Header[http.CanonicalHeaderKey("X-Ms-Version")], DeepEquals, []string{version}) - c.Check(httpRequest.URL.String(), Equals, path) - c.Check(httpRequest.BodyContent, HasLen, 0) -} - -func (*x509DispatcherSuite) TestRetryPolicyCausesRequestsToBeRetried(c *C) { - nbRetries := 2 - nbRequests := nbRetries + 1 - httpRequests := make(chan *Request, nbRequests) - server := makeRecordingHTTPServer(httpRequests, http.StatusConflict, nil, nil) - defer server.Close() - // No real certificate needed since we're testing on http, not https. - retryPolicy := RetryPolicy{NbRetries: nbRetries, HttpStatusCodes: []int{http.StatusConflict}, Delay: time.Nanosecond} - session, err := newX509Session("subscriptionid", "", "West US", retryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - request := newX509RequestGET(server.URL+path, version) - - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusConflict) - - // nbRequests request were performed. - c.Check(httpRequests, HasLen, nbRequests) -} - -func (*x509DispatcherSuite) TestPostRequestDoesHTTPPOST(c *C) { - httpRequests := make(chan *Request, 1) - requestBody := []byte{1, 2, 3} - responseBody := []byte{4, 5, 6} - requestContentType := "bogusContentType" - server := makeRecordingHTTPServer(httpRequests, http.StatusOK, responseBody, nil) - defer server.Close() - // No real certificate needed since we're testing on http, not https. 
- session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - request := newX509RequestPOST(server.URL+path, version, requestBody, requestContentType) - - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - c.Check(response.Body, DeepEquals, responseBody) - - httpRequest := <-httpRequests - c.Check(httpRequest.Header[http.CanonicalHeaderKey("Content-Type")], DeepEquals, []string{requestContentType}) - c.Check(httpRequest.Header[http.CanonicalHeaderKey("X-Ms-Version")], DeepEquals, []string{request.APIVersion}) - c.Check(httpRequest.Method, Equals, "POST") - c.Check(httpRequest.URL.String(), Equals, path) - c.Check(httpRequest.BodyContent, DeepEquals, requestBody) -} - -func (*x509DispatcherSuite) TestDeleteRequestDoesHTTPDELETE(c *C) { - httpRequests := make(chan *Request, 1) - server := makeRecordingHTTPServer(httpRequests, http.StatusOK, nil, nil) - defer server.Close() - // No real certificate needed since we're testing on http, not https. - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - request := newX509RequestDELETE(server.URL+path, version) - - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - - httpRequest := <-httpRequests - c.Check(httpRequest.Method, Equals, "DELETE") - c.Check(httpRequest.Header[http.CanonicalHeaderKey("X-Ms-Version")], DeepEquals, []string{version}) - c.Check(httpRequest.URL.String(), Equals, path) - c.Check(httpRequest.BodyContent, HasLen, 0) -} - -func (*x509DispatcherSuite) TestPutRequestDoesHTTPPUT(c *C) { - httpRequests := make(chan *Request, 1) - requestBody := []byte{1, 2, 3} - responseBody := []byte{4, 5, 6} - server := makeRecordingHTTPServer(httpRequests, http.StatusOK, responseBody, nil) - defer server.Close() - // No real certificate needed since we're testing on http, not https. 
- session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - request := newX509RequestPUT(server.URL+path, version, requestBody, "application/octet-stream") - - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusOK) - c.Check(response.Body, DeepEquals, responseBody) - - httpRequest := <-httpRequests - c.Check(httpRequest.Method, Equals, "PUT") - c.Check(httpRequest.Header[http.CanonicalHeaderKey("X-Ms-Version")], DeepEquals, []string{version}) - c.Check(httpRequest.URL.String(), Equals, path) - c.Check(httpRequest.BodyContent, DeepEquals, requestBody) -} - -func (*x509DispatcherSuite) TestRequestRegistersHeader(c *C) { - customHeader := http.CanonicalHeaderKey("x-gwacl-test") - customValue := []string{"present"} - returnRequest := func(w http.ResponseWriter, r *http.Request) { - w.Header()[customHeader] = customValue - w.WriteHeader(http.StatusOK) - } - serveMux := http.NewServeMux() - serveMux.HandleFunc("/", returnRequest) - server := httptest.NewServer(serveMux) - defer server.Close() - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - request := newX509RequestGET(server.URL+path, "testversion") - - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - - c.Check(response.Header[customHeader], DeepEquals, customValue) -} - -func (*x509DispatcherSuite) TestRequestsFollowRedirects(c *C) { - httpRequests := make(chan *Request, 2) - serverConflict := makeRecordingHTTPServer(httpRequests, http.StatusConflict, nil, nil) - defer serverConflict.Close() - redirPath := "/else/where" - responseHeaders := make(http.Header) - responseHeaders.Set("Location", serverConflict.URL+redirPath) - serverRedir := makeRecordingHTTPServer(httpRequests, http.StatusTemporaryRedirect, nil, responseHeaders) - defer serverRedir.Close() - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - - // Test both GET and DELETE: DELETE does not normally - // automatically follow redirects, however Azure requires - // us to. 
- requests := []*X509Request{ - newX509RequestGET(serverRedir.URL+path, version), - newX509RequestDELETE(serverRedir.URL+path, version), - } - for _, request := range requests { - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusConflict) - c.Assert(httpRequests, HasLen, 2) - c.Assert((<-httpRequests).URL.String(), Equals, path) - c.Assert((<-httpRequests).URL.String(), Equals, redirPath) - } -} - -func (*x509DispatcherSuite) TestRedirectRewindsBody(c *C) { - httpRequests := make(chan *Request, 2) - serverConflict := makeRecordingHTTPServer(httpRequests, http.StatusConflict, nil, nil) - defer serverConflict.Close() - redirPath := "/else/where" - responseHeaders := make(http.Header) - responseHeaders.Set("Location", serverConflict.URL+redirPath) - serverRedir := makeRecordingHTTPServer(httpRequests, http.StatusTemporaryRedirect, nil, responseHeaders) - defer serverRedir.Close() - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - content := []byte("ponies") - contentType := "text/plain" - - request := newX509RequestPOST(serverRedir.URL+path, version, content, contentType) - response, err := performX509Request(session, request) - c.Assert(err, IsNil) - c.Assert(response.StatusCode, Equals, http.StatusConflict) - c.Assert(httpRequests, HasLen, 2) - c.Assert((<-httpRequests).BodyContent, DeepEquals, content) - c.Assert((<-httpRequests).BodyContent, DeepEquals, content) -} - -func (*x509DispatcherSuite) TestRequestsLimitRedirects(c *C) { - httpRequests := make(chan *Request, 10) - serverRedir := makeRecordingHTTPServer(httpRequests, http.StatusTemporaryRedirect, nil, nil) - defer serverRedir.Close() - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - path := "/foo/bar" - version := "test-version" - request := newX509RequestGET(serverRedir.URL+path, version) - - response, err := performX509Request(session, request) - c.Assert(err, ErrorMatches, "stopped after 10 redirects") - c.Assert(response, IsNil) - c.Assert(httpRequests, HasLen, 10) - close(httpRequests) - for req := range httpRequests { - c.Assert(req.URL.String(), Equals, path) - } -} === removed file 'src/launchpad.net/gwacl/x509session.go' --- src/launchpad.net/gwacl/x509session.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/x509session.go 1970-01-01 00:00:00 +0000 @@ -1,177 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "errors" - "fmt" - "launchpad.net/gwacl/fork/http" - "launchpad.net/gwacl/fork/tls" - "net/url" - "strings" -) - -type x509Session struct { - subscriptionId string - client *http.Client - baseURL *url.URL - retryPolicy RetryPolicy -} - -// httpRedirectErr is a unique error used to prevent -// net/http from automatically following redirects. -// See commentary on CheckRedirect in newX509Session. -var httpRedirectErr = errors.New("redirect") - -// newX509Session creates and returns a new x509Session based on credentials -// and X509 certificate files. -// For testing purposes, certFile can be passed as the empty string and it -// will be ignored. 
-func newX509Session(subscriptionId, certFile, location string, retryPolicy RetryPolicy) (*x509Session, error) { - certs := []tls.Certificate{} - if certFile != "" { - cert, err := tls.LoadX509KeyPair(certFile, certFile) - if err != nil { - return nil, err - } - certs = append(certs, cert) - } - return newX509SessionCerts(subscriptionId, certs, location, retryPolicy) -} - -// newX509SessionCertData creates and returns a new x509Session based on credentials -// and X509 certificate byte arrays. -func newX509SessionCertData(subscriptionId string, cert, key []byte, location string, retryPolicy RetryPolicy) (*x509Session, error) { - certs := []tls.Certificate{} - if cert != nil && key != nil { - cert, err := tls.X509KeyPair(cert, key) - if err != nil { - return nil, err - } - certs = append(certs, cert) - } - return newX509SessionCerts(subscriptionId, certs, location, retryPolicy) -} - -// newX509SessionCerts creates and returns a new x509Session based on credentials -// and X509 certificate files. -func newX509SessionCerts(subscriptionId string, certs []tls.Certificate, location string, retryPolicy RetryPolicy) (*x509Session, error) { - client := http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - Certificates: certs, - }, - // See https://code.google.com/p/go/issues/detail?id=4677 - // We need to force the connection to close each time so that we don't - // hit the above Go bug. - DisableKeepAlives: true, - }, - // See https://code.google.com/p/go/issues/detail?id=4800 - // We need to follow temporary redirects (307s), while - // retaining headers. We also need to follow redirects - // for POST and DELETE automatically. - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return httpRedirectErr - }, - } - - endpointURL := GetEndpoint(location).ManagementAPI() - baseURL, err := url.Parse(endpointURL) - if err != nil { - panic(fmt.Errorf("cannot parse Azure endpoint URL '%s' - %v", endpointURL, err)) - } - - session := x509Session{ - subscriptionId: subscriptionId, - client: &client, - baseURL: baseURL, - retryPolicy: retryPolicy, - } - return &session, nil -} - -// composeURL puts together a URL for an item on the Azure API based on -// the starting point used by the session, and a given relative path from -// there. -func (session *x509Session) composeURL(path string) string { - if strings.HasPrefix(path, "/") { - panic(fmt.Errorf("got absolute API path '%s' instead of relative one", path)) - } - escapedID := url.QueryEscape(session.subscriptionId) - pathURL, err := url.Parse(escapedID + "/" + path) - if err != nil { - panic(err) - } - return session.baseURL.ResolveReference(pathURL).String() -} - -// _X509Dispatcher is the function used to dispatch requests. We call the -// function through this pointer, not directly, so that tests can inject -// fakes. -var _X509Dispatcher = performX509Request - -// getServerError returns a ServerError matching the given server response -// status, or nil if the server response indicates success. -func (session *x509Session) getServerError(status int, body []byte, description string) error { - if status < http.StatusOK || status >= http.StatusMultipleChoices { - return newHTTPError(status, body, description) - } - return nil -} - -// get performs a GET request to the Azure management API. -// It returns the response body and/or an error. If the error is a -// ServerError, the returned body will be the one received from the server. -// For any other kind of error, the returned body will be nil. 
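newX509SessionCerts above configures CheckRedirect to always fail with a sentinel, which stops net/http from chasing redirects on its own and lets performX509Request re-send the request with its headers and body intact. A self-contained demonstration of the trick against an httptest server:

    package main

    import (
        "errors"
        "fmt"
        "net/http"
        "net/http/httptest"
        "net/url"
    )

    var errRedirect = errors.New("redirect")

    func main() {
        // A server that always answers with a temporary redirect.
        server := httptest.NewServer(http.HandlerFunc(
            func(w http.ResponseWriter, r *http.Request) {
                http.Redirect(w, r, "/else/where", http.StatusTemporaryRedirect)
            }))
        defer server.Close()

        client := &http.Client{
            // Returning a sentinel here keeps net/http from following the
            // redirect itself, so the caller sees it instead.
            CheckRedirect: func(*http.Request, []*http.Request) error {
                return errRedirect
            },
        }

        _, err := client.Get(server.URL)
        var urlErr *url.Error
        if errors.As(err, &urlErr) && urlErr.Err == errRedirect {
            fmt.Println("redirect intercepted, new location:", urlErr.URL)
        }
    }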
-func (session *x509Session) get(path, apiVersion string) (*x509Response, error) { - request := newX509RequestGET(session.composeURL(path), apiVersion) - response, err := _X509Dispatcher(session, request) - if err != nil { - return nil, err - } - err = session.getServerError(response.StatusCode, response.Body, "GET request failed") - return response, err -} - -// post performs a POST request to the Azure management API. -// It returns the response body and/or an error. If the error is a -// ServerError, the returned body will be the one received from the server. -// For any other kind of error, the returned body will be nil. -// Be aware that Azure may perform POST operations asynchronously. If you are -// not sure, call blockUntilCompleted() on the response. -func (session *x509Session) post(path, apiVersion string, body []byte, contentType string) (*x509Response, error) { - request := newX509RequestPOST(session.composeURL(path), apiVersion, body, contentType) - response, err := _X509Dispatcher(session, request) - if err != nil { - return nil, err - } - err = session.getServerError(response.StatusCode, response.Body, "POST request failed") - return response, err -} - -// delete performs a DELETE request to the Azure management API. -// Be aware that Azure may perform DELETE operations asynchronously. If you -// are not sure, call blockUntilCompleted() on the response. -func (session *x509Session) delete(path, apiVersion string) (*x509Response, error) { - request := newX509RequestDELETE(session.composeURL(path), apiVersion) - response, err := _X509Dispatcher(session, request) - if err != nil { - return response, err - } - err = session.getServerError(response.StatusCode, response.Body, "DELETE request failed") - return response, err -} - -// put performs a PUT request to the Azure management API. -// Be aware that Azure may perform PUT operations asynchronously. If you are -// not sure, call blockUntilCompleted() on the response. -func (session *x509Session) put(path, apiVersion string, body []byte, contentType string) (*x509Response, error) { - request := newX509RequestPUT(session.composeURL(path), apiVersion, body, contentType) - response, err := _X509Dispatcher(session, request) - if err != nil { - return nil, err - } - err = session.getServerError(response.StatusCode, response.Body, "PUT request failed") - return response, err -} === removed file 'src/launchpad.net/gwacl/x509session_test.go' --- src/launchpad.net/gwacl/x509session_test.go 2015-01-08 15:17:37 +0000 +++ src/launchpad.net/gwacl/x509session_test.go 1970-01-01 00:00:00 +0000 @@ -1,346 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - . "launchpad.net/gocheck" - "math/big" - "net/http" - "os" - "time" -) - -// defaultManagement is the international management API for Azure. -// (Mainland China gets a different URL). -const defaultManagement = "https://management.core.windows.net/" - -// x509DispatcherFixture records the current x509 dispatcher before a test, -// and restores it after. This gives your test the freedom to replace the -// dispatcher with test doubles, using any of the rig*Dispatcher functions. -// Call the fixture's SetUpTest/TearDownTest methods before/after your test, -// or if you have no other setup/teardown methods, just embed the fixture in -// your test suite. 
-type x509DispatcherFixture struct { - oldDispatcher func(*x509Session, *X509Request) (*x509Response, error) -} - -func (suite *x509DispatcherFixture) SetUpTest(c *C) { - // Record the original X509 dispatcher. Will be restored at the end of - // each test. - suite.oldDispatcher = _X509Dispatcher -} - -func (suite *x509DispatcherFixture) TearDownTest(c *C) { - // Restore old dispatcher. - _X509Dispatcher = suite.oldDispatcher -} - -type x509SessionSuite struct { - x509DispatcherFixture -} - -var _ = Suite(&x509SessionSuite{}) - -// Create a cert and pem file in a temporary dir in /tmp and return the -// names of the files. The caller is responsible for cleaning up the files. -func makeX509Certificate() (string, string) { - // Code is shamelessly stolen from - // http://golang.org/src/pkg/crypto/tls/generate_cert.go - priv, err := rsa.GenerateKey(rand.Reader, 1024) - if err != nil { - panic(fmt.Errorf("Failed to generate rsa key: %v", err)) - } - - // Create a template for x509.CreateCertificate. - now := time.Now() - template := x509.Certificate{ - SerialNumber: new(big.Int).SetInt64(0), - Subject: pkix.Name{ - CommonName: "localhost", - Organization: []string{"Bogocorp"}, - }, - NotBefore: now.Add(-5 * time.Minute).UTC(), - NotAfter: now.AddDate(1, 0, 0).UTC(), // valid for 1 year. - SubjectKeyId: []byte{1, 2, 3, 4}, - KeyUsage: x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature, - } - - // Create the certificate itself. - derBytes, err := x509.CreateCertificate( - rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - panic(fmt.Errorf("Failed to generate x509 certificate: %v", err)) - } - - // Write the certificate file out. - dirname := os.TempDir() + "/" + MakeRandomString(10) - os.Mkdir(dirname, 0700) - certFile := dirname + "/cert.pem" - certOut, err := os.Create(certFile) - if err != nil { - panic(fmt.Errorf("Failed to create %s: %v", certFile, err)) - } - pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - certOut.Close() - - // Write the key file out. - keyFile := dirname + "/key.pem" - keyOut, err := os.OpenFile( - keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - panic(fmt.Errorf("Failed to create %s: %v", keyFile, err)) - } - pem.Encode( - keyOut, - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(priv)}) - keyOut.Close() - - return certFile, keyFile -} - -func (suite *x509SessionSuite) TestNewX509Session(c *C) { - session, err := newX509Session("subscriptionid", "", "China East", NoRetryPolicy) - c.Assert(err, IsNil) - c.Assert(session.baseURL, NotNil) - c.Check(session.baseURL.String(), Equals, GetEndpoint("China East").ManagementAPI()) -} - -func (suite *x509SessionSuite) TestComposeURLComposesURLWithRelativePath(c *C) { - const subscriptionID = "subscriptionid" - const path = "foo/bar" - session, err := newX509Session(subscriptionID, "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - - url := session.composeURL(path) - - c.Check(url, Matches, defaultManagement+subscriptionID+"/"+path) -} - -func (suite *x509SessionSuite) TestComposeURLRejectsAbsolutePath(c *C) { - defer func() { - err := recover() - c.Assert(err, NotNil) - c.Check(err, ErrorMatches, ".*absolute.*path.*") - }() - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - - // This panics because we're passing an absolute path. 
- session.composeURL("/foo") -} - -func (suite *x509SessionSuite) TestGetServerErrorProducesServerError(c *C) { - msg := "huhwhat" - status := http.StatusNotFound - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - - err = session.getServerError(status, []byte{}, msg) - c.Assert(err, NotNil) - - c.Check(err, ErrorMatches, ".*"+msg+".*") - serverError := err.(*ServerError) - c.Check(serverError.StatusCode(), Equals, status) -} - -func (suite *x509SessionSuite) TestGetServerErrorLikes20x(c *C) { - goodCodes := []int{ - http.StatusOK, - http.StatusNoContent, - } - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - - for _, status := range goodCodes { - c.Check(session.getServerError(status, []byte{}, ""), IsNil) - } -} - -func (suite *x509SessionSuite) TestGetServerReturnsErrorsForFailures(c *C) { - badCodes := []int{ - http.StatusSwitchingProtocols, - http.StatusBadRequest, - http.StatusPaymentRequired, - http.StatusForbidden, - http.StatusGone, - http.StatusInternalServerError, - http.StatusNotImplemented, - } - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - - for _, status := range badCodes { - c.Check(session.getServerError(status, []byte{}, ""), NotNil) - } -} - -func (suite *x509SessionSuite) TestGetIssuesRequest(c *C) { - subscriptionID := "subscriptionID" - uri := "resource" - session, err := newX509Session(subscriptionID, "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - // Record incoming requests, and have them return a given reply. - fixedResponse := x509Response{ - StatusCode: http.StatusOK, - Body: []byte("Response body"), - } - rigFixedResponseDispatcher(&fixedResponse) - recordedRequests := make([]*X509Request, 0) - rigRecordingDispatcher(&recordedRequests) - - version := "test-version" - receivedResponse, err := session.get(uri, version) - c.Assert(err, IsNil) - - c.Assert(len(recordedRequests), Equals, 1) - request := recordedRequests[0] - c.Check(request.URL, Equals, defaultManagement+subscriptionID+"/"+uri) - c.Check(request.Method, Equals, "GET") - c.Check(request.APIVersion, Equals, version) - c.Check(*receivedResponse, DeepEquals, fixedResponse) -} - -func (suite *x509SessionSuite) TestGetReportsClientSideError(c *C) { - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - msg := "could not dispatch request" - rigFailingDispatcher(fmt.Errorf(msg)) - - body, err := session.get("flop", "version") - c.Assert(err, NotNil) - - c.Check(body, IsNil) - c.Check(err, ErrorMatches, ".*"+msg+".*") -} - -func (suite *x509SessionSuite) TestGetReportsServerSideError(c *C) { - session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy) - fixedResponse := x509Response{ - StatusCode: http.StatusForbidden, - Body: []byte("Body"), - } - rigFixedResponseDispatcher(&fixedResponse) - - response, err := session.get("fail", "version") - c.Assert(err, NotNil) - - serverError := err.(*ServerError) - c.Check(serverError.StatusCode(), Equals, fixedResponse.StatusCode) - c.Check(*response, DeepEquals, fixedResponse) -} - -func (suite *x509SessionSuite) TestPostIssuesRequest(c *C) { - subscriptionID := "subscriptionID" - uri := "resource" - version := "test-version" - requestBody := []byte("Request body") - requestContentType := "bogusContentType" - session, err := newX509Session(subscriptionID, "", "West US", NoRetryPolicy) - c.Assert(err, IsNil) - // Record incoming requests, and have 
them return a given reply.
-	fixedResponse := x509Response{
-		StatusCode: http.StatusOK,
-		Body: []byte("Response body"),
-	}
-	rigFixedResponseDispatcher(&fixedResponse)
-	recordedRequests := make([]*X509Request, 0)
-	rigRecordingDispatcher(&recordedRequests)
-
-	receivedResponse, err := session.post(uri, version, requestBody, requestContentType)
-	c.Assert(err, IsNil)
-
-	c.Assert(len(recordedRequests), Equals, 1)
-	request := recordedRequests[0]
-	c.Check(request.URL, Equals, defaultManagement+subscriptionID+"/"+uri)
-	c.Check(request.Method, Equals, "POST")
-	c.Check(request.APIVersion, Equals, version)
-	c.Check(request.ContentType, Equals, requestContentType)
-	c.Check(request.Payload, DeepEquals, requestBody)
-	c.Check(*receivedResponse, DeepEquals, fixedResponse)
-}
-
-func (suite *x509SessionSuite) TestPostReportsClientSideError(c *C) {
-	session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy)
-	msg := "could not dispatch request"
-	rigFailingDispatcher(fmt.Errorf(msg))
-
-	body, err := session.post("flop", "version", []byte("body"), "contentType")
-	c.Assert(err, NotNil)
-
-	c.Check(body, IsNil)
-	c.Check(err, ErrorMatches, ".*"+msg+".*")
-}
-
-func (suite *x509SessionSuite) TestPostReportsServerSideError(c *C) {
-	session, err := newX509Session("subscriptionid", "", "West US", NoRetryPolicy)
-	fixedResponse := x509Response{
-		StatusCode: http.StatusForbidden,
-		Body: []byte("Body"),
-	}
-	rigFixedResponseDispatcher(&fixedResponse)
-
-	response, err := session.post("fail", "version", []byte("request body"), "contentType")
-	c.Assert(err, NotNil)
-
-	serverError := err.(*ServerError)
-	c.Check(serverError.StatusCode(), Equals, fixedResponse.StatusCode)
-	c.Check(*response, DeepEquals, fixedResponse)
-}
-
-func (suite *x509SessionSuite) TestDeleteIssuesRequest(c *C) {
-	subscriptionID := "subscriptionID"
-	uri := "resource"
-	version := "test-version"
-	session, err := newX509Session(subscriptionID, "", "West US", NoRetryPolicy)
-	c.Assert(err, IsNil)
-	// Record incoming requests, and have them return a given reply.
-	fixedResponse := x509Response{StatusCode: http.StatusOK}
-	rigFixedResponseDispatcher(&fixedResponse)
-	recordedRequests := make([]*X509Request, 0)
-	rigRecordingDispatcher(&recordedRequests)
-
-	response, err := session.delete(uri, version)
-	c.Assert(err, IsNil)
-
-	c.Check(*response, DeepEquals, fixedResponse)
-	c.Assert(len(recordedRequests), Equals, 1)
-	request := recordedRequests[0]
-	c.Check(request.URL, Equals, defaultManagement+subscriptionID+"/"+uri)
-	c.Check(request.Method, Equals, "DELETE")
-	c.Check(request.APIVersion, Equals, version)
-}
-
-func (suite *x509SessionSuite) TestPutIssuesRequest(c *C) {
-	subscriptionID := "subscriptionID"
-	uri := "resource"
-	version := "test-version"
-	requestBody := []byte("Request body")
-	session, err := newX509Session(subscriptionID, "", "West US", NoRetryPolicy)
-	c.Assert(err, IsNil)
-	// Record incoming requests, and have them return a given reply.
-	fixedResponse := x509Response{
-		StatusCode: http.StatusOK,
-	}
-	rigFixedResponseDispatcher(&fixedResponse)
-	recordedRequests := make([]*X509Request, 0)
-	rigRecordingDispatcher(&recordedRequests)
-
-	_, err = session.put(uri, version, requestBody, "text/plain")
-	c.Assert(err, IsNil)
-
-	c.Assert(len(recordedRequests), Equals, 1)
-	request := recordedRequests[0]
-	c.Check(request.URL, Equals, defaultManagement+subscriptionID+"/"+uri)
-	c.Check(request.Method, Equals, "PUT")
-	c.Check(request.APIVersion, Equals, version)
-	c.Check(request.Payload, DeepEquals, requestBody)
-}
=== removed file 'src/launchpad.net/gwacl/xmlobjects.go'
--- src/launchpad.net/gwacl/xmlobjects.go	2015-10-23 18:28:45 +0000
+++ src/launchpad.net/gwacl/xmlobjects.go	1970-01-01 00:00:00 +0000
@@ -1,1194 +0,0 @@
-// Copyright 2013 Canonical Ltd. This software is licensed under the
-// GNU Lesser General Public License version 3 (see the file COPYING).
-
-package gwacl
-
-import (
-	"encoding/base64"
-	"encoding/xml"
-	"fmt"
-	"net/url"
-	"regexp"
-	"sort"
-	"strings"
-	"time"
-)
-
-// It's impossible to have any kind of common method inherited into all the
-// various serializable objects because the receiver of the method is the
-// wrong class which confuses the xml marshaller. Hence, this mess.
-type AzureObject interface {
-	Serialize() (string, error)
-}
-
-// marshalXML is a wrapper for serializing objects to XML in the visual layout
-// that gwacl prefers.
-func marshalXML(obj interface{}) ([]byte, error) {
-	return xml.MarshalIndent(obj, "", " ")
-}
-
-func toxml(obj AzureObject) (string, error) {
-	out, err := marshalXML(obj)
-	if err != nil {
-		return "", err
-	}
-	return string(out), nil
-}
-
-// CertificateSetting specifies the parameters for the certificate to
-// provision to the new Virtual Machine.
-type CertificateSetting struct {
-	StoreLocation string
-	StoreName string
-	Thumbprint string
-}
-
-// WinRMListener specifies the protocol and certificate information for a WinRM
-// listener.
-type WinRMListener struct {
-	Protocol WinRMProtocol
-	CertificateThumbprint string `xml:",omitempty"`
-}
-
-type WinRMProtocol string
-
-// Enum values for WinRMProtocol
-const (
-	WinRMProtocolHTTP WinRMProtocol = "Http"
-	WinRMProtocolHTTPS WinRMProtocol = "Https"
-)
-
-//
-// ConfigurationSet bits
-//
-
-const (
-	CONFIG_SET_LINUX_PROVISIONING = "LinuxProvisioningConfiguration"
-	CONFIG_SET_WINDOWS_PROVISIONING = "WindowsProvisioningConfiguration"
-	CONFIG_SET_NETWORK = "NetworkConfiguration"
-)
-
-// A ConfigurationSet object can be different things depending on its 'type'.
-// The types we currently support are:
-// - LinuxProvisioningConfigurationSet: configuration of a Linux VM
-// - NetworkConfiguration: configuration of the network of a VM
-type ConfigurationSet struct {
-	ConfigurationSetType string `xml:"ConfigurationSetType"` // "ConfigurationSet"
-
-	// LinuxProvisioningConfiguration fields.
-	Hostname string `xml:"HostName,omitempty"`
-	Username string `xml:"UserName,omitempty"`
-	Password string `xml:"UserPassword,omitempty"`
-	DisableSSHPasswordAuthentication string `xml:"DisableSshPasswordAuthentication,omitempty"`
-
-	// WindowsProvisioningConfiguration fields.
-	ComputerName string `xml:"ComputerName,omitempty"`
-	AdminPassword string `xml:"AdminPassword,omitempty"`
-	EnableAutomaticUpdates string `xml:"EnableAutomaticUpdates,omitempty"`
-	TimeZone string `xml:"TimeZone,omitempty"`
-	StoredCertificateSettings []CertificateSetting `xml:"StoredCertificateSettings,omitempty"`
-	WinRMListeners *WinRMListener `xml:"WinRM>Listeners>Listener,omitempty"`
-	AdminUsername string `xml:"AdminUsername,omitempty"`
-	AdditionalUnattendContent string `xml:"AdditionalUnattendContent,omitempty"`
-
-	CustomData string `xml:"CustomData,omitempty"`
-	// NetworkConfiguration fields.
-	// We use slice pointers to work around a Go bug:
-	// https://code.google.com/p/go/issues/detail?id=4168
-	// We need the whole 'InputEndpoints' and 'SubnetNames' elements to be omitted
-	// when no InputEndpoint objects are present (this happens when the
-	// ConfigurationSet object has a LinuxProvisioningConfiguration type for
-	// instance).
-	InputEndpoints *[]InputEndpoint `xml:"InputEndpoints>InputEndpoint,omitempty"`
-	SubnetNames *[]string `xml:"SubnetNames>SubnetName,omitempty"`
-	StaticVirtualNetworkIPAddress string `xml:"StaticVirtualNetworkIPAddress,omitempty"`
-	NetworkSecurityGroup string `xml:"NetworkSecurityGroup,omitempty"`
-	// TODO(axw) PublicIPs
-	// TODO(axw) NetworkInterfaces
-}
-
-func (c *ConfigurationSet) inputEndpoints() []InputEndpoint {
-	return *c.InputEndpoints
-}
-
-func (c *ConfigurationSet) Serialize() (string, error) {
-	return toxml(c)
-}
-
-// NewLinuxProvisioningConfigurationSet creates and returns a ConfigurationSet
-// of type "LinuxProvisioningConfiguration" which is used when deploying a
-// Linux VM instance. Note that CustomData is passed to Azure *as-is*, which
-// also stores it as passed, so consider base64 encoding it.
-func NewLinuxProvisioningConfigurationSet(
-	Hostname, Username, Password, CustomData string,
-	DisableSSHPasswordAuthentication string) *ConfigurationSet {
-	return &ConfigurationSet{
-		ConfigurationSetType: CONFIG_SET_LINUX_PROVISIONING,
-		Hostname: Hostname,
-		Username: Username,
-		Password: Password,
-		CustomData: CustomData,
-		DisableSSHPasswordAuthentication: DisableSSHPasswordAuthentication,
-	}
-}
-
-func NewWindowsProvisioningConfigurationSet(
-	ComputerName, AdminPassword, EnableAutomaticUpdates string, TimeZone string,
-	StoredCertificateSettings []CertificateSetting, WinRMListeners *WinRMListener,
-	AdminUsername, AdditionalUnattendContent, CustomData string) *ConfigurationSet {
-	return &ConfigurationSet{
-		ConfigurationSetType: CONFIG_SET_WINDOWS_PROVISIONING,
-		ComputerName: ComputerName,
-		AdminPassword: AdminPassword,
-		EnableAutomaticUpdates: EnableAutomaticUpdates,
-		TimeZone: TimeZone,
-		StoredCertificateSettings: StoredCertificateSettings,
-		WinRMListeners: WinRMListeners,
-		AdminUsername: AdminUsername,
-		CustomData: CustomData,
-		AdditionalUnattendContent: AdditionalUnattendContent,
-	}
-}
-
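// Illustrative sketch, not part of gwacl: how a caller builds and serializes
// a Linux provisioning configuration with the constructor above. The host
// name and credentials are placeholder values; CustomData is base64-encoded
// by the caller, as the doc comment recommends.
func ExampleNewLinuxProvisioningConfigurationSet() {
	customData := base64.StdEncoding.EncodeToString([]byte("#cloud-config\n"))
	cs := NewLinuxProvisioningConfigurationSet(
		"host01", "ubuntu", "s3cret", customData, "true")
	xmlText, err := cs.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(xmlText)
}

-// NewNetworkConfigurationSet creates a ConfigurationSet of type "NetworkConfiguration".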
-func NewNetworkConfigurationSet(
-	inputEndpoints []InputEndpoint, subnetNames []string) *ConfigurationSet {
-	return &ConfigurationSet{
-		ConfigurationSetType: CONFIG_SET_NETWORK,
-		InputEndpoints: &inputEndpoints,
-		SubnetNames: &subnetNames,
-	}
-}
-
-//
-// InputEndpoint bits
-//
-
-type LoadBalancerProbe struct {
-	Path string `xml:"Path,omitempty"`
-	Port int `xml:"Port"` // Not uint16; see https://bugs.launchpad.net/juju-core/+bug/1201880
-	Protocol string `xml:"Protocol"`
-}
-
-type InputEndpoint struct {
-	LoadBalancedEndpointSetName string `xml:"LoadBalancedEndpointSetName,omitempty"`
-	LocalPort int `xml:"LocalPort"` // Not uint16; see https://bugs.launchpad.net/juju-core/+bug/1201880
-	Name string `xml:"Name"`
-	Port int `xml:"Port"` // Not uint16; see https://bugs.launchpad.net/juju-core/+bug/1201880
-	LoadBalancerProbe *LoadBalancerProbe `xml:"LoadBalancerProbe,omitempty"`
-	Protocol string `xml:"Protocol"` // TCP or UDP
-	VIP string `xml:"Vip,omitempty"`
-}
-
-func (c *InputEndpoint) Serialize() (string, error) {
-	return toxml(c)
-}
-
-//
-// Images bits
-//
-
-// Images is a series of OSImages.
-type Images struct {
-	Images []OSImage `xml:"OSImage"`
-}
-
-func (i *Images) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, i)
-}
-
-var canonicalPublisherName = "Canonical"
-var imageFamilyFormatRegexp = "^Ubuntu Server %s.*$"
-
-func (images *Images) Len() int {
-	return len(images.Images)
-}
-
-func (images *Images) Swap(i, j int) {
-	images.Images[i], images.Images[j] = images.Images[j], images.Images[i]
-}
-
-// Less returns true if the image at index i is newer than the one at index j,
-// comparing by PublishedDate. This function is used by sort.Sort().
-func (images *Images) Less(i, j int) bool {
-	// We need to implement the sort interface, so Less cannot return an error.
-	// We panic if one of the dates cannot be parsed, and the calling method
-	// will recover from this.
-	dateStringI := images.Images[i].PublishedDate
-	dateI, err := time.Parse(time.RFC3339, dateStringI)
-	if err != nil {
-		panic(fmt.Errorf("Failed to parse image's 'PublishedDate': %s", dateStringI))
-	}
-	dateStringJ := images.Images[j].PublishedDate
-	dateJ, err := time.Parse(time.RFC3339, dateStringJ)
-	if err != nil {
-		panic(fmt.Errorf("Failed to parse image's 'PublishedDate': %s", dateStringJ))
-	}
-	return dateI.After(dateJ)
-}
-
-// GetLatestUbuntuImage returns the most recently published released OSImage
-// available for the given release name and location. The 'releaseName'
-// parameter is the Ubuntu version number present in the 'ImageFamily' tag in
-// Azure's representation of an OS Image (e.g. '12.04', '12.10').
-func (images *Images) GetLatestUbuntuImage(releaseName string, location string) (image *OSImage, err error) {
-	// The Less method defined above can panic if one of the published dates
-	// cannot be parsed; this code recovers from that and transforms it into
-	// an error.
- defer func() { - if recoveredErr := recover(); recoveredErr != nil { - image = nil - err = recoveredErr.(error) - } - }() - matcherRegexp := regexp.MustCompile(fmt.Sprintf(imageFamilyFormatRegexp, releaseName)) - matchingImages := Images{} - for _, image := range images.Images { - if image.PublisherName == canonicalPublisherName && - matcherRegexp.MatchString(image.ImageFamily) && - image.hasLocation(location) && - !image.isDailyBuild() { - matchingImages.Images = append(matchingImages.Images, image) - } - } - if matchingImages.Len() == 0 { - return nil, fmt.Errorf("No matching images found") - } - sort.Sort(&matchingImages) - return &matchingImages.Images[0], nil -} - -// -// OSImage bits -// - -// OSImage represents a disk image containing an operating system. -// Confusingly, the Azure API documentation also calls it a VM image. -type OSImage struct { - AffinityGroup string `xml:"AffinityGroup,omitempty"` - Category string `xml:"Category"` - Label string `xml:"Label"` - Location string `xml:"Location"` - LogicalSizeInGB float32 `xml:"LogicalSizeInGB"` - MediaLink string `xml:"MediaLink"` - Name string `xml:"Name"` - OS string `xml:"OS"` - EULA string `xml:"Eula,omitempty"` - Description string `xml:"Description,omitempty"` - ImageFamily string `xml:"ImageFamily,omitempty"` - PublishedDate string `xml:"PublishedDate,omitempty"` - IsPremium string `xml:"IsPremium,omitempty"` - PrivacyURI string `xml:"PrivacyUri,omitempty"` - PricingDetailLink string `xml:"PricingDetailLink,omitempty"` - IconURI string `xml:"IconUri,omitempty"` - RecommendedVMSize string `xml:"RecommendedVMSize,omitempty"` - PublisherName string `xml:"PublisherName"` - ShowInGUI string `xml:"ShowInGui"` - SmallIconURI string `xml:"SmallIconUri,omitempty"` - Language string `xml:"Language"` - IOType string `xml:"IOType,omitempty"` -} - -func (image *OSImage) hasLocation(location string) bool { - locations := strings.Split(image.Location, ";") - for _, loc := range locations { - if loc == location { - return true - } - } - return false -} - -// isDailyBuild returns whether this image is a daily build. -func (image *OSImage) isDailyBuild() bool { - return strings.Contains(image.Label, "DAILY") -} - -func (i *OSImage) Deserialize(data []byte) error { - return xml.Unmarshal(data, i) -} - -// -// Location -// - -type Location struct { - Name string `xml:"Name"` - DisplayName string `xml:"DisplayName"` - AvailableServices []string `xml:"AvailableServices>AvailableService,omitempty"` - ComputeCapabilities *ComputeCapabilities `xml:"ComputeCapabilities,omitempty"` -} - -type ComputeCapabilities struct { - // The tag name is plural VirtualMachines, contrary to the online documentation. 
-	VirtualMachineRoleSizes []string `xml:"VirtualMachinesRoleSizes>RoleSize"`
-	WebWorkerRoleSizes []string `xml:"WebWorkerRoleSizes>RoleSize"`
-}
-
-//
-// DataVirtualHardDisk
-//
-
-type DataVirtualHardDisk struct {
-	XMLNS string `xml:"xmlns,attr,omitempty"`
-	XMLNS_I string `xml:"xmlns:i,attr,omitempty"`
-	HostCaching string `xml:"HostCaching,omitempty"`
-	DiskLabel string `xml:"DiskLabel,omitempty"`
-	DiskName string `xml:"DiskName,omitempty"`
-	LUN int `xml:"Lun,omitempty"`
-	LogicalDiskSizeInGB int `xml:"LogicalDiskSizeInGB,omitempty"`
-	MediaLink string `xml:"MediaLink,omitempty"`
-	SourceMediaLink string `xml:"SourceMediaLink,omitempty"`
-}
-
-//
-// Disk
-//
-
-type Disk struct {
-	XMLNS string `xml:"xmlns,attr,omitempty"`
-	XMLNS_I string `xml:"xmlns:i,attr,omitempty"`
-	AffinityGroup string `xml:"AffinityGroup,omitempty"`
-	AttachedTo *DiskAttachment `xml:"AttachedTo,omitempty"`
-	OS string `xml:"OS"`
-	Location string `xml:"Location,omitempty"`
-	LogicalSizeInGB int `xml:"LogicalSizeInGB,omitempty"`
-	MediaLink string `xml:"MediaLink"`
-	Label string `xml:"Label,omitempty"`
-	Name string `xml:"Name"`
-	SourceImageName string `xml:"SourceImageName,omitempty"`
-	CreatedTime string `xml:"CreatedTime,omitempty"`
-	IOType string `xml:"IOType,omitempty"`
-}
-
-type DiskAttachment struct {
-	HostedServiceName string `xml:"HostedServiceName"`
-	DeploymentName string `xml:"DeploymentName"`
-	RoleName string `xml:"RoleName"`
-}
-
-//
-// OSVirtualHardDisk bits
-//
-
-type HostCachingType string
-
-const (
-	HostCachingRO HostCachingType = "ReadOnly"
-	HostCachingRW HostCachingType = "ReadWrite"
-)
-
-type OSVirtualHardDisk struct {
-	HostCaching string `xml:"HostCaching,omitempty"`
-	DiskLabel string `xml:"DiskLabel,omitempty"`
-	DiskName string `xml:"DiskName,omitempty"`
-	MediaLink string `xml:"MediaLink,omitempty"`
-	SourceImageName string `xml:"SourceImageName,omitempty"`
-	OS string `xml:"OS,omitempty"`
-	RemoteSourceImageLink string `xml:"RemoteSourceImageLink,omitempty"`
-	ResizedSizeInGB int `xml:"ResizedSizeInGB,omitempty"`
-}
-
-func (c *OSVirtualHardDisk) Serialize() (string, error) {
-	return toxml(c)
-}
-
-func NewOSVirtualHardDisk(
-	HostCaching HostCachingType, DiskLabel, DiskName, MediaLink,
-	SourceImageName, OS string) *OSVirtualHardDisk {
-	return &OSVirtualHardDisk{
-		HostCaching: string(HostCaching),
-		DiskLabel: DiskLabel,
-		DiskName: DiskName,
-		SourceImageName: SourceImageName,
-		MediaLink: MediaLink,
-		OS: OS,
-	}
-}
-
-// CreateVirtualHardDiskMediaLink creates a media link string used to specify
-// the location of a physical blob in the given Windows Azure storage account.
-// Example: http://example.blob.core.windows.net/disks/mydatadisk.vhd
-func CreateVirtualHardDiskMediaLink(StorageName, StoragePath string) string {
-	pathComponents := strings.Split(StoragePath, "/")
-	components := append(pathComponents, StorageName)
-	checkPathComponents(components...)
-	return fmt.Sprintf("http://%s.blob.core.windows.net/%s", StorageName, StoragePath)
-}
-
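// Illustrative sketch, not part of gwacl: combining the two helpers above to
// describe a fresh OS disk. The storage account, blob path, and source image
// name are placeholder values.
func ExampleNewOSVirtualHardDisk() {
	mediaLink := CreateVirtualHardDiskMediaLink("myaccount", "vhds/web01-os.vhd")
	disk := NewOSVirtualHardDisk(
		HostCachingRW, "web01-os", "web01-os-disk", mediaLink,
		"name-of-source-image", "Linux")
	xmlText, err := disk.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(xmlText)
}

-// ResourceExtensionReference contains a collection of resource extensions that
-// are to be installed on the Virtual Machine. The VM Agent must be installed on
-// the Virtual Machine to install resource extensions. For more information, see
-// Manage Extensions:
-//
-// https://msdn.microsoft.com/en-us/library/dn606311.aspx.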
-type ResourceExtensionReference struct { - ReferenceName string `xml:"ReferenceName"` - Publisher string `xml:"Publisher"` - Name string `xml:"Name"` - Version string `xml:"Version"` - ParameterValues []ResourceExtensionParameter `xml:"ResourceExtensionParameterValues>ResourceExtensionParameterValue,omitempty"` - State string `xml:"State,omitempty"` -} - -func (r *ResourceExtensionReference) Serialize() (string, error) { - return toxml(r) -} - -func NewResourceExtensionReference( - ReferenceName, Publisher, Name, Version, State string, - ParameterValues []ResourceExtensionParameter, -) *ResourceExtensionReference { - return &ResourceExtensionReference{ - ReferenceName: ReferenceName, - Publisher: Publisher, - Name: Name, - Version: Version, - ParameterValues: ParameterValues, - State: State, - } -} - -// ResourceExtensionParameter specifies the key, value, and type of a parameter that is passed to the -// resource extension when it is installed. -type ResourceExtensionParameter struct { - Key string - Value string - Type ResourceExtensionParameterType // If this value is set to Private, the parameter will not be returned by Get Deployment (). -} - -type ResourceExtensionParameterType string - -// Enum values for ResourceExtensionParameterType -const ( - ResourceExtensionParameterTypePublic ResourceExtensionParameterType = "Public" - ResourceExtensionParameterTypePrivate ResourceExtensionParameterType = "Private" -) - -func (r *ResourceExtensionParameter) Serialize() (string, error) { - return toxml(r) -} - -func NewResourceExtensionParameter( - Key, Value string, Type ResourceExtensionParameterType, -) *ResourceExtensionParameter { - return &ResourceExtensionParameter{ - Key: Key, - Value: Value, - Type: Type, - } -} - -type Role struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - RoleName string `xml:"RoleName"` - RoleType string `xml:"RoleType"` // Always "PersistentVMRole" - VMImage string `xml:"VMImage,omitempty"` - MediaLocation string `xml:"MediaLocation,omitempty"` - ConfigurationSets []ConfigurationSet `xml:"ConfigurationSets>ConfigurationSet"` - // TODO(axw) ResourceExtensionReferences - ResourceExtensionReferences *[]ResourceExtensionReference `xml:"ResourceExtensionReferences>ResourceExtensionReference,omitempty"` - AvailabilitySetName string `xml:"AvailabilitySetName,omitempty"` - DataVirtualHardDisks *[]DataVirtualHardDisk `xml:"DataVirtualHardDisks>DataVirtualHardDisk,omitempty"` - OSVirtualHardDisk *OSVirtualHardDisk `xml:"OSVirtualHardDisk,omitempty"` - RoleSize string `xml:"RoleSize"` - ProvisionGuestAgent string `xml:"ProvisionGuestAgent,omitempty"` - DefaultWinRmCertificateThumbprint string `xml:"DefaultWinRmCertificateThumbprint,omitempty"` - // TODO(axw) VMImageInput -} - -// -// Role bits -// - -func (c *Role) Serialize() (string, error) { - return toxml(c) -} - -func newRole(RoleSize, RoleName string, vhd *OSVirtualHardDisk, ConfigurationSets []ConfigurationSet) *Role { - return &Role{ - RoleSize: RoleSize, - RoleName: RoleName, - RoleType: "PersistentVMRole", - ConfigurationSets: ConfigurationSets, - OSVirtualHardDisk: vhd, - } -} - -func NewLinuxRole(RoleSize, RoleName string, vhd *OSVirtualHardDisk, ConfigurationSets []ConfigurationSet) *Role { - return newRole(RoleSize, RoleName, vhd, ConfigurationSets) -} - -func NewWindowsRole( - RoleSize, RoleName string, vhd *OSVirtualHardDisk, ConfigurationSets []ConfigurationSet, - ResourceExtensionReferences *[]ResourceExtensionReference, ProvisionGuestAgent string, -) *Role { - role := newRole(RoleSize, RoleName, vhd, 
ConfigurationSets)
-	role.ResourceExtensionReferences = ResourceExtensionReferences
-	role.ProvisionGuestAgent = ProvisionGuestAgent
-	return role
-}
-
-//
-// DnsServer bits
-//
-
-type DnsServer struct {
-	Name string `xml:"Name"`
-	Address string `xml:"Address"`
-}
-
-func (c *DnsServer) Serialize() (string, error) {
-	return toxml(c)
-}
-
-//
-// Hosted service bits
-//
-
-// HostedService represents a cloud service in Azure.
-// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460806.aspx
-type HostedService struct {
-	HostedServiceDescriptor
-	XMLNS string `xml:"xmlns,attr"`
-	Deployments []Deployment `xml:"Deployments>Deployment"`
-}
-
-func (c HostedService) Serialize() (string, error) {
-	return toxml(c)
-}
-
-func (c *HostedService) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, c)
-}
-
-type HostedServiceDescriptorList struct {
-	XMLName xml.Name `xml:"HostedServices"`
-	XMLNS string `xml:"xmlns,attr"`
-	HostedServices []HostedServiceDescriptor `xml:"HostedService"`
-}
-
-func (c *HostedServiceDescriptorList) Serialize() (string, error) {
-	return toxml(c)
-}
-
-func (c *HostedServiceDescriptorList) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, c)
-}
-
-// HostedServiceDescriptor contains a subset of the details in HostedService,
-// and is used when describing a list of HostedServices.
-// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460781.aspx
-type HostedServiceDescriptor struct {
-	URL string `xml:"Url"`
-	ServiceName string `xml:"ServiceName"`
-	Description string `xml:"HostedServiceProperties>Description"`
-	AffinityGroup string `xml:"HostedServiceProperties>AffinityGroup"`
-	Location string `xml:"HostedServiceProperties>Location"`
-	Label string `xml:"HostedServiceProperties>Label"`
-	Status string `xml:"HostedServiceProperties>Status"`
-	DateCreated string `xml:"HostedServiceProperties>DateCreated"`
-	DateLastModified string `xml:"HostedServiceProperties>DateLastModified"`
-	ExtendedProperties []ExtendedProperty `xml:"HostedServiceProperties>ExtendedProperties>ExtendedProperty"`
-	ReverseDnsFqdn string `xml:"HostedServiceProperties>ReverseDnsFqdn,omitempty"`
-	DefaultWinRMCertificateThumbprint string `xml:"DefaultWinRMCertificateThumbprint,omitempty"`
-	ComputeCapabilities *ComputeCapabilities `xml:"ComputeCapabilities,omitempty"`
-}
-
-func (c HostedServiceDescriptor) Serialize() (string, error) {
-	return toxml(c)
-}
-
-func (service *HostedServiceDescriptor) GetLabel() (string, error) {
-	label, err := base64.StdEncoding.DecodeString(service.Label)
-	if err != nil {
-		return "", err
-	}
-	return string(label), nil
-}
-
-type CreateHostedService struct {
-	XMLNS string `xml:"xmlns,attr"`
-	ServiceName string `xml:"ServiceName"`
-	Label string `xml:"Label"` // base64-encoded
-	Description string `xml:"Description"`
-	Location string `xml:"Location,omitempty"`
-	AffinityGroup string `xml:"AffinityGroup,omitempty"`
-	ExtendedProperties []ExtendedProperty `xml:"ExtendedProperties>ExtendedProperty"`
-	ReverseDnsFqdn string `xml:"ReverseDnsFqdn,omitempty"`
-}
-
-func NewCreateHostedServiceWithLocation(serviceName, label, location string) *CreateHostedService {
-	base64label := base64.StdEncoding.EncodeToString([]byte(label))
-	return &CreateHostedService{
-		XMLNS: XMLNS,
-		ServiceName: serviceName,
-		Label: base64label,
-		Location: location,
-	}
-}
-
-func (s *CreateHostedService) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, s)
-}
-
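// Illustrative sketch, not part of gwacl: the constructor above base64-encodes
// the human-readable label; HostedServiceDescriptor.GetLabel performs the
// matching decode on the way back. The service name and label are placeholders.
func ExampleNewCreateHostedServiceWithLocation() {
	hs := NewCreateHostedServiceWithLocation("myservice", "my service label", "West US")
	fmt.Println(hs.Label) // prints the base64 encoding of "my service label"
}

-// AvailabilityResponse is the reply from a Check Hosted Service Name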
-// Availability operation.
-type AvailabilityResponse struct {
-	XMLNS string `xml:"xmlns,attr"`
-	Result string `xml:"Result"`
-	Reason string `xml:"Reason"`
-}
-
-func (a *AvailabilityResponse) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, a)
-}
-
-// UpdateHostedService contains the details necessary to call the
-// UpdateHostedService management API call.
-// See http://msdn.microsoft.com/en-us/library/windowsazure/gg441303.aspx
-type UpdateHostedService struct {
-	XMLNS string `xml:"xmlns,attr"`
-	Label string `xml:"Label,omitempty"` // base64-encoded
-	Description string `xml:"Description,omitempty"`
-	ExtendedProperties []ExtendedProperty `xml:"ExtendedProperties>ExtendedProperty,omitempty"`
-	ReverseDnsFqdn string `xml:"ReverseDnsFqdn,omitempty"`
-}
-
-func (u *UpdateHostedService) Serialize() (string, error) {
-	return toxml(u)
-}
-
-func NewUpdateHostedService(label, description string, properties []ExtendedProperty) *UpdateHostedService {
-	base64label := base64.StdEncoding.EncodeToString([]byte(label))
-	return &UpdateHostedService{
-		XMLNS: XMLNS,
-		Label: base64label,
-		Description: description,
-		ExtendedProperties: properties,
-	}
-}
-
-//
-// Deployment bits
-//
-
-// Deployment is used both as input for the "Create Virtual Machine Deployment"
-// call, and as a return value for "Get Deployment."
-type Deployment struct {
-	XMLNS string `xml:"xmlns,attr"`
-	XMLNS_I string `xml:"xmlns:i,attr"`
-	Name string `xml:"Name"`
-	// DeploymentSlot is either "Production" or "Staging".
-	DeploymentSlot string `xml:"DeploymentSlot"`
-	PrivateID string `xml:"PrivateID,omitempty"` // Only used for "Get Deployment."
-	Status string `xml:"Status,omitempty"` // Only used for "Get Deployment."
-	Label string `xml:"Label"`
-	URL string `xml:"Url,omitempty"` // Only used for "Get Deployment."
-	Configuration string `xml:"Configuration,omitempty"` // Only used for "Get Deployment."
-	RoleInstanceList []RoleInstance `xml:"RoleInstanceList>RoleInstance"`
-	// TODO(axw) UpgradeStatus
-	UpgradeDomainCount string `xml:"UpgradeDomainCount,omitempty"` // Only used for "Get Deployment."
-	RoleList []Role `xml:"RoleList>Role"`
-	SDKVersion string `xml:"SdkVersion,omitempty"` // Only used for "Get Deployment."
-	Locked string `xml:"Locked,omitempty"` // Only used for "Get Deployment."
-	RollbackAllowed string `xml:"RollbackAllowed,omitempty"` // Only used for "Get Deployment."
-	CreatedTime string `xml:"CreatedTime,omitempty"`
-	LastModifiedTime string `xml:"LastModifiedTime,omitempty"`
-	VirtualNetworkName string `xml:"VirtualNetworkName,omitempty"`
-	DNS []DnsServer `xml:"Dns>DnsServers>DnsServer,omitempty"`
-	// TODO(axw) LoadBalancers
-	ExtendedProperties []ExtendedProperty `xml:"ExtendedProperties>ExtendedProperty,omitempty"` // Only used for "Get Deployment."
-	// TODO(axw) PersistentVMDowntime
-	// TODO(axw) VirtualIPs
-	// TODO(axw) ExtensionConfiguration
-	ReservedIPName string `xml:"ReservedIPName,omitempty"`
-	InternalDnsSuffix string `xml:"InternalDnsSuffix,omitempty"`
-}
-
-func (deployment *Deployment) GetFQDN() (string, error) {
-	if deployment.URL == "" {
-		return "", fmt.Errorf("Deployment's URL field is empty")
-	}
-	parsedURL, err := url.Parse(deployment.URL)
-	if err != nil {
-		return "", err
-	}
-	return parsedURL.Host, nil
-}
-
-func (s *Deployment) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, s)
-}
-
-func (c *Deployment) Serialize() (string, error) {
-	return toxml(c)
-}
-
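// Illustrative sketch, not part of gwacl: for a running deployment, the URL
// returned by "Get Deployment" doubles as the service's fully qualified
// domain name. The URL below is a placeholder.
func ExampleDeploymentGetFQDN() {
	deployment := Deployment{URL: "http://myservice.cloudapp.net/"}
	fqdn, err := deployment.GetFQDN()
	if err != nil {
		panic(err)
	}
	fmt.Println(fqdn) // Output: myservice.cloudapp.net
}

-// RoleInstance is a component of a Deployment.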
-type RoleInstance struct {
-	RoleName string `xml:"RoleName"`
-	InstanceName string `xml:"InstanceName"`
-	InstanceStatus string `xml:"InstanceStatus"`
-	InstanceUpgradeDomain string `xml:"InstanceUpgradeDomain"`
-	InstanceFaultDomain string `xml:"InstanceFaultDomain"`
-	InstanceSize string `xml:"InstanceSize"`
-	InstanceStateDetails string `xml:"InstanceStateDetails"`
-	InstanceErrorCode string `xml:"InstanceErrorCode"`
-	IPAddress string `xml:"IpAddress"`
-	InstanceEndpoints []InstanceEndpoint `xml:"InstanceEndpoints>InstanceEndpoint"`
-	PowerState string `xml:"PowerState"`
-	HostName string `xml:"HostName"`
-	RemoteAccessCertificateThumbprint string `xml:"RemoteAccessCertificateThumbprint"`
-}
-
-// InstanceEndpoint is a component of a RoleInstance.
-type InstanceEndpoint struct {
-	Name string `xml:"Name"`
-	VIP string `xml:"Vip"`
-	PublicPort int `xml:"PublicPort"` // Not uint16; see https://bugs.launchpad.net/juju-core/+bug/1201880
-	LocalPort int `xml:"LocalPort"` // Not uint16; see https://bugs.launchpad.net/juju-core/+bug/1201880
-	Protocol string `xml:"Protocol"`
-}
-
-// NewDeploymentForCreateVMDeployment creates a Deployment object for the
-// purpose of passing it to "Create Virtual Machine Deployment."
-// You may still want to set the optional DNS attribute.
-func NewDeploymentForCreateVMDeployment(name, deploymentSlot, label string, roles []Role, virtualNetworkName string) *Deployment {
-	deployment := Deployment{
-		XMLNS: XMLNS,
-		XMLNS_I: XMLNS_I,
-		Name: name,
-		DeploymentSlot: deploymentSlot,
-		Label: base64.StdEncoding.EncodeToString([]byte(label)),
-		RoleList: roles,
-		VirtualNetworkName: virtualNetworkName,
-	}
-	return &deployment
-}
-
-const XMLNS = "http://schemas.microsoft.com/windowsazure"
-const XMLNS_I = "http://www.w3.org/2001/XMLSchema-instance"
-const XMLNS_NC = "http://schemas.microsoft.com/ServiceHosting/2011/07/NetworkConfiguration"
-
-//
-// Role Operations bits
-//
-
-type RoleOperation struct {
-	XMLName xml.Name
-	XMLNS string `xml:"xmlns,attr"`
-	XMLNS_I string `xml:"xmlns:i,attr"`
-	OperationType string `xml:"OperationType"`
-}
-
-func newRoleOperation(operationType string) *RoleOperation {
-	operation := RoleOperation{
-		XMLNS: XMLNS,
-		XMLNS_I: XMLNS_I,
-		OperationType: operationType,
-	}
-	operation.XMLName.Local = operationType
-	return &operation
-}
-
-// The Start Role operation starts a virtual machine.
-// http://msdn.microsoft.com/en-us/library/windowsazure/jj157189.aspx
-var startRoleOperation = newRoleOperation("StartRoleOperation")
-
-// The Shutdown Role operation shuts down a virtual machine.
-// http://msdn.microsoft.com/en-us/library/windowsazure/jj157195.aspx
-var shutdownRoleOperation = newRoleOperation("ShutdownRoleOperation")
-
-// The Restart role operation restarts a virtual machine.
-// http://msdn.microsoft.com/en-us/library/windowsazure/jj157197.aspx
-var restartRoleOperation = newRoleOperation("RestartRoleOperation")
-
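// Illustrative sketch, not part of gwacl: assembling a complete "Create
// Virtual Machine Deployment" payload from the constructors defined above.
// Every literal value here is a placeholder.
func ExampleNewDeploymentForCreateVMDeployment() {
	vhd := NewOSVirtualHardDisk(
		HostCachingRW, "web01-os", "web01-os-disk",
		CreateVirtualHardDiskMediaLink("myaccount", "vhds/web01-os.vhd"),
		"name-of-source-image", "Linux")
	role := NewLinuxRole("Small", "web01", vhd, []ConfigurationSet{
		*NewLinuxProvisioningConfigurationSet("web01", "ubuntu", "s3cret", "", "true"),
	})
	deployment := NewDeploymentForCreateVMDeployment(
		"web01-deployment", "Production", "my deployment", []Role{*role}, "my-vnet")
	xmlText, err := deployment.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(xmlText)
}

-//
-// PersistentVMRole, as used by GetRole, UpdateRole, etc.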
-// -type PersistentVMRole Role - -func (role *PersistentVMRole) Deserialize(data []byte) error { - return xml.Unmarshal(data, role) -} - -func (role *PersistentVMRole) Serialize() (string, error) { - if role.XMLNS == "" { - clone := *role - clone.XMLNS = XMLNS - role = &clone - } - return toxml(role) -} - -// -// Virtual Networks -// - -type VirtualNetDnsServer struct { - XMLName string `xml:"DnsServer"` - Name string `xml:"name,attr"` - IPAddress string `xml:"IPAddress,attr"` -} - -type LocalNetworkSite struct { - XMLName string `xml:"LocalNetworkSite"` - Name string `xml:"name,attr"` - AddressSpacePrefixes []string `xml:"AddressSpace>AddressPrefix"` - VPNGatewayAddress string `xml:"VPNGatewayAddress"` -} - -type Subnet struct { - XMLName string `xml:"Subnet"` - Name string `xml:"name,attr"` - AddressPrefix string `xml:"AddressPrefix"` -} - -type LocalNetworkSiteRefConnection struct { - XMLName string `xml:"Connection"` - Type string `xml:"type,attr"` -} - -type LocalNetworkSiteRef struct { - XMLName string `xml:"LocalNetworkSiteRef"` - Name string `xml:"name,attr"` - Connection LocalNetworkSiteRefConnection `xml:"Connection"` -} - -type Gateway struct { - XMLName string `xml:"Gateway"` - Profile string `xml:"profile,attr"` - VPNClientAddressPoolPrefixes []string `xml:"VPNClientAddressPool>AddressPrefix"` - LocalNetworkSiteRef LocalNetworkSiteRef `xml:"ConnectionsToLocalNetwork>LocalNetworkSiteRef"` -} - -type DnsServerRef struct { - XMLName string `xml:"DnsServerRef"` - Name string `xml:"name,attr"` -} - -type VirtualNetworkSite struct { - Name string `xml:"name,attr"` - AffinityGroup string `xml:"AffinityGroup,attr,omitempty"` - Location string `xml:"Location,attr,omitempty"` - AddressSpacePrefixes []string `xml:"AddressSpace>AddressPrefix"` - Subnets *[]Subnet `xml:"Subnets>Subnet,omitempty"` - DnsServersRef *[]DnsServerRef `xml:"DnsServersRef>DnsServerRef,omitempty"` - Gateway *Gateway `xml:"Gateway,omitempty"` -} - -type NetworkConfiguration struct { - XMLNS string `xml:"xmlns,attr"` - DNS *[]VirtualNetDnsServer `xml:"VirtualNetworkConfiguration>Dns>DnsServers>DnsServer,omitempty"` - LocalNetworkSites *[]LocalNetworkSite `xml:"VirtualNetworkConfiguration>LocalNetworkSites>LocalNetworkSite,omitempty"` - VirtualNetworkSites *[]VirtualNetworkSite `xml:"VirtualNetworkConfiguration>VirtualNetworkSites>VirtualNetworkSite,omitempty"` -} - -func (nc *NetworkConfiguration) Serialize() (string, error) { - return toxml(nc) -} - -func (nc *NetworkConfiguration) Deserialize(data []byte) error { - return xml.Unmarshal(data, nc) -} - -// -// Affinity Group -// - -// See http://msdn.microsoft.com/en-us/library/windowsazure/gg715317.aspx -type CreateAffinityGroup struct { - XMLNS string `xml:"xmlns,attr"` - Name string `xml:"Name"` - Label string `xml:"Label"` // Must be base64 encoded. - Description string `xml:"Description,omitempty"` - Location string `xml:"Location"` // Value comes from ListLocations. -} - -func (c *CreateAffinityGroup) Serialize() (string, error) { - return toxml(c) -} - -func NewCreateAffinityGroup(name, label, description, location string) *CreateAffinityGroup { - base64label := base64.StdEncoding.EncodeToString([]byte(label)) - return &CreateAffinityGroup{ - XMLNS: XMLNS, - Name: name, - Label: base64label, - Description: description, - Location: location, - } -} - -// See http://msdn.microsoft.com/en-us/library/windowsazure/gg715316.aspx -type UpdateAffinityGroup struct { - XMLNS string `xml:"xmlns,attr"` - Label string `xml:"Label"` // Must be base64 encoded. 
-	Description string `xml:"Description,omitempty"`
-}
-
-func (u *UpdateAffinityGroup) Serialize() (string, error) {
-	return toxml(u)
-}
-
-func NewUpdateAffinityGroup(label, description string) *UpdateAffinityGroup {
-	base64label := base64.StdEncoding.EncodeToString([]byte(label))
-	return &UpdateAffinityGroup{
-		XMLNS: XMLNS,
-		Label: base64label,
-		Description: description,
-	}
-}
-
-//
-// Storage Services bits
-//
-
-type ExtendedProperty struct {
-	Name string `xml:"Name"`
-	Value string `xml:"Value"`
-}
-
-type StorageService struct {
-	// List Storage Accounts.
-	// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460787.aspx
-	URL string `xml:"Url"`
-	ServiceName string `xml:"ServiceName"`
-	Description string `xml:"StorageServiceProperties>Description"`
-	AffinityGroup string `xml:"StorageServiceProperties>AffinityGroup"`
-	Label string `xml:"StorageServiceProperties>Label"` // base64
-	Status string `xml:"StorageServiceProperties>Status"`
-	Endpoints []string `xml:"StorageServiceProperties>Endpoints>Endpoint"`
-	GeoReplicationEnabled string `xml:"StorageServiceProperties>GeoReplicationEnabled"`
-	GeoPrimaryRegion string `xml:"StorageServiceProperties>GeoPrimaryRegion"`
-	StatusOfPrimary string `xml:"StorageServiceProperties>StatusOfPrimary"`
-	LastGeoFailoverTime string `xml:"StorageServiceProperties>LastGeoFailoverTime"`
-	GeoSecondaryRegion string `xml:"StorageServiceProperties>GeoSecondaryRegion"`
-	StatusOfSecondary string `xml:"StorageServiceProperties>StatusOfSecondary"`
-	ExtendedProperties []ExtendedProperty `xml:"StorageServiceProperties>ExtendedProperties>ExtendedProperty"`
-
-	// TODO: Add accessors for non-string data encoded as strings.
-}
-
-type StorageServices struct {
-	XMLNS string `xml:"xmlns,attr"`
-	StorageServices []StorageService `xml:"StorageService"`
-}
-
-func (s *StorageServices) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, s)
-}
-
-// CreateStorageServiceInput is a request to create a storage account.
-// (Azure's "storage services" seem to have been renamed to "storage accounts",
-// but the old terminology is still evident in the API.)
-type CreateStorageServiceInput struct {
-	// See http://msdn.microsoft.com/en-us/library/windowsazure/hh264518.aspx
-	XMLNS string `xml:"xmlns,attr"`
-	ServiceName string `xml:"ServiceName"`
-	Label string `xml:"Label"`
-	Description string `xml:"Description,omitempty"`
-	Location string `xml:"Location"`
-	AffinityGroup string `xml:"AffinityGroup,omitempty"`
-	GeoReplicationEnabled string `xml:"GeoReplicationEnabled,omitempty"`
-	ExtendedProperties []ExtendedProperty `xml:"ExtendedProperties>ExtendedProperty"`
-	AccountType string `xml:"AccountType,omitempty"`
-}
-
-func (c *CreateStorageServiceInput) Serialize() (string, error) {
-	return toxml(c)
-}
-
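// Illustrative sketch, not part of gwacl: affinity-group labels, like other
// labels in this API, travel base64-encoded; the constructor above handles
// the encoding so callers pass plain text. The label and description are
// placeholders.
func ExampleNewUpdateAffinityGroup() {
	update := NewUpdateAffinityGroup("my label", "updated description")
	xmlText, err := update.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(xmlText) // the <Label> element holds base64("my label")
}

-// NewCreateStorageServiceInputWithLocation creates a location-based
-// CreateStorageServiceInput, with all required fields filled out.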
-func NewCreateStorageServiceInputWithLocation(name, label, location string, geoReplicationEnabled string) *CreateStorageServiceInput {
-	return &CreateStorageServiceInput{
-		XMLNS: XMLNS,
-		ServiceName: name,
-		Label: base64.StdEncoding.EncodeToString([]byte(label)),
-		Location: location,
-		GeoReplicationEnabled: geoReplicationEnabled,
-	}
-}
-
-type MetadataItem struct {
-	XMLName xml.Name
-	Value string `xml:",chardata"`
-}
-
-func (item *MetadataItem) Name() string {
-	return item.XMLName.Local
-}
-
-type Metadata struct {
-	Items []MetadataItem `xml:",any"`
-}
-
-type Blob struct {
-	Name string `xml:"Name"`
-	Snapshot string `xml:"Snapshot"`
-	LastModified string `xml:"Properties>Last-Modified"`
-	ETag string `xml:"Properties>Etag"`
-	ContentLength string `xml:"Properties>Content-Length"`
-	ContentType string `xml:"Properties>Content-Type"`
-	BlobSequenceNumber string `xml:"Properties>x-ms-blob-sequence-number"`
-	BlobType string `xml:"Properties>BlobType"`
-	LeaseStatus string `xml:"Properties>LeaseStatus"`
-	LeaseState string `xml:"Properties>LeaseState"`
-	LeaseDuration string `xml:"Properties>LeaseDuration"`
-	CopyID string `xml:"Properties>CopyId"`
-	CopyStatus string `xml:"Properties>CopyStatus"`
-	CopySource string `xml:"Properties>CopySource"`
-	CopyProgress string `xml:"Properties>CopyProgress"`
-	CopyCompletionTime string `xml:"Properties>CopyCompletionTime"`
-	CopyStatusDescription string `xml:"Properties>CopyStatusDescription"`
-	Metadata Metadata `xml:"Metadata"`
-}
-
-type BlobEnumerationResults struct {
-	// http://msdn.microsoft.com/en-us/library/windowsazure/dd135734.aspx
-	XMLName xml.Name `xml:"EnumerationResults"`
-	ContainerName string `xml:"ContainerName,attr"`
-	Prefix string `xml:"Prefix"`
-	Marker string `xml:"Marker"`
-	MaxResults string `xml:"MaxResults"`
-	Delimiter string `xml:"Delimiter"`
-	Blobs []Blob `xml:"Blobs>Blob"`
-	BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"`
-	NextMarker string `xml:"NextMarker"`
-}
-
-func (b *BlobEnumerationResults) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, b)
-}
-
-type StorageAccountKeys struct {
-	// See http://msdn.microsoft.com/en-us/library/windowsazure/ee460785.aspx
-	XMLName xml.Name `xml:"StorageService"`
-	URL string `xml:"Url"`
-	Primary string `xml:"StorageServiceKeys>Primary"`
-	Secondary string `xml:"StorageServiceKeys>Secondary"`
-}
-
-func (s *StorageAccountKeys) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, s)
-}
-
-type ContainerEnumerationResults struct {
-	// See http://msdn.microsoft.com/en-us/library/windowsazure/dd179352.aspx
-	XMLName xml.Name `xml:"EnumerationResults"`
-	Prefix string `xml:"Prefix"`
-	Marker string `xml:"Marker"`
-	MaxResults string `xml:"MaxResults"`
-	Containers []Container `xml:"Containers>Container"`
-	NextMarker string `xml:"NextMarker"`
-}
-
-func (s *ContainerEnumerationResults) Deserialize(data []byte) error {
-	return xml.Unmarshal(data, s)
-}
-
-type Container struct {
-	XMLName xml.Name `xml:"Container"`
-	Name string `xml:"Name"`
-	Properties Properties `xml:"Properties"`
-	Metadata Metadata `xml:"Metadata"`
-}
-
-type Properties struct {
-	LastModified string `xml:"Last-Modified"`
-	ETag string `xml:"Etag"`
-	LeaseStatus string `xml:"LeaseStatus"`
-	LeaseState string `xml:"LeaseState"`
-	LeaseDuration string `xml:"LeaseDuration"`
-}
-
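// Illustrative sketch, not part of gwacl: creating the request body for a new
// storage account with the constructor above. The account name, label, and
// location are placeholder values.
func ExampleNewCreateStorageServiceInputWithLocation() {
	input := NewCreateStorageServiceInputWithLocation(
		"myaccount", "my account label", "West US", "true")
	xmlText, err := input.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(xmlText)
}

-// An enumeration-lite type to define from which list (committed, uncommitted,
-// latest) to get a block during a PutBlockList Storage API operation.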
-type BlockListType string - -const ( - BlockListUncommitted BlockListType = "Uncommitted" - BlockListCommitted BlockListType = "Committed" - BlockListLatest BlockListType = "Latest" -) - -// Payload for the PutBlockList operation. -type BlockList struct { - XMLName xml.Name `xml:"BlockList"` - Items []*BlockListItem -} - -func (s *BlockList) Serialize() ([]byte, error) { - return marshalXML(s) -} - -// Add a BlockListItem to a BlockList. -func (s *BlockList) Add(blockType BlockListType, blockID string) { - base64ID := base64.StdEncoding.EncodeToString([]byte(blockID)) - item := NewBlockListItem(blockType, base64ID) - s.Items = append(s.Items, item) -} - -type BlockListItem struct { - XMLName xml.Name - BlockID string `xml:",chardata"` -} - -// Create a new BlockListItem. -func NewBlockListItem(blockType BlockListType, blockID string) *BlockListItem { - return &BlockListItem{ - XMLName: xml.Name{Local: string(blockType)}, - BlockID: blockID, - } -} - -func (item *BlockListItem) Type() BlockListType { - switch BlockListType(item.XMLName.Local) { - case BlockListUncommitted: - return BlockListUncommitted - case BlockListCommitted: - return BlockListCommitted - case BlockListLatest: - return BlockListLatest - } - panic(fmt.Errorf("type not recognized: %s", item.XMLName.Local)) -} - -// GetBlockList result struct. -type Block struct { - Name string `xml:"Name"` - Size string `xml:"Size"` -} - -type GetBlockList struct { - XMLName xml.Name `xml:"BlockList"` - CommittedBlocks []Block `xml:"CommittedBlocks>Block"` - UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"` -} - -func (g *GetBlockList) Deserialize(data []byte) error { - return xml.Unmarshal(data, g) -} - -// -// Operation Services bits -// - -const ( - InProgressOperationStatus = "InProgress" - SucceededOperationStatus = "Succeeded" - FailedOperationStatus = "Failed" -) - -type Operation struct { - ID string `xml:"ID"` - Status string `xml:"Status"` - HTTPStatusCode int `xml:"HttpStatusCode"` - ErrorCode string `xml:"Error>Code"` - ErrorMessage string `xml:"Error>Message"` -} - -func (o *Operation) Deserialize(data []byte) error { - return xml.Unmarshal(data, o) -} === removed file 'src/launchpad.net/gwacl/xmlobjects_test.go' --- src/launchpad.net/gwacl/xmlobjects_test.go 2015-10-23 18:28:45 +0000 +++ src/launchpad.net/gwacl/xmlobjects_test.go 1970-01-01 00:00:00 +0000 @@ -1,2318 +0,0 @@ -// Copyright 2013 Canonical Ltd. This software is licensed under the -// GNU Lesser General Public License version 3 (see the file COPYING). - -package gwacl - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - "sort" - "strings" - - . 
"launchpad.net/gocheck" - "launchpad.net/gwacl/dedent" -) - -type xmlSuite struct{} - -var _ = Suite(&xmlSuite{}) - -// -// Tests for Marshallers -// - -func (suite *xmlSuite) TestLinuxConfigurationSet(c *C) { - config := makeLinuxProvisioningConfiguration() - - xml, err := config.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - LinuxProvisioningConfiguration - %s - %s - %s - %v - %s - `) - expected := fmt.Sprintf(template, config.Hostname, config.Username, - config.Password, config.DisableSSHPasswordAuthentication, - config.CustomData) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestWindowsConfigurationSet(c *C) { - config := makeWindowsProvisioningConfiguration() - - xml, err := config.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - WindowsProvisioningConfiguration - %s - %s - %s - %v - - %s - %s - %s - - - - - %s - %s - - - - %s - %s - %s - `) - expected := fmt.Sprintf(template, config.ComputerName, config.AdminPassword, - config.EnableAutomaticUpdates, config.TimeZone, - config.StoredCertificateSettings[0].StoreLocation, - config.StoredCertificateSettings[0].StoreName, - config.StoredCertificateSettings[0].Thumbprint, - config.WinRMListeners.Protocol, config.WinRMListeners.CertificateThumbprint, - config.AdminUsername, config.AdditionalUnattendContent, - config.CustomData) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestInputEndpoint(c *C) { - endpoint := populateEndpoint(&InputEndpoint{}) - - xml, err := endpoint.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - %s - %v - %s - %v - - %s - %d - %s - - %s - %s - `) - expected := fmt.Sprintf(template, endpoint.LoadBalancedEndpointSetName, - endpoint.LocalPort, endpoint.Name, endpoint.Port, - endpoint.LoadBalancerProbe.Path, endpoint.LoadBalancerProbe.Port, - endpoint.LoadBalancerProbe.Protocol, endpoint.Protocol, endpoint.VIP) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestOSVirtualHardDisk(c *C) { - disk := makeOSVirtualHardDisk() - - xml, err := disk.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - %s - %s - %s - %s - %s - `) - expected := fmt.Sprintf(template, disk.HostCaching, disk.DiskLabel, - disk.DiskName, disk.MediaLink, disk.SourceImageName) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestConfigurationSetNetworkConfiguration(c *C) { - endpoint1 := populateEndpoint(&InputEndpoint{}) - endpoint2 := populateEndpoint(&InputEndpoint{}) - endpoints := []InputEndpoint{*endpoint1, *endpoint2} - subnet1 := MakeRandomString(10) - subnet2 := MakeRandomString(10) - config := NewNetworkConfigurationSet(endpoints, []string{subnet1, subnet2}) - xml, err := config.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - NetworkConfiguration - - - %s - %v - %s - %v - - %s - %d - %s - - %s - %s - - - %s - %v - %s - %v - - %s - %d - %s - - %s - %s - - - - %s - %s - - `) - expected := fmt.Sprintf(template, - endpoint1.LoadBalancedEndpointSetName, endpoint1.LocalPort, - endpoint1.Name, endpoint1.Port, endpoint1.LoadBalancerProbe.Path, - endpoint1.LoadBalancerProbe.Port, - endpoint1.LoadBalancerProbe.Protocol, endpoint1.Protocol, - endpoint1.VIP, - endpoint2.LoadBalancedEndpointSetName, endpoint2.LocalPort, - endpoint2.Name, endpoint2.Port, endpoint2.LoadBalancerProbe.Path, - endpoint2.LoadBalancerProbe.Port, - 
endpoint2.LoadBalancerProbe.Protocol, endpoint2.Protocol, - endpoint2.VIP, - subnet1, subnet2) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestLinuxRole(c *C) { - role := makeLinuxRole() - config := role.ConfigurationSets[0] - - xml, err := role.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - %s - PersistentVMRole - - - %s - %s - %s - %s - %v - %s - - - %s - `) - expected := fmt.Sprintf(template, role.RoleName, - config.ConfigurationSetType, config.Hostname, config.Username, - config.Password, config.DisableSSHPasswordAuthentication, - config.CustomData, role.RoleSize) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestWindowsRole(c *C) { - role := makeWindowsRole() - config := role.ConfigurationSets[0] - resources := role.ResourceExtensionReferences - resource := (*resources)[0] - - xml, err := role.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - %s - PersistentVMRole - - - WindowsProvisioningConfiguration - %s - %s - %s - %v - - %s - %s - %s - - - - - %s - %s - - - - %s - %s - %s - - - - - %s - %s - %s - %s - - - %s - %s - %s - - - %s - %s - %s - - - %s - - - %s - %s - `) - expected := fmt.Sprintf(template, role.RoleName, - config.ComputerName, config.AdminPassword, - config.EnableAutomaticUpdates, config.TimeZone, - config.StoredCertificateSettings[0].StoreLocation, - config.StoredCertificateSettings[0].StoreName, - config.StoredCertificateSettings[0].Thumbprint, - config.WinRMListeners.Protocol, config.WinRMListeners.CertificateThumbprint, - config.AdminUsername, config.AdditionalUnattendContent, - config.CustomData, - resource.ReferenceName, resource.Publisher, resource.Name, - resource.Version, - resource.ParameterValues[0].Key, - resource.ParameterValues[0].Value, - resource.ParameterValues[0].Type, - resource.ParameterValues[1].Key, - resource.ParameterValues[1].Value, - resource.ParameterValues[1].Type, - resource.State, - role.RoleSize, - role.ProvisionGuestAgent, - ) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -func makePersistentVMRole(rolename string) string { - // This template is from - // http://msdn.microsoft.com/en-us/library/windowsazure/jj157193.aspx - template := dedent.Dedent(` - - %s - PersistentVMRole - - - NetworkConfiguration - - - name-of-load-balanced-endpoint-set - 1 - name-of-input-endpoint - 1 - - path-of-probe - 1234 - protocol-of-input-endpoint-1 - - TCP|UDP - virtual-ip-address-of-input-endpoint-1 - - - name-of-load-balanced-endpoint-set - 2 - name-of-input-endpoint - 2 - - path-of-probe - 5678 - protocol-of-input-endpoint-2 - - TCP|UDP - virtual-ip-address-of-input-endpoint-2 - - - - name-of-subnet - - - - name-of-availability-set - - - host-caching-mode-of-data-disk - new-or-existing-disk-name - 31 - 42 - path-to-vhd - - - - host-caching-mode-of-os-disk - name-of-os-disk - path-to-vhd - image-used-to-create-os-disk - operating-system-on-os-disk - - size-of-instance - winrm-cert-thumbprint - `) - return fmt.Sprintf(template, rolename) -} - -func (suite *xmlSuite) TestPersistentVMRoleDeserialize(c *C) { - expected := &PersistentVMRole{ - XMLNS: XMLNS, - RoleName: "name-of-the-vm", - RoleType: "PersistentVMRole", - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - { - LoadBalancedEndpointSetName: "name-of-load-balanced-endpoint-set", - LocalPort: 1, - Name: "name-of-input-endpoint", - Port: 1, - 
LoadBalancerProbe: &LoadBalancerProbe{ - Path: "path-of-probe", - Port: 1234, - Protocol: "protocol-of-input-endpoint-1", - }, - Protocol: "TCP|UDP", - VIP: "virtual-ip-address-of-input-endpoint-1", - }, - { - LoadBalancedEndpointSetName: "name-of-load-balanced-endpoint-set", - LocalPort: 2, - Name: "name-of-input-endpoint", - Port: 2, - LoadBalancerProbe: &LoadBalancerProbe{ - Path: "path-of-probe", - Port: 5678, - Protocol: "protocol-of-input-endpoint-2", - }, - Protocol: "TCP|UDP", - VIP: "virtual-ip-address-of-input-endpoint-2", - }, - }, - SubnetNames: &[]string{"name-of-subnet"}, - }, - }, - AvailabilitySetName: "name-of-availability-set", - DataVirtualHardDisks: &[]DataVirtualHardDisk{ - { - HostCaching: "host-caching-mode-of-data-disk", - DiskName: "new-or-existing-disk-name", - LUN: 31, - LogicalDiskSizeInGB: 42, - MediaLink: "path-to-vhd", - }, - }, - OSVirtualHardDisk: &OSVirtualHardDisk{ - HostCaching: "host-caching-mode-of-os-disk", - DiskName: "name-of-os-disk", - MediaLink: "path-to-vhd", - SourceImageName: "image-used-to-create-os-disk", - OS: "operating-system-on-os-disk", - }, - RoleSize: "size-of-instance", - DefaultWinRmCertificateThumbprint: "winrm-cert-thumbprint", - } - - template := makePersistentVMRole("name-of-the-vm") - - observed := &PersistentVMRole{} - err := observed.Deserialize([]byte(template)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} - -func (suite *xmlSuite) TestPersistentVMRoleSerialize(c *C) { - role := &PersistentVMRole{ - XMLNS: XMLNS, - RoleName: "name-of-the-vm", - RoleType: "PersistentVMRole", - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - { - LoadBalancedEndpointSetName: "name-of-load-balanced-endpoint-set", - LocalPort: 1, - Name: "name-of-input-endpoint", - Port: 1, - LoadBalancerProbe: &LoadBalancerProbe{ - Path: "path-of-probe", - Port: 1234, - Protocol: "protocol-of-input-endpoint-1", - }, - Protocol: "TCP|UDP", - VIP: "virtual-ip-address-of-input-endpoint-1", - }, - { - LoadBalancedEndpointSetName: "name-of-load-balanced-endpoint-set", - LocalPort: 2, - Name: "name-of-input-endpoint", - Port: 2, - LoadBalancerProbe: &LoadBalancerProbe{ - Path: "path-of-probe", - Port: 5678, - Protocol: "protocol-of-input-endpoint-2", - }, - Protocol: "TCP|UDP", - VIP: "virtual-ip-address-of-input-endpoint-2", - }, - }, - SubnetNames: &[]string{"name-of-subnet"}, - }, - }, - AvailabilitySetName: "name-of-availability-set", - DataVirtualHardDisks: &[]DataVirtualHardDisk{ - { - HostCaching: "host-caching-mode-of-data-disk", - DiskName: "new-or-existing-disk-name", - LUN: 31, - LogicalDiskSizeInGB: 42, - MediaLink: "path-to-vhd", - }, - }, - OSVirtualHardDisk: &OSVirtualHardDisk{ - HostCaching: "host-caching-mode-of-os-disk", - DiskName: "name-of-os-disk", - MediaLink: "path-to-vhd", - SourceImageName: "image-used-to-create-os-disk", - OS: "operating-system-on-os-disk", - }, - RoleSize: "size-of-instance", - DefaultWinRmCertificateThumbprint: "winrm-cert-thumbprint", - } - expected := makePersistentVMRole("name-of-the-vm") - - observed, err := role.Serialize() - - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), DeepEquals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestNetworkConfigurationSerialize(c *C) { - // Template from - // http://msdn.microsoft.com/en-us/library/windowsazure/jj157181.aspx - expected := dedent.Dedent(` - - - - - - - - - - - CIDR-identifier - - IPV4-address-of-the-vpn-gateway - - - - - - CIDR-identifier - - 
- - CIDR-identifier - - - - - - - - CIDR-identifier - - - - - - - - - - - `) - - input := NetworkConfiguration{ - XMLNS: XMLNS_NC, - DNS: &[]VirtualNetDnsServer{ - { - Name: "dns-server-name", - IPAddress: "IPV4-address-of-the-server", - }, - }, - LocalNetworkSites: &[]LocalNetworkSite{ - { - Name: "local-site-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - VPNGatewayAddress: "IPV4-address-of-the-vpn-gateway", - }, - }, - VirtualNetworkSites: &[]VirtualNetworkSite{ - { - Name: "virtual-network-name", - AffinityGroup: "affinity-group-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - Subnets: &[]Subnet{ - { - Name: "subnet-name", - AddressPrefix: "CIDR-identifier", - }, - }, - DnsServersRef: &[]DnsServerRef{ - { - Name: "primary-DNS-name", - }, - }, - Gateway: &Gateway{ - Profile: "Small", - VPNClientAddressPoolPrefixes: []string{ - "CIDR-identifier", - }, - LocalNetworkSiteRef: LocalNetworkSiteRef{ - Name: "local-site-name", - Connection: LocalNetworkSiteRefConnection{ - Type: "connection-type", - }, - }, - }, - }, - }, - } - - observed, err := input.Serialize() - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestNetworkConfigurationSerializeMinimal(c *C) { - expected := fmt.Sprintf( - "", - XMLNS_NC) - input := NetworkConfiguration{XMLNS: XMLNS_NC} - observed, err := input.Serialize() - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestNetworkConfigurationSerializeSimpleVirtualNetworkSite(c *C) { - expected := dedent.Dedent(` - - - - - - CIDR-identifier - - - - - `) - input := NetworkConfiguration{ - XMLNS: XMLNS_NC, - VirtualNetworkSites: &[]VirtualNetworkSite{ - { - Name: "virtual-network-name", - AffinityGroup: "affinity-group-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - }, - }, - } - observed, err := input.Serialize() - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestCreateAffinityGroup(c *C) { - expected := dedent.Dedent(` - - affinity-group-name - - affinity-group-description - location - `) - - input := CreateAffinityGroup{ - XMLNS: XMLNS, - Name: "affinity-group-name", - Label: "base64-encoded-affinity-group-label", - Description: "affinity-group-description", - Location: "location"} - - observed, err := input.Serialize() - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestNewCreateAffinityGroup(c *C) { - name := "name" - label := "label" - description := "description" - location := "location" - ag := NewCreateAffinityGroup(name, label, description, location) - base64label := base64.StdEncoding.EncodeToString([]byte(label)) - c.Check(ag.XMLNS, Equals, XMLNS) - c.Check(ag.Name, Equals, name) - c.Check(ag.Label, Equals, base64label) - c.Check(ag.Description, Equals, description) - c.Check(ag.Location, Equals, location) -} - -func (suite *xmlSuite) TestUpdateAffinityGroup(c *C) { - expected := dedent.Dedent(` - - - affinity-group-description - `) - input := UpdateAffinityGroup{ - XMLNS: XMLNS, - Label: "base64-encoded-affinity-group-label", - Description: "affinity-group-description"} - - observed, err := input.Serialize() - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestNewUpdateAffinityGroup(c *C) { - label := "label" 
- description := "description" - ag := NewUpdateAffinityGroup(label, description) - base64label := base64.StdEncoding.EncodeToString([]byte(label)) - c.Check(ag.XMLNS, Equals, XMLNS) - c.Check(ag.Label, Equals, base64label) - c.Check(ag.Description, Equals, description) -} - -func (suite *xmlSuite) TestNetworkConfigurationDeserialize(c *C) { - // Template from - // http://msdn.microsoft.com/en-us/library/windowsazure/jj157196.aspx - input := ` - - - - - - - - - - - CIDR-identifier - - IPV4-address-of-the-vpn-gateway - - - - - - - CIDR-identifier - - - - CIDR-identifier - - - - - - - - CIDR-identifier - - - - - - - - - - - ` - expected := &NetworkConfiguration{ - XMLNS: XMLNS_NC, - DNS: &[]VirtualNetDnsServer{ - { - Name: "dns-server-name", - IPAddress: "IPV4-address-of-the-server", - }, - }, - LocalNetworkSites: &[]LocalNetworkSite{ - { - Name: "local-site-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - VPNGatewayAddress: "IPV4-address-of-the-vpn-gateway", - }, - }, - VirtualNetworkSites: &[]VirtualNetworkSite{ - { - Name: "virtual-network-name", - AffinityGroup: "affinity-group-name", - AddressSpacePrefixes: []string{ - "CIDR-identifier", - }, - Subnets: &[]Subnet{ - { - Name: "subnet-name", - AddressPrefix: "CIDR-identifier", - }, - }, - DnsServersRef: &[]DnsServerRef{ - { - Name: "primary-DNS-name", - }, - }, - Gateway: &Gateway{ - Profile: "Small", - VPNClientAddressPoolPrefixes: []string{ - "CIDR-identifier", - }, - LocalNetworkSiteRef: LocalNetworkSiteRef{ - Name: "local-site-name", - Connection: LocalNetworkSiteRefConnection{ - Type: "connection-type", - }, - }, - }, - }, - }, - } - networkConfig := &NetworkConfiguration{} - err := networkConfig.Deserialize([]byte(input)) - c.Assert(err, IsNil) - // Check sub-components of the overall structure. - c.Check(networkConfig.DNS, DeepEquals, expected.DNS) - c.Check(networkConfig.LocalNetworkSites, DeepEquals, expected.LocalNetworkSites) - c.Check(networkConfig.VirtualNetworkSites, DeepEquals, expected.VirtualNetworkSites) - // Check the whole thing. - c.Check(networkConfig, DeepEquals, expected) -} - -func (suite *xmlSuite) TestDeployment(c *C) { - deployment := makeDeployment() - dns := deployment.DNS[0] - role := deployment.RoleList[0] - config := role.ConfigurationSets[0] - - xml, err := deployment.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - %s - %s - - - - - %s - PersistentVMRole - - - %s - %s - %s - %s - %v - %s - - - %s - - - %s - - - - %s -
<Address>%s</Address>
- </DnsServer>
- </DnsServers>
- </Dns>
- </Deployment>
-
`) - expected := fmt.Sprintf(template, deployment.Name, - deployment.DeploymentSlot, deployment.Label, - role.RoleName, config.ConfigurationSetType, config.Hostname, - config.Username, config.Password, config.DisableSSHPasswordAuthentication, - config.CustomData, role.RoleSize, - deployment.VirtualNetworkName, dns.Name, dns.Address) - c.Check(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -// From http://msdn.microsoft.com/en-us/library/windowsazure/ee460804.aspx -var deploymentXML = ` - - - name-of-deployment - current-deployment-environment - deployment-id - status-of-deployment - - http://name-of-deployment.cloudapp.net - base-64-encoded-configuration-file - - - name-of-role - name-of-role-instance - status-of-role-instance - update-domain-of-role-instance - fault-domain-of-role-instance - size-of-role-instance - state-of-role-instance - error-code-returned-for-role-instance - ip-address-of-role-instance - - - name-of-endpoint - virtual-ip-address-of-instance-endpoint - 1234 - 5678 - protocol-of-instance-endpoint - - - state-of-role-instance - dns-name-of-service - cert-thumbprint-for-remote-access - - - - auto|manual - before|during - n - - number-of-upgrade-domains-in-deployment - - - name-of-role - - - LinuxProvisioningConfiguration - false - - - - - name-of-role - PersistentVMRole - - - NetworkConfiguration - - - 2222 - 111 - TCP - test-name - - - - name-of-subnet - - - - name-of-availability-set - - - host-caching-mode-of-data-disk - name-of-data-disk - 31 - 42 - path-to-vhd - - - - host-caching-mode-of-os-disk - name-of-os-disk - path-to-vhd - image-used-to-create-os-disk - operating-system-on-os-disk - - size-of-instance - - - sdk-version-used-to-create-package - status-of-deployment-write-allowed - rollback-operation-allowed - name-of-virtual-network - - - - name-of-dns-server -
<Address>address-of-dns-server</Address>
- </DnsServer>
- </DnsServers>
- </Dns>
- <ExtendedProperties>
- <ExtendedProperty>
- <Name>name-of-property</Name>
- <Value>value-of-property</Value>
- </ExtendedProperty>
- </ExtendedProperties>
- <PersistentVMDowntime>
- <StartTime>start-of-downtime</StartTime>
- <EndTime>end-of-downtime</EndTime>
- <Status>status-of-downtime</Status>
- </PersistentVMDowntime>
- <VirtualIPs>
- <VirtualIP>
- <Address>virtual-ip-address-of-deployment</Address>
- </VirtualIP>
- </VirtualIPs>
- <ExtensionConfiguration>
- <AllRoles>
- <Extension>
- <Id>identifier-of-extension</Id>
- ...
- </Extension>
- </AllRoles>
- <NamedRoles>
- <Role>
- <RoleName>role_name1</RoleName>
- <Extensions>
- <Extension>
- <Id>identifier-of-extension</Id>
- ...
- </Extension>
- </Extensions>
- </Role>
- </NamedRoles>
- </ExtensionConfiguration>
- </Deployment>
-` - -func (suite *xmlSuite) TestDeploymentWRTGetDeployment(c *C) { - expected := &Deployment{ - XMLNS: "http://schemas.microsoft.com/windowsazure", - Name: "name-of-deployment", - DeploymentSlot: "current-deployment-environment", - PrivateID: "deployment-id", - Status: "status-of-deployment", - Label: "base64-encoded-name-of-deployment", - URL: "http://name-of-deployment.cloudapp.net", - Configuration: "base-64-encoded-configuration-file", - RoleInstanceList: []RoleInstance{ - { - RoleName: "name-of-role", - InstanceName: "name-of-role-instance", - InstanceStatus: "status-of-role-instance", - InstanceUpgradeDomain: "update-domain-of-role-instance", - InstanceFaultDomain: "fault-domain-of-role-instance", - InstanceSize: "size-of-role-instance", - InstanceStateDetails: "state-of-role-instance", - InstanceErrorCode: "error-code-returned-for-role-instance", - IPAddress: "ip-address-of-role-instance", - InstanceEndpoints: []InstanceEndpoint{ - { - Name: "name-of-endpoint", - VIP: "virtual-ip-address-of-instance-endpoint", - PublicPort: 1234, - LocalPort: 5678, - Protocol: "protocol-of-instance-endpoint", - }, - }, - PowerState: "state-of-role-instance", - HostName: "dns-name-of-service", - RemoteAccessCertificateThumbprint: "cert-thumbprint-for-remote-access", - }, - }, - UpgradeDomainCount: "number-of-upgrade-domains-in-deployment", - RoleList: []Role{ - { - RoleName: "name-of-role", - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: "LinuxProvisioningConfiguration", - DisableSSHPasswordAuthentication: "false", - }, - }, - }, - { - RoleName: "name-of-role", - RoleType: "PersistentVMRole", - ConfigurationSets: []ConfigurationSet{ - { - ConfigurationSetType: CONFIG_SET_NETWORK, - InputEndpoints: &[]InputEndpoint{ - { - Name: "test-name", - Port: 2222, - LocalPort: 111, - Protocol: "TCP", - }, - }, - SubnetNames: &[]string{"name-of-subnet"}, - }, - }, - AvailabilitySetName: "name-of-availability-set", - DataVirtualHardDisks: &[]DataVirtualHardDisk{ - { - HostCaching: "host-caching-mode-of-data-disk", - DiskName: "name-of-data-disk", - LUN: 31, - LogicalDiskSizeInGB: 42, - MediaLink: "path-to-vhd", - }, - }, - OSVirtualHardDisk: &OSVirtualHardDisk{ - HostCaching: "host-caching-mode-of-os-disk", - DiskName: "name-of-os-disk", - MediaLink: "path-to-vhd", - SourceImageName: "image-used-to-create-os-disk", - OS: "operating-system-on-os-disk", - }, - RoleSize: "size-of-instance", - }, - }, - SDKVersion: "sdk-version-used-to-create-package", - Locked: "status-of-deployment-write-allowed", - RollbackAllowed: "rollback-operation-allowed", - VirtualNetworkName: "name-of-virtual-network", - DNS: []DnsServer{ - { - Name: "name-of-dns-server", - Address: "address-of-dns-server", - }, - }, - ExtendedProperties: []ExtendedProperty{ - { - Name: "name-of-property", - Value: "value-of-property", - }, - }, - } - observed := &Deployment{} - err := observed.Deserialize([]byte(deploymentXML)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} - -func (suite *xmlSuite) TestDeploymentGetFQDNExtractsFQDN(c *C) { - deployment := &Deployment{} - err := deployment.Deserialize([]byte(deploymentXML)) - c.Assert(err, IsNil) - fqdn, err := deployment.GetFQDN() - c.Assert(err, IsNil) - c.Assert(fqdn, Equals, "name-of-deployment.cloudapp.net") -} - -var deploymentXMLEmptyURL = ` - - - name-of-deployment - - - -` - -func (suite *xmlSuite) TestDeploymentGetFQDNErrorsIfURLIsEmpty(c *C) { - deployment := &Deployment{} - err := deployment.Deserialize([]byte(deploymentXMLEmptyURL)) - c.Assert(err, 
IsNil) - _, err = deployment.GetFQDN() - c.Check(err, ErrorMatches, ".*URL field is empty.*") -} - -var deploymentXMLFaultyURL = ` - - - name-of-deployment - - %z - -` - -func (suite *xmlSuite) TestDeploymentGetFQDNErrorsIfURLCannotBeParsed(c *C) { - deployment := &Deployment{} - err := deployment.Deserialize([]byte(deploymentXMLFaultyURL)) - c.Assert(err, IsNil) - _, err = deployment.GetFQDN() - c.Check(err, ErrorMatches, ".*invalid URL.*") -} - -func (suite *xmlSuite) TestNewDeploymentForCreateVMDeployment(c *C) { - name := "deploymentName" - deploymentSlot := "staging" - label := "deploymentLabel" - vhd := NewOSVirtualHardDisk("hostCaching", "diskLabel", "diskName", "mediaLink", "sourceImageName", "os") - roles := []Role{*NewLinuxRole("size", "name", vhd, []ConfigurationSet{})} - virtualNetworkName := "network" - - deployment := NewDeploymentForCreateVMDeployment(name, deploymentSlot, label, roles, virtualNetworkName) - - c.Check(deployment.XMLNS, Equals, XMLNS) - c.Check(deployment.XMLNS_I, Equals, XMLNS_I) - c.Check(deployment.Name, Equals, name) - c.Check(deployment.DeploymentSlot, Equals, deploymentSlot) - c.Check(deployment.RoleList, DeepEquals, roles) - decodedLabel, err := base64.StdEncoding.DecodeString(deployment.Label) - c.Assert(err, IsNil) - c.Check(string(decodedLabel), Equals, label) - c.Check(deployment.VirtualNetworkName, Equals, virtualNetworkName) -} - -func (suite *xmlSuite) TestCreateVirtualHardDiskMediaLinkHappyPath(c *C) { - mediaLink := CreateVirtualHardDiskMediaLink("storage-name", "storage/path") - c.Check(mediaLink, Equals, "http://storage-name.blob.core.windows.net/storage/path") -} - -func (suite *xmlSuite) TestCreateVirtualHardDiskMediaLinkChecksParams(c *C) { - c.Check( - func() { CreateVirtualHardDiskMediaLink("foo^bar", "valid") }, - PanicMatches, "'foo\\^bar' contains URI special characters") - c.Check( - func() { CreateVirtualHardDiskMediaLink("valid", "a/foo^bar/test") }, - PanicMatches, "'foo\\^bar' contains URI special characters") -} - -func (suite *xmlSuite) TestCreateStorageServiceInput(c *C) { - s := makeCreateStorageServiceInput() - extProperty := s.ExtendedProperties[0] - xml, err := s.Serialize() - c.Assert(err, IsNil) - template := dedent.Dedent(` - - %s - - %s - %s - %s - %s - - - %s - %s - - - `) - expected := fmt.Sprintf(template, s.ServiceName, s.Label, s.Description, - s.Location, s.AffinityGroup, s.GeoReplicationEnabled, extProperty.Name, - extProperty.Value) - c.Assert(strings.TrimSpace(xml), Equals, strings.TrimSpace(expected)) -} - -// -// Tests for Unmarshallers -// - -func (suite *xmlSuite) TestStorageServicesUnmarshal(c *C) { - inputTemplate := ` - - - - %s - %s - - %s - %s - - %s - - %s - %s - %s - - %s - %s - %s - %s - %s - %s - - - %s - %s - - - %s - %s - - - - - ` - url := MakeRandomString(10) - servicename := MakeRandomString(10) - desc := MakeRandomString(10) - affinity := MakeRandomString(10) - label := MakeRandomString(10) - status := MakeRandomString(10) - blobEndpoint := MakeRandomString(10) - queueEndpoint := MakeRandomString(10) - tableEndpoint := MakeRandomString(10) - geoRepl := BoolToString(MakeRandomBool()) - geoRegion := MakeRandomString(10) - statusPrimary := MakeRandomString(10) - failoverTime := MakeRandomString(10) - geoSecRegion := MakeRandomString(10) - statusSec := MakeRandomString(10) - p1Name := MakeRandomString(10) - p1Val := MakeRandomString(10) - p2Name := MakeRandomString(10) - p2Val := MakeRandomString(10) - - input := fmt.Sprintf(inputTemplate, url, servicename, desc, affinity, - label, status, 
blobEndpoint, queueEndpoint, tableEndpoint, geoRepl, - geoRegion, statusPrimary, failoverTime, geoSecRegion, statusSec, - p1Name, p1Val, p2Name, p2Val) - data := []byte(input) - - services := &StorageServices{} - err := services.Deserialize(data) - c.Assert(err, IsNil) - - c.Check(len(services.StorageServices), Equals, 1) - s := services.StorageServices[0] - - // Oh jeez, here we go.... - c.Check(s.URL, Equals, url) - c.Check(s.ServiceName, Equals, servicename) - c.Check(s.Description, Equals, desc) - c.Check(s.AffinityGroup, Equals, affinity) - c.Check(s.Label, Equals, label) - c.Check(s.Status, Equals, status) - c.Check(s.GeoReplicationEnabled, Equals, geoRepl) - c.Check(s.GeoPrimaryRegion, Equals, geoRegion) - c.Check(s.StatusOfPrimary, Equals, statusPrimary) - c.Check(s.LastGeoFailoverTime, Equals, failoverTime) - c.Check(s.GeoSecondaryRegion, Equals, geoSecRegion) - c.Check(s.StatusOfSecondary, Equals, statusSec) - - endpoints := s.Endpoints - c.Check(len(endpoints), Equals, 3) - c.Check(endpoints[0], Equals, blobEndpoint) - c.Check(endpoints[1], Equals, queueEndpoint) - c.Check(endpoints[2], Equals, tableEndpoint) - - properties := s.ExtendedProperties - c.Check(properties[0].Name, Equals, p1Name) - c.Check(properties[0].Value, Equals, p1Val) - c.Check(properties[1].Name, Equals, p2Name) - c.Check(properties[1].Value, Equals, p2Val) -} - -func (suite *xmlSuite) TestBlobEnumerationResuts(c *C) { - input := ` - - - prefix - marker - maxresults - delimiter - - - blob-name - snapshot-date-time - - last-modified - etag - size-in-bytes - blob-content-type - - - - - sequence-number - blobtype - leasestatus - leasestate - leasesduration - id - copystatus - copysource - copyprogress - copycompletiontime - copydesc - - - metadataname1 - metadataname2 - - - - blob-prefix - - - - ` - data := []byte(input) - r := &BlobEnumerationResults{} - err := r.Deserialize(data) - c.Assert(err, IsNil) - c.Check(r.ContainerName, Equals, "http://myaccount.blob.core.windows.net/mycontainer") - c.Check(r.Prefix, Equals, "prefix") - c.Check(r.Marker, Equals, "marker") - c.Check(r.MaxResults, Equals, "maxresults") - c.Check(r.Delimiter, Equals, "delimiter") - c.Check(r.NextMarker, Equals, "") - b := r.Blobs[0] - c.Check(b.Name, Equals, "blob-name") - c.Check(b.Snapshot, Equals, "snapshot-date-time") - c.Check(b.LastModified, Equals, "last-modified") - c.Check(b.ETag, Equals, "etag") - c.Check(b.ContentLength, Equals, "size-in-bytes") - c.Check(b.ContentType, Equals, "blob-content-type") - c.Check(b.BlobSequenceNumber, Equals, "sequence-number") - c.Check(b.BlobType, Equals, "blobtype") - c.Check(b.LeaseStatus, Equals, "leasestatus") - c.Check(b.LeaseState, Equals, "leasestate") - c.Check(b.LeaseDuration, Equals, "leasesduration") - c.Check(b.CopyID, Equals, "id") - c.Check(b.CopyStatus, Equals, "copystatus") - c.Check(b.CopySource, Equals, "copysource") - c.Check(b.CopyProgress, Equals, "copyprogress") - c.Check(b.CopyCompletionTime, Equals, "copycompletiontime") - c.Check(b.CopyStatusDescription, Equals, "copydesc") - m1 := b.Metadata.Items[0] - m2 := b.Metadata.Items[1] - c.Check(m1.Name(), Equals, "MetaName1") - c.Check(m1.Value, Equals, "metadataname1") - c.Check(m2.Name(), Equals, "MetaName2") - c.Check(m2.Value, Equals, "metadataname2") - prefix := r.BlobPrefixes[0] - c.Check(prefix, Equals, "blob-prefix") -} - -func (suite *xmlSuite) TestStorageAccountKeysUnmarshal(c *C) { - template := ` - - - %s - - %s - %s - - ` - url := MakeRandomString(10) - key1 := MakeRandomString(10) - key2 := MakeRandomString(10) - 
input := fmt.Sprintf(template, url, key1, key2) - data := []byte(input) - - keys := &StorageAccountKeys{} - err := keys.Deserialize(data) - c.Assert(err, IsNil) - c.Check(keys.URL, Equals, url) - c.Check(keys.Primary, Equals, key1) - c.Check(keys.Secondary, Equals, key2) -} - -// Tests for object factory functions. - -func (suite *xmlSuite) TestNewRole(c *C) { - rolesize := MakeRandomString(10) - rolename := MakeRandomString(10) - config := makeLinuxProvisioningConfiguration() - configset := []ConfigurationSet{*config} - vhd := NewOSVirtualHardDisk("hostCaching", "diskLabel", "diskName", "mediaLink", "sourceImageName", "os") - - role := NewLinuxRole(rolesize, rolename, vhd, configset) - c.Check(role.RoleSize, Equals, rolesize) - c.Check(role.RoleName, Equals, rolename) - c.Check(role.ConfigurationSets, DeepEquals, configset) - c.Check(role.RoleType, Equals, "PersistentVMRole") -} - -func (suite *xmlSuite) TestNewLinuxProvisioningConfiguration(c *C) { - hostname := MakeRandomString(10) - username := MakeRandomString(10) - password := MakeRandomString(10) - disablessh := BoolToString(MakeRandomBool()) - customdata := MakeRandomString(10) - - config := NewLinuxProvisioningConfigurationSet( - hostname, username, password, customdata, disablessh) - c.Check(config.Hostname, Equals, hostname) - c.Check(config.Username, Equals, username) - c.Check(config.Password, Equals, password) - c.Check(config.CustomData, Equals, customdata) - c.Check(config.DisableSSHPasswordAuthentication, Equals, disablessh) - c.Check(config.ConfigurationSetType, Equals, "LinuxProvisioningConfiguration") -} - -func (suite *xmlSuite) TestNewNetworkConfiguration(c *C) { - name := "name1" - port := 242 - localPort := 922 - protocol := "TCP" - bName := "bname1" - inputendpoint := InputEndpoint{ - LoadBalancedEndpointSetName: bName, LocalPort: localPort, Name: name, Port: port, Protocol: protocol} - - config := NewNetworkConfigurationSet([]InputEndpoint{inputendpoint}, []string{"subnet-1", "subnet-2"}) - inputEndpoints := *config.InputEndpoints - c.Check(inputEndpoints, HasLen, 1) - inputEndpoint := inputEndpoints[0] - c.Check(inputEndpoint.Name, Equals, name) - c.Check(inputEndpoint.Port, Equals, port) - c.Check(inputEndpoint.Protocol, Equals, protocol) - c.Check(inputEndpoint.LoadBalancedEndpointSetName, Equals, bName) - c.Check(inputEndpoint.LocalPort, Equals, localPort) - c.Check(config.ConfigurationSetType, Equals, CONFIG_SET_NETWORK) - c.Check(*config.SubnetNames, DeepEquals, []string{"subnet-1", "subnet-2"}) -} - -func (suite *xmlSuite) TestNewOSVirtualHardDisk(c *C) { - var hostcaching HostCachingType = "ReadWrite" - disklabel := MakeRandomString(10) - diskname := MakeRandomString(10) - MediaLink := MakeRandomString(10) - SourceImageName := MakeRandomString(10) - OS := MakeRandomString(10) - - disk := NewOSVirtualHardDisk( - hostcaching, disklabel, diskname, MediaLink, SourceImageName, OS) - c.Check(disk.HostCaching, Equals, string(hostcaching)) - c.Check(disk.DiskLabel, Equals, disklabel) - c.Check(disk.DiskName, Equals, diskname) - c.Check(disk.MediaLink, Equals, MediaLink) - c.Check(disk.SourceImageName, Equals, SourceImageName) - c.Check(disk.OS, Equals, OS) -} - -// Properties XML subtree for ListContainers Storage API call. 
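The ListContainers tests that follow all lean on encoding/xml's declarative field mapping: each struct field carries a tag naming the XML element it absorbs. A minimal, self-contained sketch of that mechanism, using a hypothetical stand-in type rather than the package's actual definitions:

package main

import (
	"encoding/xml"
	"fmt"
)

// Hypothetical reduced Properties type; the real one lives in the
// package under test and may use different tags.
type Properties struct {
	LastModified  string `xml:"Last-Modified"`
	ETag          string `xml:"Etag"`
	LeaseStatus   string `xml:"LeaseStatus"`
	LeaseState    string `xml:"LeaseState"`
	LeaseDuration string `xml:"LeaseDuration"`
}

func main() {
	input := `<Properties>
		<Last-Modified>date/time-value</Last-Modified>
		<Etag>etag-value</Etag>
		<LeaseStatus>lease-status-value</LeaseStatus>
		<LeaseState>lease-state-value</LeaseState>
		<LeaseDuration>lease-duration-value</LeaseDuration>
	</Properties>`
	var p Properties
	if err := xml.Unmarshal([]byte(input), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // each field now holds the matching element's text
}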
-func (suite *xmlSuite) TestProperties(c *C) { - input := ` - - - date/time-value - etag-value - lease-status-value - lease-state-value - lease-duration-value - ` - observed := &Properties{} - err := xml.Unmarshal([]byte(input), observed) - c.Assert(err, IsNil) - - expected := &Properties{ - LastModified: "date/time-value", - ETag: "etag-value", - LeaseStatus: "lease-status-value", - LeaseState: "lease-state-value", - LeaseDuration: "lease-duration-value", - } - - c.Assert(observed, DeepEquals, expected) -} - -// Metadata XML subtree for ListContainers Storage API call. -func (suite *xmlSuite) TestMetadata(c *C) { - input := ` - - - metadata-value - ` - observed := &Metadata{} - err := xml.Unmarshal([]byte(input), observed) - c.Assert(err, IsNil) - - expected := &Metadata{ - Items: []MetadataItem{ - { - XMLName: xml.Name{Local: "metadata-name"}, - Value: "metadata-value", - }, - }, - } - - c.Assert(observed, DeepEquals, expected) -} - -// Container XML subtree for ListContainers Storage API call. -func (suite *xmlSuite) TestContainer(c *C) { - input := ` - - - name-value - - date/time-value - etag-value - lease-status-value - lease-state-value - lease-duration-value - - - metadata-value - - ` - observed := &Container{} - err := xml.Unmarshal([]byte(input), observed) - c.Assert(err, IsNil) - - expected := &Container{ - XMLName: xml.Name{Local: "Container"}, - Name: "name-value", - Properties: Properties{ - LastModified: "date/time-value", - ETag: "etag-value", - LeaseStatus: "lease-status-value", - LeaseState: "lease-state-value", - LeaseDuration: "lease-duration-value", - }, - Metadata: Metadata{ - Items: []MetadataItem{ - { - XMLName: xml.Name{Local: "metadata-name"}, - Value: "metadata-value", - }, - }, - }, - } - - c.Assert(observed, DeepEquals, expected) -} - -// EnumerationResults XML tree for ListContainers Storage API call. 
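The Metadata handling exercised here depends on two encoding/xml features working together: a slice field tagged `xml:",any"` collects child elements regardless of their names, and an XMLName field of type xml.Name records each element's own tag. A minimal sketch, with a hypothetical Item type standing in for the package's MetadataItem:

package main

import (
	"encoding/xml"
	"fmt"
)

// Hypothetical type: every child of <Metadata>, whatever its name,
// unmarshals into one Item.
type Item struct {
	XMLName xml.Name // receives the element's tag
	Value   string   `xml:",chardata"` // receives the element's text
}

type Metadata struct {
	Items []Item `xml:",any"` // match any child element
}

func main() {
	input := `<Metadata>
		<MetaName1>metadataname1</MetaName1>
		<MetaName2>metadataname2</MetaName2>
	</Metadata>`
	var m Metadata
	if err := xml.Unmarshal([]byte(input), &m); err != nil {
		panic(err)
	}
	for _, it := range m.Items {
		fmt.Printf("%s = %s\n", it.XMLName.Local, it.Value)
	}
}

This is the same mechanism the BlobEnumerationResults test above relies on to recover MetaName1 and MetaName2 from blob metadata.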
-func (suite *xmlSuite) TestContainerEnumerationResults(c *C) { - input := ` - - - prefix-value - marker-value - max-results-value - - - name-value - - date/time-value - etag-value - lease-status-value - lease-state-value - lease-duration-value - - - metadata-value - - - - next-marker-value - ` - observed := &ContainerEnumerationResults{} - err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - - expected := &ContainerEnumerationResults{ - XMLName: xml.Name{Local: "EnumerationResults"}, - Prefix: "prefix-value", - Marker: "marker-value", - MaxResults: "max-results-value", - Containers: []Container{ - { - XMLName: xml.Name{Local: "Container"}, - Name: "name-value", - Properties: Properties{ - LastModified: "date/time-value", - ETag: "etag-value", - LeaseStatus: "lease-status-value", - LeaseState: "lease-state-value", - LeaseDuration: "lease-duration-value", - }, - Metadata: Metadata{ - Items: []MetadataItem{ - { - XMLName: xml.Name{Local: "metadata-name"}, - Value: "metadata-value", - }, - }, - }, - }, - }, - NextMarker: "next-marker-value", - } - - c.Assert(observed, DeepEquals, expected) - c.Assert(observed.Containers[0].Metadata.Items[0].Name(), Equals, "metadata-name") -} - -func (suite *xmlSuite) TestHostedService(c *C) { - input := ` - - - hosted-service-url - hosted-service-name - - description - name-of-affinity-group - location-of-service - - current-status-of-service - creation-date-of-service - last-modification-date-of-service - - - name-of-property - value-of-property - - - - - - name-of-deployment - - - - ` - expected := &HostedService{ - XMLNS: "http://schemas.microsoft.com/windowsazure", - HostedServiceDescriptor: HostedServiceDescriptor{ - URL: "hosted-service-url", - ServiceName: "hosted-service-name", - Description: "description", - AffinityGroup: "name-of-affinity-group", - Location: "location-of-service", - Label: "base-64-encoded-name-of-service", - Status: "current-status-of-service", - DateCreated: "creation-date-of-service", - DateLastModified: "last-modification-date-of-service", - ExtendedProperties: []ExtendedProperty{ - { - Name: "name-of-property", - Value: "value-of-property", - }, - }}, - Deployments: []Deployment{{ - XMLNS: "http://schemas.microsoft.com/windowsazure", - Name: "name-of-deployment", - }}, - } - - observed := &HostedService{} - err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} - -func makeHostedServiceDescriptorList(url string) string { - input := ` - - - - %s - hosted-service-name - - description - affinity-group - service-location - - status - date-created - date-modified - - - property-name - property-value - - - - - - ` - return fmt.Sprintf(input, url) -} - -func (suite *xmlSuite) TestHostedServiceDescriptorList(c *C) { - input := makeHostedServiceDescriptorList("hosted-service-address") - expected := &HostedServiceDescriptorList{ - XMLName: xml.Name{ - Space: "http://schemas.microsoft.com/windowsazure", - Local: "HostedServices"}, - XMLNS: "http://schemas.microsoft.com/windowsazure", - HostedServices: []HostedServiceDescriptor{ - { - URL: "hosted-service-address", - ServiceName: "hosted-service-name", - Description: "description", - AffinityGroup: "affinity-group", - Location: "service-location", - Label: "label", - Status: "status", - DateCreated: "date-created", - DateLastModified: "date-modified", - ExtendedProperties: []ExtendedProperty{ - { - Name: "property-name", - Value: "property-value", - }, - }, - }, - }, - } - - observed := &HostedServiceDescriptorList{} 
- err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} - -func (suite *xmlSuite) TestHostedServiceDescriptorGetLabel(c *C) { - serviceDesc := HostedServiceDescriptor{Label: ""} - label := MakeRandomString(10) - base64Label := base64.StdEncoding.EncodeToString([]byte(label)) - serviceDesc.Label = base64Label - decodedLabel, err := serviceDesc.GetLabel() - c.Assert(err, IsNil) - c.Check(decodedLabel, DeepEquals, label) -} - -// TestCreateStorageService demonstrates that CreateHostedService is a -// suitable container for the CreateHostedService XML tree that are required -// for the Create Hosted Service API call. -func (suite *xmlSuite) TestCreateHostedService(c *C) { - // From http://msdn.microsoft.com/en-us/library/windowsazure/gg441304.aspx - input := ` - - - service-name - - description - location - affinity-group - - - property-name - property-value - - - - ` - expected := &CreateHostedService{ - XMLNS: XMLNS, - ServiceName: "service-name", - Label: "base64-encoded-service-label", - Description: "description", - Location: "location", - AffinityGroup: "affinity-group", - ExtendedProperties: []ExtendedProperty{ - { - Name: "property-name", - Value: "property-value", - }, - }, - } - observed := &CreateHostedService{} - err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} - -func (suite *xmlSuite) TestNewCreateHostedServiceWithLocation(c *C) { - serviceName := "serviceName" - label := "label" - location := "location" - createdHostedService := NewCreateHostedServiceWithLocation(serviceName, label, location) - base64label := base64.StdEncoding.EncodeToString([]byte(label)) - c.Check(createdHostedService.ServiceName, DeepEquals, serviceName) - c.Check(createdHostedService.Label, DeepEquals, base64label) - c.Check(createdHostedService.Location, DeepEquals, location) -} - -func (suite *xmlSuite) TestNewCreateStorageServiceInputWithLocation(c *C) { - cssi := NewCreateStorageServiceInputWithLocation("name", "label", "location", "false") - c.Check(cssi.XMLNS, Equals, XMLNS) - c.Check(cssi.ServiceName, Equals, "name") - c.Check(cssi.Label, Equals, base64.StdEncoding.EncodeToString([]byte("label"))) - c.Check(cssi.Location, Equals, "location") - c.Check(cssi.GeoReplicationEnabled, Equals, "false") -} - -func (*xmlSuite) TestAvailabilityResponse(c *C) { - input := ` - - - name-availability - reason - ` - expected := &AvailabilityResponse{ - XMLNS: XMLNS, - Result: "name-availability", - Reason: "reason", - } - observed := &AvailabilityResponse{} - err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} - -func makeUpdateHostedService(label, description string, property ExtendedProperty) string { - template := dedent.Dedent(` - - - %s - - - %s - %s - - - `) - return fmt.Sprintf(template, label, description, property.Name, property.Value) -} - -func (suite *xmlSuite) TestUpdateHostedService(c *C) { - label := MakeRandomString(10) - description := MakeRandomString(10) - property := ExtendedProperty{ - Name: "property-name", - Value: "property-value", - } - expected := makeUpdateHostedService(label, description, property) - input := UpdateHostedService{ - XMLNS: XMLNS, - Label: label, - Description: description, - ExtendedProperties: []ExtendedProperty{ - property, - }, - } - - observed, err := input.Serialize() - c.Assert(err, IsNil) - c.Assert(strings.TrimSpace(observed), Equals, strings.TrimSpace(expected)) -} - -func (suite 
*xmlSuite) TestNewUpdateHostedService(c *C) { - label := MakeRandomString(10) - description := MakeRandomString(10) - properties := []ExtendedProperty{ - { - Name: MakeRandomString(10), - Value: MakeRandomString(10), - }, - } - updateHostedService := NewUpdateHostedService( - label, description, properties) - c.Check( - updateHostedService.Label, Equals, - base64.StdEncoding.EncodeToString([]byte(label))) - c.Check(updateHostedService.Description, Equals, description) - c.Check(updateHostedService.ExtendedProperties, DeepEquals, properties) -} - -func (suite *xmlSuite) TestBlockListSerialize(c *C) { - blockList := &BlockList{ - XMLName: xml.Name{Local: "BlockList"}, - } - blockList.Add(BlockListCommitted, "first-base64-encoded-block-id") - blockList.Add(BlockListUncommitted, "second-base64-encoded-block-id") - blockList.Add(BlockListLatest, "third-base64-encoded-block-id") - observed, err := blockList.Serialize() - c.Assert(err, IsNil) - expected := dedent.Dedent(` - - Zmlyc3QtYmFzZTY0LWVuY29kZWQtYmxvY2staWQ= - c2Vjb25kLWJhc2U2NC1lbmNvZGVkLWJsb2NrLWlk - dGhpcmQtYmFzZTY0LWVuY29kZWQtYmxvY2staWQ= - `) - c.Assert(strings.TrimSpace(string(observed)), Equals, strings.TrimSpace(expected)) -} - -func (suite *xmlSuite) TestGetBlockListDeserialize(c *C) { - input := ` - - - - - BlockId001 - 4194304 - - - - - BlockId002 - 1024 - - - ` - observed := GetBlockList{} - err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - expected := GetBlockList{ - XMLName: xml.Name{Local: "BlockList"}, - CommittedBlocks: []Block{ - { - Name: "BlockId001", - Size: "4194304"}, - }, - UncommittedBlocks: []Block{ - { - Name: "BlockId002", - Size: "1024"}, - }, - } - c.Check(observed, DeepEquals, expected) -} - -func makeOperationXML(operationType string) string { - XMLTemplate := dedent.Dedent(` - <%s xmlns="http://schemas.microsoft.com/windowsazure" xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> - %s - `) - return fmt.Sprintf(XMLTemplate, operationType, operationType, operationType) -} - -func (suite *managementAPISuite) TestStartRoleOperation(c *C) { - expectedXML := makeOperationXML("StartRoleOperation") - xml, err := marshalXML(startRoleOperation) - c.Assert(err, IsNil) - c.Check(strings.TrimSpace(string(xml)), Equals, strings.TrimSpace(expectedXML)) -} - -func (suite *managementAPISuite) TestRestartRoleOperation(c *C) { - expectedXML := makeOperationXML("RestartRoleOperation") - xml, err := marshalXML(restartRoleOperation) - c.Assert(err, IsNil) - c.Check(strings.TrimSpace(string(xml)), Equals, strings.TrimSpace(expectedXML)) -} - -func (suite *managementAPISuite) TestShutdownRoleOperation(c *C) { - expectedXML := makeOperationXML("ShutdownRoleOperation") - xml, err := marshalXML(shutdownRoleOperation) - c.Assert(err, IsNil) - c.Check(strings.TrimSpace(string(xml)), Equals, strings.TrimSpace(expectedXML)) -} - -// TestOSImageWRTAddOSImage demonstrates the OSImage is a suitable container -// for the OSImage XML trees that are required for the Add OS Image API call. 
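The three OSImage tests that follow (Add, List, and Update OS Image) make one point between them: encoding/xml leaves a struct field untouched when the document has no matching element, so a single OSImage type can carry all three payload shapes. A minimal sketch of that property, with a hypothetical two-field type:

package main

import (
	"encoding/xml"
	"fmt"
)

// Hypothetical reduced image type; the real OSImage has many more fields.
type Image struct {
	Label string `xml:"Label"`
	OS    string `xml:"OS"`
}

func main() {
	full := `<OSImage><Label>image-label</Label><OS>Linux</OS></OSImage>`
	partial := `<OSImage><Label>image-label</Label></OSImage>`
	for _, in := range []string{full, partial} {
		var img Image
		if err := xml.Unmarshal([]byte(in), &img); err != nil {
			panic(err)
		}
		// For the partial payload, OS simply stays at its zero value "".
		fmt.Printf("%+v\n", img)
	}
}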
-func (suite *xmlSuite) TestOSImageWRTAddOSImage(c *C) { - // From http://msdn.microsoft.com/en-us/library/windowsazure/jj157192.aspx - input := ` - - - uri-of-the-containing-blob - image-name - Linux|Windows - image-eula - image-description - image-family - published-date - true/false - true/false - http://www.example.com/privacypolicy.html - http://www.example.com/favicon.png - Small/Large/Medium/ExtraLarge - http://www.example.com/smallfavicon.png - language-of-image - - ` - expected := &OSImage{ - Label: "image-label", - MediaLink: "uri-of-the-containing-blob", - Name: "image-name", - OS: "Linux|Windows", - EULA: "image-eula", - Description: "image-description", - ImageFamily: "image-family", - PublishedDate: "published-date", - IsPremium: "true/false", - ShowInGUI: "true/false", - PrivacyURI: "http://www.example.com/privacypolicy.html", - IconURI: "http://www.example.com/favicon.png", - RecommendedVMSize: "Small/Large/Medium/ExtraLarge", - SmallIconURI: "http://www.example.com/smallfavicon.png", - Language: "language-of-image", - } - - osimage := &OSImage{} - err := osimage.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(osimage, DeepEquals, expected) -} - -// TestOSImageWRTListOSImages demonstrates that OSImage is a suitable -// container for the OSImage XML subtrees that are returned from the List OS -// Images API call. -func (suite *xmlSuite) TestOSImageWRTListOSImages(c *C) { - // From http://msdn.microsoft.com/en-us/library/windowsazure/jj157191.aspx - input := ` - - name-of-the-affinity-group - category-of-the-image - - geo-location-of-the-stored-image - 123.456 - url-of-the-containing-blob - image-name - operating-system-of-the-image - image-eula - image-description - image-family - true|false - published-date - true|false - uri-of-privacy-policy - size-of-the-virtual-machine - publisher-identifier - pricing-details - uri-of-icon - language-of-image - - ` - expected := &OSImage{ - AffinityGroup: "name-of-the-affinity-group", - Category: "category-of-the-image", - Label: "image-description", - Location: "geo-location-of-the-stored-image", - LogicalSizeInGB: 123.456, - MediaLink: "url-of-the-containing-blob", - Name: "image-name", - OS: "operating-system-of-the-image", - EULA: "image-eula", - Description: "image-description", - ImageFamily: "image-family", - ShowInGUI: "true|false", - PublishedDate: "published-date", - IsPremium: "true|false", - PrivacyURI: "uri-of-privacy-policy", - RecommendedVMSize: "size-of-the-virtual-machine", - PublisherName: "publisher-identifier", - PricingDetailLink: "pricing-details", - SmallIconURI: "uri-of-icon", - Language: "language-of-image", - } - - osimage := &OSImage{} - err := osimage.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(osimage, DeepEquals, expected) -} - -// TestOSImageWRTUpdateOSImage demonstrates the OSImage is a suitable -// container for the OSImage XML trees that are required for the Update OS -// Image API call. 
-func (suite *xmlSuite) TestOSImageWRTUpdateOSImage(c *C) { - // From http://msdn.microsoft.com/en-us/library/windowsazure/jj157198.aspx - input := ` - - - image-eula - Image-Description - Image-Family - published-date - true/false - true/false - http://www.example.com/privacypolicy.html - http://www.example.com/favicon.png - Small/Large/Medium/ExtraLarge - http://www.example.com/smallfavicon.png - language-of-image - - ` - expected := &OSImage{ - Label: "image-label", - EULA: "image-eula", - Description: "Image-Description", - ImageFamily: "Image-Family", - PublishedDate: "published-date", - IsPremium: "true/false", - ShowInGUI: "true/false", - PrivacyURI: "http://www.example.com/privacypolicy.html", - IconURI: "http://www.example.com/favicon.png", - RecommendedVMSize: "Small/Large/Medium/ExtraLarge", - SmallIconURI: "http://www.example.com/smallfavicon.png", - Language: "language-of-image", - } - - osimage := &OSImage{} - err := osimage.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(osimage, DeepEquals, expected) -} - -func (suite *xmlSuite) TestOSImageHasLocation(c *C) { - image := &OSImage{ - Location: "East Asia;Southeast Asia;North Europe;West Europe;East US;West US", - } - - var testValues = []struct { - location string - expectedResult bool - }{ - {"East Asia", true}, - {"West US", true}, - {"Unknown location", false}, - } - for _, test := range testValues { - c.Check(image.hasLocation(test.location), Equals, test.expectedResult) - } -} - -func (suite *xmlSuite) TestIsDailyBuild(c *C) { - c.Check((&OSImage{Label: "Ubuntu Server 12.04.2 LTS DAILY"}).isDailyBuild(), Equals, true) - c.Check((&OSImage{Label: "Ubuntu Server 12.04.2 LTS"}).isDailyBuild(), Equals, false) - c.Check((&OSImage{Label: "Ubuntu Server 13.04"}).isDailyBuild(), Equals, false) -} - -func (suite *xmlSuite) TestSortImages(c *C) { - input := ` - - - - - 2012-04-25T00:00:00Z - - - - 2013-02-15T00:00:00Z - - - - 2013-04-13T00:00:00Z - - - - 2013-03-15T00:00:00Z - -` - - images := &Images{} - err := images.Deserialize([]byte(input)) - c.Assert(err, IsNil) - - sort.Sort(images) - labels := []string{ - (*images).Images[0].Label, - (*images).Images[1].Label, - (*images).Images[2].Label, - (*images).Images[3].Label, - } - c.Check(labels, DeepEquals, []string{"Label 3", "Label 4", "Label 2", "Label 1"}) -} - -func (suite *xmlSuite) TestGetLatestUbuntuImage(c *C) { - // This is real-world XML input. 
- input := ` - - - - Canonical - - East Asia;Southeast Asia;North Europe;West Europe;East US;West US - 30 - b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130415-en-us-30GB - Linux - Ubuntu Server 12.04 LTS - 2013-04-15T00:00:00Z - false - Canonical - - - Canonical - - East Asia;Southeast Asia;North Europe;West Europe;East US;West US - 30 - b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_10-amd64-server-20130414-en-us-30GB - Linux - Ubuntu 12.10 - 2013-04-15T00:00:00Z - Canonical - - - Canonical - - East Asia;Southeast Asia;North Europe;West Europe;East US;West US - 30 - fake-name__Ubuntu-13_04-amd64-server-20130423-en-us-30GB - Linux - Ubuntu Server 13.04 - 2013-06-25T00:00:00Z - Canonical - - - Canonical - - East Asia;Southeast Asia;North Europe;West Europe;East US;West US - 30 - b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_04-amd64-server-20130423-en-us-30GB - Linux - Ubuntu Server 13.04 - 2013-04-25T00:00:00Z - Canonical - - - Canonical - - East Asia;Southeast Asia;North Europe;West Europe;East US;West US - 30 - b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_04-amd64-server-20130423-en-us-30GB - Linux - 2013-05-25T00:00:00Z - Ubuntu Server 13.04 - Bogus publisher name - -` - images := &Images{} - err := images.Deserialize([]byte(input)) - c.Assert(err, IsNil) - - var testValues = []struct { - releaseName string - location string - expectedError error - expectedLabel string - }{ - {"13.04", "West US", nil, "Ubuntu Server 13.04"}, - {"12.04", "West US", nil, "Ubuntu Server 12.04.2 LTS"}, - {"bogus-name", "Unknown location", fmt.Errorf("No matching images found"), ""}, - } - for _, test := range testValues { - image, err := images.GetLatestUbuntuImage(test.releaseName, test.location) - c.Check(err, DeepEquals, test.expectedError) - if image != nil { - c.Check(image.Label, Equals, test.expectedLabel) - } - } -} - -func (suite *xmlSuite) TestOperation(c *C) { - // From http://msdn.microsoft.com/en-us/library/windowsazure/ee460783.aspx - input := ` - - - request-id - InProgress|Succeeded|Failed - - 200 - - - error-code - error-message - - - ` - expected := &Operation{ - ID: "request-id", - Status: "InProgress|Succeeded|Failed", - HTTPStatusCode: 200, - ErrorCode: "error-code", - ErrorMessage: "error-message", - } - observed := &Operation{} - err := observed.Deserialize([]byte(input)) - c.Assert(err, IsNil) - c.Assert(observed, DeepEquals, expected) -} === modified file 'src/launchpad.net/tomb/tomb.go' --- src/launchpad.net/tomb/tomb.go 2014-09-11 18:13:12 +0000 +++ src/launchpad.net/tomb/tomb.go 2016-03-22 15:18:22 +0000 @@ -26,9 +26,7 @@ // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// The tomb package helps with clean goroutine termination. -// -// See the Tomb type for details. +// PROJECT MOVED: https://gopkg.in/tomb.v1 package tomb import (
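The tomb change above is the entire substance of the final hunk: the package body is unchanged, and the new doc comment points at the project's new home. Code that vendored launchpad.net/tomb would migrate by importing gopkg.in/tomb.v1 instead; a minimal usage sketch against that package's v1 API (Kill, Dying, Done, Wait):

package main

import (
	"time"

	"gopkg.in/tomb.v1"
)

func main() {
	var t tomb.Tomb
	go func() {
		defer t.Done() // mark this goroutine as finished
		for {
			select {
			case <-t.Dying(): // Kill was called; shut down cleanly
				return
			case <-time.After(10 * time.Millisecond):
				// periodic work would go here
			}
		}
	}()
	t.Kill(nil)  // request termination; nil means a clean stop
	_ = t.Wait() // block until Done runs; returns the kill reason
}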